Diffstat (limited to 'drivers'): file mode, path, lines changed
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig36
-rw-r--r--drivers/acpi/ac.c25
-rw-r--r--drivers/acpi/acpi_cmos_rtc.c2
-rw-r--r--drivers/acpi/acpi_lpss.c82
-rw-r--r--drivers/acpi/acpi_pad.c24
-rw-r--r--drivers/acpi/acpica/Makefile4
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h4
-rw-r--r--drivers/acpi/acpica/acdispat.h15
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h364
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h10
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/acmacros.h16
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h5
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h16
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h5
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h4
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/amlresrc.h2
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c61
-rw-r--r--drivers/acpi/acpica/dsmethod.c158
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c18
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c2
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evhandler.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c13
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c21
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c231
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c14
-rw-r--r--drivers/acpi/acpica/exutils.c82
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwesleep.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c2
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsarguments.c2
-rw-r--r--drivers/acpi/acpica/nsconvert.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c7
-rw-r--r--drivers/acpi/acpica/nsload.c6
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c12
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c42
-rw-r--r--drivers/acpi/acpica/nsrepair.c31
-rw-r--r--drivers/acpi/acpica/nsrepair2.c39
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c35
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c6
-rw-r--r--drivers/acpi/acpica/psobject.c9
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c4
-rw-r--r--drivers/acpi/acpica/rscreate.c12
-rw-r--r--drivers/acpi/acpica/rsdump.c5
-rw-r--r--drivers/acpi/acpica/rsdumpinfo.c4
-rw-r--r--drivers/acpi/acpica/rsinfo.c6
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c2
-rw-r--r--drivers/acpi/acpica/rslist.c2
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c2
-rw-r--r--drivers/acpi/acpica/rsserial.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/rsxface.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c2
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c17
-rw-r--r--drivers/acpi/acpica/tbprint.c22
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utbuffer.c2
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c8
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c17
-rw-r--r--drivers/acpi/acpica/uterror.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utexcep.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c39
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c2
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c28
-rw-r--r--drivers/acpi/acpica/utownerid.c2
-rw-r--r--drivers/acpi/acpica/utpredef.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c5
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utstring.c2
-rw-r--r--drivers/acpi/acpica/uttrack.c5
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/acpica/utxfinit.c2
-rw-r--r--drivers/acpi/acpica/utxfmutex.c2
-rw-r--r--drivers/acpi/apei/Kconfig2
-rw-r--r--drivers/acpi/battery.c7
-rw-r--r--drivers/acpi/battery.h10
-rw-r--r--drivers/acpi/bus.c63
-rw-r--r--drivers/acpi/button.c4
-rw-r--r--drivers/acpi/container.c5
-rw-r--r--drivers/acpi/device_pm.c41
-rw-r--r--drivers/acpi/dock.c460
-rw-r--r--drivers/acpi/fan.c13
-rw-r--r--drivers/acpi/glue.c12
-rw-r--r--drivers/acpi/internal.h13
-rw-r--r--drivers/acpi/numa.c16
-rw-r--r--drivers/acpi/osl.c46
-rw-r--r--drivers/acpi/pci_irq.c36
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/pci_root.c4
-rw-r--r--drivers/acpi/power.c2
-rw-r--r--drivers/acpi/processor_core.c52
-rw-r--r--drivers/acpi/processor_driver.c2
-rw-r--r--drivers/acpi/processor_perflib.c14
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/scan.c261
-rw-r--r--drivers/acpi/sysfs.c2
-rw-r--r--drivers/acpi/tables.c128
-rw-r--r--drivers/acpi/thermal.c39
-rw-r--r--drivers/acpi/utils.c19
-rw-r--r--drivers/acpi/video.c10
-rw-r--r--drivers/acpi/video_detect.c2
-rw-r--r--drivers/amba/bus.c4
-rw-r--r--drivers/amba/tegra-ahb.c2
-rw-r--r--drivers/ata/Kconfig56
-rw-r--r--drivers/ata/Makefile8
-rw-r--r--drivers/ata/acard-ahci.c1
-rw-r--r--drivers/ata/ahci.c27
-rw-r--r--drivers/ata/ahci.h14
-rw-r--r--drivers/ata/ahci_da850.c114
-rw-r--r--drivers/ata/ahci_imx.c333
-rw-r--r--drivers/ata/ahci_platform.c291
-rw-r--r--drivers/ata/ahci_st.c245
-rw-r--r--drivers/ata/ahci_sunxi.c249
-rw-r--r--drivers/ata/ahci_xgene.c486
-rw-r--r--drivers/ata/ata_generic.c1
-rw-r--r--drivers/ata/libahci.c38
-rw-r--r--drivers/ata/libahci_platform.c541
-rw-r--r--drivers/ata/libata-acpi.c73
-rw-r--r--drivers/ata/libata-core.c139
-rw-r--r--drivers/ata/libata-eh.c18
-rw-r--r--drivers/ata/libata-zpodd.c21
-rw-r--r--drivers/ata/pata_acpi.c1
-rw-r--r--drivers/ata/pata_amd.c1
-rw-r--r--drivers/ata/pata_arasan_cf.c2
-rw-r--r--drivers/ata/pata_artop.c1
-rw-r--r--drivers/ata/pata_at91.c1
-rw-r--r--drivers/ata/pata_atiixp.c1
-rw-r--r--drivers/ata/pata_atp867x.c1
-rw-r--r--drivers/ata/pata_cmd640.c1
-rw-r--r--drivers/ata/pata_cmd64x.c1
-rw-r--r--drivers/ata/pata_cs5520.c1
-rw-r--r--drivers/ata/pata_cs5530.c1
-rw-r--r--drivers/ata/pata_cs5535.c1
-rw-r--r--drivers/ata/pata_cs5536.c1
-rw-r--r--drivers/ata/pata_cypress.c1
-rw-r--r--drivers/ata/pata_efar.c1
-rw-r--r--drivers/ata/pata_ep93xx.c1
-rw-r--r--drivers/ata/pata_hpt366.c1
-rw-r--r--drivers/ata/pata_hpt37x.c1
-rw-r--r--drivers/ata/pata_hpt3x2n.c1
-rw-r--r--drivers/ata/pata_hpt3x3.c1
-rw-r--r--drivers/ata/pata_imx.c18
-rw-r--r--drivers/ata/pata_it8213.c1
-rw-r--r--drivers/ata/pata_it821x.c1
-rw-r--r--drivers/ata/pata_jmicron.c1
-rw-r--r--drivers/ata/pata_legacy.c1
-rw-r--r--drivers/ata/pata_marvell.c1
-rw-r--r--drivers/ata/pata_mpiix.c1
-rw-r--r--drivers/ata/pata_netcell.c1
-rw-r--r--drivers/ata/pata_ninja32.c1
-rw-r--r--drivers/ata/pata_ns87410.c1
-rw-r--r--drivers/ata/pata_ns87415.c1
-rw-r--r--drivers/ata/pata_oldpiix.c1
-rw-r--r--drivers/ata/pata_opti.c1
-rw-r--r--drivers/ata/pata_optidma.c1
-rw-r--r--drivers/ata/pata_pcmcia.c1
-rw-r--r--drivers/ata/pata_pdc2027x.c1
-rw-r--r--drivers/ata/pata_pdc202xx_old.c1
-rw-r--r--drivers/ata/pata_piccolo.c1
-rw-r--r--drivers/ata/pata_platform.c1
-rw-r--r--drivers/ata/pata_pxa.c1
-rw-r--r--drivers/ata/pata_radisys.c1
-rw-r--r--drivers/ata/pata_rdc.c1
-rw-r--r--drivers/ata/pata_rz1000.c1
-rw-r--r--drivers/ata/pata_sc1200.c1
-rw-r--r--drivers/ata/pata_scc.c1
-rw-r--r--drivers/ata/pata_sch.c1
-rw-r--r--drivers/ata/pata_serverworks.c1
-rw-r--r--drivers/ata/pata_sil680.c1
-rw-r--r--drivers/ata/pata_sis.c1
-rw-r--r--drivers/ata/pata_sl82c105.c1
-rw-r--r--drivers/ata/pata_triflex.c1
-rw-r--r--drivers/ata/pata_via.c1
-rw-r--r--drivers/ata/pdc_adma.c1
-rw-r--r--drivers/ata/sata_dwc_460ex.c4
-rw-r--r--drivers/ata/sata_highbank.c6
-rw-r--r--drivers/ata/sata_nv.c1
-rw-r--r--drivers/ata/sata_promise.c1
-rw-r--r--drivers/ata/sata_qstor.c1
-rw-r--r--drivers/ata/sata_sil.c1
-rw-r--r--drivers/ata/sata_sis.c1
-rw-r--r--drivers/ata/sata_svw.c1
-rw-r--r--drivers/ata/sata_sx4.c10
-rw-r--r--drivers/ata/sata_uli.c1
-rw-r--r--drivers/ata/sata_via.c1
-rw-r--r--drivers/ata/sata_vsc.c1
-rw-r--r--drivers/atm/ambassador.c2
-rw-r--r--drivers/atm/firestream.c6
-rw-r--r--drivers/atm/idt77105.c6
-rw-r--r--drivers/atm/nicstar.c24
-rw-r--r--drivers/atm/solos-pci.c2
-rw-r--r--drivers/base/Kconfig3
-rw-r--r--drivers/base/attribute_container.c1
-rw-r--r--drivers/base/bus.c2
-rw-r--r--drivers/base/core.c20
-rw-r--r--drivers/base/cpu.c46
-rw-r--r--drivers/base/devres.c26
-rw-r--r--drivers/base/dma-buf.c18
-rw-r--r--drivers/base/firmware_class.c13
-rw-r--r--drivers/base/node.c4
-rw-r--r--drivers/base/platform.c11
-rw-r--r--drivers/base/power/Makefile3
-rw-r--r--drivers/base/power/clock_ops.c1
-rw-r--r--drivers/base/power/common.c1
-rw-r--r--drivers/base/power/domain.c16
-rw-r--r--drivers/base/power/domain_governor.c1
-rw-r--r--drivers/base/power/generic_ops.c2
-rw-r--r--drivers/base/power/main.c280
-rw-r--r--drivers/base/power/opp.c1
-rw-r--r--drivers/base/power/power.h4
-rw-r--r--drivers/base/power/qos.c220
-rw-r--r--drivers/base/power/runtime.c164
-rw-r--r--drivers/base/power/sysfs.c97
-rw-r--r--drivers/base/regmap/internal.h2
-rw-r--r--drivers/base/regmap/regcache.c13
-rw-r--r--drivers/base/regmap/regmap-debugfs.c2
-rw-r--r--drivers/base/regmap/regmap-i2c.c1
-rw-r--r--drivers/base/regmap/regmap-irq.c6
-rw-r--r--drivers/base/regmap/regmap-mmio.c57
-rw-r--r--drivers/base/regmap/regmap-spi.c1
-rw-r--r--drivers/base/regmap/regmap-spmi.c228
-rw-r--r--drivers/base/regmap/regmap.c352
-rw-r--r--drivers/base/topology.c13
-rw-r--r--drivers/bcma/driver_gpio.c9
-rw-r--r--drivers/block/DAC960.c34
-rw-r--r--drivers/block/ataflop.c16
-rw-r--r--drivers/block/cciss.c2
-rw-r--r--drivers/block/drbd/drbd_actlog.c629
-rw-r--r--drivers/block/drbd/drbd_bitmap.c368
-rw-r--r--drivers/block/drbd/drbd_int.h1130
-rw-r--r--drivers/block/drbd/drbd_main.c2009
-rw-r--r--drivers/block/drbd/drbd_nl.c1653
-rw-r--r--drivers/block/drbd/drbd_proc.c140
-rw-r--r--drivers/block/drbd/drbd_protocol.h295
-rw-r--r--drivers/block/drbd/drbd_receiver.c2532
-rw-r--r--drivers/block/drbd/drbd_req.c464
-rw-r--r--drivers/block/drbd/drbd_req.h20
-rw-r--r--drivers/block/drbd/drbd_state.c859
-rw-r--r--drivers/block/drbd/drbd_state.h40
-rw-r--r--drivers/block/drbd/drbd_strings.c1
-rw-r--r--drivers/block/drbd/drbd_strings.h9
-rw-r--r--drivers/block/drbd/drbd_worker.c944
-rw-r--r--drivers/block/drbd/drbd_wrappers.h14
-rw-r--r--drivers/block/floppy.c42
-rw-r--r--drivers/block/loop.c8
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c92
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h2
-rw-r--r--drivers/block/nvme-core.c735
-rw-r--r--drivers/block/nvme-scsi.c43
-rw-r--r--drivers/block/rbd.c87
-rw-r--r--drivers/block/skd_main.c67
-rw-r--r--drivers/block/swim3.c18
-rw-r--r--drivers/block/virtio_blk.c23
-rw-r--r--drivers/block/zram/Kconfig10
-rw-r--r--drivers/block/zram/Makefile4
-rw-r--r--drivers/block/zram/zcomp.c353
-rw-r--r--drivers/block/zram/zcomp.h68
-rw-r--r--drivers/block/zram/zcomp_lz4.c47
-rw-r--r--drivers/block/zram/zcomp_lz4.h17
-rw-r--r--drivers/block/zram/zcomp_lzo.c47
-rw-r--r--drivers/block/zram/zcomp_lzo.h17
-rw-r--r--drivers/block/zram/zram_drv.c383
-rw-r--r--drivers/block/zram/zram_drv.h21
-rw-r--r--drivers/bluetooth/Kconfig3
-rw-r--r--drivers/bluetooth/ath3k.c97
-rw-r--r--drivers/bluetooth/bfusb.c14
-rw-r--r--drivers/bluetooth/bluecard_cs.c11
-rw-r--r--drivers/bluetooth/bt3c_cs.c7
-rw-r--r--drivers/bluetooth/btmrvl_main.c11
-rw-r--r--drivers/bluetooth/btuart_cs.c6
-rw-r--r--drivers/bluetooth/btusb.c59
-rw-r--r--drivers/bluetooth/dtl1_cs.c9
-rw-r--r--drivers/bluetooth/hci_bcsp.c31
-rw-r--r--drivers/bluetooth/hci_h5.c10
-rw-r--r--drivers/bluetooth/hci_ldisc.c9
-rw-r--r--drivers/bluetooth/hci_vhci.c3
-rw-r--r--drivers/bus/arm-cci.c24
-rw-r--r--drivers/bus/imx-weim.c58
-rw-r--r--drivers/bus/mvebu-mbus.c7
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/agp/frontend.c1
-rw-r--r--drivers/char/agp/generic.c1
-rw-r--r--drivers/char/agp/intel-gtt.c1
-rw-r--r--drivers/char/agp/sgi-agp.c1
-rw-r--r--drivers/char/hw_random/Kconfig6
-rw-r--r--drivers/char/hw_random/atmel-rng.c23
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c1
-rw-r--r--drivers/char/hw_random/core.c18
-rw-r--r--drivers/char/hw_random/exynos-rng.c1
-rw-r--r--drivers/char/hw_random/n2-drv.c1
-rw-r--r--drivers/char/hw_random/nomadik-rng.c14
-rw-r--r--drivers/char/hw_random/octeon-rng.c1
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c3
-rw-r--r--drivers/char/hw_random/picoxcell-rng.c27
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c40
-rw-r--r--drivers/char/hw_random/virtio-rng.c3
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c1
-rw-r--r--drivers/char/mem.c6
-rw-r--r--drivers/char/mwave/3780i.c1
-rw-r--r--drivers/char/random.c244
-rw-r--r--drivers/char/tile-srom.c1
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c1
-rw-r--r--drivers/char/tpm/tpm_i2c_stm_st33.c1
-rw-r--r--drivers/clk/Kconfig7
-rw-r--r--drivers/clk/Makefile4
-rw-r--r--drivers/clk/at91/clk-programmable.c202
-rw-r--r--drivers/clk/at91/clk-system.c76
-rw-r--r--drivers/clk/bcm/Kconfig9
-rw-r--r--drivers/clk/bcm/Makefile3
-rw-r--r--drivers/clk/bcm/clk-bcm281xx.c416
-rw-r--r--drivers/clk/bcm/clk-kona-setup.c769
-rw-r--r--drivers/clk/bcm/clk-kona.c1033
-rw-r--r--drivers/clk/bcm/clk-kona.h410
-rw-r--r--drivers/clk/clk-axi-clkgen.c312
-rw-r--r--drivers/clk/clk-divider.c10
-rw-r--r--drivers/clk/clk-moxart.c97
-rw-r--r--drivers/clk/clk-ppc-corenet.c70
-rw-r--r--drivers/clk/clk-s2mps11.c29
-rw-r--r--drivers/clk/clk.c131
-rw-r--r--drivers/clk/clkdev.c2
-rw-r--r--drivers/clk/hisilicon/Makefile5
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c298
-rw-r--r--drivers/clk/hisilicon/clk-hip04.c58
-rw-r--r--drivers/clk/hisilicon/clk.c62
-rw-r--r--drivers/clk/hisilicon/clk.h17
-rw-r--r--drivers/clk/mmp/clk-frac.c20
-rw-r--r--drivers/clk/mvebu/Kconfig8
-rw-r--r--drivers/clk/mvebu/Makefile2
-rw-r--r--drivers/clk/mvebu/armada-375.c184
-rw-r--r--drivers/clk/mvebu/armada-38x.c167
-rw-r--r--drivers/clk/mvebu/clk-corediv.c154
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c2
-rw-r--r--drivers/clk/samsung/clk-exynos4.c172
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c49
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c49
-rw-r--r--drivers/clk/samsung/clk-exynos5440.c2
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c79
-rw-r--r--drivers/clk/samsung/clk.c71
-rw-r--r--drivers/clk/samsung/clk.h14
-rw-r--r--drivers/clk/shmobile/Makefile1
-rw-r--r--drivers/clk/shmobile/clk-div6.c2
-rw-r--r--drivers/clk/shmobile/clk-mstp.c2
-rw-r--r--drivers/clk/shmobile/clk-rcar-gen2.c8
-rw-r--r--drivers/clk/shmobile/clk-rz.c103
-rw-r--r--drivers/clk/sirf/clk-atlas6.c3
-rw-r--r--drivers/clk/sirf/clk-common.c3
-rw-r--r--drivers/clk/sirf/clk-prima2.c3
-rw-r--r--drivers/clk/socfpga/Makefile3
-rw-r--r--drivers/clk/socfpga/clk-gate.c263
-rw-r--r--drivers/clk/socfpga/clk-periph.c94
-rw-r--r--drivers/clk/socfpga/clk-pll.c131
-rw-r--r--drivers/clk/socfpga/clk.c326
-rw-r--r--drivers/clk/socfpga/clk.h57
-rw-r--r--drivers/clk/st/Makefile1
-rw-r--r--drivers/clk/st/clkgen-fsyn.c1039
-rw-r--r--drivers/clk/st/clkgen-mux.c820
-rw-r--r--drivers/clk/st/clkgen-pll.c698
-rw-r--r--drivers/clk/st/clkgen.h48
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c305
-rw-r--r--drivers/clk/tegra/clk-periph.c2
-rw-r--r--drivers/clk/ti/clk-33xx.c1
-rw-r--r--drivers/clk/ti/clk-3xxx.c4
-rw-r--r--drivers/clk/ti/clk-44xx.c1
-rw-r--r--drivers/clk/ti/clk-54xx.c1
-rw-r--r--drivers/clk/ti/clk-7xx.c1
-rw-r--r--drivers/clk/ti/divider.c8
-rw-r--r--drivers/clk/ux500/u8500_of_clk.c3
-rw-r--r--drivers/clk/versatile/clk-icst.c21
-rw-r--r--drivers/clk/versatile/clk-icst.h1
-rw-r--r--drivers/clk/versatile/clk-impd1.c12
-rw-r--r--drivers/clk/versatile/clk-integrator.c83
-rw-r--r--drivers/clk/versatile/clk-realview.c4
-rw-r--r--drivers/clk/zynq/clkc.c93
-rw-r--r--drivers/clk/zynq/pll.c18
-rw-r--r--drivers/clocksource/Kconfig51
-rw-r--r--drivers/clocksource/Makefile4
-rw-r--r--drivers/clocksource/arm_arch_timer.c1
-rw-r--r--drivers/clocksource/cadence_ttc_timer.c121
-rw-r--r--drivers/clocksource/cyclone.c113
-rw-r--r--drivers/clocksource/dummy_timer.c11
-rw-r--r--drivers/clocksource/exynos_mct.c4
-rw-r--r--drivers/clocksource/qcom-timer.c330
-rw-r--r--drivers/clocksource/sun4i_timer.c2
-rw-r--r--drivers/clocksource/time-armada-370-xp.c12
-rw-r--r--drivers/clocksource/time-orion.c28
-rw-r--r--drivers/clocksource/timer-keystone.c241
-rw-r--r--drivers/clocksource/timer-marco.c13
-rw-r--r--drivers/clocksource/timer-prima2.c16
-rw-r--r--drivers/clocksource/timer-u300.c447
-rw-r--r--drivers/connector/cn_proc.c18
-rw-r--r--drivers/connector/connector.c21
-rw-r--r--drivers/cpufreq/Kconfig2
-rw-r--r--drivers/cpufreq/Kconfig.arm25
-rw-r--r--drivers/cpufreq/Kconfig.powerpc8
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c10
-rw-r--r--drivers/cpufreq/arm_big_little.c6
-rw-r--r--drivers/cpufreq/at32ap-cpufreq.c2
-rw-r--r--drivers/cpufreq/blackfin-cpufreq.c1
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c1
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c4
-rw-r--r--drivers/cpufreq/cpufreq.c497
-rw-r--r--drivers/cpufreq/cpufreq_stats.c40
-rw-r--r--drivers/cpufreq/cris-artpec3-cpufreq.c7
-rw-r--r--drivers/cpufreq/cris-etraxfs-cpufreq.c7
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c1
-rw-r--r--drivers/cpufreq/e_powersaver.c1
-rw-r--r--drivers/cpufreq/elanfreq.c19
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c97
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c12
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c30
-rw-r--r--drivers/cpufreq/exynos5250-cpufreq.c34
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c5
-rw-r--r--drivers/cpufreq/freq_table.c57
-rw-r--r--drivers/cpufreq/gx-suspmod.c4
-rw-r--r--drivers/cpufreq/ia64-acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c1
-rw-r--r--drivers/cpufreq/integrator-cpufreq.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c45
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c7
-rw-r--r--drivers/cpufreq/longhaul.c7
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c3
-rw-r--r--drivers/cpufreq/maple-cpufreq.c6
-rw-r--r--drivers/cpufreq/omap-cpufreq.c1
-rw-r--r--drivers/cpufreq/p4-clockmod.c21
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c13
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c4
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c6
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c6
-rw-r--r--drivers/cpufreq/powernow-k6.c23
-rw-r--r--drivers/cpufreq/powernow-k7.c6
-rw-r--r--drivers/cpufreq/powernow-k8.c11
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c341
-rw-r--r--drivers/cpufreq/ppc-corenet-cpufreq.c6
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c19
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c1
-rw-r--r--drivers/cpufreq/pxa3xx-cpufreq.c1
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c20
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c8
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c26
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c61
-rw-r--r--drivers/cpufreq/sc520_freq.c7
-rw-r--r--drivers/cpufreq/sh-cpufreq.c5
-rw-r--r--drivers/cpufreq/sparc-us2e-cpufreq.c4
-rw-r--r--drivers/cpufreq/sparc-us3-cpufreq.c4
-rw-r--r--drivers/cpufreq/spear-cpufreq.c21
-rw-r--r--drivers/cpufreq/speedstep-centrino.c2
-rw-r--r--drivers/cpufreq/speedstep-ich.c7
-rw-r--r--drivers/cpufreq/speedstep-smi.c7
-rw-r--r--drivers/cpufreq/tegra-cpufreq.c47
-rw-r--r--drivers/cpufreq/unicore2-cpufreq.c6
-rw-r--r--drivers/cpuidle/Kconfig.arm2
-rw-r--r--drivers/cpuidle/coupled.c2
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c107
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c6
-rw-r--r--drivers/cpuidle/cpuidle.c108
-rw-r--r--drivers/cpuidle/driver.c2
-rw-r--r--drivers/cpuidle/governors/menu.c75
-rw-r--r--drivers/cpuidle/sysfs.c3
-rw-r--r--drivers/crypto/Kconfig22
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/bfin_crc.c45
-rw-r--r--drivers/crypto/caam/caamalg.c384
-rw-r--r--drivers/crypto/caam/caamrng.c17
-rw-r--r--drivers/crypto/caam/compat.h1
-rw-r--r--drivers/crypto/caam/ctrl.c61
-rw-r--r--drivers/crypto/caam/ctrl.h2
-rw-r--r--drivers/crypto/caam/desc_constr.h27
-rw-r--r--drivers/crypto/caam/regs.h4
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c224
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c130
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h8
-rw-r--r--drivers/crypto/ccp/ccp-dev.c21
-rw-r--r--drivers/crypto/ccp/ccp-ops.c108
-rw-r--r--drivers/crypto/mxs-dcp.c83
-rw-r--r--drivers/crypto/omap-aes.c4
-rw-r--r--drivers/crypto/omap-des.c1216
-rw-r--r--drivers/crypto/omap-sham.c12
-rw-r--r--drivers/crypto/picoxcell_crypto.c16
-rw-r--r--drivers/crypto/s5p-sss.c13
-rw-r--r--drivers/crypto/sahara.c26
-rw-r--r--drivers/crypto/talitos.c4
-rw-r--r--drivers/crypto/tegra-aes.c1087
-rw-r--r--drivers/crypto/tegra-aes.h103
-rw-r--r--drivers/devfreq/devfreq.c31
-rw-r--r--drivers/dma/Kconfig21
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/acpi-dma.c17
-rw-r--r--drivers/dma/at_hdmac.c1
-rw-r--r--drivers/dma/cppi41.c7
-rw-r--r--drivers/dma/dmaengine.c9
-rw-r--r--drivers/dma/dmatest.c4
-rw-r--r--drivers/dma/dw/core.c21
-rw-r--r--drivers/dma/dw/pci.c36
-rw-r--r--drivers/dma/dw/regs.h4
-rw-r--r--drivers/dma/edma.c5
-rw-r--r--drivers/dma/fsl-edma.c975
-rw-r--r--drivers/dma/imx-dma.c13
-rw-r--r--drivers/dma/mmp_pdma.c8
-rw-r--r--drivers/dma/mmp_tdma.c50
-rw-r--r--drivers/dma/omap-dma.c677
-rw-r--r--drivers/dma/pch_dma.c4
-rw-r--r--drivers/dma/qcom_bam_dma.c1111
-rw-r--r--drivers/dma/s3c24xx-dma.c2
-rw-r--r--drivers/dma/sh/Kconfig6
-rw-r--r--drivers/dma/sh/Makefile1
-rw-r--r--drivers/dma/sh/rcar-audmapp.c320
-rw-r--r--drivers/dma/sh/shdma-base.c10
-rw-r--r--drivers/dma/sh/shdma-of.c3
-rw-r--r--drivers/dma/sh/shdmac.c13
-rw-r--r--drivers/dma/sh/sudmac.c4
-rw-r--r--drivers/dma/sirf-dma.c23
-rw-r--r--drivers/edac/amd64_edac.c38
-rw-r--r--drivers/edac/amd64_edac.h3
-rw-r--r--drivers/edac/amd8111_edac.c44
-rw-r--r--drivers/edac/e752x_edac.c30
-rw-r--r--drivers/edac/edac_mc_sysfs.c2
-rw-r--r--drivers/edac/ghes_edac.c2
-rw-r--r--drivers/edac/i3200_edac.c2
-rw-r--r--drivers/edac/i5100_edac.c17
-rw-r--r--drivers/edac/i5400_edac.c6
-rw-r--r--drivers/edac/i7300_edac.c4
-rw-r--r--drivers/edac/i7core_edac.c14
-rw-r--r--drivers/edac/i82875p_edac.c2
-rw-r--r--drivers/edac/mce_amd.c65
-rw-r--r--drivers/edac/mpc85xx_edac.c6
-rw-r--r--drivers/edac/octeon_edac-lmc.c179
-rw-r--r--drivers/edac/sb_edac.c31
-rw-r--r--drivers/extcon/Kconfig4
-rw-r--r--drivers/extcon/Makefile2
-rw-r--r--drivers/extcon/extcon-class.c42
-rw-r--r--drivers/extcon/extcon-gpio.c4
-rw-r--r--drivers/extcon/extcon-palmas.c5
-rw-r--r--drivers/extcon/of_extcon.c64
-rw-r--r--drivers/firmware/dcdbas.c2
-rw-r--r--drivers/firmware/efi/efi-stub-helper.c136
-rw-r--r--drivers/firmware/efi/efi.c5
-rw-r--r--drivers/firmware/efi/efivars.c2
-rw-r--r--drivers/firmware/google/gsmi.c7
-rw-r--r--drivers/firmware/google/memconsole.c47
-rw-r--r--drivers/fmc/fmc-core.c22
-rw-r--r--drivers/fmc/fmc-sdb.c41
-rw-r--r--drivers/gpio/Kconfig42
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/gpio-adnp.c15
-rw-r--r--drivers/gpio/gpio-adp5588.c16
-rw-r--r--drivers/gpio/gpio-bcm-kona.c109
-rw-r--r--drivers/gpio/gpio-clps711x.c1
-rw-r--r--drivers/gpio/gpio-davinci.c75
-rw-r--r--drivers/gpio/gpio-dwapb.c438
-rw-r--r--drivers/gpio/gpio-em.c14
-rw-r--r--drivers/gpio/gpio-generic.c20
-rw-r--r--drivers/gpio/gpio-ich.c96
-rw-r--r--drivers/gpio/gpio-intel-mid.c22
-rw-r--r--drivers/gpio/gpio-iop.c2
-rw-r--r--drivers/gpio/gpio-lynxpoint.c16
-rw-r--r--drivers/gpio/gpio-max732x.c9
-rw-r--r--drivers/gpio/gpio-mcp23s08.c36
-rw-r--r--drivers/gpio/gpio-moxart.c40
-rw-r--r--drivers/gpio/gpio-mvebu.c7
-rw-r--r--drivers/gpio/gpio-mxs.c3
-rw-r--r--drivers/gpio/gpio-omap.c20
-rw-r--r--drivers/gpio/gpio-pca953x.c10
-rw-r--r--drivers/gpio/gpio-pch.c8
-rw-r--r--drivers/gpio/gpio-pl061.c108
-rw-r--r--drivers/gpio/gpio-rc5t583.c2
-rw-r--r--drivers/gpio/gpio-rcar.c32
-rw-r--r--drivers/gpio/gpio-samsung.c1
-rw-r--r--drivers/gpio/gpio-syscon.c191
-rw-r--r--drivers/gpio/gpio-tnetv107x.c206
-rw-r--r--drivers/gpio/gpio-twl4030.c6
-rw-r--r--drivers/gpio/gpio-tz1090.c28
-rw-r--r--drivers/gpio/gpio-vr41xx.c20
-rw-r--r--drivers/gpio/gpio-zevio.c220
-rw-r--r--drivers/gpio/gpiolib-acpi.c474
-rw-r--r--drivers/gpio/gpiolib-of.c3
-rw-r--r--drivers/gpio/gpiolib.c417
-rw-r--r--drivers/gpio/gpiolib.h3
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/Makefile4
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c24
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c12
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c5
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c4
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c6
-rw-r--r--drivers/gpu/drm/bridge/Kconfig5
-rw-r--r--drivers/gpu/drm/bridge/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/ptn3460.c350
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c10
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c5
-rw-r--r--drivers/gpu/drm/drm_cache.c10
-rw-r--r--drivers/gpu/drm/drm_crtc.c936
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c247
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h38
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c396
-rw-r--r--drivers/gpu/drm/drm_drv.c136
-rw-r--r--drivers/gpu/drm/drm_edid.c34
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c45
-rw-r--r--drivers/gpu/drm/drm_fops.c124
-rw-r--r--drivers/gpu/drm/drm_gem.c71
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c38
-rw-r--r--drivers/gpu/drm/drm_ioctl.c7
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c6
-rw-r--r--drivers/gpu/drm/drm_mm.c285
-rw-r--r--drivers/gpu/drm/drm_modes.c346
-rw-r--r--drivers/gpu/drm/drm_pci.c2
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c333
-rw-r--r--drivers/gpu/drm/drm_platform.c2
-rw-r--r--drivers/gpu/drm/drm_prime.c112
-rw-r--r--drivers/gpu/drm/drm_stub.c503
-rw-r--r--drivers/gpu/drm/drm_usb.c2
-rw-r--r--drivers/gpu/drm/exynos/Kconfig24
-rw-r--r--drivers/gpu/drm/exynos/Makefile9
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c (renamed from drivers/video/exynos/exynos_dp_core.c)304
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h (renamed from drivers/video/exynos/exynos_dp_core.h)9
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_reg.c (renamed from drivers/video/exynos/exynos_dp_reg.c)0
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_reg.h (renamed from drivers/video/exynos/exynos_dp_reg.h)0
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c92
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c193
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c159
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c339
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c197
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h165
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c1524
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c359
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h18
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c700
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c439
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h67
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c441
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c472
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c573
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.h20
-rw-r--r--drivers/gpu/drm/gma500/Kconfig1
-rw-r--r--drivers/gpu/drm/gma500/Makefile2
-rw-r--r--drivers/gpu/drm/gma500/blitter.c51
-rw-r--r--drivers/gpu/drm/gma500/blitter.h22
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c40
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c9
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c73
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c11
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c5
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c2
-rw-r--r--drivers/gpu/drm/gma500/gem.c56
-rw-r--r--drivers/gpu/drm/gma500/gem.h21
-rw-r--r--drivers/gpu/drm/gma500/gma_device.c56
-rw-r--r--drivers/gpu/drm/gma500/gma_device.h21
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c23
-rw-r--r--drivers/gpu/drm/gma500/gma_display.h3
-rw-r--r--drivers/gpu/drm/gma500/gtt.c45
-rw-r--r--drivers/gpu/drm/gma500/gtt.h3
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c16
-rw-r--r--drivers/gpu/drm/gma500/mmu.c299
-rw-r--r--drivers/gpu/drm/gma500/mmu.h93
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c12
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c9
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c5
-rw-r--r--drivers/gpu/drm/gma500/opregion.c25
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c42
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c404
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h203
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c32
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c5
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c22
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c81
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c602
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/Makefile80
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c4
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c30
-rw-r--r--drivers/gpu/drm/i915/dvo_ns2501.c10
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c10
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c24
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c485
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c716
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c162
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c299
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h729
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c697
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c483
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c63
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c198
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c1256
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c616
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c836
-rw-r--r--drivers/gpu/drm/i915/i915_params.c154
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h560
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c40
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c71
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h24
-rw-r--r--drivers/gpu/drm/i915/i915_ums.c8
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c8
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h174
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c70
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c117
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1579
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c912
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h106
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c23
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c6
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c371
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c108
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c24
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c40
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c21
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1202
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c152
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h42
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c78
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c18
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c21
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c281
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c26
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c5
-rw-r--r--drivers/gpu/drm/msm/Kconfig2
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c105
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c65
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h16
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c50
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h25
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c273
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c26
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c33
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c27
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.c3
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c139
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h4
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c15
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c85
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h16
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig3
-rw-r--r--drivers/gpu/drm/nouveau/Makefile16
-rw-r--r--drivers/gpu/drm/nouveau/core/core/namedb.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/core/parent.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c85
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/gm100.c106
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv10.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv20.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv30.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/gm107.c101
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c370
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h54
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c191
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c67
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c361
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c187
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/priv.h10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/falcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c509
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c507
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c989
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c1041
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c308
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h170
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c270
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc3.c)78
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c144
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c216
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c278
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c285
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c782
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc542
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h473
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h354
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h336
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h396
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h396
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h396
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc540
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h916
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gm107.c465
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv108.c133
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c312
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h214
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc4.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nvc3.c)86
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c175
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c154
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve4.c192
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c136
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/xtensa.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h12
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/namedb.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/device.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h19
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/devinit.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/therm.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/timer.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h12
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/P0260.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c55
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c29
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c18
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/therm.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/base.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/gm107.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramgm107.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c)109
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c142
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c55
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c25
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c57
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c88
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c17
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c13
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c34
-rw-r--r--drivers/gpu/drm/panel/Kconfig14
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-ld9040.c376
-rw-r--r--drivers/gpu/drm/panel/panel-s6e8aa0.c1069
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c157
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c10
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c7
-rw-r--r--drivers/gpu/drm/radeon/Makefile8
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c20
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c268
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c82
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c67
-rw-r--r--drivers/gpu/drm/radeon/cik.c142
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c6
-rw-r--r--drivers/gpu/drm/radeon/cikd.h49
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c210
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c4
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c64
-rw-r--r--drivers/gpu/drm/radeon/ni.c8
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c6
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/r100.c49
-rw-r--r--drivers/gpu/drm/radeon/r200.c20
-rw-r--r--drivers/gpu/drm/radeon/r300.c32
-rw-r--r--drivers/gpu/drm/radeon/r600.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c110
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c48
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h158
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h14
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c163
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c248
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c59
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c958
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c149
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c119
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c39
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c699
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c966
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/si.c10
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/sid.h47
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c2
-rw-r--r--drivers/gpu/drm/radeon/vce_v1_0.c187
-rw-r--r--drivers/gpu/drm/radeon/vce_v2_0.c181
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c5
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c16
-rw-r--r--drivers/gpu/drm/tegra/Makefile2
-rw-r--r--drivers/gpu/drm/tegra/bus.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c16
-rw-r--r--drivers/gpu/drm/tegra/dc.h1
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c544
-rw-r--r--drivers/gpu/drm/tegra/dpaux.h73
-rw-r--r--drivers/gpu/drm/tegra/drm.c19
-rw-r--r--drivers/gpu/drm/tegra/drm.h20
-rw-r--r--drivers/gpu/drm/tegra/dsi.c20
-rw-r--r--drivers/gpu/drm/tegra/dsi.h20
-rw-r--r--drivers/gpu/drm/tegra/gem.c25
-rw-r--r--drivers/gpu/drm/tegra/gem.h14
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c14
-rw-r--r--drivers/gpu/drm/tegra/mipi-phy.c20
-rw-r--r--drivers/gpu/drm/tegra/mipi-phy.h20
-rw-r--r--drivers/gpu/drm/tegra/output.c8
-rw-r--r--drivers/gpu/drm/tegra/sor.c1092
-rw-r--r--drivers/gpu/drm/tegra/sor.h278
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c29
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c11
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c46
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c148
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c143
-rw-r--r--drivers/gpu/host1x/syncpt.c1
-rw-r--r--drivers/hid/Kconfig19
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-core.c53
-rw-r--r--drivers/hid/hid-cp2112.c1073
-rw-r--r--drivers/hid/hid-hyperv.c10
-rw-r--r--drivers/hid/hid-ids.h8
-rw-r--r--drivers/hid/hid-input.c21
-rw-r--r--drivers/hid/hid-lg.c6
-rw-r--r--drivers/hid/hid-logitech-dj.c111
-rw-r--r--drivers/hid/hid-magicmouse.c4
-rw-r--r--drivers/hid/hid-microsoft.c74
-rw-r--r--drivers/hid/hid-multitouch.c281
-rw-r--r--drivers/hid/hid-picolcd_cir.c2
-rw-r--r--drivers/hid/hid-prodikeys.c5
-rw-r--r--drivers/hid/hid-sensor-hub.c217
-rw-r--r--drivers/hid/hid-sony.c808
-rw-r--r--drivers/hid/hid-thingm.c4
-rw-r--r--drivers/hid/hid-wacom.c28
-rw-r--r--drivers/hid/hid-wiimote-core.c4
-rw-r--r--drivers/hid/hidraw.c27
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c139
-rw-r--r--drivers/hid/uhid.c103
-rw-r--r--drivers/hid/usbhid/hid-core.c102
-rw-r--r--drivers/hv/Makefile2
-rw-r--r--drivers/hv/channel.c42
-rw-r--r--drivers/hv/hv_balloon.c3
-rw-r--r--drivers/hv/hv_fcopy.c414
-rw-r--r--drivers/hv/hv_kvp.c4
-rw-r--r--drivers/hv/hv_snapshot.c2
-rw-r--r--drivers/hv/hv_util.c11
-rw-r--r--drivers/hv/hyperv_vmbus.h8
-rw-r--r--drivers/hv/ring_buffer.c17
-rw-r--r--drivers/hv/vmbus_drv.c108
-rw-r--r--drivers/hwmon/Kconfig604
-rw-r--r--drivers/hwmon/Makefile4
-rw-r--r--drivers/hwmon/adc128d818.c491
-rw-r--r--drivers/hwmon/adm1021.c70
-rw-r--r--drivers/hwmon/asc7621.c1
-rw-r--r--drivers/hwmon/atxp1.c2
-rw-r--r--drivers/hwmon/coretemp.c85
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/f71805f.c2
-rw-r--r--drivers/hwmon/hwmon.c5
-rw-r--r--drivers/hwmon/iio_hwmon.c37
-rw-r--r--drivers/hwmon/it87.c9
-rw-r--r--drivers/hwmon/jz4740-hwmon.c25
-rw-r--r--drivers/hwmon/k10temp.c1
-rw-r--r--drivers/hwmon/lm63.c157
-rw-r--r--drivers/hwmon/lm77.c1
-rw-r--r--drivers/hwmon/lm80.c70
-rw-r--r--drivers/hwmon/lm83.c1
-rw-r--r--drivers/hwmon/lm87.c1
-rw-r--r--drivers/hwmon/lm90.c111
-rw-r--r--drivers/hwmon/lm92.c1
-rw-r--r--drivers/hwmon/lm93.c1
-rw-r--r--drivers/hwmon/lm95241.c93
-rw-r--r--drivers/hwmon/lm95245.c112
-rw-r--r--drivers/hwmon/ltc2945.c519
-rw-r--r--drivers/hwmon/ltc4215.c51
-rw-r--r--drivers/hwmon/ltc4222.c237
-rw-r--r--drivers/hwmon/ltc4245.c30
-rw-r--r--drivers/hwmon/ltc4260.c200
-rw-r--r--drivers/hwmon/max1619.c1
-rw-r--r--drivers/hwmon/max1668.c81
-rw-r--r--drivers/hwmon/max6639.c91
-rw-r--r--drivers/hwmon/max6650.c257
-rw-r--r--drivers/hwmon/pc87360.c12
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c25
-rw-r--r--drivers/hwmon/smm665.c2
-rw-r--r--drivers/hwmon/via-cputemp.c14
-rw-r--r--drivers/hwmon/w83792d.c1
-rw-r--r--drivers/hwmon/w83l785ts.c4
-rw-r--r--drivers/i2c/busses/Kconfig29
-rw-r--r--drivers/i2c/busses/Makefile3
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c2
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c2
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-at91.c12
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c26
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c20
-rw-r--r--drivers/i2c/busses/i2c-cadence.c905
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c27
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c125
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c9
-rw-r--r--drivers/i2c/busses/i2c-efm32.c481
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c4
-rw-r--r--drivers/i2c/busses/i2c-gpio.c3
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c5
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c83
-rw-r--r--drivers/i2c/busses/i2c-mxs.c18
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c2
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c253
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c8
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c2
-rw-r--r--drivers/i2c/busses/i2c-qup.c768
-rw-r--r--drivers/i2c/busses/i2c-rcar.c3
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c39
-rw-r--r--drivers/i2c/busses/i2c-sirf.c2
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c2
-rw-r--r--drivers/i2c/busses/i2c-sis630.c2
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-st.c2
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c2
-rw-r--r--drivers/i2c/busses/i2c-xiic.c2
-rw-r--r--drivers/i2c/busses/scx200_acb.c2
-rw-r--r--drivers/i2c/i2c-core.c67
-rw-r--r--drivers/idle/intel_idle.c216
-rw-r--r--drivers/iio/accel/bma180.c4
-rw-r--r--drivers/iio/adc/Kconfig43
-rw-r--r--drivers/iio/adc/Makefile5
-rw-r--r--drivers/iio/adc/max1363.c16
-rw-r--r--drivers/iio/adc/men_z188_adc.c172
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c1
-rw-r--r--drivers/iio/adc/twl4030-madc.c (renamed from drivers/mfd/twl4030-madc.c)321
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c1
-rw-r--r--drivers/iio/adc/vf610_adc.c711
-rw-r--r--drivers/iio/adc/viperboard_adc.c2
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c1333
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c254
-rw-r--r--drivers/iio/adc/xilinx-xadc.h209
-rw-r--r--drivers/iio/buffer_cb.c7
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c39
-rw-r--r--drivers/iio/dac/ad7303.c2
-rw-r--r--drivers/iio/dac/max517.c1
-rw-r--r--drivers/iio/dac/mcp4725.c1
-rw-r--r--drivers/iio/humidity/Kconfig10
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/si7005.c189
-rw-r--r--drivers/iio/imu/Kconfig4
-rw-r--r--drivers/iio/imu/adis16400_core.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h38
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c1
-rw-r--r--drivers/iio/industrialio-buffer.c16
-rw-r--r--drivers/iio/industrialio-core.c51
-rw-r--r--drivers/iio/industrialio-event.c100
-rw-r--r--drivers/iio/industrialio-trigger.c11
-rw-r--r--drivers/iio/light/Kconfig26
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/adjd_s311.c3
-rw-r--r--drivers/iio/light/hid-sensor-prox.c375
-rw-r--r--drivers/iio/light/ltr501.c445
-rw-r--r--drivers/iio/light/tcs3472.c2
-rw-r--r--drivers/iio/magnetometer/ak8975.c1
-rw-r--r--drivers/iio/magnetometer/mag3110.c17
-rw-r--r--drivers/iio/pressure/Kconfig16
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c376
-rw-r--r--drivers/iio/pressure/mpl3115.c2
-rw-r--r--drivers/iio/pressure/st_pressure.h1
-rw-r--r--drivers/iio/pressure/st_pressure_core.c87
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c1
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c1
-rw-r--r--drivers/infiniband/core/cm.c17
-rw-r--r--drivers/infiniband/core/cma.c26
-rw-r--r--drivers/infiniband/core/mad.c14
-rw-r--r--drivers/infiniband/core/umem.c120
-rw-r--r--drivers/infiniband/core/verbs.c47
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c23
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c19
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c134
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c31
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c183
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h11
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c55
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c45
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c148
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h6
-rw-r--r--drivers/infiniband/hw/cxgb4/user.h5
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c257
-rw-r--r--drivers/infiniband/hw/ipath/ipath_diag.c66
-rw-r--r--drivers/infiniband/hw/ipath/ipath_dma.c43
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c39
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c80
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c42
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c122
-rw-r--r--drivers/infiniband/hw/mlx4/main.c42
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c5
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h24
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c39
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c321
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c5
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c62
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c4
-rw-r--r--drivers/infiniband/hw/mlx5/main.c12
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c80
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h14
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c157
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c540
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c43
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c121
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h3
-rw-r--r--drivers/infiniband/hw/nes/nes_user.h5
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c261
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h1
-rw-r--r--drivers/infiniband/hw/ocrdma/Makefile2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h110
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h7
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c299
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c81
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h261
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c623
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h54
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c233
-rw-r--r--drivers/infiniband/hw/qib/qib.h12
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c52
-rw-r--r--drivers/infiniband/hw/qib/qib_dma.c21
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c11
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c37
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c96
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c44
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c14
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c136
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h16
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c93
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h85
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c154
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c470
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c325
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c83
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h1
-rw-r--r--drivers/input/evdev.c4
-rw-r--r--drivers/input/keyboard/Kconfig22
-rw-r--r--drivers/input/keyboard/Makefile2
-rw-r--r--drivers/input/keyboard/clps711x-keypad.c207
-rw-r--r--drivers/input/keyboard/imx_keypad.c4
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c348
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c329
-rw-r--r--drivers/input/misc/Kconfig16
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ims-pcu.c258
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c2
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c9
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c33
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c111
-rw-r--r--drivers/input/misc/soc_button_array.c218
-rw-r--r--drivers/input/misc/uinput.c97
-rw-r--r--drivers/input/misc/wistron_btns.c19
-rw-r--r--drivers/input/mouse/appletouch.c143
-rw-r--r--drivers/input/mousedev.c73
-rw-r--r--drivers/input/serio/Kconfig2
-rw-r--r--drivers/input/serio/hp_sdc.c2
-rw-r--r--drivers/input/sparse-keymap.c2
-rw-r--r--drivers/input/tablet/gtco.c2
-rw-r--r--drivers/input/touchscreen/Kconfig9
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c519
-rw-r--r--drivers/input/touchscreen/st1232.c3
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c384
-rw-r--r--drivers/input/touchscreen/zforce_ts.c95
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/amd_iommu.c8
-rw-r--r--drivers/iommu/amd_iommu_init.c16
-rw-r--r--drivers/iommu/amd_iommu_types.h12
-rw-r--r--drivers/iommu/arm-smmu.c105
-rw-r--r--drivers/iommu/dmar.c513
-rw-r--r--drivers/iommu/intel-iommu.c1610
-rw-r--r--drivers/iommu/intel_irq_remapping.c108
-rw-r--r--drivers/iommu/iova.c64
-rw-r--r--drivers/iommu/omap-iommu.c162
-rw-r--r--drivers/iommu/omap-iommu.h5
-rw-r--r--drivers/iommu/omap-iommu2.c3
-rw-r--r--drivers/iommu/shmobile-iommu.c2
-rw-r--r--drivers/irqchip/Kconfig16
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/exynos-combiner.c3
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c98
-rw-r--r--drivers/irqchip/irq-bcm2835.c4
-rw-r--r--drivers/irqchip/irq-clps711x.c243
-rw-r--r--drivers/irqchip/irq-crossbar.c208
-rw-r--r--drivers/irqchip/irq-gic.c97
-rw-r--r--drivers/irqchip/irq-mmp.c8
-rw-r--r--drivers/irqchip/irq-moxart.c2
-rw-r--r--drivers/irqchip/irq-orion.c2
-rw-r--r--drivers/irqchip/irq-sirfsoc.c2
-rw-r--r--drivers/irqchip/irq-sun4i.c42
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c208
-rw-r--r--drivers/irqchip/irq-vic.c62
-rw-r--r--drivers/irqchip/irq-vt8500.c3
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c2
-rw-r--r--drivers/irqchip/irq-zevio.c2
-rw-r--r--drivers/irqchip/irqchip.c3
-rw-r--r--drivers/isdn/act2000/module.c2
-rw-r--r--drivers/isdn/divert/divert_procfs.c7
-rw-r--r--drivers/isdn/hisax/elsa.c9
-rw-r--r--drivers/isdn/hisax/elsa_ser.c3
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c7
-rw-r--r--drivers/isdn/i4l/isdn_common.c15
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c61
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c23
-rw-r--r--drivers/isdn/pcbit/drv.c6
-rw-r--r--drivers/isdn/sc/init.c4
-rw-r--r--drivers/leds/Kconfig8
-rw-r--r--drivers/leds/led-core.c6
-rw-r--r--drivers/leds/led-triggers.c6
-rw-r--r--drivers/leds/leds-88pm860x.c1
-rw-r--r--drivers/leds/leds-adp5520.c1
-rw-r--r--drivers/leds/leds-asic3.c1
-rw-r--r--drivers/leds/leds-blinkm.c3
-rw-r--r--drivers/leds/leds-clevo-mail.c5
-rw-r--r--drivers/leds/leds-cobalt-qube.c1
-rw-r--r--drivers/leds/leds-da903x.c1
-rw-r--r--drivers/leds/leds-da9052.c1
-rw-r--r--drivers/leds/leds-fsg.c1
-rw-r--r--drivers/leds/leds-gpio.c6
-rw-r--r--drivers/leds/leds-hp6xx.c1
-rw-r--r--drivers/leds/leds-lm3533.c1
-rw-r--r--drivers/leds/leds-lp5521.c1
-rw-r--r--drivers/leds/leds-lp5523.c1
-rw-r--r--drivers/leds/leds-lp5562.c7
-rw-r--r--drivers/leds/leds-lt3593.c1
-rw-r--r--drivers/leds/leds-mc13783.c223
-rw-r--r--drivers/leds/leds-netxbig.c1
-rw-r--r--drivers/leds/leds-ns2.c1
-rw-r--r--drivers/leds/leds-ot200.c1
-rw-r--r--drivers/leds/leds-pwm.c24
-rw-r--r--drivers/leds/leds-s3c24xx.c1
-rw-r--r--drivers/leds/leds-ss4200.c4
-rw-r--r--drivers/leds/leds-wm831x-status.c1
-rw-r--r--drivers/leds/leds-wm8350.c1
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c24
-rw-r--r--drivers/lguest/page_tables.c6
-rw-r--r--drivers/macintosh/adb.c57
-rw-r--r--drivers/mcb/Kconfig31
-rw-r--r--drivers/mcb/Makefile7
-rw-r--r--drivers/mcb/mcb-core.c414
-rw-r--r--drivers/mcb/mcb-internal.h118
-rw-r--r--drivers/mcb/mcb-parse.c159
-rw-r--r--drivers/mcb/mcb-pci.c114
-rw-r--r--drivers/md/Kconfig11
-rw-r--r--drivers/md/Makefile2
-rw-r--r--drivers/md/bcache/Kconfig8
-rw-r--r--drivers/md/bcache/alloc.c173
-rw-r--r--drivers/md/bcache/bcache.h56
-rw-r--r--drivers/md/bcache/bset.c4
-rw-r--r--drivers/md/bcache/bset.h6
-rw-r--r--drivers/md/bcache/btree.c592
-rw-r--r--drivers/md/bcache/btree.h12
-rw-r--r--drivers/md/bcache/extents.c36
-rw-r--r--drivers/md/bcache/journal.c46
-rw-r--r--drivers/md/bcache/journal.h1
-rw-r--r--drivers/md/bcache/movinggc.c18
-rw-r--r--drivers/md/bcache/request.c201
-rw-r--r--drivers/md/bcache/request.h19
-rw-r--r--drivers/md/bcache/stats.c3
-rw-r--r--drivers/md/bcache/super.c64
-rw-r--r--drivers/md/bcache/sysfs.c155
-rw-r--r--drivers/md/bcache/trace.c2
-rw-r--r--drivers/md/bitmap.c1
-rw-r--r--drivers/md/dm-cache-block-types.h11
-rw-r--r--drivers/md/dm-cache-metadata.c132
-rw-r--r--drivers/md/dm-cache-metadata.h15
-rw-r--r--drivers/md/dm-cache-target.c131
-rw-r--r--drivers/md/dm-era-target.c1746
-rw-r--r--drivers/md/dm-log-userspace-transfer.c2
-rw-r--r--drivers/md/dm-mpath.c219
-rw-r--r--drivers/md/dm-table.c21
-rw-r--r--drivers/md/dm-thin-metadata.c80
-rw-r--r--drivers/md/dm-thin.c263
-rw-r--r--drivers/md/dm.c24
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/md.c65
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/persistent-data/dm-bitset.c10
-rw-r--r--drivers/md/persistent-data/dm-bitset.h1
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c15
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h3
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c5
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.h17
-rw-r--r--drivers/md/raid1.c17
-rw-r--r--drivers/md/raid5.c28
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c2
-rw-r--r--drivers/media/common/siano/smsir.c2
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c10
-rw-r--r--drivers/media/dvb-frontends/Kconfig12
-rw-r--r--drivers/media/dvb-frontends/Makefile2
-rw-r--r--drivers/media/dvb-frontends/af9033.c59
-rw-r--r--drivers/media/dvb-frontends/af9033.h34
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/Kconfig7
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/Makefile6
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h139
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx39xxj.h45
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h256
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_driver.h2343
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_driver_version.h72
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c12400
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.h650
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj_map.h15055
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c4
-rw-r--r--drivers/media/dvb-frontends/ds3000.c2
-rw-r--r--drivers/media/dvb-frontends/it913x-fe-priv.h1051
-rw-r--r--drivers/media/dvb-frontends/it913x-fe.c1045
-rw-r--r--drivers/media/dvb-frontends/it913x-fe.h237
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c30
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c19
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c4
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.h2
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c191
-rw-r--r--drivers/media/dvb-frontends/rtl2832.h34
-rw-r--r--drivers/media/dvb-frontends/rtl2832_priv.h54
-rw-r--r--drivers/media/dvb-frontends/s921.c4
-rw-r--r--drivers/media/dvb-frontends/s921.h2
-rw-r--r--drivers/media/dvb-frontends/stb6100.c2
-rw-r--r--drivers/media/dvb-frontends/stv0900_sw.c2
-rw-r--r--drivers/media/dvb-frontends/tda10071.c68
-rw-r--r--drivers/media/dvb-frontends/tda10071.h2
-rw-r--r--drivers/media/i2c/Kconfig24
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/ad9389b.c2
-rw-r--r--drivers/media/i2c/adv7180.c118
-rw-r--r--drivers/media/i2c/adv7343.c4
-rw-r--r--drivers/media/i2c/adv7511.c2
-rw-r--r--drivers/media/i2c/adv7604.c4
-rw-r--r--drivers/media/i2c/adv7842.c153
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c4
-rw-r--r--drivers/media/i2c/lm3560.c22
-rw-r--r--drivers/media/i2c/lm3646.c414
-rw-r--r--drivers/media/i2c/mt9p031.c53
-rw-r--r--drivers/media/i2c/mt9t001.c229
-rw-r--r--drivers/media/i2c/mt9v011.c4
-rw-r--r--drivers/media/i2c/mt9v032.c10
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c207
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-spi.c6
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h4
-rw-r--r--drivers/media/i2c/s5k5baf.c3
-rw-r--r--drivers/media/i2c/s5k6a3.c389
-rw-r--r--drivers/media/i2c/sr030pc30.c2
-rw-r--r--drivers/media/i2c/ths8200.c26
-rw-r--r--drivers/media/i2c/tvp514x.c3
-rw-r--r--drivers/media/i2c/tvp5150.c8
-rw-r--r--drivers/media/i2c/tvp7002.c3
-rw-r--r--drivers/media/parport/bw-qcam.c8
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c17
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c1
-rw-r--r--drivers/media/pci/bt8xx/bttv.h1
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c9
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c5
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c7
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c6
-rw-r--r--drivers/media/pci/cx88/cx88-input.c2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c6
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c9
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c6
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c4
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c7
-rw-r--r--drivers/media/pci/ttpci/av7110_hw.c2
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/media/platform/arv.c6
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c2
-rw-r--r--drivers/media/platform/coda.c7
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c9
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c9
-rw-r--r--drivers/media/platform/davinci/vpif_display.c9
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c12
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig9
-rw-r--r--drivers/media/platform/exynos4-is/Makefile4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-param.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-param.h5
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.c16
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.h1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-sensor.c285
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-sensor.h49
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c100
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.h9
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c660
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.h44
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c29
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.h27
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c7
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c376
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.h32
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c5
-rw-r--r--drivers/media/platform/m2m-deinterlace.c11
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c3
-rw-r--r--drivers/media/platform/mem2mem_testdev.c95
-rw-r--r--drivers/media/platform/mx2_emmaprp.c13
-rw-r--r--drivers/media/platform/omap/omap_vout.c1
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c3
-rw-r--r--drivers/media/platform/omap3isp/isp.c7
-rw-r--r--drivers/media/platform/omap3isp/isp.h12
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c10
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.h6
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c6
-rw-r--r--drivers/media/platform/omap3isp/isphist.c4
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c22
-rw-r--r--drivers/media/platform/omap3isp/ispqueue.c2
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c6
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.h4
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c4
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c12
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c17
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c7
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c7
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-regs.h24
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v6.h1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c17
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c24
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c8
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c6
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c2
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c11
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c2
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c6
-rw-r--r--drivers/media/platform/vivi.c54
-rw-r--r--drivers/media/platform/vsp1/vsp1.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c2
-rw-r--r--drivers/media/radio/radio-cadet.c46
-rw-r--r--drivers/media/radio/radio-keene.c19
-rw-r--r--drivers/media/radio/si4713/Kconfig6
-rw-r--r--drivers/media/radio/si4713/radio-usb-si4713.c4
-rw-r--r--drivers/media/rc/Kconfig11
-rw-r--r--drivers/media/rc/Makefile2
-rw-r--r--drivers/media/rc/ati_remote.c2
-rw-r--r--drivers/media/rc/ene_ir.c2
-rw-r--r--drivers/media/rc/fintek-cir.c2
-rw-r--r--drivers/media/rc/gpio-ir-recv.c4
-rw-r--r--drivers/media/rc/iguanair.c31
-rw-r--r--drivers/media/rc/img-ir/Kconfig61
-rw-r--r--drivers/media/rc/img-ir/Makefile11
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c176
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c1053
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.h269
-rw-r--r--drivers/media/rc/img-ir/img-ir-jvc.c81
-rw-r--r--drivers/media/rc/img-ir/img-ir-nec.c148
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.c151
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.h60
-rw-r--r--drivers/media/rc/img-ir/img-ir-sanyo.c122
-rw-r--r--drivers/media/rc/img-ir/img-ir-sharp.c99
-rw-r--r--drivers/media/rc/img-ir/img-ir-sony.c145
-rw-r--r--drivers/media/rc/img-ir/img-ir.h166
-rw-r--r--drivers/media/rc/imon.c7
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c2
-rw-r--r--drivers/media/rc/ir-lirc-codec.c2
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c2
-rw-r--r--drivers/media/rc/ir-nec-decoder.c11
-rw-r--r--drivers/media/rc/ir-raw.c5
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c10
-rw-r--r--drivers/media/rc/ir-rc5-sz-decoder.c4
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c6
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c6
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c200
-rw-r--r--drivers/media/rc/ir-sony-decoder.c10
-rw-r--r--drivers/media/rc/ite-cir.c2
-rw-r--r--drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c4
-rw-r--r--drivers/media/rc/keymaps/rc-apac-viewcomp.c4
-rw-r--r--drivers/media/rc/keymaps/rc-asus-pc39.c4
-rw-r--r--drivers/media/rc/keymaps/rc-asus-ps3-100.c4
-rw-r--r--drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c4
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-a16d.c4
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-cardbus.c4
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-dvbt.c4
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m135a.c4
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c2
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia.c4
-rw-r--r--drivers/media/rc/keymaps/rc-avertv-303.c4
-rw-r--r--drivers/media/rc/keymaps/rc-behold-columbus.c4
-rw-r--r--drivers/media/rc/keymaps/rc-behold.c4
-rw-r--r--drivers/media/rc/keymaps/rc-budget-ci-old.c4
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy-1400.c4
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy.c4
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c4
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-rc5.c4
-rw-r--r--drivers/media/rc/keymaps/rc-dm1105-nec.c4
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c4
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c4
-rw-r--r--drivers/media/rc/keymaps/rc-em-terratec.c4
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv-fm53.c4
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv.c4
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv2.c4
-rw-r--r--drivers/media/rc/keymaps/rc-evga-indtube.c4
-rw-r--r--drivers/media/rc/keymaps/rc-eztv.c4
-rw-r--r--drivers/media/rc/keymaps/rc-flydvb.c4
-rw-r--r--drivers/media/rc/keymaps/rc-flyvideo.c4
-rw-r--r--drivers/media/rc/keymaps/rc-fusionhdtv-mce.c4
-rw-r--r--drivers/media/rc/keymaps/rc-gadmei-rm008z.c4
-rw-r--r--drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c4
-rw-r--r--drivers/media/rc/keymaps/rc-gotview7135.c4
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c4
-rw-r--r--drivers/media/rc/keymaps/rc-iodata-bctv7e.c4
-rw-r--r--drivers/media/rc/keymaps/rc-kaiomy.c4
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-315u.c4
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-pc150u.c2
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c4
-rw-r--r--drivers/media/rc/keymaps/rc-manli.c4
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c4
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere.c4
-rw-r--r--drivers/media/rc/keymaps/rc-nebula.c4
-rw-r--r--drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c6
-rw-r--r--drivers/media/rc/keymaps/rc-norwood.c4
-rw-r--r--drivers/media/rc/keymaps/rc-npgtech.c4
-rw-r--r--drivers/media/rc/keymaps/rc-pctv-sedna.c4
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-color.c4
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-grey.c4
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c4
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-002t.c4
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-mk12.c4
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-new.c4
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview.c4
-rw-r--r--drivers/media/rc/keymaps/rc-powercolor-real-angel.c4
-rw-r--r--drivers/media/rc/keymaps/rc-proteus-2309.c4
-rw-r--r--drivers/media/rc/keymaps/rc-purpletv.c4
-rw-r--r--drivers/media/rc/keymaps/rc-pv951.c4
-rw-r--r--drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c4
-rw-r--r--drivers/media/rc/keymaps/rc-tbs-nec.c4
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c4
-rw-r--r--drivers/media/rc/keymaps/rc-tevii-nec.c4
-rw-r--r--drivers/media/rc/keymaps/rc-tivo.c86
-rw-r--r--drivers/media/rc/keymaps/rc-tt-1500.c4
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-s350.c4
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-tv-pvr.c4
-rw-r--r--drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c4
-rw-r--r--drivers/media/rc/keymaps/rc-winfast.c4
-rw-r--r--drivers/media/rc/mceusb.c184
-rw-r--r--drivers/media/rc/nuvoton-cir.c12
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/rc/rc-core-priv.h15
-rw-r--r--drivers/media/rc/rc-loopback.c2
-rw-r--r--drivers/media/rc/rc-main.c253
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/rc/st_rc.c2
-rw-r--r--drivers/media/rc/streamzap.c2
-rw-r--r--drivers/media/rc/ttusbir.c2
-rw-r--r--drivers/media/rc/winbond-cir.c2
-rw-r--r--drivers/media/tuners/Kconfig1
-rw-r--r--drivers/media/tuners/e4000.c608
-rw-r--r--drivers/media/tuners/e4000.h21
-rw-r--r--drivers/media/tuners/e4000_priv.h88
-rw-r--r--drivers/media/tuners/mt2063.c4
-rw-r--r--drivers/media/tuners/r820t.c4
-rw-r--r--drivers/media/tuners/tda18212.c12
-rw-r--r--drivers/media/tuners/tda18212.h2
-rw-r--r--drivers/media/tuners/tuner-xc2028.c3
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c23
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig9
-rw-r--r--drivers/media/usb/dvb-usb-v2/Makefile4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c102
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/it913x.c828
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c123
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.h2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-remote.c2
-rw-r--r--drivers/media/usb/em28xx/Kconfig2
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c108
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c129
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c54
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c193
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c41
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c55
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c56
-rw-r--r--drivers/media/usb/em28xx/em28xx.h15
-rw-r--r--drivers/media/usb/gspca/kinect.c7
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c1
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c2
-rw-r--r--drivers/media/usb/gspca/topro.c10
-rw-r--r--drivers/media/usb/pwc/pwc-if.c19
-rw-r--r--drivers/media/usb/s2255/Kconfig2
-rw-r--r--drivers/media/usb/s2255/s2255drv.c1271
-rw-r--r--drivers/media/usb/siano/smsusb.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-ac97.c6
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c2
-rw-r--r--drivers/media/usb/tlg2300/pd-alsa.c3
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c8
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-stds.c2
-rw-r--r--drivers/media/usb/usbtv/Makefile3
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c134
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c (renamed from drivers/media/usb/usbtv/usbtv.c)171
-rw-r--r--drivers/media/usb/usbtv/usbtv.h99
-rw-r--r--drivers/media/usb/usbvision/usbvision.h8
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c24
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c29
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c11
-rw-r--r--drivers/media/usb/uvc/uvc_video.c23
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h16
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c135
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c33
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c32
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c8
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c91
-rw-r--r--drivers/media/v4l2-core/v4l2-of.c133
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c18
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c658
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c2
-rw-r--r--drivers/memory/Kconfig15
-rw-r--r--drivers/memory/Makefile2
-rw-r--r--drivers/memory/fsl_ifc.c309
-rw-r--r--drivers/memory/ti-aemif.c427
-rw-r--r--drivers/message/i2o/iop.c85
-rw-r--r--drivers/mfd/88pm800.c3
-rw-r--r--drivers/mfd/88pm860x-core.c6
-rw-r--r--drivers/mfd/Kconfig98
-rw-r--r--drivers/mfd/Makefile6
-rw-r--r--drivers/mfd/ab8500-core.c27
-rw-r--r--drivers/mfd/adp5520.c1
-rw-r--r--drivers/mfd/arizona-core.c4
-rw-r--r--drivers/mfd/as3722.c1
-rw-r--r--drivers/mfd/bcm590xx.c93
-rw-r--r--drivers/mfd/cs5535-mfd.c1
-rw-r--r--drivers/mfd/da9052-core.c3
-rw-r--r--drivers/mfd/da9052-i2c.c5
-rw-r--r--drivers/mfd/da9052-spi.c1
-rw-r--r--drivers/mfd/da9055-i2c.c8
-rw-r--r--drivers/mfd/da9063-core.c25
-rw-r--r--drivers/mfd/db8500-prcmu.c34
-rw-r--r--drivers/mfd/janz-cmodio.c1
-rw-r--r--drivers/mfd/kempld-core.c31
-rw-r--r--drivers/mfd/lpc_ich.c151
-rw-r--r--drivers/mfd/lpc_sch.c1
-rw-r--r--drivers/mfd/max14577.c6
-rw-r--r--drivers/mfd/max77686.c4
-rw-r--r--drivers/mfd/max77693.c12
-rw-r--r--drivers/mfd/max8925-i2c.c9
-rw-r--r--drivers/mfd/max8997.c18
-rw-r--r--drivers/mfd/max8998.c4
-rw-r--r--drivers/mfd/mc13xxx-spi.c5
-rw-r--r--drivers/mfd/mcp-sa11x0.c1
-rw-r--r--drivers/mfd/omap-usb-host.c189
-rw-r--r--drivers/mfd/omap-usb-tll.c2
-rw-r--r--drivers/mfd/pcf50633-adc.c1
-rw-r--r--drivers/mfd/pm8921-core.c419
-rw-r--r--drivers/mfd/pm8xxx-irq.c371
-rw-r--r--drivers/mfd/rc5t583-irq.c1
-rw-r--r--drivers/mfd/rdc321x-southbridge.c1
-rw-r--r--drivers/mfd/retu-mfd.c1
-rw-r--r--drivers/mfd/rtsx_pcr.c132
-rw-r--r--drivers/mfd/rtsx_usb.c760
-rw-r--r--drivers/mfd/sec-core.c129
-rw-r--r--drivers/mfd/sec-irq.c97
-rw-r--r--drivers/mfd/smsc-ece1099.c1
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/mfd/stw481x.c8
-rw-r--r--drivers/mfd/syscon.c9
-rw-r--r--drivers/mfd/tc3589x.c84
-rw-r--r--drivers/mfd/ti-ssp.c465
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c24
-rw-r--r--drivers/mfd/timberdale.c2
-rw-r--r--drivers/mfd/tps65218.c282
-rw-r--r--drivers/mfd/tps65910.c5
-rw-r--r--drivers/mfd/tps65912-core.c1
-rw-r--r--drivers/mfd/tps65912-irq.c1
-rw-r--r--drivers/mfd/twl-core.c10
-rw-r--r--drivers/mfd/twl4030-irq.c1
-rw-r--r--drivers/mfd/twl6030-irq.c1
-rw-r--r--drivers/mfd/twl6040.c6
-rw-r--r--drivers/mfd/ucb1x00-core.c4
-rw-r--r--drivers/mfd/vexpress-config.c3
-rw-r--r--drivers/mfd/vexpress-sysreg.c2
-rw-r--r--drivers/mfd/wm5102-tables.c57
-rw-r--r--drivers/mfd/wm5110-tables.c174
-rw-r--r--drivers/mfd/wm8350-core.c1
-rw-r--r--drivers/mfd/wm8350-irq.c1
-rw-r--r--drivers/mfd/wm8400-core.c22
-rw-r--r--drivers/misc/Kconfig3
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ad525x_dpot.c1
-rw-r--r--drivers/misc/apds9802als.c1
-rw-r--r--drivers/misc/atmel-ssc.c6
-rw-r--r--drivers/misc/bmp085.c1
-rw-r--r--drivers/misc/carma/carma-fpga.c1
-rw-r--r--drivers/misc/ds1682.c1
-rw-r--r--drivers/misc/echo/Kconfig (renamed from drivers/staging/echo/Kconfig)0
-rw-r--r--drivers/misc/echo/Makefile (renamed from drivers/staging/echo/Makefile)0
-rw-r--r--drivers/misc/echo/echo.c (renamed from drivers/staging/echo/echo.c)0
-rw-r--r--drivers/misc/echo/echo.h (renamed from drivers/staging/echo/echo.h)0
-rw-r--r--drivers/misc/echo/fir.h (renamed from drivers/staging/echo/fir.h)0
-rw-r--r--drivers/misc/echo/oslec.h (renamed from drivers/staging/echo/oslec.h)0
-rw-r--r--drivers/misc/eeprom/at25.c1
-rw-r--r--drivers/misc/eeprom/eeprom.c1
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c1
-rw-r--r--drivers/misc/eeprom/max6875.c1
-rw-r--r--drivers/misc/eeprom/sunxi_sid.c3
-rw-r--r--drivers/misc/genwqe/card_debugfs.c1
-rw-r--r--drivers/misc/hmc6352.c1
-rw-r--r--drivers/misc/isl29003.c1
-rw-r--r--drivers/misc/isl29020.c1
-rw-r--r--drivers/misc/lattice-ecp3-config.c1
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c1
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c1
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_spi.c1
-rw-r--r--drivers/misc/lkdtm.c74
-rw-r--r--drivers/misc/mei/Kconfig9
-rw-r--r--drivers/misc/mei/Makefile6
-rw-r--r--drivers/misc/mei/amthif.c72
-rw-r--r--drivers/misc/mei/bus.c21
-rw-r--r--drivers/misc/mei/client.c300
-rw-r--r--drivers/misc/mei/client.h30
-rw-r--r--drivers/misc/mei/debugfs.c54
-rw-r--r--drivers/misc/mei/hbm.c281
-rw-r--r--drivers/misc/mei/hbm.h1
-rw-r--r--drivers/misc/mei/hw-me.c30
-rw-r--r--drivers/misc/mei/hw-me.h1
-rw-r--r--drivers/misc/mei/hw-txe-regs.h294
-rw-r--r--drivers/misc/mei/hw-txe.c1107
-rw-r--r--drivers/misc/mei/hw-txe.h74
-rw-r--r--drivers/misc/mei/hw.h20
-rw-r--r--drivers/misc/mei/init.c43
-rw-r--r--drivers/misc/mei/interrupt.c159
-rw-r--r--drivers/misc/mei/main.c23
-rw-r--r--drivers/misc/mei/mei_dev.h54
-rw-r--r--drivers/misc/mei/nfc.c14
-rw-r--r--drivers/misc/mei/pci-me.c14
-rw-r--r--drivers/misc/mei/pci-txe.c293
-rw-r--r--drivers/misc/mei/wd.c136
-rw-r--r--drivers/misc/mic/Kconfig2
-rw-r--r--drivers/misc/mic/card/mic_device.h1
-rw-r--r--drivers/misc/mic/host/mic_device.h1
-rw-r--r--drivers/misc/mic/host/mic_intr.c2
-rw-r--r--drivers/misc/pch_phub.c5
-rw-r--r--drivers/misc/sgi-gru/grukdump.c6
-rw-r--r--drivers/misc/sram.c127
-rw-r--r--drivers/misc/ti-st/st_core.c1
-rw-r--r--drivers/misc/ti_dac7512.c1
-rw-r--r--drivers/misc/tsl2550.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c7
-rw-r--r--drivers/mmc/card/block.c181
-rw-r--r--drivers/mmc/core/Kconfig15
-rw-r--r--drivers/mmc/core/bus.c12
-rw-r--r--drivers/mmc/core/core.c87
-rw-r--r--drivers/mmc/core/host.c20
-rw-r--r--drivers/mmc/core/mmc.c65
-rw-r--r--drivers/mmc/core/mmc_ops.c64
-rw-r--r--drivers/mmc/core/sd.c23
-rw-r--r--drivers/mmc/core/slot-gpio.c180
-rw-r--r--drivers/mmc/host/Kconfig23
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/davinci_mmc.c4
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c2
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c12
-rw-r--r--drivers/mmc/host/dw_mmc-socfpga.c138
-rw-r--r--drivers/mmc/host/dw_mmc.c4
-rw-r--r--drivers/mmc/host/dw_mmc.h3
-rw-r--r--drivers/mmc/host/mmci.c54
-rw-r--r--drivers/mmc/host/mmci.h11
-rw-r--r--drivers/mmc/host/omap.c93
-rw-r--r--drivers/mmc/host/omap_hsmmc.c242
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c529
-rw-r--r--drivers/mmc/host/sdhci-acpi.c80
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c39
-rw-r--r--drivers/mmc/host/sdhci-dove.c2
-rw-r--r--drivers/mmc/host/sdhci-msm.c618
-rw-r--r--drivers/mmc/host/sdhci-pci.c20
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c68
-rw-r--r--drivers/mmc/host/sdhci-s3c.c170
-rw-r--r--drivers/mmc/host/sdhci-spear.c199
-rw-r--r--drivers/mmc/host/sdhci.c24
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c50
-rw-r--r--drivers/mmc/host/tmio_mmc.c30
-rw-r--r--drivers/mmc/host/tmio_mmc.h7
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c7
-rw-r--r--drivers/mmc/host/ushc.c2
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c4
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/bcm47xxpart.c11
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c40
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c9
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c3
-rw-r--r--drivers/mtd/chips/cfi_probe.c4
-rw-r--r--drivers/mtd/chips/cfi_util.c4
-rw-r--r--drivers/mtd/chips/gen_probe.c2
-rw-r--r--drivers/mtd/devices/Kconfig8
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/block2mtd.c19
-rw-r--r--drivers/mtd/devices/elm.c49
-rw-r--r--drivers/mtd/devices/m25p80.c35
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c24
-rw-r--r--drivers/mtd/devices/phram.c41
-rw-r--r--drivers/mtd/devices/pmc551.c7
-rw-r--r--drivers/mtd/devices/serial_flash_cmds.h81
-rw-r--r--drivers/mtd/devices/spear_smi.c2
-rw-r--r--drivers/mtd/devices/sst25l.c1
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c2108
-rw-r--r--drivers/mtd/inftlmount.c1
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c4
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c5
-rw-r--r--drivers/mtd/maps/Kconfig10
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c1
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c1
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c1
-rw-r--r--drivers/mtd/maps/ixp4xx.c1
-rw-r--r--drivers/mtd/maps/lantiq-flash.c1
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c1
-rw-r--r--drivers/mtd/maps/pci.c1
-rw-r--r--drivers/mtd/maps/physmap_of.c1
-rw-r--r--drivers/mtd/maps/plat-ram.c2
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c1
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c1
-rw-r--r--drivers/mtd/maps/scb2_flash.c1
-rw-r--r--drivers/mtd/maps/sun_uflash.c1
-rw-r--r--drivers/mtd/mtd_blkdevs.c1
-rw-r--r--drivers/mtd/mtdchar.c20
-rw-r--r--drivers/mtd/mtdcore.c24
-rw-r--r--drivers/mtd/mtdpart.c14
-rw-r--r--drivers/mtd/nand/Kconfig3
-rw-r--r--drivers/mtd/nand/ams-delta.c1
-rw-r--r--drivers/mtd/nand/atmel_nand.c14
-rw-r--r--drivers/mtd/nand/au1550nd.c4
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c1
-rw-r--r--drivers/mtd/nand/cafe_nand.c68
-rw-r--r--drivers/mtd/nand/davinci_nand.c23
-rw-r--r--drivers/mtd/nand/denali_dt.c39
-rw-r--r--drivers/mtd/nand/diskonchip.c5
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c1
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c3
-rw-r--r--drivers/mtd/nand/gpio.c1
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c102
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c1
-rw-r--r--drivers/mtd/nand/mxc_nand.c2
-rw-r--r--drivers/mtd/nand/nand_base.c165
-rw-r--r--drivers/mtd/nand/nand_ids.c3
-rw-r--r--drivers/mtd/nand/nuc900_nand.c6
-rw-r--r--drivers/mtd/nand/omap2.c512
-rw-r--r--drivers/mtd/nand/pasemi_nand.c1
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c3
-rw-r--r--drivers/mtd/nand/s3c2410.c1
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/onenand/generic.c1
-rw-r--r--drivers/mtd/onenand/omap2.c1
-rw-r--r--drivers/mtd/onenand/onenand_base.c38
-rw-r--r--drivers/mtd/onenand/samsung.c4
-rw-r--r--drivers/mtd/rfd_ftl.c9
-rw-r--r--drivers/mtd/sm_ftl.c11
-rw-r--r--drivers/mtd/tests/mtd_test.c1
-rw-r--r--drivers/mtd/ubi/Kconfig16
-rw-r--r--drivers/mtd/ubi/Makefile1
-rw-r--r--drivers/mtd/ubi/block.c647
-rw-r--r--drivers/mtd/ubi/build.c11
-rw-r--r--drivers/mtd/ubi/cdev.c20
-rw-r--r--drivers/mtd/ubi/ubi.h21
-rw-r--r--drivers/net/Kconfig5
-rw-r--r--drivers/net/bonding/bond_3ad.c74
-rw-r--r--drivers/net/bonding/bond_3ad.h175
-rw-r--r--drivers/net/bonding/bond_alb.c138
-rw-r--r--drivers/net/bonding/bond_debugfs.c10
-rw-r--r--drivers/net/bonding/bond_main.c468
-rw-r--r--drivers/net/bonding/bond_netlink.c8
-rw-r--r--drivers/net/bonding/bond_options.c328
-rw-r--r--drivers/net/bonding/bond_options.h62
-rw-r--r--drivers/net/bonding/bond_procfs.c14
-rw-r--r--drivers/net/bonding/bond_sysfs.c24
-rw-r--r--drivers/net/bonding/bonding.h43
-rw-r--r--drivers/net/caif/caif_serial.c1
-rw-r--r--drivers/net/caif/caif_spi.c1
-rw-r--r--drivers/net/can/at91_can.c10
-rw-r--r--drivers/net/can/bfin_can.c1
-rw-r--r--drivers/net/can/c_can/c_can.c349
-rw-r--r--drivers/net/can/c_can/c_can.h29
-rw-r--r--drivers/net/can/c_can/c_can_platform.c47
-rw-r--r--drivers/net/can/cc770/cc770.c1
-rw-r--r--drivers/net/can/dev.c183
-rw-r--r--drivers/net/can/flexcan.c11
-rw-r--r--drivers/net/can/grcan.c1
-rw-r--r--drivers/net/can/janz-ican3.c65
-rw-r--r--drivers/net/can/mcp251x.c22
-rw-r--r--drivers/net/can/mscan/mscan.c7
-rw-r--r--drivers/net/can/pch_can.c1
-rw-r--r--drivers/net/can/sja1000/Kconfig15
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/ems_pcmcia.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c1
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c1
-rw-r--r--drivers/net/can/sja1000/sja1000.c10
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c220
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c194
-rw-r--r--drivers/net/can/slcan.c6
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/usb/usb_8dev.c3
-rw-r--r--drivers/net/dummy.c12
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c1127
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/8390/lib8390.c2
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c9
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c6
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c2
-rw-r--r--drivers/net/ethernet/altera/Kconfig8
-rw-r--r--drivers/net/ethernet/altera/Makefile7
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.c202
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.h34
-rw-r--r--drivers/net/ethernet/altera/altera_msgdmahw.h167
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c509
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.h35
-rw-r--r--drivers/net/ethernet/altera/altera_sgdmahw.h124
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h486
-rw-r--r--drivers/net/ethernet/altera/altera_tse_ethtool.c235
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c1543
-rw-r--r--drivers/net/ethernet/altera/altera_utils.c44
-rw-r--r--drivers/net/ethernet/altera/altera_utils.h27
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c3
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c124
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c34
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c14
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c15
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c10
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig11
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c162
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h49
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h31
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c385
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c155
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c1876
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h368
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c619
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h22
-rw-r--r--drivers/net/ethernet/broadcom/genet/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c2583
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h628
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c464
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c30
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c65
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c3
-rw-r--r--drivers/net/ethernet/cadence/macb.c8
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c26
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c230
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c172
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c152
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c43
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c6
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c24
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/dnet.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig8
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h27
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c213
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h97
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c30
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c467
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h5
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c6
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c19
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c1327
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h114
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c165
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c340
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile7
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h55
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h52
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c64
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h55
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c427
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h72
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c373
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c53
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c53
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h48
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h50
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c370
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c87
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c476
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c546
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c117
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c434
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c135
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c369
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c90
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h16
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h48
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c299
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c27
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c15
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h12
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h75
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c25
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h14
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c76
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h36
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h18
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c76
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c170
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c64
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c10
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c360
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c200
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c212
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c45
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c39
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c141
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c161
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h33
-rw-r--r--drivers/net/ethernet/jme.c16
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c20
-rw-r--r--drivers/net/ethernet/marvell/skge.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c179
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c174
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c267
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c289
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c85
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c40
-rw-r--r--drivers/net/ethernet/neterion/s2io.c14
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c35
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c57
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c11
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h122
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c91
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c23
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c22
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c179
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c85
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c233
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c102
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c25
-rw-r--r--drivers/net/ethernet/rdc/r6040.c6
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c7
-rw-r--r--drivers/net/ethernet/realtek/8139too.c12
-rw-r--r--drivers/net/ethernet/realtek/r8169.c14
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c271
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h3
-rw-r--r--drivers/net/ethernet/samsung/Kconfig32
-rw-r--r--drivers/net/ethernet/samsung/Makefile5
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/Makefile4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h535
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c262
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c515
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h298
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c381
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h50
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c524
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2324
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c244
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c254
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h104
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c259
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h488
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c91
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h38
-rw-r--r--drivers/net/ethernet/sfc/ef10.c27
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h61
-rw-r--r--drivers/net/ethernet/sfc/efx.c33
-rw-r--r--drivers/net/ethernet/sfc/efx.h2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c39
-rw-r--r--drivers/net/ethernet/sfc/falcon.c6
-rw-r--r--drivers/net/ethernet/sfc/farch.c5
-rw-r--r--drivers/net/ethernet/sfc/filter.h2
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c14
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c4
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h1
-rw-r--r--drivers/net/ethernet/sfc/nic.c1
-rw-r--r--drivers/net/ethernet/sfc/ptp.c93
-rw-r--r--drivers/net/ethernet/sfc/selftest.c6
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c13
-rw-r--r--drivers/net/ethernet/sfc/tx.c21
-rw-r--r--drivers/net/ethernet/silan/sc92031.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c3
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c130
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c1
-rw-r--r--drivers/net/ethernet/sun/niu.c11
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c34
-rw-r--r--drivers/net/ethernet/ti/cpts.c13
-rw-r--r--drivers/net/ethernet/tile/tilegx.c4
-rw-r--r--drivers/net/ethernet/tile/tilepro.c11
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c3
-rw-r--r--drivers/net/ethernet/via/via-rhine.c16
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c9
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c9
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c15
-rw-r--r--drivers/net/ethernet/xscale/Kconfig1
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c11
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h207
-rw-r--r--drivers/net/hyperv/netvsc.c93
-rw-r--r--drivers/net/hyperv/netvsc_drv.c337
-rw-r--r--drivers/net/hyperv/rndis_filter.c187
-rw-r--r--drivers/net/ieee802154/Kconfig34
-rw-r--r--drivers/net/ieee802154/at86rf230.c520
-rw-r--r--drivers/net/ieee802154/fakehard.c22
-rw-r--r--drivers/net/ieee802154/mrf24j40.c17
-rw-r--r--drivers/net/ifb.c8
-rw-r--r--drivers/net/loopback.c16
-rw-r--r--drivers/net/macvlan.c13
-rw-r--r--drivers/net/nlmon.c18
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/at803x.c30
-rw-r--r--drivers/net/phy/bcm7xxx.c359
-rw-r--r--drivers/net/phy/broadcom.c52
-rw-r--r--drivers/net/phy/dp83640.c94
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/mdio_bus.c20
-rw-r--r--drivers/net/phy/micrel.c49
-rw-r--r--drivers/net/phy/phy.c51
-rw-r--r--drivers/net/phy/phy_device.c57
-rw-r--r--drivers/net/phy/spi_ks8995.c52
-rw-r--r--drivers/net/ppp/ppp_generic.c60
-rw-r--r--drivers/net/rionet.c1
-rw-r--r--drivers/net/team/team.c28
-rw-r--r--drivers/net/team/team_mode_loadbalance.c4
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/cdc_ncm.c17
-rw-r--r--drivers/net/usb/lg-vl600.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c1124
-rw-r--r--drivers/net/veth.c23
-rw-r--r--drivers/net/virtio_net.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c112
-rw-r--r--drivers/net/vxlan.c19
-rw-r--r--drivers/net/wimax/i2400m/netdev.c3
-rw-r--r--drivers/net/wireless/Kconfig11
-rw-r--r--drivers/net/wireless/Makefile1
-rw-r--r--drivers/net/wireless/airo.c17
-rw-r--r--drivers/net/wireless/ath/ath.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c36
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h81
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h25
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c25
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h18
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c269
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c205
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c850
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c536
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h28
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c132
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h34
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c27
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig12
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c52
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c85
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c235
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h35
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c203
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.c180
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.h26
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c244
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.h20
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c244
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h35
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c86
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h27
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c260
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c240
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c54
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c179
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c247
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c1495
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h248
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c267
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c18
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/regd.c10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c72
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c64
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h10
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c241
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c181
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c33
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c234
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c37
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c177
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c334
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h164
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c136
-rw-r--r--drivers/net/wireless/atmel.c8
-rw-r--r--drivers/net/wireless/b43/Kconfig2
-rw-r--r--drivers/net/wireless/b43/debugfs.h2
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/b43/main.h35
-rw-r--r--drivers/net/wireless/b43/phy_common.c4
-rw-r--r--drivers/net/wireless/b43/pio.c10
-rw-r--r--drivers/net/wireless/b43/sysfs.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c14
-rw-r--r--drivers/net/wireless/b43legacy/main.c4
-rw-r--r--drivers/net/wireless/b43legacy/sysfs.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c107
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c1034
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.h91
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c982
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c972
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h231
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h91
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c283
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h21
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c20
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h3
-rw-r--r--drivers/net/wireless/cw1200/fwio.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c13
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c15
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h3
-rw-r--r--drivers/net/wireless/iwlegacy/common.c83
-rw-r--r--drivers/net/wireless/iwlegacy/common.h17
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c12
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c23
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c132
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h47
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c270
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h35
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h69
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h40
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c (renamed from drivers/net/wireless/iwlwifi/mvm/bt-coex.c)474
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c226
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c119
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c485
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h (renamed from drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h)21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h134
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h106
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c79
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c78
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c601
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h259
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c87
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/offloading.c215
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c481
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c426
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power_legacy.c319
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c100
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c210
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c255
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c213
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h62
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c43
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c138
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c85
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c52
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c410
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c73
-rw-r--r--drivers/net/wireless/libertas/cfg.c3
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c113
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h6
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c192
-rw-r--r--drivers/net/wireless/mwifiex/11ac.h2
-rw-r--r--drivers/net/wireless/mwifiex/11h.c4
-rw-r--r--drivers/net/wireless/mwifiex/11n.c106
-rw-r--r--drivers/net/wireless/mwifiex/11n.h58
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c203
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h3
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/README2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c403
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c205
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c161
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c8
-rw-r--r--drivers/net/wireless/mwifiex/decl.h23
-rw-r--r--drivers/net/wireless/mwifiex/fw.h202
-rw-r--r--drivers/net/wireless/mwifiex/ie.c6
-rw-r--r--drivers/net/wireless/mwifiex/init.c8
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h25
-rw-r--r--drivers/net/wireless/mwifiex/join.c66
-rw-r--r--drivers/net/wireless/mwifiex/main.c10
-rw-r--r--drivers/net/wireless/mwifiex/main.h112
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c180
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h5
-rw-r--r--drivers/net/wireless/mwifiex/scan.c615
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c11
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c471
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c130
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c52
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c164
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c34
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c3
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c1044
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c24
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c130
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c22
-rw-r--r--drivers/net/wireless/mwifiex/usb.c11
-rw-r--r--drivers/net/wireless/mwifiex/util.c118
-rw-r--r--drivers/net/wireless/mwifiex/util.h20
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c131
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h18
-rw-r--r--drivers/net/wireless/mwl8k.c197
-rw-r--r--drivers/net/wireless/orinoco/cfg.c5
-rw-r--r--drivers/net/wireless/orinoco/hw.c2
-rw-r--r--drivers/net/wireless/orinoco/scan.c5
-rw-r--r--drivers/net/wireless/orinoco/wext.c2
-rw-r--r--drivers/net/wireless/p54/p54usb.c6
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c7
-rw-r--r--drivers/net/wireless/rsi/Kconfig30
-rw-r--r--drivers/net/wireless/rsi/Makefile12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c342
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c339
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c1008
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c295
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c1304
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_pkt.c196
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c850
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c566
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c575
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c177
-rw-r--r--drivers/net/wireless/rsi/rsi_boot_params.h126
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h87
-rw-r--r--drivers/net/wireless/rsi/rsi_debugfs.h48
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h218
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h285
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h129
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h68
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/rtl818x/Kconfig4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/Makefile2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c1009
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8180.h76
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.c23
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c475
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h61
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c20
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h269
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig27
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile3
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/Makefile7
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h75
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c3698
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h173
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c1011
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h559
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c218
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h52
-rw-r--r--drivers/net/wireless/rtlwifi/core.c124
-rw-r--r--drivers/net/wireless/rtlwifi/core.h4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c131
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h14
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c119
-rw-r--r--drivers/net/wireless/rtlwifi/ps.h60
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/Makefile1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/fw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c128
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.c63
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/reg.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.h8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c71
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c31
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c71
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c50
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c429
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/reg.h14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c48
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c87
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/reg.h12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.h8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/Makefile1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/def.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.c47
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.c260
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.h18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c127
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c530
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.h21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/reg.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/Makefile19
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/def.h248
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/dm.c1325
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/dm.h310
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/fw.c620
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/fw.h248
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.c2523
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.h64
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/led.c153
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/led.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/phy.c2156
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/phy.h217
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c106
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h304
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c140
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h95
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/reg.h2277
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/rf.c504
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/rf.h43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c384
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/table.c572
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/table.h43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/trx.c960
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/trx.h617
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/Makefile9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c65
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h33
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c329
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h126
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/main.c33
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c434
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h89
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c4
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h518
-rw-r--r--drivers/net/wireless/ti/wilink_platform_data.c37
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c31
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c71
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c67
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h53
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c85
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c4
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h62
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c24
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h9
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h9
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h8
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c200
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c19
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c45
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h27
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h86
-rw-r--r--drivers/net/wireless/wl3501_cs.c6
-rw-r--r--drivers/net/wireless/zd1201.c9
-rw-r--r--drivers/net/xen-netback/common.h113
-rw-r--r--drivers/net/xen-netback/interface.c141
-rw-r--r--drivers/net/xen-netback/netback.c871
-rw-r--r--drivers/net/xen-netfront.c14
-rw-r--r--drivers/nfc/Kconfig12
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/pn533.c28
-rw-r--r--drivers/nfc/pn544/i2c.c194
-rw-r--r--drivers/nfc/pn544/pn544.c2
-rw-r--r--drivers/nfc/pn544/pn544.h3
-rw-r--r--drivers/nfc/port100.c25
-rw-r--r--drivers/nfc/trf7970a.c1370
-rw-r--r--drivers/of/Kconfig18
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/address.c8
-rw-r--r--drivers/of/base.c458
-rw-r--r--drivers/of/fdt.c145
-rw-r--r--drivers/of/of_mdio.c23
-rw-r--r--drivers/of/of_mtd.c34
-rw-r--r--drivers/of/of_net.c33
-rw-r--r--drivers/of/of_reserved_mem.c217
-rw-r--r--drivers/of/pdt.c3
-rw-r--r--drivers/of/selftest.c62
-rw-r--r--drivers/of/testcase-data/tests-phandle.dtsi3
-rw-r--r--drivers/oprofile/nmi_timer_int.c23
-rw-r--r--drivers/parport/share.c3
-rw-r--r--drivers/pci/Makefile23
-rw-r--r--drivers/pci/bus.c6
-rw-r--r--drivers/pci/host-bridge.c8
-rw-r--r--drivers/pci/host/Kconfig2
-rw-r--r--drivers/pci/host/pci-imx6.c47
-rw-r--r--drivers/pci/host/pci-mvebu.c26
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c180
-rw-r--r--drivers/pci/host/pcie-designware.c6
-rw-r--r--drivers/pci/hotplug/acpiphp.h16
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c529
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c4
-rw-r--r--drivers/pci/hotplug/pciehp.h5
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c1
-rw-r--r--drivers/pci/hotplug/pciehp_core.c8
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c173
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c75
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c2
-rw-r--r--drivers/pci/iov.c119
-rw-r--r--drivers/pci/pci-driver.c33
-rw-r--r--drivers/pci/pci-sysfs.c17
-rw-r--r--drivers/pci/pci.c118
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/probe.c93
-rw-r--r--drivers/pci/quirks.c190
-rw-r--r--drivers/pci/rom.c2
-rw-r--r--drivers/pci/search.c10
-rw-r--r--drivers/pci/setup-res.c37
-rw-r--r--drivers/pci/slot.c6
-rw-r--r--drivers/pcmcia/sa11xx_base.c3
-rw-r--r--drivers/pcmcia/yenta_socket.c18
-rw-r--r--drivers/phy/Kconfig107
-rw-r--r--drivers/phy/Makefile9
-rw-r--r--drivers/phy/phy-bcm-kona-usb2.c4
-rw-r--r--drivers/phy/phy-core.c76
-rw-r--r--drivers/phy/phy-exynos4210-usb2.c261
-rw-r--r--drivers/phy/phy-exynos4x12-usb2.c328
-rw-r--r--drivers/phy/phy-exynos5250-sata.c251
-rw-r--r--drivers/phy/phy-exynos5250-usb2.c404
-rw-r--r--drivers/phy/phy-omap-control.c (renamed from drivers/usb/phy/phy-omap-control.c)169
-rw-r--r--drivers/phy/phy-omap-usb2.c131
-rw-r--r--drivers/phy/phy-samsung-usb2.c228
-rw-r--r--drivers/phy/phy-samsung-usb2.h67
-rw-r--r--drivers/phy/phy-sun4i-usb.c331
-rw-r--r--drivers/phy/phy-ti-pipe3.c470
-rw-r--r--drivers/phy/phy-twl4030-usb.c6
-rw-r--r--drivers/phy/phy-xgene.c1750
-rw-r--r--drivers/pinctrl/Kconfig6
-rw-r--r--drivers/pinctrl/devicetree.c4
-rw-r--r--drivers/pinctrl/mvebu/Kconfig9
-rw-r--r--drivers/pinctrl/mvebu/Makefile2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-370.c20
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-375.c459
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-38x.c462
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c24
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c404
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c25
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c122
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.h55
-rw-r--r--drivers/pinctrl/pinctrl-adi2-bf54x.c138
-rw-r--r--drivers/pinctrl/pinctrl-adi2-bf60x.c128
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c32
-rw-r--r--drivers/pinctrl/pinctrl-adi2.h8
-rw-r--r--drivers/pinctrl/pinctrl-at91.c39
-rw-r--r--drivers/pinctrl/pinctrl-baytrail.c56
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c186
-rw-r--r--drivers/pinctrl/pinctrl-exynos.c82
-rw-r--r--drivers/pinctrl/pinctrl-imx.c2
-rw-r--r--drivers/pinctrl/pinctrl-msm.c124
-rw-r--r--drivers/pinctrl/pinctrl-msm.h5
-rw-r--r--drivers/pinctrl/pinctrl-msm8x74.c14
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c178
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c2
-rw-r--r--drivers/pinctrl/pinctrl-samsung.h1
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/pinctrl-st.c462
-rw-r--r--drivers/pinctrl/pinctrl-sunxi-pins.h12
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c38
-rw-r--r--drivers/pinctrl/pinctrl-tegra.h4
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c1100
-rw-r--r--drivers/pinctrl/pinctrl-tegra124.c1243
-rw-r--r--drivers/pinctrl/pinctrl-tegra20.c640
-rw-r--r--drivers/pinctrl/pinctrl-tegra30.c1287
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c171
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c596
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas6.c46
-rw-r--r--drivers/pinctrl/sirf/pinctrl-prima2.c3
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c17
-rw-r--r--drivers/platform/x86/Kconfig24
-rw-r--r--drivers/platform/x86/Makefile2
-rw-r--r--drivers/platform/x86/alienware-wmi.c565
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c1
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c65
-rw-r--r--drivers/platform/x86/intel_baytrail.c224
-rw-r--r--drivers/platform/x86/intel_baytrail.h90
-rw-r--r--drivers/platform/x86/panasonic-laptop.c11
-rw-r--r--drivers/platform/x86/sony-laptop.c539
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c233
-rw-r--r--drivers/platform/x86/toshiba_acpi.c635
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c9
-rw-r--r--drivers/pnp/resource.c2
-rw-r--r--drivers/power/reset/Kconfig2
-rw-r--r--drivers/power/reset/qnap-poweroff.c49
-rw-r--r--drivers/powercap/intel_rapl.c27
-rw-r--r--drivers/ps3/ps3-vuart.c4
-rw-r--r--drivers/ptp/Kconfig1
-rw-r--r--drivers/ptp/ptp_chardev.c128
-rw-r--r--drivers/ptp/ptp_clock.c23
-rw-r--r--drivers/ptp/ptp_ixp46x.c1
-rw-r--r--drivers/ptp/ptp_pch.c1
-rw-r--r--drivers/ptp/ptp_private.h8
-rw-r--r--drivers/ptp/ptp_sysfs.c115
-rw-r--r--drivers/pwm/Kconfig30
-rw-r--r--drivers/pwm/Makefile3
-rw-r--r--drivers/pwm/pwm-atmel.c9
-rw-r--r--drivers/pwm/pwm-clps711x.c176
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c495
-rw-r--r--drivers/pwm/pwm-lpss.c183
-rw-r--r--drivers/pwm/pwm-pxa.c4
-rw-r--r--drivers/pwm/pwm-samsung.c5
-rw-r--r--drivers/rapidio/devices/tsi721.c1
-rw-r--r--drivers/rapidio/devices/tsi721.h4
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c111
-rw-r--r--drivers/rapidio/rio-driver.c22
-rw-r--r--drivers/rapidio/rio-scan.c1
-rw-r--r--drivers/rapidio/rio-sysfs.c40
-rw-r--r--drivers/rapidio/rio.c11
-rw-r--r--drivers/rapidio/rio.h1
-rw-r--r--drivers/regulator/88pm800.c4
-rw-r--r--drivers/regulator/88pm8607.c8
-rw-r--r--drivers/regulator/Kconfig54
-rw-r--r--drivers/regulator/Makefile5
-rw-r--r--drivers/regulator/aat2870-regulator.c1
-rw-r--r--drivers/regulator/act8865-regulator.c21
-rw-r--r--drivers/regulator/anatop-regulator.c124
-rw-r--r--drivers/regulator/arizona-ldo1.c11
-rw-r--r--drivers/regulator/arizona-micsupp.c4
-rw-r--r--drivers/regulator/as3711-regulator.c15
-rw-r--r--drivers/regulator/as3722-regulator.c1
-rw-r--r--drivers/regulator/bcm590xx-regulator.c404
-rw-r--r--drivers/regulator/core.c16
-rw-r--r--drivers/regulator/da9052-regulator.c29
-rw-r--r--drivers/regulator/da9055-regulator.c69
-rw-r--r--drivers/regulator/da9063-regulator.c13
-rw-r--r--drivers/regulator/da9210-regulator.c5
-rw-r--r--drivers/regulator/db8500-prcmu.c2
-rw-r--r--drivers/regulator/dbx500-prcmu.c16
-rw-r--r--drivers/regulator/dummy.c6
-rw-r--r--drivers/regulator/fan53555.c13
-rw-r--r--drivers/regulator/fixed.c46
-rw-r--r--drivers/regulator/gpio-regulator.c34
-rw-r--r--drivers/regulator/helpers.c48
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/lp872x.c4
-rw-r--r--drivers/regulator/max14577.c8
-rw-r--r--drivers/regulator/max1586.c15
-rw-r--r--drivers/regulator/max77686.c15
-rw-r--r--drivers/regulator/max77693.c44
-rw-r--r--drivers/regulator/max8649.c8
-rw-r--r--drivers/regulator/max8660.c35
-rw-r--r--drivers/regulator/max8907-regulator.c16
-rw-r--r--drivers/regulator/max8925-regulator.c8
-rw-r--r--drivers/regulator/max8952.c26
-rw-r--r--drivers/regulator/max8973-regulator.c6
-rw-r--r--drivers/regulator/max8997.c22
-rw-r--r--drivers/regulator/max8998.c27
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c12
-rw-r--r--drivers/regulator/pbias-regulator.c255
-rw-r--r--drivers/regulator/pfuze100-regulator.c204
-rw-r--r--drivers/regulator/rc5t583-regulator.c13
-rw-r--r--drivers/regulator/s2mpa01.c479
-rw-r--r--drivers/regulator/s2mps11.c376
-rw-r--r--drivers/regulator/s5m8767.c172
-rw-r--r--drivers/regulator/st-pwm.c190
-rw-r--r--drivers/regulator/ti-abb-regulator.c140
-rw-r--r--drivers/regulator/tps51632-regulator.c8
-rw-r--r--drivers/regulator/tps62360-regulator.c9
-rw-r--r--drivers/regulator/tps6507x-regulator.c29
-rw-r--r--drivers/regulator/tps65090-regulator.c13
-rw-r--r--drivers/regulator/tps65217-regulator.c17
-rw-r--r--drivers/regulator/tps65218-regulator.c285
-rw-r--r--drivers/regulator/tps6524x-regulator.c5
-rw-r--r--drivers/regulator/tps6586x-regulator.c20
-rw-r--r--drivers/regulator/tps65910-regulator.c21
-rw-r--r--drivers/regulator/tps80031-regulator.c6
-rw-r--r--drivers/regulator/wm831x-dcdc.c16
-rw-r--r--drivers/regulator/wm831x-isink.c4
-rw-r--r--drivers/regulator/wm831x-ldo.c12
-rw-r--r--drivers/regulator/wm8350-regulator.c4
-rw-r--r--drivers/regulator/wm8994-regulator.c4
-rw-r--r--drivers/reset/Kconfig2
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c71
-rw-r--r--drivers/reset/sti/Kconfig15
-rw-r--r--drivers/reset/sti/Makefile4
-rw-r--r--drivers/reset/sti/reset-stih415.c112
-rw-r--r--drivers/reset/sti/reset-stih416.c143
-rw-r--r--drivers/reset/sti/reset-syscfg.c186
-rw-r--r--drivers/reset/sti/reset-syscfg.h69
-rw-r--r--drivers/rtc/Kconfig12
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/interface.c3
-rw-r--r--drivers/rtc/rtc-as3722.c5
-rw-r--r--drivers/rtc/rtc-at32ap700x.c4
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-cmos.c8
-rw-r--r--drivers/rtc/rtc-coh901331.c18
-rw-r--r--drivers/rtc/rtc-da9052.c4
-rw-r--r--drivers/rtc/rtc-da9055.c4
-rw-r--r--drivers/rtc/rtc-davinci.c33
-rw-r--r--drivers/rtc/rtc-ds1305.c10
-rw-r--r--drivers/rtc/rtc-ds1307.c243
-rw-r--r--drivers/rtc/rtc-ds1347.c166
-rw-r--r--drivers/rtc/rtc-ds1390.c5
-rw-r--r--drivers/rtc/rtc-ds1511.c21
-rw-r--r--drivers/rtc/rtc-ds1553.c21
-rw-r--r--drivers/rtc/rtc-ds1672.c11
-rw-r--r--drivers/rtc/rtc-ds1742.c5
-rw-r--r--drivers/rtc/rtc-ds3232.c100
-rw-r--r--drivers/rtc/rtc-imxdi.c4
-rw-r--r--drivers/rtc/rtc-isl12057.c6
-rw-r--r--drivers/rtc/rtc-jz4740.c25
-rw-r--r--drivers/rtc/rtc-lpc32xx.c5
-rw-r--r--drivers/rtc/rtc-mc13xxx.c139
-rw-r--r--drivers/rtc/rtc-moxart.c4
-rw-r--r--drivers/rtc/rtc-mv.c12
-rw-r--r--drivers/rtc/rtc-nuc900.c5
-rw-r--r--drivers/rtc/rtc-palmas.c5
-rw-r--r--drivers/rtc/rtc-pm8xxx.c288
-rw-r--r--drivers/rtc/rtc-pxa.c1
-rw-r--r--drivers/rtc/rtc-rv3029c2.c12
-rw-r--r--drivers/rtc/rtc-rx8025.c1
-rw-r--r--drivers/rtc/rtc-s3c.c4
-rw-r--r--drivers/rtc/rtc-sirfsoc.c68
-rw-r--r--drivers/rtc/rtc-spear.c4
-rw-r--r--drivers/rtc/rtc-stk17ta8.c3
-rw-r--r--drivers/rtc/rtc-sunxi.c2
-rw-r--r--drivers/rtc/rtc-test.c9
-rw-r--r--drivers/rtc/rtc-tx4939.c4
-rw-r--r--drivers/rtc/rtc-vt8500.c28
-rw-r--r--drivers/rtc/rtc-x1205.c2
-rw-r--r--drivers/s390/block/dasd_diag.c4
-rw-r--r--drivers/s390/block/dcssblk.c14
-rw-r--r--drivers/s390/char/con3215.c8
-rw-r--r--drivers/s390/char/con3270.c13
-rw-r--r--drivers/s390/char/raw3270.c35
-rw-r--r--drivers/s390/char/raw3270.h3
-rw-r--r--drivers/s390/char/sclp.c88
-rw-r--r--drivers/s390/char/sclp.h9
-rw-r--r--drivers/s390/char/sclp_cmd.c17
-rw-r--r--drivers/s390/char/sclp_early.c31
-rw-r--r--drivers/s390/char/tape_std.c3
-rw-r--r--drivers/s390/cio/airq.c66
-rw-r--r--drivers/s390/cio/ccwgroup.c26
-rw-r--r--drivers/s390/cio/chsc_sch.c3
-rw-r--r--drivers/s390/cio/cio.c12
-rw-r--r--drivers/s390/cio/device.c52
-rw-r--r--drivers/s390/crypto/zcrypt_api.c4
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c18
-rw-r--r--drivers/s390/kvm/kvm_virtio.c2
-rw-r--r--drivers/s390/kvm/virtio_ccw.c323
-rw-r--r--drivers/s390/net/lcs.c1
-rw-r--r--drivers/s390/net/qeth_core.h7
-rw-r--r--drivers/s390/net/qeth_core_main.c8
-rw-r--r--drivers/s390/net/qeth_l2_main.c6
-rw-r--r--drivers/scsi/Kconfig3
-rw-r--r--drivers/scsi/NCR5380.c2
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/aacraid/rx.c9
-rw-r--r--drivers/scsi/aacraid/sa.c3
-rw-r--r--drivers/scsi/aacraid/src.c4
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h1
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c7
-rw-r--r--drivers/scsi/arm/acornscsi.c2
-rw-r--r--drivers/scsi/arm/cumana_1.c2
-rw-r--r--drivers/scsi/arm/cumana_2.c2
-rw-r--r--drivers/scsi/arm/powertec.c2
-rw-r--r--drivers/scsi/atari_scsi.c12
-rw-r--r--drivers/scsi/be2iscsi/be.h11
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c121
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h10
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c13
-rw-r--r--drivers/scsi/be2iscsi/be_main.c311
-rw-r--r--drivers/scsi/be2iscsi/be_main.h23
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c22
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c11
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c4
-rw-r--r--drivers/scsi/bfa/bfad_im.c7
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c20
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c19
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c46
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c12
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c8
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c59
-rw-r--r--drivers/scsi/dtc.c2
-rw-r--r--drivers/scsi/eata.c2
-rw-r--r--drivers/scsi/eata_pio.c4
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_log.c8
-rw-r--r--drivers/scsi/fcoe/fcoe.c15
-rw-r--r--drivers/scsi/g_NCR5380.c2
-rw-r--r--drivers/scsi/gdth.c6
-rw-r--r--drivers/scsi/hosts.c2
-rw-r--r--drivers/scsi/hpsa.c2618
-rw-r--r--drivers/scsi/hpsa.h171
-rw-r--r--drivers/scsi/hpsa_cmd.h270
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c2
-rw-r--r--drivers/scsi/in2000.c2
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c382
-rw-r--r--drivers/scsi/ipr.h22
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/scsi/isci/request.c8
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c1
-rw-r--r--drivers/scsi/iscsi_tcp.c25
-rw-r--r--drivers/scsi/libiscsi.c264
-rw-r--r--drivers/scsi/libiscsi_tcp.c71
-rw-r--r--drivers/scsi/libsas/sas_ata.c35
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h22
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c628
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h24
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c108
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c200
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c39
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c277
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c47
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c552
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c267
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h21
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid.c120
-rw-r--r--drivers/scsi/megaraid.h3
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h114
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c815
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c11
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c272
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h3
-rw-r--r--drivers/scsi/pas16.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c38
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c105
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c12
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c3
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h12
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c97
-rw-r--r--drivers/scsi/qla1280.c7
-rw-r--r--drivers/scsi/qla2xxx/Makefile2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c193
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c134
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h65
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h12
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c426
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c44
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c93
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c252
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h57
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c171
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c65
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c909
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h205
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c36
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h17
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h31
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c7
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c69
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c25
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c89
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c232
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi.c382
-rw-r--r--drivers/scsi/scsi_debug.c141
-rw-r--r--drivers/scsi/scsi_error.c6
-rw-r--r--drivers/scsi/scsi_lib.c116
-rw-r--r--drivers/scsi/scsi_pm.c128
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_scan.c117
-rw-r--r--drivers/scsi/scsi_sysfs.c257
-rw-r--r--drivers/scsi/scsi_tgt_lib.c3
-rw-r--r--drivers/scsi/scsi_transport_fc.c1
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c6
-rw-r--r--drivers/scsi/scsi_transport_srp.c1
-rw-r--r--drivers/scsi/sd.c43
-rw-r--r--drivers/scsi/sd.h6
-rw-r--r--drivers/scsi/ses.c38
-rw-r--r--drivers/scsi/st.c11
-rw-r--r--drivers/scsi/t128.c2
-rw-r--r--drivers/scsi/u14-34f.c2
-rw-r--r--drivers/scsi/vmw_pvscsi.c242
-rw-r--r--drivers/scsi/vmw_pvscsi.h19
-rw-r--r--drivers/scsi/wd7000.c2
-rw-r--r--drivers/sh/clk/cpg.c38
-rw-r--r--drivers/sh/intc/Kconfig2
-rw-r--r--drivers/spi/Kconfig55
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/spi-altera.c7
-rw-r--r--drivers/spi/spi-ath79.c1
-rw-r--r--drivers/spi/spi-atmel.c34
-rw-r--r--drivers/spi/spi-au1550.c39
-rw-r--r--drivers/spi/spi-bcm2835.c1
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c8
-rw-r--r--drivers/spi/spi-bcm63xx.c6
-rw-r--r--drivers/spi/spi-bfin-sport.c1
-rw-r--r--drivers/spi/spi-bfin-v3.c3
-rw-r--r--drivers/spi/spi-bfin5xx.c8
-rw-r--r--drivers/spi/spi-bitbang.c5
-rw-r--r--drivers/spi/spi-butterfly.c3
-rw-r--r--drivers/spi/spi-clps711x.c227
-rw-r--r--drivers/spi/spi-coldfire-qspi.c112
-rw-r--r--drivers/spi/spi-davinci.c14
-rw-r--r--drivers/spi/spi-dw-mmio.c2
-rw-r--r--drivers/spi/spi-dw.c17
-rw-r--r--drivers/spi/spi-efm32.c46
-rw-r--r--drivers/spi/spi-ep93xx.c21
-rw-r--r--drivers/spi/spi-falcon.c5
-rw-r--r--drivers/spi/spi-fsl-dspi.c94
-rw-r--r--drivers/spi/spi-fsl-espi.c8
-rw-r--r--drivers/spi/spi-fsl-lib.c14
-rw-r--r--drivers/spi/spi-fsl-spi.c33
-rw-r--r--drivers/spi/spi-gpio.c8
-rw-r--r--drivers/spi/spi-imx.c7
-rw-r--r--drivers/spi/spi-mpc512x-psc.c20
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c4
-rw-r--r--drivers/spi/spi-mpc52xx.c23
-rw-r--r--drivers/spi/spi-mxs.c7
-rw-r--r--drivers/spi/spi-nuc900.c28
-rw-r--r--drivers/spi/spi-oc-tiny.c3
-rw-r--r--drivers/spi/spi-octeon.c80
-rw-r--r--drivers/spi/spi-omap-100k.c52
-rw-r--r--drivers/spi/spi-omap-uwire.c34
-rw-r--r--drivers/spi/spi-omap2-mcspi.c91
-rw-r--r--drivers/spi/spi-orion.c80
-rw-r--r--drivers/spi/spi-pl022.c80
-rw-r--r--drivers/spi/spi-ppc4xx.c1
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c1
-rw-r--r--drivers/spi/spi-pxa2xx-pxadma.c1
-rw-r--r--drivers/spi/spi-pxa2xx.c3
-rw-r--r--drivers/spi/spi-qup.c779
-rw-r--r--drivers/spi/spi-rspi.c842
-rw-r--r--drivers/spi/spi-s3c24xx.c19
-rw-r--r--drivers/spi/spi-s3c64xx.c424
-rw-r--r--drivers/spi/spi-sc18is602.c29
-rw-r--r--drivers/spi/spi-sh-hspi.c43
-rw-r--r--drivers/spi/spi-sh-msiof.c385
-rw-r--r--drivers/spi/spi-sh-sci.c8
-rw-r--r--drivers/spi/spi-sh.c6
-rw-r--r--drivers/spi/spi-sirf.c116
-rw-r--r--drivers/spi/spi-sun4i.c478
-rw-r--r--drivers/spi/spi-sun6i.c484
-rw-r--r--drivers/spi/spi-tegra114.c27
-rw-r--r--drivers/spi/spi-tegra20-sflash.c26
-rw-r--r--drivers/spi/spi-tegra20-slink.c20
-rw-r--r--drivers/spi/spi-ti-qspi.c5
-rw-r--r--drivers/spi/spi-ti-ssp.c378
-rw-r--r--drivers/spi/spi-topcliff-pch.c47
-rw-r--r--drivers/spi/spi-txx9.c28
-rw-r--r--drivers/spi/spi-xcomm.c13
-rw-r--r--drivers/spi/spi-xilinx.c27
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c170
-rw-r--r--drivers/spi/spi.c247
-rw-r--r--drivers/spi/spidev.c23
-rw-r--r--drivers/spmi/Kconfig27
-rw-r--r--drivers/spmi/Makefile6
-rw-r--r--drivers/spmi/spmi-pmic-arb.c778
-rw-r--r--drivers/spmi/spmi.c574
-rw-r--r--drivers/staging/Kconfig14
-rw-r--r--drivers/staging/Makefile7
-rw-r--r--drivers/staging/android/Kconfig15
-rw-r--r--drivers/staging/android/android_alarm.h44
-rw-r--r--drivers/staging/android/ashmem.h30
-rw-r--r--drivers/staging/android/binder.c268
-rw-r--r--drivers/staging/android/binder.h308
-rw-r--r--drivers/staging/android/binder_trace.h14
-rw-r--r--drivers/staging/android/ion/ion.c156
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c2
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c12
-rw-r--r--drivers/staging/android/ion/ion_heap.c67
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c8
-rw-r--r--drivers/staging/android/ion/ion_priv.h89
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c84
-rw-r--r--drivers/staging/android/ion/tegra/tegra_ion.c10
-rw-r--r--drivers/staging/android/lowmemorykiller.c5
-rw-r--r--drivers/staging/android/sw_sync.h20
-rw-r--r--drivers/staging/android/sync.h88
-rw-r--r--drivers/staging/android/timed_gpio.c8
-rw-r--r--drivers/staging/android/timed_output.h4
-rw-r--r--drivers/staging/android/uapi/android_alarm.h62
-rw-r--r--drivers/staging/android/uapi/ashmem.h47
-rw-r--r--drivers/staging/android/uapi/binder.h351
-rw-r--r--drivers/staging/android/uapi/sw_sync.h32
-rw-r--r--drivers/staging/android/uapi/sync.h97
-rw-r--r--drivers/staging/bcm/Adapter.h6
-rw-r--r--drivers/staging/bcm/Bcmchar.c3439
-rw-r--r--drivers/staging/bcm/CmHost.c61
-rw-r--r--drivers/staging/bcm/DDRInit.c32
-rw-r--r--drivers/staging/bcm/InterfaceInit.c416
-rw-r--r--drivers/staging/bcm/InterfaceIsr.c223
-rw-r--r--drivers/staging/bcm/Kconfig1
-rw-r--r--drivers/staging/bcm/Qos.c35
-rw-r--r--drivers/staging/bcm/Typedefs.h22
-rw-r--r--drivers/staging/bcm/headers.h2
-rw-r--r--drivers/staging/bcm/hostmibs.c8
-rw-r--r--drivers/staging/bcm/nvm.c30
-rw-r--r--drivers/staging/ced1401/ced_ioc.c163
-rw-r--r--drivers/staging/ced1401/ced_ioctl.h31
-rw-r--r--drivers/staging/ced1401/usb1401.c195
-rw-r--r--drivers/staging/ced1401/usb1401.h6
-rw-r--r--drivers/staging/ced1401/use14_ioc.h5
-rw-r--r--drivers/staging/ced1401/userspace/use1401.c10
-rw-r--r--drivers/staging/comedi/Kconfig24
-rw-r--r--drivers/staging/comedi/comedi_fops.c191
-rw-r--r--drivers/staging/comedi/comedidev.h56
-rw-r--r--drivers/staging/comedi/drivers.c30
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c37
-rw-r--r--drivers/staging/comedi/drivers/Makefile2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c597
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c1187
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c857
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c106
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c73
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c24
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c14
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c24
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c16
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c26
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c36
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c6
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c57
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c22
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c3
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c73
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c92
-rw-r--r--drivers/staging/comedi/drivers/adq12b.c31
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c67
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c2
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c10
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c31
-rw-r--r--drivers/staging/comedi/drivers/aio_iiro_16.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c35
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c29
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c42
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c2
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c491
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c150
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c51
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c59
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c41
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c4
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c5
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.c99
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.h84
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c7
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c2
-rw-r--r--drivers/staging/comedi/drivers/dac02.c172
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c85
-rw-r--r--drivers/staging/comedi/drivers/das08.c35
-rw-r--r--drivers/staging/comedi/drivers/das16.c19
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c38
-rw-r--r--drivers/staging/comedi/drivers/das1800.c10
-rw-r--r--drivers/staging/comedi/drivers/das6402.c672
-rw-r--r--drivers/staging/comedi/drivers/das800.c43
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c83
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c23
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c29
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c39
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c73
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c8
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c40
-rw-r--r--drivers/staging/comedi/drivers/fl512.c207
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c1099
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c109
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c20
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c891
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.h6
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c209
-rw-r--r--drivers/staging/comedi/drivers/me4000.c3
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c36
-rw-r--r--drivers/staging/comedi/drivers/mf6x4.c23
-rw-r--r--drivers/staging/comedi/drivers/mite.c11
-rw-r--r--drivers/staging/comedi/drivers/mite.h6
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c30
-rw-r--r--drivers/staging/comedi/drivers/multiq3.c38
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c55
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c61
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c56
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c33
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c78
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c16
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.h8
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c26
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c1484
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c1045
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c1523
-rw-r--r--drivers/staging/comedi/drivers/pcm3724.c6
-rw-r--r--drivers/staging/comedi/drivers/pcmad.c21
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c42
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h8
-rw-r--r--drivers/staging/comedi/drivers/poc.c157
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c5
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c86
-rw-r--r--drivers/staging/comedi/drivers/rti800.c29
-rw-r--r--drivers/staging/comedi/drivers/rti802.c127
-rw-r--r--drivers/staging/comedi/drivers/s526.c34
-rw-r--r--drivers/staging/comedi/drivers/s626.c174
-rw-r--r--drivers/staging/comedi/drivers/skel.c43
-rw-r--r--drivers/staging/comedi/drivers/ssv_dnp.c3
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c51
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c3
-rw-r--r--drivers/staging/comedi/proc.c48
-rw-r--r--drivers/staging/comedi/range.c37
-rw-r--r--drivers/staging/crystalhd/bcm_70012_regs.h3
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.c8
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c21
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.h4
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.c2
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.h2
-rw-r--r--drivers/staging/cxt1e1/Makefile1
-rw-r--r--drivers/staging/cxt1e1/comet.c44
-rw-r--r--drivers/staging/cxt1e1/comet_tables.c1
-rw-r--r--drivers/staging/cxt1e1/functions.c373
-rw-r--r--drivers/staging/cxt1e1/hwprobe.c598
-rw-r--r--drivers/staging/cxt1e1/libsbew.h56
-rw-r--r--drivers/staging/cxt1e1/linux.c1643
-rw-r--r--drivers/staging/cxt1e1/musycc.c43
-rw-r--r--drivers/staging/cxt1e1/ossiRelease.c29
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.c6
-rw-r--r--drivers/staging/cxt1e1/pmcc4.h1
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c116
-rw-r--r--drivers/staging/cxt1e1/pmcc4_private.h1
-rw-r--r--drivers/staging/cxt1e1/pmcc4_sysdep.h1
-rw-r--r--drivers/staging/cxt1e1/sbeproc.c4
-rw-r--r--drivers/staging/dgap/Makefile6
-rw-r--r--drivers/staging/dgap/dgap.c7675
-rw-r--r--drivers/staging/dgap/dgap.h1322
-rw-r--r--drivers/staging/dgap/dgap_conf.h290
-rw-r--r--drivers/staging/dgap/dgap_downld.h69
-rw-r--r--drivers/staging/dgap/dgap_driver.c1030
-rw-r--r--drivers/staging/dgap/dgap_driver.h620
-rw-r--r--drivers/staging/dgap/dgap_fep5.c1938
-rw-r--r--drivers/staging/dgap/dgap_fep5.h253
-rw-r--r--drivers/staging/dgap/dgap_kcompat.h64
-rw-r--r--drivers/staging/dgap/dgap_parse.c1374
-rw-r--r--drivers/staging/dgap/dgap_parse.h35
-rw-r--r--drivers/staging/dgap/dgap_pci.h92
-rw-r--r--drivers/staging/dgap/dgap_sysfs.c793
-rw-r--r--drivers/staging/dgap/dgap_sysfs.h48
-rw-r--r--drivers/staging/dgap/dgap_trace.c186
-rw-r--r--drivers/staging/dgap/dgap_trace.h36
-rw-r--r--drivers/staging/dgap/dgap_tty.c3580
-rw-r--r--drivers/staging/dgap/dgap_tty.h39
-rw-r--r--drivers/staging/dgap/dgap_types.h36
-rw-r--r--drivers/staging/dgap/digi.h376
-rw-r--r--drivers/staging/dgap/downld.c798
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c11
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c4
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c27
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c3
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c53
-rw-r--r--drivers/staging/dgrp/dgrp_sysfs.c4
-rw-r--r--drivers/staging/dgrp/dgrp_tty.c22
-rw-r--r--drivers/staging/echo/TODO5
-rw-r--r--drivers/staging/et131x/et131x.c23
-rw-r--r--drivers/staging/frontier/Kconfig1
-rw-r--r--drivers/staging/frontier/alphatrack.c11
-rw-r--r--drivers/staging/frontier/tranzport.c12
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h8
-rw-r--r--drivers/staging/ft1000/ft1000.h156
-rw-r--r--drivers/staging/fwserial/fwserial.c13
-rw-r--r--drivers/staging/fwserial/fwserial.h1
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c187
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h2
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c26
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c56
-rw-r--r--drivers/staging/gdm724x/netlink_k.c3
-rw-r--r--drivers/staging/gdm72xx/TODO2
-rw-r--r--drivers/staging/gdm72xx/gdm_sdio.c50
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.c37
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.c77
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.h20
-rw-r--r--drivers/staging/gs_fpgaboot/Kconfig8
-rw-r--r--drivers/staging/gs_fpgaboot/Makefile4
-rw-r--r--drivers/staging/gs_fpgaboot/README71
-rw-r--r--drivers/staging/gs_fpgaboot/TODO7
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c422
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.h56
-rw-r--r--drivers/staging/gs_fpgaboot/io.c294
-rw-r--r--drivers/staging/gs_fpgaboot/io.h90
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h22
-rw-r--r--drivers/staging/iio/Documentation/lsiio.c157
-rw-r--r--drivers/staging/iio/accel/sca3000.h26
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c172
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c2
-rw-r--r--drivers/staging/iio/adc/ad799x.h10
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c56
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c15
-rw-r--r--drivers/staging/iio/addac/adt7316.c21
-rw-r--r--drivers/staging/iio/addac/adt7316.h8
-rw-r--r--drivers/staging/iio/light/tsl2583.c6
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c8
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c4
-rw-r--r--drivers/staging/imx-drm/Kconfig1
-rw-r--r--drivers/staging/imx-drm/Makefile3
-rw-r--r--drivers/staging/imx-drm/TODO5
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c859
-rw-r--r--drivers/staging/imx-drm/imx-drm.h44
-rw-r--r--drivers/staging/imx-drm/imx-fb.c47
-rw-r--r--drivers/staging/imx-drm/imx-fbdev.c74
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.c695
-rw-r--r--drivers/staging/imx-drm/imx-ldb.c150
-rw-r--r--drivers/staging/imx-drm/imx-tve.c137
-rw-r--r--drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h2
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dc.c2
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-di.c317
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c3
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c99
-rw-r--r--drivers/staging/imx-drm/ipuv3-plane.c6
-rw-r--r--drivers/staging/imx-drm/parallel-display.c127
-rw-r--r--drivers/staging/keucr/common.h8
-rw-r--r--drivers/staging/keucr/init.c16
-rw-r--r--drivers/staging/keucr/init.h6
-rw-r--r--drivers/staging/keucr/smil.h112
-rw-r--r--drivers/staging/keucr/smilecc.c58
-rw-r--r--drivers/staging/keucr/smilmain.c82
-rw-r--r--drivers/staging/keucr/smilsub.c170
-rw-r--r--drivers/staging/keucr/smscsi.c28
-rw-r--r--drivers/staging/keucr/transport.c8
-rw-r--r--drivers/staging/keucr/transport.h6
-rw-r--r--drivers/staging/keucr/usb.c6
-rw-r--r--drivers/staging/keucr/usb.h86
-rw-r--r--drivers/staging/line6/audio.c5
-rw-r--r--drivers/staging/line6/capture.c1
-rw-r--r--drivers/staging/line6/driver.c56
-rw-r--r--drivers/staging/line6/driver.h2
-rw-r--r--drivers/staging/line6/midi.c7
-rw-r--r--drivers/staging/line6/pcm.c2
-rw-r--r--drivers/staging/line6/usbdefs.h8
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h9
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h8
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h9
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h30
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h2
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h16
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c132
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c10
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c15
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c3
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c5
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c12
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c7
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c12
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c3
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c24
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c3
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h6
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c34
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c6
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c15
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h14
-rw-r--r--drivers/staging/lustre/lustre/include/ioctl.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lclient.h4
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_acl.h2
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_compat25.h4
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h85
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h22
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_cfg.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h18
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h21
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h12
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h21
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h8
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_linkea.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h11
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h26
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_quota.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h4
-rw-r--r--drivers/staging/lustre/lustre/include/md_object.h2
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h11
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h15
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h2
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c41
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c5
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/debug.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/fail.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c102
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_string.c54
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c19
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c65
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-module.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c38
-rw-r--r--drivers/staging/lustre/lustre/libcfs/nidstrings.c7
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c5
-rw-r--r--drivers/staging/lustre/lustre/libcfs/upcall_cache.c5
-rw-r--r--drivers/staging/lustre/lustre/libcfs/workitem.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c328
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c35
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c105
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h42
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c32
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c124
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c14
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c37
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c31
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c125
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c5
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c16
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c3
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c4
-rw-r--r--drivers/staging/lustre/lustre/lvfs/lvfs_linux.c4
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c117
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c3
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c74
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c92
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_lvfs.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_osd.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/local_storage.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/md_attrs.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c54
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo.c3
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c20
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c12
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c16
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c187
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c36
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c37
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c5
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c57
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c2
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile2
-rw-r--r--drivers/staging/media/as102/as10x_handle.h14
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c12
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c6
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c3
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h2
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c7
-rw-r--r--drivers/staging/media/go7007/Kconfig3
-rw-r--r--drivers/staging/media/go7007/go7007-loader.c2
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c25
-rw-r--r--drivers/staging/media/go7007/snd-go7007.c5
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c2
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c4
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c26
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c13
-rw-r--r--drivers/staging/media/msi3101/Kconfig7
-rw-r--r--drivers/staging/media/msi3101/Makefile1
-rw-r--r--drivers/staging/media/msi3101/msi001.c500
-rw-r--r--drivers/staging/media/msi3101/sdr-msi3101.c1566
-rw-r--r--drivers/staging/media/omap24xx/tcm825x.c8
-rw-r--r--drivers/staging/media/omap24xx/tcm825x.h2
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/media/rtl2832u_sdr/Kconfig7
-rw-r--r--drivers/staging/media/rtl2832u_sdr/Makefile6
-rw-r--r--drivers/staging/media/rtl2832u_sdr/rtl2832_sdr.c1500
-rw-r--r--drivers/staging/media/rtl2832u_sdr/rtl2832_sdr.h54
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_core.c85
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_hv7131d.c15
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_hv7131r.c15
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_ov7630.c24
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_ov7660.c24
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_pas106b.c18
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_pas202bcb.c15
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-core.c2
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-g723.c6
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-tw28.c2
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c46
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-v4l2.c2
-rw-r--r--drivers/staging/netlogic/xlr_net.c5
-rw-r--r--drivers/staging/nokia_h4p/Kconfig9
-rw-r--r--drivers/staging/nokia_h4p/Makefile6
-rw-r--r--drivers/staging/nokia_h4p/TODO132
-rw-r--r--drivers/staging/nokia_h4p/hci_h4p.h222
-rw-r--r--drivers/staging/nokia_h4p/nokia_core.c1206
-rw-r--r--drivers/staging/nokia_h4p/nokia_fw-bcm.c148
-rw-r--r--drivers/staging/nokia_h4p/nokia_fw-csr.c149
-rw-r--r--drivers/staging/nokia_h4p/nokia_fw-ti1273.c110
-rw-r--r--drivers/staging/nokia_h4p/nokia_fw.c208
-rw-r--r--drivers/staging/nokia_h4p/nokia_uart.c199
-rw-r--r--drivers/staging/nvec/nvec.c3
-rw-r--r--drivers/staging/nvec/nvec_ps2.c2
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c752
-rw-r--r--drivers/staging/octeon/ethernet-defines.h4
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c20
-rw-r--r--drivers/staging/octeon/ethernet-mem.c11
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c23
-rw-r--r--drivers/staging/octeon/ethernet-tx.c8
-rw-r--r--drivers/staging/octeon/ethernet.c10
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h2
-rw-r--r--drivers/staging/ozwpan/ozcdev.c4
-rw-r--r--drivers/staging/ozwpan/ozhcd.c8
-rw-r--r--drivers/staging/ozwpan/ozpd.c31
-rw-r--r--drivers/staging/ozwpan/ozpd.h5
-rw-r--r--drivers/staging/ozwpan/ozproto.c57
-rw-r--r--drivers/staging/ozwpan/ozproto.h2
-rw-r--r--drivers/staging/ozwpan/ozprotocol.h6
-rw-r--r--drivers/staging/ozwpan/ozusbif.h2
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c2
-rw-r--r--drivers/staging/panel/panel.c41
-rw-r--r--drivers/staging/rtl8187se/Kconfig1
-rw-r--r--drivers/staging/rtl8187se/Module.symvers0
-rw-r--r--drivers/staging/rtl8187se/ieee80211/dot11d.h11
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h12
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c14
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c15
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c1203
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c151
-rw-r--r--drivers/staging/rtl8187se/r8180.h750
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c894
-rw-r--r--drivers/staging/rtl8187se/r8180_dm.c6
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225.h21
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225z2.c59
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c10
-rw-r--r--drivers/staging/rtl8187se/r8185b_init.c62
-rw-r--r--drivers/staging/rtl8188eu/Kconfig5
-rw-r--r--drivers/staging/rtl8188eu/Makefile1
-rw-r--r--drivers/staging/rtl8188eu/TODO5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c81
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_br_ext.c13
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c220
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c12
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c59
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c58
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_io.c28
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c52
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c303
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c302
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c78
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_p2p.c50
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c27
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c502
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c93
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c87
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c46
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c129
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c13
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c37
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c15
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_interface.c101
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c12
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c87
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_mp.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c37
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c3
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_ops_linux.c63
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h6
-rw-r--r--drivers/staging/rtl8188eu/include/ethernet.h42
-rw-r--r--drivers/staging/rtl8188eu/include/if_ether.h111
-rw-r--r--drivers/staging/rtl8188eu/include/ioctl_cfg80211.h107
-rw-r--r--drivers/staging/rtl8188eu/include/ip.h126
-rw-r--r--drivers/staging/rtl8188eu/include/nic_spec.h44
-rw-r--r--drivers/staging/rtl8188eu/include/odm_interface.h23
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h34
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h11
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h7
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h24
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_io.h44
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h32
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h49
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h139
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h5
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c242
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c9
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c10
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c35
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c44
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c17
-rw-r--r--drivers/staging/rtl8192e/dot11d.c9
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Kconfig1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h52
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c4
-rw-r--r--drivers/staging/rtl8192u/Kconfig1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/EndianFree.h194
-rw-r--r--drivers/staging/rtl8192u/ieee80211/aes.c468
-rw-r--r--drivers/staging/rtl8192u/ieee80211/arc4.c103
-rw-r--r--drivers/staging/rtl8192u/ieee80211/autoload.c40
-rw-r--r--drivers/staging/rtl8192u/ieee80211/cipher.c298
-rw-r--r--drivers/staging/rtl8192u/ieee80211/compress.c64
-rw-r--r--drivers/staging/rtl8192u/ieee80211/crypto_compat.h58
-rw-r--r--drivers/staging/rtl8192u/ieee80211/digest.c108
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c173
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c22
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c107
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c5
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/internal.h81
-rw-r--r--drivers/staging/rtl8192u/ieee80211/michael_mic.c194
-rw-r--r--drivers/staging/rtl8192u/ieee80211/proc.c112
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c12
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c70
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c39
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl_crypto.h398
-rw-r--r--drivers/staging/rtl8192u/ieee80211/scatterwalk.c117
-rw-r--r--drivers/staging/rtl8192u/ieee80211/scatterwalk.h51
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.c10
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c138
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c99
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h40
-rw-r--r--drivers/staging/rtl8192u/r819xU_HTGen.h12
-rw-r--r--drivers/staging/rtl8192u/r819xU_HTType.h379
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c50
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware_img.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c29
-rw-r--r--drivers/staging/rtl8712/Kconfig2
-rw-r--r--drivers/staging/rtl8712/drv_types.h2
-rw-r--r--drivers/staging/rtl8712/ieee80211.c6
-rw-r--r--drivers/staging/rtl8712/os_intfs.c2
-rw-r--r--drivers/staging/rtl8712/osdep_service.h5
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c11
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c13
-rw-r--r--drivers/staging/rtl8723au/Kconfig38
-rw-r--r--drivers/staging/rtl8723au/Makefile58
-rw-r--r--drivers/staging/rtl8723au/TODO13
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ap.c2087
-rw-r--r--drivers/staging/rtl8723au/core/rtw_cmd.c1876
-rw-r--r--drivers/staging/rtl8723au/core/rtw_efuse.c716
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ieee80211.c1861
-rw-r--r--drivers/staging/rtl8723au/core/rtw_io.c266
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ioctl_set.c601
-rw-r--r--drivers/staging/rtl8723au/core/rtw_led.c1899
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme.c2500
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme_ext.c9990
-rw-r--r--drivers/staging/rtl8723au/core/rtw_p2p.c4001
-rw-r--r--drivers/staging/rtl8723au/core/rtw_pwrctrl.c689
-rw-r--r--drivers/staging/rtl8723au/core/rtw_recv.c2471
-rw-r--r--drivers/staging/rtl8723au/core/rtw_security.c1652
-rw-r--r--drivers/staging/rtl8723au/core/rtw_sreset.c255
-rw-r--r--drivers/staging/rtl8723au/core/rtw_sta_mgt.c509
-rw-r--r--drivers/staging/rtl8723au/core/rtw_wlan_util.c1760
-rw-r--r--drivers/staging/rtl8723au/core/rtw_xmit.c2460
-rw-r--r--drivers/staging/rtl8723au/hal/Hal8723PwrSeq.c80
-rw-r--r--drivers/staging/rtl8723au/hal/Hal8723UHWImg_CE.c136
-rw-r--r--drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c1063
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c726
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c188
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c259
-rw-r--r--drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c163
-rw-r--r--drivers/staging/rtl8723au/hal/hal_com.c881
-rw-r--r--drivers/staging/rtl8723au/hal/hal_intf.c420
-rw-r--r--drivers/staging/rtl8723au/hal/odm.c2090
-rw-r--r--drivers/staging/rtl8723au/hal/odm_HWConfig.c481
-rw-r--r--drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c162
-rw-r--r--drivers/staging/rtl8723au/hal/odm_debug.c24
-rw-r--r--drivers/staging/rtl8723au/hal/odm_interface.c236
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c11304
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_cmd.c845
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_dm.c273
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c3452
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c1162
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c507
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_rxdesc.c69
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_sreset.c73
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_xmit.c52
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723au_led.c113
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723au_recv.c247
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723au_xmit.c548
-rw-r--r--drivers/staging/rtl8723au/hal/usb_halinit.c1834
-rw-r--r--drivers/staging/rtl8723au/hal/usb_ops_linux.c848
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723APhyCfg.h230
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723APhyReg.h1078
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723PwrSeq.h150
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h29
-rw-r--r--drivers/staging/rtl8723au/include/HalDMOutSrc8723A.h64
-rw-r--r--drivers/staging/rtl8723au/include/HalHWImg8723A_BB.h44
-rw-r--r--drivers/staging/rtl8723au/include/HalHWImg8723A_FW.h28
-rw-r--r--drivers/staging/rtl8723au/include/HalHWImg8723A_MAC.h26
-rw-r--r--drivers/staging/rtl8723au/include/HalHWImg8723A_RF.h25
-rw-r--r--drivers/staging/rtl8723au/include/HalPwrSeqCmd.h130
-rw-r--r--drivers/staging/rtl8723au/include/HalVerDef.h136
-rw-r--r--drivers/staging/rtl8723au/include/cmd_osdep.h26
-rw-r--r--drivers/staging/rtl8723au/include/drv_types.h360
-rw-r--r--drivers/staging/rtl8723au/include/ethernet.h (renamed from drivers/staging/rtl8188eu/include/h2clbk.h)23
-rw-r--r--drivers/staging/rtl8723au/include/hal_com.h211
-rw-r--r--drivers/staging/rtl8723au/include/hal_intf.h392
-rw-r--r--drivers/staging/rtl8723au/include/ieee80211.h603
-rw-r--r--drivers/staging/rtl8723au/include/ioctl_cfg80211.h119
-rw-r--r--drivers/staging/rtl8723au/include/mlme_osdep.h28
-rw-r--r--drivers/staging/rtl8723au/include/mp_custom_oid.h342
-rw-r--r--drivers/staging/rtl8723au/include/odm.h1205
-rw-r--r--drivers/staging/rtl8723au/include/odm_HWConfig.h174
-rw-r--r--drivers/staging/rtl8723au/include/odm_RegConfig8723A.h34
-rw-r--r--drivers/staging/rtl8723au/include/odm_RegDefine11AC.h49
-rw-r--r--drivers/staging/rtl8723au/include/odm_RegDefine11N.h165
-rw-r--r--drivers/staging/rtl8723au/include/odm_debug.h139
-rw-r--r--drivers/staging/rtl8723au/include/odm_interface.h131
-rw-r--r--drivers/staging/rtl8723au/include/odm_precomp.h54
-rw-r--r--drivers/staging/rtl8723au/include/odm_reg.h114
-rw-r--r--drivers/staging/rtl8723au/include/odm_types.h36
-rw-r--r--drivers/staging/rtl8723au/include/osdep_intf.h46
-rw-r--r--drivers/staging/rtl8723au/include/osdep_service.h207
-rw-r--r--drivers/staging/rtl8723au/include/recv_osdep.h45
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h1672
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_cmd.h160
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_dm.h144
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_hal.h575
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_led.h30
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_pg.h98
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_recv.h70
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_rf.h58
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_spec.h2158
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_sreset.h25
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_xmit.h229
-rw-r--r--drivers/staging/rtl8723au/include/rtw_ap.h55
-rw-r--r--drivers/staging/rtl8723au/include/rtw_cmd.h835
-rw-r--r--drivers/staging/rtl8723au/include/rtw_debug.h192
-rw-r--r--drivers/staging/rtl8723au/include/rtw_eeprom.h135
-rw-r--r--drivers/staging/rtl8723au/include/rtw_efuse.h109
-rw-r--r--drivers/staging/rtl8723au/include/rtw_event.h114
-rw-r--r--drivers/staging/rtl8723au/include/rtw_ht.h43
-rw-r--r--drivers/staging/rtl8723au/include/rtw_io.h416
-rw-r--r--drivers/staging/rtl8723au/include/rtw_ioctl.h26
-rw-r--r--drivers/staging/rtl8723au/include/rtw_ioctl_set.h39
-rw-r--r--drivers/staging/rtl8723au/include/rtw_led.h181
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme.h624
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme_ext.h780
-rw-r--r--drivers/staging/rtl8723au/include/rtw_p2p.h158
-rw-r--r--drivers/staging/rtl8723au/include/rtw_pwrctrl.h265
-rw-r--r--drivers/staging/rtl8723au/include/rtw_qos.h26
-rw-r--r--drivers/staging/rtl8723au/include/rtw_recv.h318
-rw-r--r--drivers/staging/rtl8723au/include/rtw_rf.h113
-rw-r--r--drivers/staging/rtl8723au/include/rtw_security.h357
-rw-r--r--drivers/staging/rtl8723au/include/rtw_sreset.h56
-rw-r--r--drivers/staging/rtl8723au/include/rtw_version.h1
-rw-r--r--drivers/staging/rtl8723au/include/rtw_xmit.h407
-rw-r--r--drivers/staging/rtl8723au/include/sta_info.h385
-rw-r--r--drivers/staging/rtl8723au/include/usb_hal.h20
-rw-r--r--drivers/staging/rtl8723au/include/usb_ops.h97
-rw-r--r--drivers/staging/rtl8723au/include/usb_ops_linux.h46
-rw-r--r--drivers/staging/rtl8723au/include/usb_osintf.h24
-rw-r--r--drivers/staging/rtl8723au/include/usb_vendor_req.h31
-rw-r--r--drivers/staging/rtl8723au/include/wifi.h707
-rw-r--r--drivers/staging/rtl8723au/include/wlan_bssdef.h215
-rw-r--r--drivers/staging/rtl8723au/include/xmit_osdep.h57
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c4532
-rw-r--r--drivers/staging/rtl8723au/os_dep/mlme_linux.c187
-rw-r--r--drivers/staging/rtl8723au/os_dep/os_intfs.c970
-rw-r--r--drivers/staging/rtl8723au/os_dep/osdep_service.c175
-rw-r--r--drivers/staging/rtl8723au/os_dep/recv_linux.c225
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_intf.c833
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_ops_linux.c283
-rw-r--r--drivers/staging/rtl8723au/os_dep/xmit_linux.c195
-rw-r--r--drivers/staging/rtl8821ae/base.c199
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c2
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c2
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c2
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c2
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c2
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c2
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c2
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c12
-rw-r--r--drivers/staging/rtl8821ae/core.c12
-rw-r--r--drivers/staging/rtl8821ae/core.h8
-rw-r--r--drivers/staging/rtl8821ae/debug.c10
-rw-r--r--drivers/staging/rtl8821ae/debug.h12
-rw-r--r--drivers/staging/rtl8821ae/efuse.c26
-rw-r--r--drivers/staging/rtl8821ae/pci.c274
-rw-r--r--drivers/staging/rtl8821ae/pci.h8
-rw-r--r--drivers/staging/rtl8821ae/ps.c2
-rw-r--r--drivers/staging/rtl8821ae/rc.c1
-rw-r--r--drivers/staging/rtl8821ae/regd.c4
-rw-r--r--drivers/staging/rtl8821ae/regd.h4
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/dm.c24
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/fw.c8
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h2
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c6
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hw.c13
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/phy.c8
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/phy.h2
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h10
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c2
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/reg.h28
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/sw.c4
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/trx.c12
-rw-r--r--drivers/staging/rtl8821ae/wifi.h486
-rw-r--r--drivers/staging/rts5139/ms.c21
-rw-r--r--drivers/staging/rts5139/ms_mg.c1
-rw-r--r--drivers/staging/rts5139/rts51x.c6
-rw-r--r--drivers/staging/rts5139/rts51x_fop.c10
-rw-r--r--drivers/staging/rts5139/rts51x_fop.h4
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.c4
-rw-r--r--drivers/staging/rts5139/rts51x_transport.c4
-rw-r--r--drivers/staging/rts5139/sd.c4
-rw-r--r--drivers/staging/rts5139/sd_cprm.c1
-rw-r--r--drivers/staging/rts5139/xd.c18
-rw-r--r--drivers/staging/rts5208/ms.c2
-rw-r--r--drivers/staging/rts5208/rtsx.c6
-rw-r--r--drivers/staging/rts5208/rtsx_card.c71
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c8
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c4
-rw-r--r--drivers/staging/sb105x/Kconfig9
-rw-r--r--drivers/staging/sb105x/Makefile3
-rw-r--r--drivers/staging/sb105x/sb_mp_register.h295
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c3189
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.h291
-rw-r--r--drivers/staging/sb105x/sb_ser_core.h368
-rw-r--r--drivers/staging/sbe-2t3e3/2t3e3.h21
-rw-r--r--drivers/staging/sbe-2t3e3/ctrl.c17
-rw-r--r--drivers/staging/sbe-2t3e3/ctrl.h16
-rw-r--r--drivers/staging/sbe-2t3e3/dc.c7
-rw-r--r--drivers/staging/sbe-2t3e3/intr.c4
-rw-r--r--drivers/staging/sbe-2t3e3/maps.c9
-rw-r--r--drivers/staging/sbe-2t3e3/module.c2
-rw-r--r--drivers/staging/sbe-2t3e3/netdev.c6
-rw-r--r--drivers/staging/sep/sep_crypto.c2
-rw-r--r--drivers/staging/sep/sep_main.c94
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c18
-rw-r--r--drivers/staging/silicom/bp_mod.h25
-rw-r--r--drivers/staging/silicom/bpctl_mod.c629
-rw-r--r--drivers/staging/silicom/bypasslib/bp_ioctl.h8
-rw-r--r--drivers/staging/silicom/bypasslib/libbp_sd.h313
-rw-r--r--drivers/staging/slicoss/README40
-rw-r--r--drivers/staging/slicoss/TODO38
-rw-r--r--drivers/staging/slicoss/slic.h9
-rw-r--r--drivers/staging/slicoss/slicoss.c73
-rw-r--r--drivers/staging/sm7xxfb/Kconfig13
-rw-r--r--drivers/staging/sm7xxfb/Makefile1
-rw-r--r--drivers/staging/sm7xxfb/TODO9
-rw-r--r--drivers/staging/sm7xxfb/sm7xx.h779
-rw-r--r--drivers/staging/sm7xxfb/sm7xxfb.c1028
-rw-r--r--drivers/staging/speakup/kobjects.c62
-rw-r--r--drivers/staging/speakup/serialio.c4
-rw-r--r--drivers/staging/speakup/speakup.h2
-rw-r--r--drivers/staging/speakup/speakup_acntpc.c22
-rw-r--r--drivers/staging/speakup/speakup_acntsa.c22
-rw-r--r--drivers/staging/speakup/speakup_apollo.c24
-rw-r--r--drivers/staging/speakup/speakup_audptr.c24
-rw-r--r--drivers/staging/speakup/speakup_bns.c22
-rw-r--r--drivers/staging/speakup/speakup_decext.c24
-rw-r--r--drivers/staging/speakup/speakup_decpc.c24
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c30
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c28
-rw-r--r--drivers/staging/speakup/speakup_dummy.c22
-rw-r--r--drivers/staging/speakup/speakup_keypc.c18
-rw-r--r--drivers/staging/speakup/speakup_ltlk.c28
-rw-r--r--drivers/staging/speakup/speakup_soft.c30
-rw-r--r--drivers/staging/speakup/speakup_spkout.c24
-rw-r--r--drivers/staging/speakup/speakup_txprt.c22
-rw-r--r--drivers/staging/tidspbridge/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/core/io_sm.c2
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c23
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430_pwr.c15
-rw-r--r--drivers/staging/tidspbridge/dynload/tramp.c2
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c4
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv.c2
-rw-r--r--drivers/staging/tidspbridge/rmgr/mgr.c8
-rw-r--r--drivers/staging/tidspbridge/rmgr/nldr.c5
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c20
-rw-r--r--drivers/staging/unisys/Documentation/overview.txt174
-rw-r--r--drivers/staging/unisys/Documentation/proc-entries.txt93
-rw-r--r--drivers/staging/unisys/Kconfig20
-rw-r--r--drivers/staging/unisys/MAINTAINERS6
-rw-r--r--drivers/staging/unisys/Makefile10
-rw-r--r--drivers/staging/unisys/TODO21
-rw-r--r--drivers/staging/unisys/channels/Kconfig10
-rw-r--r--drivers/staging/unisys/channels/Makefile13
-rw-r--r--drivers/staging/unisys/channels/channel.c219
-rw-r--r--drivers/staging/unisys/channels/chanstub.c70
-rw-r--r--drivers/staging/unisys/channels/chanstub.h23
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/channel.h661
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/channel_guid.h64
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/controlframework.h77
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h619
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/diagchannel.h427
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/iochannel.h933
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/vbuschannel.h135
-rw-r--r--drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h92
-rw-r--r--drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h310
-rw-r--r--drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h53
-rw-r--r--drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h209
-rw-r--r--drivers/staging/unisys/common-spar/include/version.h46
-rw-r--r--drivers/staging/unisys/common-spar/include/vmcallinterface.h167
-rw-r--r--drivers/staging/unisys/include/commontypes.h170
-rw-r--r--drivers/staging/unisys/include/guestlinuxdebug.h182
-rw-r--r--drivers/staging/unisys/include/guidutils.h203
-rw-r--r--drivers/staging/unisys/include/periodic_work.h40
-rw-r--r--drivers/staging/unisys/include/procobjecttree.h48
-rw-r--r--drivers/staging/unisys/include/sparstop.h30
-rw-r--r--drivers/staging/unisys/include/timskmod.h324
-rw-r--r--drivers/staging/unisys/include/timskmodutils.h75
-rw-r--r--drivers/staging/unisys/include/uisqueue.h440
-rw-r--r--drivers/staging/unisys/include/uisthread.h46
-rw-r--r--drivers/staging/unisys/include/uisutils.h348
-rw-r--r--drivers/staging/unisys/include/uniklog.h193
-rw-r--r--drivers/staging/unisys/include/vbushelper.h47
-rw-r--r--drivers/staging/unisys/uislib/Kconfig10
-rw-r--r--drivers/staging/unisys/uislib/Makefile17
-rw-r--r--drivers/staging/unisys/uislib/uislib.c2421
-rw-r--r--drivers/staging/unisys/uislib/uisqueue.c160
-rw-r--r--drivers/staging/unisys/uislib/uisthread.c85
-rw-r--r--drivers/staging/unisys/uislib/uisutils.c350
-rw-r--r--drivers/staging/unisys/virthba/Kconfig10
-rw-r--r--drivers/staging/unisys/virthba/Makefile16
-rw-r--r--drivers/staging/unisys/virthba/virthba.c1823
-rw-r--r--drivers/staging/unisys/virthba/virthba.h31
-rw-r--r--drivers/staging/unisys/virtpci/Kconfig10
-rw-r--r--drivers/staging/unisys/virtpci/Makefile13
-rw-r--r--drivers/staging/unisys/virtpci/virtpci.c1771
-rw-r--r--drivers/staging/unisys/virtpci/virtpci.h104
-rw-r--r--drivers/staging/unisys/visorchannel/Kconfig10
-rw-r--r--drivers/staging/unisys/visorchannel/Makefile14
-rw-r--r--drivers/staging/unisys/visorchannel/globals.h29
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel.h106
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel_funcs.c674
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel_main.c49
-rw-r--r--drivers/staging/unisys/visorchipset/Kconfig10
-rw-r--r--drivers/staging/unisys/visorchipset/Makefile18
-rw-r--r--drivers/staging/unisys/visorchipset/controlvm.h27
-rw-r--r--drivers/staging/unisys/visorchipset/controlvm_direct.c62
-rw-r--r--drivers/staging/unisys/visorchipset/file.c226
-rw-r--r--drivers/staging/unisys/visorchipset/file.h26
-rw-r--r--drivers/staging/unisys/visorchipset/filexfer.c506
-rw-r--r--drivers/staging/unisys/visorchipset/filexfer.h147
-rw-r--r--drivers/staging/unisys/visorchipset/globals.h45
-rw-r--r--drivers/staging/unisys/visorchipset/parser.c474
-rw-r--r--drivers/staging/unisys/visorchipset/parser.h45
-rw-r--r--drivers/staging/unisys/visorchipset/testing.h41
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset.h307
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset_main.c2963
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset_umode.h37
-rw-r--r--drivers/staging/unisys/visorutil/Kconfig10
-rw-r--r--drivers/staging/unisys/visorutil/Makefile11
-rw-r--r--drivers/staging/unisys/visorutil/charqueue.c141
-rw-r--r--drivers/staging/unisys/visorutil/charqueue.h37
-rw-r--r--drivers/staging/unisys/visorutil/easyproc.c371
-rw-r--r--drivers/staging/unisys/visorutil/easyproc.h92
-rw-r--r--drivers/staging/unisys/visorutil/memregion.h43
-rw-r--r--drivers/staging/unisys/visorutil/memregion_direct.c223
-rw-r--r--drivers/staging/unisys/visorutil/periodic_work.c236
-rw-r--r--drivers/staging/unisys/visorutil/procobjecttree.c348
-rw-r--r--drivers/staging/unisys/visorutil/visorkmodutils.c71
-rw-r--r--drivers/staging/usbip/Kconfig4
-rw-r--r--drivers/staging/usbip/stub.h3
-rw-r--r--drivers/staging/usbip/stub_dev.c164
-rw-r--r--drivers/staging/usbip/stub_main.c45
-rw-r--r--drivers/staging/usbip/stub_rx.c28
-rw-r--r--drivers/staging/usbip/stub_tx.c16
-rw-r--r--drivers/staging/usbip/uapi/usbip.h26
-rw-r--r--drivers/staging/usbip/usbip_common.c35
-rw-r--r--drivers/staging/usbip/usbip_common.h19
-rw-r--r--drivers/staging/usbip/userspace/README8
-rw-r--r--drivers/staging/usbip/userspace/configure.ac12
-rw-r--r--drivers/staging/usbip/userspace/libsrc/Makefile.am3
-rw-r--r--drivers/staging/usbip/userspace/libsrc/list.h136
-rw-r--r--drivers/staging/usbip/userspace/libsrc/sysfs_utils.c31
-rw-r--r--drivers/staging/usbip/userspace/libsrc/sysfs_utils.h8
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_common.c93
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_common.h41
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c303
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h7
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.c397
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.h14
-rw-r--r--drivers/staging/usbip/userspace/src/Makefile.am1
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_attach.c1
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_bind.c195
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_detach.c2
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_list.c146
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_network.h1
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_unbind.c129
-rw-r--r--drivers/staging/usbip/userspace/src/usbipd.c21
-rw-r--r--drivers/staging/usbip/userspace/src/utils.c48
-rw-r--r--drivers/staging/usbip/vhci_hcd.c44
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c15
-rw-r--r--drivers/staging/vt6655/80211mgr.c331
-rw-r--r--drivers/staging/vt6655/aes_ccmp.c36
-rw-r--r--drivers/staging/vt6655/desc.h15
-rw-r--r--drivers/staging/vt6655/dpc.c36
-rw-r--r--drivers/staging/vt6655/key.c8
-rw-r--r--drivers/staging/vt6655/mac.c10
-rw-r--r--drivers/staging/vt6655/mac.h2
-rw-r--r--drivers/staging/vt6655/michael.c14
-rw-r--r--drivers/staging/vt6655/michael.h6
-rw-r--r--drivers/staging/vt6655/rxtx.c62
-rw-r--r--drivers/staging/vt6655/wmgr.c30
-rw-r--r--drivers/staging/vt6656/80211hdr.h2
-rw-r--r--drivers/staging/vt6656/aes_ccmp.c6
-rw-r--r--drivers/staging/vt6656/card.h7
-rw-r--r--drivers/staging/vt6656/device.h85
-rw-r--r--drivers/staging/vt6656/dpc.c92
-rw-r--r--drivers/staging/vt6656/int.c150
-rw-r--r--drivers/staging/vt6656/int.h45
-rw-r--r--drivers/staging/vt6656/iwctl.c10
-rw-r--r--drivers/staging/vt6656/mac.c16
-rw-r--r--drivers/staging/vt6656/mac.h2
-rw-r--r--drivers/staging/vt6656/main_usb.c350
-rw-r--r--drivers/staging/vt6656/power.c50
-rw-r--r--drivers/staging/vt6656/rxtx.c185
-rw-r--r--drivers/staging/vt6656/rxtx.h92
-rw-r--r--drivers/staging/vt6656/usbpipe.c451
-rw-r--r--drivers/staging/vt6656/wcmd.c77
-rw-r--r--drivers/staging/vt6656/wcmd.h4
-rw-r--r--drivers/staging/vt6656/wmgr.c8
-rw-r--r--drivers/staging/winbond/localpara.h84
-rw-r--r--drivers/staging/winbond/wb35reg.c140
-rw-r--r--drivers/staging/winbond/wb35rx.c27
-rw-r--r--drivers/staging/wlags49_h2/dhf.c10
-rw-r--r--drivers/staging/wlags49_h2/hcf.c16
-rw-r--r--drivers/staging/wlags49_h2/sta_h2.c2
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c311
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c1992
-rw-r--r--drivers/staging/wlags49_h2/wl_util.c37
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c6
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c11
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h4
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h172
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c2
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c100
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c15
-rw-r--r--drivers/staging/xillybus/Kconfig3
-rw-r--r--drivers/staging/xillybus/xillybus_core.c6
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/thermal/imx_thermal.c39
-rw-r--r--drivers/thermal/rcar_thermal.c9
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c6
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c14
-rw-r--r--drivers/tty/hvc/hvc_console.c6
-rw-r--r--drivers/tty/hvc/hvc_opal.c22
-rw-r--r--drivers/tty/ipwireless/tty.c3
-rw-r--r--drivers/tty/n_tty.c11
-rw-r--r--drivers/tty/serial/8250/8250_core.c19
-rw-r--r--drivers/tty/serial/8250/8250_pci.c43
-rw-r--r--drivers/tty/serial/Kconfig6
-rw-r--r--drivers/tty/serial/amba-pl011.c21
-rw-r--r--drivers/tty/serial/atmel_serial.c77
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c16
-rw-r--r--drivers/tty/serial/clps711x.c21
-rw-r--r--drivers/tty/serial/crisv10.c112
-rw-r--r--drivers/tty/serial/efm32-uart.c5
-rw-r--r--drivers/tty/serial/fsl_lpuart.c430
-rw-r--r--drivers/tty/serial/imx.c82
-rw-r--r--drivers/tty/serial/max310x.c417
-rw-r--r--drivers/tty/serial/msm_serial.c140
-rw-r--r--drivers/tty/serial/msm_serial.h9
-rw-r--r--drivers/tty/serial/omap-serial.c11
-rw-r--r--drivers/tty/serial/pch_uart.c2
-rw-r--r--drivers/tty/serial/samsung.c40
-rw-r--r--drivers/tty/serial/serial_core.c20
-rw-r--r--drivers/tty/serial/sh-sci.c89
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c195
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h5
-rw-r--r--drivers/tty/synclink.c1
-rw-r--r--drivers/tty/synclinkmp.c1
-rw-r--r--drivers/tty/tty_audit.c3
-rw-r--r--drivers/tty/tty_buffer.c20
-rw-r--r--drivers/tty/tty_io.c23
-rw-r--r--drivers/tty/tty_ldsem.c15
-rw-r--r--drivers/tty/vt/vt.c20
-rw-r--r--drivers/usb/Kconfig10
-rw-r--r--drivers/usb/chipidea/Makefile1
-rw-r--r--drivers/usb/chipidea/bits.h2
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_zevio.c72
-rw-r--r--drivers/usb/chipidea/core.c87
-rw-r--r--drivers/usb/chipidea/udc.c324
-rw-r--r--drivers/usb/core/Kconfig1
-rw-r--r--drivers/usb/core/config.c1
-rw-r--r--drivers/usb/core/devio.c286
-rw-r--r--drivers/usb/core/driver.c121
-rw-r--r--drivers/usb/core/generic.c1
-rw-r--r--drivers/usb/core/hcd.c37
-rw-r--r--drivers/usb/core/hub.c115
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/message.c10
-rw-r--r--drivers/usb/core/urb.c2
-rw-r--r--drivers/usb/core/usb.h10
-rw-r--r--drivers/usb/dwc2/core_intr.c25
-rw-r--r--drivers/usb/dwc2/hcd_intr.c14
-rw-r--r--drivers/usb/dwc3/core.c251
-rw-r--r--drivers/usb/dwc3/core.h105
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c8
-rw-r--r--drivers/usb/dwc3/gadget.c183
-rw-r--r--drivers/usb/dwc3/gadget.h12
-rw-r--r--drivers/usb/gadget/Kconfig3
-rw-r--r--drivers/usb/gadget/at91_udc.c14
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c16
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.h2
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/f_fs.c616
-rw-r--r--drivers/usb/gadget/f_midi.c7
-rw-r--r--drivers/usb/gadget/f_subset.c2
-rw-r--r--drivers/usb/gadget/f_uac2.c4
-rw-r--r--drivers/usb/gadget/gr_udc.c10
-rw-r--r--drivers/usb/gadget/inode.c9
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c5
-rw-r--r--drivers/usb/gadget/printer.c8
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c143
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c1
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c2
-rw-r--r--drivers/usb/gadget/u_ether.c101
-rw-r--r--drivers/usb/gadget/u_fs.h30
-rw-r--r--drivers/usb/gadget/u_serial.c4
-rw-r--r--drivers/usb/host/Kconfig4
-rw-r--r--drivers/usb/host/ehci-platform.c182
-rw-r--r--drivers/usb/host/ehci-tegra.c4
-rw-r--r--drivers/usb/host/hwa-hc.c42
-rw-r--r--drivers/usb/host/ohci-platform.c199
-rw-r--r--drivers/usb/host/uhci-platform.c1
-rw-r--r--drivers/usb/host/xhci-hub.c8
-rw-r--r--drivers/usb/host/xhci-mem.c212
-rw-r--r--drivers/usb/host/xhci-pci.c18
-rw-r--r--drivers/usb/host/xhci-plat.c4
-rw-r--r--drivers/usb/host/xhci-ring.c146
-rw-r--r--drivers/usb/host/xhci.c33
-rw-r--r--drivers/usb/host/xhci.h5
-rw-r--r--drivers/usb/misc/Kconfig1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c10
-rw-r--r--drivers/usb/misc/usbled.c34
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/musb_core.c5
-rw-r--r--drivers/usb/musb/musb_cppi41.c69
-rw-r--r--drivers/usb/musb/musb_dsps.c58
-rw-r--r--drivers/usb/musb/musb_host.c30
-rw-r--r--drivers/usb/musb/omap2430.c2
-rw-r--r--drivers/usb/phy/Kconfig21
-rw-r--r--drivers/usb/phy/Makefile2
-rw-r--r--drivers/usb/phy/phy-fsm-usb.c9
-rw-r--r--drivers/usb/phy/phy-msm-usb.c3
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c310
-rw-r--r--drivers/usb/phy/phy-omap-usb3.c361
-rw-r--r--drivers/usb/phy/phy-rcar-gen2-usb.c6
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c2
-rw-r--r--drivers/usb/phy/phy-ulpi.c2
-rw-r--r--drivers/usb/serial/ch341.c6
-rw-r--r--drivers/usb/serial/cyberjack.c2
-rw-r--r--drivers/usb/serial/cypress_m8.c21
-rw-r--r--drivers/usb/serial/generic.c61
-rw-r--r--drivers/usb/serial/iuu_phoenix.c2
-rw-r--r--drivers/usb/serial/keyspan.c30
-rw-r--r--drivers/usb/serial/keyspan_pda.c2
-rw-r--r--drivers/usb/serial/kl5kusb105.c4
-rw-r--r--drivers/usb/serial/kobil_sct.c3
-rw-r--r--drivers/usb/serial/mos7720.c12
-rw-r--r--drivers/usb/serial/mos7840.c4
-rw-r--r--drivers/usb/serial/quatech2.c2
-rw-r--r--drivers/usb/serial/spcp8x5.c7
-rw-r--r--drivers/usb/serial/symbolserial.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c4
-rw-r--r--drivers/usb/serial/usb-serial.c17
-rw-r--r--drivers/usb/storage/Kconfig2
-rw-r--r--drivers/usb/storage/uas-detect.h96
-rw-r--r--drivers/usb/storage/uas.c687
-rw-r--r--drivers/usb/storage/unusual_devs.h5
-rw-r--r--drivers/usb/storage/unusual_uas.h52
-rw-r--r--drivers/usb/storage/usb.c26
-rw-r--r--drivers/usb/storage/usb.h3
-rw-r--r--drivers/usb/wusbcore/devconnect.c4
-rw-r--r--drivers/usb/wusbcore/wa-hc.c2
-rw-r--r--drivers/usb/wusbcore/wa-hc.h26
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c2
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c583
-rw-r--r--drivers/vfio/Kconfig1
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c12
-rw-r--r--drivers/vfio/vfio.c6
-rw-r--r--drivers/vfio/vfio_iommu_type1.c656
-rw-r--r--drivers/video/Kconfig19
-rw-r--r--drivers/video/Makefile3
-rw-r--r--drivers/video/atmel_lcdfb.c8
-rw-r--r--drivers/video/aty/atyfb_base.c7
-rw-r--r--drivers/video/aty/mach64_accel.c3
-rw-r--r--drivers/video/aty/mach64_cursor.c22
-rw-r--r--drivers/video/backlight/aat2870_bl.c2
-rw-r--r--drivers/video/backlight/adp8860_bl.c4
-rw-r--r--drivers/video/backlight/adp8870_bl.c4
-rw-r--r--drivers/video/backlight/backlight.c30
-rw-r--r--drivers/video/backlight/corgi_lcd.c4
-rw-r--r--drivers/video/backlight/gpio_backlight.c58
-rw-r--r--drivers/video/backlight/hx8357.c4
-rw-r--r--drivers/video/backlight/ili922x.c4
-rw-r--r--drivers/video/backlight/ili9320.c4
-rw-r--r--drivers/video/backlight/l4f00242t03.c5
-rw-r--r--drivers/video/backlight/lm3533_bl.c5
-rw-r--r--drivers/video/backlight/lm3639_bl.c17
-rw-r--r--drivers/video/backlight/lms283gf05.c4
-rw-r--r--drivers/video/backlight/platform_lcd.c4
-rw-r--r--drivers/video/backlight/tps65217_bl.c5
-rw-r--r--drivers/video/cfbcopyarea.c153
-rw-r--r--drivers/video/console/fbcon.c27
-rw-r--r--drivers/video/da8xx-fb.c22
-rw-r--r--drivers/video/efifb.c13
-rw-r--r--drivers/video/exynos/Kconfig7
-rw-r--r--drivers/video/exynos/Makefile1
-rw-r--r--drivers/video/exynos/s6e8ax0.c13
-rw-r--r--drivers/video/fbmem.c3
-rw-r--r--drivers/video/gbefb.c4
-rw-r--r--drivers/video/hyperv_fb.c88
-rw-r--r--drivers/video/imxfb.c380
-rw-r--r--drivers/video/logo/Kconfig2
-rw-r--r--drivers/video/logo/logo.c2
-rw-r--r--drivers/video/matrox/matroxfb_accel.c38
-rw-r--r--drivers/video/matrox/matroxfb_base.c3
-rw-r--r--drivers/video/matrox/matroxfb_base.h2
-rw-r--r--drivers/video/omap2/displays-new/connector-analog-tv.c45
-rw-r--r--drivers/video/omap2/displays-new/connector-dvi.c45
-rw-r--r--drivers/video/omap2/displays-new/connector-hdmi.c32
-rw-r--r--drivers/video/omap2/displays-new/encoder-tfp410.c43
-rw-r--r--drivers/video/omap2/displays-new/encoder-tpd12s015.c56
-rw-r--r--drivers/video/omap2/displays-new/panel-dsi-cm.c62
-rw-r--r--drivers/video/omap2/displays-new/panel-lgphilips-lb035q02.c2
-rw-r--r--drivers/video/omap2/displays-new/panel-nec-nl8048hl11.c4
-rw-r--r--drivers/video/omap2/displays-new/panel-sharp-ls037v7dw01.c2
-rw-r--r--drivers/video/omap2/displays-new/panel-sony-acx565akm.c35
-rw-r--r--drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c2
-rw-r--r--drivers/video/omap2/displays-new/panel-tpo-td043mtea1.c2
-rw-r--r--drivers/video/omap2/dss/Makefile2
-rw-r--r--drivers/video/omap2/dss/dispc.c40
-rw-r--r--drivers/video/omap2/dss/display-sysfs.c4
-rw-r--r--drivers/video/omap2/dss/display.c32
-rw-r--r--drivers/video/omap2/dss/dpi.c72
-rw-r--r--drivers/video/omap2/dss/dsi.c163
-rw-r--r--drivers/video/omap2/dss/dss-of.c159
-rw-r--r--drivers/video/omap2/dss/dss.c82
-rw-r--r--drivers/video/omap2/dss/dss.h8
-rw-r--r--drivers/video/omap2/dss/hdmi4.c23
-rw-r--r--drivers/video/omap2/dss/hdmi_common.c74
-rw-r--r--drivers/video/omap2/dss/hdmi_wp.c2
-rw-r--r--drivers/video/omap2/dss/sdi.c60
-rw-r--r--drivers/video/omap2/dss/venc.c70
-rw-r--r--drivers/video/omap2/dss/venc_panel.c2
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c75
-rw-r--r--drivers/video/output.c133
-rw-r--r--drivers/video/pxa3xx-gcu.c191
-rw-r--r--drivers/video/sgivwfb.c889
-rw-r--r--drivers/video/sis/init.c1
-rw-r--r--drivers/video/tgafb.c288
-rw-r--r--drivers/video/uvesafb.c19
-rw-r--r--drivers/video/vesafb.c13
-rw-r--r--drivers/video/xilinxfb.c15
-rw-r--r--drivers/virtio/virtio_balloon.c14
-rw-r--r--drivers/virtio/virtio_pci.c6
-rw-r--r--drivers/virtio/virtio_ring.c12
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c29
-rw-r--r--drivers/vme/bridges/vme_tsi148.c18
-rw-r--r--drivers/w1/masters/Kconfig3
-rw-r--r--drivers/w1/masters/ds2490.c155
-rw-r--r--drivers/w1/masters/mxc_w1.c43
-rw-r--r--drivers/w1/masters/w1-gpio.c19
-rw-r--r--drivers/w1/slaves/Kconfig5
-rw-r--r--drivers/w1/slaves/w1_therm.c21
-rw-r--r--drivers/w1/w1.c269
-rw-r--r--drivers/w1/w1.h186
-rw-r--r--drivers/w1/w1_family.c8
-rw-r--r--drivers/w1/w1_family.h13
-rw-r--r--drivers/w1/w1_int.c25
-rw-r--r--drivers/w1/w1_io.c102
-rw-r--r--drivers/w1/w1_netlink.c359
-rw-r--r--drivers/w1/w1_netlink.h33
-rw-r--r--drivers/watchdog/Kconfig38
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/acquirewdt.c21
-rw-r--r--drivers/watchdog/advantechwdt.c21
-rw-r--r--drivers/watchdog/ar7_wdt.c1
-rw-r--r--drivers/watchdog/at32ap700x_wdt.c4
-rw-r--r--drivers/watchdog/ath79_wdt.c1
-rw-r--r--drivers/watchdog/bcm2835_wdt.c4
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c1
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c2
-rw-r--r--drivers/watchdog/booke_wdt.c8
-rw-r--r--drivers/watchdog/cpu5wdt.c1
-rw-r--r--drivers/watchdog/cpwd.c1
-rw-r--r--drivers/watchdog/da9052_wdt.c1
-rw-r--r--drivers/watchdog/da9055_wdt.c4
-rw-r--r--drivers/watchdog/davinci_wdt.c1
-rw-r--r--drivers/watchdog/ep93xx_wdt.c19
-rw-r--r--drivers/watchdog/geodewdt.c17
-rw-r--r--drivers/watchdog/hpwdt.c1
-rw-r--r--drivers/watchdog/i6300esb.c1
-rw-r--r--drivers/watchdog/iTCO_wdt.c145
-rw-r--r--drivers/watchdog/ib700wdt.c21
-rw-r--r--drivers/watchdog/ibmasr.c2
-rw-r--r--drivers/watchdog/indydog.c13
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c4
-rw-r--r--drivers/watchdog/it87_wdt.c41
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/max63xx_wdt.c1
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c1
-rw-r--r--drivers/watchdog/mtx-1_wdt.c1
-rw-r--r--drivers/watchdog/nuc900_wdt.c1
-rw-r--r--drivers/watchdog/octeon-wdt-main.c11
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c390
-rw-r--r--drivers/watchdog/omap_wdt.c23
-rw-r--r--drivers/watchdog/orion_wdt.c382
-rw-r--r--drivers/watchdog/pc87413_wdt.c7
-rw-r--r--drivers/watchdog/pcwd_usb.c4
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/rdc321x_wdt.c1
-rw-r--r--drivers/watchdog/retu_wdt.c1
-rw-r--r--drivers/watchdog/riowd.c1
-rw-r--r--drivers/watchdog/s3c2410_wdt.c9
-rw-r--r--drivers/watchdog/sc520_wdt.c3
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/watchdog/softdog.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c19
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c1
-rw-r--r--drivers/watchdog/sunxi_wdt.c2
-rw-r--r--drivers/watchdog/tegra_wdt.c302
-rw-r--r--drivers/watchdog/ts72xx_wdt.c6
-rw-r--r--drivers/watchdog/w83697hf_wdt.c4
-rw-r--r--drivers/watchdog/wdt285.c3
-rw-r--r--drivers/watchdog/wdt_pci.c1
-rw-r--r--drivers/watchdog/wm831x_wdt.c1
-rw-r--r--drivers/xen/balloon.c36
-rw-r--r--drivers/xen/events/events_2l.c15
-rw-r--r--drivers/xen/events/events_base.c108
-rw-r--r--drivers/xen/events/events_fifo.c8
-rw-r--r--drivers/xen/events/events_internal.h1
-rw-r--r--drivers/xen/grant-table.c73
-rw-r--r--drivers/xen/manage.c16
-rw-r--r--drivers/xen/pcpu.c1
-rw-r--r--drivers/xen/platform-pci.c2
-rw-r--r--drivers/xen/xen-acpi-cpuhotplug.c2
-rw-r--r--drivers/xen/xen-acpi-memhotplug.c2
-rw-r--r--drivers/xen/xen-acpi-pad.c26
-rw-r--r--drivers/xen/xen-acpi-processor.c15
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c3
-rw-r--r--drivers/xen/xen-selfballoon.c1
-rw-r--r--drivers/xen/xenbus/xenbus_client.c27
4776 files changed, 415435 insertions, 136810 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index b3138fbb46a4..0a0a90f52d26 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -52,6 +52,8 @@ source "drivers/i2c/Kconfig"
source "drivers/spi/Kconfig"
+source "drivers/spmi/Kconfig"
+
source "drivers/hsi/Kconfig"
source "drivers/pps/Kconfig"
@@ -170,4 +172,6 @@ source "drivers/phy/Kconfig"
source "drivers/powercap/Kconfig"
+source "drivers/mcb/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 8e3b8b06c0b2..e3ced91b1784 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
obj-$(CONFIG_MTD) += mtd/
obj-$(CONFIG_SPI) += spi/
+obj-$(CONFIG_SPMI) += spmi/
obj-y += hsi/
obj-y += net/
obj-$(CONFIG_ATM) += atm/
@@ -155,3 +156,4 @@ obj-$(CONFIG_IPACK_BUS) += ipack/
obj-$(CONFIG_NTB) += ntb/
obj-$(CONFIG_FMC) += fmc/
obj-$(CONFIG_POWERCAP) += powercap/
+obj-$(CONFIG_MCB) += mcb/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 4770de5707b9..ab686b310100 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -31,10 +31,14 @@ menuconfig ACPI
ACPI CA, see:
<http://acpica.org/>
- ACPI is an open industry specification co-developed by
- Hewlett-Packard, Intel, Microsoft, Phoenix, and Toshiba.
+ ACPI is an open industry specification originally co-developed by
+ Hewlett-Packard, Intel, Microsoft, Phoenix, and Toshiba. Currently,
+ it is developed by the ACPI Specification Working Group (ASWG) under
+ the UEFI Forum and any UEFI member can join the ASWG and contribute
+ to the ACPI specification.
The specification is available at:
<http://www.acpi.info>
+ <http://www.uefi.org/acpi/specs>
if ACPI
@@ -43,19 +47,6 @@ config ACPI_SLEEP
depends on SUSPEND || HIBERNATION
default y
-config ACPI_PROCFS
- bool "Deprecated /proc/acpi files"
- depends on PROC_FS
- help
- For backwards compatibility, this option allows
- deprecated /proc/acpi/ files to exist, even when
- they have been replaced by functions in /sys.
-
- This option has no effect on /proc/acpi/ files
- and functions which do not yet exist in /sys.
-
- Say N to delete /proc/acpi/ files that have moved to /sys/
-
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
default n
@@ -115,7 +106,7 @@ config ACPI_BUTTON
config ACPI_VIDEO
tristate "Video"
- depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL
+ depends on X86 && BACKLIGHT_CLASS_DEVICE
depends on INPUT
select THERMAL
help
@@ -343,6 +334,19 @@ config ACPI_BGRT
data from the firmware boot splash. It will appear under
/sys/firmware/acpi/bgrt/ .
+config ACPI_REDUCED_HARDWARE_ONLY
+ bool "Hardware-reduced ACPI support only" if EXPERT
+ def_bool n
+ depends on ACPI
+ help
+ This config item changes the way the ACPI code is built. When this
+ option is selected, the kernel will use a specialized version of
+ ACPICA that ONLY supports the ACPI "reduced hardware" mode. The
+ resulting kernel will be smaller but it will also be restricted to
+ running in ACPI reduced hardware mode ONLY.
+
+ If you are unsure what to do, do not enable this option.
+
source "drivers/acpi/apei/Kconfig"
config ACPI_EXTLOG
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 6f190bc2b8b7..2c01c1da29ce 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -33,6 +33,7 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/acpi.h>
+#include "battery.h"
#define PREFIX "ACPI: "
@@ -57,6 +58,7 @@ struct acpi_ac {
struct power_supply charger;
struct platform_device *pdev;
unsigned long long state;
+ struct notifier_block battery_nb;
};
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
@@ -152,6 +154,26 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
return;
}
+static int acpi_ac_battery_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct acpi_ac *ac = container_of(nb, struct acpi_ac, battery_nb);
+ struct acpi_bus_event *event = (struct acpi_bus_event *)data;
+
+ /*
+ * On HP Pavilion dv6-6179er AC status notifications aren't triggered
+ * when adapter is plugged/unplugged. However, battery status
+ * notifications are triggered when battery starts charging or
+ * discharging. Re-reading AC status triggers lost AC notifications,
+ * if AC status has changed.
+ */
+ if (strcmp(event->device_class, ACPI_BATTERY_CLASS) == 0 &&
+ event->type == ACPI_BATTERY_NOTIFY_STATUS)
+ acpi_ac_get_state(ac);
+
+ return NOTIFY_OK;
+}
+
static int thinkpad_e530_quirk(const struct dmi_system_id *d)
{
ac_sleep_before_get_state_ms = 1000;
@@ -215,6 +237,8 @@ static int acpi_ac_probe(struct platform_device *pdev)
acpi_device_name(adev), acpi_device_bid(adev),
ac->state ? "on-line" : "off-line");
+ ac->battery_nb.notifier_call = acpi_ac_battery_notify;
+ register_acpi_notifier(&ac->battery_nb);
end:
if (result)
kfree(ac);
@@ -261,6 +285,7 @@ static int acpi_ac_remove(struct platform_device *pdev)
ac = platform_get_drvdata(pdev);
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
+ unregister_acpi_notifier(&ac->battery_nb);
kfree(ac);
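
For readers unfamiliar with the pattern added above: an ACPI bus notifier is an ordinary notifier_block registered with register_acpi_notifier() and removed with unregister_acpi_notifier(); the callback receives a struct acpi_bus_event describing the device class and the notification value. A minimal stand-alone sketch follows. The handler name and the literal "battery"/0x80 values are illustrative stand-ins for the ACPI_BATTERY_CLASS and ACPI_BATTERY_NOTIFY_STATUS constants the patch takes from battery.h.

#include <linux/acpi.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/string.h>

static int example_acpi_event(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct acpi_bus_event *event = data;

	/* React only to battery status notifications (value 0x80) */
	if (!strcmp(event->device_class, "battery") && event->type == 0x80)
		pr_info("battery status notification seen\n");

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_acpi_event,
};

/* In probe:  register_acpi_notifier(&example_nb);   */
/* In remove: unregister_acpi_notifier(&example_nb); */
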
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
index 84190ed89c04..961b45d18a5d 100644
--- a/drivers/acpi/acpi_cmos_rtc.c
+++ b/drivers/acpi/acpi_cmos_rtc.c
@@ -18,8 +18,6 @@
#include "internal.h"
-#define PREFIX "ACPI: "
-
ACPI_MODULE_NAME("cmos rtc");
static const struct acpi_device_id acpi_cmos_rtc_ids[] = {
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 6745fe137b9e..69e29f409d4c 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -33,6 +33,13 @@ ACPI_MODULE_NAME("acpi_lpss");
#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
#define LPSS_SW_LTR 0x10
#define LPSS_AUTO_LTR 0x14
+#define LPSS_LTR_SNOOP_REQ BIT(15)
+#define LPSS_LTR_SNOOP_MASK 0x0000FFFF
+#define LPSS_LTR_SNOOP_LAT_1US 0x800
+#define LPSS_LTR_SNOOP_LAT_32US 0xC00
+#define LPSS_LTR_SNOOP_LAT_SHIFT 5
+#define LPSS_LTR_SNOOP_LAT_CUTOFF 3000
+#define LPSS_LTR_MAX_VAL 0x3FF
#define LPSS_TX_INT 0x20
#define LPSS_TX_INT_MASK BIT(1)
@@ -102,6 +109,16 @@ static struct lpss_device_desc lpt_sdio_dev_desc = {
.ltr_required = true,
};
+static struct lpss_shared_clock pwm_clock = {
+ .name = "pwm_clk",
+ .rate = 25000000,
+};
+
+static struct lpss_device_desc byt_pwm_dev_desc = {
+ .clk_required = true,
+ .shared_clock = &pwm_clock,
+};
+
static struct lpss_shared_clock uart_clock = {
.name = "uart_clk",
.rate = 44236800,
@@ -157,6 +174,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT33C7", },
/* BayTrail LPSS devices */
+ { "80860F09", (unsigned long)&byt_pwm_dev_desc },
{ "80860F0A", (unsigned long)&byt_uart_dev_desc },
{ "80860F0E", (unsigned long)&byt_spi_dev_desc },
{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
@@ -315,6 +333,17 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
return ret;
}
+static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
+{
+ return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+}
+
+static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
+ unsigned int reg)
+{
+ writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+}
+
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
struct acpi_device *adev;
@@ -336,7 +365,7 @@ static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
ret = -ENODEV;
goto out;
}
- *val = readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+ *val = __lpss_reg_read(pdata, reg);
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -389,6 +418,37 @@ static struct attribute_group lpss_attr_group = {
.name = "lpss_ltr",
};
+static void acpi_lpss_set_ltr(struct device *dev, s32 val)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+ u32 ltr_mode, ltr_val;
+
+ ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
+ if (val < 0) {
+ if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
+ ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
+ __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
+ }
+ return;
+ }
+ ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
+ if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
+ ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
+ val = LPSS_LTR_MAX_VAL;
+ } else if (val > LPSS_LTR_MAX_VAL) {
+ ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
+ val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
+ } else {
+ ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
+ }
+ ltr_val |= val;
+ __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
+ if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
+ ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
+ __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
+ }
+}
+
static int acpi_lpss_platform_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -426,9 +486,29 @@ static struct notifier_block acpi_lpss_nb = {
.notifier_call = acpi_lpss_platform_notify,
};
+static void acpi_lpss_bind(struct device *dev)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+ if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required)
+ return;
+
+ if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
+ dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
+ else
+ dev_err(dev, "MMIO size insufficient to access LTR\n");
+}
+
+static void acpi_lpss_unbind(struct device *dev)
+{
+ dev->power.set_latency_tolerance = NULL;
+}
+
static struct acpi_scan_handler lpss_handler = {
.ids = acpi_lpss_device_ids,
.attach = acpi_lpss_create_device,
+ .bind = acpi_lpss_bind,
+ .unbind = acpi_lpss_unbind,
};
void __init acpi_lpss_init(void)
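
The SW LTR value packing in acpi_lpss_set_ltr() above is a little hard to follow in diff form, so here is the same encoding restated as a stand-alone helper. The function name is hypothetical, the numeric constants simply repeat the LPSS_LTR_* definitions added at the top of the file, and negative values (which disable software LTR) are assumed to be handled by the caller as in the patch. Note that the patch additionally preserves the register bits above LPSS_LTR_SNOOP_MASK when it writes the result back.

#include <linux/bits.h>
#include <linux/types.h>

/* Pack a latency tolerance (in microseconds) into the low 16 bits of
 * the LPSS_SW_LTR register, mirroring the branches in the patch. */
static u32 lpss_encode_snoop_ltr(s32 us)
{
	if (us >= 3000)			/* LPSS_LTR_SNOOP_LAT_CUTOFF */
		return 0xC00 | 0x3FF;	/* 32us scale, clamped to max, no REQ bit */

	if (us > 0x3FF)			/* above LPSS_LTR_MAX_VAL: switch to 32us scale */
		return 0xC00 | BIT(15) | (us >> 5);

	return 0x800 | BIT(15) | us;	/* value fits the 1us scale */
}
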
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index df96a0fe4890..37d73024b82e 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -408,28 +408,14 @@ static int acpi_pad_pur(acpi_handle handle)
return num;
}
-/* Notify firmware how many CPUs are idle */
-static void acpi_pad_ost(acpi_handle handle, int stat,
- uint32_t idle_cpus)
-{
- union acpi_object params[3] = {
- {.type = ACPI_TYPE_INTEGER,},
- {.type = ACPI_TYPE_INTEGER,},
- {.type = ACPI_TYPE_BUFFER,},
- };
- struct acpi_object_list arg_list = {3, params};
-
- params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
- params[1].integer.value = stat;
- params[2].buffer.length = 4;
- params[2].buffer.pointer = (void *)&idle_cpus;
- acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-}
-
static void acpi_pad_handle_notify(acpi_handle handle)
{
int num_cpus;
uint32_t idle_cpus;
+ struct acpi_buffer param = {
+ .length = 4,
+ .pointer = (void *)&idle_cpus,
+ };
mutex_lock(&isolated_cpus_lock);
num_cpus = acpi_pad_pur(handle);
@@ -439,7 +425,7 @@ static void acpi_pad_handle_notify(acpi_handle handle)
}
acpi_pad_idle_cpus(num_cpus);
idle_cpus = acpi_pad_idle_cpus_num();
- acpi_pad_ost(handle, 0, idle_cpus);
+ acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
mutex_unlock(&isolated_cpus_lock);
}
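
The acpi_pad change above drops the hand-rolled _OST evaluation in favor of the shared acpi_evaluate_ost() helper. A sketch of a call site follows; the wrapper name is made up, and the helper's prototype (handle, source event, status code, optional status buffer) is assumed from the way this hunk uses it.

#include <linux/acpi.h>

/* Report _OST "success" (status code 0) for a notification, returning
 * the number of idle CPUs in a 4-byte status buffer, as the patched
 * acpi_pad_handle_notify() does. */
static void example_report_ost(acpi_handle handle, u32 source_event,
			       u32 idle_cpus)
{
	struct acpi_buffer status_buf = {
		.length = sizeof(idle_cpus),
		.pointer = &idle_cpus,
	};

	acpi_evaluate_ost(handle, source_event, 0, &status_buf);
}
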
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 438304086ff1..b7ed86a20427 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -122,6 +122,8 @@ acpi-y += \
rsaddr.o \
rscalc.o \
rscreate.o \
+ rsdump.o \
+ rsdumpinfo.o \
rsinfo.o \
rsio.o \
rsirq.o \
@@ -132,8 +134,6 @@ acpi-y += \
rsutils.o \
rsxface.o
-acpi-$(ACPI_FUTURE_USAGE) += rsdump.o rsdumpinfo.o
-
acpi-y += \
tbfadt.o \
tbfind.o \
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 8a6c4a0d22db..6f1c616910ac 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 2bf3ca2b8a7a..68a91eb0fa48 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -115,6 +115,8 @@ ACPI_HW_DEPENDENT_RETURN_VOID(void
char *block_arg))
ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void))
+void acpi_db_execute_test(char *type_arg);
+
/*
* dbconvert - miscellaneous conversion routines
*/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 427db72a6302..d3e2cc395d7f 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -139,20 +139,21 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
struct acpi_walk_state *walk_state);
/*
- * dsload - Parser/Interpreter interface, pass 1 namespace load callbacks
+ * dsload - Parser/Interpreter interface
*/
acpi_status
acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
+/* dsload - pass 1 namespace load callbacks */
+
acpi_status
acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op);
acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state);
-/*
- * dsload - Parser/Interpreter interface, pass 2 namespace load callbacks
- */
+/* dsload - pass 2 namespace load callbacks */
+
acpi_status
acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op);
@@ -200,7 +201,9 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state);
/*
* dsmethod - Parser/Interpreter interface - control method parsing
*/
-acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node);
+acpi_status
+acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
+ union acpi_operand_object *obj_desc);
acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 0fb0adf435d6..68ec61fff188 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 4ed1aa384df2..49bbc71fad54 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,11 +51,19 @@
* to simplify maintenance of the code.
*/
#ifdef DEFINE_ACPI_GLOBALS
-#define ACPI_EXTERN
-#define ACPI_INIT_GLOBAL(a,b) a=b
+#define ACPI_GLOBAL(type,name) \
+ extern type name; \
+ type name
+
+#define ACPI_INIT_GLOBAL(type,name,value) \
+ type name=value
+
#else
-#define ACPI_EXTERN extern
-#define ACPI_INIT_GLOBAL(a,b) a
+#define ACPI_GLOBAL(type,name) \
+ extern type name
+
+#define ACPI_INIT_GLOBAL(type,name,value) \
+ extern type name
#endif
#ifdef DEFINE_ACPI_GLOBALS
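
To make the new scheme concrete: exactly one ACPICA source file defines DEFINE_ACPI_GLOBALS before including this header and therefore receives the actual storage, while every other file only sees extern declarations. With two hypothetical globals, the macros expand roughly as follows.

/* Everywhere except the single file that defines DEFINE_ACPI_GLOBALS: */
extern u8 acpi_gbl_example_flag;	/* from ACPI_GLOBAL(u8, acpi_gbl_example_flag)              */
extern u8 acpi_gbl_example_option;	/* from ACPI_INIT_GLOBAL(u8, acpi_gbl_example_option, TRUE) */

/* In the single file that defines DEFINE_ACPI_GLOBALS: */
extern u8 acpi_gbl_example_flag;	/* declaration emitted first...               */
u8 acpi_gbl_example_flag;		/* ...then the definition (ACPI_GLOBAL)       */
u8 acpi_gbl_example_option = TRUE;	/* initialized definition (ACPI_INIT_GLOBAL)  */
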
@@ -82,33 +90,34 @@
* 5) Allow unresolved references (invalid target name) in package objects
* 6) Enable warning messages for behavior that is not ACPI spec compliant
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_interpreter_slack, FALSE);
/*
- * Automatically serialize ALL control methods? Default is FALSE, meaning
- * to use the Serialized/not_serialized method flags on a per method basis.
- * Only change this if the ASL code is poorly written and cannot handle
- * reentrancy even though methods are marked "NotSerialized".
+ * Automatically serialize all methods that create named objects? Default
+ * is TRUE, meaning that all non_serialized methods are scanned once at
+ * table load time to determine those that create named objects. Methods
+ * that create named objects are marked Serialized in order to prevent
+ * possible run-time problems if they are entered by more than one thread.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_auto_serialize_methods, TRUE);
/*
* Create the predefined _OSI method in the namespace? Default is TRUE
* because ACPI CA is fully compatible with other ACPI implementations.
* Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_create_osi_method, TRUE);
/*
* Optionally use default values for the ACPI register widths. Set this to
* TRUE to use the defaults, if an FADT contains incorrect widths/lengths.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use_default_register_widths, TRUE);
/*
* Optionally enable output from the AML Debug Object.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_aml_debug_object, FALSE);
/*
* Optionally copy the entire DSDT to local memory (instead of simply
@@ -116,7 +125,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
* DSDT, creating the need for this option. Default is FALSE, do not copy
* the DSDT.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE);
/*
* Optionally ignore an XSDT if present and use the RSDT instead.
@@ -124,7 +133,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
* of the RSDT, the XSDT has been found to be corrupt or ill-formed on
* some machines. Default behavior is to use the XSDT if present.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_do_not_use_xsdt, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
/*
* Optionally use 32-bit FADT addresses if and when there is a conflict
@@ -134,7 +143,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_do_not_use_xsdt, FALSE);
* some machines have been found to have a corrupted non-zero 64-bit
* address. Default is FALSE, do not favor the 32-bit addresses.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_use32_bit_fadt_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
/*
* Optionally truncate I/O addresses to 16 bits. Provides compatibility
@@ -142,47 +151,28 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_use32_bit_fadt_addresses, FALSE);
* this value is set to TRUE if any Windows OSI strings have been
* requested by the BIOS.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_truncate_io_addresses, FALSE);
/*
* Disable runtime checking and repair of values returned by control methods.
* Use only if the repair is causing a problem on a particular machine.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_auto_repair, FALSE);
/*
* Optionally do not load any SSDTs from the RSDT/XSDT during initialization.
* This can be useful for debugging ACPI problems on some machines.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_ssdt_table_load, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_ssdt_table_load, FALSE);
/*
* We keep track of the latest version of Windows that has been requested by
* the BIOS.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_osi_data, 0);
-
-/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
-
-struct acpi_table_fadt acpi_gbl_FADT;
-u32 acpi_current_gpe_count;
-u32 acpi_gbl_trace_flags;
-acpi_name acpi_gbl_trace_method_name;
-u8 acpi_gbl_system_awake_and_running;
-
-/*
- * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
- * that the ACPI hardware is no longer required. A flag in the FADT indicates
- * a reduced HW machine, and that flag is duplicated here for convenience.
- */
-u8 acpi_gbl_reduced_hardware;
+ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
#endif /* DEFINE_ACPI_GLOBALS */
-/* Do not disassemble buffers to resource descriptors */
-
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
-
/*****************************************************************************
*
* ACPI Table globals
@@ -190,37 +180,36 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
****************************************************************************/
/*
- * acpi_gbl_root_table_list is the master list of ACPI tables that were
- * found in the RSDT/XSDT.
+ * Master list of all ACPI tables that were found in the RSDT/XSDT.
*/
-ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
+ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list);
+
+/* DSDT information. Used to check for DSDT corruption */
+
+ACPI_GLOBAL(struct acpi_table_header *, acpi_gbl_DSDT);
+ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
#if (!ACPI_REDUCED_HARDWARE)
-ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
+ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
#endif /* !ACPI_REDUCED_HARDWARE */
/* These addresses are calculated from the FADT Event Block addresses */
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_status;
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
-
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_status;
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1a_status);
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1a_enable);
-/* DSDT information. Used to check for DSDT corruption */
-
-ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
-ACPI_EXTERN struct acpi_table_header acpi_gbl_original_dsdt_header;
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_status);
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_enable);
/*
- * Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
+ * Handle both ACPI 1.0 and ACPI 2.0+ Integer widths. The integer width is
* determined by the revision of the DSDT: If the DSDT revision is less than
* 2, use only the lower 32 bits of the internal 64-bit Integer.
*/
-ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
-ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
-ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
+ACPI_GLOBAL(u8, acpi_gbl_integer_bit_width);
+ACPI_GLOBAL(u8, acpi_gbl_integer_byte_width);
+ACPI_GLOBAL(u8, acpi_gbl_integer_nybble_width);
/*****************************************************************************
*
@@ -233,36 +222,36 @@ ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
* actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
* (The table maps local handles to the real OS handles)
*/
-ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
+ACPI_GLOBAL(struct acpi_mutex_info, acpi_gbl_mutex_info[ACPI_NUM_MUTEX]);
/*
* Global lock mutex is an actual AML mutex object
* Global lock semaphore works in conjunction with the actual global lock
* Global lock spinlock is used for "pending" handshake
*/
-ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
-ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
-ACPI_EXTERN acpi_spinlock acpi_gbl_global_lock_pending_lock;
-ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
-ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
-ACPI_EXTERN u8 acpi_gbl_global_lock_present;
-ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
+ACPI_GLOBAL(union acpi_operand_object *, acpi_gbl_global_lock_mutex);
+ACPI_GLOBAL(acpi_semaphore, acpi_gbl_global_lock_semaphore);
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_global_lock_pending_lock);
+ACPI_GLOBAL(u16, acpi_gbl_global_lock_handle);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_acquired);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_present);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_pending);
/*
* Spinlocks are used for interfaces that can be possibly called at
* interrupt level
*/
-ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
-ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock;
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
/* Mutex for _OSI support */
-ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
+ACPI_GLOBAL(acpi_mutex, acpi_gbl_osi_mutex);
/* Reader/Writer lock is used for namespace walk and dynamic table unload */
-ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
+ACPI_GLOBAL(struct acpi_rw_lock, acpi_gbl_namespace_rw_lock);
/*****************************************************************************
*
@@ -272,70 +261,69 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
/* Object caches */
-ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_state_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_ext_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_namespace_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_state_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_ps_node_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_ps_node_ext_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_operand_cache);
+
+/* System */
+
+ACPI_INIT_GLOBAL(u32, acpi_gbl_startup_flags, 0);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_shutdown, TRUE);
/* Global handlers */
-ACPI_EXTERN struct acpi_global_notify_handler acpi_gbl_global_notify[2];
-ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
-ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
-ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
-ACPI_EXTERN void *acpi_gbl_table_handler_context;
-ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
-ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
-ACPI_EXTERN struct acpi_sci_handler_info *acpi_gbl_sci_handler_list;
+ACPI_GLOBAL(struct acpi_global_notify_handler, acpi_gbl_global_notify[2]);
+ACPI_GLOBAL(acpi_exception_handler, acpi_gbl_exception_handler);
+ACPI_GLOBAL(acpi_init_handler, acpi_gbl_init_handler);
+ACPI_GLOBAL(acpi_table_handler, acpi_gbl_table_handler);
+ACPI_GLOBAL(void *, acpi_gbl_table_handler_context);
+ACPI_GLOBAL(struct acpi_walk_state *, acpi_gbl_breakpoint_walk);
+ACPI_GLOBAL(acpi_interface_handler, acpi_gbl_interface_handler);
+ACPI_GLOBAL(struct acpi_sci_handler_info *, acpi_gbl_sci_handler_list);
/* Owner ID support */
-ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
-ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
-ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
+ACPI_GLOBAL(u32, acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS]);
+ACPI_GLOBAL(u8, acpi_gbl_last_owner_id_index);
+ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
/* Initialization sequencing */
-ACPI_EXTERN u8 acpi_gbl_reg_methods_executed;
+ACPI_GLOBAL(u8, acpi_gbl_reg_methods_executed);
/* Misc */
-ACPI_EXTERN u32 acpi_gbl_original_mode;
-ACPI_EXTERN u32 acpi_gbl_rsdp_original_location;
-ACPI_EXTERN u32 acpi_gbl_ns_lookup_count;
-ACPI_EXTERN u32 acpi_gbl_ps_find_count;
-ACPI_EXTERN u16 acpi_gbl_pm1_enable_register_save;
-ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
-ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
-ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
-ACPI_EXTERN u8 acpi_gbl_events_initialized;
-ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
-ACPI_EXTERN struct acpi_address_range
- *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
-
-#ifndef DEFINE_ACPI_GLOBALS
-
-/* Other miscellaneous */
-
-extern u8 acpi_gbl_shutdown;
-extern u32 acpi_gbl_startup_flags;
+ACPI_GLOBAL(u32, acpi_gbl_original_mode);
+ACPI_GLOBAL(u32, acpi_gbl_rsdp_original_location);
+ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count);
+ACPI_GLOBAL(u32, acpi_gbl_ps_find_count);
+ACPI_GLOBAL(u16, acpi_gbl_pm1_enable_register_save);
+ACPI_GLOBAL(u8, acpi_gbl_debugger_configuration);
+ACPI_GLOBAL(u8, acpi_gbl_step_to_next_call);
+ACPI_GLOBAL(u8, acpi_gbl_acpi_hardware_present);
+ACPI_GLOBAL(u8, acpi_gbl_events_initialized);
+ACPI_GLOBAL(struct acpi_interface_info *, acpi_gbl_supported_interfaces);
+ACPI_GLOBAL(struct acpi_address_range *,
+ acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX]);
+
+/* Other miscellaneous, declared and initialized in utglobal */
+
extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
extern const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS];
extern const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS];
-extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
-
-#endif
+extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-/* Lists for tracking memory allocations */
+/* Lists for tracking memory allocations (debug only) */
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
-ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
-ACPI_EXTERN u8 acpi_gbl_disable_mem_tracking;
+ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
+ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
+ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
+ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
#endif
/*****************************************************************************
@@ -350,22 +338,23 @@ ACPI_EXTERN u8 acpi_gbl_disable_mem_tracking;
#define NUM_PREDEFINED_NAMES 9
#endif
-ACPI_EXTERN struct acpi_namespace_node acpi_gbl_root_node_struct;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_root_node;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_fadt_gpe_device;
-ACPI_EXTERN union acpi_operand_object *acpi_gbl_module_code_list;
+ACPI_GLOBAL(struct acpi_namespace_node, acpi_gbl_root_node_struct);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_root_node);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_fadt_gpe_device);
+ACPI_GLOBAL(union acpi_operand_object *, acpi_gbl_module_code_list);
extern const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES];
extern const struct acpi_predefined_names
acpi_gbl_pre_defined_names[NUM_PREDEFINED_NAMES];
#ifdef ACPI_DEBUG_OUTPUT
-ACPI_EXTERN u32 acpi_gbl_current_node_count;
-ACPI_EXTERN u32 acpi_gbl_current_node_size;
-ACPI_EXTERN u32 acpi_gbl_max_concurrent_node_count;
-ACPI_EXTERN acpi_size *acpi_gbl_entry_stack_pointer;
-ACPI_EXTERN acpi_size *acpi_gbl_lowest_stack_pointer;
-ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
+ACPI_GLOBAL(u32, acpi_gbl_current_node_count);
+ACPI_GLOBAL(u32, acpi_gbl_current_node_size);
+ACPI_GLOBAL(u32, acpi_gbl_max_concurrent_node_count);
+ACPI_GLOBAL(acpi_size *, acpi_gbl_entry_stack_pointer);
+ACPI_GLOBAL(acpi_size *, acpi_gbl_lowest_stack_pointer);
+ACPI_GLOBAL(u32, acpi_gbl_deepest_nesting);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
#endif
/*****************************************************************************
@@ -374,11 +363,11 @@ ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
*
****************************************************************************/
-ACPI_EXTERN struct acpi_thread_state *acpi_gbl_current_walk_list;
+ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
/* Control method single step flag */
-ACPI_EXTERN u8 acpi_gbl_cm_single_step;
+ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
/*****************************************************************************
*
@@ -388,8 +377,9 @@ ACPI_EXTERN u8 acpi_gbl_cm_single_step;
extern struct acpi_bit_register_info
acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
-ACPI_EXTERN u8 acpi_gbl_sleep_type_a;
-ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
+
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
/*****************************************************************************
*
@@ -399,14 +389,15 @@ ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
#if (!ACPI_REDUCED_HARDWARE)
-ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
-ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
-ACPI_EXTERN struct acpi_gpe_block_info
- *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler;
-ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
-ACPI_EXTERN struct acpi_fixed_event_handler
- acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
+ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized);
+ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head);
+ACPI_GLOBAL(struct acpi_gpe_block_info *,
+ acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]);
+ACPI_GLOBAL(acpi_gbl_event_handler, acpi_gbl_global_event_handler);
+ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context);
+ACPI_GLOBAL(struct acpi_fixed_event_handler,
+ acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]);
+
extern struct acpi_fixed_event_info
acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
@@ -418,23 +409,19 @@ extern struct acpi_fixed_event_info
*
****************************************************************************/
-/* Procedure nesting level for debug output */
-
-extern u32 acpi_gbl_nesting_level;
-
/* Event counters */
-ACPI_EXTERN u32 acpi_method_count;
-ACPI_EXTERN u32 acpi_gpe_count;
-ACPI_EXTERN u32 acpi_sci_count;
-ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+ACPI_GLOBAL(u32, acpi_method_count);
+ACPI_GLOBAL(u32, acpi_gpe_count);
+ACPI_GLOBAL(u32, acpi_sci_count);
+ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
/* Support for dynamic control method tracing mechanism */
-ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
+ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
+ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
+ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_level);
+ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_layer);
/*****************************************************************************
*
@@ -442,61 +429,64 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
*
****************************************************************************/
-ACPI_EXTERN u8 acpi_gbl_db_output_flags;
+ACPI_GLOBAL(u8, acpi_gbl_db_output_flags);
#ifdef ACPI_DISASSEMBLER
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
+/* Do not disassemble buffers to resource descriptors */
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE);
-ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
-ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
-ACPI_EXTERN u8 acpi_gbl_num_external_methods;
-ACPI_EXTERN u32 acpi_gbl_resolved_external_methods;
-ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list;
-ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list;
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose);
+ACPI_GLOBAL(u8, acpi_gbl_num_external_methods);
+ACPI_GLOBAL(u32, acpi_gbl_resolved_external_methods);
+ACPI_GLOBAL(struct acpi_external_list *, acpi_gbl_external_list);
+ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
#endif
#ifdef ACPI_DEBUGGER
-extern u8 acpi_gbl_method_executing;
-extern u8 acpi_gbl_abort_method;
-extern u8 acpi_gbl_db_terminate_threads;
+ACPI_INIT_GLOBAL(u8, acpi_gbl_db_terminate_threads, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE);
-ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
-ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
-ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
-ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
-ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
-ACPI_EXTERN char *acpi_gbl_db_buffer;
-ACPI_EXTERN char *acpi_gbl_db_filename;
-ACPI_EXTERN u32 acpi_gbl_db_debug_level;
-ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_tables);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_stats);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_ini_methods);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_region_support);
+ACPI_GLOBAL(u8, acpi_gbl_db_output_to_file);
+ACPI_GLOBAL(char *, acpi_gbl_db_buffer);
+ACPI_GLOBAL(char *, acpi_gbl_db_filename);
+ACPI_GLOBAL(u32, acpi_gbl_db_debug_level);
+ACPI_GLOBAL(u32, acpi_gbl_db_console_debug_level);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node);
-ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]);
+ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);
/* These buffers should all be the same size */
-ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_GLOBAL(char, acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
/*
* Statistic globals
*/
-ACPI_EXTERN u16 acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
-ACPI_EXTERN u16 acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
-ACPI_EXTERN u16 acpi_gbl_obj_type_count_misc;
-ACPI_EXTERN u16 acpi_gbl_node_type_count_misc;
-ACPI_EXTERN u32 acpi_gbl_num_nodes;
-ACPI_EXTERN u32 acpi_gbl_num_objects;
-
-ACPI_EXTERN u32 acpi_gbl_size_of_parse_tree;
-ACPI_EXTERN u32 acpi_gbl_size_of_method_trees;
-ACPI_EXTERN u32 acpi_gbl_size_of_node_entries;
-ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
+ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
+ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
+ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
+ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
+ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
+ACPI_GLOBAL(u32, acpi_gbl_num_objects);
+
+ACPI_GLOBAL(u32, acpi_gbl_size_of_parse_tree);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_method_trees);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_node_entries);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_acpi_objects);
#endif /* ACPI_DEBUGGER */
@@ -508,7 +498,7 @@ ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
#ifdef ACPI_APPLICATION
-ACPI_FILE ACPI_INIT_GLOBAL(acpi_gbl_debug_file, NULL);
+ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
#endif /* ACPI_APPLICATION */
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 6357e932bfd9..2ad2351a9833 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 8af8c9bdeb35..b01f71ce0523 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -87,6 +87,10 @@ typedef const struct acpi_exdump_info {
#define ACPI_EXD_PACKAGE 11
#define ACPI_EXD_FIELD 12
#define ACPI_EXD_REFERENCE 13
+#define ACPI_EXD_LIST 14 /* Operand object list */
+#define ACPI_EXD_HDLR_LIST 15 /* Address Handler list */
+#define ACPI_EXD_RGN_LIST 16 /* Region list */
+#define ACPI_EXD_NODE 17 /* Namespace Node */
/* restore default alignment */
@@ -454,10 +458,6 @@ void acpi_ex_enter_interpreter(void);
void acpi_ex_exit_interpreter(void);
-void acpi_ex_reacquire_interpreter(void);
-
-void acpi_ex_relinquish_interpreter(void);
-
u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
void acpi_ex_acquire_global_lock(u32 rule);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index d95ca5449ace..52a21dafb540 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 2a86c65d873b..4bceb11c7380 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,17 +63,21 @@
#define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
/*
- * printf() format helpers
+ * printf() format helpers. These macros are workarounds for the difficulties
+ * with emitting 64-bit integers and 64-bit pointers with the same code
+ * for both 32-bit and 64-bit hosts.
*/
-
-/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
-
#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i)
+#define ACPI_FORMAT_TO_UINT(i) ACPI_FORMAT_UINT64(i)
+#define ACPI_PRINTF_UINT "0x%8.8X%8.8X"
+
#else
-#define ACPI_FORMAT_NATIVE_UINT(i) 0, (i)
+#define ACPI_FORMAT_NATIVE_UINT(i) 0, (u32) (i)
+#define ACPI_FORMAT_TO_UINT(i) (u32) (i)
+#define ACPI_PRINTF_UINT "0x%8.8X"
#endif
/*
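
The new ACPI_PRINTF_UINT / ACPI_FORMAT_TO_UINT pair is meant to be used together: the format string contributes one or two %8.8X conversions depending on ACPI_MACHINE_WIDTH, and the argument macro supplies the matching one or two 32-bit values, so a single call site builds cleanly for both host widths. A small sketch (the function and variable names are illustrative):

static void example_dump_address(acpi_physical_address addr)
{
	/* Prints "0x00000000DEADBEEF" on a 64-bit host and "0xDEADBEEF"
	 * on a 32-bit host for addr == 0xDEADBEEF. */
	acpi_os_printf("Address: " ACPI_PRINTF_UINT "\n",
		       ACPI_FORMAT_TO_UINT(addr));
}
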
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index e6138ac4a160..ee1c040f321c 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index cc7ab6dd724e..22fb6449d3d6 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -193,7 +193,8 @@ struct acpi_object_method {
#define ACPI_METHOD_INTERNAL_ONLY 0x02 /* Method is implemented internally (_OSI) */
#define ACPI_METHOD_SERIALIZED 0x04 /* Method is serialized */
#define ACPI_METHOD_SERIALIZED_PENDING 0x08 /* Method is to be marked serialized */
-#define ACPI_METHOD_MODIFIED_NAMESPACE 0x10 /* Method modified the namespace */
+#define ACPI_METHOD_IGNORE_SYNC_LEVEL 0x10 /* Method was auto-serialized at table load time */
+#define ACPI_METHOD_MODIFIED_NAMESPACE 0x20 /* Method modified the namespace */
/******************************************************************************
*
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 3fc9ca7e8aa3..dda0e6affcf1 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index aed319318835..6168b85463ed 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index f600aded7261..a48d713e9599 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
*
* Return Package types
*
- * 1) PTYPE1 packages do not contain sub-packages.
+ * 1) PTYPE1 packages do not contain subpackages.
*
 * ACPI_PTYPE1_FIXED: Fixed-length, 1 or 2 object types:
* object type
@@ -63,8 +63,8 @@
* (Used for _PRW)
*
*
- * 2) PTYPE2 packages contain a Variable-length number of sub-packages. Each
- * of the different types describe the contents of each of the sub-packages.
+ * 2) PTYPE2 packages contain a Variable-length number of subpackages. Each
+ * of the different types describes the contents of each of the subpackages.
*
* ACPI_PTYPE2: Each subpackage contains 1 or 2 object types. Zero-length
* parent package is allowed:
@@ -560,7 +560,7 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
/*
* For _HPX, a single package is returned, containing a variable-length number
- * of sub-packages. Each sub-package contains a PCI record setting.
+ * of subpackages. Each subpackage contains a PCI record setting.
 * There are several different types of record settings, of different
* lengths, but all elements of all settings are Integers.
*/
@@ -698,6 +698,12 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+ {{"_PRP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: 1 Str, 1 Int/Str/Pkg */
+ PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_STRING, 1,
+ ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING |
+ ACPI_RTYPE_PACKAGE | ACPI_RTYPE_REFERENCE, 1, 0),
+
{{"_PRS", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index ff97430455cb..4b008e8884a1 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index fc83c0a5ca70..cf7346110bd8 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -133,6 +133,9 @@ struct acpi_init_walk_info {
u32 table_index;
u32 object_count;
u32 method_count;
+ u32 serial_method_count;
+ u32 non_serial_method_count;
+ u32 serialized_method_count;
u32 device_count;
u32 op_region_count;
u32 field_count;
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index c54f42c64fe2..5fa4b2027697 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index be8180c17d7e..ceeec0b7ccb1 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,7 +49,7 @@ extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
/* Strings used by the disassembler and debugger resource dump routines */
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
extern const char *acpi_gbl_bm_decode[];
extern const char *acpi_gbl_config_decode[];
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 48a3e331b72d..5908ccec6aea 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 87c26366d1df..f3f834408441 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index afdc6df17abf..720b1cdda711 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index eb56b66444b5..8daf9de82b73 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index e7a57c554e84..3661c8e90540 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 14424200d246..aee5e45f6d35 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,8 +83,8 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
(struct acpi_init_walk_info *)context;
struct acpi_namespace_node *node =
(struct acpi_namespace_node *)obj_handle;
- acpi_object_type type;
acpi_status status;
+ union acpi_operand_object *obj_desc;
ACPI_FUNCTION_ENTRY();
@@ -100,9 +100,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
/* And even then, we are only interested in a few object types */
- type = acpi_ns_get_type(obj_handle);
-
- switch (type) {
+ switch (acpi_ns_get_type(obj_handle)) {
case ACPI_TYPE_REGION:
status = acpi_ds_initialize_region(obj_handle);
@@ -117,8 +115,44 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
break;
case ACPI_TYPE_METHOD:
-
+ /*
+ * Auto-serialization support. We will examine each method that is
+ * not_serialized to determine if it creates any Named objects. If
+ * it does, it will be marked serialized to prevent problems if
+ * the method is entered by two or more threads and an attempt is
+ * made to create the same named object twice -- which results in
+ * an AE_ALREADY_EXISTS exception and method abort.
+ */
info->method_count++;
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ break;
+ }
+
+ /* Ignore if already serialized */
+
+ if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
+ info->serial_method_count++;
+ break;
+ }
+
+ if (acpi_gbl_auto_serialize_methods) {
+
+ /* Parse/scan method and serialize it if necessary */
+
+ acpi_ds_auto_serialize_method(node, obj_desc);
+ if (obj_desc->method.
+ info_flags & ACPI_METHOD_SERIALIZED) {
+
+ /* Method was just converted to Serialized */
+
+ info->serial_method_count++;
+ info->serialized_method_count++;
+ break;
+ }
+ }
+
+ info->non_serial_method_count++;
break;
case ACPI_TYPE_DEVICE:
@@ -170,7 +204,6 @@ acpi_ds_initialize_objects(u32 table_index,
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
/* Set all init info to zero */
@@ -205,14 +238,16 @@ acpi_ds_initialize_objects(u32 table_index,
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\nTable [%4.4s](id %4.4X) - %u Objects with %u Devices %u Methods %u Regions\n",
+ "Table [%4.4s] (id %4.4X) - %4u Objects with %3u Devices, "
+ "%3u Regions, %3u Methods (%u/%u/%u Serial/Non/Cvt)\n",
table->signature, owner_id, info.object_count,
- info.device_count, info.method_count,
- info.op_region_count));
+ info.device_count, info.op_region_count,
+ info.method_count, info.serial_method_count,
+ info.non_serial_method_count,
+ info.serialized_method_count));
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "%u Methods, %u Regions\n", info.method_count,
- info.op_region_count));
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "%u Methods, %u Regions\n",
+ info.method_count, info.op_region_count));
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 81a78ba84311..3c7f7378b94d 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,16 +49,155 @@
#ifdef ACPI_DISASSEMBLER
#include "acdisasm.h"
#endif
+#include "acparser.h"
+#include "amlcode.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")
/* Local prototypes */
static acpi_status
+acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op);
+
+static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
/*******************************************************************************
*
+ * FUNCTION: acpi_ds_auto_serialize_method
+ *
+ * PARAMETERS: node - Namespace Node of the method
+ * obj_desc - Method object attached to node
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Parse a control method AML to scan for control methods that
+ * need serialization due to the creation of named objects.
+ *
+ * NOTE: It is a bit of overkill to mark all such methods serialized, since
+ * there is only a problem if the method actually blocks during execution.
+ * A blocking operation is, for example, a Sleep() operation, or any access
+ * to an operation region. However, it is probably not possible to easily
+ * detect whether a method will block or not, so we simply mark all suspicious
+ * methods as serialized.
+ *
+ * NOTE2: This code is essentially a generic routine for parsing a single
+ * control method.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
+ union acpi_operand_object *obj_desc)
+{
+ acpi_status status;
+ union acpi_parse_object *op = NULL;
+ struct acpi_walk_state *walk_state;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Method auto-serialization parse [%4.4s] %p\n",
+ acpi_ut_get_node_name(node), node));
+
+ /* Create/Init a root op for the method parse tree */
+
+ op = acpi_ps_alloc_op(AML_METHOD_OP);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ acpi_ps_set_name(op, node->name.integer);
+ op->common.node = node;
+
+ /* Create and initialize a new walk state */
+
+ walk_state =
+ acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
+ if (!walk_state) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ status =
+ acpi_ds_init_aml_walk(walk_state, op, node,
+ obj_desc->method.aml_start,
+ obj_desc->method.aml_length, NULL, 0);
+ if (ACPI_FAILURE(status)) {
+ acpi_ds_delete_walk_state(walk_state);
+ return_ACPI_STATUS(status);
+ }
+
+ walk_state->descending_callback = acpi_ds_detect_named_opcodes;
+
+ /* Parse the method, scan for creation of named objects */
+
+ status = acpi_ps_parse_aml(walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ps_delete_parse_tree(op);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_detect_named_opcodes
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ * out_op - Unused, required for parser interface
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Descending callback used during the loading of ACPI tables.
+ * Currently used to detect methods that must be marked serialized
+ * in order to avoid problems with the creation of named objects.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op)
+{
+
+ ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes);
+
+ /* We are only interested in opcodes that create a new name */
+
+ if (!(walk_state->op_info->flags &
+       (AML_NAMED | AML_CREATE | AML_FIELD))) {
+ return (AE_OK);
+ }
+
+ /*
+ * At this point, we know we have a Named object opcode.
+ * Mark the method as serialized. Later code will create a mutex for
+ * this method to enforce serialization.
+ *
+ * Note, ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will ignore the
+ * Sync Level mechanism for this method, even though it is now serialized.
+ * Otherwise, there can be conflicts with existing ASL code that actually
+ * uses sync levels.
+ */
+ walk_state->method_desc->method.sync_level = 0;
+ walk_state->method_desc->method.info_flags |=
+ (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Method serialized [%4.4s] %p - [%s] (%4.4X)\n",
+ walk_state->method_node->name.ascii,
+ walk_state->method_node, walk_state->op_info->name,
+ walk_state->opcode));
+
+ /* Abort the parse, no need to examine this method any further */
+
+ return (AE_CTRL_TERMINATE);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ds_method_error
*
* PARAMETERS: status - Execution status
@@ -74,7 +213,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
******************************************************************************/
acpi_status
-acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
+acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
{
ACPI_FUNCTION_ENTRY();
@@ -217,13 +356,19 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
/*
* The current_sync_level (per-thread) must be less than or equal to
* the sync level of the method. This mechanism provides some
- * deadlock prevention
+ * deadlock prevention.
+ *
+ * If the method was auto-serialized, we just ignore the sync level
+ * mechanism, because auto-serialization of methods can interfere
+ * with ASL code that actually uses sync levels.
*
* Top-level method invocation has no walk state at this point
*/
if (walk_state &&
- (walk_state->thread->current_sync_level >
- obj_desc->method.mutex->mutex.sync_level)) {
+ !(obj_desc->method.info_flags & ACPI_METHOD_IGNORE_SYNC_LEVEL) &&
+ (walk_state->thread->current_sync_level >
+ obj_desc->method.mutex->mutex.sync_level)) {
ACPI_ERROR((AE_INFO,
"Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
acpi_ut_get_node_name(method_node),
@@ -668,7 +813,8 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
method_desc->method.info_flags &=
~ACPI_METHOD_SERIALIZED_PENDING;
method_desc->method.info_flags |=
- ACPI_METHOD_SERIALIZED;
+ (ACPI_METHOD_SERIALIZED |
+ ACPI_METHOD_IGNORE_SYNC_LEVEL);
method_desc->method.sync_level = 0;
}
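A standalone sketch of the relaxed sync-level test introduced in the dsmethod.c hunk above, assuming simplified flag names and a plain boolean helper rather than the ACPICA walk-state structures: a method carrying the ignore-sync-level flag (set when it was auto-converted) skips the check, while a method explicitly declared Serialized in the ASL still enforces it.

/*
 * Illustrative model of the sync-level gate; not the ACPICA implementation.
 */
#include <stdbool.h>
#include <stdio.h>

#define METHOD_SERIALIZED        0x01
#define METHOD_IGNORE_SYNC_LEVEL 0x02

static bool sync_level_blocks_entry(unsigned current_sync_level,
                                    unsigned method_sync_level,
                                    unsigned info_flags)
{
    if (info_flags & METHOD_IGNORE_SYNC_LEVEL)
        return false;                 /* auto-serialized: skip the test */

    return current_sync_level > method_sync_level;
}

int main(void)
{
    /* Explicitly Serialized method with SyncLevel 1, caller already at 4 */
    printf("declared serialized: %s\n",
           sync_level_blocks_entry(4, 1, METHOD_SERIALIZED) ?
           "rejected" : "allowed");

    /* Same situation, but the method was auto-converted at load time */
    printf("auto-serialized:     %s\n",
           sync_level_blocks_entry(4, 1,
                                   METHOD_SERIALIZED |
                                   METHOD_IGNORE_SYNC_LEVEL) ?
           "rejected" : "allowed");
    return 0;
}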
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index c4b0b3657237..b67522df01ac 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index b1746a68dad1..a1e7e6b6fcf7 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 5205edcf2c01..6c0759c0db47 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index d7f53fb2979a..9f74795e2268 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 1bbb22fd6fa0..f7f5107e754d 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 2dbe109727c8..15623da26200 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,8 +73,20 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
{
switch (pass_number) {
+ case 0:
+
+ /* Parse only - caller will set up the callbacks */
+
+ walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
+ ACPI_PARSE_DELETE_TREE | ACPI_PARSE_DISASSEMBLE;
+ walk_state->descending_callback = NULL;
+ walk_state->ascending_callback = NULL;
+ break;
+
case 1:
+ /* Load pass 1 */
+
walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
ACPI_PARSE_DELETE_TREE;
walk_state->descending_callback = acpi_ds_load1_begin_op;
@@ -83,6 +95,8 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
case 2:
+ /* Load pass 2 */
+
walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
ACPI_PARSE_DELETE_TREE;
walk_state->descending_callback = acpi_ds_load2_begin_op;
@@ -91,6 +105,8 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
case 3:
+ /* Execution pass */
+
#ifndef ACPI_NO_METHOD_EXECUTION
walk_state->parse_flags |= ACPI_PARSE_EXECUTE |
ACPI_PARSE_DELETE_TREE;
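A simplified model of the pass dispatch added in the dswload.c hunk above, assuming placeholder callbacks and flag values rather than the real acpi_ds_load*_op handlers and ACPI_PARSE_* constants: each pass number selects a parse-flag set and a descending/ascending callback pair, and the new pass 0 leaves the callbacks for the caller to install.

/*
 * Sketch of per-pass callback selection; names and flags are stand-ins.
 */
#include <stdio.h>

typedef int (*parse_cb)(void);

struct walk_setup {
    unsigned parse_flags;
    parse_cb descending;
    parse_cb ascending;
};

static int load1_begin(void) { return 0; }
static int load1_end(void)   { return 0; }
static int load2_begin(void) { return 0; }
static int load2_end(void)   { return 0; }
static int exec_begin(void)  { return 0; }
static int exec_end(void)    { return 0; }

enum {
    PF_LOAD_PASS1 = 1, PF_DELETE_TREE = 2, PF_DISASSEMBLE = 4, PF_EXECUTE = 8
};

static int init_callbacks(struct walk_setup *ws, unsigned pass_number)
{
    switch (pass_number) {
    case 0:         /* parse only - caller installs its own callbacks */
        ws->parse_flags = PF_LOAD_PASS1 | PF_DELETE_TREE | PF_DISASSEMBLE;
        ws->descending = NULL;
        ws->ascending = NULL;
        break;
    case 1:         /* load pass 1 */
        ws->parse_flags = PF_LOAD_PASS1 | PF_DELETE_TREE;
        ws->descending = load1_begin;
        ws->ascending = load1_end;
        break;
    case 2:         /* load pass 2 */
        ws->parse_flags = PF_LOAD_PASS1 | PF_DELETE_TREE;
        ws->descending = load2_begin;
        ws->ascending = load2_end;
        break;
    case 3:         /* execution pass */
        ws->parse_flags |= PF_EXECUTE | PF_DELETE_TREE;
        ws->descending = exec_begin;
        ws->ascending = exec_end;
        break;
    default:
        return -1;
    }
    return 0;
}

int main(void)
{
    struct walk_setup ws = { 0, NULL, NULL };

    init_callbacks(&ws, 0);
    printf("pass 0 flags: %#x, callbacks installed: %s\n",
           ws.parse_flags, ws.descending ? "yes" : "no");
    return 0;
}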
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 7f569d573027..2ac28d297305 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index d67891de1b54..9d6e2c1de1f8 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index ecb12e2137ff..24f7d5ea678a 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 83cd45f4a870..c7bffff9ed32 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 4c67193a9fa7..3393a73ca0d6 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index a9cb4a1a4bb8..955f83da68a5 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index a31e549e64cc..caaed3c673fd 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index a3e2f38aadf6..ae779c1e871d 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 4d764e847a08..17e4bbfdb096 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index e3157313eb27..78ac29351c9e 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index a5687540e9a6..5d594eb2e5ec 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 144cbb9b73bc..9957297d1580 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -314,6 +314,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
{
union acpi_operand_object *handler_obj;
union acpi_operand_object *obj_desc;
+ union acpi_operand_object *start_desc;
union acpi_operand_object **last_obj_ptr;
acpi_adr_space_setup region_setup;
void **region_context;
@@ -341,6 +342,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
/* Find this region in the handler's list */
obj_desc = handler_obj->address_space.region_list;
+ start_desc = obj_desc;
last_obj_ptr = &handler_obj->address_space.region_list;
while (obj_desc) {
@@ -438,6 +440,15 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
last_obj_ptr = &obj_desc->region.next;
obj_desc = obj_desc->region.next;
+
+ /* Prevent infinite loop if list is corrupted */
+
+ if (obj_desc == start_desc) {
+ ACPI_ERROR((AE_INFO,
+ "Circular handler list in region object %p",
+ region_obj));
+ return_VOID;
+ }
}
/* If we get here, the region was not in the handler's region list */
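A standalone sketch of the loop guard added to the evregion.c walk above: remember the first node of the singly linked list and bail out if the walk ever returns to it. The region type below is a stand-in, not the ACPICA operand object.

/*
 * Circular-list guard for a linked-list search; illustrative only.
 */
#include <stdio.h>

struct region {
    int id;
    struct region *next;
};

static struct region *find_region(struct region *list, int id)
{
    struct region *start = list;
    struct region *obj = list;

    while (obj) {
        if (obj->id == id)
            return obj;

        obj = obj->next;

        /* Prevent infinite loop if the list is corrupted */
        if (obj == start) {
            fprintf(stderr, "Circular region list detected\n");
            return NULL;
        }
    }
    return NULL;
}

int main(void)
{
    struct region a = { 1, NULL }, b = { 2, NULL };

    a.next = &b;
    b.next = &a;            /* deliberately corrupt: circular list */

    if (!find_region(&a, 3))
        printf("lookup terminated safely\n");
    return 0;
}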
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 8354c4f7f10c..1b148a440d67 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 9e9e3454d893..4d8a709c1fc4 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 23a7fadca412..a734b27da061 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 39d06af5e347..e286640ad4ff 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 5713da77c665..20a1392ffe06 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -583,6 +583,18 @@ acpi_install_gpe_block(acpi_handle gpe_device,
goto unlock_and_exit;
}
+ /* Validate the parent device */
+
+ if (node->type != ACPI_TYPE_DEVICE) {
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
+ if (node->object) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+
/*
* For user-installed GPE Block Devices, the gpe_block_base_number
* is always zero
@@ -666,6 +678,13 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
goto unlock_and_exit;
}
+ /* Validate the parent device */
+
+ if (node->type != ACPI_TYPE_DEVICE) {
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
/* Get the device_object attached to the node */
obj_desc = acpi_ns_get_attached_object(node);
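A minimal model of the new parameter checks in the evxfgpe.c hunks above: a user-installed GPE block must hang off a Device node, and for installation the node must not already carry an attached object. The node type and status codes below are simplified stand-ins, not the ACPICA definitions.

/*
 * Sketch of GPE block parent-device validation; illustrative only.
 */
#include <stdio.h>

enum { TYPE_DEVICE = 1, TYPE_METHOD = 2 };
enum { OK = 0, ERR_TYPE = 1, ERR_ALREADY_EXISTS = 2 };

struct node {
    int type;
    void *object;       /* attached object, if any */
};

static int validate_gpe_parent(const struct node *node)
{
    if (node->type != TYPE_DEVICE)
        return ERR_TYPE;            /* AE_TYPE in the patch above */

    if (node->object)
        return ERR_ALREADY_EXISTS;  /* AE_ALREADY_EXISTS in the patch above */

    return OK;
}

int main(void)
{
    struct node dev = { TYPE_DEVICE, NULL };
    struct node meth = { TYPE_METHOD, NULL };

    printf("device node: %d\n", validate_gpe_parent(&dev));   /* 0 */
    printf("method node: %d\n", validate_gpe_parent(&meth));  /* 1 */
    return 0;
}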
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 02ed75ac56cd..2d6f187939c7 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 06d216c8d43a..8ba1464efd11 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 69e4a8cc9b71..c545386fee96 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 3c2e6dcdad3e..95d23dabcfbb 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 81c72a4ecd82..4cfc3d3b5c97 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 4d046faac48c..973fdae00f94 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -94,12 +94,13 @@ static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
{ACPI_EXD_BUFFER, 0, NULL}
};
-static struct acpi_exdump_info acpi_ex_dump_package[5] = {
+static struct acpi_exdump_info acpi_ex_dump_package[6] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(package.node), "Parent Node"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"},
@@ -108,11 +109,11 @@ static struct acpi_exdump_info acpi_ex_dump_package[5] = {
static struct acpi_exdump_info acpi_ex_dump_device[4] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[0]),
"System Notify"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[1]),
- "Device Notify"}
+ "Device Notify"},
+ {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(device.handler), "Handler"}
};
static struct acpi_exdump_info acpi_ex_dump_event[2] = {
@@ -142,17 +143,18 @@ static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
};
-static struct acpi_exdump_info acpi_ex_dump_region[7] = {
+static struct acpi_exdump_info acpi_ex_dump_region[8] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region), NULL},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.space_id), "Space Id"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.flags), "Flags"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(region.node), "Parent Node"},
{ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(region.address), "Address"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(region.length), "Length"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.handler), "Handler"},
+ {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(region.handler), "Handler"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.next), "Next"}
};
-static struct acpi_exdump_info acpi_ex_dump_power[5] = {
+static struct acpi_exdump_info acpi_ex_dump_power[6] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_power), NULL},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.system_level),
"System Level"},
@@ -161,7 +163,8 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[0]),
"System Notify"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[1]),
- "Device Notify"}
+ "Device Notify"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.handler), "Handler"}
};
static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
@@ -225,7 +228,7 @@ static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.value), "Value"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.node), "Node"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(reference.node), "Node"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"},
{ACPI_EXD_REFERENCE, 0, NULL}
};
@@ -234,16 +237,16 @@ static struct acpi_exdump_info acpi_ex_dump_address_handler[6] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_address_handler),
NULL},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(address_space.space_id), "Space Id"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.next), "Next"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.region_list),
+ {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(address_space.next), "Next"},
+ {ACPI_EXD_RGN_LIST, ACPI_EXD_OFFSET(address_space.region_list),
"Region List"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.node), "Node"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(address_space.node), "Node"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"}
};
static struct acpi_exdump_info acpi_ex_dump_notify[7] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(notify.node), "Node"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(notify.handler_type), "Handler Type"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.handler), "Handler"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"},
@@ -252,14 +255,31 @@ static struct acpi_exdump_info acpi_ex_dump_notify[7] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[1]), "Next Device Notify"}
};
+static struct acpi_exdump_info acpi_ex_dump_extra[6] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_extra), NULL},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.method_REG), "_REG Method"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(extra.scope_node), "Scope Node"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.region_context),
+ "Region Context"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.aml_start), "Aml Start"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(extra.aml_length), "Aml Length"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_data[3] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_data), NULL},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(data.handler), "Handler"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(data.pointer), "Raw Data"}
+};
+
/* Miscellaneous tables */
-static struct acpi_exdump_info acpi_ex_dump_common[4] = {
+static struct acpi_exdump_info acpi_ex_dump_common[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_common), NULL},
{ACPI_EXD_TYPE, 0, NULL},
{ACPI_EXD_UINT16, ACPI_EXD_OFFSET(common.reference_count),
"Reference Count"},
- {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"}
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"},
+ {ACPI_EXD_LIST, ACPI_EXD_OFFSET(common.next_object), "Object List"}
};
static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
@@ -274,15 +294,17 @@ static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
"Field Bit Offset"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.base_byte_offset),
"Base Byte Offset"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
};
-static struct acpi_exdump_info acpi_ex_dump_node[5] = {
+static struct acpi_exdump_info acpi_ex_dump_node[7] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL},
{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"},
{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
- {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(child), "Child List"},
- {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(peer), "Next Peer"}
+ {ACPI_EXD_LIST, ACPI_EXD_NSOFFSET(object), "Object List"},
+ {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(parent), "Parent"},
+ {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(child), "Child"},
+ {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(peer), "Peer"}
};
/* Dispatch table, indexed by object type */
@@ -315,7 +337,9 @@ static struct acpi_exdump_info *acpi_ex_dump_info[] = {
acpi_ex_dump_address_handler,
NULL,
NULL,
- NULL
+ NULL,
+ acpi_ex_dump_extra,
+ acpi_ex_dump_data
};
/*******************************************************************************
@@ -340,6 +364,10 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
char *name;
const char *reference_name;
u8 count;
+ union acpi_operand_object *start;
+ union acpi_operand_object *data = NULL;
+ union acpi_operand_object *next;
+ struct acpi_namespace_node *node;
if (!info) {
acpi_os_printf
@@ -363,9 +391,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
case ACPI_EXD_TYPE:
- acpi_ex_out_string("Type",
- acpi_ut_get_object_type_name
- (obj_desc));
+ acpi_os_printf("%20s : %2.2X [%s]\n", "Type",
+ obj_desc->common.type,
+ acpi_ut_get_object_type_name(obj_desc));
break;
case ACPI_EXD_UINT8:
@@ -433,6 +461,121 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
acpi_ex_dump_reference_obj(obj_desc);
break;
+ case ACPI_EXD_LIST:
+
+ start = *ACPI_CAST_PTR(void *, target);
+ next = start;
+
+ acpi_os_printf("%20s : %p", name, next);
+ if (next) {
+ acpi_os_printf("(%s %2.2X)",
+ acpi_ut_get_object_type_name
+ (next), next->common.type);
+
+ while (next->common.next_object) {
+ if ((next->common.type ==
+ ACPI_TYPE_LOCAL_DATA) && !data) {
+ data = next;
+ }
+
+ next = next->common.next_object;
+ acpi_os_printf("->%p(%s %2.2X)", next,
+ acpi_ut_get_object_type_name
+ (next),
+ next->common.type);
+
+ if ((next == start) || (next == data)) {
+ acpi_os_printf
+ ("\n**** Error: Object list appears to be circular linked");
+ break;
+ }
+ }
+ }
+
+ acpi_os_printf("\n", next);
+ break;
+
+ case ACPI_EXD_HDLR_LIST:
+
+ start = *ACPI_CAST_PTR(void *, target);
+ next = start;
+
+ acpi_os_printf("%20s : %p", name, next);
+ if (next) {
+ acpi_os_printf("(%s %2.2X)",
+ acpi_ut_get_object_type_name
+ (next), next->common.type);
+
+ while (next->address_space.next) {
+ if ((next->common.type ==
+ ACPI_TYPE_LOCAL_DATA) && !data) {
+ data = next;
+ }
+
+ next = next->address_space.next;
+ acpi_os_printf("->%p(%s %2.2X)", next,
+ acpi_ut_get_object_type_name
+ (next),
+ next->common.type);
+
+ if ((next == start) || (next == data)) {
+ acpi_os_printf
+ ("\n**** Error: Handler list appears to be circular linked");
+ break;
+ }
+ }
+ }
+
+ acpi_os_printf("\n", next);
+ break;
+
+ case ACPI_EXD_RGN_LIST:
+
+ start = *ACPI_CAST_PTR(void *, target);
+ next = start;
+
+ acpi_os_printf("%20s : %p", name, next);
+ if (next) {
+ acpi_os_printf("(%s %2.2X)",
+ acpi_ut_get_object_type_name
+ (next), next->common.type);
+
+ while (next->region.next) {
+ if ((next->common.type ==
+ ACPI_TYPE_LOCAL_DATA) && !data) {
+ data = next;
+ }
+
+ next = next->region.next;
+ acpi_os_printf("->%p(%s %2.2X)", next,
+ acpi_ut_get_object_type_name
+ (next),
+ next->common.type);
+
+ if ((next == start) || (next == data)) {
+ acpi_os_printf
+ ("\n**** Error: Region list appears to be circular linked");
+ break;
+ }
+ }
+ }
+
+ acpi_os_printf("\n", next);
+ break;
+
+ case ACPI_EXD_NODE:
+
+ node =
+ *ACPI_CAST_PTR(struct acpi_namespace_node *,
+ target);
+
+ acpi_os_printf("%20s : %p", name, node);
+ if (node) {
+ acpi_os_printf(" [%4.4s]", node->name.ascii);
+ }
+ acpi_os_printf("\n");
+ break;
+
default:
acpi_os_printf("**** Invalid table opcode [%X] ****\n",
@@ -821,10 +964,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
}
acpi_os_printf("%20s : %4.4s\n", "Name", acpi_ut_get_node_name(node));
- acpi_ex_out_string("Type", acpi_ut_get_type_name(node->type));
- acpi_ex_out_pointer("Attached Object",
- acpi_ns_get_attached_object(node));
- acpi_ex_out_pointer("Parent", node->parent);
+ acpi_os_printf("%20s : %2.2X [%s]\n", "Type",
+ node->type, acpi_ut_get_type_name(node->type));
acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node),
acpi_ex_dump_node);
@@ -1017,22 +1158,26 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
((struct acpi_namespace_node *)obj_desc)->
object);
- acpi_ex_dump_object_descriptor(((struct acpi_namespace_node *)
- obj_desc)->object, flags);
- return_VOID;
+ obj_desc = ((struct acpi_namespace_node *)obj_desc)->object;
+ goto dump_object;
}
if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
- acpi_os_printf
- ("ExDumpObjectDescriptor: %p is not an ACPI operand object: [%s]\n",
- obj_desc, acpi_ut_get_descriptor_name(obj_desc));
+ acpi_os_printf("%p is not an ACPI operand object: [%s]\n",
+ obj_desc, acpi_ut_get_descriptor_name(obj_desc));
return_VOID;
}
- if (obj_desc->common.type > ACPI_TYPE_NS_NODE_MAX) {
+ /* Validate the object type */
+
+ if (obj_desc->common.type > ACPI_TYPE_LOCAL_MAX) {
+ acpi_os_printf("Not a known object type: %2.2X\n",
+ obj_desc->common.type);
return_VOID;
}
+dump_object:
+
/* Common Fields */
acpi_ex_dump_object(obj_desc, acpi_ex_dump_common);
@@ -1040,6 +1185,22 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
/* Object-specific fields */
acpi_ex_dump_object(obj_desc, acpi_ex_dump_info[obj_desc->common.type]);
+
+ if (obj_desc->common.type == ACPI_TYPE_REGION) {
+ obj_desc = obj_desc->common.next_object;
+ if (obj_desc->common.type > ACPI_TYPE_LOCAL_MAX) {
+ acpi_os_printf
+ ("Secondary object is not a known object type: %2.2X\n",
+ obj_desc->common.type);
+
+ return_VOID;
+ }
+
+ acpi_os_printf("\nExtra attached Object (%p):\n", obj_desc);
+ acpi_ex_dump_object(obj_desc,
+ acpi_ex_dump_info[obj_desc->common.type]);
+ }
+
return_VOID;
}
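A compact model of the ACPI_EXD_LIST dump added in the exdump.c hunk above: print each element of a singly linked object list and stop if the chain ever loops back to the start. The object type here is a simplified stand-in for the ACPICA operand object.

/*
 * Linked-object-list dump with a circular-chain check; illustrative only.
 */
#include <stdio.h>

struct object {
    const char *type_name;
    struct object *next_object;
};

static void dump_object_list(const char *name, struct object *start)
{
    struct object *next = start;

    printf("%20s : %p", name, (void *)next);
    while (next && next->next_object) {
        next = next->next_object;
        printf("->%p(%s)", (void *)next, next->type_name);

        if (next == start) {
            printf("\n**** Error: Object list appears to be circularly linked");
            break;
        }
    }
    printf("\n");
}

int main(void)
{
    struct object a = { "Region", NULL }, b = { "Extra", NULL };

    a.next_object = &b;
    dump_object_list("Object List", &a);
    return 0;
}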
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index cfd875243421..68d97441432c 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 49fb742d61b9..1d1b27a96c5b 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 65d93607f368..2207e624f538 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 7be0205ad067..b49ea2a95f4f 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 14689dec4960..dbb03b544e8c 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index d74cea416ca0..1b8e94104407 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index d6fa0fce1fc9..2ede656ee26a 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index bc042adf8804..363767cf01e5 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 4459e32c683d..29e9e99f7fe3 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 5a588611ab48..ee3f872870bc 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 9d28867e60dc..cd5288a257a9 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 7ca6925a87ca..ab060261b43e 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 1606524312e3..3cde553bcbe1 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index be3f66973ee8..3af8de3fcea4 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index f0b09bf9887d..daf49f7ea311 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 20d809d90c5b..04bd16c08f9e 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 26e371073b1a..fd11018b0168 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 6578dee2e51b..f7da64123ed5 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,7 +77,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
/* We must wait, so unlock the interpreter */
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
status = acpi_os_wait_semaphore(semaphore, 1, timeout);
@@ -87,7 +87,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
/* Reacquire the interpreter */
- acpi_ex_reacquire_interpreter();
+ acpi_ex_enter_interpreter();
}
return_ACPI_STATUS(status);
@@ -123,7 +123,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
/* We must wait, so unlock the interpreter */
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
status = acpi_os_acquire_mutex(mutex, timeout);
@@ -133,7 +133,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
/* Reacquire the interpreter */
- acpi_ex_reacquire_interpreter();
+ acpi_ex_enter_interpreter();
}
return_ACPI_STATUS(status);
@@ -198,7 +198,7 @@ acpi_status acpi_ex_system_do_sleep(u64 how_long)
/* Since this thread will sleep, we must release the interpreter */
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
/*
* For compatibility with other ACPI implementations and to prevent
@@ -212,7 +212,7 @@ acpi_status acpi_ex_system_do_sleep(u64 how_long)
/* And now we must get the interpreter again */
- acpi_ex_reacquire_interpreter();
+ acpi_ex_enter_interpreter();
return (AE_OK);
}
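An illustrative sketch of the pattern the exsystem.c hunks above rely on: the interpreter lock is dropped around any operation that may block (Sleep, Acquire, Wait) and reacquired afterwards. A plain pthread mutex stands in for the interpreter lock; this is not the ACPICA implementation.

/*
 * Release/reacquire a lock around a blocking operation; illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t interpreter_lock = PTHREAD_MUTEX_INITIALIZER;

static void exit_interpreter(void)  { pthread_mutex_unlock(&interpreter_lock); }
static void enter_interpreter(void) { pthread_mutex_lock(&interpreter_lock); }

static void do_sleep(long ms)
{
    struct timespec ts = { 0, ms * 1000000L };

    /* We are about to block, so release the interpreter */
    exit_interpreter();

    nanosleep(&ts, NULL);

    /* And now we must get the interpreter again */
    enter_interpreter();
}

int main(void)
{
    enter_interpreter();        /* normally taken when execution starts */
    do_sleep(10);
    exit_interpreter();
    printf("sleep completed with the lock released while blocked\n");
    return 0;
}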
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 99dc7b287d55..d9d72dff2a76 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -100,37 +100,6 @@ void acpi_ex_enter_interpreter(void)
/*******************************************************************************
*
- * FUNCTION: acpi_ex_reacquire_interpreter
- *
- * PARAMETERS: None
- *
- * RETURN: None
- *
- * DESCRIPTION: Reacquire the interpreter execution region from within the
- * interpreter code. Failure to enter the interpreter region is a
- * fatal system error. Used in conjunction with
- * relinquish_interpreter
- *
- ******************************************************************************/
-
-void acpi_ex_reacquire_interpreter(void)
-{
- ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
-
- /*
- * If the global serialized flag is set, do not release the interpreter,
- * since it was not actually released by acpi_ex_relinquish_interpreter.
- * This forces the interpreter to be single threaded.
- */
- if (!acpi_gbl_all_methods_serialized) {
- acpi_ex_enter_interpreter();
- }
-
- return_VOID;
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ex_exit_interpreter
*
* PARAMETERS: None
@@ -139,7 +108,16 @@ void acpi_ex_reacquire_interpreter(void)
*
* DESCRIPTION: Exit the interpreter execution region. This is the top level
* routine used to exit the interpreter when all processing has
- * been completed.
+ * been completed, or when the method blocks.
+ *
+ * Cases where the interpreter is unlocked internally:
+ * 1) Method will be blocked on a Sleep() AML opcode
+ * 2) Method will be blocked on an Acquire() AML opcode
+ * 3) Method will be blocked on a Wait() AML opcode
+ * 4) Method will be blocked to acquire the global lock
+ * 5) Method will be blocked waiting to execute a serialized control
+ * method that is currently executing
+ * 6) About to invoke a user-installed opregion handler
*
******************************************************************************/
@@ -160,44 +138,6 @@ void acpi_ex_exit_interpreter(void)
/*******************************************************************************
*
- * FUNCTION: acpi_ex_relinquish_interpreter
- *
- * PARAMETERS: None
- *
- * RETURN: None
- *
- * DESCRIPTION: Exit the interpreter execution region, from within the
- * interpreter - before attempting an operation that will possibly
- * block the running thread.
- *
- * Cases where the interpreter is unlocked internally
- * 1) Method to be blocked on a Sleep() AML opcode
- * 2) Method to be blocked on an Acquire() AML opcode
- * 3) Method to be blocked on a Wait() AML opcode
- * 4) Method to be blocked to acquire the global lock
- * 5) Method to be blocked waiting to execute a serialized control method
- * that is currently executing
- * 6) About to invoke a user-installed opregion handler
- *
- ******************************************************************************/
-
-void acpi_ex_relinquish_interpreter(void)
-{
- ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
-
- /*
- * If the global serialized flag is set, do not release the interpreter.
- * This forces the interpreter to be single threaded.
- */
- if (!acpi_gbl_all_methods_serialized) {
- acpi_ex_exit_interpreter();
- }
-
- return_VOID;
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ex_truncate_for32bit_table
*
* PARAMETERS: obj_desc - Object to be truncated
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 3d36df828f52..1e66d960fc11 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 414076818d40..858fdd6be598 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 96540506058f..2e6caabba07a 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 0889a629505f..e701d8c33dbf 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 12e6cff54f78..e0fd9b4978cd 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index e3828cc4361b..d590693eb54e 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 3c498dc1636e..76ab5c1a814e 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index eab70d58852a..6b919127cd9d 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index b4b47db2dee2..96d007df65ec 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 15dddc10fc9b..6921c7f3d208 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 14f65f6345b9..f1249e3463be 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index fd1ff54cda19..607eb9e5150d 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 74b24c82707e..80fcfc8c9c1b 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index acd2964c2690..b55642c4ee58 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 48b9c6f12643..3d88ef4a3e0d 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 283762511b73..42d37109aa5d 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 963ceef063f8..e634a05974db 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 3a0423af968c..a3fb7e4c0809 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -111,9 +111,8 @@ acpi_status acpi_ns_initialize_objects(void)
info.object_count));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "%u Control Methods found\n", info.method_count));
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "%u Op Regions found\n", info.op_region_count));
+ "%u Control Methods found\n%u Op Regions found\n",
+ info.method_count, info.op_region_count));
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 89ec645e7730..7c9d0181f341 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -128,12 +128,12 @@ unlock:
* parse trees.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "**** Begin Table Method Parsing and Object Initialization\n"));
+ "**** Begin Table Object Initialization\n"));
status = acpi_ds_initialize_objects(table_index, node);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "**** Completed Table Method Parsing and Object Initialization\n"));
+ "**** Completed Table Object Initialization\n"));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 90a0380fb8a0..7eee0a6f02f6 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 7a736f4d1fd8..fe54a8c73b8c 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -222,13 +222,19 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
}
}
- /* Clear the entry in all cases */
+ /* Clear the Node entry in all cases */
node->object = NULL;
if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_OPERAND) {
+
+ /* Unlink object from front of possible object list */
+
node->object = obj_desc->common.next_object;
+
+ /* Handle possible 2-descriptor object */
+
if (node->object &&
- ((node->object)->common.type != ACPI_TYPE_LOCAL_DATA)) {
+ (node->object->common.type != ACPI_TYPE_LOCAL_DATA)) {
node->object = node->object->common.next_object;
}
}
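
[Editor's sketch] The nsobject.c hunk above documents the detach path: the attached object is unlinked from the front of the node's object list, and the possible second descriptor is skipped as well unless it is a LOCAL_DATA entry that must stay attached. The following is a minimal standalone C sketch of that unlink pattern; the types and names are hypothetical, not ACPICA's.

#include <stdio.h>

enum obj_type { TYPE_NORMAL, TYPE_LOCAL_DATA };

struct obj {
    enum obj_type type;
    struct obj *next;
};

struct node {
    struct obj *object;              /* head of the attached-object list */
};

/* Pop the primary object off the node; skip its secondary descriptor too,
 * unless that entry is LOCAL_DATA, which must remain attached. */
static void detach_object(struct node *node)
{
    struct obj *head = node->object;

    if (!head)
        return;

    node->object = head->next;       /* unlink from the front of the list */

    if (node->object && node->object->type != TYPE_LOCAL_DATA)
        node->object = node->object->next;
}

int main(void)
{
    struct obj second = { TYPE_NORMAL, NULL };
    struct obj first = { TYPE_NORMAL, &second };
    struct node n = { &first };

    detach_object(&n);
    printf("head after detach: %p\n", (void *)n.object);   /* NULL here */
    return 0;
}
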
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 177857340271..e83cff31754b 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index d2855d9857c4..392910ffbed9 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 3d5391f9bcb5..68f725839eb6 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -132,12 +132,12 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
* Decode the type of the expected package contents
*
* PTYPE1 packages contain no subpackages
- * PTYPE2 packages contain sub-packages
+ * PTYPE2 packages contain subpackages
*/
switch (package->ret_info.type) {
case ACPI_PTYPE1_FIXED:
/*
- * The package count is fixed and there are no sub-packages
+ * The package count is fixed and there are no subpackages
*
* If package is too small, exit.
* If package is larger than expected, issue warning but continue
@@ -169,7 +169,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
case ACPI_PTYPE1_VAR:
/*
- * The package count is variable, there are no sub-packages, and all
+ * The package count is variable, there are no subpackages, and all
* elements must be of the same type
*/
for (i = 0; i < count; i++) {
@@ -185,7 +185,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
case ACPI_PTYPE1_OPTION:
/*
- * The package count is variable, there are no sub-packages. There are
+ * The package count is variable, there are no subpackages. There are
* a fixed number of required elements, and a variable number of
* optional elements.
*
@@ -242,7 +242,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
elements++;
count--;
- /* Examine the sub-packages */
+ /* Examine the subpackages */
status =
acpi_ns_check_package_list(info, package, elements, count);
@@ -250,7 +250,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
case ACPI_PTYPE2_PKG_COUNT:
- /* First element is the (Integer) count of sub-packages to follow */
+ /* First element is the (Integer) count of subpackages to follow */
status = acpi_ns_check_object_type(info, elements,
ACPI_RTYPE_INTEGER, 0);
@@ -270,7 +270,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
count = expected_count;
elements++;
- /* Examine the sub-packages */
+ /* Examine the subpackages */
status =
acpi_ns_check_package_list(info, package, elements, count);
@@ -283,9 +283,9 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
case ACPI_PTYPE2_FIX_VAR:
/*
* These types all return a single Package that consists of a
- * variable number of sub-Packages.
+ * variable number of subpackages.
*
- * First, ensure that the first element is a sub-Package. If not,
+ * First, ensure that the first element is a subpackage. If not,
* the BIOS may have incorrectly returned the object as a single
* package instead of a Package of Packages (a common error if
* there is only one entry). We may be able to repair this by
@@ -310,7 +310,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
count = 1;
}
- /* Examine the sub-packages */
+ /* Examine the subpackages */
status =
acpi_ns_check_package_list(info, package, elements, count);
@@ -370,9 +370,9 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
u32 j;
/*
- * Validate each sub-Package in the parent Package
+ * Validate each subpackage in the parent Package
*
- * NOTE: assumes list of sub-packages contains no NULL elements.
+ * NOTE: assumes list of subpackages contains no NULL elements.
* Any NULL elements should have been removed by earlier call
* to acpi_ns_remove_null_elements.
*/
@@ -389,7 +389,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
return (status);
}
- /* Examine the different types of expected sub-packages */
+ /* Examine the different types of expected subpackages */
info->parent_package = sub_package;
switch (package->ret_info.type) {
@@ -450,14 +450,14 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
case ACPI_PTYPE2_FIXED:
- /* Each sub-package has a fixed length */
+ /* Each subpackage has a fixed length */
expected_count = package->ret_info2.count;
if (sub_package->package.count < expected_count) {
goto package_too_small;
}
- /* Check the type of each sub-package element */
+ /* Check the type of each subpackage element */
for (j = 0; j < expected_count; j++) {
status =
@@ -475,14 +475,14 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
case ACPI_PTYPE2_MIN:
- /* Each sub-package has a variable but minimum length */
+ /* Each subpackage has a variable but minimum length */
expected_count = package->ret_info.count1;
if (sub_package->package.count < expected_count) {
goto package_too_small;
}
- /* Check the type of each sub-package element */
+ /* Check the type of each subpackage element */
status =
acpi_ns_check_package_elements(info, sub_elements,
@@ -531,7 +531,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
(*sub_elements)->integer.value = expected_count;
}
- /* Check the type of each sub-package element */
+ /* Check the type of each subpackage element */
status =
acpi_ns_check_package_elements(info,
@@ -557,10 +557,10 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
package_too_small:
- /* The sub-package count was smaller than required */
+ /* The subpackage count was smaller than required */
ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags,
- "Return Sub-Package[%u] is too small - found %u elements, expected %u",
+ "Return SubPackage[%u] is too small - found %u elements, expected %u",
i, sub_package->package.count, expected_count));
return (AE_AML_OPERAND_VALUE);
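
[Editor's sketch] The nsprepkg.c hunks above revolve around package layouts where the first element declares how many subpackages follow (the PTYPE2_PKG_COUNT case). A hedged, generic C sketch of that declared-versus-actual count check is shown below; the structure and field names are invented for illustration and do not mirror ACPICA's operand objects.

#include <stdio.h>

/* A returned package whose first element is the (Integer) count of the
 * subpackages that follow; verify the declared count against reality. */
struct package {
    int count;          /* total elements, including the leading count */
    long first_value;   /* value of element 0: the declared subpackage count */
};

static int check_pkg_count(const struct package *pkg)
{
    long declared = pkg->first_value;
    int actual = pkg->count - 1;        /* elements after the count field */

    if (declared > actual) {
        fprintf(stderr,
                "Package too small: declared %ld subpackages, found %d\n",
                declared, actual);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct package ok = { 4, 3 };       /* count element plus 3 subpackages */
    struct package bad = { 2, 5 };      /* claims 5, provides only 1 */
    printf("%d %d\n", check_pkg_count(&ok), check_pkg_count(&bad));
    return 0;
}
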
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index a05afff50eb9..7e417aa5c91e 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -207,13 +207,30 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
* this predefined name. Either one return value is expected, or none,
* for both methods and other objects.
*
- * Exit now if there is no return object. Warning if one was expected.
+ * Try to fix if there was no return object. Warning if failed to fix.
*/
if (!return_object) {
if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
- ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
- ACPI_WARN_ALWAYS,
- "Missing expected return value"));
+ if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+ ACPI_WARN_PREDEFINED((AE_INFO,
+ info->full_pathname,
+ ACPI_WARN_ALWAYS,
+ "Found unexpected NULL package element"));
+
+ status =
+ acpi_ns_repair_null_element(info,
+ expected_btypes,
+ package_index,
+ return_object_ptr);
+ if (ACPI_SUCCESS(status)) {
+ return (AE_OK); /* Repair was successful */
+ }
+ } else {
+ ACPI_WARN_PREDEFINED((AE_INFO,
+ info->full_pathname,
+ ACPI_WARN_ALWAYS,
+ "Missing expected return value"));
+ }
return (AE_AML_NO_RETURN_VALUE);
}
@@ -448,7 +465,7 @@ acpi_ns_repair_null_element(struct acpi_evaluate_info * info,
* RETURN: None.
*
* DESCRIPTION: Remove all NULL package elements from packages that contain
- * a variable number of sub-packages. For these types of
+ * a variable number of subpackages. For these types of
* packages, NULL elements can be safely removed.
*
*****************************************************************************/
@@ -469,7 +486,7 @@ acpi_ns_remove_null_elements(struct acpi_evaluate_info *info,
/*
* We can safely remove all NULL elements from these package types:
* PTYPE1_VAR packages contain a variable number of simple data types.
- * PTYPE2 packages contain a variable number of sub-packages.
+ * PTYPE2 packages contain a variable number of subpackages.
*/
switch (package_type) {
case ACPI_PTYPE1_VAR:
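
[Editor's sketch] The nsrepair.c hunk above turns the missing-return-object path from warn-and-fail into try-to-repair-first when the missing object is a NULL package element. A generic C sketch of that repair-or-fail control flow follows; the function names and the placeholder repair are hypothetical, not the ACPICA API.

#include <stdio.h>
#include <stdbool.h>

#define NOT_PACKAGE_ELEMENT (-1)

/* Hypothetical repair hook: substitute a default element on success. */
static bool repair_null_element(int package_index, int *out_value)
{
    if (package_index < 0)
        return false;
    *out_value = 0;          /* e.g. insert an Integer 0 placeholder */
    return true;
}

/* Returns true if a usable value is available after optional repair. */
static bool check_return_value(bool have_value, int package_index, int *value)
{
    if (have_value)
        return true;

    if (package_index != NOT_PACKAGE_ELEMENT) {
        fprintf(stderr, "Found unexpected NULL package element\n");
        if (repair_null_element(package_index, value))
            return true;      /* repair was successful */
    } else {
        fprintf(stderr, "Missing expected return value\n");
    }
    return false;             /* maps to AE_AML_NO_RETURN_VALUE above */
}

int main(void)
{
    int v;
    printf("repaired: %d\n", check_return_value(false, 3, &v));
    printf("failed:   %d\n", check_return_value(false, NOT_PACKAGE_ELEMENT, &v));
    return 0;
}
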
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 6a25d320b169..b09e6bef72b8 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -432,8 +432,8 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
* DESCRIPTION: Repair for the _CST object:
* 1. Sort the list ascending by C state type
* 2. Ensure type cannot be zero
- * 3. A sub-package count of zero means _CST is meaningless
- * 4. Count must match the number of C state sub-packages
+ * 3. A subpackage count of zero means _CST is meaningless
+ * 4. Count must match the number of C state subpackages
*
*****************************************************************************/
@@ -611,6 +611,7 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
union acpi_operand_object **top_object_list;
union acpi_operand_object **sub_object_list;
union acpi_operand_object *obj_desc;
+ union acpi_operand_object *sub_package;
u32 element_count;
u32 index;
@@ -619,8 +620,17 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
top_object_list = package_object->package.elements;
element_count = package_object->package.count;
- for (index = 0; index < element_count; index++) {
- sub_object_list = (*top_object_list)->package.elements;
+ /* Examine each subpackage */
+
+ for (index = 0; index < element_count; index++, top_object_list++) {
+ sub_package = *top_object_list;
+ sub_object_list = sub_package->package.elements;
+
+ /* Check for minimum required element count */
+
+ if (sub_package->package.count < 4) {
+ continue;
+ }
/*
* If the BIOS has erroneously reversed the _PRT source_name (index 2)
@@ -634,15 +644,12 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
sub_object_list[2] = obj_desc;
info->return_flags |= ACPI_OBJECT_REPAIRED;
- ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+ ACPI_WARN_PREDEFINED((AE_INFO,
+ info->full_pathname,
info->node_flags,
"PRT[%X]: Fixed reversed SourceName and SourceIndex",
index));
}
-
- /* Point to the next union acpi_operand_object in the top level package */
-
- top_object_list++;
}
return (AE_OK);
@@ -679,7 +686,7 @@ acpi_ns_repair_PSS(struct acpi_evaluate_info *info,
u32 i;
/*
- * Entries (sub-packages) in the _PSS Package must be sorted by power
+ * Entries (subpackages) in the _PSS Package must be sorted by power
* dissipation, in descending order. If it appears that the list is
* incorrectly sorted, sort it. We sort by cpu_frequency, since this
* should be proportional to the power.
@@ -767,9 +774,9 @@ acpi_ns_repair_TSS(struct acpi_evaluate_info *info,
*
* PARAMETERS: info - Method execution information block
* return_object - Pointer to the top-level returned object
- * start_index - Index of the first sub-package
- * expected_count - Minimum length of each sub-package
- * sort_index - Sub-package entry to sort on
+ * start_index - Index of the first subpackage
+ * expected_count - Minimum length of each subpackage
+ * sort_index - Subpackage entry to sort on
* sort_direction - Ascending or descending
* sort_key_name - Name of the sort_index field
*
@@ -805,7 +812,7 @@ acpi_ns_check_sorted_list(struct acpi_evaluate_info *info,
}
/*
- * NOTE: assumes list of sub-packages contains no NULL elements.
+ * NOTE: assumes list of subpackages contains no NULL elements.
* Any NULL elements should have been removed by earlier call
* to acpi_ns_remove_null_elements.
*/
@@ -832,7 +839,7 @@ acpi_ns_check_sorted_list(struct acpi_evaluate_info *info,
return (AE_AML_OPERAND_TYPE);
}
- /* Each sub-package must have the minimum length */
+ /* Each subpackage must have the minimum length */
if ((*outer_elements)->package.count < expected_count) {
return (AE_AML_PACKAGE_LIMIT);
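
[Editor's sketch] The nsrepair2.c _PRT hunk above adds a minimum-element-count guard before each subpackage is examined and keeps the swap of reversed SourceName/SourceIndex entries. A compact, generic C sketch of that skip-then-repair loop is given below; the record layout and the reversal heuristic shown here are simplified stand-ins, not the ACPICA operand-object logic.

#include <stdio.h>

enum elem_type { T_INTEGER, T_STRING };

struct elem {
    enum elem_type type;
    long value;                  /* integer payload (unused for T_STRING here) */
};

struct prt_entry {
    int count;                   /* number of valid elements */
    struct elem e[4];            /* [address, pin, source_name, source_index] */
};

static void swap_elems(struct elem *a, struct elem *b)
{
    struct elem t = *a; *a = *b; *b = t;
}

/* Skip undersized entries; if source_name (index 2) holds an Integer while
 * source_index (index 3) holds a String, the fields were reversed: swap back. */
static int repair_prt(struct prt_entry *entries, int n)
{
    int repaired = 0;

    for (int i = 0; i < n; i++) {
        struct prt_entry *p = &entries[i];

        if (p->count < 4)        /* minimum required element count */
            continue;

        if (p->e[2].type == T_INTEGER && p->e[3].type == T_STRING) {
            swap_elems(&p->e[2], &p->e[3]);
            repaired++;
            fprintf(stderr, "PRT[%d]: fixed reversed SourceName/SourceIndex\n", i);
        }
    }
    return repaired;
}

int main(void)
{
    struct prt_entry tab[2] = {
        { 4, { {T_INTEGER, 0xFFFF}, {T_INTEGER, 0}, {T_INTEGER, 0}, {T_STRING, 0} } },
        { 2, { {T_INTEGER, 0xFFFF}, {T_INTEGER, 1} } }   /* too short: skipped */
    };
    printf("repaired %d entries\n", repair_prt(tab, 2));
    return 0;
}
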
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 47420faef073..af1cc42a8aa1 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 4a0665b6bcc1..4a5e3f5c0ff7 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index e81f15ef659a..4758a1f2ce22 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 1f0c28ba50df..4bd558bf10d2 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -923,19 +923,22 @@ ACPI_EXPORT_SYMBOL(acpi_detach_data)
/*******************************************************************************
*
- * FUNCTION: acpi_get_data
+ * FUNCTION: acpi_get_data_full
*
* PARAMETERS: obj_handle - Namespace node
* handler - Handler used in call to attach_data
* data - Where the data is returned
+ * callback - function to execute before returning
*
* RETURN: Status
*
- * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
+ * DESCRIPTION: Retrieve data that was previously attached to a namespace node
+ * and execute a callback before returning.
*
******************************************************************************/
acpi_status
-acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
+acpi_get_data_full(acpi_handle obj_handle, acpi_object_handler handler,
+ void **data, void (*callback)(void *))
{
struct acpi_namespace_node *node;
acpi_status status;
@@ -960,10 +963,34 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
}
status = acpi_ns_get_attached_data(node, handler, data);
+ if (ACPI_SUCCESS(status) && callback) {
+ callback(*data);
+ }
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
+ACPI_EXPORT_SYMBOL(acpi_get_data_full)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_data
+ *
+ * PARAMETERS: obj_handle - Namespace node
+ * handler - Handler used in call to attach_data
+ * data - Where the data is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
+{
+ return acpi_get_data_full(obj_handle, handler, data, NULL);
+}
+
ACPI_EXPORT_SYMBOL(acpi_get_data)
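
[Editor's sketch] The nsxfeval.c hunk above introduces an extended entry point that can run a caller-supplied callback on the attached data while the namespace lock is still held, with the original acpi_get_data kept as a thin wrapper that passes a NULL callback. The pattern is illustrated below with a minimal standalone C sketch using a pthread mutex and hypothetical names; it is not the ACPICA implementation.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ns_lock = PTHREAD_MUTEX_INITIALIZER;
static int attached_data = 42;              /* stands in for the attached object */

/* Extended form: optionally run a callback on the data while the lock
 * that protects it is still held (e.g. to take a reference safely). */
static int get_data_full(void **data, void (*callback)(void *))
{
    pthread_mutex_lock(&ns_lock);

    *data = &attached_data;                 /* "lookup" always succeeds here */
    if (callback)
        callback(*data);                    /* executed under the lock */

    pthread_mutex_unlock(&ns_lock);
    return 0;
}

/* Original, thin wrapper: same behavior as before the extension. */
static int get_data(void **data)
{
    return get_data_full(data, NULL);
}

static void take_ref(void *data)
{
    printf("callback saw %d while locked\n", *(int *)data);
}

int main(void)
{
    void *d;
    get_data_full(&d, take_ref);
    get_data(&d);
    return 0;
}
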
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 3a4bd3ff49a3..8c6c11ce9760 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 0e6d79e462d4..dae9401be7a2 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 91a5a69db80c..314d314340ae 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 065b44ae538f..b058e2390fdd 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -480,6 +480,10 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
status = AE_OK;
}
+ if (status == AE_CTRL_TERMINATE) {
+ return_ACPI_STATUS(status);
+ }
+
status =
acpi_ps_complete_op(walk_state, &op,
status);
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 95dc608a66a8..a6885077d59e 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -219,7 +219,10 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
status = walk_state->descending_callback(walk_state, op);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
+ if (status != AE_CTRL_TERMINATE) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During name lookup/catalog"));
+ }
return_ACPI_STATUS(status);
}
@@ -230,7 +233,7 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
status = acpi_ps_next_parse_state(walk_state, *op, status);
if (ACPI_FAILURE(status)) {
if (status == AE_CTRL_PENDING) {
- return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
+ status = AE_CTRL_PARSE_PENDING;
}
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 1b659e59710a..1755d2ac5656 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index b0c9787dbe61..0d8d37ffd04d 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 79d9a28dedef..6d27b597394e 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 6a4b6fb39f32..32d250feea21 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 877dc0de8df3..0b64181e7720 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 91fa73a6e55e..3cd48802eede 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index abd65624754f..9cb07e1e76d9 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index fcb7a840e996..e135acaa5e1c 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index f3a9276ac665..916fd095ff34 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index b60c9cf82862..689556744b03 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -636,7 +636,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
for (index = 0; index < number_of_elements; index++) {
- /* Dereference the sub-package */
+ /* Dereference the subpackage */
package_element = *top_object_list;
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 3a2ace93e62c..75d369050657 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -273,7 +273,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
*/
user_prt->length = (sizeof(struct acpi_pci_routing_table) - 4);
- /* Each sub-package must be of length 4 */
+ /* Each subpackage must be of length 4 */
if ((*top_object_list)->package.count != 4) {
ACPI_ERROR((AE_INFO,
@@ -283,7 +283,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
}
/*
- * Dereference the sub-package.
+ * Dereference the subpackage.
* The sub_object_list will now point to an array of the four IRQ
* elements: [Address, Pin, Source, source_index]
*/
@@ -292,7 +292,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
/* 1) First subobject: Dereference the PRT.Address */
obj_desc = sub_object_list[0];
- if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+ if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
"(PRT[%u].Address) Need Integer, found %s",
index,
@@ -305,7 +305,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
/* 2) Second subobject: Dereference the PRT.Pin */
obj_desc = sub_object_list[1];
- if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+ if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
"(PRT[%u].Pin) Need Integer, found %s",
index,
@@ -394,7 +394,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
/* 4) Fourth subobject: Dereference the PRT.source_index */
obj_desc = sub_object_list[3];
- if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+ if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
"(PRT[%u].SourceIndex) Need Integer, found %s",
index,
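
[Editor's sketch] The rscreate.c hunks above harden the _PRT subobject checks so that a NULL slot is rejected before its type field is ever dereferenced. A small, generic C sketch of that combined NULL-and-type validation follows; the types and messages are illustrative only.

#include <stdio.h>
#include <stddef.h>

enum type { TYPE_INTEGER, TYPE_STRING };

struct obj {
    enum type type;
    long value;
};

/* Validate a subobject before use: reject both a NULL slot and a slot of
 * the wrong type in one test, so a malformed package is never dereferenced. */
static int get_integer(const struct obj *o, long *out)
{
    if (!o || o->type != TYPE_INTEGER) {
        fprintf(stderr, "Need Integer, found %s\n",
                o ? "wrong type" : "NULL");
        return -1;
    }
    *out = o->value;
    return 0;
}

int main(void)
{
    struct obj good = { TYPE_INTEGER, 9 };
    long v;
    printf("good: %d\n", get_integer(&good, &v));
    printf("null: %d\n", get_integer(NULL, &v));
    return 0;
}
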
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 8a2d4986b0aa..c3c56b5a9788 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,8 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsdump")
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
/* Local prototypes */
static void acpi_rs_out_string(char *title, char *value);
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index 46192bd53653..2f9332d5c973 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsdumpinfo")
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
#define ACPI_RSD_OFFSET(f) (u8) ACPI_OFFSET (union acpi_resource_data,f)
#define ACPI_PRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
#define ACPI_RSD_TABLE_SIZE(name) (sizeof(name) / sizeof (struct acpi_rsdump_info))
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 41fed78e0de6..9d3f8a9a24bd 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -132,8 +132,7 @@ struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
acpi_rs_convert_uart_serial_bus,
};
-#ifdef ACPI_FUTURE_USAGE
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
/* Dispatch table for resource dump functions */
@@ -168,7 +167,6 @@ struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
};
#endif
-#endif /* ACPI_FUTURE_USAGE */
/*
* Base sizes for external AML resource descriptors, indexed by internal type.
* Includes size of the descriptor header (1 byte for small descriptors,
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index ca183755a6f9..19d64873290a 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 364decc1028a..3461f7db26df 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 6053aa182093..77291293af64 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index ebc773a1b350..eab4483ff5f8 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index c99cec9cefde..41eea4bc089c 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index fe49fc43e10f..9e8407223d95 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 14a7982c9961..897a5ceb0420 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 01e476988aae..877ab9202133 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 8f89263ac47e..ec14588254d4 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index e4f4f02d49e7..c12003947bd5 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 634357d51fe9..e3040947e9a0 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -292,10 +292,11 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
new_table = acpi_os_map_memory(new_address, new_table_length);
if (!new_table) {
ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
- "%4.4s %p Attempted physical table override failed",
+ "%4.4s " ACPI_PRINTF_UINT
+ " Attempted physical table override failed",
table_header->signature,
- ACPI_CAST_PTR(void,
- table_desc->address)));
+ ACPI_FORMAT_TO_UINT(table_desc->
+ address)));
return (NULL);
}
@@ -308,11 +309,11 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
finish_override:
- ACPI_INFO((AE_INFO,
- "%4.4s %p %s table override, new table: %p",
+ ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT
+ " %s table override, new table: " ACPI_PRINTF_UINT,
table_header->signature,
- ACPI_CAST_PTR(void, table_desc->address),
- override_type, new_table));
+ ACPI_FORMAT_TO_UINT(table_desc->address),
+ override_type, ACPI_FORMAT_TO_UINT(new_table)));
/* We can now unmap/delete the original table (if fully mapped) */
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 6866e767ba90..df3bb20ea325 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -128,15 +128,17 @@ acpi_tb_print_table_header(acpi_physical_address address,
struct acpi_table_header local_header;
/*
- * The reason that the Address is cast to a void pointer is so that we
- * can use %p which will work properly on both 32-bit and 64-bit hosts.
+ * The reason that we use ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT is to
+ * support both 32-bit and 64-bit hosts/addresses in a consistent manner.
+ * The %p specifier does not emit uniform output on all hosts. On some,
+ * leading zeros are not supported.
*/
if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
- ACPI_INFO((AE_INFO, "%4.4s %p %06X",
- header->signature, ACPI_CAST_PTR(void, address),
+ ACPI_INFO((AE_INFO, "%-4.4s " ACPI_PRINTF_UINT " %06X",
+ header->signature, ACPI_FORMAT_TO_UINT(address),
header->length));
} else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
@@ -147,8 +149,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
header)->oem_id, ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
- ACPI_INFO((AE_INFO, "RSDP %p %06X (v%.2d %6.6s)",
- ACPI_CAST_PTR(void, address),
+ ACPI_INFO((AE_INFO,
+ "RSDP " ACPI_PRINTF_UINT " %06X (v%.2d %-6.6s)",
+ ACPI_FORMAT_TO_UINT(address),
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
revision >
0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
@@ -162,8 +165,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
acpi_tb_cleanup_table_header(&local_header, header);
ACPI_INFO((AE_INFO,
- "%4.4s %p %06X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
- local_header.signature, ACPI_CAST_PTR(void, address),
+ "%-4.4s " ACPI_PRINTF_UINT
+ " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
+ local_header.signature, ACPI_FORMAT_TO_UINT(address),
local_header.length, local_header.revision,
local_header.oem_id, local_header.oem_table_id,
local_header.oem_revision,
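
[Editor's sketch] The tbprint.c hunk above explains the switch away from %p: pointer formatting is not uniform across hosts, so table addresses are printed through fixed-width integer macros instead. A minimal C sketch of the same idea using standard inttypes.h formatting (not the ACPI_PRINTF_UINT/ACPI_FORMAT_TO_UINT macros themselves) is shown below.

#include <inttypes.h>
#include <stdio.h>

/* Print a table address as a zero-padded 64-bit hex value rather than with
 * %p, whose width and zero padding vary between hosts and compilers. */
static void print_table(const char *sig, uint64_t address, uint32_t length)
{
    printf("%-4.4s 0x%016" PRIx64 " %06" PRIX32 "\n", sig, address, length);
}

int main(void)
{
    print_table("FACS", 0x7f6e3000ULL, 0x40);
    return 0;
}
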
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 6412d3c301cb..a4702eee91a8 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index db826eaadd1c..a1593159d9ea 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 60b5a871833c..0909420fc776 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index e4e1468877c3..65ab8fed3d5e 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 2c2b6ae5dfc4..a1acec9d2ef3 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 1851762fc5b5..efac83c606dc 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 11fde93be120..3c1699740653 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index cacd2fd9e665..78fde0aac487 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index edff4e653d9a..270c16464dd9 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -535,10 +535,10 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
case ACPI_TYPE_LOCAL_REFERENCE:
- /* TBD: should validate incoming handle */
+ /* An incoming reference is defined to be a namespace node */
- internal_object->reference.class = ACPI_REFCLASS_NAME;
- internal_object->reference.node =
+ internal_object->reference.class = ACPI_REFCLASS_REFOF;
+ internal_object->reference.object =
external_object->reference.handle;
break;
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index d971c8631263..21a20ac5b1e1 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index b3f31dd89a45..fbfa9eca011f 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index c07d2227ea42..a3516de213fa 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -75,6 +75,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
union acpi_operand_object *handler_desc;
union acpi_operand_object *second_desc;
union acpi_operand_object *next_desc;
+ union acpi_operand_object *start_desc;
union acpi_operand_object **last_obj_ptr;
ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
@@ -235,10 +236,11 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
if (handler_desc) {
next_desc =
handler_desc->address_space.region_list;
+ start_desc = next_desc;
last_obj_ptr =
&handler_desc->address_space.region_list;
- /* Remove the region object from the handler's list */
+ /* Remove the region object from the handler list */
while (next_desc) {
if (next_desc == object) {
@@ -247,10 +249,19 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
break;
}
- /* Walk the linked list of handler */
+ /* Walk the linked list of handlers */
last_obj_ptr = &next_desc->region.next;
next_desc = next_desc->region.next;
+
+ /* Prevent infinite loop if list is corrupted */
+
+ if (next_desc == start_desc) {
+ ACPI_ERROR((AE_INFO,
+ "Circular region list in address handler object %p",
+ handler_desc));
+ return_VOID;
+ }
}
if (handler_desc->address_space.handler_flags &
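
[Editor's sketch] The utdelete.c hunk above remembers the first node of the handler's region list so that a corrupted, circular list cannot spin the removal loop forever. The guard pattern is sketched below as self-contained C with hypothetical structures, not the ACPICA objects.

#include <stdio.h>
#include <stddef.h>

struct region {
    int id;
    struct region *next;
};

/* Unlink 'target' from the list. Remember the starting node so a corrupted
 * (circular) list is detected instead of looping forever. */
static int remove_region(struct region **list, struct region *target)
{
    struct region *start = *list;
    struct region **last = list;
    struct region *cur = *list;

    while (cur) {
        if (cur == target) {
            *last = cur->next;          /* splice the node out */
            return 0;
        }

        last = &cur->next;
        cur = cur->next;

        if (cur == start) {             /* walked back to the head: corrupted */
            fprintf(stderr, "Circular region list detected\n");
            return -1;
        }
    }
    return -1;                          /* not found */
}

int main(void)
{
    struct region c = { 3, NULL };
    struct region b = { 2, &c };
    struct region a = { 1, &b };
    struct region *head = &a;

    printf("remove b: %d\n", remove_region(&head, &b));

    c.next = head;                      /* deliberately corrupt: make a cycle */
    printf("remove missing: %d\n", remove_region(&head, &b));
    return 0;
}
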
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 154fdcaa5830..8e544d4688cd 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 16fb90506db7..8fed1482d228 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index 3cf7b597edb9..0403dcaabaf2 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 030cb0dc673c..f3abeae9d2f8 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,31 +55,27 @@ ACPI_MODULE_NAME("utglobal")
* Static global variable initialization.
*
******************************************************************************/
-/*
- * We want the debug switches statically initialized so they
- * are already set when the debugger is entered.
- */
-/* Debug switch - level and trace mask */
+/* Debug output control masks */
u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
-/* Debug switch - layer (component) mask */
-
u32 acpi_dbg_layer = 0;
-u32 acpi_gbl_nesting_level = 0;
-/* Debugger globals */
+/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
-u8 acpi_gbl_db_terminate_threads = FALSE;
-u8 acpi_gbl_abort_method = FALSE;
-u8 acpi_gbl_method_executing = FALSE;
+struct acpi_table_fadt acpi_gbl_FADT;
+u32 acpi_gbl_trace_flags;
+acpi_name acpi_gbl_trace_method_name;
+u8 acpi_gbl_system_awake_and_running;
+u32 acpi_current_gpe_count;
-/* System flags */
-
-u32 acpi_gbl_startup_flags = 0;
-
-/* System starts uninitialized */
+/*
+ * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
+ * that the ACPI hardware is no longer required. A flag in the FADT indicates
+ * a reduced HW machine, and that flag is duplicated here for convenience.
+ */
+u8 acpi_gbl_reduced_hardware;
-u8 acpi_gbl_shutdown = TRUE;
+/* Various state name strings */
const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
"\\_S0_",
@@ -335,7 +331,6 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_DSDT = NULL;
acpi_gbl_cm_single_step = FALSE;
- acpi_gbl_db_terminate_threads = FALSE;
acpi_gbl_shutdown = FALSE;
acpi_gbl_ns_lookup_count = 0;
acpi_gbl_ps_find_count = 0;
@@ -382,6 +377,10 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_disable_mem_tracking = FALSE;
#endif
+#ifdef ACPI_DEBUGGER
+ acpi_gbl_db_terminate_threads = FALSE;
+#endif
+
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index bfca7b4b6731..4b12880e5b11 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index c5d1ac44c07d..5f56fc49021e 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index 5c26ad420344..dc6e96547f18 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 909fe66e1934..d44dee6ee10a 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 02f9101b65e4..2e2bb14e1099 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 08c323245584..82717fff9ffc 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 517af700399d..dfa9009bfc87 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 8856bd37bc76..685766fc6ca8 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,31 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utosi")
+/******************************************************************************
+ *
+ * ACPICA policy for new _OSI strings:
+ *
+ * It is the stated policy of ACPICA that new _OSI strings will be integrated
+ * into this module as soon as possible after they are defined. It is strongly
+ * recommended that all ACPICA hosts mirror this policy and integrate any
+ * changes to this module as soon as possible. There are several historical
+ * reasons behind this policy:
+ *
+ * 1) New BIOSs tend to test only the case where the host responds TRUE to
+ * the latest version of Windows, which would respond to the latest/newest
+ * _OSI string. Not responding TRUE to the latest version of Windows will
+ * risk executing untested code paths throughout the DSDT and SSDTs.
+ *
+ * 2) If a new _OSI string is recognized only after a significant delay, this
+ * has the potential to cause problems on existing working machines because
+ * of the possibility that a new and different path through the ASL code
+ * will be executed.
+ *
+ * 3) New _OSI strings are tending to come out about once per year. A delay
+ * in recognizing a new string for a significant amount of time risks the
+ * release of another string which only compounds the initial problem.
+ *
+ *****************************************************************************/
/*
* Strings supported by the _OSI predefined control method (which is
* implemented internally within this module.)
@@ -74,6 +99,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */
{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
+ {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
/* Feature Group Strings */
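[Editor's note] The policy comment above concerns the table backing the _OSI predefined method. A host that needs an extra vendor string does not edit this table; ACPICA exposes acpi_install_interface()/acpi_remove_interface() for run-time additions. A minimal sketch follows; the "FooVendor Extensions" string and the foo_* names are illustrative only and not part of this patch.

#include <linux/acpi.h>

/* Illustrative only: make _OSI("FooVendor Extensions") answer TRUE. */
static char foo_osi_string[] = "FooVendor Extensions";

static int __init foo_install_osi_string(void)
{
        acpi_status status = acpi_install_interface(foo_osi_string);

        return ACPI_SUCCESS(status) ? 0 : -EINVAL;
}

/* Removing the string makes _OSI answer FALSE for it again. */
static void foo_remove_osi_string(void)
{
        acpi_remove_interface(foo_osi_string);
}
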
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index eb3aca761369..36bec57ebd23 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 2b1ce4cd3207..db30caff130a 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 2c2accb9e534..14cb6c0c8be2 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,8 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utresrc")
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
/*
* Strings used to decode resource descriptors.
* Used by both the disassembler and the debugger resource dump routines
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 03c4c2febd84..1cc97a752c15 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 45c0eb26b33d..77219336c7e0 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index c0027773cccb..7d0ee969d781 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -276,7 +276,8 @@ acpi_ut_free_and_track(void *allocation,
}
acpi_os_free(debug_block);
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation));
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed (block %p)\n",
+ allocation, debug_block));
return_VOID;
}
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index be322c83643a..502a8492dc83 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index f7edb88f6054..edd861102f1b 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 246ef68681f4..13380d818462 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 312299721ba1..2a0f9e04d3a4 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 3650b2183227..c4dac7150960 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -12,7 +12,7 @@ config ACPI_APEI
config ACPI_APEI_GHES
bool "APEI Generic Hardware Error Source"
- depends on ACPI_APEI && X86
+ depends on ACPI_APEI
select ACPI_HED
select IRQ_WORK
select GENERIC_ALLOCATOR
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 797a6938d051..9a2c63b20050 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -39,15 +39,13 @@
#include <linux/acpi.h>
#include <linux/power_supply.h>
+#include "battery.h"
+
#define PREFIX "ACPI: "
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
-#define ACPI_BATTERY_CLASS "battery"
#define ACPI_BATTERY_DEVICE_NAME "Battery"
-#define ACPI_BATTERY_NOTIFY_STATUS 0x80
-#define ACPI_BATTERY_NOTIFY_INFO 0x81
-#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82
/* Battery power unit: 0 means mW, 1 means mA */
#define ACPI_BATTERY_POWER_UNIT_MA 1
@@ -736,6 +734,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
acpi_battery_present(battery));
+ acpi_notifier_call_chain(device, event, acpi_battery_present(battery));
/* acpi_battery_update could remove power_supply object */
if (old && battery->bat.dev)
power_supply_changed(&battery->bat);
diff --git a/drivers/acpi/battery.h b/drivers/acpi/battery.h
new file mode 100644
index 000000000000..6c084976987d
--- /dev/null
+++ b/drivers/acpi/battery.h
@@ -0,0 +1,10 @@
+#ifndef __ACPI_BATTERY_H
+#define __ACPI_BATTERY_H
+
+#define ACPI_BATTERY_CLASS "battery"
+
+#define ACPI_BATTERY_NOTIFY_STATUS 0x80
+#define ACPI_BATTERY_NOTIFY_INFO 0x81
+#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82
+
+#endif
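[Editor's note] The battery.c hunk above also starts publishing these events through acpi_notifier_call_chain(), and the new battery.h lets other code under drivers/acpi match them by class and notify code. A minimal sketch of a listener, assuming the callback receives the struct acpi_bus_event that acpi_notifier_call_chain() builds; the foo_* names are illustrative.

#include <linux/acpi.h>
#include <linux/notifier.h>
#include <linux/string.h>
#include "battery.h"                    /* ACPI_BATTERY_CLASS and notify codes */

static int foo_battery_event(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        struct acpi_bus_event *event = data;

        if (strcmp(event->device_class, ACPI_BATTERY_CLASS))
                return NOTIFY_DONE;     /* not a battery notification */

        if (event->type == ACPI_BATTERY_NOTIFY_STATUS)
                pr_debug("battery status changed, present=%u\n", event->data);

        return NOTIFY_OK;
}

static struct notifier_block foo_battery_nb = {
        .notifier_call = foo_battery_event,
};

/* register_acpi_notifier(&foo_battery_nb) on init,
 * unregister_acpi_notifier(&foo_battery_nb) on exit. */
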
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index fcb59c21c68d..e7e5844c87d0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -311,9 +311,7 @@ static void acpi_bus_osc_support(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
#endif
-#ifdef ACPI_HOTPLUG_OST
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
-#endif
if (!ghes_disable)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
@@ -340,60 +338,77 @@ static void acpi_bus_osc_support(void)
*/
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
- struct acpi_device *device = NULL;
+ struct acpi_device *adev;
struct acpi_driver *driver;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n",
- type, handle));
+ acpi_status status;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
switch (type) {
-
case ACPI_NOTIFY_BUS_CHECK:
- /* TBD */
+ acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
break;
case ACPI_NOTIFY_DEVICE_CHECK:
- /* TBD */
+ acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
break;
case ACPI_NOTIFY_DEVICE_WAKE:
- /* TBD */
+ acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
break;
case ACPI_NOTIFY_EJECT_REQUEST:
- /* TBD */
+ acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
+ acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
/* TBD: Exactly what does 'light' mean? */
break;
case ACPI_NOTIFY_FREQUENCY_MISMATCH:
- /* TBD */
+ acpi_handle_err(handle, "Device cannot be configured due "
+ "to a frequency mismatch\n");
break;
case ACPI_NOTIFY_BUS_MODE_MISMATCH:
- /* TBD */
+ acpi_handle_err(handle, "Device cannot be configured due "
+ "to a bus mode mismatch\n");
break;
case ACPI_NOTIFY_POWER_FAULT:
- /* TBD */
+ acpi_handle_err(handle, "Device has suffered a power fault\n");
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received unknown/unsupported notification [%08x]\n",
- type));
- break;
+ acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
+ ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
+ goto err;
}
- acpi_bus_get_device(handle, &device);
- if (device) {
- driver = device->driver;
- if (driver && driver->ops.notify &&
- (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
- driver->ops.notify(device, type);
+ adev = acpi_bus_get_acpi_device(handle);
+ if (!adev)
+ goto err;
+
+ driver = adev->driver;
+ if (driver && driver->ops.notify &&
+ (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
+ driver->ops.notify(adev, type);
+
+ switch (type) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ status = acpi_hotplug_schedule(adev, type);
+ if (ACPI_SUCCESS(status))
+ return;
+ default:
+ break;
}
+ acpi_bus_put_acpi_device(adev);
+ return;
+
+ err:
+ acpi_evaluate_ost(handle, type, ost_code, NULL);
}
/* --------------------------------------------------------------------------
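[Editor's note] After this rewrite, acpi_bus_notify() names each event in the log, queues the hotplug-class events (bus check, device check, eject request) through acpi_hotplug_schedule(), and still forwards every notification to a bound acpi_driver that sets ACPI_DRIVER_ALL_NOTIFY_EVENTS. A minimal sketch of such a driver; the "FOO0001" _HID and foo_* names are purely illustrative.

#include <linux/acpi.h>
#include <linux/module.h>

static const struct acpi_device_id foo_ids[] = {
        {"FOO0001", 0},                 /* illustrative _HID */
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, foo_ids);

static int foo_add(struct acpi_device *adev)
{
        return 0;
}

static void foo_notify(struct acpi_device *adev, u32 event)
{
        dev_dbg(&adev->dev, "notification 0x%02x\n", event);
}

static struct acpi_driver foo_driver = {
        .name = "foo",
        .ids = foo_ids,
        .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, /* receive every notify value */
        .ops = {
                .add = foo_add,
                .notify = foo_notify,
        },
};
module_acpi_driver(foo_driver);
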
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 714e957a871a..db35594d4df7 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -302,6 +302,10 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
input_sync(input);
pm_wakeup_event(&device->dev, 0);
+ acpi_bus_generate_netlink_event(
+ device->pnp.device_class,
+ dev_name(&device->dev),
+ event, ++button->pushed);
}
break;
default:
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 368f9ddb8480..63119d09b354 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -31,8 +31,6 @@
#include "internal.h"
-#define PREFIX "ACPI: "
-
#define _COMPONENT ACPI_CONTAINER_COMPONENT
ACPI_MODULE_NAME("container");
@@ -68,6 +66,9 @@ static int container_device_attach(struct acpi_device *adev,
struct device *dev;
int ret;
+ if (adev->flags.is_dock_station)
+ return 0;
+
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return -ENOMEM;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c14a00d3dca6..d047739f3380 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -901,15 +901,30 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
int acpi_subsys_prepare(struct device *dev)
{
/*
- * Follow PCI and resume devices suspended at run time before running
- * their system suspend callbacks.
+ * Devices having power.ignore_children set may still be necessary for
+ * suspending their children in the next phase of device suspend.
*/
- pm_runtime_resume(dev);
+ if (dev->power.ignore_children)
+ pm_runtime_resume(dev);
+
return pm_generic_prepare(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
/**
+ * acpi_subsys_suspend - Run the device driver's suspend callback.
+ * @dev: Device to handle.
+ *
+ * Follow PCI and resume devices suspended at run time before running their
+ * system suspend callbacks.
+ */
+int acpi_subsys_suspend(struct device *dev)
+{
+ pm_runtime_resume(dev);
+ return pm_generic_suspend(dev);
+}
+
+/**
* acpi_subsys_suspend_late - Suspend device using ACPI.
* @dev: Device to suspend.
*
@@ -937,6 +952,23 @@ int acpi_subsys_resume_early(struct device *dev)
return ret ? ret : pm_generic_resume_early(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
+
+/**
+ * acpi_subsys_freeze - Run the device driver's freeze callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_freeze(struct device *dev)
+{
+ /*
+ * This used to be done in acpi_subsys_prepare() for all devices and
+ * some drivers may depend on it, so do it here. Ideally, however,
+ * runtime-suspended devices should not be touched during freeze/thaw
+ * transitions.
+ */
+ pm_runtime_resume(dev);
+ return pm_generic_freeze(dev);
+}
+
#endif /* CONFIG_PM_SLEEP */
static struct dev_pm_domain acpi_general_pm_domain = {
@@ -947,8 +979,11 @@ static struct dev_pm_domain acpi_general_pm_domain = {
#endif
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,
+ .suspend = acpi_subsys_suspend,
.suspend_late = acpi_subsys_suspend_late,
.resume_early = acpi_subsys_resume_early,
+ .freeze = acpi_subsys_freeze,
+ .poweroff = acpi_subsys_suspend,
.poweroff_late = acpi_subsys_suspend_late,
.restore_early = acpi_subsys_resume_early,
#endif
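[Editor's note] These callbacks only run for devices placed in the general ACPI PM domain. A minimal sketch of a platform driver opting in during probe via acpi_dev_pm_attach(); error handling is trimmed and the foo_* names are illustrative.

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        int ret;

        /* power_on=true also puts the device into D0 before probing */
        ret = acpi_dev_pm_attach(&pdev->dev, true);
        if (ret < 0)
                return ret;     /* no ACPI companion or no ACPI PM support */

        /* ... normal probe work; system sleep now goes through the
         * acpi_subsys_prepare/suspend/freeze callbacks shown above ... */
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        acpi_dev_pm_detach(&pdev->dev, true);
        return 0;
}
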
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 5bfd769fc91f..d9339b442a4e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -1,7 +1,9 @@
/*
* dock.c - ACPI dock station driver
*
- * Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
+ * Copyright (C) 2006, 2014, Intel Corp.
+ * Author: Kristen Carlson Accardi <kristen.c.accardi@intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -35,8 +37,6 @@
#include "internal.h"
-#define PREFIX "ACPI: "
-
#define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
ACPI_MODULE_NAME("dock");
@@ -51,12 +51,6 @@ MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
" the driver to wait for userspace to write the undock sysfs file "
" before undocking");
-static const struct acpi_device_id dock_device_ids[] = {
- {"LNXDOCK", 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, dock_device_ids);
-
struct dock_station {
acpi_handle handle;
unsigned long last_dock_time;
@@ -68,15 +62,10 @@ struct dock_station {
};
static LIST_HEAD(dock_stations);
static int dock_station_count;
-static DEFINE_MUTEX(hotplug_lock);
struct dock_dependent_device {
struct list_head list;
- acpi_handle handle;
- const struct acpi_dock_ops *hp_ops;
- void *hp_context;
- unsigned int hp_refcount;
- void (*hp_release)(void *);
+ struct acpi_device *adev;
};
#define DOCK_DOCKING 0x00000001
@@ -98,13 +87,13 @@ enum dock_callback_type {
*****************************************************************************/
/**
* add_dock_dependent_device - associate a device with the dock station
- * @ds: The dock station
- * @handle: handle of the dependent device
+ * @ds: Dock station.
+ * @adev: Dependent ACPI device object.
*
* Add the dependent device to the dock's dependent device list.
*/
-static int __init
-add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
+static int add_dock_dependent_device(struct dock_station *ds,
+ struct acpi_device *adev)
{
struct dock_dependent_device *dd;
@@ -112,180 +101,120 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
if (!dd)
return -ENOMEM;
- dd->handle = handle;
+ dd->adev = adev;
INIT_LIST_HEAD(&dd->list);
list_add_tail(&dd->list, &ds->dependent_devices);
return 0;
}
-static void remove_dock_dependent_devices(struct dock_station *ds)
+static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
+ enum dock_callback_type cb_type)
{
- struct dock_dependent_device *dd, *aux;
+ struct acpi_device *adev = dd->adev;
- list_for_each_entry_safe(dd, aux, &ds->dependent_devices, list) {
- list_del(&dd->list);
- kfree(dd);
- }
-}
+ acpi_lock_hp_context();
-/**
- * dock_init_hotplug - Initialize a hotplug device on a docking station.
- * @dd: Dock-dependent device.
- * @ops: Dock operations to attach to the dependent device.
- * @context: Data to pass to the @ops callbacks and @release.
- * @init: Optional initialization routine to run after setting up context.
- * @release: Optional release routine to run on removal.
- */
-static int dock_init_hotplug(struct dock_dependent_device *dd,
- const struct acpi_dock_ops *ops, void *context,
- void (*init)(void *), void (*release)(void *))
-{
- int ret = 0;
+ if (!adev->hp)
+ goto out;
- mutex_lock(&hotplug_lock);
- if (WARN_ON(dd->hp_context)) {
- ret = -EEXIST;
- } else {
- dd->hp_refcount = 1;
- dd->hp_ops = ops;
- dd->hp_context = context;
- dd->hp_release = release;
- if (init)
- init(context);
- }
- mutex_unlock(&hotplug_lock);
- return ret;
-}
+ if (cb_type == DOCK_CALL_FIXUP) {
+ void (*fixup)(struct acpi_device *);
-/**
- * dock_release_hotplug - Decrement hotplug reference counter of dock device.
- * @dd: Dock-dependent device.
- *
- * Decrement the reference counter of @dd and if 0, detach its hotplug
- * operations from it, reset its context pointer and run the optional release
- * routine if present.
- */
-static void dock_release_hotplug(struct dock_dependent_device *dd)
-{
- mutex_lock(&hotplug_lock);
- if (dd->hp_context && !--dd->hp_refcount) {
- void (*release)(void *) = dd->hp_release;
- void *context = dd->hp_context;
-
- dd->hp_ops = NULL;
- dd->hp_context = NULL;
- dd->hp_release = NULL;
- if (release)
- release(context);
- }
- mutex_unlock(&hotplug_lock);
-}
+ fixup = adev->hp->fixup;
+ if (fixup) {
+ acpi_unlock_hp_context();
+ fixup(adev);
+ return;
+ }
+ } else if (cb_type == DOCK_CALL_UEVENT) {
+ void (*uevent)(struct acpi_device *, u32);
+
+ uevent = adev->hp->uevent;
+ if (uevent) {
+ acpi_unlock_hp_context();
+ uevent(adev, event);
+ return;
+ }
+ } else {
+ int (*notify)(struct acpi_device *, u32);
-static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
- enum dock_callback_type cb_type)
-{
- acpi_notify_handler cb = NULL;
- bool run = false;
-
- mutex_lock(&hotplug_lock);
-
- if (dd->hp_context) {
- run = true;
- dd->hp_refcount++;
- if (dd->hp_ops) {
- switch (cb_type) {
- case DOCK_CALL_FIXUP:
- cb = dd->hp_ops->fixup;
- break;
- case DOCK_CALL_UEVENT:
- cb = dd->hp_ops->uevent;
- break;
- default:
- cb = dd->hp_ops->handler;
- }
+ notify = adev->hp->notify;
+ if (notify) {
+ acpi_unlock_hp_context();
+ notify(adev, event);
+ return;
}
}
- mutex_unlock(&hotplug_lock);
+ out:
+ acpi_unlock_hp_context();
+}
- if (!run)
- return;
+static struct dock_station *find_dock_station(acpi_handle handle)
+{
+ struct dock_station *ds;
- if (cb)
- cb(dd->handle, event, dd->hp_context);
+ list_for_each_entry(ds, &dock_stations, sibling)
+ if (ds->handle == handle)
+ return ds;
- dock_release_hotplug(dd);
+ return NULL;
}
/**
* find_dock_dependent_device - get a device dependent on this dock
* @ds: the dock station
- * @handle: the acpi_handle of the device we want
+ * @adev: ACPI device object to find.
*
* iterate over the dependent device list for this dock. If the
* dependent device matches the handle, return.
*/
static struct dock_dependent_device *
-find_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
+find_dock_dependent_device(struct dock_station *ds, struct acpi_device *adev)
{
struct dock_dependent_device *dd;
list_for_each_entry(dd, &ds->dependent_devices, list)
- if (handle == dd->handle)
+ if (adev == dd->adev)
return dd;
return NULL;
}
-/*****************************************************************************
- * Dock functions *
- *****************************************************************************/
-static int __init is_battery(acpi_handle handle)
+void register_dock_dependent_device(struct acpi_device *adev,
+ acpi_handle dshandle)
{
- struct acpi_device_info *info;
- int ret = 1;
+ struct dock_station *ds = find_dock_station(dshandle);
- if (!ACPI_SUCCESS(acpi_get_object_info(handle, &info)))
- return 0;
- if (!(info->valid & ACPI_VALID_HID))
- ret = 0;
- else
- ret = !strcmp("PNP0C0A", info->hardware_id.string);
-
- kfree(info);
- return ret;
+ if (ds && !find_dock_dependent_device(ds, adev))
+ add_dock_dependent_device(ds, adev);
}
-/* Check whether ACPI object is an ejectable battery or disk bay */
-static bool __init is_ejectable_bay(acpi_handle handle)
-{
- if (acpi_has_method(handle, "_EJ0") && is_battery(handle))
- return true;
-
- return acpi_bay_match(handle);
-}
+/*****************************************************************************
+ * Dock functions *
+ *****************************************************************************/
/**
* is_dock_device - see if a device is on a dock station
- * @handle: acpi handle of the device
+ * @adev: ACPI device object to check.
*
* If this device is either the dock station itself,
* or is a device dependent on the dock station, then it
* is a dock device
*/
-int is_dock_device(acpi_handle handle)
+int is_dock_device(struct acpi_device *adev)
{
struct dock_station *dock_station;
if (!dock_station_count)
return 0;
- if (acpi_dock_match(handle))
+ if (acpi_dock_match(adev->handle))
return 1;
list_for_each_entry(dock_station, &dock_stations, sibling)
- if (find_dock_dependent_device(dock_station, handle))
+ if (find_dock_dependent_device(dock_station, adev))
return 1;
return 0;
@@ -313,43 +242,6 @@ static int dock_present(struct dock_station *ds)
}
/**
- * dock_create_acpi_device - add new devices to acpi
- * @handle - handle of the device to add
- *
- * This function will create a new acpi_device for the given
- * handle if one does not exist already. This should cause
- * acpi to scan for drivers for the given devices, and call
- * matching driver's add routine.
- */
-static void dock_create_acpi_device(acpi_handle handle)
-{
- struct acpi_device *device = NULL;
- int ret;
-
- acpi_bus_get_device(handle, &device);
- if (!acpi_device_enumerated(device)) {
- ret = acpi_bus_scan(handle);
- if (ret)
- pr_debug("error adding bus, %x\n", -ret);
- }
-}
-
-/**
- * dock_remove_acpi_device - remove the acpi_device struct from acpi
- * @handle - the handle of the device to remove
- *
- * Tell acpi to remove the acpi_device. This should cause any loaded
- * driver to have it's remove routine called.
- */
-static void dock_remove_acpi_device(acpi_handle handle)
-{
- struct acpi_device *device;
-
- if (!acpi_bus_get_device(handle, &device))
- acpi_bus_trim(device);
-}
-
-/**
* hot_remove_dock_devices - Remove dock station devices.
* @ds: Dock station.
*/
@@ -366,7 +258,7 @@ static void hot_remove_dock_devices(struct dock_station *ds)
dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, false);
list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
- dock_remove_acpi_device(dd->handle);
+ acpi_bus_trim(dd->adev);
}
/**
@@ -392,12 +284,20 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
dock_hotplug_event(dd, event, DOCK_CALL_HANDLER);
/*
- * Now make sure that an acpi_device is created for each dependent
- * device. That will cause scan handlers to be attached to device
- * objects or acpi_drivers to be stopped/started if they are present.
+ * Check if all devices have been enumerated already. If not, run
+ * acpi_bus_scan() for them and that will cause scan handlers to be
+ * attached to device objects or acpi_drivers to be stopped/started if
+ * they are present.
*/
- list_for_each_entry(dd, &ds->dependent_devices, list)
- dock_create_acpi_device(dd->handle);
+ list_for_each_entry(dd, &ds->dependent_devices, list) {
+ struct acpi_device *adev = dd->adev;
+
+ if (!acpi_device_enumerated(adev)) {
+ int ret = acpi_bus_scan(adev->handle);
+ if (ret)
+ dev_dbg(&adev->dev, "scan error %d\n", -ret);
+ }
+ }
}
static void dock_event(struct dock_station *ds, u32 event, int num)
@@ -501,71 +401,6 @@ static int dock_in_progress(struct dock_station *ds)
}
/**
- * register_hotplug_dock_device - register a hotplug function
- * @handle: the handle of the device
- * @ops: handlers to call after docking
- * @context: device specific data
- * @init: Optional initialization routine to run after registration
- * @release: Optional release routine to run on unregistration
- *
- * If a driver would like to perform a hotplug operation after a dock
- * event, they can register an acpi_notifiy_handler to be called by
- * the dock driver after _DCK is executed.
- */
-int register_hotplug_dock_device(acpi_handle handle,
- const struct acpi_dock_ops *ops, void *context,
- void (*init)(void *), void (*release)(void *))
-{
- struct dock_dependent_device *dd;
- struct dock_station *dock_station;
- int ret = -EINVAL;
-
- if (WARN_ON(!context))
- return -EINVAL;
-
- if (!dock_station_count)
- return -ENODEV;
-
- /*
- * make sure this handle is for a device dependent on the dock,
- * this would include the dock station itself
- */
- list_for_each_entry(dock_station, &dock_stations, sibling) {
- /*
- * An ATA bay can be in a dock and itself can be ejected
- * separately, so there are two 'dock stations' which need the
- * ops
- */
- dd = find_dock_dependent_device(dock_station, handle);
- if (dd && !dock_init_hotplug(dd, ops, context, init, release))
- ret = 0;
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
-
-/**
- * unregister_hotplug_dock_device - remove yourself from the hotplug list
- * @handle: the acpi handle of the device
- */
-void unregister_hotplug_dock_device(acpi_handle handle)
-{
- struct dock_dependent_device *dd;
- struct dock_station *dock_station;
-
- if (!dock_station_count)
- return;
-
- list_for_each_entry(dock_station, &dock_stations, sibling) {
- dd = find_dock_dependent_device(dock_station, handle);
- if (dd)
- dock_release_hotplug(dd);
- }
-}
-EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
-
-/**
* handle_eject_request - handle an undock request checking for error conditions
*
* Check to make sure the dock device is still present, then undock and
@@ -598,20 +433,23 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
}
/**
- * dock_notify - act upon an acpi dock notification
- * @ds: dock station
- * @event: the acpi event
+ * dock_notify - Handle ACPI dock notification.
+ * @adev: Dock station's ACPI device object.
+ * @event: Event code.
*
* If we are notified to dock, then check to see if the dock is
* present and then dock. Notify all drivers of the dock event,
* and then hotplug and devices that may need hotplugging.
*/
-static void dock_notify(struct dock_station *ds, u32 event)
+int dock_notify(struct acpi_device *adev, u32 event)
{
- acpi_handle handle = ds->handle;
- struct acpi_device *adev = NULL;
+ acpi_handle handle = adev->handle;
+ struct dock_station *ds = find_dock_station(handle);
int surprise_removal = 0;
+ if (!ds)
+ return -ENODEV;
+
/*
* According to acpi spec 3.0a, if a DEVICE_CHECK notification
* is sent and _DCK is present, it is assumed to mean an undock
@@ -632,7 +470,6 @@ static void dock_notify(struct dock_station *ds, u32 event)
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
- acpi_bus_get_device(handle, &adev);
if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
begin_dock(ds);
dock(ds);
@@ -662,49 +499,8 @@ static void dock_notify(struct dock_station *ds, u32 event)
else
dock_event(ds, event, UNDOCK_EVENT);
break;
- default:
- acpi_handle_err(handle, "Unknown dock event %d\n", event);
}
-}
-
-static void acpi_dock_deferred_cb(void *data, u32 event)
-{
- acpi_scan_lock_acquire();
- dock_notify(data, event);
- acpi_scan_lock_release();
-}
-
-static void dock_notify_handler(acpi_handle handle, u32 event, void *data)
-{
- if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
- && event != ACPI_NOTIFY_EJECT_REQUEST)
- return;
-
- acpi_hotplug_execute(acpi_dock_deferred_cb, data, event);
-}
-
-/**
- * find_dock_devices - find devices on the dock station
- * @handle: the handle of the device we are examining
- * @lvl: unused
- * @context: the dock station private data
- * @rv: unused
- *
- * This function is called by acpi_walk_namespace. It will
- * check to see if an object has an _EJD method. If it does, then it
- * will see if it is dependent on the dock station.
- */
-static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
- void *context, void **rv)
-{
- struct dock_station *ds = context;
- acpi_handle ejd = NULL;
-
- acpi_bus_get_ejd(handle, &ejd);
- if (ejd == ds->handle)
- add_dock_dependent_device(ds, handle);
-
- return AE_OK;
+ return 0;
}
/*
@@ -803,23 +599,28 @@ static struct attribute_group dock_attribute_group = {
};
/**
- * dock_add - add a new dock station
- * @handle: the dock station handle
+ * acpi_dock_add - Add a new dock station
+ * @adev: Dock station ACPI device object.
*
- * allocated and initialize a new dock station device. Find all devices
- * that are on the dock station, and register for dock event notifications.
+ * Allocate and initialize a new dock station device.

*/
-static int __init dock_add(acpi_handle handle)
+void acpi_dock_add(struct acpi_device *adev)
{
struct dock_station *dock_station, ds = { NULL, };
+ struct platform_device_info pdevinfo;
+ acpi_handle handle = adev->handle;
struct platform_device *dd;
- acpi_status status;
int ret;
- dd = platform_device_register_data(NULL, "dock", dock_station_count,
- &ds, sizeof(ds));
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.name = "dock";
+ pdevinfo.id = dock_station_count;
+ pdevinfo.acpi_node.companion = adev;
+ pdevinfo.data = &ds;
+ pdevinfo.size_data = sizeof(ds);
+ dd = platform_device_register_full(&pdevinfo);
if (IS_ERR(dd))
- return PTR_ERR(dd);
+ return;
dock_station = dd->dev.platform_data;
@@ -837,72 +638,29 @@ static int __init dock_add(acpi_handle handle)
dock_station->flags |= DOCK_IS_DOCK;
if (acpi_ata_match(handle))
dock_station->flags |= DOCK_IS_ATA;
- if (is_battery(handle))
+ if (acpi_device_is_battery(adev))
dock_station->flags |= DOCK_IS_BAT;
ret = sysfs_create_group(&dd->dev.kobj, &dock_attribute_group);
if (ret)
goto err_unregister;
- /* Find dependent devices */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_dock_devices, NULL,
- dock_station, NULL);
-
/* add the dock station as a device dependent on itself */
- ret = add_dock_dependent_device(dock_station, handle);
+ ret = add_dock_dependent_device(dock_station, adev);
if (ret)
goto err_rmgroup;
- status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- dock_notify_handler, dock_station);
- if (ACPI_FAILURE(status)) {
- ret = -ENODEV;
- goto err_rmgroup;
- }
-
dock_station_count++;
list_add(&dock_station->sibling, &dock_stations);
- return 0;
+ adev->flags.is_dock_station = true;
+ dev_info(&adev->dev, "ACPI dock station (docks/bays count: %d)\n",
+ dock_station_count);
+ return;
err_rmgroup:
- remove_dock_dependent_devices(dock_station);
sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
+
err_unregister:
platform_device_unregister(dd);
acpi_handle_err(handle, "%s encountered error %d\n", __func__, ret);
- return ret;
-}
-
-/**
- * find_dock_and_bay - look for dock stations and bays
- * @handle: acpi handle of a device
- * @lvl: unused
- * @context: unused
- * @rv: unused
- *
- * This is called by acpi_walk_namespace to look for dock stations and bays.
- */
-static acpi_status __init
-find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- if (acpi_dock_match(handle) || is_ejectable_bay(handle))
- dock_add(handle);
-
- return AE_OK;
-}
-
-void __init acpi_dock_init(void)
-{
- /* look for dock stations and bays */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_dock_and_bay, NULL, NULL, NULL);
-
- if (!dock_station_count) {
- pr_info(PREFIX "No dock devices found.\n");
- return;
- }
-
- pr_info(PREFIX "%s: %d docks/bays found\n",
- ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
}
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 09e423f3d8ad..8acf53e62966 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -55,11 +55,16 @@ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev);
static int acpi_fan_resume(struct device *dev);
+static struct dev_pm_ops acpi_fan_pm = {
+ .resume = acpi_fan_resume,
+ .freeze = acpi_fan_suspend,
+ .thaw = acpi_fan_resume,
+ .restore = acpi_fan_resume,
+};
+#define FAN_PM_OPS_PTR (&acpi_fan_pm)
#else
-#define acpi_fan_suspend NULL
-#define acpi_fan_resume NULL
+#define FAN_PM_OPS_PTR NULL
#endif
-static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
static struct acpi_driver acpi_fan_driver = {
.name = "fan",
@@ -69,7 +74,7 @@ static struct acpi_driver acpi_fan_driver = {
.add = acpi_fan_add,
.remove = acpi_fan_remove,
},
- .drv.pm = &acpi_fan_pm,
+ .drv.pm = FAN_PM_OPS_PTR,
};
/* thermal cooling device callbacks */
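[Editor's note] For comparison, SIMPLE_DEV_PM_OPS() would have wired acpi_fan_suspend() into every system sleep phase; paraphrasing include/linux/pm.h with CONFIG_PM_SLEEP set, it expands to roughly the table below, which is presumably why the driver now builds acpi_fan_pm by hand and leaves .suspend and .poweroff unset.

/* Approximate macro expansion, for comparison with the hand-built table: */
static const struct dev_pm_ops acpi_fan_pm_via_macro = {
        .suspend  = acpi_fan_suspend,   /* no longer wanted */
        .resume   = acpi_fan_resume,
        .freeze   = acpi_fan_suspend,
        .thaw     = acpi_fan_resume,
        .poweroff = acpi_fan_suspend,   /* no longer wanted */
        .restore  = acpi_fan_resume,
};
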
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 0c789224d40d..f774c65ecb8b 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -287,6 +287,7 @@ EXPORT_SYMBOL_GPL(acpi_unbind_one);
static int acpi_platform_notify(struct device *dev)
{
struct acpi_bus_type *type = acpi_get_bus_type(dev);
+ struct acpi_device *adev;
int ret;
ret = acpi_bind_one(dev, NULL);
@@ -303,9 +304,14 @@ static int acpi_platform_notify(struct device *dev)
if (ret)
goto out;
}
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ goto out;
if (type && type->setup)
type->setup(dev);
+ else if (adev->handler && adev->handler->bind)
+ adev->handler->bind(dev);
out:
#if ACPI_GLUE_DEBUG
@@ -324,11 +330,17 @@ static int acpi_platform_notify(struct device *dev)
static int acpi_platform_notify_remove(struct device *dev)
{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_bus_type *type;
+ if (!adev)
+ return 0;
+
type = acpi_get_bus_type(dev);
if (type && type->cleanup)
type->cleanup(dev);
+ else if (adev->handler && adev->handler->unbind)
+ adev->handler->unbind(dev);
acpi_unbind_one(dev);
return 0;
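[Editor's note] The fallback added above means an acpi_scan_handler can now react when the physical device behind one of its ACPI device objects is glued to it or torn away, without registering an acpi_bus_type. A minimal sketch (ACPI-core internal, since acpi_scan_add_handler() is declared in internal.h); the "FOO0001" id and foo_* names are illustrative.

#include <linux/acpi.h>
#include "internal.h"                   /* acpi_scan_add_handler() */

static const struct acpi_device_id foo_device_ids[] = {
        {"FOO0001", 0},
        {"", 0},
};

static int foo_attach(struct acpi_device *adev,
                      const struct acpi_device_id *id)
{
        return 1;                       /* claim this ACPI device object */
}

static void foo_bind(struct device *dev)
{
        /* called from acpi_platform_notify() once dev has its companion */
        dev_dbg(dev, "bound to ACPI companion\n");
}

static void foo_unbind(struct device *dev)
{
        dev_dbg(dev, "ACPI companion going away\n");
}

static struct acpi_scan_handler foo_handler = {
        .ids    = foo_device_ids,
        .attach = foo_attach,
        .bind   = foo_bind,
        .unbind = foo_unbind,
};
/* acpi_scan_add_handler(&foo_handler) before the namespace scan */
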
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index dedbb2d802f1..957391306cbf 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -37,9 +37,15 @@ void acpi_container_init(void);
static inline void acpi_container_init(void) {}
#endif
#ifdef CONFIG_ACPI_DOCK
-void acpi_dock_init(void);
+void register_dock_dependent_device(struct acpi_device *adev,
+ acpi_handle dshandle);
+int dock_notify(struct acpi_device *adev, u32 event);
+void acpi_dock_add(struct acpi_device *adev);
#else
-static inline void acpi_dock_init(void) {}
+static inline void register_dock_dependent_device(struct acpi_device *adev,
+ acpi_handle dshandle) {}
+static inline int dock_notify(struct acpi_device *adev, u32 event) { return -ENODEV; }
+static inline void acpi_dock_add(struct acpi_device *adev) {}
#endif
#ifdef CONFIG_ACPI_HOTPLUG_MEMORY
void acpi_memory_hotplug_init(void);
@@ -72,7 +78,9 @@ void acpi_lpss_init(void);
static inline void acpi_lpss_init(void) {}
#endif
+acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src);
bool acpi_queue_hotplug_work(struct work_struct *work);
+void acpi_device_hotplug(struct acpi_device *adev, u32 src);
bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
/* --------------------------------------------------------------------------
@@ -90,6 +98,7 @@ void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
int acpi_bind_one(struct device *dev, struct acpi_device *adev);
int acpi_unbind_one(struct device *dev);
bool acpi_device_is_present(struct acpi_device *adev);
+bool acpi_device_is_battery(struct acpi_device *adev);
/* --------------------------------------------------------------------------
Power Resource
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 9e6816ef280a..24b5476449a1 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -60,7 +60,7 @@ int node_to_pxm(int node)
return node_to_pxm_map[node];
}
-void __acpi_map_pxm_to_node(int pxm, int node)
+static void __acpi_map_pxm_to_node(int pxm, int node)
{
if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
pxm_to_node_map[pxm] = node;
@@ -193,7 +193,7 @@ static int __init acpi_parse_slit(struct acpi_table_header *table)
return 0;
}
-void __init __attribute__ ((weak))
+void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
printk(KERN_WARNING PREFIX
@@ -314,7 +314,7 @@ int __init acpi_numa_init(void)
return 0;
}
-int acpi_get_pxm(acpi_handle h)
+static int acpi_get_pxm(acpi_handle h)
{
unsigned long long pxm;
acpi_status status;
@@ -331,14 +331,14 @@ int acpi_get_pxm(acpi_handle h)
return -1;
}
-int acpi_get_node(acpi_handle *handle)
+int acpi_get_node(acpi_handle handle)
{
- int pxm, node = NUMA_NO_NODE;
+ int pxm;
pxm = acpi_get_pxm(handle);
- if (pxm >= 0 && pxm < MAX_PXM_DOMAINS)
- node = acpi_map_pxm_to_node(pxm);
+ if (pxm < 0 || pxm >= MAX_PXM_DOMAINS)
+ return NUMA_NO_NODE;
- return node;
+ return acpi_map_pxm_to_node(pxm);
}
EXPORT_SYMBOL(acpi_get_node);
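[Editor's note] With the new signature the usual pattern becomes a plain call plus a NUMA_NO_NODE check, as in this short sketch (foo_* is illustrative):

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/numa.h>

static void foo_set_device_node(struct device *dev, acpi_handle handle)
{
        int node = acpi_get_node(handle);  /* _PXM -> node, or NUMA_NO_NODE */

        if (node != NUMA_NO_NODE)
                set_dev_node(dev, node);
}
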
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index fc1aa7909690..6776c599816f 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -52,7 +52,7 @@
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
-#define PREFIX "ACPI: "
+
struct acpi_os_dpc {
acpi_osd_exec_callback function;
void *context;
@@ -1168,8 +1168,7 @@ void acpi_os_wait_events_complete(void)
struct acpi_hp_work {
struct work_struct work;
- acpi_hp_callback func;
- void *data;
+ struct acpi_device *adev;
u32 src;
};
@@ -1178,25 +1177,24 @@ static void acpi_hotplug_work_fn(struct work_struct *work)
struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
acpi_os_wait_events_complete();
- hpw->func(hpw->data, hpw->src);
+ acpi_device_hotplug(hpw->adev, hpw->src);
kfree(hpw);
}
-acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src)
+acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
struct acpi_hp_work *hpw;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Scheduling function [%p(%p, %u)] for deferred execution.\n",
- func, data, src));
+ "Scheduling hotplug event (%p, %u) for deferred execution.\n",
+ adev, src));
hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
if (!hpw)
return AE_NO_MEMORY;
INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
- hpw->func = func;
- hpw->data = data;
+ hpw->adev = adev;
hpw->src = src;
/*
* We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
@@ -1221,10 +1219,9 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
struct semaphore *sem = NULL;
- sem = acpi_os_allocate(sizeof(struct semaphore));
+ sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
if (!sem)
return AE_NO_MEMORY;
- memset(sem, 0, sizeof(struct semaphore));
sema_init(sem, initial_units);
@@ -1539,17 +1536,21 @@ static int __init osi_setup(char *str)
__setup("acpi_osi=", osi_setup);
-/* enable serialization to combat AE_ALREADY_EXISTS errors */
-static int __init acpi_serialize_setup(char *str)
+/*
+ * Disable the auto-serialization of named objects creation methods.
+ *
+ * This feature is enabled by default. It marks the AML control methods
+ * that contain the opcodes to create named objects as "Serialized".
+ */
+static int __init acpi_no_auto_serialize_setup(char *str)
{
- printk(KERN_INFO PREFIX "serialize enabled\n");
-
- acpi_gbl_all_methods_serialized = TRUE;
+ acpi_gbl_auto_serialize_methods = FALSE;
+ pr_info("ACPI: auto-serialization disabled\n");
return 1;
}
-__setup("acpi_serialize", acpi_serialize_setup);
+__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
/* Check of resource interference between native drivers and ACPI
* OperationRegions (SystemIO and System Memory only).
@@ -1780,6 +1781,17 @@ static int __init acpi_no_auto_ssdt_setup(char *s)
__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
+static int __init acpi_disable_return_repair(char *s)
+{
+ printk(KERN_NOTICE PREFIX
+ "ACPI: Predefined validation mechanism disabled\n");
+ acpi_gbl_disable_auto_repair = TRUE;
+
+ return 1;
+}
+
+__setup("acpica_no_return_repair", acpi_disable_return_repair);
+
acpi_status __init acpi_os_initialize(void)
{
acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
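[Editor's note] In boot-parameter terms, the osl.c hunks above replace acpi_serialize with acpi_no_auto_serialize and add acpica_no_return_repair, so the opt-outs now read:

        acpi_no_auto_serialize          # disable auto-serialization of named-object-creating methods
        acpica_no_return_repair         # disable repair of predefined method return values

Both switches simply flip the corresponding acpi_gbl_* globals shown in the hunks above.
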
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 361b40c10c3f..9c62340c2360 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -370,6 +370,30 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
return NULL;
}
+#if IS_ENABLED(CONFIG_ISA) || IS_ENABLED(CONFIG_EISA)
+static int acpi_isa_register_gsi(struct pci_dev *dev)
+{
+ u32 dev_gsi;
+
+ /* Interrupt Line values above 0xF are forbidden */
+ if (dev->irq > 0 && (dev->irq <= 0xF) &&
+ (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
+ dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
+ pin_name(dev->pin), dev->irq);
+ acpi_register_gsi(&dev->dev, dev_gsi,
+ ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_LOW);
+ return 0;
+ }
+ return -EINVAL;
+}
+#else
+static inline int acpi_isa_register_gsi(struct pci_dev *dev)
+{
+ return -ENODEV;
+}
+#endif
+
int acpi_pci_irq_enable(struct pci_dev *dev)
{
struct acpi_prt_entry *entry;
@@ -416,19 +440,9 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
* driver reported one, then use it. Exit in any case.
*/
if (gsi < 0) {
- u32 dev_gsi;
- /* Interrupt Line values above 0xF are forbidden */
- if (dev->irq > 0 && (dev->irq <= 0xF) &&
- (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
- dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
- pin_name(pin), dev->irq);
- acpi_register_gsi(&dev->dev, dev_gsi,
- ACPI_LEVEL_SENSITIVE,
- ACPI_ACTIVE_LOW);
- } else {
+ if (acpi_isa_register_gsi(dev))
dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
pin_name(pin));
- }
kfree(entry);
return 0;
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 9418c7a1f786..cfd7581cc19f 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -43,8 +43,6 @@
#include "internal.h"
-#define PREFIX "ACPI: "
-
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_link");
#define ACPI_PCI_LINK_CLASS "pci_irq_routing"
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index c1c4102e6478..d388f13d48b4 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -39,8 +39,6 @@
#include "internal.h"
-#define PREFIX "ACPI: "
-
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_root");
#define ACPI_PCI_ROOT_CLASS "pci_bridge"
@@ -51,7 +49,7 @@ static void acpi_pci_root_remove(struct acpi_device *device);
static int acpi_pci_root_scan_dependent(struct acpi_device *adev)
{
- acpiphp_check_host_bridge(adev->handle);
+ acpiphp_check_host_bridge(adev);
return 0;
}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index ad7da686e6e6..e0bcfb642b52 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -46,8 +46,6 @@
#include "sleep.h"
#include "internal.h"
-#define PREFIX "ACPI: "
-
#define _COMPONENT ACPI_POWER_COMPONENT
ACPI_MODULE_NAME("power");
#define ACPI_POWER_CLASS "power_resource"
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index a4eea9a508d3..71e2065639a6 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -15,28 +15,9 @@
#include "internal.h"
-#define PREFIX "ACPI: "
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");
-static int __init set_no_mwait(const struct dmi_system_id *id)
-{
- printk(KERN_NOTICE PREFIX "%s detected - "
- "disabling mwait for CPU C-states\n", id->ident);
- boot_option_idle_override = IDLE_NOMWAIT;
- return 0;
-}
-
-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
- {
- set_no_mwait, "Extensa 5220", {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
- DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
- {},
-};
-
static int map_lapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, int *apic_id)
{
@@ -323,7 +304,7 @@ static struct acpi_object_list *acpi_processor_alloc_pdc(void)
* _PDC is required for a BIOS-OS handshake for most of the newer
* ACPI processor features.
*/
-static int
+static acpi_status
acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
{
acpi_status status = AE_OK;
@@ -379,16 +360,43 @@ early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
-void __init acpi_early_processor_set_pdc(void)
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+static int __init set_no_mwait(const struct dmi_system_id *id)
+{
+ pr_notice(PREFIX "%s detected - disabling mwait for CPU C-states\n",
+ id->ident);
+ boot_option_idle_override = IDLE_NOMWAIT;
+ return 0;
+}
+
+static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
+ {
+ set_no_mwait, "Extensa 5220", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
+ {},
+};
+
+static void __init processor_dmi_check(void)
{
/*
* Check whether the system is DMI table. If yes, OSPM
* should not use mwait for CPU-states.
*/
dmi_check_system(processor_idle_dmi_table);
+}
+#else
+static inline void processor_dmi_check(void) {}
+#endif
+
+void __init acpi_early_processor_set_pdc(void)
+{
+ processor_dmi_check();
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
early_init_pdc, NULL, NULL, NULL);
- acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
}
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index c1c35623550f..7f70f3182d50 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -41,8 +41,6 @@
#include "internal.h"
-#define PREFIX "ACPI: "
-
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER 0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index ff90054f04fd..cfc8aba72f86 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -156,17 +156,9 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
*/
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
- union acpi_object params[2] = {
- {.type = ACPI_TYPE_INTEGER,},
- {.type = ACPI_TYPE_INTEGER,},
- };
- struct acpi_object_list arg_list = {2, params};
-
- if (acpi_has_method(handle, "_OST")) {
- params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
- params[1].integer.value = status;
- acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
- }
+ if (acpi_has_method(handle, "_OST"))
+ acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
+ status, NULL);
}
int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index dbd48498b938..366ca40a6f70 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -37,12 +37,12 @@
#include <linux/power_supply.h>
#include "sbshc.h"
+#include "battery.h"
#define PREFIX "ACPI: "
#define ACPI_SBS_CLASS "sbs"
#define ACPI_AC_CLASS "ac_adapter"
-#define ACPI_BATTERY_CLASS "battery"
#define ACPI_SBS_DEVICE_NAME "Smart Battery System"
#define ACPI_SBS_FILE_INFO "info"
#define ACPI_SBS_FILE_STATE "state"
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 57b053f424d1..7efe546a8c42 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -41,6 +41,7 @@ static DEFINE_MUTEX(acpi_scan_lock);
static LIST_HEAD(acpi_scan_handlers_list);
DEFINE_MUTEX(acpi_device_lock);
LIST_HEAD(acpi_wakeup_device_list);
+static DEFINE_MUTEX(acpi_hp_context_lock);
struct acpi_device_bus_id{
char bus_id[15];
@@ -60,6 +61,27 @@ void acpi_scan_lock_release(void)
}
EXPORT_SYMBOL_GPL(acpi_scan_lock_release);
+void acpi_lock_hp_context(void)
+{
+ mutex_lock(&acpi_hp_context_lock);
+}
+
+void acpi_unlock_hp_context(void)
+{
+ mutex_unlock(&acpi_hp_context_lock);
+}
+
+void acpi_initialize_hp_context(struct acpi_device *adev,
+ struct acpi_hotplug_context *hp,
+ int (*notify)(struct acpi_device *, u32),
+ void (*uevent)(struct acpi_device *, u32))
+{
+ acpi_lock_hp_context();
+ acpi_set_hp_context(adev, hp, notify, uevent, NULL);
+ acpi_unlock_hp_context();
+}
+EXPORT_SYMBOL_GPL(acpi_initialize_hp_context);
+
int acpi_scan_add_handler(struct acpi_scan_handler *handler)
{
if (!handler || !handler->attach)
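[Editor's note] acpi_initialize_hp_context() is the registration side of the hotplug contexts consumed by dock.c above and by acpi_device_hotplug() below. A minimal sketch of a user embedding the context in its own per-device data (acpiphp is the in-tree consumer this series targets); the foo_* names are illustrative.

#include <linux/acpi.h>
#include <linux/kernel.h>

struct foo_slot {
        struct acpi_hotplug_context hp;
        /* ... subsystem-specific state ... */
};

static int foo_hotplug_notify(struct acpi_device *adev, u32 event)
{
        struct foo_slot *slot = container_of(adev->hp, struct foo_slot, hp);

        dev_dbg(&adev->dev, "event 0x%02x for slot %p\n", event, slot);
        /* rescan or eject the slot; returning 0 lets _OST report success */
        return 0;
}

static void foo_hotplug_uevent(struct acpi_device *adev, u32 event)
{
        /* forward dock/undock events to user space if desired */
}

static void foo_register_slot(struct acpi_device *adev, struct foo_slot *slot)
{
        acpi_initialize_hp_context(adev, &slot->hp, foo_hotplug_notify,
                                   foo_hotplug_uevent);
}
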
@@ -439,90 +461,75 @@ static int acpi_scan_bus_check(struct acpi_device *adev)
return 0;
}
-static void acpi_device_hotplug(void *data, u32 src)
+static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type)
{
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
- struct acpi_device *adev = data;
- int error;
-
- lock_device_hotplug();
- mutex_lock(&acpi_scan_lock);
-
- /*
- * The device object's ACPI handle cannot become invalid as long as we
- * are holding acpi_scan_lock, but it may have become invalid before
- * that lock was acquired.
- */
- if (adev->handle == INVALID_ACPI_HANDLE)
- goto out;
-
- switch (src) {
+ switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
- error = acpi_scan_bus_check(adev);
- break;
+ return acpi_scan_bus_check(adev);
case ACPI_NOTIFY_DEVICE_CHECK:
- error = acpi_scan_device_check(adev);
- break;
+ return acpi_scan_device_check(adev);
case ACPI_NOTIFY_EJECT_REQUEST:
case ACPI_OST_EC_OSPM_EJECT:
- error = acpi_scan_hot_remove(adev);
- break;
- default:
- error = -EINVAL;
- break;
+ if (adev->handler && !adev->handler->hotplug.enabled) {
+ dev_info(&adev->dev, "Eject disabled\n");
+ return -EPERM;
+ }
+ acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_EJECT_REQUEST,
+ ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+ return acpi_scan_hot_remove(adev);
}
- if (!error)
- ost_code = ACPI_OST_SC_SUCCESS;
-
- out:
- acpi_evaluate_hotplug_ost(adev->handle, src, ost_code, NULL);
- put_device(&adev->dev);
- mutex_unlock(&acpi_scan_lock);
- unlock_device_hotplug();
+ return -EINVAL;
}
-static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
+void acpi_device_hotplug(struct acpi_device *adev, u32 src)
{
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
- struct acpi_device *adev;
- acpi_status status;
+ int error = -ENODEV;
- if (acpi_bus_get_device(handle, &adev))
- goto err_out;
+ lock_device_hotplug();
+ mutex_lock(&acpi_scan_lock);
- switch (type) {
- case ACPI_NOTIFY_BUS_CHECK:
- acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
- break;
- case ACPI_NOTIFY_DEVICE_CHECK:
- acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
- break;
- case ACPI_NOTIFY_EJECT_REQUEST:
- acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
- if (!adev->handler)
- goto err_out;
+ /*
+ * The device object's ACPI handle cannot become invalid as long as we
+ * are holding acpi_scan_lock, but it might have become invalid before
+ * that lock was acquired.
+ */
+ if (adev->handle == INVALID_ACPI_HANDLE)
+ goto err_out;
- if (!adev->handler->hotplug.enabled) {
- acpi_handle_err(handle, "Eject disabled\n");
+ if (adev->flags.is_dock_station) {
+ error = dock_notify(adev, src);
+ } else if (adev->flags.hotplug_notify) {
+ error = acpi_generic_hotplug_event(adev, src);
+ if (error == -EPERM) {
ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
goto err_out;
}
- acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
- ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
- break;
- default:
- /* non-hotplug event; possibly handled by other handler */
- return;
- }
- get_device(&adev->dev);
- status = acpi_hotplug_execute(acpi_device_hotplug, adev, type);
- if (ACPI_SUCCESS(status))
- return;
+ } else {
+ int (*notify)(struct acpi_device *, u32);
- put_device(&adev->dev);
+ acpi_lock_hp_context();
+ notify = adev->hp ? adev->hp->notify : NULL;
+ acpi_unlock_hp_context();
+ /*
+ * There may be additional notify handlers for device objects
+ * without the .event() callback, so ignore them here.
+ */
+ if (notify)
+ error = notify(adev, src);
+ else
+ goto out;
+ }
+ if (!error)
+ ost_code = ACPI_OST_SC_SUCCESS;
err_out:
- acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
+ acpi_evaluate_ost(adev->handle, src, ost_code, NULL);
+
+ out:
+ acpi_bus_put_acpi_device(adev);
+ mutex_unlock(&acpi_scan_lock);
+ unlock_device_hotplug();
}
static ssize_t real_power_state_show(struct device *dev,
@@ -570,17 +577,14 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
return -ENODEV;
- acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
- ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
get_device(&acpi_device->dev);
- status = acpi_hotplug_execute(acpi_device_hotplug, acpi_device,
- ACPI_OST_EC_OSPM_EJECT);
+ status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
if (ACPI_SUCCESS(status))
return count;
put_device(&acpi_device->dev);
- acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
- ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+ acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
+ ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
}
@@ -1114,14 +1118,16 @@ static void acpi_scan_drop_device(acpi_handle handle, void *context)
mutex_unlock(&acpi_device_del_lock);
}
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
+static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
+ void (*callback)(void *))
{
acpi_status status;
if (!device)
return -EINVAL;
- status = acpi_get_data(handle, acpi_scan_drop_device, (void **)device);
+ status = acpi_get_data_full(handle, acpi_scan_drop_device,
+ (void **)device, callback);
if (ACPI_FAILURE(status) || !*device) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
handle));
@@ -1129,8 +1135,32 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
}
return 0;
}
+
+int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
+{
+ return acpi_get_device_data(handle, device, NULL);
+}
EXPORT_SYMBOL(acpi_bus_get_device);
+static void get_acpi_device(void *dev)
+{
+ if (dev)
+ get_device(&((struct acpi_device *)dev)->dev);
+}
+
+struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
+{
+ struct acpi_device *adev = NULL;
+
+ acpi_get_device_data(handle, &adev, get_acpi_device);
+ return adev;
+}
+
+void acpi_bus_put_acpi_device(struct acpi_device *adev)
+{
+ put_device(&adev->dev);
+}
+
int acpi_device_add(struct acpi_device *device,
void (*release)(struct device *))
{
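[Editor's note] acpi_bus_get_acpi_device() and acpi_bus_put_acpi_device(), added above, give ACPI core code a reference-counted handle-to-device lookup, which is what the reworked acpi_bus_notify() relies on. A minimal usage sketch, assuming the declarations are visible to the caller (they are ACPI-core internal at this point); foo_* is illustrative.

#include <linux/acpi.h>

static void foo_inspect_handle(acpi_handle handle)
{
        struct acpi_device *adev = acpi_bus_get_acpi_device(handle);

        if (!adev)
                return;                 /* no struct acpi_device attached */

        dev_dbg(&adev->dev, "present: %d\n", adev->status.present);

        acpi_bus_put_acpi_device(adev); /* drops the reference taken above */
}
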
@@ -1641,6 +1671,27 @@ bool acpi_bay_match(acpi_handle handle)
return acpi_ata_match(phandle);
}
+bool acpi_device_is_battery(struct acpi_device *adev)
+{
+ struct acpi_hardware_id *hwid;
+
+ list_for_each_entry(hwid, &adev->pnp.ids, list)
+ if (!strcmp("PNP0C0A", hwid->id))
+ return true;
+
+ return false;
+}
+
+static bool is_ejectable_bay(struct acpi_device *adev)
+{
+ acpi_handle handle = adev->handle;
+
+ if (acpi_has_method(handle, "_EJ0") && acpi_device_is_battery(adev))
+ return true;
+
+ return acpi_bay_match(handle);
+}
+
/*
* acpi_dock_match - see if an acpi object has a _DCK method
*/
@@ -1706,6 +1757,20 @@ static bool acpi_ibm_smbus_match(acpi_handle handle)
return false;
}
+static bool acpi_object_is_system_bus(acpi_handle handle)
+{
+ acpi_handle tmp;
+
+ if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_SB", &tmp)) &&
+ tmp == handle)
+ return true;
+ if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_TZ", &tmp)) &&
+ tmp == handle)
+ return true;
+
+ return false;
+}
+
static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
int device_type)
{
@@ -1757,8 +1822,10 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
acpi_add_id(pnp, ACPI_DOCK_HID);
else if (acpi_ibm_smbus_match(handle))
acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
- else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) {
- acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
+ else if (list_empty(&pnp->ids) &&
+ acpi_object_is_system_bus(handle)) {
+ /* \_SB, \_TZ, LNXSYBUS */
+ acpi_add_id(pnp, ACPI_BUS_HID);
strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
strcpy(pnp->device_class, ACPI_BUS_CLASS);
}
@@ -1941,33 +2008,23 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
mutex_unlock(&acpi_scan_lock);
}
-static void acpi_scan_init_hotplug(acpi_handle handle, int type)
+static void acpi_scan_init_hotplug(struct acpi_device *adev)
{
- struct acpi_device_pnp pnp = {};
struct acpi_hardware_id *hwid;
- struct acpi_scan_handler *handler;
- INIT_LIST_HEAD(&pnp.ids);
- acpi_set_pnp_ids(handle, &pnp, type);
-
- if (!pnp.type.hardware_id)
- goto out;
+ if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) {
+ acpi_dock_add(adev);
+ return;
+ }
+ list_for_each_entry(hwid, &adev->pnp.ids, list) {
+ struct acpi_scan_handler *handler;
- /*
- * This relies on the fact that acpi_install_notify_handler() will not
- * install the same notify handler routine twice for the same handle.
- */
- list_for_each_entry(hwid, &pnp.ids, list) {
handler = acpi_scan_match_handler(hwid->id, NULL);
if (handler) {
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- acpi_hotplug_notify_cb, handler);
+ adev->flags.hotplug_notify = true;
break;
}
}
-
-out:
- acpi_free_pnp_ids(&pnp);
}
static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
@@ -1991,12 +2048,12 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
return AE_OK;
}
- acpi_scan_init_hotplug(handle, type);
-
acpi_add_single_object(&device, handle, type, sta);
if (!device)
return AE_CTRL_DEPTH;
+ acpi_scan_init_hotplug(device);
+
out:
if (!*return_value)
*return_value = device;
@@ -2015,13 +2072,14 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
handler = acpi_scan_match_handler(hwid->id, &devid);
if (handler) {
+ device->handler = handler;
ret = handler->attach(device, devid);
- if (ret > 0) {
- device->handler = handler;
+ if (ret > 0)
break;
- } else if (ret < 0) {
+
+ device->handler = NULL;
+ if (ret < 0)
break;
- }
}
}
return ret;
@@ -2030,8 +2088,12 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
static void acpi_bus_attach(struct acpi_device *device)
{
struct acpi_device *child;
+ acpi_handle ejd;
int ret;
+ if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd)))
+ register_dock_dependent_device(device, ejd);
+
acpi_bus_get_status(device);
/* Skip devices that are not present. */
if (!acpi_device_is_present(device)) {
@@ -2184,7 +2246,6 @@ int __init acpi_scan_init(void)
acpi_cmos_rtc_init();
acpi_container_init();
acpi_memory_hotplug_init();
- acpi_dock_init();
mutex_lock(&acpi_scan_lock);
/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 91a32cefb11f..38cb9782d4b8 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -12,8 +12,6 @@
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("sysfs");
-#define PREFIX "ACPI: "
-
#ifdef CONFIG_ACPI_DEBUG
/*
* ACPI debug sysfs I/F, including:
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 5837f857ac2e..21782290df41 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -23,6 +23,8 @@
*
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/smp.h>
@@ -33,8 +35,6 @@
#include <linux/acpi.h>
#include <linux/bootmem.h>
-#define PREFIX "ACPI: "
-
#define ACPI_MAX_TABLES 128
static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
@@ -55,10 +55,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_apic *p =
(struct acpi_madt_local_apic *)header;
- printk(KERN_INFO PREFIX
- "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
- p->processor_id, p->id,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_info("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
+ p->processor_id, p->id,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -66,11 +65,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_x2apic *p =
(struct acpi_madt_local_x2apic *)header;
- printk(KERN_INFO PREFIX
- "X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
- p->local_apic_id, p->uid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ?
- "enabled" : "disabled");
+ pr_info("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
+ p->local_apic_id, p->uid,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -78,9 +75,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_io_apic *p =
(struct acpi_madt_io_apic *)header;
- printk(KERN_INFO PREFIX
- "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
- p->id, p->address, p->global_irq_base);
+ pr_info("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
+ p->id, p->address, p->global_irq_base);
}
break;
@@ -88,18 +84,15 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_interrupt_override *p =
(struct acpi_madt_interrupt_override *)header;
- printk(KERN_INFO PREFIX
- "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
- p->bus, p->source_irq, p->global_irq,
- mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
- mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
+ pr_info("INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
+ p->bus, p->source_irq, p->global_irq,
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
if (p->inti_flags &
~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK))
- printk(KERN_INFO PREFIX
- "INT_SRC_OVR unexpected reserved flags: 0x%x\n",
- p->inti_flags &
+ pr_info("INT_SRC_OVR unexpected reserved flags: 0x%x\n",
+ p->inti_flags &
~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK));
-
}
break;
@@ -107,11 +100,10 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_nmi_source *p =
(struct acpi_madt_nmi_source *)header;
- printk(KERN_INFO PREFIX
- "NMI_SRC (%s %s global_irq %d)\n",
- mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
- mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
- p->global_irq);
+ pr_info("NMI_SRC (%s %s global_irq %d)\n",
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+ p->global_irq);
}
break;
@@ -119,12 +111,11 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_apic_nmi *p =
(struct acpi_madt_local_apic_nmi *)header;
- printk(KERN_INFO PREFIX
- "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
- p->processor_id,
- mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ],
- mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
- p->lint);
+ pr_info("LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
+ p->processor_id,
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+ p->lint);
}
break;
@@ -137,12 +128,11 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
polarity = p->inti_flags & ACPI_MADT_POLARITY_MASK;
trigger = (p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
- printk(KERN_INFO PREFIX
- "X2APIC_NMI (uid[0x%02x] %s %s lint[0x%x])\n",
- p->uid,
- mps_inti_flags_polarity[polarity],
- mps_inti_flags_trigger[trigger],
- p->lint);
+ pr_info("X2APIC_NMI (uid[0x%02x] %s %s lint[0x%x])\n",
+ p->uid,
+ mps_inti_flags_polarity[polarity],
+ mps_inti_flags_trigger[trigger],
+ p->lint);
}
break;
@@ -150,9 +140,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_apic_override *p =
(struct acpi_madt_local_apic_override *)header;
- printk(KERN_INFO PREFIX
- "LAPIC_ADDR_OVR (address[%p])\n",
- (void *)(unsigned long)p->address);
+ pr_info("LAPIC_ADDR_OVR (address[%p])\n",
+ (void *)(unsigned long)p->address);
}
break;
@@ -160,10 +149,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_io_sapic *p =
(struct acpi_madt_io_sapic *)header;
- printk(KERN_INFO PREFIX
- "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
- p->id, (void *)(unsigned long)p->address,
- p->global_irq_base);
+ pr_info("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
+ p->id, (void *)(unsigned long)p->address,
+ p->global_irq_base);
}
break;
@@ -171,10 +159,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_sapic *p =
(struct acpi_madt_local_sapic *)header;
- printk(KERN_INFO PREFIX
- "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
- p->processor_id, p->id, p->eid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_info("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
+ p->processor_id, p->id, p->eid,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -182,19 +169,17 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_interrupt_source *p =
(struct acpi_madt_interrupt_source *)header;
- printk(KERN_INFO PREFIX
- "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
- mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
- mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
- p->type, p->id, p->eid, p->io_sapic_vector,
- p->global_irq);
+ pr_info("PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+ p->type, p->id, p->eid, p->io_sapic_vector,
+ p->global_irq);
}
break;
default:
- printk(KERN_WARNING PREFIX
- "Found unsupported MADT entry (type = 0x%x)\n",
- header->type);
+ pr_warn("Found unsupported MADT entry (type = 0x%x)\n",
+ header->type);
break;
}
}
@@ -225,7 +210,7 @@ acpi_table_parse_entries(char *id,
acpi_get_table_with_size(id, 0, &table_header, &tbl_size);
if (!table_header) {
- printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
+ pr_warn("%4.4s not present\n", id);
return -ENODEV;
}
@@ -248,7 +233,7 @@ acpi_table_parse_entries(char *id,
* infinite loop.
*/
if (entry->length == 0) {
- pr_err(PREFIX "[%4.4s:0x%02x] Invalid zero length\n", id, entry_id);
+ pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, entry_id);
goto err;
}
@@ -256,8 +241,8 @@ acpi_table_parse_entries(char *id,
((unsigned long)entry + entry->length);
}
if (max_entries && count > max_entries) {
- printk(KERN_WARNING PREFIX "[%4.4s:0x%02x] ignored %i entries of "
- "%i found\n", id, entry_id, count - max_entries, count);
+ pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
+ id, entry_id, count - max_entries, count);
}
early_acpi_os_unmap_memory((char *)table_header, tbl_size);
@@ -322,13 +307,11 @@ static void __init check_multiple_madt(void)
acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
if (table) {
- printk(KERN_WARNING PREFIX
- "BIOS bug: multiple APIC/MADT found,"
- " using %d\n", acpi_apic_instance);
- printk(KERN_WARNING PREFIX
- "If \"acpi_apic_instance=%d\" works better, "
- "notify linux-acpi@vger.kernel.org\n",
- acpi_apic_instance ? 0 : 2);
+ pr_warn("BIOS bug: multiple APIC/MADT found, using %d\n",
+ acpi_apic_instance);
+ pr_warn("If \"acpi_apic_instance=%d\" works better, "
+ "notify linux-acpi@vger.kernel.org\n",
+ acpi_apic_instance ? 0 : 2);
early_acpi_os_unmap_memory(table, tbl_size);
} else
@@ -365,8 +348,7 @@ static int __init acpi_parse_apic_instance(char *str)
acpi_apic_instance = simple_strtoul(str, NULL, 0);
- printk(KERN_NOTICE PREFIX "Shall use APIC/MADT table %d\n",
- acpi_apic_instance);
+ pr_notice("Shall use APIC/MADT table %d\n", acpi_apic_instance);
return 0;
}
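
The tables.c hunks above replace printk(KERN_* PREFIX ...) calls with pr_info()/pr_warn() and a single file-wide pr_fmt() definition. A minimal sketch of how that pattern behaves, using a hypothetical helper that is not part of the patch:

/*
 * Defining pr_fmt() before any includes makes every pr_info()/pr_warn()
 * in the file emit the "ACPI: " prefix automatically, so the old PREFIX
 * macro and the explicit printk(KERN_INFO PREFIX ...) calls go away.
 * Hypothetical stand-alone example, not part of the patch.
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/kernel.h>
#include <linux/printk.h>

static void example_report_madt_entry(u8 type)
{
	/* both lines come out prefixed with "ACPI: " automatically */
	pr_info("parsing MADT entry (type = 0x%x)\n", type);
	pr_warn("Found unsupported MADT entry (type = 0x%x)\n", type);
}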
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 08626c851be7..c1e31a41f949 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -43,6 +43,7 @@
#include <linux/device.h>
#include <linux/thermal.h>
#include <linux/acpi.h>
+#include <linux/workqueue.h>
#include <asm/uaccess.h>
#define PREFIX "ACPI: "
@@ -90,6 +91,8 @@ static int psv;
module_param(psv, int, 0644);
MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
+static struct workqueue_struct *acpi_thermal_pm_queue;
+
static int acpi_thermal_add(struct acpi_device *device);
static int acpi_thermal_remove(struct acpi_device *device);
static void acpi_thermal_notify(struct acpi_device *device, u32 event);
@@ -101,11 +104,13 @@ static const struct acpi_device_id thermal_device_ids[] = {
MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
#ifdef CONFIG_PM_SLEEP
+static int acpi_thermal_suspend(struct device *dev);
static int acpi_thermal_resume(struct device *dev);
#else
+#define acpi_thermal_suspend NULL
#define acpi_thermal_resume NULL
#endif
-static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
+static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, acpi_thermal_suspend, acpi_thermal_resume);
static struct acpi_driver acpi_thermal_driver = {
.name = "thermal",
@@ -186,6 +191,7 @@ struct acpi_thermal {
struct thermal_zone_device *thermal_zone;
int tz_enabled;
int kelvin_offset;
+ struct work_struct thermal_check_work;
};
/* --------------------------------------------------------------------------
@@ -338,7 +344,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
tz->trips.hot.flags.valid = 1;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Found hot threshold [%lu]\n",
- tz->trips.critical.temperature));
+ tz->trips.hot.temperature));
}
}
@@ -1064,6 +1070,13 @@ static void acpi_thermal_guess_offset(struct acpi_thermal *tz)
tz->kelvin_offset = 2732;
}
+static void acpi_thermal_check_fn(struct work_struct *work)
+{
+ struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
+ thermal_check_work);
+ acpi_thermal_check(tz);
+}
+
static int acpi_thermal_add(struct acpi_device *device)
{
int result = 0;
@@ -1093,6 +1106,8 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
+ INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
+
pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature));
goto end;
@@ -1110,6 +1125,7 @@ static int acpi_thermal_remove(struct acpi_device *device)
if (!device || !acpi_driver_data(device))
return -EINVAL;
+ flush_workqueue(acpi_thermal_pm_queue);
tz = acpi_driver_data(device);
acpi_thermal_unregister_thermal_zone(tz);
@@ -1118,6 +1134,13 @@ static int acpi_thermal_remove(struct acpi_device *device)
}
#ifdef CONFIG_PM_SLEEP
+static int acpi_thermal_suspend(struct device *dev)
+{
+ /* Make sure the previously queued thermal check work has been done */
+ flush_workqueue(acpi_thermal_pm_queue);
+ return 0;
+}
+
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
@@ -1148,7 +1171,7 @@ static int acpi_thermal_resume(struct device *dev)
tz->state.active |= tz->trips.active[i].flags.enabled;
}
- acpi_thermal_check(tz);
+ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
return AE_OK;
}
@@ -1240,16 +1263,22 @@ static int __init acpi_thermal_init(void)
return -ENODEV;
}
+ acpi_thermal_pm_queue = create_workqueue("acpi_thermal_pm");
+ if (!acpi_thermal_pm_queue)
+ return -ENODEV;
+
result = acpi_bus_register_driver(&acpi_thermal_driver);
- if (result < 0)
+ if (result < 0) {
+ destroy_workqueue(acpi_thermal_pm_queue);
return -ENODEV;
+ }
return 0;
}
static void __exit acpi_thermal_exit(void)
{
-
+ destroy_workqueue(acpi_thermal_pm_queue);
acpi_bus_unregister_driver(&acpi_thermal_driver);
return;
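
The thermal.c hunks above move acpi_thermal_check() off the resume path and onto a dedicated workqueue, which is flushed on suspend and removal. A condensed sketch of that deferred-check pattern, with hypothetical example_* names standing in for the driver structures:

/*
 * Sketch only: the work item, queue and flush points mirror the patch,
 * but the surrounding driver context is simplified and hypothetical.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_thermal_pm_queue;

struct example_thermal {
	struct work_struct thermal_check_work;
};

static void example_thermal_check_fn(struct work_struct *work)
{
	struct example_thermal *tz =
		container_of(work, struct example_thermal, thermal_check_work);

	/* the real driver evaluates trip points here via acpi_thermal_check(tz) */
	(void)tz;
}

static int example_init(struct example_thermal *tz)
{
	example_thermal_pm_queue = create_workqueue("example_thermal_pm");
	if (!example_thermal_pm_queue)
		return -ENODEV;

	INIT_WORK(&tz->thermal_check_work, example_thermal_check_fn);
	return 0;
}

/* resume: queue the check instead of evaluating trip points synchronously */
static void example_resume(struct example_thermal *tz)
{
	queue_work(example_thermal_pm_queue, &tz->thermal_check_work);
}

/* suspend/remove: wait for any previously queued check to finish */
static void example_quiesce(void)
{
	flush_workqueue(example_thermal_pm_queue);
}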
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 85e3b612bdc0..bba526148583 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -164,11 +164,10 @@ acpi_extract_package(union acpi_object *package,
* Validate output buffer.
*/
if (buffer->length == ACPI_ALLOCATE_BUFFER) {
- buffer->pointer = ACPI_ALLOCATE(size_required);
+ buffer->pointer = ACPI_ALLOCATE_ZEROED(size_required);
if (!buffer->pointer)
return AE_NO_MEMORY;
buffer->length = size_required;
- memset(buffer->pointer, 0, size_required);
} else {
if (buffer->length < size_required) {
buffer->length = size_required;
@@ -422,7 +421,7 @@ out:
EXPORT_SYMBOL(acpi_get_physical_device_location);
/**
- * acpi_evaluate_hotplug_ost: Evaluate _OST for hotplug operations
+ * acpi_evaluate_ost: Evaluate _OST for hotplug operations
* @handle: ACPI device handle
* @source_event: source event code
* @status_code: status code
@@ -433,17 +432,15 @@ EXPORT_SYMBOL(acpi_get_physical_device_location);
* When the platform does not support _OST, this function has no effect.
*/
acpi_status
-acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
- u32 status_code, struct acpi_buffer *status_buf)
+acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
+ struct acpi_buffer *status_buf)
{
-#ifdef ACPI_HOTPLUG_OST
union acpi_object params[3] = {
{.type = ACPI_TYPE_INTEGER,},
{.type = ACPI_TYPE_INTEGER,},
{.type = ACPI_TYPE_BUFFER,}
};
struct acpi_object_list arg_list = {3, params};
- acpi_status status;
params[0].integer.value = source_event;
params[1].integer.value = status_code;
@@ -455,13 +452,9 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
params[2].buffer.length = 0;
}
- status = acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
- return status;
-#else
- return AE_OK;
-#endif
+ return acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
}
-EXPORT_SYMBOL(acpi_evaluate_hotplug_ost);
+EXPORT_SYMBOL(acpi_evaluate_ost);
/**
* acpi_handle_printk: Print message with ACPI prefix and object path
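
The utils.c hunk renames acpi_evaluate_hotplug_ost() to acpi_evaluate_ost() and drops the ACPI_HOTPLUG_OST conditional, so _OST is evaluated whenever the firmware provides it. A small usage sketch modelled on the scan.c eject path earlier in this series; the surrounding function is hypothetical:

#include <linux/acpi.h>

static void example_report_eject_failure(acpi_handle handle)
{
	/* silently a no-op on platforms whose namespace has no _OST method */
	acpi_evaluate_ost(handle, ACPI_OST_EC_OSPM_EJECT,
			  ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
}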
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index b6ba88ed31ae..8b6990e417ec 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -45,8 +45,6 @@
#include "internal.h"
-#define PREFIX "ACPI: "
-
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
#define ACPI_VIDEO_NOTIFY_SWITCH 0x80
@@ -490,6 +488,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
},
{
+ .callback = video_set_use_native_backlight,
+ .ident = "Thinkpad Helix",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+ },
+ },
+ {
.callback = video_set_use_native_backlight,
.ident = "Dell Inspiron 7520",
.matches = {
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 19080c8e2f2a..33e3db548a29 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -40,8 +40,6 @@
#include "internal.h"
-#define PREFIX "ACPI: "
-
ACPI_MODULE_NAME("video");
#define _COMPONENT ACPI_VIDEO_COMPONENT
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 9e6029105607..3cf61a127ee5 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -83,7 +83,7 @@ static struct device_attribute amba_dev_attrs[] = {
__ATTR_NULL,
};
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
/*
* Hooks to provide runtime PM of the pclk (bus clock). It is safe to
* enable/disable the bus clock at runtime PM suspend/resume as this
@@ -123,7 +123,7 @@ static const struct dev_pm_ops amba_pm = {
.thaw = pm_generic_thaw,
.poweroff = pm_generic_poweroff,
.restore = pm_generic_restore,
- SET_RUNTIME_PM_OPS(
+ SET_PM_RUNTIME_PM_OPS(
amba_pm_runtime_suspend,
amba_pm_runtime_resume,
NULL
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index 1f44e56cc65d..558a239954e8 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -256,8 +256,6 @@ static int tegra_ahb_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
ahb->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ahb->regs))
return PTR_ERR(ahb->regs);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 868429a47be4..20e03a7eb8b4 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -11,13 +11,13 @@ config HAVE_PATA_PLATFORM
to update the PATA_PLATFORM entry.
menuconfig ATA
- tristate "Serial ATA and Parallel ATA drivers"
+ tristate "Serial ATA and Parallel ATA drivers (libata)"
depends on HAS_IOMEM
depends on BLOCK
depends on !(M32R || M68K || S390) || BROKEN
select SCSI
---help---
- If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or
+ If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
any other ATA device under Linux, say Y and make sure that you know
the name of your ATA host adapter (the card inside your computer
that "speaks" the ATA protocol, also called ATA controller),
@@ -60,7 +60,7 @@ config ATA_ACPI
config SATA_ZPODD
bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
- depends on ATA_ACPI
+ depends on ATA_ACPI && PM_RUNTIME
default n
help
This option adds support for SATA Zero Power Optical Disc
@@ -97,15 +97,48 @@ config SATA_AHCI_PLATFORM
If unsure, say N.
+config AHCI_DA850
+ tristate "DaVinci DA850 AHCI SATA support"
+ depends on ARCH_DAVINCI_DA850
+ help
+ This option enables support for the DaVinci DA850 SoC's
+ onboard AHCI SATA.
+
+ If unsure, say N.
+
+config AHCI_ST
+ tristate "ST AHCI SATA support"
+ depends on ARCH_STI
+ help
+ This option enables support for ST AHCI SATA controller.
+
+ If unsure, say N.
+
config AHCI_IMX
tristate "Freescale i.MX AHCI SATA support"
- depends on SATA_AHCI_PLATFORM && MFD_SYSCON
+ depends on MFD_SYSCON
help
This option enables support for the Freescale i.MX SoC's
onboard AHCI SATA.
If unsure, say N.
+config AHCI_SUNXI
+ tristate "Allwinner sunxi AHCI SATA support"
+ depends on ARCH_SUNXI
+ help
+ This option enables support for the Allwinner sunxi SoC's
+ onboard AHCI SATA.
+
+ If unsure, say N.
+
+config AHCI_XGENE
+ tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
+ depends on ARM64 || COMPILE_TEST
+ select PHY_XGENE
+ help
+ This option enables support for APM X-Gene SoC SATA host controller.
+
config SATA_FSL
tristate "Freescale 3.0Gbps SATA support"
depends on FSL_SOC
@@ -239,6 +272,7 @@ config SATA_DWC_VDEBUG
config SATA_HIGHBANK
tristate "Calxeda Highbank SATA support"
+ depends on ARCH_HIGHBANK || COMPILE_TEST
help
This option enables support for the Calxeda Highbank SoC's
onboard SATA.
@@ -247,6 +281,8 @@ config SATA_HIGHBANK
config SATA_MV
tristate "Marvell SATA support"
+ depends on PCI || ARCH_DOVE || ARCH_KIRKWOOD || ARCH_MV78XX0 || \
+ ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
select GENERIC_PHY
help
This option enables support for the Marvell Serial ATA family.
@@ -273,6 +309,7 @@ config SATA_PROMISE
config SATA_RCAR
tristate "Renesas R-Car SATA support"
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
This option enables support for Renesas R-Car Serial ATA.
@@ -352,6 +389,7 @@ config PATA_AMD
config PATA_ARASAN_CF
tristate "ARASAN CompactFlash PATA Controller Support"
+ depends on ARCH_SPEAR13XX || COMPILE_TEST
depends on DMADEVICES
select DMA_ENGINE
help
@@ -403,7 +441,7 @@ config PATA_CMD64X
config PATA_CS5520
tristate "CS5510/5520 PATA support"
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
help
This option enables support for the Cyrix 5510/5520
companion chip used with the MediaGX/Geode processor family.
@@ -412,7 +450,7 @@ config PATA_CS5520
config PATA_CS5530
tristate "CS5530 PATA support"
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
help
This option enables support for the Cyrix/NatSemi/AMD CS5530
companion chip used with the MediaGX/Geode processor family.
@@ -421,7 +459,7 @@ config PATA_CS5530
config PATA_CS5535
tristate "CS5535 PATA support (Experimental)"
- depends on PCI && X86 && !X86_64
+ depends on PCI && X86_32
help
This option enables support for the NatSemi/AMD CS5535
companion chip used with the Geode processor family.
@@ -430,7 +468,7 @@ config PATA_CS5535
config PATA_CS5536
tristate "CS5536 PATA support"
- depends on PCI
+ depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
help
This option enables support for the AMD CS5536
companion chip used with the Geode LX processor family.
@@ -666,7 +704,7 @@ config PATA_RDC
config PATA_SC1200
tristate "SC1200 PATA support"
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
help
This option enables support for the NatSemi/AMD SC1200 SoC
companion chip used with the Geode processor family.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 46518c622460..44c8016e565c 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -4,13 +4,17 @@ obj-$(CONFIG_ATA) += libata.o
# non-SFF interface
obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o
-obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
+obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
obj-$(CONFIG_SATA_FSL) += sata_fsl.o
obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
-obj-$(CONFIG_AHCI_IMX) += ahci_imx.o
+obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_SUNXI) += ahci_sunxi.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_ST) += ahci_st.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_XGENE) += ahci_xgene.o libahci.o libahci_platform.o
# SFF w/ custom DMA
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index fd665d919df2..b51605ac5974 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -36,7 +36,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index c81d809c111b..5a0bf8ed649b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -35,7 +35,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -578,6 +577,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
bool online;
int rc;
@@ -588,7 +588,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
deadline, &online, NULL);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
@@ -603,6 +603,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
{
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
bool online;
@@ -618,7 +619,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
deadline, &online, NULL);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
/* The pseudo configuration device on SIMG4726 attached to
* ASUS P5W-DH Deluxe doesn't send signature FIS after
@@ -1165,13 +1166,13 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
- int rc, nvec;
+ int nvec;
if (hpriv->flags & AHCI_HFLAG_NO_MSI)
goto intx;
- rc = pci_msi_vec_count(pdev);
- if (rc < 0)
+ nvec = pci_msi_vec_count(pdev);
+ if (nvec < 0)
goto intx;
/*
@@ -1179,21 +1180,19 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
* Message mode could be enforced. In this case assume that advantage
* of multipe MSIs is negated and use single MSI mode instead.
*/
- if (rc < n_ports)
+ if (nvec < n_ports)
goto single_msi;
- nvec = rc;
- rc = pci_enable_msi_block(pdev, nvec);
- if (rc < 0)
- goto intx;
- else if (rc > 0)
+ nvec = pci_enable_msi_range(pdev, nvec, nvec);
+ if (nvec == -ENOSPC)
goto single_msi;
+ else if (nvec < 0)
+ goto intx;
return nvec;
single_msi:
- rc = pci_enable_msi(pdev);
- if (rc)
+ if (pci_enable_msi(pdev))
goto intx;
return 1;
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 2289efdf8203..51af275b3388 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -37,6 +37,8 @@
#include <linux/clk.h>
#include <linux/libata.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
/* Enclosure Management Control */
#define EM_CTRL_MSG_TYPE 0x000f0000
@@ -51,6 +53,7 @@
enum {
AHCI_MAX_PORTS = 32,
+ AHCI_MAX_CLKS = 3,
AHCI_MAX_SG = 168, /* hardware max is 64K */
AHCI_DMA_BOUNDARY = 0xffffffff,
AHCI_MAX_CMDS = 32,
@@ -321,8 +324,17 @@ struct ahci_host_priv {
u32 em_loc; /* enclosure management location */
u32 em_buf_sz; /* EM buffer size in byte */
u32 em_msg_type; /* EM message type */
- struct clk *clk; /* Only for platforms supporting clk */
+ bool got_runtime_pm; /* Did we do pm_runtime_get? */
+ struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
+ struct regulator *target_pwr; /* Optional */
+ struct phy *phy; /* If platform uses phy */
void *plat_data; /* Other platform data */
+ /*
+ * Optional ahci_start_engine override, if not set this gets set to the
+ * default ahci_start_engine during ahci_save_initial_config, this can
+ * be overridden anytime before the host is activated.
+ */
+ void (*start_engine)(struct ata_port *ap);
};
extern int ahci_ignore_sss;
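
The new start_engine member of struct ahci_host_priv lets platform glue override how the DMA engine is restarted; the ahci.c hunks above already call hpriv->start_engine() from the hardreset paths. A sketch of a platform probe installing such an override, with hypothetical xyz_* names and the helper calls modelled on the platform drivers further down:

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
#include "ahci.h"

static void xyz_ahci_start_engine(struct ata_port *ap)
{
	/* a controller-specific quirk would go here ... */
	ahci_start_engine(ap);		/* ... before the generic start */
}

static const struct ata_port_info xyz_ahci_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_platform_ops,
};

static int xyz_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	hpriv = ahci_platform_get_resources(pdev);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		return rc;

	/* may be set any time before the host is activated by init_host() */
	hpriv->start_engine = xyz_ahci_start_engine;

	rc = ahci_platform_init_host(pdev, hpriv, &xyz_ahci_port_info, 0, 0);
	if (rc)
		ahci_platform_disable_resources(hpriv);
	return rc;
}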
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
new file mode 100644
index 000000000000..2c83613ce2db
--- /dev/null
+++ b/drivers/ata/ahci_da850.c
@@ -0,0 +1,114 @@
+/*
+ * DaVinci DA850 AHCI SATA platform driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include "ahci.h"
+
+/* SATA PHY Control Register offset from AHCI base */
+#define SATA_P0PHYCR_REG 0x178
+
+#define SATA_PHY_MPY(x) ((x) << 0)
+#define SATA_PHY_LOS(x) ((x) << 6)
+#define SATA_PHY_RXCDR(x) ((x) << 10)
+#define SATA_PHY_RXEQ(x) ((x) << 13)
+#define SATA_PHY_TXSWING(x) ((x) << 19)
+#define SATA_PHY_ENPLL(x) ((x) << 31)
+
+/*
+ * The multiplier needed for 1.5GHz PLL output.
+ *
+ * NOTE: This is currently hardcoded to be suitable for 100MHz crystal
+ * frequency (which is used by DA850 EVM board) and may need to be changed
+ * if you would like to use this driver on some other board.
+ */
+#define DA850_SATA_CLK_MULTIPLIER 7
+
+static void da850_sata_init(struct device *dev, void __iomem *pwrdn_reg,
+ void __iomem *ahci_base)
+{
+ unsigned int val;
+
+ /* Enable SATA clock receiver */
+ val = readl(pwrdn_reg);
+ val &= ~BIT(0);
+ writel(val, pwrdn_reg);
+
+ val = SATA_PHY_MPY(DA850_SATA_CLK_MULTIPLIER + 1) | SATA_PHY_LOS(1) |
+ SATA_PHY_RXCDR(4) | SATA_PHY_RXEQ(1) | SATA_PHY_TXSWING(3) |
+ SATA_PHY_ENPLL(1);
+
+ writel(val, ahci_base + SATA_P0PHYCR_REG);
+}
+
+static const struct ata_port_info ahci_da850_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
+};
+
+static int ahci_da850_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_host_priv *hpriv;
+ struct resource *res;
+ void __iomem *pwrdn_reg;
+ int rc;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ goto disable_resources;
+
+ pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
+ if (!pwrdn_reg)
+ goto disable_resources;
+
+ da850_sata_init(dev, pwrdn_reg, hpriv->mmio);
+
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info, 0, 0);
+ if (rc)
+ goto disable_resources;
+
+ return 0;
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_da850_pm_ops, ahci_platform_suspend,
+ ahci_platform_resume);
+
+static struct platform_driver ahci_da850_driver = {
+ .probe = ahci_da850_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "ahci_da850",
+ .owner = THIS_MODULE,
+ .pm = &ahci_da850_pm_ops,
+ },
+};
+module_platform_driver(ahci_da850_driver);
+
+MODULE_DESCRIPTION("DaVinci DA850 AHCI SATA platform driver");
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index dd4d6f74d7bd..497c7abe1c7d 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -42,13 +42,7 @@ enum ahci_imx_type {
struct imx_ahci_priv {
struct platform_device *ahci_pdev;
enum ahci_imx_type type;
-
- /* i.MX53 clock */
- struct clk *sata_gate_clk;
- /* Common clock */
- struct clk *sata_ref_clk;
struct clk *ahb_clk;
-
struct regmap *gpr;
bool no_device;
bool first_time;
@@ -58,28 +52,52 @@ static int ahci_imx_hotplug;
module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
-static int imx_sata_clock_enable(struct device *dev)
+static void ahci_imx_host_stop(struct ata_host *host);
+
+static int imx_sata_enable(struct ahci_host_priv *hpriv)
{
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
int ret;
- if (imxpriv->type == AHCI_IMX53) {
- ret = clk_prepare_enable(imxpriv->sata_gate_clk);
- if (ret < 0) {
- dev_err(dev, "prepare-enable sata_gate clock err:%d\n",
- ret);
+ if (imxpriv->no_device)
+ return 0;
+
+ if (hpriv->target_pwr) {
+ ret = regulator_enable(hpriv->target_pwr);
+ if (ret)
return ret;
- }
}
- ret = clk_prepare_enable(imxpriv->sata_ref_clk);
- if (ret < 0) {
- dev_err(dev, "prepare-enable sata_ref clock err:%d\n",
- ret);
- goto clk_err;
- }
+ ret = ahci_platform_enable_clks(hpriv);
+ if (ret < 0)
+ goto disable_regulator;
if (imxpriv->type == AHCI_IMX6Q) {
+ /*
+		 * Set PHY parameters, two steps to configure the GPR13,
+ * one write for rest of parameters, mask of first write
+ * is 0x07ffffff, and the other one write for setting
+ * the mpll_clk_en.
+ */
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
+ IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
+ IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
+ IMX6Q_GPR13_SATA_SPD_MODE_MASK |
+ IMX6Q_GPR13_SATA_MPLL_SS_EN |
+ IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
+ IMX6Q_GPR13_SATA_TX_BOOST_MASK |
+ IMX6Q_GPR13_SATA_TX_LVL_MASK |
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN |
+ IMX6Q_GPR13_SATA_TX_EDGE_RATE,
+ IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
+ IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
+ IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
+ IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
+ IMX6Q_GPR13_SATA_MPLL_SS_EN |
+ IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
+ IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
+ IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
IMX6Q_GPR13_SATA_MPLL_CLK_EN,
IMX6Q_GPR13_SATA_MPLL_CLK_EN);
@@ -89,15 +107,19 @@ static int imx_sata_clock_enable(struct device *dev)
return 0;
-clk_err:
- if (imxpriv->type == AHCI_IMX53)
- clk_disable_unprepare(imxpriv->sata_gate_clk);
+disable_regulator:
+ if (hpriv->target_pwr)
+ regulator_disable(hpriv->target_pwr);
+
return ret;
}
-static void imx_sata_clock_disable(struct device *dev)
+static void imx_sata_disable(struct ahci_host_priv *hpriv)
{
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+
+ if (imxpriv->no_device)
+ return;
if (imxpriv->type == AHCI_IMX6Q) {
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
@@ -105,10 +127,10 @@ static void imx_sata_clock_disable(struct device *dev)
!IMX6Q_GPR13_SATA_MPLL_CLK_EN);
}
- clk_disable_unprepare(imxpriv->sata_ref_clk);
+ ahci_platform_disable_clks(hpriv);
- if (imxpriv->type == AHCI_IMX53)
- clk_disable_unprepare(imxpriv->sata_gate_clk);
+ if (hpriv->target_pwr)
+ regulator_disable(hpriv->target_pwr);
}
static void ahci_imx_error_handler(struct ata_port *ap)
@@ -118,7 +140,7 @@ static void ahci_imx_error_handler(struct ata_port *ap)
struct ata_host *host = dev_get_drvdata(ap->dev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
ahci_error_handler(ap);
@@ -136,7 +158,7 @@ static void ahci_imx_error_handler(struct ata_port *ap)
*/
reg_val = readl(mmio + PORT_PHY_CTL);
writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
- imx_sata_clock_disable(ap->dev);
+ imx_sata_disable(hpriv);
imxpriv->no_device = true;
}
@@ -144,7 +166,9 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+ struct ata_host *host = dev_get_drvdata(ap->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
int ret = -EIO;
if (imxpriv->type == AHCI_IMX53)
@@ -156,7 +180,8 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
}
static struct ata_port_operations ahci_imx_ops = {
- .inherits = &ahci_platform_ops,
+ .inherits = &ahci_ops,
+ .host_stop = ahci_imx_host_stop,
.error_handler = ahci_imx_error_handler,
.softreset = ahci_imx_softreset,
};
@@ -168,79 +193,6 @@ static const struct ata_port_info ahci_imx_port_info = {
.port_ops = &ahci_imx_ops,
};
-static int imx_sata_init(struct device *dev, void __iomem *mmio)
-{
- int ret = 0;
- unsigned int reg_val;
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
-
- ret = imx_sata_clock_enable(dev);
- if (ret < 0)
- return ret;
-
- /*
- * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
- * and IP vendor specific register HOST_TIMER1MS.
- * Configure CAP_SSS (support stagered spin up).
- * Implement the port0.
- * Get the ahb clock rate, and configure the TIMER1MS register.
- */
- reg_val = readl(mmio + HOST_CAP);
- if (!(reg_val & HOST_CAP_SSS)) {
- reg_val |= HOST_CAP_SSS;
- writel(reg_val, mmio + HOST_CAP);
- }
- reg_val = readl(mmio + HOST_PORTS_IMPL);
- if (!(reg_val & 0x1)) {
- reg_val |= 0x1;
- writel(reg_val, mmio + HOST_PORTS_IMPL);
- }
-
- reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
- writel(reg_val, mmio + HOST_TIMER1MS);
-
- return 0;
-}
-
-static void imx_sata_exit(struct device *dev)
-{
- imx_sata_clock_disable(dev);
-}
-
-static int imx_ahci_suspend(struct device *dev)
-{
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
-
- /*
- * If no_device is set, The CLKs had been gated off in the
- * initialization so don't do it again here.
- */
- if (!imxpriv->no_device)
- imx_sata_clock_disable(dev);
-
- return 0;
-}
-
-static int imx_ahci_resume(struct device *dev)
-{
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
- int ret = 0;
-
- if (!imxpriv->no_device)
- ret = imx_sata_clock_enable(dev);
-
- return ret;
-}
-
-static struct ahci_platform_data imx_sata_pdata = {
- .init = imx_sata_init,
- .exit = imx_sata_exit,
- .ata_port_info = &ahci_imx_port_info,
- .suspend = imx_ahci_suspend,
- .resume = imx_ahci_resume,
-
-};
-
static const struct of_device_id imx_ahci_of_match[] = {
{ .compatible = "fsl,imx53-ahci", .data = (void *)AHCI_IMX53 },
{ .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
@@ -251,151 +203,124 @@ MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
static int imx_ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *mem, *irq, res[2];
const struct of_device_id *of_id;
- enum ahci_imx_type type;
- const struct ahci_platform_data *pdata = NULL;
+ struct ahci_host_priv *hpriv;
struct imx_ahci_priv *imxpriv;
- struct device *ahci_dev;
- struct platform_device *ahci_pdev;
+ unsigned int reg_val;
int ret;
of_id = of_match_device(imx_ahci_of_match, dev);
if (!of_id)
return -EINVAL;
- type = (enum ahci_imx_type)of_id->data;
- pdata = &imx_sata_pdata;
-
imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
- if (!imxpriv) {
- dev_err(dev, "can't alloc ahci_host_priv\n");
+ if (!imxpriv)
return -ENOMEM;
- }
-
- ahci_pdev = platform_device_alloc("ahci", -1);
- if (!ahci_pdev)
- return -ENODEV;
-
- ahci_dev = &ahci_pdev->dev;
- ahci_dev->parent = dev;
imxpriv->no_device = false;
imxpriv->first_time = true;
- imxpriv->type = type;
-
+ imxpriv->type = (enum ahci_imx_type)of_id->data;
imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(imxpriv->ahb_clk)) {
dev_err(dev, "can't get ahb clock.\n");
- ret = PTR_ERR(imxpriv->ahb_clk);
- goto err_out;
+ return PTR_ERR(imxpriv->ahb_clk);
}
- if (type == AHCI_IMX53) {
- imxpriv->sata_gate_clk = devm_clk_get(dev, "sata_gate");
- if (IS_ERR(imxpriv->sata_gate_clk)) {
- dev_err(dev, "can't get sata_gate clock.\n");
- ret = PTR_ERR(imxpriv->sata_gate_clk);
- goto err_out;
+ if (imxpriv->type == AHCI_IMX6Q) {
+ imxpriv->gpr = syscon_regmap_lookup_by_compatible(
+ "fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imxpriv->gpr)) {
+ dev_err(dev,
+ "failed to find fsl,imx6q-iomux-gpr regmap\n");
+ return PTR_ERR(imxpriv->gpr);
}
}
- imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
- if (IS_ERR(imxpriv->sata_ref_clk)) {
- dev_err(dev, "can't get sata_ref clock.\n");
- ret = PTR_ERR(imxpriv->sata_ref_clk);
- goto err_out;
- }
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ hpriv->plat_data = imxpriv;
- imxpriv->ahci_pdev = ahci_pdev;
- platform_set_drvdata(pdev, imxpriv);
+ ret = imx_sata_enable(hpriv);
+ if (ret)
+ return ret;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!mem || !irq) {
- dev_err(dev, "no mmio/irq resource\n");
- ret = -ENOMEM;
- goto err_out;
+ /*
+ * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
+ * and IP vendor specific register HOST_TIMER1MS.
+ * Configure CAP_SSS (support stagered spin up).
+ * Implement the port0.
+ * Get the ahb clock rate, and configure the TIMER1MS register.
+ */
+ reg_val = readl(hpriv->mmio + HOST_CAP);
+ if (!(reg_val & HOST_CAP_SSS)) {
+ reg_val |= HOST_CAP_SSS;
+ writel(reg_val, hpriv->mmio + HOST_CAP);
+ }
+ reg_val = readl(hpriv->mmio + HOST_PORTS_IMPL);
+ if (!(reg_val & 0x1)) {
+ reg_val |= 0x1;
+ writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL);
}
- res[0] = *mem;
- res[1] = *irq;
+ reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+ writel(reg_val, hpriv->mmio + HOST_TIMER1MS);
- ahci_dev->coherent_dma_mask = DMA_BIT_MASK(32);
- ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
- ahci_dev->of_node = dev->of_node;
+ ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0);
+ if (ret)
+ imx_sata_disable(hpriv);
- if (type == AHCI_IMX6Q) {
- imxpriv->gpr = syscon_regmap_lookup_by_compatible(
- "fsl,imx6q-iomuxc-gpr");
- if (IS_ERR(imxpriv->gpr)) {
- dev_err(dev,
- "failed to find fsl,imx6q-iomux-gpr regmap\n");
- ret = PTR_ERR(imxpriv->gpr);
- goto err_out;
- }
+ return ret;
+}
- /*
- * Set PHY Paremeters, two steps to configure the GPR13,
- * one write for rest of parameters, mask of first write
- * is 0x07fffffe, and the other one write for setting
- * the mpll_clk_en happens in imx_sata_clock_enable().
- */
- regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
- IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
- IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
- IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
- IMX6Q_GPR13_SATA_SPD_MODE_MASK |
- IMX6Q_GPR13_SATA_MPLL_SS_EN |
- IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
- IMX6Q_GPR13_SATA_TX_BOOST_MASK |
- IMX6Q_GPR13_SATA_TX_LVL_MASK |
- IMX6Q_GPR13_SATA_MPLL_CLK_EN |
- IMX6Q_GPR13_SATA_TX_EDGE_RATE,
- IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
- IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
- IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
- IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
- IMX6Q_GPR13_SATA_MPLL_SS_EN |
- IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
- IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
- IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
- }
+static void ahci_imx_host_stop(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
- ret = platform_device_add_resources(ahci_pdev, res, 2);
- if (ret)
- goto err_out;
+ imx_sata_disable(hpriv);
+}
- ret = platform_device_add_data(ahci_pdev, pdata, sizeof(*pdata));
- if (ret)
- goto err_out;
+#ifdef CONFIG_PM_SLEEP
+static int imx_ahci_suspend(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int ret;
- ret = platform_device_add(ahci_pdev);
- if (ret) {
-err_out:
- platform_device_put(ahci_pdev);
+ ret = ahci_platform_suspend_host(dev);
+ if (ret)
return ret;
- }
+
+ imx_sata_disable(hpriv);
return 0;
}
-static int imx_ahci_remove(struct platform_device *pdev)
+static int imx_ahci_resume(struct device *dev)
{
- struct imx_ahci_priv *imxpriv = platform_get_drvdata(pdev);
- struct platform_device *ahci_pdev = imxpriv->ahci_pdev;
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int ret;
- platform_device_unregister(ahci_pdev);
- return 0;
+ ret = imx_sata_enable(hpriv);
+ if (ret)
+ return ret;
+
+ return ahci_platform_resume_host(dev);
}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume);
static struct platform_driver imx_ahci_driver = {
.probe = imx_ahci_probe,
- .remove = imx_ahci_remove,
+ .remove = ata_platform_remove_one,
.driver = {
.name = "ahci-imx",
.owner = THIS_MODULE,
.of_match_table = imx_ahci_of_match,
+ .pm = &ahci_imx_pm_ops,
},
};
module_platform_driver(imx_ahci_driver);
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 4b231baceb09..ef67e79944f9 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -12,135 +12,36 @@
* any later version.
*/
-#include <linux/clk.h>
#include <linux/kernel.h>
-#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/pm.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
#include "ahci.h"
-static void ahci_host_stop(struct ata_host *host);
-
-enum ahci_type {
- AHCI, /* standard platform ahci */
- IMX53_AHCI, /* ahci on i.mx53 */
- STRICT_AHCI, /* delayed DMA engine start */
-};
-
-static struct platform_device_id ahci_devtype[] = {
- {
- .name = "ahci",
- .driver_data = AHCI,
- }, {
- .name = "imx53-ahci",
- .driver_data = IMX53_AHCI,
- }, {
- .name = "strict-ahci",
- .driver_data = STRICT_AHCI,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, ahci_devtype);
-
-struct ata_port_operations ahci_platform_ops = {
- .inherits = &ahci_ops,
- .host_stop = ahci_host_stop,
-};
-EXPORT_SYMBOL_GPL(ahci_platform_ops);
-
-static struct ata_port_operations ahci_platform_retry_srst_ops = {
- .inherits = &ahci_pmp_retry_srst_ops,
- .host_stop = ahci_host_stop,
-};
-
-static const struct ata_port_info ahci_port_info[] = {
- /* by features */
- [AHCI] = {
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_platform_ops,
- },
- [IMX53_AHCI] = {
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_platform_retry_srst_ops,
- },
- [STRICT_AHCI] = {
- AHCI_HFLAGS (AHCI_HFLAG_DELAY_ENGINE),
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_platform_ops,
- },
-};
-
-static struct scsi_host_template ahci_platform_sht = {
- AHCI_SHT("ahci_platform"),
+static const struct ata_port_info ahci_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
};
static int ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
- const struct platform_device_id *id = platform_get_device_id(pdev);
- struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
- const struct ata_port_info *ppi[] = { &pi, NULL };
struct ahci_host_priv *hpriv;
- struct ata_host *host;
- struct resource *mem;
- int irq;
- int n_ports;
- int i;
int rc;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(dev, "no mmio space\n");
- return -EINVAL;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "no irq\n");
- return -EINVAL;
- }
-
- if (pdata && pdata->ata_port_info)
- pi = *pdata->ata_port_info;
-
- hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
- if (!hpriv) {
- dev_err(dev, "can't alloc ahci_host_priv\n");
- return -ENOMEM;
- }
-
- hpriv->flags |= (unsigned long)pi.private_data;
-
- hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
- if (!hpriv->mmio) {
- dev_err(dev, "can't map %pR\n", mem);
- return -ENOMEM;
- }
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
- hpriv->clk = clk_get(dev, NULL);
- if (IS_ERR(hpriv->clk)) {
- dev_err(dev, "can't get clock\n");
- } else {
- rc = clk_prepare_enable(hpriv->clk);
- if (rc) {
- dev_err(dev, "clock prepare enable failed");
- goto free_clk;
- }
- }
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
/*
* Some platforms might need to prepare for mmio region access,
@@ -151,69 +52,10 @@ static int ahci_probe(struct platform_device *pdev)
if (pdata && pdata->init) {
rc = pdata->init(dev, hpriv->mmio);
if (rc)
- goto disable_unprepare_clk;
- }
-
- ahci_save_initial_config(dev, hpriv,
- pdata ? pdata->force_port_map : 0,
- pdata ? pdata->mask_port_map : 0);
-
- /* prepare host */
- if (hpriv->cap & HOST_CAP_NCQ)
- pi.flags |= ATA_FLAG_NCQ;
-
- if (hpriv->cap & HOST_CAP_PMP)
- pi.flags |= ATA_FLAG_PMP;
-
- ahci_set_em_messages(hpriv, &pi);
-
- /* CAP.NP sometimes indicate the index of the last enabled
- * port, at other times, that of the last possible port, so
- * determining the maximum port number requires looking at
- * both CAP.NP and port_map.
- */
- n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
-
- host = ata_host_alloc_pinfo(dev, ppi, n_ports);
- if (!host) {
- rc = -ENOMEM;
- goto pdata_exit;
- }
-
- host->private_data = hpriv;
-
- if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
- host->flags |= ATA_HOST_PARALLEL_SCAN;
- else
- dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
-
- if (pi.flags & ATA_FLAG_EM)
- ahci_reset_em(host);
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- ata_port_desc(ap, "mmio %pR", mem);
- ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
-
- /* set enclosure management message type */
- if (ap->flags & ATA_FLAG_EM)
- ap->em_message_type = hpriv->em_msg_type;
-
- /* disabled/not-implemented port */
- if (!(hpriv->port_map & (1 << i)))
- ap->ops = &ata_dummy_port_ops;
+ goto disable_resources;
}
- rc = ahci_reset_controller(host);
- if (rc)
- goto pdata_exit;
-
- ahci_init_controller(host);
- ahci_print_info(host, "platform");
-
- rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
- &ahci_platform_sht);
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, 0, 0);
if (rc)
goto pdata_exit;
@@ -221,115 +63,19 @@ static int ahci_probe(struct platform_device *pdev)
pdata_exit:
if (pdata && pdata->exit)
pdata->exit(dev);
-disable_unprepare_clk:
- if (!IS_ERR(hpriv->clk))
- clk_disable_unprepare(hpriv->clk);
-free_clk:
- if (!IS_ERR(hpriv->clk))
- clk_put(hpriv->clk);
- return rc;
-}
-
-static void ahci_host_stop(struct ata_host *host)
-{
- struct device *dev = host->dev;
- struct ahci_platform_data *pdata = dev_get_platdata(dev);
- struct ahci_host_priv *hpriv = host->private_data;
-
- if (pdata && pdata->exit)
- pdata->exit(dev);
-
- if (!IS_ERR(hpriv->clk)) {
- clk_disable_unprepare(hpriv->clk);
- clk_put(hpriv->clk);
- }
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ahci_suspend(struct device *dev)
-{
- struct ahci_platform_data *pdata = dev_get_platdata(dev);
- struct ata_host *host = dev_get_drvdata(dev);
- struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = hpriv->mmio;
- u32 ctl;
- int rc;
-
- if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
- dev_err(dev, "firmware update required for suspend/resume\n");
- return -EIO;
- }
-
- /*
- * AHCI spec rev1.1 section 8.3.3:
- * Software must disable interrupts prior to requesting a
- * transition of the HBA to D3 state.
- */
- ctl = readl(mmio + HOST_CTL);
- ctl &= ~HOST_IRQ_EN;
- writel(ctl, mmio + HOST_CTL);
- readl(mmio + HOST_CTL); /* flush */
-
- rc = ata_host_suspend(host, PMSG_SUSPEND);
- if (rc)
- return rc;
-
- if (pdata && pdata->suspend)
- return pdata->suspend(dev);
-
- if (!IS_ERR(hpriv->clk))
- clk_disable_unprepare(hpriv->clk);
-
- return 0;
-}
-
-static int ahci_resume(struct device *dev)
-{
- struct ahci_platform_data *pdata = dev_get_platdata(dev);
- struct ata_host *host = dev_get_drvdata(dev);
- struct ahci_host_priv *hpriv = host->private_data;
- int rc;
-
- if (!IS_ERR(hpriv->clk)) {
- rc = clk_prepare_enable(hpriv->clk);
- if (rc) {
- dev_err(dev, "clock prepare enable failed");
- return rc;
- }
- }
-
- if (pdata && pdata->resume) {
- rc = pdata->resume(dev);
- if (rc)
- goto disable_unprepare_clk;
- }
-
- if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
- rc = ahci_reset_controller(host);
- if (rc)
- goto disable_unprepare_clk;
-
- ahci_init_controller(host);
- }
-
- ata_host_resume(host);
-
- return 0;
-
-disable_unprepare_clk:
- if (!IS_ERR(hpriv->clk))
- clk_disable_unprepare(hpriv->clk);
-
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
return rc;
}
-#endif
-static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+ ahci_platform_resume);
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "snps,spear-ahci", },
{ .compatible = "snps,exynos5440-ahci", },
{ .compatible = "ibm,476gtr-ahci", },
+ { .compatible = "snps,dwc-ahci", },
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
@@ -343,7 +89,6 @@ static struct platform_driver ahci_driver = {
.of_match_table = ahci_of_match,
.pm = &ahci_pm_ops,
},
- .id_table = ahci_devtype,
};
module_platform_driver(ahci_driver);
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
new file mode 100644
index 000000000000..633222226c19
--- /dev/null
+++ b/drivers/ata/ahci_st.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2012 STMicroelectronics Limited
+ *
+ * Authors: Francesco Virlinzi <francesco.virlinzi@st.com>
+ * Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/ahci_platform.h>
+#include <linux/libata.h>
+#include <linux/reset.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+
+#include "ahci.h"
+
+#define ST_AHCI_OOBR 0xbc
+#define ST_AHCI_OOBR_WE BIT(31)
+#define ST_AHCI_OOBR_CWMIN_SHIFT 24
+#define ST_AHCI_OOBR_CWMAX_SHIFT 16
+#define ST_AHCI_OOBR_CIMIN_SHIFT 8
+#define ST_AHCI_OOBR_CIMAX_SHIFT 0
+
+struct st_ahci_drv_data {
+ struct platform_device *ahci;
+ struct reset_control *pwr;
+ struct reset_control *sw_rst;
+ struct reset_control *pwr_rst;
+ struct ahci_host_priv *hpriv;
+};
+
+static void st_ahci_configure_oob(void __iomem *mmio)
+{
+ unsigned long old_val, new_val;
+
+ new_val = (0x02 << ST_AHCI_OOBR_CWMIN_SHIFT) |
+ (0x04 << ST_AHCI_OOBR_CWMAX_SHIFT) |
+ (0x08 << ST_AHCI_OOBR_CIMIN_SHIFT) |
+ (0x0C << ST_AHCI_OOBR_CIMAX_SHIFT);
+
+ old_val = readl(mmio + ST_AHCI_OOBR);
+ writel(old_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR);
+ writel(new_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR);
+ writel(new_val, mmio + ST_AHCI_OOBR);
+}
+
+static int st_ahci_deassert_resets(struct device *dev)
+{
+ struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+ int err;
+
+ if (drv_data->pwr) {
+ err = reset_control_deassert(drv_data->pwr);
+ if (err) {
+ dev_err(dev, "unable to bring out of pwrdwn\n");
+ return err;
+ }
+ }
+
+ st_ahci_configure_oob(drv_data->hpriv->mmio);
+
+ if (drv_data->sw_rst) {
+ err = reset_control_deassert(drv_data->sw_rst);
+ if (err) {
+ dev_err(dev, "unable to bring out of sw-rst\n");
+ return err;
+ }
+ }
+
+ if (drv_data->pwr_rst) {
+ err = reset_control_deassert(drv_data->pwr_rst);
+ if (err) {
+ dev_err(dev, "unable to bring out of pwr-rst\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void st_ahci_host_stop(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct device *dev = host->dev;
+ struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+ int err;
+
+ if (drv_data->pwr) {
+ err = reset_control_assert(drv_data->pwr);
+ if (err)
+ dev_err(dev, "unable to pwrdwn\n");
+ }
+
+ ahci_platform_disable_resources(hpriv);
+}
+
+static int st_ahci_probe_resets(struct platform_device *pdev)
+{
+ struct st_ahci_drv_data *drv_data = platform_get_drvdata(pdev);
+
+ drv_data->pwr = devm_reset_control_get(&pdev->dev, "pwr-dwn");
+ if (IS_ERR(drv_data->pwr)) {
+ dev_info(&pdev->dev, "power reset control not defined\n");
+ drv_data->pwr = NULL;
+ }
+
+ drv_data->sw_rst = devm_reset_control_get(&pdev->dev, "sw-rst");
+ if (IS_ERR(drv_data->sw_rst)) {
+ dev_info(&pdev->dev, "soft reset control not defined\n");
+ drv_data->sw_rst = NULL;
+ }
+
+ drv_data->pwr_rst = devm_reset_control_get(&pdev->dev, "pwr-rst");
+ if (IS_ERR(drv_data->pwr_rst)) {
+ dev_dbg(&pdev->dev, "power soft reset control not defined\n");
+ drv_data->pwr_rst = NULL;
+ }
+
+ return st_ahci_deassert_resets(&pdev->dev);
+}
+
+static struct ata_port_operations st_ahci_port_ops = {
+ .inherits = &ahci_platform_ops,
+ .host_stop = st_ahci_host_stop,
+};
+
+static const struct ata_port_info st_ahci_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &st_ahci_port_ops,
+};
+
+static int st_ahci_probe(struct platform_device *pdev)
+{
+ struct st_ahci_drv_data *drv_data;
+ struct ahci_host_priv *hpriv;
+ int err;
+
+ drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, drv_data);
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ drv_data->hpriv = hpriv;
+
+ err = st_ahci_probe_resets(pdev);
+ if (err)
+ return err;
+
+ err = ahci_platform_enable_resources(hpriv);
+ if (err)
+ return err;
+
+ err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, 0, 0);
+ if (err) {
+ ahci_platform_disable_resources(hpriv);
+ return err;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int st_ahci_suspend(struct device *dev)
+{
+ struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = drv_data->hpriv;
+ int err;
+
+ err = ahci_platform_suspend_host(dev);
+ if (err)
+ return err;
+
+ if (drv_data->pwr) {
+ err = reset_control_assert(drv_data->pwr);
+ if (err) {
+ dev_err(dev, "unable to pwrdwn");
+ return err;
+ }
+ }
+
+ ahci_platform_disable_resources(hpriv);
+
+ return 0;
+}
+
+static int st_ahci_resume(struct device *dev)
+{
+ struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = drv_data->hpriv;
+ int err;
+
+ err = ahci_platform_enable_resources(hpriv);
+ if (err)
+ return err;
+
+ err = st_ahci_deassert_resets(dev);
+ if (err) {
+ ahci_platform_disable_resources(hpriv);
+ return err;
+ }
+
+ return ahci_platform_resume_host(dev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
+
+static struct of_device_id st_ahci_match[] = {
+ { .compatible = "st,ahci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_ahci_match);
+
+static struct platform_driver st_ahci_driver = {
+ .driver = {
+ .name = "st_ahci",
+ .owner = THIS_MODULE,
+ .pm = &st_ahci_pm_ops,
+ .of_match_table = of_match_ptr(st_ahci_match),
+ },
+ .probe = st_ahci_probe,
+ .remove = ata_platform_remove_one,
+};
+module_platform_driver(st_ahci_driver);
+
+MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@st.com>");
+MODULE_AUTHOR("Francesco Virlinzi <francesco.virlinzi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SATA AHCI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
new file mode 100644
index 000000000000..42d3f64e74b3
--- /dev/null
+++ b/drivers/ata/ahci_sunxi.c
@@ -0,0 +1,249 @@
+/*
+ * Allwinner sunxi AHCI SATA platform driver
+ * Copyright 2013 Olliver Schinagl <oliver@schinagl.nl>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ * Based on code from Allwinner Technology Co., Ltd. <www.allwinnertech.com>,
+ * Daniel Wang <danielwang@allwinnertech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include "ahci.h"
+
+#define AHCI_BISTAFR 0x00a0
+#define AHCI_BISTCR 0x00a4
+#define AHCI_BISTFCTR 0x00a8
+#define AHCI_BISTSR 0x00ac
+#define AHCI_BISTDECR 0x00b0
+#define AHCI_DIAGNR0 0x00b4
+#define AHCI_DIAGNR1 0x00b8
+#define AHCI_OOBR 0x00bc
+#define AHCI_PHYCS0R 0x00c0
+#define AHCI_PHYCS1R 0x00c4
+#define AHCI_PHYCS2R 0x00c8
+#define AHCI_TIMER1MS 0x00e0
+#define AHCI_GPARAM1R 0x00e8
+#define AHCI_GPARAM2R 0x00ec
+#define AHCI_PPARAMR 0x00f0
+#define AHCI_TESTR 0x00f4
+#define AHCI_VERSIONR 0x00f8
+#define AHCI_IDR 0x00fc
+#define AHCI_RWCR 0x00fc
+#define AHCI_P0DMACR 0x0170
+#define AHCI_P0PHYCR 0x0178
+#define AHCI_P0PHYSR 0x017c
+
+static void sunxi_clrbits(void __iomem *reg, u32 clr_val)
+{
+ u32 reg_val;
+
+ reg_val = readl(reg);
+ reg_val &= ~(clr_val);
+ writel(reg_val, reg);
+}
+
+static void sunxi_setbits(void __iomem *reg, u32 set_val)
+{
+ u32 reg_val;
+
+ reg_val = readl(reg);
+ reg_val |= set_val;
+ writel(reg_val, reg);
+}
+
+static void sunxi_clrsetbits(void __iomem *reg, u32 clr_val, u32 set_val)
+{
+ u32 reg_val;
+
+ reg_val = readl(reg);
+ reg_val &= ~(clr_val);
+ reg_val |= set_val;
+ writel(reg_val, reg);
+}
+
+static u32 sunxi_getbits(void __iomem *reg, u8 mask, u8 shift)
+{
+ return (readl(reg) >> shift) & mask;
+}
+
+static int ahci_sunxi_phy_init(struct device *dev, void __iomem *reg_base)
+{
+ u32 reg_val;
+ int timeout;
+
+ /* This magic is from the original code */
+ writel(0, reg_base + AHCI_RWCR);
+ msleep(5);
+
+ sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(19));
+ sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
+ (0x7 << 24),
+ (0x5 << 24) | BIT(23) | BIT(18));
+ sunxi_clrsetbits(reg_base + AHCI_PHYCS1R,
+ (0x3 << 16) | (0x1f << 8) | (0x3 << 6),
+ (0x2 << 16) | (0x6 << 8) | (0x2 << 6));
+ sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(28) | BIT(15));
+ sunxi_clrbits(reg_base + AHCI_PHYCS1R, BIT(19));
+ sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
+ (0x7 << 20), (0x3 << 20));
+ sunxi_clrsetbits(reg_base + AHCI_PHYCS2R,
+ (0x1f << 5), (0x19 << 5));
+ msleep(5);
+
+ sunxi_setbits(reg_base + AHCI_PHYCS0R, (0x1 << 19));
+
+ timeout = 250; /* Power up takes approx 50 us */
+ do {
+ reg_val = sunxi_getbits(reg_base + AHCI_PHYCS0R, 0x7, 28);
+ if (reg_val == 0x02)
+ break;
+
+ if (--timeout == 0) {
+ dev_err(dev, "PHY power up failed.\n");
+ return -EIO;
+ }
+ udelay(1);
+ } while (1);
+
+ sunxi_setbits(reg_base + AHCI_PHYCS2R, (0x1 << 24));
+
+ timeout = 100; /* Calibration takes approx 10 us */
+ do {
+ reg_val = sunxi_getbits(reg_base + AHCI_PHYCS2R, 0x1, 24);
+ if (reg_val == 0x00)
+ break;
+
+ if (--timeout == 0) {
+ dev_err(dev, "PHY calibration failed.\n");
+ return -EIO;
+ }
+ udelay(1);
+ } while (1);
+
+ msleep(15);
+
+ writel(0x7, reg_base + AHCI_RWCR);
+
+ return 0;
+}
+
+static void ahci_sunxi_start_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ /* Set up DMA before starting the DMA engine */
+ sunxi_clrsetbits(hpriv->mmio + AHCI_P0DMACR, 0x0000ff00, 0x00004400);
+
+ /* Start DMA */
+ sunxi_setbits(port_mmio + PORT_CMD, PORT_CMD_START);
+}
+
+static const struct ata_port_info ahci_sunxi_port_info = {
+ AHCI_HFLAGS(AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
+ AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ),
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
+};
+
+static int ahci_sunxi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_host_priv *hpriv;
+ int rc;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ hpriv->start_engine = ahci_sunxi_start_engine;
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_sunxi_phy_init(dev, hpriv->mmio);
+ if (rc)
+ goto disable_resources;
+
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info, 0, 0);
+ if (rc)
+ goto disable_resources;
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_sunxi_resume(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_sunxi_phy_init(dev, hpriv->mmio);
+ if (rc)
+ goto disable_resources;
+
+ rc = ahci_platform_resume_host(dev);
+ if (rc)
+ goto disable_resources;
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return rc;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_sunxi_pm_ops, ahci_platform_suspend,
+ ahci_sunxi_resume);
+
+static const struct of_device_id ahci_sunxi_of_match[] = {
+ { .compatible = "allwinner,sun4i-a10-ahci", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
+
+static struct platform_driver ahci_sunxi_driver = {
+ .probe = ahci_sunxi_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "ahci-sunxi",
+ .owner = THIS_MODULE,
+ .of_match_table = ahci_sunxi_of_match,
+ .pm = &ahci_sunxi_pm_ops,
+ },
+};
+module_platform_driver(ahci_sunxi_driver);
+
+MODULE_DESCRIPTION("Allwinner sunxi AHCI SATA driver");
+MODULE_AUTHOR("Olliver Schinagl <oliver@schinagl.nl>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
new file mode 100644
index 000000000000..77c89bf171f1
--- /dev/null
+++ b/drivers/ata/ahci_xgene.c
@@ -0,0 +1,486 @@
+/*
+ * AppliedMicro X-Gene SoC SATA Host Controller Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Loc Ho <lho@apm.com>
+ * Tuan Phan <tphan@apm.com>
+ * Suman Tripathi <stripathi@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * NOTE: PM support is not currently available.
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ahci_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/phy/phy.h>
+#include "ahci.h"
+
+/* Max # of disks per controller */
+#define MAX_AHCI_CHN_PERCTR 2
+
+/* MUX CSR */
+#define SATA_ENET_CONFIG_REG 0x00000000
+#define CFG_SATA_ENET_SELECT_MASK 0x00000001
+
+/* SATA core host controller CSR */
+#define SLVRDERRATTRIBUTES 0x00000000
+#define SLVWRERRATTRIBUTES 0x00000004
+#define MSTRDERRATTRIBUTES 0x00000008
+#define MSTWRERRATTRIBUTES 0x0000000c
+#define BUSCTLREG 0x00000014
+#define IOFMSTRWAUX 0x00000018
+#define INTSTATUSMASK 0x0000002c
+#define ERRINTSTATUS 0x00000030
+#define ERRINTSTATUSMASK 0x00000034
+
+/* SATA host AHCI CSR */
+#define PORTCFG 0x000000a4
+#define PORTADDR_SET(dst, src) \
+ (((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
+#define PORTPHY1CFG 0x000000a8
+#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
+ (((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
+#define PORTPHY2CFG 0x000000ac
+#define PORTPHY3CFG 0x000000b0
+#define PORTPHY4CFG 0x000000b4
+#define PORTPHY5CFG 0x000000b8
+#define SCTL0 0x0000012C
+#define PORTPHY5CFG_RTCHG_SET(dst, src) \
+ (((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
+#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
+ (((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
+#define PORTAXICFG 0x000000bc
+#define PORTAXICFG_OUTTRANS_SET(dst, src) \
+ (((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
+
+/* SATA host controller AXI CSR */
+#define INT_SLV_TMOMASK 0x00000010
+
+/* SATA diagnostic CSR */
+#define CFG_MEM_RAM_SHUTDOWN 0x00000070
+#define BLOCK_MEM_RDY 0x00000074
+
+struct xgene_ahci_context {
+ struct ahci_host_priv *hpriv;
+ struct device *dev;
+ void __iomem *csr_core; /* Core CSR address of IP */
+ void __iomem *csr_diag; /* Diag CSR address of IP */
+ void __iomem *csr_axi; /* AXI CSR address of IP */
+ void __iomem *csr_mux; /* MUX CSR address of IP */
+};
+
+static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
+{
+ dev_dbg(ctx->dev, "Release memory from shutdown\n");
+ writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
+ readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
+ msleep(1); /* reset may take up to 1ms */
+ if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
+ dev_err(ctx->dev, "failed to release memory from shutdown\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/**
+ * xgene_ahci_read_id - Read ID data from the specified device
+ * @dev: device
+ * @tf: proposed taskfile
+ * @id: data buffer
+ *
+ * This custom read ID function is required because the HW does not
+ * support DEVSLP and the controller state machine may get stuck after
+ * processing the ID query command.
+ */
+static unsigned int xgene_ahci_read_id(struct ata_device *dev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ u32 err_mask;
+ void __iomem *port_mmio = ahci_port_base(dev->link->ap);
+
+ err_mask = ata_do_dev_read_id(dev, tf, id);
+ if (err_mask)
+ return err_mask;
+
+ /*
+ * Mask reserved area. Word 78 (Link Power Management):
+ * bit15-8: reserved
+ * bit7: NCQ autosense
+ * bit6: Software settings preservation supported
+ * bit5: reserved
+ * bit4: In-order SATA delivery supported
+ * bit3: DIPM requests supported
+ * bit2: DMA Setup FIS Auto-Activate optimization supported
+ * bit1: DMA Setup FIS non-zero buffer offsets supported
+ * bit0: Reserved
+ *
+ * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
+ */
+ id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
+
+ /*
+ * Due to HW errata, restart the port if no other command active.
+ * Otherwise the controller may get stuck.
+ */
+ if (!readl(port_mmio + PORT_CMD_ISSUE)) {
+ writel(PORT_CMD_FIS_RX, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* Force a barrier */
+ writel(PORT_CMD_FIS_RX | PORT_CMD_START, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* Force a barrier */
+ }
+ return 0;
+}
+
+static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
+{
+ void __iomem *mmio = ctx->hpriv->mmio;
+ u32 val;
+
+ dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
+ mmio, channel);
+ val = readl(mmio + PORTCFG);
+ val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
+ writel(val, mmio + PORTCFG);
+ readl(mmio + PORTCFG); /* Force a barrier */
+ /* Disable fixed rate */
+ writel(0x0001fffe, mmio + PORTPHY1CFG);
+ readl(mmio + PORTPHY1CFG); /* Force a barrier */
+ writel(0x5018461c, mmio + PORTPHY2CFG);
+ readl(mmio + PORTPHY2CFG); /* Force a barrier */
+ writel(0x1c081907, mmio + PORTPHY3CFG);
+ readl(mmio + PORTPHY3CFG); /* Force a barrier */
+ writel(0x1c080815, mmio + PORTPHY4CFG);
+ readl(mmio + PORTPHY4CFG); /* Force a barrier */
+ /* Set window negotiation */
+ val = readl(mmio + PORTPHY5CFG);
+ val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
+ writel(val, mmio + PORTPHY5CFG);
+ readl(mmio + PORTPHY5CFG); /* Force a barrier */
+ val = readl(mmio + PORTAXICFG);
+ val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
+ val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
+ writel(val, mmio + PORTAXICFG);
+ readl(mmio + PORTAXICFG); /* Force a barrier */
+}
+
+/**
+ * xgene_ahci_do_hardreset - Issue the actual COMRESET
+ * @link: link to reset
+ * @deadline: deadline jiffies for the operation
+ * @online: Return value to indicate if device online
+ *
+ * Due to a limitation of the hardware PHY, a different set of settings is
+ * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
+ * and Gen1 (1.5Gbps). Otherwise, during long IO stress tests, the PHY will
+ * report disparity errors and the like. In addition, during COMRESET, errors
+ * can be reported in the PORT_SCR_ERR register. For SERR_DISPARITY and
+ * SERR_10B_8B_ERR, the PHY receiver line must be reset. The following
+ * algorithm is used to properly configure the hardware PHY during COMRESET:
+ *
+ * Alg Part 1:
+ * 1. Start the PHY at Gen3 speed (default setting)
+ * 2. Issue the COMRESET
+ * 3. If no link, go to Alg Part 3
+ * 4. If the link is up, determine whether the negotiated speed matches the
+ * configured PHY speed
+ * 5. If they match, go to Alg Part 2
+ * 6. If they do not match and this is the first attempt, configure the PHY
+ * for the negotiated disk speed and repeat step 2
+ * 7. Go to Alg Part 2
+ *
+ * Alg Part 2:
+ * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR error
+ * reported in the register PORT_SCR_ERR, then reset the PHY receiver line
+ * 2. Go to Alg Part 3
+ *
+ * Alg Part 3:
+ * 1. Clear any pending errors from the PORT_SCR_ERR register.
+ *
+ * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition,
+ * until the underlying PHY supports a method to reset the receiver line, a
+ * warning message will be printed when SERR_DISPARITY or SERR_10B_8B_ERR
+ * errors are detected.
+ */
+static int xgene_ahci_do_hardreset(struct ata_link *link,
+ unsigned long deadline, bool *online)
+{
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct xgene_ahci_context *ctx = hpriv->plat_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_taskfile tf;
+ int rc;
+ u32 val;
+
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
+ tf.command = ATA_BUSY;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+ rc = sata_link_hardreset(link, timing, deadline, online,
+ ahci_check_ready);
+
+ val = readl(port_mmio + PORT_SCR_ERR);
+ if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
+ dev_warn(ctx->dev, "link has error\n");
+
+ /* clear all errors if any pending */
+ val = readl(port_mmio + PORT_SCR_ERR);
+ writel(val, port_mmio + PORT_SCR_ERR);
+
+ return rc;
+}
+
+static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ bool online;
+ int rc;
+ u32 portcmd_saved;
+ u32 portclb_saved;
+ u32 portclbhi_saved;
+ u32 portrxfis_saved;
+ u32 portrxfishi_saved;
+
+ /* As hardreset resets these CSRs, save them so they can be restored later */
+ portcmd_saved = readl(port_mmio + PORT_CMD);
+ portclb_saved = readl(port_mmio + PORT_LST_ADDR);
+ portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
+ portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
+ portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
+
+ ahci_stop_engine(ap);
+
+ rc = xgene_ahci_do_hardreset(link, deadline, &online);
+
+ /* As controller hardreset clears them, restore them */
+ writel(portcmd_saved, port_mmio + PORT_CMD);
+ writel(portclb_saved, port_mmio + PORT_LST_ADDR);
+ writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
+ writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
+ writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);
+
+ hpriv->start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+
+ return rc;
+}
+
+static void xgene_ahci_host_stop(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ ahci_platform_disable_resources(hpriv);
+}
+
+static struct ata_port_operations xgene_ahci_ops = {
+ .inherits = &ahci_ops,
+ .host_stop = xgene_ahci_host_stop,
+ .hardreset = xgene_ahci_hardreset,
+ .read_id = xgene_ahci_read_id,
+};
+
+static const struct ata_port_info xgene_ahci_port_info = {
+ AHCI_HFLAGS(AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ),
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &xgene_ahci_ops,
+};
+
+static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
+{
+ struct xgene_ahci_context *ctx = hpriv->plat_data;
+ int i;
+ int rc;
+ u32 val;
+
+ /* Bring the IP RAM out of shutdown */
+ rc = xgene_ahci_init_memram(ctx);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
+ xgene_ahci_set_phy_cfg(ctx, i);
+
+ /* AXI disable Mask */
+ writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
+ readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
+ writel(0, ctx->csr_core + INTSTATUSMASK);
+ val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
+ dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
+ INTSTATUSMASK, val);
+
+ writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
+ readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
+ writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
+ readl(ctx->csr_axi + INT_SLV_TMOMASK);
+
+ /* Enable AXI Interrupt */
+ writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
+ writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
+ writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
+ writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);
+
+ /* Enable coherency */
+ val = readl(ctx->csr_core + BUSCTLREG);
+ val &= ~0x00000002; /* Enable write coherency */
+ val &= ~0x00000001; /* Enable read coherency */
+ writel(val, ctx->csr_core + BUSCTLREG);
+
+ val = readl(ctx->csr_core + IOFMSTRWAUX);
+ val |= (1 << 3); /* Enable read coherency */
+ val |= (1 << 9); /* Enable write coherency */
+ writel(val, ctx->csr_core + IOFMSTRWAUX);
+ val = readl(ctx->csr_core + IOFMSTRWAUX);
+ dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
+ IOFMSTRWAUX, val);
+
+ return rc;
+}
+
+static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
+{
+ u32 val;
+
+ /* Check for optional MUX resource */
+ if (IS_ERR(ctx->csr_mux))
+ return 0;
+
+ val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
+ val &= ~CFG_SATA_ENET_SELECT_MASK;
+ writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
+ val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
+ return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
+}
+
+static int xgene_ahci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_host_priv *hpriv;
+ struct xgene_ahci_context *ctx;
+ struct resource *res;
+ int rc;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ hpriv->plat_data = ctx;
+ ctx->hpriv = hpriv;
+ ctx->dev = dev;
+
+ /* Retrieve the IP core resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ctx->csr_core = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctx->csr_core))
+ return PTR_ERR(ctx->csr_core);
+
+ /* Retrieve the IP diagnostic resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ ctx->csr_diag = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctx->csr_diag))
+ return PTR_ERR(ctx->csr_diag);
+
+ /* Retrieve the IP AXI resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ ctx->csr_axi = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctx->csr_axi))
+ return PTR_ERR(ctx->csr_axi);
+
+ /* Retrieve the optional IP mux resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+ ctx->csr_mux = devm_ioremap_resource(dev, res);
+
+ dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
+ hpriv->mmio);
+
+ /* Select ATA */
+ if ((rc = xgene_ahci_mux_select(ctx))) {
+ dev_err(dev, "SATA mux selection failed error %d\n", rc);
+ return -ENODEV;
+ }
+
+ /* Due to errata, HW requires full toggle transition */
+ rc = ahci_platform_enable_clks(hpriv);
+ if (rc)
+ goto disable_resources;
+ ahci_platform_disable_clks(hpriv);
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ goto disable_resources;
+
+ /* Configure the host controller */
+ xgene_ahci_hw_init(hpriv);
+
+ /*
+ * Set up the DMA mask. This is preliminary until the DMA range is sorted
+ * out.
+ */
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(dev, "Unable to set dma mask\n");
+ goto disable_resources;
+ }
+
+ rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info, 0, 0);
+ if (rc)
+ goto disable_resources;
+
+ dev_dbg(dev, "X-Gene SATA host controller initialized\n");
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return rc;
+}
+
+static const struct of_device_id xgene_ahci_of_match[] = {
+ {.compatible = "apm,xgene-ahci"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);
+
+static struct platform_driver xgene_ahci_driver = {
+ .probe = xgene_ahci_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "xgene-ahci",
+ .owner = THIS_MODULE,
+ .of_match_table = xgene_ahci_of_match,
+ },
+};
+
+module_platform_driver(xgene_ahci_driver);
+
+MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
+MODULE_AUTHOR("Loc Ho <lho@apm.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.4");
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 7d196656adb5..9498a7d3846f 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 36605abe5a67..6bd4f660b4e1 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -35,7 +35,6 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -394,6 +393,9 @@ static ssize_t ahci_show_em_supported(struct device *dev,
*
* If inconsistent, config values are fixed up by this function.
*
+ * If it is not set already this function sets hpriv->start_engine to
+ * ahci_start_engine.
+ *
* LOCKING:
* None.
*/
@@ -500,6 +502,9 @@ void ahci_save_initial_config(struct device *dev,
hpriv->cap = cap;
hpriv->cap2 = cap2;
hpriv->port_map = port_map;
+
+ if (!hpriv->start_engine)
+ hpriv->start_engine = ahci_start_engine;
}
EXPORT_SYMBOL_GPL(ahci_save_initial_config);
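The hunks above and below make engine start a per-host hook: ahci_save_initial_config() installs the generic ahci_start_engine() only when the glue driver has not set hpriv->start_engine itself, and every former direct call now goes through the hook. As a hedged illustration (the "my_" name is hypothetical; ahci_sunxi_probe() earlier in this patch does the equivalent with ahci_sunxi_start_engine):

/*
 * Sketch only: a hypothetical SoC-specific start_engine hook.  A glue
 * driver assigns it right after ahci_platform_get_resources(), e.g.
 * "hpriv->start_engine = my_start_engine;", before the host is brought up,
 * otherwise the default ahci_start_engine() is installed.
 */
static void my_start_engine(struct ata_port *ap)
{
	/* SoC-specific DMA or PHY setup could go here first ... */
	ahci_start_engine(ap);	/* then the generic engine start */
}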
@@ -766,7 +771,7 @@ static void ahci_start_port(struct ata_port *ap)
/* enable DMA */
if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
/* turn on LEDs */
if (ap->flags & ATA_FLAG_EM) {
@@ -1032,12 +1037,13 @@ static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
size_t size)
{
- int state;
+ unsigned int state;
int pmp;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp;
- state = simple_strtoul(buf, NULL, 0);
+ if (kstrtouint(buf, 0, &state) < 0)
+ return -EINVAL;
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
@@ -1234,7 +1240,7 @@ int ahci_kick_engine(struct ata_port *ap)
/* restart engine */
out_restart:
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_kick_engine);
@@ -1387,8 +1393,8 @@ static int ahci_bad_pmp_check_ready(struct ata_link *link)
return ata_check_ready(status);
}
-int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
+static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
{
struct ata_port *ap = link->ap;
void __iomem *port_mmio = ahci_port_base(ap);
@@ -1426,6 +1432,7 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class,
const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
bool online;
@@ -1443,7 +1450,7 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class,
rc = sata_link_hardreset(link, timing, deadline, &online,
ahci_check_ready);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
if (online)
*class = ahci_dev_classify(ap);
@@ -1629,7 +1636,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
}
if (irq_stat & PORT_IRQ_UNK_FIS) {
- u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
+ u32 *unk = pp->rx_fis + RX_FIS_UNK;
active_ehi->err_mask |= AC_ERR_HSM;
active_ehi->action |= ATA_EH_RESET;
@@ -2007,10 +2014,12 @@ static void ahci_thaw(struct ata_port *ap)
void ahci_error_handler(struct ata_port *ap)
{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
/* restart engine */
ahci_stop_engine(ap);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
}
sata_pmp_error_handler(ap);
@@ -2031,6 +2040,7 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_device *dev = ap->link.device;
u32 devslp, dm, dito, mdat, deto;
@@ -2094,7 +2104,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
PORT_DEVSLP_ADSE);
writel(devslp, port_mmio + PORT_DEVSLP);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
/* enable device sleep feature for the drive */
err_mask = ata_dev_set_feature(dev,
@@ -2106,6 +2116,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
static void ahci_enable_fbs(struct ata_port *ap)
{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 fbs;
@@ -2134,11 +2145,12 @@ static void ahci_enable_fbs(struct ata_port *ap)
} else
dev_err(ap->host->dev, "Failed to enable FBS\n");
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
}
static void ahci_disable_fbs(struct ata_port *ap)
{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 fbs;
@@ -2166,7 +2178,7 @@ static void ahci_disable_fbs(struct ata_port *ap)
pp->fbs_enabled = false;
}
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
}
static void ahci_pmp_attach(struct ata_port *ap)
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
new file mode 100644
index 000000000000..7cb3a85719c0
--- /dev/null
+++ b/drivers/ata/libahci_platform.c
@@ -0,0 +1,541 @@
+/*
+ * AHCI SATA platform library
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ * Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010 MontaVista Software, LLC.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+#include "ahci.h"
+
+static void ahci_host_stop(struct ata_host *host);
+
+struct ata_port_operations ahci_platform_ops = {
+ .inherits = &ahci_ops,
+ .host_stop = ahci_host_stop,
+};
+EXPORT_SYMBOL_GPL(ahci_platform_ops);
+
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT("ahci_platform"),
+};
+
+/**
+ * ahci_platform_enable_clks - Enable platform clocks
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all the clks found in hpriv->clks, starting at
+ * index 0. If any clk fails to enable, it disables all the clks already
+ * enabled in reverse order, and then returns an error.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_clks(struct ahci_host_priv *hpriv)
+{
+ int c, rc;
+
+ for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) {
+ rc = clk_prepare_enable(hpriv->clks[c]);
+ if (rc)
+ goto disable_unprepare_clk;
+ }
+ return 0;
+
+disable_unprepare_clk:
+ while (--c >= 0)
+ clk_disable_unprepare(hpriv->clks[c]);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_clks);
+
+/**
+ * ahci_platform_disable_clks - Disable platform clocks
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all the clks found in hpriv->clks, in reverse
+ * order of ahci_platform_enable_clks (starting at the end of the array).
+ */
+void ahci_platform_disable_clks(struct ahci_host_priv *hpriv)
+{
+ int c;
+
+ for (c = AHCI_MAX_CLKS - 1; c >= 0; c--)
+ if (hpriv->clks[c])
+ clk_disable_unprepare(hpriv->clks[c]);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
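The two clock helpers are meant to be used as a symmetric pair; the X-Gene probe earlier in this patch, for example, enables and immediately disables the clocks once to satisfy a controller erratum. A minimal, hedged usage sketch (the "my_" name is hypothetical):

/* Sketch: symmetric use of the clock helpers. */
static int my_clk_cycle(struct ahci_host_priv *hpriv)
{
	int rc;

	rc = ahci_platform_enable_clks(hpriv);	/* prepares + enables clks[] */
	if (rc)
		return rc;

	ahci_platform_disable_clks(hpriv);	/* reverse-order tear-down */
	return 0;
}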
+
+/**
+ * ahci_platform_enable_resources - Enable platform resources
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all ahci_platform managed resources in the
+ * following order:
+ * 1) Regulator
+ * 2) Clocks (through ahci_platform_enable_clks)
+ * 3) Phy
+ *
+ * If resource enabling fails at any point, the previously enabled resources
+ * are disabled in reverse order.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
+{
+ int rc;
+
+ if (hpriv->target_pwr) {
+ rc = regulator_enable(hpriv->target_pwr);
+ if (rc)
+ return rc;
+ }
+
+ rc = ahci_platform_enable_clks(hpriv);
+ if (rc)
+ goto disable_regulator;
+
+ if (hpriv->phy) {
+ rc = phy_init(hpriv->phy);
+ if (rc)
+ goto disable_clks;
+
+ rc = phy_power_on(hpriv->phy);
+ if (rc) {
+ phy_exit(hpriv->phy);
+ goto disable_clks;
+ }
+ }
+
+ return 0;
+
+disable_clks:
+ ahci_platform_disable_clks(hpriv);
+
+disable_regulator:
+ if (hpriv->target_pwr)
+ regulator_disable(hpriv->target_pwr);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
+
+/**
+ * ahci_platform_disable_resources - Disable platform resources
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all ahci_platform managed resources in the
+ * following order:
+ * 1) Phy
+ * 2) Clocks (through ahci_platform_disable_clks)
+ * 3) Regulator
+ */
+void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
+{
+ if (hpriv->phy) {
+ phy_power_off(hpriv->phy);
+ phy_exit(hpriv->phy);
+ }
+
+ ahci_platform_disable_clks(hpriv);
+
+ if (hpriv->target_pwr)
+ regulator_disable(hpriv->target_pwr);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_resources);
+
+static void ahci_platform_put_resources(struct device *dev, void *res)
+{
+ struct ahci_host_priv *hpriv = res;
+ int c;
+
+ if (hpriv->got_runtime_pm) {
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ }
+
+ for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++)
+ clk_put(hpriv->clks[c]);
+}
+
+/**
+ * ahci_platform_get_resources - Get platform resources
+ * @pdev: platform device to get resources for
+ *
+ * This function allocates an ahci_host_priv struct, and gets the following
+ * resources, storing a reference to them inside the returned struct:
+ *
+ * 1) mmio registers (IORESOURCE_MEM 0, mandatory)
+ * 2) regulator for controlling the target's power (optional)
+ * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the device's devicetree node,
+ * or a single clock for non-devicetree-enabled platforms
+ * 4) phy (optional)
+ *
+ * RETURNS:
+ * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
+ */
+struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_host_priv *hpriv;
+ struct clk *clk;
+ int i, rc = -ENOMEM;
+
+ if (!devres_open_group(dev, NULL, GFP_KERNEL))
+ return ERR_PTR(-ENOMEM);
+
+ hpriv = devres_alloc(ahci_platform_put_resources, sizeof(*hpriv),
+ GFP_KERNEL);
+ if (!hpriv)
+ goto err_out;
+
+ devres_add(dev, hpriv);
+
+ hpriv->mmio = devm_ioremap_resource(dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ if (IS_ERR(hpriv->mmio)) {
+ dev_err(dev, "no mmio space\n");
+ rc = PTR_ERR(hpriv->mmio);
+ goto err_out;
+ }
+
+ hpriv->target_pwr = devm_regulator_get_optional(dev, "target");
+ if (IS_ERR(hpriv->target_pwr)) {
+ rc = PTR_ERR(hpriv->target_pwr);
+ if (rc == -EPROBE_DEFER)
+ goto err_out;
+ hpriv->target_pwr = NULL;
+ }
+
+ for (i = 0; i < AHCI_MAX_CLKS; i++) {
+ /*
+ * For now we must use clk_get(dev, NULL) for the first clock,
+ * because some platforms (da850, spear13xx) are not yet
+ * converted to use devicetree for clocks. For new platforms
+ * this is equivalent to of_clk_get(dev->of_node, 0).
+ */
+ if (i == 0)
+ clk = clk_get(dev, NULL);
+ else
+ clk = of_clk_get(dev->of_node, i);
+
+ if (IS_ERR(clk)) {
+ rc = PTR_ERR(clk);
+ if (rc == -EPROBE_DEFER)
+ goto err_out;
+ break;
+ }
+ hpriv->clks[i] = clk;
+ }
+
+ hpriv->phy = devm_phy_get(dev, "sata-phy");
+ if (IS_ERR(hpriv->phy)) {
+ rc = PTR_ERR(hpriv->phy);
+ switch (rc) {
+ case -ENODEV:
+ case -ENOSYS:
+ /* continue normally */
+ hpriv->phy = NULL;
+ break;
+
+ case -EPROBE_DEFER:
+ goto err_out;
+
+ default:
+ dev_err(dev, "couldn't get sata-phy\n");
+ goto err_out;
+ }
+ }
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+ hpriv->got_runtime_pm = true;
+
+ devres_remove_group(dev, NULL);
+ return hpriv;
+
+err_out:
+ devres_release_group(dev, NULL);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
+
+/**
+ * ahci_platform_init_host - Bring up an ahci-platform host
+ * @pdev: platform device pointer for the host
+ * @hpriv: ahci-host private data for the host
+ * @pi_template: template for the ata_port_info to use
+ * @force_port_map: param passed to ahci_save_initial_config
+ * @mask_port_map: param passed to ahci_save_initial_config
+ *
+ * This function does all the usual steps needed to bring up an
+ * ahci-platform host; note that any necessary resources (i.e. clks, phy, etc.)
+ * must be initialized / enabled before calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_init_host(struct platform_device *pdev,
+ struct ahci_host_priv *hpriv,
+ const struct ata_port_info *pi_template,
+ unsigned int force_port_map,
+ unsigned int mask_port_map)
+{
+ struct device *dev = &pdev->dev;
+ struct ata_port_info pi = *pi_template;
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct ata_host *host;
+ int i, irq, n_ports, rc;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "no irq\n");
+ return -EINVAL;
+ }
+
+ /* prepare host */
+ hpriv->flags |= (unsigned long)pi.private_data;
+
+ ahci_save_initial_config(dev, hpriv, force_port_map, mask_port_map);
+
+ if (hpriv->cap & HOST_CAP_NCQ)
+ pi.flags |= ATA_FLAG_NCQ;
+
+ if (hpriv->cap & HOST_CAP_PMP)
+ pi.flags |= ATA_FLAG_PMP;
+
+ ahci_set_em_messages(hpriv, &pi);
+
+ /* CAP.NP sometimes indicates the index of the last enabled
+ * port, at other times, that of the last possible port, so
+ * determining the maximum port number requires looking at
+ * both CAP.NP and port_map.
+ */
+ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+ host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+ if (!host)
+ return -ENOMEM;
+
+ host->private_data = hpriv;
+
+ if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+ host->flags |= ATA_HOST_PARALLEL_SCAN;
+ else
+ dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
+
+ if (pi.flags & ATA_FLAG_EM)
+ ahci_reset_em(host);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ata_port_desc(ap, "mmio %pR",
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+
+ /* set enclosure management message type */
+ if (ap->flags & ATA_FLAG_EM)
+ ap->em_message_type = hpriv->em_msg_type;
+
+ /* disabled/not-implemented port */
+ if (!(hpriv->port_map & (1 << i)))
+ ap->ops = &ata_dummy_port_ops;
+ }
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ ahci_init_controller(host);
+ ahci_print_info(host, "platform");
+
+ return ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
+ &ahci_platform_sht);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_init_host);
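Taken together, the helpers exported above reduce a new glue driver's probe to a few calls; the ST and sunxi probes earlier in this patch follow exactly this shape. A minimal, hedged sketch (the "my_ahci" names are hypothetical):

static const struct ata_port_info my_ahci_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_platform_ops,
};

static int my_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	hpriv = ahci_platform_get_resources(pdev);	/* mmio, clks, phy, regulator */
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	rc = ahci_platform_enable_resources(hpriv);	/* regulator -> clks -> phy */
	if (rc)
		return rc;

	rc = ahci_platform_init_host(pdev, hpriv, &my_ahci_port_info, 0, 0);
	if (rc)
		ahci_platform_disable_resources(hpriv);	/* unwind on failure */

	return rc;
}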
+
+static void ahci_host_stop(struct ata_host *host)
+{
+ struct device *dev = host->dev;
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+
+ ahci_platform_disable_resources(hpriv);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * ahci_platform_suspend_host - Suspend an ahci-platform host
+ * @dev: device pointer for the host
+ *
+ * This function does all the usual steps needed to suspend an
+ * ahci-platform host; note that any necessary resources (i.e. clks, phy, etc.)
+ * must be disabled after calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_suspend_host(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 ctl;
+
+ if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+ dev_err(dev, "firmware update required for suspend/resume\n");
+ return -EIO;
+ }
+
+ /*
+ * AHCI spec rev1.1 section 8.3.3:
+ * Software must disable interrupts prior to requesting a
+ * transition of the HBA to D3 state.
+ */
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+
+ return ata_host_suspend(host, PMSG_SUSPEND);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
+
+/**
+ * ahci_platform_resume_host - Resume an ahci-platform host
+ * @dev: device pointer for the host
+ *
+ * This function does all the usual steps needed to resume an ahci-platform
+ * host; note that any necessary resources (i.e. clks, phy, etc.) must be
+ * initialized / enabled before calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_resume_host(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ int rc;
+
+ if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ ahci_init_controller(host);
+ }
+
+ ata_host_resume(host);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_resume_host);
+
+/**
+ * ahci_platform_suspend - Suspend an ahci-platform device
+ * @dev: the platform device to suspend
+ *
+ * This function suspends the host associated with the device and then
+ * disables all of the device's resources.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_suspend(struct device *dev)
+{
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_platform_suspend_host(dev);
+ if (rc)
+ return rc;
+
+ if (pdata && pdata->suspend) {
+ rc = pdata->suspend(dev);
+ if (rc)
+ goto resume_host;
+ }
+
+ ahci_platform_disable_resources(hpriv);
+
+ return 0;
+
+resume_host:
+ ahci_platform_resume_host(dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_suspend);
+
+/**
+ * ahci_platform_resume - Resume an ahci-platform device
+ * @dev: the platform device to resume
+ *
+ * This function enables all of the device's resources and then resumes
+ * the host associated with the device.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_resume(struct device *dev)
+{
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ if (pdata && pdata->resume) {
+ rc = pdata->resume(dev);
+ if (rc)
+ goto disable_resources;
+ }
+
+ rc = ahci_platform_resume_host(dev);
+ if (rc)
+ goto disable_resources;
+
+ /* We resumed so update PM runtime state */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_resume);
+#endif
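For glue drivers with no extra suspend/resume work, these two entry points can be plugged into dev_pm_ops unchanged, as the reworked ahci.c does at the top of this patch; drivers that need extra steps (ST, sunxi) wrap ahci_platform_suspend_host()/ahci_platform_resume_host() instead. A hedged sketch of the simple case, continuing the hypothetical "my_ahci" driver from the probe sketch above:

static SIMPLE_DEV_PM_OPS(my_ahci_pm_ops, ahci_platform_suspend,
			 ahci_platform_resume);

static struct platform_driver my_ahci_driver = {
	.probe	= my_ahci_probe,
	.remove	= ata_platform_remove_one,
	.driver	= {
		.name		= "my-ahci",	/* hypothetical */
		.owner		= THIS_MODULE,
		.pm		= &my_ahci_pm_ops,
	},
};
module_platform_driver(my_ahci_driver);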
+
+MODULE_DESCRIPTION("AHCI SATA platform library");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 9e69a5308693..97a14fe47de1 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -38,6 +38,16 @@ static void ata_acpi_clear_gtf(struct ata_device *dev)
dev->gtf_cache = NULL;
}
+struct ata_acpi_hotplug_context {
+ struct acpi_hotplug_context hp;
+ union {
+ struct ata_port *ap;
+ struct ata_device *dev;
+ } data;
+};
+
+#define ata_hotplug_data(context) (container_of((context), struct ata_acpi_hotplug_context, hp)->data)
+
/**
* ata_dev_acpi_handle - provide the acpi_handle for an ata_device
* @dev: the acpi_handle returned will correspond to this device
@@ -121,18 +131,17 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
ata_port_wait_eh(ap);
}
-static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
+static int ata_acpi_dev_notify_dock(struct acpi_device *adev, u32 event)
{
- struct ata_device *dev = data;
-
+ struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
ata_acpi_handle_hotplug(dev->link->ap, dev, event);
+ return 0;
}
-static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data)
+static int ata_acpi_ap_notify_dock(struct acpi_device *adev, u32 event)
{
- struct ata_port *ap = data;
-
- ata_acpi_handle_hotplug(ap, NULL, event);
+ ata_acpi_handle_hotplug(ata_hotplug_data(adev->hp).ap, NULL, event);
+ return 0;
}
static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
@@ -154,31 +163,23 @@ static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
}
}
-static void ata_acpi_ap_uevent(acpi_handle handle, u32 event, void *data)
+static void ata_acpi_ap_uevent(struct acpi_device *adev, u32 event)
{
- ata_acpi_uevent(data, NULL, event);
+ ata_acpi_uevent(ata_hotplug_data(adev->hp).ap, NULL, event);
}
-static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
+static void ata_acpi_dev_uevent(struct acpi_device *adev, u32 event)
{
- struct ata_device *dev = data;
+ struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
ata_acpi_uevent(dev->link->ap, dev, event);
}
-static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
- .handler = ata_acpi_dev_notify_dock,
- .uevent = ata_acpi_dev_uevent,
-};
-
-static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
- .handler = ata_acpi_ap_notify_dock,
- .uevent = ata_acpi_ap_uevent,
-};
-
/* bind acpi handle to pata port */
void ata_acpi_bind_port(struct ata_port *ap)
{
struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
+ struct acpi_device *adev;
+ struct ata_acpi_hotplug_context *context;
if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_companion)
return;
@@ -188,9 +189,17 @@ void ata_acpi_bind_port(struct ata_port *ap)
if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
- /* we might be on a docking station */
- register_hotplug_dock_device(ACPI_HANDLE(&ap->tdev),
- &ata_acpi_ap_dock_ops, ap, NULL, NULL);
+ adev = ACPI_COMPANION(&ap->tdev);
+ if (!adev || adev->hp)
+ return;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return;
+
+ context->data.ap = ap;
+ acpi_initialize_hp_context(adev, &context->hp, ata_acpi_ap_notify_dock,
+ ata_acpi_ap_uevent);
}
void ata_acpi_bind_dev(struct ata_device *dev)
@@ -198,7 +207,8 @@ void ata_acpi_bind_dev(struct ata_device *dev)
struct ata_port *ap = dev->link->ap;
struct acpi_device *port_companion = ACPI_COMPANION(&ap->tdev);
struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
- struct acpi_device *parent;
+ struct acpi_device *parent, *adev;
+ struct ata_acpi_hotplug_context *context;
u64 adr;
/*
@@ -221,9 +231,17 @@ void ata_acpi_bind_dev(struct ata_device *dev)
}
acpi_preset_companion(&dev->tdev, parent, adr);
+ adev = ACPI_COMPANION(&dev->tdev);
+ if (!adev || adev->hp)
+ return;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return;
- register_hotplug_dock_device(ata_dev_acpi_handle(dev),
- &ata_acpi_dev_dock_ops, dev, NULL, NULL);
+ context->data.dev = dev;
+ acpi_initialize_hp_context(adev, &context->hp, ata_acpi_dev_notify_dock,
+ ata_acpi_dev_uevent);
}
/**
@@ -835,6 +853,7 @@ void ata_acpi_on_resume(struct ata_port *ap)
ata_for_each_dev(dev, &ap->link, ALL) {
ata_acpi_clear_gtf(dev);
if (ata_dev_enabled(dev) &&
+ ata_dev_acpi_handle(dev) &&
ata_dev_get_GTF(dev, NULL) >= 0)
dev->flags |= ATA_DFLAG_ACPI_PENDING;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8cb2522d592a..c19734d96d7e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1524,7 +1524,7 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
* @cdb: CDB for packet command
- * @dma_dir: Data tranfer direction of the command
+ * @dma_dir: Data transfer direction of the command
* @sgl: sg list for the data buffer of the command
* @n_elem: Number of sg entries
* @timeout: Timeout in msecs (0 for default)
@@ -1712,7 +1712,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
* @cdb: CDB for packet command
- * @dma_dir: Data tranfer direction of the command
+ * @dma_dir: Data transfer direction of the command
* @buf: Data buffer of the command
* @buflen: Length of data buffer
* @timeout: Timeout in msecs (0 for default)
@@ -5352,22 +5352,17 @@ bool ata_link_offline(struct ata_link *link)
}
#ifdef CONFIG_PM
-static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
- unsigned int action, unsigned int ehi_flags,
- int *async)
+static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
+ unsigned int action, unsigned int ehi_flags,
+ bool async)
{
struct ata_link *link;
unsigned long flags;
- int rc = 0;
/* Previous resume operation might still be in
* progress. Wait for PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
- if (async) {
- *async = -EAGAIN;
- return 0;
- }
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
@@ -5376,11 +5371,6 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_lock_irqsave(ap->lock, flags);
ap->pm_mesg = mesg;
- if (async)
- ap->pm_result = async;
- else
- ap->pm_result = &rc;
-
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
link->eh_info.action |= action;
@@ -5391,87 +5381,81 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_unlock_irqrestore(ap->lock, flags);
- /* wait and check result */
if (!async) {
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
-
- return rc;
}
-static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
+/*
+ * On some hardware, the device fails to respond after being spun down for suspend. As
+ * the device won't be used before being resumed, we don't need to touch the
+ * device. Ask EH to skip the usual stuff and proceed directly to suspend.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/46764
+ */
+static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
+ | ATA_EHI_NO_AUTOPSY
+ | ATA_EHI_NO_RECOVERY;
+
+static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
- /*
- * On some hardware, device fails to respond after spun down
- * for suspend. As the device won't be used before being
- * resumed, we don't need to touch the device. Ask EH to skip
- * the usual stuff and proceed directly to suspend.
- *
- * http://thread.gmane.org/gmane.linux.ide/46764
- */
- unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
- ATA_EHI_NO_RECOVERY;
- return ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
+ ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}
-static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
+static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
- struct ata_port *ap = to_ata_port(dev);
-
- return __ata_port_suspend_common(ap, mesg, NULL);
+ ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
-static int ata_port_suspend(struct device *dev)
+static int ata_port_pm_suspend(struct device *dev)
{
+ struct ata_port *ap = to_ata_port(dev);
+
if (pm_runtime_suspended(dev))
return 0;
- return ata_port_suspend_common(dev, PMSG_SUSPEND);
+ ata_port_suspend(ap, PMSG_SUSPEND);
+ return 0;
}
-static int ata_port_do_freeze(struct device *dev)
+static int ata_port_pm_freeze(struct device *dev)
{
+ struct ata_port *ap = to_ata_port(dev);
+
if (pm_runtime_suspended(dev))
return 0;
- return ata_port_suspend_common(dev, PMSG_FREEZE);
+ ata_port_suspend(ap, PMSG_FREEZE);
+ return 0;
}
-static int ata_port_poweroff(struct device *dev)
+static int ata_port_pm_poweroff(struct device *dev)
{
- return ata_port_suspend_common(dev, PMSG_HIBERNATE);
+ ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
+ return 0;
}
-static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg,
- int *async)
-{
- int rc;
+static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
+ | ATA_EHI_QUIET;
- rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET,
- ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
- return rc;
+static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
+{
+ ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}
-static int ata_port_resume_common(struct device *dev, pm_message_t mesg)
+static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
- struct ata_port *ap = to_ata_port(dev);
-
- return __ata_port_resume_common(ap, mesg, NULL);
+ ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}
-static int ata_port_resume(struct device *dev)
+static int ata_port_pm_resume(struct device *dev)
{
- int rc;
-
- rc = ata_port_resume_common(dev, PMSG_RESUME);
- if (!rc) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
- return rc;
+ ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ return 0;
}
/*
@@ -5500,21 +5484,23 @@ static int ata_port_runtime_idle(struct device *dev)
static int ata_port_runtime_suspend(struct device *dev)
{
- return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND);
+ ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
+ return 0;
}
static int ata_port_runtime_resume(struct device *dev)
{
- return ata_port_resume_common(dev, PMSG_AUTO_RESUME);
+ ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
+ return 0;
}
static const struct dev_pm_ops ata_port_pm_ops = {
- .suspend = ata_port_suspend,
- .resume = ata_port_resume,
- .freeze = ata_port_do_freeze,
- .thaw = ata_port_resume,
- .poweroff = ata_port_poweroff,
- .restore = ata_port_resume,
+ .suspend = ata_port_pm_suspend,
+ .resume = ata_port_pm_resume,
+ .freeze = ata_port_pm_freeze,
+ .thaw = ata_port_pm_resume,
+ .poweroff = ata_port_pm_poweroff,
+ .restore = ata_port_pm_resume,
.runtime_suspend = ata_port_runtime_suspend,
.runtime_resume = ata_port_runtime_resume,
@@ -5526,18 +5512,17 @@ static const struct dev_pm_ops ata_port_pm_ops = {
* level. sas suspend/resume is async to allow parallel port recovery
* since sas has multiple ata_port instances per Scsi_Host.
*/
-int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
+void ata_sas_port_suspend(struct ata_port *ap)
{
- return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
+ ata_port_suspend_async(ap, PMSG_SUSPEND);
}
-EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
+EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
-int ata_sas_port_async_resume(struct ata_port *ap, int *async)
+void ata_sas_port_resume(struct ata_port *ap)
{
- return __ata_port_resume_common(ap, PMSG_RESUME, async);
+ ata_port_resume_async(ap, PMSG_RESUME);
}
-EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
-
+EXPORT_SYMBOL_GPL(ata_sas_port_resume);
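For context, a minimal sketch (not part of this patch) of how a libsas-style host could drive the new per-port helpers. Since ata_sas_port_suspend() only queues the EH request and returns, every port recovers in parallel, which is the point of the comment above. The struct ata_host fields used (n_ports, ports[]) come from <linux/libata.h>; the example_* name is hypothetical.

#include <linux/libata.h>

/* Hypothetical helper: kick off suspend on every port of a host. */
static void example_suspend_all_ports(struct ata_host *host)
{
    unsigned int i;

    for (i = 0; i < host->n_ports; i++)
        ata_sas_port_suspend(host->ports[i]);   /* async, returns at once */
}
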
/**
* ata_host_suspend - suspend host
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6d8757008318..6760fc4e85b8 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -95,12 +95,13 @@ enum {
* represents timeout for that try. The first try can be soft or
* hardreset. All others are hardreset if available. In most cases
* the first reset w/ 10sec timeout should succeed. Following entries
- * are mostly for error handling, hotplug and retarded devices.
+ * are mostly for error handling, hotplug and those outlier devices that
+ * take an exceptionally long time to recover from reset.
*/
static const unsigned long ata_eh_reset_timeouts[] = {
10000, /* most drives spin up by 10sec */
10000, /* > 99% working drives spin up before 20sec */
- 35000, /* give > 30 secs of idleness for retarded devices */
+ 35000, /* give > 30 secs of idleness for outlier devices */
5000, /* and sweet one last chance */
ULONG_MAX, /* > 1 min has elapsed, give up */
};
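An illustrative, hypothetical consumer of such a table (not the libata EH code itself): each attempt gets the next, longer timeout before giving up, which is the escalation the comment above describes. All names below are made up for the sketch.

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical retry loop: walk an escalating timeout table until one
 * reset attempt succeeds or the table is exhausted. */
static int example_reset_retry(const unsigned long *timeouts, size_t n,
                               int (*do_reset)(unsigned long timeout_ms))
{
    size_t i;

    for (i = 0; i < n; i++)
        if (do_reset(timeouts[i]) == 0)
            return 0;       /* device recovered */

    return -ENODEV;         /* all attempts timed out */
}
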
@@ -4069,7 +4070,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
ata_acpi_set_state(ap, ap->pm_mesg);
out:
- /* report result */
+ /* update the flags */
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~ATA_PFLAG_PM_PENDING;
@@ -4078,11 +4079,6 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
else if (ap->pflags & ATA_PFLAG_FROZEN)
ata_port_schedule_eh(ap);
- if (ap->pm_result) {
- *ap->pm_result = rc;
- ap->pm_result = NULL;
- }
-
spin_unlock_irqrestore(ap->lock, flags);
return;
@@ -4134,13 +4130,9 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
/* tell ACPI that we're resuming */
ata_acpi_on_resume(ap);
- /* report result */
+ /* update the flags */
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
- if (ap->pm_result) {
- *ap->pm_result = rc;
- ap->pm_result = NULL;
- }
spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 88949c6d55dd..f3a65a3140d3 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -85,21 +85,6 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
return ODD_MECH_TYPE_UNSUPPORTED;
}
-static bool odd_can_poweroff(struct ata_device *ata_dev)
-{
- acpi_handle handle;
- struct acpi_device *acpi_dev;
-
- handle = ata_dev_acpi_handle(ata_dev);
- if (!handle)
- return false;
-
- if (acpi_bus_get_device(handle, &acpi_dev))
- return false;
-
- return acpi_device_can_poweroff(acpi_dev);
-}
-
/* Test if ODD is zero power ready by sense code */
static bool zpready(struct ata_device *dev)
{
@@ -267,13 +252,11 @@ static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
void zpodd_init(struct ata_device *dev)
{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->tdev);
enum odd_mech_type mech_type;
struct zpodd *zpodd;
- if (dev->zpodd)
- return;
-
- if (!odd_can_poweroff(dev))
+ if (dev->zpodd || !adev || !acpi_device_can_poweroff(adev))
return;
mech_type = zpodd_get_mech_type(dev);
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 62c9ac80c6e9..5108b8744dce 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index d23e2b3ca0b6..1206fa6b62ca 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 73492dd4a4bc..6fac524c2f50 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -356,7 +356,7 @@ static void cf_exit(struct arasan_cf_dev *acdev)
static void dma_callback(void *dev)
{
- struct arasan_cf_dev *acdev = (struct arasan_cf_dev *) dev;
+ struct arasan_cf_dev *acdev = dev;
complete(&acdev->dma_completion);
}
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 1581dee2967a..3aa4e655e3c6 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index d63ee8f41a4f..e9c87274a781 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 24e51056ac26..30fa4ca4cef6 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 2ca5026f2c15..7e73a0f1e323 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 8fb69e5ca1b7..57f1be64dbf2 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/gfp.h>
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 1275a8d4dedc..6bca3505b9e9 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index f10baabbf5db..bcde4b786807 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -34,7 +34,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index f07f2296acdc..8afe854a5a50 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 997e16a3a63f..2c0986fa4bb2 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 0448860a2077..32ddcae5a360 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/libata.h>
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 810bc9964dde..3435bd6a5cc9 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 3c12fd7acd41..f440892225f4 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 980b88e109fc..cad9d45749c4 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -34,7 +34,6 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 35b521348d31..8e76f79689d3 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index a9d74eff5fc4..3ba843f5cdc0 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 4be0398c153d..b93c0f0729e7 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 85cf2861e0b7..255c5aaff3a8 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index b0b18ec5465f..e0872db913d6 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -15,7 +15,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
@@ -100,13 +99,9 @@ static int pata_imx_probe(struct platform_device *pdev)
struct resource *io_res;
int ret;
- io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (io_res == NULL)
- return -EINVAL;
-
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -EINVAL;
+ if (irq < 0)
+ return irq;
priv = devm_kzalloc(&pdev->dev,
sizeof(struct pata_imx_priv), GFP_KERNEL);
@@ -136,11 +131,10 @@ static int pata_imx_probe(struct platform_device *pdev)
ap->pio_mask = ATA_PIO0;
ap->flags |= ATA_FLAG_SLAVE_POSS;
- priv->host_regs = devm_ioremap(&pdev->dev, io_res->start,
- resource_size(io_res));
- if (!priv->host_regs) {
- dev_err(&pdev->dev, "failed to map IO/CTL base\n");
- ret = -EBUSY;
+ io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->host_regs = devm_ioremap_resource(&pdev->dev, io_res);
+ if (IS_ERR(priv->host_regs)) {
+ ret = PTR_ERR(priv->host_regs);
goto err;
}
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 2a8dd9527ecc..81369d187a5c 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 581e04d80367..dc3d7877f29d 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -72,7 +72,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 76e739b031b6..b1cfa0258fd3 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index be816428b430..bce2a8ca4678 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -916,7 +916,6 @@ static __init int probe_chip_type(struct legacy_probe *probe)
local_irq_restore(flags);
return BIOS;
}
- local_irq_restore(flags);
}
if (ht6560a & mask)
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index a4f5e781c8c2..6bad3df3a13c 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 1f5f28bb0bb8..f39a5379e816 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index ad1a0febd620..e3b97093ef9a 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 9513e071040d..56201a69af12 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -37,7 +37,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 0c424dae56e7..6154c3ee11a5 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index 16dc3a63a23d..d44df7ccfe43 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index d77b2e1054ef..319b64491b7b 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 4ea70cd22aee..fb042e0519d0 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 78ede3fd1875..bb71ea214b99 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 40254f4df584..bcc4b968c049 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 9d874c85d64d..1151f23177bb 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index c34fc50070a6..defa050e1784 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index 2beb6b5045f8..0b46be117051 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 02794885de10..a5579b55e332 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -13,7 +13,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index a6f05acad61e..73259bfda1e3 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index f582ba180a7d..be3f10240dca 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 79a970f05a2e..521b2137ea3e 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -24,7 +24,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 040b093617a4..caedc90855b2 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index ce2f828c17b3..96a232fffae6 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -32,7 +32,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index f35f15f4d83e..f1f5b5ae3382 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -35,7 +35,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index d3830c45a369..5a1cde0ea360 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 96c6a79ef606..e27f31fe1b67 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -34,7 +34,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index c4b0b073ba8e..73fe362d9716 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 1e8363640bf5..78d913aa93c8 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 6816911ac422..900f0e4a1faf 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 94473da68c02..7bc78e264f9e 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -36,7 +36,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index c3ab9a6c3965..f6c9632bdff6 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -55,7 +55,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/gfp.h>
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 8ea6e6afd041..f10631beffa8 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -36,7 +36,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 523524b68022..0bb2cabd2197 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -462,8 +461,7 @@ static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
int chan;
u32 tfr_reg, err_reg;
unsigned long flags;
- struct sata_dwc_device *hsdev =
- (struct sata_dwc_device *)hsdev_instance;
+ struct sata_dwc_device *hsdev = hsdev_instance;
struct ata_host *host = (struct ata_host *)hsdev->host;
struct ata_port *ap;
struct sata_dwc_device_port *hsdevp;
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 870b11eadc6d..65965cf5af06 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -142,7 +141,7 @@ static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ecx_plat_data *pdata = (struct ecx_plat_data *) hpriv->plat_data;
+ struct ecx_plat_data *pdata = hpriv->plat_data;
struct ahci_port_priv *pp = ap->private_data;
unsigned long flags;
int pmp, i;
@@ -403,6 +402,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
static const unsigned long timing[] = { 5, 100, 500};
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
bool online;
@@ -431,7 +431,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
break;
} while (!online && retry--);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
if (online)
*class = ahci_dev_classify(ap);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index d74def823d3e..ba5f27120332 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -40,7 +40,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 97f4acb54ad6..3638887476f6 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -35,7 +35,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 3b0dd57984e1..9a6bd4cd29a0 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -31,7 +31,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index b7695e804635..3062f8605b29 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -37,7 +37,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 1ad2f62d34b9..b513428171b3 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index dc4f70179e7d..c630fa812624 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -39,7 +39,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 9947010afc0f..39b5de60a1f9 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -82,7 +82,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1021,8 +1020,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
idx++;
dist = ((long) (window_size - (offset + size))) >= 0 ? size :
(long) (window_size - offset);
- memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
- dist);
+ memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
psource += dist;
size -= dist;
@@ -1031,8 +1029,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
readl(mmio + PDC_GENERAL_CTLR);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
- memcpy_fromio((char *) psource, (char *) (dimm_mmio),
- window_size / 4);
+ memcpy_fromio(psource, dimm_mmio, window_size / 4);
psource += window_size;
size -= window_size;
idx++;
@@ -1043,8 +1040,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
readl(mmio + PDC_GENERAL_CTLR);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
- memcpy_fromio((char *) psource, (char *) (dimm_mmio),
- size / 4);
+ memcpy_fromio(psource, dimm_mmio, size / 4);
}
}
#endif
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 6d6489118873..08f98c3ed5c8 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 87f056e54a9d..f72e84228c5c 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -36,7 +36,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 44f304b3de63..29e847aac34b 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -37,7 +37,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 62a76076b548..f1a9198dfe5a 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1925,7 +1925,7 @@ static int ucode_init(loader_block *lb, amb_dev *dev)
const struct firmware *fw;
unsigned long start_address;
const struct ihex_binrec *rec;
- const char *errmsg = 0;
+ const char *errmsg = NULL;
int res;
res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index b41c9481b67b..82f2ae0d7cc4 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -736,8 +736,8 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
skb = td->skb;
if (skb == FS_VCC (ATM_SKB(skb)->vcc)->last_skb) {
- wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
FS_VCC (ATM_SKB(skb)->vcc)->last_skb = NULL;
+ wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
}
td->dev->ntxpckts--;
@@ -1123,7 +1123,7 @@ static void fs_close(struct atm_vcc *atm_vcc)
this sleep_on, we'll lose any reference to these packets. Memory leak!
On the other hand, it's awfully convenient that we can abort a "close" that
is taking too long. Maybe just use non-interruptible sleep on? -- REW */
- interruptible_sleep_on (& vcc->close_wait);
+ wait_event_interruptible(vcc->close_wait, !vcc->last_skb);
}
txtp = &atm_vcc->qos.txtp;
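The reordering above (clear last_skb, then wake) pairs with the new wait_event_interruptible() condition in fs_close(): the waker must publish the condition before waking so the sleeper's re-check cannot observe stale state. A self-contained sketch of that contract, with hypothetical names mirroring the fields used in the hunk:

#include <linux/wait.h>
#include <linux/skbuff.h>

struct example_vcc {                    /* hypothetical */
    wait_queue_head_t close_wait;
    struct sk_buff *last_skb;
};

static void example_tx_done(struct example_vcc *vcc)
{
    vcc->last_skb = NULL;                    /* publish the condition first... */
    wake_up_interruptible(&vcc->close_wait); /* ...then wake the waiter */
}

static int example_close(struct example_vcc *vcc)
{
    /* returns 0 once last_skb is NULL, -ERESTARTSYS on a signal */
    return wait_event_interruptible(vcc->close_wait, !vcc->last_skb);
}
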
@@ -2000,7 +2000,7 @@ static void firestream_remove_one(struct pci_dev *pdev)
fs_dprintk (FS_DEBUG_CLEANUP, "Freeing irq%d.\n", dev->irq);
free_irq (dev->irq, dev);
- del_timer (&dev->timer);
+ del_timer_sync (&dev->timer);
atm_dev_deregister(dev->atm_dev);
free_queue (dev, &dev->hp_txq);
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index 45d506363aba..909c95bd7be2 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -368,9 +368,9 @@ EXPORT_SYMBOL(idt77105_init);
static void __exit idt77105_exit(void)
{
- /* turn off timers */
- del_timer(&stats_timer);
- del_timer(&restart_timer);
+ /* turn off timers */
+ del_timer_sync(&stats_timer);
+ del_timer_sync(&restart_timer);
}
module_exit(idt77105_exit);
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 9587e959ce1a..9988ac98b6d8 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -639,9 +639,9 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
card->hbnr.init = NUM_HB;
card->hbnr.max = MAX_HB;
- card->sm_handle = 0x00000000;
+ card->sm_handle = NULL;
card->sm_addr = 0x00000000;
- card->lg_handle = 0x00000000;
+ card->lg_handle = NULL;
card->lg_addr = 0x00000000;
card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */
@@ -979,7 +979,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
addr2 = card->sm_addr;
handle2 = card->sm_handle;
card->sm_addr = 0x00000000;
- card->sm_handle = 0x00000000;
+ card->sm_handle = NULL;
} else { /* (!sm_addr) */
card->sm_addr = addr1;
@@ -993,7 +993,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
addr2 = card->lg_addr;
handle2 = card->lg_handle;
card->lg_addr = 0x00000000;
- card->lg_handle = 0x00000000;
+ card->lg_handle = NULL;
} else { /* (!lg_addr) */
card->lg_addr = addr1;
@@ -1739,10 +1739,10 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
}
scq->full = 1;
- spin_unlock_irqrestore(&scq->lock, flags);
- interruptible_sleep_on_timeout(&scq->scqfull_waitq,
- SCQFULL_TIMEOUT);
- spin_lock_irqsave(&scq->lock, flags);
+ wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
+ scq->tail != scq->next,
+ scq->lock,
+ SCQFULL_TIMEOUT);
if (scq->full) {
spin_unlock_irqrestore(&scq->lock, flags);
@@ -1789,10 +1789,10 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
scq->full = 1;
if (has_run++)
break;
- spin_unlock_irqrestore(&scq->lock, flags);
- interruptible_sleep_on_timeout(&scq->scqfull_waitq,
- SCQFULL_TIMEOUT);
- spin_lock_irqsave(&scq->lock, flags);
+ wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
+ scq->tail != scq->next,
+ scq->lock,
+ SCQFULL_TIMEOUT);
}
if (!scq->full) {
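A note on the macro adopted above: wait_event_interruptible_lock_irq_timeout() drops the supplied spinlock while sleeping and re-acquires it before each re-test of the condition, so the caller still holds scq->lock when it returns. A hedged, self-contained sketch with hypothetical names:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct example_scq {                    /* hypothetical */
    wait_queue_head_t waitq;
    spinlock_t lock;
    void *tail, *next;
};

/* Called with q->lock held; the lock is released while sleeping and held
 * again when the macro returns (condition met, timeout, or signal). */
static long example_wait_for_room(struct example_scq *q)
{
    return wait_event_interruptible_lock_irq_timeout(q->waitq,
                                                     q->tail != q->next,
                                                     q->lock,
                                                     msecs_to_jiffies(500));
}
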
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index e3fb496c7163..943cf0d6abaf 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -760,7 +760,7 @@ static irqreturn_t solos_irq(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-void solos_bh(unsigned long card_arg)
+static void solos_bh(unsigned long card_arg)
{
struct solos_card *card = (void *)card_arg;
uint32_t card_flags;
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index ec36e7772e57..8fa8deab6449 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -185,6 +185,9 @@ config GENERIC_CPU_DEVICES
bool
default n
+config GENERIC_CPU_AUTOPROBE
+ bool
+
config SOC_BUS
bool
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index ecc1929d7f6a..b84ca8f13f9e 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -12,7 +12,6 @@
*/
#include <linux/attribute_container.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 59dc8086e4fa..83e910a57563 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1218,7 +1218,7 @@ err_dev:
* with the name of the subsystem. The root device can carry subsystem-
* wide attributes. All registered devices are below this single root
* device and are named after the subsystem with a simple enumeration
- * number appended. The registered devices are not explicitely named;
+ * number appended. The registered devices are not explicitly named;
* only 'id' in the device needs to be set.
*
* Do not use this interface for anything new, it exists for compatibility
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2b567177ef78..0dd65281cc65 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -23,7 +23,6 @@
#include <linux/genhd.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
-#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
@@ -571,6 +570,23 @@ void device_remove_file(struct device *dev,
EXPORT_SYMBOL_GPL(device_remove_file);
/**
+ * device_remove_file_self - remove sysfs attribute file from its own method.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ *
+ * See kernfs_remove_self() for details.
+ */
+bool device_remove_file_self(struct device *dev,
+ const struct device_attribute *attr)
+{
+ if (dev)
+ return sysfs_remove_file_self(&dev->kobj, &attr->attr);
+ else
+ return false;
+}
+EXPORT_SYMBOL_GPL(device_remove_file_self);
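A hedged usage sketch for the helper added above, assuming a hypothetical write-only "delete" attribute whose store method removes its own sysfs file before tearing the device down; example_teardown() is a placeholder for the driver's real cleanup, not an existing API.

#include <linux/device.h>

static void example_teardown(struct device *dev)
{
    /* hypothetical driver-specific cleanup */
}

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
    /* true only for the caller that actually removed the file */
    if (device_remove_file_self(dev, attr))
        example_teardown(dev);
    return count;
}
static DEVICE_ATTR_WO(delete);
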
+
+/**
* device_create_bin_file - create sysfs binary attribute file for device.
* @dev: device.
* @attr: device binary attribute descriptor.
@@ -2003,7 +2019,6 @@ void device_shutdown(void)
spin_lock(&devices_kset->list_lock);
}
spin_unlock(&devices_kset->list_lock);
- async_synchronize_full();
}
/*
@@ -2058,7 +2073,6 @@ create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
return pos;
}
-EXPORT_SYMBOL(create_syslog_header);
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args)
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f48370dfc908..006b1bc5297d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -15,6 +15,7 @@
#include <linux/percpu.h>
#include <linux/acpi.h>
#include <linux/of.h>
+#include <linux/cpufeature.h>
#include "base.h"
@@ -286,6 +287,41 @@ static void cpu_device_release(struct device *dev)
*/
}
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static ssize_t print_cpu_modalias(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t n;
+ u32 i;
+
+ n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
+ CPU_FEATURE_TYPEVAL);
+
+ for (i = 0; i < MAX_CPU_FEATURES; i++)
+ if (cpu_have_feature(i)) {
+ if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
+ WARN(1, "CPU features overflow page\n");
+ break;
+ }
+ n += sprintf(&buf[n], ",%04X", i);
+ }
+ buf[n++] = '\n';
+ return n;
+}
+
+static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buf) {
+ print_cpu_modalias(NULL, NULL, buf);
+ add_uevent_var(env, "MODALIAS=%s", buf);
+ kfree(buf);
+ }
+ return 0;
+}
+#endif
+
/*
* register_cpu - Setup a sysfs device for a CPU.
* @cpu - cpu->hotpluggable field set to 1 will generate a control file in
@@ -306,8 +342,8 @@ int register_cpu(struct cpu *cpu, int num)
cpu->dev.offline_disabled = !cpu->hotpluggable;
cpu->dev.offline = !cpu_online(num);
cpu->dev.of_node = of_get_cpu_node(num, NULL);
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
- cpu->dev.bus->uevent = arch_cpu_uevent;
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+ cpu->dev.bus->uevent = cpu_uevent;
#endif
cpu->dev.groups = common_cpu_attr_groups;
if (cpu->hotpluggable)
@@ -330,8 +366,8 @@ struct device *get_cpu_device(unsigned cpu)
}
EXPORT_SYMBOL_GPL(get_cpu_device);
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
-static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif
static struct attribute *cpu_root_attrs[] = {
@@ -344,7 +380,7 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
&dev_attr_modalias.attr,
#endif
NULL
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 545c4de412c3..db4e264eecb6 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -791,6 +791,32 @@ void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
EXPORT_SYMBOL_GPL(devm_kmalloc);
/**
+ * devm_kstrdup - Allocate resource managed space and
+ * copy an existing string into it.
+ * @dev: Device to allocate memory for
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ * allocating memory
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
+{
+ size_t size;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ size = strlen(s) + 1;
+ buf = devm_kmalloc(dev, size, gfp);
+ if (buf)
+ memcpy(buf, s, size);
+ return buf;
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup);
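A minimal usage sketch for the helper added above, assuming a hypothetical platform driver probe(); the managed copy is released automatically when the device is unbound, so no matching kfree() is needed.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>

static int example_probe(struct platform_device *pdev)
{
    const char *src = dev_name(&pdev->dev);  /* any source string */
    char *copy;

    copy = devm_kstrdup(&pdev->dev, src, GFP_KERNEL);
    if (!copy)
        return -ENOMEM;

    platform_set_drvdata(pdev, copy);        /* freed on driver unbind */
    return 0;
}
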
+
+/**
* devm_kfree - Resource-managed kfree
* @dev: Device this memory belongs to
* @p: Memory to free
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 61d6d62cc0d3..ea77701deda4 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -251,9 +251,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
*
- * Returns struct dma_buf_attachment * for this attachment; may return negative
- * error codes.
- *
+ * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
+ * error.
*/
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev)
@@ -319,9 +318,8 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
* @attach: [in] attachment whose scatterlist is to be returned
* @direction: [in] direction of DMA transfer
*
- * Returns sg_table containing the scatterlist to be returned; may return NULL
- * or ERR_PTR.
- *
+ * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
+ * on error.
*/
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
@@ -334,6 +332,8 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return ERR_PTR(-EINVAL);
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ if (!sg_table)
+ sg_table = ERR_PTR(-ENOMEM);
return sg_table;
}
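With the contract clarified above (the map path now returns either a valid pointer or ERR_PTR, never NULL), a caller only needs the IS_ERR() check. A short sketch with hypothetical names:

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

static int example_map_attachment(struct dma_buf_attachment *attach)
{
    struct sg_table *sgt;

    sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    if (IS_ERR(sgt))
        return PTR_ERR(sgt);    /* no separate NULL check needed */

    /* ... use sgt ... */
    dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
    return 0;
}
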
@@ -544,6 +544,8 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
* Please attempt to use kmap/kunmap before thinking about these interfaces.
+ *
+ * Returns NULL on error.
*/
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
@@ -566,7 +568,9 @@ void *dma_buf_vmap(struct dma_buf *dmabuf)
BUG_ON(dmabuf->vmap_ptr);
ptr = dmabuf->ops->vmap(dmabuf);
- if (IS_ERR_OR_NULL(ptr))
+ if (WARN_ON_ONCE(IS_ERR(ptr)))
+ ptr = NULL;
+ if (!ptr)
goto out_unlock;
dmabuf->vmap_ptr = ptr;
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index c30df50e4440..d276e33880be 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -649,7 +649,9 @@ static ssize_t firmware_loading_store(struct device *dev,
* see the mapped 'buf->data' once the loading
* is completed.
* */
- fw_map_pages_buf(fw_buf);
+ if (fw_map_pages_buf(fw_buf))
+ dev_err(dev, "%s: map pages failed\n",
+ __func__);
list_del_init(&fw_buf->pending_list);
complete_all(&fw_buf->completion);
break;
@@ -900,7 +902,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
if (timeout != MAX_SCHEDULE_TIMEOUT)
- schedule_delayed_work(&fw_priv->timeout_work, timeout);
+ queue_delayed_work(system_power_efficient_wq,
+ &fw_priv->timeout_work, timeout);
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
@@ -908,6 +911,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
wait_for_completion(&buf->completion);
cancel_delayed_work_sync(&fw_priv->timeout_work);
+ if (!buf->data)
+ retval = -ENOMEM;
device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
@@ -1570,8 +1575,8 @@ static void device_uncache_fw_images_work(struct work_struct *work)
*/
static void device_uncache_fw_images_delay(unsigned long delay)
{
- schedule_delayed_work(&fw_cache.work,
- msecs_to_jiffies(delay));
+ queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
+ msecs_to_jiffies(delay));
}
static int fw_pm_notify(struct notifier_block *notify_block,
diff --git a/drivers/base/node.c b/drivers/base/node.c
index bc9f43bf7e29..8f7ed9933a7c 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -599,7 +599,11 @@ int register_one_node(int nid)
void unregister_one_node(int nid)
{
+ if (!node_devices[nid])
+ return;
+
unregister_node(node_devices[nid]);
+ kfree(node_devices[nid]);
node_devices[nid] = NULL;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index bc78848dd59a..e714709704e4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -481,11 +481,10 @@ static int platform_drv_probe(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
int ret;
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_attach(_dev, true);
+ acpi_dev_pm_attach(_dev, true);
ret = drv->probe(dev);
- if (ret && ACPI_HANDLE(_dev))
+ if (ret)
acpi_dev_pm_detach(_dev, true);
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
@@ -508,8 +507,7 @@ static int platform_drv_remove(struct device *_dev)
int ret;
ret = drv->remove(dev);
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_detach(_dev, true);
+ acpi_dev_pm_detach(_dev, true);
return ret;
}
@@ -520,8 +518,7 @@ static void platform_drv_shutdown(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
drv->shutdown(dev);
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_detach(_dev, true);
+ acpi_dev_pm_detach(_dev, true);
}
/**
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2e58ebb1f6c0..1cb8544598d5 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,6 +1,5 @@
-obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
-obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index e870bbe9ec4e..b99e6c06ee67 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/io.h>
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 5da914041305..df2e5eeaeb05 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/export.h>
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index bfb8955c406c..ae098a261fcd 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
@@ -42,7 +41,7 @@
struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
if (!__retval && __elapsed > __td->field) { \
__td->field = __elapsed; \
- dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
+ dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \
__elapsed); \
genpd->max_off_time_changed = true; \
__td->constraint_changed = true; \
@@ -706,6 +705,14 @@ static int pm_genpd_runtime_resume(struct device *dev)
return 0;
}
+static bool pd_ignore_unused;
+static int __init pd_ignore_unused_setup(char *__unused)
+{
+ pd_ignore_unused = true;
+ return 1;
+}
+__setup("pd_ignore_unused", pd_ignore_unused_setup);
+
/**
* pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
*/
@@ -713,6 +720,11 @@ void pm_genpd_poweroff_unused(void)
{
struct generic_pm_domain *genpd;
+ if (pd_ignore_unused) {
+ pr_warn("genpd: Not disabling unused power domains\n");
+ return;
+ }
+
mutex_lock(&gpd_list_lock);
list_for_each_entry(genpd, &gpd_list, gpd_list_node)
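Usage note for the pd_ignore_unused __setup() handler added above: passing the bare token on the kernel command line sets the flag and makes pm_genpd_poweroff_unused() bail out with the warning shown. The surrounding parameters here are only an example command line:

    root=/dev/mmcblk0p2 rw pd_ignore_unused
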
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 28dee3053f1f..a089e3bcdfbc 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index a2e55bfdf572..96a92db83cad 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -285,7 +285,7 @@ int pm_generic_restore(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_restore);
/**
- * pm_generic_complete - Generic routine competing a device power transition.
+ * pm_generic_complete - Generic routine completing a device power transition.
* @dev: Device to handle.
*
* Complete a device power transition during a system-wide power transition.
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1b41fca3d65a..86d5e4fb5b98 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -29,6 +29,7 @@
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
+#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>
@@ -91,6 +92,8 @@ void device_pm_sleep_init(struct device *dev)
{
dev->power.is_prepared = false;
dev->power.is_suspended = false;
+ dev->power.is_noirq_suspended = false;
+ dev->power.is_late_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
@@ -467,7 +470,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
@@ -479,6 +482,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
if (dev->power.syscore)
goto Out;
+ if (!dev->power.is_noirq_suspended)
+ goto Out;
+
+ dpm_wait(dev->parent, async);
+
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -499,12 +507,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
}
error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_noirq_suspended = false;
Out:
+ complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
}
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_noirq(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -514,29 +542,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
*/
static void dpm_resume_noirq(pm_message_t state)
{
+ struct device *dev;
ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_noirq_list)) {
- struct device *dev = to_device(dpm_noirq_list.next);
- int error;
+ pm_transition = state;
+ /*
+ * Advance the async threads upfront,
+ * in case the starting of async threads is
+ * delayed by non-async resuming devices.
+ */
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+ reinit_completion(&dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume_noirq, dev);
+ }
+ }
+
+ while (!list_empty(&dpm_noirq_list)) {
+ dev = to_device(dpm_noirq_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
mutex_unlock(&dpm_list_mtx);
- error = device_resume_noirq(dev, state);
- if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " noirq", error);
+ if (!is_async(dev)) {
+ int error;
+
+ error = device_resume_noirq(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_noirq++;
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " noirq", error);
+ }
}
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
cpuidle_resume();
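Separately from the hunk above, a self-contained sketch of the fan-out/join idiom it adopts (async_schedule() per device, then async_synchronize_full()); the list and callback here are hypothetical, not the dpm code itself, and dev->power.entry assumes CONFIG_PM_SLEEP.

#include <linux/async.h>
#include <linux/device.h>
#include <linux/list.h>

static void example_async_cb(void *data, async_cookie_t cookie)
{
    struct device *dev = data;

    /* per-device work would run here, in parallel with the main loop */
    put_device(dev);
}

static void example_fan_out(struct list_head *devices)
{
    struct device *dev;

    list_for_each_entry(dev, devices, power.entry) {
        get_device(dev);                 /* keep dev alive for the callback */
        async_schedule(example_async_cb, dev);
    }
    async_synchronize_full();            /* join: wait for all callbacks */
}
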
@@ -549,7 +596,7 @@ static void dpm_resume_noirq(pm_message_t state)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
@@ -561,6 +608,11 @@ static int device_resume_early(struct device *dev, pm_message_t state)
if (dev->power.syscore)
goto Out;
+ if (!dev->power.is_late_suspended)
+ goto Out;
+
+ dpm_wait(dev->parent, async);
+
if (dev->pm_domain) {
info = "early power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -581,43 +633,75 @@ static int device_resume_early(struct device *dev, pm_message_t state)
}
error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_late_suspended = false;
Out:
TRACE_RESUME(error);
pm_runtime_enable(dev);
+ complete_all(&dev->power.completion);
return error;
}
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_early(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
static void dpm_resume_early(pm_message_t state)
{
+ struct device *dev;
ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.next);
- int error;
+ pm_transition = state;
+ /*
+ * Advance the async threads upfront,
+ * in case the starting of async threads is
+ * delayed by non-async resuming devices.
+ */
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+ reinit_completion(&dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume_early, dev);
+ }
+ }
+
+ while (!list_empty(&dpm_late_early_list)) {
+ dev = to_device(dpm_late_early_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
mutex_unlock(&dpm_list_mtx);
- error = device_resume_early(dev, state);
- if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " early", error);
- }
+ if (!is_async(dev)) {
+ int error;
+ error = device_resume_early(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " early", error);
+ }
+ }
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, "early");
}
@@ -732,12 +816,6 @@ static void async_resume(void *data, async_cookie_t cookie)
put_device(dev);
}
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
-}
-
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -789,6 +867,8 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, NULL);
+
+ cpufreq_resume();
}
/**
@@ -913,13 +993,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_suspend_noirq(struct device *dev, pm_message_t state)
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
+ int error = 0;
+
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
if (dev->power.syscore)
- return 0;
+ goto Complete;
+
+ dpm_wait_for_children(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -940,7 +1031,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
callback = pm_noirq_op(dev->driver->pm, state);
}
- return dpm_run_callback(callback, dev, state, info);
+ error = dpm_run_callback(callback, dev, state, info);
+ if (!error)
+ dev->power.is_noirq_suspended = true;
+ else
+ async_error = error;
+
+Complete:
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_noirq(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend_noirq, dev);
+ return 0;
+ }
+ return __device_suspend_noirq(dev, pm_transition, false);
}
/**
@@ -958,19 +1083,20 @@ static int dpm_suspend_noirq(pm_message_t state)
cpuidle_pause();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_noirq(dev, state);
+ error = device_suspend_noirq(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " noirq", error);
- suspend_stats.failed_suspend_noirq++;
- dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
@@ -979,16 +1105,21 @@ static int dpm_suspend_noirq(pm_message_t state)
list_move(&dev->power.entry, &dpm_noirq_list);
put_device(dev);
- if (pm_wakeup_pending()) {
- error = -EBUSY;
+ if (async_error)
break;
- }
}
mutex_unlock(&dpm_list_mtx);
- if (error)
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+
+ if (error) {
+ suspend_stats.failed_suspend_noirq++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_resume_noirq(resume_event(state));
- else
+ } else {
dpm_show_time(starttime, state, "noirq");
+ }
return error;
}
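
The noirq (and, below, late) suspend phases only run a device asynchronously when pm_async is enabled and the device has power.async_suspend set; drivers opt in with device_enable_async_suspend(). A minimal, hypothetical probe showing the opt-in (the driver and device names are made up):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int my_probe(struct platform_device *pdev)
{
	/*
	 * Allow this device to be suspended/resumed in parallel with others;
	 * the PM core still enforces parent/child ordering via dpm_wait*().
	 */
	device_enable_async_suspend(&pdev->dev);
	return 0;
}

static struct platform_driver my_driver = {
	.probe	= my_probe,
	.driver	= {
		.name = "my-async-dev",
	},
};
module_platform_driver(my_driver);
MODULE_LICENSE("GPL");
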
@@ -999,15 +1130,26 @@ static int dpm_suspend_noirq(pm_message_t state)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_suspend_late(struct device *dev, pm_message_t state)
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
+ int error = 0;
__pm_runtime_disable(dev, false);
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
if (dev->power.syscore)
- return 0;
+ goto Complete;
+
+ dpm_wait_for_children(dev, async);
if (dev->pm_domain) {
info = "late power domain ";
@@ -1028,7 +1170,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
callback = pm_late_early_op(dev->driver->pm, state);
}
- return dpm_run_callback(callback, dev, state, info);
+ error = dpm_run_callback(callback, dev, state, info);
+ if (!error)
+ dev->power.is_late_suspended = true;
+ else
+ async_error = error;
+
+Complete:
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_late(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+ put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend_late, dev);
+ return 0;
+ }
+
+ return __device_suspend_late(dev, pm_transition, false);
}
/**
@@ -1041,19 +1217,20 @@ static int dpm_suspend_late(pm_message_t state)
int error = 0;
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
while (!list_empty(&dpm_suspended_list)) {
struct device *dev = to_device(dpm_suspended_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_late(dev, state);
+ error = device_suspend_late(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " late", error);
- suspend_stats.failed_suspend_late++;
- dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
@@ -1062,17 +1239,18 @@ static int dpm_suspend_late(pm_message_t state)
list_move(&dev->power.entry, &dpm_late_early_list);
put_device(dev);
- if (pm_wakeup_pending()) {
- error = -EBUSY;
+ if (async_error)
break;
- }
}
mutex_unlock(&dpm_list_mtx);
- if (error)
+ async_synchronize_full();
+ if (error) {
+ suspend_stats.failed_suspend_late++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
- else
+ } else {
dpm_show_time(starttime, state, "late");
-
+ }
return error;
}
@@ -1259,6 +1437,8 @@ int dpm_suspend(pm_message_t state)
might_sleep();
+ cpufreq_suspend();
+
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index fa4187418440..25538675d59e 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index cfc3226ec492..a21223d95926 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
-extern int pm_qos_sysfs_add_latency(struct device *dev);
-extern void pm_qos_sysfs_remove_latency(struct device *dev);
+extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 5c1361a9e5dd..36b9eb4862cb 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
s32 __dev_pm_qos_read_value(struct device *dev)
{
return IS_ERR_OR_NULL(dev->power.qos) ?
- 0 : pm_qos_read_value(&dev->power.qos->latency);
+ 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
}
/**
@@ -141,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req,
int ret;
switch(req->type) {
- case DEV_PM_QOS_LATENCY:
- ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
- action, value);
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = pm_qos_update_target(&qos->resume_latency,
+ &req->data.pnode, action, value);
if (ret) {
- value = pm_qos_read_value(&qos->latency);
+ value = pm_qos_read_value(&qos->resume_latency);
blocking_notifier_call_chain(&dev_pm_notifiers,
(unsigned long)value,
req);
}
break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ ret = pm_qos_update_target(&qos->latency_tolerance,
+ &req->data.pnode, action, value);
+ if (ret) {
+ value = pm_qos_read_value(&qos->latency_tolerance);
+ req->dev->power.set_latency_tolerance(req->dev, value);
+ }
+ break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
@@ -186,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
}
BLOCKING_INIT_NOTIFIER_HEAD(n);
- c = &qos->latency;
+ c = &qos->resume_latency;
plist_head_init(&c->list);
- c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
- c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+ c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->type = PM_QOS_MIN;
c->notifiers = n;
+ c = &qos->latency_tolerance;
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ c->type = PM_QOS_MIN;
+
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
@@ -224,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
- pm_qos_sysfs_remove_latency(dev);
+ pm_qos_sysfs_remove_resume_latency(dev);
pm_qos_sysfs_remove_flags(dev);
mutex_lock(&dev_pm_qos_mtx);
@@ -237,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
goto out;
/* Flush the constraints lists for the device. */
- c = &qos->latency;
+ c = &qos->resume_latency;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
/*
* Update constraints list and call the notification
@@ -246,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+ c = &qos->latency_tolerance;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -265,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
+static bool dev_pm_qos_invalid_request(struct device *dev,
+ struct dev_pm_qos_request *req)
+{
+ return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
+ && !dev->power.set_latency_tolerance);
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
+{
+ int ret = 0;
+
+ if (!dev || dev_pm_qos_invalid_request(dev, req))
+ return -EINVAL;
+
+ if (WARN(dev_pm_qos_request_active(req),
+ "%s() called for already added request\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
+
+ trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+ if (!ret) {
+ req->dev = dev;
+ req->type = type;
+ ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+ }
+ return ret;
+}
+
/**
* dev_pm_qos_add_request - inserts new qos request into the list
* @dev: target device for the constraint
@@ -290,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value)
{
- int ret = 0;
-
- if (!dev || !req) /*guard against callers passing in null */
- return -EINVAL;
-
- if (WARN(dev_pm_qos_request_active(req),
- "%s() called for already added request\n", __func__))
- return -EINVAL;
+ int ret;
mutex_lock(&dev_pm_qos_mtx);
-
- if (IS_ERR(dev->power.qos))
- ret = -ENODEV;
- else if (!dev->power.qos)
- ret = dev_pm_qos_constraints_allocate(dev);
-
- trace_dev_pm_qos_add_request(dev_name(dev), type, value);
- if (!ret) {
- req->dev = dev;
- req->type = type;
- ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
- }
-
+ ret = __dev_pm_qos_add_request(dev, req, type, value);
mutex_unlock(&dev_pm_qos_mtx);
-
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
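
With the constraint renamed from DEV_PM_QOS_LATENCY to DEV_PM_QOS_RESUME_LATENCY, a kernel-side user adds a per-device resume-latency bound roughly as in the sketch below. The request object, the 100 microsecond value and the function names are illustrative; the value is in microseconds, matching the pm_qos_resume_latency_us sysfs file.

#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request my_req;	/* usually lives in driver data */

static int my_add_resume_latency_bound(struct device *dev)
{
	/* Keep the resume latency of @dev below 100 us. */
	return dev_pm_qos_add_request(dev, &my_req,
				      DEV_PM_QOS_RESUME_LATENCY, 100);
}

static void my_drop_resume_latency_bound(void)
{
	dev_pm_qos_remove_request(&my_req);
}
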
@@ -341,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
return -ENODEV;
switch(req->type) {
- case DEV_PM_QOS_LATENCY:
+ case DEV_PM_QOS_RESUME_LATENCY:
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
curr_value = req->data.pnode.prio;
break;
case DEV_PM_QOS_FLAGS:
@@ -460,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
ret = dev_pm_qos_constraints_allocate(dev);
if (!ret)
- ret = blocking_notifier_chain_register(
- dev->power.qos->latency.notifiers, notifier);
+ ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
+ notifier);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
@@ -487,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev,
/* Silently return if the constraints object is not present. */
if (!IS_ERR_OR_NULL(dev->power.qos))
- retval = blocking_notifier_chain_unregister(
- dev->power.qos->latency.notifiers,
- notifier);
+ retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+ notifier);
mutex_unlock(&dev_pm_qos_mtx);
return retval;
@@ -530,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
* dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
* @dev: Device whose ancestor to add the request for.
* @req: Pointer to the preallocated handle.
+ * @type: Type of the request.
* @value: Constraint latency value.
*/
int dev_pm_qos_add_ancestor_request(struct device *dev,
- struct dev_pm_qos_request *req, s32 value)
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
{
struct device *ancestor = dev->parent;
int ret = -ENODEV;
- while (ancestor && !ancestor->power.ignore_children)
- ancestor = ancestor->parent;
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ while (ancestor && !ancestor->power.ignore_children)
+ ancestor = ancestor->parent;
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ while (ancestor && !ancestor->power.set_latency_tolerance)
+ ancestor = ancestor->parent;
+
+ break;
+ default:
+ ancestor = NULL;
+ }
if (ancestor)
- ret = dev_pm_qos_add_request(ancestor, req,
- DEV_PM_QOS_LATENCY, value);
+ ret = dev_pm_qos_add_request(ancestor, req, type, value);
if (ret < 0)
req->dev = NULL;
@@ -559,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
struct dev_pm_qos_request *req = NULL;
switch(type) {
- case DEV_PM_QOS_LATENCY:
- req = dev->power.qos->latency_req;
- dev->power.qos->latency_req = NULL;
+ case DEV_PM_QOS_RESUME_LATENCY:
+ req = dev->power.qos->resume_latency_req;
+ dev->power.qos->resume_latency_req = NULL;
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ req = dev->power.qos->latency_tolerance_req;
+ dev->power.qos->latency_tolerance_req = NULL;
break;
case DEV_PM_QOS_FLAGS:
req = dev->power.qos->flags_req;
@@ -597,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!req)
return -ENOMEM;
- ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
if (ret < 0) {
kfree(req);
return ret;
@@ -609,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
- else if (dev->power.qos->latency_req)
+ else if (dev->power.qos->resume_latency_req)
ret = -EEXIST;
if (ret < 0) {
@@ -618,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
- dev->power.qos->latency_req = req;
+ dev->power.qos->resume_latency_req = req;
mutex_unlock(&dev_pm_qos_mtx);
- ret = pm_qos_sysfs_add_latency(dev);
+ ret = pm_qos_sysfs_add_resume_latency(dev);
if (ret)
- dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
out:
mutex_unlock(&dev_pm_qos_sysfs_mtx);
@@ -634,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}
/**
@@ -646,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev)
{
mutex_lock(&dev_pm_qos_sysfs_mtx);
- pm_qos_sysfs_remove_latency(dev);
+ pm_qos_sysfs_remove_resume_latency(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
@@ -768,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
pm_runtime_put(dev);
return ret;
}
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+ s32 ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req ?
+ PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+ dev->power.qos->latency_tolerance_req->data.pnode.prio;
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req) {
+ struct dev_pm_qos_request *req;
+
+ if (val < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+ if (ret < 0) {
+ kfree(req);
+ goto out;
+ }
+ dev->power.qos->latency_tolerance_req = req;
+ } else {
+ if (val < 0) {
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+ ret = 0;
+ } else {
+ ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+ }
+ }
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
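
dev_pm_qos_update_user_latency_tolerance() only accepts a request if the device provides a power.set_latency_tolerance() hook, and apply_constraint() calls that hook with the new aggregate value whenever it changes. The sketch below shows what such a hook might look like, assuming the conventions introduced here: a negative value means "return to the default policy" ("auto" in sysfs) and PM_QOS_LATENCY_ANY means "no bound" ("any"). The my_hw_*() helpers are hypothetical stand-ins for device-specific programming; a real driver would also point dev->power.set_latency_tolerance at the hook before the device is registered, so that dpm_sysfs_add() exposes pm_qos_latency_tolerance_us for it.

#include <linux/device.h>
#include <linux/pm_qos.h>

/* Hypothetical hardware hooks; a real driver would program LTR-style registers here. */
static void my_hw_use_default_ltr(struct device *dev) { }
static void my_hw_disable_ltr(struct device *dev) { }
static void my_hw_set_ltr_us(struct device *dev, s32 us) { }

static void my_set_latency_tolerance(struct device *dev, s32 val)
{
	if (val < 0)
		my_hw_use_default_ltr(dev);	/* "auto" in sysfs */
	else if (val == PM_QOS_LATENCY_ANY)
		my_hw_disable_ltr(dev);		/* "any" in sysfs */
	else
		my_hw_set_ltr_us(dev, val);	/* tolerance in microseconds */
}
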
#else /* !CONFIG_PM_RUNTIME */
static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
static void __dev_pm_qos_hide_flags(struct device *dev) {}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 72e00e66ecc5..67c7938e430b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -13,6 +13,43 @@
#include <trace/events/rpm.h>
#include "power.h"
+#define RPM_GET_CALLBACK(dev, cb) \
+({ \
+ int (*__rpm_cb)(struct device *__d); \
+ \
+ if (dev->pm_domain) \
+ __rpm_cb = dev->pm_domain->ops.cb; \
+ else if (dev->type && dev->type->pm) \
+ __rpm_cb = dev->type->pm->cb; \
+ else if (dev->class && dev->class->pm) \
+ __rpm_cb = dev->class->pm->cb; \
+ else if (dev->bus && dev->bus->pm) \
+ __rpm_cb = dev->bus->pm->cb; \
+ else \
+ __rpm_cb = NULL; \
+ \
+ if (!__rpm_cb && dev->driver && dev->driver->pm) \
+ __rpm_cb = dev->driver->pm->cb; \
+ \
+ __rpm_cb; \
+})
+
+static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_suspend);
+}
+
+static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_resume);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_idle);
+}
+
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
@@ -310,19 +347,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_idle;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_idle;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_idle;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_idle;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_idle;
+ callback = rpm_get_idle_cb(dev);
if (callback)
retval = __rpm_callback(callback, dev);
@@ -492,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_suspend;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_suspend;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_suspend;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_suspend;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_suspend;
+ callback = rpm_get_suspend_cb(dev);
retval = rpm_callback(callback, dev);
if (retval)
@@ -724,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_RESUMING);
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_resume;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_resume;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_resume;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_resume;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_resume;
+ callback = rpm_get_resume_cb(dev);
retval = rpm_callback(callback, dev);
if (retval) {
@@ -1130,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);
* @dev: Device to handle.
* @check_resume: If set, check if there's a resume request for the device.
*
- * Increment power.disable_depth for the device and if was zero previously,
+ * Increment power.disable_depth for the device and if it was zero previously,
* cancel all pending runtime PM requests for the device and wait for all
* operations in progress to complete. The device can be either active or
* suspended after its runtime PM has been disabled.
@@ -1401,3 +1402,86 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.irq_safe && dev->parent)
pm_runtime_put(dev->parent);
}
+#endif
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status
+ * and, if it is active, invoke its .runtime_suspend callback to bring the
+ * device into a suspended state. Keep runtime PM disabled to preserve the
+ * state unless an error is encountered.
+ *
+ * Typically this function may be invoked from a system suspend callback to
+ * make sure the device is put into a low power state.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ pm_runtime_disable(dev);
+
+ /*
+	 * Note that pm_runtime_status_suspended() returns false when
+	 * CONFIG_PM_RUNTIME is unset, which means the device will be put
+	 * into a low power state.
+ */
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ callback = rpm_get_suspend_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto err;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto err;
+
+ pm_runtime_set_suspended(dev);
+ return 0;
+err:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function we expect the caller to have brought the
+ * device into a low power state by a call to pm_runtime_force_suspend().
+ * Here we reverse those actions and bring the device back to full power.
+ * The runtime PM status is updated and runtime PM is re-enabled.
+ *
+ * Typically this function may be invoked from a system resume callback to
+ * make sure the device is put back into a full power state.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ callback = rpm_get_resume_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto out;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_mark_last_busy(dev);
+out:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
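
A common use of the two helpers above is to route a driver's system sleep callbacks straight at them, so that a device which is already runtime-suspended is left where it is and an active one is suspended through its runtime PM callbacks. A hedged sketch; the driver name is illustrative and the device is assumed to have working runtime PM callbacks:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static const struct dev_pm_ops my_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static struct platform_driver my_driver = {
	.driver = {
		.name	= "my-forced-rpm-dev",
		.pm	= &my_pm_ops,
	},
};
module_platform_driver(my_driver);
MODULE_LICENSE("GPL");
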
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 03e089ade5ce..95b181d1ca6d 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
autosuspend_delay_ms_store);
-static ssize_t pm_qos_latency_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pm_qos_resume_latency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
+ return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
}
-static ssize_t pm_qos_latency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev,
if (value < 0)
return -EINVAL;
- ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
+ ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+ value);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
- pm_qos_latency_show, pm_qos_latency_store);
+ pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+
+static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
+
+ if (value < 0)
+ return sprintf(buf, "auto\n");
+ else if (value == PM_QOS_LATENCY_ANY)
+ return sprintf(buf, "any\n");
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ s32 value;
+ int ret;
+
+ if (kstrtos32(buf, 0, &value)) {
+ if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+ value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+ value = PM_QOS_LATENCY_ANY;
+ }
+ ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
+ pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
@@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = {
.attrs = runtime_attrs,
};
-static struct attribute *pm_qos_latency_attrs[] = {
+static struct attribute *pm_qos_resume_latency_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_pm_qos_resume_latency_us.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
-static struct attribute_group pm_qos_latency_attr_group = {
+static struct attribute_group pm_qos_resume_latency_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_resume_latency_attrs,
+};
+
+static struct attribute *pm_qos_latency_tolerance_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_pm_qos_latency_tolerance_us.attr,
+#endif /* CONFIG_PM_RUNTIME */
+ NULL,
+};
+static struct attribute_group pm_qos_latency_tolerance_attr_group = {
.name = power_group_name,
- .attrs = pm_qos_latency_attrs,
+ .attrs = pm_qos_latency_tolerance_attrs,
};
static struct attribute *pm_qos_flags_attrs[] = {
@@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev)
if (rc)
goto err_out;
}
-
if (device_can_wakeup(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
- if (rc) {
- if (pm_runtime_callbacks_present(dev))
- sysfs_unmerge_group(&dev->kobj,
- &pm_runtime_attr_group);
- goto err_out;
- }
+ if (rc)
+ goto err_runtime;
+ }
+ if (dev->power.set_latency_tolerance) {
+ rc = sysfs_merge_group(&dev->kobj,
+ &pm_qos_latency_tolerance_attr_group);
+ if (rc)
+ goto err_wakeup;
}
return 0;
+ err_wakeup:
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ err_runtime:
+ sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
err_out:
sysfs_remove_group(&dev->kobj, &pm_attr_group);
return rc;
@@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
-int pm_qos_sysfs_add_latency(struct device *dev)
+int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
- return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
+ return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}
-void pm_qos_sysfs_remove_latency(struct device *dev)
+void pm_qos_sysfs_remove_resume_latency(struct device *dev)
{
- sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}
int pm_qos_sysfs_add_flags(struct device *dev)
@@ -708,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev)
{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 33414b1de201..7d1326985bee 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -134,6 +134,8 @@ struct regmap {
/* if set, converts bulk rw to single rw */
bool use_single_rw;
+ /* if set, the device supports multi write mode */
+ bool can_multi_write;
struct rb_root range_tree;
void *selector_work_buf; /* Scratch buffer used for selector */
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d4dd77134814..29b4128da0b0 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -249,11 +249,12 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
{
unsigned int reg;
- for (reg = min; reg <= max; reg++) {
+ for (reg = min; reg <= max; reg += map->reg_stride) {
unsigned int val;
int ret;
- if (regmap_volatile(map, reg))
+ if (regmap_volatile(map, reg) ||
+ !regmap_writeable(map, reg))
continue;
ret = regcache_read(map, reg, &val);
@@ -312,10 +313,6 @@ int regcache_sync(struct regmap *map)
/* Apply any patch first */
map->cache_bypass = 1;
for (i = 0; i < map->patch_regs; i++) {
- if (map->patch[i].reg % map->reg_stride) {
- ret = -EINVAL;
- goto out;
- }
ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
if (ret != 0) {
dev_err(map->dev, "Failed to write %x = %x: %d\n",
@@ -636,10 +633,10 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
if (*data == NULL)
return 0;
- count = cur - base;
+ count = (cur - base) / map->reg_stride;
dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
- count * val_bytes, count, base, cur - 1);
+ count * val_bytes, count, base, cur - map->reg_stride);
map->cache_bypass = 1;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index c5471cd6ebb7..45d812c0ea77 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -511,7 +511,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
debugfs_create_file("range", 0400, map->debugfs,
map, &regmap_reg_ranges_fops);
- if (map->max_register) {
+ if (map->max_register || regmap_readable(map, 0)) {
debugfs_create_file("registers", 0400, map->debugfs,
map, &regmap_map_fops);
debugfs_create_file("access", 0400, map->debugfs,
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index fa6bf5279d28..ebd189529760 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -13,7 +13,6 @@
#include <linux/regmap.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/init.h>
static int regmap_i2c_write(void *context, const void *data, size_t count)
{
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 82692068d3cb..edf88f20cbce 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -368,8 +368,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (!d)
return -ENOMEM;
- *data = d;
-
d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
GFP_KERNEL);
if (!d->status_buf)
@@ -506,6 +504,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
goto err_domain;
}
+ *data = d;
+
return 0;
err_domain:
@@ -533,7 +533,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
return;
free_irq(irq, d);
- /* We should unmap the domain but... */
+ irq_domain_remove(d->domain);
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 81f977510775..1e03e7f8bacb 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -18,7 +18,6 @@
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -26,10 +25,47 @@
struct regmap_mmio_context {
void __iomem *regs;
+ unsigned reg_bytes;
unsigned val_bytes;
+ unsigned pad_bytes;
struct clk *clk;
};
+static inline void regmap_mmio_regsize_check(size_t reg_size)
+{
+ switch (reg_size) {
+ case 1:
+ case 2:
+ case 4:
+#ifdef CONFIG_64BIT
+ case 8:
+#endif
+ break;
+ default:
+ BUG();
+ }
+}
+
+static int regmap_mmio_regbits_check(size_t reg_bits)
+{
+ switch (reg_bits) {
+ case 8:
+ case 16:
+ case 32:
+#ifdef CONFIG_64BIT
+ case 64:
+#endif
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline void regmap_mmio_count_check(size_t count)
+{
+ BUG_ON(count % 2 != 0);
+}
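
With regmap_mmio_regbits_check() above, regmap-mmio now accepts 8-, 16- and 32-bit (plus 64-bit on 64-bit kernels) register widths instead of hard-wiring 32 bits; creating an MMIO map itself is unchanged. A minimal sketch, assuming an already ioremapped region; the config values and function name are illustrative:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/regmap.h>

static const struct regmap_config my_mmio_config = {
	.reg_bits	= 32,		/* 8, 16 and (on 64-bit) 64 now pass the check too */
	.reg_stride	= 4,
	.val_bits	= 32,
	.max_register	= 0xfc,		/* illustrative */
};

static struct regmap *my_init_regmap(struct device *dev, void __iomem *base)
{
	return devm_regmap_init_mmio(dev, base, &my_mmio_config);
}
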
+
static int regmap_mmio_gather_write(void *context,
const void *reg, size_t reg_size,
const void *val, size_t val_size)
@@ -38,7 +74,7 @@ static int regmap_mmio_gather_write(void *context,
u32 offset;
int ret;
- BUG_ON(reg_size != 4);
+ regmap_mmio_regsize_check(reg_size);
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
@@ -81,9 +117,13 @@ static int regmap_mmio_gather_write(void *context,
static int regmap_mmio_write(void *context, const void *data, size_t count)
{
- BUG_ON(count < 4);
+ struct regmap_mmio_context *ctx = context;
+ u32 offset = ctx->reg_bytes + ctx->pad_bytes;
+
+ regmap_mmio_count_check(count);
- return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4);
+ return regmap_mmio_gather_write(context, data, ctx->reg_bytes,
+ data + offset, count - offset);
}
static int regmap_mmio_read(void *context,
@@ -94,7 +134,7 @@ static int regmap_mmio_read(void *context,
u32 offset;
int ret;
- BUG_ON(reg_size != 4);
+ regmap_mmio_regsize_check(reg_size);
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
@@ -165,8 +205,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
int min_stride;
int ret;
- if (config->reg_bits != 32)
- return ERR_PTR(-EINVAL);
+ ret = regmap_mmio_regbits_check(config->reg_bits);
+ if (ret)
+ return ERR_PTR(ret);
if (config->pad_bits)
return ERR_PTR(-EINVAL);
@@ -209,6 +250,8 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
+ ctx->reg_bytes = config->reg_bits / 8;
+ ctx->pad_bytes = config->pad_bits / 8;
ctx->clk = ERR_PTR(-ENODEV);
if (clk_id == NULL)
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 37f12ae7aada..0eb3097c0d76 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -12,7 +12,6 @@
#include <linux/regmap.h>
#include <linux/spi/spi.h>
-#include <linux/init.h>
#include <linux/module.h>
#include "internal.h"
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index ac2391013db1..d7026dc33388 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -22,69 +22,235 @@
#include <linux/module.h>
#include <linux/init.h>
-static int regmap_spmi_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int regmap_spmi_base_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
{
+ u8 addr = *(u8 *)reg;
+ int err = 0;
+
+ BUG_ON(reg_size != 1);
+
+ while (val_size-- && !err)
+ err = spmi_register_read(context, addr++, val++);
+
+ return err;
+}
+
+static int regmap_spmi_base_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ const u8 *data = val;
+ u8 addr = *(u8 *)reg;
+ int err = 0;
+
+ BUG_ON(reg_size != 1);
+
+ /*
+	 * SPMI defines a more bandwidth-efficient 'Register 0 Write' sequence;
+	 * use it when possible.
+ */
+ if (addr == 0 && val_size) {
+ err = spmi_register_zero_write(context, *data);
+ if (err)
+ goto err_out;
+
+ data++;
+ addr++;
+ val_size--;
+ }
+
+ while (val_size) {
+ err = spmi_register_write(context, addr, *data);
+ if (err)
+ goto err_out;
+
+ data++;
+ addr++;
+ val_size--;
+ }
+
+err_out:
+ return err;
+}
+
+static int regmap_spmi_base_write(void *context, const void *data,
+ size_t count)
+{
+ BUG_ON(count < 1);
+ return regmap_spmi_base_gather_write(context, data, 1, data + 1,
+ count - 1);
+}
+
+static struct regmap_bus regmap_spmi_base = {
+ .read = regmap_spmi_base_read,
+ .write = regmap_spmi_base_write,
+ .gather_write = regmap_spmi_base_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * regmap_init_spmi_base(): Create regmap for the Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_spmi_base);
+
+/**
+ * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base);
+
+static int regmap_spmi_ext_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int err = 0;
+ size_t len;
+ u16 addr;
+
BUG_ON(reg_size != 2);
- return spmi_ext_register_readl(context, *(u16 *)reg,
- val, val_size);
+
+ addr = *(u16 *)reg;
+
+ /*
+ * Split accesses into two to take advantage of the more
+	 * bandwidth-efficient 'Extended Register Read' command when possible.
+ */
+ while (addr <= 0xFF && val_size) {
+ len = min_t(size_t, val_size, 16);
+
+ err = spmi_ext_register_read(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+ while (val_size) {
+ len = min_t(size_t, val_size, 8);
+
+		err = spmi_ext_register_readl(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+err_out:
+ return err;
}
-static int regmap_spmi_gather_write(void *context,
- const void *reg, size_t reg_size,
- const void *val, size_t val_size)
+static int regmap_spmi_ext_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
{
+ int err = 0;
+ size_t len;
+ u16 addr;
+
BUG_ON(reg_size != 2);
- return spmi_ext_register_writel(context, *(u16 *)reg, val, val_size);
+
+ addr = *(u16 *)reg;
+
+ while (addr <= 0xFF && val_size) {
+ len = min_t(size_t, val_size, 16);
+
+ err = spmi_ext_register_write(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+ while (val_size) {
+ len = min_t(size_t, val_size, 8);
+
+ err = spmi_ext_register_writel(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+err_out:
+ return err;
}
-static int regmap_spmi_write(void *context, const void *data,
- size_t count)
+static int regmap_spmi_ext_write(void *context, const void *data,
+ size_t count)
{
BUG_ON(count < 2);
- return regmap_spmi_gather_write(context, data, 2, data + 2, count - 2);
+ return regmap_spmi_ext_gather_write(context, data, 2, data + 2,
+ count - 2);
}
-static struct regmap_bus regmap_spmi = {
- .read = regmap_spmi_read,
- .write = regmap_spmi_write,
- .gather_write = regmap_spmi_gather_write,
+static struct regmap_bus regmap_spmi_ext = {
+ .read = regmap_spmi_ext_read,
+ .write = regmap_spmi_ext_write,
+ .gather_write = regmap_spmi_ext_gather_write,
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
/**
- * regmap_init_spmi(): Initialize register map
- *
- * @sdev: Device that will be interacted with
- * @config: Configuration for register map
+ * regmap_init_spmi_ext(): Create regmap for Ext register space
+ * @sdev: Device that will be interacted with
+ * @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
-struct regmap *regmap_init_spmi(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config)
{
- return regmap_init(&sdev->dev, &regmap_spmi, sdev, config);
+ return regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
}
-EXPORT_SYMBOL_GPL(regmap_init_spmi);
+EXPORT_SYMBOL_GPL(regmap_init_spmi_ext);
/**
- * devm_regmap_init_spmi(): Initialise managed register map
- *
- * @sdev: Device that will be interacted with
- * @config: Configuration for register map
+ * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer
* to a struct regmap. The regmap will be automatically freed by the
* device management code.
*/
-struct regmap *devm_regmap_init_spmi(struct spmi_device *sdev,
+struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev,
const struct regmap_config *config)
{
- return devm_regmap_init(&sdev->dev, &regmap_spmi, sdev, config);
+ return devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spmi);
+EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext);
MODULE_LICENSE("GPL");
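
The rework above splits the single SPMI regmap bus into one for the Base register space (8-bit addresses, per-register commands) and one for the Extended space (16-bit addresses, block commands). A hedged sketch of how a PMIC function driver might create a map for the Extended space; the config values are illustrative, and the Base space would use devm_regmap_init_spmi_base() with .reg_bits = 8 instead:

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/spmi.h>

static const struct regmap_config my_pmic_config = {
	.reg_bits	= 16,		/* Extended space uses 16-bit addresses */
	.val_bits	= 8,
	.max_register	= 0xffff,	/* illustrative */
};

static int my_pmic_probe(struct spmi_device *sdev)
{
	struct regmap *map;

	map = devm_regmap_init_spmi_ext(sdev, &my_pmic_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ...register the PMIC's child functions and hand them the map... */
	return 0;
}
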
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 6a19515f8a45..63e30ef096e2 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -380,6 +380,28 @@ static void regmap_range_exit(struct regmap *map)
kfree(map->selector_work_buf);
}
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+ const struct regmap_config *config)
+{
+ struct regmap **m;
+
+ map->dev = dev;
+
+ regmap_debugfs_init(map, config->name);
+
+ /* Add a devres resource for dev_get_regmap() */
+ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
+ if (!m) {
+ regmap_debugfs_exit(map);
+ return -ENOMEM;
+ }
+ *m = map;
+ devres_add(dev, m);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_attach_dev);
+
/**
* regmap_init(): Initialise register map
*
@@ -397,7 +419,7 @@ struct regmap *regmap_init(struct device *dev,
void *bus_context,
const struct regmap_config *config)
{
- struct regmap *map, **m;
+ struct regmap *map;
int ret = -EINVAL;
enum regmap_endian reg_endian, val_endian;
int i, j;
@@ -439,6 +461,7 @@ struct regmap *regmap_init(struct device *dev,
else
map->reg_stride = 1;
map->use_single_rw = config->use_single_rw;
+ map->can_multi_write = config->can_multi_write;
map->dev = dev;
map->bus = bus;
map->bus_context = bus_context;
@@ -718,7 +741,7 @@ skip_format_initialization:
new->window_start = range_cfg->window_start;
new->window_len = range_cfg->window_len;
- if (_regmap_range_add(map, new) == false) {
+ if (!_regmap_range_add(map, new)) {
dev_err(map->dev, "Failed to add range %d\n", i);
kfree(new);
goto err_range;
@@ -734,25 +757,19 @@ skip_format_initialization:
}
}
- regmap_debugfs_init(map, config->name);
-
ret = regcache_init(map, config);
if (ret != 0)
goto err_range;
- /* Add a devres resource for dev_get_regmap() */
- m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
- if (!m) {
- ret = -ENOMEM;
- goto err_debugfs;
+ if (dev) {
+ ret = regmap_attach_dev(dev, map, config);
+ if (ret != 0)
+ goto err_regcache;
}
- *m = map;
- devres_add(dev, m);
return map;
-err_debugfs:
- regmap_debugfs_exit(map);
+err_regcache:
regcache_exit(map);
err_range:
regmap_range_exit(map);
@@ -1520,12 +1537,12 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map->lock_arg);
/*
* Some devices don't support bulk write, for
* them we have a series of single write operations.
*/
if (!map->bus || map->use_single_rw) {
+ map->lock(map->lock_arg);
for (i = 0; i < val_count; i++) {
unsigned int ival;
@@ -1554,31 +1571,239 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (ret != 0)
goto out;
}
+out:
+ map->unlock(map->lock_arg);
} else {
void *wval;
wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
if (!wval) {
- ret = -ENOMEM;
dev_err(map->dev, "Error in memory allocation\n");
- goto out;
+ return -ENOMEM;
}
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(wval + i);
+ map->lock(map->lock_arg);
ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+ map->unlock(map->lock_arg);
kfree(wval);
}
-out:
- map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
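
The reshuffled locking above holds the map lock only around the actual cache or bus I/O and drops it across the kmemdup(); callers are unaffected. For reference, a minimal bulk write, assuming a map with 16-bit values; the register address and data are illustrative:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Write four consecutive 16-bit registers starting at 0x10. */
static int my_load_coefficients(struct regmap *map)
{
	static const u16 coeffs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };

	return regmap_bulk_write(map, 0x10, coeffs, ARRAY_SIZE(coeffs));
}
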
/*
+ * _regmap_raw_multi_reg_write()
+ *
+ * The (register, value) pairs in regs have not been formatted, but they
+ * are all in the same page and have been converted to page-relative
+ * addresses. The page register has been written if that was necessary.
+ */
+static int _regmap_raw_multi_reg_write(struct regmap *map,
+ const struct reg_default *regs,
+ size_t num_regs)
+{
+ int ret;
+ void *buf;
+ int i;
+ u8 *u8;
+ size_t val_bytes = map->format.val_bytes;
+ size_t reg_bytes = map->format.reg_bytes;
+ size_t pad_bytes = map->format.pad_bytes;
+ size_t pair_size = reg_bytes + pad_bytes + val_bytes;
+ size_t len = pair_size * num_regs;
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* We have to linearise by hand. */
+
+ u8 = buf;
+
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ int val = regs[i].def;
+ trace_regmap_hw_write_start(map->dev, reg, 1);
+ map->format.format_reg(u8, reg, map->reg_shift);
+ u8 += reg_bytes + pad_bytes;
+ map->format.format_val(u8, val, 0);
+ u8 += val_bytes;
+ }
+ u8 = buf;
+ *u8 |= map->write_flag_mask;
+
+ ret = map->bus->write(map->bus_context, buf, len);
+
+ kfree(buf);
+
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ trace_regmap_hw_write_done(map->dev, reg, 1);
+ }
+ return ret;
+}
+
+static unsigned int _regmap_register_page(struct regmap *map,
+ unsigned int reg,
+ struct regmap_range_node *range)
+{
+ unsigned int win_page = (reg - range->range_min) / range->window_len;
+
+ return win_page;
+}
+
+static int _regmap_range_multi_paged_reg_write(struct regmap *map,
+ struct reg_default *regs,
+ size_t num_regs)
+{
+ int ret;
+ int i, n;
+ struct reg_default *base;
+ unsigned int this_page;
+ /*
+	 * The set of registers is not necessarily in order, but since the
+	 * order of writes must be preserved, this algorithm chops the set
+	 * each time the page changes.
+ */
+ base = regs;
+ for (i = 0, n = 0; i < num_regs; i++, n++) {
+ unsigned int reg = regs[i].reg;
+ struct regmap_range_node *range;
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ unsigned int win_page = _regmap_register_page(map, reg,
+ range);
+
+ if (i == 0)
+ this_page = win_page;
+ if (win_page != this_page) {
+ this_page = win_page;
+ ret = _regmap_raw_multi_reg_write(map, base, n);
+ if (ret != 0)
+ return ret;
+ base += n;
+ n = 0;
+ }
+ ret = _regmap_select_page(map, &base[n].reg, range, 1);
+ if (ret != 0)
+ return ret;
+ }
+ }
+ if (n > 0)
+ return _regmap_raw_multi_reg_write(map, base, n);
+ return 0;
+}
+
+static int _regmap_multi_reg_write(struct regmap *map,
+ const struct reg_default *regs,
+ size_t num_regs)
+{
+ int i;
+ int ret;
+
+ if (!map->can_multi_write) {
+ for (i = 0; i < num_regs; i++) {
+ ret = _regmap_write(map, regs[i].reg, regs[i].def);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
+ }
+
+ if (!map->format.parse_inplace)
+ return -EINVAL;
+
+ if (map->writeable_reg)
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ if (!map->writeable_reg(map->dev, reg))
+ return -EINVAL;
+ if (reg % map->reg_stride)
+ return -EINVAL;
+ }
+
+ if (!map->cache_bypass) {
+ for (i = 0; i < num_regs; i++) {
+ unsigned int val = regs[i].def;
+ unsigned int reg = regs[i].reg;
+ ret = regcache_write(map, reg, val);
+ if (ret) {
+ dev_err(map->dev,
+ "Error in caching of register: %x ret: %d\n",
+ reg, ret);
+ return ret;
+ }
+ }
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+
+ WARN_ON(!map->bus);
+
+ for (i = 0; i < num_regs; i++) {
+ unsigned int reg = regs[i].reg;
+ struct regmap_range_node *range;
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ size_t len = sizeof(struct reg_default)*num_regs;
+ struct reg_default *base = kmemdup(regs, len,
+ GFP_KERNEL);
+ if (!base)
+ return -ENOMEM;
+ ret = _regmap_range_multi_paged_reg_write(map, base,
+ num_regs);
+ kfree(base);
+
+ return ret;
+ }
+ }
+ return _regmap_raw_multi_reg_write(map, regs, num_regs);
+}
+
+/*
* regmap_multi_reg_write(): Write multiple registers to the device
*
+ * where the register/value pairs are supplied in any order, possibly
+ * not all in a single range.
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * The 'normal' block write mode will ultimately send data on the target
+ * bus as R,V1,V2,V3,..,Vn, where successively higher registers are
+ * addressed. In contrast, this block multi-write mode sends the data as
+ * R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device must of
+ * course support this mode.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+ int num_regs)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
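
regmap_multi_reg_write() takes the register/value pairs in whatever order the caller supplies them; if the bus advertises .can_multi_write they go out as one R1,V1,...,Rn,Vn transfer, otherwise the core falls back to individual writes, preserving the order either way. A minimal usage sketch with illustrative register numbers:

#include <linux/kernel.h>
#include <linux/regmap.h>

static int my_apply_tuning(struct regmap *map)
{
	static const struct reg_default tuning[] = {
		{ .reg = 0x20, .def = 0x01 },
		{ .reg = 0x31, .def = 0x7f },
		{ .reg = 0x2c, .def = 0x10 },	/* order is preserved as written */
	};

	return regmap_multi_reg_write(map, tuning, ARRAY_SIZE(tuning));
}
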
+
+/*
+ * regmap_multi_reg_write_bypassed(): Write multiple registers to the
+ * device but not the cache
+ *
* where the set of register are supplied in any order
*
* @map: Register map to write to
@@ -1592,30 +1817,27 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write);
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs,
- int num_regs)
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+ const struct reg_default *regs,
+ int num_regs)
{
- int ret = 0, i;
-
- for (i = 0; i < num_regs; i++) {
- int reg = regs[i].reg;
- if (reg % map->reg_stride)
- return -EINVAL;
- }
+ int ret;
+ bool bypass;
map->lock(map->lock_arg);
- for (i = 0; i < num_regs; i++) {
- ret = _regmap_write(map, regs[i].reg, regs[i].def);
- if (ret != 0)
- goto out;
- }
-out:
+ bypass = map->cache_bypass;
+ map->cache_bypass = true;
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+ map->cache_bypass = bypass;
+
map->unlock(map->lock_arg);
return ret;
}
-EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
/**
* regmap_raw_write_async(): Write raw values to one or more registers
@@ -1736,6 +1958,9 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
if (map->cache_only)
return -EBUSY;
+ if (!regmap_readable(map, reg))
+ return -EIO;
+
ret = map->reg_read(context, reg, val);
if (ret == 0) {
#ifdef LOG_DEVICE
@@ -1966,9 +2191,11 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
if (tmp != orig) {
ret = _regmap_write(map, reg, tmp);
- *change = true;
+ if (change)
+ *change = true;
} else {
- *change = false;
+ if (change)
+ *change = false;
}
return ret;
@@ -1987,11 +2214,10 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val)
{
- bool change;
int ret;
map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, &change);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL);
map->unlock(map->lock_arg);
return ret;
@@ -2016,14 +2242,13 @@ EXPORT_SYMBOL_GPL(regmap_update_bits);
int regmap_update_bits_async(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val)
{
- bool change;
int ret;
map->lock(map->lock_arg);
map->async = true;
- ret = _regmap_update_bits(map, reg, mask, val, &change);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL);
map->async = false;
@@ -2173,35 +2398,21 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
* apply them immediately. Typically this is used to apply
* corrections to be applied to the device defaults on startup, such
* as the updates some vendors provide to undocumented registers.
+ *
+ * The caller must ensure that this function cannot be called
+ * concurrently with either itself or regcache_sync().
*/
int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
int num_regs)
{
struct reg_default *p;
- int i, ret;
+ int ret;
bool bypass;
if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
num_regs))
return 0;
- map->lock(map->lock_arg);
-
- bypass = map->cache_bypass;
-
- map->cache_bypass = true;
- map->async = true;
-
- /* Write out first; it's useful to apply even if we fail later. */
- for (i = 0; i < num_regs; i++) {
- ret = _regmap_write(map, regs[i].reg, regs[i].def);
- if (ret != 0) {
- dev_err(map->dev, "Failed to write %x = %x: %d\n",
- regs[i].reg, regs[i].def, ret);
- goto out;
- }
- }
-
p = krealloc(map->patch,
sizeof(struct reg_default) * (map->patch_regs + num_regs),
GFP_KERNEL);
@@ -2210,9 +2421,20 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
map->patch = p;
map->patch_regs += num_regs;
} else {
- ret = -ENOMEM;
+ return -ENOMEM;
}
+ map->lock(map->lock_arg);
+
+ bypass = map->cache_bypass;
+
+ map->cache_bypass = true;
+ map->async = true;
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+ if (ret != 0)
+ goto out;
+
out:
map->async = false;
map->cache_bypass = bypass;
@@ -2240,6 +2462,18 @@ int regmap_get_val_bytes(struct regmap *map)
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
+int regmap_parse_val(struct regmap *map, const void *buf,
+ unsigned int *val)
+{
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ *val = map->format.parse_val(buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_parse_val);
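
regmap_parse_val() exposes the map's value parser so code that receives raw, bus-formatted bytes (for example out of an asynchronous read buffer) can decode a single value without knowing the wire format. A brief sketch; the buffer is assumed to hold exactly one value in the map's format:

#include <linux/regmap.h>

static int my_decode_one(struct regmap *map, const void *raw, unsigned int *out)
{
	/* Returns -EINVAL if the map has no value parser (e.g. a no-bus map). */
	return regmap_parse_val(map, raw, out);
}
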
+
static int __init regmap_initcall(void)
{
regmap_debugfs_initcall();
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 94ffee378f10..bbcbd3c43926 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,7 +23,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/module.h>
@@ -161,16 +160,20 @@ static int topology_cpu_callback(struct notifier_block *nfb,
static int topology_sysfs_init(void)
{
int cpu;
- int rc;
+ int rc = 0;
+
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu) {
rc = topology_add_dev(cpu);
if (rc)
- return rc;
+ goto out;
}
- hotcpu_notifier(topology_cpu_callback, 0);
+ __hotcpu_notifier(topology_cpu_callback, 0);
- return 0;
+out:
+ cpu_notifier_register_done();
+ return rc;
}
device_initcall(topology_sysfs_init);
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 25f9887a35d0..d7f81ad56b8a 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -218,7 +218,14 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
#if IS_BUILTIN(CONFIG_BCMA_HOST_SOC)
chip->to_irq = bcma_gpio_to_irq;
#endif
- chip->ngpio = 16;
+ switch (cc->core->bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM5357:
+ chip->ngpio = 32;
+ break;
+ default:
+ chip->ngpio = 16;
+ }
+
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
* a random base number. */
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index eb3950113e42..125d84505738 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6411,12 +6411,12 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
.ScatterGatherSegments[0]
.SegmentByteCount =
CommandMailbox->ControllerInfo.DataTransferSize;
- DAC960_ExecuteCommand(Command);
- while (Controller->V2.NewControllerInformation->PhysicalScanActive)
- {
- DAC960_ExecuteCommand(Command);
- sleep_on_timeout(&Controller->CommandWaitQueue, HZ);
- }
+ while (1) {
+ DAC960_ExecuteCommand(Command);
+ if (!Controller->V2.NewControllerInformation->PhysicalScanActive)
+ break;
+ msleep(1000);
+ }
DAC960_UserCritical("Discovery Completed\n", Controller);
}
}
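
The hunk above removes one of the last sleep_on_timeout() users; sleep_on() style calls are racy because a wakeup arriving between the condition check and the sleep is lost, so the conversion is an explicit poll loop with msleep(). Generic shape of that conversion, with invented names standing in for the DAC960 specifics:

#include <linux/delay.h>
#include <linux/types.h>

struct my_ctrl {
	bool scan_active;	/* stand-in for PhysicalScanActive */
};

static void issue_scan_command(struct my_ctrl *c)
{
	/* kick the hardware / refresh the status buffer */
}

static void wait_for_scan(struct my_ctrl *c)
{
	while (1) {
		issue_scan_command(c);
		if (!c->scan_active)
			break;
		msleep(1000);	/* sleeps; never busy-waits */
	}
}
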
@@ -7035,18 +7035,16 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
ErrorCode = -EFAULT;
break;
}
- while (Controller->V2.HealthStatusBuffer->StatusChangeCounter
- == HealthStatusBuffer.StatusChangeCounter &&
- Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
- == HealthStatusBuffer.NextEventSequenceNumber)
- {
- interruptible_sleep_on_timeout(&Controller->HealthStatusWaitQueue,
- DAC960_MonitoringTimerInterval);
- if (signal_pending(current)) {
- ErrorCode = -EINTR;
- break;
- }
- }
+ ErrorCode = wait_event_interruptible_timeout(Controller->HealthStatusWaitQueue,
+ !(Controller->V2.HealthStatusBuffer->StatusChangeCounter
+ == HealthStatusBuffer.StatusChangeCounter &&
+ Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
+ == HealthStatusBuffer.NextEventSequenceNumber),
+ DAC960_MonitoringTimerInterval);
+ if (ErrorCode == -ERESTARTSYS) {
+ ErrorCode = -EINTR;
+ break;
+ }
if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
Controller->V2.HealthStatusBuffer,
sizeof(DAC960_V2_HealthStatusBuffer_T)))
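
The ioctl path above gets the same treatment: interruptible_sleep_on_timeout() becomes wait_event_interruptible_timeout(), which evaluates its condition under the waitqueue lock and returns -ERESTARTSYS when a signal arrives (mapped back to -EINTR for userspace, as in the hunk). The call pattern, sketched with invented names:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(status_wait);
static unsigned int status_counter;	/* bumped by the IRQ/worker side */

/* Wait up to 10 s for status_counter to move past 'seen'.
 * Returns 0 on success, -EINTR on signal, -ETIMEDOUT on timeout. */
static int wait_for_status_change(unsigned int seen)
{
	long ret;

	ret = wait_event_interruptible_timeout(status_wait,
					       status_counter != seen,
					       10 * HZ);
	if (ret == -ERESTARTSYS)
		return -EINTR;
	if (ret == 0)
		return -ETIMEDOUT;
	return 0;
}
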
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 0e30c6e5492a..96b629e1f0c9 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -68,6 +68,8 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
#include <asm/atafd.h>
#include <asm/atafdreg.h>
@@ -301,7 +303,7 @@ module_param_array(UserSteprate, int, NULL, 0);
/* Synchronization of FDC access. */
static volatile int fdc_busy = 0;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
-static DECLARE_WAIT_QUEUE_HEAD(format_wait);
+static DECLARE_COMPLETION(format_wait);
static unsigned long changed_floppies = 0xff, fake_change = 0;
#define CHECK_CHANGE_DELAY HZ/2
@@ -608,7 +610,7 @@ static void fd_error( void )
if (IsFormatting) {
IsFormatting = 0;
FormatError = 1;
- wake_up( &format_wait );
+ complete(&format_wait);
return;
}
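
Here a waitqueue that was only ever used as a one-shot "format finished" signal becomes a completion; unlike wake_up() on a bare waitqueue, complete() is not lost if it fires before the waiter goes to sleep. The idiom, reduced to invented names:

#include <linux/completion.h>

static DECLARE_COMPLETION(op_done);	/* plays the role of format_wait */

static void start_op(void)
{
	/* hand the request to the interrupt-driven state machine */
}

/* Completion side, e.g. from the IRQ handler or the error path. */
static void op_finished(void)
{
	complete(&op_done);
}

/* Submitter side: start the operation, then block until op_finished(). */
static int run_op(void)
{
	reinit_completion(&op_done);	/* only needed if op_done is reused */
	start_op();
	wait_for_completion(&op_done);
	return 0;
}
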
@@ -650,9 +652,8 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
DPRINT(("do_format( dr=%d tr=%d he=%d offs=%d )\n",
drive, desc->track, desc->head, desc->sect_offset ));
+ wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
local_irq_save(flags);
- while( fdc_busy ) sleep_on( &fdc_wait );
- fdc_busy = 1;
stdma_lock(floppy_irq, NULL);
atari_turnon_irq( IRQ_MFP_FDC ); /* should be already, just to be sure */
local_irq_restore(flags);
@@ -706,7 +707,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
ReqSide = desc->head;
do_fd_action( drive );
- sleep_on( &format_wait );
+ wait_for_completion(&format_wait);
redo_fd_request();
return( FormatError ? -EIO : 0 );
@@ -1229,7 +1230,7 @@ static void fd_writetrack_done( int status )
goto err_end;
}
- wake_up( &format_wait );
+ complete(&format_wait);
return;
err_end:
@@ -1497,8 +1498,7 @@ repeat:
void do_fd_request(struct request_queue * q)
{
DPRINT(("do_fd_request for pid %d\n",current->pid));
- while( fdc_busy ) sleep_on( &fdc_wait );
- fdc_busy = 1;
+ wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
stdma_lock(floppy_irq, NULL);
atari_disable_irq( IRQ_MFP_FDC );
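
Both ataflop submission paths that used to spin in "while (fdc_busy) sleep_on(&fdc_wait);" now take the controller with a cmpxchg() inside wait_event(), which closes the window where a wakeup could be missed between the test and the sleep. The claim/release pair in isolation, with generic names:

#include <linux/atomic.h>	/* cmpxchg() */
#include <linux/wait.h>

static volatile int ctrl_busy;		/* 0 = free, 1 = claimed */
static DECLARE_WAIT_QUEUE_HEAD(ctrl_wait);

static void ctrl_claim(void)
{
	/* Sleep until this caller atomically flips 0 -> 1; losers of
	 * the cmpxchg() race go back to sleep on ctrl_wait. */
	wait_event(ctrl_wait, cmpxchg(&ctrl_busy, 0, 1) == 0);
}

static void ctrl_release(void)
{
	ctrl_busy = 0;
	wake_up(&ctrl_wait);	/* let the next claimer retry */
}
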
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 036e8ab86c71..73894ca33956 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -4092,11 +4092,9 @@ static void cciss_interrupt_mode(ctlr_info_t *h)
if (err > 0) {
dev_warn(&h->pdev->dev,
"only %d MSI-X vectors available\n", err);
- goto default_int_mode;
} else {
dev_warn(&h->pdev->dev,
"MSI-X init failed %d\n", err);
- goto default_int_mode;
}
}
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index a9b13f2cc420..90ae4ba8f9ee 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -95,34 +95,36 @@ struct __packed al_transaction_on_disk {
struct update_odbm_work {
struct drbd_work w;
+ struct drbd_device *device;
unsigned int enr;
};
struct update_al_work {
struct drbd_work w;
+ struct drbd_device *device;
struct completion event;
int err;
};
-void *drbd_md_get_buffer(struct drbd_conf *mdev)
+void *drbd_md_get_buffer(struct drbd_device *device)
{
int r;
- wait_event(mdev->misc_wait,
- (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
- mdev->state.disk <= D_FAILED);
+ wait_event(device->misc_wait,
+ (r = atomic_cmpxchg(&device->md_io_in_use, 0, 1)) == 0 ||
+ device->state.disk <= D_FAILED);
- return r ? NULL : page_address(mdev->md_io_page);
+ return r ? NULL : page_address(device->md_io_page);
}
-void drbd_md_put_buffer(struct drbd_conf *mdev)
+void drbd_md_put_buffer(struct drbd_device *device)
{
- if (atomic_dec_and_test(&mdev->md_io_in_use))
- wake_up(&mdev->misc_wait);
+ if (atomic_dec_and_test(&device->md_io_in_use))
+ wake_up(&device->misc_wait);
}
-void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev,
unsigned int *done)
{
long dt;
@@ -134,15 +136,15 @@ void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backi
if (dt == 0)
dt = MAX_SCHEDULE_TIMEOUT;
- dt = wait_event_timeout(mdev->misc_wait,
- *done || test_bit(FORCE_DETACH, &mdev->flags), dt);
+ dt = wait_event_timeout(device->misc_wait,
+ *done || test_bit(FORCE_DETACH, &device->flags), dt);
if (dt == 0) {
- dev_err(DEV, "meta-data IO operation timed out\n");
- drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
+ drbd_err(device, "meta-data IO operation timed out\n");
+ drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH);
}
}
-static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
+static int _drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev,
struct page *page, sector_t sector,
int rw, int size)
@@ -150,10 +152,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
struct bio *bio;
int err;
- mdev->md_io.done = 0;
- mdev->md_io.error = -ENODEV;
+ device->md_io.done = 0;
+ device->md_io.error = -ENODEV;
- if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+ if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags))
rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
@@ -163,69 +165,69 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
err = -EIO;
if (bio_add_page(bio, page, size, 0) != size)
goto out;
- bio->bi_private = &mdev->md_io;
+ bio->bi_private = &device->md_io;
bio->bi_end_io = drbd_md_io_complete;
bio->bi_rw = rw;
- if (!(rw & WRITE) && mdev->state.disk == D_DISKLESS && mdev->ldev == NULL)
+ if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL)
/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
;
- else if (!get_ldev_if_state(mdev, D_ATTACHING)) {
+ else if (!get_ldev_if_state(device, D_ATTACHING)) {
/* Corresponding put_ldev in drbd_md_io_complete() */
- dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
+ drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
err = -ENODEV;
goto out;
}
bio_get(bio); /* one bio_put() is in the completion handler */
- atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
- if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+ atomic_inc(&device->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
+ if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
- wait_until_done_or_force_detached(mdev, bdev, &mdev->md_io.done);
+ wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
if (bio_flagged(bio, BIO_UPTODATE))
- err = mdev->md_io.error;
+ err = device->md_io.error;
out:
bio_put(bio);
return err;
}
-int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
sector_t sector, int rw)
{
int err;
- struct page *iop = mdev->md_io_page;
+ struct page *iop = device->md_io_page;
- D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
+ D_ASSERT(device, atomic_read(&device->md_io_in_use) == 1);
BUG_ON(!bdev->md_bdev);
- dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
+ drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
current->comm, current->pid, __func__,
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
(void*)_RET_IP_ );
if (sector < drbd_md_first_sector(bdev) ||
sector + 7 > drbd_md_last_sector(bdev))
- dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
+ drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
current->comm, current->pid, __func__,
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
/* we do all our meta data IO in aligned 4k blocks. */
- err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, 4096);
+ err = _drbd_md_sync_page_io(device, bdev, iop, sector, rw, 4096);
if (err) {
- dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
+ drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
}
return err;
}
-static struct bm_extent *find_active_resync_extent(struct drbd_conf *mdev, unsigned int enr)
+static struct bm_extent *find_active_resync_extent(struct drbd_device *device, unsigned int enr)
{
struct lc_element *tmp;
- tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
+ tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
if (unlikely(tmp != NULL)) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
if (test_bit(BME_NO_WRITES, &bm_ext->flags))
@@ -234,47 +236,48 @@ static struct bm_extent *find_active_resync_extent(struct drbd_conf *mdev, unsig
return NULL;
}
-static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr, bool nonblock)
+static struct lc_element *_al_get(struct drbd_device *device, unsigned int enr, bool nonblock)
{
struct lc_element *al_ext;
struct bm_extent *bm_ext;
int wake;
- spin_lock_irq(&mdev->al_lock);
- bm_ext = find_active_resync_extent(mdev, enr);
+ spin_lock_irq(&device->al_lock);
+ bm_ext = find_active_resync_extent(device, enr);
if (bm_ext) {
wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
if (wake)
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
return NULL;
}
if (nonblock)
- al_ext = lc_try_get(mdev->act_log, enr);
+ al_ext = lc_try_get(device->act_log, enr);
else
- al_ext = lc_get(mdev->act_log, enr);
- spin_unlock_irq(&mdev->al_lock);
+ al_ext = lc_get(device->act_log, enr);
+ spin_unlock_irq(&device->al_lock);
return al_ext;
}
-bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i)
+bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
- D_ASSERT((unsigned)(last - first) <= 1);
- D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
+ D_ASSERT(device, (unsigned)(last - first) <= 1);
+ D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
/* FIXME figure out a fast path for bios crossing AL extent boundaries */
if (first != last)
return false;
- return _al_get(mdev, first, true);
+ return _al_get(device, first, true);
}
-bool drbd_al_begin_io_prepare(struct drbd_conf *mdev, struct drbd_interval *i)
+static
+bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
@@ -283,20 +286,20 @@ bool drbd_al_begin_io_prepare(struct drbd_conf *mdev, struct drbd_interval *i)
unsigned enr;
bool need_transaction = false;
- D_ASSERT(first <= last);
- D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
+ D_ASSERT(device, first <= last);
+ D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
for (enr = first; enr <= last; enr++) {
struct lc_element *al_ext;
- wait_event(mdev->al_wait,
- (al_ext = _al_get(mdev, enr, false)) != NULL);
+ wait_event(device->al_wait,
+ (al_ext = _al_get(device, enr, false)) != NULL);
if (al_ext->lc_number != enr)
need_transaction = true;
}
return need_transaction;
}
-static int al_write_transaction(struct drbd_conf *mdev, bool delegate);
+static int al_write_transaction(struct drbd_device *device, bool delegate);
/* When called through generic_make_request(), we must delegate
* activity log I/O to the worker thread: a further request
@@ -310,58 +313,58 @@ static int al_write_transaction(struct drbd_conf *mdev, bool delegate);
/*
* @delegate: delegate activity log I/O to the worker thread
*/
-void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate)
+void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate)
{
bool locked = false;
- BUG_ON(delegate && current == mdev->tconn->worker.task);
+ BUG_ON(delegate && current == first_peer_device(device)->connection->worker.task);
/* Serialize multiple transactions.
* This uses test_and_set_bit, memory barrier is implicit.
*/
- wait_event(mdev->al_wait,
- mdev->act_log->pending_changes == 0 ||
- (locked = lc_try_lock_for_transaction(mdev->act_log)));
+ wait_event(device->al_wait,
+ device->act_log->pending_changes == 0 ||
+ (locked = lc_try_lock_for_transaction(device->act_log)));
if (locked) {
/* Double check: it may have been committed by someone else,
* while we have been waiting for the lock. */
- if (mdev->act_log->pending_changes) {
+ if (device->act_log->pending_changes) {
bool write_al_updates;
rcu_read_lock();
- write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+ write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
rcu_read_unlock();
if (write_al_updates)
- al_write_transaction(mdev, delegate);
- spin_lock_irq(&mdev->al_lock);
+ al_write_transaction(device, delegate);
+ spin_lock_irq(&device->al_lock);
/* FIXME
if (err)
we need an "lc_cancel" here;
*/
- lc_committed(mdev->act_log);
- spin_unlock_irq(&mdev->al_lock);
+ lc_committed(device->act_log);
+ spin_unlock_irq(&device->al_lock);
}
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
+ lc_unlock(device->act_log);
+ wake_up(&device->al_wait);
}
}
/*
* @delegate: delegate activity log I/O to the worker thread
*/
-void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate)
+void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate)
{
- BUG_ON(delegate && current == mdev->tconn->worker.task);
+ BUG_ON(delegate && current == first_peer_device(device)->connection->worker.task);
- if (drbd_al_begin_io_prepare(mdev, i))
- drbd_al_begin_io_commit(mdev, delegate);
+ if (drbd_al_begin_io_prepare(device, i))
+ drbd_al_begin_io_commit(device, delegate);
}
-int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
+int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i)
{
- struct lru_cache *al = mdev->act_log;
+ struct lru_cache *al = device->act_log;
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
@@ -370,7 +373,7 @@ int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
unsigned available_update_slots;
unsigned enr;
- D_ASSERT(first <= last);
+ D_ASSERT(device, first <= last);
nr_al_extents = 1 + last - first; /* worst case: all touched extents are cold. */
available_update_slots = min(al->nr_elements - al->used,
@@ -385,7 +388,7 @@ int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
/* Is resync active in this area? */
for (enr = first; enr <= last; enr++) {
struct lc_element *tmp;
- tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
+ tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
if (unlikely(tmp != NULL)) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
@@ -401,14 +404,14 @@ int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
* this has to be successful. */
for (enr = first; enr <= last; enr++) {
struct lc_element *al_ext;
- al_ext = lc_get_cumulative(mdev->act_log, enr);
+ al_ext = lc_get_cumulative(device->act_log, enr);
if (!al_ext)
- dev_info(DEV, "LOGIC BUG for enr=%u\n", enr);
+ drbd_info(device, "LOGIC BUG for enr=%u\n", enr);
}
return 0;
}
-void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
+void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
@@ -418,19 +421,19 @@ void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
struct lc_element *extent;
unsigned long flags;
- D_ASSERT(first <= last);
- spin_lock_irqsave(&mdev->al_lock, flags);
+ D_ASSERT(device, first <= last);
+ spin_lock_irqsave(&device->al_lock, flags);
for (enr = first; enr <= last; enr++) {
- extent = lc_find(mdev->act_log, enr);
+ extent = lc_find(device->act_log, enr);
if (!extent) {
- dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
+ drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr);
continue;
}
- lc_put(mdev->act_log, extent);
+ lc_put(device->act_log, extent);
}
- spin_unlock_irqrestore(&mdev->al_lock, flags);
- wake_up(&mdev->al_wait);
+ spin_unlock_irqrestore(&device->al_lock, flags);
+ wake_up(&device->al_wait);
}
#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
@@ -460,13 +463,13 @@ static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
(BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}
-static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev)
+static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device)
{
- const unsigned int stripes = mdev->ldev->md.al_stripes;
- const unsigned int stripe_size_4kB = mdev->ldev->md.al_stripe_size_4k;
+ const unsigned int stripes = device->ldev->md.al_stripes;
+ const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k;
/* transaction number, modulo on-disk ring buffer wrap around */
- unsigned int t = mdev->al_tr_number % (mdev->ldev->md.al_size_4k);
+ unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);
/* ... to aligned 4k on disk block */
t = ((t % stripes) * stripe_size_4kB) + t/stripes;
@@ -475,11 +478,11 @@ static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev)
t *= 8;
/* ... plus offset to the on disk position */
- return mdev->ldev->md.md_offset + mdev->ldev->md.al_offset + t;
+ return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
}
static int
-_al_write_transaction(struct drbd_conf *mdev)
+_al_write_transaction(struct drbd_device *device)
{
struct al_transaction_on_disk *buffer;
struct lc_element *e;
@@ -489,31 +492,31 @@ _al_write_transaction(struct drbd_conf *mdev)
unsigned crc = 0;
int err = 0;
- if (!get_ldev(mdev)) {
- dev_err(DEV, "disk is %s, cannot start al transaction\n",
- drbd_disk_str(mdev->state.disk));
+ if (!get_ldev(device)) {
+ drbd_err(device, "disk is %s, cannot start al transaction\n",
+ drbd_disk_str(device->state.disk));
return -EIO;
}
/* The bitmap write may have failed, causing a state change. */
- if (mdev->state.disk < D_INCONSISTENT) {
- dev_err(DEV,
+ if (device->state.disk < D_INCONSISTENT) {
+ drbd_err(device,
"disk is %s, cannot write al transaction\n",
- drbd_disk_str(mdev->state.disk));
- put_ldev(mdev);
+ drbd_disk_str(device->state.disk));
+ put_ldev(device);
return -EIO;
}
- buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
+ buffer = drbd_md_get_buffer(device); /* protects md_io_buffer, al_tr_cycle, ... */
if (!buffer) {
- dev_err(DEV, "disk failed while waiting for md_io buffer\n");
- put_ldev(mdev);
+ drbd_err(device, "disk failed while waiting for md_io buffer\n");
+ put_ldev(device);
return -ENODEV;
}
memset(buffer, 0, sizeof(*buffer));
buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
- buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
+ buffer->tr_number = cpu_to_be32(device->al_tr_number);
i = 0;
@@ -521,8 +524,8 @@ _al_write_transaction(struct drbd_conf *mdev)
* once we set the LC_LOCKED -- from drbd_al_begin_io(),
* lc_try_lock_for_transaction() --, someone may still
* be in the process of changing it. */
- spin_lock_irq(&mdev->al_lock);
- list_for_each_entry(e, &mdev->act_log->to_be_changed, list) {
+ spin_lock_irq(&device->al_lock);
+ list_for_each_entry(e, &device->act_log->to_be_changed, list) {
if (i == AL_UPDATES_PER_TRANSACTION) {
i++;
break;
@@ -530,11 +533,11 @@ _al_write_transaction(struct drbd_conf *mdev)
buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
if (e->lc_number != LC_FREE)
- drbd_bm_mark_for_writeout(mdev,
+ drbd_bm_mark_for_writeout(device,
al_extent_to_bm_page(e->lc_number));
i++;
}
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
BUG_ON(i > AL_UPDATES_PER_TRANSACTION);
buffer->n_updates = cpu_to_be16(i);
@@ -543,48 +546,48 @@ _al_write_transaction(struct drbd_conf *mdev)
buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
}
- buffer->context_size = cpu_to_be16(mdev->act_log->nr_elements);
- buffer->context_start_slot_nr = cpu_to_be16(mdev->al_tr_cycle);
+ buffer->context_size = cpu_to_be16(device->act_log->nr_elements);
+ buffer->context_start_slot_nr = cpu_to_be16(device->al_tr_cycle);
mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
- mdev->act_log->nr_elements - mdev->al_tr_cycle);
+ device->act_log->nr_elements - device->al_tr_cycle);
for (i = 0; i < mx; i++) {
- unsigned idx = mdev->al_tr_cycle + i;
- extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
+ unsigned idx = device->al_tr_cycle + i;
+ extent_nr = lc_element_by_index(device->act_log, idx)->lc_number;
buffer->context[i] = cpu_to_be32(extent_nr);
}
for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
buffer->context[i] = cpu_to_be32(LC_FREE);
- mdev->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
- if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
- mdev->al_tr_cycle = 0;
+ device->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
+ if (device->al_tr_cycle >= device->act_log->nr_elements)
+ device->al_tr_cycle = 0;
- sector = al_tr_number_to_on_disk_sector(mdev);
+ sector = al_tr_number_to_on_disk_sector(device);
crc = crc32c(0, buffer, 4096);
buffer->crc32c = cpu_to_be32(crc);
- if (drbd_bm_write_hinted(mdev))
+ if (drbd_bm_write_hinted(device))
err = -EIO;
else {
bool write_al_updates;
rcu_read_lock();
- write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+ write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
rcu_read_unlock();
if (write_al_updates) {
- if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
err = -EIO;
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
} else {
- mdev->al_tr_number++;
- mdev->al_writ_cnt++;
+ device->al_tr_number++;
+ device->al_writ_cnt++;
}
}
}
- drbd_md_put_buffer(mdev);
- put_ldev(mdev);
+ drbd_md_put_buffer(device);
+ put_ldev(device);
return err;
}
@@ -593,10 +596,10 @@ _al_write_transaction(struct drbd_conf *mdev)
static int w_al_write_transaction(struct drbd_work *w, int unused)
{
struct update_al_work *aw = container_of(w, struct update_al_work, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device = aw->device;
int err;
- err = _al_write_transaction(mdev);
+ err = _al_write_transaction(device);
aw->err = err;
complete(&aw->event);
@@ -606,63 +609,64 @@ static int w_al_write_transaction(struct drbd_work *w, int unused)
/* Calls from worker context (see w_restart_disk_io()) need to write the
transaction directly. Others came through generic_make_request(),
those need to delegate it to the worker. */
-static int al_write_transaction(struct drbd_conf *mdev, bool delegate)
+static int al_write_transaction(struct drbd_device *device, bool delegate)
{
if (delegate) {
struct update_al_work al_work;
init_completion(&al_work.event);
al_work.w.cb = w_al_write_transaction;
- al_work.w.mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
+ al_work.device = device;
+ drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
+ &al_work.w);
wait_for_completion(&al_work.event);
return al_work.err;
} else
- return _al_write_transaction(mdev);
+ return _al_write_transaction(device);
}
-static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
+static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext)
{
int rv;
- spin_lock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
rv = (al_ext->refcnt == 0);
if (likely(rv))
- lc_del(mdev->act_log, al_ext);
- spin_unlock_irq(&mdev->al_lock);
+ lc_del(device->act_log, al_ext);
+ spin_unlock_irq(&device->al_lock);
return rv;
}
/**
* drbd_al_shrink() - Removes all active extents from the activity log
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Removes all active extents from the activity log, waiting until
* the reference count of each entry dropped to 0 first, of course.
*
- * You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
+ * You need to lock device->act_log with lc_try_lock() / lc_unlock()
*/
-void drbd_al_shrink(struct drbd_conf *mdev)
+void drbd_al_shrink(struct drbd_device *device)
{
struct lc_element *al_ext;
int i;
- D_ASSERT(test_bit(__LC_LOCKED, &mdev->act_log->flags));
+ D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags));
- for (i = 0; i < mdev->act_log->nr_elements; i++) {
- al_ext = lc_element_by_index(mdev->act_log, i);
+ for (i = 0; i < device->act_log->nr_elements; i++) {
+ al_ext = lc_element_by_index(device->act_log, i);
if (al_ext->lc_number == LC_FREE)
continue;
- wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
+ wait_event(device->al_wait, _try_lc_del(device, al_ext));
}
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
}
-int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
+int drbd_initialize_al(struct drbd_device *device, void *buffer)
{
struct al_transaction_on_disk *al = buffer;
- struct drbd_md *md = &mdev->ldev->md;
+ struct drbd_md *md = &device->ldev->md;
sector_t al_base = md->md_offset + md->al_offset;
int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
int i;
@@ -673,7 +677,7 @@ int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
al->crc32c = cpu_to_be32(crc32c(0, al, 4096));
for (i = 0; i < al_size_4k; i++) {
- int err = drbd_md_sync_page_io(mdev, mdev->ldev, al_base + i * 8, WRITE);
+ int err = drbd_md_sync_page_io(device, device->ldev, al_base + i * 8, WRITE);
if (err)
return err;
}
@@ -683,32 +687,32 @@ int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
static int w_update_odbm(struct drbd_work *w, int unused)
{
struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device = udw->device;
struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
- if (!get_ldev(mdev)) {
+ if (!get_ldev(device)) {
if (__ratelimit(&drbd_ratelimit_state))
- dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
+ drbd_warn(device, "Can not update on disk bitmap, local IO disabled.\n");
kfree(udw);
return 0;
}
- drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
- put_ldev(mdev);
+ drbd_bm_write_page(device, rs_extent_to_bm_page(udw->enr));
+ put_ldev(device);
kfree(udw);
- if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
- switch (mdev->state.conn) {
+ if (drbd_bm_total_weight(device) <= device->rs_failed) {
+ switch (device->state.conn) {
case C_SYNC_SOURCE: case C_SYNC_TARGET:
case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
- drbd_resync_finished(mdev);
+ drbd_resync_finished(device);
default:
/* nothing to do */
break;
}
}
- drbd_bcast_event(mdev, &sib);
+ drbd_bcast_event(device, &sib);
return 0;
}
@@ -720,7 +724,7 @@ static int w_update_odbm(struct drbd_work *w, int unused)
*
* TODO will be obsoleted once we have a caching lru of the on disk bitmap
*/
-static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
+static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t sector,
int count, int success)
{
struct lc_element *e;
@@ -728,13 +732,13 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
unsigned int enr;
- D_ASSERT(atomic_read(&mdev->local_cnt));
+ D_ASSERT(device, atomic_read(&device->local_cnt));
/* I simply assume that a sector/size pair never crosses
* a 16 MB extent border. (Currently this is true...) */
enr = BM_SECT_TO_EXT(sector);
- e = lc_get(mdev->resync, enr);
+ e = lc_get(device->resync, enr);
if (e) {
struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
if (ext->lce.lc_number == enr) {
@@ -743,12 +747,12 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
else
ext->rs_failed += count;
if (ext->rs_left < ext->rs_failed) {
- dev_warn(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
+ drbd_warn(device, "BAD! sector=%llus enr=%u rs_left=%d "
"rs_failed=%d count=%d cstate=%s\n",
(unsigned long long)sector,
ext->lce.lc_number, ext->rs_left,
ext->rs_failed, count,
- drbd_conn_str(mdev->state.conn));
+ drbd_conn_str(device->state.conn));
/* We don't expect to be able to clear more bits
* than have been set when we originally counted
@@ -756,7 +760,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
* Whatever the reason (disconnect during resync,
* delayed local completion of an application write),
* try to fix it up by recounting here. */
- ext->rs_left = drbd_bm_e_weight(mdev, enr);
+ ext->rs_left = drbd_bm_e_weight(device, enr);
}
} else {
/* Normally this element should be in the cache,
@@ -765,16 +769,16 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
* But maybe an application write finished, and we set
* something outside the resync lru_cache in sync.
*/
- int rs_left = drbd_bm_e_weight(mdev, enr);
+ int rs_left = drbd_bm_e_weight(device, enr);
if (ext->flags != 0) {
- dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
+ drbd_warn(device, "changing resync lce: %d[%u;%02lx]"
" -> %d[%u;00]\n",
ext->lce.lc_number, ext->rs_left,
ext->flags, enr, rs_left);
ext->flags = 0;
}
if (ext->rs_failed) {
- dev_warn(DEV, "Kicking resync_lru element enr=%u "
+ drbd_warn(device, "Kicking resync_lru element enr=%u "
"out with rs_failed=%d\n",
ext->lce.lc_number, ext->rs_failed);
}
@@ -782,9 +786,9 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
ext->rs_failed = success ? 0 : count;
/* we don't keep a persistent log of the resync lru,
* we can commit any change right away. */
- lc_committed(mdev->resync);
+ lc_committed(device->resync);
}
- lc_put(mdev->resync, &ext->lce);
+ lc_put(device->resync, &ext->lce);
/* no race, we are within the al_lock! */
if (ext->rs_left == ext->rs_failed) {
@@ -794,32 +798,33 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
if (udw) {
udw->enr = ext->lce.lc_number;
udw->w.cb = w_update_odbm;
- udw->w.mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
+ udw->device = device;
+ drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
+ &udw->w);
} else {
- dev_warn(DEV, "Could not kmalloc an udw\n");
+ drbd_warn(device, "Could not kmalloc an udw\n");
}
}
} else {
- dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
- mdev->resync_locked,
- mdev->resync->nr_elements,
- mdev->resync->flags);
+ drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n",
+ device->resync_locked,
+ device->resync->nr_elements,
+ device->resync->flags);
}
}
-void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
+void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go)
{
unsigned long now = jiffies;
- unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
- int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
+ unsigned long last = device->rs_mark_time[device->rs_last_mark];
+ int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
- if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
- mdev->state.conn != C_PAUSED_SYNC_T &&
- mdev->state.conn != C_PAUSED_SYNC_S) {
- mdev->rs_mark_time[next] = now;
- mdev->rs_mark_left[next] = still_to_go;
- mdev->rs_last_mark = next;
+ if (device->rs_mark_left[device->rs_last_mark] != still_to_go &&
+ device->state.conn != C_PAUSED_SYNC_T &&
+ device->state.conn != C_PAUSED_SYNC_S) {
+ device->rs_mark_time[next] = now;
+ device->rs_mark_left[next] = still_to_go;
+ device->rs_last_mark = next;
}
}
}
@@ -831,7 +836,7 @@ void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
* called by worker on C_SYNC_TARGET and receiver on SyncSource.
*
*/
-void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
+void __drbd_set_in_sync(struct drbd_device *device, sector_t sector, int size,
const char *file, const unsigned int line)
{
/* Is called from worker and receiver context _only_ */
@@ -842,15 +847,15 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
unsigned long flags;
if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
- dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
+ drbd_err(device, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
}
- if (!get_ldev(mdev))
+ if (!get_ldev(device))
return; /* no disk, no metadata, no bitmap to clear bits in */
- nr_sectors = drbd_get_capacity(mdev->this_bdev);
+ nr_sectors = drbd_get_capacity(device->this_bdev);
esector = sector + (size >> 9) - 1;
if (!expect(sector < nr_sectors))
@@ -878,21 +883,21 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors.
*/
- count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
+ count = drbd_bm_clear_bits(device, sbnr, ebnr);
if (count) {
- drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
- spin_lock_irqsave(&mdev->al_lock, flags);
- drbd_try_clear_on_disk_bm(mdev, sector, count, true);
- spin_unlock_irqrestore(&mdev->al_lock, flags);
+ drbd_advance_rs_marks(device, drbd_bm_total_weight(device));
+ spin_lock_irqsave(&device->al_lock, flags);
+ drbd_try_clear_on_disk_bm(device, sector, count, true);
+ spin_unlock_irqrestore(&device->al_lock, flags);
/* just wake_up unconditionally now, various lc_changed(),
* lc_put() in drbd_try_clear_on_disk_bm(). */
wake_up = 1;
}
out:
- put_ldev(mdev);
+ put_ldev(device);
if (wake_up)
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
}
/*
@@ -903,7 +908,7 @@ out:
* called by tl_clear and drbd_send_dblock (==drbd_make_request).
* so this can be _any_ process.
*/
-int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
+int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector, int size,
const char *file, const unsigned int line)
{
unsigned long sbnr, ebnr, flags;
@@ -916,15 +921,15 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
return 0;
if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
- dev_err(DEV, "sector: %llus, size: %d\n",
+ drbd_err(device, "sector: %llus, size: %d\n",
(unsigned long long)sector, size);
return 0;
}
- if (!get_ldev(mdev))
+ if (!get_ldev(device))
return 0; /* no disk, no metadata, no bitmap to set bits in */
- nr_sectors = drbd_get_capacity(mdev->this_bdev);
+ nr_sectors = drbd_get_capacity(device->this_bdev);
esector = sector + (size >> 9) - 1;
if (!expect(sector < nr_sectors))
@@ -939,55 +944,55 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
/* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors. */
- spin_lock_irqsave(&mdev->al_lock, flags);
- count = drbd_bm_set_bits(mdev, sbnr, ebnr);
+ spin_lock_irqsave(&device->al_lock, flags);
+ count = drbd_bm_set_bits(device, sbnr, ebnr);
enr = BM_SECT_TO_EXT(sector);
- e = lc_find(mdev->resync, enr);
+ e = lc_find(device->resync, enr);
if (e)
lc_entry(e, struct bm_extent, lce)->rs_left += count;
- spin_unlock_irqrestore(&mdev->al_lock, flags);
+ spin_unlock_irqrestore(&device->al_lock, flags);
out:
- put_ldev(mdev);
+ put_ldev(device);
return count;
}
static
-struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
+struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr)
{
struct lc_element *e;
struct bm_extent *bm_ext;
int wakeup = 0;
unsigned long rs_flags;
- spin_lock_irq(&mdev->al_lock);
- if (mdev->resync_locked > mdev->resync->nr_elements/2) {
- spin_unlock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
+ if (device->resync_locked > device->resync->nr_elements/2) {
+ spin_unlock_irq(&device->al_lock);
return NULL;
}
- e = lc_get(mdev->resync, enr);
+ e = lc_get(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
if (bm_ext->lce.lc_number != enr) {
- bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
+ bm_ext->rs_left = drbd_bm_e_weight(device, enr);
bm_ext->rs_failed = 0;
- lc_committed(mdev->resync);
+ lc_committed(device->resync);
wakeup = 1;
}
if (bm_ext->lce.refcnt == 1)
- mdev->resync_locked++;
+ device->resync_locked++;
set_bit(BME_NO_WRITES, &bm_ext->flags);
}
- rs_flags = mdev->resync->flags;
- spin_unlock_irq(&mdev->al_lock);
+ rs_flags = device->resync->flags;
+ spin_unlock_irq(&device->al_lock);
if (wakeup)
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
if (!bm_ext) {
if (rs_flags & LC_STARVING)
- dev_warn(DEV, "Have to wait for element"
+ drbd_warn(device, "Have to wait for element"
" (resync LRU too small?)\n");
BUG_ON(rs_flags & LC_LOCKED);
}
@@ -995,25 +1000,25 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
return bm_ext;
}
-static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
+static int _is_in_al(struct drbd_device *device, unsigned int enr)
{
int rv;
- spin_lock_irq(&mdev->al_lock);
- rv = lc_is_used(mdev->act_log, enr);
- spin_unlock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
+ rv = lc_is_used(device->act_log, enr);
+ spin_unlock_irq(&device->al_lock);
return rv;
}
/**
* drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @sector: The sector number.
*
* This functions sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
*/
-int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
+int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct bm_extent *bm_ext;
@@ -1022,8 +1027,8 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
200 times -> 20 seconds. */
retry:
- sig = wait_event_interruptible(mdev->al_wait,
- (bm_ext = _bme_get(mdev, enr)));
+ sig = wait_event_interruptible(device->al_wait,
+ (bm_ext = _bme_get(device, enr)));
if (sig)
return -EINTR;
@@ -1031,24 +1036,24 @@ retry:
return 0;
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
- sig = wait_event_interruptible(mdev->al_wait,
- !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) ||
+ sig = wait_event_interruptible(device->al_wait,
+ !_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) ||
test_bit(BME_PRIORITY, &bm_ext->flags));
if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
- spin_lock_irq(&mdev->al_lock);
- if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
+ spin_lock_irq(&device->al_lock);
+ if (lc_put(device->resync, &bm_ext->lce) == 0) {
bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
- mdev->resync_locked--;
- wake_up(&mdev->al_wait);
+ device->resync_locked--;
+ wake_up(&device->al_wait);
}
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
if (sig)
return -EINTR;
if (schedule_timeout_interruptible(HZ/10))
return -EINTR;
if (sa && --sa == 0)
- dev_warn(DEV,"drbd_rs_begin_io() stepped aside for 20sec."
+ drbd_warn(device, "drbd_rs_begin_io() stepped aside for 20sec."
"Resync stalled?\n");
goto retry;
}
@@ -1059,14 +1064,14 @@ retry:
/**
* drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @sector: The sector number.
*
* Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
* tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
* if there is still application IO going on in this area.
*/
-int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
+int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
@@ -1074,8 +1079,8 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
struct bm_extent *bm_ext;
int i;
- spin_lock_irq(&mdev->al_lock);
- if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
+ spin_lock_irq(&device->al_lock);
+ if (device->resync_wenr != LC_FREE && device->resync_wenr != enr) {
/* in case you have very heavy scattered io, it may
* stall the syncer undefined if we give up the ref count
* when we try again and requeue.
@@ -1089,193 +1094,193 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
* the lc_put here...
* we also have to wake_up
*/
- e = lc_find(mdev->resync, mdev->resync_wenr);
+ e = lc_find(device->resync, device->resync_wenr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
- D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
- D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
+ D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
+ D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
clear_bit(BME_NO_WRITES, &bm_ext->flags);
- mdev->resync_wenr = LC_FREE;
- if (lc_put(mdev->resync, &bm_ext->lce) == 0)
- mdev->resync_locked--;
- wake_up(&mdev->al_wait);
+ device->resync_wenr = LC_FREE;
+ if (lc_put(device->resync, &bm_ext->lce) == 0)
+ device->resync_locked--;
+ wake_up(&device->al_wait);
} else {
- dev_alert(DEV, "LOGIC BUG\n");
+ drbd_alert(device, "LOGIC BUG\n");
}
}
/* TRY. */
- e = lc_try_get(mdev->resync, enr);
+ e = lc_try_get(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
if (test_bit(BME_LOCKED, &bm_ext->flags))
goto proceed;
if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
- mdev->resync_locked++;
+ device->resync_locked++;
} else {
/* we did set the BME_NO_WRITES,
* but then could not set BME_LOCKED,
* so we tried again.
* drop the extra reference. */
bm_ext->lce.refcnt--;
- D_ASSERT(bm_ext->lce.refcnt > 0);
+ D_ASSERT(device, bm_ext->lce.refcnt > 0);
}
goto check_al;
} else {
/* do we rather want to try later? */
- if (mdev->resync_locked > mdev->resync->nr_elements-3)
+ if (device->resync_locked > device->resync->nr_elements-3)
goto try_again;
/* Do or do not. There is no try. -- Yoda */
- e = lc_get(mdev->resync, enr);
+ e = lc_get(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (!bm_ext) {
- const unsigned long rs_flags = mdev->resync->flags;
+ const unsigned long rs_flags = device->resync->flags;
if (rs_flags & LC_STARVING)
- dev_warn(DEV, "Have to wait for element"
+ drbd_warn(device, "Have to wait for element"
" (resync LRU too small?)\n");
BUG_ON(rs_flags & LC_LOCKED);
goto try_again;
}
if (bm_ext->lce.lc_number != enr) {
- bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
+ bm_ext->rs_left = drbd_bm_e_weight(device, enr);
bm_ext->rs_failed = 0;
- lc_committed(mdev->resync);
- wake_up(&mdev->al_wait);
- D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
+ lc_committed(device->resync);
+ wake_up(&device->al_wait);
+ D_ASSERT(device, test_bit(BME_LOCKED, &bm_ext->flags) == 0);
}
set_bit(BME_NO_WRITES, &bm_ext->flags);
- D_ASSERT(bm_ext->lce.refcnt == 1);
- mdev->resync_locked++;
+ D_ASSERT(device, bm_ext->lce.refcnt == 1);
+ device->resync_locked++;
goto check_al;
}
check_al:
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
- if (lc_is_used(mdev->act_log, al_enr+i))
+ if (lc_is_used(device->act_log, al_enr+i))
goto try_again;
}
set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
- mdev->resync_wenr = LC_FREE;
- spin_unlock_irq(&mdev->al_lock);
+ device->resync_wenr = LC_FREE;
+ spin_unlock_irq(&device->al_lock);
return 0;
try_again:
if (bm_ext)
- mdev->resync_wenr = enr;
- spin_unlock_irq(&mdev->al_lock);
+ device->resync_wenr = enr;
+ spin_unlock_irq(&device->al_lock);
return -EAGAIN;
}
-void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_rs_complete_io(struct drbd_device *device, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct lc_element *e;
struct bm_extent *bm_ext;
unsigned long flags;
- spin_lock_irqsave(&mdev->al_lock, flags);
- e = lc_find(mdev->resync, enr);
+ spin_lock_irqsave(&device->al_lock, flags);
+ e = lc_find(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (!bm_ext) {
- spin_unlock_irqrestore(&mdev->al_lock, flags);
+ spin_unlock_irqrestore(&device->al_lock, flags);
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
+ drbd_err(device, "drbd_rs_complete_io() called, but extent not found\n");
return;
}
if (bm_ext->lce.refcnt == 0) {
- spin_unlock_irqrestore(&mdev->al_lock, flags);
- dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
+ spin_unlock_irqrestore(&device->al_lock, flags);
+ drbd_err(device, "drbd_rs_complete_io(,%llu [=%u]) called, "
"but refcnt is 0!?\n",
(unsigned long long)sector, enr);
return;
}
- if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
+ if (lc_put(device->resync, &bm_ext->lce) == 0) {
bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
- mdev->resync_locked--;
- wake_up(&mdev->al_wait);
+ device->resync_locked--;
+ wake_up(&device->al_wait);
}
- spin_unlock_irqrestore(&mdev->al_lock, flags);
+ spin_unlock_irqrestore(&device->al_lock, flags);
}
/**
* drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
- * @mdev: DRBD device.
+ * @device: DRBD device.
*/
-void drbd_rs_cancel_all(struct drbd_conf *mdev)
+void drbd_rs_cancel_all(struct drbd_device *device)
{
- spin_lock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
- if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
- lc_reset(mdev->resync);
- put_ldev(mdev);
+ if (get_ldev_if_state(device, D_FAILED)) { /* Makes sure ->resync is there. */
+ lc_reset(device->resync);
+ put_ldev(device);
}
- mdev->resync_locked = 0;
- mdev->resync_wenr = LC_FREE;
- spin_unlock_irq(&mdev->al_lock);
- wake_up(&mdev->al_wait);
+ device->resync_locked = 0;
+ device->resync_wenr = LC_FREE;
+ spin_unlock_irq(&device->al_lock);
+ wake_up(&device->al_wait);
}
/**
* drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Returns 0 upon success, -EAGAIN if at least one reference count was
* not zero.
*/
-int drbd_rs_del_all(struct drbd_conf *mdev)
+int drbd_rs_del_all(struct drbd_device *device)
{
struct lc_element *e;
struct bm_extent *bm_ext;
int i;
- spin_lock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
- if (get_ldev_if_state(mdev, D_FAILED)) {
+ if (get_ldev_if_state(device, D_FAILED)) {
/* ok, ->resync is there. */
- for (i = 0; i < mdev->resync->nr_elements; i++) {
- e = lc_element_by_index(mdev->resync, i);
+ for (i = 0; i < device->resync->nr_elements; i++) {
+ e = lc_element_by_index(device->resync, i);
bm_ext = lc_entry(e, struct bm_extent, lce);
if (bm_ext->lce.lc_number == LC_FREE)
continue;
- if (bm_ext->lce.lc_number == mdev->resync_wenr) {
- dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
+ if (bm_ext->lce.lc_number == device->resync_wenr) {
+ drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
" got 'synced' by application io\n",
- mdev->resync_wenr);
- D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
- D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
+ device->resync_wenr);
+ D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
+ D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
clear_bit(BME_NO_WRITES, &bm_ext->flags);
- mdev->resync_wenr = LC_FREE;
- lc_put(mdev->resync, &bm_ext->lce);
+ device->resync_wenr = LC_FREE;
+ lc_put(device->resync, &bm_ext->lce);
}
if (bm_ext->lce.refcnt != 0) {
- dev_info(DEV, "Retrying drbd_rs_del_all() later. "
+ drbd_info(device, "Retrying drbd_rs_del_all() later. "
"refcnt=%d\n", bm_ext->lce.refcnt);
- put_ldev(mdev);
- spin_unlock_irq(&mdev->al_lock);
+ put_ldev(device);
+ spin_unlock_irq(&device->al_lock);
return -EAGAIN;
}
- D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
- D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
- lc_del(mdev->resync, &bm_ext->lce);
+ D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
+ D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags));
+ lc_del(device->resync, &bm_ext->lce);
}
- D_ASSERT(mdev->resync->used == 0);
- put_ldev(mdev);
+ D_ASSERT(device, device->resync->used == 0);
+ put_ldev(device);
}
- spin_unlock_irq(&mdev->al_lock);
- wake_up(&mdev->al_wait);
+ spin_unlock_irq(&device->al_lock);
+ wake_up(&device->al_wait);
return 0;
}
/**
* drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @sector: The sector number.
* @size: Size of failed IO operation, in byte.
*/
-void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
+void drbd_rs_failed_io(struct drbd_device *device, sector_t sector, int size)
{
/* Is called from worker and receiver context _only_ */
unsigned long sbnr, ebnr, lbnr;
@@ -1284,11 +1289,11 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
int wake_up = 0;
if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
- dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
+ drbd_err(device, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
}
- nr_sectors = drbd_get_capacity(mdev->this_bdev);
+ nr_sectors = drbd_get_capacity(device->this_bdev);
esector = sector + (size >> 9) - 1;
if (!expect(sector < nr_sectors))
@@ -1316,21 +1321,21 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors.
*/
- spin_lock_irq(&mdev->al_lock);
- count = drbd_bm_count_bits(mdev, sbnr, ebnr);
+ spin_lock_irq(&device->al_lock);
+ count = drbd_bm_count_bits(device, sbnr, ebnr);
if (count) {
- mdev->rs_failed += count;
+ device->rs_failed += count;
- if (get_ldev(mdev)) {
- drbd_try_clear_on_disk_bm(mdev, sector, count, false);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ drbd_try_clear_on_disk_bm(device, sector, count, false);
+ put_ldev(device);
}
/* just wake_up unconditionally now, various lc_changed(),
* lc_put() in drbd_try_clear_on_disk_bm(). */
wake_up = 1;
}
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
if (wake_up)
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
}
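
Most of the drbd_actlog.c delta is the mechanical drbd_conf -> drbd_device rename, the dev_*(DEV, ...) -> drbd_*(device, ...) logging switch, D_ASSERT() gaining a device argument, and the work items (update_al_work, update_odbm_work) carrying their own device pointer now that the generic drbd_work no longer does. One idiom the file keeps is worth isolating: the single metadata I/O buffer is claimed with atomic_cmpxchg() under wait_event(), with an escape condition so a failed disk cannot block the waiter forever. A hedged sketch of that pattern only, mirroring drbd_md_get_buffer()/drbd_md_put_buffer() with invented names:

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/wait.h>

struct md_buf {
	atomic_t		in_use;		/* 0 = free, 1 = owned */
	wait_queue_head_t	wait;
	void			*page_addr;	/* the one shared buffer */
	bool			failed;		/* stand-in for disk <= D_FAILED */
};

static void md_buf_init(struct md_buf *b, void *addr)
{
	atomic_set(&b->in_use, 0);
	init_waitqueue_head(&b->wait);
	b->page_addr = addr;
	b->failed = false;
}

static void *md_buf_get(struct md_buf *b)
{
	int r;

	wait_event(b->wait,
		   (r = atomic_cmpxchg(&b->in_use, 0, 1)) == 0 || b->failed);

	return r ? NULL : b->page_addr;	/* NULL: device failed, no buffer */
}

static void md_buf_put(struct md_buf *b)
{
	if (atomic_dec_and_test(&b->in_use))
		wake_up(&b->wait);
}
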
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 597f111df67b..1aa29f8fdfe1 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -113,54 +113,54 @@ struct drbd_bitmap {
};
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
-static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
+static void __bm_print_lock_info(struct drbd_device *device, const char *func)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!__ratelimit(&drbd_ratelimit_state))
return;
- dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
- drbd_task_to_thread_name(mdev->tconn, current),
- func, b->bm_why ?: "?",
- drbd_task_to_thread_name(mdev->tconn, b->bm_task));
+ drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n",
+ current->comm, task_pid_nr(current),
+ func, b->bm_why ?: "?",
+ b->bm_task->comm, task_pid_nr(b->bm_task));
}
-void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
+void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
int trylock_failed;
if (!b) {
- dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
+ drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n");
return;
}
trylock_failed = !mutex_trylock(&b->bm_change);
if (trylock_failed) {
- dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
- drbd_task_to_thread_name(mdev->tconn, current),
- why, b->bm_why ?: "?",
- drbd_task_to_thread_name(mdev->tconn, b->bm_task));
+ drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n",
+ current->comm, task_pid_nr(current),
+ why, b->bm_why ?: "?",
+ b->bm_task->comm, task_pid_nr(b->bm_task));
mutex_lock(&b->bm_change);
}
if (BM_LOCKED_MASK & b->bm_flags)
- dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
+ drbd_err(device, "FIXME bitmap already locked in bm_lock\n");
b->bm_flags |= flags & BM_LOCKED_MASK;
b->bm_why = why;
b->bm_task = current;
}
-void drbd_bm_unlock(struct drbd_conf *mdev)
+void drbd_bm_unlock(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!b) {
- dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
+ drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n");
return;
}
- if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
- dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");
+ if (!(BM_LOCKED_MASK & device->bitmap->bm_flags))
+ drbd_err(device, "FIXME bitmap not locked in bm_unlock\n");
b->bm_flags &= ~BM_LOCKED_MASK;
b->bm_why = NULL;
@@ -211,19 +211,19 @@ static unsigned long bm_page_to_idx(struct page *page)
/* As is very unlikely that the same page is under IO from more than one
* context, we can get away with a bit per page and one wait queue per bitmap.
*/
-static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
+static void bm_page_lock_io(struct drbd_device *device, int page_nr)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}
-static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
+static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
- wake_up(&mdev->bitmap->bm_io_wait);
+ wake_up(&device->bitmap->bm_io_wait);
}
/* set _before_ submit_io, so it may be reset due to being changed
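
The pair above serializes I/O on one bitmap page with a single lock bit kept in the page's private word: lockers loop in wait_event() on test_and_set_bit(), the unlocker uses clear_bit_unlock() (which provides the required release ordering) and then wakes the shared per-bitmap waitqueue. The same idiom with generic names:

#include <linux/bitops.h>
#include <linux/wait.h>

#define MY_PAGE_IO_LOCK		0	/* stand-in for BM_PAGE_IO_LOCK */

static DECLARE_WAIT_QUEUE_HEAD(page_io_wait);

static void page_io_lock(unsigned long *flags_word)
{
	/* Sleep until this caller is the one that sets the bit. */
	wait_event(page_io_wait,
		   !test_and_set_bit(MY_PAGE_IO_LOCK, flags_word));
}

static void page_io_unlock(unsigned long *flags_word)
{
	clear_bit_unlock(MY_PAGE_IO_LOCK, flags_word);
	wake_up(&page_io_wait);
}
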
@@ -242,22 +242,22 @@ static void bm_set_page_need_writeout(struct page *page)
/**
* drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @page_nr: the bitmap page to mark with the "hint" flag
*
* From within an activity log transaction, we mark a few pages with these
* hints, then call drbd_bm_write_hinted(), which will only write out changed
* pages which are flagged with this mark.
*/
-void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr)
+void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
{
struct page *page;
- if (page_nr >= mdev->bitmap->bm_number_of_pages) {
- dev_warn(DEV, "BAD: page_nr: %u, number_of_pages: %u\n",
- page_nr, (int)mdev->bitmap->bm_number_of_pages);
+ if (page_nr >= device->bitmap->bm_number_of_pages) {
+ drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
+ page_nr, (int)device->bitmap->bm_number_of_pages);
return;
}
- page = mdev->bitmap->bm_pages[page_nr];
+ page = device->bitmap->bm_pages[page_nr];
set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
}
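A hypothetical caller would pair this marking step with drbd_bm_write_hinted() roughly as sketched below; al_extent_to_bm_page() is an assumed helper name used only for illustration:

/* Mark the bitmap pages touched by an activity-log transaction, then flush
 * only those pages (illustration, not part of the patch). */
static int flush_hinted_pages(struct drbd_device *device,
                              const unsigned int *extents, int n)
{
        int i;

        for (i = 0; i < n; i++)
                drbd_bm_mark_for_writeout(device,
                                          al_extent_to_bm_page(extents[i]));

        /* only pages carrying the hint flag (and actually changed) go out */
        return drbd_bm_write_hinted(device);
}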
@@ -340,7 +340,7 @@ static void bm_unmap(unsigned long *p_addr)
/*
* actually most functions herein should take a struct drbd_bitmap*, not a
- * struct drbd_conf*, but for the debug macros I like to have the mdev around
+ * struct drbd_device*, but for the debug macros I like to have the device around
 * to be able to report device-specific messages.
*/
@@ -436,11 +436,11 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
/*
* called on driver init only. TODO call when a device is created.
- * allocates the drbd_bitmap, and stores it in mdev->bitmap.
+ * allocates the drbd_bitmap, and stores it in device->bitmap.
*/
-int drbd_bm_init(struct drbd_conf *mdev)
+int drbd_bm_init(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
WARN_ON(b != NULL);
b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
if (!b)
@@ -449,28 +449,28 @@ int drbd_bm_init(struct drbd_conf *mdev)
mutex_init(&b->bm_change);
init_waitqueue_head(&b->bm_io_wait);
- mdev->bitmap = b;
+ device->bitmap = b;
return 0;
}
-sector_t drbd_bm_capacity(struct drbd_conf *mdev)
+sector_t drbd_bm_capacity(struct drbd_device *device)
{
- if (!expect(mdev->bitmap))
+ if (!expect(device->bitmap))
return 0;
- return mdev->bitmap->bm_dev_capacity;
+ return device->bitmap->bm_dev_capacity;
}
/* called on driver unload. TODO: call when a device is destroyed.
*/
-void drbd_bm_cleanup(struct drbd_conf *mdev)
+void drbd_bm_cleanup(struct drbd_device *device)
{
- if (!expect(mdev->bitmap))
+ if (!expect(device->bitmap))
return;
- bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
- bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
- kfree(mdev->bitmap);
- mdev->bitmap = NULL;
+ bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
+ bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags));
+ kfree(device->bitmap);
+ device->bitmap = NULL;
}
/*
@@ -631,9 +631,9 @@ static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
* In case this is actually a resize, we copy the old bitmap into the new one.
* Otherwise, the bitmap is initialized to all bits set.
*/
-int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
+int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long bits, words, owords, obits;
unsigned long want, have, onpages; /* number of pages */
struct page **npages, **opages = NULL;
@@ -643,9 +643,9 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
if (!expect(b))
return -ENOMEM;
- drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);
+ drbd_bm_lock(device, "resize", BM_LOCKED_MASK);
- dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
+ drbd_info(device, "drbd_bm_resize called with capacity == %llu\n",
(unsigned long long)capacity);
if (capacity == b->bm_dev_capacity)
@@ -678,12 +678,12 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
*/
words = ALIGN(bits, 64) >> LN2_BPL;
- if (get_ldev(mdev)) {
- u64 bits_on_disk = drbd_md_on_disk_bits(mdev->ldev);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
+ put_ldev(device);
if (bits > bits_on_disk) {
- dev_info(DEV, "bits = %lu\n", bits);
- dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
+ drbd_info(device, "bits = %lu\n", bits);
+ drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk);
err = -ENOSPC;
goto out;
}
@@ -692,10 +692,10 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
have = b->bm_number_of_pages;
if (want == have) {
- D_ASSERT(b->bm_pages != NULL);
+ D_ASSERT(device, b->bm_pages != NULL);
npages = b->bm_pages;
} else {
- if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
+ if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
npages = NULL;
else
npages = bm_realloc_pages(b, want);
@@ -742,10 +742,10 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
bm_vk_free(opages, opages_vmalloced);
if (!growing)
b->bm_set = bm_count_bits(b);
- dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
+ drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
out:
- drbd_bm_unlock(mdev);
+ drbd_bm_unlock(device);
return err;
}
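To make the sizing arithmetic in drbd_bm_resize() concrete, here is a worked example, assuming the usual 4 KiB-per-bit bitmap granularity, 64-bit longs (LN2_BPL == 6) and 4 KiB pages; illustration only, not part of the patch:

/*
 *   capacity = 1 TiB                          = 2^40 bytes
 *   bits     = 2^40 / 4096                    = 2^28 bits
 *   words    = ALIGN(bits, 64) >> LN2_BPL     = 2^22 longs  (32 MiB of bitmap)
 *   want     = ALIGN(words * 8, 4096) >> 12   = 8192 pages
 */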
@@ -757,9 +757,9 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
*
* maybe bm_set should be atomic_t ?
*/
-unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
+unsigned long _drbd_bm_total_weight(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long s;
unsigned long flags;
@@ -775,20 +775,20 @@ unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
return s;
}
-unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
+unsigned long drbd_bm_total_weight(struct drbd_device *device)
{
unsigned long s;
/* if I don't have a disk, I don't know about out-of-sync status */
- if (!get_ldev_if_state(mdev, D_NEGOTIATING))
+ if (!get_ldev_if_state(device, D_NEGOTIATING))
return 0;
- s = _drbd_bm_total_weight(mdev);
- put_ldev(mdev);
+ s = _drbd_bm_total_weight(device);
+ put_ldev(device);
return s;
}
-size_t drbd_bm_words(struct drbd_conf *mdev)
+size_t drbd_bm_words(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return 0;
if (!expect(b->bm_pages))
@@ -797,9 +797,9 @@ size_t drbd_bm_words(struct drbd_conf *mdev)
return b->bm_words;
}
-unsigned long drbd_bm_bits(struct drbd_conf *mdev)
+unsigned long drbd_bm_bits(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return 0;
@@ -811,10 +811,10 @@ unsigned long drbd_bm_bits(struct drbd_conf *mdev)
* bitmap must be locked by drbd_bm_lock.
* currently only used from receive_bitmap.
*/
-void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
+void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
unsigned long *buffer)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr, *bm;
unsigned long word, bits;
unsigned int idx;
@@ -860,10 +860,10 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
/* copy number words from the bitmap starting at offset into the buffer.
* buffer[i] will be little endian unsigned long.
*/
-void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
+void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
unsigned long *buffer)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr, *bm;
size_t end, do_now;
@@ -878,7 +878,7 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
if ((offset >= b->bm_words) ||
(end > b->bm_words) ||
(number <= 0))
- dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
+ drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n",
(unsigned long) offset,
(unsigned long) number,
(unsigned long) b->bm_words);
@@ -897,9 +897,9 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
}
/* set all bits in the bitmap */
-void drbd_bm_set_all(struct drbd_conf *mdev)
+void drbd_bm_set_all(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return;
if (!expect(b->bm_pages))
@@ -913,9 +913,9 @@ void drbd_bm_set_all(struct drbd_conf *mdev)
}
/* clear all bits in the bitmap */
-void drbd_bm_clear_all(struct drbd_conf *mdev)
+void drbd_bm_clear_all(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return;
if (!expect(b->bm_pages))
@@ -928,7 +928,7 @@ void drbd_bm_clear_all(struct drbd_conf *mdev)
}
struct bm_aio_ctx {
- struct drbd_conf *mdev;
+ struct drbd_device *device;
atomic_t in_flight;
unsigned int done;
unsigned flags;
@@ -943,7 +943,7 @@ static void bm_aio_ctx_destroy(struct kref *kref)
{
struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);
- put_ldev(ctx->mdev);
+ put_ldev(ctx->device);
kfree(ctx);
}
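The contexts released here are reference-counted: bm_rw() and drbd_bm_write_page() start the kref at 2, one reference for the submitting path and one for the completion path, and whichever side drops last frees the context and releases the ldev reference. A minimal sketch of that lifetime rule, with invented names (my_ctx, my_ctx_destroy):

#include <linux/kref.h>
#include <linux/slab.h>

struct my_ctx {
        struct kref kref;
        /* ... per-IO bookkeeping ... */
};

static void my_ctx_destroy(struct kref *kref)
{
        struct my_ctx *ctx = container_of(kref, struct my_ctx, kref);

        kfree(ctx);
}

static void my_start(void)
{
        struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_NOIO);

        if (!ctx)
                return;
        kref_init(&ctx->kref);          /* reference of the submitter */
        kref_get(&ctx->kref);           /* reference of the completion path */
        /* submit IO; the completion handler ends with
         * kref_put(&ctx->kref, my_ctx_destroy) */
        kref_put(&ctx->kref, my_ctx_destroy);   /* submitter is done */
}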
@@ -951,8 +951,8 @@ static void bm_aio_ctx_destroy(struct kref *kref)
static void bm_async_io_complete(struct bio *bio, int error)
{
struct bm_aio_ctx *ctx = bio->bi_private;
- struct drbd_conf *mdev = ctx->mdev;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_device *device = ctx->device;
+ struct drbd_bitmap *b = device->bitmap;
unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
int uptodate = bio_flagged(bio, BIO_UPTODATE);
@@ -966,7 +966,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
!bm_test_page_unchanged(b->bm_pages[idx]))
- dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);
+ drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
if (error) {
/* ctx error will hold the completed-last non-zero error code,
@@ -976,14 +976,14 @@ static void bm_async_io_complete(struct bio *bio, int error)
/* Not identical to on disk version of it.
* Is BM_PAGE_IO_ERROR enough? */
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
+ drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
error, idx);
} else {
bm_clear_page_io_err(b->bm_pages[idx]);
- dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
+ dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
}
- bm_page_unlock_io(mdev, idx);
+ bm_page_unlock_io(device, idx);
if (ctx->flags & BM_AIO_COPY_PAGES)
mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
@@ -992,7 +992,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
if (atomic_dec_and_test(&ctx->in_flight)) {
ctx->done = 1;
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
}
}
@@ -1000,23 +1000,23 @@ static void bm_async_io_complete(struct bio *bio, int error)
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
struct bio *bio = bio_alloc_drbd(GFP_NOIO);
- struct drbd_conf *mdev = ctx->mdev;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_device *device = ctx->device;
+ struct drbd_bitmap *b = device->bitmap;
struct page *page;
unsigned int len;
sector_t on_disk_sector =
- mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
+ device->ldev->md.md_offset + device->ldev->md.bm_offset;
on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
/* this might happen with very small
* flexible external meta data device,
* or with PAGE_SIZE > 4k */
len = min_t(unsigned int, PAGE_SIZE,
- (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);
+ (drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);
/* serialize IO on this page */
- bm_page_lock_io(mdev, page_nr);
+ bm_page_lock_io(device, page_nr);
/* before memcpy and submit,
* so it can be redirtied any time */
bm_set_page_unchanged(b->bm_pages[page_nr]);
@@ -1027,7 +1027,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
- bio->bi_bdev = mdev->ldev->md_bdev;
+ bio->bi_bdev = device->ldev->md_bdev;
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
@@ -1035,24 +1035,24 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bio->bi_private = ctx;
bio->bi_end_io = bm_async_io_complete;
- if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
+ if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio->bi_rw |= rw;
bio_endio(bio, -EIO);
} else {
submit_bio(rw, bio);
/* this should not count as user activity and cause the
* resync to throttle -- see drbd_rs_should_slow_down(). */
- atomic_add(len >> 9, &mdev->rs_sect_ev);
+ atomic_add(len >> 9, &device->rs_sect_ev);
}
}
/*
* bm_rw: read/write the whole bitmap from/to its on disk location.
*/
-static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
+static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
struct bm_aio_ctx *ctx;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
int num_pages, i, count = 0;
unsigned long now;
char ppb[10];
@@ -1072,7 +1072,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
return -ENOMEM;
*ctx = (struct bm_aio_ctx) {
- .mdev = mdev,
+ .device = device,
.in_flight = ATOMIC_INIT(1),
.done = 0,
.flags = flags,
@@ -1080,8 +1080,8 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
.kref = { ATOMIC_INIT(2) },
};
- if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
- dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
+ if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
kfree(ctx);
return -ENODEV;
}
@@ -1106,14 +1106,14 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
if (!(flags & BM_WRITE_ALL_PAGES) &&
bm_test_page_unchanged(b->bm_pages[i])) {
- dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
+ dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
continue;
}
/* during lazy writeout,
* ignore those pages not marked for lazy writeout. */
if (lazy_writeout_upper_idx &&
!bm_test_page_lazy_writeout(b->bm_pages[i])) {
- dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
+ dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
continue;
}
}
@@ -1132,19 +1132,19 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
* "in_flight reached zero, all done" event.
*/
if (!atomic_dec_and_test(&ctx->in_flight))
- wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
else
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
/* summary for global bitmap IO */
if (flags == 0)
- dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
+ drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n",
rw == WRITE ? "WRITE" : "READ",
count, jiffies - now);
if (ctx->error) {
- dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
+ drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
err = -EIO; /* ctx->error ? */
}
@@ -1153,16 +1153,16 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
now = jiffies;
if (rw == WRITE) {
- drbd_md_flush(mdev);
+ drbd_md_flush(device);
} else /* rw == READ */ {
b->bm_set = bm_count_bits(b);
- dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
+ drbd_info(device, "recounting of set bits took additional %lu jiffies\n",
jiffies - now);
}
now = b->bm_set;
if (flags == 0)
- dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
+ drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
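The in_flight accounting used above deserves a note: the counter starts at 1 (the submitter's own reference), every submitted bio adds one, every completion drops one, and the submitter drops its own count after the submit loop. Whoever brings the counter to zero marks the context done and wakes the waiter, so the race between "last completion" and "submitter finished submitting" needs no extra lock. A sketch with invented names (my_io, my_complete_one, my_submit_all), illustration only:

#include <linux/atomic.h>
#include <linux/wait.h>

struct my_io {
        atomic_t in_flight;
        int done;
        wait_queue_head_t wait;
};

static void my_complete_one(struct my_io *io)
{
        if (atomic_dec_and_test(&io->in_flight)) {
                io->done = 1;
                wake_up(&io->wait);
        }
}

static void my_submit_all(struct my_io *io, int n)
{
        int i;

        init_waitqueue_head(&io->wait);
        atomic_set(&io->in_flight, 1);          /* submitter's own count */
        for (i = 0; i < n; i++) {
                atomic_inc(&io->in_flight);
                /* submit_bio(...); completion calls my_complete_one(io) */
        }
        if (!atomic_dec_and_test(&io->in_flight))       /* drop our count */
                wait_event(io->wait, io->done);
}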
@@ -1171,48 +1171,38 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
/**
* drbd_bm_read() - Read the whole bitmap from its on disk location.
- * @mdev: DRBD device.
+ * @device: DRBD device.
*/
-int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
+int drbd_bm_read(struct drbd_device *device) __must_hold(local)
{
- return bm_rw(mdev, READ, 0, 0);
+ return bm_rw(device, READ, 0, 0);
}
/**
* drbd_bm_write() - Write the whole bitmap to its on disk location.
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Will only write pages that have changed since last IO.
*/
-int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
+int drbd_bm_write(struct drbd_device *device) __must_hold(local)
{
- return bm_rw(mdev, WRITE, 0, 0);
+ return bm_rw(device, WRITE, 0, 0);
}
/**
* drbd_bm_write_all() - Write the whole bitmap to its on disk location.
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Will write all pages.
*/
-int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
+int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
{
- return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
-}
-
-/**
- * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
- * @mdev: DRBD device.
- * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages
- */
-int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
-{
- return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
+ return bm_rw(device, WRITE, BM_WRITE_ALL_PAGES, 0);
}
/**
* drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Will only write pages that have changed since last IO.
* In contrast to drbd_bm_write(), this will copy the bitmap pages
@@ -1221,23 +1211,23 @@ int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(l
* verify is aborted due to a failed peer disk, while local IO continues, or
* pending resync acks are still being processed.
*/
-int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
+int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
{
- return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
+ return bm_rw(device, WRITE, BM_AIO_COPY_PAGES, 0);
}
/**
* drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
- * @mdev: DRBD device.
+ * @device: DRBD device.
*/
-int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
+int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local)
{
- return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
+ return bm_rw(device, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}
/**
* drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @idx: bitmap page index
*
* We don't want to special case on logical_block_size of the backend device,
@@ -1247,13 +1237,13 @@ int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
* In case this becomes an issue on systems with larger PAGE_SIZE,
* we may want to change this again to write 4k aligned 4k pieces.
*/
-int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
+int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold(local)
{
struct bm_aio_ctx *ctx;
int err;
- if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
- dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
+ if (bm_test_page_unchanged(device->bitmap->bm_pages[idx])) {
+ dynamic_drbd_dbg(device, "skipped bm page write for idx %u\n", idx);
return 0;
}
@@ -1262,7 +1252,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
return -ENOMEM;
*ctx = (struct bm_aio_ctx) {
- .mdev = mdev,
+ .device = device,
.in_flight = ATOMIC_INIT(1),
.done = 0,
.flags = BM_AIO_COPY_PAGES,
@@ -1270,21 +1260,21 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
.kref = { ATOMIC_INIT(2) },
};
- if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
- dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
+ if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
kfree(ctx);
return -ENODEV;
}
bm_page_io_async(ctx, idx, WRITE_SYNC);
- wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
if (ctx->error)
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
/* that causes us to detach, so the in memory bitmap will be
* gone in a moment as well. */
- mdev->bm_writ_cnt++;
+ device->bm_writ_cnt++;
err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
return err;
@@ -1298,17 +1288,17 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
*
* this returns a bit number, NOT a sector!
*/
-static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
+static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo,
const int find_zero_bit)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr;
unsigned long bit_offset;
unsigned i;
if (bm_fo > b->bm_bits) {
- dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
+ drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
bm_fo = DRBD_END_OF_BITMAP;
} else {
while (bm_fo < b->bm_bits) {
@@ -1338,10 +1328,10 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
return bm_fo;
}
-static unsigned long bm_find_next(struct drbd_conf *mdev,
+static unsigned long bm_find_next(struct drbd_device *device,
unsigned long bm_fo, const int find_zero_bit)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long i = DRBD_END_OF_BITMAP;
if (!expect(b))
@@ -1351,39 +1341,39 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
spin_lock_irq(&b->bm_lock);
if (BM_DONT_TEST & b->bm_flags)
- bm_print_lock_info(mdev);
+ bm_print_lock_info(device);
- i = __bm_find_next(mdev, bm_fo, find_zero_bit);
+ i = __bm_find_next(device, bm_fo, find_zero_bit);
spin_unlock_irq(&b->bm_lock);
return i;
}
-unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
+unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
- return bm_find_next(mdev, bm_fo, 0);
+ return bm_find_next(device, bm_fo, 0);
}
#if 0
/* not yet needed for anything. */
-unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
+unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
- return bm_find_next(mdev, bm_fo, 1);
+ return bm_find_next(device, bm_fo, 1);
}
#endif
/* does not spin_lock_irqsave.
* you must take drbd_bm_lock() first */
-unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
+unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
- /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
- return __bm_find_next(mdev, bm_fo, 0);
+ /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
+ return __bm_find_next(device, bm_fo, 0);
}
-unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
+unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
- /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
- return __bm_find_next(mdev, bm_fo, 1);
+ /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
+ return __bm_find_next(device, bm_fo, 1);
}
/* returns number of bits actually changed.
@@ -1392,10 +1382,10 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
* wants bitnr, not sector.
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
-static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
unsigned long e, int val)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr = NULL;
unsigned long bitnr;
unsigned int last_page_nr = -1U;
@@ -1403,7 +1393,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
int changed_total = 0;
if (e >= b->bm_bits) {
- dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
+ drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
s, e, b->bm_bits);
e = b->bm_bits ? b->bm_bits -1 : 0;
}
@@ -1441,11 +1431,11 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
* for val != 0, we change 0 -> 1, return code positive
* for val == 0, we change 1 -> 0, return code negative
* wants bitnr, not sector */
-static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
const unsigned long e, int val)
{
unsigned long flags;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
int c = 0;
if (!expect(b))
@@ -1455,24 +1445,24 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
spin_lock_irqsave(&b->bm_lock, flags);
if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
- bm_print_lock_info(mdev);
+ bm_print_lock_info(device);
- c = __bm_change_bits_to(mdev, s, e, val);
+ c = __bm_change_bits_to(device, s, e, val);
spin_unlock_irqrestore(&b->bm_lock, flags);
return c;
}
/* returns number of bits changed 0 -> 1 */
-int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
- return bm_change_bits_to(mdev, s, e, 1);
+ return bm_change_bits_to(device, s, e, 1);
}
/* returns number of bits changed 1 -> 0 */
-int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
- return -bm_change_bits_to(mdev, s, e, 0);
+ return -bm_change_bits_to(device, s, e, 0);
}
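A short usage sketch for the two wrappers above (hypothetical caller): internally bm_change_bits_to() returns positive counts for 0 -> 1 and negative counts for 1 -> 0, but both wrappers already normalise the result to the number of bits that actually flipped:

static void example_mark_range(struct drbd_device *device,
                               unsigned long s, unsigned long e)
{
        int newly_set     = drbd_bm_set_bits(device, s, e);   /* 0 -> 1 */
        int newly_cleared = drbd_bm_clear_bits(device, s, e); /* 1 -> 0 */

        drbd_info(device, "set %d, then cleared %d bits in [%lu,%lu]\n",
                  newly_set, newly_cleared, s, e);
}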
/* sets all bits in full words,
@@ -1504,7 +1494,7 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
* You must first drbd_bm_lock().
* Can be called to set the whole bitmap in one go.
* Sets bits from s to e _inclusive_. */
-void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
/* First set_bit from the first bit (s)
* up to the next long boundary (sl),
@@ -1514,7 +1504,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* Do not use memset, because we must account for changes,
* so we need to loop over the words with hweight() anyways.
*/
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long sl = ALIGN(s,BITS_PER_LONG);
unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
int first_page;
@@ -1526,7 +1516,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
if (e - s <= 3*BITS_PER_LONG) {
/* don't bother; el and sl may even be wrong. */
spin_lock_irq(&b->bm_lock);
- __bm_change_bits_to(mdev, s, e, 1);
+ __bm_change_bits_to(device, s, e, 1);
spin_unlock_irq(&b->bm_lock);
return;
}
@@ -1537,7 +1527,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
/* bits filling the current long */
if (sl)
- __bm_change_bits_to(mdev, s, sl-1, 1);
+ __bm_change_bits_to(device, s, sl-1, 1);
first_page = sl >> (3 + PAGE_SHIFT);
last_page = el >> (3 + PAGE_SHIFT);
@@ -1549,7 +1539,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
/* first and full pages, unless first page == last page */
for (page_nr = first_page; page_nr < last_page; page_nr++) {
- bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
+ bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
spin_unlock_irq(&b->bm_lock);
cond_resched();
first_word = 0;
@@ -1565,7 +1555,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* as we did not allocate it, it is not present in bitmap->bm_pages.
*/
if (last_word)
- bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+ bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);
/* possibly trailing bits.
* example: (e & 63) == 63, el will be e+1.
@@ -1573,7 +1563,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* it would trigger an assert in __bm_change_bits_to()
*/
if (el <= e)
- __bm_change_bits_to(mdev, el, e, 1);
+ __bm_change_bits_to(device, el, e, 1);
spin_unlock_irq(&b->bm_lock);
}
@@ -1584,10 +1574,10 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* 0 ... bit not set
* -1 ... first out of bounds access, stop testing for bits!
*/
-int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
+int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
{
unsigned long flags;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr;
int i;
@@ -1598,7 +1588,7 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
- bm_print_lock_info(mdev);
+ bm_print_lock_info(device);
if (bitnr < b->bm_bits) {
p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
@@ -1606,7 +1596,7 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
} else if (bitnr == b->bm_bits) {
i = -1;
} else { /* (bitnr > b->bm_bits) */
- dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
+ drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
i = 0;
}
@@ -1615,10 +1605,10 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
}
/* returns number of bits set in the range [s, e] */
-int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
unsigned long flags;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr = NULL;
unsigned long bitnr;
unsigned int page_nr = -1U;
@@ -1635,7 +1625,7 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
- bm_print_lock_info(mdev);
+ bm_print_lock_info(device);
for (bitnr = s; bitnr <= e; bitnr++) {
unsigned int idx = bm_bit_to_page_idx(b, bitnr);
if (page_nr != idx) {
@@ -1647,7 +1637,7 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
if (expect(bitnr < b->bm_bits))
c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
else
- dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
+ drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
}
if (p_addr)
bm_unmap(p_addr);
@@ -1670,9 +1660,9 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* reference count of some bitmap extent element from some lru instead...
*
*/
-int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
+int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
int count, s, e;
unsigned long flags;
unsigned long *p_addr, *bm;
@@ -1684,7 +1674,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
- bm_print_lock_info(mdev);
+ bm_print_lock_info(device);
s = S2W(enr);
e = min((size_t)S2W(enr+1), b->bm_words);
@@ -1697,7 +1687,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
count += hweight_long(*bm++);
bm_unmap(p_addr);
} else {
- dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
+ drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s);
}
spin_unlock_irqrestore(&b->bm_lock, flags);
return count;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 0e06f0c5dd1e..e7093d4291f1 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -45,7 +45,9 @@
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
+#include "drbd_strings.h"
#include "drbd_state.h"
+#include "drbd_protocol.h"
#ifdef __CHECKER__
# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
@@ -65,6 +67,7 @@
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
+void tl_abort_disk_io(struct drbd_device *device);
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
@@ -95,25 +98,60 @@ extern char usermode_helper[];
#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
-struct drbd_conf;
-struct drbd_tconn;
-
-
-/* to shorten dev_warn(DEV, "msg"); and relatives statements */
-#define DEV (disk_to_dev(mdev->vdisk))
-
-#define conn_printk(LEVEL, TCONN, FMT, ARGS...) \
- printk(LEVEL "d-con %s: " FMT, TCONN->name , ## ARGS)
-#define conn_alert(TCONN, FMT, ARGS...) conn_printk(KERN_ALERT, TCONN, FMT, ## ARGS)
-#define conn_crit(TCONN, FMT, ARGS...) conn_printk(KERN_CRIT, TCONN, FMT, ## ARGS)
-#define conn_err(TCONN, FMT, ARGS...) conn_printk(KERN_ERR, TCONN, FMT, ## ARGS)
-#define conn_warn(TCONN, FMT, ARGS...) conn_printk(KERN_WARNING, TCONN, FMT, ## ARGS)
-#define conn_notice(TCONN, FMT, ARGS...) conn_printk(KERN_NOTICE, TCONN, FMT, ## ARGS)
-#define conn_info(TCONN, FMT, ARGS...) conn_printk(KERN_INFO, TCONN, FMT, ## ARGS)
-#define conn_dbg(TCONN, FMT, ARGS...) conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS)
-
-#define D_ASSERT(exp) if (!(exp)) \
- dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
+struct drbd_device;
+struct drbd_connection;
+
+#define __drbd_printk_device(level, device, fmt, args...) \
+ dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
+#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
+ dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
+#define __drbd_printk_resource(level, resource, fmt, args...) \
+ printk(level "drbd %s: " fmt, (resource)->name, ## args)
+#define __drbd_printk_connection(level, connection, fmt, args...) \
+ printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)
+
+void drbd_printk_with_wrong_object_type(void);
+
+#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
+ (__builtin_types_compatible_p(typeof(obj), type) || \
+ __builtin_types_compatible_p(typeof(obj), const type)), \
+ func(level, (const type)(obj), fmt, ## args)
+
+#define drbd_printk(level, obj, fmt, args...) \
+ __builtin_choose_expr( \
+ __drbd_printk_if_same_type(obj, struct drbd_device *, \
+ __drbd_printk_device, level, fmt, ## args), \
+ __builtin_choose_expr( \
+ __drbd_printk_if_same_type(obj, struct drbd_resource *, \
+ __drbd_printk_resource, level, fmt, ## args), \
+ __builtin_choose_expr( \
+ __drbd_printk_if_same_type(obj, struct drbd_connection *, \
+ __drbd_printk_connection, level, fmt, ## args), \
+ __builtin_choose_expr( \
+ __drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
+ __drbd_printk_peer_device, level, fmt, ## args), \
+ drbd_printk_with_wrong_object_type()))))
+
+#define drbd_dbg(obj, fmt, args...) \
+ drbd_printk(KERN_DEBUG, obj, fmt, ## args)
+#define drbd_alert(obj, fmt, args...) \
+ drbd_printk(KERN_ALERT, obj, fmt, ## args)
+#define drbd_err(obj, fmt, args...) \
+ drbd_printk(KERN_ERR, obj, fmt, ## args)
+#define drbd_warn(obj, fmt, args...) \
+ drbd_printk(KERN_WARNING, obj, fmt, ## args)
+#define drbd_info(obj, fmt, args...) \
+ drbd_printk(KERN_INFO, obj, fmt, ## args)
+#define drbd_emerg(obj, fmt, args...) \
+ drbd_printk(KERN_EMERG, obj, fmt, ## args)
+
+#define dynamic_drbd_dbg(device, fmt, args...) \
+ dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
+
+#define D_ASSERT(device, exp) do { \
+ if (!(exp)) \
+ drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
+ } while (0)
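The drbd_printk() machinery above dispatches on the static type of its object argument: __builtin_types_compatible_p() yields a compile-time constant, __builtin_choose_expr() picks the matching helper, and an unsupported pointer type falls through to the never-defined drbd_printk_with_wrong_object_type(), turning a misuse into a link error. The same trick, reduced to a stand-alone user-space sketch with invented names (log_obj, struct widget/gadget), purely for illustration:

#include <stdio.h>

struct widget { const char *name; };
struct gadget { int id; };

#define log_widget(obj, fmt, ...) \
        printf("widget %s: " fmt, ((const struct widget *)(obj))->name, ##__VA_ARGS__)
#define log_gadget(obj, fmt, ...) \
        printf("gadget %d: " fmt, ((const struct gadget *)(obj))->id, ##__VA_ARGS__)

void log_with_wrong_object_type(void);  /* never defined: forces a link error */

#define log_obj(obj, fmt, ...)                                          \
        __builtin_choose_expr(                                          \
                __builtin_types_compatible_p(typeof(obj), struct widget *), \
                log_widget(obj, fmt, ##__VA_ARGS__),                    \
        __builtin_choose_expr(                                          \
                __builtin_types_compatible_p(typeof(obj), struct gadget *), \
                log_gadget(obj, fmt, ##__VA_ARGS__),                    \
                log_with_wrong_object_type()))

int main(void)
{
        struct widget w = { .name = "left" };
        struct gadget g = { .id = 7 };

        log_obj(&w, "spinning\n");      /* resolves to log_widget() */
        log_obj(&g, "blinking\n");      /* resolves to log_gadget() */
        return 0;
}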
/**
* expect - Make an assertion
@@ -123,7 +161,7 @@ struct drbd_tconn;
#define expect(exp) ({ \
bool _bool = (exp); \
if (!_bool) \
- dev_err(DEV, "ASSERTION %s FAILED in %s\n", \
+ drbd_err(device, "ASSERTION %s FAILED in %s\n", \
#exp, __func__); \
_bool; \
})
@@ -145,14 +183,14 @@ enum {
};
extern unsigned int
-_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
+_drbd_insert_fault(struct drbd_device *device, unsigned int type);
static inline int
-drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
+drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
return fault_rate &&
(enable_faults & (1<<type)) &&
- _drbd_insert_fault(mdev, type);
+ _drbd_insert_fault(device, type);
#else
return 0;
#endif
@@ -164,74 +202,8 @@ drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
#define div_floor(A, B) ((A)/(B))
extern struct ratelimit_state drbd_ratelimit_state;
-extern struct idr minors; /* RCU, updates: genl_lock() */
-extern struct list_head drbd_tconns; /* RCU, updates: genl_lock() */
-
-/* on the wire */
-enum drbd_packet {
- /* receiver (data socket) */
- P_DATA = 0x00,
- P_DATA_REPLY = 0x01, /* Response to P_DATA_REQUEST */
- P_RS_DATA_REPLY = 0x02, /* Response to P_RS_DATA_REQUEST */
- P_BARRIER = 0x03,
- P_BITMAP = 0x04,
- P_BECOME_SYNC_TARGET = 0x05,
- P_BECOME_SYNC_SOURCE = 0x06,
- P_UNPLUG_REMOTE = 0x07, /* Used at various times to hint the peer */
- P_DATA_REQUEST = 0x08, /* Used to ask for a data block */
- P_RS_DATA_REQUEST = 0x09, /* Used to ask for a data block for resync */
- P_SYNC_PARAM = 0x0a,
- P_PROTOCOL = 0x0b,
- P_UUIDS = 0x0c,
- P_SIZES = 0x0d,
- P_STATE = 0x0e,
- P_SYNC_UUID = 0x0f,
- P_AUTH_CHALLENGE = 0x10,
- P_AUTH_RESPONSE = 0x11,
- P_STATE_CHG_REQ = 0x12,
-
- /* asender (meta socket */
- P_PING = 0x13,
- P_PING_ACK = 0x14,
- P_RECV_ACK = 0x15, /* Used in protocol B */
- P_WRITE_ACK = 0x16, /* Used in protocol C */
- P_RS_WRITE_ACK = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
- P_SUPERSEDED = 0x18, /* Used in proto C, two-primaries conflict detection */
- P_NEG_ACK = 0x19, /* Sent if local disk is unusable */
- P_NEG_DREPLY = 0x1a, /* Local disk is broken... */
- P_NEG_RS_DREPLY = 0x1b, /* Local disk is broken... */
- P_BARRIER_ACK = 0x1c,
- P_STATE_CHG_REPLY = 0x1d,
-
- /* "new" commands, no longer fitting into the ordering scheme above */
-
- P_OV_REQUEST = 0x1e, /* data socket */
- P_OV_REPLY = 0x1f,
- P_OV_RESULT = 0x20, /* meta socket */
- P_CSUM_RS_REQUEST = 0x21, /* data socket */
- P_RS_IS_IN_SYNC = 0x22, /* meta socket */
- P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
- P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */
- /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */
- /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */
- P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
- P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */
- P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
- P_CONN_ST_CHG_REQ = 0x2a, /* data sock: Connection wide state request */
- P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */
- P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */
- P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
-
- P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
- P_MAX_OPT_CMD = 0x101,
-
- /* special command ids for handshake */
-
- P_INITIAL_META = 0xfff1, /* First Packet on the MetaSock */
- P_INITIAL_DATA = 0xfff2, /* First Packet on the Socket */
-
- P_CONNECTION_FEATURES = 0xfffe /* FIXED for the next century! */
-};
+extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
+extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */
extern const char *cmdname(enum drbd_packet cmd);
@@ -253,7 +225,7 @@ struct bm_xfer_ctx {
unsigned bytes[2];
};
-extern void INFO_bm_xfer_stats(struct drbd_conf *mdev,
+extern void INFO_bm_xfer_stats(struct drbd_device *device,
const char *direction, struct bm_xfer_ctx *c);
static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
@@ -275,233 +247,7 @@ static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
#endif
}
-#ifndef __packed
-#define __packed __attribute__((packed))
-#endif
-
-/* This is the layout for a packet on the wire.
- * The byteorder is the network byte order.
- * (except block_id and barrier fields.
- * these are pointers to local structs
- * and have no relevance for the partner,
- * which just echoes them as received.)
- *
- * NOTE that the payload starts at a long aligned offset,
- * regardless of 32 or 64 bit arch!
- */
-struct p_header80 {
- u32 magic;
- u16 command;
- u16 length; /* bytes of data after this header */
-} __packed;
-
-/* Header for big packets, Used for data packets exceeding 64kB */
-struct p_header95 {
- u16 magic; /* use DRBD_MAGIC_BIG here */
- u16 command;
- u32 length;
-} __packed;
-
-struct p_header100 {
- u32 magic;
- u16 volume;
- u16 command;
- u32 length;
- u32 pad;
-} __packed;
-
-extern unsigned int drbd_header_size(struct drbd_tconn *tconn);
-
-/* these defines must not be changed without changing the protocol version */
-#define DP_HARDBARRIER 1 /* depricated */
-#define DP_RW_SYNC 2 /* equals REQ_SYNC */
-#define DP_MAY_SET_IN_SYNC 4
-#define DP_UNPLUG 8 /* not used anymore */
-#define DP_FUA 16 /* equals REQ_FUA */
-#define DP_FLUSH 32 /* equals REQ_FLUSH */
-#define DP_DISCARD 64 /* equals REQ_DISCARD */
-#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
-#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
-
-struct p_data {
- u64 sector; /* 64 bits sector number */
- u64 block_id; /* to identify the request in protocol B&C */
- u32 seq_num;
- u32 dp_flags;
-} __packed;
-
-/*
- * commands which share a struct:
- * p_block_ack:
- * P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
- * P_SUPERSEDED (proto C, two-primaries conflict detection)
- * p_block_req:
- * P_DATA_REQUEST, P_RS_DATA_REQUEST
- */
-struct p_block_ack {
- u64 sector;
- u64 block_id;
- u32 blksize;
- u32 seq_num;
-} __packed;
-
-struct p_block_req {
- u64 sector;
- u64 block_id;
- u32 blksize;
- u32 pad; /* to multiple of 8 Byte */
-} __packed;
-
-/*
- * commands with their own struct for additional fields:
- * P_CONNECTION_FEATURES
- * P_BARRIER
- * P_BARRIER_ACK
- * P_SYNC_PARAM
- * ReportParams
- */
-
-struct p_connection_features {
- u32 protocol_min;
- u32 feature_flags;
- u32 protocol_max;
-
- /* should be more than enough for future enhancements
- * for now, feature_flags and the reserved array shall be zero.
- */
-
- u32 _pad;
- u64 reserved[7];
-} __packed;
-
-struct p_barrier {
- u32 barrier; /* barrier number _handle_ only */
- u32 pad; /* to multiple of 8 Byte */
-} __packed;
-
-struct p_barrier_ack {
- u32 barrier;
- u32 set_size;
-} __packed;
-
-struct p_rs_param {
- u32 resync_rate;
-
- /* Since protocol version 88 and higher. */
- char verify_alg[0];
-} __packed;
-
-struct p_rs_param_89 {
- u32 resync_rate;
- /* protocol version 89: */
- char verify_alg[SHARED_SECRET_MAX];
- char csums_alg[SHARED_SECRET_MAX];
-} __packed;
-
-struct p_rs_param_95 {
- u32 resync_rate;
- char verify_alg[SHARED_SECRET_MAX];
- char csums_alg[SHARED_SECRET_MAX];
- u32 c_plan_ahead;
- u32 c_delay_target;
- u32 c_fill_target;
- u32 c_max_rate;
-} __packed;
-
-enum drbd_conn_flags {
- CF_DISCARD_MY_DATA = 1,
- CF_DRY_RUN = 2,
-};
-
-struct p_protocol {
- u32 protocol;
- u32 after_sb_0p;
- u32 after_sb_1p;
- u32 after_sb_2p;
- u32 conn_flags;
- u32 two_primaries;
-
- /* Since protocol version 87 and higher. */
- char integrity_alg[0];
-
-} __packed;
-
-struct p_uuids {
- u64 uuid[UI_EXTENDED_SIZE];
-} __packed;
-
-struct p_rs_uuid {
- u64 uuid;
-} __packed;
-
-struct p_sizes {
- u64 d_size; /* size of disk */
- u64 u_size; /* user requested size */
- u64 c_size; /* current exported size */
- u32 max_bio_size; /* Maximal size of a BIO */
- u16 queue_order_type; /* not yet implemented in DRBD*/
- u16 dds_flags; /* use enum dds_flags here. */
-} __packed;
-
-struct p_state {
- u32 state;
-} __packed;
-
-struct p_req_state {
- u32 mask;
- u32 val;
-} __packed;
-
-struct p_req_state_reply {
- u32 retcode;
-} __packed;
-
-struct p_drbd06_param {
- u64 size;
- u32 state;
- u32 blksize;
- u32 protocol;
- u32 version;
- u32 gen_cnt[5];
- u32 bit_map_gen[5];
-} __packed;
-
-struct p_block_desc {
- u64 sector;
- u32 blksize;
- u32 pad; /* to multiple of 8 Byte */
-} __packed;
-
-/* Valid values for the encoding field.
- * Bump proto version when changing this. */
-enum drbd_bitmap_code {
- /* RLE_VLI_Bytes = 0,
- * and other bit variants had been defined during
- * algorithm evaluation. */
- RLE_VLI_Bits = 2,
-};
-
-struct p_compressed_bm {
- /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
- * (encoding & 0x80): polarity (set/unset) of first runlength
- * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
- * used to pad up to head.length bytes
- */
- u8 encoding;
-
- u8 code[0];
-} __packed;
-
-struct p_delay_probe93 {
- u32 seq_num; /* sequence number to match the two probe packets */
- u32 offset; /* usecs the probe got sent after the reference time point */
-} __packed;
-
-/*
- * Bitmap packets need to fit within a single page on the sender and receiver,
- * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
- */
-#define DRBD_SOCKET_BUFFER_SIZE 4096
+extern unsigned int drbd_header_size(struct drbd_connection *connection);
/**********************************************************************/
enum drbd_thread_state {
@@ -517,9 +263,10 @@ struct drbd_thread {
struct completion stop;
enum drbd_thread_state t_state;
int (*function) (struct drbd_thread *);
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
int reset_cpu_mask;
- char name[9];
+ const char *name;
};
static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
@@ -535,18 +282,20 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
struct drbd_work {
struct list_head list;
int (*cb)(struct drbd_work *, int cancel);
- union {
- struct drbd_conf *mdev;
- struct drbd_tconn *tconn;
- };
+};
+
+struct drbd_device_work {
+ struct drbd_work w;
+ struct drbd_device *device;
};
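With the object pointer gone from struct drbd_work, a per-device work item is now expected to embed the generic work in a drbd_device_work and recover the device via container_of() inside the callback. A minimal sketch (the callback name is invented):

static int example_work_cb(struct drbd_work *w, int cancel)
{
        struct drbd_device_work *dw =
                container_of(w, struct drbd_device_work, w);
        struct drbd_device *device = dw->device;

        if (!cancel)
                drbd_info(device, "example work ran\n");
        return 0;
}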
#include "drbd_interval.h"
-extern int drbd_wait_misc(struct drbd_conf *, struct drbd_interval *);
+extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);
struct drbd_request {
struct drbd_work w;
+ struct drbd_device *device;
/* if local IO is not allowed, will be NULL.
* if local IO _is_ allowed, holds the locally submitted bio clone,
@@ -579,7 +328,7 @@ struct drbd_request {
};
struct drbd_epoch {
- struct drbd_tconn *tconn;
+ struct drbd_connection *connection;
struct list_head list;
unsigned int barrier_nr;
atomic_t epoch_size; /* increased on every request added. */
@@ -587,6 +336,10 @@ struct drbd_epoch {
unsigned long flags;
};
+/* Prototype declaration of function defined in drbd_receiver.c */
+int drbdd_init(struct drbd_thread *);
+int drbd_asender(struct drbd_thread *);
+
/* drbd_epoch flag bits */
enum {
DE_HAVE_BARRIER_NUMBER,
@@ -599,11 +352,6 @@ enum epoch_event {
EV_CLEANUP = 32, /* used as flag */
};
-struct drbd_wq_barrier {
- struct drbd_work w;
- struct completion done;
-};
-
struct digest_info {
int digest_size;
void *digest;
@@ -611,6 +359,7 @@ struct digest_info {
struct drbd_peer_request {
struct drbd_work w;
+ struct drbd_peer_device *peer_device;
struct drbd_epoch *epoch; /* for writes */
struct page *pages;
atomic_t pending_bios;
@@ -663,7 +412,7 @@ enum {
#define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
-/* flag bits per mdev */
+/* flag bits per device */
enum {
UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */
MD_DIRTY, /* current uuids and flags not yet on disk */
@@ -695,7 +444,7 @@ enum {
READ_BALANCE_RR,
};
-struct drbd_bitmap; /* opaque for drbd_conf */
+struct drbd_bitmap; /* opaque for drbd_device */
/* definition of bits in bm_flags to be used in drbd_bm_lock
* and drbd_bitmap_io and friends. */
@@ -769,7 +518,7 @@ struct drbd_backing_dev {
struct block_device *backing_bdev;
struct block_device *md_bdev;
struct drbd_md md;
- struct disk_conf *disk_conf; /* RCU, for updates: mdev->tconn->conf_update */
+ struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
sector_t known_size; /* last known size of that backing device */
};
@@ -782,8 +531,8 @@ struct bm_io_work {
struct drbd_work w;
char *why;
enum bm_flag flags;
- int (*io_fn)(struct drbd_conf *mdev);
- void (*done)(struct drbd_conf *mdev, int rv);
+ int (*io_fn)(struct drbd_device *device);
+ void (*done)(struct drbd_device *device, int rv);
};
enum write_ordering_e {
@@ -800,7 +549,7 @@ struct fifo_buffer {
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);
-/* flag bits per tconn */
+/* flag bits per connection */
enum {
NET_CONGESTED, /* The data socket is congested */
RESOLVE_CONFLICTS, /* Set on one node, cleared on the peer! */
@@ -822,23 +571,35 @@ enum {
DISCONNECT_SENT,
};
-struct drbd_tconn { /* is a resource from the config file */
- char *name; /* Resource name */
- struct list_head all_tconn; /* linked on global drbd_tconns */
+struct drbd_resource {
+ char *name;
struct kref kref;
- struct idr volumes; /* <tconn, vnr> to mdev mapping */
- enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
+ struct idr devices; /* volume number to device mapping */
+ struct list_head connections;
+ struct list_head resources;
+ struct res_opts res_opts;
+	struct mutex conf_update;	/* mutex for read-copy-update of net_conf and disk_conf */
+ spinlock_t req_lock;
+
unsigned susp:1; /* IO suspended by user */
unsigned susp_nod:1; /* IO suspended because no data */
unsigned susp_fen:1; /* IO suspended because fence peer handler runs */
+
+ cpumask_var_t cpu_mask;
+};
+
+struct drbd_connection {
+ struct list_head connections;
+ struct drbd_resource *resource;
+ struct kref kref;
+ struct idr peer_devices; /* volume number to peer device mapping */
+ enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
struct mutex cstate_mutex; /* Protects graceful disconnects */
unsigned int connect_cnt; /* Inc each time a connection is established */
unsigned long flags;
struct net_conf *net_conf; /* content protected by rcu */
- struct mutex conf_update; /* mutex for ready-copy-update of net_conf and disk_conf */
wait_queue_head_t ping_wait; /* Woken upon reception of a ping, and a state change */
- struct res_opts res_opts;
struct sockaddr_storage my_addr;
int my_addr_len;
@@ -851,12 +612,10 @@ struct drbd_tconn { /* is a resource from the config file */
unsigned long last_received; /* in jiffies, either socket */
unsigned int ko_count;
- spinlock_t req_lock;
-
struct list_head transfer_log; /* all requests not yet fully processed */
struct crypto_hash *cram_hmac_tfm;
- struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by tconn->data->mutex */
+ struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
struct crypto_hash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
struct crypto_hash *csums_tfm;
struct crypto_hash *verify_tfm;
@@ -875,7 +634,6 @@ struct drbd_tconn { /* is a resource from the config file */
struct drbd_thread receiver;
struct drbd_thread worker;
struct drbd_thread asender;
- cpumask_var_t cpu_mask;
/* sender side */
struct drbd_work_queue sender_work;
@@ -903,8 +661,15 @@ struct submit_worker {
struct list_head writes;
};
-struct drbd_conf {
- struct drbd_tconn *tconn;
+struct drbd_peer_device {
+ struct list_head peer_devices;
+ struct drbd_device *device;
+ struct drbd_connection *connection;
+};
+
+struct drbd_device {
+ struct drbd_resource *resource;
+ struct list_head peer_devices;
int vnr; /* volume number within the connection */
struct kref kref;
@@ -920,11 +685,11 @@ struct drbd_conf {
struct gendisk *vdisk;
unsigned long last_reattach_jif;
- struct drbd_work resync_work,
- unplug_work,
- go_diskless,
- md_sync_work,
- start_resync_work;
+ struct drbd_work resync_work;
+ struct drbd_work unplug_work;
+ struct drbd_work go_diskless;
+ struct drbd_work md_sync_work;
+ struct drbd_work start_resync_work;
struct timer_list resync_timer;
struct timer_list md_sync_timer;
struct timer_list start_resync_timer;
@@ -1030,7 +795,7 @@ struct drbd_conf {
struct bm_io_work bm_io_work;
u64 ed_uuid; /* UUID of the exposed data */
struct mutex own_state_mutex;
- struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */
+ struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
char congestion_reason; /* Why we where congested... */
atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
atomic_t rs_sect_ev; /* for submitted resync data rate, both */
@@ -1038,7 +803,7 @@ struct drbd_conf {
int rs_last_events; /* counter of read or write "events" (unit sectors)
* on the lower level device when we last looked. */
int c_sync_rate; /* current resync rate after syncer throttle magic */
- struct fifo_buffer *rs_plan_s; /* correction values of resync planer (RCU, tconn->conn_update) */
+	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, resource->conf_update) */
int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
unsigned int peer_max_bio_size;
@@ -1049,19 +814,46 @@ struct drbd_conf {
struct submit_worker submit;
};
-static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
+static inline struct drbd_device *minor_to_device(unsigned int minor)
{
- return (struct drbd_conf *)idr_find(&minors, minor);
+ return (struct drbd_device *)idr_find(&drbd_devices, minor);
}
-static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
+static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
- return mdev->minor;
+ return list_first_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
}
-static inline struct drbd_conf *vnr_to_mdev(struct drbd_tconn *tconn, int vnr)
+#define for_each_resource(resource, _resources) \
+ list_for_each_entry(resource, _resources, resources)
+
+#define for_each_resource_rcu(resource, _resources) \
+ list_for_each_entry_rcu(resource, _resources, resources)
+
+#define for_each_resource_safe(resource, tmp, _resources) \
+ list_for_each_entry_safe(resource, tmp, _resources, resources)
+
+#define for_each_connection(connection, resource) \
+ list_for_each_entry(connection, &resource->connections, connections)
+
+#define for_each_connection_rcu(connection, resource) \
+ list_for_each_entry_rcu(connection, &resource->connections, connections)
+
+#define for_each_connection_safe(connection, tmp, resource) \
+ list_for_each_entry_safe(connection, tmp, &resource->connections, connections)
+
+#define for_each_peer_device(peer_device, device) \
+ list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
+
+#define for_each_peer_device_rcu(peer_device, device) \
+ list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
+
+#define for_each_peer_device_safe(peer_device, tmp, device) \
+ list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
+
+static inline unsigned int device_to_minor(struct drbd_device *device)
{
- return (struct drbd_conf *)idr_find(&tconn->volumes, vnr);
+ return device->minor;
}
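
For orientation, a minimal usage sketch of the accessors and iterators introduced above; the function below is illustrative only (it is not part of this patch) and any locking around the peer-device list is omitted.

static void example_walk_peer_devices(struct drbd_device *device)
{
	struct drbd_peer_device *peer_device;
	/* call sites that still assume a single peer use first_peer_device() */
	struct drbd_connection *connection = first_peer_device(device)->connection;

	/* multi-peer aware code iterates over all peers instead */
	for_each_peer_device(peer_device, device)
		if (peer_device->connection == connection)
			break;	/* the first entry is what first_peer_device() returned */
}
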
/*
@@ -1075,96 +867,93 @@ enum dds_flags {
DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};
-extern void drbd_init_set_defaults(struct drbd_conf *mdev);
+extern void drbd_init_set_defaults(struct drbd_device *device);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
-extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
-extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
#else
#define drbd_thread_current_set_cpu(A) ({})
-#define drbd_calc_cpu_mask(A) ({})
#endif
-extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr,
+extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
unsigned int set_size);
-extern void tl_clear(struct drbd_tconn *);
-extern void drbd_free_sock(struct drbd_tconn *tconn);
-extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+extern void tl_clear(struct drbd_connection *);
+extern void drbd_free_sock(struct drbd_connection *connection);
+extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
void *buf, size_t size, unsigned msg_flags);
-extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t,
+extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
unsigned);
-extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd);
-extern int drbd_send_protocol(struct drbd_tconn *tconn);
-extern int drbd_send_uuids(struct drbd_conf *mdev);
-extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
-extern void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
-extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
-extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
-extern int drbd_send_current_state(struct drbd_conf *mdev);
-extern int drbd_send_sync_param(struct drbd_conf *mdev);
-extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
+extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
+extern int drbd_send_protocol(struct drbd_connection *connection);
+extern int drbd_send_uuids(struct drbd_peer_device *);
+extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
+extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
+extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
+extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
+extern int drbd_send_current_state(struct drbd_peer_device *);
+extern int drbd_send_sync_param(struct drbd_peer_device *);
+extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
u32 set_size);
-extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet,
+extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
struct drbd_peer_request *);
-extern void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
struct p_block_req *rp);
-extern void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
struct p_data *dp, int data_size);
-extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
+extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
sector_t sector, int blksize, u64 block_id);
-extern int drbd_send_out_of_sync(struct drbd_conf *, struct drbd_request *);
-extern int drbd_send_block(struct drbd_conf *, enum drbd_packet,
+extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
+extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
struct drbd_peer_request *);
-extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
-extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
+extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
+extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
sector_t sector, int size, u64 block_id);
-extern int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector,
+extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
int size, void *digest, int digest_size,
enum drbd_packet cmd);
-extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size);
+extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
-extern int drbd_send_bitmap(struct drbd_conf *mdev);
-extern void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
-extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode);
+extern int drbd_send_bitmap(struct drbd_device *device);
+extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
+extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
-extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
-void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
-
-extern void conn_md_sync(struct drbd_tconn *tconn);
-extern void drbd_md_write(struct drbd_conf *mdev, void *buffer);
-extern void drbd_md_sync(struct drbd_conf *mdev);
-extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
-extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
-extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
-extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
-extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
-extern void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local);
-extern void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
-extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
-extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
+extern void drbd_device_cleanup(struct drbd_device *device);
+void drbd_print_uuids(struct drbd_device *device, const char *text);
+
+extern void conn_md_sync(struct drbd_connection *connection);
+extern void drbd_md_write(struct drbd_device *device, void *buffer);
+extern void drbd_md_sync(struct drbd_device *device);
+extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
+extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
+extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
+extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
+extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
+extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
+extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
+extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
+extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
#ifndef DRBD_DEBUG_MD_SYNC
-extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
+extern void drbd_md_mark_dirty(struct drbd_device *device);
#else
#define drbd_md_mark_dirty(m) drbd_md_mark_dirty_(m, __LINE__ , __func__ )
-extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
+extern void drbd_md_mark_dirty_(struct drbd_device *device,
unsigned int line, const char *func);
#endif
-extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
- void (*done)(struct drbd_conf *, int),
+extern void drbd_queue_bitmap_io(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *),
+ void (*done)(struct drbd_device *, int),
char *why, enum bm_flag flags);
-extern int drbd_bitmap_io(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
+extern int drbd_bitmap_io(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags);
-extern int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
+extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags);
-extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
-extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
-extern void drbd_ldev_destroy(struct drbd_conf *mdev);
+extern int drbd_bmio_set_n_write(struct drbd_device *device);
+extern int drbd_bmio_clear_n_write(struct drbd_device *device);
+extern void drbd_ldev_destroy(struct drbd_device *device);
/* Meta data layout
*
@@ -1350,52 +1139,52 @@ struct bm_extent {
#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
-extern int drbd_bm_init(struct drbd_conf *mdev);
-extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
-extern void drbd_bm_cleanup(struct drbd_conf *mdev);
-extern void drbd_bm_set_all(struct drbd_conf *mdev);
-extern void drbd_bm_clear_all(struct drbd_conf *mdev);
+extern int drbd_bm_init(struct drbd_device *device);
+extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
+extern void drbd_bm_cleanup(struct drbd_device *device);
+extern void drbd_bm_set_all(struct drbd_device *device);
+extern void drbd_bm_clear_all(struct drbd_device *device);
/* set/clear/test only a few bits at a time */
extern int drbd_bm_set_bits(
- struct drbd_conf *mdev, unsigned long s, unsigned long e);
+ struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_clear_bits(
- struct drbd_conf *mdev, unsigned long s, unsigned long e);
+ struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(
- struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
+ struct drbd_device *device, const unsigned long s, const unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock,
* may process the whole bitmap in one go */
-extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
+extern void _drbd_bm_set_bits(struct drbd_device *device,
const unsigned long s, const unsigned long e);
-extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
-extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
-extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
-extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
-extern void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr);
-extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
-extern int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local);
-extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);
-extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
-extern size_t drbd_bm_words(struct drbd_conf *mdev);
-extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
-extern sector_t drbd_bm_capacity(struct drbd_conf *mdev);
+extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
+extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
+extern int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold(local);
+extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
+extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
+extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
+extern size_t drbd_bm_words(struct drbd_device *device);
+extern unsigned long drbd_bm_bits(struct drbd_device *device);
+extern sector_t drbd_bm_capacity(struct drbd_device *device);
#define DRBD_END_OF_BITMAP (~(unsigned long)0)
-extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
+extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
-extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
-extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo);
-extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev);
-extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
-extern int drbd_bm_rs_done(struct drbd_conf *mdev);
+extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
+extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
+extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
+extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
+extern int drbd_bm_rs_done(struct drbd_device *device);
/* for receive_bitmap */
-extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
+extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap */
-extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
+extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
size_t number, unsigned long *buffer);
-extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags);
-extern void drbd_bm_unlock(struct drbd_conf *mdev);
+extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
+extern void drbd_bm_unlock(struct drbd_device *device);
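
As the comment further up notes, the underscore-prefixed bitmap helpers may only be used while the bitmap is locked. A hedged sketch of that pairing (the wrapper function and its flags argument are illustrative, not taken from this patch):

static void example_set_bits_locked(struct drbd_device *device,
				    unsigned long s, unsigned long e,
				    enum bm_flag flags)
{
	drbd_bm_lock(device, "example: set bits", flags);
	_drbd_bm_set_bits(device, s, e);	/* may touch the whole bitmap in one go */
	drbd_bm_unlock(device);
}
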
/* drbd_main.c */
extern struct kmem_cache *drbd_request_cache;
@@ -1439,35 +1228,40 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
extern rwlock_t global_state_lock;
-extern int conn_lowest_minor(struct drbd_tconn *tconn);
-enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr);
-extern void drbd_minor_destroy(struct kref *kref);
+extern int conn_lowest_minor(struct drbd_connection *connection);
+enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr);
+extern void drbd_destroy_device(struct kref *kref);
+extern void drbd_delete_device(struct drbd_device *mdev);
+
+extern struct drbd_resource *drbd_create_resource(const char *name);
+extern void drbd_free_resource(struct drbd_resource *resource);
-extern int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts);
-extern struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts);
-extern void conn_destroy(struct kref *kref);
-struct drbd_tconn *conn_get_by_name(const char *name);
-extern struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
+extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
+extern void drbd_destroy_connection(struct kref *kref);
+extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
void *peer_addr, int peer_addr_len);
-extern void conn_free_crypto(struct drbd_tconn *tconn);
+extern struct drbd_resource *drbd_find_resource(const char *name);
+extern void drbd_destroy_resource(struct kref *kref);
+extern void conn_free_crypto(struct drbd_connection *connection);
extern int proc_details;
/* drbd_req */
extern void do_submit(struct work_struct *ws);
-extern void __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long);
+extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
-extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
+extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
/* drbd_nl.c */
extern int drbd_msg_put_info(const char *info);
-extern void drbd_suspend_io(struct drbd_conf *mdev);
-extern void drbd_resume_io(struct drbd_conf *mdev);
+extern void drbd_suspend_io(struct drbd_device *device);
+extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int);
+extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size {
DS_ERROR_SHRINK = -3,
DS_ERROR_SPACE_MD = -2,
@@ -1478,48 +1272,47 @@ enum determine_dev_size {
DS_GREW_FROM_ZERO = 3,
};
extern enum determine_dev_size
-drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local);
-extern void resync_after_online_grow(struct drbd_conf *);
-extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
-extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
+drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
+extern void resync_after_online_grow(struct drbd_device *);
+extern void drbd_reconsider_max_bio_size(struct drbd_device *device);
+extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
enum drbd_role new_role,
int force);
-extern bool conn_try_outdate_peer(struct drbd_tconn *tconn);
-extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn);
-extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
+extern bool conn_try_outdate_peer(struct drbd_connection *connection);
+extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
+extern int drbd_khelper(struct drbd_device *device, char *cmd);
/* drbd_worker.c */
extern int drbd_worker(struct drbd_thread *thi);
-enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor);
-void drbd_resync_after_changed(struct drbd_conf *mdev);
-extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
-extern void resume_next_sg(struct drbd_conf *mdev);
-extern void suspend_other_sg(struct drbd_conf *mdev);
-extern int drbd_resync_finished(struct drbd_conf *mdev);
+enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
+void drbd_resync_after_changed(struct drbd_device *device);
+extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
+extern void resume_next_sg(struct drbd_device *device);
+extern void suspend_other_sg(struct drbd_device *device);
+extern int drbd_resync_finished(struct drbd_device *device);
/* maybe rather drbd_main.c ? */
-extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
-extern void drbd_md_put_buffer(struct drbd_conf *mdev);
-extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
+extern void *drbd_md_get_buffer(struct drbd_device *device);
+extern void drbd_md_put_buffer(struct drbd_device *device);
+extern int drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev, sector_t sector, int rw);
-extern void drbd_ov_out_of_sync_found(struct drbd_conf *, sector_t, int);
-extern void wait_until_done_or_force_detached(struct drbd_conf *mdev,
+extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
+extern void wait_until_done_or_force_detached(struct drbd_device *device,
struct drbd_backing_dev *bdev, unsigned int *done);
-extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
+extern void drbd_rs_controller_reset(struct drbd_device *device);
-static inline void ov_out_of_sync_print(struct drbd_conf *mdev)
+static inline void ov_out_of_sync_print(struct drbd_device *device)
{
- if (mdev->ov_last_oos_size) {
- dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
- (unsigned long long)mdev->ov_last_oos_start,
- (unsigned long)mdev->ov_last_oos_size);
+ if (device->ov_last_oos_size) {
+ drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
+ (unsigned long long)device->ov_last_oos_start,
+ (unsigned long)device->ov_last_oos_size);
}
- mdev->ov_last_oos_size=0;
+ device->ov_last_oos_size = 0;
}
-extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
-extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *,
- struct drbd_peer_request *, void *);
+extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
+extern void drbd_csum_ee(struct crypto_hash *, struct drbd_peer_request *, void *);
/* worker callbacks */
extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
@@ -1529,10 +1322,8 @@ extern int w_e_end_ov_req(struct drbd_work *, int);
extern int w_ov_finished(struct drbd_work *, int);
extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
-extern int w_make_resync_request(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
-extern int w_prev_work_done(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
@@ -1542,27 +1333,24 @@ extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);
/* drbd_receiver.c */
-extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
-extern int drbd_submit_peer_request(struct drbd_conf *,
+extern int drbd_receiver(struct drbd_thread *thi);
+extern int drbd_asender(struct drbd_thread *thi);
+extern int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector);
+extern int drbd_submit_peer_request(struct drbd_device *,
struct drbd_peer_request *, const unsigned,
const int);
-extern int drbd_free_peer_reqs(struct drbd_conf *, struct list_head *);
-extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *, u64,
+extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
+extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
sector_t, unsigned int,
gfp_t) __must_hold(local);
-extern void __drbd_free_peer_req(struct drbd_conf *, struct drbd_peer_request *,
+extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
int);
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
-extern struct page *drbd_alloc_pages(struct drbd_conf *, unsigned int, bool);
-extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
-extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
-extern void conn_flush_workqueue(struct drbd_tconn *tconn);
-extern int drbd_connected(struct drbd_conf *mdev);
-static inline void drbd_flush_workqueue(struct drbd_conf *mdev)
-{
- conn_flush_workqueue(mdev->tconn);
-}
+extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
+extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
+extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
+extern int drbd_connected(struct drbd_peer_device *);
/* Yes, there is kernel_setsockopt, but only since 2.6.18.
* So we have our own copy of it here. */
@@ -1613,7 +1401,7 @@ static inline void drbd_tcp_quickack(struct socket *sock)
(char*)&val, sizeof(val));
}
-void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo);
+void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo);
/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
@@ -1622,29 +1410,29 @@ extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);
/* drbd_actlog.c */
-extern int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i);
-extern void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate);
-extern bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i);
-extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate);
-extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
-extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
-extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
-extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
-extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
-extern int drbd_rs_del_all(struct drbd_conf *mdev);
-extern void drbd_rs_failed_io(struct drbd_conf *mdev,
+extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
+extern void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate);
+extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
+extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate);
+extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
+extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
+extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
+extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
+extern void drbd_rs_cancel_all(struct drbd_device *device);
+extern int drbd_rs_del_all(struct drbd_device *device);
+extern void drbd_rs_failed_io(struct drbd_device *device,
sector_t sector, int size);
-extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
-extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
+extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
+extern void __drbd_set_in_sync(struct drbd_device *device, sector_t sector,
int size, const char *file, const unsigned int line);
-#define drbd_set_in_sync(mdev, sector, size) \
- __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
-extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
+#define drbd_set_in_sync(device, sector, size) \
+ __drbd_set_in_sync(device, sector, size, __FILE__, __LINE__)
+extern int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector,
int size, const char *file, const unsigned int line);
-#define drbd_set_out_of_sync(mdev, sector, size) \
- __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
-extern void drbd_al_shrink(struct drbd_conf *mdev);
-extern int drbd_initialize_al(struct drbd_conf *, void *);
+#define drbd_set_out_of_sync(device, sector, size) \
+ __drbd_set_out_of_sync(device, sector, size, __FILE__, __LINE__)
+extern void drbd_al_shrink(struct drbd_device *device);
+extern int drbd_initialize_al(struct drbd_device *, void *);
/* drbd_nl.c */
/* state info broadcast */
@@ -1661,7 +1449,7 @@ struct sib_info {
};
};
};
-void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib);
+void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
/*
* inline helper functions
@@ -1690,26 +1478,27 @@ static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_r
}
static inline enum drbd_state_rv
-_drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+_drbd_set_state(struct drbd_device *device, union drbd_state ns,
enum chg_state_flags flags, struct completion *done)
{
enum drbd_state_rv rv;
read_lock(&global_state_lock);
- rv = __drbd_set_state(mdev, ns, flags, done);
+ rv = __drbd_set_state(device, ns, flags, done);
read_unlock(&global_state_lock);
return rv;
}
-static inline union drbd_state drbd_read_state(struct drbd_conf *mdev)
+static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
+ struct drbd_resource *resource = device->resource;
union drbd_state rv;
- rv.i = mdev->state.i;
- rv.susp = mdev->tconn->susp;
- rv.susp_nod = mdev->tconn->susp_nod;
- rv.susp_fen = mdev->tconn->susp_fen;
+ rv.i = device->state.i;
+ rv.susp = resource->susp;
+ rv.susp_nod = resource->susp_nod;
+ rv.susp_fen = resource->susp_fen;
return rv;
}
@@ -1722,22 +1511,22 @@ enum drbd_force_detach_flags {
};
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
-static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
+static inline void __drbd_chk_io_error_(struct drbd_device *device,
enum drbd_force_detach_flags df,
const char *where)
{
enum drbd_io_error_p ep;
rcu_read_lock();
- ep = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
+ ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
rcu_read_unlock();
switch (ep) {
case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Local IO failed in %s.\n", where);
- if (mdev->state.disk > D_INCONSISTENT)
- _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
+ drbd_err(device, "Local IO failed in %s.\n", where);
+ if (device->state.disk > D_INCONSISTENT)
+ _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
/* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
@@ -1763,14 +1552,14 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
* we read meta data only once during attach,
* which will fail in case of errors.
*/
- set_bit(WAS_IO_ERROR, &mdev->flags);
+ set_bit(WAS_IO_ERROR, &device->flags);
if (df == DRBD_READ_ERROR)
- set_bit(WAS_READ_ERROR, &mdev->flags);
+ set_bit(WAS_READ_ERROR, &device->flags);
if (df == DRBD_FORCE_DETACH)
- set_bit(FORCE_DETACH, &mdev->flags);
- if (mdev->state.disk > D_FAILED) {
- _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
- dev_err(DEV,
+ set_bit(FORCE_DETACH, &device->flags);
+ if (device->state.disk > D_FAILED) {
+ _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
+ drbd_err(device,
"Local IO failed in %s. Detaching...\n", where);
}
break;
@@ -1779,21 +1568,21 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
/**
* drbd_chk_io_error: Handle the on_io_error setting, should be called from all io completion handlers
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @error: Error code passed to the IO completion callback
* @forcedetach: Force detach. I.e. the error happened while accessing the meta data
*
* See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
*/
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
-static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
+static inline void drbd_chk_io_error_(struct drbd_device *device,
int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
if (error) {
unsigned long flags;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- __drbd_chk_io_error_(mdev, forcedetach, where);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ __drbd_chk_io_error_(device, forcedetach, where);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
}
}
@@ -1916,31 +1705,33 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
wake_up(&q->q_wait);
}
-static inline void wake_asender(struct drbd_tconn *tconn)
+extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
+
+static inline void wake_asender(struct drbd_connection *connection)
{
- if (test_bit(SIGNAL_ASENDER, &tconn->flags))
- force_sig(DRBD_SIG, tconn->asender.task);
+ if (test_bit(SIGNAL_ASENDER, &connection->flags))
+ force_sig(DRBD_SIG, connection->asender.task);
}
-static inline void request_ping(struct drbd_tconn *tconn)
+static inline void request_ping(struct drbd_connection *connection)
{
- set_bit(SEND_PING, &tconn->flags);
- wake_asender(tconn);
+ set_bit(SEND_PING, &connection->flags);
+ wake_asender(connection);
}
-extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *);
-extern void *drbd_prepare_command(struct drbd_conf *, struct drbd_socket *);
-extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *,
+extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
+extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
+extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
enum drbd_packet, unsigned int, void *,
unsigned int);
-extern int drbd_send_command(struct drbd_conf *, struct drbd_socket *,
+extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
enum drbd_packet, unsigned int, void *,
unsigned int);
-extern int drbd_send_ping(struct drbd_tconn *tconn);
-extern int drbd_send_ping_ack(struct drbd_tconn *tconn);
-extern int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state);
-extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state);
+extern int drbd_send_ping(struct drbd_connection *connection);
+extern int drbd_send_ping_ack(struct drbd_connection *connection);
+extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
+extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
static inline void drbd_thread_stop(struct drbd_thread *thi)
{
@@ -1979,22 +1770,22 @@ static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
* _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
* [from tl_clear_barrier]
*/
-static inline void inc_ap_pending(struct drbd_conf *mdev)
+static inline void inc_ap_pending(struct drbd_device *device)
{
- atomic_inc(&mdev->ap_pending_cnt);
+ atomic_inc(&device->ap_pending_cnt);
}
#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
- if (atomic_read(&mdev->which) < 0) \
- dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n", \
+ if (atomic_read(&device->which) < 0) \
+ drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
func, line, \
- atomic_read(&mdev->which))
+ atomic_read(&device->which))
-#define dec_ap_pending(mdev) _dec_ap_pending(mdev, __FUNCTION__, __LINE__)
-static inline void _dec_ap_pending(struct drbd_conf *mdev, const char *func, int line)
+#define dec_ap_pending(device) _dec_ap_pending(device, __FUNCTION__, __LINE__)
+static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
- if (atomic_dec_and_test(&mdev->ap_pending_cnt))
- wake_up(&mdev->misc_wait);
+ if (atomic_dec_and_test(&device->ap_pending_cnt))
+ wake_up(&device->misc_wait);
ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}
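
The dec_ap_pending() wrapper above exists only to forward the caller's location into the negative-counter check. A standalone user-space sketch of the same pattern (all names below are made up for the example):

#include <stdio.h>

#define dec_counter(c)	_dec_counter(c, __FUNCTION__, __LINE__)

static void _dec_counter(int *counter, const char *func, int line)
{
	if (--*counter < 0)
		fprintf(stderr, "in %s:%d: counter = %d < 0 !\n",
			func, line, *counter);
}

int main(void)
{
	int ap_pending = 0;

	dec_counter(&ap_pending);	/* underflow: the warning names this call site */
	return 0;
}
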
@@ -2004,15 +1795,15 @@ static inline void _dec_ap_pending(struct drbd_conf *mdev, const char *func, int
* C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER)
* (or P_NEG_ACK with ID_SYNCER)
*/
-static inline void inc_rs_pending(struct drbd_conf *mdev)
+static inline void inc_rs_pending(struct drbd_device *device)
{
- atomic_inc(&mdev->rs_pending_cnt);
+ atomic_inc(&device->rs_pending_cnt);
}
-#define dec_rs_pending(mdev) _dec_rs_pending(mdev, __FUNCTION__, __LINE__)
-static inline void _dec_rs_pending(struct drbd_conf *mdev, const char *func, int line)
+#define dec_rs_pending(device) _dec_rs_pending(device, __FUNCTION__, __LINE__)
+static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
- atomic_dec(&mdev->rs_pending_cnt);
+ atomic_dec(&device->rs_pending_cnt);
ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}
@@ -2025,103 +1816,104 @@ static inline void _dec_rs_pending(struct drbd_conf *mdev, const char *func, int
* receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
* receive_Barrier_* we need to send a P_BARRIER_ACK
*/
-static inline void inc_unacked(struct drbd_conf *mdev)
+static inline void inc_unacked(struct drbd_device *device)
{
- atomic_inc(&mdev->unacked_cnt);
+ atomic_inc(&device->unacked_cnt);
}
-#define dec_unacked(mdev) _dec_unacked(mdev, __FUNCTION__, __LINE__)
-static inline void _dec_unacked(struct drbd_conf *mdev, const char *func, int line)
+#define dec_unacked(device) _dec_unacked(device, __FUNCTION__, __LINE__)
+static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
- atomic_dec(&mdev->unacked_cnt);
+ atomic_dec(&device->unacked_cnt);
ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
-#define sub_unacked(mdev, n) _sub_unacked(mdev, n, __FUNCTION__, __LINE__)
-static inline void _sub_unacked(struct drbd_conf *mdev, int n, const char *func, int line)
+#define sub_unacked(device, n) _sub_unacked(device, n, __FUNCTION__, __LINE__)
+static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
- atomic_sub(n, &mdev->unacked_cnt);
+ atomic_sub(n, &device->unacked_cnt);
ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
/**
- * get_ldev() - Increase the ref count on mdev->ldev. Returns 0 if there is no ldev
+ * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
* @M: DRBD device.
*
- * You have to call put_ldev() when finished working with mdev->ldev.
+ * You have to call put_ldev() when finished working with device->ldev.
*/
#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))
-static inline void put_ldev(struct drbd_conf *mdev)
+static inline void put_ldev(struct drbd_device *device)
{
- int i = atomic_dec_return(&mdev->local_cnt);
+ int i = atomic_dec_return(&device->local_cnt);
/* This may be called from some endio handler,
* so we must not sleep here. */
__release(local);
- D_ASSERT(i >= 0);
+ D_ASSERT(device, i >= 0);
if (i == 0) {
- if (mdev->state.disk == D_DISKLESS)
+ if (device->state.disk == D_DISKLESS)
/* even internal references gone, safe to destroy */
- drbd_ldev_destroy(mdev);
- if (mdev->state.disk == D_FAILED) {
+ drbd_ldev_destroy(device);
+ if (device->state.disk == D_FAILED) {
/* all application IO references gone. */
- if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
+ if (!test_and_set_bit(GO_DISKLESS, &device->flags))
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &device->go_diskless);
}
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
}
}
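
A minimal sketch of how the reference managed by the hunk above is typically taken and dropped; the function itself is illustrative only.

static void example_touch_backing_dev(struct drbd_device *device)
{
	if (!get_ldev(device))	/* no local disk in a state >= D_INCONSISTENT */
		return;

	/* device->ldev may only be dereferenced while this reference is held */

	put_ldev(device);	/* dropping the last reference may destroy the ldev
				 * or queue the go_diskless work, as shown above */
}
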
#ifndef __CHECKER__
-static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
+static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
int io_allowed;
/* never get a reference while D_DISKLESS */
- if (mdev->state.disk == D_DISKLESS)
+ if (device->state.disk == D_DISKLESS)
return 0;
- atomic_inc(&mdev->local_cnt);
- io_allowed = (mdev->state.disk >= mins);
+ atomic_inc(&device->local_cnt);
+ io_allowed = (device->state.disk >= mins);
if (!io_allowed)
- put_ldev(mdev);
+ put_ldev(device);
return io_allowed;
}
#else
-extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
+extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif
/* you must have a "get_ldev" reference */
-static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
+static inline void drbd_get_syncer_progress(struct drbd_device *device,
unsigned long *bits_left, unsigned int *per_mil_done)
{
/* this is to break it at compile time when we change that, in case we
* want to support more than (1<<32) bits on a 32bit arch. */
- typecheck(unsigned long, mdev->rs_total);
+ typecheck(unsigned long, device->rs_total);
/* note: both rs_total and rs_left are in bits, i.e. in
* units of BM_BLOCK_SIZE.
* for the percentage, we don't care. */
- if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
- *bits_left = mdev->ov_left;
+ if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
+ *bits_left = device->ov_left;
else
- *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
+ *bits_left = drbd_bm_total_weight(device) - device->rs_failed;
/* >> 10 to prevent overflow,
* +1 to prevent division by zero */
- if (*bits_left > mdev->rs_total) {
+ if (*bits_left > device->rs_total) {
/* doh. maybe a logic bug somewhere.
* may also be just a race condition
* between this and a disconnect during sync.
* for now, just prevent in-kernel buffer overflow.
*/
smp_rmb();
- dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
- drbd_conn_str(mdev->state.conn),
- *bits_left, mdev->rs_total, mdev->rs_failed);
+ drbd_warn(device, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
+ drbd_conn_str(device->state.conn),
+ *bits_left, device->rs_total, device->rs_failed);
*per_mil_done = 0;
} else {
/* Make sure the division happens in long context.
@@ -2133,9 +1925,9 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
* Note: currently we don't support such large bitmaps on 32bit
* arch anyways, but no harm done to be prepared for it here.
*/
- unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
+ unsigned int shift = device->rs_total > UINT_MAX ? 16 : 10;
unsigned long left = *bits_left >> shift;
- unsigned long total = 1UL + (mdev->rs_total >> shift);
+ unsigned long total = 1UL + (device->rs_total >> shift);
unsigned long tmp = 1000UL - left * 1000UL/total;
*per_mil_done = tmp;
}
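
The arithmetic above shifts both counters down (by 10, or by 16 for very large bitmaps) before multiplying by 1000, so the product cannot overflow an unsigned long on a 32-bit machine. A standalone restatement of that computation, with an illustrative input (assuming the usual 4 KiB bitmap granularity):

#include <limits.h>
#include <stdio.h>

/* shift-then-divide per-mille logic, as in the hunk above */
static unsigned int per_mil_done(unsigned long bits_left, unsigned long rs_total)
{
	unsigned int shift = rs_total > UINT_MAX ? 16 : 10;
	unsigned long left = bits_left >> shift;
	unsigned long total = 1UL + (rs_total >> shift);	/* +1 avoids div by zero */

	return 1000UL - left * 1000UL / total;
}

int main(void)
{
	/* 1 GiB of a 4 GiB device still out of sync: 2^18 of 2^20 bits left */
	printf("%u\n", per_mil_done(1UL << 18, 1UL << 20));	/* prints 751, ~75% done */
	return 0;
}
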
@@ -2145,22 +1937,22 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
/* this throttles on-the-fly application requests
* according to max_buffers settings;
* maybe re-implement using semaphores? */
-static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
+static inline int drbd_get_max_buffers(struct drbd_device *device)
{
struct net_conf *nc;
int mxb;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
rcu_read_unlock();
return mxb;
}
-static inline int drbd_state_is_stable(struct drbd_conf *mdev)
+static inline int drbd_state_is_stable(struct drbd_device *device)
{
- union drbd_dev_state s = mdev->state;
+ union drbd_dev_state s = device->state;
/* DO NOT add a default clause, we want the compiler to warn us
* for any newly introduced state we may have forgotten to add here */
@@ -2194,7 +1986,7 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
/* Allow IO in BM exchange states with new protocols */
case C_WF_BITMAP_S:
- if (mdev->tconn->agreed_pro_version < 96)
+ if (first_peer_device(device)->connection->agreed_pro_version < 96)
return 0;
break;
@@ -2228,20 +2020,20 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
return 1;
}
-static inline int drbd_suspended(struct drbd_conf *mdev)
+static inline int drbd_suspended(struct drbd_device *device)
{
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_resource *resource = device->resource;
- return tconn->susp || tconn->susp_fen || tconn->susp_nod;
+ return resource->susp || resource->susp_fen || resource->susp_nod;
}
-static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
+static inline bool may_inc_ap_bio(struct drbd_device *device)
{
- int mxb = drbd_get_max_buffers(mdev);
+ int mxb = drbd_get_max_buffers(device);
- if (drbd_suspended(mdev))
+ if (drbd_suspended(device))
return false;
- if (test_bit(SUSPEND_IO, &mdev->flags))
+ if (test_bit(SUSPEND_IO, &device->flags))
return false;
/* to avoid potential deadlock or bitmap corruption,
@@ -2249,32 +2041,32 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
* to start during "stable" states. */
/* no new io accepted when attaching or detaching the disk */
- if (!drbd_state_is_stable(mdev))
+ if (!drbd_state_is_stable(device))
return false;
/* since some older kernels don't have atomic_add_unless,
* and we are within the spinlock anyways, we have this workaround. */
- if (atomic_read(&mdev->ap_bio_cnt) > mxb)
+ if (atomic_read(&device->ap_bio_cnt) > mxb)
return false;
- if (test_bit(BITMAP_IO, &mdev->flags))
+ if (test_bit(BITMAP_IO, &device->flags))
return false;
return true;
}
-static inline bool inc_ap_bio_cond(struct drbd_conf *mdev)
+static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
bool rv = false;
- spin_lock_irq(&mdev->tconn->req_lock);
- rv = may_inc_ap_bio(mdev);
+ spin_lock_irq(&device->resource->req_lock);
+ rv = may_inc_ap_bio(device);
if (rv)
- atomic_inc(&mdev->ap_bio_cnt);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ atomic_inc(&device->ap_bio_cnt);
+ spin_unlock_irq(&device->resource->req_lock);
return rv;
}
-static inline void inc_ap_bio(struct drbd_conf *mdev)
+static inline void inc_ap_bio(struct drbd_device *device)
{
/* we wait here
* as long as the device is suspended
@@ -2284,42 +2076,44 @@ static inline void inc_ap_bio(struct drbd_conf *mdev)
* to avoid races with the reconnect code,
* we need to atomic_inc within the spinlock. */
- wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev));
+ wait_event(device->misc_wait, inc_ap_bio_cond(device));
}
-static inline void dec_ap_bio(struct drbd_conf *mdev)
+static inline void dec_ap_bio(struct drbd_device *device)
{
- int mxb = drbd_get_max_buffers(mdev);
- int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
+ int mxb = drbd_get_max_buffers(device);
+ int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
- D_ASSERT(ap_bio >= 0);
+ D_ASSERT(device, ap_bio >= 0);
- if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
- if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
+ if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
+ if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
+ drbd_queue_work(&first_peer_device(device)->
+ connection->sender_work,
+ &device->bm_io_work.w);
}
/* this currently does wake_up for every dec_ap_bio!
* maybe rather introduce some type of hysteresis?
* e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
if (ap_bio < mxb)
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
}
-static inline bool verify_can_do_stop_sector(struct drbd_conf *mdev)
+static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
- return mdev->tconn->agreed_pro_version >= 97 &&
- mdev->tconn->agreed_pro_version != 100;
+ return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
+ first_peer_device(device)->connection->agreed_pro_version != 100;
}
-static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
+static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
- int changed = mdev->ed_uuid != val;
- mdev->ed_uuid = val;
+ int changed = device->ed_uuid != val;
+ device->ed_uuid = val;
return changed;
}
-static inline int drbd_queue_order_type(struct drbd_conf *mdev)
+static inline int drbd_queue_order_type(struct drbd_device *device)
{
/* sorry, we currently have no working implementation
* of distributed TCQ stuff */
@@ -2329,23 +2123,29 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev)
return QUEUE_ORDERED_NONE;
}
-static inline void drbd_md_flush(struct drbd_conf *mdev)
+static inline void drbd_md_flush(struct drbd_device *device)
{
int r;
- if (mdev->ldev == NULL) {
- dev_warn(DEV, "mdev->ldev == NULL in drbd_md_flush\n");
+ if (device->ldev == NULL) {
+ drbd_warn(device, "device->ldev == NULL in drbd_md_flush\n");
return;
}
- if (test_bit(MD_NO_FUA, &mdev->flags))
+ if (test_bit(MD_NO_FUA, &device->flags))
return;
- r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
+ r = blkdev_issue_flush(device->ldev->md_bdev, GFP_NOIO, NULL);
if (r) {
- set_bit(MD_NO_FUA, &mdev->flags);
- dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
+ set_bit(MD_NO_FUA, &device->flags);
+ drbd_err(device, "meta data flush failed with status %d, disabling md-flushes\n", r);
}
}
+static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
+{
+ return list_first_entry(&resource->connections,
+ struct drbd_connection, connections);
+}
+
#endif
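
Putting the new iterators together with first_connection(), a hedged sketch of walking the global resource list (drbd_resources is the list declared in drbd_main.c below; the locking such a walk would need in real code is omitted, and the function is illustrative only):

static void example_dump_topology(void)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	for_each_resource(resource, &drbd_resources) {
		for_each_connection(connection, resource)
			drbd_info(resource, "connection %p%s\n", connection,
				  connection == first_connection(resource) ?
				  " (first)" : "");
	}
}
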
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 929468e1512a..331e5cc1227d 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -52,16 +52,12 @@
#include <linux/drbd_limits.h>
#include "drbd_int.h"
+#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_vli.h"
static DEFINE_MUTEX(drbd_main_mutex);
-int drbdd_init(struct drbd_thread *);
-int drbd_worker(struct drbd_thread *);
-int drbd_asender(struct drbd_thread *);
-
-int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
@@ -118,8 +114,8 @@ module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
* as member "struct gendisk *vdisk;"
*/
-struct idr minors;
-struct list_head drbd_tconns; /* list of struct drbd_tconn */
+struct idr drbd_devices;
+struct list_head drbd_resources;
struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache; /* peer requests */
@@ -166,15 +162,15 @@ struct bio *bio_alloc_drbd(gfp_t gfp_mask)
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
*/
-int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
+int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
int io_allowed;
- atomic_inc(&mdev->local_cnt);
- io_allowed = (mdev->state.disk >= mins);
+ atomic_inc(&device->local_cnt);
+ io_allowed = (device->state.disk >= mins);
if (!io_allowed) {
- if (atomic_dec_and_test(&mdev->local_cnt))
- wake_up(&mdev->misc_wait);
+ if (atomic_dec_and_test(&device->local_cnt))
+ wake_up(&device->misc_wait);
}
return io_allowed;
}
@@ -183,7 +179,7 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
/**
* tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
- * @tconn: DRBD connection.
+ * @connection: DRBD connection.
* @barrier_nr: Expected identifier of the DRBD write barrier packet.
* @set_size: Expected number of requests before that barrier.
*
@@ -191,7 +187,7 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
* epoch of not yet barrier-acked requests, this function will cause a
* termination of the connection.
*/
-void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
+void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
unsigned int set_size)
{
struct drbd_request *r;
@@ -199,11 +195,11 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
int expect_epoch = 0;
int expect_size = 0;
- spin_lock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
/* find oldest not yet barrier-acked write request,
* count writes in its epoch. */
- list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+ list_for_each_entry(r, &connection->transfer_log, tl_requests) {
const unsigned s = r->rq_state;
if (!req) {
if (!(s & RQ_WRITE))
@@ -228,18 +224,18 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
/* first some paranoia code */
if (req == NULL) {
- conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
+ drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
barrier_nr);
goto bail;
}
if (expect_epoch != barrier_nr) {
- conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
+ drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
barrier_nr, expect_epoch);
goto bail;
}
if (expect_size != set_size) {
- conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
+ drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
barrier_nr, set_size, expect_size);
goto bail;
}
@@ -248,90 +244,91 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
/* this extra list walk restart is paranoia,
* to catch requests being barrier-acked "unexpectedly".
* It usually should find the same req again, or some READ preceding it. */
- list_for_each_entry(req, &tconn->transfer_log, tl_requests)
+ list_for_each_entry(req, &connection->transfer_log, tl_requests)
if (req->epoch == expect_epoch)
break;
- list_for_each_entry_safe_from(req, r, &tconn->transfer_log, tl_requests) {
+ list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
if (req->epoch != expect_epoch)
break;
_req_mod(req, BARRIER_ACKED);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
return;
bail:
- spin_unlock_irq(&tconn->req_lock);
- conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
+ spin_unlock_irq(&connection->resource->req_lock);
+ conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
/**
* _tl_restart() - Walks the transfer log, and applies an action to all requests
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @what: The action/event to perform with all request objects
*
* @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
* RESTART_FROZEN_DISK_IO.
*/
/* must hold resource->req_lock */
-void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
struct drbd_request *req, *r;
- list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
+ list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
_req_mod(req, what);
}
-void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
- spin_lock_irq(&tconn->req_lock);
- _tl_restart(tconn, what);
- spin_unlock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
+ _tl_restart(connection, what);
+ spin_unlock_irq(&connection->resource->req_lock);
}
/**
* tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* This is called after the connection to the peer was lost. The storage covered
 * by the requests in the transfer log gets marked as out of sync. Called from the
* receiver thread and the worker thread.
*/
-void tl_clear(struct drbd_tconn *tconn)
+void tl_clear(struct drbd_connection *connection)
{
- tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
+ tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}
/**
- * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
- * @mdev: DRBD device.
+ * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
+ * @device: DRBD device.
*/
-void tl_abort_disk_io(struct drbd_conf *mdev)
+void tl_abort_disk_io(struct drbd_device *device)
{
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
struct drbd_request *req, *r;
- spin_lock_irq(&tconn->req_lock);
- list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
+ spin_lock_irq(&connection->resource->req_lock);
+ list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
if (!(req->rq_state & RQ_LOCAL_PENDING))
continue;
- if (req->w.mdev != mdev)
+ if (req->device != device)
continue;
_req_mod(req, ABORT_DISK_IO);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
}
static int drbd_thread_setup(void *arg)
{
struct drbd_thread *thi = (struct drbd_thread *) arg;
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_resource *resource = thi->resource;
unsigned long flags;
int retval;
snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
- thi->name[0], thi->tconn->name);
+ thi->name[0],
+ resource->name);
restart:
retval = thi->function(thi);
@@ -349,7 +346,7 @@ restart:
*/
if (thi->t_state == RESTARTING) {
- conn_info(tconn, "Restarting %s thread\n", thi->name);
+ drbd_info(resource, "Restarting %s thread\n", thi->name);
thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
goto restart;
@@ -361,29 +358,32 @@ restart:
complete_all(&thi->stop);
spin_unlock_irqrestore(&thi->t_lock, flags);
- conn_info(tconn, "Terminating %s\n", current->comm);
+ drbd_info(resource, "Terminating %s\n", current->comm);
/* Release mod reference taken when thread was started */
- kref_put(&tconn->kref, &conn_destroy);
+ if (thi->connection)
+ kref_put(&thi->connection->kref, drbd_destroy_connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
module_put(THIS_MODULE);
return retval;
}
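/*
 * Editor's note: illustrative sketch only, not part of this patch.  The
 * pattern in drbd_thread_setup()/drbd_thread_start() -- take references on
 * the objects a thread will use *before* starting it, and have the thread
 * (or the failed start path) drop them -- can be shown with POSIX threads
 * and a plain refcount.  All names here (struct res, res_get/res_put) are
 * hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct res {
	atomic_int refs;
	const char *name;
};

static void res_get(struct res *r) { atomic_fetch_add(&r->refs, 1); }

static void res_put(struct res *r)
{
	if (atomic_fetch_sub(&r->refs, 1) == 1) {
		printf("destroying %s\n", r->name);
		free(r);
	}
}

static void *worker(void *arg)
{
	struct res *r = arg;

	printf("worker running on %s\n", r->name);
	res_put(r);		/* release the reference taken by the starter */
	return NULL;
}

static int start_worker(struct res *r, pthread_t *t)
{
	res_get(r);		/* pin the object for the new thread */
	if (pthread_create(t, NULL, worker, r)) {
		res_put(r);	/* start failed: undo the pin ourselves */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct res *r = malloc(sizeof(*r));
	pthread_t t;

	atomic_init(&r->refs, 1);
	r->name = "resource0";
	if (start_worker(r, &t) == 0)
		pthread_join(t, NULL);
	res_put(r);		/* drop the creator's own reference */
	return 0;
}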
-static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
- int (*func) (struct drbd_thread *), char *name)
+static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
+ int (*func) (struct drbd_thread *), const char *name)
{
spin_lock_init(&thi->t_lock);
thi->task = NULL;
thi->t_state = NONE;
thi->function = func;
- thi->tconn = tconn;
- strncpy(thi->name, name, ARRAY_SIZE(thi->name));
+ thi->resource = resource;
+ thi->connection = NULL;
+ thi->name = name;
}
int drbd_thread_start(struct drbd_thread *thi)
{
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_resource *resource = thi->resource;
struct task_struct *nt;
unsigned long flags;
@@ -393,17 +393,19 @@ int drbd_thread_start(struct drbd_thread *thi)
switch (thi->t_state) {
case NONE:
- conn_info(tconn, "Starting %s thread (from %s [%d])\n",
+ drbd_info(resource, "Starting %s thread (from %s [%d])\n",
thi->name, current->comm, current->pid);
/* Get ref on module for thread - this is released when thread exits */
if (!try_module_get(THIS_MODULE)) {
- conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
+ drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
spin_unlock_irqrestore(&thi->t_lock, flags);
return false;
}
- kref_get(&thi->tconn->kref);
+ kref_get(&resource->kref);
+ if (thi->connection)
+ kref_get(&thi->connection->kref);
init_completion(&thi->stop);
thi->reset_cpu_mask = 1;
@@ -412,12 +414,14 @@ int drbd_thread_start(struct drbd_thread *thi)
flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
nt = kthread_create(drbd_thread_setup, (void *) thi,
- "drbd_%c_%s", thi->name[0], thi->tconn->name);
+ "drbd_%c_%s", thi->name[0], thi->resource->name);
if (IS_ERR(nt)) {
- conn_err(tconn, "Couldn't start thread\n");
+ drbd_err(resource, "Couldn't start thread\n");
- kref_put(&tconn->kref, &conn_destroy);
+ if (thi->connection)
+ kref_put(&thi->connection->kref, drbd_destroy_connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
module_put(THIS_MODULE);
return false;
}
@@ -429,7 +433,7 @@ int drbd_thread_start(struct drbd_thread *thi)
break;
case EXITING:
thi->t_state = RESTARTING;
- conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
+ drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
thi->name, current->comm, current->pid);
/* fall through */
case RUNNING:
@@ -478,65 +482,60 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
wait_for_completion(&thi->stop);
}
-static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
-{
- struct drbd_thread *thi =
- task == tconn->receiver.task ? &tconn->receiver :
- task == tconn->asender.task ? &tconn->asender :
- task == tconn->worker.task ? &tconn->worker : NULL;
-
- return thi;
-}
-
-char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
+int conn_lowest_minor(struct drbd_connection *connection)
{
- struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
- return thi ? thi->name : task->comm;
-}
-
-int conn_lowest_minor(struct drbd_tconn *tconn)
-{
- struct drbd_conf *mdev;
- int vnr = 0, m;
+ struct drbd_peer_device *peer_device;
+ int vnr = 0, minor = -1;
rcu_read_lock();
- mdev = idr_get_next(&tconn->volumes, &vnr);
- m = mdev ? mdev_to_minor(mdev) : -1;
+ peer_device = idr_get_next(&connection->peer_devices, &vnr);
+ if (peer_device)
+ minor = device_to_minor(peer_device->device);
rcu_read_unlock();
- return m;
+ return minor;
}
#ifdef CONFIG_SMP
/**
* drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
- * @mdev: DRBD device.
*
- * Forces all threads of a device onto the same CPU. This is beneficial for
+ * Forces all threads of a resource onto the same CPU. This is beneficial for
* DRBD's performance. May be overwritten by user's configuration.
*/
-void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
+static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
- int ord, cpu;
+ unsigned int *resources_per_cpu, min_index = ~0;
- /* user override. */
- if (cpumask_weight(tconn->cpu_mask))
- return;
+ resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
+ if (resources_per_cpu) {
+ struct drbd_resource *resource;
+ unsigned int cpu, min = ~0;
- ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
- for_each_online_cpu(cpu) {
- if (ord-- == 0) {
- cpumask_set_cpu(cpu, tconn->cpu_mask);
- return;
+ rcu_read_lock();
+ for_each_resource_rcu(resource, &drbd_resources) {
+ for_each_cpu(cpu, resource->cpu_mask)
+ resources_per_cpu[cpu]++;
}
+ rcu_read_unlock();
+ for_each_online_cpu(cpu) {
+ if (resources_per_cpu[cpu] < min) {
+ min = resources_per_cpu[cpu];
+ min_index = cpu;
+ }
+ }
+ kfree(resources_per_cpu);
}
- /* should not be reached */
- cpumask_setall(tconn->cpu_mask);
+ if (min_index == ~0) {
+ cpumask_setall(*cpu_mask);
+ return;
+ }
+ cpumask_set_cpu(min_index, *cpu_mask);
}
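/*
 * Editor's note: illustrative sketch only, not part of this patch.  The
 * reworked drbd_calc_cpu_mask() above no longer derives the CPU from the
 * minor number; it counts how many resources are already pinned to each
 * CPU and picks the least loaded one.  Stripped of the kernel cpumask
 * machinery, the selection step is just a minimum search:
 */
#include <stdio.h>

static unsigned int least_loaded_cpu(const unsigned int *per_cpu, unsigned int n)
{
	unsigned int i, best = 0;

	for (i = 1; i < n; i++)
		if (per_cpu[i] < per_cpu[best])
			best = i;
	return best;
}

int main(void)
{
	unsigned int load[4] = { 3, 1, 2, 1 };

	printf("pin next resource to cpu %u\n", least_loaded_cpu(load, 4));
	return 0;
}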
/**
* drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @thi: drbd_thread object
*
* call in the "main loop" of _all_ threads, no need for any mutex, current won't die
@@ -544,13 +543,16 @@ void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
*/
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
+ struct drbd_resource *resource = thi->resource;
struct task_struct *p = current;
if (!thi->reset_cpu_mask)
return;
thi->reset_cpu_mask = 0;
- set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
+ set_cpus_allowed_ptr(p, resource->cpu_mask);
}
+#else
+#define drbd_calc_cpu_mask(A) ({})
#endif
/**
@@ -560,9 +562,9 @@ void drbd_thread_current_set_cpu(struct drbd_thread *thi)
* word aligned on 64-bit architectures. (The bitmap send and receive code
* relies on this.)
*/
-unsigned int drbd_header_size(struct drbd_tconn *tconn)
+unsigned int drbd_header_size(struct drbd_connection *connection)
{
- if (tconn->agreed_pro_version >= 100) {
+ if (connection->agreed_pro_version >= 100) {
BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
return sizeof(struct p_header100);
} else {
@@ -600,44 +602,44 @@ static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cm
return sizeof(struct p_header100);
}
-static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
+static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
void *buffer, enum drbd_packet cmd, int size)
{
- if (tconn->agreed_pro_version >= 100)
+ if (connection->agreed_pro_version >= 100)
return prepare_header100(buffer, cmd, size, vnr);
- else if (tconn->agreed_pro_version >= 95 &&
+ else if (connection->agreed_pro_version >= 95 &&
size > DRBD_MAX_SIZE_H80_PACKET)
return prepare_header95(buffer, cmd, size);
else
return prepare_header80(buffer, cmd, size);
}
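/*
 * Editor's note: illustrative sketch only, not part of this patch.  The
 * BUILD_BUG_ON()s in drbd_header_size() encode an invariant: every on-wire
 * header size is a multiple of 8, so the payload stays word aligned on
 * 64-bit machines.  The hypothetical structs below show how that invariant
 * can be pinned down at compile time, together with the version-dependent
 * choice of header format.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hdr80  { uint32_t magic; uint16_t cmd; uint16_t len; };	/*  8 bytes */
struct hdr100 { uint32_t magic; uint16_t vol; uint16_t cmd;
		uint32_t len;   uint32_t pad; };		/* 16 bytes */

static_assert(sizeof(struct hdr80)  % 8 == 0, "hdr80 keeps payload aligned");
static_assert(sizeof(struct hdr100) % 8 == 0, "hdr100 keeps payload aligned");

static size_t header_size(int agreed_version)
{
	return agreed_version >= 100 ? sizeof(struct hdr100)
				     : sizeof(struct hdr80);
}

int main(void)
{
	printf("v90 header: %zu bytes, v100 header: %zu bytes\n",
	       header_size(90), header_size(100));
	return 0;
}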
-static void *__conn_prepare_command(struct drbd_tconn *tconn,
+static void *__conn_prepare_command(struct drbd_connection *connection,
struct drbd_socket *sock)
{
if (!sock->socket)
return NULL;
- return sock->sbuf + drbd_header_size(tconn);
+ return sock->sbuf + drbd_header_size(connection);
}
-void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
+void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
void *p;
mutex_lock(&sock->mutex);
- p = __conn_prepare_command(tconn, sock);
+ p = __conn_prepare_command(connection, sock);
if (!p)
mutex_unlock(&sock->mutex);
return p;
}
-void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
+void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{
- return conn_prepare_command(mdev->tconn, sock);
+ return conn_prepare_command(peer_device->connection, sock);
}
-static int __send_command(struct drbd_tconn *tconn, int vnr,
+static int __send_command(struct drbd_connection *connection, int vnr,
struct drbd_socket *sock, enum drbd_packet cmd,
unsigned int header_size, void *data,
unsigned int size)
@@ -654,82 +656,82 @@ static int __send_command(struct drbd_tconn *tconn, int vnr,
*/
msg_flags = data ? MSG_MORE : 0;
- header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
+ header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
header_size + size);
- err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
+ err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
msg_flags);
if (data && !err)
- err = drbd_send_all(tconn, sock->socket, data, size, 0);
+ err = drbd_send_all(connection, sock->socket, data, size, 0);
return err;
}
-static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
- return __send_command(tconn, 0, sock, cmd, header_size, data, size);
+ return __send_command(connection, 0, sock, cmd, header_size, data, size);
}
-int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
int err;
- err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
+ err = __conn_send_command(connection, sock, cmd, header_size, data, size);
mutex_unlock(&sock->mutex);
return err;
}
-int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
+int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
int err;
- err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
- data, size);
+ err = __send_command(peer_device->connection, peer_device->device->vnr,
+ sock, cmd, header_size, data, size);
mutex_unlock(&sock->mutex);
return err;
}
-int drbd_send_ping(struct drbd_tconn *tconn)
+int drbd_send_ping(struct drbd_connection *connection)
{
struct drbd_socket *sock;
- sock = &tconn->meta;
- if (!conn_prepare_command(tconn, sock))
+ sock = &connection->meta;
+ if (!conn_prepare_command(connection, sock))
return -EIO;
- return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
+ return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}
-int drbd_send_ping_ack(struct drbd_tconn *tconn)
+int drbd_send_ping_ack(struct drbd_connection *connection)
{
struct drbd_socket *sock;
- sock = &tconn->meta;
- if (!conn_prepare_command(tconn, sock))
+ sock = &connection->meta;
+ if (!conn_prepare_command(connection, sock))
return -EIO;
- return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
+ return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}
-int drbd_send_sync_param(struct drbd_conf *mdev)
+int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
struct drbd_socket *sock;
struct p_rs_param_95 *p;
int size;
- const int apv = mdev->tconn->agreed_pro_version;
+ const int apv = peer_device->connection->agreed_pro_version;
enum drbd_packet cmd;
struct net_conf *nc;
struct disk_conf *dc;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(peer_device->connection->net_conf);
size = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
@@ -742,14 +744,14 @@ int drbd_send_sync_param(struct drbd_conf *mdev)
/* initialize verify_alg and csums_alg */
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- if (get_ldev(mdev)) {
- dc = rcu_dereference(mdev->ldev->disk_conf);
+ if (get_ldev(peer_device->device)) {
+ dc = rcu_dereference(peer_device->device->ldev->disk_conf);
p->resync_rate = cpu_to_be32(dc->resync_rate);
p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
p->c_delay_target = cpu_to_be32(dc->c_delay_target);
p->c_fill_target = cpu_to_be32(dc->c_fill_target);
p->c_max_rate = cpu_to_be32(dc->c_max_rate);
- put_ldev(mdev);
+ put_ldev(peer_device->device);
} else {
p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
@@ -764,33 +766,33 @@ int drbd_send_sync_param(struct drbd_conf *mdev)
strcpy(p->csums_alg, nc->csums_alg);
rcu_read_unlock();
- return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
+ return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}
-int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
+int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
struct drbd_socket *sock;
struct p_protocol *p;
struct net_conf *nc;
int size, cf;
- sock = &tconn->data;
- p = __conn_prepare_command(tconn, sock);
+ sock = &connection->data;
+ p = __conn_prepare_command(connection, sock);
if (!p)
return -EIO;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
- if (nc->tentative && tconn->agreed_pro_version < 92) {
+ if (nc->tentative && connection->agreed_pro_version < 92) {
rcu_read_unlock();
mutex_unlock(&sock->mutex);
- conn_err(tconn, "--dry-run is not supported by peer");
+ drbd_err(connection, "--dry-run is not supported by peer");
return -EOPNOTSUPP;
}
size = sizeof(*p);
- if (tconn->agreed_pro_version >= 87)
+ if (connection->agreed_pro_version >= 87)
size += strlen(nc->integrity_alg) + 1;
p->protocol = cpu_to_be32(nc->wire_protocol);
@@ -805,128 +807,131 @@ int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
cf |= CF_DRY_RUN;
p->conn_flags = cpu_to_be32(cf);
- if (tconn->agreed_pro_version >= 87)
+ if (connection->agreed_pro_version >= 87)
strcpy(p->integrity_alg, nc->integrity_alg);
rcu_read_unlock();
- return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
+ return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}
-int drbd_send_protocol(struct drbd_tconn *tconn)
+int drbd_send_protocol(struct drbd_connection *connection)
{
int err;
- mutex_lock(&tconn->data.mutex);
- err = __drbd_send_protocol(tconn, P_PROTOCOL);
- mutex_unlock(&tconn->data.mutex);
+ mutex_lock(&connection->data.mutex);
+ err = __drbd_send_protocol(connection, P_PROTOCOL);
+ mutex_unlock(&connection->data.mutex);
return err;
}
-int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
+static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_uuids *p;
int i;
- if (!get_ldev_if_state(mdev, D_NEGOTIATING))
+ if (!get_ldev_if_state(device, D_NEGOTIATING))
return 0;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p) {
- put_ldev(mdev);
+ put_ldev(device);
return -EIO;
}
- spin_lock_irq(&mdev->ldev->md.uuid_lock);
+ spin_lock_irq(&device->ldev->md.uuid_lock);
for (i = UI_CURRENT; i < UI_SIZE; i++)
- p->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
- spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+ p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
+ spin_unlock_irq(&device->ldev->md.uuid_lock);
- mdev->comm_bm_set = drbd_bm_total_weight(mdev);
- p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
+ device->comm_bm_set = drbd_bm_total_weight(device);
+ p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
rcu_read_lock();
- uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
+ uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
rcu_read_unlock();
- uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
- uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
+ uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
+ uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
- put_ldev(mdev);
- return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
+ put_ldev(device);
+ return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}
-int drbd_send_uuids(struct drbd_conf *mdev)
+int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
- return _drbd_send_uuids(mdev, 0);
+ return _drbd_send_uuids(peer_device, 0);
}
-int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
+int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
- return _drbd_send_uuids(mdev, 8);
+ return _drbd_send_uuids(peer_device, 8);
}
-void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
+void drbd_print_uuids(struct drbd_device *device, const char *text)
{
- if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
- u64 *uuid = mdev->ldev->md.uuid;
- dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
+ if (get_ldev_if_state(device, D_NEGOTIATING)) {
+ u64 *uuid = device->ldev->md.uuid;
+ drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
text,
(unsigned long long)uuid[UI_CURRENT],
(unsigned long long)uuid[UI_BITMAP],
(unsigned long long)uuid[UI_HISTORY_START],
(unsigned long long)uuid[UI_HISTORY_END]);
- put_ldev(mdev);
+ put_ldev(device);
} else {
- dev_info(DEV, "%s effective data uuid: %016llX\n",
+ drbd_info(device, "%s effective data uuid: %016llX\n",
text,
- (unsigned long long)mdev->ed_uuid);
+ (unsigned long long)device->ed_uuid);
}
}
-void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
+void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_rs_uuid *p;
u64 uuid;
- D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
+ D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
- uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ uuid = device->ldev->md.uuid[UI_BITMAP];
if (uuid && uuid != UUID_JUST_CREATED)
uuid = uuid + UUID_NEW_BM_OFFSET;
else
get_random_bytes(&uuid, sizeof(u64));
- drbd_uuid_set(mdev, UI_BITMAP, uuid);
- drbd_print_uuids(mdev, "updated sync UUID");
- drbd_md_sync(mdev);
+ drbd_uuid_set(device, UI_BITMAP, uuid);
+ drbd_print_uuids(device, "updated sync UUID");
+ drbd_md_sync(device);
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (p) {
p->uuid = cpu_to_be64(uuid);
- drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
+ drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
}
}
-int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
+int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_sizes *p;
sector_t d_size, u_size;
int q_order_type;
unsigned int max_bio_size;
- if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
- D_ASSERT(mdev->ldev->backing_bdev);
- d_size = drbd_get_max_capacity(mdev->ldev);
+ if (get_ldev_if_state(device, D_NEGOTIATING)) {
+ D_ASSERT(device, device->ldev->backing_bdev);
+ d_size = drbd_get_max_capacity(device->ldev);
rcu_read_lock();
- u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
- q_order_type = drbd_queue_order_type(mdev);
- max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
+ q_order_type = drbd_queue_order_type(device);
+ max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
- put_ldev(mdev);
+ put_ldev(device);
} else {
d_size = 0;
u_size = 0;
@@ -934,45 +939,45 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
}
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
- if (mdev->tconn->agreed_pro_version <= 94)
+ if (peer_device->connection->agreed_pro_version <= 94)
max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
- else if (mdev->tconn->agreed_pro_version < 100)
+ else if (peer_device->connection->agreed_pro_version < 100)
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
p->d_size = cpu_to_be64(d_size);
p->u_size = cpu_to_be64(u_size);
- p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
+ p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
p->max_bio_size = cpu_to_be32(max_bio_size);
p->queue_order_type = cpu_to_be16(q_order_type);
p->dds_flags = cpu_to_be16(flags);
- return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_SIZES, sizeof(*p), NULL, 0);
}
/**
* drbd_send_current_state() - Sends the drbd state to the peer
- * @mdev: DRBD device.
+ * @peer_device: DRBD peer device.
*/
-int drbd_send_current_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
struct drbd_socket *sock;
struct p_state *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
- p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
- return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+ p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
+ return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
/**
* drbd_send_state() - After a state change, sends the new state to the peer
- * @mdev: DRBD device.
+ * @peer_device: DRBD peer device.
* @state: the state to send, not necessarily the current state.
*
* Each state change queues an "after_state_ch" work, which will eventually
@@ -980,73 +985,73 @@ int drbd_send_current_state(struct drbd_conf *mdev)
* between queuing and processing of the after_state_ch work, we still
* want to send each intermediary state in the order it occurred.
*/
-int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
struct drbd_socket *sock;
struct p_state *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->state = cpu_to_be32(state.i); /* Within the send mutex */
- return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
-int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
+int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{
struct drbd_socket *sock;
struct p_req_state *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->mask = cpu_to_be32(mask.i);
p->val = cpu_to_be32(val.i);
- return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}
-int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
enum drbd_packet cmd;
struct drbd_socket *sock;
struct p_req_state *p;
- cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
- sock = &tconn->data;
- p = conn_prepare_command(tconn, sock);
+ cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
+ sock = &connection->data;
+ p = conn_prepare_command(connection, sock);
if (!p)
return -EIO;
p->mask = cpu_to_be32(mask.i);
p->val = cpu_to_be32(val.i);
- return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
+ return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}
-void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
+void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{
struct drbd_socket *sock;
struct p_req_state_reply *p;
- sock = &mdev->tconn->meta;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->meta;
+ p = drbd_prepare_command(peer_device, sock);
if (p) {
p->retcode = cpu_to_be32(retcode);
- drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
+ drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
}
}
-void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
+void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
struct drbd_socket *sock;
struct p_req_state_reply *p;
- enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
+ enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
- sock = &tconn->meta;
- p = conn_prepare_command(tconn, sock);
+ sock = &connection->meta;
+ p = conn_prepare_command(connection, sock);
if (p) {
p->retcode = cpu_to_be32(retcode);
- conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
+ conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}
}
@@ -1067,7 +1072,7 @@ static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
-int fill_bitmap_rle_bits(struct drbd_conf *mdev,
+static int fill_bitmap_rle_bits(struct drbd_device *device,
struct p_compressed_bm *p,
unsigned int size,
struct bm_xfer_ctx *c)
@@ -1082,9 +1087,9 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
/* may we use this feature? */
rcu_read_lock();
- use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
+ use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
rcu_read_unlock();
- if (!use_rle || mdev->tconn->agreed_pro_version < 90)
+ if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
return 0;
if (c->bit_offset >= c->bm_bits)
@@ -1104,8 +1109,8 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
/* see how many plain bits we can stuff into one packet
* using RLE and VLI. */
do {
- tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
- : _drbd_bm_find_next(mdev, c->bit_offset);
+ tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
+ : _drbd_bm_find_next(device, c->bit_offset);
if (tmp == -1UL)
tmp = c->bm_bits;
rl = tmp - c->bit_offset;
@@ -1125,7 +1130,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
/* paranoia: catch zero runlength.
* can only happen if bitmap is modified while we scan it. */
if (rl == 0) {
- dev_err(DEV, "unexpected zero runlength while encoding bitmap "
+ drbd_err(device, "unexpected zero runlength while encoding bitmap "
"t:%u bo:%lu\n", toggle, c->bit_offset);
return -1;
}
@@ -1134,7 +1139,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
if (bits == -ENOBUFS) /* buffer full */
break;
if (bits <= 0) {
- dev_err(DEV, "error while encoding bitmap: %d\n", bits);
+ drbd_err(device, "error while encoding bitmap: %d\n", bits);
return 0;
}
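/*
 * Editor's note: illustrative sketch only, not part of this patch.
 * fill_bitmap_rle_bits() walks the bitmap alternating between runs of
 * clear bits and runs of set bits ("toggle") and emits only the run
 * lengths.  A minimal user-space version of that toggle/run-length walk,
 * without the VLI packing, looks like this:
 */
#include <stdio.h>

static int test_bit(const unsigned char *bm, unsigned long bit)
{
	return (bm[bit / 8] >> (bit % 8)) & 1;
}

/* Emit (toggle, runlength) pairs; a decoder only needs the first toggle
 * value, every later run flips it implicitly. */
static void rle_encode(const unsigned char *bm, unsigned long bits)
{
	unsigned long pos = 0;
	int toggle = test_bit(bm, 0);

	while (pos < bits) {
		unsigned long start = pos;

		while (pos < bits && test_bit(bm, pos) == toggle)
			pos++;
		printf("%d x %lu\n", toggle, pos - start);
		toggle = !toggle;
	}
}

int main(void)
{
	/* bits 0-3 clear, bits 4-9 set, bits 10-15 clear */
	unsigned char bm[2] = { 0xf0, 0x03 };

	rle_encode(bm, 16);
	return 0;
}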
@@ -1171,21 +1176,21 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
* code upon failure.
*/
static int
-send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
+send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
{
- struct drbd_socket *sock = &mdev->tconn->data;
- unsigned int header_size = drbd_header_size(mdev->tconn);
+ struct drbd_socket *sock = &first_peer_device(device)->connection->data;
+ unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
struct p_compressed_bm *p = sock->sbuf + header_size;
int len, err;
- len = fill_bitmap_rle_bits(mdev, p,
+ len = fill_bitmap_rle_bits(device, p,
DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
if (len < 0)
return -EIO;
if (len) {
dcbp_set_code(p, RLE_VLI_Bits);
- err = __send_command(mdev->tconn, mdev->vnr, sock,
+ err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
P_COMPRESSED_BITMAP, sizeof(*p) + len,
NULL, 0);
c->packets[0]++;
@@ -1205,8 +1210,8 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
c->bm_words - c->word_offset);
len = num_words * sizeof(*p);
if (len)
- drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
- err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
+ drbd_bm_get_lel(device, c->word_offset, num_words, p);
+ err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
@@ -1218,7 +1223,7 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
}
if (!err) {
if (len == 0) {
- INFO_bm_xfer_stats(mdev, "send", c);
+ INFO_bm_xfer_stats(device, "send", c);
return 0;
} else
return 1;
@@ -1227,128 +1232,128 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
}
/* See the comment at receive_bitmap() */
-static int _drbd_send_bitmap(struct drbd_conf *mdev)
+static int _drbd_send_bitmap(struct drbd_device *device)
{
struct bm_xfer_ctx c;
int err;
- if (!expect(mdev->bitmap))
+ if (!expect(device->bitmap))
return false;
- if (get_ldev(mdev)) {
- if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
- dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
- drbd_bm_set_all(mdev);
- if (drbd_bm_write(mdev)) {
+ if (get_ldev(device)) {
+ if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
+ drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
+ drbd_bm_set_all(device);
+ if (drbd_bm_write(device)) {
/* write_bm did fail! Leave full sync flag set in Meta P_DATA
* but otherwise process as per normal - need to tell other
* side that a full resync is required! */
- dev_err(DEV, "Failed to write bitmap to disk!\n");
+ drbd_err(device, "Failed to write bitmap to disk!\n");
} else {
- drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
+ drbd_md_clear_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
}
}
- put_ldev(mdev);
+ put_ldev(device);
}
c = (struct bm_xfer_ctx) {
- .bm_bits = drbd_bm_bits(mdev),
- .bm_words = drbd_bm_words(mdev),
+ .bm_bits = drbd_bm_bits(device),
+ .bm_words = drbd_bm_words(device),
};
do {
- err = send_bitmap_rle_or_plain(mdev, &c);
+ err = send_bitmap_rle_or_plain(device, &c);
} while (err > 0);
return err == 0;
}
-int drbd_send_bitmap(struct drbd_conf *mdev)
+int drbd_send_bitmap(struct drbd_device *device)
{
- struct drbd_socket *sock = &mdev->tconn->data;
+ struct drbd_socket *sock = &first_peer_device(device)->connection->data;
int err = -1;
mutex_lock(&sock->mutex);
if (sock->socket)
- err = !_drbd_send_bitmap(mdev);
+ err = !_drbd_send_bitmap(device);
mutex_unlock(&sock->mutex);
return err;
}
-void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
+void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
struct drbd_socket *sock;
struct p_barrier_ack *p;
- if (tconn->cstate < C_WF_REPORT_PARAMS)
+ if (connection->cstate < C_WF_REPORT_PARAMS)
return;
- sock = &tconn->meta;
- p = conn_prepare_command(tconn, sock);
+ sock = &connection->meta;
+ p = conn_prepare_command(connection, sock);
if (!p)
return;
p->barrier = barrier_nr;
p->set_size = cpu_to_be32(set_size);
- conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
+ conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
/**
* _drbd_send_ack() - Sends an ack packet
- * @mdev: DRBD device.
+ * @peer_device: DRBD peer device.
* @cmd: Packet command code.
* @sector: sector, needs to be in big endian byte order
* @blksize: size in byte, needs to be in big endian byte order
* @block_id: Id, big endian byte order
*/
-static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
u64 sector, u32 blksize, u64 block_id)
{
struct drbd_socket *sock;
struct p_block_ack *p;
- if (mdev->state.conn < C_CONNECTED)
+ if (peer_device->device->state.conn < C_CONNECTED)
return -EIO;
- sock = &mdev->tconn->meta;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->meta;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = sector;
p->block_id = block_id;
p->blksize = blksize;
- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
- return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
+ p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
+ return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
/* dp->sector and dp->block_id already/still in network byte order,
* data_size is payload size according to dp->head,
* and may need to be corrected for digest size. */
-void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct p_data *dp, int data_size)
{
- if (mdev->tconn->peer_integrity_tfm)
- data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
- _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
+ if (peer_device->connection->peer_integrity_tfm)
+ data_size -= crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+ _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id);
}
-void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct p_block_req *rp)
{
- _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
+ _drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
* drbd_send_ack() - Sends an ack packet
- * @mdev: DRBD device
+ * @peer_device: DRBD peer device
* @cmd: packet command code
* @peer_req: peer request
*/
-int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct drbd_peer_request *peer_req)
{
- return _drbd_send_ack(mdev, cmd,
+ return _drbd_send_ack(peer_device, cmd,
cpu_to_be64(peer_req->i.sector),
cpu_to_be32(peer_req->i.size),
peer_req->block_id);
@@ -1356,32 +1361,32 @@ int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
/* This function misuses the block_id field to signal if the blocks
* are in sync or not. */
-int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id)
{
- return _drbd_send_ack(mdev, cmd,
+ return _drbd_send_ack(peer_device, cmd,
cpu_to_be64(sector),
cpu_to_be32(blksize),
cpu_to_be64(block_id));
}
-int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
+int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
sector_t sector, int size, u64 block_id)
{
struct drbd_socket *sock;
struct p_block_req *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = block_id;
p->blksize = cpu_to_be32(size);
- return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
-int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
+int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
void *digest, int digest_size, enum drbd_packet cmd)
{
struct drbd_socket *sock;
@@ -1389,64 +1394,63 @@ int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
/* FIXME: Put the digest into the preallocated socket buffer. */
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = ID_SYNCER /* unused */;
p->blksize = cpu_to_be32(size);
- return drbd_send_command(mdev, sock, cmd, sizeof(*p),
- digest, digest_size);
+ return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
}
-int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
+int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
struct drbd_socket *sock;
struct p_block_req *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = ID_SYNCER /* unused */;
p->blksize = cpu_to_be32(size);
- return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}
/* called on sndtimeo
* returns false if we should retry,
* true if we think connection is dead
*/
-static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
+static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{
int drop_it;
- /* long elapsed = (long)(jiffies - mdev->last_received); */
+ /* long elapsed = (long)(jiffies - device->last_received); */
- drop_it = tconn->meta.socket == sock
- || !tconn->asender.task
- || get_t_state(&tconn->asender) != RUNNING
- || tconn->cstate < C_WF_REPORT_PARAMS;
+ drop_it = connection->meta.socket == sock
+ || !connection->asender.task
+ || get_t_state(&connection->asender) != RUNNING
+ || connection->cstate < C_WF_REPORT_PARAMS;
if (drop_it)
return true;
- drop_it = !--tconn->ko_count;
+ drop_it = !--connection->ko_count;
if (!drop_it) {
- conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
- current->comm, current->pid, tconn->ko_count);
- request_ping(tconn);
+ drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
+ current->comm, current->pid, connection->ko_count);
+ request_ping(connection);
}
- return drop_it; /* && (mdev->state == R_PRIMARY) */;
+ return drop_it; /* && (device->state == R_PRIMARY) */;
}
-static void drbd_update_congested(struct drbd_tconn *tconn)
+static void drbd_update_congested(struct drbd_connection *connection)
{
- struct sock *sk = tconn->data.socket->sk;
+ struct sock *sk = connection->data.socket->sk;
if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
- set_bit(NET_CONGESTED, &tconn->flags);
+ set_bit(NET_CONGESTED, &connection->flags);
}
/* The idea of sendpage seems to be to put some kind of reference
@@ -1470,26 +1474,26 @@ static void drbd_update_congested(struct drbd_tconn *tconn)
* As a workaround, we disable sendpage on pages
* with page_count == 0 or PageSlab.
*/
-static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
+static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
int offset, size_t size, unsigned msg_flags)
{
struct socket *socket;
void *addr;
int err;
- socket = mdev->tconn->data.socket;
+ socket = peer_device->connection->data.socket;
addr = kmap(page) + offset;
- err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
+ err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
kunmap(page);
if (!err)
- mdev->send_cnt += size >> 9;
+ peer_device->device->send_cnt += size >> 9;
return err;
}
-static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
+static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
int offset, size_t size, unsigned msg_flags)
{
- struct socket *socket = mdev->tconn->data.socket;
+ struct socket *socket = peer_device->connection->data.socket;
mm_segment_t oldfs = get_fs();
int len = size;
int err = -EIO;
@@ -1501,10 +1505,10 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
- return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
+ return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
msg_flags |= MSG_NOSIGNAL;
- drbd_update_congested(mdev->tconn);
+ drbd_update_congested(peer_device->connection);
set_fs(KERNEL_DS);
do {
int sent;
@@ -1512,11 +1516,11 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
if (sent <= 0) {
if (sent == -EAGAIN) {
- if (we_should_drop_the_connection(mdev->tconn, socket))
+ if (we_should_drop_the_connection(peer_device->connection, socket))
break;
continue;
}
- dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
+ drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
__func__, (int)size, len, sent);
if (sent < 0)
err = sent;
@@ -1524,18 +1528,18 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
}
len -= sent;
offset += sent;
- } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
+ } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
set_fs(oldfs);
- clear_bit(NET_CONGESTED, &mdev->tconn->flags);
+ clear_bit(NET_CONGESTED, &peer_device->connection->flags);
if (len == 0) {
err = 0;
- mdev->send_cnt += size >> 9;
+ peer_device->device->send_cnt += size >> 9;
}
return err;
}
-static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
+static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
@@ -1544,7 +1548,7 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
bio_for_each_segment(bvec, bio, iter) {
int err;
- err = _drbd_no_send_page(mdev, bvec.bv_page,
+ err = _drbd_no_send_page(peer_device, bvec.bv_page,
bvec.bv_offset, bvec.bv_len,
bio_iter_last(bvec, iter)
? 0 : MSG_MORE);
@@ -1554,7 +1558,7 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
return 0;
}
-static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
+static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
@@ -1563,7 +1567,7 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
bio_for_each_segment(bvec, bio, iter) {
int err;
- err = _drbd_send_page(mdev, bvec.bv_page,
+ err = _drbd_send_page(peer_device, bvec.bv_page,
bvec.bv_offset, bvec.bv_len,
bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
if (err)
@@ -1572,7 +1576,7 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
return 0;
}
-static int _drbd_send_zc_ee(struct drbd_conf *mdev,
+static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
struct page *page = peer_req->pages;
@@ -1583,7 +1587,7 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev,
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- err = _drbd_send_page(mdev, page, 0, l,
+ err = _drbd_send_page(peer_device, page, 0, l,
page_chain_next(page) ? MSG_MORE : 0);
if (err)
return err;
@@ -1592,9 +1596,9 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev,
return 0;
}
-static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
+static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw)
{
- if (mdev->tconn->agreed_pro_version >= 95)
+ if (connection->agreed_pro_version >= 95)
return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
(bi_rw & REQ_FUA ? DP_FUA : 0) |
(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
@@ -1606,28 +1610,30 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
/* Used to send write requests
* R_PRIMARY -> Peer (P_DATA)
*/
-int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
+int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_data *p;
unsigned int dp_flags = 0;
int dgs;
int err;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
- dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
+ dgs = peer_device->connection->integrity_tfm ?
+ crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
p->sector = cpu_to_be64(req->i.sector);
p->block_id = (unsigned long)req;
- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
- dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
- if (mdev->state.conn >= C_SYNC_SOURCE &&
- mdev->state.conn <= C_PAUSED_SYNC_T)
+ p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
+ dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
+ if (device->state.conn >= C_SYNC_SOURCE &&
+ device->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC;
- if (mdev->tconn->agreed_pro_version >= 100) {
+ if (peer_device->connection->agreed_pro_version >= 100) {
if (req->rq_state & RQ_EXP_RECEIVE_ACK)
dp_flags |= DP_SEND_RECEIVE_ACK;
if (req->rq_state & RQ_EXP_WRITE_ACK)
@@ -1635,8 +1641,8 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
}
p->dp_flags = cpu_to_be32(dp_flags);
if (dgs)
- drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
- err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
+ drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1);
+ err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
if (!err) {
/* For protocol A, we have to memcpy the payload into
* socket buffers, as we may complete right away
@@ -1650,18 +1656,18 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
* receiving side, we sure have detected corruption elsewhere.
*/
if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
- err = _drbd_send_bio(mdev, req->master_bio);
+ err = _drbd_send_bio(peer_device, req->master_bio);
else
- err = _drbd_send_zc_bio(mdev, req->master_bio);
+ err = _drbd_send_zc_bio(peer_device, req->master_bio);
/* double check digest, sometimes buffers have been modified in flight. */
if (dgs > 0 && dgs <= 64) {
/* 64 byte, 512 bit, is the largest digest size
* currently supported in kernel crypto. */
unsigned char digest[64];
- drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
+ drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
if (memcmp(p + 1, digest, dgs)) {
- dev_warn(DEV,
+ drbd_warn(device,
"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
(unsigned long long)req->i.sector, req->i.size);
}
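/*
 * Editor's note: illustrative sketch only, not part of this patch.  The
 * "double check digest" above catches upper layers modifying pages while
 * they are in flight: the digest that went into the packet header is
 * recomputed after the payload was sent and compared.  A trivial stand-in
 * checksum (not the kernel crypto API) shows the idea:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_digest(const unsigned char *buf, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum = sum * 31 + *buf++;
	return sum;
}

int main(void)
{
	unsigned char page[16] = "stable payload";
	uint32_t sent_digest = toy_digest(page, sizeof(page)); /* into header */

	page[0] = 'S';				/* modified "in flight" */

	if (toy_digest(page, sizeof(page)) != sent_digest)
		printf("digest mismatch, buffer modified during write\n");
	return 0;
}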
@@ -1678,18 +1684,20 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
* Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
* C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
*/
-int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct drbd_peer_request *peer_req)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_data *p;
int err;
int dgs;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
- dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ dgs = peer_device->connection->integrity_tfm ?
+ crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -1698,27 +1706,27 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
p->seq_num = 0; /* unused */
p->dp_flags = 0;
if (dgs)
- drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
- err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
+ drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
+ err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
if (!err)
- err = _drbd_send_zc_ee(mdev, peer_req);
+ err = _drbd_send_zc_ee(peer_device, peer_req);
mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
return err;
}
-int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
+int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_socket *sock;
struct p_block_desc *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(req->i.sector);
p->blksize = cpu_to_be32(req->i.size);
- return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}
/*
@@ -1737,7 +1745,7 @@ int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
/*
* you must have down()ed the appropriate [m]sock_mutex elsewhere!
*/
-int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+int drbd_send(struct drbd_connection *connection, struct socket *sock,
void *buf, size_t size, unsigned msg_flags)
{
struct kvec iov;
@@ -1758,11 +1766,11 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
msg.msg_controllen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
- if (sock == tconn->data.socket) {
+ if (sock == connection->data.socket) {
rcu_read_lock();
- tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
+ connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
rcu_read_unlock();
- drbd_update_congested(tconn);
+ drbd_update_congested(connection);
}
do {
/* STRANGE
@@ -1776,7 +1784,7 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
*/
rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
if (rv == -EAGAIN) {
- if (we_should_drop_the_connection(tconn, sock))
+ if (we_should_drop_the_connection(connection, sock))
break;
else
continue;
@@ -1792,17 +1800,17 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
iov.iov_len -= rv;
} while (sent < size);
- if (sock == tconn->data.socket)
- clear_bit(NET_CONGESTED, &tconn->flags);
+ if (sock == connection->data.socket)
+ clear_bit(NET_CONGESTED, &connection->flags);
if (rv <= 0) {
if (rv != -EAGAIN) {
- conn_err(tconn, "%s_sendmsg returned %d\n",
- sock == tconn->meta.socket ? "msock" : "sock",
+ drbd_err(connection, "%s_sendmsg returned %d\n",
+ sock == connection->meta.socket ? "msock" : "sock",
rv);
- conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
+ conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
} else
- conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
+ conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
}
return sent;
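/*
 * Editor's note: illustrative sketch only, not part of this patch.  The
 * loop in drbd_send() is the classic "send until done" pattern: a stream
 * socket may accept only part of the buffer, so the pointer and the
 * remaining length are advanced until everything went out or a real error
 * occurred.  A POSIX user-space version of the same loop:
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Returns 0 on success, -1 on error (errno set). */
static int send_all(int fd, const void *buf, size_t len)
{
	const char *p = buf;

	while (len > 0) {
		ssize_t n = send(fd, p, len, MSG_NOSIGNAL);

		if (n < 0) {
			if (errno == EINTR)
				continue;	/* signal: just retry */
			return -1;		/* incl. EAGAIN: give up here */
		}
		p += n;				/* partial send: advance */
		len -= (size_t)n;
	}
	return 0;
}

int main(void)
{
	int sv[2];
	char rx[5] = "";

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
		return 1;
	if (send_all(sv[0], "ping", 4) == 0 && recv(sv[1], rx, 4, 0) == 4)
		printf("sent and received: %s\n", rx);
	return 0;
}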
@@ -1813,12 +1821,12 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
*
* Returns 0 upon success and a negative error value otherwise.
*/
-int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
+int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
size_t size, unsigned msg_flags)
{
int err;
- err = drbd_send(tconn, sock, buffer, size, msg_flags);
+ err = drbd_send(connection, sock, buffer, size, msg_flags);
if (err < 0)
return err;
if (err != size)
@@ -1828,16 +1836,16 @@ int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
- struct drbd_conf *mdev = bdev->bd_disk->private_data;
+ struct drbd_device *device = bdev->bd_disk->private_data;
unsigned long flags;
int rv = 0;
mutex_lock(&drbd_main_mutex);
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- /* to have a stable mdev->state.role
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ /* to have a stable device->state.role
* and no race with updating open_cnt */
- if (mdev->state.role != R_PRIMARY) {
+ if (device->state.role != R_PRIMARY) {
if (mode & FMODE_WRITE)
rv = -EROFS;
else if (!allow_oos)
@@ -1845,8 +1853,8 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
}
if (!rv)
- mdev->open_cnt++;
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ device->open_cnt++;
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
mutex_unlock(&drbd_main_mutex);
return rv;
@@ -1854,17 +1862,17 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
static void drbd_release(struct gendisk *gd, fmode_t mode)
{
- struct drbd_conf *mdev = gd->private_data;
+ struct drbd_device *device = gd->private_data;
mutex_lock(&drbd_main_mutex);
- mdev->open_cnt--;
+ device->open_cnt--;
mutex_unlock(&drbd_main_mutex);
}
-static void drbd_set_defaults(struct drbd_conf *mdev)
+static void drbd_set_defaults(struct drbd_device *device)
{
/* Beware! The actual layout differs
* between big endian and little endian */
- mdev->state = (union drbd_dev_state) {
+ device->state = (union drbd_dev_state) {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
.conn = C_STANDALONE,
@@ -1873,130 +1881,123 @@ static void drbd_set_defaults(struct drbd_conf *mdev)
} };
}
-void drbd_init_set_defaults(struct drbd_conf *mdev)
+void drbd_init_set_defaults(struct drbd_device *device)
{
/* the memset(,0,) did most of this.
* note: only assignments, no allocation in here */
- drbd_set_defaults(mdev);
-
- atomic_set(&mdev->ap_bio_cnt, 0);
- atomic_set(&mdev->ap_pending_cnt, 0);
- atomic_set(&mdev->rs_pending_cnt, 0);
- atomic_set(&mdev->unacked_cnt, 0);
- atomic_set(&mdev->local_cnt, 0);
- atomic_set(&mdev->pp_in_use_by_net, 0);
- atomic_set(&mdev->rs_sect_in, 0);
- atomic_set(&mdev->rs_sect_ev, 0);
- atomic_set(&mdev->ap_in_flight, 0);
- atomic_set(&mdev->md_io_in_use, 0);
-
- mutex_init(&mdev->own_state_mutex);
- mdev->state_mutex = &mdev->own_state_mutex;
-
- spin_lock_init(&mdev->al_lock);
- spin_lock_init(&mdev->peer_seq_lock);
-
- INIT_LIST_HEAD(&mdev->active_ee);
- INIT_LIST_HEAD(&mdev->sync_ee);
- INIT_LIST_HEAD(&mdev->done_ee);
- INIT_LIST_HEAD(&mdev->read_ee);
- INIT_LIST_HEAD(&mdev->net_ee);
- INIT_LIST_HEAD(&mdev->resync_reads);
- INIT_LIST_HEAD(&mdev->resync_work.list);
- INIT_LIST_HEAD(&mdev->unplug_work.list);
- INIT_LIST_HEAD(&mdev->go_diskless.list);
- INIT_LIST_HEAD(&mdev->md_sync_work.list);
- INIT_LIST_HEAD(&mdev->start_resync_work.list);
- INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
-
- mdev->resync_work.cb = w_resync_timer;
- mdev->unplug_work.cb = w_send_write_hint;
- mdev->go_diskless.cb = w_go_diskless;
- mdev->md_sync_work.cb = w_md_sync;
- mdev->bm_io_work.w.cb = w_bitmap_io;
- mdev->start_resync_work.cb = w_start_resync;
-
- mdev->resync_work.mdev = mdev;
- mdev->unplug_work.mdev = mdev;
- mdev->go_diskless.mdev = mdev;
- mdev->md_sync_work.mdev = mdev;
- mdev->bm_io_work.w.mdev = mdev;
- mdev->start_resync_work.mdev = mdev;
-
- init_timer(&mdev->resync_timer);
- init_timer(&mdev->md_sync_timer);
- init_timer(&mdev->start_resync_timer);
- init_timer(&mdev->request_timer);
- mdev->resync_timer.function = resync_timer_fn;
- mdev->resync_timer.data = (unsigned long) mdev;
- mdev->md_sync_timer.function = md_sync_timer_fn;
- mdev->md_sync_timer.data = (unsigned long) mdev;
- mdev->start_resync_timer.function = start_resync_timer_fn;
- mdev->start_resync_timer.data = (unsigned long) mdev;
- mdev->request_timer.function = request_timer_fn;
- mdev->request_timer.data = (unsigned long) mdev;
-
- init_waitqueue_head(&mdev->misc_wait);
- init_waitqueue_head(&mdev->state_wait);
- init_waitqueue_head(&mdev->ee_wait);
- init_waitqueue_head(&mdev->al_wait);
- init_waitqueue_head(&mdev->seq_wait);
-
- mdev->resync_wenr = LC_FREE;
- mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
- mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
-}
-
-void drbd_mdev_cleanup(struct drbd_conf *mdev)
+ drbd_set_defaults(device);
+
+ atomic_set(&device->ap_bio_cnt, 0);
+ atomic_set(&device->ap_pending_cnt, 0);
+ atomic_set(&device->rs_pending_cnt, 0);
+ atomic_set(&device->unacked_cnt, 0);
+ atomic_set(&device->local_cnt, 0);
+ atomic_set(&device->pp_in_use_by_net, 0);
+ atomic_set(&device->rs_sect_in, 0);
+ atomic_set(&device->rs_sect_ev, 0);
+ atomic_set(&device->ap_in_flight, 0);
+ atomic_set(&device->md_io_in_use, 0);
+
+ mutex_init(&device->own_state_mutex);
+ device->state_mutex = &device->own_state_mutex;
+
+ spin_lock_init(&device->al_lock);
+ spin_lock_init(&device->peer_seq_lock);
+
+ INIT_LIST_HEAD(&device->active_ee);
+ INIT_LIST_HEAD(&device->sync_ee);
+ INIT_LIST_HEAD(&device->done_ee);
+ INIT_LIST_HEAD(&device->read_ee);
+ INIT_LIST_HEAD(&device->net_ee);
+ INIT_LIST_HEAD(&device->resync_reads);
+ INIT_LIST_HEAD(&device->resync_work.list);
+ INIT_LIST_HEAD(&device->unplug_work.list);
+ INIT_LIST_HEAD(&device->go_diskless.list);
+ INIT_LIST_HEAD(&device->md_sync_work.list);
+ INIT_LIST_HEAD(&device->start_resync_work.list);
+ INIT_LIST_HEAD(&device->bm_io_work.w.list);
+
+ device->resync_work.cb = w_resync_timer;
+ device->unplug_work.cb = w_send_write_hint;
+ device->go_diskless.cb = w_go_diskless;
+ device->md_sync_work.cb = w_md_sync;
+ device->bm_io_work.w.cb = w_bitmap_io;
+ device->start_resync_work.cb = w_start_resync;
+
+ init_timer(&device->resync_timer);
+ init_timer(&device->md_sync_timer);
+ init_timer(&device->start_resync_timer);
+ init_timer(&device->request_timer);
+ device->resync_timer.function = resync_timer_fn;
+ device->resync_timer.data = (unsigned long) device;
+ device->md_sync_timer.function = md_sync_timer_fn;
+ device->md_sync_timer.data = (unsigned long) device;
+ device->start_resync_timer.function = start_resync_timer_fn;
+ device->start_resync_timer.data = (unsigned long) device;
+ device->request_timer.function = request_timer_fn;
+ device->request_timer.data = (unsigned long) device;
+
+ init_waitqueue_head(&device->misc_wait);
+ init_waitqueue_head(&device->state_wait);
+ init_waitqueue_head(&device->ee_wait);
+ init_waitqueue_head(&device->al_wait);
+ init_waitqueue_head(&device->seq_wait);
+
+ device->resync_wenr = LC_FREE;
+ device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
+ device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
+}
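/*
 * [Illustrative note, not part of the patch] The timers initialized above use
 * the classic init_timer()/.function/.data interface: the callback receives
 * the device back as an unsigned long, exactly as md_sync_timer_fn() does
 * further down.  A minimal sketch of that shape (hypothetical callback name):
 *
 *	static void example_timer_fn(unsigned long data)
 *	{
 *		struct drbd_device *device = (struct drbd_device *) data;
 *
 *		drbd_warn(device, "example timer fired\n");
 *	}
 */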
+
+void drbd_device_cleanup(struct drbd_device *device)
{
int i;
- if (mdev->tconn->receiver.t_state != NONE)
- dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
- mdev->tconn->receiver.t_state);
-
- mdev->al_writ_cnt =
- mdev->bm_writ_cnt =
- mdev->read_cnt =
- mdev->recv_cnt =
- mdev->send_cnt =
- mdev->writ_cnt =
- mdev->p_size =
- mdev->rs_start =
- mdev->rs_total =
- mdev->rs_failed = 0;
- mdev->rs_last_events = 0;
- mdev->rs_last_sect_ev = 0;
+ if (first_peer_device(device)->connection->receiver.t_state != NONE)
+ drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
+ first_peer_device(device)->connection->receiver.t_state);
+
+ device->al_writ_cnt =
+ device->bm_writ_cnt =
+ device->read_cnt =
+ device->recv_cnt =
+ device->send_cnt =
+ device->writ_cnt =
+ device->p_size =
+ device->rs_start =
+ device->rs_total =
+ device->rs_failed = 0;
+ device->rs_last_events = 0;
+ device->rs_last_sect_ev = 0;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = 0;
- mdev->rs_mark_time[i] = 0;
+ device->rs_mark_left[i] = 0;
+ device->rs_mark_time[i] = 0;
}
- D_ASSERT(mdev->tconn->net_conf == NULL);
+ D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
- drbd_set_my_capacity(mdev, 0);
- if (mdev->bitmap) {
+ drbd_set_my_capacity(device, 0);
+ if (device->bitmap) {
/* maybe never allocated. */
- drbd_bm_resize(mdev, 0, 1);
- drbd_bm_cleanup(mdev);
+ drbd_bm_resize(device, 0, 1);
+ drbd_bm_cleanup(device);
}
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;
+ drbd_free_bc(device->ldev);
+ device->ldev = NULL;
- clear_bit(AL_SUSPENDED, &mdev->flags);
+ clear_bit(AL_SUSPENDED, &device->flags);
- D_ASSERT(list_empty(&mdev->active_ee));
- D_ASSERT(list_empty(&mdev->sync_ee));
- D_ASSERT(list_empty(&mdev->done_ee));
- D_ASSERT(list_empty(&mdev->read_ee));
- D_ASSERT(list_empty(&mdev->net_ee));
- D_ASSERT(list_empty(&mdev->resync_reads));
- D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
- D_ASSERT(list_empty(&mdev->resync_work.list));
- D_ASSERT(list_empty(&mdev->unplug_work.list));
- D_ASSERT(list_empty(&mdev->go_diskless.list));
+ D_ASSERT(device, list_empty(&device->active_ee));
+ D_ASSERT(device, list_empty(&device->sync_ee));
+ D_ASSERT(device, list_empty(&device->done_ee));
+ D_ASSERT(device, list_empty(&device->read_ee));
+ D_ASSERT(device, list_empty(&device->net_ee));
+ D_ASSERT(device, list_empty(&device->resync_reads));
+ D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
+ D_ASSERT(device, list_empty(&device->resync_work.list));
+ D_ASSERT(device, list_empty(&device->unplug_work.list));
+ D_ASSERT(device, list_empty(&device->go_diskless.list));
- drbd_set_defaults(mdev);
+ drbd_set_defaults(device);
}
@@ -2011,7 +2012,7 @@ static void drbd_destroy_mempools(void)
drbd_pp_vacant--;
}
- /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
+ /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
if (drbd_md_io_bio_set)
bioset_free(drbd_md_io_bio_set);
@@ -2131,69 +2132,73 @@ static struct notifier_block drbd_notifier = {
.notifier_call = drbd_notify_sys,
};
-static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
+static void drbd_release_all_peer_reqs(struct drbd_device *device)
{
int rr;
- rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
+ rr = drbd_free_peer_reqs(device, &device->active_ee);
if (rr)
- dev_err(DEV, "%d EEs in active list found!\n", rr);
+ drbd_err(device, "%d EEs in active list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
+ rr = drbd_free_peer_reqs(device, &device->sync_ee);
if (rr)
- dev_err(DEV, "%d EEs in sync list found!\n", rr);
+ drbd_err(device, "%d EEs in sync list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
+ rr = drbd_free_peer_reqs(device, &device->read_ee);
if (rr)
- dev_err(DEV, "%d EEs in read list found!\n", rr);
+ drbd_err(device, "%d EEs in read list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
+ rr = drbd_free_peer_reqs(device, &device->done_ee);
if (rr)
- dev_err(DEV, "%d EEs in done list found!\n", rr);
+ drbd_err(device, "%d EEs in done list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
+ rr = drbd_free_peer_reqs(device, &device->net_ee);
if (rr)
- dev_err(DEV, "%d EEs in net list found!\n", rr);
+ drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
-void drbd_minor_destroy(struct kref *kref)
+void drbd_destroy_device(struct kref *kref)
{
- struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_device *device = container_of(kref, struct drbd_device, kref);
+ struct drbd_resource *resource = device->resource;
+ struct drbd_connection *connection;
- del_timer_sync(&mdev->request_timer);
+ del_timer_sync(&device->request_timer);
/* paranoia asserts */
- D_ASSERT(mdev->open_cnt == 0);
+ D_ASSERT(device, device->open_cnt == 0);
/* end paranoia asserts */
/* cleanup stuff that may have been allocated during
* device (re-)configuration or state changes */
- if (mdev->this_bdev)
- bdput(mdev->this_bdev);
+ if (device->this_bdev)
+ bdput(device->this_bdev);
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;
+ drbd_free_bc(device->ldev);
+ device->ldev = NULL;
- drbd_release_all_peer_reqs(mdev);
+ drbd_release_all_peer_reqs(device);
- lc_destroy(mdev->act_log);
- lc_destroy(mdev->resync);
+ lc_destroy(device->act_log);
+ lc_destroy(device->resync);
- kfree(mdev->p_uuid);
- /* mdev->p_uuid = NULL; */
+ kfree(device->p_uuid);
+ /* device->p_uuid = NULL; */
- if (mdev->bitmap) /* should no longer be there. */
- drbd_bm_cleanup(mdev);
- __free_page(mdev->md_io_page);
- put_disk(mdev->vdisk);
- blk_cleanup_queue(mdev->rq_queue);
- kfree(mdev->rs_plan_s);
- kfree(mdev);
+ if (device->bitmap) /* should no longer be there. */
+ drbd_bm_cleanup(device);
+ __free_page(device->md_io_page);
+ put_disk(device->vdisk);
+ blk_cleanup_queue(device->rq_queue);
+ kfree(device->rs_plan_s);
+ kfree(first_peer_device(device));
+ kfree(device);
- kref_put(&tconn->kref, &conn_destroy);
+ for_each_connection(connection, resource)
+ kref_put(&connection->kref, drbd_destroy_connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
}
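/*
 * [Illustrative sketch, not part of the patch] drbd_destroy_device() above is
 * the kref release callback for a device: it frees the device and then drops
 * the references the device held on its connections and on its resource, so
 * a parent object goes away only after its last child.  The generic pattern,
 * with hypothetical types and <linux/kref.h>/<linux/slab.h> assumed:
 *
 *	struct parent { struct kref kref; };
 *	struct child  { struct kref kref; struct parent *parent; };
 *
 *	static void parent_release(struct kref *kref)
 *	{
 *		kfree(container_of(kref, struct parent, kref));
 *	}
 *
 *	static void child_release(struct kref *kref)
 *	{
 *		struct child *child = container_of(kref, struct child, kref);
 *
 *		kref_put(&child->parent->kref, parent_release);
 *		kfree(child);
 *	}
 */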
/* One global retry thread, if we need to push back some bio and have it
@@ -2218,19 +2223,19 @@ static void do_retry(struct work_struct *ws)
spin_unlock_irq(&retry->lock);
list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct bio *bio = req->master_bio;
unsigned long start_time = req->start_time;
bool expected;
- expected =
+ expected =
expect(atomic_read(&req->completion_ref) == 0) &&
expect(req->rq_state & RQ_POSTPONED) &&
expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
(req->rq_state & RQ_LOCAL_ABORTED) != 0);
if (!expected)
- dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
+ drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
req, atomic_read(&req->completion_ref),
req->rq_state);
@@ -2254,8 +2259,8 @@ static void do_retry(struct work_struct *ws)
/* We are not just doing generic_make_request(),
* as we want to keep the start_time information. */
- inc_ap_bio(mdev);
- __drbd_make_request(mdev, bio, start_time);
+ inc_ap_bio(device);
+ __drbd_make_request(device, bio, start_time);
}
}
@@ -2269,17 +2274,38 @@ void drbd_restart_request(struct drbd_request *req)
/* Drop the extra reference that would otherwise
* have been dropped by complete_master_bio.
* do_retry() needs to grab a new one. */
- dec_ap_bio(req->w.mdev);
+ dec_ap_bio(req->device);
queue_work(retry.wq, &retry.worker);
}
+void drbd_destroy_resource(struct kref *kref)
+{
+ struct drbd_resource *resource =
+ container_of(kref, struct drbd_resource, kref);
+
+ idr_destroy(&resource->devices);
+ free_cpumask_var(resource->cpu_mask);
+ kfree(resource->name);
+ kfree(resource);
+}
+
+void drbd_free_resource(struct drbd_resource *resource)
+{
+ struct drbd_connection *connection, *tmp;
+
+ for_each_connection_safe(connection, tmp, resource) {
+ list_del(&connection->connections);
+ kref_put(&connection->kref, drbd_destroy_connection);
+ }
+ kref_put(&resource->kref, drbd_destroy_resource);
+}
static void drbd_cleanup(void)
{
unsigned int i;
- struct drbd_conf *mdev;
- struct drbd_tconn *tconn, *tmp;
+ struct drbd_device *device;
+ struct drbd_resource *resource, *tmp;
unregister_reboot_notifier(&drbd_notifier);
@@ -2299,26 +2325,19 @@ static void drbd_cleanup(void)
drbd_genl_unregister();
- idr_for_each_entry(&minors, mdev, i) {
- idr_remove(&minors, mdev_to_minor(mdev));
- idr_remove(&mdev->tconn->volumes, mdev->vnr);
- destroy_workqueue(mdev->submit.wq);
- del_gendisk(mdev->vdisk);
- /* synchronize_rcu(); No other threads running at this point */
- kref_put(&mdev->kref, &drbd_minor_destroy);
- }
+ idr_for_each_entry(&drbd_devices, device, i)
+ drbd_delete_device(device);
/* not _rcu since, no other updater anymore. Genl already unregistered */
- list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
- list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */
- /* synchronize_rcu(); */
- kref_put(&tconn->kref, &conn_destroy);
+ for_each_resource_safe(resource, tmp, &drbd_resources) {
+ list_del(&resource->resources);
+ drbd_free_resource(resource);
}
drbd_destroy_mempools();
unregister_blkdev(DRBD_MAJOR, "drbd");
- idr_destroy(&minors);
+ idr_destroy(&drbd_devices);
printk(KERN_INFO "drbd: module cleanup done.\n");
}
@@ -2332,49 +2351,50 @@ static void drbd_cleanup(void)
*/
static int drbd_congested(void *congested_data, int bdi_bits)
{
- struct drbd_conf *mdev = congested_data;
+ struct drbd_device *device = congested_data;
struct request_queue *q;
char reason = '-';
int r = 0;
- if (!may_inc_ap_bio(mdev)) {
+ if (!may_inc_ap_bio(device)) {
/* DRBD has frozen IO */
r = bdi_bits;
reason = 'd';
goto out;
}
- if (test_bit(CALLBACK_PENDING, &mdev->tconn->flags)) {
+ if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
r |= (1 << BDI_async_congested);
/* Without good local data, we would need to read from remote,
* and that would need the worker thread as well, which is
* currently blocked waiting for that usermode helper to
* finish.
*/
- if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+ if (!get_ldev_if_state(device, D_UP_TO_DATE))
r |= (1 << BDI_sync_congested);
else
- put_ldev(mdev);
+ put_ldev(device);
r &= bdi_bits;
reason = 'c';
goto out;
}
- if (get_ldev(mdev)) {
- q = bdev_get_queue(mdev->ldev->backing_bdev);
+ if (get_ldev(device)) {
+ q = bdev_get_queue(device->ldev->backing_bdev);
r = bdi_congested(&q->backing_dev_info, bdi_bits);
- put_ldev(mdev);
+ put_ldev(device);
if (r)
reason = 'b';
}
- if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
+ if (bdi_bits & (1 << BDI_async_congested) &&
+ test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
r |= (1 << BDI_async_congested);
reason = reason == 'b' ? 'a' : 'n';
}
out:
- mdev->congestion_reason = reason;
+ device->congestion_reason = reason;
return r;
}
@@ -2385,45 +2405,72 @@ static void drbd_init_workqueue(struct drbd_work_queue* wq)
init_waitqueue_head(&wq->q_wait);
}
-struct drbd_tconn *conn_get_by_name(const char *name)
+struct completion_work {
+ struct drbd_work w;
+ struct completion done;
+};
+
+static int w_complete(struct drbd_work *w, int cancel)
+{
+ struct completion_work *completion_work =
+ container_of(w, struct completion_work, w);
+
+ complete(&completion_work->done);
+ return 0;
+}
+
+void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
+{
+ struct completion_work completion_work;
+
+ completion_work.w.cb = w_complete;
+ init_completion(&completion_work.done);
+ drbd_queue_work(work_queue, &completion_work.w);
+ wait_for_completion(&completion_work.done);
+}
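/*
 * [Illustrative usage, not part of the patch] drbd_flush_workqueue() drains a
 * drbd_work_queue by queueing a sentinel item whose callback completes a
 * struct completion; once wait_for_completion() returns, every work item
 * queued before the call has been executed.  A hypothetical call site:
 *
 *	drbd_flush_workqueue(&connection->sender_work);
 *
 * after which all previously queued sender work is guaranteed to have run.
 */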
+
+struct drbd_resource *drbd_find_resource(const char *name)
{
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
if (!name || !name[0])
return NULL;
rcu_read_lock();
- list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
- if (!strcmp(tconn->name, name)) {
- kref_get(&tconn->kref);
+ for_each_resource_rcu(resource, &drbd_resources) {
+ if (!strcmp(resource->name, name)) {
+ kref_get(&resource->kref);
goto found;
}
}
- tconn = NULL;
+ resource = NULL;
found:
rcu_read_unlock();
- return tconn;
+ return resource;
}
-struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
void *peer_addr, int peer_addr_len)
{
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
rcu_read_lock();
- list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
- if (tconn->my_addr_len == my_addr_len &&
- tconn->peer_addr_len == peer_addr_len &&
- !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
- !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
- kref_get(&tconn->kref);
- goto found;
+ for_each_resource_rcu(resource, &drbd_resources) {
+ for_each_connection_rcu(connection, resource) {
+ if (connection->my_addr_len == my_addr_len &&
+ connection->peer_addr_len == peer_addr_len &&
+ !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
+ !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
+ kref_get(&connection->kref);
+ goto found;
+ }
}
}
- tconn = NULL;
+ connection = NULL;
found:
rcu_read_unlock();
- return tconn;
+ return connection;
}
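/*
 * [Illustrative usage, not part of the patch] Both lookup helpers above take
 * a reference under rcu_read_lock() before returning; the caller owns that
 * reference and must drop it when done, e.g. for the resource lookup:
 *
 *	struct drbd_resource *resource = drbd_find_resource(name);
 *
 *	if (resource) {
 *		...
 *		kref_put(&resource->kref, drbd_destroy_resource);
 *	}
 */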
static int drbd_alloc_socket(struct drbd_socket *socket)
@@ -2443,29 +2490,30 @@ static void drbd_free_socket(struct drbd_socket *socket)
free_page((unsigned long) socket->rbuf);
}
-void conn_free_crypto(struct drbd_tconn *tconn)
+void conn_free_crypto(struct drbd_connection *connection)
{
- drbd_free_sock(tconn);
+ drbd_free_sock(connection);
- crypto_free_hash(tconn->csums_tfm);
- crypto_free_hash(tconn->verify_tfm);
- crypto_free_hash(tconn->cram_hmac_tfm);
- crypto_free_hash(tconn->integrity_tfm);
- crypto_free_hash(tconn->peer_integrity_tfm);
- kfree(tconn->int_dig_in);
- kfree(tconn->int_dig_vv);
+ crypto_free_hash(connection->csums_tfm);
+ crypto_free_hash(connection->verify_tfm);
+ crypto_free_hash(connection->cram_hmac_tfm);
+ crypto_free_hash(connection->integrity_tfm);
+ crypto_free_hash(connection->peer_integrity_tfm);
+ kfree(connection->int_dig_in);
+ kfree(connection->int_dig_vv);
- tconn->csums_tfm = NULL;
- tconn->verify_tfm = NULL;
- tconn->cram_hmac_tfm = NULL;
- tconn->integrity_tfm = NULL;
- tconn->peer_integrity_tfm = NULL;
- tconn->int_dig_in = NULL;
- tconn->int_dig_vv = NULL;
+ connection->csums_tfm = NULL;
+ connection->verify_tfm = NULL;
+ connection->cram_hmac_tfm = NULL;
+ connection->integrity_tfm = NULL;
+ connection->peer_integrity_tfm = NULL;
+ connection->int_dig_in = NULL;
+ connection->int_dig_vv = NULL;
}
-int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
+int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
{
+ struct drbd_connection *connection;
cpumask_var_t new_cpu_mask;
int err;
@@ -2478,22 +2526,24 @@ int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
/* silently ignore cpu mask on UP kernel */
if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
- /* FIXME: Get rid of constant 32 here */
- err = bitmap_parse(res_opts->cpu_mask, 32,
+ err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
cpumask_bits(new_cpu_mask), nr_cpu_ids);
if (err) {
- conn_warn(tconn, "bitmap_parse() failed with %d\n", err);
+ drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
/* retcode = ERR_CPU_MASK_PARSE; */
goto fail;
}
}
- tconn->res_opts = *res_opts;
- if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
- cpumask_copy(tconn->cpu_mask, new_cpu_mask);
- drbd_calc_cpu_mask(tconn);
- tconn->receiver.reset_cpu_mask = 1;
- tconn->asender.reset_cpu_mask = 1;
- tconn->worker.reset_cpu_mask = 1;
+ resource->res_opts = *res_opts;
+ if (cpumask_empty(new_cpu_mask))
+ drbd_calc_cpu_mask(&new_cpu_mask);
+ if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
+ cpumask_copy(resource->cpu_mask, new_cpu_mask);
+ for_each_connection_rcu(connection, resource) {
+ connection->receiver.reset_cpu_mask = 1;
+ connection->asender.reset_cpu_mask = 1;
+ connection->worker.reset_cpu_mask = 1;
+ }
}
err = 0;
@@ -2503,146 +2553,177 @@ fail:
}
+struct drbd_resource *drbd_create_resource(const char *name)
+{
+ struct drbd_resource *resource;
+
+ resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
+ if (!resource)
+ goto fail;
+ resource->name = kstrdup(name, GFP_KERNEL);
+ if (!resource->name)
+ goto fail_free_resource;
+ if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
+ goto fail_free_name;
+ kref_init(&resource->kref);
+ idr_init(&resource->devices);
+ INIT_LIST_HEAD(&resource->connections);
+ list_add_tail_rcu(&resource->resources, &drbd_resources);
+ mutex_init(&resource->conf_update);
+ spin_lock_init(&resource->req_lock);
+ return resource;
+
+fail_free_name:
+ kfree(resource->name);
+fail_free_resource:
+ kfree(resource);
+fail:
+ return NULL;
+}
+
/* caller must be under genl_lock() */
-struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
+struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
- tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
- if (!tconn)
+ connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
+ if (!connection)
return NULL;
- tconn->name = kstrdup(name, GFP_KERNEL);
- if (!tconn->name)
+ if (drbd_alloc_socket(&connection->data))
goto fail;
-
- if (drbd_alloc_socket(&tconn->data))
- goto fail;
- if (drbd_alloc_socket(&tconn->meta))
+ if (drbd_alloc_socket(&connection->meta))
goto fail;
- if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
+ connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
+ if (!connection->current_epoch)
goto fail;
- if (set_resource_options(tconn, res_opts))
- goto fail;
+ INIT_LIST_HEAD(&connection->transfer_log);
- tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
- if (!tconn->current_epoch)
- goto fail;
+ INIT_LIST_HEAD(&connection->current_epoch->list);
+ connection->epochs = 1;
+ spin_lock_init(&connection->epoch_lock);
+ connection->write_ordering = WO_bdev_flush;
- INIT_LIST_HEAD(&tconn->transfer_log);
+ connection->send.seen_any_write_yet = false;
+ connection->send.current_epoch_nr = 0;
+ connection->send.current_epoch_writes = 0;
- INIT_LIST_HEAD(&tconn->current_epoch->list);
- tconn->epochs = 1;
- spin_lock_init(&tconn->epoch_lock);
- tconn->write_ordering = WO_bdev_flush;
+ resource = drbd_create_resource(name);
+ if (!resource)
+ goto fail;
- tconn->send.seen_any_write_yet = false;
- tconn->send.current_epoch_nr = 0;
- tconn->send.current_epoch_writes = 0;
+ connection->cstate = C_STANDALONE;
+ mutex_init(&connection->cstate_mutex);
+ init_waitqueue_head(&connection->ping_wait);
+ idr_init(&connection->peer_devices);
- tconn->cstate = C_STANDALONE;
- mutex_init(&tconn->cstate_mutex);
- spin_lock_init(&tconn->req_lock);
- mutex_init(&tconn->conf_update);
- init_waitqueue_head(&tconn->ping_wait);
- idr_init(&tconn->volumes);
+ drbd_init_workqueue(&connection->sender_work);
+ mutex_init(&connection->data.mutex);
+ mutex_init(&connection->meta.mutex);
- drbd_init_workqueue(&tconn->sender_work);
- mutex_init(&tconn->data.mutex);
- mutex_init(&tconn->meta.mutex);
+ drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
+ connection->receiver.connection = connection;
+ drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
+ connection->worker.connection = connection;
+ drbd_thread_init(resource, &connection->asender, drbd_asender, "asender");
+ connection->asender.connection = connection;
- drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
- drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
- drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
+ kref_init(&connection->kref);
- kref_init(&tconn->kref);
- list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);
+ connection->resource = resource;
- return tconn;
+ if (set_resource_options(resource, res_opts))
+ goto fail_resource;
-fail:
- kfree(tconn->current_epoch);
- free_cpumask_var(tconn->cpu_mask);
- drbd_free_socket(&tconn->meta);
- drbd_free_socket(&tconn->data);
- kfree(tconn->name);
- kfree(tconn);
+ kref_get(&resource->kref);
+ list_add_tail_rcu(&connection->connections, &resource->connections);
+ return connection;
+fail_resource:
+ list_del(&resource->resources);
+ drbd_free_resource(resource);
+fail:
+ kfree(connection->current_epoch);
+ drbd_free_socket(&connection->meta);
+ drbd_free_socket(&connection->data);
+ kfree(connection);
return NULL;
}
-void conn_destroy(struct kref *kref)
+void drbd_destroy_connection(struct kref *kref)
{
- struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
+ struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
+ struct drbd_resource *resource = connection->resource;
- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
- kfree(tconn->current_epoch);
+ if (atomic_read(&connection->current_epoch->epoch_size) != 0)
+ drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
+ kfree(connection->current_epoch);
- idr_destroy(&tconn->volumes);
+ idr_destroy(&connection->peer_devices);
- free_cpumask_var(tconn->cpu_mask);
- drbd_free_socket(&tconn->meta);
- drbd_free_socket(&tconn->data);
- kfree(tconn->name);
- kfree(tconn->int_dig_in);
- kfree(tconn->int_dig_vv);
- kfree(tconn);
+ drbd_free_socket(&connection->meta);
+ drbd_free_socket(&connection->data);
+ kfree(connection->int_dig_in);
+ kfree(connection->int_dig_vv);
+ kfree(connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
}
-int init_submitter(struct drbd_conf *mdev)
+static int init_submitter(struct drbd_device *device)
{
/* opencoded create_singlethread_workqueue(),
* to be able to say "drbd%d", ..., minor */
- mdev->submit.wq = alloc_workqueue("drbd%u_submit",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1, mdev->minor);
- if (!mdev->submit.wq)
+ device->submit.wq = alloc_workqueue("drbd%u_submit",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1, device->minor);
+ if (!device->submit.wq)
return -ENOMEM;
- INIT_WORK(&mdev->submit.worker, do_submit);
- spin_lock_init(&mdev->submit.lock);
- INIT_LIST_HEAD(&mdev->submit.writes);
+ INIT_WORK(&device->submit.worker, do_submit);
+ spin_lock_init(&device->submit.lock);
+ INIT_LIST_HEAD(&device->submit.writes);
return 0;
}
-enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
+enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr)
{
- struct drbd_conf *mdev;
+ struct drbd_connection *connection;
+ struct drbd_device *device;
+ struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
struct request_queue *q;
- int vnr_got = vnr;
- int minor_got = minor;
+ int id;
enum drbd_ret_code err = ERR_NOMEM;
- mdev = minor_to_mdev(minor);
- if (mdev)
+ device = minor_to_device(minor);
+ if (device)
return ERR_MINOR_EXISTS;
/* GFP_KERNEL, we are outside of all write-out paths */
- mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
- if (!mdev)
+ device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
+ if (!device)
return ERR_NOMEM;
+ kref_init(&device->kref);
- kref_get(&tconn->kref);
- mdev->tconn = tconn;
+ kref_get(&resource->kref);
+ device->resource = resource;
+ device->minor = minor;
+ device->vnr = vnr;
- mdev->minor = minor;
- mdev->vnr = vnr;
-
- drbd_init_set_defaults(mdev);
+ drbd_init_set_defaults(device);
q = blk_alloc_queue(GFP_KERNEL);
if (!q)
goto out_no_q;
- mdev->rq_queue = q;
- q->queuedata = mdev;
+ device->rq_queue = q;
+ q->queuedata = device;
disk = alloc_disk(1);
if (!disk)
goto out_no_disk;
- mdev->vdisk = disk;
+ device->vdisk = disk;
set_disk_ro(disk, true);
@@ -2651,14 +2732,14 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
disk->first_minor = minor;
disk->fops = &drbd_ops;
sprintf(disk->disk_name, "drbd%d", minor);
- disk->private_data = mdev;
+ disk->private_data = device;
- mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
+ device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
/* we have no partitions. we contain only ourselves. */
- mdev->this_bdev->bd_contains = mdev->this_bdev;
+ device->this_bdev->bd_contains = device->this_bdev;
q->backing_dev_info.congested_fn = drbd_congested;
- q->backing_dev_info.congested_data = mdev;
+ q->backing_dev_info.congested_data = device;
blk_queue_make_request(q, drbd_make_request);
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
@@ -2667,70 +2748,125 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec);
- q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
+ q->queue_lock = &resource->req_lock;
- mdev->md_io_page = alloc_page(GFP_KERNEL);
- if (!mdev->md_io_page)
+ device->md_io_page = alloc_page(GFP_KERNEL);
+ if (!device->md_io_page)
goto out_no_io_page;
- if (drbd_bm_init(mdev))
+ if (drbd_bm_init(device))
goto out_no_bitmap;
- mdev->read_requests = RB_ROOT;
- mdev->write_requests = RB_ROOT;
+ device->read_requests = RB_ROOT;
+ device->write_requests = RB_ROOT;
- minor_got = idr_alloc(&minors, mdev, minor, minor + 1, GFP_KERNEL);
- if (minor_got < 0) {
- if (minor_got == -ENOSPC) {
+ id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
+ if (id < 0) {
+ if (id == -ENOSPC) {
err = ERR_MINOR_EXISTS;
drbd_msg_put_info("requested minor exists already");
}
goto out_no_minor_idr;
}
+ kref_get(&device->kref);
- vnr_got = idr_alloc(&tconn->volumes, mdev, vnr, vnr + 1, GFP_KERNEL);
- if (vnr_got < 0) {
- if (vnr_got == -ENOSPC) {
- err = ERR_INVALID_REQUEST;
- drbd_msg_put_info("requested volume exists already");
+ id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
+ if (id < 0) {
+ if (id == -ENOSPC) {
+ err = ERR_MINOR_EXISTS;
+ drbd_msg_put_info("requested minor exists already");
}
goto out_idr_remove_minor;
}
+ kref_get(&device->kref);
+
+ INIT_LIST_HEAD(&device->peer_devices);
+ for_each_connection(connection, resource) {
+ peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
+ if (!peer_device)
+ goto out_idr_remove_from_resource;
+ peer_device->connection = connection;
+ peer_device->device = device;
+
+ list_add(&peer_device->peer_devices, &device->peer_devices);
+ kref_get(&device->kref);
- if (init_submitter(mdev)) {
+ id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
+ if (id < 0) {
+ if (id == -ENOSPC) {
+ err = ERR_INVALID_REQUEST;
+ drbd_msg_put_info("requested volume exists already");
+ }
+ goto out_idr_remove_from_resource;
+ }
+ kref_get(&connection->kref);
+ }
+
+ if (init_submitter(device)) {
err = ERR_NOMEM;
drbd_msg_put_info("unable to create submit workqueue");
goto out_idr_remove_vol;
}
add_disk(disk);
- kref_init(&mdev->kref); /* one ref for both idrs and the the add_disk */
/* inherit the connection state */
- mdev->state.conn = tconn->cstate;
- if (mdev->state.conn == C_WF_REPORT_PARAMS)
- drbd_connected(mdev);
+ device->state.conn = first_connection(resource)->cstate;
+ if (device->state.conn == C_WF_REPORT_PARAMS) {
+ for_each_peer_device(peer_device, device)
+ drbd_connected(peer_device);
+ }
return NO_ERROR;
out_idr_remove_vol:
- idr_remove(&tconn->volumes, vnr_got);
+ idr_remove(&connection->peer_devices, vnr);
+out_idr_remove_from_resource:
+ for_each_connection(connection, resource) {
+ peer_device = idr_find(&connection->peer_devices, vnr);
+ if (peer_device) {
+ idr_remove(&connection->peer_devices, vnr);
+ kref_put(&connection->kref, drbd_destroy_connection);
+ }
+ }
+ for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
+ list_del(&peer_device->peer_devices);
+ kfree(peer_device);
+ }
+ idr_remove(&resource->devices, vnr);
out_idr_remove_minor:
- idr_remove(&minors, minor_got);
+ idr_remove(&drbd_devices, minor);
synchronize_rcu();
out_no_minor_idr:
- drbd_bm_cleanup(mdev);
+ drbd_bm_cleanup(device);
out_no_bitmap:
- __free_page(mdev->md_io_page);
+ __free_page(device->md_io_page);
out_no_io_page:
put_disk(disk);
out_no_disk:
blk_cleanup_queue(q);
out_no_q:
- kfree(mdev);
- kref_put(&tconn->kref, &conn_destroy);
+ kref_put(&resource->kref, drbd_destroy_resource);
+ kfree(device);
return err;
}
+void drbd_delete_device(struct drbd_device *device)
+{
+ struct drbd_resource *resource = device->resource;
+ struct drbd_connection *connection;
+ int refs = 3;
+
+ for_each_connection(connection, resource) {
+ idr_remove(&connection->peer_devices, device->vnr);
+ refs++;
+ }
+ idr_remove(&resource->devices, device->vnr);
+ idr_remove(&drbd_devices, device_to_minor(device));
+ del_gendisk(device->vdisk);
+ synchronize_rcu();
+ kref_sub(&device->kref, refs, drbd_destroy_device);
+}
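/*
 * [Illustrative note, not part of the patch] kref_sub() used above drops
 * several references in one go: it subtracts 'refs' from the count and only
 * invokes the release callback (here drbd_destroy_device) if the count hits
 * zero, i.e. the effect is the same as calling
 *
 *	kref_put(&device->kref, drbd_destroy_device);
 *
 * 'refs' times, done in a single atomic subtraction.
 */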
+
int __init drbd_init(void)
{
int err;
@@ -2761,10 +2897,10 @@ int __init drbd_init(void)
init_waitqueue_head(&drbd_pp_wait);
drbd_proc = NULL; /* play safe for drbd_cleanup */
- idr_init(&minors);
+ idr_init(&drbd_devices);
rwlock_init(&global_state_lock);
- INIT_LIST_HEAD(&drbd_tconns);
+ INIT_LIST_HEAD(&drbd_resources);
err = drbd_genl_register();
if (err) {
@@ -2822,37 +2958,39 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
kfree(ldev);
}
-void drbd_free_sock(struct drbd_tconn *tconn)
+void drbd_free_sock(struct drbd_connection *connection)
{
- if (tconn->data.socket) {
- mutex_lock(&tconn->data.mutex);
- kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
- sock_release(tconn->data.socket);
- tconn->data.socket = NULL;
- mutex_unlock(&tconn->data.mutex);
+ if (connection->data.socket) {
+ mutex_lock(&connection->data.mutex);
+ kernel_sock_shutdown(connection->data.socket, SHUT_RDWR);
+ sock_release(connection->data.socket);
+ connection->data.socket = NULL;
+ mutex_unlock(&connection->data.mutex);
}
- if (tconn->meta.socket) {
- mutex_lock(&tconn->meta.mutex);
- kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
- sock_release(tconn->meta.socket);
- tconn->meta.socket = NULL;
- mutex_unlock(&tconn->meta.mutex);
+ if (connection->meta.socket) {
+ mutex_lock(&connection->meta.mutex);
+ kernel_sock_shutdown(connection->meta.socket, SHUT_RDWR);
+ sock_release(connection->meta.socket);
+ connection->meta.socket = NULL;
+ mutex_unlock(&connection->meta.mutex);
}
}
/* meta data management */
-void conn_md_sync(struct drbd_tconn *tconn)
+void conn_md_sync(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+
+ kref_get(&device->kref);
rcu_read_unlock();
- drbd_md_sync(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_md_sync(device);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
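/*
 * [Illustrative note, not part of the patch] The loop above shows the idiom
 * for sleeping inside an RCU-protected iteration: pin the current element
 * with kref_get(), drop rcu_read_lock() around the blocking call, then
 * retake it before continuing.  Generic shape (hypothetical element type):
 *
 *	rcu_read_lock();
 *	idr_for_each_entry(&some_idr, elem, id) {
 *		kref_get(&elem->kref);
 *		rcu_read_unlock();
 *		do_something_that_may_sleep(elem);
 *		kref_put(&elem->kref, elem_release);
 *		rcu_read_lock();
 *	}
 *	rcu_read_unlock();
 */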
@@ -2883,7 +3021,7 @@ struct meta_data_on_disk {
-void drbd_md_write(struct drbd_conf *mdev, void *b)
+void drbd_md_write(struct drbd_device *device, void *b)
{
struct meta_data_on_disk *buffer = b;
sector_t sector;
@@ -2891,39 +3029,39 @@ void drbd_md_write(struct drbd_conf *mdev, void *b)
memset(buffer, 0, sizeof(*buffer));
- buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
+ buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
for (i = UI_CURRENT; i < UI_SIZE; i++)
- buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
- buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
+ buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
+ buffer->flags = cpu_to_be32(device->ldev->md.flags);
buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
- buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
- buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
- buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
+ buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
+ buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
+ buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
- buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
+ buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
- buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
- buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
+ buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
+ buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
- buffer->al_stripes = cpu_to_be32(mdev->ldev->md.al_stripes);
- buffer->al_stripe_size_4k = cpu_to_be32(mdev->ldev->md.al_stripe_size_4k);
+ buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
+ buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
- D_ASSERT(drbd_md_ss(mdev->ldev) == mdev->ldev->md.md_offset);
- sector = mdev->ldev->md.md_offset;
+ D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
+ sector = device->ldev->md.md_offset;
- if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
/* this was a try anyways ... */
- dev_err(DEV, "meta data update failed!\n");
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ drbd_err(device, "meta data update failed!\n");
+ drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
}
}
/**
* drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
- * @mdev: DRBD device.
+ * @device: DRBD device.
*/
-void drbd_md_sync(struct drbd_conf *mdev)
+void drbd_md_sync(struct drbd_device *device)
{
struct meta_data_on_disk *buffer;
@@ -2931,32 +3069,32 @@ void drbd_md_sync(struct drbd_conf *mdev)
BUILD_BUG_ON(UI_SIZE != 4);
BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
- del_timer(&mdev->md_sync_timer);
+ del_timer(&device->md_sync_timer);
/* timer may be rearmed by drbd_md_mark_dirty() now. */
- if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
+ if (!test_and_clear_bit(MD_DIRTY, &device->flags))
return;
/* We use here D_FAILED and not D_ATTACHING because we try to write
* metadata even if we detach due to a disk failure! */
- if (!get_ldev_if_state(mdev, D_FAILED))
+ if (!get_ldev_if_state(device, D_FAILED))
return;
- buffer = drbd_md_get_buffer(mdev);
+ buffer = drbd_md_get_buffer(device);
if (!buffer)
goto out;
- drbd_md_write(mdev, buffer);
+ drbd_md_write(device, buffer);
- /* Update mdev->ldev->md.la_size_sect,
+ /* Update device->ldev->md.la_size_sect,
* since we updated it on metadata. */
- mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
+ device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
- drbd_md_put_buffer(mdev);
+ drbd_md_put_buffer(device);
out:
- put_ldev(mdev);
+ put_ldev(device);
}
-static int check_activity_log_stripe_size(struct drbd_conf *mdev,
+static int check_activity_log_stripe_size(struct drbd_device *device,
struct meta_data_on_disk *on_disk,
struct drbd_md *in_core)
{
@@ -2996,12 +3134,12 @@ static int check_activity_log_stripe_size(struct drbd_conf *mdev,
return 0;
err:
- dev_err(DEV, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
+ drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
al_stripes, al_stripe_size_4k);
return -EINVAL;
}
-static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
sector_t capacity = drbd_get_capacity(bdev->md_bdev);
struct drbd_md *in_core = &bdev->md;
@@ -3068,7 +3206,7 @@ static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_d
return 0;
err:
- dev_err(DEV, "meta data offsets don't make sense: idx=%d "
+ drbd_err(device, "meta data offsets don't make sense: idx=%d "
"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
in_core->meta_dev_idx,
@@ -3083,25 +3221,25 @@ err:
/**
* drbd_md_read() - Reads in the meta data super block
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @bdev: Device from which the meta data should be read in.
*
* Return NO_ERROR on success, and an enum drbd_ret_code in case
* something goes wrong.
*
* Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
- * even before @bdev is assigned to @mdev->ldev.
+ * even before @bdev is assigned to @device->ldev.
*/
-int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
struct meta_data_on_disk *buffer;
u32 magic, flags;
int i, rv = NO_ERROR;
- if (mdev->state.disk != D_DISKLESS)
+ if (device->state.disk != D_DISKLESS)
return ERR_DISK_CONFIGURED;
- buffer = drbd_md_get_buffer(mdev);
+ buffer = drbd_md_get_buffer(device);
if (!buffer)
return ERR_NOMEM;
@@ -3110,10 +3248,10 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
bdev->md.md_offset = drbd_md_ss(bdev);
- if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
+ if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
called BEFORE disk is attached */
- dev_err(DEV, "Error while reading metadata.\n");
+ drbd_err(device, "Error while reading metadata.\n");
rv = ERR_IO_MD_DISK;
goto err;
}
@@ -3123,7 +3261,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
(magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
/* btw: that's Activity Log clean, not "all" clean. */
- dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
+ drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
rv = ERR_MD_UNCLEAN;
goto err;
}
@@ -3131,14 +3269,14 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
rv = ERR_MD_INVALID;
if (magic != DRBD_MD_MAGIC_08) {
if (magic == DRBD_MD_MAGIC_07)
- dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
+ drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
else
- dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
+ drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
goto err;
}
if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
- dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
+ drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
goto err;
}
@@ -3155,182 +3293,182 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
- if (check_activity_log_stripe_size(mdev, buffer, &bdev->md))
+ if (check_activity_log_stripe_size(device, buffer, &bdev->md))
goto err;
- if (check_offsets_and_sizes(mdev, bdev))
+ if (check_offsets_and_sizes(device, bdev))
goto err;
if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
- dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
+ drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
goto err;
}
if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
- dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
+ drbd_err(device, "unexpected md_size: %u (expected %u)\n",
be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
goto err;
}
rv = NO_ERROR;
- spin_lock_irq(&mdev->tconn->req_lock);
- if (mdev->state.conn < C_CONNECTED) {
+ spin_lock_irq(&device->resource->req_lock);
+ if (device->state.conn < C_CONNECTED) {
unsigned int peer;
peer = be32_to_cpu(buffer->la_peer_max_bio_size);
peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
- mdev->peer_max_bio_size = peer;
+ device->peer_max_bio_size = peer;
}
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
err:
- drbd_md_put_buffer(mdev);
+ drbd_md_put_buffer(device);
return rv;
}
/**
* drbd_md_mark_dirty() - Mark meta data super block as dirty
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Call this function if you change anything that should be written to
* the meta-data super block. This function sets MD_DIRTY, and starts a
* timer that ensures that within five seconds you have to call drbd_md_sync().
*/
#ifdef DEBUG
-void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
+void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
{
- if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
- mod_timer(&mdev->md_sync_timer, jiffies + HZ);
- mdev->last_md_mark_dirty.line = line;
- mdev->last_md_mark_dirty.func = func;
+ if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
+ mod_timer(&device->md_sync_timer, jiffies + HZ);
+ device->last_md_mark_dirty.line = line;
+ device->last_md_mark_dirty.func = func;
}
}
#else
-void drbd_md_mark_dirty(struct drbd_conf *mdev)
+void drbd_md_mark_dirty(struct drbd_device *device)
{
- if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
- mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
+ if (!test_and_set_bit(MD_DIRTY, &device->flags))
+ mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
}
#endif
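/*
 * [Illustrative usage, not part of the patch] The usual pattern is: change
 * some in-core metadata, mark it dirty, and either sync explicitly or let
 * md_sync_timer_fn() (see below) queue the sync within a few seconds:
 *
 *	device->ldev->md.device_uuid = new_uuid;   (hypothetical field update)
 *	drbd_md_mark_dirty(device);
 *	...
 *	drbd_md_sync(device);   (or let the md_sync_timer trigger it)
 */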
-void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{
int i;
for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
- mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
+ device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
}
-void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
if (idx == UI_CURRENT) {
- if (mdev->state.role == R_PRIMARY)
+ if (device->state.role == R_PRIMARY)
val |= 1;
else
val &= ~((u64)1);
- drbd_set_ed_uuid(mdev, val);
+ drbd_set_ed_uuid(device, val);
}
- mdev->ldev->md.uuid[idx] = val;
- drbd_md_mark_dirty(mdev);
+ device->ldev->md.uuid[idx] = val;
+ drbd_md_mark_dirty(device);
}
-void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
unsigned long flags;
- spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
- __drbd_uuid_set(mdev, idx, val);
- spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+ __drbd_uuid_set(device, idx, val);
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
-void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
unsigned long flags;
- spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
- if (mdev->ldev->md.uuid[idx]) {
- drbd_uuid_move_history(mdev);
- mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+ if (device->ldev->md.uuid[idx]) {
+ drbd_uuid_move_history(device);
+ device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
}
- __drbd_uuid_set(mdev, idx, val);
- spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+ __drbd_uuid_set(device, idx, val);
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
/**
* drbd_uuid_new_current() - Creates a new current UUID
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Creates a new current UUID, and rotates the old current UUID into
* the bitmap slot. Causes an incremental resync upon next connect.
*/
-void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
{
u64 val;
unsigned long long bm_uuid;
get_random_bytes(&val, sizeof(u64));
- spin_lock_irq(&mdev->ldev->md.uuid_lock);
- bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ spin_lock_irq(&device->ldev->md.uuid_lock);
+ bm_uuid = device->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
- dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
+ drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
- mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
- __drbd_uuid_set(mdev, UI_CURRENT, val);
- spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+ device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
+ __drbd_uuid_set(device, UI_CURRENT, val);
+ spin_unlock_irq(&device->ldev->md.uuid_lock);
- drbd_print_uuids(mdev, "new current UUID");
+ drbd_print_uuids(device, "new current UUID");
/* get it to stable storage _now_ */
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
}
-void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
+void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
unsigned long flags;
- if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
+ if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
- spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (val == 0) {
- drbd_uuid_move_history(mdev);
- mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
- mdev->ldev->md.uuid[UI_BITMAP] = 0;
+ drbd_uuid_move_history(device);
+ device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
+ device->ldev->md.uuid[UI_BITMAP] = 0;
} else {
- unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
- dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
+ drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
- mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
+ device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
}
- spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
- drbd_md_mark_dirty(mdev);
+ drbd_md_mark_dirty(device);
}
/**
* drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Sets all bits in the bitmap and writes the whole bitmap to stable storage.
*/
-int drbd_bmio_set_n_write(struct drbd_conf *mdev)
+int drbd_bmio_set_n_write(struct drbd_device *device)
{
int rv = -EIO;
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- drbd_md_set_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
- drbd_bm_set_all(mdev);
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ drbd_md_set_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
+ drbd_bm_set_all(device);
- rv = drbd_bm_write(mdev);
+ rv = drbd_bm_write(device);
if (!rv) {
- drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
+ drbd_md_clear_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
}
- put_ldev(mdev);
+ put_ldev(device);
}
return rv;
@@ -3338,19 +3476,19 @@ int drbd_bmio_set_n_write(struct drbd_conf *mdev)
/**
* drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Clears all bits in the bitmap and writes the whole bitmap to stable storage.
*/
-int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
+int drbd_bmio_clear_n_write(struct drbd_device *device)
{
int rv = -EIO;
- drbd_resume_al(mdev);
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- drbd_bm_clear_all(mdev);
- rv = drbd_bm_write(mdev);
- put_ldev(mdev);
+ drbd_resume_al(device);
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ drbd_bm_clear_all(device);
+ rv = drbd_bm_write(device);
+ put_ldev(device);
}
return rv;
@@ -3358,50 +3496,52 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
static int w_bitmap_io(struct drbd_work *w, int unused)
{
- struct bm_io_work *work = container_of(w, struct bm_io_work, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, bm_io_work.w);
+ struct bm_io_work *work = &device->bm_io_work;
int rv = -EIO;
- D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
+ D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);
- if (get_ldev(mdev)) {
- drbd_bm_lock(mdev, work->why, work->flags);
- rv = work->io_fn(mdev);
- drbd_bm_unlock(mdev);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ drbd_bm_lock(device, work->why, work->flags);
+ rv = work->io_fn(device);
+ drbd_bm_unlock(device);
+ put_ldev(device);
}
- clear_bit_unlock(BITMAP_IO, &mdev->flags);
- wake_up(&mdev->misc_wait);
+ clear_bit_unlock(BITMAP_IO, &device->flags);
+ wake_up(&device->misc_wait);
if (work->done)
- work->done(mdev, rv);
+ work->done(device, rv);
- clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
+ clear_bit(BITMAP_IO_QUEUED, &device->flags);
work->why = NULL;
work->flags = 0;
return 0;
}
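/*
 * [Illustrative note, not part of the patch] With the w->mdev back-pointer
 * gone, work callbacks such as w_bitmap_io() above recover their device via
 * container_of() on the embedded drbd_work.  The generic shape, with a
 * hypothetical wrapper struct:
 *
 *	struct example_work { struct drbd_work w; int payload; };
 *
 *	static int example_cb(struct drbd_work *w, int cancel)
 *	{
 *		struct example_work *ew =
 *			container_of(w, struct example_work, w);
 *
 *		return ew->payload;
 *	}
 */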
-void drbd_ldev_destroy(struct drbd_conf *mdev)
+void drbd_ldev_destroy(struct drbd_device *device)
{
- lc_destroy(mdev->resync);
- mdev->resync = NULL;
- lc_destroy(mdev->act_log);
- mdev->act_log = NULL;
+ lc_destroy(device->resync);
+ device->resync = NULL;
+ lc_destroy(device->act_log);
+ device->act_log = NULL;
__no_warn(local,
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;);
+ drbd_free_bc(device->ldev);
+ device->ldev = NULL;);
- clear_bit(GO_DISKLESS, &mdev->flags);
+ clear_bit(GO_DISKLESS, &device->flags);
}
static int w_go_diskless(struct drbd_work *w, int unused)
{
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, go_diskless);
- D_ASSERT(mdev->state.disk == D_FAILED);
+ D_ASSERT(device, device->state.disk == D_FAILED);
/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
* inc/dec it frequently. Once we are D_DISKLESS, no one will touch
* the protected members anymore, though, so once put_ldev reaches zero
@@ -3420,27 +3560,27 @@ static int w_go_diskless(struct drbd_work *w, int unused)
* We still need to check if both bitmap and ldev are present, we may
* end up here after a failed attach, before ldev was even assigned.
*/
- if (mdev->bitmap && mdev->ldev) {
+ if (device->bitmap && device->ldev) {
/* An interrupted resync or similar is allowed to recount bits
* while we detach.
* Any modifications would not be expected anymore, though.
*/
- if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
+ if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
"detach", BM_LOCKED_TEST_ALLOWED)) {
- if (test_bit(WAS_READ_ERROR, &mdev->flags)) {
- drbd_md_set_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
+ if (test_bit(WAS_READ_ERROR, &device->flags)) {
+ drbd_md_set_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
}
}
}
- drbd_force_state(mdev, NS(disk, D_DISKLESS));
+ drbd_force_state(device, NS(disk, D_DISKLESS));
return 0;
}
/**
* drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @io_fn: IO callback to be called when bitmap IO is possible
* @done: callback to be called after the bitmap IO was performed
* @why: Descriptive text of the reason for doing the IO
@@ -3450,76 +3590,77 @@ static int w_go_diskless(struct drbd_work *w, int unused)
* called from worker context. It MUST NOT be used while a previous such
* work is still pending!
*/
-void drbd_queue_bitmap_io(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
- void (*done)(struct drbd_conf *, int),
+void drbd_queue_bitmap_io(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *),
+ void (*done)(struct drbd_device *, int),
char *why, enum bm_flag flags)
{
- D_ASSERT(current == mdev->tconn->worker.task);
+ D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
- D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
- D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
- D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
- if (mdev->bm_io_work.why)
- dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
- why, mdev->bm_io_work.why);
+ D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
+ D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
+ D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
+ if (device->bm_io_work.why)
+ drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
+ why, device->bm_io_work.why);
- mdev->bm_io_work.io_fn = io_fn;
- mdev->bm_io_work.done = done;
- mdev->bm_io_work.why = why;
- mdev->bm_io_work.flags = flags;
+ device->bm_io_work.io_fn = io_fn;
+ device->bm_io_work.done = done;
+ device->bm_io_work.why = why;
+ device->bm_io_work.flags = flags;
- spin_lock_irq(&mdev->tconn->req_lock);
- set_bit(BITMAP_IO, &mdev->flags);
- if (atomic_read(&mdev->ap_bio_cnt) == 0) {
- if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
+ spin_lock_irq(&device->resource->req_lock);
+ set_bit(BITMAP_IO, &device->flags);
+ if (atomic_read(&device->ap_bio_cnt) == 0) {
+ if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &device->bm_io_work.w);
}
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
}
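/*
 * [Illustrative usage, not part of the patch] From worker context a full
 * bitmap write could be queued like this (hypothetical call site; NULL means
 * no completion callback is wanted):
 *
 *	drbd_queue_bitmap_io(device, &drbd_bm_write, NULL,
 *			     "example bitmap write", BM_LOCKED_TEST_ALLOWED);
 */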
/**
* drbd_bitmap_io() - Does an IO operation on the whole bitmap
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @io_fn: IO callback to be called when bitmap IO is possible
* @why: Descriptive text of the reason for doing the IO
*
* freezes application IO while the actual IO operation runs. This
* function MAY NOT be called from worker context.
*/
-int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
+int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags)
{
int rv;
- D_ASSERT(current != mdev->tconn->worker.task);
+ D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
- drbd_suspend_io(mdev);
+ drbd_suspend_io(device);
- drbd_bm_lock(mdev, why, flags);
- rv = io_fn(mdev);
- drbd_bm_unlock(mdev);
+ drbd_bm_lock(device, why, flags);
+ rv = io_fn(device);
+ drbd_bm_unlock(device);
if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
- drbd_resume_io(mdev);
+ drbd_resume_io(device);
return rv;
}
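/*
 * [Illustrative usage, not part of the patch] Outside of worker context the
 * synchronous variant is used instead, e.g. to set all bits and write them
 * out (hypothetical call site):
 *
 *	rv = drbd_bitmap_io(device, &drbd_bmio_set_n_write,
 *			    "example set_n_write", BM_LOCKED_SET_ALLOWED);
 */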
-void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
+void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{
- if ((mdev->ldev->md.flags & flag) != flag) {
- drbd_md_mark_dirty(mdev);
- mdev->ldev->md.flags |= flag;
+ if ((device->ldev->md.flags & flag) != flag) {
+ drbd_md_mark_dirty(device);
+ device->ldev->md.flags |= flag;
}
}
-void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
+void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{
- if ((mdev->ldev->md.flags & flag) != 0) {
- drbd_md_mark_dirty(mdev);
- mdev->ldev->md.flags &= ~flag;
+ if ((device->ldev->md.flags & flag) != 0) {
+ drbd_md_mark_dirty(device);
+ device->ldev->md.flags &= ~flag;
}
}
int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
@@ -3529,23 +3670,25 @@ int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
static void md_sync_timer_fn(unsigned long data)
{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
+ struct drbd_device *device = (struct drbd_device *) data;
/* must not double-queue! */
- if (list_empty(&mdev->md_sync_work.list))
- drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
+ if (list_empty(&device->md_sync_work.list))
+ drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
+ &device->md_sync_work);
}
static int w_md_sync(struct drbd_work *w, int unused)
{
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, md_sync_work);
- dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
+ drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
- dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
- mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
+ drbd_warn(device, "last md_mark_dirty: %s:%u\n",
+ device->last_md_mark_dirty.func, device->last_md_mark_dirty.line);
#endif
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
return 0;
}
@@ -3621,18 +3764,18 @@ const char *cmdname(enum drbd_packet cmd)
/**
* drbd_wait_misc - wait for a request to make progress
- * @mdev: device associated with the request
+ * @device: device associated with the request
* @i: the struct drbd_interval embedded in struct drbd_request or
* struct drbd_peer_request
*/
-int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
+int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{
struct net_conf *nc;
DEFINE_WAIT(wait);
long timeout;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (!nc) {
rcu_read_unlock();
return -ETIMEDOUT;
@@ -3640,14 +3783,14 @@ int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
rcu_read_unlock();
- /* Indicate to wake up mdev->misc_wait on progress. */
+ /* Indicate to wake up device->misc_wait on progress. */
i->waiting = true;
- prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&device->resource->req_lock);
timeout = schedule_timeout(timeout);
- finish_wait(&mdev->misc_wait, &wait);
- spin_lock_irq(&mdev->tconn->req_lock);
- if (!timeout || mdev->state.conn < C_CONNECTED)
+ finish_wait(&device->misc_wait, &wait);
+ spin_lock_irq(&device->resource->req_lock);
+ if (!timeout || device->state.conn < C_CONNECTED)
return -ETIMEDOUT;
if (signal_pending(current))
return -ERESTARTSYS;
@@ -3703,20 +3846,20 @@ _drbd_fault_str(unsigned int type) {
}
unsigned int
-_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
+_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
static struct fault_random_state rrs = {0, 0};
unsigned int ret = (
(fault_devs == 0 ||
- ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
+ ((1 << device_to_minor(device)) & fault_devs) != 0) &&
(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
if (ret) {
fault_count++;
if (__ratelimit(&drbd_ratelimit_state))
- dev_warn(DEV, "***Simulating %s failure\n",
+ drbd_warn(device, "***Simulating %s failure\n",
_drbd_fault_str(type));
}
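The fault-injection predicate above combines a per-minor bitmask with a percentage; spelled out as a standalone sketch (parameter names invented, logic mirroring the expression above):
static bool example_would_insert_fault(unsigned int minor, unsigned int fault_devs,
				       unsigned int fault_rate, unsigned int roll)
{
	/* fault_devs == 0 selects every minor, otherwise one bit per minor */
	bool selected = fault_devs == 0 || ((1u << minor) & fault_devs) != 0;

	/* roll stands in for _drbd_fault_random(); fire in fault_rate% of calls */
	return selected && ((roll % 100) + 1) <= fault_rate;
}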
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index c706d50a8b06..526414bc2cab 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -32,6 +32,7 @@
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
+#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
@@ -44,8 +45,8 @@
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
-int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
-int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
@@ -102,8 +103,9 @@ static struct drbd_config_context {
/* pointer into reply buffer */
struct drbd_genlmsghdr *reply_dh;
/* resolved from attributes, if possible */
- struct drbd_conf *mdev;
- struct drbd_tconn *tconn;
+ struct drbd_device *device;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
@@ -202,62 +204,67 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
if ((adm_ctx.my_addr &&
- nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
+ nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.connection->my_addr)) ||
(adm_ctx.peer_addr &&
- nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
+ nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.connection->peer_addr))) {
err = -EINVAL;
goto fail;
}
}
adm_ctx.minor = d_in->minor;
- adm_ctx.mdev = minor_to_mdev(d_in->minor);
- adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
+ adm_ctx.device = minor_to_device(d_in->minor);
+ if (adm_ctx.resource_name) {
+ adm_ctx.resource = drbd_find_resource(adm_ctx.resource_name);
+ }
- if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
+ if (!adm_ctx.device && (flags & DRBD_ADM_NEED_MINOR)) {
drbd_msg_put_info("unknown minor");
return ERR_MINOR_INVALID;
}
- if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
+ if (!adm_ctx.resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
drbd_msg_put_info("unknown resource");
+ if (adm_ctx.resource_name)
+ return ERR_RES_NOT_KNOWN;
return ERR_INVALID_REQUEST;
}
if (flags & DRBD_ADM_NEED_CONNECTION) {
- if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
+ if (adm_ctx.resource) {
drbd_msg_put_info("no resource name expected");
return ERR_INVALID_REQUEST;
}
- if (adm_ctx.mdev) {
+ if (adm_ctx.device) {
drbd_msg_put_info("no minor number expected");
return ERR_INVALID_REQUEST;
}
if (adm_ctx.my_addr && adm_ctx.peer_addr)
- adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
+ adm_ctx.connection = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
nla_len(adm_ctx.my_addr),
nla_data(adm_ctx.peer_addr),
nla_len(adm_ctx.peer_addr));
- if (!adm_ctx.tconn) {
+ if (!adm_ctx.connection) {
drbd_msg_put_info("unknown connection");
return ERR_INVALID_REQUEST;
}
}
/* some more paranoia, if the request was over-determined */
- if (adm_ctx.mdev && adm_ctx.tconn &&
- adm_ctx.mdev->tconn != adm_ctx.tconn) {
- pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
- adm_ctx.minor, adm_ctx.resource_name,
- adm_ctx.mdev->tconn->name);
+ if (adm_ctx.device && adm_ctx.resource &&
+ adm_ctx.device->resource != adm_ctx.resource) {
+ pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
+ adm_ctx.minor, adm_ctx.resource->name,
+ adm_ctx.device->resource->name);
drbd_msg_put_info("minor exists in different resource");
return ERR_INVALID_REQUEST;
}
- if (adm_ctx.mdev &&
+ if (adm_ctx.device &&
adm_ctx.volume != VOLUME_UNSPECIFIED &&
- adm_ctx.volume != adm_ctx.mdev->vnr) {
+ adm_ctx.volume != adm_ctx.device->vnr) {
pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
adm_ctx.minor, adm_ctx.volume,
- adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
+ adm_ctx.device->vnr,
+ adm_ctx.device->resource->name);
drbd_msg_put_info("minor exists as different volume");
return ERR_INVALID_REQUEST;
}
@@ -272,9 +279,13 @@ fail:
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
- if (adm_ctx.tconn) {
- kref_put(&adm_ctx.tconn->kref, &conn_destroy);
- adm_ctx.tconn = NULL;
+ if (adm_ctx.connection) {
+ kref_put(&adm_ctx.connection->kref, drbd_destroy_connection);
+ adm_ctx.connection = NULL;
+ }
+ if (adm_ctx.resource) {
+ kref_put(&adm_ctx.resource->kref, drbd_destroy_resource);
+ adm_ctx.resource = NULL;
}
if (!adm_ctx.reply_skb)
@@ -285,34 +296,34 @@ static int drbd_adm_finish(struct genl_info *info, int retcode)
return 0;
}
-static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
+static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
char *afs;
/* FIXME: A future version will not allow this case. */
- if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
+ if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
return;
- switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
+ switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
case AF_INET6:
afs = "ipv6";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
- &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
+ &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
break;
case AF_INET:
afs = "ipv4";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+ &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
break;
default:
afs = "ssocks";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+ &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
}
snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
-int drbd_khelper(struct drbd_conf *mdev, char *cmd)
+int drbd_khelper(struct drbd_device *device, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
@@ -322,39 +333,39 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
NULL };
char mb[12];
char *argv[] = {usermode_helper, cmd, mb, NULL };
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
struct sib_info sib;
int ret;
- if (current == tconn->worker.task)
- set_bit(CALLBACK_PENDING, &tconn->flags);
+ if (current == connection->worker.task)
+ set_bit(CALLBACK_PENDING, &connection->flags);
- snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
- setup_khelper_env(tconn, envp);
+ snprintf(mb, 12, "minor-%d", device_to_minor(device));
+ setup_khelper_env(connection, envp);
/* The helper may take some time.
* write out any unsynced meta data changes now */
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
- dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
+ drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
sib.sib_reason = SIB_HELPER_PRE;
sib.helper_name = cmd;
- drbd_bcast_event(mdev, &sib);
+ drbd_bcast_event(device, &sib);
ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
- dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
+ drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
else
- dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
+ drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
sib.sib_reason = SIB_HELPER_POST;
sib.helper_exit_code = ret;
- drbd_bcast_event(mdev, &sib);
+ drbd_bcast_event(device, &sib);
- if (current == tconn->worker.task)
- clear_bit(CALLBACK_PENDING, &tconn->flags);
+ if (current == connection->worker.task)
+ clear_bit(CALLBACK_PENDING, &connection->flags);
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
@@ -362,7 +373,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
return ret;
}
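A hypothetical call site: the helper receives the command and "minor-<n>" as argv, plus the DRBD_PEER_* variables prepared by setup_khelper_env(); the handler name "example-handler" is made up.
static void example_run_handler(struct drbd_device *device)
{
	int ret = drbd_khelper(device, "example-handler");

	/* the script's exit status is carried in the upper byte, as in the callers below */
	drbd_info(device, "handler exit status: %d\n", (ret >> 8) & 0xff);
}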
-int conn_khelper(struct drbd_tconn *tconn, char *cmd)
+static int conn_khelper(struct drbd_connection *connection, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
@@ -370,23 +381,24 @@ int conn_khelper(struct drbd_tconn *tconn, char *cmd)
(char[20]) { }, /* address family */
(char[60]) { }, /* address */
NULL };
- char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
+ char *resource_name = connection->resource->name;
+ char *argv[] = {usermode_helper, cmd, resource_name, NULL };
int ret;
- setup_khelper_env(tconn, envp);
- conn_md_sync(tconn);
+ setup_khelper_env(connection, envp);
+ conn_md_sync(connection);
- conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
+ drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
/* TODO: conn_bcast_event() ?? */
ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
- conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, tconn->name,
+ drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
+ usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
else
- conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, tconn->name,
+ drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
+ usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
/* TODO: conn_bcast_event() ?? */
@@ -396,18 +408,20 @@ int conn_khelper(struct drbd_tconn *tconn, char *cmd)
return ret;
}
-static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
+static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
enum drbd_fencing_p fp = FP_NOT_AVAIL;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (get_ldev_if_state(mdev, D_CONSISTENT)) {
- fp = max_t(enum drbd_fencing_p, fp,
- rcu_dereference(mdev->ldev->disk_conf)->fencing);
- put_ldev(mdev);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ if (get_ldev_if_state(device, D_CONSISTENT)) {
+ struct disk_conf *disk_conf =
+ rcu_dereference(peer_device->device->ldev->disk_conf);
+ fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
+ put_ldev(device);
}
}
rcu_read_unlock();
@@ -415,7 +429,7 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
return fp;
}
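Since the patch replaces the old mdev/tconn pointers with the resource/connection/peer_device/device object graph, here is a sketch of the navigation pattern used throughout (the function itself is illustrative only):
static void example_walk_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection;

	rcu_read_lock();
	for_each_connection(connection, resource) {
		struct drbd_peer_device *peer_device;
		int vnr;

		/* every connection tracks its volumes as peer devices */
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;

			drbd_info(device, "volume %u of resource %s\n",
				  device->vnr, device->resource->name);
		}
	}
	rcu_read_unlock();
}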
-bool conn_try_outdate_peer(struct drbd_tconn *tconn)
+bool conn_try_outdate_peer(struct drbd_connection *connection)
{
unsigned int connect_cnt;
union drbd_state mask = { };
@@ -424,26 +438,26 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
char *ex_to_string;
int r;
- if (tconn->cstate >= C_WF_REPORT_PARAMS) {
- conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
+ if (connection->cstate >= C_WF_REPORT_PARAMS) {
+ drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
return false;
}
- spin_lock_irq(&tconn->req_lock);
- connect_cnt = tconn->connect_cnt;
- spin_unlock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
+ connect_cnt = connection->connect_cnt;
+ spin_unlock_irq(&connection->resource->req_lock);
- fp = highest_fencing_policy(tconn);
+ fp = highest_fencing_policy(connection);
switch (fp) {
case FP_NOT_AVAIL:
- conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
+ drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
goto out;
case FP_DONT_CARE:
return true;
default: ;
}
- r = conn_khelper(tconn, "fence-peer");
+ r = conn_khelper(connection, "fence-peer");
switch ((r>>8) & 0xff) {
case 3: /* peer is inconsistent */
@@ -457,7 +471,7 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
val.pdsk = D_OUTDATED;
break;
case 5: /* peer was down */
- if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
+ if (conn_highest_disk(connection) == D_UP_TO_DATE) {
/* we will(have) create(d) a new UUID anyways... */
ex_to_string = "peer is unreachable, assumed to be dead";
mask.pdsk = D_MASK;
@@ -470,70 +484,70 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
* This is useful when an unconnected R_SECONDARY is asked to
* become R_PRIMARY, but finds the other peer being active. */
ex_to_string = "peer is active";
- conn_warn(tconn, "Peer is primary, outdating myself.\n");
+ drbd_warn(connection, "Peer is primary, outdating myself.\n");
mask.disk = D_MASK;
val.disk = D_OUTDATED;
break;
case 7:
if (fp != FP_STONITH)
- conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
+ drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
ex_to_string = "peer was stonithed";
mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED;
break;
default:
/* The script is broken ... */
- conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
+ drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
return false; /* Eventually leave IO frozen */
}
- conn_info(tconn, "fence-peer helper returned %d (%s)\n",
+ drbd_info(connection, "fence-peer helper returned %d (%s)\n",
(r>>8) & 0xff, ex_to_string);
out:
/* Not using
- conn_request_state(tconn, mask, val, CS_VERBOSE);
+ conn_request_state(connection, mask, val, CS_VERBOSE);
here, because we might have been able to re-establish the connection in the
meantime. */
- spin_lock_irq(&tconn->req_lock);
- if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) {
- if (tconn->connect_cnt != connect_cnt)
+ spin_lock_irq(&connection->resource->req_lock);
+ if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
+ if (connection->connect_cnt != connect_cnt)
/* In case the connection was established and dropped
while the fence-peer handler was running, ignore it */
- conn_info(tconn, "Ignoring fence-peer exit code\n");
+ drbd_info(connection, "Ignoring fence-peer exit code\n");
else
- _conn_request_state(tconn, mask, val, CS_VERBOSE);
+ _conn_request_state(connection, mask, val, CS_VERBOSE);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
- return conn_highest_pdsk(tconn) <= D_OUTDATED;
+ return conn_highest_pdsk(connection) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
- struct drbd_tconn *tconn = (struct drbd_tconn *)data;
+ struct drbd_connection *connection = (struct drbd_connection *)data;
- conn_try_outdate_peer(tconn);
+ conn_try_outdate_peer(connection);
- kref_put(&tconn->kref, &conn_destroy);
+ kref_put(&connection->kref, drbd_destroy_connection);
return 0;
}
-void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
+void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
struct task_struct *opa;
- kref_get(&tconn->kref);
- opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
+ kref_get(&connection->kref);
+ opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
if (IS_ERR(opa)) {
- conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
- kref_put(&tconn->kref, &conn_destroy);
+ drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
+ kref_put(&connection->kref, drbd_destroy_connection);
}
}
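A sketch of how the two fencing entry points differ (helper name invented): the synchronous call reports whether the peer's disk could be brought to Outdated or worse, while the _async variant takes its own reference and runs in a kthread.
static void example_fencing(struct drbd_connection *connection, bool may_block)
{
	if (may_block) {
		/* true when conn_highest_pdsk() ended up <= D_OUTDATED */
		if (!conn_try_outdate_peer(connection))
			drbd_warn(connection, "peer could not be fenced\n");
	} else {
		/* fire and forget via the "drbd_async_h" kthread */
		conn_try_outdate_peer_async(connection);
	}
}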
enum drbd_state_rv
-drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
+drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
{
const int max_tries = 4;
enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
@@ -542,16 +556,24 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
int forced = 0;
union drbd_state mask, val;
- if (new_role == R_PRIMARY)
- request_ping(mdev->tconn); /* Detect a dead peer ASAP */
+ if (new_role == R_PRIMARY) {
+ struct drbd_connection *connection;
- mutex_lock(mdev->state_mutex);
+ /* Detect dead peers as soon as possible. */
+
+ rcu_read_lock();
+ for_each_connection(connection, device->resource)
+ request_ping(connection);
+ rcu_read_unlock();
+ }
+
+ mutex_lock(device->state_mutex);
mask.i = 0; mask.role = R_MASK;
val.i = 0; val.role = new_role;
while (try++ < max_tries) {
- rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
+ rv = _drbd_request_state(device, mask, val, CS_WAIT_COMPLETE);
/* in case we first succeeded to outdate,
* but now suddenly could establish a connection */
@@ -562,8 +584,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
}
if (rv == SS_NO_UP_TO_DATE_DISK && force &&
- (mdev->state.disk < D_UP_TO_DATE &&
- mdev->state.disk >= D_INCONSISTENT)) {
+ (device->state.disk < D_UP_TO_DATE &&
+ device->state.disk >= D_INCONSISTENT)) {
mask.disk = D_MASK;
val.disk = D_UP_TO_DATE;
forced = 1;
@@ -571,10 +593,10 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
}
if (rv == SS_NO_UP_TO_DATE_DISK &&
- mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
- D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
+ device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
+ D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
- if (conn_try_outdate_peer(mdev->tconn)) {
+ if (conn_try_outdate_peer(first_peer_device(device)->connection)) {
val.disk = D_UP_TO_DATE;
mask.disk = D_MASK;
}
@@ -584,8 +606,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
if (rv == SS_NOTHING_TO_DO)
goto out;
if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
- if (!conn_try_outdate_peer(mdev->tconn) && force) {
- dev_warn(DEV, "Forced into split brain situation!\n");
+ if (!conn_try_outdate_peer(first_peer_device(device)->connection) && force) {
+ drbd_warn(device, "Forced into split brain situation!\n");
mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED;
@@ -597,7 +619,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
retry at most once more in this case. */
int timeo;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
rcu_read_unlock();
schedule_timeout_interruptible(timeo);
@@ -606,7 +628,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
continue;
}
if (rv < SS_SUCCESS) {
- rv = _drbd_request_state(mdev, mask, val,
+ rv = _drbd_request_state(device, mask, val,
CS_VERBOSE + CS_WAIT_COMPLETE);
if (rv < SS_SUCCESS)
goto out;
@@ -618,53 +640,53 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
goto out;
if (forced)
- dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
+ drbd_warn(device, "Forced to consider local data as UpToDate!\n");
/* Wait until nothing is on the fly :) */
- wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
+ wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);
/* FIXME also wait for all pending P_BARRIER_ACK? */
if (new_role == R_SECONDARY) {
- set_disk_ro(mdev->vdisk, true);
- if (get_ldev(mdev)) {
- mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
- put_ldev(mdev);
+ set_disk_ro(device->vdisk, true);
+ if (get_ldev(device)) {
+ device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
+ put_ldev(device);
}
} else {
- mutex_lock(&mdev->tconn->conf_update);
- nc = mdev->tconn->net_conf;
+ mutex_lock(&device->resource->conf_update);
+ nc = first_peer_device(device)->connection->net_conf;
if (nc)
nc->discard_my_data = 0; /* without copy; single bit op is atomic */
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&device->resource->conf_update);
- set_disk_ro(mdev->vdisk, false);
- if (get_ldev(mdev)) {
- if (((mdev->state.conn < C_CONNECTED ||
- mdev->state.pdsk <= D_FAILED)
- && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
- drbd_uuid_new_current(mdev);
+ set_disk_ro(device->vdisk, false);
+ if (get_ldev(device)) {
+ if (((device->state.conn < C_CONNECTED ||
+ device->state.pdsk <= D_FAILED)
+ && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
+ drbd_uuid_new_current(device);
- mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
- put_ldev(mdev);
+ device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
+ put_ldev(device);
}
}
/* writeout of activity-log-covered areas of the bitmap
* to stable storage is already done in the after-state-change handling */
- if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
+ if (device->state.conn >= C_WF_REPORT_PARAMS) {
/* if this was forced, we should consider sync */
if (forced)
- drbd_send_uuids(mdev);
- drbd_send_current_state(mdev);
+ drbd_send_uuids(first_peer_device(device));
+ drbd_send_current_state(first_peer_device(device));
}
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
- kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
+ kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
- mutex_unlock(mdev->state_mutex);
+ mutex_unlock(device->state_mutex);
return rv;
}
@@ -699,9 +721,9 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
}
if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
- retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
+ retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
else
- retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
+ retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
out:
drbd_adm_finish(info, retcode);
return 0;
@@ -728,7 +750,7 @@ out:
* Activity log size used to be fixed 32kB,
* but is about to become configurable.
*/
-static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
+static void drbd_md_set_sector_offsets(struct drbd_device *device,
struct drbd_backing_dev *bdev)
{
sector_t md_size_sect = 0;
@@ -804,35 +826,35 @@ char *ppsize(char *buf, unsigned long long size)
* drbd_adm_suspend_io/drbd_adm_resume_io,
* which are (sub) state changes triggered by admin (drbdsetup),
* and can be long lived.
- * This changes an mdev->flag, is triggered by drbd internals,
+ * This changes a device->flag, is triggered by drbd internals,
* and should be short-lived. */
-void drbd_suspend_io(struct drbd_conf *mdev)
+void drbd_suspend_io(struct drbd_device *device)
{
- set_bit(SUSPEND_IO, &mdev->flags);
- if (drbd_suspended(mdev))
+ set_bit(SUSPEND_IO, &device->flags);
+ if (drbd_suspended(device))
return;
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
+ wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}
-void drbd_resume_io(struct drbd_conf *mdev)
+void drbd_resume_io(struct drbd_device *device)
{
- clear_bit(SUSPEND_IO, &mdev->flags);
- wake_up(&mdev->misc_wait);
+ clear_bit(SUSPEND_IO, &device->flags);
+ wake_up(&device->misc_wait);
}
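The intended pairing, as a sketch (helper name and the elided work are placeholders):
static void example_with_io_suspended(struct drbd_device *device)
{
	drbd_suspend_io(device);	/* short-lived, internal suspension */
	/* ... work that must not race with new application IO ... */
	drbd_resume_io(device);
}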
/**
* drbd_determine_dev_size() - Sets the right device size obeying all constraints
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Returns 0 on success, negative return values indicate errors.
* You should call drbd_md_sync() after calling this function.
*/
enum determine_dev_size
-drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
+drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
sector_t la_size_sect, u_size;
- struct drbd_md *md = &mdev->ldev->md;
+ struct drbd_md *md = &device->ldev->md;
u32 prev_al_stripe_size_4k;
u32 prev_al_stripes;
sector_t size;
@@ -851,19 +873,19 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
* Suspend IO right here.
* still lock the act_log to not trigger ASSERTs there.
*/
- drbd_suspend_io(mdev);
- buffer = drbd_md_get_buffer(mdev); /* Lock meta-data IO */
+ drbd_suspend_io(device);
+ buffer = drbd_md_get_buffer(device); /* Lock meta-data IO */
if (!buffer) {
- drbd_resume_io(mdev);
+ drbd_resume_io(device);
return DS_ERROR;
}
/* no wait necessary anymore, actually we could assert that */
- wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+ wait_event(device->al_wait, lc_try_lock(device->act_log));
- prev_first_sect = drbd_md_first_sector(mdev->ldev);
- prev_size = mdev->ldev->md.md_size_sect;
- la_size_sect = mdev->ldev->md.la_size_sect;
+ prev_first_sect = drbd_md_first_sector(device->ldev);
+ prev_size = device->ldev->md.md_size_sect;
+ la_size_sect = device->ldev->md.la_size_sect;
if (rs) {
/* rs is non NULL if we should change the AL layout only */
@@ -876,18 +898,18 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
}
- drbd_md_set_sector_offsets(mdev, mdev->ldev);
+ drbd_md_set_sector_offsets(device, device->ldev);
rcu_read_lock();
- u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
- size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
+ size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);
if (size < la_size_sect) {
if (rs && u_size == 0) {
/* Remove "rs &&" later. This check should always be active, but
right now the receiver expects the permissive behavior */
- dev_warn(DEV, "Implicit shrink not allowed. "
+ drbd_warn(device, "Implicit shrink not allowed. "
"Use --size=%llus for explicit shrink.\n",
(unsigned long long)size);
rv = DS_ERROR_SHRINK;
@@ -898,60 +920,60 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
goto err_out;
}
- if (drbd_get_capacity(mdev->this_bdev) != size ||
- drbd_bm_capacity(mdev) != size) {
+ if (drbd_get_capacity(device->this_bdev) != size ||
+ drbd_bm_capacity(device) != size) {
int err;
- err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
+ err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
if (unlikely(err)) {
/* currently there is only one error: ENOMEM! */
- size = drbd_bm_capacity(mdev)>>1;
+ size = drbd_bm_capacity(device)>>1;
if (size == 0) {
- dev_err(DEV, "OUT OF MEMORY! "
+ drbd_err(device, "OUT OF MEMORY! "
"Could not allocate bitmap!\n");
} else {
- dev_err(DEV, "BM resizing failed. "
+ drbd_err(device, "BM resizing failed. "
"Leaving size unchanged at size = %lu KB\n",
(unsigned long)size);
}
rv = DS_ERROR;
}
/* racy, see comments above. */
- drbd_set_my_capacity(mdev, size);
- mdev->ldev->md.la_size_sect = size;
- dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
+ drbd_set_my_capacity(device, size);
+ device->ldev->md.la_size_sect = size;
+ drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
(unsigned long long)size>>1);
}
if (rv <= DS_ERROR)
goto err_out;
- la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);
+ la_size_changed = (la_size_sect != device->ldev->md.la_size_sect);
- md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
- || prev_size != mdev->ldev->md.md_size_sect;
+ md_moved = prev_first_sect != drbd_md_first_sector(device->ldev)
+ || prev_size != device->ldev->md.md_size_sect;
if (la_size_changed || md_moved || rs) {
u32 prev_flags;
- drbd_al_shrink(mdev); /* All extents inactive. */
+ drbd_al_shrink(device); /* All extents inactive. */
prev_flags = md->flags;
md->flags &= ~MDF_PRIMARY_IND;
- drbd_md_write(mdev, buffer);
+ drbd_md_write(device, buffer);
- dev_info(DEV, "Writing the whole bitmap, %s\n",
+ drbd_info(device, "Writing the whole bitmap, %s\n",
la_size_changed && md_moved ? "size changed and md moved" :
la_size_changed ? "size changed" : "md moved");
/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
- drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
+ drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
"size changed", BM_LOCKED_MASK);
- drbd_initialize_al(mdev, buffer);
+ drbd_initialize_al(device, buffer);
md->flags = prev_flags;
- drbd_md_write(mdev, buffer);
+ drbd_md_write(device, buffer);
if (rs)
- dev_info(DEV, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
- md->al_stripes, md->al_stripe_size_4k * 4);
+ drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
+ md->al_stripes, md->al_stripe_size_4k * 4);
}
if (size > la_size_sect)
@@ -966,30 +988,30 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
md->al_stripe_size_4k = prev_al_stripe_size_4k;
md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k;
- drbd_md_set_sector_offsets(mdev, mdev->ldev);
+ drbd_md_set_sector_offsets(device, device->ldev);
}
}
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
- drbd_md_put_buffer(mdev);
- drbd_resume_io(mdev);
+ lc_unlock(device->act_log);
+ wake_up(&device->al_wait);
+ drbd_md_put_buffer(device);
+ drbd_resume_io(device);
return rv;
}
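A caller sketch matching the kernel-doc above (wrapper name invented); get_ldev()/put_ldev() provide the local reference implied by __must_hold(local):
static enum determine_dev_size example_resize(struct drbd_device *device)
{
	enum determine_dev_size dd = DS_ERROR;

	if (get_ldev(device)) {
		dd = drbd_determine_dev_size(device, 0, NULL);
		drbd_md_sync(device);	/* as the kernel-doc requests */
		put_ldev(device);
	}
	return dd;
}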
sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
sector_t u_size, int assume_peer_has_space)
{
- sector_t p_size = mdev->p_size; /* partner's disk size. */
+ sector_t p_size = device->p_size; /* partner's disk size. */
sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
sector_t m_size; /* my size */
sector_t size = 0;
m_size = drbd_get_max_capacity(bdev);
- if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
- dev_warn(DEV, "Resize while not connected was forced by the user!\n");
+ if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
+ drbd_warn(device, "Resize while not connected was forced by the user!\n");
p_size = m_size;
}
@@ -1011,11 +1033,11 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
}
if (size == 0)
- dev_err(DEV, "Both nodes diskless!\n");
+ drbd_err(device, "Both nodes diskless!\n");
if (u_size) {
if (u_size > size)
- dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
+ drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
(unsigned long)u_size>>1, (unsigned long)size>>1);
else
size = u_size;
@@ -1026,71 +1048,71 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
/**
* drbd_check_al_size() - Ensures that the AL is of the right size
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
* failed, and 0 on success. You should call drbd_md_sync() after you called
* this function.
*/
-static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
+static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
struct lru_cache *n, *t;
struct lc_element *e;
unsigned int in_use;
int i;
- if (mdev->act_log &&
- mdev->act_log->nr_elements == dc->al_extents)
+ if (device->act_log &&
+ device->act_log->nr_elements == dc->al_extents)
return 0;
in_use = 0;
- t = mdev->act_log;
+ t = device->act_log;
n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
dc->al_extents, sizeof(struct lc_element), 0);
if (n == NULL) {
- dev_err(DEV, "Cannot allocate act_log lru!\n");
+ drbd_err(device, "Cannot allocate act_log lru!\n");
return -ENOMEM;
}
- spin_lock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
if (t) {
for (i = 0; i < t->nr_elements; i++) {
e = lc_element_by_index(t, i);
if (e->refcnt)
- dev_err(DEV, "refcnt(%d)==%d\n",
+ drbd_err(device, "refcnt(%d)==%d\n",
e->lc_number, e->refcnt);
in_use += e->refcnt;
}
}
if (!in_use)
- mdev->act_log = n;
- spin_unlock_irq(&mdev->al_lock);
+ device->act_log = n;
+ spin_unlock_irq(&device->al_lock);
if (in_use) {
- dev_err(DEV, "Activity log still in use!\n");
+ drbd_err(device, "Activity log still in use!\n");
lc_destroy(n);
return -EBUSY;
} else {
if (t)
lc_destroy(t);
}
- drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elemens */
+ drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
return 0;
}
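The contract from the kernel-doc above, as a small sketch (wrapper name invented):
static int example_apply_al_extents(struct drbd_device *device, struct disk_conf *dc)
{
	int err = drbd_check_al_size(device, dc);	/* 0, -EBUSY or -ENOMEM */

	if (!err)
		drbd_md_sync(device);	/* as the kernel-doc asks for */
	return err;
}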
-static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
+static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_bio_size)
{
- struct request_queue * const q = mdev->rq_queue;
+ struct request_queue * const q = device->rq_queue;
unsigned int max_hw_sectors = max_bio_size >> 9;
unsigned int max_segments = 0;
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue;
max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
rcu_read_lock();
- max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
+ max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
rcu_read_unlock();
- put_ldev(mdev);
+ put_ldev(device);
}
blk_queue_logical_block_size(q, 512);
@@ -1099,46 +1121,46 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue;
blk_queue_stack_limits(q, b);
if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
- dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
+ drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
q->backing_dev_info.ra_pages,
b->backing_dev_info.ra_pages);
q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
}
- put_ldev(mdev);
+ put_ldev(device);
}
}
-void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
+void drbd_reconsider_max_bio_size(struct drbd_device *device)
{
unsigned int now, new, local, peer;
- now = queue_max_hw_sectors(mdev->rq_queue) << 9;
- local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
- peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */
+ now = queue_max_hw_sectors(device->rq_queue) << 9;
+ local = device->local_max_bio_size; /* Possibly the last known value, from volatile memory */
+ peer = device->peer_max_bio_size; /* Possibly the last known value, from meta data */
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
- mdev->local_max_bio_size = local;
- put_ldev(mdev);
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ local = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
+ device->local_max_bio_size = local;
+ put_ldev(device);
}
local = min(local, DRBD_MAX_BIO_SIZE);
/* We may ignore peer limits if the peer is modern enough.
Because, from 8.3.8 onwards, the peer can use multiple
BIOs for a single peer_request */
- if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
- if (mdev->tconn->agreed_pro_version < 94)
- peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ if (device->state.conn >= C_WF_REPORT_PARAMS) {
+ if (first_peer_device(device)->connection->agreed_pro_version < 94)
+ peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
- else if (mdev->tconn->agreed_pro_version == 94)
+ else if (first_peer_device(device)->connection->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
- else if (mdev->tconn->agreed_pro_version < 100)
+ else if (first_peer_device(device)->connection->agreed_pro_version < 100)
peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
else
peer = DRBD_MAX_BIO_SIZE;
@@ -1146,57 +1168,57 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
new = min(local, peer);
- if (mdev->state.role == R_PRIMARY && new < now)
- dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
+ if (device->state.role == R_PRIMARY && new < now)
+ drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
if (new != now)
- dev_info(DEV, "max BIO size = %u\n", new);
+ drbd_info(device, "max BIO size = %u\n", new);
- drbd_setup_queue_param(mdev, new);
+ drbd_setup_queue_param(device, new);
}
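The peer-limit selection above condenses to the following mapping; a standalone sketch (helper name invented, the DRBD_MAX_* constants come from drbd_int.h):
static unsigned int example_peer_bio_limit(int agreed_pro_version,
					   unsigned int last_known_peer_max)
{
	if (agreed_pro_version < 94)
		return min_t(unsigned int, last_known_peer_max,
			     DRBD_MAX_SIZE_H80_PACKET);
	if (agreed_pro_version == 94)
		return DRBD_MAX_SIZE_H80_PACKET;
	if (agreed_pro_version < 100)
		return DRBD_MAX_BIO_SIZE_P95;
	return DRBD_MAX_BIO_SIZE;
}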
/* Starts the worker thread */
-static void conn_reconfig_start(struct drbd_tconn *tconn)
+static void conn_reconfig_start(struct drbd_connection *connection)
{
- drbd_thread_start(&tconn->worker);
- conn_flush_workqueue(tconn);
+ drbd_thread_start(&connection->worker);
+ drbd_flush_workqueue(&connection->sender_work);
}
/* if still unconfigured, stops worker again. */
-static void conn_reconfig_done(struct drbd_tconn *tconn)
+static void conn_reconfig_done(struct drbd_connection *connection)
{
bool stop_threads;
- spin_lock_irq(&tconn->req_lock);
- stop_threads = conn_all_vols_unconf(tconn) &&
- tconn->cstate == C_STANDALONE;
- spin_unlock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
+ stop_threads = conn_all_vols_unconf(connection) &&
+ connection->cstate == C_STANDALONE;
+ spin_unlock_irq(&connection->resource->req_lock);
if (stop_threads) {
/* asender is implicitly stopped by receiver
* in conn_disconnect() */
- drbd_thread_stop(&tconn->receiver);
- drbd_thread_stop(&tconn->worker);
+ drbd_thread_stop(&connection->receiver);
+ drbd_thread_stop(&connection->worker);
}
}
/* Make sure IO is suspended before calling this function. */
-static void drbd_suspend_al(struct drbd_conf *mdev)
+static void drbd_suspend_al(struct drbd_device *device)
{
int s = 0;
- if (!lc_try_lock(mdev->act_log)) {
- dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
+ if (!lc_try_lock(device->act_log)) {
+ drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
return;
}
- drbd_al_shrink(mdev);
- spin_lock_irq(&mdev->tconn->req_lock);
- if (mdev->state.conn < C_CONNECTED)
- s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
- spin_unlock_irq(&mdev->tconn->req_lock);
- lc_unlock(mdev->act_log);
+ drbd_al_shrink(device);
+ spin_lock_irq(&device->resource->req_lock);
+ if (device->state.conn < C_CONNECTED)
+ s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
+ spin_unlock_irq(&device->resource->req_lock);
+ lc_unlock(device->act_log);
if (s)
- dev_info(DEV, "Suspended AL updates\n");
+ drbd_info(device, "Suspended AL updates\n");
}
@@ -1237,7 +1259,7 @@ static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
- struct drbd_conf *mdev;
+ struct drbd_device *device;
struct disk_conf *new_disk_conf, *old_disk_conf;
struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
int err, fifo_size;
@@ -1248,11 +1270,11 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- mdev = adm_ctx.mdev;
+ device = adm_ctx.device;
/* we also need a disk
* to change the options on */
- if (!get_ldev(mdev)) {
+ if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto out;
}
@@ -1263,8 +1285,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
- mutex_lock(&mdev->tconn->conf_update);
- old_disk_conf = mdev->ldev->disk_conf;
+ mutex_lock(&device->resource->conf_update);
+ old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
if (should_set_defaults(info))
set_disk_conf_defaults(new_disk_conf);
@@ -1273,6 +1295,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto fail_unlock;
}
if (!expect(new_disk_conf->resync_rate >= 1))
@@ -1280,29 +1303,29 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
- if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
- new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);
+ if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev))
+ new_disk_conf->al_extents = drbd_al_extents_max(device->ldev);
if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
- if (fifo_size != mdev->rs_plan_s->size) {
+ if (fifo_size != device->rs_plan_s->size) {
new_plan = fifo_alloc(fifo_size);
if (!new_plan) {
- dev_err(DEV, "kmalloc of fifo_buffer failed");
+ drbd_err(device, "kmalloc of fifo_buffer failed");
retcode = ERR_NOMEM;
goto fail_unlock;
}
}
- drbd_suspend_io(mdev);
- wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
- drbd_al_shrink(mdev);
- err = drbd_check_al_size(mdev, new_disk_conf);
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
- drbd_resume_io(mdev);
+ drbd_suspend_io(device);
+ wait_event(device->al_wait, lc_try_lock(device->act_log));
+ drbd_al_shrink(device);
+ err = drbd_check_al_size(device, new_disk_conf);
+ lc_unlock(device->act_log);
+ wake_up(&device->al_wait);
+ drbd_resume_io(device);
if (err) {
retcode = ERR_NOMEM;
@@ -1310,10 +1333,10 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
}
write_lock_irq(&global_state_lock);
- retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+ retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
if (retcode == NO_ERROR) {
- rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
- drbd_resync_after_changed(mdev);
+ rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
+ drbd_resync_after_changed(device);
}
write_unlock_irq(&global_state_lock);
@@ -1321,42 +1344,46 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
goto fail_unlock;
if (new_plan) {
- old_plan = mdev->rs_plan_s;
- rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+ old_plan = device->rs_plan_s;
+ rcu_assign_pointer(device->rs_plan_s, new_plan);
}
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&device->resource->conf_update);
if (new_disk_conf->al_updates)
- mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ device->ldev->md.flags &= ~MDF_AL_DISABLED;
else
- mdev->ldev->md.flags |= MDF_AL_DISABLED;
+ device->ldev->md.flags |= MDF_AL_DISABLED;
if (new_disk_conf->md_flushes)
- clear_bit(MD_NO_FUA, &mdev->flags);
+ clear_bit(MD_NO_FUA, &device->flags);
else
- set_bit(MD_NO_FUA, &mdev->flags);
+ set_bit(MD_NO_FUA, &device->flags);
- drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+ drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush);
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
- if (mdev->state.conn >= C_CONNECTED)
- drbd_send_sync_param(mdev);
+ if (device->state.conn >= C_CONNECTED) {
+ struct drbd_peer_device *peer_device;
+
+ for_each_peer_device(peer_device, device)
+ drbd_send_sync_param(peer_device);
+ }
synchronize_rcu();
kfree(old_disk_conf);
kfree(old_plan);
- mod_timer(&mdev->request_timer, jiffies + HZ);
+ mod_timer(&device->request_timer, jiffies + HZ);
goto success;
fail_unlock:
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&device->resource->conf_update);
fail:
kfree(new_disk_conf);
kfree(new_plan);
success:
- put_ldev(mdev);
+ put_ldev(device);
out:
drbd_adm_finish(info, retcode);
return 0;
@@ -1364,7 +1391,7 @@ success:
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
int err;
enum drbd_ret_code retcode;
enum determine_dev_size dd;
@@ -1385,11 +1412,11 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto finish;
- mdev = adm_ctx.mdev;
- conn_reconfig_start(mdev->tconn);
+ device = adm_ctx.device;
+ conn_reconfig_start(first_peer_device(device)->connection);
/* if you want to reconfigure, please tear down first */
- if (mdev->state.disk > D_DISKLESS) {
+ if (device->state.disk > D_DISKLESS) {
retcode = ERR_DISK_CONFIGURED;
goto fail;
}
@@ -1397,17 +1424,17 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
* drbd_ldev_destroy is done already, we may end up here very fast,
* e.g. if someone calls attach from the on-io-error handler,
* to realize a "hot spare" feature (not that I'd recommend that) */
- wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+ wait_event(device->misc_wait, !atomic_read(&device->local_cnt));
/* make sure there is no leftover from previous force-detach attempts */
- clear_bit(FORCE_DETACH, &mdev->flags);
- clear_bit(WAS_IO_ERROR, &mdev->flags);
- clear_bit(WAS_READ_ERROR, &mdev->flags);
+ clear_bit(FORCE_DETACH, &device->flags);
+ clear_bit(WAS_IO_ERROR, &device->flags);
+ clear_bit(WAS_READ_ERROR, &device->flags);
/* and no leftover from previously aborted resync or verify, either */
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
+ device->rs_total = 0;
+ device->rs_failed = 0;
+ atomic_set(&device->rs_pending_cnt, 0);
/* allocation not in the IO path, drbdsetup context */
nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1447,13 +1474,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
}
write_lock_irq(&global_state_lock);
- retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+ retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
write_unlock_irq(&global_state_lock);
if (retcode != NO_ERROR)
goto fail;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (nc) {
if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
rcu_read_unlock();
@@ -1464,9 +1491,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
rcu_read_unlock();
bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
- FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
+ FMODE_READ | FMODE_WRITE | FMODE_EXCL, device);
if (IS_ERR(bdev)) {
- dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
+ drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
PTR_ERR(bdev));
retcode = ERR_OPEN_DISK;
goto fail;
@@ -1484,9 +1511,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL,
(new_disk_conf->meta_dev_idx < 0) ?
- (void *)mdev : (void *)drbd_m_holder);
+ (void *)device : (void *)drbd_m_holder);
if (IS_ERR(bdev)) {
- dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
+ drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
PTR_ERR(bdev));
retcode = ERR_OPEN_MD_DISK;
goto fail;
@@ -1510,7 +1537,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
/* Read our meta data super block early.
* This also sets other on-disk offsets. */
- retcode = drbd_md_read(mdev, nbc);
+ retcode = drbd_md_read(device, nbc);
if (retcode != NO_ERROR)
goto fail;
@@ -1520,7 +1547,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
new_disk_conf->al_extents = drbd_al_extents_max(nbc);
if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
- dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
+ drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
(unsigned long long) drbd_get_max_capacity(nbc),
(unsigned long long) new_disk_conf->disk_size);
retcode = ERR_DISK_TOO_SMALL;
@@ -1538,7 +1565,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
retcode = ERR_MD_DISK_TOO_SMALL;
- dev_warn(DEV, "refusing attach: md-device too small, "
+ drbd_warn(device, "refusing attach: md-device too small, "
"at least %llu sectors needed for this meta-disk type\n",
(unsigned long long) min_md_device_sectors);
goto fail;
@@ -1547,7 +1574,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
/* Make sure the new disk is big enough
* (we may currently be R_PRIMARY with no local disk...) */
if (drbd_get_max_capacity(nbc) <
- drbd_get_capacity(mdev->this_bdev)) {
+ drbd_get_capacity(device->this_bdev)) {
retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -1555,15 +1582,15 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
if (nbc->known_size > max_possible_sectors) {
- dev_warn(DEV, "==> truncating very big lower level device "
+ drbd_warn(device, "==> truncating very big lower level device "
"to currently maximum possible %llu sectors <==\n",
(unsigned long long) max_possible_sectors);
if (new_disk_conf->meta_dev_idx >= 0)
- dev_warn(DEV, "==>> using internal or flexible "
+ drbd_warn(device, "==>> using internal or flexible "
"meta data may help <<==\n");
}
- drbd_suspend_io(mdev);
+ drbd_suspend_io(device);
/* also wait for the last barrier ack. */
/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
* We need a way to either ignore barrier acks for barriers sent before a device
@@ -1571,45 +1598,45 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
* As barriers are counted per resource,
* we'd need to suspend io on all devices of a resource.
*/
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
+ wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
/* and for any other previously queued work */
- drbd_flush_workqueue(mdev);
+ drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
- rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
+ rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
retcode = rv; /* FIXME: Type mismatch. */
- drbd_resume_io(mdev);
+ drbd_resume_io(device);
if (rv < SS_SUCCESS)
goto fail;
- if (!get_ldev_if_state(mdev, D_ATTACHING))
+ if (!get_ldev_if_state(device, D_ATTACHING))
goto force_diskless;
- if (!mdev->bitmap) {
- if (drbd_bm_init(mdev)) {
+ if (!device->bitmap) {
+ if (drbd_bm_init(device)) {
retcode = ERR_NOMEM;
goto force_diskless_dec;
}
}
- if (mdev->state.conn < C_CONNECTED &&
- mdev->state.role == R_PRIMARY &&
- (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
- dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
- (unsigned long long)mdev->ed_uuid);
+ if (device->state.conn < C_CONNECTED &&
+ device->state.role == R_PRIMARY &&
+ (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
+ drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
+ (unsigned long long)device->ed_uuid);
retcode = ERR_DATA_NOT_CURRENT;
goto force_diskless_dec;
}
/* Since we are diskless, fix the activity log first... */
- if (drbd_check_al_size(mdev, new_disk_conf)) {
+ if (drbd_check_al_size(device, new_disk_conf)) {
retcode = ERR_NOMEM;
goto force_diskless_dec;
}
/* Prevent shrinking of consistent devices ! */
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
- drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
- dev_warn(DEV, "refusing to truncate a consistent device\n");
+ drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
+ drbd_warn(device, "refusing to truncate a consistent device\n");
retcode = ERR_DISK_TOO_SMALL;
goto force_diskless_dec;
}
@@ -1617,40 +1644,40 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
if (new_disk_conf->md_flushes)
- clear_bit(MD_NO_FUA, &mdev->flags);
+ clear_bit(MD_NO_FUA, &device->flags);
else
- set_bit(MD_NO_FUA, &mdev->flags);
+ set_bit(MD_NO_FUA, &device->flags);
/* Point of no return reached.
* Devices and memory are no longer released by error cleanup below.
- * now mdev takes over responsibility, and the state engine should
+ * now device takes over responsibility, and the state engine should
* clean it up somewhere. */
- D_ASSERT(mdev->ldev == NULL);
- mdev->ldev = nbc;
- mdev->resync = resync_lru;
- mdev->rs_plan_s = new_plan;
+ D_ASSERT(device, device->ldev == NULL);
+ device->ldev = nbc;
+ device->resync = resync_lru;
+ device->rs_plan_s = new_plan;
nbc = NULL;
resync_lru = NULL;
new_disk_conf = NULL;
new_plan = NULL;
- drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+ drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush);
- if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
- set_bit(CRASHED_PRIMARY, &mdev->flags);
+ if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
+ set_bit(CRASHED_PRIMARY, &device->flags);
else
- clear_bit(CRASHED_PRIMARY, &mdev->flags);
+ clear_bit(CRASHED_PRIMARY, &device->flags);
- if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
- !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
- set_bit(CRASHED_PRIMARY, &mdev->flags);
+ if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
+ !(device->state.role == R_PRIMARY && device->resource->susp_nod))
+ set_bit(CRASHED_PRIMARY, &device->flags);
- mdev->send_cnt = 0;
- mdev->recv_cnt = 0;
- mdev->read_cnt = 0;
- mdev->writ_cnt = 0;
+ device->send_cnt = 0;
+ device->recv_cnt = 0;
+ device->read_cnt = 0;
+ device->writ_cnt = 0;
- drbd_reconsider_max_bio_size(mdev);
+ drbd_reconsider_max_bio_size(device);
/* If I am currently not R_PRIMARY,
* but meta data primary indicator is set,
@@ -1666,50 +1693,50 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
* so we can automatically recover from a crash of a
* degraded but active "cluster" after a certain timeout.
*/
- clear_bit(USE_DEGR_WFC_T, &mdev->flags);
- if (mdev->state.role != R_PRIMARY &&
- drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
- !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
- set_bit(USE_DEGR_WFC_T, &mdev->flags);
+ clear_bit(USE_DEGR_WFC_T, &device->flags);
+ if (device->state.role != R_PRIMARY &&
+ drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
+ !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
+ set_bit(USE_DEGR_WFC_T, &device->flags);
- dd = drbd_determine_dev_size(mdev, 0, NULL);
+ dd = drbd_determine_dev_size(device, 0, NULL);
if (dd <= DS_ERROR) {
retcode = ERR_NOMEM_BITMAP;
goto force_diskless_dec;
} else if (dd == DS_GREW)
- set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+ set_bit(RESYNC_AFTER_NEG, &device->flags);
- if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
- (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
- drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
- dev_info(DEV, "Assuming that all blocks are out of sync "
+ if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
+ (test_bit(CRASHED_PRIMARY, &device->flags) &&
+ drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
+ drbd_info(device, "Assuming that all blocks are out of sync "
"(aka FullSync)\n");
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+ if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
"set_n_write from attaching", BM_LOCKED_MASK)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
} else {
- if (drbd_bitmap_io(mdev, &drbd_bm_read,
+ if (drbd_bitmap_io(device, &drbd_bm_read,
"read from attaching", BM_LOCKED_MASK)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
}
- if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
- drbd_suspend_al(mdev); /* IO is still suspended here... */
+ if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
+ drbd_suspend_al(device); /* IO is still suspended here... */
- spin_lock_irq(&mdev->tconn->req_lock);
- os = drbd_read_state(mdev);
+ spin_lock_irq(&device->resource->req_lock);
+ os = drbd_read_state(device);
ns = os;
/* If MDF_CONSISTENT is not set go into inconsistent state,
otherwise investigate MDF_WasUpToDate...
If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
otherwise into D_CONSISTENT state.
*/
- if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
- if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
+ if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
+ if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
ns.disk = D_CONSISTENT;
else
ns.disk = D_OUTDATED;
@@ -1717,12 +1744,12 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
ns.disk = D_INCONSISTENT;
}
- if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
+ if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
ns.pdsk = D_OUTDATED;
rcu_read_lock();
if (ns.disk == D_CONSISTENT &&
- (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
+ (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
ns.disk = D_UP_TO_DATE;
/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
@@ -1730,56 +1757,56 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
this point, because drbd_request_state() modifies these
flags. */
- if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
- mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ if (rcu_dereference(device->ldev->disk_conf)->al_updates)
+ device->ldev->md.flags &= ~MDF_AL_DISABLED;
else
- mdev->ldev->md.flags |= MDF_AL_DISABLED;
+ device->ldev->md.flags |= MDF_AL_DISABLED;
rcu_read_unlock();
/* In case we are C_CONNECTED postpone any decision on the new disk
state after the negotiation phase. */
- if (mdev->state.conn == C_CONNECTED) {
- mdev->new_state_tmp.i = ns.i;
+ if (device->state.conn == C_CONNECTED) {
+ device->new_state_tmp.i = ns.i;
ns.i = os.i;
ns.disk = D_NEGOTIATING;
/* We expect to receive up-to-date UUIDs soon.
To avoid a race in receive_state, free p_uuid while
holding req_lock. I.e. atomic with the state change */
- kfree(mdev->p_uuid);
- mdev->p_uuid = NULL;
+ kfree(device->p_uuid);
+ device->p_uuid = NULL;
}
- rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
+ spin_unlock_irq(&device->resource->req_lock);
if (rv < SS_SUCCESS)
goto force_diskless_dec;
- mod_timer(&mdev->request_timer, jiffies + HZ);
+ mod_timer(&device->request_timer, jiffies + HZ);
- if (mdev->state.role == R_PRIMARY)
- mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
+ if (device->state.role == R_PRIMARY)
+ device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
else
- mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
+ device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
- drbd_md_mark_dirty(mdev);
- drbd_md_sync(mdev);
+ drbd_md_mark_dirty(device);
+ drbd_md_sync(device);
- kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
- put_ldev(mdev);
- conn_reconfig_done(mdev->tconn);
+ kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
+ put_ldev(device);
+ conn_reconfig_done(first_peer_device(device)->connection);
drbd_adm_finish(info, retcode);
return 0;
force_diskless_dec:
- put_ldev(mdev);
+ put_ldev(device);
force_diskless:
- drbd_force_state(mdev, NS(disk, D_DISKLESS));
- drbd_md_sync(mdev);
+ drbd_force_state(device, NS(disk, D_DISKLESS));
+ drbd_md_sync(device);
fail:
- conn_reconfig_done(mdev->tconn);
+ conn_reconfig_done(first_peer_device(device)->connection);
if (nbc) {
if (nbc->backing_bdev)
blkdev_put(nbc->backing_bdev,
@@ -1798,26 +1825,26 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
return 0;
}
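
drbd_adm_attach() above decides the new disk state from the MDF_* meta-data flags and commits it while holding device->resource->req_lock (formerly mdev->tconn->req_lock). The locking pattern, reduced to a sketch (attach_commit_state() and new_disk_state() are hypothetical helpers, not part of the patch):

/* Sketch only; new_disk_state() stands in for the MDF_CONSISTENT /
 * MDF_WAS_UP_TO_DATE decisions made inline in drbd_adm_attach(). */
static enum drbd_state_rv attach_commit_state(struct drbd_device *device)
{
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	spin_lock_irq(&device->resource->req_lock);
	os = drbd_read_state(device);
	ns = os;
	ns.disk = new_disk_state(device);	/* hypothetical */
	rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&device->resource->req_lock);
	return rv;
}
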
-static int adm_detach(struct drbd_conf *mdev, int force)
+static int adm_detach(struct drbd_device *device, int force)
{
enum drbd_state_rv retcode;
int ret;
if (force) {
- set_bit(FORCE_DETACH, &mdev->flags);
- drbd_force_state(mdev, NS(disk, D_FAILED));
+ set_bit(FORCE_DETACH, &device->flags);
+ drbd_force_state(device, NS(disk, D_FAILED));
retcode = SS_SUCCESS;
goto out;
}
- drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
- drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
- retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
- drbd_md_put_buffer(mdev);
+ drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
+ drbd_md_get_buffer(device); /* make sure there is no in-flight meta-data IO */
+ retcode = drbd_request_state(device, NS(disk, D_FAILED));
+ drbd_md_put_buffer(device);
/* D_FAILED will transition to DISKLESS. */
- ret = wait_event_interruptible(mdev->misc_wait,
- mdev->state.disk != D_FAILED);
- drbd_resume_io(mdev);
+ ret = wait_event_interruptible(device->misc_wait,
+ device->state.disk != D_FAILED);
+ drbd_resume_io(device);
if ((int)retcode == (int)SS_IS_DISKLESS)
retcode = SS_NOTHING_TO_DO;
if (ret)
@@ -1852,24 +1879,25 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
}
}
- retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
+ retcode = adm_detach(adm_ctx.device, parms.force_detach);
out:
drbd_adm_finish(info, retcode);
return 0;
}
-static bool conn_resync_running(struct drbd_tconn *tconn)
+static bool conn_resync_running(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
bool rv = false;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (mdev->state.conn == C_SYNC_SOURCE ||
- mdev->state.conn == C_SYNC_TARGET ||
- mdev->state.conn == C_PAUSED_SYNC_S ||
- mdev->state.conn == C_PAUSED_SYNC_T) {
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ if (device->state.conn == C_SYNC_SOURCE ||
+ device->state.conn == C_SYNC_TARGET ||
+ device->state.conn == C_PAUSED_SYNC_S ||
+ device->state.conn == C_PAUSED_SYNC_T) {
rv = true;
break;
}
@@ -1879,16 +1907,17 @@ static bool conn_resync_running(struct drbd_tconn *tconn)
return rv;
}
-static bool conn_ov_running(struct drbd_tconn *tconn)
+static bool conn_ov_running(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
bool rv = false;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (mdev->state.conn == C_VERIFY_S ||
- mdev->state.conn == C_VERIFY_T) {
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ if (device->state.conn == C_VERIFY_S ||
+ device->state.conn == C_VERIFY_T) {
rv = true;
break;
}
@@ -1899,63 +1928,65 @@ static bool conn_ov_running(struct drbd_tconn *tconn)
}
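
conn_resync_running() and conn_ov_running() show the new per-connection iteration: instead of idr_for_each_entry(&tconn->volumes, mdev, vnr), the loop now walks connection->peer_devices and reaches the device through peer_device->device. The pattern in isolation (conn_any_device_in_state() is a hypothetical helper):

/* Sketch; assumes the renamed fields shown above. */
static bool conn_any_device_in_state(struct drbd_connection *connection,
				     enum drbd_conns what)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		if (device->state.conn == what) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();
	return rv;
}
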
static enum drbd_ret_code
-_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
+_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int i;
- if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
- if (new_conf->wire_protocol != old_conf->wire_protocol)
+ if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
+ if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
return ERR_NEED_APV_100;
- if (new_conf->two_primaries != old_conf->two_primaries)
+ if (new_net_conf->two_primaries != old_net_conf->two_primaries)
return ERR_NEED_APV_100;
- if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
+ if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
return ERR_NEED_APV_100;
}
- if (!new_conf->two_primaries &&
- conn_highest_role(tconn) == R_PRIMARY &&
- conn_highest_peer(tconn) == R_PRIMARY)
+ if (!new_net_conf->two_primaries &&
+ conn_highest_role(connection) == R_PRIMARY &&
+ conn_highest_peer(connection) == R_PRIMARY)
return ERR_NEED_ALLOW_TWO_PRI;
- if (new_conf->two_primaries &&
- (new_conf->wire_protocol != DRBD_PROT_C))
+ if (new_net_conf->two_primaries &&
+ (new_net_conf->wire_protocol != DRBD_PROT_C))
return ERR_NOT_PROTO_C;
- idr_for_each_entry(&tconn->volumes, mdev, i) {
- if (get_ldev(mdev)) {
- enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
- put_ldev(mdev);
- if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
+ idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ struct drbd_device *device = peer_device->device;
+ if (get_ldev(device)) {
+ enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
+ put_ldev(device);
+ if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
return ERR_STONITH_AND_PROT_A;
}
- if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
+ if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
return ERR_DISCARD_IMPOSSIBLE;
}
- if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
+ if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
return ERR_CONG_NOT_PROTO_A;
return NO_ERROR;
}
static enum drbd_ret_code
-check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
+check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
{
static enum drbd_ret_code rv;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int i;
rcu_read_lock();
- rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
+ rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
rcu_read_unlock();
- /* tconn->volumes protected by genl_lock() here */
- idr_for_each_entry(&tconn->volumes, mdev, i) {
- if (!mdev->bitmap) {
- if(drbd_bm_init(mdev))
+ /* connection->volumes protected by genl_lock() here */
+ idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ struct drbd_device *device = peer_device->device;
+ if (!device->bitmap) {
+ if (drbd_bm_init(device))
return ERR_NOMEM;
}
}
@@ -1986,26 +2017,26 @@ alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
}
static enum drbd_ret_code
-alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
+alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
{
char hmac_name[CRYPTO_MAX_ALG_NAME];
enum drbd_ret_code rv;
- rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
+ rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg,
ERR_CSUMS_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
+ rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg,
ERR_VERIFY_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
+ rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
ERR_INTEGRITY_ALG);
if (rv != NO_ERROR)
return rv;
- if (new_conf->cram_hmac_alg[0] != 0) {
+ if (new_net_conf->cram_hmac_alg[0] != 0) {
snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
- new_conf->cram_hmac_alg);
+ new_net_conf->cram_hmac_alg);
rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
ERR_AUTH_ALG);
@@ -2025,8 +2056,8 @@ static void free_crypto(struct crypto *crypto)
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
- struct drbd_tconn *tconn;
- struct net_conf *old_conf, *new_conf = NULL;
+ struct drbd_connection *connection;
+ struct net_conf *old_net_conf, *new_net_conf = NULL;
int err;
int ovr; /* online verify running */
int rsr; /* re-sync running */
@@ -2038,98 +2069,103 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- tconn = adm_ctx.tconn;
+ connection = adm_ctx.connection;
- new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
- if (!new_conf) {
+ new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
retcode = ERR_NOMEM;
goto out;
}
- conn_reconfig_start(tconn);
+ conn_reconfig_start(connection);
- mutex_lock(&tconn->data.mutex);
- mutex_lock(&tconn->conf_update);
- old_conf = tconn->net_conf;
+ mutex_lock(&connection->data.mutex);
+ mutex_lock(&connection->resource->conf_update);
+ old_net_conf = connection->net_conf;
- if (!old_conf) {
+ if (!old_net_conf) {
drbd_msg_put_info("net conf missing, try connect");
retcode = ERR_INVALID_REQUEST;
goto fail;
}
- *new_conf = *old_conf;
+ *new_net_conf = *old_net_conf;
if (should_set_defaults(info))
- set_net_conf_defaults(new_conf);
+ set_net_conf_defaults(new_net_conf);
- err = net_conf_from_attrs_for_change(new_conf, info);
+ err = net_conf_from_attrs_for_change(new_net_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- retcode = check_net_options(tconn, new_conf);
+ retcode = check_net_options(connection, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
/* re-sync running */
- rsr = conn_resync_running(tconn);
- if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
+ rsr = conn_resync_running(connection);
+ if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
retcode = ERR_CSUMS_RESYNC_RUNNING;
goto fail;
}
/* online verify running */
- ovr = conn_ov_running(tconn);
- if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
+ ovr = conn_ov_running(connection);
+ if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
retcode = ERR_VERIFY_RUNNING;
goto fail;
}
- retcode = alloc_crypto(&crypto, new_conf);
+ retcode = alloc_crypto(&crypto, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
- rcu_assign_pointer(tconn->net_conf, new_conf);
+ rcu_assign_pointer(connection->net_conf, new_net_conf);
if (!rsr) {
- crypto_free_hash(tconn->csums_tfm);
- tconn->csums_tfm = crypto.csums_tfm;
+ crypto_free_hash(connection->csums_tfm);
+ connection->csums_tfm = crypto.csums_tfm;
crypto.csums_tfm = NULL;
}
if (!ovr) {
- crypto_free_hash(tconn->verify_tfm);
- tconn->verify_tfm = crypto.verify_tfm;
+ crypto_free_hash(connection->verify_tfm);
+ connection->verify_tfm = crypto.verify_tfm;
crypto.verify_tfm = NULL;
}
- crypto_free_hash(tconn->integrity_tfm);
- tconn->integrity_tfm = crypto.integrity_tfm;
- if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
- /* Do this without trying to take tconn->data.mutex again. */
- __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
+ crypto_free_hash(connection->integrity_tfm);
+ connection->integrity_tfm = crypto.integrity_tfm;
+ if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
+ /* Do this without trying to take connection->data.mutex again. */
+ __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
- crypto_free_hash(tconn->cram_hmac_tfm);
- tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
+ crypto_free_hash(connection->cram_hmac_tfm);
+ connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
- mutex_unlock(&tconn->conf_update);
- mutex_unlock(&tconn->data.mutex);
+ mutex_unlock(&connection->resource->conf_update);
+ mutex_unlock(&connection->data.mutex);
synchronize_rcu();
- kfree(old_conf);
+ kfree(old_net_conf);
+
+ if (connection->cstate >= C_WF_REPORT_PARAMS) {
+ struct drbd_peer_device *peer_device;
+ int vnr;
- if (tconn->cstate >= C_WF_REPORT_PARAMS)
- drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ drbd_send_sync_param(peer_device);
+ }
goto done;
fail:
- mutex_unlock(&tconn->conf_update);
- mutex_unlock(&tconn->data.mutex);
+ mutex_unlock(&connection->resource->conf_update);
+ mutex_unlock(&connection->data.mutex);
free_crypto(&crypto);
- kfree(new_conf);
+ kfree(new_net_conf);
done:
- conn_reconfig_done(tconn);
+ conn_reconfig_done(connection);
out:
drbd_adm_finish(info, retcode);
return 0;
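
drbd_adm_net_opts() above swaps the connection's net_conf the RCU way: the new copy is published with rcu_assign_pointer() under resource->conf_update, and the old copy is freed only after synchronize_rcu(). A reduced sketch of that pattern (replace_net_conf() is hypothetical; the caller is assumed to have filled in new_net_conf):

/* Sketch; readers dereference connection->net_conf under rcu_read_lock(). */
static void replace_net_conf(struct drbd_connection *connection,
			     struct net_conf *new_net_conf)
{
	struct net_conf *old_net_conf;

	mutex_lock(&connection->resource->conf_update);
	old_net_conf = connection->net_conf;
	rcu_assign_pointer(connection->net_conf, new_net_conf);
	mutex_unlock(&connection->resource->conf_update);

	synchronize_rcu();	/* wait for readers still using old_net_conf */
	kfree(old_net_conf);
}
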
@@ -2137,10 +2173,11 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
- struct net_conf *old_conf, *new_conf = NULL;
+ struct drbd_peer_device *peer_device;
+ struct net_conf *old_net_conf, *new_net_conf = NULL;
struct crypto crypto = { };
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
enum drbd_ret_code retcode;
int i;
int err;
@@ -2160,106 +2197,111 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
/* No need for _rcu here. All reconfiguration is
* strictly serialized on genl_lock(). We are protected against
* concurrent reconfiguration/addition/deletion */
- list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
- if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
- !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
- retcode = ERR_LOCAL_ADDR;
- goto out;
- }
+ for_each_resource(resource, &drbd_resources) {
+ for_each_connection(connection, resource) {
+ if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
+ !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
+ connection->my_addr_len)) {
+ retcode = ERR_LOCAL_ADDR;
+ goto out;
+ }
- if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
- !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
- retcode = ERR_PEER_ADDR;
- goto out;
+ if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
+ !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
+ connection->peer_addr_len)) {
+ retcode = ERR_PEER_ADDR;
+ goto out;
+ }
}
}
- tconn = adm_ctx.tconn;
- conn_reconfig_start(tconn);
+ connection = first_connection(adm_ctx.resource);
+ conn_reconfig_start(connection);
- if (tconn->cstate > C_STANDALONE) {
+ if (connection->cstate > C_STANDALONE) {
retcode = ERR_NET_CONFIGURED;
goto fail;
}
/* allocation not in the IO path, drbdsetup / netlink process context */
- new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
- if (!new_conf) {
+ new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
retcode = ERR_NOMEM;
goto fail;
}
- set_net_conf_defaults(new_conf);
+ set_net_conf_defaults(new_net_conf);
- err = net_conf_from_attrs(new_conf, info);
+ err = net_conf_from_attrs(new_net_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- retcode = check_net_options(tconn, new_conf);
+ retcode = check_net_options(connection, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
- retcode = alloc_crypto(&crypto, new_conf);
+ retcode = alloc_crypto(&crypto, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
- ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
+ ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
- conn_flush_workqueue(tconn);
+ drbd_flush_workqueue(&connection->sender_work);
- mutex_lock(&tconn->conf_update);
- old_conf = tconn->net_conf;
- if (old_conf) {
+ mutex_lock(&adm_ctx.resource->conf_update);
+ old_net_conf = connection->net_conf;
+ if (old_net_conf) {
retcode = ERR_NET_CONFIGURED;
- mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&adm_ctx.resource->conf_update);
goto fail;
}
- rcu_assign_pointer(tconn->net_conf, new_conf);
+ rcu_assign_pointer(connection->net_conf, new_net_conf);
- conn_free_crypto(tconn);
- tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
- tconn->integrity_tfm = crypto.integrity_tfm;
- tconn->csums_tfm = crypto.csums_tfm;
- tconn->verify_tfm = crypto.verify_tfm;
+ conn_free_crypto(connection);
+ connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
+ connection->integrity_tfm = crypto.integrity_tfm;
+ connection->csums_tfm = crypto.csums_tfm;
+ connection->verify_tfm = crypto.verify_tfm;
- tconn->my_addr_len = nla_len(adm_ctx.my_addr);
- memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
- tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
- memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
+ connection->my_addr_len = nla_len(adm_ctx.my_addr);
+ memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
+ connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
+ memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
- mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&adm_ctx.resource->conf_update);
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, i) {
- mdev->send_cnt = 0;
- mdev->recv_cnt = 0;
+ idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ struct drbd_device *device = peer_device->device;
+ device->send_cnt = 0;
+ device->recv_cnt = 0;
}
rcu_read_unlock();
- retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+ retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
- conn_reconfig_done(tconn);
+ conn_reconfig_done(connection);
drbd_adm_finish(info, retcode);
return 0;
fail:
free_crypto(&crypto);
- kfree(new_conf);
+ kfree(new_net_conf);
- conn_reconfig_done(tconn);
+ conn_reconfig_done(connection);
out:
drbd_adm_finish(info, retcode);
return 0;
}
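
drbd_adm_connect() above walks every connection of every resource to reject an address pair that is already configured. The same check, factored into a helper for illustration (addr_in_use() is hypothetical; as the comment in the hunk notes, genl_lock() serializes reconfiguration, so plain list iteration is enough):

/* Sketch; my_addr/peer_addr are the netlink attributes from adm_ctx. */
static bool addr_in_use(struct nlattr *my_addr, struct nlattr *peer_addr)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	for_each_resource(resource, &drbd_resources) {
		for_each_connection(connection, resource) {
			if (nla_len(my_addr) == connection->my_addr_len &&
			    !memcmp(nla_data(my_addr), &connection->my_addr,
				    connection->my_addr_len))
				return true;	/* local address already in use */
			if (nla_len(peer_addr) == connection->peer_addr_len &&
			    !memcmp(nla_data(peer_addr), &connection->peer_addr,
				    connection->peer_addr_len))
				return true;	/* peer address already in use */
		}
	}
	return false;
}
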
-static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
+static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
{
enum drbd_state_rv rv;
- rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+ rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
force ? CS_HARD : 0);
switch (rv) {
@@ -2269,18 +2311,18 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
return SS_SUCCESS;
case SS_PRIMARY_NOP:
/* Our state checking code wants to see the peer outdated. */
- rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
+ rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
- rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);
+ rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
break;
case SS_CW_FAILED_BY_PEER:
/* The peer probably wants to see us outdated. */
- rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+ rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
disk, D_OUTDATED), 0);
if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
- rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+ rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
CS_HARD);
}
break;
@@ -2294,18 +2336,18 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
* The state handling only uses drbd_thread_stop_nowait(),
* we want to really wait here until the receiver is no more.
*/
- drbd_thread_stop(&adm_ctx.tconn->receiver);
+ drbd_thread_stop(&connection->receiver);
/* Race breaker. This additional state change request may be
* necessary, if this was a forced disconnect during a receiver
* restart. We may have "killed" the receiver thread just
- * after drbdd_init() returned. Typically, we should be
+ * after drbd_receiver() returned. Typically, we should be
* C_STANDALONE already, now, and this becomes a no-op.
*/
- rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
+ rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
CS_VERBOSE | CS_HARD);
if (rv2 < SS_SUCCESS)
- conn_err(tconn,
+ drbd_err(connection,
"unexpected rv2=%d in conn_try_disconnect()\n",
rv2);
}
@@ -2315,7 +2357,7 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
struct disconnect_parms parms;
- struct drbd_tconn *tconn;
+ struct drbd_connection *connection;
enum drbd_state_rv rv;
enum drbd_ret_code retcode;
int err;
@@ -2326,7 +2368,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto fail;
- tconn = adm_ctx.tconn;
+ connection = adm_ctx.connection;
memset(&parms, 0, sizeof(parms));
if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
err = disconnect_parms_from_attrs(&parms, info);
@@ -2337,7 +2379,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
}
}
- rv = conn_try_disconnect(tconn, parms.force_disconnect);
+ rv = conn_try_disconnect(connection, parms.force_disconnect);
if (rv < SS_SUCCESS)
retcode = rv; /* FIXME: Type mismatch. */
else
@@ -2347,27 +2389,27 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-void resync_after_online_grow(struct drbd_conf *mdev)
+void resync_after_online_grow(struct drbd_device *device)
{
int iass; /* I am sync source */
- dev_info(DEV, "Resync of new storage after online grow\n");
- if (mdev->state.role != mdev->state.peer)
- iass = (mdev->state.role == R_PRIMARY);
+ drbd_info(device, "Resync of new storage after online grow\n");
+ if (device->state.role != device->state.peer)
+ iass = (device->state.role == R_PRIMARY);
else
- iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
+ iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
if (iass)
- drbd_start_resync(mdev, C_SYNC_SOURCE);
+ drbd_start_resync(device, C_SYNC_SOURCE);
else
- _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
+ _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
struct resize_parms rs;
- struct drbd_conf *mdev;
+ struct drbd_device *device;
enum drbd_ret_code retcode;
enum determine_dev_size dd;
bool change_al_layout = false;
@@ -2381,15 +2423,15 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto fail;
- mdev = adm_ctx.mdev;
- if (!get_ldev(mdev)) {
+ device = adm_ctx.device;
+ if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto fail;
}
memset(&rs, 0, sizeof(struct resize_parms));
- rs.al_stripes = mdev->ldev->md.al_stripes;
- rs.al_stripe_size = mdev->ldev->md.al_stripe_size_4k * 4;
+ rs.al_stripes = device->ldev->md.al_stripes;
+ rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
err = resize_parms_from_attrs(&rs, info);
if (err) {
@@ -2399,24 +2441,24 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
}
}
- if (mdev->state.conn > C_CONNECTED) {
+ if (device->state.conn > C_CONNECTED) {
retcode = ERR_RESIZE_RESYNC;
goto fail_ldev;
}
- if (mdev->state.role == R_SECONDARY &&
- mdev->state.peer == R_SECONDARY) {
+ if (device->state.role == R_SECONDARY &&
+ device->state.peer == R_SECONDARY) {
retcode = ERR_NO_PRIMARY;
goto fail_ldev;
}
- if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
+ if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93;
goto fail_ldev;
}
rcu_read_lock();
- u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
if (u_size != (sector_t)rs.resize_size) {
new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
@@ -2426,8 +2468,8 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
}
}
- if (mdev->ldev->md.al_stripes != rs.al_stripes ||
- mdev->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
+ if (device->ldev->md.al_stripes != rs.al_stripes ||
+ device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
if (al_size_k > (16 * 1024 * 1024)) {
@@ -2440,7 +2482,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
goto fail_ldev;
}
- if (mdev->state.conn != C_CONNECTED) {
+ if (device->state.conn != C_CONNECTED) {
retcode = ERR_MD_LAYOUT_CONNECTED;
goto fail_ldev;
}
@@ -2448,24 +2490,24 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
change_al_layout = true;
}
- if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
- mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
+ if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
+ device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
if (new_disk_conf) {
- mutex_lock(&mdev->tconn->conf_update);
- old_disk_conf = mdev->ldev->disk_conf;
+ mutex_lock(&device->resource->conf_update);
+ old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
new_disk_conf->disk_size = (sector_t)rs.resize_size;
- rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
- mutex_unlock(&mdev->tconn->conf_update);
+ rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
+ mutex_unlock(&device->resource->conf_update);
synchronize_rcu();
kfree(old_disk_conf);
}
ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
- dd = drbd_determine_dev_size(mdev, ddsf, change_al_layout ? &rs : NULL);
- drbd_md_sync(mdev);
- put_ldev(mdev);
+ dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
+ drbd_md_sync(device);
+ put_ldev(device);
if (dd == DS_ERROR) {
retcode = ERR_NOMEM_BITMAP;
goto fail;
@@ -2477,12 +2519,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
- if (mdev->state.conn == C_CONNECTED) {
+ if (device->state.conn == C_CONNECTED) {
if (dd == DS_GREW)
- set_bit(RESIZE_PENDING, &mdev->flags);
+ set_bit(RESIZE_PENDING, &device->flags);
- drbd_send_uuids(mdev);
- drbd_send_sizes(mdev, 1, ddsf);
+ drbd_send_uuids(first_peer_device(device));
+ drbd_send_sizes(first_peer_device(device), 1, ddsf);
}
fail:
@@ -2490,14 +2532,13 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
return 0;
fail_ldev:
- put_ldev(mdev);
+ put_ldev(device);
goto fail;
}
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
- struct drbd_tconn *tconn;
struct res_opts res_opts;
int err;
@@ -2506,9 +2547,8 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
return retcode;
if (retcode != NO_ERROR)
goto fail;
- tconn = adm_ctx.tconn;
- res_opts = tconn->res_opts;
+ res_opts = adm_ctx.resource->res_opts;
if (should_set_defaults(info))
set_res_opts_defaults(&res_opts);
@@ -2519,7 +2559,7 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
- err = set_resource_options(tconn, &res_opts);
+ err = set_resource_options(adm_ctx.resource, &res_opts);
if (err) {
retcode = ERR_INVALID_REQUEST;
if (err == -ENOMEM)
@@ -2533,7 +2573,7 @@ fail:
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
@@ -2542,29 +2582,29 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- mdev = adm_ctx.mdev;
+ device = adm_ctx.device;
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
* Also wait for its after_state_ch(). */
- drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
- drbd_flush_workqueue(mdev);
+ drbd_suspend_io(device);
+ wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
+ drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
/* If we happen to be C_STANDALONE R_SECONDARY, just change to
* D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
* try to start a resync handshake as sync target for full sync.
*/
- if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
- retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
+ if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
+ retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
if (retcode >= SS_SUCCESS) {
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+ if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
"set_n_write from invalidate", BM_LOCKED_MASK))
retcode = ERR_IO_MD_DISK;
}
} else
- retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
- drbd_resume_io(mdev);
+ retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
+ drbd_resume_io(device);
out:
drbd_adm_finish(info, retcode);
@@ -2582,25 +2622,25 @@ static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *
if (retcode != NO_ERROR)
goto out;
- retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+ retcode = drbd_request_state(adm_ctx.device, mask, val);
out:
drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
+static int drbd_bmio_set_susp_al(struct drbd_device *device)
{
int rv;
- rv = drbd_bmio_set_n_write(mdev);
- drbd_suspend_al(mdev);
+ rv = drbd_bmio_set_n_write(device);
+ drbd_suspend_al(device);
return rv;
}
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
int retcode; /* drbd_ret_code, drbd_state_rv */
- struct drbd_conf *mdev;
+ struct drbd_device *device;
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
@@ -2608,32 +2648,32 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- mdev = adm_ctx.mdev;
+ device = adm_ctx.device;
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
* Also wait for its after_state_ch(). */
- drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
- drbd_flush_workqueue(mdev);
+ drbd_suspend_io(device);
+ wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
+ drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
* in the bitmap. Otherwise, try to start a resync handshake
* as sync source for full sync.
*/
- if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
+ if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
/* The peer will get a resync upon connect anyway. Just make that
into a full resync. */
- retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
+ retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
if (retcode >= SS_SUCCESS) {
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
+ if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
"set_n_write from invalidate_peer",
BM_LOCKED_SET_ALLOWED))
retcode = ERR_IO_MD_DISK;
}
} else
- retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
- drbd_resume_io(mdev);
+ retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
+ drbd_resume_io(device);
out:
drbd_adm_finish(info, retcode);
@@ -2650,7 +2690,7 @@ int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
+ if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
retcode = ERR_PAUSE_IS_SET;
out:
drbd_adm_finish(info, retcode);
@@ -2668,8 +2708,8 @@ int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
- s = adm_ctx.mdev->state;
+ if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
+ s = adm_ctx.device->state;
if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
@@ -2690,7 +2730,7 @@ int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
@@ -2699,20 +2739,20 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- mdev = adm_ctx.mdev;
- if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
- drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ device = adm_ctx.device;
+ if (test_bit(NEW_CUR_UUID, &device->flags)) {
+ drbd_uuid_new_current(device);
+ clear_bit(NEW_CUR_UUID, &device->flags);
}
- drbd_suspend_io(mdev);
- retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
+ drbd_suspend_io(device);
+ retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
if (retcode == SS_SUCCESS) {
- if (mdev->state.conn < C_CONNECTED)
- tl_clear(mdev->tconn);
- if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
- tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
+ if (device->state.conn < C_CONNECTED)
+ tl_clear(first_peer_device(device)->connection);
+ if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
+ tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
}
- drbd_resume_io(mdev);
+ drbd_resume_io(device);
out:
drbd_adm_finish(info, retcode);
@@ -2724,23 +2764,28 @@ int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
-int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
+static int nla_put_drbd_cfg_context(struct sk_buff *skb,
+ struct drbd_resource *resource,
+ struct drbd_connection *connection,
+ struct drbd_device *device)
{
struct nlattr *nla;
nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
if (!nla)
goto nla_put_failure;
- if (vnr != VOLUME_UNSPECIFIED &&
- nla_put_u32(skb, T_ctx_volume, vnr))
- goto nla_put_failure;
- if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
- goto nla_put_failure;
- if (tconn->my_addr_len &&
- nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
+ if (device &&
+ nla_put_u32(skb, T_ctx_volume, device->vnr))
goto nla_put_failure;
- if (tconn->peer_addr_len &&
- nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
+ if (nla_put_string(skb, T_ctx_resource_name, resource->name))
goto nla_put_failure;
+ if (connection) {
+ if (connection->my_addr_len &&
+ nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
+ goto nla_put_failure;
+ if (connection->peer_addr_len &&
+ nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
+ goto nla_put_failure;
+ }
nla_nest_end(skb, nla);
return 0;
@@ -2750,9 +2795,22 @@ nla_put_failure:
return -EMSGSIZE;
}
-int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
+/*
+ * Return the connection of @resource if @resource has exactly one connection.
+ */
+static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
+{
+ struct list_head *connections = &resource->connections;
+
+ if (list_empty(connections) || connections->next->next != connections)
+ return NULL;
+ return list_first_entry(&resource->connections, struct drbd_connection, connections);
+}
+
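
the_only_connection() open-codes an exactly-one-element test on resource->connections. With the standard list helpers the same check can be written as below (only_connection() is a hypothetical equivalent, not part of the patch):

/* Sketch; behaviour matches the_only_connection() above. */
static struct drbd_connection *only_connection(struct drbd_resource *resource)
{
	if (!list_is_singular(&resource->connections))
		return NULL;
	return list_first_entry(&resource->connections,
				struct drbd_connection, connections);
}
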
+int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
const struct sib_info *sib)
{
+ struct drbd_resource *resource = device->resource;
struct state_info *si = NULL; /* for sizeof(si->member); */
struct nlattr *nla;
int got_ldev;
@@ -2772,27 +2830,27 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
* always in the context of the receiving process */
exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
- got_ldev = get_ldev(mdev);
+ got_ldev = get_ldev(device);
/* We need to add connection name and volume number information still.
* Minor number is in drbd_genlmsghdr. */
- if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
+ if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
goto nla_put_failure;
- if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
+ if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
goto nla_put_failure;
rcu_read_lock();
if (got_ldev) {
struct disk_conf *disk_conf;
- disk_conf = rcu_dereference(mdev->ldev->disk_conf);
+ disk_conf = rcu_dereference(device->ldev->disk_conf);
err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
}
if (!err) {
struct net_conf *nc;
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (nc)
err = net_conf_to_skb(skb, nc, exclude_sensitive);
}
@@ -2804,38 +2862,38 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
if (!nla)
goto nla_put_failure;
if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
- nla_put_u32(skb, T_current_state, mdev->state.i) ||
- nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
- nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
- nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
- nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
- nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
- nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
- nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
- nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
- nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
- nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
- nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
+ nla_put_u32(skb, T_current_state, device->state.i) ||
+ nla_put_u64(skb, T_ed_uuid, device->ed_uuid) ||
+ nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) ||
+ nla_put_u64(skb, T_send_cnt, device->send_cnt) ||
+ nla_put_u64(skb, T_recv_cnt, device->recv_cnt) ||
+ nla_put_u64(skb, T_read_cnt, device->read_cnt) ||
+ nla_put_u64(skb, T_writ_cnt, device->writ_cnt) ||
+ nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) ||
+ nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
+ nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
+ nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
+ nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
goto nla_put_failure;
if (got_ldev) {
int err;
- spin_lock_irq(&mdev->ldev->md.uuid_lock);
- err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
- spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+ spin_lock_irq(&device->ldev->md.uuid_lock);
+ err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
+ spin_unlock_irq(&device->ldev->md.uuid_lock);
if (err)
goto nla_put_failure;
- if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
- nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
- nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
+ if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
+ nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) ||
+ nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device)))
goto nla_put_failure;
- if (C_SYNC_SOURCE <= mdev->state.conn &&
- C_PAUSED_SYNC_T >= mdev->state.conn) {
- if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
- nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
+ if (C_SYNC_SOURCE <= device->state.conn &&
+ C_PAUSED_SYNC_T >= device->state.conn) {
+ if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) ||
+ nla_put_u64(skb, T_bits_rs_failed, device->rs_failed))
goto nla_put_failure;
}
}
@@ -2867,7 +2925,7 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
nla_put_failure:
err = -EMSGSIZE;
if (got_ldev)
- put_ldev(mdev);
+ put_ldev(device);
return err;
}
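
nla_put_status_info() builds its reply from nested attribute groups (cfg context, disk/net options, state info). The nesting pattern, reduced to a single group (put_example_ctx() is hypothetical and only shows the netlink calls involved):

/* Sketch; not part of the patch. */
static int put_example_ctx(struct sk_buff *skb, struct drbd_resource *resource)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		return -EMSGSIZE;
	if (nla_put_string(skb, T_ctx_resource_name, resource->name))
		goto nla_put_failure;
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
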
@@ -2882,7 +2940,7 @@ int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
+ err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
if (err) {
nlmsg_free(adm_ctx.reply_skb);
return err;
@@ -2892,22 +2950,23 @@ out:
return 0;
}
-int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
+static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
struct drbd_genlmsghdr *dh;
- struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
- struct drbd_tconn *tconn = NULL;
- struct drbd_tconn *tmp;
+ struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
+ struct drbd_resource *resource = NULL;
+ struct drbd_resource *tmp;
unsigned volume = cb->args[1];
/* Open coded, deferred, iteration:
- * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
- * idr_for_each_entry(&tconn->volumes, mdev, i) {
+ * for_each_resource_safe(resource, tmp, &drbd_resources) {
+ * connection = "first connection of resource or undefined";
+ * idr_for_each_entry(&resource->devices, device, i) {
* ...
* }
* }
- * where tconn is cb->args[0];
+ * where resource is cb->args[0];
* and i is cb->args[1];
*
* cb->args[2] indicates if we shall loop over all resources,
@@ -2916,44 +2975,44 @@ int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
* This may miss entries inserted after this dump started,
* or entries deleted before they are reached.
*
- * We need to make sure the mdev won't disappear while
+ * We need to make sure the device won't disappear while
* we are looking at it, and revalidate our iterators
* on each iteration.
*/
- /* synchronize with conn_create()/conn_destroy() */
+ /* synchronize with conn_create()/drbd_destroy_connection() */
rcu_read_lock();
/* revalidate iterator position */
- list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
+ for_each_resource_rcu(tmp, &drbd_resources) {
if (pos == NULL) {
/* first iteration */
pos = tmp;
- tconn = pos;
+ resource = pos;
break;
}
if (tmp == pos) {
- tconn = pos;
+ resource = pos;
break;
}
}
- if (tconn) {
-next_tconn:
- mdev = idr_get_next(&tconn->volumes, &volume);
- if (!mdev) {
- /* No more volumes to dump on this tconn.
- * Advance tconn iterator. */
- pos = list_entry_rcu(tconn->all_tconn.next,
- struct drbd_tconn, all_tconn);
- /* Did we dump any volume on this tconn yet? */
+ if (resource) {
+next_resource:
+ device = idr_get_next(&resource->devices, &volume);
+ if (!device) {
+ /* No more volumes to dump on this resource.
+ * Advance resource iterator. */
+ pos = list_entry_rcu(resource->resources.next,
+ struct drbd_resource, resources);
+ /* Did we dump any volume of this resource yet? */
if (volume != 0) {
/* If we reached the end of the list,
* or only a single resource dump was requested,
* we are done. */
- if (&pos->all_tconn == &drbd_tconns || cb->args[2])
+ if (&pos->resources == &drbd_resources || cb->args[2])
goto out;
volume = 0;
- tconn = pos;
- goto next_tconn;
+ resource = pos;
+ goto next_resource;
}
}
@@ -2963,43 +3022,49 @@ next_tconn:
if (!dh)
goto out;
- if (!mdev) {
- /* This is a tconn without a single volume.
+ if (!device) {
+ /* This is a connection without a single volume.
* Surprisingly enough, it may have a network
* configuration. */
- struct net_conf *nc;
+ struct drbd_connection *connection;
+
dh->minor = -1U;
dh->ret_code = NO_ERROR;
- if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
- goto cancel;
- nc = rcu_dereference(tconn->net_conf);
- if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+ connection = the_only_connection(resource);
+ if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
goto cancel;
+ if (connection) {
+ struct net_conf *nc;
+
+ nc = rcu_dereference(connection->net_conf);
+ if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+ goto cancel;
+ }
goto done;
}
- D_ASSERT(mdev->vnr == volume);
- D_ASSERT(mdev->tconn == tconn);
+ D_ASSERT(device, device->vnr == volume);
+ D_ASSERT(device, device->resource == resource);
- dh->minor = mdev_to_minor(mdev);
+ dh->minor = device_to_minor(device);
dh->ret_code = NO_ERROR;
- if (nla_put_status_info(skb, mdev, NULL)) {
+ if (nla_put_status_info(skb, device, NULL)) {
cancel:
genlmsg_cancel(skb, dh);
goto out;
}
done:
genlmsg_end(skb, dh);
- }
+ }
out:
rcu_read_unlock();
/* where to start the next iteration */
- cb->args[0] = (long)pos;
- cb->args[1] = (pos == tconn) ? volume + 1 : 0;
+ cb->args[0] = (long)pos;
+ cb->args[1] = (pos == resource) ? volume + 1 : 0;
- /* No more tconns/volumes/minors found results in an empty skb.
+ /* No more resources/volumes/minors found results in an empty skb.
* Which will terminate the dump. */
return skb->len;
}
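
get_one_status() keeps its resume state in the netlink callback: cb->args[0] is the resource to continue from, cb->args[1] the next volume number, and cb->args[2] the "dump only this resource" filter set up by drbd_adm_get_status_all(). A sketch of the position save at the end of each dump pass (save_dump_position() is hypothetical):

/* Sketch; mirrors the cb->args assignments above. */
static void save_dump_position(struct netlink_callback *cb,
			       struct drbd_resource *pos,
			       struct drbd_resource *resource,
			       unsigned int volume)
{
	cb->args[0] = (long)pos;				/* resource to resume from */
	cb->args[1] = (pos == resource) ? volume + 1 : 0;	/* next volume on it */
}
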
@@ -3019,7 +3084,7 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
struct nlattr *nla;
const char *resource_name;
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
int maxtype;
/* Is this a followup call? */
@@ -3048,18 +3113,19 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
if (!nla)
return -EINVAL;
resource_name = nla_data(nla);
- tconn = conn_get_by_name(resource_name);
-
- if (!tconn)
+ if (!*resource_name)
+ return -ENODEV;
+ resource = drbd_find_resource(resource_name);
+ if (!resource)
return -ENODEV;
- kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
+ kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
/* prime iterators, and set "filter" mode mark:
- * only dump this tconn. */
- cb->args[0] = (long)tconn;
+ * only dump this connection. */
+ cb->args[0] = (long)resource;
/* cb->args[1] = 0; passed in this way. */
- cb->args[2] = (long)tconn;
+ cb->args[2] = (long)resource;
dump:
return get_one_status(skb, cb);
@@ -3078,8 +3144,8 @@ int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
goto out;
tp.timeout_type =
- adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
- test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
+ adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
+ test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
UT_DEFAULT;
err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
@@ -3094,7 +3160,7 @@ out:
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
enum drbd_ret_code retcode;
struct start_ov_parms parms;
@@ -3104,10 +3170,10 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- mdev = adm_ctx.mdev;
+ device = adm_ctx.device;
/* resume from last known position, if possible */
- parms.ov_start_sector = mdev->ov_start_sector;
+ parms.ov_start_sector = device->ov_start_sector;
parms.ov_stop_sector = ULLONG_MAX;
if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
int err = start_ov_parms_from_attrs(&parms, info);
@@ -3118,15 +3184,15 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
}
}
/* w_make_ov_request expects position to be aligned */
- mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
- mdev->ov_stop_sector = parms.ov_stop_sector;
+ device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
+ device->ov_stop_sector = parms.ov_stop_sector;
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
- drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
- retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
- drbd_resume_io(mdev);
+ drbd_suspend_io(device);
+ wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
+ retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
+ drbd_resume_io(device);
out:
drbd_adm_finish(info, retcode);
return 0;
@@ -3135,7 +3201,7 @@ out:
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
enum drbd_ret_code retcode;
int skip_initial_sync = 0;
int err;
@@ -3147,7 +3213,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out_nolock;
- mdev = adm_ctx.mdev;
+ device = adm_ctx.device;
memset(&args, 0, sizeof(args));
if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
err = new_c_uuid_parms_from_attrs(&args, info);
@@ -3158,49 +3224,50 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
}
}
- mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
+ mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
- if (!get_ldev(mdev)) {
+ if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto out;
}
/* this is "skip initial sync", assume to be clean */
- if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
- mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
- dev_info(DEV, "Preparing to skip initial sync\n");
+ if (device->state.conn == C_CONNECTED &&
+ first_peer_device(device)->connection->agreed_pro_version >= 90 &&
+ device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
+ drbd_info(device, "Preparing to skip initial sync\n");
skip_initial_sync = 1;
- } else if (mdev->state.conn != C_STANDALONE) {
+ } else if (device->state.conn != C_STANDALONE) {
retcode = ERR_CONNECTED;
goto out_dec;
}
- drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
- drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
+ drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
+ drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
if (args.clear_bm) {
- err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
+ err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
if (err) {
- dev_err(DEV, "Writing bitmap failed with %d\n",err);
+ drbd_err(device, "Writing bitmap failed with %d\n", err);
retcode = ERR_IO_MD_DISK;
}
if (skip_initial_sync) {
- drbd_send_uuids_skip_initial_sync(mdev);
- _drbd_uuid_set(mdev, UI_BITMAP, 0);
- drbd_print_uuids(mdev, "cleared bitmap UUID");
- spin_lock_irq(&mdev->tconn->req_lock);
- _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
+ drbd_send_uuids_skip_initial_sync(first_peer_device(device));
+ _drbd_uuid_set(device, UI_BITMAP, 0);
+ drbd_print_uuids(device, "cleared bitmap UUID");
+ spin_lock_irq(&device->resource->req_lock);
+ _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
}
}
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
out_dec:
- put_ldev(mdev);
+ put_ldev(device);
out:
- mutex_unlock(mdev->state_mutex);
+ mutex_unlock(device->state_mutex);
out_nolock:
drbd_adm_finish(info, retcode);
return 0;
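drbd_adm_new_c_uuid() brackets every metadata access between get_ldev() and put_ldev() and takes device->state_mutex first so no concurrent state change can slip in. A minimal sketch of that bracket, assuming drbd_int.h for the DRBD types; the wrapper name and the callback shape are illustrative, not driver API:

/* Sketch only: serialize against state changes, pin the local disk,
 * run the metadata work, then release both in reverse order. */
static int with_local_disk(struct drbd_device *device,
			   int (*work)(struct drbd_device *))
{
	int err;

	mutex_lock(device->state_mutex);
	if (!get_ldev(device)) {	/* disk may already be detached */
		mutex_unlock(device->state_mutex);
		return -ENODEV;
	}
	err = work(device);		/* e.g. UUID rotation, bitmap I/O */
	put_ldev(device);
	mutex_unlock(device->state_mutex);
	return err;
}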
@@ -3246,7 +3313,7 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- if (adm_ctx.tconn) {
+ if (adm_ctx.resource) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
retcode = ERR_INVALID_REQUEST;
drbd_msg_put_info("resource exists");
@@ -3262,7 +3329,7 @@ out:
return 0;
}
-int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
+int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_genlmsghdr *dh = info->userhdr;
enum drbd_ret_code retcode;
@@ -3285,41 +3352,36 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
}
/* drbd_adm_prepare made sure already
- * that mdev->tconn and mdev->vnr match the request. */
- if (adm_ctx.mdev) {
+ * that first_peer_device(device)->connection and device->vnr match the request. */
+ if (adm_ctx.device) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
retcode = ERR_MINOR_EXISTS;
/* else: still NO_ERROR */
goto out;
}
- retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
+ retcode = drbd_create_device(adm_ctx.resource, dh->minor, adm_ctx.volume);
out:
drbd_adm_finish(info, retcode);
return 0;
}
-static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
+static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
{
- if (mdev->state.disk == D_DISKLESS &&
- /* no need to be mdev->state.conn == C_STANDALONE &&
+ if (device->state.disk == D_DISKLESS &&
+ /* no need to be device->state.conn == C_STANDALONE &&
* we may want to delete a minor from a live replication group.
*/
- mdev->state.role == R_SECONDARY) {
- _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
+ device->state.role == R_SECONDARY) {
+ _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
CS_VERBOSE + CS_WAIT_COMPLETE);
- idr_remove(&mdev->tconn->volumes, mdev->vnr);
- idr_remove(&minors, mdev_to_minor(mdev));
- destroy_workqueue(mdev->submit.wq);
- del_gendisk(mdev->vdisk);
- synchronize_rcu();
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_delete_device(device);
return NO_ERROR;
} else
return ERR_MINOR_CONFIGURED;
}
-int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
+int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
@@ -3329,7 +3391,7 @@ int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- retcode = adm_delete_minor(adm_ctx.mdev);
+ retcode = adm_del_minor(adm_ctx.device);
out:
drbd_adm_finish(info, retcode);
return 0;
@@ -3337,55 +3399,58 @@ out:
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
+ struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
- struct drbd_conf *mdev;
unsigned i;
- retcode = drbd_adm_prepare(skb, info, 0);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
- if (!adm_ctx.tconn) {
- retcode = ERR_RES_NOT_KNOWN;
- goto out;
- }
-
+ resource = adm_ctx.resource;
/* demote */
- idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
- retcode = drbd_set_role(mdev, R_SECONDARY, 0);
+ for_each_connection(connection, resource) {
+ struct drbd_peer_device *peer_device;
+
+ idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
+ if (retcode < SS_SUCCESS) {
+ drbd_msg_put_info("failed to demote");
+ goto out;
+ }
+ }
+
+ retcode = conn_try_disconnect(connection, 0);
if (retcode < SS_SUCCESS) {
- drbd_msg_put_info("failed to demote");
+ drbd_msg_put_info("failed to disconnect");
goto out;
}
}
- retcode = conn_try_disconnect(adm_ctx.tconn, 0);
- if (retcode < SS_SUCCESS) {
- drbd_msg_put_info("failed to disconnect");
- goto out;
- }
-
/* detach */
- idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
- retcode = adm_detach(mdev, 0);
+ idr_for_each_entry(&resource->devices, device, i) {
+ retcode = adm_detach(device, 0);
if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
drbd_msg_put_info("failed to detach");
goto out;
}
}
- /* If we reach this, all volumes (of this tconn) are Secondary,
+ /* If we reach this, all volumes (of this connection) are Secondary,
* Disconnected, Diskless, aka Unconfigured. Make sure all threads have
* actually stopped, state handling only does drbd_thread_stop_nowait(). */
- drbd_thread_stop(&adm_ctx.tconn->worker);
+ for_each_connection(connection, resource)
+ drbd_thread_stop(&connection->worker);
/* Now, nothing can fail anymore */
/* delete volumes */
- idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
- retcode = adm_delete_minor(mdev);
+ idr_for_each_entry(&resource->devices, device, i) {
+ retcode = adm_del_minor(device);
if (retcode != NO_ERROR) {
/* "can not happen" */
drbd_msg_put_info("failed to delete volume");
@@ -3393,19 +3458,11 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
}
}
- /* delete connection */
- if (conn_lowest_minor(adm_ctx.tconn) < 0) {
- list_del_rcu(&adm_ctx.tconn->all_tconn);
- synchronize_rcu();
- kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+ list_del_rcu(&resource->resources);
+ synchronize_rcu();
+ drbd_free_resource(resource);
+ retcode = NO_ERROR;
- retcode = NO_ERROR;
- } else {
- /* "can not happen" */
- retcode = ERR_RES_IN_USE;
- drbd_msg_put_info("failed to delete connection");
- }
- goto out;
out:
drbd_adm_finish(info, retcode);
return 0;
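drbd_adm_down() now walks the per-resource IDR of devices (and the list of connections) instead of the old per-connection volume IDR. A self-contained sketch of the idr_for_each_entry() idiom used throughout the new code; the item type is made up for illustration:

#include <linux/idr.h>
#include <linux/printk.h>

struct example_item {
	int payload;
};

/* Visit every entry registered in @items; @id receives the current slot.
 * Read-only traversal here -- removal during iteration needs extra care. */
static void show_items(struct idr *items)
{
	struct example_item *it;
	int id;

	idr_for_each_entry(items, it, id)
		pr_info("slot %d -> payload %d\n", id, it->payload);
}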
@@ -3413,6 +3470,8 @@ out:
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
enum drbd_ret_code retcode;
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
@@ -3421,24 +3480,30 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- if (conn_lowest_minor(adm_ctx.tconn) < 0) {
- list_del_rcu(&adm_ctx.tconn->all_tconn);
- synchronize_rcu();
- kref_put(&adm_ctx.tconn->kref, &conn_destroy);
-
- retcode = NO_ERROR;
- } else {
+ resource = adm_ctx.resource;
+ for_each_connection(connection, resource) {
+ if (connection->cstate > C_STANDALONE) {
+ retcode = ERR_NET_CONFIGURED;
+ goto out;
+ }
+ }
+ if (!idr_is_empty(&resource->devices)) {
retcode = ERR_RES_IN_USE;
+ goto out;
}
- if (retcode == NO_ERROR)
- drbd_thread_stop(&adm_ctx.tconn->worker);
+ list_del_rcu(&resource->resources);
+ for_each_connection(connection, resource)
+ drbd_thread_stop(&connection->worker);
+ synchronize_rcu();
+ drbd_free_resource(resource);
+ retcode = NO_ERROR;
out:
drbd_adm_finish(info, retcode);
return 0;
}
-void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
+void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
{
static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
struct sk_buff *msg;
@@ -3447,8 +3512,8 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
int err = -ENOMEM;
if (sib->sib_reason == SIB_SYNC_PROGRESS) {
- if (time_after(jiffies, mdev->rs_last_bcast + HZ))
- mdev->rs_last_bcast = jiffies;
+ if (time_after(jiffies, device->rs_last_bcast + HZ))
+ device->rs_last_bcast = jiffies;
else
return;
}
@@ -3462,10 +3527,10 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
if (!d_out) /* cannot happen, but anyways. */
goto nla_put_failure;
- d_out->minor = mdev_to_minor(mdev);
+ d_out->minor = device_to_minor(device);
d_out->ret_code = NO_ERROR;
- if (nla_put_status_info(msg, mdev, sib))
+ if (nla_put_status_info(msg, device, sib))
goto nla_put_failure;
genlmsg_end(msg, d_out);
err = drbd_genl_multicast_events(msg, 0);
@@ -3478,7 +3543,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
nla_put_failure:
nlmsg_free(msg);
failed:
- dev_err(DEV, "Error %d while broadcasting event. "
+ drbd_err(device, "Error %d while broadcasting event. "
"Event seq:%u sib_reason:%u\n",
err, seq, sib->sib_reason);
}
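Sync-progress events are throttled to one broadcast per second by comparing jiffies with the time of the last accepted event. A minimal sketch of that time_after() pattern:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Return true at most once per second; *last holds the jiffies value of the
 * last accepted event, like device->rs_last_bcast above. */
static bool accept_once_per_second(unsigned long *last)
{
	if (time_after(jiffies, *last + HZ)) {	/* at least HZ ticks elapsed */
		*last = jiffies;
		return true;
	}
	return false;
}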
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index bf31d41dbaad..2f26e8ffa45b 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -46,7 +46,7 @@ const struct file_operations drbd_proc_fops = {
.release = drbd_proc_release,
};
-void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
+static void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
{
/* v is in kB/sec. We don't expect TiByte/sec yet. */
if (unlikely(v >= 1000000)) {
@@ -66,14 +66,14 @@ void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
* [=====>..............] 33.5% (23456/123456)
* finish: 2:20:20 speed: 6,345 (6,456) K/sec
*/
-static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
+static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq)
{
unsigned long db, dt, dbdt, rt, rs_left;
unsigned int res;
int i, x, y;
int stalled = 0;
- drbd_get_syncer_progress(mdev, &rs_left, &res);
+ drbd_get_syncer_progress(device, &rs_left, &res);
x = res/50;
y = 20-x;
@@ -85,21 +85,21 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
seq_printf(seq, ".");
seq_printf(seq, "] ");
- if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+ if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
seq_printf(seq, "verified:");
else
seq_printf(seq, "sync'ed:");
seq_printf(seq, "%3u.%u%% ", res / 10, res % 10);
/* if more than a few GB, display in MB */
- if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
+ if (device->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
seq_printf(seq, "(%lu/%lu)M",
(unsigned long) Bit2KB(rs_left >> 10),
- (unsigned long) Bit2KB(mdev->rs_total >> 10));
+ (unsigned long) Bit2KB(device->rs_total >> 10));
else
seq_printf(seq, "(%lu/%lu)K\n\t",
(unsigned long) Bit2KB(rs_left),
- (unsigned long) Bit2KB(mdev->rs_total));
+ (unsigned long) Bit2KB(device->rs_total));
/* see drivers/md/md.c
* We do not want to overflow, so the order of operands and
@@ -114,14 +114,14 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
* at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at
* least DRBD_SYNC_MARK_STEP time before it will be modified. */
/* ------------------------ ~18s average ------------------------ */
- i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS;
- dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
+ i = (device->rs_last_mark + 2) % DRBD_SYNC_MARKS;
+ dt = (jiffies - device->rs_mark_time[i]) / HZ;
if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS))
stalled = 1;
if (!dt)
dt++;
- db = mdev->rs_mark_left[i] - rs_left;
+ db = device->rs_mark_left[i] - rs_left;
rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */
seq_printf(seq, "finish: %lu:%02lu:%02lu",
@@ -134,11 +134,11 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
/* ------------------------- ~3s average ------------------------ */
if (proc_details >= 1) {
/* this is what drbd_rs_should_slow_down() uses */
- i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
- dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
+ i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
+ dt = (jiffies - device->rs_mark_time[i]) / HZ;
if (!dt)
dt++;
- db = mdev->rs_mark_left[i] - rs_left;
+ db = device->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
seq_printf(seq, " -- ");
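The speed printed here is the number of bits cleared between two sync marks, converted to KiB and divided by the elapsed seconds, and the ETA computed a few lines earlier orders its operands as dt * (rs_left / (db/100 + 1)) / 100 so the intermediate products stay within unsigned long. A user-space sketch of both computations; treating Bit2KB() as "bits << 2" (4 KiB per bitmap bit) is an assumption for illustration:

#include <stdio.h>

#define BIT2KB(bits)	((bits) << 2)	/* assumed: one bit covers 4 KiB */

int main(void)
{
	unsigned long rs_left   = 800000;	/* bits still out of sync (example) */
	unsigned long mark_left = 900000;	/* bits left at the older mark */
	unsigned long dt        = 18;		/* seconds between the two marks */

	unsigned long db   = mark_left - rs_left;	/* bits done during dt */
	unsigned long dbdt = BIT2KB(db / dt);		/* current speed, KiB/s */
	/* divide before multiplying to avoid overflowing unsigned long */
	unsigned long rt   = (dt * (rs_left / (db / 100 + 1))) / 100;

	printf("speed %lu KiB/s, finish in ~%lu s\n", dbdt, rt);
	return 0;
}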
@@ -147,34 +147,34 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
/* --------------------- long term average ---------------------- */
/* mean speed since syncer started
* we do account for PausedSync periods */
- dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
+ dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
if (dt == 0)
dt = 1;
- db = mdev->rs_total - rs_left;
+ db = device->rs_total - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
seq_printf(seq, ")");
- if (mdev->state.conn == C_SYNC_TARGET ||
- mdev->state.conn == C_VERIFY_S) {
+ if (device->state.conn == C_SYNC_TARGET ||
+ device->state.conn == C_VERIFY_S) {
seq_printf(seq, " want: ");
- seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate);
+ seq_printf_with_thousands_grouping(seq, device->c_sync_rate);
}
seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
if (proc_details >= 1) {
/* 64 bit:
* we convert to sectors in the display below. */
- unsigned long bm_bits = drbd_bm_bits(mdev);
+ unsigned long bm_bits = drbd_bm_bits(device);
unsigned long bit_pos;
unsigned long long stop_sector = 0;
- if (mdev->state.conn == C_VERIFY_S ||
- mdev->state.conn == C_VERIFY_T) {
- bit_pos = bm_bits - mdev->ov_left;
- if (verify_can_do_stop_sector(mdev))
- stop_sector = mdev->ov_stop_sector;
+ if (device->state.conn == C_VERIFY_S ||
+ device->state.conn == C_VERIFY_T) {
+ bit_pos = bm_bits - device->ov_left;
+ if (verify_can_do_stop_sector(device))
+ stop_sector = device->ov_stop_sector;
} else
- bit_pos = mdev->bm_resync_fo;
+ bit_pos = device->bm_resync_fo;
/* Total sectors may be slightly off for oddly
* sized devices. So what. */
seq_printf(seq,
@@ -202,7 +202,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
{
int i, prev_i = -1;
const char *sn;
- struct drbd_conf *mdev;
+ struct drbd_device *device;
struct net_conf *nc;
char wp;
@@ -236,72 +236,72 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
*/
rcu_read_lock();
- idr_for_each_entry(&minors, mdev, i) {
+ idr_for_each_entry(&drbd_devices, device, i) {
if (prev_i != i - 1)
seq_printf(seq, "\n");
prev_i = i;
- sn = drbd_conn_str(mdev->state.conn);
+ sn = drbd_conn_str(device->state.conn);
- if (mdev->state.conn == C_STANDALONE &&
- mdev->state.disk == D_DISKLESS &&
- mdev->state.role == R_SECONDARY) {
+ if (device->state.conn == C_STANDALONE &&
+ device->state.disk == D_DISKLESS &&
+ device->state.role == R_SECONDARY) {
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
- /* reset mdev->congestion_reason */
- bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
+ /* reset device->congestion_reason */
+ bdi_rw_congested(&device->rq_queue->backing_dev_info);
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
seq_printf(seq,
"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
" ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
"lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c",
i, sn,
- drbd_role_str(mdev->state.role),
- drbd_role_str(mdev->state.peer),
- drbd_disk_str(mdev->state.disk),
- drbd_disk_str(mdev->state.pdsk),
+ drbd_role_str(device->state.role),
+ drbd_role_str(device->state.peer),
+ drbd_disk_str(device->state.disk),
+ drbd_disk_str(device->state.pdsk),
wp,
- drbd_suspended(mdev) ? 's' : 'r',
- mdev->state.aftr_isp ? 'a' : '-',
- mdev->state.peer_isp ? 'p' : '-',
- mdev->state.user_isp ? 'u' : '-',
- mdev->congestion_reason ?: '-',
- test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
- mdev->send_cnt/2,
- mdev->recv_cnt/2,
- mdev->writ_cnt/2,
- mdev->read_cnt/2,
- mdev->al_writ_cnt,
- mdev->bm_writ_cnt,
- atomic_read(&mdev->local_cnt),
- atomic_read(&mdev->ap_pending_cnt) +
- atomic_read(&mdev->rs_pending_cnt),
- atomic_read(&mdev->unacked_cnt),
- atomic_read(&mdev->ap_bio_cnt),
- mdev->tconn->epochs,
- write_ordering_chars[mdev->tconn->write_ordering]
+ drbd_suspended(device) ? 's' : 'r',
+ device->state.aftr_isp ? 'a' : '-',
+ device->state.peer_isp ? 'p' : '-',
+ device->state.user_isp ? 'u' : '-',
+ device->congestion_reason ?: '-',
+ test_bit(AL_SUSPENDED, &device->flags) ? 's' : '-',
+ device->send_cnt/2,
+ device->recv_cnt/2,
+ device->writ_cnt/2,
+ device->read_cnt/2,
+ device->al_writ_cnt,
+ device->bm_writ_cnt,
+ atomic_read(&device->local_cnt),
+ atomic_read(&device->ap_pending_cnt) +
+ atomic_read(&device->rs_pending_cnt),
+ atomic_read(&device->unacked_cnt),
+ atomic_read(&device->ap_bio_cnt),
+ first_peer_device(device)->connection->epochs,
+ write_ordering_chars[first_peer_device(device)->connection->write_ordering]
);
seq_printf(seq, " oos:%llu\n",
Bit2KB((unsigned long long)
- drbd_bm_total_weight(mdev)));
+ drbd_bm_total_weight(device)));
}
- if (mdev->state.conn == C_SYNC_SOURCE ||
- mdev->state.conn == C_SYNC_TARGET ||
- mdev->state.conn == C_VERIFY_S ||
- mdev->state.conn == C_VERIFY_T)
- drbd_syncer_progress(mdev, seq);
-
- if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) {
- lc_seq_printf_stats(seq, mdev->resync);
- lc_seq_printf_stats(seq, mdev->act_log);
- put_ldev(mdev);
+ if (device->state.conn == C_SYNC_SOURCE ||
+ device->state.conn == C_SYNC_TARGET ||
+ device->state.conn == C_VERIFY_S ||
+ device->state.conn == C_VERIFY_T)
+ drbd_syncer_progress(device, seq);
+
+ if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
+ lc_seq_printf_stats(seq, device->resync);
+ lc_seq_printf_stats(seq, device->act_log);
+ put_ldev(device);
}
if (proc_details >= 2) {
- if (mdev->resync) {
- lc_seq_dump_details(seq, mdev->resync, "rs_left",
+ if (device->resync) {
+ lc_seq_dump_details(seq, device->resync, "rs_left",
resync_dump_detail);
}
}
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h
new file mode 100644
index 000000000000..3c04ec0ea333
--- /dev/null
+++ b/drivers/block/drbd/drbd_protocol.h
@@ -0,0 +1,295 @@
+#ifndef __DRBD_PROTOCOL_H
+#define __DRBD_PROTOCOL_H
+
+enum drbd_packet {
+ /* receiver (data socket) */
+ P_DATA = 0x00,
+ P_DATA_REPLY = 0x01, /* Response to P_DATA_REQUEST */
+ P_RS_DATA_REPLY = 0x02, /* Response to P_RS_DATA_REQUEST */
+ P_BARRIER = 0x03,
+ P_BITMAP = 0x04,
+ P_BECOME_SYNC_TARGET = 0x05,
+ P_BECOME_SYNC_SOURCE = 0x06,
+ P_UNPLUG_REMOTE = 0x07, /* Used at various times to hint the peer */
+ P_DATA_REQUEST = 0x08, /* Used to ask for a data block */
+ P_RS_DATA_REQUEST = 0x09, /* Used to ask for a data block for resync */
+ P_SYNC_PARAM = 0x0a,
+ P_PROTOCOL = 0x0b,
+ P_UUIDS = 0x0c,
+ P_SIZES = 0x0d,
+ P_STATE = 0x0e,
+ P_SYNC_UUID = 0x0f,
+ P_AUTH_CHALLENGE = 0x10,
+ P_AUTH_RESPONSE = 0x11,
+ P_STATE_CHG_REQ = 0x12,
+
+ /* asender (meta socket) */
+ P_PING = 0x13,
+ P_PING_ACK = 0x14,
+ P_RECV_ACK = 0x15, /* Used in protocol B */
+ P_WRITE_ACK = 0x16, /* Used in protocol C */
+ P_RS_WRITE_ACK = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
+ P_SUPERSEDED = 0x18, /* Used in proto C, two-primaries conflict detection */
+ P_NEG_ACK = 0x19, /* Sent if local disk is unusable */
+ P_NEG_DREPLY = 0x1a, /* Local disk is broken... */
+ P_NEG_RS_DREPLY = 0x1b, /* Local disk is broken... */
+ P_BARRIER_ACK = 0x1c,
+ P_STATE_CHG_REPLY = 0x1d,
+
+ /* "new" commands, no longer fitting into the ordering scheme above */
+
+ P_OV_REQUEST = 0x1e, /* data socket */
+ P_OV_REPLY = 0x1f,
+ P_OV_RESULT = 0x20, /* meta socket */
+ P_CSUM_RS_REQUEST = 0x21, /* data socket */
+ P_RS_IS_IN_SYNC = 0x22, /* meta socket */
+ P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
+ P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */
+ /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */
+ /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */
+ P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
+ P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */
+ P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
+ P_CONN_ST_CHG_REQ = 0x2a, /* data sock: Connection wide state request */
+ P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */
+ P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */
+ P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
+
+ P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
+ P_MAX_OPT_CMD = 0x101,
+
+ /* special command ids for handshake */
+
+ P_INITIAL_META = 0xfff1, /* First Packet on the MetaSock */
+ P_INITIAL_DATA = 0xfff2, /* First Packet on the Socket */
+
+ P_CONNECTION_FEATURES = 0xfffe /* FIXED for the next century! */
+};
+
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
+
+/* This is the layout for a packet on the wire.
+ * The byteorder is the network byte order.
+ * (except block_id and barrier fields.
+ * these are pointers to local structs
+ * and have no relevance for the partner,
+ * which just echoes them as received.)
+ *
+ * NOTE that the payload starts at a long aligned offset,
+ * regardless of 32 or 64 bit arch!
+ */
+struct p_header80 {
+ u32 magic;
+ u16 command;
+ u16 length; /* bytes of data after this header */
+} __packed;
+
+/* Header for big packets, used for data packets exceeding 64kB */
+struct p_header95 {
+ u16 magic; /* use DRBD_MAGIC_BIG here */
+ u16 command;
+ u32 length;
+} __packed;
+
+struct p_header100 {
+ u32 magic;
+ u16 volume;
+ u16 command;
+ u32 length;
+ u32 pad;
+} __packed;
+
+/* these defines must not be changed without changing the protocol version */
+#define DP_HARDBARRIER 1 /* deprecated */
+#define DP_RW_SYNC 2 /* equals REQ_SYNC */
+#define DP_MAY_SET_IN_SYNC 4
+#define DP_UNPLUG 8 /* not used anymore */
+#define DP_FUA 16 /* equals REQ_FUA */
+#define DP_FLUSH 32 /* equals REQ_FLUSH */
+#define DP_DISCARD 64 /* equals REQ_DISCARD */
+#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
+#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
+
+struct p_data {
+ u64 sector; /* 64 bits sector number */
+ u64 block_id; /* to identify the request in protocol B&C */
+ u32 seq_num;
+ u32 dp_flags;
+} __packed;
+
+/*
+ * commands which share a struct:
+ * p_block_ack:
+ * P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
+ * P_SUPERSEDED (proto C, two-primaries conflict detection)
+ * p_block_req:
+ * P_DATA_REQUEST, P_RS_DATA_REQUEST
+ */
+struct p_block_ack {
+ u64 sector;
+ u64 block_id;
+ u32 blksize;
+ u32 seq_num;
+} __packed;
+
+struct p_block_req {
+ u64 sector;
+ u64 block_id;
+ u32 blksize;
+ u32 pad; /* to multiple of 8 Byte */
+} __packed;
+
+/*
+ * commands with their own struct for additional fields:
+ * P_CONNECTION_FEATURES
+ * P_BARRIER
+ * P_BARRIER_ACK
+ * P_SYNC_PARAM
+ * ReportParams
+ */
+
+struct p_connection_features {
+ u32 protocol_min;
+ u32 feature_flags;
+ u32 protocol_max;
+
+ /* should be more than enough for future enhancements
+ * for now, feature_flags and the reserved array shall be zero.
+ */
+
+ u32 _pad;
+ u64 reserved[7];
+} __packed;
+
+struct p_barrier {
+ u32 barrier; /* barrier number _handle_ only */
+ u32 pad; /* to multiple of 8 Byte */
+} __packed;
+
+struct p_barrier_ack {
+ u32 barrier;
+ u32 set_size;
+} __packed;
+
+struct p_rs_param {
+ u32 resync_rate;
+
+ /* Since protocol version 88 and higher. */
+ char verify_alg[0];
+} __packed;
+
+struct p_rs_param_89 {
+ u32 resync_rate;
+ /* protocol version 89: */
+ char verify_alg[SHARED_SECRET_MAX];
+ char csums_alg[SHARED_SECRET_MAX];
+} __packed;
+
+struct p_rs_param_95 {
+ u32 resync_rate;
+ char verify_alg[SHARED_SECRET_MAX];
+ char csums_alg[SHARED_SECRET_MAX];
+ u32 c_plan_ahead;
+ u32 c_delay_target;
+ u32 c_fill_target;
+ u32 c_max_rate;
+} __packed;
+
+enum drbd_conn_flags {
+ CF_DISCARD_MY_DATA = 1,
+ CF_DRY_RUN = 2,
+};
+
+struct p_protocol {
+ u32 protocol;
+ u32 after_sb_0p;
+ u32 after_sb_1p;
+ u32 after_sb_2p;
+ u32 conn_flags;
+ u32 two_primaries;
+
+ /* Since protocol version 87 and higher. */
+ char integrity_alg[0];
+
+} __packed;
+
+struct p_uuids {
+ u64 uuid[UI_EXTENDED_SIZE];
+} __packed;
+
+struct p_rs_uuid {
+ u64 uuid;
+} __packed;
+
+struct p_sizes {
+ u64 d_size; /* size of disk */
+ u64 u_size; /* user requested size */
+ u64 c_size; /* current exported size */
+ u32 max_bio_size; /* Maximal size of a BIO */
+ u16 queue_order_type; /* not yet implemented in DRBD */
+ u16 dds_flags; /* use enum dds_flags here. */
+} __packed;
+
+struct p_state {
+ u32 state;
+} __packed;
+
+struct p_req_state {
+ u32 mask;
+ u32 val;
+} __packed;
+
+struct p_req_state_reply {
+ u32 retcode;
+} __packed;
+
+struct p_drbd06_param {
+ u64 size;
+ u32 state;
+ u32 blksize;
+ u32 protocol;
+ u32 version;
+ u32 gen_cnt[5];
+ u32 bit_map_gen[5];
+} __packed;
+
+struct p_block_desc {
+ u64 sector;
+ u32 blksize;
+ u32 pad; /* to multiple of 8 Byte */
+} __packed;
+
+/* Valid values for the encoding field.
+ * Bump proto version when changing this. */
+enum drbd_bitmap_code {
+ /* RLE_VLI_Bytes = 0,
+ * and other bit variants had been defined during
+ * algorithm evaluation. */
+ RLE_VLI_Bits = 2,
+};
+
+struct p_compressed_bm {
+ /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
+ * (encoding & 0x80): polarity (set/unset) of first runlength
+ * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
+ * used to pad up to head.length bytes
+ */
+ u8 encoding;
+
+ u8 code[0];
+} __packed;
+
+struct p_delay_probe93 {
+ u32 seq_num; /* sequence number to match the two probe packets */
+ u32 offset; /* usecs the probe got sent after the reference time point */
+} __packed;
+
+/*
+ * Bitmap packets need to fit within a single page on the sender and receiver,
+ * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
+ */
+#define DRBD_SOCKET_BUFFER_SIZE 4096
+
+#endif /* __DRBD_PROTOCOL_H */
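Everything in this header is __packed and big-endian on the wire; a receiver inspects the 32-bit magic first to pick the header layout, as decode_header() does further down in this patch. A minimal sketch of decoding the protocol-100 header; the magic value and struct name are illustrative only, the real constants live in the DRBD headers:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

#define EXAMPLE_MAGIC_100	cpu_to_be32(0x8620ec20)	/* illustrative value */

struct p_header100_example {	/* mirrors struct p_header100 above: 16 bytes */
	__be32 magic;
	__be16 volume;
	__be16 command;
	__be32 length;
	__be32 pad;
} __packed;

/* Convert the fixed header to host byte order; returns the payload length
 * in bytes, or -EINVAL on a wrong magic or non-zero padding. */
static int decode_h100(const void *buf, u16 *volume, u16 *command)
{
	const struct p_header100_example *h = buf;

	if (h->magic != EXAMPLE_MAGIC_100 || h->pad != 0)
		return -EINVAL;
	*volume  = be16_to_cpu(h->volume);
	*command = be16_to_cpu(h->command);
	return be32_to_cpu(h->length);
}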
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 1385714eccb7..68e3992e8838 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -44,6 +44,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
+#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"
@@ -61,11 +62,11 @@ enum finish_epoch {
FE_RECYCLED,
};
-static int drbd_do_features(struct drbd_tconn *tconn);
-static int drbd_do_auth(struct drbd_tconn *tconn);
-static int drbd_disconnected(struct drbd_conf *mdev);
+static int drbd_do_features(struct drbd_connection *connection);
+static int drbd_do_auth(struct drbd_connection *connection);
+static int drbd_disconnected(struct drbd_peer_device *);
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);
@@ -150,7 +151,7 @@ static void page_chain_add(struct page **head,
*head = chain_first;
}
-static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
+static struct page *__drbd_alloc_pages(struct drbd_device *device,
unsigned int number)
{
struct page *page = NULL;
@@ -196,41 +197,39 @@ static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
return NULL;
}
-static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
+static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
struct list_head *to_be_freed)
{
- struct drbd_peer_request *peer_req;
- struct list_head *le, *tle;
+ struct drbd_peer_request *peer_req, *tmp;
/* The EEs are always appended to the end of the list. Since
they are sent in order over the wire, they have to finish
in order. As soon as we see the first not finished we can
stop to examine the list... */
- list_for_each_safe(le, tle, &mdev->net_ee) {
- peer_req = list_entry(le, struct drbd_peer_request, w.list);
+ list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
if (drbd_peer_req_has_active_page(peer_req))
break;
- list_move(le, to_be_freed);
+ list_move(&peer_req->w.list, to_be_freed);
}
}
-static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
+static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device)
{
LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
- spin_lock_irq(&mdev->tconn->req_lock);
- reclaim_finished_net_peer_reqs(mdev, &reclaimed);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ reclaim_finished_net_peer_reqs(device, &reclaimed);
+ spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(mdev, peer_req);
+ drbd_free_net_peer_req(device, peer_req);
}
/**
* drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @number: number of pages requested
* @retry: whether to retry, if not enough pages are available right now
*
@@ -240,9 +239,10 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
*
* Returns a page chain linked via page->private.
*/
-struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
+struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
bool retry)
{
+ struct drbd_device *device = peer_device->device;
struct page *page = NULL;
struct net_conf *nc;
DEFINE_WAIT(wait);
@@ -251,20 +251,20 @@ struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
/* Yes, we may run up to @number over max_buffers. If we
* follow it strictly, the admin will get it wrong anyways. */
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(peer_device->connection->net_conf);
mxb = nc ? nc->max_buffers : 1000000;
rcu_read_unlock();
- if (atomic_read(&mdev->pp_in_use) < mxb)
- page = __drbd_alloc_pages(mdev, number);
+ if (atomic_read(&device->pp_in_use) < mxb)
+ page = __drbd_alloc_pages(device, number);
while (page == NULL) {
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
- drbd_kick_lo_and_reclaim_net(mdev);
+ drbd_kick_lo_and_reclaim_net(device);
- if (atomic_read(&mdev->pp_in_use) < mxb) {
- page = __drbd_alloc_pages(mdev, number);
+ if (atomic_read(&device->pp_in_use) < mxb) {
+ page = __drbd_alloc_pages(device, number);
if (page)
break;
}
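drbd_alloc_pages() retries inside a classic waitqueue loop: queue the waiter with prepare_to_wait() in TASK_INTERRUPTIBLE, re-check the condition, sleep in schedule(), and bail out when a signal arrives. A stripped-down sketch of that pattern for a generic resource (not the DRBD page pool):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wait);

/* Keep calling @try_get until it yields an object or the task is signalled.
 * The condition is re-checked after prepare_to_wait() so a wake-up racing
 * with the check cannot be lost. */
static void *get_or_wait(void *(*try_get)(void))
{
	DEFINE_WAIT(wait);
	void *obj = try_get();

	while (!obj) {
		prepare_to_wait(&example_wait, &wait, TASK_INTERRUPTIBLE);
		obj = try_get();
		if (obj)
			break;
		if (signal_pending(current))	/* caller maps this to -EINTR */
			break;
		schedule();			/* actually sleep until woken */
	}
	finish_wait(&example_wait, &wait);
	return obj;
}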
@@ -273,7 +273,7 @@ struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
break;
if (signal_pending(current)) {
- dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
+ drbd_warn(device, "drbd_alloc_pages interrupted!\n");
break;
}
@@ -282,17 +282,17 @@ struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
finish_wait(&drbd_pp_wait, &wait);
if (page)
- atomic_add(number, &mdev->pp_in_use);
+ atomic_add(number, &device->pp_in_use);
return page;
}
/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
- * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
+ * Is also used from inside another spin_lock_irq(&resource->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
- atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
+ atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
int i;
if (page == NULL)
@@ -310,7 +310,7 @@ static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_ne
}
i = atomic_sub_return(i, a);
if (i < 0)
- dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
+ drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
is_net ? "pp_in_use_by_net" : "pp_in_use", i);
wake_up(&drbd_pp_wait);
}
@@ -330,25 +330,26 @@ You must not have the req_lock:
*/
struct drbd_peer_request *
-drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
+drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
struct page *page = NULL;
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
- if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
+ if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
return NULL;
peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
if (!peer_req) {
if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "%s: allocation failed\n", __func__);
+ drbd_err(device, "%s: allocation failed\n", __func__);
return NULL;
}
if (data_size) {
- page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+ page = drbd_alloc_pages(peer_device, nr_pages, (gfp_mask & __GFP_WAIT));
if (!page)
goto fail;
}
@@ -360,7 +361,7 @@ drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
peer_req->i.waiting = false;
peer_req->epoch = NULL;
- peer_req->w.mdev = mdev;
+ peer_req->peer_device = peer_device;
peer_req->pages = page;
atomic_set(&peer_req->pending_bios, 0);
peer_req->flags = 0;
@@ -377,30 +378,30 @@ drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
return NULL;
}
-void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
+void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
int is_net)
{
if (peer_req->flags & EE_HAS_DIGEST)
kfree(peer_req->digest);
- drbd_free_pages(mdev, peer_req->pages, is_net);
- D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
- D_ASSERT(drbd_interval_empty(&peer_req->i));
+ drbd_free_pages(device, peer_req->pages, is_net);
+ D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
+ D_ASSERT(device, drbd_interval_empty(&peer_req->i));
mempool_free(peer_req, drbd_ee_mempool);
}
-int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
+int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
{
LIST_HEAD(work_list);
struct drbd_peer_request *peer_req, *t;
int count = 0;
- int is_net = list == &mdev->net_ee;
+ int is_net = list == &device->net_ee;
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
list_splice_init(list, &work_list);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
- __drbd_free_peer_req(mdev, peer_req, is_net);
+ __drbd_free_peer_req(device, peer_req, is_net);
count++;
}
return count;
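drbd_free_peer_reqs() detaches the whole list under resource->req_lock with list_splice_init() and only afterwards walks the private copy, so the spinlock is never held across the per-entry work. A generic sketch of that splice-then-process idiom:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct pending {
	struct list_head list;
};

/* Empty @src atomically, then free the entries with the lock dropped. */
static int drain_list(spinlock_t *lock, struct list_head *src)
{
	LIST_HEAD(work_list);
	struct pending *p, *tmp;
	int count = 0;

	spin_lock_irq(lock);
	list_splice_init(src, &work_list);	/* @src is empty afterwards */
	spin_unlock_irq(lock);

	list_for_each_entry_safe(p, tmp, &work_list, list) {
		list_del(&p->list);
		kfree(p);			/* slow work runs lock-free */
		count++;
	}
	return count;
}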
@@ -409,20 +410,20 @@ int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
/*
* See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
*/
-static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
+static int drbd_finish_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(work_list);
LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
int err = 0;
- spin_lock_irq(&mdev->tconn->req_lock);
- reclaim_finished_net_peer_reqs(mdev, &reclaimed);
- list_splice_init(&mdev->done_ee, &work_list);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ reclaim_finished_net_peer_reqs(device, &reclaimed);
+ list_splice_init(&device->done_ee, &work_list);
+ spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(mdev, peer_req);
+ drbd_free_net_peer_req(device, peer_req);
/* possible callbacks here:
* e_end_block, and e_end_resync_block, e_send_superseded.
@@ -435,14 +436,14 @@ static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
err2 = peer_req->w.cb(&peer_req->w, !!err);
if (!err)
err = err2;
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
}
- wake_up(&mdev->ee_wait);
+ wake_up(&device->ee_wait);
return err;
}
-static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+static void _drbd_wait_ee_list_empty(struct drbd_device *device,
struct list_head *head)
{
DEFINE_WAIT(wait);
@@ -450,20 +451,20 @@ static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
/* avoids spin_lock/unlock
* and calling prepare_to_wait in the fast path */
while (!list_empty(head)) {
- prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&device->resource->req_lock);
io_schedule();
- finish_wait(&mdev->ee_wait, &wait);
- spin_lock_irq(&mdev->tconn->req_lock);
+ finish_wait(&device->ee_wait, &wait);
+ spin_lock_irq(&device->resource->req_lock);
}
}
-static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+static void drbd_wait_ee_list_empty(struct drbd_device *device,
struct list_head *head)
{
- spin_lock_irq(&mdev->tconn->req_lock);
- _drbd_wait_ee_list_empty(mdev, head);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ _drbd_wait_ee_list_empty(device, head);
+ spin_unlock_irq(&device->resource->req_lock);
}
static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
@@ -478,44 +479,44 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag
return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
}
-static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
+static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
{
int rv;
- rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
+ rv = drbd_recv_short(connection->data.socket, buf, size, 0);
if (rv < 0) {
if (rv == -ECONNRESET)
- conn_info(tconn, "sock was reset by peer\n");
+ drbd_info(connection, "sock was reset by peer\n");
else if (rv != -ERESTARTSYS)
- conn_err(tconn, "sock_recvmsg returned %d\n", rv);
+ drbd_err(connection, "sock_recvmsg returned %d\n", rv);
} else if (rv == 0) {
- if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ if (test_bit(DISCONNECT_SENT, &connection->flags)) {
long t;
rcu_read_lock();
- t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
rcu_read_unlock();
- t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
+ t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);
if (t)
goto out;
}
- conn_info(tconn, "sock was shut down by peer\n");
+ drbd_info(connection, "sock was shut down by peer\n");
}
if (rv != size)
- conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
+ conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
out:
return rv;
}
-static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
+static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
{
int err;
- err = drbd_recv(tconn, buf, size);
+ err = drbd_recv(connection, buf, size);
if (err != size) {
if (err >= 0)
err = -EIO;
@@ -524,13 +525,13 @@ static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
return err;
}
-static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
+static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
{
int err;
- err = drbd_recv_all(tconn, buf, size);
+ err = drbd_recv_all(connection, buf, size);
if (err && !signal_pending(current))
- conn_warn(tconn, "short read (expected size %d)\n", (int)size);
+ drbd_warn(connection, "short read (expected size %d)\n", (int)size);
return err;
}
@@ -553,7 +554,7 @@ static void drbd_setbufsize(struct socket *sock, unsigned int snd,
}
}
-static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
+static struct socket *drbd_try_connect(struct drbd_connection *connection)
{
const char *what;
struct socket *sock;
@@ -565,7 +566,7 @@ static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
int disconnect_on_error = 1;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
if (!nc) {
rcu_read_unlock();
return NULL;
@@ -575,16 +576,16 @@ static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
connect_int = nc->connect_int;
rcu_read_unlock();
- my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
- memcpy(&src_in6, &tconn->my_addr, my_addr_len);
+ my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
+ memcpy(&src_in6, &connection->my_addr, my_addr_len);
- if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
+ if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
src_in6.sin6_port = 0;
else
((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
- peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
- memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
+ peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
+ memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);
what = "sock_create_kern";
err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
@@ -632,17 +633,17 @@ out:
disconnect_on_error = 0;
break;
default:
- conn_err(tconn, "%s failed, err = %d\n", what, err);
+ drbd_err(connection, "%s failed, err = %d\n", what, err);
}
if (disconnect_on_error)
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
return sock;
}
struct accept_wait_data {
- struct drbd_tconn *tconn;
+ struct drbd_connection *connection;
struct socket *s_listen;
struct completion door_bell;
void (*original_sk_state_change)(struct sock *sk);
@@ -660,7 +661,7 @@ static void drbd_incoming_connection(struct sock *sk)
state_change(sk);
}
-static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
+static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
{
int err, sndbuf_size, rcvbuf_size, my_addr_len;
struct sockaddr_in6 my_addr;
@@ -669,7 +670,7 @@ static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_da
const char *what;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
if (!nc) {
rcu_read_unlock();
return -EIO;
@@ -678,8 +679,8 @@ static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_da
rcvbuf_size = nc->rcvbuf_size;
rcu_read_unlock();
- my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
- memcpy(&my_addr, &tconn->my_addr, my_addr_len);
+ my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
+ memcpy(&my_addr, &connection->my_addr, my_addr_len);
what = "sock_create_kern";
err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
@@ -715,8 +716,8 @@ out:
sock_release(s_listen);
if (err < 0) {
if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
- conn_err(tconn, "%s failed, err = %d\n", what, err);
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ drbd_err(connection, "%s failed, err = %d\n", what, err);
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
}
@@ -731,14 +732,14 @@ static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad
write_unlock_bh(&sk->sk_callback_lock);
}
-static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
+static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
{
int timeo, connect_int, err = 0;
struct socket *s_estab = NULL;
struct net_conf *nc;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
if (!nc) {
rcu_read_unlock();
return NULL;
@@ -757,8 +758,8 @@ static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct acc
err = kernel_accept(ad->s_listen, &s_estab, 0);
if (err < 0) {
if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
- conn_err(tconn, "accept failed, err = %d\n", err);
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ drbd_err(connection, "accept failed, err = %d\n", err);
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
}
@@ -768,29 +769,29 @@ static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct acc
return s_estab;
}
-static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
+static int decode_header(struct drbd_connection *, void *, struct packet_info *);
-static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
+static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
enum drbd_packet cmd)
{
- if (!conn_prepare_command(tconn, sock))
+ if (!conn_prepare_command(connection, sock))
return -EIO;
- return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
+ return conn_send_command(connection, sock, cmd, 0, NULL, 0);
}
-static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
+static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
{
- unsigned int header_size = drbd_header_size(tconn);
+ unsigned int header_size = drbd_header_size(connection);
struct packet_info pi;
int err;
- err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
+ err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
if (err != header_size) {
if (err >= 0)
err = -EIO;
return err;
}
- err = decode_header(tconn, tconn->data.rbuf, &pi);
+ err = decode_header(connection, connection->data.rbuf, &pi);
if (err)
return err;
return pi.cmd;
@@ -820,28 +821,29 @@ static int drbd_socket_okay(struct socket **sock)
}
/* Gets called if a connection is established, or if a new minor gets created
in a connection */
-int drbd_connected(struct drbd_conf *mdev)
+int drbd_connected(struct drbd_peer_device *peer_device)
{
+ struct drbd_device *device = peer_device->device;
int err;
- atomic_set(&mdev->packet_seq, 0);
- mdev->peer_seq = 0;
+ atomic_set(&device->packet_seq, 0);
+ device->peer_seq = 0;
- mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
- &mdev->tconn->cstate_mutex :
- &mdev->own_state_mutex;
+ device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
+ &peer_device->connection->cstate_mutex :
+ &device->own_state_mutex;
- err = drbd_send_sync_param(mdev);
+ err = drbd_send_sync_param(peer_device);
if (!err)
- err = drbd_send_sizes(mdev, 0, 0);
+ err = drbd_send_sizes(peer_device, 0, 0);
if (!err)
- err = drbd_send_uuids(mdev);
+ err = drbd_send_uuids(peer_device);
if (!err)
- err = drbd_send_current_state(mdev);
- clear_bit(USE_DEGR_WFC_T, &mdev->flags);
- clear_bit(RESIZE_PENDING, &mdev->flags);
- atomic_set(&mdev->ap_in_flight, 0);
- mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
+ err = drbd_send_current_state(peer_device);
+ clear_bit(USE_DEGR_WFC_T, &device->flags);
+ clear_bit(RESIZE_PENDING, &device->flags);
+ atomic_set(&device->ap_in_flight, 0);
+ mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
return err;
}
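drbd_connected() sends the initial packets in a fixed order and lets the first failure win: each later drbd_send_*() call only runs while err is still zero. A tiny sketch of that first-error-sticks chaining, with placeholder steps standing in for the real send functions:

/* step1()..step3() are placeholders, not DRBD API. */
static int step1(void) { return 0; }
static int step2(void) { return 0; }
static int step3(void) { return 0; }

static int run_handshake(void)
{
	int err;

	err = step1();
	if (!err)
		err = step2();	/* skipped once an earlier step failed */
	if (!err)
		err = step3();
	return err;		/* 0 on success, first error otherwise */
}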
@@ -853,59 +855,59 @@ int drbd_connected(struct drbd_conf *mdev)
* no point in trying again, please go standalone.
* -2 We do not have a network config...
*/
-static int conn_connect(struct drbd_tconn *tconn)
+static int conn_connect(struct drbd_connection *connection)
{
struct drbd_socket sock, msock;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
struct net_conf *nc;
int vnr, timeout, h, ok;
bool discard_my_data;
enum drbd_state_rv rv;
struct accept_wait_data ad = {
- .tconn = tconn,
+ .connection = connection,
.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
};
- clear_bit(DISCONNECT_SENT, &tconn->flags);
- if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
+ clear_bit(DISCONNECT_SENT, &connection->flags);
+ if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
return -2;
mutex_init(&sock.mutex);
- sock.sbuf = tconn->data.sbuf;
- sock.rbuf = tconn->data.rbuf;
+ sock.sbuf = connection->data.sbuf;
+ sock.rbuf = connection->data.rbuf;
sock.socket = NULL;
mutex_init(&msock.mutex);
- msock.sbuf = tconn->meta.sbuf;
- msock.rbuf = tconn->meta.rbuf;
+ msock.sbuf = connection->meta.sbuf;
+ msock.rbuf = connection->meta.rbuf;
msock.socket = NULL;
/* Assume that the peer only understands protocol 80 until we know better. */
- tconn->agreed_pro_version = 80;
+ connection->agreed_pro_version = 80;
- if (prepare_listen_socket(tconn, &ad))
+ if (prepare_listen_socket(connection, &ad))
return 0;
do {
struct socket *s;
- s = drbd_try_connect(tconn);
+ s = drbd_try_connect(connection);
if (s) {
if (!sock.socket) {
sock.socket = s;
- send_first_packet(tconn, &sock, P_INITIAL_DATA);
+ send_first_packet(connection, &sock, P_INITIAL_DATA);
} else if (!msock.socket) {
- clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ clear_bit(RESOLVE_CONFLICTS, &connection->flags);
msock.socket = s;
- send_first_packet(tconn, &msock, P_INITIAL_META);
+ send_first_packet(connection, &msock, P_INITIAL_META);
} else {
- conn_err(tconn, "Logic error in conn_connect()\n");
+ drbd_err(connection, "Logic error in conn_connect()\n");
goto out_release_sockets;
}
}
if (sock.socket && msock.socket) {
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
timeout = nc->ping_timeo * HZ / 10;
rcu_read_unlock();
schedule_timeout_interruptible(timeout);
@@ -916,15 +918,15 @@ static int conn_connect(struct drbd_tconn *tconn)
}
retry:
- s = drbd_wait_for_connect(tconn, &ad);
+ s = drbd_wait_for_connect(connection, &ad);
if (s) {
- int fp = receive_first_packet(tconn, s);
+ int fp = receive_first_packet(connection, s);
drbd_socket_okay(&sock.socket);
drbd_socket_okay(&msock.socket);
switch (fp) {
case P_INITIAL_DATA:
if (sock.socket) {
- conn_warn(tconn, "initial packet S crossed\n");
+ drbd_warn(connection, "initial packet S crossed\n");
sock_release(sock.socket);
sock.socket = s;
goto randomize;
@@ -932,9 +934,9 @@ retry:
sock.socket = s;
break;
case P_INITIAL_META:
- set_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ set_bit(RESOLVE_CONFLICTS, &connection->flags);
if (msock.socket) {
- conn_warn(tconn, "initial packet M crossed\n");
+ drbd_warn(connection, "initial packet M crossed\n");
sock_release(msock.socket);
msock.socket = s;
goto randomize;
@@ -942,7 +944,7 @@ retry:
msock.socket = s;
break;
default:
- conn_warn(tconn, "Error receiving initial packet\n");
+ drbd_warn(connection, "Error receiving initial packet\n");
sock_release(s);
randomize:
if (prandom_u32() & 1)
@@ -950,12 +952,12 @@ randomize:
}
}
- if (tconn->cstate <= C_DISCONNECTING)
+ if (connection->cstate <= C_DISCONNECTING)
goto out_release_sockets;
if (signal_pending(current)) {
flush_signals(current);
smp_rmb();
- if (get_t_state(&tconn->receiver) == EXITING)
+ if (get_t_state(&connection->receiver) == EXITING)
goto out_release_sockets;
}
@@ -976,12 +978,12 @@ randomize:
msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
/* NOT YET ...
- * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
+ * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
* sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
* first set it to the P_CONNECTION_FEATURES timeout,
* which we set to 4x the configured ping_timeout. */
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
sock.socket->sk->sk_sndtimeo =
sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
@@ -998,37 +1000,38 @@ randomize:
drbd_tcp_nodelay(sock.socket);
drbd_tcp_nodelay(msock.socket);
- tconn->data.socket = sock.socket;
- tconn->meta.socket = msock.socket;
- tconn->last_received = jiffies;
+ connection->data.socket = sock.socket;
+ connection->meta.socket = msock.socket;
+ connection->last_received = jiffies;
- h = drbd_do_features(tconn);
+ h = drbd_do_features(connection);
if (h <= 0)
return h;
- if (tconn->cram_hmac_tfm) {
- /* drbd_request_state(mdev, NS(conn, WFAuth)); */
- switch (drbd_do_auth(tconn)) {
+ if (connection->cram_hmac_tfm) {
+ /* drbd_request_state(device, NS(conn, WFAuth)); */
+ switch (drbd_do_auth(connection)) {
case -1:
- conn_err(tconn, "Authentication of peer failed\n");
+ drbd_err(connection, "Authentication of peer failed\n");
return -1;
case 0:
- conn_err(tconn, "Authentication of peer failed, trying again.\n");
+ drbd_err(connection, "Authentication of peer failed, trying again.\n");
return 0;
}
}
- tconn->data.socket->sk->sk_sndtimeo = timeout;
- tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ connection->data.socket->sk->sk_sndtimeo = timeout;
+ connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
+ if (drbd_send_protocol(connection) == -EOPNOTSUPP)
return -1;
- set_bit(STATE_SENT, &tconn->flags);
+ set_bit(STATE_SENT, &connection->flags);
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ kref_get(&device->kref);
rcu_read_unlock();
/* Prevent a race between resync-handshake and
@@ -1038,35 +1041,35 @@ randomize:
* drbd_set_role() is finished, and any incoming drbd_set_role
* will see the STATE_SENT flag, and wait for it to be cleared.
*/
- mutex_lock(mdev->state_mutex);
- mutex_unlock(mdev->state_mutex);
+ mutex_lock(device->state_mutex);
+ mutex_unlock(device->state_mutex);
if (discard_my_data)
- set_bit(DISCARD_MY_DATA, &mdev->flags);
+ set_bit(DISCARD_MY_DATA, &device->flags);
else
- clear_bit(DISCARD_MY_DATA, &mdev->flags);
+ clear_bit(DISCARD_MY_DATA, &device->flags);
- drbd_connected(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_connected(peer_device);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
- rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
- if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
- clear_bit(STATE_SENT, &tconn->flags);
+ rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
+ if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
+ clear_bit(STATE_SENT, &connection->flags);
return 0;
}
- drbd_thread_start(&tconn->asender);
+ drbd_thread_start(&connection->asender);
- mutex_lock(&tconn->conf_update);
+ mutex_lock(&connection->resource->conf_update);
/* The discard_my_data flag is a single-shot modifier to the next
* connection attempt, the handshake of which is now well underway.
* No need for rcu style copying of the whole struct
* just to clear a single value. */
- tconn->net_conf->discard_my_data = 0;
- mutex_unlock(&tconn->conf_update);
+ connection->net_conf->discard_my_data = 0;
+ mutex_unlock(&connection->resource->conf_update);
return h;
@@ -1080,15 +1083,15 @@ out_release_sockets:
return -1;
}
-static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
+static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
{
- unsigned int header_size = drbd_header_size(tconn);
+ unsigned int header_size = drbd_header_size(connection);
if (header_size == sizeof(struct p_header100) &&
*(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
struct p_header100 *h = header;
if (h->pad != 0) {
- conn_err(tconn, "Header padding is not zero\n");
+ drbd_err(connection, "Header padding is not zero\n");
return -EINVAL;
}
pi->vnr = be16_to_cpu(h->volume);
@@ -1107,55 +1110,57 @@ static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_i
pi->size = be16_to_cpu(h->length);
pi->vnr = 0;
} else {
- conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
+ drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
be32_to_cpu(*(__be32 *)header),
- tconn->agreed_pro_version);
+ connection->agreed_pro_version);
return -EINVAL;
}
pi->data = header + header_size;
return 0;
}
-static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
+static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
- void *buffer = tconn->data.rbuf;
+ void *buffer = connection->data.rbuf;
int err;
- err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
+ err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
if (err)
return err;
- err = decode_header(tconn, buffer, pi);
- tconn->last_received = jiffies;
+ err = decode_header(connection, buffer, pi);
+ connection->last_received = jiffies;
return err;
}
-static void drbd_flush(struct drbd_tconn *tconn)
+static void drbd_flush(struct drbd_connection *connection)
{
int rv;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
- if (tconn->write_ordering >= WO_bdev_flush) {
+ if (connection->write_ordering >= WO_bdev_flush) {
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (!get_ldev(mdev))
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+
+ if (!get_ldev(device))
continue;
- kref_get(&mdev->kref);
+ kref_get(&device->kref);
rcu_read_unlock();
- rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
+ rv = blkdev_issue_flush(device->ldev->backing_bdev,
GFP_NOIO, NULL);
if (rv) {
- dev_info(DEV, "local disk flush failed with status %d\n", rv);
+ drbd_info(device, "local disk flush failed with status %d\n", rv);
/* would rather check on EOPNOTSUPP, but that is not reliable.
* don't try again for ANY return value != 0
* if (rv == -EOPNOTSUPP) */
- drbd_bump_write_ordering(tconn, WO_drain_io);
+ drbd_bump_write_ordering(connection, WO_drain_io);
}
- put_ldev(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ put_ldev(device);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
if (rv)
@@ -1167,11 +1172,11 @@ static void drbd_flush(struct drbd_tconn *tconn)
/**
* drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it.
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @epoch: Epoch object.
* @ev: Epoch event.
*/
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
struct drbd_epoch *epoch,
enum epoch_event ev)
{
@@ -1179,7 +1184,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
struct drbd_epoch *next_epoch;
enum finish_epoch rv = FE_STILL_LIVE;
- spin_lock(&tconn->epoch_lock);
+ spin_lock(&connection->epoch_lock);
do {
next_epoch = NULL;
@@ -1201,22 +1206,22 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
atomic_read(&epoch->active) == 0 &&
(test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
- spin_unlock(&tconn->epoch_lock);
- drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
- spin_lock(&tconn->epoch_lock);
+ spin_unlock(&connection->epoch_lock);
+ drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
+ spin_lock(&connection->epoch_lock);
}
#if 0
/* FIXME: dec unacked on connection, once we have
* something to count pending connection packets in. */
if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
- dec_unacked(epoch->tconn);
+ dec_unacked(epoch->connection);
#endif
- if (tconn->current_epoch != epoch) {
+ if (connection->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
list_del(&epoch->list);
ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
- tconn->epochs--;
+ connection->epochs--;
kfree(epoch);
if (rv == FE_STILL_LIVE)
@@ -1236,20 +1241,20 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
epoch = next_epoch;
} while (1);
- spin_unlock(&tconn->epoch_lock);
+ spin_unlock(&connection->epoch_lock);
return rv;
}
/**
* drbd_bump_write_ordering() - Fall back to another write ordering method
- * @tconn: DRBD connection.
+ * @connection: DRBD connection.
* @wo: Write ordering method to try.
*/
-void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
+void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo)
{
struct disk_conf *dc;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
enum write_ordering_e pwo;
int vnr;
static char *write_ordering_str[] = {
@@ -1258,29 +1263,31 @@ void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo
[WO_bdev_flush] = "flush",
};
- pwo = tconn->write_ordering;
+ pwo = connection->write_ordering;
wo = min(pwo, wo);
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (!get_ldev_if_state(mdev, D_ATTACHING))
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+
+ if (!get_ldev_if_state(device, D_ATTACHING))
continue;
- dc = rcu_dereference(mdev->ldev->disk_conf);
+ dc = rcu_dereference(device->ldev->disk_conf);
if (wo == WO_bdev_flush && !dc->disk_flushes)
wo = WO_drain_io;
if (wo == WO_drain_io && !dc->disk_drain)
wo = WO_none;
- put_ldev(mdev);
+ put_ldev(device);
}
rcu_read_unlock();
- tconn->write_ordering = wo;
- if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
- conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
+ connection->write_ordering = wo;
+ if (pwo != connection->write_ordering || wo == WO_bdev_flush)
+ drbd_info(connection, "Method to ensure write ordering: %s\n", write_ordering_str[connection->write_ordering]);
}
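
drbd_bump_write_ordering() only ever moves down the ladder flush -> drain -> none: it takes the minimum of the current and the requested method, then degrades further if any attached disk has disk_flushes or disk_drain turned off. A stand-alone sketch of that decision, with invented types standing in for the connection and disk_conf:

#include <stdio.h>

enum wo { WO_none, WO_drain_io, WO_bdev_flush };        /* weakest .. strongest */

struct disk_opts { int flushes_ok, drain_ok; };

static enum wo effective_write_ordering(enum wo cur, enum wo requested,
                                        const struct disk_opts *disk, int n)
{
        enum wo wo = requested < cur ? requested : cur;  /* never upgrade */
        int i;

        for (i = 0; i < n; i++) {
                if (wo == WO_bdev_flush && !disk[i].flushes_ok)
                        wo = WO_drain_io;
                if (wo == WO_drain_io && !disk[i].drain_ok)
                        wo = WO_none;
        }
        return wo;
}

int main(void)
{
        struct disk_opts disk[2] = { { 1, 1 }, { 0, 1 } }; /* one disk cannot flush */
        static const char * const name[] = { "none", "drain", "flush" };

        printf("%s\n", name[effective_write_ordering(WO_bdev_flush,
                                                     WO_bdev_flush, disk, 2)]);
        return 0;
}
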
/**
* drbd_submit_peer_request()
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @peer_req: peer request
* @rw: flag field, see bio->bi_rw
*
@@ -1295,7 +1302,7 @@ void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo
* on certain Xen deployments.
*/
/* TODO allocate from our own bio_set. */
-int drbd_submit_peer_request(struct drbd_conf *mdev,
+int drbd_submit_peer_request(struct drbd_device *device,
struct drbd_peer_request *peer_req,
const unsigned rw, const int fault_type)
{
@@ -1319,12 +1326,12 @@ int drbd_submit_peer_request(struct drbd_conf *mdev,
next_bio:
bio = bio_alloc(GFP_NOIO, nr_pages);
if (!bio) {
- dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
+ drbd_err(device, "submit_ee: Allocation of a bio failed\n");
goto fail;
}
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = mdev->ldev->backing_bdev;
+ bio->bi_bdev = device->ldev->backing_bdev;
bio->bi_rw = rw;
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
@@ -1340,7 +1347,7 @@ next_bio:
* But in case it fails anyways,
* we deal with it, and complain (below). */
if (bio->bi_vcnt == 0) {
- dev_err(DEV,
+ drbd_err(device,
"bio_add_page failed for len=%u, "
"bi_vcnt=0 (bi_sector=%llu)\n",
len, (uint64_t)bio->bi_iter.bi_sector);
@@ -1353,8 +1360,8 @@ next_bio:
sector += len >> 9;
--nr_pages;
}
- D_ASSERT(page == NULL);
- D_ASSERT(ds == 0);
+ D_ASSERT(device, page == NULL);
+ D_ASSERT(device, ds == 0);
atomic_set(&peer_req->pending_bios, n_bios);
do {
@@ -1362,7 +1369,7 @@ next_bio:
bios = bios->bi_next;
bio->bi_next = NULL;
- drbd_generic_make_request(mdev, fault_type, bio);
+ drbd_generic_make_request(device, fault_type, bio);
} while (bios);
return 0;
@@ -1375,36 +1382,44 @@ fail:
return err;
}
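
drbd_submit_peer_request() covers the peer request's page chain with one or more bios, opening a fresh bio whenever bio_add_page() refuses another segment and submitting only once the whole chain is packed. The splitting loop, modelled in plain C with invented structures (no kernel bio API here):

#include <stdio.h>

struct page_node { unsigned int len; struct page_node *next; };

/* A "bio" accepts at most max_segs segments; count how many bios are needed
 * to cover the whole page chain, mirroring the next_bio: loop above. */
static int count_bios(const struct page_node *page, unsigned int max_segs)
{
        int n_bios = 0;
        unsigned int segs = 0;

        while (page) {
                if (segs == 0 || segs == max_segs) {
                        n_bios++;               /* "allocate" a fresh bio */
                        segs = 0;
                }
                segs++;                         /* "bio_add_page" succeeded */
                page = page->next;
        }
        return n_bios;
}

int main(void)
{
        struct page_node c = { 4096, NULL }, b = { 4096, &c }, a = { 4096, &b };

        printf("%d bios needed\n", count_bios(&a, 2));  /* 2 */
        return 0;
}
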
-static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
+static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
struct drbd_peer_request *peer_req)
{
struct drbd_interval *i = &peer_req->i;
- drbd_remove_interval(&mdev->write_requests, i);
+ drbd_remove_interval(&device->write_requests, i);
drbd_clear_interval(i);
/* Wake up any processes waiting for this peer request to complete. */
if (i->waiting)
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
}
-void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
+static void conn_wait_active_ee_empty(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+
+ kref_get(&device->kref);
rcu_read_unlock();
- drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_wait_ee_list_empty(device, &device->active_ee);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
}
-static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
+static struct drbd_peer_device *
+conn_peer_device(struct drbd_connection *connection, int volume_number)
+{
+ return idr_find(&connection->peer_devices, volume_number);
+}
+
+static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
{
int rv;
struct p_barrier *p = pi->data;
@@ -1413,16 +1428,16 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
/* FIXME these are unacked on connection,
* not a specific (peer)device.
*/
- tconn->current_epoch->barrier_nr = p->barrier;
- tconn->current_epoch->tconn = tconn;
- rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
+ connection->current_epoch->barrier_nr = p->barrier;
+ connection->current_epoch->connection = connection;
+ rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
* the activity log, which means it would not be resynced in case the
* R_PRIMARY crashes now.
* Therefore we must send the barrier_ack after the barrier request was
* completed. */
- switch (tconn->write_ordering) {
+ switch (connection->write_ordering) {
case WO_none:
if (rv == FE_RECYCLED)
return 0;
@@ -1433,15 +1448,15 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
if (epoch)
break;
else
- conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
+ drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
/* Fall through */
case WO_bdev_flush:
case WO_drain_io:
- conn_wait_active_ee_empty(tconn);
- drbd_flush(tconn);
+ conn_wait_active_ee_empty(connection);
+ drbd_flush(connection);
- if (atomic_read(&tconn->current_epoch->epoch_size)) {
+ if (atomic_read(&connection->current_epoch->epoch_size)) {
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
if (epoch)
break;
@@ -1449,7 +1464,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
return 0;
default:
- conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
+ drbd_err(connection, "Strangeness in connection->write_ordering %d\n", connection->write_ordering);
return -EIO;
}
@@ -1457,16 +1472,16 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
atomic_set(&epoch->epoch_size, 0);
atomic_set(&epoch->active, 0);
- spin_lock(&tconn->epoch_lock);
- if (atomic_read(&tconn->current_epoch->epoch_size)) {
- list_add(&epoch->list, &tconn->current_epoch->list);
- tconn->current_epoch = epoch;
- tconn->epochs++;
+ spin_lock(&connection->epoch_lock);
+ if (atomic_read(&connection->current_epoch->epoch_size)) {
+ list_add(&epoch->list, &connection->current_epoch->list);
+ connection->current_epoch = epoch;
+ connection->epochs++;
} else {
/* The current_epoch got recycled while we allocated this one... */
kfree(epoch);
}
- spin_unlock(&tconn->epoch_lock);
+ spin_unlock(&connection->epoch_lock);
return 0;
}
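
receive_Barrier() and drbd_may_finish_epoch() treat an epoch as a pair of counters: epoch_size accounts the writes that belong to it, active those still in flight, and the barrier ack may only be sent once a barrier number is known and active has reached zero. A toy model of that finish condition (field names mirror the code above; everything else is invented):

#include <stdbool.h>
#include <stdio.h>

struct epoch {
        int epoch_size;         /* writes accounted to this epoch */
        int active;             /* writes still in flight */
        bool have_barrier_nr;   /* the P_BARRIER for it has arrived */
};

/* May the barrier ack be sent and the epoch retired? */
static bool epoch_may_finish(const struct epoch *e)
{
        return e->epoch_size != 0 && e->active == 0 && e->have_barrier_nr;
}

int main(void)
{
        struct epoch e = { .epoch_size = 3, .active = 1, .have_barrier_nr = true };

        printf("%d\n", epoch_may_finish(&e));   /* 0: a write is still in flight */
        e.active = 0;
        printf("%d\n", epoch_may_finish(&e));   /* 1: send the barrier ack now */
        return 0;
}
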
@@ -1474,25 +1489,26 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
/* used from receive_RSDataReply (recv_resync_read)
* and from receive_Data */
static struct drbd_peer_request *
-read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
+read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
int data_size) __must_hold(local)
{
- const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
+ struct drbd_device *device = peer_device->device;
+ const sector_t capacity = drbd_get_capacity(device->this_bdev);
struct drbd_peer_request *peer_req;
struct page *page;
int dgs, ds, err;
- void *dig_in = mdev->tconn->int_dig_in;
- void *dig_vv = mdev->tconn->int_dig_vv;
+ void *dig_in = peer_device->connection->int_dig_in;
+ void *dig_vv = peer_device->connection->int_dig_vv;
unsigned long *data;
dgs = 0;
- if (mdev->tconn->peer_integrity_tfm) {
- dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ if (peer_device->connection->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
/*
* FIXME: Receive the incoming digest into the receive buffer
* here, together with its struct p_data?
*/
- err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ err = drbd_recv_all_warn(peer_device->connection, dig_in, dgs);
if (err)
return NULL;
data_size -= dgs;
@@ -1506,7 +1522,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
/* even though we trust our peer,
* we sometimes have to double check. */
if (sector + (data_size>>9) > capacity) {
- dev_err(DEV, "request from peer beyond end of local disk: "
+ drbd_err(device, "request from peer beyond end of local disk: "
"capacity: %llus < sector: %llus + size: %u\n",
(unsigned long long)capacity,
(unsigned long long)sector, data_size);
@@ -1516,7 +1532,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
+ peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, GFP_NOIO);
if (!peer_req)
return NULL;
@@ -1528,36 +1544,36 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
page_chain_for_each(page) {
unsigned len = min_t(int, ds, PAGE_SIZE);
data = kmap(page);
- err = drbd_recv_all_warn(mdev->tconn, data, len);
- if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
- dev_err(DEV, "Fault injection: Corrupting data on receive\n");
+ err = drbd_recv_all_warn(peer_device->connection, data, len);
+ if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
+ drbd_err(device, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
kunmap(page);
if (err) {
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
return NULL;
}
ds -= len;
}
if (dgs) {
- drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
+ drbd_csum_ee(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
- dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
+ drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
(unsigned long long)sector, data_size);
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
return NULL;
}
}
- mdev->recv_cnt += data_size>>9;
+ device->recv_cnt += data_size>>9;
return peer_req;
}
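
read_in_block() receives the peer's digest first (when an integrity transform is configured), then the payload, recomputes the checksum locally and drops the request on a mismatch. The shape of that check, modelled with a trivial stand-in checksum instead of the negotiated crypto hash:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the negotiated integrity hash; a real setup uses the
 * configured crypto transform, not this toy sum. */
static uint32_t toy_csum(const unsigned char *buf, size_t len)
{
        uint32_t s = 0;

        while (len--)
                s = s * 31 + *buf++;
        return s;
}

/* 0 if the received payload matches the digest that preceded it. */
static int verify_block(const unsigned char *payload, size_t len, uint32_t dig_in)
{
        uint32_t dig_vv = toy_csum(payload, len);       /* recompute locally */

        return memcmp(&dig_in, &dig_vv, sizeof(dig_vv)) ? -1 : 0;
}

int main(void)
{
        unsigned char block[] = "resync payload";
        uint32_t dig = toy_csum(block, sizeof(block) - 1);

        printf("intact: %d\n", verify_block(block, sizeof(block) - 1, dig));
        block[0] ^= 0xff;       /* what DRBD_FAULT_RECEIVE injection does */
        printf("corrupt: %d\n", verify_block(block, sizeof(block) - 1, dig));
        return 0;
}
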
/* drbd_drain_block() just takes a data block
* out of the socket input buffer, and discards it.
*/
-static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
+static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
{
struct page *page;
int err = 0;
@@ -1566,36 +1582,36 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
if (!data_size)
return 0;
- page = drbd_alloc_pages(mdev, 1, 1);
+ page = drbd_alloc_pages(peer_device, 1, 1);
data = kmap(page);
while (data_size) {
unsigned int len = min_t(int, data_size, PAGE_SIZE);
- err = drbd_recv_all_warn(mdev->tconn, data, len);
+ err = drbd_recv_all_warn(peer_device->connection, data, len);
if (err)
break;
data_size -= len;
}
kunmap(page);
- drbd_free_pages(mdev, page, 0);
+ drbd_free_pages(peer_device->device, page, 0);
return err;
}
-static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
+static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
sector_t sector, int data_size)
{
struct bio_vec bvec;
struct bvec_iter iter;
struct bio *bio;
int dgs, err, expect;
- void *dig_in = mdev->tconn->int_dig_in;
- void *dig_vv = mdev->tconn->int_dig_vv;
+ void *dig_in = peer_device->connection->int_dig_in;
+ void *dig_vv = peer_device->connection->int_dig_vv;
dgs = 0;
- if (mdev->tconn->peer_integrity_tfm) {
- dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
- err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ if (peer_device->connection->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+ err = drbd_recv_all_warn(peer_device->connection, dig_in, dgs);
if (err)
return err;
data_size -= dgs;
@@ -1603,15 +1619,15 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
/* optimistically update recv_cnt. if receiving fails below,
* we disconnect anyways, and counters will be reset. */
- mdev->recv_cnt += data_size>>9;
+ peer_device->device->recv_cnt += data_size>>9;
bio = req->master_bio;
- D_ASSERT(sector == bio->bi_iter.bi_sector);
+ D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
bio_for_each_segment(bvec, bio, iter) {
void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
expect = min_t(int, data_size, bvec.bv_len);
- err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
+ err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
kunmap(bvec.bv_page);
if (err)
return err;
@@ -1619,14 +1635,14 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
}
if (dgs) {
- drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
+ drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
- dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
+ drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
return -EINVAL;
}
}
- D_ASSERT(data_size == 0);
+ D_ASSERT(peer_device->device, data_size == 0);
return 0;
}
@@ -1638,64 +1654,67 @@ static int e_end_resync_block(struct drbd_work *w, int unused)
{
struct drbd_peer_request *peer_req =
container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
sector_t sector = peer_req->i.sector;
int err;
- D_ASSERT(drbd_interval_empty(&peer_req->i));
+ D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- drbd_set_in_sync(mdev, sector, peer_req->i.size);
- err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
+ drbd_set_in_sync(device, sector, peer_req->i.size);
+ err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
} else {
/* Record failure to sync */
- drbd_rs_failed_io(mdev, sector, peer_req->i.size);
+ drbd_rs_failed_io(device, sector, peer_req->i.size);
- err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
+ err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
}
- dec_unacked(mdev);
+ dec_unacked(device);
return err;
}
-static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
+static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
+ int data_size) __releases(local)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
- peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
+ peer_req = read_in_block(peer_device, ID_SYNCER, sector, data_size);
if (!peer_req)
goto fail;
- dec_rs_pending(mdev);
+ dec_rs_pending(device);
- inc_unacked(mdev);
+ inc_unacked(device);
/* corresponding dec_unacked() in e_end_resync_block()
* respective _drbd_clear_done_ee */
peer_req->w.cb = e_end_resync_block;
- spin_lock_irq(&mdev->tconn->req_lock);
- list_add(&peer_req->w.list, &mdev->sync_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ list_add(&peer_req->w.list, &device->sync_ee);
+ spin_unlock_irq(&device->resource->req_lock);
- atomic_add(data_size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+ atomic_add(data_size >> 9, &device->rs_sect_ev);
+ if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
return 0;
/* don't care for the reason here */
- dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->tconn->req_lock);
+ drbd_err(device, "submit failed, triggering re-connect\n");
+ spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
fail:
- put_ldev(mdev);
+ put_ldev(device);
return -EIO;
}
static struct drbd_request *
-find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
+find_request(struct drbd_device *device, struct rb_root *root, u64 id,
sector_t sector, bool missing_ok, const char *func)
{
struct drbd_request *req;
@@ -1705,36 +1724,38 @@ find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
return req;
if (!missing_ok) {
- dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
+ drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
(unsigned long)id, (unsigned long long)sector);
}
return NULL;
}
-static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct drbd_request *req;
sector_t sector;
int err;
struct p_data *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
sector = be64_to_cpu(p->sector);
- spin_lock_irq(&mdev->tconn->req_lock);
- req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
+ spin_unlock_irq(&device->resource->req_lock);
if (unlikely(!req))
return -EIO;
/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
* special casing it there for the various failure cases.
* still no race with drbd_fail_pending_reads */
- err = recv_dless_read(mdev, req, sector, pi->size);
+ err = recv_dless_read(peer_device, req, sector, pi->size);
if (!err)
req_mod(req, DATA_RECEIVED);
/* else: nothing. handled from drbd_disconnect...
@@ -1744,46 +1765,48 @@ static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
return err;
}
-static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
sector_t sector;
int err;
struct p_data *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
sector = be64_to_cpu(p->sector);
- D_ASSERT(p->block_id == ID_SYNCER);
+ D_ASSERT(device, p->block_id == ID_SYNCER);
- if (get_ldev(mdev)) {
+ if (get_ldev(device)) {
/* data is submitted to disk within recv_resync_read.
* corresponding put_ldev done below on error,
* or in drbd_peer_request_endio. */
- err = recv_resync_read(mdev, sector, pi->size);
+ err = recv_resync_read(peer_device, sector, pi->size);
} else {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Can not write resync data to local disk.\n");
+ drbd_err(device, "Can not write resync data to local disk.\n");
- err = drbd_drain_block(mdev, pi->size);
+ err = drbd_drain_block(peer_device, pi->size);
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
+ drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
}
- atomic_add(pi->size >> 9, &mdev->rs_sect_in);
+ atomic_add(pi->size >> 9, &device->rs_sect_in);
return err;
}
-static void restart_conflicting_writes(struct drbd_conf *mdev,
+static void restart_conflicting_writes(struct drbd_device *device,
sector_t sector, int size)
{
struct drbd_interval *i;
struct drbd_request *req;
- drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ drbd_for_each_overlap(i, &device->write_requests, sector, size) {
if (!i->local)
continue;
req = container_of(i, struct drbd_request, i);
@@ -1803,52 +1826,53 @@ static int e_end_block(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req =
container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
sector_t sector = peer_req->i.sector;
int err = 0, pcmd;
if (peer_req->flags & EE_SEND_WRITE_ACK) {
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
- mdev->state.conn <= C_PAUSED_SYNC_T &&
+ pcmd = (device->state.conn >= C_SYNC_SOURCE &&
+ device->state.conn <= C_PAUSED_SYNC_T &&
peer_req->flags & EE_MAY_SET_IN_SYNC) ?
P_RS_WRITE_ACK : P_WRITE_ACK;
- err = drbd_send_ack(mdev, pcmd, peer_req);
+ err = drbd_send_ack(peer_device, pcmd, peer_req);
if (pcmd == P_RS_WRITE_ACK)
- drbd_set_in_sync(mdev, sector, peer_req->i.size);
+ drbd_set_in_sync(device, sector, peer_req->i.size);
} else {
- err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
+ err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
/* we expect it to be marked out of sync anyways...
* maybe assert this? */
}
- dec_unacked(mdev);
+ dec_unacked(device);
}
/* we delete from the conflict detection hash _after_ we sent out the
* P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
if (peer_req->flags & EE_IN_INTERVAL_TREE) {
- spin_lock_irq(&mdev->tconn->req_lock);
- D_ASSERT(!drbd_interval_empty(&peer_req->i));
- drbd_remove_epoch_entry_interval(mdev, peer_req);
+ spin_lock_irq(&device->resource->req_lock);
+ D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
+ drbd_remove_epoch_entry_interval(device, peer_req);
if (peer_req->flags & EE_RESTART_REQUESTS)
- restart_conflicting_writes(mdev, sector, peer_req->i.size);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ restart_conflicting_writes(device, sector, peer_req->i.size);
+ spin_unlock_irq(&device->resource->req_lock);
} else
- D_ASSERT(drbd_interval_empty(&peer_req->i));
+ D_ASSERT(device, drbd_interval_empty(&peer_req->i));
- drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
+ drbd_may_finish_epoch(first_peer_device(device)->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
return err;
}
static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{
- struct drbd_conf *mdev = w->mdev;
struct drbd_peer_request *peer_req =
container_of(w, struct drbd_peer_request, w);
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
int err;
- err = drbd_send_ack(mdev, ack, peer_req);
- dec_unacked(mdev);
+ err = drbd_send_ack(peer_device, ack, peer_req);
+ dec_unacked(peer_device->device);
return err;
}
@@ -1860,9 +1884,11 @@ static int e_send_superseded(struct drbd_work *w, int unused)
static int e_send_retry_write(struct drbd_work *w, int unused)
{
- struct drbd_tconn *tconn = w->mdev->tconn;
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ struct drbd_connection *connection = peer_req->peer_device->connection;
- return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
+ return e_send_ack(w, connection->agreed_pro_version >= 100 ?
P_RETRY_WRITE : P_SUPERSEDED);
}
@@ -1881,18 +1907,19 @@ static u32 seq_max(u32 a, u32 b)
return seq_greater(a, b) ? a : b;
}
-static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
+static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
{
+ struct drbd_device *device = peer_device->device;
unsigned int newest_peer_seq;
- if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
- spin_lock(&mdev->peer_seq_lock);
- newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
- mdev->peer_seq = newest_peer_seq;
- spin_unlock(&mdev->peer_seq_lock);
- /* wake up only if we actually changed mdev->peer_seq */
+ if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
+ spin_lock(&device->peer_seq_lock);
+ newest_peer_seq = seq_max(device->peer_seq, peer_seq);
+ device->peer_seq = newest_peer_seq;
+ spin_unlock(&device->peer_seq_lock);
+ /* wake up only if we actually changed device->peer_seq */
if (peer_seq == newest_peer_seq)
- wake_up(&mdev->seq_wait);
+ wake_up(&device->seq_wait);
}
}
@@ -1902,20 +1929,20 @@ static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
}
/* maybe change sync_ee into interval trees as well? */
-static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
+static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
struct drbd_peer_request *rs_req;
bool rv = 0;
- spin_lock_irq(&mdev->tconn->req_lock);
- list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
+ spin_lock_irq(&device->resource->req_lock);
+ list_for_each_entry(rs_req, &device->sync_ee, w.list) {
if (overlaps(peer_req->i.sector, peer_req->i.size,
rs_req->i.sector, rs_req->i.size)) {
rv = 1;
break;
}
}
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
return rv;
}
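
overlapping_resync_write() holds back an application write while any resync write on sync_ee touches the same sectors; the overlaps() helper it uses is a plain interval test on sector ranges. Its body is not part of this hunk, so the version below is a reconstruction of such a test (sizes in bytes, 512-byte sectors, as in the surrounding code):

#include <stdio.h>

typedef unsigned long long sector_t;

/* Two requests overlap iff neither ends at or before the other's start.
 * s1/s2 are start sectors, l1/l2 byte lengths (>> 9 converts to sectors). */
static int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
        return s1 + (l1 >> 9) > s2 && s2 + (l2 >> 9) > s1;
}

int main(void)
{
        printf("%d\n", overlaps(0, 4096, 4, 4096));     /* 1: [0,8) vs [4,12) */
        printf("%d\n", overlaps(0, 4096, 8, 4096));     /* 0: [0,8) vs [8,16) */
        return 0;
}
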
@@ -1929,9 +1956,9 @@ static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_re
*
* Note: we don't care for Ack packets overtaking P_DATA packets.
*
- * In case packet_seq is larger than mdev->peer_seq number, there are
+ * In case packet_seq is larger than device->peer_seq number, there are
* outstanding packets on the msock. We wait for them to arrive.
- * In case we are the logically next packet, we update mdev->peer_seq
+ * In case we are the logically next packet, we update device->peer_seq
* ourselves. Correctly handles 32bit wrap around.
*
* Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
@@ -1941,19 +1968,20 @@ static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_re
*
* returns 0 if we may process the packet,
* -ERESTARTSYS if we were interrupted (by disconnect signal). */
-static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
+static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
{
+ struct drbd_device *device = peer_device->device;
DEFINE_WAIT(wait);
long timeout;
int ret = 0, tp;
- if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
+ if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
return 0;
- spin_lock(&mdev->peer_seq_lock);
+ spin_lock(&device->peer_seq_lock);
for (;;) {
- if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
- mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
+ if (!seq_greater(peer_seq - 1, device->peer_seq)) {
+ device->peer_seq = seq_max(device->peer_seq, peer_seq);
break;
}
@@ -1963,35 +1991,35 @@ static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_s
}
rcu_read_lock();
- tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ tp = rcu_dereference(first_peer_device(device)->connection->net_conf)->two_primaries;
rcu_read_unlock();
if (!tp)
break;
/* Only need to wait if two_primaries is enabled */
- prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
- spin_unlock(&mdev->peer_seq_lock);
+ prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock(&device->peer_seq_lock);
rcu_read_lock();
- timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
+ timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10;
rcu_read_unlock();
timeout = schedule_timeout(timeout);
- spin_lock(&mdev->peer_seq_lock);
+ spin_lock(&device->peer_seq_lock);
if (!timeout) {
ret = -ETIMEDOUT;
- dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
+ drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
break;
}
}
- spin_unlock(&mdev->peer_seq_lock);
- finish_wait(&mdev->seq_wait, &wait);
+ spin_unlock(&device->peer_seq_lock);
+ finish_wait(&device->seq_wait, &wait);
return ret;
}
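
wait_for_and_update_peer_seq() relies on seq_greater()/seq_max() to order 32-bit packet sequence numbers across wraparound, which is what the comment's "Correctly handles 32bit wrap around" refers to. The usual trick is to compare the signed difference; seq_greater()'s body is not shown in this hunk, so the sketch below reconstructs it under that assumption:

#include <stdint.h>
#include <stdio.h>

/* a is "newer" than b when the signed distance from b to a is positive;
 * this stays correct when the 32-bit counter wraps. */
static int seq_greater(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) > 0;
}

static uint32_t seq_max(uint32_t a, uint32_t b)
{
        return seq_greater(a, b) ? a : b;
}

int main(void)
{
        printf("%d\n", seq_greater(5, 0xfffffffeu));    /* 1: 5 lies after the wrap */
        printf("%u\n", seq_max(0xfffffffeu, 5));        /* 5 */
        return 0;
}
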
/* see also bio_flags_to_wire()
* DRBD_REQ_*, because we need to semantically map the flags to data packet
* flags and back. We may replicate to other kernel versions. */
-static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
+static unsigned long wire_flags_to_bio(u32 dpf)
{
return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) |
@@ -1999,13 +2027,13 @@ static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
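
wire_flags_to_bio() is one half of a symmetric translation: request flags are encoded into DP_* bits by the sender (bio_flags_to_wire(), referenced in the comment above) and decoded back here, so peers on different kernel versions agree on request semantics. A self-contained round-trip model with made-up flag values (not the kernel's REQ_*/DP_* constants):

#include <stdio.h>

/* toy wire bits */
#define DP_RW_SYNC      (1u << 0)
#define DP_FUA          (1u << 1)
#define DP_FLUSH        (1u << 2)
/* toy "bio" bits, deliberately different values */
#define REQ_SYNC        (1u << 4)
#define REQ_FUA         (1u << 5)
#define REQ_FLUSH       (1u << 6)

static unsigned int wire_to_bio(unsigned int dpf)
{
        return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
               (dpf & DP_FUA ? REQ_FUA : 0) |
               (dpf & DP_FLUSH ? REQ_FLUSH : 0);
}

static unsigned int bio_to_wire(unsigned int rw)
{
        return (rw & REQ_SYNC ? DP_RW_SYNC : 0) |
               (rw & REQ_FUA ? DP_FUA : 0) |
               (rw & REQ_FLUSH ? DP_FLUSH : 0);
}

int main(void)
{
        unsigned int rw = REQ_SYNC | REQ_FUA;

        printf("round trip ok: %d\n", wire_to_bio(bio_to_wire(rw)) == rw); /* 1 */
        return 0;
}
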
-static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
+static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
unsigned int size)
{
struct drbd_interval *i;
repeat:
- drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ drbd_for_each_overlap(i, &device->write_requests, sector, size) {
struct drbd_request *req;
struct bio_and_error m;
@@ -2016,19 +2044,19 @@ static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
continue;
req->rq_state &= ~RQ_POSTPONED;
__req_mod(req, NEG_ACKED, &m);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
if (m.bio)
- complete_master_bio(mdev, &m);
- spin_lock_irq(&mdev->tconn->req_lock);
+ complete_master_bio(device, &m);
+ spin_lock_irq(&device->resource->req_lock);
goto repeat;
}
}
-static int handle_write_conflicts(struct drbd_conf *mdev,
+static int handle_write_conflicts(struct drbd_device *device,
struct drbd_peer_request *peer_req)
{
- struct drbd_tconn *tconn = mdev->tconn;
- bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ struct drbd_connection *connection = peer_req->peer_device->connection;
+ bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
sector_t sector = peer_req->i.sector;
const unsigned int size = peer_req->i.size;
struct drbd_interval *i;
@@ -2039,10 +2067,10 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
* Inserting the peer request into the write_requests tree will prevent
* new conflicting local requests from being added.
*/
- drbd_insert_interval(&mdev->write_requests, &peer_req->i);
+ drbd_insert_interval(&device->write_requests, &peer_req->i);
repeat:
- drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ drbd_for_each_overlap(i, &device->write_requests, sector, size) {
if (i == &peer_req->i)
continue;
@@ -2052,7 +2080,7 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
* should not happen in a two-node setup. Wait for the
* earlier peer request to complete.
*/
- err = drbd_wait_misc(mdev, i);
+ err = drbd_wait_misc(device, i);
if (err)
goto out;
goto repeat;
@@ -2070,18 +2098,18 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
(i->size >> 9) >= sector + (size >> 9);
if (!equal)
- dev_alert(DEV, "Concurrent writes detected: "
+ drbd_alert(device, "Concurrent writes detected: "
"local=%llus +%u, remote=%llus +%u, "
"assuming %s came first\n",
(unsigned long long)i->sector, i->size,
(unsigned long long)sector, size,
superseded ? "local" : "remote");
- inc_unacked(mdev);
+ inc_unacked(device);
peer_req->w.cb = superseded ? e_send_superseded :
e_send_retry_write;
- list_add_tail(&peer_req->w.list, &mdev->done_ee);
- wake_asender(mdev->tconn);
+ list_add_tail(&peer_req->w.list, &device->done_ee);
+ wake_asender(connection);
err = -ENOENT;
goto out;
@@ -2090,7 +2118,7 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
container_of(i, struct drbd_request, i);
if (!equal)
- dev_alert(DEV, "Concurrent writes detected: "
+ drbd_alert(device, "Concurrent writes detected: "
"local=%llus +%u, remote=%llus +%u\n",
(unsigned long long)i->sector, i->size,
(unsigned long long)sector, size);
@@ -2108,12 +2136,10 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
* request to finish locally before submitting
* the conflicting peer request.
*/
- err = drbd_wait_misc(mdev, &req->i);
+ err = drbd_wait_misc(device, &req->i);
if (err) {
- _conn_request_state(mdev->tconn,
- NS(conn, C_TIMEOUT),
- CS_HARD);
- fail_postponed_requests(mdev, sector, size);
+ _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
+ fail_postponed_requests(device, sector, size);
goto out;
}
goto repeat;
@@ -2129,14 +2155,15 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
out:
if (err)
- drbd_remove_epoch_entry_interval(mdev, peer_req);
+ drbd_remove_epoch_entry_interval(device, peer_req);
return err;
}
/* mirrored write */
-static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
sector_t sector;
struct drbd_peer_request *peer_req;
struct p_data *p = pi->data;
@@ -2145,17 +2172,18 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
u32 dp_flags;
int err, tp;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- if (!get_ldev(mdev)) {
+ if (!get_ldev(device)) {
int err2;
- err = wait_for_and_update_peer_seq(mdev, peer_seq);
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
- atomic_inc(&tconn->current_epoch->epoch_size);
- err2 = drbd_drain_block(mdev, pi->size);
+ err = wait_for_and_update_peer_seq(peer_device, peer_seq);
+ drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
+ atomic_inc(&connection->current_epoch->epoch_size);
+ err2 = drbd_drain_block(peer_device, pi->size);
if (!err)
err = err2;
return err;
@@ -2168,61 +2196,61 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
*/
sector = be64_to_cpu(p->sector);
- peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
+ peer_req = read_in_block(peer_device, p->block_id, sector, pi->size);
if (!peer_req) {
- put_ldev(mdev);
+ put_ldev(device);
return -EIO;
}
peer_req->w.cb = e_end_block;
dp_flags = be32_to_cpu(p->dp_flags);
- rw |= wire_flags_to_bio(mdev, dp_flags);
+ rw |= wire_flags_to_bio(dp_flags);
if (peer_req->pages == NULL) {
- D_ASSERT(peer_req->i.size == 0);
- D_ASSERT(dp_flags & DP_FLUSH);
+ D_ASSERT(device, peer_req->i.size == 0);
+ D_ASSERT(device, dp_flags & DP_FLUSH);
}
if (dp_flags & DP_MAY_SET_IN_SYNC)
peer_req->flags |= EE_MAY_SET_IN_SYNC;
- spin_lock(&tconn->epoch_lock);
- peer_req->epoch = tconn->current_epoch;
+ spin_lock(&connection->epoch_lock);
+ peer_req->epoch = connection->current_epoch;
atomic_inc(&peer_req->epoch->epoch_size);
atomic_inc(&peer_req->epoch->active);
- spin_unlock(&tconn->epoch_lock);
+ spin_unlock(&connection->epoch_lock);
rcu_read_lock();
- tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
rcu_read_unlock();
if (tp) {
peer_req->flags |= EE_IN_INTERVAL_TREE;
- err = wait_for_and_update_peer_seq(mdev, peer_seq);
+ err = wait_for_and_update_peer_seq(peer_device, peer_seq);
if (err)
goto out_interrupted;
- spin_lock_irq(&mdev->tconn->req_lock);
- err = handle_write_conflicts(mdev, peer_req);
+ spin_lock_irq(&device->resource->req_lock);
+ err = handle_write_conflicts(device, peer_req);
if (err) {
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
if (err == -ENOENT) {
- put_ldev(mdev);
+ put_ldev(device);
return 0;
}
goto out_interrupted;
}
} else {
- update_peer_seq(mdev, peer_seq);
- spin_lock_irq(&mdev->tconn->req_lock);
+ update_peer_seq(peer_device, peer_seq);
+ spin_lock_irq(&device->resource->req_lock);
}
- list_add(&peer_req->w.list, &mdev->active_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &device->active_ee);
+ spin_unlock_irq(&device->resource->req_lock);
- if (mdev->state.conn == C_SYNC_TARGET)
- wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
+ if (device->state.conn == C_SYNC_TARGET)
+ wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
- if (mdev->tconn->agreed_pro_version < 100) {
+ if (peer_device->connection->agreed_pro_version < 100) {
rcu_read_lock();
- switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
+ switch (rcu_dereference(peer_device->connection->net_conf)->wire_protocol) {
case DRBD_PROT_C:
dp_flags |= DP_SEND_WRITE_ACK;
break;
@@ -2235,7 +2263,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
if (dp_flags & DP_SEND_WRITE_ACK) {
peer_req->flags |= EE_SEND_WRITE_ACK;
- inc_unacked(mdev);
+ inc_unacked(device);
/* corresponding dec_unacked() in e_end_block()
* respective _drbd_clear_done_ee */
}
@@ -2243,34 +2271,34 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
if (dp_flags & DP_SEND_RECEIVE_ACK) {
/* I really don't like it that the receiver thread
* sends on the msock, but anyways */
- drbd_send_ack(mdev, P_RECV_ACK, peer_req);
+ drbd_send_ack(first_peer_device(device), P_RECV_ACK, peer_req);
}
- if (mdev->state.pdsk < D_INCONSISTENT) {
+ if (device->state.pdsk < D_INCONSISTENT) {
/* In case we have the only disk of the cluster, */
- drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
+ drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
- drbd_al_begin_io(mdev, &peer_req->i, true);
+ drbd_al_begin_io(device, &peer_req->i, true);
}
- err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
+ err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR);
if (!err)
return 0;
/* don't care for the reason here */
- dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->tconn->req_lock);
+ drbd_err(device, "submit failed, triggering re-connect\n");
+ spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
- drbd_remove_epoch_entry_interval(mdev, peer_req);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ drbd_remove_epoch_entry_interval(device, peer_req);
+ spin_unlock_irq(&device->resource->req_lock);
if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
- drbd_al_complete_io(mdev, &peer_req->i);
+ drbd_al_complete_io(device, &peer_req->i);
out_interrupted:
- drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
- put_ldev(mdev);
- drbd_free_peer_req(mdev, peer_req);
+ drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT + EV_CLEANUP);
+ put_ldev(device);
+ drbd_free_peer_req(device, peer_req);
return err;
}
@@ -2285,9 +2313,9 @@ out_interrupted:
* The current sync rate used here uses only the most recent two step marks,
* to have a short time average so we can react faster.
*/
-int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
+int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector)
{
- struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
+ struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
unsigned long db, dt, dbdt;
struct lc_element *tmp;
int curr_events;
@@ -2295,48 +2323,48 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
unsigned int c_min_rate;
rcu_read_lock();
- c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
+ c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
rcu_read_unlock();
/* feature disabled? */
if (c_min_rate == 0)
return 0;
- spin_lock_irq(&mdev->al_lock);
- tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
+ spin_lock_irq(&device->al_lock);
+ tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
if (tmp) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
return 0;
}
/* Do not slow down if app IO is already waiting for this extent */
}
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
- atomic_read(&mdev->rs_sect_ev);
+ atomic_read(&device->rs_sect_ev);
- if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
+ if (!device->rs_last_events || curr_events - device->rs_last_events > 64) {
unsigned long rs_left;
int i;
- mdev->rs_last_events = curr_events;
+ device->rs_last_events = curr_events;
/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
* approx. */
- i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
+ i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
- if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
- rs_left = mdev->ov_left;
+ if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
+ rs_left = device->ov_left;
else
- rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
+ rs_left = drbd_bm_total_weight(device) - device->rs_failed;
- dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
+ dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
if (!dt)
dt++;
- db = mdev->rs_mark_left[i] - rs_left;
+ db = device->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
if (dbdt > c_min_rate)
@@ -2346,9 +2374,10 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
}
-static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
sector_t sector;
sector_t capacity;
struct drbd_peer_request *peer_req;
@@ -2357,58 +2386,59 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
unsigned int fault_type;
struct p_block_req *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
- capacity = drbd_get_capacity(mdev->this_bdev);
+ device = peer_device->device;
+ capacity = drbd_get_capacity(device->this_bdev);
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
- dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
+ drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
return -EINVAL;
}
if (sector + (size>>9) > capacity) {
- dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
+ drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
return -EINVAL;
}
- if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
+ if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
verb = 1;
switch (pi->cmd) {
case P_DATA_REQUEST:
- drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
+ drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
break;
case P_RS_DATA_REQUEST:
case P_CSUM_RS_REQUEST:
case P_OV_REQUEST:
- drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
+ drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY , p);
break;
case P_OV_REPLY:
verb = 0;
- dec_rs_pending(mdev);
- drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
+ dec_rs_pending(device);
+ drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
break;
default:
BUG();
}
if (verb && __ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Can not satisfy peer's read request, "
+ drbd_err(device, "Can not satisfy peer's read request, "
"no local data.\n");
/* drain possible payload */
- return drbd_drain_block(mdev, pi->size);
+ return drbd_drain_block(peer_device, pi->size);
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
+ peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, GFP_NOIO);
if (!peer_req) {
- put_ldev(mdev);
+ put_ldev(device);
return -ENOMEM;
}
@@ -2423,7 +2453,7 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
/* used in the sector offset progress display */
- mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
+ device->bm_resync_fo = BM_SECT_TO_BIT(sector);
break;
case P_OV_REPLY:
@@ -2439,19 +2469,19 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
peer_req->digest = di;
peer_req->flags |= EE_HAS_DIGEST;
- if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
+ if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
goto out_free_e;
if (pi->cmd == P_CSUM_RS_REQUEST) {
- D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
+ D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
peer_req->w.cb = w_e_end_csum_rs_req;
/* used in the sector offset progress display */
- mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
+ device->bm_resync_fo = BM_SECT_TO_BIT(sector);
} else if (pi->cmd == P_OV_REPLY) {
/* track progress, we may need to throttle */
- atomic_add(size >> 9, &mdev->rs_sect_in);
+ atomic_add(size >> 9, &device->rs_sect_in);
peer_req->w.cb = w_e_end_ov_reply;
- dec_rs_pending(mdev);
+ dec_rs_pending(device);
/* drbd_rs_begin_io done when we sent this request,
* but accounting still needs to be done. */
goto submit_for_resync;
@@ -2459,19 +2489,19 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
break;
case P_OV_REQUEST:
- if (mdev->ov_start_sector == ~(sector_t)0 &&
- mdev->tconn->agreed_pro_version >= 90) {
+ if (device->ov_start_sector == ~(sector_t)0 &&
+ peer_device->connection->agreed_pro_version >= 90) {
unsigned long now = jiffies;
int i;
- mdev->ov_start_sector = sector;
- mdev->ov_position = sector;
- mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
- mdev->rs_total = mdev->ov_left;
+ device->ov_start_sector = sector;
+ device->ov_position = sector;
+ device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
+ device->rs_total = device->ov_left;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = mdev->ov_left;
- mdev->rs_mark_time[i] = now;
+ device->rs_mark_left[i] = device->ov_left;
+ device->rs_mark_time[i] = now;
}
- dev_info(DEV, "Online Verify start sector: %llu\n",
+ drbd_info(device, "Online Verify start sector: %llu\n",
(unsigned long long)sector);
}
peer_req->w.cb = w_e_end_ov_req;
@@ -2504,57 +2534,61 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
* we would also throttle its application reads.
* In that case, throttling is done on the SyncTarget only.
*/
- if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
+ if (device->state.peer != R_PRIMARY && drbd_rs_should_slow_down(device, sector))
schedule_timeout_uninterruptible(HZ/10);
- if (drbd_rs_begin_io(mdev, sector))
+ if (drbd_rs_begin_io(device, sector))
goto out_free_e;
submit_for_resync:
- atomic_add(size >> 9, &mdev->rs_sect_ev);
+ atomic_add(size >> 9, &device->rs_sect_ev);
submit:
- inc_unacked(mdev);
- spin_lock_irq(&mdev->tconn->req_lock);
- list_add_tail(&peer_req->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ inc_unacked(device);
+ spin_lock_irq(&device->resource->req_lock);
+ list_add_tail(&peer_req->w.list, &device->read_ee);
+ spin_unlock_irq(&device->resource->req_lock);
- if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
+ if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
return 0;
/* don't care for the reason here */
- dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->tconn->req_lock);
+ drbd_err(device, "submit failed, triggering re-connect\n");
+ spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
/* no drbd_rs_complete_io(), we are dropping the connection anyways */
out_free_e:
- put_ldev(mdev);
- drbd_free_peer_req(mdev, peer_req);
+ put_ldev(device);
+ drbd_free_peer_req(device, peer_req);
return -EIO;
}
-static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
+/**
+ * drbd_asb_recover_0p - Recover after split-brain with no remaining primaries
+ */
+static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
{
+ struct drbd_device *device = peer_device->device;
int self, peer, rv = -100;
unsigned long ch_self, ch_peer;
enum drbd_after_sb_p after_sb_0p;
- self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
- peer = mdev->p_uuid[UI_BITMAP] & 1;
+ self = device->ldev->md.uuid[UI_BITMAP] & 1;
+ peer = device->p_uuid[UI_BITMAP] & 1;
- ch_peer = mdev->p_uuid[UI_SIZE];
- ch_self = mdev->comm_bm_set;
+ ch_peer = device->p_uuid[UI_SIZE];
+ ch_self = device->comm_bm_set;
rcu_read_lock();
- after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
+ after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
rcu_read_unlock();
switch (after_sb_0p) {
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
case ASB_CALL_HELPER:
case ASB_VIOLENTLY:
- dev_err(DEV, "Configuration error.\n");
+ drbd_err(device, "Configuration error.\n");
break;
case ASB_DISCONNECT:
break;
@@ -2578,11 +2612,11 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
break;
}
/* Else fall through to one of the other strategies... */
- dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
+ drbd_warn(device, "Discard younger/older primary did not find a decision\n"
"Using discard-least-changes instead\n");
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
- rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
+ rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
? -1 : 1;
break;
} else {
@@ -2598,7 +2632,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
rv = 1;
else /* ( ch_self == ch_peer ) */
/* Well, then use something else. */
- rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
+ rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
? -1 : 1;
break;
case ASB_DISCARD_LOCAL:
@@ -2611,13 +2645,17 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
return rv;
}
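
drbd_asb_recover_0p() returns a sync direction; broadly, a negative value means the local node gives up its changes and a positive value means the peer does. For discard-least-changes the side with fewer changed blocks loses, and an exact tie is broken by the RESOLVE_CONFLICTS bit so both nodes reach the same verdict. A compact model of just that policy (simplified, not the full after-sb-0pri switch):

#include <stdbool.h>
#include <stdio.h>

/* Positive: the peer discards its changes; negative: we discard ours. */
static int discard_least_changes(unsigned long ch_self, unsigned long ch_peer,
                                 bool we_resolve_conflicts)
{
        if (ch_self < ch_peer)
                return -1;              /* we changed less, discard local */
        if (ch_self > ch_peer)
                return 1;               /* peer changed less, discard remote */
        /* same amount of changes: arbitrary but symmetric tie break */
        return we_resolve_conflicts ? -1 : 1;
}

int main(void)
{
        printf("%d\n", discard_least_changes(10, 500, false));  /* -1 */
        printf("%d\n", discard_least_changes(42, 42, true));    /* -1, tie */
        return 0;
}
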
-static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
+/**
+ * drbd_asb_recover_1p - Recover after split-brain with one remaining primary
+ */
+static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
{
+ struct drbd_device *device = peer_device->device;
int hg, rv = -100;
enum drbd_after_sb_p after_sb_1p;
rcu_read_lock();
- after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
+ after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
rcu_read_unlock();
switch (after_sb_1p) {
case ASB_DISCARD_YOUNGER_PRI:
@@ -2626,35 +2664,35 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
case ASB_DISCARD_LOCAL:
case ASB_DISCARD_REMOTE:
case ASB_DISCARD_ZERO_CHG:
- dev_err(DEV, "Configuration error.\n");
+ drbd_err(device, "Configuration error.\n");
break;
case ASB_DISCONNECT:
break;
case ASB_CONSENSUS:
- hg = drbd_asb_recover_0p(mdev);
- if (hg == -1 && mdev->state.role == R_SECONDARY)
+ hg = drbd_asb_recover_0p(peer_device);
+ if (hg == -1 && device->state.role == R_SECONDARY)
rv = hg;
- if (hg == 1 && mdev->state.role == R_PRIMARY)
+ if (hg == 1 && device->state.role == R_PRIMARY)
rv = hg;
break;
case ASB_VIOLENTLY:
- rv = drbd_asb_recover_0p(mdev);
+ rv = drbd_asb_recover_0p(peer_device);
break;
case ASB_DISCARD_SECONDARY:
- return mdev->state.role == R_PRIMARY ? 1 : -1;
+ return device->state.role == R_PRIMARY ? 1 : -1;
case ASB_CALL_HELPER:
- hg = drbd_asb_recover_0p(mdev);
- if (hg == -1 && mdev->state.role == R_PRIMARY) {
+ hg = drbd_asb_recover_0p(peer_device);
+ if (hg == -1 && device->state.role == R_PRIMARY) {
enum drbd_state_rv rv2;
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
- rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+ rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
if (rv2 != SS_SUCCESS) {
- drbd_khelper(mdev, "pri-lost-after-sb");
+ drbd_khelper(device, "pri-lost-after-sb");
} else {
- dev_warn(DEV, "Successfully gave up primary role.\n");
+ drbd_warn(device, "Successfully gave up primary role.\n");
rv = hg;
}
} else
@@ -2664,13 +2702,17 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
return rv;
}
-static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
+/**
+ * drbd_asb_recover_2p - Recover after split-brain with two remaining primaries
+ */
+static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
{
+ struct drbd_device *device = peer_device->device;
int hg, rv = -100;
enum drbd_after_sb_p after_sb_2p;
rcu_read_lock();
- after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
+ after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p;
rcu_read_unlock();
switch (after_sb_2p) {
case ASB_DISCARD_YOUNGER_PRI:
@@ -2681,26 +2723,26 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
case ASB_DISCARD_ZERO_CHG:
- dev_err(DEV, "Configuration error.\n");
+ drbd_err(device, "Configuration error.\n");
break;
case ASB_VIOLENTLY:
- rv = drbd_asb_recover_0p(mdev);
+ rv = drbd_asb_recover_0p(peer_device);
break;
case ASB_DISCONNECT:
break;
case ASB_CALL_HELPER:
- hg = drbd_asb_recover_0p(mdev);
+ hg = drbd_asb_recover_0p(peer_device);
if (hg == -1) {
enum drbd_state_rv rv2;
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
- rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+ rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
if (rv2 != SS_SUCCESS) {
- drbd_khelper(mdev, "pri-lost-after-sb");
+ drbd_khelper(device, "pri-lost-after-sb");
} else {
- dev_warn(DEV, "Successfully gave up primary role.\n");
+ drbd_warn(device, "Successfully gave up primary role.\n");
rv = hg;
}
} else
@@ -2710,14 +2752,14 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
return rv;
}
-static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
+static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
u64 bits, u64 flags)
{
if (!uuid) {
- dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
+ drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
return;
}
- dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
+ drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
text,
(unsigned long long)uuid[UI_CURRENT],
(unsigned long long)uuid[UI_BITMAP],
@@ -2739,13 +2781,13 @@ static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
-1091 requires proto 91
-1096 requires proto 96
*/
-static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
+static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local)
{
u64 self, peer;
int i, j;
- self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
- peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
+ self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
+ peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
*rule_nr = 10;
if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
@@ -2764,46 +2806,46 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if (self == peer) {
int rct, dc; /* roles at crash time */
- if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
+ if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
- if (mdev->tconn->agreed_pro_version < 91)
+ if (first_peer_device(device)->connection->agreed_pro_version < 91)
return -1091;
- if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
- (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
- dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
- drbd_uuid_move_history(mdev);
- mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
- mdev->ldev->md.uuid[UI_BITMAP] = 0;
+ if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
+ (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
+ drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
+ drbd_uuid_move_history(device);
+ device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
+ device->ldev->md.uuid[UI_BITMAP] = 0;
- drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
- mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
+ drbd_uuid_dump(device, "self", device->ldev->md.uuid,
+ device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
*rule_nr = 34;
} else {
- dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
+ drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
*rule_nr = 36;
}
return 1;
}
- if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
+ if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
- if (mdev->tconn->agreed_pro_version < 91)
+ if (first_peer_device(device)->connection->agreed_pro_version < 91)
return -1091;
- if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
- (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
- dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
+ if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
+ (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
+ drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
- mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
- mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
- mdev->p_uuid[UI_BITMAP] = 0UL;
+ device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
+ device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
+ device->p_uuid[UI_BITMAP] = 0UL;
- drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
+ drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
*rule_nr = 35;
} else {
- dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
+ drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
*rule_nr = 37;
}
@@ -2811,8 +2853,8 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
}
/* Common power [off|failure] */
- rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
- (mdev->p_uuid[UI_FLAGS] & 2);
+ rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
+ (device->p_uuid[UI_FLAGS] & 2);
/* lowest bit is set when we were primary,
* next bit (weight 2) is set when peer was primary */
*rule_nr = 40;
@@ -2822,72 +2864,72 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
- dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
+ dc = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
return dc ? -1 : 1;
}
}
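The "roles at crash time" value combined just above packs both roles into two bits, exactly as the comment says (bit 0: this node was primary, weight 2: the peer was primary). A standalone sketch, not part of the patch, enumerating the four values handled by the switch:

/* Illustrative sketch only, not taken from the patch. */
#include <stdio.h>

int main(void)
{
        for (int self_pri = 0; self_pri <= 1; self_pri++)
                for (int peer_pri = 0; peer_pri <= 1; peer_pri++) {
                        /* same shape as rct = (CRASHED_PRIMARY ? 1 : 0) + (peer flags & 2) */
                        int rct = (self_pri ? 1 : 0) + (peer_pri ? 2 : 0);
                        printf("self_pri=%d peer_pri=%d -> rct=%d\n",
                               self_pri, peer_pri, rct);
                }
        return 0;
}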
*rule_nr = 50;
- peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
+ peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
if (self == peer)
return -1;
*rule_nr = 51;
- peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
+ peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->tconn->agreed_pro_version < 96 ?
- (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
- (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
- peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
+ if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
+ (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
+ (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
+ peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the last start of
resync as sync source modifications of the peer's UUIDs. */
- if (mdev->tconn->agreed_pro_version < 91)
+ if (first_peer_device(device)->connection->agreed_pro_version < 91)
return -1091;
- mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
- mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
+ device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
+ device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
- dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
- drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
+ drbd_info(device, "Lost last syncUUID packet, corrected:\n");
+ drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
return -1;
}
}
*rule_nr = 60;
- self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
+ self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
- peer = mdev->p_uuid[i] & ~((u64)1);
+ peer = device->p_uuid[i] & ~((u64)1);
if (self == peer)
return -2;
}
*rule_nr = 70;
- self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
- peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
+ self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
+ peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
if (self == peer)
return 1;
*rule_nr = 71;
- self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
+ self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->tconn->agreed_pro_version < 96 ?
- (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
- (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
- self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
+ if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
+ (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
+ (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
+ self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the last start of
resync as sync source modifications of our UUIDs. */
- if (mdev->tconn->agreed_pro_version < 91)
+ if (first_peer_device(device)->connection->agreed_pro_version < 91)
return -1091;
- __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
- __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
+ __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
+ __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
- dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
- drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
- mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
+ drbd_info(device, "Last syncUUID did not get through, corrected:\n");
+ drbd_uuid_dump(device, "self", device->ldev->md.uuid,
+ device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
return 1;
}
@@ -2895,24 +2937,24 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
*rule_nr = 80;
- peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
+ peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
- self = mdev->ldev->md.uuid[i] & ~((u64)1);
+ self = device->ldev->md.uuid[i] & ~((u64)1);
if (self == peer)
return 2;
}
*rule_nr = 90;
- self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
- peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
+ self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
+ peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
if (self == peer && self != ((u64)0))
return 100;
*rule_nr = 100;
for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
- self = mdev->ldev->md.uuid[i] & ~((u64)1);
+ self = device->ldev->md.uuid[i] & ~((u64)1);
for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
- peer = mdev->p_uuid[j] & ~((u64)1);
+ peer = device->p_uuid[j] & ~((u64)1);
if (self == peer)
return -100;
}
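Every comparison in drbd_uuid_compare() masks the identifiers with ~((u64)1) first, so bit 0 is treated as a flag and only the upper 63 bits decide whether two UUIDs name the same data generation. A standalone sketch of that masking, not part of the patch:

/* Illustrative sketch only, not taken from the patch. */
#include <stdint.h>
#include <stdio.h>

static int same_generation(uint64_t a, uint64_t b)
{
        /* same masking as the "& ~((u64)1)" used throughout the function above */
        return (a & ~(uint64_t)1) == (b & ~(uint64_t)1);
}

int main(void)
{
        uint64_t u = 0x1122334455667788ULL;
        /* prints "1 0": a differing flag bit is ignored, a real difference is not */
        printf("%d %d\n", same_generation(u, u | 1), same_generation(u, u + 2));
        return 0;
}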
@@ -2924,36 +2966,38 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
/* drbd_sync_handshake() returns the new conn state on success, or
CONN_MASK (-1) on failure.
*/
-static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
+static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
+ enum drbd_role peer_role,
enum drbd_disk_state peer_disk) __must_hold(local)
{
+ struct drbd_device *device = peer_device->device;
enum drbd_conns rv = C_MASK;
enum drbd_disk_state mydisk;
struct net_conf *nc;
int hg, rule_nr, rr_conflict, tentative;
- mydisk = mdev->state.disk;
+ mydisk = device->state.disk;
if (mydisk == D_NEGOTIATING)
- mydisk = mdev->new_state_tmp.disk;
+ mydisk = device->new_state_tmp.disk;
- dev_info(DEV, "drbd_sync_handshake:\n");
+ drbd_info(device, "drbd_sync_handshake:\n");
- spin_lock_irq(&mdev->ldev->md.uuid_lock);
- drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
- drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
- mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
+ spin_lock_irq(&device->ldev->md.uuid_lock);
+ drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
+ drbd_uuid_dump(device, "peer", device->p_uuid,
+ device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
- hg = drbd_uuid_compare(mdev, &rule_nr);
- spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+ hg = drbd_uuid_compare(device, &rule_nr);
+ spin_unlock_irq(&device->ldev->md.uuid_lock);
- dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
+ drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
if (hg == -1000) {
- dev_alert(DEV, "Unrelated data, aborting!\n");
+ drbd_alert(device, "Unrelated data, aborting!\n");
return C_MASK;
}
if (hg < -1000) {
- dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
+ drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
return C_MASK;
}
@@ -2963,38 +3007,38 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
hg = mydisk > D_INCONSISTENT ? 1 : -1;
if (f)
hg = hg*2;
- dev_info(DEV, "Becoming sync %s due to disk states.\n",
+ drbd_info(device, "Becoming sync %s due to disk states.\n",
hg > 0 ? "source" : "target");
}
if (abs(hg) == 100)
- drbd_khelper(mdev, "initial-split-brain");
+ drbd_khelper(device, "initial-split-brain");
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(peer_device->connection->net_conf);
if (hg == 100 || (hg == -100 && nc->always_asbp)) {
- int pcount = (mdev->state.role == R_PRIMARY)
+ int pcount = (device->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
int forced = (hg == -100);
switch (pcount) {
case 0:
- hg = drbd_asb_recover_0p(mdev);
+ hg = drbd_asb_recover_0p(peer_device);
break;
case 1:
- hg = drbd_asb_recover_1p(mdev);
+ hg = drbd_asb_recover_1p(peer_device);
break;
case 2:
- hg = drbd_asb_recover_2p(mdev);
+ hg = drbd_asb_recover_2p(peer_device);
break;
}
if (abs(hg) < 100) {
- dev_warn(DEV, "Split-Brain detected, %d primaries, "
+ drbd_warn(device, "Split-Brain detected, %d primaries, "
"automatically solved. Sync from %s node\n",
pcount, (hg < 0) ? "peer" : "this");
if (forced) {
- dev_warn(DEV, "Doing a full sync, since"
+ drbd_warn(device, "Doing a full sync, since"
" UUIDs where ambiguous.\n");
hg = hg*2;
}
@@ -3002,13 +3046,13 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
if (hg == -100) {
- if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
+ if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
hg = -1;
- if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
+ if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
hg = 1;
if (abs(hg) < 100)
- dev_warn(DEV, "Split-Brain detected, manually solved. "
+ drbd_warn(device, "Split-Brain detected, manually solved. "
"Sync from %s node\n",
(hg < 0) ? "peer" : "this");
}
@@ -3021,44 +3065,44 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
* after an attempted attach on a diskless node.
* We just refuse to attach -- well, we drop the "connection"
* to that disk, in a way... */
- dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
- drbd_khelper(mdev, "split-brain");
+ drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
+ drbd_khelper(device, "split-brain");
return C_MASK;
}
if (hg > 0 && mydisk <= D_INCONSISTENT) {
- dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
+ drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
return C_MASK;
}
if (hg < 0 && /* by intention we do not use mydisk here. */
- mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
+ device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
switch (rr_conflict) {
case ASB_CALL_HELPER:
- drbd_khelper(mdev, "pri-lost");
+ drbd_khelper(device, "pri-lost");
/* fall through */
case ASB_DISCONNECT:
- dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
+ drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
return C_MASK;
case ASB_VIOLENTLY:
- dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
+ drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
"assumption\n");
}
}
- if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
+ if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
if (hg == 0)
- dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
+ drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
else
- dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
+ drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.",
drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
abs(hg) >= 2 ? "full" : "bit-map based");
return C_MASK;
}
if (abs(hg) >= 2) {
- dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
+ drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
+ if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
BM_LOCKED_SET_ALLOWED))
return C_MASK;
}
@@ -3069,9 +3113,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
rv = C_WF_BITMAP_T;
} else {
rv = C_CONNECTED;
- if (drbd_bm_total_weight(mdev)) {
- dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
- drbd_bm_total_weight(mdev));
+ if (drbd_bm_total_weight(device)) {
+ drbd_info(device, "No resync, but %lu bits in bitmap!\n",
+ drbd_bm_total_weight(device));
}
}
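As a reading aid for the hunks above, and not part of the patch, a hedged sketch of how the handshake result hg is acted on: the sign picks the sync direction, the magnitude separates bitmap-based from full sync, and the special values mark split-brain, unrelated data, or a peer whose protocol is too old:

/* Illustrative sketch only, not taken from the patch; hedged reading of the
 * visible code in drbd_sync_handshake(). */
#include <stdio.h>
#include <stdlib.h>

static const char *handshake_outcome(int hg)
{
        if (hg == -1000)
                return "unrelated data, abort";
        if (hg < -1000)
                return "peer protocol too old to resolve, abort";
        if (abs(hg) == 100)
                return "split-brain, try after-sb policies / helper";
        if (hg == 0)
                return "no resync, just connect";
        if (abs(hg) >= 2)
                return hg > 0 ? "full sync, this node is SyncSource"
                              : "full sync, this node is SyncTarget";
        return hg > 0 ? "bitmap-based sync, this node is SyncSource"
                      : "bitmap-based sync, this node is SyncTarget";
}

int main(void)
{
        const int samples[] = { 0, 1, -1, 2, -2, 100, -100, -1000, -1091 };
        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("hg=%5d: %s\n", samples[i], handshake_outcome(samples[i]));
        return 0;
}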
@@ -3092,7 +3136,7 @@ static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
return peer;
}
-static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_protocol *p = pi->data;
enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
@@ -3110,58 +3154,58 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
cf = be32_to_cpu(p->conn_flags);
p_discard_my_data = cf & CF_DISCARD_MY_DATA;
- if (tconn->agreed_pro_version >= 87) {
+ if (connection->agreed_pro_version >= 87) {
int err;
if (pi->size > sizeof(integrity_alg))
return -EIO;
- err = drbd_recv_all(tconn, integrity_alg, pi->size);
+ err = drbd_recv_all(connection, integrity_alg, pi->size);
if (err)
return err;
integrity_alg[SHARED_SECRET_MAX - 1] = 0;
}
if (pi->cmd != P_PROTOCOL_UPDATE) {
- clear_bit(CONN_DRY_RUN, &tconn->flags);
+ clear_bit(CONN_DRY_RUN, &connection->flags);
if (cf & CF_DRY_RUN)
- set_bit(CONN_DRY_RUN, &tconn->flags);
+ set_bit(CONN_DRY_RUN, &connection->flags);
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
if (p_proto != nc->wire_protocol) {
- conn_err(tconn, "incompatible %s settings\n", "protocol");
+ drbd_err(connection, "incompatible %s settings\n", "protocol");
goto disconnect_rcu_unlock;
}
if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
- conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
+ drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri");
goto disconnect_rcu_unlock;
}
if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
- conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
+ drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri");
goto disconnect_rcu_unlock;
}
if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
- conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
+ drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri");
goto disconnect_rcu_unlock;
}
if (p_discard_my_data && nc->discard_my_data) {
- conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
+ drbd_err(connection, "incompatible %s settings\n", "discard-my-data");
goto disconnect_rcu_unlock;
}
if (p_two_primaries != nc->two_primaries) {
- conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
+ drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries");
goto disconnect_rcu_unlock;
}
if (strcmp(integrity_alg, nc->integrity_alg)) {
- conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
+ drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg");
goto disconnect_rcu_unlock;
}
@@ -3182,7 +3226,7 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
if (!peer_integrity_tfm) {
- conn_err(tconn, "peer data-integrity-alg %s not supported\n",
+ drbd_err(connection, "peer data-integrity-alg %s not supported\n",
integrity_alg);
goto disconnect;
}
@@ -3191,20 +3235,20 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
int_dig_in = kmalloc(hash_size, GFP_KERNEL);
int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
if (!(int_dig_in && int_dig_vv)) {
- conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
+ drbd_err(connection, "Allocation of buffers for data integrity checking failed\n");
goto disconnect;
}
}
new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_net_conf) {
- conn_err(tconn, "Allocation of new net_conf failed\n");
+ drbd_err(connection, "Allocation of new net_conf failed\n");
goto disconnect;
}
- mutex_lock(&tconn->data.mutex);
- mutex_lock(&tconn->conf_update);
- old_net_conf = tconn->net_conf;
+ mutex_lock(&connection->data.mutex);
+ mutex_lock(&connection->resource->conf_update);
+ old_net_conf = connection->net_conf;
*new_net_conf = *old_net_conf;
new_net_conf->wire_protocol = p_proto;
@@ -3213,19 +3257,19 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
new_net_conf->two_primaries = p_two_primaries;
- rcu_assign_pointer(tconn->net_conf, new_net_conf);
- mutex_unlock(&tconn->conf_update);
- mutex_unlock(&tconn->data.mutex);
+ rcu_assign_pointer(connection->net_conf, new_net_conf);
+ mutex_unlock(&connection->resource->conf_update);
+ mutex_unlock(&connection->data.mutex);
- crypto_free_hash(tconn->peer_integrity_tfm);
- kfree(tconn->int_dig_in);
- kfree(tconn->int_dig_vv);
- tconn->peer_integrity_tfm = peer_integrity_tfm;
- tconn->int_dig_in = int_dig_in;
- tconn->int_dig_vv = int_dig_vv;
+ crypto_free_hash(connection->peer_integrity_tfm);
+ kfree(connection->int_dig_in);
+ kfree(connection->int_dig_vv);
+ connection->peer_integrity_tfm = peer_integrity_tfm;
+ connection->int_dig_in = int_dig_in;
+ connection->int_dig_vv = int_dig_vv;
if (strcmp(old_net_conf->integrity_alg, integrity_alg))
- conn_info(tconn, "peer data-integrity-alg: %s\n",
+ drbd_info(connection, "peer data-integrity-alg: %s\n",
integrity_alg[0] ? integrity_alg : "(none)");
synchronize_rcu();
@@ -3238,7 +3282,7 @@ disconnect:
crypto_free_hash(peer_integrity_tfm);
kfree(int_dig_in);
kfree(int_dig_vv);
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
@@ -3247,7 +3291,8 @@ disconnect:
* return: NULL (alg name was "")
* ERR_PTR(error) if something goes wrong
* or the crypto hash ptr, if it worked out ok. */
-struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
+static
+struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
const char *alg, const char *name)
{
struct crypto_hash *tfm;
@@ -3257,21 +3302,21 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
- dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
+ drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
alg, name, PTR_ERR(tfm));
return tfm;
}
return tfm;
}
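drbd_crypto_alloc_digest_safe() deliberately has a three-way result, as its comment states: NULL when no algorithm name was given, an ERR_PTR-encoded error on failure, or a usable transform. The callers in receive_SyncParam() below test IS_ERR() accordingly; a standalone sketch of that caller pattern, not part of the patch and using stand-in macros so it builds outside the kernel:

/* Illustrative sketch only, not taken from the patch. */
#include <stdio.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)    ((long)(ptr))

static void handle(void *tfm, const char *what)
{
        if (tfm == NULL)
                printf("%s: not configured, skip\n", what);
        else if (IS_ERR(tfm))
                printf("%s: allocation failed (%ld), disconnect\n", what, PTR_ERR(tfm));
        else
                printf("%s: usable transform\n", what);
}

int main(void)
{
        int dummy;
        handle(NULL, "verify-alg");
        handle(ERR_PTR(-2), "csums-alg");       /* -ENOENT */
        handle(&dummy, "verify-alg");
        return 0;
}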
-static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
+static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
{
- void *buffer = tconn->data.rbuf;
+ void *buffer = connection->data.rbuf;
int size = pi->size;
while (size) {
int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
- s = drbd_recv(tconn, buffer, s);
+ s = drbd_recv(connection, buffer, s);
if (s <= 0) {
if (s < 0)
return s;
@@ -3295,30 +3340,32 @@ static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info
*
* (We can also end up here if drbd is misconfigured.)
*/
-static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
+static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
{
- conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
+ drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
cmdname(pi->cmd), pi->vnr);
- return ignore_remaining_packet(tconn, pi);
+ return ignore_remaining_packet(connection, pi);
}
-static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
struct crypto_hash *verify_tfm = NULL;
struct crypto_hash *csums_tfm = NULL;
struct net_conf *old_net_conf, *new_net_conf = NULL;
struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
- const int apv = tconn->agreed_pro_version;
+ const int apv = connection->agreed_pro_version;
struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
int fifo_size = 0;
int err;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
- return config_unknown_volume(tconn, pi);
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
+ return config_unknown_volume(connection, pi);
+ device = peer_device->device;
exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
@@ -3327,7 +3374,7 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
if (pi->size > exp_max_sz) {
- dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
+ drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
pi->size, exp_max_sz);
return -EIO;
}
@@ -3338,33 +3385,33 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
} else if (apv <= 94) {
header_size = sizeof(struct p_rs_param_89);
data_size = pi->size - header_size;
- D_ASSERT(data_size == 0);
+ D_ASSERT(device, data_size == 0);
} else {
header_size = sizeof(struct p_rs_param_95);
data_size = pi->size - header_size;
- D_ASSERT(data_size == 0);
+ D_ASSERT(device, data_size == 0);
}
/* initialize verify_alg and csums_alg */
p = pi->data;
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- err = drbd_recv_all(mdev->tconn, p, header_size);
+ err = drbd_recv_all(peer_device->connection, p, header_size);
if (err)
return err;
- mutex_lock(&mdev->tconn->conf_update);
- old_net_conf = mdev->tconn->net_conf;
- if (get_ldev(mdev)) {
+ mutex_lock(&connection->resource->conf_update);
+ old_net_conf = peer_device->connection->net_conf;
+ if (get_ldev(device)) {
new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
- put_ldev(mdev);
- mutex_unlock(&mdev->tconn->conf_update);
- dev_err(DEV, "Allocation of new disk_conf failed\n");
+ put_ldev(device);
+ mutex_unlock(&connection->resource->conf_update);
+ drbd_err(device, "Allocation of new disk_conf failed\n");
return -ENOMEM;
}
- old_disk_conf = mdev->ldev->disk_conf;
+ old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
@@ -3373,37 +3420,37 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
if (apv >= 88) {
if (apv == 88) {
if (data_size > SHARED_SECRET_MAX || data_size == 0) {
- dev_err(DEV, "verify-alg of wrong size, "
+ drbd_err(device, "verify-alg of wrong size, "
"peer wants %u, accepting only up to %u byte\n",
data_size, SHARED_SECRET_MAX);
err = -EIO;
goto reconnect;
}
- err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
+ err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
if (err)
goto reconnect;
/* we expect NUL terminated string */
/* but just in case someone tries to be evil */
- D_ASSERT(p->verify_alg[data_size-1] == 0);
+ D_ASSERT(device, p->verify_alg[data_size-1] == 0);
p->verify_alg[data_size-1] = 0;
} else /* apv >= 89 */ {
/* we still expect NUL terminated strings */
/* but just in case someone tries to be evil */
- D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
- D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
+ D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
+ D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
p->verify_alg[SHARED_SECRET_MAX-1] = 0;
p->csums_alg[SHARED_SECRET_MAX-1] = 0;
}
if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
- if (mdev->state.conn == C_WF_REPORT_PARAMS) {
- dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
+ if (device->state.conn == C_WF_REPORT_PARAMS) {
+ drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
old_net_conf->verify_alg, p->verify_alg);
goto disconnect;
}
- verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
+ verify_tfm = drbd_crypto_alloc_digest_safe(device,
p->verify_alg, "verify-alg");
if (IS_ERR(verify_tfm)) {
verify_tfm = NULL;
@@ -3412,12 +3459,12 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
}
if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
- if (mdev->state.conn == C_WF_REPORT_PARAMS) {
- dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
+ if (device->state.conn == C_WF_REPORT_PARAMS) {
+ drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
old_net_conf->csums_alg, p->csums_alg);
goto disconnect;
}
- csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
+ csums_tfm = drbd_crypto_alloc_digest_safe(device,
p->csums_alg, "csums-alg");
if (IS_ERR(csums_tfm)) {
csums_tfm = NULL;
@@ -3432,11 +3479,11 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
- if (fifo_size != mdev->rs_plan_s->size) {
+ if (fifo_size != device->rs_plan_s->size) {
new_plan = fifo_alloc(fifo_size);
if (!new_plan) {
- dev_err(DEV, "kmalloc of fifo_buffer failed");
- put_ldev(mdev);
+ drbd_err(device, "kmalloc of fifo_buffer failed");
+ put_ldev(device);
goto disconnect;
}
}
@@ -3445,7 +3492,7 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
if (verify_tfm || csums_tfm) {
new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_net_conf) {
- dev_err(DEV, "Allocation of new net_conf failed\n");
+ drbd_err(device, "Allocation of new net_conf failed\n");
goto disconnect;
}
@@ -3454,32 +3501,32 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
if (verify_tfm) {
strcpy(new_net_conf->verify_alg, p->verify_alg);
new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
- crypto_free_hash(mdev->tconn->verify_tfm);
- mdev->tconn->verify_tfm = verify_tfm;
- dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
+ crypto_free_hash(peer_device->connection->verify_tfm);
+ peer_device->connection->verify_tfm = verify_tfm;
+ drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
}
if (csums_tfm) {
strcpy(new_net_conf->csums_alg, p->csums_alg);
new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
- crypto_free_hash(mdev->tconn->csums_tfm);
- mdev->tconn->csums_tfm = csums_tfm;
- dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
+ crypto_free_hash(peer_device->connection->csums_tfm);
+ peer_device->connection->csums_tfm = csums_tfm;
+ drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
}
- rcu_assign_pointer(tconn->net_conf, new_net_conf);
+ rcu_assign_pointer(connection->net_conf, new_net_conf);
}
}
if (new_disk_conf) {
- rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
- put_ldev(mdev);
+ rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
+ put_ldev(device);
}
if (new_plan) {
- old_plan = mdev->rs_plan_s;
- rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+ old_plan = device->rs_plan_s;
+ rcu_assign_pointer(device->rs_plan_s, new_plan);
}
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&connection->resource->conf_update);
synchronize_rcu();
if (new_net_conf)
kfree(old_net_conf);
@@ -3490,30 +3537,30 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
reconnect:
if (new_disk_conf) {
- put_ldev(mdev);
+ put_ldev(device);
kfree(new_disk_conf);
}
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&connection->resource->conf_update);
return -EIO;
disconnect:
kfree(new_plan);
if (new_disk_conf) {
- put_ldev(mdev);
+ put_ldev(device);
kfree(new_disk_conf);
}
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&connection->resource->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
crypto_free_hash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
crypto_free_hash(verify_tfm);
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
/* warn if the arguments differ by more than 12.5% */
-static void warn_if_differ_considerably(struct drbd_conf *mdev,
+static void warn_if_differ_considerably(struct drbd_device *device,
const char *s, sector_t a, sector_t b)
{
sector_t d;
@@ -3521,54 +3568,56 @@ static void warn_if_differ_considerably(struct drbd_conf *mdev,
return;
d = (a > b) ? (a - b) : (b - a);
if (d > (a>>3) || d > (b>>3))
- dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
+ drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
(unsigned long long)a, (unsigned long long)b);
}
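The 12.5% in the comment above comes from the right shift: d > (a >> 3) compares the difference against one eighth of each value. A standalone sketch with concrete numbers, not part of the patch:

/* Illustrative sketch only, not taken from the patch. */
#include <stdio.h>

static int differ_considerably(unsigned long long a, unsigned long long b)
{
        unsigned long long d = (a > b) ? (a - b) : (b - a);
        /* same test as above: warn once the gap exceeds 1/8 of either value */
        return d > (a >> 3) || d > (b >> 3);
}

int main(void)
{
        /* 1000 vs 1100 sectors: difference 100 <= 125 (12.5% of 1000) -> quiet */
        printf("%d\n", differ_considerably(1000, 1100));
        /* 1000 vs 1200 sectors: difference 200 > 125 -> warn */
        printf("%d\n", differ_considerably(1000, 1200));
        return 0;
}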
-static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_sizes *p = pi->data;
enum determine_dev_size dd = DS_UNCHANGED;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
enum dds_flags ddsf;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
- return config_unknown_volume(tconn, pi);
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
+ return config_unknown_volume(connection, pi);
+ device = peer_device->device;
p_size = be64_to_cpu(p->d_size);
p_usize = be64_to_cpu(p->u_size);
/* just store the peer's disk size for now.
* we still need to figure out whether we accept that. */
- mdev->p_size = p_size;
+ device->p_size = p_size;
- if (get_ldev(mdev)) {
+ if (get_ldev(device)) {
rcu_read_lock();
- my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
- warn_if_differ_considerably(mdev, "lower level device sizes",
- p_size, drbd_get_max_capacity(mdev->ldev));
- warn_if_differ_considerably(mdev, "user requested size",
+ warn_if_differ_considerably(device, "lower level device sizes",
+ p_size, drbd_get_max_capacity(device->ldev));
+ warn_if_differ_considerably(device, "user requested size",
p_usize, my_usize);
/* if this is the first connect, or an otherwise expected
* param exchange, choose the minimum */
- if (mdev->state.conn == C_WF_REPORT_PARAMS)
+ if (device->state.conn == C_WF_REPORT_PARAMS)
p_usize = min_not_zero(my_usize, p_usize);
/* Never shrink a device with usable data during connect.
But allow online shrinking if we are connected. */
- if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
- drbd_get_capacity(mdev->this_bdev) &&
- mdev->state.disk >= D_OUTDATED &&
- mdev->state.conn < C_CONNECTED) {
- dev_err(DEV, "The peer's disk size is too small!\n");
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
- put_ldev(mdev);
+ if (drbd_new_dev_size(device, device->ldev, p_usize, 0) <
+ drbd_get_capacity(device->this_bdev) &&
+ device->state.disk >= D_OUTDATED &&
+ device->state.conn < C_CONNECTED) {
+ drbd_err(device, "The peer's disk size is too small!\n");
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
+ put_ldev(device);
return -EIO;
}
@@ -3577,145 +3626,147 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
- dev_err(DEV, "Allocation of new disk_conf failed\n");
- put_ldev(mdev);
+ drbd_err(device, "Allocation of new disk_conf failed\n");
+ put_ldev(device);
return -ENOMEM;
}
- mutex_lock(&mdev->tconn->conf_update);
- old_disk_conf = mdev->ldev->disk_conf;
+ mutex_lock(&connection->resource->conf_update);
+ old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
new_disk_conf->disk_size = p_usize;
- rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
- mutex_unlock(&mdev->tconn->conf_update);
+ rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
+ mutex_unlock(&connection->resource->conf_update);
synchronize_rcu();
kfree(old_disk_conf);
- dev_info(DEV, "Peer sets u_size to %lu sectors\n",
+ drbd_info(device, "Peer sets u_size to %lu sectors\n",
(unsigned long)my_usize);
}
- put_ldev(mdev);
+ put_ldev(device);
}
ddsf = be16_to_cpu(p->dds_flags);
- if (get_ldev(mdev)) {
- dd = drbd_determine_dev_size(mdev, ddsf, NULL);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ dd = drbd_determine_dev_size(device, ddsf, NULL);
+ put_ldev(device);
if (dd == DS_ERROR)
return -EIO;
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
} else {
/* I am diskless, need to accept the peer's size. */
- drbd_set_my_capacity(mdev, p_size);
+ drbd_set_my_capacity(device, p_size);
}
- mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
- drbd_reconsider_max_bio_size(mdev);
+ device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
+ drbd_reconsider_max_bio_size(device);
- if (get_ldev(mdev)) {
- if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
- mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
+ if (get_ldev(device)) {
+ if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
+ device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
ldsc = 1;
}
- put_ldev(mdev);
+ put_ldev(device);
}
- if (mdev->state.conn > C_WF_REPORT_PARAMS) {
+ if (device->state.conn > C_WF_REPORT_PARAMS) {
if (be64_to_cpu(p->c_size) !=
- drbd_get_capacity(mdev->this_bdev) || ldsc) {
+ drbd_get_capacity(device->this_bdev) || ldsc) {
/* we have different sizes, probably peer
* needs to know my new size... */
- drbd_send_sizes(mdev, 0, ddsf);
+ drbd_send_sizes(peer_device, 0, ddsf);
}
- if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
- (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) {
- if (mdev->state.pdsk >= D_INCONSISTENT &&
- mdev->state.disk >= D_INCONSISTENT) {
+ if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
+ (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
+ if (device->state.pdsk >= D_INCONSISTENT &&
+ device->state.disk >= D_INCONSISTENT) {
if (ddsf & DDSF_NO_RESYNC)
- dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
+ drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
else
- resync_after_online_grow(mdev);
+ resync_after_online_grow(device);
} else
- set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+ set_bit(RESYNC_AFTER_NEG, &device->flags);
}
}
return 0;
}
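The size negotiation above refuses to shrink a device that still holds usable data while the connection is not yet established, but tolerates an online shrink once connected, as the "Never shrink ... during connect" comment says. A standalone sketch of that predicate, not part of the patch:

/* Illustrative sketch only, not taken from the patch. */
#include <stdbool.h>
#include <stdio.h>

static bool reject_shrink(unsigned long long new_size,
                          unsigned long long cur_size,
                          bool disk_has_usable_data,   /* disk state >= D_OUTDATED */
                          bool already_connected)      /* conn state >= C_CONNECTED */
{
        return new_size < cur_size && disk_has_usable_data && !already_connected;
}

int main(void)
{
        printf("%d\n", reject_shrink(900, 1000, true, false));  /* 1: refuse during connect */
        printf("%d\n", reject_shrink(900, 1000, true, true));   /* 0: online shrink allowed */
        return 0;
}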
-static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_uuids *p = pi->data;
u64 *p_uuid;
int i, updated_uuids = 0;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
- return config_unknown_volume(tconn, pi);
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
+ return config_unknown_volume(connection, pi);
+ device = peer_device->device;
p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
if (!p_uuid) {
- dev_err(DEV, "kmalloc of p_uuid failed\n");
+ drbd_err(device, "kmalloc of p_uuid failed\n");
return false;
}
for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
p_uuid[i] = be64_to_cpu(p->uuid[i]);
- kfree(mdev->p_uuid);
- mdev->p_uuid = p_uuid;
+ kfree(device->p_uuid);
+ device->p_uuid = p_uuid;
- if (mdev->state.conn < C_CONNECTED &&
- mdev->state.disk < D_INCONSISTENT &&
- mdev->state.role == R_PRIMARY &&
- (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
- dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
- (unsigned long long)mdev->ed_uuid);
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ if (device->state.conn < C_CONNECTED &&
+ device->state.disk < D_INCONSISTENT &&
+ device->state.role == R_PRIMARY &&
+ (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
+ drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
+ (unsigned long long)device->ed_uuid);
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
- if (get_ldev(mdev)) {
+ if (get_ldev(device)) {
int skip_initial_sync =
- mdev->state.conn == C_CONNECTED &&
- mdev->tconn->agreed_pro_version >= 90 &&
- mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
+ device->state.conn == C_CONNECTED &&
+ peer_device->connection->agreed_pro_version >= 90 &&
+ device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
(p_uuid[UI_FLAGS] & 8);
if (skip_initial_sync) {
- dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
- drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
+ drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
+ drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
"clear_n_write from receive_uuids",
BM_LOCKED_TEST_ALLOWED);
- _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
- _drbd_uuid_set(mdev, UI_BITMAP, 0);
- _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
+ _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
+ _drbd_uuid_set(device, UI_BITMAP, 0);
+ _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
updated_uuids = 1;
}
- put_ldev(mdev);
- } else if (mdev->state.disk < D_INCONSISTENT &&
- mdev->state.role == R_PRIMARY) {
+ put_ldev(device);
+ } else if (device->state.disk < D_INCONSISTENT &&
+ device->state.role == R_PRIMARY) {
/* I am a diskless primary, the peer just created a new current UUID
for me. */
- updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
+ updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
}
/* Before we test for the disk state, we should wait until a possibly
ongoing cluster-wide state change has finished. That is important if
we are primary and are detaching from our disk. We need to see the
new disk state... */
- mutex_lock(mdev->state_mutex);
- mutex_unlock(mdev->state_mutex);
- if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
- updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
+ mutex_lock(device->state_mutex);
+ mutex_unlock(device->state_mutex);
+ if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
+ updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
if (updated_uuids)
- drbd_print_uuids(mdev, "receiver updated UUIDs to");
+ drbd_print_uuids(device, "receiver updated UUIDs to");
return 0;
}
@@ -3751,38 +3802,40 @@ static union drbd_state convert_state(union drbd_state ps)
return ms;
}
-static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_req_state *p = pi->data;
union drbd_state mask, val;
enum drbd_state_rv rv;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
- if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
- mutex_is_locked(mdev->state_mutex)) {
- drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
+ if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) &&
+ mutex_is_locked(device->state_mutex)) {
+ drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG);
return 0;
}
mask = convert_state(mask);
val = convert_state(val);
- rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
- drbd_send_sr_reply(mdev, rv);
+ rv = drbd_change_state(device, CS_VERBOSE, mask, val);
+ drbd_send_sr_reply(peer_device, rv);
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
return 0;
}
-static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_req_state *p = pi->data;
union drbd_state mask, val;
@@ -3791,46 +3844,48 @@ static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
- if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
- mutex_is_locked(&tconn->cstate_mutex)) {
- conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
+ if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
+ mutex_is_locked(&connection->cstate_mutex)) {
+ conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
return 0;
}
mask = convert_state(mask);
val = convert_state(val);
- rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
- conn_send_sr_reply(tconn, rv);
+ rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
+ conn_send_sr_reply(connection, rv);
return 0;
}
-static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_state *p = pi->data;
union drbd_state os, ns, peer_state;
enum drbd_disk_state real_peer_disk;
enum chg_state_flags cs_flags;
int rv;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
- return config_unknown_volume(tconn, pi);
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
+ return config_unknown_volume(connection, pi);
+ device = peer_device->device;
peer_state.i = be32_to_cpu(p->state);
real_peer_disk = peer_state.disk;
if (peer_state.disk == D_NEGOTIATING) {
- real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
- dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
+ real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
+ drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
}
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
retry:
- os = ns = drbd_read_state(mdev);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ os = ns = drbd_read_state(device);
+ spin_unlock_irq(&device->resource->req_lock);
/* If some other part of the code (asender thread, timeout)
* already decided to close the connection again,
@@ -3862,8 +3917,8 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
* Maybe we should finish it up, too? */
else if (os.conn >= C_SYNC_SOURCE &&
peer_state.conn == C_CONNECTED) {
- if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
- drbd_resync_finished(mdev);
+ if (drbd_bm_total_weight(device) <= device->rs_failed)
+ drbd_resync_finished(device);
return 0;
}
}
@@ -3871,8 +3926,8 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
/* explicit verify finished notification, stop sector reached. */
if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
- ov_out_of_sync_print(mdev);
- drbd_resync_finished(mdev);
+ ov_out_of_sync_print(device);
+ drbd_resync_finished(device);
return 0;
}
@@ -3891,8 +3946,8 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
if (peer_state.conn == C_AHEAD)
ns.conn = C_BEHIND;
- if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
- get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
+ get_ldev_if_state(device, D_NEGOTIATING)) {
int cr; /* consider resync */
/* if we established a new connection */
@@ -3904,7 +3959,7 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
os.disk == D_NEGOTIATING));
/* if we have both been inconsistent, and the peer has been
* forced to be UpToDate with --overwrite-data */
- cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
+ cr |= test_bit(CONSIDER_RESYNC, &device->flags);
/* if we had been plain connected, and the admin requested to
* start a sync by "invalidate" or "invalidate-remote" */
cr |= (os.conn == C_CONNECTED &&
@@ -3912,55 +3967,55 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
peer_state.conn <= C_WF_BITMAP_T));
if (cr)
- ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
+ ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk);
- put_ldev(mdev);
+ put_ldev(device);
if (ns.conn == C_MASK) {
ns.conn = C_CONNECTED;
- if (mdev->state.disk == D_NEGOTIATING) {
- drbd_force_state(mdev, NS(disk, D_FAILED));
+ if (device->state.disk == D_NEGOTIATING) {
+ drbd_force_state(device, NS(disk, D_FAILED));
} else if (peer_state.disk == D_NEGOTIATING) {
- dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
+ drbd_err(device, "Disk attach process on the peer node was aborted.\n");
peer_state.disk = D_DISKLESS;
real_peer_disk = D_DISKLESS;
} else {
- if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
+ if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags))
return -EIO;
- D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
}
}
- spin_lock_irq(&mdev->tconn->req_lock);
- if (os.i != drbd_read_state(mdev).i)
+ spin_lock_irq(&device->resource->req_lock);
+ if (os.i != drbd_read_state(device).i)
goto retry;
- clear_bit(CONSIDER_RESYNC, &mdev->flags);
+ clear_bit(CONSIDER_RESYNC, &device->flags);
ns.peer = peer_state.role;
ns.pdsk = real_peer_disk;
ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
- ns.disk = mdev->new_state_tmp.disk;
+ ns.disk = device->new_state_tmp.disk;
cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
- if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
- test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
+ test_bit(NEW_CUR_UUID, &device->flags)) {
/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
for temporary network outages! */
- spin_unlock_irq(&mdev->tconn->req_lock);
- dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
- tl_clear(mdev->tconn);
- drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
- conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
+ spin_unlock_irq(&device->resource->req_lock);
+ drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
+ tl_clear(peer_device->connection);
+ drbd_uuid_new_current(device);
+ clear_bit(NEW_CUR_UUID, &device->flags);
+ conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
return -EIO;
}
- rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
- ns = drbd_read_state(mdev);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ rv = _drbd_set_state(device, ns, cs_flags, NULL);
+ ns = drbd_read_state(device);
+ spin_unlock_irq(&device->resource->req_lock);
if (rv < SS_SUCCESS) {
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
@@ -3970,47 +4025,49 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
/* we want resync, peer has not yet decided to sync... */
/* Nowadays only used when forcing a node into primary role and
setting its disk to UpToDate with that */
- drbd_send_uuids(mdev);
- drbd_send_current_state(mdev);
+ drbd_send_uuids(peer_device);
+ drbd_send_current_state(peer_device);
}
}
- clear_bit(DISCARD_MY_DATA, &mdev->flags);
+ clear_bit(DISCARD_MY_DATA, &device->flags);
- drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
+ drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
return 0;
}
-static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_rs_uuid *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- wait_event(mdev->misc_wait,
- mdev->state.conn == C_WF_SYNC_UUID ||
- mdev->state.conn == C_BEHIND ||
- mdev->state.conn < C_CONNECTED ||
- mdev->state.disk < D_NEGOTIATING);
+ wait_event(device->misc_wait,
+ device->state.conn == C_WF_SYNC_UUID ||
+ device->state.conn == C_BEHIND ||
+ device->state.conn < C_CONNECTED ||
+ device->state.disk < D_NEGOTIATING);
- /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
+ /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */
/* Here the _drbd_uuid_ functions are right, current should
_not_ be rotated into the history */
- if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
- _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
- _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
+ if (get_ldev_if_state(device, D_NEGOTIATING)) {
+ _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
+ _drbd_uuid_set(device, UI_BITMAP, 0UL);
- drbd_print_uuids(mdev, "updated sync uuid");
- drbd_start_resync(mdev, C_SYNC_TARGET);
+ drbd_print_uuids(device, "updated sync uuid");
+ drbd_start_resync(device, C_SYNC_TARGET);
- put_ldev(mdev);
+ put_ldev(device);
} else
- dev_err(DEV, "Ignoring SyncUUID packet!\n");
+ drbd_err(device, "Ignoring SyncUUID packet!\n");
return 0;
}
@@ -4022,27 +4079,27 @@ static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
* code upon failure.
*/
static int
-receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
+receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
unsigned long *p, struct bm_xfer_ctx *c)
{
unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
- drbd_header_size(mdev->tconn);
+ drbd_header_size(peer_device->connection);
unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
c->bm_words - c->word_offset);
unsigned int want = num_words * sizeof(*p);
int err;
if (want != size) {
- dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
+ drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size);
return -EIO;
}
if (want == 0)
return 0;
- err = drbd_recv_all(mdev->tconn, p, want);
+ err = drbd_recv_all(peer_device->connection, p, want);
if (err)
return err;
- drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
+ drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
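
Note: receive_bitmap_plain() above transfers the bitmap in chunks: each packet carries at most one socket buffer of longs (minus the header), capped by how many words are still outstanding, and the word/bit offsets in the bm_xfer_ctx track progress. A minimal userspace sketch of the same chunking arithmetic, with hypothetical names rather than the driver's helpers:

    #include <stdio.h>
    #include <stddef.h>
    #include <limits.h>

    /* hypothetical transfer context mirroring the word/bit offsets used above */
    struct xfer_ctx {
        size_t bm_words;     /* total bitmap size in longs */
        size_t word_offset;  /* longs transferred so far */
        size_t bit_offset;   /* same position, in bits */
    };

    /* how many longs fit into one payload of payload_bytes, capped by what is left */
    static size_t next_chunk_words(const struct xfer_ctx *c, size_t payload_bytes)
    {
        size_t fit = payload_bytes / sizeof(unsigned long);
        size_t left = c->bm_words - c->word_offset;
        return fit < left ? fit : left;
    }

    int main(void)
    {
        struct xfer_ctx c = { .bm_words = 1000 };
        size_t payload = 4096 - 8;    /* assume an 8-byte header, purely as an example */

        while (c.word_offset < c.bm_words) {
            size_t words = next_chunk_words(&c, payload);
            /* a real receiver would read words * sizeof(long) bytes here */
            c.word_offset += words;
            c.bit_offset = c.word_offset * sizeof(unsigned long) * CHAR_BIT;
            printf("chunk of %zu longs, now at word %zu\n", words, c.word_offset);
        }
        return 0;
    }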
@@ -4074,7 +4131,7 @@ static int dcbp_get_pad_bits(struct p_compressed_bm *p)
* code upon failure.
*/
static int
-recv_bm_rle_bits(struct drbd_conf *mdev,
+recv_bm_rle_bits(struct drbd_peer_device *peer_device,
struct p_compressed_bm *p,
struct bm_xfer_ctx *c,
unsigned int len)
@@ -4103,14 +4160,14 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
if (toggle) {
e = s + rl -1;
if (e >= c->bm_bits) {
- dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
+ drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
return -EIO;
}
- _drbd_bm_set_bits(mdev, s, e);
+ _drbd_bm_set_bits(peer_device->device, s, e);
}
if (have < bits) {
- dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
+ drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
have, bits, look_ahead,
(unsigned int)(bs.cur.b - p->code),
(unsigned int)bs.buf_len);
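
Note: recv_bm_rle_bits() above decodes a sequence of run lengths that alternate between runs of clear bits and runs of set bits; only the "set" runs touch the bitmap, and a run that would extend past bm_bits is rejected as overflow. The sketch below shows that toggling scheme with plain integers in place of the driver's variable-length-integer bitstream (all names are illustrative):

    #include <stdio.h>

    #define BM_BITS 64

    static unsigned long long bitmap;    /* toy 64-bit "bitmap" */

    static void set_bits(unsigned s, unsigned e)   /* inclusive range */
    {
        for (unsigned i = s; i <= e; i++)
            bitmap |= 1ULL << i;
    }

    /* decode alternating run lengths; first_is_set plays the role of the
     * toggle bit carried in the compressed-bitmap packet header */
    static int decode_runs(const unsigned *rl, unsigned n, int first_is_set)
    {
        unsigned s = 0;
        int toggle = first_is_set;

        for (unsigned i = 0; i < n; i++, toggle = !toggle) {
            unsigned e = s + rl[i] - 1;
            if (e >= BM_BITS)
                return -1;            /* bitmap overflow, as checked above */
            if (toggle)
                set_bits(s, e);       /* only the "set" runs are applied */
            s = e + 1;
        }
        return 0;
    }

    int main(void)
    {
        unsigned runs[] = { 10, 4, 20, 2 };   /* 10 clear, 4 set, 20 clear, 2 set */

        decode_runs(runs, 4, 0);
        printf("bitmap = %#llx\n", bitmap);   /* bits 10..13 and 34..35 set */
        return 0;
    }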
@@ -4143,28 +4200,28 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
* code upon failure.
*/
static int
-decode_bitmap_c(struct drbd_conf *mdev,
+decode_bitmap_c(struct drbd_peer_device *peer_device,
struct p_compressed_bm *p,
struct bm_xfer_ctx *c,
unsigned int len)
{
if (dcbp_get_code(p) == RLE_VLI_Bits)
- return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
+ return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));
/* other variants had been implemented for evaluation,
* but have been dropped as this one turned out to be "best"
* during all our tests. */
- dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
- conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
+ drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
+ conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
return -EIO;
}
-void INFO_bm_xfer_stats(struct drbd_conf *mdev,
+void INFO_bm_xfer_stats(struct drbd_device *device,
const char *direction, struct bm_xfer_ctx *c)
{
/* what would it take to transfer it "plaintext" */
- unsigned int header_size = drbd_header_size(mdev->tconn);
+ unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
unsigned int plain =
header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
@@ -4188,7 +4245,7 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
r = 1000;
r = 1000 - r;
- dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
+ drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
"total %u; compression: %u.%u%%\n",
direction,
c->bytes[1], c->packets[1],
@@ -4204,129 +4261,133 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
in order to be agnostic to the 32 vs 64 bits issue.
returns 0 on failure, 1 if we successfully received it. */
-static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct bm_xfer_ctx c;
int err;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
+ drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
/* you are supposed to send additional out-of-sync information
* if you actually set bits during this phase */
c = (struct bm_xfer_ctx) {
- .bm_bits = drbd_bm_bits(mdev),
- .bm_words = drbd_bm_words(mdev),
+ .bm_bits = drbd_bm_bits(device),
+ .bm_words = drbd_bm_words(device),
};
for(;;) {
if (pi->cmd == P_BITMAP)
- err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
+ err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
else if (pi->cmd == P_COMPRESSED_BITMAP) {
/* MAYBE: sanity check that we speak proto >= 90,
* and the feature is enabled! */
struct p_compressed_bm *p = pi->data;
- if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
- dev_err(DEV, "ReportCBitmap packet too large\n");
+ if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
+ drbd_err(device, "ReportCBitmap packet too large\n");
err = -EIO;
goto out;
}
if (pi->size <= sizeof(*p)) {
- dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
+ drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
err = -EIO;
goto out;
}
- err = drbd_recv_all(mdev->tconn, p, pi->size);
+ err = drbd_recv_all(peer_device->connection, p, pi->size);
if (err)
goto out;
- err = decode_bitmap_c(mdev, p, &c, pi->size);
+ err = decode_bitmap_c(peer_device, p, &c, pi->size);
} else {
- dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
+ drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
err = -EIO;
goto out;
}
c.packets[pi->cmd == P_BITMAP]++;
- c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
+ c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;
if (err <= 0) {
if (err < 0)
goto out;
break;
}
- err = drbd_recv_header(mdev->tconn, pi);
+ err = drbd_recv_header(peer_device->connection, pi);
if (err)
goto out;
}
- INFO_bm_xfer_stats(mdev, "receive", &c);
+ INFO_bm_xfer_stats(device, "receive", &c);
- if (mdev->state.conn == C_WF_BITMAP_T) {
+ if (device->state.conn == C_WF_BITMAP_T) {
enum drbd_state_rv rv;
- err = drbd_send_bitmap(mdev);
+ err = drbd_send_bitmap(device);
if (err)
goto out;
/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
- rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
- D_ASSERT(rv == SS_SUCCESS);
- } else if (mdev->state.conn != C_WF_BITMAP_S) {
+ rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+ D_ASSERT(device, rv == SS_SUCCESS);
+ } else if (device->state.conn != C_WF_BITMAP_S) {
/* admin may have requested C_DISCONNECTING,
* other threads may have noticed network errors */
- dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
- drbd_conn_str(mdev->state.conn));
+ drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
+ drbd_conn_str(device->state.conn));
}
err = 0;
out:
- drbd_bm_unlock(mdev);
- if (!err && mdev->state.conn == C_WF_BITMAP_S)
- drbd_start_resync(mdev, C_SYNC_SOURCE);
+ drbd_bm_unlock(device);
+ if (!err && device->state.conn == C_WF_BITMAP_S)
+ drbd_start_resync(device, C_SYNC_SOURCE);
return err;
}
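
Note: INFO_bm_xfer_stats() above estimates what the same bitmap would have cost uncompressed (one header per full data packet, plus one extra packet, plus the raw words) and reports the saving with one decimal place of a percent. A small standalone program reproducing the arithmetic with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        /* illustrative numbers, not taken from a real transfer */
        unsigned int header_size = 8;                  /* assumed header size */
        unsigned int data_size = 4096 - header_size;   /* payload bytes per packet */
        unsigned int bm_words = 100000;                /* bitmap size in longs */
        unsigned int total = 90000;                    /* bytes actually on the wire */

        /* plaintext estimate: headers for each full packet (plus one) + raw words */
        unsigned int packets = (bm_words + data_size - 1) / data_size + 1;
        unsigned int plain = header_size * packets + bm_words * sizeof(long);

        unsigned int r = 1000u * total / plain;        /* per mille actually sent */
        if (r > 1000)
            r = 1000;
        r = 1000 - r;                                  /* per mille saved */

        printf("plain %u, total %u; compression: %u.%u%%\n",
               plain, total, r / 10, r % 10);
        return 0;
    }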
-static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
{
- conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
+ drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
pi->cmd, pi->size);
- return ignore_remaining_packet(tconn, pi);
+ return ignore_remaining_packet(connection, pi);
}
-static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
{
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
- drbd_tcp_quickack(tconn->data.socket);
+ drbd_tcp_quickack(connection->data.socket);
return 0;
}
-static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_desc *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- switch (mdev->state.conn) {
+ switch (device->state.conn) {
case C_WF_SYNC_UUID:
case C_WF_BITMAP_T:
case C_BEHIND:
break;
default:
- dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
- drbd_conn_str(mdev->state.conn));
+ drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
+ drbd_conn_str(device->state.conn));
}
- drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
+ drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
return 0;
}
@@ -4334,7 +4395,7 @@ static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
struct data_cmd {
int expect_payload;
size_t pkt_size;
- int (*fn)(struct drbd_tconn *, struct packet_info *);
+ int (*fn)(struct drbd_connection *, struct packet_info *);
};
static struct data_cmd drbd_cmd_handler[] = {
@@ -4364,43 +4425,43 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
};
-static void drbdd(struct drbd_tconn *tconn)
+static void drbdd(struct drbd_connection *connection)
{
struct packet_info pi;
size_t shs; /* sub header size */
int err;
- while (get_t_state(&tconn->receiver) == RUNNING) {
+ while (get_t_state(&connection->receiver) == RUNNING) {
struct data_cmd *cmd;
- drbd_thread_current_set_cpu(&tconn->receiver);
- if (drbd_recv_header(tconn, &pi))
+ drbd_thread_current_set_cpu(&connection->receiver);
+ if (drbd_recv_header(connection, &pi))
goto err_out;
cmd = &drbd_cmd_handler[pi.cmd];
if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
- conn_err(tconn, "Unexpected data packet %s (0x%04x)",
+ drbd_err(connection, "Unexpected data packet %s (0x%04x)",
cmdname(pi.cmd), pi.cmd);
goto err_out;
}
shs = cmd->pkt_size;
if (pi.size > shs && !cmd->expect_payload) {
- conn_err(tconn, "No payload expected %s l:%d\n",
+ drbd_err(connection, "No payload expected %s l:%d\n",
cmdname(pi.cmd), pi.size);
goto err_out;
}
if (shs) {
- err = drbd_recv_all_warn(tconn, pi.data, shs);
+ err = drbd_recv_all_warn(connection, pi.data, shs);
if (err)
goto err_out;
pi.size -= shs;
}
- err = cmd->fn(tconn, &pi);
+ err = cmd->fn(connection, &pi);
if (err) {
- conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
+ drbd_err(connection, "error receiving %s, e: %d l: %d!\n",
cmdname(pi.cmd), err, pi.size);
goto err_out;
}
@@ -4408,27 +4469,16 @@ static void drbdd(struct drbd_tconn *tconn)
return;
err_out:
- conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
-}
-
-void conn_flush_workqueue(struct drbd_tconn *tconn)
-{
- struct drbd_wq_barrier barr;
-
- barr.w.cb = w_prev_work_done;
- barr.w.tconn = tconn;
- init_completion(&barr.done);
- drbd_queue_work(&tconn->sender_work, &barr.w);
- wait_for_completion(&barr.done);
+ conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
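
Note: drbdd() above is a table-driven dispatcher: decode a header, bounds-check the command against drbd_cmd_handler[], reject unexpected payload, strip the fixed sub-header and hand the rest to the handler. A stripped-down sketch of the same pattern with hypothetical packet and handler names:

    #include <stdio.h>
    #include <stddef.h>

    struct pkt { unsigned cmd; unsigned size; };

    struct cmd_entry {
        int expect_payload;              /* may the packet carry extra data? */
        size_t pkt_size;                 /* size of the fixed sub-header */
        int (*fn)(const struct pkt *);   /* handler, NULL if unassigned */
    };

    static int handle_ping(const struct pkt *p) { (void)p; puts("ping"); return 0; }
    static int handle_data(const struct pkt *p) { printf("data, %u payload bytes\n", p->size); return 0; }

    static const struct cmd_entry table[] = {
        [0] = { 0, 0, handle_ping },
        [1] = { 1, 8, handle_data },
    };

    static int dispatch(struct pkt *p)
    {
        const struct cmd_entry *cmd;

        if (p->cmd >= sizeof(table) / sizeof(table[0]) || !table[p->cmd].fn)
            return -1;                               /* unexpected packet */
        cmd = &table[p->cmd];
        if (p->size > cmd->pkt_size && !cmd->expect_payload)
            return -1;                               /* no payload expected */
        if (p->size >= cmd->pkt_size)
            p->size -= cmd->pkt_size;                /* strip the fixed part, as above */
        return cmd->fn(p);
    }

    int main(void)
    {
        struct pkt ping = { 0, 0 };
        struct pkt data = { 1, 24 };

        dispatch(&ping);
        dispatch(&data);   /* prints: data, 16 payload bytes */
        return 0;
    }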
-static void conn_disconnect(struct drbd_tconn *tconn)
+static void conn_disconnect(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
enum drbd_conns oc;
int vnr;
- if (tconn->cstate == C_STANDALONE)
+ if (connection->cstate == C_STANDALONE)
return;
/* We are about to start the cleanup after connection loss.
@@ -4436,54 +4486,56 @@ static void conn_disconnect(struct drbd_tconn *tconn)
* Usually we should be in some network failure state already,
* but just in case we are not, we fix it up here.
*/
- conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+ conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
/* asender does not clean up anything. it must not interfere, either */
- drbd_thread_stop(&tconn->asender);
- drbd_free_sock(tconn);
+ drbd_thread_stop(&connection->asender);
+ drbd_free_sock(connection);
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ kref_get(&device->kref);
rcu_read_unlock();
- drbd_disconnected(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_disconnected(peer_device);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
- if (!list_empty(&tconn->current_epoch->list))
- conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
+ if (!list_empty(&connection->current_epoch->list))
+ drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
- atomic_set(&tconn->current_epoch->epoch_size, 0);
- tconn->send.seen_any_write_yet = false;
+ atomic_set(&connection->current_epoch->epoch_size, 0);
+ connection->send.seen_any_write_yet = false;
- conn_info(tconn, "Connection closed\n");
+ drbd_info(connection, "Connection closed\n");
- if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
- conn_try_outdate_peer_async(tconn);
+ if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
+ conn_try_outdate_peer_async(connection);
- spin_lock_irq(&tconn->req_lock);
- oc = tconn->cstate;
+ spin_lock_irq(&connection->resource->req_lock);
+ oc = connection->cstate;
if (oc >= C_UNCONNECTED)
- _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+ _conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
if (oc == C_DISCONNECTING)
- conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
+ conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
}
-static int drbd_disconnected(struct drbd_conf *mdev)
+static int drbd_disconnected(struct drbd_peer_device *peer_device)
{
+ struct drbd_device *device = peer_device->device;
unsigned int i;
/* wait for current activity to cease. */
- spin_lock_irq(&mdev->tconn->req_lock);
- _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
- _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ _drbd_wait_ee_list_empty(device, &device->active_ee);
+ _drbd_wait_ee_list_empty(device, &device->sync_ee);
+ _drbd_wait_ee_list_empty(device, &device->read_ee);
+ spin_unlock_irq(&device->resource->req_lock);
/* We do not have data structures that would allow us to
* get the rs_pending_cnt down to 0 again.
@@ -4495,42 +4547,42 @@ static int drbd_disconnected(struct drbd_conf *mdev)
* resync_LRU. The resync_LRU tracks the whole operation including
* the disk-IO, while the rs_pending_cnt only tracks the blocks
* on the fly. */
- drbd_rs_cancel_all(mdev);
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
- wake_up(&mdev->misc_wait);
+ drbd_rs_cancel_all(device);
+ device->rs_total = 0;
+ device->rs_failed = 0;
+ atomic_set(&device->rs_pending_cnt, 0);
+ wake_up(&device->misc_wait);
- del_timer_sync(&mdev->resync_timer);
- resync_timer_fn((unsigned long)mdev);
+ del_timer_sync(&device->resync_timer);
+ resync_timer_fn((unsigned long)device);
/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
* w_make_resync_request etc. which may still be on the worker queue
* to be "canceled" */
- drbd_flush_workqueue(mdev);
+ drbd_flush_workqueue(&peer_device->connection->sender_work);
- drbd_finish_peer_reqs(mdev);
+ drbd_finish_peer_reqs(device);
/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
   might have issued work again. The one before drbd_finish_peer_reqs() is
   necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
- drbd_flush_workqueue(mdev);
+ drbd_flush_workqueue(&peer_device->connection->sender_work);
/* need to do it again, drbd_finish_peer_reqs() may have populated it
* again via drbd_try_clear_on_disk_bm(). */
- drbd_rs_cancel_all(mdev);
+ drbd_rs_cancel_all(device);
- kfree(mdev->p_uuid);
- mdev->p_uuid = NULL;
+ kfree(device->p_uuid);
+ device->p_uuid = NULL;
- if (!drbd_suspended(mdev))
- tl_clear(mdev->tconn);
+ if (!drbd_suspended(device))
+ tl_clear(peer_device->connection);
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
/* serialize with bitmap writeout triggered by the state change,
* if any. */
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
/* tcp_close and release of sendpage pages can be deferred. I don't
* want to use SO_LINGER, because apparently it can be deferred for
@@ -4539,20 +4591,20 @@ static int drbd_disconnected(struct drbd_conf *mdev)
* Actually we don't care for exactly when the network stack does its
* put_page(), but release our reference on these pages right here.
*/
- i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
+ i = drbd_free_peer_reqs(device, &device->net_ee);
if (i)
- dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
- i = atomic_read(&mdev->pp_in_use_by_net);
+ drbd_info(device, "net_ee not empty, killed %u entries\n", i);
+ i = atomic_read(&device->pp_in_use_by_net);
if (i)
- dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
- i = atomic_read(&mdev->pp_in_use);
+ drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
+ i = atomic_read(&device->pp_in_use);
if (i)
- dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
+ drbd_info(device, "pp_in_use = %d, expected 0\n", i);
- D_ASSERT(list_empty(&mdev->read_ee));
- D_ASSERT(list_empty(&mdev->active_ee));
- D_ASSERT(list_empty(&mdev->sync_ee));
- D_ASSERT(list_empty(&mdev->done_ee));
+ D_ASSERT(device, list_empty(&device->read_ee));
+ D_ASSERT(device, list_empty(&device->active_ee));
+ D_ASSERT(device, list_empty(&device->sync_ee));
+ D_ASSERT(device, list_empty(&device->done_ee));
return 0;
}
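
Note: the double drbd_flush_workqueue() around drbd_finish_peer_reqs() above follows the comment in the code: the first flush lets already-queued completions run so their entries land on net_ee before they are reclaimed, and the second catches any work that drbd_finish_peer_reqs() queued itself. A toy work queue showing that ordering (placeholder names, not the driver's API):

    #include <stdio.h>

    #define QMAX 16

    static void (*queue[QMAX])(void);
    static int qlen;

    static void queue_work(void (*fn)(void))
    {
        if (qlen < QMAX)
            queue[qlen++] = fn;
    }

    static void flush_workqueue(void)        /* run everything queued so far */
    {
        for (int i = 0; i < qlen; i++)
            queue[i]();
        qlen = 0;
    }

    static void move_to_net_ee(void) { puts("completion moved its entry to net_ee"); }
    static void late_cleanup(void)   { puts("work issued by finish_peer_reqs ran"); }

    static void finish_peer_reqs(void)
    {
        puts("reclaiming net_ee");           /* depends on the 1st flush having run */
        queue_work(late_cleanup);            /* and may queue new work itself */
    }

    int main(void)
    {
        queue_work(move_to_net_ee);          /* pending completion from normal operation */
        flush_workqueue();                   /* 1st flush: lets pending completions run */
        finish_peer_reqs();
        flush_workqueue();                   /* 2nd flush: catches the newly queued work */
        return 0;
    }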
@@ -4566,19 +4618,19 @@ static int drbd_disconnected(struct drbd_conf *mdev)
*
* for now, they are expected to be zero, but ignored.
*/
-static int drbd_send_features(struct drbd_tconn *tconn)
+static int drbd_send_features(struct drbd_connection *connection)
{
struct drbd_socket *sock;
struct p_connection_features *p;
- sock = &tconn->data;
- p = conn_prepare_command(tconn, sock);
+ sock = &connection->data;
+ p = conn_prepare_command(connection, sock);
if (!p)
return -EIO;
memset(p, 0, sizeof(*p));
p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
- return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
+ return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}
/*
@@ -4588,36 +4640,36 @@ static int drbd_send_features(struct drbd_tconn *tconn)
* -1 peer talks different language,
* no point in trying again, please go standalone.
*/
-static int drbd_do_features(struct drbd_tconn *tconn)
+static int drbd_do_features(struct drbd_connection *connection)
{
- /* ASSERT current == tconn->receiver ... */
+ /* ASSERT current == connection->receiver ... */
struct p_connection_features *p;
const int expect = sizeof(struct p_connection_features);
struct packet_info pi;
int err;
- err = drbd_send_features(tconn);
+ err = drbd_send_features(connection);
if (err)
return 0;
- err = drbd_recv_header(tconn, &pi);
+ err = drbd_recv_header(connection, &pi);
if (err)
return 0;
if (pi.cmd != P_CONNECTION_FEATURES) {
- conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
+ drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
return -1;
}
if (pi.size != expect) {
- conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
+ drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
expect, pi.size);
return -1;
}
p = pi.data;
- err = drbd_recv_all_warn(tconn, p, expect);
+ err = drbd_recv_all_warn(connection, p, expect);
if (err)
return 0;
@@ -4630,15 +4682,15 @@ static int drbd_do_features(struct drbd_tconn *tconn)
PRO_VERSION_MIN > p->protocol_max)
goto incompat;
- tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
+ connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
- conn_info(tconn, "Handshake successful: "
- "Agreed network protocol version %d\n", tconn->agreed_pro_version);
+ drbd_info(connection, "Handshake successful: "
+ "Agreed network protocol version %d\n", connection->agreed_pro_version);
return 1;
incompat:
- conn_err(tconn, "incompatible DRBD dialects: "
+ drbd_err(connection, "incompatible DRBD dialects: "
"I support %d-%d, peer supports %d-%d\n",
PRO_VERSION_MIN, PRO_VERSION_MAX,
p->protocol_min, p->protocol_max);
@@ -4646,10 +4698,10 @@ static int drbd_do_features(struct drbd_tconn *tconn)
}
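
Note: drbd_do_features() rejects the peer unless its advertised [protocol_min, protocol_max] range overlaps the local one, then settles on min(PRO_VERSION_MAX, peer max). A compact sketch of that negotiation; the version numbers below are placeholders:

    #include <stdio.h>

    #define MY_VERSION_MIN 86
    #define MY_VERSION_MAX 101

    /* returns the agreed protocol version, or -1 if the ranges do not overlap */
    static int negotiate(int peer_min, int peer_max)
    {
        if (MY_VERSION_MAX < peer_min || MY_VERSION_MIN > peer_max)
            return -1;                                /* incompatible dialects */
        return MY_VERSION_MAX < peer_max ? MY_VERSION_MAX : peer_max;
    }

    int main(void)
    {
        printf("peer 86-100  -> agreed %d\n", negotiate(86, 100));    /* 100 */
        printf("peer 102-110 -> agreed %d\n", negotiate(102, 110));   /* -1  */
        return 0;
    }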
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
-static int drbd_do_auth(struct drbd_tconn *tconn)
+static int drbd_do_auth(struct drbd_connection *connection)
{
- conn_err(tconn, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
- conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
+ drbd_err(connection, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
+ drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
return -1;
}
#else
@@ -4661,7 +4713,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
-1 - auth failed, don't try again.
*/
-static int drbd_do_auth(struct drbd_tconn *tconn)
+static int drbd_do_auth(struct drbd_connection *connection)
{
struct drbd_socket *sock;
char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
@@ -4680,69 +4732,69 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
/* FIXME: Put the challenge/response into the preallocated socket buffer. */
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
key_len = strlen(nc->shared_secret);
memcpy(secret, nc->shared_secret, key_len);
rcu_read_unlock();
- desc.tfm = tconn->cram_hmac_tfm;
+ desc.tfm = connection->cram_hmac_tfm;
desc.flags = 0;
- rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
+ rv = crypto_hash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
- conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
+ drbd_err(connection, "crypto_hash_setkey() failed with %d\n", rv);
rv = -1;
goto fail;
}
get_random_bytes(my_challenge, CHALLENGE_LEN);
- sock = &tconn->data;
- if (!conn_prepare_command(tconn, sock)) {
+ sock = &connection->data;
+ if (!conn_prepare_command(connection, sock)) {
rv = 0;
goto fail;
}
- rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
+ rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
my_challenge, CHALLENGE_LEN);
if (!rv)
goto fail;
- err = drbd_recv_header(tconn, &pi);
+ err = drbd_recv_header(connection, &pi);
if (err) {
rv = 0;
goto fail;
}
if (pi.cmd != P_AUTH_CHALLENGE) {
- conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
+ drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
if (pi.size > CHALLENGE_LEN * 2) {
- conn_err(tconn, "expected AuthChallenge payload too big.\n");
+ drbd_err(connection, "expected AuthChallenge payload too big.\n");
rv = -1;
goto fail;
}
peers_ch = kmalloc(pi.size, GFP_NOIO);
if (peers_ch == NULL) {
- conn_err(tconn, "kmalloc of peers_ch failed\n");
+ drbd_err(connection, "kmalloc of peers_ch failed\n");
rv = -1;
goto fail;
}
- err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
+ err = drbd_recv_all_warn(connection, peers_ch, pi.size);
if (err) {
rv = 0;
goto fail;
}
- resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
+ resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm);
response = kmalloc(resp_size, GFP_NOIO);
if (response == NULL) {
- conn_err(tconn, "kmalloc of response failed\n");
+ drbd_err(connection, "kmalloc of response failed\n");
rv = -1;
goto fail;
}
@@ -4752,40 +4804,40 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
rv = crypto_hash_digest(&desc, &sg, sg.length, response);
if (rv) {
- conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
+ drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
- if (!conn_prepare_command(tconn, sock)) {
+ if (!conn_prepare_command(connection, sock)) {
rv = 0;
goto fail;
}
- rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
+ rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
response, resp_size);
if (!rv)
goto fail;
- err = drbd_recv_header(tconn, &pi);
+ err = drbd_recv_header(connection, &pi);
if (err) {
rv = 0;
goto fail;
}
if (pi.cmd != P_AUTH_RESPONSE) {
- conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
+ drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
if (pi.size != resp_size) {
- conn_err(tconn, "expected AuthResponse payload of wrong size\n");
+ drbd_err(connection, "expected AuthResponse payload of wrong size\n");
rv = 0;
goto fail;
}
- err = drbd_recv_all_warn(tconn, response , resp_size);
+ err = drbd_recv_all_warn(connection, response , resp_size);
if (err) {
rv = 0;
goto fail;
@@ -4793,7 +4845,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
right_response = kmalloc(resp_size, GFP_NOIO);
if (right_response == NULL) {
- conn_err(tconn, "kmalloc of right_response failed\n");
+ drbd_err(connection, "kmalloc of right_response failed\n");
rv = -1;
goto fail;
}
@@ -4802,7 +4854,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
if (rv) {
- conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
+ drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
@@ -4810,7 +4862,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
rv = !memcmp(response, right_response, resp_size);
if (rv)
- conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
+ drbd_info(connection, "Peer authenticated using %d bytes HMAC\n",
resp_size);
else
rv = -1;
@@ -4824,163 +4876,169 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
}
#endif
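
Note: drbd_do_auth() is a symmetric CRAM-style exchange: each side sends a random challenge, the peer returns an HMAC of that challenge keyed with the shared secret, and the sender compares the answer against the digest it computes locally. The sketch below shows only the control flow; hmac() here is a toy stand-in, not a real keyed hash and not the crypto_hash API the driver uses:

    #include <stdio.h>
    #include <string.h>

    #define CHALLENGE_LEN 8
    #define DIGEST_LEN 8

    /* placeholder keyed hash -- NOT cryptographically meaningful */
    static void hmac(const char *key, const unsigned char *msg, size_t len,
                     unsigned char *out)
    {
        for (size_t i = 0; i < DIGEST_LEN; i++)
            out[i] = key[i % strlen(key)] ^ msg[i % len] ^ (unsigned char)i;
    }

    struct peer {
        const char *secret;
        unsigned char my_challenge[CHALLENGE_LEN];
    };

    /* one side answers the challenge it received */
    static void respond(const struct peer *p, const unsigned char *challenge,
                        unsigned char *response)
    {
        hmac(p->secret, challenge, CHALLENGE_LEN, response);
    }

    /* the other side checks the answer against its own computation */
    static int verify(const struct peer *p, const unsigned char *response)
    {
        unsigned char right_response[DIGEST_LEN];

        hmac(p->secret, p->my_challenge, CHALLENGE_LEN, right_response);
        return memcmp(response, right_response, DIGEST_LEN) == 0;
    }

    int main(void)
    {
        struct peer a = { "shared-secret", "AAAAAAAA" };
        struct peer b = { "shared-secret", "BBBBBBBB" };
        unsigned char resp[DIGEST_LEN];

        respond(&b, a.my_challenge, resp);     /* B answers A's challenge */
        printf("A accepts B: %s\n", verify(&a, resp) ? "yes" : "no");

        respond(&a, b.my_challenge, resp);     /* and vice versa */
        printf("B accepts A: %s\n", verify(&b, resp) ? "yes" : "no");
        return 0;
    }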
-int drbdd_init(struct drbd_thread *thi)
+int drbd_receiver(struct drbd_thread *thi)
{
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_connection *connection = thi->connection;
int h;
- conn_info(tconn, "receiver (re)started\n");
+ drbd_info(connection, "receiver (re)started\n");
do {
- h = conn_connect(tconn);
+ h = conn_connect(connection);
if (h == 0) {
- conn_disconnect(tconn);
+ conn_disconnect(connection);
schedule_timeout_interruptible(HZ);
}
if (h == -1) {
- conn_warn(tconn, "Discarding network configuration.\n");
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ drbd_warn(connection, "Discarding network configuration.\n");
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
} while (h == 0);
if (h > 0)
- drbdd(tconn);
+ drbdd(connection);
- conn_disconnect(tconn);
+ conn_disconnect(connection);
- conn_info(tconn, "receiver terminated\n");
+ drbd_info(connection, "receiver terminated\n");
return 0;
}
/* ********* acknowledge sender ******** */
-static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_req_state_reply *p = pi->data;
int retcode = be32_to_cpu(p->retcode);
if (retcode >= SS_SUCCESS) {
- set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
+ set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
} else {
- set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
- conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
+ set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
+ drbd_err(connection, "Requested state change failed by peer: %s (%d)\n",
drbd_set_st_err_str(retcode), retcode);
}
- wake_up(&tconn->ping_wait);
+ wake_up(&connection->ping_wait);
return 0;
}
-static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_req_state_reply *p = pi->data;
int retcode = be32_to_cpu(p->retcode);
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
- D_ASSERT(tconn->agreed_pro_version < 100);
- return got_conn_RqSReply(tconn, pi);
+ if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
+ D_ASSERT(device, connection->agreed_pro_version < 100);
+ return got_conn_RqSReply(connection, pi);
}
if (retcode >= SS_SUCCESS) {
- set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
+ set_bit(CL_ST_CHG_SUCCESS, &device->flags);
} else {
- set_bit(CL_ST_CHG_FAIL, &mdev->flags);
- dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
+ set_bit(CL_ST_CHG_FAIL, &device->flags);
+ drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
drbd_set_st_err_str(retcode), retcode);
}
- wake_up(&mdev->state_wait);
+ wake_up(&device->state_wait);
return 0;
}
-static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
{
- return drbd_send_ping_ack(tconn);
+ return drbd_send_ping_ack(connection);
}
-static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
{
/* restore idle timeout */
- tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
- if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
- wake_up(&tconn->ping_wait);
+ connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
+ if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
+ wake_up(&connection->ping_wait);
return 0;
}
-static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
+ D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
- if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, sector);
- drbd_set_in_sync(mdev, sector, blksize);
+ if (get_ldev(device)) {
+ drbd_rs_complete_io(device, sector);
+ drbd_set_in_sync(device, sector, blksize);
/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
- mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
- put_ldev(mdev);
+ device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
+ put_ldev(device);
}
- dec_rs_pending(mdev);
- atomic_add(blksize >> 9, &mdev->rs_sect_in);
+ dec_rs_pending(device);
+ atomic_add(blksize >> 9, &device->rs_sect_in);
return 0;
}
static int
-validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
+validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
struct rb_root *root, const char *func,
enum drbd_req_event what, bool missing_ok)
{
struct drbd_request *req;
struct bio_and_error m;
- spin_lock_irq(&mdev->tconn->req_lock);
- req = find_request(mdev, root, id, sector, missing_ok, func);
+ spin_lock_irq(&device->resource->req_lock);
+ req = find_request(device, root, id, sector, missing_ok, func);
if (unlikely(!req)) {
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
return -EIO;
}
__req_mod(req, what, &m);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
if (m.bio)
- complete_master_bio(mdev, &m);
+ complete_master_bio(device, &m);
return 0;
}
-static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
enum drbd_req_event what;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (p->block_id == ID_SYNCER) {
- drbd_set_in_sync(mdev, sector, blksize);
- dec_rs_pending(mdev);
+ drbd_set_in_sync(device, sector, blksize);
+ dec_rs_pending(device);
return 0;
}
switch (pi->cmd) {
@@ -5003,33 +5061,35 @@ static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
BUG();
}
- return validate_req_change_req_state(mdev, p->block_id, sector,
- &mdev->write_requests, __func__,
+ return validate_req_change_req_state(device, p->block_id, sector,
+ &device->write_requests, __func__,
what, false);
}
-static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int size = be32_to_cpu(p->blksize);
int err;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (p->block_id == ID_SYNCER) {
- dec_rs_pending(mdev);
- drbd_rs_failed_io(mdev, sector, size);
+ dec_rs_pending(device);
+ drbd_rs_failed_io(device, sector, size);
return 0;
}
- err = validate_req_change_req_state(mdev, p->block_id, sector,
- &mdev->write_requests, __func__,
+ err = validate_req_change_req_state(device, p->block_id, sector,
+ &device->write_requests, __func__,
NEG_ACKED, true);
if (err) {
/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
@@ -5037,80 +5097,86 @@ static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
request is no longer in the collision hash. */
/* In Protocol B we might already have got a P_RECV_ACK
but then get a P_NEG_ACK afterwards. */
- drbd_set_out_of_sync(mdev, sector, size);
+ drbd_set_out_of_sync(device, sector, size);
}
return 0;
}
-static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
- dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
+ drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
(unsigned long long)sector, be32_to_cpu(p->blksize));
- return validate_req_change_req_state(mdev, p->block_id, sector,
- &mdev->read_requests, __func__,
+ return validate_req_change_req_state(device, p->block_id, sector,
+ &device->read_requests, __func__,
NEG_ACKED, false);
}
-static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
sector_t sector;
int size;
struct p_block_ack *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
- dec_rs_pending(mdev);
+ dec_rs_pending(device);
- if (get_ldev_if_state(mdev, D_FAILED)) {
- drbd_rs_complete_io(mdev, sector);
+ if (get_ldev_if_state(device, D_FAILED)) {
+ drbd_rs_complete_io(device, sector);
switch (pi->cmd) {
case P_NEG_RS_DREPLY:
- drbd_rs_failed_io(mdev, sector, size);
+ drbd_rs_failed_io(device, sector, size);
case P_RS_CANCEL:
break;
default:
BUG();
}
- put_ldev(mdev);
+ put_ldev(device);
}
return 0;
}
-static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_barrier_ack *p = pi->data;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
- tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
+ tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (mdev->state.conn == C_AHEAD &&
- atomic_read(&mdev->ap_in_flight) == 0 &&
- !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
- mdev->start_resync_timer.expires = jiffies + HZ;
- add_timer(&mdev->start_resync_timer);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+
+ if (device->state.conn == C_AHEAD &&
+ atomic_read(&device->ap_in_flight) == 0 &&
+ !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
+ device->start_resync_timer.expires = jiffies + HZ;
+ add_timer(&device->start_resync_timer);
}
}
rcu_read_unlock();
@@ -5118,90 +5184,94 @@ static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
return 0;
}
-static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_ack *p = pi->data;
- struct drbd_work *w;
+ struct drbd_device_work *dw;
sector_t sector;
int size;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
- drbd_ov_out_of_sync_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(device, sector, size);
else
- ov_out_of_sync_print(mdev);
+ ov_out_of_sync_print(device);
- if (!get_ldev(mdev))
+ if (!get_ldev(device))
return 0;
- drbd_rs_complete_io(mdev, sector);
- dec_rs_pending(mdev);
+ drbd_rs_complete_io(device, sector);
+ dec_rs_pending(device);
- --mdev->ov_left;
+ --device->ov_left;
/* let's advance progress step marks only for every other megabyte */
- if ((mdev->ov_left & 0x200) == 0x200)
- drbd_advance_rs_marks(mdev, mdev->ov_left);
-
- if (mdev->ov_left == 0) {
- w = kmalloc(sizeof(*w), GFP_NOIO);
- if (w) {
- w->cb = w_ov_finished;
- w->mdev = mdev;
- drbd_queue_work(&mdev->tconn->sender_work, w);
+ if ((device->ov_left & 0x200) == 0x200)
+ drbd_advance_rs_marks(device, device->ov_left);
+
+ if (device->ov_left == 0) {
+ dw = kmalloc(sizeof(*dw), GFP_NOIO);
+ if (dw) {
+ dw->w.cb = w_ov_finished;
+ dw->device = device;
+ drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
} else {
- dev_err(DEV, "kmalloc(w) failed.");
- ov_out_of_sync_print(mdev);
- drbd_resync_finished(mdev);
+ drbd_err(device, "kmalloc(dw) failed.");
+ ov_out_of_sync_print(device);
+ drbd_resync_finished(device);
}
}
- put_ldev(mdev);
+ put_ldev(device);
return 0;
}
-static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
{
return 0;
}
-static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
+static int connection_finish_peer_reqs(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr, not_empty = 0;
do {
- clear_bit(SIGNAL_ASENDER, &tconn->flags);
+ clear_bit(SIGNAL_ASENDER, &connection->flags);
flush_signals(current);
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ kref_get(&device->kref);
rcu_read_unlock();
- if (drbd_finish_peer_reqs(mdev)) {
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ if (drbd_finish_peer_reqs(device)) {
+ kref_put(&device->kref, drbd_destroy_device);
return 1;
}
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
- set_bit(SIGNAL_ASENDER, &tconn->flags);
+ set_bit(SIGNAL_ASENDER, &connection->flags);
- spin_lock_irq(&tconn->req_lock);
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- not_empty = !list_empty(&mdev->done_ee);
+ spin_lock_irq(&connection->resource->req_lock);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ not_empty = !list_empty(&device->done_ee);
if (not_empty)
break;
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
rcu_read_unlock();
} while (not_empty);
@@ -5210,7 +5280,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
struct asender_cmd {
size_t pkt_size;
- int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
+ int (*fn)(struct drbd_connection *connection, struct packet_info *);
};
static struct asender_cmd asender_tbl[] = {
@@ -5235,13 +5305,13 @@ static struct asender_cmd asender_tbl[] = {
int drbd_asender(struct drbd_thread *thi)
{
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_connection *connection = thi->connection;
struct asender_cmd *cmd = NULL;
struct packet_info pi;
int rv;
- void *buf = tconn->meta.rbuf;
+ void *buf = connection->meta.rbuf;
int received = 0;
- unsigned int header_size = drbd_header_size(tconn);
+ unsigned int header_size = drbd_header_size(connection);
int expect = header_size;
bool ping_timeout_active = false;
struct net_conf *nc;
@@ -5250,45 +5320,45 @@ int drbd_asender(struct drbd_thread *thi)
rv = sched_setscheduler(current, SCHED_RR, &param);
if (rv < 0)
- conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv);
+ drbd_err(connection, "drbd_asender: ERROR set priority, ret=%d\n", rv);
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
ping_timeo = nc->ping_timeo;
tcp_cork = nc->tcp_cork;
ping_int = nc->ping_int;
rcu_read_unlock();
- if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
- if (drbd_send_ping(tconn)) {
- conn_err(tconn, "drbd_send_ping has failed\n");
+ if (test_and_clear_bit(SEND_PING, &connection->flags)) {
+ if (drbd_send_ping(connection)) {
+ drbd_err(connection, "drbd_send_ping has failed\n");
goto reconnect;
}
- tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
+ connection->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
ping_timeout_active = true;
}
/* TODO: conditionally cork; it may hurt latency if we cork without
much to send */
if (tcp_cork)
- drbd_tcp_cork(tconn->meta.socket);
- if (tconn_finish_peer_reqs(tconn)) {
- conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
+ drbd_tcp_cork(connection->meta.socket);
+ if (connection_finish_peer_reqs(connection)) {
+ drbd_err(connection, "connection_finish_peer_reqs() failed\n");
goto reconnect;
}
/* but unconditionally uncork unless disabled */
if (tcp_cork)
- drbd_tcp_uncork(tconn->meta.socket);
+ drbd_tcp_uncork(connection->meta.socket);
/* short circuit, recv_msg would return EINTR anyways. */
if (signal_pending(current))
continue;
- rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
- clear_bit(SIGNAL_ASENDER, &tconn->flags);
+ rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0);
+ clear_bit(SIGNAL_ASENDER, &connection->flags);
flush_signals(current);
@@ -5306,51 +5376,51 @@ int drbd_asender(struct drbd_thread *thi)
received += rv;
buf += rv;
} else if (rv == 0) {
- if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ if (test_bit(DISCONNECT_SENT, &connection->flags)) {
long t;
rcu_read_lock();
- t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
rcu_read_unlock();
- t = wait_event_timeout(tconn->ping_wait,
- tconn->cstate < C_WF_REPORT_PARAMS,
+ t = wait_event_timeout(connection->ping_wait,
+ connection->cstate < C_WF_REPORT_PARAMS,
t);
if (t)
break;
}
- conn_err(tconn, "meta connection shut down by peer.\n");
+ drbd_err(connection, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
/* If the data socket received something meanwhile,
* that is good enough: peer is still alive. */
- if (time_after(tconn->last_received,
- jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
+ if (time_after(connection->last_received,
+ jiffies - connection->meta.socket->sk->sk_rcvtimeo))
continue;
if (ping_timeout_active) {
- conn_err(tconn, "PingAck did not arrive in time.\n");
+ drbd_err(connection, "PingAck did not arrive in time.\n");
goto reconnect;
}
- set_bit(SEND_PING, &tconn->flags);
+ set_bit(SEND_PING, &connection->flags);
continue;
} else if (rv == -EINTR) {
continue;
} else {
- conn_err(tconn, "sock_recvmsg returned %d\n", rv);
+ drbd_err(connection, "sock_recvmsg returned %d\n", rv);
goto reconnect;
}
if (received == expect && cmd == NULL) {
- if (decode_header(tconn, tconn->meta.rbuf, &pi))
+ if (decode_header(connection, connection->meta.rbuf, &pi))
goto reconnect;
cmd = &asender_tbl[pi.cmd];
if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
- conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
+ drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
goto disconnect;
}
expect = header_size + cmd->pkt_size;
if (pi.size != expect - header_size) {
- conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
+ drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
pi.cmd, pi.size);
goto reconnect;
}
@@ -5358,21 +5428,21 @@ int drbd_asender(struct drbd_thread *thi)
if (received == expect) {
bool err;
- err = cmd->fn(tconn, &pi);
+ err = cmd->fn(connection, &pi);
if (err) {
- conn_err(tconn, "%pf failed\n", cmd->fn);
+ drbd_err(connection, "%pf failed\n", cmd->fn);
goto reconnect;
}
- tconn->last_received = jiffies;
+ connection->last_received = jiffies;
if (cmd == &asender_tbl[P_PING_ACK]) {
/* restore idle timeout */
- tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
+ connection->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
ping_timeout_active = false;
}
- buf = tconn->meta.rbuf;
+ buf = connection->meta.rbuf;
received = 0;
expect = header_size;
cmd = NULL;
@@ -5381,16 +5451,16 @@ int drbd_asender(struct drbd_thread *thi)
if (0) {
reconnect:
- conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
- conn_md_sync(tconn);
+ conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+ conn_md_sync(connection);
}
if (0) {
disconnect:
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
- clear_bit(SIGNAL_ASENDER, &tconn->flags);
+ clear_bit(SIGNAL_ASENDER, &connection->flags);
- conn_info(tconn, "asender terminated\n");
+ drbd_info(connection, "asender terminated\n");
return 0;
}
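
Note: the keepalive handling in drbd_asender() above works in two steps: sending a ping shortens the receive timeout to ping_timeo, and a later timeout only escalates to "PingAck did not arrive in time" if nothing was seen on the data socket in the meantime; any received packet restores the idle timeout. A reduced sketch of that bookkeeping with plain seconds instead of jiffies and socket timeouts (names are illustrative):

    #include <stdio.h>
    #include <time.h>

    struct meta_link {
        time_t last_received;     /* last time anything arrived on the data socket */
        time_t ping_sent_at;      /* 0 if no ping is outstanding */
        int ping_timeo;           /* seconds to wait for the PingAck */
    };

    /* called when a receive attempt times out with nothing to read */
    static const char *on_recv_timeout(struct meta_link *l, time_t now)
    {
        if (now - l->last_received < l->ping_timeo)
            return "peer still alive (data traffic seen), keep waiting";
        if (l->ping_sent_at)
            return "PingAck did not arrive in time -> reconnect";
        l->ping_sent_at = now;    /* request a ping, shorten the wait */
        return "sending ping";
    }

    /* called when a PingAck (or any meta packet) arrives */
    static void on_receive(struct meta_link *l, time_t now)
    {
        l->last_received = now;
        l->ping_sent_at = 0;      /* restore the idle timeout */
    }

    int main(void)
    {
        struct meta_link l = { .last_received = 100, .ping_timeo = 5 };

        puts(on_recv_timeout(&l, 110));   /* idle too long -> sending ping */
        puts(on_recv_timeout(&l, 116));   /* still nothing  -> give up */
        on_receive(&l, 117);
        puts(on_recv_timeout(&l, 118));   /* fresh traffic  -> keep waiting */
        return 0;
    }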
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 104a040f24de..3779c8d2875b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -31,37 +31,37 @@
#include "drbd_req.h"
-static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
+static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
/* Update disk stats at start of I/O request */
-static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
+static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
const int rw = bio_data_dir(req->master_bio);
int cpu;
cpu = part_stat_lock();
- part_round_stats(cpu, &mdev->vdisk->part0);
- part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
- part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], req->i.size >> 9);
+ part_round_stats(cpu, &device->vdisk->part0);
+ part_stat_inc(cpu, &device->vdisk->part0, ios[rw]);
+ part_stat_add(cpu, &device->vdisk->part0, sectors[rw], req->i.size >> 9);
(void) cpu; /* The macro invocations above want the cpu argument, I do not like
the compiler warning about cpu only assigned but never used... */
- part_inc_in_flight(&mdev->vdisk->part0, rw);
+ part_inc_in_flight(&device->vdisk->part0, rw);
part_stat_unlock();
}
/* Update disk stats when completing request upwards */
-static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
+static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
int rw = bio_data_dir(req->master_bio);
unsigned long duration = jiffies - req->start_time;
int cpu;
cpu = part_stat_lock();
- part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
- part_round_stats(cpu, &mdev->vdisk->part0);
- part_dec_in_flight(&mdev->vdisk->part0, rw);
+ part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration);
+ part_round_stats(cpu, &device->vdisk->part0);
+ part_dec_in_flight(&device->vdisk->part0, rw);
part_stat_unlock();
}
-static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
+static struct drbd_request *drbd_req_new(struct drbd_device *device,
struct bio *bio_src)
{
struct drbd_request *req;
@@ -72,7 +72,7 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
drbd_req_make_private_bio(req, bio_src);
req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
- req->w.mdev = mdev;
+ req->device = device;
req->master_bio = bio_src;
req->epoch = 0;
@@ -95,14 +95,14 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
void drbd_req_destroy(struct kref *kref)
{
struct drbd_request *req = container_of(kref, struct drbd_request, kref);
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
const unsigned s = req->rq_state;
if ((req->master_bio && !(s & RQ_POSTPONED)) ||
atomic_read(&req->completion_ref) ||
(s & RQ_LOCAL_PENDING) ||
((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
- dev_err(DEV, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
+ drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
s, atomic_read(&req->completion_ref));
return;
}
@@ -132,10 +132,10 @@ void drbd_req_destroy(struct kref *kref)
*/
if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
- drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
+ drbd_set_out_of_sync(device, req->i.sector, req->i.size);
if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
- drbd_set_in_sync(mdev, req->i.sector, req->i.size);
+ drbd_set_in_sync(device, req->i.sector, req->i.size);
}
/* one might be tempted to move the drbd_al_complete_io
@@ -149,11 +149,11 @@ void drbd_req_destroy(struct kref *kref)
* we would forget to resync the corresponding extent.
*/
if (s & RQ_IN_ACT_LOG) {
- if (get_ldev_if_state(mdev, D_FAILED)) {
- drbd_al_complete_io(mdev, &req->i);
- put_ldev(mdev);
+ if (get_ldev_if_state(device, D_FAILED)) {
+ drbd_al_complete_io(device, &req->i);
+ put_ldev(device);
} else if (__ratelimit(&drbd_ratelimit_state)) {
- dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
+ drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
"but my Disk seems to have failed :(\n",
(unsigned long long) req->i.sector, req->i.size);
}
@@ -163,41 +163,42 @@ void drbd_req_destroy(struct kref *kref)
mempool_free(req, drbd_request_mempool);
}
-static void wake_all_senders(struct drbd_tconn *tconn) {
- wake_up(&tconn->sender_work.q_wait);
+static void wake_all_senders(struct drbd_connection *connection)
+{
+ wake_up(&connection->sender_work.q_wait);
}
/* must hold resource->req_lock */
-void start_new_tl_epoch(struct drbd_tconn *tconn)
+void start_new_tl_epoch(struct drbd_connection *connection)
{
/* no point closing an epoch, if it is empty, anyways. */
- if (tconn->current_tle_writes == 0)
+ if (connection->current_tle_writes == 0)
return;
- tconn->current_tle_writes = 0;
- atomic_inc(&tconn->current_tle_nr);
- wake_all_senders(tconn);
+ connection->current_tle_writes = 0;
+ atomic_inc(&connection->current_tle_nr);
+ wake_all_senders(connection);
}
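
Note: start_new_tl_epoch() above only closes an epoch that actually contains writes: it clears the per-epoch write counter, bumps the epoch number and wakes the senders so they can emit the barrier. A minimal sketch of that counter handling, with illustrative field names:

    #include <stdio.h>

    struct tl {
        int current_tle_writes;   /* writes in the open epoch */
        int current_tle_nr;       /* epoch number */
    };

    static void start_new_tl_epoch(struct tl *tl)
    {
        if (tl->current_tle_writes == 0)
            return;                       /* empty epoch: nothing to close */
        tl->current_tle_writes = 0;
        tl->current_tle_nr++;             /* senders would emit a barrier here */
    }

    static void new_write(struct tl *tl)
    {
        tl->current_tle_writes++;
    }

    int main(void)
    {
        struct tl tl = { 0, 0 };

        start_new_tl_epoch(&tl);          /* no-op: epoch is empty */
        new_write(&tl);
        new_write(&tl);
        start_new_tl_epoch(&tl);          /* closes epoch 0, opens epoch 1 */
        printf("epoch %d, %d writes pending\n", tl.current_tle_nr, tl.current_tle_writes);
        return 0;
    }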
-void complete_master_bio(struct drbd_conf *mdev,
+void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m)
{
bio_endio(m->bio, m->error);
- dec_ap_bio(mdev);
+ dec_ap_bio(device);
}
static void drbd_remove_request_interval(struct rb_root *root,
struct drbd_request *req)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct drbd_interval *i = &req->i;
drbd_remove_interval(root, i);
/* Wake up any processes waiting for this request to complete. */
if (i->waiting)
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
}
/* Helper for __req_mod().
@@ -210,7 +211,7 @@ static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
const unsigned s = req->rq_state;
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
int rw;
int error, ok;
@@ -226,12 +227,12 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
(s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
(s & RQ_COMPLETION_SUSP)) {
- dev_err(DEV, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
+ drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
return;
}
if (!req->master_bio) {
- dev_err(DEV, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
+ drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
return;
}
@@ -259,9 +260,9 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
struct rb_root *root;
if (rw == WRITE)
- root = &mdev->write_requests;
+ root = &device->write_requests;
else
- root = &mdev->read_requests;
+ root = &device->read_requests;
drbd_remove_request_interval(root, req);
}
@@ -273,11 +274,11 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
* and reset the transfer log epoch write_cnt.
*/
if (rw == WRITE &&
- req->epoch == atomic_read(&mdev->tconn->current_tle_nr))
- start_new_tl_epoch(mdev->tconn);
+ req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
+ start_new_tl_epoch(first_peer_device(device)->connection);
/* Update disk stats */
- _drbd_end_io_acct(mdev, req);
+ _drbd_end_io_acct(device, req);
/* If READ failed,
* have it be pushed back to the retry work queue,
@@ -305,8 +306,8 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
- struct drbd_conf *mdev = req->w.mdev;
- D_ASSERT(m || (req->rq_state & RQ_POSTPONED));
+ struct drbd_device *device = req->device;
+ D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
if (!atomic_sub_and_test(put, &req->completion_ref))
return 0;
@@ -328,12 +329,12 @@ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
int clear, int set)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
unsigned s = req->rq_state;
int c_put = 0;
int k_put = 0;
- if (drbd_suspended(mdev) && !((s | clear) & RQ_COMPLETION_SUSP))
+ if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
set |= RQ_COMPLETION_SUSP;
/* apply */
@@ -351,7 +352,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
atomic_inc(&req->completion_ref);
if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
- inc_ap_pending(mdev);
+ inc_ap_pending(device);
atomic_inc(&req->completion_ref);
}
@@ -362,7 +363,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
kref_get(&req->kref); /* wait for the DONE */
if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT))
- atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
+ atomic_add(req->i.size >> 9, &device->ap_in_flight);
if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
atomic_inc(&req->completion_ref);
@@ -373,7 +374,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
++c_put;
if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
- D_ASSERT(req->rq_state & RQ_LOCAL_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
/* local completion may still come in later,
* we need to keep the req object around. */
kref_get(&req->kref);
@@ -388,7 +389,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
}
if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
- dec_ap_pending(mdev);
+ dec_ap_pending(device);
++c_put;
}
@@ -397,7 +398,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
if (req->rq_state & RQ_NET_SENT)
- atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
+ atomic_sub(req->i.size >> 9, &device->ap_in_flight);
++k_put;
}
@@ -409,14 +410,14 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
int at_least = k_put + !!c_put;
int refcount = atomic_read(&req->kref.refcount);
if (refcount < at_least)
- dev_err(DEV,
+ drbd_err(device,
"mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
s, req->rq_state, refcount, at_least);
}
/* If we made progress, retry conflicting peer requests, if any. */
if (req->i.waiting)
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
if (c_put)
k_put += drbd_req_put_completion_ref(req, m, c_put);
@@ -424,18 +425,18 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
kref_sub(&req->kref, k_put, drbd_req_destroy);
}
-static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req)
+static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
char b[BDEVNAME_SIZE];
if (!__ratelimit(&drbd_ratelimit_state))
return;
- dev_warn(DEV, "local %s IO error sector %llu+%u on %s\n",
+ drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
(unsigned long long)req->i.sector,
req->i.size >> 9,
- bdevname(mdev->ldev->backing_bdev, b));
+ bdevname(device->ldev->backing_bdev, b));
}
/* obviously this could be coded as many single functions
@@ -453,7 +454,7 @@ static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *re
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct net_conf *nc;
int p, rv = 0;
@@ -462,7 +463,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
switch (what) {
default:
- dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
+ drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
break;
/* does not happen...
@@ -474,9 +475,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
case TO_BE_SENT: /* via network */
/* reached via __drbd_make_request
* and from w_read_retry_remote */
- D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+ D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
p = nc->wire_protocol;
rcu_read_unlock();
req->rq_state |=
@@ -487,15 +488,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
case TO_BE_SUBMITTED: /* locally */
/* reached via __drbd_make_request */
- D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
+ D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
break;
case COMPLETED_OK:
if (req->rq_state & RQ_WRITE)
- mdev->writ_cnt += req->i.size >> 9;
+ device->writ_cnt += req->i.size >> 9;
else
- mdev->read_cnt += req->i.size >> 9;
+ device->read_cnt += req->i.size >> 9;
mod_rq_state(req, m, RQ_LOCAL_PENDING,
RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
@@ -506,15 +507,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case WRITE_COMPLETED_WITH_ERROR:
- drbd_report_io_error(mdev, req);
- __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
+ drbd_report_io_error(device, req);
+ __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
case READ_COMPLETED_WITH_ERROR:
- drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
- drbd_report_io_error(mdev, req);
- __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
+ drbd_set_out_of_sync(device, req->i.sector, req->i.size);
+ drbd_report_io_error(device, req);
+ __drbd_chk_io_error(device, DRBD_READ_ERROR);
/* fall through. */
case READ_AHEAD_COMPLETED_WITH_ERROR:
/* it is legal to fail READA, no __drbd_chk_io_error in that case. */
@@ -532,16 +533,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* So we can verify the handle in the answer packet.
* Corresponding drbd_remove_request_interval is in
* drbd_req_complete() */
- D_ASSERT(drbd_interval_empty(&req->i));
- drbd_insert_interval(&mdev->read_requests, &req->i);
+ D_ASSERT(device, drbd_interval_empty(&req->i));
+ drbd_insert_interval(&device->read_requests, &req->i);
- set_bit(UNPLUG_REMOTE, &mdev->flags);
+ set_bit(UNPLUG_REMOTE, &device->flags);
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
- D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_read_req;
- drbd_queue_work(&mdev->tconn->sender_work, &req->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &req->w);
break;
case QUEUE_FOR_NET_WRITE:
@@ -550,8 +552,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* Corresponding drbd_remove_request_interval is in
* drbd_req_complete() */
- D_ASSERT(drbd_interval_empty(&req->i));
- drbd_insert_interval(&mdev->write_requests, &req->i);
+ D_ASSERT(device, drbd_interval_empty(&req->i));
+ drbd_insert_interval(&device->write_requests, &req->i);
/* NOTE
* In case the req ended up on the transfer log before being
@@ -570,28 +572,30 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* otherwise we may lose an unplug, which may cause some remote
* io-scheduler timeout to expire, increasing maximum latency,
* hurting performance. */
- set_bit(UNPLUG_REMOTE, &mdev->flags);
+ set_bit(UNPLUG_REMOTE, &device->flags);
/* queue work item to send data */
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
req->w.cb = w_send_dblock;
- drbd_queue_work(&mdev->tconn->sender_work, &req->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &req->w);
/* close the epoch, in case it outgrew the limit */
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
p = nc->max_epoch_size;
rcu_read_unlock();
- if (mdev->tconn->current_tle_writes >= p)
- start_new_tl_epoch(mdev->tconn);
+ if (first_peer_device(device)->connection->current_tle_writes >= p)
+ start_new_tl_epoch(first_peer_device(device)->connection);
break;
case QUEUE_FOR_SEND_OOS:
mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_out_of_sync;
- drbd_queue_work(&mdev->tconn->sender_work, &req->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &req->w);
break;
case READ_RETRY_REMOTE_CANCELED:
@@ -639,15 +643,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* If this request had been marked as RQ_POSTPONED before,
* it will actually not be completed, but "restarted",
* resubmitted from the retry worker context. */
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
- D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
break;
case WRITE_ACKED_BY_PEER_AND_SIS:
req->rq_state |= RQ_NET_SIS;
case WRITE_ACKED_BY_PEER:
- D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
/* protocol C; successfully written on peer.
* Nothing more to do here.
* We want to keep the tl in place for all protocols, to cater
@@ -655,25 +659,25 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
goto ack_common;
case RECV_ACKED_BY_PEER:
- D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
+ D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
/* protocol B; pretends to be successfully written on peer.
* see also notes above in HANDED_OVER_TO_NETWORK about
* protocol != C */
ack_common:
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
break;
case POSTPONE_WRITE:
- D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
/* If this node has already detected the write conflict, the
* worker will be waiting on misc_wait. Wake it up once this
* request has completed locally.
*/
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
req->rq_state |= RQ_POSTPONED;
if (req->i.waiting)
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
/* Do not clear RQ_NET_PENDING. This request will make further
* progress via restart_conflicting_writes() or
* fail_postponed_requests(). Hopefully. */
@@ -701,9 +705,10 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
if (bio_data_dir(req->master_bio) == WRITE)
rv = MR_WRITE;
- get_ldev(mdev); /* always succeeds in this call path */
+ get_ldev(device); /* always succeeds in this call path */
req->w.cb = w_restart_disk_io;
- drbd_queue_work(&mdev->tconn->sender_work, &req->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &req->w);
break;
case RESEND:
@@ -719,12 +724,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
Throwing them out of the TL here by pretending we got a BARRIER_ACK.
During connection handshake, we ensure that the peer was not rebooted. */
if (!(req->rq_state & RQ_NET_OK)) {
- /* FIXME could this possibly be a req->w.cb == w_send_out_of_sync?
+ /* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
* in that case we must not set RQ_NET_PENDING. */
mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
if (req->w.cb) {
- drbd_queue_work(&mdev->tconn->sender_work, &req->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
} /* else: FIXME can this happen? */
break;
@@ -740,7 +746,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* barrier came in before all requests were acked.
* this is bad, because if the connection is lost now,
* we won't be able to clean them up... */
- dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
+ drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
}
/* Allowed to complete requests, even while suspended.
* As this is called for all requests within a matching epoch,
@@ -751,12 +757,12 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case DATA_RECEIVED:
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
break;
case QUEUE_AS_DRBD_BARRIER:
- start_new_tl_epoch(mdev->tconn);
+ start_new_tl_epoch(first_peer_device(device)->connection);
mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
break;
};
@@ -771,27 +777,27 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* since size may be bigger than BM_BLOCK_SIZE,
* we may need to check several bits.
*/
-static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
+static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
unsigned long sbnr, ebnr;
sector_t esector, nr_sectors;
- if (mdev->state.disk == D_UP_TO_DATE)
+ if (device->state.disk == D_UP_TO_DATE)
return true;
- if (mdev->state.disk != D_INCONSISTENT)
+ if (device->state.disk != D_INCONSISTENT)
return false;
esector = sector + (size >> 9) - 1;
- nr_sectors = drbd_get_capacity(mdev->this_bdev);
- D_ASSERT(sector < nr_sectors);
- D_ASSERT(esector < nr_sectors);
+ nr_sectors = drbd_get_capacity(device->this_bdev);
+ D_ASSERT(device, sector < nr_sectors);
+ D_ASSERT(device, esector < nr_sectors);
sbnr = BM_SECT_TO_BIT(sector);
ebnr = BM_SECT_TO_BIT(esector);
- return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
+ return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}
-static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector,
+static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
enum drbd_read_balancing rbm)
{
struct backing_dev_info *bdi;
@@ -799,11 +805,11 @@ static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector
switch (rbm) {
case RB_CONGESTED_REMOTE:
- bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+ bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
return bdi_read_congested(bdi);
case RB_LEAST_PENDING:
- return atomic_read(&mdev->local_cnt) >
- atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
+ return atomic_read(&device->local_cnt) >
+ atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
case RB_32K_STRIPING: /* stripe_shift = 15 */
case RB_64K_STRIPING:
case RB_128K_STRIPING:
@@ -813,7 +819,7 @@ static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector
stripe_shift = (rbm - RB_32K_STRIPING + 15);
return (sector >> (stripe_shift - 9)) & 1;
case RB_ROUND_ROBIN:
- return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
+ return test_and_change_bit(READ_BALANCE_RR, &device->flags);
case RB_PREFER_REMOTE:
return true;
case RB_PREFER_LOCAL:
@@ -834,73 +840,73 @@ static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector
static void complete_conflicting_writes(struct drbd_request *req)
{
DEFINE_WAIT(wait);
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct drbd_interval *i;
sector_t sector = req->i.sector;
int size = req->i.size;
- i = drbd_find_overlap(&mdev->write_requests, sector, size);
+ i = drbd_find_overlap(&device->write_requests, sector, size);
if (!i)
return;
for (;;) {
- prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
- i = drbd_find_overlap(&mdev->write_requests, sector, size);
+ prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
+ i = drbd_find_overlap(&device->write_requests, sector, size);
if (!i)
break;
/* Indicate to wake up device->misc_wait on progress. */
i->waiting = true;
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
schedule();
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
}
- finish_wait(&mdev->misc_wait, &wait);
+ finish_wait(&device->misc_wait, &wait);
}
/* called within req_lock and rcu_read_lock() */
-static void maybe_pull_ahead(struct drbd_conf *mdev)
+static void maybe_pull_ahead(struct drbd_device *device)
{
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
struct net_conf *nc;
bool congested = false;
enum drbd_on_congestion on_congestion;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
on_congestion = nc ? nc->on_congestion : OC_BLOCK;
rcu_read_unlock();
if (on_congestion == OC_BLOCK ||
- tconn->agreed_pro_version < 96)
+ connection->agreed_pro_version < 96)
return;
/* If I don't even have good local storage, we can not reasonably try
* to pull ahead of the peer. We also need the local reference to make
- * sure mdev->act_log is there.
+ * sure device->act_log is there.
*/
- if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+ if (!get_ldev_if_state(device, D_UP_TO_DATE))
return;
if (nc->cong_fill &&
- atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
- dev_info(DEV, "Congestion-fill threshold reached\n");
+ atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
+ drbd_info(device, "Congestion-fill threshold reached\n");
congested = true;
}
- if (mdev->act_log->used >= nc->cong_extents) {
- dev_info(DEV, "Congestion-extents threshold reached\n");
+ if (device->act_log->used >= nc->cong_extents) {
+ drbd_info(device, "Congestion-extents threshold reached\n");
congested = true;
}
if (congested) {
/* start a new epoch for non-mirrored writes */
- start_new_tl_epoch(mdev->tconn);
+ start_new_tl_epoch(first_peer_device(device)->connection);
if (on_congestion == OC_PULL_AHEAD)
- _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+ _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
else /*nc->on_congestion == OC_DISCONNECT */
- _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+ _drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
}
- put_ldev(mdev);
+ put_ldev(device);
}
/* If this returns false, and req->private_bio is still set,
@@ -914,19 +920,19 @@ static void maybe_pull_ahead(struct drbd_conf *mdev)
*/
static bool do_remote_read(struct drbd_request *req)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
enum drbd_read_balancing rbm;
if (req->private_bio) {
- if (!drbd_may_do_local_read(mdev,
+ if (!drbd_may_do_local_read(device,
req->i.sector, req->i.size)) {
bio_put(req->private_bio);
req->private_bio = NULL;
- put_ldev(mdev);
+ put_ldev(device);
}
}
- if (mdev->state.pdsk != D_UP_TO_DATE)
+ if (device->state.pdsk != D_UP_TO_DATE)
return false;
if (req->private_bio == NULL)
@@ -936,17 +942,17 @@ static bool do_remote_read(struct drbd_request *req)
* protocol, pending requests etc. */
rcu_read_lock();
- rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
+ rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
rcu_read_unlock();
if (rbm == RB_PREFER_LOCAL && req->private_bio)
return false; /* submit locally */
- if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) {
+ if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
if (req->private_bio) {
bio_put(req->private_bio);
req->private_bio = NULL;
- put_ldev(mdev);
+ put_ldev(device);
}
return true;
}
@@ -959,11 +965,11 @@ static bool do_remote_read(struct drbd_request *req)
* which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
int remote, send_oos;
- remote = drbd_should_do_remote(mdev->state);
- send_oos = drbd_should_send_out_of_sync(mdev->state);
+ remote = drbd_should_do_remote(device->state);
+ send_oos = drbd_should_send_out_of_sync(device->state);
/* Need to replicate writes. Unless it is an empty flush,
* which is better mapped to a DRBD P_BARRIER packet,
@@ -973,7 +979,7 @@ static int drbd_process_write_request(struct drbd_request *req)
* replicating, in which case there is no point. */
if (unlikely(req->i.size == 0)) {
/* The only size==0 bios we expect are empty flushes. */
- D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH);
+ D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
if (remote)
_req_mod(req, QUEUE_AS_DRBD_BARRIER);
return remote;
@@ -982,12 +988,12 @@ static int drbd_process_write_request(struct drbd_request *req)
if (!remote && !send_oos)
return 0;
- D_ASSERT(!(remote && send_oos));
+ D_ASSERT(device, !(remote && send_oos));
if (remote) {
_req_mod(req, TO_BE_SENT);
_req_mod(req, QUEUE_FOR_NET_WRITE);
- } else if (drbd_set_out_of_sync(mdev, req->i.sector, req->i.size))
+ } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
_req_mod(req, QUEUE_FOR_SEND_OOS);
return remote;
@@ -996,36 +1002,36 @@ static int drbd_process_write_request(struct drbd_request *req)
static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct bio *bio = req->private_bio;
const int rw = bio_rw(bio);
- bio->bi_bdev = mdev->ldev->backing_bdev;
+ bio->bi_bdev = device->ldev->backing_bdev;
/* State may have changed since we grabbed our reference on the
* ->ldev member. Double check, and short-circuit to endio.
* In case the last activity log transaction failed to get on
* stable storage, and this is a WRITE, we may not even submit
* this bio. */
- if (get_ldev(mdev)) {
- if (drbd_insert_fault(mdev,
+ if (get_ldev(device)) {
+ if (drbd_insert_fault(device,
rw == WRITE ? DRBD_FAULT_DT_WR
: rw == READ ? DRBD_FAULT_DT_RD
: DRBD_FAULT_DT_RA))
bio_endio(bio, -EIO);
else
generic_make_request(bio);
- put_ldev(mdev);
+ put_ldev(device);
} else
bio_endio(bio, -EIO);
}
-static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req)
+static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
- spin_lock(&mdev->submit.lock);
- list_add_tail(&req->tl_requests, &mdev->submit.writes);
- spin_unlock(&mdev->submit.lock);
- queue_work(mdev->submit.wq, &mdev->submit.worker);
+ spin_lock(&device->submit.lock);
+ list_add_tail(&req->tl_requests, &device->submit.writes);
+ spin_unlock(&device->submit.lock);
+ queue_work(device->submit.wq, &device->submit.worker);
}
/* returns the new drbd_request pointer, if the caller is expected to
@@ -1033,36 +1039,36 @@ static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req)
* request on the submitter thread.
* Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
*/
-struct drbd_request *
-drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+static struct drbd_request *
+drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_time)
{
const int rw = bio_data_dir(bio);
struct drbd_request *req;
/* allocate outside of all locks; */
- req = drbd_req_new(mdev, bio);
+ req = drbd_req_new(device, bio);
if (!req) {
- dec_ap_bio(mdev);
+ dec_ap_bio(device);
/* only pass the error to the upper layers.
* if user cannot handle io errors, that's not our business. */
- dev_err(DEV, "could not kmalloc() req\n");
+ drbd_err(device, "could not kmalloc() req\n");
bio_endio(bio, -ENOMEM);
return ERR_PTR(-ENOMEM);
}
req->start_time = start_time;
- if (!get_ldev(mdev)) {
+ if (!get_ldev(device)) {
bio_put(req->private_bio);
req->private_bio = NULL;
}
/* Update disk stats */
- _drbd_start_io_acct(mdev, req);
+ _drbd_start_io_acct(device, req);
if (rw == WRITE && req->private_bio && req->i.size
- && !test_bit(AL_SUSPENDED, &mdev->flags)) {
- if (!drbd_al_begin_io_fastpath(mdev, &req->i)) {
- drbd_queue_write(mdev, req);
+ && !test_bit(AL_SUSPENDED, &device->flags)) {
+ if (!drbd_al_begin_io_fastpath(device, &req->i)) {
+ drbd_queue_write(device, req);
return NULL;
}
req->rq_state |= RQ_IN_ACT_LOG;
@@ -1071,13 +1077,13 @@ drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long star
return req;
}
-static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *req)
+static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
const int rw = bio_rw(req->master_bio);
struct bio_and_error m = { NULL, };
bool no_remote = false;
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
if (rw == WRITE) {
/* This may temporarily give up the req_lock,
* but will re-acquire it before it returns here.
@@ -1087,17 +1093,17 @@ static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *re
/* check for congestion, and potentially stop sending
* full data updates, but start sending "dirty bits" only. */
- maybe_pull_ahead(mdev);
+ maybe_pull_ahead(device);
}
- if (drbd_suspended(mdev)) {
+ if (drbd_suspended(device)) {
/* push back and retry: */
req->rq_state |= RQ_POSTPONED;
if (req->private_bio) {
bio_put(req->private_bio);
req->private_bio = NULL;
- put_ldev(mdev);
+ put_ldev(device);
}
goto out;
}
@@ -1111,15 +1117,15 @@ static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *re
}
/* which transfer log epoch does this belong to? */
- req->epoch = atomic_read(&mdev->tconn->current_tle_nr);
+ req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);
/* no point in adding empty flushes to the transfer log,
* they are mapped to drbd barriers already. */
if (likely(req->i.size!=0)) {
if (rw == WRITE)
- mdev->tconn->current_tle_writes++;
+ first_peer_device(device)->connection->current_tle_writes++;
- list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log);
+ list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
}
if (rw == WRITE) {
@@ -1139,13 +1145,13 @@ static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *re
/* needs to be marked within the same spinlock */
_req_mod(req, TO_BE_SUBMITTED);
/* but we need to give up the spinlock to submit */
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
drbd_submit_req_private_bio(req);
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
} else if (no_remote) {
nodata:
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
+ drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
(unsigned long long)req->i.sector, req->i.size >> 9);
/* A write may have been queued for send_oos, however.
* So we can not simply free it, we must go through drbd_req_put_completion_ref() */
@@ -1154,21 +1160,21 @@ nodata:
out:
if (drbd_req_put_completion_ref(req, &m, 1))
kref_put(&req->kref, drbd_req_destroy);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
if (m.bio)
- complete_master_bio(mdev, &m);
+ complete_master_bio(device, &m);
}
-void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_time)
{
- struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time);
+ struct drbd_request *req = drbd_request_prepare(device, bio, start_time);
if (IS_ERR_OR_NULL(req))
return;
- drbd_send_and_submit(mdev, req);
+ drbd_send_and_submit(device, req);
}
-static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming)
+static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
struct drbd_request *req, *tmp;
list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
@@ -1176,19 +1182,19 @@ static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming)
if (rw == WRITE /* rw != WRITE should not even end up here! */
&& req->private_bio && req->i.size
- && !test_bit(AL_SUSPENDED, &mdev->flags)) {
- if (!drbd_al_begin_io_fastpath(mdev, &req->i))
+ && !test_bit(AL_SUSPENDED, &device->flags)) {
+ if (!drbd_al_begin_io_fastpath(device, &req->i))
continue;
req->rq_state |= RQ_IN_ACT_LOG;
}
list_del_init(&req->tl_requests);
- drbd_send_and_submit(mdev, req);
+ drbd_send_and_submit(device, req);
}
}
-static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
+static bool prepare_al_transaction_nonblock(struct drbd_device *device,
struct list_head *incoming,
struct list_head *pending)
{
@@ -1196,9 +1202,9 @@ static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
int wake = 0;
int err;
- spin_lock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
- err = drbd_al_begin_io_nonblock(mdev, &req->i);
+ err = drbd_al_begin_io_nonblock(device, &req->i);
if (err == -EBUSY)
wake = 1;
if (err)
@@ -1206,30 +1212,30 @@ static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
req->rq_state |= RQ_IN_ACT_LOG;
list_move_tail(&req->tl_requests, pending);
}
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
if (wake)
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
return !list_empty(pending);
}
void do_submit(struct work_struct *ws)
{
- struct drbd_conf *mdev = container_of(ws, struct drbd_conf, submit.worker);
+ struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
LIST_HEAD(incoming);
LIST_HEAD(pending);
struct drbd_request *req, *tmp;
for (;;) {
- spin_lock(&mdev->submit.lock);
- list_splice_tail_init(&mdev->submit.writes, &incoming);
- spin_unlock(&mdev->submit.lock);
+ spin_lock(&device->submit.lock);
+ list_splice_tail_init(&device->submit.writes, &incoming);
+ spin_unlock(&device->submit.lock);
- submit_fast_path(mdev, &incoming);
+ submit_fast_path(device, &incoming);
if (list_empty(&incoming))
break;
- wait_event(mdev->al_wait, prepare_al_transaction_nonblock(mdev, &incoming, &pending));
+ wait_event(device->al_wait, prepare_al_transaction_nonblock(device, &incoming, &pending));
/* Maybe more was queued, while we prepared the transaction?
* Try to stuff them into this transaction as well.
* Be strictly non-blocking here, no wait_event, we already
@@ -1243,17 +1249,17 @@ void do_submit(struct work_struct *ws)
/* It is ok to look outside the lock,
* it's only an optimization anyways */
- if (list_empty(&mdev->submit.writes))
+ if (list_empty(&device->submit.writes))
break;
- spin_lock(&mdev->submit.lock);
- list_splice_tail_init(&mdev->submit.writes, &more_incoming);
- spin_unlock(&mdev->submit.lock);
+ spin_lock(&device->submit.lock);
+ list_splice_tail_init(&device->submit.writes, &more_incoming);
+ spin_unlock(&device->submit.lock);
if (list_empty(&more_incoming))
break;
- made_progress = prepare_al_transaction_nonblock(mdev, &more_incoming, &more_pending);
+ made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending);
list_splice_tail_init(&more_pending, &pending);
list_splice_tail_init(&more_incoming, &incoming);
@@ -1261,18 +1267,18 @@ void do_submit(struct work_struct *ws)
if (!made_progress)
break;
}
- drbd_al_begin_io_commit(mdev, false);
+ drbd_al_begin_io_commit(device, false);
list_for_each_entry_safe(req, tmp, &pending, tl_requests) {
list_del_init(&req->tl_requests);
- drbd_send_and_submit(mdev, req);
+ drbd_send_and_submit(device, req);
}
}
}
void drbd_make_request(struct request_queue *q, struct bio *bio)
{
- struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
+ struct drbd_device *device = (struct drbd_device *) q->queuedata;
unsigned long start_time;
start_time = jiffies;
@@ -1280,10 +1286,10 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
/*
* what we "blindly" assume:
*/
- D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
+ D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
- inc_ap_bio(mdev);
- __drbd_make_request(mdev, bio, start_time);
+ inc_ap_bio(device);
+ __drbd_make_request(device, bio, start_time);
}
/* This is called by bio_add_page().
@@ -1300,32 +1306,32 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
*/
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
- struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
+ struct drbd_device *device = (struct drbd_device *) q->queuedata;
unsigned int bio_size = bvm->bi_size;
int limit = DRBD_MAX_BIO_SIZE;
int backing_limit;
- if (bio_size && get_ldev(mdev)) {
+ if (bio_size && get_ldev(device)) {
unsigned int max_hw_sectors = queue_max_hw_sectors(q);
struct request_queue * const b =
- mdev->ldev->backing_bdev->bd_disk->queue;
+ device->ldev->backing_bdev->bd_disk->queue;
if (b->merge_bvec_fn) {
backing_limit = b->merge_bvec_fn(b, bvm, bvec);
limit = min(limit, backing_limit);
}
- put_ldev(mdev);
+ put_ldev(device);
if ((limit >> 9) > max_hw_sectors)
limit = max_hw_sectors << 9;
}
return limit;
}
-struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
+static struct drbd_request *find_oldest_request(struct drbd_connection *connection)
{
/* Walk the transfer log,
* and find the oldest not yet completed request */
struct drbd_request *r;
- list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+ list_for_each_entry(r, &connection->transfer_log, tl_requests) {
if (atomic_read(&r->completion_ref))
return r;
}
@@ -1334,21 +1340,21 @@ struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
void request_timer_fn(unsigned long data)
{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_device *device = (struct drbd_device *) data;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
struct drbd_request *req; /* oldest request */
struct net_conf *nc;
unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
unsigned long now;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
- if (nc && mdev->state.conn >= C_WF_REPORT_PARAMS)
+ nc = rcu_dereference(connection->net_conf);
+ if (nc && device->state.conn >= C_WF_REPORT_PARAMS)
ent = nc->timeout * HZ/10 * nc->ko_count;
- if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
- dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
- put_ldev(mdev);
+ if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
+ dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
+ put_ldev(device);
}
rcu_read_unlock();
@@ -1359,11 +1365,11 @@ void request_timer_fn(unsigned long data)
now = jiffies;
- spin_lock_irq(&tconn->req_lock);
- req = find_oldest_request(tconn);
+ spin_lock_irq(&device->resource->req_lock);
+ req = find_oldest_request(connection);
if (!req) {
- spin_unlock_irq(&tconn->req_lock);
- mod_timer(&mdev->request_timer, now + et);
+ spin_unlock_irq(&device->resource->req_lock);
+ mod_timer(&device->request_timer, now + et);
return;
}
@@ -1385,17 +1391,17 @@ void request_timer_fn(unsigned long data)
*/
if (ent && req->rq_state & RQ_NET_PENDING &&
time_after(now, req->start_time + ent) &&
- !time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) {
- dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
- _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
+ !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
+ drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
+ _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
}
- if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev &&
+ if (dt && req->rq_state & RQ_LOCAL_PENDING && req->device == device &&
time_after(now, req->start_time + dt) &&
- !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
- dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
- __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
+ !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
+ drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
+ __drbd_chk_io_error(device, DRBD_FORCE_DETACH);
}
nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
- spin_unlock_irq(&tconn->req_lock);
- mod_timer(&mdev->request_timer, nt);
+ spin_unlock_irq(&connection->resource->req_lock);
+ mod_timer(&device->request_timer, nt);
}
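
[Editor's sketch] The drbd_req.c hunks above are dominated by one mechanical transformation: the per-device shortcut pointer (mdev->tconn) disappears, and connection/resource state is reached through the new object hierarchy (drbd_device -> drbd_peer_device -> drbd_connection, plus drbd_device -> drbd_resource for req_lock). The following is a minimal, self-contained C sketch of that access pattern only; the sketch_* struct layouts and main() are invented for illustration and are not the real drbd_int.h definitions.

#include <stdio.h>

struct sketch_connection  { int current_tle_nr; };
struct sketch_resource    { int req_lock; /* stand-in for the shared spinlock */ };
struct sketch_peer_device { struct sketch_connection *connection; };
struct sketch_device {
	struct sketch_resource    *resource;
	struct sketch_peer_device *peer_device; /* single-peer case, as in this patch */
};

/* mirrors the role of first_peer_device() used throughout the hunks above */
static struct sketch_peer_device *first_peer_device(struct sketch_device *device)
{
	return device->peer_device;
}

int main(void)
{
	struct sketch_connection  conn = { .current_tle_nr = 42 };
	struct sketch_resource    res  = { .req_lock = 0 };
	struct sketch_peer_device pd   = { .connection = &conn };
	struct sketch_device      dev  = { .resource = &res, .peer_device = &pd };

	/* old:  mdev->tconn->current_tle_nr
	 * new:  first_peer_device(device)->connection->current_tle_nr */
	printf("epoch %d\n", first_peer_device(&dev)->connection->current_tle_nr);

	/* old:  spin_lock_irq(&mdev->tconn->req_lock)
	 * new:  spin_lock_irq(&device->resource->req_lock) -- the lock now lives
	 * on the resource, shared by all devices of that resource */
	(void)dev.resource->req_lock;
	return 0;
}
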
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 28e15d91197a..c684c963538e 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -275,17 +275,17 @@ struct bio_and_error {
int error;
};
-extern void start_new_tl_epoch(struct drbd_tconn *tconn);
+extern void start_new_tl_epoch(struct drbd_connection *connection);
extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m);
-extern void complete_master_bio(struct drbd_conf *mdev,
+extern void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
-extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
-extern void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
+extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
+extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
/* this is in drbd_main.c */
extern void drbd_restart_request(struct drbd_request *req);
@@ -294,14 +294,14 @@ extern void drbd_restart_request(struct drbd_request *req);
* outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct bio_and_error m;
int rv;
/* __req_mod possibly frees req, do not touch req after that! */
rv = __req_mod(req, what, &m);
if (m.bio)
- complete_master_bio(mdev, &m);
+ complete_master_bio(device, &m);
return rv;
}
@@ -314,16 +314,16 @@ static inline int req_mod(struct drbd_request *req,
enum drbd_req_event what)
{
unsigned long flags;
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct bio_and_error m;
int rv;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
rv = __req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (m.bio)
- complete_master_bio(mdev, &m);
+ complete_master_bio(device, &m);
return rv;
}
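
[Editor's sketch] The req_mod() wrapper in the drbd_req.h hunk above keeps the established DRBD ordering: request state is modified under the (now resource-wide) req_lock, and complete_master_bio() runs only after the lock has been dropped. A rough userspace sketch of that ordering follows; pthread_mutex_t and the sketch_* names are stand-ins chosen for illustration, not the kernel primitives.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for resource->req_lock */

struct sketch_bio_and_error { int bio; int error; };

/* pretend __req_mod() decided the master bio may now be completed */
static int sketch_req_mod(struct sketch_bio_and_error *m)
{
	m->bio = 1;
	m->error = 0;
	return 0;
}

static void sketch_complete_master_bio(struct sketch_bio_and_error *m)
{
	printf("completing master bio, error=%d\n", m->error);
}

int main(void)
{
	struct sketch_bio_and_error m = { 0, 0 };
	int rv;

	pthread_mutex_lock(&req_lock);
	rv = sketch_req_mod(&m);          /* state change happens under the lock */
	pthread_mutex_unlock(&req_lock);

	if (m.bio)                        /* completion runs outside the lock */
		sketch_complete_master_bio(&m);
	return rv;
}
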
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 216d47b7e88b..1a84345a3868 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -27,13 +27,12 @@
#include <linux/drbd_limits.h>
#include "drbd_int.h"
+#include "drbd_protocol.h"
#include "drbd_req.h"
-/* in drbd_main.c */
-extern void tl_abort_disk_io(struct drbd_conf *mdev);
-
struct after_state_chg_work {
struct drbd_work w;
+ struct drbd_device *device;
union drbd_state os;
union drbd_state ns;
enum chg_state_flags flags;
@@ -50,12 +49,12 @@ enum sanitize_state_warnings {
};
static int w_after_state_ch(struct drbd_work *w, int unused);
-static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+static void after_state_ch(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum chg_state_flags flags);
-static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
-static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_tconn *);
+static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
+static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
-static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
+static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns,
enum sanitize_state_warnings *warn);
static inline bool is_susp(union drbd_state s)
@@ -63,17 +62,18 @@ static inline bool is_susp(union drbd_state s)
return s.susp || s.susp_nod || s.susp_fen;
}
-bool conn_all_vols_unconf(struct drbd_tconn *tconn)
+bool conn_all_vols_unconf(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
bool rv = true;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (mdev->state.disk != D_DISKLESS ||
- mdev->state.conn != C_STANDALONE ||
- mdev->state.role != R_SECONDARY) {
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ if (device->state.disk != D_DISKLESS ||
+ device->state.conn != C_STANDALONE ||
+ device->state.role != R_SECONDARY) {
rv = false;
break;
}
@@ -102,99 +102,111 @@ static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
return R_PRIMARY;
}
-enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
+enum drbd_role conn_highest_role(struct drbd_connection *connection)
{
enum drbd_role role = R_UNKNOWN;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- role = max_role(role, mdev->state.role);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ role = max_role(role, device->state.role);
+ }
rcu_read_unlock();
return role;
}
-enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
+enum drbd_role conn_highest_peer(struct drbd_connection *connection)
{
enum drbd_role peer = R_UNKNOWN;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- peer = max_role(peer, mdev->state.peer);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ peer = max_role(peer, device->state.peer);
+ }
rcu_read_unlock();
return peer;
}
-enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
+enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
{
enum drbd_disk_state ds = D_DISKLESS;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ ds = max_t(enum drbd_disk_state, ds, device->state.disk);
+ }
rcu_read_unlock();
return ds;
}
-enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn)
+enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
{
enum drbd_disk_state ds = D_MASK;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- ds = min_t(enum drbd_disk_state, ds, mdev->state.disk);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ ds = min_t(enum drbd_disk_state, ds, device->state.disk);
+ }
rcu_read_unlock();
return ds;
}
-enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
+enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
{
enum drbd_disk_state ds = D_DISKLESS;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ ds = max_t(enum drbd_disk_state, ds, device->state.pdsk);
+ }
rcu_read_unlock();
return ds;
}
-enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn)
+enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)
{
enum drbd_conns conn = C_MASK;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- conn = min_t(enum drbd_conns, conn, mdev->state.conn);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ conn = min_t(enum drbd_conns, conn, device->state.conn);
+ }
rcu_read_unlock();
return conn;
}
-static bool no_peer_wf_report_params(struct drbd_tconn *tconn)
+static bool no_peer_wf_report_params(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
bool rv = true;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- if (mdev->state.conn == C_WF_REPORT_PARAMS) {
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ if (peer_device->device->state.conn == C_WF_REPORT_PARAMS) {
rv = false;
break;
}
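
[Editor's sketch] The conn_highest_*/conn_lowest_* helpers in the hunk above all change in the same way: instead of walking tconn->volumes and getting a drbd_conf directly, they walk connection->peer_devices and reach the device through peer_device->device. A small self-contained sketch of that extra indirection, using a plain array in place of the kernel idr and invented sketch_* types:

#include <stdio.h>

enum sketch_role { R_UNKNOWN, R_SECONDARY, R_PRIMARY };

struct sketch_device      { enum sketch_role role; };
struct sketch_peer_device { struct sketch_device *device; };
struct sketch_connection  { struct sketch_peer_device *peer_devices[8]; int nr; };

/* old: idr_for_each_entry(&tconn->volumes, mdev, vnr)              -> mdev->state.role
 * new: idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
 *                                         -> peer_device->device->state.role */
static enum sketch_role conn_highest_role(struct sketch_connection *connection)
{
	enum sketch_role role = R_UNKNOWN;
	int vnr;

	for (vnr = 0; vnr < connection->nr; vnr++) {
		struct sketch_device *device = connection->peer_devices[vnr]->device;
		if (device->role > role)
			role = device->role;
	}
	return role;
}

int main(void)
{
	struct sketch_device d0 = { R_SECONDARY }, d1 = { R_PRIMARY };
	struct sketch_peer_device p0 = { &d0 }, p1 = { &d1 };
	struct sketch_connection conn = { { &p0, &p1 }, 2 };

	printf("highest role: %d\n", conn_highest_role(&conn));
	return 0;
}
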
@@ -206,11 +218,11 @@ static bool no_peer_wf_report_params(struct drbd_tconn *tconn)
/**
* cl_wide_st_chg() - true if the state change is a cluster wide one
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @os: old (current) state.
* @ns: new (wanted) state.
*/
-static int cl_wide_st_chg(struct drbd_conf *mdev,
+static int cl_wide_st_chg(struct drbd_device *device,
union drbd_state os, union drbd_state ns)
{
return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
@@ -232,72 +244,72 @@ apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
}
enum drbd_state_rv
-drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
+drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
union drbd_state mask, union drbd_state val)
{
unsigned long flags;
union drbd_state ns;
enum drbd_state_rv rv;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- ns = apply_mask_val(drbd_read_state(mdev), mask, val);
- rv = _drbd_set_state(mdev, ns, f, NULL);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ ns = apply_mask_val(drbd_read_state(device), mask, val);
+ rv = _drbd_set_state(device, ns, f, NULL);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
return rv;
}
/**
* drbd_force_state() - Impose a change which happens outside our control on our state
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
*/
-void drbd_force_state(struct drbd_conf *mdev,
+void drbd_force_state(struct drbd_device *device,
union drbd_state mask, union drbd_state val)
{
- drbd_change_state(mdev, CS_HARD, mask, val);
+ drbd_change_state(device, CS_HARD, mask, val);
}
static enum drbd_state_rv
-_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
+_req_st_cond(struct drbd_device *device, union drbd_state mask,
union drbd_state val)
{
union drbd_state os, ns;
unsigned long flags;
enum drbd_state_rv rv;
- if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
+ if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &device->flags))
return SS_CW_SUCCESS;
- if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
+ if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
return SS_CW_FAILED_BY_PEER;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- os = drbd_read_state(mdev);
- ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ os = drbd_read_state(device);
+ ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
rv = is_valid_transition(os, ns);
if (rv >= SS_SUCCESS)
rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
- if (!cl_wide_st_chg(mdev, os, ns))
+ if (!cl_wide_st_chg(device, os, ns))
rv = SS_CW_NO_NEED;
if (rv == SS_UNKNOWN_ERROR) {
- rv = is_valid_state(mdev, ns);
+ rv = is_valid_state(device, ns);
if (rv >= SS_SUCCESS) {
- rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
if (rv >= SS_SUCCESS)
rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
}
}
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
return rv;
}
/**
* drbd_req_state() - Perform an eventually cluster wide state change
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
* @f: flags
@@ -306,7 +318,7 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
* _drbd_request_state().
*/
static enum drbd_state_rv
-drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
+drbd_req_state(struct drbd_device *device, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
{
struct completion done;
@@ -317,68 +329,68 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
init_completion(&done);
if (f & CS_SERIALIZE)
- mutex_lock(mdev->state_mutex);
+ mutex_lock(device->state_mutex);
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- os = drbd_read_state(mdev);
- ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ os = drbd_read_state(device);
+ ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
rv = is_valid_transition(os, ns);
if (rv < SS_SUCCESS) {
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
goto abort;
}
- if (cl_wide_st_chg(mdev, os, ns)) {
- rv = is_valid_state(mdev, ns);
+ if (cl_wide_st_chg(device, os, ns)) {
+ rv = is_valid_state(device, ns);
if (rv == SS_SUCCESS)
- rv = is_valid_soft_transition(os, ns, mdev->tconn);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (rv < SS_SUCCESS) {
if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
+ print_st_err(device, os, ns, rv);
goto abort;
}
- if (drbd_send_state_req(mdev, mask, val)) {
+ if (drbd_send_state_req(first_peer_device(device), mask, val)) {
rv = SS_CW_FAILED_BY_PEER;
if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
+ print_st_err(device, os, ns, rv);
goto abort;
}
- wait_event(mdev->state_wait,
- (rv = _req_st_cond(mdev, mask, val)));
+ wait_event(device->state_wait,
+ (rv = _req_st_cond(device, mask, val)));
if (rv < SS_SUCCESS) {
if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
+ print_st_err(device, os, ns, rv);
goto abort;
}
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- ns = apply_mask_val(drbd_read_state(mdev), mask, val);
- rv = _drbd_set_state(mdev, ns, f, &done);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ ns = apply_mask_val(drbd_read_state(device), mask, val);
+ rv = _drbd_set_state(device, ns, f, &done);
} else {
- rv = _drbd_set_state(mdev, ns, f, &done);
+ rv = _drbd_set_state(device, ns, f, &done);
}
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
- D_ASSERT(current != mdev->tconn->worker.task);
+ D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
wait_for_completion(&done);
}
abort:
if (f & CS_SERIALIZE)
- mutex_unlock(mdev->state_mutex);
+ mutex_unlock(device->state_mutex);
return rv;
}
/**
* _drbd_request_state() - Request a state change (with flags)
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
* @f: flags
@@ -387,20 +399,20 @@ abort:
* flag, or when logging of failed state change requests is not desired.
*/
enum drbd_state_rv
-_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
+_drbd_request_state(struct drbd_device *device, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
{
enum drbd_state_rv rv;
- wait_event(mdev->state_wait,
- (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
+ wait_event(device->state_wait,
+ (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE);
return rv;
}
-static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
+static void print_st(struct drbd_device *device, char *name, union drbd_state ns)
{
- dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
+ drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
name,
drbd_conn_str(ns.conn),
drbd_role_str(ns.role),
@@ -416,14 +428,14 @@ static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
);
}
-void print_st_err(struct drbd_conf *mdev, union drbd_state os,
+void print_st_err(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum drbd_state_rv err)
{
if (err == SS_IN_TRANSIENT_STATE)
return;
- dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
- print_st(mdev, " state", os);
- print_st(mdev, "wanted", ns);
+ drbd_err(device, "State change failed: %s\n", drbd_set_st_err_str(err));
+ print_st(device, " state", os);
+ print_st(device, "wanted", ns);
}
static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
@@ -457,7 +469,7 @@ static long print_state_change(char *pb, union drbd_state os, union drbd_state n
return pbp - pb;
}
-static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
+static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os, union drbd_state ns,
enum chg_state_flags flags)
{
char pb[300];
@@ -479,10 +491,10 @@ static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, un
ns.user_isp);
if (pbp != pb)
- dev_info(DEV, "%s\n", pb);
+ drbd_info(device, "%s\n", pb);
}
-static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns,
+static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns,
enum chg_state_flags flags)
{
char pb[300];
@@ -496,17 +508,17 @@ static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os,
is_susp(ns));
if (pbp != pb)
- conn_info(tconn, "%s\n", pb);
+ drbd_info(connection, "%s\n", pb);
}
/**
* is_valid_state() - Returns an SS_ error code if ns is not valid
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @ns: State to consider.
*/
static enum drbd_state_rv
-is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
+is_valid_state(struct drbd_device *device, union drbd_state ns)
{
/* See drbd_state_sw_errors in drbd_strings.c */
@@ -516,24 +528,24 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
rcu_read_lock();
fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ fp = rcu_dereference(device->ldev->disk_conf)->fencing;
+ put_ldev(device);
}
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (nc) {
if (!nc->two_primaries && ns.role == R_PRIMARY) {
if (ns.peer == R_PRIMARY)
rv = SS_TWO_PRIMARIES;
- else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
+ else if (conn_highest_peer(first_peer_device(device)->connection) == R_PRIMARY)
rv = SS_O_VOL_PEER_PRI;
}
}
if (rv <= 0)
/* already found a reason to abort */;
- else if (ns.role == R_SECONDARY && mdev->open_cnt)
+ else if (ns.role == R_SECONDARY && device->open_cnt)
rv = SS_DEVICE_IN_USE;
else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
@@ -567,7 +579,7 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
rv = SS_NO_VERIFY_ALG;
else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
- mdev->tconn->agreed_pro_version < 88)
+ first_peer_device(device)->connection->agreed_pro_version < 88)
rv = SS_NOT_SUPPORTED;
else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
@@ -589,12 +601,12 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
* is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
* This function limits state transitions that may be declined by DRBD. I.e.
* user requests (aka soft transitions).
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @ns: new state.
* @os: old state.
*/
static enum drbd_state_rv
-is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_tconn *tconn)
+is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection)
{
enum drbd_state_rv rv = SS_SUCCESS;
@@ -622,7 +634,7 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_t
/* While establishing a connection only allow cstate to change.
Delay/refuse role changes, detach attach etc... */
- if (test_bit(STATE_SENT, &tconn->flags) &&
+ if (test_bit(STATE_SENT, &connection->flags) &&
!(os.conn == C_WF_REPORT_PARAMS ||
(ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
rv = SS_IN_TRANSIENT_STATE;
@@ -703,7 +715,7 @@ is_valid_transition(union drbd_state os, union drbd_state ns)
return rv;
}
-static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
+static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_state_warnings warn)
{
static const char *msg_table[] = {
[NO_WARNING] = "",
@@ -715,12 +727,12 @@ static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_
};
if (warn != NO_WARNING)
- dev_warn(DEV, "%s\n", msg_table[warn]);
+ drbd_warn(device, "%s\n", msg_table[warn]);
}
/**
* sanitize_state() - Resolves implicitly necessary additional changes to a state transition
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @os: old state.
* @ns: new state.
* @warn_sync_abort:
@@ -728,7 +740,7 @@ static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_
* When we lose the connection, we have to set the state of the peer's disk (pdsk)
* to D_UNKNOWN. This rule and many more along those lines are in this function.
*/
-static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
+static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns,
enum sanitize_state_warnings *warn)
{
enum drbd_fencing_p fp;
@@ -738,11 +750,11 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
*warn = NO_WARNING;
fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
+ if (get_ldev(device)) {
rcu_read_lock();
- fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+ fp = rcu_dereference(device->ldev->disk_conf)->fencing;
rcu_read_unlock();
- put_ldev(mdev);
+ put_ldev(device);
}
/* Implications from connection to peer and peer_isp */
@@ -768,17 +780,17 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
/* Connection breaks down before we finished "Negotiating" */
if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
- get_ldev_if_state(mdev, D_NEGOTIATING)) {
- if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
- ns.disk = mdev->new_state_tmp.disk;
- ns.pdsk = mdev->new_state_tmp.pdsk;
+ get_ldev_if_state(device, D_NEGOTIATING)) {
+ if (device->ed_uuid == device->ldev->md.uuid[UI_CURRENT]) {
+ ns.disk = device->new_state_tmp.disk;
+ ns.pdsk = device->new_state_tmp.pdsk;
} else {
if (warn)
*warn = CONNECTION_LOST_NEGOTIATING;
ns.disk = D_DISKLESS;
ns.pdsk = D_UNKNOWN;
}
- put_ldev(mdev);
+ put_ldev(device);
}
/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
@@ -873,7 +885,7 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
(ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
- if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
+ if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO &&
(ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
@@ -892,42 +904,42 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
return ns;
}
-void drbd_resume_al(struct drbd_conf *mdev)
+void drbd_resume_al(struct drbd_device *device)
{
- if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
- dev_info(DEV, "Resumed AL updates\n");
+ if (test_and_clear_bit(AL_SUSPENDED, &device->flags))
+ drbd_info(device, "Resumed AL updates\n");
}
/* helper for __drbd_set_state */
-static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
+static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
{
- if (mdev->tconn->agreed_pro_version < 90)
- mdev->ov_start_sector = 0;
- mdev->rs_total = drbd_bm_bits(mdev);
- mdev->ov_position = 0;
+ if (first_peer_device(device)->connection->agreed_pro_version < 90)
+ device->ov_start_sector = 0;
+ device->rs_total = drbd_bm_bits(device);
+ device->ov_position = 0;
if (cs == C_VERIFY_T) {
/* starting online verify from an arbitrary position
* does not fit well into the existing protocol.
* on C_VERIFY_T, we initialize ov_left and friends
* implicitly in receive_DataRequest once the
* first P_OV_REQUEST is received */
- mdev->ov_start_sector = ~(sector_t)0;
+ device->ov_start_sector = ~(sector_t)0;
} else {
- unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
- if (bit >= mdev->rs_total) {
- mdev->ov_start_sector =
- BM_BIT_TO_SECT(mdev->rs_total - 1);
- mdev->rs_total = 1;
+ unsigned long bit = BM_SECT_TO_BIT(device->ov_start_sector);
+ if (bit >= device->rs_total) {
+ device->ov_start_sector =
+ BM_BIT_TO_SECT(device->rs_total - 1);
+ device->rs_total = 1;
} else
- mdev->rs_total -= bit;
- mdev->ov_position = mdev->ov_start_sector;
+ device->rs_total -= bit;
+ device->ov_position = device->ov_start_sector;
}
- mdev->ov_left = mdev->rs_total;
+ device->ov_left = device->rs_total;
}
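
set_ov_position() illustrates the most common mechanical change in this patch: fields that used to be reached via mdev->tconn are now reached via first_peer_device(device)->connection. A simplified sketch of that object graph, with toy types and a single peer device (the real code keeps a list of peer devices per device):

struct toy_connection { int agreed_pro_version; };

struct toy_peer_device { struct toy_connection *connection; };

struct toy_device { struct toy_peer_device *peer_device; };

/* toy stand-in for first_peer_device(); the real helper returns the
 * first entry of the device's peer_devices list */
static struct toy_peer_device *first_peer_device(struct toy_device *device)
{
	return device->peer_device;
}

static int supports_arbitrary_ov_start(struct toy_device *device)
{
	/* mirrors the agreed_pro_version < 90 check above */
	return first_peer_device(device)->connection->agreed_pro_version >= 90;
}
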
/**
* __drbd_set_state() - Set a new DRBD state
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @ns: new state.
* @flags: Flags
* @done: Optional completion, that will get completed after the after_state_ch() finished
@@ -935,7 +947,7 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
* Caller needs to hold req_lock, and global_state_lock. Do not call directly.
*/
enum drbd_state_rv
-__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+__drbd_set_state(struct drbd_device *device, union drbd_state ns,
enum chg_state_flags flags, struct completion *done)
{
union drbd_state os;
@@ -944,9 +956,9 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
struct after_state_chg_work *ascw;
bool did_remote, should_do_remote;
- os = drbd_read_state(mdev);
+ os = drbd_read_state(device);
- ns = sanitize_state(mdev, ns, &ssw);
+ ns = sanitize_state(device, ns, &ssw);
if (ns.i == os.i)
return SS_NOTHING_TO_DO;
@@ -958,32 +970,33 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
/* pre-state-change checks ; only look at ns */
/* See drbd_state_sw_errors in drbd_strings.c */
- rv = is_valid_state(mdev, ns);
+ rv = is_valid_state(device, ns);
if (rv < SS_SUCCESS) {
/* If the old state was illegal as well, then let
this happen...*/
- if (is_valid_state(mdev, os) == rv)
- rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ if (is_valid_state(device, os) == rv)
+ rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
} else
- rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
}
if (rv < SS_SUCCESS) {
if (flags & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
+ print_st_err(device, os, ns, rv);
return rv;
}
- print_sanitize_warnings(mdev, ssw);
+ print_sanitize_warnings(device, ssw);
- drbd_pr_state_change(mdev, os, ns, flags);
+ drbd_pr_state_change(device, os, ns, flags);
/* Display changes to the susp* flags that were caused by the call to
sanitize_state(). Only display it here if we were not called from
_conn_request_state() */
if (!(flags & CS_DC_SUSP))
- conn_pr_state_change(mdev->tconn, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);
+ conn_pr_state_change(first_peer_device(device)->connection, os, ns,
+ (flags & ~CS_DC_MASK) | CS_DC_SUSP);
/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
* on the ldev here, to be sure the transition -> D_DISKLESS resp.
@@ -991,55 +1004,55 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
* after_state_ch works run, where we put_ldev again. */
if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
(os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
- atomic_inc(&mdev->local_cnt);
+ atomic_inc(&device->local_cnt);
- did_remote = drbd_should_do_remote(mdev->state);
- mdev->state.i = ns.i;
- should_do_remote = drbd_should_do_remote(mdev->state);
- mdev->tconn->susp = ns.susp;
- mdev->tconn->susp_nod = ns.susp_nod;
- mdev->tconn->susp_fen = ns.susp_fen;
+ did_remote = drbd_should_do_remote(device->state);
+ device->state.i = ns.i;
+ should_do_remote = drbd_should_do_remote(device->state);
+ device->resource->susp = ns.susp;
+ device->resource->susp_nod = ns.susp_nod;
+ device->resource->susp_fen = ns.susp_fen;
/* put replicated vs not-replicated requests in separate epochs */
if (did_remote != should_do_remote)
- start_new_tl_epoch(mdev->tconn);
+ start_new_tl_epoch(first_peer_device(device)->connection);
if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
- drbd_print_uuids(mdev, "attached to UUIDs");
+ drbd_print_uuids(device, "attached to UUIDs");
/* Wake up role changes, that were delayed because of connection establishing */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
- no_peer_wf_report_params(mdev->tconn))
- clear_bit(STATE_SENT, &mdev->tconn->flags);
+ no_peer_wf_report_params(first_peer_device(device)->connection))
+ clear_bit(STATE_SENT, &first_peer_device(device)->connection->flags);
- wake_up(&mdev->misc_wait);
- wake_up(&mdev->state_wait);
- wake_up(&mdev->tconn->ping_wait);
+ wake_up(&device->misc_wait);
+ wake_up(&device->state_wait);
+ wake_up(&first_peer_device(device)->connection->ping_wait);
/* Aborted verify run, or we reached the stop sector.
* Log the last position, unless end-of-device. */
if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
ns.conn <= C_CONNECTED) {
- mdev->ov_start_sector =
- BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
- if (mdev->ov_left)
- dev_info(DEV, "Online Verify reached sector %llu\n",
- (unsigned long long)mdev->ov_start_sector);
+ device->ov_start_sector =
+ BM_BIT_TO_SECT(drbd_bm_bits(device) - device->ov_left);
+ if (device->ov_left)
+ drbd_info(device, "Online Verify reached sector %llu\n",
+ (unsigned long long)device->ov_start_sector);
}
if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
(ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
- dev_info(DEV, "Syncer continues.\n");
- mdev->rs_paused += (long)jiffies
- -(long)mdev->rs_mark_time[mdev->rs_last_mark];
+ drbd_info(device, "Syncer continues.\n");
+ device->rs_paused += (long)jiffies
+ -(long)device->rs_mark_time[device->rs_last_mark];
if (ns.conn == C_SYNC_TARGET)
- mod_timer(&mdev->resync_timer, jiffies);
+ mod_timer(&device->resync_timer, jiffies);
}
if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
(ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
- dev_info(DEV, "Resync suspended\n");
- mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
+ drbd_info(device, "Resync suspended\n");
+ device->rs_mark_time[device->rs_last_mark] = jiffies;
}
if (os.conn == C_CONNECTED &&
@@ -1047,77 +1060,77 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
unsigned long now = jiffies;
int i;
- set_ov_position(mdev, ns.conn);
- mdev->rs_start = now;
- mdev->rs_last_events = 0;
- mdev->rs_last_sect_ev = 0;
- mdev->ov_last_oos_size = 0;
- mdev->ov_last_oos_start = 0;
+ set_ov_position(device, ns.conn);
+ device->rs_start = now;
+ device->rs_last_events = 0;
+ device->rs_last_sect_ev = 0;
+ device->ov_last_oos_size = 0;
+ device->ov_last_oos_start = 0;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = mdev->ov_left;
- mdev->rs_mark_time[i] = now;
+ device->rs_mark_left[i] = device->ov_left;
+ device->rs_mark_time[i] = now;
}
- drbd_rs_controller_reset(mdev);
+ drbd_rs_controller_reset(device);
if (ns.conn == C_VERIFY_S) {
- dev_info(DEV, "Starting Online Verify from sector %llu\n",
- (unsigned long long)mdev->ov_position);
- mod_timer(&mdev->resync_timer, jiffies);
+ drbd_info(device, "Starting Online Verify from sector %llu\n",
+ (unsigned long long)device->ov_position);
+ mod_timer(&device->resync_timer, jiffies);
}
}
- if (get_ldev(mdev)) {
- u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
+ if (get_ldev(device)) {
+ u32 mdf = device->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
mdf &= ~MDF_AL_CLEAN;
- if (test_bit(CRASHED_PRIMARY, &mdev->flags))
+ if (test_bit(CRASHED_PRIMARY, &device->flags))
mdf |= MDF_CRASHED_PRIMARY;
- if (mdev->state.role == R_PRIMARY ||
- (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
+ if (device->state.role == R_PRIMARY ||
+ (device->state.pdsk < D_INCONSISTENT && device->state.peer == R_PRIMARY))
mdf |= MDF_PRIMARY_IND;
- if (mdev->state.conn > C_WF_REPORT_PARAMS)
+ if (device->state.conn > C_WF_REPORT_PARAMS)
mdf |= MDF_CONNECTED_IND;
- if (mdev->state.disk > D_INCONSISTENT)
+ if (device->state.disk > D_INCONSISTENT)
mdf |= MDF_CONSISTENT;
- if (mdev->state.disk > D_OUTDATED)
+ if (device->state.disk > D_OUTDATED)
mdf |= MDF_WAS_UP_TO_DATE;
- if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
+ if (device->state.pdsk <= D_OUTDATED && device->state.pdsk >= D_INCONSISTENT)
mdf |= MDF_PEER_OUT_DATED;
- if (mdf != mdev->ldev->md.flags) {
- mdev->ldev->md.flags = mdf;
- drbd_md_mark_dirty(mdev);
+ if (mdf != device->ldev->md.flags) {
+ device->ldev->md.flags = mdf;
+ drbd_md_mark_dirty(device);
}
if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
- drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
- put_ldev(mdev);
+ drbd_set_ed_uuid(device, device->ldev->md.uuid[UI_CURRENT]);
+ put_ldev(device);
}
/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
- set_bit(CONSIDER_RESYNC, &mdev->flags);
+ set_bit(CONSIDER_RESYNC, &device->flags);
/* Receiver should clean up itself */
if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
- drbd_thread_stop_nowait(&mdev->tconn->receiver);
+ drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver);
/* Now the receiver finished cleaning up itself, it should die */
if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
- drbd_thread_stop_nowait(&mdev->tconn->receiver);
+ drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver);
/* Upon network failure, we need to restart the receiver. */
if (os.conn > C_WF_CONNECTION &&
ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
- drbd_thread_restart_nowait(&mdev->tconn->receiver);
+ drbd_thread_restart_nowait(&first_peer_device(device)->connection->receiver);
/* Resume AL writing if we get a connection */
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
- drbd_resume_al(mdev);
- mdev->tconn->connect_cnt++;
+ drbd_resume_al(device);
+ first_peer_device(device)->connection->connect_cnt++;
}
/* remember last attach time so request_timer_fn() won't
@@ -1125,7 +1138,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
* previously frozen IO */
if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
ns.disk > D_NEGOTIATING)
- mdev->last_reattach_jif = jiffies;
+ device->last_reattach_jif = jiffies;
ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
if (ascw) {
@@ -1133,11 +1146,12 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
ascw->ns = ns;
ascw->flags = flags;
ascw->w.cb = w_after_state_ch;
- ascw->w.mdev = mdev;
+ ascw->device = device;
ascw->done = done;
- drbd_queue_work(&mdev->tconn->sender_work, &ascw->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &ascw->w);
} else {
- dev_err(DEV, "Could not kmalloc an ascw\n");
+ drbd_err(device, "Could not kmalloc an ascw\n");
}
return rv;
@@ -1147,66 +1161,65 @@ static int w_after_state_ch(struct drbd_work *w, int unused)
{
struct after_state_chg_work *ascw =
container_of(w, struct after_state_chg_work, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device = ascw->device;
- after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
- if (ascw->flags & CS_WAIT_COMPLETE) {
- D_ASSERT(ascw->done != NULL);
+ after_state_ch(device, ascw->os, ascw->ns, ascw->flags);
+ if (ascw->flags & CS_WAIT_COMPLETE)
complete(ascw->done);
- }
kfree(ascw);
return 0;
}
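
w_after_state_ch() also reflects how work items now find their device: the pointer moves from the generic drbd_work into the specific after_state_chg_work, and the callback recovers its context with container_of(). A toy sketch of that layout, with all types invented for illustration:

#include <stddef.h>

struct toy_work { int (*cb)(struct toy_work *, int); };
struct toy_device { unsigned int minor; };

struct toy_after_state_chg_work {
	struct toy_work w;
	struct toy_device *device;	/* was carried on the generic work item */
};

#define toy_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static int toy_after_state_ch(struct toy_work *w, int unused)
{
	struct toy_after_state_chg_work *ascw =
		toy_container_of(w, struct toy_after_state_chg_work, w);

	(void)unused;
	return (int)ascw->device->minor;
}
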
-static void abw_start_sync(struct drbd_conf *mdev, int rv)
+static void abw_start_sync(struct drbd_device *device, int rv)
{
if (rv) {
- dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
- _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
+ drbd_err(device, "Writing the bitmap failed not starting resync.\n");
+ _drbd_request_state(device, NS(conn, C_CONNECTED), CS_VERBOSE);
return;
}
- switch (mdev->state.conn) {
+ switch (device->state.conn) {
case C_STARTING_SYNC_T:
- _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+ _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
break;
case C_STARTING_SYNC_S:
- drbd_start_resync(mdev, C_SYNC_SOURCE);
+ drbd_start_resync(device, C_SYNC_SOURCE);
break;
}
}
-int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
+int drbd_bitmap_io_from_worker(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags)
{
int rv;
- D_ASSERT(current == mdev->tconn->worker.task);
+ D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
- /* open coded non-blocking drbd_suspend_io(mdev); */
- set_bit(SUSPEND_IO, &mdev->flags);
+ /* open coded non-blocking drbd_suspend_io(device); */
+ set_bit(SUSPEND_IO, &device->flags);
- drbd_bm_lock(mdev, why, flags);
- rv = io_fn(mdev);
- drbd_bm_unlock(mdev);
+ drbd_bm_lock(device, why, flags);
+ rv = io_fn(device);
+ drbd_bm_unlock(device);
- drbd_resume_io(mdev);
+ drbd_resume_io(device);
return rv;
}
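
The D_ASSERT() call above now names the device it reports against instead of expanding an implicit DEV. A rough toy equivalent of that assertion shape, not the real kernel macro:

#include <stdio.h>

struct toy_device { unsigned int minor; };

/* toy stand-in for D_ASSERT(device, expr): log against the given device */
#define toy_assert(device, expr) \
	do { \
		if (!(expr)) \
			fprintf(stderr, "drbd%u: ASSERTION %s FAILED in %s\n", \
				(device)->minor, #expr, __func__); \
	} while (0)

static void toy_check(struct toy_device *device, int running_in_worker)
{
	toy_assert(device, running_in_worker);
}
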
/**
* after_state_ch() - Perform after state change actions that may sleep
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @os: old state.
* @ns: new state.
* @flags: Flags
*/
-static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+static void after_state_ch(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum chg_state_flags flags)
{
+ struct drbd_resource *resource = device->resource;
struct sib_info sib;
sib.sib_reason = SIB_STATE_CHANGE;
@@ -1214,63 +1227,63 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
sib.ns = ns;
if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
- clear_bit(CRASHED_PRIMARY, &mdev->flags);
- if (mdev->p_uuid)
- mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
+ clear_bit(CRASHED_PRIMARY, &device->flags);
+ if (device->p_uuid)
+ device->p_uuid[UI_FLAGS] &= ~((u64)2);
}
/* Inform userspace about the change... */
- drbd_bcast_event(mdev, &sib);
+ drbd_bcast_event(device, &sib);
if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
(ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
- drbd_khelper(mdev, "pri-on-incon-degr");
+ drbd_khelper(device, "pri-on-incon-degr");
/* Here we have the actions that are performed after a
state change. This function might sleep */
if (ns.susp_nod) {
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
enum drbd_req_event what = NOTHING;
- spin_lock_irq(&tconn->req_lock);
- if (os.conn < C_CONNECTED && conn_lowest_conn(tconn) >= C_CONNECTED)
+ spin_lock_irq(&device->resource->req_lock);
+ if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
what = RESEND;
if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
- conn_lowest_disk(tconn) > D_NEGOTIATING)
+ conn_lowest_disk(connection) > D_NEGOTIATING)
what = RESTART_FROZEN_DISK_IO;
- if (tconn->susp_nod && what != NOTHING) {
- _tl_restart(tconn, what);
- _conn_request_state(tconn,
+ if (resource->susp_nod && what != NOTHING) {
+ _tl_restart(connection, what);
+ _conn_request_state(connection,
(union drbd_state) { { .susp_nod = 1 } },
(union drbd_state) { { .susp_nod = 0 } },
CS_VERBOSE);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
}
if (ns.susp_fen) {
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
- spin_lock_irq(&tconn->req_lock);
- if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) {
+ spin_lock_irq(&device->resource->req_lock);
+ if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
/* case2: The connection was established again: */
- struct drbd_conf *odev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, odev, vnr)
- clear_bit(NEW_CUR_UUID, &odev->flags);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ clear_bit(NEW_CUR_UUID, &peer_device->device->flags);
rcu_read_unlock();
- _tl_restart(tconn, RESEND);
- _conn_request_state(tconn,
+ _tl_restart(connection, RESEND);
+ _conn_request_state(connection,
(union drbd_state) { { .susp_fen = 1 } },
(union drbd_state) { { .susp_fen = 0 } },
CS_VERBOSE);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
}
/* Became sync source. With protocol >= 96, we still need to send out
@@ -1279,9 +1292,9 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* which is unexpected. */
if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
(ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
- mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
- drbd_gen_and_send_sync_uuid(mdev);
- put_ldev(mdev);
+ first_peer_device(device)->connection->agreed_pro_version >= 96 && get_ldev(device)) {
+ drbd_gen_and_send_sync_uuid(first_peer_device(device));
+ put_ldev(device);
}
/* Do not change the order of the if above and the two below... */
@@ -1289,20 +1302,20 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) { /* attach on the peer */
/* we probably will start a resync soon.
* make sure those things are properly reset. */
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
- drbd_rs_cancel_all(mdev);
+ device->rs_total = 0;
+ device->rs_failed = 0;
+ atomic_set(&device->rs_pending_cnt, 0);
+ drbd_rs_cancel_all(device);
- drbd_send_uuids(mdev);
- drbd_send_state(mdev, ns);
+ drbd_send_uuids(first_peer_device(device));
+ drbd_send_state(first_peer_device(device), ns);
}
/* No point in queuing send_bitmap if we don't have a connection
* anymore, so check also the _current_ state, not only the new state
* at the time this work was queued. */
if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
- mdev->state.conn == C_WF_BITMAP_S)
- drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
+ device->state.conn == C_WF_BITMAP_S)
+ drbd_queue_bitmap_io(device, &drbd_send_bitmap, NULL,
"send_bitmap (WFBitMapS)",
BM_LOCKED_TEST_ALLOWED);
@@ -1313,80 +1326,80 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
&& (ns.pdsk < D_INCONSISTENT ||
ns.pdsk == D_UNKNOWN ||
ns.pdsk == D_OUTDATED)) {
- if (get_ldev(mdev)) {
+ if (get_ldev(device)) {
if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- if (drbd_suspended(mdev)) {
- set_bit(NEW_CUR_UUID, &mdev->flags);
+ device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+ if (drbd_suspended(device)) {
+ set_bit(NEW_CUR_UUID, &device->flags);
} else {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
+ drbd_uuid_new_current(device);
+ drbd_send_uuids(first_peer_device(device));
}
}
- put_ldev(mdev);
+ put_ldev(device);
}
}
- if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
+ if (ns.pdsk < D_INCONSISTENT && get_ldev(device)) {
if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
+ device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+ drbd_uuid_new_current(device);
+ drbd_send_uuids(first_peer_device(device));
}
/* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
/* We may still be Primary ourselves.
* No harm done if the bitmap still changes,
* redirtied pages will follow later. */
- drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ drbd_bitmap_io_from_worker(device, &drbd_bm_write,
"demote diskless peer", BM_LOCKED_SET_ALLOWED);
- put_ldev(mdev);
+ put_ldev(device);
}
/* Write out all changed bits on demote.
* Though, no need to do that just yet
* if there is a resync going on still */
if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
- mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
+ device->state.conn <= C_CONNECTED && get_ldev(device)) {
/* No changes to the bitmap expected this time, so assert that,
* even though no harm was done if it did change. */
- drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ drbd_bitmap_io_from_worker(device, &drbd_bm_write,
"demote", BM_LOCKED_TEST_ALLOWED);
- put_ldev(mdev);
+ put_ldev(device);
}
/* Last part of the attaching process ... */
if (ns.conn >= C_CONNECTED &&
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
- drbd_send_sizes(mdev, 0, 0); /* to start sync... */
- drbd_send_uuids(mdev);
- drbd_send_state(mdev, ns);
+ drbd_send_sizes(first_peer_device(device), 0, 0); /* to start sync... */
+ drbd_send_uuids(first_peer_device(device));
+ drbd_send_state(first_peer_device(device), ns);
}
/* We want to pause/continue resync, tell peer. */
if (ns.conn >= C_CONNECTED &&
((os.aftr_isp != ns.aftr_isp) ||
(os.user_isp != ns.user_isp)))
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
/* In case one of the isp bits got set, suspend other devices. */
if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
(ns.aftr_isp || ns.peer_isp || ns.user_isp))
- suspend_other_sg(mdev);
+ suspend_other_sg(device);
/* Make sure the peer gets informed about eventual state
changes (ISP bits) while we were in WFReportParams. */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
/* We are in the progress to start a full sync... */
if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
(os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
/* no other bitmap changes expected during this phase */
- drbd_queue_bitmap_io(mdev,
+ drbd_queue_bitmap_io(device,
&drbd_bmio_set_n_write, &abw_start_sync,
"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
@@ -1399,15 +1412,15 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* our cleanup here with the transition to D_DISKLESS.
* But it is still not safe to dereference ldev here, since
* we might come from a failed Attach before ldev was set. */
- if (mdev->ldev) {
+ if (device->ldev) {
rcu_read_lock();
- eh = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
+ eh = rcu_dereference(device->ldev->disk_conf)->on_io_error;
rcu_read_unlock();
- was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+ was_io_error = test_and_clear_bit(WAS_IO_ERROR, &device->flags);
if (was_io_error && eh == EP_CALL_HELPER)
- drbd_khelper(mdev, "local-io-error");
+ drbd_khelper(device, "local-io-error");
/* Immediately allow completion of all application IO,
* that waits for completion from the local disk,
@@ -1422,76 +1435,76 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* So aborting local requests may cause crashes,
* or even worse, silent data corruption.
*/
- if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
- tl_abort_disk_io(mdev);
+ if (test_and_clear_bit(FORCE_DETACH, &device->flags))
+ tl_abort_disk_io(device);
/* current state still has to be D_FAILED,
* there is only one way out: to D_DISKLESS,
* and that may only happen after our put_ldev below. */
- if (mdev->state.disk != D_FAILED)
- dev_err(DEV,
+ if (device->state.disk != D_FAILED)
+ drbd_err(device,
"ASSERT FAILED: disk is %s during detach\n",
- drbd_disk_str(mdev->state.disk));
+ drbd_disk_str(device->state.disk));
if (ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
- drbd_rs_cancel_all(mdev);
+ drbd_rs_cancel_all(device);
/* In case we want to get something to stable storage still,
* this may be the last chance.
* Following put_ldev may transition to D_DISKLESS. */
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
}
- put_ldev(mdev);
+ put_ldev(device);
}
- /* second half of local IO error, failure to attach,
- * or administrative detach,
- * after local_cnt references have reached zero again */
- if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
- /* We must still be diskless,
- * re-attach has to be serialized with this! */
- if (mdev->state.disk != D_DISKLESS)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s while going diskless\n",
- drbd_disk_str(mdev->state.disk));
+ /* second half of local IO error, failure to attach,
+ * or administrative detach,
+ * after local_cnt references have reached zero again */
+ if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+ /* We must still be diskless,
+ * re-attach has to be serialized with this! */
+ if (device->state.disk != D_DISKLESS)
+ drbd_err(device,
+ "ASSERT FAILED: disk is %s while going diskless\n",
+ drbd_disk_str(device->state.disk));
if (ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
/* corresponding get_ldev in __drbd_set_state
* this may finally trigger drbd_ldev_destroy. */
- put_ldev(mdev);
+ put_ldev(device);
}
/* Notify peer that I had a local IO error and did not detach. */
if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
/* Disks got bigger while they were detached */
if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
- test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
+ test_and_clear_bit(RESYNC_AFTER_NEG, &device->flags)) {
if (ns.conn == C_CONNECTED)
- resync_after_online_grow(mdev);
+ resync_after_online_grow(device);
}
/* A resync finished or aborted, wake paused devices... */
if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
(os.peer_isp && !ns.peer_isp) ||
(os.user_isp && !ns.user_isp))
- resume_next_sg(mdev);
+ resume_next_sg(device);
/* sync target done with resync. Explicitly notify peer, even though
* it should (at least for non-empty resyncs) already know itself. */
if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
/* Verify finished, or reached stop sector. Peer did not know about
* the stop sector, and we may even have changed the stop sector during
* verify to interrupt/stop early. Send the new state. */
if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
- && verify_can_do_stop_sector(mdev))
- drbd_send_state(mdev, ns);
+ && verify_can_do_stop_sector(device))
+ drbd_send_state(first_peer_device(device), ns);
/* This triggers bitmap writeout of potentially still unwritten pages
* if the resync finished cleanly, or aborted because of peer disk
@@ -1500,56 +1513,57 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* any bitmap writeout anymore.
* No harm done if some bits change during this phase.
*/
- if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
- drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
+ if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(device)) {
+ drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL,
"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
- put_ldev(mdev);
+ put_ldev(device);
}
if (ns.disk == D_DISKLESS &&
ns.conn == C_STANDALONE &&
ns.role == R_SECONDARY) {
if (os.aftr_isp != ns.aftr_isp)
- resume_next_sg(mdev);
+ resume_next_sg(device);
}
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
}
struct after_conn_state_chg_work {
struct drbd_work w;
enum drbd_conns oc;
union drbd_state ns_min;
- union drbd_state ns_max; /* new, max state, over all mdevs */
+ union drbd_state ns_max; /* new, max state, over all devices */
enum chg_state_flags flags;
+ struct drbd_connection *connection;
};
static int w_after_conn_state_ch(struct drbd_work *w, int unused)
{
struct after_conn_state_chg_work *acscw =
container_of(w, struct after_conn_state_chg_work, w);
- struct drbd_tconn *tconn = w->tconn;
+ struct drbd_connection *connection = acscw->connection;
enum drbd_conns oc = acscw->oc;
union drbd_state ns_max = acscw->ns_max;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
kfree(acscw);
/* Upon network configuration, we need to start the receiver */
if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
- drbd_thread_start(&tconn->receiver);
+ drbd_thread_start(&connection->receiver);
if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
struct net_conf *old_conf;
- mutex_lock(&tconn->conf_update);
- old_conf = tconn->net_conf;
- tconn->my_addr_len = 0;
- tconn->peer_addr_len = 0;
- rcu_assign_pointer(tconn->net_conf, NULL);
- conn_free_crypto(tconn);
- mutex_unlock(&tconn->conf_update);
+ mutex_lock(&connection->resource->conf_update);
+ old_conf = connection->net_conf;
+ connection->my_addr_len = 0;
+ connection->peer_addr_len = 0;
+ rcu_assign_pointer(connection->net_conf, NULL);
+ conn_free_crypto(connection);
+ mutex_unlock(&connection->resource->conf_update);
synchronize_rcu();
kfree(old_conf);
@@ -1559,45 +1573,47 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
/* case1: The outdate peer handler is successful: */
if (ns_max.pdsk <= D_OUTDATED) {
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
- drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ if (test_bit(NEW_CUR_UUID, &device->flags)) {
+ drbd_uuid_new_current(device);
+ clear_bit(NEW_CUR_UUID, &device->flags);
}
}
rcu_read_unlock();
- spin_lock_irq(&tconn->req_lock);
- _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
- _conn_request_state(tconn,
+ spin_lock_irq(&connection->resource->req_lock);
+ _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
+ _conn_request_state(connection,
(union drbd_state) { { .susp_fen = 1 } },
(union drbd_state) { { .susp_fen = 0 } },
CS_VERBOSE);
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
}
}
- kref_put(&tconn->kref, &conn_destroy);
+ kref_put(&connection->kref, drbd_destroy_connection);
- conn_md_sync(tconn);
+ conn_md_sync(connection);
return 0;
}
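
w_after_conn_state_ch() shows the other recurring loop change: iteration is over the connection's peer devices, and the device is reached through peer_device->device. A simplified sketch using a plain array in place of the kernel idr, with invented toy types:

struct toy_device { unsigned long flags; };
struct toy_peer_device { struct toy_device *device; };

struct toy_connection {
	struct toy_peer_device *peer_devices;
	int nr_peer_devices;
};

static void toy_clear_new_cur_uuid(struct toy_connection *connection)
{
	int vnr;

	for (vnr = 0; vnr < connection->nr_peer_devices; vnr++) {
		struct toy_device *device = connection->peer_devices[vnr].device;

		device->flags &= ~1UL;	/* stands in for clear_bit(NEW_CUR_UUID, &device->flags) */
	}
}
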
-void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
+void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf)
{
enum chg_state_flags flags = ~0;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr, first_vol = 1;
union drbd_dev_state os, cs = {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
- .conn = tconn->cstate,
+ .conn = connection->cstate,
.disk = D_DISKLESS,
.pdsk = D_UNKNOWN,
} };
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- os = mdev->state;
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ os = device->state;
if (first_vol) {
cs = os;
@@ -1628,18 +1644,19 @@ void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum
}
static enum drbd_state_rv
-conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_is_valid_transition(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags)
{
enum drbd_state_rv rv = SS_SUCCESS;
union drbd_state ns, os;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- os = drbd_read_state(mdev);
- ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ os = drbd_read_state(device);
+ ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
ns.disk = os.disk;
@@ -1648,30 +1665,29 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
continue;
rv = is_valid_transition(os, ns);
- if (rv < SS_SUCCESS)
- break;
- if (!(flags & CS_HARD)) {
- rv = is_valid_state(mdev, ns);
+ if (rv >= SS_SUCCESS && !(flags & CS_HARD)) {
+ rv = is_valid_state(device, ns);
if (rv < SS_SUCCESS) {
- if (is_valid_state(mdev, os) == rv)
- rv = is_valid_soft_transition(os, ns, tconn);
+ if (is_valid_state(device, os) == rv)
+ rv = is_valid_soft_transition(os, ns, connection);
} else
- rv = is_valid_soft_transition(os, ns, tconn);
+ rv = is_valid_soft_transition(os, ns, connection);
}
- if (rv < SS_SUCCESS)
+
+ if (rv < SS_SUCCESS) {
+ if (flags & CS_VERBOSE)
+ print_st_err(device, os, ns, rv);
break;
+ }
}
rcu_read_unlock();
- if (rv < SS_SUCCESS && flags & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
-
return rv;
}
void
-conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_set_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
{
union drbd_state ns, os, ns_max = { };
@@ -1682,7 +1698,7 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
.disk = D_MASK,
.pdsk = D_MASK
} };
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
enum drbd_state_rv rv;
int vnr, number_of_volumes = 0;
@@ -1690,27 +1706,28 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
/* remember last connect time so request_timer_fn() won't
* kill newly established sessions while we are still trying to thaw
* previously frozen IO */
- if (tconn->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
- tconn->last_reconnect_jif = jiffies;
+ if (connection->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
+ connection->last_reconnect_jif = jiffies;
- tconn->cstate = val.conn;
+ connection->cstate = val.conn;
}
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
number_of_volumes++;
- os = drbd_read_state(mdev);
+ os = drbd_read_state(device);
ns = apply_mask_val(os, mask, val);
- ns = sanitize_state(mdev, ns, NULL);
+ ns = sanitize_state(device, ns, NULL);
if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
ns.disk = os.disk;
- rv = __drbd_set_state(mdev, ns, flags, NULL);
+ rv = __drbd_set_state(device, ns, flags, NULL);
if (rv < SS_SUCCESS)
BUG();
- ns.i = mdev->state.i;
+ ns.i = device->state.i;
ns_max.role = max_role(ns.role, ns_max.role);
ns_max.peer = max_role(ns.peer, ns_max.peer);
ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
@@ -1735,39 +1752,39 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
} };
}
- ns_min.susp = ns_max.susp = tconn->susp;
- ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod;
- ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen;
+ ns_min.susp = ns_max.susp = connection->resource->susp;
+ ns_min.susp_nod = ns_max.susp_nod = connection->resource->susp_nod;
+ ns_min.susp_fen = ns_max.susp_fen = connection->resource->susp_fen;
*pns_min = ns_min;
*pns_max = ns_max;
}
static enum drbd_state_rv
-_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+_conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
enum drbd_state_rv rv;
- if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
+ if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags))
return SS_CW_SUCCESS;
- if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
+ if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags))
return SS_CW_FAILED_BY_PEER;
- rv = conn_is_valid_transition(tconn, mask, val, 0);
- if (rv == SS_SUCCESS && tconn->cstate == C_WF_REPORT_PARAMS)
+ rv = conn_is_valid_transition(connection, mask, val, 0);
+ if (rv == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
rv = SS_UNKNOWN_ERROR; /* continue waiting */
return rv;
}
enum drbd_state_rv
-_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags)
{
enum drbd_state_rv rv = SS_SUCCESS;
struct after_conn_state_chg_work *acscw;
- enum drbd_conns oc = tconn->cstate;
+ enum drbd_conns oc = connection->cstate;
union drbd_state ns_max, ns_min, os;
bool have_mutex = false;
@@ -1777,7 +1794,7 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
goto abort;
}
- rv = conn_is_valid_transition(tconn, mask, val, flags);
+ rv = conn_is_valid_transition(connection, mask, val, flags);
if (rv < SS_SUCCESS)
goto abort;
@@ -1787,38 +1804,38 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
/* This will be a cluster-wide state change.
* Need to give up the spinlock, grab the mutex,
* then send the state change request, ... */
- spin_unlock_irq(&tconn->req_lock);
- mutex_lock(&tconn->cstate_mutex);
+ spin_unlock_irq(&connection->resource->req_lock);
+ mutex_lock(&connection->cstate_mutex);
have_mutex = true;
- set_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
- if (conn_send_state_req(tconn, mask, val)) {
+ set_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
+ if (conn_send_state_req(connection, mask, val)) {
/* sending failed. */
- clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
rv = SS_CW_FAILED_BY_PEER;
/* need to re-acquire the spin lock, though */
goto abort_unlocked;
}
if (val.conn == C_DISCONNECTING)
- set_bit(DISCONNECT_SENT, &tconn->flags);
+ set_bit(DISCONNECT_SENT, &connection->flags);
/* ... and re-acquire the spinlock.
* If _conn_rq_cond() returned >= SS_SUCCESS, we must call
* conn_set_state() within the same spinlock. */
- spin_lock_irq(&tconn->req_lock);
- wait_event_lock_irq(tconn->ping_wait,
- (rv = _conn_rq_cond(tconn, mask, val)),
- tconn->req_lock);
- clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ spin_lock_irq(&connection->resource->req_lock);
+ wait_event_lock_irq(connection->ping_wait,
+ (rv = _conn_rq_cond(connection, mask, val)),
+ connection->resource->req_lock);
+ clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
if (rv < SS_SUCCESS)
goto abort;
}
- conn_old_common_state(tconn, &os, &flags);
+ conn_old_common_state(connection, &os, &flags);
flags |= CS_DC_SUSP;
- conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags);
- conn_pr_state_change(tconn, os, ns_max, flags);
+ conn_set_state(connection, mask, val, &ns_min, &ns_max, flags);
+ conn_pr_state_change(connection, os, ns_max, flags);
acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
if (acscw) {
@@ -1827,39 +1844,39 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
acscw->ns_max = ns_max;
acscw->flags = flags;
acscw->w.cb = w_after_conn_state_ch;
- kref_get(&tconn->kref);
- acscw->w.tconn = tconn;
- drbd_queue_work(&tconn->sender_work, &acscw->w);
+ kref_get(&connection->kref);
+ acscw->connection = connection;
+ drbd_queue_work(&connection->sender_work, &acscw->w);
} else {
- conn_err(tconn, "Could not kmalloc an acscw\n");
+ drbd_err(connection, "Could not kmalloc an acscw\n");
}
abort:
if (have_mutex) {
/* mutex_unlock() "... must not be used in interrupt context.",
* so give up the spinlock, then re-acquire it */
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
abort_unlocked:
- mutex_unlock(&tconn->cstate_mutex);
- spin_lock_irq(&tconn->req_lock);
+ mutex_unlock(&connection->cstate_mutex);
+ spin_lock_irq(&connection->resource->req_lock);
}
if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
- conn_err(tconn, "State change failed: %s\n", drbd_set_st_err_str(rv));
- conn_err(tconn, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
- conn_err(tconn, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
+ drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
+ drbd_err(connection, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
+ drbd_err(connection, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
}
return rv;
}
enum drbd_state_rv
-conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags)
{
enum drbd_state_rv rv;
- spin_lock_irq(&tconn->req_lock);
- rv = _conn_request_state(tconn, mask, val, flags);
- spin_unlock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
+ rv = _conn_request_state(connection, mask, val, flags);
+ spin_unlock_irq(&connection->resource->req_lock);
return rv;
}
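
conn_request_state() also shows where the request spinlock now lives: it is taken through connection->resource->req_lock rather than directly on the connection, so every connection of a resource serializes on the same lock. A toy sketch of that ownership change, using a pthread mutex in place of the kernel spinlock:

#include <pthread.h>

struct toy_resource { pthread_mutex_t req_lock; };

struct toy_connection { struct toy_resource *resource; };

static void toy_conn_request_state(struct toy_connection *connection)
{
	/* the lock is owned by the resource and shared by its connections */
	pthread_mutex_lock(&connection->resource->req_lock);
	/* ... state change work would happen here ... */
	pthread_mutex_unlock(&connection->resource->req_lock);
}
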
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
index a3c361bbc4b6..cc41605ba21c 100644
--- a/drivers/block/drbd/drbd_state.h
+++ b/drivers/block/drbd/drbd_state.h
@@ -1,8 +1,8 @@
#ifndef DRBD_STATE_H
#define DRBD_STATE_H
-struct drbd_conf;
-struct drbd_tconn;
+struct drbd_device;
+struct drbd_connection;
/**
* DOC: DRBD State macros
@@ -107,36 +107,36 @@ union drbd_dev_state {
unsigned int i;
};
-extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
+extern enum drbd_state_rv drbd_change_state(struct drbd_device *device,
enum chg_state_flags f,
union drbd_state mask,
union drbd_state val);
-extern void drbd_force_state(struct drbd_conf *, union drbd_state,
+extern void drbd_force_state(struct drbd_device *, union drbd_state,
union drbd_state);
-extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
+extern enum drbd_state_rv _drbd_request_state(struct drbd_device *,
union drbd_state,
union drbd_state,
enum chg_state_flags);
-extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
+extern enum drbd_state_rv __drbd_set_state(struct drbd_device *, union drbd_state,
enum chg_state_flags,
struct completion *done);
-extern void print_st_err(struct drbd_conf *, union drbd_state,
+extern void print_st_err(struct drbd_device *, union drbd_state,
union drbd_state, int);
enum drbd_state_rv
-_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags);
enum drbd_state_rv
-conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags);
-extern void drbd_resume_al(struct drbd_conf *mdev);
-extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
+extern void drbd_resume_al(struct drbd_device *device);
+extern bool conn_all_vols_unconf(struct drbd_connection *connection);
/**
* drbd_request_state() - Request a state change
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
*
@@ -144,18 +144,18 @@ extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
* quite verbose in case the state change is not possible, and all those
* state changes are globally serialized.
*/
-static inline int drbd_request_state(struct drbd_conf *mdev,
+static inline int drbd_request_state(struct drbd_device *device,
union drbd_state mask,
union drbd_state val)
{
- return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
+ return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED);
}
-enum drbd_role conn_highest_role(struct drbd_tconn *tconn);
-enum drbd_role conn_highest_peer(struct drbd_tconn *tconn);
-enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn);
-enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn);
-enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn);
-enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn);
+enum drbd_role conn_highest_role(struct drbd_connection *connection);
+enum drbd_role conn_highest_peer(struct drbd_connection *connection);
+enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection);
+enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection);
+enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection);
+enum drbd_conns conn_lowest_conn(struct drbd_connection *connection);
#endif
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 58e08ff2b2ce..80b0f63c7075 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -24,6 +24,7 @@
*/
#include <linux/drbd.h>
+#include "drbd_strings.h"
static const char *drbd_conn_s_names[] = {
[C_STANDALONE] = "StandAlone",
diff --git a/drivers/block/drbd/drbd_strings.h b/drivers/block/drbd/drbd_strings.h
new file mode 100644
index 000000000000..f9923cc88afb
--- /dev/null
+++ b/drivers/block/drbd/drbd_strings.h
@@ -0,0 +1,9 @@
+#ifndef __DRBD_STRINGS_H
+#define __DRBD_STRINGS_H
+
+extern const char *drbd_conn_str(enum drbd_conns);
+extern const char *drbd_role_str(enum drbd_role);
+extern const char *drbd_disk_str(enum drbd_disk_state);
+extern const char *drbd_set_st_err_str(enum drbd_state_rv);
+
+#endif /* __DRBD_STRINGS_H */
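
The new drbd_strings.h only declares the four string helpers that drbd_strings.c already defines, so other DRBD files can include it directly. A usage sketch (only meaningful inside the DRBD tree, not standalone):

#include <linux/drbd.h>
#include "drbd_strings.h"

static const char *toy_describe(enum drbd_conns conn)
{
	return drbd_conn_str(conn);
}
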
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 84d3175d493a..2c4ce42c3657 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -21,7 +21,7 @@
along with drbd; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+*/
#include <linux/module.h>
#include <linux/drbd.h>
@@ -36,10 +36,11 @@
#include <linux/scatterlist.h>
#include "drbd_int.h"
+#include "drbd_protocol.h"
#include "drbd_req.h"
-static int w_make_ov_request(struct drbd_work *w, int cancel);
-
+static int make_ov_request(struct drbd_device *, int);
+static int make_resync_request(struct drbd_device *, int);
/* endio handlers:
* drbd_md_io_complete (defined here)
@@ -67,10 +68,10 @@ rwlock_t global_state_lock;
void drbd_md_io_complete(struct bio *bio, int error)
{
struct drbd_md_io *md_io;
- struct drbd_conf *mdev;
+ struct drbd_device *device;
md_io = (struct drbd_md_io *)bio->bi_private;
- mdev = container_of(md_io, struct drbd_conf, md_io);
+ device = container_of(md_io, struct drbd_device, md_io);
md_io->error = error;
@@ -83,35 +84,36 @@ void drbd_md_io_complete(struct bio *bio, int error)
* Make sure we first drop the reference, and only then signal
* completion, or we may (in drbd_al_read_log()) cycle so fast into the
* next drbd_md_sync_page_io(), that we trigger the
- * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
+ * ASSERT(atomic_read(&device->md_io_in_use) == 1) there.
*/
- drbd_md_put_buffer(mdev);
+ drbd_md_put_buffer(device);
md_io->done = 1;
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
bio_put(bio);
- if (mdev->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */
- put_ldev(mdev);
+ if (device->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */
+ put_ldev(device);
}
/* reads on behalf of the partner,
* "submitted" by the receiver
*/
-void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
+static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
- struct drbd_conf *mdev = peer_req->w.mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- mdev->read_cnt += peer_req->i.size >> 9;
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ device->read_cnt += peer_req->i.size >> 9;
list_del(&peer_req->w.list);
- if (list_empty(&mdev->read_ee))
- wake_up(&mdev->ee_wait);
+ if (list_empty(&device->read_ee))
+ wake_up(&device->ee_wait);
if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
- __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ __drbd_chk_io_error(device, DRBD_READ_ERROR);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
- drbd_queue_work(&mdev->tconn->sender_work, &peer_req->w);
- put_ldev(mdev);
+ drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);
+ put_ldev(device);
}
/* writes on behalf of the partner, or resync writes,
@@ -119,7 +121,8 @@ void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(lo
static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
- struct drbd_conf *mdev = peer_req->w.mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
struct drbd_interval i;
int do_wake;
u64 block_id;
@@ -133,35 +136,35 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
block_id = peer_req->block_id;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- mdev->writ_cnt += peer_req->i.size >> 9;
- list_move_tail(&peer_req->w.list, &mdev->done_ee);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ device->writ_cnt += peer_req->i.size >> 9;
+ list_move_tail(&peer_req->w.list, &device->done_ee);
/*
* Do not remove from the write_requests tree here: we did not send the
* Ack yet and did not wake possibly waiting conflicting requests.
* Removed from the tree from "drbd_process_done_ee" within the
- * appropriate w.cb (e_end_block/e_end_resync_block) or from
+ * appropriate dw.cb (e_end_block/e_end_resync_block) or from
* _drbd_clear_done_ee.
*/
- do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);
+ do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);
if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
- __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (block_id == ID_SYNCER)
- drbd_rs_complete_io(mdev, i.sector);
+ drbd_rs_complete_io(device, i.sector);
if (do_wake)
- wake_up(&mdev->ee_wait);
+ wake_up(&device->ee_wait);
if (do_al_complete_io)
- drbd_al_complete_io(mdev, &i);
+ drbd_al_complete_io(device, &i);
- wake_asender(mdev->tconn);
- put_ldev(mdev);
+ wake_asender(peer_device->connection);
+ put_ldev(device);
}
/* writes on behalf of the partner, or resync writes,
@@ -170,17 +173,17 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
void drbd_peer_request_endio(struct bio *bio, int error)
{
struct drbd_peer_request *peer_req = bio->bi_private;
- struct drbd_conf *mdev = peer_req->w.mdev;
+ struct drbd_device *device = peer_req->peer_device->device;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
if (error && __ratelimit(&drbd_ratelimit_state))
- dev_warn(DEV, "%s: error=%d s=%llus\n",
+ drbd_warn(device, "%s: error=%d s=%llus\n",
is_write ? "write" : "read", error,
(unsigned long long)peer_req->i.sector);
if (!error && !uptodate) {
if (__ratelimit(&drbd_ratelimit_state))
- dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
+ drbd_warn(device, "%s: setting error to -EIO s=%llus\n",
is_write ? "write" : "read",
(unsigned long long)peer_req->i.sector);
/* strange behavior of some lower level drivers...
@@ -207,13 +210,13 @@ void drbd_request_endio(struct bio *bio, int error)
{
unsigned long flags;
struct drbd_request *req = bio->bi_private;
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct bio_and_error m;
enum drbd_req_event what;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
if (!error && !uptodate) {
- dev_warn(DEV, "p %s: setting error to -EIO\n",
+ drbd_warn(device, "p %s: setting error to -EIO\n",
bio_data_dir(bio) == WRITE ? "write" : "read");
/* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
@@ -252,7 +255,7 @@ void drbd_request_endio(struct bio *bio, int error)
*/
if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
if (__ratelimit(&drbd_ratelimit_state))
- dev_emerg(DEV, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
+ drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
if (!error)
panic("possible random memory corruption caused by delayed completion of aborted local request\n");
@@ -272,17 +275,16 @@ void drbd_request_endio(struct bio *bio, int error)
req->private_bio = ERR_PTR(error);
/* not req_mod(), we need irqsave here! */
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
__req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
- put_ldev(mdev);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
+ put_ldev(device);
if (m.bio)
- complete_master_bio(mdev, &m);
+ complete_master_bio(device, &m);
}
-void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
- struct drbd_peer_request *peer_req, void *digest)
+void drbd_csum_ee(struct crypto_hash *tfm, struct drbd_peer_request *peer_req, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
@@ -309,7 +311,7 @@ void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
crypto_hash_final(&desc, digest);
}
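
drbd_csum_ee() and drbd_csum_bio() now take the hash transform directly instead of the device. For reference, a minimal sketch of how the legacy crypto_hash interface they use is driven over one flat buffer; the helper name and the single-buffer handling are illustrative only, the real helpers iterate the request's pages and bio segments instead:

/* Illustrative helper only; needs <linux/crypto.h> and <linux/scatterlist.h>. */
static void example_csum_buffer(struct crypto_hash *tfm,
                                void *buf, unsigned int len, void *digest)
{
        struct hash_desc desc;
        struct scatterlist sg;

        desc.tfm = tfm;
        desc.flags = 0;

        sg_init_one(&sg, buf, len);

        crypto_hash_init(&desc);
        crypto_hash_update(&desc, &sg, len);
        /* digest must hold crypto_hash_digestsize(tfm) bytes */
        crypto_hash_final(&desc, digest);
}
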
-void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
+void drbd_csum_bio(struct crypto_hash *tfm, struct bio *bio, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
@@ -333,7 +335,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
static int w_e_send_csum(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
int digest_size;
void *digest;
int err = 0;
@@ -344,89 +347,92 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
- digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
+ digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
sector_t sector = peer_req->i.sector;
unsigned int size = peer_req->i.size;
- drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
+ drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
/* Free peer_req and pages before send.
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_alloc_pages due to pp_in_use > max_buffers. */
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
peer_req = NULL;
- inc_rs_pending(mdev);
- err = drbd_send_drequest_csum(mdev, sector, size,
+ inc_rs_pending(device);
+ err = drbd_send_drequest_csum(peer_device, sector, size,
digest, digest_size,
P_CSUM_RS_REQUEST);
kfree(digest);
} else {
- dev_err(DEV, "kmalloc() of digest failed.\n");
+ drbd_err(device, "kmalloc() of digest failed.\n");
err = -ENOMEM;
}
out:
if (peer_req)
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
if (unlikely(err))
- dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
+ drbd_err(device, "drbd_send_drequest(..., csum) failed\n");
return err;
}

#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
-static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
+static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
- if (!get_ldev(mdev))
+ if (!get_ldev(device))
return -EIO;
- if (drbd_rs_should_slow_down(mdev, sector))
+ if (drbd_rs_should_slow_down(device, sector))
goto defer;
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
- peer_req = drbd_alloc_peer_req(mdev, ID_SYNCER /* unused */, sector,
+ peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,
size, GFP_TRY);
if (!peer_req)
goto defer;
peer_req->w.cb = w_e_send_csum;
- spin_lock_irq(&mdev->tconn->req_lock);
- list_add(&peer_req->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ list_add(&peer_req->w.list, &device->read_ee);
+ spin_unlock_irq(&device->resource->req_lock);
- atomic_add(size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_peer_request(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
+ atomic_add(size >> 9, &device->rs_sect_ev);
+ if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
return 0;
/* If it failed because of ENOMEM, retry should help. If it failed
* because bio_add_page failed (probably broken lower level driver),
* retry may or may not help.
* If it does not, you may need to force disconnect. */
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
defer:
- put_ldev(mdev);
+ put_ldev(device);
return -EAGAIN;
}
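
read_for_csum() treats allocation pressure as a soft failure: GFP_TRY carries __GFP_NOWARN, and returning -EAGAIN merely asks the resync timer to try the sector again later. A minimal sketch of that pattern in the driver's idiom; the function name is invented, while get_ldev()/put_ldev() and GFP_TRY are the driver's own:

/* Illustrative only: fail soft under memory pressure, let the timer retry. */
static int example_try_prepare(struct drbd_device *device)
{
        struct page *page;

        if (!get_ldev(device))
                return -EIO;            /* local disk gone: hard failure */

        page = alloc_page(GFP_TRY);     /* __GFP_HIGHMEM | __GFP_NOWARN  */
        if (!page) {
                put_ldev(device);
                return -EAGAIN;         /* caller requeues via the resync timer */
        }

        /* ... a real caller would set up and submit the read here ... */

        __free_page(page);
        put_ldev(device);
        return 0;
}
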
int w_resync_timer(struct drbd_work *w, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
- switch (mdev->state.conn) {
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, resync_work);
+
+ switch (device->state.conn) {
case C_VERIFY_S:
- w_make_ov_request(w, cancel);
+ make_ov_request(device, cancel);
break;
case C_SYNC_TARGET:
- w_make_resync_request(w, cancel);
+ make_resync_request(device, cancel);
break;
}
@@ -435,10 +441,11 @@ int w_resync_timer(struct drbd_work *w, int cancel)
void resync_timer_fn(unsigned long data)
{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
+ struct drbd_device *device = (struct drbd_device *) data;
- if (list_empty(&mdev->resync_work.list))
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
+ if (list_empty(&device->resync_work.list))
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &device->resync_work);
}
static void fifo_set(struct fifo_buffer *fb, int value)
@@ -485,7 +492,7 @@ struct fifo_buffer *fifo_alloc(int fifo_size)
return fb;
}
-static int drbd_rs_controller(struct drbd_conf *mdev)
+static int drbd_rs_controller(struct drbd_device *device)
{
struct disk_conf *dc;
unsigned int sect_in; /* Number of sectors that came in since the last turn */
@@ -498,22 +505,22 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
int max_sect;
struct fifo_buffer *plan;
- sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
- mdev->rs_in_flight -= sect_in;
+ sect_in = atomic_xchg(&device->rs_sect_in, 0); /* Number of sectors that came in */
+ device->rs_in_flight -= sect_in;
- dc = rcu_dereference(mdev->ldev->disk_conf);
- plan = rcu_dereference(mdev->rs_plan_s);
+ dc = rcu_dereference(device->ldev->disk_conf);
+ plan = rcu_dereference(device->rs_plan_s);
steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
- if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
+ if (device->rs_in_flight + sect_in == 0) { /* At start of resync */
want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
} else { /* normal path */
want = dc->c_fill_target ? dc->c_fill_target :
sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
}
- correction = want - mdev->rs_in_flight - plan->total;
+ correction = want - device->rs_in_flight - plan->total;
/* Plan ahead */
cps = correction / steps;
@@ -533,25 +540,25 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
req_sect = max_sect;
/*
- dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
- sect_in, mdev->rs_in_flight, want, correction,
- steps, cps, mdev->rs_planed, curr_corr, req_sect);
+ drbd_warn(device, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
+ sect_in, device->rs_in_flight, want, correction,
+ steps, cps, device->rs_planed, curr_corr, req_sect);
*/
return req_sect;
}
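
drbd_rs_controller() is a small feedback controller: each SLEEP_TIME tick it looks at how many resync sectors came back (sect_in), compares the amount still in flight against a target derived from c_fill_target or c_delay_target, and spreads the correction over the next plan->size ticks. The toy below (plain userspace C, not driver code) keeps only that core idea; it drops the RCU-protected plan FIFO and the disk_conf fields, and all numbers are invented:

#include <stdio.h>

/* One controller tick: sect_in resync sectors completed since the last tick. */
static int controller_step(int sect_in, int *in_flight,
                           int want, int steps, int max_sect)
{
        int correction, curr_corr, req_sect;

        *in_flight -= sect_in;              /* those requests came back        */
        correction  = want - *in_flight;    /* distance from the fill target   */
        curr_corr   = correction / steps;   /* apply only 1/steps of it now    */

        req_sect = sect_in + curr_corr;     /* replace what completed, plus a  */
        if (req_sect < 0)                   /* gentle push toward the target   */
                req_sect = 0;
        if (req_sect > max_sect)            /* c_max_rate style hard ceiling   */
                req_sect = max_sect;

        *in_flight += req_sect;
        return req_sect;
}

int main(void)
{
        int in_flight = 0, completed, issued, tick;

        for (tick = 0; tick < 8; tick++) {
                completed = in_flight / 2;  /* pretend half of what is pending completes */
                issued = controller_step(completed, &in_flight, 1000, 10, 4000);
                printf("tick %d: completed %4d, issuing %4d, in flight %4d\n",
                       tick, completed, issued, in_flight);
        }
        return 0;
}

Run for a few ticks, this ramps the issue rate up smoothly instead of dumping the whole target onto the lower device at once, which is the point of dividing the correction by the number of plan steps.
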
-static int drbd_rs_number_requests(struct drbd_conf *mdev)
+static int drbd_rs_number_requests(struct drbd_device *device)
{
int number;
rcu_read_lock();
- if (rcu_dereference(mdev->rs_plan_s)->size) {
- number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
- mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
+ if (rcu_dereference(device->rs_plan_s)->size) {
+ number = drbd_rs_controller(device) >> (BM_BLOCK_SHIFT - 9);
+ device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
} else {
- mdev->c_sync_rate = rcu_dereference(mdev->ldev->disk_conf)->resync_rate;
- number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
+ device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
+ number = SLEEP_TIME * device->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
}
rcu_read_unlock();
@@ -560,12 +567,11 @@ static int drbd_rs_number_requests(struct drbd_conf *mdev)
return number;
}
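
The shift in drbd_rs_number_requests() converts the controller's answer from 512-byte sectors into bitmap-block-sized requests, and c_sync_rate is reported in KiB/s. A small arithmetic check, assuming the usual DRBD constants (4 KiB bitmap blocks, a tick of HZ/10); those constant values are assumptions, not part of this hunk:

#include <stdio.h>

int main(void)
{
        const int HZ = 250, SLEEP_TIME = HZ / 10;        /* one tick = 100 ms      */
        const int BM_BLOCK_SHIFT = 12, BM_BLOCK_SIZE = 1 << BM_BLOCK_SHIFT;
        int sectors = 800;                               /* 512-byte sectors/tick  */

        int number = sectors >> (BM_BLOCK_SHIFT - 9);    /* 4 KiB requests per tick */
        int rate   = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;  /* KiB/s  */

        printf("%d sectors -> %d requests, c_sync_rate = %d KiB/s\n",
               sectors, number, rate);
        return 0;
}

With these numbers: 800 sectors become 100 requests of 4 KiB per 100 ms tick, i.e. 4000 KiB/s.
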
-int w_make_resync_request(struct drbd_work *w, int cancel)
+static int make_resync_request(struct drbd_device *device, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
unsigned long bit;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
+ const sector_t capacity = drbd_get_capacity(device->this_bdev);
int max_bio_size;
int number, rollback_i, size;
int align, queued, sndbuf;
@@ -574,61 +580,61 @@ int w_make_resync_request(struct drbd_work *w, int cancel)
if (unlikely(cancel))
return 0;
- if (mdev->rs_total == 0) {
+ if (device->rs_total == 0) {
/* empty resync? */
- drbd_resync_finished(mdev);
+ drbd_resync_finished(device);
return 0;
}
- if (!get_ldev(mdev)) {
- /* Since we only need to access mdev->rsync a
- get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
+ if (!get_ldev(device)) {
+ /* Since we only need to access device->resync a
+ get_ldev_if_state(device, D_FAILED) would be sufficient, but
to continue resync with a broken disk makes no sense at
all */
- dev_err(DEV, "Disk broke down during resync!\n");
+ drbd_err(device, "Disk broke down during resync!\n");
return 0;
}
- max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
- number = drbd_rs_number_requests(mdev);
+ max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
+ number = drbd_rs_number_requests(device);
if (number == 0)
goto requeue;
for (i = 0; i < number; i++) {
/* Stop generating RS requests, when half of the send buffer is filled */
- mutex_lock(&mdev->tconn->data.mutex);
- if (mdev->tconn->data.socket) {
- queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
- sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
+ mutex_lock(&first_peer_device(device)->connection->data.mutex);
+ if (first_peer_device(device)->connection->data.socket) {
+ queued = first_peer_device(device)->connection->data.socket->sk->sk_wmem_queued;
+ sndbuf = first_peer_device(device)->connection->data.socket->sk->sk_sndbuf;
} else {
queued = 1;
sndbuf = 0;
}
- mutex_unlock(&mdev->tconn->data.mutex);
+ mutex_unlock(&first_peer_device(device)->connection->data.mutex);
if (queued > sndbuf / 2)
goto requeue;
next_sector:
size = BM_BLOCK_SIZE;
- bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
+ bit = drbd_bm_find_next(device, device->bm_resync_fo);
if (bit == DRBD_END_OF_BITMAP) {
- mdev->bm_resync_fo = drbd_bm_bits(mdev);
- put_ldev(mdev);
+ device->bm_resync_fo = drbd_bm_bits(device);
+ put_ldev(device);
return 0;
}
sector = BM_BIT_TO_SECT(bit);
- if (drbd_rs_should_slow_down(mdev, sector) ||
- drbd_try_rs_begin_io(mdev, sector)) {
- mdev->bm_resync_fo = bit;
+ if (drbd_rs_should_slow_down(device, sector) ||
+ drbd_try_rs_begin_io(device, sector)) {
+ device->bm_resync_fo = bit;
goto requeue;
}
- mdev->bm_resync_fo = bit + 1;
+ device->bm_resync_fo = bit + 1;
- if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
- drbd_rs_complete_io(mdev, sector);
+ if (unlikely(drbd_bm_test_bit(device, bit) == 0)) {
+ drbd_rs_complete_io(device, sector);
goto next_sector;
}
@@ -657,7 +663,7 @@ next_sector:
* obscure reason; ( b == 0 ) would get the out-of-band
* only accidentally right because of the "oddly sized"
* adjustment below */
- if (drbd_bm_test_bit(mdev, bit+1) != 1)
+ if (drbd_bm_test_bit(device, bit+1) != 1)
break;
bit++;
size += BM_BLOCK_SIZE;
@@ -668,20 +674,21 @@ next_sector:
/* if we merged some,
* reset the offset to start the next drbd_bm_find_next from */
if (size > BM_BLOCK_SIZE)
- mdev->bm_resync_fo = bit + 1;
+ device->bm_resync_fo = bit + 1;
#endif
/* adjust very last sectors, in case we are oddly sized */
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
- if (mdev->tconn->agreed_pro_version >= 89 && mdev->tconn->csums_tfm) {
- switch (read_for_csum(mdev, sector, size)) {
+ if (first_peer_device(device)->connection->agreed_pro_version >= 89 &&
+ first_peer_device(device)->connection->csums_tfm) {
+ switch (read_for_csum(first_peer_device(device), sector, size)) {
case -EIO: /* Disk failure */
- put_ldev(mdev);
+ put_ldev(device);
return -EIO;
case -EAGAIN: /* allocation failed, or ldev busy */
- drbd_rs_complete_io(mdev, sector);
- mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
+ drbd_rs_complete_io(device, sector);
+ device->bm_resync_fo = BM_SECT_TO_BIT(sector);
i = rollback_i;
goto requeue;
case 0:
@@ -693,50 +700,49 @@ next_sector:
} else {
int err;
- inc_rs_pending(mdev);
- err = drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
+ inc_rs_pending(device);
+ err = drbd_send_drequest(first_peer_device(device), P_RS_DATA_REQUEST,
sector, size, ID_SYNCER);
if (err) {
- dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
- dec_rs_pending(mdev);
- put_ldev(mdev);
+ drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
+ dec_rs_pending(device);
+ put_ldev(device);
return err;
}
}
}
- if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
+ if (device->bm_resync_fo >= drbd_bm_bits(device)) {
/* last syncer _request_ was sent,
* but the P_RS_DATA_REPLY not yet received. sync will end (and
* next sync group will resume), as soon as we receive the last
* resync data block, and the last bit is cleared.
* until then resync "work" is "inactive" ...
*/
- put_ldev(mdev);
+ put_ldev(device);
return 0;
}
requeue:
- mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
- mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
- put_ldev(mdev);
+ device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
+ mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
+ put_ldev(device);
return 0;
}
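
make_resync_request() grows one request over runs of consecutive dirty bitmap bits, BM_BLOCK_SIZE at a time, until it hits a clean bit or max_bio_size. A self-contained model of just that merging loop (plain C); the alignment tweak and the #ifdef'd offset reset are left out, and the sizes are assumptions:

#include <stdio.h>

#define BM_BLOCK_SIZE   4096
#define MAX_BIO_SIZE    (32 * 1024)

static int merge_request(const unsigned char *bitmap, int nbits, int bit, int *size)
{
        *size = BM_BLOCK_SIZE;
        while (bit + 1 < nbits && bitmap[bit + 1] &&
               *size + BM_BLOCK_SIZE <= MAX_BIO_SIZE) {
                bit++;                      /* next bit is dirty too: fold it in */
                *size += BM_BLOCK_SIZE;
        }
        return bit;                         /* last bit covered; scan resumes at bit + 1 */
}

int main(void)
{
        const unsigned char bitmap[] = { 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0 };
        int bit = 0, size;

        while (bit < (int)sizeof(bitmap)) {
                if (!bitmap[bit]) { bit++; continue; }
                bit = merge_request(bitmap, (int)sizeof(bitmap), bit, &size);
                printf("request at bit %d covering %d bytes\n",
                       bit - size / BM_BLOCK_SIZE + 1, size);
                bit++;
        }
        return 0;
}
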
-static int w_make_ov_request(struct drbd_work *w, int cancel)
+static int make_ov_request(struct drbd_device *device, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
int number, i, size;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
+ const sector_t capacity = drbd_get_capacity(device->this_bdev);
bool stop_sector_reached = false;
if (unlikely(cancel))
return 1;
- number = drbd_rs_number_requests(mdev);
+ number = drbd_rs_number_requests(device);
- sector = mdev->ov_position;
+ sector = device->ov_position;
for (i = 0; i < number; i++) {
if (sector >= capacity)
return 1;
@@ -745,116 +751,121 @@ static int w_make_ov_request(struct drbd_work *w, int cancel)
* w_e_end_ov_reply().
* We need to send at least one request out. */
stop_sector_reached = i > 0
- && verify_can_do_stop_sector(mdev)
- && sector >= mdev->ov_stop_sector;
+ && verify_can_do_stop_sector(device)
+ && sector >= device->ov_stop_sector;
if (stop_sector_reached)
break;
size = BM_BLOCK_SIZE;
- if (drbd_rs_should_slow_down(mdev, sector) ||
- drbd_try_rs_begin_io(mdev, sector)) {
- mdev->ov_position = sector;
+ if (drbd_rs_should_slow_down(device, sector) ||
+ drbd_try_rs_begin_io(device, sector)) {
+ device->ov_position = sector;
goto requeue;
}
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
- inc_rs_pending(mdev);
- if (drbd_send_ov_request(mdev, sector, size)) {
- dec_rs_pending(mdev);
+ inc_rs_pending(device);
+ if (drbd_send_ov_request(first_peer_device(device), sector, size)) {
+ dec_rs_pending(device);
return 0;
}
sector += BM_SECT_PER_BIT;
}
- mdev->ov_position = sector;
+ device->ov_position = sector;
requeue:
- mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
+ device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
if (i == 0 || !stop_sector_reached)
- mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
+ mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
return 1;
}
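
Both request generators clamp the very last request so it does not run past the backing device. A worked example of that "oddly sized device" arithmetic, with invented numbers:

#include <stdio.h>

int main(void)
{
        unsigned long long capacity = 2097152;      /* 1 GiB in 512-byte sectors */
        unsigned long long sector   = capacity - 3; /* request starts 3 sectors before the end */
        unsigned int size = 32 * 1024;              /* requested bytes */

        if (sector + (size >> 9) > capacity)        /* would overrun the device  */
                size = (unsigned int)((capacity - sector) << 9);

        printf("clamped size: %u bytes (%llu sectors)\n",
               size, (unsigned long long)(size >> 9));
        return 0;
}

The 32 KiB request is trimmed to the 3 remaining sectors (1536 bytes).
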
int w_ov_finished(struct drbd_work *w, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
- kfree(w);
- ov_out_of_sync_print(mdev);
- drbd_resync_finished(mdev);
+ struct drbd_device_work *dw =
+ container_of(w, struct drbd_device_work, w);
+ struct drbd_device *device = dw->device;
+ kfree(dw);
+ ov_out_of_sync_print(device);
+ drbd_resync_finished(device);
return 0;
}
static int w_resync_finished(struct drbd_work *w, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
- kfree(w);
+ struct drbd_device_work *dw =
+ container_of(w, struct drbd_device_work, w);
+ struct drbd_device *device = dw->device;
+ kfree(dw);
- drbd_resync_finished(mdev);
+ drbd_resync_finished(device);
return 0;
}
-static void ping_peer(struct drbd_conf *mdev)
+static void ping_peer(struct drbd_device *device)
{
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
- clear_bit(GOT_PING_ACK, &tconn->flags);
- request_ping(tconn);
- wait_event(tconn->ping_wait,
- test_bit(GOT_PING_ACK, &tconn->flags) || mdev->state.conn < C_CONNECTED);
+ clear_bit(GOT_PING_ACK, &connection->flags);
+ request_ping(connection);
+ wait_event(connection->ping_wait,
+ test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
}
-int drbd_resync_finished(struct drbd_conf *mdev)
+int drbd_resync_finished(struct drbd_device *device)
{
unsigned long db, dt, dbdt;
unsigned long n_oos;
union drbd_state os, ns;
- struct drbd_work *w;
+ struct drbd_device_work *dw;
char *khelper_cmd = NULL;
int verify_done = 0;
/* Remove all elements from the resync LRU. Since future actions
* might set bits in the (main) bitmap, then the entries in the
* resync LRU would be wrong. */
- if (drbd_rs_del_all(mdev)) {
+ if (drbd_rs_del_all(device)) {
/* In case this is not possible now, most probably because
* there are P_RS_DATA_REPLY Packets lingering on the worker's
* queue (or even the read operations for those packets
* is not finished by now). Retry in 100ms. */
schedule_timeout_interruptible(HZ / 10);
- w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
- if (w) {
- w->cb = w_resync_finished;
- w->mdev = mdev;
- drbd_queue_work(&mdev->tconn->sender_work, w);
+ dw = kmalloc(sizeof(struct drbd_device_work), GFP_ATOMIC);
+ if (dw) {
+ dw->w.cb = w_resync_finished;
+ dw->device = device;
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &dw->w);
return 1;
}
- dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
+ drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(dw).\n");
}
- dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
+ dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
if (dt <= 0)
dt = 1;
-
- db = mdev->rs_total;
+
+ db = device->rs_total;
/* adjust for verify start and stop sectors, respective reached position */
- if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
- db -= mdev->ov_left;
+ if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
+ db -= device->ov_left;
dbdt = Bit2KB(db/dt);
- mdev->rs_paused /= HZ;
+ device->rs_paused /= HZ;
- if (!get_ldev(mdev))
+ if (!get_ldev(device))
goto out;
- ping_peer(mdev);
+ ping_peer(device);
- spin_lock_irq(&mdev->tconn->req_lock);
- os = drbd_read_state(mdev);
+ spin_lock_irq(&device->resource->req_lock);
+ os = drbd_read_state(device);
verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -866,41 +877,41 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ns = os;
ns.conn = C_CONNECTED;
- dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
+ drbd_info(device, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
verify_done ? "Online verify" : "Resync",
- dt + mdev->rs_paused, mdev->rs_paused, dbdt);
+ dt + device->rs_paused, device->rs_paused, dbdt);
- n_oos = drbd_bm_total_weight(mdev);
+ n_oos = drbd_bm_total_weight(device);
if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
if (n_oos) {
- dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
+ drbd_alert(device, "Online verify found %lu %dk block out of sync!\n",
n_oos, Bit2KB(1));
khelper_cmd = "out-of-sync";
}
} else {
- D_ASSERT((n_oos - mdev->rs_failed) == 0);
+ D_ASSERT(device, (n_oos - device->rs_failed) == 0);
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
khelper_cmd = "after-resync-target";
- if (mdev->tconn->csums_tfm && mdev->rs_total) {
- const unsigned long s = mdev->rs_same_csum;
- const unsigned long t = mdev->rs_total;
+ if (first_peer_device(device)->connection->csums_tfm && device->rs_total) {
+ const unsigned long s = device->rs_same_csum;
+ const unsigned long t = device->rs_total;
const int ratio =
(t == 0) ? 0 :
(t < 100000) ? ((s*100)/t) : (s/(t/100));
- dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
+ drbd_info(device, "%u %% had equal checksums, eliminated: %luK; "
"transferred %luK total %luK\n",
ratio,
- Bit2KB(mdev->rs_same_csum),
- Bit2KB(mdev->rs_total - mdev->rs_same_csum),
- Bit2KB(mdev->rs_total));
+ Bit2KB(device->rs_same_csum),
+ Bit2KB(device->rs_total - device->rs_same_csum),
+ Bit2KB(device->rs_total));
}
}
- if (mdev->rs_failed) {
- dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed);
+ if (device->rs_failed) {
+ drbd_info(device, " %lu failed blocks\n", device->rs_failed);
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
ns.disk = D_INCONSISTENT;
@@ -914,179 +925,181 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ns.pdsk = D_UP_TO_DATE;
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
- if (mdev->p_uuid) {
+ if (device->p_uuid) {
int i;
for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
- _drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
- drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
- _drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
+ _drbd_uuid_set(device, i, device->p_uuid[i]);
+ drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]);
+ _drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]);
} else {
- dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
+ drbd_err(device, "device->p_uuid is NULL! BUG\n");
}
}
if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
/* for verify runs, we don't update uuids here,
* so there would be nothing to report. */
- drbd_uuid_set_bm(mdev, 0UL);
- drbd_print_uuids(mdev, "updated UUIDs");
- if (mdev->p_uuid) {
+ drbd_uuid_set_bm(device, 0UL);
+ drbd_print_uuids(device, "updated UUIDs");
+ if (device->p_uuid) {
/* Now the two UUID sets are equal, update what we
* know of the peer. */
int i;
for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
- mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
+ device->p_uuid[i] = device->ldev->md.uuid[i];
}
}
}
- _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
+ _drbd_set_state(device, ns, CS_VERBOSE, NULL);
out_unlock:
- spin_unlock_irq(&mdev->tconn->req_lock);
- put_ldev(mdev);
+ spin_unlock_irq(&device->resource->req_lock);
+ put_ldev(device);
out:
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- mdev->rs_paused = 0;
+ device->rs_total = 0;
+ device->rs_failed = 0;
+ device->rs_paused = 0;
/* reset start sector, if we reached end of device */
- if (verify_done && mdev->ov_left == 0)
- mdev->ov_start_sector = 0;
+ if (verify_done && device->ov_left == 0)
+ device->ov_start_sector = 0;
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
if (khelper_cmd)
- drbd_khelper(mdev, khelper_cmd);
+ drbd_khelper(device, khelper_cmd);
return 1;
}
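
drbd_resync_finished() reports throughput as Bit2KB(db/dt) and, for checksum-based resync, the share of blocks whose checksums already matched. A worked example with invented numbers; the Bit2KB() definition below assumes 4 KiB bitmap blocks and is not taken from this patch:

#include <stdio.h>

#define Bit2KB(bits)    ((bits) << 2)       /* assumed: 1 bit == one 4 KiB block */

int main(void)
{
        unsigned long dt = 120;             /* resync took 120 s (minus paused time) */
        unsigned long db = 262144;          /* 262144 bits resynced == 1 GiB         */
        unsigned long s  = 200000, t = db;  /* equal-checksum blocks vs. total       */
        unsigned long dbdt, ratio;

        dbdt  = Bit2KB(db / dt);            /* KiB per second */
        ratio = (t == 0) ? 0 : (t < 100000) ? (s * 100 / t) : (s / (t / 100));

        printf("%lu K/sec, %lu%% had equal checksums\n", dbdt, ratio);
        return 0;
}

That prints roughly 8700 K/sec and 76%, i.e. three quarters of the blocks never had to be shipped over the wire, only their digests.
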
/* helper */
-static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
+static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
if (drbd_peer_req_has_active_page(peer_req)) {
/* This might happen if sendpage() has not finished */
int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
- atomic_add(i, &mdev->pp_in_use_by_net);
- atomic_sub(i, &mdev->pp_in_use);
- spin_lock_irq(&mdev->tconn->req_lock);
- list_add_tail(&peer_req->w.list, &mdev->net_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ atomic_add(i, &device->pp_in_use_by_net);
+ atomic_sub(i, &device->pp_in_use);
+ spin_lock_irq(&device->resource->req_lock);
+ list_add_tail(&peer_req->w.list, &device->net_ee);
+ spin_unlock_irq(&device->resource->req_lock);
wake_up(&drbd_pp_wait);
} else
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
}
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_e_end_data_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(mdev, peer_req);
- dec_unacked(mdev);
+ drbd_free_peer_req(device, peer_req);
+ dec_unacked(device);
return 0;
}
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- err = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
+ err = drbd_send_block(peer_device, P_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
+ drbd_err(device, "Sending NegDReply. sector=%llus.\n",
(unsigned long long)peer_req->i.sector);
- err = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
+ err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
}
- dec_unacked(mdev);
+ dec_unacked(device);
- move_to_net_ee_or_free(mdev, peer_req);
+ move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
- dev_err(DEV, "drbd_send_block() failed\n");
+ drbd_err(device, "drbd_send_block() failed\n");
return err;
}
/**
* w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
- * @mdev: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(mdev, peer_req);
- dec_unacked(mdev);
+ drbd_free_peer_req(device, peer_req);
+ dec_unacked(device);
return 0;
}
- if (get_ldev_if_state(mdev, D_FAILED)) {
- drbd_rs_complete_io(mdev, peer_req->i.sector);
- put_ldev(mdev);
+ if (get_ldev_if_state(device, D_FAILED)) {
+ drbd_rs_complete_io(device, peer_req->i.sector);
+ put_ldev(device);
}
- if (mdev->state.conn == C_AHEAD) {
- err = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
+ if (device->state.conn == C_AHEAD) {
+ err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req);
} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
- inc_rs_pending(mdev);
- err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
+ if (likely(device->state.pdsk >= D_INCONSISTENT)) {
+ inc_rs_pending(device);
+ err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Not sending RSDataReply, "
+ drbd_err(device, "Not sending RSDataReply, "
"partner DISKLESS!\n");
err = 0;
}
} else {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
+ drbd_err(device, "Sending NegRSDReply. sector %llus.\n",
(unsigned long long)peer_req->i.sector);
- err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
+ err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
/* update resync data with failure */
- drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
+ drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size);
}
- dec_unacked(mdev);
+ dec_unacked(device);
- move_to_net_ee_or_free(mdev, peer_req);
+ move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
- dev_err(DEV, "drbd_send_block() failed\n");
+ drbd_err(device, "drbd_send_block() failed\n");
return err;
}
int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
struct digest_info *di;
int digest_size;
void *digest = NULL;
int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_peer_req(mdev, peer_req);
- dec_unacked(mdev);
+ drbd_free_peer_req(device, peer_req);
+ dec_unacked(device);
return 0;
}
- if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, peer_req->i.sector);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ drbd_rs_complete_io(device, peer_req->i.sector);
+ put_ldev(device);
}
di = peer_req->digest;
@@ -1095,47 +1108,48 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
/* quick hack to try to avoid a race against reconfiguration.
* a real fix would be much more involved,
* introducing more locking mechanisms */
- if (mdev->tconn->csums_tfm) {
- digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
- D_ASSERT(digest_size == di->digest_size);
+ if (peer_device->connection->csums_tfm) {
+ digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
+ D_ASSERT(device, digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
if (digest) {
- drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
+ drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
if (eq) {
- drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
+ drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
/* rs_same_csums unit is BM_BLOCK_SIZE */
- mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
- err = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
+ device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
+ err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req);
} else {
- inc_rs_pending(mdev);
+ inc_rs_pending(device);
peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
kfree(di);
- err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
+ err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
}
} else {
- err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
+ err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
+ drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
}
- dec_unacked(mdev);
- move_to_net_ee_or_free(mdev, peer_req);
+ dec_unacked(device);
+ move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
- dev_err(DEV, "drbd_send_block/ack() failed\n");
+ drbd_err(device, "drbd_send_block/ack() failed\n");
return err;
}
int w_e_end_ov_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
sector_t sector = peer_req->i.sector;
unsigned int size = peer_req->i.size;
int digest_size;
@@ -1145,7 +1159,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
- digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
+ digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
err = 1; /* terminate the connection in case the allocation failed */
@@ -1153,7 +1167,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
}
if (likely(!(peer_req->flags & EE_WAS_ERROR)))
- drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
+ drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
else
memset(digest, 0, digest_size);
@@ -1162,36 +1176,37 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_alloc_pages due to pp_in_use > max_buffers. */
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
peer_req = NULL;
- inc_rs_pending(mdev);
- err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size, P_OV_REPLY);
+ inc_rs_pending(device);
+ err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY);
if (err)
- dec_rs_pending(mdev);
+ dec_rs_pending(device);
kfree(digest);
out:
if (peer_req)
- drbd_free_peer_req(mdev, peer_req);
- dec_unacked(mdev);
+ drbd_free_peer_req(device, peer_req);
+ dec_unacked(device);
return err;
}
-void drbd_ov_out_of_sync_found(struct drbd_conf *mdev, sector_t sector, int size)
+void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size)
{
- if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
- mdev->ov_last_oos_size += size>>9;
+ if (device->ov_last_oos_start + device->ov_last_oos_size == sector) {
+ device->ov_last_oos_size += size>>9;
} else {
- mdev->ov_last_oos_start = sector;
- mdev->ov_last_oos_size = size>>9;
+ device->ov_last_oos_start = sector;
+ device->ov_last_oos_size = size>>9;
}
- drbd_set_out_of_sync(mdev, sector, size);
+ drbd_set_out_of_sync(device, sector, size);
}
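
drbd_ov_out_of_sync_found() folds adjacent out-of-sync blocks into one growing [start, size) run so that ov_out_of_sync_print() can report whole runs instead of single blocks. A self-contained model of that coalescing (plain C; reporting is simplified to one printf per closed run):

#include <stdio.h>

struct oos_run { unsigned long long start, size; };     /* both in sectors */

static void found_out_of_sync(struct oos_run *run,
                              unsigned long long sector, unsigned int bytes)
{
        unsigned long long sectors = bytes >> 9;

        if (run->size && run->start + run->size == sector) {
                run->size += sectors;                   /* extends the current run */
        } else {
                if (run->size)
                        printf("out of sync: %llu sectors starting at %llu\n",
                               run->size, run->start);
                run->start = sector;                    /* begins a new run */
                run->size  = sectors;
        }
}

int main(void)
{
        struct oos_run run = { 0, 0 };

        found_out_of_sync(&run, 1000, 4096);    /* 8 sectors at 1000          */
        found_out_of_sync(&run, 1008, 4096);    /* contiguous: run grows to 16 */
        found_out_of_sync(&run, 5000, 4096);    /* gap: previous run reported  */
        printf("current run: %llu sectors starting at %llu\n", run.size, run.start);
        return 0;
}
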
int w_e_end_ov_reply(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
struct digest_info *di;
void *digest;
sector_t sector = peer_req->i.sector;
@@ -1201,27 +1216,27 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
bool stop_sector_reached = false;
if (unlikely(cancel)) {
- drbd_free_peer_req(mdev, peer_req);
- dec_unacked(mdev);
+ drbd_free_peer_req(device, peer_req);
+ dec_unacked(device);
return 0;
}
/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
* the resync lru has been cleaned up already */
- if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, peer_req->i.sector);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ drbd_rs_complete_io(device, peer_req->i.sector);
+ put_ldev(device);
}
di = peer_req->digest;
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
+ digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
+ drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
- D_ASSERT(digest_size == di->digest_size);
+ D_ASSERT(device, digest_size == di->digest_size);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
@@ -1232,102 +1247,95 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_alloc_pages due to pp_in_use > max_buffers. */
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
if (!eq)
- drbd_ov_out_of_sync_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(device, sector, size);
else
- ov_out_of_sync_print(mdev);
+ ov_out_of_sync_print(device);
- err = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
+ err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size,
eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
- dec_unacked(mdev);
+ dec_unacked(device);
- --mdev->ov_left;
+ --device->ov_left;
/* let's advance progress step marks only for every other megabyte */
- if ((mdev->ov_left & 0x200) == 0x200)
- drbd_advance_rs_marks(mdev, mdev->ov_left);
+ if ((device->ov_left & 0x200) == 0x200)
+ drbd_advance_rs_marks(device, device->ov_left);
- stop_sector_reached = verify_can_do_stop_sector(mdev) &&
- (sector + (size>>9)) >= mdev->ov_stop_sector;
+ stop_sector_reached = verify_can_do_stop_sector(device) &&
+ (sector + (size>>9)) >= device->ov_stop_sector;
- if (mdev->ov_left == 0 || stop_sector_reached) {
- ov_out_of_sync_print(mdev);
- drbd_resync_finished(mdev);
+ if (device->ov_left == 0 || stop_sector_reached) {
+ ov_out_of_sync_print(device);
+ drbd_resync_finished(device);
}
return err;
}
-int w_prev_work_done(struct drbd_work *w, int cancel)
-{
- struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
-
- complete(&b->done);
- return 0;
-}
-
/* FIXME
* We need to track the number of pending barrier acks,
* and to be able to wait for them.
* See also comment in drbd_adm_attach before drbd_suspend_io.
*/
-int drbd_send_barrier(struct drbd_tconn *tconn)
+static int drbd_send_barrier(struct drbd_connection *connection)
{
struct p_barrier *p;
struct drbd_socket *sock;
- sock = &tconn->data;
- p = conn_prepare_command(tconn, sock);
+ sock = &connection->data;
+ p = conn_prepare_command(connection, sock);
if (!p)
return -EIO;
- p->barrier = tconn->send.current_epoch_nr;
+ p->barrier = connection->send.current_epoch_nr;
p->pad = 0;
- tconn->send.current_epoch_writes = 0;
+ connection->send.current_epoch_writes = 0;
- return conn_send_command(tconn, sock, P_BARRIER, sizeof(*p), NULL, 0);
+ return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
}
int w_send_write_hint(struct drbd_work *w, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, unplug_work);
struct drbd_socket *sock;
if (cancel)
return 0;
- sock = &mdev->tconn->data;
- if (!drbd_prepare_command(mdev, sock))
+ sock = &first_peer_device(device)->connection->data;
+ if (!drbd_prepare_command(first_peer_device(device), sock))
return -EIO;
- return drbd_send_command(mdev, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
+ return drbd_send_command(first_peer_device(device), sock, P_UNPLUG_REMOTE, 0, NULL, 0);
}
-static void re_init_if_first_write(struct drbd_tconn *tconn, unsigned int epoch)
+static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
{
- if (!tconn->send.seen_any_write_yet) {
- tconn->send.seen_any_write_yet = true;
- tconn->send.current_epoch_nr = epoch;
- tconn->send.current_epoch_writes = 0;
+ if (!connection->send.seen_any_write_yet) {
+ connection->send.seen_any_write_yet = true;
+ connection->send.current_epoch_nr = epoch;
+ connection->send.current_epoch_writes = 0;
}
}
-static void maybe_send_barrier(struct drbd_tconn *tconn, unsigned int epoch)
+static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch)
{
/* re-init if first write on this connection */
- if (!tconn->send.seen_any_write_yet)
+ if (!connection->send.seen_any_write_yet)
return;
- if (tconn->send.current_epoch_nr != epoch) {
- if (tconn->send.current_epoch_writes)
- drbd_send_barrier(tconn);
- tconn->send.current_epoch_nr = epoch;
+ if (connection->send.current_epoch_nr != epoch) {
+ if (connection->send.current_epoch_writes)
+ drbd_send_barrier(connection);
+ connection->send.current_epoch_nr = epoch;
}
}
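
re_init_if_first_write() and maybe_send_barrier() together decide when a P_BARRIER has to go out: only once writes have been seen at all, and only when the request being sent belongs to a newer epoch than the one currently open and that epoch actually carried writes. A self-contained model of that bookkeeping (plain C, invented epoch numbers):

#include <stdio.h>
#include <stdbool.h>

struct send_state {
        bool seen_any_write_yet;
        unsigned int current_epoch_nr;
        unsigned int current_epoch_writes;
};

static void send_request(struct send_state *s, unsigned int epoch, bool is_write)
{
        if (is_write && !s->seen_any_write_yet) {       /* re_init_if_first_write() */
                s->seen_any_write_yet = true;
                s->current_epoch_nr = epoch;
                s->current_epoch_writes = 0;
        }
        if (s->seen_any_write_yet && s->current_epoch_nr != epoch) {  /* maybe_send_barrier() */
                if (s->current_epoch_writes)
                        printf("  -> P_BARRIER closing epoch %u\n", s->current_epoch_nr);
                s->current_epoch_nr = epoch;
                s->current_epoch_writes = 0;
        }
        if (is_write)
                s->current_epoch_writes++;
        printf("%s, epoch %u\n", is_write ? "P_DATA" : "P_DATA_REQUEST", epoch);
}

int main(void)
{
        struct send_state s = { false, 0, 0 };

        send_request(&s, 7, true);      /* first write just opens epoch 7        */
        send_request(&s, 7, true);
        send_request(&s, 8, true);      /* epoch changed, 7 had writes -> barrier */
        send_request(&s, 9, false);     /* even a read closes epoch 8             */
        return 0;
}
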
int w_send_out_of_sync(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- struct drbd_conf *mdev = w->mdev;
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_device *device = req->device;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
int err;
if (unlikely(cancel)) {
@@ -1335,13 +1343,13 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
return 0;
}
- /* this time, no tconn->send.current_epoch_writes++;
+ /* this time, no connection->send.current_epoch_writes++;
* If it was sent, it was the closing barrier for the last
* replicated epoch, before we went into AHEAD mode.
* No more barriers will be sent, until we leave AHEAD mode again. */
- maybe_send_barrier(tconn, req->epoch);
+ maybe_send_barrier(connection, req->epoch);
- err = drbd_send_out_of_sync(mdev, req);
+ err = drbd_send_out_of_sync(first_peer_device(device), req);
req_mod(req, OOS_HANDED_TO_NETWORK);
return err;
@@ -1349,15 +1357,14 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
/**
* w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
- * @mdev: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_send_dblock(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- struct drbd_conf *mdev = w->mdev;
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_device *device = req->device;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
int err;
if (unlikely(cancel)) {
@@ -1365,11 +1372,11 @@ int w_send_dblock(struct drbd_work *w, int cancel)
return 0;
}
- re_init_if_first_write(tconn, req->epoch);
- maybe_send_barrier(tconn, req->epoch);
- tconn->send.current_epoch_writes++;
+ re_init_if_first_write(connection, req->epoch);
+ maybe_send_barrier(connection, req->epoch);
+ connection->send.current_epoch_writes++;
- err = drbd_send_dblock(mdev, req);
+ err = drbd_send_dblock(first_peer_device(device), req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
return err;
@@ -1377,15 +1384,14 @@ int w_send_dblock(struct drbd_work *w, int cancel)
/**
* w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
- * @mdev: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_send_read_req(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- struct drbd_conf *mdev = w->mdev;
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_device *device = req->device;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
int err;
if (unlikely(cancel)) {
@@ -1395,9 +1401,9 @@ int w_send_read_req(struct drbd_work *w, int cancel)
/* Even read requests may close a write epoch,
* if there was any yet. */
- maybe_send_barrier(tconn, req->epoch);
+ maybe_send_barrier(connection, req->epoch);
- err = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
+ err = drbd_send_drequest(first_peer_device(device), P_DATA_REQUEST, req->i.sector, req->i.size,
(unsigned long)req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
@@ -1408,21 +1414,21 @@ int w_send_read_req(struct drbd_work *w, int cancel)
int w_restart_disk_io(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device = req->device;
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
- drbd_al_begin_io(mdev, &req->i, false);
+ drbd_al_begin_io(device, &req->i, false);
drbd_req_make_private_bio(req, req->master_bio);
- req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
+ req->private_bio->bi_bdev = device->ldev->backing_bdev;
generic_make_request(req->private_bio);
return 0;
}
-static int _drbd_may_sync_now(struct drbd_conf *mdev)
+static int _drbd_may_sync_now(struct drbd_device *device)
{
- struct drbd_conf *odev = mdev;
+ struct drbd_device *odev = device;
int resync_after;
while (1) {
@@ -1433,7 +1439,7 @@ static int _drbd_may_sync_now(struct drbd_conf *mdev)
rcu_read_unlock();
if (resync_after == -1)
return 1;
- odev = minor_to_mdev(resync_after);
+ odev = minor_to_device(resync_after);
if (!odev)
return 1;
if ((odev->state.conn >= C_SYNC_SOURCE &&
@@ -1446,17 +1452,17 @@ static int _drbd_may_sync_now(struct drbd_conf *mdev)
/**
* _drbd_pause_after() - Pause resync on all devices that may not resync now
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Called from process context only (admin command and after_state_ch).
*/
-static int _drbd_pause_after(struct drbd_conf *mdev)
+static int _drbd_pause_after(struct drbd_device *device)
{
- struct drbd_conf *odev;
+ struct drbd_device *odev;
int i, rv = 0;
rcu_read_lock();
- idr_for_each_entry(&minors, odev, i) {
+ idr_for_each_entry(&drbd_devices, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (!_drbd_may_sync_now(odev))
@@ -1470,17 +1476,17 @@ static int _drbd_pause_after(struct drbd_conf *mdev)
/**
* _drbd_resume_next() - Resume resync on all devices that may resync now
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Called from process context only (admin command and worker).
*/
-static int _drbd_resume_next(struct drbd_conf *mdev)
+static int _drbd_resume_next(struct drbd_device *device)
{
- struct drbd_conf *odev;
+ struct drbd_device *odev;
int i, rv = 0;
rcu_read_lock();
- idr_for_each_entry(&minors, odev, i) {
+ idr_for_each_entry(&drbd_devices, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (odev->state.aftr_isp) {
@@ -1494,24 +1500,24 @@ static int _drbd_resume_next(struct drbd_conf *mdev)
return rv;
}
-void resume_next_sg(struct drbd_conf *mdev)
+void resume_next_sg(struct drbd_device *device)
{
write_lock_irq(&global_state_lock);
- _drbd_resume_next(mdev);
+ _drbd_resume_next(device);
write_unlock_irq(&global_state_lock);
}
-void suspend_other_sg(struct drbd_conf *mdev)
+void suspend_other_sg(struct drbd_device *device)
{
write_lock_irq(&global_state_lock);
- _drbd_pause_after(mdev);
+ _drbd_pause_after(device);
write_unlock_irq(&global_state_lock);
}
/* caller must hold global_state_lock */
-enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
+enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor)
{
- struct drbd_conf *odev;
+ struct drbd_device *odev;
int resync_after;
if (o_minor == -1)
@@ -1520,9 +1526,9 @@ enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
return ERR_RESYNC_AFTER;
/* check for loops */
- odev = minor_to_mdev(o_minor);
+ odev = minor_to_device(o_minor);
while (1) {
- if (odev == mdev)
+ if (odev == device)
return ERR_RESYNC_AFTER_CYCLE;
/* You are free to depend on diskless, non-existing,
@@ -1542,35 +1548,35 @@ enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
return NO_ERROR;
/* follow the dependency chain */
- odev = minor_to_mdev(resync_after);
+ odev = minor_to_device(resync_after);
}
}
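
drbd_resync_after_valid() follows the chain of "resync after this minor" dependencies and rejects a configuration that would loop back to the device being configured. A self-contained model of just the cycle walk (plain C); the minor-range and diskless shortcuts of the real function are omitted:

#include <stdio.h>

#define NR_MINORS       4
#define NO_DEPENDENCY   (-1)

/* minor 1 resyncs after 0, minor 2 after 1, minors 0 and 3 depend on nothing */
static int resync_after[NR_MINORS] = { NO_DEPENDENCY, 0, 1, NO_DEPENDENCY };

static int resync_after_valid(int minor, int o_minor)
{
        int odev = o_minor;

        if (o_minor == NO_DEPENDENCY)
                return 1;
        while (odev != NO_DEPENDENCY) {
                if (odev == minor)
                        return 0;               /* ERR_RESYNC_AFTER_CYCLE */
                odev = resync_after[odev];      /* follow the dependency chain */
        }
        return 1;
}

int main(void)
{
        printf("minor 3 after 2: %s\n", resync_after_valid(3, 2) ? "ok" : "cycle");
        printf("minor 0 after 2: %s\n", resync_after_valid(0, 2) ? "ok" : "cycle");
        return 0;
}

Setting minor 0 to resync after minor 2 is rejected because the existing chain 2 -> 1 -> 0 would then close into a loop.
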
/* caller must hold global_state_lock */
-void drbd_resync_after_changed(struct drbd_conf *mdev)
+void drbd_resync_after_changed(struct drbd_device *device)
{
int changes;
do {
- changes = _drbd_pause_after(mdev);
- changes |= _drbd_resume_next(mdev);
+ changes = _drbd_pause_after(device);
+ changes |= _drbd_resume_next(device);
} while (changes);
}
-void drbd_rs_controller_reset(struct drbd_conf *mdev)
+void drbd_rs_controller_reset(struct drbd_device *device)
{
struct fifo_buffer *plan;
- atomic_set(&mdev->rs_sect_in, 0);
- atomic_set(&mdev->rs_sect_ev, 0);
- mdev->rs_in_flight = 0;
+ atomic_set(&device->rs_sect_in, 0);
+ atomic_set(&device->rs_sect_ev, 0);
+ device->rs_in_flight = 0;
/* Updating the RCU protected object in place is necessary since
this function gets called from atomic context.
It is valid since all other updates also lead to an completely
empty fifo */
rcu_read_lock();
- plan = rcu_dereference(mdev->rs_plan_s);
+ plan = rcu_dereference(device->rs_plan_s);
plan->total = 0;
fifo_set(plan, 0);
rcu_read_unlock();
@@ -1578,101 +1584,104 @@ void drbd_rs_controller_reset(struct drbd_conf *mdev)
void start_resync_timer_fn(unsigned long data)
{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
+ struct drbd_device *device = (struct drbd_device *) data;
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &device->start_resync_work);
}
int w_start_resync(struct drbd_work *w, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, start_resync_work);
- if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
- dev_warn(DEV, "w_start_resync later...\n");
- mdev->start_resync_timer.expires = jiffies + HZ/10;
- add_timer(&mdev->start_resync_timer);
+ if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) {
+ drbd_warn(device, "w_start_resync later...\n");
+ device->start_resync_timer.expires = jiffies + HZ/10;
+ add_timer(&device->start_resync_timer);
return 0;
}
- drbd_start_resync(mdev, C_SYNC_SOURCE);
- clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+ drbd_start_resync(device, C_SYNC_SOURCE);
+ clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags);
return 0;
}
/**
* drbd_start_resync() - Start the resync process
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @side: Either C_SYNC_SOURCE or C_SYNC_TARGET
*
* This function might bring you directly into one of the
* C_PAUSED_SYNC_* states.
*/
-void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
+void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
{
union drbd_state ns;
int r;
- if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
- dev_err(DEV, "Resync already running!\n");
+ if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) {
+ drbd_err(device, "Resync already running!\n");
return;
}
- if (!test_bit(B_RS_H_DONE, &mdev->flags)) {
+ if (!test_bit(B_RS_H_DONE, &device->flags)) {
if (side == C_SYNC_TARGET) {
/* Since application IO was locked out during C_WF_BITMAP_T and
C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
we check that we might make the data inconsistent. */
- r = drbd_khelper(mdev, "before-resync-target");
+ r = drbd_khelper(device, "before-resync-target");
r = (r >> 8) & 0xff;
if (r > 0) {
- dev_info(DEV, "before-resync-target handler returned %d, "
+ drbd_info(device, "before-resync-target handler returned %d, "
"dropping connection.\n", r);
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return;
}
} else /* C_SYNC_SOURCE */ {
- r = drbd_khelper(mdev, "before-resync-source");
+ r = drbd_khelper(device, "before-resync-source");
r = (r >> 8) & 0xff;
if (r > 0) {
if (r == 3) {
- dev_info(DEV, "before-resync-source handler returned %d, "
+ drbd_info(device, "before-resync-source handler returned %d, "
"ignoring. Old userland tools?", r);
} else {
- dev_info(DEV, "before-resync-source handler returned %d, "
+ drbd_info(device, "before-resync-source handler returned %d, "
"dropping connection.\n", r);
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(first_peer_device(device)->connection,
+ NS(conn, C_DISCONNECTING), CS_HARD);
return;
}
}
}
}
- if (current == mdev->tconn->worker.task) {
+ if (current == first_peer_device(device)->connection->worker.task) {
/* The worker should not sleep waiting for state_mutex,
that can take long */
- if (!mutex_trylock(mdev->state_mutex)) {
- set_bit(B_RS_H_DONE, &mdev->flags);
- mdev->start_resync_timer.expires = jiffies + HZ/5;
- add_timer(&mdev->start_resync_timer);
+ if (!mutex_trylock(device->state_mutex)) {
+ set_bit(B_RS_H_DONE, &device->flags);
+ device->start_resync_timer.expires = jiffies + HZ/5;
+ add_timer(&device->start_resync_timer);
return;
}
} else {
- mutex_lock(mdev->state_mutex);
+ mutex_lock(device->state_mutex);
}
- clear_bit(B_RS_H_DONE, &mdev->flags);
+ clear_bit(B_RS_H_DONE, &device->flags);
write_lock_irq(&global_state_lock);
/* Did some connection breakage or IO error race with us? */
- if (mdev->state.conn < C_CONNECTED
- || !get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ if (device->state.conn < C_CONNECTED
+ || !get_ldev_if_state(device, D_NEGOTIATING)) {
write_unlock_irq(&global_state_lock);
- mutex_unlock(mdev->state_mutex);
+ mutex_unlock(device->state_mutex);
return;
}
- ns = drbd_read_state(mdev);
+ ns = drbd_read_state(device);
- ns.aftr_isp = !_drbd_may_sync_now(mdev);
+ ns.aftr_isp = !_drbd_may_sync_now(device);
ns.conn = side;
@@ -1681,43 +1690,43 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
else /* side == C_SYNC_SOURCE */
ns.pdsk = D_INCONSISTENT;
- r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- ns = drbd_read_state(mdev);
+ r = __drbd_set_state(device, ns, CS_VERBOSE, NULL);
+ ns = drbd_read_state(device);
if (ns.conn < C_CONNECTED)
r = SS_UNKNOWN_ERROR;
if (r == SS_SUCCESS) {
- unsigned long tw = drbd_bm_total_weight(mdev);
+ unsigned long tw = drbd_bm_total_weight(device);
unsigned long now = jiffies;
int i;
- mdev->rs_failed = 0;
- mdev->rs_paused = 0;
- mdev->rs_same_csum = 0;
- mdev->rs_last_events = 0;
- mdev->rs_last_sect_ev = 0;
- mdev->rs_total = tw;
- mdev->rs_start = now;
+ device->rs_failed = 0;
+ device->rs_paused = 0;
+ device->rs_same_csum = 0;
+ device->rs_last_events = 0;
+ device->rs_last_sect_ev = 0;
+ device->rs_total = tw;
+ device->rs_start = now;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = tw;
- mdev->rs_mark_time[i] = now;
+ device->rs_mark_left[i] = tw;
+ device->rs_mark_time[i] = now;
}
- _drbd_pause_after(mdev);
+ _drbd_pause_after(device);
}
write_unlock_irq(&global_state_lock);
if (r == SS_SUCCESS) {
/* reset rs_last_bcast when a resync or verify is started,
* to deal with potential jiffies wrap. */
- mdev->rs_last_bcast = jiffies - HZ;
+ device->rs_last_bcast = jiffies - HZ;
- dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
+ drbd_info(device, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
drbd_conn_str(ns.conn),
- (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
- (unsigned long) mdev->rs_total);
+ (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10),
+ (unsigned long) device->rs_total);
if (side == C_SYNC_TARGET)
- mdev->bm_resync_fo = 0;
+ device->bm_resync_fo = 0;
/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
* with w_send_oos, or the sync target will get confused as to
@@ -1726,10 +1735,12 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
* drbd_resync_finished from here in that case.
* We drbd_gen_and_send_sync_uuid here for protocol < 96,
* and from after_state_ch otherwise. */
- if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96)
- drbd_gen_and_send_sync_uuid(mdev);
+ if (side == C_SYNC_SOURCE &&
+ first_peer_device(device)->connection->agreed_pro_version < 96)
+ drbd_gen_and_send_sync_uuid(first_peer_device(device));
- if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) {
+ if (first_peer_device(device)->connection->agreed_pro_version < 95 &&
+ device->rs_total == 0) {
/* This still has a race (about when exactly the peers
* detect connection loss) that can lead to a full sync
* on next handshake. In 8.3.9 we fixed this with explicit
@@ -1745,33 +1756,33 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
int timeo;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
rcu_read_unlock();
schedule_timeout_interruptible(timeo);
}
- drbd_resync_finished(mdev);
+ drbd_resync_finished(device);
}
- drbd_rs_controller_reset(mdev);
- /* ns.conn may already be != mdev->state.conn,
+ drbd_rs_controller_reset(device);
+ /* ns.conn may already be != device->state.conn,
* we may have been paused in between, or become paused until
* the timer triggers.
* No matter, that is handled in resync_timer_fn() */
if (ns.conn == C_SYNC_TARGET)
- mod_timer(&mdev->resync_timer, jiffies);
+ mod_timer(&device->resync_timer, jiffies);
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
}
- put_ldev(mdev);
- mutex_unlock(mdev->state_mutex);
+ put_ldev(device);
+ mutex_unlock(device->state_mutex);
}
/* If the resource already closed the current epoch, but we did not
* (because we have not yet seen new requests), we should send the
* corresponding barrier now. Must be checked within the same spinlock
* that is used to check for new requests. */
-bool need_to_send_barrier(struct drbd_tconn *connection)
+static bool need_to_send_barrier(struct drbd_connection *connection)
{
if (!connection->send.seen_any_write_yet)
return false;
@@ -1795,7 +1806,7 @@ bool need_to_send_barrier(struct drbd_tconn *connection)
return true;
}
-bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
+static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
{
spin_lock_irq(&queue->q_lock);
list_splice_init(&queue->q, work_list);
@@ -1803,7 +1814,7 @@ bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_li
return !list_empty(work_list);
}
-bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list)
+static bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list)
{
spin_lock_irq(&queue->q_lock);
if (!list_empty(&queue->q))
@@ -1812,7 +1823,7 @@ bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_lis
return !list_empty(work_list);
}
-void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
+static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
{
DEFINE_WAIT(wait);
struct net_conf *nc;
@@ -1842,7 +1853,7 @@ void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
for (;;) {
int send_barrier;
prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
- spin_lock_irq(&connection->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
/* dequeue single item only,
* we still use drbd_queue_work_front() in some places */
@@ -1850,11 +1861,11 @@ void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
list_move(connection->sender_work.q.next, work_list);
spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
if (!list_empty(work_list) || signal_pending(current)) {
- spin_unlock_irq(&connection->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
break;
}
send_barrier = need_to_send_barrier(connection);
- spin_unlock_irq(&connection->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
if (send_barrier) {
drbd_send_barrier(connection);
connection->send.current_epoch_nr++;
@@ -1883,9 +1894,9 @@ void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
int drbd_worker(struct drbd_thread *thi)
{
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_connection *connection = thi->connection;
struct drbd_work *w = NULL;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
LIST_HEAD(work_list);
int vnr;
@@ -1895,12 +1906,12 @@ int drbd_worker(struct drbd_thread *thi)
/* as long as we use drbd_queue_work_front(),
* we may only dequeue single work items here, not batches. */
if (list_empty(&work_list))
- wait_for_work(tconn, &work_list);
+ wait_for_work(connection, &work_list);
if (signal_pending(current)) {
flush_signals(current);
if (get_t_state(thi) == RUNNING) {
- conn_warn(tconn, "Worker got an unexpected signal\n");
+ drbd_warn(connection, "Worker got an unexpected signal\n");
continue;
}
break;
@@ -1912,10 +1923,10 @@ int drbd_worker(struct drbd_thread *thi)
while (!list_empty(&work_list)) {
w = list_first_entry(&work_list, struct drbd_work, list);
list_del_init(&w->list);
- if (w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS) == 0)
+ if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
continue;
- if (tconn->cstate >= C_WF_REPORT_PARAMS)
- conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+ if (connection->cstate >= C_WF_REPORT_PARAMS)
+ conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
}
}
@@ -1925,16 +1936,17 @@ int drbd_worker(struct drbd_thread *thi)
list_del_init(&w->list);
w->cb(w, 1);
}
- dequeue_work_batch(&tconn->sender_work, &work_list);
+ dequeue_work_batch(&connection->sender_work, &work_list);
} while (!list_empty(&work_list));
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
+ kref_get(&device->kref);
rcu_read_unlock();
- drbd_mdev_cleanup(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_device_cleanup(device);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
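
The worker shutdown loop above takes a reference on each device with kref_get() while under rcu_read_lock(), drops the RCU read lock before calling drbd_device_cleanup() (which may sleep), and only puts the reference afterwards. Below is a rough userspace analog of that reference-then-unlock pattern, with a pthread rwlock standing in for RCU and a plain atomic counter standing in for struct kref; all names here (device_get, device_put, cleanup_all) are invented for the sketch, not driver code.

/* Reference-then-unlock: pin the object while the read lock is held, drop
 * the lock before doing work that may block, release the reference after. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct device {
	atomic_int refcount;
	int minor;
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct device *devices[4];	/* stand-in for the idr of peer devices */

static void device_get(struct device *dev)
{
	atomic_fetch_add(&dev->refcount, 1);
}

static void device_put(struct device *dev)
{
	if (atomic_fetch_sub(&dev->refcount, 1) == 1) {
		printf("destroying device %d\n", dev->minor);
		free(dev);
	}
}

static void blocking_cleanup(struct device *dev)
{
	/* may sleep: must not run under the read lock */
	printf("cleaning up device %d\n", dev->minor);
}

static void cleanup_all(void)
{
	pthread_rwlock_rdlock(&table_lock);
	for (int i = 0; i < 4; i++) {
		struct device *dev = devices[i];

		if (!dev)
			continue;
		device_get(dev);			/* keep it alive */
		pthread_rwlock_unlock(&table_lock);	/* drop lock before blocking */
		blocking_cleanup(dev);
		device_put(dev);
		pthread_rwlock_rdlock(&table_lock);	/* re-take for the next entry */
	}
	pthread_rwlock_unlock(&table_lock);
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		devices[i] = calloc(1, sizeof(*devices[i]));
		devices[i]->minor = i;
		atomic_init(&devices[i]->refcount, 1);
	}
	cleanup_all();
	for (int i = 0; i < 4; i++)
		device_put(devices[i]);		/* drop the owner references */
	return 0;
}
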
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index 328f18e4b4ee..3db9ebaf64f6 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -9,12 +9,12 @@
extern char *drbd_sec_holder;
/* sets the number of 512 byte sectors of our virtual device */
-static inline void drbd_set_my_capacity(struct drbd_conf *mdev,
+static inline void drbd_set_my_capacity(struct drbd_device *device,
sector_t size)
{
- /* set_capacity(mdev->this_bdev->bd_disk, size); */
- set_capacity(mdev->vdisk, size);
- mdev->this_bdev->bd_inode->i_size = (loff_t)size << 9;
+ /* set_capacity(device->this_bdev->bd_disk, size); */
+ set_capacity(device->vdisk, size);
+ device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
}
#define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE)
@@ -27,20 +27,20 @@ extern void drbd_request_endio(struct bio *bio, int error);
/*
* used to submit our private bio
*/
-static inline void drbd_generic_make_request(struct drbd_conf *mdev,
+static inline void drbd_generic_make_request(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
if (!bio->bi_bdev) {
printk(KERN_ERR "drbd%d: drbd_generic_make_request: "
"bio->bi_bdev == NULL\n",
- mdev_to_minor(mdev));
+ device_to_minor(device));
dump_stack();
bio_endio(bio, -ENODEV);
return;
}
- if (drbd_insert_fault(mdev, fault_type))
+ if (drbd_insert_fault(device, fault_type))
bio_endio(bio, -EIO);
else
generic_make_request(bio);
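
drbd_set_my_capacity() keeps sizes in 512-byte sectors and mirrors the byte size into the backing inode with (loff_t)size << 9; widening to 64 bits before the shift is what keeps the byte count from overflowing once the device passes 4 GiB on a build where the sector type is 32-bit. A small sketch of that conversion — the variable names are made up for illustration:

/* Sectors-to-bytes conversion: widen to 64 bits *before* shifting by 9,
 * exactly as the (loff_t) cast above does. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint32_t sectors = 1u << 30;			/* 2^30 sectors == a 512 GiB device */
	uint32_t wrong = sectors << 9;			/* shift done in 32 bits: wraps to 0 */
	uint64_t right = (uint64_t)sectors << 9;	/* widen first, as the driver does */

	printf("32-bit shift: %" PRIu32 " bytes, widened shift: %" PRIu64 " bytes\n",
	       wrong, right);
	return 0;
}
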
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2023043ce7c0..8f5565bf34cd 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -961,17 +961,31 @@ static void empty(void)
{
}
-static DECLARE_WORK(floppy_work, NULL);
+static void (*floppy_work_fn)(void);
+
+static void floppy_work_workfn(struct work_struct *work)
+{
+ floppy_work_fn();
+}
+
+static DECLARE_WORK(floppy_work, floppy_work_workfn);
static void schedule_bh(void (*handler)(void))
{
WARN_ON(work_pending(&floppy_work));
- PREPARE_WORK(&floppy_work, (work_func_t)handler);
+ floppy_work_fn = handler;
queue_work(floppy_wq, &floppy_work);
}
-static DECLARE_DELAYED_WORK(fd_timer, NULL);
+static void (*fd_timer_fn)(void) = NULL;
+
+static void fd_timer_workfn(struct work_struct *work)
+{
+ fd_timer_fn();
+}
+
+static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
static void cancel_activity(void)
{
@@ -982,7 +996,7 @@ static void cancel_activity(void)
/* this function makes sure that the disk stays in the drive during the
* transfer */
-static void fd_watchdog(struct work_struct *arg)
+static void fd_watchdog(void)
{
debug_dcl(DP->flags, "calling disk change from watchdog\n");
@@ -993,7 +1007,7 @@ static void fd_watchdog(struct work_struct *arg)
reset_fdc();
} else {
cancel_delayed_work(&fd_timer);
- PREPARE_DELAYED_WORK(&fd_timer, fd_watchdog);
+ fd_timer_fn = fd_watchdog;
queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
}
}
@@ -1005,7 +1019,8 @@ static void main_command_interrupt(void)
}
/* waits for a delay (spinup or select) to pass */
-static int fd_wait_for_completion(unsigned long expires, work_func_t function)
+static int fd_wait_for_completion(unsigned long expires,
+ void (*function)(void))
{
if (FDCS->reset) {
reset_fdc(); /* do the reset during sleep to win time
@@ -1016,7 +1031,7 @@ static int fd_wait_for_completion(unsigned long expires, work_func_t function)
if (time_before(jiffies, expires)) {
cancel_delayed_work(&fd_timer);
- PREPARE_DELAYED_WORK(&fd_timer, function);
+ fd_timer_fn = function;
queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
return 1;
}
@@ -1334,8 +1349,7 @@ static int fdc_dtr(void)
* Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
*/
FDCS->dtr = raw_cmd->rate & 3;
- return fd_wait_for_completion(jiffies + 2UL * HZ / 100,
- (work_func_t)floppy_ready);
+ return fd_wait_for_completion(jiffies + 2UL * HZ / 100, floppy_ready);
} /* fdc_dtr */
static void tell_sector(void)
@@ -1440,7 +1454,7 @@ static void setup_rw_floppy(void)
int flags;
int dflags;
unsigned long ready_date;
- work_func_t function;
+ void (*function)(void);
flags = raw_cmd->flags;
if (flags & (FD_RAW_READ | FD_RAW_WRITE))
@@ -1454,9 +1468,9 @@ static void setup_rw_floppy(void)
*/
if (time_after(ready_date, jiffies + DP->select_delay)) {
ready_date -= DP->select_delay;
- function = (work_func_t)floppy_start;
+ function = floppy_start;
} else
- function = (work_func_t)setup_rw_floppy;
+ function = setup_rw_floppy;
/* wait until the floppy is spinning fast enough */
if (fd_wait_for_completion(ready_date, function))
@@ -1486,7 +1500,7 @@ static void setup_rw_floppy(void)
inr = result();
cont->interrupt();
} else if (flags & FD_RAW_NEED_DISK)
- fd_watchdog(NULL);
+ fd_watchdog();
}
static int blind_seek;
@@ -1863,7 +1877,7 @@ static int start_motor(void (*function)(void))
/* wait_for_completion also schedules reset if needed. */
return fd_wait_for_completion(DRS->select_date + DP->select_delay,
- (work_func_t)function);
+ function);
}
static void floppy_ready(void)
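
The floppy changes above remove PREPARE_WORK()/PREPARE_DELAYED_WORK() (which rewrote a work item's callback in place) and instead keep a fixed work function that dispatches through a plain function pointer. A self-contained sketch of that indirection follows; fake_queue_work() is a stub standing in for queue_work() on a real workqueue, and the two handlers are dummies.

/* Dispatcher pattern: the work item's callback stays fixed at one trampoline,
 * and the behaviour that used to be swapped in with PREPARE_WORK() is now
 * selected through an ordinary function pointer. */
#include <stdio.h>

static void (*floppy_work_fn)(void);		/* selected handler */

static void floppy_work_workfn(void)		/* fixed trampoline */
{
	floppy_work_fn();
}

/* stand-in for queue_work(): a real workqueue would run this asynchronously */
static void fake_queue_work(void (*workfn)(void))
{
	workfn();
}

static void fd_watchdog(void)  { puts("watchdog handler"); }
static void floppy_ready(void) { puts("ready handler"); }

static void schedule_bh(void (*handler)(void))
{
	floppy_work_fn = handler;		/* was: PREPARE_WORK(..., handler) */
	fake_queue_work(floppy_work_workfn);
}

int main(void)
{
	schedule_bh(fd_watchdog);
	schedule_bh(floppy_ready);
	return 0;
}
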
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 66e8c3b94ef3..f70a230a2945 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -237,7 +237,7 @@ static int __do_lo_send_write(struct file *file,
file_end_write(file);
if (likely(bw == len))
return 0;
- printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
+ printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
(unsigned long long)pos, len);
if (bw >= 0)
bw = -EIO;
@@ -277,7 +277,7 @@ static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
return __do_lo_send_write(lo->lo_backing_file,
page_address(page), bvec->bv_len,
pos);
- printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
+ printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
"length %i.\n", (unsigned long long)pos, bvec->bv_len);
if (ret > 0)
ret = -EIO;
@@ -316,7 +316,7 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
out:
return ret;
fail:
- printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
+ printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
ret = -ENOMEM;
goto out;
}
@@ -345,7 +345,7 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
size = p->bsize;
if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
- printk(KERN_ERR "loop: transfer error block %ld\n",
+ printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
page->index);
size = -EINVAL;
}
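
The loop.c hunks switch the error printk()s to printk_ratelimited() so a persistently failing transfer cannot flood the log. Below is a rough userspace analog of interval-based rate limiting; unlike the kernel helper it does not allow a burst of messages per interval, and the struct and function names are invented for the sketch.

/* Allow at most one message per interval and count how many were suppressed
 * in between; the suppressed count is reported with the next allowed message. */
#include <stdio.h>
#include <time.h>

struct ratelimit {
	time_t last;		/* when we last printed */
	int    interval;	/* seconds between messages */
	int    missed;		/* suppressed since last print */
};

static int ratelimit_ok(struct ratelimit *rl)
{
	time_t now = time(NULL);

	if (rl->last == 0 || now - rl->last >= rl->interval) {
		if (rl->missed)
			fprintf(stderr, "(%d messages suppressed)\n", rl->missed);
		rl->last = now;
		rl->missed = 0;
		return 1;
	}
	rl->missed++;
	return 0;
}

int main(void)
{
	struct ratelimit rl = { .interval = 5 };

	for (int i = 0; i < 10000; i++)
		if (ratelimit_ok(&rl))
			fprintf(stderr, "loop: write error (iteration %d)\n", i);
	return 0;
}
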
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index d777bb7cea93..59c5abe32f06 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -252,38 +252,45 @@ static void mtip_async_complete(struct mtip_port *port,
void *data,
int status)
{
- struct mtip_cmd *command;
+ struct mtip_cmd *cmd;
struct driver_data *dd = data;
- int cb_status = status ? -EIO : 0;
+ int unaligned, cb_status = status ? -EIO : 0;
+ void (*func)(void *, int);
if (unlikely(!dd) || unlikely(!port))
return;
- command = &port->commands[tag];
+ cmd = &port->commands[tag];
if (unlikely(status == PORT_IRQ_TF_ERR)) {
dev_warn(&port->dd->pdev->dev,
"Command tag %d failed due to TFE\n", tag);
}
+ /* Clear the active flag */
+ atomic_set(&port->commands[tag].active, 0);
+
/* Upper layer callback */
- if (likely(command->async_callback))
- command->async_callback(command->async_data, cb_status);
+ func = cmd->async_callback;
+ if (likely(func && cmpxchg(&cmd->async_callback, func, 0) == func)) {
- command->async_callback = NULL;
- command->comp_func = NULL;
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev,
+ cmd->sg,
+ cmd->scatter_ents,
+ cmd->direction);
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&dd->pdev->dev,
- command->sg,
- command->scatter_ents,
- command->direction);
+ func(cmd->async_data, cb_status);
+ unaligned = cmd->unaligned;
- /* Clear the allocated and active bits for the command */
- atomic_set(&port->commands[tag].active, 0);
- release_slot(port, tag);
+ /* Clear the allocated bit for the command */
+ release_slot(port, tag);
- up(&port->cmd_slot);
+ if (unlikely(unaligned))
+ up(&port->cmd_slot_unal);
+ else
+ up(&port->cmd_slot);
+ }
}
/*
@@ -660,11 +667,12 @@ static void mtip_timeout_function(unsigned long int data)
{
struct mtip_port *port = (struct mtip_port *) data;
struct host_to_dev_fis *fis;
- struct mtip_cmd *command;
- int tag, cmdto_cnt = 0;
+ struct mtip_cmd *cmd;
+ int unaligned, tag, cmdto_cnt = 0;
unsigned int bit, group;
unsigned int num_command_slots;
unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
+ void (*func)(void *, int);
if (unlikely(!port))
return;
@@ -694,8 +702,8 @@ static void mtip_timeout_function(unsigned long int data)
group = tag >> 5;
bit = tag & 0x1F;
- command = &port->commands[tag];
- fis = (struct host_to_dev_fis *) command->command;
+ cmd = &port->commands[tag];
+ fis = (struct host_to_dev_fis *) cmd->command;
set_bit(tag, tagaccum);
cmdto_cnt++;
@@ -709,27 +717,30 @@ static void mtip_timeout_function(unsigned long int data)
*/
writel(1 << bit, port->completed[group]);
- /* Call the async completion callback. */
- if (likely(command->async_callback))
- command->async_callback(command->async_data,
- -EIO);
- command->async_callback = NULL;
- command->comp_func = NULL;
+ /* Clear the active flag for the command */
+ atomic_set(&port->commands[tag].active, 0);
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&port->dd->pdev->dev,
- command->sg,
- command->scatter_ents,
- command->direction);
+ func = cmd->async_callback;
+ if (func &&
+ cmpxchg(&cmd->async_callback, func, 0) == func) {
- /*
- * Clear the allocated bit and active tag for the
- * command.
- */
- atomic_set(&port->commands[tag].active, 0);
- release_slot(port, tag);
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&port->dd->pdev->dev,
+ cmd->sg,
+ cmd->scatter_ents,
+ cmd->direction);
- up(&port->cmd_slot);
+ func(cmd->async_data, -EIO);
+ unaligned = cmd->unaligned;
+
+ /* Clear the allocated bit for the command. */
+ release_slot(port, tag);
+
+ if (unaligned)
+ up(&port->cmd_slot_unal);
+ else
+ up(&port->cmd_slot);
+ }
}
}
@@ -4213,6 +4224,7 @@ skip_create_disk:
blk_queue_max_hw_sectors(dd->queue, 0xffff);
blk_queue_max_segment_size(dd->queue, 0x400000);
blk_queue_io_min(dd->queue, 4096);
+ blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
/*
* write back cache is not supported in the device. FUA depends on
@@ -4615,7 +4627,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
if (rv) {
dev_warn(&pdev->dev,
"Unable to enable MSI interrupt.\n");
- goto block_initialize_err;
+ goto msi_initialize_err;
}
/* Initialize the block layer. */
@@ -4645,6 +4657,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
block_initialize_err:
pci_disable_msi(pdev);
+
+msi_initialize_err:
if (dd->isr_workq) {
flush_workqueue(dd->isr_workq);
destroy_workqueue(dd->isr_workq);
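
mtip_async_complete() and mtip_timeout_function() can race to finish the same command, so the patch has each path claim the callback with cmpxchg(): whoever swaps async_callback to NULL runs the completion, unmaps the scatterlist and releases the slot, while the other path backs off. A standalone sketch of that claim-once pattern using C11 atomics in place of the kernel cmpxchg(); complete_cmd() and the sample callback are illustrative only.

/* Claim-once: two completion paths race to take ownership of a callback
 * pointer; whoever swaps it to NULL first runs it, the loser does nothing. */
#include <stdio.h>
#include <stdatomic.h>

struct cmd {
	_Atomic(void (*)(int)) async_callback;
};

static void complete_cmd(struct cmd *cmd, int status, const char *who)
{
	void (*func)(int) = atomic_load(&cmd->async_callback);

	/* try to claim the callback: only one caller can swap func -> NULL */
	if (func &&
	    atomic_compare_exchange_strong(&cmd->async_callback, &func, NULL)) {
		printf("%s won the race\n", who);
		func(status);		/* unmap DMA, release slot, etc. would go here */
	} else {
		printf("%s lost the race, nothing to do\n", who);
	}
}

static void upper_layer_done(int status)
{
	printf("upper layer callback, status %d\n", status);
}

int main(void)
{
	struct cmd cmd;

	atomic_init(&cmd.async_callback, upper_layer_done);
	complete_cmd(&cmd, 0, "irq completion");	/* runs the callback */
	complete_cmd(&cmd, -5, "timeout handler");	/* finds it already claimed */
	return 0;
}
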
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 54174cb32feb..ffb955e7ccb9 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -92,7 +92,7 @@
/* Driver name and version strings */
#define MTIP_DRV_NAME "mtip32xx"
-#define MTIP_DRV_VERSION "1.3.0"
+#define MTIP_DRV_VERSION "1.3.1"
/* Maximum number of minor device numbers per device. */
#define MTIP_MAX_MINORS 16
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 51824d1f23ea..7c64fa756cce 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1,6 +1,6 @@
/*
* NVM Express device driver
- * Copyright (c) 2011, Intel Corporation.
+ * Copyright (c) 2011-2014, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -20,10 +20,12 @@
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
+#include <linux/hdreg.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -35,6 +37,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <linux/percpu.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@@ -47,6 +50,11 @@
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT (60 * HZ)
+#define IOD_TIMEOUT (4 * NVME_IO_TIMEOUT)
+
+unsigned char io_timeout = 30;
+module_param(io_timeout, byte, 0644);
+MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
static int nvme_major;
module_param(nvme_major, int, 0);
@@ -58,6 +66,7 @@ static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
static struct workqueue_struct *nvme_workq;
+static wait_queue_head_t nvme_kthread_wait;
static void nvme_reset_failed_dev(struct work_struct *ws);
@@ -74,6 +83,7 @@ struct async_cmd_info {
* commands and one for I/O commands).
*/
struct nvme_queue {
+ struct rcu_head r_head;
struct device *q_dmadev;
struct nvme_dev *dev;
char irqname[24]; /* nvme4294967295-65535\0 */
@@ -85,6 +95,7 @@ struct nvme_queue {
wait_queue_head_t sq_full;
wait_queue_t sq_cong_wait;
struct bio_list sq_cong;
+ struct list_head iod_bio;
u32 __iomem *q_db;
u16 q_depth;
u16 cq_vector;
@@ -95,6 +106,7 @@ struct nvme_queue {
u8 cq_phase;
u8 cqe_seen;
u8 q_suspended;
+ cpumask_var_t cpu_mask;
struct async_cmd_info cmdinfo;
unsigned long cmdid_data[];
};
@@ -118,7 +130,7 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}
-typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
struct nvme_completion *);
struct nvme_cmd_info {
@@ -190,7 +202,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
-static void special_completion(struct nvme_dev *dev, void *ctx,
+static void special_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
if (ctx == CMD_CTX_CANCELLED)
@@ -198,26 +210,26 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
if (ctx == CMD_CTX_FLUSH)
return;
if (ctx == CMD_CTX_ABORT) {
- ++dev->abort_limit;
+ ++nvmeq->dev->abort_limit;
return;
}
if (ctx == CMD_CTX_COMPLETED) {
- dev_warn(&dev->pci_dev->dev,
+ dev_warn(nvmeq->q_dmadev,
"completed id %d twice on queue %d\n",
cqe->command_id, le16_to_cpup(&cqe->sq_id));
return;
}
if (ctx == CMD_CTX_INVALID) {
- dev_warn(&dev->pci_dev->dev,
+ dev_warn(nvmeq->q_dmadev,
"invalid id %d completed on queue %d\n",
cqe->command_id, le16_to_cpup(&cqe->sq_id));
return;
}
- dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+ dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
}
-static void async_completion(struct nvme_dev *dev, void *ctx,
+static void async_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
struct async_cmd_info *cmdinfo = ctx;
@@ -262,14 +274,34 @@ static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
return ctx;
}
-struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
+{
+ return rcu_dereference_raw(dev->queues[qid]);
+}
+
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
{
- return dev->queues[get_cpu() + 1];
+ unsigned queue_id = get_cpu_var(*dev->io_queue);
+ rcu_read_lock();
+ return rcu_dereference(dev->queues[queue_id]);
}
-void put_nvmeq(struct nvme_queue *nvmeq)
+static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
{
- put_cpu();
+ rcu_read_unlock();
+ put_cpu_var(nvmeq->dev->io_queue);
+}
+
+static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
+ __acquires(RCU)
+{
+ rcu_read_lock();
+ return rcu_dereference(dev->queues[q_idx]);
+}
+
+static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
+{
+ rcu_read_unlock();
}
/**
@@ -284,6 +316,10 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
unsigned long flags;
u16 tail;
spin_lock_irqsave(&nvmeq->q_lock, flags);
+ if (nvmeq->q_suspended) {
+ spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+ return -EBUSY;
+ }
tail = nvmeq->sq_tail;
memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
if (++tail == nvmeq->q_depth)
@@ -323,6 +359,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
iod->npages = -1;
iod->length = nbytes;
iod->nents = 0;
+ iod->first_dma = 0ULL;
iod->start_time = jiffies;
}
@@ -371,19 +408,31 @@ static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
part_stat_unlock();
}
-static void bio_completion(struct nvme_dev *dev, void *ctx,
+static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
struct nvme_iod *iod = ctx;
struct bio *bio = iod->private;
u16 status = le16_to_cpup(&cqe->status) >> 1;
+ if (unlikely(status)) {
+ if (!(status & NVME_SC_DNR ||
+ bio->bi_rw & REQ_FAILFAST_MASK) &&
+ (jiffies - iod->start_time) < IOD_TIMEOUT) {
+ if (!waitqueue_active(&nvmeq->sq_full))
+ add_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
+ list_add_tail(&iod->node, &nvmeq->iod_bio);
+ wake_up(&nvmeq->sq_full);
+ return;
+ }
+ }
if (iod->nents) {
- dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
nvme_end_io_acct(bio, iod->start_time);
}
- nvme_free_iod(dev, iod);
+ nvme_free_iod(nvmeq->dev, iod);
if (status)
bio_endio(bio, -EIO);
else
@@ -391,8 +440,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
}
/* length is in bytes. gfp flags indicates whether we may sleep. */
-int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
- struct nvme_iod *iod, int total_len, gfp_t gfp)
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
+ gfp_t gfp)
{
struct dma_pool *pool;
int length = total_len;
@@ -405,7 +454,6 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
dma_addr_t prp_dma;
int nprps, i;
- cmd->prp1 = cpu_to_le64(dma_addr);
length -= (PAGE_SIZE - offset);
if (length <= 0)
return total_len;
@@ -420,7 +468,7 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
}
if (length <= PAGE_SIZE) {
- cmd->prp2 = cpu_to_le64(dma_addr);
+ iod->first_dma = dma_addr;
return total_len;
}
@@ -435,13 +483,12 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
if (!prp_list) {
- cmd->prp2 = cpu_to_le64(dma_addr);
+ iod->first_dma = dma_addr;
iod->npages = -1;
return (total_len - length) + PAGE_SIZE;
}
list[0] = prp_list;
iod->first_dma = prp_dma;
- cmd->prp2 = cpu_to_le64(prp_dma);
i = 0;
for (;;) {
if (i == PAGE_SIZE / 8) {
@@ -480,10 +527,11 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
bio_chain(split, bio);
- if (bio_list_empty(&nvmeq->sq_cong))
+ if (!waitqueue_active(&nvmeq->sq_full))
add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
bio_list_add(&nvmeq->sq_cong, split);
bio_list_add(&nvmeq->sq_cong, bio);
+ wake_up(&nvmeq->sq_full);
return 0;
}
@@ -536,25 +584,13 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
return length;
}
-/*
- * We reuse the small pool to allocate the 16-byte range here as it is not
- * worth having a special pool for these or additional cases to handle freeing
- * the iod.
- */
static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
struct bio *bio, struct nvme_iod *iod, int cmdid)
{
- struct nvme_dsm_range *range;
+ struct nvme_dsm_range *range =
+ (struct nvme_dsm_range *)iod_list(iod)[0];
struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
- range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
- &iod->first_dma);
- if (!range)
- return -ENOMEM;
-
- iod_list(iod)[0] = (__le64 *)range;
- iod->npages = 0;
-
range->cattr = cpu_to_le32(0);
range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
@@ -601,44 +637,22 @@ int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
return nvme_submit_flush(nvmeq, ns, cmdid);
}
-/*
- * Called with local interrupts disabled and the q_lock held. May not sleep.
- */
-static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
- struct bio *bio)
+static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
{
+ struct bio *bio = iod->private;
+ struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
struct nvme_command *cmnd;
- struct nvme_iod *iod;
- enum dma_data_direction dma_dir;
- int cmdid, length, result;
+ int cmdid;
u16 control;
u32 dsmgmt;
- int psegs = bio_phys_segments(ns->queue, bio);
- if ((bio->bi_rw & REQ_FLUSH) && psegs) {
- result = nvme_submit_flush_data(nvmeq, ns);
- if (result)
- return result;
- }
-
- result = -ENOMEM;
- iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
- if (!iod)
- goto nomem;
- iod->private = bio;
-
- result = -EBUSY;
cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
if (unlikely(cmdid < 0))
- goto free_iod;
+ return cmdid;
- if (bio->bi_rw & REQ_DISCARD) {
- result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
- if (result)
- goto free_cmdid;
- return result;
- }
- if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+ if (bio->bi_rw & REQ_DISCARD)
+ return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+ if ((bio->bi_rw & REQ_FLUSH) && !iod->nents)
return nvme_submit_flush(nvmeq, ns, cmdid);
control = 0;
@@ -652,42 +666,85 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
-
memset(cmnd, 0, sizeof(*cmnd));
- if (bio_data_dir(bio)) {
- cmnd->rw.opcode = nvme_cmd_write;
- dma_dir = DMA_TO_DEVICE;
- } else {
- cmnd->rw.opcode = nvme_cmd_read;
- dma_dir = DMA_FROM_DEVICE;
- }
-
- result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
- if (result <= 0)
- goto free_cmdid;
- length = result;
+ cmnd->rw.opcode = bio_data_dir(bio) ? nvme_cmd_write : nvme_cmd_read;
cmnd->rw.command_id = cmdid;
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
- length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
- GFP_ATOMIC);
+ cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
- cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+ cmnd->rw.length =
+ cpu_to_le16((bio->bi_iter.bi_size >> ns->lba_shift) - 1);
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
- nvme_start_io_acct(bio);
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
return 0;
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ struct bio *bio)
+{
+ struct nvme_iod *iod;
+ int psegs = bio_phys_segments(ns->queue, bio);
+ int result;
+
+ if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+ result = nvme_submit_flush_data(nvmeq, ns);
+ if (result)
+ return result;
+ }
+
+ iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
+ if (!iod)
+ return -ENOMEM;
+
+ iod->private = bio;
+ if (bio->bi_rw & REQ_DISCARD) {
+ void *range;
+ /*
+ * We reuse the small pool to allocate the 16-byte range here
+ * as it is not worth having a special pool for these or
+ * additional cases to handle freeing the iod.
+ */
+ range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
+ GFP_ATOMIC,
+ &iod->first_dma);
+ if (!range) {
+ result = -ENOMEM;
+ goto free_iod;
+ }
+ iod_list(iod)[0] = (__le64 *)range;
+ iod->npages = 0;
+ } else if (psegs) {
+ result = nvme_map_bio(nvmeq, iod, bio,
+ bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ psegs);
+ if (result <= 0)
+ goto free_iod;
+ if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) !=
+ result) {
+ result = -ENOMEM;
+ goto free_iod;
+ }
+ nvme_start_io_acct(bio);
+ }
+ if (unlikely(nvme_submit_iod(nvmeq, iod))) {
+ if (!waitqueue_active(&nvmeq->sq_full))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ list_add_tail(&iod->node, &nvmeq->iod_bio);
+ }
+ return 0;
- free_cmdid:
- free_cmdid(nvmeq, cmdid, NULL);
free_iod:
nvme_free_iod(nvmeq->dev, iod);
- nomem:
return result;
}
@@ -711,7 +768,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
}
ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
- fn(nvmeq->dev, ctx, &cqe);
+ fn(nvmeq, ctx, &cqe);
}
/* If the controller ignores the cq head doorbell and continuously
@@ -747,7 +804,7 @@ static void nvme_make_request(struct request_queue *q, struct bio *bio)
if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
result = nvme_submit_bio_queue(nvmeq, ns, bio);
if (unlikely(result)) {
- if (bio_list_empty(&nvmeq->sq_cong))
+ if (!waitqueue_active(&nvmeq->sq_full))
add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
bio_list_add(&nvmeq->sq_cong, bio);
}
@@ -791,7 +848,7 @@ struct sync_cmd_info {
int status;
};
-static void sync_completion(struct nvme_dev *dev, void *ctx,
+static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
struct sync_cmd_info *cmdinfo = ctx;
@@ -804,27 +861,46 @@ static void sync_completion(struct nvme_dev *dev, void *ctx,
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
-int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
+ struct nvme_command *cmd,
u32 *result, unsigned timeout)
{
- int cmdid;
+ int cmdid, ret;
struct sync_cmd_info cmdinfo;
+ struct nvme_queue *nvmeq;
+
+ nvmeq = lock_nvmeq(dev, q_idx);
+ if (!nvmeq) {
+ unlock_nvmeq(nvmeq);
+ return -ENODEV;
+ }
cmdinfo.task = current;
cmdinfo.status = -EINTR;
- cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
- timeout);
- if (cmdid < 0)
+ cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
+ if (cmdid < 0) {
+ unlock_nvmeq(nvmeq);
return cmdid;
+ }
cmd->common.command_id = cmdid;
set_current_state(TASK_KILLABLE);
- nvme_submit_cmd(nvmeq, cmd);
+ ret = nvme_submit_cmd(nvmeq, cmd);
+ if (ret) {
+ free_cmdid(nvmeq, cmdid, NULL);
+ unlock_nvmeq(nvmeq);
+ set_current_state(TASK_RUNNING);
+ return ret;
+ }
+ unlock_nvmeq(nvmeq);
schedule_timeout(timeout);
if (cmdinfo.status == -EINTR) {
- nvme_abort_command(nvmeq, cmdid);
+ nvmeq = lock_nvmeq(dev, q_idx);
+ if (nvmeq)
+ nvme_abort_command(nvmeq, cmdid);
+ unlock_nvmeq(nvmeq);
return -EINTR;
}
@@ -845,20 +921,26 @@ static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
return cmdid;
cmdinfo->status = -EINTR;
cmd->common.command_id = cmdid;
- nvme_submit_cmd(nvmeq, cmd);
- return 0;
+ return nvme_submit_cmd(nvmeq, cmd);
}
int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
- return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+ return nvme_submit_sync_cmd(dev, 0, cmd, result, ADMIN_TIMEOUT);
+}
+
+int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+ u32 *result)
+{
+ return nvme_submit_sync_cmd(dev, smp_processor_id() + 1, cmd, result,
+ NVME_IO_TIMEOUT);
}
static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
{
- return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+ return nvme_submit_async_cmd(raw_nvmeq(dev, 0), cmd, cmdinfo,
ADMIN_TIMEOUT);
}
@@ -985,6 +1067,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
struct nvme_command cmd;
struct nvme_dev *dev = nvmeq->dev;
struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ struct nvme_queue *adminq;
if (!nvmeq->qid || info[cmdid].aborted) {
if (work_busy(&dev->reset_work))
@@ -993,7 +1076,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
dev_warn(&dev->pci_dev->dev,
"I/O %d QID %d timeout, reset controller\n", cmdid,
nvmeq->qid);
- PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
+ dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
return;
}
@@ -1001,7 +1084,8 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
if (!dev->abort_limit)
return;
- a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+ adminq = rcu_dereference(dev->queues[0]);
+ a_cmdid = alloc_cmdid(adminq, CMD_CTX_ABORT, special_completion,
ADMIN_TIMEOUT);
if (a_cmdid < 0)
return;
@@ -1018,7 +1102,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
nvmeq->qid);
- nvme_submit_cmd(dev->queues[0], &cmd);
+ nvme_submit_cmd(adminq, &cmd);
}
/**
@@ -1051,23 +1135,38 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
nvmeq->qid);
ctx = cancel_cmdid(nvmeq, cmdid, &fn);
- fn(nvmeq->dev, ctx, &cqe);
+ fn(nvmeq, ctx, &cqe);
}
}
-static void nvme_free_queue(struct nvme_queue *nvmeq)
+static void nvme_free_queue(struct rcu_head *r)
{
+ struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);
+
spin_lock_irq(&nvmeq->q_lock);
while (bio_list_peek(&nvmeq->sq_cong)) {
struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
bio_endio(bio, -EIO);
}
+ while (!list_empty(&nvmeq->iod_bio)) {
+ static struct nvme_completion cqe = {
+ .status = cpu_to_le16(
+ (NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1),
+ };
+ struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio,
+ struct nvme_iod,
+ node);
+ list_del(&iod->node);
+ bio_completion(nvmeq, iod, &cqe);
+ }
spin_unlock_irq(&nvmeq->q_lock);
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ if (nvmeq->qid)
+ free_cpumask_var(nvmeq->cpu_mask);
kfree(nvmeq);
}
@@ -1076,9 +1175,10 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
int i;
for (i = dev->queue_count - 1; i >= lowest; i--) {
- nvme_free_queue(dev->queues[i]);
+ struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
+ rcu_assign_pointer(dev->queues[i], NULL);
+ call_rcu(&nvmeq->r_head, nvme_free_queue);
dev->queue_count--;
- dev->queues[i] = NULL;
}
}
@@ -1098,6 +1198,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
return 1;
}
nvmeq->q_suspended = 1;
+ nvmeq->dev->online_queues--;
spin_unlock_irq(&nvmeq->q_lock);
irq_set_affinity_hint(vector, NULL);
@@ -1116,7 +1217,7 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
static void nvme_disable_queue(struct nvme_dev *dev, int qid)
{
- struct nvme_queue *nvmeq = dev->queues[qid];
+ struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);
if (!nvmeq)
return;
@@ -1152,6 +1253,9 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
if (!nvmeq->sq_cmds)
goto free_cqdma;
+ if (qid && !zalloc_cpumask_var(&nvmeq->cpu_mask, GFP_KERNEL))
+ goto free_sqdma;
+
nvmeq->q_dmadev = dmadev;
nvmeq->dev = dev;
snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
@@ -1162,15 +1266,20 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
init_waitqueue_head(&nvmeq->sq_full);
init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
bio_list_init(&nvmeq->sq_cong);
+ INIT_LIST_HEAD(&nvmeq->iod_bio);
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
nvmeq->qid = qid;
nvmeq->q_suspended = 1;
dev->queue_count++;
+ rcu_assign_pointer(dev->queues[qid], nvmeq);
return nvmeq;
+ free_sqdma:
+ dma_free_coherent(dmadev, SQ_SIZE(depth), (void *)nvmeq->sq_cmds,
+ nvmeq->sq_dma_addr);
free_cqdma:
dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
@@ -1203,6 +1312,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
nvme_cancel_ios(nvmeq, false);
nvmeq->q_suspended = 0;
+ dev->online_queues++;
}
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
@@ -1311,12 +1421,11 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result < 0)
return result;
- nvmeq = dev->queues[0];
+ nvmeq = raw_nvmeq(dev, 0);
if (!nvmeq) {
nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
if (!nvmeq)
return -ENOMEM;
- dev->queues[0] = nvmeq;
}
aqa = nvmeq->q_depth - 1;
@@ -1418,7 +1527,6 @@ void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
struct nvme_dev *dev = ns->dev;
- struct nvme_queue *nvmeq;
struct nvme_user_io io;
struct nvme_command c;
unsigned length, meta_len;
@@ -1492,22 +1600,14 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.metadata = cpu_to_le64(meta_dma_addr);
}
- length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+ length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+ c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ c.rw.prp2 = cpu_to_le64(iod->first_dma);
- nvmeq = get_nvmeq(dev);
- /*
- * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
- * disabled. We may be preempted at any point, and be rescheduled
- * to a different CPU. That will cause cacheline bouncing, but no
- * additional races since q_lock already protects against other CPUs.
- */
- put_nvmeq(nvmeq);
if (length != (io.nblocks + 1) << ns->lba_shift)
status = -ENOMEM;
- else if (!nvmeq || nvmeq->q_suspended)
- status = -EBUSY;
else
- status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ status = nvme_submit_io_cmd(dev, &c, NULL);
if (meta_len) {
if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
@@ -1572,8 +1672,9 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
length);
if (IS_ERR(iod))
return PTR_ERR(iod);
- length = nvme_setup_prps(dev, &c.common, iod, length,
- GFP_KERNEL);
+ length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+ c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ c.common.prp2 = cpu_to_le64(iod->first_dma);
}
timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
@@ -1581,8 +1682,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
if (length != cmd.data_len)
status = -ENOMEM;
else
- status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
- timeout);
+ status = nvme_submit_sync_cmd(dev, 0, &c, &cmd.result, timeout);
if (cmd.data_len) {
nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1653,25 +1753,51 @@ static void nvme_release(struct gendisk *disk, fmode_t mode)
kref_put(&dev->kref, nvme_free_dev);
}
+static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
+{
+ /* some standard values */
+ geo->heads = 1 << 6;
+ geo->sectors = 1 << 5;
+ geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ return 0;
+}
+
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
.compat_ioctl = nvme_compat_ioctl,
.open = nvme_open,
.release = nvme_release,
+ .getgeo = nvme_getgeo,
};
+static void nvme_resubmit_iods(struct nvme_queue *nvmeq)
+{
+ struct nvme_iod *iod, *next;
+
+ list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) {
+ if (unlikely(nvme_submit_iod(nvmeq, iod)))
+ break;
+ list_del(&iod->node);
+ if (bio_list_empty(&nvmeq->sq_cong) &&
+ list_empty(&nvmeq->iod_bio))
+ remove_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
+ }
+}
+
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
while (bio_list_peek(&nvmeq->sq_cong)) {
struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
- if (bio_list_empty(&nvmeq->sq_cong))
+ if (bio_list_empty(&nvmeq->sq_cong) &&
+ list_empty(&nvmeq->iod_bio))
remove_wait_queue(&nvmeq->sq_full,
&nvmeq->sq_cong_wait);
if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
- if (bio_list_empty(&nvmeq->sq_cong))
+ if (!waitqueue_active(&nvmeq->sq_full))
add_wait_queue(&nvmeq->sq_full,
&nvmeq->sq_cong_wait);
bio_list_add_head(&nvmeq->sq_cong, bio);
@@ -1696,13 +1822,14 @@ static int nvme_kthread(void *data)
list_del_init(&dev->node);
dev_warn(&dev->pci_dev->dev,
"Failed status, reset controller\n");
- PREPARE_WORK(&dev->reset_work,
- nvme_reset_failed_dev);
+ dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
continue;
}
+ rcu_read_lock();
for (i = 0; i < dev->queue_count; i++) {
- struct nvme_queue *nvmeq = dev->queues[i];
+ struct nvme_queue *nvmeq =
+ rcu_dereference(dev->queues[i]);
if (!nvmeq)
continue;
spin_lock_irq(&nvmeq->q_lock);
@@ -1711,9 +1838,11 @@ static int nvme_kthread(void *data)
nvme_process_cq(nvmeq);
nvme_cancel_ios(nvmeq, true);
nvme_resubmit_bios(nvmeq);
+ nvme_resubmit_iods(nvmeq);
unlock:
spin_unlock_irq(&nvmeq->q_lock);
}
+ rcu_read_unlock();
}
spin_unlock(&dev_list_lock);
schedule_timeout(round_jiffies_relative(HZ));
@@ -1788,6 +1917,143 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
return NULL;
}
+static int nvme_find_closest_node(int node)
+{
+ int n, val, min_val = INT_MAX, best_node = node;
+
+ for_each_online_node(n) {
+ if (n == node)
+ continue;
+ val = node_distance(node, n);
+ if (val < min_val) {
+ min_val = val;
+ best_node = n;
+ }
+ }
+ return best_node;
+}
+
+static void nvme_set_queue_cpus(cpumask_t *qmask, struct nvme_queue *nvmeq,
+ int count)
+{
+ int cpu;
+ for_each_cpu(cpu, qmask) {
+ if (cpumask_weight(nvmeq->cpu_mask) >= count)
+ break;
+ if (!cpumask_test_and_set_cpu(cpu, nvmeq->cpu_mask))
+ *per_cpu_ptr(nvmeq->dev->io_queue, cpu) = nvmeq->qid;
+ }
+}
+
+static void nvme_add_cpus(cpumask_t *mask, const cpumask_t *unassigned_cpus,
+ const cpumask_t *new_mask, struct nvme_queue *nvmeq, int cpus_per_queue)
+{
+ int next_cpu;
+ for_each_cpu(next_cpu, new_mask) {
+ cpumask_or(mask, mask, get_cpu_mask(next_cpu));
+ cpumask_or(mask, mask, topology_thread_cpumask(next_cpu));
+ cpumask_and(mask, mask, unassigned_cpus);
+ nvme_set_queue_cpus(mask, nvmeq, cpus_per_queue);
+ }
+}
+
+static void nvme_create_io_queues(struct nvme_dev *dev)
+{
+ unsigned i, max;
+
+ max = min(dev->max_qid, num_online_cpus());
+ for (i = dev->queue_count; i <= max; i++)
+ if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+ break;
+
+ max = min(dev->queue_count - 1, num_online_cpus());
+ for (i = dev->online_queues; i <= max; i++)
+ if (nvme_create_queue(raw_nvmeq(dev, i), i))
+ break;
+}
+
+/*
+ * If there are fewer queues than online cpus, this will try to optimally
+ * assign a queue to multiple cpus by grouping cpus that are "close" together:
+ * thread siblings, core, socket, closest node, then whatever else is
+ * available.
+ */
+static void nvme_assign_io_queues(struct nvme_dev *dev)
+{
+ unsigned cpu, cpus_per_queue, queues, remainder, i;
+ cpumask_var_t unassigned_cpus;
+
+ nvme_create_io_queues(dev);
+
+ queues = min(dev->online_queues - 1, num_online_cpus());
+ if (!queues)
+ return;
+
+ cpus_per_queue = num_online_cpus() / queues;
+ remainder = queues - (num_online_cpus() - queues * cpus_per_queue);
+
+ if (!alloc_cpumask_var(&unassigned_cpus, GFP_KERNEL))
+ return;
+
+ cpumask_copy(unassigned_cpus, cpu_online_mask);
+ cpu = cpumask_first(unassigned_cpus);
+ for (i = 1; i <= queues; i++) {
+ struct nvme_queue *nvmeq = lock_nvmeq(dev, i);
+ cpumask_t mask;
+
+ cpumask_clear(nvmeq->cpu_mask);
+ if (!cpumask_weight(unassigned_cpus)) {
+ unlock_nvmeq(nvmeq);
+ break;
+ }
+
+ mask = *get_cpu_mask(cpu);
+ nvme_set_queue_cpus(&mask, nvmeq, cpus_per_queue);
+ if (cpus_weight(mask) < cpus_per_queue)
+ nvme_add_cpus(&mask, unassigned_cpus,
+ topology_thread_cpumask(cpu),
+ nvmeq, cpus_per_queue);
+ if (cpus_weight(mask) < cpus_per_queue)
+ nvme_add_cpus(&mask, unassigned_cpus,
+ topology_core_cpumask(cpu),
+ nvmeq, cpus_per_queue);
+ if (cpus_weight(mask) < cpus_per_queue)
+ nvme_add_cpus(&mask, unassigned_cpus,
+ cpumask_of_node(cpu_to_node(cpu)),
+ nvmeq, cpus_per_queue);
+ if (cpus_weight(mask) < cpus_per_queue)
+ nvme_add_cpus(&mask, unassigned_cpus,
+ cpumask_of_node(
+ nvme_find_closest_node(
+ cpu_to_node(cpu))),
+ nvmeq, cpus_per_queue);
+ if (cpus_weight(mask) < cpus_per_queue)
+ nvme_add_cpus(&mask, unassigned_cpus,
+ unassigned_cpus,
+ nvmeq, cpus_per_queue);
+
+ WARN(cpumask_weight(nvmeq->cpu_mask) != cpus_per_queue,
+ "nvme%d qid:%d mis-matched queue-to-cpu assignment\n",
+ dev->instance, i);
+
+ irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+ nvmeq->cpu_mask);
+ cpumask_andnot(unassigned_cpus, unassigned_cpus,
+ nvmeq->cpu_mask);
+ cpu = cpumask_next(cpu, unassigned_cpus);
+ if (remainder && !--remainder)
+ cpus_per_queue++;
+ unlock_nvmeq(nvmeq);
+ }
+ WARN(cpumask_weight(unassigned_cpus), "nvme%d unassigned online cpus\n",
+ dev->instance);
+ i = 0;
+ cpumask_andnot(unassigned_cpus, cpu_possible_mask, cpu_online_mask);
+ for_each_cpu(cpu, unassigned_cpus)
+ *per_cpu_ptr(dev->io_queue, cpu) = (i++ % queues) + 1;
+ free_cpumask_var(unassigned_cpus);
+}
+
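
The comment before nvme_assign_io_queues() describes the policy: spread the online CPUs across the I/O queues, preferring CPUs that are topologically close (thread siblings, then core, socket, nearest node). The sketch below reproduces only the share-out arithmetic — cpus_per_queue plus the remainder bookkeeping — with the topology preferences left out; the CPU and queue counts are arbitrary.

/* Simplified share-out of N cpus over Q queues, ignoring topology: the first
 * queues get the base share, and once the remainder is used up the per-queue
 * share grows by one, mirroring the cpus_per_queue/remainder bookkeeping above. */
#include <stdio.h>

int main(void)
{
	unsigned ncpus = 10, queues = 4;
	unsigned cpus_per_queue = ncpus / queues;			 /* 2 */
	unsigned remainder = queues - (ncpus - queues * cpus_per_queue); /* 2 */
	unsigned cpu = 0;

	for (unsigned q = 1; q <= queues; q++) {
		printf("queue %u:", q);
		for (unsigned i = 0; i < cpus_per_queue && cpu < ncpus; i++)
			printf(" cpu%u", cpu++);
		printf("\n");
		if (remainder && !--remainder)
			cpus_per_queue++;
	}
	return 0;
}
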
static int set_queue_count(struct nvme_dev *dev, int count)
{
int status;
@@ -1806,13 +2072,26 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
+static int nvme_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct nvme_dev *dev = container_of(self, struct nvme_dev, nb);
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ nvme_assign_io_queues(dev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
- struct nvme_queue *adminq = dev->queues[0];
+ struct nvme_queue *adminq = raw_nvmeq(dev, 0);
struct pci_dev *pdev = dev->pci_dev;
- int result, cpu, i, vecs, nr_io_queues, size, q_depth;
+ int result, i, vecs, nr_io_queues, size;
- nr_io_queues = num_online_cpus();
+ nr_io_queues = num_possible_cpus();
result = set_queue_count(dev, nr_io_queues);
if (result < 0)
return result;
@@ -1831,37 +2110,22 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
size = db_bar_size(dev, nr_io_queues);
} while (1);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
- dev->queues[0]->q_db = dev->dbs;
+ adminq->q_db = dev->dbs;
}
/* Deregister the admin queue's interrupt */
free_irq(dev->entry[0].vector, adminq);
- vecs = nr_io_queues;
- for (i = 0; i < vecs; i++)
+ for (i = 0; i < nr_io_queues; i++)
dev->entry[i].entry = i;
- for (;;) {
- result = pci_enable_msix(pdev, dev->entry, vecs);
- if (result <= 0)
- break;
- vecs = result;
- }
-
- if (result < 0) {
- vecs = nr_io_queues;
- if (vecs > 32)
- vecs = 32;
- for (;;) {
- result = pci_enable_msi_block(pdev, vecs);
- if (result == 0) {
- for (i = 0; i < vecs; i++)
- dev->entry[i].vector = i + pdev->irq;
- break;
- } else if (result < 0) {
- vecs = 1;
- break;
- }
- vecs = result;
+ vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
+ if (vecs < 0) {
+ vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
+ if (vecs < 0) {
+ vecs = 1;
+ } else {
+ for (i = 0; i < vecs; i++)
+ dev->entry[i].vector = i + pdev->irq;
}
}
@@ -1872,6 +2136,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* number of interrupts.
*/
nr_io_queues = vecs;
+ dev->max_qid = nr_io_queues;
result = queue_request_irq(dev, adminq, adminq->irqname);
if (result) {
@@ -1880,49 +2145,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
/* Free previously allocated queues that are no longer usable */
- spin_lock(&dev_list_lock);
- for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
+ nvme_free_queues(dev, nr_io_queues + 1);
+ nvme_assign_io_queues(dev);
- spin_lock_irq(&nvmeq->q_lock);
- nvme_cancel_ios(nvmeq, false);
- spin_unlock_irq(&nvmeq->q_lock);
-
- nvme_free_queue(nvmeq);
- dev->queue_count--;
- dev->queues[i] = NULL;
- }
- spin_unlock(&dev_list_lock);
-
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < nr_io_queues; i++) {
- irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-
- q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
- NVME_Q_DEPTH);
- for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
- dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
- if (!dev->queues[i + 1]) {
- result = -ENOMEM;
- goto free_queues;
- }
- }
-
- for (; i < num_possible_cpus(); i++) {
- int target = i % rounddown_pow_of_two(dev->queue_count - 1);
- dev->queues[i + 1] = dev->queues[target + 1];
- }
-
- for (i = 1; i < dev->queue_count; i++) {
- result = nvme_create_queue(dev->queues[i], i);
- if (result) {
- for (--i; i > 0; i--)
- nvme_disable_queue(dev, i);
- goto free_queues;
- }
- }
+ dev->nb.notifier_call = &nvme_cpu_notify;
+ result = register_hotcpu_notifier(&dev->nb);
+ if (result)
+ goto free_queues;
return 0;
@@ -2001,6 +2230,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
static int nvme_dev_map(struct nvme_dev *dev)
{
+ u64 cap;
int bars, result = -ENOMEM;
struct pci_dev *pdev = dev->pci_dev;
@@ -2024,7 +2254,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
result = -ENODEV;
goto unmap;
}
- dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
+ cap = readq(&dev->bar->cap);
+ dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
+ dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
return 0;
@@ -2180,7 +2412,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
atomic_set(&dq.refcount, 0);
dq.worker = &worker;
for (i = dev->queue_count - 1; i > 0; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
+ struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
if (nvme_suspend_queue(nvmeq))
continue;
@@ -2193,19 +2425,38 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
kthread_stop(kworker_task);
}
+/*
+* Remove the node from the device list and check
+* for whether or not we need to stop the nvme_thread.
+*/
+static void nvme_dev_list_remove(struct nvme_dev *dev)
+{
+ struct task_struct *tmp = NULL;
+
+ spin_lock(&dev_list_lock);
+ list_del_init(&dev->node);
+ if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
+ tmp = nvme_thread;
+ nvme_thread = NULL;
+ }
+ spin_unlock(&dev_list_lock);
+
+ if (tmp)
+ kthread_stop(tmp);
+}
+
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
dev->initialized = 0;
+ unregister_hotcpu_notifier(&dev->nb);
- spin_lock(&dev_list_lock);
- list_del_init(&dev->node);
- spin_unlock(&dev_list_lock);
+ nvme_dev_list_remove(dev);
if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
for (i = dev->queue_count - 1; i >= 0; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
+ struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
nvme_suspend_queue(nvmeq);
nvme_clear_queue(nvmeq);
}
@@ -2298,6 +2549,7 @@ static void nvme_free_dev(struct kref *kref)
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
nvme_free_namespaces(dev);
+ free_percpu(dev->io_queue);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2341,6 +2593,7 @@ static const struct file_operations nvme_dev_fops = {
static int nvme_dev_start(struct nvme_dev *dev)
{
int result;
+ bool start_thread = false;
result = nvme_dev_map(dev);
if (result)
@@ -2351,9 +2604,24 @@ static int nvme_dev_start(struct nvme_dev *dev)
goto unmap;
spin_lock(&dev_list_lock);
+ if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+ start_thread = true;
+ nvme_thread = NULL;
+ }
list_add(&dev->node, &dev_list);
spin_unlock(&dev_list_lock);
+ if (start_thread) {
+ nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+ wake_up(&nvme_kthread_wait);
+ } else
+ wait_event_killable(nvme_kthread_wait, nvme_thread);
+
+ if (IS_ERR_OR_NULL(nvme_thread)) {
+ result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+ goto disable;
+ }
+
result = nvme_setup_io_queues(dev);
if (result && result != -EBUSY)
goto disable;
@@ -2362,9 +2630,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
disable:
nvme_disable_queue(dev, 0);
- spin_lock(&dev_list_lock);
- list_del_init(&dev->node);
- spin_unlock(&dev_list_lock);
+ nvme_dev_list_remove(dev);
unmap:
nvme_dev_unmap(dev);
return result;
@@ -2383,18 +2649,10 @@ static int nvme_remove_dead_ctrl(void *arg)
static void nvme_remove_disks(struct work_struct *ws)
{
- int i;
struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
nvme_dev_remove(dev);
- spin_lock(&dev_list_lock);
- for (i = dev->queue_count - 1; i > 0; i--) {
- BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
- nvme_free_queue(dev->queues[i]);
- dev->queue_count--;
- dev->queues[i] = NULL;
- }
- spin_unlock(&dev_list_lock);
+ nvme_free_queues(dev, 1);
}
static int nvme_dev_resume(struct nvme_dev *dev)
@@ -2406,7 +2664,7 @@ static int nvme_dev_resume(struct nvme_dev *dev)
return ret;
if (ret == -EBUSY) {
spin_lock(&dev_list_lock);
- PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
+ dev->reset_workfn = nvme_remove_disks;
queue_work(nvme_workq, &dev->reset_work);
spin_unlock(&dev_list_lock);
}
@@ -2435,6 +2693,12 @@ static void nvme_reset_failed_dev(struct work_struct *ws)
nvme_dev_reset(dev);
}
+static void nvme_reset_workfn(struct work_struct *work)
+{
+ struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
+ dev->reset_workfn(work);
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int result = -ENOMEM;
@@ -2451,9 +2715,13 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
GFP_KERNEL);
if (!dev->queues)
goto free;
+ dev->io_queue = alloc_percpu(unsigned short);
+ if (!dev->io_queue)
+ goto free;
INIT_LIST_HEAD(&dev->namespaces);
- INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
+ dev->reset_workfn = nvme_reset_failed_dev;
+ INIT_WORK(&dev->reset_work, nvme_reset_workfn);
dev->pci_dev = pdev;
pci_set_drvdata(pdev, dev);
result = nvme_set_instance(dev);
@@ -2464,6 +2732,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto release;
+ kref_init(&dev->kref);
result = nvme_dev_start(dev);
if (result) {
if (result == -EBUSY)
@@ -2471,7 +2740,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto release_pools;
}
- kref_init(&dev->kref);
result = nvme_dev_add(dev);
if (result)
goto shutdown;
@@ -2500,6 +2768,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
release:
nvme_release_instance(dev);
free:
+ free_percpu(dev->io_queue);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2526,6 +2795,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_dev_remove(dev);
nvme_dev_shutdown(dev);
nvme_free_queues(dev, 0);
+ rcu_barrier();
nvme_release_instance(dev);
nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
@@ -2538,6 +2808,7 @@ static void nvme_remove(struct pci_dev *pdev)
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
+#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2553,11 +2824,12 @@ static int nvme_resume(struct device *dev)
struct nvme_dev *ndev = pci_get_drvdata(pdev);
if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
- PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
+ ndev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &ndev->reset_work);
}
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@ -2572,7 +2844,7 @@ static const struct pci_error_handlers nvme_err_handler = {
/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS 0x010802
-static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};
@@ -2594,14 +2866,11 @@ static int __init nvme_init(void)
{
int result;
- nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
- if (IS_ERR(nvme_thread))
- return PTR_ERR(nvme_thread);
+ init_waitqueue_head(&nvme_kthread_wait);
- result = -ENOMEM;
nvme_workq = create_singlethread_workqueue("nvme");
if (!nvme_workq)
- goto kill_kthread;
+ return -ENOMEM;
result = register_blkdev(nvme_major, "nvme");
if (result < 0)
@@ -2618,8 +2887,6 @@ static int __init nvme_init(void)
unregister_blkdev(nvme_major, "nvme");
kill_workq:
destroy_workqueue(nvme_workq);
- kill_kthread:
- kthread_stop(nvme_thread);
return result;
}
@@ -2628,11 +2895,11 @@ static void __exit nvme_exit(void)
pci_unregister_driver(&nvme_driver);
unregister_blkdev(nvme_major, "nvme");
destroy_workqueue(nvme_workq);
- kthread_stop(nvme_thread);
+ BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
}
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.8");
+MODULE_VERSION("0.9");
module_init(nvme_init);
module_exit(nvme_exit);
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a0ceb64e269..2c3f5be06da1 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -1562,13 +1562,14 @@ static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
res = PTR_ERR(iod);
goto out;
}
- length = nvme_setup_prps(dev, &c.common, iod, tot_len,
- GFP_KERNEL);
+ length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
if (length != tot_len) {
res = -ENOMEM;
goto out_unmap;
}
+ c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
} else if (opcode == nvme_admin_activate_fw) {
@@ -2033,7 +2034,6 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int res = SNTI_TRANSLATION_SUCCESS;
int nvme_sc;
struct nvme_dev *dev = ns->dev;
- struct nvme_queue *nvmeq;
u32 num_cmds;
struct nvme_iod *iod;
u64 unit_len;
@@ -2045,7 +2045,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
struct nvme_command c;
u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
u16 control;
- u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
+ u32 max_blocks = queue_max_hw_sectors(ns->queue);
num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
@@ -2093,8 +2093,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
res = PTR_ERR(iod);
goto out;
}
- retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
- GFP_KERNEL);
+ retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
if (retcode != unit_len) {
nvme_unmap_user_pages(dev,
(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2103,21 +2102,12 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
res = -ENOMEM;
goto out;
}
+ c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ c.rw.prp2 = cpu_to_le64(iod->first_dma);
nvme_offset += unit_num_blocks;
- nvmeq = get_nvmeq(dev);
- /*
- * Since nvme_submit_sync_cmd sleeps, we can't keep
- * preemption disabled. We may be preempted at any
- * point, and be rescheduled to a different CPU. That
- * will cause cacheline bouncing, but no additional
- * races since q_lock already protects against other
- * CPUs.
- */
- put_nvmeq(nvmeq);
- nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
- NVME_IO_TIMEOUT);
+ nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
if (nvme_sc != NVME_SC_SUCCESS) {
nvme_unmap_user_pages(dev,
(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2644,7 +2634,6 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{
int res = SNTI_TRANSLATION_SUCCESS;
int nvme_sc;
- struct nvme_queue *nvmeq;
struct nvme_command c;
u8 immed, pcmod, pc, no_flush, start;
@@ -2671,10 +2660,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
c.common.opcode = nvme_cmd_flush;
c.common.nsid = cpu_to_le32(ns->ns_id);
- nvmeq = get_nvmeq(ns->dev);
- put_nvmeq(nvmeq);
- nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
-
+ nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
goto out;
@@ -2697,15 +2683,12 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
int res = SNTI_TRANSLATION_SUCCESS;
int nvme_sc;
struct nvme_command c;
- struct nvme_queue *nvmeq;
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_cmd_flush;
c.common.nsid = cpu_to_le32(ns->ns_id);
- nvmeq = get_nvmeq(ns->dev);
- put_nvmeq(nvmeq);
- nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
@@ -2872,7 +2855,6 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
struct nvme_dev *dev = ns->dev;
struct scsi_unmap_parm_list *plist;
struct nvme_dsm_range *range;
- struct nvme_queue *nvmeq;
struct nvme_command c;
int i, nvme_sc, res = -ENOMEM;
u16 ndesc, list_len;
@@ -2914,10 +2896,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
c.dsm.nr = cpu_to_le32(ndesc - 1);
c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
- nvmeq = get_nvmeq(dev);
- put_nvmeq(nvmeq);
-
- nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 34898d53395b..4c95b503b09e 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1654,7 +1654,7 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
if (osd_req->r_result < 0)
obj_request->result = osd_req->r_result;
- BUG_ON(osd_req->r_num_ops > 2);
+ rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);
/*
* We support a 64-bit length, but ultimately it has to be
@@ -1662,11 +1662,15 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
*/
obj_request->xferred = osd_req->r_reply_op_len[0];
rbd_assert(obj_request->xferred < (u64)UINT_MAX);
+
opcode = osd_req->r_ops[0].op;
switch (opcode) {
case CEPH_OSD_OP_READ:
rbd_osd_read_callback(obj_request);
break;
+ case CEPH_OSD_OP_SETALLOCHINT:
+ rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
+ /* fall through */
case CEPH_OSD_OP_WRITE:
rbd_osd_write_callback(obj_request);
break;
@@ -1715,9 +1719,16 @@ static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
snapc, CEPH_NOSNAP, &mtime);
}
+/*
+ * Create an osd request. A read request has one osd op (read).
+ * A write request has either one (watch) or two (hint+write) osd ops.
+ * (All rbd data writes are prefixed with an allocation hint op, but
+ * technically osd watch is a write request, hence this distinction.)
+ */
static struct ceph_osd_request *rbd_osd_req_create(
struct rbd_device *rbd_dev,
bool write_request,
+ unsigned int num_ops,
struct rbd_obj_request *obj_request)
{
struct ceph_snap_context *snapc = NULL;
@@ -1733,10 +1744,13 @@ static struct ceph_osd_request *rbd_osd_req_create(
snapc = img_request->snapc;
}
- /* Allocate and initialize the request, for the single op */
+ rbd_assert(num_ops == 1 || (write_request && num_ops == 2));
+
+ /* Allocate and initialize the request, for the num_ops ops */
osdc = &rbd_dev->rbd_client->client->osdc;
- osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
+ osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
+ GFP_ATOMIC);
if (!osd_req)
return NULL; /* ENOMEM */
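A summary of the op layouts implied by this patch (as described in the comment above and in the copyup hunk below): a read request carries a single CEPH_OSD_OP_READ op; a data write carries CEPH_OSD_OP_SETALLOCHINT followed by CEPH_OSD_OP_WRITE; a copyup request carries the copyup method call, then the allocation hint, then the write; and the watch request, although technically a write, remains a single op.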
@@ -1756,8 +1770,8 @@ static struct ceph_osd_request *rbd_osd_req_create(
/*
* Create a copyup osd request based on the information in the
- * object request supplied. A copyup request has two osd ops,
- * a copyup method call, and a "normal" write request.
+ * object request supplied. A copyup request has three osd ops,
+ * a copyup method call, a hint op, and a write op.
*/
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
@@ -1773,12 +1787,12 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
rbd_assert(img_request);
rbd_assert(img_request_write_test(img_request));
- /* Allocate and initialize the request, for the two ops */
+ /* Allocate and initialize the request, for the three ops */
snapc = img_request->snapc;
rbd_dev = img_request->rbd_dev;
osdc = &rbd_dev->rbd_client->client->osdc;
- osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
+ osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
if (!osd_req)
return NULL; /* ENOMEM */
@@ -2178,6 +2192,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
const char *object_name;
u64 offset;
u64 length;
+ unsigned int which = 0;
object_name = rbd_segment_name(rbd_dev, img_offset);
if (!object_name)
@@ -2190,6 +2205,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
rbd_segment_name_free(object_name);
if (!obj_request)
goto out_unwind;
+
/*
* set obj_request->img_request before creating the
* osd_request so that it gets the right snapc
@@ -2207,7 +2223,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
clone_size,
GFP_ATOMIC);
if (!obj_request->bio_list)
- goto out_partial;
+ goto out_unwind;
} else {
unsigned int page_count;
@@ -2220,19 +2236,27 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
}
osd_req = rbd_osd_req_create(rbd_dev, write_request,
- obj_request);
+ (write_request ? 2 : 1),
+ obj_request);
if (!osd_req)
- goto out_partial;
+ goto out_unwind;
obj_request->osd_req = osd_req;
obj_request->callback = rbd_img_obj_callback;
- osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
- 0, 0);
+ if (write_request) {
+ osd_req_op_alloc_hint_init(osd_req, which,
+ rbd_obj_bytes(&rbd_dev->header),
+ rbd_obj_bytes(&rbd_dev->header));
+ which++;
+ }
+
+ osd_req_op_extent_init(osd_req, which, opcode, offset, length,
+ 0, 0);
if (type == OBJ_REQUEST_BIO)
- osd_req_op_extent_osd_data_bio(osd_req, 0,
+ osd_req_op_extent_osd_data_bio(osd_req, which,
obj_request->bio_list, length);
else
- osd_req_op_extent_osd_data_pages(osd_req, 0,
+ osd_req_op_extent_osd_data_pages(osd_req, which,
obj_request->pages, length,
offset & ~PAGE_MASK, false, false);
@@ -2249,11 +2273,9 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
return 0;
-out_partial:
- rbd_obj_request_put(obj_request);
out_unwind:
for_each_obj_request_safe(img_request, obj_request, next_obj_request)
- rbd_obj_request_put(obj_request);
+ rbd_img_obj_request_del(img_request, obj_request);
return -ENOMEM;
}
@@ -2353,7 +2375,7 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
/*
 * The original osd request is of no use to us any more.
- * We need a new one that can hold the two ops in a copyup
+ * We need a new one that can hold the three ops in a copyup
* request. Allocate the new copyup osd request for the
* original request, and release the old one.
*/
@@ -2372,17 +2394,22 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
false, false);
- /* Then the original write request op */
+ /* Then the hint op */
+
+ osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
+ rbd_obj_bytes(&rbd_dev->header));
+
+ /* And the original write request op */
offset = orig_request->offset;
length = orig_request->length;
- osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
+ osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
offset, length, 0, 0);
if (orig_request->type == OBJ_REQUEST_BIO)
- osd_req_op_extent_osd_data_bio(osd_req, 1,
+ osd_req_op_extent_osd_data_bio(osd_req, 2,
orig_request->bio_list, length);
else
- osd_req_op_extent_osd_data_pages(osd_req, 1,
+ osd_req_op_extent_osd_data_pages(osd_req, 2,
orig_request->pages, length,
offset & ~PAGE_MASK, false, false);
@@ -2603,8 +2630,8 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
rbd_assert(obj_request->img_request);
rbd_dev = obj_request->img_request->rbd_dev;
- stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
- stat_request);
+ stat_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
+ stat_request);
if (!stat_request->osd_req)
goto out;
stat_request->callback = rbd_img_obj_exists_callback;
@@ -2807,7 +2834,8 @@ static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
return -ENOMEM;
ret = -ENOMEM;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
+ obj_request);
if (!obj_request->osd_req)
goto out;
@@ -2870,7 +2898,8 @@ static int __rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
if (!obj_request)
goto out_cancel;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
+ obj_request);
if (!obj_request->osd_req)
goto out_cancel;
@@ -2978,7 +3007,8 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
obj_request->pages = pages;
obj_request->page_count = page_count;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
+ obj_request);
if (!obj_request->osd_req)
goto out;
@@ -3211,7 +3241,8 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
obj_request->pages = pages;
obj_request->page_count = page_count;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
+ obj_request);
if (!obj_request->osd_req)
goto out;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index eb6e1e0e8db2..a69dd93d1bd5 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -3910,18 +3910,22 @@ static void skd_release_msix(struct skd_device *skdev)
struct skd_msix_entry *qentry;
int i;
- if (skdev->msix_entries == NULL)
- return;
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
- skdev = qentry->rsp;
+ if (skdev->msix_entries) {
+ for (i = 0; i < skdev->msix_count; i++) {
+ qentry = &skdev->msix_entries[i];
+ skdev = qentry->rsp;
+
+ if (qentry->have_irq)
+ devm_free_irq(&skdev->pdev->dev,
+ qentry->vector, qentry->rsp);
+ }
- if (qentry->have_irq)
- devm_free_irq(&skdev->pdev->dev,
- qentry->vector, qentry->rsp);
+ kfree(skdev->msix_entries);
}
- pci_disable_msix(skdev->pdev);
- kfree(skdev->msix_entries);
+
+ if (skdev->msix_count)
+ pci_disable_msix(skdev->pdev);
+
skdev->msix_count = 0;
skdev->msix_entries = NULL;
}
@@ -3929,12 +3933,10 @@ static void skd_release_msix(struct skd_device *skdev)
static int skd_acquire_msix(struct skd_device *skdev)
{
int i, rc;
- struct pci_dev *pdev;
- struct msix_entry *entries = NULL;
+ struct pci_dev *pdev = skdev->pdev;
+ struct msix_entry *entries;
struct skd_msix_entry *qentry;
- pdev = skdev->pdev;
- skdev->msix_count = SKD_MAX_MSIX_COUNT;
entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
GFP_KERNEL);
if (!entries)
@@ -3943,40 +3945,26 @@ static int skd_acquire_msix(struct skd_device *skdev)
for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
entries[i].entry = i;
- rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT);
- if (rc < 0)
+ rc = pci_enable_msix_range(pdev, entries,
+ SKD_MIN_MSIX_COUNT, SKD_MAX_MSIX_COUNT);
+ if (rc < 0) {
+ pr_err("(%s): failed to enable MSI-X %d\n",
+ skd_name(skdev), rc);
goto msix_out;
- if (rc) {
- if (rc < SKD_MIN_MSIX_COUNT) {
- pr_err("(%s): failed to enable MSI-X %d\n",
- skd_name(skdev), rc);
- goto msix_out;
- }
- pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n",
- skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name, rc);
-
- skdev->msix_count = rc;
- rc = pci_enable_msix(pdev, entries, skdev->msix_count);
- if (rc) {
- pr_err("(%s): failed to enable MSI-X "
- "support (%d) %d\n",
- skd_name(skdev), skdev->msix_count, rc);
- goto msix_out;
- }
}
+
+ skdev->msix_count = rc;
skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
skdev->msix_count, GFP_KERNEL);
if (!skdev->msix_entries) {
rc = -ENOMEM;
- skdev->msix_count = 0;
pr_err("(%s): msix table allocation error\n",
skd_name(skdev));
goto msix_out;
}
- qentry = skdev->msix_entries;
for (i = 0; i < skdev->msix_count; i++) {
+ qentry = &skdev->msix_entries[i];
qentry->vector = entries[i].vector;
qentry->entry = entries[i].entry;
qentry->rsp = NULL;
@@ -3985,11 +3973,10 @@ static int skd_acquire_msix(struct skd_device *skdev)
skdev->name, __func__, __LINE__,
pci_name(pdev), skdev->name,
i, qentry->vector, qentry->entry);
- qentry++;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+ for (i = 0; i < skdev->msix_count; i++) {
qentry = &skdev->msix_entries[i];
snprintf(qentry->isr_name, sizeof(qentry->isr_name),
"%s%d-msix %s", DRV_NAME, skdev->devno,
@@ -4045,8 +4032,8 @@ RETRY_IRQ_TYPE:
case SKD_IRQ_MSI:
snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
DRV_NAME, skdev->devno);
- rc = pci_enable_msi(pdev);
- if (!rc) {
+ rc = pci_enable_msi_range(pdev, 1, 1);
+ if (rc > 0) {
rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
skdev->isr_name, skdev);
if (rc) {
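A note on the interrupt API changes above (based on the documented PCI/MSI helpers): pci_enable_msix_range() either allocates between SKD_MIN_MSIX_COUNT and SKD_MAX_MSIX_COUNT vectors and returns the number actually allocated, or returns a negative errno, so the old retry loop that re-called pci_enable_msix() with a smaller vector count is no longer needed. Likewise pci_enable_msi_range(pdev, 1, 1) returns 1 on success, which is why the success check changes from !rc to rc > 0.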
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 20e061c3e023..c74f7b56e7c4 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -30,6 +30,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
@@ -840,14 +841,17 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
spin_lock_irqsave(&swim3_lock, flags);
if (fs->state != idle && fs->state != available) {
++fs->wanted;
- while (fs->state != available) {
+ /* this will enable irqs in order to sleep */
+ if (!interruptible)
+ wait_event_lock_irq(fs->wait,
+ fs->state == available,
+ swim3_lock);
+ else if (wait_event_interruptible_lock_irq(fs->wait,
+ fs->state == available,
+ swim3_lock)) {
+ --fs->wanted;
spin_unlock_irqrestore(&swim3_lock, flags);
- if (interruptible && signal_pending(current)) {
- --fs->wanted;
- return -EINTR;
- }
- interruptible_sleep_on(&fs->wait);
- spin_lock_irqsave(&swim3_lock, flags);
+ return -EINTR;
}
--fs->wanted;
}
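For reference (a hedged summary of the wait API used above): wait_event_lock_irq() drops swim3_lock, re-enabling interrupts, while it sleeps and reacquires the lock before re-checking the condition; wait_event_interruptible_lock_irq() additionally returns non-zero when interrupted by a signal, which the new code maps to -EINTR after dropping the wanted count and the lock.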
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index b1cb3f4c4db4..6d8a87f252de 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -158,6 +158,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
unsigned long flags;
unsigned int num;
const bool last = (req->cmd_flags & REQ_END) != 0;
+ int err;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
@@ -198,11 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
}
spin_lock_irqsave(&vblk->vq_lock, flags);
- if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+ err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
+ if (err) {
virtqueue_kick(vblk->vq);
spin_unlock_irqrestore(&vblk->vq_lock, flags);
blk_mq_stop_hw_queue(hctx);
- return BLK_MQ_RQ_QUEUE_BUSY;
+ /* Out of mem doesn't actually happen, since we fall back
+ * to direct descriptors */
+ if (err == -ENOMEM || err == -ENOSPC)
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ return BLK_MQ_RQ_QUEUE_ERROR;
}
if (last)
@@ -485,18 +491,20 @@ static struct blk_mq_ops virtio_mq_ops = {
static struct blk_mq_reg virtio_mq_reg = {
.ops = &virtio_mq_ops,
.nr_hw_queues = 1,
- .queue_depth = 64,
+ .queue_depth = 0, /* Set in virtblk_probe */
.numa_node = NUMA_NO_NODE,
.flags = BLK_MQ_F_SHOULD_MERGE,
};
+module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444);
-static void virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
+static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
struct request *rq, unsigned int nr)
{
struct virtio_blk *vblk = data;
struct virtblk_req *vbr = rq->special;
sg_init_table(vbr->sg, vblk->sg_elems);
+ return 0;
}
static int virtblk_probe(struct virtio_device *vdev)
@@ -552,6 +560,13 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_vq;
}
+ /* Default queue sizing is to fill the ring. */
+ if (!virtio_mq_reg.queue_depth) {
+ virtio_mq_reg.queue_depth = vblk->vq->num_free;
+ /* ... but without indirect descs, we use 2 descs per req */
+ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
+ virtio_mq_reg.queue_depth /= 2;
+ }
virtio_mq_reg.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems;
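A worked example of the default sizing above (numbers are illustrative): on a device whose virtqueue reports 256 free entries, queue_depth defaults to 256 when VIRTIO_RING_F_INDIRECT_DESC is negotiated, and to 128 otherwise, since each request then consumes two descriptors; either value can still be overridden at module load time via the new queue_depth parameter.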
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 3450be850399..6489c0fd0ea6 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -15,6 +15,16 @@ config ZRAM
See zram.txt for more information.
+config ZRAM_LZ4_COMPRESS
+ bool "Enable LZ4 algorithm support"
+ depends on ZRAM
+ select LZ4_COMPRESS
+ select LZ4_DECOMPRESS
+ default n
+ help
+ This option enables LZ4 compression algorithm support. Compression
+	  algorithm can be changed using the `comp_algorithm' device attribute.
+
config ZRAM_DEBUG
bool "Compressed RAM block device debug support"
depends on ZRAM
diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile
index cb0f9ced6a93..be0763ff57a2 100644
--- a/drivers/block/zram/Makefile
+++ b/drivers/block/zram/Makefile
@@ -1,3 +1,5 @@
-zram-y := zram_drv.o
+zram-y := zcomp_lzo.o zcomp.o zram_drv.o
+
+zram-$(CONFIG_ZRAM_LZ4_COMPRESS) += zcomp_lz4.o
obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
new file mode 100644
index 000000000000..f1ff39a3d1c1
--- /dev/null
+++ b/drivers/block/zram/zcomp.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "zcomp.h"
+#include "zcomp_lzo.h"
+#ifdef CONFIG_ZRAM_LZ4_COMPRESS
+#include "zcomp_lz4.h"
+#endif
+
+/*
+ * single zcomp_strm backend
+ */
+struct zcomp_strm_single {
+ struct mutex strm_lock;
+ struct zcomp_strm *zstrm;
+};
+
+/*
+ * multi zcomp_strm backend
+ */
+struct zcomp_strm_multi {
+ /* protect strm list */
+ spinlock_t strm_lock;
+ /* max possible number of zstrm streams */
+ int max_strm;
+ /* number of available zstrm streams */
+ int avail_strm;
+ /* list of available strms */
+ struct list_head idle_strm;
+ wait_queue_head_t strm_wait;
+};
+
+static struct zcomp_backend *backends[] = {
+ &zcomp_lzo,
+#ifdef CONFIG_ZRAM_LZ4_COMPRESS
+ &zcomp_lz4,
+#endif
+ NULL
+};
+
+static struct zcomp_backend *find_backend(const char *compress)
+{
+ int i = 0;
+ while (backends[i]) {
+ if (sysfs_streq(compress, backends[i]->name))
+ break;
+ i++;
+ }
+ return backends[i];
+}
+
+static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
+{
+ if (zstrm->private)
+ comp->backend->destroy(zstrm->private);
+ free_pages((unsigned long)zstrm->buffer, 1);
+ kfree(zstrm);
+}
+
+/*
+ * allocate a new zcomp_strm structure with ->private initialized
+ * by the backend; returns NULL on error
+ */
+static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
+{
+ struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
+ if (!zstrm)
+ return NULL;
+
+ zstrm->private = comp->backend->create();
+ /*
+ * allocate 2 pages. 1 for compressed data, plus 1 extra for the
+ * case when compressed size is larger than the original one
+ */
+ zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!zstrm->private || !zstrm->buffer) {
+ zcomp_strm_free(comp, zstrm);
+ zstrm = NULL;
+ }
+ return zstrm;
+}
+
+/*
+ * get an idle zcomp_strm or wait until another process releases
+ * one for us (via zcomp_strm_release())
+ */
+static struct zcomp_strm *zcomp_strm_multi_find(struct zcomp *comp)
+{
+ struct zcomp_strm_multi *zs = comp->stream;
+ struct zcomp_strm *zstrm;
+
+ while (1) {
+ spin_lock(&zs->strm_lock);
+ if (!list_empty(&zs->idle_strm)) {
+ zstrm = list_entry(zs->idle_strm.next,
+ struct zcomp_strm, list);
+ list_del(&zstrm->list);
+ spin_unlock(&zs->strm_lock);
+ return zstrm;
+ }
+ /* zstrm streams limit reached, wait for idle stream */
+ if (zs->avail_strm >= zs->max_strm) {
+ spin_unlock(&zs->strm_lock);
+ wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
+ continue;
+ }
+ /* allocate new zstrm stream */
+ zs->avail_strm++;
+ spin_unlock(&zs->strm_lock);
+
+ zstrm = zcomp_strm_alloc(comp);
+ if (!zstrm) {
+ spin_lock(&zs->strm_lock);
+ zs->avail_strm--;
+ spin_unlock(&zs->strm_lock);
+ wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
+ continue;
+ }
+ break;
+ }
+ return zstrm;
+}
+
+/* add stream back to idle list and wake up waiter or free the stream */
+static void zcomp_strm_multi_release(struct zcomp *comp, struct zcomp_strm *zstrm)
+{
+ struct zcomp_strm_multi *zs = comp->stream;
+
+ spin_lock(&zs->strm_lock);
+ if (zs->avail_strm <= zs->max_strm) {
+ list_add(&zstrm->list, &zs->idle_strm);
+ spin_unlock(&zs->strm_lock);
+ wake_up(&zs->strm_wait);
+ return;
+ }
+
+ zs->avail_strm--;
+ spin_unlock(&zs->strm_lock);
+ zcomp_strm_free(comp, zstrm);
+}
+
+/* change max_strm limit */
+static bool zcomp_strm_multi_set_max_streams(struct zcomp *comp, int num_strm)
+{
+ struct zcomp_strm_multi *zs = comp->stream;
+ struct zcomp_strm *zstrm;
+
+ spin_lock(&zs->strm_lock);
+ zs->max_strm = num_strm;
+ /*
+	 * if the user has lowered the limit and there are idle streams,
+	 * immediately free as many streams (and as much memory) as we can.
+ */
+ while (zs->avail_strm > num_strm && !list_empty(&zs->idle_strm)) {
+ zstrm = list_entry(zs->idle_strm.next,
+ struct zcomp_strm, list);
+ list_del(&zstrm->list);
+ zcomp_strm_free(comp, zstrm);
+ zs->avail_strm--;
+ }
+ spin_unlock(&zs->strm_lock);
+ return true;
+}
+
+static void zcomp_strm_multi_destroy(struct zcomp *comp)
+{
+ struct zcomp_strm_multi *zs = comp->stream;
+ struct zcomp_strm *zstrm;
+
+ while (!list_empty(&zs->idle_strm)) {
+ zstrm = list_entry(zs->idle_strm.next,
+ struct zcomp_strm, list);
+ list_del(&zstrm->list);
+ zcomp_strm_free(comp, zstrm);
+ }
+ kfree(zs);
+}
+
+static int zcomp_strm_multi_create(struct zcomp *comp, int max_strm)
+{
+ struct zcomp_strm *zstrm;
+ struct zcomp_strm_multi *zs;
+
+ comp->destroy = zcomp_strm_multi_destroy;
+ comp->strm_find = zcomp_strm_multi_find;
+ comp->strm_release = zcomp_strm_multi_release;
+ comp->set_max_streams = zcomp_strm_multi_set_max_streams;
+ zs = kmalloc(sizeof(struct zcomp_strm_multi), GFP_KERNEL);
+ if (!zs)
+ return -ENOMEM;
+
+ comp->stream = zs;
+ spin_lock_init(&zs->strm_lock);
+ INIT_LIST_HEAD(&zs->idle_strm);
+ init_waitqueue_head(&zs->strm_wait);
+ zs->max_strm = max_strm;
+ zs->avail_strm = 1;
+
+ zstrm = zcomp_strm_alloc(comp);
+ if (!zstrm) {
+ kfree(zs);
+ return -ENOMEM;
+ }
+ list_add(&zstrm->list, &zs->idle_strm);
+ return 0;
+}
+
+static struct zcomp_strm *zcomp_strm_single_find(struct zcomp *comp)
+{
+ struct zcomp_strm_single *zs = comp->stream;
+ mutex_lock(&zs->strm_lock);
+ return zs->zstrm;
+}
+
+static void zcomp_strm_single_release(struct zcomp *comp,
+ struct zcomp_strm *zstrm)
+{
+ struct zcomp_strm_single *zs = comp->stream;
+ mutex_unlock(&zs->strm_lock);
+}
+
+static bool zcomp_strm_single_set_max_streams(struct zcomp *comp, int num_strm)
+{
+	/* zcomp_strm_single supports only max_comp_streams == 1 */
+ return false;
+}
+
+static void zcomp_strm_single_destroy(struct zcomp *comp)
+{
+ struct zcomp_strm_single *zs = comp->stream;
+ zcomp_strm_free(comp, zs->zstrm);
+ kfree(zs);
+}
+
+static int zcomp_strm_single_create(struct zcomp *comp)
+{
+ struct zcomp_strm_single *zs;
+
+ comp->destroy = zcomp_strm_single_destroy;
+ comp->strm_find = zcomp_strm_single_find;
+ comp->strm_release = zcomp_strm_single_release;
+ comp->set_max_streams = zcomp_strm_single_set_max_streams;
+ zs = kmalloc(sizeof(struct zcomp_strm_single), GFP_KERNEL);
+ if (!zs)
+ return -ENOMEM;
+
+ comp->stream = zs;
+ mutex_init(&zs->strm_lock);
+ zs->zstrm = zcomp_strm_alloc(comp);
+ if (!zs->zstrm) {
+ kfree(zs);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* show available compressors */
+ssize_t zcomp_available_show(const char *comp, char *buf)
+{
+ ssize_t sz = 0;
+ int i = 0;
+
+ while (backends[i]) {
+ if (sysfs_streq(comp, backends[i]->name))
+ sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
+ "[%s] ", backends[i]->name);
+ else
+ sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
+ "%s ", backends[i]->name);
+ i++;
+ }
+ sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
+ return sz;
+}
+
+bool zcomp_set_max_streams(struct zcomp *comp, int num_strm)
+{
+ return comp->set_max_streams(comp, num_strm);
+}
+
+struct zcomp_strm *zcomp_strm_find(struct zcomp *comp)
+{
+ return comp->strm_find(comp);
+}
+
+void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm)
+{
+ comp->strm_release(comp, zstrm);
+}
+
+int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const unsigned char *src, size_t *dst_len)
+{
+ return comp->backend->compress(src, zstrm->buffer, dst_len,
+ zstrm->private);
+}
+
+int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
+ size_t src_len, unsigned char *dst)
+{
+ return comp->backend->decompress(src, src_len, dst);
+}
+
+void zcomp_destroy(struct zcomp *comp)
+{
+ comp->destroy(comp);
+ kfree(comp);
+}
+
+/*
+ * search the available compressors for the requested algorithm,
+ * allocate a new zcomp and initialize it. Returns a pointer to the
+ * newly created zcomp on success, or an ERR_PTR on failure:
+ * ERR_PTR(-EINVAL) if the requested algorithm is not supported,
+ * ERR_PTR(-ENOMEM) in case of allocation error.
+ */
+struct zcomp *zcomp_create(const char *compress, int max_strm)
+{
+ struct zcomp *comp;
+ struct zcomp_backend *backend;
+
+ backend = find_backend(compress);
+ if (!backend)
+ return ERR_PTR(-EINVAL);
+
+ comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
+ if (!comp)
+ return ERR_PTR(-ENOMEM);
+
+ comp->backend = backend;
+ if (max_strm > 1)
+ zcomp_strm_multi_create(comp, max_strm);
+ else
+ zcomp_strm_single_create(comp);
+ if (!comp->stream) {
+ kfree(comp);
+ return ERR_PTR(-ENOMEM);
+ }
+ return comp;
+}
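A hedged usage sketch of the zcomp API added above, mirroring how the zram_drv.c changes below drive it (example_compress_page(), the "lzo" backend name and the stream count of 4 are illustrative; error handling is trimmed):

#include <linux/err.h>
#include <linux/mm.h>

#include "zcomp.h"

static int example_compress_page(struct page *page)
{
	struct zcomp *comp;
	struct zcomp_strm *zstrm;
	size_t clen = PAGE_SIZE;
	int err;

	comp = zcomp_create("lzo", 4);		/* backend name, max streams */
	if (IS_ERR(comp))
		return PTR_ERR(comp);

	zstrm = zcomp_strm_find(comp);		/* may wait for an idle stream */
	err = zcomp_compress(comp, zstrm, page_address(page), &clen);
	/* on success the compressed data sits in zstrm->buffer, clen bytes long */
	zcomp_strm_release(comp, zstrm);	/* always pair find/release */

	zcomp_destroy(comp);
	return err;
}

In the driver itself the zcomp is created once per device in disksize_store() and torn down in zram_reset_device(), not once per page as in this sketch.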
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
new file mode 100644
index 000000000000..c59d1fca72c0
--- /dev/null
+++ b/drivers/block/zram/zcomp.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ZCOMP_H_
+#define _ZCOMP_H_
+
+#include <linux/mutex.h>
+
+struct zcomp_strm {
+ /* compression/decompression buffer */
+ void *buffer;
+ /*
+ * The private data of the compression stream, only compression
+	 * The private data of the compression stream; only the
+	 * compression backend can touch this (e.g. compression algorithm
+ */
+ void *private;
+ /* used in multi stream backend, protected by backend strm_lock */
+ struct list_head list;
+};
+
+/* static compression backend */
+struct zcomp_backend {
+ int (*compress)(const unsigned char *src, unsigned char *dst,
+ size_t *dst_len, void *private);
+
+ int (*decompress)(const unsigned char *src, size_t src_len,
+ unsigned char *dst);
+
+ void *(*create)(void);
+ void (*destroy)(void *private);
+
+ const char *name;
+};
+
+/* dynamic per-device compression frontend */
+struct zcomp {
+ void *stream;
+ struct zcomp_backend *backend;
+
+ struct zcomp_strm *(*strm_find)(struct zcomp *comp);
+ void (*strm_release)(struct zcomp *comp, struct zcomp_strm *zstrm);
+ bool (*set_max_streams)(struct zcomp *comp, int num_strm);
+ void (*destroy)(struct zcomp *comp);
+};
+
+ssize_t zcomp_available_show(const char *comp, char *buf);
+
+struct zcomp *zcomp_create(const char *comp, int max_strm);
+void zcomp_destroy(struct zcomp *comp);
+
+struct zcomp_strm *zcomp_strm_find(struct zcomp *comp);
+void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm);
+
+int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const unsigned char *src, size_t *dst_len);
+
+int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
+ size_t src_len, unsigned char *dst);
+
+bool zcomp_set_max_streams(struct zcomp *comp, int num_strm);
+#endif /* _ZCOMP_H_ */
diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c
new file mode 100644
index 000000000000..f2afb7e988c3
--- /dev/null
+++ b/drivers/block/zram/zcomp_lz4.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/lz4.h>
+
+#include "zcomp_lz4.h"
+
+static void *zcomp_lz4_create(void)
+{
+ return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
+}
+
+static void zcomp_lz4_destroy(void *private)
+{
+ kfree(private);
+}
+
+static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
+ size_t *dst_len, void *private)
+{
+	/* returns 0 on success */
+ return lz4_compress(src, PAGE_SIZE, dst, dst_len, private);
+}
+
+static int zcomp_lz4_decompress(const unsigned char *src, size_t src_len,
+ unsigned char *dst)
+{
+ size_t dst_len = PAGE_SIZE;
+	/* returns 0 on success */
+ return lz4_decompress_unknownoutputsize(src, src_len, dst, &dst_len);
+}
+
+struct zcomp_backend zcomp_lz4 = {
+ .compress = zcomp_lz4_compress,
+ .decompress = zcomp_lz4_decompress,
+ .create = zcomp_lz4_create,
+ .destroy = zcomp_lz4_destroy,
+ .name = "lz4",
+};
diff --git a/drivers/block/zram/zcomp_lz4.h b/drivers/block/zram/zcomp_lz4.h
new file mode 100644
index 000000000000..60613fb29dd8
--- /dev/null
+++ b/drivers/block/zram/zcomp_lz4.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ZCOMP_LZ4_H_
+#define _ZCOMP_LZ4_H_
+
+#include "zcomp.h"
+
+extern struct zcomp_backend zcomp_lz4;
+
+#endif /* _ZCOMP_LZ4_H_ */
diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c
new file mode 100644
index 000000000000..da1bc47d588e
--- /dev/null
+++ b/drivers/block/zram/zcomp_lzo.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/lzo.h>
+
+#include "zcomp_lzo.h"
+
+static void *lzo_create(void)
+{
+ return kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+}
+
+static void lzo_destroy(void *private)
+{
+ kfree(private);
+}
+
+static int lzo_compress(const unsigned char *src, unsigned char *dst,
+ size_t *dst_len, void *private)
+{
+ int ret = lzo1x_1_compress(src, PAGE_SIZE, dst, dst_len, private);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+static int lzo_decompress(const unsigned char *src, size_t src_len,
+ unsigned char *dst)
+{
+ size_t dst_len = PAGE_SIZE;
+ int ret = lzo1x_decompress_safe(src, src_len, dst, &dst_len);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+struct zcomp_backend zcomp_lzo = {
+ .compress = lzo_compress,
+ .decompress = lzo_decompress,
+ .create = lzo_create,
+ .destroy = lzo_destroy,
+ .name = "lzo",
+};
diff --git a/drivers/block/zram/zcomp_lzo.h b/drivers/block/zram/zcomp_lzo.h
new file mode 100644
index 000000000000..128c5807fa14
--- /dev/null
+++ b/drivers/block/zram/zcomp_lzo.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ZCOMP_LZO_H_
+#define _ZCOMP_LZO_H_
+
+#include "zcomp.h"
+
+extern struct zcomp_backend zcomp_lzo;
+
+#endif /* _ZCOMP_LZO_H_ */
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 51c557cfd92b..9849b5233bf4 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -29,19 +29,36 @@
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
-#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
+#include <linux/err.h>
#include "zram_drv.h"
/* Globals */
static int zram_major;
static struct zram *zram_devices;
+static const char *default_compressor = "lzo";
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
+#define ZRAM_ATTR_RO(name) \
+static ssize_t zram_attr_##name##_show(struct device *d, \
+ struct device_attribute *attr, char *b) \
+{ \
+ struct zram *zram = dev_to_zram(d); \
+ return scnprintf(b, PAGE_SIZE, "%llu\n", \
+ (u64)atomic64_read(&zram->stats.name)); \
+} \
+static struct device_attribute dev_attr_##name = \
+ __ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
+
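For reference, this is roughly what a single ZRAM_ATTR_RO(num_reads) invocation expands to (illustration only, whitespace added):

static ssize_t zram_attr_num_reads_show(struct device *d,
		struct device_attribute *attr, char *b)
{
	struct zram *zram = dev_to_zram(d);

	return scnprintf(b, PAGE_SIZE, "%llu\n",
			(u64)atomic64_read(&zram->stats.num_reads));
}

static struct device_attribute dev_attr_num_reads =
	__ATTR(num_reads, S_IRUGO, zram_attr_num_reads_show, NULL);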
+static inline int init_done(struct zram *zram)
+{
+ return zram->meta != NULL;
+}
+
static inline struct zram *dev_to_zram(struct device *dev)
{
return (struct zram *)dev_to_disk(dev)->private_data;
@@ -52,92 +69,114 @@ static ssize_t disksize_show(struct device *dev,
{
struct zram *zram = dev_to_zram(dev);
- return sprintf(buf, "%llu\n", zram->disksize);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t initstate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ u32 val;
struct zram *zram = dev_to_zram(dev);
- return sprintf(buf, "%u\n", zram->init_done);
-}
-
-static ssize_t num_reads_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
+ down_read(&zram->init_lock);
+ val = init_done(zram);
+ up_read(&zram->init_lock);
- return sprintf(buf, "%llu\n",
- (u64)atomic64_read(&zram->stats.num_reads));
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
-static ssize_t num_writes_show(struct device *dev,
+static ssize_t orig_data_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zram *zram = dev_to_zram(dev);
- return sprintf(buf, "%llu\n",
- (u64)atomic64_read(&zram->stats.num_writes));
+ return scnprintf(buf, PAGE_SIZE, "%llu\n",
+ (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
-static ssize_t invalid_io_show(struct device *dev,
+static ssize_t mem_used_total_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ u64 val = 0;
struct zram *zram = dev_to_zram(dev);
+ struct zram_meta *meta = zram->meta;
- return sprintf(buf, "%llu\n",
- (u64)atomic64_read(&zram->stats.invalid_io));
-}
-
-static ssize_t notify_free_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
+ down_read(&zram->init_lock);
+ if (init_done(zram))
+ val = zs_get_total_size_bytes(meta->mem_pool);
+ up_read(&zram->init_lock);
- return sprintf(buf, "%llu\n",
- (u64)atomic64_read(&zram->stats.notify_free));
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
-static ssize_t zero_pages_show(struct device *dev,
+static ssize_t max_comp_streams_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ int val;
struct zram *zram = dev_to_zram(dev);
- return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
+ down_read(&zram->init_lock);
+ val = zram->max_comp_streams;
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t orig_data_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_comp_streams_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
{
+ int num;
struct zram *zram = dev_to_zram(dev);
+ int ret;
- return sprintf(buf, "%llu\n",
- (u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
-}
+ ret = kstrtoint(buf, 0, &num);
+ if (ret < 0)
+ return ret;
+ if (num < 1)
+ return -EINVAL;
-static ssize_t compr_data_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
+ down_write(&zram->init_lock);
+ if (init_done(zram)) {
+ if (!zcomp_set_max_streams(zram->comp, num)) {
+ pr_info("Cannot change max compression streams\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
- return sprintf(buf, "%llu\n",
- (u64)atomic64_read(&zram->stats.compr_size));
+ zram->max_comp_streams = num;
+ ret = len;
+out:
+ up_write(&zram->init_lock);
+ return ret;
}
-static ssize_t mem_used_total_show(struct device *dev,
+static ssize_t comp_algorithm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u64 val = 0;
+ size_t sz;
struct zram *zram = dev_to_zram(dev);
- struct zram_meta *meta = zram->meta;
down_read(&zram->init_lock);
- if (zram->init_done)
- val = zs_get_total_size_bytes(meta->mem_pool);
+ sz = zcomp_available_show(zram->compressor, buf);
up_read(&zram->init_lock);
- return sprintf(buf, "%llu\n", val);
+ return sz;
+}
+
+static ssize_t comp_algorithm_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ down_write(&zram->init_lock);
+ if (init_done(zram)) {
+ up_write(&zram->init_lock);
+ pr_info("Can't change algorithm for initialized device\n");
+ return -EBUSY;
+ }
+ strlcpy(zram->compressor, buf, sizeof(zram->compressor));
+ up_write(&zram->init_lock);
+ return len;
}
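A hedged usage note: with these attributes the intended order is to pick the backend and stream count before the device is initialized, e.g. write the algorithm name to /sys/block/zram<id>/comp_algorithm and the stream count to max_comp_streams, then set disksize (which passes both values to zcomp_create(), as seen further down). Once the device is initialized, comp_algorithm_store() returns -EBUSY, and max_comp_streams_store() only succeeds if the active backend can resize its stream pool (the multi-stream backend can, the single-stream one refuses).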
/* flag operations need meta->tb_lock */
@@ -192,8 +231,6 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
static void zram_meta_free(struct zram_meta *meta)
{
zs_destroy_pool(meta->mem_pool);
- kfree(meta->compress_workmem);
- free_pages((unsigned long)meta->compress_buffer, 1);
vfree(meta->table);
kfree(meta);
}
@@ -205,22 +242,11 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
if (!meta)
goto out;
- meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
- if (!meta->compress_workmem)
- goto free_meta;
-
- meta->compress_buffer =
- (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
- if (!meta->compress_buffer) {
- pr_err("Error allocating compressor buffer space\n");
- goto free_workmem;
- }
-
num_pages = disksize >> PAGE_SHIFT;
meta->table = vzalloc(num_pages * sizeof(*meta->table));
if (!meta->table) {
pr_err("Error allocating zram address table\n");
- goto free_buffer;
+ goto free_meta;
}
meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
@@ -230,15 +256,10 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
}
rwlock_init(&meta->tb_lock);
- mutex_init(&meta->buffer_lock);
return meta;
free_table:
vfree(meta->table);
-free_buffer:
- free_pages((unsigned long)meta->compress_buffer, 1);
-free_workmem:
- kfree(meta->compress_workmem);
free_meta:
kfree(meta);
meta = NULL;
@@ -288,7 +309,6 @@ static void zram_free_page(struct zram *zram, size_t index)
{
struct zram_meta *meta = zram->meta;
unsigned long handle = meta->table[index].handle;
- u16 size = meta->table[index].size;
if (unlikely(!handle)) {
/*
@@ -297,21 +317,15 @@ static void zram_free_page(struct zram *zram, size_t index)
*/
if (zram_test_flag(meta, index, ZRAM_ZERO)) {
zram_clear_flag(meta, index, ZRAM_ZERO);
- atomic_dec(&zram->stats.pages_zero);
+ atomic64_dec(&zram->stats.zero_pages);
}
return;
}
- if (unlikely(size > max_zpage_size))
- atomic_dec(&zram->stats.bad_compress);
-
zs_free(meta->mem_pool, handle);
- if (size <= PAGE_SIZE / 2)
- atomic_dec(&zram->stats.good_compress);
-
- atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
- atomic_dec(&zram->stats.pages_stored);
+ atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
+ atomic64_dec(&zram->stats.pages_stored);
meta->table[index].handle = 0;
meta->table[index].size = 0;
@@ -319,8 +333,7 @@ static void zram_free_page(struct zram *zram, size_t index)
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
- int ret = LZO_E_OK;
- size_t clen = PAGE_SIZE;
+ int ret = 0;
unsigned char *cmem;
struct zram_meta *meta = zram->meta;
unsigned long handle;
@@ -340,12 +353,12 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
if (size == PAGE_SIZE)
copy_page(mem, cmem);
else
- ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
+ ret = zcomp_decompress(zram->comp, cmem, size, mem);
zs_unmap_object(meta->mem_pool, handle);
read_unlock(&meta->tb_lock);
/* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret != LZO_E_OK)) {
+ if (unlikely(ret)) {
pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
atomic64_inc(&zram->stats.failed_reads);
return ret;
@@ -388,7 +401,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
ret = zram_decompress_page(zram, uncmem, index);
/* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret != LZO_E_OK))
+ if (unlikely(ret))
goto out_cleanup;
if (is_partial_io(bvec))
@@ -413,11 +426,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
struct zram_meta *meta = zram->meta;
+ struct zcomp_strm *zstrm;
bool locked = false;
page = bvec->bv_page;
- src = meta->compress_buffer;
-
if (is_partial_io(bvec)) {
/*
* This is a partial IO. We need to read the full page
@@ -433,7 +445,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
- mutex_lock(&meta->buffer_lock);
+ zstrm = zcomp_strm_find(zram->comp);
locked = true;
user_mem = kmap_atomic(page);
@@ -454,28 +466,25 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
zram_set_flag(meta, index, ZRAM_ZERO);
write_unlock(&zram->meta->tb_lock);
- atomic_inc(&zram->stats.pages_zero);
+ atomic64_inc(&zram->stats.zero_pages);
ret = 0;
goto out;
}
- ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
- meta->compress_workmem);
+ ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
if (!is_partial_io(bvec)) {
kunmap_atomic(user_mem);
user_mem = NULL;
uncmem = NULL;
}
- if (unlikely(ret != LZO_E_OK)) {
+ if (unlikely(ret)) {
pr_err("Compression failed! err=%d\n", ret);
goto out;
}
-
+ src = zstrm->buffer;
if (unlikely(clen > max_zpage_size)) {
- atomic_inc(&zram->stats.bad_compress);
clen = PAGE_SIZE;
- src = NULL;
if (is_partial_io(bvec))
src = uncmem;
}
@@ -497,6 +506,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
memcpy(cmem, src, clen);
}
+ zcomp_strm_release(zram->comp, zstrm);
+ locked = false;
zs_unmap_object(meta->mem_pool, handle);
/*
@@ -511,49 +522,88 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
write_unlock(&zram->meta->tb_lock);
/* Update stats */
- atomic64_add(clen, &zram->stats.compr_size);
- atomic_inc(&zram->stats.pages_stored);
- if (clen <= PAGE_SIZE / 2)
- atomic_inc(&zram->stats.good_compress);
-
+ atomic64_add(clen, &zram->stats.compr_data_size);
+ atomic64_inc(&zram->stats.pages_stored);
out:
if (locked)
- mutex_unlock(&meta->buffer_lock);
+ zcomp_strm_release(zram->comp, zstrm);
if (is_partial_io(bvec))
kfree(uncmem);
-
if (ret)
atomic64_inc(&zram->stats.failed_writes);
return ret;
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, struct bio *bio, int rw)
+ int offset, struct bio *bio)
{
int ret;
+ int rw = bio_data_dir(bio);
- if (rw == READ)
+ if (rw == READ) {
+ atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
- else
+ } else {
+ atomic64_inc(&zram->stats.num_writes);
ret = zram_bvec_write(zram, bvec, index, offset);
+ }
return ret;
}
+/*
+ * zram_bio_discard - handler for discard requests
+ * @index: physical block index in PAGE_SIZE units
+ * @offset: byte offset within physical block
+ */
+static void zram_bio_discard(struct zram *zram, u32 index,
+ int offset, struct bio *bio)
+{
+ size_t n = bio->bi_iter.bi_size;
+
+ /*
+	 * zram manages data in physical block size units. Because the
+	 * logical block size isn't identical to the physical block size
+	 * on some architectures, we could get a discard request pointing
+	 * to a specific offset within a certain physical block. Although
+	 * we could handle this request by reading that physical block,
+	 * decompressing it, partially zeroing it, re-compressing it and
+	 * then re-storing it, that isn't reasonable because our intent
+	 * with a discard request is to save memory. So skipping this
+	 * logical block is appropriate here.
+ */
+ if (offset) {
+ if (n < offset)
+ return;
+
+ n -= offset;
+ index++;
+ }
+
+ while (n >= PAGE_SIZE) {
+ /*
+ * Discard request can be large so the lock hold times could be
+ * lengthy. So take the lock once per page.
+ */
+ write_lock(&zram->meta->tb_lock);
+ zram_free_page(zram, index);
+ write_unlock(&zram->meta->tb_lock);
+ index++;
+ n -= PAGE_SIZE;
+ }
+}
+
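A worked example of the trimming above (illustrative numbers): with a PAGE_SIZE of 4096 bytes, a discard of bi_size = 12288 bytes that starts 512 bytes into a physical block (offset = 512) first skips the partial block, leaving n = 11776 and advancing index by one; the loop then frees two full pages (8192 bytes) and stops, leaving the final 3584-byte tail untouched.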
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
size_t index;
struct zram_meta *meta;
down_write(&zram->init_lock);
- if (!zram->init_done) {
+ if (!init_done(zram)) {
up_write(&zram->init_lock);
return;
}
meta = zram->meta;
- zram->init_done = 0;
-
/* Free all pages that are still in this zram device */
for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
unsigned long handle = meta->table[index].handle;
@@ -563,6 +613,9 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
zs_free(meta->mem_pool, handle);
}
+ zcomp_destroy(zram->comp);
+ zram->max_comp_streams = 1;
+
zram_meta_free(zram->meta);
zram->meta = NULL;
/* Reset stats */
@@ -574,37 +627,14 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
up_write(&zram->init_lock);
}
-static void zram_init_device(struct zram *zram, struct zram_meta *meta)
-{
- if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
- pr_info(
- "There is little point creating a zram of greater than "
- "twice the size of memory since we expect a 2:1 compression "
- "ratio. Note that zram uses about 0.1%% of the size of "
- "the disk when not in use so a huge zram is "
- "wasteful.\n"
- "\tMemory Size: %lu kB\n"
- "\tSize you selected: %llu kB\n"
- "Continuing anyway ...\n",
- (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
- );
- }
-
- /* zram devices sort of resembles non-rotational disks */
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-
- zram->meta = meta;
- zram->init_done = 1;
-
- pr_debug("Initialization done!\n");
-}
-
static ssize_t disksize_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
u64 disksize;
+ struct zcomp *comp;
struct zram_meta *meta;
struct zram *zram = dev_to_zram(dev);
+ int err;
disksize = memparse(buf, NULL);
if (!disksize)
@@ -614,20 +644,35 @@ static ssize_t disksize_store(struct device *dev,
meta = zram_meta_alloc(disksize);
if (!meta)
return -ENOMEM;
+
+ comp = zcomp_create(zram->compressor, zram->max_comp_streams);
+ if (IS_ERR(comp)) {
+ pr_info("Cannot initialise %s compressing backend\n",
+ zram->compressor);
+ err = PTR_ERR(comp);
+ goto out_free_meta;
+ }
+
down_write(&zram->init_lock);
- if (zram->init_done) {
- up_write(&zram->init_lock);
- zram_meta_free(meta);
+ if (init_done(zram)) {
pr_info("Cannot change disksize for initialized device\n");
- return -EBUSY;
+ err = -EBUSY;
+ goto out_destroy_comp;
}
+ zram->meta = meta;
+ zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- zram_init_device(zram, meta);
up_write(&zram->init_lock);
-
return len;
+
+out_destroy_comp:
+ up_write(&zram->init_lock);
+ zcomp_destroy(comp);
+out_free_meta:
+ zram_meta_free(meta);
+ return err;
}
static ssize_t reset_store(struct device *dev,
@@ -671,26 +716,23 @@ out:
return ret;
}
-static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
+static void __zram_make_request(struct zram *zram, struct bio *bio)
{
int offset;
u32 index;
struct bio_vec bvec;
struct bvec_iter iter;
- switch (rw) {
- case READ:
- atomic64_inc(&zram->stats.num_reads);
- break;
- case WRITE:
- atomic64_inc(&zram->stats.num_writes);
- break;
- }
-
index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
offset = (bio->bi_iter.bi_sector &
(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+ if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ zram_bio_discard(zram, index, offset, bio);
+ bio_endio(bio, 0);
+ return;
+ }
+
bio_for_each_segment(bvec, bio, iter) {
int max_transfer_size = PAGE_SIZE - offset;
@@ -705,16 +747,15 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
bv.bv_len = max_transfer_size;
bv.bv_offset = bvec.bv_offset;
- if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
goto out;
bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
- if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
goto out;
} else
- if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
- < 0)
+ if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
goto out;
update_position(&index, &offset, &bvec);
@@ -736,7 +777,7 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
struct zram *zram = queue->queuedata;
down_read(&zram->init_lock);
- if (unlikely(!zram->init_done))
+ if (unlikely(!init_done(zram)))
goto error;
if (!valid_io_request(zram, bio)) {
@@ -744,7 +785,7 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
goto error;
}
- __zram_make_request(zram, bio, bio_data_dir(bio));
+ __zram_make_request(zram, bio);
up_read(&zram->init_lock);
return;
@@ -778,14 +819,21 @@ static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
-static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
-static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
-static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
-static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
-static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
-static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
+static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
+ max_comp_streams_show, max_comp_streams_store);
+static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
+ comp_algorithm_show, comp_algorithm_store);
+
+ZRAM_ATTR_RO(num_reads);
+ZRAM_ATTR_RO(num_writes);
+ZRAM_ATTR_RO(failed_reads);
+ZRAM_ATTR_RO(failed_writes);
+ZRAM_ATTR_RO(invalid_io);
+ZRAM_ATTR_RO(notify_free);
+ZRAM_ATTR_RO(zero_pages);
+ZRAM_ATTR_RO(compr_data_size);
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
@@ -793,12 +841,16 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_reset.attr,
&dev_attr_num_reads.attr,
&dev_attr_num_writes.attr,
+ &dev_attr_failed_reads.attr,
+ &dev_attr_failed_writes.attr,
&dev_attr_invalid_io.attr,
&dev_attr_notify_free.attr,
&dev_attr_zero_pages.attr,
&dev_attr_orig_data_size.attr,
&dev_attr_compr_data_size.attr,
&dev_attr_mem_used_total.attr,
+ &dev_attr_max_comp_streams.attr,
+ &dev_attr_comp_algorithm.attr,
NULL,
};
@@ -839,7 +891,8 @@ static int create_device(struct zram *zram, int device_id)
/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
set_capacity(zram->disk, 0);
-
+	/* zram devices sort of resemble non-rotational disks */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
/*
* To ensure that we always get PAGE_SIZE aligned
* and n*PAGE_SIZED sized I/O requests.
@@ -849,6 +902,21 @@ static int create_device(struct zram *zram, int device_id)
ZRAM_LOGICAL_BLOCK_SIZE);
blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
+ zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
+ zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
+ /*
+ * zram_bio_discard() will clear all logical blocks if logical block
+	 * size is identical to the physical block size (PAGE_SIZE). But if
+	 * it is different, we skip discarding the parts of logical blocks
+	 * that aren't aligned to the physical block size. So we can't
+	 * ensure that all discarded logical blocks are
+ * zeroed.
+ */
+ if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
+ zram->disk->queue->limits.discard_zeroes_data = 1;
+ else
+ zram->disk->queue->limits.discard_zeroes_data = 0;
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
add_disk(zram->disk);
@@ -858,8 +926,9 @@ static int create_device(struct zram *zram, int device_id)
pr_warn("Error creating sysfs group");
goto out_free_disk;
}
-
- zram->init_done = 0;
+ strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
+ zram->meta = NULL;
+ zram->max_comp_streams = 1;
return 0;
out_free_disk:
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index ad8aa35bae00..7f21c145e317 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -16,9 +16,10 @@
#define _ZRAM_DRV_H_
#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/zsmalloc.h>
+#include "zcomp.h"
+
/*
* Some arbitrary value. This is just to catch
* invalid value for num_devices module parameter.
@@ -64,38 +65,33 @@ enum zram_pageflags {
struct table {
unsigned long handle;
u16 size; /* object size (excluding header) */
- u8 count; /* object ref count (not yet used) */
u8 flags;
} __aligned(4);
struct zram_stats {
- atomic64_t compr_size; /* compressed size of pages stored */
+ atomic64_t compr_data_size; /* compressed size of pages stored */
atomic64_t num_reads; /* failed + successful */
atomic64_t num_writes; /* --do-- */
atomic64_t failed_reads; /* should NEVER! happen */
atomic64_t failed_writes; /* can happen when memory is too low */
atomic64_t invalid_io; /* non-page-aligned I/O requests */
atomic64_t notify_free; /* no. of swap slot free notifications */
- atomic_t pages_zero; /* no. of zero filled pages */
- atomic_t pages_stored; /* no. of pages currently stored */
- atomic_t good_compress; /* % of pages with compression ratio<=50% */
- atomic_t bad_compress; /* % of pages with compression ratio>=75% */
+ atomic64_t zero_pages; /* no. of zero filled pages */
+ atomic64_t pages_stored; /* no. of pages currently stored */
};
struct zram_meta {
rwlock_t tb_lock; /* protect table */
- void *compress_workmem;
- void *compress_buffer;
struct table *table;
struct zs_pool *mem_pool;
- struct mutex buffer_lock; /* protect compress buffers */
};
struct zram {
struct zram_meta *meta;
struct request_queue *queue;
struct gendisk *disk;
- int init_done;
+ struct zcomp *comp;
+
/* Prevent concurrent execution of device init, reset and R/W request */
struct rw_semaphore init_lock;
/*
@@ -103,7 +99,8 @@ struct zram {
* we can store in a disk.
*/
u64 disksize; /* bytes */
-
+ int max_comp_streams;
struct zram_stats stats;
+ char compressor[10];
};
#endif
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11a6104a1e4f..f5ce64e03fd7 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -241,5 +241,6 @@ config BT_WILINK
core driver to communicate with the BT core of the combo chip.
Say Y here to compile support for Texas Instruments' WiLink7 driver
- into the kernel or say M to compile it as module.
+ into the kernel or say M to compile it as a module (btwilink).
+
endmenu
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 106d1d8e16ad..be571fef185d 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -62,50 +62,54 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0x3000) },
/* Atheros AR3011 with sflash firmware*/
+ { USB_DEVICE(0x0489, 0xE027) },
+ { USB_DEVICE(0x0489, 0xE03D) },
+ { USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0CF3, 0x3002) },
{ USB_DEVICE(0x0CF3, 0xE019) },
{ USB_DEVICE(0x13d3, 0x3304) },
- { USB_DEVICE(0x0930, 0x0215) },
- { USB_DEVICE(0x0489, 0xE03D) },
- { USB_DEVICE(0x0489, 0xE027) },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },
/* Atheros AR3012 with sflash firmware*/
- { USB_DEVICE(0x0CF3, 0x0036) },
- { USB_DEVICE(0x0CF3, 0x3004) },
- { USB_DEVICE(0x0CF3, 0x3008) },
- { USB_DEVICE(0x0CF3, 0x311D) },
- { USB_DEVICE(0x0CF3, 0x817a) },
- { USB_DEVICE(0x13d3, 0x3375) },
+ { USB_DEVICE(0x0489, 0xe04d) },
+ { USB_DEVICE(0x0489, 0xe04e) },
+ { USB_DEVICE(0x0489, 0xe057) },
+ { USB_DEVICE(0x0489, 0xe056) },
+ { USB_DEVICE(0x0489, 0xe05f) },
+ { USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
{ USB_DEVICE(0x04CA, 0x3006) },
{ USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x04CA, 0x300b) },
- { USB_DEVICE(0x13d3, 0x3362) },
- { USB_DEVICE(0x0CF3, 0xE004) },
- { USB_DEVICE(0x0CF3, 0xE005) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x0220) },
- { USB_DEVICE(0x0489, 0xe057) },
- { USB_DEVICE(0x13d3, 0x3393) },
- { USB_DEVICE(0x0489, 0xe04e) },
- { USB_DEVICE(0x0489, 0xe056) },
- { USB_DEVICE(0x0489, 0xe04d) },
- { USB_DEVICE(0x04c5, 0x1330) },
- { USB_DEVICE(0x13d3, 0x3402) },
+ { USB_DEVICE(0x0b05, 0x17d0) },
+ { USB_DEVICE(0x0CF3, 0x0036) },
+ { USB_DEVICE(0x0CF3, 0x3004) },
+ { USB_DEVICE(0x0CF3, 0x3005) },
+ { USB_DEVICE(0x0CF3, 0x3008) },
+ { USB_DEVICE(0x0CF3, 0x311D) },
+ { USB_DEVICE(0x0CF3, 0x311E) },
+ { USB_DEVICE(0x0CF3, 0x311F) },
{ USB_DEVICE(0x0cf3, 0x3121) },
+ { USB_DEVICE(0x0CF3, 0x817a) },
{ USB_DEVICE(0x0cf3, 0xe003) },
- { USB_DEVICE(0x0489, 0xe05f) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
+ { USB_DEVICE(0x0CF3, 0xE005) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x13d3, 0x3375) },
+ { USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x13d3, 0x3402) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
/* Atheros AR5BBU22 with sflash firmware */
- { USB_DEVICE(0x0489, 0xE03C) },
{ USB_DEVICE(0x0489, 0xE036) },
+ { USB_DEVICE(0x0489, 0xE03C) },
{ } /* Terminating entry */
};
@@ -118,36 +122,40 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
static const struct usb_device_id ath3k_blist_tbl[] = {
/* Atheros AR3012 with sflash firmware*/
- { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
- { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
{ } /* Terminating entry */
};
@@ -174,10 +182,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
}
memcpy(send_buf, firmware->data, 20);
- if ((err = usb_control_msg(udev, pipe,
- USB_REQ_DFU_DNLOAD,
- USB_TYPE_VENDOR, 0, 0,
- send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
+ err = usb_control_msg(udev, pipe, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR,
+ 0, 0, send_buf, 20, USB_CTRL_SET_TIMEOUT);
+ if (err < 0) {
BT_ERR("Can't change to loading configuration err");
goto error;
}
@@ -360,7 +367,7 @@ static int ath3k_load_patch(struct usb_device *udev)
}
snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu",
- fw_version.rom_version);
+ le32_to_cpu(fw_version.rom_version));
ret = request_firmware(&firmware, filename, &udev->dev);
if (ret < 0) {
@@ -422,7 +429,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
}
snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
- fw_version.rom_version, clk_value, ".dfu");
+ le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
ret = request_firmware(&firmware, filename, &udev->dev);
if (ret < 0) {
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 31386998c9a7..b2e7e94a6771 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -131,8 +131,11 @@ static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb)
BT_DBG("bfusb %p skb %p len %d", data, skb, skb->len);
- if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
- return -ENOMEM;
+ if (!urb) {
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+ }
pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep);
@@ -218,8 +221,11 @@ static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb)
BT_DBG("bfusb %p urb %p", data, urb);
- if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
- return -ENOMEM;
+ if (!urb) {
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+ }
skb = bt_skb_alloc(size, GFP_ATOMIC);
if (!skb) {
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 57427de864a6..dfa5043e68ba 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -257,7 +257,8 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
ready_bit = XMIT_BUF_ONE_READY;
}
- if (!(skb = skb_dequeue(&(info->txq))))
+ skb = skb_dequeue(&(info->txq));
+ if (!skb)
break;
if (bt_cb(skb)->pkt_type & 0x80) {
@@ -391,7 +392,8 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
- if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
return;
}
@@ -566,7 +568,8 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
/* Ericsson baud rate command */
unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 };
- if (!(skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate mem for new packet");
return -1;
}
@@ -898,7 +901,7 @@ static void bluecard_release(struct pcmcia_device *link)
bluecard_close(info);
- del_timer(&(info->timer));
+ del_timer_sync(&(info->timer));
pcmcia_disable_device(link);
}
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 73d87994d028..1d82721cf9c6 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -193,8 +193,8 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
if (!pcmcia_dev_present(info->p_dev))
break;
-
- if (!(skb = skb_dequeue(&(info->txq)))) {
+ skb = skb_dequeue(&(info->txq));
+ if (!skb) {
clear_bit(XMIT_SENDING, &(info->tx_state));
break;
}
@@ -238,7 +238,8 @@ static void bt3c_receive(bt3c_info_t *info)
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
- if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
return;
}
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 1e0320af00c6..2c4997ce2484 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -59,12 +59,13 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
priv->btmrvl_dev.sendcmdflag = false;
priv->adapter->cmd_complete = true;
wake_up_interruptible(&priv->adapter->cmd_wait_q);
- }
- if (hci_opcode_ogf(opcode) == 0x3F) {
- BT_DBG("vendor event skipped: opcode=%#4.4x", opcode);
- kfree_skb(skb);
- return false;
+ if (hci_opcode_ogf(opcode) == 0x3F) {
+ BT_DBG("vendor event skipped: opcode=%#4.4x",
+ opcode);
+ kfree_skb(skb);
+ return false;
+ }
}
}
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index a03ecc22a561..fb948f02eda5 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -149,7 +149,8 @@ static void btuart_write_wakeup(btuart_info_t *info)
if (!pcmcia_dev_present(info->p_dev))
return;
- if (!(skb = skb_dequeue(&(info->txq))))
+ skb = skb_dequeue(&(info->txq));
+ if (!skb)
break;
/* Send frame */
@@ -190,7 +191,8 @@ static void btuart_receive(btuart_info_t *info)
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
- if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
return;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index baeaaed299e4..f338b0c5a8de 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -101,21 +101,24 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0c10, 0x0000) },
/* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0489, 0xe042) },
+ { USB_DEVICE(0x04ca, 0x2003) },
{ USB_DEVICE(0x0b05, 0x17b5) },
{ USB_DEVICE(0x0b05, 0x17cb) },
- { USB_DEVICE(0x04ca, 0x2003) },
- { USB_DEVICE(0x0489, 0xe042) },
{ USB_DEVICE(0x413c, 0x8197) },
/* Foxconn - Hon Hai */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
- /*Broadcom devices with vendor specific id */
+ /* Broadcom devices with vendor specific id */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
/* Belkin F8065bf - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
+ /* IMC Networks - Broadcom based */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
+
{ } /* Terminating entry */
};
@@ -129,55 +132,59 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
/* Atheros 3011 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
/* Atheros 3012 with sflash firmware */
- { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
/* Atheros AR5BBU22 with sflash firmware */
- { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* Broadcom BCM2035 */
- { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
- { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
+ { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
+ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
/* Broadcom BCM2045 */
{ USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU },
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 52eed1f3565d..2bd8fad17206 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -153,7 +153,8 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
if (!pcmcia_dev_present(info->p_dev))
return;
- if (!(skb = skb_dequeue(&(info->txq))))
+ skb = skb_dequeue(&(info->txq));
+ if (!skb)
break;
/* Send frame */
@@ -215,13 +216,15 @@ static void dtl1_receive(dtl1_info_t *info)
info->hdev->stat.byte_rx++;
/* Allocate packet */
- if (info->rx_skb == NULL)
- if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ if (info->rx_skb == NULL) {
+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
info->rx_state = RECV_WAIT_NSH;
info->rx_count = NSHL;
return;
}
+ }
*skb_put(info->rx_skb, 1) = inb(iobase + UART_RX);
nsh = (nsh_t *)info->rx_skb->data;
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 0bc87f7abd95..21cc45b34f13 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -291,7 +291,8 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
/* First of all, check for unreliable messages in the queue,
since they have priority */
- if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) {
+ skb = skb_dequeue(&bcsp->unrel);
+ if (skb != NULL) {
struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
if (nskb) {
kfree_skb(skb);
@@ -308,16 +309,20 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
- if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) {
- struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
- if (nskb) {
- __skb_queue_tail(&bcsp->unack, skb);
- mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
- spin_unlock_irqrestore(&bcsp->unack.lock, flags);
- return nskb;
- } else {
- skb_queue_head(&bcsp->rel, skb);
- BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ if (bcsp->unack.qlen < BCSP_TXWINSIZE) {
+ skb = skb_dequeue(&bcsp->rel);
+ if (skb != NULL) {
+ struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
+ bt_cb(skb)->pkt_type);
+ if (nskb) {
+ __skb_queue_tail(&bcsp->unack, skb);
+ mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
+ spin_unlock_irqrestore(&bcsp->unack.lock, flags);
+ return nskb;
+ } else {
+ skb_queue_head(&bcsp->rel, skb);
+ BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ }
}
}
@@ -715,6 +720,9 @@ static int bcsp_open(struct hci_uart *hu)
static int bcsp_close(struct hci_uart *hu)
{
struct bcsp_struct *bcsp = hu->priv;
+
+ del_timer_sync(&bcsp->tbcsp);
+
hu->priv = NULL;
BT_DBG("hu %p", hu);
@@ -722,7 +730,6 @@ static int bcsp_close(struct hci_uart *hu)
skb_queue_purge(&bcsp->unack);
skb_queue_purge(&bcsp->rel);
skb_queue_purge(&bcsp->unrel);
- del_timer(&bcsp->tbcsp);
kfree(bcsp);
return 0;
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index f6f497450560..04680ead9275 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -206,12 +206,12 @@ static int h5_close(struct hci_uart *hu)
{
struct h5 *h5 = hu->priv;
+ del_timer_sync(&h5->timer);
+
skb_queue_purge(&h5->unack);
skb_queue_purge(&h5->rel);
skb_queue_purge(&h5->unrel);
- del_timer(&h5->timer);
-
kfree(h5);
return 0;
@@ -673,7 +673,8 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
}
- if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
+ skb = skb_dequeue(&h5->unrel);
+ if (skb != NULL) {
nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
skb->data, skb->len);
if (nskb) {
@@ -690,7 +691,8 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
if (h5->unack.qlen >= h5->tx_win)
goto unlock;
- if ((skb = skb_dequeue(&h5->rel)) != NULL) {
+ skb = skb_dequeue(&h5->rel);
+ if (skb != NULL) {
nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
skb->data, skb->len);
if (nskb) {
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 6e06f6f69152..f1fbf4f1e5be 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -271,7 +271,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
- if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) {
+ hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL);
+ if (!hu) {
BT_ERR("Can't allocate control structure");
return -ENFILE;
}
@@ -569,7 +570,8 @@ static int __init hci_uart_init(void)
hci_uart_ldisc.write_wakeup = hci_uart_tty_wakeup;
hci_uart_ldisc.owner = THIS_MODULE;
- if ((err = tty_register_ldisc(N_HCI, &hci_uart_ldisc))) {
+ err = tty_register_ldisc(N_HCI, &hci_uart_ldisc);
+ if (err) {
BT_ERR("HCI line discipline registration failed. (%d)", err);
return err;
}
@@ -614,7 +616,8 @@ static void __exit hci_uart_exit(void)
#endif
/* Release tty registration of line discipline */
- if ((err = tty_unregister_ldisc(N_HCI)))
+ err = tty_unregister_ldisc(N_HCI);
+ if (err)
BT_ERR("Can't unregister HCI line discipline (%d)", err);
}
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 1ef6990a5c7e..add1c6a72063 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -359,7 +359,7 @@ static const struct file_operations vhci_fops = {
static struct miscdevice vhci_miscdev= {
.name = "vhci",
.fops = &vhci_fops,
- .minor = MISC_DYNAMIC_MINOR,
+ .minor = VHCI_MINOR,
};
static int __init vhci_init(void)
@@ -385,3 +385,4 @@ MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("devname:vhci");
+MODULE_ALIAS_MISCDEV(VHCI_MINOR);
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 962fd35cbd8d..5a86da97a70b 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -31,7 +31,6 @@
#define DRIVER_NAME "CCI-400"
#define DRIVER_NAME_PMU DRIVER_NAME " PMU"
-#define PMU_NAME "CCI_400"
#define CCI_PORT_CTRL 0x0
#define CCI_CTRL_STATUS 0xc
@@ -88,8 +87,7 @@ static unsigned long cci_ctrl_phys;
#define CCI_REV_R0 0
#define CCI_REV_R1 1
-#define CCI_REV_R0_P4 4
-#define CCI_REV_R1_P2 6
+#define CCI_REV_R1_PX 5
#define CCI_PMU_EVT_SEL 0x000
#define CCI_PMU_CNTR 0x004
@@ -163,6 +161,15 @@ static struct pmu_port_event_ranges port_event_range[] = {
},
};
+/*
+ * Export different PMU names for the different revisions, so userspace
+ * knows which set of event ids applies.
+ */
+static char *const pmu_names[] = {
+ [CCI_REV_R0] = "CCI_400",
+ [CCI_REV_R1] = "CCI_400_r1",
+};
+
struct cci_pmu_drv_data {
void __iomem *base;
struct arm_pmu *cci_pmu;
@@ -193,21 +200,16 @@ static int probe_cci_revision(void)
rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
rev >>= CCI_PID2_REV_SHIFT;
- if (rev <= CCI_REV_R0_P4)
+ if (rev < CCI_REV_R1_PX)
return CCI_REV_R0;
- else if (rev <= CCI_REV_R1_P2)
+ else
return CCI_REV_R1;
-
- return -ENOENT;
}
static struct pmu_port_event_ranges *port_range_by_rev(void)
{
int rev = probe_cci_revision();
- if (rev < 0)
- return NULL;
-
return &port_event_range[rev];
}
@@ -526,7 +528,7 @@ static void pmu_write_counter(struct perf_event *event, u32 value)
static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev)
{
*cci_pmu = (struct arm_pmu){
- .name = PMU_NAME,
+ .name = pmu_names[probe_cci_revision()],
.max_period = (1LLU << 32) - 1,
.get_hw_events = pmu_get_hw_events,
.get_event_idx = pmu_get_event_idx,
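
A minimal sketch of the revision-to-name mapping introduced above, assuming the same cut-off as the patch (any PID2 revision field below r1p0 reports as CCI_400); the field values fed to it in main() are illustrative only.

#include <stdio.h>

enum { CCI_REV_R0 = 0, CCI_REV_R1 = 1 };
#define CCI_REV_R1_PX 5         /* first revision field value treated as r1 */

static const char *const pmu_names[] = {
        [CCI_REV_R0] = "CCI_400",
        [CCI_REV_R1] = "CCI_400_r1",
};

/* Mirror of the simplified probe_cci_revision(): no error path, always valid. */
static int cci_revision(int rev_field)
{
        return rev_field < CCI_REV_R1_PX ? CCI_REV_R0 : CCI_REV_R1;
}

int main(void)
{
        for (int rev = 0; rev < 8; rev++)
                printf("PID2 rev field %d -> %s\n", rev, pmu_names[cci_revision(rev)]);
        return 0;
}
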
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 3ef58c8dbf11..f8ee13c7bf7b 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -11,6 +11,9 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/regmap.h>
struct imx_weim_devtype {
unsigned int cs_count;
@@ -56,6 +59,55 @@ static const struct of_device_id weim_id_table[] = {
};
MODULE_DEVICE_TABLE(of, weim_id_table);
+static int __init imx_weim_gpr_setup(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct property *prop;
+ const __be32 *p;
+ struct regmap *gpr;
+ u32 gprvals[4] = {
+ 05, /* CS0(128M) CS1(0M) CS2(0M) CS3(0M) */
+ 033, /* CS0(64M) CS1(64M) CS2(0M) CS3(0M) */
+ 0113, /* CS0(64M) CS1(32M) CS2(32M) CS3(0M) */
+ 01111, /* CS0(32M) CS1(32M) CS2(32M) CS3(32M) */
+ };
+ u32 gprval = 0;
+ u32 val;
+ int cs = 0;
+ int i = 0;
+
+ gpr = syscon_regmap_lookup_by_phandle(np, "fsl,weim-cs-gpr");
+ if (IS_ERR(gpr)) {
+ dev_dbg(&pdev->dev, "failed to find weim-cs-gpr\n");
+ return 0;
+ }
+
+ of_property_for_each_u32(np, "ranges", prop, p, val) {
+ if (i % 4 == 0) {
+ cs = val;
+ } else if (i % 4 == 3 && val) {
+ val = (val / SZ_32M) | 1;
+ gprval |= val << cs * 3;
+ }
+ i++;
+ }
+
+ if (i == 0 || i % 4)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
+ if (gprval == gprvals[i]) {
+ /* Found it. Set up IOMUXC_GPR1[11:0] with it. */
+ regmap_update_bits(gpr, IOMUXC_GPR1, 0xfff, gprval);
+ return 0;
+ }
+ }
+
+err:
+ dev_err(&pdev->dev, "Invalid 'ranges' configuration\n");
+ return -EINVAL;
+}
+
/* Parse and set the timing for this device. */
static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
const struct imx_weim_devtype *devtype)
@@ -92,6 +144,12 @@ static int __init weim_parse_dt(struct platform_device *pdev,
struct device_node *child;
int ret;
+ if (devtype == &imx50_weim_devtype) {
+ ret = imx_weim_gpr_setup(pdev);
+ if (ret)
+ return ret;
+ }
+
for_each_child_of_node(pdev->dev.of_node, child) {
if (!child->name)
continue;
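
A standalone sketch of the IOMUXC_GPR1[11:0] encoding that imx_weim_gpr_setup() computes above: each chip select owns a 3-bit field set to (window size / 32M) | 1, and only the four listed combinations are accepted. The CS0/CS1 layout in main() is an assumption for the example.

#include <stdio.h>
#include <stdint.h>

#define SZ_32M  (32u * 1024 * 1024)

/* The four CS layouts accepted by the patch, as octal GPR1[11:0] values. */
static const uint32_t gprvals[] = { 05, 033, 0113, 01111 };

/* 3-bit field per chip select: (window size / 32M) | 1, shifted by cs * 3. */
static uint32_t encode_cs(unsigned int cs, uint32_t size)
{
        return size ? ((size / SZ_32M) | 1) << (cs * 3) : 0;
}

int main(void)
{
        /* hypothetical layout: 64M windows on CS0 and CS1, nothing on CS2/CS3 */
        uint32_t gprval = encode_cs(0, 2 * SZ_32M) | encode_cs(1, 2 * SZ_32M);
        unsigned int i;

        for (i = 0; i < sizeof(gprvals) / sizeof(gprvals[0]); i++)
                if (gprval == gprvals[i])
                        break;

        printf("gprval = 0%o (%s)\n", gprval,
               i < sizeof(gprvals) / sizeof(gprvals[0]) ? "valid" : "invalid");
        return 0;
}

For the 64M/64M layout this prints gprval = 033, matching the second accepted configuration.
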
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 725c46162bbd..293e2e0a0a87 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -870,14 +870,14 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
if (!ret) {
mem->start = reg[0];
- mem->end = mem->start + reg[1];
+ mem->end = mem->start + reg[1] - 1;
mem->flags = IORESOURCE_MEM;
}
ret = of_property_read_u32_array(np, "pcie-io-aperture", reg, ARRAY_SIZE(reg));
if (!ret) {
io->start = reg[0];
- io->end = io->start + reg[1];
+ io->end = io->start + reg[1] - 1;
io->flags = IORESOURCE_IO;
}
}
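
A small sketch of the off-by-one the two hunks above fix: struct resource treats 'end' as inclusive, so an aperture of 'size' bytes must end at start + size - 1. The aperture base and size below are made up.

#include <stdio.h>
#include <stdint.h>

struct resource_ex { uint64_t start, end; };    /* 'end' is inclusive */

static uint64_t resource_size_ex(const struct resource_ex *r)
{
        return r->end - r->start + 1;
}

int main(void)
{
        uint64_t start = 0xe0000000, size = 0x08000000; /* 128M aperture, made up */
        struct resource_ex old = { start, start + size };      /* off by one */
        struct resource_ex fix = { start, start + size - 1 };

        printf("old end gives 0x%llx bytes, fixed end gives 0x%llx bytes\n",
               (unsigned long long)resource_size_ex(&old),
               (unsigned long long)resource_size_ex(&fix));
        return 0;
}
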
@@ -890,13 +890,12 @@ int __init mvebu_mbus_dt_init(void)
const __be32 *prop;
int ret;
- np = of_find_matching_node(NULL, of_mvebu_mbus_ids);
+ np = of_find_matching_node_and_match(NULL, of_mvebu_mbus_ids, &of_id);
if (!np) {
pr_err("could not find a matching SoC family\n");
return -ENODEV;
}
- of_id = of_match_node(of_mvebu_mbus_ids, np);
mbus_state.soc = of_id->data;
prop = of_get_property(np, "controller", NULL);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 1386749b48ff..fbae63e3d304 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -408,7 +408,7 @@ config APPLICOM
config SONYPI
tristate "Sony Vaio Programmable I/O Control Device support"
- depends on X86 && PCI && INPUT && !64BIT
+ depends on X86_32 && PCI && INPUT
---help---
This driver enables access to the Sony Programmable I/O Control
Device which can be found in many (all ?) Sony Vaio laptops.
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 1b192395a90c..8121b4c70ede 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -31,7 +31,6 @@
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/agp_backend.h>
#include <linux/agpgart.h>
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index f39437addb58..0fbccce1cee9 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -29,7 +29,6 @@
*/
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 5c85350f4c3d..9a024f899dd4 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index 05b8d0241bde..3051c73bc383 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/agp_backend.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 2f2b08457c67..244759bbd7b7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -342,11 +342,11 @@ config HW_RANDOM_TPM
If unsure, say Y.
config HW_RANDOM_MSM
- tristate "Qualcomm MSM Random Number Generator support"
- depends on HW_RANDOM && ARCH_MSM
+ tristate "Qualcomm SoCs Random Number Generator support"
+ depends on HW_RANDOM && ARCH_QCOM
---help---
This driver provides kernel-side support for the Random Number
- Generator hardware found on Qualcomm MSM SoCs.
+ Generator hardware found on Qualcomm SoCs.
To compile this driver as a module, choose M here. The
module will be called msm-rng.
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index bf9fc6b79328..851bc7e20ad2 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -54,29 +54,22 @@ static int atmel_trng_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
if (!trng)
return -ENOMEM;
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
-
- trng->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!trng->base)
- return -EBUSY;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ trng->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(trng->base))
+ return PTR_ERR(trng->base);
- trng->clk = clk_get(&pdev->dev, NULL);
+ trng->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(trng->clk))
return PTR_ERR(trng->clk);
ret = clk_enable(trng->clk);
if (ret)
- goto err_enable;
+ return ret;
writel(TRNG_KEY | 1, trng->base + TRNG_CR);
trng->rng.name = pdev->name;
@@ -92,9 +85,6 @@ static int atmel_trng_probe(struct platform_device *pdev)
err_register:
clk_disable(trng->clk);
-err_enable:
- clk_put(trng->clk);
-
return ret;
}
@@ -106,7 +96,6 @@ static int atmel_trng_remove(struct platform_device *pdev)
writel(TRNG_KEY, trng->base + TRNG_CR);
clk_disable(trng->clk);
- clk_put(trng->clk);
return 0;
}
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 43577ca780e3..8c3b255e629a 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -8,7 +8,6 @@
*/
#include <linux/hw_random.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index a0f7724852eb..334601cc81cf 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -37,10 +37,10 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
@@ -302,9 +302,10 @@ err_misc_dereg:
int hwrng_register(struct hwrng *rng)
{
- int must_register_misc;
int err = -EINVAL;
struct hwrng *old_rng, *tmp;
+ unsigned char bytes[16];
+ int bytes_read;
if (rng->name == NULL ||
(rng->data_read == NULL && rng->read == NULL))
@@ -327,7 +328,6 @@ int hwrng_register(struct hwrng *rng)
goto out_unlock;
}
- must_register_misc = (current_rng == NULL);
old_rng = current_rng;
if (!old_rng) {
err = hwrng_init(rng);
@@ -336,18 +336,20 @@ int hwrng_register(struct hwrng *rng)
current_rng = rng;
}
err = 0;
- if (must_register_misc) {
+ if (!old_rng) {
err = register_miscdev();
if (err) {
- if (!old_rng) {
- hwrng_cleanup(rng);
- current_rng = NULL;
- }
+ hwrng_cleanup(rng);
+ current_rng = NULL;
goto out_unlock;
}
}
INIT_LIST_HEAD(&rng->list);
list_add_tail(&rng->list, &rng_list);
+
+ bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ if (bytes_read > 0)
+ add_device_randomness(bytes, bytes_read);
out_unlock:
mutex_unlock(&rng_mutex);
out:
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 402ccfb625c5..9f8277cc44b4 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -22,7 +22,6 @@
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index f9beed54d0c8..432232eefe05 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -7,7 +7,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/preempt.h>
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 232b87fb5fc9..9c8581577246 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/hw_random.h>
@@ -44,7 +43,7 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
void __iomem *base;
int ret;
- rng_clk = clk_get(&dev->dev, NULL);
+ rng_clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(rng_clk)) {
dev_err(&dev->dev, "could not get rng clock\n");
ret = PTR_ERR(rng_clk);
@@ -57,33 +56,28 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
if (ret)
goto out_clk;
ret = -ENOMEM;
- base = ioremap(dev->res.start, resource_size(&dev->res));
+ base = devm_ioremap(&dev->dev, dev->res.start,
+ resource_size(&dev->res));
if (!base)
goto out_release;
nmk_rng.priv = (unsigned long)base;
ret = hwrng_register(&nmk_rng);
if (ret)
- goto out_unmap;
+ goto out_release;
return 0;
-out_unmap:
- iounmap(base);
out_release:
amba_release_regions(dev);
out_clk:
clk_disable(rng_clk);
- clk_put(rng_clk);
return ret;
}
static int nmk_rng_remove(struct amba_device *dev)
{
- void __iomem *base = (void __iomem *)nmk_rng.priv;
hwrng_unregister(&nmk_rng);
- iounmap(base);
amba_release_regions(dev);
clk_disable(rng_clk);
- clk_put(rng_clk);
return 0;
}
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index f2885dbe1849..b5cc3420c659 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/hw_random.h>
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index c853e9e68573..6f2eaffed623 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -103,7 +103,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev)
}
setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
- rng_clk = clk_get(&pdev->dev, "ick");
+ rng_clk = devm_clk_get(&pdev->dev, "ick");
if (IS_ERR(rng_clk)) {
pr_err("unable to get RNG clock\n");
return PTR_ERR(rng_clk);
@@ -120,7 +120,6 @@ static int omap3_rom_rng_remove(struct platform_device *pdev)
{
hwrng_unregister(&omap3_rom_rng_ops);
clk_disable_unprepare(rng_clk);
- clk_put(rng_clk);
return 0;
}
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
index 3d4c2293c6f5..eab5448ad56f 100644
--- a/drivers/char/hw_random/picoxcell-rng.c
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -104,24 +104,11 @@ static int picoxcell_trng_probe(struct platform_device *pdev)
int ret;
struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_warn(&pdev->dev, "no memory resource\n");
- return -ENOMEM;
- }
-
- if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
- "picoxcell_trng")) {
- dev_warn(&pdev->dev, "unable to request io mem\n");
- return -EBUSY;
- }
+ rng_base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rng_base))
+ return PTR_ERR(rng_base);
- rng_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
- if (!rng_base) {
- dev_warn(&pdev->dev, "unable to remap io mem\n");
- return -ENOMEM;
- }
-
- rng_clk = clk_get(&pdev->dev, NULL);
+ rng_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rng_clk)) {
dev_warn(&pdev->dev, "no clk\n");
return PTR_ERR(rng_clk);
@@ -130,7 +117,7 @@ static int picoxcell_trng_probe(struct platform_device *pdev)
ret = clk_enable(rng_clk);
if (ret) {
dev_warn(&pdev->dev, "unable to enable clk\n");
- goto err_enable;
+ return ret;
}
picoxcell_trng_start();
@@ -145,9 +132,6 @@ static int picoxcell_trng_probe(struct platform_device *pdev)
err_register:
clk_disable(rng_clk);
-err_enable:
- clk_put(rng_clk);
-
return ret;
}
@@ -155,7 +139,6 @@ static int picoxcell_trng_remove(struct platform_device *pdev)
{
hwrng_unregister(&picoxcell_trng);
clk_disable(rng_clk);
- clk_put(rng_clk);
return 0;
}
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 73ce739f8e19..439ff8b28c43 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -118,7 +118,8 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
}
/* Allocate memory for the device structure (and zero it) */
- priv = kzalloc(sizeof(struct timeriomem_rng_private_data), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct timeriomem_rng_private_data), GFP_KERNEL);
if (!priv) {
dev_err(&pdev->dev, "failed to allocate device structure.\n");
return -ENOMEM;
@@ -134,17 +135,16 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
period = i;
else {
dev_err(&pdev->dev, "missing period\n");
- err = -EINVAL;
- goto out_free;
+ return -EINVAL;
}
- } else
+ } else {
period = pdata->period;
+ }
priv->period = usecs_to_jiffies(period);
if (priv->period < 1) {
dev_err(&pdev->dev, "period is less than one jiffy\n");
- err = -EINVAL;
- goto out_free;
+ return -EINVAL;
}
priv->expires = jiffies;
@@ -160,24 +160,16 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->timeriomem_rng_ops.data_read = timeriomem_rng_data_read;
priv->timeriomem_rng_ops.priv = (unsigned long)priv;
- if (!request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev))) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- err = -EBUSY;
+ priv->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->io_base)) {
+ err = PTR_ERR(priv->io_base);
goto out_timer;
}
- priv->io_base = ioremap(res->start, resource_size(res));
- if (priv->io_base == NULL) {
- dev_err(&pdev->dev, "ioremap failed\n");
- err = -EIO;
- goto out_release_io;
- }
-
err = hwrng_register(&priv->timeriomem_rng_ops);
if (err) {
dev_err(&pdev->dev, "problem registering\n");
- goto out;
+ goto out_timer;
}
dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
@@ -185,30 +177,18 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
return 0;
-out:
- iounmap(priv->io_base);
-out_release_io:
- release_mem_region(res->start, resource_size(res));
out_timer:
del_timer_sync(&priv->timer);
-out_free:
- kfree(priv);
return err;
}
static int timeriomem_rng_remove(struct platform_device *pdev)
{
struct timeriomem_rng_private_data *priv = platform_get_drvdata(pdev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hwrng_unregister(&priv->timeriomem_rng_ops);
del_timer_sync(&priv->timer);
- iounmap(priv->io_base);
- release_mem_region(res->start, resource_size(res));
- kfree(priv);
return 0;
}
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index c12398d1517c..2ce0e225e58c 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -47,8 +47,7 @@ static void register_buffer(u8 *buf, size_t size)
sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */
- if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0)
- BUG();
+ virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
virtqueue_kick(vq);
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 03f41896d090..b7efd3c1a882 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -61,7 +61,6 @@
#include <linux/ipmi_smi.h>
#include <asm/io.h>
#include "ipmi_si_sm.h"
-#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/string.h>
#include <linux/ctype.h>
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 92c5937f80c3..917403fe10da 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -99,6 +99,9 @@ static ssize_t read_mem(struct file *file, char __user *buf,
ssize_t read, sz;
char *ptr;
+ if (p != *ppos)
+ return 0;
+
if (!valid_phys_addr_range(p, count))
return -EFAULT;
read = 0;
@@ -157,6 +160,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
unsigned long copied;
void *ptr;
+ if (p != *ppos)
+ return -EFBIG;
+
if (!valid_phys_addr_range(p, count))
return -EFAULT;
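
A standalone illustration of the check added to read_mem()/write_mem() above, assuming a 32-bit phys_addr_t for the sake of the example: a file offset wider than phys_addr_t truncates on assignment, and comparing p against *ppos catches it before the range check.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t ppos = 0x100000000ll;   /* file offset just past 4 GiB */
        uint32_t p = (uint32_t)ppos;    /* what a 32-bit phys_addr_t would hold */

        if ((int64_t)p != ppos)
                printf("offset 0x%llx truncated to 0x%x, reject the access\n",
                       (unsigned long long)ppos, p);
        return 0;
}
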
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
index 881c9e595939..28740046bc83 100644
--- a/drivers/char/mwave/3780i.c
+++ b/drivers/char/mwave/3780i.c
@@ -50,7 +50,6 @@
#include <linux/unistd.h>
#include <linux/delay.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/sched.h> /* cond_resched() */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 429b75bb60e8..6b75713d953a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -295,17 +295,17 @@
* The minimum number of bits of entropy before we wake up a read on
* /dev/random. Should be enough to do a significant reseed.
*/
-static int random_read_wakeup_thresh = 64;
+static int random_read_wakeup_bits = 64;
/*
* If the entropy count falls under this number of bits, then we
* should wake up processes which are selecting or polling on write
* access to /dev/random.
*/
-static int random_write_wakeup_thresh = 28 * OUTPUT_POOL_WORDS;
+static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
/*
- * The minimum number of seconds between urandom pool resending. We
+ * The minimum number of seconds between urandom pool reseeding. We
* do this to limit the amount of entropy that can be drained from the
* input pool even if there are heavy demands on /dev/urandom.
*/
@@ -322,7 +322,7 @@ static int random_min_urandom_seed = 60;
* Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
* generators. ACM Transactions on Modeling and Computer Simulation
* 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
- * GFSR generators II. ACM Transactions on Mdeling and Computer
+ * GFSR generators II. ACM Transactions on Modeling and Computer
* Simulation 4:254-266)
*
* Thanks to Colin Plumb for suggesting this.
@@ -666,10 +666,10 @@ retry:
r->entropy_total, _RET_IP_);
if (r == &input_pool) {
- int entropy_bytes = entropy_count >> ENTROPY_SHIFT;
+ int entropy_bits = entropy_count >> ENTROPY_SHIFT;
/* should we wake readers? */
- if (entropy_bytes >= random_read_wakeup_thresh) {
+ if (entropy_bits >= random_read_wakeup_bits) {
wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
@@ -678,9 +678,9 @@ retry:
* forth between them, until the output pools are 75%
* full.
*/
- if (entropy_bytes > random_write_wakeup_thresh &&
+ if (entropy_bits > random_write_wakeup_bits &&
r->initialized &&
- r->entropy_total >= 2*random_read_wakeup_thresh) {
+ r->entropy_total >= 2*random_read_wakeup_bits) {
static struct entropy_store *last = &blocking_pool;
struct entropy_store *other = &blocking_pool;
@@ -844,6 +844,8 @@ void add_interrupt_randomness(int irq, int irq_flags)
cycles_t cycles = random_get_entropy();
__u32 input[4], c_high, j_high;
__u64 ip;
+ unsigned long seed;
+ int credit;
c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
j_high = (sizeof(now) > 4) ? now >> 32 : 0;
@@ -862,20 +864,33 @@ void add_interrupt_randomness(int irq, int irq_flags)
r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+
/*
* If we don't have a valid cycle counter, and we see
* back-to-back timer interrupts, then skip giving credit for
- * any entropy.
+ * any entropy, otherwise credit 1 bit.
*/
+ credit = 1;
if (cycles == 0) {
if (irq_flags & __IRQF_TIMER) {
if (fast_pool->last_timer_intr)
- return;
+ credit = 0;
fast_pool->last_timer_intr = 1;
} else
fast_pool->last_timer_intr = 0;
}
- credit_entropy_bits(r, 1);
+
+ /*
+ * If we have an architectural seed generator, produce a seed and
+ * add it to the pool. For the sake of paranoia, count it as
+ * 50% entropic.
+ */
+ if (arch_get_random_seed_long(&seed)) {
+ __mix_pool_bytes(r, &seed, sizeof(seed), NULL);
+ credit += sizeof(seed) * 4;
+ }
+
+ credit_entropy_bits(r, credit);
}
#ifdef CONFIG_BLOCK
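
The 50% crediting in the hunk above reduces to simple arithmetic; a throwaway sketch (the seed value is a placeholder, not a real hardware seed read):

#include <stdio.h>

int main(void)
{
        unsigned long seed = 0;         /* stand-in for one arch seed output */
        int credit = 1;                 /* the baseline 1 bit per interrupt */

        credit += sizeof(seed) * 4;     /* half of the 8 bits per byte */
        printf("credit %d bits for %zu seed bytes\n", credit, sizeof(seed));
        return 0;
}
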
@@ -924,19 +939,19 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
__u32 tmp[OUTPUT_POOL_WORDS];
- /* For /dev/random's pool, always leave two wakeup worth's BITS */
- int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+ /* For /dev/random's pool, always leave two wakeups' worth */
+ int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
int bytes = nbytes;
- /* pull at least as many as BYTES as wakeup BITS */
- bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+ /* pull at least as much as a wakeup */
+ bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
/* but never more than the buffer size */
bytes = min_t(int, bytes, sizeof(tmp));
trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
bytes = extract_entropy(r->pull, tmp, bytes,
- random_read_wakeup_thresh / 8, rsvd);
+ random_read_wakeup_bits / 8, rsvd_bytes);
mix_pool_bytes(r, tmp, bytes, NULL);
credit_entropy_bits(r, bytes*8);
}
@@ -952,35 +967,22 @@ static void push_to_pool(struct work_struct *work)
struct entropy_store *r = container_of(work, struct entropy_store,
push_work);
BUG_ON(!r);
- _xfer_secondary_pool(r, random_read_wakeup_thresh/8);
+ _xfer_secondary_pool(r, random_read_wakeup_bits/8);
trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
r->pull->entropy_count >> ENTROPY_SHIFT);
}
/*
- * These functions extracts randomness from the "entropy pool", and
- * returns it in a buffer.
- *
- * The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding while the
- * reserved parameter indicates how much entropy we must leave in the
- * pool after each pull to avoid starving other readers.
- *
- * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
+ * This function decides how many bytes to actually take from the
+ * given pool, and also debits the entropy count accordingly.
*/
-
static size_t account(struct entropy_store *r, size_t nbytes, int min,
int reserved)
{
- unsigned long flags;
- int wakeup_write = 0;
int have_bytes;
int entropy_count, orig;
size_t ibytes;
- /* Hold lock while accounting */
- spin_lock_irqsave(&r->lock, flags);
-
BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
/* Can we pull enough? */
@@ -988,29 +990,19 @@ retry:
entropy_count = orig = ACCESS_ONCE(r->entropy_count);
have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
ibytes = nbytes;
- if (have_bytes < min + reserved) {
+ /* If limited, never pull more than available */
+ if (r->limit)
+ ibytes = min_t(size_t, ibytes, have_bytes - reserved);
+ if (ibytes < min)
ibytes = 0;
- } else {
- /* If limited, never pull more than available */
- if (r->limit && ibytes + reserved >= have_bytes)
- ibytes = have_bytes - reserved;
-
- if (have_bytes >= ibytes + reserved)
- entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
- else
- entropy_count = reserved << (ENTROPY_SHIFT + 3);
-
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
-
- if ((r->entropy_count >> ENTROPY_SHIFT)
- < random_write_wakeup_thresh)
- wakeup_write = 1;
- }
- spin_unlock_irqrestore(&r->lock, flags);
+ entropy_count = max_t(int, 0,
+ entropy_count - (ibytes << (ENTROPY_SHIFT + 3)));
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
trace_debit_entropy(r->name, 8 * ibytes);
- if (wakeup_write) {
+ if (ibytes &&
+ (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
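
The rewritten account() above debits the pool without holding the lock by retrying a compare-and-swap; here is a userspace sketch of the same pattern using C11 atomics (the counter value and unit are invented):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int entropy_count = 4096;        /* arbitrary starting balance */

/* Take up to 'want' units, never driving the counter negative. */
static int debit(int want)
{
        int orig, newval, granted;

        do {
                orig = atomic_load(&entropy_count);
                granted = want < orig ? want : orig;
                newval = orig - granted;
        } while (!atomic_compare_exchange_weak(&entropy_count, &orig, newval));

        return granted;
}

int main(void)
{
        printf("debited %d, remaining %d\n", debit(1000),
               atomic_load(&entropy_count));
        printf("debited %d, remaining %d\n", debit(5000),
               atomic_load(&entropy_count));
        return 0;
}

If another thread changed the counter between the load and the exchange, the loop simply recomputes against the fresh value, which is the same race handling the retry label provides in the patch.
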
@@ -1018,6 +1010,12 @@ retry:
return ibytes;
}
+/*
+ * This function does the actual extraction for extract_entropy and
+ * extract_entropy_user.
+ *
+ * Note: we assume that .poolwords is a multiple of 16 words.
+ */
static void extract_buf(struct entropy_store *r, __u8 *out)
{
int i;
@@ -1029,23 +1027,23 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
__u8 extract[64];
unsigned long flags;
- /* Generate a hash across the pool, 16 words (512 bits) at a time */
- sha_init(hash.w);
- spin_lock_irqsave(&r->lock, flags);
- for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
-
/*
- * If we have a architectural hardware random number
- * generator, mix that in, too.
+ * If we have an architectural hardware random number
+ * generator, use it for SHA's initial vector
*/
+ sha_init(hash.w);
for (i = 0; i < LONGS(20); i++) {
unsigned long v;
if (!arch_get_random_long(&v))
break;
- hash.l[i] ^= v;
+ hash.l[i] = v;
}
+ /* Generate a hash across the pool, 16 words (512 bits) at a time */
+ spin_lock_irqsave(&r->lock, flags);
+ for (i = 0; i < r->poolinfo->poolwords; i += 16)
+ sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+
/*
* We mix the hash back into the pool to prevent backtracking
* attacks (where the attacker knows the state of the pool
@@ -1079,6 +1077,15 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
memset(&hash, 0, sizeof(hash));
}
+/*
+ * This function extracts randomness from the "entropy pool", and
+ * returns it in a buffer.
+ *
+ * The min parameter specifies the minimum amount we can pull before
+ * failing to avoid races that defeat catastrophic reseeding while the
+ * reserved parameter indicates how much entropy we must leave in the
+ * pool after each pull to avoid starving other readers.
+ */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
size_t nbytes, int min, int reserved)
{
@@ -1129,6 +1136,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
return ret;
}
+/*
+ * This function extracts randomness from the "entropy pool", and
+ * returns it in a userspace buffer.
+ */
static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
size_t nbytes)
{
@@ -1170,8 +1181,9 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
/*
* This function is the exported kernel interface. It returns some
* number of good random numbers, suitable for key generation, seeding
- * TCP sequence numbers, etc. It does not use the hw random number
- * generator, if available; use get_random_bytes_arch() for that.
+ * TCP sequence numbers, etc. It does not rely on the hardware random
+ * number generator. For random bytes direct from the hardware RNG
+ * (when available), use get_random_bytes_arch().
*/
void get_random_bytes(void *buf, int nbytes)
{
@@ -1238,7 +1250,8 @@ static void init_std_data(struct entropy_store *r)
r->last_pulled = jiffies;
mix_pool_bytes(r, &now, sizeof(now), NULL);
for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
- if (!arch_get_random_long(&rv))
+ if (!arch_get_random_seed_long(&rv) &&
+ !arch_get_random_long(&rv))
rv = random_get_entropy();
mix_pool_bytes(r, &rv, sizeof(rv), NULL);
}
@@ -1281,56 +1294,71 @@ void rand_initialize_disk(struct gendisk *disk)
}
#endif
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+/*
+ * Attempt an emergency refill using arch_get_random_seed_long().
+ *
+ * As with add_interrupt_randomness() be paranoid and only
+ * credit the output as 50% entropic.
+ */
+static int arch_random_refill(void)
{
- ssize_t n, retval = 0, count = 0;
+ const unsigned int nlongs = 64; /* Arbitrary number */
+ unsigned int n = 0;
+ unsigned int i;
+ unsigned long buf[nlongs];
- if (nbytes == 0)
+ if (!arch_has_random_seed())
return 0;
- while (nbytes > 0) {
- n = nbytes;
- if (n > SEC_XFER_SIZE)
- n = SEC_XFER_SIZE;
+ for (i = 0; i < nlongs; i++) {
+ if (arch_get_random_seed_long(&buf[n]))
+ n++;
+ }
- n = extract_entropy_user(&blocking_pool, buf, n);
+ if (n) {
+ unsigned int rand_bytes = n * sizeof(unsigned long);
- if (n < 0) {
- retval = n;
- break;
- }
+ mix_pool_bytes(&input_pool, buf, rand_bytes, NULL);
+ credit_entropy_bits(&input_pool, rand_bytes*4);
+ }
+ return n;
+}
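+/*
+ * For illustration (assuming 64-bit longs): a full refill gathers
+ * 64 * 8 = 512 bytes (4096 bits) of seed material, yet credits only
+ * rand_bytes * 4 = 2048 bits of entropy, i.e. 4 bits per byte, which
+ * is the 50% figure mentioned in the comment above.
+ */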
+
+static ssize_t
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+ ssize_t n;
+
+ if (nbytes == 0)
+ return 0;
+
+ nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
+ while (1) {
+ n = extract_entropy_user(&blocking_pool, buf, nbytes);
+ if (n < 0)
+ return n;
trace_random_read(n*8, (nbytes-n)*8,
ENTROPY_BITS(&blocking_pool),
ENTROPY_BITS(&input_pool));
+ if (n > 0)
+ return n;
- if (n == 0) {
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- break;
- }
-
- wait_event_interruptible(random_read_wait,
- ENTROPY_BITS(&input_pool) >=
- random_read_wakeup_thresh);
-
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
+ /* Pool is (near) empty. Maybe wait and retry. */
+ /* First try an emergency refill */
+ if (arch_random_refill())
continue;
- }
- count += n;
- buf += n;
- nbytes -= n;
- break; /* This break makes the device work */
- /* like a named pipe */
- }
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
- return (count ? count : retval);
+ wait_event_interruptible(random_read_wait,
+ ENTROPY_BITS(&input_pool) >=
+ random_read_wakeup_bits);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ }
}
static ssize_t
@@ -1358,9 +1386,9 @@ random_poll(struct file *file, poll_table * wait)
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
mask = 0;
- if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_thresh)
+ if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
mask |= POLLIN | POLLRDNORM;
- if (ENTROPY_BITS(&input_pool) < random_write_wakeup_thresh)
+ if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
mask |= POLLOUT | POLLWRNORM;
return mask;
}
@@ -1507,18 +1535,18 @@ EXPORT_SYMBOL(generate_random_uuid);
#include <linux/sysctl.h>
static int min_read_thresh = 8, min_write_thresh;
-static int max_read_thresh = INPUT_POOL_WORDS * 32;
+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static char sysctl_bootid[16];
/*
- * These functions is used to return both the bootid UUID, and random
+ * This function is used to return both the bootid UUID and a random
* UUID. The difference is in whether table->data is NULL; if it is,
* then a new UUID is generated and returned to the user.
*
- * If the user accesses this via the proc interface, it will be returned
- * as an ASCII string in the standard UUID format. If accesses via the
- * sysctl system call, it is returned as 16 bytes of binary data.
+ * If the user accesses this via the proc interface, the UUID will be
+ * returned as an ASCII string in the standard UUID format; if via the
+ * sysctl system call, as 16 bytes of binary data.
*/
static int proc_do_uuid(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1583,7 +1611,7 @@ struct ctl_table random_table[] = {
},
{
.procname = "read_wakeup_threshold",
- .data = &random_read_wakeup_thresh,
+ .data = &random_read_wakeup_bits,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -1592,7 +1620,7 @@ struct ctl_table random_table[] = {
},
{
.procname = "write_wakeup_threshold",
- .data = &random_write_wakeup_thresh,
+ .data = &random_write_wakeup_bits,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 0e506bad1986..bd377472dcfb 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/init.h>
#include <linux/kernel.h> /* printk() */
#include <linux/slab.h> /* kmalloc() */
#include <linux/fs.h> /* everything... */
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 1a65838888cd..c54cac3f8bc8 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -74,7 +74,7 @@ config TCG_NSC
config TCG_ATMEL
tristate "Atmel TPM Interface"
- depends on PPC64 || HAS_IOPORT
+ depends on PPC64 || HAS_IOPORT_MAP
---help---
If you have a TPM security chip from Atmel say Yes and it
will be accessible from within Linux. To compile this driver
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 52b9b2b2f300..472af4bb1b61 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -21,7 +21,6 @@
*
*
*/
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/wait.h>
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
index 5b0dd8ef74c0..3b7bf2162898 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -38,7 +38,6 @@
#include <linux/miscdevice.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/interrupt.h>
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 7641965d208d..6f56d3a4f010 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -65,10 +65,12 @@ config COMMON_CLK_SI570
clock generators.
config COMMON_CLK_S2MPS11
- tristate "Clock driver for S2MPS11 MFD"
+ tristate "Clock driver for S2MPS11/S5M8767 MFD"
depends on MFD_SEC_CORE
---help---
- This driver supports S2MPS11 crystal oscillator clock.
+ This driver supports S2MPS11/S5M8767 crystal oscillator clock. These
+ multi-function devices have 3 fixed-rate oscillators, clocked at
+ 32 kHz each.
config CLK_TWL6040
tristate "External McPDM functional clock from twl6040"
@@ -111,4 +113,5 @@ source "drivers/clk/qcom/Kconfig"
endmenu
+source "drivers/clk/bcm/Kconfig"
source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index a367a9831717..5f8a28735c96 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
+obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o
@@ -29,7 +30,9 @@ obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
obj-$(CONFIG_COMMON_CLK_AT91) += at91/
+obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm/
obj-$(CONFIG_ARCH_HI3xxx) += hisilicon/
+obj-$(CONFIG_ARCH_HIP04) += hisilicon/
obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
@@ -43,6 +46,7 @@ obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile/
obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
+obj-$(CONFIG_ARCH_STI) += st/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index fd792b203eaf..62e2509f9df1 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -13,12 +13,9 @@
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include "pmc.h"
@@ -38,104 +35,59 @@ struct clk_programmable_layout {
struct clk_programmable {
struct clk_hw hw;
struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
u8 id;
- u8 css;
- u8 pres;
- u8 slckmck;
const struct clk_programmable_layout *layout;
};
#define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw)
-
-static irqreturn_t clk_programmable_irq_handler(int irq, void *dev_id)
-{
- struct clk_programmable *prog = (struct clk_programmable *)dev_id;
-
- wake_up(&prog->wait);
-
- return IRQ_HANDLED;
-}
-
-static int clk_programmable_prepare(struct clk_hw *hw)
+static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- u32 tmp;
+ u32 pres;
struct clk_programmable *prog = to_clk_programmable(hw);
struct at91_pmc *pmc = prog->pmc;
const struct clk_programmable_layout *layout = prog->layout;
- u8 id = prog->id;
- u32 mask = PROG_STATUS_MASK(id);
-
- tmp = prog->css | (prog->pres << layout->pres_shift);
- if (layout->have_slck_mck && prog->slckmck)
- tmp |= AT91_PMC_CSSMCK_MCK;
-
- pmc_write(pmc, AT91_PMC_PCKR(id), tmp);
-
- while (!(pmc_read(pmc, AT91_PMC_SR) & mask))
- wait_event(prog->wait, pmc_read(pmc, AT91_PMC_SR) & mask);
- return 0;
+ pres = (pmc_read(pmc, AT91_PMC_PCKR(prog->id)) >> layout->pres_shift) &
+ PROG_PRES_MASK;
+ return parent_rate >> pres;
}
-static int clk_programmable_is_ready(struct clk_hw *hw)
+static long clk_programmable_determine_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_clk)
{
- struct clk_programmable *prog = to_clk_programmable(hw);
- struct at91_pmc *pmc = prog->pmc;
-
- return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_PCKR(prog->id));
-}
+ struct clk *parent = NULL;
+ long best_rate = -EINVAL;
+ unsigned long parent_rate;
+ unsigned long tmp_rate;
+ int shift;
+ int i;
-static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- u32 tmp;
- struct clk_programmable *prog = to_clk_programmable(hw);
- struct at91_pmc *pmc = prog->pmc;
- const struct clk_programmable_layout *layout = prog->layout;
+ for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
+ parent = clk_get_parent_by_index(hw->clk, i);
+ if (!parent)
+ continue;
- tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
- prog->pres = (tmp >> layout->pres_shift) & PROG_PRES_MASK;
+ parent_rate = __clk_get_rate(parent);
+ for (shift = 0; shift < PROG_PRES_MASK; shift++) {
+ tmp_rate = parent_rate >> shift;
+ if (tmp_rate <= rate)
+ break;
+ }
- return parent_rate >> prog->pres;
-}
+ if (tmp_rate > rate)
+ continue;
-static long clk_programmable_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- unsigned long best_rate = *parent_rate;
- unsigned long best_diff;
- unsigned long new_diff;
- unsigned long cur_rate;
- int shift = shift;
-
- if (rate > *parent_rate)
- return *parent_rate;
- else
- best_diff = *parent_rate - rate;
-
- if (!best_diff)
- return best_rate;
-
- for (shift = 1; shift < PROG_PRES_MASK; shift++) {
- cur_rate = *parent_rate >> shift;
-
- if (cur_rate > rate)
- new_diff = cur_rate - rate;
- else
- new_diff = rate - cur_rate;
-
- if (!new_diff)
- return cur_rate;
-
- if (new_diff < best_diff) {
- best_diff = new_diff;
- best_rate = cur_rate;
+ if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ *best_parent_rate = parent_rate;
+ *best_parent_clk = parent;
}
- if (rate > cur_rate)
+ if (!best_rate)
break;
}
@@ -146,17 +98,22 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_programmable *prog = to_clk_programmable(hw);
const struct clk_programmable_layout *layout = prog->layout;
+ struct at91_pmc *pmc = prog->pmc;
+ u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & ~layout->css_mask;
+
+ if (layout->have_slck_mck)
+ tmp &= AT91_PMC_CSSMCK_MCK;
+
if (index > layout->css_mask) {
if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) {
- prog->css = 0;
- prog->slckmck = 1;
+ tmp |= AT91_PMC_CSSMCK_MCK;
return 0;
} else {
return -EINVAL;
}
}
- prog->css = index;
+ pmc_write(pmc, AT91_PMC_PCKR(prog->id), tmp | index);
return 0;
}
@@ -169,13 +126,9 @@ static u8 clk_programmable_get_parent(struct clk_hw *hw)
const struct clk_programmable_layout *layout = prog->layout;
tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
- prog->css = tmp & layout->css_mask;
- ret = prog->css;
- if (layout->have_slck_mck) {
- prog->slckmck = !!(tmp & AT91_PMC_CSSMCK_MCK);
- if (prog->slckmck && !ret)
- ret = PROG_MAX_RM9200_CSS + 1;
- }
+ ret = tmp & layout->css_mask;
+ if (layout->have_slck_mck && (tmp & AT91_PMC_CSSMCK_MCK) && !ret)
+ ret = PROG_MAX_RM9200_CSS + 1;
return ret;
}
@@ -184,67 +137,47 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_programmable *prog = to_clk_programmable(hw);
- unsigned long best_rate = parent_rate;
- unsigned long best_diff;
- unsigned long new_diff;
- unsigned long cur_rate;
+ struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
+ unsigned long div = parent_rate / rate;
int shift = 0;
+ u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) &
+ ~(PROG_PRES_MASK << layout->pres_shift);
- if (rate > parent_rate)
- return parent_rate;
- else
- best_diff = parent_rate - rate;
-
- if (!best_diff) {
- prog->pres = shift;
- return 0;
- }
+ if (!div)
+ return -EINVAL;
- for (shift = 1; shift < PROG_PRES_MASK; shift++) {
- cur_rate = parent_rate >> shift;
+ shift = fls(div) - 1;
- if (cur_rate > rate)
- new_diff = cur_rate - rate;
- else
- new_diff = rate - cur_rate;
+ if (div != (1<<shift))
+ return -EINVAL;
- if (!new_diff)
- break;
+ if (shift >= PROG_PRES_MASK)
+ return -EINVAL;
- if (new_diff < best_diff) {
- best_diff = new_diff;
- best_rate = cur_rate;
- }
+ pmc_write(pmc, AT91_PMC_PCKR(prog->id),
+ tmp | (shift << layout->pres_shift));
- if (rate > cur_rate)
- break;
- }
-
- prog->pres = shift;
return 0;
}
static const struct clk_ops programmable_ops = {
- .prepare = clk_programmable_prepare,
- .is_prepared = clk_programmable_is_ready,
.recalc_rate = clk_programmable_recalc_rate,
- .round_rate = clk_programmable_round_rate,
+ .determine_rate = clk_programmable_determine_rate,
.get_parent = clk_programmable_get_parent,
.set_parent = clk_programmable_set_parent,
.set_rate = clk_programmable_set_rate,
};
static struct clk * __init
-at91_clk_register_programmable(struct at91_pmc *pmc, unsigned int irq,
+at91_clk_register_programmable(struct at91_pmc *pmc,
const char *name, const char **parent_names,
u8 num_parents, u8 id,
const struct clk_programmable_layout *layout)
{
- int ret;
struct clk_programmable *prog;
struct clk *clk = NULL;
struct clk_init_data init;
- char irq_name[11];
if (id > PROG_ID_MAX)
return ERR_PTR(-EINVAL);
@@ -263,14 +196,6 @@ at91_clk_register_programmable(struct at91_pmc *pmc, unsigned int irq,
prog->layout = layout;
prog->hw.init = &init;
prog->pmc = pmc;
- prog->irq = irq;
- init_waitqueue_head(&prog->wait);
- irq_set_status_flags(prog->irq, IRQ_NOAUTOEN);
- snprintf(irq_name, sizeof(irq_name), "clk-prog%d", id);
- ret = request_irq(prog->irq, clk_programmable_irq_handler,
- IRQF_TRIGGER_HIGH, irq_name, prog);
- if (ret)
- return ERR_PTR(ret);
clk = clk_register(NULL, &prog->hw);
if (IS_ERR(clk))
@@ -304,7 +229,6 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
int num;
u32 id;
int i;
- unsigned int irq;
struct clk *clk;
int num_parents;
const char *parent_names[PROG_SOURCE_MAX];
@@ -332,11 +256,7 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
if (of_property_read_string(np, "clock-output-names", &name))
name = progclknp->name;
- irq = irq_of_parse_and_map(progclknp, 0);
- if (!irq)
- continue;
-
- clk = at91_clk_register_programmable(pmc, irq, name,
+ clk = at91_clk_register_programmable(pmc, name,
parent_names, num_parents,
id, layout);
if (IS_ERR(clk))
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index 8f7c0434a09f..8c96307d7363 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -14,6 +14,11 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
#include "pmc.h"
@@ -25,19 +30,48 @@
struct clk_system {
struct clk_hw hw;
struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
u8 id;
};
-static int clk_system_enable(struct clk_hw *hw)
+static inline int is_pck(int id)
+{
+ return (id >= 8) && (id <= 15);
+}
+static irqreturn_t clk_system_irq_handler(int irq, void *dev_id)
+{
+ struct clk_system *sys = (struct clk_system *)dev_id;
+
+ wake_up(&sys->wait);
+ disable_irq_nosync(sys->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_system_prepare(struct clk_hw *hw)
{
struct clk_system *sys = to_clk_system(hw);
struct at91_pmc *pmc = sys->pmc;
+ u32 mask = 1 << sys->id;
- pmc_write(pmc, AT91_PMC_SCER, 1 << sys->id);
+ pmc_write(pmc, AT91_PMC_SCER, mask);
+
+ if (!is_pck(sys->id))
+ return 0;
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
+ if (sys->irq) {
+ enable_irq(sys->irq);
+ wait_event(sys->wait,
+ pmc_read(pmc, AT91_PMC_SR) & mask);
+ } else
+ cpu_relax();
+ }
return 0;
}
-static void clk_system_disable(struct clk_hw *hw)
+static void clk_system_unprepare(struct clk_hw *hw)
{
struct clk_system *sys = to_clk_system(hw);
struct at91_pmc *pmc = sys->pmc;
@@ -45,27 +79,34 @@ static void clk_system_disable(struct clk_hw *hw)
pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id);
}
-static int clk_system_is_enabled(struct clk_hw *hw)
+static int clk_system_is_prepared(struct clk_hw *hw)
{
struct clk_system *sys = to_clk_system(hw);
struct at91_pmc *pmc = sys->pmc;
- return !!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id));
+ if (!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id)))
+ return 0;
+
+ if (!is_pck(sys->id))
+ return 1;
+
+ return !!(pmc_read(pmc, AT91_PMC_SR) & (1 << sys->id));
}
static const struct clk_ops system_ops = {
- .enable = clk_system_enable,
- .disable = clk_system_disable,
- .is_enabled = clk_system_is_enabled,
+ .prepare = clk_system_prepare,
+ .unprepare = clk_system_unprepare,
+ .is_prepared = clk_system_is_prepared,
};
static struct clk * __init
at91_clk_register_system(struct at91_pmc *pmc, const char *name,
- const char *parent_name, u8 id)
+ const char *parent_name, u8 id, int irq)
{
struct clk_system *sys;
struct clk *clk = NULL;
struct clk_init_data init;
+ int ret;
if (!parent_name || id > SYSTEM_MAX_ID)
return ERR_PTR(-EINVAL);
@@ -84,11 +125,20 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name,
* (see drivers/memory) which would request and enable the ddrck clock.
* When this is done we will be able to remove CLK_IGNORE_UNUSED flag.
*/
- init.flags = CLK_IGNORE_UNUSED;
+ init.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED;
sys->id = id;
sys->hw.init = &init;
sys->pmc = pmc;
+ sys->irq = irq;
+ if (irq) {
+ init_waitqueue_head(&sys->wait);
+ irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
+ ret = request_irq(sys->irq, clk_system_irq_handler,
+ IRQF_TRIGGER_HIGH, name, sys);
+ if (ret)
+ return ERR_PTR(ret);
+ }
clk = clk_register(NULL, &sys->hw);
if (IS_ERR(clk))
@@ -101,6 +151,7 @@ static void __init
of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
{
int num;
+ int irq = 0;
u32 id;
struct clk *clk;
const char *name;
@@ -118,9 +169,12 @@ of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
if (of_property_read_string(np, "clock-output-names", &name))
name = sysclknp->name;
+ if (is_pck(id))
+ irq = irq_of_parse_and_map(sysclknp, 0);
+
parent_name = of_clk_get_parent_name(sysclknp, 0);
- clk = at91_clk_register_system(pmc, name, parent_name, id);
+ clk = at91_clk_register_system(pmc, name, parent_name, id, irq);
if (IS_ERR(clk))
continue;
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
new file mode 100644
index 000000000000..a7262fb8ce55
--- /dev/null
+++ b/drivers/clk/bcm/Kconfig
@@ -0,0 +1,9 @@
+config CLK_BCM_KONA
+ bool "Broadcom Kona CCU clock support"
+ depends on ARCH_BCM_MOBILE
+ depends on COMMON_CLK
+ default y
+ help
+ Enable common clock framework support for Broadcom SoCs
+ using "Kona" style clock control units, including those
+ in the BCM281xx family.
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile
new file mode 100644
index 000000000000..cf93359aa862
--- /dev/null
+++ b/drivers/clk/bcm/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CLK_BCM_KONA) += clk-kona.o
+obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o
+obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm281xx.o
diff --git a/drivers/clk/bcm/clk-bcm281xx.c b/drivers/clk/bcm/clk-bcm281xx.c
new file mode 100644
index 000000000000..3c66de696aeb
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm281xx.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "clk-kona.h"
+#include "dt-bindings/clock/bcm281xx.h"
+
+/* bcm11351 CCU device tree "compatible" strings */
+#define BCM11351_DT_ROOT_CCU_COMPAT "brcm,bcm11351-root-ccu"
+#define BCM11351_DT_AON_CCU_COMPAT "brcm,bcm11351-aon-ccu"
+#define BCM11351_DT_HUB_CCU_COMPAT "brcm,bcm11351-hub-ccu"
+#define BCM11351_DT_MASTER_CCU_COMPAT "brcm,bcm11351-master-ccu"
+#define BCM11351_DT_SLAVE_CCU_COMPAT "brcm,bcm11351-slave-ccu"
+
+/* Root CCU clocks */
+
+static struct peri_clk_data frac_1m_data = {
+ .gate = HW_SW_GATE(0x214, 16, 0, 1),
+ .trig = TRIGGER(0x0e04, 0),
+ .div = FRAC_DIVIDER(0x0e00, 0, 22, 16),
+ .clocks = CLOCKS("ref_crystal"),
+};
+
+/* AON CCU clocks */
+
+static struct peri_clk_data hub_timer_data = {
+ .gate = HW_SW_GATE(0x0414, 16, 0, 1),
+ .clocks = CLOCKS("bbl_32k",
+ "frac_1m",
+ "dft_19_5m"),
+ .sel = SELECTOR(0x0a10, 0, 2),
+ .trig = TRIGGER(0x0a40, 4),
+};
+
+static struct peri_clk_data pmu_bsc_data = {
+ .gate = HW_SW_GATE(0x0418, 16, 0, 1),
+ .clocks = CLOCKS("ref_crystal",
+ "pmu_bsc_var",
+ "bbl_32k"),
+ .sel = SELECTOR(0x0a04, 0, 2),
+ .div = DIVIDER(0x0a04, 3, 4),
+ .trig = TRIGGER(0x0a40, 0),
+};
+
+static struct peri_clk_data pmu_bsc_var_data = {
+ .clocks = CLOCKS("var_312m",
+ "ref_312m"),
+ .sel = SELECTOR(0x0a00, 0, 2),
+ .div = DIVIDER(0x0a00, 4, 5),
+ .trig = TRIGGER(0x0a40, 2),
+};
+
+/* Hub CCU clocks */
+
+static struct peri_clk_data tmon_1m_data = {
+ .gate = HW_SW_GATE(0x04a4, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "frac_1m"),
+ .sel = SELECTOR(0x0e74, 0, 2),
+ .trig = TRIGGER(0x0e84, 1),
+};
+
+/* Master CCU clocks */
+
+static struct peri_clk_data sdio1_data = {
+ .gate = HW_SW_GATE(0x0358, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a28, 0, 3),
+ .div = DIVIDER(0x0a28, 4, 14),
+ .trig = TRIGGER(0x0afc, 9),
+};
+
+static struct peri_clk_data sdio2_data = {
+ .gate = HW_SW_GATE(0x035c, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a2c, 0, 3),
+ .div = DIVIDER(0x0a2c, 4, 14),
+ .trig = TRIGGER(0x0afc, 10),
+};
+
+static struct peri_clk_data sdio3_data = {
+ .gate = HW_SW_GATE(0x0364, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a34, 0, 3),
+ .div = DIVIDER(0x0a34, 4, 14),
+ .trig = TRIGGER(0x0afc, 12),
+};
+
+static struct peri_clk_data sdio4_data = {
+ .gate = HW_SW_GATE(0x0360, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a30, 0, 3),
+ .div = DIVIDER(0x0a30, 4, 14),
+ .trig = TRIGGER(0x0afc, 11),
+};
+
+static struct peri_clk_data usb_ic_data = {
+ .gate = HW_SW_GATE(0x0354, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_96m",
+ "ref_96m"),
+ .div = FIXED_DIVIDER(2),
+ .sel = SELECTOR(0x0a24, 0, 2),
+ .trig = TRIGGER(0x0afc, 7),
+};
+
+/* also called usbh_48m */
+static struct peri_clk_data hsic2_48m_data = {
+ .gate = HW_SW_GATE(0x0370, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a38, 0, 2),
+ .div = FIXED_DIVIDER(2),
+ .trig = TRIGGER(0x0afc, 5),
+};
+
+/* also called usbh_12m */
+static struct peri_clk_data hsic2_12m_data = {
+ .gate = HW_SW_GATE(0x0370, 20, 4, 5),
+ .div = DIVIDER(0x0a38, 12, 2),
+ .clocks = CLOCKS("ref_crystal",
+ "var_96m",
+ "ref_96m"),
+ .pre_div = FIXED_DIVIDER(2),
+ .sel = SELECTOR(0x0a38, 0, 2),
+ .trig = TRIGGER(0x0afc, 5),
+};
+
+/* Slave CCU clocks */
+
+static struct peri_clk_data uartb_data = {
+ .gate = HW_SW_GATE(0x0400, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a10, 0, 2),
+ .div = FRAC_DIVIDER(0x0a10, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 2),
+};
+
+static struct peri_clk_data uartb2_data = {
+ .gate = HW_SW_GATE(0x0404, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a14, 0, 2),
+ .div = FRAC_DIVIDER(0x0a14, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 3),
+};
+
+static struct peri_clk_data uartb3_data = {
+ .gate = HW_SW_GATE(0x0408, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a18, 0, 2),
+ .div = FRAC_DIVIDER(0x0a18, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 4),
+};
+
+static struct peri_clk_data uartb4_data = {
+ .gate = HW_SW_GATE(0x0408, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a1c, 0, 2),
+ .div = FRAC_DIVIDER(0x0a1c, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 5),
+};
+
+static struct peri_clk_data ssp0_data = {
+ .gate = HW_SW_GATE(0x0410, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a20, 0, 3),
+ .div = DIVIDER(0x0a20, 4, 14),
+ .trig = TRIGGER(0x0afc, 6),
+};
+
+static struct peri_clk_data ssp2_data = {
+ .gate = HW_SW_GATE(0x0418, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a28, 0, 3),
+ .div = DIVIDER(0x0a28, 4, 14),
+ .trig = TRIGGER(0x0afc, 8),
+};
+
+static struct peri_clk_data bsc1_data = {
+ .gate = HW_SW_GATE(0x0458, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a64, 0, 3),
+ .trig = TRIGGER(0x0afc, 23),
+};
+
+static struct peri_clk_data bsc2_data = {
+ .gate = HW_SW_GATE(0x045c, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a68, 0, 3),
+ .trig = TRIGGER(0x0afc, 24),
+};
+
+static struct peri_clk_data bsc3_data = {
+ .gate = HW_SW_GATE(0x0484, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a84, 0, 3),
+ .trig = TRIGGER(0x0b00, 2),
+};
+
+static struct peri_clk_data pwm_data = {
+ .gate = HW_SW_GATE(0x0468, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m"),
+ .sel = SELECTOR(0x0a70, 0, 2),
+ .div = DIVIDER(0x0a70, 4, 3),
+ .trig = TRIGGER(0x0afc, 15),
+};
+
+/*
+ * CCU setup routines
+ *
+ * These are called from kona_dt_ccu_setup() to initialize the array
+ * of clocks provided by the CCU. Once allocated, the entries in
+ * the array are initialized by calling kona_clk_setup() with the
+ * initialization data for each clock. They return 0 if successful
+ * or an error code otherwise.
+ */
+static int __init bcm281xx_root_ccu_clks_setup(struct ccu_data *ccu)
+{
+ struct clk **clks;
+ size_t count = BCM281XX_ROOT_CCU_CLOCK_COUNT;
+
+ clks = kzalloc(count * sizeof(*clks), GFP_KERNEL);
+ if (!clks) {
+ pr_err("%s: failed to allocate root clocks\n", __func__);
+ return -ENOMEM;
+ }
+ ccu->data.clks = clks;
+ ccu->data.clk_num = count;
+
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_ROOT_CCU_FRAC_1M, frac_1m);
+
+ return 0;
+}
+
+static int __init bcm281xx_aon_ccu_clks_setup(struct ccu_data *ccu)
+{
+ struct clk **clks;
+ size_t count = BCM281XX_AON_CCU_CLOCK_COUNT;
+
+ clks = kzalloc(count * sizeof(*clks), GFP_KERNEL);
+ if (!clks) {
+ pr_err("%s: failed to allocate aon clocks\n", __func__);
+ return -ENOMEM;
+ }
+ ccu->data.clks = clks;
+ ccu->data.clk_num = count;
+
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_HUB_TIMER, hub_timer);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_PMU_BSC, pmu_bsc);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_PMU_BSC_VAR, pmu_bsc_var);
+
+ return 0;
+}
+
+static int __init bcm281xx_hub_ccu_clks_setup(struct ccu_data *ccu)
+{
+ struct clk **clks;
+ size_t count = BCM281XX_HUB_CCU_CLOCK_COUNT;
+
+ clks = kzalloc(count * sizeof(*clks), GFP_KERNEL);
+ if (!clks) {
+ pr_err("%s: failed to allocate hub clocks\n", __func__);
+ return -ENOMEM;
+ }
+ ccu->data.clks = clks;
+ ccu->data.clk_num = count;
+
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_HUB_CCU_TMON_1M, tmon_1m);
+
+ return 0;
+}
+
+static int __init bcm281xx_master_ccu_clks_setup(struct ccu_data *ccu)
+{
+ struct clk **clks;
+ size_t count = BCM281XX_MASTER_CCU_CLOCK_COUNT;
+
+ clks = kzalloc(count * sizeof(*clks), GFP_KERNEL);
+ if (!clks) {
+ pr_err("%s: failed to allocate master clocks\n", __func__);
+ return -ENOMEM;
+ }
+ ccu->data.clks = clks;
+ ccu->data.clk_num = count;
+
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO1, sdio1);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO2, sdio2);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO3, sdio3);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO4, sdio4);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_USB_IC, usb_ic);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_HSIC2_48M, hsic2_48m);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_HSIC2_12M, hsic2_12m);
+
+ return 0;
+}
+
+static int __init bcm281xx_slave_ccu_clks_setup(struct ccu_data *ccu)
+{
+ struct clk **clks;
+ size_t count = BCM281XX_SLAVE_CCU_CLOCK_COUNT;
+
+ clks = kzalloc(count * sizeof(*clks), GFP_KERNEL);
+ if (!clks) {
+ pr_err("%s: failed to allocate slave clocks\n", __func__);
+ return -ENOMEM;
+ }
+ ccu->data.clks = clks;
+ ccu->data.clk_num = count;
+
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB, uartb);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB2, uartb2);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB3, uartb3);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB4, uartb4);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_SSP0, ssp0);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_SSP2, ssp2);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC1, bsc1);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC2, bsc2);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC3, bsc3);
+ PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_PWM, pwm);
+
+ return 0;
+}
+
+/* Device tree match table callback functions */
+
+static void __init kona_dt_root_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(node, bcm281xx_root_ccu_clks_setup);
+}
+
+static void __init kona_dt_aon_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(node, bcm281xx_aon_ccu_clks_setup);
+}
+
+static void __init kona_dt_hub_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(node, bcm281xx_hub_ccu_clks_setup);
+}
+
+static void __init kona_dt_master_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(node, bcm281xx_master_ccu_clks_setup);
+}
+
+static void __init kona_dt_slave_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(node, bcm281xx_slave_ccu_clks_setup);
+}
+
+CLK_OF_DECLARE(bcm11351_root_ccu, BCM11351_DT_ROOT_CCU_COMPAT,
+ kona_dt_root_ccu_setup);
+CLK_OF_DECLARE(bcm11351_aon_ccu, BCM11351_DT_AON_CCU_COMPAT,
+ kona_dt_aon_ccu_setup);
+CLK_OF_DECLARE(bcm11351_hub_ccu, BCM11351_DT_HUB_CCU_COMPAT,
+ kona_dt_hub_ccu_setup);
+CLK_OF_DECLARE(bcm11351_master_ccu, BCM11351_DT_MASTER_CCU_COMPAT,
+ kona_dt_master_ccu_setup);
+CLK_OF_DECLARE(bcm11351_slave_ccu, BCM11351_DT_SLAVE_CCU_COMPAT,
+ kona_dt_slave_ccu_setup);
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
new file mode 100644
index 000000000000..c7607feb18dd
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of_address.h>
+
+#include "clk-kona.h"
+
+/* These are used when a selector or trigger is found to be unneeded */
+#define selector_clear_exists(sel) ((sel)->width = 0)
+#define trigger_clear_exists(trig) FLAG_CLEAR(trig, TRIG, EXISTS)
+
+LIST_HEAD(ccu_list); /* The list of set up CCUs */
+
+/* Validity checking */
+
+static bool clk_requires_trigger(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri = bcm_clk->peri;
+ struct bcm_clk_sel *sel;
+ struct bcm_clk_div *div;
+
+ if (bcm_clk->type != bcm_clk_peri)
+ return false;
+
+ sel = &peri->sel;
+ if (sel->parent_count && selector_exists(sel))
+ return true;
+
+ div = &peri->div;
+ if (!divider_exists(div))
+ return false;
+
+ /* Fixed dividers don't need triggers */
+ if (!divider_is_fixed(div))
+ return true;
+
+ div = &peri->pre_div;
+
+ return divider_exists(div) && !divider_is_fixed(div);
+}
+
+static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri;
+ struct bcm_clk_gate *gate;
+ struct bcm_clk_div *div;
+ struct bcm_clk_sel *sel;
+ struct bcm_clk_trig *trig;
+ const char *name;
+ u32 range;
+ u32 limit;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+ peri = bcm_clk->peri;
+ name = bcm_clk->name;
+ range = bcm_clk->ccu->range;
+
+ limit = range - sizeof(u32);
+ limit = round_down(limit, sizeof(u32));
+
+ gate = &peri->gate;
+ if (gate_exists(gate)) {
+ if (gate->offset > limit) {
+ pr_err("%s: bad gate offset for %s (%u > %u)\n",
+ __func__, name, gate->offset, limit);
+ return false;
+ }
+ }
+
+ div = &peri->div;
+ if (divider_exists(div)) {
+ if (div->offset > limit) {
+ pr_err("%s: bad divider offset for %s (%u > %u)\n",
+ __func__, name, div->offset, limit);
+ return false;
+ }
+ }
+
+ div = &peri->pre_div;
+ if (divider_exists(div)) {
+ if (div->offset > limit) {
+ pr_err("%s: bad pre-divider offset for %s "
+ "(%u > %u)\n",
+ __func__, name, div->offset, limit);
+ return false;
+ }
+ }
+
+ sel = &peri->sel;
+ if (selector_exists(sel)) {
+ if (sel->offset > limit) {
+ pr_err("%s: bad selector offset for %s (%u > %u)\n",
+ __func__, name, sel->offset, limit);
+ return false;
+ }
+ }
+
+ trig = &peri->trig;
+ if (trigger_exists(trig)) {
+ if (trig->offset > limit) {
+ pr_err("%s: bad trigger offset for %s (%u > %u)\n",
+ __func__, name, trig->offset, limit);
+ return false;
+ }
+ }
+
+ trig = &peri->pre_trig;
+ if (trigger_exists(trig)) {
+ if (trig->offset > limit) {
+ pr_err("%s: bad pre-trigger offset for %s (%u > %u)\n",
+ __func__, name, trig->offset, limit);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* A bit position must be less than the number of bits in a 32-bit register. */
+static bool bit_posn_valid(u32 bit_posn, const char *field_name,
+ const char *clock_name)
+{
+ u32 limit = BITS_PER_BYTE * sizeof(u32) - 1;
+
+ if (bit_posn > limit) {
+ pr_err("%s: bad %s bit for %s (%u > %u)\n", __func__,
+ field_name, clock_name, bit_posn, limit);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * A bitfield must be at least 1 bit wide. Both the low-order and
+ * high-order bits must lie within a 32-bit register. We require
+ * fields to be less than 32 bits wide, mainly because we use
+ * shifting to produce field masks, and shifting a full word width
+ * is not well-defined by the C standard.
+ */
+static bool bitfield_valid(u32 shift, u32 width, const char *field_name,
+ const char *clock_name)
+{
+ u32 limit = BITS_PER_BYTE * sizeof(u32);
+
+ if (!width) {
+ pr_err("%s: bad %s field width 0 for %s\n", __func__,
+ field_name, clock_name);
+ return false;
+ }
+ if (shift + width > limit) {
+ pr_err("%s: bad %s for %s (%u + %u > %u)\n", __func__,
+ field_name, clock_name, shift, width, limit);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * All gates, if defined, have a status bit, and for hardware-only
+ * gates, that's it. Gates that can be software controlled also
+ * have an enable bit. And a gate that can be hardware or software
+ * controlled will have a hardware/software select bit.
+ */
+static bool gate_valid(struct bcm_clk_gate *gate, const char *field_name,
+ const char *clock_name)
+{
+ if (!bit_posn_valid(gate->status_bit, "gate status", clock_name))
+ return false;
+
+ if (gate_is_sw_controllable(gate)) {
+ if (!bit_posn_valid(gate->en_bit, "gate enable", clock_name))
+ return false;
+
+ if (gate_is_hw_controllable(gate)) {
+ if (!bit_posn_valid(gate->hw_sw_sel_bit,
+ "gate hw/sw select",
+ clock_name))
+ return false;
+ }
+ } else {
+ BUG_ON(!gate_is_hw_controllable(gate));
+ }
+
+ return true;
+}
+
+/*
+ * A selector bitfield must be valid. Its parent_sel array must
+ * also be reasonable for the field.
+ */
+static bool sel_valid(struct bcm_clk_sel *sel, const char *field_name,
+ const char *clock_name)
+{
+ if (!bitfield_valid(sel->shift, sel->width, field_name, clock_name))
+ return false;
+
+ if (sel->parent_count) {
+ u32 max_sel;
+ u32 limit;
+
+ /*
+ * Make sure the selector field can hold all the
+ * selector values we expect to be able to use. A
+ * clock only needs to have a selector defined if it
+ * has more than one parent. And in that case the
+ * highest selector value will be in the last entry
+ * in the array.
+ */
+ max_sel = sel->parent_sel[sel->parent_count - 1];
+ limit = (1 << sel->width) - 1;
+ if (max_sel > limit) {
+ pr_err("%s: bad selector for %s "
+ "(%u needs > %u bits)\n",
+ __func__, clock_name, max_sel,
+ sel->width);
+ return false;
+ }
+ } else {
+ pr_warn("%s: ignoring selector for %s (no parents)\n",
+ __func__, clock_name);
+ selector_clear_exists(sel);
+ kfree(sel->parent_sel);
+ sel->parent_sel = NULL;
+ }
+
+ return true;
+}
+
+/*
+ * A fixed divider just needs to be non-zero. A variable divider
+ * has to have a valid divider bitfield, and if it has a fraction,
+ * the width of the fraction must be no more than the width of
+ * the divider as a whole.
+ */
+static bool div_valid(struct bcm_clk_div *div, const char *field_name,
+ const char *clock_name)
+{
+ if (divider_is_fixed(div)) {
+ /* Any fixed divider value but 0 is OK */
+ if (div->fixed == 0) {
+ pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
+ field_name, clock_name);
+ return false;
+ }
+ return true;
+ }
+ if (!bitfield_valid(div->shift, div->width, field_name, clock_name))
+ return false;
+
+ if (divider_has_fraction(div))
+ if (div->frac_width > div->width) {
+ pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
+ __func__, field_name, clock_name,
+ div->frac_width, div->width);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * If a clock has two dividers, the combined number of fractional
+ * bits must be representable in a 32-bit unsigned value. This
+ * is because we scale up a dividend using both dividers before
+ * dividing to improve accuracy, and we need to avoid overflow.
+ */
+static bool kona_dividers_valid(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri = bcm_clk->peri;
+ struct bcm_clk_div *div;
+ struct bcm_clk_div *pre_div;
+ u32 limit;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+ if (!divider_exists(&peri->div) || !divider_exists(&peri->pre_div))
+ return true;
+
+ div = &peri->div;
+ pre_div = &peri->pre_div;
+ if (divider_is_fixed(div) || divider_is_fixed(pre_div))
+ return true;
+
+ limit = BITS_PER_BYTE * sizeof(u32);
+
+ return div->frac_width + pre_div->frac_width <= limit;
+}
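+/*
+ * For illustration: frac_width values of 16 and 16 pass this check,
+ * since a 32-bit rate scaled up by 32 fractional bits still fits in
+ * the 64-bit dividend; 20 + 16 = 36 fractional bits could overflow it.
+ */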
+
+
+/* A trigger just needs to represent a valid bit position */
+static bool trig_valid(struct bcm_clk_trig *trig, const char *field_name,
+ const char *clock_name)
+{
+ return bit_posn_valid(trig->bit, field_name, clock_name);
+}
+
+/* Determine whether the set of peripheral clock registers are valid. */
+static bool
+peri_clk_data_valid(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri;
+ struct bcm_clk_gate *gate;
+ struct bcm_clk_sel *sel;
+ struct bcm_clk_div *div;
+ struct bcm_clk_div *pre_div;
+ struct bcm_clk_trig *trig;
+ const char *name;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+ /*
+ * First validate register offsets. This is the only place
+ * where we need something from the ccu, so we do these
+ * together.
+ */
+ if (!peri_clk_data_offsets_valid(bcm_clk))
+ return false;
+
+ peri = bcm_clk->peri;
+ name = bcm_clk->name;
+ gate = &peri->gate;
+ if (gate_exists(gate) && !gate_valid(gate, "gate", name))
+ return false;
+
+ sel = &peri->sel;
+ if (selector_exists(sel)) {
+ if (!sel_valid(sel, "selector", name))
+ return false;
+
+ } else if (sel->parent_count > 1) {
+ pr_err("%s: multiple parents but no selector for %s\n",
+ __func__, name);
+
+ return false;
+ }
+
+ div = &peri->div;
+ pre_div = &peri->pre_div;
+ if (divider_exists(div)) {
+ if (!div_valid(div, "divider", name))
+ return false;
+
+ if (divider_exists(pre_div))
+ if (!div_valid(pre_div, "pre-divider", name))
+ return false;
+ } else if (divider_exists(pre_div)) {
+ pr_err("%s: pre-divider but no divider for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ trig = &peri->trig;
+ if (trigger_exists(trig)) {
+ if (!trig_valid(trig, "trigger", name))
+ return false;
+
+ if (trigger_exists(&peri->pre_trig)) {
+ if (!trig_valid(trig, "pre-trigger", name)) {
+ return false;
+ }
+ }
+ if (!clk_requires_trigger(bcm_clk)) {
+ pr_warn("%s: ignoring trigger for %s (not needed)\n",
+ __func__, name);
+ trigger_clear_exists(trig);
+ }
+ } else if (trigger_exists(&peri->pre_trig)) {
+ pr_err("%s: pre-trigger but no trigger for %s\n", __func__,
+ name);
+ return false;
+ } else if (clk_requires_trigger(bcm_clk)) {
+ pr_err("%s: required trigger missing for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ return kona_dividers_valid(bcm_clk);
+}
+
+static bool kona_clk_valid(struct kona_clk *bcm_clk)
+{
+ switch (bcm_clk->type) {
+ case bcm_clk_peri:
+ if (!peri_clk_data_valid(bcm_clk))
+ return false;
+ break;
+ default:
+ pr_err("%s: unrecognized clock type (%d)\n", __func__,
+ (int)bcm_clk->type);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Scan an array of parent clock names to determine whether there
+ * are any entries containing BAD_CLK_NAME. Such entries are
+ * placeholders for non-supported clocks. Keep track of the
+ * position of each clock name in the original array.
+ *
+ * Allocates an array of pointers to hold the names of all
+ * non-null entries in the original array, and returns a pointer to
+ * that array in *names. This will be used for registering the
+ * clock with the common clock code. On successful return,
+ * *count indicates how many entries are in that names array.
+ *
+ * If there is more than one entry in the resulting names array,
+ * another array is allocated to record the parent selector value
+ * for each (defined) parent clock. This is the value that
+ * represents this parent clock in the clock's source selector
+ * register. The position of the clock in the original parent array
+ * defines that selector value. The number of entries in this array
+ * is the same as the number of entries in the parent names array.
+ *
+ * The array of selector values is returned. If the clock has no
+ * parents, no selector is required and a null pointer is returned.
+ *
+ * Returns a null pointer if the clock names array supplied was
+ * null. (This is not an error.)
+ *
+ * Returns a pointer-coded error if an error occurs.
+ */
+static u32 *parent_process(const char *clocks[],
+ u32 *count, const char ***names)
+{
+ static const char **parent_names;
+ static u32 *parent_sel;
+ const char **clock;
+ u32 parent_count;
+ u32 bad_count = 0;
+ u32 orig_count;
+ u32 i;
+ u32 j;
+
+ *count = 0; /* In case of early return */
+ *names = NULL;
+ if (!clocks)
+ return NULL;
+
+ /*
+ * Count the number of names in the null-terminated array,
+ * and find out how many of those are actually clock names.
+ */
+ for (clock = clocks; *clock; clock++)
+ if (*clock == BAD_CLK_NAME)
+ bad_count++;
+ orig_count = (u32)(clock - clocks);
+ parent_count = orig_count - bad_count;
+
+ /* If all clocks are unsupported, we treat it as no clock */
+ if (!parent_count)
+ return NULL;
+
+ /* Avoid exceeding our parent clock limit */
+ if (parent_count > PARENT_COUNT_MAX) {
+ pr_err("%s: too many parents (%u > %u)\n", __func__,
+ parent_count, PARENT_COUNT_MAX);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * There is one parent name for each defined parent clock.
+ * We also maintain an array containing the selector value
+ * for each defined clock. If there's only one clock, the
+ * selector is not required, but we allocate space for the
+ * array anyway to keep things simple.
+ */
+ parent_names = kmalloc(parent_count * sizeof(*parent_names), GFP_KERNEL);
+ if (!parent_names) {
+ pr_err("%s: error allocating %u parent names\n", __func__,
+ parent_count);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* There is at least one parent, so allocate a selector array */
+
+ parent_sel = kmalloc(parent_count * sizeof(*parent_sel), GFP_KERNEL);
+ if (!parent_sel) {
+ pr_err("%s: error allocating %u parent selectors\n", __func__,
+ parent_count);
+ kfree(parent_names);
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Now fill in the parent names and selector arrays */
+ for (i = 0, j = 0; i < orig_count; i++) {
+ if (clocks[i] != BAD_CLK_NAME) {
+ parent_names[j] = clocks[i];
+ parent_sel[j] = i;
+ j++;
+ }
+ }
+ *names = parent_names;
+ *count = parent_count;
+
+ return parent_sel;
+}
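+/*
+ * Worked example (hypothetical input): given
+ *	clocks[] = { "ref_crystal", BAD_CLK_NAME, "var_96m", NULL }
+ * this reports *count = 2, *names = { "ref_crystal", "var_96m" }, and
+ * returns parent_sel = { 0, 2 }: selecting "var_96m" means writing 2,
+ * its position in the original list, into the selector field.
+ */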
+
+static int
+clk_sel_setup(const char **clocks, struct bcm_clk_sel *sel,
+ struct clk_init_data *init_data)
+{
+ const char **parent_names = NULL;
+ u32 parent_count = 0;
+ u32 *parent_sel;
+
+ /*
+ * If a peripheral clock has multiple parents, the value
+ * used by the hardware to select that parent is represented
+ * by the parent clock's position in the "clocks" list. Some
+ * values don't have defined or supported clocks; these will
+ * have BAD_CLK_NAME entries in the parents[] array. The
+ * list is terminated by a NULL entry.
+ *
+ * We need to supply (only) the names of defined parent
+ * clocks when registering a clock though, so we use an
+ * array of parent selector values to map between the
+ * indexes the common clock code uses and the selector
+ * values we need.
+ */
+ parent_sel = parent_process(clocks, &parent_count, &parent_names);
+ if (IS_ERR(parent_sel)) {
+ int ret = PTR_ERR(parent_sel);
+
+ pr_err("%s: error processing parent clocks for %s (%d)\n",
+ __func__, init_data->name, ret);
+
+ return ret;
+ }
+
+ init_data->parent_names = parent_names;
+ init_data->num_parents = parent_count;
+
+ sel->parent_count = parent_count;
+ sel->parent_sel = parent_sel;
+
+ return 0;
+}
+
+static void clk_sel_teardown(struct bcm_clk_sel *sel,
+ struct clk_init_data *init_data)
+{
+ kfree(sel->parent_sel);
+ sel->parent_sel = NULL;
+ sel->parent_count = 0;
+
+ init_data->num_parents = 0;
+ kfree(init_data->parent_names);
+ init_data->parent_names = NULL;
+}
+
+static void peri_clk_teardown(struct peri_clk_data *data,
+ struct clk_init_data *init_data)
+{
+ clk_sel_teardown(&data->sel, init_data);
+ init_data->ops = NULL;
+}
+
+/*
+ * Caller is responsible for freeing the parent_names[] and
+ * parent_sel[] arrays that may be assigned to the peripheral clock's
+ * "data" structure when the clock has one or more parent clocks
+ * associated with it.
+ */
+static int peri_clk_setup(struct ccu_data *ccu, struct peri_clk_data *data,
+ struct clk_init_data *init_data)
+{
+ init_data->ops = &kona_peri_clk_ops;
+ init_data->flags = CLK_IGNORE_UNUSED;
+
+ return clk_sel_setup(data->clocks, &data->sel, init_data);
+}
+
+static void bcm_clk_teardown(struct kona_clk *bcm_clk)
+{
+ switch (bcm_clk->type) {
+ case bcm_clk_peri:
+ peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data);
+ break;
+ default:
+ break;
+ }
+ bcm_clk->data = NULL;
+ bcm_clk->type = bcm_clk_none;
+}
+
+static void kona_clk_teardown(struct clk *clk)
+{
+ struct clk_hw *hw;
+ struct kona_clk *bcm_clk;
+
+ if (!clk)
+ return;
+
+ hw = __clk_get_hw(clk);
+ if (!hw) {
+ pr_err("%s: clk %p has null hw pointer\n", __func__, clk);
+ return;
+ }
+ clk_unregister(clk);
+
+ bcm_clk = to_kona_clk(hw);
+ bcm_clk_teardown(bcm_clk);
+}
+
+struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
+ enum bcm_clk_type type, void *data)
+{
+ struct kona_clk *bcm_clk;
+ struct clk_init_data *init_data;
+ struct clk *clk = NULL;
+
+ bcm_clk = kzalloc(sizeof(*bcm_clk), GFP_KERNEL);
+ if (!bcm_clk) {
+ pr_err("%s: failed to allocate bcm_clk for %s\n", __func__,
+ name);
+ return NULL;
+ }
+ bcm_clk->ccu = ccu;
+ bcm_clk->name = name;
+
+ init_data = &bcm_clk->init_data;
+ init_data->name = name;
+ switch (type) {
+ case bcm_clk_peri:
+ if (peri_clk_setup(ccu, data, init_data))
+ goto out_free;
+ break;
+ default:
+ data = NULL;
+ break;
+ }
+ bcm_clk->type = type;
+ bcm_clk->data = data;
+
+ /* Make sure everything makes sense before we set it up */
+ if (!kona_clk_valid(bcm_clk)) {
+ pr_err("%s: clock data invalid for %s\n", __func__, name);
+ goto out_teardown;
+ }
+
+ bcm_clk->hw.init = init_data;
+ clk = clk_register(NULL, &bcm_clk->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: error registering clock %s (%ld)\n", __func__,
+ name, PTR_ERR(clk));
+ goto out_teardown;
+ }
+ BUG_ON(!clk);
+
+ return clk;
+out_teardown:
+ bcm_clk_teardown(bcm_clk);
+out_free:
+ kfree(bcm_clk);
+
+ return NULL;
+}
+
+static void ccu_clks_teardown(struct ccu_data *ccu)
+{
+ u32 i;
+
+ for (i = 0; i < ccu->data.clk_num; i++)
+ kona_clk_teardown(ccu->data.clks[i]);
+ kfree(ccu->data.clks);
+}
+
+static void kona_ccu_teardown(struct ccu_data *ccu)
+{
+ if (!ccu)
+ return;
+
+ if (!ccu->base)
+ goto done;
+
+ of_clk_del_provider(ccu->node); /* safe if never added */
+ ccu_clks_teardown(ccu);
+ list_del(&ccu->links);
+ of_node_put(ccu->node);
+ iounmap(ccu->base);
+done:
+ kfree(ccu->name);
+ kfree(ccu);
+}
+
+/*
+ * Set up a CCU. Call the provided ccu_clks_setup callback to
+ * initialize the array of clocks provided by the CCU.
+ */
+void __init kona_dt_ccu_setup(struct device_node *node,
+ int (*ccu_clks_setup)(struct ccu_data *))
+{
+ struct ccu_data *ccu;
+ struct resource res = { 0 };
+ resource_size_t range;
+ int ret;
+
+ ccu = kzalloc(sizeof(*ccu), GFP_KERNEL);
+ if (ccu)
+ ccu->name = kstrdup(node->name, GFP_KERNEL);
+ if (!ccu || !ccu->name) {
+ pr_err("%s: unable to allocate CCU struct for %s\n",
+ __func__, node->name);
+ kfree(ccu);
+
+ return;
+ }
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ pr_err("%s: no valid CCU registers found for %s\n", __func__,
+ node->name);
+ goto out_err;
+ }
+
+ range = resource_size(&res);
+ if (range > (resource_size_t)U32_MAX) {
+ pr_err("%s: address range too large for %s\n", __func__,
+ node->name);
+ goto out_err;
+ }
+
+ ccu->range = (u32)range;
+ ccu->base = ioremap(res.start, ccu->range);
+ if (!ccu->base) {
+ pr_err("%s: unable to map CCU registers for %s\n", __func__,
+ node->name);
+ goto out_err;
+ }
+
+ spin_lock_init(&ccu->lock);
+ INIT_LIST_HEAD(&ccu->links);
+ ccu->node = of_node_get(node);
+
+ list_add_tail(&ccu->links, &ccu_list);
+
+ /* Set up clocks array (in ccu->data) */
+ if (ccu_clks_setup(ccu))
+ goto out_err;
+
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, &ccu->data);
+ if (ret) {
+ pr_err("%s: error adding ccu %s as provider (%d)\n", __func__,
+ node->name, ret);
+ goto out_err;
+ }
+
+ if (!kona_ccu_init(ccu))
+ pr_err("Broadcom %s initialization had errors\n", node->name);
+
+ return;
+out_err:
+ kona_ccu_teardown(ccu);
+ pr_err("Broadcom %s setup aborted\n", node->name);
+}
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
new file mode 100644
index 000000000000..e3d339e08309
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona.c
@@ -0,0 +1,1033 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "clk-kona.h"
+
+#include <linux/delay.h>
+
+#define CCU_ACCESS_PASSWORD 0xA5A500
+#define CLK_GATE_DELAY_LOOP 2000
+
+/* Bitfield operations */
+
+/* Produces a mask of set bits covering a range of a 32-bit value */
+static inline u32 bitfield_mask(u32 shift, u32 width)
+{
+ return ((1 << width) - 1) << shift;
+}
+
+/* Extract the value of a bitfield found within a given register value */
+static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
+{
+ return (reg_val & bitfield_mask(shift, width)) >> shift;
+}
+
+/* Replace the value of a bitfield found within a given register value */
+static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
+{
+ u32 mask = bitfield_mask(shift, width);
+
+ return (reg_val & ~mask) | (val << shift);
+}
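+/*
+ * For illustration: a 14-bit field starting at bit 4 gives
+ * bitfield_mask(4, 14) == 0x0003fff0; bitfield_extract() pulls bits
+ * 17:4 out of a register word, and bitfield_replace() writes a new
+ * value into that range while preserving the bits outside it.
+ */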
+
+/* Divider and scaling helpers */
+
+/*
+ * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values
+ * unsigned. Note that unlike do_div(), the remainder is discarded
+ * and the return value is the quotient (not the remainder).
+ */
+u64 do_div_round_closest(u64 dividend, unsigned long divisor)
+{
+ u64 result;
+
+ result = dividend + ((u64)divisor >> 1);
+ (void)do_div(result, divisor);
+
+ return result;
+}
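+/*
+ * For illustration: do_div_round_closest(1005, 8) returns 126
+ * (1005 + 8/2 = 1009, and 1009 / 8 truncates to 126), whereas a
+ * plain do_div() would truncate 1005 / 8 down to 125.
+ */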
+
+/* Convert a divider into the scaled divisor value it represents. */
+static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
+{
+ return (u64)reg_div + ((u64)1 << div->frac_width);
+}
+
+/*
+ * Build a scaled divider value as close as possible to the
+ * given whole part (div_value) and fractional part (expressed
+ * in billionths).
+ */
+u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
+{
+ u64 combined;
+
+ BUG_ON(!div_value);
+ BUG_ON(billionths >= BILLION);
+
+ combined = (u64)div_value * BILLION + billionths;
+ combined <<= div->frac_width;
+
+ return do_div_round_closest(combined, BILLION);
+}
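+/*
+ * Worked example (assuming frac_width == 3): a requested divisor of
+ * 5.25 arrives as scaled_div_build(div, 5, 250000000); combined is
+ * (5 * BILLION + 250000000) << 3 = 42 * BILLION, so the scaled value
+ * is 42 (5.25 * 8). The register field would then hold 42 - 8 = 34,
+ * and scaled_div_value(div, 34) recovers 42.
+ */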
+
+/* The scaled minimum divisor representable by a divider */
+static inline u64
+scaled_div_min(struct bcm_clk_div *div)
+{
+ if (divider_is_fixed(div))
+ return (u64)div->fixed;
+
+ return scaled_div_value(div, 0);
+}
+
+/* The scaled maximum divisor representable by a divider */
+u64 scaled_div_max(struct bcm_clk_div *div)
+{
+ u32 reg_div;
+
+ if (divider_is_fixed(div))
+ return (u64)div->fixed;
+
+ reg_div = ((u32)1 << div->width) - 1;
+
+ return scaled_div_value(div, reg_div);
+}
+
+/*
+ * Convert a scaled divisor into its divider representation as
+ * stored in a divider register field.
+ */
+static inline u32
+divider(struct bcm_clk_div *div, u64 scaled_div)
+{
+ BUG_ON(scaled_div < scaled_div_min(div));
+ BUG_ON(scaled_div > scaled_div_max(div));
+
+ return (u32)(scaled_div - ((u64)1 << div->frac_width));
+}
+
+/* Return a rate scaled for use when dividing by a scaled divisor. */
+static inline u64
+scale_rate(struct bcm_clk_div *div, u32 rate)
+{
+ if (divider_is_fixed(div))
+ return (u64)rate;
+
+ return (u64)rate << div->frac_width;
+}
+
+/* CCU access */
+
+/* Read a 32-bit register value from a CCU's address space. */
+static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
+{
+ return readl(ccu->base + reg_offset);
+}
+
+/* Write a 32-bit register value into a CCU's address space. */
+static inline void
+__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
+{
+ writel(reg_val, ccu->base + reg_offset);
+}
+
+static inline unsigned long ccu_lock(struct ccu_data *ccu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccu->lock, flags);
+
+ return flags;
+}
+static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
+{
+ spin_unlock_irqrestore(&ccu->lock, flags);
+}
+
+/*
+ * Enable/disable write access to CCU protected registers. The
+ * WR_ACCESS register for all CCUs is at offset 0.
+ */
+static inline void __ccu_write_enable(struct ccu_data *ccu)
+{
+ if (ccu->write_enabled) {
+ pr_err("%s: access already enabled for %s\n", __func__,
+ ccu->name);
+ return;
+ }
+ ccu->write_enabled = true;
+ __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
+}
+
+static inline void __ccu_write_disable(struct ccu_data *ccu)
+{
+ if (!ccu->write_enabled) {
+ pr_err("%s: access wasn't enabled for %s\n", __func__,
+ ccu->name);
+ return;
+ }
+
+ __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
+ ccu->write_enabled = false;
+}
+
+/*
+ * Poll a register in a CCU's address space, returning when the
+ * specified bit in that register's value is set (or clear). Delay
+ * a microsecond after each read of the register. Returns true if
+ * successful, or false if we gave up trying.
+ *
+ * Caller must ensure the CCU lock is held.
+ */
+static inline bool
+__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
+{
+ unsigned int tries;
+ u32 bit_mask = 1 << bit;
+
+ for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
+ u32 val;
+ bool bit_val;
+
+ val = __ccu_read(ccu, reg_offset);
+ bit_val = (val & bit_mask) != 0;
+ if (bit_val == want)
+ return true;
+ udelay(1);
+ }
+ return false;
+}
+
+/* Gate operations */
+
+/* Determine whether a clock is gated. CCU lock must be held. */
+static bool
+__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+ u32 bit_mask;
+ u32 reg_val;
+
+ /* If there is no gate we can assume it's enabled. */
+ if (!gate_exists(gate))
+ return true;
+
+ bit_mask = 1 << gate->status_bit;
+ reg_val = __ccu_read(ccu, gate->offset);
+
+ return (reg_val & bit_mask) != 0;
+}
+
+/* Determine whether a clock is gated. */
+static bool
+is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+ unsigned long flags;
+ bool ret;
+
+ /* Avoid taking the lock if we can */
+ if (!gate_exists(gate))
+ return true;
+
+ flags = ccu_lock(ccu);
+ ret = __is_clk_gate_enabled(ccu, gate);
+ ccu_unlock(ccu, flags);
+
+ return ret;
+}
+
+/*
+ * Commit our desired gate state to the hardware.
+ * Returns true if successful, false otherwise.
+ */
+static bool
+__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+ u32 reg_val;
+ u32 mask;
+ bool enabled = false;
+
+ BUG_ON(!gate_exists(gate));
+ if (!gate_is_sw_controllable(gate))
+ return true; /* Nothing we can change */
+
+ reg_val = __ccu_read(ccu, gate->offset);
+
+ /* For a hardware/software gate, set which is in control */
+ if (gate_is_hw_controllable(gate)) {
+ mask = (u32)1 << gate->hw_sw_sel_bit;
+ if (gate_is_sw_managed(gate))
+ reg_val |= mask;
+ else
+ reg_val &= ~mask;
+ }
+
+ /*
+ * If software is in control, enable or disable the gate.
+ * If hardware is, clear the enabled bit for good measure.
+ * If a software controlled gate can't be disabled, we're
+ * required to write a 0 into the enable bit (but the gate
+ * will be enabled).
+ */
+ mask = (u32)1 << gate->en_bit;
+ if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
+ !gate_is_no_disable(gate))
+ reg_val |= mask;
+ else
+ reg_val &= ~mask;
+
+ __ccu_write(ccu, gate->offset, reg_val);
+
+ /* For a hardware controlled gate, we're done */
+ if (!gate_is_sw_managed(gate))
+ return true;
+
+ /* Otherwise wait for the gate to be in desired state */
+ return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
+}
+
+/*
+ * Initialize a gate. Our desired state (hardware/software select,
+ * and if software, its enable state) is committed to hardware
+ * without the usual checks to see if it's already set up that way.
+ * Returns true if successful, false otherwise.
+ */
+static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+ if (!gate_exists(gate))
+ return true;
+ return __gate_commit(ccu, gate);
+}
+
+/*
+ * Set a gate to enabled or disabled state. Does nothing if the
+ * gate is not currently under software control, or if it is already
+ * in the requested state. Returns true if successful, false
+ * otherwise. CCU lock must be held.
+ */
+static bool
+__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
+{
+ bool ret;
+
+ if (!gate_exists(gate) || !gate_is_sw_managed(gate))
+ return true; /* Nothing to do */
+
+ if (!enable && gate_is_no_disable(gate)) {
+ pr_warn("%s: invalid gate disable request (ignoring)\n",
+ __func__);
+ return true;
+ }
+
+ if (enable == gate_is_enabled(gate))
+ return true; /* No change */
+
+ gate_flip_enabled(gate);
+ ret = __gate_commit(ccu, gate);
+ if (!ret)
+ gate_flip_enabled(gate); /* Revert the change */
+
+ return ret;
+}
+
+/* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */
+static int clk_gate(struct ccu_data *ccu, const char *name,
+ struct bcm_clk_gate *gate, bool enable)
+{
+ unsigned long flags;
+ bool success;
+
+ /*
+ * Avoid taking the lock if we can. We quietly ignore
+ * requests to change state that don't make sense.
+ */
+ if (!gate_exists(gate) || !gate_is_sw_managed(gate))
+ return 0;
+ if (!enable && gate_is_no_disable(gate))
+ return 0;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ success = __clk_gate(ccu, gate, enable);
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+
+ if (success)
+ return 0;
+
+ pr_err("%s: failed to %s gate for %s\n", __func__,
+ enable ? "enable" : "disable", name);
+
+ return -EIO;
+}
+
+/* Trigger operations */
+
+/*
+ * Caller must ensure CCU lock is held and access is enabled.
+ * Returns true if successful, false otherwise.
+ */
+static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
+{
+ /* Trigger the clock and wait for it to finish */
+ __ccu_write(ccu, trig->offset, 1 << trig->bit);
+
+ return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
+}
+
+/* Divider operations */
+
+/* Read a divider value and return the scaled divisor it represents. */
+static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
+{
+ unsigned long flags;
+ u32 reg_val;
+ u32 reg_div;
+
+ if (divider_is_fixed(div))
+ return (u64)div->fixed;
+
+ flags = ccu_lock(ccu);
+ reg_val = __ccu_read(ccu, div->offset);
+ ccu_unlock(ccu, flags);
+
+ /* Extract the full divider field from the register value */
+ reg_div = bitfield_extract(reg_val, div->shift, div->width);
+
+ /* Return the scaled divisor value it represents */
+ return scaled_div_value(div, reg_div);
+}
+
+/*
+ * Convert a divider's scaled divisor value into its recorded form
+ * and commit it into the hardware divider register.
+ *
+ * Returns 0 on success. Returns -EINVAL for invalid arguments.
+ * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
+ */
+static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_div *div, struct bcm_clk_trig *trig)
+{
+ bool enabled;
+ u32 reg_div;
+ u32 reg_val;
+ int ret = 0;
+
+ BUG_ON(divider_is_fixed(div));
+
+ /*
+ * If we're just initializing the divider, and no initial
+ * state was defined in the device tree, we just find out
+ * what its current value is rather than updating it.
+ */
+ if (div->scaled_div == BAD_SCALED_DIV_VALUE) {
+ reg_val = __ccu_read(ccu, div->offset);
+ reg_div = bitfield_extract(reg_val, div->shift, div->width);
+ div->scaled_div = scaled_div_value(div, reg_div);
+
+ return 0;
+ }
+
+ /* Convert the scaled divisor to the value we need to record */
+ reg_div = divider(div, div->scaled_div);
+
+ /* Clock needs to be enabled before changing the rate */
+ enabled = __is_clk_gate_enabled(ccu, gate);
+ if (!enabled && !__clk_gate(ccu, gate, true)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /* Replace the divider value and record the result */
+ reg_val = __ccu_read(ccu, div->offset);
+ reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div);
+ __ccu_write(ccu, div->offset, reg_val);
+
+ /* If the trigger fails we still want to disable the gate */
+ if (!__clk_trigger(ccu, trig))
+ ret = -EIO;
+
+ /* Disable the clock again if it was disabled to begin with */
+ if (!enabled && !__clk_gate(ccu, gate, false))
+ ret = ret ? ret : -ENXIO; /* return first error */
+out:
+ return ret;
+}
+
+/*
+ * Initialize a divider by committing our desired state to hardware
+ * without the usual checks to see if it's already set up that way.
+ * Returns true if successful, false otherwise.
+ */
+static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_div *div, struct bcm_clk_trig *trig)
+{
+ if (!divider_exists(div) || divider_is_fixed(div))
+ return true;
+ return !__div_commit(ccu, gate, div, trig);
+}
+
+static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_div *div, struct bcm_clk_trig *trig,
+ u64 scaled_div)
+{
+ unsigned long flags;
+ u64 previous;
+ int ret;
+
+ BUG_ON(divider_is_fixed(div));
+
+ previous = div->scaled_div;
+ if (previous == scaled_div)
+ return 0; /* No change */
+
+ div->scaled_div = scaled_div;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ ret = __div_commit(ccu, gate, div, trig);
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+
+ if (ret)
+ div->scaled_div = previous; /* Revert the change */
+
+ return ret;
+
+}
+
+/* Common clock rate helpers */
+
+/*
+ * Implement the common clock framework recalc_rate method, taking
+ * into account a divider and an optional pre-divider. The
+ * pre-divider register pointer may be NULL.
+ */
+static unsigned long clk_recalc_rate(struct ccu_data *ccu,
+ struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
+ unsigned long parent_rate)
+{
+ u64 scaled_parent_rate;
+ u64 scaled_div;
+ u64 result;
+
+ if (!divider_exists(div))
+ return parent_rate;
+
+ if (parent_rate > (unsigned long)LONG_MAX)
+ return 0; /* actually this would be a caller bug */
+
+ /*
+ * If there is a pre-divider, divide the scaled parent rate
+ * by the pre-divider value first. In this case--to improve
+ * accuracy--scale the parent rate by *both* the pre-divider
+ * value and the divider before actually computing the
+ * result of the pre-divider.
+ *
+ * If there's only one divider, just scale the parent rate.
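+ *
+ * For example: a 96 MHz parent, a pre-divider whose scaled divisor
+ * is 2 (frac_width 0), and a frac_width 3 divider whose scaled
+ * divisor is 20 (i.e. 2.5). The doubly-scaled parent rate is
+ * (96000000 << 3) / 2 = 384000000, and dividing that by 20 yields
+ * 19200000 (96 / 2 / 2.5 = 19.2 MHz).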
+ */
+ if (pre_div && divider_exists(pre_div)) {
+ u64 scaled_rate;
+
+ scaled_rate = scale_rate(pre_div, parent_rate);
+ scaled_rate = scale_rate(div, scaled_rate);
+ scaled_div = divider_read_scaled(ccu, pre_div);
+ scaled_parent_rate = do_div_round_closest(scaled_rate,
+ scaled_div);
+ } else {
+ scaled_parent_rate = scale_rate(div, parent_rate);
+ }
+
+ /*
+ * Get the scaled divisor value, and divide the scaled
+ * parent rate by that to determine this clock's resulting
+ * rate.
+ */
+ scaled_div = divider_read_scaled(ccu, div);
+ result = do_div_round_closest(scaled_parent_rate, scaled_div);
+
+ return (unsigned long)result;
+}
+
+/*
+ * Compute the output rate produced when a given parent rate is fed
+ * into two dividers. The pre-divider can be NULL, and even if it's
+ * non-null it may be nonexistent. It's also OK for the divider to
+ * be nonexistent, and in that case the pre-divider is also ignored.
+ *
+ * If scaled_div is non-null, it is used to return the scaled divisor
+ * value used by the (downstream) divider to produce that rate.
+ */
+static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
+ struct bcm_clk_div *pre_div,
+ unsigned long rate, unsigned long parent_rate,
+ u64 *scaled_div)
+{
+ u64 scaled_parent_rate;
+ u64 min_scaled_div;
+ u64 max_scaled_div;
+ u64 best_scaled_div;
+ u64 result;
+
+ BUG_ON(!divider_exists(div));
+ BUG_ON(!rate);
+ BUG_ON(parent_rate > (u64)LONG_MAX);
+
+ /*
+ * If there is a pre-divider, divide the scaled parent rate
+ * by the pre-divider value first. In this case--to improve
+ * accuracy--scale the parent rate by *both* the pre-divider
+ * value and the divider before actually computing the
+ * result of the pre-divider.
+ *
+ * If there's only one divider, just scale the parent rate.
+ *
+ * For simplicity we treat the pre-divider as fixed (for now).
+ */
+ if (divider_exists(pre_div)) {
+ u64 scaled_rate;
+ u64 scaled_pre_div;
+
+ scaled_rate = scale_rate(pre_div, parent_rate);
+ scaled_rate = scale_rate(div, scaled_rate);
+ scaled_pre_div = divider_read_scaled(ccu, pre_div);
+ scaled_parent_rate = do_div_round_closest(scaled_rate,
+ scaled_pre_div);
+ } else {
+ scaled_parent_rate = scale_rate(div, parent_rate);
+ }
+
+ /*
+ * Compute the best possible divider and ensure it is in
+ * range. A fixed divider can't be changed, so just report
+ * the best we can do.
+ */
+ if (!divider_is_fixed(div)) {
+ best_scaled_div = do_div_round_closest(scaled_parent_rate,
+ rate);
+ min_scaled_div = scaled_div_min(div);
+ max_scaled_div = scaled_div_max(div);
+ if (best_scaled_div > max_scaled_div)
+ best_scaled_div = max_scaled_div;
+ else if (best_scaled_div < min_scaled_div)
+ best_scaled_div = min_scaled_div;
+ } else {
+ best_scaled_div = divider_read_scaled(ccu, div);
+ }
+
+ /* OK, figure out the resulting rate */
+ result = do_div_round_closest(scaled_parent_rate, best_scaled_div);
+
+ if (scaled_div)
+ *scaled_div = best_scaled_div;
+
+ return (long)result;
+}
+
+/* Common clock parent helpers */
+
+/*
+ * For a given parent selector (register field) value, find the
+ * index into a selector's parent_sel array that contains it.
+ * Returns the index, or BAD_CLK_INDEX if it's not found.
+ */
+static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
+{
+ u8 i;
+
+ BUG_ON(sel->parent_count > (u32)U8_MAX);
+ for (i = 0; i < sel->parent_count; i++)
+ if (sel->parent_sel[i] == parent_sel)
+ return i;
+ return BAD_CLK_INDEX;
+}
+
+/*
+ * Fetch the current value of the selector, and translate that into
+ * its corresponding index in the parent array we registered with
+ * the clock framework.
+ *
+ * Returns parent array index that corresponds with the value found,
+ * or BAD_CLK_INDEX if the found value is out of range.
+ */
+static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
+{
+ unsigned long flags;
+ u32 reg_val;
+ u32 parent_sel;
+ u8 index;
+
+ /* If there's no selector, there's only one parent */
+ if (!selector_exists(sel))
+ return 0;
+
+ /* Get the value in the selector register */
+ flags = ccu_lock(ccu);
+ reg_val = __ccu_read(ccu, sel->offset);
+ ccu_unlock(ccu, flags);
+
+ parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
+
+ /* Look up that selector's parent array index and return it */
+ index = parent_index(sel, parent_sel);
+ if (index == BAD_CLK_INDEX)
+ pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
+ __func__, parent_sel, ccu->name, sel->offset);
+
+ return index;
+}
+
+/*
+ * Commit our desired selector value to the hardware.
+ *
+ * Returns 0 on success. Returns -EINVAL for invalid arguments.
+ * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
+ */
+static int
+__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
+{
+ u32 parent_sel;
+ u32 reg_val;
+ bool enabled;
+ int ret = 0;
+
+ BUG_ON(!selector_exists(sel));
+
+ /*
+ * If we're just initializing the selector, and no initial
+ * state was defined in the device tree, we just find out
+ * what its current value is rather than updating it.
+ */
+ if (sel->clk_index == BAD_CLK_INDEX) {
+ u8 index;
+
+ reg_val = __ccu_read(ccu, sel->offset);
+ parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
+ index = parent_index(sel, parent_sel);
+ if (index == BAD_CLK_INDEX)
+ return -EINVAL;
+ sel->clk_index = index;
+
+ return 0;
+ }
+
+ BUG_ON((u32)sel->clk_index >= sel->parent_count);
+ parent_sel = sel->parent_sel[sel->clk_index];
+
+ /* Clock needs to be enabled before changing the parent */
+ enabled = __is_clk_gate_enabled(ccu, gate);
+ if (!enabled && !__clk_gate(ccu, gate, true))
+ return -ENXIO;
+
+ /* Replace the selector value and record the result */
+ reg_val = __ccu_read(ccu, sel->offset);
+ reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
+ __ccu_write(ccu, sel->offset, reg_val);
+
+ /* If the trigger fails we still want to disable the gate */
+ if (!__clk_trigger(ccu, trig))
+ ret = -EIO;
+
+ /* Disable the clock again if it was disabled to begin with */
+ if (!enabled && !__clk_gate(ccu, gate, false))
+ ret = ret ? ret : -ENXIO; /* return first error */
+
+ return ret;
+}
+
+/*
+ * Initialize a selector by committing our desired state to hardware
+ * without the usual checks to see if it's already set up that way.
+ * Returns true if successful, false otherwise.
+ */
+static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
+{
+ if (!selector_exists(sel))
+ return true;
+ return !__sel_commit(ccu, gate, sel, trig);
+}
+
+/*
+ * Write a new value into a selector register to switch to a
+ * different parent clock. Returns 0 on success, or an error code
+ * (from __sel_commit()) otherwise.
+ */
+static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
+ u8 index)
+{
+ unsigned long flags;
+ u8 previous;
+ int ret;
+
+ previous = sel->clk_index;
+ if (previous == index)
+ return 0; /* No change */
+
+ sel->clk_index = index;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ ret = __sel_commit(ccu, gate, sel, trig);
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+
+ if (ret)
+ sel->clk_index = previous; /* Revert the change */
+
+ return ret;
+}
+
+/* Clock operations */
+
+static int kona_peri_clk_enable(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+
+ return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true);
+}
+
+static void kona_peri_clk_disable(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+
+ (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false);
+}
+
+static int kona_peri_clk_is_enabled(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+
+ return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
+}
+
+static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->peri;
+
+ return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
+ parent_rate);
+}
+
+static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_div *div = &bcm_clk->peri->div;
+
+ if (!divider_exists(div))
+ return __clk_get_rate(hw->clk);
+
+ /* Quietly avoid a zero rate */
+ return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div,
+ rate ? rate : 1, *parent_rate, NULL);
+}
+
+static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->peri;
+ struct bcm_clk_sel *sel = &data->sel;
+ struct bcm_clk_trig *trig;
+ int ret;
+
+ BUG_ON(index >= sel->parent_count);
+
+ /* If there's only one parent we don't require a selector */
+ if (!selector_exists(sel))
+ return 0;
+
+ /*
+ * The regular trigger is used by default, but if there's a
+ * pre-trigger we want to use that instead.
+ */
+ trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
+ : &data->trig;
+
+ ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
+ if (ret == -ENXIO) {
+ pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name);
+ ret = -EIO; /* Don't proliferate weird errors */
+ } else if (ret == -EIO) {
+ pr_err("%s: %strigger failed for %s\n", __func__,
+ trig == &data->pre_trig ? "pre-" : "",
+ bcm_clk->name);
+ }
+
+ return ret;
+}
+
+static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->peri;
+ u8 index;
+
+ index = selector_read_index(bcm_clk->ccu, &data->sel);
+
+ /* Not all callers would handle an out-of-range value gracefully */
+ return index == BAD_CLK_INDEX ? 0 : index;
+}
+
+static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->peri;
+ struct bcm_clk_div *div = &data->div;
+ u64 scaled_div = 0;
+ int ret;
+
+ if (parent_rate > (unsigned long)LONG_MAX)
+ return -EINVAL;
+
+ if (rate == __clk_get_rate(hw->clk))
+ return 0;
+
+ if (!divider_exists(div))
+ return rate == parent_rate ? 0 : -EINVAL;
+
+ /*
+ * A fixed divider can't be changed. (Nor can a fixed
+ * pre-divider be, but for now we never actually try to
+ * change that.) Tolerate a request for a no-op change.
+ */
+ if (divider_is_fixed(&data->div))
+ return rate == parent_rate ? 0 : -EINVAL;
+
+ /*
+ * Get the scaled divisor value needed to achieve a clock
+ * rate as close as possible to what was requested, given
+ * the parent clock rate supplied.
+ */
+ (void)round_rate(bcm_clk->ccu, div, &data->pre_div,
+ rate ? rate : 1, parent_rate, &scaled_div);
+
+ /*
+ * We aren't updating any pre-divider at this point, so
+ * we'll use the regular trigger.
+ */
+ ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
+ &data->trig, scaled_div);
+ if (ret == -ENXIO) {
+ pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name);
+ ret = -EIO; /* Don't proliferate weird errors */
+ } else if (ret == -EIO) {
+ pr_err("%s: trigger failed for %s\n", __func__, bcm_clk->name);
+ }
+
+ return ret;
+}
+
+struct clk_ops kona_peri_clk_ops = {
+ .enable = kona_peri_clk_enable,
+ .disable = kona_peri_clk_disable,
+ .is_enabled = kona_peri_clk_is_enabled,
+ .recalc_rate = kona_peri_clk_recalc_rate,
+ .round_rate = kona_peri_clk_round_rate,
+ .set_parent = kona_peri_clk_set_parent,
+ .get_parent = kona_peri_clk_get_parent,
+ .set_rate = kona_peri_clk_set_rate,
+};
+
+/* Put a peripheral clock into its initial state */
+static bool __peri_clk_init(struct kona_clk *bcm_clk)
+{
+ struct ccu_data *ccu = bcm_clk->ccu;
+ struct peri_clk_data *peri = bcm_clk->peri;
+ const char *name = bcm_clk->name;
+ struct bcm_clk_trig *trig;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+ if (!gate_init(ccu, &peri->gate)) {
+ pr_err("%s: error initializing gate for %s\n", __func__, name);
+ return false;
+ }
+ if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
+ pr_err("%s: error initializing divider for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ /*
+ * For the pre-divider and selector, the pre-trigger is used
+ * if it's present, otherwise we just use the regular trigger.
+ */
+ trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
+ : &peri->trig;
+
+ if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
+ pr_err("%s: error initializing pre-divider for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
+ pr_err("%s: error initializing selector for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ return true;
+}
+
+static bool __kona_clk_init(struct kona_clk *bcm_clk)
+{
+ switch (bcm_clk->type) {
+ case bcm_clk_peri:
+ return __peri_clk_init(bcm_clk);
+ default:
+ BUG();
+ }
+ return false;
+}
+
+/* Set a CCU and all its clocks into their desired initial state */
+bool __init kona_ccu_init(struct ccu_data *ccu)
+{
+ unsigned long flags;
+ unsigned int which;
+ struct clk **clks = ccu->data.clks;
+ bool success = true;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ for (which = 0; which < ccu->data.clk_num; which++) {
+ struct kona_clk *bcm_clk;
+
+ if (!clks[which])
+ continue;
+ bcm_clk = to_kona_clk(__clk_get_hw(clks[which]));
+ success &= __kona_clk_init(bcm_clk);
+ }
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+ return success;
+}
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
new file mode 100644
index 000000000000..5e139adc3dc5
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona.h
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CLK_KONA_H
+#define _CLK_KONA_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/clk-provider.h>
+
+#define BILLION 1000000000
+
+/* The common clock framework uses u8 to represent a parent index */
+#define PARENT_COUNT_MAX ((u32)U8_MAX)
+
+#define BAD_CLK_INDEX U8_MAX /* Can't ever be valid */
+#define BAD_CLK_NAME ((const char *)-1)
+
+#define BAD_SCALED_DIV_VALUE U64_MAX
+
+/*
+ * Utility macros for object flag management. If possible, flags
+ * should be defined such that 0 is the desired default value.
+ */
+#define FLAG(type, flag) BCM_CLK_ ## type ## _FLAGS_ ## flag
+#define FLAG_SET(obj, type, flag) ((obj)->flags |= FLAG(type, flag))
+#define FLAG_CLEAR(obj, type, flag) ((obj)->flags &= ~(FLAG(type, flag)))
+#define FLAG_FLIP(obj, type, flag) ((obj)->flags ^= FLAG(type, flag))
+#define FLAG_TEST(obj, type, flag) (!!((obj)->flags & FLAG(type, flag)))
+
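+/*
+ * For example, FLAG(GATE, HW) expands to BCM_CLK_GATE_FLAGS_HW, and
+ * FLAG_TEST(gate, GATE, HW) tests whether that bit is set in
+ * gate->flags.
+ */
+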
+/* Clock field state tests */
+
+#define gate_exists(gate) FLAG_TEST(gate, GATE, EXISTS)
+#define gate_is_enabled(gate) FLAG_TEST(gate, GATE, ENABLED)
+#define gate_is_hw_controllable(gate) FLAG_TEST(gate, GATE, HW)
+#define gate_is_sw_controllable(gate) FLAG_TEST(gate, GATE, SW)
+#define gate_is_sw_managed(gate) FLAG_TEST(gate, GATE, SW_MANAGED)
+#define gate_is_no_disable(gate) FLAG_TEST(gate, GATE, NO_DISABLE)
+
+#define gate_flip_enabled(gate) FLAG_FLIP(gate, GATE, ENABLED)
+
+#define divider_exists(div) FLAG_TEST(div, DIV, EXISTS)
+#define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED)
+#define divider_has_fraction(div) (!divider_is_fixed(div) && \
+ (div)->frac_width > 0)
+
+#define selector_exists(sel) ((sel)->width != 0)
+#define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS)
+
+/* Clock type, used to tell common block what it's part of */
+enum bcm_clk_type {
+ bcm_clk_none, /* undefined clock type */
+ bcm_clk_bus,
+ bcm_clk_core,
+ bcm_clk_peri
+};
+
+/*
+ * Each CCU defines a mapped area of memory containing registers
+ * used to manage clocks implemented by the CCU. Access to memory
+ * within the CCU's space is serialized by a spinlock. Before any
+ * (other) address can be written, a special access "password" value
+ * must be written to its WR_ACCESS register (located at the base
+ * address of the range). We keep track of the name of each CCU as
+ * it is set up, and maintain them in a list.
+ */
+struct ccu_data {
+ void __iomem *base; /* base of mapped address space */
+ spinlock_t lock; /* serialization lock */
+ bool write_enabled; /* write access is currently enabled */
+ struct list_head links; /* for ccu_list */
+ struct device_node *node;
+ struct clk_onecell_data data;
+ const char *name;
+ u32 range; /* byte range of address space */
+};
+
+/*
+ * Gating control and status is managed by a 32-bit gate register.
+ *
+ * There are several types of gating available:
+ * - (no gate)
+ * A clock with no gate is assumed to be always enabled.
+ * - hardware-only gating (auto-gating)
+ * Enabling or disabling clocks with this type of gate is
+ * managed automatically by the hardware. Such clocks can be
+ * considered by the software to be enabled. The current status
+ * of auto-gated clocks can be read from the gate status bit.
+ * - software-only gating
+ * Auto-gating is not available for this type of clock.
+ * Instead, software manages whether it's enabled by setting or
+ * clearing the enable bit. The current gate status of a gate
+ * under software control can be read from the gate status bit.
+ * To ensure a change to the gating status is complete, the
+ * status bit can be polled to verify that the gate has entered
+ * the desired state.
+ * - selectable hardware or software gating
+ * Gating for this type of clock can be configured to be either
+ * under software or hardware control. Which type is in use is
+ * determined by the hw_sw_sel bit of the gate register.
+ */
+struct bcm_clk_gate {
+ u32 offset; /* gate register offset */
+ u32 status_bit; /* 0: gate is disabled; 1: gate is enabled */
+ u32 en_bit; /* 0: disable; 1: enable */
+ u32 hw_sw_sel_bit; /* 0: hardware gating; 1: software gating */
+ u32 flags; /* BCM_CLK_GATE_FLAGS_* below */
+};
+
+/*
+ * Gate flags:
+ * HW means this gate can be auto-gated
+ * SW means the state of this gate can be software controlled
+ * NO_DISABLE means this gate is (only) enabled if under software control
+ * SW_MANAGED means the status of this gate is under software control
+ * ENABLED means this software-managed gate is *supposed* to be enabled
+ */
+#define BCM_CLK_GATE_FLAGS_EXISTS ((u32)1 << 0) /* Gate is valid */
+#define BCM_CLK_GATE_FLAGS_HW ((u32)1 << 1) /* Can auto-gate */
+#define BCM_CLK_GATE_FLAGS_SW ((u32)1 << 2) /* Software control */
+#define BCM_CLK_GATE_FLAGS_NO_DISABLE ((u32)1 << 3) /* HW or enabled */
+#define BCM_CLK_GATE_FLAGS_SW_MANAGED ((u32)1 << 4) /* SW now in control */
+#define BCM_CLK_GATE_FLAGS_ENABLED ((u32)1 << 5) /* If SW_MANAGED */
+
+/*
+ * Gate initialization macros.
+ *
+ * Any gate initially under software control will be enabled.
+ */
+
+/* A hardware/software gate initially under software control */
+#define HW_SW_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .hw_sw_sel_bit = (_hw_sw_sel_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
+ FLAG(GATE, SW_MANAGED)|FLAG(GATE, ENABLED)| \
+ FLAG(GATE, EXISTS), \
+ }
+
+/* A hardware/software gate initially under hardware control */
+#define HW_SW_GATE_AUTO(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .hw_sw_sel_bit = (_hw_sw_sel_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
+ FLAG(GATE, EXISTS), \
+ }
+
+/* A hardware-or-enabled gate (enabled if not under hardware control) */
+#define HW_ENABLE_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .hw_sw_sel_bit = (_hw_sw_sel_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
+ FLAG(GATE, NO_DISABLE)|FLAG(GATE, EXISTS), \
+ }
+
+/* A software-only gate */
+#define SW_ONLY_GATE(_offset, _status_bit, _en_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .flags = FLAG(GATE, SW)|FLAG(GATE, SW_MANAGED)| \
+ FLAG(GATE, ENABLED)|FLAG(GATE, EXISTS), \
+ }
+
+/* A hardware-only gate */
+#define HW_ONLY_GATE(_offset, _status_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, EXISTS), \
+ }
+
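+/*
+ * Example (hypothetical register layout): a gate register at offset
+ * 0x0400 with status bit 18, enable bit 2 and hw/sw select bit 3
+ * would be declared as HW_SW_GATE(0x0400, 18, 2, 3).
+ */
+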
+/*
+ * Each clock can have zero, one, or two dividers which change the
+ * output rate of the clock. Each divider can be either fixed or
+ * variable. If there are two dividers, they are the "pre-divider"
+ * and the "regular" or "downstream" divider. If there is only one,
+ * there is no pre-divider.
+ *
+ * A fixed divider is any non-zero (positive) value, and it
+ * indicates how the input rate is affected by the divider.
+ *
+ * The value of a variable divider is maintained in a sub-field of a
+ * 32-bit divider register. The position of the field in the
+ * register is defined by its offset and width. The value recorded
+ * in this field is always 1 less than the value it represents.
+ *
+ * In addition, a variable divider can indicate that some subset
+ * of its bits represent a "fractional" part of the divider. Such
+ * bits comprise the low-order portion of the divider field, and can
+ * be viewed as representing the portion of the divider that lies to
+ * the right of the decimal point. Most variable dividers have zero
+ * fractional bits. Variable dividers with non-zero fraction width
+ * still record a value 1 less than the value they represent; the
+ * added 1 does *not* affect the low-order bit in this case, it
+ * affects the bits above the fractional part only. (Often in this
+ * code a divider field value is distinguished from the value it
+ * represents by referring to the latter as a "divisor".)
+ *
+ * In order to avoid dealing with fractions, divider arithmetic is
+ * performed using "scaled" values. A scaled value is one that's
+ * been left-shifted by the fractional width of a divider. Dividing
+ * a scaled value by a scaled divisor produces the desired quotient
+ * without loss of precision and without any other special handling
+ * for fractions.
+ *
+ * The recorded value of a variable divider can be modified. To
+ * modify either divider (or both), a clock must be enabled (i.e.,
+ * using its gate). In addition, a trigger register (described
+ * below) must be used to commit the change, and polled to verify
+ * the change is complete.
+ */
+struct bcm_clk_div {
+ union {
+ struct { /* variable divider */
+ u32 offset; /* divider register offset */
+ u32 shift; /* field shift */
+ u32 width; /* field width */
+ u32 frac_width; /* field fraction width */
+
+ u64 scaled_div; /* scaled divider value */
+ };
+ u32 fixed; /* non-zero fixed divider value */
+ };
+ u32 flags; /* BCM_CLK_DIV_FLAGS_* below */
+};
+
+/*
+ * Divider flags:
+ * EXISTS means this divider exists
+ * FIXED means it is a fixed-rate divider
+ */
+#define BCM_CLK_DIV_FLAGS_EXISTS ((u32)1 << 0) /* Divider is valid */
+#define BCM_CLK_DIV_FLAGS_FIXED ((u32)1 << 1) /* Fixed-value */
+
+/* Divider initialization macros */
+
+/* A fixed (non-zero) divider */
+#define FIXED_DIVIDER(_value) \
+ { \
+ .fixed = (_value), \
+ .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \
+ }
+
+/* A divider with an integral divisor */
+#define DIVIDER(_offset, _shift, _width) \
+ { \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .scaled_div = BAD_SCALED_DIV_VALUE, \
+ .flags = FLAG(DIV, EXISTS), \
+ }
+
+/* A divider whose divisor has an integer and fractional part */
+#define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \
+ { \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .frac_width = (_frac_width), \
+ .scaled_div = BAD_SCALED_DIV_VALUE, \
+ .flags = FLAG(DIV, EXISTS), \
+ }
+
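+/*
+ * Example (hypothetical field layout): FRAC_DIVIDER(0x0a10, 4, 8, 3)
+ * describes an 8-bit divider field at bits 11:4 whose low 3 bits are
+ * fractional. A register field value of 19 then represents the
+ * scaled divisor 19 + (1 << 3) = 27, i.e. a divisor of 27 / 8 = 3.375.
+ */
+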
+/*
+ * Clocks may have multiple "parent" clocks. If there is more than
+ * one, a selector must be specified to define which of the parent
+ * clocks is currently in use. The selected clock is indicated in a
+ * sub-field of a 32-bit selector register. The range of
+ * representable selector values typically exceeds the number of
+ * available parent clocks. Occasionally the reset value of a
+ * selector field is explicitly set to a (specific) value that does
+ * not correspond to a defined input clock.
+ *
+ * We register all known parent clocks with the common clock code
+ * using a packed array (i.e., no empty slots) of (parent) clock
+ * names, and refer to them later using indexes into that array.
+ * We maintain an array of selector values indexed by common clock
+ * index values in order to map between these common clock indexes
+ * and the selector values used by the hardware.
+ *
+ * Like dividers, a selector can be modified, but to do so a clock
+ * must be enabled, and a trigger must be used to commit the change.
+ */
+struct bcm_clk_sel {
+ u32 offset; /* selector register offset */
+ u32 shift; /* field shift */
+ u32 width; /* field width */
+
+ u32 parent_count; /* number of entries in parent_sel[] */
+ u32 *parent_sel; /* array of parent selector values */
+ u8 clk_index; /* current selected index in parent_sel[] */
+};
+
+/* Selector initialization macro */
+#define SELECTOR(_offset, _shift, _width) \
+ { \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .clk_index = BAD_CLK_INDEX, \
+ }
+
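+/*
+ * Example (hypothetical values): with parent_sel = { 0, 2, 3 }, common
+ * clock parent index 1 corresponds to hardware selector value 2, and a
+ * selector register field value of 3 maps back to parent index 2.
+ */
+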
+/*
+ * Making changes to a variable divider or a selector for a clock
+ * requires the use of a trigger. A trigger is defined by a single
+ * bit within a register. To signal a change, a 1 is written into
+ * that bit. To determine when the change has been completed, that
+ * trigger bit is polled; the read value will be 1 while the change
+ * is in progress, and 0 when it is complete.
+ *
+ * Occasionally a clock will have more than one trigger. In this
+ * case, the "pre-trigger" will be used when changing a clock's
+ * selector and/or its pre-divider.
+ */
+struct bcm_clk_trig {
+ u32 offset; /* trigger register offset */
+ u32 bit; /* trigger bit */
+ u32 flags; /* BCM_CLK_TRIG_FLAGS_* below */
+};
+
+/*
+ * Trigger flags:
+ * EXISTS means this trigger exists
+ */
+#define BCM_CLK_TRIG_FLAGS_EXISTS ((u32)1 << 0) /* Trigger is valid */
+
+/* Trigger initialization macro */
+#define TRIGGER(_offset, _bit) \
+ { \
+ .offset = (_offset), \
+ .bit = (_bit), \
+ .flags = FLAG(TRIG, EXISTS), \
+ }
+
+struct peri_clk_data {
+ struct bcm_clk_gate gate;
+ struct bcm_clk_trig pre_trig;
+ struct bcm_clk_div pre_div;
+ struct bcm_clk_trig trig;
+ struct bcm_clk_div div;
+ struct bcm_clk_sel sel;
+ const char *clocks[]; /* must be last; use CLOCKS() to declare */
+};
+#define CLOCKS(...) { __VA_ARGS__, NULL, }
+#define NO_CLOCKS { NULL, } /* Use this if there are no parent clocks */
+
+struct kona_clk {
+ struct clk_hw hw;
+ struct clk_init_data init_data;
+ const char *name; /* name of this clock */
+ struct ccu_data *ccu; /* ccu this clock is associated with */
+ enum bcm_clk_type type;
+ union {
+ void *data;
+ struct peri_clk_data *peri;
+ };
+};
+#define to_kona_clk(_hw) \
+ container_of(_hw, struct kona_clk, hw)
+
+/* Exported globals */
+
+extern struct clk_ops kona_peri_clk_ops;
+
+/* Helper functions */
+
+#define PERI_CLK_SETUP(clks, ccu, id, name) \
+ clks[id] = kona_clk_setup(ccu, #name, bcm_clk_peri, &name ## _data)
+
+/* Externally visible functions */
+
+extern u64 do_div_round_closest(u64 dividend, unsigned long divisor);
+extern u64 scaled_div_max(struct bcm_clk_div *div);
+extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
+ u32 billionths);
+
+extern struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
+ enum bcm_clk_type type, void *data);
+extern void __init kona_dt_ccu_setup(struct device_node *node,
+ int (*ccu_clks_setup)(struct ccu_data *));
+extern bool __init kona_ccu_init(struct ccu_data *ccu);
+
+#endif /* _CLK_KONA_H */
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 8137327847c3..1127ee46b802 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -17,23 +17,75 @@
#include <linux/module.h>
#include <linux/err.h>
-#define AXI_CLKGEN_REG_UPDATE_ENABLE 0x04
-#define AXI_CLKGEN_REG_CLK_OUT1 0x08
-#define AXI_CLKGEN_REG_CLK_OUT2 0x0c
-#define AXI_CLKGEN_REG_CLK_DIV 0x10
-#define AXI_CLKGEN_REG_CLK_FB1 0x14
-#define AXI_CLKGEN_REG_CLK_FB2 0x18
-#define AXI_CLKGEN_REG_LOCK1 0x1c
-#define AXI_CLKGEN_REG_LOCK2 0x20
-#define AXI_CLKGEN_REG_LOCK3 0x24
-#define AXI_CLKGEN_REG_FILTER1 0x28
-#define AXI_CLKGEN_REG_FILTER2 0x2c
+#define AXI_CLKGEN_V1_REG_UPDATE_ENABLE 0x04
+#define AXI_CLKGEN_V1_REG_CLK_OUT1 0x08
+#define AXI_CLKGEN_V1_REG_CLK_OUT2 0x0c
+#define AXI_CLKGEN_V1_REG_CLK_DIV 0x10
+#define AXI_CLKGEN_V1_REG_CLK_FB1 0x14
+#define AXI_CLKGEN_V1_REG_CLK_FB2 0x18
+#define AXI_CLKGEN_V1_REG_LOCK1 0x1c
+#define AXI_CLKGEN_V1_REG_LOCK2 0x20
+#define AXI_CLKGEN_V1_REG_LOCK3 0x24
+#define AXI_CLKGEN_V1_REG_FILTER1 0x28
+#define AXI_CLKGEN_V1_REG_FILTER2 0x2c
+
+#define AXI_CLKGEN_V2_REG_RESET 0x40
+#define AXI_CLKGEN_V2_REG_DRP_CNTRL 0x70
+#define AXI_CLKGEN_V2_REG_DRP_STATUS 0x74
+
+#define AXI_CLKGEN_V2_RESET_MMCM_ENABLE BIT(1)
+#define AXI_CLKGEN_V2_RESET_ENABLE BIT(0)
+
+#define AXI_CLKGEN_V2_DRP_CNTRL_SEL BIT(29)
+#define AXI_CLKGEN_V2_DRP_CNTRL_READ BIT(28)
+
+#define AXI_CLKGEN_V2_DRP_STATUS_BUSY BIT(16)
+
+#define MMCM_REG_CLKOUT0_1 0x08
+#define MMCM_REG_CLKOUT0_2 0x09
+#define MMCM_REG_CLK_FB1 0x14
+#define MMCM_REG_CLK_FB2 0x15
+#define MMCM_REG_CLK_DIV 0x16
+#define MMCM_REG_LOCK1 0x18
+#define MMCM_REG_LOCK2 0x19
+#define MMCM_REG_LOCK3 0x1a
+#define MMCM_REG_FILTER1 0x4e
+#define MMCM_REG_FILTER2 0x4f
+
+struct axi_clkgen;
+
+struct axi_clkgen_mmcm_ops {
+ void (*enable)(struct axi_clkgen *axi_clkgen, bool enable);
+ int (*write)(struct axi_clkgen *axi_clkgen, unsigned int reg,
+ unsigned int val, unsigned int mask);
+ int (*read)(struct axi_clkgen *axi_clkgen, unsigned int reg,
+ unsigned int *val);
+};
struct axi_clkgen {
void __iomem *base;
+ const struct axi_clkgen_mmcm_ops *mmcm_ops;
struct clk_hw clk_hw;
};
+static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
+ bool enable)
+{
+ axi_clkgen->mmcm_ops->enable(axi_clkgen, enable);
+}
+
+static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int val, unsigned int mask)
+{
+ return axi_clkgen->mmcm_ops->write(axi_clkgen, reg, val, mask);
+}
+
+static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int *val)
+{
+ return axi_clkgen->mmcm_ops->read(axi_clkgen, reg, val);
+}
+
static uint32_t axi_clkgen_lookup_filter(unsigned int m)
{
switch (m) {
@@ -156,6 +208,148 @@ static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
*val = readl(axi_clkgen->base + reg);
}
+static unsigned int axi_clkgen_v1_map_mmcm_reg(unsigned int reg)
+{
+ switch (reg) {
+ case MMCM_REG_CLKOUT0_1:
+ return AXI_CLKGEN_V1_REG_CLK_OUT1;
+ case MMCM_REG_CLKOUT0_2:
+ return AXI_CLKGEN_V1_REG_CLK_OUT2;
+ case MMCM_REG_CLK_FB1:
+ return AXI_CLKGEN_V1_REG_CLK_FB1;
+ case MMCM_REG_CLK_FB2:
+ return AXI_CLKGEN_V1_REG_CLK_FB2;
+ case MMCM_REG_CLK_DIV:
+ return AXI_CLKGEN_V1_REG_CLK_DIV;
+ case MMCM_REG_LOCK1:
+ return AXI_CLKGEN_V1_REG_LOCK1;
+ case MMCM_REG_LOCK2:
+ return AXI_CLKGEN_V1_REG_LOCK2;
+ case MMCM_REG_LOCK3:
+ return AXI_CLKGEN_V1_REG_LOCK3;
+ case MMCM_REG_FILTER1:
+ return AXI_CLKGEN_V1_REG_FILTER1;
+ case MMCM_REG_FILTER2:
+ return AXI_CLKGEN_V1_REG_FILTER2;
+ default:
+ return 0;
+ }
+}
+
+static int axi_clkgen_v1_mmcm_write(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int val, unsigned int mask)
+{
+ reg = axi_clkgen_v1_map_mmcm_reg(reg);
+ if (reg == 0)
+ return -EINVAL;
+
+ axi_clkgen_write(axi_clkgen, reg, val);
+
+ return 0;
+}
+
+static int axi_clkgen_v1_mmcm_read(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int *val)
+{
+ reg = axi_clkgen_v1_map_mmcm_reg(reg);
+ if (reg == 0)
+ return -EINVAL;
+
+ axi_clkgen_read(axi_clkgen, reg, val);
+
+ return 0;
+}
+
+static void axi_clkgen_v1_mmcm_enable(struct axi_clkgen *axi_clkgen,
+ bool enable)
+{
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V1_REG_UPDATE_ENABLE, enable);
+}
+
+static const struct axi_clkgen_mmcm_ops axi_clkgen_v1_mmcm_ops = {
+ .write = axi_clkgen_v1_mmcm_write,
+ .read = axi_clkgen_v1_mmcm_read,
+ .enable = axi_clkgen_v1_mmcm_enable,
+};
+
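+/*
+ * Poll the DRP status register until it is no longer busy. Returns
+ * the low 16 bits of the status register (the DRP read data) on
+ * success, or -EIO if the controller stays busy.
+ */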
+static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
+{
+ unsigned int timeout = 10000;
+ unsigned int val;
+
+ do {
+ axi_clkgen_read(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_STATUS, &val);
+ } while ((val & AXI_CLKGEN_V2_DRP_STATUS_BUSY) && --timeout);
+
+ if (val & AXI_CLKGEN_V2_DRP_STATUS_BUSY)
+ return -EIO;
+
+ return val & 0xffff;
+}
+
+static int axi_clkgen_v2_mmcm_read(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int *val)
+{
+ unsigned int reg_val;
+ int ret;
+
+ ret = axi_clkgen_wait_non_busy(axi_clkgen);
+ if (ret < 0)
+ return ret;
+
+ reg_val = AXI_CLKGEN_V2_DRP_CNTRL_SEL | AXI_CLKGEN_V2_DRP_CNTRL_READ;
+ reg_val |= (reg << 16);
+
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_CNTRL, reg_val);
+
+ ret = axi_clkgen_wait_non_busy(axi_clkgen);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int axi_clkgen_v2_mmcm_write(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int val, unsigned int mask)
+{
+ unsigned int reg_val = 0;
+ int ret;
+
+ ret = axi_clkgen_wait_non_busy(axi_clkgen);
+ if (ret < 0)
+ return ret;
+
+ if (mask != 0xffff) {
+ axi_clkgen_v2_mmcm_read(axi_clkgen, reg, &reg_val);
+ reg_val &= ~mask;
+ }
+
+ reg_val |= AXI_CLKGEN_V2_DRP_CNTRL_SEL | (reg << 16) | (val & mask);
+
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_CNTRL, reg_val);
+
+ return 0;
+}
+
+static void axi_clkgen_v2_mmcm_enable(struct axi_clkgen *axi_clkgen,
+ bool enable)
+{
+ unsigned int val = AXI_CLKGEN_V2_RESET_ENABLE;
+
+ if (enable)
+ val |= AXI_CLKGEN_V2_RESET_MMCM_ENABLE;
+
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_RESET, val);
+}
+
+static const struct axi_clkgen_mmcm_ops axi_clkgen_v2_mmcm_ops = {
+ .write = axi_clkgen_v2_mmcm_write,
+ .read = axi_clkgen_v2_mmcm_read,
+ .enable = axi_clkgen_v2_mmcm_enable,
+};
+
static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
{
return container_of(clk_hw, struct axi_clkgen, clk_hw);
@@ -184,33 +378,29 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
filter = axi_clkgen_lookup_filter(m - 1);
lock = axi_clkgen_lookup_lock(m - 1);
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 0);
-
axi_clkgen_calc_clk_params(dout, &low, &high, &edge, &nocount);
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1,
- (high << 6) | low);
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT2,
- (edge << 7) | (nocount << 6));
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLKOUT0_1,
+ (high << 6) | low, 0xefff);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLKOUT0_2,
+ (edge << 7) | (nocount << 6), 0x03ff);
axi_clkgen_calc_clk_params(d, &low, &high, &edge, &nocount);
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV,
- (edge << 13) | (nocount << 12) | (high << 6) | low);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_DIV,
+ (edge << 13) | (nocount << 12) | (high << 6) | low, 0x3fff);
axi_clkgen_calc_clk_params(m, &low, &high, &edge, &nocount);
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1,
- (high << 6) | low);
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB2,
- (edge << 7) | (nocount << 6));
-
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK1, lock & 0x3ff);
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK2,
- (((lock >> 16) & 0x1f) << 10) | 0x1);
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK3,
- (((lock >> 24) & 0x1f) << 10) | 0x3e9);
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER1, filter >> 16);
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER2, filter);
-
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 1);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_FB1,
+ (high << 6) | low, 0xefff);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_FB2,
+ (edge << 7) | (nocount << 6), 0x03ff);
+
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK1, lock & 0x3ff, 0x3ff);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK2,
+ (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK3,
+ (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER1, filter >> 16, 0x9900);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER2, filter, 0x9900);
return 0;
}
@@ -236,11 +426,11 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
unsigned int reg;
unsigned long long tmp;
- axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1, &reg);
+ axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, &reg);
dout = (reg & 0x3f) + ((reg >> 6) & 0x3f);
- axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV, &reg);
+ axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &reg);
d = (reg & 0x3f) + ((reg >> 6) & 0x3f);
- axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1, &reg);
+ axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, &reg);
m = (reg & 0x3f) + ((reg >> 6) & 0x3f);
if (d == 0 || dout == 0)
@@ -255,14 +445,45 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
return tmp;
}
+static int axi_clkgen_enable(struct clk_hw *clk_hw)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+
+ axi_clkgen_mmcm_enable(axi_clkgen, true);
+
+ return 0;
+}
+
+static void axi_clkgen_disable(struct clk_hw *clk_hw)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+
+ axi_clkgen_mmcm_enable(axi_clkgen, false);
+}
+
static const struct clk_ops axi_clkgen_ops = {
.recalc_rate = axi_clkgen_recalc_rate,
.round_rate = axi_clkgen_round_rate,
.set_rate = axi_clkgen_set_rate,
+ .enable = axi_clkgen_enable,
+ .disable = axi_clkgen_disable,
};
+static const struct of_device_id axi_clkgen_ids[] = {
+ {
+ .compatible = "adi,axi-clkgen-1.00.a",
+ .data = &axi_clkgen_v1_mmcm_ops
+ }, {
+ .compatible = "adi,axi-clkgen-2.00.a",
+ .data = &axi_clkgen_v2_mmcm_ops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
+
static int axi_clkgen_probe(struct platform_device *pdev)
{
+ const struct of_device_id *id;
struct axi_clkgen *axi_clkgen;
struct clk_init_data init;
const char *parent_name;
@@ -270,10 +491,19 @@ static int axi_clkgen_probe(struct platform_device *pdev)
struct resource *mem;
struct clk *clk;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ id = of_match_node(axi_clkgen_ids, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
axi_clkgen = devm_kzalloc(&pdev->dev, sizeof(*axi_clkgen), GFP_KERNEL);
if (!axi_clkgen)
return -ENOMEM;
+ axi_clkgen->mmcm_ops = id->data;
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(axi_clkgen->base))
@@ -289,10 +519,12 @@ static int axi_clkgen_probe(struct platform_device *pdev)
init.name = clk_name;
init.ops = &axi_clkgen_ops;
- init.flags = 0;
+ init.flags = CLK_SET_RATE_GATE;
init.parent_names = &parent_name;
init.num_parents = 1;
+ axi_clkgen_mmcm_enable(axi_clkgen, false);
+
axi_clkgen->clk_hw.init = &init;
clk = devm_clk_register(&pdev->dev, &axi_clkgen->clk_hw);
if (IS_ERR(clk))
@@ -309,12 +541,6 @@ static int axi_clkgen_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id axi_clkgen_ids[] = {
- { .compatible = "adi,axi-clkgen-1.00.a" },
- { },
-};
-MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
-
static struct platform_driver axi_clkgen_driver = {
.driver = {
.name = "adi-axi-clkgen",
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 5543b7df8e16..ec22112e569f 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -24,7 +24,7 @@
* Traits of this clock:
* prepare - clk_prepare only ensures that parents are prepared
* enable - clk_enable only ensures that parents are enabled
- * rate - rate is adjustable. clk->rate = parent->rate / divisor
+ * rate - rate is adjustable. clk->rate = DIV_ROUND_UP(parent->rate, divisor)
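+ *   For example, a 1000 Hz parent with divisor 3 yields
+ *   DIV_ROUND_UP(1000, 3) = 334 Hz rather than 333 Hz.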
* parent - fixed parent. No clk_set_parent support
*/
@@ -115,7 +115,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
- return parent_rate / div;
+ return DIV_ROUND_UP(parent_rate, div);
}
/*
@@ -185,7 +185,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
}
parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
MULT_ROUND_UP(rate, i));
- now = parent_rate / i;
+ now = DIV_ROUND_UP(parent_rate, i);
if (now <= rate && now > best) {
bestdiv = i;
best = now;
@@ -207,7 +207,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
int div;
div = clk_divider_bestdiv(hw, rate, prate);
- return *prate / div;
+ return DIV_ROUND_UP(*prate, div);
}
static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -218,7 +218,7 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags = 0;
u32 val;
- div = parent_rate / rate;
+ div = DIV_ROUND_UP(parent_rate, rate);
value = _get_val(divider, div);
if (value > div_mask(divider))
diff --git a/drivers/clk/clk-moxart.c b/drivers/clk/clk-moxart.c
new file mode 100644
index 000000000000..30a3b6999e10
--- /dev/null
+++ b/drivers/clk/clk-moxart.c
@@ -0,0 +1,97 @@
+/*
+ * MOXA ART SoCs clock driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/clkdev.h>
+
+void __init moxart_of_pll_clk_init(struct device_node *node)
+{
+ static void __iomem *base;
+ struct clk *clk, *ref_clk;
+ unsigned int mul;
+ const char *name = node->name;
+ const char *parent_name;
+
+ of_property_read_string(node, "clock-output-names", &name);
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s: of_iomap failed\n", node->full_name);
+ return;
+ }
+
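+ /* The PLL multiplier occupies bits 8:3 of the register at offset 0x30 */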
+ mul = readl(base + 0x30) >> 3 & 0x3f;
+ iounmap(base);
+
+ ref_clk = of_clk_get(node, 0);
+ if (IS_ERR(ref_clk)) {
+ pr_err("%s: of_clk_get failed\n", node->full_name);
+ return;
+ }
+
+ clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mul, 1);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock\n", node->full_name);
+ return;
+ }
+
+ clk_register_clkdev(clk, NULL, name);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock",
+ moxart_of_pll_clk_init);
+
+void __init moxart_of_apb_clk_init(struct device_node *node)
+{
+ static void __iomem *base;
+ struct clk *clk, *pll_clk;
+ unsigned int div, val;
+ unsigned int div_idx[] = { 2, 3, 4, 6, 8 };
+ const char *name = node->name;
+ const char *parent_name;
+
+ of_property_read_string(node, "clock-output-names", &name);
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s: of_iomap failed\n", node->full_name);
+ return;
+ }
+
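+ /* Bits 6:4 select the APB divider: an index into div_idx[], doubled below */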
+ val = readl(base + 0xc) >> 4 & 0x7;
+ iounmap(base);
+
+ if (val > 4)
+ val = 0;
+ div = div_idx[val] * 2;
+
+ pll_clk = of_clk_get(node, 0);
+ if (IS_ERR(pll_clk)) {
+ pr_err("%s: of_clk_get failed\n", node->full_name);
+ return;
+ }
+
+ clk = clk_register_fixed_factor(NULL, name, parent_name, 0, 1, div);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock\n", node->full_name);
+ return;
+ }
+
+ clk_register_clkdev(clk, NULL, name);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+CLK_OF_DECLARE(moxart_apb_clock, "moxa,moxart-apb-clock",
+ moxart_of_apb_clk_init);
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
index c4f76ed914b0..8b284be4efa4 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-ppc-corenet.c
@@ -27,7 +27,6 @@ struct cmux_clk {
#define CLKSEL_ADJUST BIT(0)
#define to_cmux_clk(p) container_of(p, struct cmux_clk, hw)
-static void __iomem *base;
static unsigned int clocks_per_pll;
static int cmux_set_parent(struct clk_hw *hw, u8 idx)
@@ -100,7 +99,11 @@ static void __init core_mux_init(struct device_node *np)
pr_err("%s: could not allocate cmux_clk\n", __func__);
goto err_name;
}
- cmux_clk->reg = base + offset;
+ cmux_clk->reg = of_iomap(np, 0);
+ if (!cmux_clk->reg) {
+ pr_err("%s: could not map register\n", __func__);
+ goto err_clk;
+ }
node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
if (node && (offset >= 0x80))
@@ -143,38 +146,39 @@ err_name:
static void __init core_pll_init(struct device_node *np)
{
- u32 offset, mult;
+ u32 mult;
int i, rc, count;
const char *clk_name, *parent_name;
struct clk_onecell_data *onecell_data;
struct clk **subclks;
+ void __iomem *base;
- rc = of_property_read_u32(np, "reg", &offset);
- if (rc) {
- pr_err("%s: could not get reg property\n", np->name);
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("clk-ppc: iomap error\n");
return;
}
/* get the multiple of PLL */
- mult = ioread32be(base + offset);
+ mult = ioread32be(base);
/* check if this PLL is disabled */
if (mult & PLL_KILL) {
pr_debug("PLL:%s is disabled\n", np->name);
- return;
+ goto err_map;
}
mult = (mult >> 1) & 0x3f;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name) {
pr_err("PLL: %s must have a parent\n", np->name);
- return;
+ goto err_map;
}
count = of_property_count_strings(np, "clock-output-names");
if (count < 0 || count > 4) {
pr_err("%s: clock is not supported\n", np->name);
- return;
+ goto err_map;
}
/* output clock number per PLL */
@@ -183,7 +187,7 @@ static void __init core_pll_init(struct device_node *np)
subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL);
if (!subclks) {
pr_err("%s: could not allocate subclks\n", __func__);
- return;
+ goto err_map;
}
onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
@@ -230,30 +234,52 @@ static void __init core_pll_init(struct device_node *np)
goto err_cell;
}
+ iounmap(base);
return;
err_cell:
kfree(onecell_data);
err_clks:
kfree(subclks);
+err_map:
+ iounmap(base);
+}
+
+static void __init sysclk_init(struct device_node *node)
+{
+ struct clk *clk;
+ const char *clk_name = node->name;
+ struct device_node *np = of_get_parent(node);
+ u32 rate;
+
+ if (!np) {
+ pr_err("ppc-clk: could not get parent node\n");
+ return;
+ }
+
+ if (of_property_read_u32(np, "clock-frequency", &rate)) {
+ of_node_put(node);
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &clk_name);
+
+ clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
static const struct of_device_id clk_match[] __initconst = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- { .compatible = "fsl,core-pll-clock", .data = core_pll_init, },
- { .compatible = "fsl,core-mux-clock", .data = core_mux_init, },
+ { .compatible = "fsl,qoriq-sysclk-1.0", .data = sysclk_init, },
+ { .compatible = "fsl,qoriq-sysclk-2.0", .data = sysclk_init, },
+ { .compatible = "fsl,qoriq-core-pll-1.0", .data = core_pll_init, },
+ { .compatible = "fsl,qoriq-core-pll-2.0", .data = core_pll_init, },
+ { .compatible = "fsl,qoriq-core-mux-1.0", .data = core_mux_init, },
+ { .compatible = "fsl,qoriq-core-mux-2.0", .data = core_mux_init, },
{}
};
static int __init ppc_corenet_clk_probe(struct platform_device *pdev)
{
- struct device_node *np;
-
- np = pdev->dev.of_node;
- base = of_iomap(np, 0);
- if (!base) {
- dev_err(&pdev->dev, "iomap error\n");
- return -ENOMEM;
- }
of_clk_init(clk_match);
return 0;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 00a3abe103a5..f2f62a1bf61a 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -27,6 +27,7 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s5m8767.h>
#include <linux/mfd/samsung/core.h>
#define s2mps11_name(a) (a->hw.init->name)
@@ -48,6 +49,7 @@ struct s2mps11_clk {
struct clk_lookup *lookup;
u32 mask;
bool enabled;
+ unsigned int reg;
};
static struct s2mps11_clk *to_s2mps11_clk(struct clk_hw *hw)
@@ -61,7 +63,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
int ret;
ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
- S2MPS11_REG_RTC_CTRL,
+ s2mps11->reg,
s2mps11->mask, s2mps11->mask);
if (!ret)
s2mps11->enabled = true;
@@ -74,7 +76,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
int ret;
- ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL,
+ ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg,
s2mps11->mask, ~s2mps11->mask);
if (!ret)
@@ -130,9 +132,9 @@ static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev)
int i;
if (!iodev->dev->of_node)
- return NULL;
+ return ERR_PTR(-EINVAL);
- clk_np = of_find_node_by_name(iodev->dev->of_node, "clocks");
+ clk_np = of_get_child_by_name(iodev->dev->of_node, "clocks");
if (!clk_np) {
dev_err(&pdev->dev, "could not find clock sub-node\n");
return ERR_PTR(-EINVAL);
@@ -155,6 +157,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct s2mps11_clk *s2mps11_clks, *s2mps11_clk;
struct device_node *clk_np = NULL;
+ unsigned int s2mps11_reg;
int i, ret = 0;
u32 val;
@@ -169,13 +172,26 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
if (IS_ERR(clk_np))
return PTR_ERR(clk_np);
+ switch (platform_get_device_id(pdev)->driver_data) {
+ case S2MPS11X:
+ s2mps11_reg = S2MPS11_REG_RTC_CTRL;
+ break;
+ case S5M8767X:
+ s2mps11_reg = S5M8767_REG_CTRL1;
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device type\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < S2MPS11_CLKS_NUM; i++, s2mps11_clk++) {
s2mps11_clk->iodev = iodev;
s2mps11_clk->hw.init = &s2mps11_clks_init[i];
s2mps11_clk->mask = 1 << i;
+ s2mps11_clk->reg = s2mps11_reg;
ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
- S2MPS11_REG_RTC_CTRL, &val);
+ s2mps11_clk->reg, &val);
if (ret < 0)
goto err_reg;
@@ -241,7 +257,8 @@ static int s2mps11_clk_remove(struct platform_device *pdev)
}
static const struct platform_device_id s2mps11_clk_id[] = {
- { "s2mps11-clk", 0},
+ { "s2mps11-clk", S2MPS11X},
+ { "s5m8767-clk", S5M8767X},
{ },
};
MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c42e608af6bb..dff0373f53c1 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -277,6 +277,10 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
if (!d)
goto err_out;
+ if (clk->ops->debug_init)
+ if (clk->ops->debug_init(clk->hw, clk->dentry))
+ goto err_out;
+
ret = 0;
goto out;
@@ -1339,8 +1343,11 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
if (clk->notifier_count)
ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
- if (ret & NOTIFY_STOP_MASK)
+ if (ret & NOTIFY_STOP_MASK) {
+ pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
+ __func__, clk->name, ret);
goto out;
+ }
hlist_for_each_entry(child, &clk->children, child_node) {
ret = __clk_speculate_rates(child, new_rate);
@@ -1588,7 +1595,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
/* notify that we are about to change rates */
fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
if (fail_clk) {
- pr_warn("%s: failed to set %s rate\n", __func__,
+ pr_debug("%s: failed to set %s rate\n", __func__,
fail_clk->name);
clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
ret = -EBUSY;
@@ -2260,20 +2267,11 @@ void __clk_put(struct clk *clk)
* re-enter into the clk framework by calling any top-level clk APIs;
* this will cause a nested prepare_lock mutex.
*
- * Pre-change notifier callbacks will be passed the current, pre-change
- * rate of the clk via struct clk_notifier_data.old_rate. The new,
- * post-change rate of the clk is passed via struct
- * clk_notifier_data.new_rate.
- *
- * Post-change notifiers will pass the now-current, post-change rate of
- * the clk in both struct clk_notifier_data.old_rate and struct
+ * In all notification cases (pre, post and abort rate change) the
+ * original clock rate is passed to the callback via struct
+ * clk_notifier_data.old_rate and the new frequency is passed via struct
* clk_notifier_data.new_rate.
*
- * Abort-change notifiers are effectively the opposite of pre-change
- * notifiers: the original pre-change clk rate is passed in via struct
- * clk_notifier_data.new_rate and the failed post-change rate is passed
- * in via struct clk_notifier_data.old_rate.
- *
* clk_notifier_register() must be called from non-atomic context.
* Returns -EINVAL if called with null arguments, -ENOMEM upon
* allocation failure; otherwise, passes along the return value of
@@ -2473,7 +2471,7 @@ EXPORT_SYMBOL_GPL(of_clk_del_provider);
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
struct of_clk_provider *provider;
- struct clk *clk = ERR_PTR(-ENOENT);
+ struct clk *clk = ERR_PTR(-EPROBE_DEFER);
/* Check if we have such a provider in our array */
list_for_each_entry(provider, &of_clk_providers, link) {
@@ -2506,8 +2504,12 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
const char *of_clk_get_parent_name(struct device_node *np, int index)
{
struct of_phandle_args clkspec;
+ struct property *prop;
const char *clk_name;
+ const __be32 *vp;
+ u32 pv;
int rc;
+ int count;
if (index < 0)
return NULL;
@@ -2517,8 +2519,22 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
if (rc)
return NULL;
+ index = clkspec.args_count ? clkspec.args[0] : 0;
+ count = 0;
+
+ /*
+ * If there is a clock-indices property, use it to translate the
+ * index specified in the clock specifier into an array offset for
+ * the clock-output-names property.
+ */
+ of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
+ if (index == pv) {
+ index = count;
+ break;
+ }
+ count++;
+ }
+
if (of_property_read_string_index(clkspec.np, "clock-output-names",
- clkspec.args_count ? clkspec.args[0] : 0,
+ index,
&clk_name) < 0)
clk_name = clkspec.np->name;
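The clock-indices handling added above lets a provider expose sparse output
indices while keeping clock-output-names dense. A hypothetical illustration
(the property values are invented):

/*
 * Hypothetical provider node:
 *
 *	clock-indices      = <1>, <3>, <5>;
 *	clock-output-names = "uart", "spi", "i2c";
 *
 * For a consumer specifier referencing output 3, the old code used 3
 * directly as the string index, fell past the three-entry array and
 * returned the node name instead. With the translation loop above,
 * index 3 becomes array offset 1 and of_clk_get_parent_name() returns
 * "spi".
 */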
@@ -2527,24 +2543,99 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
+struct clock_provider {
+ of_clk_init_cb_t clk_init_cb;
+ struct device_node *np;
+ struct list_head node;
+};
+
+static LIST_HEAD(clk_provider_list);
+
+/*
+ * This function looks for a parent clock. If there is one, it checks
+ * whether the provider for this parent clock has already been
+ * initialized, in which case the parent clock is ready.
+ */
+static int parent_ready(struct device_node *np)
+{
+ int i = 0;
+
+ while (true) {
+ struct clk *clk = of_clk_get(np, i);
+
+ /* this parent is ready, we can check the next one */
+ if (!IS_ERR(clk)) {
+ clk_put(clk);
+ i++;
+ continue;
+ }
+
+ /* at least one parent is not ready, we exit now */
+ if (PTR_ERR(clk) == -EPROBE_DEFER)
+ return 0;
+
+ /*
+ * Here we assume that the device tree is written
+ * correctly, so any other error means that there are
+ * no more parents. As we did not exit earlier, the
+ * previous parents are all ready. If the node has no
+ * clock parents at all, there is nothing to wait for
+ * and we can consider it ready as well.
+ */
+ return 1;
+ }
+}
+
/**
* of_clk_init() - Scan and init clock providers from the DT
* @matches: array of compatible values and init functions for providers.
*
- * This function scans the device tree for matching clock providers and
- * calls their initialization functions
+ * This function scans the device tree for matching clock providers
+ * and calls their initialization functions, following the parent
+ * clock dependencies where possible.
*/
void __init of_clk_init(const struct of_device_id *matches)
{
const struct of_device_id *match;
struct device_node *np;
+ struct clock_provider *clk_provider, *next;
+ bool is_init_done;
+ bool force = false;
if (!matches)
matches = &__clk_of_table;
+ /* First prepare the list of the clock providers */
for_each_matching_node_and_match(np, matches, &match) {
- of_clk_init_cb_t clk_init_cb = match->data;
- clk_init_cb(np);
+ struct clock_provider *parent =
+ kzalloc(sizeof(struct clock_provider), GFP_KERNEL);
+
+ parent->clk_init_cb = match->data;
+ parent->np = np;
+ list_add_tail(&parent->node, &clk_provider_list);
+ }
+
+ while (!list_empty(&clk_provider_list)) {
+ is_init_done = false;
+ list_for_each_entry_safe(clk_provider, next,
+ &clk_provider_list, node) {
+ if (force || parent_ready(clk_provider->np)) {
+ clk_provider->clk_init_cb(clk_provider->np);
+ list_del(&clk_provider->node);
+ kfree(clk_provider);
+ is_init_done = true;
+ }
+ }
+
+ /*
+ * We did not manage to initialize any of the
+ * remaining providers during the last pass, so now
+ * initialize all of them unconditionally, in case a
+ * missing clock parent is not actually mandatory.
+ */
+ if (!is_init_done)
+ force = true;
+
}
}
#endif
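The notifier documentation above now states that old_rate and new_rate keep
the same meaning for pre-, post- and abort-change events. A minimal sketch of
a consumer callback under those semantics (the clock handle and the place of
registration are hypothetical):

#include <linux/clk.h>
#include <linux/notifier.h>

static int my_rate_notifier(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/*
		 * ndata->old_rate is the current rate, ndata->new_rate the
		 * requested one; returning NOTIFY_STOP here vetoes the change.
		 */
		break;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
		/* Same convention: old_rate = original, new_rate = requested. */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_rate_notifier,
};

/* In a driver, with a valid struct clk *clk: clk_notifier_register(clk, &my_nb); */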
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 48f67218247c..a360b2eca5cb 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -167,6 +167,8 @@ struct clk *clk_get(struct device *dev, const char *con_id)
clk = of_clk_get_by_name(dev->of_node, con_id);
if (!IS_ERR(clk))
return clk;
+ if (PTR_ERR(clk) == -EPROBE_DEFER)
+ return clk;
}
return clk_get_sys(dev_id, con_id);
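With clk_get() now passing -EPROBE_DEFER through, a clock consumer can simply
hand the error back to the driver core and get probed again once its provider
has been registered. A minimal sketch (driver name and clock handling are
hypothetical):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		/*
		 * -EPROBE_DEFER, like any other error, is simply returned;
		 * the driver core will retry the probe later.
		 */
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}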
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index a049108341fc..40b33c6a8257 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -2,4 +2,7 @@
# Hisilicon Clock specific Makefile
#
-obj-y += clk.o clkgate-separated.o clk-hi3620.o
+obj-y += clk.o clkgate-separated.o
+
+obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o
+obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index f24ad6a3a797..339945d2503b 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -210,33 +210,297 @@ static struct hisi_gate_clock hi3620_seperated_gate_clks[] __initdata = {
static void __init hi3620_clk_init(struct device_node *np)
{
- void __iomem *base;
+ struct hisi_clock_data *clk_data;
- if (np) {
- base = of_iomap(np, 0);
- if (!base) {
- pr_err("failed to map Hi3620 clock registers\n");
- return;
- }
- } else {
- pr_err("failed to find Hi3620 clock node in DTS\n");
+ clk_data = hisi_clk_init(np, HI3620_NR_CLKS);
+ if (!clk_data)
return;
- }
-
- hisi_clk_init(np, HI3620_NR_CLKS);
hisi_clk_register_fixed_rate(hi3620_fixed_rate_clks,
ARRAY_SIZE(hi3620_fixed_rate_clks),
- base);
+ clk_data);
hisi_clk_register_fixed_factor(hi3620_fixed_factor_clks,
ARRAY_SIZE(hi3620_fixed_factor_clks),
- base);
+ clk_data);
hisi_clk_register_mux(hi3620_mux_clks, ARRAY_SIZE(hi3620_mux_clks),
- base);
+ clk_data);
hisi_clk_register_divider(hi3620_div_clks, ARRAY_SIZE(hi3620_div_clks),
- base);
+ clk_data);
hisi_clk_register_gate_sep(hi3620_seperated_gate_clks,
ARRAY_SIZE(hi3620_seperated_gate_clks),
- base);
+ clk_data);
}
CLK_OF_DECLARE(hi3620_clk, "hisilicon,hi3620-clock", hi3620_clk_init);
+
+struct hisi_mmc_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ u32 clken_reg;
+ u32 clken_bit;
+ u32 div_reg;
+ u32 div_off;
+ u32 div_bits;
+ u32 drv_reg;
+ u32 drv_off;
+ u32 drv_bits;
+ u32 sam_reg;
+ u32 sam_off;
+ u32 sam_bits;
+};
+
+struct clk_mmc {
+ struct clk_hw hw;
+ u32 id;
+ void __iomem *clken_reg;
+ u32 clken_bit;
+ void __iomem *div_reg;
+ u32 div_off;
+ u32 div_bits;
+ void __iomem *drv_reg;
+ u32 drv_off;
+ u32 drv_bits;
+ void __iomem *sam_reg;
+ u32 sam_off;
+ u32 sam_bits;
+};
+
+#define to_mmc(_hw) container_of(_hw, struct clk_mmc, hw)
+
+static struct hisi_mmc_clock hi3620_mmc_clks[] __initdata = {
+ { HI3620_SD_CIUCLK, "sd_bclk1", "sd_clk", CLK_SET_RATE_PARENT, 0x1f8, 0, 0x1f8, 1, 3, 0x1f8, 4, 4, 0x1f8, 8, 4},
+ { HI3620_MMC_CIUCLK1, "mmc_bclk1", "mmc_clk1", CLK_SET_RATE_PARENT, 0x1f8, 12, 0x1f8, 13, 3, 0x1f8, 16, 4, 0x1f8, 20, 4},
+ { HI3620_MMC_CIUCLK2, "mmc_bclk2", "mmc_clk2", CLK_SET_RATE_PARENT, 0x1f8, 24, 0x1f8, 25, 3, 0x1f8, 28, 4, 0x1fc, 0, 4},
+ { HI3620_MMC_CIUCLK3, "mmc_bclk3", "mmc_clk3", CLK_SET_RATE_PARENT, 0x1fc, 4, 0x1fc, 5, 3, 0x1fc, 8, 4, 0x1fc, 12, 4},
+};
+
+static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ switch (parent_rate) {
+ case 26000000:
+ return 13000000;
+ case 180000000:
+ return 25000000;
+ case 360000000:
+ return 50000000;
+ case 720000000:
+ return 100000000;
+ case 1440000000:
+ return 180000000;
+ default:
+ return parent_rate;
+ }
+}
+
+static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_p)
+{
+ struct clk_mmc *mclk = to_mmc(hw);
+ unsigned long best = 0;
+
+ if ((rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) {
+ rate = 13000000;
+ best = 26000000;
+ } else if (rate <= 26000000) {
+ rate = 25000000;
+ best = 180000000;
+ } else if (rate <= 52000000) {
+ rate = 50000000;
+ best = 360000000;
+ } else if (rate <= 100000000) {
+ rate = 100000000;
+ best = 720000000;
+ } else {
+ /* max is 180M */
+ rate = 180000000;
+ best = 1440000000;
+ }
+ *best_parent_rate = best;
+ return rate;
+}
+
+static u32 mmc_clk_delay(u32 val, u32 para, u32 off, u32 len)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++) {
+ if (para % 2)
+ val |= 1 << (off + i);
+ else
+ val &= ~(1 << (off + i));
+ para = para >> 1;
+ }
+
+ return val;
+}
+
+static int mmc_clk_set_timing(struct clk_hw *hw, unsigned long rate)
+{
+ struct clk_mmc *mclk = to_mmc(hw);
+ unsigned long flags;
+ u32 sam, drv, div, val;
+ static DEFINE_SPINLOCK(mmc_clk_lock);
+
+ switch (rate) {
+ case 13000000:
+ sam = 3;
+ drv = 1;
+ div = 1;
+ break;
+ case 25000000:
+ sam = 13;
+ drv = 6;
+ div = 6;
+ break;
+ case 50000000:
+ sam = 3;
+ drv = 6;
+ div = 6;
+ break;
+ case 100000000:
+ sam = 6;
+ drv = 4;
+ div = 6;
+ break;
+ case 180000000:
+ sam = 6;
+ drv = 4;
+ div = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&mmc_clk_lock, flags);
+
+ val = readl_relaxed(mclk->clken_reg);
+ val &= ~(1 << mclk->clken_bit);
+ writel_relaxed(val, mclk->clken_reg);
+
+ val = readl_relaxed(mclk->sam_reg);
+ val = mmc_clk_delay(val, sam, mclk->sam_off, mclk->sam_bits);
+ writel_relaxed(val, mclk->sam_reg);
+
+ val = readl_relaxed(mclk->drv_reg);
+ val = mmc_clk_delay(val, drv, mclk->drv_off, mclk->drv_bits);
+ writel_relaxed(val, mclk->drv_reg);
+
+ val = readl_relaxed(mclk->div_reg);
+ val = mmc_clk_delay(val, div, mclk->div_off, mclk->div_bits);
+ writel_relaxed(val, mclk->div_reg);
+
+ val = readl_relaxed(mclk->clken_reg);
+ val |= 1 << mclk->clken_bit;
+ writel_relaxed(val, mclk->clken_reg);
+
+ spin_unlock_irqrestore(&mmc_clk_lock, flags);
+
+ return 0;
+}
+
+static int mmc_clk_prepare(struct clk_hw *hw)
+{
+ struct clk_mmc *mclk = to_mmc(hw);
+ unsigned long rate;
+
+ if (mclk->id == HI3620_MMC_CIUCLK1)
+ rate = 13000000;
+ else
+ rate = 25000000;
+
+ return mmc_clk_set_timing(hw, rate);
+}
+
+static int mmc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return mmc_clk_set_timing(hw, rate);
+}
+
+static struct clk_ops clk_mmc_ops = {
+ .prepare = mmc_clk_prepare,
+ .determine_rate = mmc_clk_determine_rate,
+ .set_rate = mmc_clk_set_rate,
+ .recalc_rate = mmc_clk_recalc_rate,
+};
+
+static struct clk *hisi_register_clk_mmc(struct hisi_mmc_clock *mmc_clk,
+ void __iomem *base, struct device_node *np)
+{
+ struct clk_mmc *mclk;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ mclk = kzalloc(sizeof(*mclk), GFP_KERNEL);
+ if (!mclk) {
+ pr_err("%s: fail to allocate mmc clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = mmc_clk->name;
+ init.ops = &clk_mmc_ops;
+ init.flags = mmc_clk->flags | CLK_IS_BASIC;
+ init.parent_names = (mmc_clk->parent_name ? &mmc_clk->parent_name : NULL);
+ init.num_parents = (mmc_clk->parent_name ? 1 : 0);
+ mclk->hw.init = &init;
+
+ mclk->id = mmc_clk->id;
+ mclk->clken_reg = base + mmc_clk->clken_reg;
+ mclk->clken_bit = mmc_clk->clken_bit;
+ mclk->div_reg = base + mmc_clk->div_reg;
+ mclk->div_off = mmc_clk->div_off;
+ mclk->div_bits = mmc_clk->div_bits;
+ mclk->drv_reg = base + mmc_clk->drv_reg;
+ mclk->drv_off = mmc_clk->drv_off;
+ mclk->drv_bits = mmc_clk->drv_bits;
+ mclk->sam_reg = base + mmc_clk->sam_reg;
+ mclk->sam_off = mmc_clk->sam_off;
+ mclk->sam_bits = mmc_clk->sam_bits;
+
+ clk = clk_register(NULL, &mclk->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ kfree(mclk);
+ return clk;
+}
+
+static void __init hi3620_mmc_clk_init(struct device_node *node)
+{
+ void __iomem *base;
+ int i, num = ARRAY_SIZE(hi3620_mmc_clks);
+ struct clk_onecell_data *clk_data;
+
+ if (!node) {
+ pr_err("failed to find pctrl node in DTS\n");
+ return;
+ }
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("failed to map pctrl\n");
+ return;
+ }
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (WARN_ON(!clk_data))
+ return;
+
+ clk_data->clks = kzalloc(sizeof(struct clk *) * num, GFP_KERNEL);
+ if (!clk_data->clks) {
+ pr_err("%s: fail to allocate mmc clk\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < num; i++) {
+ struct hisi_mmc_clock *mmc_clk = &hi3620_mmc_clks[i];
+ clk_data->clks[mmc_clk->id] =
+ hisi_register_clk_mmc(mmc_clk, base, node);
+ }
+
+ clk_data->clk_num = num;
+ of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+CLK_OF_DECLARE(hi3620_mmc_clk, "hisilicon,hi3620-mmc-clock", hi3620_mmc_clk_init);
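mmc_clk_delay() above copies the low 'len' bits of 'para' into 'val' starting
at bit 'off', one bit at a time. An equivalent mask-based form, shown purely
as an illustration of what the loop computes:

static u32 mmc_clk_delay_masked(u32 val, u32 para, u32 off, u32 len)
{
	u32 mask = (len >= 32) ? ~0U : ((1U << len) - 1);

	return (val & ~(mask << off)) | ((para & mask) << off);
}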
diff --git a/drivers/clk/hisilicon/clk-hip04.c b/drivers/clk/hisilicon/clk-hip04.c
new file mode 100644
index 000000000000..132b57a0ce09
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hip04.c
@@ -0,0 +1,58 @@
+/*
+ * Hisilicon HiP04 clock driver
+ *
+ * Copyright (c) 2013-2014 Hisilicon Limited.
+ * Copyright (c) 2013-2014 Linaro Limited.
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include <dt-bindings/clock/hip04-clock.h>
+
+#include "clk.h"
+
+/* fixed rate clocks */
+static struct hisi_fixed_rate_clock hip04_fixed_rate_clks[] __initdata = {
+ { HIP04_OSC50M, "osc50m", NULL, CLK_IS_ROOT, 50000000, },
+ { HIP04_CLK_50M, "clk50m", NULL, CLK_IS_ROOT, 50000000, },
+ { HIP04_CLK_168M, "clk168m", NULL, CLK_IS_ROOT, 168750000, },
+};
+
+static void __init hip04_clk_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+
+ clk_data = hisi_clk_init(np, HIP04_NR_CLKS);
+ if (!clk_data)
+ return;
+
+ hisi_clk_register_fixed_rate(hip04_fixed_rate_clks,
+ ARRAY_SIZE(hip04_fixed_rate_clks),
+ clk_data);
+}
+CLK_OF_DECLARE(hip04_clk, "hisilicon,hip04-clock", hip04_clk_init);
diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c
index a3a7152c92d9..276f672e7b1a 100644
--- a/drivers/clk/hisilicon/clk.c
+++ b/drivers/clk/hisilicon/clk.c
@@ -37,23 +37,49 @@
#include "clk.h"
static DEFINE_SPINLOCK(hisi_clk_lock);
-static struct clk **clk_table;
-static struct clk_onecell_data clk_data;
-void __init hisi_clk_init(struct device_node *np, int nr_clks)
+struct hisi_clock_data __init *hisi_clk_init(struct device_node *np,
+ int nr_clks)
{
+ struct hisi_clock_data *clk_data;
+ struct clk **clk_table;
+ void __iomem *base;
+
+ if (np) {
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("failed to map Hisilicon clock registers\n");
+ goto err;
+ }
+ } else {
+ pr_err("failed to find Hisilicon clock node in DTS\n");
+ goto err;
+ }
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data) {
+ pr_err("%s: could not allocate clock data\n", __func__);
+ goto err;
+ }
+ clk_data->base = base;
+
clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL);
if (!clk_table) {
pr_err("%s: could not allocate clock lookup table\n", __func__);
- return;
+ goto err_data;
}
- clk_data.clks = clk_table;
- clk_data.clk_num = nr_clks;
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ clk_data->clk_data.clks = clk_table;
+ clk_data->clk_data.clk_num = nr_clks;
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data->clk_data);
+ return clk_data;
+err_data:
+ kfree(clk_data);
+err:
+ return NULL;
}
void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *clks,
- int nums, void __iomem *base)
+ int nums, struct hisi_clock_data *data)
{
struct clk *clk;
int i;
@@ -68,11 +94,13 @@ void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *clks,
__func__, clks[i].name);
continue;
}
+ data->clk_data.clks[clks[i].id] = clk;
}
}
void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *clks,
- int nums, void __iomem *base)
+ int nums,
+ struct hisi_clock_data *data)
{
struct clk *clk;
int i;
@@ -87,13 +115,15 @@ void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *clks,
__func__, clks[i].name);
continue;
}
+ data->clk_data.clks[clks[i].id] = clk;
}
}
void __init hisi_clk_register_mux(struct hisi_mux_clock *clks,
- int nums, void __iomem *base)
+ int nums, struct hisi_clock_data *data)
{
struct clk *clk;
+ void __iomem *base = data->base;
int i;
for (i = 0; i < nums; i++) {
@@ -111,14 +141,15 @@ void __init hisi_clk_register_mux(struct hisi_mux_clock *clks,
if (clks[i].alias)
clk_register_clkdev(clk, clks[i].alias, NULL);
- clk_table[clks[i].id] = clk;
+ data->clk_data.clks[clks[i].id] = clk;
}
}
void __init hisi_clk_register_divider(struct hisi_divider_clock *clks,
- int nums, void __iomem *base)
+ int nums, struct hisi_clock_data *data)
{
struct clk *clk;
+ void __iomem *base = data->base;
int i;
for (i = 0; i < nums; i++) {
@@ -139,14 +170,15 @@ void __init hisi_clk_register_divider(struct hisi_divider_clock *clks,
if (clks[i].alias)
clk_register_clkdev(clk, clks[i].alias, NULL);
- clk_table[clks[i].id] = clk;
+ data->clk_data.clks[clks[i].id] = clk;
}
}
void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks,
- int nums, void __iomem *base)
+ int nums, struct hisi_clock_data *data)
{
struct clk *clk;
+ void __iomem *base = data->base;
int i;
for (i = 0; i < nums; i++) {
@@ -166,6 +198,6 @@ void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks,
if (clks[i].alias)
clk_register_clkdev(clk, clks[i].alias, NULL);
- clk_table[clks[i].id] = clk;
+ data->clk_data.clks[clks[i].id] = clk;
}
}
diff --git a/drivers/clk/hisilicon/clk.h b/drivers/clk/hisilicon/clk.h
index 4a6beebefb7a..43fa5da88f02 100644
--- a/drivers/clk/hisilicon/clk.h
+++ b/drivers/clk/hisilicon/clk.h
@@ -30,6 +30,11 @@
#include <linux/io.h>
#include <linux/spinlock.h>
+struct hisi_clock_data {
+ struct clk_onecell_data clk_data;
+ void __iomem *base;
+};
+
struct hisi_fixed_rate_clock {
unsigned int id;
char *name;
@@ -89,15 +94,15 @@ struct clk *hisi_register_clkgate_sep(struct device *, const char *,
void __iomem *, u8,
u8, spinlock_t *);
-void __init hisi_clk_init(struct device_node *, int);
+struct hisi_clock_data __init *hisi_clk_init(struct device_node *, int);
void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *,
- int, void __iomem *);
+ int, struct hisi_clock_data *);
void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *,
- int, void __iomem *);
+ int, struct hisi_clock_data *);
void __init hisi_clk_register_mux(struct hisi_mux_clock *, int,
- void __iomem *);
+ struct hisi_clock_data *);
void __init hisi_clk_register_divider(struct hisi_divider_clock *,
- int, void __iomem *);
+ int, struct hisi_clock_data *);
void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *,
- int, void __iomem *);
+ int, struct hisi_clock_data *);
#endif /* __HISI_CLK_H */
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 80c1dd15d15c..23a56f561812 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -40,15 +40,19 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
for (i = 0; i < factor->ftbl_cnt; i++) {
prev_rate = rate;
- rate = (((*prate / 10000) * factor->ftbl[i].num) /
- (factor->ftbl[i].den * factor->masks->factor)) * 10000;
+ rate = (((*prate / 10000) * factor->ftbl[i].den) /
+ (factor->ftbl[i].num * factor->masks->factor)) * 10000;
if (rate > drate)
break;
}
- if (i == 0)
+ if ((i == 0) || (i == factor->ftbl_cnt)) {
return rate;
- else
- return prev_rate;
+ } else {
+ if ((drate - prev_rate) > (rate - drate))
+ return rate;
+ else
+ return prev_rate;
+ }
}
static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
@@ -64,7 +68,7 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
num = (val >> masks->num_shift) & masks->num_mask;
/* calculate denominator */
- den = (val >> masks->den_shift) & masks->num_mask;
+ den = (val >> masks->den_shift) & masks->den_mask;
if (!den)
return 0;
@@ -85,8 +89,8 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
for (i = 0; i < factor->ftbl_cnt; i++) {
prev_rate = rate;
- rate = (((prate / 10000) * factor->ftbl[i].num) /
- (factor->ftbl[i].den * factor->masks->factor)) * 10000;
+ rate = (((prate / 10000) * factor->ftbl[i].den) /
+ (factor->ftbl[i].num * factor->masks->factor)) * 10000;
if (rate > drate)
break;
}
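The round_rate change above both swaps num/den in the rate formula and picks
the table entry closest to the requested rate instead of always rounding down.
A worked illustration with made-up numbers:

/*
 * Hypothetical example: prate = 26000000, masks->factor = 1,
 * ftbl[] = { { .num = 4, .den = 1 }, { .num = 2, .den = 1 } }
 *
 * entry 0: rate = (26000000 / 10000) * 1 / (4 * 1) * 10000 =  6500000
 * entry 1: rate = (26000000 / 10000) * 1 / (2 * 1) * 10000 = 13000000
 *
 * For drate = 12000000 the loop stops at entry 1 (13000000 > drate).
 * The old code always returned prev_rate (6500000); the new code
 * compares the distances (12000000 - 6500000 = 5500000 versus
 * 13000000 - 12000000 = 1000000) and returns 13000000 instead.
 */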
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index c339b829d3e3..693f7be129f1 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -13,6 +13,14 @@ config ARMADA_370_CLK
select MVEBU_CLK_CPU
select MVEBU_CLK_COREDIV
+config ARMADA_375_CLK
+ bool
+ select MVEBU_CLK_COMMON
+
+config ARMADA_38X_CLK
+ bool
+ select MVEBU_CLK_COMMON
+
config ARMADA_XP_CLK
bool
select MVEBU_CLK_COMMON
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
index 21bbfb4a9f42..4c66162fb0b4 100644
--- a/drivers/clk/mvebu/Makefile
+++ b/drivers/clk/mvebu/Makefile
@@ -3,6 +3,8 @@ obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o
obj-$(CONFIG_MVEBU_CLK_COREDIV) += clk-corediv.o
obj-$(CONFIG_ARMADA_370_CLK) += armada-370.o
+obj-$(CONFIG_ARMADA_375_CLK) += armada-375.o
+obj-$(CONFIG_ARMADA_38X_CLK) += armada-38x.o
obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o
obj-$(CONFIG_DOVE_CLK) += dove.o
obj-$(CONFIG_KIRKWOOD_CLK) += kirkwood.o
diff --git a/drivers/clk/mvebu/armada-375.c b/drivers/clk/mvebu/armada-375.c
new file mode 100644
index 000000000000..c991a4d95e10
--- /dev/null
+++ b/drivers/clk/mvebu/armada-375.c
@@ -0,0 +1,184 @@
+/*
+ * Marvell Armada 375 SoC clocks
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Andrew Lunn <andrew@lunn.ch>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include "common.h"
+
+/*
+ * Core Clocks
+ */
+
+/*
+ * For the Armada 375 SoCs, the CPU, DDR and L2 clocks frequencies are
+ * all modified at the same time, and not separately as for the Armada
+ * 370 or the Armada XP SoCs.
+ *
+ * SAR0[21:17] : CPU frequency DDR frequency L2 frequency
+ * 6 = 400 MHz 400 MHz 200 MHz
+ * 15 = 600 MHz 600 MHz 300 MHz
+ * 21 = 800 MHz 534 MHz 400 MHz
+ * 25 = 1000 MHz 500 MHz 500 MHz
+ * others reserved.
+ *
+ * SAR0[22] : TCLK frequency
+ * 0 = 166 MHz
+ * 1 = 200 MHz
+ */
+
+#define SAR1_A375_TCLK_FREQ_OPT 22
+#define SAR1_A375_TCLK_FREQ_OPT_MASK 0x1
+#define SAR1_A375_CPU_DDR_L2_FREQ_OPT 17
+#define SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK 0x1F
+
+static const u32 armada_375_tclk_frequencies[] __initconst = {
+ 166000000,
+ 200000000,
+};
+
+static u32 __init armada_375_get_tclk_freq(void __iomem *sar)
+{
+ u8 tclk_freq_select;
+
+ tclk_freq_select = ((readl(sar) >> SAR1_A375_TCLK_FREQ_OPT) &
+ SAR1_A375_TCLK_FREQ_OPT_MASK);
+ return armada_375_tclk_frequencies[tclk_freq_select];
+}
+
+
+static const u32 armada_375_cpu_frequencies[] __initconst = {
+ 0, 0, 0, 0, 0, 0,
+ 400000000,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 600000000,
+ 0, 0, 0, 0, 0,
+ 800000000,
+ 0, 0, 0,
+ 1000000000,
+};
+
+static u32 __init armada_375_get_cpu_freq(void __iomem *sar)
+{
+ u8 cpu_freq_select;
+
+ cpu_freq_select = ((readl(sar) >> SAR1_A375_CPU_DDR_L2_FREQ_OPT) &
+ SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK);
+ if (cpu_freq_select >= ARRAY_SIZE(armada_375_cpu_frequencies)) {
+ pr_err("Selected CPU frequency (%d) unsupported\n",
+ cpu_freq_select);
+ return 0;
+ } else
+ return armada_375_cpu_frequencies[cpu_freq_select];
+}
+
+enum { A375_CPU_TO_DDR, A375_CPU_TO_L2 };
+
+static const struct coreclk_ratio armada_375_coreclk_ratios[] __initconst = {
+ { .id = A375_CPU_TO_L2, .name = "l2clk" },
+ { .id = A375_CPU_TO_DDR, .name = "ddrclk" },
+};
+
+static const int armada_375_cpu_l2_ratios[32][2] __initconst = {
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {1, 2}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {1, 2},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {1, 2}, {0, 1}, {0, 1},
+ {0, 1}, {1, 2}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static const int armada_375_cpu_ddr_ratios[32][2] __initconst = {
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {1, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {2, 3},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {2, 3}, {0, 1}, {0, 1},
+ {0, 1}, {1, 2}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static void __init armada_375_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ u32 opt = ((readl(sar) >> SAR1_A375_CPU_DDR_L2_FREQ_OPT) &
+ SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK);
+
+ switch (id) {
+ case A375_CPU_TO_L2:
+ *mult = armada_375_cpu_l2_ratios[opt][0];
+ *div = armada_375_cpu_l2_ratios[opt][1];
+ break;
+ case A375_CPU_TO_DDR:
+ *mult = armada_375_cpu_ddr_ratios[opt][0];
+ *div = armada_375_cpu_ddr_ratios[opt][1];
+ break;
+ }
+}
+
+static const struct coreclk_soc_desc armada_375_coreclks = {
+ .get_tclk_freq = armada_375_get_tclk_freq,
+ .get_cpu_freq = armada_375_get_cpu_freq,
+ .get_clk_ratio = armada_375_get_clk_ratio,
+ .ratios = armada_375_coreclk_ratios,
+ .num_ratios = ARRAY_SIZE(armada_375_coreclk_ratios),
+};
+
+static void __init armada_375_coreclk_init(struct device_node *np)
+{
+ mvebu_coreclk_setup(np, &armada_375_coreclks);
+}
+CLK_OF_DECLARE(armada_375_core_clk, "marvell,armada-375-core-clock",
+ armada_375_coreclk_init);
+
+/*
+ * Clock Gating Control
+ */
+static const struct clk_gating_soc_desc armada_375_gating_desc[] __initconst = {
+ { "mu", NULL, 2 },
+ { "pp", NULL, 3 },
+ { "ptp", NULL, 4 },
+ { "pex0", NULL, 5 },
+ { "pex1", NULL, 6 },
+ { "audio", NULL, 8 },
+ { "nd_clk", "nand", 11 },
+ { "sata0_link", "sata0_core", 14 },
+ { "sata0_core", NULL, 15 },
+ { "usb3", NULL, 16 },
+ { "sdio", NULL, 17 },
+ { "usb", NULL, 18 },
+ { "gop", NULL, 19 },
+ { "sata1_link", "sata1_core", 20 },
+ { "sata1_core", NULL, 21 },
+ { "xor0", NULL, 22 },
+ { "xor1", NULL, 23 },
+ { "copro", NULL, 24 },
+ { "tdm", NULL, 25 },
+ { "crypto0_enc", NULL, 28 },
+ { "crypto0_core", NULL, 29 },
+ { "crypto1_enc", NULL, 30 },
+ { "crypto1_core", NULL, 31 },
+ { }
+};
+
+static void __init armada_375_clk_gating_init(struct device_node *np)
+{
+ mvebu_clk_gating_setup(np, armada_375_gating_desc);
+}
+CLK_OF_DECLARE(armada_375_clk_gating, "marvell,armada-375-gating-clock",
+ armada_375_clk_gating_init);
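Putting the Armada 375 tables above together for one concrete sample-at-reset
value, as a worked example:

/*
 * SAR0[21:17] = 21 (the 800 MHz operating point):
 *
 *	armada_375_cpu_frequencies[21] = 800000000  -> cpuclk = 800 MHz
 *	armada_375_cpu_l2_ratios[21]   = {1, 2}     -> l2clk  = 800 * 1/2 = 400 MHz
 *	armada_375_cpu_ddr_ratios[21]  = {2, 3}     -> ddrclk = 800 * 2/3 ~= 533 MHz
 *
 * which matches the "800 MHz / 534 MHz / 400 MHz" row documented in the
 * comment block at the top of the file.
 */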
diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
new file mode 100644
index 000000000000..8bccf4ecdab6
--- /dev/null
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -0,0 +1,167 @@
+/*
+ * Marvell Armada 380/385 SoC clocks
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Andrew Lunn <andrew@lunn.ch>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include "common.h"
+
+/*
+ * SAR[14:10] : Ratios between PCLK0, NBCLK, HCLK and DRAM clocks
+ *
+ * SAR[15] : TCLK frequency
+ * 0 = 250 MHz
+ * 1 = 200 MHz
+ */
+
+#define SAR_A380_TCLK_FREQ_OPT 15
+#define SAR_A380_TCLK_FREQ_OPT_MASK 0x1
+#define SAR_A380_CPU_DDR_L2_FREQ_OPT 10
+#define SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK 0x1F
+
+static const u32 armada_38x_tclk_frequencies[] __initconst = {
+ 250000000,
+ 200000000,
+};
+
+static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
+{
+ u8 tclk_freq_select;
+
+ tclk_freq_select = ((readl(sar) >> SAR_A380_TCLK_FREQ_OPT) &
+ SAR_A380_TCLK_FREQ_OPT_MASK);
+ return armada_38x_tclk_frequencies[tclk_freq_select];
+}
+
+static const u32 armada_38x_cpu_frequencies[] __initconst = {
+ 0, 0, 0, 0,
+ 1066 * 1000 * 1000, 0, 0, 0,
+ 1332 * 1000 * 1000, 0, 0, 0,
+ 1600 * 1000 * 1000,
+};
+
+static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
+{
+ u8 cpu_freq_select;
+
+ cpu_freq_select = ((readl(sar) >> SAR_A380_CPU_DDR_L2_FREQ_OPT) &
+ SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK);
+ if (cpu_freq_select >= ARRAY_SIZE(armada_38x_cpu_frequencies)) {
+ pr_err("Selected CPU frequency (%d) unsupported\n",
+ cpu_freq_select);
+ return 0;
+ }
+
+ return armada_38x_cpu_frequencies[cpu_freq_select];
+}
+
+enum { A380_CPU_TO_DDR, A380_CPU_TO_L2 };
+
+static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
+ { .id = A380_CPU_TO_L2, .name = "l2clk" },
+ { .id = A380_CPU_TO_DDR, .name = "ddrclk" },
+};
+
+static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static void __init armada_38x_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ u32 opt = ((readl(sar) >> SAR_A380_CPU_DDR_L2_FREQ_OPT) &
+ SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK);
+
+ switch (id) {
+ case A380_CPU_TO_L2:
+ *mult = armada_38x_cpu_l2_ratios[opt][0];
+ *div = armada_38x_cpu_l2_ratios[opt][1];
+ break;
+ case A380_CPU_TO_DDR:
+ *mult = armada_38x_cpu_ddr_ratios[opt][0];
+ *div = armada_38x_cpu_ddr_ratios[opt][1];
+ break;
+ }
+}
+
+static const struct coreclk_soc_desc armada_38x_coreclks = {
+ .get_tclk_freq = armada_38x_get_tclk_freq,
+ .get_cpu_freq = armada_38x_get_cpu_freq,
+ .get_clk_ratio = armada_38x_get_clk_ratio,
+ .ratios = armada_38x_coreclk_ratios,
+ .num_ratios = ARRAY_SIZE(armada_38x_coreclk_ratios),
+};
+
+static void __init armada_38x_coreclk_init(struct device_node *np)
+{
+ mvebu_coreclk_setup(np, &armada_38x_coreclks);
+}
+CLK_OF_DECLARE(armada_38x_core_clk, "marvell,armada-380-core-clock",
+ armada_38x_coreclk_init);
+
+/*
+ * Clock Gating Control
+ */
+static const struct clk_gating_soc_desc armada_38x_gating_desc[] __initconst = {
+ { "audio", NULL, 0 },
+ { "ge2", NULL, 2 },
+ { "ge1", NULL, 3 },
+ { "ge0", NULL, 4 },
+ { "pex1", NULL, 5 },
+ { "pex2", NULL, 6 },
+ { "pex3", NULL, 7 },
+ { "pex0", NULL, 8 },
+ { "usb3h0", NULL, 9 },
+ { "usb3h1", NULL, 10 },
+ { "usb3d", NULL, 11 },
+ { "bm", NULL, 13 },
+ { "crypto0z", NULL, 14 },
+ { "sata0", NULL, 15 },
+ { "crypto1z", NULL, 16 },
+ { "sdio", NULL, 17 },
+ { "usb2", NULL, 18 },
+ { "crypto1", NULL, 21 },
+ { "xor0", NULL, 22 },
+ { "crypto0", NULL, 23 },
+ { "tdm", NULL, 25 },
+ { "xor1", NULL, 28 },
+ { "sata1", NULL, 30 },
+ { }
+};
+
+static void __init armada_38x_clk_gating_init(struct device_node *np)
+{
+ mvebu_clk_gating_setup(np, armada_38x_gating_desc);
+}
+CLK_OF_DECLARE(armada_38x_clk_gating, "marvell,armada-380-gating-clock",
+ armada_38x_clk_gating_init);
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 7162615bcdcd..d1e5863d3375 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -18,26 +18,56 @@
#include "common.h"
#define CORE_CLK_DIV_RATIO_MASK 0xff
-#define CORE_CLK_DIV_RATIO_RELOAD BIT(8)
-#define CORE_CLK_DIV_ENABLE_OFFSET 24
-#define CORE_CLK_DIV_RATIO_OFFSET 0x8
+/*
+ * This structure describes the hardware details (bit offset and mask)
+ * needed to configure one particular core divider clock. Those details
+ * may differ from one SoC to another, so this structure is typically
+ * instantiated statically.
+ */
struct clk_corediv_desc {
unsigned int mask;
unsigned int offset;
unsigned int fieldbit;
};
+/*
+ * This structure describes the hardware details to configure the core
+ * divider clocks on a given SoC. Among other things, it points to the
+ * array of core divider clock descriptors for this SoC, as well as
+ * the corresponding operations to manipulate them.
+ */
+struct clk_corediv_soc_desc {
+ const struct clk_corediv_desc *descs;
+ unsigned int ndescs;
+ const struct clk_ops ops;
+ u32 ratio_reload;
+ u32 enable_bit_offset;
+ u32 ratio_offset;
+};
+
+/*
+ * This structure represents one core divider clock for the clock
+ * framework, and is dynamically allocated for each core divider clock
+ * existing in the current SoC.
+ */
struct clk_corediv {
struct clk_hw hw;
void __iomem *reg;
- struct clk_corediv_desc desc;
+ const struct clk_corediv_desc *desc;
+ const struct clk_corediv_soc_desc *soc_desc;
spinlock_t lock;
};
static struct clk_onecell_data clk_data;
-static const struct clk_corediv_desc mvebu_corediv_desc[] __initconst = {
+/*
+ * Description of the core divider clocks available. For now, we
+ * support only NAND, and it is available at the same register
+ * locations regardless of the SoC.
+ */
+static const struct clk_corediv_desc mvebu_corediv_desc[] = {
{ .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */
};
@@ -46,8 +76,9 @@ static const struct clk_corediv_desc mvebu_corediv_desc[] __initconst = {
static int clk_corediv_is_enabled(struct clk_hw *hwclk)
{
struct clk_corediv *corediv = to_corediv_clk(hwclk);
- struct clk_corediv_desc *desc = &corediv->desc;
- u32 enable_mask = BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET;
+ const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
+ const struct clk_corediv_desc *desc = corediv->desc;
+ u32 enable_mask = BIT(desc->fieldbit) << soc_desc->enable_bit_offset;
return !!(readl(corediv->reg) & enable_mask);
}
@@ -55,14 +86,15 @@ static int clk_corediv_is_enabled(struct clk_hw *hwclk)
static int clk_corediv_enable(struct clk_hw *hwclk)
{
struct clk_corediv *corediv = to_corediv_clk(hwclk);
- struct clk_corediv_desc *desc = &corediv->desc;
+ const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
+ const struct clk_corediv_desc *desc = corediv->desc;
unsigned long flags = 0;
u32 reg;
spin_lock_irqsave(&corediv->lock, flags);
reg = readl(corediv->reg);
- reg |= (BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET);
+ reg |= (BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
writel(reg, corediv->reg);
spin_unlock_irqrestore(&corediv->lock, flags);
@@ -73,14 +105,15 @@ static int clk_corediv_enable(struct clk_hw *hwclk)
static void clk_corediv_disable(struct clk_hw *hwclk)
{
struct clk_corediv *corediv = to_corediv_clk(hwclk);
- struct clk_corediv_desc *desc = &corediv->desc;
+ const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
+ const struct clk_corediv_desc *desc = corediv->desc;
unsigned long flags = 0;
u32 reg;
spin_lock_irqsave(&corediv->lock, flags);
reg = readl(corediv->reg);
- reg &= ~(BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET);
+ reg &= ~(BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
writel(reg, corediv->reg);
spin_unlock_irqrestore(&corediv->lock, flags);
@@ -90,10 +123,11 @@ static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct clk_corediv *corediv = to_corediv_clk(hwclk);
- struct clk_corediv_desc *desc = &corediv->desc;
+ const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
+ const struct clk_corediv_desc *desc = corediv->desc;
u32 reg, div;
- reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
+ reg = readl(corediv->reg + soc_desc->ratio_offset);
div = (reg >> desc->offset) & desc->mask;
return parent_rate / div;
}
@@ -117,7 +151,8 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
unsigned long parent_rate)
{
struct clk_corediv *corediv = to_corediv_clk(hwclk);
- struct clk_corediv_desc *desc = &corediv->desc;
+ const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
+ const struct clk_corediv_desc *desc = corediv->desc;
unsigned long flags = 0;
u32 reg, div;
@@ -126,17 +161,17 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
spin_lock_irqsave(&corediv->lock, flags);
/* Write new divider to the divider ratio register */
- reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
+ reg = readl(corediv->reg + soc_desc->ratio_offset);
reg &= ~(desc->mask << desc->offset);
reg |= (div & desc->mask) << desc->offset;
- writel(reg, corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
+ writel(reg, corediv->reg + soc_desc->ratio_offset);
/* Set reload-force for this clock */
reg = readl(corediv->reg) | BIT(desc->fieldbit);
writel(reg, corediv->reg);
/* Now trigger the clock update */
- reg = readl(corediv->reg) | CORE_CLK_DIV_RATIO_RELOAD;
+ reg = readl(corediv->reg) | soc_desc->ratio_reload;
writel(reg, corediv->reg);
/*
@@ -144,7 +179,7 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
* ratios request and the reload request.
*/
udelay(1000);
- reg &= ~(CORE_CLK_DIV_RATIO_MASK | CORE_CLK_DIV_RATIO_RELOAD);
+ reg &= ~(CORE_CLK_DIV_RATIO_MASK | soc_desc->ratio_reload);
writel(reg, corediv->reg);
udelay(1000);
@@ -153,16 +188,53 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
return 0;
}
-static const struct clk_ops corediv_ops = {
- .enable = clk_corediv_enable,
- .disable = clk_corediv_disable,
- .is_enabled = clk_corediv_is_enabled,
- .recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
- .set_rate = clk_corediv_set_rate,
+static const struct clk_corediv_soc_desc armada370_corediv_soc = {
+ .descs = mvebu_corediv_desc,
+ .ndescs = ARRAY_SIZE(mvebu_corediv_desc),
+ .ops = {
+ .enable = clk_corediv_enable,
+ .disable = clk_corediv_disable,
+ .is_enabled = clk_corediv_is_enabled,
+ .recalc_rate = clk_corediv_recalc_rate,
+ .round_rate = clk_corediv_round_rate,
+ .set_rate = clk_corediv_set_rate,
+ },
+ .ratio_reload = BIT(8),
+ .enable_bit_offset = 24,
+ .ratio_offset = 0x8,
+};
+
+static const struct clk_corediv_soc_desc armada380_corediv_soc = {
+ .descs = mvebu_corediv_desc,
+ .ndescs = ARRAY_SIZE(mvebu_corediv_desc),
+ .ops = {
+ .enable = clk_corediv_enable,
+ .disable = clk_corediv_disable,
+ .is_enabled = clk_corediv_is_enabled,
+ .recalc_rate = clk_corediv_recalc_rate,
+ .round_rate = clk_corediv_round_rate,
+ .set_rate = clk_corediv_set_rate,
+ },
+ .ratio_reload = BIT(8),
+ .enable_bit_offset = 16,
+ .ratio_offset = 0x4,
};
-static void __init mvebu_corediv_clk_init(struct device_node *node)
+static const struct clk_corediv_soc_desc armada375_corediv_soc = {
+ .descs = mvebu_corediv_desc,
+ .ndescs = ARRAY_SIZE(mvebu_corediv_desc),
+ .ops = {
+ .recalc_rate = clk_corediv_recalc_rate,
+ .round_rate = clk_corediv_round_rate,
+ .set_rate = clk_corediv_set_rate,
+ },
+ .ratio_reload = BIT(8),
+ .ratio_offset = 0x4,
+};
+
+static void __init
+mvebu_corediv_clk_init(struct device_node *node,
+ const struct clk_corediv_soc_desc *soc_desc)
{
struct clk_init_data init;
struct clk_corediv *corediv;
@@ -178,7 +250,7 @@ static void __init mvebu_corediv_clk_init(struct device_node *node)
parent_name = of_clk_get_parent_name(node, 0);
- clk_data.clk_num = ARRAY_SIZE(mvebu_corediv_desc);
+ clk_data.clk_num = soc_desc->ndescs;
/* clks holds the clock array */
clks = kcalloc(clk_data.clk_num, sizeof(struct clk *),
@@ -199,10 +271,11 @@ static void __init mvebu_corediv_clk_init(struct device_node *node)
init.num_parents = 1;
init.parent_names = &parent_name;
init.name = clk_name;
- init.ops = &corediv_ops;
+ init.ops = &soc_desc->ops;
init.flags = 0;
- corediv[i].desc = mvebu_corediv_desc[i];
+ corediv[i].soc_desc = soc_desc;
+ corediv[i].desc = soc_desc->descs + i;
corediv[i].reg = base;
corediv[i].hw.init = &init;
@@ -219,5 +292,24 @@ err_free_clks:
err_unmap:
iounmap(base);
}
-CLK_OF_DECLARE(mvebu_corediv_clk, "marvell,armada-370-corediv-clock",
- mvebu_corediv_clk_init);
+
+static void __init armada370_corediv_clk_init(struct device_node *node)
+{
+ return mvebu_corediv_clk_init(node, &armada370_corediv_soc);
+}
+CLK_OF_DECLARE(armada370_corediv_clk, "marvell,armada-370-corediv-clock",
+ armada370_corediv_clk_init);
+
+static void __init armada375_corediv_clk_init(struct device_node *node)
+{
+ return mvebu_corediv_clk_init(node, &armada375_corediv_soc);
+}
+CLK_OF_DECLARE(armada375_corediv_clk, "marvell,armada-375-corediv-clock",
+ armada375_corediv_clk_init);
+
+static void __init armada380_corediv_clk_init(struct device_node *node)
+{
+ return mvebu_corediv_clk_init(node, &armada380_corediv_soc);
+}
+CLK_OF_DECLARE(armada380_corediv_clk, "marvell,armada-380-corediv-clock",
+ armada380_corediv_clk_init);
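With the per-SoC descriptor introduced above, supporting a further SoC variant
is mostly a matter of declaring one more table. A hypothetical sketch (the
compatible string and register layout are invented for illustration):

static const struct clk_corediv_soc_desc example_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.ratio_offset = 0x4,
};

static void __init example_corediv_clk_init(struct device_node *node)
{
	mvebu_corediv_clk_init(node, &example_corediv_soc);
}
CLK_OF_DECLARE(example_corediv_clk, "vendor,example-corediv-clock",
	       example_corediv_clk_init);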
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 884187fbfe00..13eae14c2cc2 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -17,7 +17,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <dt-bindings/clk/exynos-audss-clk.h>
+#include <dt-bindings/clock/exynos-audss-clk.h>
enum exynos_audss_clk_type {
TYPE_EXYNOS4210,
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 010f071af883..b4f967210175 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -16,6 +16,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
#include "clk.h"
@@ -130,6 +131,17 @@ enum exynos4_plls {
nr_plls /* number of PLLs */
};
+static void __iomem *reg_base;
+static enum exynos4_soc exynos4_soc;
+
+/*
+ * Support for CMU save/restore across system suspends
+ */
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *exynos4_save_common;
+static struct samsung_clk_reg_dump *exynos4_save_soc;
+static struct samsung_clk_reg_dump *exynos4_save_pll;
+
/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
@@ -154,6 +166,17 @@ static unsigned long exynos4x12_clk_save[] __initdata = {
E4X12_MPLL_CON0,
};
+static unsigned long exynos4_clk_pll_regs[] __initdata = {
+ EPLL_LOCK,
+ VPLL_LOCK,
+ EPLL_CON0,
+ EPLL_CON1,
+ EPLL_CON2,
+ VPLL_CON0,
+ VPLL_CON1,
+ VPLL_CON2,
+};
+
static unsigned long exynos4_clk_regs[] __initdata = {
SRC_LEFTBUS,
DIV_LEFTBUS,
@@ -161,12 +184,6 @@ static unsigned long exynos4_clk_regs[] __initdata = {
SRC_RIGHTBUS,
DIV_RIGHTBUS,
GATE_IP_RIGHTBUS,
- EPLL_CON0,
- EPLL_CON1,
- EPLL_CON2,
- VPLL_CON0,
- VPLL_CON1,
- VPLL_CON2,
SRC_TOP0,
SRC_TOP1,
SRC_CAM,
@@ -227,6 +244,124 @@ static unsigned long exynos4_clk_regs[] __initdata = {
GATE_IP_CPU,
};
+static const struct samsung_clk_reg_dump src_mask_suspend[] = {
+ { .offset = SRC_MASK_TOP, .value = 0x00000001, },
+ { .offset = SRC_MASK_CAM, .value = 0x11111111, },
+ { .offset = SRC_MASK_TV, .value = 0x00000111, },
+ { .offset = SRC_MASK_LCD0, .value = 0x00001111, },
+ { .offset = SRC_MASK_MAUDIO, .value = 0x00000001, },
+ { .offset = SRC_MASK_FSYS, .value = 0x01011111, },
+ { .offset = SRC_MASK_PERIL0, .value = 0x01111111, },
+ { .offset = SRC_MASK_PERIL1, .value = 0x01110111, },
+ { .offset = SRC_MASK_DMC, .value = 0x00010000, },
+};
+
+static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
+ { .offset = E4210_SRC_MASK_LCD1, .value = 0x00001111, },
+};
+
+#define PLL_ENABLED (1 << 31)
+#define PLL_LOCKED (1 << 29)
+
+static void exynos4_clk_wait_for_pll(u32 reg)
+{
+ u32 pll_con;
+
+ pll_con = readl(reg_base + reg);
+ if (!(pll_con & PLL_ENABLED))
+ return;
+
+ while (!(pll_con & PLL_LOCKED)) {
+ cpu_relax();
+ pll_con = readl(reg_base + reg);
+ }
+}
+
+static int exynos4_clk_suspend(void)
+{
+ samsung_clk_save(reg_base, exynos4_save_common,
+ ARRAY_SIZE(exynos4_clk_regs));
+ samsung_clk_save(reg_base, exynos4_save_pll,
+ ARRAY_SIZE(exynos4_clk_pll_regs));
+
+ if (exynos4_soc == EXYNOS4210) {
+ samsung_clk_save(reg_base, exynos4_save_soc,
+ ARRAY_SIZE(exynos4210_clk_save));
+ samsung_clk_restore(reg_base, src_mask_suspend_e4210,
+ ARRAY_SIZE(src_mask_suspend_e4210));
+ } else {
+ samsung_clk_save(reg_base, exynos4_save_soc,
+ ARRAY_SIZE(exynos4x12_clk_save));
+ }
+
+ samsung_clk_restore(reg_base, src_mask_suspend,
+ ARRAY_SIZE(src_mask_suspend));
+
+ return 0;
+}
+
+static void exynos4_clk_resume(void)
+{
+ samsung_clk_restore(reg_base, exynos4_save_pll,
+ ARRAY_SIZE(exynos4_clk_pll_regs));
+
+ exynos4_clk_wait_for_pll(EPLL_CON0);
+ exynos4_clk_wait_for_pll(VPLL_CON0);
+
+ samsung_clk_restore(reg_base, exynos4_save_common,
+ ARRAY_SIZE(exynos4_clk_regs));
+
+ if (exynos4_soc == EXYNOS4210)
+ samsung_clk_restore(reg_base, exynos4_save_soc,
+ ARRAY_SIZE(exynos4210_clk_save));
+ else
+ samsung_clk_restore(reg_base, exynos4_save_soc,
+ ARRAY_SIZE(exynos4x12_clk_save));
+}
+
+static struct syscore_ops exynos4_clk_syscore_ops = {
+ .suspend = exynos4_clk_suspend,
+ .resume = exynos4_clk_resume,
+};
+
+static void exynos4_clk_sleep_init(void)
+{
+ exynos4_save_common = samsung_clk_alloc_reg_dump(exynos4_clk_regs,
+ ARRAY_SIZE(exynos4_clk_regs));
+ if (!exynos4_save_common)
+ goto err_warn;
+
+ if (exynos4_soc == EXYNOS4210)
+ exynos4_save_soc = samsung_clk_alloc_reg_dump(
+ exynos4210_clk_save,
+ ARRAY_SIZE(exynos4210_clk_save));
+ else
+ exynos4_save_soc = samsung_clk_alloc_reg_dump(
+ exynos4x12_clk_save,
+ ARRAY_SIZE(exynos4x12_clk_save));
+ if (!exynos4_save_soc)
+ goto err_common;
+
+ exynos4_save_pll = samsung_clk_alloc_reg_dump(exynos4_clk_pll_regs,
+ ARRAY_SIZE(exynos4_clk_pll_regs));
+ if (!exynos4_save_pll)
+ goto err_soc;
+
+ register_syscore_ops(&exynos4_clk_syscore_ops);
+ return;
+
+err_soc:
+ kfree(exynos4_save_soc);
+err_common:
+ kfree(exynos4_save_common);
+err_warn:
+ pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
+ __func__);
+}
+#else
+static void exynos4_clk_sleep_init(void) {}
+#endif
+
/* list of all parent clock list */
PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", };
@@ -908,12 +1043,13 @@ static unsigned long exynos4_get_xom(void)
return xom;
}
-static void __init exynos4_clk_register_finpll(unsigned long xom)
+static void __init exynos4_clk_register_finpll(void)
{
struct samsung_fixed_rate_clock fclk;
struct clk *clk;
unsigned long finpll_f = 24000000;
char *parent_name;
+ unsigned int xom = exynos4_get_xom();
parent_name = xom & 1 ? "xusbxti" : "xxti";
clk = clk_get(NULL, parent_name);
@@ -1038,27 +1174,21 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
/* register exynos4 clocks */
static void __init exynos4_clk_init(struct device_node *np,
- enum exynos4_soc exynos4_soc,
- void __iomem *reg_base, unsigned long xom)
+ enum exynos4_soc soc)
{
+ exynos4_soc = soc;
+
reg_base = of_iomap(np, 0);
if (!reg_base)
panic("%s: failed to map registers\n", __func__);
- if (exynos4_soc == EXYNOS4210)
- samsung_clk_init(np, reg_base, CLK_NR_CLKS,
- exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
- exynos4210_clk_save, ARRAY_SIZE(exynos4210_clk_save));
- else
- samsung_clk_init(np, reg_base, CLK_NR_CLKS,
- exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
- exynos4x12_clk_save, ARRAY_SIZE(exynos4x12_clk_save));
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS);
samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks,
ARRAY_SIZE(exynos4_fixed_rate_ext_clks),
ext_clk_match);
- exynos4_clk_register_finpll(xom);
+ exynos4_clk_register_finpll();
if (exynos4_soc == EXYNOS4210) {
samsung_clk_register_mux(exynos4210_mux_early,
@@ -1125,6 +1255,8 @@ static void __init exynos4_clk_init(struct device_node *np,
samsung_clk_register_alias(exynos4_aliases,
ARRAY_SIZE(exynos4_aliases));
+ exynos4_clk_sleep_init();
+
pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n"
"\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n",
exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12",
@@ -1136,12 +1268,12 @@ static void __init exynos4_clk_init(struct device_node *np,
static void __init exynos4210_clk_init(struct device_node *np)
{
- exynos4_clk_init(np, EXYNOS4210, NULL, exynos4_get_xom());
+ exynos4_clk_init(np, EXYNOS4210);
}
CLK_OF_DECLARE(exynos4210_clk, "samsung,exynos4210-clock", exynos4210_clk_init);
static void __init exynos4412_clk_init(struct device_node *np)
{
- exynos4_clk_init(np, EXYNOS4X12, NULL, exynos4_get_xom());
+ exynos4_clk_init(np, EXYNOS4X12);
}
CLK_OF_DECLARE(exynos4412_clk, "samsung,exynos4412-clock", exynos4412_clk_init);
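The suspend/resume paths above lean on samsung_clk_save(),
samsung_clk_restore() and samsung_clk_alloc_reg_dump(), whose bodies are not
part of this diff. A minimal sketch of what such helpers are assumed to look
like, inferred only from the call sites above (the implementation itself is an
assumption, hence the example_ prefixes):

static void example_clk_save(void __iomem *base,
			     struct samsung_clk_reg_dump *rd,
			     unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		rd->value = readl(base + rd->offset);
}

static void example_clk_restore(void __iomem *base,
				const struct samsung_clk_reg_dump *rd,
				unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		writel(rd->value, base + rd->offset);
}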
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index ff4beebe1f0b..e7ee4420da81 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -16,6 +16,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
#include "clk.h"
@@ -85,6 +86,11 @@ enum exynos5250_plls {
nr_plls /* number of PLLs */
};
+static void __iomem *reg_base;
+
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *exynos5250_save;
+
/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
@@ -137,6 +143,41 @@ static unsigned long exynos5250_clk_regs[] __initdata = {
GATE_IP_ACP,
};
+static int exynos5250_clk_suspend(void)
+{
+ samsung_clk_save(reg_base, exynos5250_save,
+ ARRAY_SIZE(exynos5250_clk_regs));
+
+ return 0;
+}
+
+static void exynos5250_clk_resume(void)
+{
+ samsung_clk_restore(reg_base, exynos5250_save,
+ ARRAY_SIZE(exynos5250_clk_regs));
+}
+
+static struct syscore_ops exynos5250_clk_syscore_ops = {
+ .suspend = exynos5250_clk_suspend,
+ .resume = exynos5250_clk_resume,
+};
+
+static void exynos5250_clk_sleep_init(void)
+{
+ exynos5250_save = samsung_clk_alloc_reg_dump(exynos5250_clk_regs,
+ ARRAY_SIZE(exynos5250_clk_regs));
+ if (!exynos5250_save) {
+ pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
+ __func__);
+ return;
+ }
+
+ register_syscore_ops(&exynos5250_clk_syscore_ops);
+}
+#else
+static void exynos5250_clk_sleep_init(void) {}
+#endif
+
/* list of all parent clocks */
PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", };
@@ -645,8 +686,6 @@ static struct of_device_id ext_clk_match[] __initdata = {
/* register exynos5250 clocks */
static void __init exynos5250_clk_init(struct device_node *np)
{
- void __iomem *reg_base;
-
if (np) {
reg_base = of_iomap(np, 0);
if (!reg_base)
@@ -655,9 +694,7 @@ static void __init exynos5250_clk_init(struct device_node *np)
panic("%s: unable to determine soc\n", __func__);
}
- samsung_clk_init(np, reg_base, CLK_NR_CLKS,
- exynos5250_clk_regs, ARRAY_SIZE(exynos5250_clk_regs),
- NULL, 0);
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS);
samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5250_fixed_rate_ext_clks),
ext_clk_match);
@@ -685,6 +722,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_register_gate(exynos5250_gate_clks,
ARRAY_SIZE(exynos5250_gate_clks));
+ exynos5250_clk_sleep_init();
+
pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
_get_rate("div_arm2"));
}
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index ab4f2f7d88ef..60b26819bed5 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -16,6 +16,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
#include "clk.h"
@@ -108,6 +109,11 @@ enum exynos5420_plls {
nr_plls /* number of PLLs */
};
+static void __iomem *reg_base;
+
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *exynos5420_save;
+
/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
@@ -174,6 +180,41 @@ static unsigned long exynos5420_clk_regs[] __initdata = {
DIV_KFC0,
};
+static int exynos5420_clk_suspend(void)
+{
+ samsung_clk_save(reg_base, exynos5420_save,
+ ARRAY_SIZE(exynos5420_clk_regs));
+
+ return 0;
+}
+
+static void exynos5420_clk_resume(void)
+{
+ samsung_clk_restore(reg_base, exynos5420_save,
+ ARRAY_SIZE(exynos5420_clk_regs));
+}
+
+static struct syscore_ops exynos5420_clk_syscore_ops = {
+ .suspend = exynos5420_clk_suspend,
+ .resume = exynos5420_clk_resume,
+};
+
+static void exynos5420_clk_sleep_init(void)
+{
+ exynos5420_save = samsung_clk_alloc_reg_dump(exynos5420_clk_regs,
+ ARRAY_SIZE(exynos5420_clk_regs));
+ if (!exynos5420_save) {
+ pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
+ __func__);
+ return;
+ }
+
+ register_syscore_ops(&exynos5420_clk_syscore_ops);
+}
+#else
+static void exynos5420_clk_sleep_init(void) {}
+#endif
+
/* list of all parent clocks */
PNAME(mspll_cpu_p) = { "sclk_cpll", "sclk_dpll",
"sclk_mpll", "sclk_spll" };
@@ -737,8 +778,6 @@ static struct of_device_id ext_clk_match[] __initdata = {
/* register exynos5420 clocks */
static void __init exynos5420_clk_init(struct device_node *np)
{
- void __iomem *reg_base;
-
if (np) {
reg_base = of_iomap(np, 0);
if (!reg_base)
@@ -747,9 +786,7 @@ static void __init exynos5420_clk_init(struct device_node *np)
panic("%s: unable to determine soc\n", __func__);
}
- samsung_clk_init(np, reg_base, CLK_NR_CLKS,
- exynos5420_clk_regs, ARRAY_SIZE(exynos5420_clk_regs),
- NULL, 0);
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS);
samsung_clk_of_register_fixed_ext(exynos5420_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5420_fixed_rate_ext_clks),
ext_clk_match);
@@ -765,5 +802,7 @@ static void __init exynos5420_clk_init(struct device_node *np)
ARRAY_SIZE(exynos5420_div_clks));
samsung_clk_register_gate(exynos5420_gate_clks,
ARRAY_SIZE(exynos5420_gate_clks));
+
+ exynos5420_clk_sleep_init();
}
CLK_OF_DECLARE(exynos5420_clk, "samsung,exynos5420-clock", exynos5420_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index cbc15b56891d..2bfad5a993d0 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -101,7 +101,7 @@ static void __init exynos5440_clk_init(struct device_node *np)
return;
}
- samsung_clk_init(np, reg_base, CLK_NR_CLKS, NULL, 0, NULL, 0);
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS);
samsung_clk_of_register_fixed_ext(exynos5440_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match);
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 8e27aee6887e..8bda658137a8 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
#include <dt-bindings/clock/samsung,s3c64xx-clock.h>
@@ -61,6 +62,13 @@ enum s3c64xx_plls {
apll, mpll, epll,
};
+static void __iomem *reg_base;
+static bool is_s3c6400;
+
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *s3c64xx_save_common;
+static struct samsung_clk_reg_dump *s3c64xx_save_soc;
+
/*
* List of controller registers to be saved and restored during
* a suspend/resume cycle.
@@ -87,6 +95,60 @@ static unsigned long s3c6410_clk_regs[] __initdata = {
MEM0_GATE,
};
+static int s3c64xx_clk_suspend(void)
+{
+ samsung_clk_save(reg_base, s3c64xx_save_common,
+ ARRAY_SIZE(s3c64xx_clk_regs));
+
+ if (!is_s3c6400)
+ samsung_clk_save(reg_base, s3c64xx_save_soc,
+ ARRAY_SIZE(s3c6410_clk_regs));
+
+ return 0;
+}
+
+static void s3c64xx_clk_resume(void)
+{
+ samsung_clk_restore(reg_base, s3c64xx_save_common,
+ ARRAY_SIZE(s3c64xx_clk_regs));
+
+ if (!is_s3c6400)
+ samsung_clk_restore(reg_base, s3c64xx_save_soc,
+ ARRAY_SIZE(s3c6410_clk_regs));
+}
+
+static struct syscore_ops s3c64xx_clk_syscore_ops = {
+ .suspend = s3c64xx_clk_suspend,
+ .resume = s3c64xx_clk_resume,
+};
+
+static void s3c64xx_clk_sleep_init(void)
+{
+ s3c64xx_save_common = samsung_clk_alloc_reg_dump(s3c64xx_clk_regs,
+ ARRAY_SIZE(s3c64xx_clk_regs));
+ if (!s3c64xx_save_common)
+ goto err_warn;
+
+ if (!is_s3c6400) {
+ s3c64xx_save_soc = samsung_clk_alloc_reg_dump(s3c6410_clk_regs,
+ ARRAY_SIZE(s3c6410_clk_regs));
+ if (!s3c64xx_save_soc)
+ goto err_soc;
+ }
+
+ register_syscore_ops(&s3c64xx_clk_syscore_ops);
+ return;
+
+err_soc:
+ kfree(s3c64xx_save_common);
+err_warn:
+ pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
+ __func__);
+}
+#else
+static void s3c64xx_clk_sleep_init(void) {}
+#endif
+
/* List of parent clocks common for all S3C64xx SoCs. */
PNAME(spi_mmc_p) = { "mout_epll", "dout_mpll", "fin_pll", "clk27m" };
PNAME(uart_p) = { "mout_epll", "dout_mpll" };
@@ -391,11 +453,11 @@ static void __init s3c64xx_clk_register_fixed_ext(unsigned long fin_pll_f,
/* Register s3c64xx clocks. */
void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
- unsigned long xusbxti_f, bool is_s3c6400,
- void __iomem *reg_base)
+ unsigned long xusbxti_f, bool s3c6400,
+ void __iomem *base)
{
- unsigned long *soc_regs = NULL;
- unsigned long nr_soc_regs = 0;
+ reg_base = base;
+ is_s3c6400 = s3c6400;
if (np) {
reg_base = of_iomap(np, 0);
@@ -403,13 +465,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
panic("%s: failed to map registers\n", __func__);
}
- if (!is_s3c6400) {
- soc_regs = s3c6410_clk_regs;
- nr_soc_regs = ARRAY_SIZE(s3c6410_clk_regs);
- }
-
- samsung_clk_init(np, reg_base, NR_CLKS, s3c64xx_clk_regs,
- ARRAY_SIZE(s3c64xx_clk_regs), soc_regs, nr_soc_regs);
+ samsung_clk_init(np, reg_base, NR_CLKS);
/* Register external clocks. */
if (!np)
@@ -452,6 +508,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
samsung_clk_register_alias(s3c64xx_clock_aliases,
ARRAY_SIZE(s3c64xx_clock_aliases));
+ s3c64xx_clk_sleep_init();
pr_info("%s clocks: apll = %lu, mpll = %lu\n"
"\tepll = %lu, arm_clk = %lu\n",
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index f503f32e2f80..91bec3ebdc8f 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -21,64 +21,45 @@ static void __iomem *reg_base;
static struct clk_onecell_data clk_data;
#endif
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *reg_dump;
-static unsigned long nr_reg_dump;
-
-static int samsung_clk_suspend(void)
+void samsung_clk_save(void __iomem *base,
+ struct samsung_clk_reg_dump *rd,
+ unsigned int num_regs)
{
- struct samsung_clk_reg_dump *rd = reg_dump;
- unsigned long i;
-
- for (i = 0; i < nr_reg_dump; i++, rd++)
- rd->value = __raw_readl(reg_base + rd->offset);
+ for (; num_regs > 0; --num_regs, ++rd)
+ rd->value = readl(base + rd->offset);
+}
- return 0;
+void samsung_clk_restore(void __iomem *base,
+ const struct samsung_clk_reg_dump *rd,
+ unsigned int num_regs)
+{
+ for (; num_regs > 0; --num_regs, ++rd)
+ writel(rd->value, base + rd->offset);
}
-static void samsung_clk_resume(void)
+struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
+ const unsigned long *rdump,
+ unsigned long nr_rdump)
{
- struct samsung_clk_reg_dump *rd = reg_dump;
- unsigned long i;
+ struct samsung_clk_reg_dump *rd;
+ unsigned int i;
- for (i = 0; i < nr_reg_dump; i++, rd++)
- __raw_writel(rd->value, reg_base + rd->offset);
-}
+ rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return NULL;
+
+ for (i = 0; i < nr_rdump; ++i)
+ rd[i].offset = rdump[i];
-static struct syscore_ops samsung_clk_syscore_ops = {
- .suspend = samsung_clk_suspend,
- .resume = samsung_clk_resume,
-};
-#endif /* CONFIG_PM_SLEEP */
+ return rd;
+}
/* setup the essentials required to support clock lookup using ccf */
void __init samsung_clk_init(struct device_node *np, void __iomem *base,
- unsigned long nr_clks, unsigned long *rdump,
- unsigned long nr_rdump, unsigned long *soc_rdump,
- unsigned long nr_soc_rdump)
+ unsigned long nr_clks)
{
reg_base = base;
-#ifdef CONFIG_PM_SLEEP
- if (rdump && nr_rdump) {
- unsigned int idx;
- reg_dump = kzalloc(sizeof(struct samsung_clk_reg_dump)
- * (nr_rdump + nr_soc_rdump), GFP_KERNEL);
- if (!reg_dump) {
- pr_err("%s: memory alloc for register dump failed\n",
- __func__);
- return;
- }
-
- for (idx = 0; idx < nr_rdump; idx++)
- reg_dump[idx].offset = rdump[idx];
- for (idx = 0; idx < nr_soc_rdump; idx++)
- reg_dump[nr_rdump + idx].offset = soc_rdump[idx];
- nr_reg_dump = nr_rdump + nr_soc_rdump;
- register_syscore_ops(&samsung_clk_syscore_ops);
- }
-#endif
-
clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL);
if (!clk_table)
panic("could not allocate clock lookup table\n");
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 31b4174e7a5b..c7141ba826e0 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -313,9 +313,7 @@ struct samsung_pll_clock {
_lock, _con, _rtable, _alias)
extern void __init samsung_clk_init(struct device_node *np, void __iomem *base,
- unsigned long nr_clks, unsigned long *rdump,
- unsigned long nr_rdump, unsigned long *soc_rdump,
- unsigned long nr_soc_rdump);
+ unsigned long nr_clks);
extern void __init samsung_clk_of_register_fixed_ext(
struct samsung_fixed_rate_clock *fixed_rate_clk,
unsigned int nr_fixed_rate_clk,
@@ -340,4 +338,14 @@ extern void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list,
extern unsigned long _get_rate(const char *clk_name);
+extern void samsung_clk_save(void __iomem *base,
+ struct samsung_clk_reg_dump *rd,
+ unsigned int num_regs);
+extern void samsung_clk_restore(void __iomem *base,
+ const struct samsung_clk_reg_dump *rd,
+ unsigned int num_regs);
+extern struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
+ const unsigned long *rdump,
+ unsigned long nr_rdump);
+
#endif /* __SAMSUNG_CLK_H */
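
The samsung clk.c/clk.h hunks above drop the driver-wide syscore hooks and instead export three helpers, so each SoC driver now owns its own register dump. Below is a minimal sketch, mirroring the Exynos and S3C64xx changes earlier in this patch, of how a hypothetical SoC driver ("foo"; all names and register offsets are illustrative, not taken from the patch) is expected to combine samsung_clk_alloc_reg_dump(), samsung_clk_save() and samsung_clk_restore() with syscore_ops:

#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include "clk.h"

static void __iomem *foo_reg_base;	/* mapped in the SoC's clk_init */
static struct samsung_clk_reg_dump *foo_save;

/* Offsets of the clock controller registers to preserve (illustrative). */
static const unsigned long foo_clk_regs[] = {
	0x0000,		/* e.g. a PLL lock register */
	0x0100,		/* e.g. a clock source mux register */
	0x0500,		/* e.g. a clock divider register */
};

static int foo_clk_suspend(void)
{
	samsung_clk_save(foo_reg_base, foo_save, ARRAY_SIZE(foo_clk_regs));
	return 0;
}

static void foo_clk_resume(void)
{
	samsung_clk_restore(foo_reg_base, foo_save, ARRAY_SIZE(foo_clk_regs));
}

static struct syscore_ops foo_clk_syscore_ops = {
	.suspend = foo_clk_suspend,
	.resume = foo_clk_resume,
};

static void foo_clk_sleep_init(void)
{
	foo_save = samsung_clk_alloc_reg_dump(foo_clk_regs,
					      ARRAY_SIZE(foo_clk_regs));
	if (!foo_save) {
		pr_warn("%s: no sleep support\n", __func__);
		return;
	}

	register_syscore_ops(&foo_clk_syscore_ops);
}

As in the Exynos4 and S3C64xx variants above, a driver that needs a common dump plus a SoC-specific one simply allocates two dumps and unwinds the first allocation with kfree() if the second fails.
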
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile
index 9ecef140dba7..5404cb931ebf 100644
--- a/drivers/clk/shmobile/Makefile
+++ b/drivers/clk/shmobile/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
+obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o
obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o
obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o
obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
index aac4756ec52e..f065f694cb65 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -23,7 +23,7 @@
#define CPG_DIV6_DIV_MASK 0x3f
/**
- * struct div6_clock - MSTP gating clock
+ * struct div6_clock - CPG 6 bit divider clock
* @hw: handle between common and hardware-specific interfaces
* @reg: IO-remapped register
* @div: divisor value (1-64)
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
index 42d5912b1d25..2e5810c88d11 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -137,7 +137,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name,
init.name = name;
init.ops = &cpg_mstp_clock_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
init.parent_names = &parent_name;
init.num_parents = 1;
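
The one-line change above adds CLK_SET_RATE_PARENT to the MSTP gate clocks. Since the gates have no rate operations of their own, this lets a clk_set_rate() call made on a gate propagate up to its parent clock instead of failing at the gate. A minimal consumer-side sketch; the driver, device and 48 MHz rate are purely illustrative:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* The module clock handed out here is one of the MSTP gates. */
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/*
	 * With CLK_SET_RATE_PARENT set on the gate, this request is
	 * forwarded to the gate's parent clock (for example a DIV6
	 * divider) rather than being rejected at the gate.
	 */
	return clk_set_rate(clk, 48000000);
}
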
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index 99c27b1c625b..dff7f79a19b9 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -242,22 +242,22 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
parent_name = "main";
mult = config->pll3_mult;
} else if (!strcmp(name, "lb")) {
- parent_name = "pll1_div2";
+ parent_name = "pll1";
div = cpg_mode & BIT(18) ? 36 : 24;
} else if (!strcmp(name, "qspi")) {
parent_name = "pll1_div2";
div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
? 8 : 10;
} else if (!strcmp(name, "sdh")) {
- parent_name = "pll1_div2";
+ parent_name = "pll1";
table = cpg_sdh_div_table;
shift = 8;
} else if (!strcmp(name, "sd0")) {
- parent_name = "pll1_div2";
+ parent_name = "pll1";
table = cpg_sd01_div_table;
shift = 4;
} else if (!strcmp(name, "sd1")) {
- parent_name = "pll1_div2";
+ parent_name = "pll1";
table = cpg_sd01_div_table;
shift = 0;
} else if (!strcmp(name, "z")) {
diff --git a/drivers/clk/shmobile/clk-rz.c b/drivers/clk/shmobile/clk-rz.c
new file mode 100644
index 000000000000..7e68e8630962
--- /dev/null
+++ b/drivers/clk/shmobile/clk-rz.c
@@ -0,0 +1,103 @@
+/*
+ * rz Core CPG Clocks
+ *
+ * Copyright (C) 2013 Ideas On Board SPRL
+ * Copyright (C) 2014 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+struct rz_cpg {
+ struct clk_onecell_data data;
+ void __iomem *reg;
+};
+
+#define CPG_FRQCR 0x10
+#define CPG_FRQCR2 0x14
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+static struct clk * __init
+rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *name)
+{
+ u32 val;
+ unsigned mult;
+ static const unsigned frqcr_tab[4] = { 3, 2, 0, 1 };
+
+ if (strcmp(name, "pll") == 0) {
+ /* FIXME: cpg_mode should be read from GPIO. But no GPIO support yet */
+ unsigned cpg_mode = 0; /* hardcoded to EXTAL for now */
+ const char *parent_name = of_clk_get_parent_name(np, cpg_mode);
+
+ mult = cpg_mode ? (32 / 4) : 30;
+
+ return clk_register_fixed_factor(NULL, name, parent_name, 0, mult, 1);
+ }
+
+ /* If mapping regs failed, skip non-pll clocks. System will boot anyhow */
+ if (!cpg->reg)
+ return ERR_PTR(-ENXIO);
+
+ /* FIXME:"i" and "g" are variable clocks with non-integer dividers (e.g. 2/3)
+ * and the constraint that always g <= i. To get the rz platform started,
+ * let them run at fixed current speed and implement the details later.
+ */
+ if (strcmp(name, "i") == 0)
+ val = (clk_readl(cpg->reg + CPG_FRQCR) >> 8) & 3;
+ else if (strcmp(name, "g") == 0)
+ val = clk_readl(cpg->reg + CPG_FRQCR2) & 3;
+ else
+ return ERR_PTR(-EINVAL);
+
+ mult = frqcr_tab[val];
+ return clk_register_fixed_factor(NULL, name, "pll", 0, mult, 3);
+}
+
+static void __init rz_cpg_clocks_init(struct device_node *np)
+{
+ struct rz_cpg *cpg;
+ struct clk **clks;
+ unsigned i;
+ int num_clks;
+
+ num_clks = of_property_count_strings(np, "clock-output-names");
+ if (WARN(num_clks <= 0, "can't count CPG clocks\n"))
+ return;
+
+ cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ clks = kzalloc(num_clks * sizeof(*clks), GFP_KERNEL);
+ BUG_ON(!cpg || !clks);
+
+ cpg->data.clks = clks;
+ cpg->data.clk_num = num_clks;
+
+ cpg->reg = of_iomap(np, 0);
+
+ for (i = 0; i < num_clks; ++i) {
+ const char *name;
+ struct clk *clk;
+
+ of_property_read_string_index(np, "clock-output-names", i, &name);
+
+ clk = rz_cpg_register_clock(np, cpg, name);
+ if (IS_ERR(clk))
+ pr_err("%s: failed to register %s %s clock (%ld)\n",
+ __func__, np->name, name, PTR_ERR(clk));
+ else
+ cpg->data.clks[i] = clk;
+ }
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+}
+CLK_OF_DECLARE(rz_cpg_clks, "renesas,rz-cpg-clocks", rz_cpg_clocks_init);
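
The new RZ driver models everything as fixed-factor clocks: the PLL is the external input multiplied by 30 (or by 32/4 when cpg_mode selects the other input), and the "i" and "g" clocks are derived from the PLL as pll * frqcr_tab[val] / 3, where the two-bit val is read from FRQCR[9:8] or FRQCR2[1:0]. A worked example following rz_cpg_register_clock() above, with a purely illustrative 10 MHz input and cpg_mode = 0:

/*
 * pll = 10 MHz * 30 = 300 MHz
 *
 * FRQCR[9:8] == 0 -> mult = frqcr_tab[0] = 3 -> i = 300 MHz * 3 / 3 = 300 MHz
 * FRQCR[9:8] == 1 -> mult = frqcr_tab[1] = 2 -> i = 300 MHz * 2 / 3 = 200 MHz
 */
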
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
index f9f4a15a64ab..d63b76ca60c3 100644
--- a/drivers/clk/sirf/clk-atlas6.c
+++ b/drivers/clk/sirf/clk-atlas6.c
@@ -1,7 +1,8 @@
/*
* Clock tree for CSR SiRFatlasVI
*
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
*
* Licensed under GPLv2 or later.
*/
diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
index 7dde6a82f514..37af51c5f213 100644
--- a/drivers/clk/sirf/clk-common.c
+++ b/drivers/clk/sirf/clk-common.c
@@ -1,7 +1,8 @@
/*
* common clks module for all SiRF SoCs
*
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
*
* Licensed under GPLv2 or later.
*/
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
index 7adc5c70c7ff..6968e2ebcd8a 100644
--- a/drivers/clk/sirf/clk-prima2.c
+++ b/drivers/clk/sirf/clk-prima2.c
@@ -1,7 +1,8 @@
/*
* Clock tree for CSR SiRFprimaII
*
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
*
* Licensed under GPLv2 or later.
*/
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
index 0303c0b99cd0..7e2d15a0c7b8 100644
--- a/drivers/clk/socfpga/Makefile
+++ b/drivers/clk/socfpga/Makefile
@@ -1 +1,4 @@
obj-y += clk.o
+obj-y += clk-gate.o
+obj-y += clk-pll.o
+obj-y += clk-periph.o
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
new file mode 100644
index 000000000000..501d513bf890
--- /dev/null
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2011-2012 Calxeda, Inc.
+ * Copyright (C) 2012-2013 Altera Corporation <www.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Based from clk-highbank.c
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "clk.h"
+
+#define SOCFPGA_L4_MP_CLK "l4_mp_clk"
+#define SOCFPGA_L4_SP_CLK "l4_sp_clk"
+#define SOCFPGA_NAND_CLK "nand_clk"
+#define SOCFPGA_NAND_X_CLK "nand_x_clk"
+#define SOCFPGA_MMC_CLK "sdmmc_clk"
+#define SOCFPGA_GPIO_DB_CLK_OFFSET 0xA8
+
+#define div_mask(width) ((1 << (width)) - 1)
+#define streq(a, b) (strcmp((a), (b)) == 0)
+
+#define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw)
+
+/* SDMMC Group for System Manager defines */
+#define SYSMGR_SDMMCGRP_CTRL_OFFSET 0x108
+#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \
+ ((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0))
+
+static u8 socfpga_clk_get_parent(struct clk_hw *hwclk)
+{
+ u32 l4_src;
+ u32 perpll_src;
+
+ if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) {
+ l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
+ return l4_src &= 0x1;
+ }
+ if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) {
+ l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
+ return !!(l4_src & 2);
+ }
+
+ perpll_src = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
+ if (streq(hwclk->init->name, SOCFPGA_MMC_CLK))
+ return perpll_src &= 0x3;
+ if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) ||
+ streq(hwclk->init->name, SOCFPGA_NAND_X_CLK))
+ return (perpll_src >> 2) & 3;
+
+ /* QSPI clock */
+ return (perpll_src >> 4) & 3;
+
+}
+
+static int socfpga_clk_set_parent(struct clk_hw *hwclk, u8 parent)
+{
+ u32 src_reg;
+
+ if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) {
+ src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
+ src_reg &= ~0x1;
+ src_reg |= parent;
+ writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC);
+ } else if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) {
+ src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
+ src_reg &= ~0x2;
+ src_reg |= (parent << 1);
+ writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC);
+ } else {
+ src_reg = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
+ if (streq(hwclk->init->name, SOCFPGA_MMC_CLK)) {
+ src_reg &= ~0x3;
+ src_reg |= parent;
+ } else if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) ||
+ streq(hwclk->init->name, SOCFPGA_NAND_X_CLK)) {
+ src_reg &= ~0xC;
+ src_reg |= (parent << 2);
+ } else {/* QSPI clock */
+ src_reg &= ~0x30;
+ src_reg |= (parent << 4);
+ }
+ writel(src_reg, clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
+ }
+
+ return 0;
+}
+
+static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ u32 div = 1, val;
+
+ if (socfpgaclk->fixed_div)
+ div = socfpgaclk->fixed_div;
+ else if (socfpgaclk->div_reg) {
+ val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
+ val &= div_mask(socfpgaclk->width);
+ /* Check for GPIO_DB_CLK by its offset */
+ if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
+ div = val + 1;
+ else
+ div = (1 << val);
+ }
+
+ return parent_rate / div;
+}
+
+static int socfpga_clk_prepare(struct clk_hw *hwclk)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ struct regmap *sys_mgr_base_addr;
+ int i;
+ u32 hs_timing;
+ u32 clk_phase[2];
+
+ if (socfpgaclk->clk_phase[0] || socfpgaclk->clk_phase[1]) {
+ sys_mgr_base_addr = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
+ if (IS_ERR(sys_mgr_base_addr)) {
+ pr_err("%s: failed to find altr,sys-mgr regmap!\n", __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 2; i++) {
+ switch (socfpgaclk->clk_phase[i]) {
+ case 0:
+ clk_phase[i] = 0;
+ break;
+ case 45:
+ clk_phase[i] = 1;
+ break;
+ case 90:
+ clk_phase[i] = 2;
+ break;
+ case 135:
+ clk_phase[i] = 3;
+ break;
+ case 180:
+ clk_phase[i] = 4;
+ break;
+ case 225:
+ clk_phase[i] = 5;
+ break;
+ case 270:
+ clk_phase[i] = 6;
+ break;
+ case 315:
+ clk_phase[i] = 7;
+ break;
+ default:
+ clk_phase[i] = 0;
+ break;
+ }
+ }
+ hs_timing = SYSMGR_SDMMC_CTRL_SET(clk_phase[0], clk_phase[1]);
+ regmap_write(sys_mgr_base_addr, SYSMGR_SDMMCGRP_CTRL_OFFSET,
+ hs_timing);
+ }
+ return 0;
+}
+
+static struct clk_ops gateclk_ops = {
+ .prepare = socfpga_clk_prepare,
+ .recalc_rate = socfpga_clk_recalc_rate,
+ .get_parent = socfpga_clk_get_parent,
+ .set_parent = socfpga_clk_set_parent,
+};
+
+static void __init __socfpga_gate_init(struct device_node *node,
+ const struct clk_ops *ops)
+{
+ u32 clk_gate[2];
+ u32 div_reg[3];
+ u32 clk_phase[2];
+ u32 fixed_div;
+ struct clk *clk;
+ struct socfpga_gate_clk *socfpga_clk;
+ const char *clk_name = node->name;
+ const char *parent_name[SOCFPGA_MAX_PARENTS];
+ struct clk_init_data init;
+ int rc;
+ int i = 0;
+
+ socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
+ if (WARN_ON(!socfpga_clk))
+ return;
+
+ rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
+ if (rc)
+ clk_gate[0] = 0;
+
+ if (clk_gate[0]) {
+ socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
+ socfpga_clk->hw.bit_idx = clk_gate[1];
+
+ gateclk_ops.enable = clk_gate_ops.enable;
+ gateclk_ops.disable = clk_gate_ops.disable;
+ }
+
+ rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
+ if (rc)
+ socfpga_clk->fixed_div = 0;
+ else
+ socfpga_clk->fixed_div = fixed_div;
+
+ rc = of_property_read_u32_array(node, "div-reg", div_reg, 3);
+ if (!rc) {
+ socfpga_clk->div_reg = clk_mgr_base_addr + div_reg[0];
+ socfpga_clk->shift = div_reg[1];
+ socfpga_clk->width = div_reg[2];
+ } else {
+ socfpga_clk->div_reg = 0;
+ }
+
+ rc = of_property_read_u32_array(node, "clk-phase", clk_phase, 2);
+ if (!rc) {
+ socfpga_clk->clk_phase[0] = clk_phase[0];
+ socfpga_clk->clk_phase[1] = clk_phase[1];
+ }
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ init.name = clk_name;
+ init.ops = ops;
+ init.flags = 0;
+ while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] =
+ of_clk_get_parent_name(node, i)) != NULL)
+ i++;
+
+ init.parent_names = parent_name;
+ init.num_parents = i;
+ socfpga_clk->hw.hw.init = &init;
+
+ clk = clk_register(NULL, &socfpga_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(socfpga_clk);
+ return;
+ }
+ rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (WARN_ON(rc))
+ return;
+}
+
+void __init socfpga_gate_init(struct device_node *node)
+{
+ __socfpga_gate_init(node, &gateclk_ops);
+}
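
socfpga_clk_prepare() above maps the optional "clk-phase" property (sample and drive clock phases in degrees for the SDMMC clock) onto the two 3-bit fields of the System Manager's SDMMC control register. A worked example, assuming a hypothetical node with clk-phase = <135 90>:

/*
 * clk-phase = <135 90> -> clk_phase[0] = 3 (smplsel), clk_phase[1] = 2 (drvsel)
 *
 * hs_timing = SYSMGR_SDMMC_CTRL_SET(3, 2)
 *           = ((3 & 0x7) << 3) | ((2 & 0x7) << 0)
 *           = 0x1a
 *
 * which is then written through the "altr,sys-mgr" regmap at
 * SYSMGR_SDMMCGRP_CTRL_OFFSET (0x108).
 */
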
diff --git a/drivers/clk/socfpga/clk-periph.c b/drivers/clk/socfpga/clk-periph.c
new file mode 100644
index 000000000000..81623a3736f9
--- /dev/null
+++ b/drivers/clk/socfpga/clk-periph.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2011-2012 Calxeda, Inc.
+ * Copyright (C) 2012-2013 Altera Corporation <www.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Based from clk-highbank.c
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include "clk.h"
+
+#define to_socfpga_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw)
+
+static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(hwclk);
+ u32 div;
+
+ if (socfpgaclk->fixed_div)
+ div = socfpgaclk->fixed_div;
+ else
+ div = ((readl(socfpgaclk->hw.reg) & 0x1ff) + 1);
+
+ return parent_rate / div;
+}
+
+static const struct clk_ops periclk_ops = {
+ .recalc_rate = clk_periclk_recalc_rate,
+};
+
+static __init void __socfpga_periph_init(struct device_node *node,
+ const struct clk_ops *ops)
+{
+ u32 reg;
+ struct clk *clk;
+ struct socfpga_periph_clk *periph_clk;
+ const char *clk_name = node->name;
+ const char *parent_name;
+ struct clk_init_data init;
+ int rc;
+ u32 fixed_div;
+
+ of_property_read_u32(node, "reg", &reg);
+
+ periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
+ if (WARN_ON(!periph_clk))
+ return;
+
+ periph_clk->hw.reg = clk_mgr_base_addr + reg;
+
+ rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
+ if (rc)
+ periph_clk->fixed_div = 0;
+ else
+ periph_clk->fixed_div = fixed_div;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ init.name = clk_name;
+ init.ops = ops;
+ init.flags = 0;
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ periph_clk->hw.hw.init = &init;
+
+ clk = clk_register(NULL, &periph_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(periph_clk);
+ return;
+ }
+ rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+
+void __init socfpga_periph_init(struct device_node *node)
+{
+ __socfpga_periph_init(node, &periclk_ops);
+}
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
new file mode 100644
index 000000000000..88dafb5e9627
--- /dev/null
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2011-2012 Calxeda, Inc.
+ * Copyright (C) 2012-2013 Altera Corporation <www.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Based from clk-highbank.c
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include "clk.h"
+
+/* Clock bypass bits */
+#define MAINPLL_BYPASS (1<<0)
+#define SDRAMPLL_BYPASS (1<<1)
+#define SDRAMPLL_SRC_BYPASS (1<<2)
+#define PERPLL_BYPASS (1<<3)
+#define PERPLL_SRC_BYPASS (1<<4)
+
+#define SOCFPGA_PLL_BG_PWRDWN 0
+#define SOCFPGA_PLL_EXT_ENA 1
+#define SOCFPGA_PLL_PWR_DOWN 2
+#define SOCFPGA_PLL_DIVF_MASK 0x0000FFF8
+#define SOCFPGA_PLL_DIVF_SHIFT 3
+#define SOCFPGA_PLL_DIVQ_MASK 0x003F0000
+#define SOCFPGA_PLL_DIVQ_SHIFT 16
+
+#define CLK_MGR_PLL_CLK_SRC_SHIFT 22
+#define CLK_MGR_PLL_CLK_SRC_MASK 0x3
+
+#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ unsigned long divf, divq, reg;
+ unsigned long long vco_freq;
+ unsigned long bypass;
+
+ reg = readl(socfpgaclk->hw.reg);
+ bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS);
+ if (bypass & MAINPLL_BYPASS)
+ return parent_rate;
+
+ divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT;
+ divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT;
+ vco_freq = (unsigned long long)parent_rate * (divf + 1);
+ do_div(vco_freq, (1 + divq));
+ return (unsigned long)vco_freq;
+}
+
+static u8 clk_pll_get_parent(struct clk_hw *hwclk)
+{
+ u32 pll_src;
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+
+ pll_src = readl(socfpgaclk->hw.reg);
+ return (pll_src >> CLK_MGR_PLL_CLK_SRC_SHIFT) &
+ CLK_MGR_PLL_CLK_SRC_MASK;
+}
+
+static struct clk_ops clk_pll_ops = {
+ .recalc_rate = clk_pll_recalc_rate,
+ .get_parent = clk_pll_get_parent,
+};
+
+static __init struct clk *__socfpga_pll_init(struct device_node *node,
+ const struct clk_ops *ops)
+{
+ u32 reg;
+ struct clk *clk;
+ struct socfpga_pll *pll_clk;
+ const char *clk_name = node->name;
+ const char *parent_name[SOCFPGA_MAX_PARENTS];
+ struct clk_init_data init;
+ int rc;
+ int i = 0;
+
+ of_property_read_u32(node, "reg", &reg);
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (WARN_ON(!pll_clk))
+ return NULL;
+
+ pll_clk->hw.reg = clk_mgr_base_addr + reg;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ init.name = clk_name;
+ init.ops = ops;
+ init.flags = 0;
+
+ while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] =
+ of_clk_get_parent_name(node, i)) != NULL)
+ i++;
+
+ init.num_parents = i;
+ init.parent_names = parent_name;
+ pll_clk->hw.hw.init = &init;
+
+ pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
+ clk_pll_ops.enable = clk_gate_ops.enable;
+ clk_pll_ops.disable = clk_gate_ops.disable;
+
+ clk = clk_register(NULL, &pll_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(pll_clk);
+ return NULL;
+ }
+ rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ return clk;
+}
+
+void __init socfpga_pll_init(struct device_node *node)
+{
+ __socfpga_pll_init(node, &clk_pll_ops);
+}
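
clk_pll_recalc_rate() in the new clk-pll.c keeps the formula of the code it replaces, vco = parent_rate * (divf + 1) / (divq + 1), with the main PLL bypass bit returning the parent rate unchanged, but it now does the multiply in 64 bits and divides with do_div(). A worked example with illustrative register values:

/*
 * With a 25 MHz reference, divf = 63 and divq = 0:
 *
 *   vco_freq = 25000000ULL * (63 + 1) = 1600000000
 *   do_div(vco_freq, 0 + 1)           -> 1.6 GHz
 *
 * Larger multipliers (divf can reach 0x1fff) would overflow the old
 * 32-bit "parent_rate * (divf + 1)" computation; the 64-bit version
 * does not.
 */
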
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 5983a26a8c5f..35a960a993f9 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -22,325 +22,23 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
-/* Clock Manager offsets */
-#define CLKMGR_CTRL 0x0
-#define CLKMGR_BYPASS 0x4
-#define CLKMGR_L4SRC 0x70
-#define CLKMGR_PERPLL_SRC 0xAC
+#include "clk.h"
-/* Clock bypass bits */
-#define MAINPLL_BYPASS (1<<0)
-#define SDRAMPLL_BYPASS (1<<1)
-#define SDRAMPLL_SRC_BYPASS (1<<2)
-#define PERPLL_BYPASS (1<<3)
-#define PERPLL_SRC_BYPASS (1<<4)
+void __iomem *clk_mgr_base_addr;
-#define SOCFPGA_PLL_BG_PWRDWN 0
-#define SOCFPGA_PLL_EXT_ENA 1
-#define SOCFPGA_PLL_PWR_DOWN 2
-#define SOCFPGA_PLL_DIVF_MASK 0x0000FFF8
-#define SOCFPGA_PLL_DIVF_SHIFT 3
-#define SOCFPGA_PLL_DIVQ_MASK 0x003F0000
-#define SOCFPGA_PLL_DIVQ_SHIFT 16
-#define SOCFGPA_MAX_PARENTS 3
-
-#define SOCFPGA_L4_MP_CLK "l4_mp_clk"
-#define SOCFPGA_L4_SP_CLK "l4_sp_clk"
-#define SOCFPGA_NAND_CLK "nand_clk"
-#define SOCFPGA_NAND_X_CLK "nand_x_clk"
-#define SOCFPGA_MMC_CLK "sdmmc_clk"
-#define SOCFPGA_DB_CLK "gpio_db_clk"
-
-#define div_mask(width) ((1 << (width)) - 1)
-#define streq(a, b) (strcmp((a), (b)) == 0)
-
-extern void __iomem *clk_mgr_base_addr;
-
-struct socfpga_clk {
- struct clk_gate hw;
- char *parent_name;
- char *clk_name;
- u32 fixed_div;
- void __iomem *div_reg;
- u32 width; /* only valid if div_reg != 0 */
- u32 shift; /* only valid if div_reg != 0 */
-};
-#define to_socfpga_clk(p) container_of(p, struct socfpga_clk, hw.hw)
-
-static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
- unsigned long parent_rate)
-{
- struct socfpga_clk *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long divf, divq, vco_freq, reg;
- unsigned long bypass;
-
- reg = readl(socfpgaclk->hw.reg);
- bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS);
- if (bypass & MAINPLL_BYPASS)
- return parent_rate;
-
- divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT;
- divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT;
- vco_freq = parent_rate * (divf + 1);
- return vco_freq / (1 + divq);
-}
-
-
-static struct clk_ops clk_pll_ops = {
- .recalc_rate = clk_pll_recalc_rate,
-};
-
-static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
- unsigned long parent_rate)
-{
- struct socfpga_clk *socfpgaclk = to_socfpga_clk(hwclk);
- u32 div;
-
- if (socfpgaclk->fixed_div)
- div = socfpgaclk->fixed_div;
- else
- div = ((readl(socfpgaclk->hw.reg) & 0x1ff) + 1);
-
- return parent_rate / div;
-}
-
-static const struct clk_ops periclk_ops = {
- .recalc_rate = clk_periclk_recalc_rate,
-};
-
-static __init struct clk *socfpga_clk_init(struct device_node *node,
- const struct clk_ops *ops)
-{
- u32 reg;
- struct clk *clk;
- struct socfpga_clk *socfpga_clk;
- const char *clk_name = node->name;
- const char *parent_name;
- struct clk_init_data init;
- int rc;
- u32 fixed_div;
-
- of_property_read_u32(node, "reg", &reg);
-
- socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
- if (WARN_ON(!socfpga_clk))
- return NULL;
-
- socfpga_clk->hw.reg = clk_mgr_base_addr + reg;
-
- rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
- if (rc)
- socfpga_clk->fixed_div = 0;
- else
- socfpga_clk->fixed_div = fixed_div;
-
- of_property_read_string(node, "clock-output-names", &clk_name);
-
- init.name = clk_name;
- init.ops = ops;
- init.flags = 0;
- parent_name = of_clk_get_parent_name(node, 0);
- init.parent_names = &parent_name;
- init.num_parents = 1;
-
- socfpga_clk->hw.hw.init = &init;
-
- if (streq(clk_name, "main_pll") ||
- streq(clk_name, "periph_pll") ||
- streq(clk_name, "sdram_pll")) {
- socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
- clk_pll_ops.enable = clk_gate_ops.enable;
- clk_pll_ops.disable = clk_gate_ops.disable;
- }
-
- clk = clk_register(NULL, &socfpga_clk->hw.hw);
- if (WARN_ON(IS_ERR(clk))) {
- kfree(socfpga_clk);
- return NULL;
- }
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
- return clk;
-}
-
-static u8 socfpga_clk_get_parent(struct clk_hw *hwclk)
-{
- u32 l4_src;
- u32 perpll_src;
-
- if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) {
- l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
- return l4_src &= 0x1;
- }
- if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) {
- l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
- return !!(l4_src & 2);
- }
-
- perpll_src = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
- if (streq(hwclk->init->name, SOCFPGA_MMC_CLK))
- return perpll_src &= 0x3;
- if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) ||
- streq(hwclk->init->name, SOCFPGA_NAND_X_CLK))
- return (perpll_src >> 2) & 3;
-
- /* QSPI clock */
- return (perpll_src >> 4) & 3;
-
-}
-
-static int socfpga_clk_set_parent(struct clk_hw *hwclk, u8 parent)
-{
- u32 src_reg;
-
- if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) {
- src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
- src_reg &= ~0x1;
- src_reg |= parent;
- writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC);
- } else if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) {
- src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
- src_reg &= ~0x2;
- src_reg |= (parent << 1);
- writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC);
- } else {
- src_reg = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
- if (streq(hwclk->init->name, SOCFPGA_MMC_CLK)) {
- src_reg &= ~0x3;
- src_reg |= parent;
- } else if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) ||
- streq(hwclk->init->name, SOCFPGA_NAND_X_CLK)) {
- src_reg &= ~0xC;
- src_reg |= (parent << 2);
- } else {/* QSPI clock */
- src_reg &= ~0x30;
- src_reg |= (parent << 4);
- }
- writel(src_reg, clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
- }
-
- return 0;
-}
-
-static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
- unsigned long parent_rate)
-{
- struct socfpga_clk *socfpgaclk = to_socfpga_clk(hwclk);
- u32 div = 1, val;
-
- if (socfpgaclk->fixed_div)
- div = socfpgaclk->fixed_div;
- else if (socfpgaclk->div_reg) {
- val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
- val &= div_mask(socfpgaclk->width);
- if (streq(hwclk->init->name, SOCFPGA_DB_CLK))
- div = val + 1;
- else
- div = (1 << val);
- }
-
- return parent_rate / div;
-}
-
-static struct clk_ops gateclk_ops = {
- .recalc_rate = socfpga_clk_recalc_rate,
- .get_parent = socfpga_clk_get_parent,
- .set_parent = socfpga_clk_set_parent,
+static const struct of_device_id socfpga_child_clocks[] __initconst = {
+ { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, },
+ { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, },
+ { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, },
+ {},
};
-static void __init socfpga_gate_clk_init(struct device_node *node,
- const struct clk_ops *ops)
-{
- u32 clk_gate[2];
- u32 div_reg[3];
- u32 fixed_div;
- struct clk *clk;
- struct socfpga_clk *socfpga_clk;
- const char *clk_name = node->name;
- const char *parent_name[SOCFGPA_MAX_PARENTS];
- struct clk_init_data init;
- int rc;
- int i = 0;
-
- socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
- if (WARN_ON(!socfpga_clk))
- return;
-
- rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
- if (rc)
- clk_gate[0] = 0;
-
- if (clk_gate[0]) {
- socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
- socfpga_clk->hw.bit_idx = clk_gate[1];
-
- gateclk_ops.enable = clk_gate_ops.enable;
- gateclk_ops.disable = clk_gate_ops.disable;
- }
-
- rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
- if (rc)
- socfpga_clk->fixed_div = 0;
- else
- socfpga_clk->fixed_div = fixed_div;
-
- rc = of_property_read_u32_array(node, "div-reg", div_reg, 3);
- if (!rc) {
- socfpga_clk->div_reg = clk_mgr_base_addr + div_reg[0];
- socfpga_clk->shift = div_reg[1];
- socfpga_clk->width = div_reg[2];
- } else {
- socfpga_clk->div_reg = NULL;
- }
-
- of_property_read_string(node, "clock-output-names", &clk_name);
-
- init.name = clk_name;
- init.ops = ops;
- init.flags = 0;
- while (i < SOCFGPA_MAX_PARENTS && (parent_name[i] =
- of_clk_get_parent_name(node, i)) != NULL)
- i++;
-
- init.parent_names = parent_name;
- init.num_parents = i;
- socfpga_clk->hw.hw.init = &init;
-
- clk = clk_register(NULL, &socfpga_clk->hw.hw);
- if (WARN_ON(IS_ERR(clk))) {
- kfree(socfpga_clk);
- return;
- }
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
- if (WARN_ON(rc))
- return;
-}
-
-static void __init socfpga_pll_init(struct device_node *node)
+static void __init socfpga_clkmgr_init(struct device_node *node)
{
- socfpga_clk_init(node, &clk_pll_ops);
+ clk_mgr_base_addr = of_iomap(node, 0);
+ of_clk_init(socfpga_child_clocks);
}
-CLK_OF_DECLARE(socfpga_pll, "altr,socfpga-pll-clock", socfpga_pll_init);
+CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init);
-static void __init socfpga_periph_init(struct device_node *node)
-{
- socfpga_clk_init(node, &periclk_ops);
-}
-CLK_OF_DECLARE(socfpga_periph, "altr,socfpga-perip-clk", socfpga_periph_init);
-
-static void __init socfpga_gate_init(struct device_node *node)
-{
- socfpga_gate_clk_init(node, &gateclk_ops);
-}
-CLK_OF_DECLARE(socfpga_gate, "altr,socfpga-gate-clk", socfpga_gate_init);
-
-void __init socfpga_init_clocks(void)
-{
- struct clk *clk;
- int ret;
-
- clk = clk_register_fixed_factor(NULL, "smp_twd", "mpuclk", 0, 1, 4);
- ret = clk_register_clkdev(clk, NULL, "smp_twd");
- if (ret)
- pr_err("smp_twd alias not registered\n");
-}
diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h
new file mode 100644
index 000000000000..d2e54019c94f
--- /dev/null
+++ b/drivers/clk/socfpga/clk.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013, Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ *
+ * based on drivers/clk/tegra/clk.h
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __SOCFPGA_CLK_H
+#define __SOCFPGA_CLK_H
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+
+/* Clock Manager offsets */
+#define CLKMGR_CTRL 0x0
+#define CLKMGR_BYPASS 0x4
+#define CLKMGR_L4SRC 0x70
+#define CLKMGR_PERPLL_SRC 0xAC
+
+#define SOCFPGA_MAX_PARENTS 3
+
+extern void __iomem *clk_mgr_base_addr;
+
+void __init socfpga_pll_init(struct device_node *node);
+void __init socfpga_periph_init(struct device_node *node);
+void __init socfpga_gate_init(struct device_node *node);
+
+struct socfpga_pll {
+ struct clk_gate hw;
+};
+
+struct socfpga_gate_clk {
+ struct clk_gate hw;
+ char *parent_name;
+ u32 fixed_div;
+ void __iomem *div_reg;
+ u32 width; /* only valid if div_reg != 0 */
+ u32 shift; /* only valid if div_reg != 0 */
+ u32 clk_phase[2];
+};
+
+struct socfpga_periph_clk {
+ struct clk_gate hw;
+ char *parent_name;
+ u32 fixed_div;
+};
+
+#endif /* __SOCFPGA_CLK_H */
diff --git a/drivers/clk/st/Makefile b/drivers/clk/st/Makefile
new file mode 100644
index 000000000000..c7455ffdbdf7
--- /dev/null
+++ b/drivers/clk/st/Makefile
@@ -0,0 +1 @@
+obj-y += clkgen-mux.o clkgen-pll.o clkgen-fsyn.o
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
new file mode 100644
index 000000000000..4f53ee0778d9
--- /dev/null
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -0,0 +1,1039 @@
+/*
+ * Copyright (C) 2014 STMicroelectronics R&D Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ * Authors:
+ * Stephen Gallimore <stephen.gallimore@st.com>,
+ * Pankaj Dev <pankaj.dev@st.com>.
+ */
+
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/clk-provider.h>
+
+#include "clkgen.h"
+
+/*
+ * Maximum input clock to the PLL before we divide it down by 2,
+ * although in practice actual systems have never been seen to use it.
+ */
+#define QUADFS_NDIV_THRESHOLD 30000000
+
+#define PLL_BW_GOODREF (0L)
+#define PLL_BW_VBADREF (1L)
+#define PLL_BW_BADREF (2L)
+#define PLL_BW_VGOODREF (3L)
+
+#define QUADFS_MAX_CHAN 4
+
+struct stm_fs {
+ unsigned long ndiv;
+ unsigned long mdiv;
+ unsigned long pe;
+ unsigned long sdiv;
+ unsigned long nsdiv;
+};
+
+static struct stm_fs fs216c65_rtbl[] = {
+ { .mdiv = 0x1f, .pe = 0x0, .sdiv = 0x7, .nsdiv = 0 }, /* 312.5 kHz */
+ { .mdiv = 0x17, .pe = 0x25ed, .sdiv = 0x1, .nsdiv = 0 }, /* 27 MHz */
+ { .mdiv = 0x1a, .pe = 0x7b36, .sdiv = 0x2, .nsdiv = 1 }, /* 36.87 MHz */
+ { .mdiv = 0x13, .pe = 0x0, .sdiv = 0x2, .nsdiv = 1 }, /* 48 MHz */
+ { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x1, .nsdiv = 1 }, /* 108 MHz */
+};
+
+static struct stm_fs fs432c65_rtbl[] = {
+ { .mdiv = 0x1f, .pe = 0x0, .sdiv = 0x7, .nsdiv = 0 }, /* 625 kHz */
+ { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x2, .nsdiv = 1 }, /* 108 MHz */
+ { .mdiv = 0x19, .pe = 0x121a, .sdiv = 0x0, .nsdiv = 1 }, /* 297 MHz */
+};
+
+static struct stm_fs fs660c32_rtbl[] = {
+ { .mdiv = 0x01, .pe = 0x2aaa, .sdiv = 0x8, .nsdiv = 0 }, /* 600 kHz */
+ { .mdiv = 0x02, .pe = 0x3d33, .sdiv = 0x0, .nsdiv = 0 }, /* 148.5 MHz */
+ { .mdiv = 0x13, .pe = 0x5bcc, .sdiv = 0x0, .nsdiv = 1 }, /* 297 MHz */
+ { .mdiv = 0x0e, .pe = 0x1025, .sdiv = 0x0, .nsdiv = 1 }, /* 333 MHz */
+ { .mdiv = 0x0b, .pe = 0x715f, .sdiv = 0x0, .nsdiv = 1 }, /* 350 MHz */
+};
+
+struct clkgen_quadfs_data {
+ bool reset_present;
+ bool bwfilter_present;
+ bool lockstatus_present;
+ bool nsdiv_present;
+ struct clkgen_field ndiv;
+ struct clkgen_field ref_bw;
+ struct clkgen_field nreset;
+ struct clkgen_field npda;
+ struct clkgen_field lock_status;
+
+ struct clkgen_field nsb[QUADFS_MAX_CHAN];
+ struct clkgen_field en[QUADFS_MAX_CHAN];
+ struct clkgen_field mdiv[QUADFS_MAX_CHAN];
+ struct clkgen_field pe[QUADFS_MAX_CHAN];
+ struct clkgen_field sdiv[QUADFS_MAX_CHAN];
+ struct clkgen_field nsdiv[QUADFS_MAX_CHAN];
+
+ const struct clk_ops *pll_ops;
+ struct stm_fs *rtbl;
+ u8 rtbl_cnt;
+ int (*get_rate)(unsigned long , struct stm_fs *,
+ unsigned long *);
+};
+
+static const struct clk_ops st_quadfs_pll_c65_ops;
+static const struct clk_ops st_quadfs_pll_c32_ops;
+static const struct clk_ops st_quadfs_fs216c65_ops;
+static const struct clk_ops st_quadfs_fs432c65_ops;
+static const struct clk_ops st_quadfs_fs660c32_ops;
+
+static int clk_fs216c65_get_rate(unsigned long, struct stm_fs *,
+ unsigned long *);
+static int clk_fs432c65_get_rate(unsigned long, struct stm_fs *,
+ unsigned long *);
+static int clk_fs660c32_dig_get_rate(unsigned long, struct stm_fs *,
+ unsigned long *);
+/*
+ * Values for all of the standalone instances of this clock
+ * generator found in STiH415 and STiH416 SYSCFG register banks. Note
+ * that the individual channel standby control bits (nsb) are in the
+ * first register along with the PLL control bits.
+ */
+static struct clkgen_quadfs_data st_fs216c65_416 = {
+ /* 416 specific */
+ .npda = CLKGEN_FIELD(0x0, 0x1, 14),
+ .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
+ CLKGEN_FIELD(0x0, 0x1, 11),
+ CLKGEN_FIELD(0x0, 0x1, 12),
+ CLKGEN_FIELD(0x0, 0x1, 13) },
+ .nsdiv_present = true,
+ .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
+ CLKGEN_FIELD(0x0, 0x1, 19),
+ CLKGEN_FIELD(0x0, 0x1, 20),
+ CLKGEN_FIELD(0x0, 0x1, 21) },
+ .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
+ CLKGEN_FIELD(0x14, 0x1f, 0),
+ CLKGEN_FIELD(0x24, 0x1f, 0),
+ CLKGEN_FIELD(0x34, 0x1f, 0) },
+ .en = { CLKGEN_FIELD(0x10, 0x1, 0),
+ CLKGEN_FIELD(0x20, 0x1, 0),
+ CLKGEN_FIELD(0x30, 0x1, 0),
+ CLKGEN_FIELD(0x40, 0x1, 0) },
+ .ndiv = CLKGEN_FIELD(0x0, 0x1, 15),
+ .bwfilter_present = true,
+ .ref_bw = CLKGEN_FIELD(0x0, 0x3, 16),
+ .pe = { CLKGEN_FIELD(0x8, 0xffff, 0),
+ CLKGEN_FIELD(0x18, 0xffff, 0),
+ CLKGEN_FIELD(0x28, 0xffff, 0),
+ CLKGEN_FIELD(0x38, 0xffff, 0) },
+ .sdiv = { CLKGEN_FIELD(0xC, 0x7, 0),
+ CLKGEN_FIELD(0x1C, 0x7, 0),
+ CLKGEN_FIELD(0x2C, 0x7, 0),
+ CLKGEN_FIELD(0x3C, 0x7, 0) },
+ .pll_ops = &st_quadfs_pll_c65_ops,
+ .rtbl = fs216c65_rtbl,
+ .rtbl_cnt = ARRAY_SIZE(fs216c65_rtbl),
+ .get_rate = clk_fs216c65_get_rate,
+};
+
+static struct clkgen_quadfs_data st_fs432c65_416 = {
+ .npda = CLKGEN_FIELD(0x0, 0x1, 14),
+ .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
+ CLKGEN_FIELD(0x0, 0x1, 11),
+ CLKGEN_FIELD(0x0, 0x1, 12),
+ CLKGEN_FIELD(0x0, 0x1, 13) },
+ .nsdiv_present = true,
+ .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
+ CLKGEN_FIELD(0x0, 0x1, 19),
+ CLKGEN_FIELD(0x0, 0x1, 20),
+ CLKGEN_FIELD(0x0, 0x1, 21) },
+ .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
+ CLKGEN_FIELD(0x14, 0x1f, 0),
+ CLKGEN_FIELD(0x24, 0x1f, 0),
+ CLKGEN_FIELD(0x34, 0x1f, 0) },
+ .en = { CLKGEN_FIELD(0x10, 0x1, 0),
+ CLKGEN_FIELD(0x20, 0x1, 0),
+ CLKGEN_FIELD(0x30, 0x1, 0),
+ CLKGEN_FIELD(0x40, 0x1, 0) },
+ .ndiv = CLKGEN_FIELD(0x0, 0x1, 15),
+ .bwfilter_present = true,
+ .ref_bw = CLKGEN_FIELD(0x0, 0x3, 16),
+ .pe = { CLKGEN_FIELD(0x8, 0xffff, 0),
+ CLKGEN_FIELD(0x18, 0xffff, 0),
+ CLKGEN_FIELD(0x28, 0xffff, 0),
+ CLKGEN_FIELD(0x38, 0xffff, 0) },
+ .sdiv = { CLKGEN_FIELD(0xC, 0x7, 0),
+ CLKGEN_FIELD(0x1C, 0x7, 0),
+ CLKGEN_FIELD(0x2C, 0x7, 0),
+ CLKGEN_FIELD(0x3C, 0x7, 0) },
+ .pll_ops = &st_quadfs_pll_c65_ops,
+ .rtbl = fs432c65_rtbl,
+ .rtbl_cnt = ARRAY_SIZE(fs432c65_rtbl),
+ .get_rate = clk_fs432c65_get_rate,
+};
+
+static struct clkgen_quadfs_data st_fs660c32_E_416 = {
+ .npda = CLKGEN_FIELD(0x0, 0x1, 14),
+ .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
+ CLKGEN_FIELD(0x0, 0x1, 11),
+ CLKGEN_FIELD(0x0, 0x1, 12),
+ CLKGEN_FIELD(0x0, 0x1, 13) },
+ .nsdiv_present = true,
+ .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
+ CLKGEN_FIELD(0x0, 0x1, 19),
+ CLKGEN_FIELD(0x0, 0x1, 20),
+ CLKGEN_FIELD(0x0, 0x1, 21) },
+ .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
+ CLKGEN_FIELD(0x14, 0x1f, 0),
+ CLKGEN_FIELD(0x24, 0x1f, 0),
+ CLKGEN_FIELD(0x34, 0x1f, 0) },
+ .en = { CLKGEN_FIELD(0x10, 0x1, 0),
+ CLKGEN_FIELD(0x20, 0x1, 0),
+ CLKGEN_FIELD(0x30, 0x1, 0),
+ CLKGEN_FIELD(0x40, 0x1, 0) },
+ .ndiv = CLKGEN_FIELD(0x0, 0x7, 15),
+ .pe = { CLKGEN_FIELD(0x8, 0x7fff, 0),
+ CLKGEN_FIELD(0x18, 0x7fff, 0),
+ CLKGEN_FIELD(0x28, 0x7fff, 0),
+ CLKGEN_FIELD(0x38, 0x7fff, 0) },
+ .sdiv = { CLKGEN_FIELD(0xC, 0xf, 0),
+ CLKGEN_FIELD(0x1C, 0xf, 0),
+ CLKGEN_FIELD(0x2C, 0xf, 0),
+ CLKGEN_FIELD(0x3C, 0xf, 0) },
+ .lockstatus_present = true,
+ .lock_status = CLKGEN_FIELD(0xAC, 0x1, 0),
+ .pll_ops = &st_quadfs_pll_c32_ops,
+ .rtbl = fs660c32_rtbl,
+ .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl),
+ .get_rate = clk_fs660c32_dig_get_rate,
+};
+
+static struct clkgen_quadfs_data st_fs660c32_F_416 = {
+ .npda = CLKGEN_FIELD(0x0, 0x1, 14),
+ .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
+ CLKGEN_FIELD(0x0, 0x1, 11),
+ CLKGEN_FIELD(0x0, 0x1, 12),
+ CLKGEN_FIELD(0x0, 0x1, 13) },
+ .nsdiv_present = true,
+ .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
+ CLKGEN_FIELD(0x0, 0x1, 19),
+ CLKGEN_FIELD(0x0, 0x1, 20),
+ CLKGEN_FIELD(0x0, 0x1, 21) },
+ .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
+ CLKGEN_FIELD(0x14, 0x1f, 0),
+ CLKGEN_FIELD(0x24, 0x1f, 0),
+ CLKGEN_FIELD(0x34, 0x1f, 0) },
+ .en = { CLKGEN_FIELD(0x10, 0x1, 0),
+ CLKGEN_FIELD(0x20, 0x1, 0),
+ CLKGEN_FIELD(0x30, 0x1, 0),
+ CLKGEN_FIELD(0x40, 0x1, 0) },
+ .ndiv = CLKGEN_FIELD(0x0, 0x7, 15),
+ .pe = { CLKGEN_FIELD(0x8, 0x7fff, 0),
+ CLKGEN_FIELD(0x18, 0x7fff, 0),
+ CLKGEN_FIELD(0x28, 0x7fff, 0),
+ CLKGEN_FIELD(0x38, 0x7fff, 0) },
+ .sdiv = { CLKGEN_FIELD(0xC, 0xf, 0),
+ CLKGEN_FIELD(0x1C, 0xf, 0),
+ CLKGEN_FIELD(0x2C, 0xf, 0),
+ CLKGEN_FIELD(0x3C, 0xf, 0) },
+ .lockstatus_present = true,
+ .lock_status = CLKGEN_FIELD(0xEC, 0x1, 0),
+ .pll_ops = &st_quadfs_pll_c32_ops,
+ .rtbl = fs660c32_rtbl,
+ .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl),
+ .get_rate = clk_fs660c32_dig_get_rate,
+};
+
+/**
+ * DOC: A Frequency Synthesizer that multiplies its input clock by a fixed factor
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional & control the Fsyn
+ * rate - inherits rate from parent. set_rate/round_rate/recalc_rate
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+/**
+ * struct st_clk_quadfs_pll - A pll which outputs a fixed multiplier of
+ * its parent clock, found inside a type of
+ * ST quad channel frequency synthesizer block
+ *
+ * @hw: handle between common and hardware-specific interfaces.
+ * @ndiv: regmap field for the ndiv control.
+ * @regs_base: base address of the configuration registers.
+ * @lock: spinlock.
+ *
+ */
+struct st_clk_quadfs_pll {
+ struct clk_hw hw;
+ void __iomem *regs_base;
+ spinlock_t *lock;
+ struct clkgen_quadfs_data *data;
+ u32 ndiv;
+};
+
+#define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw)
+
+static int quadfs_pll_enable(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
+ unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10);
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ /*
+ * Bring block out of reset if we have reset control.
+ */
+ if (pll->data->reset_present)
+ CLKGEN_WRITE(pll, nreset, 1);
+
+ /*
+ * Use a fixed input clock noise bandwidth filter for the moment
+ */
+ if (pll->data->bwfilter_present)
+ CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF);
+
+
+ CLKGEN_WRITE(pll, ndiv, pll->ndiv);
+
+ /*
+ * Power up the PLL
+ */
+ CLKGEN_WRITE(pll, npda, 1);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ if (pll->data->lockstatus_present)
+ while (!CLKGEN_READ(pll, lock_status)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static void quadfs_pll_disable(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
+ unsigned long flags = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ /*
+ * Powerdown the PLL and then put block into soft reset if we have
+ * reset control.
+ */
+ CLKGEN_WRITE(pll, npda, 0);
+
+ if (pll->data->reset_present)
+ CLKGEN_WRITE(pll, nreset, 0);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static int quadfs_pll_is_enabled(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
+ u32 npda = CLKGEN_READ(pll, npda);
+
+ return !!npda;
+}
+
+int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
+ unsigned long *rate)
+{
+ unsigned long nd = fs->ndiv + 16; /* ndiv value */
+
+ *rate = input * nd;
+
+ return 0;
+}
+
+static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
+ unsigned long rate = 0;
+ struct stm_fs params;
+
+ params.ndiv = CLKGEN_READ(pll, ndiv);
+ if (clk_fs660c32_vco_get_rate(parent_rate, &params, &rate))
+ pr_err("%s:%s error calculating rate\n",
+ __clk_get_name(hw->clk), __func__);
+
+ pll->ndiv = params.ndiv;
+
+ return rate;
+}
+
+int clk_fs660c32_vco_get_params(unsigned long input,
+ unsigned long output, struct stm_fs *fs)
+{
+/* Formula
+ VCO frequency = (fin x ndiv) / pdiv
+ ndiv = VCOfreq * pdiv / fin
+ */
+ unsigned long pdiv = 1, n;
+
+ /* Output clock range: 384Mhz to 660Mhz */
+ if (output < 384000000 || output > 660000000)
+ return -EINVAL;
+
+ if (input > 40000000)
+ /* This means that PDIV would be 2 instead of 1.
+ Not supported today. */
+ return -EINVAL;
+
+ input /= 1000;
+ output /= 1000;
+
+ n = output * pdiv / input;
+ if (n < 16)
+ n = 16;
+ fs->ndiv = n - 16; /* Converting formula value to reg value */
+
+ return 0;
+}
+
+static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct stm_fs params = {};
+
+ if (!clk_fs660c32_vco_get_params(*prate, rate, &params))
+ clk_fs660c32_vco_get_rate(*prate, &params, &rate);
+
+ pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n",
+ __func__, __clk_get_name(hw->clk),
+ rate, (unsigned int)params.ndiv);
+
+ return rate;
+}
+
+static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
+ struct stm_fs params = {};
+ long hwrate = 0;
+ unsigned long flags = 0;
+
+ if (!rate || !parent_rate)
+ return -EINVAL;
+
+ if (!clk_fs660c32_vco_get_params(parent_rate, rate, &params))
+ clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);
+
+ pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n",
+ __func__, __clk_get_name(hw->clk),
+ hwrate, (unsigned int)params.ndiv);
+
+ if (!hwrate)
+ return -EINVAL;
+
+ pll->ndiv = params.ndiv;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ CLKGEN_WRITE(pll, ndiv, pll->ndiv);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops st_quadfs_pll_c65_ops = {
+ .enable = quadfs_pll_enable,
+ .disable = quadfs_pll_disable,
+ .is_enabled = quadfs_pll_is_enabled,
+};
+
+static const struct clk_ops st_quadfs_pll_c32_ops = {
+ .enable = quadfs_pll_enable,
+ .disable = quadfs_pll_disable,
+ .is_enabled = quadfs_pll_is_enabled,
+ .recalc_rate = quadfs_pll_fs660c32_recalc_rate,
+ .round_rate = quadfs_pll_fs660c32_round_rate,
+ .set_rate = quadfs_pll_fs660c32_set_rate,
+};
+
+static struct clk * __init st_clk_register_quadfs_pll(
+ const char *name, const char *parent_name,
+ struct clkgen_quadfs_data *quadfs, void __iomem *reg,
+ spinlock_t *lock)
+{
+ struct st_clk_quadfs_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /*
+ * Sanity check required pointers.
+ */
+ if (WARN_ON(!name || !parent_name))
+ return ERR_PTR(-EINVAL);
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = quadfs->pll_ops;
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll->data = quadfs;
+ pll->regs_base = reg;
+ pll->lock = lock;
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+/**
+ * DOC: A digital frequency synthesizer
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional
+ * rate - set rate is functional
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+/**
+ * struct st_clk_quadfs_fsynth - One clock output from a four channel digital
+ * frequency synthesizer (fsynth) block.
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @regs_base: base address of the fsynth configuration registers
+ * @lock: spinlock protecting shared register access, may be NULL
+ * @data: fsynth variant description (register fields and capabilities)
+ * @chan: output channel index within the fsynth block
+ *
+ * The digital standby (nsb) control of a channel is active low, so the
+ * channel is in standby when the control bit is cleared. The optional
+ * divide by 3 (nsdiv) control is also active low: the divide by 3 is
+ * applied when the control bit is cleared and bypassed when it is set.
+ */
+struct st_clk_quadfs_fsynth {
+ struct clk_hw hw;
+ void __iomem *regs_base;
+ spinlock_t *lock;
+ struct clkgen_quadfs_data *data;
+
+ u32 chan;
+ /*
+ * Cached hardware values from set_rate so we can program the
+ * hardware in enable. There are two reasons for this:
+ *
+ * 1. The registers may not be writable until the parent has been
+ * enabled.
+ *
+ * 2. It restores the clock rate when a driver does an enable
+ * on PM restore, after a suspend to RAM has lost the hardware
+ * setup.
+ */
+ u32 md;
+ u32 pe;
+ u32 sdiv;
+ u32 nsdiv;
+};
+
+#define to_quadfs_fsynth(_hw) \
+ container_of(_hw, struct st_clk_quadfs_fsynth, hw)
+
+static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs)
+{
+ /*
+ * Pulse the program enable register lsb to make the hardware take
+ * notice of the new md/pe values with a glitchless transition.
+ */
+ CLKGEN_WRITE(fs, en[fs->chan], 1);
+ CLKGEN_WRITE(fs, en[fs->chan], 0);
+}
+
+static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs)
+{
+ unsigned long flags = 0;
+
+ /*
+ * Ensure the md/pe parameters are ignored while we are
+ * reprogramming them so we can get a glitchless change
+ * when fine tuning the speed of a running clock.
+ */
+ CLKGEN_WRITE(fs, en[fs->chan], 0);
+
+ CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md);
+ CLKGEN_WRITE(fs, pe[fs->chan], fs->pe);
+ CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv);
+
+ if (fs->lock)
+ spin_lock_irqsave(fs->lock, flags);
+
+ if (fs->data->nsdiv_present)
+ CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv);
+
+ if (fs->lock)
+ spin_unlock_irqrestore(fs->lock, flags);
+}
+
+static int quadfs_fsynth_enable(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ unsigned long flags = 0;
+
+ pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk));
+
+ quadfs_fsynth_program_rate(fs);
+
+ if (fs->lock)
+ spin_lock_irqsave(fs->lock, flags);
+
+ CLKGEN_WRITE(fs, nsb[fs->chan], 1);
+
+ if (fs->lock)
+ spin_unlock_irqrestore(fs->lock, flags);
+
+ quadfs_fsynth_program_enable(fs);
+
+ return 0;
+}
+
+static void quadfs_fsynth_disable(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ unsigned long flags = 0;
+
+ pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk));
+
+ if (fs->lock)
+ spin_lock_irqsave(fs->lock, flags);
+
+ CLKGEN_WRITE(fs, nsb[fs->chan], 0);
+
+ if (fs->lock)
+ spin_unlock_irqrestore(fs->lock, flags);
+}
+
+static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]);
+
+ pr_debug("%s: %s enable bit = 0x%x\n",
+ __func__, __clk_get_name(hw->clk), nsb);
+
+ return !!nsb;
+}
+
+#define P15 (uint64_t)(1 << 15)
+
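+/*
+ * P15 is the 2^15 fixed-point scaling applied to the fine-tune (pe) term.
+ * The fs216c65 output computed below works out to:
+ *
+ *   Fout = (Fin * 32 * nd * 2^15) /
+ *          (2^(sdiv + 1) * ns * ((m + 33) * 2^15 - pe))
+ *
+ * with m = mdiv - 32, ns = 1 or 3 depending on nsdiv, and nd fixed at 8
+ * because the ndiv field is stuck at 0.
+ */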
+static int clk_fs216c65_get_rate(unsigned long input, struct stm_fs *fs,
+ unsigned long *rate)
+{
+ uint64_t res;
+ unsigned long ns;
+ unsigned long nd = 8; /* ndiv stuck at 0 => val = 8 */
+ unsigned long s;
+ long m;
+
+ m = fs->mdiv - 32;
+ s = 1 << (fs->sdiv + 1);
+ ns = (fs->nsdiv ? 1 : 3);
+
+ res = (uint64_t)(s * ns * P15 * (uint64_t)(m + 33));
+ res = res - (s * ns * fs->pe);
+ *rate = div64_u64(P15 * nd * input * 32, res);
+
+ return 0;
+}
+
+static int clk_fs432c65_get_rate(unsigned long input, struct stm_fs *fs,
+ unsigned long *rate)
+{
+ uint64_t res;
+ unsigned long nd = 16; /* ndiv value; stuck at 0 (30 MHz input) */
+ long m;
+ unsigned long sd;
+ unsigned long ns;
+
+ m = fs->mdiv - 32;
+ sd = 1 << (fs->sdiv + 1);
+ ns = (fs->nsdiv ? 1 : 3);
+
+ res = (uint64_t)(sd * ns * P15 * (uint64_t)(m + 33));
+ res = res - (sd * ns * fs->pe);
+ *rate = div64_u64(P15 * nd * input * 32, res);
+
+ return 0;
+}
+
+#define P20 (uint64_t)(1 << 20)
+
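+/*
+ * P20 is the 2^20 fixed-point scaling used by the fs660c32 digital part.
+ * The output computed below works out to:
+ *
+ *   Fout = (Fin * 32 * 2^20) /
+ *          (((32 + mdiv) * 2^20 + 32 * pe) * 2^sdiv * ns)
+ *
+ * with ns = 1 or 3 depending on the nsdiv control bit.
+ */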
+static int clk_fs660c32_dig_get_rate(unsigned long input,
+ struct stm_fs *fs, unsigned long *rate)
+{
+ unsigned long s = (1 << fs->sdiv);
+ unsigned long ns;
+ uint64_t res;
+
+ /*
+ * 'nsdiv' is a register value ('BIN') which is translated
+ * to a decimal value according to following rules.
+ *
+ * nsdiv ns.dec
+ * 0 3
+ * 1 1
+ */
+ ns = (fs->nsdiv == 1) ? 1 : 3;
+
+ res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns;
+ *rate = (unsigned long)div64_u64(input * P20 * 32, res);
+
+ return 0;
+}
+
+static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs,
+ struct stm_fs *params)
+{
+ /*
+ * Get the initial hardware values for recalc_rate
+ */
+ params->mdiv = CLKGEN_READ(fs, mdiv[fs->chan]);
+ params->pe = CLKGEN_READ(fs, pe[fs->chan]);
+ params->sdiv = CLKGEN_READ(fs, sdiv[fs->chan]);
+
+ if (fs->data->nsdiv_present)
+ params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]);
+ else
+ params->nsdiv = 1;
+
+ /*
+ * If mdiv, pe and sdiv all read back as zero, assume no clock
+ * rate has been programmed yet.
+ */
+ if (!params->mdiv && !params->pe && !params->sdiv)
+ return 1;
+
+ fs->md = params->mdiv;
+ fs->pe = params->pe;
+ fs->sdiv = params->sdiv;
+ fs->nsdiv = params->nsdiv;
+
+ return 0;
+}
+
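+/*
+ * Scan the variant's rate table and return the output rate closest to the
+ * requested one, filling *params with the matching table entry. The early
+ * break once the error starts growing assumes the table entries are ordered
+ * by monotonically changing output rate.
+ */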
+static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate, struct stm_fs *params)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ int (*clk_fs_get_rate)(unsigned long,
+ struct stm_fs *, unsigned long *);
+ struct stm_fs prev_params;
+ unsigned long prev_rate, rate = 0;
+ unsigned long diff_rate, prev_diff_rate = ~0;
+ int index;
+
+ clk_fs_get_rate = fs->data->get_rate;
+
+ for (index = 0; index < fs->data->rtbl_cnt; index++) {
+ /* Save the previous candidate before it is overwritten */
+ prev_rate = rate;
+ prev_params = *params;
+
+ *params = fs->data->rtbl[index];
+
+ clk_fs_get_rate(prate, &fs->data->rtbl[index], &rate);
+
+ diff_rate = abs(drate - rate);
+
+ if (diff_rate > prev_diff_rate) {
+ rate = prev_rate;
+ *params = prev_params;
+ break;
+ }
+
+ prev_diff_rate = diff_rate;
+
+ if (drate == rate)
+ return rate;
+ }
+
+ return rate;
+}
+
+static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ unsigned long rate = 0;
+ struct stm_fs params;
+ int (*clk_fs_get_rate)(unsigned long,
+ struct stm_fs *, unsigned long *);
+
+ clk_fs_get_rate = fs->data->get_rate;
+
+ if (quadfs_fsynt_get_hw_value_for_recalc(fs, &params))
+ return 0;
+
+ if (clk_fs_get_rate(parent_rate, &params, &rate)) {
+ pr_err("%s:%s error calculating rate\n",
+ __clk_get_name(hw->clk), __func__);
+ }
+
+ pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+
+ return rate;
+}
+
+static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct stm_fs params;
+
+ rate = quadfs_find_best_rate(hw, rate, *prate, &params);
+
+ pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
+ __func__, __clk_get_name(hw->clk),
+ rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
+ (unsigned int)params.pe, (unsigned int)params.nsdiv);
+
+ return rate;
+}
+
+static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs,
+ struct stm_fs *params)
+{
+ fs->md = params->mdiv;
+ fs->pe = params->pe;
+ fs->sdiv = params->sdiv;
+ fs->nsdiv = params->nsdiv;
+
+ /*
+ * In some integrations you can only change the fsynth programming when
+ * the parent entity containing it is enabled.
+ */
+ quadfs_fsynth_program_rate(fs);
+ quadfs_fsynth_program_enable(fs);
+}
+
+static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ struct stm_fs params;
+ long hwrate;
+
+ if (!rate || !parent_rate)
+ return -EINVAL;
+
+ memset(&params, 0, sizeof(struct stm_fs));
+
+ hwrate = quadfs_find_best_rate(hw, rate, parent_rate, &params);
+ if (!hwrate)
+ return -EINVAL;
+
+ quadfs_program_and_enable(fs, &params);
+
+ return 0;
+}
+
+static const struct clk_ops st_quadfs_ops = {
+ .enable = quadfs_fsynth_enable,
+ .disable = quadfs_fsynth_disable,
+ .is_enabled = quadfs_fsynth_is_enabled,
+ .round_rate = quadfs_round_rate,
+ .set_rate = quadfs_set_rate,
+ .recalc_rate = quadfs_recalc_rate,
+};
+
+static struct clk * __init st_clk_register_quadfs_fsynth(
+ const char *name, const char *parent_name,
+ struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan,
+ spinlock_t *lock)
+{
+ struct st_clk_quadfs_fsynth *fs;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /*
+ * Sanity check required pointers, note that nsdiv3 is optional.
+ */
+ if (WARN_ON(!name || !parent_name))
+ return ERR_PTR(-EINVAL);
+
+ fs = kzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &st_quadfs_ops;
+ init.flags = CLK_GET_RATE_NOCACHE | CLK_IS_BASIC;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ fs->data = quadfs;
+ fs->regs_base = reg;
+ fs->chan = chan;
+ fs->lock = lock;
+ fs->hw.init = &init;
+
+ clk = clk_register(NULL, &fs->hw);
+
+ if (IS_ERR(clk))
+ kfree(fs);
+
+ return clk;
+}
+
+static struct of_device_id quadfs_of_match[] = {
+ {
+ .compatible = "st,stih416-quadfs216",
+ .data = (void *)&st_fs216c65_416
+ },
+ {
+ .compatible = "st,stih416-quadfs432",
+ .data = (void *)&st_fs432c65_416
+ },
+ {
+ .compatible = "st,stih416-quadfs660-E",
+ .data = (void *)&st_fs660c32_E_416
+ },
+ {
+ .compatible = "st,stih416-quadfs660-F",
+ .data = (void *)&st_fs660c32_F_416
+ },
+ {}
+};
+
+static void __init st_of_create_quadfs_fsynths(
+ struct device_node *np, const char *pll_name,
+ struct clkgen_quadfs_data *quadfs, void __iomem *reg,
+ spinlock_t *lock)
+{
+ struct clk_onecell_data *clk_data;
+ int fschan;
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->clk_num = QUADFS_MAX_CHAN;
+ clk_data->clks = kzalloc(QUADFS_MAX_CHAN * sizeof(struct clk *),
+ GFP_KERNEL);
+
+ if (!clk_data->clks) {
+ kfree(clk_data);
+ return;
+ }
+
+ for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) {
+ struct clk *clk;
+ const char *clk_name;
+
+ if (of_property_read_string_index(np, "clock-output-names",
+ fschan, &clk_name)) {
+ break;
+ }
+
+ /*
+ * If we read an empty clock name then the channel is unused
+ */
+ if (*clk_name == '\0')
+ continue;
+
+ clk = st_clk_register_quadfs_fsynth(clk_name, pll_name,
+ quadfs, reg, fschan, lock);
+
+ /*
+ * If there was an error registering this clock output, skip it
+ * and move on to the next channel.
+ */
+ if (!IS_ERR(clk)) {
+ clk_data->clks[fschan] = clk;
+ pr_debug("%s: parent %s rate %u\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ (unsigned int)clk_get_rate(clk));
+ }
+ }
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+}
+
+static void __init st_of_quadfs_setup(struct device_node *np)
+{
+ const struct of_device_id *match;
+ struct clk *clk;
+ const char *pll_name, *clk_parent_name;
+ void __iomem *reg;
+ spinlock_t *lock;
+
+ match = of_match_node(quadfs_of_match, np);
+ if (WARN_ON(!match))
+ return;
+
+ reg = of_iomap(np, 0);
+ if (!reg)
+ return;
+
+ clk_parent_name = of_clk_get_parent_name(np, 0);
+ if (!clk_parent_name)
+ return;
+
+ pll_name = kasprintf(GFP_KERNEL, "%s.pll", np->name);
+ if (!pll_name)
+ return;
+
+ lock = kzalloc(sizeof(*lock), GFP_KERNEL);
+ if (!lock)
+ goto err_exit;
+
+ spin_lock_init(lock);
+
+ clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name,
+ (struct clkgen_quadfs_data *) match->data, reg, lock);
+ if (IS_ERR(clk))
+ goto err_exit;
+ else
+ pr_debug("%s: parent %s rate %u\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ (unsigned int)clk_get_rate(clk));
+
+ st_of_create_quadfs_fsynths(np, pll_name,
+ (struct clkgen_quadfs_data *)match->data,
+ reg, lock);
+
+err_exit:
+ kfree(pll_name); /* No longer need local copy of the PLL name */
+}
+CLK_OF_DECLARE(quadfs, "st,quadfs", st_of_quadfs_setup);
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
new file mode 100644
index 000000000000..a329906d1e81
--- /dev/null
+++ b/drivers/clk/st/clkgen-mux.c
@@ -0,0 +1,820 @@
+/*
+ * clkgen-mux.c: ST GEN-MUX Clock driver
+ *
+ * Copyright (C) 2014 STMicroelectronics (R&D) Limited
+ *
+ * Authors: Stephen Gallimore <stephen.gallimore@st.com>
+ * Pankaj Dev <pankaj.dev@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/clk-provider.h>
+
+static DEFINE_SPINLOCK(clkgena_divmux_lock);
+static DEFINE_SPINLOCK(clkgenf_lock);
+
+static const char ** __init clkgen_mux_get_parents(struct device_node *np,
+ int *num_parents)
+{
+ const char **parents;
+ int nparents, i;
+
+ nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (WARN_ON(nparents <= 0))
+ return ERR_PTR(-EINVAL);
+
+ parents = kzalloc(nparents * sizeof(const char *), GFP_KERNEL);
+ if (!parents)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nparents; i++)
+ parents[i] = of_clk_get_parent_name(np, i);
+
+ *num_parents = nparents;
+ return parents;
+}
+
+/**
+ * DOC: Clock mux with a programmable divider on each of its three inputs.
+ * The mux has an input setting which effectively gates its output.
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional & control gating
+ * rate - set rate is supported
+ * parent - set/get parent
+ */
+
+#define NUM_INPUTS 3
+
+struct clkgena_divmux {
+ struct clk_hw hw;
+ /* Subclassed mux and divider structures */
+ struct clk_mux mux;
+ struct clk_divider div[NUM_INPUTS];
+ /* Enable/running feedback register bits for each input */
+ void __iomem *feedback_reg[NUM_INPUTS];
+ int feedback_bit_idx;
+
+ u8 muxsel;
+};
+
+#define to_clkgena_divmux(_hw) container_of(_hw, struct clkgena_divmux, hw)
+
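+/*
+ * struct clkgena_divmux_data describes the register layout of one ClockgenA
+ * divmux variant: the number of outputs, the offsets of the (possibly split)
+ * mux register(s) and the bit position of channel 0's mux field, the
+ * per-input divider register bases, and the per-input enable/running
+ * feedback registers with the feedback bit of channel 0.
+ */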
+struct clkgena_divmux_data {
+ int num_outputs;
+ int mux_offset;
+ int mux_offset2;
+ int mux_start_bit;
+ int div_offsets[NUM_INPUTS];
+ int fb_offsets[NUM_INPUTS];
+ int fb_start_bit_idx;
+};
+
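+/*
+ * Mux selector value that switches the output off; selecting it in
+ * clkgena_divmux_disable() is what gates the clock output.
+ */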
+#define CKGAX_CLKOPSRC_SWITCH_OFF 0x3
+
+static int clkgena_divmux_is_running(struct clkgena_divmux *mux)
+{
+ u32 regval = readl(mux->feedback_reg[mux->muxsel]);
+ u32 running = regval & BIT(mux->feedback_bit_idx);
+ return !!running;
+}
+
+static int clkgena_divmux_enable(struct clk_hw *hw)
+{
+ struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
+ struct clk_hw *mux_hw = &genamux->mux.hw;
+ unsigned long timeout;
+ int ret = 0;
+
+ mux_hw->clk = hw->clk;
+
+ ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
+ if (ret)
+ return ret;
+
+ timeout = jiffies + msecs_to_jiffies(10);
+
+ while (!clkgena_divmux_is_running(genamux)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static void clkgena_divmux_disable(struct clk_hw *hw)
+{
+ struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
+ struct clk_hw *mux_hw = &genamux->mux.hw;
+
+ mux_hw->clk = hw->clk;
+
+ clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
+}
+
+static int clkgena_divmux_is_enabled(struct clk_hw *hw)
+{
+ struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
+ struct clk_hw *mux_hw = &genamux->mux.hw;
+
+ mux_hw->clk = hw->clk;
+
+ return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
+}
+
+static u8 clkgena_divmux_get_parent(struct clk_hw *hw)
+{
+ struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
+ struct clk_hw *mux_hw = &genamux->mux.hw;
+
+ mux_hw->clk = hw->clk;
+
+ genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
+ if ((s8)genamux->muxsel < 0) {
+ pr_debug("%s: %s: Invalid parent, setting to default.\n",
+ __func__, __clk_get_name(hw->clk));
+ genamux->muxsel = 0;
+ }
+
+ return genamux->muxsel;
+}
+
+static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
+
+ if (index >= CKGAX_CLKOPSRC_SWITCH_OFF)
+ return -EINVAL;
+
+ genamux->muxsel = index;
+
+ /*
+ * If the mux is already enabled, call enable directly to set the
+ * new mux position and wait for it to start running again. Otherwise
+ * do nothing.
+ */
+ if (clkgena_divmux_is_enabled(hw))
+ clkgena_divmux_enable(hw);
+
+ return 0;
+}
+
+static unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
+ struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
+
+ div_hw->clk = hw->clk;
+
+ return clk_divider_ops.recalc_rate(div_hw, parent_rate);
+}
+
+static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
+ struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
+
+ div_hw->clk = hw->clk;
+
+ return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
+}
+
+static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
+ struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
+
+ div_hw->clk = hw->clk;
+
+ return clk_divider_ops.round_rate(div_hw, rate, prate);
+}
+
+static const struct clk_ops clkgena_divmux_ops = {
+ .enable = clkgena_divmux_enable,
+ .disable = clkgena_divmux_disable,
+ .is_enabled = clkgena_divmux_is_enabled,
+ .get_parent = clkgena_divmux_get_parent,
+ .set_parent = clkgena_divmux_set_parent,
+ .round_rate = clkgena_divmux_round_rate,
+ .recalc_rate = clkgena_divmux_recalc_rate,
+ .set_rate = clkgena_divmux_set_rate,
+};
+
+/**
+ * clk_register_genamux - register a genamux clock with the clock framework
+ * @name: name of the clock
+ * @parent_names: array of parent clock names
+ * @num_parents: number of parents
+ * @reg: base address of the ClockgenA register block
+ * @muxdata: register layout description for this ClockgenA variant
+ * @idx: output channel index within the block
+ */
+struct clk *clk_register_genamux(const char *name,
+ const char **parent_names, u8 num_parents,
+ void __iomem *reg,
+ const struct clkgena_divmux_data *muxdata,
+ u32 idx)
+{
+ /*
+ * Fixed constants across all ClockgenA variants
+ */
+ const int mux_width = 2;
+ const int divider_width = 5;
+ struct clkgena_divmux *genamux;
+ struct clk *clk;
+ struct clk_init_data init;
+ int i;
+
+ genamux = kzalloc(sizeof(*genamux), GFP_KERNEL);
+ if (!genamux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clkgena_divmux_ops;
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ genamux->mux.lock = &clkgena_divmux_lock;
+ genamux->mux.mask = BIT(mux_width) - 1;
+ genamux->mux.shift = muxdata->mux_start_bit + (idx * mux_width);
+ if (genamux->mux.shift > 31) {
+ /*
+ * We have spilled into the second mux register so
+ * adjust the register address and the bit shift accordingly
+ */
+ genamux->mux.reg = reg + muxdata->mux_offset2;
+ genamux->mux.shift -= 32;
+ } else {
+ genamux->mux.reg = reg + muxdata->mux_offset;
+ }
+
+ for (i = 0; i < NUM_INPUTS; i++) {
+ /*
+ * Divider config for each input
+ */
+ void __iomem *divbase = reg + muxdata->div_offsets[i];
+ genamux->div[i].width = divider_width;
+ genamux->div[i].reg = divbase + (idx * sizeof(u32));
+
+ /*
+ * Mux enabled/running feedback register for each input.
+ */
+ genamux->feedback_reg[i] = reg + muxdata->fb_offsets[i];
+ }
+
+ genamux->feedback_bit_idx = muxdata->fb_start_bit_idx + idx;
+ genamux->hw.init = &init;
+
+ clk = clk_register(NULL, &genamux->hw);
+ if (IS_ERR(clk)) {
+ kfree(genamux);
+ goto err;
+ }
+
+ pr_debug("%s: parent %s rate %lu\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ clk_get_rate(clk));
+err:
+ return clk;
+}
+
+static struct clkgena_divmux_data st_divmux_c65hs = {
+ .num_outputs = 4,
+ .mux_offset = 0x14,
+ .mux_start_bit = 0,
+ .div_offsets = { 0x800, 0x900, 0xb00 },
+ .fb_offsets = { 0x18, 0x1c, 0x20 },
+ .fb_start_bit_idx = 0,
+};
+
+static struct clkgena_divmux_data st_divmux_c65ls = {
+ .num_outputs = 14,
+ .mux_offset = 0x14,
+ .mux_offset2 = 0x24,
+ .mux_start_bit = 8,
+ .div_offsets = { 0x810, 0xa10, 0xb10 },
+ .fb_offsets = { 0x18, 0x1c, 0x20 },
+ .fb_start_bit_idx = 4,
+};
+
+static struct clkgena_divmux_data st_divmux_c32odf0 = {
+ .num_outputs = 8,
+ .mux_offset = 0x1c,
+ .mux_start_bit = 0,
+ .div_offsets = { 0x800, 0x900, 0xa60 },
+ .fb_offsets = { 0x2c, 0x24, 0x28 },
+ .fb_start_bit_idx = 0,
+};
+
+static struct clkgena_divmux_data st_divmux_c32odf1 = {
+ .num_outputs = 8,
+ .mux_offset = 0x1c,
+ .mux_start_bit = 16,
+ .div_offsets = { 0x820, 0x980, 0xa80 },
+ .fb_offsets = { 0x2c, 0x24, 0x28 },
+ .fb_start_bit_idx = 8,
+};
+
+static struct clkgena_divmux_data st_divmux_c32odf2 = {
+ .num_outputs = 8,
+ .mux_offset = 0x20,
+ .mux_start_bit = 0,
+ .div_offsets = { 0x840, 0xa20, 0xb10 },
+ .fb_offsets = { 0x2c, 0x24, 0x28 },
+ .fb_start_bit_idx = 16,
+};
+
+static struct clkgena_divmux_data st_divmux_c32odf3 = {
+ .num_outputs = 8,
+ .mux_offset = 0x20,
+ .mux_start_bit = 16,
+ .div_offsets = { 0x860, 0xa40, 0xb30 },
+ .fb_offsets = { 0x2c, 0x24, 0x28 },
+ .fb_start_bit_idx = 24,
+};
+
+static struct of_device_id clkgena_divmux_of_match[] = {
+ {
+ .compatible = "st,clkgena-divmux-c65-hs",
+ .data = &st_divmux_c65hs,
+ },
+ {
+ .compatible = "st,clkgena-divmux-c65-ls",
+ .data = &st_divmux_c65ls,
+ },
+ {
+ .compatible = "st,clkgena-divmux-c32-odf0",
+ .data = &st_divmux_c32odf0,
+ },
+ {
+ .compatible = "st,clkgena-divmux-c32-odf1",
+ .data = &st_divmux_c32odf1,
+ },
+ {
+ .compatible = "st,clkgena-divmux-c32-odf2",
+ .data = &st_divmux_c32odf2,
+ },
+ {
+ .compatible = "st,clkgena-divmux-c32-odf3",
+ .data = &st_divmux_c32odf3,
+ },
+ {}
+};
+
+static void __iomem * __init clkgen_get_register_base(
+ struct device_node *np)
+{
+ struct device_node *pnode;
+ void __iomem *reg = NULL;
+
+ pnode = of_get_parent(np);
+ if (!pnode)
+ return NULL;
+
+ reg = of_iomap(pnode, 0);
+
+ of_node_put(pnode);
+ return reg;
+}
+
+static void __init st_of_clkgena_divmux_setup(struct device_node *np)
+{
+ const struct of_device_id *match;
+ const struct clkgena_divmux_data *data;
+ struct clk_onecell_data *clk_data;
+ void __iomem *reg;
+ const char **parents;
+ int num_parents = 0, i;
+
+ match = of_match_node(clkgena_divmux_of_match, np);
+ if (WARN_ON(!match))
+ return;
+
+ data = (struct clkgena_divmux_data *)match->data;
+
+ reg = clkgen_get_register_base(np);
+ if (!reg)
+ return;
+
+ parents = clkgen_mux_get_parents(np, &num_parents);
+ if (IS_ERR(parents))
+ return;
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ goto err;
+
+ clk_data->clk_num = data->num_outputs;
+ clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
+ GFP_KERNEL);
+
+ if (!clk_data->clks)
+ goto err;
+
+ for (i = 0; i < clk_data->clk_num; i++) {
+ struct clk *clk;
+ const char *clk_name;
+
+ if (of_property_read_string_index(np, "clock-output-names",
+ i, &clk_name))
+ break;
+
+ /*
+ * If we read an empty clock name then the output is unused
+ */
+ if (*clk_name == '\0')
+ continue;
+
+ clk = clk_register_genamux(clk_name, parents, num_parents,
+ reg, data, i);
+
+ if (IS_ERR(clk))
+ goto err;
+
+ clk_data->clks[i] = clk;
+ }
+
+ kfree(parents);
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+ return;
+err:
+ if (clk_data)
+ kfree(clk_data->clks);
+
+ kfree(clk_data);
+ kfree(parents);
+}
+CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup);
+
+struct clkgena_prediv_data {
+ u32 offset;
+ u8 shift;
+ struct clk_div_table *table;
+};
+
+static struct clk_div_table prediv_table16[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 16 },
+ { .div = 0 },
+};
+
+static struct clkgena_prediv_data prediv_c65_data = {
+ .offset = 0x4c,
+ .shift = 31,
+ .table = prediv_table16,
+};
+
+static struct clkgena_prediv_data prediv_c32_data = {
+ .offset = 0x50,
+ .shift = 1,
+ .table = prediv_table16,
+};
+
+static struct of_device_id clkgena_prediv_of_match[] = {
+ { .compatible = "st,clkgena-prediv-c65", .data = &prediv_c65_data },
+ { .compatible = "st,clkgena-prediv-c32", .data = &prediv_c32_data },
+ {}
+};
+
+static void __init st_of_clkgena_prediv_setup(struct device_node *np)
+{
+ const struct of_device_id *match;
+ void __iomem *reg;
+ const char *parent_name, *clk_name;
+ struct clk *clk;
+ struct clkgena_prediv_data *data;
+
+ match = of_match_node(clkgena_prediv_of_match, np);
+ if (!match) {
+ pr_err("%s: No matching data\n", __func__);
+ return;
+ }
+
+ data = (struct clkgena_prediv_data *)match->data;
+
+ reg = clkgen_get_register_base(np);
+ if (!reg)
+ return;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ if (of_property_read_string_index(np, "clock-output-names",
+ 0, &clk_name))
+ return;
+
+ clk = clk_register_divider_table(NULL, clk_name, parent_name, 0,
+ reg + data->offset, data->shift, 1,
+ 0, data->table, NULL);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ pr_debug("%s: parent %s rate %u\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ (unsigned int)clk_get_rate(clk));
+
+ return;
+}
+CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup);
+
+struct clkgen_mux_data {
+ u32 offset;
+ u8 shift;
+ u8 width;
+ spinlock_t *lock;
+ unsigned long clk_flags;
+ u8 mux_flags;
+};
+
+static struct clkgen_mux_data clkgen_mux_c_vcc_hd_416 = {
+ .offset = 0,
+ .shift = 0,
+ .width = 1,
+};
+
+static struct clkgen_mux_data clkgen_mux_f_vcc_fvdp_416 = {
+ .offset = 0,
+ .shift = 0,
+ .width = 1,
+};
+
+static struct clkgen_mux_data clkgen_mux_f_vcc_hva_416 = {
+ .offset = 0,
+ .shift = 0,
+ .width = 1,
+};
+
+static struct clkgen_mux_data clkgen_mux_f_vcc_hd_416 = {
+ .offset = 0,
+ .shift = 16,
+ .width = 1,
+ .lock = &clkgenf_lock,
+};
+
+static struct clkgen_mux_data clkgen_mux_c_vcc_sd_416 = {
+ .offset = 0,
+ .shift = 17,
+ .width = 1,
+ .lock = &clkgenf_lock,
+};
+
+static struct clkgen_mux_data stih415_a9_mux_data = {
+ .offset = 0,
+ .shift = 1,
+ .width = 2,
+};
+static struct clkgen_mux_data stih416_a9_mux_data = {
+ .offset = 0,
+ .shift = 0,
+ .width = 2,
+};
+
+static struct of_device_id mux_of_match[] = {
+ {
+ .compatible = "st,stih416-clkgenc-vcc-hd",
+ .data = &clkgen_mux_c_vcc_hd_416,
+ },
+ {
+ .compatible = "st,stih416-clkgenf-vcc-fvdp",
+ .data = &clkgen_mux_f_vcc_fvdp_416,
+ },
+ {
+ .compatible = "st,stih416-clkgenf-vcc-hva",
+ .data = &clkgen_mux_f_vcc_hva_416,
+ },
+ {
+ .compatible = "st,stih416-clkgenf-vcc-hd",
+ .data = &clkgen_mux_f_vcc_hd_416,
+ },
+ {
+ .compatible = "st,stih416-clkgenf-vcc-sd",
+ .data = &clkgen_mux_c_vcc_sd_416,
+ },
+ {
+ .compatible = "st,stih415-clkgen-a9-mux",
+ .data = &stih415_a9_mux_data,
+ },
+ {
+ .compatible = "st,stih416-clkgen-a9-mux",
+ .data = &stih416_a9_mux_data,
+ },
+ {}
+};
+
+static void __init st_of_clkgen_mux_setup(struct device_node *np)
+{
+ const struct of_device_id *match;
+ struct clk *clk;
+ void __iomem *reg;
+ const char **parents;
+ int num_parents;
+ struct clkgen_mux_data *data;
+
+ match = of_match_node(mux_of_match, np);
+ if (!match) {
+ pr_err("%s: No matching data\n", __func__);
+ return;
+ }
+
+ data = (struct clkgen_mux_data *)match->data;
+
+ reg = of_iomap(np, 0);
+ if (!reg) {
+ pr_err("%s: Failed to get base address\n", __func__);
+ return;
+ }
+
+ parents = clkgen_mux_get_parents(np, &num_parents);
+ if (IS_ERR(parents)) {
+ pr_err("%s: Failed to get parents (%ld)\n",
+ __func__, PTR_ERR(parents));
+ return;
+ }
+
+ clk = clk_register_mux(NULL, np->name, parents, num_parents,
+ data->clk_flags | CLK_SET_RATE_PARENT,
+ reg + data->offset,
+ data->shift, data->width, data->mux_flags,
+ data->lock);
+ if (IS_ERR(clk))
+ goto err;
+
+ pr_debug("%s: parent %s rate %u\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ (unsigned int)clk_get_rate(clk));
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+
+err:
+ kfree(parents);
+
+ return;
+}
+CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup);
+
+#define VCC_MAX_CHANNELS 16
+
+#define VCC_GATE_OFFSET 0x0
+#define VCC_MUX_OFFSET 0x4
+#define VCC_DIV_OFFSET 0x8
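+
+/*
+ * Register layout used by st_of_clkgen_vcc_setup() below: bit i of the
+ * gate register enables output i (the gate is set-to-disable), while bits
+ * [2i+1:2i] of the mux and divider registers select the parent input and
+ * the power-of-two divide ratio for output i.
+ */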
+
+struct clkgen_vcc_data {
+ spinlock_t *lock;
+ unsigned long clk_flags;
+};
+
+static struct clkgen_vcc_data st_clkgenc_vcc_416 = {
+ .clk_flags = CLK_SET_RATE_PARENT,
+};
+
+static struct clkgen_vcc_data st_clkgenf_vcc_416 = {
+ .lock = &clkgenf_lock,
+};
+
+static struct of_device_id vcc_of_match[] = {
+ { .compatible = "st,stih416-clkgenc", .data = &st_clkgenc_vcc_416 },
+ { .compatible = "st,stih416-clkgenf", .data = &st_clkgenf_vcc_416 },
+ {}
+};
+
+static void __init st_of_clkgen_vcc_setup(struct device_node *np)
+{
+ const struct of_device_id *match;
+ void __iomem *reg;
+ const char **parents;
+ int num_parents, i;
+ struct clk_onecell_data *clk_data;
+ struct clkgen_vcc_data *data;
+
+ match = of_match_node(vcc_of_match, np);
+ if (WARN_ON(!match))
+ return;
+ data = (struct clkgen_vcc_data *)match->data;
+
+ reg = of_iomap(np, 0);
+ if (!reg)
+ return;
+
+ parents = clkgen_mux_get_parents(np, &num_parents);
+ if (IS_ERR(parents))
+ return;
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ goto err;
+
+ clk_data->clk_num = VCC_MAX_CHANNELS;
+ clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
+ GFP_KERNEL);
+
+ if (!clk_data->clks)
+ goto err;
+
+ for (i = 0; i < clk_data->clk_num; i++) {
+ struct clk *clk;
+ const char *clk_name;
+ struct clk_gate *gate;
+ struct clk_divider *div;
+ struct clk_mux *mux;
+
+ if (of_property_read_string_index(np, "clock-output-names",
+ i, &clk_name))
+ break;
+
+ /*
+ * If we read an empty clock name then the output is unused
+ */
+ if (*clk_name == '\0')
+ continue;
+
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+ if (!gate)
+ break;
+
+ div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
+ if (!div) {
+ kfree(gate);
+ break;
+ }
+
+ mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+ if (!mux) {
+ kfree(gate);
+ kfree(div);
+ break;
+ }
+
+ gate->reg = reg + VCC_GATE_OFFSET;
+ gate->bit_idx = i;
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
+ gate->lock = data->lock;
+
+ div->reg = reg + VCC_DIV_OFFSET;
+ div->shift = 2 * i;
+ div->width = 2;
+ div->flags = CLK_DIVIDER_POWER_OF_TWO;
+
+ mux->reg = reg + VCC_MUX_OFFSET;
+ mux->shift = 2 * i;
+ mux->mask = 0x3;
+
+ clk = clk_register_composite(NULL, clk_name, parents,
+ num_parents,
+ &mux->hw, &clk_mux_ops,
+ &div->hw, &clk_divider_ops,
+ &gate->hw, &clk_gate_ops,
+ data->clk_flags);
+ if (IS_ERR(clk)) {
+ kfree(gate);
+ kfree(div);
+ kfree(mux);
+ goto err;
+ }
+
+ pr_debug("%s: parent %s rate %u\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ (unsigned int)clk_get_rate(clk));
+
+ clk_data->clks[i] = clk;
+ }
+
+ kfree(parents);
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+ return;
+
+err:
+ if (clk_data && clk_data->clks) {
+ for (i = 0; i < clk_data->clk_num; i++) {
+ struct clk_composite *composite;
+
+ if (!clk_data->clks[i])
+ continue;
+
+ composite = container_of(__clk_get_hw(clk_data->clks[i]),
+ struct clk_composite, hw);
+ kfree(container_of(composite->gate_hw, struct clk_gate, hw));
+ kfree(container_of(composite->rate_hw, struct clk_divider, hw));
+ kfree(container_of(composite->mux_hw, struct clk_mux, hw));
+ }
+
+ kfree(clk_data->clks);
+ }
+
+ kfree(clk_data);
+ kfree(parents);
+}
+CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup);
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
new file mode 100644
index 000000000000..bca0a0badbfa
--- /dev/null
+++ b/drivers/clk/st/clkgen-pll.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright (C) 2014 STMicroelectronics (R&D) Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+/*
+ * Authors:
+ * Stephen Gallimore <stephen.gallimore@st.com>,
+ * Pankaj Dev <pankaj.dev@st.com>.
+ */
+
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/clk-provider.h>
+
+#include "clkgen.h"
+
+static DEFINE_SPINLOCK(clkgena_c32_odf_lock);
+
+/*
+ * Common PLL configuration register bits for PLL800 and PLL1600 C65
+ */
+#define C65_MDIV_PLL800_MASK (0xff)
+#define C65_MDIV_PLL1600_MASK (0x7)
+#define C65_NDIV_MASK (0xff)
+#define C65_PDIV_MASK (0x7)
+
+/*
+ * PLL configuration register bits for PLL3200 C32
+ */
+#define C32_NDIV_MASK (0xff)
+#define C32_IDF_MASK (0x7)
+#define C32_ODF_MASK (0x3f)
+#define C32_LDF_MASK (0x7f)
+
+#define C32_MAX_ODFS (4)
+
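+/*
+ * struct clkgen_pll_data describes one PLL variant: where to find its
+ * power-down and lock status bits, its divider fields (mdiv/ndiv/pdiv or
+ * idf/ldf depending on the PLL type), the output dividers (ODFs) with
+ * their gates, and the clk_ops implementing the variant.
+ */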
+struct clkgen_pll_data {
+ struct clkgen_field pdn_status;
+ struct clkgen_field locked_status;
+ struct clkgen_field mdiv;
+ struct clkgen_field ndiv;
+ struct clkgen_field pdiv;
+ struct clkgen_field idf;
+ struct clkgen_field ldf;
+ unsigned int num_odfs;
+ struct clkgen_field odf[C32_MAX_ODFS];
+ struct clkgen_field odf_gate[C32_MAX_ODFS];
+ const struct clk_ops *ops;
+};
+
+static const struct clk_ops st_pll1600c65_ops;
+static const struct clk_ops st_pll800c65_ops;
+static const struct clk_ops stm_pll3200c32_ops;
+static const struct clk_ops st_pll1200c32_ops;
+
+static struct clkgen_pll_data st_pll1600c65_ax = {
+ .pdn_status = CLKGEN_FIELD(0x0, 0x1, 19),
+ .locked_status = CLKGEN_FIELD(0x0, 0x1, 31),
+ .mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL1600_MASK, 0),
+ .ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8),
+ .ops = &st_pll1600c65_ops
+};
+
+static struct clkgen_pll_data st_pll800c65_ax = {
+ .pdn_status = CLKGEN_FIELD(0x0, 0x1, 19),
+ .locked_status = CLKGEN_FIELD(0x0, 0x1, 31),
+ .mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL800_MASK, 0),
+ .ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8),
+ .pdiv = CLKGEN_FIELD(0x0, C65_PDIV_MASK, 16),
+ .ops = &st_pll800c65_ops
+};
+
+static struct clkgen_pll_data st_pll3200c32_a1x_0 = {
+ .pdn_status = CLKGEN_FIELD(0x0, 0x1, 31),
+ .locked_status = CLKGEN_FIELD(0x4, 0x1, 31),
+ .ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 0x0),
+ .idf = CLKGEN_FIELD(0x4, C32_IDF_MASK, 0x0),
+ .num_odfs = 4,
+ .odf = { CLKGEN_FIELD(0x54, C32_ODF_MASK, 4),
+ CLKGEN_FIELD(0x54, C32_ODF_MASK, 10),
+ CLKGEN_FIELD(0x54, C32_ODF_MASK, 16),
+ CLKGEN_FIELD(0x54, C32_ODF_MASK, 22) },
+ .odf_gate = { CLKGEN_FIELD(0x54, 0x1, 0),
+ CLKGEN_FIELD(0x54, 0x1, 1),
+ CLKGEN_FIELD(0x54, 0x1, 2),
+ CLKGEN_FIELD(0x54, 0x1, 3) },
+ .ops = &stm_pll3200c32_ops,
+};
+
+static struct clkgen_pll_data st_pll3200c32_a1x_1 = {
+ .pdn_status = CLKGEN_FIELD(0xC, 0x1, 31),
+ .locked_status = CLKGEN_FIELD(0x10, 0x1, 31),
+ .ndiv = CLKGEN_FIELD(0xC, C32_NDIV_MASK, 0x0),
+ .idf = CLKGEN_FIELD(0x10, C32_IDF_MASK, 0x0),
+ .num_odfs = 4,
+ .odf = { CLKGEN_FIELD(0x58, C32_ODF_MASK, 4),
+ CLKGEN_FIELD(0x58, C32_ODF_MASK, 10),
+ CLKGEN_FIELD(0x58, C32_ODF_MASK, 16),
+ CLKGEN_FIELD(0x58, C32_ODF_MASK, 22) },
+ .odf_gate = { CLKGEN_FIELD(0x58, 0x1, 0),
+ CLKGEN_FIELD(0x58, 0x1, 1),
+ CLKGEN_FIELD(0x58, 0x1, 2),
+ CLKGEN_FIELD(0x58, 0x1, 3) },
+ .ops = &stm_pll3200c32_ops,
+};
+
+/* 415 specific */
+static struct clkgen_pll_data st_pll3200c32_a9_415 = {
+ .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
+ .locked_status = CLKGEN_FIELD(0x6C, 0x1, 0),
+ .ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 9),
+ .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 22),
+ .num_odfs = 1,
+ .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 3) },
+ .odf_gate = { CLKGEN_FIELD(0x0, 0x1, 28) },
+ .ops = &stm_pll3200c32_ops,
+};
+
+static struct clkgen_pll_data st_pll3200c32_ddr_415 = {
+ .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
+ .locked_status = CLKGEN_FIELD(0x100, 0x1, 0),
+ .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
+ .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
+ .num_odfs = 2,
+ .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8),
+ CLKGEN_FIELD(0x8, C32_ODF_MASK, 14) },
+ .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28),
+ CLKGEN_FIELD(0x4, 0x1, 29) },
+ .ops = &stm_pll3200c32_ops,
+};
+
+static struct clkgen_pll_data st_pll1200c32_gpu_415 = {
+ .pdn_status = CLKGEN_FIELD(0x144, 0x1, 3),
+ .locked_status = CLKGEN_FIELD(0x168, 0x1, 0),
+ .ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3),
+ .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0),
+ .num_odfs = 0,
+ .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 10) },
+ .ops = &st_pll1200c32_ops,
+};
+
+/* 416 specific */
+static struct clkgen_pll_data st_pll3200c32_a9_416 = {
+ .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
+ .locked_status = CLKGEN_FIELD(0x6C, 0x1, 0),
+ .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
+ .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
+ .num_odfs = 1,
+ .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8) },
+ .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28) },
+ .ops = &stm_pll3200c32_ops,
+};
+
+static struct clkgen_pll_data st_pll3200c32_ddr_416 = {
+ .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
+ .locked_status = CLKGEN_FIELD(0x10C, 0x1, 0),
+ .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
+ .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
+ .num_odfs = 2,
+ .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8),
+ CLKGEN_FIELD(0x8, C32_ODF_MASK, 14) },
+ .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28),
+ CLKGEN_FIELD(0x4, 0x1, 29) },
+ .ops = &stm_pll3200c32_ops,
+};
+
+static struct clkgen_pll_data st_pll1200c32_gpu_416 = {
+ .pdn_status = CLKGEN_FIELD(0x8E4, 0x1, 3),
+ .locked_status = CLKGEN_FIELD(0x90C, 0x1, 0),
+ .ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3),
+ .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0),
+ .num_odfs = 0,
+ .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 10) },
+ .ops = &st_pll1200c32_ops,
+};
+
+/**
+ * DOC: Clock Generated by PLL, rate set and enabled by bootloader
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable/disable only ensures parent is enabled
+ * rate - rate is fixed. No clk_set_rate support
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+/**
+ * struct clkgen_pll - PLL clock integrated in the ClockGenA instances on
+ * the STiH415 and STiH416.
+ *
+ * @hw: handle between common and hardware-specific interfaces.
+ * @data: PLL variant description (register fields and clock operations).
+ * @regs_base: base of the PLL configuration register(s).
+ */
+struct clkgen_pll {
+ struct clk_hw hw;
+ struct clkgen_pll_data *data;
+ void __iomem *regs_base;
+};
+
+#define to_clkgen_pll(_hw) container_of(_hw, struct clkgen_pll, hw)
+
+static int clkgen_pll_is_locked(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ u32 locked = CLKGEN_READ(pll, locked_status);
+
+ return !!locked;
+}
+
+static int clkgen_pll_is_enabled(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ u32 poweroff = CLKGEN_READ(pll, pdn_status);
+ return !poweroff;
+}
+
+static unsigned long recalc_stm_pll800c65(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ unsigned long mdiv, ndiv, pdiv;
+ unsigned long rate;
+ uint64_t res;
+
+ if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
+ return 0;
+
+ pdiv = CLKGEN_READ(pll, pdiv);
+ mdiv = CLKGEN_READ(pll, mdiv);
+ ndiv = CLKGEN_READ(pll, ndiv);
+
+ if (!mdiv)
+ mdiv++; /* mdiv=0 or 1 => MDIV=1 */
+
+ res = (uint64_t)2 * (uint64_t)parent_rate * (uint64_t)ndiv;
+ rate = (unsigned long)div64_u64(res, mdiv * (1 << pdiv));
+
+ pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+
+ return rate;
+
+}
+
+static unsigned long recalc_stm_pll1600c65(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ unsigned long mdiv, ndiv;
+ unsigned long rate;
+
+ if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
+ return 0;
+
+ mdiv = CLKGEN_READ(pll, mdiv);
+ ndiv = CLKGEN_READ(pll, ndiv);
+
+ if (!mdiv)
+ mdiv = 1;
+
+ /* Note: input is divided by 1000 to avoid overflow */
+ rate = ((2 * (parent_rate / 1000) * ndiv) / mdiv) * 1000;
+
+ pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+
+ return rate;
+}
+
+static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ unsigned long ndiv, idf;
+ unsigned long rate = 0;
+
+ if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
+ return 0;
+
+ ndiv = CLKGEN_READ(pll, ndiv);
+ idf = CLKGEN_READ(pll, idf);
+
+ if (idf)
+ /* Note: input is divided to avoid overflow */
+ rate = ((2 * (parent_rate/1000) * ndiv) / idf) * 1000;
+
+ pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+
+ return rate;
+}
+
+static unsigned long recalc_stm_pll1200c32(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ unsigned long odf, ldf, idf;
+ unsigned long rate;
+
+ if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
+ return 0;
+
+ odf = CLKGEN_READ(pll, odf[0]);
+ ldf = CLKGEN_READ(pll, ldf);
+ idf = CLKGEN_READ(pll, idf);
+
+ if (!idf) /* idf==0 means 1 */
+ idf = 1;
+ if (!odf) /* odf==0 means 1 */
+ odf = 1;
+
+ /* Note: input is divided by 1000 to avoid overflow */
+ rate = (((parent_rate / 1000) * ldf) / (odf * idf)) * 1000;
+
+ pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+
+ return rate;
+}
+
+static const struct clk_ops st_pll1600c65_ops = {
+ .is_enabled = clkgen_pll_is_enabled,
+ .recalc_rate = recalc_stm_pll1600c65,
+};
+
+static const struct clk_ops st_pll800c65_ops = {
+ .is_enabled = clkgen_pll_is_enabled,
+ .recalc_rate = recalc_stm_pll800c65,
+};
+
+static const struct clk_ops stm_pll3200c32_ops = {
+ .is_enabled = clkgen_pll_is_enabled,
+ .recalc_rate = recalc_stm_pll3200c32,
+};
+
+static const struct clk_ops st_pll1200c32_ops = {
+ .is_enabled = clkgen_pll_is_enabled,
+ .recalc_rate = recalc_stm_pll1200c32,
+};
+
+static struct clk * __init clkgen_pll_register(const char *parent_name,
+ struct clkgen_pll_data *pll_data,
+ void __iomem *reg,
+ const char *clk_name)
+{
+ struct clkgen_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = clk_name;
+ init.ops = pll_data->ops;
+
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll->data = pll_data;
+ pll->regs_base = reg;
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ kfree(pll);
+ return clk;
+ }
+
+ pr_debug("%s: parent %s rate %lu\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ clk_get_rate(clk));
+
+ return clk;
+}
+
+static struct clk * __init clkgen_c65_lsdiv_register(const char *parent_name,
+ const char *clk_name)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_factor(NULL, clk_name, parent_name, 0, 1, 2);
+ if (IS_ERR(clk))
+ return clk;
+
+ pr_debug("%s: parent %s rate %lu\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ clk_get_rate(clk));
+ return clk;
+}
+
+static void __iomem * __init clkgen_get_register_base(
+ struct device_node *np)
+{
+ struct device_node *pnode;
+ void __iomem *reg = NULL;
+
+ pnode = of_get_parent(np);
+ if (!pnode)
+ return NULL;
+
+ reg = of_iomap(pnode, 0);
+
+ of_node_put(pnode);
+ return reg;
+}
+
+#define CLKGENAx_PLL0_OFFSET 0x0
+#define CLKGENAx_PLL1_OFFSET 0x4
+
+static void __init clkgena_c65_pll_setup(struct device_node *np)
+{
+ const int num_pll_outputs = 3;
+ struct clk_onecell_data *clk_data;
+ const char *parent_name;
+ void __iomem *reg;
+ const char *clk_name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ reg = clkgen_get_register_base(np);
+ if (!reg)
+ return;
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->clk_num = num_pll_outputs;
+ clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
+ GFP_KERNEL);
+
+ if (!clk_data->clks)
+ goto err;
+
+ if (of_property_read_string_index(np, "clock-output-names",
+ 0, &clk_name))
+ goto err;
+
+ /*
+ * PLL0 HS (high speed) output
+ */
+ clk_data->clks[0] = clkgen_pll_register(parent_name,
+ &st_pll1600c65_ax,
+ reg + CLKGENAx_PLL0_OFFSET,
+ clk_name);
+
+ if (IS_ERR(clk_data->clks[0]))
+ goto err;
+
+ if (of_property_read_string_index(np, "clock-output-names",
+ 1, &clk_name))
+ goto err;
+
+ /*
+ * PLL0 LS (low speed) output, which is a fixed divide by 2 of the
+ * high speed output.
+ */
+ clk_data->clks[1] = clkgen_c65_lsdiv_register(
+ __clk_get_name(clk_data->clks[0]),
+ clk_name);
+
+ if (IS_ERR(clk_data->clks[1]))
+ goto err;
+
+ if (of_property_read_string_index(np, "clock-output-names",
+ 2, &clk_name))
+ goto err;
+
+ /*
+ * PLL1 output
+ */
+ clk_data->clks[2] = clkgen_pll_register(parent_name,
+ &st_pll800c65_ax,
+ reg + CLKGENAx_PLL1_OFFSET,
+ clk_name);
+
+ if (IS_ERR(clk_data->clks[2]))
+ goto err;
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+ return;
+
+err:
+ kfree(clk_data->clks);
+ kfree(clk_data);
+}
+CLK_OF_DECLARE(clkgena_c65_plls,
+ "st,clkgena-plls-c65", clkgena_c65_pll_setup);
+
+static struct clk * __init clkgen_odf_register(const char *parent_name,
+ void __iomem *reg,
+ struct clkgen_pll_data *pll_data,
+ int odf,
+ spinlock_t *odf_lock,
+ const char *odf_name)
+{
+ struct clk *clk;
+ unsigned long flags;
+ struct clk_gate *gate;
+ struct clk_divider *div;
+
+ flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_GATE;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
+ gate->reg = reg + pll_data->odf_gate[odf].offset;
+ gate->bit_idx = pll_data->odf_gate[odf].shift;
+ gate->lock = odf_lock;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div) {
+ kfree(gate);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+ div->reg = reg + pll_data->odf[odf].offset;
+ div->shift = pll_data->odf[odf].shift;
+ div->width = fls(pll_data->odf[odf].mask);
+ div->lock = odf_lock;
+
+ clk = clk_register_composite(NULL, odf_name, &parent_name, 1,
+ NULL, NULL,
+ &div->hw, &clk_divider_ops,
+ &gate->hw, &clk_gate_ops,
+ flags);
+ if (IS_ERR(clk))
+ return clk;
+
+ pr_debug("%s: parent %s rate %lu\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ clk_get_rate(clk));
+ return clk;
+}
+
+static struct of_device_id c32_pll_of_match[] = {
+ {
+ .compatible = "st,plls-c32-a1x-0",
+ .data = &st_pll3200c32_a1x_0,
+ },
+ {
+ .compatible = "st,plls-c32-a1x-1",
+ .data = &st_pll3200c32_a1x_1,
+ },
+ {
+ .compatible = "st,stih415-plls-c32-a9",
+ .data = &st_pll3200c32_a9_415,
+ },
+ {
+ .compatible = "st,stih415-plls-c32-ddr",
+ .data = &st_pll3200c32_ddr_415,
+ },
+ {
+ .compatible = "st,stih416-plls-c32-a9",
+ .data = &st_pll3200c32_a9_416,
+ },
+ {
+ .compatible = "st,stih416-plls-c32-ddr",
+ .data = &st_pll3200c32_ddr_416,
+ },
+ {}
+};
+
+static void __init clkgen_c32_pll_setup(struct device_node *np)
+{
+ const struct of_device_id *match;
+ struct clk *clk;
+ const char *parent_name, *pll_name;
+ void __iomem *pll_base;
+ int num_odfs, odf;
+ struct clk_onecell_data *clk_data;
+ struct clkgen_pll_data *data;
+
+ match = of_match_node(c32_pll_of_match, np);
+ if (!match) {
+ pr_err("%s: No matching data\n", __func__);
+ return;
+ }
+
+ data = (struct clkgen_pll_data *) match->data;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ pll_base = clkgen_get_register_base(np);
+ if (!pll_base)
+ return;
+
+ clk = clkgen_pll_register(parent_name, data, pll_base, np->name);
+ if (IS_ERR(clk))
+ return;
+
+ pll_name = __clk_get_name(clk);
+
+ num_odfs = data->num_odfs;
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->clk_num = num_odfs;
+ clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
+ GFP_KERNEL);
+
+ if (!clk_data->clks)
+ goto err;
+
+ for (odf = 0; odf < num_odfs; odf++) {
+ struct clk *clk;
+ const char *clk_name;
+
+ if (of_property_read_string_index(np, "clock-output-names",
+ odf, &clk_name))
+ return;
+
+ clk = clkgen_odf_register(pll_name, pll_base, data,
+ odf, &clkgena_c32_odf_lock, clk_name);
+ if (IS_ERR(clk))
+ goto err;
+
+ clk_data->clks[odf] = clk;
+ }
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+ return;
+
+err:
+ kfree(clk_data->clks);
+ kfree(clk_data);
+}
+CLK_OF_DECLARE(clkgen_c32_pll, "st,clkgen-plls-c32", clkgen_c32_pll_setup);
+
+static struct of_device_id c32_gpu_pll_of_match[] = {
+ {
+ .compatible = "st,stih415-gpu-pll-c32",
+ .data = &st_pll1200c32_gpu_415,
+ },
+ {
+ .compatible = "st,stih416-gpu-pll-c32",
+ .data = &st_pll1200c32_gpu_416,
+ },
+ {}
+};
+
+static void __init clkgengpu_c32_pll_setup(struct device_node *np)
+{
+ const struct of_device_id *match;
+ struct clk *clk;
+ const char *parent_name;
+ void __iomem *reg;
+ const char *clk_name;
+ struct clkgen_pll_data *data;
+
+ match = of_match_node(c32_gpu_pll_of_match, np);
+ if (!match) {
+ pr_err("%s: No matching data\n", __func__);
+ return;
+ }
+
+ data = (struct clkgen_pll_data *)match->data;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ reg = clkgen_get_register_base(np);
+ if (!reg)
+ return;
+
+ if (of_property_read_string_index(np, "clock-output-names",
+ 0, &clk_name))
+ return;
+
+ /*
+ * PLL 1200MHz output
+ */
+ clk = clkgen_pll_register(parent_name, data, reg, clk_name);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+
+ return;
+}
+CLK_OF_DECLARE(clkgengpu_c32_pll,
+ "st,clkgengpu-pll-c32", clkgengpu_c32_pll_setup);
diff --git a/drivers/clk/st/clkgen.h b/drivers/clk/st/clkgen.h
new file mode 100644
index 000000000000..35c863295268
--- /dev/null
+++ b/drivers/clk/st/clkgen.h
@@ -0,0 +1,48 @@
+/************************************************************************
+File : Clock H/w specific Information
+
+Author: Pankaj Dev <pankaj.dev@st.com>
+
+Copyright (C) 2014 STMicroelectronics
+************************************************************************/
+
+#ifndef __CLKGEN_INFO_H
+#define __CLKGEN_INFO_H
+
+struct clkgen_field {
+ unsigned int offset;
+ unsigned int mask;
+ unsigned int shift;
+};
+
+static inline unsigned long clkgen_read(void __iomem *base,
+ struct clkgen_field *field)
+{
+ return (readl(base + field->offset) >> field->shift) & field->mask;
+}
+
+
+static inline void clkgen_write(void __iomem *base, struct clkgen_field *field,
+ unsigned long val)
+{
+ writel((readl(base + field->offset) &
+ ~(field->mask << field->shift)) | (val << field->shift),
+ base + field->offset);
+}
+
+#define CLKGEN_FIELD(_offset, _mask, _shift) { \
+ .offset = _offset, \
+ .mask = _mask, \
+ .shift = _shift, \
+ }
+
+#define CLKGEN_READ(pll, field) clkgen_read(pll->regs_base, \
+ &pll->data->field)
+
+#define CLKGEN_WRITE(pll, field, val) clkgen_write(pll->regs_base, \
+ &pll->data->field, val)
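+
+/*
+ * Example usage (illustrative field values): with
+ *   .ndiv = CLKGEN_FIELD(0x0, 0xff, 8)
+ * in the per-variant data, CLKGEN_READ(pll, ndiv) returns bits 15:8 of the
+ * register at pll->regs_base + 0x0, and CLKGEN_WRITE(pll, ndiv, val) does a
+ * read-modify-write of the same bits.
+ */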
+
+#endif /*__CLKGEN_INFO_H*/
+
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index abb6c5ac8a10..bd7dc733c1ca 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -18,6 +18,7 @@
#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/reset-controller.h>
#include "clk-factors.h"
@@ -51,6 +52,8 @@ static void __init sun4i_osc_clk_setup(struct device_node *node)
if (!gate)
goto err_free_fixed;
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
/* set up gate and fixed rate properties */
gate->reg = of_iomap(node, 0);
gate->bit_idx = SUNXI_OSC24M_GATE;
@@ -77,7 +80,7 @@ err_free_gate:
err_free_fixed:
kfree(fixed);
}
-CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-osc-clk", sun4i_osc_clk_setup);
+CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-a10-osc-clk", sun4i_osc_clk_setup);
@@ -249,7 +252,38 @@ static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
*n = DIV_ROUND_UP(div, (*k+1));
}
+/**
+ * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6
+ * PLL6 rate is calculated as follows
+ * rate = parent_rate * n * (k + 1) / 2
+ * parent_rate is always 24 MHz
+ */
+
+static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u8 div;
+
+ /*
+ * We always have 24MHz / 2, so we can just say that our
+ * parent clock is 12MHz.
+ */
+ parent_rate = parent_rate / 2;
+ /* Normalize value to a parent_rate multiple (24M / 2) */
+ div = *freq / parent_rate;
+ *freq = parent_rate * div;
+
+ /* we were called to round the frequency, we can now return */
+ if (n == NULL)
+ return;
+
+ *k = div / 32;
+ if (*k > 3)
+ *k = 3;
+
+ *n = DIV_ROUND_UP(div, (*k+1));
+}
/**
* sun4i_get_apb1_factors() - calculates m, p factors for APB1
@@ -265,7 +299,7 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
if (parent_rate < *freq)
*freq = parent_rate;
- parent_rate = (parent_rate + (*freq - 1)) / *freq;
+ parent_rate = DIV_ROUND_UP(parent_rate, *freq);
/* Invalid rate! */
if (parent_rate > 32)
@@ -296,7 +330,7 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
/**
* sun4i_get_mod0_factors() - calculates m, n factors for MOD0-style clocks
- * MMC rate is calculated as follows
+ * MOD0 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
*/
@@ -310,7 +344,7 @@ static void sun4i_get_mod0_factors(u32 *freq, u32 parent_rate,
if (*freq > parent_rate)
*freq = parent_rate;
- div = parent_rate / *freq;
+ div = DIV_ROUND_UP(parent_rate, *freq);
if (div < 16)
calcp = 0;
@@ -351,7 +385,7 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
if (*freq > parent_rate)
*freq = parent_rate;
- div = parent_rate / *freq;
+ div = DIV_ROUND_UP(parent_rate, *freq);
if (div < 32)
calcp = 0;
@@ -377,6 +411,102 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
/**
+ * sun7i_a20_gmac_clk_setup - Setup function for A20/A31 GMAC clock module
+ *
+ * This clock looks something like this
+ * ________________________
+ * MII TX clock from PHY >-----|___________ _________|----> to GMAC core
+ * GMAC Int. RGMII TX clk >----|___________\__/__gate---|----> to PHY
+ * Ext. 125MHz RGMII TX clk >--|__divider__/ |
+ * |________________________|
+ *
+ * The external 125 MHz reference is optional, i.e. GMAC can use its
+ * internal TX clock just fine. The A31 GMAC clock module does not have
+ * the divider controls for the external reference.
+ *
+ * To keep it simple, let the GMAC use either the MII TX clock for MII mode,
+ * or its internal TX clock for GMII and RGMII modes. The GMAC driver should
+ * select the appropriate source and gate/ungate the output to the PHY.
+ *
+ * Only the GMAC should use this clock. Altering the clock so that it doesn't
+ * match the GMAC's operation parameters will result in the GMAC not being
+ * able to send traffic out. The GMAC driver should set the clock rate and
+ * enable/disable this clock to configure the required state. The clock
+ * driver then responds by auto-reparenting the clock.
+ */
+
+#define SUN7I_A20_GMAC_GPIT 2
+#define SUN7I_A20_GMAC_MASK 0x3
+#define SUN7I_A20_GMAC_PARENTS 2
+
+static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ struct clk_mux *mux;
+ struct clk_gate *gate;
+ const char *clk_name = node->name;
+ const char *parents[SUN7I_A20_GMAC_PARENTS];
+ void *reg;
+
+ if (of_property_read_string(node, "clock-output-names", &clk_name))
+ return;
+
+ /* allocate mux and gate clock structs */
+ mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+ if (!mux)
+ return;
+
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+ if (!gate)
+ goto free_mux;
+
+ /* gmac clock requires exactly 2 parents */
+ parents[0] = of_clk_get_parent_name(node, 0);
+ parents[1] = of_clk_get_parent_name(node, 1);
+ if (!parents[0] || !parents[1])
+ goto free_gate;
+
+ reg = of_iomap(node, 0);
+ if (!reg)
+ goto free_gate;
+
+ /* set up gate and fixed rate properties */
+ gate->reg = reg;
+ gate->bit_idx = SUN7I_A20_GMAC_GPIT;
+ gate->lock = &clk_lock;
+ mux->reg = reg;
+ mux->mask = SUN7I_A20_GMAC_MASK;
+ mux->flags = CLK_MUX_INDEX_BIT;
+ mux->lock = &clk_lock;
+
+ clk = clk_register_composite(NULL, clk_name,
+ parents, SUN7I_A20_GMAC_PARENTS,
+ &mux->hw, &clk_mux_ops,
+ NULL, NULL,
+ &gate->hw, &clk_gate_ops,
+ 0);
+
+ if (IS_ERR(clk))
+ goto iounmap_reg;
+
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+
+ return;
+
+iounmap_reg:
+ iounmap(reg);
+free_gate:
+ kfree(gate);
+free_mux:
+ kfree(mux);
+}
+CLK_OF_DECLARE(sun7i_a20_gmac, "allwinner,sun7i-a20-gmac-clk",
+ sun7i_a20_gmac_clk_setup);
+
+/**
* sunxi_factors_clk_setup() - Setup function for factor clocks
*/
@@ -387,6 +517,7 @@ struct factors_data {
int mux;
struct clk_factors_config *table;
void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
+ const char *name;
};
static struct clk_factors_config sun4i_pll1_config = {
@@ -416,6 +547,13 @@ static struct clk_factors_config sun4i_pll5_config = {
.kwidth = 2,
};
+static struct clk_factors_config sun6i_a31_pll6_config = {
+ .nshift = 8,
+ .nwidth = 5,
+ .kshift = 4,
+ .kwidth = 2,
+};
+
static struct clk_factors_config sun4i_apb1_config = {
.mshift = 0,
.mwidth = 5,
@@ -451,10 +589,30 @@ static const struct factors_data sun6i_a31_pll1_data __initconst = {
.getter = sun6i_a31_get_pll1_factors,
};
+static const struct factors_data sun7i_a20_pll4_data __initconst = {
+ .enable = 31,
+ .table = &sun4i_pll5_config,
+ .getter = sun4i_get_pll5_factors,
+};
+
static const struct factors_data sun4i_pll5_data __initconst = {
.enable = 31,
.table = &sun4i_pll5_config,
.getter = sun4i_get_pll5_factors,
+ .name = "pll5",
+};
+
+static const struct factors_data sun4i_pll6_data __initconst = {
+ .enable = 31,
+ .table = &sun4i_pll5_config,
+ .getter = sun4i_get_pll5_factors,
+ .name = "pll6",
+};
+
+static const struct factors_data sun6i_a31_pll6_data __initconst = {
+ .enable = 31,
+ .table = &sun6i_a31_pll6_config,
+ .getter = sun6i_a31_get_pll6_factors,
};
static const struct factors_data sun4i_apb1_data __initconst = {
@@ -497,14 +655,14 @@ static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
(parents[i] = of_clk_get_parent_name(node, i)) != NULL)
i++;
- /* Nodes should be providing the name via clock-output-names
- * but originally our dts didn't, and so we used node->name.
- * The new, better nodes look like clk@deadbeef, so we pull the
- * name just in this case */
- if (!strcmp("clk", clk_name)) {
- of_property_read_string_index(node, "clock-output-names",
- 0, &clk_name);
- }
+ /*
+ * some factor clocks, such as pll5 and pll6, may have multiple
+ * outputs, and have their name designated in factors_data
+ */
+ if (data->name)
+ clk_name = data->name;
+ else
+ of_property_read_string(node, "clock-output-names", &clk_name);
factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
if (!factors)
@@ -601,6 +759,8 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
(parents[i] = of_clk_get_parent_name(node, i)) != NULL)
i++;
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
clk = clk_register_mux(NULL, clk_name, parents, i,
CLK_SET_RATE_NO_REPARENT, reg,
data->shift, SUNXI_MUX_GATE_WIDTH,
@@ -660,6 +820,8 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
clk_parent = of_clk_get_parent_name(node, 0);
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
clk = clk_register_divider(NULL, clk_name, clk_parent, 0,
reg, data->shift, data->width,
data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
@@ -673,6 +835,59 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
/**
+ * sunxi_gates_reset... - reset bits in leaf gate clk registers handling
+ */
+
+struct gates_reset_data {
+ void __iomem *reg;
+ spinlock_t *lock;
+ struct reset_controller_dev rcdev;
+};
+
+static int sunxi_gates_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct gates_reset_data *data = container_of(rcdev,
+ struct gates_reset_data,
+ rcdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(data->lock, flags);
+
+ reg = readl(data->reg);
+ writel(reg & ~BIT(id), data->reg);
+
+ spin_unlock_irqrestore(data->lock, flags);
+
+ return 0;
+}
+
+static int sunxi_gates_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct gates_reset_data *data = container_of(rcdev,
+ struct gates_reset_data,
+ rcdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(data->lock, flags);
+
+ reg = readl(data->reg);
+ writel(reg | BIT(id), data->reg);
+
+ spin_unlock_irqrestore(data->lock, flags);
+
+ return 0;
+}
+
+static struct reset_control_ops sunxi_gates_reset_ops = {
+ .assert = sunxi_gates_reset_assert,
+ .deassert = sunxi_gates_reset_deassert,
+};
+
+/**
* sunxi_gates_clk_setup() - Setup function for leaf gates on clocks
*/
@@ -680,6 +895,7 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
struct gates_data {
DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE);
+ u32 reset_mask;
};
static const struct gates_data sun4i_axi_gates_data __initconst = {
@@ -746,10 +962,21 @@ static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
.mask = { 0xff80ff },
};
+static const struct gates_data sun4i_a10_usb_gates_data __initconst = {
+ .mask = {0x1C0},
+ .reset_mask = 0x07,
+};
+
+static const struct gates_data sun5i_a13_usb_gates_data __initconst = {
+ .mask = {0x140},
+ .reset_mask = 0x03,
+};
+
static void __init sunxi_gates_clk_setup(struct device_node *node,
struct gates_data *data)
{
struct clk_onecell_data *clk_data;
+ struct gates_reset_data *reset_data;
const char *clk_parent;
const char *clk_name;
void *reg;
@@ -793,6 +1020,21 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
clk_data->clk_num = i;
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ /* Register a reset controller for gates with reset bits */
+ if (data->reset_mask == 0)
+ return;
+
+ reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
+ if (!reset_data)
+ return;
+
+ reset_data->reg = reg;
+ reset_data->lock = &clk_lock;
+ reset_data->rcdev.nr_resets = __fls(data->reset_mask) + 1;
+ reset_data->rcdev.ops = &sunxi_gates_reset_ops;
+ reset_data->rcdev.of_node = node;
+ reset_controller_register(&reset_data->rcdev);
}
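For reference, the reset bookkeeping above derives the number of reset lines from the highest set bit of reset_mask, and asserting a reset clears the corresponding register bit. A small sketch of that arithmetic (plain C, not part of the patch), using the sun4i A10 USB value 0x07:

#include <stdint.h>
#include <stdio.h>

static int fls_last_set(uint32_t v)	/* stand-in for the kernel's __fls() */
{
	int i = -1;

	while (v) {
		i++;
		v >>= 1;
	}
	return i;
}

int main(void)
{
	uint32_t reset_mask = 0x07;	/* sun4i_a10_usb_gates_data */
	uint32_t reg = reset_mask;	/* all three resets deasserted */
	int nr_resets = fls_last_set(reset_mask) + 1;	/* 3 */

	reg &= ~(1u << 1);		/* assert reset line 1 */
	printf("nr_resets=%d reg=0x%02x\n", nr_resets, reg);	/* reg=0x05 */
	return 0;
}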
@@ -832,7 +1074,7 @@ static const struct divs_data pll5_divs_data __initconst = {
};
static const struct divs_data pll6_divs_data __initconst = {
- .factors = &sun4i_pll5_data,
+ .factors = &sun4i_pll6_data,
.div = {
{ .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */
{ .fixed = 2 }, /* P, other */
@@ -854,7 +1096,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
struct divs_data *data)
{
struct clk_onecell_data *clk_data;
- const char *parent = node->name;
+ const char *parent;
const char *clk_name;
struct clk **clks, *pclk;
struct clk_hw *gate_hw, *rate_hw;
@@ -868,6 +1110,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
/* Set up factor clock that we will be dividing */
pclk = sunxi_factors_clk_setup(node, data->factors);
+ parent = __clk_get_name(pclk);
reg = of_iomap(node, 0);
@@ -970,56 +1213,60 @@ free_clkdata:
/* Matches for factors clocks */
static const struct of_device_id clk_factors_match[] __initconst = {
- {.compatible = "allwinner,sun4i-pll1-clk", .data = &sun4i_pll1_data,},
+ {.compatible = "allwinner,sun4i-a10-pll1-clk", .data = &sun4i_pll1_data,},
{.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
- {.compatible = "allwinner,sun4i-apb1-clk", .data = &sun4i_apb1_data,},
- {.compatible = "allwinner,sun4i-mod0-clk", .data = &sun4i_mod0_data,},
+ {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
+ {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_data,},
+ {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
+ {.compatible = "allwinner,sun4i-a10-mod0-clk", .data = &sun4i_mod0_data,},
{.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
{}
};
/* Matches for divider clocks */
static const struct of_device_id clk_div_match[] __initconst = {
- {.compatible = "allwinner,sun4i-axi-clk", .data = &sun4i_axi_data,},
- {.compatible = "allwinner,sun4i-ahb-clk", .data = &sun4i_ahb_data,},
- {.compatible = "allwinner,sun4i-apb0-clk", .data = &sun4i_apb0_data,},
+ {.compatible = "allwinner,sun4i-a10-axi-clk", .data = &sun4i_axi_data,},
+ {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
+ {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
{.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,},
{}
};
/* Matches for divided outputs */
static const struct of_device_id clk_divs_match[] __initconst = {
- {.compatible = "allwinner,sun4i-pll5-clk", .data = &pll5_divs_data,},
- {.compatible = "allwinner,sun4i-pll6-clk", .data = &pll6_divs_data,},
+ {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,},
+ {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,},
{}
};
/* Matches for mux clocks */
static const struct of_device_id clk_mux_match[] __initconst = {
- {.compatible = "allwinner,sun4i-cpu-clk", .data = &sun4i_cpu_mux_data,},
- {.compatible = "allwinner,sun4i-apb1-mux-clk", .data = &sun4i_apb1_mux_data,},
+ {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,},
+ {.compatible = "allwinner,sun4i-a10-apb1-mux-clk", .data = &sun4i_apb1_mux_data,},
{.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
{}
};
/* Matches for gate clocks */
static const struct of_device_id clk_gates_match[] __initconst = {
- {.compatible = "allwinner,sun4i-axi-gates-clk", .data = &sun4i_axi_gates_data,},
- {.compatible = "allwinner,sun4i-ahb-gates-clk", .data = &sun4i_ahb_gates_data,},
+ {.compatible = "allwinner,sun4i-a10-axi-gates-clk", .data = &sun4i_axi_gates_data,},
+ {.compatible = "allwinner,sun4i-a10-ahb-gates-clk", .data = &sun4i_ahb_gates_data,},
{.compatible = "allwinner,sun5i-a10s-ahb-gates-clk", .data = &sun5i_a10s_ahb_gates_data,},
{.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,},
{.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
{.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
- {.compatible = "allwinner,sun4i-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
+ {.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
{.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
{.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
{.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,},
- {.compatible = "allwinner,sun4i-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
+ {.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
{.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,},
{.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
{.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
{.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
{.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
+ {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,},
+ {.compatible = "allwinner,sun5i-a13-usb-clk", .data = &sun5i_a13_usb_gates_data,},
{}
};
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 356e9b804421..9e899c18af86 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -130,7 +130,7 @@ static const struct clk_ops tegra_clk_periph_nodiv_ops = {
.disable = clk_periph_disable,
};
-const struct clk_ops tegra_clk_periph_no_gate_ops = {
+static const struct clk_ops tegra_clk_periph_no_gate_ops = {
.get_parent = clk_periph_get_parent,
.set_parent = clk_periph_set_parent,
.recalc_rate = clk_periph_recalc_rate,
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 776ee4594bd4..028b33783d38 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -34,7 +34,6 @@ static struct ti_dt_clk am33xx_clks[] = {
DT_CLK(NULL, "dpll_core_m5_ck", "dpll_core_m5_ck"),
DT_CLK(NULL, "dpll_core_m6_ck", "dpll_core_m6_ck"),
DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
- DT_CLK("cpu0", NULL, "dpll_mpu_ck"),
DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"),
DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"),
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index d3230234f07b..0d1750a8aea4 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -130,10 +130,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
DT_CLK(NULL, "dss_tv_fck", "dss_tv_fck"),
DT_CLK(NULL, "dss_96m_fck", "dss_96m_fck"),
DT_CLK(NULL, "dss2_alwon_fck", "dss2_alwon_fck"),
- DT_CLK(NULL, "utmi_p1_gfclk", "dummy_ck"),
- DT_CLK(NULL, "utmi_p2_gfclk", "dummy_ck"),
- DT_CLK(NULL, "xclk60mhsp1_ck", "dummy_ck"),
- DT_CLK(NULL, "xclk60mhsp2_ck", "dummy_ck"),
DT_CLK(NULL, "init_60m_fclk", "dummy_ck"),
DT_CLK(NULL, "gpt1_fck", "gpt1_fck"),
DT_CLK(NULL, "aes2_ick", "aes2_ick"),
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index ae00218b5da3..02517a8206bd 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -222,7 +222,6 @@ static struct ti_dt_clk omap44xx_clks[] = {
DT_CLK(NULL, "auxclk5_src_ck", "auxclk5_src_ck"),
DT_CLK(NULL, "auxclk5_ck", "auxclk5_ck"),
DT_CLK(NULL, "auxclkreq5_ck", "auxclkreq5_ck"),
- DT_CLK("50000000.gpmc", "fck", "dummy_ck"),
DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 0ef9f581286b..08f3d1b915b3 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -182,7 +182,6 @@ static struct ti_dt_clk omap54xx_clks[] = {
DT_CLK(NULL, "auxclk3_src_ck", "auxclk3_src_ck"),
DT_CLK(NULL, "auxclk3_ck", "auxclk3_ck"),
DT_CLK(NULL, "auxclkreq3_ck", "auxclkreq3_ck"),
- DT_CLK(NULL, "gpmc_ck", "dummy_ck"),
DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 9977653f2d63..f7e40734c819 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -262,7 +262,6 @@ static struct ti_dt_clk dra7xx_clks[] = {
DT_CLK(NULL, "vip1_gclk_mux", "vip1_gclk_mux"),
DT_CLK(NULL, "vip2_gclk_mux", "vip2_gclk_mux"),
DT_CLK(NULL, "vip3_gclk_mux", "vip3_gclk_mux"),
- DT_CLK(NULL, "gpmc_ck", "dummy_ck"),
DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index a15e445570b2..e6aa10db7bba 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -112,7 +112,7 @@ static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
- return parent_rate / div;
+ return DIV_ROUND_UP(parent_rate, div);
}
/*
@@ -182,7 +182,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
}
parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
MULT_ROUND_UP(rate, i));
- now = parent_rate / i;
+ now = DIV_ROUND_UP(parent_rate, i);
if (now <= rate && now > best) {
bestdiv = i;
best = now;
@@ -205,7 +205,7 @@ static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
int div;
div = ti_clk_divider_bestdiv(hw, rate, prate);
- return *prate / div;
+ return DIV_ROUND_UP(*prate, div);
}
static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -216,7 +216,7 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags = 0;
u32 val;
- div = parent_rate / rate;
+ div = DIV_ROUND_UP(parent_rate, rate);
value = _get_val(divider, div);
if (value > div_mask(divider))
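Most of the parent_rate / rate to DIV_ROUND_UP conversions in this patch, here and in the sunxi mod0/out factor code above, serve the same purpose: choosing the divider by rounding up so the resulting rate never lands above the requested one. A toy comparison with made-up numbers (plain C, not part of the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long parent = 100000000, requested = 33000000;

	/* truncation picks div = 3 -> 33.33 MHz, above the requested rate */
	printf("floor: div=%lu rate=%lu\n",
	       parent / requested, parent / (parent / requested));

	/* rounding up picks div = 4 -> 25 MHz, never above the request */
	printf("ceil:  div=%lu rate=%lu\n",
	       DIV_ROUND_UP(parent, requested),
	       parent / DIV_ROUND_UP(parent, requested));
	return 0;
}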
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index cdeff299de26..7b55ef89baa5 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -29,7 +29,8 @@ static struct clk *prcc_kclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_C
#define PRCC_KCLK_STORE(clk, base, bit) \
prcc_kclk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit] = clk
-struct clk *ux500_twocell_get(struct of_phandle_args *clkspec, void *data)
+static struct clk *ux500_twocell_get(struct of_phandle_args *clkspec,
+ void *data)
{
struct clk **clk_data = data;
unsigned int base, bit;
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 8cbfcf88fae3..a820b0cfcf57 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -33,7 +33,7 @@ struct clk_icst {
struct clk_hw hw;
void __iomem *vcoreg;
void __iomem *lockreg;
- const struct icst_params *params;
+ struct icst_params *params;
unsigned long rate;
};
@@ -84,6 +84,8 @@ static unsigned long icst_recalc_rate(struct clk_hw *hw,
struct clk_icst *icst = to_icst(hw);
struct icst_vco vco;
+ if (parent_rate)
+ icst->params->ref = parent_rate;
vco = vco_get(icst->vcoreg);
icst->rate = icst_hz(icst->params, vco);
return icst->rate;
@@ -105,6 +107,8 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_icst *icst = to_icst(hw);
struct icst_vco vco;
+ if (parent_rate)
+ icst->params->ref = parent_rate;
vco = icst_hz_to_vco(icst->params, rate);
icst->rate = icst_hz(icst->params, vco);
vco_set(icst->lockreg, icst->vcoreg, vco);
@@ -120,24 +124,33 @@ static const struct clk_ops icst_ops = {
struct clk *icst_clk_register(struct device *dev,
const struct clk_icst_desc *desc,
const char *name,
+ const char *parent_name,
void __iomem *base)
{
struct clk *clk;
struct clk_icst *icst;
struct clk_init_data init;
+ struct icst_params *pclone;
icst = kzalloc(sizeof(struct clk_icst), GFP_KERNEL);
if (!icst) {
pr_err("could not allocate ICST clock!\n");
return ERR_PTR(-ENOMEM);
}
+
+ pclone = kmemdup(desc->params, sizeof(*pclone), GFP_KERNEL);
+ if (!pclone) {
+ kfree(icst);
+ pr_err("could not clone ICST params\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
init.name = name;
init.ops = &icst_ops;
init.flags = CLK_IS_ROOT;
- init.parent_names = NULL;
- init.num_parents = 0;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
icst->hw.init = &init;
- icst->params = desc->params;
+ icst->params = pclone;
icst->vcoreg = base + desc->vco_offset;
icst->lockreg = base + desc->lock_offset;
diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h
index be99dd0da785..04e6f0aef588 100644
--- a/drivers/clk/versatile/clk-icst.h
+++ b/drivers/clk/versatile/clk-icst.h
@@ -16,4 +16,5 @@ struct clk_icst_desc {
struct clk *icst_clk_register(struct device *dev,
const struct clk_icst_desc *desc,
const char *name,
+ const char *parent_name,
void __iomem *base);
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index 844f8d711a12..31b44f025f9e 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -13,10 +13,12 @@
#include <linux/io.h>
#include <linux/platform_data/clk-integrator.h>
-#include <mach/impd1.h>
-
#include "clk-icst.h"
+#define IMPD1_OSC1 0x00
+#define IMPD1_OSC2 0x04
+#define IMPD1_LOCK 0x08
+
struct impd1_clk {
char *vco1name;
struct clk *vco1clk;
@@ -93,13 +95,15 @@ void integrator_impd1_clk_init(void __iomem *base, unsigned int id)
imc = &impd1_clks[id];
imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id);
- clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, base);
+ clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, NULL,
+ base);
imc->vco1clk = clk;
imc->clks[0] = clkdev_alloc(clk, NULL, "lm%x:01000", id);
/* VCO2 is also called "CLK2" */
imc->vco2name = kasprintf(GFP_KERNEL, "lm%x-vco2", id);
- clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, base);
+ clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, NULL,
+ base);
imc->vco2clk = clk;
/* MMCI uses CLK2 right off */
diff --git a/drivers/clk/versatile/clk-integrator.c b/drivers/clk/versatile/clk-integrator.c
index bda8967e09c2..734c4b8fe6ab 100644
--- a/drivers/clk/versatile/clk-integrator.c
+++ b/drivers/clk/versatile/clk-integrator.c
@@ -10,21 +10,17 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
-#include <linux/platform_data/clk-integrator.h>
-
-#include <mach/hardware.h>
-#include <mach/platform.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include "clk-icst.h"
-/*
- * Implementation of the ARM Integrator/AP and Integrator/CP clock tree.
- * Inspired by portions of:
- * plat-versatile/clock.c and plat-versatile/include/plat/clock.h
- */
+#define INTEGRATOR_HDR_LOCK_OFFSET 0x14
-static const struct icst_params cp_auxvco_params = {
- .ref = 24000000,
+/* Remapped base address of the core module */
+static void __iomem *cm_base;
+
+static const struct icst_params cp_auxosc_params = {
.vco_max = ICST525_VCO_MAX_5V,
.vco_min = ICST525_VCO_MIN,
.vd_min = 8,
@@ -35,50 +31,39 @@ static const struct icst_params cp_auxvco_params = {
.idx2s = icst525_idx2s,
};
-static const struct clk_icst_desc __initdata cp_icst_desc = {
- .params = &cp_auxvco_params,
+static const struct clk_icst_desc __initdata cm_auxosc_desc = {
+ .params = &cp_auxosc_params,
.vco_offset = 0x1c,
.lock_offset = INTEGRATOR_HDR_LOCK_OFFSET,
};
-/*
- * integrator_clk_init() - set up the integrator clock tree
- * @is_cp: pass true if it's the Integrator/CP else AP is assumed
- */
-void __init integrator_clk_init(bool is_cp)
+static void __init of_integrator_cm_osc_setup(struct device_node *np)
{
- struct clk *clk;
-
- /* APB clock dummy */
- clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
- clk_register_clkdev(clk, "apb_pclk", NULL);
-
- /* UART reference clock */
- clk = clk_register_fixed_rate(NULL, "uartclk", NULL, CLK_IS_ROOT,
- 14745600);
- clk_register_clkdev(clk, NULL, "uart0");
- clk_register_clkdev(clk, NULL, "uart1");
- if (is_cp)
- clk_register_clkdev(clk, NULL, "mmci");
-
- /* 24 MHz clock */
- clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, CLK_IS_ROOT,
- 24000000);
- clk_register_clkdev(clk, NULL, "kmi0");
- clk_register_clkdev(clk, NULL, "kmi1");
- if (!is_cp)
- clk_register_clkdev(clk, NULL, "ap_timer");
+ struct clk *clk = ERR_PTR(-EINVAL);
+ const char *clk_name = np->name;
+ const struct clk_icst_desc *desc = &cm_auxosc_desc;
+ const char *parent_name;
- if (!is_cp)
- return;
+ if (!cm_base) {
+ /* Remap the core module base if not done yet */
+ struct device_node *parent;
- /* 1 MHz clock */
- clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, CLK_IS_ROOT,
- 1000000);
- clk_register_clkdev(clk, NULL, "sp804");
+ parent = of_get_parent(np);
+ if (!parent) {
+ pr_err("no parent on core module clock\n");
+ return;
+ }
+ cm_base = of_iomap(parent, 0);
+ if (!cm_base) {
+ pr_err("could not remap core module base\n");
+ return;
+ }
+ }
- /* ICST VCO clock used on the Integrator/CP CLCD */
- clk = icst_clk_register(NULL, &cp_icst_desc, "icst",
- __io_address(INTEGRATOR_HDR_BASE));
- clk_register_clkdev(clk, NULL, "clcd");
+ parent_name = of_clk_get_parent_name(np, 0);
+ clk = icst_clk_register(NULL, desc, clk_name, parent_name, cm_base);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(integrator_cm_auxosc_clk,
+ "arm,integrator-cm-auxosc", of_integrator_cm_osc_setup);
diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c
index 747e7b31117c..c8b523117fb7 100644
--- a/drivers/clk/versatile/clk-realview.c
+++ b/drivers/clk/versatile/clk-realview.c
@@ -85,10 +85,10 @@ void __init realview_clk_init(void __iomem *sysbase, bool is_pb1176)
/* ICST VCO clock */
if (is_pb1176)
clk = icst_clk_register(NULL, &realview_osc0_desc,
- "osc0", sysbase);
+ "osc0", NULL, sysbase);
else
clk = icst_clk_register(NULL, &realview_osc4_desc,
- "osc4", sysbase);
+ "osc4", NULL, sysbase);
clk_register_clkdev(clk, NULL, "dev:clcd");
clk_register_clkdev(clk, NULL, "issp:clcd");
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 09dd0173ea0a..52c09afdcfb7 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -21,34 +21,35 @@
#include <linux/clk/zynq.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/io.h>
-static void __iomem *zynq_slcr_base_priv;
-
-#define SLCR_ARMPLL_CTRL (zynq_slcr_base_priv + 0x100)
-#define SLCR_DDRPLL_CTRL (zynq_slcr_base_priv + 0x104)
-#define SLCR_IOPLL_CTRL (zynq_slcr_base_priv + 0x108)
-#define SLCR_PLL_STATUS (zynq_slcr_base_priv + 0x10c)
-#define SLCR_ARM_CLK_CTRL (zynq_slcr_base_priv + 0x120)
-#define SLCR_DDR_CLK_CTRL (zynq_slcr_base_priv + 0x124)
-#define SLCR_DCI_CLK_CTRL (zynq_slcr_base_priv + 0x128)
-#define SLCR_APER_CLK_CTRL (zynq_slcr_base_priv + 0x12c)
-#define SLCR_GEM0_CLK_CTRL (zynq_slcr_base_priv + 0x140)
-#define SLCR_GEM1_CLK_CTRL (zynq_slcr_base_priv + 0x144)
-#define SLCR_SMC_CLK_CTRL (zynq_slcr_base_priv + 0x148)
-#define SLCR_LQSPI_CLK_CTRL (zynq_slcr_base_priv + 0x14c)
-#define SLCR_SDIO_CLK_CTRL (zynq_slcr_base_priv + 0x150)
-#define SLCR_UART_CLK_CTRL (zynq_slcr_base_priv + 0x154)
-#define SLCR_SPI_CLK_CTRL (zynq_slcr_base_priv + 0x158)
-#define SLCR_CAN_CLK_CTRL (zynq_slcr_base_priv + 0x15c)
-#define SLCR_CAN_MIOCLK_CTRL (zynq_slcr_base_priv + 0x160)
-#define SLCR_DBG_CLK_CTRL (zynq_slcr_base_priv + 0x164)
-#define SLCR_PCAP_CLK_CTRL (zynq_slcr_base_priv + 0x168)
-#define SLCR_FPGA0_CLK_CTRL (zynq_slcr_base_priv + 0x170)
-#define SLCR_621_TRUE (zynq_slcr_base_priv + 0x1c4)
-#define SLCR_SWDT_CLK_SEL (zynq_slcr_base_priv + 0x304)
+static void __iomem *zynq_clkc_base;
+
+#define SLCR_ARMPLL_CTRL (zynq_clkc_base + 0x00)
+#define SLCR_DDRPLL_CTRL (zynq_clkc_base + 0x04)
+#define SLCR_IOPLL_CTRL (zynq_clkc_base + 0x08)
+#define SLCR_PLL_STATUS (zynq_clkc_base + 0x0c)
+#define SLCR_ARM_CLK_CTRL (zynq_clkc_base + 0x20)
+#define SLCR_DDR_CLK_CTRL (zynq_clkc_base + 0x24)
+#define SLCR_DCI_CLK_CTRL (zynq_clkc_base + 0x28)
+#define SLCR_APER_CLK_CTRL (zynq_clkc_base + 0x2c)
+#define SLCR_GEM0_CLK_CTRL (zynq_clkc_base + 0x40)
+#define SLCR_GEM1_CLK_CTRL (zynq_clkc_base + 0x44)
+#define SLCR_SMC_CLK_CTRL (zynq_clkc_base + 0x48)
+#define SLCR_LQSPI_CLK_CTRL (zynq_clkc_base + 0x4c)
+#define SLCR_SDIO_CLK_CTRL (zynq_clkc_base + 0x50)
+#define SLCR_UART_CLK_CTRL (zynq_clkc_base + 0x54)
+#define SLCR_SPI_CLK_CTRL (zynq_clkc_base + 0x58)
+#define SLCR_CAN_CLK_CTRL (zynq_clkc_base + 0x5c)
+#define SLCR_CAN_MIOCLK_CTRL (zynq_clkc_base + 0x60)
+#define SLCR_DBG_CLK_CTRL (zynq_clkc_base + 0x64)
+#define SLCR_PCAP_CLK_CTRL (zynq_clkc_base + 0x68)
+#define SLCR_FPGA0_CLK_CTRL (zynq_clkc_base + 0x70)
+#define SLCR_621_TRUE (zynq_clkc_base + 0xc4)
+#define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204)
#define NUM_MIO_PINS 54
@@ -148,7 +149,7 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
clks[fclk] = clk_register_gate(NULL, clk_name,
div1_name, CLK_SET_RATE_PARENT, fclk_gate_reg,
0, CLK_GATE_SET_TO_DISABLE, fclk_gate_lock);
- enable_reg = readl(fclk_gate_reg) & 1;
+ enable_reg = clk_readl(fclk_gate_reg) & 1;
if (enable && !enable_reg) {
if (clk_prepare_enable(clks[fclk]))
pr_warn("%s: FCLK%u enable failed\n", __func__,
@@ -277,7 +278,7 @@ static void __init zynq_clk_setup(struct device_node *np)
SLCR_IOPLL_CTRL, 4, 1, 0, &iopll_lock);
/* CPU clocks */
- tmp = readl(SLCR_621_TRUE) & 1;
+ tmp = clk_readl(SLCR_621_TRUE) & 1;
clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0,
&armclk_lock);
@@ -569,8 +570,42 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_OF_DECLARE(zynq_clkc, "xlnx,ps7-clkc", zynq_clk_setup);
-void __init zynq_clock_init(void __iomem *slcr_base)
+void __init zynq_clock_init(void)
{
- zynq_slcr_base_priv = slcr_base;
- of_clk_init(NULL);
+ struct device_node *np;
+ struct device_node *slcr;
+ struct resource res;
+
+ np = of_find_compatible_node(NULL, NULL, "xlnx,ps7-clkc");
+ if (!np) {
+ pr_err("%s: clkc node not found\n", __func__);
+ goto np_err;
+ }
+
+ if (of_address_to_resource(np, 0, &res)) {
+ pr_err("%s: failed to get resource\n", np->name);
+ goto np_err;
+ }
+
+ slcr = of_get_parent(np);
+
+ if (slcr->data) {
+ zynq_clkc_base = (__force void __iomem *)slcr->data + res.start;
+ } else {
+ pr_err("%s: Unable to get I/O memory\n", np->name);
+ of_node_put(slcr);
+ goto np_err;
+ }
+
+ pr_info("%s: clkc starts at %p\n", __func__, zynq_clkc_base);
+
+ of_node_put(slcr);
+ of_node_put(np);
+
+ return;
+
+np_err:
+ of_node_put(np);
+ BUG();
+ return;
}
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index 3226f54fa595..cec97596fe65 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -90,7 +90,7 @@ static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
* makes probably sense to redundantly save fbdiv in the struct
* zynq_pll to save the IO access.
*/
- fbdiv = (readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >>
+ fbdiv = (clk_readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >>
PLLCTRL_FBDIV_SHIFT;
return parent_rate * fbdiv;
@@ -112,7 +112,7 @@ static int zynq_pll_is_enabled(struct clk_hw *hw)
spin_lock_irqsave(clk->lock, flags);
- reg = readl(clk->pll_ctrl);
+ reg = clk_readl(clk->pll_ctrl);
spin_unlock_irqrestore(clk->lock, flags);
@@ -138,10 +138,10 @@ static int zynq_pll_enable(struct clk_hw *hw)
/* Power up PLL and wait for lock */
spin_lock_irqsave(clk->lock, flags);
- reg = readl(clk->pll_ctrl);
+ reg = clk_readl(clk->pll_ctrl);
reg &= ~(PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK);
- writel(reg, clk->pll_ctrl);
- while (!(readl(clk->pll_status) & (1 << clk->lockbit)))
+ clk_writel(reg, clk->pll_ctrl);
+ while (!(clk_readl(clk->pll_status) & (1 << clk->lockbit)))
;
spin_unlock_irqrestore(clk->lock, flags);
@@ -168,9 +168,9 @@ static void zynq_pll_disable(struct clk_hw *hw)
/* shut down PLL */
spin_lock_irqsave(clk->lock, flags);
- reg = readl(clk->pll_ctrl);
+ reg = clk_readl(clk->pll_ctrl);
reg |= PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK;
- writel(reg, clk->pll_ctrl);
+ clk_writel(reg, clk->pll_ctrl);
spin_unlock_irqrestore(clk->lock, flags);
}
@@ -225,9 +225,9 @@ struct clk *clk_register_zynq_pll(const char *name, const char *parent,
spin_lock_irqsave(pll->lock, flags);
- reg = readl(pll->pll_ctrl);
+ reg = clk_readl(pll->pll_ctrl);
reg &= ~PLLCTRL_BPQUAL_MASK;
- writel(reg, pll->pll_ctrl);
+ clk_writel(reg, pll->pll_ctrl);
spin_unlock_irqrestore(pll->lock, flags);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index cd6950fd8caf..96918e1f26a3 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -140,3 +140,54 @@ config VF_PIT_TIMER
bool
help
Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
+
+config SYS_SUPPORTS_SH_CMT
+ bool
+
+config SYS_SUPPORTS_SH_MTU2
+ bool
+
+config SYS_SUPPORTS_SH_TMU
+ bool
+
+config SYS_SUPPORTS_EM_STI
+ bool
+
+config SH_TIMER_CMT
+ bool "Renesas CMT timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ default SYS_SUPPORTS_SH_CMT
+ help
+ This enables build of a clocksource and clockevent driver for
+ the Compare Match Timer (CMT) hardware available in 16/32/48-bit
+ variants on a wide range of Mobile and Automotive SoCs from Renesas.
+
+config SH_TIMER_MTU2
+ bool "Renesas MTU2 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ default SYS_SUPPORTS_SH_MTU2
+ help
+ This enables build of a clockevent driver for the Multi-Function
+ Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
+ This hardware comes with 16-bit timer registers.
+
+config SH_TIMER_TMU
+ bool "Renesas TMU timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ default SYS_SUPPORTS_SH_TMU
+ help
+ This enables build of a clocksource and clockevent driver for
+ the 32-bit Timer Unit (TMU) hardware available on a wide range
+ of SoCs from Renesas.
+
+config EM_TIMER_STI
+ bool "Renesas STI timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ default SYS_SUPPORTS_EM_STI
+ help
+ This enables build of a clocksource and clockevent driver for
+ the 48-bit System Timer (STI) hardware available on SoCs
+ such as EMEV2 from former NEC Electronics.
+
+config CLKSRC_QCOM
+ bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index c7ca50a9c232..98cb6c51aa87 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -1,6 +1,5 @@
obj-$(CONFIG_CLKSRC_OF) += clksrc-of.o
obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
-obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
@@ -21,6 +20,7 @@ obj-$(CONFIG_ARCH_MARCO) += timer-marco.o
obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
+obj-$(CONFIG_ARCH_U300) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o
@@ -32,8 +32,10 @@ obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
+obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
+obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 95fb944e15ee..57e823c44d2a 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -277,6 +277,7 @@ static void __arch_timer_setup(unsigned type,
clk->set_next_event = arch_timer_set_next_event_phys;
}
} else {
+ clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
clk->rating = 400;
clk->cpumask = cpu_all_mask;
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 63f176de0d02..49fbe2847c84 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -16,6 +16,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/of_address.h>
@@ -52,6 +53,8 @@
#define TTC_CNT_CNTRL_DISABLE_MASK 0x1
#define TTC_CLK_CNTRL_CSRC_MASK (1 << 5) /* clock source */
+#define TTC_CLK_CNTRL_PSV_MASK 0x1e
+#define TTC_CLK_CNTRL_PSV_SHIFT 1
/*
* Setup the timers to use pre-scaling, using a fixed value for now that will
@@ -63,6 +66,8 @@
#define CLK_CNTRL_PRESCALE_EN 1
#define CNT_CNTRL_RESET (1 << 4)
+#define MAX_F_ERR 50
+
/**
* struct ttc_timer - This definition defines local timer structure
*
@@ -82,6 +87,8 @@ struct ttc_timer {
container_of(x, struct ttc_timer, clk_rate_change_nb)
struct ttc_timer_clocksource {
+ u32 scale_clk_ctrl_reg_old;
+ u32 scale_clk_ctrl_reg_new;
struct ttc_timer ttc;
struct clocksource cs;
};
@@ -229,32 +236,89 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
struct ttc_timer_clocksource, ttc);
switch (event) {
- case POST_RATE_CHANGE:
+ case PRE_RATE_CHANGE:
+ {
+ u32 psv;
+ unsigned long factor, rate_low, rate_high;
+
+ if (ndata->new_rate > ndata->old_rate) {
+ factor = DIV_ROUND_CLOSEST(ndata->new_rate,
+ ndata->old_rate);
+ rate_low = ndata->old_rate;
+ rate_high = ndata->new_rate;
+ } else {
+ factor = DIV_ROUND_CLOSEST(ndata->old_rate,
+ ndata->new_rate);
+ rate_low = ndata->new_rate;
+ rate_high = ndata->old_rate;
+ }
+
+ if (!is_power_of_2(factor))
+ return NOTIFY_BAD;
+
+ if (abs(rate_high - (factor * rate_low)) > MAX_F_ERR)
+ return NOTIFY_BAD;
+
+ factor = __ilog2_u32(factor);
+
/*
- * Do whatever is necessary to maintain a proper time base
- *
- * I cannot find a way to adjust the currently used clocksource
- * to the new frequency. __clocksource_updatefreq_hz() sounds
- * good, but does not work. Not sure what's that missing.
- *
- * This approach works, but triggers two clocksource switches.
- * The first after unregister to clocksource jiffies. And
- * another one after the register to the newly registered timer.
- *
- * Alternatively we could 'waste' another HW timer to ping pong
- * between clock sources. That would also use one register and
- * one unregister call, but only trigger one clocksource switch
- * for the cost of another HW timer used by the OS.
+ * store timer clock ctrl register so we can restore it in case
+ * of an abort.
*/
- clocksource_unregister(&ttccs->cs);
- clocksource_register_hz(&ttccs->cs,
- ndata->new_rate / PRESCALE);
- /* fall through */
- case PRE_RATE_CHANGE:
+ ttccs->scale_clk_ctrl_reg_old =
+ __raw_readl(ttccs->ttc.base_addr +
+ TTC_CLK_CNTRL_OFFSET);
+
+ psv = (ttccs->scale_clk_ctrl_reg_old &
+ TTC_CLK_CNTRL_PSV_MASK) >>
+ TTC_CLK_CNTRL_PSV_SHIFT;
+ if (ndata->new_rate < ndata->old_rate)
+ psv -= factor;
+ else
+ psv += factor;
+
+ /* prescaler within legal range? */
+ if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT))
+ return NOTIFY_BAD;
+
+ ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old &
+ ~TTC_CLK_CNTRL_PSV_MASK;
+ ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT;
+
+ /* scale down: adjust divider in post-change notification */
+ if (ndata->new_rate < ndata->old_rate)
+ return NOTIFY_DONE;
+
+ /* scale up: adjust divider now - before frequency change */
+ __raw_writel(ttccs->scale_clk_ctrl_reg_new,
+ ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ break;
+ }
+ case POST_RATE_CHANGE:
+ /* scale up: pre-change notification did the adjustment */
+ if (ndata->new_rate > ndata->old_rate)
+ return NOTIFY_OK;
+
+ /* scale down: adjust divider now - after frequency change */
+ __raw_writel(ttccs->scale_clk_ctrl_reg_new,
+ ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ break;
+
case ABORT_RATE_CHANGE:
+ /* we have to undo the adjustment in case we scale up */
+ if (ndata->new_rate < ndata->old_rate)
+ return NOTIFY_OK;
+
+ /* restore original register value */
+ __raw_writel(ttccs->scale_clk_ctrl_reg_old,
+ ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ /* fall through */
default:
return NOTIFY_DONE;
}
+
+ return NOTIFY_DONE;
}
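To make the prescaler bookkeeping above concrete: for a power-of-two rate change the PSV field is moved by ilog2(factor), which keeps the effective clocksource rate (input / 2^(PSV + 1)) essentially constant across the change. A small worked sketch (plain C, illustrative numbers, not part of the patch):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long old_rate = 133333333;	/* timer input before the change */
	unsigned long new_rate = 66666666;	/* timer input after the change */
	unsigned long factor = DIV_ROUND_CLOSEST(old_rate, new_rate);	/* 2 */
	unsigned int psv = 3;			/* current PSV field value */
	unsigned int shift = 0;

	while ((1ul << (shift + 1)) <= factor)
		shift++;			/* ilog2(factor) == 1 */

	/* scale down: the input halves, so halve the division as well */
	psv -= shift;

	/* effective count rate input / 2^(psv + 1) is (nearly) unchanged */
	printf("before: %lu Hz, after: %lu Hz\n",
	       old_rate / (1 << 4), new_rate / (1 << (psv + 1)));
	return 0;
}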
static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
@@ -321,25 +385,12 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
switch (event) {
case POST_RATE_CHANGE:
- {
- unsigned long flags;
-
- /*
- * clockevents_update_freq should be called with IRQ disabled on
- * the CPU the timer provides events for. The timer we use is
- * common to both CPUs, not sure if we need to run on both
- * cores.
- */
- local_irq_save(flags);
- clockevents_update_freq(&ttcce->ce,
- ndata->new_rate / PRESCALE);
- local_irq_restore(flags);
-
/* update cached frequency */
ttc->freq = ndata->new_rate;
+ clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE);
+
/* fall through */
- }
case PRE_RATE_CHANGE:
case ABORT_RATE_CHANGE:
default:
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
deleted file mode 100644
index 9e0998f22885..000000000000
--- a/drivers/clocksource/cyclone.c
+++ /dev/null
@@ -1,113 +0,0 @@
-#include <linux/clocksource.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/timex.h>
-#include <linux/init.h>
-
-#include <asm/pgtable.h>
-#include <asm/io.h>
-
-#include <asm/mach_timer.h>
-
-#define CYCLONE_CBAR_ADDR 0xFEB00CD0 /* base address ptr */
-#define CYCLONE_PMCC_OFFSET 0x51A0 /* offset to control register */
-#define CYCLONE_MPCS_OFFSET 0x51A8 /* offset to select register */
-#define CYCLONE_MPMC_OFFSET 0x51D0 /* offset to count register */
-#define CYCLONE_TIMER_FREQ 99780000 /* 100Mhz, but not really */
-#define CYCLONE_TIMER_MASK CLOCKSOURCE_MASK(32) /* 32 bit mask */
-
-int use_cyclone = 0;
-static void __iomem *cyclone_ptr;
-
-static cycle_t read_cyclone(struct clocksource *cs)
-{
- return (cycle_t)readl(cyclone_ptr);
-}
-
-static struct clocksource clocksource_cyclone = {
- .name = "cyclone",
- .rating = 250,
- .read = read_cyclone,
- .mask = CYCLONE_TIMER_MASK,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init init_cyclone_clocksource(void)
-{
- unsigned long base; /* saved value from CBAR */
- unsigned long offset;
- u32 __iomem* volatile cyclone_timer; /* Cyclone MPMC0 register */
- u32 __iomem* reg;
- int i;
-
- /* make sure we're on a summit box: */
- if (!use_cyclone)
- return -ENODEV;
-
- printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
-
- /* find base address: */
- offset = CYCLONE_CBAR_ADDR;
- reg = ioremap_nocache(offset, sizeof(reg));
- if (!reg) {
- printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
- return -ENODEV;
- }
- /* even on 64bit systems, this is only 32bits: */
- base = readl(reg);
- iounmap(reg);
- if (!base) {
- printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
- return -ENODEV;
- }
-
- /* setup PMCC: */
- offset = base + CYCLONE_PMCC_OFFSET;
- reg = ioremap_nocache(offset, sizeof(reg));
- if (!reg) {
- printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
- return -ENODEV;
- }
- writel(0x00000001,reg);
- iounmap(reg);
-
- /* setup MPCS: */
- offset = base + CYCLONE_MPCS_OFFSET;
- reg = ioremap_nocache(offset, sizeof(reg));
- if (!reg) {
- printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
- return -ENODEV;
- }
- writel(0x00000001,reg);
- iounmap(reg);
-
- /* map in cyclone_timer: */
- offset = base + CYCLONE_MPMC_OFFSET;
- cyclone_timer = ioremap_nocache(offset, sizeof(u64));
- if (!cyclone_timer) {
- printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
- return -ENODEV;
- }
-
- /* quick test to make sure its ticking: */
- for (i = 0; i < 3; i++){
- u32 old = readl(cyclone_timer);
- int stall = 100;
-
- while (stall--)
- barrier();
-
- if (readl(cyclone_timer) == old) {
- printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
- iounmap(cyclone_timer);
- cyclone_timer = NULL;
- return -ENODEV;
- }
- }
- cyclone_ptr = cyclone_timer;
-
- return clocksource_register_hz(&clocksource_cyclone,
- CYCLONE_TIMER_FREQ);
-}
-
-arch_initcall(init_cyclone_clocksource);
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index b3eb582d6a6f..ad3572541728 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -56,14 +56,19 @@ static struct notifier_block dummy_timer_cpu_nb = {
static int __init dummy_timer_register(void)
{
- int err = register_cpu_notifier(&dummy_timer_cpu_nb);
+ int err = 0;
+
+ cpu_notifier_register_begin();
+ err = __register_cpu_notifier(&dummy_timer_cpu_nb);
if (err)
- return err;
+ goto out;
/* We won't get a call on the boot CPU, so register immediately */
if (num_possible_cpus() > 1)
dummy_timer_setup();
- return 0;
+out:
+ cpu_notifier_register_done();
+ return err;
}
early_initcall(dummy_timer_register);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 48f76bc05da0..a6ee6d7cd63f 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -25,8 +25,6 @@
#include <linux/of_address.h>
#include <linux/clocksource.h>
-#include <asm/mach/time.h>
-
#define EXYNOS4_MCTREG(x) (x)
#define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U EXYNOS4_MCTREG(0x104)
@@ -410,7 +408,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
mevt = container_of(evt, struct mct_clock_event_device, evt);
mevt->base = EXYNOS4_MCT_L_BASE(cpu);
- sprintf(mevt->name, "mct_tick%d", cpu);
+ snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
evt->name = mevt->name;
evt->cpumask = cpumask_of(cpu);
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
new file mode 100644
index 000000000000..e807acf4c665
--- /dev/null
+++ b/drivers/clocksource/qcom-timer.c
@@ -0,0 +1,330 @@
+/*
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2012,2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+#define TIMER_MATCH_VAL 0x0000
+#define TIMER_COUNT_VAL 0x0004
+#define TIMER_ENABLE 0x0008
+#define TIMER_ENABLE_CLR_ON_MATCH_EN BIT(1)
+#define TIMER_ENABLE_EN BIT(0)
+#define TIMER_CLEAR 0x000C
+#define DGT_CLK_CTL 0x10
+#define DGT_CLK_CTL_DIV_4 0x3
+#define TIMER_STS_GPT0_CLR_PEND BIT(10)
+
+#define GPT_HZ 32768
+
+#define MSM_DGT_SHIFT 5
+
+static void __iomem *event_base;
+static void __iomem *sts_base;
+
+static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ /* Stop the timer tick */
+ if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
+ u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
+ ctrl &= ~TIMER_ENABLE_EN;
+ writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+ }
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static int msm_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
+
+ ctrl &= ~TIMER_ENABLE_EN;
+ writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+
+ writel_relaxed(ctrl, event_base + TIMER_CLEAR);
+ writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
+
+ if (sts_base)
+ while (readl_relaxed(sts_base) & TIMER_STS_GPT0_CLR_PEND)
+ cpu_relax();
+
+ writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
+ return 0;
+}
+
+static void msm_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ ctrl = readl_relaxed(event_base + TIMER_ENABLE);
+ ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_PERIODIC:
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* Timer is enabled in set_next_event */
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ break;
+ }
+ writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+}
+
+static struct clock_event_device __percpu *msm_evt;
+
+static void __iomem *source_base;
+
+static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
+{
+ return readl_relaxed(source_base + TIMER_COUNT_VAL);
+}
+
+static struct clocksource msm_clocksource = {
+ .name = "dg_timer",
+ .rating = 300,
+ .read = msm_read_timer_count,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int msm_timer_irq;
+static int msm_timer_has_ppi;
+
+static int msm_local_timer_setup(struct clock_event_device *evt)
+{
+ int cpu = smp_processor_id();
+ int err;
+
+ evt->irq = msm_timer_irq;
+ evt->name = "msm_timer";
+ evt->features = CLOCK_EVT_FEAT_ONESHOT;
+ evt->rating = 200;
+ evt->set_mode = msm_timer_set_mode;
+ evt->set_next_event = msm_timer_set_next_event;
+ evt->cpumask = cpumask_of(cpu);
+
+ clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);
+
+ if (msm_timer_has_ppi) {
+ enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
+ } else {
+ err = request_irq(evt->irq, msm_timer_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING |
+ IRQF_TRIGGER_RISING, "gp_timer", evt);
+ if (err)
+ pr_err("request_irq failed\n");
+ }
+
+ return 0;
+}
+
+static void msm_local_timer_stop(struct clock_event_device *evt)
+{
+ evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+ disable_percpu_irq(evt->irq);
+}
+
+static int msm_timer_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * Grab cpu pointer in each case to avoid spurious
+ * preemptible warnings
+ */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ msm_local_timer_setup(this_cpu_ptr(msm_evt));
+ break;
+ case CPU_DYING:
+ msm_local_timer_stop(this_cpu_ptr(msm_evt));
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block msm_timer_cpu_nb = {
+ .notifier_call = msm_timer_cpu_notify,
+};
+
+static u64 notrace msm_sched_clock_read(void)
+{
+ return msm_clocksource.read(&msm_clocksource);
+}
+
+static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
+ bool percpu)
+{
+ struct clocksource *cs = &msm_clocksource;
+ int res = 0;
+
+ msm_timer_irq = irq;
+ msm_timer_has_ppi = percpu;
+
+ msm_evt = alloc_percpu(struct clock_event_device);
+ if (!msm_evt) {
+ pr_err("memory allocation failed for clockevents\n");
+ goto err;
+ }
+
+ if (percpu)
+ res = request_percpu_irq(irq, msm_timer_interrupt,
+ "gp_timer", msm_evt);
+
+ if (res) {
+ pr_err("request_percpu_irq failed\n");
+ } else {
+ res = register_cpu_notifier(&msm_timer_cpu_nb);
+ if (res) {
+ free_percpu_irq(irq, msm_evt);
+ goto err;
+ }
+
+ /* Immediately configure the timer on the boot CPU */
+ msm_local_timer_setup(__this_cpu_ptr(msm_evt));
+ }
+
+err:
+ writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
+ res = clocksource_register_hz(cs, dgt_hz);
+ if (res)
+ pr_err("clocksource_register failed\n");
+ sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
+}
+
+#ifdef CONFIG_ARCH_QCOM
+static void __init msm_dt_timer_init(struct device_node *np)
+{
+ u32 freq;
+ int irq;
+ struct resource res;
+ u32 percpu_offset;
+ void __iomem *base;
+ void __iomem *cpu0_base;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("Failed to map event base\n");
+ return;
+ }
+
+ /* We use GPT0 for the clockevent */
+ irq = irq_of_parse_and_map(np, 1);
+ if (irq <= 0) {
+ pr_err("Can't get irq\n");
+ return;
+ }
+
+ /* We use CPU0's DGT for the clocksource */
+ if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
+ percpu_offset = 0;
+
+ if (of_address_to_resource(np, 0, &res)) {
+ pr_err("Failed to parse DGT resource\n");
+ return;
+ }
+
+ cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
+ if (!cpu0_base) {
+ pr_err("Failed to map source base\n");
+ return;
+ }
+
+ if (of_property_read_u32(np, "clock-frequency", &freq)) {
+ pr_err("Unknown frequency\n");
+ return;
+ }
+
+ event_base = base + 0x4;
+ sts_base = base + 0x88;
+ source_base = cpu0_base + 0x24;
+ freq /= 4;
+ writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
+
+ msm_timer_init(freq, 32, irq, !!percpu_offset);
+}
+CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
+CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
+#else
+
+static int __init msm_timer_map(phys_addr_t addr, u32 event, u32 source,
+ u32 sts)
+{
+ void __iomem *base;
+
+ base = ioremap(addr, SZ_256);
+ if (!base) {
+ pr_err("Failed to map timer base\n");
+ return -ENOMEM;
+ }
+ event_base = base + event;
+ source_base = base + source;
+ if (sts)
+ sts_base = base + sts;
+
+ return 0;
+}
+
+static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs)
+{
+ /*
+ * Shift timer count down by a constant due to unreliable lower bits
+ * on some targets.
+ */
+ return msm_read_timer_count(cs) >> MSM_DGT_SHIFT;
+}
+
+void __init msm7x01_timer_init(void)
+{
+ struct clocksource *cs = &msm_clocksource;
+
+ if (msm_timer_map(0xc0100000, 0x0, 0x10, 0x0))
+ return;
+ cs->read = msm_read_timer_count_shift;
+ cs->mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT));
+ /* 600 KHz */
+ msm_timer_init(19200000 >> MSM_DGT_SHIFT, 32 - MSM_DGT_SHIFT, 7,
+ false);
+}
+
+void __init msm7x30_timer_init(void)
+{
+ if (msm_timer_map(0xc0100000, 0x4, 0x24, 0x80))
+ return;
+ msm_timer_init(24576000 / 4, 32, 1, false);
+}
+
+void __init qsd8x50_timer_init(void)
+{
+ if (msm_timer_map(0xAC100000, 0x0, 0x10, 0x34))
+ return;
+ msm_timer_init(19200000 / 4, 32, 7, false);
+}
+#endif
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index bf497afba9ad..efb17c3ee120 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -196,5 +196,5 @@ static void __init sun4i_timer_init(struct device_node *node)
clockevents_config_and_register(&sun4i_clockevent, rate,
TIMER_SYNC_TICKS, 0xffffffff);
}
-CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
+CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
sun4i_timer_init);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index ee8691b89944..0451e62fac7a 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -85,12 +85,6 @@ static u32 ticks_per_jiffy;
static struct clock_event_device __percpu *armada_370_xp_evt;
-static void timer_ctrl_clrset(u32 clr, u32 set)
-{
- writel((readl(timer_base + TIMER_CTRL_OFF) & ~clr) | set,
- timer_base + TIMER_CTRL_OFF);
-}
-
static void local_timer_ctrl_clrset(u32 clr, u32 set)
{
writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,
@@ -245,7 +239,7 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
clr = TIMER0_25MHZ;
enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
}
- timer_ctrl_clrset(clr, set);
+ atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
local_timer_ctrl_clrset(clr, set);
/*
@@ -263,7 +257,9 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
- timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
+ atomic_io_modify(timer_base + TIMER_CTRL_OFF,
+ TIMER0_RELOAD_EN | enable_mask,
+ TIMER0_RELOAD_EN | enable_mask);
/*
* Set scale and timer for sched_clock.
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index 20066222f3f2..0b3ce0399c51 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -35,20 +35,6 @@
#define ORION_ONESHOT_MAX 0xfffffffe
static void __iomem *timer_base;
-static DEFINE_SPINLOCK(timer_ctrl_lock);
-
-/*
- * Thread-safe access to TIMER_CTRL register
- * (shared with watchdog timer)
- */
-void orion_timer_ctrl_clrset(u32 clr, u32 set)
-{
- spin_lock(&timer_ctrl_lock);
- writel((readl(timer_base + TIMER_CTRL) & ~clr) | set,
- timer_base + TIMER_CTRL);
- spin_unlock(&timer_ctrl_lock);
-}
-EXPORT_SYMBOL(orion_timer_ctrl_clrset);
/*
* Free-running clocksource handling.
@@ -68,7 +54,8 @@ static int orion_clkevt_next_event(unsigned long delta,
{
/* setup and enable one-shot timer */
writel(delta, timer_base + TIMER1_VAL);
- orion_timer_ctrl_clrset(TIMER1_RELOAD_EN, TIMER1_EN);
+ atomic_io_modify(timer_base + TIMER_CTRL,
+ TIMER1_RELOAD_EN | TIMER1_EN, TIMER1_EN);
return 0;
}
@@ -80,10 +67,13 @@ static void orion_clkevt_mode(enum clock_event_mode mode,
/* setup and enable periodic timer at 1/HZ intervals */
writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD);
writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL);
- orion_timer_ctrl_clrset(0, TIMER1_RELOAD_EN | TIMER1_EN);
+ atomic_io_modify(timer_base + TIMER_CTRL,
+ TIMER1_RELOAD_EN | TIMER1_EN,
+ TIMER1_RELOAD_EN | TIMER1_EN);
} else {
/* disable timer */
- orion_timer_ctrl_clrset(TIMER1_RELOAD_EN | TIMER1_EN, 0);
+ atomic_io_modify(timer_base + TIMER_CTRL,
+ TIMER1_RELOAD_EN | TIMER1_EN, 0);
}
}
@@ -131,7 +121,9 @@ static void __init orion_timer_init(struct device_node *np)
/* setup timer0 as free-running clocksource */
writel(~0, timer_base + TIMER0_VAL);
writel(~0, timer_base + TIMER0_RELOAD);
- orion_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | TIMER0_EN);
+ atomic_io_modify(timer_base + TIMER_CTRL,
+ TIMER0_RELOAD_EN | TIMER0_EN,
+ TIMER0_RELOAD_EN | TIMER0_EN);
clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
clk_get_rate(clk), 300, 32,
clocksource_mmio_readl_down);
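The two conversions above (Armada 370/XP and Orion) replace open-coded, and in the Orion case spinlock-guarded, read-modify-write accesses to a TIMER_CTRL register that is shared with the watchdog driver. A rough sketch of what atomic_io_modify(reg, mask, set) is expected to do is shown below; the real helper lives in the ARM architecture code and performs this sequence under its own lock, and the old clr/set pairs map onto it as mask = clr | set:

static void sketch_io_modify(void __iomem *reg, u32 mask, u32 set)
{
	u32 val;

	val = readl(reg) & ~mask;	/* clear every bit covered by the mask */
	val |= set & mask;		/* then set the requested subset */
	writel(val, reg);		/* the real helper does this atomically */
}

Centralising the locking in one helper is what allows both the timer and watchdog drivers to drop their private clrset wrappers.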
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
new file mode 100644
index 000000000000..0250354f7e55
--- /dev/null
+++ b/drivers/clocksource/timer-keystone.c
@@ -0,0 +1,241 @@
+/*
+ * Keystone broadcast clock-event
+ *
+ * Copyright 2013 Texas Instruments, Inc.
+ *
+ * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define TIMER_NAME "timer-keystone"
+
+/* Timer register offsets */
+#define TIM12 0x10
+#define TIM34 0x14
+#define PRD12 0x18
+#define PRD34 0x1c
+#define TCR 0x20
+#define TGCR 0x24
+#define INTCTLSTAT 0x44
+
+/* Timer register bitfields */
+#define TCR_ENAMODE_MASK 0xC0
+#define TCR_ENAMODE_ONESHOT_MASK 0x40
+#define TCR_ENAMODE_PERIODIC_MASK 0x80
+
+#define TGCR_TIM_UNRESET_MASK 0x03
+#define INTCTLSTAT_ENINT_MASK 0x01
+
+/**
+ * struct keystone_timer: holds timer's data
+ * @base: timer memory base address
+ * @hz_period: cycles per HZ period
+ * @event_dev: event device based on timer
+ */
+static struct keystone_timer {
+ void __iomem *base;
+ unsigned long hz_period;
+ struct clock_event_device event_dev;
+} timer;
+
+static inline u32 keystone_timer_readl(unsigned long rg)
+{
+ return readl_relaxed(timer.base + rg);
+}
+
+static inline void keystone_timer_writel(u32 val, unsigned long rg)
+{
+ writel_relaxed(val, timer.base + rg);
+}
+
+/**
+ * keystone_timer_barrier: write memory barrier
+ * Use an explicit barrier instead of the non-relaxed readl/writel variants,
+ * because in our case the non-relaxed variants would hide the places where
+ * a barrier is actually needed.
+ */
+static inline void keystone_timer_barrier(void)
+{
+ __iowmb();
+}
+
+/**
+ * keystone_timer_config: configures timer to work in oneshot/periodic modes.
+ * @period: number of cycles to configure the timer for
+ * @mode: mode to configure
+ */
+static int keystone_timer_config(u64 period, enum clock_event_mode mode)
+{
+ u32 tcr;
+ u32 off;
+
+ tcr = keystone_timer_readl(TCR);
+ off = tcr & ~(TCR_ENAMODE_MASK);
+
+ /* set enable mode */
+ switch (mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ tcr |= TCR_ENAMODE_ONESHOT_MASK;
+ break;
+ case CLOCK_EVT_MODE_PERIODIC:
+ tcr |= TCR_ENAMODE_PERIODIC_MASK;
+ break;
+ default:
+ return -1;
+ }
+
+ /* disable timer */
+ keystone_timer_writel(off, TCR);
+ /* here we have to be sure the timer has been disabled */
+ keystone_timer_barrier();
+
+ /* reset counter to zero, set new period */
+ keystone_timer_writel(0, TIM12);
+ keystone_timer_writel(0, TIM34);
+ keystone_timer_writel(period & 0xffffffff, PRD12);
+ keystone_timer_writel(period >> 32, PRD34);
+
+ /*
+ * enable timer
+ * here we have to be sure that CNTLO, CNTHI, PRDLO, PRDHI registers
+ * have been written.
+ */
+ keystone_timer_barrier();
+ keystone_timer_writel(tcr, TCR);
+ return 0;
+}
+
+static void keystone_timer_disable(void)
+{
+ u32 tcr;
+
+ tcr = keystone_timer_readl(TCR);
+
+ /* disable timer */
+ tcr &= ~(TCR_ENAMODE_MASK);
+ keystone_timer_writel(tcr, TCR);
+}
+
+static irqreturn_t keystone_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static int keystone_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ return keystone_timer_config(cycles, evt->mode);
+}
+
+static void keystone_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ keystone_timer_config(timer.hz_period, CLOCK_EVT_MODE_PERIODIC);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_ONESHOT:
+ keystone_timer_disable();
+ break;
+ default:
+ break;
+ }
+}
+
+static void __init keystone_timer_init(struct device_node *np)
+{
+ struct clock_event_device *event_dev = &timer.event_dev;
+ unsigned long rate;
+ struct clk *clk;
+ int irq, error;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq == NO_IRQ) {
+ pr_err("%s: failed to map interrupts\n", __func__);
+ return;
+ }
+
+ timer.base = of_iomap(np, 0);
+ if (!timer.base) {
+ pr_err("%s: failed to map registers\n", __func__);
+ return;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to get clock\n", __func__);
+ iounmap(timer.base);
+ return;
+ }
+
+ error = clk_prepare_enable(clk);
+ if (error) {
+ pr_err("%s: failed to enable clock\n", __func__);
+ goto err;
+ }
+
+ rate = clk_get_rate(clk);
+
+ /* disable, use internal clock source */
+ keystone_timer_writel(0, TCR);
+ /* here we have to be sure the timer has been disabled */
+ keystone_timer_barrier();
+
+ /* reset timer as 64-bit, no pre-scaler, plus features are disabled */
+ keystone_timer_writel(0, TGCR);
+
+ /* unreset timer */
+ keystone_timer_writel(TGCR_TIM_UNRESET_MASK, TGCR);
+
+ /* init counter to zero */
+ keystone_timer_writel(0, TIM12);
+ keystone_timer_writel(0, TIM34);
+
+ timer.hz_period = DIV_ROUND_UP(rate, HZ);
+
+ /* enable timer interrupts */
+ keystone_timer_writel(INTCTLSTAT_ENINT_MASK, INTCTLSTAT);
+
+ error = request_irq(irq, keystone_timer_interrupt, IRQF_TIMER,
+ TIMER_NAME, event_dev);
+ if (error) {
+ pr_err("%s: failed to setup irq\n", __func__);
+ goto err;
+ }
+
+ /* setup clockevent */
+ event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ event_dev->set_next_event = keystone_set_next_event;
+ event_dev->set_mode = keystone_set_mode;
+ event_dev->cpumask = cpu_all_mask;
+ event_dev->owner = THIS_MODULE;
+ event_dev->name = TIMER_NAME;
+ event_dev->irq = irq;
+
+ clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
+
+ pr_info("keystone timer clock @%lu Hz\n", rate);
+ return;
+err:
+ clk_put(clk);
+ iounmap(timer.base);
+}
+
+CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
+ keystone_timer_init);
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index 09a17d9a6594..b52e1c078b99 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -19,7 +19,8 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
-#include <asm/mach/time.h>
+
+#define MARCO_CLOCK_FREQ 1000000
#define SIRFSOC_TIMER_32COUNTER_0_CTRL 0x0000
#define SIRFSOC_TIMER_32COUNTER_1_CTRL 0x0004
@@ -191,7 +192,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
ce->rating = 200;
ce->set_mode = sirfsoc_timer_set_mode;
ce->set_next_event = sirfsoc_timer_set_next_event;
- clockevents_calc_mult_shift(ce, CLOCK_TICK_RATE, 60);
+ clockevents_calc_mult_shift(ce, MARCO_CLOCK_FREQ, 60);
ce->max_delta_ns = clockevent_delta2ns(-2, ce);
ce->min_delta_ns = clockevent_delta2ns(2, ce);
ce->cpumask = cpumask_of(cpu);
@@ -263,11 +264,11 @@ static void __init sirfsoc_marco_timer_init(void)
BUG_ON(IS_ERR(clk));
rate = clk_get_rate(clk);
- BUG_ON(rate < CLOCK_TICK_RATE);
- BUG_ON(rate % CLOCK_TICK_RATE);
+ BUG_ON(rate < MARCO_CLOCK_FREQ);
+ BUG_ON(rate % MARCO_CLOCK_FREQ);
/* Initialize the timer dividers */
- timer_div = rate / CLOCK_TICK_RATE - 1;
+ timer_div = rate / MARCO_CLOCK_FREQ - 1;
writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL);
writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_1_CTRL);
@@ -283,7 +284,7 @@ static void __init sirfsoc_marco_timer_init(void)
/* Clear all interrupts */
writel_relaxed(0xFFFF, sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
- BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
+ BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, MARCO_CLOCK_FREQ));
sirfsoc_clockevent_init();
}
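To make the divider arithmetic above concrete, here is a worked example; the 26 MHz parent rate is purely hypothetical and not taken from any SiRF board file:

	timer_div = rate / MARCO_CLOCK_FREQ - 1
	          = 26000000 / 1000000 - 1
	          = 25

The BUG_ON(rate % MARCO_CLOCK_FREQ) check guarantees the division is exact, and programming the field as rate / MARCO_CLOCK_FREQ - 1 suggests the hardware divides by timer_div + 1, so the prescaled counters tick at exactly the 1 MHz that the clocksource and clockevent are registered with. The timer-prima2.c change below follows the same pattern with PRIMA2_CLOCK_FREQ.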
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index 8a492d34ff9f..1a6b2d6356d6 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -21,6 +21,8 @@
#include <linux/sched_clock.h>
#include <asm/mach/time.h>
+#define PRIMA2_CLOCK_FREQ 1000000
+
#define SIRFSOC_TIMER_COUNTER_LO 0x0000
#define SIRFSOC_TIMER_COUNTER_HI 0x0004
#define SIRFSOC_TIMER_MATCH_0 0x0008
@@ -173,7 +175,7 @@ static u64 notrace sirfsoc_read_sched_clock(void)
static void __init sirfsoc_clockevent_init(void)
{
sirfsoc_clockevent.cpumask = cpumask_of(0);
- clockevents_config_and_register(&sirfsoc_clockevent, CLOCK_TICK_RATE,
+ clockevents_config_and_register(&sirfsoc_clockevent, PRIMA2_CLOCK_FREQ,
2, -2);
}
@@ -190,8 +192,8 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
rate = clk_get_rate(clk);
- BUG_ON(rate < CLOCK_TICK_RATE);
- BUG_ON(rate % CLOCK_TICK_RATE);
+ BUG_ON(rate < PRIMA2_CLOCK_FREQ);
+ BUG_ON(rate % PRIMA2_CLOCK_FREQ);
sirfsoc_timer_base = of_iomap(np, 0);
if (!sirfsoc_timer_base)
@@ -199,14 +201,16 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
- writel_relaxed(rate / CLOCK_TICK_RATE / 2 - 1, sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
+ writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1,
+ sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
- BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
+ BUG_ON(clocksource_register_hz(&sirfsoc_clocksource,
+ PRIMA2_CLOCK_FREQ));
- sched_clock_register(sirfsoc_read_sched_clock, 64, CLOCK_TICK_RATE);
+ sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);
BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
new file mode 100644
index 000000000000..5dcf756970e7
--- /dev/null
+++ b/drivers/clocksource/timer-u300.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2007-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * Timer COH 901 328, runs the OS timer interrupt.
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+/* Generic stuff */
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+
+/*
+ * APP side special timer registers
+ * This timer contains four timers which can fire an interrupt each.
+ * OS (operating system) timer @ 32768 Hz
+ * DD (device driver) timer @ 1 kHz
+ * GP1 (general purpose 1) timer @ 1MHz
+ * GP2 (general purpose 2) timer @ 1MHz
+ */
+
+/* Reset OS Timer 32bit (-/W) */
+#define U300_TIMER_APP_ROST (0x0000)
+#define U300_TIMER_APP_ROST_TIMER_RESET (0x00000000)
+/* Enable OS Timer 32bit (-/W) */
+#define U300_TIMER_APP_EOST (0x0004)
+#define U300_TIMER_APP_EOST_TIMER_ENABLE (0x00000000)
+/* Disable OS Timer 32bit (-/W) */
+#define U300_TIMER_APP_DOST (0x0008)
+#define U300_TIMER_APP_DOST_TIMER_DISABLE (0x00000000)
+/* OS Timer Mode Register 32bit (-/W) */
+#define U300_TIMER_APP_SOSTM (0x000c)
+#define U300_TIMER_APP_SOSTM_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_SOSTM_MODE_ONE_SHOT (0x00000001)
+/* OS Timer Status Register 32bit (R/-) */
+#define U300_TIMER_APP_OSTS (0x0010)
+#define U300_TIMER_APP_OSTS_TIMER_STATE_MASK (0x0000000F)
+#define U300_TIMER_APP_OSTS_TIMER_STATE_IDLE (0x00000001)
+#define U300_TIMER_APP_OSTS_TIMER_STATE_ACTIVE (0x00000002)
+#define U300_TIMER_APP_OSTS_ENABLE_IND (0x00000010)
+#define U300_TIMER_APP_OSTS_MODE_MASK (0x00000020)
+#define U300_TIMER_APP_OSTS_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_OSTS_MODE_ONE_SHOT (0x00000020)
+#define U300_TIMER_APP_OSTS_IRQ_ENABLED_IND (0x00000040)
+#define U300_TIMER_APP_OSTS_IRQ_PENDING_IND (0x00000080)
+/* OS Timer Current Count Register 32bit (R/-) */
+#define U300_TIMER_APP_OSTCC (0x0014)
+/* OS Timer Terminal Count Register 32bit (R/W) */
+#define U300_TIMER_APP_OSTTC (0x0018)
+/* OS Timer Interrupt Enable Register 32bit (-/W) */
+#define U300_TIMER_APP_OSTIE (0x001c)
+#define U300_TIMER_APP_OSTIE_IRQ_DISABLE (0x00000000)
+#define U300_TIMER_APP_OSTIE_IRQ_ENABLE (0x00000001)
+/* OS Timer Interrupt Acknowledge Register 32bit (-/W) */
+#define U300_TIMER_APP_OSTIA (0x0020)
+#define U300_TIMER_APP_OSTIA_IRQ_ACK (0x00000080)
+
+/* Reset DD Timer 32bit (-/W) */
+#define U300_TIMER_APP_RDDT (0x0040)
+#define U300_TIMER_APP_RDDT_TIMER_RESET (0x00000000)
+/* Enable DD Timer 32bit (-/W) */
+#define U300_TIMER_APP_EDDT (0x0044)
+#define U300_TIMER_APP_EDDT_TIMER_ENABLE (0x00000000)
+/* Disable DD Timer 32bit (-/W) */
+#define U300_TIMER_APP_DDDT (0x0048)
+#define U300_TIMER_APP_DDDT_TIMER_DISABLE (0x00000000)
+/* DD Timer Mode Register 32bit (-/W) */
+#define U300_TIMER_APP_SDDTM (0x004c)
+#define U300_TIMER_APP_SDDTM_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_SDDTM_MODE_ONE_SHOT (0x00000001)
+/* DD Timer Status Register 32bit (R/-) */
+#define U300_TIMER_APP_DDTS (0x0050)
+#define U300_TIMER_APP_DDTS_TIMER_STATE_MASK (0x0000000F)
+#define U300_TIMER_APP_DDTS_TIMER_STATE_IDLE (0x00000001)
+#define U300_TIMER_APP_DDTS_TIMER_STATE_ACTIVE (0x00000002)
+#define U300_TIMER_APP_DDTS_ENABLE_IND (0x00000010)
+#define U300_TIMER_APP_DDTS_MODE_MASK (0x00000020)
+#define U300_TIMER_APP_DDTS_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_DDTS_MODE_ONE_SHOT (0x00000020)
+#define U300_TIMER_APP_DDTS_IRQ_ENABLED_IND (0x00000040)
+#define U300_TIMER_APP_DDTS_IRQ_PENDING_IND (0x00000080)
+/* DD Timer Current Count Register 32bit (R/-) */
+#define U300_TIMER_APP_DDTCC (0x0054)
+/* DD Timer Terminal Count Register 32bit (R/W) */
+#define U300_TIMER_APP_DDTTC (0x0058)
+/* DD Timer Interrupt Enable Register 32bit (-/W) */
+#define U300_TIMER_APP_DDTIE (0x005c)
+#define U300_TIMER_APP_DDTIE_IRQ_DISABLE (0x00000000)
+#define U300_TIMER_APP_DDTIE_IRQ_ENABLE (0x00000001)
+/* DD Timer Interrupt Acknowledge Register 32bit (-/W) */
+#define U300_TIMER_APP_DDTIA (0x0060)
+#define U300_TIMER_APP_DDTIA_IRQ_ACK (0x00000080)
+
+/* Reset GP1 Timer 32bit (-/W) */
+#define U300_TIMER_APP_RGPT1 (0x0080)
+#define U300_TIMER_APP_RGPT1_TIMER_RESET (0x00000000)
+/* Enable GP1 Timer 32bit (-/W) */
+#define U300_TIMER_APP_EGPT1 (0x0084)
+#define U300_TIMER_APP_EGPT1_TIMER_ENABLE (0x00000000)
+/* Disable GP1 Timer 32bit (-/W) */
+#define U300_TIMER_APP_DGPT1 (0x0088)
+#define U300_TIMER_APP_DGPT1_TIMER_DISABLE (0x00000000)
+/* GP1 Timer Mode Register 32bit (-/W) */
+#define U300_TIMER_APP_SGPT1M (0x008c)
+#define U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT (0x00000001)
+/* GP1 Timer Status Register 32bit (R/-) */
+#define U300_TIMER_APP_GPT1S (0x0090)
+#define U300_TIMER_APP_GPT1S_TIMER_STATE_MASK (0x0000000F)
+#define U300_TIMER_APP_GPT1S_TIMER_STATE_IDLE (0x00000001)
+#define U300_TIMER_APP_GPT1S_TIMER_STATE_ACTIVE (0x00000002)
+#define U300_TIMER_APP_GPT1S_ENABLE_IND (0x00000010)
+#define U300_TIMER_APP_GPT1S_MODE_MASK (0x00000020)
+#define U300_TIMER_APP_GPT1S_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_GPT1S_MODE_ONE_SHOT (0x00000020)
+#define U300_TIMER_APP_GPT1S_IRQ_ENABLED_IND (0x00000040)
+#define U300_TIMER_APP_GPT1S_IRQ_PENDING_IND (0x00000080)
+/* GP1 Timer Current Count Register 32bit (R/-) */
+#define U300_TIMER_APP_GPT1CC (0x0094)
+/* GP1 Timer Terminal Count Register 32bit (R/W) */
+#define U300_TIMER_APP_GPT1TC (0x0098)
+/* GP1 Timer Interrupt Enable Register 32bit (-/W) */
+#define U300_TIMER_APP_GPT1IE (0x009c)
+#define U300_TIMER_APP_GPT1IE_IRQ_DISABLE (0x00000000)
+#define U300_TIMER_APP_GPT1IE_IRQ_ENABLE (0x00000001)
+/* GP1 Timer Interrupt Acknowledge Register 32bit (-/W) */
+#define U300_TIMER_APP_GPT1IA (0x00a0)
+#define U300_TIMER_APP_GPT1IA_IRQ_ACK (0x00000080)
+
+/* Reset GP2 Timer 32bit (-/W) */
+#define U300_TIMER_APP_RGPT2 (0x00c0)
+#define U300_TIMER_APP_RGPT2_TIMER_RESET (0x00000000)
+/* Enable GP2 Timer 32bit (-/W) */
+#define U300_TIMER_APP_EGPT2 (0x00c4)
+#define U300_TIMER_APP_EGPT2_TIMER_ENABLE (0x00000000)
+/* Disable GP2 Timer 32bit (-/W) */
+#define U300_TIMER_APP_DGPT2 (0x00c8)
+#define U300_TIMER_APP_DGPT2_TIMER_DISABLE (0x00000000)
+/* GP2 Timer Mode Register 32bit (-/W) */
+#define U300_TIMER_APP_SGPT2M (0x00cc)
+#define U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_SGPT2M_MODE_ONE_SHOT (0x00000001)
+/* GP2 Timer Status Register 32bit (R/-) */
+#define U300_TIMER_APP_GPT2S (0x00d0)
+#define U300_TIMER_APP_GPT2S_TIMER_STATE_MASK (0x0000000F)
+#define U300_TIMER_APP_GPT2S_TIMER_STATE_IDLE (0x00000001)
+#define U300_TIMER_APP_GPT2S_TIMER_STATE_ACTIVE (0x00000002)
+#define U300_TIMER_APP_GPT2S_ENABLE_IND (0x00000010)
+#define U300_TIMER_APP_GPT2S_MODE_MASK (0x00000020)
+#define U300_TIMER_APP_GPT2S_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_GPT2S_MODE_ONE_SHOT (0x00000020)
+#define U300_TIMER_APP_GPT2S_IRQ_ENABLED_IND (0x00000040)
+#define U300_TIMER_APP_GPT2S_IRQ_PENDING_IND (0x00000080)
+/* GP2 Timer Current Count Register 32bit (R/-) */
+#define U300_TIMER_APP_GPT2CC (0x00d4)
+/* GP2 Timer Terminal Count Register 32bit (R/W) */
+#define U300_TIMER_APP_GPT2TC (0x00d8)
+/* GP2 Timer Interrupt Enable Register 32bit (-/W) */
+#define U300_TIMER_APP_GPT2IE (0x00dc)
+#define U300_TIMER_APP_GPT2IE_IRQ_DISABLE (0x00000000)
+#define U300_TIMER_APP_GPT2IE_IRQ_ENABLE (0x00000001)
+/* GP2 Timer Interrupt Acknowledge Register 32bit (-/W) */
+#define U300_TIMER_APP_GPT2IA (0x00e0)
+#define U300_TIMER_APP_GPT2IA_IRQ_ACK (0x00000080)
+
+/* Clock request control register - all four timers */
+#define U300_TIMER_APP_CRC (0x100)
+#define U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE (0x00000001)
+
+static void __iomem *u300_timer_base;
+
+struct u300_clockevent_data {
+ struct clock_event_device cevd;
+ unsigned ticks_per_jiffy;
+};
+
+/*
+ * The u300_set_mode() function is always called first; if the
+ * oneshot timer is active, the oneshot scheduling function
+ * u300_set_next_event() is called immediately after.
+ */
+static void u300_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ struct u300_clockevent_data *cevdata =
+ container_of(evt, struct u300_clockevent_data, cevd);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* Disable interrupts on GPT1 */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Disable GP1 while we're reprogramming it. */
+ writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DGPT1);
+ /*
+ * Set the periodic mode to a certain number of ticks per
+ * jiffy.
+ */
+ writel(cevdata->ticks_per_jiffy,
+ u300_timer_base + U300_TIMER_APP_GPT1TC);
+ /*
+ * Set continuous mode, so the timer keeps triggering
+ * interrupts.
+ */
+ writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS,
+ u300_timer_base + U300_TIMER_APP_SGPT1M);
+ /* Enable timer interrupts */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Then enable the OS timer again */
+ writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+ u300_timer_base + U300_TIMER_APP_EGPT1);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* Just break; here? */
+ /*
+ * The actual event will be programmed by the next event hook,
+ * so we just set a dummy value somewhere at the end of the
+ * universe here.
+ */
+ /* Disable interrupts on GPT1 */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Disable GP1 while we're reprogramming it. */
+ writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DGPT1);
+ /*
+ * Expire far in the future, u300_set_next_event() will be
+ * called soon...
+ */
+ writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC);
+ /* We run one shot per tick here! */
+ writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
+ u300_timer_base + U300_TIMER_APP_SGPT1M);
+ /* Enable interrupts for this timer */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Enable timer */
+ writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+ u300_timer_base + U300_TIMER_APP_EGPT1);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ /* Disable interrupts on GP1 */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Disable GP1 */
+ writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DGPT1);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ /* Ignore this call */
+ break;
+ }
+}
+
+/*
+ * The app timer in one shot mode obviously has to be reprogrammed
+ * in EXACTLY this sequence to work properly. Do NOT try to e.g. replace
+ * the interrupt disable + timer disable commands with a reset command;
+ * it will fail miserably. Apparently (and I found this the hard way)
+ * the timer is very sensitive to the instruction order, though you don't
+ * get that impression from the data sheet.
+ */
+static int u300_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+
+{
+ /* Disable interrupts on GPT1 */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Disable GP1 while we're reprogramming it. */
+ writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DGPT1);
+ /* Reset the General Purpose timer 1. */
+ writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
+ u300_timer_base + U300_TIMER_APP_RGPT1);
+ /* IRQ in n * cycles */
+ writel(cycles, u300_timer_base + U300_TIMER_APP_GPT1TC);
+ /*
+	 * We run one shot per tick here! (This is necessary to reconfigure;
+	 * the timer will tilt if you don't!)
+ */
+ writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
+ u300_timer_base + U300_TIMER_APP_SGPT1M);
+ /* Enable timer interrupts */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Then enable the OS timer again */
+ writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+ u300_timer_base + U300_TIMER_APP_EGPT1);
+ return 0;
+}
+
+static struct u300_clockevent_data u300_clockevent_data = {
+ /* Use general purpose timer 1 as clock event */
+ .cevd = {
+ .name = "GPT1",
+ /* Reasonably fast and accurate clock event */
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = u300_set_next_event,
+ .set_mode = u300_set_mode,
+ },
+};
+
+/* Clock event timer interrupt handler */
+static irqreturn_t u300_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &u300_clockevent_data.cevd;
+ /* ACK/Clear timer IRQ for the APP GPT1 Timer */
+
+ writel(U300_TIMER_APP_GPT1IA_IRQ_ACK,
+ u300_timer_base + U300_TIMER_APP_GPT1IA);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction u300_timer_irq = {
+ .name = "U300 Timer Tick",
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = u300_timer_interrupt,
+};
+
+/*
+ * Override the global weak sched_clock symbol with this
+ * local implementation which uses the clocksource to get some
+ * better resolution when scheduling the kernel. We accept that
+ * this wraps around for now, since it is just a relative time
+ * stamp. (Inspired by OMAP implementation.)
+ */
+
+static u64 notrace u300_read_sched_clock(void)
+{
+ return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
+}
+
+static unsigned long u300_read_current_timer(void)
+{
+ return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
+}
+
+static struct delay_timer u300_delay_timer;
+
+/*
+ * This sets up the system timers, clock source and clock event.
+ */
+static void __init u300_timer_init_of(struct device_node *np)
+{
+ unsigned int irq;
+ struct clk *clk;
+ unsigned long rate;
+
+ u300_timer_base = of_iomap(np, 0);
+ if (!u300_timer_base)
+ panic("could not ioremap system timer\n");
+
+ /* Get the IRQ for the GP1 timer */
+ irq = irq_of_parse_and_map(np, 2);
+ if (!irq)
+ panic("no IRQ for system timer\n");
+
+ pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
+
+	/* Get and enable the timer block clock */
+ clk = of_clk_get(np, 0);
+ BUG_ON(IS_ERR(clk));
+ clk_prepare_enable(clk);
+ rate = clk_get_rate(clk);
+
+ u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
+
+ sched_clock_register(u300_read_sched_clock, 32, rate);
+
+ u300_delay_timer.read_current_timer = &u300_read_current_timer;
+ u300_delay_timer.freq = rate;
+ register_current_timer_delay(&u300_delay_timer);
+
+ /*
+ * Disable the "OS" and "DD" timers - these are designed for Symbian!
+ * Example usage in cnh1601578 cpu subsystem pd_timer_app.c
+ */
+ writel(U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE,
+ u300_timer_base + U300_TIMER_APP_CRC);
+ writel(U300_TIMER_APP_ROST_TIMER_RESET,
+ u300_timer_base + U300_TIMER_APP_ROST);
+ writel(U300_TIMER_APP_DOST_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DOST);
+ writel(U300_TIMER_APP_RDDT_TIMER_RESET,
+ u300_timer_base + U300_TIMER_APP_RDDT);
+ writel(U300_TIMER_APP_DDDT_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DDDT);
+
+ /* Reset the General Purpose timer 1. */
+ writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
+ u300_timer_base + U300_TIMER_APP_RGPT1);
+
+ /* Set up the IRQ handler */
+ setup_irq(irq, &u300_timer_irq);
+
+ /* Reset the General Purpose timer 2 */
+ writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
+ u300_timer_base + U300_TIMER_APP_RGPT2);
+ /* Set this timer to run around forever */
+ writel(0xFFFFFFFFU, u300_timer_base + U300_TIMER_APP_GPT2TC);
+ /* Set continuous mode so it wraps around */
+ writel(U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS,
+ u300_timer_base + U300_TIMER_APP_SGPT2M);
+ /* Disable timer interrupts */
+ writel(U300_TIMER_APP_GPT2IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT2IE);
+ /* Then enable the GP2 timer to use as a free running us counter */
+ writel(U300_TIMER_APP_EGPT2_TIMER_ENABLE,
+ u300_timer_base + U300_TIMER_APP_EGPT2);
+
+ /* Use general purpose timer 2 as clock source */
+ if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
+ "GPT2", rate, 300, 32, clocksource_mmio_readl_up))
+ pr_err("timer: failed to initialize U300 clock source\n");
+
+ /* Configure and register the clockevent */
+ clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
+ 1, 0xffffffff);
+
+ /*
+ * TODO: init and register the rest of the timers too, they can be
+ * used by hrtimers!
+ */
+}
+
+CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
+ u300_timer_init_of);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 18c5b9b16645..148d707a1d43 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -95,7 +95,7 @@ void proc_fork_connector(struct task_struct *task)
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
/* If cn_netlink_send() failed, the data is not sent */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_exec_connector(struct task_struct *task)
@@ -122,7 +122,7 @@ void proc_exec_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_id_connector(struct task_struct *task, int which_id)
@@ -163,7 +163,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_sid_connector(struct task_struct *task)
@@ -190,7 +190,7 @@ void proc_sid_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
@@ -225,7 +225,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_comm_connector(struct task_struct *task)
@@ -253,7 +253,7 @@ void proc_comm_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_coredump_connector(struct task_struct *task)
@@ -280,7 +280,7 @@ void proc_coredump_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_exit_connector(struct task_struct *task)
@@ -309,7 +309,7 @@ void proc_exit_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
/*
@@ -343,7 +343,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
/**
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index a36749f1e44a..b14f1d36f897 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -50,7 +50,7 @@ static int cn_already_initialized;
*
* Sequence number is incremented with each message to be sent.
*
- * If we expect reply to our message then the sequence number in
+ * If we expect a reply to our message then the sequence number in
* received message MUST be the same as in original message, and
* acknowledge number MUST be the same + 1.
*
@@ -62,8 +62,11 @@ static int cn_already_initialized;
* the acknowledgement number in the original message + 1, then it is
* a new message.
*
+ * The message is sent to the portid if given, to the group if given, or to
+ * both if both are given; if both are zero, the group is looked up and the
+ * message is sent there.
*/
-int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
+int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
+ gfp_t gfp_mask)
{
struct cn_callback_entry *__cbq;
unsigned int size;
@@ -74,7 +77,9 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
u32 group = 0;
int found = 0;
- if (!__group) {
+ if (portid || __group) {
+ group = __group;
+ } else {
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(__cbq, &dev->cbdev->queue_list,
callback_entry) {
@@ -88,11 +93,9 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
if (!found)
return -ENODEV;
- } else {
- group = __group;
}
- if (!netlink_has_listeners(dev->nls, group))
+ if (!portid && !netlink_has_listeners(dev->nls, group))
return -ESRCH;
size = sizeof(*msg) + msg->len;
@@ -113,7 +116,10 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
NETLINK_CB(skb).dst_group = group;
- return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);
+ if (group)
+ return netlink_broadcast(dev->nls, skb, portid, group,
+ gfp_mask);
+ return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT));
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
@@ -139,7 +145,6 @@ static int cn_call_callback(struct sk_buff *skb)
spin_unlock_bh(&dev->cbdev->queue_lock);
if (cbq != NULL) {
- err = 0;
cbq->callback(msg, nsp);
kfree_skb(skb);
cn_queue_release_callback(cbq);
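With the extra portid argument, callers of cn_netlink_send() choose between the old broadcast behaviour and a unicast reply. A minimal sketch of both cases follows; only the cn_netlink_send() signature and the CN_IDX_PROC index come from the patches above, while the helper and requester_portid are illustrative:

static void example_reply(struct cn_msg *msg, u32 requester_portid)
{
	/* portid == 0, group != 0: broadcast, the pre-patch behaviour */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);

	/* portid != 0, group == 0: unicast to that single socket */
	cn_netlink_send(msg, requester_portid, 0, GFP_KERNEL);
}

In a real callback the port would typically be taken from the received message's netlink control block, but that detail is outside these diffs.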
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 4b029c0944af..1fbe11f2a146 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -200,7 +200,7 @@ source "drivers/cpufreq/Kconfig.x86"
endmenu
menu "ARM CPU frequency scaling drivers"
-depends on ARM
+depends on ARM || ARM64
source "drivers/cpufreq/Kconfig.arm"
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 31297499a60a..0e9cce82844b 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,7 @@
# ARM CPU Frequency scaling drivers
#
+# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
@@ -16,12 +17,20 @@ config ARM_DT_BL_CPUFREQ
This enables probing via DT for Generic CPUfreq driver for ARM
big.LITTLE platform. This gets frequency tables from DT.
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+ help
+	  This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
+
+
config ARM_EXYNOS_CPUFREQ
bool
config ARM_EXYNOS4210_CPUFREQ
bool "SAMSUNG EXYNOS4210"
- depends on CPU_EXYNOS4210
+ depends on CPU_EXYNOS4210 && !ARCH_MULTIPLATFORM
default y
select ARM_EXYNOS_CPUFREQ
help
@@ -32,7 +41,7 @@ config ARM_EXYNOS4210_CPUFREQ
config ARM_EXYNOS4X12_CPUFREQ
bool "SAMSUNG EXYNOS4x12"
- depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+ depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
default y
select ARM_EXYNOS_CPUFREQ
help
@@ -43,7 +52,7 @@ config ARM_EXYNOS4X12_CPUFREQ
config ARM_EXYNOS5250_CPUFREQ
bool "SAMSUNG EXYNOS5250"
- depends on SOC_EXYNOS5250
+ depends on SOC_EXYNOS5250 && !ARCH_MULTIPLATFORM
default y
select ARM_EXYNOS_CPUFREQ
help
@@ -113,7 +122,7 @@ config ARM_INTEGRATOR
If in doubt, say Y.
config ARM_KIRKWOOD_CPUFREQ
- def_bool ARCH_KIRKWOOD && OF
+ def_bool MACH_KIRKWOOD
help
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
@@ -241,11 +250,3 @@ config ARM_TEGRA_CPUFREQ
default y
help
This adds the CPUFreq driver support for TEGRA SOCs.
-
-config ARM_VEXPRESS_SPC_CPUFREQ
- tristate "Versatile Express SPC based CPUfreq driver"
- select ARM_BIG_LITTLE_CPUFREQ
- depends on ARCH_VEXPRESS_SPC
- help
- This add the CPUfreq driver support for Versatile Express
- big.LITTLE platforms using SPC for power management.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index ca0021a96e19..72564b701b4a 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -54,3 +54,11 @@ config PPC_PASEMI_CPUFREQ
help
This adds the support for frequency switching on PA Semi
PWRficient processors.
+
+config POWERNV_CPUFREQ
+ tristate "CPU frequency scaling for IBM POWERNV platform"
+ depends on PPC_POWERNV
+ default y
+ help
+	  This adds support for CPU frequency switching on the IBM POWERNV
+	  platform.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 74945652dd7a..0dbb963c1aef 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_PPC_CORENET_CPUFREQ) += ppc-corenet-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o
obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += pasemi-cpufreq.o
+obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
##################################################################################
# Other platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 18448a7e9f86..000e4e0afd7e 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -754,7 +754,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_unreg;
}
- data->freq_table = kmalloc(sizeof(*data->freq_table) *
+ data->freq_table = kzalloc(sizeof(*data->freq_table) *
(perf->state_count+1), GFP_KERNEL);
if (!data->freq_table) {
result = -ENOMEM;
@@ -855,7 +855,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
pr_debug("acpi_cpufreq_cpu_exit\n");
if (data) {
- cpufreq_frequency_table_put_attr(policy->cpu);
per_cpu(acfreq_data, policy->cpu) = NULL;
acpi_processor_unregister_performance(data->acpi_data,
policy->cpu);
@@ -907,15 +906,16 @@ static void __init acpi_cpufreq_boost_init(void)
acpi_cpufreq_driver.boost_supported = true;
acpi_cpufreq_driver.boost_enabled = boost_state(0);
- get_online_cpus();
+
+ cpu_notifier_register_begin();
/* Force all MSRs to the same value */
boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
cpu_online_mask);
- register_cpu_notifier(&boost_nb);
+ __register_cpu_notifier(&boost_nb);
- put_online_cpus();
+ cpu_notifier_register_done();
}
}
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 72f87e9317e3..bad2ed317ba2 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -446,9 +446,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
}
if (cur_cluster < MAX_CLUSTERS) {
+ int cpu;
+
cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
- per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(physical_cluster, cpu) = cur_cluster;
} else {
/* Assumption: during init, we are always running on A15 */
per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
@@ -478,7 +481,6 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
return -ENODEV;
}
- cpufreq_frequency_table_put_attr(policy->cpu);
put_cluster_clk_and_freq_table(cpu_dev);
dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index a1c79f549edb..7b612c8bb09e 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -52,7 +52,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
{
unsigned int frequency, rate, min_freq;
- static struct clk *cpuclk;
+ struct clk *cpuclk;
int retval, steps, i;
if (policy->cpu != 0)
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
index e9e63fc9c2c9..a9f8e5bd0716 100644
--- a/drivers/cpufreq/blackfin-cpufreq.c
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -195,7 +195,6 @@ static struct cpufreq_driver bfin_driver = {
.target_index = bfin_target,
.get = bfin_getfreq_khz,
.init = __bfin_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "bfin cpufreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 0c12ffc0ebcb..1bf6bbac3e03 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -109,7 +109,6 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
.target_index = cpu0_set_target,
.get = cpufreq_generic_get,
.init = cpu0_cpufreq_init,
- .exit = cpufreq_generic_exit,
.name = "generic_cpu0",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index a05b876f375e..bc447b9003c3 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -270,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
freqs.old, freqs.new);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
/* Disable IRQs */
/* local_irq_save(flags); */
@@ -285,7 +285,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
/* Enable IRQs */
/* local_irq_restore(flags); */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 199b52b7c3e1..abda6609d3e7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,7 +26,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/syscore_ops.h>
+#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>
@@ -42,10 +42,11 @@ static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);
static LIST_HEAD(cpufreq_policy_list);
-#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-#endif
+
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
static inline bool has_target(void)
{
@@ -181,8 +182,8 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
if (!policy || IS_ERR(policy->clk)) {
- pr_err("%s: No %s associated to cpu: %d\n", __func__,
- policy ? "clk" : "policy", cpu);
+ pr_err("%s: No %s associated to cpu: %d\n",
+ __func__, policy ? "clk" : "policy", cpu);
return 0;
}
@@ -190,6 +191,12 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+ return per_cpu(cpufreq_cpu_data, cpu);
+}
+
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *policy = NULL;
@@ -254,15 +261,14 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
if (!l_p_j_ref_freq) {
l_p_j_ref = loops_per_jiffy;
l_p_j_ref_freq = ci->old;
- pr_debug("saving %lu as reference value for loops_per_jiffy; "
- "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
+ pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
+ l_p_j_ref, l_p_j_ref_freq);
}
- if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
- (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+ if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
ci->new);
- pr_debug("scaling loops_per_jiffy to %lu "
- "for frequency %u kHz\n", loops_per_jiffy, ci->new);
+ pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
+ loops_per_jiffy, ci->new);
}
}
#else
@@ -282,7 +288,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
freqs->flags = cpufreq_driver->flags;
pr_debug("notification %u of frequency transition to %u kHz\n",
- state, freqs->new);
+ state, freqs->new);
switch (state) {
@@ -294,9 +300,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
if ((policy) && (policy->cpu == freqs->cpu) &&
(policy->cur) && (policy->cur != freqs->old)) {
- pr_debug("Warning: CPU frequency is"
- " %u, cpufreq assumed %u kHz.\n",
- freqs->old, policy->cur);
+ pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
+ freqs->old, policy->cur);
freqs->old = policy->cur;
}
}
@@ -307,8 +312,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
case CPUFREQ_POSTCHANGE:
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
- pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
- (unsigned long)freqs->cpu);
+ pr_debug("FREQ: %lu - CPU: %lu\n",
+ (unsigned long)freqs->new, (unsigned long)freqs->cpu);
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
@@ -326,16 +331,15 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
* function. It is called twice on all CPU frequency changes that have
* external effects.
*/
-void cpufreq_notify_transition(struct cpufreq_policy *policy,
+static void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state)
{
for_each_cpu(freqs->cpu, policy->cpus)
__cpufreq_notify_transition(policy, freqs, state);
}
-EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
/* Do post notifications when there are chances that transition has failed */
-void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int transition_failed)
{
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
@@ -346,13 +350,47 @@ void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
-EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
+
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs)
+{
+wait:
+ wait_event(policy->transition_wait, !policy->transition_ongoing);
+
+ spin_lock(&policy->transition_lock);
+
+ if (unlikely(policy->transition_ongoing)) {
+ spin_unlock(&policy->transition_lock);
+ goto wait;
+ }
+
+ policy->transition_ongoing = true;
+
+ spin_unlock(&policy->transition_lock);
+
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
+
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int transition_failed)
+{
+ if (unlikely(WARN_ON(!policy->transition_ongoing)))
+ return;
+
+ cpufreq_notify_post_transition(policy, freqs, transition_failed);
+
+ policy->transition_ongoing = false;
+
+ wake_up(&policy->transition_wait);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
-ssize_t show_boost(struct kobject *kobj,
+static ssize_t show_boost(struct kobject *kobj,
struct attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
@@ -368,13 +406,13 @@ static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
return -EINVAL;
if (cpufreq_boost_trigger_state(enable)) {
- pr_err("%s: Cannot %s BOOST!\n", __func__,
- enable ? "enable" : "disable");
+ pr_err("%s: Cannot %s BOOST!\n",
+ __func__, enable ? "enable" : "disable");
return -EINVAL;
}
- pr_debug("%s: cpufreq BOOST %s\n", __func__,
- enable ? "enabled" : "disabled");
+ pr_debug("%s: cpufreq BOOST %s\n",
+ __func__, enable ? "enabled" : "disabled");
return count;
}
@@ -879,18 +917,25 @@ err_out_kobj_put:
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
+ struct cpufreq_governor *gov = NULL;
struct cpufreq_policy new_policy;
int ret = 0;
memcpy(&new_policy, policy, sizeof(*policy));
+ /* Update governor of new_policy to the governor used before hotplug */
+ gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+ if (gov)
+ pr_debug("Restoring governor %s for cpu %d\n",
+ policy->governor->name, policy->cpu);
+ else
+ gov = CPUFREQ_DEFAULT_GOVERNOR;
+
+ new_policy.governor = gov;
+
/* Use the default policy if its valid. */
if (cpufreq_driver->setpolicy)
- cpufreq_parse_governor(policy->governor->name,
- &new_policy.policy, NULL);
-
- /* assure that the starting sequence is run in cpufreq_set_policy */
- policy->governor = NULL;
+ cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
/* set default policy */
ret = cpufreq_set_policy(policy, &new_policy);
@@ -927,8 +972,11 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
up_write(&policy->rwsem);
if (has_target()) {
- if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
- (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (!ret)
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ if (ret) {
pr_err("%s: Failed to start governor\n", __func__);
return ret;
}
@@ -949,6 +997,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ policy->governor = NULL;
+
return policy;
}
@@ -968,6 +1018,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
INIT_LIST_HEAD(&policy->policy_list);
init_rwsem(&policy->rwsem);
+ spin_lock_init(&policy->transition_lock);
+ init_waitqueue_head(&policy->transition_wait);
return policy;
@@ -1022,21 +1074,19 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
up_write(&policy->rwsem);
- cpufreq_frequency_table_update_policy_cpu(policy);
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_UPDATE_POLICY_CPU, policy);
}
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
- bool frozen)
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int j, cpu = dev->id;
int ret = -ENOMEM;
struct cpufreq_policy *policy;
unsigned long flags;
+ bool recover_policy = cpufreq_suspended;
#ifdef CONFIG_HOTPLUG_CPU
struct cpufreq_policy *tpolicy;
- struct cpufreq_governor *gov;
#endif
if (cpu_is_offline(cpu))
@@ -1075,9 +1125,9 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
* Restore the saved policy when doing light-weight init and fall back
* to the full init if that fails.
*/
- policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+ policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
if (!policy) {
- frozen = false;
+ recover_policy = false;
policy = cpufreq_policy_alloc();
if (!policy)
goto nomem_out;
@@ -1089,12 +1139,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
* the creation of a brand new one. So we need to perform this update
* by invoking update_policy_cpu().
*/
- if (frozen && cpu != policy->cpu)
+ if (recover_policy && cpu != policy->cpu)
update_policy_cpu(policy, cpu);
else
policy->cpu = cpu;
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
cpumask_copy(policy->cpus, cpumask_of(cpu));
init_completion(&policy->kobj_unregister);
@@ -1118,7 +1167,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
*/
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
- if (!frozen) {
+ if (!recover_policy) {
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
}
@@ -1180,16 +1229,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
-#ifdef CONFIG_HOTPLUG_CPU
- gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
- if (gov) {
- policy->governor = gov;
- pr_debug("Restoring governor %s for cpu %d\n",
- policy->governor->name, cpu);
- }
-#endif
-
- if (!frozen) {
+ if (!recover_policy) {
ret = cpufreq_add_dev_interface(policy, dev);
if (ret)
goto err_out_unregister;
@@ -1203,7 +1243,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
cpufreq_init_policy(policy);
- if (!frozen) {
+ if (!recover_policy) {
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
}
@@ -1226,7 +1266,7 @@ err_get_freq:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
- if (frozen) {
+ if (recover_policy) {
/* Do not leave stale fallback data behind. */
per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
cpufreq_policy_put_kobj(policy);
@@ -1250,7 +1290,7 @@ nomem_out:
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- return __cpufreq_add_dev(dev, sif, false);
+ return __cpufreq_add_dev(dev, sif);
}
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
@@ -1265,7 +1305,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
if (ret) {
- pr_err("%s: Failed to move kobj: %d", __func__, ret);
+ pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
down_write(&policy->rwsem);
cpumask_set_cpu(old_cpu, policy->cpus);
@@ -1281,8 +1321,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
}
static int __cpufreq_remove_dev_prepare(struct device *dev,
- struct subsys_interface *sif,
- bool frozen)
+ struct subsys_interface *sif)
{
unsigned int cpu = dev->id, cpus;
int new_cpu, ret;
@@ -1296,7 +1335,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
policy = per_cpu(cpufreq_cpu_data, cpu);
/* Save the policy somewhere when doing a light-weight tear-down */
- if (frozen)
+ if (cpufreq_suspended)
per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1314,11 +1353,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
}
}
-#ifdef CONFIG_HOTPLUG_CPU
if (!cpufreq_driver->setpolicy)
strncpy(per_cpu(cpufreq_cpu_governor, cpu),
policy->governor->name, CPUFREQ_NAME_LEN);
-#endif
down_read(&policy->rwsem);
cpus = cpumask_weight(policy->cpus);
@@ -1331,19 +1368,19 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);
- if (!frozen) {
+ if (!cpufreq_suspended)
pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
- __func__, new_cpu, cpu);
- }
+ __func__, new_cpu, cpu);
}
+ } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
+ cpufreq_driver->stop_cpu(policy);
}
return 0;
}
static int __cpufreq_remove_dev_finish(struct device *dev,
- struct subsys_interface *sif,
- bool frozen)
+ struct subsys_interface *sif)
{
unsigned int cpu = dev->id, cpus;
int ret;
@@ -1373,12 +1410,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
CPUFREQ_GOV_POLICY_EXIT);
if (ret) {
pr_err("%s: Failed to exit governor\n",
- __func__);
+ __func__);
return ret;
}
}
- if (!frozen)
+ if (!cpufreq_suspended)
cpufreq_policy_put_kobj(policy);
/*
@@ -1394,16 +1431,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
list_del(&policy->policy_list);
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (!frozen)
+ if (!cpufreq_suspended)
cpufreq_policy_free(policy);
- } else {
- if (has_target()) {
- if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
- (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
- pr_err("%s: Failed to start governor\n",
- __func__);
- return ret;
- }
+ } else if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (!ret)
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ if (ret) {
+ pr_err("%s: Failed to start governor\n", __func__);
+ return ret;
}
}
@@ -1424,10 +1461,10 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (cpu_is_offline(cpu))
return 0;
- ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+ ret = __cpufreq_remove_dev_prepare(dev, sif);
if (!ret)
- ret = __cpufreq_remove_dev_finish(dev, sif, false);
+ ret = __cpufreq_remove_dev_finish(dev, sif);
return ret;
}
@@ -1458,8 +1495,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
struct cpufreq_freqs freqs;
unsigned long flags;
- pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
- "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
+ pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
+ old_freq, new_freq);
freqs.old = old_freq;
freqs.new = new_freq;
@@ -1468,8 +1505,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
policy = per_cpu(cpufreq_cpu_data, cpu);
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
@@ -1570,83 +1607,103 @@ static struct subsys_interface cpufreq_interface = {
.remove_dev = cpufreq_remove_dev,
};
+/*
+ * In case the platform wants a specific frequency to be configured
+ * during suspend.
+ */
+int cpufreq_generic_suspend(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ if (!policy->suspend_freq) {
+ pr_err("%s: suspend_freq can't be zero\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: Setting suspend-freq: %u\n", __func__,
+ policy->suspend_freq);
+
+ ret = __cpufreq_driver_target(policy, policy->suspend_freq,
+ CPUFREQ_RELATION_H);
+ if (ret)
+ pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
+ __func__, policy->suspend_freq, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_generic_suspend);
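For reference, a minimal sketch of how a driver opts in to this helper: set policy->suspend_freq in ->init and point ->suspend at cpufreq_generic_suspend() (the exynos conversion later in this patch does exactly this; the foo_* names below are illustrative and not part of this patch):
#include <linux/cpufreq.h>
static struct cpufreq_frequency_table foo_freq_table[] = {
	{0, 0, 500000},
	{0, 1, 1000000},
	{0, 0, CPUFREQ_TABLE_END},
};
static int foo_target(struct cpufreq_policy *policy, unsigned int index)
{
	/* Program the hardware for foo_freq_table[index] here. */
	return 0;
}
static int foo_cpu_init(struct cpufreq_policy *policy)
{
	/* Frequency cpufreq_generic_suspend() will program on suspend. */
	policy->suspend_freq = foo_freq_table[1].frequency;
	return cpufreq_generic_init(policy, foo_freq_table, 100000);
}
static struct cpufreq_driver foo_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_target,
	.get		= cpufreq_generic_get,
	.init		= foo_cpu_init,
	.suspend	= cpufreq_generic_suspend,
	.name		= "foo-cpufreq",
	.attr		= cpufreq_generic_attr,
};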
+
/**
- * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
+ * cpufreq_suspend() - Suspend CPUFreq governors
*
- * This function is only executed for the boot processor. The other CPUs
- * have been put offline by means of CPU hotplug.
+ * Called during system-wide suspend/hibernate cycles to suspend governors,
+ * as some platforms can't change frequency after this point in the suspend
+ * cycle: the devices they use for changing frequency (e.g. i2c, regulators)
+ * are themselves suspended shortly afterwards.
*/
-static int cpufreq_bp_suspend(void)
+void cpufreq_suspend(void)
{
- int ret = 0;
-
- int cpu = smp_processor_id();
struct cpufreq_policy *policy;
- pr_debug("suspending cpu %u\n", cpu);
+ if (!cpufreq_driver)
+ return;
- /* If there's no policy for the boot CPU, we have nothing to do. */
- policy = cpufreq_cpu_get(cpu);
- if (!policy)
- return 0;
+ if (!has_target())
+ return;
- if (cpufreq_driver->suspend) {
- ret = cpufreq_driver->suspend(policy);
- if (ret)
- printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
- "step on CPU %u\n", policy->cpu);
+ pr_debug("%s: Suspending Governors\n", __func__);
+
+ list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+ pr_err("%s: Failed to stop governor for policy: %p\n",
+ __func__, policy);
+ else if (cpufreq_driver->suspend
+ && cpufreq_driver->suspend(policy))
+ pr_err("%s: Failed to suspend driver: %p\n", __func__,
+ policy);
}
- cpufreq_cpu_put(policy);
- return ret;
+ cpufreq_suspended = true;
}
/**
- * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
- *
- * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
- * restored. It will verify that the current freq is in sync with
- * what we believe it to be. This is a bit later than when it
- * should be, but nonethteless it's better than calling
- * cpufreq_driver->get() here which might re-enable interrupts...
+ * cpufreq_resume() - Resume CPUFreq governors
*
- * This function is only executed for the boot CPU. The other CPUs have not
- * been turned on yet.
+ * Called during system-wide suspend/hibernate cycles to resume governors that
+ * were suspended by cpufreq_suspend().
*/
-static void cpufreq_bp_resume(void)
+void cpufreq_resume(void)
{
- int ret = 0;
-
- int cpu = smp_processor_id();
struct cpufreq_policy *policy;
- pr_debug("resuming cpu %u\n", cpu);
+ if (!cpufreq_driver)
+ return;
- /* If there's no policy for the boot CPU, we have nothing to do. */
- policy = cpufreq_cpu_get(cpu);
- if (!policy)
+ if (!has_target())
return;
- if (cpufreq_driver->resume) {
- ret = cpufreq_driver->resume(policy);
- if (ret) {
- printk(KERN_ERR "cpufreq: resume failed in ->resume "
- "step on CPU %u\n", policy->cpu);
- goto fail;
- }
- }
+ pr_debug("%s: Resuming Governors\n", __func__);
- schedule_work(&policy->update);
+ cpufreq_suspended = false;
-fail:
- cpufreq_cpu_put(policy);
-}
+ list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+ pr_err("%s: Failed to resume driver: %p\n", __func__,
+ policy);
+ else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+ || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+ pr_err("%s: Failed to start governor for policy: %p\n",
+ __func__, policy);
-static struct syscore_ops cpufreq_syscore_ops = {
- .suspend = cpufreq_bp_suspend,
- .resume = cpufreq_bp_resume,
-};
+ /*
+ * Schedule a call to cpufreq_update_policy() for the boot CPU,
+ * i.e. the last policy in the list. It will verify that the
+ * current freq is in sync with what we believe it to be.
+ */
+ if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
+ schedule_work(&policy->update);
+ }
+}
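These two entry points replace the boot-CPU syscore hooks removed later in this file; the intended ordering relative to device suspend is roughly as sketched below (the actual call sites live in the PM core, outside this diff, so treat the placement as an assumption):
/*
 * Rough ordering sketch (assumption: exact call sites are outside this diff):
 *
 *   system suspend:  ... -> cpufreq_suspend()   // stop governors, then
 *                                               // cpufreq_driver->suspend()
 *                    ... -> the devices used to change frequency (i2c,
 *                           regulators, ...) are suspended; no further
 *                           frequency changes occur
 *
 *   system resume:   ... -> those devices resume
 *                    ... -> cpufreq_resume()    // cpufreq_driver->resume(),
 *                                               // restart governors, refresh
 *                                               // the last (boot CPU) policy
 */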
/**
* cpufreq_get_current_driver - return current driver's name
@@ -1762,7 +1819,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
target_freq = policy->min;
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
- policy->cpu, target_freq, relation, old_target_freq);
+ policy->cpu, target_freq, relation, old_target_freq);
/*
* This might look like a redundant call as we are checking it again
@@ -1807,20 +1864,18 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
freqs.flags = 0;
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
- __func__, policy->cpu, freqs.old,
- freqs.new);
+ __func__, policy->cpu, freqs.old, freqs.new);
- cpufreq_notify_transition(policy, &freqs,
- CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
}
retval = cpufreq_driver->target_index(policy, index);
if (retval)
pr_err("%s: Failed to change cpu frequency: %d\n",
- __func__, retval);
+ __func__, retval);
if (notify)
- cpufreq_notify_post_transition(policy, &freqs, retval);
+ cpufreq_freq_transition_end(policy, &freqs, retval);
}
out:
@@ -1863,17 +1918,18 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
struct cpufreq_governor *gov = NULL;
#endif
+ /* Don't start any governor operations if we are entering suspend */
+ if (cpufreq_suspended)
+ return 0;
+
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >
policy->governor->max_transition_latency) {
if (!gov)
return -EINVAL;
else {
- printk(KERN_WARNING "%s governor failed, too long"
- " transition latency of HW, fallback"
- " to %s governor\n",
- policy->governor->name,
- gov->name);
+ pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
+ policy->governor->name, gov->name);
policy->governor = gov;
}
}
@@ -1883,7 +1939,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
return -EINVAL;
pr_debug("__cpufreq_governor for CPU %u, event %u\n",
- policy->cpu, event);
+ policy->cpu, event);
mutex_lock(&cpufreq_governor_lock);
if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
@@ -1950,9 +2006,7 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
-#ifdef CONFIG_HOTPLUG_CPU
int cpu;
-#endif
if (!governor)
return;
@@ -1960,14 +2014,12 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
if (cpufreq_disabled())
return;
-#ifdef CONFIG_HOTPLUG_CPU
for_each_present_cpu(cpu) {
if (cpu_online(cpu))
continue;
if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
}
-#endif
mutex_lock(&cpufreq_governor_mutex);
list_del(&governor->governor_list);
@@ -2012,22 +2064,21 @@ EXPORT_SYMBOL(cpufreq_get_policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy)
{
- int ret = 0, failed = 1;
+ struct cpufreq_governor *old_gov;
+ int ret;
- pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
- new_policy->min, new_policy->max);
+ pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+ new_policy->cpu, new_policy->min, new_policy->max);
memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
- if (new_policy->min > policy->max || new_policy->max < policy->min) {
- ret = -EINVAL;
- goto error_out;
- }
+ if (new_policy->min > policy->max || new_policy->max < policy->min)
+ return -EINVAL;
/* verify the cpu speed can be set within this limit */
ret = cpufreq_driver->verify(new_policy);
if (ret)
- goto error_out;
+ return ret;
/* adjust if necessary - all reasons */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2043,7 +2094,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
ret = cpufreq_driver->verify(new_policy);
if (ret)
- goto error_out;
+ return ret;
/* notification of the new policy */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2053,63 +2104,53 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->max = new_policy->max;
pr_debug("new min and max freqs are %u - %u kHz\n",
- policy->min, policy->max);
+ policy->min, policy->max);
if (cpufreq_driver->setpolicy) {
policy->policy = new_policy->policy;
pr_debug("setting range\n");
- ret = cpufreq_driver->setpolicy(new_policy);
- } else {
- if (new_policy->governor != policy->governor) {
- /* save old, working values */
- struct cpufreq_governor *old_gov = policy->governor;
-
- pr_debug("governor switch\n");
-
- /* end old governor */
- if (policy->governor) {
- __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- up_write(&policy->rwsem);
- __cpufreq_governor(policy,
- CPUFREQ_GOV_POLICY_EXIT);
- down_write(&policy->rwsem);
- }
+ return cpufreq_driver->setpolicy(new_policy);
+ }
- /* start new governor */
- policy->governor = new_policy->governor;
- if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
- if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
- failed = 0;
- } else {
- up_write(&policy->rwsem);
- __cpufreq_governor(policy,
- CPUFREQ_GOV_POLICY_EXIT);
- down_write(&policy->rwsem);
- }
- }
+ if (new_policy->governor == policy->governor)
+ goto out;
- if (failed) {
- /* new governor failed, so re-start old one */
- pr_debug("starting governor %s failed\n",
- policy->governor->name);
- if (old_gov) {
- policy->governor = old_gov;
- __cpufreq_governor(policy,
- CPUFREQ_GOV_POLICY_INIT);
- __cpufreq_governor(policy,
- CPUFREQ_GOV_START);
- }
- ret = -EINVAL;
- goto error_out;
- }
- /* might be a policy change, too, so fall through */
- }
- pr_debug("governor: change or update limits\n");
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ pr_debug("governor switch\n");
+
+ /* save old, working values */
+ old_gov = policy->governor;
+ /* end old governor */
+ if (old_gov) {
+ __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ up_write(&policy->rwsem);
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ down_write(&policy->rwsem);
}
-error_out:
- return ret;
+ /* start new governor */
+ policy->governor = new_policy->governor;
+ if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+ if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+ goto out;
+
+ up_write(&policy->rwsem);
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ down_write(&policy->rwsem);
+ }
+
+ /* new governor failed, so re-start old one */
+ pr_debug("starting governor %s failed\n", policy->governor->name);
+ if (old_gov) {
+ policy->governor = old_gov;
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+ __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ }
+
+ return -EINVAL;
+
+ out:
+ pr_debug("governor: change or update limits\n");
+ return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
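The rewritten cpufreq_set_policy() above now reads as a straight-line sequence; in outline:
/*
 * Outline of the governor-switch path implemented above:
 *
 *   1. GOV_STOP and POLICY_EXIT the old governor (policy->rwsem is dropped
 *      around POLICY_EXIT, as before).
 *   2. Install new_policy->governor, then POLICY_INIT + GOV_START it.
 *   3. Success: jump to "out" and issue GOV_LIMITS for the new limits.
 *   4. Failure: POLICY_EXIT the new governor, reinstall and restart the
 *      old one, and return -EINVAL.
 */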
/**
@@ -2145,8 +2186,13 @@ int cpufreq_update_policy(unsigned int cpu)
*/
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
new_policy.cur = cpufreq_driver->get(cpu);
+ if (WARN_ON(!new_policy.cur)) {
+ ret = -EIO;
+ goto no_policy;
+ }
+
if (!policy->cur) {
- pr_debug("Driver did not initialize current freq");
+ pr_debug("Driver did not initialize current freq\n");
policy->cur = new_policy.cur;
} else {
if (policy->cur != new_policy.cur && has_target())
@@ -2170,30 +2216,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long)hcpu;
struct device *dev;
- bool frozen = false;
dev = get_cpu_device(cpu);
if (dev) {
-
- if (action & CPU_TASKS_FROZEN)
- frozen = true;
-
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
- __cpufreq_add_dev(dev, NULL, frozen);
- cpufreq_update_policy(cpu);
+ __cpufreq_add_dev(dev, NULL);
break;
case CPU_DOWN_PREPARE:
- __cpufreq_remove_dev_prepare(dev, NULL, frozen);
+ __cpufreq_remove_dev_prepare(dev, NULL);
break;
case CPU_POST_DEAD:
- __cpufreq_remove_dev_finish(dev, NULL, frozen);
+ __cpufreq_remove_dev_finish(dev, NULL);
break;
case CPU_DOWN_FAILED:
- __cpufreq_add_dev(dev, NULL, frozen);
+ __cpufreq_add_dev(dev, NULL);
break;
}
}
@@ -2249,8 +2289,8 @@ int cpufreq_boost_trigger_state(int state)
cpufreq_driver->boost_enabled = !state;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- pr_err("%s: Cannot %s BOOST\n", __func__,
- state ? "enable" : "disable");
+ pr_err("%s: Cannot %s BOOST\n",
+ __func__, state ? "enable" : "disable");
}
return ret;
@@ -2295,7 +2335,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (!driver_data || !driver_data->verify || !driver_data->init ||
!(driver_data->setpolicy || driver_data->target_index ||
- driver_data->target))
+ driver_data->target) ||
+ (driver_data->setpolicy && (driver_data->target_index ||
+ driver_data->target)))
return -EINVAL;
pr_debug("trying to register driver %s\n", driver_data->name);
@@ -2322,7 +2364,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
ret = cpufreq_sysfs_create_file(&boost.attr);
if (ret) {
pr_err("%s: cannot register global BOOST sysfs file\n",
- __func__);
+ __func__);
goto err_null_driver;
}
}
@@ -2345,7 +2387,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
/* if all ->init() calls failed, unregister */
if (ret) {
pr_debug("no CPU initialized for driver %s\n",
- driver_data->name);
+ driver_data->name);
goto err_if_unreg;
}
}
@@ -2409,7 +2451,6 @@ static int __init cpufreq_core_init(void)
cpufreq_global_kobject = kobject_create();
BUG_ON(!cpufreq_global_kobject);
- register_syscore_ops(&cpufreq_syscore_ops);
return 0;
}
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 5793e1447fb1..ecaaebf969fc 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -13,7 +13,7 @@
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
static spinlock_t cpufreq_stats_lock;
@@ -180,27 +180,25 @@ static void cpufreq_stats_free_table(unsigned int cpu)
cpufreq_cpu_put(policy);
}
-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
unsigned int i, j, count = 0, ret = 0;
struct cpufreq_stats *stat;
- struct cpufreq_policy *current_policy;
unsigned int alloc_size;
unsigned int cpu = policy->cpu;
+ struct cpufreq_frequency_table *table;
+
+ table = cpufreq_frequency_get_table(cpu);
+ if (unlikely(!table))
+ return 0;
+
if (per_cpu(cpufreq_stats_table, cpu))
return -EBUSY;
stat = kzalloc(sizeof(*stat), GFP_KERNEL);
if ((stat) == NULL)
return -ENOMEM;
- current_policy = cpufreq_cpu_get(cpu);
- if (current_policy == NULL) {
- ret = -EINVAL;
- goto error_get_fail;
- }
-
- ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
+ ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
if (ret)
goto error_out;
@@ -223,7 +221,7 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
if (!stat->time_in_state) {
ret = -ENOMEM;
- goto error_out;
+ goto error_alloc;
}
stat->freq_table = (unsigned int *)(stat->time_in_state + count);
@@ -243,11 +241,10 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
stat->last_time = get_jiffies_64();
stat->last_index = freq_table_get_index(stat, policy->cur);
spin_unlock(&cpufreq_stats_lock);
- cpufreq_cpu_put(current_policy);
return 0;
+error_alloc:
+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
error_out:
- cpufreq_cpu_put(current_policy);
-error_get_fail:
kfree(stat);
per_cpu(cpufreq_stats_table, cpu) = NULL;
return ret;
@@ -256,7 +253,6 @@ error_get_fail:
static void cpufreq_stats_create_table(unsigned int cpu)
{
struct cpufreq_policy *policy;
- struct cpufreq_frequency_table *table;
/*
* "likely(!policy)" because normally cpufreq_stats will be registered
@@ -266,9 +262,7 @@ static void cpufreq_stats_create_table(unsigned int cpu)
if (likely(!policy))
return;
- table = cpufreq_frequency_get_table(policy->cpu);
- if (likely(table))
- __cpufreq_stats_create_table(policy, table);
+ __cpufreq_stats_create_table(policy);
cpufreq_cpu_put(policy);
}
@@ -291,20 +285,14 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
{
int ret = 0;
struct cpufreq_policy *policy = data;
- struct cpufreq_frequency_table *table;
- unsigned int cpu = policy->cpu;
if (val == CPUFREQ_UPDATE_POLICY_CPU) {
cpufreq_stats_update_policy_cpu(policy);
return 0;
}
- table = cpufreq_frequency_get_table(cpu);
- if (!table)
- return 0;
-
if (val == CPUFREQ_CREATE_POLICY)
- ret = __cpufreq_stats_create_table(policy, table);
+ ret = __cpufreq_stats_create_table(policy);
else if (val == CPUFREQ_REMOVE_POLICY)
__cpufreq_stats_free_table(policy);
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
index 86559040c54c..601b88c490cf 100644
--- a/drivers/cpufreq/cris-artpec3-cpufreq.c
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -15,9 +15,9 @@ static struct notifier_block cris_sdram_freq_notifier_block = {
};
static struct cpufreq_frequency_table cris_freq_table[] = {
- {0x01, 6000},
- {0x02, 200000},
- {0, CPUFREQ_TABLE_END},
+ {0, 0x01, 6000},
+ {0, 0x02, 200000},
+ {0, 0, CPUFREQ_TABLE_END},
};
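The extra leading initializer added across these frequency tables corresponds to a new flags member; assuming the layout implied by the {flags, driver_data, frequency} pattern and by the new table[i].flags & CPUFREQ_BOOST_FREQ tests in freq_table.c below, an entry now looks like:
struct cpufreq_frequency_table {
	unsigned int	flags;		/* e.g. CPUFREQ_BOOST_FREQ */
	unsigned int	driver_data;	/* driver-specific, unused by the core */
	unsigned int	frequency;	/* kHz, or CPUFREQ_ENTRY_INVALID /
					 * CPUFREQ_TABLE_END markers */
};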
static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
@@ -57,7 +57,6 @@ static struct cpufreq_driver cris_freq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cris_freq_target,
.init = cris_freq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "cris_freq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
index 26d940d40b1d..22b2cdde74d9 100644
--- a/drivers/cpufreq/cris-etraxfs-cpufreq.c
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -15,9 +15,9 @@ static struct notifier_block cris_sdram_freq_notifier_block = {
};
static struct cpufreq_frequency_table cris_freq_table[] = {
- {0x01, 6000},
- {0x02, 200000},
- {0, CPUFREQ_TABLE_END},
+ {0, 0x01, 6000},
+ {0, 0x02, 200000},
+ {0, 0, CPUFREQ_TABLE_END},
};
static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
@@ -57,7 +57,6 @@ static struct cpufreq_driver cris_freq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cris_freq_target,
.init = cris_freq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "cris_freq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 2cf33848d86e..28a16dc6e02e 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -125,7 +125,6 @@ static struct cpufreq_driver davinci_driver = {
.target_index = davinci_target,
.get = cpufreq_generic_get,
.init = davinci_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "davinci",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 9012b8bb6b64..a0d2a423cea9 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -382,7 +382,6 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
unsigned int cpu = policy->cpu;
/* Bye */
- cpufreq_frequency_table_put_attr(policy->cpu);
kfree(eps_cpu[cpu]);
eps_cpu[cpu] = NULL;
return 0;
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index de08acff5101..7f5d2a68c353 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -56,15 +56,15 @@ static struct s_elan_multiplier elan_multiplier[] = {
};
static struct cpufreq_frequency_table elanfreq_table[] = {
- {0, 1000},
- {1, 2000},
- {2, 4000},
- {3, 8000},
- {4, 16000},
- {5, 33000},
- {6, 66000},
- {7, 99000},
- {0, CPUFREQ_TABLE_END},
+ {0, 0, 1000},
+ {0, 1, 2000},
+ {0, 2, 4000},
+ {0, 3, 8000},
+ {0, 4, 16000},
+ {0, 5, 33000},
+ {0, 6, 66000},
+ {0, 7, 99000},
+ {0, 0, CPUFREQ_TABLE_END},
};
@@ -198,7 +198,6 @@ static struct cpufreq_driver elanfreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = elanfreq_target,
.init = elanfreq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "elanfreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index fcd2914d081a..f99cfe24e7bc 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -16,7 +16,6 @@
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
-#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <plat/cpu.h>
@@ -24,12 +23,8 @@
#include "exynos-cpufreq.h"
static struct exynos_dvfs_info *exynos_info;
-
static struct regulator *arm_regulator;
-
static unsigned int locking_frequency;
-static bool frequency_locked;
-static DEFINE_MUTEX(cpufreq_lock);
static int exynos_cpufreq_get_index(unsigned int freq)
{
@@ -134,83 +129,13 @@ out:
static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
- int ret = 0;
-
- mutex_lock(&cpufreq_lock);
-
- if (frequency_locked)
- goto out;
-
- ret = exynos_cpufreq_scale(freq_table[index].frequency);
-
-out:
- mutex_unlock(&cpufreq_lock);
-
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int exynos_cpufreq_suspend(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
-static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
-{
- return 0;
-}
-#endif
-
-/**
- * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
- * context
- * @notifier
- * @pm_event
- * @v
- *
- * While frequency_locked == true, target() ignores every frequency but
- * locking_frequency. The locking_frequency value is the initial frequency,
- * which is set by the bootloader. In order to eliminate possible
- * inconsistency in clock values, we save and restore frequencies during
- * suspend and resume and block CPUFREQ activities. Note that the standard
- * suspend/resume cannot be used as they are too deep (syscore_ops) for
- * regulator actions.
- */
-static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
- unsigned long pm_event, void *v)
-{
- int ret;
-
- switch (pm_event) {
- case PM_SUSPEND_PREPARE:
- mutex_lock(&cpufreq_lock);
- frequency_locked = true;
- mutex_unlock(&cpufreq_lock);
-
- ret = exynos_cpufreq_scale(locking_frequency);
- if (ret < 0)
- return NOTIFY_BAD;
-
- break;
-
- case PM_POST_SUSPEND:
- mutex_lock(&cpufreq_lock);
- frequency_locked = false;
- mutex_unlock(&cpufreq_lock);
- break;
- }
-
- return NOTIFY_OK;
+ return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency);
}
-static struct notifier_block exynos_cpufreq_nb = {
- .notifier_call = exynos_cpufreq_pm_notifier,
-};
-
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
policy->clk = exynos_info->cpu_clk;
+ policy->suspend_freq = locking_frequency;
return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
}
@@ -220,15 +145,13 @@ static struct cpufreq_driver exynos_driver = {
.target_index = exynos_target,
.get = cpufreq_generic_get,
.init = exynos_cpufreq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "exynos_cpufreq",
.attr = cpufreq_generic_attr,
#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW
.boost_supported = true,
#endif
#ifdef CONFIG_PM
- .suspend = exynos_cpufreq_suspend,
- .resume = exynos_cpufreq_resume,
+ .suspend = cpufreq_generic_suspend,
#endif
};
@@ -263,19 +186,13 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
goto err_vdd_arm;
}
+ /* Done here as we want to capture boot frequency */
locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
- register_pm_notifier(&exynos_cpufreq_nb);
-
- if (cpufreq_register_driver(&exynos_driver)) {
- pr_err("%s: failed to register cpufreq driver\n", __func__);
- goto err_cpufreq;
- }
-
- return 0;
-err_cpufreq:
- unregister_pm_notifier(&exynos_cpufreq_nb);
+ if (!cpufreq_register_driver(&exynos_driver))
+ return 0;
+ pr_err("%s: failed to register cpufreq driver\n", __func__);
regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index 40d84c43d8f4..6384e5b9a347 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -29,12 +29,12 @@ static unsigned int exynos4210_volt_table[] = {
};
static struct cpufreq_frequency_table exynos4210_freq_table[] = {
- {L0, 1200 * 1000},
- {L1, 1000 * 1000},
- {L2, 800 * 1000},
- {L3, 500 * 1000},
- {L4, 200 * 1000},
- {0, CPUFREQ_TABLE_END},
+ {0, L0, 1200 * 1000},
+ {0, L1, 1000 * 1000},
+ {0, L2, 800 * 1000},
+ {0, L3, 500 * 1000},
+ {0, L4, 200 * 1000},
+ {0, 0, CPUFREQ_TABLE_END},
};
static struct apll_freq apll_freq_4210[] = {
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 7c11ace3b3fc..466c76ad335b 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -30,21 +30,21 @@ static unsigned int exynos4x12_volt_table[] = {
};
static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
- {CPUFREQ_BOOST_FREQ, 1500 * 1000},
- {L1, 1400 * 1000},
- {L2, 1300 * 1000},
- {L3, 1200 * 1000},
- {L4, 1100 * 1000},
- {L5, 1000 * 1000},
- {L6, 900 * 1000},
- {L7, 800 * 1000},
- {L8, 700 * 1000},
- {L9, 600 * 1000},
- {L10, 500 * 1000},
- {L11, 400 * 1000},
- {L12, 300 * 1000},
- {L13, 200 * 1000},
- {0, CPUFREQ_TABLE_END},
+ {CPUFREQ_BOOST_FREQ, L0, 1500 * 1000},
+ {0, L1, 1400 * 1000},
+ {0, L2, 1300 * 1000},
+ {0, L3, 1200 * 1000},
+ {0, L4, 1100 * 1000},
+ {0, L5, 1000 * 1000},
+ {0, L6, 900 * 1000},
+ {0, L7, 800 * 1000},
+ {0, L8, 700 * 1000},
+ {0, L9, 600 * 1000},
+ {0, L10, 500 * 1000},
+ {0, L11, 400 * 1000},
+ {0, L12, 300 * 1000},
+ {0, L13, 200 * 1000},
+ {0, 0, CPUFREQ_TABLE_END},
};
static struct apll_freq *apll_freq_4x12;
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index 5f90b82a4082..363a0b3fe1b1 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -34,23 +34,23 @@ static unsigned int exynos5250_volt_table[] = {
};
static struct cpufreq_frequency_table exynos5250_freq_table[] = {
- {L0, 1700 * 1000},
- {L1, 1600 * 1000},
- {L2, 1500 * 1000},
- {L3, 1400 * 1000},
- {L4, 1300 * 1000},
- {L5, 1200 * 1000},
- {L6, 1100 * 1000},
- {L7, 1000 * 1000},
- {L8, 900 * 1000},
- {L9, 800 * 1000},
- {L10, 700 * 1000},
- {L11, 600 * 1000},
- {L12, 500 * 1000},
- {L13, 400 * 1000},
- {L14, 300 * 1000},
- {L15, 200 * 1000},
- {0, CPUFREQ_TABLE_END},
+ {0, L0, 1700 * 1000},
+ {0, L1, 1600 * 1000},
+ {0, L2, 1500 * 1000},
+ {0, L3, 1400 * 1000},
+ {0, L4, 1300 * 1000},
+ {0, L5, 1200 * 1000},
+ {0, L6, 1100 * 1000},
+ {0, L7, 1000 * 1000},
+ {0, L8, 900 * 1000},
+ {0, L9, 800 * 1000},
+ {0, L10, 700 * 1000},
+ {0, L11, 600 * 1000},
+ {0, L12, 500 * 1000},
+ {0, L13, 400 * 1000},
+ {0, L14, 300 * 1000},
+ {0, L15, 200 * 1000},
+ {0, 0, CPUFREQ_TABLE_END},
};
static struct apll_freq apll_freq_5250[] = {
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index 49b756015316..a6b8214d7b77 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -219,7 +219,7 @@ static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
freqs.old = policy->cur;
freqs.new = freq_table[index].frequency;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
/* Set the target frequency in all C0_3_PSTATE register */
for_each_cpu(i, policy->cpus) {
@@ -258,7 +258,7 @@ static void exynos_cpufreq_work(struct work_struct *work)
dev_crit(dvfs_info->dev, "New frequency out of range\n");
freqs.new = freqs.old;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
cpufreq_cpu_put(policy);
mutex_unlock(&cpufreq_lock);
@@ -312,7 +312,6 @@ static struct cpufreq_driver exynos_driver = {
.target_index = exynos_target,
.get = cpufreq_generic_get,
.init = exynos_cpufreq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = CPUFREQ_NAME,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 8e54f97899ba..08e7bbcf6d73 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -33,11 +33,10 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
continue;
}
if (!cpufreq_boost_enabled()
- && table[i].driver_data == CPUFREQ_BOOST_FREQ)
+ && (table[i].flags & CPUFREQ_BOOST_FREQ))
continue;
- pr_debug("table entry %u: %u kHz, %u driver_data\n",
- i, freq, table[i].driver_data);
+ pr_debug("table entry %u: %u kHz\n", i, freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
@@ -91,8 +90,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
/*
- * Generic routine to verify policy & frequency table, requires driver to call
- * cpufreq_frequency_table_get_attr() prior to it.
+ * Generic routine to verify policy & frequency table, requires driver to set
+ * policy->freq_table prior to it.
*/
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
{
@@ -175,8 +174,8 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
} else
*index = optimal.driver_data;
- pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
- table[*index].driver_data);
+ pr_debug("target index is %u, freq is:%u kHz\n", *index,
+ table[*index].frequency);
return 0;
}
@@ -203,8 +202,6 @@ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
-static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
-
/**
* show_available_freqs - show available frequencies for the specified CPU
*/
@@ -212,15 +209,12 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
bool show_boost)
{
unsigned int i = 0;
- unsigned int cpu = policy->cpu;
ssize_t count = 0;
- struct cpufreq_frequency_table *table;
+ struct cpufreq_frequency_table *table = policy->freq_table;
- if (!per_cpu(cpufreq_show_table, cpu))
+ if (!table)
return -ENODEV;
- table = per_cpu(cpufreq_show_table, cpu);
-
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
continue;
@@ -235,7 +229,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
* show_boost = false and driver_data != BOOST freq
* display NON BOOST freqs
*/
- if (show_boost ^ (table[i].driver_data == CPUFREQ_BOOST_FREQ))
+ if (show_boost ^ (table[i].flags & CPUFREQ_BOOST_FREQ))
continue;
count += sprintf(&buf[count], "%d ", table[i].frequency);
@@ -283,49 +277,24 @@ struct freq_attr *cpufreq_generic_attr[] = {
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
-/*
- * if you use these, you must assure that the frequency table is valid
- * all the time between get_attr and put_attr!
- */
-void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
- unsigned int cpu)
-{
- pr_debug("setting show_table for cpu %u to %p\n", cpu, table);
- per_cpu(cpufreq_show_table, cpu) = table;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
-
-void cpufreq_frequency_table_put_attr(unsigned int cpu)
-{
- pr_debug("clearing show_table for cpu %u\n", cpu);
- per_cpu(cpufreq_show_table, cpu) = NULL;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
-
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
int ret = cpufreq_frequency_table_cpuinfo(policy, table);
if (!ret)
- cpufreq_frequency_table_get_attr(table, policy->cpu);
+ policy->freq_table = table;
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
-void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
-{
- pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
- policy->cpu, policy->last_cpu);
- per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
- policy->last_cpu);
- per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
-}
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
- return per_cpu(cpufreq_show_table, cpu);
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ return policy ? policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
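With the per-CPU show_table removed, the available-frequencies attributes read policy->freq_table directly; a driver only has to validate its table in ->init, which now also publishes it (a minimal sketch; the bar_* names are illustrative and not part of this patch):
static struct cpufreq_frequency_table bar_freq_table[] = {
	{0, 0, 500000},
	{0, 1, 1000000},
	{0, 0, CPUFREQ_TABLE_END},
};
static int bar_cpu_init(struct cpufreq_policy *policy)
{
	/* On success this also sets policy->freq_table for the sysfs attrs. */
	return cpufreq_table_validate_and_show(policy, bar_freq_table);
}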
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index d83e8266a58e..1d723dc8880c 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -265,7 +265,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
freqs.new = new_khz;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
local_irq_save(flags);
if (new_khz != stock_freq) {
@@ -314,7 +314,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
gx_params->pci_suscfg = suscfg;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
gx_params->on_duration * 32, gx_params->off_duration * 32);
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 53c6ac637e10..c30aaa6a54e8 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -254,7 +254,7 @@ acpi_cpufreq_cpu_init (
}
/* alloc freq_table */
- data->freq_table = kmalloc(sizeof(*data->freq_table) *
+ data->freq_table = kzalloc(sizeof(*data->freq_table) *
(data->acpi_data.state_count + 1),
GFP_KERNEL);
if (!data->freq_table) {
@@ -275,7 +275,6 @@ acpi_cpufreq_cpu_init (
/* table init */
for (i = 0; i <= data->acpi_data.state_count; i++)
{
- data->freq_table[i].driver_data = i;
if (i < data->acpi_data.state_count) {
data->freq_table[i].frequency =
data->acpi_data.states[i].core_frequency * 1000;
@@ -332,7 +331,6 @@ acpi_cpufreq_cpu_exit (
pr_debug("acpi_cpufreq_cpu_exit\n");
if (data) {
- cpufreq_frequency_table_put_attr(policy->cpu);
acpi_io_data[policy->cpu] = NULL;
acpi_processor_unregister_performance(&data->acpi_data,
policy->cpu);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index ce69059be1fc..e27fca86fe4f 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -144,7 +144,6 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
- .exit = cpufreq_generic_exit,
.name = "imx6q-cpufreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 0e27844e8c2d..e5122f1bfe78 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -122,7 +122,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
return 0;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
@@ -143,7 +143,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
*/
set_cpus_allowed(current, cpus_allowed);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2cd36b9297f3..099967302bf2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -99,8 +99,7 @@ struct cpudata {
u64 prev_aperf;
u64 prev_mperf;
unsigned long long prev_tsc;
- int sample_ptr;
- struct sample samples[SAMPLE_COUNT];
+ struct sample sample;
};
static struct cpudata **all_cpu_data;
@@ -154,7 +153,7 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
pid->setpoint = setpoint;
pid->deadband = deadband;
pid->integral = int_tofp(integral);
- pid->last_err = setpoint - busy;
+ pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
@@ -447,7 +446,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
if (limits.no_turbo)
val |= (u64)1 << 32;
- wrmsrl(MSR_IA32_PERF_CTL, val);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
static struct cpu_defaults core_params = {
@@ -586,15 +585,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
mperf = mperf >> FRAC_BITS;
tsc = tsc >> FRAC_BITS;
- cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
- cpu->samples[cpu->sample_ptr].aperf = aperf;
- cpu->samples[cpu->sample_ptr].mperf = mperf;
- cpu->samples[cpu->sample_ptr].tsc = tsc;
- cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
- cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
- cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
+ cpu->sample.aperf = aperf;
+ cpu->sample.mperf = mperf;
+ cpu->sample.tsc = tsc;
+ cpu->sample.aperf -= cpu->prev_aperf;
+ cpu->sample.mperf -= cpu->prev_mperf;
+ cpu->sample.tsc -= cpu->prev_tsc;
- intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
+ intel_pstate_calc_busy(cpu, &cpu->sample);
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
@@ -614,7 +612,7 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
int32_t core_busy, max_pstate, current_pstate;
- core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
+ core_busy = cpu->sample.core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
@@ -648,7 +646,7 @@ static void intel_pstate_timer_func(unsigned long __data)
intel_pstate_sample(cpu);
- sample = &cpu->samples[cpu->sample_ptr];
+ sample = &cpu->sample;
intel_pstate_adjust_busy_pstate(cpu);
@@ -729,7 +727,7 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
cpu = all_cpu_data[cpu_num];
if (!cpu)
return 0;
- sample = &cpu->samples[cpu->sample_ptr];
+ sample = &cpu->sample;
return sample->freq;
}
@@ -773,14 +771,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
return 0;
}
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
- int cpu = policy->cpu;
+ int cpu_num = policy->cpu;
+ struct cpudata *cpu = all_cpu_data[cpu_num];
- del_timer(&all_cpu_data[cpu]->timer);
- kfree(all_cpu_data[cpu]);
- all_cpu_data[cpu] = NULL;
- return 0;
+ pr_info("intel_pstate CPU %d exiting\n", cpu_num);
+
+ del_timer_sync(&all_cpu_data[cpu_num]->timer);
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+ kfree(all_cpu_data[cpu_num]);
+ all_cpu_data[cpu_num] = NULL;
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -818,7 +819,7 @@ static struct cpufreq_driver intel_pstate_driver = {
.setpolicy = intel_pstate_set_policy,
.get = intel_pstate_get,
.init = intel_pstate_cpu_init,
- .exit = intel_pstate_cpu_exit,
+ .stop_cpu = intel_pstate_stop_cpu,
.name = "intel_pstate",
};
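The ->stop_cpu hook that intel_pstate now implements is invoked from the core change earlier in this patch, and only for setpolicy drivers:
/*
 * From __cpufreq_remove_dev_prepare() earlier in this patch:
 *
 *	} else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
 *		cpufreq_driver->stop_cpu(policy);
 *	}
 *
 * intel_pstate uses it to drop the outgoing CPU to its minimum pstate and
 * free its per-CPU data, work that previously happened in ->exit.
 */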
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index eb7abe345b50..37a480680cd0 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -43,9 +43,9 @@ static struct priv
* table.
*/
static struct cpufreq_frequency_table kirkwood_freq_table[] = {
- {STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */
- {STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */
- {0, CPUFREQ_TABLE_END},
+ {0, STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */
+ {0, STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */
+ {0, 0, CPUFREQ_TABLE_END},
};
static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
@@ -102,7 +102,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "kirkwood-cpufreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 45bafddfd8ea..d00e5d1abd25 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -269,7 +269,7 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
freqs.old = calc_speed(longhaul_get_cpu_mult());
freqs.new = speed;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
@@ -386,7 +386,7 @@ retry_loop:
}
}
/* Report true CPU frequency */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
if (!bm_timeout)
printk(KERN_INFO PFX "Warning: Timeout while waiting for "
@@ -475,7 +475,7 @@ static int longhaul_get_ranges(void)
return -EINVAL;
}
- longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table),
+ longhaul_table = kzalloc((numscales + 1) * sizeof(*longhaul_table),
GFP_KERNEL);
if (!longhaul_table)
return -ENOMEM;
@@ -913,7 +913,6 @@ static struct cpufreq_driver longhaul_driver = {
.target_index = longhaul_target,
.get = longhaul_get,
.init = longhaul_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "longhaul",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index b6581abc9207..f0bc31f5db27 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -69,7 +69,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- static struct clk *cpuclk;
+ struct clk *cpuclk;
int i;
unsigned long rate;
int ret;
@@ -104,7 +104,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
clk_put(policy->clk);
return 0;
}
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index c4dfa42a75ac..cc3408fc073f 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -59,9 +59,9 @@
#define CPUFREQ_LOW 1
static struct cpufreq_frequency_table maple_cpu_freqs[] = {
- {CPUFREQ_HIGH, 0},
- {CPUFREQ_LOW, 0},
- {0, CPUFREQ_TABLE_END},
+ {0, CPUFREQ_HIGH, 0},
+ {0, CPUFREQ_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
};
/* Power mode data is an array of the 32 bits PCR values to use for
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 590f5b66d181..5f69c9aa703c 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -143,7 +143,6 @@ fail:
static int omap_cpu_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
freq_table_free();
clk_put(policy->clk);
return 0;
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 3d1cba9fd5f9..529cfd92158f 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -92,16 +92,16 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
static struct cpufreq_frequency_table p4clockmod_table[] = {
- {DC_RESV, CPUFREQ_ENTRY_INVALID},
- {DC_DFLT, 0},
- {DC_25PT, 0},
- {DC_38PT, 0},
- {DC_50PT, 0},
- {DC_64PT, 0},
- {DC_75PT, 0},
- {DC_88PT, 0},
- {DC_DISABLE, 0},
- {DC_RESV, CPUFREQ_TABLE_END},
+ {0, DC_RESV, CPUFREQ_ENTRY_INVALID},
+ {0, DC_DFLT, 0},
+ {0, DC_25PT, 0},
+ {0, DC_38PT, 0},
+ {0, DC_50PT, 0},
+ {0, DC_64PT, 0},
+ {0, DC_75PT, 0},
+ {0, DC_88PT, 0},
+ {0, DC_DISABLE, 0},
+ {0, DC_RESV, CPUFREQ_TABLE_END},
};
@@ -237,7 +237,6 @@ static struct cpufreq_driver p4clockmod_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cpufreq_p4_target,
.init = cpufreq_p4_cpu_init,
- .exit = cpufreq_generic_exit,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
.attr = cpufreq_generic_attr,
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 0426008380d8..84c84b5f0f3a 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -60,12 +60,12 @@ static int current_astate;
/* We support 5(A0-A4) power states excluding turbo(A5-A6) modes */
static struct cpufreq_frequency_table pas_freqs[] = {
- {0, 0},
- {1, 0},
- {2, 0},
- {3, 0},
- {4, 0},
- {0, CPUFREQ_TABLE_END},
+ {0, 0, 0},
+ {0, 1, 0},
+ {0, 2, 0},
+ {0, 3, 0},
+ {0, 4, 0},
+ {0, 0, CPUFREQ_TABLE_END},
};
/*
@@ -234,7 +234,6 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
if (sdcpwr_mapbase)
iounmap(sdcpwr_mapbase);
- cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 1c0f1067af73..728a2d879499 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -215,7 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
freqs.old = policy->cur;
freqs.new = target_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
input_buffer = 0x1 | (((target_freq * 100)
/ (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
@@ -231,7 +231,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
status = ioread16(&pcch_hdr->status);
iowrite16(0, &pcch_hdr->status);
- cpufreq_notify_post_transition(policy, &freqs, status != CMD_COMPLETE);
+ cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
spin_unlock(&pcc_lock);
if (status != CMD_COMPLETE) {
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index cf55d202f332..7615180d7ee3 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -81,9 +81,9 @@ static int is_pmu_based;
#define CPUFREQ_LOW 1
static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
- {CPUFREQ_HIGH, 0},
- {CPUFREQ_LOW, 0},
- {0, CPUFREQ_TABLE_END},
+ {0, CPUFREQ_HIGH, 0},
+ {0, CPUFREQ_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
};
static inline void local_delay(unsigned long ms)
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 6a338f8c3860..8bc422977b5b 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -65,9 +65,9 @@
#define CPUFREQ_LOW 1
static struct cpufreq_frequency_table g5_cpu_freqs[] = {
- {CPUFREQ_HIGH, 0},
- {CPUFREQ_LOW, 0},
- {0, CPUFREQ_TABLE_END},
+ {0, CPUFREQ_HIGH, 0},
+ {0, CPUFREQ_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
};
/* Power mode data is an array of the 32 bits PCR values to use for
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index b9a444e358b5..49f120e1bc7b 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -37,15 +37,15 @@ MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
static struct cpufreq_frequency_table clock_ratio[] = {
- {60, /* 110 -> 6.0x */ 0},
- {55, /* 011 -> 5.5x */ 0},
- {50, /* 001 -> 5.0x */ 0},
- {45, /* 000 -> 4.5x */ 0},
- {40, /* 010 -> 4.0x */ 0},
- {35, /* 111 -> 3.5x */ 0},
- {30, /* 101 -> 3.0x */ 0},
- {20, /* 100 -> 2.0x */ 0},
- {0, CPUFREQ_TABLE_END}
+ {0, 60, /* 110 -> 6.0x */ 0},
+ {0, 55, /* 011 -> 5.5x */ 0},
+ {0, 50, /* 001 -> 5.0x */ 0},
+ {0, 45, /* 000 -> 4.5x */ 0},
+ {0, 40, /* 010 -> 4.0x */ 0},
+ {0, 35, /* 111 -> 3.5x */ 0},
+ {0, 30, /* 101 -> 3.0x */ 0},
+ {0, 20, /* 100 -> 2.0x */ 0},
+ {0, 0, CPUFREQ_TABLE_END}
};
static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
@@ -148,11 +148,11 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
freqs.new = busfreq * clock_ratio[best_i].driver_data;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
powernow_k6_set_cpu_multiplier(best_i);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}
@@ -231,7 +231,6 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
if (i == max_multiplier)
powernow_k6_target(policy, i);
}
- cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 946708a1d745..f911645c3f6d 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -269,7 +269,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
freqs.new = powernow_table[index].frequency;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
/* Now do the magic poking into the MSRs. */
@@ -290,7 +290,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
if (have_a0 == 1)
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}
@@ -664,8 +664,6 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
static int powernow_cpu_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
-
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
if (acpi_processor_perf) {
acpi_processor_unregister_performance(acpi_processor_perf, 0);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 6684e0342792..1b6ae6b57c11 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -623,7 +623,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,
if (check_pst_table(data, pst, maxvid))
return -EINVAL;
- powernow_table = kmalloc((sizeof(*powernow_table)
+ powernow_table = kzalloc((sizeof(*powernow_table)
* (data->numps + 1)), GFP_KERNEL);
if (!powernow_table) {
printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
@@ -793,7 +793,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
}
/* fill in data->powernow_table */
- powernow_table = kmalloc((sizeof(*powernow_table)
+ powernow_table = kzalloc((sizeof(*powernow_table)
* (data->acpi_data.state_count + 1)), GFP_KERNEL);
if (!powernow_table) {
pr_debug("powernow_table memory alloc failure\n");
@@ -810,7 +810,6 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
powernow_table[data->acpi_data.state_count].frequency =
CPUFREQ_TABLE_END;
- powernow_table[data->acpi_data.state_count].driver_data = 0;
data->powernow_table = powernow_table;
if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
@@ -963,9 +962,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
policy = cpufreq_cpu_get(smp_processor_id());
cpufreq_cpu_put(policy);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
res = transition_fid_vid(data, fid, vid);
- cpufreq_notify_post_transition(policy, &freqs, res);
+ cpufreq_freq_transition_end(policy, &freqs, res);
return res;
}
@@ -1164,8 +1163,6 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
powernow_k8_cpu_exit_acpi(data);
- cpufreq_frequency_table_put_attr(pol->cpu);
-
kfree(data->powernow_table);
kfree(data);
for_each_cpu(cpu, pol->cpus)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
new file mode 100644
index 000000000000..9edccc63245d
--- /dev/null
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -0,0 +1,341 @@
+/*
+ * POWERNV cpufreq driver for the IBM POWER processors
+ *
+ * (C) Copyright IBM 2014
+ *
+ * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "powernv-cpufreq: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+
+#include <asm/cputhreads.h>
+#include <asm/reg.h>
+
+#define POWERNV_MAX_PSTATES 256
+
+static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+
+/*
+ * Note: The set of pstates consists of contiguous integers, the
+ * smallest of which is indicated by powernv_pstate_info.min, the
+ * largest of which is indicated by powernv_pstate_info.max.
+ *
+ * The nominal pstate is the highest non-turbo pstate in this
+ * platform. This is indicated by powernv_pstate_info.nominal.
+ */
+static struct powernv_pstate_info {
+ int min;
+ int max;
+ int nominal;
+ int nr_pstates;
+} powernv_pstate_info;
+
+/*
+ * Initialize the freq table based on data obtained
+ * from the firmware passed via device-tree
+ */
+static int init_powernv_pstates(void)
+{
+ struct device_node *power_mgt;
+ int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0;
+ const __be32 *pstate_ids, *pstate_freqs;
+ u32 len_ids, len_freqs;
+
+ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+ if (!power_mgt) {
+ pr_warn("power-mgt node not found\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
+ pr_warn("ibm,pstate-min node not found\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
+ pr_warn("ibm,pstate-max node not found\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
+ &pstate_nominal)) {
+ pr_warn("ibm,pstate-nominal not found\n");
+ return -ENODEV;
+ }
+ pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
+ pstate_nominal, pstate_max);
+
+ pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
+ if (!pstate_ids) {
+ pr_warn("ibm,pstate-ids not found\n");
+ return -ENODEV;
+ }
+
+ pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
+ &len_freqs);
+ if (!pstate_freqs) {
+ pr_warn("ibm,pstate-frequencies-mhz not found\n");
+ return -ENODEV;
+ }
+
+ WARN_ON(len_ids != len_freqs);
+ nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
+ if (!nr_pstates) {
+ pr_warn("No PStates found\n");
+ return -ENODEV;
+ }
+
+ pr_debug("NR PStates %d\n", nr_pstates);
+ for (i = 0; i < nr_pstates; i++) {
+ u32 id = be32_to_cpu(pstate_ids[i]);
+ u32 freq = be32_to_cpu(pstate_freqs[i]);
+
+ pr_debug("PState id %d freq %d MHz\n", id, freq);
+ powernv_freqs[i].frequency = freq * 1000; /* kHz */
+ powernv_freqs[i].driver_data = id;
+ }
+ /* End of list marker entry */
+ powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
+
+ powernv_pstate_info.min = pstate_min;
+ powernv_pstate_info.max = pstate_max;
+ powernv_pstate_info.nominal = pstate_nominal;
+ powernv_pstate_info.nr_pstates = nr_pstates;
+
+ return 0;
+}
+
+/* Returns the CPU frequency corresponding to the pstate_id. */
+static unsigned int pstate_id_to_freq(int pstate_id)
+{
+ int i;
+
+ i = powernv_pstate_info.max - pstate_id;
+ BUG_ON(i >= powernv_pstate_info.nr_pstates || i < 0);
+
+ return powernv_freqs[i].frequency;
+}
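Since the pstates are contiguous integers (see the header comment above) and the table is filled in firmware order, the index arithmetic can be checked with a small worked example:
/*
 * Worked example (illustrative values; assumes the firmware lists pstates
 * from powernv_pstate_info.max downwards, which the subtraction relies on):
 *
 *   max = 0, min = -7, nr_pstates = 8
 *   pstate_id = -3  =>  i = 0 - (-3) = 3  =>  powernv_freqs[3].frequency
 */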
+
+/*
+ * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
+ * the firmware
+ */
+static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
+ char *buf)
+{
+ return sprintf(buf, "%u\n",
+ pstate_id_to_freq(powernv_pstate_info.nominal));
+}
+
+struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
+ __ATTR_RO(cpuinfo_nominal_freq);
+
+static struct freq_attr *powernv_cpu_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &cpufreq_freq_attr_cpuinfo_nominal_freq,
+ NULL,
+};
+
+/* Helper routines */
+
+/* Access helpers to power mgt SPR */
+
+static inline unsigned long get_pmspr(unsigned long sprn)
+{
+ switch (sprn) {
+ case SPRN_PMCR:
+ return mfspr(SPRN_PMCR);
+
+ case SPRN_PMICR:
+ return mfspr(SPRN_PMICR);
+
+ case SPRN_PMSR:
+ return mfspr(SPRN_PMSR);
+ }
+ BUG();
+}
+
+static inline void set_pmspr(unsigned long sprn, unsigned long val)
+{
+ switch (sprn) {
+ case SPRN_PMCR:
+ mtspr(SPRN_PMCR, val);
+ return;
+
+ case SPRN_PMICR:
+ mtspr(SPRN_PMICR, val);
+ return;
+ }
+ BUG();
+}
+
+/*
+ * Use objects of this type to query/update
+ * pstates on a remote CPU via smp_call_function.
+ */
+struct powernv_smp_call_data {
+ unsigned int freq;
+ int pstate_id;
+};
+
+/*
+ * powernv_read_cpu_freq: Reads the current frequency on this CPU.
+ *
+ * Called via smp_call_function.
+ *
+ * Note: The caller of the smp_call_function should pass an argument of
+ * the type 'struct powernv_smp_call_data *' along with this function.
+ *
+ * The current frequency on this CPU will be returned via
+ * ((struct powernv_smp_call_data *)arg)->freq;
+ */
+static void powernv_read_cpu_freq(void *arg)
+{
+ unsigned long pmspr_val;
+ s8 local_pstate_id;
+ struct powernv_smp_call_data *freq_data = arg;
+
+ pmspr_val = get_pmspr(SPRN_PMSR);
+
+ /*
+	 * The local pstate id corresponds to bits 48..55 in the PMSR.
+ * Note: Watch out for the sign!
+ */
+ local_pstate_id = (pmspr_val >> 48) & 0xFF;
+ freq_data->pstate_id = local_pstate_id;
+ freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
+
+ pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
+ raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
+ freq_data->freq);
+}
+
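The "watch out for the sign" note matters because the 8-bit pstate field can encode negative pstates; routing the value through an s8 makes it sign-extend when widened. A small sketch with a made-up PMSR image (values are hypothetical):

    /* Hypothetical PMSR image: local pstate field (bits 48..55) holds 0xFD. */
    unsigned long pmspr_val = 0xFDUL << 48;
    s8 local_id = (pmspr_val >> 48) & 0xFF;   /* 0xFD stored as -3 */
    int pstate_id = local_id;                 /* widens to -3, not 253 */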
+/*
+ * powernv_cpufreq_get: Returns the CPU frequency as reported by the
+ * firmware for CPU 'cpu'. This value is reported through the sysfs
+ * file cpuinfo_cur_freq.
+ */
+unsigned int powernv_cpufreq_get(unsigned int cpu)
+{
+ struct powernv_smp_call_data freq_data;
+
+ smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
+ &freq_data, 1);
+
+ return freq_data.freq;
+}
+
+/*
+ * set_pstate: Sets the pstate on this CPU.
+ *
+ * This is called via an smp_call_function.
+ *
+ * The caller must ensure that freq_data is of the type
+ * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
+ * on this CPU should be present in freq_data->pstate_id.
+ */
+static void set_pstate(void *freq_data)
+{
+ unsigned long val;
+ unsigned long pstate_ul =
+ ((struct powernv_smp_call_data *) freq_data)->pstate_id;
+
+ val = get_pmspr(SPRN_PMCR);
+ val = val & 0x0000FFFFFFFFFFFFULL;
+
+ pstate_ul = pstate_ul & 0xFF;
+
+	/* Set both global (bits 56..63) and local (bits 48..55) PStates */
+ val = val | (pstate_ul << 56) | (pstate_ul << 48);
+
+ pr_debug("Setting cpu %d pmcr to %016lX\n",
+ raw_smp_processor_id(), val);
+ set_pmspr(SPRN_PMCR, val);
+}
+
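To make the bit layout concrete, the same masking and shifting with made-up numbers (the prior PMCR contents and the pstate are hypothetical):

    unsigned long val = 0x0000123456789ABCULL;        /* previous PMCR image */
    unsigned long ps  = (unsigned long)(-3) & 0xFF;   /* pstate -3 -> 0xFD   */
    val = (val & 0x0000FFFFFFFFFFFFULL) | (ps << 56) | (ps << 48);
    /* val == 0xFDFD123456789ABC: low 48 bits kept, both pstate fields = 0xFD */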
+/*
+ * powernv_cpufreq_target_index: Sets the frequency corresponding to
+ * the cpufreq table entry indexed by new_index on the cpus in the
+ * mask policy->cpus
+ */
+static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
+ unsigned int new_index)
+{
+ struct powernv_smp_call_data freq_data;
+
+ freq_data.pstate_id = powernv_freqs[new_index].driver_data;
+
+ /*
+ * Use smp_call_function to send IPI and execute the
+ * mtspr on target CPU. We could do that without IPI
+ * if current CPU is within policy->cpus (core)
+ */
+ smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+
+ return 0;
+}
+
+static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int base, i;
+
+ base = cpu_first_thread_sibling(policy->cpu);
+
+ for (i = 0; i < threads_per_core; i++)
+ cpumask_set_cpu(base + i, policy->cpus);
+
+ return cpufreq_table_validate_and_show(policy, powernv_freqs);
+}
+
+static struct cpufreq_driver powernv_cpufreq_driver = {
+ .name = "powernv-cpufreq",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = powernv_cpufreq_cpu_init,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernv_cpufreq_target_index,
+ .get = powernv_cpufreq_get,
+ .attr = powernv_cpu_freq_attr,
+};
+
+static int __init powernv_cpufreq_init(void)
+{
+ int rc = 0;
+
+ /* Discover pstates from device tree and init */
+ rc = init_powernv_pstates();
+ if (rc) {
+ pr_info("powernv-cpufreq disabled. System does not support PState control\n");
+ return rc;
+ }
+
+ return cpufreq_register_driver(&powernv_cpufreq_driver);
+}
+module_init(powernv_cpufreq_init);
+
+static void __exit powernv_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&powernv_cpufreq_driver);
+}
+module_exit(powernv_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 051000f44ca2..b7e677be1df0 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/errno.h>
-#include <sysdev/fsl_soc.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -21,6 +20,7 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/smp.h>
+#include <sysdev/fsl_soc.h>
/**
* struct cpu_data - per CPU data struct
@@ -205,7 +205,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
for_each_cpu(i, per_cpu(cpu_mask, cpu))
per_cpu(cpu_data, i) = data;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->cpuinfo.transition_latency =
+ (12 * NSEC_PER_SEC) / fsl_get_sys_freq();
of_node_put(np);
return 0;
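To put a number on the new estimate: assuming fsl_get_sys_freq() reports the platform clock in Hz, a hypothetical 600 MHz clock gives (12 * 1000000000) / 600000000 = 20 ns, i.e. twelve platform-clock periods, instead of the CPUFREQ_ETERNAL placeholder that told governors nothing about the transition cost.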
@@ -228,7 +229,6 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
unsigned int cpu;
- cpufreq_frequency_table_put_attr(policy->cpu);
of_node_put(data->parent);
kfree(data->table);
kfree(data);
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index e42ca9c31cea..5be8a48dba74 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -32,15 +32,15 @@
/* the CBE supports an 8 step frequency scaling */
static struct cpufreq_frequency_table cbe_freqs[] = {
- {1, 0},
- {2, 0},
- {3, 0},
- {4, 0},
- {5, 0},
- {6, 0},
- {8, 0},
- {10, 0},
- {0, CPUFREQ_TABLE_END},
+ {0, 1, 0},
+ {0, 2, 0},
+ {0, 3, 0},
+ {0, 4, 0},
+ {0, 5, 0},
+ {0, 6, 0},
+ {0, 8, 0},
+ {0, 10, 0},
+ {0, 0, CPUFREQ_TABLE_END},
};
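The extra leading zero in each initializer is a new flags member; these are positional initializers, so every entry grows by one field. A sketch of the layout these changes assume (field names as I understand this era's cpufreq.h, shown here for orientation only):

    struct cpufreq_frequency_table {
        unsigned int flags;        /* e.g. boost markers; 0 for normal entries */
        unsigned int driver_data;  /* driver-private cookie                    */
        unsigned int frequency;    /* kHz, or CPUFREQ_TABLE_END                */
    };

The same three-field initializer change repeats below for the s3c2416, s3c64xx, s5pv210, sc520 and speedstep tables.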
/*
@@ -141,7 +141,6 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cbe_cpufreq_target,
.init = cbe_cpufreq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "cbe-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
};
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index a9195a86b069..e24269ab4e9b 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -427,7 +427,6 @@ static struct cpufreq_driver pxa_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pxa_set_target,
.init = pxa_cpufreq_init,
- .exit = cpufreq_generic_exit,
.get = pxa_cpufreq_get,
.name = "PXA2xx",
};
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index 3785687e9d70..a01275900389 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -205,7 +205,6 @@ static struct cpufreq_driver pxa3xx_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pxa3xx_cpufreq_set,
.init = pxa3xx_cpufreq_init,
- .exit = cpufreq_generic_exit,
.get = pxa3xx_cpufreq_get,
.name = "pxa3xx-cpufreq",
};
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 826b8be23099..4626f90559b5 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -72,19 +72,19 @@ static struct s3c2416_dvfs s3c2416_dvfs_table[] = {
#endif
static struct cpufreq_frequency_table s3c2416_freq_table[] = {
- { SOURCE_HCLK, FREQ_DVS },
- { SOURCE_ARMDIV, 133333 },
- { SOURCE_ARMDIV, 266666 },
- { SOURCE_ARMDIV, 400000 },
- { 0, CPUFREQ_TABLE_END },
+ { 0, SOURCE_HCLK, FREQ_DVS },
+ { 0, SOURCE_ARMDIV, 133333 },
+ { 0, SOURCE_ARMDIV, 266666 },
+ { 0, SOURCE_ARMDIV, 400000 },
+ { 0, 0, CPUFREQ_TABLE_END },
};
static struct cpufreq_frequency_table s3c2450_freq_table[] = {
- { SOURCE_HCLK, FREQ_DVS },
- { SOURCE_ARMDIV, 133500 },
- { SOURCE_ARMDIV, 267000 },
- { SOURCE_ARMDIV, 534000 },
- { 0, CPUFREQ_TABLE_END },
+ { 0, SOURCE_HCLK, FREQ_DVS },
+ { 0, SOURCE_ARMDIV, 133500 },
+ { 0, SOURCE_ARMDIV, 267000 },
+ { 0, SOURCE_ARMDIV, 534000 },
+ { 0, 0, CPUFREQ_TABLE_END },
};
static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 25069741b507..be1b2b5c9753 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -217,7 +217,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
/* start the frequency change */
- cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs.freqs);
/* If hclk is staying the same, then we do not need to
* re-write the IO or the refresh timings whilst we are changing
@@ -261,7 +261,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
local_irq_restore(flags);
/* notify everyone we've done this */
- cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs.freqs, 0);
s3c_freq_dbg("%s: finished\n", __func__);
return 0;
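The begin/end pair replaces the raw PRECHANGE/POSTCHANGE calls: as used here, cpufreq_freq_transition_begin() publishes CPUFREQ_PRECHANGE and serializes against any transition already in flight, while cpufreq_freq_transition_end() publishes CPUFREQ_POSTCHANGE and takes an error code so listeners can be told when the change did not stick. The general calling pattern, with a hypothetical hardware helper standing in for the real work:

    cpufreq_freq_transition_begin(policy, &freqs);
    ret = program_pll(freqs.new);                      /* hypothetical helper */
    cpufreq_freq_transition_end(policy, &freqs, ret);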
@@ -586,7 +586,7 @@ static int s3c_cpufreq_build_freq(void)
size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
size++;
- ftab = kmalloc(sizeof(*ftab) * size, GFP_KERNEL);
+ ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL);
if (!ftab) {
printk(KERN_ERR "%s: no memory for tables\n", __func__);
return -ENOMEM;
@@ -664,7 +664,7 @@ int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
size = sizeof(*vals) * (plls_no + 1);
- vals = kmalloc(size, GFP_KERNEL);
+ vals = kzalloc(size, GFP_KERNEL);
if (vals) {
memcpy(vals, plls, size);
pll_reg = vals;
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index c4226de079ab..ff7d3ecb85f0 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -37,19 +37,19 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
};
static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
- { 0, 66000 },
- { 0, 100000 },
- { 0, 133000 },
- { 1, 200000 },
- { 1, 222000 },
- { 1, 266000 },
- { 2, 333000 },
- { 2, 400000 },
- { 2, 532000 },
- { 2, 533000 },
- { 3, 667000 },
- { 4, 800000 },
- { 0, CPUFREQ_TABLE_END },
+ { 0, 0, 66000 },
+ { 0, 0, 100000 },
+ { 0, 0, 133000 },
+ { 0, 1, 200000 },
+ { 0, 1, 222000 },
+ { 0, 1, 266000 },
+ { 0, 2, 333000 },
+ { 0, 2, 400000 },
+ { 0, 2, 532000 },
+ { 0, 2, 533000 },
+ { 0, 3, 667000 },
+ { 0, 4, 800000 },
+ { 0, 0, CPUFREQ_TABLE_END },
};
#endif
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 55a8e9fa9435..ab2c1a40d437 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -18,7 +18,6 @@
#include <linux/cpufreq.h>
#include <linux/reboot.h>
#include <linux/regulator/consumer.h>
-#include <linux/suspend.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
@@ -65,12 +64,12 @@ enum s5pv210_dmc_port {
};
static struct cpufreq_frequency_table s5pv210_freq_table[] = {
- {L0, 1000*1000},
- {L1, 800*1000},
- {L2, 400*1000},
- {L3, 200*1000},
- {L4, 100*1000},
- {0, CPUFREQ_TABLE_END},
+ {0, L0, 1000*1000},
+ {0, L1, 800*1000},
+ {0, L2, 400*1000},
+ {0, L3, 200*1000},
+ {0, L4, 100*1000},
+ {0, 0, CPUFREQ_TABLE_END},
};
static struct regulator *arm_regulator;
@@ -435,18 +434,6 @@ exit:
return ret;
}
-#ifdef CONFIG_PM
-static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
-static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
-{
- return 0;
-}
-#endif
-
static int check_mem_type(void __iomem *dmc_reg)
{
unsigned long val;
@@ -502,6 +489,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
+ policy->suspend_freq = SLEEP_FREQ;
return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
out_dmc1:
@@ -511,32 +499,6 @@ out_dmc0:
return ret;
}
-static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- int ret;
-
- switch (event) {
- case PM_SUSPEND_PREPARE:
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
- if (ret < 0)
- return NOTIFY_BAD;
-
- /* Disable updation of cpu frequency */
- no_cpufreq_access = true;
- return NOTIFY_OK;
- case PM_POST_RESTORE:
- case PM_POST_SUSPEND:
- /* Enable updation of cpu frequency */
- no_cpufreq_access = false;
- cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
-
- return NOTIFY_OK;
- }
-
- return NOTIFY_DONE;
-}
-
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -558,15 +520,11 @@ static struct cpufreq_driver s5pv210_driver = {
.init = s5pv210_cpu_init,
.name = "s5pv210",
#ifdef CONFIG_PM
- .suspend = s5pv210_cpufreq_suspend,
- .resume = s5pv210_cpufreq_resume,
+ .suspend = cpufreq_generic_suspend,
+ .resume = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
#endif
};
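Both .suspend and .resume point at cpufreq_generic_suspend because the hardware wants SLEEP_FREQ forced on each side of the transition; the helper simply drives the policy to the policy->suspend_freq value set up in s5pv210_cpu_init(). Roughly (a paraphrase, not the verbatim core code):

    int cpufreq_generic_suspend(struct cpufreq_policy *policy)
    {
        if (!policy->suspend_freq)
            return -EINVAL;                            /* driver never set it */
        return __cpufreq_driver_target(policy, policy->suspend_freq,
                                       CPUFREQ_RELATION_H);
    }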
-static struct notifier_block s5pv210_cpufreq_notifier = {
- .notifier_call = s5pv210_cpufreq_notifier_event,
-};
-
static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
.notifier_call = s5pv210_cpufreq_reboot_notifier_event,
};
@@ -586,7 +544,6 @@ static int __init s5pv210_cpufreq_init(void)
return PTR_ERR(int_regulator);
}
- register_pm_notifier(&s5pv210_cpufreq_notifier);
register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
return cpufreq_register_driver(&s5pv210_driver);
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 6adb354e359c..ac84e4818014 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -33,9 +33,9 @@ static __u8 __iomem *cpuctl;
#define PFX "sc520_freq: "
static struct cpufreq_frequency_table sc520_freq_table[] = {
- {0x01, 100000},
- {0x02, 133000},
- {0, CPUFREQ_TABLE_END},
+ {0, 0x01, 100000},
+ {0, 0x02, 133000},
+ {0, 0, CPUFREQ_TABLE_END},
};
static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
@@ -93,7 +93,6 @@ static struct cpufreq_driver sc520_freq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sc520_freq_target,
.init = sc520_freq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "sc520_freq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 387af12503a6..86628e22b2a3 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -68,10 +68,10 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = (freq + 500) / 1000;
freqs.flags = 0;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
set_cpus_allowed_ptr(current, &cpus_allowed);
clk_set_rate(cpuclk, freq);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
dev_dbg(dev, "set frequency %lu Hz\n", freq);
@@ -143,7 +143,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
unsigned int cpu = policy->cpu;
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
- cpufreq_frequency_table_put_attr(cpu);
clk_put(cpuclk);
return 0;
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 62aa23e219d4..b73feeb666f9 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -301,10 +301,8 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us2e_driver) {
- cpufreq_frequency_table_put_attr(policy->cpu);
+ if (cpufreq_us2e_driver)
us2e_freq_target(policy, 0);
- }
return 0;
}
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index 724ffbd7105d..9bb42ba50efa 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -156,10 +156,8 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us3_driver) {
- cpufreq_frequency_table_put_attr(policy->cpu);
+ if (cpufreq_us3_driver)
us3_freq_target(policy, 0);
- }
return 0;
}
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 5c86e3fa5593..38678396636d 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -163,11 +164,10 @@ static struct cpufreq_driver spear_cpufreq_driver = {
.target_index = spear_cpufreq_target,
.get = cpufreq_generic_get,
.init = spear_cpufreq_init,
- .exit = cpufreq_generic_exit,
.attr = cpufreq_generic_attr,
};
-static int spear_cpufreq_driver_init(void)
+static int spear_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
const struct property *prop;
@@ -195,18 +195,15 @@ static int spear_cpufreq_driver_init(void)
cnt = prop->length / sizeof(u32);
val = prop->value;
- freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
+ freq_tbl = kzalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
if (!freq_tbl) {
ret = -ENOMEM;
goto out_put_node;
}
- for (i = 0; i < cnt; i++) {
- freq_tbl[i].driver_data = i;
+ for (i = 0; i < cnt; i++)
freq_tbl[i].frequency = be32_to_cpup(val++);
- }
- freq_tbl[i].driver_data = i;
freq_tbl[i].frequency = CPUFREQ_TABLE_END;
spear_cpufreq.freq_tbl = freq_tbl;
@@ -235,7 +232,15 @@ out_put_node:
of_node_put(np);
return ret;
}
-late_initcall(spear_cpufreq_driver_init);
+
+static struct platform_driver spear_cpufreq_platdrv = {
+ .driver = {
+ .name = "spear-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = spear_cpufreq_probe,
+};
+module_platform_driver(spear_cpufreq_platdrv);
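module_platform_driver() removes the open-coded late_initcall: the macro generates module_init/module_exit stubs that register and unregister the platform driver. Expanded by hand it is roughly equivalent to (stub names illustrative):

    static int __init spear_cpufreq_platdrv_init(void)
    {
        return platform_driver_register(&spear_cpufreq_platdrv);
    }
    module_init(spear_cpufreq_platdrv_init);

    static void __exit spear_cpufreq_platdrv_exit(void)
    {
        platform_driver_unregister(&spear_cpufreq_platdrv);
    }
    module_exit(spear_cpufreq_platdrv_exit);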
MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>");
MODULE_DESCRIPTION("SPEAr CPUFreq driver");
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 4e1daca5ce3b..6723f0390f20 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -406,8 +406,6 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
if (!per_cpu(centrino_model, cpu))
return -ENODEV;
- cpufreq_frequency_table_put_attr(cpu);
-
per_cpu(centrino_model, cpu) = NULL;
return 0;
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 7639b2be2a90..1a07b5904ed5 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -49,9 +49,9 @@ static u32 pmbase;
* are in kHz for the time being.
*/
static struct cpufreq_frequency_table speedstep_freqs[] = {
- {SPEEDSTEP_HIGH, 0},
- {SPEEDSTEP_LOW, 0},
- {0, CPUFREQ_TABLE_END},
+ {0, SPEEDSTEP_HIGH, 0},
+ {0, SPEEDSTEP_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
};
@@ -311,7 +311,6 @@ static struct cpufreq_driver speedstep_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = speedstep_target,
.init = speedstep_cpu_init,
- .exit = cpufreq_generic_exit,
.get = speedstep_get,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 998c17b42200..8635eec96da5 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -42,9 +42,9 @@ static enum speedstep_processor speedstep_processor;
* are in kHz for the time being.
*/
static struct cpufreq_frequency_table speedstep_freqs[] = {
- {SPEEDSTEP_HIGH, 0},
- {SPEEDSTEP_LOW, 0},
- {0, CPUFREQ_TABLE_END},
+ {0, SPEEDSTEP_HIGH, 0},
+ {0, SPEEDSTEP_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
};
#define GET_SPEEDSTEP_OWNER 0
@@ -280,7 +280,6 @@ static struct cpufreq_driver speedstep_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = speedstep_target,
.init = speedstep_cpu_init,
- .exit = cpufreq_generic_exit,
.get = speedstep_get,
.resume = speedstep_resume,
.attr = cpufreq_generic_attr,
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index e652c1bd8d0f..63f00598a251 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -26,7 +26,6 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/suspend.h>
static struct cpufreq_frequency_table freq_table[] = {
{ .frequency = 216000 },
@@ -47,9 +46,6 @@ static struct clk *pll_x_clk;
static struct clk *pll_p_clk;
static struct clk *emc_clk;
-static DEFINE_MUTEX(tegra_cpu_lock);
-static bool is_suspended;
-
static int tegra_cpu_clk_set_rate(unsigned long rate)
{
int ret;
@@ -112,42 +108,9 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
{
- int ret = -EBUSY;
-
- mutex_lock(&tegra_cpu_lock);
-
- if (!is_suspended)
- ret = tegra_update_cpu_speed(policy,
- freq_table[index].frequency);
-
- mutex_unlock(&tegra_cpu_lock);
- return ret;
+ return tegra_update_cpu_speed(policy, freq_table[index].frequency);
}
-static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
- void *dummy)
-{
- mutex_lock(&tegra_cpu_lock);
- if (event == PM_SUSPEND_PREPARE) {
- struct cpufreq_policy *policy = cpufreq_cpu_get(0);
- is_suspended = true;
- pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
- freq_table[0].frequency);
- if (clk_get_rate(cpu_clk) / 1000 != freq_table[0].frequency)
- tegra_update_cpu_speed(policy, freq_table[0].frequency);
- cpufreq_cpu_put(policy);
- } else if (event == PM_POST_SUSPEND) {
- is_suspended = false;
- }
- mutex_unlock(&tegra_cpu_lock);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block tegra_cpu_pm_notifier = {
- .notifier_call = tegra_pm_notify,
-};
-
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
int ret;
@@ -166,16 +129,13 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
return ret;
}
- if (policy->cpu == 0)
- register_pm_notifier(&tegra_cpu_pm_notifier);
-
policy->clk = cpu_clk;
+ policy->suspend_freq = freq_table[0].frequency;
return 0;
}
static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
clk_disable_unprepare(cpu_clk);
clk_disable_unprepare(emc_clk);
return 0;
@@ -190,6 +150,9 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
.exit = tegra_cpu_exit,
.name = "tegra",
.attr = cpufreq_generic_attr,
+#ifdef CONFIG_PM
+ .suspend = cpufreq_generic_suspend,
+#endif
};
static int __init tegra_cpufreq_init(void)
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 36cc330b8747..8d045afa7fb4 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -44,9 +44,9 @@ static int ucv2_target(struct cpufreq_policy *policy,
freqs.old = policy->cur;
freqs.new = target_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- ret = clk_set_rate(policy->mclk, target_freq * 1000);
- cpufreq_notify_post_transition(policy, &freqs, ret);
+ cpufreq_freq_transition_begin(policy, &freqs);
+ ret = clk_set_rate(policy->clk, target_freq * 1000);
+ cpufreq_freq_transition_end(policy, &freqs, ret);
return ret;
}
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index d988948a89a0..97ccc31dbdd8 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -22,7 +22,7 @@ config ARM_HIGHBANK_CPUIDLE
config ARM_KIRKWOOD_CPUIDLE
bool "CPU Idle Driver for Marvell Kirkwood SoCs"
- depends on ARCH_KIRKWOOD
+ depends on ARCH_KIRKWOOD || MACH_KIRKWOOD
help
This adds the CPU Idle driver for Marvell Kirkwood SoCs.
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index e952936418d0..cb6654bfad77 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -323,7 +323,7 @@ static void cpuidle_coupled_poke(int cpu)
struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
- __smp_call_function_single(cpu, csd, 0);
+ smp_call_function_single_async(cpu, csd);
}
/**
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 78fd174c57e8..719f6fb5b1c3 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -11,9 +11,18 @@
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
+#include <linux/clockchips.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
+#include <asm/runlatch.h>
+
+/* Flags and constants used in PowerNV platform */
+
+#define MAX_POWERNV_IDLE_STATES 8
+#define IDLE_USE_INST_NAP 0x00010000 /* Use nap instruction */
+#define IDLE_USE_INST_SLEEP 0x00020000 /* Use sleep instruction */
struct cpuidle_driver powernv_idle_driver = {
.name = "powernv_idle",
@@ -30,12 +39,14 @@ static int snooze_loop(struct cpuidle_device *dev,
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
+ ppc64_runlatch_off();
while (!need_resched()) {
HMT_low();
HMT_very_low();
}
HMT_medium();
+ ppc64_runlatch_on();
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb();
return index;
@@ -45,14 +56,42 @@ static int nap_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
+ ppc64_runlatch_off();
power7_idle();
+ ppc64_runlatch_on();
+ return index;
+}
+
+static int fastsleep_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long old_lpcr = mfspr(SPRN_LPCR);
+ unsigned long new_lpcr;
+
+ if (unlikely(system_state < SYSTEM_RUNNING))
+ return index;
+
+ new_lpcr = old_lpcr;
+ new_lpcr &= ~(LPCR_MER | LPCR_PECE); /* lpcr[mer] must be 0 */
+
+ /* exit powersave upon external interrupt, but not decrementer
+ * interrupt.
+ */
+ new_lpcr |= LPCR_PECE0;
+
+ mtspr(SPRN_LPCR, new_lpcr);
+ power7_sleep();
+
+ mtspr(SPRN_LPCR, old_lpcr);
+
return index;
}
/*
* States for dedicated partition case.
*/
-static struct cpuidle_state powernv_states[] = {
+static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = {
{ /* Snooze */
.name = "snooze",
.desc = "snooze",
@@ -60,13 +99,6 @@ static struct cpuidle_state powernv_states[] = {
.exit_latency = 0,
.target_residency = 0,
.enter = &snooze_loop },
- { /* NAP */
- .name = "NAP",
- .desc = "NAP",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 10,
- .target_residency = 100,
- .enter = &nap_loop },
};
static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
@@ -127,19 +159,74 @@ static int powernv_cpuidle_driver_init(void)
return 0;
}
+static int powernv_add_idle_states(void)
+{
+ struct device_node *power_mgt;
+ struct property *prop;
+ int nr_idle_states = 1; /* Snooze */
+ int dt_idle_states;
+ u32 *flags;
+ int i;
+
+ /* Currently we have snooze statically defined */
+
+ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+ if (!power_mgt) {
+ pr_warn("opal: PowerMgmt Node not found\n");
+ return nr_idle_states;
+ }
+
+ prop = of_find_property(power_mgt, "ibm,cpu-idle-state-flags", NULL);
+ if (!prop) {
+ pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
+ return nr_idle_states;
+ }
+
+ dt_idle_states = prop->length / sizeof(u32);
+ flags = (u32 *) prop->value;
+
+ for (i = 0; i < dt_idle_states; i++) {
+
+ if (flags[i] & IDLE_USE_INST_NAP) {
+ /* Add NAP state */
+ strcpy(powernv_states[nr_idle_states].name, "Nap");
+ strcpy(powernv_states[nr_idle_states].desc, "Nap");
+ powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIME_VALID;
+ powernv_states[nr_idle_states].exit_latency = 10;
+ powernv_states[nr_idle_states].target_residency = 100;
+ powernv_states[nr_idle_states].enter = &nap_loop;
+ nr_idle_states++;
+ }
+
+ if (flags[i] & IDLE_USE_INST_SLEEP) {
+ /* Add FASTSLEEP state */
+ strcpy(powernv_states[nr_idle_states].name, "FastSleep");
+ strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
+ powernv_states[nr_idle_states].flags =
+ CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP;
+ powernv_states[nr_idle_states].exit_latency = 300;
+ powernv_states[nr_idle_states].target_residency = 1000000;
+ powernv_states[nr_idle_states].enter = &fastsleep_loop;
+ nr_idle_states++;
+ }
+ }
+
+ return nr_idle_states;
+}
+
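As an illustration of what the loop consumes (values made up, but using the flag constants defined at the top of this file): an ibm,cpu-idle-state-flags array of two words, one nap-capable and one sleep-capable, ends up as three states.

    u32 flags[] = { 0x00010000,    /* IDLE_USE_INST_NAP   */
                    0x00020000 };  /* IDLE_USE_INST_SLEEP */
    /* powernv_add_idle_states() would then return 3: snooze + Nap + FastSleep. */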
/*
* powernv_idle_probe()
* Choose state table for shared versus dedicated partition
*/
static int powernv_idle_probe(void)
{
-
if (cpuidle_disable != IDLE_NO_OVERRIDE)
return -ENODEV;
if (firmware_has_feature(FW_FEATURE_OPALv3)) {
cpuidle_state_table = powernv_states;
- max_idle_state = ARRAY_SIZE(powernv_states);
+ /* Device tree can indicate more idle states */
+ max_idle_state = powernv_add_idle_states();
} else
return -ENODEV;
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 7ab564aa0b1c..6f7b01956885 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -17,6 +17,7 @@
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
+#include <asm/runlatch.h>
#include <asm/plpar_wrappers.h>
struct cpuidle_driver pseries_idle_driver = {
@@ -29,6 +30,7 @@ static struct cpuidle_state *cpuidle_state_table;
static inline void idle_loop_prolog(unsigned long *in_purr)
{
+ ppc64_runlatch_off();
*in_purr = mfspr(SPRN_PURR);
/*
* Indicate to the HV that we are idle. Now would be
@@ -45,6 +47,10 @@ static inline void idle_loop_epilog(unsigned long in_purr)
wait_cycles += mfspr(SPRN_PURR) - in_purr;
get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
get_lppaca()->idle = 0;
+
+ if (irqs_disabled())
+ local_irq_enable();
+ ppc64_runlatch_on();
}
static int snooze_loop(struct cpuidle_device *dev,
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index a55e68f2cfc8..8236746e46bb 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -65,6 +65,26 @@ int cpuidle_play_dead(void)
}
/**
+ * cpuidle_enabled - check if the cpuidle framework is ready
+ * @dev: cpuidle device for this cpu
+ * @drv: cpuidle driver for this cpu
+ *
+ * Return 0 on success, otherwise:
+ * -ENODEV : the cpuidle framework is not available
+ * -EBUSY : the cpuidle device is not initialized or enabled
+ */
+int cpuidle_enabled(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+{
+ if (off || !initialized)
+ return -ENODEV;
+
+ if (!drv || !dev || !dev->enabled)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
* cpuidle_enter_state - enter the state and update stats
* @dev: cpuidle device for this cpu
* @drv: cpuidle driver for this cpu
@@ -85,7 +105,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_end = ktime_get();
- local_irq_enable();
+ if (!cpuidle_state_is_coupled(dev, drv, entered_state))
+ local_irq_enable();
diff = ktime_to_us(ktime_sub(time_end, time_start));
if (diff > INT_MAX)
@@ -108,61 +129,48 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
}
/**
- * cpuidle_idle_call - the main idle loop
+ * cpuidle_select - ask the cpuidle framework to choose an idle state
*
- * NOTE: no locks or semaphores should be used here
- * return non-zero on failure
+ * @drv: the cpuidle driver
+ * @dev: the cpuidle device
+ *
+ * Returns the index of the idle state.
*/
-int cpuidle_idle_call(void)
+int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
- struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
- struct cpuidle_driver *drv;
- int next_state, entered_state;
- bool broadcast;
-
- if (off || !initialized)
- return -ENODEV;
-
- /* check if the device is ready */
- if (!dev || !dev->enabled)
- return -EBUSY;
-
- drv = cpuidle_get_cpu_driver(dev);
-
- /* ask the governor for the next state */
- next_state = cpuidle_curr_governor->select(drv, dev);
- if (need_resched()) {
- dev->last_residency = 0;
- /* give the governor an opportunity to reflect on the outcome */
- if (cpuidle_curr_governor->reflect)
- cpuidle_curr_governor->reflect(dev, next_state);
- local_irq_enable();
- return 0;
- }
-
- trace_cpu_idle_rcuidle(next_state, dev->cpu);
-
- broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
-
- if (broadcast)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
-
- if (cpuidle_state_is_coupled(dev, drv, next_state))
- entered_state = cpuidle_enter_state_coupled(dev, drv,
- next_state);
- else
- entered_state = cpuidle_enter_state(dev, drv, next_state);
-
- if (broadcast)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+ return cpuidle_curr_governor->select(drv, dev);
+}
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+/**
+ * cpuidle_enter - enter into the specified idle state
+ *
+ * @drv: the cpuidle driver tied with the cpu
+ * @dev: the cpuidle device
+ * @index: the index in the idle state table
+ *
+ * Returns the index of the entered idle state, < 0 in case of error.
+ * The error code depends on the backend driver
+ */
+int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ int index)
+{
+ if (cpuidle_state_is_coupled(dev, drv, index))
+ return cpuidle_enter_state_coupled(dev, drv, index);
+ return cpuidle_enter_state(dev, drv, index);
+}
- /* give the governor an opportunity to reflect on the outcome */
+/**
+ * cpuidle_reflect - tell the underlying governor what was the state
+ * we were in
+ *
+ * @dev : the cpuidle device
+ * @index: the index in the idle state table
+ *
+ */
+void cpuidle_reflect(struct cpuidle_device *dev, int index)
+{
if (cpuidle_curr_governor->reflect)
- cpuidle_curr_governor->reflect(dev, entered_state);
-
- return 0;
+ cpuidle_curr_governor->reflect(dev, index);
}
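Splitting cpuidle_idle_call() into cpuidle_enabled/select/enter/reflect leaves the policy decisions to the caller (the idle loop). A condensed sketch of how the pieces are meant to be combined, error handling omitted:

    if (cpuidle_enabled(drv, dev) == 0) {
        int index = cpuidle_select(drv, dev);          /* governor picks a state */
        int entered = cpuidle_enter(drv, dev, index);  /* coupled or plain entry */
        cpuidle_reflect(dev, entered);                 /* feed the outcome back  */
    }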
/**
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 06dbe7c86199..136d6a283e0a 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -209,7 +209,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
state->exit_latency = 0;
state->target_residency = 0;
state->power_usage = -1;
- state->flags = 0;
+ state->flags = CPUIDLE_FLAG_TIME_VALID;
state->enter = poll_idle;
state->disabled = false;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index cf7f2f0e4ef5..71b523293354 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -122,9 +122,8 @@ struct menu_device {
int last_state_idx;
int needs_update;
- unsigned int expected_us;
+ unsigned int next_timer_us;
unsigned int predicted_us;
- unsigned int exit_us;
unsigned int bucket;
unsigned int correction_factor[BUCKETS];
unsigned int intervals[INTERVALS];
@@ -257,7 +256,7 @@ again:
stddev = int_sqrt(stddev);
if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
|| stddev <= 20) {
- if (data->expected_us > avg)
+ if (data->next_timer_us > avg)
data->predicted_us = avg;
return;
}
@@ -289,7 +288,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i;
- int multiplier;
+ unsigned int interactivity_req;
struct timespec t;
if (data->needs_update) {
@@ -298,7 +297,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
data->last_state_idx = 0;
- data->exit_us = 0;
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0))
@@ -306,13 +304,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
/* determine the expected residency time, round up */
t = ktime_to_timespec(tick_nohz_get_sleep_length());
- data->expected_us =
+ data->next_timer_us =
t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
- data->bucket = which_bucket(data->expected_us);
-
- multiplier = performance_multiplier();
+ data->bucket = which_bucket(data->next_timer_us);
/*
* if the correction factor is 0 (eg first time init or cpu hotplug
@@ -326,17 +322,26 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* operands are 32 bits.
* Make sure to round up for half microseconds.
*/
- data->predicted_us = div_round64((uint64_t)data->expected_us *
+ data->predicted_us = div_round64((uint64_t)data->next_timer_us *
data->correction_factor[data->bucket],
RESOLUTION * DECAY);
get_typical_interval(data);
/*
+ * Performance multiplier defines a minimum predicted idle
+ * duration / latency ratio. Adjust the latency limit if
+ * necessary.
+ */
+ interactivity_req = data->predicted_us / performance_multiplier();
+ if (latency_req > interactivity_req)
+ latency_req = interactivity_req;
+
+ /*
* We want to default to C1 (hlt), not to busy polling
* unless the timer is happening really really soon.
*/
- if (data->expected_us > 5 &&
+ if (data->next_timer_us > 5 &&
!drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
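The interactivity clamp above is easiest to see with numbers (hypothetical): if the governor predicts 500 us of idle and performance_multiplier() returns 10, interactivity_req is 50 us; a PM-QoS latency_req of 200 us is then tightened to 50 us, so the state loop below skips anything with exit_latency above 50 us. This takes over from the per-state "exit_latency * multiplier > predicted_us" test that the next hunk deletes.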
@@ -355,11 +360,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
continue;
if (s->exit_latency > latency_req)
continue;
- if (s->exit_latency * multiplier > data->predicted_us)
- continue;
data->last_state_idx = i;
- data->exit_us = s->exit_latency;
}
return data->last_state_idx;
@@ -390,36 +392,47 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int last_idx = data->last_state_idx;
- unsigned int last_idle_us = cpuidle_get_last_residency(dev);
struct cpuidle_state *target = &drv->states[last_idx];
unsigned int measured_us;
unsigned int new_factor;
/*
- * Ugh, this idle state doesn't support residency measurements, so we
- * are basically lost in the dark. As a compromise, assume we slept
- * for the whole expected time.
+ * Try to figure out how much time passed between entry to low
+ * power state and occurrence of the wakeup event.
+ *
+ * If the entered idle state didn't support residency measurements,
+ * we are basically lost in the dark about how much time passed.
+ * As a compromise, assume we slept for the whole expected time.
+ *
+ * Any measured amount of time will include the exit latency.
+ * Since we are interested in when the wakeup begun, not when it
+ * was completed, we must subtract the exit latency. However, if
+ * the measured amount of time is less than the exit latency,
+ * assume the state was never reached and the exit latency is 0.
*/
- if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
- last_idle_us = data->expected_us;
+ if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID))) {
+ /* Use timer value as is */
+ measured_us = data->next_timer_us;
+ } else {
+ /* Use measured value */
+ measured_us = cpuidle_get_last_residency(dev);
- measured_us = last_idle_us;
-
- /*
- * We correct for the exit latency; we are assuming here that the
- * exit latency happens after the event that we're interested in.
- */
- if (measured_us > data->exit_us)
- measured_us -= data->exit_us;
+ /* Deduct exit latency */
+ if (measured_us > target->exit_latency)
+ measured_us -= target->exit_latency;
+ /* Make sure our coefficients do not exceed unity */
+ if (measured_us > data->next_timer_us)
+ measured_us = data->next_timer_us;
+ }
/* Update our correction ratio */
new_factor = data->correction_factor[data->bucket];
new_factor -= new_factor / DECAY;
- if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
- new_factor += RESOLUTION * measured_us / data->expected_us;
+ if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
+ new_factor += RESOLUTION * measured_us / data->next_timer_us;
else
/*
* we were idle so long that we count it as a perfect
@@ -439,7 +452,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
/* update the repeating-pattern data */
- data->intervals[data->interval_ptr++] = last_idle_us;
+ data->intervals[data->interval_ptr++] = measured_us;
if (data->interval_ptr >= INTERVALS)
data->interval_ptr = 0;
}
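A worked pass through the correction-factor update, assuming the menu governor's usual constants (RESOLUTION = 1024, DECAY = 8) and a bucket sitting at its unity value of RESOLUTION * DECAY = 8192: with measured_us = 300 and next_timer_us = 600, new_factor = 8192 - 8192/8 + 1024*300/600 = 7168 + 512 = 7680, and the next prediction becomes next_timer_us * 7680 / (1024 * 8), roughly 94% of the timer distance.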
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index e918b6d0caf7..efe2f175168f 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -293,6 +293,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
}
define_show_state_function(exit_latency)
+define_show_state_function(target_residency)
define_show_state_function(power_usage)
define_show_state_ull_function(usage)
define_show_state_ull_function(time)
@@ -304,6 +305,7 @@ define_store_state_ull_function(disable)
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
define_one_state_ro(latency, show_state_exit_latency);
+define_one_state_ro(residency, show_state_target_residency);
define_one_state_ro(power, show_state_power_usage);
define_one_state_ro(usage, show_state_usage);
define_one_state_ro(time, show_state_time);
@@ -313,6 +315,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
&attr_name.attr,
&attr_desc.attr,
&attr_latency.attr,
+ &attr_residency.attr,
&attr_power.attr,
&attr_usage.attr,
&attr_time.attr,
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 13857f5d28f7..03ccdb0ccf9e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -262,6 +262,17 @@ config CRYPTO_DEV_OMAP_AES
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
+config CRYPTO_DEV_OMAP_DES
+ tristate "Support for OMAP DES3DES hw engine"
+ depends on ARCH_OMAP2PLUS
+ select CRYPTO_DES
+ select CRYPTO_BLKCIPHER2
+ help
+ OMAP processors have DES/3DES module accelerator. Select this if you
+ want to use the OMAP module for DES and 3DES algorithms. Currently
+	  want to use the OMAP module for DES and 3DES algorithms. Currently
+	  the ECB and CBC modes of operation are supported by the driver.
+	  Accesses made on unaligned boundaries are also supported.
config CRYPTO_DEV_PICOXCELL
tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
depends on ARCH_PICOXCELL && HAVE_CLK
@@ -300,17 +311,6 @@ config CRYPTO_DEV_S5P
Select this to offload Samsung S5PV210 or S5PC110 from AES
algorithms execution.
-config CRYPTO_DEV_TEGRA_AES
- tristate "Support for TEGRA AES hw engine"
- depends on ARCH_TEGRA
- select CRYPTO_AES
- help
- TEGRA processors have AES module accelerator. Select this if you
- want to use the TEGRA module for AES algorithms.
-
- To compile this driver as a module, choose M here: the module
- will be called tegra-aes.
-
config CRYPTO_DEV_NX
bool "Support for IBM Power7+ in-Nest cryptographic acceleration"
depends on PPC64 && IBMVIO
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 0bc6aa0a54d7..482f090d16d0 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
@@ -21,5 +22,4 @@ obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
-obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index d797f31f5d85..c9ff298e6d26 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -139,7 +139,6 @@ static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
/* setup CRC interrupts */
crc->regs->status = CMPERRI | DCNTEXPI;
crc->regs->intrenset = CMPERRI | DCNTEXPI;
- SSYNC();
return 0;
}
@@ -285,17 +284,12 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
if (i == 0)
return;
- flush_dcache_range((unsigned int)crc->sg_cpu,
- (unsigned int)crc->sg_cpu +
- i * sizeof(struct dma_desc_array));
-
/* Set the last descriptor to stop mode */
crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
crc->sg_cpu[i - 1].cfg |= DI_EN;
set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
set_dma_x_count(crc->dma_ch, 0);
set_dma_x_modify(crc->dma_ch, 0);
- SSYNC();
set_dma_config(crc->dma_ch, dma_config);
}
@@ -415,7 +409,6 @@ finish_update:
/* finally kick off CRC operation */
crc->regs->control |= BLKEN;
- SSYNC();
return -EINPROGRESS;
}
@@ -539,7 +532,6 @@ static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
if (crc->regs->status & DCNTEXP) {
crc->regs->status = DCNTEXP;
- SSYNC();
/* prepare results */
put_unaligned_le32(crc->regs->result, crc->req->result);
@@ -594,7 +586,7 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
unsigned int timeout = 100000;
int ret;
- crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+ crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
if (!crc) {
dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n");
return -ENOMEM;
@@ -610,42 +602,39 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- ret = -ENOENT;
- goto out_error_free_mem;
+ return -ENOENT;
}
- crc->regs = ioremap(res->start, resource_size(res));
- if (!crc->regs) {
+ crc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR((void *)crc->regs)) {
dev_err(&pdev->dev, "Cannot map CRC IO\n");
- ret = -ENXIO;
- goto out_error_free_mem;
+ return PTR_ERR((void *)crc->regs);
}
crc->irq = platform_get_irq(pdev, 0);
if (crc->irq < 0) {
dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
- ret = -ENOENT;
- goto out_error_unmap;
+ return -ENOENT;
}
- ret = request_irq(crc->irq, bfin_crypto_crc_handler, IRQF_SHARED, dev_name(dev), crc);
+ ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
+ IRQF_SHARED, dev_name(dev), crc);
if (ret) {
dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
- goto out_error_unmap;
+ return ret;
}
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No CRC DMA channel specified\n");
- ret = -ENOENT;
- goto out_error_irq;
+ return -ENOENT;
}
crc->dma_ch = res->start;
ret = request_dma(crc->dma_ch, dev_name(dev));
if (ret) {
dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
- goto out_error_irq;
+ return ret;
}
crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
@@ -660,9 +649,7 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
crc->regs->control = 0;
- SSYNC();
crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data;
- SSYNC();
while (!(crc->regs->status & LUTDONE) && (--timeout) > 0)
cpu_relax();
@@ -693,12 +680,6 @@ out_error_dma:
if (crc->sg_cpu)
dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
free_dma(crc->dma_ch);
-out_error_irq:
- free_irq(crc->irq, crc);
-out_error_unmap:
- iounmap((void *)crc->regs);
-out_error_free_mem:
- kfree(crc);
return ret;
}
@@ -721,10 +702,6 @@ static int bfin_crypto_crc_remove(struct platform_device *pdev)
crypto_unregister_ahash(&algs);
tasklet_kill(&crc->done_task);
free_dma(crc->dma_ch);
- if (crc->irq > 0)
- free_irq(crc->irq, crc);
- iounmap((void *)crc->regs);
- kfree(crc);
return 0;
}
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b71f2fd749df..5f891254db73 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -66,10 +66,14 @@
/* length of descriptors text */
#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
20 * CAAM_CMD_SZ)
@@ -104,27 +108,14 @@ static inline void append_dec_op1(u32 *desc, u32 type)
}
/*
- * Wait for completion of class 1 key loading before allowing
- * error propagation
- */
-static inline void append_dec_shr_done(u32 *desc)
-{
- u32 *jump_cmd;
-
- jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
-/*
* For aead functions, read payload and write payload,
* both of which are specified in req->src and req->dst
*/
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
@@ -211,9 +202,196 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
append_key_aead(desc, ctx, keys_fit_inline);
set_jump_tgt_here(desc, key_jump_cmd);
+}
+
+static int aead_null_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
+ u32 *desc;
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /*
+ * NULL encryption; IV is zero
+ * assoclen = (assoclen + cryptlen) - cryptlen
+ */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Prepare to read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ /* aead_decrypt shared descriptor */
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Prepare to read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH2 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /*
+ * Insert a NOP here, since we need at least 4 instructions between
+ * code patching the descriptor buffer and the location being patched.
+ */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
}
static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -222,13 +400,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline = false;
- u32 *key_jump_cmd, *jump_cmd;
u32 geniv, moveiv;
u32 *desc;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->authsize)
return 0;
+ /* NULL encryption / decryption */
+ if (!ctx->enckeylen)
+ return aead_null_set_sh_desc(aead);
+
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
@@ -253,7 +434,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* assoclen + cryptlen = seqinlen - ivsize */
append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
- /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
/* read assoc before reading payload */
@@ -296,30 +477,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- desc = ctx->sh_desc_dec;
-
/* aead_decrypt shared descriptor */
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_aead(desc, ctx, keys_fit_inline);
+ desc = ctx->sh_desc_dec;
- /* Only propagate error immediately if shared */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, key_jump_cmd);
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
- set_jump_tgt_here(desc, jump_cmd);
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
/* Class 2 operation */
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- /* assoclen + cryptlen = seqinlen - ivsize */
+ /* assoclen + cryptlen = seqinlen - ivsize - authsize */
append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
- ctx->authsize + tfm->ivsize);
+ ctx->authsize + tfm->ivsize);
/* assoclen = (assoclen + cryptlen) - cryptlen */
append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
@@ -340,7 +509,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Load ICV */
append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
- append_dec_shr_done(desc);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
@@ -532,7 +700,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
struct device *jrdev = ctx->jrdev;
int ret = 0;
- u32 *key_jump_cmd, *jump_cmd;
+ u32 *key_jump_cmd;
u32 *desc;
#ifdef DEBUG
@@ -563,9 +731,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
set_jump_tgt_here(desc, key_jump_cmd);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
/* Load iv */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
LDST_CLASS_1_CCB | tfm->ivsize);
@@ -603,11 +768,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->enckeylen, CLASS_1 |
KEY_DEST_CLASS_REG);
- /* For aead, only propagate error immediately if shared */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, key_jump_cmd);
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
- set_jump_tgt_here(desc, jump_cmd);
/* load IV */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
@@ -619,9 +780,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Perform operation */
ablkcipher_append_src_dst(desc);
- /* Wait for key to load before allowing propagating error */
- append_dec_shr_done(desc);
-
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1459,6 +1617,11 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
return ret;
}
+static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
+{
+ return aead_encrypt(&areq->areq);
+}
+
/*
* allocate and map the ablkcipher extended descriptor for ablkcipher
*/
@@ -1648,6 +1811,124 @@ struct caam_alg_template {
static struct caam_alg_template driver_algs[] = {
/* single-pass ipsec_esp descriptor */
{
+ .name = "authenc(hmac(md5),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha1),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha224),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha256),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha384),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha512),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(md5),cbc(aes))",
.driver_name = "authenc-hmac-md5-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
@@ -2099,6 +2380,11 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
desc_bytes(ctx->sh_desc_givenc),
DMA_TO_DEVICE);
+ if (ctx->key_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->key_dma))
+ dma_unmap_single(ctx->jrdev, ctx->key_dma,
+ ctx->enckeylen + ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 28486b19fc36..3529b54048c9 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -76,7 +76,7 @@ struct caam_rng_ctx {
struct buf_data bufs[2];
};
-static struct caam_rng_ctx rng_ctx;
+static struct caam_rng_ctx *rng_ctx;
static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
@@ -137,7 +137,7 @@ static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
- struct caam_rng_ctx *ctx = &rng_ctx;
+ struct caam_rng_ctx *ctx = rng_ctx;
struct buf_data *bd = &ctx->bufs[ctx->current_buf];
int next_buf_idx, copied_idx;
int err;
@@ -237,12 +237,12 @@ static void caam_cleanup(struct hwrng *rng)
struct buf_data *bd;
for (i = 0; i < 2; i++) {
- bd = &rng_ctx.bufs[i];
+ bd = &rng_ctx->bufs[i];
if (atomic_read(&bd->empty) == BUF_PENDING)
wait_for_completion(&bd->filled);
}
- rng_unmap_ctx(&rng_ctx);
+ rng_unmap_ctx(rng_ctx);
}
static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
@@ -273,8 +273,9 @@ static struct hwrng caam_rng = {
static void __exit caam_rng_exit(void)
{
- caam_jr_free(rng_ctx.jrdev);
+ caam_jr_free(rng_ctx->jrdev);
hwrng_unregister(&caam_rng);
+ kfree(rng_ctx);
}
static int __init caam_rng_init(void)
@@ -286,8 +287,10 @@ static int __init caam_rng_init(void)
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}
-
- caam_init_rng(&rng_ctx, dev);
+ rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
+ if (!rng_ctx)
+ return -ENOMEM;
+ caam_init_rng(rng_ctx, dev);
dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 762aeff626ac..f227922cea38 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -26,6 +26,7 @@
#include <net/xfrm.h>
#include <crypto/algapi.h>
+#include <crypto/null.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 63fb1af2c431..1c38f86bf63a 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -14,7 +14,6 @@
#include "jr.h"
#include "desc_constr.h"
#include "error.h"
-#include "ctrl.h"
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
@@ -352,32 +351,17 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
/**
* caam_get_era() - Return the ERA of the SEC on SoC, based
- * on the SEC_VID register.
- * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown.
- * @caam_id - the value of the SEC_VID register
+ * on "sec-era" propery in the DTS. This property is updated by u-boot.
**/
-int caam_get_era(u64 caam_id)
+int caam_get_era(void)
{
- struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
- static const struct {
- u16 ip_id;
- u8 maj_rev;
- u8 era;
- } caam_eras[] = {
- {0x0A10, 1, 1},
- {0x0A10, 2, 2},
- {0x0A12, 1, 3},
- {0x0A14, 1, 3},
- {0x0A14, 2, 4},
- {0x0A16, 1, 4},
- {0x0A11, 1, 4}
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
- if (caam_eras[i].ip_id == sec_vid->ip_id &&
- caam_eras[i].maj_rev == sec_vid->maj_rev)
- return caam_eras[i].era;
+ struct device_node *caam_node;
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
+ "fsl,sec-era",
+ NULL);
+ return prop ? *prop : -ENOTSUPP;
+ }
return -ENOTSUPP;
}
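For reference, a hypothetical device-tree fragment that would satisfy the lookup above; the node name and era value are placeholders, only the "fsl,sec-v4.0" compatible string and the "fsl,sec-era" property come from the code:

	/*
	 *	crypto@300000 {
	 *		compatible = "fsl,sec-v4.0";
	 *		fsl,sec-era = <4>;
	 *	};
	 */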
@@ -443,13 +427,10 @@ static int caam_probe(struct platform_device *pdev)
* for all, then go probe each one.
*/
rspec = 0;
- for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
- rspec++;
- if (!rspec) {
- /* for backward compatible with device trees */
- for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
rspec++;
- }
ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
GFP_KERNEL);
@@ -460,18 +441,9 @@ static int caam_probe(struct platform_device *pdev)
ring = 0;
ctrlpriv->total_jobrs = 0;
- for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
- ctrlpriv->jrpdev[ring] =
- of_platform_device_create(np, NULL, dev);
- if (!ctrlpriv->jrpdev[ring]) {
- pr_warn("JR%d Platform device creation error\n", ring);
- continue;
- }
- ctrlpriv->total_jobrs++;
- ring++;
- }
- if (!ring) {
- for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
ctrlpriv->jrpdev[ring] =
of_platform_device_create(np, NULL, dev);
if (!ctrlpriv->jrpdev[ring]) {
@@ -482,7 +454,6 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->total_jobrs++;
ring++;
}
- }
/* Check to see if QI present. If so, enable */
ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
@@ -564,7 +535,7 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era(caam_id));
+ caam_get_era());
dev_info(dev, "job rings = %d, qi = %d\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present);
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
index 980d44eaaf40..cac5402a46eb 100644
--- a/drivers/crypto/caam/ctrl.h
+++ b/drivers/crypto/caam/ctrl.h
@@ -8,6 +8,6 @@
#define CTRL_H
/* Prototypes for backend-level services exposed to APIs */
-int caam_get_era(u64 caam_id);
+int caam_get_era(void);
#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index cd5f678847ce..7eec20bb3849 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -155,21 +155,29 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
append_data(desc, data, len);
}
-static inline u32 *append_jump(u32 *desc, u32 options)
-{
- u32 *cmd = desc_end(desc);
-
- PRINT_POS;
- append_cmd(desc, CMD_JUMP | options);
-
- return cmd;
+#define APPEND_CMD_RET(cmd, op) \
+static inline u32 *append_##cmd(u32 *desc, u32 options) \
+{ \
+ u32 *cmd = desc_end(desc); \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | options); \
+ return cmd; \
}
+APPEND_CMD_RET(jump, JUMP)
+APPEND_CMD_RET(move, MOVE)
static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
{
*jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
}
+static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+{
+ *move_cmd &= ~MOVE_OFFSET_MASK;
+ *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) &
+ MOVE_OFFSET_MASK);
+}
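Usage sketch for set_move_tgt_here(), mirroring the caamalg changes earlier in this patch (illustrative only, no new API):

	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	/* ... append the commands sitting at the move target ... */
	set_move_tgt_here(desc, read_move_cmd);	/* back-patch the offset */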
+
#define APPEND_CMD(cmd, op) \
static inline void append_##cmd(u32 *desc, u32 options) \
{ \
@@ -177,7 +185,6 @@ static inline void append_##cmd(u32 *desc, u32 options) \
append_cmd(desc, CMD_##op | options); \
}
APPEND_CMD(operation, OPERATION)
-APPEND_CMD(move, MOVE)
#define APPEND_CMD_LEN(cmd, op) \
static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
@@ -328,7 +335,7 @@ append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
do { \
APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
append_cmd(desc, data); \
-} while (0);
+} while (0)
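The dropped trailing ';' matters because the macro must behave as a single statement; with the old "} while (0);" form, a caller such as the following (hypothetical) would not compile, since the extra empty statement detaches the else from the if:

	if (cond)
		append_math_add_imm_u32(desc, dest, src0, src1, data);
	else
		append_math_sub_imm_u32(desc, dest, src0, src1, data);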
#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index d50174f45b21..cbde8b95a6f8 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -74,10 +74,10 @@
#endif
#else
#ifdef __LITTLE_ENDIAN
-#define wr_reg32(reg, data) __raw_writel(reg, data)
+#define wr_reg32(reg, data) __raw_writel(data, reg)
#define rd_reg32(reg) __raw_readl(reg)
#ifdef CONFIG_64BIT
-#define wr_reg64(reg, data) __raw_writeq(reg, data)
+#define wr_reg64(reg, data) __raw_writeq(data, reg)
#define rd_reg64(reg) __raw_readq(reg)
#endif
#endif
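Note on the two fixes above: __raw_writel() and __raw_writeq() take (value, address), so the little-endian wrappers previously passed their arguments reversed and wrote to the wrong location. After the fix the macros behave as expected:

	wr_reg32(reg, data);	/* expands to __raw_writel(data, reg) */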
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 2636f044789d..20dc848481e7 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
@@ -24,28 +25,33 @@ MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
+static unsigned int aes_disable;
+module_param(aes_disable, uint, 0444);
+MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");
+
+static unsigned int sha_disable;
+module_param(sha_disable, uint, 0444);
+MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
+
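A usage sketch for the new parameters (the module name ccp_crypto is an assumption, not confirmed by this hunk):

	/*
	 *	modprobe ccp_crypto sha_disable=1
	 *
	 * With 0444 permissions the current values are also visible
	 * read-only under /sys/module/ccp_crypto/parameters/.
	 */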
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
-/* For any tfm, requests for that tfm on the same CPU must be returned
- * in the order received. With multiple queues available, the CCP can
- * process more than one cmd at a time. Therefore we must maintain
- * a cmd list to insure the proper ordering of requests on a given tfm/cpu
- * combination.
+/* For any tfm, requests for that tfm must be returned in the order
+ * received. With multiple queues available, the CCP can process more
+ * than one cmd at a time. Therefore we must maintain a cmd list to ensure
+ * the proper ordering of requests on a given tfm.
*/
-struct ccp_crypto_cpu_queue {
+struct ccp_crypto_queue {
struct list_head cmds;
struct list_head *backlog;
unsigned int cmd_count;
};
-#define CCP_CRYPTO_MAX_QLEN 50
+#define CCP_CRYPTO_MAX_QLEN 100
-struct ccp_crypto_percpu_queue {
- struct ccp_crypto_cpu_queue __percpu *cpu_queue;
-};
-static struct ccp_crypto_percpu_queue req_queue;
+static struct ccp_crypto_queue req_queue;
+static spinlock_t req_queue_lock;
struct ccp_crypto_cmd {
struct list_head entry;
@@ -62,8 +68,6 @@ struct ccp_crypto_cmd {
/* Used for held command processing to determine state */
int ret;
-
- int cpu;
};
struct ccp_crypto_cpu {
@@ -82,25 +86,21 @@ static inline bool ccp_crypto_success(int err)
return true;
}
-/*
- * ccp_crypto_cmd_complete must be called while running on the appropriate
- * cpu and the caller must have done a get_cpu to disable preemption
- */
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
- struct ccp_crypto_cpu_queue *cpu_queue;
struct ccp_crypto_cmd *held = NULL, *tmp;
+ unsigned long flags;
*backlog = NULL;
- cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
+ spin_lock_irqsave(&req_queue_lock, flags);
/* Held cmds will be after the current cmd in the queue so start
* searching for a cmd with a matching tfm for submission.
*/
tmp = crypto_cmd;
- list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
+ list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
if (crypto_cmd->tfm != tmp->tfm)
continue;
held = tmp;
@@ -111,47 +111,45 @@ static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
* Because cmds can be executed from any point in the cmd list
* special precautions have to be taken when handling the backlog.
*/
- if (cpu_queue->backlog != &cpu_queue->cmds) {
+ if (req_queue.backlog != &req_queue.cmds) {
/* Skip over this cmd if it is the next backlog cmd */
- if (cpu_queue->backlog == &crypto_cmd->entry)
- cpu_queue->backlog = crypto_cmd->entry.next;
+ if (req_queue.backlog == &crypto_cmd->entry)
+ req_queue.backlog = crypto_cmd->entry.next;
- *backlog = container_of(cpu_queue->backlog,
+ *backlog = container_of(req_queue.backlog,
struct ccp_crypto_cmd, entry);
- cpu_queue->backlog = cpu_queue->backlog->next;
+ req_queue.backlog = req_queue.backlog->next;
/* Skip over this cmd if it is now the next backlog cmd */
- if (cpu_queue->backlog == &crypto_cmd->entry)
- cpu_queue->backlog = crypto_cmd->entry.next;
+ if (req_queue.backlog == &crypto_cmd->entry)
+ req_queue.backlog = crypto_cmd->entry.next;
}
/* Remove the cmd entry from the list of cmds */
- cpu_queue->cmd_count--;
+ req_queue.cmd_count--;
list_del(&crypto_cmd->entry);
+ spin_unlock_irqrestore(&req_queue_lock, flags);
+
return held;
}
-static void ccp_crypto_complete_on_cpu(struct work_struct *work)
+static void ccp_crypto_complete(void *data, int err)
{
- struct ccp_crypto_cpu *cpu_work =
- container_of(work, struct ccp_crypto_cpu, work);
- struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd;
+ struct ccp_crypto_cmd *crypto_cmd = data;
struct ccp_crypto_cmd *held, *next, *backlog;
struct crypto_async_request *req = crypto_cmd->req;
struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
- int cpu, ret;
-
- cpu = get_cpu();
+ int ret;
- if (cpu_work->err == -EINPROGRESS) {
+ if (err == -EINPROGRESS) {
/* Only propagate the -EINPROGRESS if necessary */
if (crypto_cmd->ret == -EBUSY) {
crypto_cmd->ret = -EINPROGRESS;
req->complete(req, -EINPROGRESS);
}
- goto e_cpu;
+ return;
}
/* Operation has completed - update the queue before invoking
@@ -169,18 +167,25 @@ static void ccp_crypto_complete_on_cpu(struct work_struct *work)
req->complete(req, -EINPROGRESS);
/* Completion callbacks */
- ret = cpu_work->err;
+ ret = err;
if (ctx->complete)
ret = ctx->complete(req, ret);
req->complete(req, ret);
/* Submit the next cmd */
while (held) {
+ /* Since we have already queued the cmd, we must indicate that
+ * we can backlog so as not to "lose" this request.
+ */
+ held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
ret = ccp_enqueue_cmd(held->cmd);
if (ccp_crypto_success(ret))
break;
/* Error occurred, report it and get the next entry */
+ ctx = crypto_tfm_ctx(held->req->tfm);
+ if (ctx->complete)
+ ret = ctx->complete(held->req, ret);
held->req->complete(held->req, ret);
next = ccp_crypto_cmd_complete(held, &backlog);
@@ -194,52 +199,29 @@ static void ccp_crypto_complete_on_cpu(struct work_struct *work)
}
kfree(crypto_cmd);
-
-e_cpu:
- put_cpu();
-
- complete(&cpu_work->completion);
-}
-
-static void ccp_crypto_complete(void *data, int err)
-{
- struct ccp_crypto_cmd *crypto_cmd = data;
- struct ccp_crypto_cpu cpu_work;
-
- INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu);
- init_completion(&cpu_work.completion);
- cpu_work.crypto_cmd = crypto_cmd;
- cpu_work.err = err;
-
- schedule_work_on(crypto_cmd->cpu, &cpu_work.work);
-
- /* Keep the completion call synchronous */
- wait_for_completion(&cpu_work.completion);
}
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
- struct ccp_crypto_cpu_queue *cpu_queue;
struct ccp_crypto_cmd *active = NULL, *tmp;
- int cpu, ret;
-
- cpu = get_cpu();
- crypto_cmd->cpu = cpu;
+ unsigned long flags;
+ bool free_cmd = true;
+ int ret;
- cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
+ spin_lock_irqsave(&req_queue_lock, flags);
/* Check if the cmd can/should be queued */
- if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+ if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
ret = -EBUSY;
if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
- goto e_cpu;
+ goto e_lock;
}
/* Look for an entry with the same tfm. If there is a cmd
- * with the same tfm in the list for this cpu then the current
- * cmd cannot be submitted to the CCP yet.
+ * with the same tfm in the list then the current cmd cannot
+ * be submitted to the CCP yet.
*/
- list_for_each_entry(tmp, &cpu_queue->cmds, entry) {
+ list_for_each_entry(tmp, &req_queue.cmds, entry) {
if (crypto_cmd->tfm != tmp->tfm)
continue;
active = tmp;
@@ -250,21 +232,29 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
if (!active) {
ret = ccp_enqueue_cmd(crypto_cmd->cmd);
if (!ccp_crypto_success(ret))
- goto e_cpu;
+ goto e_lock; /* Error, don't queue it */
+ if ((ret == -EBUSY) &&
+ !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+ goto e_lock; /* Not backlogging, don't queue it */
}
- if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+ if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
ret = -EBUSY;
- if (cpu_queue->backlog == &cpu_queue->cmds)
- cpu_queue->backlog = &crypto_cmd->entry;
+ if (req_queue.backlog == &req_queue.cmds)
+ req_queue.backlog = &crypto_cmd->entry;
}
crypto_cmd->ret = ret;
- cpu_queue->cmd_count++;
- list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds);
+ req_queue.cmd_count++;
+ list_add_tail(&crypto_cmd->entry, &req_queue.cmds);
+
+ free_cmd = false;
-e_cpu:
- put_cpu();
+e_lock:
+ spin_unlock_irqrestore(&req_queue_lock, flags);
+
+ if (free_cmd)
+ kfree(crypto_cmd);
return ret;
}
@@ -281,7 +271,6 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
{
struct ccp_crypto_cmd *crypto_cmd;
gfp_t gfp;
- int ret;
gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
@@ -306,11 +295,7 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
else
cmd->flags &= ~CCP_CMD_MAY_BACKLOG;
- ret = ccp_crypto_enqueue_cmd(crypto_cmd);
- if (!ccp_crypto_success(ret))
- kfree(crypto_cmd);
-
- return ret;
+ return ccp_crypto_enqueue_cmd(crypto_cmd);
}
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
@@ -337,21 +322,25 @@ static int ccp_register_algs(void)
{
int ret;
- ret = ccp_register_aes_algs(&cipher_algs);
- if (ret)
- return ret;
+ if (!aes_disable) {
+ ret = ccp_register_aes_algs(&cipher_algs);
+ if (ret)
+ return ret;
- ret = ccp_register_aes_cmac_algs(&hash_algs);
- if (ret)
- return ret;
+ ret = ccp_register_aes_cmac_algs(&hash_algs);
+ if (ret)
+ return ret;
- ret = ccp_register_aes_xts_algs(&cipher_algs);
- if (ret)
- return ret;
+ ret = ccp_register_aes_xts_algs(&cipher_algs);
+ if (ret)
+ return ret;
+ }
- ret = ccp_register_sha_algs(&hash_algs);
- if (ret)
- return ret;
+ if (!sha_disable) {
+ ret = ccp_register_sha_algs(&hash_algs);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -374,50 +363,18 @@ static void ccp_unregister_algs(void)
}
}
-static int ccp_init_queues(void)
-{
- struct ccp_crypto_cpu_queue *cpu_queue;
- int cpu;
-
- req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue);
- if (!req_queue.cpu_queue)
- return -ENOMEM;
-
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
- INIT_LIST_HEAD(&cpu_queue->cmds);
- cpu_queue->backlog = &cpu_queue->cmds;
- cpu_queue->cmd_count = 0;
- }
-
- return 0;
-}
-
-static void ccp_fini_queue(void)
-{
- struct ccp_crypto_cpu_queue *cpu_queue;
- int cpu;
-
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
- BUG_ON(!list_empty(&cpu_queue->cmds));
- }
- free_percpu(req_queue.cpu_queue);
-}
-
static int ccp_crypto_init(void)
{
int ret;
- ret = ccp_init_queues();
- if (ret)
- return ret;
+ spin_lock_init(&req_queue_lock);
+ INIT_LIST_HEAD(&req_queue.cmds);
+ req_queue.backlog = &req_queue.cmds;
+ req_queue.cmd_count = 0;
ret = ccp_register_algs();
- if (ret) {
+ if (ret)
ccp_unregister_algs();
- ccp_fini_queue();
- }
return ret;
}
@@ -425,7 +382,6 @@ static int ccp_crypto_init(void)
static void ccp_crypto_exit(void)
{
ccp_unregister_algs();
- ccp_fini_queue();
}
module_init(ccp_crypto_init);
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 3867290b3531..873f23425245 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -24,75 +24,10 @@
#include "ccp-crypto.h"
-struct ccp_sha_result {
- struct completion completion;
- int err;
-};
-
-static void ccp_sync_hash_complete(struct crypto_async_request *req, int err)
-{
- struct ccp_sha_result *result = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- result->err = err;
- complete(&result->completion);
-}
-
-static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf,
- struct scatterlist *sg, unsigned int len)
-{
- struct ccp_sha_result result;
- struct ahash_request *req;
- int ret;
-
- init_completion(&result.completion);
-
- req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- ccp_sync_hash_complete, &result);
- ahash_request_set_crypt(req, sg, buf, len);
-
- ret = crypto_ahash_digest(req);
- if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
- ret = wait_for_completion_interruptible(&result.completion);
- if (!ret)
- ret = result.err;
- }
-
- ahash_request_free(req);
-
- return ret;
-}
-
-static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
-{
- struct ahash_request *req = ahash_request_cast(async_req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct scatterlist sg[2];
- unsigned int block_size =
- crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
- unsigned int digest_size = crypto_ahash_digestsize(tfm);
-
- sg_init_table(sg, ARRAY_SIZE(sg));
- sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
- sg_set_buf(&sg[1], rctx->ctx, digest_size);
-
- return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
- block_size + digest_size);
-}
-
static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
unsigned int digest_size = crypto_ahash_digestsize(tfm);
@@ -112,10 +47,6 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
if (req->result)
memcpy(req->result, rctx->ctx, digest_size);
- /* If we're doing an HMAC, we need to perform that on the final op */
- if (rctx->final && ctx->u.sha.key_len)
- ret = ccp_sha_finish_hmac(async_req);
-
e_free:
sg_free_table(&rctx->data_sg);
@@ -126,6 +57,7 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
unsigned int final)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct scatterlist *sg;
unsigned int block_size =
@@ -196,6 +128,11 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
rctx->cmd.u.sha.src = sg;
rctx->cmd.u.sha.src_len = rctx->hash_cnt;
+ rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
+ &ctx->u.sha.opad_sg : NULL;
+ rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
+ ctx->u.sha.opad_count : 0;
+ rctx->cmd.u.sha.first = rctx->first;
rctx->cmd.u.sha.final = rctx->final;
rctx->cmd.u.sha.msg_bits = rctx->msg_bits;
@@ -218,7 +155,6 @@ static int ccp_sha_init(struct ahash_request *req)
memset(rctx, 0, sizeof(*rctx));
- memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx));
rctx->type = alg->type;
rctx->first = 1;
@@ -261,10 +197,13 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct scatterlist sg;
- unsigned int block_size =
- crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
- unsigned int digest_size = crypto_ahash_digestsize(tfm);
+ struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
+ struct {
+ struct shash_desc sdesc;
+ char ctx[crypto_shash_descsize(shash)];
+ } desc;
+ unsigned int block_size = crypto_shash_blocksize(shash);
+ unsigned int digest_size = crypto_shash_digestsize(shash);
int i, ret;
/* Set to zero until complete */
@@ -277,8 +216,12 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
if (key_len > block_size) {
/* Must hash the input key */
- sg_init_one(&sg, key, key_len);
- ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len);
+ desc.sdesc.tfm = shash;
+ desc.sdesc.flags = crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ ret = crypto_shash_digest(&desc.sdesc, key, key_len,
+ ctx->u.sha.key);
if (ret) {
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
@@ -293,6 +236,9 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
}
+ sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
+ ctx->u.sha.opad_count = block_size;
+
ctx->u.sha.key_len = key_len;
return 0;
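Background for the ipad/opad handling above, assuming the standard HMAC construction (RFC 2104):

	/*
	 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
	 *
	 * The CCP computes the inner hash; opad_sg/opad_count are passed
	 * down as sha->opad so the final operation can run one more SHA
	 * pass over (K ^ opad) || inner_digest to produce the HMAC.
	 */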
@@ -319,10 +265,9 @@ static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
- struct crypto_ahash *hmac_tfm;
+ struct crypto_shash *hmac_tfm;
- hmac_tfm = crypto_alloc_ahash(alg->child_alg,
- CRYPTO_ALG_TYPE_AHASH, 0);
+ hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
if (IS_ERR(hmac_tfm)) {
pr_warn("could not load driver %s need for HMAC support\n",
alg->child_alg);
@@ -339,35 +284,14 @@ static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->u.sha.hmac_tfm)
- crypto_free_ahash(ctx->u.sha.hmac_tfm);
+ crypto_free_shash(ctx->u.sha.hmac_tfm);
ccp_sha_cra_exit(tfm);
}
-static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
- cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
- cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
- cpu_to_be32(SHA1_H4), 0, 0, 0,
-};
-
-static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
- cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
- cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
- cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
- cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
-};
-
-static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
- cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
- cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
- cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
- cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
-};
-
struct ccp_sha_def {
const char *name;
const char *drv_name;
- const __be32 *init;
enum ccp_sha_type type;
u32 digest_size;
u32 block_size;
@@ -377,7 +301,6 @@ static struct ccp_sha_def sha_algs[] = {
{
.name = "sha1",
.drv_name = "sha1-ccp",
- .init = sha1_init,
.type = CCP_SHA_TYPE_1,
.digest_size = SHA1_DIGEST_SIZE,
.block_size = SHA1_BLOCK_SIZE,
@@ -385,7 +308,6 @@ static struct ccp_sha_def sha_algs[] = {
{
.name = "sha224",
.drv_name = "sha224-ccp",
- .init = sha224_init,
.type = CCP_SHA_TYPE_224,
.digest_size = SHA224_DIGEST_SIZE,
.block_size = SHA224_BLOCK_SIZE,
@@ -393,7 +315,6 @@ static struct ccp_sha_def sha_algs[] = {
{
.name = "sha256",
.drv_name = "sha256-ccp",
- .init = sha256_init,
.type = CCP_SHA_TYPE_256,
.digest_size = SHA256_DIGEST_SIZE,
.block_size = SHA256_BLOCK_SIZE,
@@ -460,7 +381,6 @@ static int ccp_register_sha_alg(struct list_head *head,
INIT_LIST_HEAD(&ccp_alg->entry);
- ccp_alg->init = def->init;
ccp_alg->type = def->type;
alg = &ccp_alg->alg;
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index b222231b6169..9aa4ae184f7f 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -137,11 +137,14 @@ struct ccp_aes_cmac_req_ctx {
#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
struct ccp_sha_ctx {
+ struct scatterlist opad_sg;
+ unsigned int opad_count;
+
unsigned int key_len;
u8 key[MAX_SHA_BLOCK_SIZE];
u8 ipad[MAX_SHA_BLOCK_SIZE];
u8 opad[MAX_SHA_BLOCK_SIZE];
- struct crypto_ahash *hmac_tfm;
+ struct crypto_shash *hmac_tfm;
};
struct ccp_sha_req_ctx {
@@ -167,9 +170,6 @@ struct ccp_sha_req_ctx {
unsigned int buf_count;
u8 buf[MAX_SHA_BLOCK_SIZE];
- /* HMAC support field */
- struct scatterlist pad_sg;
-
/* CCP driver command */
struct ccp_cmd cmd;
};
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index c3bc21264600..2c7816149b01 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -30,6 +30,11 @@ MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
+struct ccp_tasklet_data {
+ struct completion completion;
+ struct ccp_cmd *cmd;
+};
+
static struct ccp_device *ccp_dev;
static inline struct ccp_device *ccp_get_device(void)
@@ -192,17 +197,23 @@ static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
return cmd;
}
-static void ccp_do_cmd_complete(struct work_struct *work)
+static void ccp_do_cmd_complete(unsigned long data)
{
- struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
+ struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
+ struct ccp_cmd *cmd = tdata->cmd;
cmd->callback(cmd->data, cmd->ret);
+ complete(&tdata->completion);
}
static int ccp_cmd_queue_thread(void *data)
{
struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
struct ccp_cmd *cmd;
+ struct ccp_tasklet_data tdata;
+ struct tasklet_struct tasklet;
+
+ tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
@@ -220,8 +231,10 @@ static int ccp_cmd_queue_thread(void *data)
cmd->ret = ccp_run_cmd(cmd_q, cmd);
/* Schedule the completion callback */
- INIT_WORK(&cmd->work, ccp_do_cmd_complete);
- schedule_work(&cmd->work);
+ tdata.cmd = cmd;
+ init_completion(&tdata.completion);
+ tasklet_schedule(&tasklet);
+ wait_for_completion(&tdata.completion);
}
__set_current_state(TASK_RUNNING);
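	/*
	 * Reading of the change above (not authoritative): the per-cpu
	 * workqueue bounce is replaced by a per-thread tasklet, so the
	 * callback now runs in softirq context, and wait_for_completion()
	 * keeps the kthread from dequeuing the next cmd until the current
	 * callback has finished, preserving per-queue ordering.
	 */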
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 71ed3ade7e12..9ae006d69df4 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -23,6 +23,7 @@
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
#include "ccp-dev.h"
@@ -132,6 +133,27 @@ struct ccp_op {
} u;
};
+/* SHA initial context values */
+static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
+ cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
+ cpu_to_be32(SHA1_H4), 0, 0, 0,
+};
+
+static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
+ cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
+ cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
+ cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
+};
+
+static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
+ cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
+ cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
+ cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
+};
+
/* The CCP cannot perform zero-length sha operations so the caller
* is required to buffer data for the final operation. However, a
* sha operation for a message with a total length of zero is valid
@@ -1411,7 +1433,27 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (ret)
return ret;
- ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+ if (sha->first) {
+ const __be32 *init;
+
+ switch (sha->type) {
+ case CCP_SHA_TYPE_1:
+ init = ccp_sha1_init;
+ break;
+ case CCP_SHA_TYPE_224:
+ init = ccp_sha224_init;
+ break;
+ case CCP_SHA_TYPE_256:
+ init = ccp_sha256_init;
+ break;
+ default:
+ ret = -EINVAL;
+ goto e_ctx;
+ }
+ memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
+ } else
+ ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+
ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -1451,6 +1493,66 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+ if (sha->final && sha->opad) {
+ /* HMAC operation, recursively perform final SHA */
+ struct ccp_cmd hmac_cmd;
+ struct scatterlist sg;
+ u64 block_size, digest_size;
+ u8 *hmac_buf;
+
+ switch (sha->type) {
+ case CCP_SHA_TYPE_1:
+ block_size = SHA1_BLOCK_SIZE;
+ digest_size = SHA1_DIGEST_SIZE;
+ break;
+ case CCP_SHA_TYPE_224:
+ block_size = SHA224_BLOCK_SIZE;
+ digest_size = SHA224_DIGEST_SIZE;
+ break;
+ case CCP_SHA_TYPE_256:
+ block_size = SHA256_BLOCK_SIZE;
+ digest_size = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ ret = -EINVAL;
+ goto e_data;
+ }
+
+ if (sha->opad_len != block_size) {
+ ret = -EINVAL;
+ goto e_data;
+ }
+
+ hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
+ if (!hmac_buf) {
+ ret = -ENOMEM;
+ goto e_data;
+ }
+ sg_init_one(&sg, hmac_buf, block_size + digest_size);
+
+ scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
+ memcpy(hmac_buf + block_size, ctx.address, digest_size);
+
+ memset(&hmac_cmd, 0, sizeof(hmac_cmd));
+ hmac_cmd.engine = CCP_ENGINE_SHA;
+ hmac_cmd.u.sha.type = sha->type;
+ hmac_cmd.u.sha.ctx = sha->ctx;
+ hmac_cmd.u.sha.ctx_len = sha->ctx_len;
+ hmac_cmd.u.sha.src = &sg;
+ hmac_cmd.u.sha.src_len = block_size + digest_size;
+ hmac_cmd.u.sha.opad = NULL;
+ hmac_cmd.u.sha.opad_len = 0;
+ hmac_cmd.u.sha.first = 1;
+ hmac_cmd.u.sha.final = 1;
+ hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;
+
+ ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
+ if (ret)
+ cmd->engine_error = hmac_cmd.engine_error;
+
+ kfree(hmac_buf);
+ }
+
e_data:
ccp_free_data(&src, cmd_q);
@@ -1666,8 +1768,8 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
op.dst.type = CCP_MEMTYPE_SYSTEM;
op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
- op.src.u.dma.offset = dst.sg_wa.sg_used;
- op.src.u.dma.length = op.src.u.dma.length;
+ op.dst.u.dma.offset = dst.sg_wa.sg_used;
+ op.dst.u.dma.length = op.src.u.dma.length;
ret = ccp_perform_passthru(&op);
if (ret) {
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index a6db7fa6f891..7bbe0ab21eca 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -29,6 +29,8 @@
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
+#define DCP_ALIGNMENT 64
+
/* DCP DMA descriptor. */
struct dcp_dma_desc {
uint32_t next_cmd_addr;
@@ -48,7 +50,6 @@ struct dcp_coherent_block {
uint8_t sha_in_buf[DCP_BUF_SZ];
uint8_t aes_key[2 * AES_KEYSIZE_128];
- uint8_t sha_digest[SHA256_DIGEST_SIZE];
struct dcp_dma_desc desc[DCP_MAX_CHANS];
};
@@ -83,13 +84,16 @@ struct dcp_async_ctx {
unsigned int hot:1;
/* Crypto-specific context */
- unsigned int enc:1;
- unsigned int ecb:1;
struct crypto_ablkcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
};
+struct dcp_aes_req_ctx {
+ unsigned int enc:1;
+ unsigned int ecb:1;
+};
+
struct dcp_sha_req_ctx {
unsigned int init:1;
unsigned int fini:1;
@@ -190,10 +194,12 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
/*
* Encryption (AES128)
*/
-static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init)
+static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
+ struct ablkcipher_request *req, int init)
{
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+ struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
int ret;
dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
@@ -212,14 +218,14 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init)
/* Payload contains the key. */
desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
- if (actx->enc)
+ if (rctx->enc)
desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
if (init)
desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
- if (actx->ecb)
+ if (rctx->ecb)
desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
else
desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
@@ -247,6 +253,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
struct ablkcipher_request *req = ablkcipher_request_cast(arq);
struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
+ struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
struct scatterlist *dst = req->dst;
struct scatterlist *src = req->src;
@@ -271,7 +278,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
/* Copy the key from the temporary location. */
memcpy(key, actx->key, actx->key_len);
- if (!actx->ecb) {
+ if (!rctx->ecb) {
/* Copy the CBC IV just past the key. */
memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
/* CBC needs the INIT set. */
@@ -300,7 +307,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
* submit the buffer.
*/
if (actx->fill == out_off || sg_is_last(src)) {
- ret = mxs_dcp_run_aes(actx, init);
+ ret = mxs_dcp_run_aes(actx, req, init);
if (ret)
return ret;
init = 0;
@@ -391,13 +398,14 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
struct dcp *sdcp = global_sdcp;
struct crypto_async_request *arq = &req->base;
struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
+ struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
int ret;
if (unlikely(actx->key_len != AES_KEYSIZE_128))
return mxs_dcp_block_fallback(req, enc);
- actx->enc = enc;
- actx->ecb = ecb;
+ rctx->enc = enc;
+ rctx->ecb = ecb;
actx->chan = DCP_CHAN_CRYPTO;
mutex_lock(&sdcp->mutex[actx->chan]);
@@ -484,7 +492,7 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
return PTR_ERR(blk);
actx->fallback = blk;
- tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_async_ctx);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
return 0;
}
@@ -507,13 +515,11 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
- dma_addr_t digest_phys = dma_map_single(sdcp->dev,
- sdcp->coh->sha_digest,
- SHA256_DIGEST_SIZE,
- DMA_FROM_DEVICE);
+ dma_addr_t digest_phys = 0;
dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -534,14 +540,18 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
/* Set HASH_TERM bit for last transfer block. */
if (rctx->fini) {
+ digest_phys = dma_map_single(sdcp->dev, req->result,
+ halg->digestsize, DMA_FROM_DEVICE);
desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
desc->payload = digest_phys;
}
ret = mxs_dcp_start_dma(actx);
- dma_unmap_single(sdcp->dev, digest_phys, SHA256_DIGEST_SIZE,
- DMA_FROM_DEVICE);
+ if (rctx->fini)
+ dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
+ DMA_FROM_DEVICE);
+
dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
return ret;
@@ -558,7 +568,6 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
const int nents = sg_nents(req->src);
- uint8_t *digest = sdcp->coh->sha_digest;
uint8_t *in_buf = sdcp->coh->sha_in_buf;
uint8_t *src_buf;
@@ -605,14 +614,20 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
rctx->fini = 1;
/* Submit whatever is left. */
+ if (!req->result)
+ return -EINVAL;
+
ret = mxs_dcp_run_sha(req);
- if (ret || !req->result)
+ if (ret)
return ret;
+
actx->fill = 0;
/* For some reason, the result is flipped. */
- for (i = 0; i < halg->digestsize; i++)
- req->result[i] = digest[halg->digestsize - i - 1];
+ for (i = 0; i < halg->digestsize / 2; i++) {
+ swap(req->result[i],
+ req->result[halg->digestsize - i - 1]);
+ }
}
return 0;
@@ -901,9 +916,14 @@ static int mxs_dcp_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dcp_vmi_irq = platform_get_irq(pdev, 0);
+ if (dcp_vmi_irq < 0) {
+ ret = dcp_vmi_irq;
+ goto err_mutex;
+ }
+
dcp_irq = platform_get_irq(pdev, 1);
- if (dcp_vmi_irq < 0 || dcp_irq < 0) {
- ret = -EINVAL;
+ if (dcp_irq < 0) {
+ ret = dcp_irq;
goto err_mutex;
}
@@ -935,15 +955,20 @@ static int mxs_dcp_probe(struct platform_device *pdev)
}
/* Allocate coherent helper block. */
- sdcp->coh = kzalloc(sizeof(struct dcp_coherent_block), GFP_KERNEL);
+ sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
+ GFP_KERNEL);
if (!sdcp->coh) {
- dev_err(dev, "Error allocating coherent block\n");
ret = -ENOMEM;
goto err_mutex;
}
+ /* Re-align the structure so it fits the DCP constraints. */
+ sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
+
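	/*
	 * Note on the allocation above (general kernel idiom, not specific
	 * to DCP): devm_kzalloc() may not return a 64-byte aligned pointer,
	 * so the block is over-allocated by DCP_ALIGNMENT bytes and rounded
	 * up with PTR_ALIGN(); the aligned pointer still leaves the full
	 * sizeof(*sdcp->coh) bytes inside the allocation.
	 */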
/* Restart the DCP block. */
- stmp_reset_block(sdcp->base);
+ ret = stmp_reset_block(sdcp->base);
+ if (ret)
+ goto err_mutex;
/* Initialize control register. */
writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
@@ -982,7 +1007,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
dev_err(dev, "Error starting SHA thread!\n");
ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
- goto err_free_coherent;
+ goto err_mutex;
}
sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
@@ -1040,8 +1065,6 @@ err_destroy_aes_thread:
err_destroy_sha_thread:
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
-err_free_coherent:
- kfree(sdcp->coh);
err_mutex:
mutex_unlock(&global_mutex);
return ret;
@@ -1051,8 +1074,6 @@ static int mxs_dcp_remove(struct platform_device *pdev)
{
struct dcp *sdcp = platform_get_drvdata(pdev);
- kfree(sdcp->coh);
-
if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
crypto_unregister_ahash(&dcp_sha256_alg);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index dde41f1df608..cb98fa54573d 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1307,9 +1307,7 @@ static int omap_aes_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops omap_aes_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
-};
+static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
static struct platform_driver omap_aes_driver = {
.probe = omap_aes_probe,
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
new file mode 100644
index 000000000000..ec5f13162b73
--- /dev/null
+++ b/drivers/crypto/omap-des.c
@@ -0,0 +1,1216 @@
+/*
+ * Support for OMAP DES and Triple DES HW acceleration.
+ *
+ * Copyright (c) 2013 Texas Instruments Incorporated
+ * Author: Joel Fernandes <joelf@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#ifdef DEBUG
+#define prn(num) printk(#num "=%d\n", num)
+#define prx(num) printk(#num "=%x\n", num)
+#else
+#define prn(num) do { } while (0)
+#define prx(num) do { } while (0)
+#endif
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/des.h>
+
+#define DST_MAXBURST 2
+
+#define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2)
+
+#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
+
+#define DES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
+ ((x ^ 0x01) * 0x04))
+
+#define DES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
+
+#define DES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
+#define DES_REG_CTRL_CBC BIT(4)
+#define DES_REG_CTRL_TDES BIT(3)
+#define DES_REG_CTRL_DIRECTION BIT(2)
+#define DES_REG_CTRL_INPUT_READY BIT(1)
+#define DES_REG_CTRL_OUTPUT_READY BIT(0)
+
+#define DES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
+
+#define DES_REG_REV(dd) ((dd)->pdata->rev_ofs)
+
+#define DES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
+
+#define DES_REG_LENGTH_N(x) (0x24 + ((x) * 0x04))
+
+#define DES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs)
+#define DES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs)
+#define DES_REG_IRQ_DATA_IN BIT(1)
+#define DES_REG_IRQ_DATA_OUT BIT(2)
+
+#define FLAGS_MODE_MASK 0x000f
+#define FLAGS_ENCRYPT BIT(0)
+#define FLAGS_CBC BIT(1)
+#define FLAGS_INIT BIT(4)
+#define FLAGS_BUSY BIT(6)
+
+struct omap_des_ctx {
+ struct omap_des_dev *dd;
+
+ int keylen;
+ u32 key[(3 * DES_KEY_SIZE) / sizeof(u32)];
+ unsigned long flags;
+};
+
+struct omap_des_reqctx {
+ unsigned long mode;
+};
+
+#define OMAP_DES_QUEUE_LENGTH 1
+#define OMAP_DES_CACHE_SIZE 0
+
+struct omap_des_algs_info {
+ struct crypto_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
+};
+
+struct omap_des_pdata {
+ struct omap_des_algs_info *algs_info;
+ unsigned int algs_info_size;
+
+ void (*trigger)(struct omap_des_dev *dd, int length);
+
+ u32 key_ofs;
+ u32 iv_ofs;
+ u32 ctrl_ofs;
+ u32 data_ofs;
+ u32 rev_ofs;
+ u32 mask_ofs;
+ u32 irq_enable_ofs;
+ u32 irq_status_ofs;
+
+ u32 dma_enable_in;
+ u32 dma_enable_out;
+ u32 dma_start;
+
+ u32 major_mask;
+ u32 major_shift;
+ u32 minor_mask;
+ u32 minor_shift;
+};
+
+struct omap_des_dev {
+ struct list_head list;
+ unsigned long phys_base;
+ void __iomem *io_base;
+ struct omap_des_ctx *ctx;
+ struct device *dev;
+ unsigned long flags;
+ int err;
+
+ /* spinlock used for queues */
+ spinlock_t lock;
+ struct crypto_queue queue;
+
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
+
+ struct ablkcipher_request *req;
+ /*
+ * total is used by PIO mode for book keeping so introduce
+ * variable total_save as need it to calc page_order
+ */
+ size_t total;
+ size_t total_save;
+
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
+
+ /* Buffers for copying for unaligned cases */
+ struct scatterlist in_sgl;
+ struct scatterlist out_sgl;
+ struct scatterlist *orig_out;
+ int sgs_copied;
+
+ struct scatter_walk in_walk;
+ struct scatter_walk out_walk;
+ int dma_in;
+ struct dma_chan *dma_lch_in;
+ int dma_out;
+ struct dma_chan *dma_lch_out;
+ int in_sg_len;
+ int out_sg_len;
+ int pio_only;
+ const struct omap_des_pdata *pdata;
+};
+
+/* keep registered devices data here */
+static LIST_HEAD(dev_list);
+static DEFINE_SPINLOCK(list_lock);
+
+#ifdef DEBUG
+#define omap_des_read(dd, offset) \
+ ({ \
+ int _read_ret; \
+ _read_ret = __raw_readl(dd->io_base + offset); \
+ pr_err("omap_des_read(" #offset "=%#x)= %#x\n", \
+ offset, _read_ret); \
+ _read_ret; \
+ })
+#else
+static inline u32 omap_des_read(struct omap_des_dev *dd, u32 offset)
+{
+ return __raw_readl(dd->io_base + offset);
+}
+#endif
+
+#ifdef DEBUG
+#define omap_des_write(dd, offset, value) \
+ do { \
+ pr_err("omap_des_write(" #offset "=%#x) value=%#x\n", \
+ offset, value); \
+ __raw_writel(value, dd->io_base + offset); \
+ } while (0)
+#else
+static inline void omap_des_write(struct omap_des_dev *dd, u32 offset,
+ u32 value)
+{
+ __raw_writel(value, dd->io_base + offset);
+}
+#endif
+
+static inline void omap_des_write_mask(struct omap_des_dev *dd, u32 offset,
+ u32 value, u32 mask)
+{
+ u32 val;
+
+ val = omap_des_read(dd, offset);
+ val &= ~mask;
+ val |= value;
+ omap_des_write(dd, offset, val);
+}
+
+static void omap_des_write_n(struct omap_des_dev *dd, u32 offset,
+ u32 *value, int count)
+{
+ for (; count--; value++, offset += 4)
+ omap_des_write(dd, offset, *value);
+}
+
+static int omap_des_hw_init(struct omap_des_dev *dd)
+{
+ /*
+ * Clocks are enabled when a request starts and disabled when it
+ * finishes. There may be long delays between requests, and the
+ * device might go to off mode to save power.
+ */
+ pm_runtime_get_sync(dd->dev);
+
+ if (!(dd->flags & FLAGS_INIT)) {
+ dd->flags |= FLAGS_INIT;
+ dd->err = 0;
+ }
+
+ return 0;
+}
+
+static int omap_des_write_ctrl(struct omap_des_dev *dd)
+{
+ unsigned int key32;
+ int i, err;
+ u32 val = 0, mask = 0;
+
+ err = omap_des_hw_init(dd);
+ if (err)
+ return err;
+
+ key32 = dd->ctx->keylen / sizeof(u32);
+
+ /* it seems a key should always be set even if it has not changed */
+ for (i = 0; i < key32; i++) {
+ omap_des_write(dd, DES_REG_KEY(dd, i),
+ __le32_to_cpu(dd->ctx->key[i]));
+ }
+
+ if ((dd->flags & FLAGS_CBC) && dd->req->info)
+ omap_des_write_n(dd, DES_REG_IV(dd, 0), dd->req->info, 2);
+
+ if (dd->flags & FLAGS_CBC)
+ val |= DES_REG_CTRL_CBC;
+ if (dd->flags & FLAGS_ENCRYPT)
+ val |= DES_REG_CTRL_DIRECTION;
+ if (key32 == 6)
+ val |= DES_REG_CTRL_TDES;
+
+ mask |= DES_REG_CTRL_CBC | DES_REG_CTRL_DIRECTION | DES_REG_CTRL_TDES;
+
+ omap_des_write_mask(dd, DES_REG_CTRL(dd), val, mask);
+
+ return 0;
+}
+
+static void omap_des_dma_trigger_omap4(struct omap_des_dev *dd, int length)
+{
+ u32 mask, val;
+
+ omap_des_write(dd, DES_REG_LENGTH_N(0), length);
+
+ val = dd->pdata->dma_start;
+
+ if (dd->dma_lch_out != NULL)
+ val |= dd->pdata->dma_enable_out;
+ if (dd->dma_lch_in != NULL)
+ val |= dd->pdata->dma_enable_in;
+
+ mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
+ dd->pdata->dma_start;
+
+ omap_des_write_mask(dd, DES_REG_MASK(dd), val, mask);
+}
+
+static void omap_des_dma_stop(struct omap_des_dev *dd)
+{
+ u32 mask;
+
+ mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
+ dd->pdata->dma_start;
+
+ omap_des_write_mask(dd, DES_REG_MASK(dd), 0, mask);
+}
+
+static struct omap_des_dev *omap_des_find_dev(struct omap_des_ctx *ctx)
+{
+ struct omap_des_dev *dd = NULL, *tmp;
+
+ spin_lock_bh(&list_lock);
+ if (!ctx->dd) {
+ list_for_each_entry(tmp, &dev_list, list) {
+ /* FIXME: take first available des core */
+ dd = tmp;
+ break;
+ }
+ ctx->dd = dd;
+ } else {
+ /* already found before */
+ dd = ctx->dd;
+ }
+ spin_unlock_bh(&list_lock);
+
+ return dd;
+}
+
+static void omap_des_dma_out_callback(void *data)
+{
+ struct omap_des_dev *dd = data;
+
+ /* dma_lch_out - completed */
+ tasklet_schedule(&dd->done_task);
+}
+
+static int omap_des_dma_init(struct omap_des_dev *dd)
+{
+ int err = -ENOMEM;
+ dma_cap_mask_t mask;
+
+ dd->dma_lch_out = NULL;
+ dd->dma_lch_in = NULL;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ dd->dma_lch_in = dma_request_slave_channel_compat(mask,
+ omap_dma_filter_fn,
+ &dd->dma_in,
+ dd->dev, "rx");
+ if (!dd->dma_lch_in) {
+ dev_err(dd->dev, "Unable to request in DMA channel\n");
+ goto err_dma_in;
+ }
+
+ dd->dma_lch_out = dma_request_slave_channel_compat(mask,
+ omap_dma_filter_fn,
+ &dd->dma_out,
+ dd->dev, "tx");
+ if (!dd->dma_lch_out) {
+ dev_err(dd->dev, "Unable to request out DMA channel\n");
+ goto err_dma_out;
+ }
+
+ return 0;
+
+err_dma_out:
+ dma_release_channel(dd->dma_lch_in);
+err_dma_in:
+ if (err)
+ pr_err("error: %d\n", err);
+ return err;
+}
+
+static void omap_des_dma_cleanup(struct omap_des_dev *dd)
+{
+ dma_release_channel(dd->dma_lch_out);
+ dma_release_channel(dd->dma_lch_in);
+}
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_advance(&walk, start);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
+static int omap_des_crypt_dma(struct crypto_tfm *tfm,
+ struct scatterlist *in_sg, struct scatterlist *out_sg,
+ int in_sg_len, int out_sg_len)
+{
+ struct omap_des_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct omap_des_dev *dd = ctx->dd;
+ struct dma_async_tx_descriptor *tx_in, *tx_out;
+ struct dma_slave_config cfg;
+ int ret;
+
+ if (dd->pio_only) {
+ scatterwalk_start(&dd->in_walk, dd->in_sg);
+ scatterwalk_start(&dd->out_walk, dd->out_sg);
+
+ /*
+ * Enable the DATAIN interrupt and let it take care of the rest.
+ */
+ omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2);
+ return 0;
+ }
+
+ dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);
+
+ memset(&cfg, 0, sizeof(cfg));
+
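+ /* both directions use the DES data FIFO register as the device address */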
+ cfg.src_addr = dd->phys_base + DES_REG_DATA_N(dd, 0);
+ cfg.dst_addr = dd->phys_base + DES_REG_DATA_N(dd, 0);
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = DST_MAXBURST;
+ cfg.dst_maxburst = DST_MAXBURST;
+
+ /* IN */
+ ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
+
+ tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_in) {
+ dev_err(dd->dev, "IN prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
+
+ /* No callback necessary */
+ tx_in->callback_param = dd;
+
+ /* OUT */
+ ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
+
+ tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_out) {
+ dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
+
+ tx_out->callback = omap_des_dma_out_callback;
+ tx_out->callback_param = dd;
+
+ dmaengine_submit(tx_in);
+ dmaengine_submit(tx_out);
+
+ dma_async_issue_pending(dd->dma_lch_in);
+ dma_async_issue_pending(dd->dma_lch_out);
+
+ /* start DMA */
+ dd->pdata->trigger(dd, dd->total);
+
+ return 0;
+}
+
+static int omap_des_crypt_dma_start(struct omap_des_dev *dd)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
+ crypto_ablkcipher_reqtfm(dd->req));
+ int err;
+
+ pr_debug("total: %d\n", dd->total);
+
+ if (!dd->pio_only) {
+ err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
+ DMA_TO_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
+
+ err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
+ }
+
+ err = omap_des_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
+ dd->out_sg_len);
+ if (err && !dd->pio_only) {
+ dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ }
+
+ return err;
+}
+
+static void omap_des_finish_req(struct omap_des_dev *dd, int err)
+{
+ struct ablkcipher_request *req = dd->req;
+
+ pr_debug("err: %d\n", err);
+
+ pm_runtime_put(dd->dev);
+ dd->flags &= ~FLAGS_BUSY;
+
+ req->base.complete(&req->base, err);
+}
+
+static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
+{
+ int err = 0;
+
+ pr_debug("total: %d\n", dd->total);
+
+ omap_des_dma_stop(dd);
+
+ dmaengine_terminate_all(dd->dma_lch_in);
+ dmaengine_terminate_all(dd->dma_lch_out);
+
+ dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
+
+ return err;
+}
+
+static int omap_des_copy_needed(struct scatterlist *sg)
+{
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, 4))
+ return -1;
+ if (!IS_ALIGNED(sg->length, DES_BLOCK_SIZE))
+ return -1;
+ sg = sg_next(sg);
+ }
+ return 0;
+}
+
+static int omap_des_copy_sgs(struct omap_des_dev *dd)
+{
+ void *buf_in, *buf_out;
+ int pages;
+
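+ /* number of pages needed to hold the whole request */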
+ pages = dd->total >> PAGE_SHIFT;
+
+ if (dd->total & (PAGE_SIZE-1))
+ pages++;
+
+ BUG_ON(!pages);
+
+ buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+ buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ if (!buf_in || !buf_out) {
+ pr_err("Couldn't allocated pages for unaligned cases.\n");
+ return -1;
+ }
+
+ dd->orig_out = dd->out_sg;
+
+ sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
+
+ sg_init_table(&dd->in_sgl, 1);
+ sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+ dd->in_sg = &dd->in_sgl;
+
+ sg_init_table(&dd->out_sgl, 1);
+ sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+ dd->out_sg = &dd->out_sgl;
+
+ return 0;
+}
+
+static int omap_des_handle_queue(struct omap_des_dev *dd,
+ struct ablkcipher_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct omap_des_ctx *ctx;
+ struct omap_des_reqctx *rctx;
+ unsigned long flags;
+ int err, ret = 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (req)
+ ret = ablkcipher_enqueue_request(&dd->queue, req);
+ if (dd->flags & FLAGS_BUSY) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (async_req)
+ dd->flags |= FLAGS_BUSY;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return ret;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ablkcipher_request_cast(async_req);
+
+ /* assign new request to device */
+ dd->req = req;
+ dd->total = req->nbytes;
+ dd->total_save = req->nbytes;
+ dd->in_sg = req->src;
+ dd->out_sg = req->dst;
+
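+ /* fall back to a contiguous bounce buffer if the scatterlists are unaligned */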
+ if (omap_des_copy_needed(dd->in_sg) ||
+ omap_des_copy_needed(dd->out_sg)) {
+ if (omap_des_copy_sgs(dd))
+ pr_err("Failed to copy SGs for unaligned cases\n");
+ dd->sgs_copied = 1;
+ } else {
+ dd->sgs_copied = 0;
+ }
+
+ dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
+ dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
+ BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
+
+ rctx = ablkcipher_request_ctx(req);
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx->mode &= FLAGS_MODE_MASK;
+ dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+ dd->ctx = ctx;
+ ctx->dd = dd;
+
+ err = omap_des_write_ctrl(dd);
+ if (!err)
+ err = omap_des_crypt_dma_start(dd);
+ if (err) {
+ /* done_task will not finish it, so do it here */
+ omap_des_finish_req(dd, err);
+ tasklet_schedule(&dd->queue_task);
+ }
+
+ return ret; /* ret is the enqueue return value */
+}
+
+static void omap_des_done_task(unsigned long data)
+{
+ struct omap_des_dev *dd = (struct omap_des_dev *)data;
+ void *buf_in, *buf_out;
+ int pages;
+
+ pr_debug("enter done_task\n");
+
+ if (!dd->pio_only) {
+ dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ omap_des_crypt_dma_stop(dd);
+ }
+
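+ /* copy the result out of the bounce buffer and free the temporary pages */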
+ if (dd->sgs_copied) {
+ buf_in = sg_virt(&dd->in_sgl);
+ buf_out = sg_virt(&dd->out_sgl);
+
+ sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
+
+ pages = get_order(dd->total_save);
+ free_pages((unsigned long)buf_in, pages);
+ free_pages((unsigned long)buf_out, pages);
+ }
+
+ omap_des_finish_req(dd, 0);
+ omap_des_handle_queue(dd, NULL);
+
+ pr_debug("exit\n");
+}
+
+static void omap_des_queue_task(unsigned long data)
+{
+ struct omap_des_dev *dd = (struct omap_des_dev *)data;
+
+ omap_des_handle_queue(dd, NULL);
+}
+
+static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct omap_des_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct omap_des_dev *dd;
+
+ pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+ !!(mode & FLAGS_ENCRYPT),
+ !!(mode & FLAGS_CBC));
+
+ if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of DES blocks\n");
+ return -EINVAL;
+ }
+
+ dd = omap_des_find_dev(ctx);
+ if (!dd)
+ return -ENODEV;
+
+ rctx->mode = mode;
+
+ return omap_des_handle_queue(dd, req);
+}
+
+/* ********************** ALG API ************************************ */
+
+static int omap_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE))
+ return -EINVAL;
+
+ pr_debug("enter, keylen: %d\n", keylen);
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return omap_des_crypt(req, FLAGS_ENCRYPT);
+}
+
+static int omap_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return omap_des_crypt(req, 0);
+}
+
+static int omap_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return omap_des_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+}
+
+static int omap_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return omap_des_crypt(req, FLAGS_CBC);
+}
+
+static int omap_des_cra_init(struct crypto_tfm *tfm)
+{
+ pr_debug("enter\n");
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct omap_des_reqctx);
+
+ return 0;
+}
+
+static void omap_des_cra_exit(struct crypto_tfm *tfm)
+{
+ pr_debug("enter\n");
+}
+
+/* ********************** ALGS ************************************ */
+
+static struct crypto_alg algs_ecb_cbc[] = {
+{
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-omap",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_des_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_des_cra_init,
+ .cra_exit = omap_des_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-omap",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_des_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_des_cra_init,
+ .cra_exit = omap_des_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-omap",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_des_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_des_cra_init,
+ .cra_exit = omap_des_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 3*DES_KEY_SIZE,
+ .max_keysize = 3*DES_KEY_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-omap",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_des_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_des_cra_init,
+ .cra_exit = omap_des_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 3*DES_KEY_SIZE,
+ .max_keysize = 3*DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ }
+}
+};
+
+static struct omap_des_algs_info omap_des_algs_info_ecb_cbc[] = {
+ {
+ .algs_list = algs_ecb_cbc,
+ .size = ARRAY_SIZE(algs_ecb_cbc),
+ },
+};
+
+#ifdef CONFIG_OF
+static const struct omap_des_pdata omap_des_pdata_omap4 = {
+ .algs_info = omap_des_algs_info_ecb_cbc,
+ .algs_info_size = ARRAY_SIZE(omap_des_algs_info_ecb_cbc),
+ .trigger = omap_des_dma_trigger_omap4,
+ .key_ofs = 0x14,
+ .iv_ofs = 0x18,
+ .ctrl_ofs = 0x20,
+ .data_ofs = 0x28,
+ .rev_ofs = 0x30,
+ .mask_ofs = 0x34,
+ .irq_status_ofs = 0x3c,
+ .irq_enable_ofs = 0x40,
+ .dma_enable_in = BIT(5),
+ .dma_enable_out = BIT(6),
+ .major_mask = 0x0700,
+ .major_shift = 8,
+ .minor_mask = 0x003f,
+ .minor_shift = 0,
+};
+
+static irqreturn_t omap_des_irq(int irq, void *dev_id)
+{
+ struct omap_des_dev *dd = dev_id;
+ u32 status, i;
+ u32 *src, *dst;
+
+ status = omap_des_read(dd, DES_REG_IRQ_STATUS(dd));
+ if (status & DES_REG_IRQ_DATA_IN) {
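+ /* PIO mode: push the next block into the data registers */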
+ omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x0);
+
+ BUG_ON(!dd->in_sg);
+
+ BUG_ON(_calc_walked(in) > dd->in_sg->length);
+
+ src = sg_virt(dd->in_sg) + _calc_walked(in);
+
+ for (i = 0; i < DES_BLOCK_WORDS; i++) {
+ omap_des_write(dd, DES_REG_DATA_N(dd, i), *src);
+
+ scatterwalk_advance(&dd->in_walk, 4);
+ if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg = scatterwalk_sg_next(dd->in_sg);
+ if (dd->in_sg) {
+ scatterwalk_start(&dd->in_walk,
+ dd->in_sg);
+ src = sg_virt(dd->in_sg) +
+ _calc_walked(in);
+ }
+ } else {
+ src++;
+ }
+ }
+
+ /* Clear IRQ status */
+ status &= ~DES_REG_IRQ_DATA_IN;
+ omap_des_write(dd, DES_REG_IRQ_STATUS(dd), status);
+
+ /* Enable DATA_OUT interrupt */
+ omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x4);
+
+ } else if (status & DES_REG_IRQ_DATA_OUT) {
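+ /* PIO mode: read the processed block back from the data registers */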
+ omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x0);
+
+ BUG_ON(!dd->out_sg);
+
+ BUG_ON(_calc_walked(out) > dd->out_sg->length);
+
+ dst = sg_virt(dd->out_sg) + _calc_walked(out);
+
+ for (i = 0; i < DES_BLOCK_WORDS; i++) {
+ *dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
+ scatterwalk_advance(&dd->out_walk, 4);
+ if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg = scatterwalk_sg_next(dd->out_sg);
+ if (dd->out_sg) {
+ scatterwalk_start(&dd->out_walk,
+ dd->out_sg);
+ dst = sg_virt(dd->out_sg) +
+ _calc_walked(out);
+ }
+ } else {
+ dst++;
+ }
+ }
+
+ dd->total -= DES_BLOCK_SIZE;
+
+ BUG_ON(dd->total < 0);
+
+ /* Clear IRQ status */
+ status &= ~DES_REG_IRQ_DATA_OUT;
+ omap_des_write(dd, DES_REG_IRQ_STATUS(dd), status);
+
+ if (!dd->total)
+ /* All bytes read! */
+ tasklet_schedule(&dd->done_task);
+ else
+ /* Enable DATA_IN interrupt for next block */
+ omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id omap_des_of_match[] = {
+ {
+ .compatible = "ti,omap4-des",
+ .data = &omap_des_pdata_omap4,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_des_of_match);
+
+static int omap_des_get_of(struct omap_des_dev *dd,
+ struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(omap_des_of_match), &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "no compatible OF match\n");
+ return -EINVAL;
+ }
+
+ dd->dma_out = -1; /* Dummy value that's unused */
+ dd->dma_in = -1; /* Dummy value that's unused */
+ dd->pdata = match->data;
+
+ return 0;
+}
+#else
+static int omap_des_get_of(struct omap_des_dev *dd,
+ struct device *dev)
+{
+ return -EINVAL;
+}
+#endif
+
+static int omap_des_get_pdev(struct omap_des_dev *dd,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ int err = 0;
+
+ /* Get the DMA out channel */
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r) {
+ dev_err(dev, "no DMA out resource info\n");
+ err = -ENODEV;
+ goto err;
+ }
+ dd->dma_out = r->start;
+
+ /* Get the DMA in channel */
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!r) {
+ dev_err(dev, "no DMA in resource info\n");
+ err = -ENODEV;
+ goto err;
+ }
+ dd->dma_in = r->start;
+
+ /* non-DT devices get pdata from pdev */
+ dd->pdata = pdev->dev.platform_data;
+
+err:
+ return err;
+}
+
+static int omap_des_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct omap_des_dev *dd;
+ struct crypto_alg *algp;
+ struct resource *res;
+ int err = -ENOMEM, i, j, irq = -1;
+ u32 reg;
+
+ dd = devm_kzalloc(dev, sizeof(struct omap_des_dev), GFP_KERNEL);
+ if (dd == NULL) {
+ dev_err(dev, "unable to alloc data struct.\n");
+ goto err_data;
+ }
+ dd->dev = dev;
+ platform_set_drvdata(pdev, dd);
+
+ spin_lock_init(&dd->lock);
+ crypto_init_queue(&dd->queue, OMAP_DES_QUEUE_LENGTH);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no MEM resource info\n");
+ goto err_res;
+ }
+
+ err = (dev->of_node) ? omap_des_get_of(dd, pdev) :
+ omap_des_get_pdev(dd, pdev);
+ if (err)
+ goto err_res;
+
+ dd->io_base = devm_request_and_ioremap(dev, res);
+ if (!dd->io_base) {
+ dev_err(dev, "can't ioremap\n");
+ err = -ENOMEM;
+ goto err_res;
+ }
+ dd->phys_base = res->start;
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ omap_des_dma_stop(dd);
+
+ reg = omap_des_read(dd, DES_REG_REV(dd));
+
+ pm_runtime_put_sync(dev);
+
+ dev_info(dev, "OMAP DES hw accel rev: %u.%u\n",
+ (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
+ (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
+
+ tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd);
+ tasklet_init(&dd->queue_task, omap_des_queue_task, (unsigned long)dd);
+
+ err = omap_des_dma_init(dd);
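+ /* if DMA channels are unavailable, fall back to PIO when the IP exposes IRQ registers */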
+ if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) {
+ dd->pio_only = 1;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "can't get IRQ resource\n");
+ goto err_irq;
+ }
+
+ err = devm_request_irq(dev, irq, omap_des_irq, 0,
+ dev_name(dev), dd);
+ if (err) {
+ dev_err(dev, "Unable to grab omap-des IRQ\n");
+ goto err_irq;
+ }
+ }
+
+ INIT_LIST_HEAD(&dd->list);
+ spin_lock(&list_lock);
+ list_add_tail(&dd->list, &dev_list);
+ spin_unlock(&list_lock);
+
+ for (i = 0; i < dd->pdata->algs_info_size; i++) {
+ for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+ algp = &dd->pdata->algs_info[i].algs_list[j];
+
+ pr_debug("reg alg: %s\n", algp->cra_name);
+ INIT_LIST_HEAD(&algp->cra_list);
+
+ err = crypto_register_alg(algp);
+ if (err)
+ goto err_algs;
+
+ dd->pdata->algs_info[i].registered++;
+ }
+ }
+
+ return 0;
+err_algs:
+ for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ crypto_unregister_alg(
+ &dd->pdata->algs_info[i].algs_list[j]);
+ if (!dd->pio_only)
+ omap_des_dma_cleanup(dd);
+err_irq:
+ tasklet_kill(&dd->done_task);
+ tasklet_kill(&dd->queue_task);
+ pm_runtime_disable(dev);
+err_res:
+ dd = NULL;
+err_data:
+ dev_err(dev, "initialization failed.\n");
+ return err;
+}
+
+static int omap_des_remove(struct platform_device *pdev)
+{
+ struct omap_des_dev *dd = platform_get_drvdata(pdev);
+ int i, j;
+
+ if (!dd)
+ return -ENODEV;
+
+ spin_lock(&list_lock);
+ list_del(&dd->list);
+ spin_unlock(&list_lock);
+
+ for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ crypto_unregister_alg(
+ &dd->pdata->algs_info[i].algs_list[j]);
+
+ tasklet_kill(&dd->done_task);
+ tasklet_kill(&dd->queue_task);
+ omap_des_dma_cleanup(dd);
+ pm_runtime_disable(dd->dev);
+ dd = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int omap_des_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int omap_des_resume(struct device *dev)
+{
+ pm_runtime_get_sync(dev);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(omap_des_pm_ops, omap_des_suspend, omap_des_resume);
+
+static struct platform_driver omap_des_driver = {
+ .probe = omap_des_probe,
+ .remove = omap_des_remove,
+ .driver = {
+ .name = "omap-des",
+ .owner = THIS_MODULE,
+ .pm = &omap_des_pm_ops,
+ .of_match_table = of_match_ptr(omap_des_of_match),
+ },
+};
+
+module_platform_driver(omap_des_driver);
+
+MODULE_DESCRIPTION("OMAP DES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joel Fernandes <joelf@ti.com>");
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a727a6a59653..710d86386965 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -636,11 +636,17 @@ static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
size_t count;
+ const u8 *vaddr;
while (ctx->sg) {
+ vaddr = kmap_atomic(sg_page(ctx->sg));
+
count = omap_sham_append_buffer(ctx,
- sg_virt(ctx->sg) + ctx->offset,
+ vaddr + ctx->offset,
ctx->sg->length - ctx->offset);
+
+ kunmap_atomic((void *)vaddr);
+
if (!count)
break;
ctx->offset += count;
@@ -2022,9 +2028,7 @@ static int omap_sham_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops omap_sham_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume)
-};
+static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
static struct platform_driver omap_sham_driver = {
.probe = omap_sham_probe,
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index a6175ba6d238..5da5b98b8f29 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1720,22 +1720,16 @@ static int spacc_probe(struct platform_device *pdev)
engine->name = dev_name(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ engine->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(engine->regs))
+ return PTR_ERR(engine->regs);
+
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!mem || !irq) {
+ if (!irq) {
dev_err(&pdev->dev, "no memory/irq resource for engine\n");
return -ENXIO;
}
- if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
- engine->name))
- return -ENOMEM;
-
- engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
- if (!engine->regs) {
- dev_err(&pdev->dev, "memory map failed\n");
- return -ENOMEM;
- }
-
if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
engine->name, engine)) {
dev_err(engine->dev, "failed to request IRQ\n");
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index cf149b19ff47..be45762f390a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -568,17 +568,14 @@ static int s5p_aes_probe(struct platform_device *pdev)
if (s5p_dev)
return -EEXIST;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (!devm_request_mem_region(dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->ioaddr))
+ return PTR_ERR(pdata->ioaddr);
pdata->clk = devm_clk_get(dev, "secss");
if (IS_ERR(pdata->clk)) {
@@ -589,8 +586,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
clk_enable(pdata->clk);
spin_lock_init(&pdata->lock);
- pdata->ioaddr = devm_ioremap(dev, res->start,
- resource_size(res));
pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
if (pdata->irq_hash < 0) {
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 785a9ded7bdf..07a5987ce67d 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -885,22 +885,9 @@ static int sahara_probe(struct platform_device *pdev)
/* Get the base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get memory region resource\n");
- return -ENODEV;
- }
-
- if (devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), SAHARA_NAME) == NULL) {
- dev_err(&pdev->dev, "failed to request memory region\n");
- return -ENOENT;
- }
- dev->regs_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!dev->regs_base) {
- dev_err(&pdev->dev, "failed to ioremap address region\n");
- return -ENOENT;
- }
+ dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs_base))
+ return PTR_ERR(dev->regs_base);
/* Get the IRQ */
irq = platform_get_irq(pdev, 0);
@@ -909,10 +896,11 @@ static int sahara_probe(struct platform_device *pdev)
return irq;
}
- if (devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
- 0, SAHARA_NAME, dev) < 0) {
+ err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
+ 0, dev_name(&pdev->dev), dev);
+ if (err) {
dev_err(&pdev->dev, "failed to request irq\n");
- return -ENOENT;
+ return err;
}
/* clocks */
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 5967667e1a8f..624b8be0c365 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2637,6 +2637,8 @@ static int talitos_probe(struct platform_device *ofdev)
if (!priv)
return -ENOMEM;
+ INIT_LIST_HEAD(&priv->alg_list);
+
dev_set_drvdata(dev, priv);
priv->ofdev = ofdev;
@@ -2657,8 +2659,6 @@ static int talitos_probe(struct platform_device *ofdev)
(unsigned long)dev);
}
- INIT_LIST_HEAD(&priv->alg_list);
-
priv->reg = of_iomap(np, 0);
if (!priv->reg) {
dev_err(dev, "failed to of_iomap\n");
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
deleted file mode 100644
index 060eecc5dbc3..000000000000
--- a/drivers/crypto/tegra-aes.c
+++ /dev/null
@@ -1,1087 +0,0 @@
-/*
- * drivers/crypto/tegra-aes.c
- *
- * Driver for NVIDIA Tegra AES hardware engine residing inside the
- * Bit Stream Engine for Video (BSEV) hardware block.
- *
- * The programming sequence for this engine is with the help
- * of commands which travel via a command queue residing between the
- * CPU and the BSEV block. The BSEV engine has an internal RAM (VRAM)
- * where the final input plaintext, keys and the IV have to be copied
- * before starting the encrypt/decrypt operation.
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-
-#include <crypto/scatterwalk.h>
-#include <crypto/aes.h>
-#include <crypto/internal/rng.h>
-
-#include "tegra-aes.h"
-
-#define FLAGS_MODE_MASK 0x00FF
-#define FLAGS_ENCRYPT BIT(0)
-#define FLAGS_CBC BIT(1)
-#define FLAGS_GIV BIT(2)
-#define FLAGS_RNG BIT(3)
-#define FLAGS_OFB BIT(4)
-#define FLAGS_NEW_KEY BIT(5)
-#define FLAGS_NEW_IV BIT(6)
-#define FLAGS_INIT BIT(7)
-#define FLAGS_FAST BIT(8)
-#define FLAGS_BUSY 9
-
-/*
- * Defines AES engine Max process bytes size in one go, which takes 1 msec.
- * AES engine spends about 176 cycles/16-bytes or 11 cycles/byte
- * The duration CPU can use the BSE to 1 msec, then the number of available
- * cycles of AVP/BSE is 216K. In this duration, AES can process 216/11 ~= 19KB
- * Based on this AES_HW_DMA_BUFFER_SIZE_BYTES is configured to 16KB.
- */
-#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000
-
-/*
- * The key table length is 64 bytes
- * (This includes first upto 32 bytes key + 16 bytes original initial vector
- * and 16 bytes updated initial vector)
- */
-#define AES_HW_KEY_TABLE_LENGTH_BYTES 64
-
-/*
- * The memory being used is divides as follows:
- * 1. Key - 32 bytes
- * 2. Original IV - 16 bytes
- * 3. Updated IV - 16 bytes
- * 4. Key schedule - 256 bytes
- *
- * 1+2+3 constitute the hw key table.
- */
-#define AES_HW_IV_SIZE 16
-#define AES_HW_KEYSCHEDULE_LEN 256
-#define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)
-
-/* Define commands required for AES operation */
-enum {
- CMD_BLKSTARTENGINE = 0x0E,
- CMD_DMASETUP = 0x10,
- CMD_DMACOMPLETE = 0x11,
- CMD_SETTABLE = 0x15,
- CMD_MEMDMAVD = 0x22,
-};
-
-/* Define sub-commands */
-enum {
- SUBCMD_VRAM_SEL = 0x1,
- SUBCMD_CRYPTO_TABLE_SEL = 0x3,
- SUBCMD_KEY_TABLE_SEL = 0x8,
-};
-
-/* memdma_vd command */
-#define MEMDMA_DIR_DTOVRAM 0 /* sdram -> vram */
-#define MEMDMA_DIR_VTODRAM 1 /* vram -> sdram */
-#define MEMDMA_DIR_SHIFT 25
-#define MEMDMA_NUM_WORDS_SHIFT 12
-
-/* command queue bit shifts */
-enum {
- CMDQ_KEYTABLEADDR_SHIFT = 0,
- CMDQ_KEYTABLEID_SHIFT = 17,
- CMDQ_VRAMSEL_SHIFT = 23,
- CMDQ_TABLESEL_SHIFT = 24,
- CMDQ_OPCODE_SHIFT = 26,
-};
-
-/*
- * The secure key slot contains a unique secure key generated
- * and loaded by the bootloader. This slot is marked as non-accessible
- * to the kernel.
- */
-#define SSK_SLOT_NUM 4
-
-#define AES_NR_KEYSLOTS 8
-#define TEGRA_AES_QUEUE_LENGTH 50
-#define DEFAULT_RNG_BLK_SZ 16
-
-/* The command queue depth */
-#define AES_HW_MAX_ICQ_LENGTH 5
-
-struct tegra_aes_slot {
- struct list_head node;
- int slot_num;
-};
-
-static struct tegra_aes_slot ssk = {
- .slot_num = SSK_SLOT_NUM,
-};
-
-struct tegra_aes_reqctx {
- unsigned long mode;
-};
-
-struct tegra_aes_dev {
- struct device *dev;
- void __iomem *io_base;
- dma_addr_t ivkey_phys_base;
- void __iomem *ivkey_base;
- struct clk *aes_clk;
- struct tegra_aes_ctx *ctx;
- int irq;
- unsigned long flags;
- struct completion op_complete;
- u32 *buf_in;
- dma_addr_t dma_buf_in;
- u32 *buf_out;
- dma_addr_t dma_buf_out;
- u8 *iv;
- u8 dt[DEFAULT_RNG_BLK_SZ];
- int ivlen;
- u64 ctr;
- spinlock_t lock;
- struct crypto_queue queue;
- struct tegra_aes_slot *slots;
- struct ablkcipher_request *req;
- size_t total;
- struct scatterlist *in_sg;
- size_t in_offset;
- struct scatterlist *out_sg;
- size_t out_offset;
-};
-
-static struct tegra_aes_dev *aes_dev;
-
-struct tegra_aes_ctx {
- struct tegra_aes_dev *dd;
- unsigned long flags;
- struct tegra_aes_slot *slot;
- u8 key[AES_MAX_KEY_SIZE];
- size_t keylen;
-};
-
-static struct tegra_aes_ctx rng_ctx = {
- .flags = FLAGS_NEW_KEY,
- .keylen = AES_KEYSIZE_128,
-};
-
-/* keep registered devices data here */
-static struct list_head dev_list;
-static DEFINE_SPINLOCK(list_lock);
-static DEFINE_MUTEX(aes_lock);
-
-static void aes_workqueue_handler(struct work_struct *work);
-static DECLARE_WORK(aes_work, aes_workqueue_handler);
-static struct workqueue_struct *aes_wq;
-
-static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
-{
- return readl(dd->io_base + offset);
-}
-
-static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset)
-{
- writel(val, dd->io_base + offset);
-}
-
-static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
- int nblocks, int mode, bool upd_iv)
-{
- u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
- int i, eng_busy, icq_empty, ret;
- u32 value;
-
- /* reset all the interrupt bits */
- aes_writel(dd, 0xFFFFFFFF, TEGRA_AES_INTR_STATUS);
-
- /* enable error, dma xfer complete interrupts */
- aes_writel(dd, 0x33, TEGRA_AES_INT_ENB);
-
- cmdq[0] = CMD_DMASETUP << CMDQ_OPCODE_SHIFT;
- cmdq[1] = in_addr;
- cmdq[2] = CMD_BLKSTARTENGINE << CMDQ_OPCODE_SHIFT | (nblocks-1);
- cmdq[3] = CMD_DMACOMPLETE << CMDQ_OPCODE_SHIFT;
-
- value = aes_readl(dd, TEGRA_AES_CMDQUE_CONTROL);
- /* access SDRAM through AHB */
- value &= ~TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD;
- value &= ~TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD;
- value |= TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD |
- TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD |
- TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD;
- aes_writel(dd, value, TEGRA_AES_CMDQUE_CONTROL);
- dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value);
-
- value = (0x1 << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT) |
- ((dd->ctx->keylen * 8) <<
- TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT) |
- ((u32)upd_iv << TEGRA_AES_SECURE_IV_SELECT_SHIFT);
-
- if (mode & FLAGS_CBC) {
- value |= ((((mode & FLAGS_ENCRYPT) ? 2 : 3)
- << TEGRA_AES_SECURE_XOR_POS_SHIFT) |
- (((mode & FLAGS_ENCRYPT) ? 2 : 3)
- << TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT) |
- ((mode & FLAGS_ENCRYPT) ? 1 : 0)
- << TEGRA_AES_SECURE_CORE_SEL_SHIFT);
- } else if (mode & FLAGS_OFB) {
- value |= ((TEGRA_AES_SECURE_XOR_POS_FIELD) |
- (2 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT) |
- (TEGRA_AES_SECURE_CORE_SEL_FIELD));
- } else if (mode & FLAGS_RNG) {
- value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
- << TEGRA_AES_SECURE_CORE_SEL_SHIFT |
- TEGRA_AES_SECURE_RNG_ENB_FIELD);
- } else {
- value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
- << TEGRA_AES_SECURE_CORE_SEL_SHIFT);
- }
-
- dev_dbg(dd->dev, "secure_in_sel=0x%x", value);
- aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT);
-
- aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR);
- reinit_completion(&dd->op_complete);
-
- for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) {
- do {
- value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
- eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
- icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
- } while (eng_busy && !icq_empty);
- aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR);
- }
-
- ret = wait_for_completion_timeout(&dd->op_complete,
- msecs_to_jiffies(150));
- if (ret == 0) {
- dev_err(dd->dev, "timed out (0x%x)\n",
- aes_readl(dd, TEGRA_AES_INTR_STATUS));
- return -ETIMEDOUT;
- }
-
- aes_writel(dd, cmdq[AES_HW_MAX_ICQ_LENGTH - 1], TEGRA_AES_ICMDQUE_WR);
- return 0;
-}
-
-static void aes_release_key_slot(struct tegra_aes_slot *slot)
-{
- if (slot->slot_num == SSK_SLOT_NUM)
- return;
-
- spin_lock(&list_lock);
- list_add_tail(&slot->node, &dev_list);
- slot = NULL;
- spin_unlock(&list_lock);
-}
-
-static struct tegra_aes_slot *aes_find_key_slot(void)
-{
- struct tegra_aes_slot *slot = NULL;
- struct list_head *new_head;
- int empty;
-
- spin_lock(&list_lock);
- empty = list_empty(&dev_list);
- if (!empty) {
- slot = list_entry(&dev_list, struct tegra_aes_slot, node);
- new_head = dev_list.next;
- list_del(&dev_list);
- dev_list.next = new_head->next;
- dev_list.prev = NULL;
- }
- spin_unlock(&list_lock);
-
- return slot;
-}
-
-static int aes_set_key(struct tegra_aes_dev *dd)
-{
- u32 value, cmdq[2];
- struct tegra_aes_ctx *ctx = dd->ctx;
- int eng_busy, icq_empty, dma_busy;
- bool use_ssk = false;
-
- /* use ssk? */
- if (!dd->ctx->slot) {
- dev_dbg(dd->dev, "using ssk");
- dd->ctx->slot = &ssk;
- use_ssk = true;
- }
-
- /* enable key schedule generation in hardware */
- value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG_EXT);
- value &= ~TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD;
- aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG_EXT);
-
- /* select the key slot */
- value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG);
- value &= ~TEGRA_AES_SECURE_KEY_INDEX_FIELD;
- value |= (ctx->slot->slot_num << TEGRA_AES_SECURE_KEY_INDEX_SHIFT);
- aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG);
-
- if (use_ssk)
- return 0;
-
- /* copy the key table from sdram to vram */
- cmdq[0] = CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT |
- MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT |
- AES_HW_KEY_TABLE_LENGTH_BYTES / sizeof(u32) <<
- MEMDMA_NUM_WORDS_SHIFT;
- cmdq[1] = (u32)dd->ivkey_phys_base;
-
- aes_writel(dd, cmdq[0], TEGRA_AES_ICMDQUE_WR);
- aes_writel(dd, cmdq[1], TEGRA_AES_ICMDQUE_WR);
-
- do {
- value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
- eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
- icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
- dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD;
- } while (eng_busy && !icq_empty && dma_busy);
-
- /* settable command to get key into internal registers */
- value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
- SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT |
- SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT |
- (SUBCMD_KEY_TABLE_SEL | ctx->slot->slot_num) <<
- CMDQ_KEYTABLEID_SHIFT;
- aes_writel(dd, value, TEGRA_AES_ICMDQUE_WR);
-
- do {
- value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
- eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
- icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
- } while (eng_busy && !icq_empty);
-
- return 0;
-}
-
-static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
-{
- struct crypto_async_request *async_req, *backlog;
- struct crypto_ablkcipher *tfm;
- struct tegra_aes_ctx *ctx;
- struct tegra_aes_reqctx *rctx;
- struct ablkcipher_request *req;
- unsigned long flags;
- int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
- int ret = 0, nblocks, total;
- int count = 0;
- dma_addr_t addr_in, addr_out;
- struct scatterlist *in_sg, *out_sg;
-
- if (!dd)
- return -EINVAL;
-
- spin_lock_irqsave(&dd->lock, flags);
- backlog = crypto_get_backlog(&dd->queue);
- async_req = crypto_dequeue_request(&dd->queue);
- if (!async_req)
- clear_bit(FLAGS_BUSY, &dd->flags);
- spin_unlock_irqrestore(&dd->lock, flags);
-
- if (!async_req)
- return -ENODATA;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- req = ablkcipher_request_cast(async_req);
-
- dev_dbg(dd->dev, "%s: get new req\n", __func__);
-
- if (!req->src || !req->dst)
- return -EINVAL;
-
- /* take mutex to access the aes hw */
- mutex_lock(&aes_lock);
-
- /* assign new request to device */
- dd->req = req;
- dd->total = req->nbytes;
- dd->in_offset = 0;
- dd->in_sg = req->src;
- dd->out_offset = 0;
- dd->out_sg = req->dst;
-
- in_sg = dd->in_sg;
- out_sg = dd->out_sg;
-
- total = dd->total;
-
- tfm = crypto_ablkcipher_reqtfm(req);
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(tfm);
- rctx->mode &= FLAGS_MODE_MASK;
- dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
-
- dd->iv = (u8 *)req->info;
- dd->ivlen = crypto_ablkcipher_ivsize(tfm);
-
- /* assign new context to device */
- ctx->dd = dd;
- dd->ctx = ctx;
-
- if (ctx->flags & FLAGS_NEW_KEY) {
- /* copy the key */
- memcpy(dd->ivkey_base, ctx->key, ctx->keylen);
- memset(dd->ivkey_base + ctx->keylen, 0, AES_HW_KEY_TABLE_LENGTH_BYTES - ctx->keylen);
- aes_set_key(dd);
- ctx->flags &= ~FLAGS_NEW_KEY;
- }
-
- if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && dd->iv) {
- /* set iv to the aes hw slot
- * Hw generates updated iv only after iv is set in slot.
- * So key and iv is passed asynchronously.
- */
- memcpy(dd->buf_in, dd->iv, dd->ivlen);
-
- ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
- dd->dma_buf_out, 1, FLAGS_CBC, false);
- if (ret < 0) {
- dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
- goto out;
- }
- }
-
- while (total) {
- dev_dbg(dd->dev, "remain: %d\n", total);
- ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
- if (!ret) {
- dev_err(dd->dev, "dma_map_sg() error\n");
- goto out;
- }
-
- ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
- if (!ret) {
- dev_err(dd->dev, "dma_map_sg() error\n");
- dma_unmap_sg(dd->dev, dd->in_sg,
- 1, DMA_TO_DEVICE);
- goto out;
- }
-
- addr_in = sg_dma_address(in_sg);
- addr_out = sg_dma_address(out_sg);
- dd->flags |= FLAGS_FAST;
- count = min_t(int, sg_dma_len(in_sg), dma_max);
- WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
- nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);
-
- ret = aes_start_crypt(dd, addr_in, addr_out, nblocks,
- dd->flags, true);
-
- dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
- dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
-
- if (ret < 0) {
- dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
- goto out;
- }
- dd->flags &= ~FLAGS_FAST;
-
- dev_dbg(dd->dev, "out: copied %d\n", count);
- total -= count;
- in_sg = sg_next(in_sg);
- out_sg = sg_next(out_sg);
- WARN_ON(((total != 0) && (!in_sg || !out_sg)));
- }
-
-out:
- mutex_unlock(&aes_lock);
-
- dd->total = total;
-
- if (dd->req->base.complete)
- dd->req->base.complete(&dd->req->base, ret);
-
- dev_dbg(dd->dev, "%s: exit\n", __func__);
- return ret;
-}
-
-static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct tegra_aes_dev *dd = aes_dev;
- struct tegra_aes_slot *key_slot;
-
- if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) &&
- (keylen != AES_KEYSIZE_256)) {
- dev_err(dd->dev, "unsupported key size\n");
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- dev_dbg(dd->dev, "keylen: %d\n", keylen);
-
- ctx->dd = dd;
-
- if (key) {
- if (!ctx->slot) {
- key_slot = aes_find_key_slot();
- if (!key_slot) {
- dev_err(dd->dev, "no empty slot\n");
- return -ENOMEM;
- }
-
- ctx->slot = key_slot;
- }
-
- memcpy(ctx->key, key, keylen);
- ctx->keylen = keylen;
- }
-
- ctx->flags |= FLAGS_NEW_KEY;
- dev_dbg(dd->dev, "done\n");
- return 0;
-}
-
-static void aes_workqueue_handler(struct work_struct *work)
-{
- struct tegra_aes_dev *dd = aes_dev;
- int ret;
-
- ret = clk_prepare_enable(dd->aes_clk);
- if (ret)
- BUG_ON("clock enable failed");
-
- /* empty the crypto queue and then return */
- do {
- ret = tegra_aes_handle_req(dd);
- } while (!ret);
-
- clk_disable_unprepare(dd->aes_clk);
-}
-
-static irqreturn_t aes_irq(int irq, void *dev_id)
-{
- struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
- u32 value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
- int busy = test_bit(FLAGS_BUSY, &dd->flags);
-
- if (!busy) {
- dev_dbg(dd->dev, "spurious interrupt\n");
- return IRQ_NONE;
- }
-
- dev_dbg(dd->dev, "irq_stat: 0x%x\n", value);
- if (value & TEGRA_AES_INT_ERROR_MASK)
- aes_writel(dd, TEGRA_AES_INT_ERROR_MASK, TEGRA_AES_INTR_STATUS);
-
- if (!(value & TEGRA_AES_ENGINE_BUSY_FIELD))
- complete(&dd->op_complete);
- else
- return IRQ_NONE;
-
- return IRQ_HANDLED;
-}
-
-static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
-{
- struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req);
- struct tegra_aes_dev *dd = aes_dev;
- unsigned long flags;
- int err = 0;
- int busy;
-
- dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n",
- req->nbytes, !!(mode & FLAGS_ENCRYPT),
- !!(mode & FLAGS_CBC), !!(mode & FLAGS_OFB));
-
- rctx->mode = mode;
-
- spin_lock_irqsave(&dd->lock, flags);
- err = ablkcipher_enqueue_request(&dd->queue, req);
- busy = test_and_set_bit(FLAGS_BUSY, &dd->flags);
- spin_unlock_irqrestore(&dd->lock, flags);
-
- if (!busy)
- queue_work(aes_wq, &aes_work);
-
- return err;
-}
-
-static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req)
-{
- return tegra_aes_crypt(req, FLAGS_ENCRYPT);
-}
-
-static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req)
-{
- return tegra_aes_crypt(req, 0);
-}
-
-static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req)
-{
- return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
-}
-
-static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req)
-{
- return tegra_aes_crypt(req, FLAGS_CBC);
-}
-
-static int tegra_aes_ofb_encrypt(struct ablkcipher_request *req)
-{
- return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_OFB);
-}
-
-static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req)
-{
- return tegra_aes_crypt(req, FLAGS_OFB);
-}
-
-static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen)
-{
- struct tegra_aes_dev *dd = aes_dev;
- struct tegra_aes_ctx *ctx = &rng_ctx;
- int ret, i;
- u8 *dest = rdata, *dt = dd->dt;
-
- /* take mutex to access the aes hw */
- mutex_lock(&aes_lock);
-
- ret = clk_prepare_enable(dd->aes_clk);
- if (ret) {
- mutex_unlock(&aes_lock);
- return ret;
- }
-
- ctx->dd = dd;
- dd->ctx = ctx;
- dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
-
- memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ);
-
- ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
- (u32)dd->dma_buf_out, 1, dd->flags, true);
- if (ret < 0) {
- dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
- dlen = ret;
- goto out;
- }
- memcpy(dest, dd->buf_out, dlen);
-
- /* update the DT */
- for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
- dt[i] += 1;
- if (dt[i] != 0)
- break;
- }
-
-out:
- clk_disable_unprepare(dd->aes_clk);
- mutex_unlock(&aes_lock);
-
- dev_dbg(dd->dev, "%s: done\n", __func__);
- return dlen;
-}
-
-static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
- unsigned int slen)
-{
- struct tegra_aes_dev *dd = aes_dev;
- struct tegra_aes_ctx *ctx = &rng_ctx;
- struct tegra_aes_slot *key_slot;
- int ret = 0;
- u8 tmp[16]; /* 16 bytes = 128 bits of entropy */
- u8 *dt;
-
- if (!ctx || !dd) {
- pr_err("ctx=0x%x, dd=0x%x\n",
- (unsigned int)ctx, (unsigned int)dd);
- return -EINVAL;
- }
-
- if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
- dev_err(dd->dev, "seed size invalid");
- return -ENOMEM;
- }
-
- /* take mutex to access the aes hw */
- mutex_lock(&aes_lock);
-
- if (!ctx->slot) {
- key_slot = aes_find_key_slot();
- if (!key_slot) {
- dev_err(dd->dev, "no empty slot\n");
- mutex_unlock(&aes_lock);
- return -ENOMEM;
- }
- ctx->slot = key_slot;
- }
-
- ctx->dd = dd;
- dd->ctx = ctx;
- dd->ctr = 0;
-
- ctx->keylen = AES_KEYSIZE_128;
- ctx->flags |= FLAGS_NEW_KEY;
-
- /* copy the key to the key slot */
- memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
- memset(dd->ivkey_base + AES_KEYSIZE_128, 0, AES_HW_KEY_TABLE_LENGTH_BYTES - AES_KEYSIZE_128);
-
- dd->iv = seed;
- dd->ivlen = slen;
-
- dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
-
- ret = clk_prepare_enable(dd->aes_clk);
- if (ret) {
- mutex_unlock(&aes_lock);
- return ret;
- }
-
- aes_set_key(dd);
-
- /* set seed to the aes hw slot */
- memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ);
- ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
- dd->dma_buf_out, 1, FLAGS_CBC, false);
- if (ret < 0) {
- dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
- goto out;
- }
-
- if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
- dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
- } else {
- get_random_bytes(tmp, sizeof(tmp));
- dt = tmp;
- }
- memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
-
-out:
- clk_disable_unprepare(dd->aes_clk);
- mutex_unlock(&aes_lock);
-
- dev_dbg(dd->dev, "%s: done\n", __func__);
- return ret;
-}
-
-static int tegra_aes_cra_init(struct crypto_tfm *tfm)
-{
- tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);
-
- return 0;
-}
-
-static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
-{
- struct tegra_aes_ctx *ctx =
- crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);
-
- if (ctx && ctx->slot)
- aes_release_key_slot(ctx->slot);
-}
-
-static struct crypto_alg algs[] = {
- {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-tegra",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = tegra_aes_setkey,
- .encrypt = tegra_aes_ecb_encrypt,
- .decrypt = tegra_aes_ecb_decrypt,
- },
- }, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-tegra",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_MIN_KEY_SIZE,
- .setkey = tegra_aes_setkey,
- .encrypt = tegra_aes_cbc_encrypt,
- .decrypt = tegra_aes_cbc_decrypt,
- }
- }, {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-tegra",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_MIN_KEY_SIZE,
- .setkey = tegra_aes_setkey,
- .encrypt = tegra_aes_ofb_encrypt,
- .decrypt = tegra_aes_ofb_decrypt,
- }
- }, {
- .cra_name = "ansi_cprng",
- .cra_driver_name = "rng-aes-tegra",
- .cra_flags = CRYPTO_ALG_TYPE_RNG,
- .cra_ctxsize = sizeof(struct tegra_aes_ctx),
- .cra_type = &crypto_rng_type,
- .cra_u.rng = {
- .rng_make_random = tegra_aes_get_random,
- .rng_reset = tegra_aes_rng_reset,
- .seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ),
- }
- }
-};
-
-static int tegra_aes_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct tegra_aes_dev *dd;
- struct resource *res;
- int err = -ENOMEM, i = 0, j;
-
- dd = devm_kzalloc(dev, sizeof(struct tegra_aes_dev), GFP_KERNEL);
- if (dd == NULL) {
- dev_err(dev, "unable to alloc data struct.\n");
- return err;
- }
-
- dd->dev = dev;
- platform_set_drvdata(pdev, dd);
-
- dd->slots = devm_kzalloc(dev, sizeof(struct tegra_aes_slot) *
- AES_NR_KEYSLOTS, GFP_KERNEL);
- if (dd->slots == NULL) {
- dev_err(dev, "unable to alloc slot struct.\n");
- goto out;
- }
-
- spin_lock_init(&dd->lock);
- crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);
-
- /* Get the module base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "invalid resource type: base\n");
- err = -ENODEV;
- goto out;
- }
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res),
- dev_name(&pdev->dev))) {
- dev_err(&pdev->dev, "Couldn't request MEM resource\n");
- return -ENODEV;
- }
-
- dd->io_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!dd->io_base) {
- dev_err(dev, "can't ioremap register space\n");
- err = -ENOMEM;
- goto out;
- }
-
- /* Initialize the vde clock */
- dd->aes_clk = devm_clk_get(dev, "vde");
- if (IS_ERR(dd->aes_clk)) {
- dev_err(dev, "iclock intialization failed.\n");
- err = -ENODEV;
- goto out;
- }
-
- err = clk_set_rate(dd->aes_clk, ULONG_MAX);
- if (err) {
- dev_err(dd->dev, "iclk set_rate fail(%d)\n", err);
- goto out;
- }
-
- /*
- * the foll contiguous memory is allocated as follows -
- * - hardware key table
- * - key schedule
- */
- dd->ivkey_base = dma_alloc_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
- &dd->ivkey_phys_base,
- GFP_KERNEL);
- if (!dd->ivkey_base) {
- dev_err(dev, "can not allocate iv/key buffer\n");
- err = -ENOMEM;
- goto out;
- }
-
- dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
- &dd->dma_buf_in, GFP_KERNEL);
- if (!dd->buf_in) {
- dev_err(dev, "can not allocate dma-in buffer\n");
- err = -ENOMEM;
- goto out;
- }
-
- dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
- &dd->dma_buf_out, GFP_KERNEL);
- if (!dd->buf_out) {
- dev_err(dev, "can not allocate dma-out buffer\n");
- err = -ENOMEM;
- goto out;
- }
-
- init_completion(&dd->op_complete);
- aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
- if (!aes_wq) {
- dev_err(dev, "alloc_workqueue failed\n");
- err = -ENOMEM;
- goto out;
- }
-
- /* get the irq */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "invalid resource type: base\n");
- err = -ENODEV;
- goto out;
- }
- dd->irq = res->start;
-
- err = devm_request_irq(dev, dd->irq, aes_irq, IRQF_TRIGGER_HIGH |
- IRQF_SHARED, "tegra-aes", dd);
- if (err) {
- dev_err(dev, "request_irq failed\n");
- goto out;
- }
-
- mutex_init(&aes_lock);
- INIT_LIST_HEAD(&dev_list);
-
- spin_lock_init(&list_lock);
- spin_lock(&list_lock);
- for (i = 0; i < AES_NR_KEYSLOTS; i++) {
- if (i == SSK_SLOT_NUM)
- continue;
- dd->slots[i].slot_num = i;
- INIT_LIST_HEAD(&dd->slots[i].node);
- list_add_tail(&dd->slots[i].node, &dev_list);
- }
- spin_unlock(&list_lock);
-
- aes_dev = dd;
- for (i = 0; i < ARRAY_SIZE(algs); i++) {
- algs[i].cra_priority = 300;
- algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx);
- algs[i].cra_module = THIS_MODULE;
- algs[i].cra_init = tegra_aes_cra_init;
- algs[i].cra_exit = tegra_aes_cra_exit;
-
- err = crypto_register_alg(&algs[i]);
- if (err)
- goto out;
- }
-
- dev_info(dev, "registered");
- return 0;
-
-out:
- for (j = 0; j < i; j++)
- crypto_unregister_alg(&algs[j]);
- if (dd->ivkey_base)
- dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
- dd->ivkey_base, dd->ivkey_phys_base);
- if (dd->buf_in)
- dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
- dd->buf_in, dd->dma_buf_in);
- if (dd->buf_out)
- dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
- dd->buf_out, dd->dma_buf_out);
- if (aes_wq)
- destroy_workqueue(aes_wq);
- spin_lock(&list_lock);
- list_del(&dev_list);
- spin_unlock(&list_lock);
-
- aes_dev = NULL;
-
- dev_err(dev, "%s: initialization failed.\n", __func__);
- return err;
-}
-
-static int tegra_aes_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(algs); i++)
- crypto_unregister_alg(&algs[i]);
-
- cancel_work_sync(&aes_work);
- destroy_workqueue(aes_wq);
- spin_lock(&list_lock);
- list_del(&dev_list);
- spin_unlock(&list_lock);
-
- dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
- dd->ivkey_base, dd->ivkey_phys_base);
- dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
- dd->buf_in, dd->dma_buf_in);
- dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
- dd->buf_out, dd->dma_buf_out);
- aes_dev = NULL;
-
- return 0;
-}
-
-static struct of_device_id tegra_aes_of_match[] = {
- { .compatible = "nvidia,tegra20-aes", },
- { .compatible = "nvidia,tegra30-aes", },
- { },
-};
-
-static struct platform_driver tegra_aes_driver = {
- .probe = tegra_aes_probe,
- .remove = tegra_aes_remove,
- .driver = {
- .name = "tegra-aes",
- .owner = THIS_MODULE,
- .of_match_table = tegra_aes_of_match,
- },
-};
-
-module_platform_driver(tegra_aes_driver);
-
-MODULE_DESCRIPTION("Tegra AES/OFB/CPRNG hw acceleration support.");
-MODULE_AUTHOR("NVIDIA Corporation");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/tegra-aes.h b/drivers/crypto/tegra-aes.h
deleted file mode 100644
index 6006333a8934..000000000000
--- a/drivers/crypto/tegra-aes.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __CRYPTODEV_TEGRA_AES_H
-#define __CRYPTODEV_TEGRA_AES_H
-
-#define TEGRA_AES_ICMDQUE_WR 0x1000
-#define TEGRA_AES_CMDQUE_CONTROL 0x1008
-#define TEGRA_AES_INTR_STATUS 0x1018
-#define TEGRA_AES_INT_ENB 0x1040
-#define TEGRA_AES_CONFIG 0x1044
-#define TEGRA_AES_IRAM_ACCESS_CFG 0x10A0
-#define TEGRA_AES_SECURE_DEST_ADDR 0x1100
-#define TEGRA_AES_SECURE_INPUT_SELECT 0x1104
-#define TEGRA_AES_SECURE_CONFIG 0x1108
-#define TEGRA_AES_SECURE_CONFIG_EXT 0x110C
-#define TEGRA_AES_SECURE_SECURITY 0x1110
-#define TEGRA_AES_SECURE_HASH_RESULT0 0x1120
-#define TEGRA_AES_SECURE_HASH_RESULT1 0x1124
-#define TEGRA_AES_SECURE_HASH_RESULT2 0x1128
-#define TEGRA_AES_SECURE_HASH_RESULT3 0x112C
-#define TEGRA_AES_SECURE_SEC_SEL0 0x1140
-#define TEGRA_AES_SECURE_SEC_SEL1 0x1144
-#define TEGRA_AES_SECURE_SEC_SEL2 0x1148
-#define TEGRA_AES_SECURE_SEC_SEL3 0x114C
-#define TEGRA_AES_SECURE_SEC_SEL4 0x1150
-#define TEGRA_AES_SECURE_SEC_SEL5 0x1154
-#define TEGRA_AES_SECURE_SEC_SEL6 0x1158
-#define TEGRA_AES_SECURE_SEC_SEL7 0x115C
-
-/* interrupt status reg masks and shifts */
-#define TEGRA_AES_ENGINE_BUSY_FIELD BIT(0)
-#define TEGRA_AES_ICQ_EMPTY_FIELD BIT(3)
-#define TEGRA_AES_DMA_BUSY_FIELD BIT(23)
-
-/* secure select reg masks and shifts */
-#define TEGRA_AES_SECURE_SEL0_KEYREAD_ENB0_FIELD BIT(0)
-
-/* secure config ext masks and shifts */
-#define TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD BIT(15)
-
-/* secure config masks and shifts */
-#define TEGRA_AES_SECURE_KEY_INDEX_SHIFT 20
-#define TEGRA_AES_SECURE_KEY_INDEX_FIELD (0x1F << TEGRA_AES_SECURE_KEY_INDEX_SHIFT)
-#define TEGRA_AES_SECURE_BLOCK_CNT_SHIFT 0
-#define TEGRA_AES_SECURE_BLOCK_CNT_FIELD (0xFFFFF << TEGRA_AES_SECURE_BLOCK_CNT_SHIFT)
-
-/* stream interface select masks and shifts */
-#define TEGRA_AES_CMDQ_CTRL_UCMDQEN_FIELD BIT(0)
-#define TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD BIT(1)
-#define TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD BIT(4)
-#define TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD BIT(5)
-
-/* config register masks and shifts */
-#define TEGRA_AES_CONFIG_ENDIAN_ENB_FIELD BIT(10)
-#define TEGRA_AES_CONFIG_MODE_SEL_SHIFT 0
-#define TEGRA_AES_CONFIG_MODE_SEL_FIELD (0x1F << TEGRA_AES_CONFIG_MODE_SEL_SHIFT)
-
-/* extended config */
-#define TEGRA_AES_SECURE_OFFSET_CNT_SHIFT 24
-#define TEGRA_AES_SECURE_OFFSET_CNT_FIELD (0xFF << TEGRA_AES_SECURE_OFFSET_CNT_SHIFT)
-#define TEGRA_AES_SECURE_KEYSCHED_GEN_FIELD BIT(15)
-
-/* init vector select */
-#define TEGRA_AES_SECURE_IV_SELECT_SHIFT 10
-#define TEGRA_AES_SECURE_IV_SELECT_FIELD BIT(10)
-
-/* secure engine input */
-#define TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT 28
-#define TEGRA_AES_SECURE_INPUT_ALG_SEL_FIELD (0xF << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT)
-#define TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT 16
-#define TEGRA_AES_SECURE_INPUT_KEY_LEN_FIELD (0xFFF << TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT)
-#define TEGRA_AES_SECURE_RNG_ENB_FIELD BIT(11)
-#define TEGRA_AES_SECURE_CORE_SEL_SHIFT 9
-#define TEGRA_AES_SECURE_CORE_SEL_FIELD BIT(9)
-#define TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT 7
-#define TEGRA_AES_SECURE_VCTRAM_SEL_FIELD (0x3 << TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT)
-#define TEGRA_AES_SECURE_INPUT_SEL_SHIFT 5
-#define TEGRA_AES_SECURE_INPUT_SEL_FIELD (0x3 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT)
-#define TEGRA_AES_SECURE_XOR_POS_SHIFT 3
-#define TEGRA_AES_SECURE_XOR_POS_FIELD (0x3 << TEGRA_AES_SECURE_XOR_POS_SHIFT)
-#define TEGRA_AES_SECURE_HASH_ENB_FIELD BIT(2)
-#define TEGRA_AES_SECURE_ON_THE_FLY_FIELD BIT(0)
-
-/* interrupt error mask */
-#define TEGRA_AES_INT_ERROR_MASK 0xFFF000
-
-#endif
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index a0b2f7e0eedb..2042ec3656ba 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -91,26 +91,35 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
*/
static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
- int lev, prev_lev;
+ int lev, prev_lev, ret = 0;
unsigned long cur_time;
- lev = devfreq_get_freq_level(devfreq, freq);
- if (lev < 0)
- return lev;
-
cur_time = jiffies;
- devfreq->time_in_state[lev] +=
+
+ prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
+ if (prev_lev < 0) {
+ ret = prev_lev;
+ goto out;
+ }
+
+ devfreq->time_in_state[prev_lev] +=
cur_time - devfreq->last_stat_updated;
- if (freq != devfreq->previous_freq) {
- prev_lev = devfreq_get_freq_level(devfreq,
- devfreq->previous_freq);
+
+ lev = devfreq_get_freq_level(devfreq, freq);
+ if (lev < 0) {
+ ret = lev;
+ goto out;
+ }
+
+ if (lev != prev_lev) {
devfreq->trans_table[(prev_lev *
devfreq->profile->max_state) + lev]++;
devfreq->total_trans++;
}
- devfreq->last_stat_updated = cur_time;
- return 0;
+out:
+ devfreq->last_stat_updated = cur_time;
+ return ret;
}
/**
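
The devfreq hunk above reworks the statistics bookkeeping: elapsed time is now charged to the level of previous_freq (the frequency the device was actually running at), and last_stat_updated is refreshed even when a level lookup fails, so the next update is not billed for the whole gap. A rough user-space sketch of that pattern follows; the frequency table, the time source and all helper names are invented for illustration and are not devfreq's own API.

    #include <stdio.h>
    #include <time.h>

    #define NUM_LEVELS 3

    static const unsigned long freq_table[NUM_LEVELS] = { 200, 400, 800 }; /* MHz */
    static unsigned long long time_in_state[NUM_LEVELS];
    static unsigned int trans_table[NUM_LEVELS][NUM_LEVELS];
    static unsigned long previous_freq = 200;
    static time_t last_stat_updated;

    static int get_freq_level(unsigned long freq)
    {
        int lev;

        for (lev = 0; lev < NUM_LEVELS; lev++)
            if (freq_table[lev] == freq)
                return lev;
        return -1; /* unknown frequency */
    }

    /* Charge the elapsed time to the level we were actually running at,
     * then record the transition; always refresh the timestamp. */
    static int update_status(unsigned long freq)
    {
        int lev, prev_lev, ret = 0;
        time_t cur_time = time(NULL);

        prev_lev = get_freq_level(previous_freq);
        if (prev_lev < 0) {
            ret = prev_lev;
            goto out;
        }

        time_in_state[prev_lev] += (unsigned long long)(cur_time - last_stat_updated);

        lev = get_freq_level(freq);
        if (lev < 0) {
            ret = lev;
            goto out;
        }

        if (lev != prev_lev)
            trans_table[prev_lev][lev]++;
    out:
        /* Updating even on failure keeps the next call from double-charging. */
        last_stat_updated = cur_time;
        return ret;
    }

    int main(void)
    {
        last_stat_updated = time(NULL);
        update_status(400); /* pretend the device just switched 200 -> 400 MHz */
        previous_freq = 400;
        printf("time spent at 200 MHz: %llu s\n", time_in_state[0]);
        return 0;
    }
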
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 605b016bcea4..ba06d1d2f99e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -308,7 +308,7 @@ config DMA_OMAP
config DMA_BCM2835
tristate "BCM2835 DMA engine support"
- depends on (ARCH_BCM2835 || MACH_BCM2708)
+ depends on ARCH_BCM2835
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -350,6 +350,16 @@ config MOXART_DMA
select DMA_VIRTUAL_CHANNELS
help
Enable support for the MOXA ART SoC DMA controller.
+
+config FSL_EDMA
+ tristate "Freescale eDMA engine support"
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the Freescale eDMA engine with programmable channel
+ multiplexing capability for DMA request sources (slots).
+ This module can be found on Freescale Vybrid and LS-1 SoCs.
config DMA_ENGINE
bool
@@ -401,4 +411,13 @@ config DMATEST
config DMA_ENGINE_RAID
bool
+config QCOM_BAM_DMA
+ tristate "QCOM BAM DMA support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ Enable support for the QCOM BAM DMA controller. This controller
+ provides DMA capabilities for a variety of on-chip devices.
+
endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a029d0f4a1be..5150c82c9caf 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -44,3 +44,5 @@ obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 1e506afa33f5..de361a156b34 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -13,6 +13,7 @@
*/
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -265,7 +266,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
*/
void devm_acpi_dma_controller_free(struct device *dev)
{
- WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL));
+ WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL));
}
EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
@@ -343,7 +344,7 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
* @index: index of FixedDMA descriptor for @dev
*
* Return:
- * Pointer to appropriate dma channel on success or NULL on error.
+ * Pointer to appropriate dma channel on success or an error pointer.
*/
struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
size_t index)
@@ -358,10 +359,10 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
/* Check if the device was enumerated by ACPI */
if (!dev || !ACPI_HANDLE(dev))
- return NULL;
+ return ERR_PTR(-ENODEV);
if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
- return NULL;
+ return ERR_PTR(-ENODEV);
memset(&pdata, 0, sizeof(pdata));
pdata.index = index;
@@ -376,7 +377,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
acpi_dev_free_resource_list(&resource_list);
if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
- return NULL;
+ return ERR_PTR(-ENODEV);
mutex_lock(&acpi_dma_lock);
@@ -399,7 +400,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
}
mutex_unlock(&acpi_dma_lock);
- return chan;
+ return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
@@ -413,7 +414,7 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
* the first FixedDMA descriptor is TX and second is RX.
*
* Return:
- * Pointer to appropriate dma channel on success or NULL on error.
+ * Pointer to appropriate dma channel on success or an error pointer.
*/
struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
const char *name)
@@ -425,7 +426,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
else if (!strcmp(name, "rx"))
index = 1;
else
- return NULL;
+ return ERR_PTR(-ENODEV);
return acpi_dma_request_slave_chan_by_index(dev, index);
}
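
The acpi-dma changes switch the channel lookup from returning NULL to returning encoded error pointers, which lets a caller such as dma_request_slave_channel_reason() tell a hard -ENODEV apart from -EPROBE_DEFER ("the provider has not probed yet, retry later"). The stand-alone sketch below imitates that idiom with user-space re-implementations of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers and a made-up request_chan() function; it only illustrates the convention, it is not the driver code.

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    /* User-space stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
    #define MAX_ERRNO     4095
    #define ERR_PTR(err)  ((void *)(intptr_t)(err))
    #define IS_ERR(ptr)   ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)
    #define PTR_ERR(ptr)  ((long)(intptr_t)(ptr))
    #define EPROBE_DEFER  517 /* kernel-private errno, redefined for this sketch */

    struct dma_chan { int id; };
    static struct dma_chan fake_chan = { .id = 3 };

    /* Hypothetical lookup that reports *why* no channel was returned. */
    static struct dma_chan *request_chan(const char *name, int provider_ready)
    {
        if (!name)
            return ERR_PTR(-ENODEV);       /* hard failure: no such resource */
        if (!provider_ready)
            return ERR_PTR(-EPROBE_DEFER); /* retry once the provider probes */
        return &fake_chan;
    }

    int main(void)
    {
        struct dma_chan *chan = request_chan("rx", 0);

        if (IS_ERR(chan))
            printf("no channel yet, error %ld\n", PTR_ERR(chan));

        chan = request_chan("rx", 1);
        if (!IS_ERR(chan))
            printf("got channel %d\n", chan->id);
        return 0;
    }
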
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index e2c04dc81e2a..c13a3bb0f594 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1569,7 +1569,6 @@ static int at_dma_remove(struct platform_device *pdev)
/* Disable interrupts */
atc_disable_chan_irq(atdma, chan->chan_id);
- tasklet_disable(&atchan->tasklet);
tasklet_kill(&atchan->tasklet);
list_del(&chan->device_node);
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index c18aebf7d5aa..d028f36ae655 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -620,12 +620,15 @@ static int cppi41_stop_chan(struct dma_chan *chan)
u32 desc_phys;
int ret;
+ desc_phys = lower_32_bits(c->desc_phys);
+ desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ if (!cdd->chan_busy[desc_num])
+ return 0;
+
ret = cppi41_tear_down_chan(c);
if (ret)
return ret;
- desc_phys = lower_32_bits(c->desc_phys);
- desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
WARN_ON(!cdd->chan_busy[desc_num]);
cdd->chan_busy[desc_num] = NULL;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ed610b497518..a886713937fd 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -627,18 +627,13 @@ EXPORT_SYMBOL_GPL(__dma_request_channel);
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
const char *name)
{
- struct dma_chan *chan;
-
/* If device-tree is present get slave info from here */
if (dev->of_node)
return of_dma_request_slave_channel(dev->of_node, name);
/* If device was enumerated by ACPI get slave info from here */
- if (ACPI_HANDLE(dev)) {
- chan = acpi_dma_request_slave_chan_by_name(dev, name);
- if (chan)
- return chan;
- }
+ if (ACPI_HANDLE(dev))
+ return acpi_dma_request_slave_chan_by_name(dev, name);
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 05b6dea770a4..e27cec25c59e 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -340,7 +340,7 @@ static unsigned int min_odd(unsigned int x, unsigned int y)
static void result(const char *err, unsigned int n, unsigned int src_off,
unsigned int dst_off, unsigned int len, unsigned long data)
{
- pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+ pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
current->comm, n, err, src_off, dst_off, len, data);
}
@@ -348,7 +348,7 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
unsigned int dst_off, unsigned int len,
unsigned long data)
{
- pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+ pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
current->comm, n, err, src_off, dst_off, len, data);
}
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 13ac3f240e79..cfdbb92aae1d 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -33,8 +33,8 @@
* of which use ARM any more). See the "Databook" from Synopsys for
* information beyond what licensees probably provide.
*
- * The driver has currently been tested only with the Atmel AT32AP7000,
- * which does not support descriptor writeback.
+ * The driver has been tested with the Atmel AT32AP7000, which does not
+ * support descriptor writeback.
*/
static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
@@ -1479,7 +1479,6 @@ static void dw_dma_off(struct dw_dma *dw)
int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
{
struct dw_dma *dw;
- size_t size;
bool autocfg;
unsigned int dw_params;
unsigned int nr_channels;
@@ -1487,6 +1486,13 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
int err;
int i;
+ dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ dw->regs = chip->regs;
+ chip->dw = dw;
+
dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
@@ -1509,9 +1515,9 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
else
nr_channels = pdata->nr_channels;
- size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
- dw = devm_kzalloc(chip->dev, size, GFP_KERNEL);
- if (!dw)
+ dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
+ GFP_KERNEL);
+ if (!dw->chan)
return -ENOMEM;
dw->clk = devm_clk_get(chip->dev, "hclk");
@@ -1519,9 +1525,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
return PTR_ERR(dw->clk);
clk_prepare_enable(dw->clk);
- dw->regs = chip->regs;
- chip->dw = dw;
-
/* Get hardware configuration parameters */
if (autocfg) {
max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index e89fc24b8293..fec59f1a77bb 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -75,6 +75,36 @@ static void dw_pci_remove(struct pci_dev *pdev)
dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
}
+#ifdef CONFIG_PM_SLEEP
+
+static int dw_pci_suspend_late(struct device *dev)
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct dw_dma_chip *chip = pci_get_drvdata(pci);
+
+ return dw_dma_suspend(chip);
+};
+
+static int dw_pci_resume_early(struct device *dev)
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct dw_dma_chip *chip = pci_get_drvdata(pci);
+
+ return dw_dma_resume(chip);
+};
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define dw_pci_suspend_late NULL
+#define dw_pci_resume_early NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops dw_pci_dev_pm_ops = {
+ .suspend_late = dw_pci_suspend_late,
+ .resume_early = dw_pci_resume_early,
+};
+
static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
/* Medfield */
{ PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata },
@@ -83,6 +113,9 @@ static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
/* BayTrail */
{ PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
{ PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
+
+ /* Haswell */
+ { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata },
{ }
};
MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
@@ -92,6 +125,9 @@ static struct pci_driver dw_pci_driver = {
.id_table = dw_pci_id_table,
.probe = dw_pci_probe,
.remove = dw_pci_remove,
+ .driver = {
+ .pm = &dw_pci_dev_pm_ops,
+ },
};
module_pci_driver(dw_pci_driver);
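
The dw/pci.c hunk wires the PCI driver into suspend_late/resume_early callbacks and compiles them out behind CONFIG_PM_SLEEP, substituting NULL so the dev_pm_ops initializer stays unconditional. A minimal sketch of that conditional-compilation pattern, with a local HAVE_PM_SLEEP macro standing in for the kernel config option:

    #include <stdio.h>
    #include <stddef.h>

    /* Toggle this to mimic a build without CONFIG_PM_SLEEP. */
    #define HAVE_PM_SLEEP 1

    struct dev_pm_ops {
        int (*suspend_late)(void *dev);
        int (*resume_early)(void *dev);
    };

    #if HAVE_PM_SLEEP

    static int suspend_late(void *dev)
    {
        (void)dev;
        printf("suspending\n");
        return 0;
    }

    static int resume_early(void *dev)
    {
        (void)dev;
        printf("resuming\n");
        return 0;
    }

    #else /* !HAVE_PM_SLEEP */

    /* With the callbacks compiled out, the table below still initializes. */
    #define suspend_late NULL
    #define resume_early NULL

    #endif

    static const struct dev_pm_ops pm_ops = {
        .suspend_late = suspend_late,
        .resume_early = resume_early,
    };

    int main(void)
    {
        if (pm_ops.suspend_late)
            pm_ops.suspend_late(NULL);
        if (pm_ops.resume_early)
            pm_ops.resume_early(NULL);
        return 0;
    }
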
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index deb4274f80f4..bb98d3e91e8b 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -252,13 +252,13 @@ struct dw_dma {
struct tasklet_struct tasklet;
struct clk *clk;
+ /* channels */
+ struct dw_dma_chan *chan;
u8 all_chan_mask;
/* hardware configuration */
unsigned char nr_masters;
unsigned char data_width[4];
-
- struct dw_dma_chan chan[0];
};
static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
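
The dw_dmac hunks drop the zero-length chan[] member at the end of struct dw_dma in favour of a separately allocated channel array, so the core structure can be allocated and published (dw->regs, chip->dw) before the channel count is read from the hardware parameters. The sketch below contrasts the two layouts in plain C; the structure and function names are simplified stand-ins, not the driver's real types.

    #include <stdio.h>
    #include <stdlib.h>

    struct chan { int id; };

    /* Variant A: trailing flexible array -- one allocation, but the channel
     * count must be known before the containing struct can exist. */
    struct engine_flex {
        int nr_chan;
        struct chan chan[];
    };

    /* Variant B: separate array -- the struct can be allocated first and the
     * channels sized later, e.g. after reading an autoconfiguration register. */
    struct engine_split {
        int nr_chan;
        struct chan *chan;
    };

    static int read_nr_channels_from_hw(void)
    {
        return 4; /* placeholder for a hardware parameter read */
    }

    int main(void)
    {
        struct engine_split *e = calloc(1, sizeof(*e));
        if (!e)
            return 1;

        /* The struct already exists, so it could be published before this
         * point; only now do we learn how many channels to allocate. */
        e->nr_chan = read_nr_channels_from_hw();
        e->chan = calloc(e->nr_chan, sizeof(*e->chan));
        if (!e->chan) {
            free(e);
            return 1;
        }

        printf("allocated %d channels\n", e->nr_chan);
        free(e->chan);
        free(e);
        return 0;
    }
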
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index cd8da451d199..cd04eb7b182e 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -539,6 +539,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
edma_alloc_slot(EDMA_CTLR(echan->ch_num),
EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
+ kfree(edesc);
dev_err(dev, "Failed to allocate slot\n");
return NULL;
}
@@ -553,8 +554,10 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
dst_addr, burst, dev_width, period_len,
direction);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(edesc);
return NULL;
+ }
if (direction == DMA_DEV_TO_MEM)
dst_addr += period_len;
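
Both edma.c hunks plug the same leak: edesc is allocated at the top of the cyclic-prepare path, so every later early return has to free it first. A generic sketch of that shape, with invented helpers in place of the EDMA slot API (a fuller version would also release any slots already set up):

    #include <stdio.h>
    #include <stdlib.h>

    struct desc { int nr_slots; int slot[8]; };

    static int alloc_slot(void)
    {
        static int next;
        return next < 8 ? next++ : -1; /* pretend slots run out after 8 */
    }

    static int config_slot(struct desc *d, int i)
    {
        (void)d;
        return i < 6 ? 0 : -1; /* pretend configuration fails late */
    }

    /* The descriptor is allocated first, then per-period resources; any
     * failure after the allocation must free the descriptor before bailing. */
    static struct desc *prep_cyclic(int periods)
    {
        struct desc *d = calloc(1, sizeof(*d));
        int i;

        if (!d)
            return NULL;

        for (i = 0; i < periods && i < 8; i++) {
            d->slot[i] = alloc_slot();
            if (d->slot[i] < 0) {
                free(d); /* the fix: don't leak the descriptor */
                return NULL;
            }
            if (config_slot(d, i) < 0) {
                free(d);
                return NULL;
            }
            d->nr_slots++;
        }
        return d;
    }

    int main(void)
    {
        struct desc *d = prep_cyclic(4);
        printf("prep %s\n", d ? "succeeded" : "failed");
        free(d);
        return 0;
    }
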
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
new file mode 100644
index 000000000000..381e793184ba
--- /dev/null
+++ b/drivers/dma/fsl-edma.c
@@ -0,0 +1,975 @@
+/*
+ * drivers/dma/fsl-edma.c
+ *
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ *
+ * Driver for the Freescale eDMA engine with flexible channel multiplexing
+ * capability for DMA request sources. The eDMA block can be found on some
+ * Vybrid and Layerscape SoCs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define EDMA_CR 0x00
+#define EDMA_ES 0x04
+#define EDMA_ERQ 0x0C
+#define EDMA_EEI 0x14
+#define EDMA_SERQ 0x1B
+#define EDMA_CERQ 0x1A
+#define EDMA_SEEI 0x19
+#define EDMA_CEEI 0x18
+#define EDMA_CINT 0x1F
+#define EDMA_CERR 0x1E
+#define EDMA_SSRT 0x1D
+#define EDMA_CDNE 0x1C
+#define EDMA_INTR 0x24
+#define EDMA_ERR 0x2C
+
+#define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x))
+#define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x))
+#define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x))
+#define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x))
+#define EDMA_TCD_SLAST(x) (0x100C + 32 * (x))
+#define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x))
+#define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x))
+#define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x))
+#define EDMA_TCD_CITER(x) (0x1016 + 32 * (x))
+#define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x))
+#define EDMA_TCD_CSR(x) (0x101C + 32 * (x))
+#define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x))
+#define EDMA_TCD_BITER(x) (0x101E + 32 * (x))
+
+#define EDMA_CR_EDBG BIT(1)
+#define EDMA_CR_ERCA BIT(2)
+#define EDMA_CR_ERGA BIT(3)
+#define EDMA_CR_HOE BIT(4)
+#define EDMA_CR_HALT BIT(5)
+#define EDMA_CR_CLM BIT(6)
+#define EDMA_CR_EMLM BIT(7)
+#define EDMA_CR_ECX BIT(16)
+#define EDMA_CR_CX BIT(17)
+
+#define EDMA_SEEI_SEEI(x) ((x) & 0x1F)
+#define EDMA_CEEI_CEEI(x) ((x) & 0x1F)
+#define EDMA_CINT_CINT(x) ((x) & 0x1F)
+#define EDMA_CERR_CERR(x) ((x) & 0x1F)
+
+#define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007))
+#define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3)
+#define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8)
+#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11)
+#define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000)
+#define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100)
+#define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200)
+#define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300)
+#define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500)
+#define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000)
+#define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001)
+#define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002)
+#define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003)
+#define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005)
+
+#define EDMA_TCD_SOFF_SOFF(x) (x)
+#define EDMA_TCD_NBYTES_NBYTES(x) (x)
+#define EDMA_TCD_SLAST_SLAST(x) (x)
+#define EDMA_TCD_DADDR_DADDR(x) (x)
+#define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF)
+#define EDMA_TCD_DOFF_DOFF(x) (x)
+#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
+#define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF)
+
+#define EDMA_TCD_CSR_START BIT(0)
+#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
+#define EDMA_TCD_CSR_INT_HALF BIT(2)
+#define EDMA_TCD_CSR_D_REQ BIT(3)
+#define EDMA_TCD_CSR_E_SG BIT(4)
+#define EDMA_TCD_CSR_E_LINK BIT(5)
+#define EDMA_TCD_CSR_ACTIVE BIT(6)
+#define EDMA_TCD_CSR_DONE BIT(7)
+
+#define EDMAMUX_CHCFG_DIS 0x0
+#define EDMAMUX_CHCFG_ENBL 0x80
+#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
+
+#define DMAMUX_NR 2
+
+#define FSL_EDMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+struct fsl_edma_hw_tcd {
+ u32 saddr;
+ u16 soff;
+ u16 attr;
+ u32 nbytes;
+ u32 slast;
+ u32 daddr;
+ u16 doff;
+ u16 citer;
+ u32 dlast_sga;
+ u16 csr;
+ u16 biter;
+};
+
+struct fsl_edma_sw_tcd {
+ dma_addr_t ptcd;
+ struct fsl_edma_hw_tcd *vtcd;
+};
+
+struct fsl_edma_slave_config {
+ enum dma_transfer_direction dir;
+ enum dma_slave_buswidth addr_width;
+ u32 dev_addr;
+ u32 burst;
+ u32 attr;
+};
+
+struct fsl_edma_chan {
+ struct virt_dma_chan vchan;
+ enum dma_status status;
+ struct fsl_edma_engine *edma;
+ struct fsl_edma_desc *edesc;
+ struct fsl_edma_slave_config fsc;
+ struct dma_pool *tcd_pool;
+};
+
+struct fsl_edma_desc {
+ struct virt_dma_desc vdesc;
+ struct fsl_edma_chan *echan;
+ bool iscyclic;
+ unsigned int n_tcds;
+ struct fsl_edma_sw_tcd tcd[];
+};
+
+struct fsl_edma_engine {
+ struct dma_device dma_dev;
+ void __iomem *membase;
+ void __iomem *muxbase[DMAMUX_NR];
+ struct clk *muxclk[DMAMUX_NR];
+ struct mutex fsl_edma_mutex;
+ u32 n_chans;
+ int txirq;
+ int errirq;
+ bool big_endian;
+ struct fsl_edma_chan chans[];
+};
+
+/*
+ * Register read/write helpers for big- or little-endian access.
+ * The eDMA controller's endianness is independent of the CPU core's.
+ */
+
+static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ if (edma->big_endian)
+ return ioread16be(addr);
+ else
+ return ioread16(addr);
+}
+
+static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ if (edma->big_endian)
+ return ioread32be(addr);
+ else
+ return ioread32(addr);
+}
+
+static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
+{
+ iowrite8(val, addr);
+}
+
+static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
+{
+ if (edma->big_endian)
+ iowrite16be(val, addr);
+ else
+ iowrite16(val, addr);
+}
+
+static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
+{
+ if (edma->big_endian)
+ iowrite32be(val, addr);
+ else
+ iowrite32(val, addr);
+}
+
+static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct fsl_edma_chan, vchan.chan);
+}
+
+static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct fsl_edma_desc, vdesc);
+}
+
+static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+ void __iomem *addr = fsl_chan->edma->membase;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
+ edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
+}
+
+static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+ void __iomem *addr = fsl_chan->edma->membase;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
+ edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
+}
+
+static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
+ unsigned int slot, bool enable)
+{
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+ void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR];
+ unsigned chans_per_mux, ch_off;
+
+ chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
+ ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+
+ if (enable)
+ edma_writeb(fsl_chan->edma,
+ EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
+ muxaddr + ch_off);
+ else
+ edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+}
+
+static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+{
+ switch (addr_width) {
+ case 1:
+ return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
+ case 2:
+ return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
+ case 4:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ case 8:
+ return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
+ default:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ }
+}
+
+static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct fsl_edma_desc *fsl_desc;
+ int i;
+
+ fsl_desc = to_fsl_edma_desc(vdesc);
+ for (i = 0; i < fsl_desc->n_tcds; i++)
+ dma_pool_free(fsl_desc->echan->tcd_pool,
+ fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+}
+
+static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct dma_slave_config *cfg = (void *)arg;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma_disable_request(fsl_chan);
+ fsl_chan->edesc = NULL;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ return 0;
+
+ case DMA_SLAVE_CONFIG:
+ fsl_chan->fsc.dir = cfg->direction;
+ if (cfg->direction == DMA_DEV_TO_MEM) {
+ fsl_chan->fsc.dev_addr = cfg->src_addr;
+ fsl_chan->fsc.addr_width = cfg->src_addr_width;
+ fsl_chan->fsc.burst = cfg->src_maxburst;
+ fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
+ } else if (cfg->direction == DMA_MEM_TO_DEV) {
+ fsl_chan->fsc.dev_addr = cfg->dst_addr;
+ fsl_chan->fsc.addr_width = cfg->dst_addr_width;
+ fsl_chan->fsc.burst = cfg->dst_maxburst;
+ fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+
+ case DMA_PAUSE:
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma_disable_request(fsl_chan);
+ fsl_chan->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ return 0;
+
+ case DMA_RESUME:
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ return 0;
+
+ default:
+ return -ENXIO;
+ }
+}
+
+static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
+ struct virt_dma_desc *vdesc, bool in_progress)
+{
+ struct fsl_edma_desc *edesc = fsl_chan->edesc;
+ void __iomem *addr = fsl_chan->edma->membase;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+ enum dma_transfer_direction dir = fsl_chan->fsc.dir;
+ dma_addr_t cur_addr, dma_addr;
+ size_t len, size;
+ int i;
+
+ /* calculate the total size in this desc */
+ for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
+ len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
+ * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+
+ if (!in_progress)
+ return len;
+
+ if (dir == DMA_MEM_TO_DEV)
+ cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
+ else
+ cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
+
+ /* figure out which TCDs have finished and calculate the residue */
+ for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+ size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
+ * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+ if (dir == DMA_MEM_TO_DEV)
+ dma_addr = edma_readl(fsl_chan->edma,
+ &(edesc->tcd[i].vtcd->saddr));
+ else
+ dma_addr = edma_readl(fsl_chan->edma,
+ &(edesc->tcd[i].vtcd->daddr));
+
+ len -= size;
+ if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
+ len += dma_addr + size - cur_addr;
+ break;
+ }
+ }
+
+ return len;
+}
+
+static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ if (!txstate)
+ return fsl_chan->status;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
+ if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
+ txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
+ else if (vdesc)
+ txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
+ else
+ txstate->residue = 0;
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ return fsl_chan->status;
+}
+
+static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
+ u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
+ u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
+ u16 csr)
+{
+ void __iomem *addr = fsl_chan->edma->membase;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ /*
+ * TCD parameters have already been swapped in fill_tcd_params(),
+ * so just write them to the registers in CPU endianness here
+ */
+ writew(0, addr + EDMA_TCD_CSR(ch));
+ writel(src, addr + EDMA_TCD_SADDR(ch));
+ writel(dst, addr + EDMA_TCD_DADDR(ch));
+ writew(attr, addr + EDMA_TCD_ATTR(ch));
+ writew(soff, addr + EDMA_TCD_SOFF(ch));
+ writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
+ writel(slast, addr + EDMA_TCD_SLAST(ch));
+ writew(citer, addr + EDMA_TCD_CITER(ch));
+ writew(biter, addr + EDMA_TCD_BITER(ch));
+ writew(doff, addr + EDMA_TCD_DOFF(ch));
+ writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
+ writew(csr, addr + EDMA_TCD_CSR(ch));
+}
+
+static void fill_tcd_params(struct fsl_edma_engine *edma,
+ struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+ u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
+ u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+ bool disable_req, bool enable_sg)
+{
+ u16 csr = 0;
+
+ /*
+ * eDMA hardware scatter-gather requires the TCD parameters stored in
+ * memory to use the same endianness as the eDMA module so that they
+ * can be loaded automatically by the engine.
+ */
+ edma_writel(edma, src, &(tcd->saddr));
+ edma_writel(edma, dst, &(tcd->daddr));
+ edma_writew(edma, attr, &(tcd->attr));
+ edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
+ edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
+ edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
+ edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
+ edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
+ edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
+ edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));
+ if (major_int)
+ csr |= EDMA_TCD_CSR_INT_MAJOR;
+
+ if (disable_req)
+ csr |= EDMA_TCD_CSR_D_REQ;
+
+ if (enable_sg)
+ csr |= EDMA_TCD_CSR_E_SG;
+
+ edma_writew(edma, csr, &(tcd->csr));
+}
+
+static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
+ int sg_len)
+{
+ struct fsl_edma_desc *fsl_desc;
+ int i;
+
+ fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
+ GFP_NOWAIT);
+ if (!fsl_desc)
+ return NULL;
+
+ fsl_desc->echan = fsl_chan;
+ fsl_desc->n_tcds = sg_len;
+ for (i = 0; i < sg_len; i++) {
+ fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
+ GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
+ if (!fsl_desc->tcd[i].vtcd)
+ goto err;
+ }
+ return fsl_desc;
+
+err:
+ while (--i >= 0)
+ dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct fsl_edma_desc *fsl_desc;
+ dma_addr_t dma_buf_next;
+ int sg_len, i;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+
+ if (!is_slave_direction(fsl_chan->fsc.dir))
+ return NULL;
+
+ sg_len = buf_len / period_len;
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = true;
+
+ dma_buf_next = dma_addr;
+ nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
+ iter = period_len / nbytes;
+
+ for (i = 0; i < sg_len; i++) {
+ if (dma_buf_next >= dma_addr + buf_len)
+ dma_buf_next = dma_addr;
+
+ /* get next sg's physical address */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+ src_addr = dma_buf_next;
+ dst_addr = fsl_chan->fsc.dev_addr;
+ soff = fsl_chan->fsc.addr_width;
+ doff = 0;
+ } else {
+ src_addr = fsl_chan->fsc.dev_addr;
+ dst_addr = dma_buf_next;
+ soff = 0;
+ doff = fsl_chan->fsc.addr_width;
+ }
+
+ fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
+ iter, iter, doff, last_sg, true, false, true);
+ dma_buf_next += period_len;
+ }
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct fsl_edma_desc *fsl_desc;
+ struct scatterlist *sg;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+ int i;
+
+ if (!is_slave_direction(fsl_chan->fsc.dir))
+ return NULL;
+
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = false;
+
+ nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
+ for_each_sg(sgl, sg, sg_len, i) {
+ /* get next sg's physical address */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+ src_addr = sg_dma_address(sg);
+ dst_addr = fsl_chan->fsc.dev_addr;
+ soff = fsl_chan->fsc.addr_width;
+ doff = 0;
+ } else {
+ src_addr = fsl_chan->fsc.dev_addr;
+ dst_addr = sg_dma_address(sg);
+ soff = 0;
+ doff = fsl_chan->fsc.addr_width;
+ }
+
+ iter = sg_dma_len(sg) / nbytes;
+ if (i < sg_len - 1) {
+ last_sg = fsl_desc->tcd[(i + 1)].ptcd;
+ fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
+ src_addr, dst_addr, fsl_chan->fsc.attr,
+ soff, nbytes, 0, iter, iter, doff, last_sg,
+ false, false, true);
+ } else {
+ last_sg = 0;
+ fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
+ src_addr, dst_addr, fsl_chan->fsc.attr,
+ soff, nbytes, 0, iter, iter, doff, last_sg,
+ true, true, false);
+ }
+ }
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+
+static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
+{
+ struct fsl_edma_hw_tcd *tcd;
+ struct virt_dma_desc *vdesc;
+
+ vdesc = vchan_next_desc(&fsl_chan->vchan);
+ if (!vdesc)
+ return;
+ fsl_chan->edesc = to_fsl_edma_desc(vdesc);
+ tcd = fsl_chan->edesc->tcd[0].vtcd;
+ fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
+ tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
+ tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
+ fsl_edma_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+}
+
+static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ unsigned int intr, ch;
+ void __iomem *base_addr;
+ struct fsl_edma_chan *fsl_chan;
+
+ base_addr = fsl_edma->membase;
+
+ intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
+ if (!intr)
+ return IRQ_NONE;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ if (intr & (0x1 << ch)) {
+ edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
+ base_addr + EDMA_CINT);
+
+ fsl_chan = &fsl_edma->chans[ch];
+
+ spin_lock(&fsl_chan->vchan.lock);
+ if (!fsl_chan->edesc->iscyclic) {
+ list_del(&fsl_chan->edesc->vdesc.node);
+ vchan_cookie_complete(&fsl_chan->edesc->vdesc);
+ fsl_chan->edesc = NULL;
+ fsl_chan->status = DMA_COMPLETE;
+ } else {
+ vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
+ }
+
+ if (!fsl_chan->edesc)
+ fsl_edma_xfer_desc(fsl_chan);
+
+ spin_unlock(&fsl_chan->vchan.lock);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ unsigned int err, ch;
+
+ err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
+ if (!err)
+ return IRQ_NONE;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ if (err & (0x1 << ch)) {
+ fsl_edma_disable_request(&fsl_edma->chans[ch]);
+ edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
+ fsl_edma->membase + EDMA_CERR);
+ fsl_edma->chans[ch].status = DMA_ERROR;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
+{
+ if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
+ return IRQ_HANDLED;
+
+ return fsl_edma_err_handler(irq, dev_id);
+}
+
+static void fsl_edma_issue_pending(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+
+ if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
+ fsl_edma_xfer_desc(fsl_chan);
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+}
+
+static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
+ struct dma_chan *chan, *_chan;
+
+ if (dma_spec->args_count != 2)
+ return NULL;
+
+ mutex_lock(&fsl_edma->fsl_edma_mutex);
+ list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
+ if (chan->client_count)
+ continue;
+ if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) {
+ chan = dma_get_slave_channel(chan);
+ if (chan) {
+ chan->device->privatecnt++;
+ fsl_edma_chan_mux(to_fsl_edma_chan(chan),
+ dma_spec->args[1], true);
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ }
+ }
+ }
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return NULL;
+}
+
+static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+ fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
+ sizeof(struct fsl_edma_hw_tcd),
+ 32, 0);
+ return 0;
+}
+
+static void fsl_edma_free_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma_disable_request(fsl_chan);
+ fsl_edma_chan_mux(fsl_chan, 0, false);
+ fsl_chan->edesc = NULL;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ dma_pool_destroy(fsl_chan->tcd_pool);
+ fsl_chan->tcd_pool = NULL;
+}
+
+static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
+ caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = true;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
+static int
+fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ int ret;
+
+ fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
+ if (fsl_edma->txirq < 0) {
+ dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
+ return fsl_edma->txirq;
+ }
+
+ fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
+ if (fsl_edma->errirq < 0) {
+ dev_err(&pdev->dev, "Can't get edma-err irq.\n");
+ return fsl_edma->errirq;
+ }
+
+ if (fsl_edma->txirq == fsl_edma->errirq) {
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+ fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
+ return ret;
+ }
+ } else {
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+ fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
+ fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int fsl_edma_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct fsl_edma_engine *fsl_edma;
+ struct fsl_edma_chan *fsl_chan;
+ struct resource *res;
+ int len, chans;
+ int ret, i;
+
+ ret = of_property_read_u32(np, "dma-channels", &chans);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get dma-channels.\n");
+ return ret;
+ }
+
+ len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
+ fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!fsl_edma)
+ return -ENOMEM;
+
+ fsl_edma->n_chans = chans;
+ mutex_init(&fsl_edma->fsl_edma_mutex);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_edma->membase))
+ return PTR_ERR(fsl_edma->membase);
+
+ for (i = 0; i < DMAMUX_NR; i++) {
+ char clkname[32];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
+ fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_edma->muxbase[i]))
+ return PTR_ERR(fsl_edma->muxbase[i]);
+
+ sprintf(clkname, "dmamux%d", i);
+ fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
+ if (IS_ERR(fsl_edma->muxclk[i])) {
+ dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+ return PTR_ERR(fsl_edma->muxclk[i]);
+ }
+
+ ret = clk_prepare_enable(fsl_edma->muxclk[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
+ return ret;
+ }
+
+ }
+
+ ret = fsl_edma_irq_init(pdev, fsl_edma);
+ if (ret)
+ return ret;
+
+ fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
+
+ INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+
+ fsl_chan->edma = fsl_edma;
+
+ fsl_chan->vchan.desc_free = fsl_edma_free_desc;
+ vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
+
+ edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ fsl_edma_chan_mux(fsl_chan, 0, false);
+ }
+
+ dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
+
+ fsl_edma->dma_dev.dev = &pdev->dev;
+ fsl_edma->dma_dev.device_alloc_chan_resources
+ = fsl_edma_alloc_chan_resources;
+ fsl_edma->dma_dev.device_free_chan_resources
+ = fsl_edma_free_chan_resources;
+ fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
+ fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
+ fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
+ fsl_edma->dma_dev.device_control = fsl_edma_control;
+ fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
+ fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;
+
+ platform_set_drvdata(pdev, fsl_edma);
+
+ ret = dma_async_device_register(&fsl_edma->dma_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
+ dma_async_device_unregister(&fsl_edma->dma_dev);
+ return ret;
+ }
+
+ /* enable round robin arbitration */
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
+
+ return 0;
+}
+
+static int fsl_edma_remove(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
+ int i;
+
+ of_dma_controller_free(np);
+ dma_async_device_unregister(&fsl_edma->dma_dev);
+
+ for (i = 0; i < DMAMUX_NR; i++)
+ clk_disable_unprepare(fsl_edma->muxclk[i]);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_edma_dt_ids[] = {
+ { .compatible = "fsl,vf610-edma", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+
+static struct platform_driver fsl_edma_driver = {
+ .driver = {
+ .name = "fsl-edma",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_edma_dt_ids,
+ },
+ .probe = fsl_edma_probe,
+ .remove = fsl_edma_remove,
+};
+
+module_platform_driver(fsl_edma_driver);
+
+MODULE_ALIAS("platform:fsl-edma");
+MODULE_DESCRIPTION("Freescale eDMA engine driver");
+MODULE_LICENSE("GPL v2");
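
Apart from the new driver itself, the most reusable idea in fsl-edma.c is the endianness dispatch: the controller's byte order is taken from the "big-endian" device-tree property and every register access goes through edma_readl()/edma_writel()-style wrappers. The stand-alone sketch below mimics that dispatch with plain byte swapping instead of the kernel's ioread32()/ioread32be() accessors, and assumes a little-endian host purely to keep the example short.

    #include <stdio.h>
    #include <stdint.h>

    struct edma { int big_endian; };

    static uint32_t bswap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00) |
               ((v << 8) & 0x00ff0000) | (v << 24);
    }

    /* Host assumed little-endian for this sketch; a swap is only needed
     * when the device side is big-endian. */
    static uint32_t edma_read32(const struct edma *e, const uint32_t *reg)
    {
        uint32_t raw = *reg;
        return e->big_endian ? bswap32(raw) : raw;
    }

    static void edma_write32(const struct edma *e, uint32_t *reg, uint32_t val)
    {
        *reg = e->big_endian ? bswap32(val) : val;
    }

    int main(void)
    {
        struct edma le = { .big_endian = 0 }, be = { .big_endian = 1 };
        uint32_t reg = 0;

        edma_write32(&le, &reg, 0x12345678);
        printf("LE device register image: 0x%08x\n", reg);

        edma_write32(&be, &reg, 0x12345678);
        printf("BE device register image: 0x%08x\n", reg);
        printf("read back: 0x%08x\n", edma_read32(&be, &reg));
        return 0;
    }
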
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 6f9ac2022abd..286660a12cc6 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -422,12 +422,12 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
/* Tasklet error handler */
tasklet_schedule(&imxdma->channel[i].dma_tasklet);
- printk(KERN_WARNING
- "DMA timeout on channel %d -%s%s%s%s\n", i,
- errcode & IMX_DMA_ERR_BURST ? " burst" : "",
- errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
- errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
- errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
+ dev_warn(imxdma->dev,
+ "DMA timeout on channel %d -%s%s%s%s\n", i,
+ errcode & IMX_DMA_ERR_BURST ? " burst" : "",
+ errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
+ errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
+ errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
}
return IRQ_HANDLED;
}
@@ -1236,6 +1236,7 @@ static int imxdma_remove(struct platform_device *pdev)
static struct platform_driver imxdma_driver = {
.driver = {
.name = "imx-dma",
+ .owner = THIS_MODULE,
.of_match_table = imx_dma_of_dev_id,
},
.id_table = imx_dma_devtype,
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index b439679f4126..bf02e7beb51a 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -867,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
phy->base = pdev->base;
if (irq) {
- ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
- "pdma", phy);
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
+ IRQF_SHARED, "pdma", phy);
if (ret) {
dev_err(pdev->dev, "channel request irq fail!\n");
return ret;
@@ -957,8 +957,8 @@ static int mmp_pdma_probe(struct platform_device *op)
if (irq_num != dma_channels) {
/* all chan share one irq, demux inside */
irq = platform_get_irq(op, 0);
- ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
- "pdma", pdev);
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
+ IRQF_SHARED, "pdma", pdev);
if (ret)
return ret;
}
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 33f96aaa80c7..724f7f4c9720 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -22,6 +22,7 @@
#include <mach/regs-icu.h>
#include <linux/platform_data/dma-mmp_tdma.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include "dmaengine.h"
@@ -541,6 +542,45 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
return 0;
}
+struct mmp_tdma_filter_param {
+ struct device_node *of_node;
+ unsigned int chan_id;
+};
+
+static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
+{
+ struct mmp_tdma_filter_param *param = fn_param;
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+ struct dma_device *pdma_device = tdmac->chan.device;
+
+ if (pdma_device->dev->of_node != param->of_node)
+ return false;
+
+ if (chan->chan_id != param->chan_id)
+ return false;
+
+ return true;
+}
+
+struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct mmp_tdma_device *tdev = ofdma->of_dma_data;
+ dma_cap_mask_t mask = tdev->device.cap_mask;
+ struct mmp_tdma_filter_param param;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ param.of_node = ofdma->of_node;
+ param.chan_id = dma_spec->args[0];
+
+ if (param.chan_id >= TDMA_CHANNEL_NUM)
+ return NULL;
+
+ return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
+}
+
static struct of_device_id mmp_tdma_dt_ids[] = {
{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
@@ -631,6 +671,16 @@ static int mmp_tdma_probe(struct platform_device *pdev)
return ret;
}
+ if (pdev->dev.of_node) {
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ mmp_tdma_xlate, tdev);
+ if (ret) {
+ dev_err(tdev->device.dev,
+ "failed to register controller\n");
+ dma_async_device_unregister(&tdev->device);
+ }
+ }
+
dev_info(tdev->device.dev, "initialized\n");
return 0;
}
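
The mmp_tdma hunks add an of_dma translation hook: mmp_tdma_xlate() turns a one-cell DT specifier into a channel request by pairing dma_request_channel() with a filter callback that matches both the providing controller and the requested channel index. The sketch below shows the filter-match pattern with simplified structures standing in for the dmaengine types; request_channel() here is an invented stand-in, not the real dmaengine call.

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct chan {
        const void *provider; /* stands in for the controller's of_node */
        unsigned int chan_id;
        bool busy;
    };

    struct filter_param {
        const void *provider;
        unsigned int chan_id;
    };

    /* Accept only an idle channel of the right controller and index. */
    static bool tdma_filter(const struct chan *c, const struct filter_param *p)
    {
        if (c->provider != p->provider)
            return false;
        if (c->chan_id != p->chan_id)
            return false;
        return !c->busy;
    }

    /* Simplified stand-in for dma_request_channel(): walk all channels and
     * hand out the first one the filter accepts. */
    static struct chan *request_channel(struct chan *chans, size_t n,
                                        const struct filter_param *p)
    {
        size_t i;

        for (i = 0; i < n; i++) {
            if (tdma_filter(&chans[i], p)) {
                chans[i].busy = true;
                return &chans[i];
            }
        }
        return NULL;
    }

    int main(void)
    {
        static int node_a, node_b;
        struct chan chans[] = {
            { &node_a, 0, false },
            { &node_a, 1, false },
            { &node_b, 0, false },
        };
        struct filter_param want = { .provider = &node_a, .chan_id = 1 };
        struct chan *c = request_channel(chans, 3, &want);

        printf("matched channel %u\n", c ? c->chan_id : 999u);
        return 0;
    }
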
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 362e7c49f2e1..b19f04f4390b 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
@@ -26,11 +27,21 @@ struct omap_dmadev {
spinlock_t lock;
struct tasklet_struct task;
struct list_head pending;
+ void __iomem *base;
+ const struct omap_dma_reg *reg_map;
+ struct omap_system_dma_plat_info *plat;
+ bool legacy;
+ spinlock_t irq_lock;
+ uint32_t irq_enable_mask;
+ struct omap_chan *lch_map[32];
};
struct omap_chan {
struct virt_dma_chan vc;
struct list_head node;
+ void __iomem *channel_base;
+ const struct omap_dma_reg *reg_map;
+ uint32_t ccr;
struct dma_slave_config cfg;
unsigned dma_sig;
@@ -54,19 +65,93 @@ struct omap_desc {
dma_addr_t dev_addr;
int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
- uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */
- uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */
- uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */
- uint8_t periph_port; /* Peripheral port */
+ uint8_t es; /* CSDP_DATA_TYPE_xxx */
+ uint32_t ccr; /* CCR value */
+ uint16_t clnk_ctrl; /* CLNK_CTRL value */
+ uint16_t cicr; /* CICR value */
+ uint32_t csdp; /* CSDP value */
unsigned sglen;
struct omap_sg sg[0];
};
+enum {
+ CCR_FS = BIT(5),
+ CCR_READ_PRIORITY = BIT(6),
+ CCR_ENABLE = BIT(7),
+ CCR_AUTO_INIT = BIT(8), /* OMAP1 only */
+ CCR_REPEAT = BIT(9), /* OMAP1 only */
+ CCR_OMAP31_DISABLE = BIT(10), /* OMAP1 only */
+ CCR_SUSPEND_SENSITIVE = BIT(8), /* OMAP2+ only */
+ CCR_RD_ACTIVE = BIT(9), /* OMAP2+ only */
+ CCR_WR_ACTIVE = BIT(10), /* OMAP2+ only */
+ CCR_SRC_AMODE_CONSTANT = 0 << 12,
+ CCR_SRC_AMODE_POSTINC = 1 << 12,
+ CCR_SRC_AMODE_SGLIDX = 2 << 12,
+ CCR_SRC_AMODE_DBLIDX = 3 << 12,
+ CCR_DST_AMODE_CONSTANT = 0 << 14,
+ CCR_DST_AMODE_POSTINC = 1 << 14,
+ CCR_DST_AMODE_SGLIDX = 2 << 14,
+ CCR_DST_AMODE_DBLIDX = 3 << 14,
+ CCR_CONSTANT_FILL = BIT(16),
+ CCR_TRANSPARENT_COPY = BIT(17),
+ CCR_BS = BIT(18),
+ CCR_SUPERVISOR = BIT(22),
+ CCR_PREFETCH = BIT(23),
+ CCR_TRIGGER_SRC = BIT(24),
+ CCR_BUFFERING_DISABLE = BIT(25),
+ CCR_WRITE_PRIORITY = BIT(26),
+ CCR_SYNC_ELEMENT = 0,
+ CCR_SYNC_FRAME = CCR_FS,
+ CCR_SYNC_BLOCK = CCR_BS,
+ CCR_SYNC_PACKET = CCR_BS | CCR_FS,
+
+ CSDP_DATA_TYPE_8 = 0,
+ CSDP_DATA_TYPE_16 = 1,
+ CSDP_DATA_TYPE_32 = 2,
+ CSDP_SRC_PORT_EMIFF = 0 << 2, /* OMAP1 only */
+ CSDP_SRC_PORT_EMIFS = 1 << 2, /* OMAP1 only */
+ CSDP_SRC_PORT_OCP_T1 = 2 << 2, /* OMAP1 only */
+ CSDP_SRC_PORT_TIPB = 3 << 2, /* OMAP1 only */
+ CSDP_SRC_PORT_OCP_T2 = 4 << 2, /* OMAP1 only */
+ CSDP_SRC_PORT_MPUI = 5 << 2, /* OMAP1 only */
+ CSDP_SRC_PACKED = BIT(6),
+ CSDP_SRC_BURST_1 = 0 << 7,
+ CSDP_SRC_BURST_16 = 1 << 7,
+ CSDP_SRC_BURST_32 = 2 << 7,
+ CSDP_SRC_BURST_64 = 3 << 7,
+ CSDP_DST_PORT_EMIFF = 0 << 9, /* OMAP1 only */
+ CSDP_DST_PORT_EMIFS = 1 << 9, /* OMAP1 only */
+ CSDP_DST_PORT_OCP_T1 = 2 << 9, /* OMAP1 only */
+ CSDP_DST_PORT_TIPB = 3 << 9, /* OMAP1 only */
+ CSDP_DST_PORT_OCP_T2 = 4 << 9, /* OMAP1 only */
+ CSDP_DST_PORT_MPUI = 5 << 9, /* OMAP1 only */
+ CSDP_DST_PACKED = BIT(13),
+ CSDP_DST_BURST_1 = 0 << 14,
+ CSDP_DST_BURST_16 = 1 << 14,
+ CSDP_DST_BURST_32 = 2 << 14,
+ CSDP_DST_BURST_64 = 3 << 14,
+
+ CICR_TOUT_IE = BIT(0), /* OMAP1 only */
+ CICR_DROP_IE = BIT(1),
+ CICR_HALF_IE = BIT(2),
+ CICR_FRAME_IE = BIT(3),
+ CICR_LAST_IE = BIT(4),
+ CICR_BLOCK_IE = BIT(5),
+ CICR_PKT_IE = BIT(7), /* OMAP2+ only */
+ CICR_TRANS_ERR_IE = BIT(8), /* OMAP2+ only */
+ CICR_SUPERVISOR_ERR_IE = BIT(10), /* OMAP2+ only */
+ CICR_MISALIGNED_ERR_IE = BIT(11), /* OMAP2+ only */
+ CICR_DRAIN_IE = BIT(12), /* OMAP2+ only */
+ CICR_SUPER_BLOCK_IE = BIT(14), /* OMAP2+ only */
+
+ CLNK_CTRL_ENABLE_LNK = BIT(15),
+};
+
static const unsigned es_bytes[] = {
- [OMAP_DMA_DATA_TYPE_S8] = 1,
- [OMAP_DMA_DATA_TYPE_S16] = 2,
- [OMAP_DMA_DATA_TYPE_S32] = 4,
+ [CSDP_DATA_TYPE_8] = 1,
+ [CSDP_DATA_TYPE_16] = 2,
+ [CSDP_DATA_TYPE_32] = 4,
};
static struct of_dma_filter_info omap_dma_info = {
@@ -93,28 +178,214 @@ static void omap_dma_desc_free(struct virt_dma_desc *vd)
kfree(container_of(vd, struct omap_desc, vd));
}
+static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
+{
+ switch (type) {
+ case OMAP_DMA_REG_16BIT:
+ writew_relaxed(val, addr);
+ break;
+ case OMAP_DMA_REG_2X16BIT:
+ writew_relaxed(val, addr);
+ writew_relaxed(val >> 16, addr + 2);
+ break;
+ case OMAP_DMA_REG_32BIT:
+ writel_relaxed(val, addr);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static unsigned omap_dma_read(unsigned type, void __iomem *addr)
+{
+ unsigned val;
+
+ switch (type) {
+ case OMAP_DMA_REG_16BIT:
+ val = readw_relaxed(addr);
+ break;
+ case OMAP_DMA_REG_2X16BIT:
+ val = readw_relaxed(addr);
+ val |= readw_relaxed(addr + 2) << 16;
+ break;
+ case OMAP_DMA_REG_32BIT:
+ val = readl_relaxed(addr);
+ break;
+ default:
+ WARN_ON(1);
+ val = 0;
+ }
+
+ return val;
+}
+
+static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
+{
+ const struct omap_dma_reg *r = od->reg_map + reg;
+
+ WARN_ON(r->stride);
+
+ omap_dma_write(val, r->type, od->base + r->offset);
+}
+
+static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
+{
+ const struct omap_dma_reg *r = od->reg_map + reg;
+
+ WARN_ON(r->stride);
+
+ return omap_dma_read(r->type, od->base + r->offset);
+}
+
+static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
+{
+ const struct omap_dma_reg *r = c->reg_map + reg;
+
+ omap_dma_write(val, r->type, c->channel_base + r->offset);
+}
+
+static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
+{
+ const struct omap_dma_reg *r = c->reg_map + reg;
+
+ return omap_dma_read(r->type, c->channel_base + r->offset);
+}
+
+static void omap_dma_clear_csr(struct omap_chan *c)
+{
+ if (dma_omap1())
+ omap_dma_chan_read(c, CSR);
+ else
+ omap_dma_chan_write(c, CSR, ~0);
+}
+
+static unsigned omap_dma_get_csr(struct omap_chan *c)
+{
+ unsigned val = omap_dma_chan_read(c, CSR);
+
+ if (!dma_omap1())
+ omap_dma_chan_write(c, CSR, val);
+
+ return val;
+}
+
+static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
+ unsigned lch)
+{
+ c->channel_base = od->base + od->plat->channel_stride * lch;
+
+ od->lch_map[lch] = c;
+}
+
+static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
+{
+ struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+
+ if (__dma_omap15xx(od->plat->dma_attr))
+ omap_dma_chan_write(c, CPC, 0);
+ else
+ omap_dma_chan_write(c, CDAC, 0);
+
+ omap_dma_clear_csr(c);
+
+ /* Enable interrupts */
+ omap_dma_chan_write(c, CICR, d->cicr);
+
+ /* Enable channel */
+ omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+}
+
+static void omap_dma_stop(struct omap_chan *c)
+{
+ struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+ uint32_t val;
+
+ /* disable irq */
+ omap_dma_chan_write(c, CICR, 0);
+
+ omap_dma_clear_csr(c);
+
+ val = omap_dma_chan_read(c, CCR);
+ if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
+ uint32_t sysconfig;
+ unsigned i;
+
+ sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
+ val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
+ val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
+ omap_dma_glbl_write(od, OCP_SYSCONFIG, val);
+
+ val = omap_dma_chan_read(c, CCR);
+ val &= ~CCR_ENABLE;
+ omap_dma_chan_write(c, CCR, val);
+
+ /* Wait for sDMA FIFO to drain */
+ for (i = 0; ; i++) {
+ val = omap_dma_chan_read(c, CCR);
+ if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
+ break;
+
+ if (i > 100)
+ break;
+
+ udelay(5);
+ }
+
+ if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
+ dev_err(c->vc.chan.device->dev,
+ "DMA drain did not complete on lch %d\n",
+ c->dma_ch);
+
+ omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
+ } else {
+ val &= ~CCR_ENABLE;
+ omap_dma_chan_write(c, CCR, val);
+ }
+
+ mb();
+
+ if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
+ val = omap_dma_chan_read(c, CLNK_CTRL);
+
+ if (dma_omap1())
+ val |= 1 << 14; /* set the STOP_LNK bit */
+ else
+ val &= ~CLNK_CTRL_ENABLE_LNK;
+
+ omap_dma_chan_write(c, CLNK_CTRL, val);
+ }
+}
+
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
unsigned idx)
{
struct omap_sg *sg = d->sg + idx;
+ unsigned cxsa, cxei, cxfi;
- if (d->dir == DMA_DEV_TO_MEM)
- omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
- OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
- else
- omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
- OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+ if (d->dir == DMA_DEV_TO_MEM) {
+ cxsa = CDSA;
+ cxei = CDEI;
+ cxfi = CDFI;
+ } else {
+ cxsa = CSSA;
+ cxei = CSEI;
+ cxfi = CSFI;
+ }
- omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
- d->sync_mode, c->dma_sig, d->sync_type);
+ omap_dma_chan_write(c, cxsa, sg->addr);
+ omap_dma_chan_write(c, cxei, 0);
+ omap_dma_chan_write(c, cxfi, 0);
+ omap_dma_chan_write(c, CEN, sg->en);
+ omap_dma_chan_write(c, CFN, sg->fn);
- omap_start_dma(c->dma_ch);
+ omap_dma_start(c, d);
}
static void omap_dma_start_desc(struct omap_chan *c)
{
struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
struct omap_desc *d;
+ unsigned cxsa, cxei, cxfi;
if (!vd) {
c->desc = NULL;
@@ -126,12 +397,32 @@ static void omap_dma_start_desc(struct omap_chan *c)
c->desc = d = to_omap_dma_desc(&vd->tx);
c->sgidx = 0;
- if (d->dir == DMA_DEV_TO_MEM)
- omap_set_dma_src_params(c->dma_ch, d->periph_port,
- OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
- else
- omap_set_dma_dest_params(c->dma_ch, d->periph_port,
- OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
+ /*
+ * This provides the necessary barrier to ensure data held in
+ * DMA coherent memory is visible to the DMA engine prior to
+ * the transfer starting.
+ */
+ mb();
+
+ omap_dma_chan_write(c, CCR, d->ccr);
+ if (dma_omap1())
+ omap_dma_chan_write(c, CCR2, d->ccr >> 16);
+
+ if (d->dir == DMA_DEV_TO_MEM) {
+ cxsa = CSSA;
+ cxei = CSEI;
+ cxfi = CSFI;
+ } else {
+ cxsa = CDSA;
+ cxei = CDEI;
+ cxfi = CDFI;
+ }
+
+ omap_dma_chan_write(c, cxsa, d->dev_addr);
+ omap_dma_chan_write(c, cxei, 0);
+ omap_dma_chan_write(c, cxfi, d->fi);
+ omap_dma_chan_write(c, CSDP, d->csdp);
+ omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
omap_dma_start_sg(c, d, 0);
}
@@ -186,24 +477,118 @@ static void omap_dma_sched(unsigned long data)
}
}
+static irqreturn_t omap_dma_irq(int irq, void *devid)
+{
+ struct omap_dmadev *od = devid;
+ unsigned status, channel;
+
+ spin_lock(&od->irq_lock);
+
+ status = omap_dma_glbl_read(od, IRQSTATUS_L1);
+ status &= od->irq_enable_mask;
+ if (status == 0) {
+ spin_unlock(&od->irq_lock);
+ return IRQ_NONE;
+ }
+
+ while ((channel = ffs(status)) != 0) {
+ unsigned mask, csr;
+ struct omap_chan *c;
+
+ channel -= 1;
+ mask = BIT(channel);
+ status &= ~mask;
+
+ c = od->lch_map[channel];
+ if (c == NULL) {
+ /* This should never happen */
+ dev_err(od->ddev.dev, "invalid channel %u\n", channel);
+ continue;
+ }
+
+ csr = omap_dma_get_csr(c);
+ omap_dma_glbl_write(od, IRQSTATUS_L1, mask);
+
+ omap_dma_callback(channel, csr, c);
+ }
+
+ spin_unlock(&od->irq_lock);
+
+ return IRQ_HANDLED;
+}
+
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
+ int ret;
- dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+ if (od->legacy) {
+ ret = omap_request_dma(c->dma_sig, "DMA engine",
+ omap_dma_callback, c, &c->dma_ch);
+ } else {
+ ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
+ &c->dma_ch);
+ }
+
+ dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
+ c->dma_ch, c->dma_sig);
+
+ if (ret >= 0) {
+ omap_dma_assign(od, c, c->dma_ch);
+
+ if (!od->legacy) {
+ unsigned val;
+
+ spin_lock_irq(&od->irq_lock);
+ val = BIT(c->dma_ch);
+ omap_dma_glbl_write(od, IRQSTATUS_L1, val);
+ od->irq_enable_mask |= val;
+ omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
+
+ val = omap_dma_glbl_read(od, IRQENABLE_L0);
+ val &= ~BIT(c->dma_ch);
+ omap_dma_glbl_write(od, IRQENABLE_L0, val);
+ spin_unlock_irq(&od->irq_lock);
+ }
+ }
+
+ if (dma_omap1()) {
+ if (__dma_omap16xx(od->plat->dma_attr)) {
+ c->ccr = CCR_OMAP31_DISABLE;
+ /* Duplicate what plat-omap/dma.c does */
+ c->ccr |= c->dma_ch + 1;
+ } else {
+ c->ccr = c->dma_sig & 0x1f;
+ }
+ } else {
+ c->ccr = c->dma_sig & 0x1f;
+ c->ccr |= (c->dma_sig & ~0x1f) << 14;
+ }
+ if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
+ c->ccr |= CCR_BUFFERING_DISABLE;
- return omap_request_dma(c->dma_sig, "DMA engine",
- omap_dma_callback, c, &c->dma_ch);
+ return ret;
}
static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
+ if (!od->legacy) {
+ spin_lock_irq(&od->irq_lock);
+ od->irq_enable_mask &= ~BIT(c->dma_ch);
+ omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
+ spin_unlock_irq(&od->irq_lock);
+ }
+
+ c->channel_base = NULL;
+ od->lch_map[c->dma_ch] = NULL;
vchan_free_chan_resources(&c->vc);
omap_free_dma(c->dma_ch);
- dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+ dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
}
static size_t omap_dma_sg_size(struct omap_sg *sg)
@@ -239,6 +624,74 @@ static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
return size;
}
+/*
+ * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
+ * read before the DMA controller has finished disabling the channel.
+ */
+static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
+{
+ struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+ uint32_t val;
+
+ val = omap_dma_chan_read(c, reg);
+ if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
+ val = omap_dma_chan_read(c, reg);
+
+ return val;
+}
+
+static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
+{
+ struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+ dma_addr_t addr, cdac;
+
+ if (__dma_omap15xx(od->plat->dma_attr)) {
+ addr = omap_dma_chan_read(c, CPC);
+ } else {
+ addr = omap_dma_chan_read_3_3(c, CSAC);
+ cdac = omap_dma_chan_read_3_3(c, CDAC);
+
+ /*
+ * CDAC == 0 indicates that the DMA transfer on the channel has
+ * not been started (no data has been transferred so far).
+ * Return the programmed source start address in this case.
+ */
+ if (cdac == 0)
+ addr = omap_dma_chan_read(c, CSSA);
+ }
+
+ if (dma_omap1())
+ addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;
+
+ return addr;
+}
+
+static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
+{
+ struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+ dma_addr_t addr;
+
+ if (__dma_omap15xx(od->plat->dma_attr)) {
+ addr = omap_dma_chan_read(c, CPC);
+ } else {
+ addr = omap_dma_chan_read_3_3(c, CDAC);
+
+ /*
+ * CDAC == 0 indicates that the DMA transfer on the channel
+ * has not been started (no data has been transferred so
+ * far). Return the programmed destination start address in
+ * this case.
+ */
+ if (addr == 0)
+ addr = omap_dma_chan_read(c, CDSA);
+ }
+
+ if (dma_omap1())
+ addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;
+
+ return addr;
+}
+
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
@@ -260,9 +713,9 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
dma_addr_t pos;
if (d->dir == DMA_MEM_TO_DEV)
- pos = omap_get_dma_src_pos(c->dma_ch);
+ pos = omap_dma_get_src_pos(c);
else if (d->dir == DMA_DEV_TO_MEM)
- pos = omap_get_dma_dst_pos(c->dma_ch);
+ pos = omap_dma_get_dst_pos(c);
else
pos = 0;
@@ -304,24 +757,23 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
enum dma_slave_buswidth dev_width;
struct scatterlist *sgent;
struct omap_desc *d;
dma_addr_t dev_addr;
- unsigned i, j = 0, es, en, frame_bytes, sync_type;
+ unsigned i, j = 0, es, en, frame_bytes;
u32 burst;
if (dir == DMA_DEV_TO_MEM) {
dev_addr = c->cfg.src_addr;
dev_width = c->cfg.src_addr_width;
burst = c->cfg.src_maxburst;
- sync_type = OMAP_DMA_SRC_SYNC;
} else if (dir == DMA_MEM_TO_DEV) {
dev_addr = c->cfg.dst_addr;
dev_width = c->cfg.dst_addr_width;
burst = c->cfg.dst_maxburst;
- sync_type = OMAP_DMA_DST_SYNC;
} else {
dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
return NULL;
@@ -330,13 +782,13 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
/* Bus width translates to the element size (ES) */
switch (dev_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
- es = OMAP_DMA_DATA_TYPE_S8;
+ es = CSDP_DATA_TYPE_8;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
- es = OMAP_DMA_DATA_TYPE_S16;
+ es = CSDP_DATA_TYPE_16;
break;
case DMA_SLAVE_BUSWIDTH_4_BYTES:
- es = OMAP_DMA_DATA_TYPE_S32;
+ es = CSDP_DATA_TYPE_32;
break;
default: /* not reached */
return NULL;
@@ -350,9 +802,31 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->dir = dir;
d->dev_addr = dev_addr;
d->es = es;
- d->sync_mode = OMAP_DMA_SYNC_FRAME;
- d->sync_type = sync_type;
- d->periph_port = OMAP_DMA_PORT_TIPB;
+
+ d->ccr = c->ccr | CCR_SYNC_FRAME;
+ if (dir == DMA_DEV_TO_MEM)
+ d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
+ else
+ d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
+
+ d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
+ d->csdp = es;
+
+ if (dma_omap1()) {
+ d->cicr |= CICR_TOUT_IE;
+
+ if (dir == DMA_DEV_TO_MEM)
+ d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
+ else
+ d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
+ } else {
+ if (dir == DMA_DEV_TO_MEM)
+ d->ccr |= CCR_TRIGGER_SRC;
+
+ d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+ }
+ if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
+ d->clnk_ctrl = c->dma_ch;
/*
* Build our scatterlist entries: each contains the address,
@@ -382,23 +856,22 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
void *context)
{
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
enum dma_slave_buswidth dev_width;
struct omap_desc *d;
dma_addr_t dev_addr;
- unsigned es, sync_type;
+ unsigned es;
u32 burst;
if (dir == DMA_DEV_TO_MEM) {
dev_addr = c->cfg.src_addr;
dev_width = c->cfg.src_addr_width;
burst = c->cfg.src_maxburst;
- sync_type = OMAP_DMA_SRC_SYNC;
} else if (dir == DMA_MEM_TO_DEV) {
dev_addr = c->cfg.dst_addr;
dev_width = c->cfg.dst_addr_width;
burst = c->cfg.dst_maxburst;
- sync_type = OMAP_DMA_DST_SYNC;
} else {
dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
return NULL;
@@ -407,13 +880,13 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
/* Bus width translates to the element size (ES) */
switch (dev_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
- es = OMAP_DMA_DATA_TYPE_S8;
+ es = CSDP_DATA_TYPE_8;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
- es = OMAP_DMA_DATA_TYPE_S16;
+ es = CSDP_DATA_TYPE_16;
break;
case DMA_SLAVE_BUSWIDTH_4_BYTES:
- es = OMAP_DMA_DATA_TYPE_S32;
+ es = CSDP_DATA_TYPE_32;
break;
default: /* not reached */
return NULL;
@@ -428,32 +901,51 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
d->dev_addr = dev_addr;
d->fi = burst;
d->es = es;
- if (burst)
- d->sync_mode = OMAP_DMA_SYNC_PACKET;
- else
- d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
- d->sync_type = sync_type;
- d->periph_port = OMAP_DMA_PORT_MPUI;
d->sg[0].addr = buf_addr;
d->sg[0].en = period_len / es_bytes[es];
d->sg[0].fn = buf_len / period_len;
d->sglen = 1;
- if (!c->cyclic) {
- c->cyclic = true;
- omap_dma_link_lch(c->dma_ch, c->dma_ch);
+ d->ccr = c->ccr;
+ if (dir == DMA_DEV_TO_MEM)
+ d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
+ else
+ d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
- if (flags & DMA_PREP_INTERRUPT)
- omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+ d->cicr = CICR_DROP_IE;
+ if (flags & DMA_PREP_INTERRUPT)
+ d->cicr |= CICR_FRAME_IE;
- omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
- }
+ d->csdp = es;
+
+ if (dma_omap1()) {
+ d->cicr |= CICR_TOUT_IE;
+
+ if (dir == DMA_DEV_TO_MEM)
+ d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
+ else
+ d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
+ } else {
+ if (burst)
+ d->ccr |= CCR_SYNC_PACKET;
+ else
+ d->ccr |= CCR_SYNC_ELEMENT;
+
+ if (dir == DMA_DEV_TO_MEM)
+ d->ccr |= CCR_TRIGGER_SRC;
- if (dma_omap2plus()) {
- omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
- omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+ d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+
+ d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
}
+ if (__dma_omap15xx(od->plat->dma_attr))
+ d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
+ else
+ d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;
+
+ c->cyclic = true;
+
return vchan_tx_prep(&c->vc, &d->vd, flags);
}
@@ -483,20 +975,19 @@ static int omap_dma_terminate_all(struct omap_chan *c)
/*
* Stop DMA activity: we assume the callback will not be called
- * after omap_stop_dma() returns (even if it does, it will see
+ * after omap_dma_stop() returns (even if it does, it will see
* c->desc is NULL and exit.)
*/
if (c->desc) {
c->desc = NULL;
/* Avoid stopping the dma twice */
if (!c->paused)
- omap_stop_dma(c->dma_ch);
+ omap_dma_stop(c);
}
if (c->cyclic) {
c->cyclic = false;
c->paused = false;
- omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
}
vchan_get_all_descriptors(&c->vc, &head);
@@ -513,7 +1004,7 @@ static int omap_dma_pause(struct omap_chan *c)
return -EINVAL;
if (!c->paused) {
- omap_stop_dma(c->dma_ch);
+ omap_dma_stop(c);
c->paused = true;
}
@@ -527,7 +1018,7 @@ static int omap_dma_resume(struct omap_chan *c)
return -EINVAL;
if (c->paused) {
- omap_start_dma(c->dma_ch);
+ omap_dma_start(c, c->desc);
c->paused = false;
}
@@ -573,6 +1064,7 @@ static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
if (!c)
return -ENOMEM;
+ c->reg_map = od->reg_map;
c->dma_sig = dma_sig;
c->vc.desc_free = omap_dma_desc_free;
vchan_init(&c->vc, &od->ddev);
@@ -594,18 +1086,46 @@ static void omap_dma_free(struct omap_dmadev *od)
tasklet_kill(&c->vc.task);
kfree(c);
}
- kfree(od);
+}
+
+#define OMAP_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int omap_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = true;
+ caps->cmd_terminate = true;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ return 0;
}
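
As a side note, dmaengine clients can discover these limits at run time through the generic capability query that backs device_slave_caps. A minimal sketch, assuming chan is a channel already obtained from this controller (the helper name below is made up):

#include <linux/dmaengine.h>
#include <linux/bitops.h>

/* Hypothetical helper: does the channel support 32-bit slave accesses? */
static bool example_supports_32bit(struct dma_chan *chan)
{
        struct dma_slave_caps caps;

        /* for this driver the query is answered by omap_dma_device_slave_caps() */
        if (dma_get_slave_caps(chan, &caps))
                return false;

        return caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
}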
static int omap_dma_probe(struct platform_device *pdev)
{
struct omap_dmadev *od;
- int rc, i;
+ struct resource *res;
+ int rc, i, irq;
- od = kzalloc(sizeof(*od), GFP_KERNEL);
+ od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ od->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(od->base))
+ return PTR_ERR(od->base);
+
+ od->plat = omap_get_plat_info();
+ if (!od->plat)
+ return -EPROBE_DEFER;
+
+ od->reg_map = od->plat->reg_map;
+
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
@@ -615,10 +1135,12 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
od->ddev.device_control = omap_dma_control;
+ od->ddev.device_slave_caps = omap_dma_device_slave_caps;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
INIT_LIST_HEAD(&od->pending);
spin_lock_init(&od->lock);
+ spin_lock_init(&od->irq_lock);
tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
@@ -630,6 +1152,21 @@ static int omap_dma_probe(struct platform_device *pdev)
}
}
+ irq = platform_get_irq(pdev, 1);
+ if (irq <= 0) {
+ dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
+ od->legacy = true;
+ } else {
+ /* Disable all interrupts */
+ od->irq_enable_mask = 0;
+ omap_dma_glbl_write(od, IRQENABLE_L1, 0);
+
+ rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
+ IRQF_SHARED, "omap-dma-engine", od);
+ if (rc)
+ return rc;
+ }
+
rc = dma_async_device_register(&od->ddev);
if (rc) {
pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
@@ -666,6 +1203,12 @@ static int omap_dma_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&od->ddev);
+
+ if (!od->legacy) {
+ /* Disable all interrupts */
+ omap_dma_glbl_write(od, IRQENABLE_L0, 0);
+ }
+
omap_dma_free(od);
return 0;
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 61fdc54a3c88..05fa548bd659 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -964,16 +964,16 @@ static void pch_dma_remove(struct pci_dev *pdev)
if (pd) {
dma_async_device_unregister(&pd->dma);
+ free_irq(pdev->irq, pd);
+
list_for_each_entry_safe(chan, _c, &pd->dma.channels,
device_node) {
pd_chan = to_pd_chan(chan);
- tasklet_disable(&pd_chan->tasklet);
tasklet_kill(&pd_chan->tasklet);
}
pci_pool_destroy(pd->pool);
- free_irq(pdev->irq, pd);
pci_iounmap(pdev, pd->membase);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
new file mode 100644
index 000000000000..82c923146e49
--- /dev/null
+++ b/drivers/dma/qcom_bam_dma.c
@@ -0,0 +1,1111 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * QCOM BAM DMA engine driver
+ *
+ * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
+ * peripherals on the MSM 8x74. The configuration of the channels is dependent
+ * on the way they are hardwired to that specific peripheral. The peripheral
+ * device tree entries specify the configuration of each channel.
+ *
+ * The DMA controller requires the use of external memory for storage of the
+ * hardware descriptors for each channel. The descriptor FIFO is accessed as a
+ * circular buffer and operations are managed according to the offset within the
+ * FIFO. After pipe/channel reset, all of the pipe registers and internal state
+ * are back to defaults.
+ *
+ * During DMA operations, we write descriptors to the FIFO, being careful to
+ * handle wrapping, and then write the last FIFO offset to that channel's
+ * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register
+ * indicates the current FIFO offset that is being processed, so there is some
+ * indication of where the hardware is currently working.
+ */
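
The circular descriptor FIFO described above is easiest to see in isolation. Below is a minimal, standalone sketch of the wrap-around handling; FIFO_LEN, struct hw_desc and push_descs() are made-up stand-ins for MAX_DESCRIPTORS, struct bam_desc_hw and the copy logic that appears later in bam_start_dma(), and the returned byte offset corresponds to what would be written to that channel's P_EVNT_REG:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FIFO_LEN 8        /* usable slots; the real driver uses 4095 */

struct hw_desc {
        uint32_t addr;
        uint16_t size;
        uint16_t flags;
};

/* Append n descriptors at 'tail', wrapping at the end of the ring, and
 * return the new tail offset in bytes (the "kick" value). */
static unsigned int push_descs(struct hw_desc *fifo, unsigned int tail,
                               const struct hw_desc *src, unsigned int n)
{
        if (tail + n > FIFO_LEN) {
                unsigned int partial = FIFO_LEN - tail;

                memcpy(&fifo[tail], src, partial * sizeof(*src));
                memcpy(fifo, &src[partial], (n - partial) * sizeof(*src));
        } else {
                memcpy(&fifo[tail], src, n * sizeof(*src));
        }

        tail = (tail + n) % FIFO_LEN;
        return tail * sizeof(struct hw_desc);
}

int main(void)
{
        struct hw_desc fifo[FIFO_LEN] = { { 0 } };
        struct hw_desc batch[5] = { { .addr = 0x1000, .size = 64 } };

        /* starting at slot 6, five descriptors land in slots 6, 7, 0, 1, 2 */
        printf("kick offset: %u bytes\n", push_descs(fifo, 6, batch, 5));
        return 0;
}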
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+struct bam_desc_hw {
+ u32 addr; /* Buffer physical address */
+ u16 size; /* Buffer size in bytes */
+ u16 flags;
+};
+
+#define DESC_FLAG_INT BIT(15)
+#define DESC_FLAG_EOT BIT(14)
+#define DESC_FLAG_EOB BIT(13)
+
+struct bam_async_desc {
+ struct virt_dma_desc vd;
+
+ u32 num_desc;
+ u32 xfer_len;
+ struct bam_desc_hw *curr_desc;
+
+ enum dma_transfer_direction dir;
+ size_t length;
+ struct bam_desc_hw desc[0];
+};
+
+#define BAM_CTRL 0x0000
+#define BAM_REVISION 0x0004
+#define BAM_SW_REVISION 0x0080
+#define BAM_NUM_PIPES 0x003C
+#define BAM_TIMER 0x0040
+#define BAM_TIMER_CTRL 0x0044
+#define BAM_DESC_CNT_TRSHLD 0x0008
+#define BAM_IRQ_SRCS 0x000C
+#define BAM_IRQ_SRCS_MSK 0x0010
+#define BAM_IRQ_SRCS_UNMASKED 0x0030
+#define BAM_IRQ_STTS 0x0014
+#define BAM_IRQ_CLR 0x0018
+#define BAM_IRQ_EN 0x001C
+#define BAM_CNFG_BITS 0x007C
+#define BAM_IRQ_SRCS_EE(ee) (0x0800 + ((ee) * 0x80))
+#define BAM_IRQ_SRCS_MSK_EE(ee) (0x0804 + ((ee) * 0x80))
+#define BAM_P_CTRL(pipe) (0x1000 + ((pipe) * 0x1000))
+#define BAM_P_RST(pipe) (0x1004 + ((pipe) * 0x1000))
+#define BAM_P_HALT(pipe) (0x1008 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_STTS(pipe) (0x1010 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_CLR(pipe) (0x1014 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_EN(pipe) (0x1018 + ((pipe) * 0x1000))
+#define BAM_P_EVNT_DEST_ADDR(pipe) (0x182C + ((pipe) * 0x1000))
+#define BAM_P_EVNT_REG(pipe) (0x1818 + ((pipe) * 0x1000))
+#define BAM_P_SW_OFSTS(pipe) (0x1800 + ((pipe) * 0x1000))
+#define BAM_P_DATA_FIFO_ADDR(pipe) (0x1824 + ((pipe) * 0x1000))
+#define BAM_P_DESC_FIFO_ADDR(pipe) (0x181C + ((pipe) * 0x1000))
+#define BAM_P_EVNT_TRSHLD(pipe) (0x1828 + ((pipe) * 0x1000))
+#define BAM_P_FIFO_SIZES(pipe) (0x1820 + ((pipe) * 0x1000))
+
+/* BAM CTRL */
+#define BAM_SW_RST BIT(0)
+#define BAM_EN BIT(1)
+#define BAM_EN_ACCUM BIT(4)
+#define BAM_TESTBUS_SEL_SHIFT 5
+#define BAM_TESTBUS_SEL_MASK 0x3F
+#define BAM_DESC_CACHE_SEL_SHIFT 13
+#define BAM_DESC_CACHE_SEL_MASK 0x3
+#define BAM_CACHED_DESC_STORE BIT(15)
+#define IBC_DISABLE BIT(16)
+
+/* BAM REVISION */
+#define REVISION_SHIFT 0
+#define REVISION_MASK 0xFF
+#define NUM_EES_SHIFT 8
+#define NUM_EES_MASK 0xF
+#define CE_BUFFER_SIZE BIT(13)
+#define AXI_ACTIVE BIT(14)
+#define USE_VMIDMT BIT(15)
+#define SECURED BIT(16)
+#define BAM_HAS_NO_BYPASS BIT(17)
+#define HIGH_FREQUENCY_BAM BIT(18)
+#define INACTIV_TMRS_EXST BIT(19)
+#define NUM_INACTIV_TMRS BIT(20)
+#define DESC_CACHE_DEPTH_SHIFT 21
+#define DESC_CACHE_DEPTH_1 (0 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_2 (1 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_3 (2 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_4 (3 << DESC_CACHE_DEPTH_SHIFT)
+#define CMD_DESC_EN BIT(23)
+#define INACTIV_TMR_BASE_SHIFT 24
+#define INACTIV_TMR_BASE_MASK 0xFF
+
+/* BAM NUM PIPES */
+#define BAM_NUM_PIPES_SHIFT 0
+#define BAM_NUM_PIPES_MASK 0xFF
+#define PERIPH_NON_PIPE_GRP_SHIFT 16
+#define PERIPH_NON_PIP_GRP_MASK 0xFF
+#define BAM_NON_PIPE_GRP_SHIFT 24
+#define BAM_NON_PIPE_GRP_MASK 0xFF
+
+/* BAM CNFG BITS */
+#define BAM_PIPE_CNFG BIT(2)
+#define BAM_FULL_PIPE BIT(11)
+#define BAM_NO_EXT_P_RST BIT(12)
+#define BAM_IBC_DISABLE BIT(13)
+#define BAM_SB_CLK_REQ BIT(14)
+#define BAM_PSM_CSW_REQ BIT(15)
+#define BAM_PSM_P_RES BIT(16)
+#define BAM_AU_P_RES BIT(17)
+#define BAM_SI_P_RES BIT(18)
+#define BAM_WB_P_RES BIT(19)
+#define BAM_WB_BLK_CSW BIT(20)
+#define BAM_WB_CSW_ACK_IDL BIT(21)
+#define BAM_WB_RETR_SVPNT BIT(22)
+#define BAM_WB_DSC_AVL_P_RST BIT(23)
+#define BAM_REG_P_EN BIT(24)
+#define BAM_PSM_P_HD_DATA BIT(25)
+#define BAM_AU_ACCUMED BIT(26)
+#define BAM_CMD_ENABLE BIT(27)
+
+#define BAM_CNFG_BITS_DEFAULT (BAM_PIPE_CNFG | \
+ BAM_NO_EXT_P_RST | \
+ BAM_IBC_DISABLE | \
+ BAM_SB_CLK_REQ | \
+ BAM_PSM_CSW_REQ | \
+ BAM_PSM_P_RES | \
+ BAM_AU_P_RES | \
+ BAM_SI_P_RES | \
+ BAM_WB_P_RES | \
+ BAM_WB_BLK_CSW | \
+ BAM_WB_CSW_ACK_IDL | \
+ BAM_WB_RETR_SVPNT | \
+ BAM_WB_DSC_AVL_P_RST | \
+ BAM_REG_P_EN | \
+ BAM_PSM_P_HD_DATA | \
+ BAM_AU_ACCUMED | \
+ BAM_CMD_ENABLE)
+
+/* PIPE CTRL */
+#define P_EN BIT(1)
+#define P_DIRECTION BIT(3)
+#define P_SYS_STRM BIT(4)
+#define P_SYS_MODE BIT(5)
+#define P_AUTO_EOB BIT(6)
+#define P_AUTO_EOB_SEL_SHIFT 7
+#define P_AUTO_EOB_SEL_512 (0 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_256 (1 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_128 (2 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_64 (3 << P_AUTO_EOB_SEL_SHIFT)
+#define P_PREFETCH_LIMIT_SHIFT 9
+#define P_PREFETCH_LIMIT_32 (0 << P_PREFETCH_LIMIT_SHIFT)
+#define P_PREFETCH_LIMIT_16 (1 << P_PREFETCH_LIMIT_SHIFT)
+#define P_PREFETCH_LIMIT_4 (2 << P_PREFETCH_LIMIT_SHIFT)
+#define P_WRITE_NWD BIT(11)
+#define P_LOCK_GROUP_SHIFT 16
+#define P_LOCK_GROUP_MASK 0x1F
+
+/* BAM_DESC_CNT_TRSHLD */
+#define CNT_TRSHLD 0xffff
+#define DEFAULT_CNT_THRSHLD 0x4
+
+/* BAM_IRQ_SRCS */
+#define BAM_IRQ BIT(31)
+#define P_IRQ 0x7fffffff
+
+/* BAM_IRQ_SRCS_MSK */
+#define BAM_IRQ_MSK BAM_IRQ
+#define P_IRQ_MSK P_IRQ
+
+/* BAM_IRQ_STTS */
+#define BAM_TIMER_IRQ BIT(4)
+#define BAM_EMPTY_IRQ BIT(3)
+#define BAM_ERROR_IRQ BIT(2)
+#define BAM_HRESP_ERR_IRQ BIT(1)
+
+/* BAM_IRQ_CLR */
+#define BAM_TIMER_CLR BIT(4)
+#define BAM_EMPTY_CLR BIT(3)
+#define BAM_ERROR_CLR BIT(2)
+#define BAM_HRESP_ERR_CLR BIT(1)
+
+/* BAM_IRQ_EN */
+#define BAM_TIMER_EN BIT(4)
+#define BAM_EMPTY_EN BIT(3)
+#define BAM_ERROR_EN BIT(2)
+#define BAM_HRESP_ERR_EN BIT(1)
+
+/* BAM_P_IRQ_EN */
+#define P_PRCSD_DESC_EN BIT(0)
+#define P_TIMER_EN BIT(1)
+#define P_WAKE_EN BIT(2)
+#define P_OUT_OF_DESC_EN BIT(3)
+#define P_ERR_EN BIT(4)
+#define P_TRNSFR_END_EN BIT(5)
+#define P_DEFAULT_IRQS_EN (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)
+
+/* BAM_P_SW_OFSTS */
+#define P_SW_OFSTS_MASK 0xffff
+
+#define BAM_DESC_FIFO_SIZE SZ_32K
+#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
+#define BAM_MAX_DATA_SIZE (SZ_32K - 8)
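
For reference, with SZ_32K = 32768 and an 8-byte struct bam_desc_hw (4 + 2 + 2 bytes, as defined above), these definitions work out to MAX_DESCRIPTORS = 32768 / 8 - 1 = 4095 usable descriptor slots and BAM_MAX_DATA_SIZE = 32760 bytes per hardware descriptor; the one unused slot's worth of space is the headroom that lets the channel-init code below align the FIFO base to the descriptor size.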
+
+struct bam_chan {
+ struct virt_dma_chan vc;
+
+ struct bam_device *bdev;
+
+ /* configuration from device tree */
+ u32 id;
+
+ struct bam_async_desc *curr_txd; /* current running dma */
+
+ /* runtime configuration */
+ struct dma_slave_config slave;
+
+ /* fifo storage */
+ struct bam_desc_hw *fifo_virt;
+ dma_addr_t fifo_phys;
+
+ /* fifo markers */
+ unsigned short head; /* start of active descriptor entries */
+ unsigned short tail; /* end of active descriptor entries */
+
+ unsigned int initialized; /* is the channel hw initialized? */
+ unsigned int paused; /* is the channel paused? */
+ unsigned int reconfigure; /* new slave config? */
+
+ struct list_head node;
+};
+
+static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
+{
+ return container_of(common, struct bam_chan, vc.chan);
+}
+
+struct bam_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct device_dma_parameters dma_parms;
+ struct bam_chan *channels;
+ u32 num_channels;
+
+ /* execution environment ID, from DT */
+ u32 ee;
+
+ struct clk *bamclk;
+ int irq;
+
+ /* dma start transaction tasklet */
+ struct tasklet_struct task;
+};
+
+/**
+ * bam_reset_channel - Reset individual BAM DMA channel
+ * @bchan: bam channel
+ *
+ * This function resets a specific BAM channel
+ */
+static void bam_reset_channel(struct bam_chan *bchan)
+{
+ struct bam_device *bdev = bchan->bdev;
+
+ lockdep_assert_held(&bchan->vc.lock);
+
+ /* reset channel */
+ writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id));
+ writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id));
+
+ /* don't allow cpu to reorder BAM register accesses done after this */
+ wmb();
+
+ /* make sure hw is initialized when channel is used the first time */
+ bchan->initialized = 0;
+}
+
+/**
+ * bam_chan_init_hw - Initialize channel hardware
+ * @bchan: bam channel
+ *
+ * This function resets and initializes the BAM channel
+ */
+static void bam_chan_init_hw(struct bam_chan *bchan,
+ enum dma_transfer_direction dir)
+{
+ struct bam_device *bdev = bchan->bdev;
+ u32 val;
+
+ /* Reset the channel to clear internal state of the FIFO */
+ bam_reset_channel(bchan);
+
+ /*
+ * write out 8 byte aligned address. We have enough space for this
+ * because we allocated 1 more descriptor (8 bytes) than we can use
+ */
+ writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
+ bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id));
+ writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs +
+ BAM_P_FIFO_SIZES(bchan->id));
+
+ /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
+ writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+
+ /* unmask the specific pipe and EE combo */
+ val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ val |= BIT(bchan->id);
+ writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+ /* don't allow cpu to reorder the channel enable done below */
+ wmb();
+
+ /* set fixed direction and mode, then enable channel */
+ val = P_EN | P_SYS_MODE;
+ if (dir == DMA_DEV_TO_MEM)
+ val |= P_DIRECTION;
+
+ writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id));
+
+ bchan->initialized = 1;
+
+ /* init FIFO pointers */
+ bchan->head = 0;
+ bchan->tail = 0;
+}
+
+/**
+ * bam_alloc_chan - Allocate channel resources for DMA channel.
+ * @chan: specified channel
+ *
+ * This function allocates the FIFO descriptor memory
+ */
+static int bam_alloc_chan(struct dma_chan *chan)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_device *bdev = bchan->bdev;
+
+ if (bchan->fifo_virt)
+ return 0;
+
+ /* allocate FIFO descriptor space, but only if necessary */
+ bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
+ &bchan->fifo_phys, GFP_KERNEL);
+
+ if (!bchan->fifo_virt) {
+ dev_err(bdev->dev, "Failed to allocate desc fifo\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * bam_free_chan - Frees dma resources associated with a specific channel
+ * @chan: specified channel
+ *
+ * Free the allocated fifo descriptor memory and channel resources
+ *
+ */
+static void bam_free_chan(struct dma_chan *chan)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_device *bdev = bchan->bdev;
+ u32 val;
+ unsigned long flags;
+
+ vchan_free_chan_resources(to_virt_chan(chan));
+
+ if (bchan->curr_txd) {
+ dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
+ return;
+ }
+
+ spin_lock_irqsave(&bchan->vc.lock, flags);
+ bam_reset_channel(bchan);
+ spin_unlock_irqrestore(&bchan->vc.lock, flags);
+
+ dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
+ bchan->fifo_phys);
+ bchan->fifo_virt = NULL;
+
+ /* mask irq for pipe/channel */
+ val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ val &= ~BIT(bchan->id);
+ writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+ /* disable irq */
+ writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+}
+
+/**
+ * bam_slave_config - set slave configuration for channel
+ * @bchan: bam dma channel
+ * @cfg: slave configuration
+ *
+ * Sets slave configuration for channel
+ *
+ */
+static void bam_slave_config(struct bam_chan *bchan,
+ struct dma_slave_config *cfg)
+{
+ memcpy(&bchan->slave, cfg, sizeof(*cfg));
+ bchan->reconfigure = 1;
+}
+
+/**
+ * bam_prep_slave_sg - Prep slave sg transaction
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_device *bdev = bchan->bdev;
+ struct bam_async_desc *async_desc;
+ struct scatterlist *sg;
+ u32 i;
+ struct bam_desc_hw *desc;
+ unsigned int num_alloc = 0;
+
+ if (!is_slave_direction(direction)) {
+ dev_err(bdev->dev, "invalid dma direction\n");
+ return NULL;
+ }
+
+ /* calculate number of required entries */
+ for_each_sg(sgl, sg, sg_len, i)
+ num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);
+
+ /* allocate enough room to accommodate the number of entries */
+ async_desc = kzalloc(sizeof(*async_desc) +
+ (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
+
+ if (!async_desc)
+ goto err_out;
+
+ async_desc->num_desc = num_alloc;
+ async_desc->curr_desc = async_desc->desc;
+ async_desc->dir = direction;
+
+ /* fill in temporary descriptors */
+ desc = async_desc->desc;
+ for_each_sg(sgl, sg, sg_len, i) {
+ unsigned int remainder = sg_dma_len(sg);
+ unsigned int curr_offset = 0;
+
+ do {
+ desc->addr = sg_dma_address(sg) + curr_offset;
+
+ if (remainder > BAM_MAX_DATA_SIZE) {
+ desc->size = BAM_MAX_DATA_SIZE;
+ remainder -= BAM_MAX_DATA_SIZE;
+ curr_offset += BAM_MAX_DATA_SIZE;
+ } else {
+ desc->size = remainder;
+ remainder = 0;
+ }
+
+ async_desc->length += desc->size;
+ desc++;
+ } while (remainder > 0);
+ }
+
+ return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
+
+err_out:
+ kfree(async_desc);
+ return NULL;
+}
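
This prep callback is only reached through the generic dmaengine client API, so a rough sketch of the caller side may help. The function below is hypothetical (the "rx" channel name, device pointer and FIFO address are placeholders), but the dmaengine helpers it uses are the standard ones this driver plugs into:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Hypothetical client: start a DEV_TO_MEM transfer on a BAM "rx" channel */
static int example_start_rx(struct device *dev, struct scatterlist *sgl,
                            unsigned int nents, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction = DMA_DEV_TO_MEM,
                .src_addr = fifo_addr,
                .src_maxburst = 16,     /* ends up in BAM_DESC_CNT_TRSHLD */
        };
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *chan;

        chan = dma_request_slave_channel(dev, "rx");
        if (!chan)
                return -ENODEV;

        dmaengine_slave_config(chan, &cfg);     /* routed to bam_control() */

        desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT);
        if (!desc) {
                dma_release_channel(chan);
                return -ENOMEM;
        }

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);  /* bam_issue_pending() starts it */

        return 0;
}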
+
+/**
+ * bam_dma_terminate_all - terminate all transactions on a channel
+ * @bchan: bam dma channel
+ *
+ * Dequeues and frees all transactions
+ * No callbacks are done
+ *
+ */
+static void bam_dma_terminate_all(struct bam_chan *bchan)
+{
+ unsigned long flag;
+ LIST_HEAD(head);
+
+ /* remove all transactions, including active transaction */
+ spin_lock_irqsave(&bchan->vc.lock, flag);
+ if (bchan->curr_txd) {
+ list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
+ bchan->curr_txd = NULL;
+ }
+
+ vchan_get_all_descriptors(&bchan->vc, &head);
+ spin_unlock_irqrestore(&bchan->vc.lock, flag);
+
+ vchan_dma_desc_free_list(&bchan->vc, &head);
+}
+
+/**
+ * bam_control - DMA device control
+ * @chan: dma channel
+ * @cmd: control cmd
+ * @arg: cmd argument
+ *
+ * Perform DMA control command
+ *
+ */
+static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_device *bdev = bchan->bdev;
+ int ret = 0;
+ unsigned long flag;
+
+ switch (cmd) {
+ case DMA_PAUSE:
+ spin_lock_irqsave(&bchan->vc.lock, flag);
+ writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id));
+ bchan->paused = 1;
+ spin_unlock_irqrestore(&bchan->vc.lock, flag);
+ break;
+
+ case DMA_RESUME:
+ spin_lock_irqsave(&bchan->vc.lock, flag);
+ writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id));
+ bchan->paused = 0;
+ spin_unlock_irqrestore(&bchan->vc.lock, flag);
+ break;
+
+ case DMA_TERMINATE_ALL:
+ bam_dma_terminate_all(bchan);
+ break;
+
+ case DMA_SLAVE_CONFIG:
+ spin_lock_irqsave(&bchan->vc.lock, flag);
+ bam_slave_config(bchan, (struct dma_slave_config *)arg);
+ spin_unlock_irqrestore(&bchan->vc.lock, flag);
+ break;
+
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * process_channel_irqs - processes the channel interrupts
+ * @bdev: bam controller
+ *
+ * This function processes the channel interrupts
+ *
+ */
+static u32 process_channel_irqs(struct bam_device *bdev)
+{
+ u32 i, srcs, pipe_stts;
+ unsigned long flags;
+ struct bam_async_desc *async_desc;
+
+ srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee));
+
+ /* return early if no pipe/channel interrupts are present */
+ if (!(srcs & P_IRQ))
+ return srcs;
+
+ for (i = 0; i < bdev->num_channels; i++) {
+ struct bam_chan *bchan = &bdev->channels[i];
+
+ if (!(srcs & BIT(i)))
+ continue;
+
+ /* clear pipe irq */
+ pipe_stts = readl_relaxed(bdev->regs +
+ BAM_P_IRQ_STTS(i));
+
+ writel_relaxed(pipe_stts, bdev->regs +
+ BAM_P_IRQ_CLR(i));
+
+ spin_lock_irqsave(&bchan->vc.lock, flags);
+ async_desc = bchan->curr_txd;
+
+ if (async_desc) {
+ async_desc->num_desc -= async_desc->xfer_len;
+ async_desc->curr_desc += async_desc->xfer_len;
+ bchan->curr_txd = NULL;
+
+ /* manage FIFO */
+ bchan->head += async_desc->xfer_len;
+ bchan->head %= MAX_DESCRIPTORS;
+
+ /*
+ * if complete, process cookie. Otherwise
+ * push back to front of desc_issued so that
+ * it gets restarted by the tasklet
+ */
+ if (!async_desc->num_desc)
+ vchan_cookie_complete(&async_desc->vd);
+ else
+ list_add(&async_desc->vd.node,
+ &bchan->vc.desc_issued);
+ }
+
+ spin_unlock_irqrestore(&bchan->vc.lock, flags);
+ }
+
+ return srcs;
+}
+
+/**
+ * bam_dma_irq - irq handler for bam controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the bam controller
+ */
+static irqreturn_t bam_dma_irq(int irq, void *data)
+{
+ struct bam_device *bdev = data;
+ u32 clr_mask = 0, srcs = 0;
+
+ srcs |= process_channel_irqs(bdev);
+
+ /* kick off tasklet to start next dma transfer */
+ if (srcs & P_IRQ)
+ tasklet_schedule(&bdev->task);
+
+ if (srcs & BAM_IRQ)
+ clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS);
+
+ /* don't allow reorder of the various accesses to the BAM registers */
+ mb();
+
+ writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * bam_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ struct virt_dma_desc *vd;
+ int ret;
+ size_t residue = 0;
+ unsigned int i;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ if (!txstate)
+ return bchan->paused ? DMA_PAUSED : ret;
+
+ spin_lock_irqsave(&bchan->vc.lock, flags);
+ vd = vchan_find_desc(&bchan->vc, cookie);
+ if (vd)
+ residue = container_of(vd, struct bam_async_desc, vd)->length;
+ else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
+ for (i = 0; i < bchan->curr_txd->num_desc; i++)
+ residue += bchan->curr_txd->curr_desc[i].size;
+
+ spin_unlock_irqrestore(&bchan->vc.lock, flags);
+
+ dma_set_residue(txstate, residue);
+
+ if (ret == DMA_IN_PROGRESS && bchan->paused)
+ ret = DMA_PAUSED;
+
+ return ret;
+}
+
+/**
+ * bam_apply_new_config
+ * @bchan: bam dma channel
+ * @dir: DMA direction
+ */
+static void bam_apply_new_config(struct bam_chan *bchan,
+ enum dma_transfer_direction dir)
+{
+ struct bam_device *bdev = bchan->bdev;
+ u32 maxburst;
+
+ if (dir == DMA_DEV_TO_MEM)
+ maxburst = bchan->slave.src_maxburst;
+ else
+ maxburst = bchan->slave.dst_maxburst;
+
+ writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD);
+
+ bchan->reconfigure = 0;
+}
+
+/**
+ * bam_start_dma - start next transaction
+ * @bchan: bam dma channel
+ */
+static void bam_start_dma(struct bam_chan *bchan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
+ struct bam_device *bdev = bchan->bdev;
+ struct bam_async_desc *async_desc;
+ struct bam_desc_hw *desc;
+ struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
+ sizeof(struct bam_desc_hw));
+
+ lockdep_assert_held(&bchan->vc.lock);
+
+ if (!vd)
+ return;
+
+ list_del(&vd->node);
+
+ async_desc = container_of(vd, struct bam_async_desc, vd);
+ bchan->curr_txd = async_desc;
+
+ /* on first use, initialize the channel hardware */
+ if (!bchan->initialized)
+ bam_chan_init_hw(bchan, async_desc->dir);
+
+ /* apply new slave config changes, if necessary */
+ if (bchan->reconfigure)
+ bam_apply_new_config(bchan, async_desc->dir);
+
+ desc = bchan->curr_txd->curr_desc;
+
+ if (async_desc->num_desc > MAX_DESCRIPTORS)
+ async_desc->xfer_len = MAX_DESCRIPTORS;
+ else
+ async_desc->xfer_len = async_desc->num_desc;
+
+ /* set INT on last descriptor */
+ desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
+
+ if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
+ u32 partial = MAX_DESCRIPTORS - bchan->tail;
+
+ memcpy(&fifo[bchan->tail], desc,
+ partial * sizeof(struct bam_desc_hw));
+ memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
+ sizeof(struct bam_desc_hw));
+ } else {
+ memcpy(&fifo[bchan->tail], desc,
+ async_desc->xfer_len * sizeof(struct bam_desc_hw));
+ }
+
+ bchan->tail += async_desc->xfer_len;
+ bchan->tail %= MAX_DESCRIPTORS;
+
+ /* ensure the descriptor writes and the dma start are not reordered */
+ wmb();
+ writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
+ bdev->regs + BAM_P_EVNT_REG(bchan->id));
+}
+
+/**
+ * dma_tasklet - DMA IRQ tasklet
+ * @data: tasklet argument (bam controller structure)
+ *
+ * Sets up next DMA operation and then processes all completed transactions
+ */
+static void dma_tasklet(unsigned long data)
+{
+ struct bam_device *bdev = (struct bam_device *)data;
+ struct bam_chan *bchan;
+ unsigned long flags;
+ unsigned int i;
+
+ /* go through the channels and kick off transactions */
+ for (i = 0; i < bdev->num_channels; i++) {
+ bchan = &bdev->channels[i];
+ spin_lock_irqsave(&bchan->vc.lock, flags);
+
+ if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
+ bam_start_dma(bchan);
+ spin_unlock_irqrestore(&bchan->vc.lock, flags);
+ }
+}
+
+/**
+ * bam_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Starts the next queued transaction immediately if the channel is idle
+ */
+static void bam_issue_pending(struct dma_chan *chan)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bchan->vc.lock, flags);
+
+ /* if work pending and idle, start a transaction */
+ if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
+ bam_start_dma(bchan);
+
+ spin_unlock_irqrestore(&bchan->vc.lock, flags);
+}
+
+/**
+ * bam_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ *
+ */
+static void bam_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct bam_async_desc *async_desc = container_of(vd,
+ struct bam_async_desc, vd);
+
+ kfree(async_desc);
+}
+
+static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *of)
+{
+ struct bam_device *bdev = container_of(of->of_dma_data,
+ struct bam_device, common);
+ unsigned int request;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ request = dma_spec->args[0];
+ if (request >= bdev->num_channels)
+ return NULL;
+
+ return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
+}
+
+/**
+ * bam_init
+ * @bdev: bam device
+ *
+ * Initialization helper for global bam registers
+ */
+static int bam_init(struct bam_device *bdev)
+{
+ u32 val;
+
+ /* read revision and configuration information */
+ val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT;
+ val &= NUM_EES_MASK;
+
+ /* check that configured EE is within range */
+ if (bdev->ee >= val)
+ return -EINVAL;
+
+ val = readl_relaxed(bdev->regs + BAM_NUM_PIPES);
+ bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+
+ /* s/w reset bam */
+ /* after reset all pipes are disabled and idle */
+ val = readl_relaxed(bdev->regs + BAM_CTRL);
+ val |= BAM_SW_RST;
+ writel_relaxed(val, bdev->regs + BAM_CTRL);
+ val &= ~BAM_SW_RST;
+ writel_relaxed(val, bdev->regs + BAM_CTRL);
+
+ /* make sure previous stores are visible before enabling BAM */
+ wmb();
+
+ /* enable bam */
+ val |= BAM_EN;
+ writel_relaxed(val, bdev->regs + BAM_CTRL);
+
+ /* set descriptor threshold, start with 4 bytes */
+ writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD);
+
+ /* Enable the default set of h/w workarounds, i.e. all except BAM_FULL_PIPE */
+ writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS);
+
+ /* enable irqs for errors */
+ writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
+ bdev->regs + BAM_IRQ_EN);
+
+ /* unmask global bam interrupt */
+ writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+ return 0;
+}
+
+static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
+ u32 index)
+{
+ bchan->id = index;
+ bchan->bdev = bdev;
+
+ vchan_init(&bchan->vc, &bdev->common);
+ bchan->vc.desc_free = bam_dma_free_desc;
+}
+
+static int bam_dma_probe(struct platform_device *pdev)
+{
+ struct bam_device *bdev;
+ struct resource *iores;
+ int ret, i;
+
+ bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ bdev->dev = &pdev->dev;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(bdev->regs))
+ return PTR_ERR(bdev->regs);
+
+ bdev->irq = platform_get_irq(pdev, 0);
+ if (bdev->irq < 0)
+ return bdev->irq;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
+ if (ret) {
+ dev_err(bdev->dev, "Execution environment unspecified\n");
+ return ret;
+ }
+
+ bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
+ if (IS_ERR(bdev->bamclk))
+ return PTR_ERR(bdev->bamclk);
+
+ ret = clk_prepare_enable(bdev->bamclk);
+ if (ret) {
+ dev_err(bdev->dev, "failed to prepare/enable clock\n");
+ return ret;
+ }
+
+ ret = bam_init(bdev);
+ if (ret)
+ goto err_disable_clk;
+
+ tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
+
+ bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
+ sizeof(*bdev->channels), GFP_KERNEL);
+
+ if (!bdev->channels) {
+ ret = -ENOMEM;
+ goto err_disable_clk;
+ }
+
+ /* allocate and initialize channels */
+ INIT_LIST_HEAD(&bdev->common.channels);
+
+ for (i = 0; i < bdev->num_channels; i++)
+ bam_channel_init(bdev, &bdev->channels[i], i);
+
+ ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
+ IRQF_TRIGGER_HIGH, "bam_dma", bdev);
+ if (ret)
+ goto err_disable_clk;
+
+ /* set max dma segment size */
+ bdev->common.dev = bdev->dev;
+ bdev->common.dev->dma_parms = &bdev->dma_parms;
+ ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
+ if (ret) {
+ dev_err(bdev->dev, "cannot set maximum segment size\n");
+ goto err_disable_clk;
+ }
+
+ platform_set_drvdata(pdev, bdev);
+
+ /* set capabilities */
+ dma_cap_zero(bdev->common.cap_mask);
+ dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
+
+ /* initialize dmaengine apis */
+ bdev->common.device_alloc_chan_resources = bam_alloc_chan;
+ bdev->common.device_free_chan_resources = bam_free_chan;
+ bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
+ bdev->common.device_control = bam_control;
+ bdev->common.device_issue_pending = bam_issue_pending;
+ bdev->common.device_tx_status = bam_tx_status;
+ bdev->common.dev = bdev->dev;
+
+ ret = dma_async_device_register(&bdev->common);
+ if (ret) {
+ dev_err(bdev->dev, "failed to register dma async device\n");
+ goto err_disable_clk;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
+ &bdev->common);
+ if (ret)
+ goto err_unregister_dma;
+
+ return 0;
+
+err_unregister_dma:
+ dma_async_device_unregister(&bdev->common);
+err_disable_clk:
+ clk_disable_unprepare(bdev->bamclk);
+ return ret;
+}
+
+static int bam_dma_remove(struct platform_device *pdev)
+{
+ struct bam_device *bdev = platform_get_drvdata(pdev);
+ u32 i;
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&bdev->common);
+
+ /* mask all interrupts for this execution environment */
+ writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+ devm_free_irq(bdev->dev, bdev->irq, bdev);
+
+ for (i = 0; i < bdev->num_channels; i++) {
+ bam_dma_terminate_all(&bdev->channels[i]);
+ tasklet_kill(&bdev->channels[i].vc.task);
+
+ dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
+ bdev->channels[i].fifo_virt,
+ bdev->channels[i].fifo_phys);
+ }
+
+ tasklet_kill(&bdev->task);
+
+ clk_disable_unprepare(bdev->bamclk);
+
+ return 0;
+}
+
+static const struct of_device_id bam_of_match[] = {
+ { .compatible = "qcom,bam-v1.4.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bam_of_match);
+
+static struct platform_driver bam_dma_driver = {
+ .probe = bam_dma_probe,
+ .remove = bam_dma_remove,
+ .driver = {
+ .name = "bam-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = bam_of_match,
+ },
+};
+
+module_platform_driver(bam_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 4eddedb6eb7d..b209a0f17344 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -192,7 +192,7 @@ struct s3c24xx_dma_phy {
unsigned int id;
bool valid;
void __iomem *base;
- unsigned int irq;
+ int irq;
struct clk *clk;
spinlock_t lock;
struct s3c24xx_dma_chan *serving;
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index dadd9e010c0b..b4c813831006 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -29,6 +29,12 @@ config RCAR_HPB_DMAE
help
Enable support for the Renesas R-Car series DMA controllers.
+config RCAR_AUDMAC_PP
+ tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
+
config SHDMA_R8A73A4
def_bool y
depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index e856af23b789..1ce88b28cfc6 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -7,3 +7,4 @@ endif
shdma-objs := $(shdma-y)
obj-$(CONFIG_SUDMAC) += sudmac.o
obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
+obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
new file mode 100644
index 000000000000..2de77289a2e9
--- /dev/null
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -0,0 +1,320 @@
+/*
+ * This is for Renesas R-Car Audio-DMAC-peri-peri.
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * based on the drivers/dma/sh/shdma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_data/dma-rcar-audmapp.h>
+#include <linux/platform_device.h>
+#include <linux/shdma-base.h>
+
+/*
+ * DMA register
+ */
+#define PDMASAR 0x00
+#define PDMADAR 0x04
+#define PDMACHCR 0x0c
+
+/* PDMACHCR */
+#define PDMACHCR_DE (1 << 0)
+
+#define AUDMAPP_MAX_CHANNELS 29
+
+/* Default MEMCPY transfer size = 2^2 = 4 bytes */
+#define LOG2_DEFAULT_XFER_SIZE 2
+#define AUDMAPP_SLAVE_NUMBER 256
+#define AUDMAPP_LEN_MAX (16 * 1024 * 1024)
+
+struct audmapp_chan {
+ struct shdma_chan shdma_chan;
+ struct audmapp_slave_config *config;
+ void __iomem *base;
+};
+
+struct audmapp_device {
+ struct shdma_dev shdma_dev;
+ struct audmapp_pdata *pdata;
+ struct device *dev;
+ void __iomem *chan_reg;
+};
+
+#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
+#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \
+ struct audmapp_device, shdma_dev.dma_dev)
+
+static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg)
+{
+ struct audmapp_device *audev = to_dev(auchan);
+ struct device *dev = audev->dev;
+
+ dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data);
+
+ iowrite32(data, auchan->base + reg);
+}
+
+static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg)
+{
+ return ioread32(auchan->base + reg);
+}
+
+static void audmapp_halt(struct shdma_chan *schan)
+{
+ struct audmapp_chan *auchan = to_chan(schan);
+ int i;
+
+ audmapp_write(auchan, 0, PDMACHCR);
+
+ for (i = 0; i < 1024; i++) {
+ if (0 == audmapp_read(auchan, PDMACHCR))
+ return;
+ udelay(1);
+ }
+}
+
+static void audmapp_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct audmapp_chan *auchan = to_chan(schan);
+ struct audmapp_device *audev = to_dev(auchan);
+ struct audmapp_slave_config *cfg = auchan->config;
+ struct device *dev = audev->dev;
+ u32 chcr = cfg->chcr | PDMACHCR_DE;
+
+ dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n",
+ &cfg->src, &cfg->dst, cfg->chcr);
+
+ audmapp_write(auchan, cfg->src, PDMASAR);
+ audmapp_write(auchan, cfg->dst, PDMADAR);
+ audmapp_write(auchan, chcr, PDMACHCR);
+}
+
+static struct audmapp_slave_config *
+audmapp_find_slave(struct audmapp_chan *auchan, int slave_id)
+{
+ struct audmapp_device *audev = to_dev(auchan);
+ struct audmapp_pdata *pdata = audev->pdata;
+ struct audmapp_slave_config *cfg;
+ int i;
+
+ if (slave_id >= AUDMAPP_SLAVE_NUMBER)
+ return NULL;
+
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->slave_id == slave_id)
+ return cfg;
+
+ return NULL;
+}
+
+static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
+ dma_addr_t slave_addr, bool try)
+{
+ struct audmapp_chan *auchan = to_chan(schan);
+ struct audmapp_slave_config *cfg =
+ audmapp_find_slave(auchan, slave_id);
+
+ if (!cfg)
+ return -ENODEV;
+ if (try)
+ return 0;
+
+ auchan->config = cfg;
+
+ return 0;
+}
+
+static int audmapp_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct audmapp_chan *auchan = to_chan(schan);
+ struct audmapp_slave_config *cfg = auchan->config;
+
+ if (!cfg)
+ return -ENODEV;
+
+ if (*len > (size_t)AUDMAPP_LEN_MAX)
+ *len = (size_t)AUDMAPP_LEN_MAX;
+
+ return 0;
+}
+
+static void audmapp_setup_xfer(struct shdma_chan *schan,
+ int slave_id)
+{
+}
+
+static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
+{
+ return 0; /* always fixed address */
+}
+
+static bool audmapp_channel_busy(struct shdma_chan *schan)
+{
+ struct audmapp_chan *auchan = to_chan(schan);
+ u32 chcr = audmapp_read(auchan, PDMACHCR);
+
+ return chcr & ~PDMACHCR_DE;
+}
+
+static bool audmapp_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ return true;
+}
+
+static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
+{
+ return &((struct shdma_desc *)buf)[i];
+}
+
+static const struct shdma_ops audmapp_shdma_ops = {
+ .halt_channel = audmapp_halt,
+ .desc_setup = audmapp_desc_setup,
+ .set_slave = audmapp_set_slave,
+ .start_xfer = audmapp_start_xfer,
+ .embedded_desc = audmapp_embedded_desc,
+ .setup_xfer = audmapp_setup_xfer,
+ .slave_addr = audmapp_slave_addr,
+ .channel_busy = audmapp_channel_busy,
+ .desc_completed = audmapp_desc_completed,
+};
+
+static int audmapp_chan_probe(struct platform_device *pdev,
+ struct audmapp_device *audev, int id)
+{
+ struct shdma_dev *sdev = &audev->shdma_dev;
+ struct audmapp_chan *auchan;
+ struct shdma_chan *schan;
+ struct device *dev = audev->dev;
+
+ auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL);
+ if (!auchan)
+ return -ENOMEM;
+
+ schan = &auchan->shdma_chan;
+ schan->max_xfer_len = AUDMAPP_LEN_MAX;
+
+ shdma_chan_probe(sdev, schan, id);
+
+ auchan->base = audev->chan_reg + 0x20 + (0x10 * id);
+ dev_dbg(dev, "%02d : %p / %p", id, auchan->base, audev->chan_reg);
+
+ return 0;
+}
+
+static void audmapp_chan_remove(struct audmapp_device *audev)
+{
+ struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &audev->shdma_dev, i) {
+ BUG_ON(!schan);
+ shdma_chan_remove(schan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static int audmapp_probe(struct platform_device *pdev)
+{
+ struct audmapp_pdata *pdata = pdev->dev.platform_data;
+ struct audmapp_device *audev;
+ struct shdma_dev *sdev;
+ struct dma_device *dma_dev;
+ struct resource *res;
+ int err, i;
+
+ if (!pdata)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL);
+ if (!audev)
+ return -ENOMEM;
+
+ audev->dev = &pdev->dev;
+ audev->pdata = pdata;
+ audev->chan_reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(audev->chan_reg))
+ return PTR_ERR(audev->chan_reg);
+
+ sdev = &audev->shdma_dev;
+ sdev->ops = &audmapp_shdma_ops;
+ sdev->desc_size = sizeof(struct shdma_desc);
+
+ dma_dev = &sdev->dma_dev;
+ dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS);
+ if (err < 0)
+ return err;
+
+ platform_set_drvdata(pdev, audev);
+
+ /* Create DMA Channel */
+ for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) {
+ err = audmapp_chan_probe(pdev, audev, i);
+ if (err)
+ goto chan_probe_err;
+ }
+
+ err = dma_async_device_register(dma_dev);
+ if (err < 0)
+ goto chan_probe_err;
+
+ return err;
+
+chan_probe_err:
+ audmapp_chan_remove(audev);
+ shdma_cleanup(sdev);
+
+ return err;
+}
+
+static int audmapp_remove(struct platform_device *pdev)
+{
+ struct audmapp_device *audev = platform_get_drvdata(pdev);
+ struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+
+ audmapp_chan_remove(audev);
+ shdma_cleanup(&audev->shdma_dev);
+
+ return 0;
+}
+
+static struct platform_driver audmapp_driver = {
+ .probe = audmapp_probe,
+ .remove = audmapp_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "rcar-audmapp-engine",
+ },
+};
+module_platform_driver(audmapp_driver);
+
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 2e7b394def80..52396771acbe 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -227,7 +227,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
struct shdma_chan *schan = to_shdma_chan(chan);
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
- int match = (int)arg;
+ int match = (long)arg;
int ret;
if (match < 0)
@@ -491,8 +491,8 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
}
dev_dbg(schan->dev,
- "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
- copy_size, *len, *src, *dst, &new->async_tx,
+ "chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
+ copy_size, *len, src, dst, &new->async_tx,
new->async_tx.cookie);
new->mark = DESC_PREPARED;
@@ -555,8 +555,8 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
goto err_get_desc;
do {
- dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
- i, sg, len, (unsigned long long)sg_addr);
+ dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
+ i, sg, len, &sg_addr);
if (direction == DMA_DEV_TO_MEM)
new = shdma_add_desc(schan, flags,
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index 06473a05fe4e..b4ff9d3e56d1 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -33,7 +33,8 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
/* Only slave DMA channels can be allocated via DT */
dma_cap_set(DMA_SLAVE, mask);
- chan = dma_request_channel(mask, shdma_chan_filter, (void *)id);
+ chan = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(uintptr_t)id);
if (chan)
to_shdma_chan(chan)->hw_req = id;
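A minimal client-side sketch, not part of this series: a slave id can be handed through the same void-pointer round trip that shdma_chan_filter() now unpacks with a (long) cast, so the value is not truncated on 64-bit builds. The helper name and slave id are illustrative only.

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>
#include <linux/types.h>

/* Hypothetical helper: request an shdma slave channel by MID/RID id. */
static struct dma_chan *example_request_shdma_slave(unsigned int slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Pack the id the same way shdma-of.c does above */
	return dma_request_channel(mask, shdma_chan_filter,
				   (void *)(uintptr_t)slave_id);
}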
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 0d765c0e21ec..dda7e7563f5d 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -443,6 +443,7 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
return ret;
}
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
struct sh_dmae_device *shdev = data;
@@ -453,6 +454,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
sh_dmae_reset(shdev);
return IRQ_HANDLED;
}
+#endif
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
struct shdma_desc *sdesc)
@@ -637,7 +639,7 @@ static int sh_dmae_resume(struct device *dev)
#define sh_dmae_resume NULL
#endif
-const struct dev_pm_ops sh_dmae_pm = {
+static const struct dev_pm_ops sh_dmae_pm = {
.suspend = sh_dmae_suspend,
.resume = sh_dmae_resume,
.runtime_suspend = sh_dmae_runtime_suspend,
@@ -685,9 +687,12 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
static int sh_dmae_probe(struct platform_device *pdev)
{
const struct sh_dmae_pdata *pdata;
- unsigned long irqflags = 0,
- chan_flag[SH_DMAE_MAX_CHANNELS] = {};
- int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
+ unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+ int chan_irq[SH_DMAE_MAX_CHANNELS];
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
+ unsigned long irqflags = 0;
+ int errirq;
+#endif
int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
struct sh_dmae_device *shdev;
struct dma_device *dma_dev;
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index c7e9cdff0708..4e7df43b50d6 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -178,8 +178,8 @@ static int sudmac_desc_setup(struct shdma_chan *schan,
struct sudmac_chan *sc = to_chan(schan);
struct sudmac_desc *sd = to_desc(sdesc);
- dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n",
- __func__, src, dst, *len);
+ dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n",
+ __func__, &src, &dst, *len);
if (*len > schan->max_xfer_len)
*len = schan->max_xfer_len;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d4d3a3109b16..a1bd8298d55f 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -18,6 +18,7 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
+#include <linux/of_dma.h>
#include <linux/sirfsoc_dma.h>
#include "dmaengine.h"
@@ -659,6 +660,18 @@ static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
return 0;
}
+static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct sirfsoc_dma *sdma = ofdma->of_dma_data;
+ unsigned int request = dma_spec->args[0];
+
+ if (request >= SIRFSOC_DMA_CHANNELS)
+ return NULL;
+
+ return dma_get_slave_channel(&sdma->channels[request].chan);
+}
+
static int sirfsoc_dma_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
@@ -764,11 +777,20 @@ static int sirfsoc_dma_probe(struct platform_device *op)
if (ret)
goto free_irq;
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
+ if (ret) {
+ dev_err(dev, "failed to register DMA controller\n");
+ goto unreg_dma_dev;
+ }
+
pm_runtime_enable(&op->dev);
dev_info(dev, "initialized SIRFSOC DMAC driver\n");
return 0;
+unreg_dma_dev:
+ dma_async_device_unregister(dma);
free_irq:
free_irq(sdma->irq, sdma);
irq_dispose:
@@ -781,6 +803,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
struct device *dev = &op->dev;
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ of_dma_controller_free(op->dev.of_node);
dma_async_device_unregister(&sdma->dma);
free_irq(sdma->irq, sdma);
irq_dispose_mapping(sdma->irq);
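For context, a hypothetical consumer sketch (not part of this patch): once of_dma_controller_register() has installed of_dma_sirfsoc_xlate(), a peripheral driver whose node carries "dmas"/"dma-names" properties can resolve its channel with the generic devicetree helper. The channel name "rx" is an assumption for illustration.

#include <linux/dmaengine.h>
#include <linux/of_dma.h>

static struct dma_chan *example_get_sirf_rx_chan(struct device *dev)
{
	/* Resolved through of_dma_sirfsoc_xlate() from dma_spec->args[0] */
	return dma_request_slave_channel(dev, "rx");
}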
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 98e14ee4833c..f8bf00010d45 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1239,9 +1239,17 @@ static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
if (num_dcts_intlv == 2) {
select = (sys_addr >> 8) & 0x3;
channel = select ? 0x3 : 0;
- } else if (num_dcts_intlv == 4)
- channel = (sys_addr >> 8) & 0x7;
-
+ } else if (num_dcts_intlv == 4) {
+ u8 intlv_addr = dct_sel_interleave_addr(pvt);
+ switch (intlv_addr) {
+ case 0x4:
+ channel = (sys_addr >> 8) & 0x3;
+ break;
+ case 0x5:
+ channel = (sys_addr >> 9) & 0x3;
+ break;
+ }
+ }
return channel;
}
@@ -1799,6 +1807,17 @@ static struct amd64_family_type family_types[] = {
.read_dct_pci_cfg = f10_read_dct_pci_cfg,
}
},
+ [F16_M30H_CPUS] = {
+ .ctl_name = "F16h_M30h",
+ .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
+ .f3_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F3,
+ .ops = {
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f16_dbam_to_chip_select,
+ .read_dct_pci_cfg = f10_read_dct_pci_cfg,
+ }
+ },
};
/*
@@ -2578,6 +2597,11 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
break;
case 0x16:
+ if (pvt->model == 0x30) {
+ fam_type = &family_types[F16_M30H_CPUS];
+ pvt->ops = &family_types[F16_M30H_CPUS].ops;
+ break;
+ }
fam_type = &family_types[F16_CPUS];
pvt->ops = &family_types[F16_CPUS].ops;
break;
@@ -2830,6 +2854,14 @@ static const struct pci_device_id amd64_pci_table[] = {
.class = 0,
.class_mask = 0,
},
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ },
{0, }
};
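A standalone worked example, not taken from the driver, of the 4-DCT interleave decode added to f15_m30h_determine_channel() above: when the interleave address select value read by dct_sel_interleave_addr() is 0x4 the channel comes from sys_addr bits [9:8], and when it is 0x5 from bits [10:9]. The sample address is arbitrary.

#include <stdint.h>
#include <stdio.h>

static unsigned int decode_channel_4way(uint64_t sys_addr, uint8_t intlv_addr)
{
	switch (intlv_addr) {
	case 0x4:
		return (sys_addr >> 8) & 0x3;	/* bits [9:8] */
	case 0x5:
		return (sys_addr >> 9) & 0x3;	/* bits [10:9] */
	default:
		return 0;
	}
}

int main(void)
{
	uint64_t sys_addr = 0x12345678;

	/* 0x678 has bits [9:8] = 0b10 and bits [10:9] = 0b11 */
	printf("intlv_addr 0x4 -> channel %u\n", decode_channel_4way(sys_addr, 0x4));
	printf("intlv_addr 0x5 -> channel %u\n", decode_channel_4way(sys_addr, 0x5));
	return 0;
}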
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 6dc1fcc25afb..d903e0c21144 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -168,6 +168,8 @@
#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
#define PCI_DEVICE_ID_AMD_16H_NB_F1 0x1531
#define PCI_DEVICE_ID_AMD_16H_NB_F2 0x1532
+#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
+#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
/*
* Function 1 - Address Map
@@ -300,6 +302,7 @@ enum amd_families {
F15_CPUS,
F15_M30H_CPUS,
F16_CPUS,
+ F16_M30H_CPUS,
NUM_FAMILIES,
};
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index ddd890052ce2..2b63f7c2d6d2 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -350,6 +350,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct amd8111_dev_info *dev_info = &amd8111_devices[id->driver_data];
+ int ret = -ENODEV;
dev_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
dev_info->err_dev, NULL);
@@ -359,16 +360,15 @@ static int amd8111_dev_probe(struct pci_dev *dev,
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, dev_info->err_dev,
dev_info->ctl_name);
- return -ENODEV;
+ goto err;
}
if (pci_enable_device(dev_info->dev)) {
- pci_dev_put(dev_info->dev);
printk(KERN_ERR "failed to enable:"
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, dev_info->err_dev,
dev_info->ctl_name);
- return -ENODEV;
+ goto err_dev_put;
}
/*
@@ -381,8 +381,10 @@ static int amd8111_dev_probe(struct pci_dev *dev,
edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
NULL, 0, 0,
NULL, 0, dev_info->edac_idx);
- if (!dev_info->edac_dev)
- return -ENOMEM;
+ if (!dev_info->edac_dev) {
+ ret = -ENOMEM;
+ goto err_dev_put;
+ }
dev_info->edac_dev->pvt_info = dev_info;
dev_info->edac_dev->dev = &dev_info->dev->dev;
@@ -399,8 +401,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
if (edac_device_add_device(dev_info->edac_dev) > 0) {
printk(KERN_ERR "failed to add edac_dev for %s\n",
dev_info->ctl_name);
- edac_device_free_ctl_info(dev_info->edac_dev);
- return -ENODEV;
+ goto err_edac_free_ctl;
}
printk(KERN_INFO "added one edac_dev on AMD8111 "
@@ -409,6 +410,13 @@ static int amd8111_dev_probe(struct pci_dev *dev,
dev_info->ctl_name);
return 0;
+
+err_edac_free_ctl:
+ edac_device_free_ctl_info(dev_info->edac_dev);
+err_dev_put:
+ pci_dev_put(dev_info->dev);
+err:
+ return ret;
}
static void amd8111_dev_remove(struct pci_dev *dev)
@@ -437,6 +445,7 @@ static int amd8111_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct amd8111_pci_info *pci_info = &amd8111_pcis[id->driver_data];
+ int ret = -ENODEV;
pci_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
pci_info->err_dev, NULL);
@@ -446,16 +455,15 @@ static int amd8111_pci_probe(struct pci_dev *dev,
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, pci_info->err_dev,
pci_info->ctl_name);
- return -ENODEV;
+ goto err;
}
if (pci_enable_device(pci_info->dev)) {
- pci_dev_put(pci_info->dev);
printk(KERN_ERR "failed to enable:"
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, pci_info->err_dev,
pci_info->ctl_name);
- return -ENODEV;
+ goto err_dev_put;
}
/*
@@ -465,8 +473,10 @@ static int amd8111_pci_probe(struct pci_dev *dev,
*/
pci_info->edac_idx = edac_pci_alloc_index();
pci_info->edac_dev = edac_pci_alloc_ctl_info(0, pci_info->ctl_name);
- if (!pci_info->edac_dev)
- return -ENOMEM;
+ if (!pci_info->edac_dev) {
+ ret = -ENOMEM;
+ goto err_dev_put;
+ }
pci_info->edac_dev->pvt_info = pci_info;
pci_info->edac_dev->dev = &pci_info->dev->dev;
@@ -483,8 +493,7 @@ static int amd8111_pci_probe(struct pci_dev *dev,
if (edac_pci_add_device(pci_info->edac_dev, pci_info->edac_idx) > 0) {
printk(KERN_ERR "failed to add edac_pci for %s\n",
pci_info->ctl_name);
- edac_pci_free_ctl_info(pci_info->edac_dev);
- return -ENODEV;
+ goto err_edac_free_ctl;
}
printk(KERN_INFO "added one edac_pci on AMD8111 "
@@ -493,6 +502,13 @@ static int amd8111_pci_probe(struct pci_dev *dev,
pci_info->ctl_name);
return 0;
+
+err_edac_free_ctl:
+ edac_pci_free_ctl_info(pci_info->edac_dev);
+err_dev_put:
+ pci_dev_put(pci_info->dev);
+err:
+ return ret;
}
static void amd8111_pci_remove(struct pci_dev *dev)
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 92d54fa65f93..b2d71388172b 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -209,7 +209,6 @@ enum e752x_chips {
*/
struct e752x_pvt {
- struct pci_dev *bridge_ck;
struct pci_dev *dev_d0f0;
struct pci_dev *dev_d0f1;
u32 tolm;
@@ -891,7 +890,7 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
info->buf_ferr);
if (info->dram_ferr)
- pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
+ pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_FERR,
info->dram_ferr, info->dram_ferr);
pci_write_config_dword(dev, E752X_FERR_GLOBAL,
@@ -936,7 +935,7 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
info->buf_nerr);
if (info->dram_nerr)
- pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
+ pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_NERR,
info->dram_nerr, info->dram_nerr);
pci_write_config_dword(dev, E752X_NERR_GLOBAL,
@@ -1177,38 +1176,33 @@ static void e752x_init_mem_map_table(struct pci_dev *pdev,
static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
struct e752x_pvt *pvt)
{
- struct pci_dev *dev;
-
- pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
- pvt->dev_info->err_dev, pvt->bridge_ck);
+ pvt->dev_d0f1 = pci_get_device(PCI_VENDOR_ID_INTEL,
+ pvt->dev_info->err_dev, NULL);
- if (pvt->bridge_ck == NULL) {
- pvt->bridge_ck = pci_scan_single_device(pdev->bus,
+ if (pvt->dev_d0f1 == NULL) {
+ pvt->dev_d0f1 = pci_scan_single_device(pdev->bus,
PCI_DEVFN(0, 1));
- pci_dev_get(pvt->bridge_ck);
+ pci_dev_get(pvt->dev_d0f1);
}
- if (pvt->bridge_ck == NULL) {
+ if (pvt->dev_d0f1 == NULL) {
e752x_printk(KERN_ERR, "error reporting device not found:"
"vendor %x device 0x%x (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
return 1;
}
- dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ pvt->dev_d0f0 = pci_get_device(PCI_VENDOR_ID_INTEL,
e752x_devs[dev_idx].ctl_dev,
NULL);
- if (dev == NULL)
+ if (pvt->dev_d0f0 == NULL)
goto fail;
- pvt->dev_d0f0 = dev;
- pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
-
return 0;
fail:
- pci_dev_put(pvt->bridge_ck);
+ pci_dev_put(pvt->dev_d0f1);
return 1;
}
@@ -1385,7 +1379,6 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
fail:
pci_dev_put(pvt->dev_d0f0);
pci_dev_put(pvt->dev_d0f1);
- pci_dev_put(pvt->bridge_ck);
edac_mc_free(mci);
return -ENODEV;
@@ -1419,7 +1412,6 @@ static void e752x_remove_one(struct pci_dev *pdev)
pvt = (struct e752x_pvt *)mci->pvt_info;
pci_dev_put(pvt->dev_d0f0);
pci_dev_put(pvt->dev_d0f1);
- pci_dev_put(pvt->bridge_ck);
edac_mc_free(mci);
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index b335c6ab5efe..01fae8289cf0 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -7,7 +7,7 @@
*
* Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
*
- * (c) 2012-2013 - Mauro Carvalho Chehab <mchehab@redhat.com>
+ * (c) 2012-2013 - Mauro Carvalho Chehab
* The entire API were re-written, and ported to use struct device
*
*/
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index d5a98a45c062..8399b4e16fe0 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -4,7 +4,7 @@
* This file may be distributed under the terms of the GNU General Public
* License version 2.
*
- * Copyright (c) 2013 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2013 by Mauro Carvalho Chehab
*
* Red Hat Inc. http://www.redhat.com
*/
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index fa1326e5a4b0..022a70273ada 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -464,6 +464,8 @@ static void i3200_remove_one(struct pci_dev *pdev)
iounmap(priv->window);
edac_mc_free(mci);
+
+ pci_disable_device(pdev);
}
static const struct pci_device_id i3200_pci_tbl[] = {
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 36a38ee94fa8..6247d186177e 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -869,16 +869,13 @@ static void i5100_init_csrows(struct mem_ctl_info *mci)
chan, rank, 0);
dimm->nr_pages = npages;
- if (npages) {
- dimm->grain = 32;
- dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
- DEV_X4 : DEV_X8;
- dimm->mtype = MEM_RDDR2;
- dimm->edac_mode = EDAC_SECDED;
- snprintf(dimm->label, sizeof(dimm->label),
- "DIMM%u",
- i5100_rank_to_slot(mci, chan, rank));
- }
+ dimm->grain = 32;
+ dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
+ DEV_X4 : DEV_X8;
+ dimm->mtype = MEM_RDDR2;
+ dimm->edac_mode = EDAC_SECDED;
+ snprintf(dimm->label, sizeof(dimm->label), "DIMM%u",
+ i5100_rank_to_slot(mci, chan, rank));
edac_dbg(2, "dimm channel %d, rank %d, size %ld\n",
chan, rank, (long)PAGES_TO_MiB(npages));
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index e080cbfa8fc9..6ef6ad1ba16e 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -6,7 +6,7 @@
*
* Copyright (c) 2008 by:
* Ben Woodard <woodard@redhat.com>
- * Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Mauro Carvalho Chehab
*
* Red Hat Inc. http://www.redhat.com
*
@@ -1408,6 +1408,8 @@ static void i5400_remove_one(struct pci_dev *pdev)
/* retrieve references to resources, and free those resources */
i5400_put_devices(mci);
+ pci_disable_device(pdev);
+
edac_mc_free(mci);
}
@@ -1467,7 +1469,7 @@ module_exit(i5400_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Woodard <woodard@redhat.com>");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - "
I5400_REVISION);
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 57e96a3350f0..dcac982fdc7a 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -5,7 +5,7 @@
* GNU General Public License version 2 only.
*
* Copyright (c) 2010 by:
- * Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Mauro Carvalho Chehab
*
* Red Hat Inc. http://www.redhat.com
*
@@ -1209,7 +1209,7 @@ module_init(i7300_init);
module_exit(i7300_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
I7300_REVISION);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index d871275196f6..9cd0b301f81b 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -9,7 +9,7 @@
* GNU General Public License version 2 only.
*
* Copyright (c) 2009-2010 by:
- * Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Mauro Carvalho Chehab
*
* Red Hat Inc. http://www.redhat.com
*
@@ -1708,7 +1708,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
- char *type, *optype, *err;
+ char *optype, *err;
enum hw_event_mc_err_type tp_event;
unsigned long error = m->status & 0x1ff0000l;
bool uncorrected_error = m->mcgstatus & 1ll << 61;
@@ -1721,15 +1721,11 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
u32 errnum = find_first_bit(&error, 32);
if (uncorrected_error) {
- if (ripv) {
- type = "FATAL";
+ if (ripv)
tp_event = HW_EVENT_ERR_FATAL;
- } else {
- type = "NON_FATAL";
+ else
tp_event = HW_EVENT_ERR_UNCORRECTED;
- }
} else {
- type = "CORRECTED";
tp_event = HW_EVENT_ERR_CORRECTED;
}
@@ -2461,7 +2457,7 @@ module_init(i7core_init);
module_exit(i7core_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
I7CORE_REVISION);
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 80573df0a4d7..8d0450b9b9af 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -406,8 +406,6 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
edac_dbg(0, "\n");
- ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
-
if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
return -ENODEV;
drc = readl(ovrfl_window + I82875P_DRC);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 30f7309446a6..51b9caa0b024 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -741,6 +741,36 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (amd_filter_mce(m))
return NOTIFY_STOP;
+ pr_emerg(HW_ERR "%s\n", decode_error_status(m));
+
+ pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s",
+ m->extcpu,
+ c->x86, c->x86_model, c->x86_mask,
+ m->bank,
+ ((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
+ ((m->status & MCI_STATUS_UC) ? "UE" : "CE"),
+ ((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"),
+ ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
+ ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
+
+ if (c->x86 == 0x15 || c->x86 == 0x16)
+ pr_cont("|%s|%s",
+ ((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
+ ((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
+
+ /* do the two bits[14:13] together */
+ ecc = (m->status >> 45) & 0x3;
+ if (ecc)
+ pr_cont("|%sECC", ((ecc == 2) ? "C" : "U"));
+
+ pr_cont("]: 0x%016llx\n", m->status);
+
+ if (m->status & MCI_STATUS_ADDRV)
+ pr_emerg(HW_ERR "MC%d_ADDR: 0x%016llx\n", m->bank, m->addr);
+
+ if (!fam_ops)
+ goto err_code;
+
switch (m->bank) {
case 0:
decode_mc0_mce(m);
@@ -774,33 +804,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
break;
}
- pr_emerg(HW_ERR "Error Status: %s\n", decode_error_status(m));
-
- pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s",
- m->extcpu,
- c->x86, c->x86_model, c->x86_mask,
- m->bank,
- ((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
- ((m->status & MCI_STATUS_UC) ? "UE" : "CE"),
- ((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"),
- ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
- ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
-
- if (c->x86 == 0x15 || c->x86 == 0x16)
- pr_cont("|%s|%s",
- ((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
- ((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
-
- /* do the two bits[14:13] together */
- ecc = (m->status >> 45) & 0x3;
- if (ecc)
- pr_cont("|%sECC", ((ecc == 2) ? "C" : "U"));
-
- pr_cont("]: 0x%016llx\n", m->status);
-
- if (m->status & MCI_STATUS_ADDRV)
- pr_emerg(HW_ERR "MC%d_ADDR: 0x%016llx\n", m->bank, m->addr);
-
+ err_code:
amd_decode_err_code(m->status & 0xffff);
return NOTIFY_STOP;
@@ -816,10 +820,7 @@ static int __init mce_amd_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
if (c->x86_vendor != X86_VENDOR_AMD)
- return 0;
-
- if (c->x86 < 0xf || c->x86 > 0x16)
- return 0;
+ return -ENODEV;
fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
if (!fam_ops)
@@ -874,7 +875,7 @@ static int __init mce_amd_init(void)
default:
printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
kfree(fam_ops);
- return -EINVAL;
+ fam_ops = NULL;
}
pr_info("MCE: In-kernel MCE decoding enabled.\n");
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 8f9182179a7c..f4aec2e6ef56 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -357,7 +357,7 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
mpc85xx_pci_isr,
- IRQF_DISABLED | IRQF_SHARED,
+ IRQF_SHARED,
"[EDAC] PCI err", pci);
if (res < 0) {
printk(KERN_ERR
@@ -633,7 +633,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
if (edac_op_state == EDAC_OPSTATE_INT) {
pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
- mpc85xx_l2_isr, IRQF_DISABLED,
+ mpc85xx_l2_isr, 0,
"[EDAC] L2 err", edac_dev);
if (res < 0) {
printk(KERN_ERR
@@ -1133,7 +1133,7 @@ static int mpc85xx_mc_err_probe(struct platform_device *op)
pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
mpc85xx_mc_isr,
- IRQF_DISABLED | IRQF_SHARED,
+ IRQF_SHARED,
"[EDAC] MC err", mci);
if (res < 0) {
printk(KERN_ERR "%s: Unable to request irq %d for "
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index 93412d6b3af1..4bd10f94f068 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -5,12 +5,16 @@
*
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <ralf@linux-mips.org>
+ *
+ * Copyright (c) 2013 by Cisco Systems, Inc.
+ * All rights reserved.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/edac.h>
+#include <linux/ctype.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-lmcx-defs.h>
@@ -20,6 +24,18 @@
#define OCTEON_MAX_MC 4
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+struct octeon_lmc_pvt {
+ unsigned long inject;
+ unsigned long error_type;
+ unsigned long dimm;
+ unsigned long rank;
+ unsigned long bank;
+ unsigned long row;
+ unsigned long col;
+};
+
static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
{
union cvmx_lmcx_mem_cfg0 cfg0;
@@ -55,14 +71,31 @@ static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
{
+ struct octeon_lmc_pvt *pvt = mci->pvt_info;
union cvmx_lmcx_int int_reg;
bool do_clear = false;
char msg[64];
- int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
+ if (!pvt->inject)
+ int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
+ else {
+ int_reg.u64 = 0;
+ if (pvt->error_type == 1)
+ int_reg.s.sec_err = 1;
+ if (pvt->error_type == 2)
+ int_reg.s.ded_err = 1;
+ }
+
if (int_reg.s.sec_err || int_reg.s.ded_err) {
union cvmx_lmcx_fadr fadr;
- fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+ if (likely(!pvt->inject))
+ fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+ else {
+ fadr.cn61xx.fdimm = pvt->dimm;
+ fadr.cn61xx.fbunk = pvt->rank;
+ fadr.cn61xx.fbank = pvt->bank;
+ fadr.cn61xx.frow = pvt->row;
+ fadr.cn61xx.fcol = pvt->col;
+ }
snprintf(msg, sizeof(msg),
"DIMM %d rank %d bank %d row %d col %d",
fadr.cn61xx.fdimm, fadr.cn61xx.fbunk,
@@ -82,8 +115,128 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
int_reg.s.ded_err = -1; /* Done, re-arm */
do_clear = true;
}
- if (do_clear)
- cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
+
+ if (do_clear) {
+ if (likely(!pvt->inject))
+ cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
+ else
+ pvt->inject = 0;
+ }
+}
+
+/************************ MC SYSFS parts ***********************************/
+
+/* Only a couple naming differences per template, so very similar */
+#define TEMPLATE_SHOW(reg) \
+static ssize_t octeon_mc_inject_##reg##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *data) \
+{ \
+ struct mem_ctl_info *mci = to_mci(dev); \
+ struct octeon_lmc_pvt *pvt = mci->pvt_info; \
+ return sprintf(data, "%016llu\n", (u64)pvt->reg); \
+}
+
+#define TEMPLATE_STORE(reg) \
+static ssize_t octeon_mc_inject_##reg##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *data, size_t count) \
+{ \
+ struct mem_ctl_info *mci = to_mci(dev); \
+ struct octeon_lmc_pvt *pvt = mci->pvt_info; \
+ if (isdigit(*data)) { \
+ if (!kstrtoul(data, 0, &pvt->reg)) \
+ return count; \
+ } \
+ return 0; \
+}
+
+TEMPLATE_SHOW(inject);
+TEMPLATE_STORE(inject);
+TEMPLATE_SHOW(dimm);
+TEMPLATE_STORE(dimm);
+TEMPLATE_SHOW(bank);
+TEMPLATE_STORE(bank);
+TEMPLATE_SHOW(rank);
+TEMPLATE_STORE(rank);
+TEMPLATE_SHOW(row);
+TEMPLATE_STORE(row);
+TEMPLATE_SHOW(col);
+TEMPLATE_STORE(col);
+
+static ssize_t octeon_mc_inject_error_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data,
+ size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct octeon_lmc_pvt *pvt = mci->pvt_info;
+
+ if (!strncmp(data, "single", 6))
+ pvt->error_type = 1;
+ else if (!strncmp(data, "double", 6))
+ pvt->error_type = 2;
+
+ return count;
+}
+
+static ssize_t octeon_mc_inject_error_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *data)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct octeon_lmc_pvt *pvt = mci->pvt_info;
+ if (pvt->error_type == 1)
+ return sprintf(data, "single");
+ else if (pvt->error_type == 2)
+ return sprintf(data, "double");
+
+ return 0;
+}
+
+static DEVICE_ATTR(inject, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_inject_show, octeon_mc_inject_inject_store);
+static DEVICE_ATTR(error_type, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_error_type_show, octeon_mc_inject_error_type_store);
+static DEVICE_ATTR(dimm, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_dimm_show, octeon_mc_inject_dimm_store);
+static DEVICE_ATTR(rank, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_rank_show, octeon_mc_inject_rank_store);
+static DEVICE_ATTR(bank, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_bank_show, octeon_mc_inject_bank_store);
+static DEVICE_ATTR(row, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_row_show, octeon_mc_inject_row_store);
+static DEVICE_ATTR(col, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_col_show, octeon_mc_inject_col_store);
+
+
+static int octeon_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+{
+ int rc;
+
+ rc = device_create_file(&mci->dev, &dev_attr_inject);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_error_type);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_dimm);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_rank);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_bank);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_row);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_col);
+ if (rc < 0)
+ return rc;
+
+ return 0;
}
static int octeon_lmc_edac_probe(struct platform_device *pdev)
@@ -92,6 +245,8 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
struct edac_mc_layer layers[1];
int mc = pdev->id;
+ opstate_init();
+
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = 1;
layers[0].is_virt_csrow = false;
@@ -105,7 +260,7 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
return 0;
}
- mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
if (!mci)
return -ENXIO;
@@ -122,6 +277,12 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
return -ENXIO;
}
+ if (octeon_set_mc_sysfs_attributes(mci)) {
+ dev_err(&pdev->dev, "octeon_set_mc_sysfs_attributes() failed\n");
+ return -ENXIO;
+ }
+
+
cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
cfg0.s.intr_ded_ena = 0; /* We poll */
cfg0.s.intr_sec_ena = 0;
@@ -137,7 +298,7 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
return 0;
}
- mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
if (!mci)
return -ENXIO;
@@ -154,6 +315,12 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
return -ENXIO;
}
+ if (octeon_set_mc_sysfs_attributes(mci)) {
+ dev_err(&pdev->dev, "octeon_set_mc_sysfs_attributes() failed\n");
+ return -ENXIO;
+ }
+
+
en.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
en.s.intr_ded_ena = 0; /* We poll */
en.s.intr_sec_ena = 0;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 54e2abe671f7..deea0dc9999b 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -7,7 +7,7 @@
* GNU General Public License version 2 only.
*
* Copyright (c) 2011 by:
- * Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Mauro Carvalho Chehab
*/
#include <linux/module.h>
@@ -1263,7 +1263,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
struct pci_dev *pdev = NULL;
u8 bus = 0;
- sbridge_printk(KERN_INFO,
+ sbridge_printk(KERN_DEBUG,
"Seeking for: dev %02x.%d PCI ID %04x:%04x\n",
dev_descr->dev, dev_descr->func,
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
@@ -1828,6 +1828,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
struct mce *mce = (struct mce *)data;
struct mem_ctl_info *mci;
struct sbridge_pvt *pvt;
+ char *type;
if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
return NOTIFY_DONE;
@@ -1846,17 +1847,23 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
if ((mce->status & 0xefff) >> 7 != 1)
return NOTIFY_DONE;
- printk("sbridge: HANDLING MCE MEMORY ERROR\n");
+ if (mce->mcgstatus & MCG_STATUS_MCIP)
+ type = "Exception";
+ else
+ type = "Event";
+
+ sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
- printk("CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
- mce->extcpu, mce->mcgstatus, mce->bank, mce->status);
- printk("TSC %llx ", mce->tsc);
- printk("ADDR %llx ", mce->addr);
- printk("MISC %llx ", mce->misc);
+ sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
+ "Bank %d: %016Lx\n", mce->extcpu, type,
+ mce->mcgstatus, mce->bank, mce->status);
+ sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
+ sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
+ sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
- printk("PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
- mce->cpuvendor, mce->cpuid, mce->time,
- mce->socketid, mce->apicid);
+ sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
+ "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
+ mce->time, mce->socketid, mce->apicid);
/* Only handle if it is the right mc controller */
if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
@@ -2176,7 +2183,7 @@ module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
SBRIDGE_REVISION);
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index bdb5a00f1dfa..be56e8ac95e6 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -14,10 +14,6 @@ if EXTCON
comment "Extcon Device Drivers"
-config OF_EXTCON
- def_tristate y
- depends on OF
-
config EXTCON_GPIO
tristate "GPIO extcon support"
depends on GPIOLIB
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 43eccc0e3448..bf7861ec0906 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -2,8 +2,6 @@
# Makefile for external connector class (extcon) devices
#
-obj-$(CONFIG_OF_EXTCON) += of_extcon.o
-
obj-$(CONFIG_EXTCON) += extcon-class.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 76322330cbd7..7ab21aa6eaa1 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -31,6 +31,7 @@
#include <linux/extcon.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/of.h>
/*
* extcon_cable_name suggests the standard cable names for commonly used
@@ -818,6 +819,47 @@ void extcon_dev_unregister(struct extcon_dev *edev)
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
+#ifdef CONFIG_OF
+/*
+ * extcon_get_edev_by_phandle - Get the extcon device from devicetree
+ * @dev: pointer to the consumer device requesting the extcon device
+ * @index: index into the "extcon" phandle list of @dev
+ *
+ * Returns the extcon device instance, or an ERR_PTR() on failure.
+ */
+struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
+{
+ struct device_node *node;
+ struct extcon_dev *edev;
+
+ if (!dev->of_node) {
+ dev_err(dev, "device does not have a device node entry\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ node = of_parse_phandle(dev->of_node, "extcon", index);
+ if (!node) {
+ dev_err(dev, "failed to get phandle in %s node\n",
+ dev->of_node->full_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ edev = extcon_get_extcon_dev(node->name);
+ if (!edev) {
+ dev_err(dev, "unable to get extcon device : %s\n", node->name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return edev;
+}
+#else
+struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(extcon_get_edev_by_phandle);
+
static int __init extcon_class_init(void)
{
return create_extcon_class();
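A hypothetical consumer sketch, not part of this patch, of the new lookup: a driver whose node carries an "extcon" phandle property resolves the extcon device directly and can then query cable state. The probe function and error handling are illustrative only.

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	/* Index 0: first entry of the "extcon" phandle list */
	edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* edev can now be used with extcon_get_cable_state() and friends */
	return 0;
}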
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index a63a6b21c9ad..13d522255d81 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -176,9 +176,7 @@ static int gpio_extcon_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops gpio_extcon_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, gpio_extcon_resume)
-};
+static SIMPLE_DEV_PM_OPS(gpio_extcon_pm_ops, NULL, gpio_extcon_resume);
static struct platform_driver gpio_extcon_driver = {
.probe = gpio_extcon_probe,
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 2aea4bcdd7f3..ddff2b72f0a8 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -271,10 +271,7 @@ static int palmas_usb_resume(struct device *dev)
};
#endif
-static const struct dev_pm_ops palmas_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(palmas_usb_suspend,
- palmas_usb_resume)
-};
+static SIMPLE_DEV_PM_OPS(palmas_pm_ops, palmas_usb_suspend, palmas_usb_resume);
static struct of_device_id of_palmas_match_tbl[] = {
{ .compatible = "ti,palmas-usb", },
diff --git a/drivers/extcon/of_extcon.c b/drivers/extcon/of_extcon.c
deleted file mode 100644
index 72173ecbb311..000000000000
--- a/drivers/extcon/of_extcon.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * OF helpers for External connector (extcon) framework
- *
- * Copyright (C) 2013 Texas Instruments, Inc.
- * Kishon Vijay Abraham I <kishon@ti.com>
- *
- * Copyright (C) 2013 Samsung Electronics
- * Chanwoo Choi <cw00.choi@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/extcon.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/extcon/of_extcon.h>
-
-/*
- * of_extcon_get_extcon_dev - Get the name of extcon device from devicetree
- * @dev - instance to the given device
- * @index - index into list of extcon_dev
- *
- * return the instance of extcon device
- */
-struct extcon_dev *of_extcon_get_extcon_dev(struct device *dev, int index)
-{
- struct device_node *node;
- struct extcon_dev *edev;
- struct platform_device *extcon_parent_dev;
-
- if (!dev->of_node) {
- dev_dbg(dev, "device does not have a device node entry\n");
- return ERR_PTR(-EINVAL);
- }
-
- node = of_parse_phandle(dev->of_node, "extcon", index);
- if (!node) {
- dev_dbg(dev, "failed to get phandle in %s node\n",
- dev->of_node->full_name);
- return ERR_PTR(-ENODEV);
- }
-
- extcon_parent_dev = of_find_device_by_node(node);
- if (!extcon_parent_dev) {
- dev_dbg(dev, "unable to find device by node\n");
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- edev = extcon_get_extcon_dev(dev_name(&extcon_parent_dev->dev));
- if (!edev) {
- dev_dbg(dev, "unable to get extcon device : %s\n",
- dev_name(&extcon_parent_dev->dev));
- return ERR_PTR(-ENODEV);
- }
-
- return edev;
-}
-EXPORT_SYMBOL_GPL(of_extcon_get_extcon_dev);
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 1b5e8e46226d..7160c43c59fc 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -584,7 +584,7 @@ static struct platform_driver dcdbas_driver = {
.remove = dcdbas_remove,
};
-static const struct platform_device_info dcdbas_dev_info __initdata = {
+static const struct platform_device_info dcdbas_dev_info __initconst = {
.name = DRIVER_NAME,
.id = -1,
.dma_mask = DMA_BIT_MASK(32),
diff --git a/drivers/firmware/efi/efi-stub-helper.c b/drivers/firmware/efi/efi-stub-helper.c
index b6bffbfd3be7..2c41eaece2c1 100644
--- a/drivers/firmware/efi/efi-stub-helper.c
+++ b/drivers/firmware/efi/efi-stub-helper.c
@@ -16,18 +16,6 @@ struct file_info {
u64 size;
};
-
-
-
-static void efi_char16_printk(efi_system_table_t *sys_table_arg,
- efi_char16_t *str)
-{
- struct efi_simple_text_output_protocol *out;
-
- out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out;
- efi_call_phys2(out->output_string, out, str);
-}
-
static void efi_printk(efi_system_table_t *sys_table_arg, char *str)
{
char *s8;
@@ -65,20 +53,23 @@ again:
* allocation which may be in a new descriptor region.
*/
*map_size += sizeof(*m);
- status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
- EFI_LOADER_DATA, *map_size, (void **)&m);
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ *map_size, (void **)&m);
if (status != EFI_SUCCESS)
goto fail;
- status = efi_call_phys5(sys_table_arg->boottime->get_memory_map,
- map_size, m, &key, desc_size, &desc_version);
+ *desc_size = 0;
+ key = 0;
+ status = efi_call_early(get_memory_map, map_size, m,
+ &key, desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL) {
- efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+ efi_call_early(free_pool, m);
goto again;
}
if (status != EFI_SUCCESS)
- efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+ efi_call_early(free_pool, m);
+
if (key_ptr && status == EFI_SUCCESS)
*key_ptr = key;
if (desc_ver && status == EFI_SUCCESS)
@@ -158,7 +149,7 @@ again:
if (!max_addr)
status = EFI_NOT_FOUND;
else {
- status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ status = efi_call_early(allocate_pages,
EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
nr_pages, &max_addr);
if (status != EFI_SUCCESS) {
@@ -170,8 +161,7 @@ again:
*addr = max_addr;
}
- efi_call_phys1(sys_table_arg->boottime->free_pool, map);
-
+ efi_call_early(free_pool, map);
fail:
return status;
}
@@ -231,7 +221,7 @@ static efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
if ((start + size) > end)
continue;
- status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ status = efi_call_early(allocate_pages,
EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
nr_pages, &start);
if (status == EFI_SUCCESS) {
@@ -243,7 +233,7 @@ static efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
if (i == map_size / desc_size)
status = EFI_NOT_FOUND;
- efi_call_phys1(sys_table_arg->boottime->free_pool, map);
+ efi_call_early(free_pool, map);
fail:
return status;
}
@@ -257,7 +247,7 @@ static void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
return;
nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- efi_call_phys2(sys_table_arg->boottime->free_pages, addr, nr_pages);
+ efi_call_early(free_pages, addr, nr_pages);
}
@@ -276,9 +266,7 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
{
struct file_info *files;
unsigned long file_addr;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
u64 file_size_total;
- efi_file_io_interface_t *io;
efi_file_handle_t *fh;
efi_status_t status;
int nr_files;
@@ -319,10 +307,8 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
if (!nr_files)
return EFI_SUCCESS;
- status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
- EFI_LOADER_DATA,
- nr_files * sizeof(*files),
- (void **)&files);
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ nr_files * sizeof(*files), (void **)&files);
if (status != EFI_SUCCESS) {
efi_printk(sys_table_arg, "Failed to alloc mem for file handle list\n");
goto fail;
@@ -331,13 +317,8 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
str = cmd_line;
for (i = 0; i < nr_files; i++) {
struct file_info *file;
- efi_file_handle_t *h;
- efi_file_info_t *info;
efi_char16_t filename_16[256];
- unsigned long info_sz;
- efi_guid_t info_guid = EFI_FILE_INFO_ID;
efi_char16_t *p;
- u64 file_sz;
str = strstr(str, option_string);
if (!str)
@@ -368,71 +349,18 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
/* Only open the volume once. */
if (!i) {
- efi_boot_services_t *boottime;
-
- boottime = sys_table_arg->boottime;
-
- status = efi_call_phys3(boottime->handle_protocol,
- image->device_handle, &fs_proto,
- (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
- goto free_files;
- }
-
- status = efi_call_phys2(io->open_volume, io, &fh);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to open volume\n");
+ status = efi_open_volume(sys_table_arg, image,
+ (void **)&fh);
+ if (status != EFI_SUCCESS)
goto free_files;
- }
}
- status = efi_call_phys5(fh->open, fh, &h, filename_16,
- EFI_FILE_MODE_READ, (u64)0);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to open file: ");
- efi_char16_printk(sys_table_arg, filename_16);
- efi_printk(sys_table_arg, "\n");
+ status = efi_file_size(sys_table_arg, fh, filename_16,
+ (void **)&file->handle, &file->size);
+ if (status != EFI_SUCCESS)
goto close_handles;
- }
- file->handle = h;
-
- info_sz = 0;
- status = efi_call_phys4(h->get_info, h, &info_guid,
- &info_sz, NULL);
- if (status != EFI_BUFFER_TOO_SMALL) {
- efi_printk(sys_table_arg, "Failed to get file info size\n");
- goto close_handles;
- }
-
-grow:
- status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
- EFI_LOADER_DATA, info_sz,
- (void **)&info);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
- goto close_handles;
- }
-
- status = efi_call_phys4(h->get_info, h, &info_guid,
- &info_sz, info);
- if (status == EFI_BUFFER_TOO_SMALL) {
- efi_call_phys1(sys_table_arg->boottime->free_pool,
- info);
- goto grow;
- }
-
- file_sz = info->file_size;
- efi_call_phys1(sys_table_arg->boottime->free_pool, info);
-
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to get file info\n");
- goto close_handles;
- }
-
- file->size = file_sz;
- file_size_total += file_sz;
+ file_size_total += file->size;
}
if (file_size_total) {
@@ -468,10 +396,10 @@ grow:
chunksize = EFI_READ_CHUNK_SIZE;
else
chunksize = size;
- status = efi_call_phys3(fh->read,
- files[j].handle,
- &chunksize,
- (void *)addr);
+
+ status = efi_file_read(files[j].handle,
+ &chunksize,
+ (void *)addr);
if (status != EFI_SUCCESS) {
efi_printk(sys_table_arg, "Failed to read file\n");
goto free_file_total;
@@ -480,12 +408,12 @@ grow:
size -= chunksize;
}
- efi_call_phys1(fh->close, files[j].handle);
+ efi_file_close(files[j].handle);
}
}
- efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+ efi_call_early(free_pool, files);
*load_addr = file_addr;
*load_size = file_size_total;
@@ -497,9 +425,9 @@ free_file_total:
close_handles:
for (k = j; k < i; k++)
- efi_call_phys1(fh->close, files[k].handle);
+ efi_file_close(files[k].handle);
free_files:
- efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+ efi_call_early(free_pool, files);
fail:
*load_addr = 0;
*load_size = 0;
@@ -545,7 +473,7 @@ static efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
* as possible while respecting the required alignment.
*/
nr_pages = round_up(alloc_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ status = efi_call_early(allocate_pages,
EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
nr_pages, &efi_addr);
new_addr = efi_addr;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 4753bac65279..af20f1712337 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -233,7 +233,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab},
{SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
{UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
- {NULL_GUID, NULL, 0},
+ {NULL_GUID, NULL, NULL},
};
static __init int match_config_table(efi_guid_t *guid,
@@ -313,5 +313,8 @@ int __init efi_config_init(efi_config_table_type_t *arch_tables)
}
pr_cont("\n");
early_iounmap(config_tables, efi.systab->nr_tables * sz);
+
+ set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
return 0;
}
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 3dc248239197..50ea412a25e6 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -227,7 +227,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
memcpy(&entry->var, new_var, count);
err = efivar_entry_set(entry, new_var->Attributes,
- new_var->DataSize, new_var->Data, false);
+ new_var->DataSize, new_var->Data, NULL);
if (err) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%d\n", err);
return -EIO;
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index e5a67b24587a..f1ab05ea56bb 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -892,13 +892,6 @@ static __init int gsmi_init(void)
goto out_remove_sysfs_files;
}
- ret = efivars_sysfs_init();
- if (ret) {
- printk(KERN_INFO "gsmi: Failed to create efivars files\n");
- efivars_unregister(&efivars);
- goto out_remove_sysfs_files;
- }
-
register_reboot_notifier(&gsmi_reboot_notifier);
register_die_notifier(&gsmi_die_notifier);
atomic_notifier_chain_register(&panic_notifier_list,
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index 2a90ba613613..2f569aaed4c7 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -15,6 +15,7 @@
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/dmi.h>
+#include <linux/io.h>
#include <asm/bios_ebda.h>
#define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE
@@ -41,15 +42,25 @@ struct biosmemcon_ebda {
};
} __packed;
-static char *memconsole_baseaddr;
+static u32 memconsole_baseaddr;
static size_t memconsole_length;
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
- return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
- memconsole_length);
+ char *memconsole;
+ ssize_t ret;
+
+ memconsole = ioremap_cache(memconsole_baseaddr, memconsole_length);
+ if (!memconsole) {
+ pr_err("memconsole: ioremap_cache failed\n");
+ return -ENOMEM;
+ }
+ ret = memory_read_from_buffer(buf, count, &pos, memconsole,
+ memconsole_length);
+ iounmap(memconsole);
+ return ret;
}
static struct bin_attribute memconsole_bin_attr = {
@@ -58,43 +69,42 @@ static struct bin_attribute memconsole_bin_attr = {
};
-static void found_v1_header(struct biosmemcon_ebda *hdr)
+static void __init found_v1_header(struct biosmemcon_ebda *hdr)
{
- printk(KERN_INFO "BIOS console v1 EBDA structure found at %p\n", hdr);
- printk(KERN_INFO "BIOS console buffer at 0x%.8x, "
+ pr_info("BIOS console v1 EBDA structure found at %p\n", hdr);
+ pr_info("BIOS console buffer at 0x%.8x, "
"start = %d, end = %d, num = %d\n",
hdr->v1.buffer_addr, hdr->v1.start,
hdr->v1.end, hdr->v1.num_chars);
memconsole_length = hdr->v1.num_chars;
- memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr);
+ memconsole_baseaddr = hdr->v1.buffer_addr;
}
-static void found_v2_header(struct biosmemcon_ebda *hdr)
+static void __init found_v2_header(struct biosmemcon_ebda *hdr)
{
- printk(KERN_INFO "BIOS console v2 EBDA structure found at %p\n", hdr);
- printk(KERN_INFO "BIOS console buffer at 0x%.8x, "
+ pr_info("BIOS console v2 EBDA structure found at %p\n", hdr);
+ pr_info("BIOS console buffer at 0x%.8x, "
"start = %d, end = %d, num_bytes = %d\n",
hdr->v2.buffer_addr, hdr->v2.start,
hdr->v2.end, hdr->v2.num_bytes);
memconsole_length = hdr->v2.end - hdr->v2.start;
- memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr
- + hdr->v2.start);
+ memconsole_baseaddr = hdr->v2.buffer_addr + hdr->v2.start;
}
/*
* Search through the EBDA for the BIOS Memory Console, and
* set the global variables to point to it. Return true if found.
*/
-static bool found_memconsole(void)
+static bool __init found_memconsole(void)
{
unsigned int address;
size_t length, cur;
address = get_bios_ebda();
if (!address) {
- printk(KERN_INFO "BIOS EBDA non-existent.\n");
+ pr_info("BIOS EBDA non-existent.\n");
return false;
}
@@ -122,7 +132,7 @@ static bool found_memconsole(void)
}
}
- printk(KERN_INFO "BIOS console EBDA structure not found!\n");
+ pr_info("BIOS console EBDA structure not found!\n");
return false;
}
@@ -139,8 +149,6 @@ MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table);
static int __init memconsole_init(void)
{
- int ret;
-
if (!dmi_check_system(memconsole_dmi_table))
return -ENODEV;
@@ -148,10 +156,7 @@ static int __init memconsole_init(void)
return -ENODEV;
memconsole_bin_attr.size = memconsole_length;
-
- ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
-
- return ret;
+ return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
}
static void __exit memconsole_exit(void)
diff --git a/drivers/fmc/fmc-core.c b/drivers/fmc/fmc-core.c
index 24d52497524d..353fc546fb08 100644
--- a/drivers/fmc/fmc-core.c
+++ b/drivers/fmc/fmc-core.c
@@ -99,10 +99,23 @@ static ssize_t fmc_read_eeprom(struct file *file, struct kobject *kobj,
return count;
}
+static ssize_t fmc_write_eeprom(struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct fmc_device *fmc;
+
+ dev = container_of(kobj, struct device, kobj);
+ fmc = container_of(dev, struct fmc_device, dev);
+ return fmc->op->write_ee(fmc, off, buf, count);
+}
+
static struct bin_attribute fmc_eeprom_attr = {
- .attr = { .name = "eeprom", .mode = S_IRUGO, },
+ .attr = { .name = "eeprom", .mode = S_IRUGO | S_IWUSR, },
.size = 8192, /* more or less standard */
.read = fmc_read_eeprom,
+ .write = fmc_write_eeprom,
};
/*
@@ -154,7 +167,7 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
ret = -EINVAL;
break;
}
- if (fmc->flags == FMC_DEVICE_NO_MEZZANINE) {
+ if (fmc->flags & FMC_DEVICE_NO_MEZZANINE) {
dev_info(fmc->hwdev, "absent mezzanine in slot %d\n",
fmc->slot_id);
continue;
@@ -189,9 +202,6 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
for (i = 0; i < n; i++) {
fmc = devarray[i];
- if (fmc->flags == FMC_DEVICE_NO_MEZZANINE)
- continue; /* dev_info already done above */
-
fmc->nr_slots = n; /* each slot must know how many are there */
fmc->devarray = devarray;
@@ -263,8 +273,6 @@ void fmc_device_unregister_n(struct fmc_device **devs, int n)
kfree(devs[0]->devarray);
for (i = 0; i < n; i++) {
- if (devs[i]->flags == FMC_DEVICE_NO_MEZZANINE)
- continue;
sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
device_del(&devs[i]->dev);
fmc_free_id_info(devs[i]);
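A small userspace sketch, not part of the patch, of what the new write path enables: with the eeprom attribute now S_IWUSR, root can patch bytes through sysfs and fmc_write_eeprom() forwards them to the carrier's write_ee() hook. The device path and offset below are assumptions for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Replace <card> with the actual FMC device name on the system */
	const char *path = "/sys/bus/fmc/devices/<card>/eeprom";
	const unsigned char patch[] = { 0xde, 0xad, 0xbe, 0xef };
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (pwrite(fd, patch, sizeof(patch), 0x100) != (ssize_t)sizeof(patch))
		perror("pwrite");
	close(fd);
	return 0;
}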
diff --git a/drivers/fmc/fmc-sdb.c b/drivers/fmc/fmc-sdb.c
index 79adc39221ea..4603fdb74465 100644
--- a/drivers/fmc/fmc-sdb.c
+++ b/drivers/fmc/fmc-sdb.c
@@ -150,23 +150,36 @@ int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
}
EXPORT_SYMBOL(fmc_reprogram);
+static char *__strip_trailing_space(char *buf, char *str, int len)
+{
+ int i = len - 1;
+
+ memcpy(buf, str, len);
+ while (i >= 0 && buf[i] == ' ')
+ buf[i--] = '\0';
+ return buf;
+}
+
+#define __sdb_string(buf, field) ({ \
+ BUILD_BUG_ON(sizeof(buf) < sizeof(field)); \
+ __strip_trailing_space(buf, (void *)(field), sizeof(field)); \
+ })
+
static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
const struct sdb_array *arr)
{
+ unsigned long base = arr->baseaddr;
int i, j, n = arr->len, level = arr->level;
- const struct sdb_array *ap;
+ char buf[64];
for (i = 0; i < n; i++) {
- unsigned long base;
union sdb_record *r;
struct sdb_product *p;
struct sdb_component *c;
r = &arr->record[i];
c = &r->dev.sdb_component;
p = &c->product;
- base = 0;
- for (ap = arr; ap; ap = ap->parent)
- base += ap->baseaddr;
+
dev_info(&fmc->dev, "SDB: ");
for (j = 0; j < level; j++)
@@ -193,8 +206,8 @@ static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
p->name,
__be64_to_cpu(c->addr_first) + base);
if (IS_ERR(arr->subtree[i])) {
- printk(KERN_CONT "(bridge error %li)\n",
- PTR_ERR(arr->subtree[i]));
+ dev_info(&fmc->dev, "SDB: (bridge error %li)\n",
+ PTR_ERR(arr->subtree[i]));
break;
}
__fmc_show_sdb_tree(fmc, arr->subtree[i]);
@@ -203,10 +216,20 @@ static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
printk(KERN_CONT "integration\n");
break;
case sdb_type_repo_url:
- printk(KERN_CONT "repo-url\n");
+ printk(KERN_CONT "Synthesis repository: %s\n",
+ __sdb_string(buf, r->repo_url.repo_url));
break;
case sdb_type_synthesis:
- printk(KERN_CONT "synthesis-info\n");
+ printk(KERN_CONT "Bitstream '%s' ",
+ __sdb_string(buf, r->synthesis.syn_name));
+ printk(KERN_CONT "synthesized %08x by %s ",
+ __be32_to_cpu(r->synthesis.date),
+ __sdb_string(buf, r->synthesis.user_name));
+ printk(KERN_CONT "(%s version %x), ",
+ __sdb_string(buf, r->synthesis.tool_name),
+ __be32_to_cpu(r->synthesis.tool_version));
+ printk(KERN_CONT "commit %pm\n",
+ r->synthesis.commit_id);
break;
case sdb_type_empty:
printk(KERN_CONT "empty\n");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 903f24d28ba0..a86c49a605c6 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -55,6 +55,9 @@ config GPIO_ACPI
def_bool y
depends on ACPI
+config GPIOLIB_IRQCHIP
+ bool
+
config DEBUG_GPIO
bool "Debug GPIO calls"
depends on DEBUG_KERNEL
@@ -128,6 +131,15 @@ config GPIO_GENERIC_PLATFORM
help
Say yes here to support basic platform_device memory-mapped GPIO controllers.
+config GPIO_DWAPB
+ tristate "Synopsys DesignWare APB GPIO driver"
+ select GPIO_GENERIC
+ select GENERIC_IRQ_CHIP
+ depends on OF_GPIO
+ help
+ Say Y or M here to build support for the Synopsys DesignWare APB
+ GPIO block.
+
config GPIO_IT8761E
tristate "IT8761E GPIO support"
depends on X86 # unconditional access to IO space.
@@ -145,6 +157,12 @@ config GPIO_EP93XX
depends on ARCH_EP93XX
select GPIO_GENERIC
+config GPIO_ZEVIO
+ bool "LSI ZEVIO SoC memory mapped GPIOs"
+ depends on ARM && OF_GPIO
+ help
+ Say yes here to support the GPIO controller in LSI ZEVIO SoCs.
+
config GPIO_MM_LANTIQ
bool "Lantiq Memory mapped GPIOs"
depends on LANTIQ && SOC_XWAY
@@ -192,7 +210,7 @@ config GPIO_MSM_V1
config GPIO_MSM_V2
tristate "Qualcomm MSM GPIO v2"
- depends on GPIOLIB && OF && ARCH_MSM
+ depends on GPIOLIB && OF && ARCH_QCOM
help
Say yes here to support the GPIO interface on ARM v7 based
Qualcomm MSM chips. Most of the pins on the MSM can be
@@ -228,7 +246,8 @@ config GPIO_OCTEON
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
- select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
Say yes here to support the PrimeCell PL061 GPIO device
@@ -275,8 +294,15 @@ config GPIO_STA2X11
Say yes here to support the STA2x11/ConneXt GPIO device.
The GPIO module has 128 GPIO pins with alternate functions.
+config GPIO_SYSCON
+ tristate "GPIO based on SYSCON"
+ depends on MFD_SYSCON && OF
+ help
+ Say yes here to support GPIO functionality through the SYSCON driver.
+
config GPIO_TS5500
tristate "TS-5500 DIO blocks and compatibles"
+ depends on TS5500 || COMPILE_TEST
help
This driver supports Digital I/O exposed by pin blocks found on some
Technologic Systems platforms. It includes, but is not limited to, 3
@@ -462,7 +488,7 @@ config GPIO_MC9S08DZ60
Select this to enable the MC9S08DZ60 GPIO driver
config GPIO_PCA953X
- tristate "PCA953x, PCA955x, PCA957x, TCA64xx, and MAX7310 I/O ports"
+ tristate "PCA95[357]x, PCA9698, TCA64xx, and MAX7310 I/O ports"
depends on I2C
help
Say yes here to provide access to several register-oriented
@@ -472,11 +498,15 @@ config GPIO_PCA953X
4 bits: pca9536, pca9537
8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
- pca9556, pca9557, pca9574, tca6408
+ pca9556, pca9557, pca9574, tca6408, xra1202
16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
tca6416
+ 24 bits: tca6424
+
+ 40 bits: pca9505, pca9698
+
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
depends on GPIO_PCA953X=y
@@ -630,7 +660,7 @@ comment "PCI GPIO expanders:"
config GPIO_CS5535
tristate "AMD CS5535/CS5536 GPIO support"
- depends on PCI && X86 && MFD_CS5535
+ depends on MFD_CS5535
help
The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
can be used for quite a number of things. The CS5535/6 is found on
@@ -642,7 +672,7 @@ config GPIO_BT8XX
tristate "BT8XX GPIO abuser"
depends on PCI && VIDEO_BT848=n
help
- The BT8xx frame grabber chip has 24 GPIO pins than can be abused
+ The BT8xx frame grabber chip has 24 GPIO pins that can be abused
as a cheap PCI GPIO card.
This chip can be found on Miro, Hauppauge and STB TV-cards.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 5d50179ece16..6309aff1d806 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o
obj-$(CONFIG_GPIO_DAVINCI) += gpio-davinci.o
+obj-$(CONFIG_GPIO_DWAPB) += gpio-dwapb.o
obj-$(CONFIG_GPIO_EM) += gpio-em.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_F7188X) += gpio-f7188x.o
@@ -76,11 +77,11 @@ obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
+obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o
obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
-obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
@@ -99,3 +100,4 @@ obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
+obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 6fc6206b38bd..b2239d678d01 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -408,24 +408,23 @@ static void adnp_irq_bus_unlock(struct irq_data *data)
mutex_unlock(&adnp->irq_lock);
}
-static unsigned int adnp_irq_startup(struct irq_data *data)
+static int adnp_irq_reqres(struct irq_data *data)
{
struct adnp *adnp = irq_data_get_irq_chip_data(data);
- if (gpio_lock_as_irq(&adnp->gpio, data->hwirq))
+ if (gpio_lock_as_irq(&adnp->gpio, data->hwirq)) {
dev_err(adnp->gpio.dev,
"unable to lock HW IRQ %lu for IRQ\n",
data->hwirq);
- /* Satisfy the .enable semantics by unmasking the line */
- adnp_irq_unmask(data);
+ return -EINVAL;
+ }
return 0;
}
-static void adnp_irq_shutdown(struct irq_data *data)
+static void adnp_irq_relres(struct irq_data *data)
{
struct adnp *adnp = irq_data_get_irq_chip_data(data);
- adnp_irq_mask(data);
gpio_unlock_as_irq(&adnp->gpio, data->hwirq);
}
@@ -436,8 +435,8 @@ static struct irq_chip adnp_irq_chip = {
.irq_set_type = adnp_irq_set_type,
.irq_bus_lock = adnp_irq_bus_lock,
.irq_bus_sync_unlock = adnp_irq_bus_unlock,
- .irq_startup = adnp_irq_startup,
- .irq_shutdown = adnp_irq_shutdown,
+ .irq_request_resources = adnp_irq_reqres,
+ .irq_release_resources = adnp_irq_relres,
};
static int adnp_irq_map(struct irq_domain *domain, unsigned int irq,
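The adnp change above is the first instance of a conversion that recurs throughout this series (bcm-kona, em, intel-mid, lynxpoint, mcp23s08): .irq_startup/.irq_shutdown, which also unmasked and masked the line, are replaced by .irq_request_resources/.irq_release_resources that only claim and release the GPIO. A condensed sketch of the pattern, with the foo_* names standing in for any of the drivers touched here:

#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/irq.h>

/* stand-in for the per-driver private structure */
struct foo_gpio {
	struct gpio_chip gpio_chip;
};

static int foo_irq_reqres(struct irq_data *d)
{
	struct foo_gpio *p = irq_data_get_irq_chip_data(d);

	/* claim the line for IRQ use; propagate failure instead of ignoring it */
	if (gpio_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d))) {
		dev_err(p->gpio_chip.dev,
			"unable to lock HW IRQ %lu for IRQ\n",
			irqd_to_hwirq(d));
		return -EINVAL;
	}
	return 0;
}

static void foo_irq_relres(struct irq_data *d)
{
	struct foo_gpio *p = irq_data_get_irq_chip_data(d);

	gpio_unlock_as_irq(&p->gpio_chip, irqd_to_hwirq(d));
}

static struct irq_chip foo_irq_chip = {
	.name			= "foo-gpio",
	/* mask/unmask/set_type are unchanged; masking is left to the core */
	.irq_request_resources	= foo_irq_reqres,
	.irq_release_resources	= foo_irq_relres,
};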
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 3f190e68f973..d974020b78bb 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -67,9 +67,20 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
{
struct adp5588_gpio *dev =
container_of(chip, struct adp5588_gpio, gpio_chip);
+ unsigned bank = ADP5588_BANK(off);
+ unsigned bit = ADP5588_BIT(off);
+ int val;
- return !!(adp5588_gpio_read(dev->client,
- GPIO_DAT_STAT1 + ADP5588_BANK(off)) & ADP5588_BIT(off));
+ mutex_lock(&dev->lock);
+
+ if (dev->dir[bank] & bit)
+ val = dev->dat_out[bank];
+ else
+ val = adp5588_gpio_read(dev->client, GPIO_DAT_STAT1 + bank);
+
+ mutex_unlock(&dev->lock);
+
+ return !!(val & bit);
}
static void adp5588_gpio_set_value(struct gpio_chip *chip,
@@ -386,6 +397,7 @@ static int adp5588_gpio_probe(struct i2c_client *client,
gc->ngpio = ADP5588_MAXGPIO;
gc->label = client->name;
gc->owner = THIS_MODULE;
+ gc->names = pdata->names;
mutex_init(&dev->lock);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index f32357e2d78d..3f6b33ce9bd4 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -28,6 +28,10 @@
#define GPIO_BANK(gpio) ((gpio) >> 5)
#define GPIO_BIT(gpio) ((gpio) & (GPIO_PER_BANK - 1))
+/* There is a GPIO control register for each GPIO */
+#define GPIO_CONTROL(gpio) (0x00000100 + ((gpio) << 2))
+
+/* The remaining registers are per GPIO bank */
#define GPIO_OUT_STATUS(bank) (0x00000000 + ((bank) << 2))
#define GPIO_IN_STATUS(bank) (0x00000020 + ((bank) << 2))
#define GPIO_OUT_SET(bank) (0x00000040 + ((bank) << 2))
@@ -35,7 +39,6 @@
#define GPIO_INT_STATUS(bank) (0x00000080 + ((bank) << 2))
#define GPIO_INT_MASK(bank) (0x000000a0 + ((bank) << 2))
#define GPIO_INT_MSKCLR(bank) (0x000000c0 + ((bank) << 2))
-#define GPIO_CONTROL(bank) (0x00000100 + ((bank) << 2))
#define GPIO_PWD_STATUS(bank) (0x00000500 + ((bank) << 2))
#define GPIO_GPPWR_OFFSET 0x00000520
@@ -80,22 +83,43 @@ static inline struct bcm_kona_gpio *to_kona_gpio(struct gpio_chip *chip)
return container_of(chip, struct bcm_kona_gpio, gpio_chip);
}
-static void bcm_kona_gpio_set_lockcode_bank(void __iomem *reg_base,
- int bank_id, int lockcode)
+static inline void bcm_kona_gpio_write_lock_regs(void __iomem *reg_base,
+ int bank_id, u32 lockcode)
{
writel(BCM_GPIO_PASSWD, reg_base + GPIO_GPPWR_OFFSET);
writel(lockcode, reg_base + GPIO_PWD_STATUS(bank_id));
}
-static inline void bcm_kona_gpio_lock_bank(void __iomem *reg_base, int bank_id)
+static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
+ unsigned gpio)
{
- bcm_kona_gpio_set_lockcode_bank(reg_base, bank_id, LOCK_CODE);
+ u32 val;
+ unsigned long flags;
+ int bank_id = GPIO_BANK(gpio);
+
+ spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+ val |= BIT(gpio);
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+
+ spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
-static inline void bcm_kona_gpio_unlock_bank(void __iomem *reg_base,
- int bank_id)
+static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
+ unsigned gpio)
{
- bcm_kona_gpio_set_lockcode_bank(reg_base, bank_id, UNLOCK_CODE);
+ u32 val;
+ unsigned long flags;
+ int bank_id = GPIO_BANK(gpio);
+
+ spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+ val &= ~BIT(gpio);
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+
+ spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
@@ -110,7 +134,6 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
kona_gpio = to_kona_gpio(chip);
reg_base = kona_gpio->reg_base;
spin_lock_irqsave(&kona_gpio->lock, flags);
- bcm_kona_gpio_unlock_bank(reg_base, bank_id);
/* determine the GPIO pin direction */
val = readl(reg_base + GPIO_CONTROL(gpio));
@@ -127,7 +150,6 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
writel(val, reg_base + reg_offset);
out:
- bcm_kona_gpio_lock_bank(reg_base, bank_id);
spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
@@ -143,7 +165,6 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
kona_gpio = to_kona_gpio(chip);
reg_base = kona_gpio->reg_base;
spin_lock_irqsave(&kona_gpio->lock, flags);
- bcm_kona_gpio_unlock_bank(reg_base, bank_id);
/* determine the GPIO pin direction */
val = readl(reg_base + GPIO_CONTROL(gpio));
@@ -154,32 +175,43 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
GPIO_IN_STATUS(bank_id) : GPIO_OUT_STATUS(bank_id);
val = readl(reg_base + reg_offset);
- bcm_kona_gpio_lock_bank(reg_base, bank_id);
spin_unlock_irqrestore(&kona_gpio->lock, flags);
/* return the specified bit status */
return !!(val & BIT(bit));
}
+static int bcm_kona_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcm_kona_gpio *kona_gpio = to_kona_gpio(chip);
+
+ bcm_kona_gpio_unlock_gpio(kona_gpio, gpio);
+ return 0;
+}
+
+static void bcm_kona_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcm_kona_gpio *kona_gpio = to_kona_gpio(chip);
+
+ bcm_kona_gpio_lock_gpio(kona_gpio, gpio);
+}
+
static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
u32 val;
unsigned long flags;
- int bank_id = GPIO_BANK(gpio);
kona_gpio = to_kona_gpio(chip);
reg_base = kona_gpio->reg_base;
spin_lock_irqsave(&kona_gpio->lock, flags);
- bcm_kona_gpio_unlock_bank(reg_base, bank_id);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_IOTR_MASK;
val |= GPIO_GPCTR0_IOTR_CMD_INPUT;
writel(val, reg_base + GPIO_CONTROL(gpio));
- bcm_kona_gpio_lock_bank(reg_base, bank_id);
spin_unlock_irqrestore(&kona_gpio->lock, flags);
return 0;
@@ -198,7 +230,6 @@ static int bcm_kona_gpio_direction_output(struct gpio_chip *chip,
kona_gpio = to_kona_gpio(chip);
reg_base = kona_gpio->reg_base;
spin_lock_irqsave(&kona_gpio->lock, flags);
- bcm_kona_gpio_unlock_bank(reg_base, bank_id);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_IOTR_MASK;
@@ -210,7 +241,6 @@ static int bcm_kona_gpio_direction_output(struct gpio_chip *chip,
val |= BIT(bit);
writel(val, reg_base + reg_offset);
- bcm_kona_gpio_lock_bank(reg_base, bank_id);
spin_unlock_irqrestore(&kona_gpio->lock, flags);
return 0;
@@ -233,7 +263,6 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
void __iomem *reg_base;
u32 val, res;
unsigned long flags;
- int bank_id = GPIO_BANK(gpio);
kona_gpio = to_kona_gpio(chip);
reg_base = kona_gpio->reg_base;
@@ -257,7 +286,6 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
/* spin lock for read-modify-write of the GPIO register */
spin_lock_irqsave(&kona_gpio->lock, flags);
- bcm_kona_gpio_unlock_bank(reg_base, bank_id);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_DBR_MASK;
@@ -272,7 +300,6 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
writel(val, reg_base + GPIO_CONTROL(gpio));
- bcm_kona_gpio_lock_bank(reg_base, bank_id);
spin_unlock_irqrestore(&kona_gpio->lock, flags);
return 0;
@@ -281,6 +308,8 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
static struct gpio_chip template_chip = {
.label = "bcm-kona-gpio",
.owner = THIS_MODULE,
+ .request = bcm_kona_gpio_request,
+ .free = bcm_kona_gpio_free,
.direction_input = bcm_kona_gpio_direction_input,
.get = bcm_kona_gpio_get,
.direction_output = bcm_kona_gpio_direction_output,
@@ -294,7 +323,7 @@ static void bcm_kona_gpio_irq_ack(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
- int gpio = d->hwirq;
+ unsigned gpio = d->hwirq;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
@@ -303,13 +332,11 @@ static void bcm_kona_gpio_irq_ack(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
spin_lock_irqsave(&kona_gpio->lock, flags);
- bcm_kona_gpio_unlock_bank(reg_base, bank_id);
val = readl(reg_base + GPIO_INT_STATUS(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_STATUS(bank_id));
- bcm_kona_gpio_lock_bank(reg_base, bank_id);
spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
@@ -317,7 +344,7 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
- int gpio = d->hwirq;
+ unsigned gpio = d->hwirq;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
@@ -326,13 +353,11 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
spin_lock_irqsave(&kona_gpio->lock, flags);
- bcm_kona_gpio_unlock_bank(reg_base, bank_id);
val = readl(reg_base + GPIO_INT_MASK(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MASK(bank_id));
- bcm_kona_gpio_lock_bank(reg_base, bank_id);
spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
@@ -340,7 +365,7 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
- int gpio = d->hwirq;
+ unsigned gpio = d->hwirq;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
@@ -349,13 +374,11 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
spin_lock_irqsave(&kona_gpio->lock, flags);
- bcm_kona_gpio_unlock_bank(reg_base, bank_id);
val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MSKCLR(bank_id));
- bcm_kona_gpio_lock_bank(reg_base, bank_id);
spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
@@ -363,11 +386,10 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
- int gpio = d->hwirq;
+ unsigned gpio = d->hwirq;
u32 lvl_type;
u32 val;
unsigned long flags;
- int bank_id = GPIO_BANK(gpio);
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
@@ -394,14 +416,12 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
}
spin_lock_irqsave(&kona_gpio->lock, flags);
- bcm_kona_gpio_unlock_bank(reg_base, bank_id);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_ITR_MASK;
val |= lvl_type << GPIO_GPCTR0_ITR_SHIFT;
writel(val, reg_base + GPIO_CONTROL(gpio));
- bcm_kona_gpio_lock_bank(reg_base, bank_id);
spin_unlock_irqrestore(&kona_gpio->lock, flags);
return 0;
@@ -424,7 +444,6 @@ static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
*/
reg_base = bank->kona_gpio->reg_base;
bank_id = bank->id;
- bcm_kona_gpio_unlock_bank(reg_base, bank_id);
while ((sta = readl(reg_base + GPIO_INT_STATUS(bank_id)) &
(~(readl(reg_base + GPIO_INT_MASK(bank_id)))))) {
@@ -444,28 +463,26 @@ static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
- bcm_kona_gpio_lock_bank(reg_base, bank_id);
-
chained_irq_exit(chip, desc);
}
-static unsigned int bcm_kona_gpio_irq_startup(struct irq_data *d)
+static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq))
+ if (gpio_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq)) {
dev_err(kona_gpio->gpio_chip.dev,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
- bcm_kona_gpio_irq_unmask(d);
+ return -EINVAL;
+ }
return 0;
}
-static void bcm_kona_gpio_irq_shutdown(struct irq_data *d)
+static void bcm_kona_gpio_irq_relres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
- bcm_kona_gpio_irq_mask(d);
gpio_unlock_as_irq(&kona_gpio->gpio_chip, d->hwirq);
}
@@ -475,8 +492,8 @@ static struct irq_chip bcm_gpio_irq_chip = {
.irq_mask = bcm_kona_gpio_irq_mask,
.irq_unmask = bcm_kona_gpio_irq_unmask,
.irq_set_type = bcm_kona_gpio_irq_set_type,
- .irq_startup = bcm_kona_gpio_irq_startup,
- .irq_shutdown = bcm_kona_gpio_irq_shutdown,
+ .irq_request_resources = bcm_kona_gpio_irq_reqres,
+ .irq_release_resources = bcm_kona_gpio_irq_relres,
};
static struct __initconst of_device_id bcm_kona_gpio_of_match[] = {
@@ -531,10 +548,12 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
reg_base = kona_gpio->reg_base;
/* disable interrupts and clear status */
for (i = 0; i < kona_gpio->num_bank; i++) {
- bcm_kona_gpio_unlock_bank(reg_base, i);
+ /* Unlock the entire bank first */
+ bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
- bcm_kona_gpio_lock_bank(reg_base, i);
+ /* Now re-lock the bank */
+ bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
}
}
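With the bank unlock/lock moved out of every accessor and into the new .request/.free hooks, a consumer drives the sequence implicitly; a hypothetical usage sketch (pin number and label are invented):

#include <linux/gpio.h>

static int demo_use_kona_pin(unsigned gpio)
{
	int err = gpio_request(gpio, "demo");	/* -> bcm_kona_gpio_request(): unlocks the pin */

	if (err)
		return err;

	gpio_direction_output(gpio, 1);		/* accessors no longer unlock/relock per call */
	/* ... use the line ... */
	gpio_free(gpio);			/* -> bcm_kona_gpio_free(): re-locks the pin */
	return 0;
}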
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index 3c2ba2ad0ada..e1e861239e95 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -65,6 +65,7 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
}
bgc->gc.base = id * 8;
+ bgc->gc.owner = THIS_MODULE;
platform_set_drvdata(pdev, bgc);
return gpiochip_add(&bgc->gc);
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 7629b4f12b7f..339f9dac591b 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -37,6 +37,8 @@ struct davinci_gpio_regs {
u32 intstat;
};
+typedef struct irq_chip *(*gpio_get_irq_chip_cb_t)(unsigned int irq);
+
#define BINTEN 0x8 /* GPIO Interrupt Per-Bank Enable Register */
#define chip2controller(chip) \
@@ -172,6 +174,27 @@ of_err:
return NULL;
}
+#ifdef CONFIG_OF_GPIO
+static int davinci_gpio_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ struct davinci_gpio_controller *chips = dev_get_drvdata(gc->dev);
+ struct davinci_gpio_platform_data *pdata = dev_get_platdata(gc->dev);
+
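+ /*
+ * Worked example (hypothetical consumer): a two-cell specifier such as
+ * <&gpio 65 0> gives args[0] = 65, which must belong to chips[65 / 32];
+ * the returned hardware offset is 65 % 32 = 1 and *flags is set to 0.
+ */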
+ if (gpiospec->args[0] > pdata->ngpio)
+ return -EINVAL;
+
+ if (gc != &chips[gpiospec->args[0] / 32].chip)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[1];
+
+ return gpiospec->args[0] % 32;
+}
+#endif
+
static int davinci_gpio_probe(struct platform_device *pdev)
{
int i, base;
@@ -236,6 +259,9 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips[i].chip.ngpio = 32;
#ifdef CONFIG_OF_GPIO
+ chips[i].chip.of_gpio_n_cells = 2;
+ chips[i].chip.of_xlate = davinci_gpio_of_xlate;
+ chips[i].chip.dev = dev;
chips[i].chip.of_node = dev->of_node;
#endif
spin_lock_init(&chips[i].lock);
@@ -413,6 +439,26 @@ static const struct irq_domain_ops davinci_gpio_irq_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
+static struct irq_chip *davinci_gpio_get_irq_chip(unsigned int irq)
+{
+ static struct irq_chip_type gpio_unbanked;
+
+ gpio_unbanked = *container_of(irq_get_chip(irq),
+ struct irq_chip_type, chip);
+
+ return &gpio_unbanked.chip;
+}
+
+static struct irq_chip *keystone_gpio_get_irq_chip(unsigned int irq)
+{
+ static struct irq_chip gpio_unbanked;
+
+ gpio_unbanked = *irq_get_chip(irq);
+ return &gpio_unbanked;
+}
+
+static const struct of_device_id davinci_gpio_ids[];
+
/*
* NOTE: for suspend/resume, probably best to make a platform_device with
* suspend_late/resume_resume calls hooking into results of the set_wake()
@@ -423,7 +469,8 @@ static const struct irq_domain_ops davinci_gpio_irq_ops = {
static int davinci_gpio_irq_setup(struct platform_device *pdev)
{
- unsigned gpio, irq, bank;
+ unsigned gpio, bank;
+ int irq;
struct clk *clk;
u32 binten = 0;
unsigned ngpio, bank_irq;
@@ -433,6 +480,18 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
struct davinci_gpio_platform_data *pdata = dev->platform_data;
struct davinci_gpio_regs __iomem *g;
struct irq_domain *irq_domain = NULL;
+ const struct of_device_id *match;
+ struct irq_chip *irq_chip;
+ gpio_get_irq_chip_cb_t gpio_get_irq_chip;
+
+ /*
+ * Use davinci_gpio_get_irq_chip by default to handle the non-DT case.
+ */
+ gpio_get_irq_chip = davinci_gpio_get_irq_chip;
+ match = of_match_device(of_match_ptr(davinci_gpio_ids),
+ dev);
+ if (match)
+ gpio_get_irq_chip = (gpio_get_irq_chip_cb_t)match->data;
ngpio = pdata->ngpio;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -489,8 +548,6 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
* IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs.
*/
if (pdata->gpio_unbanked) {
- static struct irq_chip_type gpio_unbanked;
-
/* pass "bank 0" GPIO IRQs to AINTC */
chips[0].chip.to_irq = gpio_to_irq_unbanked;
chips[0].gpio_irq = bank_irq;
@@ -499,10 +556,9 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
/* AINTC handles mask/unmask; GPIO handles triggering */
irq = bank_irq;
- gpio_unbanked = *container_of(irq_get_chip(irq),
- struct irq_chip_type, chip);
- gpio_unbanked.chip.name = "GPIO-AINTC";
- gpio_unbanked.chip.irq_set_type = gpio_irq_type_unbanked;
+ irq_chip = gpio_get_irq_chip(irq);
+ irq_chip->name = "GPIO-AINTC";
+ irq_chip->irq_set_type = gpio_irq_type_unbanked;
/* default trigger: both edges */
g = gpio2regs(0);
@@ -511,7 +567,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
/* set the direct IRQs up to use that irqchip */
for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) {
- irq_set_chip(irq, &gpio_unbanked.chip);
+ irq_set_chip(irq, irq_chip);
irq_set_handler_data(irq, &chips[gpio / 32]);
irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
}
@@ -554,7 +610,8 @@ done:
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id davinci_gpio_ids[] = {
- { .compatible = "ti,dm6441-gpio", },
+ { .compatible = "ti,keystone-gpio", keystone_gpio_get_irq_chip},
+ { .compatible = "ti,dm6441-gpio", davinci_gpio_get_irq_chip},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, davinci_gpio_ids);
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
new file mode 100644
index 000000000000..ed5711f77e2d
--- /dev/null
+++ b/drivers/gpio/gpio-dwapb.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2011 Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * All enquiries to support@picochip.com
+ */
+#include <linux/basic_mmio_gpio.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define GPIO_SWPORTA_DR 0x00
+#define GPIO_SWPORTA_DDR 0x04
+#define GPIO_SWPORTB_DR 0x0c
+#define GPIO_SWPORTB_DDR 0x10
+#define GPIO_SWPORTC_DR 0x18
+#define GPIO_SWPORTC_DDR 0x1c
+#define GPIO_SWPORTD_DR 0x24
+#define GPIO_SWPORTD_DDR 0x28
+#define GPIO_INTEN 0x30
+#define GPIO_INTMASK 0x34
+#define GPIO_INTTYPE_LEVEL 0x38
+#define GPIO_INT_POLARITY 0x3c
+#define GPIO_INTSTATUS 0x40
+#define GPIO_PORTA_EOI 0x4c
+#define GPIO_EXT_PORTA 0x50
+#define GPIO_EXT_PORTB 0x54
+#define GPIO_EXT_PORTC 0x58
+#define GPIO_EXT_PORTD 0x5c
+
+#define DWAPB_MAX_PORTS 4
+#define GPIO_EXT_PORT_SIZE (GPIO_EXT_PORTB - GPIO_EXT_PORTA)
+#define GPIO_SWPORT_DR_SIZE (GPIO_SWPORTB_DR - GPIO_SWPORTA_DR)
+#define GPIO_SWPORT_DDR_SIZE (GPIO_SWPORTB_DDR - GPIO_SWPORTA_DDR)
+
+struct dwapb_gpio;
+
+struct dwapb_gpio_port {
+ struct bgpio_chip bgc;
+ bool is_registered;
+ struct dwapb_gpio *gpio;
+};
+
+struct dwapb_gpio {
+ struct device *dev;
+ void __iomem *regs;
+ struct dwapb_gpio_port *ports;
+ unsigned int nr_ports;
+ struct irq_domain *domain;
+};
+
+static int dwapb_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+ struct dwapb_gpio_port *port = container_of(bgc, struct
+ dwapb_gpio_port, bgc);
+ struct dwapb_gpio *gpio = port->gpio;
+
+ return irq_find_mapping(gpio->domain, offset);
+}
+
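+/*
+ * The controller has no native both-edges trigger mode: for
+ * IRQ_TYPE_EDGE_BOTH the polarity register is re-armed after every
+ * interrupt so that the next edge of the opposite sense is caught
+ * (see dwapb_irq_handler() and dwapb_irq_set_type() below).
+ */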
+static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
+{
+ u32 v = readl(gpio->regs + GPIO_INT_POLARITY);
+
+ if (gpio_get_value(gpio->ports[0].bgc.gc.base + offs))
+ v &= ~BIT(offs);
+ else
+ v |= BIT(offs);
+
+ writel(v, gpio->regs + GPIO_INT_POLARITY);
+}
+
+static void dwapb_irq_handler(u32 irq, struct irq_desc *desc)
+{
+ struct dwapb_gpio *gpio = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 irq_status = readl_relaxed(gpio->regs + GPIO_INTSTATUS);
+
+ while (irq_status) {
+ int hwirq = fls(irq_status) - 1;
+ int gpio_irq = irq_find_mapping(gpio->domain, hwirq);
+
+ generic_handle_irq(gpio_irq);
+ irq_status &= ~BIT(hwirq);
+
+ if ((irq_get_trigger_type(gpio_irq) & IRQ_TYPE_SENSE_MASK)
+ == IRQ_TYPE_EDGE_BOTH)
+ dwapb_toggle_trigger(gpio, hwirq);
+ }
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(irq_desc_get_irq_data(desc));
+}
+
+static void dwapb_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = igc->private;
+ struct bgpio_chip *bgc = &gpio->ports[0].bgc;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&bgc->lock, flags);
+ val = readl(gpio->regs + GPIO_INTEN);
+ val |= BIT(d->hwirq);
+ writel(val, gpio->regs + GPIO_INTEN);
+ spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+static void dwapb_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = igc->private;
+ struct bgpio_chip *bgc = &gpio->ports[0].bgc;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&bgc->lock, flags);
+ val = readl(gpio->regs + GPIO_INTEN);
+ val &= ~BIT(d->hwirq);
+ writel(val, gpio->regs + GPIO_INTEN);
+ spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+static int dwapb_irq_reqres(struct irq_data *d)
+{
+ struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = igc->private;
+ struct bgpio_chip *bgc = &gpio->ports[0].bgc;
+
+ if (gpio_lock_as_irq(&bgc->gc, irqd_to_hwirq(d))) {
+ dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n",
+ irqd_to_hwirq(d));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void dwapb_irq_relres(struct irq_data *d)
+{
+ struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = igc->private;
+ struct bgpio_chip *bgc = &gpio->ports[0].bgc;
+
+ gpio_unlock_as_irq(&bgc->gc, irqd_to_hwirq(d));
+}
+
+static int dwapb_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = igc->private;
+ struct bgpio_chip *bgc = &gpio->ports[0].bgc;
+ int bit = d->hwirq;
+ unsigned long level, polarity, flags;
+
+ if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
+ IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ return -EINVAL;
+
+ spin_lock_irqsave(&bgc->lock, flags);
+ level = readl(gpio->regs + GPIO_INTTYPE_LEVEL);
+ polarity = readl(gpio->regs + GPIO_INT_POLARITY);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ level |= BIT(bit);
+ dwapb_toggle_trigger(gpio, bit);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ level |= BIT(bit);
+ polarity |= BIT(bit);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ level |= BIT(bit);
+ polarity &= ~BIT(bit);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ level &= ~BIT(bit);
+ polarity |= BIT(bit);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ level &= ~BIT(bit);
+ polarity &= ~BIT(bit);
+ break;
+ }
+
+ writel(level, gpio->regs + GPIO_INTTYPE_LEVEL);
+ writel(polarity, gpio->regs + GPIO_INT_POLARITY);
+ spin_unlock_irqrestore(&bgc->lock, flags);
+
+ return 0;
+}
+
+static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
+ struct dwapb_gpio_port *port)
+{
+ struct gpio_chip *gc = &port->bgc.gc;
+ struct device_node *node = gc->of_node;
+ struct irq_chip_generic *irq_gc;
+ unsigned int hwirq, ngpio = gc->ngpio;
+ struct irq_chip_type *ct;
+ int err, irq;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ dev_warn(gpio->dev, "no irq for bank %s\n",
+ port->bgc.gc.of_node->full_name);
+ return;
+ }
+
+ gpio->domain = irq_domain_add_linear(node, ngpio,
+ &irq_generic_chip_ops, gpio);
+ if (!gpio->domain)
+ return;
+
+ err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 1,
+ "gpio-dwapb", handle_level_irq,
+ IRQ_NOREQUEST, 0,
+ IRQ_GC_INIT_NESTED_LOCK);
+ if (err) {
+ dev_info(gpio->dev, "irq_alloc_domain_generic_chips failed\n");
+ irq_domain_remove(gpio->domain);
+ gpio->domain = NULL;
+ return;
+ }
+
+ irq_gc = irq_get_domain_generic_chip(gpio->domain, 0);
+ if (!irq_gc) {
+ irq_domain_remove(gpio->domain);
+ gpio->domain = NULL;
+ return;
+ }
+
+ irq_gc->reg_base = gpio->regs;
+ irq_gc->private = gpio;
+
+ ct = irq_gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ ct->chip.irq_set_type = dwapb_irq_set_type;
+ ct->chip.irq_enable = dwapb_irq_enable;
+ ct->chip.irq_disable = dwapb_irq_disable;
+ ct->chip.irq_request_resources = dwapb_irq_reqres;
+ ct->chip.irq_release_resources = dwapb_irq_relres;
+ ct->regs.ack = GPIO_PORTA_EOI;
+ ct->regs.mask = GPIO_INTMASK;
+
+ irq_setup_generic_chip(irq_gc, IRQ_MSK(port->bgc.gc.ngpio),
+ IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0);
+
+ irq_set_chained_handler(irq, dwapb_irq_handler);
+ irq_set_handler_data(irq, gpio);
+
+ for (hwirq = 0; hwirq < ngpio; hwirq++)
+ irq_create_mapping(gpio->domain, hwirq);
+
+ port->bgc.gc.to_irq = dwapb_gpio_to_irq;
+}
+
+static void dwapb_irq_teardown(struct dwapb_gpio *gpio)
+{
+ struct dwapb_gpio_port *port = &gpio->ports[0];
+ struct gpio_chip *gc = &port->bgc.gc;
+ unsigned int ngpio = gc->ngpio;
+ irq_hw_number_t hwirq;
+
+ if (!gpio->domain)
+ return;
+
+ for (hwirq = 0; hwirq < ngpio; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(gpio->domain, hwirq));
+
+ irq_domain_remove(gpio->domain);
+ gpio->domain = NULL;
+}
+
+static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
+ struct device_node *port_np,
+ unsigned int offs)
+{
+ struct dwapb_gpio_port *port;
+ u32 port_idx, ngpio;
+ void __iomem *dat, *set, *dirout;
+ int err;
+
+ if (of_property_read_u32(port_np, "reg", &port_idx) ||
+ port_idx >= DWAPB_MAX_PORTS) {
+ dev_err(gpio->dev, "missing/invalid port index for %s\n",
+ port_np->full_name);
+ return -EINVAL;
+ }
+
+ port = &gpio->ports[offs];
+ port->gpio = gpio;
+
+ if (of_property_read_u32(port_np, "snps,nr-gpios", &ngpio)) {
+ dev_info(gpio->dev, "failed to get number of gpios for %s\n",
+ port_np->full_name);
+ ngpio = 32;
+ }
+
+ dat = gpio->regs + GPIO_EXT_PORTA + (port_idx * GPIO_EXT_PORT_SIZE);
+ set = gpio->regs + GPIO_SWPORTA_DR + (port_idx * GPIO_SWPORT_DR_SIZE);
+ dirout = gpio->regs + GPIO_SWPORTA_DDR +
+ (port_idx * GPIO_SWPORT_DDR_SIZE);
+
+ err = bgpio_init(&port->bgc, gpio->dev, 4, dat, set, NULL, dirout,
+ NULL, false);
+ if (err) {
+ dev_err(gpio->dev, "failed to init gpio chip for %s\n",
+ port_np->full_name);
+ return err;
+ }
+
+ port->bgc.gc.ngpio = ngpio;
+ port->bgc.gc.of_node = port_np;
+
+ /*
+ * Only port A can provide interrupts in all configurations of the IP.
+ */
+ if (port_idx == 0 &&
+ of_property_read_bool(port_np, "interrupt-controller"))
+ dwapb_configure_irqs(gpio, port);
+
+ err = gpiochip_add(&port->bgc.gc);
+ if (err)
+ dev_err(gpio->dev, "failed to register gpiochip for %s\n",
+ port_np->full_name);
+ else
+ port->is_registered = true;
+
+ return err;
+}
+
+static void dwapb_gpio_unregister(struct dwapb_gpio *gpio)
+{
+ unsigned int m;
+
+ for (m = 0; m < gpio->nr_ports; ++m)
+ if (gpio->ports[m].is_registered)
+ WARN_ON(gpiochip_remove(&gpio->ports[m].bgc.gc));
+}
+
+static int dwapb_gpio_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct dwapb_gpio *gpio;
+ struct device_node *np;
+ int err;
+ unsigned int offs = 0;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+ gpio->dev = &pdev->dev;
+
+ gpio->nr_ports = of_get_child_count(pdev->dev.of_node);
+ if (!gpio->nr_ports) {
+ err = -EINVAL;
+ goto out_err;
+ }
+ gpio->ports = devm_kzalloc(&pdev->dev, gpio->nr_ports *
+ sizeof(*gpio->ports), GFP_KERNEL);
+ if (!gpio->ports) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gpio->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio->regs)) {
+ err = PTR_ERR(gpio->regs);
+ goto out_err;
+ }
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ err = dwapb_gpio_add_port(gpio, np, offs++);
+ if (err)
+ goto out_unregister;
+ }
+ platform_set_drvdata(pdev, gpio);
+
+ return 0;
+
+out_unregister:
+ dwapb_gpio_unregister(gpio);
+ dwapb_irq_teardown(gpio);
+
+out_err:
+ return err;
+}
+
+static int dwapb_gpio_remove(struct platform_device *pdev)
+{
+ struct dwapb_gpio *gpio = platform_get_drvdata(pdev);
+
+ dwapb_gpio_unregister(gpio);
+ dwapb_irq_teardown(gpio);
+
+ return 0;
+}
+
+static const struct of_device_id dwapb_of_match[] = {
+ { .compatible = "snps,dw-apb-gpio" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dwapb_of_match);
+
+static struct platform_driver dwapb_gpio_driver = {
+ .driver = {
+ .name = "gpio-dwapb",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dwapb_of_match),
+ },
+ .probe = dwapb_gpio_probe,
+ .remove = dwapb_gpio_remove,
+};
+
+module_platform_driver(dwapb_gpio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("Synopsys DesignWare APB GPIO driver");
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 1e98a9873967..8765bd6f48e1 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -99,23 +99,23 @@ static void em_gio_irq_enable(struct irq_data *d)
em_gio_write(p, GIO_IEN, BIT(irqd_to_hwirq(d)));
}
-static unsigned int em_gio_irq_startup(struct irq_data *d)
+static int em_gio_irq_reqres(struct irq_data *d)
{
struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d)))
+ if (gpio_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d))) {
dev_err(p->gpio_chip.dev,
"unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
- em_gio_irq_enable(d);
+ return -EINVAL;
+ }
return 0;
}
-static void em_gio_irq_shutdown(struct irq_data *d)
+static void em_gio_irq_relres(struct irq_data *d)
{
struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
- em_gio_irq_disable(d);
gpio_unlock_as_irq(&p->gpio_chip, irqd_to_hwirq(d));
}
@@ -359,8 +359,8 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->irq_mask = em_gio_irq_disable;
irq_chip->irq_unmask = em_gio_irq_enable;
irq_chip->irq_set_type = em_gio_irq_set_type;
- irq_chip->irq_startup = em_gio_irq_startup;
- irq_chip->irq_shutdown = em_gio_irq_shutdown;
+ irq_chip->irq_request_resources = em_gio_irq_reqres;
+ irq_chip->irq_release_resources = em_gio_irq_relres;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index d2196bf73847..b5dff9e742f8 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -139,7 +139,7 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct bgpio_chip *bgc = to_bgpio_chip(gc);
- return bgc->read_reg(bgc->reg_dat) & bgc->pin2mask(bgc, gpio);
+ return !!(bgc->read_reg(bgc->reg_dat) & bgc->pin2mask(bgc, gpio));
}
static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -488,7 +488,7 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
void __iomem *dirout;
void __iomem *dirin;
unsigned long sz;
- unsigned long flags = 0;
+ unsigned long flags = pdev->id_entry->driver_data;
int err;
struct bgpio_chip *bgc;
struct bgpio_pdata *pdata = dev_get_platdata(dev);
@@ -519,9 +519,6 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
if (err)
return err;
- if (!strcmp(platform_get_device_id(pdev)->name, "basic-mmio-gpio-be"))
- flags |= BGPIOF_BIG_ENDIAN;
-
bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL);
if (!bgc)
return -ENOMEM;
@@ -531,6 +528,8 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
return err;
if (pdata) {
+ if (pdata->label)
+ bgc->gc.label = pdata->label;
bgc->gc.base = pdata->base;
if (pdata->ngpio > 0)
bgc->gc.ngpio = pdata->ngpio;
@@ -549,9 +548,14 @@ static int bgpio_pdev_remove(struct platform_device *pdev)
}
static const struct platform_device_id bgpio_id_table[] = {
- { "basic-mmio-gpio", },
- { "basic-mmio-gpio-be", },
- {},
+ {
+ .name = "basic-mmio-gpio",
+ .driver_data = 0,
+ }, {
+ .name = "basic-mmio-gpio-be",
+ .driver_data = BGPIOF_BIG_ENDIAN,
+ },
+ { }
};
MODULE_DEVICE_TABLE(platform, bgpio_id_table);
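Since the endianness flag now travels via driver_data into bgpio_pdev_probe(), a board file could instantiate a big-endian instance along these lines (the address, size and label are invented; only the "dat" resource is shown, which is the minimum the probe accepts):

#include <linux/basic_mmio_gpio.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource demo_res[] = {
	DEFINE_RES_MEM_NAMED(0xf0001000, 0x4, "dat"),	/* hypothetical register */
};

static struct bgpio_pdata demo_pdata = {
	.label	= "demo-be-gpio",
	.base	= -1,		/* dynamic gpio base */
	.ngpio	= 32,
};

static struct platform_device demo_gpio_dev = {
	.name		= "basic-mmio-gpio-be",	/* probe reads BGPIOF_BIG_ENDIAN from the id table */
	.id		= -1,
	.resource	= demo_res,
	.num_resources	= ARRAY_SIZE(demo_res),
	.dev		= {
		.platform_data	= &demo_pdata,
	},
};

Board code would then register it with platform_device_register(&demo_gpio_dev).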
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index f5bf3c38bca6..e73c6755a5eb 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -1,5 +1,5 @@
/*
- * Intel ICH6-10, Series 5 and 6 GPIO driver
+ * Intel ICH6-10, Series 5 and 6, Atom C2000 (Avoton/Rangeley) GPIO driver
*
* Copyright (C) 2010 Extreme Engineering Solutions.
*
@@ -55,6 +55,16 @@ static const u8 ichx_reglen[3] = {
0x30, 0x10, 0x10,
};
+static const u8 avoton_regs[4][3] = {
+ {0x00, 0x80, 0x00},
+ {0x04, 0x84, 0x00},
+ {0x08, 0x88, 0x00},
+};
+
+static const u8 avoton_reglen[3] = {
+ 0x10, 0x10, 0x00,
+};
+
#define ICHX_WRITE(val, reg, base_res) outl(val, (reg) + (base_res)->start)
#define ICHX_READ(reg, base_res) inl((reg) + (base_res)->start)
@@ -62,6 +72,13 @@ struct ichx_desc {
/* Max GPIO pins the chipset can have */
uint ngpio;
+ /* chipset registers */
+ const u8 (*regs)[3];
+ const u8 *reglen;
+
+ /* GPO_BLINK is available on this chipset */
+ bool have_blink;
+
/* Whether the chipset has GPIO in GPE0_STS in the PM IO region */
bool uses_gpe0;
@@ -71,6 +88,12 @@ struct ichx_desc {
/* Some chipsets have quirks, let these use their own request/get */
int (*request)(struct gpio_chip *chip, unsigned offset);
int (*get)(struct gpio_chip *chip, unsigned offset);
+
+ /*
+ * Some chipsets don't let reading output values on GPIO_LVL register
+ * this option allows driver caching written output values
+ */
+ bool use_outlvl_cache;
};
static struct {
@@ -82,6 +105,7 @@ static struct {
struct ichx_desc *desc; /* Pointer to chipset-specific description */
u32 orig_gpio_ctrl; /* Orig CTRL value, used to restore on exit */
u8 use_gpio; /* Which GPIO groups are usable */
+ int outlvl_cache[3]; /* cached output values */
} ichx_priv;
static int modparam_gpiobase = -1; /* dynamic */
@@ -99,13 +123,23 @@ static int ichx_write_bit(int reg, unsigned nr, int val, int verify)
spin_lock_irqsave(&ichx_priv.lock, flags);
- data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ if (reg == GPIO_LVL && ichx_priv.desc->use_outlvl_cache)
+ data = ichx_priv.outlvl_cache[reg_nr];
+ else
+ data = ICHX_READ(ichx_priv.desc->regs[reg][reg_nr],
+ ichx_priv.gpio_base);
+
if (val)
data |= 1 << bit;
else
data &= ~(1 << bit);
- ICHX_WRITE(data, ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
- tmp = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ ICHX_WRITE(data, ichx_priv.desc->regs[reg][reg_nr],
+ ichx_priv.gpio_base);
+ if (reg == GPIO_LVL && ichx_priv.desc->use_outlvl_cache)
+ ichx_priv.outlvl_cache[reg_nr] = data;
+
+ tmp = ICHX_READ(ichx_priv.desc->regs[reg][reg_nr],
+ ichx_priv.gpio_base);
if (verify && data != tmp)
ret = -EPERM;
@@ -123,7 +157,11 @@ static int ichx_read_bit(int reg, unsigned nr)
spin_lock_irqsave(&ichx_priv.lock, flags);
- data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ data = ICHX_READ(ichx_priv.desc->regs[reg][reg_nr],
+ ichx_priv.gpio_base);
+
+ if (reg == GPIO_LVL && ichx_priv.desc->use_outlvl_cache)
+ data = ichx_priv.outlvl_cache[reg_nr] | data;
spin_unlock_irqrestore(&ichx_priv.lock, flags);
@@ -151,7 +189,7 @@ static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
int val)
{
/* Disable blink hardware which is available for GPIOs from 0 to 31. */
- if (nr < 32)
+ if (nr < 32 && ichx_priv.desc->have_blink)
ichx_write_bit(GPO_BLINK, nr, 0, 0);
/* Set GPIO output value. */
@@ -266,6 +304,7 @@ static struct ichx_desc ich6_desc = {
.uses_gpe0 = true,
.ngpio = 50,
+ .have_blink = true,
};
/* Intel 3100 */
@@ -290,24 +329,49 @@ static struct ichx_desc i3100_desc = {
/* ICH7 and ICH8-based */
static struct ichx_desc ich7_desc = {
.ngpio = 50,
+ .have_blink = true,
+ .regs = ichx_regs,
+ .reglen = ichx_reglen,
};
/* ICH9-based */
static struct ichx_desc ich9_desc = {
.ngpio = 61,
+ .have_blink = true,
+ .regs = ichx_regs,
+ .reglen = ichx_reglen,
};
/* ICH10-based - Consumer/corporate versions have different amount of GPIO */
static struct ichx_desc ich10_cons_desc = {
.ngpio = 61,
+ .have_blink = true,
+ .regs = ichx_regs,
+ .reglen = ichx_reglen,
};
static struct ichx_desc ich10_corp_desc = {
.ngpio = 72,
+ .have_blink = true,
+ .regs = ichx_regs,
+ .reglen = ichx_reglen,
};
/* Intel 5 series, 6 series, 3400 series, and C200 series */
static struct ichx_desc intel5_desc = {
.ngpio = 76,
+ .regs = ichx_regs,
+ .reglen = ichx_reglen,
+};
+
+/* Avoton */
+static struct ichx_desc avoton_desc = {
+ /* Avoton has only 59 GPIOs, but we assume the first register set
+ * (Core) has 32 pins instead of 31 so the layout stays gpio-ich compatible.
+ */
+ .ngpio = 60,
+ .regs = avoton_regs,
+ .reglen = avoton_reglen,
+ .use_outlvl_cache = true,
};
static int ichx_gpio_request_regions(struct resource *res_base,
@@ -318,11 +382,12 @@ static int ichx_gpio_request_regions(struct resource *res_base,
if (!res_base || !res_base->start || !res_base->end)
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(ichx_regs[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(ichx_priv.desc->regs[0]); i++) {
if (!(use_gpio & (1 << i)))
continue;
- if (!request_region(res_base->start + ichx_regs[0][i],
- ichx_reglen[i], name))
+ if (!request_region(
+ res_base->start + ichx_priv.desc->regs[0][i],
+ ichx_priv.desc->reglen[i], name))
goto request_err;
}
return 0;
@@ -332,8 +397,8 @@ request_err:
for (i--; i >= 0; i--) {
if (!(use_gpio & (1 << i)))
continue;
- release_region(res_base->start + ichx_regs[0][i],
- ichx_reglen[i]);
+ release_region(res_base->start + ichx_priv.desc->regs[0][i],
+ ichx_priv.desc->reglen[i]);
}
return -EBUSY;
}
@@ -342,11 +407,11 @@ static void ichx_gpio_release_regions(struct resource *res_base, u8 use_gpio)
{
int i;
- for (i = 0; i < ARRAY_SIZE(ichx_regs[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(ichx_priv.desc->regs[0]); i++) {
if (!(use_gpio & (1 << i)))
continue;
- release_region(res_base->start + ichx_regs[0][i],
- ichx_reglen[i]);
+ release_region(res_base->start + ichx_priv.desc->regs[0][i],
+ ichx_priv.desc->reglen[i]);
}
}
@@ -383,6 +448,9 @@ static int ichx_gpio_probe(struct platform_device *pdev)
case ICH_V10CONS_GPIO:
ichx_priv.desc = &ich10_cons_desc;
break;
+ case AVOTON_GPIO:
+ ichx_priv.desc = &avoton_desc;
+ break;
default:
return -ENODEV;
}
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index e585163f1ad5..118a6bf455d9 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -1,7 +1,7 @@
/*
- * Moorestown platform Langwell chip GPIO driver
+ * Intel MID GPIO driver
*
- * Copyright (c) 2008, 2009, 2013, Intel Corporation.
+ * Copyright (c) 2008-2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
@@ -235,23 +231,23 @@ static void intel_mid_irq_mask(struct irq_data *d)
{
}
-static unsigned int intel_mid_irq_startup(struct irq_data *d)
+static int intel_mid_irq_reqres(struct irq_data *d)
{
struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&priv->chip, irqd_to_hwirq(d)))
+ if (gpio_lock_as_irq(&priv->chip, irqd_to_hwirq(d))) {
dev_err(priv->chip.dev,
"unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
- intel_mid_irq_unmask(d);
+ return -EINVAL;
+ }
return 0;
}
-static void intel_mid_irq_shutdown(struct irq_data *d)
+static void intel_mid_irq_relres(struct irq_data *d)
{
struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d);
- intel_mid_irq_mask(d);
gpio_unlock_as_irq(&priv->chip, irqd_to_hwirq(d));
}
@@ -260,8 +256,8 @@ static struct irq_chip intel_mid_irqchip = {
.irq_mask = intel_mid_irq_mask,
.irq_unmask = intel_mid_irq_unmask,
.irq_set_type = intel_mid_irq_type,
- .irq_startup = intel_mid_irq_startup,
- .irq_shutdown = intel_mid_irq_shutdown,
+ .irq_request_resources = intel_mid_irq_reqres,
+ .irq_release_resources = intel_mid_irq_relres,
};
static const struct intel_mid_gpio_ddata gpio_lincroft = {
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index c22a61be3a9c..0a5e9d3f308c 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -111,6 +111,8 @@ static int iop3xx_gpio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
return gpiochip_add(&iop3xx_chip);
}
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 66b18535b5ae..9a82a9074a2c 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -188,7 +188,7 @@ static int lp_irq_type(struct irq_data *d, unsigned type)
static int lp_gpio_get(struct gpio_chip *chip, unsigned offset)
{
unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- return inl(reg) & IN_LVL_BIT;
+ return !!(inl(reg) & IN_LVL_BIT);
}
static void lp_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -301,23 +301,23 @@ static void lp_irq_disable(struct irq_data *d)
spin_unlock_irqrestore(&lg->lock, flags);
}
-static unsigned int lp_irq_startup(struct irq_data *d)
+static int lp_irq_reqres(struct irq_data *d)
{
struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&lg->chip, irqd_to_hwirq(d)))
+ if (gpio_lock_as_irq(&lg->chip, irqd_to_hwirq(d))) {
dev_err(lg->chip.dev,
"unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
- lp_irq_enable(d);
+ return -EINVAL;
+ }
return 0;
}
-static void lp_irq_shutdown(struct irq_data *d)
+static void lp_irq_relres(struct irq_data *d)
{
struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
- lp_irq_disable(d);
gpio_unlock_as_irq(&lg->chip, irqd_to_hwirq(d));
}
@@ -328,8 +328,8 @@ static struct irq_chip lp_irqchip = {
.irq_enable = lp_irq_enable,
.irq_disable = lp_irq_disable,
.irq_set_type = lp_irq_type,
- .irq_startup = lp_irq_startup,
- .irq_shutdown = lp_irq_shutdown,
+ .irq_request_resources = lp_irq_reqres,
+ .irq_release_resources = lp_irq_relres,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 36cb290764b6..7c36f2b0983d 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -622,6 +622,13 @@ static int max732x_probe(struct i2c_client *client,
goto out_failed;
}
+ if (nr_port > 8 && !chip->client_dummy) {
+ dev_err(&client->dev,
+ "Failed to allocate second group I2C device\n");
+ ret = -ENODEV;
+ goto out_failed;
+ }
+
mutex_init(&chip->lock);
max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
@@ -647,6 +654,8 @@ static int max732x_probe(struct i2c_client *client,
return 0;
out_failed:
+ if (chip->client_dummy)
+ i2c_unregister_device(chip->client_dummy);
max732x_irq_teardown(chip);
return ret;
}
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 1ac288ea810d..99a68310e7c0 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -173,7 +173,7 @@ static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg)
tx[0] = mcp->addr | 0x01;
tx[1] = reg;
- status = spi_write_then_read(mcp->data, tx, sizeof tx, rx, sizeof rx);
+ status = spi_write_then_read(mcp->data, tx, sizeof(tx), rx, sizeof(rx));
return (status < 0) ? status : rx[0];
}
@@ -184,7 +184,7 @@ static int mcp23s08_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
tx[0] = mcp->addr;
tx[1] = reg;
tx[2] = val;
- return spi_write_then_read(mcp->data, tx, sizeof tx, NULL, 0);
+ return spi_write_then_read(mcp->data, tx, sizeof(tx), NULL, 0);
}
static int
@@ -193,13 +193,13 @@ mcp23s08_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
u8 tx[2], *tmp;
int status;
- if ((n + reg) > sizeof mcp->cache)
+ if ((n + reg) > sizeof(mcp->cache))
return -EINVAL;
tx[0] = mcp->addr | 0x01;
tx[1] = reg;
tmp = (u8 *)vals;
- status = spi_write_then_read(mcp->data, tx, sizeof tx, tmp, n);
+ status = spi_write_then_read(mcp->data, tx, sizeof(tx), tmp, n);
if (status >= 0) {
while (n--)
vals[n] = tmp[n]; /* expand to 16bit */
@@ -214,7 +214,7 @@ static int mcp23s17_read(struct mcp23s08 *mcp, unsigned reg)
tx[0] = mcp->addr | 0x01;
tx[1] = reg << 1;
- status = spi_write_then_read(mcp->data, tx, sizeof tx, rx, sizeof rx);
+ status = spi_write_then_read(mcp->data, tx, sizeof(tx), rx, sizeof(rx));
return (status < 0) ? status : (rx[0] | (rx[1] << 8));
}
@@ -226,7 +226,7 @@ static int mcp23s17_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
tx[1] = reg << 1;
tx[2] = val;
tx[3] = val >> 8;
- return spi_write_then_read(mcp->data, tx, sizeof tx, NULL, 0);
+ return spi_write_then_read(mcp->data, tx, sizeof(tx), NULL, 0);
}
static int
@@ -235,12 +235,12 @@ mcp23s17_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
u8 tx[2];
int status;
- if ((n + reg) > sizeof mcp->cache)
+ if ((n + reg) > sizeof(mcp->cache))
return -EINVAL;
tx[0] = mcp->addr | 0x01;
tx[1] = reg << 1;
- status = spi_write_then_read(mcp->data, tx, sizeof tx,
+ status = spi_write_then_read(mcp->data, tx, sizeof(tx),
(u8 *)vals, n * 2);
if (status >= 0) {
while (n--)
@@ -440,24 +440,24 @@ static void mcp23s08_irq_bus_unlock(struct irq_data *data)
mutex_unlock(&mcp->irq_lock);
}
-static unsigned int mcp23s08_irq_startup(struct irq_data *data)
+static int mcp23s08_irq_reqres(struct irq_data *data)
{
struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
- if (gpio_lock_as_irq(&mcp->chip, data->hwirq))
+ if (gpio_lock_as_irq(&mcp->chip, data->hwirq)) {
dev_err(mcp->chip.dev,
"unable to lock HW IRQ %lu for IRQ usage\n",
data->hwirq);
+ return -EINVAL;
+ }
- mcp23s08_irq_unmask(data);
return 0;
}
-static void mcp23s08_irq_shutdown(struct irq_data *data)
+static void mcp23s08_irq_relres(struct irq_data *data)
{
struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
- mcp23s08_irq_mask(data);
gpio_unlock_as_irq(&mcp->chip, data->hwirq);
}
@@ -468,8 +468,8 @@ static struct irq_chip mcp23s08_irq_chip = {
.irq_set_type = mcp23s08_irq_set_type,
.irq_bus_lock = mcp23s08_irq_bus_lock,
.irq_bus_sync_unlock = mcp23s08_irq_bus_unlock,
- .irq_startup = mcp23s08_irq_startup,
- .irq_shutdown = mcp23s08_irq_shutdown,
+ .irq_request_resources = mcp23s08_irq_reqres,
+ .irq_release_resources = mcp23s08_irq_relres,
};
static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
@@ -567,7 +567,7 @@ static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
(mcp->cache[MCP_GPIO] & mask) ? "hi" : "lo",
(mcp->cache[MCP_GPPU] & mask) ? "up" : " ");
/* NOTE: ignoring the irq-related registers */
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
}
done:
mutex_unlock(&mcp->lock);
@@ -789,7 +789,7 @@ static int mcp230xx_probe(struct i2c_client *client,
pullups = pdata->chip[0].pullups;
}
- mcp = kzalloc(sizeof *mcp, GFP_KERNEL);
+ mcp = kzalloc(sizeof(*mcp), GFP_KERNEL);
if (!mcp)
return -ENOMEM;
@@ -925,7 +925,7 @@ static int mcp23s08_probe(struct spi_device *spi)
base = pdata->base;
}
- data = kzalloc(sizeof *data + chips * sizeof(struct mcp23s08),
+ data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08),
GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c
index 2af990022cc9..ccd45704e5fd 100644
--- a/drivers/gpio/gpio-moxart.c
+++ b/drivers/gpio/gpio-moxart.c
@@ -48,25 +48,6 @@ static void moxart_gpio_free(struct gpio_chip *chip, unsigned offset)
pinctrl_free_gpio(offset);
}
-static int moxart_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
- void __iomem *ioaddr = gc->base + GPIO_PIN_DIRECTION;
-
- writel(readl(ioaddr) & ~BIT(offset), ioaddr);
- return 0;
-}
-
-static int moxart_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
- void __iomem *ioaddr = gc->base + GPIO_PIN_DIRECTION;
-
- writel(readl(ioaddr) | BIT(offset), ioaddr);
- return 0;
-}
-
static void moxart_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
@@ -78,7 +59,6 @@ static void moxart_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
else
reg = reg & ~BIT(offset);
-
writel(reg, ioaddr);
}
@@ -93,6 +73,26 @@ static int moxart_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(readl(gc->base + GPIO_DATA_IN) & BIT(offset));
}
+static int moxart_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
+ void __iomem *ioaddr = gc->base + GPIO_PIN_DIRECTION;
+
+ writel(readl(ioaddr) & ~BIT(offset), ioaddr);
+ return 0;
+}
+
+static int moxart_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
+ void __iomem *ioaddr = gc->base + GPIO_PIN_DIRECTION;
+
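+ /* Drive the requested level first so the pin does not glitch when its
+ * direction is flipped to output just below. */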
+ moxart_gpio_set(chip, offset, value);
+ writel(readl(ioaddr) | BIT(offset), ioaddr);
+ return 0;
+}
+
static struct gpio_chip moxart_template_chip = {
.label = "moxart-gpio",
.request = moxart_gpio_request,
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 3b1fd1ce460f..d42509422394 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -44,6 +44,7 @@
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/irqchip/chained_irq.h>
/*
* GPIO unit register offsets.
@@ -438,12 +439,15 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
u32 cause, type;
int i;
if (mvchip == NULL)
return;
+ chained_irq_enter(chip, desc);
+
cause = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) &
readl_relaxed(mvebu_gpioreg_level_mask(mvchip));
cause |= readl_relaxed(mvebu_gpioreg_edge_cause(mvchip)) &
@@ -466,8 +470,11 @@ static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
polarity ^= 1 << i;
writel_relaxed(polarity, mvebu_gpioreg_in_pol(mvchip));
}
+
generic_handle_irq(irq);
}
+
+ chained_irq_exit(chip, desc);
}
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 532bcb336eff..8ffdd7d2bade 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -214,7 +214,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
ct->regs.mask = PINCTRL_IRQEN(port);
- irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
+ irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
+ IRQ_NOREQUEST, 0);
}
static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 424319061e09..19b886c21b1d 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1214,24 +1214,10 @@ static int omap_gpio_probe(struct platform_device *pdev)
/* Static mapping, never released */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!res)) {
- dev_err(dev, "Invalid mem resource\n");
- irq_domain_remove(bank->domain);
- return -ENODEV;
- }
-
- if (!devm_request_mem_region(dev, res->start, resource_size(res),
- pdev->name)) {
- dev_err(dev, "Region already claimed\n");
+ bank->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(bank->base)) {
irq_domain_remove(bank->domain);
- return -EBUSY;
- }
-
- bank->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!bank->base) {
- dev_err(dev, "Could not ioremap\n");
- irq_domain_remove(bank->domain);
- return -ENOMEM;
+ return PTR_ERR(bank->base);
}
platform_set_drvdata(pdev, bank);
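
The gpio-omap probe cleanup works because devm_ioremap_resource() already performs the three manual steps it replaces (NULL resource check, devm_request_mem_region(), devm_ioremap()) and logs its own error messages, so the caller only needs an IS_ERR() check. The generic idiom, sketched:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* error already reported by the helper */

	/* ... rest of probe ... */
	return 0;
}
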
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 019b23b955a2..d550d8e58705 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1,5 +1,5 @@
/*
- * PCA953x 4/8/16 bit I/O ports
+ * PCA953x 4/8/16/24/40 bit I/O ports
*
* Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
* Copyright (C) 2007 Marvell International Ltd.
@@ -59,6 +59,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pca9557", 8 | PCA953X_TYPE, },
{ "pca9574", 8 | PCA957X_TYPE | PCA_INT, },
{ "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
+ { "pca9698", 40 | PCA953X_TYPE, },
{ "max7310", 8 | PCA953X_TYPE, },
{ "max7312", 16 | PCA953X_TYPE | PCA_INT, },
@@ -68,6 +69,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
{ "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
+ { "xra1202", 8 | PCA953X_TYPE },
{ }
};
MODULE_DEVICE_TABLE(i2c, pca953x_id);
@@ -625,11 +627,12 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, u32 *invert)
const __be32 *val;
int size;
+ *gpio_base = -1;
+
node = client->dev.of_node;
if (node == NULL)
return;
- *gpio_base = -1;
val = of_get_property(node, "linux,gpio-base", &size);
WARN(val, "%s: device-tree property 'linux,gpio-base' is deprecated!", __func__);
if (val) {
@@ -812,6 +815,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "nxp,pca9557", },
{ .compatible = "nxp,pca9574", },
{ .compatible = "nxp,pca9575", },
+ { .compatible = "nxp,pca9698", },
{ .compatible = "maxim,max7310", },
{ .compatible = "maxim,max7312", },
@@ -822,6 +826,8 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "ti,tca6408", },
{ .compatible = "ti,tca6416", },
{ .compatible = "ti,tca6424", },
+
+ { .compatible = "exar,xra1202", },
{ }
};
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 9656c196772e..83a156397474 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -138,9 +138,6 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
unsigned long flags;
spin_lock_irqsave(&chip->spinlock, flags);
- pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
- pm |= (1 << nr);
- iowrite32(pm, &chip->reg->pm);
reg_val = ioread32(&chip->reg->po);
if (val)
@@ -148,6 +145,11 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
else
reg_val &= ~(1 << nr);
iowrite32(reg_val, &chip->reg->po);
+
+ pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
+ pm |= (1 << nr);
+ iowrite32(pm, &chip->reg->pm);
+
spin_unlock_irqrestore(&chip->spinlock, flags);
return 0;
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index b4d42112d02d..b0f475243cef 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
@@ -53,7 +52,6 @@ struct pl061_gpio {
spinlock_t lock;
void __iomem *base;
- struct irq_domain *domain;
struct gpio_chip gc;
#ifdef CONFIG_PM
@@ -137,19 +135,14 @@ static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
writeb(!!value << offset, chip->base + (1 << (offset + 2)));
}
-static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
-{
- struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
-
- return irq_create_mapping(chip->domain, offset);
-}
-
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
{
- struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
int offset = irqd_to_hwirq(d);
unsigned long flags;
u8 gpiois, gpioibe, gpioiev;
+ u8 bit = BIT(offset);
if (offset < 0 || offset >= PL061_GPIO_NR)
return -EINVAL;
@@ -157,30 +150,31 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
spin_lock_irqsave(&chip->lock, flags);
gpioiev = readb(chip->base + GPIOIEV);
-
gpiois = readb(chip->base + GPIOIS);
+ gpioibe = readb(chip->base + GPIOIBE);
+
if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
- gpiois |= 1 << offset;
+ gpiois |= bit;
if (trigger & IRQ_TYPE_LEVEL_HIGH)
- gpioiev |= 1 << offset;
+ gpioiev |= bit;
else
- gpioiev &= ~(1 << offset);
+ gpioiev &= ~bit;
} else
- gpiois &= ~(1 << offset);
- writeb(gpiois, chip->base + GPIOIS);
+ gpiois &= ~bit;
- gpioibe = readb(chip->base + GPIOIBE);
if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
- gpioibe |= 1 << offset;
+		/* Setting this bit causes GPIOIEV to be ignored */
+ gpioibe |= bit;
else {
- gpioibe &= ~(1 << offset);
+ gpioibe &= ~bit;
if (trigger & IRQ_TYPE_EDGE_RISING)
- gpioiev |= 1 << offset;
+ gpioiev |= bit;
else if (trigger & IRQ_TYPE_EDGE_FALLING)
- gpioiev &= ~(1 << offset);
+ gpioiev &= ~bit;
}
- writeb(gpioibe, chip->base + GPIOIBE);
+ writeb(gpiois, chip->base + GPIOIS);
+ writeb(gpioibe, chip->base + GPIOIBE);
writeb(gpioiev, chip->base + GPIOIEV);
spin_unlock_irqrestore(&chip->lock, flags);
@@ -192,7 +186,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
{
unsigned long pending;
int offset;
- struct pl061_gpio *chip = irq_desc_get_handler_data(desc);
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
chained_irq_enter(irqchip, desc);
@@ -201,7 +196,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
writeb(pending, chip->base + GPIOIC);
if (pending) {
for_each_set_bit(offset, &pending, PL061_GPIO_NR)
- generic_handle_irq(pl061_to_irq(&chip->gc, offset));
+ generic_handle_irq(irq_find_mapping(gc->irqdomain,
+ offset));
}
chained_irq_exit(irqchip, desc);
@@ -209,7 +205,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
static void pl061_irq_mask(struct irq_data *d)
{
- struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
u8 mask = 1 << (irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
@@ -221,7 +218,8 @@ static void pl061_irq_mask(struct irq_data *d)
static void pl061_irq_unmask(struct irq_data *d)
{
- struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
u8 mask = 1 << (irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
@@ -232,30 +230,12 @@ static void pl061_irq_unmask(struct irq_data *d)
}
static struct irq_chip pl061_irqchip = {
- .name = "pl061 gpio",
+ .name = "pl061",
.irq_mask = pl061_irq_mask,
.irq_unmask = pl061_irq_unmask,
.irq_set_type = pl061_irq_type,
};
-static int pl061_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct pl061_gpio *chip = d->host_data;
-
- irq_set_chip_and_handler_name(irq, &pl061_irqchip, handle_simple_irq,
- "pl061");
- irq_set_chip_data(irq, chip);
- irq_set_irq_type(irq, IRQ_TYPE_NONE);
-
- return 0;
-}
-
-static const struct irq_domain_ops pl061_domain_ops = {
- .map = pl061_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
@@ -270,21 +250,18 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
if (pdata) {
chip->gc.base = pdata->gpio_base;
irq_base = pdata->irq_base;
- if (irq_base <= 0)
+ if (irq_base <= 0) {
+ dev_err(&adev->dev, "invalid IRQ base in pdata\n");
return -ENODEV;
+ }
} else {
chip->gc.base = -1;
irq_base = 0;
}
- if (!devm_request_mem_region(dev, adev->res.start,
- resource_size(&adev->res), "pl061"))
- return -EBUSY;
-
- chip->base = devm_ioremap(dev, adev->res.start,
- resource_size(&adev->res));
- if (!chip->base)
- return -ENOMEM;
+ chip->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
spin_lock_init(&chip->lock);
@@ -294,7 +271,6 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
chip->gc.direction_output = pl061_direction_output;
chip->gc.get = pl061_get_value;
chip->gc.set = pl061_set_value;
- chip->gc.to_irq = pl061_to_irq;
chip->gc.ngpio = PL061_GPIO_NR;
chip->gc.label = dev_name(dev);
chip->gc.dev = dev;
@@ -309,16 +285,20 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
*/
writeb(0, chip->base + GPIOIE); /* disable irqs */
irq = adev->irq[0];
- if (irq < 0)
+ if (irq < 0) {
+ dev_err(&adev->dev, "invalid IRQ\n");
return -ENODEV;
+ }
- irq_set_chained_handler(irq, pl061_irq_handler);
- irq_set_handler_data(irq, chip);
-
- chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR,
- irq_base, &pl061_domain_ops, chip);
- if (!chip->domain)
- return -ENODEV;
+ ret = gpiochip_irqchip_add(&chip->gc, &pl061_irqchip,
+ irq_base, handle_simple_irq,
+ IRQ_TYPE_NONE);
+ if (ret) {
+ dev_info(&adev->dev, "could not add irqchip\n");
+ return ret;
+ }
+ gpiochip_set_chained_irqchip(&chip->gc, &pl061_irqchip,
+ irq, pl061_irq_handler);
for (i = 0; i < PL061_GPIO_NR; i++) {
if (pdata) {
@@ -331,6 +311,8 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
}
amba_set_drvdata(adev, chip);
+ dev_info(&adev->dev, "PL061 GPIO chip @%pa registered\n",
+ &adev->res.start);
return 0;
}
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 122b776fdc0b..9b423173ab50 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -97,7 +97,7 @@ static int rc5t583_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
- if ((offset >= 0) && (offset < 8))
+ if (offset < RC5T583_MAX_GPIO)
return rc5t583_gpio->rc5t583->irq_base +
RC5T583_IRQ_GPIO0 + offset;
return -EINVAL;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index ca76ce751540..03c91482432c 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -356,12 +356,13 @@ static int gpio_rcar_probe(struct platform_device *pdev)
struct resource *io, *irq;
struct gpio_chip *gpio_chip;
struct irq_chip *irq_chip;
- const char *name = dev_name(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ const char *name = dev_name(dev);
int ret;
- p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
+ p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
if (!p) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
+ dev_err(dev, "failed to allocate driver data\n");
ret = -ENOMEM;
goto err0;
}
@@ -380,15 +381,14 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!io || !irq) {
- dev_err(&pdev->dev, "missing IRQ or IOMEM\n");
+ dev_err(dev, "missing IRQ or IOMEM\n");
ret = -EINVAL;
goto err0;
}
- p->base = devm_ioremap_nocache(&pdev->dev, io->start,
- resource_size(io));
+ p->base = devm_ioremap_nocache(dev, io->start, resource_size(io));
if (!p->base) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ dev_err(dev, "failed to remap I/O memory\n");
ret = -ENXIO;
goto err0;
}
@@ -402,7 +402,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip->set = gpio_rcar_set;
gpio_chip->to_irq = gpio_rcar_to_irq;
gpio_chip->label = name;
- gpio_chip->dev = &pdev->dev;
+ gpio_chip->dev = dev;
gpio_chip->owner = THIS_MODULE;
gpio_chip->base = p->config.gpio_base;
gpio_chip->ngpio = p->config.number_of_pins;
@@ -421,30 +421,30 @@ static int gpio_rcar_probe(struct platform_device *pdev)
&gpio_rcar_irq_domain_ops, p);
if (!p->irq_domain) {
ret = -ENXIO;
- dev_err(&pdev->dev, "cannot initialize irq domain\n");
+ dev_err(dev, "cannot initialize irq domain\n");
goto err0;
}
- if (devm_request_irq(&pdev->dev, irq->start,
- gpio_rcar_irq_handler, IRQF_SHARED, name, p)) {
- dev_err(&pdev->dev, "failed to request IRQ\n");
+ if (devm_request_irq(dev, irq->start, gpio_rcar_irq_handler,
+ IRQF_SHARED, name, p)) {
+ dev_err(dev, "failed to request IRQ\n");
ret = -ENOENT;
goto err1;
}
ret = gpiochip_add(gpio_chip);
if (ret) {
- dev_err(&pdev->dev, "failed to add GPIO controller\n");
+ dev_err(dev, "failed to add GPIO controller\n");
goto err1;
}
- dev_info(&pdev->dev, "driving %d GPIOs\n", p->config.number_of_pins);
+ dev_info(dev, "driving %d GPIOs\n", p->config.number_of_pins);
/* warn in case of mismatch if irq base is specified */
if (p->config.irq_base) {
ret = irq_find_mapping(p->irq_domain, 0);
if (p->config.irq_base != ret)
- dev_warn(&pdev->dev, "irq base mismatch (%u/%u)\n",
+ dev_warn(dev, "irq base mismatch (%u/%u)\n",
p->config.irq_base, ret);
}
@@ -452,7 +452,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
ret = gpiochip_add_pin_range(gpio_chip, p->config.pctl_name, 0,
gpio_chip->base, gpio_chip->ngpio);
if (ret < 0)
- dev_warn(&pdev->dev, "failed to add pin range\n");
+ dev_warn(dev, "failed to add pin range\n");
}
return 0;
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index a85e00bf9834..07105ee5c9ae 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -379,6 +379,7 @@ static int s5p64x0_gpio_setcfg_rbank(struct samsung_gpio_chip *chip,
case 6:
shift = ((off + 1) & 7) * 4;
reg -= 4;
+ break;
default:
shift = ((off + 1) & 7) * 4;
break;
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
new file mode 100644
index 000000000000..b50fe1297748
--- /dev/null
+++ b/drivers/gpio/gpio-syscon.c
@@ -0,0 +1,191 @@
+/*
+ * SYSCON GPIO driver
+ *
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define GPIO_SYSCON_FEAT_IN BIT(0)
+#define GPIO_SYSCON_FEAT_OUT BIT(1)
+#define GPIO_SYSCON_FEAT_DIR BIT(2)
+
+/* SYSCON driver is designed to use 32-bit wide registers */
+#define SYSCON_REG_SIZE (4)
+#define SYSCON_REG_BITS (SYSCON_REG_SIZE * 8)
+
+/**
+ * struct syscon_gpio_data - Configuration for the device.
+ * compatible: SYSCON driver compatible string.
+ * flags: Set of GPIO_SYSCON_FEAT_ flags:
+ *		GPIO_SYSCON_FEAT_IN:	GPIOs support input,
+ *		GPIO_SYSCON_FEAT_OUT:	GPIOs support output,
+ *		GPIO_SYSCON_FEAT_DIR:	GPIOs support switching direction.
+ * bit_count: Number of bits used as GPIOs.
+ * dat_bit_offset: Offset (in bits) to the first GPIO bit.
+ * dir_bit_offset: Optional offset (in bits) to the first bit to switch
+ * GPIO direction (Used with GPIO_SYSCON_FEAT_DIR flag).
+ */
+
+struct syscon_gpio_data {
+ const char *compatible;
+ unsigned int flags;
+ unsigned int bit_count;
+ unsigned int dat_bit_offset;
+ unsigned int dir_bit_offset;
+};
+
+struct syscon_gpio_priv {
+ struct gpio_chip chip;
+ struct regmap *syscon;
+ const struct syscon_gpio_data *data;
+};
+
+static inline struct syscon_gpio_priv *to_syscon_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct syscon_gpio_priv, chip);
+}
+
+static int syscon_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct syscon_gpio_priv *priv = to_syscon_gpio(chip);
+ unsigned int val, offs = priv->data->dat_bit_offset + offset;
+ int ret;
+
+ ret = regmap_read(priv->syscon,
+ (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(offs % SYSCON_REG_BITS));
+}
+
+static void syscon_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ struct syscon_gpio_priv *priv = to_syscon_gpio(chip);
+ unsigned int offs = priv->data->dat_bit_offset + offset;
+
+ regmap_update_bits(priv->syscon,
+ (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
+ BIT(offs % SYSCON_REG_BITS),
+ val ? BIT(offs % SYSCON_REG_BITS) : 0);
+}
+
+static int syscon_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+{
+ struct syscon_gpio_priv *priv = to_syscon_gpio(chip);
+
+ if (priv->data->flags & GPIO_SYSCON_FEAT_DIR) {
+ unsigned int offs = priv->data->dir_bit_offset + offset;
+
+ regmap_update_bits(priv->syscon,
+ (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
+ BIT(offs % SYSCON_REG_BITS), 0);
+ }
+
+ return 0;
+}
+
+static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
+{
+ struct syscon_gpio_priv *priv = to_syscon_gpio(chip);
+
+ if (priv->data->flags & GPIO_SYSCON_FEAT_DIR) {
+ unsigned int offs = priv->data->dir_bit_offset + offset;
+
+ regmap_update_bits(priv->syscon,
+ (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
+ BIT(offs % SYSCON_REG_BITS),
+ BIT(offs % SYSCON_REG_BITS));
+ }
+
+ syscon_gpio_set(chip, offset, val);
+
+ return 0;
+}
+
+static const struct syscon_gpio_data clps711x_mctrl_gpio = {
+ /* ARM CLPS711X SYSFLG1 Bits 8-10 */
+ .compatible = "cirrus,clps711x-syscon1",
+ .flags = GPIO_SYSCON_FEAT_IN,
+ .bit_count = 3,
+ .dat_bit_offset = 0x40 * 8 + 8,
+};
+
+static const struct of_device_id syscon_gpio_ids[] = {
+ {
+ .compatible = "cirrus,clps711x-mctrl-gpio",
+ .data = &clps711x_mctrl_gpio,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, syscon_gpio_ids);
+
+static int syscon_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id = of_match_device(syscon_gpio_ids, dev);
+ struct syscon_gpio_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_id->data;
+
+ priv->syscon =
+ syscon_regmap_lookup_by_compatible(priv->data->compatible);
+ if (IS_ERR(priv->syscon))
+ return PTR_ERR(priv->syscon);
+
+ priv->chip.dev = dev;
+ priv->chip.owner = THIS_MODULE;
+ priv->chip.label = dev_name(dev);
+ priv->chip.base = -1;
+ priv->chip.ngpio = priv->data->bit_count;
+ priv->chip.get = syscon_gpio_get;
+ if (priv->data->flags & GPIO_SYSCON_FEAT_IN)
+ priv->chip.direction_input = syscon_gpio_dir_in;
+ if (priv->data->flags & GPIO_SYSCON_FEAT_OUT) {
+ priv->chip.set = syscon_gpio_set;
+ priv->chip.direction_output = syscon_gpio_dir_out;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return gpiochip_add(&priv->chip);
+}
+
+static int syscon_gpio_remove(struct platform_device *pdev)
+{
+ struct syscon_gpio_priv *priv = platform_get_drvdata(pdev);
+
+ return gpiochip_remove(&priv->chip);
+}
+
+static struct platform_driver syscon_gpio_driver = {
+ .driver = {
+ .name = "gpio-syscon",
+ .owner = THIS_MODULE,
+ .of_match_table = syscon_gpio_ids,
+ },
+ .probe = syscon_gpio_probe,
+ .remove = syscon_gpio_remove,
+};
+module_platform_driver(syscon_gpio_driver);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("SYSCON GPIO driver");
+MODULE_LICENSE("GPL");
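
To make the clps711x_mctrl_gpio numbers concrete: dat_bit_offset = 0x40 * 8 + 8 places GPIO 0 at absolute bit 520, so syscon_gpio_get() reads 32-bit word 520 / 32 = 16, i.e. byte address 16 * 4 = 0x40 (SYSFLG1), and tests bit 520 % 32 = 8, matching the "ARM CLPS711X SYSFLG1 Bits 8-10" comment. The same arithmetic, spelled out with the driver's own macros:

	unsigned int offs = 0x40 * 8 + 8 + 0;	/* dat_bit_offset + GPIO 0 = 520 */
	unsigned int reg  = (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE;	/* 0x40 */
	unsigned int bit  = offs % SYSCON_REG_BITS;			/* 8 */
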
diff --git a/drivers/gpio/gpio-tnetv107x.c b/drivers/gpio/gpio-tnetv107x.c
deleted file mode 100644
index 4aa481579a05..000000000000
--- a/drivers/gpio/gpio-tnetv107x.c
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Texas Instruments TNETV107X GPIO Controller
- *
- * Copyright (C) 2010 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/platform_data/gpio-davinci.h>
-
-#include <mach/common.h>
-#include <mach/tnetv107x.h>
-
-struct tnetv107x_gpio_regs {
- u32 idver;
- u32 data_in[3];
- u32 data_out[3];
- u32 direction[3];
- u32 enable[3];
-};
-
-#define gpio_reg_index(gpio) ((gpio) >> 5)
-#define gpio_reg_bit(gpio) BIT((gpio) & 0x1f)
-
-#define gpio_reg_rmw(reg, mask, val) \
- __raw_writel((__raw_readl(reg) & ~(mask)) | (val), (reg))
-
-#define gpio_reg_set_bit(reg, gpio) \
- gpio_reg_rmw((reg) + gpio_reg_index(gpio), 0, gpio_reg_bit(gpio))
-
-#define gpio_reg_clear_bit(reg, gpio) \
- gpio_reg_rmw((reg) + gpio_reg_index(gpio), gpio_reg_bit(gpio), 0)
-
-#define gpio_reg_get_bit(reg, gpio) \
- (__raw_readl((reg) + gpio_reg_index(gpio)) & gpio_reg_bit(gpio))
-
-#define chip2controller(chip) \
- container_of(chip, struct davinci_gpio_controller, chip)
-
-#define TNETV107X_GPIO_CTLRS DIV_ROUND_UP(TNETV107X_N_GPIO, 32)
-
-static struct davinci_gpio_controller chips[TNETV107X_GPIO_CTLRS];
-
-static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- struct davinci_gpio_controller *ctlr = chip2controller(chip);
- struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
- unsigned gpio = chip->base + offset;
- unsigned long flags;
-
- spin_lock_irqsave(&ctlr->lock, flags);
-
- gpio_reg_set_bit(regs->enable, gpio);
-
- spin_unlock_irqrestore(&ctlr->lock, flags);
-
- return 0;
-}
-
-static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- struct davinci_gpio_controller *ctlr = chip2controller(chip);
- struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
- unsigned gpio = chip->base + offset;
- unsigned long flags;
-
- spin_lock_irqsave(&ctlr->lock, flags);
-
- gpio_reg_clear_bit(regs->enable, gpio);
-
- spin_unlock_irqrestore(&ctlr->lock, flags);
-}
-
-static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
-{
- struct davinci_gpio_controller *ctlr = chip2controller(chip);
- struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
- unsigned gpio = chip->base + offset;
- unsigned long flags;
-
- spin_lock_irqsave(&ctlr->lock, flags);
-
- gpio_reg_set_bit(regs->direction, gpio);
-
- spin_unlock_irqrestore(&ctlr->lock, flags);
-
- return 0;
-}
-
-static int tnetv107x_gpio_dir_out(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct davinci_gpio_controller *ctlr = chip2controller(chip);
- struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
- unsigned gpio = chip->base + offset;
- unsigned long flags;
-
- spin_lock_irqsave(&ctlr->lock, flags);
-
- if (value)
- gpio_reg_set_bit(regs->data_out, gpio);
- else
- gpio_reg_clear_bit(regs->data_out, gpio);
-
- gpio_reg_clear_bit(regs->direction, gpio);
-
- spin_unlock_irqrestore(&ctlr->lock, flags);
-
- return 0;
-}
-
-static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct davinci_gpio_controller *ctlr = chip2controller(chip);
- struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
- unsigned gpio = chip->base + offset;
- int ret;
-
- ret = gpio_reg_get_bit(regs->data_in, gpio);
-
- return ret ? 1 : 0;
-}
-
-static void tnetv107x_gpio_set(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct davinci_gpio_controller *ctlr = chip2controller(chip);
- struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
- unsigned gpio = chip->base + offset;
- unsigned long flags;
-
- spin_lock_irqsave(&ctlr->lock, flags);
-
- if (value)
- gpio_reg_set_bit(regs->data_out, gpio);
- else
- gpio_reg_clear_bit(regs->data_out, gpio);
-
- spin_unlock_irqrestore(&ctlr->lock, flags);
-}
-
-static int __init tnetv107x_gpio_setup(void)
-{
- int i, base;
- unsigned ngpio;
- struct davinci_soc_info *soc_info = &davinci_soc_info;
- struct tnetv107x_gpio_regs *regs;
- struct davinci_gpio_controller *ctlr;
-
- if (soc_info->gpio_type != GPIO_TYPE_TNETV107X)
- return 0;
-
- ngpio = soc_info->gpio_num;
- if (ngpio == 0) {
- pr_err("GPIO setup: how many GPIOs?\n");
- return -EINVAL;
- }
-
- if (WARN_ON(TNETV107X_N_GPIO < ngpio))
- ngpio = TNETV107X_N_GPIO;
-
- regs = ioremap(soc_info->gpio_base, SZ_4K);
- if (WARN_ON(!regs))
- return -EINVAL;
-
- for (i = 0, base = 0; base < ngpio; i++, base += 32) {
- ctlr = &chips[i];
-
- ctlr->chip.label = "tnetv107x";
- ctlr->chip.can_sleep = false;
- ctlr->chip.base = base;
- ctlr->chip.ngpio = ngpio - base;
- if (ctlr->chip.ngpio > 32)
- ctlr->chip.ngpio = 32;
-
- ctlr->chip.request = tnetv107x_gpio_request;
- ctlr->chip.free = tnetv107x_gpio_free;
- ctlr->chip.direction_input = tnetv107x_gpio_dir_in;
- ctlr->chip.get = tnetv107x_gpio_get;
- ctlr->chip.direction_output = tnetv107x_gpio_dir_out;
- ctlr->chip.set = tnetv107x_gpio_set;
-
- spin_lock_init(&ctlr->lock);
-
- ctlr->regs = regs;
- ctlr->set_data = &regs->data_out[i];
- ctlr->clr_data = &regs->data_out[i];
- ctlr->in_data = &regs->data_in[i];
-
- gpiochip_add(&ctlr->chip);
- }
-
- soc_info->gpio_ctlrs = chips;
- soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32);
- return 0;
-}
-pure_initcall(tnetv107x_gpio_setup);
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 8b88ca2eda9c..3ebb1a5ff22e 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -139,7 +139,6 @@ static u8 cached_leden;
static void twl4030_led_set_value(int led, int value)
{
u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM;
- int status;
if (led)
mask <<= 1;
@@ -148,8 +147,9 @@ static void twl4030_led_set_value(int led, int value)
cached_leden &= ~mask;
else
cached_leden |= mask;
- status = twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
- TWL4030_LED_LEDEN_REG);
+
+ WARN_ON_ONCE(twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
+ TWL4030_LED_LEDEN_REG));
}
static int twl4030_set_gpio_direction(int gpio, int is_input)
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index 23e061392411..5246a60eff6d 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -488,26 +488,26 @@ static int tz1090_gpio_bank_probe(struct tz1090_gpio_bank_info *info)
gc->chip_types[0].handler = handle_level_irq;
gc->chip_types[0].regs.ack = REG_GPIO_IRQ_STS;
gc->chip_types[0].regs.mask = REG_GPIO_IRQ_EN;
- gc->chip_types[0].chip.irq_startup = gpio_startup_irq,
- gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit,
- gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit,
- gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit,
- gc->chip_types[0].chip.irq_set_type = gpio_set_irq_type,
- gc->chip_types[0].chip.irq_set_wake = gpio_set_irq_wake,
- gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND,
+ gc->chip_types[0].chip.irq_startup = gpio_startup_irq;
+ gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_set_type = gpio_set_irq_type;
+ gc->chip_types[0].chip.irq_set_wake = gpio_set_irq_wake;
+ gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
/* edge chip type */
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
gc->chip_types[1].handler = handle_edge_irq;
gc->chip_types[1].regs.ack = REG_GPIO_IRQ_STS;
gc->chip_types[1].regs.mask = REG_GPIO_IRQ_EN;
- gc->chip_types[1].chip.irq_startup = gpio_startup_irq,
- gc->chip_types[1].chip.irq_ack = irq_gc_ack_clr_bit,
- gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit,
- gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit,
- gc->chip_types[1].chip.irq_set_type = gpio_set_irq_type,
- gc->chip_types[1].chip.irq_set_wake = gpio_set_irq_wake,
- gc->chip_types[1].chip.flags = IRQCHIP_MASK_ON_SUSPEND,
+ gc->chip_types[1].chip.irq_startup = gpio_startup_irq;
+ gc->chip_types[1].chip.irq_ack = irq_gc_ack_clr_bit;
+ gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[1].chip.irq_set_type = gpio_set_irq_type;
+ gc->chip_types[1].chip.irq_set_wake = gpio_set_irq_wake;
+ gc->chip_types[1].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
/* Setup chained handler for this GPIO bank */
irq_set_handler_data(bank->irq, bank);
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index 9902732a382d..66cbcc108e62 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -81,6 +81,7 @@ static DEFINE_SPINLOCK(giu_lock);
static unsigned long giu_flags;
static void __iomem *giu_base;
+static struct gpio_chip vr41xx_gpio_chip;
#define giu_read(offset) readw(giu_base + (offset))
#define giu_write(offset, value) writew((value), giu_base + (offset))
@@ -135,12 +136,31 @@ static void unmask_giuint_low(struct irq_data *d)
giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
}
+static unsigned int startup_giuint(struct irq_data *data)
+{
+ if (gpio_lock_as_irq(&vr41xx_gpio_chip, data->hwirq))
+ dev_err(vr41xx_gpio_chip.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ data->hwirq);
+ /* Satisfy the .enable semantics by unmasking the line */
+ unmask_giuint_low(data);
+ return 0;
+}
+
+static void shutdown_giuint(struct irq_data *data)
+{
+ mask_giuint_low(data);
+ gpio_unlock_as_irq(&vr41xx_gpio_chip, data->hwirq);
+}
+
static struct irq_chip giuint_low_irq_chip = {
.name = "GIUINTL",
.irq_ack = ack_giuint_low,
.irq_mask = mask_giuint_low,
.irq_mask_ack = mask_ack_giuint_low,
.irq_unmask = unmask_giuint_low,
+ .irq_startup = startup_giuint,
+ .irq_shutdown = shutdown_giuint,
};
static void ack_giuint_high(struct irq_data *d)
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
new file mode 100644
index 000000000000..9bf5034b6cdb
--- /dev/null
+++ b/drivers/gpio/gpio-zevio.c
@@ -0,0 +1,220 @@
+/*
+ * GPIO controller in LSI ZEVIO SoCs.
+ *
+ * Author: Fabian Vogt <fabian@ritter-vogt.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+
+/*
+ * Memory layout:
+ * This chip has four gpio sections, each controls 8 GPIOs.
+ * Bit 0 in section 0 is GPIO 0, bit 2 in section 1 is GPIO 10.
+ * Disclaimer: Reverse engineered!
+ * For more information refer to:
+ * http://hackspire.unsads.com/wiki/index.php/Memory-mapped_I/O_ports#90000000_-_General_Purpose_I.2FO_.28GPIO.29
+ *
+ * 0x00-0x3F: Section 0
+ * +0x00: Masked interrupt status (read-only)
+ * +0x04: R: Interrupt status W: Reset interrupt status
+ * +0x08: R: Interrupt mask W: Mask interrupt
+ * +0x0C: W: Unmask interrupt (write-only)
+ * +0x10: Direction: I/O=1/0
+ * +0x14: Output
+ * +0x18: Input (read-only)
+ * +0x20: R: Level interrupt W: Set as level interrupt
+ * 0x40-0x7F: Section 1
+ * 0x80-0xBF: Section 2
+ * 0xC0-0xFF: Section 3
+ */
+
+#define ZEVIO_GPIO_SECTION_SIZE 0x40
+
+/* Offsets to various registers */
+#define ZEVIO_GPIO_INT_MASKED_STATUS 0x00
+#define ZEVIO_GPIO_INT_STATUS 0x04
+#define ZEVIO_GPIO_INT_UNMASK 0x08
+#define ZEVIO_GPIO_INT_MASK 0x0C
+#define ZEVIO_GPIO_DIRECTION 0x10
+#define ZEVIO_GPIO_OUTPUT 0x14
+#define ZEVIO_GPIO_INPUT 0x18
+#define ZEVIO_GPIO_INT_STICKY 0x20
+
+#define to_zevio_gpio(chip) container_of(to_of_mm_gpio_chip(chip), \
+ struct zevio_gpio, chip)
+
+/* Bit number of GPIO in its section */
+#define ZEVIO_GPIO_BIT(gpio) (gpio&7)
+
+struct zevio_gpio {
+ spinlock_t lock;
+ struct of_mm_gpio_chip chip;
+};
+
+static inline u32 zevio_gpio_port_get(struct zevio_gpio *c, unsigned pin,
+ unsigned port_offset)
+{
+ unsigned section_offset = ((pin >> 3) & 3)*ZEVIO_GPIO_SECTION_SIZE;
+ return readl(IOMEM(c->chip.regs + section_offset + port_offset));
+}
+
+static inline void zevio_gpio_port_set(struct zevio_gpio *c, unsigned pin,
+ unsigned port_offset, u32 val)
+{
+ unsigned section_offset = ((pin >> 3) & 3)*ZEVIO_GPIO_SECTION_SIZE;
+ writel(val, IOMEM(c->chip.regs + section_offset + port_offset));
+}
+
+/* Functions for struct gpio_chip */
+static int zevio_gpio_get(struct gpio_chip *chip, unsigned pin)
+{
+ struct zevio_gpio *controller = to_zevio_gpio(chip);
+
+ /* Only reading allowed, so no spinlock needed */
+ u32 val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_INPUT);
+
+ return (val >> ZEVIO_GPIO_BIT(pin)) & 0x1;
+}
+
+static void zevio_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+{
+ struct zevio_gpio *controller = to_zevio_gpio(chip);
+ u32 val;
+
+ spin_lock(&controller->lock);
+ val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_OUTPUT);
+ if (value)
+ val |= BIT(ZEVIO_GPIO_BIT(pin));
+ else
+ val &= ~BIT(ZEVIO_GPIO_BIT(pin));
+
+ zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_OUTPUT, val);
+ spin_unlock(&controller->lock);
+}
+
+static int zevio_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
+{
+ struct zevio_gpio *controller = to_zevio_gpio(chip);
+ u32 val;
+
+ spin_lock(&controller->lock);
+
+ val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_DIRECTION);
+ val |= BIT(ZEVIO_GPIO_BIT(pin));
+ zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_DIRECTION, val);
+
+ spin_unlock(&controller->lock);
+
+ return 0;
+}
+
+static int zevio_gpio_direction_output(struct gpio_chip *chip,
+ unsigned pin, int value)
+{
+ struct zevio_gpio *controller = to_zevio_gpio(chip);
+ u32 val;
+
+ spin_lock(&controller->lock);
+ val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_OUTPUT);
+ if (value)
+ val |= BIT(ZEVIO_GPIO_BIT(pin));
+ else
+ val &= ~BIT(ZEVIO_GPIO_BIT(pin));
+
+ zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_OUTPUT, val);
+ val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_DIRECTION);
+ val &= ~BIT(ZEVIO_GPIO_BIT(pin));
+ zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_DIRECTION, val);
+
+ spin_unlock(&controller->lock);
+
+ return 0;
+}
+
+static int zevio_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
+{
+ /*
+ * TODO: Implement IRQs.
+ * Not implemented yet due to weird lockups
+ */
+
+ return -ENXIO;
+}
+
+static struct gpio_chip zevio_gpio_chip = {
+ .direction_input = zevio_gpio_direction_input,
+ .direction_output = zevio_gpio_direction_output,
+ .set = zevio_gpio_set,
+ .get = zevio_gpio_get,
+ .to_irq = zevio_gpio_to_irq,
+ .base = 0,
+ .owner = THIS_MODULE,
+ .ngpio = 32,
+ .of_gpio_n_cells = 2,
+};
+
+/* Initialization */
+static int zevio_gpio_probe(struct platform_device *pdev)
+{
+ struct zevio_gpio *controller;
+ int status, i;
+
+ controller = devm_kzalloc(&pdev->dev, sizeof(*controller), GFP_KERNEL);
+ if (!controller) {
+ dev_err(&pdev->dev, "not enough free memory\n");
+ return -ENOMEM;
+ }
+
+ /* Copy our reference */
+ controller->chip.gc = zevio_gpio_chip;
+ controller->chip.gc.dev = &pdev->dev;
+
+ status = of_mm_gpiochip_add(pdev->dev.of_node, &(controller->chip));
+ if (status) {
+ dev_err(&pdev->dev, "failed to add gpiochip: %d\n", status);
+ return status;
+ }
+
+ spin_lock_init(&controller->lock);
+
+ /* Disable interrupts, they only cause errors */
+ for (i = 0; i < controller->chip.gc.ngpio; i += 8)
+ zevio_gpio_port_set(controller, i, ZEVIO_GPIO_INT_MASK, 0xFF);
+
+ dev_dbg(controller->chip.gc.dev, "ZEVIO GPIO controller set up!\n");
+
+ return 0;
+}
+
+static struct of_device_id zevio_gpio_of_match[] = {
+ { .compatible = "lsi,zevio-gpio", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, zevio_gpio_of_match);
+
+static struct platform_driver zevio_gpio_driver = {
+ .driver = {
+ .name = "gpio-zevio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(zevio_gpio_of_match),
+ },
+ .probe = zevio_gpio_probe,
+};
+module_platform_driver(zevio_gpio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Fabian Vogt <fabian@ritter-vogt.de>");
+MODULE_DESCRIPTION("LSI ZEVIO SoC GPIO driver");
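
As a worked example of the zevio layout: each 0x40-byte section controls eight lines, so the accessors pick section (pin >> 3) & 3 and bit pin & 7. GPIO 10 therefore lives in section 1 at bit 2 (the "bit 2 in section 1 is GPIO 10" case from the memory-layout comment), and reading it via ZEVIO_GPIO_INPUT touches base + 0x40 + 0x18:

	unsigned pin = 10;
	unsigned section_offset = ((pin >> 3) & 3) * ZEVIO_GPIO_SECTION_SIZE;	/* 0x40 */
	unsigned reg_offset     = section_offset + ZEVIO_GPIO_INPUT;		/* 0x58 */
	unsigned bit            = ZEVIO_GPIO_BIT(pin);				/* 2 */
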
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 716ee9843110..bf0f8b476696 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -16,16 +16,35 @@
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
+#include <linux/mutex.h>
#include "gpiolib.h"
-struct acpi_gpio_evt_pin {
+struct acpi_gpio_event {
struct list_head node;
- acpi_handle *evt_handle;
+ acpi_handle handle;
unsigned int pin;
unsigned int irq;
};
+struct acpi_gpio_connection {
+ struct list_head node;
+ struct gpio_desc *desc;
+};
+
+struct acpi_gpio_chip {
+ /*
+ * ACPICA requires that the first field of the context parameter
+ * passed to acpi_install_address_space_handler() is large enough
+ * to hold struct acpi_connection_info.
+ */
+ struct acpi_connection_info conn_info;
+ struct list_head conns;
+ struct mutex conn_lock;
+ struct gpio_chip *chip;
+ struct list_head events;
+};
+
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
if (!gc->dev)
@@ -60,176 +79,195 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
if (pin < 0 || pin > chip->ngpio)
return ERR_PTR(-EINVAL);
- return gpio_to_desc(chip->base + pin);
+ return gpiochip_get_desc(chip, pin);
}
static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
{
- acpi_handle handle = data;
+ struct acpi_gpio_event *event = data;
- acpi_evaluate_object(handle, NULL, NULL, NULL);
+ acpi_evaluate_object(event->handle, NULL, NULL, NULL);
return IRQ_HANDLED;
}
static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data)
{
- struct acpi_gpio_evt_pin *evt_pin = data;
+ struct acpi_gpio_event *event = data;
- acpi_execute_simple_method(evt_pin->evt_handle, NULL, evt_pin->pin);
+ acpi_execute_simple_method(event->handle, NULL, event->pin);
return IRQ_HANDLED;
}
-static void acpi_gpio_evt_dh(acpi_handle handle, void *data)
+static void acpi_gpio_chip_dh(acpi_handle handle, void *data)
{
/* The address of this function is used as a key. */
}
-/**
- * acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events
- * @chip: gpio chip
- *
- * ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are
- * handled by ACPI event methods which need to be called from the GPIO
- * chip's interrupt handler. acpi_gpiochip_request_interrupts finds out which
- * gpio pins have acpi event methods and assigns interrupt handlers that calls
- * the acpi event methods for those pins.
- */
-static void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+ void *context)
{
- struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
- struct acpi_resource *res;
+ struct acpi_gpio_chip *acpi_gpio = context;
+ struct gpio_chip *chip = acpi_gpio->chip;
+ struct acpi_resource_gpio *agpio;
acpi_handle handle, evt_handle;
- struct list_head *evt_pins = NULL;
- acpi_status status;
- unsigned int pin;
- int irq, ret;
- char ev_name[5];
+ struct acpi_gpio_event *event;
+ irq_handler_t handler = NULL;
+ struct gpio_desc *desc;
+ unsigned long irqflags;
+ int ret, pin, irq;
- if (!chip->dev || !chip->to_irq)
- return;
+ if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
+ return AE_OK;
- handle = ACPI_HANDLE(chip->dev);
- if (!handle)
- return;
+ agpio = &ares->data.gpio;
+ if (agpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_INT)
+ return AE_OK;
- status = acpi_get_event_resources(handle, &buf);
- if (ACPI_FAILURE(status))
- return;
+ handle = ACPI_HANDLE(chip->dev);
+ pin = agpio->pin_table[0];
+
+ if (pin <= 255) {
+ char ev_name[5];
+ sprintf(ev_name, "_%c%02X",
+ agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
+ pin);
+ if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
+ handler = acpi_gpio_irq_handler;
+ }
+ if (!handler) {
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
+ handler = acpi_gpio_irq_handler_evt;
+ }
+ if (!handler)
+ return AE_BAD_PARAMETER;
- status = acpi_get_handle(handle, "_EVT", &evt_handle);
- if (ACPI_SUCCESS(status)) {
- evt_pins = kzalloc(sizeof(*evt_pins), GFP_KERNEL);
- if (evt_pins) {
- INIT_LIST_HEAD(evt_pins);
- status = acpi_attach_data(handle, acpi_gpio_evt_dh,
- evt_pins);
- if (ACPI_FAILURE(status)) {
- kfree(evt_pins);
- evt_pins = NULL;
- }
- }
+ desc = gpiochip_get_desc(chip, pin);
+ if (IS_ERR(desc)) {
+ dev_err(chip->dev, "Failed to get GPIO descriptor\n");
+ return AE_ERROR;
}
- /*
- * If a GPIO interrupt has an ACPI event handler method, or _EVT is
- * present, set up an interrupt handler that calls the ACPI event
- * handler.
- */
- for (res = buf.pointer;
- res && (res->type != ACPI_RESOURCE_TYPE_END_TAG);
- res = ACPI_NEXT_RESOURCE(res)) {
- irq_handler_t handler = NULL;
- void *data;
-
- if (res->type != ACPI_RESOURCE_TYPE_GPIO ||
- res->data.gpio.connection_type !=
- ACPI_RESOURCE_GPIO_TYPE_INT)
- continue;
+ ret = gpiochip_request_own_desc(desc, "ACPI:Event");
+ if (ret) {
+ dev_err(chip->dev, "Failed to request GPIO\n");
+ return AE_ERROR;
+ }
- pin = res->data.gpio.pin_table[0];
- if (pin > chip->ngpio)
- continue;
+ gpiod_direction_input(desc);
- irq = chip->to_irq(chip, pin);
- if (irq < 0)
- continue;
+ ret = gpiod_lock_as_irq(desc);
+ if (ret) {
+ dev_err(chip->dev, "Failed to lock GPIO as interrupt\n");
+ goto fail_free_desc;
+ }
- if (pin <= 255) {
- acpi_handle ev_handle;
+ irq = gpiod_to_irq(desc);
+ if (irq < 0) {
+ dev_err(chip->dev, "Failed to translate GPIO to IRQ\n");
+ goto fail_unlock_irq;
+ }
- sprintf(ev_name, "_%c%02X",
- res->data.gpio.triggering ? 'E' : 'L', pin);
- status = acpi_get_handle(handle, ev_name, &ev_handle);
- if (ACPI_SUCCESS(status)) {
- handler = acpi_gpio_irq_handler;
- data = ev_handle;
- }
+ irqflags = IRQF_ONESHOT;
+ if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
+ if (agpio->polarity == ACPI_ACTIVE_HIGH)
+ irqflags |= IRQF_TRIGGER_HIGH;
+ else
+ irqflags |= IRQF_TRIGGER_LOW;
+ } else {
+ switch (agpio->polarity) {
+ case ACPI_ACTIVE_HIGH:
+ irqflags |= IRQF_TRIGGER_RISING;
+ break;
+ case ACPI_ACTIVE_LOW:
+ irqflags |= IRQF_TRIGGER_FALLING;
+ break;
+ default:
+ irqflags |= IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING;
+ break;
}
- if (!handler && evt_pins) {
- struct acpi_gpio_evt_pin *evt_pin;
+ }
- evt_pin = kzalloc(sizeof(*evt_pin), GFP_KERNEL);
- if (!evt_pin)
- continue;
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ goto fail_unlock_irq;
- list_add_tail(&evt_pin->node, evt_pins);
- evt_pin->evt_handle = evt_handle;
- evt_pin->pin = pin;
- evt_pin->irq = irq;
- handler = acpi_gpio_irq_handler_evt;
- data = evt_pin;
- }
- if (!handler)
- continue;
+ event->handle = evt_handle;
+ event->irq = irq;
+ event->pin = pin;
- /* Assume BIOS sets the triggering, so no flags */
- ret = devm_request_threaded_irq(chip->dev, irq, NULL, handler,
- 0, "GPIO-signaled-ACPI-event",
- data);
- if (ret)
- dev_err(chip->dev,
- "Failed to request IRQ %d ACPI event handler\n",
- irq);
+ ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
+ "ACPI:Event", event);
+ if (ret) {
+ dev_err(chip->dev, "Failed to setup interrupt handler for %d\n",
+ event->irq);
+ goto fail_free_event;
}
+
+ list_add_tail(&event->node, &acpi_gpio->events);
+ return AE_OK;
+
+fail_free_event:
+ kfree(event);
+fail_unlock_irq:
+ gpiod_unlock_as_irq(desc);
+fail_free_desc:
+ gpiochip_free_own_desc(desc);
+
+ return AE_ERROR;
}
/**
- * acpi_gpiochip_free_interrupts() - Free GPIO _EVT ACPI event interrupts.
- * @chip: gpio chip
- *
- * Free interrupts associated with the _EVT method for the given GPIO chip.
+ * acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events
+ * @acpi_gpio: ACPI GPIO chip
*
- * The remaining ACPI event interrupts associated with the chip are freed
- * automatically.
+ * ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are
+ * handled by ACPI event methods which need to be called from the GPIO
+ * chip's interrupt handler. acpi_gpiochip_request_interrupts finds out which
+ * gpio pins have acpi event methods and assigns interrupt handlers that call
+ * the acpi event methods for those pins.
*/
-static void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
+static void acpi_gpiochip_request_interrupts(struct acpi_gpio_chip *acpi_gpio)
{
- acpi_handle handle;
- acpi_status status;
- struct list_head *evt_pins;
- struct acpi_gpio_evt_pin *evt_pin, *ep;
+ struct gpio_chip *chip = acpi_gpio->chip;
if (!chip->dev || !chip->to_irq)
return;
- handle = ACPI_HANDLE(chip->dev);
- if (!handle)
- return;
+ INIT_LIST_HEAD(&acpi_gpio->events);
+ acpi_walk_resources(ACPI_HANDLE(chip->dev), "_AEI",
+ acpi_gpiochip_request_interrupt, acpi_gpio);
+}
- status = acpi_get_data(handle, acpi_gpio_evt_dh, (void **)&evt_pins);
- if (ACPI_FAILURE(status))
+/**
+ * acpi_gpiochip_free_interrupts() - Free GPIO ACPI event interrupts.
+ * @acpi_gpio: ACPI GPIO chip
+ *
+ * Free interrupts associated with GPIO ACPI event method for the given
+ * GPIO chip.
+ */
+static void acpi_gpiochip_free_interrupts(struct acpi_gpio_chip *acpi_gpio)
+{
+ struct acpi_gpio_event *event, *ep;
+ struct gpio_chip *chip = acpi_gpio->chip;
+
+ if (!chip->dev || !chip->to_irq)
return;
- list_for_each_entry_safe_reverse(evt_pin, ep, evt_pins, node) {
- devm_free_irq(chip->dev, evt_pin->irq, evt_pin);
- list_del(&evt_pin->node);
- kfree(evt_pin);
- }
+ list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
+ struct gpio_desc *desc;
- acpi_detach_data(handle, acpi_gpio_evt_dh);
- kfree(evt_pins);
+ free_irq(event->irq, event);
+ desc = gpiochip_get_desc(chip, event->pin);
+ if (WARN_ON(IS_ERR(desc)))
+ continue;
+ gpiod_unlock_as_irq(desc);
+ gpiochip_free_own_desc(desc);
+ list_del(&event->node);
+ kfree(event);
+ }
}
struct acpi_gpio_lookup {
@@ -310,12 +348,202 @@ struct gpio_desc *acpi_get_gpiod_by_index(struct device *dev, int index,
return lookup.desc ? lookup.desc : ERR_PTR(-ENOENT);
}
+static acpi_status
+acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
+ u32 bits, u64 *value, void *handler_context,
+ void *region_context)
+{
+ struct acpi_gpio_chip *achip = region_context;
+ struct gpio_chip *chip = achip->chip;
+ struct acpi_resource_gpio *agpio;
+ struct acpi_resource *ares;
+ acpi_status status;
+ bool pull_up;
+ int i;
+
+ status = acpi_buffer_to_resource(achip->conn_info.connection,
+ achip->conn_info.length, &ares);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ if (WARN_ON(ares->type != ACPI_RESOURCE_TYPE_GPIO)) {
+ ACPI_FREE(ares);
+ return AE_BAD_PARAMETER;
+ }
+
+ agpio = &ares->data.gpio;
+ pull_up = agpio->pin_config == ACPI_PIN_CONFIG_PULLUP;
+
+ if (WARN_ON(agpio->io_restriction == ACPI_IO_RESTRICT_INPUT &&
+ function == ACPI_WRITE)) {
+ ACPI_FREE(ares);
+ return AE_BAD_PARAMETER;
+ }
+
+ for (i = 0; i < agpio->pin_table_length; i++) {
+ unsigned pin = agpio->pin_table[i];
+ struct acpi_gpio_connection *conn;
+ struct gpio_desc *desc;
+ bool found;
+
+ desc = gpiochip_get_desc(chip, pin);
+ if (IS_ERR(desc)) {
+ status = AE_ERROR;
+ goto out;
+ }
+
+ mutex_lock(&achip->conn_lock);
+
+ found = false;
+ list_for_each_entry(conn, &achip->conns, node) {
+ if (conn->desc == desc) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ int ret;
+
+ ret = gpiochip_request_own_desc(desc, "ACPI:OpRegion");
+ if (ret) {
+ status = AE_ERROR;
+ mutex_unlock(&achip->conn_lock);
+ goto out;
+ }
+
+ switch (agpio->io_restriction) {
+ case ACPI_IO_RESTRICT_INPUT:
+ gpiod_direction_input(desc);
+ break;
+ case ACPI_IO_RESTRICT_OUTPUT:
+ /*
+ * ACPI GPIO resources don't contain an
+ * initial value for the GPIO. Therefore we
+ * deduce that value from the pull field
+ * instead. If the pin is pulled up we
+ * assume default to be high, otherwise
+ * low.
+ */
+ gpiod_direction_output(desc, pull_up);
+ break;
+ default:
+ /*
+ * Assume that the BIOS has configured the
+ * direction and pull accordingly.
+ */
+ break;
+ }
+
+ conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ if (!conn) {
+ status = AE_NO_MEMORY;
+ gpiochip_free_own_desc(desc);
+ mutex_unlock(&achip->conn_lock);
+ goto out;
+ }
+
+ conn->desc = desc;
+ list_add_tail(&conn->node, &achip->conns);
+ }
+
+ mutex_unlock(&achip->conn_lock);
+
+ if (function == ACPI_WRITE)
+ gpiod_set_raw_value(desc, !!((1 << i) & *value));
+ else
+ *value |= gpiod_get_raw_value(desc) << i;
+ }
+
+out:
+ ACPI_FREE(ares);
+ return status;
+}
+
+static void acpi_gpiochip_request_regions(struct acpi_gpio_chip *achip)
+{
+ struct gpio_chip *chip = achip->chip;
+ acpi_handle handle = ACPI_HANDLE(chip->dev);
+ acpi_status status;
+
+ INIT_LIST_HEAD(&achip->conns);
+ mutex_init(&achip->conn_lock);
+ status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_GPIO,
+ acpi_gpio_adr_space_handler,
+ NULL, achip);
+ if (ACPI_FAILURE(status))
+ dev_err(chip->dev, "Failed to install GPIO OpRegion handler\n");
+}
+
+static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip)
+{
+ struct gpio_chip *chip = achip->chip;
+ acpi_handle handle = ACPI_HANDLE(chip->dev);
+ struct acpi_gpio_connection *conn, *tmp;
+ acpi_status status;
+
+ status = acpi_remove_address_space_handler(handle, ACPI_ADR_SPACE_GPIO,
+ acpi_gpio_adr_space_handler);
+ if (ACPI_FAILURE(status)) {
+ dev_err(chip->dev, "Failed to remove GPIO OpRegion handler\n");
+ return;
+ }
+
+ list_for_each_entry_safe_reverse(conn, tmp, &achip->conns, node) {
+ gpiochip_free_own_desc(conn->desc);
+ list_del(&conn->node);
+ kfree(conn);
+ }
+}
+
void acpi_gpiochip_add(struct gpio_chip *chip)
{
- acpi_gpiochip_request_interrupts(chip);
+ struct acpi_gpio_chip *acpi_gpio;
+ acpi_handle handle;
+ acpi_status status;
+
+ handle = ACPI_HANDLE(chip->dev);
+ if (!handle)
+ return;
+
+ acpi_gpio = kzalloc(sizeof(*acpi_gpio), GFP_KERNEL);
+ if (!acpi_gpio) {
+ dev_err(chip->dev,
+ "Failed to allocate memory for ACPI GPIO chip\n");
+ return;
+ }
+
+ acpi_gpio->chip = chip;
+
+ status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
+ if (ACPI_FAILURE(status)) {
+ dev_err(chip->dev, "Failed to attach ACPI GPIO chip\n");
+ kfree(acpi_gpio);
+ return;
+ }
+
+ acpi_gpiochip_request_interrupts(acpi_gpio);
+ acpi_gpiochip_request_regions(acpi_gpio);
}
void acpi_gpiochip_remove(struct gpio_chip *chip)
{
- acpi_gpiochip_free_interrupts(chip);
+ struct acpi_gpio_chip *acpi_gpio;
+ acpi_handle handle;
+ acpi_status status;
+
+ handle = ACPI_HANDLE(chip->dev);
+ if (!handle)
+ return;
+
+ status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(chip->dev, "Failed to retrieve ACPI GPIO chip\n");
+ return;
+ }
+
+ acpi_gpiochip_free_regions(acpi_gpio);
+ acpi_gpiochip_free_interrupts(acpi_gpio);
+
+ acpi_detach_data(handle, acpi_gpio_chip_dh);
+ kfree(acpi_gpio);
}
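
For reference on the _AEI handling above: for pins 0-255 the code first looks for a per-pin event method named _Exx (edge triggered) or _Lxx (level triggered), with xx the pin number in upper-case hex, and only falls back to the catch-all _EVT(pin) method when no per-pin method exists. The method name is built exactly as in the hunk:

	char ev_name[5];

	/* e.g. pin 18, ACPI_EDGE_SENSITIVE -> "_E12" */
	sprintf(ev_name, "_%c%02X",
		agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', pin);
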
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index e0a98f581f58..2024d45e5503 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -12,6 +12,7 @@
*/
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -90,7 +91,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
of_node_put(gg_data.gpiospec.np);
pr_debug("%s exited with status %d\n", __func__,
- PTR_RET(gg_data.out_gpio));
+ PTR_ERR_OR_ZERO(gg_data.out_gpio));
return gg_data.out_gpio;
}
EXPORT_SYMBOL(of_get_named_gpiod_flags);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 50c4922fe53a..761013f8b82f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -164,16 +164,17 @@ struct gpio_desc *gpio_to_desc(unsigned gpio)
EXPORT_SYMBOL_GPL(gpio_to_desc);
/**
- * Convert an offset on a certain chip to a corresponding descriptor
+ * Get the GPIO descriptor corresponding to the given hw number for this chip.
*/
-static struct gpio_desc *gpiochip_offset_to_desc(struct gpio_chip *chip,
- unsigned int offset)
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
+ u16 hwnum)
{
- if (offset >= chip->ngpio)
+ if (hwnum >= chip->ngpio)
return ERR_PTR(-EINVAL);
- return &chip->desc[offset];
+ return &chip->desc[hwnum];
}
+EXPORT_SYMBOL_GPL(gpiochip_get_desc);
/**
* Convert a GPIO descriptor to the integer namespace.
@@ -350,9 +351,9 @@ static ssize_t gpio_direction_store(struct device *dev,
if (!test_bit(FLAG_EXPORT, &desc->flags))
status = -EIO;
else if (sysfs_streq(buf, "high"))
- status = gpiod_direction_output(desc, 1);
+ status = gpiod_direction_output_raw(desc, 1);
else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low"))
- status = gpiod_direction_output(desc, 0);
+ status = gpiod_direction_output_raw(desc, 0);
else if (sysfs_streq(buf, "in"))
status = gpiod_direction_input(desc);
else
@@ -1253,6 +1254,9 @@ fail:
}
EXPORT_SYMBOL_GPL(gpiochip_add);
+/* Forward-declaration */
+static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+
/**
* gpiochip_remove() - unregister a gpio_chip
* @chip: the chip to unregister
@@ -1265,11 +1269,13 @@ int gpiochip_remove(struct gpio_chip *chip)
int status = 0;
unsigned id;
+ acpi_gpiochip_remove(chip);
+
spin_lock_irqsave(&gpio_lock, flags);
+ gpiochip_irqchip_remove(chip);
gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);
- acpi_gpiochip_remove(chip);
for (id = 0; id < chip->ngpio; id++) {
if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) {
@@ -1337,6 +1343,215 @@ static struct gpio_chip *find_chip_by_name(const char *name)
return gpiochip_find((void *)name, gpiochip_match_name);
}
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+
+/*
+ * The following is irqchip helper code for gpiochips.
+ */
+
+/**
+ * gpiochip_set_chained_irqchip() - adds a chained irqchip to a gpiochip
+ * @gpiochip: the gpiochip to add the irqchip to
+ * @irqchip: the irqchip to add to the gpiochip
+ * @parent_irq: the irq number corresponding to the parent IRQ for this
+ * chained irqchip
+ * @parent_handler: the parent interrupt handler for the accumulated IRQ
+ * coming out of the gpiochip
+ */
+void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ int parent_irq,
+ irq_flow_handler_t parent_handler)
+{
+ irq_set_chained_handler(parent_irq, parent_handler);
+ /*
+ * The parent irqchip is already using the chip_data for this
+ * irqchip, so our callbacks simply use the handler_data.
+ */
+ irq_set_handler_data(parent_irq, gpiochip);
+}
+EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
+
+/**
+ * gpiochip_irq_map() - maps an IRQ into a GPIO irqchip
+ * @d: the irqdomain used by this irqchip
+ * @irq: the global irq number used by this GPIO irqchip irq
+ * @hwirq: the local IRQ/GPIO line offset on this gpiochip
+ *
+ * This function will set up the mapping for a certain IRQ line on a
+ * gpiochip by assigning the gpiochip as chip data, and using the irqchip
+ * stored inside the gpiochip.
+ */
+static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct gpio_chip *chip = d->host_data;
+
+ irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
+ irq_set_chip_data(irq, chip);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+ irq_set_irq_type(irq, chip->irq_default_type);
+
+ return 0;
+}
+
+static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops gpiochip_domain_ops = {
+ .map = gpiochip_irq_map,
+ .unmap = gpiochip_irq_unmap,
+	/* Virtually all GPIO irqchips use two-cell bindings */
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int gpiochip_irq_reqres(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ if (gpio_lock_as_irq(chip, d->hwirq)) {
+ chip_err(chip,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ d->hwirq);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void gpiochip_irq_relres(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ gpio_unlock_as_irq(chip, d->hwirq);
+}
+
+static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return irq_find_mapping(chip->irqdomain, offset);
+}
+
+/**
+ * gpiochip_irqchip_remove() - removes an irqchip added to a gpiochip
+ * @gpiochip: the gpiochip to remove the irqchip from
+ *
+ * This is called only from gpiochip_remove()
+ */
+static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
+{
+ unsigned int offset;
+
+ /* Remove all IRQ mappings and delete the domain */
+ if (gpiochip->irqdomain) {
+ for (offset = 0; offset < gpiochip->ngpio; offset++)
+ irq_dispose_mapping(gpiochip->irq_base + offset);
+ irq_domain_remove(gpiochip->irqdomain);
+ }
+
+ if (gpiochip->irqchip) {
+ gpiochip->irqchip->irq_request_resources = NULL;
+ gpiochip->irqchip->irq_release_resources = NULL;
+ gpiochip->irqchip = NULL;
+ }
+}
+
+/**
+ * gpiochip_irqchip_add() - adds an irqchip to a gpiochip
+ * @gpiochip: the gpiochip to add the irqchip to
+ * @irqchip: the irqchip to add to the gpiochip
+ * @first_irq: if not dynamically assigned, the base (first) IRQ to
+ * allocate gpiochip irqs from
+ * @handler: the irq handler to use (often a predefined irq core function)
+ * @type: the default type for IRQs on this irqchip
+ *
+ * This function closely associates a certain irqchip with a certain
+ * gpiochip, providing an irq domain to translate the local IRQs to
+ * global irqs in the gpiolib core, and making sure that the gpiochip
+ * is passed as chip data to all related functions. Driver callbacks
+ * need to use container_of() to get their local state containers back
+ * from the gpiochip passed as chip data. An irqdomain will be stored
+ * in the gpiochip that shall be used by the driver to handle IRQ number
+ * translation. The gpiochip will need to be initialized and registered
+ * before calling this function.
+ *
+ * This function will handle two-cell simple IRQs and assumes all
+ * the pins on the gpiochip can generate a unique IRQ. Everything else
+ * needs to be open coded.
+ */
+int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type)
+{
+ struct device_node *of_node;
+ unsigned int offset;
+ unsigned irq_base = 0;
+
+ if (!gpiochip || !irqchip)
+ return -EINVAL;
+
+ if (!gpiochip->dev) {
+ pr_err("missing gpiochip .dev parent pointer\n");
+ return -EINVAL;
+ }
+ of_node = gpiochip->dev->of_node;
+#ifdef CONFIG_OF_GPIO
+ /*
+ * If the gpiochip has an assigned OF node, this takes precedence.
+ * FIXME: get rid of this and use gpiochip->dev->of_node everywhere
+ */
+ if (gpiochip->of_node)
+ of_node = gpiochip->of_node;
+#endif
+ gpiochip->irqchip = irqchip;
+ gpiochip->irq_handler = handler;
+ gpiochip->irq_default_type = type;
+ gpiochip->to_irq = gpiochip_to_irq;
+ gpiochip->irqdomain = irq_domain_add_simple(of_node,
+ gpiochip->ngpio, first_irq,
+ &gpiochip_domain_ops, gpiochip);
+ if (!gpiochip->irqdomain) {
+ gpiochip->irqchip = NULL;
+ return -EINVAL;
+ }
+ irqchip->irq_request_resources = gpiochip_irq_reqres;
+ irqchip->irq_release_resources = gpiochip_irq_relres;
+
+ /*
+ * Prepare the mapping since the irqchip shall be orthogonal to
+ * any gpiochip calls. If the first_irq was zero, this is
+ * necessary to allocate descriptors for all IRQs.
+ */
+ for (offset = 0; offset < gpiochip->ngpio; offset++) {
+ irq_base = irq_create_mapping(gpiochip->irqdomain, offset);
+ if (offset == 0)
+ /*
+ * Store the base into the gpiochip to be used when
+ * unmapping the irqs.
+ */
+ gpiochip->irq_base = irq_base;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add);
+
+#else /* CONFIG_GPIOLIB_IRQCHIP */
+
+static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
+
+#endif /* CONFIG_GPIOLIB_IRQCHIP */
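To make the intended call flow concrete, here is a minimal, hypothetical sketch of a GPIO driver using the two helpers added above; the foo_* structures and callbacks are illustrative only and not part of this patch:

static struct irq_chip foo_irq_chip = {
	.name		= "foo-gpio",
	.irq_mask	= foo_irq_mask,		/* assumed driver callbacks */
	.irq_unmask	= foo_irq_unmask,
};

static int foo_setup_irqs(struct foo_gpio *foo, int parent_irq)
{
	int ret;

	/* The gpio_chip must already be registered with gpiochip_add() */
	ret = gpiochip_irqchip_add(&foo->chip, &foo_irq_chip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret)
		return ret;

	/* Route the chained parent interrupt into the gpiochip */
	gpiochip_set_chained_irqchip(&foo->chip, &foo_irq_chip,
				     parent_irq, foo_gpio_irq_handler);
	return 0;
}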
+
#ifdef CONFIG_PINCTRL
/**
@@ -1457,26 +1672,14 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
* on each other, and help provide better diagnostics in debugfs.
* They're called even less than the "set direction" calls.
*/
-static int gpiod_request(struct gpio_desc *desc, const char *label)
+static int __gpiod_request(struct gpio_desc *desc, const char *label)
{
- struct gpio_chip *chip;
- int status = -EPROBE_DEFER;
+ struct gpio_chip *chip = desc->chip;
+ int status;
unsigned long flags;
- if (!desc) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
-
spin_lock_irqsave(&gpio_lock, flags);
- chip = desc->chip;
- if (chip == NULL)
- goto done;
-
- if (!try_module_get(chip->owner))
- goto done;
-
/* NOTE: gpio_request() can be called in early boot,
* before IRQs are enabled, for non-sleeping (SOC) GPIOs.
*/
@@ -1486,7 +1689,6 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
status = 0;
} else {
status = -EBUSY;
- module_put(chip->owner);
goto done;
}
@@ -1498,7 +1700,6 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
if (status < 0) {
desc_set_label(desc, NULL);
- module_put(chip->owner);
clear_bit(FLAG_REQUESTED, &desc->flags);
goto done;
}
@@ -1510,9 +1711,34 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
spin_lock_irqsave(&gpio_lock, flags);
}
done:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ return status;
+}
+
+static int gpiod_request(struct gpio_desc *desc, const char *label)
+{
+ int status = -EPROBE_DEFER;
+ struct gpio_chip *chip;
+
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
+
+ chip = desc->chip;
+ if (!chip)
+ goto done;
+
+ if (try_module_get(chip->owner)) {
+ status = __gpiod_request(desc, label);
+ if (status < 0)
+ module_put(chip->owner);
+ }
+
+done:
if (status)
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
- spin_unlock_irqrestore(&gpio_lock, flags);
+
return status;
}
@@ -1522,18 +1748,14 @@ int gpio_request(unsigned gpio, const char *label)
}
EXPORT_SYMBOL_GPL(gpio_request);
-static void gpiod_free(struct gpio_desc *desc)
+static bool __gpiod_free(struct gpio_desc *desc)
{
+ bool ret = false;
unsigned long flags;
struct gpio_chip *chip;
might_sleep();
- if (!desc) {
- WARN_ON(extra_checks);
- return;
- }
-
gpiod_unexport(desc);
spin_lock_irqsave(&gpio_lock, flags);
@@ -1547,15 +1769,23 @@ static void gpiod_free(struct gpio_desc *desc)
spin_lock_irqsave(&gpio_lock, flags);
}
desc_set_label(desc, NULL);
- module_put(desc->chip->owner);
clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
clear_bit(FLAG_REQUESTED, &desc->flags);
clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
- } else
- WARN_ON(extra_checks);
+ ret = true;
+ }
spin_unlock_irqrestore(&gpio_lock, flags);
+ return ret;
+}
+
+static void gpiod_free(struct gpio_desc *desc)
+{
+ if (desc && __gpiod_free(desc))
+ module_put(desc->chip->owner);
+ else
+ WARN_ON(extra_checks);
}
void gpio_free(unsigned gpio)
@@ -1590,7 +1820,7 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
if (flags & GPIOF_DIR_IN)
err = gpiod_direction_input(desc);
else
- err = gpiod_direction_output(desc,
+ err = gpiod_direction_output_raw(desc,
(flags & GPIOF_INIT_HIGH) ? 1 : 0);
if (err)
@@ -1677,6 +1907,37 @@ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
}
EXPORT_SYMBOL_GPL(gpiochip_is_requested);
+/**
+ * gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor
+ * @desc: GPIO descriptor to request
+ * @label: label for the GPIO
+ *
+ * This function allows GPIO chip drivers to request and use their own GPIO
+ * descriptors via the gpiolib API. The difference from gpiod_request() is that
+ * this function will not increase the reference count of the GPIO chip module.
+ * This allows the GPIO chip module to be unloaded as needed (we assume that
+ * the GPIO chip driver handles freeing the GPIOs it has requested).
+ */
+int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label)
+{
+ if (!desc || !desc->chip)
+ return -EINVAL;
+
+ return __gpiod_request(desc, label);
+}
+
+/**
+ * gpiochip_free_own_desc - Free GPIO requested by the chip driver
+ * @desc: GPIO descriptor to free
+ *
+ * This function frees a GPIO previously requested with
+ * gpiochip_request_own_desc().
+ */
+void gpiochip_free_own_desc(struct gpio_desc *desc)
+{
+ if (desc)
+ __gpiod_free(desc);
+}
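As a rough usage sketch (hypothetical driver code, not part of this patch), a gpio_chip driver could claim one of its own lines without pinning its module like this; the offset and label are arbitrary:

static int foo_claim_internal_line(struct gpio_chip *chip)
{
	struct gpio_desc *desc;
	int ret;

	desc = gpiochip_get_desc(chip, 3);	/* offset 3 is arbitrary */
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	ret = gpiochip_request_own_desc(desc, "foo-internal");
	if (ret)
		return ret;

	/* ... drive the line via the gpiod_* API ... */

	gpiochip_free_own_desc(desc);	/* no module refcount was taken */
	return 0;
}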
/* Drivers MUST set GPIO direction before making get/set calls. In
* some cases this is done in early boot, before IRQs are enabled.
@@ -1756,28 +2017,13 @@ fail:
}
EXPORT_SYMBOL_GPL(gpiod_direction_input);
-/**
- * gpiod_direction_output - set the GPIO direction to input
- * @desc: GPIO to set to output
- * @value: initial output value of the GPIO
- *
- * Set the direction of the passed GPIO to output, such as gpiod_set_value() can
- * be called safely on it. The initial value of the output must be specified.
- *
- * Return 0 in case of success, else an error code.
- */
-int gpiod_direction_output(struct gpio_desc *desc, int value)
+static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
unsigned long flags;
struct gpio_chip *chip;
int status = -EINVAL;
int offset;
- if (!desc || !desc->chip) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
-
/* GPIOs used for IRQs shall not be set as output */
if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) {
gpiod_err(desc,
@@ -1840,6 +2086,50 @@ fail:
gpiod_dbg(desc, "%s: gpio status %d\n", __func__, status);
return status;
}
+
+/**
+ * gpiod_direction_output_raw - set the GPIO direction to output
+ * @desc: GPIO to set to output
+ * @value: initial output value of the GPIO
+ *
+ * Set the direction of the passed GPIO to output, so that gpiod_set_value()
+ * can be called safely on it. The initial value of the output must be specified
+ * as the raw value on the physical line, without regard for the ACTIVE_LOW status.
+ *
+ * Return 0 in case of success, else an error code.
+ */
+int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
+{
+ if (!desc || !desc->chip) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
+ return _gpiod_direction_output_raw(desc, value);
+}
+EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
+
+/**
+ * gpiod_direction_output - set the GPIO direction to output
+ * @desc: GPIO to set to output
+ * @value: initial output value of the GPIO
+ *
+ * Set the direction of the passed GPIO to output, so that gpiod_set_value()
+ * can be called safely on it. The initial value of the output must be specified
+ * as the logical value of the GPIO, i.e. taking its ACTIVE_LOW status into
+ * account.
+ *
+ * Return 0 in case of success, else an error code.
+ */
+int gpiod_direction_output(struct gpio_desc *desc, int value)
+{
+ if (!desc || !desc->chip) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+ return _gpiod_direction_output_raw(desc, value);
+}
EXPORT_SYMBOL_GPL(gpiod_direction_output);
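To illustrate the raw/logical split, a minimal sketch assuming a valid, requested descriptor with FLAG_ACTIVE_LOW set (illustrative only):

static void foo_show_output_polarity(struct gpio_desc *desc)
{
	/* desc is assumed to be an active-low GPIO already requested */
	gpiod_direction_output(desc, 1);	/* logical 1: line driven physically low */
	gpiod_direction_output_raw(desc, 1);	/* raw 1: line driven physically high */
}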
/**
@@ -1928,15 +2218,15 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
* that the GPIO was actually requested.
*/
-static int _gpiod_get_raw_value(const struct gpio_desc *desc)
+static bool _gpiod_get_raw_value(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
- int value;
+ bool value;
int offset;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
- value = chip->get ? chip->get(chip, offset) : 0;
+ value = chip->get ? chip->get(chip, offset) : false;
trace_gpio_value(desc_to_gpio(desc), 1, value);
return value;
}
@@ -1992,7 +2282,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_value);
* @desc: gpio descriptor whose state needs to be set.
* @value: Non-zero to set it HIGH, otherwise it will be set LOW.
*/
-static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
+static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
{
int err = 0;
struct gpio_chip *chip = desc->chip;
@@ -2019,7 +2309,7 @@ static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
* @desc: gpio descriptor whose state needs to be set.
* @value: Non-zero to set it HIGH, otherwise it will be set LOW.
*/
-static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
+static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value)
{
int err = 0;
struct gpio_chip *chip = desc->chip;
@@ -2041,7 +2331,7 @@ static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
__func__, err);
}
-static void _gpiod_set_raw_value(struct gpio_desc *desc, int value)
+static void _gpiod_set_raw_value(struct gpio_desc *desc, bool value)
{
struct gpio_chip *chip;
@@ -2137,10 +2427,7 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
* @gpio: the GPIO line to lock as used for IRQ
*
* This is used directly by GPIO drivers that want to lock down
- * a certain GPIO line to be used as IRQs, for example in the
- * .to_irq() callback of their gpio_chip, or in the .irq_enable()
- * of its irq_chip implementation if the GPIO is known from that
- * code.
+ * a certain GPIO line to be used for IRQs.
*/
int gpiod_lock_as_irq(struct gpio_desc *desc)
{
@@ -2161,7 +2448,7 @@ EXPORT_SYMBOL_GPL(gpiod_lock_as_irq);
int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
- return gpiod_lock_as_irq(gpiochip_offset_to_desc(chip, offset));
+ return gpiod_lock_as_irq(gpiochip_get_desc(chip, offset));
}
EXPORT_SYMBOL_GPL(gpio_lock_as_irq);
@@ -2183,7 +2470,7 @@ EXPORT_SYMBOL_GPL(gpiod_unlock_as_irq);
void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
- return gpiod_unlock_as_irq(gpiochip_offset_to_desc(chip, offset));
+ return gpiod_unlock_as_irq(gpiochip_get_desc(chip, offset));
}
EXPORT_SYMBOL_GPL(gpio_unlock_as_irq);
@@ -2404,7 +2691,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
return ERR_PTR(-EINVAL);
}
- desc = gpiochip_offset_to_desc(chip, p->chip_hwnum);
+ desc = gpiochip_get_desc(chip, p->chip_hwnum);
*flags = p->flags;
return desc;
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 82be586c1f90..cf092941a9fd 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -43,4 +43,7 @@ acpi_get_gpiod_by_index(struct device *dev, int index,
}
#endif
+int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label);
+void gpiochip_free_own_desc(struct gpio_desc *desc);
+
#endif /* GPIOLIB_H */
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 8e7fa4dbaed8..d1cc2f613a78 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -199,3 +199,5 @@ source "drivers/gpu/drm/msm/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
source "drivers/gpu/drm/panel/Kconfig"
+
+source "drivers/gpu/drm/bridge/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 292a79d64146..9d25dbbe6771 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -13,7 +13,8 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
- drm_rect.o drm_vma_manager.o drm_flip_work.o
+ drm_rect.o drm_vma_manager.o drm_flip_work.o \
+ drm_plane_helper.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -63,3 +64,4 @@ obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
obj-y += panel/
+obj-y += bridge/
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index d8e398275ca8..81c34f949dfc 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -478,11 +478,12 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
unsigned i;
bool interlaced;
- drm_framebuffer_reference(crtc->fb);
+ drm_framebuffer_reference(crtc->primary->fb);
interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
- i = armada_drm_crtc_calc_fb(dcrtc->crtc.fb, x, y, regs, interlaced);
+ i = armada_drm_crtc_calc_fb(dcrtc->crtc.primary->fb,
+ x, y, regs, interlaced);
rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
lm = adj->crtc_htotal - adj->crtc_hsync_end;
@@ -567,10 +568,10 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
}
val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
- val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt);
- val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.fb)->mod);
+ val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
+ val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
- if (drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt > CFG_420)
+ if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
val |= CFG_PALETTE_ENA;
if (interlaced)
@@ -608,7 +609,7 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct armada_regs regs[4];
unsigned i;
- i = armada_drm_crtc_calc_fb(crtc->fb, crtc->x, crtc->y, regs,
+ i = armada_drm_crtc_calc_fb(crtc->primary->fb, crtc->x, crtc->y, regs,
dcrtc->interlaced);
armada_reg_queue_end(regs, i);
@@ -616,7 +617,7 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
/* Take a reference to the new fb as we're using it */
- drm_framebuffer_reference(crtc->fb);
+ drm_framebuffer_reference(crtc->primary->fb);
/* Update the base in the CRTC */
armada_drm_crtc_update_regs(dcrtc, regs);
@@ -637,7 +638,7 @@ static void armada_drm_crtc_disable(struct drm_crtc *crtc)
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- armada_drm_crtc_finish_fb(dcrtc, crtc->fb, true);
+ armada_drm_crtc_finish_fb(dcrtc, crtc->primary->fb, true);
/* Power down most RAMs and FIFOs */
writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
@@ -678,6 +679,7 @@ static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
base + LCD_SPU_SRAM_WRDAT);
writel_relaxed(addr | SRAM_WRITE,
base + LCD_SPU_SRAM_CTRL);
+ readl_relaxed(base + LCD_SPU_HWC_OVSA_HPXL_VLN);
addr += 1;
if ((addr & 0x00ff) == 0)
addr += 0xf00;
@@ -904,7 +906,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
int ret;
/* We don't support changing the pixel format */
- if (fb->pixel_format != crtc->fb->pixel_format)
+ if (fb->pixel_format != crtc->primary->fb->pixel_format)
return -EINVAL;
work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -912,7 +914,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
return -ENOMEM;
work->event = event;
- work->old_fb = dcrtc->crtc.fb;
+ work->old_fb = dcrtc->crtc.primary->fb;
i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
dcrtc->interlaced);
@@ -941,7 +943,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
* will _not_ drop that reference on successful return from this
* function. Simply mark this new framebuffer as the current one.
*/
- dcrtc->crtc.fb = fb;
+ dcrtc->crtc.primary->fb = fb;
/*
* Finally, if the display is blanked, we won't receive an
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index cca063b11083..a4afdc8bb578 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -81,7 +81,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
u32 hborder, vborder;
- switch (crtc->fb->bits_per_pixel) {
+ switch (crtc->primary->fb->bits_per_pixel) {
case 8:
vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
color_index = VGAModeIndex - 1;
@@ -176,7 +176,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->fb->bits_per_pixel);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
@@ -340,7 +340,7 @@ static void ast_set_offset_reg(struct drm_crtc *crtc)
u16 offset;
- offset = crtc->fb->pitches[0] >> 3;
+ offset = crtc->primary->fb->pitches[0] >> 3;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
}
@@ -365,7 +365,7 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
struct ast_private *ast = crtc->dev->dev_private;
u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
- switch (crtc->fb->bits_per_pixel) {
+ switch (crtc->primary->fb->bits_per_pixel) {
case 8:
jregA0 = 0x70;
jregA3 = 0x01;
@@ -418,7 +418,7 @@ static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mo
static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
- switch (crtc->fb->bits_per_pixel) {
+ switch (crtc->primary->fb->bits_per_pixel) {
case 8:
break;
default:
@@ -490,7 +490,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
ast_bo_unreserve(bo);
}
- ast_fb = to_ast_framebuffer(crtc->fb);
+ ast_fb = to_ast_framebuffer(crtc->primary->fb);
obj = ast_fb->obj;
bo = gem_to_ast_bo(obj);
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 4ea9b17ac17a..b8246227bab0 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -259,7 +259,9 @@ int ast_mm_init(struct ast_private *ast)
ret = ttm_bo_device_init(&ast->ttm.bdev,
ast->ttm.bo_global_ref.ref.object,
- &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
+ &ast_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -324,7 +326,6 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
}
astbo->bo.bdev = &ast->ttm.bdev;
- astbo->bo.bdev->dev_mapping = dev->dev_mapping;
ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 62ec7d4b3816..dcf2e55f4ae9 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -62,10 +62,10 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
}
}
- if (WARN_ON(crtc->fb == NULL))
+ if (WARN_ON(crtc->primary->fb == NULL))
return -EINVAL;
- bochs_fb = to_bochs_framebuffer(crtc->fb);
+ bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
bo = gem_to_bochs_bo(bochs_fb->obj);
ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
if (ret)
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index ce6858765b37..f488be55d650 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -225,7 +225,9 @@ int bochs_mm_init(struct bochs_device *bochs)
ret = ttm_bo_device_init(&bochs->ttm.bdev,
bochs->ttm.bo_global_ref.ref.object,
- &bochs_bo_driver, DRM_FILE_PAGE_OFFSET,
+ &bochs_bo_driver,
+ bochs->dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -359,7 +361,7 @@ static int bochs_bo_create(struct drm_device *dev, int size, int align,
}
bochsbo->bo.bdev = &bochs->ttm.bdev;
- bochsbo->bo.bdev->dev_mapping = dev->dev_mapping;
+ bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
new file mode 100644
index 000000000000..884923f982d9
--- /dev/null
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -0,0 +1,5 @@
+config DRM_PTN3460
+ tristate "PTN3460 DP/LVDS bridge"
+ depends on DRM
+ select DRM_KMS_HELPER
+ ---help---
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
new file mode 100644
index 000000000000..b4733e1fbd2e
--- /dev/null
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -Iinclude/drm
+
+obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
new file mode 100644
index 000000000000..b171901a3553
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -0,0 +1,350 @@
+/*
+ * NXP PTN3460 DP/LVDS bridge driver
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include "bridge/ptn3460.h"
+
+#define PTN3460_EDID_ADDR 0x0
+#define PTN3460_EDID_EMULATION_ADDR 0x84
+#define PTN3460_EDID_ENABLE_EMULATION 0
+#define PTN3460_EDID_EMULATION_SELECTION 1
+#define PTN3460_EDID_SRAM_LOAD_ADDR 0x85
+
+struct ptn3460_bridge {
+ struct drm_connector connector;
+ struct i2c_client *client;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+ struct edid *edid;
+ int gpio_pd_n;
+ int gpio_rst_n;
+ u32 edid_emulation;
+ bool enabled;
+};
+
+static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
+ u8 *buf, int len)
+{
+ int ret;
+
+ ret = i2c_master_send(ptn_bridge->client, &addr, 1);
+ if (ret <= 0) {
+ DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_master_recv(ptn_bridge->client, buf, len);
+ if (ret <= 0) {
+ DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr,
+ char val)
+{
+ int ret;
+ char buf[2];
+
+ buf[0] = addr;
+ buf[1] = val;
+
+ ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf));
+ if (ret <= 0) {
+ DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge)
+{
+ int ret;
+ char val;
+
+ /* Load the selected edid into SRAM (accessed at PTN3460_EDID_ADDR) */
+ ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_SRAM_LOAD_ADDR,
+ ptn_bridge->edid_emulation);
+ if (ret) {
+ DRM_ERROR("Failed to transfer edid to sram, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Enable EDID emulation and select the desired EDID */
+ val = 1 << PTN3460_EDID_ENABLE_EMULATION |
+ ptn_bridge->edid_emulation << PTN3460_EDID_EMULATION_SELECTION;
+
+ ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_EMULATION_ADDR, val);
+ if (ret) {
+ DRM_ERROR("Failed to write edid value, ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
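A quick worked example of the register value computed above (the edid_emulation value of 3 is arbitrary):

/*
 * Example only: with ptn_bridge->edid_emulation == 3,
 *   val = (1 << PTN3460_EDID_ENABLE_EMULATION)     -> 0x01
 *       | (3 << PTN3460_EDID_EMULATION_SELECTION)  -> 0x06
 * so 0x07 is written to PTN3460_EDID_EMULATION_ADDR (0x84), after 3 was
 * written to PTN3460_EDID_SRAM_LOAD_ADDR (0x85) to load that EDID into SRAM.
 */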
+
+static void ptn3460_pre_enable(struct drm_bridge *bridge)
+{
+ struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
+ int ret;
+
+ if (ptn_bridge->enabled)
+ return;
+
+ if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+ gpio_set_value(ptn_bridge->gpio_pd_n, 1);
+
+ if (gpio_is_valid(ptn_bridge->gpio_rst_n)) {
+ gpio_set_value(ptn_bridge->gpio_rst_n, 0);
+ udelay(10);
+ gpio_set_value(ptn_bridge->gpio_rst_n, 1);
+ }
+
+ /*
+ * There's a bug in the PTN chip where it falsely asserts hotplug before
+ * it is fully functional. We're forced to wait for the maximum start up
+ * time specified in the chip's datasheet to make sure we're really up.
+ */
+ msleep(90);
+
+ ret = ptn3460_select_edid(ptn_bridge);
+ if (ret)
+ DRM_ERROR("Select edid failed ret=%d\n", ret);
+
+ ptn_bridge->enabled = true;
+}
+
+static void ptn3460_enable(struct drm_bridge *bridge)
+{
+}
+
+static void ptn3460_disable(struct drm_bridge *bridge)
+{
+ struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
+
+ if (!ptn_bridge->enabled)
+ return;
+
+ ptn_bridge->enabled = false;
+
+ if (gpio_is_valid(ptn_bridge->gpio_rst_n))
+ gpio_set_value(ptn_bridge->gpio_rst_n, 1);
+
+ if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+ gpio_set_value(ptn_bridge->gpio_pd_n, 0);
+}
+
+static void ptn3460_post_disable(struct drm_bridge *bridge)
+{
+}
+
+void ptn3460_bridge_destroy(struct drm_bridge *bridge)
+{
+ struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
+
+ drm_bridge_cleanup(bridge);
+ if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+ gpio_free(ptn_bridge->gpio_pd_n);
+ if (gpio_is_valid(ptn_bridge->gpio_rst_n))
+ gpio_free(ptn_bridge->gpio_rst_n);
+ /* Nothing else to free, we've got devm allocated memory */
+}
+
+struct drm_bridge_funcs ptn3460_bridge_funcs = {
+ .pre_enable = ptn3460_pre_enable,
+ .enable = ptn3460_enable,
+ .disable = ptn3460_disable,
+ .post_disable = ptn3460_post_disable,
+ .destroy = ptn3460_bridge_destroy,
+};
+
+int ptn3460_get_modes(struct drm_connector *connector)
+{
+ struct ptn3460_bridge *ptn_bridge;
+ u8 *edid;
+ int ret, num_modes;
+ bool power_off;
+
+ ptn_bridge = container_of(connector, struct ptn3460_bridge, connector);
+
+ if (ptn_bridge->edid)
+ return drm_add_edid_modes(connector, ptn_bridge->edid);
+
+ power_off = !ptn_bridge->enabled;
+ ptn3460_pre_enable(ptn_bridge->bridge);
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!edid) {
+ DRM_ERROR("Failed to allocate edid\n");
+ return 0;
+ }
+
+ ret = ptn3460_read_bytes(ptn_bridge, PTN3460_EDID_ADDR, edid,
+ EDID_LENGTH);
+ if (ret) {
+ kfree(edid);
+ num_modes = 0;
+ goto out;
+ }
+
+ ptn_bridge->edid = (struct edid *)edid;
+ drm_mode_connector_update_edid_property(connector, ptn_bridge->edid);
+
+ num_modes = drm_add_edid_modes(connector, ptn_bridge->edid);
+
+out:
+ if (power_off)
+ ptn3460_disable(ptn_bridge->bridge);
+
+ return num_modes;
+}
+
+static int ptn3460_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
+{
+ struct ptn3460_bridge *ptn_bridge;
+
+ ptn_bridge = container_of(connector, struct ptn3460_bridge, connector);
+
+ return ptn_bridge->encoder;
+}
+
+struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
+ .get_modes = ptn3460_get_modes,
+ .mode_valid = ptn3460_mode_valid,
+ .best_encoder = ptn3460_best_encoder,
+};
+
+enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
+ bool force)
+{
+ return connector_status_connected;
+}
+
+void ptn3460_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+struct drm_connector_funcs ptn3460_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = ptn3460_detect,
+ .destroy = ptn3460_connector_destroy,
+};
+
+int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
+ struct i2c_client *client, struct device_node *node)
+{
+ int ret;
+ struct drm_bridge *bridge;
+ struct ptn3460_bridge *ptn_bridge;
+
+ bridge = devm_kzalloc(dev->dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge) {
+ DRM_ERROR("Failed to allocate drm bridge\n");
+ return -ENOMEM;
+ }
+
+ ptn_bridge = devm_kzalloc(dev->dev, sizeof(*ptn_bridge), GFP_KERNEL);
+ if (!ptn_bridge) {
+ DRM_ERROR("Failed to allocate ptn bridge\n");
+ return -ENOMEM;
+ }
+
+ ptn_bridge->client = client;
+ ptn_bridge->encoder = encoder;
+ ptn_bridge->bridge = bridge;
+ ptn_bridge->gpio_pd_n = of_get_named_gpio(node, "powerdown-gpio", 0);
+ if (gpio_is_valid(ptn_bridge->gpio_pd_n)) {
+ ret = gpio_request_one(ptn_bridge->gpio_pd_n,
+ GPIOF_OUT_INIT_HIGH, "PTN3460_PD_N");
+ if (ret) {
+ DRM_ERROR("Request powerdown-gpio failed (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ ptn_bridge->gpio_rst_n = of_get_named_gpio(node, "reset-gpio", 0);
+ if (gpio_is_valid(ptn_bridge->gpio_rst_n)) {
+ /*
+ * Request the reset pin low to avoid the bridge being
+ * initialized prematurely
+ */
+ ret = gpio_request_one(ptn_bridge->gpio_rst_n,
+ GPIOF_OUT_INIT_LOW, "PTN3460_RST_N");
+ if (ret) {
+ DRM_ERROR("Request reset-gpio failed (%d)\n", ret);
+ gpio_free(ptn_bridge->gpio_pd_n);
+ return ret;
+ }
+ }
+
+ ret = of_property_read_u32(node, "edid-emulation",
+ &ptn_bridge->edid_emulation);
+ if (ret) {
+ DRM_ERROR("Can't read edid emulation value\n");
+ goto err;
+ }
+
+ ret = drm_bridge_init(dev, bridge, &ptn3460_bridge_funcs);
+ if (ret) {
+ DRM_ERROR("Failed to initialize bridge with drm\n");
+ goto err;
+ }
+
+ bridge->driver_private = ptn_bridge;
+ encoder->bridge = bridge;
+
+ ret = drm_connector_init(dev, &ptn_bridge->connector,
+ &ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ goto err;
+ }
+ drm_connector_helper_add(&ptn_bridge->connector,
+ &ptn3460_connector_helper_funcs);
+ drm_sysfs_connector_add(&ptn_bridge->connector);
+ drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder);
+
+ return 0;
+
+err:
+ if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+ gpio_free(ptn_bridge->gpio_pd_n);
+ if (gpio_is_valid(ptn_bridge->gpio_rst_n))
+ gpio_free(ptn_bridge->gpio_rst_n);
+ return ret;
+}
+EXPORT_SYMBOL(ptn3460_init);
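For orientation, a hedged sketch of how a KMS driver's encoder setup might hook this bridge up; the lookup helper and node handling are assumptions, not part of this patch:

static int foo_attach_ptn3460(struct drm_device *drm,
			      struct drm_encoder *encoder,
			      struct device_node *bridge_node)
{
	struct i2c_client *client;

	client = of_find_i2c_device_by_node(bridge_node);	/* assumed lookup */
	if (!client)
		return -EPROBE_DEFER;

	/* Registers the drm_bridge and its LVDS connector for this encoder */
	return ptn3460_init(drm, encoder, client, bridge_node);
}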
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 530f78f84dee..2d64aea83df2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -149,7 +149,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
cirrus_bo_unreserve(bo);
}
- cirrus_fb = to_cirrus_framebuffer(crtc->fb);
+ cirrus_fb = to_cirrus_framebuffer(crtc->primary->fb);
obj = cirrus_fb->obj;
bo = gem_to_cirrus_bo(obj);
@@ -268,7 +268,7 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
sr07 = RREG8(SEQ_DATA);
sr07 &= 0xe0;
hdr = 0;
- switch (crtc->fb->bits_per_pixel) {
+ switch (crtc->primary->fb->bits_per_pixel) {
case 8:
sr07 |= 0x11;
break;
@@ -291,13 +291,13 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
WREG_SEQ(0x7, sr07);
/* Program the pitch */
- tmp = crtc->fb->pitches[0] / 8;
+ tmp = crtc->primary->fb->pitches[0] / 8;
WREG_CRT(VGA_CRTC_OFFSET, tmp);
/* Enable extended blanking and pitch bits, and enable full memory */
tmp = 0x22;
- tmp |= (crtc->fb->pitches[0] >> 7) & 0x10;
- tmp |= (crtc->fb->pitches[0] >> 6) & 0x40;
+ tmp |= (crtc->primary->fb->pitches[0] >> 7) & 0x10;
+ tmp |= (crtc->primary->fb->pitches[0] >> 6) & 0x40;
WREG_CRT(0x1b, tmp);
/* Enable high-colour modes */
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 8b37c25ff9bd..92e6b7786097 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -259,7 +259,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
ret = ttm_bo_device_init(&cirrus->ttm.bdev,
cirrus->ttm.bo_global_ref.ref.object,
- &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET,
+ &cirrus_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -329,7 +331,6 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
}
cirrusbo->bo.bdev = &cirrus->ttm.bdev;
- cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index bb8f58012189..534cb89b160d 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -32,6 +32,12 @@
#include <drm/drmP.h>
#if defined(CONFIG_X86)
+
+/*
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
+ * in the caller.
+ */
static void
drm_clflush_page(struct page *page)
{
@@ -44,7 +50,7 @@ drm_clflush_page(struct page *page)
page_virtual = kmap_atomic(page);
for (i = 0; i < PAGE_SIZE; i += size)
- clflush(page_virtual + i);
+ clflushopt(page_virtual + i);
kunmap_atomic(page_virtual);
}
@@ -133,7 +139,7 @@ drm_clflush_virt_range(char *addr, unsigned long length)
mb();
for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
clflush(addr);
- clflush(end - 1);
+ clflushopt(end - 1);
mb();
return;
}
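The fencing the comment above refers to follows the fence/flush/fence shape visible in drm_clflush_virt_range(); a rough sketch of that caller-side pattern (illustrative, mirroring what the exported helpers in this file do around drm_clflush_page()):

static void foo_flush_pages(struct page **pages, unsigned long num_pages)
{
	unsigned long i;

	mb();					/* order against earlier stores */
	for (i = 0; i < num_pages; i++)
		drm_clflush_page(pages[i]);	/* unordered clflushopt per page */
	mb();					/* make the flushes globally visible */
}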
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3b7d32da1604..d8b7099abece 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -38,12 +38,15 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+#include "drm_crtc_internal.h"
+
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
*
* This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented.
+ * scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
@@ -59,6 +62,8 @@ EXPORT_SYMBOL(drm_modeset_lock_all);
/**
* drm_modeset_unlock_all - drop all modeset locks
* @dev: device
+ *
+ * This function drops all modeset locks taken by drm_modeset_lock_all().
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
@@ -74,6 +79,8 @@ EXPORT_SYMBOL(drm_modeset_unlock_all);
/**
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
* @dev: device
+ *
+ * Useful as a debug assert.
*/
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
@@ -114,6 +121,13 @@ static const struct drm_prop_enum_list drm_dpms_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+static const struct drm_prop_enum_list drm_plane_type_enum_list[] =
+{
+ { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
+ { DRM_PLANE_TYPE_PRIMARY, "Primary" },
+ { DRM_PLANE_TYPE_CURSOR, "Cursor" },
+};
+
/*
* Optional properties
*/
@@ -215,6 +229,16 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_DSI, "DSI" },
};
+static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
+{
+ { SubPixelUnknown, "Unknown" },
+ { SubPixelHorizontalRGB, "Horizontal RGB" },
+ { SubPixelHorizontalBGR, "Horizontal BGR" },
+ { SubPixelVerticalRGB, "Vertical RGB" },
+ { SubPixelVerticalBGR, "Vertical BGR" },
+ { SubPixelNone, "None" },
+};
+
void drm_connector_ida_init(void)
{
int i;
@@ -231,6 +255,15 @@ void drm_connector_ida_destroy(void)
ida_destroy(&drm_connector_enum_list[i].ida);
}
+/**
+ * drm_get_encoder_name - return a string for encoder
+ * @encoder: encoder to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
+ *
+ * FIXME: This isn't really multithreading safe.
+ */
const char *drm_get_encoder_name(const struct drm_encoder *encoder)
{
static char buf[32];
@@ -242,6 +275,15 @@ const char *drm_get_encoder_name(const struct drm_encoder *encoder)
}
EXPORT_SYMBOL(drm_get_encoder_name);
+/**
+ * drm_get_connector_name - return a string for connector
+ * @connector: connector to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
+ *
+ * FIXME: This isn't really multithreading safe.
+ */
const char *drm_get_connector_name(const struct drm_connector *connector)
{
static char buf[32];
@@ -253,6 +295,13 @@ const char *drm_get_connector_name(const struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_get_connector_name);
+/**
+ * drm_get_connector_status_name - return a string for connector status
+ * @status: connector status to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions this one here returns a
+ * const pointer and hence is threadsafe.
+ */
const char *drm_get_connector_status_name(enum drm_connector_status status)
{
if (status == connector_status_connected)
@@ -264,11 +313,33 @@ const char *drm_get_connector_status_name(enum drm_connector_status status)
}
EXPORT_SYMBOL(drm_get_connector_status_name);
+/**
+ * drm_get_subpixel_order_name - return a string for a given subpixel enum
+ * @order: enum of subpixel_order
+ *
+ * Note you could abuse this and return something out of bounds, but that
+ * would be a caller error. No unscrubbed user data should make it here.
+ */
+const char *drm_get_subpixel_order_name(enum subpixel_order order)
+{
+ return drm_subpixel_enum_list[order].name;
+}
+EXPORT_SYMBOL(drm_get_subpixel_order_name);
+
static char printable_char(int c)
{
return isascii(c) && isprint(c) ? c : '?';
}
+/**
+ * drm_get_format_name - return a string for drm fourcc format
+ * @format: format to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
+ *
+ * FIXME: This isn't really multithreading safe.
+ */
const char *drm_get_format_name(uint32_t format)
{
static char buf[32];
@@ -293,14 +364,16 @@ EXPORT_SYMBOL(drm_get_format_name);
* @obj_type: object type
*
* Create a unique identifier based on @ptr in @dev's identifier space. Used
- * for tracking modes, CRTCs and connectors.
+ * for tracking modes, CRTCs and connectors. Note that despite the _get postfix
+ * modeset identifiers are _not_ reference counted. Hence don't use this for
+ * reference counted modeset objects like framebuffers.
*
- * RETURNS:
+ * Returns:
* New unique (relative to other objects in @dev) integer identifier for the
* object.
*/
-static int drm_mode_object_get(struct drm_device *dev,
- struct drm_mode_object *obj, uint32_t obj_type)
+int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type)
{
int ret;
@@ -324,10 +397,12 @@ static int drm_mode_object_get(struct drm_device *dev,
* @dev: DRM device
* @object: object to free
*
- * Free @id from @dev's unique identifier pool.
+ * Free @id from @dev's unique identifier pool. Note that despite the _get
+ * postfix modeset identifiers are _not_ reference counted. Hence don't use this
+ * for reference counted modeset objects like framebuffers.
*/
-static void drm_mode_object_put(struct drm_device *dev,
- struct drm_mode_object *object)
+void drm_mode_object_put(struct drm_device *dev,
+ struct drm_mode_object *object)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.crtc_idr, object->id);
@@ -377,7 +452,7 @@ EXPORT_SYMBOL(drm_mode_object_find);
* since all the fb attributes are invariant over its lifetime, no further
* locking but only correct reference counting is required.
*
- * RETURNS:
+ * Returns:
* Zero on success, error code on failure.
*/
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
@@ -438,7 +513,7 @@ static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
*
* If successful, this grabs an additional reference to the framebuffer -
* callers need to make sure to eventually unreference the returned framebuffer
- * again.
+ * again, using drm_framebuffer_unreference().
*/
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
uint32_t id)
@@ -471,6 +546,8 @@ EXPORT_SYMBOL(drm_framebuffer_unreference);
/**
* drm_framebuffer_reference - incr the fb refcnt
* @fb: framebuffer
+ *
+ * This function increments the fb's refcount.
*/
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
@@ -527,8 +604,9 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
*
- * Cleanup references to a user-created framebuffer. This function is intended
- * to be used from the drivers ->destroy callback.
+ * Clean up a framebuffer. This function is intended to be used from the
+ * driver's ->destroy callback. It can also be used to clean up driver-private
+ * framebuffers embedded into a larger structure.
*
* Note that this function does not remove the fb from active usage - if it is
* still used anywhere, hilarity can ensue since userspace could call getfb on
@@ -591,7 +669,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
drm_modeset_lock_all(dev);
/* remove from any CRTC */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb == fb) {
+ if (crtc->primary->fb == fb) {
/* should turn off the crtc */
memset(&set, 0, sizeof(struct drm_mode_set));
set.crtc = crtc;
@@ -614,18 +692,23 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
EXPORT_SYMBOL(drm_framebuffer_remove);
/**
- * drm_crtc_init - Initialise a new CRTC object
+ * drm_crtc_init_with_planes - Initialise a new CRTC object with
+ * specified primary and cursor planes.
* @dev: DRM device
* @crtc: CRTC object to init
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
* @funcs: callbacks for the new CRTC
*
* Inits a new object created as base part of a driver crtc object.
*
- * RETURNS:
+ * Returns:
* Zero on success, error code on failure.
*/
-int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs)
+int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ void *cursor,
+ const struct drm_crtc_funcs *funcs)
{
int ret;
@@ -646,12 +729,16 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
+ crtc->primary = primary;
+ if (primary)
+ primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+
out:
drm_modeset_unlock_all(dev);
return ret;
}
-EXPORT_SYMBOL(drm_crtc_init);
+EXPORT_SYMBOL(drm_crtc_init_with_planes);
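A hedged sketch of how a driver might use the new entry point together with drm_universal_plane_init() (introduced further down in this patch); foo_plane_funcs, foo_crtc_funcs and the format list are placeholders:

struct foo_crtc {
	struct drm_crtc base;
	struct drm_plane primary;
};

static const uint32_t foo_formats[] = { DRM_FORMAT_XRGB8888 };

static int foo_crtc_create(struct drm_device *dev, struct foo_crtc *fc)
{
	int ret;

	ret = drm_universal_plane_init(dev, &fc->primary, 0, &foo_plane_funcs,
				       foo_formats, ARRAY_SIZE(foo_formats),
				       DRM_PLANE_TYPE_PRIMARY);
	if (ret)
		return ret;

	/*
	 * No cursor plane yet, so pass NULL; drm_crtc_init_with_planes()
	 * fills in primary->possible_crtcs for us.
	 */
	return drm_crtc_init_with_planes(dev, &fc->base, &fc->primary, NULL,
					 &foo_crtc_funcs);
}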
/**
* drm_crtc_cleanup - Clean up the core crtc usage
@@ -697,20 +784,6 @@ unsigned int drm_crtc_index(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_index);
-/**
- * drm_mode_probed_add - add a mode to a connector's probed mode list
- * @connector: connector the new mode
- * @mode: mode data
- *
- * Add @mode to @connector's mode list for later use.
- */
-void drm_mode_probed_add(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- list_add_tail(&mode->head, &connector->probed_modes);
-}
-EXPORT_SYMBOL(drm_mode_probed_add);
-
/*
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
@@ -735,7 +808,7 @@ static void drm_mode_remove(struct drm_connector *connector,
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*
- * RETURNS:
+ * Returns:
* Zero on success, error code on failure.
*/
int drm_connector_init(struct drm_device *dev,
@@ -813,6 +886,14 @@ void drm_connector_cleanup(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_connector_cleanup);
+/**
+ * drm_connector_unplug_all - unregister connector userspace interfaces
+ * @dev: drm device
+ *
+ * This function unregisters all connector userspace interfaces in sysfs. It
+ * should be called when the device is disconnected, e.g. from a USB driver's
+ * ->disconnect callback.
+ */
void drm_connector_unplug_all(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -824,6 +905,18 @@ void drm_connector_unplug_all(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_connector_unplug_all);
+/**
+ * drm_bridge_init - initialize a drm transcoder/bridge
+ * @dev: drm device
+ * @bridge: transcoder/bridge to set up
+ * @funcs: bridge function table
+ *
+ * Initialises a preallocated bridge. Bridges should be
+ * subclassed as part of driver bridge objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
const struct drm_bridge_funcs *funcs)
{
@@ -847,6 +940,12 @@ int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
}
EXPORT_SYMBOL(drm_bridge_init);
+/**
+ * drm_bridge_cleanup - cleans up an initialised bridge
+ * @bridge: bridge to cleanup
+ *
+ * Cleans up the bridge but doesn't free the object.
+ */
void drm_bridge_cleanup(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
@@ -859,6 +958,19 @@ void drm_bridge_cleanup(struct drm_bridge *bridge)
}
EXPORT_SYMBOL(drm_bridge_cleanup);
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ *
+ * Initialises a preallocated encoder. Encoders should be
+ * subclassed as part of driver encoder objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
@@ -886,6 +998,12 @@ int drm_encoder_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_encoder_init);
+/**
+ * drm_encoder_cleanup - cleans up an initialised encoder
+ * @encoder: encoder to cleanup
+ *
+ * Cleans up the encoder but doesn't free the object.
+ */
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -898,25 +1016,25 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
EXPORT_SYMBOL(drm_encoder_cleanup);
/**
- * drm_plane_init - Initialise a new plane object
+ * drm_universal_plane_init - Initialize a new universal plane object
* @dev: DRM device
* @plane: plane object to init
* @possible_crtcs: bitmask of possible CRTCs
* @funcs: callbacks for the new plane
* @formats: array of supported formats (%DRM_FORMAT_*)
* @format_count: number of elements in @formats
- * @priv: plane is private (hidden from userspace)?
+ * @type: type of plane (overlay, primary, cursor)
*
- * Inits a new object created as base part of a driver plane object.
+ * Initializes a plane object of type @type.
*
- * RETURNS:
+ * Returns:
* Zero on success, error code on failure.
*/
-int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, uint32_t format_count,
- bool priv)
+int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ enum drm_plane_type type)
{
int ret;
@@ -941,23 +1059,53 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count;
plane->possible_crtcs = possible_crtcs;
+ plane->type = type;
- /* private planes are not exposed to userspace, but depending on
- * display hardware, might be convenient to allow sharing programming
- * for the scanout engine with the crtc implementation.
- */
- if (!priv) {
- list_add_tail(&plane->head, &dev->mode_config.plane_list);
- dev->mode_config.num_plane++;
- } else {
- INIT_LIST_HEAD(&plane->head);
- }
+ list_add_tail(&plane->head, &dev->mode_config.plane_list);
+ dev->mode_config.num_total_plane++;
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+ dev->mode_config.num_overlay_plane++;
+
+ drm_object_attach_property(&plane->base,
+ dev->mode_config.plane_type_property,
+ plane->type);
out:
drm_modeset_unlock_all(dev);
return ret;
}
+EXPORT_SYMBOL(drm_universal_plane_init);
+
+/**
+ * drm_plane_init - Initialize a legacy plane
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @is_primary: plane type (primary vs overlay)
+ *
+ * Legacy API to initialize a DRM plane.
+ *
+ * New drivers should call drm_universal_plane_init() instead.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool is_primary)
+{
+ enum drm_plane_type type;
+
+ type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, type);
+}
EXPORT_SYMBOL(drm_plane_init);
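In other words, for an existing overlay-plane driver the legacy call maps onto the new one roughly like this (fragment for illustration; dev, plane, funcs and formats are assumed to exist):

/* Legacy call as used by existing drivers ... */
ret = drm_plane_init(dev, plane, 0x1, &funcs, formats, nformats, false);
/* ... is now equivalent to the universal form with an explicit type */
ret = drm_universal_plane_init(dev, plane, 0x1, &funcs, formats, nformats,
			       DRM_PLANE_TYPE_OVERLAY);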
/**
@@ -975,11 +1123,13 @@ void drm_plane_cleanup(struct drm_plane *plane)
drm_modeset_lock_all(dev);
kfree(plane->format_types);
drm_mode_object_put(dev, &plane->base);
- /* if not added to a list, it must be a private plane */
- if (!list_empty(&plane->head)) {
- list_del(&plane->head);
- dev->mode_config.num_plane--;
- }
+
+ BUG_ON(list_empty(&plane->head));
+
+ list_del(&plane->head);
+ dev->mode_config.num_total_plane--;
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+ dev->mode_config.num_overlay_plane--;
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_plane_cleanup);
@@ -1010,50 +1160,6 @@ void drm_plane_force_disable(struct drm_plane *plane)
}
EXPORT_SYMBOL(drm_plane_force_disable);
-/**
- * drm_mode_create - create a new display mode
- * @dev: DRM device
- *
- * Create a new drm_display_mode, give it an ID, and return it.
- *
- * RETURNS:
- * Pointer to new mode on success, NULL on error.
- */
-struct drm_display_mode *drm_mode_create(struct drm_device *dev)
-{
- struct drm_display_mode *nmode;
-
- nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
- if (!nmode)
- return NULL;
-
- if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
- kfree(nmode);
- return NULL;
- }
-
- return nmode;
-}
-EXPORT_SYMBOL(drm_mode_create);
-
-/**
- * drm_mode_destroy - remove a mode
- * @dev: DRM device
- * @mode: mode to remove
- *
- * Free @mode's unique identifier, then free it.
- */
-void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
-{
- if (!mode)
- return;
-
- drm_mode_object_put(dev, &mode->base);
-
- kfree(mode);
-}
-EXPORT_SYMBOL(drm_mode_destroy);
-
static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
{
struct drm_property *edid;
@@ -1075,6 +1181,21 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
return 0;
}
+static int drm_mode_create_standard_plane_properties(struct drm_device *dev)
+{
+ struct drm_property *type;
+
+ /*
+ * Standard properties (apply to all planes)
+ */
+ type = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "type", drm_plane_type_enum_list,
+ ARRAY_SIZE(drm_plane_type_enum_list));
+ dev->mode_config.plane_type_property = type;
+
+ return 0;
+}
+
/**
* drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
* @dev: DRM device
@@ -1257,6 +1378,10 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
return 0;
}
+/*
+ * NOTE: Drivers shouldn't ever call drm_mode_group_init_legacy_group - it is
+ * the drm core's responsibility to set up mode control groups.
+ */
int drm_mode_group_init_legacy_group(struct drm_device *dev,
struct drm_mode_group *group)
{
@@ -1333,7 +1458,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
static int drm_crtc_convert_umode(struct drm_display_mode *out,
@@ -1376,7 +1501,7 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getresources(struct drm_device *dev, void *data,
@@ -1429,9 +1554,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
mutex_unlock(&file_priv->fbs_lock);
drm_modeset_lock_all(dev);
- mode_group = &file_priv->master->minor->mode_group;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+ if (!drm_is_primary_client(file_priv)) {
+ mode_group = NULL;
list_for_each(lh, &dev->mode_config.crtc_list)
crtc_count++;
@@ -1442,6 +1567,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
encoder_count++;
} else {
+ mode_group = &file_priv->master->minor->mode_group;
crtc_count = mode_group->num_crtcs;
connector_count = mode_group->num_connectors;
encoder_count = mode_group->num_encoders;
@@ -1456,7 +1582,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_crtcs >= crtc_count) {
copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+ if (!mode_group) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
head) {
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
@@ -1483,7 +1609,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_encoders >= encoder_count) {
copied = 0;
encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+ if (!mode_group) {
list_for_each_entry(encoder,
&dev->mode_config.encoder_list,
head) {
@@ -1514,7 +1640,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_connectors >= connector_count) {
copied = 0;
connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+ if (!mode_group) {
list_for_each_entry(connector,
&dev->mode_config.connector_list,
head) {
@@ -1561,7 +1687,7 @@ out:
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getcrtc(struct drm_device *dev,
@@ -1588,8 +1714,8 @@ int drm_mode_getcrtc(struct drm_device *dev,
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
crtc_resp->gamma_size = crtc->gamma_size;
- if (crtc->fb)
- crtc_resp->fb_id = crtc->fb->base.id;
+ if (crtc->primary->fb)
+ crtc_resp->fb_id = crtc->primary->fb->base.id;
else
crtc_resp->fb_id = 0;
@@ -1630,7 +1756,7 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getconnector(struct drm_device *dev, void *data,
@@ -1765,6 +1891,19 @@ out:
return ret;
}
+/**
+ * drm_mode_getencoder - get encoder configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Construct an encoder configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1800,21 +1939,27 @@ out:
}
/**
- * drm_mode_getplane_res - get plane info
+ * drm_mode_getplane_res - enumerate all plane resources
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
- * Return an plane count and set of IDs.
+ * Construct a list of plane ids to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_mode_get_plane_res *plane_resp = data;
struct drm_mode_config *config;
struct drm_plane *plane;
uint32_t __user *plane_ptr;
int copied = 0, ret = 0;
+ unsigned num_planes;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -1822,15 +1967,28 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
config = &dev->mode_config;
+ if (file_priv->universal_planes)
+ num_planes = config->num_total_plane;
+ else
+ num_planes = config->num_overlay_plane;
+
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
- if (config->num_plane &&
- (plane_resp->count_planes >= config->num_plane)) {
+ if (num_planes &&
+ (plane_resp->count_planes >= num_planes)) {
plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
list_for_each_entry(plane, &config->plane_list, head) {
+ /*
+ * Unless userspace set the 'universal planes'
+ * capability bit, only advertise overlays.
+ */
+ if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
+ !file_priv->universal_planes)
+ continue;
+
if (put_user(plane->base.id, plane_ptr + copied)) {
ret = -EFAULT;
goto out;
@@ -1838,7 +1996,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
copied++;
}
}
- plane_resp->count_planes = config->num_plane;
+ plane_resp->count_planes = num_planes;
out:
drm_modeset_unlock_all(dev);
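As the comment above notes, the ioctl is called twice; a minimal userspace sketch of that pattern, with the universal-planes client cap enabled first so that primary and cursor planes are listed too (uapi structs from drm.h, header path may differ with libdrm, error handling omitted):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* pulls in drm_mode.h; installed path varies */

static uint32_t *get_plane_ids(int fd, uint32_t *count_out)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_UNIVERSAL_PLANES,
		.value = 1,
	};
	struct drm_mode_get_plane_res res = { 0 };
	uint32_t *ids;

	ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);

	/* First call: only count_planes gets filled in. */
	ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res);

	ids = calloc(res.count_planes, sizeof(*ids));
	res.plane_id_ptr = (uintptr_t)ids;

	/* Second call: the kernel copies count_planes plane ids. */
	ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res);

	*count_out = res.count_planes;
	return ids;
}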
@@ -1846,16 +2004,20 @@ out:
}
/**
- * drm_mode_getplane - get plane info
+ * drm_mode_getplane - get plane configuration
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
- * Return plane info, including formats supported, gamma size, any
- * current fb, etc.
+ * Construct a plane configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
*/
int drm_mode_getplane(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_mode_get_plane *plane_resp = data;
struct drm_mode_object *obj;
@@ -1911,16 +2073,19 @@ out:
}
/**
- * drm_mode_setplane - set up or tear down an plane
+ * drm_mode_setplane - configure a plane
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
- * Set plane info, including placement, fb, scaling, and other factors.
+ * Set plane configuration, including placement, fb, scaling, and other factors.
* Or pass a NULL fb to disable.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
*/
int drm_mode_setplane(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
struct drm_mode_object *obj;
@@ -2050,6 +2215,9 @@ out:
*
* This is a little helper to wrap internal calls to the ->set_config driver
* interface. The only thing it adds is correct refcounting dance.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
*/
int drm_mode_set_config_internal(struct drm_mode_set *set)
{
@@ -2064,19 +2232,21 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
* crtcs. Atomic modeset will have saner semantics ...
*/
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
- tmp->old_fb = tmp->fb;
+ tmp->old_fb = tmp->primary->fb;
fb = set->fb;
ret = crtc->funcs->set_config(set);
if (ret == 0) {
+ crtc->primary->crtc = crtc;
+
/* crtc->fb must be updated by ->set_config, enforces this. */
- WARN_ON(fb != crtc->fb);
+ WARN_ON(fb != crtc->primary->fb);
}
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
- if (tmp->fb)
- drm_framebuffer_reference(tmp->fb);
+ if (tmp->primary->fb)
+ drm_framebuffer_reference(tmp->primary->fb);
if (tmp->old_fb)
drm_framebuffer_unreference(tmp->old_fb);
}
@@ -2085,14 +2255,19 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
}
EXPORT_SYMBOL(drm_mode_set_config_internal);
-/*
- * Checks that the framebuffer is big enough for the CRTC viewport
- * (x, y, hdisplay, vdisplay)
+/**
+ * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
+ * CRTC viewport
+ * @crtc: CRTC that framebuffer will be displayed on
+ * @x: x panning
+ * @y: y panning
+ * @mode: mode that framebuffer will be displayed under
+ * @fb: framebuffer to check size of
*/
-static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
- int x, int y,
- const struct drm_display_mode *mode,
- const struct drm_framebuffer *fb)
+int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+ int x, int y,
+ const struct drm_display_mode *mode,
+ const struct drm_framebuffer *fb)
{
int hdisplay, vdisplay;
@@ -2123,6 +2298,7 @@ static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
return 0;
}
+EXPORT_SYMBOL(drm_crtc_check_viewport);
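With drm_crtc_check_viewport() now exported, a driver's own flip or modeset path can reuse it; a sketch with hypothetical foo_* names:

/* Sketch: validate that @fb covers the CRTC viewport before a flip. */
static int foo_validate_flip(struct drm_crtc *crtc,
			     struct drm_framebuffer *fb, int x, int y)
{
	int ret;

	ret = drm_crtc_check_viewport(crtc, x, y, &crtc->mode, fb);
	if (ret)
		DRM_DEBUG_KMS("framebuffer too small for CRTC viewport\n");

	return ret;
}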
/**
* drm_mode_setcrtc - set CRTC configuration
@@ -2134,7 +2310,7 @@ static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_setcrtc(struct drm_device *dev, void *data,
@@ -2174,12 +2350,12 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
- if (!crtc->fb) {
+ if (!crtc->primary->fb) {
DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
ret = -EINVAL;
goto out;
}
- fb = crtc->fb;
+ fb = crtc->primary->fb;
/* Make refcounting symmetric with the lookup path. */
drm_framebuffer_reference(fb);
} else {
@@ -2336,8 +2512,23 @@ out:
return ret;
}
+
+
+/**
+ * drm_mode_cursor_ioctl - set CRTC's cursor configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Set the cursor configuration based on user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_cursor_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+ void *data, struct drm_file *file_priv)
{
struct drm_mode_cursor *req = data;
struct drm_mode_cursor2 new_req;
@@ -2348,6 +2539,21 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
return drm_mode_cursor_common(dev, &new_req, file_priv);
}
+/**
+ * drm_mode_cursor2_ioctl - set CRTC's cursor configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Set the cursor configuration based on user request. This implements the 2nd
+ * version of the cursor ioctl, which allows userspace to additionally specify
+ * the hotspot of the pointer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_cursor2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -2355,7 +2561,14 @@ int drm_mode_cursor2_ioctl(struct drm_device *dev,
return drm_mode_cursor_common(dev, req, file_priv);
}
-/* Original addfb only supported RGB formats, so figure out which one */
+/**
+ * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
+ * @bpp: bits per pixels
+ * @depth: bit depth per pixel
+ *
+ * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
+ * Useful in fbdev emulation code, since that deals in those values.
+ */
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
{
uint32_t fmt;
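For example, fbdev emulation code that only has the legacy bpp/depth pair can derive the fourcc like this (32 bpp at depth 24 maps to DRM_FORMAT_XRGB8888):

	/* Sketch: typical fbdev-style request. */
	uint32_t format = drm_mode_legacy_fb_format(32, 24);
	/* format == DRM_FORMAT_XRGB8888 */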
@@ -2397,11 +2610,12 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
- * Add a new FB to the specified CRTC, given a user request.
+ * Add a new FB to the specified CRTC, given a user request. This is the
+ * original addfb ioctl which only supported RGB formats.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_addfb(struct drm_device *dev,
@@ -2574,11 +2788,13 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
- * Add a new FB to the specified CRTC, given a user request with format.
+ * Add a new FB to the specified CRTC, given a user request with format. This is
+ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
+ * and uses fourcc codes as pixel format specifiers.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_addfb2(struct drm_device *dev,
@@ -2638,7 +2854,7 @@ int drm_mode_addfb2(struct drm_device *dev,
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_rmfb(struct drm_device *dev,
@@ -2692,7 +2908,7 @@ fail_lookup:
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getfb(struct drm_device *dev,
@@ -2715,7 +2931,8 @@ int drm_mode_getfb(struct drm_device *dev,
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
if (fb->funcs->create_handle) {
- if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
+ if (file_priv->is_master || capable(CAP_SYS_ADMIN) ||
+ drm_is_control_client(file_priv)) {
ret = fb->funcs->create_handle(fb, file_priv,
&r->handle);
} else {
@@ -2736,6 +2953,25 @@ int drm_mode_getfb(struct drm_device *dev,
return ret;
}
+/**
+ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB and flush out the damaged area supplied by userspace as a clip
+ * rectangle list. Generic userspace which does frontbuffer rendering must call
+ * this ioctl to flush out the changes on manual-update display outputs, e.g.
+ * usb display-link, mipi manual update panels or edp panel self refresh modes.
+ *
+ * Modesetting drivers which always update the frontbuffer do not need to
+ * implement the corresponding ->dirty framebuffer callback.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
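From userspace the flush looks roughly like the following fragment (assumes the usual uapi headers and an open DRM fd; error handling omitted):

	/* Sketch: flush a 100x100 damaged region of framebuffer fb_id. */
	struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 100, .y2 = 100 };
	struct drm_mode_fb_dirty_cmd dirty = {
		.fb_id = fb_id,
		.num_clips = 1,
		.clips_ptr = (uintptr_t)&clip,
	};

	ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);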
@@ -2813,7 +3049,7 @@ out_err1:
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
void drm_fb_release(struct drm_file *priv)
@@ -2837,6 +3073,20 @@ void drm_fb_release(struct drm_file *priv)
mutex_unlock(&priv->fbs_lock);
}
+/**
+ * drm_property_create - create a new property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values)
{
@@ -2875,6 +3125,24 @@ fail:
}
EXPORT_SYMBOL(drm_property_create);
+/**
+ * drm_property_create_enum - create a new enumeration property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property values
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Userspace is only allowed to set one of the predefined values for enumeration
+ * properties.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
const char *name,
const struct drm_prop_enum_list *props,
@@ -2903,6 +3171,24 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
}
EXPORT_SYMBOL(drm_property_create_enum);
+/**
+ * drm_property_create_bitmask - create a new bitmask property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property bitflags
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Compared to plain enumeration properties, userspace is allowed to set any
+ * or'ed together combination of the predefined property bitflag values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
@@ -2931,6 +3217,24 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_property_create_bitmask);
+/**
+ * drm_property_create_range - create a new ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Userspace is allowed to set any integer value in the (min, max) range,
+ * inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max)
@@ -2950,6 +3254,21 @@ struct drm_property *drm_property_create_range(struct drm_device *dev, int flags
}
EXPORT_SYMBOL(drm_property_create_range);
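A driver typically pairs one of these constructors with drm_object_attach_property(); a sketch with a made-up property name and range:

/* Sketch: create a range property and attach it to a connector. */
static void foo_attach_overscan_prop(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_property *prop;

	prop = drm_property_create_range(dev, 0, "overscan margin", 0, 100);
	if (!prop)
		return;

	drm_object_attach_property(&connector->base, prop, 0);
}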
+/**
+ * drm_property_add_enum - add a possible value to an enumeration property
+ * @property: enumeration property to change
+ * @index: index of the new enumeration
+ * @value: value of the new enumeration
+ * @name: symbolic name of the new enumeration
+ *
+ * This function adds enumerations to a property.
+ *
+ * Its use is deprecated; drivers should use one of the more specific helpers
+ * to directly create the property with all enumerations already attached.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name)
{
@@ -2989,6 +3308,14 @@ int drm_property_add_enum(struct drm_property *property, int index,
}
EXPORT_SYMBOL(drm_property_add_enum);
+/**
+ * drm_property_destroy - destroy a drm property
+ * @dev: drm device
+ * @property: property to destroy
+ *
+ * This function frees a property including any attached resources like
+ * enumeration values.
+ */
void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
{
struct drm_property_enum *prop_enum, *pt;
@@ -3006,6 +3333,16 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
+/**
+ * drm_object_attach_property - attach a property to a modeset object
+ * @obj: drm modeset object
+ * @property: property to attach
+ * @init_val: initial value of the property
+ *
+ * This attaches the given property to the modeset object with the given initial
+ * value. Currently this function cannot fail since the properties are stored in
+ * a statically sized array.
+ */
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
@@ -3026,6 +3363,19 @@ void drm_object_attach_property(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_attach_property);
+/**
+ * drm_object_property_set_value - set the value of a property
+ * @obj: drm mode object to set property value for
+ * @property: property to set
+ * @val: value the property should be set to
+ *
+ * This function sets a given property on a given object. This function only
+ * changes the software state of the property, it does not call into the
+ * driver's ->set_property callback.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t val)
{
@@ -3042,6 +3392,20 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_property_set_value);
+/**
+ * drm_object_property_get_value - retrieve the value of a property
+ * @obj: drm mode object to get property value from
+ * @property: property to retrieve
+ * @val: storage for the property value
+ *
+ * This function retrieves the software state of the given property for the
+ * given object. Since there is no driver callback to retrieve the current
+ * property value, this might be out of sync with the hardware, depending on the driver
+ * and property.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
@@ -3058,6 +3422,19 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_property_get_value);
+/**
+ * drm_mode_getproperty_ioctl - get the property metadata
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the metadata for a property, like its name, flags and possible values.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3196,6 +3573,20 @@ static void drm_property_destroy_blob(struct drm_device *dev,
kfree(blob);
}
+/**
+ * drm_mode_getblob_ioctl - get the contents of a blob property value
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the contents of a blob property. The value stored in
+ * an object's blob property is just a normal modeset object id.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3230,6 +3621,17 @@ done:
return ret;
}
+/**
+ * drm_mode_connector_update_edid_property - update the edid property of a connector
+ * @connector: drm connector
+ * @edid: new value of the edid property
+ *
+ * This function creates a new blob modeset object and assigns its id to the
+ * connector's edid property.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
@@ -3287,6 +3689,20 @@ static bool drm_property_change_is_valid(struct drm_property *property,
}
}
+/**
+ * drm_mode_connector_property_set_ioctl - set the current value of a connector property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function sets the current value for a connector's property. It also
+ * calls into a driver's ->set_property callback to update the hardware state.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3353,6 +3769,21 @@ static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
+/**
+ * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the current value for an object's property. Compared
+ * to the connector specific ioctl this one is extended to also work on crtc and
+ * plane objects.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -3409,6 +3840,22 @@ out:
return ret;
}
+/**
+ * drm_mode_obj_set_property_ioctl - set the current value of an object's property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function sets the current value for an object's property. It also calls
+ * into a driver's ->set_property callback to update the hardware state.
+ * Compared to the connector specific ioctl this one is extended to also work on
+ * crtc and plane objects.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -3468,6 +3915,18 @@ out:
return ret;
}
+/**
+ * drm_mode_connector_attach_encoder - attach a connector to an encoder
+ * @connector: connector to attach
+ * @encoder: encoder to attach @connector to
+ *
+ * This function links up a connector to an encoder. Note that the routing
+ * restrictions between encoders and crtcs are exposed to userspace through the
+ * possible_clones and possible_crtcs bitmasks.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
@@ -3483,23 +3942,20 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
-void drm_mode_connector_detach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
-{
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == encoder->base.id) {
- connector->encoder_ids[i] = 0;
- if (connector->encoder == encoder)
- connector->encoder = NULL;
- break;
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
-
+/**
+ * drm_mode_crtc_set_gamma_size - set the gamma table size
+ * @crtc: CRTC to set the gamma table size for
+ * @gamma_size: size of the gamma table
+ *
+ * Drivers which support gamma tables should set this to the supported gamma
+ * table size when initializing the CRTC. Currently the drm core only supports a
+ * fixed gamma table size.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
- int gamma_size)
+ int gamma_size)
{
crtc->gamma_size = gamma_size;
@@ -3513,6 +3969,20 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
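A driver with a 256-entry hardware LUT would call this right after registering the CRTC, e.g. (hypothetical foo_* names):

	/* Sketch: CRTC init with a 256 entry gamma table. */
	drm_crtc_init(dev, &foo_crtc->base, &foo_crtc_funcs);
	drm_mode_crtc_set_gamma_size(&foo_crtc->base, 256);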
+/**
+ * drm_mode_gamma_set_ioctl - set the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
+ * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3572,6 +4042,21 @@ out:
}
+/**
+ * drm_mode_gamma_get_ioctl - get the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Copy the current gamma table into the storage provided. This also provides
+ * the gamma table size the driver expects, which can be used to size the
+ * allocated storage.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3622,6 +4107,24 @@ out:
return ret;
}
+/**
+ * drm_mode_page_flip_ioctl - schedule an asynchronous fb update
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This schedules an asynchronous update on a given CRTC, called page flip.
+ * Optionally a drm event is generated to signal the completion of the event.
+ * Generic drivers cannot assume that a pageflip with changed framebuffer
+ * properties (including driver specific metadata like tiling layout) will work,
+ * but some drivers support e.g. pixel format changes through the pageflip
+ * ioctl.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
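From userspace, a flip with a completion event is queued roughly as in this fragment (usual uapi headers and an open DRM fd assumed):

	/* Sketch: queue a flip to fb_id on crtc_id and request an event. */
	struct drm_mode_crtc_page_flip flip = {
		.crtc_id = crtc_id,
		.fb_id = fb_id,
		.flags = DRM_MODE_PAGE_FLIP_EVENT,
		.user_data = 0,
	};

	ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
	/* Later, read() on fd returns a struct drm_event_vblank on completion. */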
@@ -3646,7 +4149,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
crtc = obj_to_crtc(obj);
mutex_lock(&crtc->mutex);
- if (crtc->fb == NULL) {
+ if (crtc->primary->fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event, that userspace has not
* yet discovered.
@@ -3668,7 +4171,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (ret)
goto out;
- if (crtc->fb->pixel_format != fb->pixel_format) {
+ if (crtc->primary->fb->pixel_format != fb->pixel_format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
ret = -EINVAL;
goto out;
@@ -3701,7 +4204,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
(void (*) (struct drm_pending_event *)) kfree;
}
- old_fb = crtc->fb;
+ old_fb = crtc->primary->fb;
ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -3719,7 +4222,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
* Failing to do so will screw with the reference counting
* on framebuffers.
*/
- WARN_ON(crtc->fb != fb);
+ WARN_ON(crtc->primary->fb != fb);
/* Unref only the old framebuffer. */
fb = NULL;
}
@@ -3734,6 +4237,14 @@ out:
return ret;
}
+/**
+ * drm_mode_config_reset - call ->reset callbacks
+ * @dev: drm device
+ *
+ * This function calls the ->reset callback of all crtcs, encoders and
+ * connectors. Drivers can use this in e.g. their driver load or resume code to
+ * reset hardware and software state.
+ */
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
@@ -3757,16 +4268,66 @@ void drm_mode_config_reset(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_mode_config_reset);
+/**
+ * drm_mode_create_dumb_ioctl - create a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This creates a new dumb buffer in the driver's backing storage manager (GEM,
+ * TTM or something else entirely) and returns the resulting buffer handle. This
+ * handle can then be wrapped up into a framebuffer modeset object.
+ *
+ * Note that userspace is not allowed to use such objects for render
+ * acceleration - drivers must create their own private ioctls for such a use
+ * case.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_create_dumb *args = data;
+ u32 cpp, stride, size;
if (!dev->driver->dumb_create)
return -ENOSYS;
+ if (!args->width || !args->height || !args->bpp)
+ return -EINVAL;
+
+ /* overflow checks for 32bit size calculations */
+ cpp = DIV_ROUND_UP(args->bpp, 8);
+ if (cpp > 0xffffffffU / args->width)
+ return -EINVAL;
+ stride = cpp * args->width;
+ if (args->height > 0xffffffffU / stride)
+ return -EINVAL;
+
+ /* test for wrap-around */
+ size = args->height * stride;
+ if (PAGE_ALIGN(size) == 0)
+ return -EINVAL;
+
return dev->driver->dumb_create(file_priv, dev, args);
}
+/**
+ * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Allocate an offset in the drm device node's address space to be able to
+ * memory map a dumb buffer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3779,6 +4340,21 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
}
+/**
+ * drm_mode_destroy_dumb_ioctl - destroy a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This destroys the userspace handle for the given dumb backing storage buffer.
+ * Since buffer objects must be reference counted in the kernel a buffer object
+ * won't be immediately freed if a framebuffer modeset object still uses it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3790,9 +4366,14 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_destroy(file_priv, dev, args->handle);
}
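Putting the three dumb-buffer ioctls together, the userspace lifecycle is roughly as follows (error handling and unmap trimmed; header path may differ with libdrm):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

/* Sketch: allocate, map and destroy a dumb buffer on DRM fd @fd. */
static void dumb_buffer_demo(int fd)
{
	struct drm_mode_create_dumb create = {
		.width = 1024, .height = 768, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	struct drm_mode_destroy_dumb destroy = { 0 };
	void *ptr;

	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);

	/* ... draw into ptr, wrap create.handle into an fb via ADDFB ... */

	destroy.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
}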
-/*
- * Just need to support RGB formats here for compat with code that doesn't
- * use pixel formats directly yet.
+/**
+ * drm_fb_get_bpp_depth - get the bpp/depth values for format
+ * @format: pixel format (DRM_FORMAT_*)
+ * @depth: storage for the depth value
+ * @bpp: storage for the bpp value
+ *
+ * This only supports RGB formats here for compat with code that doesn't use
+ * pixel formats directly yet.
*/
void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp)
@@ -3864,7 +4445,7 @@ EXPORT_SYMBOL(drm_fb_get_bpp_depth);
* drm_format_num_planes - get the number of planes for format
* @format: pixel format (DRM_FORMAT_*)
*
- * RETURNS:
+ * Returns:
* The number of planes used by the specified pixel format.
*/
int drm_format_num_planes(uint32_t format)
@@ -3899,7 +4480,7 @@ EXPORT_SYMBOL(drm_format_num_planes);
* @format: pixel format (DRM_FORMAT_*)
* @plane: plane index
*
- * RETURNS:
+ * Returns:
* The bytes per pixel value for the specified plane.
*/
int drm_format_plane_cpp(uint32_t format, int plane)
@@ -3945,7 +4526,7 @@ EXPORT_SYMBOL(drm_format_plane_cpp);
* drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
- * RETURNS:
+ * Returns:
* The horizontal chroma subsampling factor for the
* specified pixel format.
*/
@@ -3980,7 +4561,7 @@ EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
* drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
- * RETURNS:
+ * Returns:
* The vertical chroma subsampling factor for the
* specified pixel format.
*/
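Taken together these helpers let generic code compute per-plane sizes; e.g. for a two-plane NV12 buffer of a given width and height the sketch below yields a full-size luma plane and a half-size chroma plane:

	/* Sketch: per-plane byte sizes for an NV12 buffer. */
	int planes = drm_format_num_planes(DRM_FORMAT_NV12);	/* 2 */
	int hsub = drm_format_horz_chroma_subsampling(DRM_FORMAT_NV12); /* 2 */
	int vsub = drm_format_vert_chroma_subsampling(DRM_FORMAT_NV12); /* 2 */
	int i;

	for (i = 0; i < planes; i++) {
		int cpp = drm_format_plane_cpp(DRM_FORMAT_NV12, i);
		unsigned int w = i ? width / hsub : width;
		unsigned int h = i ? height / vsub : height;
		unsigned int size = w * h * cpp;
		/* plane 0: width * height bytes, plane 1: width * height / 2 */
	}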
@@ -4030,6 +4611,7 @@ void drm_mode_config_init(struct drm_device *dev)
drm_modeset_lock_all(dev);
drm_mode_create_standard_connector_properties(dev);
+ drm_mode_create_standard_plane_properties(dev);
drm_modeset_unlock_all(dev);
/* Just to be sure */
@@ -4037,6 +4619,8 @@ void drm_mode_config_init(struct drm_device *dev)
dev->mode_config.num_connector = 0;
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
+ dev->mode_config.num_overlay_plane = 0;
+ dev->mode_config.num_total_plane = 0;
}
EXPORT_SYMBOL(drm_mode_config_init);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index ea92b827e787..c43825e8f5c1 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -105,9 +105,6 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
* @maxX: max width for modes
* @maxY: max height for modes
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Based on the helper callbacks implemented by @connector try to detect all
* valid modes. Modes will first be added to the connector's probed_modes list,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
@@ -117,8 +114,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
* @connector vfunc for drivers that use the crtc helpers for output mode
* filtering and detection.
*
- * RETURNS:
- * Number of modes found on @connector.
+ * Returns:
+ * The number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
@@ -131,6 +128,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
int mode_flags = 0;
bool verbose_prune = true;
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
drm_get_connector_name(connector));
/* set all modes to the unverified state */
@@ -176,8 +175,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
drm_mode_connector_list_update(connector);
if (maxX && maxY)
- drm_mode_validate_size(dev, &connector->modes, maxX,
- maxY, 0);
+ drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
@@ -219,18 +217,19 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Walk @encoders's DRM device's mode_config and see if it's in use.
+ * Checks whether @encoder is in use by any connector in the current mode
+ * setting output configuration. This doesn't mean that it is actually enabled since
+ * the DPMS state is tracked separately.
*
- * RETURNS:
- * True if @encoder is part of the mode_config, false otherwise.
+ * Returns:
+ * True if @encoder is used, false otherwise.
*/
bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder)
return true;
@@ -242,19 +241,19 @@ EXPORT_SYMBOL(drm_helper_encoder_in_use);
* drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
* @crtc: CRTC to check
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Walk @crtc's DRM device's mode_config and see if it's in use.
+ * Checks whether @crtc is in use by any connector in the current mode
+ * setting output configuration. This doesn't mean that it is actually enabled since
+ * the DPMS state is tracked separately.
*
- * RETURNS:
- * True if @crtc is part of the mode_config, false otherwise.
+ * Returns:
+ * True if @crtc is used, false otherwise.
*/
bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
- /* FIXME: Locking around list access? */
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
return true;
@@ -279,27 +278,17 @@ drm_encoder_disable(struct drm_encoder *encoder)
encoder->bridge->funcs->post_disable(encoder->bridge);
}
-/**
- * drm_helper_disable_unused_functions - disable unused objects
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * If an connector or CRTC isn't part of @dev's mode_config, it can be disabled
- * by calling its dpms function, which should power it off.
- */
-void drm_helper_disable_unused_functions(struct drm_device *dev)
+static void __drm_helper_disable_unused_functions(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_crtc *crtc;
+ drm_warn_on_modeset_not_all_locked(dev);
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
continue;
- if (connector->status == connector_status_disconnected)
- connector->encoder = NULL;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -318,10 +307,27 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
(*crtc_funcs->disable)(crtc);
else
(*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
- crtc->fb = NULL;
+ crtc->primary->fb = NULL;
}
}
}
+
+/**
+ * drm_helper_disable_unused_functions - disable unused objects
+ * @dev: DRM device
+ *
+ * This function walks through the entire mode setting configuration of @dev. It
+ * will remove any crtc links of unused encoders and encoder links of
+ * disconnected connectors. Then it will disable all unused encoders and crtcs
+ * either by calling their disable callback if available or by calling their
+ * dpms callback with DRM_MODE_DPMS_OFF.
+ */
+void drm_helper_disable_unused_functions(struct drm_device *dev)
+{
+ drm_modeset_lock_all(dev);
+ __drm_helper_disable_unused_functions(dev);
+ drm_modeset_unlock_all(dev);
+}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
/*
@@ -355,9 +361,6 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
* @y: vertical offset into the surface
* @old_fb: old framebuffer, for cleanup
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
* to fixup or reject the mode prior to trying to set it. This is an internal
* helper that drivers could e.g. use to update properties that require the
@@ -367,8 +370,8 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
* drm_crtc_helper_set_config() helper function to drive the mode setting
* sequence.
*
- * RETURNS:
- * True if the mode was set successfully, or false otherwise.
+ * Returns:
+ * True if the mode was set successfully, false otherwise.
*/
bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
@@ -384,6 +387,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_encoder *encoder;
bool ret = true;
+ drm_warn_on_modeset_not_all_locked(dev);
+
saved_enabled = crtc->enabled;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled)
@@ -552,7 +557,7 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
}
}
- drm_helper_disable_unused_functions(dev);
+ __drm_helper_disable_unused_functions(dev);
return 0;
}
@@ -560,17 +565,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
* drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Setup a new configuration, provided by the upper layers (either an ioctl call
- * from userspace or internally e.g. from the fbdev suppport code) in @set, and
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
* enable it. This is the main helper functions for drivers that implement
* kernel mode setting with the crtc helper functions and the assorted
* ->prepare(), ->modeset() and ->commit() helper callbacks.
*
- * RETURNS:
- * Returns 0 on success, -ERRNO on failure.
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
@@ -612,6 +614,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
dev = set->crtc->dev;
+ drm_warn_on_modeset_not_all_locked(dev);
+
/*
* Allocate space for the backup of all (non-pointer) encoder and
* connector data.
@@ -647,19 +651,19 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
save_set.mode = &set->crtc->mode;
save_set.x = set->crtc->x;
save_set.y = set->crtc->y;
- save_set.fb = set->crtc->fb;
+ save_set.fb = set->crtc->primary->fb;
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
- if (set->crtc->fb != set->fb) {
+ if (set->crtc->primary->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
- if (set->crtc->fb == NULL) {
+ if (set->crtc->primary->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
} else if (set->fb->pixel_format !=
- set->crtc->fb->pixel_format) {
+ set->crtc->primary->fb->pixel_format) {
mode_changed = true;
} else
fb_changed = true;
@@ -689,12 +693,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (new_encoder == NULL)
/* don't break so fail path works correct */
fail = 1;
- break;
if (connector->dpms != DRM_MODE_DPMS_ON) {
DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
mode_changed = true;
}
+
+ break;
}
}
@@ -760,13 +765,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
- set->crtc->fb = set->fb;
+ set->crtc->primary->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
save_set.fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
- set->crtc->fb = save_set.fb;
+ set->crtc->primary->fb = save_set.fb;
ret = -EINVAL;
goto fail;
}
@@ -777,17 +782,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
- drm_helper_disable_unused_functions(dev);
+ __drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
set->crtc->x = set->x;
set->crtc->y = set->y;
- set->crtc->fb = set->fb;
+ set->crtc->primary->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, save_set.fb);
if (ret != 0) {
set->crtc->x = save_set.x;
set->crtc->y = save_set.y;
- set->crtc->fb = save_set.fb;
+ set->crtc->primary->fb = save_set.fb;
goto fail;
}
}
@@ -924,8 +929,16 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
-int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
- struct drm_mode_fb_cmd2 *mode_cmd)
+/**
+ * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
+ * @fb: drm_framebuffer object to fill out
+ * @mode_cmd: metadata from the userspace fb creation request
+ *
+ * This helper can be used in a drivers fb_create callback to pre-fill the fb's
+ * metadata fields.
+ */
+void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
int i;
@@ -938,26 +951,47 @@ int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
&fb->bits_per_pixel);
fb->pixel_format = mode_cmd->pixel_format;
-
- return 0;
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
-int drm_helper_resume_force_mode(struct drm_device *dev)
+/**
+ * drm_helper_resume_force_mode - force-restore mode setting configuration
+ * @dev: drm_device which should be restored
+ *
+ * Drivers which use the mode setting helpers can use this function to
+ * force-restore the mode setting configuration e.g. on resume or when something
+ * else might have trampled over the hw state (like some overzealous old BIOSen
+ * tended to do).
+ *
+ * This helper doesn't provide an error return value since restoring the old
+ * config should never fail due to resource allocation issues since the driver
+ * has successfully set the restored configuration already. Hence this should
+ * boil down to the equivalent of a few dpms on calls, which also don't provide
+ * an error code.
+ *
+ * Drivers where simply restoring an old configuration again might fail (e.g.
+ * due to slight differences in allocating shared resources when the
+ * configuration is restored in a different order than when userspace set it up)
+ * need to use their own restore logic.
+ */
+void drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_crtc_helper_funcs *crtc_funcs;
- int ret, encoder_dpms;
+ int encoder_dpms;
+ bool ret;
+ drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->enabled)
continue;
ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ crtc->x, crtc->y, crtc->primary->fb);
+ /* Restoring the old config should never fail! */
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
@@ -980,12 +1014,29 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
drm_helper_choose_crtc_dpms(crtc));
}
}
+
/* disable the unused connectors while restoring the modesetting */
- drm_helper_disable_unused_functions(dev);
- return 0;
+ __drm_helper_disable_unused_functions(dev);
+ drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+/**
+ * drm_kms_helper_hotplug_event - fire off KMS hotplug events
+ * @dev: drm_device whose connector state changed
+ *
+ * This function fires off the uevent for userspace and also calls the
+ * output_poll_changed function, which is most commonly used to inform the fbdev
+ * emulation code and allow it to update the fbcon output configuration.
+ *
+ * Drivers should call this from their hotplug handling code when a change is
+ * detected. Note that this function does not do any output detection of its
+ * own, like drm_helper_hpd_irq_event() does - this is assumed to be done by the
+ * driver already.
+ *
+ * This function must be called from process context with no mode
+ * setting locks held.
+ */
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
/* send a uevent + call fbdev */
@@ -1054,6 +1105,16 @@ static void output_poll_execute(struct work_struct *work)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
+/**
+ * drm_kms_helper_poll_disable - disable output polling
+ * @dev: drm_device
+ *
+ * This function disables the output polling work.
+ *
+ * Drivers can call this helper from their device suspend implementation. It is
+ * not an error to call this even when output polling isn't enabled or already
+ * disabled.
+ */
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
@@ -1062,6 +1123,16 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+/**
+ * drm_kms_helper_poll_enable - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work.
+ *
+ * Drivers can call this helper from their device resume implementation. It is
+ * an error to call this when the output polling support has not yet been set
+ * up.
+ */
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
@@ -1081,6 +1152,25 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+/**
+ * drm_kms_helper_poll_init - initialize and enable output polling
+ * @dev: drm_device
+ *
+ * This function initializes and then also enables output polling support for
+ * @dev. Drivers which do not have reliable hotplug support in hardware can use
+ * this helper infrastructure to regularly poll such connectors for changes in
+ * their connection state.
+ *
+ * Drivers can control which connectors are polled by setting the
+ * DRM_CONNECTOR_POLL_CONNECT and DRM_CONNECTOR_POLL_DISCONNECT flags. On
+ * connectors where probing live outputs can result in visual distortion drivers
+ * should not set the DRM_CONNECTOR_POLL_DISCONNECT flag to avoid this.
+ * Connectors which have no flag or only DRM_CONNECTOR_POLL_HPD set are
+ * completely ignored by the polling logic.
+ *
+ * Note that a connector can be both polled and probed from the hotplug handler,
+ * in case the hotplug interrupt is known to be unreliable.
+ */
void drm_kms_helper_poll_init(struct drm_device *dev)
{
INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
@@ -1090,12 +1180,39 @@ void drm_kms_helper_poll_init(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_init);
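A driver without hotplug interrupts on, say, its VGA output would set this up at load time roughly as follows:

	/* Sketch: poll this connector for both connect and disconnect. */
	vga_connector->polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;

	/* Once all connectors are registered: */
	drm_kms_helper_poll_init(dev);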
+/**
+ * drm_kms_helper_poll_fini - disable output polling and clean it up
+ * @dev: drm_device
+ */
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
drm_kms_helper_poll_disable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+/**
+ * drm_helper_hpd_irq_event - hotplug processing
+ * @dev: drm_device
+ *
+ * Drivers can use this helper function to run a detect cycle on all connectors
+ * which have the DRM_CONNECTOR_POLL_HPD flag set in their &polled member. All
+ * other connectors are ignored, which is useful to avoid reprobing fixed
+ * panels.
+ *
+ * This helper function is useful for drivers which can't or don't track hotplug
+ * interrupts for each connector.
+ *
+ * Drivers which support hotplug interrupts for each connector individually and
+ * which have a more fine-grained detect logic should bypass this code and
+ * directly call drm_kms_helper_hotplug_event() in case the connector state
+ * changed.
+ *
+ * This function must be called from process context with no mode
+ * setting locks held.
+ *
+ * Note that a connector can be both polled and probed from the hotplug handler,
+ * in case the hotplug interrupt is known to be unreliable.
+ */
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
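Because of the process-context requirement spelled out above, drivers usually bounce the hotplug interrupt to a work item first; a sketch with a hypothetical foo_device:

/* Sketch: defer hotplug processing from the IRQ handler to process context. */
static void foo_hotplug_work_func(struct work_struct *work)
{
	struct foo_device *foo = container_of(work, struct foo_device,
					      hotplug_work);

	drm_helper_hpd_irq_event(foo->drm_dev);
}

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct foo_device *foo = arg;

	schedule_work(&foo->hotplug_work);
	return IRQ_HANDLED;
}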
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
new file mode 100644
index 000000000000..a2945ee6d675
--- /dev/null
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright © 2014 Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This header file contains mode setting related functions and definitions
+ * which are only used within the drm module as internal implementation details
+ * and are not exported to drivers.
+ */
+
+int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type);
+void drm_mode_object_put(struct drm_device *dev,
+ struct drm_mode_object *object);
+
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 9e978aae8972..27671489477d 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -346,3 +346,399 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw)
}
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
+
+/**
+ * DOC: dp helpers
+ *
+ * The DisplayPort AUX channel is an abstraction to allow generic, driver-
+ * independent access to AUX functionality. Drivers can take advantage of
+ * this by filling in the fields of the drm_dp_aux structure.
+ *
+ * Transactions are described using a hardware-independent drm_dp_aux_msg
+ * structure, which is passed into a driver's .transfer() implementation.
+ * Both native and I2C-over-AUX transactions are supported.
+ */
+
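A driver-side skeleton of the .transfer() hook described above could look like this (hardware access stubbed out behind a hypothetical foo_dp_do_aux()):

/* Sketch of a driver's aux->transfer() implementation. */
static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
				struct drm_dp_aux_msg *msg)
{
	struct foo_dp *dp = container_of(aux, struct foo_dp, aux);

	/*
	 * Program msg->request and msg->address, move msg->size bytes
	 * to/from msg->buffer, store the sink's reply code in msg->reply
	 * and return the number of bytes transferred or a negative errno.
	 */
	return foo_dp_do_aux(dp, msg);
}

/* At init time: dp->aux.transfer = foo_aux_transfer; */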
+static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct drm_dp_aux_msg msg;
+ unsigned int retry;
+ int err;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.address = offset;
+ msg.request = request;
+ msg.buffer = buffer;
+ msg.size = size;
+
+ /*
+ * The specification doesn't give any recommendation on how often to
+ * retry native transactions, so retry 7 times like for I2C-over-AUX
+ * transactions.
+ */
+ for (retry = 0; retry < 7; retry++) {
+ err = aux->transfer(aux, &msg);
+ if (err < 0) {
+ if (err == -EBUSY)
+ continue;
+
+ return err;
+ }
+
+
+ switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
+ case DP_AUX_NATIVE_REPLY_ACK:
+ if (err < size)
+ return -EPROTO;
+ return err;
+
+ case DP_AUX_NATIVE_REPLY_NACK:
+ return -EIO;
+
+ case DP_AUX_NATIVE_REPLY_DEFER:
+ usleep_range(400, 500);
+ break;
+ }
+ }
+
+ DRM_DEBUG_KMS("too many retries, giving up\n");
+ return -EIO;
+}
+
+/**
+ * drm_dp_dpcd_read() - read a series of bytes from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the (first) register to read
+ * @buffer: buffer to store the register values
+ * @size: number of bytes in @buffer
+ *
+ * Returns the number of bytes transferred on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size)
+{
+ return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
+ size);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_read);
+
+/**
+ * drm_dp_dpcd_write() - write a series of bytes to the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the (first) register to write
+ * @buffer: buffer containing the values to write
+ * @size: number of bytes in @buffer
+ *
+ * Returns the number of bytes transferred on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size)
+{
+ return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
+ size);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_write);
+
+/**
+ * drm_dp_dpcd_read_link_status() - read DPCD link status (bytes 0x202-0x207)
+ * @aux: DisplayPort AUX channel
+ * @status: buffer to store the link status in (must be at least 6 bytes)
+ *
+ * Returns the number of bytes transferred on success or a negative error
+ * code on failure.
+ */
+int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
+ DP_LINK_STATUS_SIZE);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
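Typical callers fetch a block of receiver capabilities once and poke individual registers as needed; a fragment (assumes an initialized struct drm_dp_aux *aux and the usual DP_* defines from drm_dp_helper.h):

	/* Sketch: read the receiver capability block and a single byte. */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 rev;
	int err;

	err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
	if (err < 0)
		return err;

	err = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &rev);
	if (err < 0)
		return err;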
+
+/**
+ * drm_dp_link_probe() - probe a DisplayPort link for capabilities
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to structure in which to return link capabilities
+ *
+ * The structure filled in by this function can usually be passed directly
+ * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
+ * configure the link based on the link's capabilities.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 values[3];
+ int err;
+
+ memset(link, 0, sizeof(*link));
+
+ err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ link->revision = values[0];
+ link->rate = drm_dp_bw_code_to_link_rate(values[1]);
+ link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
+
+ if (values[2] & DP_ENHANCED_FRAME_CAP)
+ link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_probe);
+
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+ * Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_up);
+
+/**
+ * drm_dp_link_configure() - configure a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 values[2];
+ int err;
+
+ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ values[1] = link->num_lanes;
+
+ if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_configure);
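A hedged sketch of the bring-up sequence the kernel-doc above describes (probe, then power up, then configure); error handling is minimal and the aux/link objects are assumed to be provided by the caller:

static int example_link_bringup(struct drm_dp_aux *aux,
				struct drm_dp_link *link)
{
	int err;

	err = drm_dp_link_probe(aux, link);	/* fills *link from DPCD */
	if (err < 0)
		return err;

	err = drm_dp_link_power_up(aux, link);	/* D0, waits 1 ms if needed */
	if (err < 0)
		return err;

	/* writes the link rate and lane count registers */
	return drm_dp_link_configure(aux, link);
}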
+
+/*
+ * I2C-over-AUX implementation
+ */
+
+static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+/*
+ * Transfer a single I2C-over-AUX message and handle various error conditions,
+ * retrying the transaction as appropriate.
+ */
+static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ unsigned int retry;
+ int err;
+
+ /*
+ * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
+ * is required to retry at least seven times upon receiving AUX_DEFER
+ * before giving up the AUX transaction.
+ */
+ for (retry = 0; retry < 7; retry++) {
+ err = aux->transfer(aux, msg);
+ if (err < 0) {
+ if (err == -EBUSY)
+ continue;
+
+ DRM_DEBUG_KMS("transaction failed: %d\n", err);
+ return err;
+ }
+
+ switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) {
+ case DP_AUX_NATIVE_REPLY_ACK:
+ /*
+ * For I2C-over-AUX transactions this isn't enough, we
+ * need to check for the I2C ACK reply.
+ */
+ break;
+
+ case DP_AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("native nack\n");
+ return -EREMOTEIO;
+
+ case DP_AUX_NATIVE_REPLY_DEFER:
+ DRM_DEBUG_KMS("native defer");
+ /*
+ * We could check for I2C bit rate capabilities and if
+ * available adjust this interval. We could also be
+ * more careful with DP-to-legacy adapters where a
+ * long legacy cable may force very low I2C bit rates.
+ *
+ * For now just defer for long enough to hopefully be
+ * safe for all use-cases.
+ */
+ usleep_range(500, 600);
+ continue;
+
+ default:
+ DRM_ERROR("invalid native reply %#04x\n", msg->reply);
+ return -EREMOTEIO;
+ }
+
+ switch (msg->reply & DP_AUX_I2C_REPLY_MASK) {
+ case DP_AUX_I2C_REPLY_ACK:
+ /*
+ * Both native ACK and I2C ACK replies received. We
+ * can assume the transfer was successful.
+ */
+ if (err < msg->size)
+ return -EPROTO;
+ return 0;
+
+ case DP_AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("I2C nack\n");
+ return -EREMOTEIO;
+
+ case DP_AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("I2C defer\n");
+ usleep_range(400, 500);
+ continue;
+
+ default:
+ DRM_ERROR("invalid I2C reply %#04x\n", msg->reply);
+ return -EREMOTEIO;
+ }
+ }
+
+ DRM_DEBUG_KMS("too many retries, giving up\n");
+ return -EREMOTEIO;
+}
+
+static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ int num)
+{
+ struct drm_dp_aux *aux = adapter->algo_data;
+ unsigned int i, j;
+
+ for (i = 0; i < num; i++) {
+ struct drm_dp_aux_msg msg;
+ int err;
+
+ /*
+ * Many hardware implementations support FIFOs larger than a
+ * single byte, but it has been empirically determined that
+ * transferring data in larger chunks can actually lead to
+ * decreased performance. Therefore each message is simply
+ * transferred byte-by-byte.
+ */
+ for (j = 0; j < msgs[i].len; j++) {
+ memset(&msg, 0, sizeof(msg));
+ msg.address = msgs[i].addr;
+
+ msg.request = (msgs[i].flags & I2C_M_RD) ?
+ DP_AUX_I2C_READ :
+ DP_AUX_I2C_WRITE;
+
+ /*
+ * All messages except the last one are middle-of-
+ * transfer messages.
+ */
+ if ((i < num - 1) || (j < msgs[i].len - 1))
+ msg.request |= DP_AUX_I2C_MOT;
+
+ msg.buffer = msgs[i].buf + j;
+ msg.size = 1;
+
+ err = drm_dp_i2c_do_msg(aux, &msg);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return num;
+}
+
+static const struct i2c_algorithm drm_dp_i2c_algo = {
+ .functionality = drm_dp_i2c_functionality,
+ .master_xfer = drm_dp_i2c_xfer,
+};
+
+/**
+ * drm_dp_aux_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux)
+{
+ aux->ddc.algo = &drm_dp_i2c_algo;
+ aux->ddc.algo_data = aux;
+ aux->ddc.retries = 3;
+
+ aux->ddc.class = I2C_CLASS_DDC;
+ aux->ddc.owner = THIS_MODULE;
+ aux->ddc.dev.parent = aux->dev;
+ aux->ddc.dev.of_node = aux->dev->of_node;
+
+ strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
+ sizeof(aux->ddc.name));
+
+ return i2c_add_adapter(&aux->ddc);
+}
+EXPORT_SYMBOL(drm_dp_aux_register_i2c_bus);
+
+/**
+ * drm_dp_aux_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_aux_unregister_i2c_bus(struct drm_dp_aux *aux)
+{
+ i2c_del_adapter(&aux->ddc);
+}
+EXPORT_SYMBOL(drm_dp_aux_unregister_i2c_bus);
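Sketch of how a driver might hook the new adapter up and read an EDID over it (illustrative only; drm_get_edid() is the existing EDID helper, and the aux/connector objects are assumed to be initialized elsewhere):

/* at driver init time, after setting aux->dev, aux->name and aux->transfer */
static int example_aux_init(struct drm_dp_aux *aux)
{
	return drm_dp_aux_register_i2c_bus(aux);
}

/* the registered adapter behaves like any other DDC bus */
static struct edid *example_get_edid(struct drm_connector *connector,
				     struct drm_dp_aux *aux)
{
	return drm_get_edid(connector, &aux->ddc);
}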
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 345be03c23db..03711d00aaae 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -286,6 +286,45 @@ static int drm_version(struct drm_device *dev, void *data,
}
/**
+ * drm_ioctl_permit - Check ioctl permissions against caller
+ *
+ * @flags: ioctl permission flags.
+ * @file_priv: Pointer to struct drm_file identifying the caller.
+ *
+ * Checks whether the caller is allowed to run an ioctl with the
+ * indicated permissions. If so, returns zero. Otherwise returns an
+ * error code suitable for ioctl return.
+ */
+static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+{
+ /* ROOT_ONLY is only for CAP_SYS_ADMIN */
+ if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
+ return -EACCES;
+
+ /* AUTH is only for authenticated or render client */
+ if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
+ !file_priv->authenticated))
+ return -EACCES;
+
+ /* MASTER is only for master or control clients */
+ if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
+ !drm_is_control_client(file_priv)))
+ return -EACCES;
+
+ /* Control clients must be explicitly allowed */
+ if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
+ drm_is_control_client(file_priv)))
+ return -EACCES;
+
+ /* Render clients must be explicitly allowed */
+ if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
+ drm_is_render_client(file_priv)))
+ return -EACCES;
+
+ return 0;
+}
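For context, a hedged illustration of where these flags come from: each entry in the drm_ioctls[] table ORs together the permission bits for one ioctl, along the lines of the example entry below, and drm_ioctl_permit() then checks the caller against them.

	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources,
		      DRM_CONTROL_ALLOW | DRM_UNLOCKED),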
+
+/**
* Called whenever a process performs an ioctl on /dev/drm.
*
* \param inode device inode.
@@ -344,65 +383,64 @@ long drm_ioctl(struct file *filp,
DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->device),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
file_priv->authenticated, ioctl->name);
/* Do not trust userspace, use our own definition */
func = ioctl->func;
- if (!func) {
+ if (unlikely(!func)) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
- } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
- ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) ||
- ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
- (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ||
- (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) {
- retcode = -EACCES;
- } else {
- if (cmd & (IOC_IN | IOC_OUT)) {
- if (asize <= sizeof(stack_kdata)) {
- kdata = stack_kdata;
- } else {
- kdata = kmalloc(asize, GFP_KERNEL);
- if (!kdata) {
- retcode = -ENOMEM;
- goto err_i1;
- }
- }
- if (asize > usize)
- memset(kdata + usize, 0, asize - usize);
- }
+ goto err_i1;
+ }
- if (cmd & IOC_IN) {
- if (copy_from_user(kdata, (void __user *)arg,
- usize) != 0) {
- retcode = -EFAULT;
+ retcode = drm_ioctl_permit(ioctl->flags, file_priv);
+ if (unlikely(retcode))
+ goto err_i1;
+
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
goto err_i1;
}
- } else
- memset(kdata, 0, usize);
-
- if (ioctl->flags & DRM_UNLOCKED)
- retcode = func(dev, kdata, file_priv);
- else {
- mutex_lock(&drm_global_mutex);
- retcode = func(dev, kdata, file_priv);
- mutex_unlock(&drm_global_mutex);
}
+ if (asize > usize)
+ memset(kdata + usize, 0, asize - usize);
+ }
- if (cmd & IOC_OUT) {
- if (copy_to_user((void __user *)arg, kdata,
- usize) != 0)
- retcode = -EFAULT;
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg,
+ usize) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
}
+ } else
+ memset(kdata, 0, usize);
+
+ if (ioctl->flags & DRM_UNLOCKED)
+ retcode = func(dev, kdata, file_priv);
+ else {
+ mutex_lock(&drm_global_mutex);
+ retcode = func(dev, kdata, file_priv);
+ mutex_unlock(&drm_global_mutex);
+ }
+
+ if (cmd & IOC_OUT) {
+ if (copy_to_user((void __user *)arg, kdata,
+ usize) != 0)
+ retcode = -EFAULT;
}
err_i1:
if (!ioctl)
DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->device),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
file_priv->authenticated, cmd, nr);
if (kdata != stack_kdata)
@@ -412,3 +450,21 @@ long drm_ioctl(struct file *filp,
return retcode;
}
EXPORT_SYMBOL(drm_ioctl);
+
+/**
+ * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
+ *
+ * @nr: Ioctl number.
+ * @flags: Where to return the ioctl permission flags
+ */
+bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
+{
+ if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
+ (nr < DRM_COMMAND_BASE)) {
+ *flags = drm_ioctls[nr].flags;
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_ioctl_flags);
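A hypothetical caller, shown only to illustrate the intended use (the helper itself just looks up the core table):

static bool example_core_ioctl_needs_auth(unsigned int nr)
{
	unsigned int flags;

	if (!drm_ioctl_flags(nr, &flags))
		return false;		/* not a core ioctl */

	return flags & DRM_AUTH;
}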
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b924306b8477..d4e3f9d9370f 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1098,10 +1098,14 @@ EXPORT_SYMBOL(drm_edid_is_valid);
/**
* Get EDID information via I2C.
*
- * \param adapter : i2c device adaptor
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
+ * @adapter: I2C device adapter
+ * @buf: EDID data buffer to be filled
+ * @block: 128 byte EDID block to start fetching from
+ * @len: EDID data buffer length to fetch
+ *
+ * Returns:
+ *
+ * 0 on success or -1 on failure.
*
* Try to fetch EDID information by calling i2c driver function.
*/
@@ -1243,9 +1247,11 @@ out:
/**
* Probe DDC presence.
+ * @adapter: i2c adapter to probe
+ *
+ * Returns:
*
- * \param adapter : i2c device adaptor
- * \return 1 on success
+ * 1 on success
*/
bool
drm_probe_ddc(struct i2c_adapter *adapter)
@@ -1586,8 +1592,10 @@ bad_std_timing(u8 a, u8 b)
/**
* drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @connector: connector for the EDID block
+ * @edid: EDID block to scan
* @t: standard timing params
- * @timing_level: standard timing level
+ * @revision: standard timing level
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
@@ -2132,6 +2140,7 @@ do_established_modes(struct detailed_timing *timing, void *c)
/**
* add_established_modes - get est. modes from EDID and add them
+ * @connector: connector for the EDID block
* @edid: EDID block to scan
*
* Each EDID block contains a bitmap of the supported "established modes" list
@@ -2194,6 +2203,7 @@ do_standard_modes(struct detailed_timing *timing, void *c)
/**
* add_standard_modes - get std. modes from EDID and add them
+ * @connector: connector for the EDID block
* @edid: EDID block to scan
*
* Standard modes can be calculated using the appropriate standard (DMT,
@@ -2580,6 +2590,9 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
return NULL;
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ if (!newmode)
+ return NULL;
+
newmode->vrefresh = 0;
return newmode;
@@ -3300,6 +3313,7 @@ EXPORT_SYMBOL(drm_detect_hdmi_monitor);
/**
* drm_detect_monitor_audio - check monitor audio capability
+ * @edid: EDID block to scan
*
* Monitor should have CEA extension block.
* If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
@@ -3345,6 +3359,7 @@ EXPORT_SYMBOL(drm_detect_monitor_audio);
/**
* drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ * @edid: EDID block to scan
*
* Check whether the monitor reports the RGB quantization range selection
* as supported. The AVI infoframe can then be used to inform the monitor
@@ -3564,8 +3579,8 @@ void drm_set_preferred_mode(struct drm_connector *connector,
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->probed_modes, head) {
- if (drm_mode_width(mode) == hpref &&
- drm_mode_height(mode) == vpref)
+ if (mode->hdisplay == hpref &&
+ mode->vdisplay == vpref)
mode->type |= DRM_MODE_TYPE_PREFERRED;
}
}
@@ -3599,6 +3614,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
+ frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
return 0;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 98a03639b413..04d3fd3658f3 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -232,7 +232,7 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
if (crtc->base.id == c->base.id)
- return c->fb;
+ return c->primary->fb;
}
return NULL;
@@ -291,7 +291,8 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
drm_warn_on_modeset_not_all_locked(dev);
list_for_each_entry(plane, &dev->mode_config.plane_list, head)
- drm_plane_force_disable(plane);
+ if (plane->type != DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_force_disable(plane);
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
@@ -365,9 +366,9 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
return false;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb)
+ if (crtc->primary->fb)
crtcs_bound++;
- if (crtc->fb == fb_helper->fb)
+ if (crtc->primary->fb == fb_helper->fb)
bound++;
}
@@ -516,6 +517,9 @@ int drm_fb_helper_init(struct drm_device *dev,
struct drm_crtc *crtc;
int i;
+ if (!max_conn_count)
+ return -EINVAL;
+
fb_helper->dev = dev;
INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
@@ -809,8 +813,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
- int ret;
- int i;
if (var->pixclock != 0) {
DRM_ERROR("PIXEL CLOCK SET\n");
@@ -818,13 +820,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
drm_modeset_lock_all(dev);
- for (i = 0; i < fb_helper->crtc_count; i++) {
- ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
- if (ret) {
- drm_modeset_unlock_all(dev);
- return ret;
- }
- }
+ drm_fb_helper_restore_fbdev_mode(fb_helper);
drm_modeset_unlock_all(dev);
if (fb_helper->delayed_hotplug) {
@@ -1136,19 +1132,20 @@ static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
return count;
}
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &fb_connector->connector->modes, head) {
- if (drm_mode_width(mode) > width ||
- drm_mode_height(mode) > height)
+ if (mode->hdisplay > width ||
+ mode->vdisplay > height)
continue;
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return mode;
}
return NULL;
}
+EXPORT_SYMBOL(drm_has_preferred_mode);
static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
{
@@ -1157,11 +1154,12 @@ static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
return cmdline_mode->specified;
}
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
int width, int height)
{
struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode = NULL;
+ bool prefer_non_interlace;
cmdline_mode = &fb_helper_conn->cmdline_mode;
if (cmdline_mode->specified == false)
@@ -1173,6 +1171,8 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
if (cmdline_mode->rb || cmdline_mode->margins)
goto create_mode;
+ prefer_non_interlace = !cmdline_mode->interlace;
+ again:
list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
@@ -1187,16 +1187,25 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
if (cmdline_mode->interlace) {
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
+ } else if (prefer_non_interlace) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ continue;
}
return mode;
}
+ if (prefer_non_interlace) {
+ prefer_non_interlace = false;
+ goto again;
+ }
+
create_mode:
mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
cmdline_mode);
list_add(&mode->head, &fb_helper_conn->connector->modes);
return mode;
}
+EXPORT_SYMBOL(drm_pick_cmdline_mode);
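With the two helpers above now exported, driver fbdev code can pick an initial mode roughly as sketched here (an assumption-laden example: fb_conn, width and height come from the caller; a command-line mode wins over the connector's preferred mode):

static struct drm_display_mode *
example_pick_initial_mode(struct drm_fb_helper_connector *fb_conn,
			  int width, int height)
{
	struct drm_display_mode *mode;

	mode = drm_pick_cmdline_mode(fb_conn, width, height);
	if (mode)
		return mode;

	return drm_has_preferred_mode(fb_conn, width, height);
}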
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
@@ -1539,9 +1548,11 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
drm_fb_helper_parse_command_line(fb_helper);
+ mutex_lock(&dev->mode_config.mutex);
count = drm_fb_helper_probe_connector_modes(fb_helper,
dev->mode_config.max_width,
dev->mode_config.max_height);
+ mutex_unlock(&dev->mode_config.mutex);
/*
* we shouldn't end up with no modes here.
*/
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7f2af9aca038..e1eba0b7cd45 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -39,12 +39,12 @@
#include <linux/slab.h>
#include <linux/module.h>
-/* from BKL pushdown: note that nothing else serializes idr_find() */
+/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
EXPORT_SYMBOL(drm_global_mutex);
static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_device * dev);
+ struct drm_minor *minor);
static int drm_setup(struct drm_device * dev)
{
@@ -79,38 +79,23 @@ static int drm_setup(struct drm_device * dev)
*/
int drm_open(struct inode *inode, struct file *filp)
{
- struct drm_device *dev = NULL;
- int minor_id = iminor(inode);
+ struct drm_device *dev;
struct drm_minor *minor;
- int retcode = 0;
+ int retcode;
int need_setup = 0;
- struct address_space *old_mapping;
- struct address_space *old_imapping;
-
- minor = idr_find(&drm_minors_idr, minor_id);
- if (!minor)
- return -ENODEV;
- if (!(dev = minor->dev))
- return -ENODEV;
-
- if (drm_device_is_unplugged(dev))
- return -ENODEV;
+ minor = drm_minor_acquire(iminor(inode));
+ if (IS_ERR(minor))
+ return PTR_ERR(minor);
+ dev = minor->dev;
if (!dev->open_count++)
need_setup = 1;
- mutex_lock(&dev->struct_mutex);
- old_imapping = inode->i_mapping;
- old_mapping = dev->dev_mapping;
- if (old_mapping == NULL)
- dev->dev_mapping = &inode->i_data;
- /* ihold ensures nobody can remove inode with our i_data */
- ihold(container_of(dev->dev_mapping, struct inode, i_data));
- inode->i_mapping = dev->dev_mapping;
- filp->f_mapping = dev->dev_mapping;
- mutex_unlock(&dev->struct_mutex);
- retcode = drm_open_helper(inode, filp, dev);
+ /* share address_space across all char-devs of a single device */
+ filp->f_mapping = dev->anon_inode->i_mapping;
+
+ retcode = drm_open_helper(inode, filp, minor);
if (retcode)
goto err_undo;
if (need_setup) {
@@ -121,13 +106,8 @@ int drm_open(struct inode *inode, struct file *filp)
return 0;
err_undo:
- mutex_lock(&dev->struct_mutex);
- filp->f_mapping = old_imapping;
- inode->i_mapping = old_imapping;
- iput(container_of(dev->dev_mapping, struct inode, i_data));
- dev->dev_mapping = old_mapping;
- mutex_unlock(&dev->struct_mutex);
dev->open_count--;
+ drm_minor_release(minor);
return retcode;
}
EXPORT_SYMBOL(drm_open);
@@ -143,33 +123,30 @@ EXPORT_SYMBOL(drm_open);
*/
int drm_stub_open(struct inode *inode, struct file *filp)
{
- struct drm_device *dev = NULL;
+ struct drm_device *dev;
struct drm_minor *minor;
- int minor_id = iminor(inode);
int err = -ENODEV;
const struct file_operations *new_fops;
DRM_DEBUG("\n");
mutex_lock(&drm_global_mutex);
- minor = idr_find(&drm_minors_idr, minor_id);
- if (!minor)
- goto out;
-
- if (!(dev = minor->dev))
- goto out;
-
- if (drm_device_is_unplugged(dev))
- goto out;
+ minor = drm_minor_acquire(iminor(inode));
+ if (IS_ERR(minor))
+ goto out_unlock;
+ dev = minor->dev;
new_fops = fops_get(dev->driver->fops);
if (!new_fops)
- goto out;
+ goto out_release;
replace_fops(filp, new_fops);
if (filp->f_op->open)
err = filp->f_op->open(inode, filp);
-out:
+
+out_release:
+ drm_minor_release(minor);
+out_unlock:
mutex_unlock(&drm_global_mutex);
return err;
}
@@ -196,16 +173,16 @@ static int drm_cpu_valid(void)
*
* \param inode device inode.
* \param filp file pointer.
- * \param dev device.
+ * \param minor acquired minor-object.
* \return zero on success or a negative number on failure.
*
* Creates and initializes a drm_file structure for the file private data in \p
* filp and add it into the double linked list in \p dev.
*/
static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_device * dev)
+ struct drm_minor *minor)
{
- int minor_id = iminor(inode);
+ struct drm_device *dev = minor->dev;
struct drm_file *priv;
int ret;
@@ -216,7 +193,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
return -EINVAL;
- DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
+ DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -226,11 +203,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->filp = filp;
priv->uid = current_euid();
priv->pid = get_pid(task_pid(current));
- priv->minor = idr_find(&drm_minors_idr, minor_id);
- if (!priv->minor) {
- ret = -ENODEV;
- goto out_put_pid;
- }
+ priv->minor = minor;
/* for compatibility root is always authenticated */
priv->always_authenticated = capable(CAP_SYS_ADMIN);
@@ -258,12 +231,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
/* if there is no current master make this fd it, but do not create
* any master object for render clients */
- mutex_lock(&dev->struct_mutex);
- if (!priv->minor->master && !drm_is_render_client(priv)) {
+ mutex_lock(&dev->master_mutex);
+ if (drm_is_primary_client(priv) && !priv->minor->master) {
/* create a new master */
priv->minor->master = drm_master_create(priv->minor);
if (!priv->minor->master) {
- mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
goto out_close;
}
@@ -271,37 +243,31 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->is_master = 1;
/* take another reference for the copy in the local file priv */
priv->master = drm_master_get(priv->minor->master);
-
priv->authenticated = 1;
- mutex_unlock(&dev->struct_mutex);
if (dev->driver->master_create) {
ret = dev->driver->master_create(dev, priv->master);
if (ret) {
- mutex_lock(&dev->struct_mutex);
/* drop both references if this fails */
drm_master_put(&priv->minor->master);
drm_master_put(&priv->master);
- mutex_unlock(&dev->struct_mutex);
goto out_close;
}
}
- mutex_lock(&dev->struct_mutex);
if (dev->driver->master_set) {
ret = dev->driver->master_set(dev, priv, true);
if (ret) {
/* drop both references if this fails */
drm_master_put(&priv->minor->master);
drm_master_put(&priv->master);
- mutex_unlock(&dev->struct_mutex);
goto out_close;
}
}
- } else if (!drm_is_render_client(priv)) {
+ } else if (drm_is_primary_client(priv)) {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->master_mutex);
mutex_lock(&dev->struct_mutex);
list_add(&priv->lhead, &dev->filelist);
@@ -319,7 +285,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
pci_dev_put(pci_dev);
}
if (!dev->hose) {
- struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+ struct pci_bus *b = list_entry(pci_root_buses.next,
+ struct pci_bus, node);
if (b)
dev->hose = b->sysdata;
}
@@ -329,6 +296,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
return 0;
out_close:
+ mutex_unlock(&dev->master_mutex);
if (dev->driver->postclose)
dev->driver->postclose(dev, priv);
out_prime_destroy:
@@ -336,7 +304,6 @@ out_prime_destroy:
drm_prime_destroy_file_private(&priv->prime);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, priv);
-out_put_pid:
put_pid(priv->pid);
kfree(priv);
filp->private_data = NULL;
@@ -434,7 +401,6 @@ int drm_lastclose(struct drm_device * dev)
drm_legacy_dma_takedown(dev);
- dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
drm_legacy_dev_reinit(dev);
@@ -458,7 +424,8 @@ int drm_lastclose(struct drm_device * dev)
int drm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
+ struct drm_minor *minor = file_priv->minor;
+ struct drm_device *dev = minor->dev;
int retcode = 0;
mutex_lock(&drm_global_mutex);
@@ -474,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->device),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
dev->open_count);
/* Release any auth tokens that might point to this file_priv,
@@ -517,11 +484,13 @@ int drm_release(struct inode *inode, struct file *filp)
}
mutex_unlock(&dev->ctxlist_mutex);
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev->master_mutex);
if (file_priv->is_master) {
struct drm_master *master = file_priv->master;
struct drm_file *temp;
+
+ mutex_lock(&dev->struct_mutex);
list_for_each_entry(temp, &dev->filelist, lhead) {
if ((temp->master == file_priv->master) &&
(temp != file_priv))
@@ -540,6 +509,7 @@ int drm_release(struct inode *inode, struct file *filp)
master->lock.file_priv = NULL;
wake_up_interruptible_all(&master->lock.lock_queue);
}
+ mutex_unlock(&dev->struct_mutex);
if (file_priv->minor->master == file_priv->master) {
/* drop the reference held my the minor */
@@ -549,13 +519,13 @@ int drm_release(struct inode *inode, struct file *filp)
}
}
- BUG_ON(dev->dev_mapping == NULL);
- iput(container_of(dev->dev_mapping, struct inode, i_data));
-
- /* drop the reference held my the file priv */
+ /* drop the master reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
file_priv->is_master = 0;
+ mutex_unlock(&dev->master_mutex);
+
+ mutex_lock(&dev->struct_mutex);
list_del(&file_priv->lhead);
mutex_unlock(&dev->struct_mutex);
@@ -580,6 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
}
mutex_unlock(&drm_global_mutex);
+ drm_minor_release(minor);
+
return retcode;
}
EXPORT_SYMBOL(drm_release);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 5bbad873c798..9909bef59800 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -85,9 +85,9 @@
#endif
/**
- * Initialize the GEM device fields
+ * drm_gem_init - Initialize the GEM device fields
+ * @dev: drm_device structure to initialize
*/
-
int
drm_gem_init(struct drm_device *dev)
{
@@ -120,6 +120,11 @@ drm_gem_destroy(struct drm_device *dev)
}
/**
+ * drm_gem_object_init - initialize an allocated shmem-backed GEM object
+ * @dev: drm_device the object should be initialized for
+ * @obj: drm_gem_object to initialize
+ * @size: object size
+ *
* Initialize an already allocated GEM object of the specified size with
* shmfs backing store.
*/
@@ -141,6 +146,11 @@ int drm_gem_object_init(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_object_init);
/**
+ * drm_gem_private_object_init - initialize an allocated private GEM object
+ * @dev: drm_device the object should be initialized for
+ * @obj: drm_gem_object to initialize
+ * @size: object size
+ *
* Initialize an already allocated GEM object of the specified size with
* no GEM provided backing store. Instead the caller is responsible for
* backing the object and handling it.
@@ -176,6 +186,9 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
}
/**
+ * drm_gem_object_handle_free - release resources bound to userspace handles
+ * @obj: GEM object to clean up.
+ *
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
@@ -225,7 +238,12 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
}
/**
- * Removes the mapping from handle to filp for this object.
+ * drm_gem_handle_delete - deletes the given file-private handle
+ * @filp: drm file-private structure to use for the handle look up
+ * @handle: userspace handle to delete
+ *
+ * Removes the GEM handle from the @filp lookup table and if this is the last
+ * handle also cleans up linked resources like GEM names.
*/
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
@@ -270,6 +288,9 @@ EXPORT_SYMBOL(drm_gem_handle_delete);
/**
* drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ * @file: drm file-private structure to remove the dumb handle from
+ * @dev: corresponding drm_device
+ * @handle: the dumb handle to remove
*
* This implements the ->dumb_destroy kms driver callback for drivers which use
* gem to manage their backing storage.
@@ -284,6 +305,9 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
/**
* drm_gem_handle_create_tail - internal functions to create a handle
+ * @file_priv: drm file-private structure to register the handle for
+ * @obj: object to register
+ * @handlep: pointer to return the created handle to the caller
*
* This expects the dev->object_name_lock to be held already and will drop it
* before returning. Used to avoid races in establishing new handles when
@@ -336,6 +360,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
}
/**
+ * drm_gem_handle_create - create a gem handle for an object
+ * @file_priv: drm file-private structure to register the handle for
+ * @obj: object to register
+ * @handlep: pointer to return the created handle to the caller
+ *
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
@@ -536,6 +565,11 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
+ * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
* Releases the handle to an mm object.
*/
int
@@ -554,6 +588,11 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
}
/**
+ * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
@@ -601,6 +640,11 @@ err:
}
/**
+ * drm_gem_open - implementation of the GEM_OPEN ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
@@ -640,6 +684,10 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
}
/**
+ * drm_gem_open - initializes GEM file-private structures at devnode open time
+ * @dev: drm_device which is being opened by userspace
+ * @file_private: drm file-private structure to set up
+ *
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
@@ -650,7 +698,7 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
spin_lock_init(&file_private->table_lock);
}
-/**
+/*
* Called at device close to release the file's
* handle references on objects.
*/
@@ -674,6 +722,10 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
}
/**
+ * drm_gem_release - release file-private GEM resources
+ * @dev: drm_device which is being closed by userspace
+ * @file_private: drm file-private structure to clean up
+ *
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
@@ -692,11 +744,16 @@ drm_gem_object_release(struct drm_gem_object *obj)
WARN_ON(obj->dma_buf);
if (obj->filp)
- fput(obj->filp);
+ fput(obj->filp);
+
+ drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
/**
+ * drm_gem_object_free - free a GEM object
+ * @kref: kref of the object to free
+ *
* Called after the last reference to the object has been lost.
* Must be called holding struct_ mutex
*
@@ -782,7 +839,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = dev->driver->gem_vm_ops;
vma->vm_private_data = obj;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
@@ -818,7 +875,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
struct drm_gem_object *obj;
struct drm_vma_offset_node *node;
- int ret = 0;
+ int ret;
if (drm_device_is_unplugged(dev))
return -ENODEV;
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 6b51bf90df0e..05c97c5350a1 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -79,7 +79,6 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
unsigned int size)
{
struct drm_gem_cma_object *cma_obj;
- struct sg_table *sgt = NULL;
int ret;
size = round_up(size, PAGE_SIZE);
@@ -97,23 +96,9 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
goto error;
}
- sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL);
- if (sgt == NULL) {
- ret = -ENOMEM;
- goto error;
- }
-
- ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr,
- cma_obj->paddr, size);
- if (ret < 0)
- goto error;
-
- cma_obj->sgt = sgt;
-
return cma_obj;
error:
- kfree(sgt);
drm_gem_cma_free_object(&cma_obj->base);
return ERR_PTR(ret);
}
@@ -175,10 +160,6 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
if (cma_obj->vaddr) {
dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
cma_obj->vaddr, cma_obj->paddr);
- if (cma_obj->sgt) {
- sg_free_table(cma_obj->sgt);
- kfree(cma_obj->sgt);
- }
} else if (gem_obj->import_attach) {
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
}
@@ -253,8 +234,17 @@ static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
{
int ret;
- ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+ * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+ * the whole buffer.
+ */
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_pgoff = 0;
+
+ ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
+ cma_obj->vaddr, cma_obj->paddr,
+ vma->vm_end - vma->vm_start);
if (ret)
drm_gem_vm_close(vma);
@@ -292,9 +282,9 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
off = drm_vma_node_start(&obj->vma_node);
- seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
+ seq_printf(m, "%2d (%2d) %08llx %pad %p %d",
obj->name, obj->refcount.refcount.counter,
- off, cma_obj->paddr, cma_obj->vaddr, obj->size);
+ off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
seq_printf(m, "\n");
}
@@ -342,7 +332,7 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
cma_obj->paddr = sg_dma_address(sgt->sgl);
cma_obj->sgt = sgt;
- DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr, size);
+ DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, size);
return &cma_obj->base;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index f4dc9b7a3831..93a42040bedb 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -328,6 +328,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->stereo_allowed = req->value;
break;
+ case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
+ if (!drm_universal_planes)
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->universal_planes = req->value;
+ break;
default:
return -EINVAL;
}
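On the userspace side (illustration only, not part of this patch), the cap is enabled through DRM_IOCTL_SET_CLIENT_CAP; with libdrm this is a one-liner, assuming drmSetClientCap() is available and the drm_universal_planes module parameter is set as checked above:

#include <xf86drm.h>

int example_enable_universal_planes(int fd)
{
	/* 1 = expose primary/cursor planes to this client */
	return drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
}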
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index b155ee2ffa17..09821f46d768 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -142,8 +142,12 @@ int mipi_dsi_host_register(struct mipi_dsi_host *host)
{
struct device_node *node;
- for_each_available_child_of_node(host->dev->of_node, node)
+ for_each_available_child_of_node(host->dev->of_node, node) {
+ /* skip nodes without reg property */
+ if (!of_find_property(node, "reg", NULL))
+ continue;
of_mipi_dsi_device_add(host, node);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index af93cc55259f..71e2d3fcd6ee 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -47,7 +47,48 @@
#include <linux/seq_file.h>
#include <linux/export.h>
-#define MM_UNUSED_TARGET 4
+/**
+ * DOC: Overview
+ *
+ * drm_mm provides a simple range allocator. The drivers are free to use the
+ * resource allocator from the linux core if it suits them; the upside of drm_mm
+ * is that it's in the DRM core, which means that it's easier to extend for
+ * some of the crazier special-purpose needs of GPUs.
+ *
+ * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
+ * Drivers are free to embed either of them into their own suitable
+ * datastructures. drm_mm itself will not do any allocations of its own, so if
+ * drivers choose not to embed nodes they need to still allocate them
+ * themselves.
+ *
+ * The range allocator also supports reservation of preallocated blocks. This is
+ * useful for taking over initial mode setting configurations from the firmware,
+ * where an object needs to be created which exactly matches the firmware's
+ * scanout target. As long as the range is still free it can be inserted anytime
+ * after the allocator is initialized, which helps with avoiding looped
+ * dependencies in the driver load sequence.
+ *
+ * drm_mm maintains a stack of most recently freed holes, which of all
+ * simplistic datastructures seems to be a fairly decent approach to clustering
+ * allocations and avoiding too much fragmentation. This means free space
+ * searches are O(num_holes). Given all the fancy features drm_mm supports,
+ * something better would be fairly complex, and since gfx thrashing is a fairly
+ * steep cliff it is not a real concern. Removing a node again is O(1).
+ *
+ * drm_mm supports a few features: Alignment and range restrictions can be
+ * supplied. Furthermore every &drm_mm_node has a color value (which is just an
+ * opaque unsigned long) which in conjunction with a driver callback can be used
+ * to implement sophisticated placement restrictions. The i915 DRM driver uses
+ * this to implement guard pages between incompatible caching domains in the
+ * graphics TT.
+ *
+ * Two behaviors are supported for searching and allocating: bottom-up and top-down.
+ * The default is bottom-up. Top-down allocation can be used if the memory area
+ * has different restrictions, or just to reduce fragmentation.
+ *
+ * Finally iteration helpers to walk all nodes and all holes are provided as are
+ * some basic allocator dumpers for debugging.
+ */
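A minimal allocator usage sketch to go with the overview (editorial example; flag names as introduced by this series, error handling trimmed):

static int example_alloc(struct drm_mm *mm, struct drm_mm_node *node,
			 unsigned long size)
{
	/* @node must be cleared to 0 before its first insertion */
	return drm_mm_insert_node_generic(mm, node, size, 0, 0,
					  DRM_MM_SEARCH_DEFAULT,
					  DRM_MM_CREATE_DEFAULT);
}

static void example_free(struct drm_mm_node *node)
{
	/* the node can be reused right away, no clearing required */
	drm_mm_remove_node(node);
}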
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
@@ -65,7 +106,8 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
- unsigned long color)
+ unsigned long color,
+ enum drm_mm_allocator_flags flags)
{
struct drm_mm *mm = hole_node->mm;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -78,12 +120,22 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start = adj_end - size;
+
if (alignment) {
unsigned tmp = adj_start % alignment;
- if (tmp)
- adj_start += alignment - tmp;
+ if (tmp) {
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start -= tmp;
+ else
+ adj_start += alignment - tmp;
+ }
}
+ BUG_ON(adj_start < hole_start);
+ BUG_ON(adj_end > hole_end);
+
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del(&hole_node->hole_stack);
@@ -107,6 +159,20 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
}
}
+/**
+ * drm_mm_reserve_node - insert a pre-initialized node
+ * @mm: drm_mm allocator to insert @node into
+ * @node: drm_mm_node to insert
+ *
+ * This function inserts an already set-up drm_mm_node into the allocator,
+ * meaning that start, size and color must be set by the caller. This is useful
+ * to initialize the allocator with preallocated objects which must be set up
+ * before the range allocator can be set up, e.g. when taking over a firmware
+ * framebuffer.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no hole where @node is.
+ */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole;
@@ -148,23 +214,34 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
EXPORT_SYMBOL(drm_mm_reserve_node);
/**
- * Search for free space and insert a preallocated memory node. Returns
- * -ENOSPC if no suitable free area is available. The preallocated memory node
- * must be cleared.
+ * drm_mm_insert_node_generic - search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocate node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color,
- enum drm_mm_search_flags flags)
+ enum drm_mm_search_flags sflags,
+ enum drm_mm_allocator_flags aflags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_generic(mm, size, alignment,
- color, flags);
+ color, sflags);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper(hole_node, node, size, alignment, color);
+ drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
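A hedged example of the new allocator flags added by this change: searching from the top of the address space and placing the node at the top of the hole, which can help keep the bottom of the range unfragmented:

static int example_alloc_topdown(struct drm_mm *mm,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0,
					  DRM_MM_SEARCH_BELOW,
					  DRM_MM_CREATE_TOP);
}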
@@ -173,7 +250,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ enum drm_mm_allocator_flags flags)
{
struct drm_mm *mm = hole_node->mm;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -188,13 +266,20 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
if (adj_end > end)
adj_end = end;
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start = adj_end - size;
+
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (alignment) {
unsigned tmp = adj_start % alignment;
- if (tmp)
- adj_start += alignment - tmp;
+ if (tmp) {
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start -= tmp;
+ else
+ adj_start += alignment - tmp;
+ }
}
if (adj_start == hole_start) {
@@ -211,6 +296,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
+ BUG_ON(node->start < start);
+ BUG_ON(node->start < adj_start);
BUG_ON(node->start + node->size > adj_end);
BUG_ON(node->start + node->size > end);
@@ -222,32 +309,51 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
}
/**
- * Search for free space and insert a preallocated memory node. Returns
- * -ENOSPC if no suitable free area is available. This is for range
- * restricted allocations. The preallocated memory node must be cleared.
+ * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocate node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @start: start of the allowed range for this node
+ * @end: end of the allowed range for this node
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment, unsigned long color,
+ unsigned long size, unsigned alignment,
+ unsigned long color,
unsigned long start, unsigned long end,
- enum drm_mm_search_flags flags)
+ enum drm_mm_search_flags sflags,
+ enum drm_mm_allocator_flags aflags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
- start, end, flags);
+ start, end, sflags);
if (!hole_node)
return -ENOSPC;
drm_mm_insert_helper_range(hole_node, node,
size, alignment, color,
- start, end);
+ start, end, aflags);
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
/**
- * Remove a memory node from the allocator.
+ * drm_mm_remove_node - Remove a memory node from the allocator.
+ * @node: drm_mm_node to remove
+ *
+ * This just removes a node from its drm_mm allocator. The node does not need to
+ * be cleared again before it can be re-inserted into this or any other drm_mm
+ * allocator. It is a bug to call this function on an unallocated node.
*/
void drm_mm_remove_node(struct drm_mm_node *node)
{
@@ -315,7 +421,10 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+ __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+ flags & DRM_MM_SEARCH_BELOW) {
+ unsigned long hole_size = adj_end - adj_start;
+
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
@@ -328,9 +437,9 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
- if (entry->size < best_size) {
+ if (hole_size < best_size) {
best = entry;
- best_size = entry->size;
+ best_size = hole_size;
}
}
@@ -356,7 +465,10 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
best = NULL;
best_size = ~0UL;
- drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+ __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+ flags & DRM_MM_SEARCH_BELOW) {
+ unsigned long hole_size = adj_end - adj_start;
+
if (adj_start < start)
adj_start = start;
if (adj_end > end)
@@ -374,9 +486,9 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
- if (entry->size < best_size) {
+ if (hole_size < best_size) {
best = entry;
- best_size = entry->size;
+ best_size = hole_size;
}
}
@@ -384,7 +496,13 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
}
/**
- * Moves an allocation. To be used with embedded struct drm_mm_node.
+ * drm_mm_replace_node - move an allocation from @old to @new
+ * @old: drm_mm_node to remove from the allocator
+ * @new: drm_mm_node which should inherit @old's allocation
+ *
+ * This is useful for when drivers embed the drm_mm_node structure and hence
+ * can't move allocations by reassigning pointers. It's a combination of remove
+ * and insert with the guarantee that the allocation start will match.
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
@@ -402,12 +520,46 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
EXPORT_SYMBOL(drm_mm_replace_node);
/**
- * Initializa lru scanning.
+ * DOC: lru scan roster
+ *
+ * Very often GPUs need to have contiguous allocations for a given object. When
+ * evicting objects to make space for a new one it is therefore not most
+ * efficient to simply select all objects from the tail of an LRU
+ * until there's a suitable hole: especially for big objects or nodes that
+ * otherwise have special allocation constraints there's a good chance we evict
+ * lots of (smaller) objects unnecessarily.
+ *
+ * The DRM range allocator supports this use-case through the scanning
+ * interfaces. First a scan operation needs to be initialized with
+ * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
+ * objects to the roster (probably by walking an LRU list, but this can be
+ * freely implemented) until a suitable hole is found or there's no further
+ * evictable object.
+ *
+ * Then the driver must walk through all objects again in exactly the reverse
+ * order to restore the allocator state. Note that while the allocator is used
+ * in the scan mode no other operation is allowed.
+ *
+ * Finally the driver evicts all objects selected in the scan. Adding and
+ * removing an object is O(1), and since freeing a node is also O(1) the overall
+ * complexity is O(scanned_objects). So like the free stack which needs to be
+ * walked before a scan operation even begins this is linear in the number of
+ * objects. It doesn't seem to hurt badly.
+ */
+
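A condensed sketch of the scan flow described above (the object struct, its list members and the driver's LRU list are assumptions of this example; a real driver would also release backing storage when evicting):

struct example_obj {
	struct drm_mm_node node;
	struct list_head lru_link;	/* position on the driver's LRU */
	struct list_head scan_link;	/* temporary scan/eviction list */
};

static bool example_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
				   unsigned long size, unsigned alignment)
{
	struct example_obj *obj, *next;
	LIST_HEAD(scan_list);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0);

	/* feed LRU candidates into the scan until a suitable hole appears */
	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = true;
			break;
		}
	}

	/*
	 * Unwind in exactly the reverse order of addition (scan_list was
	 * built with list_add, so head-to-tail is already reversed) and
	 * remember which nodes overlap the hole.
	 */
	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
		if (drm_mm_scan_remove_block(&obj->node))
			list_move(&obj->scan_link, &evict_list);
		else
			list_del(&obj->scan_link);
	}

	/* only now, with the scan finished, actually free the ranges */
	list_for_each_entry_safe(obj, next, &evict_list, scan_link) {
		drm_mm_remove_node(&obj->node);
		list_del(&obj->scan_link);
	}

	return found;
}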
+/**
+ * drm_mm_init_scan - initialize lru scanning
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
*
* This simply sets up the scanning routines with the parameters for the desired
- * hole.
+ * hole. Note that there's no need to specify allocation flags, since they only
+ * change the place a node is allocated from within a suitable hole.
*
- * Warning: As long as the scan list is non-empty, no other operations than
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan(struct drm_mm *mm,
@@ -427,12 +579,20 @@ void drm_mm_init_scan(struct drm_mm *mm,
EXPORT_SYMBOL(drm_mm_init_scan);
/**
- * Initializa lru scanning.
+ * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
+ * @start: start of the allowed range for the allocation
+ * @end: end of the allowed range for the allocation
*
* This simply sets up the scanning routines with the parameters for the desired
- * hole. This version is for range-restricted scans.
+ * hole. Note that there's no need to specify allocation flags, since they only
+ * change the place a node is allocated from within a suitable hole.
*
- * Warning: As long as the scan list is non-empty, no other operations than
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm,
@@ -456,12 +616,16 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
/**
+ * drm_mm_scan_add_block - add a node to the scan list
+ * @node: drm_mm_node to add
+ *
* Add a node to the scan list that might be freed to make space for the desired
* hole.
*
- * Returns non-zero, if a hole has been found, zero otherwise.
+ * Returns:
+ * True if a hole has been found, false otherwise.
*/
-int drm_mm_scan_add_block(struct drm_mm_node *node)
+bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
@@ -501,15 +665,16 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
mm->scan_hit_end = hole_end;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
- * Remove a node from the scan list.
+ * drm_mm_scan_remove_block - remove a node from the scan list
+ * @node: drm_mm_node to remove
*
* Nodes _must_ be removed in the exact same order from the scan list as they
* have been added, otherwise the internal state of the memory manager will be
@@ -519,10 +684,11 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
* immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
* return the just freed block (because its at the top of the free_stack list).
*
- * Returns one if this block should be evicted, zero otherwise. Will always
- * return zero when no hole has been found.
+ * Returns:
+ * True if this block should be evicted, false otherwise. Will always
+ * return false when no hole has been found.
*/
-int drm_mm_scan_remove_block(struct drm_mm_node *node)
+bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
@@ -543,7 +709,15 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
-int drm_mm_clean(struct drm_mm * mm)
+/**
+ * drm_mm_clean - checks whether an allocator is clean
+ * @mm: drm_mm allocator to check
+ *
+ * Returns:
+ * True if the allocator is completely free, false if there's still a node
+ * allocated in it.
+ */
+bool drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->head_node.node_list;
@@ -551,6 +725,14 @@ int drm_mm_clean(struct drm_mm * mm)
}
EXPORT_SYMBOL(drm_mm_clean);
+/**
+ * drm_mm_init - initialize a drm-mm allocator
+ * @mm: the drm_mm structure to initialize
+ * @start: start of the range managed by @mm
+ * @size: end of the range managed by @mm
+ *
+ * Note that @mm must be cleared to 0 before calling this function.
+ */
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->hole_stack);
@@ -572,6 +754,13 @@ void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
}
EXPORT_SYMBOL(drm_mm_init);
+/**
+ * drm_mm_takedown - clean up a drm_mm allocator
+ * @mm: drm_mm allocator to clean up
+ *
+ * Note that it is a bug to call this function on an allocator which is not
+ * clean.
+ */
void drm_mm_takedown(struct drm_mm * mm)
{
WARN(!list_empty(&mm->head_node.node_list),
@@ -597,6 +786,11 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
return 0;
}
+/**
+ * drm_mm_debug_table - dump allocator state to dmesg
+ * @mm: drm_mm allocator to dump
+ * @prefix: prefix to use for dumping to dmesg
+ */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
@@ -635,6 +829,11 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en
return 0;
}
+/**
+ * drm_mm_dump_table - dump allocator state to a seq_file
+ * @m: seq_file to dump to
+ * @mm: drm_mm allocator to dump
+ */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
struct drm_mm_node *entry;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index b0733153dfd2..8b410576fce4 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -37,15 +37,14 @@
#include <drm/drm_crtc.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_modes.h>
+
+#include "drm_crtc_internal.h"
/**
- * drm_mode_debug_printmodeline - debug print a mode
- * @dev: DRM device
+ * drm_mode_debug_printmodeline - print a mode to dmesg
* @mode: mode to print
*
- * LOCKING:
- * None.
- *
* Describe @mode using DRM_DEBUG.
*/
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
@@ -61,18 +60,77 @@ void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
/**
- * drm_cvt_mode -create a modeline based on CVT algorithm
+ * drm_mode_create - create a new display mode
* @dev: DRM device
- * @hdisplay: hdisplay size
- * @vdisplay: vdisplay size
- * @vrefresh : vrefresh rate
- * @reduced : Whether the GTF calculation is simplified
- * @interlaced:Whether the interlace is supported
*
- * LOCKING:
- * none.
+ * Create a new, cleared drm_display_mode with kzalloc, allocate an ID for it
+ * and return it.
*
- * return the modeline based on CVT algorithm
+ * Returns:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+ struct drm_display_mode *nmode;
+
+ nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+ if (!nmode)
+ return NULL;
+
+ if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
+ kfree(nmode);
+ return NULL;
+ }
+
+ return nmode;
+}
+EXPORT_SYMBOL(drm_mode_create);
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * Release @mode's unique ID, then free the @mode structure itself using kfree.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ if (!mode)
+ return;
+
+ drm_mode_object_put(dev, &mode->base);
+
+ kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_destroy);
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed_mode list
+ * @connector: connector the new mode is added to
+ * @mode: mode data
+ *
+ * Add @mode to @connector's probed_mode list for later use. In a second step
+ * this list should be filtered, and only the modes actually supported by the
+ * hardware moved over to @connector's modes list.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
+
+ list_add_tail(&mode->head, &connector->probed_modes);
+}
+EXPORT_SYMBOL(drm_mode_probed_add);
+
+/**
+ * drm_cvt_mode - create a modeline based on the CVT algorithm
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @reduced: whether to use reduced blanking
+ * @interlaced: whether to compute an interlaced mode
+ * @margins: whether to add margins (borders)
*
* This function is called to generate the modeline based on the CVT algorithm
* according to the hdisplay, vdisplay and vrefresh.
@@ -82,12 +140,17 @@ EXPORT_SYMBOL(drm_mode_debug_printmodeline);
*
* And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
* What I have done is to translate it by using integer calculation.
+ *
+ * Returns:
+ * The modeline based on the CVT algorithm stored in a drm_display_mode object.
+ * The display mode object is allocated with drm_mode_create(). Returns NULL
+ * when no mode could be allocated.
*/
-#define HV_FACTOR 1000
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
int vdisplay, int vrefresh,
bool reduced, bool interlaced, bool margins)
{
+#define HV_FACTOR 1000
/* 1) top/bottom margin size (% of height) - default: 1.8, */
#define CVT_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
@@ -281,23 +344,25 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
EXPORT_SYMBOL(drm_cvt_mode);
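/*
 * Usage sketch: how a connector's ->get_modes() hook might combine
 * drm_cvt_mode() with drm_mode_probed_add() above. The example_* name and the
 * 1024x768 timing are hypothetical, not part of this patch.
 */
static int example_connector_get_modes(struct drm_connector *connector)
{
        struct drm_display_mode *mode;

        /* 1024x768 at 60 Hz, no reduced blanking, progressive, no margins */
        mode = drm_cvt_mode(connector->dev, 1024, 768, 60, false, false, false);
        if (!mode)
                return 0;

        mode->type |= DRM_MODE_TYPE_PREFERRED;
        drm_mode_probed_add(connector, mode);
        return 1;
}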
/**
- * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
- *
- * @dev :drm device
- * @hdisplay :hdisplay size
- * @vdisplay :vdisplay size
- * @vrefresh :vrefresh rate.
- * @interlaced :whether the interlace is supported
- * @margins :desired margin size
- * @GTF_[MCKJ] :extended GTF formula parameters
- *
- * LOCKING.
- * none.
- *
- * return the modeline based on full GTF algorithm.
+ * drm_gtf_mode_complex - create the modeline based on the full GTF algorithm
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate.
+ * @interlaced: whether to compute an interlaced mode
+ * @margins: desired margin (borders) size
+ * @GTF_M: extended GTF formula parameters
+ * @GTF_2C: extended GTF formula parameters
+ * @GTF_K: extended GTF formula parameters
+ * @GTF_2J: extended GTF formula parameters
*
* GTF feature blocks specify C and J in multiples of 0.5, so we pass them
* in here multiplied by two. For a C of 40, pass in 80.
+ *
+ * Returns:
+ * The modeline based on the full GTF algorithm stored in a drm_display_mode object.
+ * The display mode object is allocated with drm_mode_create(). Returns NULL
+ * when no mode could be allocated.
*/
struct drm_display_mode *
drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
@@ -467,17 +532,13 @@ drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
EXPORT_SYMBOL(drm_gtf_mode_complex);
/**
- * drm_gtf_mode - create the modeline based on GTF algorithm
- *
- * @dev :drm device
- * @hdisplay :hdisplay size
- * @vdisplay :vdisplay size
- * @vrefresh :vrefresh rate.
- * @interlaced :whether the interlace is supported
- * @margins :whether the margin is supported
- *
- * LOCKING.
- * none.
+ * drm_gtf_mode - create the modeline based on the GTF algorithm
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate.
+ * @interlaced: whether to compute an interlaced mode
+ * @margins: desired margin (borders) size
*
* return the modeline based on GTF algorithm
*
@@ -496,19 +557,32 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
* C = 40
* K = 128
* J = 20
+ *
+ * Returns:
+ * The modeline based on the GTF algorithm stored in a drm_display_mode object.
+ * The display mode object is allocated with drm_mode_create(). Returns NULL
+ * when no mode could be allocated.
*/
struct drm_display_mode *
drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
- bool lace, int margins)
+ bool interlaced, int margins)
{
- return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
- margins, 600, 40 * 2, 128, 20 * 2);
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh,
+ interlaced, margins,
+ 600, 40 * 2, 128, 20 * 2);
}
EXPORT_SYMBOL(drm_gtf_mode);
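/*
 * Usage sketch: drm_gtf_mode() is drm_gtf_mode_complex() with the default GTF
 * parameters filled in; a hypothetical caller could request the same mode
 * either way (the resolution below is illustrative, not part of this patch).
 */
static struct drm_display_mode *example_gtf_1280x1024(struct drm_device *dev)
{
        /*
         * Equivalent to drm_gtf_mode_complex(dev, 1280, 1024, 60, false, 0,
         *                                    600, 40 * 2, 128, 20 * 2).
         */
        return drm_gtf_mode(dev, 1280, 1024, 60, false, 0);
}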
#ifdef CONFIG_VIDEOMODE_HELPERS
-int drm_display_mode_from_videomode(const struct videomode *vm,
- struct drm_display_mode *dmode)
+/**
+ * drm_display_mode_from_videomode - fill in @dmode using @vm
+ * @vm: videomode structure to use as source
+ * @dmode: drm_display_mode structure to use as destination
+ *
+ * Fills out @dmode using the display mode specified in @vm.
+ */
+void drm_display_mode_from_videomode(const struct videomode *vm,
+ struct drm_display_mode *dmode)
{
dmode->hdisplay = vm->hactive;
dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
@@ -538,8 +612,6 @@ int drm_display_mode_from_videomode(const struct videomode *vm,
if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
dmode->flags |= DRM_MODE_FLAG_DBLCLK;
drm_mode_set_name(dmode);
-
- return 0;
}
EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
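/*
 * Usage sketch: converting a filled-in struct videomode (from
 * <video/videomode.h>) into a drm_display_mode. The 1024x768 timing values
 * are illustrative, not part of this patch.
 */
static struct drm_display_mode *example_mode_from_vm(struct drm_device *dev)
{
        struct videomode vm = {
                .pixelclock = 65000000,
                .hactive = 1024, .hfront_porch = 24,
                .hsync_len = 136, .hback_porch = 160,
                .vactive = 768, .vfront_porch = 3,
                .vsync_len = 6, .vback_porch = 29,
        };
        struct drm_display_mode *mode = drm_mode_create(dev);

        if (mode)
                drm_display_mode_from_videomode(&vm, mode);
        return mode;
}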
@@ -553,6 +625,9 @@ EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
* This function is expensive and should only be used, if only one mode is to be
* read from DT. To get multiple modes start with of_get_display_timings and
* work with that instead.
+ *
+ * Returns:
+ * 0 on success, a negative errno code when no OF videomode node was found.
*/
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode, int index)
@@ -580,10 +655,8 @@ EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
*
- * LOCKING:
- * None.
- *
- * Set the name of @mode to a standard format.
+ * Set the name of @mode to a standard format which is <hdisplay>x<vdisplay>
+ * with an optional 'i' suffix for interlaced modes.
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
@@ -595,54 +668,12 @@ void drm_mode_set_name(struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_mode_set_name);
-/**
- * drm_mode_width - get the width of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's width (hdisplay) value.
- *
- * FIXME: is this needed?
- *
- * RETURNS:
- * @mode->hdisplay
- */
-int drm_mode_width(const struct drm_display_mode *mode)
-{
- return mode->hdisplay;
-
-}
-EXPORT_SYMBOL(drm_mode_width);
-
-/**
- * drm_mode_height - get the height of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's height (vdisplay) value.
- *
- * FIXME: is this needed?
- *
- * RETURNS:
- * @mode->vdisplay
- */
-int drm_mode_height(const struct drm_display_mode *mode)
-{
- return mode->vdisplay;
-}
-EXPORT_SYMBOL(drm_mode_height);
-
/** drm_mode_hsync - get the hsync of a mode
* @mode: mode
*
- * LOCKING:
- * None.
- *
- * Return @modes's hsync rate in kHz, rounded to the nearest int.
+ * Returns:
+ * @mode's hsync rate in kHz, rounded to the nearest integer. Calculates the
+ * value first if it is not yet set.
*/
int drm_mode_hsync(const struct drm_display_mode *mode)
{
@@ -666,17 +697,9 @@ EXPORT_SYMBOL(drm_mode_hsync);
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
*
- * LOCKING:
- * None.
- *
- * Return @mode's vrefresh rate in Hz or calculate it if necessary.
- *
- * FIXME: why is this needed? shouldn't vrefresh be set already?
- *
- * RETURNS:
- * Vertical refresh rate. It will be the result of actual value plus 0.5.
- * If it is 70.288, it will return 70Hz.
- * If it is 59.6, it will return 60Hz.
+ * Returns:
+ * @mode's vrefresh rate in Hz, rounded to the nearest integer. Calculates the
+ * value first if it is not yet set.
*/
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
@@ -705,14 +728,11 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_vrefresh);
/**
- * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * drm_mode_set_crtcinfo - set CRTC modesetting timing parameters
* @p: mode
* @adjust_flags: a combination of adjustment flags
*
- * LOCKING:
- * None.
- *
- * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ * Setup the CRTC modesetting timing parameters for @p, adjusting if necessary.
*
* - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
* interlaced modes.
@@ -780,15 +800,11 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
-
/**
* drm_mode_copy - copy the mode
* @dst: mode to overwrite
* @src: mode to copy
*
- * LOCKING:
- * None.
- *
* Copy an existing mode into another mode, preserving the object id and
* list head of the destination mode.
*/
@@ -805,13 +821,14 @@ EXPORT_SYMBOL(drm_mode_copy);
/**
* drm_mode_duplicate - allocate and duplicate an existing mode
- * @m: mode to duplicate
- *
- * LOCKING:
- * None.
+ * @dev: drm_device to allocate the duplicated mode for
+ * @mode: mode to duplicate
*
* Just allocate a new mode, copy the existing mode into it, and return
* a pointer to it. Used to create new instances of established modes.
+ *
+ * Returns:
+ * Pointer to duplicated mode on success, NULL on error.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -833,12 +850,9 @@ EXPORT_SYMBOL(drm_mode_duplicate);
* @mode1: first mode
* @mode2: second mode
*
- * LOCKING:
- * None.
- *
* Check to see if @mode1 and @mode2 are equivalent.
*
- * RETURNS:
+ * Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
@@ -864,13 +878,10 @@ EXPORT_SYMBOL(drm_mode_equal);
* @mode1: first mode
* @mode2: second mode
*
- * LOCKING:
- * None.
- *
* Check to see if @mode1 and @mode2 are equivalent, but
* don't check the pixel clocks nor the stereo layout.
*
- * RETURNS:
+ * Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
@@ -900,25 +911,19 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
* @mode_list: list of modes to check
* @maxX: maximum width
* @maxY: maximum height
- * @maxPitch: max pitch
*
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * The DRM device (@dev) has size and pitch limits. Here we validate the
- * modes we probed for @dev against those limits and set their status as
- * necessary.
+ * This function is a helper which can be used to validate modes against size
+ * limitations of the DRM device/connector. If a mode is too big its status
+ * member is updated with the appropriate validation failure code. The list
+ * itself is not changed.
*/
void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
- int maxX, int maxY, int maxPitch)
+ int maxX, int maxY)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, mode_list, head) {
- if (maxPitch > 0 && mode->hdisplay > maxPitch)
- mode->status = MODE_BAD_WIDTH;
-
if (maxX > 0 && mode->hdisplay > maxX)
mode->status = MODE_VIRTUAL_X;
@@ -934,12 +939,10 @@ EXPORT_SYMBOL(drm_mode_validate_size);
* @mode_list: list of modes to check
* @verbose: be verbose about it
*
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Once mode list generation is complete, a caller can use this routine to
- * remove invalid modes from a mode list. If any of the modes have a
- * status other than %MODE_OK, they are removed from @mode_list and freed.
+ * This helper function can be used to prune a display mode list after
+ * validation has been completed. All modes whose status is not MODE_OK will be
+ * removed from the list, and if @verbose the status code and mode name are also
+ * printed to dmesg.
*/
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose)
@@ -966,13 +969,10 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
* @lh_a: list_head for first mode
* @lh_b: list_head for second mode
*
- * LOCKING:
- * None.
- *
* Compare two modes, given by @lh_a and @lh_b, returning a value indicating
* which is better.
*
- * RETURNS:
+ * Returns:
* Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
* positive if @lh_b is better than @lh_a.
*/
@@ -1000,12 +1000,9 @@ static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head
/**
* drm_mode_sort - sort mode list
- * @mode_list: list to sort
+ * @mode_list: list of drm_display_mode structures to sort
*
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Sort @mode_list by favorability, putting good modes first.
+ * Sort @mode_list by favorability, moving good modes to the head of the list.
*/
void drm_mode_sort(struct list_head *mode_list)
{
@@ -1017,13 +1014,12 @@ EXPORT_SYMBOL(drm_mode_sort);
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
*
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
- * list and only adds different modes. All modes unverified after this point
- * will be removed by the prune invalid modes.
+ * list and only adds different/new modes.
+ *
+ * This is just a helper function; it doesn't validate any modes itself and
+ * also doesn't prune any invalid modes. Callers need to do that themselves.
*/
void drm_mode_connector_list_update(struct drm_connector *connector)
{
@@ -1031,6 +1027,8 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
struct drm_display_mode *pmode, *pt;
int found_it;
+ WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
+
list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
head) {
found_it = 0;
@@ -1056,17 +1054,25 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
EXPORT_SYMBOL(drm_mode_connector_list_update);
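/*
 * Usage sketch: the typical post-probe sequence built from the helpers above,
 * roughly what the probe helpers do after ->get_modes() has filled the
 * probed_modes list (hypothetical code, not part of this patch).
 */
static void example_finish_probe(struct drm_connector *connector,
                                 int max_w, int max_h)
{
        struct drm_device *dev = connector->dev;

        /* caller holds dev->mode_config.mutex, as required by the helpers */
        drm_mode_connector_list_update(connector);

        /* flag modes which exceed the device limits ... */
        drm_mode_validate_size(dev, &connector->modes, max_w, max_h);

        /* ... drop everything that is not MODE_OK, verbosely ... */
        drm_mode_prune_invalid(dev, &connector->modes, true);

        /* ... and move the best remaining modes to the head of the list */
        drm_mode_sort(&connector->modes);
}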
/**
- * drm_mode_parse_command_line_for_connector - parse command line for connector
- * @mode_option - per connector mode option
- * @connector - connector to parse line for
+ * drm_mode_parse_command_line_for_connector - parse command line modeline for connector
+ * @mode_option: optional per connector mode option
+ * @connector: connector to parse modeline for
+ * @mode: preallocated drm_cmdline_mode structure to fill out
+ *
+ * This parses @mode_option command line modeline for modes and options to
+ * configure the connector. If @mode_option is NULL the default command line
+ * modeline in fb_mode_option will be parsed instead.
*
- * This parses the connector specific then generic command lines for
- * modes and options to configure the connector.
+ * This uses the same parameters as the fb modedb.c, except for an extra
+ * force-enable, force-enable-digital and force-disable bit at the end:
*
- * This uses the same parameters as the fb modedb.c, except for extra
* <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
*
- * enable/enable Digital/disable bit at the end
+ * The intermediate drm_cmdline_mode structure is required to store additional
+ * options from the command line modeline like the force-enable/disable flags.
+ *
+ * Returns:
+ * True if a valid modeline has been parsed, false otherwise.
*/
bool drm_mode_parse_command_line_for_connector(const char *mode_option,
struct drm_connector *connector,
@@ -1219,6 +1225,14 @@ done:
}
EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
+/**
+ * drm_mode_create_from_cmdline_mode - convert a command line modeline into a DRM display mode
+ * @dev: DRM device to create the new mode for
+ * @cmd: input command line modeline
+ *
+ * Returns:
+ * Pointer to converted mode on success, NULL on error.
+ */
struct drm_display_mode *
drm_mode_create_from_cmdline_mode(struct drm_device *dev,
struct drm_cmdline_mode *cmd)
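/*
 * Usage sketch: turning a command line option into a display mode with the
 * two functions documented above (hypothetical code, not part of this patch).
 */
static struct drm_display_mode *
example_mode_from_cmdline(struct drm_device *dev,
                          struct drm_connector *connector,
                          const char *option)
{
        struct drm_cmdline_mode cmd = { };

        if (!drm_mode_parse_command_line_for_connector(option, connector,
                                                       &cmd))
                return NULL;

        return drm_mode_create_from_cmdline_mode(dev, &cmd);
}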
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f7af69bcf3f4..9c696a5ad74d 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -351,7 +351,7 @@ err_agp:
drm_pci_agp_destroy(dev);
pci_disable_device(pdev);
err_free:
- drm_dev_free(dev);
+ drm_dev_unref(dev);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
new file mode 100644
index 000000000000..e768d35ff22e
--- /dev/null
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * DRM universal plane helper functions
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <drm/drmP.h>
+#include <drm/drm_rect.h>
+
+#define SUBPIXEL_MASK 0xffff
+
+/*
+ * This is the minimal list of formats that seem to be safe for modeset use
+ * with all current DRM drivers. Most hardware can actually support more
+ * formats than this and drivers may specify a more accurate list when
+ * creating the primary plane. However drivers that still call
+ * drm_plane_init() will use this minimal format list as the default.
+ */
+static const uint32_t safe_modeset_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+};
+
+/*
+ * Returns the connectors currently associated with a CRTC. This function
+ * should be called twice: once with a NULL connector list to retrieve
+ * the list size, and once with the properly allocated list to be filled in.
+ */
+static int get_connectors_for_crtc(struct drm_crtc *crtc,
+ struct drm_connector **connector_list,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ int count = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder && connector->encoder->crtc == crtc) {
+ if (connector_list != NULL && count < num_connectors)
+ *(connector_list++) = connector;
+
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * drm_primary_helper_update() - Helper for primary plane update
+ * @plane: plane object to update
+ * @crtc: owning CRTC of the plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ *
+ * Provides a default plane update handler for primary planes. This handler is
+ * called in response to a userspace SetPlane operation on the plane with a
+ * non-NULL framebuffer. We call the driver's modeset handler to update the
+ * framebuffer.
+ *
+ * SetPlane() on a primary plane of a disabled CRTC is not supported, and will
+ * return an error.
+ *
+ * Note that we make some assumptions about hardware limitations that may not be
+ * true for all hardware --
+ * 1) Primary plane cannot be repositioned.
+ * 2) Primary plane cannot be scaled.
+ * 3) Primary plane must cover the entire CRTC.
+ * 4) Subpixel positioning is not supported.
+ * Drivers for hardware that don't have these restrictions can provide their
+ * own implementation rather than using this helper.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_mode_set set = {
+ .crtc = crtc,
+ .fb = fb,
+ .mode = &crtc->mode,
+ .x = src_x >> 16,
+ .y = src_y >> 16,
+ };
+ struct drm_rect dest = {
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect clip = {
+ .x2 = crtc->mode.hdisplay,
+ .y2 = crtc->mode.vdisplay,
+ };
+ struct drm_connector **connector_list;
+ struct drm_framebuffer *tmpfb;
+ int num_connectors, ret;
+
+ if (!crtc->enabled) {
+ DRM_DEBUG_KMS("Cannot update primary plane of a disabled CRTC.\n");
+ return -EINVAL;
+ }
+
+ /* Disallow subpixel positioning */
+ if ((src_x | src_y | src_w | src_h) & SUBPIXEL_MASK) {
+ DRM_DEBUG_KMS("Primary plane does not support subpixel positioning\n");
+ return -EINVAL;
+ }
+
+ /* Primary planes are locked to their owning CRTC */
+ if (plane->possible_crtcs != drm_crtc_mask(crtc)) {
+ DRM_DEBUG_KMS("Cannot change primary plane CRTC\n");
+ return -EINVAL;
+ }
+
+ /* Disallow scaling */
+ if (crtc_w != src_w || crtc_h != src_h) {
+ DRM_DEBUG_KMS("Can't scale primary plane\n");
+ return -EINVAL;
+ }
+
+ /* Make sure primary plane covers entire CRTC */
+ drm_rect_intersect(&dest, &clip);
+ if (dest.x1 != 0 || dest.y1 != 0 ||
+ dest.x2 != crtc->mode.hdisplay || dest.y2 != crtc->mode.vdisplay) {
+ DRM_DEBUG_KMS("Primary plane must cover entire CRTC\n");
+ return -EINVAL;
+ }
+
+ /* Framebuffer must be big enough to cover entire plane */
+ ret = drm_crtc_check_viewport(crtc, crtc_x, crtc_y, &crtc->mode, fb);
+ if (ret)
+ return ret;
+
+ /* Find current connectors for CRTC */
+ num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
+ BUG_ON(num_connectors == 0);
+ connector_list = kzalloc(num_connectors * sizeof(*connector_list),
+ GFP_KERNEL);
+ if (!connector_list)
+ return -ENOMEM;
+ get_connectors_for_crtc(crtc, connector_list, num_connectors);
+
+ set.connectors = connector_list;
+ set.num_connectors = num_connectors;
+
+ /*
+ * set_config() adjusts crtc->primary->fb; however the DRM setplane
+ * code that called us expects to handle the framebuffer update and
+ * reference counting; save and restore the current fb before
+ * calling it.
+ *
+ * N.B., we call set_config() directly here rather than using
+ * drm_mode_set_config_internal. We're reprogramming the same
+ * connectors that were already in use, so we shouldn't need the extra
+ * cross-CRTC fb refcounting to accommodate stealing connectors.
+ * drm_mode_setplane() already handles the basic refcounting for the
+ * framebuffers involved in this operation.
+ */
+ tmpfb = plane->fb;
+ ret = crtc->funcs->set_config(&set);
+ plane->fb = tmpfb;
+
+ kfree(connector_list);
+ return ret;
+}
+EXPORT_SYMBOL(drm_primary_helper_update);
+
+/**
+ * drm_primary_helper_disable() - Helper for primary plane disable
+ * @plane: plane to disable
+ *
+ * Provides a default plane disable handler for primary planes. This handler is
+ * called in response to a userspace SetPlane operation on the plane with a
+ * NULL framebuffer parameter. We call the driver's modeset handler with a NULL
+ * framebuffer to disable the CRTC if no other planes are currently enabled.
+ * If other planes are still enabled on the same CRTC, we return -EBUSY.
+ *
+ * Note that some hardware may be able to disable the primary plane without
+ * disabling the whole CRTC. Drivers for such hardware should provide their
+ * own disable handler that disables just the primary plane (and they'll likely
+ * need to provide their own update handler as well to properly re-enable a
+ * disabled primary plane).
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_primary_helper_disable(struct drm_plane *plane)
+{
+ struct drm_plane *p;
+ struct drm_mode_set set = {
+ .crtc = plane->crtc,
+ .fb = NULL,
+ };
+
+ if (plane->crtc == NULL || plane->fb == NULL)
+ /* Already disabled */
+ return 0;
+
+ list_for_each_entry(p, &plane->dev->mode_config.plane_list, head)
+ if (p != plane && p->fb) {
+ DRM_DEBUG_KMS("Cannot disable primary plane while other planes are still active on CRTC.\n");
+ return -EBUSY;
+ }
+
+ /*
+ * N.B. We call set_config() directly here rather than
+ * drm_mode_set_config_internal() since drm_mode_setplane() already
+ * handles the basic refcounting and we don't need the special
+ * cross-CRTC refcounting (no chance of stealing connectors from
+ * other CRTC's with this update).
+ */
+ return plane->crtc->funcs->set_config(&set);
+}
+EXPORT_SYMBOL(drm_primary_helper_disable);
+
+/**
+ * drm_primary_helper_destroy() - Helper for primary plane destruction
+ * @plane: plane to destroy
+ *
+ * Provides a default plane destroy handler for primary planes. This handler
+ * is called during CRTC destruction. We disable the primary plane, remove
+ * it from the DRM plane list, and deallocate the plane structure.
+ */
+void drm_primary_helper_destroy(struct drm_plane *plane)
+{
+ plane->funcs->disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(plane);
+}
+EXPORT_SYMBOL(drm_primary_helper_destroy);
+
+const struct drm_plane_funcs drm_primary_helper_funcs = {
+ .update_plane = drm_primary_helper_update,
+ .disable_plane = drm_primary_helper_disable,
+ .destroy = drm_primary_helper_destroy,
+};
+EXPORT_SYMBOL(drm_primary_helper_funcs);
+
+/**
+ * drm_primary_helper_create_plane() - Create a generic primary plane
+ * @dev: drm device
+ * @formats: pixel formats supported, or NULL for a default safe list
+ * @num_formats: size of @formats; ignored if @formats is NULL
+ *
+ * Allocates and initializes a primary plane that can be used with the primary
+ * plane helpers. Drivers that wish to use driver-specific plane structures or
+ * provide custom handler functions may perform their own allocation and
+ * initialization rather than calling this function.
+ */
+struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
+ const uint32_t *formats,
+ int num_formats)
+{
+ struct drm_plane *primary;
+ int ret;
+
+ primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+ if (primary == NULL) {
+ DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+ return NULL;
+ }
+
+ if (formats == NULL) {
+ formats = safe_modeset_formats;
+ num_formats = ARRAY_SIZE(safe_modeset_formats);
+ }
+
+ /* possible_crtcs will be filled in later by crtc_init */
+ ret = drm_plane_init(dev, primary, 0, &drm_primary_helper_funcs,
+ formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY);
+ if (ret) {
+ kfree(primary);
+ primary = NULL;
+ }
+
+ return primary;
+}
+EXPORT_SYMBOL(drm_primary_helper_create_plane);
+
+/**
+ * drm_crtc_init - Legacy CRTC initialization function
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * Initialize a CRTC object with a default helper-provided primary plane and no
+ * cursor plane.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs)
+{
+ struct drm_plane *primary;
+
+ primary = drm_primary_helper_create_plane(dev, NULL, 0);
+ return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
+}
+EXPORT_SYMBOL(drm_crtc_init);
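/*
 * Usage sketch: a driver with hardware-specific format limits can build its
 * own primary plane and call drm_crtc_init_with_planes() directly; the format
 * list and my_crtc_funcs parameter are hypothetical, not part of this patch.
 */
static const uint32_t example_primary_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_RGB565,
};

static int example_crtc_setup(struct drm_device *dev, struct drm_crtc *crtc,
                              const struct drm_crtc_funcs *my_crtc_funcs)
{
        struct drm_plane *primary;

        primary = drm_primary_helper_create_plane(dev, example_primary_formats,
                                                  ARRAY_SIZE(example_primary_formats));
        if (!primary)
                return -ENOMEM;

        return drm_crtc_init_with_planes(dev, crtc, primary, NULL,
                                         my_crtc_funcs);
}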
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 21fc82006b78..319ff5385601 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -64,7 +64,7 @@ static int drm_get_platform_dev(struct platform_device *platdev,
return 0;
err_free:
- drm_dev_free(dev);
+ drm_dev_unref(dev);
return ret;
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 56805c39c906..304ca8cacbc4 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -68,7 +68,8 @@ struct drm_prime_attachment {
enum dma_data_direction dir;
};
-static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf, uint32_t handle)
{
struct drm_prime_member *member;
@@ -174,7 +175,7 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
}
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
struct drm_prime_attachment *prime_attach = attach->priv;
struct drm_gem_object *obj = attach->dmabuf->priv;
@@ -211,11 +212,19 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
}
static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sgt, enum dma_data_direction dir)
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
{
/* nothing to be done here */
}
+/**
+ * drm_gem_dmabuf_release - dma_buf release implementation for GEM
+ * @dma_buf: buffer to be released
+ *
+ * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
+ * must use this in their dma_buf ops structure as the release callback.
+ */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
@@ -242,30 +251,30 @@ static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
}
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
+ unsigned long page_num)
{
return NULL;
}
static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
+ unsigned long page_num, void *addr)
{
}
static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
- unsigned long page_num)
+ unsigned long page_num)
{
return NULL;
}
static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
+ unsigned long page_num, void *addr)
{
}
static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
- struct vm_area_struct *vma)
+ struct vm_area_struct *vma)
{
struct drm_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->dev;
@@ -315,6 +324,15 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
* driver's scatter/gather table
*/
+/**
+ * drm_gem_prime_export - helper library implementation of the export callback
+ * @dev: drm_device to export from
+ * @obj: GEM object to export
+ * @flags: flags like DRM_CLOEXEC
+ *
+ * This is the implementation of the gem_prime_export function for GEM drivers
+ * using the PRIME helpers.
+ */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
@@ -355,9 +373,23 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
return dmabuf;
}
+/**
+ * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @handle: buffer handle to export
+ * @flags: flags like DRM_CLOEXEC
+ * @prime_fd: pointer to storage for the fd id of the created dma-buf
+ *
+ * This is the PRIME export function which GEM drivers must use to ensure
+ * correct lifetime management of the underlying GEM object. The actual
+ * exporting from the GEM object to a dma-buf is done through the
+ * gem_prime_export driver callback.
+ */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle, uint32_t flags,
- int *prime_fd)
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags,
+ int *prime_fd)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -441,6 +473,14 @@ out_unlock:
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
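/*
 * Usage sketch: the standard way for a GEM driver to wire up PRIME support
 * with the helpers in this file (example_driver is hypothetical, not part of
 * this patch; the remaining drm_driver fields are omitted).
 */
static struct drm_driver example_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_PRIME,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = drm_gem_prime_export,
        .gem_prime_import       = drm_gem_prime_import,
};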
+/**
+ * drm_gem_prime_import - helper library implementation of the import callback
+ * @dev: drm_device to import into
+ * @dma_buf: dma-buf object to import
+ *
+ * This is the implementation of the gem_prime_import function for GEM drivers
+ * using the PRIME helpers.
+ */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
@@ -471,7 +511,7 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR_OR_NULL(sgt)) {
+ if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto fail_detach;
}
@@ -496,8 +536,21 @@ fail_detach:
}
EXPORT_SYMBOL(drm_gem_prime_import);
+/**
+ * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @prime_fd: fd id of the dma-buf which should be imported
+ * @handle: pointer to storage for the handle of the imported buffer object
+ *
+ * This is the PRIME import function which GEM drivers must use to ensure
+ * correct lifetime management of the underlying GEM object. The actual
+ * importing of the GEM object from the dma-buf is done through the
+ * gem_prime_import driver callback.
+ */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd, uint32_t *handle)
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle)
{
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
@@ -598,12 +651,14 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
args->fd, &args->handle);
}
-/*
- * drm_prime_pages_to_sg
+/**
+ * drm_prime_pages_to_sg - converts a page array into an sg list
+ * @pages: pointer to the array of page pointers to convert
+ * @nr_pages: length of the page vector
*
- * this helper creates an sg table object from a set of pages
+ * This helper creates an sg table object from a set of pages;
* the driver is responsible for mapping the pages into the
- * importers address space
+ * importer's address space for use with dma_buf itself.
*/
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
@@ -628,9 +683,16 @@ out:
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
-/* export an sg table into an array of pages and addresses
- this is currently required by the TTM driver in order to do correct fault
- handling */
+/**
+ * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
+ * @sgt: scatter-gather table to convert
+ * @pages: array of page pointers to store the page array in
+ * @addrs: optional array to store the dma bus address of each page
+ * @max_pages: size of both the passed-in arrays
+ *
+ * Exports an sg table into an array of pages and addresses. This is currently
+ * required by the TTM driver in order to do correct fault handling.
+ */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_pages)
{
@@ -663,7 +725,15 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
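/*
 * Usage sketch: an export path converting a driver's page array into an sg
 * table and an import path getting the pages back; struct example_bo is a
 * hypothetical buffer object, not part of this patch.
 */
struct example_bo {
        struct page **pages;
        int npages;
};

static struct sg_table *example_bo_get_sg_table(struct example_bo *bo)
{
        return drm_prime_pages_to_sg(bo->pages, bo->npages);
}

static int example_bo_restore_pages(struct example_bo *bo,
                                    struct sg_table *sgt)
{
        /* the addrs array may be NULL when bus addresses are not needed */
        return drm_prime_sg_to_page_addr_arrays(sgt, bo->pages, NULL,
                                                bo->npages);
}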
-/* helper function to cleanup a GEM/prime object */
+
+/**
+ * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
+ * @obj: GEM object which was created from a dma-buf
+ * @sg: the sg-table which was pinned at import time
+ *
+ * This is the cleanup function which GEM drivers need to call when they use
+ * drm_gem_prime_import() to import dma-bufs.
+ */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
struct dma_buf_attachment *attach;
@@ -683,11 +753,9 @@ void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
INIT_LIST_HEAD(&prime_fpriv->head);
mutex_init(&prime_fpriv->lock);
}
-EXPORT_SYMBOL(drm_prime_init_file_private);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
/* by now drm_gem_release should've made sure the list is empty */
WARN_ON(!list_empty(&prime_fpriv->head));
}
-EXPORT_SYMBOL(drm_prime_destroy_file_private);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 98a33c580ca1..4c24c3ac1efa 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -31,8 +31,10 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mount.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
@@ -43,6 +45,10 @@ EXPORT_SYMBOL(drm_debug);
unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
EXPORT_SYMBOL(drm_rnodes);
+/* 1 to allow user space to request universal planes (experimental) */
+unsigned int drm_universal_planes = 0;
+EXPORT_SYMBOL(drm_universal_planes);
+
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
@@ -66,10 +72,12 @@ MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(rnodes, drm_rnodes, int, 0600);
+module_param_named(universal_planes, drm_universal_planes, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
+static DEFINE_SPINLOCK(drm_minor_lock);
struct idr drm_minors_idr;
struct class *drm_class;
@@ -94,48 +102,20 @@ int drm_err(const char *func, const char *format, ...)
}
EXPORT_SYMBOL(drm_err);
-void drm_ut_debug_printk(unsigned int request_level,
- const char *prefix,
- const char *function_name,
- const char *format, ...)
+void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (drm_debug & request_level) {
- va_start(args, format);
- vaf.fmt = format;
- vaf.va = &args;
-
- if (function_name)
- printk(KERN_DEBUG "[%s:%s], %pV", prefix,
- function_name, &vaf);
- else
- printk(KERN_DEBUG "%pV", &vaf);
- va_end(args);
- }
-}
-EXPORT_SYMBOL(drm_ut_debug_printk);
-
-static int drm_minor_get_id(struct drm_device *dev, int type)
-{
- int ret;
- int base = 0, limit = 63;
-
- if (type == DRM_MINOR_CONTROL) {
- base += 64;
- limit = base + 63;
- } else if (type == DRM_MINOR_RENDER) {
- base += 128;
- limit = base + 63;
- }
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
- mutex_lock(&dev->struct_mutex);
- ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
- mutex_unlock(&dev->struct_mutex);
+ printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
- return ret == -ENOSPC ? -EINVAL : ret;
+ va_end(args);
}
+EXPORT_SYMBOL(drm_ut_debug_printk);
struct drm_master *drm_master_create(struct drm_minor *minor)
{
@@ -152,8 +132,6 @@ struct drm_master *drm_master_create(struct drm_minor *minor)
INIT_LIST_HEAD(&master->magicfree);
master->minor = minor;
- list_add_tail(&master->head, &minor->master_list);
-
return master;
}
@@ -171,8 +149,7 @@ static void drm_master_destroy(struct kref *kref)
struct drm_device *dev = master->minor->dev;
struct drm_map_list *r_list, *list_temp;
- list_del(&master->head);
-
+ mutex_lock(&dev->struct_mutex);
if (dev->driver->master_destroy)
dev->driver->master_destroy(dev, master);
@@ -200,6 +177,7 @@ static void drm_master_destroy(struct kref *kref)
drm_ht_remove(&master->magiclist);
+ mutex_unlock(&dev->struct_mutex);
kfree(master);
}
@@ -215,19 +193,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
{
int ret = 0;
+ mutex_lock(&dev->master_mutex);
if (file_priv->is_master)
- return 0;
-
- if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
- return -EINVAL;
+ goto out_unlock;
- if (!file_priv->master)
- return -EINVAL;
+ if (file_priv->minor->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
- if (file_priv->minor->master)
- return -EINVAL;
+ if (!file_priv->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
- mutex_lock(&dev->struct_mutex);
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
if (dev->driver->master_set) {
@@ -237,142 +216,211 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
drm_master_put(&file_priv->minor->master);
}
}
- mutex_unlock(&dev->struct_mutex);
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
return ret;
}
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ int ret = -EINVAL;
+
+ mutex_lock(&dev->master_mutex);
if (!file_priv->is_master)
- return -EINVAL;
+ goto out_unlock;
if (!file_priv->minor->master)
- return -EINVAL;
+ goto out_unlock;
- mutex_lock(&dev->struct_mutex);
+ ret = 0;
if (dev->driver->master_drop)
dev->driver->master_drop(dev, file_priv, false);
drm_master_put(&file_priv->minor->master);
file_priv->is_master = 0;
- mutex_unlock(&dev->struct_mutex);
- return 0;
+
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
}
-/**
- * drm_get_minor - Allocate and register new DRM minor
- * @dev: DRM device
- * @minor: Pointer to where new minor is stored
- * @type: Type of minor
- *
- * Allocate a new minor of the given type and register it. A pointer to the new
- * minor is returned in @minor.
- * Caller must hold the global DRM mutex.
+/*
+ * DRM Minors
+ * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
+ * of them is represented by a drm_minor object. Depending on the capabilities
+ * of the device-driver, different interfaces are registered.
*
- * RETURNS:
- * 0 on success, negative error code on failure.
+ * Minors can be accessed via dev->$minor_name. This pointer is either
+ * NULL or a valid drm_minor pointer and stays valid as long as the device is
+ * valid. This means, DRM minors have the same life-time as the underlying
+ * device. However, this doesn't mean that the minor is active. Minors are
+ * registered and unregistered dynamically according to device-state.
*/
-static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor,
- int type)
+
+static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
+ unsigned int type)
+{
+ switch (type) {
+ case DRM_MINOR_LEGACY:
+ return &dev->primary;
+ case DRM_MINOR_RENDER:
+ return &dev->render;
+ case DRM_MINOR_CONTROL:
+ return &dev->control;
+ default:
+ return NULL;
+ }
+}
+
+static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
+
+ minor = kzalloc(sizeof(*minor), GFP_KERNEL);
+ if (!minor)
+ return -ENOMEM;
+
+ minor->type = type;
+ minor->dev = dev;
+
+ *drm_minor_get_slot(dev, type) = minor;
+ return 0;
+}
+
+static void drm_minor_free(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor **slot;
+
+ slot = drm_minor_get_slot(dev, type);
+ if (*slot) {
+ kfree(*slot);
+ *slot = NULL;
+ }
+}
+
+static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
struct drm_minor *new_minor;
+ unsigned long flags;
int ret;
int minor_id;
DRM_DEBUG("\n");
- minor_id = drm_minor_get_id(dev, type);
+ new_minor = *drm_minor_get_slot(dev, type);
+ if (!new_minor)
+ return 0;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ minor_id = idr_alloc(&drm_minors_idr,
+ NULL,
+ 64 * type,
+ 64 * (type + 1),
+ GFP_NOWAIT);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+ idr_preload_end();
+
if (minor_id < 0)
return minor_id;
- new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
- if (!new_minor) {
- ret = -ENOMEM;
- goto err_idr;
- }
-
- new_minor->type = type;
- new_minor->device = MKDEV(DRM_MAJOR, minor_id);
- new_minor->dev = dev;
new_minor->index = minor_id;
- INIT_LIST_HEAD(&new_minor->master_list);
-
- idr_replace(&drm_minors_idr, new_minor, minor_id);
-#if defined(CONFIG_DEBUG_FS)
ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- goto err_mem;
+ goto err_id;
}
-#endif
ret = drm_sysfs_device_add(new_minor);
if (ret) {
- printk(KERN_ERR
- "DRM: Error sysfs_device_add.\n");
+ DRM_ERROR("DRM: Error sysfs_device_add.\n");
goto err_debugfs;
}
- *minor = new_minor;
+
+ /* replace NULL with @minor so lookups will succeed from now on */
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_replace(&drm_minors_idr, new_minor, new_minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
DRM_DEBUG("new minor assigned %d\n", minor_id);
return 0;
-
err_debugfs:
-#if defined(CONFIG_DEBUG_FS)
drm_debugfs_cleanup(new_minor);
-err_mem:
-#endif
- kfree(new_minor);
-err_idr:
+err_id:
+ spin_lock_irqsave(&drm_minor_lock, flags);
idr_remove(&drm_minors_idr, minor_id);
- *minor = NULL;
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+ new_minor->index = 0;
return ret;
}
-/**
- * drm_unplug_minor - Unplug DRM minor
- * @minor: Minor to unplug
- *
- * Unplugs the given DRM minor but keeps the object. So after this returns,
- * minor->dev is still valid so existing open-files can still access it to get
- * device information from their drm_file ojects.
- * If the minor is already unplugged or if @minor is NULL, nothing is done.
- * The global DRM mutex must be held by the caller.
- */
-static void drm_unplug_minor(struct drm_minor *minor)
+static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
+ struct drm_minor *minor;
+ unsigned long flags;
+
+ minor = *drm_minor_get_slot(dev, type);
if (!minor || !minor->kdev)
return;
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_cleanup(minor);
-#endif
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+ minor->index = 0;
+ drm_debugfs_cleanup(minor);
drm_sysfs_device_remove(minor);
- idr_remove(&drm_minors_idr, minor->index);
}
/**
- * drm_put_minor - Destroy DRM minor
- * @minor: Minor to destroy
+ * drm_minor_acquire - Acquire a DRM minor
+ * @minor_id: Minor ID of the DRM-minor
+ *
+ * Looks up the given minor-ID and returns the respective DRM-minor object. The
+ * reference count of the underlying device is increased so you must release this
+ * object with drm_minor_release().
*
- * This calls drm_unplug_minor() on the given minor and then frees it. Nothing
- * is done if @minor is NULL. It is fine to call this on already unplugged
- * minors.
- * The global DRM mutex must be held by the caller.
+ * As long as you hold this minor, it is guaranteed that the object and the
+ * minor->dev pointer will stay valid! However, the device may get unplugged and
+ * unregistered while you hold the minor.
+ *
+ * Returns:
+ * Pointer to minor-object with increased device-refcount, or PTR_ERR on
+ * failure.
*/
-static void drm_put_minor(struct drm_minor *minor)
+struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
- if (!minor)
- return;
+ struct drm_minor *minor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (minor)
+ drm_dev_ref(minor->dev);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ if (!minor) {
+ return ERR_PTR(-ENODEV);
+ } else if (drm_device_is_unplugged(minor->dev)) {
+ drm_dev_unref(minor->dev);
+ return ERR_PTR(-ENODEV);
+ }
- DRM_DEBUG("release secondary minor %d\n", minor->index);
+ return minor;
+}
- drm_unplug_minor(minor);
- kfree(minor);
+/**
+ * drm_minor_release - Release DRM minor
+ * @minor: Pointer to DRM minor object
+ *
+ * Release a minor that was previously acquired via drm_minor_acquire().
+ */
+void drm_minor_release(struct drm_minor *minor)
+{
+ drm_dev_unref(minor->dev);
}
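/*
 * Usage sketch: the open() path is the typical user of the acquire/release
 * pair above (hypothetical code, not part of this patch).
 */
static int example_use_minor(unsigned int minor_id)
{
        struct drm_minor *minor;

        minor = drm_minor_acquire(minor_id);
        if (IS_ERR(minor))
                return PTR_ERR(minor);

        /* minor and minor->dev stay valid while the reference is held */

        drm_minor_release(minor);
        return 0;
}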
/**
@@ -392,18 +440,16 @@ void drm_put_dev(struct drm_device *dev)
}
drm_dev_unregister(dev);
- drm_dev_free(dev);
+ drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);
void drm_unplug_dev(struct drm_device *dev)
{
/* for a USB device */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_unplug_minor(dev->control);
- if (dev->render)
- drm_unplug_minor(dev->render);
- drm_unplug_minor(dev->primary);
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
mutex_lock(&drm_global_mutex);
@@ -416,6 +462,78 @@ void drm_unplug_dev(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_unplug_dev);
+/*
+ * DRM internal mount
+ * We want to be able to allocate our own "struct address_space" to control
+ * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
+ * stand-alone address_space objects, so we need an underlying inode. As there
+ * is no way to allocate an independent inode easily, we need a fake internal
+ * VFS mount-point.
+ *
+ * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
+ * frees it again. You are allowed to use iget() and iput() to get references to
+ * the inode. But each drm_fs_inode_new() call must be paired with exactly one
+ * drm_fs_inode_free() call (which does not have to be the last iput()).
+ * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
+ * between multiple inode-users. You could, technically, call
+ * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
+ * iput(), but this way you'd end up with a new vfsmount for each inode.
+ */
+
+static int drm_fs_cnt;
+static struct vfsmount *drm_fs_mnt;
+
+static const struct dentry_operations drm_fs_dops = {
+ .d_dname = simple_dname,
+};
+
+static const struct super_operations drm_fs_sops = {
+ .statfs = simple_statfs,
+};
+
+static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type,
+ "drm:",
+ &drm_fs_sops,
+ &drm_fs_dops,
+ 0x010203ff);
+}
+
+static struct file_system_type drm_fs_type = {
+ .name = "drm",
+ .owner = THIS_MODULE,
+ .mount = drm_fs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+static struct inode *drm_fs_inode_new(void)
+{
+ struct inode *inode;
+ int r;
+
+ r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
+ if (r < 0) {
+ DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
+ return ERR_PTR(r);
+ }
+
+ inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
+ if (IS_ERR(inode))
+ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+
+ return inode;
+}
+
+static void drm_fs_inode_free(struct inode *inode)
+{
+ if (inode) {
+ iput(inode);
+ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+ }
+}
+
/**
* drm_dev_alloc - Allocate new drm device
* @driver: DRM driver to allocate device for
@@ -425,6 +543,9 @@ EXPORT_SYMBOL(drm_unplug_dev);
* Call drm_dev_register() to advertise the device to user space and register it
* with other core subsystems.
*
+ * The initial ref-count of the object is 1. Use drm_dev_ref() and
+ * drm_dev_unref() to take and drop further ref-counts.
+ *
* RETURNS:
* Pointer to new DRM device, or NULL if out of memory.
*/
@@ -438,6 +559,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
if (!dev)
return NULL;
+ kref_init(&dev->ref);
dev->dev = parent;
dev->driver = driver;
@@ -451,9 +573,33 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
+ mutex_init(&dev->master_mutex);
- if (drm_ht_create(&dev->map_hash, 12))
+ dev->anon_inode = drm_fs_inode_new();
+ if (IS_ERR(dev->anon_inode)) {
+ ret = PTR_ERR(dev->anon_inode);
+ DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
goto err_free;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_minors;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+ ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_minors;
+ }
+
+ ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_minors;
+
+ if (drm_ht_create(&dev->map_hash, 12))
+ goto err_minors;
ret = drm_ctxbitmap_init(dev);
if (ret) {
@@ -475,38 +621,71 @@ err_ctxbitmap:
drm_ctxbitmap_cleanup(dev);
err_ht:
drm_ht_remove(&dev->map_hash);
+err_minors:
+ drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_minor_free(dev, DRM_MINOR_CONTROL);
+ drm_fs_inode_free(dev->anon_inode);
err_free:
+ mutex_destroy(&dev->master_mutex);
kfree(dev);
return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);
-/**
- * drm_dev_free - Free DRM device
- * @dev: DRM device to free
- *
- * Free a DRM device that has previously been allocated via drm_dev_alloc().
- * You must not use kfree() instead or you will leak memory.
- *
- * This must not be called once the device got registered. Use drm_put_dev()
- * instead, which then calls drm_dev_free().
- */
-void drm_dev_free(struct drm_device *dev)
+static void drm_dev_release(struct kref *ref)
{
- drm_put_minor(dev->control);
- drm_put_minor(dev->render);
- drm_put_minor(dev->primary);
+ struct drm_device *dev = container_of(ref, struct drm_device, ref);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_destroy(dev);
drm_ctxbitmap_cleanup(dev);
drm_ht_remove(&dev->map_hash);
+ drm_fs_inode_free(dev->anon_inode);
+
+ drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_minor_free(dev, DRM_MINOR_CONTROL);
kfree(dev->devname);
+
+ mutex_destroy(&dev->master_mutex);
kfree(dev);
}
-EXPORT_SYMBOL(drm_dev_free);
+
+/**
+ * drm_dev_ref - Take reference of a DRM device
+ * @dev: device to take reference of or NULL
+ *
+ * This increases the ref-count of @dev by one. You *must* already own a
+ * reference when calling this. Use drm_dev_unref() to drop this reference
+ * again.
+ *
+ * This function never fails. However, this function does not provide *any*
+ * guarantee whether the device is alive or running. It only provides a
+ * reference to the object and the memory associated with it.
+ */
+void drm_dev_ref(struct drm_device *dev)
+{
+ if (dev)
+ kref_get(&dev->ref);
+}
+EXPORT_SYMBOL(drm_dev_ref);
+
+/**
+ * drm_dev_unref - Drop reference of a DRM device
+ * @dev: device to drop reference of or NULL
+ *
+ * This decreases the ref-count of @dev by one. The device is destroyed if the
+ * ref-count drops to zero.
+ */
+void drm_dev_unref(struct drm_device *dev)
+{
+ if (dev)
+ kref_put(&dev->ref, drm_dev_release);
+}
+EXPORT_SYMBOL(drm_dev_unref);
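/*
 * Usage sketch: the bus-independent probe/remove pattern that the ref-counted
 * device lifetime enables (example_* names are hypothetical, not part of this
 * patch).
 */
static int example_probe(struct drm_driver *driver, struct device *parent)
{
        struct drm_device *dev;
        int ret;

        dev = drm_dev_alloc(driver, parent);
        if (!dev)
                return -ENOMEM;

        ret = drm_dev_register(dev, 0);
        if (ret)
                drm_dev_unref(dev);     /* drops the initial reference */
        return ret;
}

static void example_remove(struct drm_device *dev)
{
        drm_dev_unregister(dev);
        drm_dev_unref(dev);
}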
/**
* drm_dev_register - Register DRM device
@@ -527,26 +706,22 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
mutex_lock(&drm_global_mutex);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto out_unlock;
- }
+ ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_minors;
- if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
- ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
- if (ret)
- goto err_control_node;
- }
+ ret = drm_minor_register(dev, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_minors;
- ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
if (ret)
- goto err_render_node;
+ goto err_minors;
if (dev->driver->load) {
ret = dev->driver->load(dev, flags);
if (ret)
- goto err_primary_node;
+ goto err_minors;
}
/* setup grouping for legacy outputs */
@@ -563,12 +738,10 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
err_unload:
if (dev->driver->unload)
dev->driver->unload(dev);
-err_primary_node:
- drm_unplug_minor(dev->primary);
-err_render_node:
- drm_unplug_minor(dev->render);
-err_control_node:
- drm_unplug_minor(dev->control);
+err_minors:
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
mutex_unlock(&drm_global_mutex);
return ret;
@@ -581,7 +754,7 @@ EXPORT_SYMBOL(drm_dev_register);
*
* Unregister the DRM device from the system. This does the reverse of
* drm_dev_register() but does not deallocate the device. The caller must call
- * drm_dev_free() to free all resources.
+ * drm_dev_unref() to drop their final reference.
*/
void drm_dev_unregister(struct drm_device *dev)
{
@@ -600,8 +773,8 @@ void drm_dev_unregister(struct drm_device *dev)
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_rmmap(dev, r_list->map);
- drm_unplug_minor(dev->control);
- drm_unplug_minor(dev->render);
- drm_unplug_minor(dev->primary);
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);
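/*
 * Illustrative sketch, not part of this patch: the alloc/register/
 * unregister/unref lifecycle a bus driver follows after this change.
 * "foo_driver", "foo_probe" and "foo_remove" are hypothetical names.
 */
static struct drm_driver foo_driver;	/* .driver_features, .fops, ... elided */

static int foo_probe(struct platform_device *pdev)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(&foo_driver, &pdev->dev);	/* ref-count starts at 1 */
	if (!ddev)
		return -ENOMEM;

	ret = drm_dev_register(ddev, 0);
	if (ret) {
		drm_dev_unref(ddev);	/* last reference dropped, device is freed */
		return ret;
	}

	platform_set_drvdata(pdev, ddev);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct drm_device *ddev = platform_get_drvdata(pdev);

	drm_dev_unregister(ddev);	/* reverse of drm_dev_register() */
	drm_dev_unref(ddev);		/* then drop the final reference */
	return 0;
}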
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 0f8cb1ae7607..c3406aad2944 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -30,7 +30,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
return 0;
err_free:
- drm_dev_free(dev);
+ drm_dev_unref(dev);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 6e1a1a20cf6b..5bf5bca94f56 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -31,6 +31,30 @@ config DRM_EXYNOS_FIMD
help
Choose this option if you want to use Exynos FIMD for DRM.
+config DRM_EXYNOS_DPI
+ bool "EXYNOS DRM parallel output support"
+ depends on DRM_EXYNOS
+ select DRM_PANEL
+ default n
+ help
+ This enables support for Exynos parallel output.
+
+config DRM_EXYNOS_DSI
+ bool "EXYNOS DRM MIPI-DSI driver support"
+ depends on DRM_EXYNOS
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ default n
+ help
+ This enables support for the Exynos MIPI-DSI device.
+
+config DRM_EXYNOS_DP
+ bool "EXYNOS DRM DP driver support"
+ depends on DRM_EXYNOS && ARCH_EXYNOS
+ default DRM_EXYNOS
+ help
+ This enables support for the Exynos DisplayPort (DP) device.
+
config DRM_EXYNOS_HDMI
bool "Exynos DRM HDMI"
depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 639b49e1ec05..33ae3652b8da 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -3,7 +3,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
-exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
+exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
@@ -11,9 +11,10 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
- exynos_ddc.o exynos_hdmiphy.o \
- exynos_drm_hdmi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 5e1a71580051..aed533bbfd31 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -20,9 +19,25 @@
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/bridge/ptn3460.h>
+
+#include "exynos_drm_drv.h"
#include "exynos_dp_core.h"
+#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
+ connector)
+
+struct bridge_init {
+ struct i2c_client *client;
+ struct device_node *node;
+};
+
static int exynos_dp_init_dp(struct exynos_dp_device *dp)
{
exynos_dp_reset(dp);
@@ -893,6 +908,214 @@ static void exynos_dp_hotplug(struct work_struct *work)
dev_err(dp->dev, "unable to config video\n");
}
+static enum drm_connector_status exynos_dp_detect(
+ struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void exynos_dp_connector_destroy(struct drm_connector *connector)
+{
+}
+
+static struct drm_connector_funcs exynos_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = exynos_dp_detect,
+ .destroy = exynos_dp_connector_destroy,
+};
+
+static int exynos_dp_get_modes(struct drm_connector *connector)
+{
+ struct exynos_dp_device *dp = ctx_from_connector(connector);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_ERROR("failed to create a new display mode.\n");
+ return 0;
+ }
+
+ drm_display_mode_from_videomode(&dp->panel.vm, mode);
+ mode->width_mm = dp->panel.width_mm;
+ mode->height_mm = dp->panel.height_mm;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static int exynos_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *exynos_dp_best_encoder(
+ struct drm_connector *connector)
+{
+ struct exynos_dp_device *dp = ctx_from_connector(connector);
+
+ return dp->encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
+ .get_modes = exynos_dp_get_modes,
+ .mode_valid = exynos_dp_mode_valid,
+ .best_encoder = exynos_dp_best_encoder,
+};
+
+static int exynos_dp_initialize(struct exynos_drm_display *display,
+ struct drm_device *drm_dev)
+{
+ struct exynos_dp_device *dp = display->ctx;
+
+ dp->drm_dev = drm_dev;
+
+ return 0;
+}
+
+static bool find_bridge(const char *compat, struct bridge_init *bridge)
+{
+ bridge->client = NULL;
+ bridge->node = of_find_compatible_node(NULL, NULL, compat);
+ if (!bridge->node)
+ return false;
+
+ bridge->client = of_find_i2c_device_by_node(bridge->node);
+ if (!bridge->client)
+ return false;
+
+ return true;
+}
+
+/* returns the number of bridges attached */
+static int exynos_drm_attach_lcd_bridge(struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ struct bridge_init bridge;
+ int ret;
+
+ if (find_bridge("nxp,ptn3460", &bridge)) {
+ ret = ptn3460_init(dev, encoder, bridge.client, bridge.node);
+ if (!ret)
+ return 1;
+ }
+ return 0;
+}
+
+static int exynos_dp_create_connector(struct exynos_drm_display *display,
+ struct drm_encoder *encoder)
+{
+ struct exynos_dp_device *dp = display->ctx;
+ struct drm_connector *connector = &dp->connector;
+ int ret;
+
+ dp->encoder = encoder;
+
+ /* Pre-empt DP connector creation if there's a bridge */
+ ret = exynos_drm_attach_lcd_bridge(dp->drm_dev, encoder);
+ if (ret)
+ return 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(dp->drm_dev, connector,
+ &exynos_dp_connector_funcs, DRM_MODE_CONNECTOR_eDP);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static void exynos_dp_phy_init(struct exynos_dp_device *dp)
+{
+ if (dp->phy) {
+ phy_power_on(dp->phy);
+ } else if (dp->phy_addr) {
+ u32 reg;
+
+ reg = __raw_readl(dp->phy_addr);
+ reg |= dp->enable_mask;
+ __raw_writel(reg, dp->phy_addr);
+ }
+}
+
+static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+{
+ if (dp->phy) {
+ phy_power_off(dp->phy);
+ } else if (dp->phy_addr) {
+ u32 reg;
+
+ reg = __raw_readl(dp->phy_addr);
+ reg &= ~(dp->enable_mask);
+ __raw_writel(reg, dp->phy_addr);
+ }
+}
+
+static void exynos_dp_poweron(struct exynos_dp_device *dp)
+{
+ if (dp->dpms_mode == DRM_MODE_DPMS_ON)
+ return;
+
+ clk_prepare_enable(dp->clock);
+ exynos_dp_phy_init(dp);
+ exynos_dp_init_dp(dp);
+ enable_irq(dp->irq);
+}
+
+static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+{
+ if (dp->dpms_mode != DRM_MODE_DPMS_ON)
+ return;
+
+ disable_irq(dp->irq);
+ flush_work(&dp->hotplug_work);
+ exynos_dp_phy_exit(dp);
+ clk_disable_unprepare(dp->clock);
+}
+
+static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
+{
+ struct exynos_dp_device *dp = display->ctx;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ exynos_dp_poweron(dp);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ exynos_dp_poweroff(dp);
+ break;
+ default:
+ break;
+ }
+ dp->dpms_mode = mode;
+}
+
+static struct exynos_drm_display_ops exynos_dp_display_ops = {
+ .initialize = exynos_dp_initialize,
+ .create_connector = exynos_dp_create_connector,
+ .dpms = exynos_dp_dpms,
+};
+
+static struct exynos_drm_display exynos_dp_display = {
+ .type = EXYNOS_DISPLAY_TYPE_LCD,
+ .ops = &exynos_dp_display_ops,
+};
+
static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
{
struct device_node *dp_node = dev->of_node;
@@ -994,30 +1217,17 @@ err:
return ret;
}
-static void exynos_dp_phy_init(struct exynos_dp_device *dp)
-{
- if (dp->phy) {
- phy_power_on(dp->phy);
- } else if (dp->phy_addr) {
- u32 reg;
-
- reg = __raw_readl(dp->phy_addr);
- reg |= dp->enable_mask;
- __raw_writel(reg, dp->phy_addr);
- }
-}
-
-static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
- if (dp->phy) {
- phy_power_off(dp->phy);
- } else if (dp->phy_addr) {
- u32 reg;
+ int ret;
- reg = __raw_readl(dp->phy_addr);
- reg &= ~(dp->enable_mask);
- __raw_writel(reg, dp->phy_addr);
+ ret = of_get_videomode(dp->dev->of_node, &dp->panel.vm,
+ OF_USE_NATIVE_MODE);
+ if (ret) {
+ DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
+ return ret;
}
+ return 0;
}
static int exynos_dp_probe(struct platform_device *pdev)
@@ -1035,6 +1245,7 @@ static int exynos_dp_probe(struct platform_device *pdev)
}
dp->dev = &pdev->dev;
+ dp->dpms_mode = DRM_MODE_DPMS_OFF;
dp->video_info = exynos_dp_dt_parse_pdata(&pdev->dev);
if (IS_ERR(dp->video_info))
@@ -1044,6 +1255,10 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = exynos_dp_dt_parse_panel(dp);
+ if (ret)
+ return ret;
+
dp->clock = devm_clk_get(&pdev->dev, "dp");
if (IS_ERR(dp->clock)) {
dev_err(&pdev->dev, "failed to get clock\n");
@@ -1076,22 +1291,22 @@ static int exynos_dp_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to request irq\n");
return ret;
}
+ disable_irq(dp->irq);
+
+ exynos_dp_display.ctx = dp;
- platform_set_drvdata(pdev, dp);
+ platform_set_drvdata(pdev, &exynos_dp_display);
+ exynos_drm_display_register(&exynos_dp_display);
return 0;
}
static int exynos_dp_remove(struct platform_device *pdev)
{
- struct exynos_dp_device *dp = platform_get_drvdata(pdev);
-
- flush_work(&dp->hotplug_work);
-
- exynos_dp_phy_exit(dp);
-
- clk_disable_unprepare(dp->clock);
+ struct exynos_drm_display *display = platform_get_drvdata(pdev);
+ exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
+ exynos_drm_display_unregister(&exynos_dp_display);
return 0;
}
@@ -1099,31 +1314,19 @@ static int exynos_dp_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int exynos_dp_suspend(struct device *dev)
{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
- disable_irq(dp->irq);
-
- flush_work(&dp->hotplug_work);
-
- exynos_dp_phy_exit(dp);
-
- clk_disable_unprepare(dp->clock);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_drm_display *display = platform_get_drvdata(pdev);
+ exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
return 0;
}
static int exynos_dp_resume(struct device *dev)
{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
- exynos_dp_phy_init(dp);
-
- clk_prepare_enable(dp->clock);
-
- exynos_dp_init_dp(dp);
-
- enable_irq(dp->irq);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_drm_display *display = platform_get_drvdata(pdev);
+ exynos_dp_dpms(display, DRM_MODE_DPMS_ON);
return 0;
}
#endif
@@ -1136,9 +1339,8 @@ static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
{},
};
-MODULE_DEVICE_TABLE(of, exynos_dp_match);
-static struct platform_driver exynos_dp_driver = {
+struct platform_driver dp_driver = {
.probe = exynos_dp_probe,
.remove = exynos_dp_remove,
.driver = {
@@ -1149,8 +1351,6 @@ static struct platform_driver exynos_dp_driver = {
},
};
-module_platform_driver(exynos_dp_driver);
-
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DP Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index 607e36d0c147..d6a900d4ee40 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -13,6 +13,9 @@
#ifndef _EXYNOS_DP_CORE_H
#define _EXYNOS_DP_CORE_H
+#include <drm/drm_crtc.h>
+#include <drm/exynos_drm.h>
+
#define DP_TIMEOUT_LOOP_COUNT 100
#define MAX_CR_LOOP 5
#define MAX_EQ_LOOP 5
@@ -142,6 +145,9 @@ struct link_train {
struct exynos_dp_device {
struct device *dev;
+ struct drm_device *drm_dev;
+ struct drm_connector connector;
+ struct drm_encoder *encoder;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
@@ -152,6 +158,9 @@ struct exynos_dp_device {
struct link_train link_train;
struct work_struct hotplug_work;
struct phy *phy;
+ int dpms_mode;
+
+ struct exynos_drm_panel_info panel;
};
/* exynos_dp_reg.c */
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/gpu/drm/exynos/exynos_dp_reg.c
index b70da5052ff0..b70da5052ff0 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_reg.c
diff --git a/drivers/video/exynos/exynos_dp_reg.h b/drivers/gpu/drm/exynos/exynos_dp_reg.h
index 2e9bd0e0b9f2..2e9bd0e0b9f2 100644
--- a/drivers/video/exynos/exynos_dp_reg.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_reg.h
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index e082efb2fece..9a16dbe121d1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -23,27 +23,20 @@
drm_connector)
struct exynos_drm_connector {
- struct drm_connector drm_connector;
- uint32_t encoder_id;
- struct exynos_drm_manager *manager;
- uint32_t dpms;
+ struct drm_connector drm_connector;
+ uint32_t encoder_id;
+ struct exynos_drm_display *display;
};
static int exynos_drm_connector_get_modes(struct drm_connector *connector)
{
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
- struct exynos_drm_manager *manager = exynos_connector->manager;
- struct exynos_drm_display_ops *display_ops = manager->display_ops;
+ struct exynos_drm_display *display = exynos_connector->display;
struct edid *edid = NULL;
unsigned int count = 0;
int ret;
- if (!display_ops) {
- DRM_DEBUG_KMS("display_ops is null.\n");
- return 0;
- }
-
/*
* If get_edid() exists, the get_edid() callback of the hdmi side
* is called to fetch EDID data over the i2c interface; else
@@ -52,8 +45,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
* P.S. in the case of an LCD panel, count is always 1 on success
* because an LCD panel has only one mode.
*/
- if (display_ops->get_edid) {
- edid = display_ops->get_edid(manager->dev, connector);
+ if (display->ops->get_edid) {
+ edid = display->ops->get_edid(display, connector);
if (IS_ERR_OR_NULL(edid)) {
ret = PTR_ERR(edid);
edid = NULL;
@@ -76,8 +69,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
return 0;
}
- if (display_ops->get_panel)
- panel = display_ops->get_panel(manager->dev);
+ if (display->ops->get_panel)
+ panel = display->ops->get_panel(display);
else {
drm_mode_destroy(connector->dev, mode);
return 0;
@@ -106,20 +99,20 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
{
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
- struct exynos_drm_manager *manager = exynos_connector->manager;
- struct exynos_drm_display_ops *display_ops = manager->display_ops;
+ struct exynos_drm_display *display = exynos_connector->display;
int ret = MODE_BAD;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (display_ops && display_ops->check_mode)
- if (!display_ops->check_mode(manager->dev, mode))
+ if (display->ops->check_mode)
+ if (!display->ops->check_mode(display, mode))
ret = MODE_OK;
return ret;
}
-struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *exynos_drm_best_encoder(
+ struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct exynos_drm_connector *exynos_connector =
@@ -146,48 +139,12 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
.best_encoder = exynos_drm_best_encoder,
};
-void exynos_drm_display_power(struct drm_connector *connector, int mode)
-{
- struct drm_encoder *encoder = exynos_drm_best_encoder(connector);
- struct exynos_drm_connector *exynos_connector;
- struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
- struct exynos_drm_display_ops *display_ops = manager->display_ops;
-
- exynos_connector = to_exynos_connector(connector);
-
- if (exynos_connector->dpms == mode) {
- DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
- return;
- }
-
- if (display_ops && display_ops->power_on)
- display_ops->power_on(manager->dev, mode);
-
- exynos_connector->dpms = mode;
-}
-
-static void exynos_drm_connector_dpms(struct drm_connector *connector,
- int mode)
-{
- /*
- * in case that drm_crtc_helper_set_mode() is called,
- * encoder/crtc->funcs->dpms() will be just returned
- * because they already were DRM_MODE_DPMS_ON so only
- * exynos_drm_display_power() will be called.
- */
- drm_helper_connector_dpms(connector, mode);
-
- exynos_drm_display_power(connector, mode);
-
-}
-
static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
unsigned int max_width, unsigned int max_height)
{
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
- struct exynos_drm_manager *manager = exynos_connector->manager;
- struct exynos_drm_manager_ops *ops = manager->ops;
+ struct exynos_drm_display *display = exynos_connector->display;
unsigned int width, height;
width = max_width;
@@ -197,8 +154,8 @@ static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
* if a specific driver wants to find the desired mode using the maximum
* resolution, then get the max width and height from that driver.
*/
- if (ops && ops->get_max_resol)
- ops->get_max_resol(manager->dev, &width, &height);
+ if (display->ops->get_max_resol)
+ display->ops->get_max_resol(display, &width, &height);
return drm_helper_probe_single_connector_modes(connector, width,
height);
@@ -210,13 +167,11 @@ exynos_drm_connector_detect(struct drm_connector *connector, bool force)
{
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
- struct exynos_drm_manager *manager = exynos_connector->manager;
- struct exynos_drm_display_ops *display_ops =
- manager->display_ops;
+ struct exynos_drm_display *display = exynos_connector->display;
enum drm_connector_status status = connector_status_disconnected;
- if (display_ops && display_ops->is_connected) {
- if (display_ops->is_connected(manager->dev))
+ if (display->ops->is_connected) {
+ if (display->ops->is_connected(display))
status = connector_status_connected;
else
status = connector_status_disconnected;
@@ -236,7 +191,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector)
}
static struct drm_connector_funcs exynos_connector_funcs = {
- .dpms = exynos_drm_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.fill_modes = exynos_drm_connector_fill_modes,
.detect = exynos_drm_connector_detect,
.destroy = exynos_drm_connector_destroy,
@@ -246,7 +201,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
struct drm_encoder *encoder)
{
struct exynos_drm_connector *exynos_connector;
- struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_display *display = exynos_drm_get_display(encoder);
struct drm_connector *connector;
int type;
int err;
@@ -257,7 +212,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
connector = &exynos_connector->drm_connector;
- switch (manager->display_ops->type) {
+ switch (display->type) {
case EXYNOS_DISPLAY_TYPE_HDMI:
type = DRM_MODE_CONNECTOR_HDMIA;
connector->interlace_allowed = true;
@@ -280,8 +235,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
goto err_connector;
exynos_connector->encoder_id = encoder->base.id;
- exynos_connector->manager = manager;
- exynos_connector->dpms = DRM_MODE_DPMS_OFF;
+ exynos_connector->display = display;
connector->dpms = DRM_MODE_DPMS_OFF;
connector->encoder = encoder;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
index 547c6b590357..4eb20d78379a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.h
@@ -17,8 +17,4 @@
struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
struct drm_encoder *encoder);
-struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector);
-
-void exynos_drm_display_power(struct drm_connector *connector, int mode);
-
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 1bef6dc77478..0e9e06ce36b8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -14,43 +14,42 @@
#include <drm/drmP.h>
#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
#include "exynos_drm_encoder.h"
-#include "exynos_drm_connector.h"
#include "exynos_drm_fbdev.h"
static LIST_HEAD(exynos_drm_subdrv_list);
+static LIST_HEAD(exynos_drm_manager_list);
+static LIST_HEAD(exynos_drm_display_list);
static int exynos_drm_create_enc_conn(struct drm_device *dev,
- struct exynos_drm_subdrv *subdrv)
+ struct exynos_drm_display *display)
{
struct drm_encoder *encoder;
- struct drm_connector *connector;
+ struct exynos_drm_manager *manager;
int ret;
+ unsigned long possible_crtcs = 0;
- subdrv->manager->dev = subdrv->dev;
+ /* Find possible crtcs for this display */
+ list_for_each_entry(manager, &exynos_drm_manager_list, list)
+ if (manager->type == display->type)
+ possible_crtcs |= 1 << manager->pipe;
/* create and initialize a encoder for this sub driver. */
- encoder = exynos_drm_encoder_create(dev, subdrv->manager,
- (1 << MAX_CRTC) - 1);
+ encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
if (!encoder) {
DRM_ERROR("failed to create encoder\n");
return -EFAULT;
}
- /*
- * create and initialize a connector for this sub driver and
- * attach the encoder created above to the connector.
- */
- connector = exynos_drm_connector_create(dev, encoder);
- if (!connector) {
- DRM_ERROR("failed to create connector\n");
- ret = -EFAULT;
+ display->encoder = encoder;
+
+ ret = display->ops->create_connector(display, encoder);
+ if (ret) {
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
goto err_destroy_encoder;
}
- subdrv->encoder = encoder;
- subdrv->connector = connector;
-
return 0;
err_destroy_encoder:
@@ -58,21 +57,6 @@ err_destroy_encoder:
return ret;
}
-static void exynos_drm_destroy_enc_conn(struct exynos_drm_subdrv *subdrv)
-{
- if (subdrv->encoder) {
- struct drm_encoder *encoder = subdrv->encoder;
- encoder->funcs->destroy(encoder);
- subdrv->encoder = NULL;
- }
-
- if (subdrv->connector) {
- struct drm_connector *connector = subdrv->connector;
- connector->funcs->destroy(connector);
- subdrv->connector = NULL;
- }
-}
-
static int exynos_drm_subdrv_probe(struct drm_device *dev,
struct exynos_drm_subdrv *subdrv)
{
@@ -104,10 +88,98 @@ static void exynos_drm_subdrv_remove(struct drm_device *dev,
subdrv->remove(dev, subdrv->dev);
}
+int exynos_drm_initialize_managers(struct drm_device *dev)
+{
+ struct exynos_drm_manager *manager, *n;
+ int ret, pipe = 0;
+
+ list_for_each_entry(manager, &exynos_drm_manager_list, list) {
+ if (manager->ops->initialize) {
+ ret = manager->ops->initialize(manager, dev, pipe);
+ if (ret) {
+ DRM_ERROR("Mgr init [%d] failed with %d\n",
+ manager->type, ret);
+ goto err;
+ }
+ }
+
+ manager->drm_dev = dev;
+ manager->pipe = pipe++;
+
+ ret = exynos_drm_crtc_create(manager);
+ if (ret) {
+ DRM_ERROR("CRTC create [%d] failed with %d\n",
+ manager->type, ret);
+ goto err;
+ }
+ }
+ return 0;
+
+err:
+ list_for_each_entry_safe(manager, n, &exynos_drm_manager_list, list) {
+ if (pipe-- > 0)
+ exynos_drm_manager_unregister(manager);
+ else
+ list_del(&manager->list);
+ }
+ return ret;
+}
+
+void exynos_drm_remove_managers(struct drm_device *dev)
+{
+ struct exynos_drm_manager *manager, *n;
+
+ list_for_each_entry_safe(manager, n, &exynos_drm_manager_list, list)
+ exynos_drm_manager_unregister(manager);
+}
+
+int exynos_drm_initialize_displays(struct drm_device *dev)
+{
+ struct exynos_drm_display *display, *n;
+ int ret, initialized = 0;
+
+ list_for_each_entry(display, &exynos_drm_display_list, list) {
+ if (display->ops->initialize) {
+ ret = display->ops->initialize(display, dev);
+ if (ret) {
+ DRM_ERROR("Display init [%d] failed with %d\n",
+ display->type, ret);
+ goto err;
+ }
+ }
+
+ initialized++;
+
+ ret = exynos_drm_create_enc_conn(dev, display);
+ if (ret) {
+ DRM_ERROR("Encoder create [%d] failed with %d\n",
+ display->type, ret);
+ goto err;
+ }
+ }
+ return 0;
+
+err:
+ list_for_each_entry_safe(display, n, &exynos_drm_display_list, list) {
+ if (initialized-- > 0)
+ exynos_drm_display_unregister(display);
+ else
+ list_del(&display->list);
+ }
+ return ret;
+}
+
+void exynos_drm_remove_displays(struct drm_device *dev)
+{
+ struct exynos_drm_display *display, *n;
+
+ list_for_each_entry_safe(display, n, &exynos_drm_display_list, list)
+ exynos_drm_display_unregister(display);
+}
+
int exynos_drm_device_register(struct drm_device *dev)
{
struct exynos_drm_subdrv *subdrv, *n;
- unsigned int fine_cnt = 0;
int err;
if (!dev)
@@ -120,30 +192,8 @@ int exynos_drm_device_register(struct drm_device *dev)
list_del(&subdrv->list);
continue;
}
-
- /*
- * if manager is null then it means that this sub driver
- * doesn't need encoder and connector.
- */
- if (!subdrv->manager) {
- fine_cnt++;
- continue;
- }
-
- err = exynos_drm_create_enc_conn(dev, subdrv);
- if (err) {
- DRM_DEBUG("failed to create encoder and connector.\n");
- exynos_drm_subdrv_remove(dev, subdrv);
- list_del(&subdrv->list);
- continue;
- }
-
- fine_cnt++;
}
- if (!fine_cnt)
- return -EINVAL;
-
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_register);
@@ -159,13 +209,44 @@ int exynos_drm_device_unregister(struct drm_device *dev)
list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
exynos_drm_subdrv_remove(dev, subdrv);
- exynos_drm_destroy_enc_conn(subdrv);
}
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
+int exynos_drm_manager_register(struct exynos_drm_manager *manager)
+{
+ BUG_ON(!manager->ops);
+ list_add_tail(&manager->list, &exynos_drm_manager_list);
+ return 0;
+}
+
+int exynos_drm_manager_unregister(struct exynos_drm_manager *manager)
+{
+ if (manager->ops->remove)
+ manager->ops->remove(manager);
+
+ list_del(&manager->list);
+ return 0;
+}
+
+int exynos_drm_display_register(struct exynos_drm_display *display)
+{
+ BUG_ON(!display->ops);
+ list_add_tail(&display->list, &exynos_drm_display_list);
+ return 0;
+}
+
+int exynos_drm_display_unregister(struct exynos_drm_display *display)
+{
+ if (display->ops->remove)
+ display->ops->remove(display);
+
+ list_del(&display->list);
+ return 0;
+}
+
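/*
 * Illustrative sketch, not part of this patch: a minimal crtc back-end
 * (manager) and display pair registered before the DRM device loads.
 * exynos_drm_initialize_managers() then assigns a pipe and creates a crtc
 * per manager, and exynos_drm_initialize_displays() creates an encoder and
 * connector per display, matching possible crtcs by ->type.  All "foo_*"
 * names are hypothetical and the ops bodies are elided.
 */
static struct exynos_drm_manager_ops foo_manager_ops;	/* ->dpms, ->commit, ... */
static struct exynos_drm_display_ops foo_display_ops;	/* ->create_connector, ->dpms */

static struct exynos_drm_manager foo_manager = {
	.type	= EXYNOS_DISPLAY_TYPE_LCD,
	.ops	= &foo_manager_ops,
};

static struct exynos_drm_display foo_display = {
	.type	= EXYNOS_DISPLAY_TYPE_LCD,	/* matched against manager->type */
	.ops	= &foo_display_ops,
};

static int foo_probe(struct platform_device *pdev)
{
	/* both lists are walked later from the DRM device's load() path */
	exynos_drm_manager_register(&foo_manager);
	exynos_drm_display_register(&foo_display);
	return 0;
}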
int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
{
if (!subdrv)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 6f3400f3978a..e930d4fe29c7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -33,6 +33,7 @@ enum exynos_crtc_mode {
*
* @drm_crtc: crtc object.
* @drm_plane: pointer to the private plane object for this crtc
+ * @manager: the manager associated with this crtc
* @pipe: the crtc index assigned at load() when the crtc object is created;
* the crtc object is stored in the private->crtc array so that the crtc
* corresponding to this pipe can be retrieved from private->crtc
@@ -46,6 +47,7 @@ enum exynos_crtc_mode {
struct exynos_drm_crtc {
struct drm_crtc drm_crtc;
struct drm_plane *plane;
+ struct exynos_drm_manager *manager;
unsigned int pipe;
unsigned int dpms;
enum exynos_crtc_mode mode;
@@ -56,6 +58,7 @@ struct exynos_drm_crtc {
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
@@ -71,7 +74,9 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
drm_vblank_off(crtc->dev, exynos_crtc->pipe);
}
- exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
+ if (manager->ops->dpms)
+ manager->ops->dpms(manager, mode);
+
exynos_crtc->dpms = mode;
}
@@ -83,9 +88,15 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
exynos_plane_commit(exynos_crtc->plane);
+
+ if (manager->ops->commit)
+ manager->ops->commit(manager);
+
exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
}
@@ -94,7 +105,12 @@ exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- /* drm framework doesn't check NULL */
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
+
+ if (manager->ops->mode_fixup)
+ return manager->ops->mode_fixup(manager, mode, adjusted_mode);
+
return true;
}
@@ -104,10 +120,10 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_framebuffer *old_fb)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
struct drm_plane *plane = exynos_crtc->plane;
unsigned int crtc_w;
unsigned int crtc_h;
- int pipe = exynos_crtc->pipe;
int ret;
/*
@@ -116,18 +132,19 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
*/
memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
- crtc_w = crtc->fb->width - x;
- crtc_h = crtc->fb->height - y;
+ crtc_w = crtc->primary->fb->width - x;
+ crtc_h = crtc->primary->fb->height - y;
+
+ if (manager->ops->mode_set)
+ manager->ops->mode_set(manager, &crtc->mode);
- ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
+ ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
x, y, crtc_w, crtc_h);
if (ret)
return ret;
plane->crtc = crtc;
- plane->fb = crtc->fb;
-
- exynos_drm_fn_encoder(crtc, &pipe, exynos_drm_encoder_crtc_pipe);
+ plane->fb = crtc->primary->fb;
return 0;
}
@@ -147,10 +164,10 @@ static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
return -EPERM;
}
- crtc_w = crtc->fb->width - x;
- crtc_h = crtc->fb->height - y;
+ crtc_w = crtc->primary->fb->width - x;
+ crtc_h = crtc->primary->fb->height - y;
- ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
+ ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
x, y, crtc_w, crtc_h);
if (ret)
return ret;
@@ -168,10 +185,19 @@ static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_plane *plane;
+ int ret;
- exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_OFF);
exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ drm_for_each_legacy_plane(plane, &crtc->dev->mode_config.plane_list) {
+ if (plane->crtc != crtc)
+ continue;
+
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("Failed to disable plane %d\n", ret);
+ }
}
static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
@@ -192,7 +218,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct exynos_drm_private *dev_priv = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- struct drm_framebuffer *old_fb = crtc->fb;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
int ret = -EINVAL;
/* when the page flip is requested, crtc's dpms should be on */
@@ -223,11 +249,11 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
atomic_set(&exynos_crtc->pending_flip, 1);
spin_unlock_irq(&dev->event_lock);
- crtc->fb = fb;
+ crtc->primary->fb = fb;
ret = exynos_drm_crtc_mode_set_commit(crtc, crtc->x, crtc->y,
NULL);
if (ret) {
- crtc->fb = old_fb;
+ crtc->primary->fb = old_fb;
spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
@@ -318,21 +344,24 @@ static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
drm_object_attach_property(&crtc->base, prop, 0);
}
-int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
+int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
{
struct exynos_drm_crtc *exynos_crtc;
- struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_private *private = manager->drm_dev->dev_private;
struct drm_crtc *crtc;
exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
if (!exynos_crtc)
return -ENOMEM;
- exynos_crtc->pipe = nr;
- exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
init_waitqueue_head(&exynos_crtc->pending_flip_queue);
atomic_set(&exynos_crtc->pending_flip, 0);
- exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true);
+
+ exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+ exynos_crtc->manager = manager;
+ exynos_crtc->pipe = manager->pipe;
+ exynos_crtc->plane = exynos_plane_init(manager->drm_dev,
+ 1 << manager->pipe, true);
if (!exynos_crtc->plane) {
kfree(exynos_crtc);
return -ENOMEM;
@@ -340,9 +369,9 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
crtc = &exynos_crtc->drm_crtc;
- private->crtc[nr] = crtc;
+ private->crtc[manager->pipe] = crtc;
- drm_crtc_init(dev, crtc, &exynos_crtc_funcs);
+ drm_crtc_init(manager->drm_dev, crtc, &exynos_crtc_funcs);
drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
exynos_drm_crtc_attach_mode_property(crtc);
@@ -350,39 +379,41 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
return 0;
}
-int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
{
struct exynos_drm_private *private = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc =
- to_exynos_crtc(private->crtc[crtc]);
+ to_exynos_crtc(private->crtc[pipe]);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
return -EPERM;
- exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
- exynos_drm_enable_vblank);
+ if (manager->ops->enable_vblank)
+ manager->ops->enable_vblank(manager);
return 0;
}
-void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
{
struct exynos_drm_private *private = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc =
- to_exynos_crtc(private->crtc[crtc]);
+ to_exynos_crtc(private->crtc[pipe]);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
return;
- exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
- exynos_drm_disable_vblank);
+ if (manager->ops->disable_vblank)
+ manager->ops->disable_vblank(manager);
}
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
{
struct exynos_drm_private *dev_priv = dev->dev_private;
struct drm_pending_vblank_event *e, *t;
- struct drm_crtc *drm_crtc = dev_priv->crtc[crtc];
+ struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
unsigned long flags;
@@ -391,15 +422,71 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
base.link) {
/* if event's pipe isn't same as crtc then ignore it. */
- if (crtc != e->pipe)
+ if (pipe != e->pipe)
continue;
list_del(&e->base.link);
drm_send_vblank_event(dev, -1, e);
- drm_vblank_put(dev, crtc);
+ drm_vblank_put(dev, pipe);
atomic_set(&exynos_crtc->pending_flip, 0);
wake_up(&exynos_crtc->pending_flip_queue);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+
+void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
+ struct exynos_drm_overlay *overlay)
+{
+ struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+ if (manager->ops->win_mode_set)
+ manager->ops->win_mode_set(manager, overlay);
+}
+
+void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos)
+{
+ struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+ if (manager->ops->win_commit)
+ manager->ops->win_commit(manager, zpos);
+}
+
+void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos)
+{
+ struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+ if (manager->ops->win_enable)
+ manager->ops->win_enable(manager, zpos);
+}
+
+void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos)
+{
+ struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+ if (manager->ops->win_disable)
+ manager->ops->win_disable(manager, zpos);
+}
+
+void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_manager *manager;
+ struct drm_device *dev = fb->dev;
+ struct drm_crtc *crtc;
+
+ /*
+ * make sure that overlay data are updated to real hardware
+ * for all encoders.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ manager = to_exynos_crtc(crtc)->manager;
+
+ /*
+ * wait for vblank interrupt
+ * - this makes sure that overlay data are updated to
+ * real hardware.
+ */
+ if (manager->ops->wait_for_vblank)
+ manager->ops->wait_for_vblank(manager);
+ }
+}
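/*
 * Illustrative sketch, not part of this patch: with the wrappers above, a
 * plane implementation reaches its hardware window only through the crtc's
 * manager.  "foo_plane_commit" and its parameters are hypothetical; the
 * real callers live in the plane code, which is not shown here.
 */
static void foo_plane_commit(struct drm_plane *plane,
			     struct exynos_drm_overlay *overlay, int zpos)
{
	/* hand the software overlay state to the manager ... */
	exynos_drm_crtc_plane_mode_set(plane->crtc, overlay);

	/* ... then ask the hardware to latch that window */
	exynos_drm_crtc_plane_commit(plane->crtc, zpos);
}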
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 3e197e6ae7d9..c27b66cc5d24 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -15,9 +15,21 @@
#ifndef _EXYNOS_DRM_CRTC_H_
#define _EXYNOS_DRM_CRTC_H_
-int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
-int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
-void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc);
+struct drm_device;
+struct drm_crtc;
+struct exynos_drm_manager;
+struct exynos_drm_overlay;
+
+int exynos_drm_crtc_create(struct exynos_drm_manager *manager);
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
+
+void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
+ struct exynos_drm_overlay *overlay);
+void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos);
+void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos);
+void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 59827cc5e770..c786cd4f457b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -224,7 +224,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR_OR_NULL(sgt)) {
+ if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto err_buf_detach;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
new file mode 100644
index 000000000000..2b09c7c0bfcc
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -0,0 +1,339 @@
+/*
+ * Exynos DRM Parallel output support.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ *
+ * Contacts: Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include <linux/regulator/consumer.h>
+
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#include "exynos_drm_drv.h"
+
+struct exynos_dpi {
+ struct device *dev;
+ struct device_node *panel_node;
+
+ struct drm_panel *panel;
+ struct drm_connector connector;
+ struct drm_encoder *encoder;
+
+ struct videomode *vm;
+ int dpms_mode;
+};
+
+#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
+
+static enum drm_connector_status
+exynos_dpi_detect(struct drm_connector *connector, bool force)
+{
+ struct exynos_dpi *ctx = connector_to_dpi(connector);
+
+ /* panels supported only by the boot-loader are always reported as connected */
+ if (!ctx->panel_node)
+ return connector_status_connected;
+
+ if (!ctx->panel) {
+ ctx->panel = of_drm_find_panel(ctx->panel_node);
+ if (ctx->panel)
+ drm_panel_attach(ctx->panel, &ctx->connector);
+ }
+
+ if (ctx->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static void exynos_dpi_connector_destroy(struct drm_connector *connector)
+{
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_funcs exynos_dpi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = exynos_dpi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = exynos_dpi_connector_destroy,
+};
+
+static int exynos_dpi_get_modes(struct drm_connector *connector)
+{
+ struct exynos_dpi *ctx = connector_to_dpi(connector);
+
+ /* FIMD timings take precedence over panel modes */
+ if (ctx->vm) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_ERROR("failed to create a new display mode\n");
+ return 0;
+ }
+ drm_display_mode_from_videomode(ctx->vm, mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+
+ if (ctx->panel)
+ return ctx->panel->funcs->get_modes(ctx->panel);
+
+ return 0;
+}
+
+static int exynos_dpi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+exynos_dpi_best_encoder(struct drm_connector *connector)
+{
+ struct exynos_dpi *ctx = connector_to_dpi(connector);
+
+ return ctx->encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
+ .get_modes = exynos_dpi_get_modes,
+ .mode_valid = exynos_dpi_mode_valid,
+ .best_encoder = exynos_dpi_best_encoder,
+};
+
+static int exynos_dpi_create_connector(struct exynos_drm_display *display,
+ struct drm_encoder *encoder)
+{
+ struct exynos_dpi *ctx = display->ctx;
+ struct drm_connector *connector = &ctx->connector;
+ int ret;
+
+ ctx->encoder = encoder;
+
+ if (ctx->panel_node)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &exynos_dpi_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ if (ret) {
+ DRM_ERROR("failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static void exynos_dpi_poweron(struct exynos_dpi *ctx)
+{
+ if (ctx->panel)
+ drm_panel_enable(ctx->panel);
+}
+
+static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
+{
+ if (ctx->panel)
+ drm_panel_disable(ctx->panel);
+}
+
+static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
+{
+ struct exynos_dpi *ctx = display->ctx;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (ctx->dpms_mode != DRM_MODE_DPMS_ON)
+ exynos_dpi_poweron(ctx);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (ctx->dpms_mode == DRM_MODE_DPMS_ON)
+ exynos_dpi_poweroff(ctx);
+ break;
+ default:
+ break;
+ }
+ ctx->dpms_mode = mode;
+}
+
+static struct exynos_drm_display_ops exynos_dpi_display_ops = {
+ .create_connector = exynos_dpi_create_connector,
+ .dpms = exynos_dpi_dpms
+};
+
+static struct exynos_drm_display exynos_dpi_display = {
+ .type = EXYNOS_DISPLAY_TYPE_LCD,
+ .ops = &exynos_dpi_display_ops,
+};
+
+/* of_* functions will be removed after merge of of_graph patches */
+static struct device_node *
+of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
+{
+ struct device_node *np;
+
+ for_each_child_of_node(parent, np) {
+ u32 r;
+
+ if (!np->name || of_node_cmp(np->name, name))
+ continue;
+
+ if (of_property_read_u32(np, "reg", &r) < 0)
+ r = 0;
+
+ if (reg == r)
+ break;
+ }
+
+ return np;
+}
+
+static struct device_node *of_graph_get_port_by_reg(struct device_node *parent,
+ u32 reg)
+{
+ struct device_node *ports, *port;
+
+ ports = of_get_child_by_name(parent, "ports");
+ if (ports)
+ parent = ports;
+
+ port = of_get_child_by_name_reg(parent, "port", reg);
+
+ of_node_put(ports);
+
+ return port;
+}
+
+static struct device_node *
+of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg)
+{
+ return of_get_child_by_name_reg(port, "endpoint", reg);
+}
+
+static struct device_node *
+of_graph_get_remote_port_parent(const struct device_node *node)
+{
+ struct device_node *np;
+ unsigned int depth;
+
+ np = of_parse_phandle(node, "remote-endpoint", 0);
+
+ /* Walk 3 levels up only if there is a 'ports' node. */
+ for (depth = 3; depth && np; depth--) {
+ np = of_get_next_parent(np);
+ if (depth == 2 && of_node_cmp(np->name, "ports"))
+ break;
+ }
+ return np;
+}
+
+enum {
+ FIMD_PORT_IN0,
+ FIMD_PORT_IN1,
+ FIMD_PORT_IN2,
+ FIMD_PORT_RGB,
+ FIMD_PORT_WRB,
+};
+
+static struct device_node *exynos_dpi_of_find_panel_node(struct device *dev)
+{
+ struct device_node *np, *ep;
+
+ np = of_graph_get_port_by_reg(dev->of_node, FIMD_PORT_RGB);
+ if (!np)
+ return NULL;
+
+ ep = of_graph_get_endpoint_by_reg(np, 0);
+ of_node_put(np);
+ if (!ep)
+ return NULL;
+
+ np = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+
+ return np;
+}
+
+static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
+{
+ struct device *dev = ctx->dev;
+ struct device_node *dn = dev->of_node;
+ struct device_node *np;
+
+ ctx->panel_node = exynos_dpi_of_find_panel_node(dev);
+
+ np = of_get_child_by_name(dn, "display-timings");
+ if (np) {
+ struct videomode *vm;
+ int ret;
+
+ of_node_put(np);
+
+ vm = devm_kzalloc(dev, sizeof(*ctx->vm), GFP_KERNEL);
+ if (!vm)
+ return -ENOMEM;
+
+ ret = of_get_videomode(dn, vm, 0);
+ if (ret < 0)
+ return ret;
+
+ ctx->vm = vm;
+
+ return 0;
+ }
+
+ if (!ctx->panel_node)
+ return -EINVAL;
+
+ return 0;
+}
+
+int exynos_dpi_probe(struct device *dev)
+{
+ struct exynos_dpi *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+ exynos_dpi_display.ctx = ctx;
+ ctx->dpms_mode = DRM_MODE_DPMS_OFF;
+
+ ret = exynos_dpi_parse_dt(ctx);
+ if (ret < 0)
+ return ret;
+
+ exynos_drm_display_register(&exynos_dpi_display);
+
+ return 0;
+}
+
+int exynos_dpi_remove(struct device *dev)
+{
+ exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
+ exynos_drm_display_unregister(&exynos_dpi_display);
+
+ return 0;
+}
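/*
 * Illustrative sketch, not part of this patch: the crtc driver that owns
 * the parallel (RGB) output can chain these helpers from its own
 * probe/remove.  "foo_fimd_probe" and "foo_fimd_remove" are hypothetical
 * wrappers shown only to make the intended call order explicit.
 */
static int foo_fimd_probe(struct platform_device *pdev)
{
	int ret;

	ret = exynos_dpi_probe(&pdev->dev);	/* registers exynos_dpi_display */
	if (ret < 0)
		return ret;

	/* ... remaining crtc/manager setup ... */
	return 0;
}

static int foo_fimd_remove(struct platform_device *pdev)
{
	return exynos_dpi_remove(&pdev->dev);
}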
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index c204b4e3356e..2d27ba23a6a8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -11,6 +11,7 @@
* option) any later version.
*/
+#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -53,6 +54,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
return -ENOMEM;
INIT_LIST_HEAD(&private->pageflip_event_list);
+ dev_set_drvdata(dev->dev, dev);
dev->dev_private = (void *)private;
/*
@@ -64,38 +66,36 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
ret = drm_create_iommu_mapping(dev);
if (ret < 0) {
DRM_ERROR("failed to create iommu mapping.\n");
- goto err_crtc;
+ goto err_free_private;
}
drm_mode_config_init(dev);
- /* init kms poll for handling hpd */
- drm_kms_helper_poll_init(dev);
-
exynos_drm_mode_config_init(dev);
- /*
- * EXYNOS4 is enough to have two CRTCs and each crtc would be used
- * without dependency of hardware.
- */
- for (nr = 0; nr < MAX_CRTC; nr++) {
- ret = exynos_drm_crtc_create(dev, nr);
- if (ret)
- goto err_release_iommu_mapping;
- }
+ ret = exynos_drm_initialize_managers(dev);
+ if (ret)
+ goto err_mode_config_cleanup;
for (nr = 0; nr < MAX_PLANE; nr++) {
struct drm_plane *plane;
- unsigned int possible_crtcs = (1 << MAX_CRTC) - 1;
+ unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
- goto err_release_iommu_mapping;
+ goto err_manager_cleanup;
}
+ ret = exynos_drm_initialize_displays(dev);
+ if (ret)
+ goto err_manager_cleanup;
+
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(dev);
+
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
- goto err_release_iommu_mapping;
+ goto err_display_cleanup;
/*
* probe sub drivers such as display controller and hdmi driver,
@@ -109,30 +109,25 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
/* setup possible_clones. */
exynos_drm_encoder_setup(dev);
- /*
- * create and configure fb helper and also exynos specific
- * fbdev object.
- */
- ret = exynos_drm_fbdev_init(dev);
- if (ret) {
- DRM_ERROR("failed to initialize drm fbdev\n");
- goto err_drm_device;
- }
-
drm_vblank_offdelay = VBLANK_OFF_DELAY;
platform_set_drvdata(dev->platformdev, dev);
+ /* force connector detection */
+ drm_helper_hpd_irq_event(dev);
+
return 0;
-err_drm_device:
- exynos_drm_device_unregister(dev);
err_vblank:
drm_vblank_cleanup(dev);
-err_release_iommu_mapping:
- drm_release_iommu_mapping(dev);
-err_crtc:
+err_display_cleanup:
+ exynos_drm_remove_displays(dev);
+err_manager_cleanup:
+ exynos_drm_remove_managers(dev);
+err_mode_config_cleanup:
drm_mode_config_cleanup(dev);
+ drm_release_iommu_mapping(dev);
+err_free_private:
kfree(private);
return ret;
@@ -144,6 +139,8 @@ static int exynos_drm_unload(struct drm_device *dev)
exynos_drm_device_unregister(dev);
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
+ exynos_drm_remove_displays(dev);
+ exynos_drm_remove_managers(dev);
drm_mode_config_cleanup(dev);
drm_release_iommu_mapping(dev);
@@ -158,6 +155,41 @@ static const struct file_operations exynos_drm_gem_fops = {
.mmap = exynos_drm_gem_mmap_buffer,
};
+static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
+{
+ struct drm_connector *connector;
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ int old_dpms = connector->dpms;
+
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+
+ /* Set the old mode back to the connector for resume */
+ connector->dpms = old_dpms;
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+static int exynos_drm_resume(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector, connector->dpms);
+ }
+
+ drm_helper_resume_force_mode(dev);
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
@@ -295,6 +327,8 @@ static struct drm_driver exynos_drm_driver = {
DRIVER_GEM | DRIVER_PRIME,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
+ .suspend = exynos_drm_suspend,
+ .resume = exynos_drm_resume,
.open = exynos_drm_open,
.preclose = exynos_drm_preclose,
.lastclose = exynos_drm_lastclose,
@@ -329,6 +363,9 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
if (ret)
return ret;
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
return drm_platform_init(&exynos_drm_driver, pdev);
}
@@ -339,12 +376,67 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int exynos_drm_sys_suspend(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ pm_message_t message;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ message.event = PM_EVENT_SUSPEND;
+ return exynos_drm_suspend(drm_dev, message);
+}
+
+static int exynos_drm_sys_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return exynos_drm_resume(drm_dev);
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int exynos_drm_runtime_suspend(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ pm_message_t message;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ message.event = PM_EVENT_SUSPEND;
+ return exynos_drm_suspend(drm_dev, message);
+}
+
+static int exynos_drm_runtime_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ if (!pm_runtime_suspended(dev))
+ return 0;
+
+ return exynos_drm_resume(drm_dev);
+}
+#endif
+
+static const struct dev_pm_ops exynos_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume)
+ SET_RUNTIME_PM_OPS(exynos_drm_runtime_suspend,
+ exynos_drm_runtime_resume, NULL)
+};
+
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
.remove = exynos_drm_platform_remove,
.driver = {
.owner = THIS_MODULE,
.name = "exynos-drm",
+ .pm = &exynos_drm_pm_ops,
},
};
@@ -352,6 +444,18 @@ static int __init exynos_drm_init(void)
{
int ret;
+#ifdef CONFIG_DRM_EXYNOS_DP
+ ret = platform_driver_register(&dp_driver);
+ if (ret < 0)
+ goto out_dp;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DSI
+ ret = platform_driver_register(&dsi_driver);
+ if (ret < 0)
+ goto out_dsi;
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_FIMD
ret = platform_driver_register(&fimd_driver);
if (ret < 0)
@@ -365,13 +469,6 @@ static int __init exynos_drm_init(void)
ret = platform_driver_register(&mixer_driver);
if (ret < 0)
goto out_mixer;
- ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
- if (ret < 0)
- goto out_common_hdmi;
-
- ret = exynos_platform_device_hdmi_register();
- if (ret < 0)
- goto out_common_hdmi_dev;
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -464,10 +561,6 @@ out_vidi:
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
- exynos_platform_device_hdmi_unregister();
-out_common_hdmi_dev:
- platform_driver_unregister(&exynos_drm_common_hdmi_driver);
-out_common_hdmi:
platform_driver_unregister(&mixer_driver);
out_mixer:
platform_driver_unregister(&hdmi_driver);
@@ -478,6 +571,16 @@ out_hdmi:
platform_driver_unregister(&fimd_driver);
out_fimd:
#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DSI
+ platform_driver_unregister(&dsi_driver);
+out_dsi:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DP
+ platform_driver_unregister(&dp_driver);
+out_dp:
+#endif
return ret;
}
@@ -509,8 +612,6 @@ static void __exit exynos_drm_exit(void)
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
- exynos_platform_device_hdmi_unregister();
- platform_driver_unregister(&exynos_drm_common_hdmi_driver);
platform_driver_unregister(&mixer_driver);
platform_driver_unregister(&hdmi_driver);
#endif
@@ -522,6 +623,14 @@ static void __exit exynos_drm_exit(void)
#ifdef CONFIG_DRM_EXYNOS_FIMD
platform_driver_unregister(&fimd_driver);
#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DSI
+ platform_driver_unregister(&dsi_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DP
+ platform_driver_unregister(&dp_driver);
+#endif
}
module_init(exynos_drm_init);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 0eaf5a27e120..ce3e6a30deaa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -54,22 +54,6 @@ enum exynos_drm_output_type {
};
/*
- * Exynos drm overlay ops structure.
- *
- * @mode_set: copy drm overlay info to hw specific overlay info.
- * @commit: apply hardware specific overlay data to registers.
- * @enable: enable hardware specific overlay.
- * @disable: disable hardware specific overlay.
- */
-struct exynos_drm_overlay_ops {
- void (*mode_set)(struct device *subdrv_dev,
- struct exynos_drm_overlay *overlay);
- void (*commit)(struct device *subdrv_dev, int zpos);
- void (*enable)(struct device *subdrv_dev, int zpos);
- void (*disable)(struct device *subdrv_dev, int zpos);
-};
-
-/*
* Exynos drm common overlay structure.
*
* @fb_x: offset x on a framebuffer to be displayed.
@@ -138,77 +122,110 @@ struct exynos_drm_overlay {
* Exynos DRM Display Structure.
* - this structure is common to analog tv, digital tv and lcd panel.
*
- * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
- * @is_connected: check for that display is connected or not.
- * @get_edid: get edid modes from display driver.
- * @get_panel: get panel object from display driver.
+ * @initialize: initializes the display with drm_dev
+ * @create_connector: create and initialize the connector for this display
+ * and attach it to the given encoder
+ * @remove: cleans up the display for removal
+ * @mode_fixup: fix up mode data by comparing it with the hw specific
+ * display mode.
+ * @mode_set: convert drm_display_mode to hw specific display mode and
+ * would be called by encoder->mode_set().
* @check_mode: check if mode is valid or not.
- * @power_on: display device on or off.
+ * @dpms: display device on or off.
+ * @commit: apply changes to hw
*/
+struct exynos_drm_display;
struct exynos_drm_display_ops {
+ int (*initialize)(struct exynos_drm_display *display,
+ struct drm_device *drm_dev);
+ int (*create_connector)(struct exynos_drm_display *display,
+ struct drm_encoder *encoder);
+ void (*remove)(struct exynos_drm_display *display);
+ void (*mode_fixup)(struct exynos_drm_display *display,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*mode_set)(struct exynos_drm_display *display,
+ struct drm_display_mode *mode);
+ int (*check_mode)(struct exynos_drm_display *display,
+ struct drm_display_mode *mode);
+ void (*dpms)(struct exynos_drm_display *display, int mode);
+ void (*commit)(struct exynos_drm_display *display);
+};
+
+/*
+ * Exynos drm display structure, maps 1:1 with an encoder/connector
+ *
+ * @list: the list entry for this display
+ * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
+ * @encoder: encoder object this display maps to
+ * @connector: connector object this display maps to
+ * @ops: pointer to callbacks for exynos drm specific functionality
+ * @ctx: A pointer to the display's implementation specific context
+ */
+struct exynos_drm_display {
+ struct list_head list;
enum exynos_drm_output_type type;
- bool (*is_connected)(struct device *dev);
- struct edid *(*get_edid)(struct device *dev,
- struct drm_connector *connector);
- void *(*get_panel)(struct device *dev);
- int (*check_mode)(struct device *dev, struct drm_display_mode *mode);
- int (*power_on)(struct device *dev, int mode);
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct exynos_drm_display_ops *ops;
+ void *ctx;
};
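
As a rough sketch of how this new interface is meant to be used (the DSI driver added later in this patch follows the same pattern; the example_* names below are purely illustrative and not part of the patch), a display driver fills in a static exynos_drm_display, keeps its private state behind ctx, and registers the display at probe time with exynos_drm_display_register():

static struct exynos_drm_display_ops example_display_ops = {
	.create_connector = example_create_connector,
	.mode_set = example_mode_set,
	.dpms = example_dpms,
};

static struct exynos_drm_display example_display = {
	.type = EXYNOS_DISPLAY_TYPE_LCD,
	.ops = &example_display_ops,
};

static int example_probe(struct platform_device *pdev)
{
	/* sketch only; assumes exynos_drm_drv.h and linux/platform_device.h */
	struct example_ctx *ctx;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	example_display.ctx = ctx;
	platform_set_drvdata(pdev, &example_display);

	return exynos_drm_display_register(&example_display);
}
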
/*
* Exynos drm manager ops
*
+ * @initialize: initializes the manager with drm_dev
+ * @remove: cleans up the manager for removal
* @dpms: control device power.
- * @apply: set timing, vblank and overlay data to registers.
- * @mode_fixup: fix mode data comparing to hw specific display mode.
- * @mode_set: convert drm_display_mode to hw specific display mode and
- * would be called by encoder->mode_set().
- * @get_max_resol: get maximum resolution to specific hardware.
+ * @mode_fixup: fix mode data before applying it
+ * @mode_set: set the given mode to the manager
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
* @wait_for_vblank: wait for vblank interrupt to make sure that
* hardware overlay is updated.
+ * @win_mode_set: copy drm overlay info to hw specific overlay info.
+ * @win_commit: apply hardware specific overlay data to registers.
+ * @win_enable: enable hardware specific overlay.
+ * @win_disable: disable hardware specific overlay.
*/
+struct exynos_drm_manager;
struct exynos_drm_manager_ops {
- void (*dpms)(struct device *subdrv_dev, int mode);
- void (*apply)(struct device *subdrv_dev);
- void (*mode_fixup)(struct device *subdrv_dev,
- struct drm_connector *connector,
+ int (*initialize)(struct exynos_drm_manager *mgr,
+ struct drm_device *drm_dev, int pipe);
+ void (*remove)(struct exynos_drm_manager *mgr);
+ void (*dpms)(struct exynos_drm_manager *mgr, int mode);
+ bool (*mode_fixup)(struct exynos_drm_manager *mgr,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
- void (*mode_set)(struct device *subdrv_dev, void *mode);
- void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width,
- unsigned int *height);
- void (*commit)(struct device *subdrv_dev);
- int (*enable_vblank)(struct device *subdrv_dev);
- void (*disable_vblank)(struct device *subdrv_dev);
- void (*wait_for_vblank)(struct device *subdrv_dev);
+ void (*mode_set)(struct exynos_drm_manager *mgr,
+ const struct drm_display_mode *mode);
+ void (*commit)(struct exynos_drm_manager *mgr);
+ int (*enable_vblank)(struct exynos_drm_manager *mgr);
+ void (*disable_vblank)(struct exynos_drm_manager *mgr);
+ void (*wait_for_vblank)(struct exynos_drm_manager *mgr);
+ void (*win_mode_set)(struct exynos_drm_manager *mgr,
+ struct exynos_drm_overlay *overlay);
+ void (*win_commit)(struct exynos_drm_manager *mgr, int zpos);
+ void (*win_enable)(struct exynos_drm_manager *mgr, int zpos);
+ void (*win_disable)(struct exynos_drm_manager *mgr, int zpos);
};
/*
- * Exynos drm common manager structure.
+ * Exynos drm common manager structure, maps 1:1 with a crtc
*
- * @dev: pointer to device object for subdrv device driver.
- * sub drivers such as display controller or hdmi driver,
- * have their own device object.
- * @ops: pointer to callbacks for exynos drm specific framebuffer.
- * these callbacks should be set by specific drivers such fimd
- * or hdmi driver and are used to control hardware global registers.
- * @overlay_ops: pointer to callbacks for exynos drm specific framebuffer.
- * these callbacks should be set by specific drivers such fimd
- * or hdmi driver and are used to control hardware overlay reigsters.
- * @display: pointer to callbacks for exynos drm specific framebuffer.
- * these callbacks should be set by specific drivers such fimd
- * or hdmi driver and are used to control display devices such as
- * analog tv, digital tv and lcd panel and also get timing data for them.
+ * @list: the list entry for this manager
+ * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
+ * @drm_dev: pointer to the drm device
+ * @pipe: the pipe number for this crtc/manager
+ * @ops: pointer to callbacks for exynos drm specific functionality
+ * @ctx: A pointer to the manager's implementation specific context
*/
struct exynos_drm_manager {
- struct device *dev;
+ struct list_head list;
+ enum exynos_drm_output_type type;
+ struct drm_device *drm_dev;
int pipe;
struct exynos_drm_manager_ops *ops;
- struct exynos_drm_overlay_ops *overlay_ops;
- struct exynos_drm_display_ops *display_ops;
+ void *ctx;
};
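
The manager side mirrors this: a crtc driver wraps its exynos_drm_manager_ops in a struct exynos_drm_manager and hands it to exynos_drm_manager_register(), declared further down in this header. A minimal sketch, again with purely hypothetical example_* names:

static struct exynos_drm_manager_ops example_manager_ops = {
	.dpms		= example_mgr_dpms,
	.mode_set	= example_mgr_mode_set,
	.commit		= example_mgr_commit,
	.enable_vblank	= example_mgr_enable_vblank,
	.disable_vblank	= example_mgr_disable_vblank,
	.win_mode_set	= example_win_mode_set,
	.win_commit	= example_win_commit,
	.win_disable	= example_win_disable,
};

static struct exynos_drm_manager example_manager = {
	.type = EXYNOS_DISPLAY_TYPE_LCD,
	.ops = &example_manager_ops,
};

/*
 * Registered from the crtc driver's probe path, much like the display
 * sketch above:
 *
 *	example_manager.ctx = ctx;
 *	exynos_drm_manager_register(&example_manager);
 */
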
struct exynos_drm_g2d_private {
@@ -237,7 +254,6 @@ struct drm_exynos_file_private {
* otherwise default one.
* @da_space_size: size of device address space.
* if 0 then default value is used for it.
- * @da_space_order: order to device address space.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -255,7 +271,6 @@ struct exynos_drm_private {
unsigned long da_start;
unsigned long da_space_size;
- unsigned long da_space_order;
};
/*
@@ -273,14 +288,11 @@ struct exynos_drm_private {
* by probe callback.
* @open: this would be called with drm device file open.
* @close: this would be called with drm device file close.
- * @encoder: encoder object owned by this sub driver.
- * @connector: connector object owned by this sub driver.
*/
struct exynos_drm_subdrv {
struct list_head list;
struct device *dev;
struct drm_device *drm_dev;
- struct exynos_drm_manager *manager;
int (*probe)(struct drm_device *drm_dev, struct device *dev);
void (*remove)(struct drm_device *drm_dev, struct device *dev);
@@ -288,9 +300,6 @@ struct exynos_drm_subdrv {
struct drm_file *file);
void (*close)(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file);
-
- struct drm_encoder *encoder;
- struct drm_connector *connector;
};
/*
@@ -305,6 +314,16 @@ int exynos_drm_device_register(struct drm_device *dev);
*/
int exynos_drm_device_unregister(struct drm_device *dev);
+int exynos_drm_initialize_managers(struct drm_device *dev);
+void exynos_drm_remove_managers(struct drm_device *dev);
+int exynos_drm_initialize_displays(struct drm_device *dev);
+void exynos_drm_remove_displays(struct drm_device *dev);
+
+int exynos_drm_manager_register(struct exynos_drm_manager *manager);
+int exynos_drm_manager_unregister(struct exynos_drm_manager *manager);
+int exynos_drm_display_register(struct exynos_drm_display *display);
+int exynos_drm_display_unregister(struct exynos_drm_display *display);
+
/*
* this function would be called by sub drivers such as display controller
* or hdmi driver to register this sub driver object to exynos drm driver
@@ -340,6 +359,16 @@ int exynos_platform_device_ipp_register(void);
*/
void exynos_platform_device_ipp_unregister(void);
+#ifdef CONFIG_DRM_EXYNOS_DPI
+int exynos_dpi_probe(struct device *dev);
+int exynos_dpi_remove(struct device *dev);
+#else
+static inline int exynos_dpi_probe(struct device *dev) { return 0; }
+static inline int exynos_dpi_remove(struct device *dev) { return 0; }
+#endif
+
+extern struct platform_driver dp_driver;
+extern struct platform_driver dsi_driver;
extern struct platform_driver fimd_driver;
extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
new file mode 100644
index 000000000000..eb73e3bf2a0c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -0,0 +1,1524 @@
+/*
+ * Samsung SoC MIPI DSI Master driver.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ *
+ * Contacts: Tomasz Figa <t.figa@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+#include "exynos_drm_drv.h"
+
+/* returns true iff both arguments logically differ */
+#define NEQV(a, b) (!(a) ^ !(b))
+
+#define DSIM_STATUS_REG 0x0 /* Status register */
+#define DSIM_SWRST_REG 0x4 /* Software reset register */
+#define DSIM_CLKCTRL_REG 0x8 /* Clock control register */
+#define DSIM_TIMEOUT_REG 0xc /* Time out register */
+#define DSIM_CONFIG_REG 0x10 /* Configuration register */
+#define DSIM_ESCMODE_REG 0x14 /* Escape mode register */
+
+/* Main display image resolution register */
+#define DSIM_MDRESOL_REG 0x18
+#define DSIM_MVPORCH_REG 0x1c /* Main display Vporch register */
+#define DSIM_MHPORCH_REG 0x20 /* Main display Hporch register */
+#define DSIM_MSYNC_REG 0x24 /* Main display sync area register */
+
+/* Sub display image resolution register */
+#define DSIM_SDRESOL_REG 0x28
+#define DSIM_INTSRC_REG 0x2c /* Interrupt source register */
+#define DSIM_INTMSK_REG 0x30 /* Interrupt mask register */
+#define DSIM_PKTHDR_REG 0x34 /* Packet Header FIFO register */
+#define DSIM_PAYLOAD_REG 0x38 /* Payload FIFO register */
+#define DSIM_RXFIFO_REG 0x3c /* Read FIFO register */
+#define DSIM_FIFOTHLD_REG 0x40 /* FIFO threshold level register */
+#define DSIM_FIFOCTRL_REG 0x44 /* FIFO status and control register */
+
+/* FIFO memory AC characteristic register */
+#define DSIM_PLLCTRL_REG 0x4c /* PLL control register */
+#define DSIM_PLLTMR_REG 0x50 /* PLL timer register */
+#define DSIM_PHYACCHR_REG 0x54 /* D-PHY AC characteristic register */
+#define DSIM_PHYACCHR1_REG 0x58 /* D-PHY AC characteristic register1 */
+
+/* DSIM_STATUS */
+#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
+#define DSIM_STOP_STATE_CLK (1 << 8)
+#define DSIM_TX_READY_HS_CLK (1 << 10)
+#define DSIM_PLL_STABLE (1 << 31)
+
+/* DSIM_SWRST */
+#define DSIM_FUNCRST (1 << 16)
+#define DSIM_SWRST (1 << 0)
+
+/* DSIM_TIMEOUT */
+#define DSIM_LPDR_TIMEOUT(x) ((x) << 0)
+#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
+
+/* DSIM_CLKCTRL */
+#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
+#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
+#define DSIM_LANE_ESC_CLK_EN_CLK (1 << 19)
+#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
+#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
+#define DSIM_BYTE_CLKEN (1 << 24)
+#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
+#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
+#define DSIM_PLL_BYPASS (1 << 27)
+#define DSIM_ESC_CLKEN (1 << 28)
+#define DSIM_TX_REQUEST_HSCLK (1 << 31)
+
+/* DSIM_CONFIG */
+#define DSIM_LANE_EN_CLK (1 << 0)
+#define DSIM_LANE_EN(x) (((x) & 0xf) << 1)
+#define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5)
+#define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8)
+#define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12)
+#define DSIM_SUB_VC (((x) & 0x3) << 16)
+#define DSIM_MAIN_VC (((x) & 0x3) << 18)
+#define DSIM_HSA_MODE (1 << 20)
+#define DSIM_HBP_MODE (1 << 21)
+#define DSIM_HFP_MODE (1 << 22)
+#define DSIM_HSE_MODE (1 << 23)
+#define DSIM_AUTO_MODE (1 << 24)
+#define DSIM_VIDEO_MODE (1 << 25)
+#define DSIM_BURST_MODE (1 << 26)
+#define DSIM_SYNC_INFORM (1 << 27)
+#define DSIM_EOT_DISABLE (1 << 28)
+#define DSIM_MFLUSH_VS (1 << 29)
+
+/* DSIM_ESCMODE */
+#define DSIM_TX_TRIGGER_RST (1 << 4)
+#define DSIM_TX_LPDT_LP (1 << 6)
+#define DSIM_CMD_LPDT_LP (1 << 7)
+#define DSIM_FORCE_BTA (1 << 16)
+#define DSIM_FORCE_STOP_STATE (1 << 20)
+#define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21)
+#define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21)
+
+/* DSIM_MDRESOL */
+#define DSIM_MAIN_STAND_BY (1 << 31)
+#define DSIM_MAIN_VRESOL(x) (((x) & 0x7ff) << 16)
+#define DSIM_MAIN_HRESOL(x) (((x) & 0x7ff) << 0)
+
+/* DSIM_MVPORCH */
+#define DSIM_CMD_ALLOW(x) ((x) << 28)
+#define DSIM_STABLE_VFP(x) ((x) << 16)
+#define DSIM_MAIN_VBP(x) ((x) << 0)
+#define DSIM_CMD_ALLOW_MASK (0xf << 28)
+#define DSIM_STABLE_VFP_MASK (0x7ff << 16)
+#define DSIM_MAIN_VBP_MASK (0x7ff << 0)
+
+/* DSIM_MHPORCH */
+#define DSIM_MAIN_HFP(x) ((x) << 16)
+#define DSIM_MAIN_HBP(x) ((x) << 0)
+#define DSIM_MAIN_HFP_MASK ((0xffff) << 16)
+#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
+
+/* DSIM_MSYNC */
+#define DSIM_MAIN_VSA(x) ((x) << 22)
+#define DSIM_MAIN_HSA(x) ((x) << 0)
+#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
+#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
+
+/* DSIM_SDRESOL */
+#define DSIM_SUB_STANDY(x) ((x) << 31)
+#define DSIM_SUB_VRESOL(x) ((x) << 16)
+#define DSIM_SUB_HRESOL(x) ((x) << 0)
+#define DSIM_SUB_STANDY_MASK ((0x1) << 31)
+#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16)
+#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0)
+
+/* DSIM_INTSRC */
+#define DSIM_INT_PLL_STABLE (1 << 31)
+#define DSIM_INT_SW_RST_RELEASE (1 << 30)
+#define DSIM_INT_SFR_FIFO_EMPTY (1 << 29)
+#define DSIM_INT_BTA (1 << 25)
+#define DSIM_INT_FRAME_DONE (1 << 24)
+#define DSIM_INT_RX_TIMEOUT (1 << 21)
+#define DSIM_INT_BTA_TIMEOUT (1 << 20)
+#define DSIM_INT_RX_DONE (1 << 18)
+#define DSIM_INT_RX_TE (1 << 17)
+#define DSIM_INT_RX_ACK (1 << 16)
+#define DSIM_INT_RX_ECC_ERR (1 << 15)
+#define DSIM_INT_RX_CRC_ERR (1 << 14)
+
+/* DSIM_FIFOCTRL */
+#define DSIM_RX_DATA_FULL (1 << 25)
+#define DSIM_RX_DATA_EMPTY (1 << 24)
+#define DSIM_SFR_HEADER_FULL (1 << 23)
+#define DSIM_SFR_HEADER_EMPTY (1 << 22)
+#define DSIM_SFR_PAYLOAD_FULL (1 << 21)
+#define DSIM_SFR_PAYLOAD_EMPTY (1 << 20)
+#define DSIM_I80_HEADER_FULL (1 << 19)
+#define DSIM_I80_HEADER_EMPTY (1 << 18)
+#define DSIM_I80_PAYLOAD_FULL (1 << 17)
+#define DSIM_I80_PAYLOAD_EMPTY (1 << 16)
+#define DSIM_SD_HEADER_FULL (1 << 15)
+#define DSIM_SD_HEADER_EMPTY (1 << 14)
+#define DSIM_SD_PAYLOAD_FULL (1 << 13)
+#define DSIM_SD_PAYLOAD_EMPTY (1 << 12)
+#define DSIM_MD_HEADER_FULL (1 << 11)
+#define DSIM_MD_HEADER_EMPTY (1 << 10)
+#define DSIM_MD_PAYLOAD_FULL (1 << 9)
+#define DSIM_MD_PAYLOAD_EMPTY (1 << 8)
+#define DSIM_RX_FIFO (1 << 4)
+#define DSIM_SFR_FIFO (1 << 3)
+#define DSIM_I80_FIFO (1 << 2)
+#define DSIM_SD_FIFO (1 << 1)
+#define DSIM_MD_FIFO (1 << 0)
+
+/* DSIM_PHYACCHR */
+#define DSIM_AFC_EN (1 << 14)
+#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5)
+
+/* DSIM_PLLCTRL */
+#define DSIM_FREQ_BAND(x) ((x) << 24)
+#define DSIM_PLL_EN (1 << 23)
+#define DSIM_PLL_P(x) ((x) << 13)
+#define DSIM_PLL_M(x) ((x) << 4)
+#define DSIM_PLL_S(x) ((x) << 1)
+
+#define DSI_MAX_BUS_WIDTH 4
+#define DSI_NUM_VIRTUAL_CHANNELS 4
+#define DSI_TX_FIFO_SIZE 2048
+#define DSI_RX_FIFO_SIZE 256
+#define DSI_XFER_TIMEOUT_MS 100
+#define DSI_RX_FIFO_EMPTY 0x30800002
+
+enum exynos_dsi_transfer_type {
+ EXYNOS_DSI_TX,
+ EXYNOS_DSI_RX,
+};
+
+struct exynos_dsi_transfer {
+ struct list_head list;
+ struct completion completed;
+ int result;
+ u8 data_id;
+ u8 data[2];
+ u16 flags;
+
+ const u8 *tx_payload;
+ u16 tx_len;
+ u16 tx_done;
+
+ u8 *rx_payload;
+ u16 rx_len;
+ u16 rx_done;
+};
+
+#define DSIM_STATE_ENABLED BIT(0)
+#define DSIM_STATE_INITIALIZED BIT(1)
+#define DSIM_STATE_CMD_LPM BIT(2)
+
+struct exynos_dsi {
+ struct mipi_dsi_host dsi_host;
+ struct drm_connector connector;
+ struct drm_encoder *encoder;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct device *dev;
+
+ void __iomem *reg_base;
+ struct phy *phy;
+ struct clk *pll_clk;
+ struct clk *bus_clk;
+ struct regulator_bulk_data supplies[2];
+ int irq;
+
+ u32 pll_clk_rate;
+ u32 burst_clk_rate;
+ u32 esc_clk_rate;
+ u32 lanes;
+ u32 mode_flags;
+ u32 format;
+ struct videomode vm;
+
+ int state;
+ struct drm_property *brightness;
+ struct completion completed;
+
+ spinlock_t transfer_lock; /* protects transfer_list */
+ struct list_head transfer_list;
+};
+
+#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
+#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
+
+static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
+{
+ if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
+ return;
+
+ dev_err(dsi->dev, "timeout waiting for reset\n");
+}
+
+static void exynos_dsi_reset(struct exynos_dsi *dsi)
+{
+ reinit_completion(&dsi->completed);
+ writel(DSIM_SWRST, dsi->reg_base + DSIM_SWRST_REG);
+}
+
+#ifndef MHZ
+#define MHZ (1000*1000)
+#endif
+
+static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
+ unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
+{
+ unsigned long best_freq = 0;
+ u32 min_delta = 0xffffffff;
+ u8 p_min, p_max;
+ u8 _p, uninitialized_var(best_p);
+ u16 _m, uninitialized_var(best_m);
+ u8 _s, uninitialized_var(best_s);
+
+ p_min = DIV_ROUND_UP(fin, (12 * MHZ));
+ p_max = fin / (6 * MHZ);
+
+ for (_p = p_min; _p <= p_max; ++_p) {
+ for (_s = 0; _s <= 5; ++_s) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * (_p << _s);
+ do_div(tmp, fin);
+ _m = tmp;
+ if (_m < 41 || _m > 125)
+ continue;
+
+ tmp = (u64)_m * fin;
+ do_div(tmp, _p);
+ if (tmp < 500 * MHZ || tmp > 1000 * MHZ)
+ continue;
+
+ tmp = (u64)_m * fin;
+ do_div(tmp, _p << _s);
+
+ delta = abs(fout - tmp);
+ if (delta < min_delta) {
+ best_p = _p;
+ best_m = _m;
+ best_s = _s;
+ min_delta = delta;
+ best_freq = tmp;
+ }
+ }
+ }
+
+ if (best_freq) {
+ *p = best_p;
+ *m = best_m;
+ *s = best_s;
+ }
+
+ return best_freq;
+}
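
For readers following the PLL math: the loop above searches for p, m and s such that fout is as close as possible to fin * m / (p * 2^s), while keeping m in the 41..125 range and the intermediate VCO frequency fin * m / p between 500 MHz and 1 GHz. As a purely illustrative example (numbers not taken from the patch): with fin = 24 MHz, choosing p = 3, m = 125 and s = 1 gives a VCO of 24 * 125 / 3 = 1000 MHz and an output of 1000 / 2 = 500 MHz, which satisfies every constraint checked in the loop.
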
+
+static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
+ unsigned long freq)
+{
+ static const unsigned long freq_bands[] = {
+ 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
+ 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
+ 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
+ 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ };
+ unsigned long fin, fout;
+ int timeout, band;
+ u8 p, s;
+ u16 m;
+ u32 reg;
+
+ clk_set_rate(dsi->pll_clk, dsi->pll_clk_rate);
+
+ fin = clk_get_rate(dsi->pll_clk);
+ if (!fin) {
+ dev_err(dsi->dev, "failed to get PLL clock frequency\n");
+ return 0;
+ }
+
+ dev_dbg(dsi->dev, "PLL input frequency: %lu\n", fin);
+
+ fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s);
+ if (!fout) {
+ dev_err(dsi->dev,
+ "failed to find PLL PMS for requested frequency\n");
+ return -EFAULT;
+ }
+
+ for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
+ if (fout < freq_bands[band])
+ break;
+
+ dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d), band %d\n", fout,
+ p, m, s, band);
+
+ writel(500, dsi->reg_base + DSIM_PLLTMR_REG);
+
+ reg = DSIM_FREQ_BAND(band) | DSIM_PLL_EN
+ | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
+ writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
+
+ timeout = 1000;
+ do {
+ if (timeout-- == 0) {
+ dev_err(dsi->dev, "PLL failed to stabilize\n");
+ return -EFAULT;
+ }
+ reg = readl(dsi->reg_base + DSIM_STATUS_REG);
+ } while ((reg & DSIM_PLL_STABLE) == 0);
+
+ return fout;
+}
+
+static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
+{
+ unsigned long hs_clk, byte_clk, esc_clk;
+ unsigned long esc_div;
+ u32 reg;
+
+ hs_clk = exynos_dsi_set_pll(dsi, dsi->burst_clk_rate);
+ if (!hs_clk) {
+ dev_err(dsi->dev, "failed to configure DSI PLL\n");
+ return -EFAULT;
+ }
+
+ byte_clk = hs_clk / 8;
+ esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
+ esc_clk = byte_clk / esc_div;
+
+ if (esc_clk > 20 * MHZ) {
+ ++esc_div;
+ esc_clk = byte_clk / esc_div;
+ }
+
+ dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
+ hs_clk, byte_clk, esc_clk);
+
+ reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG);
+ reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
+ | DSIM_BYTE_CLK_SRC_MASK);
+ reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
+ | DSIM_ESC_PRESCALER(esc_div)
+ | DSIM_LANE_ESC_CLK_EN_CLK
+ | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
+ | DSIM_BYTE_CLK_SRC(0)
+ | DSIM_TX_REQUEST_HSCLK;
+ writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG);
+
+ return 0;
+}
+
+static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
+{
+ u32 reg;
+
+ reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG);
+ reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
+ | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
+ writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG);
+
+ reg = readl(dsi->reg_base + DSIM_PLLCTRL_REG);
+ reg &= ~DSIM_PLL_EN;
+ writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
+}
+
+static int exynos_dsi_init_link(struct exynos_dsi *dsi)
+{
+ int timeout;
+ u32 reg;
+ u32 lanes_mask;
+
+ /* Initialize FIFO pointers */
+ reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG);
+ reg &= ~0x1f;
+ writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG);
+
+ usleep_range(9000, 11000);
+
+ reg |= 0x1f;
+ writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG);
+
+ usleep_range(9000, 11000);
+
+ /* DSI configuration */
+ reg = 0;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ reg |= DSIM_VIDEO_MODE;
+
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
+ reg |= DSIM_MFLUSH_VS;
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+ reg |= DSIM_EOT_DISABLE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ reg |= DSIM_SYNC_INFORM;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ reg |= DSIM_BURST_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
+ reg |= DSIM_AUTO_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
+ reg |= DSIM_HSE_MODE;
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HFP))
+ reg |= DSIM_HFP_MODE;
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HBP))
+ reg |= DSIM_HBP_MODE;
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSA))
+ reg |= DSIM_HSA_MODE;
+ }
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB666;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB565;
+ break;
+ default:
+ dev_err(dsi->dev, "invalid pixel format\n");
+ return -EINVAL;
+ }
+
+ reg |= DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1);
+
+ writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
+
+ reg |= DSIM_LANE_EN_CLK;
+ writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
+
+ lanes_mask = BIT(dsi->lanes) - 1;
+ reg |= DSIM_LANE_EN(lanes_mask);
+ writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
+
+ /* Check that the clock and data lanes are in stop state */
+ timeout = 100;
+ do {
+ if (timeout-- == 0) {
+ dev_err(dsi->dev, "waiting for bus lanes timed out\n");
+ return -EFAULT;
+ }
+
+ reg = readl(dsi->reg_base + DSIM_STATUS_REG);
+ if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
+ != DSIM_STOP_STATE_DAT(lanes_mask))
+ continue;
+ } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
+
+ reg = readl(dsi->reg_base + DSIM_ESCMODE_REG);
+ reg &= ~DSIM_STOP_STATE_CNT_MASK;
+ reg |= DSIM_STOP_STATE_CNT(0xf);
+ writel(reg, dsi->reg_base + DSIM_ESCMODE_REG);
+
+ reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
+ writel(reg, dsi->reg_base + DSIM_TIMEOUT_REG);
+
+ return 0;
+}
+
+static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+ u32 reg;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ reg = DSIM_CMD_ALLOW(0xf)
+ | DSIM_STABLE_VFP(vm->vfront_porch)
+ | DSIM_MAIN_VBP(vm->vback_porch);
+ writel(reg, dsi->reg_base + DSIM_MVPORCH_REG);
+
+ reg = DSIM_MAIN_HFP(vm->hfront_porch)
+ | DSIM_MAIN_HBP(vm->hback_porch);
+ writel(reg, dsi->reg_base + DSIM_MHPORCH_REG);
+
+ reg = DSIM_MAIN_VSA(vm->vsync_len)
+ | DSIM_MAIN_HSA(vm->hsync_len);
+ writel(reg, dsi->reg_base + DSIM_MSYNC_REG);
+ }
+
+ reg = DSIM_MAIN_HRESOL(vm->hactive) | DSIM_MAIN_VRESOL(vm->vactive);
+ writel(reg, dsi->reg_base + DSIM_MDRESOL_REG);
+
+ dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
+}
+
+static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
+{
+ u32 reg;
+
+ reg = readl(dsi->reg_base + DSIM_MDRESOL_REG);
+ if (enable)
+ reg |= DSIM_MAIN_STAND_BY;
+ else
+ reg &= ~DSIM_MAIN_STAND_BY;
+ writel(reg, dsi->reg_base + DSIM_MDRESOL_REG);
+}
+
+static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
+{
+ int timeout = 2000;
+
+ do {
+ u32 reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG);
+
+ if (!(reg & DSIM_SFR_HEADER_FULL))
+ return 0;
+
+ if (!cond_resched())
+ usleep_range(950, 1050);
+ } while (--timeout);
+
+ return -ETIMEDOUT;
+}
+
+static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm)
+{
+ u32 v = readl(dsi->reg_base + DSIM_ESCMODE_REG);
+
+ if (lpm)
+ v |= DSIM_CMD_LPDT_LP;
+ else
+ v &= ~DSIM_CMD_LPDT_LP;
+
+ writel(v, dsi->reg_base + DSIM_ESCMODE_REG);
+}
+
+static void exynos_dsi_force_bta(struct exynos_dsi *dsi)
+{
+ u32 v = readl(dsi->reg_base + DSIM_ESCMODE_REG);
+
+ v |= DSIM_FORCE_BTA;
+ writel(v, dsi->reg_base + DSIM_ESCMODE_REG);
+}
+
+static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
+ struct exynos_dsi_transfer *xfer)
+{
+ struct device *dev = dsi->dev;
+ const u8 *payload = xfer->tx_payload + xfer->tx_done;
+ u16 length = xfer->tx_len - xfer->tx_done;
+ bool first = !xfer->tx_done;
+ u32 reg;
+
+ dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
+ xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+
+ if (length > DSI_TX_FIFO_SIZE)
+ length = DSI_TX_FIFO_SIZE;
+
+ xfer->tx_done += length;
+
+ /* Send payload */
+ while (length >= 4) {
+ reg = (payload[3] << 24) | (payload[2] << 16)
+ | (payload[1] << 8) | payload[0];
+ writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG);
+ payload += 4;
+ length -= 4;
+ }
+
+ reg = 0;
+ switch (length) {
+ case 3:
+ reg |= payload[2] << 16;
+ /* Fall through */
+ case 2:
+ reg |= payload[1] << 8;
+ /* Fall through */
+ case 1:
+ reg |= payload[0];
+ writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG);
+ break;
+ case 0:
+ /* Do nothing */
+ break;
+ }
+
+ /* Send packet header */
+ if (!first)
+ return;
+
+ reg = (xfer->data[1] << 16) | (xfer->data[0] << 8) | xfer->data_id;
+ if (exynos_dsi_wait_for_hdr_fifo(dsi)) {
+ dev_err(dev, "waiting for header FIFO timed out\n");
+ return;
+ }
+
+ if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
+ dsi->state & DSIM_STATE_CMD_LPM)) {
+ exynos_dsi_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM);
+ dsi->state ^= DSIM_STATE_CMD_LPM;
+ }
+
+ writel(reg, dsi->reg_base + DSIM_PKTHDR_REG);
+
+ if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
+ exynos_dsi_force_bta(dsi);
+}
+
+static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
+ struct exynos_dsi_transfer *xfer)
+{
+ u8 *payload = xfer->rx_payload + xfer->rx_done;
+ bool first = !xfer->rx_done;
+ struct device *dev = dsi->dev;
+ u16 length;
+ u32 reg;
+
+ if (first) {
+ reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+
+ switch (reg & 0x3f) {
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ if (xfer->rx_len >= 2) {
+ payload[1] = reg >> 16;
+ ++xfer->rx_done;
+ }
+ /* Fall through */
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ payload[0] = reg >> 8;
+ ++xfer->rx_done;
+ xfer->rx_len = xfer->rx_done;
+ xfer->result = 0;
+ goto clear_fifo;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ dev_err(dev, "DSI Error Report: 0x%04x\n",
+ (reg >> 8) & 0xffff);
+ xfer->result = 0;
+ goto clear_fifo;
+ }
+
+ length = (reg >> 8) & 0xffff;
+ if (length > xfer->rx_len) {
+ dev_err(dev,
+ "response too long (%u > %u bytes), stripping\n",
+ length, xfer->rx_len);
+ length = xfer->rx_len;
+ } else if (length < xfer->rx_len)
+ xfer->rx_len = length;
+ }
+
+ length = xfer->rx_len - xfer->rx_done;
+ xfer->rx_done += length;
+
+ /* Receive payload */
+ while (length >= 4) {
+ reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+ payload[0] = (reg >> 0) & 0xff;
+ payload[1] = (reg >> 8) & 0xff;
+ payload[2] = (reg >> 16) & 0xff;
+ payload[3] = (reg >> 24) & 0xff;
+ payload += 4;
+ length -= 4;
+ }
+
+ if (length) {
+ reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+ switch (length) {
+ case 3:
+ payload[2] = (reg >> 16) & 0xff;
+ /* Fall through */
+ case 2:
+ payload[1] = (reg >> 8) & 0xff;
+ /* Fall through */
+ case 1:
+ payload[0] = reg & 0xff;
+ }
+ }
+
+ if (xfer->rx_done == xfer->rx_len)
+ xfer->result = 0;
+
+clear_fifo:
+ length = DSI_RX_FIFO_SIZE / 4;
+ do {
+ reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+ if (reg == DSI_RX_FIFO_EMPTY)
+ break;
+ } while (--length);
+}
+
+static void exynos_dsi_transfer_start(struct exynos_dsi *dsi)
+{
+ unsigned long flags;
+ struct exynos_dsi_transfer *xfer;
+ bool start = false;
+
+again:
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (list_empty(&dsi->transfer_list)) {
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ return;
+ }
+
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct exynos_dsi_transfer, list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (xfer->tx_len && xfer->tx_done == xfer->tx_len)
+ /* waiting for RX */
+ return;
+
+ exynos_dsi_send_to_fifo(dsi, xfer);
+
+ if (xfer->tx_len || xfer->rx_len)
+ return;
+
+ xfer->result = 0;
+ complete(&xfer->completed);
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (start)
+ goto again;
+}
+
+static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
+{
+ struct exynos_dsi_transfer *xfer;
+ unsigned long flags;
+ bool start = true;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (list_empty(&dsi->transfer_list)) {
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ return false;
+ }
+
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct exynos_dsi_transfer, list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ dev_dbg(dsi->dev,
+ "> xfer %p, tx_len %u, tx_done %u, rx_len %u, rx_done %u\n",
+ xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+
+ if (xfer->tx_done != xfer->tx_len)
+ return true;
+
+ if (xfer->rx_done != xfer->rx_len)
+ exynos_dsi_read_from_fifo(dsi, xfer);
+
+ if (xfer->rx_done != xfer->rx_len)
+ return true;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (!xfer->rx_len)
+ xfer->result = 0;
+ complete(&xfer->completed);
+
+ return start;
+}
+
+static void exynos_dsi_remove_transfer(struct exynos_dsi *dsi,
+ struct exynos_dsi_transfer *xfer)
+{
+ unsigned long flags;
+ bool start;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (!list_empty(&dsi->transfer_list) &&
+ xfer == list_first_entry(&dsi->transfer_list,
+ struct exynos_dsi_transfer, list)) {
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ if (start)
+ exynos_dsi_transfer_start(dsi);
+ return;
+ }
+
+ list_del_init(&xfer->list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+}
+
+static int exynos_dsi_transfer(struct exynos_dsi *dsi,
+ struct exynos_dsi_transfer *xfer)
+{
+ unsigned long flags;
+ bool stopped;
+
+ xfer->tx_done = 0;
+ xfer->rx_done = 0;
+ xfer->result = -ETIMEDOUT;
+ init_completion(&xfer->completed);
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ stopped = list_empty(&dsi->transfer_list);
+ list_add_tail(&xfer->list, &dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (stopped)
+ exynos_dsi_transfer_start(dsi);
+
+ wait_for_completion_timeout(&xfer->completed,
+ msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
+ if (xfer->result == -ETIMEDOUT) {
+ exynos_dsi_remove_transfer(dsi, xfer);
+ dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 2, xfer->data,
+ xfer->tx_len, xfer->tx_payload);
+ return -ETIMEDOUT;
+ }
+
+ /* Also covers hardware timeout condition */
+ return xfer->result;
+}
+
+static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
+{
+ struct exynos_dsi *dsi = dev_id;
+ u32 status;
+
+ status = readl(dsi->reg_base + DSIM_INTSRC_REG);
+ if (!status) {
+ static unsigned long int j;
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(dsi->dev, "spurious interrupt\n");
+ return IRQ_HANDLED;
+ }
+ writel(status, dsi->reg_base + DSIM_INTSRC_REG);
+
+ if (status & DSIM_INT_SW_RST_RELEASE) {
+ u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY);
+ writel(mask, dsi->reg_base + DSIM_INTMSK_REG);
+ complete(&dsi->completed);
+ return IRQ_HANDLED;
+ }
+
+ if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY)))
+ return IRQ_HANDLED;
+
+ if (exynos_dsi_transfer_finish(dsi))
+ exynos_dsi_transfer_start(dsi);
+
+ return IRQ_HANDLED;
+}
+
+static int exynos_dsi_init(struct exynos_dsi *dsi)
+{
+ exynos_dsi_enable_clock(dsi);
+ exynos_dsi_reset(dsi);
+ enable_irq(dsi->irq);
+ exynos_dsi_wait_for_reset(dsi);
+ exynos_dsi_init_link(dsi);
+
+ return 0;
+}
+
+static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel_node = device->dev.of_node;
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+
+ dsi->panel_node = NULL;
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+/* distinguish between short and long DSI packet types */
+static bool exynos_dsi_is_short_dsi_type(u8 type)
+{
+ return (type & 0x0f) <= 8;
+}
+
+static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
+ struct mipi_dsi_msg *msg)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct exynos_dsi_transfer xfer;
+ int ret;
+
+ if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
+ ret = exynos_dsi_init(dsi);
+ if (ret)
+ return ret;
+ dsi->state |= DSIM_STATE_INITIALIZED;
+ }
+
+ if (msg->tx_len == 0)
+ return -EINVAL;
+
+ xfer.data_id = msg->type | (msg->channel << 6);
+
+ if (exynos_dsi_is_short_dsi_type(msg->type)) {
+ const char *tx_buf = msg->tx_buf;
+
+ if (msg->tx_len > 2)
+ return -EINVAL;
+ xfer.tx_len = 0;
+ xfer.data[0] = tx_buf[0];
+ xfer.data[1] = (msg->tx_len == 2) ? tx_buf[1] : 0;
+ } else {
+ xfer.tx_len = msg->tx_len;
+ xfer.data[0] = msg->tx_len & 0xff;
+ xfer.data[1] = msg->tx_len >> 8;
+ xfer.tx_payload = msg->tx_buf;
+ }
+
+ xfer.rx_len = msg->rx_len;
+ xfer.rx_payload = msg->rx_buf;
+ xfer.flags = msg->flags;
+
+ ret = exynos_dsi_transfer(dsi, &xfer);
+ return (ret < 0) ? ret : xfer.rx_done;
+}
+
+static const struct mipi_dsi_host_ops exynos_dsi_ops = {
+ .attach = exynos_dsi_host_attach,
+ .detach = exynos_dsi_host_detach,
+ .transfer = exynos_dsi_host_transfer,
+};
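
To make the transfer path concrete, here is a minimal sketch of the kind of message a caller (normally the DSI core) would pass to .transfer. The helper name is made up for illustration, but the struct mipi_dsi_msg fields are exactly the ones consumed by exynos_dsi_host_transfer() above:

static ssize_t example_dcs_read(struct mipi_dsi_host *host, u8 cmd,
				void *buf, size_t len)
{
	/* illustrative helper, not part of the patch */
	struct mipi_dsi_msg msg = {
		.channel = 0,
		.type = MIPI_DSI_DCS_READ,	/* short packet type */
		.tx_buf = &cmd,
		.tx_len = 1,
		.rx_buf = buf,
		.rx_len = len,
		.flags = MIPI_DSI_MSG_REQ_ACK,
	};

	return host->ops->transfer(host, &msg);
}
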
+
+static int exynos_dsi_poweron(struct exynos_dsi *dsi)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dsi->bus_clk);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable bus clock %d\n", ret);
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(dsi->pll_clk);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable pll clock %d\n", ret);
+ goto err_pll_clk;
+ }
+
+ ret = phy_power_on(dsi->phy);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable phy %d\n", ret);
+ goto err_phy;
+ }
+
+ return 0;
+
+err_phy:
+ clk_disable_unprepare(dsi->pll_clk);
+err_pll_clk:
+ clk_disable_unprepare(dsi->bus_clk);
+err_bus_clk:
+ regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+
+ return ret;
+}
+
+static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
+{
+ int ret;
+
+ usleep_range(10000, 20000);
+
+ if (dsi->state & DSIM_STATE_INITIALIZED) {
+ dsi->state &= ~DSIM_STATE_INITIALIZED;
+
+ exynos_dsi_disable_clock(dsi);
+
+ disable_irq(dsi->irq);
+ }
+
+ dsi->state &= ~DSIM_STATE_CMD_LPM;
+
+ phy_power_off(dsi->phy);
+
+ clk_disable_unprepare(dsi->pll_clk);
+ clk_disable_unprepare(dsi->bus_clk);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0)
+ dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
+}
+
+static int exynos_dsi_enable(struct exynos_dsi *dsi)
+{
+ int ret;
+
+ if (dsi->state & DSIM_STATE_ENABLED)
+ return 0;
+
+ ret = exynos_dsi_poweron(dsi);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ exynos_dsi_poweroff(dsi);
+ return ret;
+ }
+
+ exynos_dsi_set_display_mode(dsi);
+ exynos_dsi_set_display_enable(dsi, true);
+
+ dsi->state |= DSIM_STATE_ENABLED;
+
+ return 0;
+}
+
+static void exynos_dsi_disable(struct exynos_dsi *dsi)
+{
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return;
+
+ exynos_dsi_set_display_enable(dsi, false);
+ drm_panel_disable(dsi->panel);
+ exynos_dsi_poweroff(dsi);
+
+ dsi->state &= ~DSIM_STATE_ENABLED;
+}
+
+static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
+{
+ struct exynos_dsi *dsi = display->ctx;
+
+ if (dsi->panel) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ exynos_dsi_enable(dsi);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ exynos_dsi_disable(dsi);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static enum drm_connector_status
+exynos_dsi_detect(struct drm_connector *connector, bool force)
+{
+ struct exynos_dsi *dsi = connector_to_dsi(connector);
+
+ if (!dsi->panel) {
+ dsi->panel = of_drm_find_panel(dsi->panel_node);
+ if (dsi->panel)
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ } else if (!dsi->panel_node) {
+ struct exynos_drm_display *display;
+
+ display = platform_get_drvdata(to_platform_device(dsi->dev));
+ exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ }
+
+ if (dsi->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static void exynos_dsi_connector_destroy(struct drm_connector *connector)
+{
+}
+
+static struct drm_connector_funcs exynos_dsi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = exynos_dsi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = exynos_dsi_connector_destroy,
+};
+
+static int exynos_dsi_get_modes(struct drm_connector *connector)
+{
+ struct exynos_dsi *dsi = connector_to_dsi(connector);
+
+ if (dsi->panel)
+ return dsi->panel->funcs->get_modes(dsi->panel);
+
+ return 0;
+}
+
+static int exynos_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+exynos_dsi_best_encoder(struct drm_connector *connector)
+{
+ struct exynos_dsi *dsi = connector_to_dsi(connector);
+
+ return dsi->encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
+ .get_modes = exynos_dsi_get_modes,
+ .mode_valid = exynos_dsi_mode_valid,
+ .best_encoder = exynos_dsi_best_encoder,
+};
+
+static int exynos_dsi_create_connector(struct exynos_drm_display *display,
+ struct drm_encoder *encoder)
+{
+ struct exynos_dsi *dsi = display->ctx;
+ struct drm_connector *connector = &dsi->connector;
+ int ret;
+
+ dsi->encoder = encoder;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &exynos_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static void exynos_dsi_mode_set(struct exynos_drm_display *display,
+ struct drm_display_mode *mode)
+{
+ struct exynos_dsi *dsi = display->ctx;
+ struct videomode *vm = &dsi->vm;
+
+ vm->hactive = mode->hdisplay;
+ vm->vactive = mode->vdisplay;
+ vm->vfront_porch = mode->vsync_start - mode->vdisplay;
+ vm->vback_porch = mode->vtotal - mode->vsync_end;
+ vm->vsync_len = mode->vsync_end - mode->vsync_start;
+ vm->hfront_porch = mode->hsync_start - mode->hdisplay;
+ vm->hback_porch = mode->htotal - mode->hsync_end;
+ vm->hsync_len = mode->hsync_end - mode->hsync_start;
+}
+
+static struct exynos_drm_display_ops exynos_dsi_display_ops = {
+ .create_connector = exynos_dsi_create_connector,
+ .mode_set = exynos_dsi_mode_set,
+ .dpms = exynos_dsi_dpms
+};
+
+static struct exynos_drm_display exynos_dsi_display = {
+ .type = EXYNOS_DISPLAY_TYPE_LCD,
+ .ops = &exynos_dsi_display_ops,
+};
+
+/* of_* functions will be removed after merge of of_graph patches */
+static struct device_node *
+of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
+{
+ struct device_node *np;
+
+ for_each_child_of_node(parent, np) {
+ u32 r;
+
+ if (!np->name || of_node_cmp(np->name, name))
+ continue;
+
+ if (of_property_read_u32(np, "reg", &r) < 0)
+ r = 0;
+
+ if (reg == r)
+ break;
+ }
+
+ return np;
+}
+
+static struct device_node *of_graph_get_port_by_reg(struct device_node *parent,
+ u32 reg)
+{
+ struct device_node *ports, *port;
+
+ ports = of_get_child_by_name(parent, "ports");
+ if (ports)
+ parent = ports;
+
+ port = of_get_child_by_name_reg(parent, "port", reg);
+
+ of_node_put(ports);
+
+ return port;
+}
+
+static struct device_node *
+of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg)
+{
+ return of_get_child_by_name_reg(port, "endpoint", reg);
+}
+
+static int exynos_dsi_of_read_u32(const struct device_node *np,
+ const char *propname, u32 *out_value)
+{
+ int ret = of_property_read_u32(np, propname, out_value);
+
+ if (ret < 0)
+ pr_err("%s: failed to get '%s' property\n", np->full_name,
+ propname);
+
+ return ret;
+}
+
+enum {
+ DSI_PORT_IN,
+ DSI_PORT_OUT
+};
+
+static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *port, *ep;
+ int ret;
+
+ ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
+ &dsi->pll_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ port = of_graph_get_port_by_reg(node, DSI_PORT_OUT);
+ if (!port) {
+ dev_err(dev, "no output port specified\n");
+ return -EINVAL;
+ }
+
+ ep = of_graph_get_endpoint_by_reg(port, 0);
+ of_node_put(port);
+ if (!ep) {
+ dev_err(dev, "no endpoint specified in output port\n");
+ return -EINVAL;
+ }
+
+ ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
+ &dsi->burst_clk_rate);
+ if (ret < 0)
+ goto end;
+
+ ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
+ &dsi->esc_clk_rate);
+
+end:
+ of_node_put(ep);
+
+ return ret;
+}
+
+static int exynos_dsi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct exynos_dsi *dsi;
+ int ret;
+
+ dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi) {
+ dev_err(&pdev->dev, "failed to allocate dsi object.\n");
+ return -ENOMEM;
+ }
+
+ init_completion(&dsi->completed);
+ spin_lock_init(&dsi->transfer_lock);
+ INIT_LIST_HEAD(&dsi->transfer_list);
+
+ dsi->dsi_host.ops = &exynos_dsi_ops;
+ dsi->dsi_host.dev = &pdev->dev;
+
+ dsi->dev = &pdev->dev;
+
+ ret = exynos_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ dsi->supplies[0].supply = "vddcore";
+ dsi->supplies[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(dsi->supplies),
+ dsi->supplies);
+ if (ret) {
+ dev_info(&pdev->dev, "failed to get regulators: %d\n", ret);
+ return -EPROBE_DEFER;
+ }
+
+ dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk");
+ if (IS_ERR(dsi->pll_clk)) {
+ dev_info(&pdev->dev, "failed to get dsi pll input clock\n");
+ return -EPROBE_DEFER;
+ }
+
+ dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ if (IS_ERR(dsi->bus_clk)) {
+ dev_info(&pdev->dev, "failed to get dsi bus clock\n");
+ return -EPROBE_DEFER;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dsi->reg_base)) {
+ dev_err(&pdev->dev, "failed to remap io region\n");
+ return PTR_ERR(dsi->reg_base);
+ }
+
+ dsi->phy = devm_phy_get(&pdev->dev, "dsim");
+ if (IS_ERR(dsi->phy)) {
+ dev_info(&pdev->dev, "failed to get dsim phy\n");
+ return -EPROBE_DEFER;
+ }
+
+ dsi->irq = platform_get_irq(pdev, 0);
+ if (dsi->irq < 0) {
+ dev_err(&pdev->dev, "failed to request dsi irq resource\n");
+ return dsi->irq;
+ }
+
+ irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, dsi->irq, NULL,
+ exynos_dsi_irq, IRQF_ONESHOT,
+ dev_name(&pdev->dev), dsi);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request dsi irq\n");
+ return ret;
+ }
+
+ exynos_dsi_display.ctx = dsi;
+
+ platform_set_drvdata(pdev, &exynos_dsi_display);
+ exynos_drm_display_register(&exynos_dsi_display);
+
+ return mipi_dsi_host_register(&dsi->dsi_host);
+}
+
+static int exynos_dsi_remove(struct platform_device *pdev)
+{
+ struct exynos_dsi *dsi = exynos_dsi_display.ctx;
+
+ exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
+
+ exynos_drm_display_unregister(&exynos_dsi_display);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int exynos_dsi_resume(struct device *dev)
+{
+ struct exynos_dsi *dsi = exynos_dsi_display.ctx;
+
+ if (dsi->state & DSIM_STATE_ENABLED) {
+ dsi->state &= ~DSIM_STATE_ENABLED;
+ exynos_dsi_enable(dsi);
+ }
+
+ return 0;
+}
+
+static int exynos_dsi_suspend(struct device *dev)
+{
+ struct exynos_dsi *dsi = exynos_dsi_display.ctx;
+
+ if (dsi->state & DSIM_STATE_ENABLED) {
+ exynos_dsi_disable(dsi);
+ dsi->state |= DSIM_STATE_ENABLED;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_dsi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume)
+};
+
+static struct of_device_id exynos_dsi_of_match[] = {
+ { .compatible = "samsung,exynos4210-mipi-dsi" },
+ { }
+};
+
+struct platform_driver dsi_driver = {
+ .probe = exynos_dsi_probe,
+ .remove = exynos_dsi_remove,
+ .driver = {
+ .name = "exynos-dsi",
+ .owner = THIS_MODULE,
+ .pm = &exynos_dsi_pm_ops,
+ .of_match_table = exynos_dsi_of_match,
+ },
+};
+
+MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC MIPI DSI Master");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 06f1b2a09da7..7e282e3d6038 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -17,7 +17,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
-#include "exynos_drm_connector.h"
#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\
drm_encoder)
@@ -26,72 +25,22 @@
* exynos specific encoder structure.
*
* @drm_encoder: encoder object.
- * @manager: specific encoder has its own manager to control a hardware
- * appropriately and we can access a hardware drawing on this manager.
- * @dpms: store the encoder dpms value.
- * @updated: indicate whether overlay data updating is needed or not.
+ * @display: the display structure that maps to this encoder
*/
struct exynos_drm_encoder {
- struct drm_crtc *old_crtc;
struct drm_encoder drm_encoder;
- struct exynos_drm_manager *manager;
- int dpms;
- bool updated;
+ struct exynos_drm_display *display;
};
-static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (exynos_drm_best_encoder(connector) == encoder) {
- DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
- connector->base.id, mode);
-
- exynos_drm_display_power(connector, mode);
- }
- }
-}
-
static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
{
- struct drm_device *dev = encoder->dev;
- struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
- struct exynos_drm_manager_ops *manager_ops = manager->ops;
struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_display *display = exynos_encoder->display;
DRM_DEBUG_KMS("encoder dpms: %d\n", mode);
- if (exynos_encoder->dpms == mode) {
- DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
- return;
- }
-
- mutex_lock(&dev->struct_mutex);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- if (manager_ops && manager_ops->apply)
- if (!exynos_encoder->updated)
- manager_ops->apply(manager->dev);
-
- exynos_drm_connector_power(encoder, mode);
- exynos_encoder->dpms = mode;
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- exynos_drm_connector_power(encoder, mode);
- exynos_encoder->dpms = mode;
- exynos_encoder->updated = false;
- break;
- default:
- DRM_ERROR("unspecified mode %d\n", mode);
- break;
- }
-
- mutex_unlock(&dev->struct_mutex);
+ if (display->ops->dpms)
+ display->ops->dpms(display, mode);
}
static bool
@@ -100,87 +49,31 @@ exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_display *display = exynos_encoder->display;
struct drm_connector *connector;
- struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
- struct exynos_drm_manager_ops *manager_ops = manager->ops;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
- if (manager_ops && manager_ops->mode_fixup)
- manager_ops->mode_fixup(manager->dev, connector,
- mode, adjusted_mode);
+ if (connector->encoder != encoder)
+ continue;
+
+ if (display->ops->mode_fixup)
+ display->ops->mode_fixup(display, connector, mode,
+ adjusted_mode);
}
return true;
}
-static void disable_plane_to_crtc(struct drm_device *dev,
- struct drm_crtc *old_crtc,
- struct drm_crtc *new_crtc)
-{
- struct drm_plane *plane;
-
- /*
- * if old_crtc isn't same as encoder->crtc then it means that
- * user changed crtc id to another one so the plane to old_crtc
- * should be disabled and plane->crtc should be set to new_crtc
- * (encoder->crtc)
- */
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- if (plane->crtc == old_crtc) {
- /*
- * do not change below call order.
- *
- * plane->funcs->disable_plane call checks
- * if encoder->crtc is same as plane->crtc and if same
- * then overlay_ops->disable callback will be called
- * to diasble current hw overlay so plane->crtc should
- * have new_crtc because new_crtc was set to
- * encoder->crtc in advance.
- */
- plane->crtc = new_crtc;
- plane->funcs->disable_plane(plane);
- }
- }
-}
-
static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
- struct exynos_drm_manager *manager;
- struct exynos_drm_manager_ops *manager_ops;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- struct exynos_drm_encoder *exynos_encoder;
-
- exynos_encoder = to_exynos_encoder(encoder);
-
- if (exynos_encoder->old_crtc != encoder->crtc &&
- exynos_encoder->old_crtc) {
-
- /*
- * disable a plane to old crtc and change
- * crtc of the plane to new one.
- */
- disable_plane_to_crtc(dev,
- exynos_encoder->old_crtc,
- encoder->crtc);
- }
-
- manager = exynos_drm_get_manager(encoder);
- manager_ops = manager->ops;
-
- if (manager_ops && manager_ops->mode_set)
- manager_ops->mode_set(manager->dev,
- adjusted_mode);
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_display *display = exynos_encoder->display;
- exynos_encoder->old_crtc = encoder->crtc;
- }
- }
+ if (display->ops->mode_set)
+ display->ops->mode_set(display, adjusted_mode);
}
static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
@@ -191,53 +84,15 @@ static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
{
struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_manager *manager = exynos_encoder->manager;
- struct exynos_drm_manager_ops *manager_ops = manager->ops;
-
- if (manager_ops && manager_ops->commit)
- manager_ops->commit(manager->dev);
-
- /*
- * this will avoid one issue that overlay data is updated to
- * real hardware two times.
- * And this variable will be used to check if the data was
- * already updated or not by exynos_drm_encoder_dpms function.
- */
- exynos_encoder->updated = true;
-
- /*
- * In case of setcrtc, there is no way to update encoder's dpms
- * so update it here.
- */
- exynos_encoder->dpms = DRM_MODE_DPMS_ON;
-}
+ struct exynos_drm_display *display = exynos_encoder->display;
-void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
-{
- struct exynos_drm_encoder *exynos_encoder;
- struct exynos_drm_manager_ops *ops;
- struct drm_device *dev = fb->dev;
- struct drm_encoder *encoder;
+ if (display->ops->dpms)
+ display->ops->dpms(display, DRM_MODE_DPMS_ON);
- /*
- * make sure that overlay data are updated to real hardware
- * for all encoders.
- */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- exynos_encoder = to_exynos_encoder(encoder);
- ops = exynos_encoder->manager->ops;
-
- /*
- * wait for vblank interrupt
- * - this makes sure that overlay data are updated to
- * real hardware.
- */
- if (ops->wait_for_vblank)
- ops->wait_for_vblank(exynos_encoder->manager->dev);
- }
+ if (display->ops->commit)
+ display->ops->commit(display);
}
-
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_plane *plane;
@@ -246,7 +101,7 @@ static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
/* all planes connected to this encoder should be also disabled. */
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
if (plane->crtc == encoder->crtc)
plane->funcs->disable_plane(plane);
}
@@ -263,10 +118,7 @@ static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
{
- struct exynos_drm_encoder *exynos_encoder =
- to_exynos_encoder(encoder);
-
- exynos_encoder->manager->pipe = -1;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(exynos_encoder);
@@ -281,13 +133,12 @@ static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
struct drm_encoder *clone;
struct drm_device *dev = encoder->dev;
struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_display_ops *display_ops =
- exynos_encoder->manager->display_ops;
+ struct exynos_drm_display *display = exynos_encoder->display;
unsigned int clone_mask = 0;
int cnt = 0;
list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
- switch (display_ops->type) {
+ switch (display->type) {
case EXYNOS_DISPLAY_TYPE_LCD:
case EXYNOS_DISPLAY_TYPE_HDMI:
case EXYNOS_DISPLAY_TYPE_VIDI:
@@ -311,24 +162,20 @@ void exynos_drm_encoder_setup(struct drm_device *dev)
struct drm_encoder *
exynos_drm_encoder_create(struct drm_device *dev,
- struct exynos_drm_manager *manager,
- unsigned int possible_crtcs)
+ struct exynos_drm_display *display,
+ unsigned long possible_crtcs)
{
struct drm_encoder *encoder;
struct exynos_drm_encoder *exynos_encoder;
- if (!manager || !possible_crtcs)
- return NULL;
-
- if (!manager->dev)
+ if (!possible_crtcs)
return NULL;
exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
if (!exynos_encoder)
return NULL;
- exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
- exynos_encoder->manager = manager;
+ exynos_encoder->display = display;
encoder = &exynos_encoder->drm_encoder;
encoder->possible_crtcs = possible_crtcs;
@@ -344,149 +191,7 @@ exynos_drm_encoder_create(struct drm_device *dev,
return encoder;
}
-struct exynos_drm_manager *exynos_drm_get_manager(struct drm_encoder *encoder)
-{
- return to_exynos_encoder(encoder)->manager;
-}
-
-void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
- void (*fn)(struct drm_encoder *, void *))
-{
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
- struct exynos_drm_private *private = dev->dev_private;
- struct exynos_drm_manager *manager;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- /*
- * if crtc is detached from encoder, check pipe,
- * otherwise check crtc attached to encoder
- */
- if (!encoder->crtc) {
- manager = to_exynos_encoder(encoder)->manager;
- if (manager->pipe < 0 ||
- private->crtc[manager->pipe] != crtc)
- continue;
- } else {
- if (encoder->crtc != crtc)
- continue;
- }
-
- fn(encoder, data);
- }
-}
-
-void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data)
-{
- struct exynos_drm_manager *manager =
- to_exynos_encoder(encoder)->manager;
- struct exynos_drm_manager_ops *manager_ops = manager->ops;
- int crtc = *(int *)data;
-
- if (manager->pipe != crtc)
- return;
-
- if (manager_ops->enable_vblank)
- manager_ops->enable_vblank(manager->dev);
-}
-
-void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
-{
- struct exynos_drm_manager *manager =
- to_exynos_encoder(encoder)->manager;
- struct exynos_drm_manager_ops *manager_ops = manager->ops;
- int crtc = *(int *)data;
-
- if (manager->pipe != crtc)
- return;
-
- if (manager_ops->disable_vblank)
- manager_ops->disable_vblank(manager->dev);
-}
-
-void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
-{
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_manager *manager = exynos_encoder->manager;
- struct exynos_drm_manager_ops *manager_ops = manager->ops;
- int mode = *(int *)data;
-
- if (manager_ops && manager_ops->dpms)
- manager_ops->dpms(manager->dev, mode);
-
- /*
- * if this condition is ok then it means that the crtc is already
- * detached from encoder and last function for detaching is properly
- * done, so clear pipe from manager to prevent repeated call.
- */
- if (mode > DRM_MODE_DPMS_ON) {
- if (!encoder->crtc)
- manager->pipe = -1;
- }
-}
-
-void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data)
-{
- struct exynos_drm_manager *manager =
- to_exynos_encoder(encoder)->manager;
- int pipe = *(int *)data;
-
- /*
- * when crtc is detached from encoder, this pipe is used
- * to select manager operation
- */
- manager->pipe = pipe;
-}
-
-void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data)
-{
- struct exynos_drm_manager *manager =
- to_exynos_encoder(encoder)->manager;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
- struct exynos_drm_overlay *overlay = data;
-
- if (overlay_ops && overlay_ops->mode_set)
- overlay_ops->mode_set(manager->dev, overlay);
-}
-
-void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data)
+struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder)
{
- struct exynos_drm_manager *manager =
- to_exynos_encoder(encoder)->manager;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
- int zpos = DEFAULT_ZPOS;
-
- if (data)
- zpos = *(int *)data;
-
- if (overlay_ops && overlay_ops->commit)
- overlay_ops->commit(manager->dev, zpos);
-}
-
-void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data)
-{
- struct exynos_drm_manager *manager =
- to_exynos_encoder(encoder)->manager;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
- int zpos = DEFAULT_ZPOS;
-
- if (data)
- zpos = *(int *)data;
-
- if (overlay_ops && overlay_ops->enable)
- overlay_ops->enable(manager->dev, zpos);
-}
-
-void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
-{
- struct exynos_drm_manager *manager =
- to_exynos_encoder(encoder)->manager;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
- int zpos = DEFAULT_ZPOS;
-
- if (data)
- zpos = *(int *)data;
-
- if (overlay_ops && overlay_ops->disable)
- overlay_ops->disable(manager->dev, zpos);
+ return to_exynos_encoder(encoder)->display;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 89e2fb0770af..b7a1620a7e79 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -18,20 +18,8 @@ struct exynos_drm_manager;
void exynos_drm_encoder_setup(struct drm_device *dev);
struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
- struct exynos_drm_manager *mgr,
- unsigned int possible_crtcs);
-struct exynos_drm_manager *
-exynos_drm_get_manager(struct drm_encoder *encoder);
-void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
- void (*fn)(struct drm_encoder *, void *));
-void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
-void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
+ struct exynos_drm_display *display,
+ unsigned long possible_crtcs);
+struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index ea39e0ef2ae4..65a22cad7b36 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -20,9 +20,10 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
+#include "exynos_drm_fbdev.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
-#include "exynos_drm_encoder.h"
+#include "exynos_drm_crtc.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -71,7 +72,7 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
unsigned int i;
/* make sure that overlay data are updated before relesing fb. */
- exynos_drm_encoder_complete_scanout(fb);
+ exynos_drm_crtc_complete_scanout(fb);
drm_framebuffer_cleanup(fb);
@@ -300,6 +301,8 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
if (fb_helper)
drm_fb_helper_hotplug_event(fb_helper);
+ else
+ exynos_drm_fbdev_init(dev);
}
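The change to exynos_drm_output_poll_changed() defers fbdev creation until something is actually hotplugged: if the fb helper already exists the event is forwarded, otherwise exynos_drm_fbdev_init() is called now (and, further down, fbdev init bails out early when nothing is connected). A toy standalone sketch of that lazy-init-on-hotplug idea, with invented helper names:

/* Minimal sketch of lazy fbdev setup on the first hotplug event; the
 * function names are placeholders, not the driver's real symbols. */
#include <stdbool.h>
#include <stdio.h>

static bool fbdev_ready;

static void fbdev_init(void)
{
	fbdev_ready = true;
	printf("fbdev created on first hotplug\n");
}

static void hotplug_event(void)
{
	printf("forward hotplug to existing fbdev helper\n");
}

/* mirrors the patched output_poll_changed(): forward the event if the
 * helper exists, otherwise create it now that something is connected */
static void output_poll_changed(void)
{
	if (fbdev_ready)
		hotplug_event();
	else
		fbdev_init();
}

int main(void)
{
	output_poll_changed();	/* first hotplug: creates the helper */
	output_poll_changed();	/* later hotplugs: just forwarded */
	return 0;
}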
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e7c2f2d07f19..addbf7536da4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -90,7 +90,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
/* RGB formats use only one buffer */
buffer = exynos_drm_fb_buffer(fb, 0);
if (!buffer) {
- DRM_LOG_KMS("buffer is null.\n");
+ DRM_DEBUG_KMS("buffer is null.\n");
return -EFAULT;
}
@@ -237,6 +237,24 @@ static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
.fb_probe = exynos_drm_fbdev_create,
};
+bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ bool ret = false;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->status != connector_status_connected)
+ continue;
+
+ ret = true;
+ break;
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
int exynos_drm_fbdev_init(struct drm_device *dev)
{
struct exynos_drm_fbdev *fbdev;
@@ -248,6 +266,9 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
return 0;
+ if (!exynos_drm_fbdev_is_anything_connected(dev))
+ return 0;
+
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
return -ENOMEM;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a20440ce32e6..40fd6ccfcd6f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -62,7 +62,7 @@
/* FIMD has totally five hardware windows. */
#define WINDOWS_NR 5
-#define get_fimd_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_fimd_manager(dev) platform_get_drvdata(to_platform_device(dev))
struct fimd_driver_data {
unsigned int timing_base;
@@ -105,20 +105,18 @@ struct fimd_win_data {
};
struct fimd_context {
- struct exynos_drm_subdrv subdrv;
- int irq;
- struct drm_crtc *crtc;
+ struct device *dev;
+ struct drm_device *drm_dev;
struct clk *bus_clk;
struct clk *lcd_clk;
void __iomem *regs;
+ struct drm_display_mode mode;
struct fimd_win_data win_data[WINDOWS_NR];
- unsigned int clkdiv;
unsigned int default_win;
unsigned long irq_flags;
- u32 vidcon0;
u32 vidcon1;
bool suspended;
- struct mutex lock;
+ int pipe;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
@@ -145,153 +143,147 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
return (struct fimd_driver_data *)of_id->data;
}
-static bool fimd_display_is_connected(struct device *dev)
+static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
+ struct drm_device *drm_dev, int pipe)
{
- /* TODO. */
+ struct fimd_context *ctx = mgr->ctx;
- return true;
-}
+ ctx->drm_dev = drm_dev;
+ ctx->pipe = pipe;
-static void *fimd_get_panel(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
+ /*
+ * enable drm irq mode.
+ * - with irq_enabled = true, we can use the vblank feature.
+ *
+ * Note that the driver installs its own interrupt handler instead
+ * of the drm core one, because the drm framework supports only one
+ * irq handler.
+ */
+ drm_dev->irq_enabled = true;
- return &ctx->panel;
-}
+ /*
+ * With vblank_disable_allowed = true, the vblank interrupt is disabled
+ * by the drm timer once the current process gives up ownership of the
+ * vblank event (i.e. after drm_vblank_put() is called).
+ */
+ drm_dev->vblank_disable_allowed = true;
-static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode)
-{
- /* TODO. */
+ /* attach this device to the iommu mapping if supported. */
+ if (is_drm_iommu_supported(ctx->drm_dev))
+ drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
return 0;
}
-static int fimd_display_power_on(struct device *dev, int mode)
+static void fimd_mgr_remove(struct exynos_drm_manager *mgr)
{
- /* TODO */
+ struct fimd_context *ctx = mgr->ctx;
- return 0;
+ /* detach this device from the iommu mapping if supported. */
+ if (is_drm_iommu_supported(ctx->drm_dev))
+ drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
}
-static struct exynos_drm_display_ops fimd_display_ops = {
- .type = EXYNOS_DISPLAY_TYPE_LCD,
- .is_connected = fimd_display_is_connected,
- .get_panel = fimd_get_panel,
- .check_mode = fimd_check_mode,
- .power_on = fimd_display_power_on,
-};
-
-static void fimd_dpms(struct device *subdrv_dev, int mode)
+static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
+ const struct drm_display_mode *mode)
{
- struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+ unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
+ u32 clkdiv;
- DRM_DEBUG_KMS("%d\n", mode);
+ /* Find the clock divider value that gets us closest to ideal_clk */
+ clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk);
- mutex_lock(&ctx->lock);
+ return (clkdiv < 0x100) ? clkdiv : 0xff;
+}
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- /*
- * enable fimd hardware only if suspended status.
- *
- * P.S. fimd_dpms function would be called at booting time so
- * clk_enable could be called double time.
- */
- if (ctx->suspended)
- pm_runtime_get_sync(subdrv_dev);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- if (!ctx->suspended)
- pm_runtime_put_sync(subdrv_dev);
- break;
- default:
- DRM_DEBUG_KMS("unspecified mode %d\n", mode);
- break;
- }
+static bool fimd_mode_fixup(struct exynos_drm_manager *mgr,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ if (adjusted_mode->vrefresh == 0)
+ adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
- mutex_unlock(&ctx->lock);
+ return true;
}
-static void fimd_apply(struct device *subdrv_dev)
+static void fimd_mode_set(struct exynos_drm_manager *mgr,
+ const struct drm_display_mode *in_mode)
{
- struct fimd_context *ctx = get_fimd_context(subdrv_dev);
- struct exynos_drm_manager *mgr = ctx->subdrv.manager;
- struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
- struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
- struct fimd_win_data *win_data;
- int i;
-
- for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- if (win_data->enabled && (ovl_ops && ovl_ops->commit))
- ovl_ops->commit(subdrv_dev, i);
- }
+ struct fimd_context *ctx = mgr->ctx;
- if (mgr_ops && mgr_ops->commit)
- mgr_ops->commit(subdrv_dev);
+ drm_mode_copy(&ctx->mode, in_mode);
}
-static void fimd_commit(struct device *dev)
+static void fimd_commit(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = get_fimd_context(dev);
- struct exynos_drm_panel_info *panel = &ctx->panel;
- struct videomode *vm = &panel->vm;
+ struct fimd_context *ctx = mgr->ctx;
+ struct drm_display_mode *mode = &ctx->mode;
struct fimd_driver_data *driver_data;
- u32 val;
+ u32 val, clkdiv, vidcon1;
+ int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
driver_data = ctx->driver_data;
if (ctx->suspended)
return;
- /* setup polarity values from machine code. */
- writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
+ /* nothing to do if we haven't set the mode yet */
+ if (mode->htotal == 0 || mode->vtotal == 0)
+ return;
+
+ /* setup polarity values */
+ vidcon1 = ctx->vidcon1;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ vidcon1 |= VIDCON1_INV_VSYNC;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ vidcon1 |= VIDCON1_INV_HSYNC;
+ writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
/* setup vertical timing values. */
- val = VIDTCON0_VBPD(vm->vback_porch - 1) |
- VIDTCON0_VFPD(vm->vfront_porch - 1) |
- VIDTCON0_VSPW(vm->vsync_len - 1);
+ vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
+ vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
+
+ val = VIDTCON0_VBPD(vbpd - 1) |
+ VIDTCON0_VFPD(vfpd - 1) |
+ VIDTCON0_VSPW(vsync_len - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
/* setup horizontal timing values. */
- val = VIDTCON1_HBPD(vm->hback_porch - 1) |
- VIDTCON1_HFPD(vm->hfront_porch - 1) |
- VIDTCON1_HSPW(vm->hsync_len - 1);
+ hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
+ hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
+
+ val = VIDTCON1_HBPD(hbpd - 1) |
+ VIDTCON1_HFPD(hfpd - 1) |
+ VIDTCON1_HSPW(hsync_len - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
/* setup horizontal and vertical display size. */
- val = VIDTCON2_LINEVAL(vm->vactive - 1) |
- VIDTCON2_HOZVAL(vm->hactive - 1) |
- VIDTCON2_LINEVAL_E(vm->vactive - 1) |
- VIDTCON2_HOZVAL_E(vm->hactive - 1);
+ val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
+ VIDTCON2_HOZVAL(mode->hdisplay - 1) |
+ VIDTCON2_LINEVAL_E(mode->vdisplay - 1) |
+ VIDTCON2_HOZVAL_E(mode->hdisplay - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
- /* setup clock source, clock divider, enable dma. */
- val = ctx->vidcon0;
- val &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
-
- if (ctx->driver_data->has_clksel) {
- val &= ~VIDCON0_CLKSEL_MASK;
- val |= VIDCON0_CLKSEL_LCD;
- }
-
- if (ctx->clkdiv > 1)
- val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR;
- else
- val &= ~VIDCON0_CLKDIR; /* 1:1 clock */
-
/*
* fields of register with prefix '_F' would be updated
* at vsync(same as dma start)
*/
- val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
+ val = VIDCON0_ENVID | VIDCON0_ENVID_F;
+
+ if (ctx->driver_data->has_clksel)
+ val |= VIDCON0_CLKSEL_LCD;
+
+ clkdiv = fimd_calc_clkdiv(ctx, mode);
+ if (clkdiv > 1)
+ val |= VIDCON0_CLKVAL_F(clkdiv - 1) | VIDCON0_CLKDIR;
+
writel(val, ctx->regs + VIDCON0);
}
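The rewritten fimd_commit() derives the sync widths and porches directly from the crtc_* fields of the DRM mode rather than from a struct videomode. The standalone sketch below mirrors that arithmetic for an assumed 1280x800 (CVT reduced-blanking) mode; the numbers are illustrative only.

/* Sketch of how the crtc_* timing fields map to FIMD's porch/sync values. */
#include <stdio.h>

struct mode {
	int crtc_hdisplay, crtc_hsync_start, crtc_hsync_end, crtc_htotal;
	int crtc_vdisplay, crtc_vsync_start, crtc_vsync_end, crtc_vtotal;
};

int main(void)
{
	struct mode m = {
		.crtc_hdisplay = 1280, .crtc_hsync_start = 1328,
		.crtc_hsync_end = 1360, .crtc_htotal = 1440,
		.crtc_vdisplay = 800, .crtc_vsync_start = 803,
		.crtc_vsync_end = 809, .crtc_vtotal = 823,
	};

	/* same arithmetic as fimd_commit(): front porch, sync width, back porch */
	int hfpd = m.crtc_hsync_start - m.crtc_hdisplay;
	int hsync_len = m.crtc_hsync_end - m.crtc_hsync_start;
	int hbpd = m.crtc_htotal - m.crtc_hsync_end;

	int vfpd = m.crtc_vsync_start - m.crtc_vdisplay;
	int vsync_len = m.crtc_vsync_end - m.crtc_vsync_start;
	int vbpd = m.crtc_vtotal - m.crtc_vsync_end;

	/* the registers take "value - 1", exactly as VIDTCON0/VIDTCON1 are built */
	printf("VIDTCON0: VBPD=%d VFPD=%d VSPW=%d\n", vbpd - 1, vfpd - 1, vsync_len - 1);
	printf("VIDTCON1: HBPD=%d HFPD=%d HSPW=%d\n", hbpd - 1, hfpd - 1, hsync_len - 1);
	return 0;
}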
-static int fimd_enable_vblank(struct device *dev)
+static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_context *ctx = mgr->ctx;
u32 val;
if (ctx->suspended)
@@ -314,9 +306,9 @@ static int fimd_enable_vblank(struct device *dev)
return 0;
}
-static void fimd_disable_vblank(struct device *dev)
+static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_context *ctx = mgr->ctx;
u32 val;
if (ctx->suspended)
@@ -332,9 +324,9 @@ static void fimd_disable_vblank(struct device *dev)
}
}
-static void fimd_wait_for_vblank(struct device *dev)
+static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_context *ctx = mgr->ctx;
if (ctx->suspended)
return;
@@ -351,25 +343,16 @@ static void fimd_wait_for_vblank(struct device *dev)
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
-static struct exynos_drm_manager_ops fimd_manager_ops = {
- .dpms = fimd_dpms,
- .apply = fimd_apply,
- .commit = fimd_commit,
- .enable_vblank = fimd_enable_vblank,
- .disable_vblank = fimd_disable_vblank,
- .wait_for_vblank = fimd_wait_for_vblank,
-};
-
-static void fimd_win_mode_set(struct device *dev,
- struct exynos_drm_overlay *overlay)
+static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
+ struct exynos_drm_overlay *overlay)
{
- struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_context *ctx = mgr->ctx;
struct fimd_win_data *win_data;
int win;
unsigned long offset;
if (!overlay) {
- dev_err(dev, "overlay is NULL\n");
+ DRM_ERROR("overlay is NULL\n");
return;
}
@@ -409,9 +392,8 @@ static void fimd_win_mode_set(struct device *dev,
overlay->fb_width, overlay->crtc_width);
}
-static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
+static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
{
- struct fimd_context *ctx = get_fimd_context(dev);
struct fimd_win_data *win_data = &ctx->win_data[win];
unsigned long val;
@@ -467,9 +449,8 @@ static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
writel(val, ctx->regs + WINCON(win));
}
-static void fimd_win_set_colkey(struct device *dev, unsigned int win)
+static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
{
- struct fimd_context *ctx = get_fimd_context(dev);
unsigned int keycon0 = 0, keycon1 = 0;
keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
@@ -508,9 +489,9 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
writel(val, ctx->regs + reg);
}
-static void fimd_win_commit(struct device *dev, int zpos)
+static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
{
- struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_context *ctx = mgr->ctx;
struct fimd_win_data *win_data;
int win = zpos;
unsigned long val, alpha, size;
@@ -528,6 +509,12 @@ static void fimd_win_commit(struct device *dev, int zpos)
win_data = &ctx->win_data[win];
+ /* If suspended, enable this on resume */
+ if (ctx->suspended) {
+ win_data->resume = true;
+ return;
+ }
+
/*
* SHADOWCON/PRTCON register is used for enabling timing.
*
@@ -605,11 +592,11 @@ static void fimd_win_commit(struct device *dev, int zpos)
DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
}
- fimd_win_set_pixfmt(dev, win);
+ fimd_win_set_pixfmt(ctx, win);
/* hardware window 0 doesn't support color key. */
if (win != 0)
- fimd_win_set_colkey(dev, win);
+ fimd_win_set_colkey(ctx, win);
/* wincon */
val = readl(ctx->regs + WINCON(win));
@@ -628,9 +615,9 @@ static void fimd_win_commit(struct device *dev, int zpos)
win_data->enabled = true;
}
-static void fimd_win_disable(struct device *dev, int zpos)
+static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
{
- struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_context *ctx = mgr->ctx;
struct fimd_win_data *win_data;
int win = zpos;
u32 val;
@@ -669,132 +656,6 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data->enabled = false;
}
-static struct exynos_drm_overlay_ops fimd_overlay_ops = {
- .mode_set = fimd_win_mode_set,
- .commit = fimd_win_commit,
- .disable = fimd_win_disable,
-};
-
-static struct exynos_drm_manager fimd_manager = {
- .pipe = -1,
- .ops = &fimd_manager_ops,
- .overlay_ops = &fimd_overlay_ops,
- .display_ops = &fimd_display_ops,
-};
-
-static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
-{
- struct fimd_context *ctx = (struct fimd_context *)dev_id;
- struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct drm_device *drm_dev = subdrv->drm_dev;
- struct exynos_drm_manager *manager = subdrv->manager;
- u32 val;
-
- val = readl(ctx->regs + VIDINTCON1);
-
- if (val & VIDINTCON1_INT_FRAME)
- /* VSYNC interrupt */
- writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
-
- /* check the crtc is detached already from encoder */
- if (manager->pipe < 0)
- goto out;
-
- drm_handle_vblank(drm_dev, manager->pipe);
- exynos_drm_crtc_finish_pageflip(drm_dev, manager->pipe);
-
- /* set wait vsync event to zero and wake up queue. */
- if (atomic_read(&ctx->wait_vsync_event)) {
- atomic_set(&ctx->wait_vsync_event, 0);
- wake_up(&ctx->wait_vsync_queue);
- }
-out:
- return IRQ_HANDLED;
-}
-
-static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
-{
- /*
- * enable drm irq mode.
- * - with irq_enabled = true, we can use the vblank feature.
- *
- * P.S. note that we wouldn't use drm irq handler but
- * just specific driver own one instead because
- * drm framework supports only one irq handler.
- */
- drm_dev->irq_enabled = true;
-
- /*
- * with vblank_disable_allowed = true, vblank interrupt will be disabled
- * by drm timer once a current process gives up ownership of
- * vblank event.(after drm_vblank_put function is called)
- */
- drm_dev->vblank_disable_allowed = true;
-
- /* attach this sub driver to iommu mapping if supported. */
- if (is_drm_iommu_supported(drm_dev))
- drm_iommu_attach_device(drm_dev, dev);
-
- return 0;
-}
-
-static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
- /* detach this sub driver from iommu mapping if supported. */
- if (is_drm_iommu_supported(drm_dev))
- drm_iommu_detach_device(drm_dev, dev);
-}
-
-static int fimd_configure_clocks(struct fimd_context *ctx, struct device *dev)
-{
- struct videomode *vm = &ctx->panel.vm;
- unsigned long clk;
-
- ctx->bus_clk = devm_clk_get(dev, "fimd");
- if (IS_ERR(ctx->bus_clk)) {
- dev_err(dev, "failed to get bus clock\n");
- return PTR_ERR(ctx->bus_clk);
- }
-
- ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
- if (IS_ERR(ctx->lcd_clk)) {
- dev_err(dev, "failed to get lcd clock\n");
- return PTR_ERR(ctx->lcd_clk);
- }
-
- clk = clk_get_rate(ctx->lcd_clk);
- if (clk == 0) {
- dev_err(dev, "error getting sclk_fimd clock rate\n");
- return -EINVAL;
- }
-
- if (vm->pixelclock == 0) {
- unsigned long c;
- c = vm->hactive + vm->hback_porch + vm->hfront_porch +
- vm->hsync_len;
- c *= vm->vactive + vm->vback_porch + vm->vfront_porch +
- vm->vsync_len;
- vm->pixelclock = c * FIMD_DEFAULT_FRAMERATE;
- if (vm->pixelclock == 0) {
- dev_err(dev, "incorrect display timings\n");
- return -EINVAL;
- }
- dev_warn(dev, "pixel clock recalculated to %luHz (%dHz frame rate)\n",
- vm->pixelclock, FIMD_DEFAULT_FRAMERATE);
- }
- ctx->clkdiv = DIV_ROUND_UP(clk, vm->pixelclock);
- if (ctx->clkdiv > 256) {
- dev_warn(dev, "calculated pixel clock divider too high (%u), lowered to 256\n",
- ctx->clkdiv);
- ctx->clkdiv = 256;
- }
- vm->pixelclock = clk / ctx->clkdiv;
- DRM_DEBUG_KMS("pixel clock = %lu, clkdiv = %d\n", vm->pixelclock,
- ctx->clkdiv);
-
- return 0;
-}
-
static void fimd_clear_win(struct fimd_context *ctx, int win)
{
writel(0, ctx->regs + WINCON(win));
@@ -808,111 +669,190 @@ static void fimd_clear_win(struct fimd_context *ctx, int win)
fimd_shadow_protect_win(ctx, win, false);
}
-static int fimd_clock(struct fimd_context *ctx, bool enable)
+static void fimd_window_suspend(struct exynos_drm_manager *mgr)
{
- if (enable) {
- int ret;
-
- ret = clk_prepare_enable(ctx->bus_clk);
- if (ret < 0)
- return ret;
+ struct fimd_context *ctx = mgr->ctx;
+ struct fimd_win_data *win_data;
+ int i;
- ret = clk_prepare_enable(ctx->lcd_clk);
- if (ret < 0) {
- clk_disable_unprepare(ctx->bus_clk);
- return ret;
- }
- } else {
- clk_disable_unprepare(ctx->lcd_clk);
- clk_disable_unprepare(ctx->bus_clk);
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ if (win_data->enabled)
+ fimd_win_disable(mgr, i);
}
-
- return 0;
+ fimd_wait_for_vblank(mgr);
}
-static void fimd_window_suspend(struct device *dev)
+static void fimd_window_resume(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_context *ctx = mgr->ctx;
struct fimd_win_data *win_data;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
win_data = &ctx->win_data[i];
- win_data->resume = win_data->enabled;
- fimd_win_disable(dev, i);
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
}
- fimd_wait_for_vblank(dev);
}
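fimd_window_suspend() and fimd_window_resume() round-trip each window's enabled state through the resume flag, and fimd_apply() then re-commits whatever was enabled when power comes back. Below is a standalone sketch of that state handling; the window count and state fields only loosely mirror the driver.

/* Sketch of the suspend/resume round-trip for the hardware windows. */
#include <stdbool.h>
#include <stdio.h>

#define NR_WINDOWS 5

struct win_state {
	bool enabled;	/* window currently shown on hardware */
	bool resume;	/* remembers across suspend that it was shown */
};

static void windows_suspend(struct win_state *w)
{
	for (int i = 0; i < NR_WINDOWS; i++) {
		w[i].resume = w[i].enabled;	/* save current state */
		if (w[i].enabled) {
			printf("disable win %d\n", i);
			w[i].enabled = false;
		}
	}
}

static void windows_resume(struct win_state *w)
{
	for (int i = 0; i < NR_WINDOWS; i++) {
		w[i].enabled = w[i].resume;	/* restore saved state */
		w[i].resume = false;
	}
}

static void apply(struct win_state *w)
{
	for (int i = 0; i < NR_WINDOWS; i++)
		if (w[i].enabled)
			printf("re-commit win %d\n", i);	/* like fimd_apply() */
}

int main(void)
{
	struct win_state w[NR_WINDOWS] = { [0] = { .enabled = true } };

	windows_suspend(w);	/* power-off path */
	windows_resume(w);	/* power-on path ... */
	apply(w);		/* ... followed by re-committing enabled windows */
	return 0;
}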
-static void fimd_window_resume(struct device *dev)
+static void fimd_apply(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_context *ctx = mgr->ctx;
struct fimd_win_data *win_data;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
win_data = &ctx->win_data[i];
- win_data->enabled = win_data->resume;
- win_data->resume = false;
+ if (win_data->enabled)
+ fimd_win_commit(mgr, i);
}
+
+ fimd_commit(mgr);
}
-static int fimd_activate(struct fimd_context *ctx, bool enable)
+static int fimd_poweron(struct exynos_drm_manager *mgr)
{
- struct device *dev = ctx->subdrv.dev;
- if (enable) {
- int ret;
+ struct fimd_context *ctx = mgr->ctx;
+ int ret;
- ret = fimd_clock(ctx, true);
- if (ret < 0)
- return ret;
+ if (!ctx->suspended)
+ return 0;
- ctx->suspended = false;
+ ctx->suspended = false;
- /* if vblank was enabled status, enable it again. */
- if (test_and_clear_bit(0, &ctx->irq_flags))
- fimd_enable_vblank(dev);
+ pm_runtime_get_sync(ctx->dev);
- fimd_window_resume(dev);
- } else {
- fimd_window_suspend(dev);
+ ret = clk_prepare_enable(ctx->bus_clk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
+ goto bus_clk_err;
+ }
+
+ ret = clk_prepare_enable(ctx->lcd_clk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
+ goto lcd_clk_err;
+ }
- fimd_clock(ctx, false);
- ctx->suspended = true;
+ /* if vblank was previously enabled, re-enable it. */
+ if (test_and_clear_bit(0, &ctx->irq_flags)) {
+ ret = fimd_enable_vblank(mgr);
+ if (ret) {
+ DRM_ERROR("Failed to re-enable vblank [%d]\n", ret);
+ goto enable_vblank_err;
+ }
}
+ fimd_window_resume(mgr);
+
+ fimd_apply(mgr);
+
return 0;
+
+enable_vblank_err:
+ clk_disable_unprepare(ctx->lcd_clk);
+lcd_clk_err:
+ clk_disable_unprepare(ctx->bus_clk);
+bus_clk_err:
+ ctx->suspended = true;
+ return ret;
}
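fimd_poweron() takes a runtime PM reference, enables the two clocks and optionally vblank in order, and unwinds in reverse through goto labels when any step fails. A standalone sketch of that unwind idiom, with stand-in acquire/release helpers rather than real kernel calls:

/* Sketch of goto-based error unwinding; acquire()/release() are fakes. */
#include <stdio.h>

static int acquire(const char *what, int fail)
{
	if (fail) {
		printf("failed to acquire %s\n", what);
		return -1;
	}
	printf("acquired %s\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("released %s\n", what);
}

static int power_on(int fail_at)
{
	int ret;

	ret = acquire("bus clk", fail_at == 1);
	if (ret < 0)
		goto bus_clk_err;

	ret = acquire("lcd clk", fail_at == 2);
	if (ret < 0)
		goto lcd_clk_err;

	ret = acquire("vblank", fail_at == 3);
	if (ret < 0)
		goto enable_vblank_err;

	return 0;

	/* unwind strictly in reverse order of acquisition */
enable_vblank_err:
	release("lcd clk");
lcd_clk_err:
	release("bus clk");
bus_clk_err:
	return ret;
}

int main(void)
{
	power_on(3);	/* fails at the last step, releases both clocks */
	return 0;
}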
-static int fimd_get_platform_data(struct fimd_context *ctx, struct device *dev)
+static int fimd_poweroff(struct exynos_drm_manager *mgr)
{
- struct videomode *vm;
- int ret;
+ struct fimd_context *ctx = mgr->ctx;
- vm = &ctx->panel.vm;
- ret = of_get_videomode(dev->of_node, vm, OF_USE_NATIVE_MODE);
- if (ret) {
- DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
- return ret;
- }
+ if (ctx->suspended)
+ return 0;
- if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
- ctx->vidcon1 |= VIDCON1_INV_VSYNC;
- if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
- ctx->vidcon1 |= VIDCON1_INV_HSYNC;
- if (vm->flags & DISPLAY_FLAGS_DE_LOW)
- ctx->vidcon1 |= VIDCON1_INV_VDEN;
- if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
- ctx->vidcon1 |= VIDCON1_INV_VCLK;
+ /*
+ * We need to make sure that all windows are disabled before
+ * suspending; otherwise the hardware could keep scanning out from
+ * a buffer that has already been destroyed.
+ */
+ fimd_window_suspend(mgr);
+ clk_disable_unprepare(ctx->lcd_clk);
+ clk_disable_unprepare(ctx->bus_clk);
+
+ pm_runtime_put_sync(ctx->dev);
+
+ ctx->suspended = true;
return 0;
}
+static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
+{
+ DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ fimd_poweron(mgr);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ fimd_poweroff(mgr);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
+}
+
+static struct exynos_drm_manager_ops fimd_manager_ops = {
+ .initialize = fimd_mgr_initialize,
+ .remove = fimd_mgr_remove,
+ .dpms = fimd_dpms,
+ .mode_fixup = fimd_mode_fixup,
+ .mode_set = fimd_mode_set,
+ .commit = fimd_commit,
+ .enable_vblank = fimd_enable_vblank,
+ .disable_vblank = fimd_disable_vblank,
+ .wait_for_vblank = fimd_wait_for_vblank,
+ .win_mode_set = fimd_win_mode_set,
+ .win_commit = fimd_win_commit,
+ .win_disable = fimd_win_disable,
+};
+
+static struct exynos_drm_manager fimd_manager = {
+ .type = EXYNOS_DISPLAY_TYPE_LCD,
+ .ops = &fimd_manager_ops,
+};
+
+static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
+{
+ struct fimd_context *ctx = (struct fimd_context *)dev_id;
+ u32 val;
+
+ val = readl(ctx->regs + VIDINTCON1);
+
+ if (val & VIDINTCON1_INT_FRAME)
+ /* VSYNC interrupt */
+ writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+
+ /* check whether the crtc has already been detached from the encoder */
+ if (ctx->pipe < 0 || !ctx->drm_dev)
+ goto out;
+
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+
+ /* clear the wait-for-vsync flag and wake up the wait queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ wake_up(&ctx->wait_vsync_queue);
+ }
+out:
+ return IRQ_HANDLED;
+}
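The irq handler pairs with fimd_wait_for_vblank(): the waiter arms wait_vsync_event and sleeps on wait_vsync_queue, and the interrupt clears the flag and wakes the queue. The fragment below is a kernel-style sketch of that idiom only; it is not a buildable module on its own, and the 50ms timeout is an assumed illustrative value, not taken from this patch.

/* Kernel-style fragment of the wait/wake pairing between a vblank waiter
 * and an irq handler. The queue must have been set up with
 * init_waitqueue_head() at probe time, as the driver does. */
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/wait.h>

struct vsync_waiter {
	wait_queue_head_t wait_vsync_queue;
	atomic_t wait_vsync_event;
};

/* caller side: arm the flag, then sleep until the irq clears it */
static void wait_for_vsync(struct vsync_waiter *w)
{
	atomic_set(&w->wait_vsync_event, 1);

	/* bail out after ~50ms so a stopped pipeline cannot hang the caller */
	if (!wait_event_timeout(w->wait_vsync_queue,
				!atomic_read(&w->wait_vsync_event),
				HZ / 20))
		pr_debug("vblank wait timed out\n");
}

/* irq side: clear the flag and wake anyone sleeping on the queue */
static void vsync_irq_seen(struct vsync_waiter *w)
{
	if (atomic_read(&w->wait_vsync_event)) {
		atomic_set(&w->wait_vsync_event, 0);
		wake_up(&w->wait_vsync_queue);
	}
}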
+
static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
- struct exynos_drm_subdrv *subdrv;
struct resource *res;
int win;
int ret = -EINVAL;
@@ -924,13 +864,25 @@ static int fimd_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ret = fimd_get_platform_data(ctx, dev);
- if (ret)
- return ret;
+ ctx->dev = dev;
+ ctx->suspended = true;
- ret = fimd_configure_clocks(ctx, dev);
- if (ret)
- return ret;
+ if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
+ ctx->vidcon1 |= VIDCON1_INV_VDEN;
+ if (of_property_read_bool(dev->of_node, "samsung,invert-vclk"))
+ ctx->vidcon1 |= VIDCON1_INV_VCLK;
+
+ ctx->bus_clk = devm_clk_get(dev, "fimd");
+ if (IS_ERR(ctx->bus_clk)) {
+ dev_err(dev, "failed to get bus clock\n");
+ return PTR_ERR(ctx->bus_clk);
+ }
+
+ ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
+ if (IS_ERR(ctx->lcd_clk)) {
+ dev_err(dev, "failed to get lcd clock\n");
+ return PTR_ERR(ctx->lcd_clk);
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -944,9 +896,7 @@ static int fimd_probe(struct platform_device *pdev)
return -ENXIO;
}
- ctx->irq = res->start;
-
- ret = devm_request_irq(dev, ctx->irq, fimd_irq_handler,
+ ret = devm_request_irq(dev, res->start, fimd_irq_handler,
0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
@@ -957,112 +907,35 @@ static int fimd_probe(struct platform_device *pdev)
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
- subdrv = &ctx->subdrv;
+ platform_set_drvdata(pdev, &fimd_manager);
- subdrv->dev = dev;
- subdrv->manager = &fimd_manager;
- subdrv->probe = fimd_subdrv_probe;
- subdrv->remove = fimd_subdrv_remove;
+ fimd_manager.ctx = ctx;
+ exynos_drm_manager_register(&fimd_manager);
- mutex_init(&ctx->lock);
-
- platform_set_drvdata(pdev, ctx);
+ exynos_dpi_probe(ctx->dev);
pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
for (win = 0; win < WINDOWS_NR; win++)
fimd_clear_win(ctx, win);
- exynos_drm_subdrv_register(subdrv);
-
return 0;
}
static int fimd_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct fimd_context *ctx = platform_get_drvdata(pdev);
-
- exynos_drm_subdrv_unregister(&ctx->subdrv);
-
- if (ctx->suspended)
- goto out;
-
- pm_runtime_set_suspended(dev);
- pm_runtime_put_sync(dev);
-
-out:
- pm_runtime_disable(dev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int fimd_suspend(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
+ struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
- /*
- * do not use pm_runtime_suspend(). if pm_runtime_suspend() is
- * called here, an error would be returned by that interface
- * because the usage_count of pm runtime is more than 1.
- */
- if (!pm_runtime_suspended(dev))
- return fimd_activate(ctx, false);
+ exynos_dpi_remove(&pdev->dev);
- return 0;
-}
+ exynos_drm_manager_unregister(&fimd_manager);
-static int fimd_resume(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
+ fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
- /*
- * if entered to sleep when lcd panel was on, the usage_count
- * of pm runtime would still be 1 so in this case, fimd driver
- * should be on directly not drawing on pm runtime interface.
- */
- if (!pm_runtime_suspended(dev)) {
- int ret;
-
- ret = fimd_activate(ctx, true);
- if (ret < 0)
- return ret;
-
- /*
- * in case of dpms on(standby), fimd_apply function will
- * be called by encoder's dpms callback to update fimd's
- * registers but in case of sleep wakeup, it's not.
- * so fimd_apply function should be called at here.
- */
- fimd_apply(dev);
- }
+ pm_runtime_disable(&pdev->dev);
return 0;
}
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
-static int fimd_runtime_suspend(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
-
- return fimd_activate(ctx, false);
-}
-
-static int fimd_runtime_resume(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
-
- return fimd_activate(ctx, true);
-}
-#endif
-
-static const struct dev_pm_ops fimd_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
- SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
-};
struct platform_driver fimd_driver = {
.probe = fimd_probe,
@@ -1070,7 +943,6 @@ struct platform_driver fimd_driver = {
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
- .pm = &fimd_pm_ops,
.of_match_table = fimd_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
deleted file mode 100644
index 8548b974bd59..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ /dev/null
@@ -1,439 +0,0 @@
-/*
- * Copyright (C) 2011 Samsung Electronics Co.Ltd
- * Authors:
- * Inki Dae <inki.dae@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <drm/drmP.h>
-
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#include <drm/exynos_drm.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_hdmi.h"
-
-#define to_context(dev) platform_get_drvdata(to_platform_device(dev))
-#define to_subdrv(dev) to_context(dev)
-#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
- struct drm_hdmi_context, subdrv);
-
-/* platform device pointer for common drm hdmi device. */
-static struct platform_device *exynos_drm_hdmi_pdev;
-
-/* Common hdmi subdrv needs to access the hdmi and mixer though context.
-* These should be initialied by the repective drivers */
-static struct exynos_drm_hdmi_context *hdmi_ctx;
-static struct exynos_drm_hdmi_context *mixer_ctx;
-
-/* these callback points shoud be set by specific drivers. */
-static struct exynos_hdmi_ops *hdmi_ops;
-static struct exynos_mixer_ops *mixer_ops;
-
-struct drm_hdmi_context {
- struct exynos_drm_subdrv subdrv;
- struct exynos_drm_hdmi_context *hdmi_ctx;
- struct exynos_drm_hdmi_context *mixer_ctx;
-
- bool enabled[MIXER_WIN_NR];
-};
-
-int exynos_platform_device_hdmi_register(void)
-{
- struct platform_device *pdev;
-
- if (exynos_drm_hdmi_pdev)
- return -EEXIST;
-
- pdev = platform_device_register_simple(
- "exynos-drm-hdmi", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- exynos_drm_hdmi_pdev = pdev;
-
- return 0;
-}
-
-void exynos_platform_device_hdmi_unregister(void)
-{
- if (exynos_drm_hdmi_pdev) {
- platform_device_unregister(exynos_drm_hdmi_pdev);
- exynos_drm_hdmi_pdev = NULL;
- }
-}
-
-void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
-{
- if (ctx)
- hdmi_ctx = ctx;
-}
-
-void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx)
-{
- if (ctx)
- mixer_ctx = ctx;
-}
-
-void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
-{
- if (ops)
- hdmi_ops = ops;
-}
-
-void exynos_mixer_ops_register(struct exynos_mixer_ops *ops)
-{
- if (ops)
- mixer_ops = ops;
-}
-
-static bool drm_hdmi_is_connected(struct device *dev)
-{
- struct drm_hdmi_context *ctx = to_context(dev);
-
- if (hdmi_ops && hdmi_ops->is_connected)
- return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx);
-
- return false;
-}
-
-static struct edid *drm_hdmi_get_edid(struct device *dev,
- struct drm_connector *connector)
-{
- struct drm_hdmi_context *ctx = to_context(dev);
-
- if (hdmi_ops && hdmi_ops->get_edid)
- return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector);
-
- return NULL;
-}
-
-static int drm_hdmi_check_mode(struct device *dev,
- struct drm_display_mode *mode)
-{
- struct drm_hdmi_context *ctx = to_context(dev);
- int ret = 0;
-
- /*
- * Both, mixer and hdmi should be able to handle the requested mode.
- * If any of the two fails, return mode as BAD.
- */
-
- if (mixer_ops && mixer_ops->check_mode)
- ret = mixer_ops->check_mode(ctx->mixer_ctx->ctx, mode);
-
- if (ret)
- return ret;
-
- if (hdmi_ops && hdmi_ops->check_mode)
- return hdmi_ops->check_mode(ctx->hdmi_ctx->ctx, mode);
-
- return 0;
-}
-
-static int drm_hdmi_power_on(struct device *dev, int mode)
-{
- struct drm_hdmi_context *ctx = to_context(dev);
-
- if (hdmi_ops && hdmi_ops->power_on)
- return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode);
-
- return 0;
-}
-
-static struct exynos_drm_display_ops drm_hdmi_display_ops = {
- .type = EXYNOS_DISPLAY_TYPE_HDMI,
- .is_connected = drm_hdmi_is_connected,
- .get_edid = drm_hdmi_get_edid,
- .check_mode = drm_hdmi_check_mode,
- .power_on = drm_hdmi_power_on,
-};
-
-static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
- struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct exynos_drm_manager *manager = subdrv->manager;
-
- if (mixer_ops && mixer_ops->enable_vblank)
- return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx,
- manager->pipe);
-
- return 0;
-}
-
-static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- if (mixer_ops && mixer_ops->disable_vblank)
- return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
-}
-
-static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- if (mixer_ops && mixer_ops->wait_for_vblank)
- mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
-static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
- struct drm_connector *connector,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_display_mode *m;
- int mode_ok;
-
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- mode_ok = drm_hdmi_check_mode(subdrv_dev, adjusted_mode);
-
- /* just return if user desired mode exists. */
- if (mode_ok == 0)
- return;
-
- /*
- * otherwise, find the most suitable mode among modes and change it
- * to adjusted_mode.
- */
- list_for_each_entry(m, &connector->modes, head) {
- mode_ok = drm_hdmi_check_mode(subdrv_dev, m);
-
- if (mode_ok == 0) {
- struct drm_mode_object base;
- struct list_head head;
-
- DRM_INFO("desired mode doesn't exist so\n");
- DRM_INFO("use the most suitable mode among modes.\n");
-
- DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
- m->hdisplay, m->vdisplay, m->vrefresh);
-
- /* preserve display mode header while copying. */
- head = adjusted_mode->head;
- base = adjusted_mode->base;
- memcpy(adjusted_mode, m, sizeof(*m));
- adjusted_mode->head = head;
- adjusted_mode->base = base;
- break;
- }
- }
-}
-
-static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- if (hdmi_ops && hdmi_ops->mode_set)
- hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
-}
-
-static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
- unsigned int *width, unsigned int *height)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- if (hdmi_ops && hdmi_ops->get_max_resol)
- hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height);
-}
-
-static void drm_hdmi_commit(struct device *subdrv_dev)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- if (hdmi_ops && hdmi_ops->commit)
- hdmi_ops->commit(ctx->hdmi_ctx->ctx);
-}
-
-static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- if (mixer_ops && mixer_ops->dpms)
- mixer_ops->dpms(ctx->mixer_ctx->ctx, mode);
-
- if (hdmi_ops && hdmi_ops->dpms)
- hdmi_ops->dpms(ctx->hdmi_ctx->ctx, mode);
-}
-
-static void drm_hdmi_apply(struct device *subdrv_dev)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
- int i;
-
- for (i = 0; i < MIXER_WIN_NR; i++) {
- if (!ctx->enabled[i])
- continue;
- if (mixer_ops && mixer_ops->win_commit)
- mixer_ops->win_commit(ctx->mixer_ctx->ctx, i);
- }
-
- if (hdmi_ops && hdmi_ops->commit)
- hdmi_ops->commit(ctx->hdmi_ctx->ctx);
-}
-
-static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
- .dpms = drm_hdmi_dpms,
- .apply = drm_hdmi_apply,
- .enable_vblank = drm_hdmi_enable_vblank,
- .disable_vblank = drm_hdmi_disable_vblank,
- .wait_for_vblank = drm_hdmi_wait_for_vblank,
- .mode_fixup = drm_hdmi_mode_fixup,
- .mode_set = drm_hdmi_mode_set,
- .get_max_resol = drm_hdmi_get_max_resol,
- .commit = drm_hdmi_commit,
-};
-
-static void drm_mixer_mode_set(struct device *subdrv_dev,
- struct exynos_drm_overlay *overlay)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- if (mixer_ops && mixer_ops->win_mode_set)
- mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
-}
-
-static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
- int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
-
- if (win < 0 || win >= MIXER_WIN_NR) {
- DRM_ERROR("mixer window[%d] is wrong\n", win);
- return;
- }
-
- if (mixer_ops && mixer_ops->win_commit)
- mixer_ops->win_commit(ctx->mixer_ctx->ctx, win);
-
- ctx->enabled[win] = true;
-}
-
-static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
- int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
-
- if (win < 0 || win >= MIXER_WIN_NR) {
- DRM_ERROR("mixer window[%d] is wrong\n", win);
- return;
- }
-
- if (mixer_ops && mixer_ops->win_disable)
- mixer_ops->win_disable(ctx->mixer_ctx->ctx, win);
-
- ctx->enabled[win] = false;
-}
-
-static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
- .mode_set = drm_mixer_mode_set,
- .commit = drm_mixer_commit,
- .disable = drm_mixer_disable,
-};
-
-static struct exynos_drm_manager hdmi_manager = {
- .pipe = -1,
- .ops = &drm_hdmi_manager_ops,
- .overlay_ops = &drm_hdmi_overlay_ops,
- .display_ops = &drm_hdmi_display_ops,
-};
-
-static int hdmi_subdrv_probe(struct drm_device *drm_dev,
- struct device *dev)
-{
- struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
- struct drm_hdmi_context *ctx;
-
- if (!hdmi_ctx) {
- DRM_ERROR("hdmi context not initialized.\n");
- return -EFAULT;
- }
-
- if (!mixer_ctx) {
- DRM_ERROR("mixer context not initialized.\n");
- return -EFAULT;
- }
-
- ctx = get_ctx_from_subdrv(subdrv);
-
- if (!ctx) {
- DRM_ERROR("no drm hdmi context.\n");
- return -EFAULT;
- }
-
- ctx->hdmi_ctx = hdmi_ctx;
- ctx->mixer_ctx = mixer_ctx;
-
- ctx->hdmi_ctx->drm_dev = drm_dev;
- ctx->mixer_ctx->drm_dev = drm_dev;
-
- if (mixer_ops->iommu_on)
- mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
-
- return 0;
-}
-
-static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
- struct drm_hdmi_context *ctx;
- struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
-
- ctx = get_ctx_from_subdrv(subdrv);
-
- if (mixer_ops->iommu_on)
- mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
-}
-
-static int exynos_drm_hdmi_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct exynos_drm_subdrv *subdrv;
- struct drm_hdmi_context *ctx;
-
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- subdrv = &ctx->subdrv;
-
- subdrv->dev = dev;
- subdrv->manager = &hdmi_manager;
- subdrv->probe = hdmi_subdrv_probe;
- subdrv->remove = hdmi_subdrv_remove;
-
- platform_set_drvdata(pdev, subdrv);
-
- exynos_drm_subdrv_register(subdrv);
-
- return 0;
-}
-
-static int exynos_drm_hdmi_remove(struct platform_device *pdev)
-{
- struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
-
- exynos_drm_subdrv_unregister(&ctx->subdrv);
-
- return 0;
-}
-
-struct platform_driver exynos_drm_common_hdmi_driver = {
- .probe = exynos_drm_hdmi_probe,
- .remove = exynos_drm_hdmi_remove,
- .driver = {
- .name = "exynos-drm-hdmi",
- .owner = THIS_MODULE,
- },
-};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
deleted file mode 100644
index 724cab181976..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* exynos_drm_hdmi.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authoer: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_HDMI_H_
-#define _EXYNOS_DRM_HDMI_H_
-
-#define MIXER_WIN_NR 3
-#define MIXER_DEFAULT_WIN 0
-
-/*
- * exynos hdmi common context structure.
- *
- * @drm_dev: pointer to drm_device.
- * @ctx: pointer to the context of specific device driver.
- * this context should be hdmi_context or mixer_context.
- */
-struct exynos_drm_hdmi_context {
- struct drm_device *drm_dev;
- void *ctx;
-};
-
-struct exynos_hdmi_ops {
- /* display */
- bool (*is_connected)(void *ctx);
- struct edid *(*get_edid)(void *ctx,
- struct drm_connector *connector);
- int (*check_mode)(void *ctx, struct drm_display_mode *mode);
- int (*power_on)(void *ctx, int mode);
-
- /* manager */
- void (*mode_set)(void *ctx, struct drm_display_mode *mode);
- void (*get_max_resol)(void *ctx, unsigned int *width,
- unsigned int *height);
- void (*commit)(void *ctx);
- void (*dpms)(void *ctx, int mode);
-};
-
-struct exynos_mixer_ops {
- /* manager */
- int (*iommu_on)(void *ctx, bool enable);
- int (*enable_vblank)(void *ctx, int pipe);
- void (*disable_vblank)(void *ctx);
- void (*wait_for_vblank)(void *ctx);
- void (*dpms)(void *ctx, int mode);
-
- /* overlay */
- void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
- void (*win_commit)(void *ctx, int zpos);
- void (*win_disable)(void *ctx, int zpos);
-
- /* display */
- int (*check_mode)(void *ctx, struct drm_display_mode *mode);
-};
-
-void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx);
-void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx);
-void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops);
-void exynos_mixer_ops_register(struct exynos_mixer_ops *ops);
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index fb8db0378274..b32b291f88ff 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -36,12 +36,10 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
priv->da_start = EXYNOS_DEV_ADDR_START;
if (!priv->da_space_size)
priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
- if (!priv->da_space_order)
- priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
- priv->da_space_size,
- priv->da_space_order);
+ priv->da_space_size);
+
if (IS_ERR(mapping))
return PTR_ERR(mapping);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 598e60f57d4b..72376d41c512 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -14,7 +14,6 @@
#define EXYNOS_DEV_ADDR_START 0x20000000
#define EXYNOS_DEV_ADDR_SIZE 0x40000000
-#define EXYNOS_DEV_ADDR_ORDER 0x0
#ifdef CONFIG_DRM_EXYNOS_IOMMU
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index fcb0652e77d0..8371cbd7631d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -13,7 +13,7 @@
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
+#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
@@ -87,7 +87,7 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i);
if (!buffer) {
- DRM_LOG_KMS("buffer is null\n");
+ DRM_DEBUG_KMS("buffer is null\n");
return -EFAULT;
}
@@ -139,7 +139,7 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
overlay->crtc_x, overlay->crtc_y,
overlay->crtc_width, overlay->crtc_height);
- exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_plane_mode_set);
+ exynos_drm_crtc_plane_mode_set(crtc, overlay);
return 0;
}
@@ -149,8 +149,7 @@ void exynos_plane_commit(struct drm_plane *plane)
struct exynos_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
- exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
- exynos_drm_encoder_plane_commit);
+ exynos_drm_crtc_plane_commit(plane->crtc, overlay->zpos);
}
void exynos_plane_dpms(struct drm_plane *plane, int mode)
@@ -162,17 +161,13 @@ void exynos_plane_dpms(struct drm_plane *plane, int mode)
if (exynos_plane->enabled)
return;
- exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
- exynos_drm_encoder_plane_enable);
-
+ exynos_drm_crtc_plane_enable(plane->crtc, overlay->zpos);
exynos_plane->enabled = true;
} else {
if (!exynos_plane->enabled)
return;
- exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
- exynos_drm_encoder_plane_disable);
-
+ exynos_drm_crtc_plane_disable(plane->crtc, overlay->zpos);
exynos_plane->enabled = false;
}
}
@@ -259,7 +254,7 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
}
struct drm_plane *exynos_plane_init(struct drm_device *dev,
- unsigned int possible_crtcs, bool priv)
+ unsigned long possible_crtcs, bool priv)
{
struct exynos_plane *exynos_plane;
int err;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 88312458580d..84d464c90d3d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -17,4 +17,4 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
void exynos_plane_commit(struct drm_plane *plane);
void exynos_plane_dpms(struct drm_plane *plane, int mode);
struct drm_plane *exynos_plane_init(struct drm_device *dev,
- unsigned int possible_crtcs, bool priv);
+ unsigned long possible_crtcs, bool priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index ddaaedde173d..7afead9c3f30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -28,7 +28,9 @@
/* vidi has totally three virtual windows. */
#define WINDOWS_NR 3
-#define get_vidi_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_vidi_mgr(dev) platform_get_drvdata(to_platform_device(dev))
+#define ctx_from_connector(c) container_of(c, struct vidi_context, \
+ connector)
struct vidi_win_data {
unsigned int offset_x;
@@ -45,8 +47,10 @@ struct vidi_win_data {
};
struct vidi_context {
- struct exynos_drm_subdrv subdrv;
+ struct drm_device *drm_dev;
struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector connector;
struct vidi_win_data win_data[WINDOWS_NR];
struct edid *raw_edid;
unsigned int clkdiv;
@@ -58,6 +62,7 @@ struct vidi_context {
bool direct_vblank;
struct work_struct work;
struct mutex lock;
+ int pipe;
};
static const char fake_edid_info[] = {
@@ -85,126 +90,34 @@ static const char fake_edid_info[] = {
0x00, 0x00, 0x00, 0x06
};
-static bool vidi_display_is_connected(struct device *dev)
+static void vidi_apply(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = get_vidi_context(dev);
-
- /*
- * connection request would come from user side
- * to do hotplug through specific ioctl.
- */
- return ctx->connected ? true : false;
-}
-
-static struct edid *vidi_get_edid(struct device *dev,
- struct drm_connector *connector)
-{
- struct vidi_context *ctx = get_vidi_context(dev);
- struct edid *edid;
-
- /*
- * the edid data comes from user side and it would be set
- * to ctx->raw_edid through specific ioctl.
- */
- if (!ctx->raw_edid) {
- DRM_DEBUG_KMS("raw_edid is null.\n");
- return ERR_PTR(-EFAULT);
- }
-
- edid = drm_edid_duplicate(ctx->raw_edid);
- if (!edid) {
- DRM_DEBUG_KMS("failed to allocate edid\n");
- return ERR_PTR(-ENOMEM);
- }
-
- return edid;
-}
-
-static void *vidi_get_panel(struct device *dev)
-{
- /* TODO. */
-
- return NULL;
-}
-
-static int vidi_check_mode(struct device *dev, struct drm_display_mode *mode)
-{
- /* TODO. */
-
- return 0;
-}
-
-static int vidi_display_power_on(struct device *dev, int mode)
-{
- /* TODO */
-
- return 0;
-}
-
-static struct exynos_drm_display_ops vidi_display_ops = {
- .type = EXYNOS_DISPLAY_TYPE_VIDI,
- .is_connected = vidi_display_is_connected,
- .get_edid = vidi_get_edid,
- .get_panel = vidi_get_panel,
- .check_mode = vidi_check_mode,
- .power_on = vidi_display_power_on,
-};
-
-static void vidi_dpms(struct device *subdrv_dev, int mode)
-{
- struct vidi_context *ctx = get_vidi_context(subdrv_dev);
-
- DRM_DEBUG_KMS("%d\n", mode);
-
- mutex_lock(&ctx->lock);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- /* TODO. */
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- /* TODO. */
- break;
- default:
- DRM_DEBUG_KMS("unspecified mode %d\n", mode);
- break;
- }
-
- mutex_unlock(&ctx->lock);
-}
-
-static void vidi_apply(struct device *subdrv_dev)
-{
- struct vidi_context *ctx = get_vidi_context(subdrv_dev);
- struct exynos_drm_manager *mgr = ctx->subdrv.manager;
+ struct vidi_context *ctx = mgr->ctx;
struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
- struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
struct vidi_win_data *win_data;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
win_data = &ctx->win_data[i];
- if (win_data->enabled && (ovl_ops && ovl_ops->commit))
- ovl_ops->commit(subdrv_dev, i);
+ if (win_data->enabled && (mgr_ops && mgr_ops->win_commit))
+ mgr_ops->win_commit(mgr, i);
}
if (mgr_ops && mgr_ops->commit)
- mgr_ops->commit(subdrv_dev);
+ mgr_ops->commit(mgr);
}
-static void vidi_commit(struct device *dev)
+static void vidi_commit(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = get_vidi_context(dev);
+ struct vidi_context *ctx = mgr->ctx;
if (ctx->suspended)
return;
}
-static int vidi_enable_vblank(struct device *dev)
+static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = get_vidi_context(dev);
+ struct vidi_context *ctx = mgr->ctx;
if (ctx->suspended)
return -EPERM;
@@ -217,16 +130,16 @@ static int vidi_enable_vblank(struct device *dev)
/*
* in case of page flip request, vidi_finish_pageflip function
* will not be called because direct_vblank is true and then
- * that function will be called by overlay_ops->commit callback
+ * that function will be called by manager_ops->win_commit callback
*/
schedule_work(&ctx->work);
return 0;
}
-static void vidi_disable_vblank(struct device *dev)
+static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = get_vidi_context(dev);
+ struct vidi_context *ctx = mgr->ctx;
if (ctx->suspended)
return;
@@ -235,24 +148,16 @@ static void vidi_disable_vblank(struct device *dev)
ctx->vblank_on = false;
}
-static struct exynos_drm_manager_ops vidi_manager_ops = {
- .dpms = vidi_dpms,
- .apply = vidi_apply,
- .commit = vidi_commit,
- .enable_vblank = vidi_enable_vblank,
- .disable_vblank = vidi_disable_vblank,
-};
-
-static void vidi_win_mode_set(struct device *dev,
- struct exynos_drm_overlay *overlay)
+static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
+ struct exynos_drm_overlay *overlay)
{
- struct vidi_context *ctx = get_vidi_context(dev);
+ struct vidi_context *ctx = mgr->ctx;
struct vidi_win_data *win_data;
int win;
unsigned long offset;
if (!overlay) {
- dev_err(dev, "overlay is NULL\n");
+ DRM_ERROR("overlay is NULL\n");
return;
}
@@ -296,9 +201,9 @@ static void vidi_win_mode_set(struct device *dev,
overlay->fb_width, overlay->crtc_width);
}
-static void vidi_win_commit(struct device *dev, int zpos)
+static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
{
- struct vidi_context *ctx = get_vidi_context(dev);
+ struct vidi_context *ctx = mgr->ctx;
struct vidi_win_data *win_data;
int win = zpos;
@@ -321,9 +226,9 @@ static void vidi_win_commit(struct device *dev, int zpos)
schedule_work(&ctx->work);
}
-static void vidi_win_disable(struct device *dev, int zpos)
+static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
{
- struct vidi_context *ctx = get_vidi_context(dev);
+ struct vidi_context *ctx = mgr->ctx;
struct vidi_win_data *win_data;
int win = zpos;
@@ -339,98 +244,132 @@ static void vidi_win_disable(struct device *dev, int zpos)
/* TODO. */
}
-static struct exynos_drm_overlay_ops vidi_overlay_ops = {
- .mode_set = vidi_win_mode_set,
- .commit = vidi_win_commit,
- .disable = vidi_win_disable,
-};
+static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
+{
+ struct vidi_context *ctx = mgr->ctx;
-static struct exynos_drm_manager vidi_manager = {
- .pipe = -1,
- .ops = &vidi_manager_ops,
- .overlay_ops = &vidi_overlay_ops,
- .display_ops = &vidi_display_ops,
-};
+ DRM_DEBUG_KMS("%s\n", __FILE__);
-static void vidi_fake_vblank_handler(struct work_struct *work)
-{
- struct vidi_context *ctx = container_of(work, struct vidi_context,
- work);
- struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct exynos_drm_manager *manager = subdrv->manager;
+ if (enable != false && enable != true)
+ return -EINVAL;
- if (manager->pipe < 0)
- return;
+ if (enable) {
+ ctx->suspended = false;
- /* refresh rate is about 50Hz. */
- usleep_range(16000, 20000);
+ /* if vblank was enabled status, enable it again. */
+ if (test_and_clear_bit(0, &ctx->irq_flags))
+ vidi_enable_vblank(mgr);
+
+ vidi_apply(mgr);
+ } else {
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
+{
+ struct vidi_context *ctx = mgr->ctx;
+
+ DRM_DEBUG_KMS("%d\n", mode);
mutex_lock(&ctx->lock);
- if (ctx->direct_vblank) {
- drm_handle_vblank(subdrv->drm_dev, manager->pipe);
- ctx->direct_vblank = false;
- mutex_unlock(&ctx->lock);
- return;
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ vidi_power_on(mgr, true);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ vidi_power_on(mgr, false);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
}
mutex_unlock(&ctx->lock);
-
- exynos_drm_crtc_finish_pageflip(subdrv->drm_dev, manager->pipe);
}
-static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
+ struct drm_device *drm_dev, int pipe)
{
+ struct vidi_context *ctx = mgr->ctx;
+
+ DRM_ERROR("vidi initialize ct=%p dev=%p pipe=%d\n", ctx, drm_dev, pipe);
+
+ ctx->drm_dev = drm_dev;
+ ctx->pipe = pipe;
+
/*
* enable drm irq mode.
- * - with irq_enabled = true, we can use the vblank feature.
+ * - with irq_enabled = 1, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler.
*/
- drm_dev->irq_enabled = true;
+ drm_dev->irq_enabled = 1;
/*
- * with vblank_disable_allowed = true, vblank interrupt will be disabled
+ * with vblank_disable_allowed = 1, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- drm_dev->vblank_disable_allowed = true;
+ drm_dev->vblank_disable_allowed = 1;
return 0;
}
-static void vidi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
- /* TODO. */
-}
+static struct exynos_drm_manager_ops vidi_manager_ops = {
+ .initialize = vidi_mgr_initialize,
+ .dpms = vidi_dpms,
+ .commit = vidi_commit,
+ .enable_vblank = vidi_enable_vblank,
+ .disable_vblank = vidi_disable_vblank,
+ .win_mode_set = vidi_win_mode_set,
+ .win_commit = vidi_win_commit,
+ .win_disable = vidi_win_disable,
+};
-static int vidi_power_on(struct vidi_context *ctx, bool enable)
+static struct exynos_drm_manager vidi_manager = {
+ .type = EXYNOS_DISPLAY_TYPE_VIDI,
+ .ops = &vidi_manager_ops,
+};
+
+static void vidi_fake_vblank_handler(struct work_struct *work)
{
- struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct device *dev = subdrv->dev;
+ struct vidi_context *ctx = container_of(work, struct vidi_context,
+ work);
- if (enable) {
- ctx->suspended = false;
+ if (ctx->pipe < 0)
+ return;
- /* if vblank was enabled status, enable it again. */
- if (test_and_clear_bit(0, &ctx->irq_flags))
- vidi_enable_vblank(dev);
+ /* refresh rate is about 50Hz. */
+ usleep_range(16000, 20000);
- vidi_apply(dev);
- } else {
- ctx->suspended = true;
+ mutex_lock(&ctx->lock);
+
+ if (ctx->direct_vblank) {
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ ctx->direct_vblank = false;
+ mutex_unlock(&ctx->lock);
+ return;
}
- return 0;
+ mutex_unlock(&ctx->lock);
+
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
}
static int vidi_show_connection(struct device *dev,
struct device_attribute *attr, char *buf)
{
int rc;
- struct vidi_context *ctx = get_vidi_context(dev);
+ struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
+ struct vidi_context *ctx = mgr->ctx;
mutex_lock(&ctx->lock);
@@ -445,7 +384,8 @@ static int vidi_store_connection(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct vidi_context *ctx = get_vidi_context(dev);
+ struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
+ struct vidi_context *ctx = mgr->ctx;
int ret;
ret = kstrtoint(buf, 0, &ctx->connected);
@@ -467,7 +407,7 @@ static int vidi_store_connection(struct device *dev,
DRM_DEBUG_KMS("requested connection.\n");
- drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
+ drm_helper_hpd_irq_event(ctx->drm_dev);
return len;
}
@@ -480,8 +420,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
{
struct vidi_context *ctx = NULL;
struct drm_encoder *encoder;
- struct exynos_drm_manager *manager;
- struct exynos_drm_display_ops *display_ops;
+ struct exynos_drm_display *display;
struct drm_exynos_vidi_connection *vidi = data;
if (!vidi) {
@@ -496,11 +435,10 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
head) {
- manager = exynos_drm_get_manager(encoder);
- display_ops = manager->display_ops;
+ display = exynos_drm_get_display(encoder);
- if (display_ops->type == EXYNOS_DISPLAY_TYPE_VIDI) {
- ctx = get_vidi_context(manager->dev);
+ if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
+ ctx = display->ctx;
break;
}
}
@@ -539,16 +477,119 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
}
ctx->connected = vidi->connection;
- drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+
+ return 0;
+}
+
+static enum drm_connector_status vidi_detect(struct drm_connector *connector,
+ bool force)
+{
+ struct vidi_context *ctx = ctx_from_connector(connector);
+
+ /*
+ * connection request would come from user side
+ * to do hotplug through specific ioctl.
+ */
+ return ctx->connected ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void vidi_connector_destroy(struct drm_connector *connector)
+{
+}
+
+static struct drm_connector_funcs vidi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = vidi_detect,
+ .destroy = vidi_connector_destroy,
+};
+
+static int vidi_get_modes(struct drm_connector *connector)
+{
+ struct vidi_context *ctx = ctx_from_connector(connector);
+ struct edid *edid;
+ int edid_len;
+
+ /*
+ * the edid data comes from user side and it would be set
+ * to ctx->raw_edid through specific ioctl.
+ */
+ if (!ctx->raw_edid) {
+ DRM_DEBUG_KMS("raw_edid is null.\n");
+ return -EFAULT;
+ }
+
+ edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
+ edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+ if (!edid) {
+ DRM_DEBUG_KMS("failed to allocate edid\n");
+ return -ENOMEM;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ return drm_add_edid_modes(connector, edid);
+}
+
+static int vidi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
+{
+ struct vidi_context *ctx = ctx_from_connector(connector);
+
+ return ctx->encoder;
+}
+
+static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
+ .get_modes = vidi_get_modes,
+ .mode_valid = vidi_mode_valid,
+ .best_encoder = vidi_best_encoder,
+};
+
+static int vidi_create_connector(struct exynos_drm_display *display,
+ struct drm_encoder *encoder)
+{
+ struct vidi_context *ctx = display->ctx;
+ struct drm_connector *connector = &ctx->connector;
+ int ret;
+
+ ctx->encoder = encoder;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(ctx->drm_dev, connector,
+ &vidi_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
+
+static struct exynos_drm_display_ops vidi_display_ops = {
+ .create_connector = vidi_create_connector,
+};
+
+static struct exynos_drm_display vidi_display = {
+ .type = EXYNOS_DISPLAY_TYPE_VIDI,
+ .ops = &vidi_display_ops,
+};
+
static int vidi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct vidi_context *ctx;
- struct exynos_drm_subdrv *subdrv;
int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
@@ -559,21 +600,19 @@ static int vidi_probe(struct platform_device *pdev)
INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
- subdrv = &ctx->subdrv;
- subdrv->dev = dev;
- subdrv->manager = &vidi_manager;
- subdrv->probe = vidi_subdrv_probe;
- subdrv->remove = vidi_subdrv_remove;
+ vidi_manager.ctx = ctx;
+ vidi_display.ctx = ctx;
mutex_init(&ctx->lock);
- platform_set_drvdata(pdev, ctx);
+ platform_set_drvdata(pdev, &vidi_manager);
ret = device_create_file(dev, &dev_attr_connection);
if (ret < 0)
DRM_INFO("failed to create connection sysfs.\n");
- exynos_drm_subdrv_register(subdrv);
+ exynos_drm_manager_register(&vidi_manager);
+ exynos_drm_display_register(&vidi_display);
return 0;
}
@@ -582,7 +621,8 @@ static int vidi_remove(struct platform_device *pdev)
{
struct vidi_context *ctx = platform_get_drvdata(pdev);
- exynos_drm_subdrv_unregister(&ctx->subdrv);
+ exynos_drm_display_unregister(&vidi_display);
+ exynos_drm_manager_unregister(&vidi_manager);
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
@@ -592,32 +632,11 @@ static int vidi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int vidi_suspend(struct device *dev)
-{
- struct vidi_context *ctx = get_vidi_context(dev);
-
- return vidi_power_on(ctx, false);
-}
-
-static int vidi_resume(struct device *dev)
-{
- struct vidi_context *ctx = get_vidi_context(dev);
-
- return vidi_power_on(ctx, true);
-}
-#endif
-
-static const struct dev_pm_ops vidi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(vidi_suspend, vidi_resume)
-};
-
struct platform_driver vidi_driver = {
.probe = vidi_probe,
.remove = vidi_remove,
.driver = {
.name = "exynos-drm-vidi",
.owner = THIS_MODULE,
- .pm = &vidi_pm_ops,
},
};
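
Both the vidi conversion above and the hdmi conversion below embed a struct drm_connector directly in the driver context and get back to that context through a container_of()-based ctx_from_connector() macro. A stripped-down userspace sketch of that pattern, using stand-in types instead of the real DRM structures (illustrative only, not driver code):

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): map a pointer to an embedded
 * member back to the structure that contains it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_connector { int id; };		/* stand-in for struct drm_connector */

struct fake_context {				/* stand-in for struct vidi_context */
	int connected;
	struct fake_connector connector;	/* embedded, not a pointer */
};

#define ctx_from_connector(c) container_of(c, struct fake_context, connector)

int main(void)
{
	struct fake_context ctx = { .connected = 1 };
	struct fake_connector *conn = &ctx.connector;

	/* a callback that only receives the connector can still reach its context */
	printf("connected = %d\n", ctx_from_connector(conn)->connected);
	return 0;
}
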
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index c021ddc1ffb4..9a6d652a3ef2 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -33,38 +33,42 @@
#include <linux/regulator/consumer.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/i2c.h>
#include <linux/of_gpio.h>
#include <linux/hdmi.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
-#include "exynos_drm_hdmi.h"
-
-#include "exynos_hdmi.h"
+#include "exynos_mixer.h"
#include <linux/gpio.h>
#include <media/s5p_hdmi.h>
-#define MAX_WIDTH 1920
-#define MAX_HEIGHT 1080
-#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_hdmi_display(dev) platform_get_drvdata(to_platform_device(dev))
+#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
/* AVI header and aspect ratio */
#define HDMI_AVI_VERSION 0x02
#define HDMI_AVI_LENGTH 0x0D
-#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
-#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
/* AUI header info */
#define HDMI_AUI_VERSION 0x01
#define HDMI_AUI_LENGTH 0x0A
+#define AVI_SAME_AS_PIC_ASPECT_RATIO 0x8
+#define AVI_4_3_CENTER_RATIO 0x9
+#define AVI_16_9_CENTER_RATIO 0xa
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
};
+struct hdmi_driver_data {
+ unsigned int type;
+ unsigned int is_apb_phy:1;
+};
+
struct hdmi_resources {
struct clk *hdmi;
struct clk *sclk_hdmi;
@@ -162,6 +166,7 @@ struct hdmi_v14_conf {
struct hdmi_conf_regs {
int pixel_clock;
int cea_video_id;
+ enum hdmi_picture_aspect aspect_ratio;
union {
struct hdmi_v13_conf v13_conf;
struct hdmi_v14_conf v14_conf;
@@ -171,16 +176,17 @@ struct hdmi_conf_regs {
struct hdmi_context {
struct device *dev;
struct drm_device *drm_dev;
+ struct drm_connector connector;
+ struct drm_encoder *encoder;
bool hpd;
bool powered;
bool dvi_mode;
struct mutex hdmi_mutex;
void __iomem *regs;
- void *parent_ctx;
int irq;
- struct i2c_client *ddc_port;
+ struct i2c_adapter *ddc_adpt;
struct i2c_client *hdmiphy_port;
/* current hdmiphy conf regs */
@@ -198,6 +204,14 @@ struct hdmiphy_config {
u8 conf[32];
};
+struct hdmi_driver_data exynos4212_hdmi_driver_data = {
+ .type = HDMI_TYPE14,
+};
+
+struct hdmi_driver_data exynos5_hdmi_driver_data = {
+ .type = HDMI_TYPE14,
+};
+
/* list of phy config settings */
static const struct hdmiphy_config hdmiphy_v13_configs[] = {
{
@@ -303,6 +317,24 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
},
{
+ .pixel_clock = 71000000,
+ .conf = {
+ 0x01, 0x91, 0x1e, 0x15, 0x40, 0x3c, 0xce, 0x08,
+ 0x04, 0x20, 0xb2, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xad, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 73250000,
+ .conf = {
+ 0x01, 0xd1, 0x1f, 0x15, 0x40, 0x18, 0xe9, 0x08,
+ 0x02, 0xa0, 0xb7, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
.pixel_clock = 74176000,
.conf = {
0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08,
@@ -330,6 +362,15 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
},
{
+ .pixel_clock = 88750000,
+ .conf = {
+ 0x01, 0x91, 0x25, 0x17, 0x40, 0x30, 0xfe, 0x08,
+ 0x06, 0x20, 0xde, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x8a, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
.pixel_clock = 106500000,
.conf = {
0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
@@ -348,6 +389,24 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
},
{
+ .pixel_clock = 115500000,
+ .conf = {
+ 0x01, 0xd1, 0x30, 0x1a, 0x40, 0x40, 0x10, 0x04,
+ 0x04, 0xa0, 0x21, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xaa, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 119000000,
+ .conf = {
+ 0x01, 0x91, 0x32, 0x14, 0x40, 0x60, 0xd8, 0x08,
+ 0x06, 0x20, 0x2a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x9d, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
.pixel_clock = 146250000,
.conf = {
0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08,
@@ -668,7 +727,6 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
{
u32 hdr_sum;
u8 chksum;
- u32 aspect_ratio;
u32 mod;
u32 vic;
@@ -697,10 +755,28 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
AVI_ACTIVE_FORMAT_VALID |
AVI_UNDERSCANNED_DISPLAY_VALID);
- aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
-
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
- AVI_SAME_AS_PIC_ASPECT_RATIO);
+ /*
+ * Set the aspect ratio as per the mode, mentioned in
+ * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard
+ */
+ switch (hdata->mode_conf.aspect_ratio) {
+ case HDMI_PICTURE_ASPECT_4_3:
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
+ hdata->mode_conf.aspect_ratio |
+ AVI_4_3_CENTER_RATIO);
+ break;
+ case HDMI_PICTURE_ASPECT_16_9:
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
+ hdata->mode_conf.aspect_ratio |
+ AVI_16_9_CENTER_RATIO);
+ break;
+ case HDMI_PICTURE_ASPECT_NONE:
+ default:
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
+ hdata->mode_conf.aspect_ratio |
+ AVI_SAME_AS_PIC_ASPECT_RATIO);
+ break;
+ }
vic = hdata->mode_conf.cea_video_id;
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
@@ -728,31 +804,46 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
}
}
-static bool hdmi_is_connected(void *ctx)
+static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
+ bool force)
{
- struct hdmi_context *hdata = ctx;
+ struct hdmi_context *hdata = ctx_from_connector(connector);
+
+ return hdata->hpd ? connector_status_connected :
+ connector_status_disconnected;
+}
- return hdata->hpd;
+static void hdmi_connector_destroy(struct drm_connector *connector)
+{
}
-static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
+static struct drm_connector_funcs hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = hdmi_detect,
+ .destroy = hdmi_connector_destroy,
+};
+
+static int hdmi_get_modes(struct drm_connector *connector)
{
- struct edid *raw_edid;
- struct hdmi_context *hdata = ctx;
+ struct hdmi_context *hdata = ctx_from_connector(connector);
+ struct edid *edid;
- if (!hdata->ddc_port)
- return ERR_PTR(-ENODEV);
+ if (!hdata->ddc_adpt)
+ return -ENODEV;
- raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
- if (!raw_edid)
- return ERR_PTR(-ENODEV);
+ edid = drm_get_edid(connector, hdata->ddc_adpt);
+ if (!edid)
+ return -ENODEV;
- hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
+ hdata->dvi_mode = !drm_detect_hdmi_monitor(edid);
DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
- raw_edid->width_cm, raw_edid->height_cm);
+ edid->width_cm, edid->height_cm);
- return raw_edid;
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ return drm_add_edid_modes(connector, edid);
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
@@ -777,9 +868,10 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
return -EINVAL;
}
-static int hdmi_check_mode(void *ctx, struct drm_display_mode *mode)
+static int hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- struct hdmi_context *hdata = ctx;
+ struct hdmi_context *hdata = ctx_from_connector(connector);
int ret;
DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
@@ -787,12 +879,103 @@ static int hdmi_check_mode(void *ctx, struct drm_display_mode *mode)
(mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
false, mode->clock * 1000);
+ ret = mixer_check_mode(mode);
+ if (ret)
+ return MODE_BAD;
+
ret = hdmi_find_phy_conf(hdata, mode->clock * 1000);
if (ret < 0)
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
+{
+ struct hdmi_context *hdata = ctx_from_connector(connector);
+
+ return hdata->encoder;
+}
+
+static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
+ .get_modes = hdmi_get_modes,
+ .mode_valid = hdmi_mode_valid,
+ .best_encoder = hdmi_best_encoder,
+};
+
+static int hdmi_create_connector(struct exynos_drm_display *display,
+ struct drm_encoder *encoder)
+{
+ struct hdmi_context *hdata = display->ctx;
+ struct drm_connector *connector = &hdata->connector;
+ int ret;
+
+ hdata->encoder = encoder;
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(hdata->drm_dev, connector,
+ &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
return ret;
+ }
+
+ drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static int hdmi_initialize(struct exynos_drm_display *display,
+ struct drm_device *drm_dev)
+{
+ struct hdmi_context *hdata = display->ctx;
+
+ hdata->drm_dev = drm_dev;
+
return 0;
}
+static void hdmi_mode_fixup(struct exynos_drm_display *display,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_display_mode *m;
+ int mode_ok;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ mode_ok = hdmi_mode_valid(connector, adjusted_mode);
+
+ /* just return if user desired mode exists. */
+ if (mode_ok == MODE_OK)
+ return;
+
+ /*
+ * otherwise, find the most suitable mode among modes and change it
+ * to adjusted_mode.
+ */
+ list_for_each_entry(m, &connector->modes, head) {
+ mode_ok = hdmi_mode_valid(connector, m);
+
+ if (mode_ok == MODE_OK) {
+ DRM_INFO("desired mode doesn't exist so\n");
+ DRM_INFO("use the most suitable mode among modes.\n");
+
+ DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+
+ drm_mode_copy(adjusted_mode, m);
+ break;
+ }
+ }
+}
+
static void hdmi_set_acr(u32 freq, u8 *acr)
{
u32 n, cts;
@@ -1421,6 +1604,7 @@ static void hdmi_v13_mode_set(struct hdmi_context *hdata,
hdata->mode_conf.cea_video_id =
drm_match_cea_mode((struct drm_display_mode *)m);
hdata->mode_conf.pixel_clock = m->clock * 1000;
+ hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
@@ -1517,6 +1701,7 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
hdata->mode_conf.cea_video_id =
drm_match_cea_mode((struct drm_display_mode *)m);
hdata->mode_conf.pixel_clock = m->clock * 1000;
+ hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
hdmi_set_reg(core->v_line, 2, m->vtotal);
@@ -1618,9 +1803,10 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
hdmi_set_reg(tg->tg_3d, 1, 0x0);
}
-static void hdmi_mode_set(void *ctx, struct drm_display_mode *mode)
+static void hdmi_mode_set(struct exynos_drm_display *display,
+ struct drm_display_mode *mode)
{
- struct hdmi_context *hdata = ctx;
+ struct hdmi_context *hdata = display->ctx;
struct drm_display_mode *m = mode;
DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
@@ -1634,16 +1820,9 @@ static void hdmi_mode_set(void *ctx, struct drm_display_mode *mode)
hdmi_v14_mode_set(hdata, mode);
}
-static void hdmi_get_max_resol(void *ctx, unsigned int *width,
- unsigned int *height)
-{
- *width = MAX_WIDTH;
- *height = MAX_HEIGHT;
-}
-
-static void hdmi_commit(void *ctx)
+static void hdmi_commit(struct exynos_drm_display *display)
{
- struct hdmi_context *hdata = ctx;
+ struct hdmi_context *hdata = display->ctx;
mutex_lock(&hdata->hdmi_mutex);
if (!hdata->powered) {
@@ -1655,8 +1834,9 @@ static void hdmi_commit(void *ctx)
hdmi_conf_apply(hdata);
}
-static void hdmi_poweron(struct hdmi_context *hdata)
+static void hdmi_poweron(struct exynos_drm_display *display)
{
+ struct hdmi_context *hdata = display->ctx;
struct hdmi_resources *res = &hdata->res;
mutex_lock(&hdata->hdmi_mutex);
@@ -1669,6 +1849,8 @@ static void hdmi_poweron(struct hdmi_context *hdata)
mutex_unlock(&hdata->hdmi_mutex);
+ pm_runtime_get_sync(hdata->dev);
+
if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
DRM_DEBUG_KMS("failed to enable regulator bulk\n");
@@ -1677,10 +1859,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
clk_prepare_enable(res->sclk_hdmi);
hdmiphy_poweron(hdata);
+ hdmi_commit(display);
}
-static void hdmi_poweroff(struct hdmi_context *hdata)
+static void hdmi_poweroff(struct exynos_drm_display *display)
{
+ struct hdmi_context *hdata = display->ctx;
struct hdmi_resources *res = &hdata->res;
mutex_lock(&hdata->hdmi_mutex);
@@ -1700,30 +1884,27 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
clk_disable_unprepare(res->hdmiphy);
regulator_bulk_disable(res->regul_count, res->regul_bulk);
- mutex_lock(&hdata->hdmi_mutex);
+ pm_runtime_put_sync(hdata->dev);
+ mutex_lock(&hdata->hdmi_mutex);
hdata->powered = false;
out:
mutex_unlock(&hdata->hdmi_mutex);
}
-static void hdmi_dpms(void *ctx, int mode)
+static void hdmi_dpms(struct exynos_drm_display *display, int mode)
{
- struct hdmi_context *hdata = ctx;
-
DRM_DEBUG_KMS("mode %d\n", mode);
switch (mode) {
case DRM_MODE_DPMS_ON:
- if (pm_runtime_suspended(hdata->dev))
- pm_runtime_get_sync(hdata->dev);
+ hdmi_poweron(display);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (!pm_runtime_suspended(hdata->dev))
- pm_runtime_put_sync(hdata->dev);
+ hdmi_poweroff(display);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -1731,30 +1912,30 @@ static void hdmi_dpms(void *ctx, int mode)
}
}
-static struct exynos_hdmi_ops hdmi_ops = {
- /* display */
- .is_connected = hdmi_is_connected,
- .get_edid = hdmi_get_edid,
- .check_mode = hdmi_check_mode,
-
- /* manager */
+static struct exynos_drm_display_ops hdmi_display_ops = {
+ .initialize = hdmi_initialize,
+ .create_connector = hdmi_create_connector,
+ .mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
- .get_max_resol = hdmi_get_max_resol,
- .commit = hdmi_commit,
.dpms = hdmi_dpms,
+ .commit = hdmi_commit,
+};
+
+static struct exynos_drm_display hdmi_display = {
+ .type = EXYNOS_DISPLAY_TYPE_HDMI,
+ .ops = &hdmi_display_ops,
};
static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
- struct exynos_drm_hdmi_context *ctx = arg;
- struct hdmi_context *hdata = ctx->ctx;
+ struct hdmi_context *hdata = arg;
mutex_lock(&hdata->hdmi_mutex);
hdata->hpd = gpio_get_value(hdata->hpd_gpio);
mutex_unlock(&hdata->hdmi_mutex);
- if (ctx->drm_dev)
- drm_helper_hpd_irq_event(ctx->drm_dev);
+ if (hdata->drm_dev)
+ drm_helper_hpd_irq_event(hdata->drm_dev);
return IRQ_HANDLED;
}
@@ -1830,20 +2011,6 @@ fail:
return -ENODEV;
}
-static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
-
-void hdmi_attach_ddc_client(struct i2c_client *ddc)
-{
- if (ddc)
- hdmi_ddc = ddc;
-}
-
-void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
-{
- if (hdmiphy)
- hdmi_hdmiphy = hdmiphy;
-}
-
static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
(struct device *dev)
{
@@ -1871,10 +2038,10 @@ err_data:
static struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos5-hdmi",
- .data = (void *)HDMI_TYPE14,
+ .data = &exynos5_hdmi_driver_data,
}, {
.compatible = "samsung,exynos4212-hdmi",
- .data = (void *)HDMI_TYPE14,
+ .data = &exynos4212_hdmi_driver_data,
}, {
/* end node */
}
@@ -1883,11 +2050,12 @@ static struct of_device_id hdmi_match_types[] = {
static int hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct exynos_drm_hdmi_context *drm_hdmi_ctx;
struct hdmi_context *hdata;
struct s5p_hdmi_platform_data *pdata;
struct resource *res;
const struct of_device_id *match;
+ struct device_node *ddc_node, *phy_node;
+ struct hdmi_driver_data *drv_data;
int ret;
if (!dev->of_node)
@@ -1897,25 +2065,20 @@ static int hdmi_probe(struct platform_device *pdev)
if (!pdata)
return -EINVAL;
- drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL);
- if (!drm_hdmi_ctx)
- return -ENOMEM;
-
hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
if (!hdata)
return -ENOMEM;
mutex_init(&hdata->hdmi_mutex);
- drm_hdmi_ctx->ctx = (void *)hdata;
- hdata->parent_ctx = (void *)drm_hdmi_ctx;
-
- platform_set_drvdata(pdev, drm_hdmi_ctx);
+ platform_set_drvdata(pdev, &hdmi_display);
match = of_match_node(hdmi_match_types, dev->of_node);
if (!match)
return -ENODEV;
- hdata->type = (enum hdmi_type)match->data;
+
+ drv_data = (struct hdmi_driver_data *)match->data;
+ hdata->type = drv_data->type;
hdata->hpd_gpio = pdata->hpd_gpio;
hdata->dev = dev;
@@ -1938,21 +2101,34 @@ static int hdmi_probe(struct platform_device *pdev)
}
/* DDC i2c driver */
- if (i2c_add_driver(&ddc_driver)) {
- DRM_ERROR("failed to register ddc i2c driver\n");
- return -ENOENT;
+ ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
+ if (!ddc_node) {
+ DRM_ERROR("Failed to find ddc node in device tree\n");
+ return -ENODEV;
+ }
+ hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
+ if (!hdata->ddc_adpt) {
+ DRM_ERROR("Failed to get ddc i2c adapter by node\n");
+ return -ENODEV;
}
- hdata->ddc_port = hdmi_ddc;
+ /* Not support APB PHY yet. */
+ if (drv_data->is_apb_phy)
+ return -EPERM;
/* hdmiphy i2c driver */
- if (i2c_add_driver(&hdmiphy_driver)) {
- DRM_ERROR("failed to register hdmiphy i2c driver\n");
- ret = -ENOENT;
+ phy_node = of_parse_phandle(dev->of_node, "phy", 0);
+ if (!phy_node) {
+ DRM_ERROR("Failed to find hdmiphy node in device tree\n");
+ ret = -ENODEV;
+ goto err_ddc;
+ }
+ hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node);
+ if (!hdata->hdmiphy_port) {
+ DRM_ERROR("Failed to get hdmi phy i2c client from node\n");
+ ret = -ENODEV;
goto err_ddc;
}
-
- hdata->hdmiphy_port = hdmi_hdmiphy;
hdata->irq = gpio_to_irq(hdata->hpd_gpio);
if (hdata->irq < 0) {
@@ -1966,119 +2142,45 @@ static int hdmi_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
hdmi_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "hdmi", drm_hdmi_ctx);
+ "hdmi", hdata);
if (ret) {
DRM_ERROR("failed to register hdmi interrupt\n");
goto err_hdmiphy;
}
- /* Attach HDMI Driver to common hdmi. */
- exynos_hdmi_drv_attach(drm_hdmi_ctx);
-
- /* register specific callbacks to common hdmi. */
- exynos_hdmi_ops_register(&hdmi_ops);
-
pm_runtime_enable(dev);
+ hdmi_display.ctx = hdata;
+ exynos_drm_display_register(&hdmi_display);
+
return 0;
err_hdmiphy:
- i2c_del_driver(&hdmiphy_driver);
+ put_device(&hdata->hdmiphy_port->dev);
err_ddc:
- i2c_del_driver(&ddc_driver);
+ put_device(&hdata->ddc_adpt->dev);
return ret;
}
static int hdmi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct exynos_drm_display *display = get_hdmi_display(dev);
+ struct hdmi_context *hdata = display->ctx;
- pm_runtime_disable(dev);
-
- /* hdmiphy i2c driver */
- i2c_del_driver(&hdmiphy_driver);
- /* DDC i2c driver */
- i2c_del_driver(&ddc_driver);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int hdmi_suspend(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
- struct hdmi_context *hdata = ctx->ctx;
-
- disable_irq(hdata->irq);
-
- hdata->hpd = false;
- if (ctx->drm_dev)
- drm_helper_hpd_irq_event(ctx->drm_dev);
-
- if (pm_runtime_suspended(dev)) {
- DRM_DEBUG_KMS("Already suspended\n");
- return 0;
- }
-
- hdmi_poweroff(hdata);
+ put_device(&hdata->hdmiphy_port->dev);
+ put_device(&hdata->ddc_adpt->dev);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
-static int hdmi_resume(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
- struct hdmi_context *hdata = ctx->ctx;
-
- hdata->hpd = gpio_get_value(hdata->hpd_gpio);
-
- enable_irq(hdata->irq);
-
- if (!pm_runtime_suspended(dev)) {
- DRM_DEBUG_KMS("Already resumed\n");
- return 0;
- }
-
- hdmi_poweron(hdata);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
-static int hdmi_runtime_suspend(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
- struct hdmi_context *hdata = ctx->ctx;
-
- hdmi_poweroff(hdata);
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
- struct hdmi_context *hdata = ctx->ctx;
-
- hdmi_poweron(hdata);
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
- SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
-};
-
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
.remove = hdmi_remove,
.driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
- .pm = &hdmi_pm_ops,
.of_match_table = hdmi_match_types,
},
};
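
hdmi_probe() above stops registering its own ddc/hdmiphy i2c drivers and instead resolves both through device-tree phandles. A condensed kernel-style sketch of that lookup with an invented helper name; error handling and of_node_put() are trimmed for brevity, and it assumes the same "ddc"/"phy" properties and the <linux/of.h> and <linux/i2c.h> interfaces the patch already uses:

/* Sketch only: resolve the DDC adapter and PHY client from DT phandles,
 * the way hdmi_probe() now does. */
static int hdmi_dt_lookup(struct device *dev, struct i2c_adapter **ddc,
			  struct i2c_client **phy)
{
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "ddc", 0);
	if (!np)
		return -ENODEV;
	*ddc = of_find_i2c_adapter_by_node(np);
	if (!*ddc)
		return -ENODEV;

	np = of_parse_phandle(dev->of_node, "phy", 0);
	if (!np)
		return -ENODEV;
	*phy = of_find_i2c_device_by_node(np);
	if (!*phy)
		return -ENODEV;

	return 0;	/* caller balances with put_device() on both */
}
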
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 2dfa48c76f54..ce288818d2c0 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -36,10 +36,13 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
-#include "exynos_drm_hdmi.h"
#include "exynos_drm_iommu.h"
+#include "exynos_mixer.h"
-#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_mixer_manager(dev) platform_get_drvdata(to_platform_device(dev))
+
+#define MIXER_WIN_NR 3
+#define MIXER_DEFAULT_WIN 0
struct hdmi_win_data {
dma_addr_t dma_addr;
@@ -82,6 +85,7 @@ enum mixer_version_id {
};
struct mixer_context {
+ struct platform_device *pdev;
struct device *dev;
struct drm_device *drm_dev;
int pipe;
@@ -94,7 +98,6 @@ struct mixer_context {
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
- void *parent_ctx;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
};
@@ -685,31 +688,196 @@ static void mixer_win_reset(struct mixer_context *ctx)
spin_unlock_irqrestore(&res->reg_slock, flags);
}
-static int mixer_iommu_on(void *ctx, bool enable)
+static irqreturn_t mixer_irq_handler(int irq, void *arg)
+{
+ struct mixer_context *ctx = arg;
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val, base, shadow;
+
+ spin_lock(&res->reg_slock);
+
+ /* read interrupt status for handling and clearing flags for VSYNC */
+ val = mixer_reg_read(res, MXR_INT_STATUS);
+
+ /* handling VSYNC */
+ if (val & MXR_INT_STATUS_VSYNC) {
+ /* interlace scan need to check shadow register */
+ if (ctx->interlace) {
+ base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
+ shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+ if (base != shadow)
+ goto out;
+
+ base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
+ shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+ if (base != shadow)
+ goto out;
+ }
+
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ wake_up(&ctx->wait_vsync_queue);
+ }
+ }
+
+out:
+ /* clear interrupts */
+ if (~val & MXR_INT_EN_VSYNC) {
+ /* vsync interrupt use different bit for read and clear */
+ val &= ~MXR_INT_EN_VSYNC;
+ val |= MXR_INT_CLEAR_VSYNC;
+ }
+ mixer_reg_write(res, MXR_INT_STATUS, val);
+
+ spin_unlock(&res->reg_slock);
+
+ return IRQ_HANDLED;
+}
+
+static int mixer_resources_init(struct mixer_context *mixer_ctx)
{
- struct exynos_drm_hdmi_context *drm_hdmi_ctx;
- struct mixer_context *mdata = ctx;
- struct drm_device *drm_dev;
+ struct device *dev = &mixer_ctx->pdev->dev;
+ struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+ struct resource *res;
+ int ret;
- drm_hdmi_ctx = mdata->parent_ctx;
- drm_dev = drm_hdmi_ctx->drm_dev;
+ spin_lock_init(&mixer_res->reg_slock);
- if (is_drm_iommu_supported(drm_dev)) {
- if (enable)
- return drm_iommu_attach_device(drm_dev, mdata->dev);
+ mixer_res->mixer = devm_clk_get(dev, "mixer");
+ if (IS_ERR(mixer_res->mixer)) {
+ dev_err(dev, "failed to get clock 'mixer'\n");
+ return -ENODEV;
+ }
- drm_iommu_detach_device(drm_dev, mdata->dev);
+ mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
+ if (IS_ERR(mixer_res->sclk_hdmi)) {
+ dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+ return -ENODEV;
+ }
+ res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ return -ENXIO;
}
+
+ mixer_res->mixer_regs = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (mixer_res->mixer_regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ return -ENXIO;
+ }
+
+ res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(dev, "get interrupt resource failed.\n");
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(dev, res->start, mixer_irq_handler,
+ 0, "drm_mixer", mixer_ctx);
+ if (ret) {
+ dev_err(dev, "request interrupt failed.\n");
+ return ret;
+ }
+ mixer_res->irq = res->start;
+
return 0;
}
-static int mixer_enable_vblank(void *ctx, int pipe)
+static int vp_resources_init(struct mixer_context *mixer_ctx)
{
- struct mixer_context *mixer_ctx = ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
+ struct device *dev = &mixer_ctx->pdev->dev;
+ struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+ struct resource *res;
+
+ mixer_res->vp = devm_clk_get(dev, "vp");
+ if (IS_ERR(mixer_res->vp)) {
+ dev_err(dev, "failed to get clock 'vp'\n");
+ return -ENODEV;
+ }
+ mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
+ if (IS_ERR(mixer_res->sclk_mixer)) {
+ dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+ return -ENODEV;
+ }
+ mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
+ if (IS_ERR(mixer_res->sclk_dac)) {
+ dev_err(dev, "failed to get clock 'sclk_dac'\n");
+ return -ENODEV;
+ }
+
+ if (mixer_res->sclk_hdmi)
+ clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+
+ res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ return -ENXIO;
+ }
+ mixer_res->vp_regs = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (mixer_res->vp_regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int mixer_initialize(struct exynos_drm_manager *mgr,
+ struct drm_device *drm_dev, int pipe)
+{
+ int ret;
+ struct mixer_context *mixer_ctx = mgr->ctx;
+
+ mixer_ctx->drm_dev = drm_dev;
mixer_ctx->pipe = pipe;
+ /* acquire resources: regs, irqs, clocks */
+ ret = mixer_resources_init(mixer_ctx);
+ if (ret) {
+ DRM_ERROR("mixer_resources_init failed ret=%d\n", ret);
+ return ret;
+ }
+
+ if (mixer_ctx->vp_enabled) {
+ /* acquire vp resources: regs, irqs, clocks */
+ ret = vp_resources_init(mixer_ctx);
+ if (ret) {
+ DRM_ERROR("vp_resources_init failed ret=%d\n", ret);
+ return ret;
+ }
+ }
+
+ if (!is_drm_iommu_supported(mixer_ctx->drm_dev))
+ return 0;
+
+ return drm_iommu_attach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+}
+
+static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
+{
+ struct mixer_context *mixer_ctx = mgr->ctx;
+
+ if (is_drm_iommu_supported(mixer_ctx->drm_dev))
+ drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+}
+
+static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
+{
+ struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+ if (!mixer_ctx->powered) {
+ mixer_ctx->int_en |= MXR_INT_EN_VSYNC;
+ return 0;
+ }
+
/* enable vsync interrupt */
mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
MXR_INT_EN_VSYNC);
@@ -717,19 +885,19 @@ static int mixer_enable_vblank(void *ctx, int pipe)
return 0;
}
-static void mixer_disable_vblank(void *ctx)
+static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
{
- struct mixer_context *mixer_ctx = ctx;
+ struct mixer_context *mixer_ctx = mgr->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
/* disable vsync interrupt */
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_win_mode_set(void *ctx,
- struct exynos_drm_overlay *overlay)
+static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
+ struct exynos_drm_overlay *overlay)
{
- struct mixer_context *mixer_ctx = ctx;
+ struct mixer_context *mixer_ctx = mgr->ctx;
struct hdmi_win_data *win_data;
int win;
@@ -778,9 +946,10 @@ static void mixer_win_mode_set(void *ctx,
win_data->scan_flags = overlay->scan_flag;
}
-static void mixer_win_commit(void *ctx, int win)
+static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
{
- struct mixer_context *mixer_ctx = ctx;
+ struct mixer_context *mixer_ctx = mgr->ctx;
+ int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("win: %d\n", win);
@@ -799,10 +968,11 @@ static void mixer_win_commit(void *ctx, int win)
mixer_ctx->win_data[win].enabled = true;
}
-static void mixer_win_disable(void *ctx, int win)
+static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
{
- struct mixer_context *mixer_ctx = ctx;
+ struct mixer_context *mixer_ctx = mgr->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
+ int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
unsigned long flags;
DRM_DEBUG_KMS("win: %d\n", win);
@@ -826,32 +996,9 @@ static void mixer_win_disable(void *ctx, int win)
mixer_ctx->win_data[win].enabled = false;
}
-static int mixer_check_mode(void *ctx, struct drm_display_mode *mode)
+static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
{
- struct mixer_context *mixer_ctx = ctx;
- u32 w, h;
-
- w = mode->hdisplay;
- h = mode->vdisplay;
-
- DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n",
- mode->hdisplay, mode->vdisplay, mode->vrefresh,
- (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
-
- if (mixer_ctx->mxr_ver == MXR_VER_0_0_0_16 ||
- mixer_ctx->mxr_ver == MXR_VER_128_0_0_184)
- return 0;
-
- if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
- (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
- (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
- return 0;
-
- return -EINVAL;
-}
-static void mixer_wait_for_vblank(void *ctx)
-{
- struct mixer_context *mixer_ctx = ctx;
+ struct mixer_context *mixer_ctx = mgr->ctx;
mutex_lock(&mixer_ctx->mixer_mutex);
if (!mixer_ctx->powered) {
@@ -872,21 +1019,23 @@ static void mixer_wait_for_vblank(void *ctx)
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
-static void mixer_window_suspend(struct mixer_context *ctx)
+static void mixer_window_suspend(struct exynos_drm_manager *mgr)
{
+ struct mixer_context *ctx = mgr->ctx;
struct hdmi_win_data *win_data;
int i;
for (i = 0; i < MIXER_WIN_NR; i++) {
win_data = &ctx->win_data[i];
win_data->resume = win_data->enabled;
- mixer_win_disable(ctx, i);
+ mixer_win_disable(mgr, i);
}
- mixer_wait_for_vblank(ctx);
+ mixer_wait_for_vblank(mgr);
}
-static void mixer_window_resume(struct mixer_context *ctx)
+static void mixer_window_resume(struct exynos_drm_manager *mgr)
{
+ struct mixer_context *ctx = mgr->ctx;
struct hdmi_win_data *win_data;
int i;
@@ -894,11 +1043,14 @@ static void mixer_window_resume(struct mixer_context *ctx)
win_data = &ctx->win_data[i];
win_data->enabled = win_data->resume;
win_data->resume = false;
+ if (win_data->enabled)
+ mixer_win_commit(mgr, i);
}
}
-static void mixer_poweron(struct mixer_context *ctx)
+static void mixer_poweron(struct exynos_drm_manager *mgr)
{
+ struct mixer_context *ctx = mgr->ctx;
struct mixer_resources *res = &ctx->mixer_res;
mutex_lock(&ctx->mixer_mutex);
@@ -909,6 +1061,8 @@ static void mixer_poweron(struct mixer_context *ctx)
ctx->powered = true;
mutex_unlock(&ctx->mixer_mutex);
+ pm_runtime_get_sync(ctx->dev);
+
clk_prepare_enable(res->mixer);
if (ctx->vp_enabled) {
clk_prepare_enable(res->vp);
@@ -918,11 +1072,12 @@ static void mixer_poweron(struct mixer_context *ctx)
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
- mixer_window_resume(ctx);
+ mixer_window_resume(mgr);
}
-static void mixer_poweroff(struct mixer_context *ctx)
+static void mixer_poweroff(struct exynos_drm_manager *mgr)
{
+ struct mixer_context *ctx = mgr->ctx;
struct mixer_resources *res = &ctx->mixer_res;
mutex_lock(&ctx->mixer_mutex);
@@ -930,7 +1085,7 @@ static void mixer_poweroff(struct mixer_context *ctx)
goto out;
mutex_unlock(&ctx->mixer_mutex);
- mixer_window_suspend(ctx);
+ mixer_window_suspend(mgr);
ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
@@ -940,6 +1095,8 @@ static void mixer_poweroff(struct mixer_context *ctx)
clk_disable_unprepare(res->sclk_mixer);
}
+ pm_runtime_put_sync(ctx->dev);
+
mutex_lock(&ctx->mixer_mutex);
ctx->powered = false;
@@ -947,20 +1104,16 @@ out:
mutex_unlock(&ctx->mixer_mutex);
}
-static void mixer_dpms(void *ctx, int mode)
+static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
{
- struct mixer_context *mixer_ctx = ctx;
-
switch (mode) {
case DRM_MODE_DPMS_ON:
- if (pm_runtime_suspended(mixer_ctx->dev))
- pm_runtime_get_sync(mixer_ctx->dev);
+ mixer_poweron(mgr);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (!pm_runtime_suspended(mixer_ctx->dev))
- pm_runtime_put_sync(mixer_ctx->dev);
+ mixer_poweroff(mgr);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -968,169 +1121,42 @@ static void mixer_dpms(void *ctx, int mode)
}
}
-static struct exynos_mixer_ops mixer_ops = {
- /* manager */
- .iommu_on = mixer_iommu_on,
- .enable_vblank = mixer_enable_vblank,
- .disable_vblank = mixer_disable_vblank,
- .wait_for_vblank = mixer_wait_for_vblank,
- .dpms = mixer_dpms,
-
- /* overlay */
- .win_mode_set = mixer_win_mode_set,
- .win_commit = mixer_win_commit,
- .win_disable = mixer_win_disable,
-
- /* display */
- .check_mode = mixer_check_mode,
-};
-
-static irqreturn_t mixer_irq_handler(int irq, void *arg)
-{
- struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
- struct mixer_context *ctx = drm_hdmi_ctx->ctx;
- struct mixer_resources *res = &ctx->mixer_res;
- u32 val, base, shadow;
-
- spin_lock(&res->reg_slock);
-
- /* read interrupt status for handling and clearing flags for VSYNC */
- val = mixer_reg_read(res, MXR_INT_STATUS);
-
- /* handling VSYNC */
- if (val & MXR_INT_STATUS_VSYNC) {
- /* interlace scan need to check shadow register */
- if (ctx->interlace) {
- base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
- shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
- if (base != shadow)
- goto out;
-
- base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
- shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
- if (base != shadow)
- goto out;
- }
-
- drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(drm_hdmi_ctx->drm_dev,
- ctx->pipe);
-
- /* set wait vsync event to zero and wake up queue. */
- if (atomic_read(&ctx->wait_vsync_event)) {
- atomic_set(&ctx->wait_vsync_event, 0);
- wake_up(&ctx->wait_vsync_queue);
- }
- }
-
-out:
- /* clear interrupts */
- if (~val & MXR_INT_EN_VSYNC) {
- /* vsync interrupt use different bit for read and clear */
- val &= ~MXR_INT_EN_VSYNC;
- val |= MXR_INT_CLEAR_VSYNC;
- }
- mixer_reg_write(res, MXR_INT_STATUS, val);
-
- spin_unlock(&res->reg_slock);
-
- return IRQ_HANDLED;
-}
-
-static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
- struct platform_device *pdev)
+/* Only valid for Mixer version 16.0.33.0 */
+int mixer_check_mode(struct drm_display_mode *mode)
{
- struct mixer_context *mixer_ctx = ctx->ctx;
- struct device *dev = &pdev->dev;
- struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
- struct resource *res;
- int ret;
-
- spin_lock_init(&mixer_res->reg_slock);
-
- mixer_res->mixer = devm_clk_get(dev, "mixer");
- if (IS_ERR(mixer_res->mixer)) {
- dev_err(dev, "failed to get clock 'mixer'\n");
- return -ENODEV;
- }
-
- mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
- if (IS_ERR(mixer_res->sclk_hdmi)) {
- dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
- return -ENODEV;
- }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(dev, "get memory resource failed.\n");
- return -ENXIO;
- }
+ u32 w, h;
- mixer_res->mixer_regs = devm_ioremap(dev, res->start,
- resource_size(res));
- if (mixer_res->mixer_regs == NULL) {
- dev_err(dev, "register mapping failed.\n");
- return -ENXIO;
- }
+ w = mode->hdisplay;
+ h = mode->vdisplay;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(dev, "get interrupt resource failed.\n");
- return -ENXIO;
- }
+ DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ mode->hdisplay, mode->vdisplay, mode->vrefresh,
+ (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
- ret = devm_request_irq(dev, res->start, mixer_irq_handler,
- 0, "drm_mixer", ctx);
- if (ret) {
- dev_err(dev, "request interrupt failed.\n");
- return ret;
- }
- mixer_res->irq = res->start;
+ if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
+ (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
+ (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
+ return 0;
- return 0;
+ return -EINVAL;
}
-static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
- struct platform_device *pdev)
-{
- struct mixer_context *mixer_ctx = ctx->ctx;
- struct device *dev = &pdev->dev;
- struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
- struct resource *res;
-
- mixer_res->vp = devm_clk_get(dev, "vp");
- if (IS_ERR(mixer_res->vp)) {
- dev_err(dev, "failed to get clock 'vp'\n");
- return -ENODEV;
- }
- mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
- if (IS_ERR(mixer_res->sclk_mixer)) {
- dev_err(dev, "failed to get clock 'sclk_mixer'\n");
- return -ENODEV;
- }
- mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
- if (IS_ERR(mixer_res->sclk_dac)) {
- dev_err(dev, "failed to get clock 'sclk_dac'\n");
- return -ENODEV;
- }
-
- if (mixer_res->sclk_hdmi)
- clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res == NULL) {
- dev_err(dev, "get memory resource failed.\n");
- return -ENXIO;
- }
-
- mixer_res->vp_regs = devm_ioremap(dev, res->start,
- resource_size(res));
- if (mixer_res->vp_regs == NULL) {
- dev_err(dev, "register mapping failed.\n");
- return -ENXIO;
- }
+static struct exynos_drm_manager_ops mixer_manager_ops = {
+ .initialize = mixer_initialize,
+ .remove = mixer_mgr_remove,
+ .dpms = mixer_dpms,
+ .enable_vblank = mixer_enable_vblank,
+ .disable_vblank = mixer_disable_vblank,
+ .wait_for_vblank = mixer_wait_for_vblank,
+ .win_mode_set = mixer_win_mode_set,
+ .win_commit = mixer_win_commit,
+ .win_disable = mixer_win_disable,
+};
- return 0;
-}
+static struct exynos_drm_manager mixer_manager = {
+ .type = EXYNOS_DISPLAY_TYPE_HDMI,
+ .ops = &mixer_manager_ops,
+};
static struct mixer_drv_data exynos5420_mxr_drv_data = {
.version = MXR_VER_128_0_0_184,
@@ -1177,21 +1203,16 @@ static struct of_device_id mixer_match_types[] = {
static int mixer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct exynos_drm_hdmi_context *drm_hdmi_ctx;
struct mixer_context *ctx;
struct mixer_drv_data *drv;
- int ret;
dev_info(dev, "probe start\n");
- drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
- GFP_KERNEL);
- if (!drm_hdmi_ctx)
- return -ENOMEM;
-
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_ERROR("failed to alloc mixer context.\n");
return -ENOMEM;
+ }
mutex_init(&ctx->mixer_mutex);
@@ -1204,46 +1225,20 @@ static int mixer_probe(struct platform_device *pdev)
platform_get_device_id(pdev)->driver_data;
}
+ ctx->pdev = pdev;
ctx->dev = dev;
- ctx->parent_ctx = (void *)drm_hdmi_ctx;
- drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->mxr_ver = drv->version;
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
- platform_set_drvdata(pdev, drm_hdmi_ctx);
-
- /* acquire resources: regs, irqs, clocks */
- ret = mixer_resources_init(drm_hdmi_ctx, pdev);
- if (ret) {
- DRM_ERROR("mixer_resources_init failed\n");
- goto fail;
- }
-
- if (ctx->vp_enabled) {
- /* acquire vp resources: regs, irqs, clocks */
- ret = vp_resources_init(drm_hdmi_ctx, pdev);
- if (ret) {
- DRM_ERROR("vp_resources_init failed\n");
- goto fail;
- }
- }
-
- /* attach mixer driver to common hdmi. */
- exynos_mixer_drv_attach(drm_hdmi_ctx);
-
- /* register specific callback point to common hdmi. */
- exynos_mixer_ops_register(&mixer_ops);
+ mixer_manager.ctx = ctx;
+ platform_set_drvdata(pdev, &mixer_manager);
+ exynos_drm_manager_register(&mixer_manager);
pm_runtime_enable(dev);
return 0;
-
-
-fail:
- dev_info(dev, "probe failed\n");
- return ret;
}
static int mixer_remove(struct platform_device *pdev)
@@ -1255,70 +1250,10 @@ static int mixer_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int mixer_suspend(struct device *dev)
-{
- struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
- struct mixer_context *ctx = drm_hdmi_ctx->ctx;
-
- if (pm_runtime_suspended(dev)) {
- DRM_DEBUG_KMS("Already suspended\n");
- return 0;
- }
-
- mixer_poweroff(ctx);
-
- return 0;
-}
-
-static int mixer_resume(struct device *dev)
-{
- struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
- struct mixer_context *ctx = drm_hdmi_ctx->ctx;
-
- if (!pm_runtime_suspended(dev)) {
- DRM_DEBUG_KMS("Already resumed\n");
- return 0;
- }
-
- mixer_poweron(ctx);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
-static int mixer_runtime_suspend(struct device *dev)
-{
- struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
- struct mixer_context *ctx = drm_hdmi_ctx->ctx;
-
- mixer_poweroff(ctx);
-
- return 0;
-}
-
-static int mixer_runtime_resume(struct device *dev)
-{
- struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
- struct mixer_context *ctx = drm_hdmi_ctx->ctx;
-
- mixer_poweron(ctx);
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops mixer_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
- SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
-};
-
struct platform_driver mixer_driver = {
.driver = {
.name = "exynos-mixer",
.owner = THIS_MODULE,
- .pm = &mixer_pm_ops,
.of_match_table = mixer_match_types,
},
.probe = mixer_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h
new file mode 100644
index 000000000000..3811e417f0e9
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _EXYNOS_MIXER_H_
+#define _EXYNOS_MIXER_H_
+
+/* This function returns 0 if the given timing is valid for the mixer */
+int mixer_check_mode(struct drm_display_mode *mode);
+
+#endif
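
A minimal usage sketch of the new mixer_check_mode() helper, assuming an HDMI connector filters its modes through it from a drm mode_valid callback; the callback below is hypothetical and is not part of this patch. mixer_check_mode() returns 0 only for timings inside the SD, 720p and 1080p windows listed in the hunk above.

/* Hypothetical caller, for illustration only */
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

#include "exynos_mixer.h"

static int example_hdmi_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	/* Reject any timing the mixer cannot scan out */
	if (mixer_check_mode(mode))
		return MODE_BAD;

	return MODE_OK;
}
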
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 508cf99a292d..17f928ec84ea 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -10,7 +10,6 @@ config DRM_GMA500
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
- select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
help
Say yes for an experimental 2D KMS framebuffer driver for the
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index e9064dd9045d..b15315576376 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -13,9 +13,11 @@ gma500_gfx-y += \
intel_i2c.o \
intel_gmbus.o \
mmu.o \
+ blitter.o \
power.o \
psb_drv.o \
gma_display.o \
+ gma_device.o \
psb_intel_display.o \
psb_intel_lvds.o \
psb_intel_modes.o \
diff --git a/drivers/gpu/drm/gma500/blitter.c b/drivers/gpu/drm/gma500/blitter.c
new file mode 100644
index 000000000000..9cd54a6fb899
--- /dev/null
+++ b/drivers/gpu/drm/gma500/blitter.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014, Patrik Jakobsson
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#include "psb_drv.h"
+
+#include "blitter.h"
+#include "psb_reg.h"
+
+/* Wait for the blitter to be completely idle */
+int gma_blt_wait_idle(struct drm_psb_private *dev_priv)
+{
+ unsigned long stop = jiffies + HZ;
+ int busy = 1;
+
+ /* NOP for Cedarview */
+ if (IS_CDV(dev_priv->dev))
+ return 0;
+
+ /* First do a quick check */
+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
+ return 0;
+
+ do {
+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+ } while (busy && !time_after_eq(jiffies, stop));
+
+ if (busy)
+ return -EBUSY;
+
+ do {
+ busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+ _PSB_C2B_STATUS_BUSY) != 0);
+ } while (busy && !time_after_eq(jiffies, stop));
+
+ /* If still busy, we probably have a hang */
+ return (busy) ? -EBUSY : 0;
+}
diff --git a/drivers/gpu/drm/gma500/blitter.h b/drivers/gpu/drm/gma500/blitter.h
new file mode 100644
index 000000000000..b83648df590d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/blitter.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014, Patrik Jakobsson
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#ifndef __BLITTER_H
+#define __BLITTER_H
+
+extern int gma_blt_wait_idle(struct drm_psb_private *dev_priv);
+
+#endif
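
A usage sketch for the new blitter helper, mirroring the way the gtt.c hunk further down calls it before tearing down a mapping; the wrapper function here is hypothetical. gma_blt_wait_idle() gives a bounded (roughly one second) wait and returns -EBUSY if the 2D engine never goes idle.

/* Hypothetical wrapper, for illustration only */
#include "psb_drv.h"
#include "blitter.h"

static int example_quiesce_blitter(struct drm_psb_private *dev_priv)
{
	int ret;

	/* Bounded wait; -EBUSY here usually means the 2D engine is hung */
	ret = gma_blt_wait_idle(dev_priv);
	if (ret)
		DRM_ERROR("Blitter failed to go idle\n");

	return ret;
}
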
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 5a9a6a3063a8..3531f90e53d0 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -26,6 +26,7 @@
#include "psb_intel_reg.h"
#include "intel_bios.h"
#include "cdv_device.h"
+#include "gma_device.h"
#define VGA_SR_INDEX 0x3c4
#define VGA_SR_DATA 0x3c5
@@ -426,43 +427,6 @@ static int cdv_power_up(struct drm_device *dev)
return 0;
}
-/* FIXME ? - shared with Poulsbo */
-static void cdv_get_core_freq(struct drm_device *dev)
-{
- uint32_t clock;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
- pci_read_config_dword(pci_root, 0xD4, &clock);
- pci_dev_put(pci_root);
-
- switch (clock & 0x07) {
- case 0:
- dev_priv->core_freq = 100;
- break;
- case 1:
- dev_priv->core_freq = 133;
- break;
- case 2:
- dev_priv->core_freq = 150;
- break;
- case 3:
- dev_priv->core_freq = 178;
- break;
- case 4:
- dev_priv->core_freq = 200;
- break;
- case 5:
- case 6:
- case 7:
- dev_priv->core_freq = 266;
- break;
- default:
- dev_priv->core_freq = 0;
- }
-}
-
static void cdv_hotplug_work_func(struct work_struct *work)
{
struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
@@ -618,7 +582,7 @@ static int cdv_chip_setup(struct drm_device *dev)
if (pci_enable_msi(dev->pdev))
dev_warn(dev->dev, "Enabling MSI failed!\n");
dev_priv->regmap = cdv_regmap;
- cdv_get_core_freq(dev);
+ gma_get_core_freq(dev);
psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
cdv_hotplug_enable(dev, false);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 661af492173d..c18268cd516e 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -81,13 +81,6 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -224,7 +217,7 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector,
static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
.dpms = cdv_intel_crt_dpms,
- .mode_fixup = cdv_intel_crt_mode_fixup,
+ .mode_fixup = gma_encoder_mode_fixup,
.prepare = gma_encoder_prepare,
.commit = gma_encoder_commit,
.mode_set = cdv_intel_crt_mode_set,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 8fbfa06da62d..66727328832d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -412,8 +412,11 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
int refclk,
struct gma_clock_t *best_clock)
{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct gma_clock_t clock;
- if (refclk == 27000) {
+
+ switch (refclk) {
+ case 27000:
if (target < 200000) {
clock.p1 = 2;
clock.p2 = 10;
@@ -427,7 +430,9 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
clock.m1 = 0;
clock.m2 = 98;
}
- } else if (refclk == 100000) {
+ break;
+
+ case 100000:
if (target < 200000) {
clock.p1 = 2;
clock.p2 = 10;
@@ -441,12 +446,13 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
clock.m1 = 0;
clock.m2 = 133;
}
- } else
+ break;
+
+ default:
return false;
- clock.m = clock.m2 + 2;
- clock.p = clock.p1 * clock.p2;
- clock.vco = (refclk * clock.m) / clock.n;
- clock.dot = clock.vco / clock.p;
+ }
+
+ gma_crtc->clock_funcs->clock(refclk, &clock);
memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
return true;
}
@@ -463,54 +469,11 @@ static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
crtc = dev_priv->pipe_to_crtc_mapping[pipe];
gma_crtc = to_gma_crtc(crtc);
- if (crtc->fb == NULL || !gma_crtc->active)
+ if (crtc->primary->fb == NULL || !gma_crtc->active)
return false;
return true;
}
-static bool cdv_intel_single_pipe_active (struct drm_device *dev)
-{
- uint32_t pipe_enabled = 0;
-
- if (cdv_intel_pipe_enabled(dev, 0))
- pipe_enabled |= FIFO_PIPEA;
-
- if (cdv_intel_pipe_enabled(dev, 1))
- pipe_enabled |= FIFO_PIPEB;
-
-
- DRM_DEBUG_KMS("pipe enabled %x\n", pipe_enabled);
-
- if (pipe_enabled == FIFO_PIPEA || pipe_enabled == FIFO_PIPEB)
- return true;
- else
- return false;
-}
-
-static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
-
- if (gma_crtc->pipe != 1)
- return false;
-
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct gma_encoder *gma_encoder =
- gma_attached_encoder(connector);
-
- if (!connector->encoder
- || connector->encoder->crtc != crtc)
- continue;
-
- if (gma_encoder->type == INTEL_OUTPUT_LVDS)
- return true;
- }
-
- return false;
-}
-
void cdv_disable_sr(struct drm_device *dev)
{
if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
@@ -535,8 +498,10 @@ void cdv_disable_sr(struct drm_device *dev)
void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- if (cdv_intel_single_pipe_active(dev)) {
+ /* Is only one pipe enabled? */
+ if (cdv_intel_pipe_enabled(dev, 0) ^ cdv_intel_pipe_enabled(dev, 1)) {
u32 fw;
fw = REG_READ(DSPFW1);
@@ -557,7 +522,9 @@ void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
/* ignore FW4 */
- if (is_pipeb_lvds(dev, crtc)) {
+ /* Is pipe b lvds ? */
+ if (gma_crtc->pipe == 1 &&
+ gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
REG_WRITE(DSPFW5, 0x00040330);
} else {
fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 0490ce36b53f..9ff30c2efadb 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1693,7 +1693,7 @@ done:
struct drm_crtc *crtc = encoder->base.crtc;
drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y,
- crtc->fb);
+ crtc->primary->fb);
}
return 0;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 1c0d723b8d24..b99084b3f706 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -89,13 +89,6 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
REG_READ(hdmi_priv->hdmi_reg);
}
-static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -199,7 +192,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
crtc->saved_mode.vdisplay != 0) {
if (centre) {
if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
- encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
+ encoder->crtc->x, encoder->crtc->y, encoder->crtc->primary->fb))
return -1;
} else {
struct drm_encoder_helper_funcs *helpers
@@ -262,7 +255,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
.dpms = cdv_hdmi_dpms,
- .mode_fixup = cdv_hdmi_mode_fixup,
+ .mode_fixup = gma_encoder_mode_fixup,
.prepare = gma_encoder_prepare,
.mode_set = cdv_hdmi_mode_set,
.commit = gma_encoder_commit,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 20e08e65d46c..8ecc920fc26d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -494,7 +494,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
&crtc->saved_mode,
encoder->crtc->x,
encoder->crtc->y,
- encoder->crtc->fb))
+ encoder->crtc->primary->fb))
return -1;
}
} else if (!strcmp(property->name, "backlight") && encoder) {
@@ -712,6 +712,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
+ mutex_lock(&dev->mode_config.mutex);
psb_intel_ddc_get_modes(connector,
&gma_encoder->ddc_bus->adapter);
list_for_each_entry(scan, &connector->probed_modes, head) {
@@ -772,10 +773,12 @@ void cdv_intel_lvds_init(struct drm_device *dev,
}
out:
+ mutex_unlock(&dev->mode_config.mutex);
drm_sysfs_connector_add(connector);
return;
failed_find:
+ mutex_unlock(&dev->mode_config.mutex);
printk(KERN_ERR "Failed find\n");
if (gma_encoder->ddc_bus)
psb_intel_i2c_destroy(gma_encoder->ddc_bus);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 94b3fec22c28..e7fcc148f333 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -319,7 +319,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
struct gtt_range *backing;
/* Begin by trying to use stolen memory backing */
- backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
+ backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
if (backing) {
drm_gem_private_object_init(dev, &backing->gem, aligned_size);
return backing;
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index e2db48a81ed0..c707fa6fca85 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -62,9 +62,6 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
int ret = 0;
struct drm_gem_object *obj;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
mutex_lock(&dev->struct_mutex);
/* GEM does all our handle to object mapping */
@@ -98,8 +95,8 @@ unlock:
* it so that userspace can speak about it. This does the core work
* for the various methods that do/will create GEM objects for things
*/
-static int psb_gem_create(struct drm_file *file,
- struct drm_device *dev, uint64_t size, uint32_t *handlep)
+int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
+ u32 *handlep, int stolen, u32 align)
{
struct gtt_range *r;
int ret;
@@ -109,7 +106,7 @@ static int psb_gem_create(struct drm_file *file,
/* Allocate our object - for now a direct gtt range which is not
stolen memory backed */
- r = psb_gtt_alloc_range(dev, size, "gem", 0);
+ r = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE);
if (r == NULL) {
dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
return -ENOSPC;
@@ -153,7 +150,8 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
{
args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
args->size = args->pitch * args->height;
- return psb_gem_create(file, dev, args->size, &args->handle);
+ return psb_gem_create(file, dev, args->size, &args->handle, 0,
+ PAGE_SIZE);
}
/**
@@ -229,47 +227,3 @@ fail:
return VM_FAULT_SIGBUS;
}
}
-
-static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
- int size, u32 *handle)
-{
- struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
- if (gtt == NULL)
- return -ENOMEM;
-
- drm_gem_private_object_init(dev, &gtt->gem, size);
- if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
- return 0;
-
- drm_gem_object_release(&gtt->gem);
- psb_gtt_free_range(dev, gtt);
- return -ENOMEM;
-}
-
-/*
- * GEM interfaces for our specific client
- */
-int psb_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_psb_gem_create *args = data;
- int ret;
- if (args->flags & GMA_GEM_CREATE_STOLEN) {
- ret = psb_gem_create_stolen(file, dev, args->size,
- &args->handle);
- if (ret == 0)
- return 0;
- /* Fall throguh */
- args->flags &= ~GMA_GEM_CREATE_STOLEN;
- }
- return psb_gem_create(file, dev, args->size, &args->handle);
-}
-
-int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_psb_gem_mmap *args = data;
- return dev->driver->dumb_map_offset(file, dev,
- args->handle, &args->offset);
-}
-
diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h
new file mode 100644
index 000000000000..1381c5190f46
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem.h
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2014 Patrik Jakobsson
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ **************************************************************************/
+
+#ifndef _GEM_H
+#define _GEM_H
+
+extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
+ u64 size, u32 *handlep, int stolen, u32 align);
+#endif
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
new file mode 100644
index 000000000000..4a295f9ba067
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -0,0 +1,56 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+
+void gma_get_core_freq(struct drm_device *dev)
+{
+ uint32_t clock;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
+ /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
+
+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+ pci_read_config_dword(pci_root, 0xD4, &clock);
+ pci_dev_put(pci_root);
+
+ switch (clock & 0x07) {
+ case 0:
+ dev_priv->core_freq = 100;
+ break;
+ case 1:
+ dev_priv->core_freq = 133;
+ break;
+ case 2:
+ dev_priv->core_freq = 150;
+ break;
+ case 3:
+ dev_priv->core_freq = 178;
+ break;
+ case 4:
+ dev_priv->core_freq = 200;
+ break;
+ case 5:
+ case 6:
+ case 7:
+ dev_priv->core_freq = 266;
+ break;
+ default:
+ dev_priv->core_freq = 0;
+ }
+}
diff --git a/drivers/gpu/drm/gma500/gma_device.h b/drivers/gpu/drm/gma500/gma_device.h
new file mode 100644
index 000000000000..e1dbb007b820
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_device.h
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ **************************************************************************/
+
+#ifndef _GMA_DEVICE_H
+#define _GMA_DEVICE_H
+
+extern void gma_get_core_freq(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 386de2c9dc86..9bb9bddd881a 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -59,7 +59,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -70,7 +70,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
/* no fb bound */
- if (!crtc->fb) {
+ if (!crtc->primary->fb) {
dev_err(dev->dev, "No FB bound\n");
goto gma_pipe_cleaner;
}
@@ -81,19 +81,19 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (ret < 0)
goto gma_pipe_set_base_exit;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8);
- REG_WRITE(map->stride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (crtc->fb->bits_per_pixel) {
+ switch (crtc->primary->fb->bits_per_pixel) {
case 8:
dspcntr |= DISPPLANE_8BPP;
break;
case 16:
- if (crtc->fb->depth == 15)
+ if (crtc->primary->fb->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
@@ -485,6 +485,13 @@ int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
+bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -511,8 +518,8 @@ void gma_crtc_disable(struct drm_crtc *crtc)
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->fb) {
- gt = to_psb_fb(crtc->fb)->gtt;
+ if (crtc->primary->fb) {
+ gt = to_psb_fb(crtc->primary->fb)->gtt;
psb_gtt_unpin(gt);
}
}
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index 78b9f986a6e5..ed569d8a6af3 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -90,6 +90,9 @@ extern void gma_crtc_restore(struct drm_crtc *crtc);
extern void gma_encoder_prepare(struct drm_encoder *encoder);
extern void gma_encoder_commit(struct drm_encoder *encoder);
extern void gma_encoder_destroy(struct drm_encoder *encoder);
+extern bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
/* Common clock related functions */
extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 2db731f00930..592d205a0089 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -22,6 +22,7 @@
#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include "psb_drv.h"
+#include "blitter.h"
/*
@@ -105,11 +106,13 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
/* Write our page entries into the GTT itself */
for (i = r->roll; i < r->npage; i++) {
- pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+ PSB_MMU_CACHED_MEMORY);
iowrite32(pte, gtt_slot++);
}
for (i = 0; i < r->roll; i++) {
- pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+ PSB_MMU_CACHED_MEMORY);
iowrite32(pte, gtt_slot++);
}
/* Make sure all the entries are set before we return */
@@ -127,7 +130,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
* page table entries with the dummy page. This is protected via the gtt
* mutex which the caller must hold.
*/
-static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 __iomem *gtt_slot;
@@ -137,7 +140,8 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
WARN_ON(r->stolen);
gtt_slot = psb_gtt_entry(dev, r);
- pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
+ pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
+ PSB_MMU_CACHED_MEMORY);
for (i = 0; i < r->npage; i++)
iowrite32(pte, gtt_slot++);
@@ -176,11 +180,13 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
gtt_slot = psb_gtt_entry(dev, r);
for (i = r->roll; i < r->npage; i++) {
- pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+ PSB_MMU_CACHED_MEMORY);
iowrite32(pte, gtt_slot++);
}
for (i = 0; i < r->roll; i++) {
- pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+ PSB_MMU_CACHED_MEMORY);
iowrite32(pte, gtt_slot++);
}
ioread32(gtt_slot - 1);
@@ -240,6 +246,7 @@ int psb_gtt_pin(struct gtt_range *gt)
int ret = 0;
struct drm_device *dev = gt->gem.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 gpu_base = dev_priv->gtt.gatt_start;
mutex_lock(&dev_priv->gtt_mutex);
@@ -252,6 +259,9 @@ int psb_gtt_pin(struct gtt_range *gt)
psb_gtt_detach_pages(gt);
goto out;
}
+ psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
+ gt->pages, (gpu_base + gt->offset),
+ gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
}
gt->in_gart++;
out:
@@ -274,16 +284,30 @@ void psb_gtt_unpin(struct gtt_range *gt)
{
struct drm_device *dev = gt->gem.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 gpu_base = dev_priv->gtt.gatt_start;
+ int ret;
+ /* While holding the gtt_mutex no new blits can be initiated */
mutex_lock(&dev_priv->gtt_mutex);
+ /* Wait for any possible usage of the memory to be finished */
+ ret = gma_blt_wait_idle(dev_priv);
+ if (ret) {
+ DRM_ERROR("Failed to idle the blitter, unpin failed!");
+ goto out;
+ }
+
WARN_ON(!gt->in_gart);
gt->in_gart--;
if (gt->in_gart == 0 && gt->stolen == 0) {
+ psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
+ (gpu_base + gt->offset), gt->npage, 0, 0);
psb_gtt_remove(dev, gt);
psb_gtt_detach_pages(gt);
}
+
+out:
mutex_unlock(&dev_priv->gtt_mutex);
}
@@ -306,7 +330,7 @@ void psb_gtt_unpin(struct gtt_range *gt)
* as in use.
*/
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
- const char *name, int backed)
+ const char *name, int backed, u32 align)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct gtt_range *gt;
@@ -334,7 +358,7 @@ struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
/* Ensure this is set for non GEM objects */
gt->gem.dev = dev;
ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
- len, start, end, PAGE_SIZE, NULL, NULL);
+ len, start, end, align, NULL, NULL);
if (ret == 0) {
gt->offset = gt->resource.start - r->start;
return gt;
@@ -497,6 +521,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
if (!resume)
dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
stolen_size);
+
if (!dev_priv->vram_addr) {
dev_err(dev->dev, "Failure to map stolen base.\n");
ret = -ENOMEM;
@@ -512,7 +537,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
num_pages, pfn_base << PAGE_SHIFT, 0);
for (i = 0; i < num_pages; ++i) {
- pte = psb_gtt_mask_pte(pfn_base + i, 0);
+ pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
iowrite32(pte, dev_priv->gtt_map + i);
}
@@ -521,7 +546,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
*/
pfn_base = page_to_pfn(dev_priv->scratch_page);
- pte = psb_gtt_mask_pte(pfn_base, 0);
+ pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
for (; i < gtt_pages; ++i)
iowrite32(pte, dev_priv->gtt_map + i);
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index 6191d10acf33..f5860a739bd8 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -53,7 +53,8 @@ struct gtt_range {
};
extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
- const char *name, int backed);
+ const char *name, int backed,
+ u32 align);
extern void psb_gtt_kref_put(struct gtt_range *gt);
extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
extern int psb_gtt_pin(struct gtt_range *gt);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 860a4ee9baaf..6e91b20ce2e5 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -287,7 +287,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
&gma_crtc->saved_mode,
encoder->crtc->x,
encoder->crtc->y,
- encoder->crtc->fb))
+ encoder->crtc->primary->fb))
goto set_prop_error;
} else {
struct drm_encoder_helper_funcs *funcs =
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 321c00a944e9..8cc8a5abbc7b 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -166,7 +166,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -178,12 +178,12 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
/* no fb bound */
- if (!crtc->fb) {
+ if (!crtc->primary->fb) {
dev_dbg(dev->dev, "No FB bound\n");
return 0;
}
- ret = check_fb(crtc->fb);
+ ret = check_fb(crtc->primary->fb);
if (ret)
return ret;
@@ -196,18 +196,18 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8);
- REG_WRITE(map->stride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (crtc->fb->bits_per_pixel) {
+ switch (crtc->primary->fb->bits_per_pixel) {
case 8:
dspcntr |= DISPPLANE_8BPP;
break;
case 16:
- if (crtc->fb->depth == 15)
+ if (crtc->primary->fb->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
@@ -700,7 +700,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
#endif
- ret = check_fb(crtc->fb);
+ ret = check_fb(crtc->primary->fb);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 49bac41beefb..0eaf11c19939 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -18,6 +18,7 @@
#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"
+#include "mmu.h"
/*
* Code for the SGX MMU:
@@ -47,51 +48,6 @@
* but on average it should be fast.
*/
-struct psb_mmu_driver {
- /* protects driver- and pd structures. Always take in read mode
- * before taking the page table spinlock.
- */
- struct rw_semaphore sem;
-
- /* protects page tables, directory tables and pt tables.
- * and pt structures.
- */
- spinlock_t lock;
-
- atomic_t needs_tlbflush;
-
- uint8_t __iomem *register_map;
- struct psb_mmu_pd *default_pd;
- /*uint32_t bif_ctrl;*/
- int has_clflush;
- int clflush_add;
- unsigned long clflush_mask;
-
- struct drm_psb_private *dev_priv;
-};
-
-struct psb_mmu_pd;
-
-struct psb_mmu_pt {
- struct psb_mmu_pd *pd;
- uint32_t index;
- uint32_t count;
- struct page *p;
- uint32_t *v;
-};
-
-struct psb_mmu_pd {
- struct psb_mmu_driver *driver;
- int hw_context;
- struct psb_mmu_pt **tables;
- struct page *p;
- struct page *dummy_pt;
- struct page *dummy_page;
- uint32_t pd_mask;
- uint32_t invalid_pde;
- uint32_t invalid_pte;
-};
-
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
return (offset >> PSB_PTE_SHIFT) & 0x3FF;
@@ -102,13 +58,13 @@ static inline uint32_t psb_mmu_pd_index(uint32_t offset)
return offset >> PSB_PDE_SHIFT;
}
+#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}
-static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
- void *addr)
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
if (!driver->has_clflush)
return;
@@ -117,62 +73,77 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
psb_clflush(addr);
mb();
}
+#else
-static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
-{
- uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
- uint32_t clflush_count = PAGE_SIZE / clflush_add;
- int i;
- uint8_t *clf;
-
- clf = kmap_atomic(page);
- mb();
- for (i = 0; i < clflush_count; ++i) {
- psb_clflush(clf);
- clf += clflush_add;
- }
- mb();
- kunmap_atomic(clf);
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
+{;
}
-static void psb_pages_clflush(struct psb_mmu_driver *driver,
- struct page *page[], unsigned long num_pages)
-{
- int i;
-
- if (!driver->has_clflush)
- return ;
+#endif
- for (i = 0; i < num_pages; i++)
- psb_page_clflush(driver, *page++);
-}
-
-static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
- int force)
+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
+ struct drm_device *dev = driver->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (atomic_read(&driver->needs_tlbflush) || force) {
+ uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
+ PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+
+ /* Make sure data cache is turned off before enabling it */
+ wmb();
+ PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+ if (driver->msvdx_mmu_invaldc)
+ atomic_set(driver->msvdx_mmu_invaldc, 1);
+ }
atomic_set(&driver->needs_tlbflush, 0);
}
+#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
down_write(&driver->sem);
psb_mmu_flush_pd_locked(driver, force);
up_write(&driver->sem);
}
+#endif
-void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
+void psb_mmu_flush(struct psb_mmu_driver *driver)
{
- if (rc_prot)
- down_write(&driver->sem);
- if (rc_prot)
- up_write(&driver->sem);
+ struct drm_device *dev = driver->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ uint32_t val;
+
+ down_write(&driver->sem);
+ val = PSB_RSGX32(PSB_CR_BIF_CTRL);
+ if (atomic_read(&driver->needs_tlbflush))
+ PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+ else
+ PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);
+
+ /* Make sure data cache is turned off and MMU is flushed before
+ restoring bank interface control register */
+ wmb();
+ PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
+ PSB_CR_BIF_CTRL);
+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+ atomic_set(&driver->needs_tlbflush, 0);
+ if (driver->msvdx_mmu_invaldc)
+ atomic_set(driver->msvdx_mmu_invaldc, 1);
+ up_write(&driver->sem);
}
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
- /*ttm_tt_cache_flush(&pd->p, 1);*/
- psb_pages_clflush(pd->driver, &pd->p, 1);
+ struct drm_device *dev = pd->driver->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
+
down_write(&pd->driver->sem);
+ PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
wmb();
psb_mmu_flush_pd_locked(pd->driver, 1);
pd->hw_context = hw_context;
@@ -183,7 +154,6 @@ void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
static inline unsigned long psb_pd_addr_end(unsigned long addr,
unsigned long end)
{
-
addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
return (addr < end) ? addr : end;
}
@@ -223,12 +193,10 @@ struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
goto out_err3;
if (!trap_pagefaults) {
- pd->invalid_pde =
- psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
- invalid_type);
- pd->invalid_pte =
- psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
- invalid_type);
+ pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+ invalid_type);
+ pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+ invalid_type);
} else {
pd->invalid_pde = 0;
pd->invalid_pte = 0;
@@ -279,12 +247,16 @@ static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
struct psb_mmu_driver *driver = pd->driver;
+ struct drm_device *dev = driver->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_mmu_pt *pt;
int i;
down_write(&driver->sem);
- if (pd->hw_context != -1)
+ if (pd->hw_context != -1) {
+ PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
psb_mmu_flush_pd_locked(driver, 1);
+ }
/* Should take the spinlock here, but we don't need to do that
since we have the semaphore in write mode. */
@@ -331,7 +303,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
*ptes++ = pd->invalid_pte;
-
+#if defined(CONFIG_X86)
if (pd->driver->has_clflush && pd->hw_context != -1) {
mb();
for (i = 0; i < clflush_count; ++i) {
@@ -340,7 +312,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
}
mb();
}
-
+#endif
kunmap_atomic(v);
spin_unlock(lock);
@@ -351,7 +323,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
return pt;
}
-static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
unsigned long addr)
{
uint32_t index = psb_mmu_pd_index(addr);
@@ -383,7 +355,7 @@ static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
kunmap_atomic((void *) v);
if (pd->hw_context != -1) {
- psb_mmu_clflush(pd->driver, (void *) &v[index]);
+ psb_mmu_clflush(pd->driver, (void *)&v[index]);
atomic_set(&pd->driver->needs_tlbflush, 1);
}
}
@@ -420,8 +392,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
pd->tables[pt->index] = NULL;
if (pd->hw_context != -1) {
- psb_mmu_clflush(pd->driver,
- (void *) &v[pt->index]);
+ psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
atomic_set(&pd->driver->needs_tlbflush, 1);
}
kunmap_atomic(pt->v);
@@ -432,8 +403,8 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
spin_unlock(&pd->driver->lock);
}
-static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
- unsigned long addr, uint32_t pte)
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
+ uint32_t pte)
{
pt->v[psb_mmu_pt_index(addr)] = pte;
}
@@ -444,69 +415,50 @@ static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}
-
-void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
- uint32_t mmu_offset, uint32_t gtt_start,
- uint32_t gtt_pages)
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
- uint32_t *v;
- uint32_t start = psb_mmu_pd_index(mmu_offset);
- struct psb_mmu_driver *driver = pd->driver;
- int num_pages = gtt_pages;
+ struct psb_mmu_pd *pd;
down_read(&driver->sem);
- spin_lock(&driver->lock);
-
- v = kmap_atomic(pd->p);
- v += start;
-
- while (gtt_pages--) {
- *v++ = gtt_start | pd->pd_mask;
- gtt_start += PAGE_SIZE;
- }
-
- /*ttm_tt_cache_flush(&pd->p, num_pages);*/
- psb_pages_clflush(pd->driver, &pd->p, num_pages);
- kunmap_atomic(v);
- spin_unlock(&driver->lock);
-
- if (pd->hw_context != -1)
- atomic_set(&pd->driver->needs_tlbflush, 1);
+ pd = driver->default_pd;
+ up_read(&driver->sem);
- up_read(&pd->driver->sem);
- psb_mmu_flush_pd(pd->driver, 0);
+ return pd;
}
-struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+/* Returns the physical address of the PD shared by sgx/msvdx */
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
struct psb_mmu_pd *pd;
- /* down_read(&driver->sem); */
- pd = driver->default_pd;
- /* up_read(&driver->sem); */
-
- return pd;
+ pd = psb_mmu_get_default_pd(driver);
+ return page_to_pfn(pd->p) << PAGE_SHIFT;
}
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
+ struct drm_device *dev = driver->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
psb_mmu_free_pagedir(driver->default_pd);
kfree(driver);
}
-struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
- int trap_pagefaults,
- int invalid_type,
- struct drm_psb_private *dev_priv)
+struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
+ int trap_pagefaults,
+ int invalid_type,
+ atomic_t *msvdx_mmu_invaldc)
{
struct psb_mmu_driver *driver;
+ struct drm_psb_private *dev_priv = dev->dev_private;
driver = kmalloc(sizeof(*driver), GFP_KERNEL);
if (!driver)
return NULL;
- driver->dev_priv = dev_priv;
+ driver->dev = dev;
driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
invalid_type);
if (!driver->default_pd)
@@ -515,17 +467,24 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
spin_lock_init(&driver->lock);
init_rwsem(&driver->sem);
down_write(&driver->sem);
- driver->register_map = registers;
atomic_set(&driver->needs_tlbflush, 1);
+ driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
+
+ driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
+ PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
+ PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
driver->has_clflush = 0;
- if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
+#if defined(CONFIG_X86)
+ if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
uint32_t tfms, misc, cap0, cap4, clflush_size;
/*
- * clflush size is determined at kernel setup for x86_64
- * but not for i386. We have to do it here.
+ * clflush size is determined at kernel setup for x86_64 but not
+ * for i386. We have to do it here.
*/
cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
@@ -536,6 +495,7 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
driver->clflush_mask = driver->clflush_add - 1;
driver->clflush_mask = ~driver->clflush_mask;
}
+#endif
up_write(&driver->sem);
return driver;
@@ -545,9 +505,9 @@ out_err1:
return NULL;
}
-static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
- unsigned long address, uint32_t num_pages,
- uint32_t desired_tile_stride,
+#if defined(CONFIG_X86)
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
+ uint32_t num_pages, uint32_t desired_tile_stride,
uint32_t hw_tile_stride)
{
struct psb_mmu_pt *pt;
@@ -561,11 +521,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
unsigned long clflush_add = pd->driver->clflush_add;
unsigned long clflush_mask = pd->driver->clflush_mask;
- if (!pd->driver->has_clflush) {
- /*ttm_tt_cache_flush(&pd->p, num_pages);*/
- psb_pages_clflush(pd->driver, &pd->p, num_pages);
+ if (!pd->driver->has_clflush)
return;
- }
if (hw_tile_stride)
rows = num_pages / desired_tile_stride;
@@ -586,10 +543,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
if (!pt)
continue;
do {
- psb_clflush(&pt->v
- [psb_mmu_pt_index(addr)]);
- } while (addr +=
- clflush_add,
+ psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
+ } while (addr += clflush_add,
(addr & clflush_mask) < next);
psb_mmu_pt_unmap_unlock(pt);
@@ -598,6 +553,14 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
}
mb();
}
+#else
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
+ uint32_t num_pages, uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride)
+{
+ drm_ttm_cache_flush();
+}
+#endif
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
unsigned long address, uint32_t num_pages)
@@ -633,7 +596,7 @@ out:
up_read(&pd->driver->sem);
if (pd->hw_context != -1)
- psb_mmu_flush(pd->driver, 0);
+ psb_mmu_flush(pd->driver);
return;
}
@@ -660,7 +623,7 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
add = desired_tile_stride << PAGE_SHIFT;
row_add = hw_tile_stride << PAGE_SHIFT;
- /* down_read(&pd->driver->sem); */
+ down_read(&pd->driver->sem);
/* Make sure we only need to flush this processor's cache */
@@ -688,10 +651,10 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
psb_mmu_flush_ptes(pd, f_address, num_pages,
desired_tile_stride, hw_tile_stride);
- /* up_read(&pd->driver->sem); */
+ up_read(&pd->driver->sem);
if (pd->hw_context != -1)
- psb_mmu_flush(pd->driver, 0);
+ psb_mmu_flush(pd->driver);
}
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
@@ -704,7 +667,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
unsigned long end;
unsigned long next;
unsigned long f_address = address;
- int ret = 0;
+ int ret = -ENOMEM;
down_read(&pd->driver->sem);
@@ -726,6 +689,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
psb_mmu_pt_unmap_unlock(pt);
} while (addr = next, next != end);
+ ret = 0;
out:
if (pd->hw_context != -1)
@@ -734,15 +698,15 @@ out:
up_read(&pd->driver->sem);
if (pd->hw_context != -1)
- psb_mmu_flush(pd->driver, 1);
+ psb_mmu_flush(pd->driver);
- return ret;
+ return 0;
}
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
- uint32_t desired_tile_stride,
- uint32_t hw_tile_stride, int type)
+ uint32_t desired_tile_stride, uint32_t hw_tile_stride,
+ int type)
{
struct psb_mmu_pt *pt;
uint32_t rows = 1;
@@ -754,7 +718,7 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long add;
unsigned long row_add;
unsigned long f_address = address;
- int ret = 0;
+ int ret = -ENOMEM;
if (hw_tile_stride) {
if (num_pages % desired_tile_stride != 0)
@@ -777,14 +741,11 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
do {
next = psb_pd_addr_end(addr, end);
pt = psb_mmu_pt_alloc_map_lock(pd, addr);
- if (!pt) {
- ret = -ENOMEM;
+ if (!pt)
goto out;
- }
do {
- pte =
- psb_mmu_mask_pte(page_to_pfn(*pages++),
- type);
+ pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
+ type);
psb_mmu_set_pte(pt, addr, pte);
pt->count++;
} while (addr += PAGE_SIZE, addr < next);
@@ -794,6 +755,8 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
address += row_add;
}
+
+ ret = 0;
out:
if (pd->hw_context != -1)
psb_mmu_flush_ptes(pd, f_address, num_pages,
@@ -802,7 +765,7 @@ out:
up_read(&pd->driver->sem);
if (pd->hw_context != -1)
- psb_mmu_flush(pd->driver, 1);
+ psb_mmu_flush(pd->driver);
return ret;
}
diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h
new file mode 100644
index 000000000000..e89abec6209d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mmu.h
@@ -0,0 +1,93 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ **************************************************************************/
+
+#ifndef __MMU_H
+#define __MMU_H
+
+struct psb_mmu_driver {
+ /* protects driver- and pd structures. Always take in read mode
+ * before taking the page table spinlock.
+ */
+ struct rw_semaphore sem;
+
+ /* protects page tables, directory tables and pt tables.
+ * and pt structures.
+ */
+ spinlock_t lock;
+
+ atomic_t needs_tlbflush;
+ atomic_t *msvdx_mmu_invaldc;
+ struct psb_mmu_pd *default_pd;
+ uint32_t bif_ctrl;
+ int has_clflush;
+ int clflush_add;
+ unsigned long clflush_mask;
+
+ struct drm_device *dev;
+};
+
+struct psb_mmu_pd;
+
+struct psb_mmu_pt {
+ struct psb_mmu_pd *pd;
+ uint32_t index;
+ uint32_t count;
+ struct page *p;
+ uint32_t *v;
+};
+
+struct psb_mmu_pd {
+ struct psb_mmu_driver *driver;
+ int hw_context;
+ struct psb_mmu_pt **tables;
+ struct page *p;
+ struct page *dummy_pt;
+ struct page *dummy_page;
+ uint32_t pd_mask;
+ uint32_t invalid_pde;
+ uint32_t invalid_pte;
+};
+
+extern struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
+ int trap_pagefaults,
+ int invalid_type,
+ atomic_t *msvdx_mmu_invaldc);
+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
+ *driver);
+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+ int trap_pagefaults,
+ int invalid_type);
+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+ unsigned long address,
+ uint32_t num_pages);
+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
+ uint32_t start_pfn,
+ unsigned long address,
+ uint32_t num_pages, int type);
+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+ unsigned long *pfn);
+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride, int type);
+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride);
+
+#endif
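
A sketch of how the now-exported MMU entry points pair up with a GTT range, following the same pattern the gtt.c hunks above use in psb_gtt_pin()/psb_gtt_unpin(); the wrapper names are hypothetical and only restate what that code does.

/* Hypothetical wrappers, for illustration only */
#include "psb_drv.h"
#include "mmu.h"

static int example_map_into_sgx(struct drm_psb_private *dev_priv,
				struct gtt_range *gt)
{
	u32 gpu_base = dev_priv->gtt.gatt_start;

	/* Mirror the pinned GTT pages at the range's GPU virtual address */
	return psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				    gt->pages, gpu_base + gt->offset,
				    gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
}

static void example_unmap_from_sgx(struct drm_psb_private *dev_priv,
				   struct gtt_range *gt)
{
	u32 gpu_base = dev_priv->gtt.gatt_start;

	/* Drop the mirror before the backing pages are released */
	psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
			     gpu_base + gt->offset, gt->npage, 0, 0);
}
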
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 8195e8592107..2de216c2374f 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -599,7 +599,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -608,7 +608,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int ret = 0;
/* no fb bound */
- if (!crtc->fb) {
+ if (!crtc->primary->fb) {
dev_dbg(dev->dev, "No FB bound\n");
return 0;
}
@@ -617,19 +617,19 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
return 0;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8);
- REG_WRITE(map->stride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (crtc->fb->bits_per_pixel) {
+ switch (crtc->primary->fb->bits_per_pixel) {
case 8:
dspcntr |= DISPPLANE_8BPP;
break;
case 16:
- if (crtc->fb->depth == 15)
+ if (crtc->primary->fb->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 38153143ed8c..cf018ddcc5a6 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -523,13 +523,6 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static enum drm_connector_status
oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
{
@@ -608,7 +601,7 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
.dpms = oaktrail_hdmi_dpms,
- .mode_fixup = oaktrail_hdmi_mode_fixup,
+ .mode_fixup = gma_encoder_mode_fixup,
.prepare = gma_encoder_prepare,
.mode_set = oaktrail_hdmi_mode_set,
.commit = gma_encoder_commit,
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 5e0697862736..9b099468a5db 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -359,6 +359,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
* if closed, act like it's not there for now
*/
+ mutex_lock(&dev->mode_config.mutex);
i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
if (i2c_adap == NULL)
dev_err(dev->dev, "No ddc adapter available!\n");
@@ -401,10 +402,14 @@ void oaktrail_lvds_init(struct drm_device *dev,
}
out:
+ mutex_unlock(&dev->mode_config.mutex);
+
drm_sysfs_connector_add(connector);
return;
failed_find:
+ mutex_unlock(&dev->mode_config.mutex);
+
dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
if (gma_encoder->ddc_bus)
psb_intel_i2c_destroy(gma_encoder->ddc_bus);
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index 13ec6283bf59..ab696ca7eeec 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -173,10 +173,13 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return 0;
}
-void psb_intel_opregion_asle_intr(struct drm_device *dev)
+static void psb_intel_opregion_asle_work(struct work_struct *work)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct psb_intel_opregion *opregion =
+ container_of(work, struct psb_intel_opregion, asle_work);
+ struct drm_psb_private *dev_priv =
+ container_of(opregion, struct drm_psb_private, opregion);
+ struct opregion_asle *asle = opregion->asle;
u32 asle_stat = 0;
u32 asle_req;
@@ -190,9 +193,18 @@ void psb_intel_opregion_asle_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev_priv->dev, asle->bclp);
asle->aslc = asle_stat;
+
+}
+
+void psb_intel_opregion_asle_intr(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->opregion.asle)
+ schedule_work(&dev_priv->opregion.asle_work);
}
#define ASLE_ALS_EN (1<<0)
@@ -282,6 +294,8 @@ void psb_intel_opregion_fini(struct drm_device *dev)
unregister_acpi_notifier(&psb_intel_opregion_notifier);
}
+ cancel_work_sync(&opregion->asle_work);
+
/* just clear all opregion memory pointers now */
iounmap(opregion->header);
opregion->header = NULL;
@@ -304,6 +318,9 @@ int psb_intel_opregion_setup(struct drm_device *dev)
DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
return -ENOTSUPP;
}
+
+ INIT_WORK(&opregion->asle_work, psb_intel_opregion_asle_work);
+
DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
base = acpi_os_ioremap(opregion_phy, 8*1024);
if (!base)
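The opregion change above moves ASLE handling out of the interrupt path: psb_intel_opregion_asle_intr() now only queues a work item, the backlight update runs later in process context, and teardown cancels the work before the mapping is released. The deferral pattern in isolation (names are placeholders, not gma500 symbols):

#include <linux/workqueue.h>

struct demo_state {
        struct work_struct asle_work;
        /* device data consumed by the deferred handler */
};

/* Runs in process context; may sleep (e.g. backlight updates). */
static void demo_asle_work(struct work_struct *work)
{
        struct demo_state *st = container_of(work, struct demo_state,
                                             asle_work);
        (void)st;       /* handle the request here */
}

/* Interrupt path: cheap and non-blocking, just schedules the work. */
static void demo_asle_intr(struct demo_state *st)
{
        schedule_work(&st->asle_work);
}

static void demo_setup(struct demo_state *st)
{
        INIT_WORK(&st->asle_work, demo_asle_work);
}

static void demo_teardown(struct demo_state *st)
{
        cancel_work_sync(&st->asle_work);       /* before freeing st */
}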
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 23fb33f1471b..07df7d4eea72 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -26,6 +26,7 @@
#include "psb_intel_reg.h"
#include "intel_bios.h"
#include "psb_device.h"
+#include "gma_device.h"
static int psb_output_init(struct drm_device *dev)
{
@@ -257,45 +258,6 @@ static int psb_power_up(struct drm_device *dev)
return 0;
}
-static void psb_get_core_freq(struct drm_device *dev)
-{
- uint32_t clock;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
- /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
-
- pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
- pci_read_config_dword(pci_root, 0xD4, &clock);
- pci_dev_put(pci_root);
-
- switch (clock & 0x07) {
- case 0:
- dev_priv->core_freq = 100;
- break;
- case 1:
- dev_priv->core_freq = 133;
- break;
- case 2:
- dev_priv->core_freq = 150;
- break;
- case 3:
- dev_priv->core_freq = 178;
- break;
- case 4:
- dev_priv->core_freq = 200;
- break;
- case 5:
- case 6:
- case 7:
- dev_priv->core_freq = 266;
- break;
- default:
- dev_priv->core_freq = 0;
- }
-}
-
/* Poulsbo */
static const struct psb_offset psb_regmap[2] = {
{
@@ -352,7 +314,7 @@ static int psb_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
dev_priv->regmap = psb_regmap;
- psb_get_core_freq(dev);
+ gma_get_core_freq(dev);
gma_intel_setup_gmbus(dev);
psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
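psb_get_core_freq() is dropped above in favour of a shared gma_get_core_freq(); assuming the shared helper decodes the same fuse value, the removed switch reduces to a lookup on the three low bits of the word read back from config offset 0xD4 (values in MHz, taken from the deleted code):

#include <linux/types.h>

static const unsigned int demo_core_freq_mhz[8] = {
        100, 133, 150, 178, 200, 266, 266, 266
};

static unsigned int demo_decode_core_freq(u32 clock)
{
        return demo_core_freq_mhz[clock & 0x07];
}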
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 1199180667c9..b686e56646eb 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -21,7 +21,6 @@
#include <drm/drmP.h>
#include <drm/drm.h>
-#include <drm/gma_drm.h>
#include "psb_drv.h"
#include "framebuffer.h"
#include "psb_reg.h"
@@ -37,56 +36,65 @@
#include <acpi/video.h>
#include <linux/module.h>
-static int drm_psb_trap_pagefaults;
-
-static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-
-MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
-module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
-
+static struct drm_driver driver;
+static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+/*
+ * The table below contains a mapping of the PCI vendor ID and the PCI Device ID
+ * to the different groups of PowerVR 5-series chip designs
+ *
+ * 0x8086 = Intel Corporation
+ *
+ * PowerVR SGX535 - Poulsbo - Intel GMA 500, Intel Atom Z5xx
+ * PowerVR SGX535 - Moorestown - Intel GMA 600
+ * PowerVR SGX535 - Oaktrail - Intel GMA 600, Intel Atom Z6xx, E6xx
+ * PowerVR SGX540 - Medfield - Intel Atom Z2460
+ * PowerVR SGX544MP2 - Medfield -
+ * PowerVR SGX545 - Cedartrail - Intel GMA 3600, Intel Atom D2500, N2600
+ * PowerVR SGX545 - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700,
+ * N2800
+ */
static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
#if defined(CONFIG_DRM_GMA600)
- { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
- { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
- { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
- { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
- { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
- { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
- { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
- { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
- /* Atom E620 */
- { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
#endif
#if defined(CONFIG_DRM_MEDFIELD)
- {0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- {0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- {0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- {0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- {0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- {0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- {0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- {0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ { 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
#endif
#if defined(CONFIG_DRM_GMA3600)
- { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
#endif
{ 0, }
};
@@ -95,59 +103,10 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
/*
* Standard IOCTLs.
*/
-
-#define DRM_IOCTL_GMA_ADB \
- DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
-#define DRM_IOCTL_GMA_MODE_OPERATION \
- DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
- struct drm_psb_mode_operation_arg)
-#define DRM_IOCTL_GMA_STOLEN_MEMORY \
- DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
- struct drm_psb_stolen_memory_arg)
-#define DRM_IOCTL_GMA_GAMMA \
- DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
- struct drm_psb_dpst_lut_arg)
-#define DRM_IOCTL_GMA_DPST_BL \
- DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
- uint32_t)
-#define DRM_IOCTL_GMA_GET_PIPE_FROM_CRTC_ID \
- DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
- struct drm_psb_get_pipe_from_crtc_id_arg)
-#define DRM_IOCTL_GMA_GEM_CREATE \
- DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
- struct drm_psb_gem_create)
-#define DRM_IOCTL_GMA_GEM_MMAP \
- DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
- struct drm_psb_gem_mmap)
-
-static int psb_adb_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_gamma_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
static const struct drm_ioctl_desc psb_ioctls[] = {
- DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
- DRM_AUTH),
- DRM_IOCTL_DEF_DRV(GMA_STOLEN_MEMORY, psb_stolen_memory_ioctl,
- DRM_AUTH),
- DRM_IOCTL_DEF_DRV(GMA_GAMMA, psb_gamma_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(GMA_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(GMA_GET_PIPE_FROM_CRTC_ID,
- psb_intel_get_pipe_from_crtc_id, 0),
- DRM_IOCTL_DEF_DRV(GMA_GEM_CREATE, psb_gem_create_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(GMA_GEM_MMAP, psb_gem_mmap_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
};
-static void psb_lastclose(struct drm_device *dev)
+static void psb_driver_lastclose(struct drm_device *dev)
{
int ret;
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -169,19 +128,14 @@ static int psb_do_init(struct drm_device *dev)
uint32_t stolen_gtt;
- int ret = -ENOMEM;
-
if (pg->mmu_gatt_start & 0x0FFFFFFF) {
dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
- ret = -EINVAL;
- goto out_err;
+ return -EINVAL;
}
-
stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
- stolen_gtt =
- (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
+ stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
dev_priv->gatt_free_offset = pg->mmu_gatt_start +
(stolen_gtt << PAGE_SHIFT) * 1024;
@@ -192,23 +146,26 @@ static int psb_do_init(struct drm_device *dev)
PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
PSB_RSGX32(PSB_CR_BIF_BANK1);
- PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
- PSB_CR_BIF_CTRL);
+
+ /* Do not bypass any MMU access, let them pagefault instead */
+ PSB_WSGX32((PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_MMU_ER_MASK),
+ PSB_CR_BIF_CTRL);
+ PSB_RSGX32(PSB_CR_BIF_CTRL);
+
psb_spank(dev_priv);
/* mmu_gatt ?? */
PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+ PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE); /* Post */
+
return 0;
-out_err:
- return ret;
}
static int psb_driver_unload(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- /* Kill vblank etc here */
-
+ /* TODO: Kill vblank etc here */
if (dev_priv) {
if (dev_priv->backlight_device)
@@ -268,8 +225,7 @@ static int psb_driver_unload(struct drm_device *dev)
return 0;
}
-
-static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+static int psb_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_psb_private *dev_priv;
unsigned long resource_start, resource_len;
@@ -277,15 +233,19 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
int ret = -ENOMEM;
struct drm_connector *connector;
struct gma_encoder *gma_encoder;
+ struct psb_gtt *pg;
+ /* allocating and initializing driver private data */
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
- dev_priv->ops = (struct psb_ops *)chipset;
+ dev_priv->ops = (struct psb_ops *)flags;
dev_priv->dev = dev;
dev->dev_private = (void *) dev_priv;
+ pg = &dev_priv->gtt;
+
pci_set_master(dev->pdev);
dev_priv->num_pipe = dev_priv->ops->pipes;
@@ -347,9 +307,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
goto out_err;
- dev_priv->mmu = psb_mmu_driver_init((void *)0,
- drm_psb_trap_pagefaults, 0,
- dev_priv);
+ dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
if (!dev_priv->mmu)
goto out_err;
@@ -357,18 +315,27 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (!dev_priv->pf_pd)
goto out_err;
- psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
- psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
-
ret = psb_do_init(dev);
if (ret)
return ret;
+ /* Add stolen memory to SGX MMU */
+ down_read(&pg->sem);
+ ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
+ dev_priv->stolen_base >> PAGE_SHIFT,
+ pg->gatt_start,
+ pg->stolen_size >> PAGE_SHIFT, 0);
+ up_read(&pg->sem);
+
+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+
PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
acpi_video_register();
+ /* Setup vertical blanking handling */
ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
goto out_err;
@@ -390,9 +357,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
drm_irq_install(dev);
dev->vblank_disable_allowed = true;
-
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-
dev->driver->get_vblank_counter = psb_get_vblank_counter;
psb_modeset_init(dev);
@@ -416,11 +381,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
return ret;
psb_intel_opregion_enable_asle(dev);
#if 0
- /*enable runtime pm at last*/
+ /* Enable runtime pm at last */
pm_runtime_enable(&dev->pdev->dev);
pm_runtime_set_active(&dev->pdev->dev);
#endif
- /*Intel drm driver load is done, continue doing pvr load*/
+ /* Intel drm driver load is done, continue doing pvr load */
return 0;
out_err:
psb_driver_unload(dev);
@@ -442,161 +407,6 @@ static inline void get_brightness(struct backlight_device *bd)
#endif
}
-static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_private *dev_priv = psb_priv(dev);
- uint32_t *arg = data;
-
- dev_priv->blc_adj2 = *arg;
- get_brightness(dev_priv->backlight_device);
- return 0;
-}
-
-static int psb_adb_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_private *dev_priv = psb_priv(dev);
- uint32_t *arg = data;
-
- dev_priv->blc_adj1 = *arg;
- get_brightness(dev_priv->backlight_device);
- return 0;
-}
-
-static int psb_gamma_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_dpst_lut_arg *lut_arg = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- struct gma_crtc *gma_crtc;
- int i = 0;
- int32_t obj_id;
-
- obj_id = lut_arg->output_id;
- obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- dev_dbg(dev->dev, "Invalid Connector object.\n");
- return -ENOENT;
- }
-
- connector = obj_to_connector(obj);
- crtc = connector->encoder->crtc;
- gma_crtc = to_gma_crtc(crtc);
-
- for (i = 0; i < 256; i++)
- gma_crtc->lut_adj[i] = lut_arg->lut[i];
-
- gma_crtc_load_lut(crtc);
-
- return 0;
-}
-
-static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- uint32_t obj_id;
- uint16_t op;
- struct drm_mode_modeinfo *umode;
- struct drm_display_mode *mode = NULL;
- struct drm_psb_mode_operation_arg *arg;
- struct drm_mode_object *obj;
- struct drm_connector *connector;
- struct drm_connector_helper_funcs *connector_funcs;
- int ret = 0;
- int resp = MODE_OK;
-
- arg = (struct drm_psb_mode_operation_arg *)data;
- obj_id = arg->obj_id;
- op = arg->operation;
-
- switch (op) {
- case PSB_MODE_OPERATION_MODE_VALID:
- umode = &arg->mode;
-
- drm_modeset_lock_all(dev);
-
- obj = drm_mode_object_find(dev, obj_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -ENOENT;
- goto mode_op_out;
- }
-
- connector = obj_to_connector(obj);
-
- mode = drm_mode_create(dev);
- if (!mode) {
- ret = -ENOMEM;
- goto mode_op_out;
- }
-
- /* drm_crtc_convert_umode(mode, umode); */
- {
- mode->clock = umode->clock;
- mode->hdisplay = umode->hdisplay;
- mode->hsync_start = umode->hsync_start;
- mode->hsync_end = umode->hsync_end;
- mode->htotal = umode->htotal;
- mode->hskew = umode->hskew;
- mode->vdisplay = umode->vdisplay;
- mode->vsync_start = umode->vsync_start;
- mode->vsync_end = umode->vsync_end;
- mode->vtotal = umode->vtotal;
- mode->vscan = umode->vscan;
- mode->vrefresh = umode->vrefresh;
- mode->flags = umode->flags;
- mode->type = umode->type;
- strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
- mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
- }
-
- connector_funcs = (struct drm_connector_helper_funcs *)
- connector->helper_private;
-
- if (connector_funcs->mode_valid) {
- resp = connector_funcs->mode_valid(connector, mode);
- arg->data = resp;
- }
-
- /*do some clean up work*/
- if (mode)
- drm_mode_destroy(dev, mode);
-mode_op_out:
- drm_modeset_unlock_all(dev);
- return ret;
-
- default:
- dev_dbg(dev->dev, "Unsupported psb mode operation\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_private *dev_priv = psb_priv(dev);
- struct drm_psb_stolen_memory_arg *arg = data;
-
- arg->base = dev_priv->stolen_base;
- arg->size = dev_priv->vram_stolen_size;
-
- return 0;
-}
-
-static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
-{
- return 0;
-}
-
-static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
-{
-}
-
static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -614,15 +424,21 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
/* FIXME: do we need to wrap the other side of this */
}
-
-/* When a client dies:
+/*
+ * When a client dies:
* - Check for and clean up flipped page state
*/
static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
{
}
-static void psb_remove(struct pci_dev *pdev)
+static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+
+static void psb_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_put_dev(dev);
@@ -657,11 +473,12 @@ static const struct file_operations psb_gem_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
- DRIVER_MODESET | DRIVER_GEM ,
+ DRIVER_MODESET | DRIVER_GEM,
.load = psb_driver_load,
.unload = psb_driver_unload,
+ .lastclose = psb_driver_lastclose,
+ .preclose = psb_driver_preclose,
- .ioctls = psb_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
.device_is_agp = psb_driver_device_is_agp,
.irq_preinstall = psb_irq_preinstall,
@@ -671,40 +488,31 @@ static struct drm_driver driver = {
.enable_vblank = psb_enable_vblank,
.disable_vblank = psb_disable_vblank,
.get_vblank_counter = psb_get_vblank_counter,
- .lastclose = psb_lastclose,
- .open = psb_driver_open,
- .preclose = psb_driver_preclose,
- .postclose = psb_driver_close,
.gem_free_object = psb_gem_free_object,
.gem_vm_ops = &psb_gem_vm_ops,
+
.dumb_create = psb_gem_dumb_create,
.dumb_map_offset = psb_gem_dumb_map_gtt,
.dumb_destroy = drm_gem_dumb_destroy,
+ .ioctls = psb_ioctls,
.fops = &psb_gem_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = PSB_DRM_DRIVER_DATE,
- .major = PSB_DRM_DRIVER_MAJOR,
- .minor = PSB_DRM_DRIVER_MINOR,
- .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL
};
static struct pci_driver psb_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
- .probe = psb_probe,
- .remove = psb_remove,
- .driver = {
- .pm = &psb_pm_ops,
- }
+ .probe = psb_pci_probe,
+ .remove = psb_pci_remove,
+ .driver.pm = &psb_pm_ops,
};
-static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- return drm_get_pci_dev(pdev, ent, &driver);
-}
-
static int __init psb_init(void)
{
return drm_pci_init(&driver, &psb_pci_driver);
@@ -718,6 +526,6 @@ static void __exit psb_exit(void)
late_initcall(psb_init);
module_exit(psb_exit);
-MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others");
+MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE(DRIVER_LICENSE);
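Several hunks in this file (and in psb_irq.c below) follow a register write with an immediate read-back marked "Post". That is the usual MMIO posting idiom: the read forces buffered writes out to the device before execution continues. A minimal sketch of the idiom, outside the PSB_WSGX32/PSB_RSGX32 wrappers; "regs" and "off" are placeholders:

#include <linux/io.h>

/* Write a register, then read it back so the write cannot linger in a
 * posting buffer. */
static void demo_write_and_post(void __iomem *regs, unsigned long off, u32 val)
{
        writel(val, regs + off);
        (void)readl(regs + off);
}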
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 5ad6a03e477e..55ebe2bd88dd 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -33,6 +33,18 @@
#include "power.h"
#include "opregion.h"
#include "oaktrail.h"
+#include "mmu.h"
+
+#define DRIVER_AUTHOR "Alan Cox <alan@linux.intel.com> and others"
+#define DRIVER_LICENSE "GPL"
+
+#define DRIVER_NAME "gma500"
+#define DRIVER_DESC "DRM driver for the Intel GMA500, GMA600, GMA3600, GMA3650"
+#define DRIVER_DATE "20140314"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
/* Append new drm mode definition here, align with libdrm definition */
#define DRM_MODE_SCALE_NO_SCALE 2
@@ -49,21 +61,7 @@ enum {
#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
-/*
- * Driver definitions
- */
-
-#define DRIVER_NAME "gma500"
-#define DRIVER_DESC "DRM driver for the Intel GMA500"
-
-#define PSB_DRM_DRIVER_DATE "2011-06-06"
-#define PSB_DRM_DRIVER_MAJOR 1
-#define PSB_DRM_DRIVER_MINOR 0
-#define PSB_DRM_DRIVER_PATCHLEVEL 0
-
-/*
- * Hardware offsets
- */
+/* Hardware offsets */
#define PSB_VDC_OFFSET 0x00000000
#define PSB_VDC_SIZE 0x000080000
#define MRST_MMIO_SIZE 0x0000C0000
@@ -71,16 +69,14 @@ enum {
#define PSB_SGX_SIZE 0x8000
#define PSB_SGX_OFFSET 0x00040000
#define MRST_SGX_OFFSET 0x00080000
-/*
- * PCI resource identifiers
- */
+
+/* PCI resource identifiers */
#define PSB_MMIO_RESOURCE 0
#define PSB_AUX_RESOURCE 0
#define PSB_GATT_RESOURCE 2
#define PSB_GTT_RESOURCE 3
-/*
- * PCI configuration
- */
+
+/* PCI configuration */
#define PSB_GMCH_CTRL 0x52
#define PSB_BSM 0x5C
#define _PSB_GMCH_ENABLED 0x4
@@ -88,37 +84,29 @@ enum {
#define _PSB_PGETBL_ENABLED 0x00000001
#define PSB_SGX_2D_SLAVE_PORT 0x4000
-/* To get rid of */
+/* TODO: To get rid of */
#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
-/*
- * SGX side MMU definitions (these can probably go)
- */
+/* SGX side MMU definitions (these can probably go) */
-/*
- * Flags for external memory type field.
- */
+/* Flags for external memory type field */
#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
-/*
- * PTE's and PDE's
- */
+
+/* PTE's and PDE's */
#define PSB_PDE_MASK 0x003FFFFF
#define PSB_PDE_SHIFT 22
#define PSB_PTE_SHIFT 12
-/*
- * Cache control
- */
+
+/* Cache control */
#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
#define PSB_PTE_WO 0x0002 /* Write only */
#define PSB_PTE_RO 0x0004 /* Read only */
#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
-/*
- * VDC registers and bits
- */
+/* VDC registers and bits */
#define PSB_MSVDX_CLOCKGATING 0x2064
#define PSB_TOPAZ_CLOCKGATING 0x2068
#define PSB_HWSTAM 0x2098
@@ -265,6 +253,7 @@ struct psb_intel_opregion {
struct opregion_asle *asle;
void *vbt;
u32 __iomem *lid_state;
+ struct work_struct asle_work;
};
struct sdvo_device_mapping {
@@ -283,10 +272,7 @@ struct intel_gmbus {
u32 reg0;
};
-/*
- * Register offset maps
- */
-
+/* Register offset maps */
struct psb_offset {
u32 fp0;
u32 fp1;
@@ -320,9 +306,7 @@ struct psb_offset {
* update the register cache instead.
*/
-/*
- * Common status for pipes.
- */
+/* Common status for pipes */
struct psb_pipe {
u32 fp0;
u32 fp1;
@@ -482,35 +466,24 @@ struct drm_psb_private {
struct psb_mmu_driver *mmu;
struct psb_mmu_pd *pf_pd;
- /*
- * Register base
- */
-
+ /* Register base */
uint8_t __iomem *sgx_reg;
uint8_t __iomem *vdc_reg;
uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */
uint32_t gatt_free_offset;
- /*
- * Fencing / irq.
- */
-
+ /* Fencing / irq */
uint32_t vdc_irq_mask;
uint32_t pipestat[PSB_NUM_PIPE];
spinlock_t irqmask_lock;
- /*
- * Power
- */
-
+ /* Power */
bool suspended;
bool display_power;
int display_count;
- /*
- * Modesetting
- */
+ /* Modesetting */
struct psb_intel_mode_device mode_dev;
bool modeset; /* true if we have done the mode_device setup */
@@ -518,15 +491,10 @@ struct drm_psb_private {
struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
uint32_t num_pipe;
- /*
- * OSPM info (Power management base) (can go ?)
- */
+ /* OSPM info (Power management base) (TODO: can go ?) */
uint32_t ospm_base;
- /*
- * Sizes info
- */
-
+ /* Sizes info */
u32 fuse_reg_value;
u32 video_device_fuse;
@@ -546,9 +514,7 @@ struct drm_psb_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
- /*
- * LVDS info
- */
+ /* LVDS info */
int backlight_duty_cycle; /* restore backlight to this value */
bool panel_wants_dither;
struct drm_display_mode *panel_fixed_mode;
@@ -582,34 +548,23 @@ struct drm_psb_private {
/* Oaktrail HDMI state */
struct oaktrail_hdmi_dev *hdmi_priv;
- /*
- * Register state
- */
-
+ /* Register state */
struct psb_save_area regs;
/* MSI reg save */
uint32_t msi_addr;
uint32_t msi_data;
- /*
- * Hotplug handling
- */
-
+ /* Hotplug handling */
struct work_struct hotplug_work;
- /*
- * LID-Switch
- */
+ /* LID-Switch */
spinlock_t lid_lock;
struct timer_list lid_timer;
struct psb_intel_opregion opregion;
u32 lid_last_state;
- /*
- * Watchdog
- */
-
+ /* Watchdog */
uint32_t apm_reg;
uint16_t apm_base;
@@ -629,9 +584,7 @@ struct drm_psb_private {
/* 2D acceleration */
spinlock_t lock_2d;
- /*
- * Panel brightness
- */
+ /* Panel brightness */
int brightness;
int brightness_adjusted;
@@ -664,10 +617,7 @@ struct drm_psb_private {
};
-/*
- * Operations for each board type
- */
-
+/* Operations for each board type */
struct psb_ops {
const char *name;
unsigned int accel_2d:1;
@@ -713,8 +663,6 @@ struct psb_ops {
-struct psb_mmu_driver;
-
extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
extern int drm_pick_crtcs(struct drm_device *dev);
@@ -723,52 +671,7 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
return (struct drm_psb_private *) dev->dev_private;
}
-/*
- * MMU stuff.
- */
-
-extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
- int trap_pagefaults,
- int invalid_type,
- struct drm_psb_private *dev_priv);
-extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
-extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
- *driver);
-extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
- uint32_t gtt_start, uint32_t gtt_pages);
-extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
- int trap_pagefaults,
- int invalid_type);
-extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
-extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
-extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
- unsigned long address,
- uint32_t num_pages);
-extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
- uint32_t start_pfn,
- unsigned long address,
- uint32_t num_pages, int type);
-extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn);
-
-/*
- * Enable / disable MMU for different requestors.
- */
-
-
-extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
-extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
- unsigned long address, uint32_t num_pages,
- uint32_t desired_tile_stride,
- uint32_t hw_tile_stride, int type);
-extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
- unsigned long address, uint32_t num_pages,
- uint32_t desired_tile_stride,
- uint32_t hw_tile_stride);
-/*
- *psb_irq.c
- */
-
+/* psb_irq.c */
extern irqreturn_t psb_irq_handler(int irq, void *arg);
extern int psb_irq_enable_dpst(struct drm_device *dev);
extern int psb_irq_disable_dpst(struct drm_device *dev);
@@ -791,24 +694,17 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
-/*
- * framebuffer.c
- */
+/* framebuffer.c */
extern int psbfb_probed(struct drm_device *dev);
extern int psbfb_remove(struct drm_device *dev,
struct drm_framebuffer *fb);
-/*
- * accel_2d.c
- */
+/* accel_2d.c */
extern void psbfb_copyarea(struct fb_info *info,
const struct fb_copyarea *region);
extern int psbfb_sync(struct fb_info *info);
extern void psb_spank(struct drm_psb_private *dev_priv);
-/*
- * psb_reset.c
- */
-
+/* psb_reset.c */
extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
@@ -867,9 +763,7 @@ extern const struct psb_ops mdfld_chip_ops;
/* cdv_device.c */
extern const struct psb_ops cdv_chip_ops;
-/*
- * Debug print bits setting
- */
+/* Debug print bits setting */
#define PSB_D_GENERAL (1 << 0)
#define PSB_D_INIT (1 << 1)
#define PSB_D_IRQ (1 << 2)
@@ -885,10 +779,7 @@ extern const struct psb_ops cdv_chip_ops;
extern int drm_idle_check_interval;
-/*
- * Utilities
- */
-
+/* Utilities */
static inline u32 MRST_MSG_READ32(uint port, uint offset)
{
int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index c8841ac6c8f1..87b50ba64ed4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -120,7 +120,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
const struct gma_limit_t *limit;
/* No scan out no play */
- if (crtc->fb == NULL) {
+ if (crtc->primary->fb == NULL) {
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
return 0;
}
@@ -469,7 +469,8 @@ static void psb_intel_cursor_init(struct drm_device *dev,
/* Allocate 4 pages of stolen mem for a hardware cursor. That
* is enough for the 64 x 64 ARGB cursors we support.
*/
- cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1);
+ cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1,
+ PAGE_SIZE);
if (!cursor_gt) {
gma_crtc->cursor_gt = NULL;
goto out;
@@ -554,33 +555,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
gma_crtc->active = true;
}
-int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
- struct drm_mode_object *drmmode_obj;
- struct gma_crtc *crtc;
-
- if (!dev_priv) {
- dev_err(dev->dev, "called with no initialization\n");
- return -EINVAL;
- }
-
- drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
- DRM_MODE_OBJECT_CRTC);
-
- if (!drmmode_obj) {
- dev_err(dev->dev, "no such CRTC id\n");
- return -ENOENT;
- }
-
- crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
- pipe_from_crtc_id->pipe = crtc->pipe;
-
- return 0;
-}
-
struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
{
struct drm_crtc *crtc = NULL;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index dc2c8eb030fa..336bd3aa1a06 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -238,8 +238,6 @@ static inline struct gma_encoder *gma_attached_encoder(
extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
-extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
int pipe);
extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 32342f6990d9..d7778d0472c1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -614,7 +614,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
&crtc->saved_mode,
encoder->crtc->x,
encoder->crtc->y,
- encoder->crtc->fb))
+ encoder->crtc->primary->fb))
goto set_prop_error;
}
} else if (!strcmp(property->name, "backlight")) {
@@ -777,6 +777,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
+ mutex_lock(&dev->mode_config.mutex);
psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -827,10 +828,12 @@ void psb_intel_lvds_init(struct drm_device *dev,
* actually having one.
*/
out:
+ mutex_unlock(&dev->mode_config.mutex);
drm_sysfs_connector_add(connector);
return;
failed_find:
+ mutex_unlock(&dev->mode_config.mutex);
if (lvds_priv->ddc_bus)
psb_intel_i2c_destroy(lvds_priv->ddc_bus);
failed_ddc:
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 07d3a9e6d79b..deeb0829b129 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -406,18 +406,18 @@ static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8
DRM_DEBUG_KMS("%s: W: %02X ",
SDVO_NAME(psb_intel_sdvo), cmd);
for (i = 0; i < args_len; i++)
- DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
+ DRM_DEBUG_KMS("%02X ", ((u8 *)args)[i]);
for (; i < 8; i++)
- DRM_LOG_KMS(" ");
+ DRM_DEBUG_KMS(" ");
for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
- DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+ DRM_DEBUG_KMS("(%s)", sdvo_cmd_names[i].name);
break;
}
}
if (i == ARRAY_SIZE(sdvo_cmd_names))
- DRM_LOG_KMS("(%02X)", cmd);
- DRM_LOG_KMS("\n");
+ DRM_DEBUG_KMS("(%02X)", cmd);
+ DRM_DEBUG_KMS("\n");
}
static const char *cmd_status_names[] = {
@@ -512,9 +512,9 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
}
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+ DRM_DEBUG_KMS("(%s)", cmd_status_names[status]);
else
- DRM_LOG_KMS("(??? %d)", status);
+ DRM_DEBUG_KMS("(??? %d)", status);
if (status != SDVO_CMD_STATUS_SUCCESS)
goto log_fail;
@@ -525,13 +525,13 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
SDVO_I2C_RETURN_0 + i,
&((u8 *)response)[i]))
goto log_fail;
- DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+ DRM_DEBUG_KMS(" %02X", ((u8 *)response)[i]);
}
- DRM_LOG_KMS("\n");
+ DRM_DEBUG_KMS("\n");
return true;
log_fail:
- DRM_LOG_KMS("... failed\n");
+ DRM_DEBUG_KMS("... failed\n");
return false;
}
@@ -1844,7 +1844,7 @@ done:
if (psb_intel_sdvo->base.base.crtc) {
struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
+ crtc->y, crtc->primary->fb);
}
return 0;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index f883f9e4c524..624eb36511c5 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -200,11 +200,64 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
mid_pipe_event_handler(dev, 1);
}
+/*
+ * SGX interrupt handler
+ */
+static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 val, addr;
+ int error = false;
+
+ if (stat_1 & _PSB_CE_TWOD_COMPLETE)
+ val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);
+
+ if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
+ if (val) {
+ if (val & _PSB_CBI_STAT_PF_N_RW)
+ DRM_ERROR("SGX MMU page fault:");
+ else
+ DRM_ERROR("SGX MMU read / write protection fault:");
+
+ if (val & _PSB_CBI_STAT_FAULT_CACHE)
+ DRM_ERROR("\tCache requestor");
+ if (val & _PSB_CBI_STAT_FAULT_TA)
+ DRM_ERROR("\tTA requestor");
+ if (val & _PSB_CBI_STAT_FAULT_VDM)
+ DRM_ERROR("\tVDM requestor");
+ if (val & _PSB_CBI_STAT_FAULT_2D)
+ DRM_ERROR("\t2D requestor");
+ if (val & _PSB_CBI_STAT_FAULT_PBE)
+ DRM_ERROR("\tPBE requestor");
+ if (val & _PSB_CBI_STAT_FAULT_TSP)
+ DRM_ERROR("\tTSP requestor");
+ if (val & _PSB_CBI_STAT_FAULT_ISP)
+ DRM_ERROR("\tISP requestor");
+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
+ DRM_ERROR("\tUSSEPDS requestor");
+ if (val & _PSB_CBI_STAT_FAULT_HOST)
+ DRM_ERROR("\tHost requestor");
+
+ DRM_ERROR("\tMMU failing address is 0x%08x.\n",
+ (unsigned int)addr);
+ error = true;
+ }
+ }
+
+ /* Clear bits */
+ PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
+ PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
+ PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
+}
+
irqreturn_t psb_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_psb_private *dev_priv = dev->dev_private;
uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
+ u32 sgx_stat_1, sgx_stat_2;
int handled = 0;
spin_lock(&dev_priv->irqmask_lock);
@@ -233,14 +286,9 @@ irqreturn_t psb_irq_handler(int irq, void *arg)
}
if (sgx_int) {
- /* Not expected - we have it masked, shut it up */
- u32 s, s2;
- s = PSB_RSGX32(PSB_CR_EVENT_STATUS);
- s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
- PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
- PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
- /* if s & _PSB_CE_TWOD_COMPLETE we have 2D done but
- we may as well poll even if we add that ! */
+ sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
+ sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
+ psb_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
handled = 1;
}
@@ -269,8 +317,13 @@ void psb_irq_preinstall(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
- if (gma_power_is_on(dev))
+ if (gma_power_is_on(dev)) {
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
+ PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
+ }
if (dev->vblank[0].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
if (dev->vblank[1].enabled)
@@ -286,7 +339,7 @@ void psb_irq_preinstall(struct drm_device *dev)
/* Revisit this area - want per device masks ? */
if (dev_priv->ops->hotplug)
dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
- dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE;
+ dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;
/* This register is safe even if display island is off */
PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
@@ -295,12 +348,16 @@ void psb_irq_preinstall(struct drm_device *dev)
int psb_irq_postinstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ /* Enable 2D and MMU fault interrupts */
+ PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
+ PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
+ PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */
+
/* This register is safe even if display island is off */
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
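The new psb_sgx_interrupt() above reports the faulting requestor with a chain of bit tests. Purely as an illustration, the same decode can be table-driven; the masks are the _PSB_CBI_STAT_* bits already used by the patch, so this sketch only compiles inside the driver where those definitions and DRM_ERROR are visible:

#include <linux/kernel.h>

static const struct {
        u32 bit;
        const char *name;
} demo_sgx_requestors[] = {
        { _PSB_CBI_STAT_FAULT_CACHE,   "Cache"   },
        { _PSB_CBI_STAT_FAULT_TA,      "TA"      },
        { _PSB_CBI_STAT_FAULT_VDM,     "VDM"     },
        { _PSB_CBI_STAT_FAULT_2D,      "2D"      },
        { _PSB_CBI_STAT_FAULT_PBE,     "PBE"     },
        { _PSB_CBI_STAT_FAULT_TSP,     "TSP"     },
        { _PSB_CBI_STAT_FAULT_ISP,     "ISP"     },
        { _PSB_CBI_STAT_FAULT_USSEPDS, "USSEPDS" },
        { _PSB_CBI_STAT_FAULT_HOST,    "Host"    },
};

static void demo_log_sgx_requestors(u32 val)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(demo_sgx_requestors); i++)
                if (val & demo_sgx_requestors[i].bit)
                        DRM_ERROR("\t%s requestor",
                                  demo_sgx_requestors[i].name);
}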
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index faa77f543a07..48af5cac1902 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -19,6 +19,8 @@
#include <linux/hdmi.h>
#include <linux/module.h>
+#include <linux/irq.h>
+#include <sound/asoundef.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -30,6 +32,7 @@
struct tda998x_priv {
struct i2c_client *cec;
+ struct i2c_client *hdmi;
uint16_t rev;
uint8_t current_page;
int dpms;
@@ -38,6 +41,10 @@ struct tda998x_priv {
u8 vip_cntrl_1;
u8 vip_cntrl_2;
struct tda998x_encoder_params params;
+
+ wait_queue_head_t wq_edid;
+ volatile int wq_edid_wait;
+ struct drm_encoder *encoder;
};
#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
@@ -120,6 +127,8 @@ struct tda998x_priv {
# define VIP_CNTRL_5_CKCASE (1 << 0)
# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
#define REG_MUX_AP REG(0x00, 0x26) /* read/write */
+# define MUX_AP_SELECT_I2S 0x64
+# define MUX_AP_SELECT_SPDIF 0x40
#define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */
#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
@@ -197,10 +206,11 @@ struct tda998x_priv {
#define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */
# define I2S_FORMAT(x) (((x) & 3) << 0)
#define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */
-# define AIP_CLKSEL_FS(x) (((x) & 3) << 0)
-# define AIP_CLKSEL_CLK_POL(x) (((x) & 1) << 2)
-# define AIP_CLKSEL_AIP(x) (((x) & 7) << 3)
-
+# define AIP_CLKSEL_AIP_SPDIF (0 << 3)
+# define AIP_CLKSEL_AIP_I2S (1 << 3)
+# define AIP_CLKSEL_FS_ACLK (0 << 0)
+# define AIP_CLKSEL_FS_MCLK (1 << 0)
+# define AIP_CLKSEL_FS_FS64SPDIF (2 << 0)
/* Page 02h: PLL settings */
#define REG_PLL_SERIAL_1 REG(0x02, 0x00) /* read/write */
@@ -304,11 +314,16 @@ struct tda998x_priv {
/* CEC registers: (not paged)
*/
+#define REG_CEC_INTSTATUS 0xee /* read */
+# define CEC_INTSTATUS_CEC (1 << 0)
+# define CEC_INTSTATUS_HDMI (1 << 1)
#define REG_CEC_FRO_IM_CLK_CTRL 0xfb /* read/write */
# define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7)
# define CEC_FRO_IM_CLK_CTRL_ENA_OTP (1 << 6)
# define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1)
# define CEC_FRO_IM_CLK_CTRL_FRO_DIV (1 << 0)
+#define REG_CEC_RXSHPDINTENA 0xfc /* read/write */
+#define REG_CEC_RXSHPDINT 0xfd /* read */
#define REG_CEC_RXSHPDLEV 0xfe /* read */
# define CEC_RXSHPDLEV_RXSENS (1 << 0)
# define CEC_RXSHPDLEV_HPD (1 << 1)
@@ -328,21 +343,21 @@ struct tda998x_priv {
#define TDA19988 0x0301
static void
-cec_write(struct drm_encoder *encoder, uint16_t addr, uint8_t val)
+cec_write(struct tda998x_priv *priv, uint16_t addr, uint8_t val)
{
- struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+ struct i2c_client *client = priv->cec;
uint8_t buf[] = {addr, val};
int ret;
- ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
}
static uint8_t
-cec_read(struct drm_encoder *encoder, uint8_t addr)
+cec_read(struct tda998x_priv *priv, uint8_t addr)
{
- struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+ struct i2c_client *client = priv->cec;
uint8_t val;
int ret;
@@ -361,32 +376,36 @@ fail:
return 0;
}
-static void
-set_page(struct drm_encoder *encoder, uint16_t reg)
+static int
+set_page(struct tda998x_priv *priv, uint16_t reg)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
if (REG2PAGE(reg) != priv->current_page) {
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = priv->hdmi;
uint8_t buf[] = {
REG_CURPAGE, REG2PAGE(reg)
};
int ret = i2c_master_send(client, buf, sizeof(buf));
- if (ret < 0)
- dev_err(&client->dev, "Error %d writing to REG_CURPAGE\n", ret);
+ if (ret < 0) {
+ dev_err(&client->dev, "setpage %04x err %d\n",
+ reg, ret);
+ return ret;
+ }
priv->current_page = REG2PAGE(reg);
}
+ return 0;
}
static int
-reg_read_range(struct drm_encoder *encoder, uint16_t reg, char *buf, int cnt)
+reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = priv->hdmi;
uint8_t addr = REG2ADDR(reg);
int ret;
- set_page(encoder, reg);
+ ret = set_page(priv, reg);
+ if (ret < 0)
+ return ret;
ret = i2c_master_send(client, &addr, sizeof(addr));
if (ret < 0)
@@ -404,100 +423,147 @@ fail:
}
static void
-reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt)
+reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = priv->hdmi;
uint8_t buf[cnt+1];
int ret;
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
- set_page(encoder, reg);
+ ret = set_page(priv, reg);
+ if (ret < 0)
+ return;
ret = i2c_master_send(client, buf, cnt + 1);
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
}
-static uint8_t
-reg_read(struct drm_encoder *encoder, uint16_t reg)
+static int
+reg_read(struct tda998x_priv *priv, uint16_t reg)
{
uint8_t val = 0;
- reg_read_range(encoder, reg, &val, sizeof(val));
+ int ret;
+
+ ret = reg_read_range(priv, reg, &val, sizeof(val));
+ if (ret < 0)
+ return ret;
return val;
}
static void
-reg_write(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = priv->hdmi;
uint8_t buf[] = {REG2ADDR(reg), val};
int ret;
- set_page(encoder, reg);
+ ret = set_page(priv, reg);
+ if (ret < 0)
+ return;
- ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
}
static void
-reg_write16(struct drm_encoder *encoder, uint16_t reg, uint16_t val)
+reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = priv->hdmi;
uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
int ret;
- set_page(encoder, reg);
+ ret = set_page(priv, reg);
+ if (ret < 0)
+ return;
- ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
}
static void
-reg_set(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+reg_set(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
{
- reg_write(encoder, reg, reg_read(encoder, reg) | val);
+ int old_val;
+
+ old_val = reg_read(priv, reg);
+ if (old_val >= 0)
+ reg_write(priv, reg, old_val | val);
}
static void
-reg_clear(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+reg_clear(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
{
- reg_write(encoder, reg, reg_read(encoder, reg) & ~val);
+ int old_val;
+
+ old_val = reg_read(priv, reg);
+ if (old_val >= 0)
+ reg_write(priv, reg, old_val & ~val);
}
static void
-tda998x_reset(struct drm_encoder *encoder)
+tda998x_reset(struct tda998x_priv *priv)
{
/* reset audio and i2c master: */
- reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+ reg_write(priv, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
msleep(50);
- reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+ reg_write(priv, REG_SOFTRESET, 0);
msleep(50);
/* reset transmitter: */
- reg_set(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
- reg_clear(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+ reg_set(priv, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+ reg_clear(priv, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
/* PLL registers common configuration */
- reg_write(encoder, REG_PLL_SERIAL_1, 0x00);
- reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1));
- reg_write(encoder, REG_PLL_SERIAL_3, 0x00);
- reg_write(encoder, REG_SERIALIZER, 0x00);
- reg_write(encoder, REG_BUFFER_OUT, 0x00);
- reg_write(encoder, REG_PLL_SCG1, 0x00);
- reg_write(encoder, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
- reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
- reg_write(encoder, REG_PLL_SCGN1, 0xfa);
- reg_write(encoder, REG_PLL_SCGN2, 0x00);
- reg_write(encoder, REG_PLL_SCGR1, 0x5b);
- reg_write(encoder, REG_PLL_SCGR2, 0x00);
- reg_write(encoder, REG_PLL_SCG2, 0x10);
+ reg_write(priv, REG_PLL_SERIAL_1, 0x00);
+ reg_write(priv, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1));
+ reg_write(priv, REG_PLL_SERIAL_3, 0x00);
+ reg_write(priv, REG_SERIALIZER, 0x00);
+ reg_write(priv, REG_BUFFER_OUT, 0x00);
+ reg_write(priv, REG_PLL_SCG1, 0x00);
+ reg_write(priv, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
+ reg_write(priv, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+ reg_write(priv, REG_PLL_SCGN1, 0xfa);
+ reg_write(priv, REG_PLL_SCGN2, 0x00);
+ reg_write(priv, REG_PLL_SCGR1, 0x5b);
+ reg_write(priv, REG_PLL_SCGR2, 0x00);
+ reg_write(priv, REG_PLL_SCG2, 0x10);
/* Write the default value MUX register */
- reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24);
+ reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
+}
+
+/*
+ * only 2 interrupts may occur: screen plug/unplug and EDID read
+ */
+static irqreturn_t tda998x_irq_thread(int irq, void *data)
+{
+ struct tda998x_priv *priv = data;
+ u8 sta, cec, lvl, flag0, flag1, flag2;
+
+ if (!priv)
+ return IRQ_HANDLED;
+ sta = cec_read(priv, REG_CEC_INTSTATUS);
+ cec = cec_read(priv, REG_CEC_RXSHPDINT);
+ lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
+ flag0 = reg_read(priv, REG_INT_FLAGS_0);
+ flag1 = reg_read(priv, REG_INT_FLAGS_1);
+ flag2 = reg_read(priv, REG_INT_FLAGS_2);
+ DRM_DEBUG_DRIVER(
+ "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
+ sta, cec, lvl, flag0, flag1, flag2);
+ if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
+ priv->wq_edid_wait = 0;
+ wake_up(&priv->wq_edid);
+ } else if (cec != 0) { /* HPD change */
+ if (priv->encoder && priv->encoder->dev)
+ drm_helper_hpd_irq_event(priv->encoder->dev);
+ }
+ return IRQ_HANDLED;
}
static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
@@ -513,91 +579,88 @@ static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
#define PB(x) (HB(2) + 1 + (x))
static void
-tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr,
+tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
uint8_t *buf, size_t size)
{
buf[PB(0)] = tda998x_cksum(buf, size);
- reg_clear(encoder, REG_DIP_IF_FLAGS, bit);
- reg_write_range(encoder, addr, buf, size);
- reg_set(encoder, REG_DIP_IF_FLAGS, bit);
+ reg_clear(priv, REG_DIP_IF_FLAGS, bit);
+ reg_write_range(priv, addr, buf, size);
+ reg_set(priv, REG_DIP_IF_FLAGS, bit);
}
static void
-tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
+tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
{
- uint8_t buf[PB(5) + 1];
+ u8 buf[PB(HDMI_AUDIO_INFOFRAME_SIZE) + 1];
memset(buf, 0, sizeof(buf));
- buf[HB(0)] = 0x84;
+ buf[HB(0)] = HDMI_INFOFRAME_TYPE_AUDIO;
buf[HB(1)] = 0x01;
- buf[HB(2)] = 10;
+ buf[HB(2)] = HDMI_AUDIO_INFOFRAME_SIZE;
buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
buf[PB(4)] = p->audio_frame[4];
buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
- tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
+ tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
sizeof(buf));
}
static void
-tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
+tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
{
- uint8_t buf[PB(13) + 1];
+ u8 buf[PB(HDMI_AVI_INFOFRAME_SIZE) + 1];
memset(buf, 0, sizeof(buf));
- buf[HB(0)] = 0x82;
+ buf[HB(0)] = HDMI_INFOFRAME_TYPE_AVI;
buf[HB(1)] = 0x02;
- buf[HB(2)] = 13;
+ buf[HB(2)] = HDMI_AVI_INFOFRAME_SIZE;
buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
+ buf[PB(2)] = HDMI_ACTIVE_ASPECT_PICTURE;
buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
buf[PB(4)] = drm_match_cea_mode(mode);
- tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
+ tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
sizeof(buf));
}
-static void tda998x_audio_mute(struct drm_encoder *encoder, bool on)
+static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
{
if (on) {
- reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
- reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
- reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+ reg_set(priv, REG_SOFTRESET, SOFTRESET_AUDIO);
+ reg_clear(priv, REG_SOFTRESET, SOFTRESET_AUDIO);
+ reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
} else {
- reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+ reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
}
}
static void
-tda998x_configure_audio(struct drm_encoder *encoder,
+tda998x_configure_audio(struct tda998x_priv *priv,
struct drm_display_mode *mode, struct tda998x_encoder_params *p)
{
- uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv;
+ uint8_t buf[6], clksel_aip, clksel_fs, cts_n, adiv;
uint32_t n;
/* Enable audio ports */
- reg_write(encoder, REG_ENA_AP, p->audio_cfg);
- reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg);
+ reg_write(priv, REG_ENA_AP, p->audio_cfg);
+ reg_write(priv, REG_ENA_ACLK, p->audio_clk_cfg);
/* Set audio input source */
switch (p->audio_format) {
case AFMT_SPDIF:
- reg_write(encoder, REG_MUX_AP, 0x40);
- clksel_aip = AIP_CLKSEL_AIP(0);
- /* FS64SPDIF */
- clksel_fs = AIP_CLKSEL_FS(2);
+ reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_SPDIF);
+ clksel_aip = AIP_CLKSEL_AIP_SPDIF;
+ clksel_fs = AIP_CLKSEL_FS_FS64SPDIF;
cts_n = CTS_N_M(3) | CTS_N_K(3);
- ca_i2s = 0;
break;
case AFMT_I2S:
- reg_write(encoder, REG_MUX_AP, 0x64);
- clksel_aip = AIP_CLKSEL_AIP(1);
- /* ACLK */
- clksel_fs = AIP_CLKSEL_FS(0);
+ reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_I2S);
+ clksel_aip = AIP_CLKSEL_AIP_I2S;
+ clksel_fs = AIP_CLKSEL_FS_ACLK;
cts_n = CTS_N_M(3) | CTS_N_K(3);
- ca_i2s = CA_I2S_CA_I2S(0);
break;
default:
@@ -605,12 +668,10 @@ tda998x_configure_audio(struct drm_encoder *encoder,
return;
}
- reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
- reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT);
-
- /* Enable automatic CTS generation */
- reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN);
- reg_write(encoder, REG_CTS_N, cts_n);
+ reg_write(priv, REG_AIP_CLKSEL, clksel_aip);
+ reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT |
+ AIP_CNTRL_0_ACR_MAN); /* auto CTS */
+ reg_write(priv, REG_CTS_N, cts_n);
/*
* Audio input somehow depends on HDMI line rate which is
@@ -619,11 +680,15 @@ tda998x_configure_audio(struct drm_encoder *encoder,
* There is no detailed info in the datasheet, so we just
* assume 100MHz requires larger divider.
*/
+ adiv = AUDIO_DIV_SERCLK_8;
if (mode->clock > 100000)
- adiv = AUDIO_DIV_SERCLK_16;
- else
- adiv = AUDIO_DIV_SERCLK_8;
- reg_write(encoder, REG_AUDIO_DIV, adiv);
+ adiv++; /* AUDIO_DIV_SERCLK_16 */
+
+ /* S/PDIF asks for a larger divider */
+ if (p->audio_format == AFMT_SPDIF)
+ adiv++; /* AUDIO_DIV_SERCLK_16 or _32 */
+
+ reg_write(priv, REG_AUDIO_DIV, adiv);
/*
* This is the approximate value of N, which happens to be
@@ -638,28 +703,29 @@ tda998x_configure_audio(struct drm_encoder *encoder,
buf[3] = n;
buf[4] = n >> 8;
buf[5] = n >> 16;
- reg_write_range(encoder, REG_ACR_CTS_0, buf, 6);
+ reg_write_range(priv, REG_ACR_CTS_0, buf, 6);
/* Set CTS clock reference */
- reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
+ reg_write(priv, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
/* Reset CTS generator */
- reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
- reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+ reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+ reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
/* Write the channel status */
- buf[0] = 0x04;
+ buf[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
buf[1] = 0x00;
- buf[2] = 0x00;
- buf[3] = 0xf1;
- reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4);
+ buf[2] = IEC958_AES3_CON_FS_NOTID;
+ buf[3] = IEC958_AES4_CON_ORIGFS_NOTID |
+ IEC958_AES4_CON_MAX_WORDLEN_24;
+ reg_write_range(priv, REG_CH_STAT_B(0), buf, 4);
- tda998x_audio_mute(encoder, true);
- mdelay(20);
- tda998x_audio_mute(encoder, false);
+ tda998x_audio_mute(priv, true);
+ msleep(20);
+ tda998x_audio_mute(priv, false);
/* Write the audio information packet */
- tda998x_write_aif(encoder, p);
+ tda998x_write_aif(priv, p);
}
/* DRM encoder functions */
@@ -701,19 +767,19 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
/* enable video ports, audio will be enabled later */
- reg_write(encoder, REG_ENA_VP_0, 0xff);
- reg_write(encoder, REG_ENA_VP_1, 0xff);
- reg_write(encoder, REG_ENA_VP_2, 0xff);
+ reg_write(priv, REG_ENA_VP_0, 0xff);
+ reg_write(priv, REG_ENA_VP_1, 0xff);
+ reg_write(priv, REG_ENA_VP_2, 0xff);
/* set muxing after enabling ports: */
- reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
- reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
- reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
+ reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+ reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+ reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
break;
case DRM_MODE_DPMS_OFF:
/* disable video ports */
- reg_write(encoder, REG_ENA_VP_0, 0x00);
- reg_write(encoder, REG_ENA_VP_1, 0x00);
- reg_write(encoder, REG_ENA_VP_2, 0x00);
+ reg_write(priv, REG_ENA_VP_0, 0x00);
+ reg_write(priv, REG_ENA_VP_1, 0x00);
+ reg_write(priv, REG_ENA_VP_2, 0x00);
break;
}
@@ -831,110 +897,110 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
}
/* mute the audio FIFO: */
- reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+ reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
/* set HDMI HDCP mode off: */
- reg_set(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
- reg_clear(encoder, REG_TX33, TX33_HDMI);
+ reg_write(priv, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+ reg_clear(priv, REG_TX33, TX33_HDMI);
+ reg_write(priv, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0));
- reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0));
/* no pre-filter or interpolator: */
- reg_write(encoder, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
+ reg_write(priv, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
HVF_CNTRL_0_INTPOL(0));
- reg_write(encoder, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
- reg_write(encoder, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
+ reg_write(priv, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
+ reg_write(priv, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
VIP_CNTRL_4_BLC(0));
- reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR);
- reg_clear(encoder, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ);
- reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_DE);
- reg_write(encoder, REG_SERIALIZER, 0);
- reg_write(encoder, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0));
+ reg_clear(priv, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ);
+ reg_clear(priv, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR |
+ PLL_SERIAL_3_SRL_DE);
+ reg_write(priv, REG_SERIALIZER, 0);
+ reg_write(priv, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0));
/* TODO enable pixel repeat for pixel rates less than 25Msamp/s */
rep = 0;
- reg_write(encoder, REG_RPT_CNTRL, 0);
- reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) |
+ reg_write(priv, REG_RPT_CNTRL, 0);
+ reg_write(priv, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) |
SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
- reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
+ reg_write(priv, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
PLL_SERIAL_2_SRL_PR(rep));
/* set color matrix bypass flag: */
- reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
+ reg_write(priv, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP |
+ MAT_CONTRL_MAT_SC(1));
/* set BIAS tmds value: */
- reg_write(encoder, REG_ANA_GENERAL, 0x09);
-
- reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
+ reg_write(priv, REG_ANA_GENERAL, 0x09);
/*
* Sync on rising HSYNC/VSYNC
*/
- reg_write(encoder, REG_VIP_CNTRL_3, 0);
- reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
+ reg = VIP_CNTRL_3_SYNC_HS;
/*
* TDA19988 requires high-active sync at input stage,
* so invert low-active sync provided by master encoder here
*/
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
+ reg |= VIP_CNTRL_3_H_TGL;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
+ reg |= VIP_CNTRL_3_V_TGL;
+ reg_write(priv, REG_VIP_CNTRL_3, reg);
+
+ reg_write(priv, REG_VIDFORMAT, 0x00);
+ reg_write16(priv, REG_REFPIX_MSB, ref_pix);
+ reg_write16(priv, REG_REFLINE_MSB, ref_line);
+ reg_write16(priv, REG_NPIX_MSB, n_pix);
+ reg_write16(priv, REG_NLINE_MSB, n_line);
+ reg_write16(priv, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
+ reg_write16(priv, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
+ reg_write16(priv, REG_VS_LINE_END_1_MSB, vs1_line_e);
+ reg_write16(priv, REG_VS_PIX_END_1_MSB, vs1_pix_e);
+ reg_write16(priv, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
+ reg_write16(priv, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
+ reg_write16(priv, REG_VS_LINE_END_2_MSB, vs2_line_e);
+ reg_write16(priv, REG_VS_PIX_END_2_MSB, vs2_pix_e);
+ reg_write16(priv, REG_HS_PIX_START_MSB, hs_pix_s);
+ reg_write16(priv, REG_HS_PIX_STOP_MSB, hs_pix_e);
+ reg_write16(priv, REG_VWIN_START_1_MSB, vwin1_line_s);
+ reg_write16(priv, REG_VWIN_END_1_MSB, vwin1_line_e);
+ reg_write16(priv, REG_VWIN_START_2_MSB, vwin2_line_s);
+ reg_write16(priv, REG_VWIN_END_2_MSB, vwin2_line_e);
+ reg_write16(priv, REG_DE_START_MSB, de_pix_s);
+ reg_write16(priv, REG_DE_STOP_MSB, de_pix_e);
+
+ if (priv->rev == TDA19988) {
+ /* let incoming pixels fill the active space (if any) */
+ reg_write(priv, REG_ENABLE_SPACE, 0x00);
+ }
/*
* Always generate sync polarity relative to input sync and
* revert input stage toggled sync at output stage
*/
- reg = TBG_CNTRL_1_TGL_EN;
+ reg = TBG_CNTRL_1_DWIN_DIS | TBG_CNTRL_1_TGL_EN;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
reg |= TBG_CNTRL_1_H_TGL;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
reg |= TBG_CNTRL_1_V_TGL;
- reg_write(encoder, REG_TBG_CNTRL_1, reg);
-
- reg_write(encoder, REG_VIDFORMAT, 0x00);
- reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
- reg_write16(encoder, REG_REFLINE_MSB, ref_line);
- reg_write16(encoder, REG_NPIX_MSB, n_pix);
- reg_write16(encoder, REG_NLINE_MSB, n_line);
- reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
- reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
- reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e);
- reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e);
- reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
- reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
- reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e);
- reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e);
- reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s);
- reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e);
- reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s);
- reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e);
- reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s);
- reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e);
- reg_write16(encoder, REG_DE_START_MSB, de_pix_s);
- reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e);
-
- if (priv->rev == TDA19988) {
- /* let incoming pixels fill the active space (if any) */
- reg_write(encoder, REG_ENABLE_SPACE, 0x00);
- }
+ reg_write(priv, REG_TBG_CNTRL_1, reg);
/* must be last register set: */
- reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
+ reg_write(priv, REG_TBG_CNTRL_0, 0);
/* Only setup the info frames if the sink is HDMI */
if (priv->is_hdmi_sink) {
/* We need to turn HDMI HDCP stuff on to get audio through */
- reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
- reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
- reg_set(encoder, REG_TX33, TX33_HDMI);
+ reg &= ~TBG_CNTRL_1_DWIN_DIS;
+ reg_write(priv, REG_TBG_CNTRL_1, reg);
+ reg_write(priv, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
+ reg_set(priv, REG_TX33, TX33_HDMI);
- tda998x_write_avi(encoder, adjusted_mode);
+ tda998x_write_avi(priv, adjusted_mode);
if (priv->params.audio_cfg)
- tda998x_configure_audio(encoder, adjusted_mode,
+ tda998x_configure_audio(priv, adjusted_mode,
&priv->params);
}
}
@@ -943,7 +1009,9 @@ static enum drm_connector_status
tda998x_encoder_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
- uint8_t val = cec_read(encoder, REG_CEC_RXSHPDLEV);
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV);
+
return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
connector_status_disconnected;
}
@@ -951,46 +1019,57 @@ tda998x_encoder_detect(struct drm_encoder *encoder,
static int
read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
uint8_t offset, segptr;
int ret, i;
- /* enable EDID read irq: */
- reg_set(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-
offset = (blk & 1) ? 128 : 0;
segptr = blk / 2;
- reg_write(encoder, REG_DDC_ADDR, 0xa0);
- reg_write(encoder, REG_DDC_OFFS, offset);
- reg_write(encoder, REG_DDC_SEGM_ADDR, 0x60);
- reg_write(encoder, REG_DDC_SEGM, segptr);
+ reg_write(priv, REG_DDC_ADDR, 0xa0);
+ reg_write(priv, REG_DDC_OFFS, offset);
+ reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
+ reg_write(priv, REG_DDC_SEGM, segptr);
/* enable reading EDID: */
- reg_write(encoder, REG_EDID_CTRL, 0x1);
+ priv->wq_edid_wait = 1;
+ reg_write(priv, REG_EDID_CTRL, 0x1);
/* flag must be cleared by sw: */
- reg_write(encoder, REG_EDID_CTRL, 0x0);
+ reg_write(priv, REG_EDID_CTRL, 0x0);
/* wait for block read to complete: */
- for (i = 100; i > 0; i--) {
- uint8_t val = reg_read(encoder, REG_INT_FLAGS_2);
- if (val & INT_FLAGS_2_EDID_BLK_RD)
- break;
- msleep(1);
+ if (priv->hdmi->irq) {
+ i = wait_event_timeout(priv->wq_edid,
+ !priv->wq_edid_wait,
+ msecs_to_jiffies(100));
+ if (i < 0) {
+ dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
+ return i;
+ }
+ } else {
+ for (i = 10; i > 0; i--) {
+ msleep(10);
+ ret = reg_read(priv, REG_INT_FLAGS_2);
+ if (ret < 0)
+ return ret;
+ if (ret & INT_FLAGS_2_EDID_BLK_RD)
+ break;
+ }
}
- if (i == 0)
+ if (i == 0) {
+ dev_err(&priv->hdmi->dev, "read edid timeout\n");
return -ETIMEDOUT;
+ }
- ret = reg_read_range(encoder, REG_EDID_DATA_0, buf, EDID_LENGTH);
+ ret = reg_read_range(priv, REG_EDID_DATA_0, buf, EDID_LENGTH);
if (ret != EDID_LENGTH) {
- dev_err(encoder->dev->dev, "failed to read edid block %d: %d",
- blk, ret);
+ dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
+ blk, ret);
return ret;
}
- reg_clear(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-
return 0;
}
@@ -998,7 +1077,7 @@ static uint8_t *
do_get_edid(struct drm_encoder *encoder)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
- int j = 0, valid_extensions = 0;
+ int j, valid_extensions = 0;
uint8_t *block, *new;
bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -1006,7 +1085,7 @@ do_get_edid(struct drm_encoder *encoder)
return NULL;
if (priv->rev == TDA19988)
- reg_clear(encoder, REG_TX4, TX4_PD_RAM);
+ reg_clear(priv, REG_TX4, TX4_PD_RAM);
/* base block fetch */
if (read_edid_block(encoder, block, 0))
@@ -1046,14 +1125,14 @@ do_get_edid(struct drm_encoder *encoder)
done:
if (priv->rev == TDA19988)
- reg_set(encoder, REG_TX4, TX4_PD_RAM);
+ reg_set(priv, REG_TX4, TX4_PD_RAM);
return block;
fail:
if (priv->rev == TDA19988)
- reg_set(encoder, REG_TX4, TX4_PD_RAM);
- dev_warn(encoder->dev->dev, "failed to read EDID\n");
+ reg_set(priv, REG_TX4, TX4_PD_RAM);
+ dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
kfree(block);
return NULL;
}
@@ -1080,7 +1159,13 @@ static int
tda998x_encoder_create_resources(struct drm_encoder *encoder,
struct drm_connector *connector)
{
- DBG("");
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ if (priv->hdmi->irq)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
}
@@ -1099,6 +1184,13 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
drm_i2c_encoder_destroy(encoder);
+
+ /* disable all IRQs and free the IRQ handler */
+ cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
+ reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+ if (priv->hdmi->irq)
+ free_irq(priv->hdmi->irq, priv);
+
if (priv->cec)
i2c_unregister_device(priv->cec);
kfree(priv);
@@ -1138,8 +1230,10 @@ tda998x_encoder_init(struct i2c_client *client,
struct drm_device *dev,
struct drm_encoder_slave *encoder_slave)
{
- struct drm_encoder *encoder = &encoder_slave->base;
struct tda998x_priv *priv;
+ struct device_node *np = client->dev.of_node;
+ u32 video;
+ int rev_lo, rev_hi, ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1150,52 +1244,113 @@ tda998x_encoder_init(struct i2c_client *client,
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
priv->current_page = 0xff;
+ priv->hdmi = client;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
if (!priv->cec) {
kfree(priv);
return -ENODEV;
}
+
+ priv->encoder = &encoder_slave->base;
priv->dpms = DRM_MODE_DPMS_OFF;
encoder_slave->slave_priv = priv;
encoder_slave->slave_funcs = &tda998x_encoder_funcs;
/* wake up the device: */
- cec_write(encoder, REG_CEC_ENAMODS,
+ cec_write(priv, REG_CEC_ENAMODS,
CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
- tda998x_reset(encoder);
+ tda998x_reset(priv);
/* read version: */
- priv->rev = reg_read(encoder, REG_VERSION_LSB) |
- reg_read(encoder, REG_VERSION_MSB) << 8;
+ rev_lo = reg_read(priv, REG_VERSION_LSB);
+ rev_hi = reg_read(priv, REG_VERSION_MSB);
+ if (rev_lo < 0 || rev_hi < 0) {
+ ret = rev_lo < 0 ? rev_lo : rev_hi;
+ goto fail;
+ }
+
+ priv->rev = rev_lo | rev_hi << 8;
/* mask off feature bits: */
priv->rev &= ~0x30; /* not-hdcp and not-scalar bit */
switch (priv->rev) {
- case TDA9989N2: dev_info(dev->dev, "found TDA9989 n2"); break;
- case TDA19989: dev_info(dev->dev, "found TDA19989"); break;
- case TDA19989N2: dev_info(dev->dev, "found TDA19989 n2"); break;
- case TDA19988: dev_info(dev->dev, "found TDA19988"); break;
+ case TDA9989N2:
+ dev_info(&client->dev, "found TDA9989 n2");
+ break;
+ case TDA19989:
+ dev_info(&client->dev, "found TDA19989");
+ break;
+ case TDA19989N2:
+ dev_info(&client->dev, "found TDA19989 n2");
+ break;
+ case TDA19988:
+ dev_info(&client->dev, "found TDA19988");
+ break;
default:
- DBG("found unsupported device: %04x", priv->rev);
+ dev_err(&client->dev, "found unsupported device: %04x\n",
+ priv->rev);
goto fail;
}
/* after reset, enable DDC: */
- reg_write(encoder, REG_DDC_DISABLE, 0x00);
+ reg_write(priv, REG_DDC_DISABLE, 0x00);
/* set clock on DDC channel: */
- reg_write(encoder, REG_TX3, 39);
+ reg_write(priv, REG_TX3, 39);
/* if necessary, disable multi-master: */
if (priv->rev == TDA19989)
- reg_set(encoder, REG_I2C_MASTER, I2C_MASTER_DIS_MM);
+ reg_set(priv, REG_I2C_MASTER, I2C_MASTER_DIS_MM);
- cec_write(encoder, REG_CEC_FRO_IM_CLK_CTRL,
+ cec_write(priv, REG_CEC_FRO_IM_CLK_CTRL,
CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL);
+ /* initialize the optional IRQ */
+ if (client->irq) {
+ int irqf_trigger;
+
+ /* init read EDID waitqueue */
+ init_waitqueue_head(&priv->wq_edid);
+
+ /* clear pending interrupts */
+ reg_read(priv, REG_INT_FLAGS_0);
+ reg_read(priv, REG_INT_FLAGS_1);
+ reg_read(priv, REG_INT_FLAGS_2);
+
+ irqf_trigger =
+ irqd_get_trigger_type(irq_get_irq_data(client->irq));
+ ret = request_threaded_irq(client->irq, NULL,
+ tda998x_irq_thread,
+ irqf_trigger | IRQF_ONESHOT,
+ "tda998x", priv);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to request IRQ#%u: %d\n",
+ client->irq, ret);
+ goto fail;
+ }
+
+ /* enable HPD irq */
+ cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
+ }
+
+ /* enable EDID read irq: */
+ reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ if (!np)
+ return 0; /* non-DT */
+
+ /* get the optional video properties */
+ ret = of_property_read_u32(np, "video-ports", &video);
+ if (ret == 0) {
+ priv->vip_cntrl_0 = video >> 16;
+ priv->vip_cntrl_1 = video >> 8;
+ priv->vip_cntrl_2 = video;
+ }
+
return 0;
fail:
@@ -1210,6 +1365,14 @@ fail:
return -ENXIO;
}
+#ifdef CONFIG_OF
+static const struct of_device_id tda998x_dt_ids[] = {
+ { .compatible = "nxp,tda998x", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tda998x_dt_ids);
+#endif
+
static struct i2c_device_id tda998x_ids[] = {
{ "tda998x", 0 },
{ }
@@ -1222,6 +1385,7 @@ static struct drm_i2c_encoder_driver tda998x_driver = {
.remove = tda998x_remove,
.driver = {
.name = "tda998x",
+ .of_match_table = of_match_ptr(tda998x_dt_ids),
},
.id_table = tda998x_ids,
},
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 73ed59eff139..bea2d67196fb 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -14,7 +14,6 @@ config DRM_I915
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
- select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9fd44f5f3b3b..b1445b73465b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,57 +3,69 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o \
- i915_gpu_error.o \
+
+# Please keep these build lists sorted!
+
+# core driver code
+i915-y := i915_drv.o \
+ i915_params.o \
i915_suspend.o \
- i915_gem.o \
+ i915_sysfs.o \
+ intel_pm.o
+i915-$(CONFIG_COMPAT) += i915_ioc32.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
+# GEM code
+i915-y += i915_cmd_parser.o \
i915_gem_context.o \
i915_gem_debug.o \
+ i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
+ i915_gem.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
- i915_sysfs.o \
+ i915_gpu_error.o \
+ i915_irq.o \
i915_trace_points.o \
- i915_ums.o \
+ intel_ringbuffer.o \
+ intel_uncore.o
+
+# modesetting core code
+i915-y += intel_bios.o \
intel_display.o \
- intel_crt.o \
- intel_lvds.o \
- intel_dsi.o \
- intel_dsi_cmd.o \
- intel_dsi_pll.o \
- intel_bios.o \
- intel_ddi.o \
- intel_dp.o \
- intel_hdmi.o \
- intel_sdvo.o \
intel_modes.o \
- intel_panel.o \
- intel_pm.o \
- intel_i2c.o \
- intel_tv.o \
- intel_dvo.o \
- intel_ringbuffer.o \
intel_overlay.o \
- intel_sprite.o \
intel_sideband.o \
- intel_uncore.o \
+ intel_sprite.o
+i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
+i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+
+# modesetting output/encoder code
+i915-y += dvo_ch7017.o \
dvo_ch7xxx.o \
- dvo_ch7017.o \
dvo_ivch.o \
- dvo_tfp410.o \
- dvo_sil164.o \
dvo_ns2501.o \
- i915_gem_dmabuf.o
-
-i915-$(CONFIG_COMPAT) += i915_ioc32.o
-
-i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
-
-i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+ dvo_sil164.o \
+ dvo_tfp410.o \
+ intel_crt.o \
+ intel_ddi.o \
+ intel_dp.o \
+ intel_dsi_cmd.o \
+ intel_dsi.o \
+ intel_dsi_pll.o \
+ intel_dvo.o \
+ intel_hdmi.o \
+ intel_i2c.o \
+ intel_lvds.o \
+ intel_panel.o \
+ intel_sdvo.o \
+ intel_tv.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+# legacy horrors
+i915-y += i915_dma.o \
+ i915_ums.o
obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index af42e94f6846..a0f5bdd69491 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -340,9 +340,9 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
uint8_t val;
if ((i % 8) == 0)
- DRM_LOG_KMS("\n %02X: ", i);
+ DRM_DEBUG_KMS("\n %02X: ", i);
ch7xxx_readb(dvo, i, &val);
- DRM_LOG_KMS("%02X ", val);
+ DRM_DEBUG_KMS("%02X ", val);
}
}
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index baaf65bf0bdd..0f1865d7d4d8 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -377,41 +377,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
uint16_t val;
ivch_read(dvo, VR00, &val);
- DRM_LOG_KMS("VR00: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
ivch_read(dvo, VR01, &val);
- DRM_LOG_KMS("VR01: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR01: 0x%04x\n", val);
ivch_read(dvo, VR30, &val);
- DRM_LOG_KMS("VR30: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR30: 0x%04x\n", val);
ivch_read(dvo, VR40, &val);
- DRM_LOG_KMS("VR40: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR40: 0x%04x\n", val);
/* GPIO registers */
ivch_read(dvo, VR80, &val);
- DRM_LOG_KMS("VR80: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR80: 0x%04x\n", val);
ivch_read(dvo, VR81, &val);
- DRM_LOG_KMS("VR81: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR81: 0x%04x\n", val);
ivch_read(dvo, VR82, &val);
- DRM_LOG_KMS("VR82: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR82: 0x%04x\n", val);
ivch_read(dvo, VR83, &val);
- DRM_LOG_KMS("VR83: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR83: 0x%04x\n", val);
ivch_read(dvo, VR84, &val);
- DRM_LOG_KMS("VR84: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR84: 0x%04x\n", val);
ivch_read(dvo, VR85, &val);
- DRM_LOG_KMS("VR85: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR85: 0x%04x\n", val);
ivch_read(dvo, VR86, &val);
- DRM_LOG_KMS("VR86: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR86: 0x%04x\n", val);
ivch_read(dvo, VR87, &val);
- DRM_LOG_KMS("VR87: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR87: 0x%04x\n", val);
ivch_read(dvo, VR88, &val);
- DRM_LOG_KMS("VR88: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR88: 0x%04x\n", val);
/* Scratch register 0 - AIM Panel type */
ivch_read(dvo, VR8E, &val);
- DRM_LOG_KMS("VR8E: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR8E: 0x%04x\n", val);
/* Scratch register 1 - Status register */
ivch_read(dvo, VR8F, &val);
- DRM_LOG_KMS("VR8F: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR8F: 0x%04x\n", val);
}
static void ivch_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 954acb2c7021..8155ded79079 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -490,15 +490,15 @@ static void ns2501_dump_regs(struct intel_dvo_device *dvo)
uint8_t val;
ns2501_readb(dvo, NS2501_FREQ_LO, &val);
- DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+ DRM_DEBUG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_FREQ_HI, &val);
- DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+ DRM_DEBUG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REG8, &val);
- DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+ DRM_DEBUG_KMS("NS2501_REG8: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REG9, &val);
- DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+ DRM_DEBUG_KMS("NS2501_REG9: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REGC, &val);
- DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+ DRM_DEBUG_KMS("NS2501_REGC: 0x%02x\n", val);
}
static void ns2501_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 4debd32e3e4c..7b3e9e936200 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -246,15 +246,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
uint8_t val;
sil164_readb(dvo, SIL164_FREQ_LO, &val);
- DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
+ DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
sil164_readb(dvo, SIL164_FREQ_HI, &val);
- DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
+ DRM_DEBUG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG8, &val);
- DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
+ DRM_DEBUG_KMS("SIL164_REG8: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG9, &val);
- DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
+ DRM_DEBUG_KMS("SIL164_REG9: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REGC, &val);
- DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
+ DRM_DEBUG_KMS("SIL164_REGC: 0x%02x\n", val);
}
static void sil164_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index e17f1b07e915..12ea4b164692 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -267,33 +267,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
uint8_t val, val2;
tfp410_readb(dvo, TFP410_REV, &val);
- DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_1, &val);
- DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_CTL1: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_2, &val);
- DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_CTL2: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_3, &val);
- DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_CTL3: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_USERCFG, &val);
- DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_USERCFG: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_DLY, &val);
- DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CTL, &val);
- DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_TOP, &val);
- DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
- DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+ DRM_DEBUG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
- DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+ DRM_DEBUG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_H_RES_LO, &val);
tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
- DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+ DRM_DEBUG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_V_RES_LO, &val);
tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
- DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+ DRM_DEBUG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
static void tfp410_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
new file mode 100644
index 000000000000..4cf6d020d513
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Brad Volkin <bradley.d.volkin@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+
+/**
+ * DOC: i915 batch buffer command parser
+ *
+ * Motivation:
+ * Certain OpenGL features (e.g. transform feedback, performance monitoring)
+ * require userspace code to submit batches containing commands such as
+ * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
+ * generations of the hardware will noop these commands in "unsecure" batches
+ * (which includes all userspace batches submitted via i915) even though the
+ * commands may be safe and represent the intended programming model of the
+ * device.
+ *
+ * The software command parser is similar in operation to the command parsing
+ * done in hardware for unsecure batches. However, the software parser allows
+ * some operations that would be noop'd by hardware if it determines the
+ * operation is safe, and submits the batch as "secure" to prevent hardware
+ * parsing.
+ *
+ * Threats:
+ * At a high level, the hardware (and software) checks attempt to prevent
+ * granting userspace undue privileges. There are three categories of privilege.
+ *
+ * First, commands which are explicitly defined as privileged or which should
+ * only be used by the kernel driver. The parser generally rejects such
+ * commands, though it may allow some from the drm master process.
+ *
+ * Second, commands which access registers. To support correct/enhanced
+ * userspace functionality, particularly certain OpenGL extensions, the parser
+ * provides a whitelist of registers which userspace may safely access (for both
+ * normal and drm master processes).
+ *
+ * Third, commands which access privileged memory (e.g. GGTT, HWS page, etc.).
+ * The parser always rejects such commands.
+ *
+ * The majority of the problematic commands fall in the MI_* range, with only a
+ * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
+ *
+ * Implementation:
+ * Each ring maintains tables of commands and registers which the parser uses in
+ * scanning batch buffers submitted to that ring.
+ *
+ * Since the set of commands that the parser must check for is significantly
+ * smaller than the number of commands supported, the parser tables contain only
+ * those commands required by the parser. This generally works because command
+ * opcode ranges have standard command length encodings. So for commands that
+ * the parser does not need to check, it can easily skip them. This is
+ * implemented via a per-ring length decoding vfunc.
+ *
+ * Unfortunately, there are a number of commands that do not follow the standard
+ * length encoding for their opcode range, primarily amongst the MI_* commands.
+ * To handle this, the parser provides a way to define explicit "skip" entries
+ * in the per-ring command tables.
+ *
+ * Other command table entries map fairly directly to high level categories
+ * mentioned above: rejected, master-only, register whitelist. The parser
+ * implements a number of checks, including the privileged memory checks, via a
+ * general bitmasking mechanism.
+ */
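To make the table layout concrete: a per-ring entry pairs a match rule (cmd.value under cmd.mask) with behaviour flags and a length rule; find_cmd() below consults these tables first and falls back to the per-ring length-decode vfunc when nothing matches. The snippet that follows is a minimal, hypothetical sketch only; the opcode values, register mask and table names are placeholders, and the struct drm_i915_cmd_descriptor / drm_i915_cmd_table definitions live in the driver headers rather than in this patch, so only the field and flag names exercised by the parser code below are assumed.

/*
 * Illustrative only, not part of the real per-ring tables. Opcode
 * values and the register mask are placeholders; the descriptor and
 * table types are declared in the driver headers, not in this patch.
 */
static const struct drm_i915_cmd_descriptor example_cmds[] = {
	{
		/* Reject a hypothetical privileged command outright. */
		.flags = CMD_DESC_REJECT | CMD_DESC_FIXED,
		.cmd = { .value = 0x12 << 23, .mask = 0x3f << 23 },
		.length = { .fixed = 2 },
	},
	{
		/* Allow a register write, but only to whitelisted registers. */
		.flags = CMD_DESC_REGISTER | CMD_DESC_FIXED,
		.cmd = { .value = 0x22 << 23, .mask = 0x3f << 23 },
		.length = { .fixed = 3 },
		.reg = { .offset = 1, .mask = 0x007ffffc },
	},
};

static const struct drm_i915_cmd_table example_cmd_tables[] = {
	{ .table = example_cmds, .count = ARRAY_SIZE(example_cmds) },
};

Entries are kept sorted by (value & mask), which is what validate_cmds_sorted() checks at init time; commands absent from the tables are skipped using the default length decode.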
+
+static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 subclient =
+ (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT)
+ return 0x3F;
+ else if (client == INSTR_RC_CLIENT) {
+ if (subclient == INSTR_MEDIA_SUBCLIENT)
+ return 0xFFFF;
+ else
+ return 0xFF;
+ }
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
+static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 subclient =
+ (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT)
+ return 0x3F;
+ else if (client == INSTR_RC_CLIENT) {
+ if (subclient == INSTR_MEDIA_SUBCLIENT)
+ return 0xFFF;
+ else
+ return 0xFF;
+ }
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
+static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT)
+ return 0x3F;
+ else if (client == INSTR_BC_CLIENT)
+ return 0xFF;
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
+static void validate_cmds_sorted(struct intel_ring_buffer *ring)
+{
+ int i;
+
+ if (!ring->cmd_tables || ring->cmd_table_count == 0)
+ return;
+
+ for (i = 0; i < ring->cmd_table_count; i++) {
+ const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
+ u32 previous = 0;
+ int j;
+
+ for (j = 0; j < table->count; j++) {
+ const struct drm_i915_cmd_descriptor *desc =
+ &table->table[j];
+ u32 curr = desc->cmd.value & desc->cmd.mask;
+
+ if (curr < previous)
+ DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+ ring->id, i, j, curr, previous);
+
+ previous = curr;
+ }
+ }
+}
+
+static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
+{
+ int i;
+ u32 previous = 0;
+
+ for (i = 0; i < reg_count; i++) {
+ u32 curr = reg_table[i];
+
+ if (curr < previous)
+ DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
+ ring_id, i, curr, previous);
+
+ previous = curr;
+ }
+}
+
+static void validate_regs_sorted(struct intel_ring_buffer *ring)
+{
+ check_sorted(ring->id, ring->reg_table, ring->reg_count);
+ check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
+}
+
+/**
+ * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
+ * @ring: the ringbuffer to initialize
+ *
+ * Optionally initializes fields related to batch buffer command parsing in the
+ * struct intel_ring_buffer based on whether the platform requires software
+ * command parsing.
+ */
+void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
+{
+ if (!IS_GEN7(ring->dev))
+ return;
+
+ switch (ring->id) {
+ case RCS:
+ ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+ break;
+ case VCS:
+ ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ break;
+ case BCS:
+ ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ break;
+ case VECS:
+ /* VECS can use the same length_mask function as VCS */
+ ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ break;
+ default:
+ DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
+ ring->id);
+ BUG();
+ }
+
+ validate_cmds_sorted(ring);
+ validate_regs_sorted(ring);
+}
+
+static const struct drm_i915_cmd_descriptor*
+find_cmd_in_table(const struct drm_i915_cmd_table *table,
+ u32 cmd_header)
+{
+ int i;
+
+ for (i = 0; i < table->count; i++) {
+ const struct drm_i915_cmd_descriptor *desc = &table->table[i];
+ u32 masked_cmd = desc->cmd.mask & cmd_header;
+ u32 masked_value = desc->cmd.value & desc->cmd.mask;
+
+ if (masked_cmd == masked_value)
+ return desc;
+ }
+
+ return NULL;
+}
+
+/*
+ * Returns a pointer to a descriptor for the command specified by cmd_header.
+ *
+ * The caller must supply space for a default descriptor via the default_desc
+ * parameter. If no descriptor for the specified command exists in the ring's
+ * command parser tables, this function fills in default_desc based on the
+ * ring's default length encoding and returns default_desc.
+ */
+static const struct drm_i915_cmd_descriptor*
+find_cmd(struct intel_ring_buffer *ring,
+ u32 cmd_header,
+ struct drm_i915_cmd_descriptor *default_desc)
+{
+ u32 mask;
+ int i;
+
+ for (i = 0; i < ring->cmd_table_count; i++) {
+ const struct drm_i915_cmd_descriptor *desc;
+
+ desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
+ if (desc)
+ return desc;
+ }
+
+ mask = ring->get_cmd_length_mask(cmd_header);
+ if (!mask)
+ return NULL;
+
+ BUG_ON(!default_desc);
+ default_desc->flags = CMD_DESC_SKIP;
+ default_desc->length.mask = mask;
+
+ return default_desc;
+}
+
+static bool valid_reg(const u32 *table, int count, u32 addr)
+{
+ if (table && count != 0) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (table[i] == addr)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static u32 *vmap_batch(struct drm_i915_gem_object *obj)
+{
+ int i;
+ void *addr = NULL;
+ struct sg_page_iter sg_iter;
+ struct page **pages;
+
+ pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+ if (pages == NULL) {
+ DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+ goto finish;
+ }
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ pages[i] = sg_page_iter_page(&sg_iter);
+ i++;
+ }
+
+ addr = vmap(pages, i, 0, PAGE_KERNEL);
+ if (addr == NULL) {
+ DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+ goto finish;
+ }
+
+finish:
+ if (pages)
+ drm_free_large(pages);
+ return (u32 *)addr;
+}
+
+/**
+ * i915_needs_cmd_parser() - should a given ring use software command parsing?
+ * @ring: the ring in question
+ *
+ * Only certain platforms require software batch buffer command parsing, and
+ * only when it is enabled via the i915.enable_cmd_parser module parameter.
+ *
+ * Return: true if the ring requires software command parsing
+ */
+bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
+{
+ /* No command tables indicates a platform without parsing */
+ if (!ring->cmd_tables)
+ return false;
+
+ return (i915.enable_cmd_parser == 1);
+}
+
+#define LENGTH_BIAS 2
+
+/**
+ * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ring: the ring on which the batch is to execute
+ * @batch_obj: the batch buffer in question
+ * @batch_start_offset: byte offset in the batch at which execution starts
+ * @is_master: is the submitting process the drm master?
+ *
+ * Parses the specified batch buffer looking for privilege violations as
+ * described in the overview.
+ *
+ * Return: non-zero if the parser finds violations or otherwise fails
+ */
+int i915_parse_cmds(struct intel_ring_buffer *ring,
+ struct drm_i915_gem_object *batch_obj,
+ u32 batch_start_offset,
+ bool is_master)
+{
+ int ret = 0;
+ u32 *cmd, *batch_base, *batch_end;
+ struct drm_i915_cmd_descriptor default_desc = { 0 };
+ int needs_clflush = 0;
+
+ ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+ return ret;
+ }
+
+ batch_base = vmap_batch(batch_obj);
+ if (!batch_base) {
+ DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+ i915_gem_object_unpin_pages(batch_obj);
+ return -ENOMEM;
+ }
+
+ if (needs_clflush)
+ drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
+
+ cmd = batch_base + (batch_start_offset / sizeof(*cmd));
+ batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
+
+ while (cmd < batch_end) {
+ const struct drm_i915_cmd_descriptor *desc;
+ u32 length;
+
+ if (*cmd == MI_BATCH_BUFFER_END)
+ break;
+
+ desc = find_cmd(ring, *cmd, &default_desc);
+ if (!desc) {
+ DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
+ *cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (desc->flags & CMD_DESC_FIXED)
+ length = desc->length.fixed;
+ else
+ length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
+
+ if ((batch_end - cmd) < length) {
+ DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%td\n",
+ *cmd,
+ length,
+ (unsigned long)(batch_end - cmd));
+ ret = -EINVAL;
+ break;
+ }
+
+ if (desc->flags & CMD_DESC_REJECT) {
+ DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
+ DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
+ *cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (desc->flags & CMD_DESC_REGISTER) {
+ u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
+
+ if (!valid_reg(ring->reg_table,
+ ring->reg_count, reg_addr)) {
+ if (!is_master ||
+ !valid_reg(ring->master_reg_table,
+ ring->master_reg_count,
+ reg_addr)) {
+ DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+ reg_addr,
+ *cmd,
+ ring->id);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ if (desc->flags & CMD_DESC_BITMASK) {
+ int i;
+
+ for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
+ u32 dword;
+
+ if (desc->bits[i].mask == 0)
+ break;
+
+ dword = cmd[desc->bits[i].offset] &
+ desc->bits[i].mask;
+
+ if (dword != desc->bits[i].expected) {
+ DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+ *cmd,
+ desc->bits[i].mask,
+ desc->bits[i].expected,
+ dword, ring->id);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret)
+ break;
+ }
+
+ cmd += length;
+ }
+
+ if (cmd >= batch_end) {
+ DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+ ret = -EINVAL;
+ }
+
+ vunmap(batch_base);
+
+ i915_gem_object_unpin_pages(batch_obj);
+
+ return ret;
+}
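For context, a caller is expected to gate on i915_needs_cmd_parser() and treat a non-zero return from i915_parse_cmds() as a reason to refuse the batch. The execbuffer hook itself is not part of this file; the sketch below shows what such a call site might look like, with ring, batch_obj, args and file standing in for whatever the real caller has in scope.

/*
 * Hypothetical call site (the real hook lives in the execbuffer path,
 * outside this file); the variable names here are placeholders.
 */
if (i915_needs_cmd_parser(ring)) {
	ret = i915_parse_cmds(ring, batch_obj,
			      args->batch_start_offset,
			      file->is_master);
	if (ret)
		return ret;	/* refuse to submit the batch */
}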
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b2b46c52294c..195fe5bc0aac 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -98,7 +98,7 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
if (obj->user_pin_count > 0)
return "P";
- else if (obj->pin_count > 0)
+ else if (i915_gem_obj_is_pinned(obj))
return "p";
else
return " ";
@@ -123,6 +123,8 @@ static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
+ int pin_count = 0;
+
seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
&obj->base,
get_pin_flag(obj),
@@ -139,8 +141,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
- if (obj->pin_count)
- seq_printf(m, " (pinned x %d)", obj->pin_count);
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->pin_count > 0)
+ pin_count++;
+ seq_printf(m, " (pinned x %d)", pin_count);
if (obj->pin_display)
seq_printf(m, " (display)");
if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -295,28 +299,62 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
} while (0)
struct file_stats {
+ struct drm_i915_file_private *file_priv;
int count;
- size_t total, active, inactive, unbound;
+ size_t total, unbound;
+ size_t global, shared;
+ size_t active, inactive;
};
static int per_file_stats(int id, void *ptr, void *data)
{
struct drm_i915_gem_object *obj = ptr;
struct file_stats *stats = data;
+ struct i915_vma *vma;
stats->count++;
stats->total += obj->base.size;
- if (i915_gem_obj_ggtt_bound(obj)) {
- if (!list_empty(&obj->ring_list))
- stats->active += obj->base.size;
- else
- stats->inactive += obj->base.size;
+ if (obj->base.name || obj->base.dma_buf)
+ stats->shared += obj->base.size;
+
+ if (USES_FULL_PPGTT(obj->base.dev)) {
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ struct i915_hw_ppgtt *ppgtt;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ if (i915_is_ggtt(vma->vm)) {
+ stats->global += obj->base.size;
+ continue;
+ }
+
+ ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
+ if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
+ continue;
+
+ if (obj->ring) /* XXX per-vma statistic */
+ stats->active += obj->base.size;
+ else
+ stats->inactive += obj->base.size;
+
+ return 0;
+ }
} else {
- if (!list_empty(&obj->global_list))
- stats->unbound += obj->base.size;
+ if (i915_gem_obj_ggtt_bound(obj)) {
+ stats->global += obj->base.size;
+ if (obj->ring)
+ stats->active += obj->base.size;
+ else
+ stats->inactive += obj->base.size;
+ return 0;
+ }
}
+ if (!list_empty(&obj->global_list))
+ stats->unbound += obj->base.size;
+
return 0;
}
@@ -407,6 +445,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct task_struct *task;
memset(&stats, 0, sizeof(stats));
+ stats.file_priv = file->driver_priv;
idr_for_each(&file->object_idr, per_file_stats, &stats);
/*
* Although we have a valid reference on file->pid, that does
@@ -416,12 +455,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
*/
rcu_read_lock();
task = pid_task(file->pid, PIDTYPE_PID);
- seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
+ seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
task ? task->comm : "<unknown>",
stats.count,
stats.total,
stats.active,
stats.inactive,
+ stats.global,
+ stats.shared,
stats.unbound);
rcu_read_unlock();
}
@@ -447,7 +488,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (list == PINNED_LIST && obj->pin_count == 0)
+ if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
continue;
seq_puts(m, " ");
@@ -520,7 +561,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct drm_i915_gem_request *gem_request;
int ret, count, i;
@@ -565,7 +606,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret, i;
@@ -588,7 +629,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret, i, pipe;
@@ -598,7 +639,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
if (INTEL_INFO(dev)->gen >= 8) {
- int i;
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@@ -611,16 +651,16 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
i, I915_READ(GEN8_GT_IER(i)));
}
- for_each_pipe(i) {
+ for_each_pipe(pipe) {
seq_printf(m, "Pipe %c IMR:\t%08x\n",
- pipe_name(i),
- I915_READ(GEN8_DE_PIPE_IMR(i)));
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IMR(pipe)));
seq_printf(m, "Pipe %c IIR:\t%08x\n",
- pipe_name(i),
- I915_READ(GEN8_DE_PIPE_IIR(i)));
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IIR(pipe)));
seq_printf(m, "Pipe %c IER:\t%08x\n",
- pipe_name(i),
- I915_READ(GEN8_DE_PIPE_IER(i)));
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IER(pipe)));
}
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -712,8 +752,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- seq_printf(m, "Interrupts received: %d\n",
- atomic_read(&dev_priv->irq_received));
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m,
@@ -732,7 +770,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -761,7 +799,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
const u32 *hws;
int i;
@@ -872,7 +910,7 @@ static int
i915_next_seqno_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -909,7 +947,7 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u16 crstanddelay;
int ret;
@@ -932,7 +970,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
intel_runtime_pm_get(dev_priv);
@@ -1025,7 +1063,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq * GT_FREQUENCY_MULTIPLIER);
seq_printf(m, "Max overclocked frequency: %dMHz\n",
- dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
+ dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
} else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts, val;
@@ -1058,7 +1096,7 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 delayfreq;
int ret, i;
@@ -1089,7 +1127,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 inttoext;
int ret, i;
@@ -1113,7 +1151,7 @@ static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
int ret;
@@ -1339,13 +1377,15 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_FBC(dev)) {
seq_puts(m, "FBC unsupported on this chipset\n");
return 0;
}
+ intel_runtime_pm_get(dev_priv);
+
if (intel_fbc_enabled(dev)) {
seq_puts(m, "FBC enabled\n");
} else {
@@ -1389,6 +1429,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
}
seq_putc(m, '\n');
}
+
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
@@ -1403,11 +1446,15 @@ static int i915_ips_status(struct seq_file *m, void *unused)
return 0;
}
+ intel_runtime_pm_get(dev_priv);
+
if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
seq_puts(m, "enabled\n");
else
seq_puts(m, "disabled\n");
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
@@ -1415,9 +1462,11 @@ static int i915_sr_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
bool sr_enabled = false;
+ intel_runtime_pm_get(dev_priv);
+
if (HAS_PCH_SPLIT(dev))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
@@ -1427,6 +1476,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (IS_PINEVIEW(dev))
sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+ intel_runtime_pm_put(dev_priv);
+
seq_printf(m, "self-refresh: %s\n",
sr_enabled ? "enabled" : "disabled");
@@ -1437,7 +1488,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long temp, chipset, gfx;
int ret;
@@ -1465,8 +1516,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
int gpu_freq, ia_freq;
if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
@@ -1474,17 +1525,18 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
+ intel_runtime_pm_get(dev_priv);
+
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
+ goto out;
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
- for (gpu_freq = dev_priv->rps.min_delay;
- gpu_freq <= dev_priv->rps.max_delay;
+ for (gpu_freq = dev_priv->rps.min_freq_softlimit;
+ gpu_freq <= dev_priv->rps.max_freq_softlimit;
gpu_freq++) {
ia_freq = gpu_freq;
sandybridge_pcode_read(dev_priv,
@@ -1496,17 +1548,18 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
((ia_freq >> 8) & 0xff) * 100);
}
- intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
- return 0;
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
}
static int i915_gfxec(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1526,7 +1579,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
int ret;
@@ -1600,7 +1653,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct i915_hw_context *ctx;
int ret, i;
@@ -1733,6 +1786,17 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
return 0;
}
+static int per_file_ctx(int id, void *ptr, void *data)
+{
+ struct i915_hw_context *ctx = ptr;
+ struct seq_file *m = data;
+ struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
+
+ ppgtt->debug_dump(ppgtt, m);
+
+ return 0;
+}
+
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1744,7 +1808,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
return;
seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
- seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
+ seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
for_each_ring(ring, dev_priv, unused) {
seq_printf(m, "%s\n", ring->name);
for (i = 0; i < 4; i++) {
@@ -1762,6 +1826,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
+ struct drm_file *file;
int i;
if (INTEL_INFO(dev)->gen == 6)
@@ -1780,6 +1845,20 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
seq_puts(m, "aliasing PPGTT:\n");
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+
+ ppgtt->debug_dump(ppgtt, m);
+ } else
+ return;
+
+ list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_ppgtt *pvt_ppgtt;
+
+ pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
+ seq_printf(m, "proc: %s\n",
+ get_pid_task(file->pid, PIDTYPE_PID)->comm);
+ seq_puts(m, " default context:\n");
+ idr_for_each(&file_priv->context_idr, per_file_ctx, m);
}
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
@@ -1892,6 +1971,47 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
return 0;
}
+static int i915_sink_crc(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct intel_dp *intel_dp = NULL;
+ int ret;
+ u8 crc[6];
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+
+ if (connector->base.dpms != DRM_MODE_DPMS_ON)
+ continue;
+
+ if (!connector->base.encoder)
+ continue;
+
+ encoder = to_intel_encoder(connector->base.encoder);
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ ret = intel_dp_sink_crc(intel_dp, crc);
+ if (ret)
+ goto out;
+
+ seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
+ crc[0], crc[1], crc[2],
+ crc[3], crc[4], crc[5]);
+ goto out;
+ }
+ ret = -ENODEV;
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
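The new i915_sink_crc entry (registered further down as "i915_sink_crc_eDP1") prints the six CRC bytes as twelve hex digits and fails with -ENODEV when no eDP panel is active. A minimal user-space read sketch; the debugfs path and minor number are assumptions, adjust for your system:

    #include <stdio.h>

    int main(void)
    {
        char crc[16];
        /* assumed path: <debugfs>/dri/<minor>/i915_sink_crc_eDP1 */
        FILE *f = fopen("/sys/kernel/debug/dri/0/i915_sink_crc_eDP1", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (!fgets(crc, sizeof(crc), f)) {
            perror("read");  /* e.g. -ENODEV: no active eDP panel */
            fclose(f);
            return 1;
        }
        printf("sink CRC: %s", crc);  /* twelve hex digits plus newline */
        fclose(f);
        return 0;
    }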
static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
@@ -1903,12 +2023,16 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
+ intel_runtime_pm_get(dev_priv);
+
rdmsrl(MSR_RAPL_POWER_UNIT, power);
power = (power & 0x1f00) >> 8;
units = 1000000 / (1 << power); /* convert to uJ */
power = I915_READ(MCH_SECP_NRG_STTS);
power *= units;
+ intel_runtime_pm_put(dev_priv);
+
seq_printf(m, "%llu", (long long unsigned)power);
return 0;
@@ -1925,15 +2049,9 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
return 0;
}
- mutex_lock(&dev_priv->pc8.lock);
- seq_printf(m, "Requirements met: %s\n",
- yesno(dev_priv->pc8.requirements_met));
- seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
- seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
+ seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n",
- yesno(dev_priv->pc8.irqs_disabled));
- seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
- mutex_unlock(&dev_priv->pc8.lock);
+ yesno(dev_priv->pm.irqs_disabled));
return 0;
}
@@ -1961,6 +2079,28 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
+ case POWER_DOMAIN_PORT_DDI_A_2_LANES:
+ return "PORT_DDI_A_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_A_4_LANES:
+ return "PORT_DDI_A_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_B_2_LANES:
+ return "PORT_DDI_B_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_B_4_LANES:
+ return "PORT_DDI_B_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_C_2_LANES:
+ return "PORT_DDI_C_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_C_4_LANES:
+ return "PORT_DDI_C_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_D_2_LANES:
+ return "PORT_DDI_D_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_D_4_LANES:
+ return "PORT_DDI_D_4_LANES";
+ case POWER_DOMAIN_PORT_DSI:
+ return "PORT_DSI";
+ case POWER_DOMAIN_PORT_CRT:
+ return "PORT_CRT";
+ case POWER_DOMAIN_PORT_OTHER:
+ return "PORT_OTHER";
case POWER_DOMAIN_VGA:
return "VGA";
case POWER_DOMAIN_AUDIO:
@@ -2008,6 +2148,215 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
return 0;
}
+static void intel_seq_print_mode(struct seq_file *m, int tabs,
+ struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < tabs; i++)
+ seq_putc(m, '\t');
+
+ seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+}
+
+static void intel_encoder_info(struct seq_file *m,
+ struct intel_crtc *intel_crtc,
+ struct intel_encoder *intel_encoder)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct intel_connector *intel_connector;
+ struct drm_encoder *encoder;
+
+ encoder = &intel_encoder->base;
+ seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
+ encoder->base.id, drm_get_encoder_name(encoder));
+ for_each_connector_on_encoder(dev, encoder, intel_connector) {
+ struct drm_connector *connector = &intel_connector->base;
+ seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ drm_get_connector_status_name(connector->status));
+ if (connector->status == connector_status_connected) {
+ struct drm_display_mode *mode = &crtc->mode;
+ seq_printf(m, ", mode:\n");
+ intel_seq_print_mode(m, 2, mode);
+ } else {
+ seq_putc(m, '\n');
+ }
+ }
+}
+
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct intel_encoder *intel_encoder;
+
+ seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+ crtc->primary->fb->base.id, crtc->x, crtc->y,
+ crtc->primary->fb->width, crtc->primary->fb->height);
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ intel_encoder_info(m, intel_crtc, intel_encoder);
+}
+
+static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+{
+ struct drm_display_mode *mode = panel->fixed_mode;
+
+ seq_printf(m, "\tfixed mode:\n");
+ intel_seq_print_mode(m, 2, mode);
+}
+
+static void intel_dp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
+ seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
+ "no");
+ if (intel_encoder->type == INTEL_OUTPUT_EDP)
+ intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_hdmi_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
+
+ seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
+ "no");
+}
+
+static void intel_lvds_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_connector_info(struct seq_file *m,
+ struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ struct drm_display_mode *mode;
+
+ seq_printf(m, "connector %d: type %s, status: %s\n",
+ connector->base.id, drm_get_connector_name(connector),
+ drm_get_connector_status_name(connector->status));
+ if (connector->status == connector_status_connected) {
+ seq_printf(m, "\tname: %s\n", connector->display_info.name);
+ seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+ connector->display_info.width_mm,
+ connector->display_info.height_mm);
+ seq_printf(m, "\tsubpixel order: %s\n",
+ drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+ seq_printf(m, "\tCEA rev: %d\n",
+ connector->display_info.cea_rev);
+ }
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
+ intel_dp_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
+ intel_hdmi_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ intel_lvds_info(m, intel_connector);
+
+ seq_printf(m, "\tmodes:\n");
+ list_for_each_entry(mode, &connector->modes, head)
+ intel_seq_print_mode(m, 2, mode);
+}
+
+static bool cursor_active(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 state;
+
+ if (IS_845G(dev) || IS_I865G(dev))
+ state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
+ else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
+ state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+ else
+ state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+
+ return state;
+}
+
+static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pos;
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
+ pos = I915_READ(CURPOS_IVB(pipe));
+ else
+ pos = I915_READ(CURPOS(pipe));
+
+ *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
+ if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
+ *x = -*x;
+
+ *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
+ if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
+ *y = -*y;
+
+ return cursor_active(dev, pipe);
+}
+
+static int i915_display_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ struct drm_connector *connector;
+
+ intel_runtime_pm_get(dev_priv);
+ drm_modeset_lock_all(dev);
+ seq_printf(m, "CRTC info\n");
+ seq_printf(m, "---------\n");
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ bool active;
+ int x, y;
+
+ seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+ crtc->base.base.id, pipe_name(crtc->pipe),
+ yesno(crtc->active));
+ if (crtc->active) {
+ intel_crtc_info(m, crtc);
+
+ active = cursor_position(dev, crtc->pipe, &x, &y);
+ seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+ yesno(crtc->cursor_visible),
+ x, y, crtc->cursor_addr,
+ yesno(active));
+ }
+ }
+
+ seq_printf(m, "\n");
+ seq_printf(m, "Connector info\n");
+ seq_printf(m, "--------------\n");
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ intel_connector_info(m, connector);
+ }
+ drm_modeset_unlock_all(dev);
+ intel_runtime_pm_put(dev_priv);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -2332,8 +2681,6 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
if (need_stable_symbols) {
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
- WARN_ON(!IS_G4X(dev));
-
tmp |= DC_BALANCE_RESET_VLV;
if (pipe == PIPE_A)
tmp |= PIPE_A_SCRAMBLE_RESET;
@@ -2756,11 +3103,179 @@ static const struct file_operations i915_display_crc_ctl_fops = {
.write = display_crc_ctl_write
};
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+{
+ struct drm_device *dev = m->private;
+ int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+ int level;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ /* WM1+ latency values in 0.5us units */
+ if (level > 0)
+ latency *= 5;
+
+ seq_printf(m, "WM%d %u (%u.%u usec)\n",
+ level, wm[level],
+ latency / 10, latency % 10);
+ }
+
+ drm_modeset_unlock_all(dev);
+}
+
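As a worked check of the scaling above, derived purely from the arithmetic in wm_latency_show() with made-up values: a raw WM2 entry of 4 is in 0.5 us steps, so it scales to 20 tenths of a microsecond and prints as "WM2 4 (2.0 usec)". Standalone illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int raw = 4;           /* hypothetical wm[2] value */
        unsigned int latency = raw * 5; /* WM1+ raw units are 0.5 us */

        /* prints: WM2 4 (2.0 usec) */
        printf("WM2 %u (%u.%u usec)\n", raw, latency / 10, latency % 10);
        return 0;
    }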
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+
+ wm_latency_show(m, to_i915(dev)->wm.pri_latency);
+
+ return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+
+ wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+
+ return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+
+ wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+
+ return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ if (!HAS_PCH_SPLIT(dev))
+ return -ENODEV;
+
+ return single_open(file, pri_wm_latency_show, dev);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ if (!HAS_PCH_SPLIT(dev))
+ return -ENODEV;
+
+ return single_open(file, spr_wm_latency_show, dev);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ if (!HAS_PCH_SPLIT(dev))
+ return -ENODEV;
+
+ return single_open(file, cur_wm_latency_show, dev);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp, uint16_t wm[5])
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+ uint16_t new[5] = { 0 };
+ int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+ int level;
+ int ret;
+ char tmp[32];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+ if (ret != num_levels)
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++)
+ wm[level] = new[level];
+
+ drm_modeset_unlock_all(dev);
+
+ return len;
+}
+
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+
+ return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+
+ return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+
+ return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = pri_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = spr_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = cur_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cur_wm_latency_write
+};
+
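The write handlers above take one line of space-separated decimal values in the same raw units the show functions print: exactly num_levels values (five on HSW/BDW, four otherwise) and fewer than 32 bytes total, else the write returns -EINVAL. A minimal user-space sketch; the debugfs path, minor number and latency values are assumptions:

    #include <stdio.h>

    int main(void)
    {
        /* assumed path: <debugfs>/dri/<minor>/i915_pri_wm_latency */
        FILE *f = fopen("/sys/kernel/debug/dri/0/i915_pri_wm_latency", "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* five levels as on HSW/BDW; pass four values on other platforms */
        fprintf(f, "12 4 5 6 7\n");
        fclose(f);
        return 0;
    }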
static int
i915_wedged_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
*val = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -2772,9 +3287,8 @@ i915_wedged_set(void *data, u64 val)
{
struct drm_device *dev = data;
- DRM_INFO("Manually setting wedged to %llu\n", val);
- i915_handle_error(dev, val);
-
+ i915_handle_error(dev, val,
+ "Manually setting wedged to %llu", val);
return 0;
}
@@ -2786,7 +3300,7 @@ static int
i915_ring_stop_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
*val = dev_priv->gpu_error.stop_rings;
@@ -2929,7 +3443,7 @@ i915_drop_caches_set(void *data, u64 val)
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
list_for_each_entry_safe(vma, x, &vm->inactive_list,
mm_list) {
- if (vma->obj->pin_count)
+ if (vma->pin_count)
continue;
ret = i915_vma_unbind(vma);
@@ -2963,7 +3477,7 @@ static int
i915_max_freq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -2976,9 +3490,9 @@ i915_max_freq_get(void *data, u64 *val)
return ret;
if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+ *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
else
- *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+ *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -2989,6 +3503,7 @@ i915_max_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rp_state_cap, hw_max, hw_min;
int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3007,14 +3522,29 @@ i915_max_freq_set(void *data, u64 val)
*/
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- dev_priv->rps.max_delay = val;
- valleyview_set_rps(dev, val);
+
+ hw_max = valleyview_rps_max_freq(dev_priv);
+ hw_min = valleyview_rps_min_freq(dev_priv);
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
- dev_priv->rps.max_delay = val;
- gen6_set_rps(dev, val);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = (rp_state_cap >> 16) & 0xff;
+ }
+
+ if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ return -EINVAL;
}
+ dev_priv->rps.max_freq_softlimit = val;
+
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
+
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -3028,7 +3558,7 @@ static int
i915_min_freq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3041,9 +3571,9 @@ i915_min_freq_get(void *data, u64 *val)
return ret;
if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+ *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
else
- *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+ *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -3054,6 +3584,7 @@ i915_min_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rp_state_cap, hw_max, hw_min;
int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3072,13 +3603,29 @@ i915_min_freq_set(void *data, u64 val)
*/
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- dev_priv->rps.min_delay = val;
- valleyview_set_rps(dev, val);
+
+ hw_max = valleyview_rps_max_freq(dev_priv);
+ hw_min = valleyview_rps_min_freq(dev_priv);
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
- dev_priv->rps.min_delay = val;
- gen6_set_rps(dev, val);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = (rp_state_cap >> 16) & 0xff;
+ }
+
+ if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ return -EINVAL;
}
+
+ dev_priv->rps.min_freq_softlimit = val;
+
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
+
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -3092,7 +3639,7 @@ static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 snpcr;
int ret;
@@ -3152,7 +3699,6 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- intel_runtime_pm_get(dev_priv);
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
return 0;
@@ -3167,7 +3713,6 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
return 0;
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -3248,9 +3793,11 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_dpio", i915_dpio_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
+ {"i915_sink_crc_eDP1", i915_sink_crc, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_pc8_status", i915_pc8_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
+ {"i915_display_info", i915_display_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -3269,6 +3816,9 @@ static const struct i915_debugfs_files {
{"i915_error_state", &i915_error_state_fops},
{"i915_next_seqno", &i915_next_seqno_fops},
{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
+ {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+ {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+ {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
};
void intel_display_crc_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 15a74f979b4b..96177eec0a0e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -82,7 +82,7 @@ intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
/*
@@ -103,7 +103,7 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
static void i915_write_hws_pga(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
@@ -118,7 +118,7 @@ static void i915_write_hws_pga(struct drm_device *dev)
*/
static void i915_free_hws(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
if (dev_priv->status_page_dmah) {
@@ -137,7 +137,7 @@ static void i915_free_hws(struct drm_device *dev)
void i915_kernel_lost_context(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
@@ -164,7 +164,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
static int i915_dma_cleanup(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int i;
/* Make sure interrupts are disabled here because the uninstall ioctl
@@ -188,7 +188,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret;
@@ -233,7 +233,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
static int i915_dma_resume(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("%s\n", __func__);
@@ -357,7 +357,7 @@ static int validate_cmd(int cmd)
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
@@ -431,7 +431,7 @@ i915_emit_box(struct drm_device *dev,
static void i915_emit_breadcrumb(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
dev_priv->dri1.counter++;
@@ -547,7 +547,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
static int i915_dispatch_flip(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
int ret;
@@ -625,10 +625,9 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
static int i915_batchbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
- master_priv->sarea_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+ drm_i915_sarea_t *sarea_priv;
drm_i915_batchbuffer_t *batch = data;
int ret;
struct drm_clip_rect *cliprects = NULL;
@@ -636,6 +635,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
+ master_priv = dev->primary->master->driver_priv;
+ sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
+
if (!dev_priv->dri1.allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
@@ -681,10 +683,9 @@ fail_free:
static int i915_cmdbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
- master_priv->sarea_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+ drm_i915_sarea_t *sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
struct drm_clip_rect *cliprects = NULL;
void *batch_data;
@@ -696,6 +697,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
+ master_priv = dev->primary->master->driver_priv;
+ sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
+
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects < 0)
@@ -749,7 +753,7 @@ fail_batch_free:
static int i915_emit_irq(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
i915_kernel_lost_context(dev);
@@ -775,7 +779,7 @@ static int i915_emit_irq(struct drm_device * dev)
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
@@ -812,7 +816,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
static int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_irq_emit_t *emit = data;
int result;
@@ -843,7 +847,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
static int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_irq_wait_t *irqwait = data;
if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -860,7 +864,7 @@ static int i915_irq_wait(struct drm_device *dev, void *data,
static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_vblank_pipe_t *pipe = data;
if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -921,7 +925,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_getparam_t *param = data;
int value;
@@ -990,7 +994,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = HAS_WT(dev);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
- value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+ value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
break;
case I915_PARAM_HAS_WAIT_TIMEOUT:
value = 1;
@@ -1029,7 +1033,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
static int i915_setparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_setparam_t *param = data;
if (!dev_priv) {
@@ -1064,7 +1068,7 @@ static int i915_setparam(struct drm_device *dev, void *data,
static int i915_set_status_page(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
struct intel_ring_buffer *ring;
@@ -1132,7 +1136,7 @@ static int i915_get_bridge_dev(struct drm_device *dev)
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
@@ -1178,11 +1182,14 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
static void
intel_setup_mchbar(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
+ if (IS_VALLEYVIEW(dev))
+ return;
+
dev_priv->mchbar_need_disable = false;
if (IS_I915G(dev) || IS_I915GM(dev)) {
@@ -1215,7 +1222,7 @@ intel_setup_mchbar(struct drm_device *dev)
static void
intel_teardown_mchbar(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
@@ -1317,12 +1324,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_vga_switcheroo;
+ intel_power_domains_init_hw(dev_priv);
+
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem_stolen;
- intel_power_domains_init_hw(dev);
-
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
@@ -1339,7 +1346,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = true;
if (INTEL_INFO(dev)->num_pipes == 0) {
- intel_display_power_put(dev, POWER_DOMAIN_VGA);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
return 0;
}
@@ -1374,10 +1381,10 @@ cleanup_gem:
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
- i915_gem_cleanup_aliasing_ppgtt(dev);
+ WARN_ON(dev_priv->mm.aliasing_ppgtt);
drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
- intel_display_power_put(dev, POWER_DOMAIN_VGA);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
@@ -1442,7 +1449,7 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
- const struct intel_device_info *info = dev_priv->info;
+ const struct intel_device_info *info = &dev_priv->info;
#define PRINT_S(name) "%s"
#define SEP_EMPTY
@@ -1459,6 +1466,62 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
#undef SEP_COMMA
}
+/*
+ * Determine various intel_device_info fields at runtime.
+ *
+ * Use it when either:
+ * - it's judged too laborious to fill n static structures with the limit
+ * when a simple if statement does the job,
+ * - run-time checks (e.g. read fuse/strap registers) are needed.
+ *
+ * This function needs to be called:
+ * - after the MMIO has been set up as we are reading registers,
+ * - after the PCH has been detected,
+ * - before the first usage of the fields it can tweak.
+ */
+static void intel_device_info_runtime_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_device_info *info;
+ enum pipe pipe;
+
+ info = (struct intel_device_info *)&dev_priv->info;
+
+ if (IS_VALLEYVIEW(dev))
+ for_each_pipe(pipe)
+ info->num_sprites[pipe] = 2;
+ else
+ for_each_pipe(pipe)
+ info->num_sprites[pipe] = 1;
+
+ if (i915.disable_display) {
+ DRM_INFO("Display disabled (module parameter)\n");
+ info->num_pipes = 0;
+ } else if (info->num_pipes > 0 &&
+ (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
+ !IS_VALLEYVIEW(dev)) {
+ u32 fuse_strap = I915_READ(FUSE_STRAP);
+ u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+
+ /*
+ * SFUSE_STRAP is supposed to have a bit signalling the display
+ * is fused off. Unfortunately it seems that, at least in
+ * certain cases, fused off display means that PCH display
+ * reads don't land anywhere. In that case, we read 0s.
+ *
+ * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+ * should be set when taking over after the firmware.
+ */
+ if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+ sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+ (dev_priv->pch_type == PCH_CPT &&
+ !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+ DRM_INFO("Display fused off, disabling\n");
+ info->num_pipes = 0;
+ }
+ }
+}
+
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -1473,7 +1536,7 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
- struct intel_device_info *info;
+ struct intel_device_info *info, *device_info;
int ret = 0, mmio_bar, mmio_size;
uint32_t aperture_size;
@@ -1496,7 +1559,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
- dev_priv->info = info;
+
+ /* copy initial configuration to dev_priv->info */
+ device_info = (struct intel_device_info *)&dev_priv->info;
+ *device_info = *info;
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
@@ -1545,8 +1611,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_bridge;
}
- intel_uncore_early_sanitize(dev);
-
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
@@ -1635,9 +1699,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- dev_priv->num_plane = 1;
- if (IS_VALLEYVIEW(dev))
- dev_priv->num_plane = 2;
+ intel_device_info_runtime_init(dev);
if (INTEL_INFO(dev)->num_pipes) {
ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
@@ -1645,7 +1707,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
- intel_power_domains_init(dev);
+ intel_power_domains_init(dev_priv);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
@@ -1674,7 +1736,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_power_well:
- intel_power_domains_remove(dev);
+ intel_power_domains_remove(dev_priv);
drm_vblank_cleanup(dev);
out_gem_unload:
if (dev_priv->mm.inactive_shrinker.scan_objects)
@@ -1724,8 +1786,8 @@ int i915_driver_unload(struct drm_device *dev)
/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
- intel_display_set_init_power(dev, true);
- intel_power_domains_remove(dev);
+ intel_display_set_init_power(dev_priv, true);
+ intel_power_domains_remove(dev_priv);
i915_teardown_sysfs(dev);
@@ -1761,8 +1823,6 @@ int i915_driver_unload(struct drm_device *dev)
cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev);
- cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
-
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
@@ -1776,8 +1836,8 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
+ WARN_ON(dev_priv->mm.aliasing_ppgtt);
mutex_unlock(&dev->struct_mutex);
- i915_gem_cleanup_aliasing_ppgtt(dev);
i915_gem_cleanup_stolen(dev);
if (!I915_NEED_GFX_HWS(dev))
@@ -1835,7 +1895,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
*/
void i915_driver_lastclose(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
/* On gen6+ we refuse to init without kms enabled, but then the drm core
* goes right around and calls lastclose. Check for this and don't clean
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ec7bb0fc71bc..82f4d1f47d3b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -38,134 +38,30 @@
#include <linux/module.h>
#include <drm/drm_crtc_helper.h>
-static int i915_modeset __read_mostly = -1;
-module_param_named(modeset, i915_modeset, int, 0400);
-MODULE_PARM_DESC(modeset,
- "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
- "1=on, -1=force vga console preference [default])");
-
-unsigned int i915_fbpercrtc __always_unused = 0;
-module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-
-int i915_panel_ignore_lid __read_mostly = 1;
-module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
-MODULE_PARM_DESC(panel_ignore_lid,
- "Override lid status (0=autodetect, 1=autodetect disabled [default], "
- "-1=force lid closed, -2=force lid open)");
-
-unsigned int i915_powersave __read_mostly = 1;
-module_param_named(powersave, i915_powersave, int, 0600);
-MODULE_PARM_DESC(powersave,
- "Enable powersavings, fbc, downclocking, etc. (default: true)");
-
-int i915_semaphores __read_mostly = -1;
-module_param_named(semaphores, i915_semaphores, int, 0400);
-MODULE_PARM_DESC(semaphores,
- "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-
-int i915_enable_rc6 __read_mostly = -1;
-module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
-MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6. "
- "Different stages can be selected via bitmask values "
- "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
- "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
- "default: -1 (use per-chip default)");
-
-int i915_enable_fbc __read_mostly = -1;
-module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
-MODULE_PARM_DESC(i915_enable_fbc,
- "Enable frame buffer compression for power savings "
- "(default: -1 (use per-chip default))");
-
-unsigned int i915_lvds_downclock __read_mostly = 0;
-module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
- "Use panel (LVDS/eDP) downclocking for power savings "
- "(default: false)");
-
-int i915_lvds_channel_mode __read_mostly;
-module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
-MODULE_PARM_DESC(lvds_channel_mode,
- "Specify LVDS channel mode "
- "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-
-int i915_panel_use_ssc __read_mostly = -1;
-module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
-MODULE_PARM_DESC(lvds_use_ssc,
- "Use Spread Spectrum Clock with panels [LVDS/eDP] "
- "(default: auto from VBT)");
-
-int i915_vbt_sdvo_panel_type __read_mostly = -1;
-module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
-MODULE_PARM_DESC(vbt_sdvo_panel_type,
- "Override/Ignore selection of SDVO panel mode in the VBT "
- "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-
-static bool i915_try_reset __read_mostly = true;
-module_param_named(reset, i915_try_reset, bool, 0600);
-MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
-
-bool i915_enable_hangcheck __read_mostly = true;
-module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
-MODULE_PARM_DESC(enable_hangcheck,
- "Periodically check GPU activity for detecting hangs. "
- "WARNING: Disabling this can cause system wide hangs. "
- "(default: true)");
-
-int i915_enable_ppgtt __read_mostly = -1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
-MODULE_PARM_DESC(i915_enable_ppgtt,
- "Enable PPGTT (default: true)");
-
-int i915_enable_psr __read_mostly = 0;
-module_param_named(enable_psr, i915_enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
-
-unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
-module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
-MODULE_PARM_DESC(preliminary_hw_support,
- "Enable preliminary hardware support.");
-
-int i915_disable_power_well __read_mostly = 1;
-module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
-MODULE_PARM_DESC(disable_power_well,
- "Disable the power well when possible (default: true)");
-
-int i915_enable_ips __read_mostly = 1;
-module_param_named(enable_ips, i915_enable_ips, int, 0600);
-MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
-
-bool i915_fastboot __read_mostly = 0;
-module_param_named(fastboot, i915_fastboot, bool, 0600);
-MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
- "(default: false)");
-
-int i915_enable_pc8 __read_mostly = 1;
-module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
-MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
-
-int i915_pc8_timeout __read_mostly = 5000;
-module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
-MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
-
-bool i915_prefault_disable __read_mostly;
-module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
-MODULE_PARM_DESC(prefault_disable,
- "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
-
static struct drm_driver driver;
+#define GEN_DEFAULT_PIPEOFFSETS \
+ .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+ PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+ TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+ .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
+ .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
+ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
+
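GEN_DEFAULT_PIPEOFFSETS seeds per-device lookup tables so that display register addresses can be derived from a pipe or transcoder index instead of gen-specific arithmetic. A hedged sketch of the intended lookup only, not the driver's actual register macro; the helper name is invented and MMIO base handling is omitted:

    /* illustrative only: resolve a pipe-relative register by table lookup */
    static inline u32 example_pipe_reg(const struct intel_device_info *info,
                                       enum pipe pipe, u32 reg)
    {
        return info->pipe_offsets[pipe] + reg;
    }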
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_845g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i85x_info = {
@@ -174,18 +70,21 @@ static const struct intel_device_info intel_i85x_info = {
.has_overlay = 1, .overlay_needs_physical = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i865g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i915g_info = {
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
.gen = 3, .is_mobile = 1, .num_pipes = 2,
@@ -194,11 +93,13 @@ static const struct intel_device_info intel_i915gm_info = {
.supports_tv = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
@@ -207,6 +108,7 @@ static const struct intel_device_info intel_i945gm_info = {
.supports_tv = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i965g_info = {
@@ -214,6 +116,7 @@ static const struct intel_device_info intel_i965g_info = {
.has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i965gm_info = {
@@ -222,6 +125,7 @@ static const struct intel_device_info intel_i965gm_info = {
.has_overlay = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_g33_info = {
@@ -229,12 +133,14 @@ static const struct intel_device_info intel_g33_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_g45_info = {
.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_gm45_info = {
@@ -243,18 +149,21 @@ static const struct intel_device_info intel_gm45_info = {
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_pineview_info = {
.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_ironlake_m_info = {
@@ -262,6 +171,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@@ -270,6 +180,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -278,6 +189,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
#define GEN7_FEATURES \
@@ -290,18 +202,21 @@ static const struct intel_device_info intel_sandybridge_m_info = {
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.num_pipes = 0, /* legal, last one wins */
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_valleyview_m_info = {
@@ -312,6 +227,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
.display_mmio_offset = VLV_DISPLAY_BASE,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_valleyview_d_info = {
@@ -321,6 +237,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
.display_mmio_offset = VLV_DISPLAY_BASE,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_haswell_d_info = {
@@ -329,6 +246,7 @@ static const struct intel_device_info intel_haswell_d_info = {
.has_ddi = 1,
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_haswell_m_info = {
@@ -338,6 +256,7 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_ddi = 1,
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_broadwell_d_info = {
@@ -346,6 +265,8 @@ static const struct intel_device_info intel_broadwell_d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_broadwell_m_info = {
@@ -354,6 +275,8 @@ static const struct intel_device_info intel_broadwell_m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
/*
@@ -475,14 +398,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return false;
+ if (i915.semaphores >= 0)
+ return i915.semaphores;
+
/* Until we get further testing... */
- if (IS_GEN8(dev)) {
- WARN_ON(!i915_preliminary_hw_support);
+ if (IS_GEN8(dev))
return false;
- }
-
- if (i915_semaphores >= 0)
- return i915_semaphores;
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
@@ -507,8 +428,7 @@ static int i915_drm_freeze(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
- hsw_disable_package_c8(dev_priv);
- intel_display_set_init_power(dev, true);
+ intel_display_set_init_power(dev_priv, true);
drm_kms_helper_poll_disable(dev);
@@ -546,11 +466,14 @@ static int i915_drm_freeze(struct drm_device *dev)
i915_save_state(dev);
intel_opregion_fini(dev);
+ intel_uncore_fini(dev);
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
console_unlock();
+ dev_priv->suspend_count++;
+
return 0;
}
@@ -614,14 +537,21 @@ static void intel_resume_hotplug(struct drm_device *dev)
drm_helper_hpd_irq_event(dev);
}
-static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
+static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int error = 0;
intel_uncore_early_sanitize(dev);
-
intel_uncore_sanitize(dev);
+ intel_power_domains_init_hw(dev_priv);
+
+ return 0;
+}
+
+static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int error = 0;
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
restore_gtt_mappings) {
@@ -630,14 +560,13 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
mutex_unlock(&dev->struct_mutex);
}
- intel_power_domains_init_hw(dev);
-
i915_restore_state(dev);
intel_opregion_setup(dev);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_init_pch_refclk(dev);
+ drm_mode_config_reset(dev);
mutex_lock(&dev->struct_mutex);
@@ -650,7 +579,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_modeset_init_hw(dev);
drm_modeset_lock_all(dev);
- drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
@@ -680,10 +608,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
schedule_work(&dev_priv->console_resume_work);
}
- /* Undo what we did at i915_drm_freeze so the refcount goes back to the
- * expected level. */
- hsw_enable_package_c8(dev_priv);
-
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
@@ -700,19 +624,33 @@ static int i915_drm_thaw(struct drm_device *dev)
return __i915_drm_thaw(dev, true);
}
-int i915_resume(struct drm_device *dev)
+static int i915_resume_early(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ /*
+ * We have a resume ordering issue with the snd-hda driver also
+ * requiring our device to be powered up. Due to the lack of a
+ * parent/child relationship we currently solve this with an early
+ * resume hook.
+ *
+ * FIXME: This should be solved with a special hdmi sink device or
+ * similar so that power domains can be employed.
+ */
if (pci_enable_device(dev->pdev))
return -EIO;
pci_set_master(dev->pdev);
+ return i915_drm_thaw_early(dev);
+}
+
+int i915_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
* earlier) need to restore the GTT mappings since the BIOS might clear
@@ -726,6 +664,14 @@ int i915_resume(struct drm_device *dev)
return 0;
}
+static int i915_resume_legacy(struct drm_device *dev)
+{
+ i915_resume_early(dev);
+ i915_resume(dev);
+
+ return 0;
+}
+
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
@@ -743,11 +689,11 @@ int i915_resume(struct drm_device *dev)
*/
int i915_reset(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
bool simulated;
int ret;
- if (!i915_try_reset)
+ if (!i915.reset)
return 0;
mutex_lock(&dev->struct_mutex);
@@ -802,6 +748,17 @@ int i915_reset(struct drm_device *dev)
drm_irq_uninstall(dev);
drm_irq_install(dev);
+
+ /* rps/rc6 re-init is necessary to restore state lost after the
+ * reset and the re-install of drm irq. Skip for ironlake per
+ * previous concerns that it doesn't respond well to some forms
+ * of re-init after reset. */
+ if (INTEL_INFO(dev)->gen > 5) {
+ mutex_lock(&dev->struct_mutex);
+ intel_enable_gt_powersave(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
intel_hpd_init(dev);
} else {
mutex_unlock(&dev->struct_mutex);
@@ -815,7 +772,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+ if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
DRM_INFO("This hardware requires preliminary hardware support.\n"
"See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
return -ENODEV;
@@ -846,7 +803,6 @@ static int i915_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int error;
if (!drm_dev || !drm_dev->dev_private) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
@@ -856,9 +812,25 @@ static int i915_pm_suspend(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- error = i915_drm_freeze(drm_dev);
- if (error)
- return error;
+ return i915_drm_freeze(drm_dev);
+}
+
+static int i915_pm_suspend_late(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ /*
+ * We have a suspend ordering issue with the snd-hda driver also
+ * requiring our device to be powered up. Due to the lack of a
+ * parent/child relationship we currently solve this with a late
+ * suspend hook.
+ *
+ * FIXME: This should be solved with a special hdmi sink device or
+ * similar so that power domains can be employed.
+ */
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
@@ -866,6 +838,14 @@ static int i915_pm_suspend(struct device *dev)
return 0;
}
+static int i915_pm_resume_early(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_resume_early(drm_dev);
+}
+
static int i915_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -887,6 +867,14 @@ static int i915_pm_freeze(struct device *dev)
return i915_drm_freeze(drm_dev);
}
+static int i915_pm_thaw_early(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_thaw_early(drm_dev);
+}
+
static int i915_pm_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -910,9 +898,13 @@ static int i915_runtime_suspend(struct device *device)
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!HAS_RUNTIME_PM(dev));
+ assert_force_wake_inactive(dev_priv);
DRM_DEBUG_KMS("Suspending device\n");
+ if (HAS_PC8(dev))
+ hsw_enable_pc8(dev_priv);
+
i915_gem_release_all_mmaps(dev_priv);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
@@ -927,6 +919,7 @@ static int i915_runtime_suspend(struct device *device)
*/
intel_opregion_notify_adapter(dev, PCI_D1);
+ DRM_DEBUG_KMS("Device suspended\n");
return 0;
}
@@ -943,15 +936,23 @@ static int i915_runtime_resume(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
+ if (HAS_PC8(dev))
+ hsw_disable_pc8(dev_priv);
+
+ DRM_DEBUG_KMS("Device resumed\n");
return 0;
}
static const struct dev_pm_ops i915_pm_ops = {
.suspend = i915_pm_suspend,
+ .suspend_late = i915_pm_suspend_late,
+ .resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
.freeze = i915_pm_freeze,
+ .thaw_early = i915_pm_thaw_early,
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
+ .restore_early = i915_pm_resume_early,
.restore = i915_pm_resume,
.runtime_suspend = i915_runtime_suspend,
.runtime_resume = i915_runtime_resume,
@@ -994,7 +995,7 @@ static struct drm_driver driver = {
/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
.suspend = i915_suspend,
- .resume = i915_resume,
+ .resume = i915_resume_legacy,
.device_is_agp = i915_driver_device_is_agp,
.master_create = i915_master_create,
@@ -1046,14 +1047,14 @@ static int __init i915_init(void)
* the default behavior.
*/
#if defined(CONFIG_DRM_I915_KMS)
- if (i915_modeset != 0)
+ if (i915.modeset != 0)
driver.driver_features |= DRIVER_MODESET;
#endif
- if (i915_modeset == 1)
+ if (i915.modeset == 1)
driver.driver_features |= DRIVER_MODESET;
#ifdef CONFIG_VGA_CONSOLE
- if (vgacon_text_force() && i915_modeset == -1)
+ if (vgacon_text_force() && i915.modeset == -1)
driver.driver_features &= ~DRIVER_MODESET;
#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index df77e20e3c3d..0905cd915589 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -58,7 +58,8 @@ enum pipe {
PIPE_A = 0,
PIPE_B,
PIPE_C,
- I915_MAX_PIPES
+ _PIPE_EDP,
+ I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')
@@ -66,7 +67,8 @@ enum transcoder {
TRANSCODER_A = 0,
TRANSCODER_B,
TRANSCODER_C,
- TRANSCODER_EDP = 0xF,
+ TRANSCODER_EDP,
+ I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')
@@ -77,7 +79,7 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
enum port {
PORT_A = 0,
@@ -112,6 +114,17 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_PORT_DDI_A_2_LANES,
+ POWER_DOMAIN_PORT_DDI_A_4_LANES,
+ POWER_DOMAIN_PORT_DDI_B_2_LANES,
+ POWER_DOMAIN_PORT_DDI_B_4_LANES,
+ POWER_DOMAIN_PORT_DDI_C_2_LANES,
+ POWER_DOMAIN_PORT_DDI_C_4_LANES,
+ POWER_DOMAIN_PORT_DDI_D_2_LANES,
+ POWER_DOMAIN_PORT_DDI_D_4_LANES,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_PORT_OTHER,
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
POWER_DOMAIN_INIT,
@@ -119,8 +132,6 @@ enum intel_display_power_domain {
POWER_DOMAIN_NUM,
};
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
@@ -128,14 +139,6 @@ enum intel_display_power_domain {
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
(tran) + POWER_DOMAIN_TRANSCODER_A)
-#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_EDP))
-#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-
enum hpd_pin {
HPD_NONE = 0,
HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
@@ -157,11 +160,16 @@ enum hpd_pin {
I915_GEM_DOMAIN_VERTEX)
#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
+#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+ list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+ if ((intel_connector)->base.encoder == (__encoder))
+
struct drm_i915_private;
enum intel_dpll_id {
@@ -295,53 +303,87 @@ struct intel_display_error_state;
struct drm_i915_error_state {
struct kref ref;
+ struct timeval time;
+
+ char error_msg[128];
+ u32 reset_count;
+ u32 suspend_count;
+
+ /* Generic register state */
u32 eir;
u32 pgtbl_er;
u32 ier;
u32 ccid;
u32 derrmr;
u32 forcewake;
- bool waiting[I915_NUM_RINGS];
- u32 pipestat[I915_MAX_PIPES];
- u32 tail[I915_NUM_RINGS];
- u32 head[I915_NUM_RINGS];
- u32 ctl[I915_NUM_RINGS];
- u32 ipeir[I915_NUM_RINGS];
- u32 ipehr[I915_NUM_RINGS];
- u32 instdone[I915_NUM_RINGS];
- u32 acthd[I915_NUM_RINGS];
- u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
- u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
- u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
- /* our own tracking of ring head and tail */
- u32 cpu_ring_head[I915_NUM_RINGS];
- u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
- u32 bbstate[I915_NUM_RINGS];
- u32 instpm[I915_NUM_RINGS];
- u32 instps[I915_NUM_RINGS];
- u32 extra_instdone[I915_NUM_INSTDONE_REG];
- u32 seqno[I915_NUM_RINGS];
- u64 bbaddr[I915_NUM_RINGS];
- u32 fault_reg[I915_NUM_RINGS];
u32 done_reg;
- u32 faddr[I915_NUM_RINGS];
+ u32 gac_eco;
+ u32 gam_ecochk;
+ u32 gab_ctl;
+ u32 gfx_mode;
+ u32 extra_instdone[I915_NUM_INSTDONE_REG];
+ u32 pipestat[I915_MAX_PIPES];
u64 fence[I915_MAX_NUM_FENCES];
- struct timeval time;
+ struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
+
struct drm_i915_error_ring {
bool valid;
+ /* Software tracked state */
+ bool waiting;
+ int hangcheck_score;
+ enum intel_ring_hangcheck_action hangcheck_action;
+ int num_requests;
+
+ /* our own tracking of ring head and tail */
+ u32 cpu_ring_head;
+ u32 cpu_ring_tail;
+
+ u32 semaphore_seqno[I915_NUM_RINGS - 1];
+
+ /* Register state */
+ u32 tail;
+ u32 head;
+ u32 ctl;
+ u32 hws;
+ u32 ipeir;
+ u32 ipehr;
+ u32 instdone;
+ u32 bbstate;
+ u32 instpm;
+ u32 instps;
+ u32 seqno;
+ u64 bbaddr;
+ u64 acthd;
+ u32 fault_reg;
+ u32 faddr;
+ u32 rc_psmi; /* sleep state */
+ u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
u32 *pages[0];
- } *ringbuffer, *batchbuffer, *ctx;
+ } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+
struct drm_i915_error_request {
long jiffies;
u32 seqno;
u32 tail;
} *requests;
- int num_requests;
+
+ struct {
+ u32 gfx_mode;
+ union {
+ u64 pdp[4];
+ u32 pp_dir_base;
+ };
+ } vm_info;
+
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
} ring[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
@@ -358,15 +400,13 @@ struct drm_i915_error_state {
s32 ring:4;
u32 cache_level:3;
} **active_bo, **pinned_bo;
+
u32 *active_bo_count, *pinned_bo_count;
- struct intel_overlay_error_state *overlay;
- struct intel_display_error_state *display;
- int hangcheck_score[I915_NUM_RINGS];
- enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
struct intel_connector;
struct intel_crtc_config;
+struct intel_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
@@ -405,6 +445,8 @@ struct drm_i915_display_funcs {
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
+ void (*get_plane_config)(struct intel_crtc *,
+ struct intel_plane_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
@@ -420,8 +462,9 @@ struct drm_i915_display_funcs {
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
uint32_t flags);
- int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y);
+ int (*update_primary_plane)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
@@ -469,7 +512,7 @@ struct intel_uncore {
unsigned fw_rendercount;
unsigned fw_mediacount;
- struct delayed_work force_wake_work;
+ struct timer_list force_wake_timer;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -504,9 +547,16 @@ struct intel_uncore {
struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes:3;
+ u8 num_sprites[I915_MAX_PIPES];
u8 gen;
u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+ /* Register offsets for the various display pipes and transcoders */
+ int pipe_offsets[I915_MAX_TRANSCODERS];
+ int trans_offsets[I915_MAX_TRANSCODERS];
+ int dpll_offsets[I915_MAX_PIPES];
+ int dpll_md_offsets[I915_MAX_PIPES];
+ int palette_offsets[I915_MAX_PIPES];
};
#undef DEFINE_FLAG
@@ -524,6 +574,57 @@ enum i915_cache_level {
typedef uint32_t gen6_gtt_pte_t;
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head mm_list;
+
+ struct list_head vma_link; /* Link in the object's VMA list */
+
+ /** This vma's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+
+ /**
+ * How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, pin_ioctl
+ * (via user_pin_count), execbuffer (objects are not allowed multiple
+ * times for the same batchbuffer), and the framebuffer code. When
+ * switching/pageflipping, the framebuffer code has at most two buffers
+ * pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits. */
+ unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+ /** Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page. */
+ void (*unbind_vma)(struct i915_vma *vma);
+ /* Map an object into an address space with the given cache flags. */
+#define GLOBAL_BIND (1<<0)
+ void (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+};
+
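As a hedged illustration of how the new per-VMA pin accounting and bind/unbind hooks fit together (the helper names below are hypothetical and error handling is omitted):

/* Hypothetical sketch, not part of this patch: pin and unpin a VMA
 * using the callbacks and the 4-bit pin_count introduced above. */
static void example_pin_vma(struct i915_vma *vma)
{
	/* Map the object through the VMA's own hook; GLOBAL_BIND
	 * additionally requests a binding in the global GTT. */
	vma->bind_vma(vma, I915_CACHE_NONE, GLOBAL_BIND);

	WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
	vma->pin_count++;
}

static void example_unpin_vma(struct i915_vma *vma)
{
	/* Drop the pin; once nobody holds it, tear the mapping down. */
	if (--vma->pin_count == 0)
		vma->unbind_vma(vma);
}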
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
@@ -564,12 +665,12 @@ struct i915_address_space {
enum i915_cache_level level,
bool valid); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
- unsigned int first_entry,
- unsigned int num_entries,
+ uint64_t start,
+ uint64_t length,
bool use_scratch);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
- unsigned int first_entry,
+ uint64_t start,
enum i915_cache_level cache_level);
void (*cleanup)(struct i915_address_space *vm);
};
@@ -603,55 +704,34 @@ struct i915_gtt {
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
+#define GEN8_LEGACY_PDPS 4
struct i915_hw_ppgtt {
struct i915_address_space base;
+ struct kref ref;
+ struct drm_mm_node node;
unsigned num_pd_entries;
+ unsigned num_pd_pages; /* gen8+ */
union {
struct page **pt_pages;
- struct page *gen8_pt_pages;
+ struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
};
struct page *pd_pages;
- int num_pd_pages;
- int num_pt_pages;
union {
uint32_t pd_offset;
- dma_addr_t pd_dma_addr[4];
+ dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
};
union {
dma_addr_t *pt_dma_addr;
dma_addr_t *gen8_pt_dma_addr[4];
};
- int (*enable)(struct drm_device *dev);
-};
-
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
-
- /** This object's place on the active/inactive lists */
- struct list_head mm_list;
- struct list_head vma_link; /* Link in the object's VMA list */
-
- /** This vma's place in the batchbuffer or on the eviction list */
- struct list_head exec_list;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- struct hlist_node exec_node;
- unsigned long exec_handle;
- struct drm_i915_gem_exec_object2 *exec_entry;
+ struct i915_hw_context *ctx;
+ int (*enable)(struct i915_hw_ppgtt *ppgtt);
+ int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+ struct intel_ring_buffer *ring,
+ bool synchronous);
+ void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
struct i915_ctx_hang_stats {
@@ -676,9 +756,10 @@ struct i915_hw_context {
bool is_initialized;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
- struct intel_ring_buffer *ring;
+ struct intel_ring_buffer *last_ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
+ struct i915_address_space *vm;
struct list_head link;
};
@@ -831,11 +912,7 @@ struct i915_suspend_saved_registers {
u32 savePFIT_CONTROL;
u32 save_palette_a[256];
u32 save_palette_b[256];
- u32 saveDPFC_CB_BASE;
- u32 saveFBC_CFB_BASE;
- u32 saveFBC_LL_BASE;
u32 saveFBC_CONTROL;
- u32 saveFBC_CONTROL2;
u32 saveIER;
u32 saveIIR;
u32 saveIMR;
@@ -905,15 +982,24 @@ struct intel_gen6_power_mgmt {
struct work_struct work;
u32 pm_iir;
- /* The below variables an all the rps hw state are protected by
- * dev->struct mutext. */
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 rpe_delay;
- u8 rp1_delay;
- u8 rp0_delay;
- u8 hw_max;
+ /* Frequencies are stored in potentially platform-dependent multiples.
+ * In other words, *_freq needs to be multiplied by X to yield a meaningful frequency.
+ * Soft limits are those which are used for the dynamic reclocking done
+ * by the driver (raise frequencies under heavy loads, and lower for
+ * lighter loads). Hard limits are those imposed by the hardware.
+ *
+ * A distinction is made for overclocking, which is never enabled by
+ * default, and is considered to be above the hard limit if it's
+ * possible at all.
+ */
+ u8 cur_freq; /* Current frequency (cached, may not == HW) */
+ u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
+ u8 max_freq_softlimit; /* Max frequency permitted by the driver */
+ u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
+ u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
+ u8 rp1_freq; /* "less than" RP0 power/frequency */
+ u8 rp0_freq; /* Non-overclocked max frequency. */
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
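As a hedged aside, this is what "multiplied by X" means in practice; the 50 MHz step below is purely illustrative, since the real multiplier is platform dependent:

/* Illustrative only: convert a stored *_freq value to MHz, assuming a
 * hypothetical 50 MHz step per unit. */
static unsigned int example_freq_to_mhz(u8 freq)
{
	return (unsigned int)freq * 50;
}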
@@ -953,6 +1039,36 @@ struct intel_ilk_power_mgmt {
struct drm_i915_gem_object *renderctx;
};
+struct drm_i915_private;
+struct i915_power_well;
+
+struct i915_power_well_ops {
+ /*
+ * Synchronize the well's hw state to match the current sw state, for
+ * example enable/disable it based on the current refcount. Called
+ * during driver init and resume time, possibly after first calling
+ * the enable/disable handlers.
+ */
+ void (*sync_hw)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Enable the well and resources that depend on it (for example
+ * interrupts located on the well). Called after the 0->1 refcount
+ * transition.
+ */
+ void (*enable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Disable the well and resources that depend on it. Called after
+ * the 1->0 refcount transition.
+ */
+ void (*disable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /* Returns the hw enabled state. */
+ bool (*is_enabled)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+};
+
/* Power well structure for haswell */
struct i915_power_well {
const char *name;
@@ -960,11 +1076,8 @@ struct i915_power_well {
/* power well enable/disable usage count */
int count;
unsigned long domains;
- void *data;
- void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
- bool enable);
- bool (*is_enabled)(struct drm_device *dev,
- struct i915_power_well *power_well);
+ unsigned long data;
+ const struct i915_power_well_ops *ops;
};
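To make the refcount/ops split concrete, here is a hedged sketch of how the 0->1 and 1->0 transitions would drive the new callbacks (function names hypothetical, locking omitted):

static void example_power_well_get(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	/* The 0 -> 1 transition turns the well on through its ops table. */
	if (power_well->count++ == 0)
		power_well->ops->enable(dev_priv, power_well);
}

static void example_power_well_put(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	/* The 1 -> 0 transition turns it back off. */
	if (--power_well->count == 0)
		power_well->ops->disable(dev_priv, power_well);
}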
struct i915_power_domains {
@@ -1061,6 +1174,14 @@ struct i915_gem_mm {
*/
bool interruptible;
+ /**
+ * Is the GPU currently considered idle, or busy executing userspace
+ * requests? Whilst idle, we attempt to power down the hardware and
+ * display clocks. In order to reduce the effect on performance, there
+ * is a slight delay before we do so.
+ */
+ bool busy;
+
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
@@ -1226,44 +1347,19 @@ struct ilk_wm_values {
};
/*
- * This struct tracks the state needed for the Package C8+ feature.
- *
- * Package states C8 and deeper are really deep PC states that can only be
- * reached when all the devices on the system allow it, so even if the graphics
- * device allows PC8+, it doesn't mean the system will actually get to these
- * states.
- *
- * Our driver only allows PC8+ when all the outputs are disabled, the power well
- * is disabled and the GPU is idle. When these conditions are met, we manually
- * do the other conditions: disable the interrupts, clocks and switch LCPLL
- * refclk to Fclk.
- *
- * When we really reach PC8 or deeper states (not just when we allow it) we lose
- * the state of some registers, so when we come back from PC8+ we need to
- * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
- * need to take care of the registers kept by RC6.
+ * This struct helps track the state needed for runtime PM, which puts the
+ * device into the PCI D3 state. Note that when this happens, nothing on the
+ * graphics device works, not even register access, so we get neither interrupts
+ * nor anything else.
*
- * The interrupt disabling is part of the requirements. We can only leave the
- * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
- * can lock the machine.
+ * Every piece of our code that needs to actually touch the hardware needs to
+ * either call intel_runtime_pm_get or call intel_display_power_get with the
+ * appropriate power domain.
*
- * Ideally every piece of our code that needs PC8+ disabled would call
- * hsw_disable_package_c8, which would increment disable_count and prevent the
- * system from reaching PC8+. But we don't have a symmetric way to do this for
- * everything, so we have the requirements_met and gpu_idle variables. When we
- * switch requirements_met or gpu_idle to true we decrease disable_count, and
- * increase it in the opposite case. The requirements_met variable is true when
- * all the CRTCs, encoders and the power well are disabled. The gpu_idle
- * variable is true when the GPU is idle.
- *
- * In addition to everything, we only actually enable PC8+ if disable_count
- * stays at zero for at least some seconds. This is implemented with the
- * enable_work variable. We do this so we don't enable/disable PC8 dozens of
- * consecutive times when all screens are disabled and some background app
- * queries the state of our connectors, or we have some application constantly
- * waking up to use the GPU. Only after the enable_work function actually
- * enables PC8+ the "enable" variable will become true, which means that it can
- * be false even if disable_count is 0.
+ * Our driver uses the autosuspend delay feature, which means we'll only really
+ * suspend if the refcount stays at zero for a certain amount of time. The
+ * default value is currently very conservative (see intel_init_runtime_pm), but
+ * it can be changed through the standard runtime PM files in sysfs.
*
* The irqs_disabled variable becomes true exactly after we disable the IRQs and
* goes back to false exactly before we reenable the IRQs. We use this variable
@@ -1273,17 +1369,11 @@ struct ilk_wm_values {
* inside struct regsave so when we restore the IRQs they will contain the
* latest expected values.
*
- * For more, read "Display Sequences for Package C8" on our documentation.
+ * For more, read Documentation/power/runtime_pm.txt.
*/
-struct i915_package_c8 {
- bool requirements_met;
- bool gpu_idle;
+struct i915_runtime_pm {
+ bool suspended;
bool irqs_disabled;
- /* Only true after the delayed work task actually enables it. */
- bool enabled;
- int disable_count;
- struct mutex lock;
- struct delayed_work enable_work;
struct {
uint32_t deimr;
@@ -1294,10 +1384,6 @@ struct i915_package_c8 {
} regsave;
};
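A hedged sketch of the usage pattern described in the comment above: bracket hardware access with a runtime PM reference so the device cannot drop into D3 underneath it (the surrounding function is hypothetical):

static void example_touch_hardware(struct drm_i915_private *dev_priv)
{
	/* Take a runtime PM reference before touching registers ... */
	intel_runtime_pm_get(dev_priv);

	/* ... do MMIO work here while dev_priv->pm.suspended is false ... */

	/* ... and drop it when done; the autosuspend timer then restarts. */
	intel_runtime_pm_put(dev_priv);
}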
-struct i915_runtime_pm {
- bool suspended;
-};
-
enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1332,7 +1418,7 @@ typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
- const struct intel_device_info *info;
+ const struct intel_device_info info;
int relative_constants_mode;
@@ -1361,11 +1447,11 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
- atomic_t irq_received;
-
/* protects the irq masks */
spinlock_t irq_lock;
+ bool display_irqs_enabled;
+
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
struct pm_qos_request pm_qos;
@@ -1379,6 +1465,8 @@ typedef struct drm_i915_private {
};
u32 gt_irq_mask;
u32 pm_irq_mask;
+ u32 pm_rps_events;
+ u32 pipestat_irq_mask[I915_MAX_PIPES];
struct work_struct hotplug_work;
bool enable_hotplug_processing;
@@ -1394,8 +1482,6 @@ typedef struct drm_i915_private {
u32 hpd_event_bits;
struct timer_list hotplug_reenable_timer;
- int num_plane;
-
struct i915_fbc fbc;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
@@ -1445,8 +1531,8 @@ typedef struct drm_i915_private {
struct sdvo_device_mapping sdvo_mappings[2];
- struct drm_crtc *plane_to_crtc_mapping[3];
- struct drm_crtc *pipe_to_crtc_mapping[3];
+ struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
+ struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
wait_queue_head_t pending_flip_queue;
#ifdef CONFIG_DEBUG_FS
@@ -1506,6 +1592,7 @@ typedef struct drm_i915_private {
u32 fdi_rx_config;
+ u32 suspend_count;
struct i915_suspend_saved_registers regfile;
struct {
@@ -1525,8 +1612,6 @@ typedef struct drm_i915_private {
struct ilk_wm_values hw;
} wm;
- struct i915_package_c8 pc8;
-
struct i915_runtime_pm pm;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
@@ -1627,18 +1712,6 @@ struct drm_i915_gem_object {
*/
unsigned int fence_dirty:1;
- /** How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, pin_ioctl
- * (via user_pin_count), execbuffer (objects are not allowed multiple
- * times for the same batchbuffer), and the framebuffer code. When
- * switching/pageflipping, the framebuffer code has at most two buffers
- * pinned per crtc.
- *
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits. */
- unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
/**
* Is the object at the current location in the gtt mappable and
* fenceable? Used to avoid costly recalculations.
@@ -1697,7 +1770,6 @@ struct drm_i915_gem_object {
/** for phy allocated objects */
struct drm_i915_gem_phys_object *phys_obj;
};
-#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -1743,6 +1815,7 @@ struct drm_i915_gem_request {
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
+ struct drm_file *file;
struct {
spinlock_t lock;
@@ -1751,11 +1824,95 @@ struct drm_i915_file_private {
} mm;
struct idr context_idr;
- struct i915_ctx_hang_stats hang_stats;
+ struct i915_hw_context *private_default_ctx;
atomic_t rps_wait_boost;
};
-#define INTEL_INFO(dev) (to_i915(dev)->info)
+/*
+ * A command that requires special handling by the command parser.
+ */
+struct drm_i915_cmd_descriptor {
+ /*
+ * Flags describing how the command parser processes the command.
+ *
+ * CMD_DESC_FIXED: The command has a fixed length if this is set,
+ * a length mask if not set
+ * CMD_DESC_SKIP: The command is allowed but does not follow the
+ * standard length encoding for the opcode range in
+ * which it falls
+ * CMD_DESC_REJECT: The command is never allowed
+ * CMD_DESC_REGISTER: The command should be checked against the
+ * register whitelist for the appropriate ring
+ * CMD_DESC_MASTER: The command is allowed if the submitting process
+ * is the DRM master
+ */
+ u32 flags;
+#define CMD_DESC_FIXED (1<<0)
+#define CMD_DESC_SKIP (1<<1)
+#define CMD_DESC_REJECT (1<<2)
+#define CMD_DESC_REGISTER (1<<3)
+#define CMD_DESC_BITMASK (1<<4)
+#define CMD_DESC_MASTER (1<<5)
+
+ /*
+ * The command's unique identification bits and the bitmask to get them.
+ * This isn't strictly the opcode field as defined in the spec and may
+ * also include type, subtype, and/or subop fields.
+ */
+ struct {
+ u32 value;
+ u32 mask;
+ } cmd;
+
+ /*
+ * The command's length. The command is either fixed length (i.e. does
+ * not include a length field) or has a length field mask. The flag
+ * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
+ * a length mask. All command entries in a command table must include
+ * length information.
+ */
+ union {
+ u32 fixed;
+ u32 mask;
+ } length;
+
+ /*
+ * Describes where to find a register address in the command to check
+ * against the ring's register whitelist. Only valid if flags has the
+ * CMD_DESC_REGISTER bit set.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ } reg;
+
+#define MAX_CMD_DESC_BITMASKS 3
+ /*
+ * Describes command checks where a particular dword is masked and
+ * compared against an expected value. If the command does not match
+ * the expected value, the parser rejects it. Only valid if flags has
+ * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
+ * are valid.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 expected;
+ } bits[MAX_CMD_DESC_BITMASKS];
+};
+
+/*
+ * A table of commands requiring special handling by the command parser.
+ *
+ * Each ring has an array of tables. Each table consists of an array of command
+ * descriptors, which must be sorted with command opcodes in ascending order.
+ */
+struct drm_i915_cmd_table {
+ const struct drm_i915_cmd_descriptor *table;
+ int count;
+};
+
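To make the descriptor layout concrete, here is a hedged example of what entries in such a table could look like; the opcode values are placeholders, not real commands:

static const struct drm_i915_cmd_descriptor example_cmds[] = {
	{
		/* A fixed-length command that is simply allowed through. */
		.flags = CMD_DESC_FIXED,
		.cmd = { .value = 0x01 << 23, .mask = 0xff << 23 },
		.length = { .fixed = 1 },
	},
	{
		/* A command the parser must always refuse. */
		.flags = CMD_DESC_REJECT | CMD_DESC_FIXED,
		.cmd = { .value = 0x02 << 23, .mask = 0xff << 23 },
		.length = { .fixed = 1 },
	},
};

static const struct drm_i915_cmd_table example_table = {
	.table = example_cmds,
	.count = ARRAY_SIZE(example_cmds),
};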
+#define INTEL_INFO(dev) (&to_i915(dev)->info)
#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
@@ -1824,7 +1981,11 @@ struct drm_i915_file_private {
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
+#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
+ && !IS_BROADWELL(dev))
+#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
+#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1887,32 +2048,40 @@ struct drm_i915_file_private {
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc __always_unused;
-extern int i915_panel_ignore_lid __read_mostly;
-extern unsigned int i915_powersave __read_mostly;
-extern int i915_semaphores __read_mostly;
-extern unsigned int i915_lvds_downclock __read_mostly;
-extern int i915_lvds_channel_mode __read_mostly;
-extern int i915_panel_use_ssc __read_mostly;
-extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern int i915_enable_rc6 __read_mostly;
-extern int i915_enable_fbc __read_mostly;
-extern bool i915_enable_hangcheck __read_mostly;
-extern int i915_enable_ppgtt __read_mostly;
-extern int i915_enable_psr __read_mostly;
-extern unsigned int i915_preliminary_hw_support __read_mostly;
-extern int i915_disable_power_well __read_mostly;
-extern int i915_enable_ips __read_mostly;
-extern bool i915_fastboot __read_mostly;
-extern int i915_enable_pc8 __read_mostly;
-extern int i915_pc8_timeout __read_mostly;
-extern bool i915_prefault_disable __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
+/* i915_params.c */
+struct i915_params {
+ int modeset;
+ int panel_ignore_lid;
+ unsigned int powersave;
+ int semaphores;
+ unsigned int lvds_downclock;
+ int lvds_channel_mode;
+ int panel_use_ssc;
+ int vbt_sdvo_panel_type;
+ int enable_rc6;
+ int enable_fbc;
+ int enable_ppgtt;
+ int enable_psr;
+ unsigned int preliminary_hw_support;
+ int disable_power_well;
+ int enable_ips;
+ int invert_brightness;
+ int enable_cmd_parser;
+ /* leave bools at the end to not create holes */
+ bool enable_hangcheck;
+ bool fastboot;
+ bool prefault_disable;
+ bool reset;
+ bool disable_display;
+};
+extern struct i915_params i915 __read_mostly;
+
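A hedged sketch of how such a parameter block is typically wired up in the new i915_params.c; the defaults and permissions shown here are illustrative, not taken from this patch:

struct i915_params i915 __read_mostly = {
	.modeset = -1,
	.enable_hangcheck = true,
	.reset = true,
	/* remaining fields keep their zero-initialised defaults */
};

module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset,
	"Use kernel modesetting (0=disable, 1=on, -1=per-chip default)");

module_param_named(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");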
/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
@@ -1943,8 +2112,12 @@ extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
-void i915_handle_error(struct drm_device *dev, bool wedged);
+__printf(3, 4)
+void i915_handle_error(struct drm_device *dev, bool wedged,
+ const char *fmt, ...);
+void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
+ int new_delay);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
@@ -1955,10 +2128,15 @@ extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
+i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask);
void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
+i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask);
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -2014,22 +2192,27 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
+void i915_init_vm(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);
+#define PIN_MAPPABLE 0x1
+#define PIN_NONBLOCK 0x2
+#define PIN_GLOBAL 0x4
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
- bool map_and_fenceable,
- bool nonblocking);
-void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+ unsigned flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
-int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+ int *needs_clflush);
+
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
@@ -2096,8 +2279,10 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
}
}
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_ring_buffer *ring);
+
bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
@@ -2186,6 +2371,13 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->pin_count > 0)
+ return true;
+ return false;
+}
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
@@ -2217,54 +2409,69 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
- bool map_and_fenceable,
- bool nonblocking)
+ unsigned flags)
{
- return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
- map_and_fenceable, nonblocking);
+ return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
}
+static inline int
+i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
+{
+ return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
+}
+
+void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
+
/* i915_gem_context.c */
+#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_reset(struct drm_device *dev);
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
+int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
- struct drm_file *file, int to_id);
+ struct drm_file *file, struct i915_hw_context *to);
+struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
- kref_get(&ctx->ref);
+ if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+ kref_get(&ctx->ref);
}
static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
- kref_put(&ctx->ref, i915_gem_context_free);
+ if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+ kref_put(&ctx->ref, i915_gem_context_free);
+}
+
+static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
+{
+ return c->id == DEFAULT_CONTEXT_ID;
}
-struct i915_ctx_hang_stats * __must_check
-i915_gem_context_get_hang_stats(struct drm_device *dev,
- struct drm_file *file,
- u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-/* i915_gem_gtt.c */
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj);
+/* i915_gem_evict.c */
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+ struct i915_address_space *vm,
+ int min_size,
+ unsigned alignment,
+ unsigned cache_level,
+ unsigned flags);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+int i915_gem_evict_everything(struct drm_device *dev);
+/* i915_gem_gtt.c */
void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
@@ -2275,18 +2482,8 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
intel_gtt_chipset_flush();
}
-
-
-/* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev,
- struct i915_address_space *vm,
- int min_size,
- unsigned alignment,
- unsigned cache_level,
- bool mappable,
- bool nonblock);
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
-int i915_gem_evict_everything(struct drm_device *dev);
+int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+bool intel_enable_ppgtt(struct drm_device *dev, bool full);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
@@ -2305,7 +2502,7 @@ void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj->tiling_mode != I915_TILING_NONE;
@@ -2343,7 +2540,8 @@ static inline void i915_error_state_buf_release(
{
kfree(eb->buf);
}
-void i915_capture_error_state(struct drm_device *dev);
+void i915_capture_error_state(struct drm_device *dev, bool wedge,
+ const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
@@ -2352,6 +2550,14 @@ void i915_destroy_error_state(struct drm_device *dev);
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
+/* i915_cmd_parser.c */
+void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
+bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
+int i915_parse_cmds(struct intel_ring_buffer *ring,
+ struct drm_i915_gem_object *batch_obj,
+ u32 batch_start_offset,
+ bool is_master);
+
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
@@ -2425,10 +2631,12 @@ extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
+extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
+extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -2463,6 +2671,7 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2525,9 +2734,26 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+/* Be very careful with read/write 64-bit values. On 32-bit machines, they
+ * will be implemented using 2 32-bit writes in an arbitrary order with
+ * an arbitrary delay between them. This can cause the hardware to
+ * act upon the intermediate value, possibly leading to corruption and
+ * machine death. You have been warned.
+ */
#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
+#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
+ u32 upper = I915_READ(upper_reg); \
+ u32 lower = I915_READ(lower_reg); \
+ u32 tmp = I915_READ(upper_reg); \
+ if (upper != tmp) { \
+ upper = tmp; \
+ lower = I915_READ(lower_reg); \
+ WARN_ON(I915_READ(upper_reg) != upper); \
+ } \
+ (u64)upper << 32 | lower; })
+
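A hedged usage sketch for the new 2x32 helper; the register offsets below are placeholders standing in for any 64-bit counter exposed as two 32-bit halves:

#define EXAMPLE_COUNTER_LOW  0x10000 /* placeholder offsets, not real registers */
#define EXAMPLE_COUNTER_HIGH 0x10004

static u64 example_read_counter(struct drm_i915_private *dev_priv)
{
	/* The helper re-reads the upper half to catch a carry between
	 * the two 32-bit reads. */
	return I915_READ64_2x32(EXAMPLE_COUNTER_LOW, EXAMPLE_COUNTER_HIGH);
}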
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
@@ -2566,4 +2792,31 @@ timespec_to_jiffies_timeout(const struct timespec *value)
return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, record the timestamp (in jiffies) of
+ * when event A happened. Then, just before event B, call this function with
+ * that timestamp as the first argument and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+ unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+ /*
+ * Don't re-read the value of "jiffies" every time since it may change
+ * behind our back and break the math.
+ */
+ tmp_jiffies = jiffies;
+ target_jiffies = timestamp_jiffies +
+ msecs_to_jiffies_timeout(to_wait_ms);
+
+ if (time_after(target_jiffies, tmp_jiffies)) {
+ remaining_jiffies = target_jiffies - tmp_jiffies;
+ while (remaining_jiffies)
+ remaining_jiffies =
+ schedule_timeout_uninterruptible(remaining_jiffies);
+ }
+}
+
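A hedged example of the intended calling pattern; the 300 ms minimum off time is purely illustrative:

static void example_panel_power_cycle(void)
{
	/* Event A: record when the panel was powered off. */
	unsigned long panel_off_jiffies = jiffies;

	/* ... unrelated work may happen here ... */

	/* Event B: before powering back on, honour a hypothetical 300 ms
	 * minimum off time without sleeping longer than necessary. */
	wait_remaining_ms_from_jiffies(panel_off_jiffies, 300);
}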
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 00c836154725..6370a761d137 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,12 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
-static __must_check int
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- unsigned alignment,
- bool map_and_fenceable,
- bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -67,6 +61,7 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
@@ -204,7 +199,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- if (obj->pin_count)
+ if (i915_gem_obj_is_pinned(obj))
pinned += i915_gem_obj_ggtt_size(obj);
mutex_unlock(&dev->struct_mutex);
@@ -332,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
return 0;
}
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+ int *needs_clflush)
+{
+ int ret;
+
+ *needs_clflush = 0;
+
+ if (!obj->base.filp)
+ return -EINVAL;
+
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+ /* If we're not in the cpu read domain, set ourself into the gtt
+ * read domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will dirty the data
+ * anyway again before the next pread happens. */
+ *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+ obj->cache_level);
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
+ return ret;
+}
+
/* Per-page copy function for the shmem pread fastpath.
* Flushes invalid cachelines before reading the target if
* needs_clflush is set. */
@@ -429,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
- /* If we're not in the cpu read domain, set ourself into the gtt
- * read domain and manually flush cachelines (if required). This
- * optimizes for the case when the gpu will dirty the data
- * anyway again before the next pread happens. */
- needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
- }
-
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
offset = args->offset;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
@@ -476,7 +494,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
mutex_unlock(&dev->struct_mutex);
- if (likely(!i915_prefault_disable) && !prefaulted) {
+ if (likely(!i915.prefault_disable) && !prefaulted) {
ret = fault_in_multipages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
* its pages with the prefault and promised to write the
@@ -492,12 +510,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
mutex_lock(&dev->struct_mutex);
-next_page:
- mark_page_accessed(page);
-
if (ret)
goto out;
+next_page:
remain -= page_length;
user_data += page_length;
offset += page_length;
@@ -599,13 +615,13 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
int page_offset, page_length, ret;
- ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
if (ret)
goto out;
@@ -651,7 +667,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
}
out_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
out:
return ret;
}
@@ -677,9 +693,8 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
- ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
- user_data,
- page_length);
+ ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
+ user_data, page_length);
if (needs_clflush_after)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
@@ -813,13 +828,10 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
mutex_lock(&dev->struct_mutex);
-next_page:
- set_page_dirty(page);
- mark_page_accessed(page);
-
if (ret)
goto out;
+next_page:
remain -= page_length;
user_data += page_length;
offset += page_length;
@@ -868,7 +880,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- if (likely(!i915_prefault_disable)) {
+ if (likely(!i915.prefault_disable)) {
ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
args->size);
if (ret)
@@ -1014,7 +1026,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
struct timespec *timeout,
struct drm_i915_file_private *file_priv)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
struct timespec before, now;
@@ -1022,14 +1035,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
unsigned long timeout_expire;
int ret;
- WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
+ WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
- if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+ if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
if (file_priv)
mod_delayed_work(dev_priv->wq,
@@ -1184,7 +1197,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
*/
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
- struct drm_file *file,
+ struct drm_i915_file_private *file_priv,
bool readonly)
{
struct drm_device *dev = obj->base.dev;
@@ -1211,7 +1224,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
mutex_lock(&dev->struct_mutex);
if (ret)
return ret;
@@ -1260,7 +1273,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
- ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
+ ret = i915_gem_object_wait_rendering__nonblocking(obj,
+ file->driver_priv,
+ !write_domain);
if (ret)
goto unref;
@@ -1374,7 +1389,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
@@ -1392,6 +1407,15 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
+ /* Try to flush the object off the GPU first without holding the lock.
+ * Upon reacquiring the lock, we will perform our sanity checks and then
+ * repeat the flush holding the lock in the normal manner to catch cases
+ * where we are gazumped.
+ */
+ ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
+ if (ret)
+ goto unlock;
+
/* Access to snoopable pages through the GTT is incoherent. */
if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
ret = -EINVAL;
@@ -1399,7 +1423,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* Now bind it into the GTT if needed */
- ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
if (ret)
goto unlock;
@@ -1420,7 +1444,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
@@ -1453,6 +1477,7 @@ out:
ret = VM_FAULT_OOM;
break;
case -ENOSPC:
+ case -EFAULT:
ret = VM_FAULT_SIGBUS;
break;
default:
@@ -1501,7 +1526,8 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (!obj->fault_mappable)
return;
- drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
obj->fault_mappable = false;
}
@@ -1617,8 +1643,8 @@ i915_gem_mmap_gtt(struct drm_file *file,
}
if (obj->madv != I915_MADV_WILLNEED) {
- DRM_ERROR("Attempting to mmap a purgeable buffer\n");
- ret = -EINVAL;
+ DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
+ ret = -EFAULT;
goto out;
}
@@ -1971,8 +1997,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0;
if (obj->madv != I915_MADV_WILLNEED) {
- DRM_ERROR("Attempting to obtain a purgeable object\n");
- return -EINVAL;
+ DRM_DEBUG("Attempting to obtain a purgeable object\n");
+ return -EFAULT;
}
BUG_ON(obj->pages_pin_count);
@@ -2035,13 +2061,17 @@ static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
- struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (vma && !list_empty(&vma->mm_list))
+ list_move_tail(&vma->mm_list, &vm->inactive_list);
+ }
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -2134,10 +2164,9 @@ int __i915_add_request(struct intel_ring_buffer *ring,
struct drm_i915_gem_object *obj,
u32 *out_seqno)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position, request_start;
- int was_empty;
int ret;
request_start = intel_ring_get_tail(ring);
@@ -2188,7 +2217,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
i915_gem_context_reference(request->ctx);
request->emitted_jiffies = jiffies;
- was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
request->file_priv = NULL;
@@ -2209,13 +2237,11 @@ int __i915_add_request(struct intel_ring_buffer *ring,
if (!dev_priv->ums.mm_suspended) {
i915_queue_hangcheck(ring->dev);
- if (was_empty) {
- cancel_delayed_work_sync(&dev_priv->mm.idle_work);
- queue_delayed_work(dev_priv->wq,
- &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
- intel_mark_busy(dev_priv->dev);
- }
+ cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
+ intel_mark_busy(dev_priv->dev);
}
if (out_seqno)
@@ -2237,125 +2263,46 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
+ const struct i915_hw_context *ctx)
{
- if (acthd >= i915_gem_obj_offset(obj, vm) &&
- acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
- return true;
+ unsigned long elapsed;
- return false;
-}
+ elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
-static bool i915_head_inside_request(const u32 acthd_unmasked,
- const u32 request_start,
- const u32 request_end)
-{
- const u32 acthd = acthd_unmasked & HEAD_ADDR;
+ if (ctx->hang_stats.banned)
+ return true;
- if (request_start < request_end) {
- if (acthd >= request_start && acthd < request_end)
- return true;
- } else if (request_start > request_end) {
- if (acthd >= request_start || acthd < request_end)
+ if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+ if (!i915_gem_context_is_default(ctx)) {
+ DRM_DEBUG("context hanging too fast, banning!\n");
return true;
- }
-
- return false;
-}
-
-static struct i915_address_space *
-request_to_vm(struct drm_i915_gem_request *request)
-{
- struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
- struct i915_address_space *vm;
-
- vm = &dev_priv->gtt.base;
-
- return vm;
-}
-
-static bool i915_request_guilty(struct drm_i915_gem_request *request,
- const u32 acthd, bool *inside)
-{
- /* There is a possibility that unmasked head address
- * pointing inside the ring, matches the batch_obj address range.
- * However this is extremely unlikely.
- */
- if (request->batch_obj) {
- if (i915_head_inside_object(acthd, request->batch_obj,
- request_to_vm(request))) {
- *inside = true;
+ } else if (dev_priv->gpu_error.stop_rings == 0) {
+ DRM_ERROR("gpu hanging too fast, banning!\n");
return true;
}
}
- if (i915_head_inside_request(acthd, request->head, request->tail)) {
- *inside = false;
- return true;
- }
-
return false;
}
-static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+static void i915_set_reset_status(struct drm_i915_private *dev_priv,
+ struct i915_hw_context *ctx,
+ const bool guilty)
{
- const unsigned long elapsed = get_seconds() - hs->guilty_ts;
-
- if (hs->banned)
- return true;
-
- if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
- DRM_ERROR("context hanging too fast, declaring banned!\n");
- return true;
- }
-
- return false;
-}
+ struct i915_ctx_hang_stats *hs;
-static void i915_set_reset_status(struct intel_ring_buffer *ring,
- struct drm_i915_gem_request *request,
- u32 acthd)
-{
- struct i915_ctx_hang_stats *hs = NULL;
- bool inside, guilty;
- unsigned long offset = 0;
-
- /* Innocent until proven guilty */
- guilty = false;
-
- if (request->batch_obj)
- offset = i915_gem_obj_offset(request->batch_obj,
- request_to_vm(request));
+ if (WARN_ON(!ctx))
+ return;
- if (ring->hangcheck.action != HANGCHECK_WAIT &&
- i915_request_guilty(request, acthd, &inside)) {
- DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
- ring->name,
- inside ? "inside" : "flushing",
- offset,
- request->ctx ? request->ctx->id : 0,
- acthd);
+ hs = &ctx->hang_stats;
- guilty = true;
- }
-
- /* If contexts are disabled or this is the default context, use
- * file_priv->reset_state
- */
- if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
- hs = &request->ctx->hang_stats;
- else if (request->file_priv)
- hs = &request->file_priv->hang_stats;
-
- if (hs) {
- if (guilty) {
- hs->banned = i915_context_is_banned(hs);
- hs->batch_active++;
- hs->guilty_ts = get_seconds();
- } else {
- hs->batch_pending++;
- }
+ if (guilty) {
+ hs->banned = i915_context_is_banned(dev_priv, ctx);
+ hs->batch_active++;
+ hs->guilty_ts = get_seconds();
+ } else {
+ hs->batch_pending++;
}
}
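
The banning policy above is small enough to model on its own: a context that was already banned stays banned, and a context found guilty again within DRM_I915_CTX_BAN_PERIOD seconds of its last guilty timestamp gets banned (the default context only trips the check when hang injection via stop_rings is off). A minimal userspace sketch of that check follows; the period value and helper names here are illustrative stand-ins, not the driver's.

/* Illustrative model of the fast-hang ban check above; not kernel code. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define BAN_PERIOD_SECONDS 6    /* assumed stand-in for DRM_I915_CTX_BAN_PERIOD */

struct hang_stats {
        bool banned;            /* sticky: once banned, always banned */
        time_t guilty_ts;       /* timestamp of the last guilty verdict */
};

static bool context_is_banned(const struct hang_stats *hs, time_t now)
{
        if (hs->banned)
                return true;
        /* A second guilty hang within the ban period bans the context. */
        return (now - hs->guilty_ts) <= BAN_PERIOD_SECONDS;
}

int main(void)
{
        struct hang_stats hs = { .banned = false, .guilty_ts = 100 };

        printf("%d\n", context_is_banned(&hs, 103)); /* 1: hung again too fast */
        printf("%d\n", context_is_banned(&hs, 500)); /* 0: outside the period */
        return 0;
}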
@@ -2370,19 +2317,41 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
kfree(request);
}
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_ring_buffer *ring)
{
- u32 completed_seqno = ring->get_seqno(ring, false);
- u32 acthd = intel_ring_get_active_head(ring);
struct drm_i915_gem_request *request;
+ u32 completed_seqno;
+
+ completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
if (i915_seqno_passed(completed_seqno, request->seqno))
continue;
- i915_set_reset_status(ring, request, acthd);
+ return request;
}
+
+ return NULL;
+}
+
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_request *request;
+ bool ring_hung;
+
+ request = i915_gem_find_active_request(ring);
+
+ if (request == NULL)
+ return;
+
+ ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+
+ i915_set_reset_status(dev_priv, request->ctx, ring_hung);
+
+ list_for_each_entry_continue(request, &ring->request_list, list)
+ i915_set_reset_status(dev_priv, request->ctx, false);
}
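
i915_gem_find_active_request() returns the first request whose seqno has not yet been reported complete; everything before it has retired, everything after it is treated as innocent collateral. The seqno test is the standard wraparound-safe serial-number comparison that i915_seqno_passed() is built on. A standalone sketch of that comparison:

/* Wraparound-safe 32-bit seqno comparison, mirroring i915_seqno_passed(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        /* The signed difference handles the 32-bit wrap transparently. */
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        uint32_t completed = 0xfffffffeu;

        printf("%d\n", seqno_passed(completed, 0xfffffff0u)); /* 1: already passed */
        printf("%d\n", seqno_passed(completed, 0x00000005u)); /* 0: issued after the wrap */
        return 0;
}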
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
@@ -2456,13 +2425,15 @@ void i915_gem_reset(struct drm_device *dev)
i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_context_reset(dev);
+
i915_gem_restore_fences(dev);
}
/**
* This function clears the request list as sequence numbers are passed.
*/
-void
+static void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
@@ -2474,6 +2445,24 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
seqno = ring->get_seqno(ring, true);
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate,
+ * before we free the context associated with the requests.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+ break;
+
+ i915_gem_object_move_to_inactive(obj);
+ }
+
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2495,22 +2484,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
i915_gem_free_request(request);
}
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate.
- */
- while (!list_empty(&ring->active_list)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
-
- if (!i915_seqno_passed(seqno, obj->last_read_seqno))
- break;
-
- i915_gem_object_move_to_inactive(obj);
- }
-
if (unlikely(ring->trace_irq_seqno &&
i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
ring->irq_put(ring);
@@ -2523,7 +2496,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
bool
i915_gem_retire_requests(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
bool idle = true;
int i;
@@ -2615,7 +2588,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring = NULL;
@@ -2750,22 +2723,18 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
int i915_vma_unbind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- /* For now we only ever use 1 vma per object */
- WARN_ON(!list_is_singular(&obj->vma_list));
-
if (list_empty(&vma->vma_link))
return 0;
if (!drm_mm_node_allocated(&vma->node)) {
i915_gem_vma_destroy(vma);
-
return 0;
}
- if (obj->pin_count)
+ if (vma->pin_count)
return -EBUSY;
BUG_ON(obj->pages == NULL);
@@ -2787,15 +2756,11 @@ int i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma);
- if (obj->has_global_gtt_mapping)
- i915_gem_gtt_unbind_object(obj);
- if (obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
- obj->has_aliasing_ppgtt_mapping = 0;
- }
+ vma->unbind_vma(vma);
+
i915_gem_gtt_finish_object(obj);
- list_del(&vma->mm_list);
+ list_del_init(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
if (i915_is_ggtt(vma->vm))
obj->map_and_fenceable = true;
@@ -2817,35 +2782,15 @@ int i915_vma_unbind(struct i915_vma *vma)
return 0;
}
-/**
- * Unbinds an object from the global GTT aperture.
- */
-int
-i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_address_space *ggtt = &dev_priv->gtt.base;
-
- if (!i915_gem_obj_ggtt_bound(obj))
- return 0;
-
- if (obj->pin_count)
- return -EBUSY;
-
- BUG_ON(obj->pages == NULL);
-
- return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
-}
-
int i915_gpu_idle(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret, i;
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
- ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+ ret = i915_switch_context(ring, NULL, ring->default_context);
if (ret)
return ret;
@@ -2860,7 +2805,7 @@ int i915_gpu_idle(struct drm_device *dev)
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int fence_reg;
int fence_pitch_shift;
@@ -2912,7 +2857,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
static void i915_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
if (obj) {
@@ -2956,7 +2901,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
static void i830_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
if (obj) {
@@ -3259,18 +3204,17 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
/**
* Finds free space in the GTT aperture and binds the object there.
*/
-static int
+static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
- bool map_and_fenceable,
- bool nonblocking)
+ unsigned flags)
{
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 size, fence_size, fence_alignment, unfenced_alignment;
size_t gtt_max =
- map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
+ flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
@@ -3282,57 +3226,49 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
obj->tiling_mode, true);
unfenced_alignment =
i915_gem_get_gtt_alignment(dev,
- obj->base.size,
- obj->tiling_mode, false);
+ obj->base.size,
+ obj->tiling_mode, false);
if (alignment == 0)
- alignment = map_and_fenceable ? fence_alignment :
+ alignment = flags & PIN_MAPPABLE ? fence_alignment :
unfenced_alignment;
- if (map_and_fenceable && alignment & (fence_alignment - 1)) {
- DRM_ERROR("Invalid object alignment requested %u\n", alignment);
- return -EINVAL;
+ if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
+ DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
+ return ERR_PTR(-EINVAL);
}
- size = map_and_fenceable ? fence_size : obj->base.size;
+ size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
if (obj->base.size > gtt_max) {
- DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
obj->base.size,
- map_and_fenceable ? "mappable" : "total",
+ flags & PIN_MAPPABLE ? "mappable" : "total",
gtt_max);
- return -E2BIG;
+ return ERR_PTR(-E2BIG);
}
ret = i915_gem_object_get_pages(obj);
if (ret)
- return ret;
+ return ERR_PTR(ret);
i915_gem_object_pin_pages(obj);
- BUG_ON(!i915_is_ggtt(vm));
-
vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
+ if (IS_ERR(vma))
goto err_unpin;
- }
-
- /* For now we only ever use 1 vma per object */
- WARN_ON(!list_is_singular(&obj->vma_list));
search_free:
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level, 0, gtt_max,
- DRM_MM_SEARCH_DEFAULT);
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
if (ret) {
ret = i915_gem_evict_something(dev, vm, size, alignment,
- obj->cache_level,
- map_and_fenceable,
- nonblocking);
+ obj->cache_level, flags);
if (ret == 0)
goto search_free;
@@ -3363,19 +3299,23 @@ search_free:
obj->map_and_fenceable = mappable && fenceable;
}
- WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
+ WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+
+ trace_i915_vma_bind(vma, flags);
+ vma->bind_vma(vma, obj->cache_level,
+ flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
- trace_i915_vma_bind(vma, map_and_fenceable);
i915_gem_verify_gtt(dev);
- return 0;
+ return vma;
err_remove_node:
drm_mm_remove_node(&vma->node);
err_free_vma:
i915_gem_vma_destroy(vma);
+ vma = ERR_PTR(ret);
err_unpin:
i915_gem_object_unpin_pages(obj);
- return ret;
+ return vma;
}
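
i915_gem_object_bind_to_vm() now hands back a struct i915_vma * and reports failure through the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention, which encodes a negative errno directly in the pointer value. A self-contained userspace model of that convention; the bind_something() helper is invented purely for illustration.

/* Userspace model of ERR_PTR()/IS_ERR()/PTR_ERR() pointer-encoded errors. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO 4095  /* errnos live in the top page of the address space */

static void *ERR_PTR(long error)     { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static bool IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *bind_something(int too_big)
{
        static int vma;                 /* stand-in for a real vma object */

        if (too_big)
                return ERR_PTR(-E2BIG); /* mirrors the aperture-size check above */
        return &vma;
}

int main(void)
{
        void *vma = bind_something(1);

        if (IS_ERR(vma))
                printf("bind failed: %ld\n", PTR_ERR(vma)); /* prints -7 */
        return 0;
}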
bool
@@ -3470,7 +3410,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
- drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -3528,25 +3468,22 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct i915_vma *vma;
+ struct i915_vma *vma, *next;
int ret;
if (obj->cache_level == cache_level)
return 0;
- if (obj->pin_count) {
+ if (i915_gem_obj_is_pinned(obj)) {
DRM_DEBUG("can not change the cache level of pinned objects\n");
return -EBUSY;
}
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
ret = i915_vma_unbind(vma);
if (ret)
return ret;
-
- break;
}
}
@@ -3567,11 +3504,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return ret;
}
- if (obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(obj, cache_level);
- if (obj->has_aliasing_ppgtt_mapping)
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, cache_level);
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
+ vma->bind_vma(vma, cache_level,
+ obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3695,7 +3631,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
* subtracting the potential reference by the user, any pin_count
* remains, it must be due to another use by the display engine.
*/
- return obj->pin_count - !!obj->user_pin_count;
+ return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
}
/*
@@ -3740,7 +3676,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
if (ret)
goto err_unpin_display;
@@ -3769,7 +3705,7 @@ err_unpin_display:
void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
{
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
obj->pin_display = is_pin_display(obj);
}
@@ -3896,65 +3832,63 @@ int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
- bool map_and_fenceable,
- bool nonblocking)
+ unsigned flags)
{
struct i915_vma *vma;
int ret;
- if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
- return -EBUSY;
-
- WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
+ if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
+ return -EINVAL;
vma = i915_gem_obj_to_vma(obj, vm);
-
if (vma) {
+ if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+ return -EBUSY;
+
if ((alignment &&
vma->node.start & (alignment - 1)) ||
- (map_and_fenceable && !obj->map_and_fenceable)) {
- WARN(obj->pin_count,
+ (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+ WARN(vma->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
i915_gem_obj_offset(obj, vm), alignment,
- map_and_fenceable,
+ flags & PIN_MAPPABLE,
obj->map_and_fenceable);
ret = i915_vma_unbind(vma);
if (ret)
return ret;
+
+ vma = NULL;
}
}
- if (!i915_gem_obj_bound(obj, vm)) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
- ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
- map_and_fenceable,
- nonblocking);
- if (ret)
- return ret;
-
- if (!dev_priv->mm.aliasing_ppgtt)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
+ vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
}
- if (!obj->has_global_gtt_mapping && map_and_fenceable)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+ vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
- obj->pin_count++;
- obj->pin_mappable |= map_and_fenceable;
+ vma->pin_count++;
+ if (flags & PIN_MAPPABLE)
+ obj->pin_mappable |= true;
return 0;
}
void
-i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
- BUG_ON(obj->pin_count == 0);
- BUG_ON(!i915_gem_obj_bound_any(obj));
+ struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
- if (--obj->pin_count == 0)
+ BUG_ON(!vma);
+ BUG_ON(vma->pin_count == 0);
+ BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+
+ if (--vma->pin_count == 0)
obj->pin_mappable = false;
}
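
The pin interface above replaces the (map_and_fenceable, nonblocking) bool pair with a single flags word tested via bitwise AND, and moves the pin count from the object onto the vma. A compact model of the bool-to-bitmask side of that change; the bit values chosen here are assumptions, only the PIN_* names come from the patch.

/* Model of folding two bool parameters into a single flags bitmask. */
#include <stdbool.h>
#include <stdio.h>

#define PIN_MAPPABLE (1u << 0)  /* bit assignments assumed for illustration */
#define PIN_NONBLOCK (1u << 1)
#define PIN_GLOBAL   (1u << 2)

static unsigned int pin_flags(bool map_and_fenceable, bool nonblocking)
{
        unsigned int flags = 0;

        if (map_and_fenceable)
                flags |= PIN_MAPPABLE;
        if (nonblocking)
                flags |= PIN_NONBLOCK;
        return flags;
}

int main(void)
{
        unsigned int flags = pin_flags(true, false);

        /* Callers now test requirements individually instead of threading bools. */
        printf("mappable=%d nonblock=%d global=%d\n",
               !!(flags & PIN_MAPPABLE),
               !!(flags & PIN_NONBLOCK),
               !!(flags & PIN_GLOBAL));
        return 0;
}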
@@ -3966,6 +3900,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret;
+ if (INTEL_INFO(dev)->gen >= 6)
+ return -ENODEV;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -3977,13 +3914,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
}
if (obj->madv != I915_MADV_WILLNEED) {
- DRM_ERROR("Attempting to pin a purgeable buffer\n");
- ret = -EINVAL;
+ DRM_DEBUG("Attempting to pin a purgeable buffer\n");
+ ret = -EFAULT;
goto out;
}
if (obj->pin_filp != NULL && obj->pin_filp != file) {
- DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+ DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
@@ -3995,7 +3932,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
}
if (obj->user_pin_count == 0) {
- ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
if (ret)
goto out;
}
@@ -4030,7 +3967,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
}
if (obj->pin_filp != file) {
- DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
+ DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
@@ -4038,7 +3975,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
obj->user_pin_count--;
if (obj->user_pin_count == 0) {
obj->pin_filp = NULL;
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
}
out:
@@ -4118,7 +4055,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
- if (obj->pin_count) {
+ if (i915_gem_obj_is_pinned(obj)) {
ret = -EINVAL;
goto out;
}
@@ -4219,7 +4156,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_vma *vma, *next;
intel_runtime_pm_get(dev_priv);
@@ -4229,12 +4166,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);
- obj->pin_count = 0;
- /* NB: 0 or 1 elements */
- WARN_ON(!list_empty(&obj->vma_list) &&
- !list_is_singular(&obj->vma_list));
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
- int ret = i915_vma_unbind(vma);
+ int ret;
+
+ vma->pin_count = 0;
+ ret = i915_vma_unbind(vma);
if (WARN_ON(ret == -ERESTARTSYS)) {
bool was_interruptible;
@@ -4283,41 +4219,6 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
return NULL;
}
-static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- if (vma == NULL)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&vma->vma_link);
- INIT_LIST_HEAD(&vma->mm_list);
- INIT_LIST_HEAD(&vma->exec_list);
- vma->vm = vm;
- vma->obj = obj;
-
- /* Keep GGTT vmas first to make debug easier */
- if (i915_is_ggtt(vm))
- list_add(&vma->vma_link, &obj->vma_list);
- else
- list_add_tail(&vma->vma_link, &obj->vma_list);
-
- return vma;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- struct i915_vma *vma;
-
- vma = i915_gem_obj_to_vma(obj, vm);
- if (!vma)
- vma = __i915_gem_vma_create(obj, vm);
-
- return vma;
-}
-
void i915_gem_vma_destroy(struct i915_vma *vma)
{
WARN_ON(vma->node.allocated);
@@ -4334,7 +4235,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
int
i915_gem_suspend(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
mutex_lock(&dev->struct_mutex);
@@ -4376,7 +4277,7 @@ err:
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret;
@@ -4406,7 +4307,7 @@ int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
void i915_gem_init_swizzling(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
@@ -4494,7 +4395,7 @@ cleanup_render_ring:
int
i915_gem_init_hw(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
@@ -4508,9 +4409,15 @@ i915_gem_init_hw(struct drm_device *dev)
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
if (HAS_PCH_NOP(dev)) {
- u32 temp = I915_READ(GEN7_MSG_CTL);
- temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
- I915_WRITE(GEN7_MSG_CTL, temp);
+ if (IS_IVYBRIDGE(dev)) {
+ u32 temp = I915_READ(GEN7_MSG_CTL);
+ temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+ I915_WRITE(GEN7_MSG_CTL, temp);
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
+ temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
+ I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
+ }
}
i915_gem_init_swizzling(dev);
@@ -4523,25 +4430,23 @@ i915_gem_init_hw(struct drm_device *dev)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
/*
- * XXX: There was some w/a described somewhere suggesting loading
- * contexts before PPGTT.
+ * XXX: Contexts should only be initialized once. Doing a switch to the
+ * default context switch however is something we'd like to do after
+ * reset or thaw (the latter may not actually be necessary for HW, but
+ * goes with our code better). Context switching requires rings (for
+ * the do_switch), but before enabling PPGTT. So don't move this.
*/
- ret = i915_gem_context_init(dev);
+ ret = i915_gem_context_enable(dev_priv);
if (ret) {
- i915_gem_cleanup_ringbuffer(dev);
- DRM_ERROR("Context initialization failed %d\n", ret);
- return ret;
- }
-
- if (dev_priv->mm.aliasing_ppgtt) {
- ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
- if (ret) {
- i915_gem_cleanup_aliasing_ppgtt(dev);
- DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
- }
+ DRM_ERROR("Context enable failed %d\n", ret);
+ goto err_out;
}
return 0;
+
+err_out:
+ i915_gem_cleanup_ringbuffer(dev);
+ return ret;
}
int i915_gem_init(struct drm_device *dev)
@@ -4560,10 +4465,18 @@ int i915_gem_init(struct drm_device *dev)
i915_gem_init_global_gtt(dev);
+ ret = i915_gem_context_init(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (ret) {
- i915_gem_cleanup_aliasing_ppgtt(dev);
+ WARN_ON(dev_priv->mm.aliasing_ppgtt);
+ i915_gem_context_fini(dev);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
return ret;
}
@@ -4576,7 +4489,7 @@ int i915_gem_init(struct drm_device *dev)
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i;
@@ -4658,20 +4571,22 @@ init_ring_lists(struct intel_ring_buffer *ring)
INIT_LIST_HEAD(&ring->request_list);
}
-static void i915_init_vm(struct drm_i915_private *dev_priv,
- struct i915_address_space *vm)
+void i915_init_vm(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm)
{
+ if (!i915_is_ggtt(vm))
+ drm_mm_init(&vm->mm, vm->start, vm->total);
vm->dev = dev_priv->dev;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
INIT_LIST_HEAD(&vm->global_link);
- list_add(&vm->global_link, &dev_priv->vm_list);
+ list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
void
i915_gem_load(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int i;
dev_priv->slab =
@@ -4738,7 +4653,7 @@ i915_gem_load(struct drm_device *dev)
static int i915_gem_init_phys_object(struct drm_device *dev,
int id, int size, int align)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
int ret;
@@ -4770,7 +4685,7 @@ kfree_obj:
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
if (!dev_priv->mm.phys_objs[id - 1])
@@ -4837,7 +4752,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
int align)
{
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
int page_count;
int i;
@@ -4950,6 +4865,7 @@ i915_gem_file_idle_work_handler(struct work_struct *work)
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
+ int ret;
DRM_DEBUG_DRIVER("\n");
@@ -4959,15 +4875,18 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = file_priv;
file_priv->dev_priv = dev->dev_private;
+ file_priv->file = file;
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
INIT_DELAYED_WORK(&file_priv->mm.idle_work,
i915_gem_file_idle_work_handler);
- idr_init(&file_priv->context_idr);
+ ret = i915_gem_context_open(dev, file);
+ if (ret)
+ kfree(file_priv);
- return 0;
+ return ret;
}
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
@@ -5014,7 +4933,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
if (obj->active)
continue;
- if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+ if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
count += obj->base.size >> PAGE_SHIFT;
}
@@ -5031,7 +4950,8 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+ if (!dev_priv->mm.aliasing_ppgtt ||
+ vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
BUG_ON(list_empty(&o->vma_list));
@@ -5072,7 +4992,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+ if (!dev_priv->mm.aliasing_ppgtt ||
+ vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
BUG_ON(list_empty(&o->vma_list));
@@ -5127,7 +5048,7 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
return NULL;
vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
- if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+ if (vma->vm != obj_to_ggtt(obj))
return NULL;
return vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e08acaba5402..6043062ffce7 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -93,11 +93,63 @@
* I've seen in a spec to date, and that was a workaround for a non-shipping
* part. It should be safe to decrease this, but it's more future proof as is.
*/
-#define CONTEXT_ALIGN (64<<10)
+#define GEN6_CONTEXT_ALIGN (64<<10)
+#define GEN7_CONTEXT_ALIGN 4096
-static struct i915_hw_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct i915_hw_context *to);
+static int do_switch(struct intel_ring_buffer *ring,
+ struct i915_hw_context *to);
+
+static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &ppgtt->base;
+
+ if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
+ (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
+ ppgtt->base.cleanup(&ppgtt->base);
+ return;
+ }
+
+ /*
+ * Make sure vmas are unbound before we take down the drm_mm
+ *
+ * FIXME: Proper refcounting should take care of this, this shouldn't be
+ * needed at all.
+ */
+ if (!list_empty(&vm->active_list)) {
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &vm->active_list, mm_list)
+ if (WARN_ON(list_empty(&vma->vma_link) ||
+ list_is_singular(&vma->vma_link)))
+ break;
+
+ i915_gem_evict_vm(&ppgtt->base, true);
+ } else {
+ i915_gem_retire_requests(dev);
+ i915_gem_evict_vm(&ppgtt->base, false);
+ }
+
+ ppgtt->base.cleanup(&ppgtt->base);
+}
+
+static void ppgtt_release(struct kref *kref)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(kref, struct i915_hw_ppgtt, ref);
+
+ do_ppgtt_cleanup(ppgtt);
+ kfree(ppgtt);
+}
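
ppgtt_release() is wired up as the kref release callback below, so the address space is torn down only when the last holder (a context, or the aliasing-PPGTT user) drops its reference. A non-atomic, standalone model of that last-reference-runs-the-destructor pattern; the kernel's kref is atomic, this sketch is not.

/* Simplified, non-atomic model of the kref_get()/kref_put() release pattern. */
#include <stdio.h>

struct ref {
        int count;
        void (*release)(struct ref *ref);
};

static void ref_get(struct ref *ref)
{
        ref->count++;
}

static void ref_put(struct ref *ref)
{
        /* The release callback runs exactly once, on the final put. */
        if (--ref->count == 0)
                ref->release(ref);
}

static void fake_ppgtt_release(struct ref *ref)
{
        printf("tearing down ppgtt at %p\n", (void *)ref);
}

int main(void)
{
        struct ref ppgtt_ref = { .count = 1, .release = fake_ppgtt_release };

        ref_get(&ppgtt_ref);    /* a second context shares the address space */
        ref_put(&ppgtt_ref);    /* first holder gone: nothing is freed yet */
        ref_put(&ppgtt_ref);    /* last holder gone: the release runs */
        return 0;
}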
+
+static size_t get_context_alignment(struct drm_device *dev)
+{
+ if (IS_GEN6(dev))
+ return GEN6_CONTEXT_ALIGN;
+
+ return GEN7_CONTEXT_ALIGN;
+}
static int get_context_size(struct drm_device *dev)
{
@@ -131,14 +183,44 @@ void i915_gem_context_free(struct kref *ctx_ref)
{
struct i915_hw_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
+ struct i915_hw_ppgtt *ppgtt = NULL;
- list_del(&ctx->link);
+ /* We refcount even the aliasing PPGTT to keep the code symmetric */
+ if (USES_PPGTT(ctx->obj->base.dev))
+ ppgtt = ctx_to_ppgtt(ctx);
+
+ /* XXX: Free up the object before tearing down the address space, in
+ * case we're bound in the PPGTT */
drm_gem_object_unreference(&ctx->obj->base);
+
+ if (ppgtt)
+ kref_put(&ppgtt->ref, ppgtt_release);
+ list_del(&ctx->link);
kfree(ctx);
}
+static struct i915_hw_ppgtt *
+create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt;
+ int ret;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = i915_gem_init_ppgtt(dev, ppgtt);
+ if (ret) {
+ kfree(ppgtt);
+ return ERR_PTR(ret);
+ }
+
+ ppgtt->ctx = ctx;
+ return ppgtt;
+}
+
static struct i915_hw_context *
-create_hw_context(struct drm_device *dev,
+__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -166,18 +248,13 @@ create_hw_context(struct drm_device *dev,
goto err_out;
}
- /* The ring associated with the context object is handled by the normal
- * object tracking code. We give an initial ring value simple to pass an
- * assertion in the context switch code.
- */
- ctx->ring = &dev_priv->ring[RCS];
list_add_tail(&ctx->link, &dev_priv->context_list);
/* Default context will never have a file_priv */
if (file_priv == NULL)
return ctx;
- ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
+ ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
GFP_KERNEL);
if (ret < 0)
goto err_out;
@@ -196,67 +273,136 @@ err_out:
return ERR_PTR(ret);
}
-static inline bool is_default_context(struct i915_hw_context *ctx)
-{
- return (ctx == ctx->ring->default_context);
-}
-
/**
* The default context needs to exist per ring that uses contexts. It stores the
* context state of the GPU for applications that don't utilize HW contexts, as
* well as an idle case.
*/
-static int create_default_context(struct drm_i915_private *dev_priv)
+static struct i915_hw_context *
+i915_gem_create_context(struct drm_device *dev,
+ struct drm_i915_file_private *file_priv,
+ bool create_vm)
{
+ const bool is_global_default_ctx = file_priv == NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
- int ret;
+ int ret = 0;
- BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- ctx = create_hw_context(dev_priv->dev, NULL);
+ ctx = __create_hw_context(dev, file_priv);
if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- /* We may need to do things with the shrinker which require us to
- * immediately switch back to the default context. This can cause a
- * problem as pinning the default context also requires GTT space which
- * may not be available. To avoid this we always pin the
- * default context.
- */
- ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
- if (ret) {
- DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
- goto err_destroy;
- }
+ return ctx;
- ret = do_switch(ctx);
- if (ret) {
- DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
- goto err_unpin;
+ if (is_global_default_ctx) {
+ /* We may need to do things with the shrinker which
+ * require us to immediately switch back to the default
+ * context. This can cause a problem as pinning the
+ * default context also requires GTT space which may not
+ * be available. To avoid this we always pin the default
+ * context.
+ */
+ ret = i915_gem_obj_ggtt_pin(ctx->obj,
+ get_context_alignment(dev), 0);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
+ goto err_destroy;
+ }
}
- dev_priv->ring[RCS].default_context = ctx;
+ if (create_vm) {
+ struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
+
+ if (IS_ERR_OR_NULL(ppgtt)) {
+ DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
+ PTR_ERR(ppgtt));
+ ret = PTR_ERR(ppgtt);
+ goto err_unpin;
+ } else
+ ctx->vm = &ppgtt->base;
+
+ /* This case is reserved for the global default context and
+ * should only happen once. */
+ if (is_global_default_ctx) {
+ if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
+ ret = -EEXIST;
+ goto err_unpin;
+ }
+
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+ }
+ } else if (USES_PPGTT(dev)) {
+ /* For platforms which only have aliasing PPGTT, we fake the
+ * address space and refcounting. */
+ ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
+ kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
+ } else
+ ctx->vm = &dev_priv->gtt.base;
- DRM_DEBUG_DRIVER("Default HW context loaded\n");
- return 0;
+ return ctx;
err_unpin:
- i915_gem_object_unpin(ctx->obj);
+ if (is_global_default_ctx)
+ i915_gem_object_ggtt_unpin(ctx->obj);
err_destroy:
i915_gem_context_unreference(ctx);
- return ret;
+ return ERR_PTR(ret);
+}
+
+void i915_gem_context_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ if (!HAS_HW_CONTEXTS(dev))
+ return;
+
+ /* Prevent the hardware from restoring the last context (which hung) on
+ * the next switch */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct i915_hw_context *dctx;
+ if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+ continue;
+
+ /* Do a fake switch to the default context */
+ ring = &dev_priv->ring[i];
+ dctx = ring->default_context;
+ if (WARN_ON(!dctx))
+ continue;
+
+ if (!ring->last_context)
+ continue;
+
+ if (ring->last_context == dctx)
+ continue;
+
+ if (i == RCS) {
+ WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+ get_context_alignment(dev), 0));
+ /* Fake a finish/inactive */
+ dctx->obj->base.write_domain = 0;
+ dctx->obj->active = 0;
+ }
+
+ i915_gem_context_unreference(ring->last_context);
+ i915_gem_context_reference(dctx);
+ ring->last_context = dctx;
+ }
}
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
+ struct intel_ring_buffer *ring;
+ int i;
if (!HAS_HW_CONTEXTS(dev))
return 0;
- /* If called from reset, or thaw... we've been here already */
- if (dev_priv->ring[RCS].default_context)
+ /* Init should only be called once per module load. Eventually the
+ * restriction on the context_disabled check can be loosened. */
+ if (WARN_ON(dev_priv->ring[RCS].default_context))
return 0;
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
@@ -266,11 +412,23 @@ int i915_gem_context_init(struct drm_device *dev)
return -E2BIG;
}
- ret = create_default_context(dev_priv);
- if (ret) {
- DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
- ret);
- return ret;
+ dev_priv->ring[RCS].default_context =
+ i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+
+ if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
+ DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
+ PTR_ERR(dev_priv->ring[RCS].default_context));
+ return PTR_ERR(dev_priv->ring[RCS].default_context);
+ }
+
+ for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
+ if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+ continue;
+
+ ring = &dev_priv->ring[i];
+
+ /* NB: RCS will hold a ref for all rings */
+ ring->default_context = dev_priv->ring[RCS].default_context;
}
DRM_DEBUG_DRIVER("HW context support initialized\n");
@@ -281,6 +439,7 @@ void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
+ int i;
if (!HAS_HW_CONTEXTS(dev))
return;
@@ -300,59 +459,129 @@ void i915_gem_context_fini(struct drm_device *dev)
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
WARN_ON(dctx->obj->active);
- i915_gem_object_unpin(dctx->obj);
+ i915_gem_object_ggtt_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
+ dev_priv->ring[RCS].last_context = NULL;
}
- i915_gem_object_unpin(dctx->obj);
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_ring_buffer *ring = &dev_priv->ring[i];
+ if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+ continue;
+
+ if (ring->last_context)
+ i915_gem_context_unreference(ring->last_context);
+
+ ring->default_context = NULL;
+ ring->last_context = NULL;
+ }
+
+ i915_gem_object_ggtt_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
- dev_priv->ring[RCS].default_context = NULL;
- dev_priv->ring[RCS].last_context = NULL;
+ dev_priv->mm.aliasing_ppgtt = NULL;
+}
+
+int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+{
+ struct intel_ring_buffer *ring;
+ int ret, i;
+
+ if (!HAS_HW_CONTEXTS(dev_priv->dev))
+ return 0;
+
+ /* This is the only place the aliasing PPGTT gets enabled, which means
+ * it has to happen before we bail on reset */
+ if (dev_priv->mm.aliasing_ppgtt) {
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ ppgtt->enable(ppgtt);
+ }
+
+ /* FIXME: We should make this work, even in reset */
+ if (i915_reset_in_progress(&dev_priv->gpu_error))
+ return 0;
+
+ BUG_ON(!dev_priv->ring[RCS].default_context);
+
+ for_each_ring(ring, dev_priv, i) {
+ ret = do_switch(ring, ring->default_context);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int context_idr_cleanup(int id, void *p, void *data)
{
struct i915_hw_context *ctx = p;
- BUG_ON(id == DEFAULT_CONTEXT_ID);
+ /* Ignore the default context because close will handle it */
+ if (i915_gem_context_is_default(ctx))
+ return 0;
i915_gem_context_unreference(ctx);
return 0;
}
-struct i915_ctx_hang_stats *
-i915_gem_context_get_hang_stats(struct drm_device *dev,
- struct drm_file *file,
- u32 id)
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_context *ctx;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- if (id == DEFAULT_CONTEXT_ID)
- return &file_priv->hang_stats;
+ if (!HAS_HW_CONTEXTS(dev)) {
+ /* Cheat for hang stats */
+ file_priv->private_default_ctx =
+ kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
- if (!HAS_HW_CONTEXTS(dev))
- return ERR_PTR(-ENOENT);
+ if (file_priv->private_default_ctx == NULL)
+ return -ENOMEM;
- ctx = i915_gem_context_get(file->driver_priv, id);
- if (ctx == NULL)
- return ERR_PTR(-ENOENT);
+ file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
+ return 0;
+ }
+
+ idr_init(&file_priv->context_idr);
- return &ctx->hang_stats;
+ mutex_lock(&dev->struct_mutex);
+ file_priv->private_default_ctx =
+ i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+ mutex_unlock(&dev->struct_mutex);
+
+ if (IS_ERR(file_priv->private_default_ctx)) {
+ idr_destroy(&file_priv->context_idr);
+ return PTR_ERR(file_priv->private_default_ctx);
+ }
+
+ return 0;
}
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ if (!HAS_HW_CONTEXTS(dev)) {
+ kfree(file_priv->private_default_ctx);
+ return;
+ }
+
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+ i915_gem_context_unreference(file_priv->private_default_ctx);
idr_destroy(&file_priv->context_idr);
}
-static struct i915_hw_context *
+struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
- return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+ struct i915_hw_context *ctx;
+
+ if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
+ return file_priv->private_default_ctx;
+
+ ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+ if (!ctx)
+ return ERR_PTR(-ENOENT);
+
+ return ctx;
}
static inline int
@@ -390,7 +619,10 @@ mi_set_context(struct intel_ring_buffer *ring,
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
hw_flags);
- /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
+ /*
+ * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+ * WaMiSetContext_Hang:snb,ivb,vlv
+ */
intel_ring_emit(ring, MI_NOOP);
if (IS_GEN7(ring->dev))
@@ -403,21 +635,30 @@ mi_set_context(struct intel_ring_buffer *ring,
return ret;
}
-static int do_switch(struct i915_hw_context *to)
+static int do_switch(struct intel_ring_buffer *ring,
+ struct i915_hw_context *to)
{
- struct intel_ring_buffer *ring = to->ring;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_context *from = ring->last_context;
+ struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
u32 hw_flags = 0;
int ret, i;
- BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
+ if (from != NULL && ring == &dev_priv->ring[RCS]) {
+ BUG_ON(from->obj == NULL);
+ BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+ }
- if (from == to && !to->remap_slice)
+ if (from == to && from->last_ring == ring && !to->remap_slice)
return 0;
- ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
- if (ret)
- return ret;
+ /* Trying to pin first makes error handling easier. */
+ if (ring == &dev_priv->ring[RCS]) {
+ ret = i915_gem_obj_ggtt_pin(to->obj,
+ get_context_alignment(ring->dev), 0);
+ if (ret)
+ return ret;
+ }
/*
* Pin can switch back to the default context if we end up calling into
@@ -426,6 +667,18 @@ static int do_switch(struct i915_hw_context *to)
*/
from = ring->last_context;
+ if (USES_FULL_PPGTT(ring->dev)) {
+ ret = ppgtt->switch_mm(ppgtt, ring, false);
+ if (ret)
+ goto unpin_out;
+ }
+
+ if (ring != &dev_priv->ring[RCS]) {
+ if (from)
+ i915_gem_context_unreference(from);
+ goto done;
+ }
+
/*
* Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
@@ -435,22 +688,21 @@ static int do_switch(struct i915_hw_context *to)
* XXX: We need a real interface to do this instead of trickery.
*/
ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
- if (ret) {
- i915_gem_object_unpin(to->obj);
- return ret;
- }
+ if (ret)
+ goto unpin_out;
- if (!to->obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+ if (!to->obj->has_global_gtt_mapping) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+ &dev_priv->gtt.base);
+ vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+ }
- if (!to->is_initialized || is_default_context(to))
+ if (!to->is_initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
ret = mi_set_context(ring, to, hw_flags);
- if (ret) {
- i915_gem_object_unpin(to->obj);
- return ret;
- }
+ if (ret)
+ goto unpin_out;
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
@@ -484,22 +736,30 @@ static int do_switch(struct i915_hw_context *to)
BUG_ON(from->obj->ring != ring);
/* obj is kept alive until the next request by its active ref */
- i915_gem_object_unpin(from->obj);
+ i915_gem_object_ggtt_unpin(from->obj);
i915_gem_context_unreference(from);
}
+ to->is_initialized = true;
+
+done:
i915_gem_context_reference(to);
ring->last_context = to;
- to->is_initialized = true;
+ to->last_ring = ring;
return 0;
+
+unpin_out:
+ if (ring->id == RCS)
+ i915_gem_object_ggtt_unpin(to->obj);
+ return ret;
}
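
do_switch() now pins the incoming context object first and routes every later failure through the unpin_out label, the usual acquire-then-goto-unwind error-handling shape in kernel C. In miniature, with invented helper names:

/* Miniature of the pin-first, goto-unwind error handling used in do_switch(). */
#include <errno.h>
#include <stdio.h>

static int pin(void)             { printf("pin\n");   return 0; }
static void unpin(void)          { printf("unpin\n"); }
static int emit_switch(int fail) { return fail ? -EIO : 0; }

static int do_switch_like(int fail_midway)
{
        int ret;

        ret = pin();                    /* acquire first: nothing to undo on failure */
        if (ret)
                return ret;

        ret = emit_switch(fail_midway); /* later failures fall through to the unwind */
        if (ret)
                goto unpin_out;

        return 0;

unpin_out:
        unpin();
        return ret;
}

int main(void)
{
        printf("ok: %d\n", do_switch_like(0));
        printf("fail: %d\n", do_switch_like(1));
        return 0;
}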
/**
* i915_switch_context() - perform a GPU context switch.
* @ring: ring for which we'll execute the context switch
* @file_priv: file_priv associated with the context, may be NULL
- * @id: context id number
+ * @to: the context to switch to
*
* The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
@@ -508,31 +768,21 @@ static int do_switch(struct i915_hw_context *to)
*/
int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_file *file,
- int to_id)
+ struct i915_hw_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct i915_hw_context *to;
-
- if (!HAS_HW_CONTEXTS(ring->dev))
- return 0;
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- if (ring != &dev_priv->ring[RCS])
- return 0;
-
- if (to_id == DEFAULT_CONTEXT_ID) {
- to = ring->default_context;
- } else {
- if (file == NULL)
- return -EINVAL;
+ BUG_ON(file && to == NULL);
- to = i915_gem_context_get(file->driver_priv, to_id);
- if (to == NULL)
- return -ENOENT;
+ /* We have the fake context */
+ if (!HAS_HW_CONTEXTS(ring->dev)) {
+ ring->last_context = to;
+ return 0;
}
- return do_switch(to);
+ return do_switch(ring, to);
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -543,9 +793,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct i915_hw_context *ctx;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
if (!HAS_HW_CONTEXTS(dev))
return -ENODEV;
@@ -553,7 +800,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- ctx = create_hw_context(dev, file_priv);
+ ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -572,17 +819,17 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct i915_hw_context *ctx;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
+ if (args->ctx_id == DEFAULT_CONTEXT_ID)
+ return -ENOENT;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ctx = i915_gem_context_get(file_priv, args->ctx_id);
- if (!ctx) {
+ if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
- return -ENOENT;
+ return PTR_ERR(ctx);
}
idr_remove(&ctx->file_priv->context_idr, ctx->id);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 775d506b3208..f462d1b51d97 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -34,7 +34,7 @@ int
i915_verify_lists(struct drm_device *dev)
{
static int warned;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int err = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 2ca280f9ee53..75fca63dc8c1 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -36,7 +36,7 @@
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
- if (vma->obj->pin_count)
+ if (vma->pin_count)
return false;
if (WARN_ON(!list_empty(&vma->exec_list)))
@@ -46,18 +46,37 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
return drm_mm_scan_add_block(&vma->node);
}
+/**
+ * i915_gem_evict_something - Evict vmas to make room for binding a new one
+ * @dev: drm_device
+ * @vm: address space to evict from
+ * @min_size: size of the desired free space
+ * @alignment: alignment constraint of the desired free space
+ * @cache_level: cache_level for the desired space
+ * @flags: eviction flags: PIN_MAPPABLE requires the free space to be reachable
+ *         through the mappable aperture, PIN_NONBLOCK forbids idling the GPU or
+ *         evicting active objects
+ *
+ * This function will try to evict vmas until a free space satisfying the
+ * requirements is found. Callers must check first whether any such hole exists
+ * already before calling this function.
+ *
+ * This function is used by the object/vma binding code.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
+ */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
int min_size, unsigned alignment, unsigned cache_level,
- bool mappable, bool nonblocking)
+ unsigned flags)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
int ret = 0;
int pass = 0;
- trace_i915_gem_evict(dev, min_size, alignment, mappable);
+ trace_i915_gem_evict(dev, min_size, alignment, flags);
/*
* The goal is to evict objects and amalgamate space in LRU order.
@@ -83,7 +102,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
*/
INIT_LIST_HEAD(&unwind_list);
- if (mappable) {
+ if (flags & PIN_MAPPABLE) {
BUG_ON(!i915_is_ggtt(vm));
drm_mm_init_scan_with_range(&vm->mm, min_size,
alignment, cache_level, 0,
@@ -98,7 +117,7 @@ search_again:
goto found;
}
- if (nonblocking)
+ if (flags & PIN_NONBLOCK)
goto none;
/* Now merge in the soon-to-be-expired objects... */
@@ -122,7 +141,7 @@ none:
/* Can we unpin some objects such as idle hw contents,
* or pending flips?
*/
- if (nonblocking)
+ if (flags & PIN_NONBLOCK)
return -ENOSPC;
/* Only idle the GPU and repeat the search once */
@@ -177,19 +196,19 @@ found:
}
/**
- * i915_gem_evict_vm - Try to free up VM space
+ * i915_gem_evict_vm - Evict all idle vmas from a vm
*
- * @vm: Address space to evict from
+ * @vm: Address space to cleanse
* @do_idle: Boolean directing whether to idle first.
*
- * VM eviction is about freeing up virtual address space. If one wants fine
- * grained eviction, they should see evict something for more details. In terms
- * of freeing up actual system memory, this function may not accomplish the
- * desired result. An object may be shared in multiple address space, and this
- * function will not assert those objects be freed.
+ * This function evicts all idle vmas from a vm. If all unpinned vmas should be
+ * evicted, @do_idle needs to be set to true.
*
- * Using do_idle will result in a more complete eviction because it retires, and
- * inactivates current BOs.
+ * This is used by the execbuf code as a last-ditch effort to defragment the
+ * address space.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
*/
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
@@ -207,16 +226,24 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
}
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
- if (vma->obj->pin_count == 0)
+ if (vma->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
return 0;
}
+/**
+ * i915_gem_evict_everything - Try to evict all objects
+ * @dev: Device to evict objects for
+ *
+ * This functions tries to evict all gem objects from all address spaces. Used
+ * by the shrinker as a last-ditch effort and for suspend, before releasing the
+ * backing storage of all unbound objects.
+ */
int
i915_gem_evict_everything(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm;
bool lists_empty = true;
int ret;
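
The kerneldoc added above pins down the contract of the eviction helpers: binding code first tries to allocate a node itself and only falls back to i915_gem_evict_something(), retrying the allocation after a successful eviction (the search_free loop in i915_gem_object_bind_to_vm() earlier in this patch). A distilled sketch of that allocate/evict/retry flow, with invented stand-in helpers:

/* Sketch of the allocate -> evict -> retry flow used by the binding code. */
#include <errno.h>
#include <stdio.h>

static int free_slots;          /* stand-in for available address space */

static int try_allocate(void)
{
        if (free_slots == 0)
                return -ENOSPC;
        free_slots--;
        return 0;
}

static int evict_something(void)
{
        free_slots++;           /* pretend eviction always frees enough room */
        return 0;
}

static int bind(void)
{
        int ret;

search_free:
        ret = try_allocate();
        if (ret == -ENOSPC) {
                ret = evict_something();
                if (ret == 0)
                        goto search_free;
        }
        return ret;
}

int main(void)
{
        free_slots = 0;
        printf("bind() = %d\n", bind());        /* succeeds after one eviction */
        return 0;
}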
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d269ecf46e26..7447160155a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -91,6 +91,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
struct i915_address_space *vm,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
struct drm_i915_gem_object *obj;
struct list_head objects;
int i, ret;
@@ -125,6 +126,20 @@ eb_lookup_vmas(struct eb_vmas *eb,
i = 0;
while (!list_empty(&objects)) {
struct i915_vma *vma;
+ struct i915_address_space *bind_vm = vm;
+
+ if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
+ USES_FULL_PPGTT(vm->dev)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* If we have secure dispatch, or the userspace assures us that
+ * they know what they're doing, use the GGTT VM.
+ */
+ if (((args->flags & I915_EXEC_SECURE) &&
+ (i == (args->buffer_count - 1))))
+ bind_vm = &dev_priv->gtt.base;
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
@@ -138,7 +153,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
@@ -217,7 +232,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
i915_gem_object_unpin_fence(obj);
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
- i915_gem_object_unpin(obj);
+ vma->pin_count--;
entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
@@ -327,8 +342,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
- struct drm_i915_gem_relocation_entry *reloc,
- struct i915_address_space *vm)
+ struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
@@ -352,8 +366,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
!target_i915_obj->has_global_gtt_mapping)) {
- i915_gem_gtt_bind_object(target_i915_obj,
- target_i915_obj->cache_level);
+ struct i915_vma *vma =
+ list_first_entry(&target_i915_obj->vma_list,
+ typeof(*vma), vma_link);
+ vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
}
/* Validate that the target is in a valid r/w GPU domain */
@@ -451,8 +467,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
do {
u64 offset = r->presumed_offset;
- ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
- vma->vm);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
if (ret)
return ret;
@@ -481,8 +496,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
- vma->vm);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
if (ret)
return ret;
}
@@ -527,21 +541,26 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_ring_buffer *ring,
bool *need_reloc)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj = vma->obj;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
- bool need_fence, need_mappable;
- struct drm_i915_gem_object *obj = vma->obj;
+ bool need_fence;
+ unsigned flags;
int ret;
+ flags = 0;
+
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(vma);
+ if (need_fence || need_reloc_mappable(vma))
+ flags |= PIN_MAPPABLE;
- ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
- false);
+ if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+ flags |= PIN_GLOBAL;
+
+ ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
if (ret)
return ret;
@@ -560,14 +579,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
}
}
- /* Ensure ppgtt mapping exists if needed */
- if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, obj->cache_level);
-
- obj->has_aliasing_ppgtt_mapping = 1;
- }
-
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start;
*need_reloc = true;
@@ -578,10 +589,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
}
- if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
- !obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
-
return 0;
}
@@ -891,7 +898,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
- if (likely(!i915_prefault_disable)) {
+ if (likely(!i915.prefault_disable)) {
if (fault_in_multipages_readable(ptr, length))
return -EFAULT;
}
@@ -900,22 +907,27 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
return 0;
}
-static int
+static struct i915_hw_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
- const u32 ctx_id)
+ struct intel_ring_buffer *ring, const u32 ctx_id)
{
+ struct i915_hw_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
- hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
- if (IS_ERR(hs))
- return PTR_ERR(hs);
+ if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+ return ERR_PTR(-EINVAL);
+
+ ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+ if (IS_ERR(ctx))
+ return ctx;
+ hs = &ctx->hang_stats;
if (hs->banned) {
DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
- return -EIO;
+ return ERR_PTR(-EIO);
}
- return 0;
+ return ctx;
}
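
i915_gem_validate_context() now hands back the context itself (or an error pointer) rather than an errno, so its caller in i915_gem_do_execbuffer() below switches to the IS_ERR()/PTR_ERR() idiom. A standalone sketch of that idiom follows, with a deliberately simplified re-implementation of the linux/err.h helpers; it is illustrative only and not part of the patch.

/* Standalone sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom the new
 * return type relies on (simplified stand-in for linux/err.h). */
#include <assert.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *ctx = ERR_PTR(-22 /* -EINVAL */);

	/* Caller pattern: check IS_ERR(), then recover the errno. */
	assert(IS_ERR(ctx) && PTR_ERR(ctx) == -22);
	return 0;
}
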
static void
@@ -939,7 +951,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
- if (obj->pin_count) /* check for potential scanout */
+ /* check for potential scanout */
+ if (i915_gem_obj_ggtt_bound(obj) &&
+ i915_gem_obj_to_ggtt(obj)->pin_count)
intel_mark_fb_busy(obj, ring);
}
@@ -964,7 +978,7 @@ static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
@@ -989,16 +1003,17 @@ static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec,
- struct i915_address_space *vm)
+ struct drm_i915_gem_exec_object2 *exec)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
+ struct i915_hw_context *ctx;
+ struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
- u32 exec_start, exec_len;
+ u32 exec_start = args->batch_start_offset, exec_len;
u32 mask, flags;
int ret, mode, i;
bool need_relocs;
@@ -1020,41 +1035,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
flags |= I915_DISPATCH_PINNED;
- switch (args->flags & I915_EXEC_RING_MASK) {
- case I915_EXEC_DEFAULT:
- case I915_EXEC_RENDER:
- ring = &dev_priv->ring[RCS];
- break;
- case I915_EXEC_BSD:
- ring = &dev_priv->ring[VCS];
- if (ctx_id != DEFAULT_CONTEXT_ID) {
- DRM_DEBUG("Ring %s doesn't support contexts\n",
- ring->name);
- return -EPERM;
- }
- break;
- case I915_EXEC_BLT:
- ring = &dev_priv->ring[BCS];
- if (ctx_id != DEFAULT_CONTEXT_ID) {
- DRM_DEBUG("Ring %s doesn't support contexts\n",
- ring->name);
- return -EPERM;
- }
- break;
- case I915_EXEC_VEBOX:
- ring = &dev_priv->ring[VECS];
- if (ctx_id != DEFAULT_CONTEXT_ID) {
- DRM_DEBUG("Ring %s doesn't support contexts\n",
- ring->name);
- return -EPERM;
- }
- break;
-
- default:
+ if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
+
+ if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
+ ring = &dev_priv->ring[RCS];
+ else
+ ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
+
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
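
The table-free lookup above works because the uapi ring selectors are numbered one past the ring array indices, with I915_EXEC_DEFAULT falling back to the render ring. A standalone sketch checking that mapping; the enum values are reproduced from the i915 uapi and ring ids of this era, not introduced by this patch:

/* Standalone check of the flag-to-ring-index mapping used above. */
#include <assert.h>

enum { RCS = 0, VCS, BCS, VECS };           /* dev_priv->ring[] indices */
enum { I915_EXEC_DEFAULT = 0, I915_EXEC_RENDER, I915_EXEC_BSD,
       I915_EXEC_BLT, I915_EXEC_VEBOX };    /* uapi ring selectors */

static int ring_index(unsigned int exec_flag)
{
	return exec_flag == I915_EXEC_DEFAULT ? RCS : (int)exec_flag - 1;
}

int main(void)
{
	assert(ring_index(I915_EXEC_RENDER) == RCS);
	assert(ring_index(I915_EXEC_BSD) == VCS);
	assert(ring_index(I915_EXEC_BLT) == BCS);
	assert(ring_index(I915_EXEC_VEBOX) == VECS);
	return 0;
}
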
@@ -1136,11 +1127,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
- ret = i915_gem_validate_context(dev, file, ctx_id);
- if (ret) {
+ ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+ if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
+ ret = PTR_ERR(ctx);
goto pre_mutex_err;
- }
+ }
+
+ i915_gem_context_reference(ctx);
+
+ vm = ctx->vm;
+ if (!USES_FULL_PPGTT(dev))
+ vm = &dev_priv->gtt.base;
eb = eb_create(args);
if (eb == NULL) {
@@ -1184,17 +1182,46 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+ if (i915_needs_cmd_parser(ring)) {
+ ret = i915_parse_cmds(ring,
+ batch_obj,
+ args->batch_start_offset,
+ file->is_master);
+ if (ret)
+ goto err;
+
+ /*
+ * XXX: Actually do this when enabling batch copy...
+ *
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
+ * from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically don't
+ * want that set when the command parser is enabled.
+ */
+ }
+
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
- if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+ if (flags & I915_DISPATCH_SECURE &&
+ !batch_obj->has_global_gtt_mapping) {
+ /* When we have multiple VMs, we'll need to make sure that we
+ * allocate space first */
+ struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
+ BUG_ON(!vma);
+ vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+ }
+
+ if (flags & I915_DISPATCH_SECURE)
+ exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+ else
+ exec_start += i915_gem_obj_offset(batch_obj, vm);
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
if (ret)
goto err;
- ret = i915_switch_context(ring, file, ctx_id);
+ ret = i915_switch_context(ring, file, ctx);
if (ret)
goto err;
@@ -1219,8 +1246,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- exec_start = i915_gem_obj_offset(batch_obj, vm) +
- args->batch_start_offset;
+
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
@@ -1249,6 +1275,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:
+ /* the request owns the ref now */
+ i915_gem_context_unreference(ctx);
eb_destroy(eb);
mutex_unlock(&dev->struct_mutex);
@@ -1270,7 +1298,6 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1326,8 +1353,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
- ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
- &dev_priv->gtt.base);
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
@@ -1353,7 +1379,6 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
@@ -1384,8 +1409,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EFAULT;
}
- ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
- &dev_priv->gtt.base);
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d278be110805..ab5e93c30aa2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1,5 +1,6 @@
/*
* Copyright © 2010 Daniel Vetter
+ * Copyright © 2011-2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,12 +23,38 @@
*
*/
+#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
+
+bool intel_enable_ppgtt(struct drm_device *dev, bool full)
+{
+ if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+ return false;
+
+ if (i915.enable_ppgtt == 1 && full)
+ return false;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+ DRM_INFO("Disabling PPGTT because VT-d is on\n");
+ return false;
+ }
+#endif
+
+ /* Full ppgtt disabled by default for now due to issues. */
+ if (full)
+ return false; /* HAS_PPGTT(dev) */
+ else
+ return HAS_ALIASING_PPGTT(dev);
+}
+
#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
typedef uint64_t gen8_gtt_pte_t;
@@ -63,13 +90,31 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
-#define GEN8_LEGACY_PDPS 4
+
+/* GEN8 legacy style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 | 11:0
+ * PDPE | PDE | PTE | offset
+ * The difference compared to a normal x86 3 level page table is that the
+ * PDPEs are programmed via register.
+ */
+#define GEN8_PDPE_SHIFT 30
+#define GEN8_PDPE_MASK 0x3
+#define GEN8_PDE_SHIFT 21
+#define GEN8_PDE_MASK 0x1ff
+#define GEN8_PTE_SHIFT 12
+#define GEN8_PTE_MASK 0x1ff
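
As a quick illustration of the 3-level decode described above, and of the shifts and masks consumed by gen8_ppgtt_clear_range() and gen8_ppgtt_insert_entries() further down, here is a standalone sketch (not part of the patch) splitting one example address:

/* Standalone sketch: decode a GPU virtual address with the layout above.
 * 0x40201000 splits into pdpe = 1, pde = 1, pte = 1, offset = 0. */
#include <stdint.h>
#include <stdio.h>

#define GEN8_PDPE_SHIFT 30
#define GEN8_PDPE_MASK  0x3
#define GEN8_PDE_SHIFT  21
#define GEN8_PDE_MASK   0x1ff
#define GEN8_PTE_SHIFT  12
#define GEN8_PTE_MASK   0x1ff

int main(void)
{
	uint64_t start = 0x40201000ULL;
	unsigned int pdpe = (start >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
	unsigned int pde  = (start >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK;
	unsigned int pte  = (start >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK;

	printf("pdpe=%u pde=%u pte=%u offset=%llu\n",
	       pdpe, pde, pte, (unsigned long long)(start & 0xfff));
	return 0;
}
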
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
+static void ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+static void ppgtt_unbind_vma(struct i915_vma *vma);
+static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
+
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
@@ -199,12 +244,19 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
- uint64_t val)
+ uint64_t val, bool synchronous)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
int ret;
BUG_ON(entry >= 4);
+ if (synchronous) {
+ I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
+ I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
+ return 0;
+ }
+
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
@@ -220,216 +272,357 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
return 0;
}
-static int gen8_ppgtt_enable(struct drm_device *dev)
+static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_ring_buffer *ring,
+ bool synchronous)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int i, j, ret;
+ int i, ret;
/* bit of a hack to find the actual last used pd */
int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
- for_each_ring(ring, dev_priv, j) {
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- }
-
for (i = used_pd - 1; i >= 0; i--) {
dma_addr_t addr = ppgtt->pd_dma_addr[i];
- for_each_ring(ring, dev_priv, j) {
- ret = gen8_write_pdp(ring, i, addr);
- if (ret)
- goto err_out;
- }
+ ret = gen8_write_pdp(ring, i, addr, synchronous);
+ if (ret)
+ return ret;
}
- return 0;
-err_out:
- for_each_ring(ring, dev_priv, j)
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
- return ret;
+ return 0;
}
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
- unsigned first_entry,
- unsigned num_entries,
+ uint64_t start,
+ uint64_t length,
bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr, scratch_pte;
- unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
- unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
+ unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
+ unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
+ unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
+ unsigned num_entries = length >> PAGE_SHIFT;
unsigned last_pte, i;
scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
I915_CACHE_LLC, use_scratch);
while (num_entries) {
- struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
+ struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
- last_pte = first_pte + num_entries;
+ last_pte = pte + num_entries;
if (last_pte > GEN8_PTES_PER_PAGE)
last_pte = GEN8_PTES_PER_PAGE;
pt_vaddr = kmap_atomic(page_table);
- for (i = first_pte; i < last_pte; i++)
+ for (i = pte; i < last_pte; i++) {
pt_vaddr[i] = scratch_pte;
+ num_entries--;
+ }
kunmap_atomic(pt_vaddr);
- num_entries -= last_pte - first_pte;
- first_pte = 0;
- act_pt++;
+ pte = 0;
+ if (++pde == GEN8_PDES_PER_PAGE) {
+ pdpe++;
+ pde = 0;
+ }
}
}
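
Concretely, the rewritten loop advances pte and carries into pde and then pdpe whenever a 512-entry page table is exhausted. A standalone sketch of that carry for a two-entry clear straddling a page-table boundary, with constants taken from the defines above (illustrative only, not driver code):

/* Standalone sketch of the pte -> pde -> pdpe carry in the loop above. */
#include <assert.h>
#include <stdint.h>

#define GEN8_PTES_PER_PAGE 512
#define GEN8_PDES_PER_PAGE 512

int main(void)
{
	uint64_t start = 511ULL << 12;          /* last PTE of the first table */
	unsigned int pdpe = 0, pde = 0, pte = (start >> 12) & 0x1ff;
	unsigned int num_entries = 2;           /* clear two pages */

	while (num_entries--) {
		/* entry (pdpe, pde, pte) would be written with scratch here */
		if (++pte == GEN8_PTES_PER_PAGE) {
			pte = 0;
			if (++pde == GEN8_PDES_PER_PAGE) {
				pde = 0;
				pdpe++;
			}
		}
	}
	/* cleared (0,0,511) and (0,1,0); cursor ends at (0,1,1) */
	assert(pdpe == 0 && pde == 1 && pte == 1);
	return 0;
}
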
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
- unsigned first_entry,
+ uint64_t start,
enum i915_cache_level cache_level)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr;
- unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
- unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
+ unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
+ unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
+ unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
struct sg_page_iter sg_iter;
pt_vaddr = NULL;
+
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+ if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+ break;
+
if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+ pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
- pt_vaddr[act_pte] =
+ pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
- if (++act_pte == GEN8_PTES_PER_PAGE) {
+ if (++pte == GEN8_PTES_PER_PAGE) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
- act_pt++;
- act_pte = 0;
+ if (++pde == GEN8_PDES_PER_PAGE) {
+ pdpe++;
+ pde = 0;
+ }
+ pte = 0;
}
}
if (pt_vaddr)
kunmap_atomic(pt_vaddr);
}
+static void gen8_free_page_tables(struct page **pt_pages)
+{
+ int i;
+
+ if (pt_pages == NULL)
+ return;
+
+ for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
+ if (pt_pages[i])
+ __free_pages(pt_pages[i], 0);
+}
+
+static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
+ kfree(ppgtt->gen8_pt_pages[i]);
+ kfree(ppgtt->gen8_pt_dma_addr[i]);
+ }
+
+ __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+}
+
+static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+{
+ struct pci_dev *hwdev = ppgtt->base.dev->pdev;
+ int i, j;
+
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ /* TODO: In the future we'll support sparse mappings, so this
+ * will have to change. */
+ if (!ppgtt->pd_dma_addr[i])
+ continue;
+
+ pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+
+ for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ if (addr)
+ pci_unmap_page(hwdev, addr, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ }
+ }
+}
+
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- int i, j;
+ list_del(&vm->global_link);
drm_mm_takedown(&vm->mm);
- for (i = 0; i < ppgtt->num_pd_pages ; i++) {
- if (ppgtt->pd_dma_addr[i]) {
- pci_unmap_page(ppgtt->base.dev->pdev,
- ppgtt->pd_dma_addr[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ gen8_ppgtt_unmap_pages(ppgtt);
+ gen8_ppgtt_free(ppgtt);
+}
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
- if (addr)
- pci_unmap_page(ppgtt->base.dev->pdev,
- addr,
- PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+static struct page **__gen8_alloc_page_tables(void)
+{
+ struct page **pt_pages;
+ int i;
- }
- }
- kfree(ppgtt->gen8_pt_dma_addr[i]);
+ pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
+ if (!pt_pages)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
+ pt_pages[i] = alloc_page(GFP_KERNEL);
+ if (!pt_pages[i])
+ goto bail;
}
- __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
- __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+ return pt_pages;
+
+bail:
+ gen8_free_page_tables(pt_pages);
+ kfree(pt_pages);
+ return ERR_PTR(-ENOMEM);
}
-/**
- * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
- * net effect resembling a 2-level page table in normal x86 terms. Each PDP
- * represents 1GB of memory
- * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
- *
- * TODO: Do something with the size parameter
- **/
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
{
- struct page *pt_pages;
- int i, j, ret = -ENOMEM;
- const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
- const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+ struct page **pt_pages[GEN8_LEGACY_PDPS];
+ int i, ret;
- if (size % (1<<30))
- DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+ for (i = 0; i < max_pdp; i++) {
+ pt_pages[i] = __gen8_alloc_page_tables();
+ if (IS_ERR(pt_pages[i])) {
+ ret = PTR_ERR(pt_pages[i]);
+ goto unwind_out;
+ }
+ }
- /* FIXME: split allocation into smaller pieces. For now we only ever do
- * this once, but with full PPGTT, the multiple contiguous allocations
- * will be bad.
+ /* NB: Avoid touching gen8_pt_pages until last to keep the allocation
+ * "atomic" for cleanup purposes.
*/
+ for (i = 0; i < max_pdp; i++)
+ ppgtt->gen8_pt_pages[i] = pt_pages[i];
+
+ return 0;
+
+unwind_out:
+ while (i--) {
+ gen8_free_page_tables(pt_pages[i]);
+ kfree(pt_pages[i]);
+ }
+
+ return ret;
+}
+
+static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
+ sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!ppgtt->gen8_pt_dma_addr[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
+{
ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
if (!ppgtt->pd_pages)
return -ENOMEM;
- pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
- if (!pt_pages) {
+ ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+ BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+
+ return 0;
+}
+
+static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
+{
+ int ret;
+
+ ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
+ if (ret)
+ return ret;
+
+ ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
+ if (ret) {
__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
- return -ENOMEM;
+ return ret;
}
- ppgtt->gen8_pt_pages = pt_pages;
- ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
- ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
- ppgtt->enable = gen8_ppgtt_enable;
- ppgtt->base.clear_range = gen8_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
- ppgtt->base.cleanup = gen8_ppgtt_cleanup;
- ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
- BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+ ret = gen8_ppgtt_allocate_dma(ppgtt);
+ if (ret)
+ gen8_ppgtt_free(ppgtt);
- /*
- * - Create a mapping for the page directories.
- * - For each page directory:
- * allocate space for page table mappings.
- * map each page table
- */
- for (i = 0; i < max_pdp; i++) {
- dma_addr_t temp;
- temp = pci_map_page(ppgtt->base.dev->pdev,
- &ppgtt->pd_pages[i], 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
- goto err_out;
+ return ret;
+}
- ppgtt->pd_dma_addr[i] = temp;
+static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
+ const int pd)
+{
+ dma_addr_t pd_addr;
+ int ret;
- ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
- if (!ppgtt->gen8_pt_dma_addr[i])
- goto err_out;
+ pd_addr = pci_map_page(ppgtt->base.dev->pdev,
+ &ppgtt->pd_pages[pd], 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
- temp = pci_map_page(ppgtt->base.dev->pdev,
- p, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
+ if (ret)
+ return ret;
- if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
- goto err_out;
+ ppgtt->pd_dma_addr[pd] = pd_addr;
- ppgtt->gen8_pt_dma_addr[i][j] = temp;
+ return 0;
+}
+
+static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
+ const int pd,
+ const int pt)
+{
+ dma_addr_t pt_addr;
+ struct page *p;
+ int ret;
+
+ p = ppgtt->gen8_pt_pages[pd][pt];
+ pt_addr = pci_map_page(ppgtt->base.dev->pdev,
+ p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
+ if (ret)
+ return ret;
+
+ ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+
+ return 0;
+}
+
+/**
+ * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
+ * with a net effect resembling a 2-level page table in normal x86 terms. Each
+ * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
+ * space.
+ *
+ * FIXME: split allocation into smaller pieces. For now we only ever do this
+ * once, but with full PPGTT, the multiple contiguous allocations will be bad.
+ * TODO: Do something with the size parameter
+ */
+static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+{
+ const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
+ const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+ int i, j, ret;
+
+ if (size % (1<<30))
+ DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+
+ /* 1. Do all our allocations for page directories and page tables. */
+ ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
+ if (ret)
+ return ret;
+
+ /*
+ * 2. Create DMA mappings for the page directories and page tables.
+ */
+ for (i = 0; i < max_pdp; i++) {
+ ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
+ if (ret)
+ goto bail;
+
+ for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
+ if (ret)
+ goto bail;
}
}
- /* For now, the PPGTT helper functions all require that the PDEs are
+ /*
+ * 3. Map all the page directory entries to point to the page tables
+ * we've allocated.
+ *
+ * For now, the PPGTT helper functions all require that the PDEs are
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
- * will never need to touch the PDEs again */
+ * will never need to touch the PDEs again.
+ */
for (i = 0; i < max_pdp; i++) {
gen8_ppgtt_pde_t *pd_vaddr;
pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
@@ -441,23 +634,85 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
kunmap_atomic(pd_vaddr);
}
- ppgtt->base.clear_range(&ppgtt->base, 0,
- ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
- true);
+ ppgtt->enable = gen8_ppgtt_enable;
+ ppgtt->switch_mm = gen8_mm_switch;
+ ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+ ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+ ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
+
+ ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
- ppgtt->num_pt_pages,
- (ppgtt->num_pt_pages - num_pt_pages) +
- size % (1<<30));
+ ppgtt->num_pd_entries,
+ (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
return 0;
-err_out:
- ppgtt->base.cleanup(&ppgtt->base);
+bail:
+ gen8_ppgtt_unmap_pages(ppgtt);
+ gen8_ppgtt_free(ppgtt);
return ret;
}
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+{
+ struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
+ struct i915_address_space *vm = &ppgtt->base;
+ gen6_gtt_pte_t __iomem *pd_addr;
+ gen6_gtt_pte_t scratch_pte;
+ uint32_t pd_entry;
+ int pte, pde;
+
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+
+ pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+
+ seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
+ ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+ for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
+ u32 expected;
+ gen6_gtt_pte_t *pt_vaddr;
+ dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+ pd_entry = readl(pd_addr + pde);
+ expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
+
+ if (pd_entry != expected)
+ seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+ pde,
+ pd_entry,
+ expected);
+ seq_printf(m, "\tPDE: %x\n", pd_entry);
+
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+ for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
+ unsigned long va =
+ (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
+ (pte * PAGE_SIZE);
+ int i;
+ bool found = false;
+ for (i = 0; i < 4; i++)
+ if (pt_vaddr[pte + i] != scratch_pte)
+ found = true;
+ if (!found)
+ continue;
+
+ seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+ for (i = 0; i < 4; i++) {
+ if (pt_vaddr[pte + i] != scratch_pte)
+ seq_printf(m, " %08x", pt_vaddr[pte + i]);
+ else
+ seq_puts(m, " SCRATCH ");
+ }
+ seq_puts(m, "\n");
+ }
+ kunmap_atomic(pt_vaddr);
+ }
+}
+
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
@@ -480,73 +735,235 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
readl(pd_addr);
}
-static int gen6_ppgtt_enable(struct drm_device *dev)
+static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
+{
+ BUG_ON(ppgtt->pd_offset & 0x3f);
+
+ return (ppgtt->pd_offset / 64) << 16;
+}
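
The encoding above expresses the page-directory offset in 64-byte cachelines and shifts it into the upper 16 bits, which is the form the PP_DIR_BASE writes in the switch_mm helpers below consume. A standalone arithmetic sketch (not driver code, values chosen only for illustration):

/* Standalone sketch of the PD offset encoding performed above. */
#include <assert.h>
#include <stdint.h>

static uint32_t example_get_pd_offset(uint32_t pd_offset)
{
	assert((pd_offset & 0x3f) == 0);   /* must be cacheline aligned */
	return (pd_offset / 64) << 16;     /* cachelines, in bits 31:16 */
}

int main(void)
{
	/* A 64 KiB offset encodes as 0x400 cachelines << 16. */
	assert(example_get_pd_offset(0x10000) == 0x4000000);
	return 0;
}
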
+
+static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_ring_buffer *ring,
+ bool synchronous)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* If we're in reset, we can assume the GPU is sufficiently idle to
+ * manually frob these bits. Ideally we could use the ring functions,
+ * except our error handling makes it quite difficult (can't use
+ * intel_ring_begin, ring->flush, or intel_ring_advance)
+ *
+ * FIXME: We should try not to special case reset
+ */
+ if (synchronous ||
+ i915_reset_in_progress(&dev_priv->gpu_error)) {
+ WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ POSTING_READ(RING_PP_DIR_BASE(ring));
+ return 0;
+ }
+
+ /* NB: TLBs must be flushed and invalidated before a switch */
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+ intel_ring_emit(ring, PP_DIR_DCLV_2G);
+ intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+ intel_ring_emit(ring, get_pd_offset(ppgtt));
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_ring_buffer *ring,
+ bool synchronous)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* If we're in reset, we can assume the GPU is sufficiently idle to
+ * manually frob these bits. Ideally we could use the ring functions,
+ * except our error handling makes it quite difficult (can't use
+ * intel_ring_begin, ring->flush, or intel_ring_advance)
+ *
+ * FIXME: We should try not to special case reset
+ */
+ if (synchronous ||
+ i915_reset_in_progress(&dev_priv->gpu_error)) {
+ WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ POSTING_READ(RING_PP_DIR_BASE(ring));
+ return 0;
+ }
+
+ /* NB: TLBs must be flushed and invalidated before a switch */
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+ intel_ring_emit(ring, PP_DIR_DCLV_2G);
+ intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+ intel_ring_emit(ring, get_pd_offset(ppgtt));
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ /* XXX: RCS is the only one to auto invalidate the TLBs? */
+ if (ring->id != RCS) {
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_ring_buffer *ring,
+ bool synchronous)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!synchronous)
+ return 0;
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+
+ POSTING_READ(RING_PP_DIR_DCLV(ring));
+
+ return 0;
+}
+
+static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t pd_offset;
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int i;
+ int j, ret;
- BUG_ON(ppgtt->pd_offset & 0x3f);
+ for_each_ring(ring, dev_priv, j) {
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- gen6_write_pdes(ppgtt);
+ /* We promise to do a switch later with FULL PPGTT. If this is
+ * aliasing, this is the one and only switch we'll do */
+ if (USES_FULL_PPGTT(dev))
+ continue;
- pd_offset = ppgtt->pd_offset;
- pd_offset /= 64; /* in cachelines, */
- pd_offset <<= 16;
+ ret = ppgtt->switch_mm(ppgtt, ring, true);
+ if (ret)
+ goto err_out;
+ }
- if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk, gab_ctl, ecobits;
+ return 0;
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
- ECOBITS_PPGTT_CACHE64B);
+err_out:
+ for_each_ring(ring, dev_priv, j)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+ return ret;
+}
- gab_ctl = I915_READ(GAB_CTL);
- I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ uint32_t ecochk, ecobits;
+ int i;
- ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
- ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- } else if (INTEL_INFO(dev)->gen >= 7) {
- uint32_t ecochk, ecobits;
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+ ecochk = I915_READ(GAM_ECOCHK);
+ if (IS_HASWELL(dev)) {
+ ecochk |= ECOCHK_PPGTT_WB_HSW;
+ } else {
+ ecochk |= ECOCHK_PPGTT_LLC_IVB;
+ ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+ }
+ I915_WRITE(GAM_ECOCHK, ecochk);
- ecochk = I915_READ(GAM_ECOCHK);
- if (IS_HASWELL(dev)) {
- ecochk |= ECOCHK_PPGTT_WB_HSW;
- } else {
- ecochk |= ECOCHK_PPGTT_LLC_IVB;
- ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
- }
- I915_WRITE(GAM_ECOCHK, ecochk);
+ for_each_ring(ring, dev_priv, i) {
+ int ret;
/* GFX_MODE is per-ring on gen7+ */
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ /* We promise to do a switch later with FULL PPGTT. If this is
+ * aliasing, this is the one and only switch we'll do */
+ if (USES_FULL_PPGTT(dev))
+ continue;
+
+ ret = ppgtt->switch_mm(ppgtt, ring, true);
+ if (ret)
+ return ret;
}
- for_each_ring(ring, dev_priv, i) {
- if (INTEL_INFO(dev)->gen >= 7)
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ return 0;
+}
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ uint32_t ecochk, gab_ctl, ecobits;
+ int i;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
+ ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ for_each_ring(ring, dev_priv, i) {
+ int ret = ppgtt->switch_mm(ppgtt, ring, true);
+ if (ret)
+ return ret;
}
+
return 0;
}
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
- unsigned first_entry,
- unsigned num_entries,
+ uint64_t start,
+ uint64_t length,
bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
@@ -573,12 +990,13 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
- unsigned first_entry,
+ uint64_t start,
enum i915_cache_level cache_level)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr;
+ unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
struct sg_page_iter sg_iter;
@@ -602,65 +1020,130 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
}
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
int i;
- drm_mm_takedown(&ppgtt->base.mm);
-
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
+}
+
+static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
__free_page(ppgtt->pt_pages[i]);
kfree(ppgtt->pt_pages);
- kfree(ppgtt);
}
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+
+ list_del(&vm->global_link);
+ drm_mm_takedown(&ppgtt->base.mm);
+ drm_mm_remove_node(&ppgtt->node);
+
+ gen6_ppgtt_unmap_pages(ppgtt);
+ gen6_ppgtt_free(ppgtt);
+}
+
+static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
+{
+#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned first_pd_entry_in_global_pt;
- int i;
- int ret = -ENOMEM;
+ bool retried = false;
+ int ret;
- /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
- * entries. For aliasing ppgtt support we just steal them at the end for
- * now. */
- first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
+ /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+ * allocator works in address space sizes, so it's multiplied by page
+ * size. We allocate at the top of the GTT to avoid fragmentation.
+ */
+ BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+alloc:
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+ &ppgtt->node, GEN6_PD_SIZE,
+ GEN6_PD_ALIGN, 0,
+ 0, dev_priv->gtt.base.total,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+ if (ret == -ENOSPC && !retried) {
+ ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+ GEN6_PD_SIZE, GEN6_PD_ALIGN,
+ I915_CACHE_NONE, 0);
+ if (ret)
+ return ret;
+
+ retried = true;
+ goto alloc;
+ }
+
+ if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+ DRM_DEBUG("Forced to use aperture for PDEs\n");
- ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
- ppgtt->enable = gen6_ppgtt_enable;
- ppgtt->base.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->base.cleanup = gen6_ppgtt_cleanup;
- ppgtt->base.scratch = dev_priv->gtt.base.scratch;
- ppgtt->base.start = 0;
- ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+ return ret;
+}
+
+static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
+
if (!ppgtt->pt_pages)
return -ENOMEM;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
- if (!ppgtt->pt_pages[i])
- goto err_pt_alloc;
+ if (!ppgtt->pt_pages[i]) {
+ gen6_ppgtt_free(ppgtt);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+{
+ int ret;
+
+ ret = gen6_ppgtt_allocate_page_directories(ppgtt);
+ if (ret)
+ return ret;
+
+ ret = gen6_ppgtt_allocate_page_tables(ppgtt);
+ if (ret) {
+ drm_mm_remove_node(&ppgtt->node);
+ return ret;
}
ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
GFP_KERNEL);
- if (!ppgtt->pt_dma_addr)
- goto err_pt_alloc;
+ if (!ppgtt->pt_dma_addr) {
+ drm_mm_remove_node(&ppgtt->node);
+ gen6_ppgtt_free(ppgtt);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ int i;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
@@ -669,48 +1152,71 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
- ret = -EIO;
- goto err_pd_pin;
-
+ gen6_ppgtt_unmap_pages(ppgtt);
+ return -EIO;
}
+
ppgtt->pt_dma_addr[i] = pt_addr;
}
- ppgtt->base.clear_range(&ppgtt->base, 0,
- ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
+ return 0;
+}
- ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
- return 0;
+ ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+ if (IS_GEN6(dev)) {
+ ppgtt->enable = gen6_ppgtt_enable;
+ ppgtt->switch_mm = gen6_mm_switch;
+ } else if (IS_HASWELL(dev)) {
+ ppgtt->enable = gen7_ppgtt_enable;
+ ppgtt->switch_mm = hsw_mm_switch;
+ } else if (IS_GEN7(dev)) {
+ ppgtt->enable = gen7_ppgtt_enable;
+ ppgtt->switch_mm = gen7_mm_switch;
+ } else
+ BUG();
-err_pd_pin:
- if (ppgtt->pt_dma_addr) {
- for (i--; i >= 0; i--)
- pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
- 4096, PCI_DMA_BIDIRECTIONAL);
- }
-err_pt_alloc:
- kfree(ppgtt->pt_dma_addr);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- if (ppgtt->pt_pages[i])
- __free_page(ppgtt->pt_pages[i]);
+ ret = gen6_ppgtt_alloc(ppgtt);
+ if (ret)
+ return ret;
+
+ ret = gen6_ppgtt_setup_page_tables(ppgtt);
+ if (ret) {
+ gen6_ppgtt_free(ppgtt);
+ return ret;
}
- kfree(ppgtt->pt_pages);
- return ret;
+ ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+ ppgtt->debug_dump = gen6_dump_ppgtt;
+
+ ppgtt->pd_offset =
+ ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
+
+ ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+
+ DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+ ppgtt->node.size >> 20,
+ ppgtt->node.start / PAGE_SIZE);
+
+ return 0;
}
-static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt;
- int ret;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return -ENOMEM;
+ int ret = 0;
ppgtt->base.dev = dev;
+ ppgtt->base.scratch = dev_priv->gtt.base.scratch;
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
@@ -719,45 +1225,37 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
else
BUG();
- if (ret)
- kfree(ppgtt);
- else {
- dev_priv->mm.aliasing_ppgtt = ppgtt;
+ if (!ret) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
ppgtt->base.total);
+ i915_init_vm(dev_priv, &ppgtt->base);
+ if (INTEL_INFO(dev)->gen < 8) {
+ gen6_write_pdes(ppgtt);
+ DRM_DEBUG("Adding PPGTT at offset %x\n",
+ ppgtt->pd_offset << 10);
+ }
}
return ret;
}
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+static void
+ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
- if (!ppgtt)
- return;
-
- ppgtt->base.cleanup(&ppgtt->base);
- dev_priv->mm.aliasing_ppgtt = NULL;
+ vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+ cache_level);
}
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
+static void ppgtt_unbind_vma(struct i915_vma *vma)
{
- ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
- i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
- cache_level);
-}
-
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj)
-{
- ppgtt->base.clear_range(&ppgtt->base,
- i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- true);
+ vma->vm->clear_range(vma->vm,
+ vma->node.start,
+ vma->obj->base.size,
+ true);
}
extern int intel_iommu_gfx_mapped;
@@ -840,8 +1338,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev);
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start / PAGE_SIZE,
- dev_priv->gtt.base.total / PAGE_SIZE,
+ dev_priv->gtt.base.start,
+ dev_priv->gtt.base.total,
true);
}
@@ -849,18 +1347,46 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
i915_check_and_clear_faults(dev);
/* First fill our portion of the GTT with scratch pages */
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start / PAGE_SIZE,
- dev_priv->gtt.base.total / PAGE_SIZE,
+ dev_priv->gtt.base.start,
+ dev_priv->gtt.base.total,
true);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+ &dev_priv->gtt.base);
+ if (!vma)
+ continue;
+
i915_gem_clflush_object(obj, obj->pin_display);
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ /* The bind_vma code tries to be smart about tracking mappings.
+ * Unfortunately above, we've just wiped out the mappings
+ * without telling our object about it. So we need to fake it.
+ */
+ obj->has_global_gtt_mapping = 0;
+ vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ }
+
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ gen8_setup_private_ppat(dev_priv);
+ return;
+ }
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ /* TODO: Perhaps it shouldn't be gen6 specific */
+ if (i915_is_ggtt(vm)) {
+ if (dev_priv->mm.aliasing_ppgtt)
+ gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
+ continue;
+ }
+
+ gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
}
i915_gem_chipset_flush(dev);
@@ -891,10 +1417,11 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
- unsigned int first_entry,
+ uint64_t start,
enum i915_cache_level level)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
gen8_gtt_pte_t __iomem *gtt_entries =
(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
@@ -936,10 +1463,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
- unsigned int first_entry,
+ uint64_t start,
enum i915_cache_level level)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
gen6_gtt_pte_t __iomem *gtt_entries =
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
@@ -971,11 +1499,13 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- unsigned int first_entry,
- unsigned int num_entries,
+ uint64_t start,
+ uint64_t length,
bool use_scratch)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -995,11 +1525,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- unsigned int first_entry,
- unsigned int num_entries,
+ uint64_t start,
+ uint64_t length,
bool use_scratch)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -1017,53 +1549,103 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
readl(gtt_base);
}
-static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct sg_table *st,
- unsigned int pg_start,
- enum i915_cache_level cache_level)
+
+static void i915_ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
{
+ const unsigned long entry = vma->node.start >> PAGE_SHIFT;
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(st, pg_start, flags);
-
+ BUG_ON(!i915_is_ggtt(vma->vm));
+ intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
+ vma->obj->has_global_gtt_mapping = 1;
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
- unsigned int first_entry,
- unsigned int num_entries,
+ uint64_t start,
+ uint64_t length,
bool unused)
{
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
intel_gtt_clear_range(first_entry, num_entries);
}
+static void i915_ggtt_unbind_vma(struct i915_vma *vma)
+{
+ const unsigned int first = vma->node.start >> PAGE_SHIFT;
+ const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
+
+ BUG_ON(!i915_is_ggtt(vma->vm));
+ vma->obj->has_global_gtt_mapping = 0;
+ intel_gtt_clear_range(first, size);
+}
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
+static void ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- struct drm_device *dev = obj->base.dev;
+ struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
+ struct drm_i915_gem_object *obj = vma->obj;
- dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
- entry,
- cache_level);
+ /* If there is no aliasing PPGTT, or the caller needs a global mapping,
+ * or we have a global mapping already but the cacheability flags have
+ * changed, set the global PTEs.
+ *
+ * If there is an aliasing PPGTT it is anecdotally faster, so use that
+ * instead if none of the above hold true.
+ *
+ * NB: A global mapping should only be needed for special regions like
+ * "gtt mappable", SNB errata, or if specified via special execbuf
+ * flags. At all other times, the GPU will use the aliasing PPGTT.
+ */
+ if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
+ if (!obj->has_global_gtt_mapping ||
+ (cache_level != obj->cache_level)) {
+ vma->vm->insert_entries(vma->vm, obj->pages,
+ vma->node.start,
+ cache_level);
+ obj->has_global_gtt_mapping = 1;
+ }
+ }
- obj->has_global_gtt_mapping = 1;
+ if (dev_priv->mm.aliasing_ppgtt &&
+ (!obj->has_aliasing_ppgtt_mapping ||
+ (cache_level != obj->cache_level))) {
+ struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+ appgtt->base.insert_entries(&appgtt->base,
+ vma->obj->pages,
+ vma->node.start,
+ cache_level);
+ vma->obj->has_aliasing_ppgtt_mapping = 1;
+ }
}
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+static void ggtt_unbind_vma(struct i915_vma *vma)
{
- struct drm_device *dev = obj->base.dev;
+ struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- entry,
- obj->base.size >> PAGE_SHIFT,
- true);
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (obj->has_global_gtt_mapping) {
+ vma->vm->clear_range(vma->vm,
+ vma->node.start,
+ obj->base.size,
+ true);
+ obj->has_global_gtt_mapping = 0;
+ }
- obj->has_global_gtt_mapping = 0;
+ if (obj->has_aliasing_ppgtt_mapping) {
+ struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+ appgtt->base.clear_range(&appgtt->base,
+ vma->node.start,
+ obj->base.size,
+ true);
+ obj->has_aliasing_ppgtt_mapping = 0;
+ }
}
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
@@ -1145,29 +1727,14 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
- const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
+ ggtt_vm->clear_range(ggtt_vm, hole_start,
+ hole_end - hole_start, true);
}
/* And finally clear the reserved guard page */
- ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
-}
-
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
- if (i915_enable_ppgtt >= 0)
- return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
- return false;
-#endif
-
- return true;
+ ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
}
void i915_gem_init_global_gtt(struct drm_device *dev)
@@ -1178,26 +1745,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
gtt_size = dev_priv->gtt.base.total;
mappable_size = dev_priv->gtt.mappable_end;
- if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
- int ret;
-
- if (INTEL_INFO(dev)->gen <= 7) {
- /* PPGTT pdes are stolen from global gtt ptes, so shrink the
- * aperture accordingly when using aliasing ppgtt. */
- gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
- }
-
- i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
-
- ret = i915_gem_init_aliasing_ppgtt(dev);
- if (!ret)
- return;
-
- DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
- drm_mm_takedown(&dev_priv->gtt.base.mm);
- if (INTEL_INFO(dev)->gen < 8)
- gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
- }
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
@@ -1252,11 +1799,6 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
- if (bdw_gmch_ctl > 4) {
- WARN_ON(!i915_preliminary_hw_support);
- return 4<<20;
- }
-
return bdw_gmch_ctl << 20;
}
@@ -1438,7 +1980,6 @@ static int i915_gmch_probe(struct drm_device *dev,
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
if (unlikely(dev_priv->gtt.do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -1493,3 +2034,62 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->vma_link);
+ INIT_LIST_HEAD(&vma->mm_list);
+ INIT_LIST_HEAD(&vma->exec_list);
+ vma->vm = vm;
+ vma->obj = obj;
+
+ switch (INTEL_INFO(vm->dev)->gen) {
+ case 8:
+ case 7:
+ case 6:
+ if (i915_is_ggtt(vm)) {
+ vma->unbind_vma = ggtt_unbind_vma;
+ vma->bind_vma = ggtt_bind_vma;
+ } else {
+ vma->unbind_vma = ppgtt_unbind_vma;
+ vma->bind_vma = ppgtt_bind_vma;
+ }
+ break;
+ case 5:
+ case 4:
+ case 3:
+ case 2:
+ BUG_ON(!i915_is_ggtt(vm));
+ vma->unbind_vma = i915_ggtt_unbind_vma;
+ vma->bind_vma = i915_ggtt_bind_vma;
+ break;
+ default:
+ BUG();
+ }
+
+ /* Keep GGTT vmas first to make debug easier */
+ if (i915_is_ggtt(vm))
+ list_add(&vma->vma_link, &obj->vma_list);
+ else
+ list_add_tail(&vma->vma_link, &obj->vma_list);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = __i915_gem_vma_create(obj, vm);
+
+ return vma;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 28d24caa49f3..62ef55ba061c 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -215,7 +215,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
int bios_reserved = 0;
#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped) {
+ if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b13905348048..cb150e8b4336 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -87,7 +87,7 @@
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -294,7 +294,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (obj->pin_count || obj->framebuffer_references) {
+ if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
@@ -415,7 +415,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 990cf8f43efd..12f1d43b2d68 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -238,50 +238,61 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
- struct drm_i915_error_state *error,
- unsigned ring)
+ struct drm_i915_error_ring *ring)
{
- BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
- if (!error->ring[ring].valid)
+ if (!ring->valid)
return;
- err_printf(m, "%s command stream:\n", ring_str(ring));
- err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
- err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
- err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
- err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
- err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
- err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
- err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
+ err_printf(m, " HEAD: 0x%08x\n", ring->head);
+ err_printf(m, " TAIL: 0x%08x\n", ring->tail);
+ err_printf(m, " CTL: 0x%08x\n", ring->ctl);
+ err_printf(m, " HWS: 0x%08x\n", ring->hws);
+ err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
+ err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
+ err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
+ err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
if (INTEL_INFO(dev)->gen >= 4) {
- err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]);
- err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
- err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
+ err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
+ err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
}
- err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
- err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
+ err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
+ err_printf(m, " FADDR: 0x%08x\n", ring->faddr);
if (INTEL_INFO(dev)->gen >= 6) {
- err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
- err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+ err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
+ err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][0],
- error->semaphore_seqno[ring][0]);
+ ring->semaphore_mboxes[0],
+ ring->semaphore_seqno[0]);
err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][1],
- error->semaphore_seqno[ring][1]);
+ ring->semaphore_mboxes[1],
+ ring->semaphore_seqno[1]);
if (HAS_VEBOX(dev)) {
err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][2],
- error->semaphore_seqno[ring][2]);
+ ring->semaphore_mboxes[2],
+ ring->semaphore_seqno[2]);
}
}
- err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
- err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
- err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
- err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+ if (USES_PPGTT(dev)) {
+ err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ int i;
+ for (i = 0; i < 4; i++)
+ err_printf(m, " PDP%d: 0x%016llx\n",
+ i, ring->vm_info.pdp[i]);
+ } else {
+ err_printf(m, " PP_DIR_BASE: 0x%08x\n",
+ ring->vm_info.pp_dir_base);
+ }
+ }
+ err_printf(m, " seqno: 0x%08x\n", ring->seqno);
+ err_printf(m, " waiting: %s\n", yesno(ring->waiting));
+ err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
+ err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
err_printf(m, " hangcheck: %s [%d]\n",
- hangcheck_action_to_str(error->hangcheck_action[ring]),
- error->hangcheck_score[ring]);
+ hangcheck_action_to_str(ring->hangcheck_action),
+ ring->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -293,22 +304,54 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
+static void print_error_obj(struct drm_i915_error_state_buf *m,
+ struct drm_i915_error_object *obj)
+{
+ int page, offset, elt;
+
+ for (page = offset = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ err_printf(m, "%08x : %08x\n", offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+}
+
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
struct drm_device *dev = error_priv->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
- int i, j, page, offset, elt;
+ int i, j, offset, elt;
+ int max_hangcheck_score;
if (!error) {
err_printf(m, "no error state collected\n");
goto out;
}
+ err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ max_hangcheck_score = 0;
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ if (error->ring[i].hangcheck_score > max_hangcheck_score)
+ max_hangcheck_score = error->ring[i].hangcheck_score;
+ }
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ if (error->ring[i].hangcheck_score == max_hangcheck_score &&
+ error->ring[i].pid != -1) {
+ err_printf(m, "Active process (on ring %s): %s [%d]\n",
+ ring_str(i),
+ error->ring[i].comm,
+ error->ring[i].pid);
+ }
+ }
+ err_printf(m, "Reset count: %u\n", error->reset_count);
+ err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
@@ -333,8 +376,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
- for (i = 0; i < ARRAY_SIZE(error->ring); i++)
- i915_ring_error_state(m, dev, error, i);
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ err_printf(m, "%s command stream:\n", ring_str(i));
+ i915_ring_error_state(m, dev, &error->ring[i]);
+ }
if (error->active_bo)
print_error_buffers(m, "Active",
@@ -349,18 +394,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
- if ((obj = error->ring[i].batchbuffer)) {
- err_printf(m, "%s --- gtt_offset = 0x%08x\n",
- dev_priv->ring[i].name,
+ obj = error->ring[i].batchbuffer;
+ if (obj) {
+ err_puts(m, dev_priv->ring[i].name);
+ if (error->ring[i].pid != -1)
+ err_printf(m, " (submitted by %s [%d])",
+ error->ring[i].comm,
+ error->ring[i].pid);
+ err_printf(m, " --- gtt_offset = 0x%08x\n",
obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n", offset,
- obj->pages[page][elt]);
- offset += 4;
- }
- }
+ print_error_obj(m, obj);
+ }
+
+ obj = error->ring[i].wa_batchbuffer;
+ if (obj) {
+ err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
+ dev_priv->ring[i].name, obj->gtt_offset);
+ print_error_obj(m, obj);
}
if (error->ring[i].num_requests) {
@@ -379,14 +429,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
+ print_error_obj(m, obj);
+ }
+
+ if ((obj = error->ring[i].hws_page)) {
+ err_printf(m, "%s --- HW Status = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n",
- offset,
- obj->pages[page][elt]);
- offset += 4;
- }
+ for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ offset,
+ obj->pages[0][elt],
+ obj->pages[0][elt+1],
+ obj->pages[0][elt+2],
+ obj->pages[0][elt+3]);
+ offset += 16;
}
}
@@ -472,6 +530,7 @@ static void i915_error_state_free(struct kref *error_ref)
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer);
+ i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
kfree(error->ring[i].requests);
}
@@ -485,6 +544,7 @@ static void i915_error_state_free(struct kref *error_ref)
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src,
+ struct i915_address_space *vm,
const int num_pages)
{
struct drm_i915_error_object *dst;
@@ -498,7 +558,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
if (dst == NULL)
return NULL;
- reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
+ reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
for (i = 0; i < num_pages; i++) {
unsigned long flags;
void *d;
@@ -508,8 +568,10 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- if (reloc_offset < dev_priv->gtt.mappable_end &&
- src->has_global_gtt_mapping) {
+ if (src->cache_level == I915_CACHE_NONE &&
+ reloc_offset < dev_priv->gtt.mappable_end &&
+ src->has_global_gtt_mapping &&
+ i915_is_ggtt(vm)) {
void __iomem *s;
/* Simply ignore tiling or any overlapping fence.
@@ -559,8 +621,12 @@ unwind:
kfree(dst);
return NULL;
}
-#define i915_error_object_create(dev_priv, src) \
- i915_error_object_create_sized((dev_priv), (src), \
+#define i915_error_object_create(dev_priv, src, vm) \
+ i915_error_object_create_sized((dev_priv), (src), (vm), \
+ (src)->base.size>>PAGE_SHIFT)
+
+#define i915_error_ggtt_object_create(dev_priv, src) \
+ i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
(src)->base.size>>PAGE_SHIFT)
static void capture_bo(struct drm_i915_error_buffer *err,
@@ -575,7 +641,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg;
err->pinned = 0;
- if (obj->pin_count > 0)
+ if (i915_gem_obj_is_pinned(obj))
err->pinned = 1;
if (obj->user_pin_count > 0)
err->pinned = -1;
@@ -608,7 +674,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
int i = 0;
list_for_each_entry(obj, head, global_list) {
- if (obj->pin_count == 0)
+ if (!i915_gem_obj_is_pinned(obj))
continue;
capture_bo(err++, obj);
@@ -619,6 +685,39 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
return i;
}
+/* Generate a semi-unique error code. The code is not meant to have meaning; its
+ * only purpose is to try to prevent false duplicated bug reports by
+ * grossly estimating a GPU error state.
+ *
+ * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
+ * the hang if we could strip the GTT offset information from it.
+ *
+ * It's only a small step better than a random number in its current form.
+ */
+static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error,
+ int *ring_id)
+{
+ uint32_t error_code = 0;
+ int i;
+
+ /* IPEHR would be an ideal way to detect errors, as it's the gross
+ * measure of "the command that hung." However, some very common
+ * synchronization commands almost always appear in IPEHR even when the
+ * hang is strictly a client bug. Use instdone to help tell those apart.
+ */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
+ if (ring_id)
+ *ring_id = i;
+
+ return error->ring[i].ipehr ^ error->ring[i].instdone;
+ }
+ }
+
+ return error_code;
+}
+
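As a worked example of the comment above, the fingerprint is just the XOR of IPEHR and INSTDONE for the ring hangcheck declared hung. A standalone userspace-style sketch with made-up register values (not kernel code):

#include <stdint.h>
#include <stdio.h>

struct ring_sample {
	uint32_t ipehr;     /* header of the command the CS stopped on */
	uint32_t instdone;  /* unit-busy snapshot at hang time */
	int hung;           /* hangcheck declared this ring hung */
};

int main(void)
{
	/* Hypothetical snapshots; the values are invented for illustration. */
	struct ring_sample rings[] = {
		{ 0x0a000000, 0xffffffff, 0 },	/* idle ring */
		{ 0x7a000002, 0xfffffffe, 1 },	/* "hung" ring */
	};
	unsigned int i;

	for (i = 0; i < sizeof(rings) / sizeof(rings[0]); i++) {
		if (!rings[i].hung)
			continue;
		/* Same combination the function above returns: XOR is cheap,
		 * and mixing in INSTDONE splits apart hangs that share a
		 * common synchronization opcode in IPEHR. */
		printf("ecode %u:0x%08x\n",
		       i, rings[i].ipehr ^ rings[i].instdone);
	}
	return 0;
}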
static void i915_gem_record_fences(struct drm_device *dev,
struct drm_i915_error_state *error)
{
@@ -652,107 +751,114 @@ static void i915_gem_record_fences(struct drm_device *dev,
}
}
-static struct drm_i915_error_object *
-i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
-{
- struct i915_address_space *vm;
- struct i915_vma *vma;
- struct drm_i915_gem_object *obj;
- u32 seqno;
-
- if (!ring->get_seqno)
- return NULL;
-
- if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
- u32 acthd = I915_READ(ACTHD);
-
- if (WARN_ON(ring->id != RCS))
- return NULL;
-
- obj = ring->scratch.obj;
- if (obj != NULL &&
- acthd >= i915_gem_obj_ggtt_offset(obj) &&
- acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
- return i915_error_object_create(dev_priv, obj);
- }
-
- seqno = ring->get_seqno(ring, false);
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- list_for_each_entry(vma, &vm->active_list, mm_list) {
- obj = vma->obj;
- if (obj->ring != ring)
- continue;
-
- if (i915_seqno_passed(seqno, obj->last_read_seqno))
- continue;
-
- if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
- continue;
-
- /* We need to copy these to an anonymous buffer as the simplest
- * method to avoid being overwritten by userspace.
- */
- return i915_error_object_create(dev_priv, obj);
- }
- }
-
- return NULL;
-}
-
static void i915_record_ring_state(struct drm_device *dev,
- struct drm_i915_error_state *error,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring,
+ struct drm_i915_error_ring *ering)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
- error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
- error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
- error->semaphore_mboxes[ring->id][0]
+ ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
+ ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+ ering->semaphore_mboxes[0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
- error->semaphore_mboxes[ring->id][1]
+ ering->semaphore_mboxes[1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
- error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
- error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+ ering->semaphore_seqno[0] = ring->sync_seqno[0];
+ ering->semaphore_seqno[1] = ring->sync_seqno[1];
}
if (HAS_VEBOX(dev)) {
- error->semaphore_mboxes[ring->id][2] =
+ ering->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(ring->mmio_base));
- error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
+ ering->semaphore_seqno[2] = ring->sync_seqno[2];
}
if (INTEL_INFO(dev)->gen >= 4) {
- error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
- error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
- error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
- error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
- error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
+ ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
+ ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+ ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
+ ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
+ ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
if (INTEL_INFO(dev)->gen >= 8)
- error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
- error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
+ ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+ ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
} else {
- error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
- error->ipeir[ring->id] = I915_READ(IPEIR);
- error->ipehr[ring->id] = I915_READ(IPEHR);
- error->instdone[ring->id] = I915_READ(INSTDONE);
+ ering->faddr = I915_READ(DMA_FADD_I8XX);
+ ering->ipeir = I915_READ(IPEIR);
+ ering->ipehr = I915_READ(IPEHR);
+ ering->instdone = I915_READ(INSTDONE);
+ }
+
+ ering->waiting = waitqueue_active(&ring->irq_queue);
+ ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
+ ering->seqno = ring->get_seqno(ring, false);
+ ering->acthd = intel_ring_get_active_head(ring);
+ ering->head = I915_READ_HEAD(ring);
+ ering->tail = I915_READ_TAIL(ring);
+ ering->ctl = I915_READ_CTL(ring);
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ int mmio;
+
+ if (IS_GEN7(dev)) {
+ switch (ring->id) {
+ default:
+ case RCS:
+ mmio = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS:
+ mmio = BLT_HWS_PGA_GEN7;
+ break;
+ case VCS:
+ mmio = BSD_HWS_PGA_GEN7;
+ break;
+ case VECS:
+ mmio = VEBOX_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (IS_GEN6(ring->dev)) {
+ mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else {
+ /* XXX: gen8 returns to sanity */
+ mmio = RING_HWS_PGA(ring->mmio_base);
+ }
+
+ ering->hws = I915_READ(mmio);
}
- error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
- error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
- error->seqno[ring->id] = ring->get_seqno(ring, false);
- error->acthd[ring->id] = intel_ring_get_active_head(ring);
- error->head[ring->id] = I915_READ_HEAD(ring);
- error->tail[ring->id] = I915_READ_TAIL(ring);
- error->ctl[ring->id] = I915_READ_CTL(ring);
+ ering->cpu_ring_head = ring->head;
+ ering->cpu_ring_tail = ring->tail;
+
+ ering->hangcheck_score = ring->hangcheck.score;
+ ering->hangcheck_action = ring->hangcheck.action;
+
+ if (USES_PPGTT(dev)) {
+ int i;
- error->cpu_ring_head[ring->id] = ring->head;
- error->cpu_ring_tail[ring->id] = ring->tail;
+ ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
- error->hangcheck_score[ring->id] = ring->hangcheck.score;
- error->hangcheck_action[ring->id] = ring->hangcheck.action;
+ switch (INTEL_INFO(dev)->gen) {
+ case 8:
+ for (i = 0; i < 4; i++) {
+ ering->vm_info.pdp[i] =
+ I915_READ(GEN8_RING_PDP_UDW(ring, i));
+ ering->vm_info.pdp[i] <<= 32;
+ ering->vm_info.pdp[i] |=
+ I915_READ(GEN8_RING_PDP_LDW(ring, i));
+ }
+ break;
+ case 7:
+ ering->vm_info.pp_dir_base =
+ I915_READ(RING_PP_DIR_BASE(ring));
+ break;
+ case 6:
+ ering->vm_info.pp_dir_base =
+ I915_READ(RING_PP_DIR_BASE_READ(ring));
+ break;
+ }
+ }
}
@@ -770,7 +876,9 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
ering->ctx = i915_error_object_create_sized(dev_priv,
- obj, 1);
+ obj,
+ &dev_priv->gtt.base,
+ 1);
break;
}
}
@@ -791,14 +899,48 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].valid = true;
- i915_record_ring_state(dev, error, ring);
+ i915_record_ring_state(dev, ring, &error->ring[i]);
- error->ring[i].batchbuffer =
- i915_error_first_batchbuffer(dev_priv, ring);
+ error->ring[i].pid = -1;
+ request = i915_gem_find_active_request(ring);
+ if (request) {
+ /* We need to copy these to an anonymous buffer
+ * as the simplest method to avoid being overwritten
+ * by userspace.
+ */
+ error->ring[i].batchbuffer =
+ i915_error_object_create(dev_priv,
+ request->batch_obj,
+ request->ctx ?
+ request->ctx->vm :
+ &dev_priv->gtt.base);
+
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
+ ring->scratch.obj)
+ error->ring[i].wa_batchbuffer =
+ i915_error_ggtt_object_create(dev_priv,
+ ring->scratch.obj);
+
+ if (request->file_priv) {
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = pid_task(request->file_priv->file->pid,
+ PIDTYPE_PID);
+ if (task) {
+ strcpy(error->ring[i].comm, task->comm);
+ error->ring[i].pid = task->pid;
+ }
+ rcu_read_unlock();
+ }
+ }
error->ring[i].ringbuffer =
- i915_error_object_create(dev_priv, ring->obj);
+ i915_error_ggtt_object_create(dev_priv, ring->obj);
+ if (ring->status_page.obj)
+ error->ring[i].hws_page =
+ i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
i915_gem_record_active_context(ring, error, &error->ring[i]);
@@ -845,7 +987,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
i++;
error->active_bo_count[ndx] = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- if (obj->pin_count)
+ if (i915_gem_obj_is_pinned(obj))
i++;
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
@@ -879,11 +1021,6 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
cnt++;
- if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
- cnt = 1;
-
- vm = &dev_priv->gtt.base;
-
error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
@@ -895,6 +1032,108 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
i915_gem_capture_vm(dev_priv, error, vm, i++);
}
+/* Capture all registers which don't fit into another category. */
+static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int pipe;
+
+ /* General organization
+ * 1. Registers specific to a single generation
+ * 2. Registers which belong to multiple generations
+ * 3. Feature specific registers
+ * 4. Everything else
+ * Please try to follow the order.
+ */
+
+ /* 1: Registers specific to a single generation */
+ if (IS_VALLEYVIEW(dev)) {
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ }
+
+ if (IS_GEN7(dev))
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ if (IS_GEN6(dev)) {
+ error->forcewake = I915_READ(FORCEWAKE);
+ error->gab_ctl = I915_READ(GAB_CTL);
+ error->gfx_mode = I915_READ(GFX_MODE);
+ }
+
+ if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+
+ /* 2: Registers which belong to multiple generations */
+ if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->derrmr = I915_READ(DERRMR);
+ error->error = I915_READ(ERROR_GEN6);
+ error->done_reg = I915_READ(DONE_REG);
+ }
+
+ /* 3: Feature specific registers */
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ error->gam_ecochk = I915_READ(GAM_ECOCHK);
+ error->gac_eco = I915_READ(GAC_ECO_BITS);
+ }
+
+ /* 4: Everything else */
+ if (HAS_HW_CONTEXTS(dev))
+ error->ccid = I915_READ(CCID);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else {
+ error->ier = I915_READ(IER);
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+ }
+
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+}
+
+static void i915_error_capture_msg(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ bool wedged,
+ const char *error_msg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ecode;
+ int ring_id = -1, len;
+
+ ecode = i915_error_generate_code(dev_priv, error, &ring_id);
+
+ len = scnprintf(error->error_msg, sizeof(error->error_msg),
+ "GPU HANG: ecode %d:0x%08x", ring_id, ecode);
+
+ if (ring_id != -1 && error->ring[ring_id].pid != -1)
+ len += scnprintf(error->error_msg + len,
+ sizeof(error->error_msg) - len,
+ ", in %s [%d]",
+ error->ring[ring_id].comm,
+ error->ring[ring_id].pid);
+
+ scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
+ ", reason: %s, action: %s",
+ error_msg,
+ wedged ? "reset" : "continue");
+}
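The function above builds the one-line summary later printed by DRM_INFO. A standalone sketch of the same chained-formatting pattern with invented values, using userspace snprintf in place of the kernel's scnprintf (which additionally caps its return value at the remaining space):

#include <stdio.h>

int main(void)
{
	char msg[128];	/* comfortably larger than the sample strings */
	int len;

	/* All sample values below are made up for illustration. */
	int ring_id = 0, pid = 1234, wedged = 1;
	unsigned int ecode = 0xdeadbeef;
	const char *comm = "Xorg", *reason = "stuck on render ring";

	len = snprintf(msg, sizeof(msg), "GPU HANG: ecode %d:0x%08x",
		       ring_id, ecode);
	if (pid != -1)
		len += snprintf(msg + len, sizeof(msg) - len,
				", in %s [%d]", comm, pid);
	snprintf(msg + len, sizeof(msg) - len, ", reason: %s, action: %s",
		 reason, wedged ? "reset" : "continue");

	/* Prints: GPU HANG: ecode 0:0xdeadbeef, in Xorg [1234],
	 *         reason: stuck on render ring, action: reset */
	puts(msg);
	return 0;
}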
+
+static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ error->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ error->suspend_count = dev_priv->suspend_count;
+}
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -904,18 +1143,13 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
-void i915_capture_error_state(struct drm_device *dev)
+void i915_capture_error_state(struct drm_device *dev, bool wedged,
+ const char *error_msg)
{
+ static bool warned;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
- int pipe;
-
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- error = dev_priv->gpu_error.first_error;
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
- if (error)
- return;
/* Account for pipe specific data like PIPE*STAT */
error = kzalloc(sizeof(*error), GFP_ATOMIC);
@@ -924,52 +1158,10 @@ void i915_capture_error_state(struct drm_device *dev)
return;
}
- DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
- dev->primary->index);
- DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
- DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
- DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
-
kref_init(&error->ref);
- error->eir = I915_READ(EIR);
- error->pgtbl_er = I915_READ(PGTBL_ER);
- if (HAS_HW_CONTEXTS(dev))
- error->ccid = I915_READ(CCID);
-
- if (HAS_PCH_SPLIT(dev))
- error->ier = I915_READ(DEIER) | I915_READ(GTIER);
- else if (IS_VALLEYVIEW(dev))
- error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
- else if (IS_GEN2(dev))
- error->ier = I915_READ16(IER);
- else
- error->ier = I915_READ(IER);
-
- if (INTEL_INFO(dev)->gen >= 6)
- error->derrmr = I915_READ(DERRMR);
-
- if (IS_VALLEYVIEW(dev))
- error->forcewake = I915_READ(FORCEWAKE_VLV);
- else if (INTEL_INFO(dev)->gen >= 7)
- error->forcewake = I915_READ(FORCEWAKE_MT);
- else if (INTEL_INFO(dev)->gen == 6)
- error->forcewake = I915_READ(FORCEWAKE);
-
- if (!HAS_PCH_SPLIT(dev))
- for_each_pipe(pipe)
- error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-
- if (INTEL_INFO(dev)->gen >= 6) {
- error->error = I915_READ(ERROR_GEN6);
- error->done_reg = I915_READ(DONE_REG);
- }
-
- if (INTEL_INFO(dev)->gen == 7)
- error->err_int = I915_READ(GEN7_ERR_INT);
-
- i915_get_extra_instdone(dev, error->extra_instdone);
+ i915_capture_gen_state(dev_priv, error);
+ i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error);
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
@@ -979,6 +1171,9 @@ void i915_capture_error_state(struct drm_device *dev)
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
+ i915_error_capture_msg(dev, error, wedged, error_msg);
+ DRM_INFO("%s\n", error->error_msg);
+
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
if (dev_priv->gpu_error.first_error == NULL) {
dev_priv->gpu_error.first_error = error;
@@ -986,8 +1181,19 @@ void i915_capture_error_state(struct drm_device *dev)
}
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
- if (error)
+ if (error) {
i915_error_state_free(&error->ref);
+ return;
+ }
+
+ if (!warned) {
+ DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+ DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+ DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+ DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
+ DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+ warned = true;
+ }
}
void i915_error_state_get(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d554169ac592..7753249b3a95 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -82,13 +82,13 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
/* For display hotplug interrupt */
static void
-ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (dev_priv->pc8.irqs_disabled) {
+ if (dev_priv->pm.irqs_disabled) {
WARN(1, "IRQs disabled\n");
- dev_priv->pc8.regsave.deimr &= ~mask;
+ dev_priv->pm.regsave.deimr &= ~mask;
return;
}
@@ -100,13 +100,13 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
static void
-ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (dev_priv->pc8.irqs_disabled) {
+ if (dev_priv->pm.irqs_disabled) {
WARN(1, "IRQs disabled\n");
- dev_priv->pc8.regsave.deimr |= mask;
+ dev_priv->pm.regsave.deimr |= mask;
return;
}
@@ -129,10 +129,10 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
{
assert_spin_locked(&dev_priv->irq_lock);
- if (dev_priv->pc8.irqs_disabled) {
+ if (dev_priv->pm.irqs_disabled) {
WARN(1, "IRQs disabled\n");
- dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
- dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
+ dev_priv->pm.regsave.gtimr &= ~interrupt_mask;
+ dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask &
interrupt_mask);
return;
}
@@ -167,10 +167,10 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (dev_priv->pc8.irqs_disabled) {
+ if (dev_priv->pm.irqs_disabled) {
WARN(1, "IRQs disabled\n");
- dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
- dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
+ dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask;
+ dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask &
interrupt_mask);
return;
}
@@ -232,6 +232,18 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
return true;
}
+static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & 0x7fff0000;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+}
+
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
@@ -301,11 +313,11 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (dev_priv->pc8.irqs_disabled &&
+ if (dev_priv->pm.irqs_disabled &&
(interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
WARN(1, "IRQs disabled\n");
- dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
- dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
+ dev_priv->pm.regsave.sdeimr &= ~interrupt_mask;
+ dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask &
interrupt_mask);
return;
}
@@ -375,16 +387,15 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
*
* Returns the previous state of underrun reporting.
*/
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
bool ret;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ assert_spin_locked(&dev_priv->irq_lock);
ret = !intel_crtc->cpu_fifo_underrun_disabled;
@@ -393,7 +404,9 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
intel_crtc->cpu_fifo_underrun_disabled = !enable;
- if (IS_GEN5(dev) || IS_GEN6(dev))
+ if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
+ i9xx_clear_fifo_underrun(dev, pipe);
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -401,10 +414,33 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
done:
+ return ret;
+}
+
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
return ret;
}
+static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return !intel_crtc->cpu_fifo_underrun_disabled;
+}
+
/**
* intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
* @dev: drm device
@@ -458,45 +494,109 @@ done:
}
-void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
+static void
+__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 enable_mask, u32 status_mask)
{
u32 reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & 0x7fff0000;
+ u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock);
- if ((pipestat & mask) == mask)
+ if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+ status_mask & ~PIPESTAT_INT_STATUS_MASK))
return;
+ if ((pipestat & enable_mask) == enable_mask)
+ return;
+
+ dev_priv->pipestat_irq_mask[pipe] |= status_mask;
+
/* Enable the interrupt, clear any pending status */
- pipestat |= mask | (mask >> 16);
+ pipestat |= enable_mask | status_mask;
I915_WRITE(reg, pipestat);
POSTING_READ(reg);
}
-void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
+static void
+__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 enable_mask, u32 status_mask)
{
u32 reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & 0x7fff0000;
+ u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock);
- if ((pipestat & mask) == 0)
+ if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+ status_mask & ~PIPESTAT_INT_STATUS_MASK))
+ return;
+
+ if ((pipestat & enable_mask) == 0)
return;
- pipestat &= ~mask;
+ dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
+
+ pipestat &= ~enable_mask;
I915_WRITE(reg, pipestat);
POSTING_READ(reg);
}
+static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
+{
+ u32 enable_mask = status_mask << 16;
+
+ /*
+ * On pipe A we don't support the PSR interrupt yet; on pipe B the
+ * same bit must be zero (MBZ).
+ */
+ if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
+ return 0;
+
+ enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
+ SPRITE0_FLIP_DONE_INT_EN_VLV |
+ SPRITE1_FLIP_DONE_INT_EN_VLV);
+ if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
+ enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
+ if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
+ enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
+
+ return enable_mask;
+}
+
+void
+i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask)
+{
+ u32 enable_mask;
+
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+ status_mask);
+ else
+ enable_mask = status_mask << 16;
+ __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+}
+
+void
+i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask)
+{
+ u32 enable_mask;
+
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+ status_mask);
+ else
+ enable_mask = status_mask << 16;
+ __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+}
+
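A minimal sketch of the status/enable relationship the new helpers rely on, with made-up bit positions (the real ones live in i915_reg.h): each status bit in the low half of PIPESTAT has its enable bit 16 positions above it, so the common case is simply enable_mask = status_mask << 16, and vlv_get_pipestat_enable_mask() above only carves out the Valleyview exceptions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for PIPESTAT status bits (low 16 bits). */
#define VBLANK_STATUS    (1u << 2)
#define CRC_DONE_STATUS  (1u << 4)

static uint32_t enable_mask_from_status(uint32_t status_mask)
{
	/* Enable bits live in the upper 16 bits, one per status bit. */
	return status_mask << 16;
}

int main(void)
{
	uint32_t status = VBLANK_STATUS | CRC_DONE_STATUS;
	uint32_t enable = enable_mask_from_status(status);

	/* Writing enable | status both arms the interrupt and clears any
	 * stale status, mirroring __i915_enable_pipestat() above. */
	printf("status 0x%08x -> enable 0x%08x -> write 0x%08x\n",
	       status, enable, enable | status);
	return 0;
}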
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
*/
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
@@ -504,10 +604,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, PIPE_A,
- PIPE_LEGACY_BLC_EVENT_ENABLE);
+ PIPE_LEGACY_BLC_EVENT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -524,7 +624,7 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Locking is horribly broken here, but whatever. */
@@ -548,7 +648,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
*/
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
u32 high1, high2, low, pixel, vbl_start;
@@ -604,7 +704,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int reg = PIPE_FRMCOUNT_GM45(pipe);
if (!i915_pipe_enabled(dev, pipe)) {
@@ -859,8 +959,8 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
static void i915_hotplug_work_func(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- hotplug_work);
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_connector *intel_connector;
@@ -928,9 +1028,14 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
+static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
+{
+ del_timer_sync(&dev_priv->hotplug_reenable_timer);
+}
+
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
@@ -981,8 +1086,8 @@ static void notify_ring(struct drm_device *dev,
static void gen6_pm_rps_work(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- rps.work);
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, rps.work);
u32 pm_iir;
int new_delay, adj;
@@ -990,13 +1095,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer code */
- snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+ snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
- WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
+ WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
- if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
+ if ((pm_iir & dev_priv->pm_rps_events) == 0)
return;
mutex_lock(&dev_priv->rps.hw_lock);
@@ -1007,36 +1112,38 @@ static void gen6_pm_rps_work(struct work_struct *work)
adj *= 2;
else
adj = 1;
- new_delay = dev_priv->rps.cur_delay + adj;
+ new_delay = dev_priv->rps.cur_freq + adj;
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
- if (new_delay < dev_priv->rps.rpe_delay)
- new_delay = dev_priv->rps.rpe_delay;
+ if (new_delay < dev_priv->rps.efficient_freq)
+ new_delay = dev_priv->rps.efficient_freq;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
- if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
- new_delay = dev_priv->rps.rpe_delay;
+ if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
+ new_delay = dev_priv->rps.efficient_freq;
else
- new_delay = dev_priv->rps.min_delay;
+ new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
else
adj = -1;
- new_delay = dev_priv->rps.cur_delay + adj;
+ new_delay = dev_priv->rps.cur_freq + adj;
} else { /* unknown event */
- new_delay = dev_priv->rps.cur_delay;
+ new_delay = dev_priv->rps.cur_freq;
}
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
new_delay = clamp_t(int, new_delay,
- dev_priv->rps.min_delay, dev_priv->rps.max_delay);
- dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit);
+
+ dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
if (IS_VALLEYVIEW(dev_priv->dev))
valleyview_set_rps(dev_priv->dev, new_delay);
@@ -1058,8 +1165,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
*/
static void ivybridge_parity_work(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- l3_parity.error_work);
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[6];
uint32_t misccpctl;
@@ -1131,7 +1238,7 @@ out:
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_L3_DPF(dev))
return;
@@ -1177,8 +1284,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
- DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false, "GT error interrupt 0x%08x",
+ gt_iir);
}
if (gt_iir & GT_PARITY_ERROR(dev))
@@ -1242,13 +1349,16 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger,
const u32 *hpd)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int i;
bool storm_detected = false;
if (!hotplug_trigger)
return;
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_trigger);
+
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
@@ -1295,14 +1405,14 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
static void gmbus_irq_handler(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
wake_up_all(&dev_priv->gmbus_wait_queue);
}
static void dp_aux_irq_handler(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
wake_up_all(&dev_priv->gmbus_wait_queue);
}
@@ -1408,10 +1518,10 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
- if (pm_iir & GEN6_PM_RPS_EVENTS) {
+ if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
- snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
+ dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+ snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1422,23 +1532,89 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
- DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
- i915_handle_error(dev_priv->dev, false);
+ i915_handle_error(dev_priv->dev, false,
+ "VEBOX CS error interrupt 0x%08x",
+ pm_iir);
}
}
}
+static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pipe_stats[I915_MAX_PIPES] = { };
+ int pipe;
+
+ spin_lock(&dev_priv->irq_lock);
+ for_each_pipe(pipe) {
+ int reg;
+ u32 mask, iir_bit = 0;
+
+ /*
+ * PIPESTAT bits get signalled even when the interrupt is
+ * disabled with the mask bits, and some of the status bits do
+ * not generate interrupts at all (like the underrun bit). Hence
+ * we need to be careful that we only handle what we want to
+ * handle.
+ */
+ mask = 0;
+ if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
+ mask |= PIPE_FIFO_UNDERRUN_STATUS;
+
+ switch (pipe) {
+ case PIPE_A:
+ iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ break;
+ case PIPE_B:
+ iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ break;
+ }
+ if (iir & iir_bit)
+ mask |= dev_priv->pipestat_irq_mask[pipe];
+
+ if (!mask)
+ continue;
+
+ reg = PIPESTAT(pipe);
+ mask |= PIPESTAT_INT_ENABLE_MASK;
+ pipe_stats[pipe] = I915_READ(reg) & mask;
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
+ PIPESTAT_INT_STATUS_MASK))
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ spin_unlock(&dev_priv->irq_lock);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(dev, pipe);
+
+ if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
+
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev);
+}
+
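A standalone sketch of the filtering described in the comment inside the handler above, with invented bit values: only the underrun bit (when reporting is enabled) and the events currently enabled for the pipe are read back and acked, so unrelated PIPESTAT status bits stay untouched.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for PIPESTAT bits. */
#define FIFO_UNDERRUN_STATUS  (1u << 15)
#define VBLANK_STATUS         (1u << 2)
#define CRC_DONE_STATUS       (1u << 4)
#define INT_ENABLE_MASK       0xffff0000u

int main(void)
{
	uint32_t hw = FIFO_UNDERRUN_STATUS | VBLANK_STATUS | CRC_DONE_STATUS;
	uint32_t pipe_irq_mask = VBLANK_STATUS;	/* events enabled on this pipe */
	int underrun_reporting = 1;		/* underrun reporting enabled */
	int iir_flagged_pipe = 1;		/* IIR had this pipe's event bit */
	uint32_t mask = 0, handled, writeback;

	if (underrun_reporting)
		mask |= FIFO_UNDERRUN_STATUS;
	if (iir_flagged_pipe)
		mask |= pipe_irq_mask;

	handled = hw & mask;
	/* Preserve the enable half so the write-back acks only the status
	 * bits we handled and leaves everything else alone. */
	writeback = (hw & INT_ENABLE_MASK) | handled;

	printf("pipestat 0x%08x -> handled 0x%08x -> ack 0x%08x\n",
	       hw, handled, writeback);
	return 0;
}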
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
- unsigned long irqflags;
- int pipe;
- u32 pipe_stats[I915_MAX_PIPES];
-
- atomic_inc(&dev_priv->irq_received);
while (true) {
iir = I915_READ(VLV_IIR);
@@ -1452,44 +1628,13 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- for_each_pipe(pipe) {
- int reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
-
- /*
- * Clear the PIPE*STAT regs before the IIR
- */
- if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
- I915_WRITE(reg, pipe_stats[pipe]);
- }
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- for_each_pipe(pipe) {
- if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(dev, pipe);
-
- if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip(dev, pipe);
- }
-
- if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
- }
+ valleyview_pipestat_irq_handler(dev, iir);
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_status);
-
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
@@ -1499,8 +1644,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_READ(PORT_HOTPLUG_STAT);
}
- if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- gmbus_irq_handler(dev);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
@@ -1516,7 +1659,7 @@ out:
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
@@ -1559,12 +1702,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
- DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+ DRM_ERROR("PCH transcoder A FIFO underrun\n");
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
- DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+ DRM_ERROR("PCH transcoder B FIFO underrun\n");
}
static void ivb_err_int_handler(struct drm_device *dev)
@@ -1580,8 +1723,8 @@ static void ivb_err_int_handler(struct drm_device *dev)
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
false))
- DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
}
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
@@ -1606,24 +1749,24 @@ static void cpt_serr_int_handler(struct drm_device *dev)
if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
- DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+ DRM_ERROR("PCH transcoder A FIFO underrun\n");
if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
- DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+ DRM_ERROR("PCH transcoder B FIFO underrun\n");
if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
false))
- DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
+ DRM_ERROR("PCH transcoder C FIFO underrun\n");
I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
@@ -1678,8 +1821,8 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
if (de_iir & DE_PIPE_CRC_DONE(pipe))
i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -1711,7 +1854,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe i;
+ enum pipe pipe;
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev);
@@ -1722,14 +1865,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(dev);
- for_each_pipe(i) {
- if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
- drm_handle_vblank(dev, i);
+ for_each_pipe(pipe) {
+ if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
+ drm_handle_vblank(dev, pipe);
/* plane/pipes map 1:1 on ilk+ */
- if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
- intel_prepare_page_flip(dev, i);
- intel_finish_page_flip_plane(dev, i);
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
}
}
@@ -1747,12 +1890,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
- atomic_inc(&dev_priv->irq_received);
-
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
intel_uncore_check_errors(dev);
@@ -1821,8 +1962,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
uint32_t tmp = 0;
enum pipe pipe;
- atomic_inc(&dev_priv->irq_received);
-
master_ctl = I915_READ(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
@@ -1884,8 +2023,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
false))
- DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
}
if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
@@ -1962,8 +2101,8 @@ static void i915_error_work_func(struct work_struct *work)
{
struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
work);
- drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
- gpu_error);
+ struct drm_i915_private *dev_priv =
+ container_of(error, struct drm_i915_private, gpu_error);
struct drm_device *dev = dev_priv->dev;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
@@ -2127,11 +2266,18 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-void i915_handle_error(struct drm_device *dev, bool wedged)
+void i915_handle_error(struct drm_device *dev, bool wedged,
+ const char *fmt, ...)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ va_list args;
+ char error_msg[80];
- i915_capture_error_state(dev);
+ va_start(args, fmt);
+ vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+ va_end(args);
+
+ i915_capture_error_state(dev, wedged, error_msg);
i915_report_and_clear_eir(dev);
if (wedged) {
@@ -2165,7 +2311,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj;
@@ -2197,8 +2343,8 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
- crtc->y * crtc->fb->pitches[0] +
- crtc->x * crtc->fb->bits_per_pixel/8);
+ crtc->y * crtc->primary->fb->pitches[0] +
+ crtc->x * crtc->primary->fb->bits_per_pixel/8);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -2214,7 +2360,7 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
*/
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
if (!i915_pipe_enabled(dev, pipe))
@@ -2223,13 +2369,13 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
else
i915_enable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_ENABLE);
+ PIPE_VBLANK_INTERRUPT_STATUS);
/* maintain vblank delivery even in deep C-states */
- if (dev_priv->info->gen == 3)
+ if (INTEL_INFO(dev)->gen == 3)
I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -2238,7 +2384,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
@@ -2255,22 +2401,15 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- u32 imr;
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- imr = I915_READ(VLV_IMR);
- if (pipe == PIPE_A)
- imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- else
- imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- I915_WRITE(VLV_IMR, imr);
i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -2297,22 +2436,22 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
*/
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (dev_priv->info->gen == 3)
+ if (INTEL_INFO(dev)->gen == 3)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_ENABLE |
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ PIPE_VBLANK_INTERRUPT_STATUS |
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
@@ -2324,19 +2463,12 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- u32 imr;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_disable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
- imr = I915_READ(VLV_IMR);
- if (pipe == PIPE_A)
- imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- else
- imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- I915_WRITE(VLV_IMR, imr);
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
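/*
 * A minimal sketch of the "masked bit" write convention used for the INSTPM
 * writes a few lines up: bits 31:16 select which of bits 15:0 actually get
 * written, so a single 32-bit store can set or clear individual bits without
 * a read-modify-write.  Encoding assumed from the driver's
 * _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers; the bit position used
 * below is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)	(((uint32_t)(a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)	((uint32_t)(a) << 16)

/* apply a masked write to a simulated register */
static uint32_t masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;		/* which bits to touch */

	return (reg & ~mask) | (val & mask);	/* untouched bits keep their value */
}

int main(void)
{
	uint32_t instpm = 0;
	uint32_t agpbusy_dis = 1 << 11;		/* hypothetical bit position */

	instpm = masked_write(instpm, MASKED_BIT_ENABLE(agpbusy_dis));
	printf("after enable:  %#x\n", instpm);
	instpm = masked_write(instpm, MASKED_BIT_DISABLE(agpbusy_dis));
	printf("after disable: %#x\n", instpm);
	return 0;
}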
@@ -2373,29 +2505,43 @@ static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- u32 cmd, ipehr, acthd, acthd_min;
+ u32 cmd, ipehr, head;
+ int i;
ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if ((ipehr & ~(0x3 << 16)) !=
(MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
return NULL;
- /* ACTHD is likely pointing to the dword after the actual command,
- * so scan backwards until we find the MBOX.
+ /*
+ * HEAD is likely pointing to the dword after the actual command,
+ * so scan backwards until we find the MBOX. But limit it to just 3
+ * dwords. Note that we don't care about ACTHD here since that might
+ * point at a batch, and semaphores are always emitted into the
+ * ringbuffer itself.
*/
- acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
- acthd_min = max((int)acthd - 3 * 4, 0);
- do {
- cmd = ioread32(ring->virtual_start + acthd);
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
+
+ for (i = 4; i; --i) {
+ /*
+ * Be paranoid and presume the hw has gone off into the wild -
+ * our ring is smaller than what the hardware (and hence
+ * HEAD_ADDR) allows. Also handles wrap-around.
+ */
+ head &= ring->size - 1;
+
+ /* This here seems to blow up */
+ cmd = ioread32(ring->virtual_start + head);
if (cmd == ipehr)
break;
- acthd -= 4;
- if (acthd < acthd_min)
- return NULL;
- } while (1);
+ head -= 4;
+ }
- *seqno = ioread32(ring->virtual_start+acthd+4)+1;
+ if (!i)
+ return NULL;
+
+ *seqno = ioread32(ring->virtual_start + head + 4) + 1;
return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}
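/*
 * A minimal sketch of the bounded backward scan introduced above: starting
 * at HEAD, look back at most four dwords for a matching command, masking the
 * index with (size - 1) so wrap-around and out-of-range HEAD values are
 * handled.  Assumes a power-of-two ring size; find_cmd_before_head() is a
 * hypothetical helper, not a driver function.
 */
#include <stdint.h>
#include <stdio.h>

static int find_cmd_before_head(const uint32_t *ring, uint32_t size_bytes,
				uint32_t head, uint32_t wanted)
{
	int i;

	for (i = 4; i; --i) {
		head &= size_bytes - 1;		/* stay inside the ring, handle wrap */
		if (ring[head / 4] == wanted)
			return head;		/* byte offset of the match */
		head -= 4;			/* one dword back (may wrap below 0) */
	}
	return -1;				/* not found within the window */
}

int main(void)
{
	uint32_t ring[8] = { 0, 0, 0xdeadbeef, 0x11111111, 0, 0, 0, 0 };
	/* HEAD points one dword past the command we are looking for */
	int off = find_cmd_before_head(ring, sizeof(ring), 3 * 4, 0xdeadbeef);

	printf("found at byte offset %d\n", off);
	return 0;
}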
@@ -2429,7 +2575,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
}
static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
+ring_stuck(struct intel_ring_buffer *ring, u64 acthd)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2448,9 +2594,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
*/
tmp = I915_READ_CTL(ring);
if (tmp & RING_WAIT) {
- DRM_ERROR("Kicking stuck wait on %s\n",
- ring->name);
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false,
+ "Kicking stuck wait on %s",
+ ring->name);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
}
@@ -2460,9 +2606,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
default:
return HANGCHECK_HUNG;
case 1:
- DRM_ERROR("Kicking stuck semaphore on %s\n",
- ring->name);
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false,
+ "Kicking stuck semaphore on %s",
+ ring->name);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
case 0:
@@ -2484,7 +2630,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
static void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i;
int busy_count = 0, rings_hung = 0;
@@ -2492,13 +2638,13 @@ static void i915_hangcheck_elapsed(unsigned long data)
#define BUSY 1
#define KICK 5
#define HUNG 20
-#define FIRE 30
- if (!i915_enable_hangcheck)
+ if (!i915.enable_hangcheck)
return;
for_each_ring(ring, dev_priv, i) {
- u32 seqno, acthd;
+ u64 acthd;
+ u32 seqno;
bool busy = true;
semaphore_clear_deadlocks(dev_priv);
@@ -2576,7 +2722,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
}
for_each_ring(ring, dev_priv, i) {
- if (ring->hangcheck.score > FIRE) {
+ if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
DRM_INFO("%s on %s\n",
stuck[i] ? "stuck" : "no progress",
ring->name);
@@ -2585,7 +2731,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
}
if (rings_hung)
- return i915_handle_error(dev, true);
+ return i915_handle_error(dev, true, "Ring hung");
if (busy_count)
/* Reset timer in case chip hangs without another request
@@ -2596,7 +2742,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
void i915_queue_hangcheck(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!i915_enable_hangcheck)
+ if (!i915.enable_hangcheck)
return;
mod_timer(&dev_priv->gpu_error.hangcheck_timer,
@@ -2643,9 +2789,7 @@ static void gen5_gt_irq_preinstall(struct drm_device *dev)
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- atomic_set(&dev_priv->irq_received, 0);
+ struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(HWSTAM, 0xeffe);
@@ -2660,11 +2804,9 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
static void valleyview_irq_preinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- atomic_set(&dev_priv->irq_received, 0);
-
/* VLV magic */
I915_WRITE(VLV_IMR, 0);
I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
@@ -2694,8 +2836,6 @@ static void gen8_irq_preinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- atomic_set(&dev_priv->irq_received, 0);
-
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
@@ -2740,7 +2880,7 @@ static void gen8_irq_preinstall(struct drm_device *dev)
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 hotplug_irqs, hotplug, enabled_irqs = 0;
@@ -2775,7 +2915,7 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
static void ibx_irq_postinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 mask;
if (HAS_PCH_NOP(dev))
@@ -2821,7 +2961,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
POSTING_READ(GTIER);
if (INTEL_INFO(dev)->gen >= 6) {
- pm_irqs |= GEN6_PM_RPS_EVENTS;
+ pm_irqs |= dev_priv->pm_rps_events;
if (HAS_VEBOX(dev))
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
@@ -2837,7 +2977,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
static int ironlake_irq_postinstall(struct drm_device *dev)
{
unsigned long irqflags;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 display_mask, extra_mask;
if (INTEL_INFO(dev)->gen >= 7) {
@@ -2885,44 +3025,113 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
return 0;
}
+static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
+{
+ u32 pipestat_mask;
+ u32 iir_mask;
+
+ pipestat_mask = PIPESTAT_INT_STATUS_MASK |
+ PIPE_FIFO_UNDERRUN_STATUS;
+
+ I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
+ I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+ POSTING_READ(PIPESTAT(PIPE_A));
+
+ pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+ PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+ i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
+ PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+
+ iir_mask = I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ dev_priv->irq_mask &= ~iir_mask;
+
+ I915_WRITE(VLV_IIR, iir_mask);
+ I915_WRITE(VLV_IIR, iir_mask);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+ POSTING_READ(VLV_IER);
+}
+
+static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
+{
+ u32 pipestat_mask;
+ u32 iir_mask;
+
+ iir_mask = I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+
+ dev_priv->irq_mask |= iir_mask;
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IIR, iir_mask);
+ I915_WRITE(VLV_IIR, iir_mask);
+ POSTING_READ(VLV_IIR);
+
+ pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+ PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+ i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
+ PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+
+ pipestat_mask = PIPESTAT_INT_STATUS_MASK |
+ PIPE_FIFO_UNDERRUN_STATUS;
+ I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
+ I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+ POSTING_READ(PIPESTAT(PIPE_A));
+}
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (dev_priv->display_irqs_enabled)
+ return;
+
+ dev_priv->display_irqs_enabled = true;
+
+ if (dev_priv->dev->irq_enabled)
+ valleyview_display_irqs_install(dev_priv);
+}
+
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (!dev_priv->display_irqs_enabled)
+ return;
+
+ dev_priv->display_irqs_enabled = false;
+
+ if (dev_priv->dev->irq_enabled)
+ valleyview_display_irqs_uninstall(dev_priv);
+}
+
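/*
 * A minimal sketch of the enable/disable bookkeeping added above: a single
 * software flag records whether display interrupts are wanted, and the
 * hardware is only programmed when the interrupt handler is actually
 * installed, so enable/disable stay idempotent.  The types and the
 * hw_install()/hw_uninstall() hooks are hypothetical, not driver API.
 */
#include <stdbool.h>
#include <stdio.h>

struct display_irq_state {
	bool irqs_wanted;	/* mirrors display_irqs_enabled */
	bool handler_installed;	/* mirrors dev->irq_enabled */
};

static void hw_install(void)   { printf("program IER/IMR/PIPESTAT\n"); }
static void hw_uninstall(void) { printf("mask and clear IER/IMR/PIPESTAT\n"); }

static void enable_display_irqs(struct display_irq_state *s)
{
	if (s->irqs_wanted)
		return;			/* already enabled: nothing to do */
	s->irqs_wanted = true;
	if (s->handler_installed)
		hw_install();
}

static void disable_display_irqs(struct display_irq_state *s)
{
	if (!s->irqs_wanted)
		return;
	s->irqs_wanted = false;
	if (s->handler_installed)
		hw_uninstall();
}

int main(void)
{
	struct display_irq_state s = { .handler_installed = true };

	enable_display_irqs(&s);	/* programs the hardware */
	enable_display_irqs(&s);	/* no-op the second time */
	disable_display_irqs(&s);	/* masks it again */
	return 0;
}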
static int valleyview_irq_postinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 enable_mask;
- u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
- PIPE_CRC_DONE_ENABLE;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- enable_mask = I915_DISPLAY_PORT_INTERRUPT;
- enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
- /*
- *Leave vblank interrupts masked initially. enable/disable will
- * toggle them based on usage.
- */
- dev_priv->irq_mask = (~enable_mask) |
- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+ dev_priv->irq_mask = ~0;
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IER, enable_mask);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(PIPESTAT(0), 0xffff);
- I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
/* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
- i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
+ if (dev_priv->display_irqs_enabled)
+ valleyview_display_irqs_install(dev_priv);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
I915_WRITE(VLV_IIR, 0xffffffff);
@@ -3018,8 +3227,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
if (!dev_priv)
return;
- atomic_set(&dev_priv->irq_received, 0);
-
I915_WRITE(GEN8_MASTER_IRQ, 0);
#define GEN8_IRQ_FINI_NDX(type, which) do { \
@@ -3054,13 +3261,14 @@ static void gen8_irq_uninstall(struct drm_device *dev)
static void valleyview_irq_uninstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
int pipe;
if (!dev_priv)
return;
- del_timer_sync(&dev_priv->hotplug_reenable_timer);
+ intel_hpd_irq_uninstall(dev_priv);
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
@@ -3068,8 +3276,14 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- for_each_pipe(pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->display_irqs_enabled)
+ valleyview_display_irqs_uninstall(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ dev_priv->irq_mask = 0;
+
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
@@ -3078,12 +3292,12 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
static void ironlake_irq_uninstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv)
return;
- del_timer_sync(&dev_priv->hotplug_reenable_timer);
+ intel_hpd_irq_uninstall(dev_priv);
I915_WRITE(HWSTAM, 0xffffffff);
@@ -3109,11 +3323,9 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
static void i8xx_irq_preinstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- atomic_set(&dev_priv->irq_received, 0);
-
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE16(IMR, 0xffff);
@@ -3123,7 +3335,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
static int i8xx_irq_postinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
I915_WRITE16(EMR,
@@ -3148,8 +3360,8 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -3161,7 +3373,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
static bool i8xx_handle_vblank(struct drm_device *dev,
int plane, int pipe, u32 iir)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
if (!drm_handle_vblank(dev, pipe))
@@ -3189,7 +3401,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u16 iir, new_iir;
u32 pipe_stats[2];
unsigned long irqflags;
@@ -3198,8 +3410,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
- atomic_inc(&dev_priv->irq_received);
-
iir = I915_READ16(IIR);
if (iir == 0)
return IRQ_NONE;
@@ -3212,7 +3422,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false,
+ "Command parser error, iir 0x%08x",
+ iir);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
@@ -3221,12 +3433,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
/*
* Clear the PIPE*STAT regs before the IIR
*/
- if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
+ if (pipe_stats[pipe] & 0x8000ffff)
I915_WRITE(reg, pipe_stats[pipe]);
- }
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3249,6 +3457,10 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
iir = new_iir;
@@ -3259,7 +3471,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
static void i8xx_irq_uninstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
for_each_pipe(pipe) {
@@ -3274,11 +3486,9 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
static void i915_irq_preinstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- atomic_set(&dev_priv->irq_received, 0);
-
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3294,7 +3504,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
static int i915_irq_postinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
unsigned long irqflags;
@@ -3335,8 +3545,8 @@ static int i915_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -3348,7 +3558,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
static bool i915_handle_vblank(struct drm_device *dev,
int plane, int pipe, u32 iir)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
if (!drm_handle_vblank(dev, pipe))
@@ -3376,7 +3586,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
unsigned long irqflags;
u32 flip_mask =
@@ -3384,8 +3594,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
int pipe, ret = IRQ_NONE;
- atomic_inc(&dev_priv->irq_received);
-
iir = I915_READ(IIR);
do {
bool irq_received = (iir & ~flip_mask) != 0;
@@ -3398,7 +3606,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false,
+ "Command parser error, iir 0x%08x",
+ iir);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
@@ -3406,9 +3616,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
/* Clear the PIPE*STAT regs before the IIR */
if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
irq_received = true;
}
@@ -3424,9 +3631,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_status);
-
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
@@ -3453,6 +3657,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -3484,10 +3692,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i915_irq_uninstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- del_timer_sync(&dev_priv->hotplug_reenable_timer);
+ intel_hpd_irq_uninstall(dev_priv);
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -3508,11 +3716,9 @@ static void i915_irq_uninstall(struct drm_device * dev)
static void i965_irq_preinstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- atomic_set(&dev_priv->irq_received, 0);
-
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3526,7 +3732,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
u32 error_mask;
unsigned long irqflags;
@@ -3551,9 +3757,9 @@ static int i965_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/*
@@ -3585,7 +3791,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
static void i915_hpd_irq_setup(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 hotplug_en;
@@ -3617,25 +3823,21 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
unsigned long irqflags;
- int irq_received;
int ret = IRQ_NONE, pipe;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
- atomic_inc(&dev_priv->irq_received);
-
iir = I915_READ(IIR);
for (;;) {
+ bool irq_received = (iir & ~flip_mask) != 0;
bool blc_event = false;
- irq_received = (iir & ~flip_mask) != 0;
-
/* Can't rely on pipestat interrupt bit in iir as it might
* have been cleared after the pipestat interrupt was received.
* It doesn't set the bit in iir again, but it still produces
@@ -3643,7 +3845,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false,
+ "Command parser error, iir 0x%08x",
+ iir);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
@@ -3653,11 +3857,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
* Clear the PIPE*STAT regs before the IIR
*/
if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = 1;
+ irq_received = true;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3674,9 +3875,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
HOTPLUG_INT_STATUS_G4X :
HOTPLUG_INT_STATUS_I915);
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_status);
-
intel_hpd_irq_handler(dev, hotplug_trigger,
IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
@@ -3706,8 +3904,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- }
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev);
@@ -3740,13 +3941,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
static void i965_irq_uninstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
if (!dev_priv)
return;
- del_timer_sync(&dev_priv->hotplug_reenable_timer);
+ intel_hpd_irq_uninstall(dev_priv);
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3763,9 +3964,9 @@ static void i965_irq_uninstall(struct drm_device * dev)
I915_WRITE(IIR, I915_READ(IIR));
}
-static void i915_reenable_hotplug_timer_func(unsigned long data)
+static void intel_hpd_irq_reenable(unsigned long data)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
unsigned long irqflags;
@@ -3807,10 +4008,13 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ /* Let's track the enabled rps events */
+ dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+
setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
(unsigned long) dev);
- setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
+ setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
(unsigned long) dev_priv);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
@@ -3906,32 +4110,32 @@ void intel_hpd_init(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-/* Disable interrupts so we can allow Package C8+. */
-void hsw_pc8_disable_interrupts(struct drm_device *dev)
+/* Disable interrupts so we can allow runtime PM. */
+void hsw_runtime_pm_disable_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
- dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
- dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
- dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
- dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
+ dev_priv->pm.regsave.deimr = I915_READ(DEIMR);
+ dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR);
+ dev_priv->pm.regsave.gtimr = I915_READ(GTIMR);
+ dev_priv->pm.regsave.gtier = I915_READ(GTIER);
+ dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
ironlake_disable_display_irq(dev_priv, 0xffffffff);
ibx_disable_display_interrupt(dev_priv, 0xffffffff);
ilk_disable_gt_irq(dev_priv, 0xffffffff);
snb_disable_pm_irq(dev_priv, 0xffffffff);
- dev_priv->pc8.irqs_disabled = true;
+ dev_priv->pm.irqs_disabled = true;
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-/* Restore interrupts so we can recover from Package C8+. */
-void hsw_pc8_restore_interrupts(struct drm_device *dev)
+/* Restore interrupts so we can recover from runtime PM. */
+void hsw_runtime_pm_restore_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -3951,13 +4155,13 @@ void hsw_pc8_restore_interrupts(struct drm_device *dev)
val = I915_READ(GEN6_PMIMR);
WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
- dev_priv->pc8.irqs_disabled = false;
+ dev_priv->pm.irqs_disabled = false;
- ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
- ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
- ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
- snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
- I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
+ ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr);
+ ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr);
+ ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr);
+ snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr);
+ I915_WRITE(GTIER, dev_priv->pm.regsave.gtier);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
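/*
 * A minimal sketch of the save/mask/restore pattern used by the runtime-PM
 * interrupt helpers above: snapshot the current mask registers, disable
 * everything, and later re-enable exactly what was unmasked before.  The
 * register array and indices below are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

enum { REG_DEIMR, REG_SDEIMR, REG_GTIMR, NUM_REGS };

struct irq_regsave {
	uint32_t regs[NUM_REGS];
	int saved;
};

static void save_and_mask(uint32_t hw[NUM_REGS], struct irq_regsave *rs)
{
	for (int i = 0; i < NUM_REGS; i++) {
		rs->regs[i] = hw[i];	/* remember the current mask */
		hw[i] = 0xffffffff;	/* mask (disable) every interrupt */
	}
	rs->saved = 1;
}

static void restore(uint32_t hw[NUM_REGS], const struct irq_regsave *rs)
{
	if (!rs->saved)
		return;
	for (int i = 0; i < NUM_REGS; i++)
		hw[i] = rs->regs[i];	/* unmask what was unmasked before */
}

int main(void)
{
	uint32_t hw[NUM_REGS] = { 0xfffffffe, 0xffffff00, 0xf0f0f0f0 };
	struct irq_regsave rs = { 0 };

	save_and_mask(hw, &rs);
	restore(hw, &rs);
	printf("DEIMR restored to %#x\n", hw[REG_DEIMR]);
	return 0;
}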
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
new file mode 100644
index 000000000000..d1d7980f0e01
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+
+struct i915_params i915 __read_mostly = {
+ .modeset = -1,
+ .panel_ignore_lid = 1,
+ .powersave = 1,
+ .semaphores = -1,
+ .lvds_downclock = 0,
+ .lvds_channel_mode = 0,
+ .panel_use_ssc = -1,
+ .vbt_sdvo_panel_type = -1,
+ .enable_rc6 = -1,
+ .enable_fbc = -1,
+ .enable_hangcheck = true,
+ .enable_ppgtt = -1,
+ .enable_psr = 0,
+ .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
+ .disable_power_well = 1,
+ .enable_ips = 1,
+ .fastboot = 0,
+ .prefault_disable = 0,
+ .reset = true,
+ .invert_brightness = 0,
+ .disable_display = 0,
+ .enable_cmd_parser = 0,
+};
+
+module_param_named(modeset, i915.modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+ "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+ "1=on, -1=force vga console preference [default])");
+
+module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+ "Override lid status (0=autodetect, 1=autodetect disabled [default], "
+ "-1=force lid closed, -2=force lid open)");
+
+module_param_named(powersave, i915.powersave, int, 0600);
+MODULE_PARM_DESC(powersave,
+ "Enable powersavings, fbc, downclocking, etc. (default: true)");
+
+module_param_named(semaphores, i915.semaphores, int, 0400);
+MODULE_PARM_DESC(semaphores,
+ "Use semaphores for inter-ring sync "
+ "(default: -1 (use per-chip defaults))");
+
+module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
+MODULE_PARM_DESC(enable_rc6,
+ "Enable power-saving render C-state 6. "
+ "Different stages can be selected via bitmask values "
+ "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
+ "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
+ "default: -1 (use per-chip default)");
+
+module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
+MODULE_PARM_DESC(enable_fbc,
+ "Enable frame buffer compression for power savings "
+ "(default: -1 (use per-chip default))");
+
+module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
+MODULE_PARM_DESC(lvds_downclock,
+ "Use panel (LVDS/eDP) downclocking for power savings "
+ "(default: false)");
+
+module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
+MODULE_PARM_DESC(lvds_channel_mode,
+ "Specify LVDS channel mode "
+ "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
+module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+ "Use Spread Spectrum Clock with panels [LVDS/eDP] "
+ "(default: auto from VBT)");
+
+module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+ "Override/Ignore selection of SDVO panel mode in the VBT "
+ "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
+
+module_param_named(reset, i915.reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+ "Periodically check GPU activity for detecting hangs. "
+ "WARNING: Disabling this can cause system wide hangs. "
+ "(default: true)");
+
+module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
+MODULE_PARM_DESC(enable_ppgtt,
+ "Override PPGTT usage. "
+ "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
+
+module_param_named(enable_psr, i915.enable_psr, int, 0600);
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+
+module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
+MODULE_PARM_DESC(preliminary_hw_support,
+ "Enable preliminary hardware support.");
+
+module_param_named(disable_power_well, i915.disable_power_well, int, 0600);
+MODULE_PARM_DESC(disable_power_well,
+ "Disable the power well when possible (default: true)");
+
+module_param_named(enable_ips, i915.enable_ips, int, 0600);
+MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+
+module_param_named(fastboot, i915.fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot,
+ "Try to skip unnecessary mode sets at boot time (default: false)");
+
+module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
+MODULE_PARM_DESC(prefault_disable,
+ "Disable page prefaulting for pread/pwrite/reloc (default:false). "
+ "For developers only.");
+
+module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
+MODULE_PARM_DESC(invert_brightness,
+ "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+
+module_param_named(disable_display, i915.disable_display, bool, 0600);
+MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
+
+module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
+MODULE_PARM_DESC(enable_cmd_parser,
+ "Enable command parsing (1=enabled, 0=disabled [default])");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a48b7cad6f11..9f5b18d9d885 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,7 +26,6 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
-#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -73,7 +72,8 @@
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
-#define LBB 0xf4
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
+
/* Graphics reset regs */
#define I965_GDRST 0xc0 /* PCI config register */
@@ -175,6 +175,18 @@
#define VGA_CR_DATA_CGA 0x3d5
/*
+ * Instruction field definitions used by the command parser
+ */
+#define INSTR_CLIENT_SHIFT 29
+#define INSTR_CLIENT_MASK 0xE0000000
+#define INSTR_MI_CLIENT 0x0
+#define INSTR_BC_CLIENT 0x2
+#define INSTR_RC_CLIENT 0x3
+#define INSTR_SUBCLIENT_SHIFT 27
+#define INSTR_SUBCLIENT_MASK 0x18000000
+#define INSTR_MEDIA_SUBCLIENT 0x2
+
+/*
* Memory interface instructions used by the kernel
*/
#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
@@ -377,14 +389,30 @@
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
+
+/* See the PUNIT HAS v0.8 for the below bits */
+enum punit_power_well {
+ PUNIT_POWER_WELL_RENDER = 0,
+ PUNIT_POWER_WELL_MEDIA = 1,
+ PUNIT_POWER_WELL_DISP2D = 3,
+ PUNIT_POWER_WELL_DPIO_CMN_BC = 5,
+ PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6,
+ PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7,
+ PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8,
+ PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
+ PUNIT_POWER_WELL_DPIO_RX0 = 10,
+ PUNIT_POWER_WELL_DPIO_RX1 = 11,
+
+ PUNIT_POWER_WELL_NUM,
+};
+
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
-#define PUNIT_CLK_GATE 1
-#define PUNIT_PWR_RESET 2
-#define PUNIT_PWR_GATE 3
-#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
-#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
-#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
+#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
+#define PUNIT_PWRGT_PWR_ON(power_well) (0 << ((power_well) * 2))
+#define PUNIT_PWRGT_CLK_GATE(power_well) (1 << ((power_well) * 2))
+#define PUNIT_PWRGT_RESET(power_well) (2 << ((power_well) * 2))
+#define PUNIT_PWRGT_PWR_GATE(power_well) (3 << ((power_well) * 2))
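/*
 * A minimal sketch of the per-power-well field encoding introduced above:
 * every well owns a 2-bit field in the PUNIT control/status word, so both
 * the mask and the requested state are derived from the well index.
 * decode_well_state() is a hypothetical helper for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PWRGT_MASK(well)	(3u << ((well) * 2))
#define PWRGT_PWR_ON(well)	(0u << ((well) * 2))
#define PWRGT_PWR_GATE(well)	(3u << ((well) * 2))

static unsigned int decode_well_state(uint32_t status, int well)
{
	return (status & PWRGT_MASK(well)) >> (well * 2);
}

int main(void)
{
	uint32_t ctrl = 0;
	int disp2d = 3;				/* PUNIT_POWER_WELL_DISP2D */

	/* request power-gating of one well without disturbing the others */
	ctrl = (ctrl & ~PWRGT_MASK(disp2d)) | PWRGT_PWR_GATE(disp2d);
	printf("ctrl=%#x, DISP2D state=%u\n", ctrl, decode_well_state(ctrl, disp2d));
	return 0;
}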
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
@@ -678,6 +706,7 @@
#define BLT_HWS_PGA_GEN7 (0x04280)
#define VEBOX_HWS_PGA_GEN7 (0x04380)
#define RING_ACTHD(base) ((base)+0x74)
+#define RING_ACTHD_UDW(base) ((base)+0x5c)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
#define RING_TIMESTAMP(base) ((base)+0x358)
@@ -720,6 +749,7 @@
#define RING_INSTPS(base) ((base)+0x70)
#define RING_DMA_FADD(base) ((base)+0x78)
#define RING_INSTPM(base) ((base)+0xc0)
+#define RING_MI_MODE(base) ((base)+0x9c)
#define INSTPS 0x02070 /* 965+ only */
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
@@ -789,15 +819,22 @@
#define _3D_CHICKEN3 0x02090
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
-#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1)
+#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
+#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
+# define MODE_IDLE (1 << 9)
#define GEN6_GT_MODE 0x20d0
-#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN7_GT_MODE 0x7008
+#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
+#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
+#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
+#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
+#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GFX_MODE 0x02520
@@ -934,13 +971,19 @@
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
+#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
+#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
#define CACHE_MODE_1 0x7004 /* IVB+ */
-#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
+#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
#define GEN6_BLITTER_ECOSKPD 0x221d0
#define GEN6_BLITTER_LOCK_SHIFT 16
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
+#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -1046,9 +1089,8 @@
#define FBC_CTL_IDLE_LINE (2<<2)
#define FBC_CTL_IDLE_DEBUG (3<<2)
#define FBC_CTL_CPU_FENCE (1<<1)
-#define FBC_CTL_PLANEA (0<<0)
-#define FBC_CTL_PLANEB (1<<0)
-#define FBC_FENCE_OFF 0x0321b
+#define FBC_CTL_PLANE(plane) ((plane)<<0)
+#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
#define FBC_TAG 0x03300
#define FBC_LL_SIZE (1536)
@@ -1057,9 +1099,8 @@
#define DPFC_CB_BASE 0x3200
#define DPFC_CONTROL 0x3208
#define DPFC_CTL_EN (1<<31)
-#define DPFC_CTL_PLANEA (0<<30)
-#define DPFC_CTL_PLANEB (1<<30)
-#define IVB_DPFC_CTL_PLANE_SHIFT (29)
+#define DPFC_CTL_PLANE(plane) ((plane)<<30)
+#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
#define DPFC_CTL_FENCE_EN (1<<29)
#define IVB_DPFC_CTL_FENCE_EN (1<<28)
#define DPFC_CTL_PERSISTENT_MODE (1<<25)
@@ -1120,13 +1161,6 @@
#define FBC_REND_NUKE (1<<2)
#define FBC_REND_CACHE_CLEAN (1<<1)
-#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
-#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
-#define HSW_BYPASS_FBC_QUEUE (1<<22)
-#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, + \
- _HSW_PIPE_SLICE_CHICKEN_1_A, + \
- _HSW_PIPE_SLICE_CHICKEN_1_B)
-
/*
* GPIO regs
*/
@@ -1202,6 +1236,10 @@
/*
* Clock control & power management
*/
+#define DPLL_A_OFFSET 0x6014
+#define DPLL_B_OFFSET 0x6018
+#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
+ dev_priv->info.display_mmio_offset)
#define VGA0 0x6000
#define VGA1 0x6004
@@ -1214,9 +1252,6 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
-#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
-#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_SDVO_HIGH_SPEED (1 << 30)
#define DPLL_DVO_2X_MODE (1 << 30)
@@ -1278,7 +1313,12 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
+
+#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
+#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
+#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
+ dev_priv->info.display_mmio_offset)
+
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
@@ -1315,8 +1355,6 @@
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
-#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
#define _FPA0 0x06040
#define _FPA1 0x06044
@@ -1348,7 +1386,7 @@
#define DSTATE_PLL_D3_OFF (1<<3)
#define DSTATE_GFX_CLOCK_GATING (1<<1)
#define DSTATE_DOT_CLOCK_GATING (1<<0)
-#define DSPCLK_GATE_D (dev_priv->info->display_mmio_offset + 0x6200)
+#define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -1472,10 +1510,10 @@
/*
* Palette regs
*/
-
-#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
-#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
-#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
+#define PALETTE_A_OFFSET 0xa000
+#define PALETTE_B_OFFSET 0xa800
+#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \
+ dev_priv->info.display_mmio_offset)
/* MCH MMIO space */
@@ -1862,7 +1900,7 @@
*/
/* Pipe A CRC regs */
-#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050)
+#define _PIPE_CRC_CTL_A 0x60050
#define PIPE_CRC_ENABLE (1 << 31)
/* ivb+ source selection */
#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
@@ -1902,11 +1940,11 @@
#define _PIPE_CRC_RES_4_A_IVB 0x60070
#define _PIPE_CRC_RES_5_A_IVB 0x60074
-#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060)
-#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064)
-#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068)
-#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c)
-#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080)
+#define _PIPE_CRC_RES_RED_A 0x60060
+#define _PIPE_CRC_RES_GREEN_A 0x60064
+#define _PIPE_CRC_RES_BLUE_A 0x60068
+#define _PIPE_CRC_RES_RES1_A_I915 0x6006c
+#define _PIPE_CRC_RES_RES2_A_G4X 0x60080
/* Pipe B CRC regs */
#define _PIPE_CRC_RES_1_B_IVB 0x61064
@@ -1915,59 +1953,69 @@
#define _PIPE_CRC_RES_4_B_IVB 0x61070
#define _PIPE_CRC_RES_5_B_IVB 0x61074
-#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
+#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A)
#define PIPE_CRC_RES_1_IVB(pipe) \
- _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB)
#define PIPE_CRC_RES_2_IVB(pipe) \
- _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB)
#define PIPE_CRC_RES_3_IVB(pipe) \
- _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB)
#define PIPE_CRC_RES_4_IVB(pipe) \
- _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB)
#define PIPE_CRC_RES_5_IVB(pipe) \
- _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB)
#define PIPE_CRC_RES_RED(pipe) \
- _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A)
#define PIPE_CRC_RES_GREEN(pipe) \
- _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A)
#define PIPE_CRC_RES_BLUE(pipe) \
- _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A)
#define PIPE_CRC_RES_RES1_I915(pipe) \
- _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915)
#define PIPE_CRC_RES_RES2_G4X(pipe) \
- _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
/* Pipe A timing regs */
-#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
-#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
-#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008)
-#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c)
-#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010)
-#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014)
-#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c)
-#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020)
-#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028)
+#define _HTOTAL_A 0x60000
+#define _HBLANK_A 0x60004
+#define _HSYNC_A 0x60008
+#define _VTOTAL_A 0x6000c
+#define _VBLANK_A 0x60010
+#define _VSYNC_A 0x60014
+#define _PIPEASRC 0x6001c
+#define _BCLRPAT_A 0x60020
+#define _VSYNCSHIFT_A 0x60028
/* Pipe B timing regs */
-#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000)
-#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004)
-#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008)
-#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c)
-#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010)
-#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014)
-#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c)
-#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
-#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
-
-#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
-#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
-#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
-#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define _HTOTAL_B 0x61000
+#define _HBLANK_B 0x61004
+#define _HSYNC_B 0x61008
+#define _VTOTAL_B 0x6100c
+#define _VBLANK_B 0x61010
+#define _VSYNC_B 0x61014
+#define _PIPEBSRC 0x6101c
+#define _BCLRPAT_B 0x61020
+#define _VSYNCSHIFT_B 0x61028
+
+#define TRANSCODER_A_OFFSET 0x60000
+#define TRANSCODER_B_OFFSET 0x61000
+#define TRANSCODER_C_OFFSET 0x62000
+#define TRANSCODER_EDP_OFFSET 0x6f000
+
+#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \
+ dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+
+#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A)
+#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A)
+#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A)
+#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A)
+#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A)
+#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A)
+#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
+#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
+#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
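/*
 * A minimal sketch of the table-driven addressing that _TRANSCODER2() and
 * the new *_OFFSET defines move to: per-transcoder base offsets live in a
 * table, and a register address is the offset of this transcoder minus the
 * offset of transcoder A, plus the transcoder-A register, plus the global
 * display MMIO offset.  The table mirrors the defines above; reg_addr() is
 * a hypothetical helper.
 */
#include <stdint.h>
#include <stdio.h>

enum { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP, NUM_TRANS };

static const uint32_t trans_offsets[NUM_TRANS] = {
	[TRANSCODER_A]   = 0x60000,
	[TRANSCODER_B]   = 0x61000,
	[TRANSCODER_C]   = 0x62000,
	[TRANSCODER_EDP] = 0x6f000,
};

static uint32_t reg_addr(int trans, uint32_t reg_a, uint32_t display_mmio_offset)
{
	return trans_offsets[trans] - trans_offsets[TRANSCODER_A] +
	       reg_a + display_mmio_offset;
}

int main(void)
{
	uint32_t htotal_a = 0x60000;	/* _HTOTAL_A */

	/* HTOTAL for the eDP transcoder lands at 0x6f000 on a zero-offset device */
	printf("HTOTAL(eDP) = %#x\n", reg_addr(TRANSCODER_EDP, htotal_a, 0));
	return 0;
}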
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
@@ -2084,7 +2132,7 @@
/* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110)
+#define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110)
#define PORTB_HOTPLUG_INT_EN (1 << 29)
#define PORTC_HOTPLUG_INT_EN (1 << 28)
#define PORTD_HOTPLUG_INT_EN (1 << 27)
@@ -2114,7 +2162,7 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
-#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
+#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
/*
* HDMI/DP bits are gen4+
*
@@ -2332,9 +2380,7 @@
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
-#define VIDEO_DIP_PORT_B (1 << 29)
-#define VIDEO_DIP_PORT_C (2 << 29)
-#define VIDEO_DIP_PORT_D (3 << 29)
+#define VIDEO_DIP_PORT(port) ((port) << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
#define VIDEO_DIP_ENABLE_GCP (1 << 25)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
@@ -2391,7 +2437,7 @@
#define PP_DIVISOR 0x61210
/* Panel fitting */
-#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230)
+#define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
@@ -2409,7 +2455,7 @@
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
-#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234)
+#define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -2421,25 +2467,25 @@
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
-#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
+#define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238)
-#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
+#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
_VLV_BLC_PWM_CTL2_B)
-#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
+#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
_VLV_BLC_PWM_CTL_B)
-#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
+#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
_VLV_BLC_HIST_CTL_B)
/* Backlight control */
-#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
+#define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
#define BLM_PIPE_SELECT (1 << 29)
@@ -2462,7 +2508,7 @@
#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
#define BLM_PHASE_IN_INCR_SHIFT (0)
#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
-#define BLC_PWM_CTL (dev_priv->info->display_mmio_offset + 0x61254)
+#define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254)
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
@@ -2484,7 +2530,7 @@
#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
-#define BLC_HIST_CTL (dev_priv->info->display_mmio_offset + 0x61260)
+#define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260)
/* New registers for PCH-split platforms. Safe where new bits show up, the
 * register layout matches gen4 BLC_PWM_CTL[12]. */
@@ -3178,10 +3224,10 @@
/* Display & cursor control */
/* Pipe A */
-#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000)
+#define _PIPEADSL 0x70000
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
-#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008)
+#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
@@ -3224,9 +3270,9 @@
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
-#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024)
+#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
-#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
+#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
@@ -3239,35 +3285,55 @@
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
+#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19)
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
-#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
-#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
+#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
-#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
+#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
#define PIPE_DPST_EVENT_STATUS (1UL<<7)
#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
+#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
+#define PIPE_B_PSR_STATUS_VLV (1UL<<3)
#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
-#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
-#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
-#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
-#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
-#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
+#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
+#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
+
+#define PIPE_A_OFFSET 0x70000
+#define PIPE_B_OFFSET 0x71000
+#define PIPE_C_OFFSET 0x72000
+/*
+ * There's actually no pipe EDP. Some pipe registers have
+ * simply shifted from the pipe to the transcoder, while
+ * keeping their original offset. Thus we need PIPE_EDP_OFFSET
+ * to access such registers in transcoder EDP.
+ */
+#define PIPE_EDP_OFFSET 0x7f000
+
+#define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \
+ dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+
+#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF)
+#define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL)
+#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT)
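A note on the new _PIPE2() helper: instead of pairing an A and a B register as _PIPE() does, it indexes dev_priv->info.pipe_offsets[], so pipes with non-uniform strides (including registers that moved to the EDP transcoder at 0x7f000) resolve from the pipe A register alone. A minimal sketch of the arithmetic, assuming pipe_offsets[] holds the PIPE_A/B/C_OFFSET values above and display_mmio_offset is zero:

/* Sketch only: mirrors the _PIPE2() arithmetic under the stated assumptions. */
static inline u32 pipe2_example(const u32 *pipe_offsets, int pipe, u32 reg)
{
	/* e.g. pipe2_example(offsets, PIPE_B, _PIPEACONF) == 0x71008 */
	return pipe_offsets[pipe] - pipe_offsets[PIPE_A] + reg;
}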
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
@@ -3279,20 +3345,20 @@
#define PIPEMISC_DITHER_ENABLE (1<<4)
#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
#define PIPEMISC_DITHER_TYPE_SP (0<<2)
-#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
+#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A)
#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
-#define SPRITED_FLIPDONE_INT_EN (1<<26)
-#define SPRITEC_FLIPDONE_INT_EN (1<<25)
-#define PLANEB_FLIPDONE_INT_EN (1<<24)
+#define SPRITED_FLIP_DONE_INT_EN (1<<26)
+#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
+#define PLANEB_FLIP_DONE_INT_EN (1<<24)
#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
#define PIPEA_HLINE_INT_EN (1<<20)
#define PIPEA_VBLANK_INT_EN (1<<19)
-#define SPRITEB_FLIPDONE_INT_EN (1<<18)
-#define SPRITEA_FLIPDONE_INT_EN (1<<17)
+#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
+#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
@@ -3323,7 +3389,7 @@
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
-#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034)
+#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
@@ -3331,11 +3397,11 @@
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW_PLANEB_MASK (0x7f<<8)
#define DSPFW_PLANEA_MASK (0x7f)
-#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038)
+#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038)
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_PLANEC_MASK (0x7f)
-#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c)
+#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c)
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
@@ -3343,8 +3409,8 @@
#define DSPFW_HPLL_CURSOR_SHIFT 16
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
-#define DSPFW4 (dev_priv->info->display_mmio_offset + 0x70070)
-#define DSPFW7 (dev_priv->info->display_mmio_offset + 0x7007c)
+#define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070)
+#define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c)
/* drain latency register values */
#define DRAIN_LATENCY_PRECISION_32 32
@@ -3468,12 +3534,12 @@
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040)
-#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044)
+#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70040)
+#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70044)
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
/* Cursor A & B regs */
-#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080)
+#define _CURACNTR (dev_priv->info.display_mmio_offset + 0x70080)
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
@@ -3489,23 +3555,27 @@
/* New style CUR*CNTR flags */
#define CURSOR_MODE 0x27
#define CURSOR_MODE_DISABLE 0x00
+#define CURSOR_MODE_128_32B_AX 0x02
+#define CURSOR_MODE_256_32B_AX 0x03
#define CURSOR_MODE_64_32B_AX 0x07
+#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
+#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
#define MCURSOR_PIPE_SELECT (1 << 28)
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
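The added 128- and 256-pixel ARGB cursor modes compose the same way as the existing 64-pixel mode: bit 5 selects ARGB on top of the base size encoding. For example (illustrative only, not taken from this patch), a control value for a 256x256 ARGB cursor on pipe B could be built from the flags above as:

/* Illustrative composition of a CUR*CNTR value. */
u32 cur_cntr = CURSOR_MODE_256_ARGB_AX | MCURSOR_PIPE_B | MCURSOR_GAMMA_ENABLE;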
-#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
-#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
+#define _CURABASE (dev_priv->info.display_mmio_offset + 0x70084)
+#define _CURAPOS (dev_priv->info.display_mmio_offset + 0x70088)
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
-#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0)
-#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4)
-#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8)
+#define _CURBCNTR (dev_priv->info.display_mmio_offset + 0x700c0)
+#define _CURBBASE (dev_priv->info.display_mmio_offset + 0x700c4)
+#define _CURBPOS (dev_priv->info.display_mmio_offset + 0x700c8)
#define _CURBCNTR_IVB 0x71080
#define _CURBBASE_IVB 0x71084
@@ -3520,7 +3590,7 @@
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
/* Display A control */
-#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180)
+#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
@@ -3554,25 +3624,25 @@
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
-#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184)
-#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188)
-#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
-#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190)
-#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
-#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
-#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
-#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC)
-
-#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
-#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
-#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
-#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
-#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
-#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
-#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+#define _DSPAADDR 0x70184
+#define _DSPASTRIDE 0x70188
+#define _DSPAPOS 0x7018C /* reserved */
+#define _DSPASIZE 0x70190
+#define _DSPASURF 0x7019C /* 965+ only */
+#define _DSPATILEOFF 0x701A4 /* 965+ only */
+#define _DSPAOFFSET 0x701A4 /* HSW */
+#define _DSPASURFLIVE 0x701AC
+
+#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR)
+#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR)
+#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE)
+#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS)
+#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE)
+#define DSPSURF(plane) _PIPE2(plane, _DSPASURF)
+#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
-#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
-#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
+#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
+#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3580,44 +3650,44 @@
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
/* VBIOS flags */
-#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
-#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414)
-#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418)
-#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c)
-#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420)
-#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424)
-#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428)
-#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410)
-#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414)
-#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420)
-#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414)
-#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418)
-#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c)
+#define SWF00 (dev_priv->info.display_mmio_offset + 0x71410)
+#define SWF01 (dev_priv->info.display_mmio_offset + 0x71414)
+#define SWF02 (dev_priv->info.display_mmio_offset + 0x71418)
+#define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c)
+#define SWF04 (dev_priv->info.display_mmio_offset + 0x71420)
+#define SWF05 (dev_priv->info.display_mmio_offset + 0x71424)
+#define SWF06 (dev_priv->info.display_mmio_offset + 0x71428)
+#define SWF10 (dev_priv->info.display_mmio_offset + 0x70410)
+#define SWF11 (dev_priv->info.display_mmio_offset + 0x70414)
+#define SWF14 (dev_priv->info.display_mmio_offset + 0x71420)
+#define SWF30 (dev_priv->info.display_mmio_offset + 0x72414)
+#define SWF31 (dev_priv->info.display_mmio_offset + 0x72418)
+#define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c)
/* Pipe B */
-#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
-#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
-#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
+#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
+#define _PIPEBCONF (dev_priv->info.display_mmio_offset + 0x71008)
+#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024)
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
-#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040)
-#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044)
+#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040)
+#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044)
/* Display B control */
-#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180)
+#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180)
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184)
-#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188)
-#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C)
-#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190)
-#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C)
-#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4)
-#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4)
-#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC)
+#define _DSPBADDR (dev_priv->info.display_mmio_offset + 0x71184)
+#define _DSPBSTRIDE (dev_priv->info.display_mmio_offset + 0x71188)
+#define _DSPBPOS (dev_priv->info.display_mmio_offset + 0x7118C)
+#define _DSPBSIZE (dev_priv->info.display_mmio_offset + 0x71190)
+#define _DSPBSURF (dev_priv->info.display_mmio_offset + 0x7119C)
+#define _DSPBTILEOFF (dev_priv->info.display_mmio_offset + 0x711A4)
+#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4)
+#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC)
/* Sprite A control */
#define _DVSACNTR 0x72180
@@ -3866,48 +3936,45 @@
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
-#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
+#define _PIPEA_DATA_M1 0x60030
#define PIPE_DATA_M1_OFFSET 0
-#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
+#define _PIPEA_DATA_N1 0x60034
#define PIPE_DATA_N1_OFFSET 0
-#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038)
+#define _PIPEA_DATA_M2 0x60038
#define PIPE_DATA_M2_OFFSET 0
-#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c)
+#define _PIPEA_DATA_N2 0x6003c
#define PIPE_DATA_N2_OFFSET 0
-#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040)
+#define _PIPEA_LINK_M1 0x60040
#define PIPE_LINK_M1_OFFSET 0
-#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044)
+#define _PIPEA_LINK_N1 0x60044
#define PIPE_LINK_N1_OFFSET 0
-#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048)
+#define _PIPEA_LINK_M2 0x60048
#define PIPE_LINK_M2_OFFSET 0
-#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c)
+#define _PIPEA_LINK_N2 0x6004c
#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs start at 0x61000 and share the PIPEA layout */
-#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030)
-#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034)
-
-#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038)
-#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c)
-
-#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040)
-#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044)
-
-#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048)
-#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c)
-
-#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define _PIPEB_DATA_M1 0x61030
+#define _PIPEB_DATA_N1 0x61034
+#define _PIPEB_DATA_M2 0x61038
+#define _PIPEB_DATA_N2 0x6103c
+#define _PIPEB_LINK_M1 0x61040
+#define _PIPEB_LINK_N1 0x61044
+#define _PIPEB_LINK_M2 0x61048
+#define _PIPEB_LINK_N2 0x6104c
+
+#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
@@ -4084,13 +4151,14 @@
#define ILK_ELPIN_409_SELECT (1 << 25)
#define ILK_DPARB_GATE (1<<22)
#define ILK_VSDPFD_FULL (1<<21)
-#define ILK_DISPLAY_CHICKEN_FUSES 0x42014
-#define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31)
-#define ILK_INTERNAL_DISPLAY_DISABLE (1<<30)
-#define ILK_DISPLAY_DEBUG_DISABLE (1<<29)
-#define ILK_HDCP_DISABLE (1<<25)
-#define ILK_eDP_A_DISABLE (1<<24)
-#define ILK_DESKTOP (1<<23)
+#define FUSE_STRAP 0x42014
+#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
+#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
+#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29)
+#define ILK_HDCP_DISABLE (1 << 25)
+#define ILK_eDP_A_DISABLE (1 << 24)
+#define HSW_CDCLK_LIMIT (1 << 24)
+#define ILK_DESKTOP (1 << 23)
#define ILK_DSPCLK_GATE_D 0x42020
#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
@@ -4109,7 +4177,8 @@
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
-#define DPRS_MASK_VBLANK_SRD (1 << 0)
+#define HSW_FBCQ_DIS (1 << 22)
+#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define DISP_ARB_CTL 0x45000
@@ -4120,6 +4189,8 @@
#define GEN7_MSG_CTL 0x45010
#define WAIT_FOR_PCH_RESET_ACK (1<<1)
#define WAIT_FOR_PCH_FLR_ACK (1<<0)
+#define HSW_NDE_RSTWRN_OPT 0x46408
+#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
@@ -4127,8 +4198,11 @@
#define COMMON_SLICE_CHICKEN2 0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
+#define GEN7_L3SQCREG1 0xB010
+#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
+
#define GEN7_L3CNTLREG1 0xB01C
-#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
+#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
@@ -4148,9 +4222,6 @@
#define HSW_SCRATCH1 0xb038
#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
-#define HSW_FUSE_STRAP 0x42014
-#define HSW_CDCLK_LIMIT (1 << 24)
-
/* PCH */
/* south display engine interrupt: IBX */
@@ -4436,24 +4507,24 @@
#define HSW_VIDEO_DIP_GCP_B 0x61210
#define HSW_TVIDEO_DIP_CTL(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A)
#define HSW_TVIDEO_DIP_VS_DATA(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A)
#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A)
#define HSW_TVIDEO_DIP_GCP(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A)
#define HSW_STEREO_3D_CTL_A 0x70020
#define S3D_ENABLE (1<<31)
#define HSW_STEREO_3D_CTL_B 0x71020
#define HSW_STEREO_3D_CTL(trans) \
- _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A)
+ _PIPE2(trans, HSW_STEREO_3D_CTL_A)
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
@@ -4865,6 +4936,9 @@
#define GEN7_UCGCTL4 0x940c
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+#define GEN8_UCGCTL6 0x9430
+#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
+
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -4945,6 +5019,10 @@
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define VLV_GTLC_SURVIVABILITY_REG 0x130098
+#define VLV_GFX_CLK_STATUS_BIT (1<<3)
+#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
+
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104
#define VLV_COUNT_RANGE_HIGH (1<<15)
@@ -5006,6 +5084,10 @@
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+#define GEN8_ROW_CHICKEN 0xe4f0
+#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
+#define STALL_DOP_GATING_DISABLE (1<<5)
+
#define GEN7_ROW_CHICKEN2 0xe4f4
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
@@ -5017,7 +5099,7 @@
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
-#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
+#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
@@ -5178,8 +5260,8 @@
#define TRANS_DDI_FUNC_CTL_B 0x61400
#define TRANS_DDI_FUNC_CTL_C 0x62400
#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
-#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
- TRANS_DDI_FUNC_CTL_B)
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A)
+
#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define TRANS_DDI_PORT_MASK (7<<28)
@@ -5311,8 +5393,12 @@
#define SPLL_PLL_ENABLE (1<<31)
#define SPLL_PLL_SSC (1<<28)
#define SPLL_PLL_NON_SSC (2<<28)
+#define SPLL_PLL_LCPLL (3<<28)
+#define SPLL_PLL_REF_MASK (3<<28)
#define SPLL_PLL_FREQ_810MHz (0<<26)
#define SPLL_PLL_FREQ_1350MHz (1<<26)
+#define SPLL_PLL_FREQ_2700MHz (2<<26)
+#define SPLL_PLL_FREQ_MASK (3<<26)
/* WRPLL */
#define WRPLL_CTL1 0x46040
@@ -5323,8 +5409,13 @@
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_REF_MASK (0xff)
#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_POST_MASK (0x3f<<8)
+#define WRPLL_DIVIDER_POST_SHIFT 8
#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+#define WRPLL_DIVIDER_FB_SHIFT 16
+#define WRPLL_DIVIDER_FB_MASK (0xff<<16)
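The new WRPLL divider masks and shifts make the divider fields readable as well as writable; the intel_ddi_calc_wrpll_link() helper added in the intel_ddi.c hunk below uses them to recover the link clock. A condensed sketch of that decode (p and r carry a fixed-point portion, result in kHz, no zero-divider guard, same as the driver helper):

static inline int wrpll_link_khz(u32 wrpll, int refclk)
{
	int r = wrpll & WRPLL_DIVIDER_REF_MASK;
	int p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	int n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	return refclk * n * 100 / (p * r);
}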
/* Port clock selection */
#define PORT_CLK_SEL_A 0x46100
@@ -5337,6 +5428,7 @@
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
#define PORT_CLK_SEL_NONE (7<<29)
+#define PORT_CLK_SEL_MASK (7<<29)
/* Transcoder clock selection */
#define TRANS_CLK_SEL_A 0x46140
@@ -5346,10 +5438,12 @@
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
-#define _TRANSA_MSA_MISC 0x60410
-#define _TRANSB_MSA_MISC 0x61410
-#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
- _TRANSB_MSA_MISC)
+#define TRANSA_MSA_MISC 0x60410
+#define TRANSB_MSA_MISC 0x61410
+#define TRANSC_MSA_MISC 0x62410
+#define TRANS_EDP_MSA_MISC 0x6f410
+#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC)
+
#define TRANS_MSA_SYNC_CLK (1<<0)
#define TRANS_MSA_6_BPC (0<<5)
#define TRANS_MSA_8_BPC (1<<5)
@@ -5389,6 +5483,8 @@
/* SFUSE_STRAP */
#define SFUSE_STRAP 0xc2014
+#define SFUSE_STRAP_FUSE_LOCK (1<<13)
+#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
@@ -5857,4 +5953,12 @@
#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
+/* For UMS only (deprecated): */
+#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
+#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
+#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
+#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8150fdc08d49..56785e8fb2eb 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -236,19 +236,9 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
}
- /* Only regfile.save FBC state on the platform that supports FBC */
- if (HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
- } else if (IS_GM45(dev)) {
- dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
- } else {
- dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
- }
- }
+ /* save FBC interval */
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_save_vga(dev);
@@ -300,18 +290,10 @@ static void i915_restore_display(struct drm_device *dev)
/* only restore FBC info on the platform that supports FBC */
intel_disable_fbc(dev);
- if (HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
- } else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
- } else {
- I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- }
- }
+
+ /* restore FBC interval */
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_restore_vga(dev);
@@ -324,10 +306,6 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- if (INTEL_INFO(dev)->gen <= 4)
- pci_read_config_byte(dev->pdev, LBB,
- &dev_priv->regfile.saveLBB);
-
mutex_lock(&dev->struct_mutex);
i915_save_display(dev);
@@ -377,10 +355,6 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- if (INTEL_INFO(dev)->gen <= 4)
- pci_write_config_byte(dev->pdev, LBB,
- dev_priv->regfile.saveLBB);
-
mutex_lock(&dev->struct_mutex);
i915_gem_restore_fences(dev);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 33bcae314bf8..9c57029f6f4b 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
- ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+ ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -284,7 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = dev->dev_private;
return snprintf(buf, PAGE_SIZE, "%d\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -298,9 +298,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+ ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
else
- ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+ ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -313,7 +313,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
+ u32 val;
ssize_t ret;
ret = kstrtou32(buf, 0, &val);
@@ -324,38 +324,34 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev_priv->dev)) {
+ if (IS_VALLEYVIEW(dev_priv->dev))
val = vlv_freq_opcode(dev_priv, val);
-
- hw_max = valleyview_rps_max_freq(dev_priv);
- hw_min = valleyview_rps_min_freq(dev_priv);
- non_oc_max = hw_max;
- } else {
+ else
val /= GT_FREQUENCY_MULTIPLIER;
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- hw_max = dev_priv->rps.hw_max;
- non_oc_max = (rp_state_cap & 0xff);
- hw_min = ((rp_state_cap & 0xff0000) >> 16);
- }
-
- if (val < hw_min || val > hw_max ||
- val < dev_priv->rps.min_delay) {
+ if (val < dev_priv->rps.min_freq ||
+ val > dev_priv->rps.max_freq ||
+ val < dev_priv->rps.min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
- if (val > non_oc_max)
+ if (val > dev_priv->rps.rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n",
val * GT_FREQUENCY_MULTIPLIER);
- dev_priv->rps.max_delay = val;
+ dev_priv->rps.max_freq_softlimit = val;
- if (dev_priv->rps.cur_delay > val) {
+ if (dev_priv->rps.cur_freq > val) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val);
else
gen6_set_rps(dev, val);
+ } else if (!IS_VALLEYVIEW(dev)) {
+ /* We still need gen6_set_rps to process the new max_freq_softlimit
+ * and update the interrupt limits even though the frequency request
+ * is unchanged. */
+ gen6_set_rps(dev, dev_priv->rps.cur_freq);
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -374,9 +370,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+ ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
else
- ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+ ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -389,7 +385,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val, rp_state_cap, hw_max, hw_min;
+ u32 val;
ssize_t ret;
ret = kstrtou32(buf, 0, &val);
@@ -400,31 +396,30 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev))
val = vlv_freq_opcode(dev_priv, val);
-
- hw_max = valleyview_rps_max_freq(dev_priv);
- hw_min = valleyview_rps_min_freq(dev_priv);
- } else {
+ else
val /= GT_FREQUENCY_MULTIPLIER;
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- hw_max = dev_priv->rps.hw_max;
- hw_min = ((rp_state_cap & 0xff0000) >> 16);
- }
-
- if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+ if (val < dev_priv->rps.min_freq ||
+ val > dev_priv->rps.max_freq ||
+ val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
- dev_priv->rps.min_delay = val;
+ dev_priv->rps.min_freq_softlimit = val;
- if (dev_priv->rps.cur_delay < val) {
+ if (dev_priv->rps.cur_freq < val) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val);
else
gen6_set_rps(dev, val);
+ } else if (!IS_VALLEYVIEW(dev)) {
+ /* We still need gen6_set_rps to process the new min_freq_softlimit
+ * and update the interrupt limits even though the frequency request
+ * is unchanged. */
+ gen6_set_rps(dev, dev_priv->rps.cur_freq);
}
mutex_unlock(&dev_priv->rps.hw_lock);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 6e580c98dede..23c26f1f8b37 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -34,15 +34,15 @@ TRACE_EVENT(i915_gem_object_create,
);
TRACE_EVENT(i915_vma_bind,
- TP_PROTO(struct i915_vma *vma, bool mappable),
- TP_ARGS(vma, mappable),
+ TP_PROTO(struct i915_vma *vma, unsigned flags),
+ TP_ARGS(vma, flags),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
- __field(bool, mappable)
+ __field(unsigned, flags)
),
TP_fast_assign(
@@ -50,12 +50,12 @@ TRACE_EVENT(i915_vma_bind,
__entry->vm = vma->vm;
__entry->offset = vma->node.start;
__entry->size = vma->node.size;
- __entry->mappable = mappable;
+ __entry->flags = flags;
),
TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
- __entry->mappable ? ", mappable" : "",
+ __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
__entry->vm)
);
@@ -196,26 +196,26 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
);
TRACE_EVENT(i915_gem_evict,
- TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
- TP_ARGS(dev, size, align, mappable),
+ TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
+ TP_ARGS(dev, size, align, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, size)
__field(u32, align)
- __field(bool, mappable)
+ __field(unsigned, flags)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->size = size;
__entry->align = align;
- __entry->mappable = mappable;
+ __entry->flags = flags;
),
TP_printk("dev=%d, size=%d, align=%d %s",
__entry->dev, __entry->size, __entry->align,
- __entry->mappable ? ", mappable" : "")
+ __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);
TRACE_EVENT(i915_gem_evict_everything,
@@ -238,14 +238,16 @@ TRACE_EVENT(i915_gem_evict_vm,
TP_ARGS(vm),
TP_STRUCT__entry(
+ __field(u32, dev)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
+ __entry->dev = vm->dev->primary->index;
__entry->vm = vm;
),
- TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
+ TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
TRACE_EVENT(i915_gem_ring_sync_to,
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index caa18e855815..480da593e6c0 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -271,6 +271,10 @@ void i915_save_display_reg(struct drm_device *dev)
/* FIXME: save TV & SDVO state in regfile */
/* Backlight */
+ if (INTEL_INFO(dev)->gen <= 4)
+ pci_read_config_byte(dev->pdev, PCI_LBPC,
+ &dev_priv->regfile.saveLBB);
+
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -293,6 +297,10 @@ void i915_restore_display_reg(struct drm_device *dev)
int i;
/* Backlight */
+ if (INTEL_INFO(dev)->gen <= 4)
+ pci_write_config_byte(dev->pdev, PCI_LBPC,
+ dev_priv->regfile.saveLBB);
+
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f22041973f3a..4867f4cc0938 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -259,7 +259,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
downclock = dvo_timing->clock;
}
- if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
+ if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = downclock * 10;
DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
@@ -318,7 +318,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct drm_display_mode *panel_fixed_mode;
int index;
- index = i915_vbt_sdvo_panel_type;
+ index = i915.vbt_sdvo_panel_type;
if (index == -2) {
DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
@@ -599,14 +599,14 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_mipi *mipi;
- mipi = find_section(bdb, BDB_MIPI);
+ mipi = find_section(bdb, BDB_MIPI_CONFIG);
if (!mipi) {
DRM_DEBUG_KMS("No MIPI BDB found");
return;
}
/* XXX: add more info */
- dev_priv->vbt.dsi.panel_id = mipi->panel_id;
+ dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 282de5e9f39d..83b7629e4367 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -104,7 +104,8 @@ struct vbios_data {
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
-#define BDB_MIPI 50
+#define BDB_MIPI_CONFIG 52
+#define BDB_MIPI_SEQUENCE 53
#define BDB_SKIP 254 /* VBIOS private block, ignore */
struct bdb_general_features {
@@ -711,44 +712,159 @@ int intel_parse_bios(struct drm_device *dev);
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
-/* MIPI DSI panel info */
-struct bdb_mipi {
- u16 panel_id;
- u16 bridge_revision;
-
- /* General params */
- u32 dithering:1;
- u32 bpp_pixel_format:1;
- u32 rsvd1:1;
- u32 dphy_valid:1;
- u32 resvd2:28;
+/* Block 52 contains MIPI Panel info
+ * 6 such entries will be there. Index into the correct
+ * entry is based on the panel_index in #40 LFP
+ */
+#define MAX_MIPI_CONFIGURATIONS 6
- u16 port_info;
- u16 rsvd3:2;
- u16 num_lanes:2;
- u16 rsvd4:12;
+#define MIPI_DSI_UNDEFINED_PANEL_ID 0
+#define MIPI_DSI_GENERIC_PANEL_ID 1
- /* DSI config */
- u16 virt_ch_num:2;
- u16 vtm:2;
- u16 rsvd5:12;
+struct mipi_config {
+ u16 panel_id;
- u32 dsi_clock;
+ /* General Params */
+ u32 enable_dithering:1;
+ u32 rsvd1:1;
+ u32 is_bridge:1;
+
+ u32 panel_arch_type:2;
+ u32 is_cmd_mode:1;
+
+#define NON_BURST_SYNC_PULSE 0x1
+#define NON_BURST_SYNC_EVENTS 0x2
+#define BURST_MODE 0x3
+ u32 video_transfer_mode:2;
+
+ u32 cabc_supported:1;
+ u32 pwm_blc:1;
+
+ /* Bit 13:10 */
+#define PIXEL_FORMAT_RGB565 0x1
+#define PIXEL_FORMAT_RGB666 0x2
+#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
+#define PIXEL_FORMAT_RGB888 0x4
+ u32 videomode_color_format:4;
+
+ /* Bit 15:14 */
+#define ENABLE_ROTATION_0 0x0
+#define ENABLE_ROTATION_90 0x1
+#define ENABLE_ROTATION_180 0x2
+#define ENABLE_ROTATION_270 0x3
+ u32 rotation:2;
+ u32 bta_enabled:1;
+ u32 rsvd2:15;
+
+ /* 2 byte Port Description */
+#define DUAL_LINK_NOT_SUPPORTED 0
+#define DUAL_LINK_FRONT_BACK 1
+#define DUAL_LINK_PIXEL_ALT 2
+ u16 dual_link:2;
+ u16 lane_cnt:2;
+ u16 rsvd3:12;
+
+ u16 rsvd4;
+
+ u8 rsvd5[5];
+ u32 dsi_ddr_clk;
u32 bridge_ref_clk;
- u16 rsvd_pwr;
- /* Dphy Params */
- u32 prepare_cnt:5;
- u32 rsvd6:3;
+#define BYTE_CLK_SEL_20MHZ 0
+#define BYTE_CLK_SEL_10MHZ 1
+#define BYTE_CLK_SEL_5MHZ 2
+ u8 byte_clk_sel:2;
+
+ u8 rsvd6:6;
+
+ /* DPHY Flags */
+ u16 dphy_param_valid:1;
+ u16 eot_pkt_disabled:1;
+ u16 enable_clk_stop:1;
+ u16 rsvd7:13;
+
+ u32 hs_tx_timeout;
+ u32 lp_rx_timeout;
+ u32 turn_around_timeout;
+ u32 device_reset_timer;
+ u32 master_init_timer;
+ u32 dbi_bw_timer;
+ u32 lp_byte_clk_val;
+
+ /* 4 byte Dphy Params */
+ u32 prepare_cnt:6;
+ u32 rsvd8:2;
u32 clk_zero_cnt:8;
u32 trail_cnt:5;
- u32 rsvd7:3;
+ u32 rsvd9:3;
u32 exit_zero_cnt:6;
- u32 rsvd8:2;
+ u32 rsvd10:2;
- u32 hl_switch_cnt;
- u32 lp_byte_clk;
u32 clk_lane_switch_cnt;
+ u32 hl_switch_cnt;
+
+ u32 rsvd11[6];
+
+ /* timings based on dphy spec */
+ u8 tclk_miss;
+ u8 tclk_post;
+ u8 rsvd12;
+ u8 tclk_pre;
+ u8 tclk_prepare;
+ u8 tclk_settle;
+ u8 tclk_term_enable;
+ u8 tclk_trail;
+ u16 tclk_prepare_clkzero;
+ u8 rsvd13;
+ u8 td_term_enable;
+ u8 teot;
+ u8 ths_exit;
+ u8 ths_prepare;
+ u16 ths_prepare_hszero;
+ u8 rsvd14;
+ u8 ths_settle;
+ u8 ths_skip;
+ u8 ths_trail;
+ u8 tinit;
+ u8 tlpx;
+ u8 rsvd15[3];
+
+ /* GPIOs */
+ u8 panel_enable;
+ u8 bl_enable;
+ u8 pwm_enable;
+ u8 reset_r_n;
+ u8 pwr_down_r;
+ u8 stdby_r_n;
+
} __packed;
+/* Block 52 contains MIPI configuration block
+ * 6 * bdb_mipi_config, followed by 6 pps data
+ * blocks below
+ *
+ * all delays have a unit of 100us
+ */
+struct mipi_pps_data {
+ u16 panel_on_delay;
+ u16 bl_enable_delay;
+ u16 bl_disable_delay;
+ u16 panel_off_delay;
+ u16 panel_power_cycle_delay;
+};
+
+struct bdb_mipi_config {
+ struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
+ struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+};
+
+/* Block 53 contains MIPI sequences as needed by the panel
+ * for enabling it. This block can be variable in size and
+ * can contain a maximum of 6 blocks
+ */
+struct bdb_mipi_sequence {
+ u8 version;
+ u8 data[0];
+};
+
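How these block 52/53 structures get consumed is not part of this header change, but the layout suggests a straightforward per-panel lookup. A hypothetical sketch (the helper name and the source of panel_index are assumptions, not driver API):

static const struct mipi_config *
example_mipi_config(const struct bdb_mipi_config *blk, int panel_index)
{
	/* panel_index is expected to come from the LFP options block (#40). */
	if (panel_index < 0 || panel_index >= MAX_MIPI_CONFIGURATIONS)
		return NULL;
	return &blk->config[panel_index];
}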
#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e2e39e65f109..aa5a3dc43342 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -68,8 +68,13 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ enum intel_display_power_domain power_domain;
u32 tmp;
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
tmp = I915_READ(crt->adpa_reg);
if (!(tmp & ADPA_DAC_ENABLE))
@@ -262,6 +267,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_LPT(dev))
pipe_config->pipe_bpp = 24;
+ /* FDI must always be 2.7 GHz */
+ if (HAS_DDI(dev))
+ pipe_config->port_clock = 135000 * 2;
+
return true;
}
@@ -630,14 +639,22 @@ static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_encoder *intel_encoder = &crt->base;
+ enum intel_display_power_domain power_domain;
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
+ intel_runtime_pm_get(dev_priv);
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, drm_get_connector_name(connector),
force);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
if (I915_HAS_HOTPLUG(dev)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
@@ -645,23 +662,30 @@ intel_crt_detect(struct drm_connector *connector, bool force)
*/
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
- return connector_status_connected;
+ status = connector_status_connected;
+ goto out;
} else
DRM_DEBUG_KMS("CRT not detected via hotplug\n");
}
- if (intel_crt_detect_ddc(connector))
- return connector_status_connected;
+ if (intel_crt_detect_ddc(connector)) {
+ status = connector_status_connected;
+ goto out;
+ }
/* Load detection is broken on HPD capable machines. Whoever wants a
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev))
- return connector_status_disconnected;
+ if (I915_HAS_HOTPLUG(dev)) {
+ status = connector_status_disconnected;
+ goto out;
+ }
- if (!force)
- return connector->status;
+ if (!force) {
+ status = connector->status;
+ goto out;
+ }
/* for pre-945g platforms use load detect */
if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
@@ -673,6 +697,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
} else
status = connector_status_unknown;
+out:
+ intel_display_power_put(dev_priv, power_domain);
+ intel_runtime_pm_put(dev_priv);
+
return status;
}
@@ -686,17 +714,28 @@ static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_encoder *intel_encoder = &crt->base;
+ enum intel_display_power_domain power_domain;
int ret;
struct i2c_adapter *i2c;
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
- return ret;
+ goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
- return intel_crt_ddc_get_modes(connector, i2c);
+ ret = intel_crt_ddc_get_modes(connector, i2c);
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static int intel_crt_set_property(struct drm_connector *connector,
@@ -765,6 +804,14 @@ static const struct dmi_system_id intel_no_crt[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
},
},
+ {
+ .callback = intel_no_crt_dmi_callback,
+ .ident = "DELL XPS 8700",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
+ },
+ },
{ }
};
@@ -800,7 +847,7 @@ void intel_crt_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG;
- crt->base.cloneable = true;
+ crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
if (IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
@@ -833,6 +880,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.get_hw_state = intel_crt_get_hw_state;
}
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
@@ -857,4 +905,6 @@ void intel_crt_init(struct drm_device *dev)
dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
}
+
+ intel_crt_reset(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 234ac5f7bc5a..0ad4e9600063 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -633,6 +633,97 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
/* Otherwise a < c && b >= d, do nothing */
}
+static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ int reg)
+{
+ int refclk = LC_FREQ;
+ int n, p, r;
+ u32 wrpll;
+
+ wrpll = I915_READ(reg);
+ switch (wrpll & SPLL_PLL_REF_MASK) {
+ case SPLL_PLL_SSC:
+ case SPLL_PLL_NON_SSC:
+ /*
+ * We could calculate spread here, but our checking
+ * code only cares about 5% accuracy, and spread is a max of
+ * 0.5% downspread.
+ */
+ refclk = 135;
+ break;
+ case SPLL_PLL_LCPLL:
+ refclk = LC_FREQ;
+ break;
+ default:
+ WARN(1, "bad wrpll refclk\n");
+ return 0;
+ }
+
+ r = wrpll & WRPLL_DIVIDER_REF_MASK;
+ p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+ n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+ /* Convert to KHz, p & r have a fixed point portion */
+ return (refclk * n * 100) / (p * r);
+}
+
+static void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ int link_clock = 0;
+ u32 val, pll;
+
+ val = I915_READ(PORT_CLK_SEL(port));
+ switch (val & PORT_CLK_SEL_MASK) {
+ case PORT_CLK_SEL_LCPLL_810:
+ link_clock = 81000;
+ break;
+ case PORT_CLK_SEL_LCPLL_1350:
+ link_clock = 135000;
+ break;
+ case PORT_CLK_SEL_LCPLL_2700:
+ link_clock = 270000;
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+ break;
+ case PORT_CLK_SEL_SPLL:
+ pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
+ if (pll == SPLL_PLL_FREQ_810MHz)
+ link_clock = 81000;
+ else if (pll == SPLL_PLL_FREQ_1350MHz)
+ link_clock = 135000;
+ else if (pll == SPLL_PLL_FREQ_2700MHz)
+ link_clock = 270000;
+ else {
+ WARN(1, "bad spll freq\n");
+ return;
+ }
+ break;
+ default:
+ WARN(1, "bad port clock sel\n");
+ return;
+ }
+
+ pipe_config->port_clock = link_clock * 2;
+
+ if (pipe_config->has_pch_encoder)
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->fdi_m_n);
+ else if (pipe_config->has_dp_encoder)
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
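For the PCH and DP branches above, the dot clock is recovered from the port clock and the link M/N values via intel_dotclock_calculate(), whose body is not part of this patch; the relationship it is assumed to implement is roughly:

/* Hedged approximation only; not a copy of intel_dotclock_calculate().
 * div_u64() comes from <linux/math64.h>. */
static inline int dotclock_from_m_n(int port_clock, u32 link_m, u32 link_n)
{
	return link_n ? (int)div_u64((u64)link_m * port_clock, link_n) : 0;
}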
static void
intel_ddi_calculate_wrpll(int clock /* in Hz */,
unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
@@ -1017,8 +1108,13 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum pipe pipe = 0;
enum transcoder cpu_transcoder;
+ enum intel_display_power_domain power_domain;
uint32_t tmp;
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
return false;
@@ -1054,9 +1150,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(encoder);
+ enum intel_display_power_domain power_domain;
u32 tmp;
int i;
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
@@ -1200,7 +1301,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- ironlake_edp_panel_on(intel_dp);
+ intel_edp_panel_on(intel_dp);
}
WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
@@ -1244,8 +1345,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
- ironlake_edp_panel_vdd_on(intel_dp);
- ironlake_edp_panel_off(intel_dp);
+ intel_edp_panel_vdd_on(intel_dp);
+ intel_edp_panel_off(intel_dp);
}
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
@@ -1280,7 +1381,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
if (port == PORT_A)
intel_dp_stop_link_train(intel_dp);
- ironlake_edp_backlight_on(intel_dp);
+ intel_edp_backlight_on(intel_dp);
intel_edp_psr_enable(intel_dp);
}
@@ -1313,7 +1414,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_psr_disable(intel_dp);
- ironlake_edp_backlight_off(intel_dp);
+ intel_edp_backlight_off(intel_dp);
}
}
@@ -1325,7 +1426,7 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
if (lcpll & LCPLL_CD_SOURCE_FCLK) {
return 800000;
- } else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) {
+ } else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) {
return 450000;
} else if (freq == LCPLL_CLK_FREQ_450) {
return 450000;
@@ -1510,6 +1611,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
+
+ intel_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -1620,7 +1723,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- intel_encoder->cloneable = false;
+ intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_ddi_hot_plug;
if (init_dp)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9b8a7c7ea7fc..dae976f51d83 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -51,7 +51,10 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
-
+static int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj);
typedef struct {
int min, max;
@@ -738,10 +741,10 @@ bool intel_crtc_active(struct drm_crtc *crtc)
* We can ditch the adjusted_mode.crtc_clock check as soon
* as Haswell has gained clock readout/fastboot support.
*
- * We can ditch the crtc->fb check as soon as we can
+ * We can ditch the crtc->primary->fb check as soon as we can
* properly reconstruct framebuffers.
*/
- return intel_crtc->active && crtc->fb &&
+ return intel_crtc->active && crtc->primary->fb &&
intel_crtc->config.adjusted_mode.crtc_clock;
}
@@ -1030,7 +1033,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
u32 val;
/* ILK FDI PLL is always enabled */
- if (dev_priv->info->gen == 5)
+ if (INTEL_INFO(dev_priv->dev)->gen == 5)
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -1119,7 +1122,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
- if (!intel_display_power_enabled(dev_priv->dev,
+ if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
@@ -1163,7 +1166,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
if (INTEL_INFO(dev)->gen >= 4) {
reg = DSPCNTR(pipe);
val = I915_READ(reg);
- WARN((val & DISPLAY_PLANE_ENABLE),
+ WARN(val & DISPLAY_PLANE_ENABLE,
"plane %c assertion failure, should be disabled but not\n",
plane_name(pipe));
return;
@@ -1185,27 +1188,27 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
- int reg, i;
+ int reg, sprite;
u32 val;
if (IS_VALLEYVIEW(dev)) {
- for (i = 0; i < dev_priv->num_plane; i++) {
- reg = SPCNTR(pipe, i);
+ for_each_sprite(pipe, sprite) {
+ reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
- WARN((val & SP_ENABLE),
+ WARN(val & SP_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
- sprite_name(pipe, i), pipe_name(pipe));
+ sprite_name(pipe, sprite), pipe_name(pipe));
}
} else if (INTEL_INFO(dev)->gen >= 7) {
reg = SPRCTL(pipe);
val = I915_READ(reg);
- WARN((val & SPRITE_ENABLE),
+ WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
} else if (INTEL_INFO(dev)->gen >= 5) {
reg = DVSCNTR(pipe);
val = I915_READ(reg);
- WARN((val & DVS_ENABLE),
+ WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
}
@@ -1443,7 +1446,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
assert_pipe_disabled(dev_priv, crtc->pipe);
/* No really, not for ILK+ */
- BUG_ON(dev_priv->info->gen >= 5);
+ BUG_ON(INTEL_INFO(dev)->gen >= 5);
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev) && !IS_I830(dev))
@@ -1549,11 +1552,12 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
*/
static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
/* PCH PLLs only available on ILK, SNB and IVB */
- BUG_ON(dev_priv->info->gen < 5);
+ BUG_ON(INTEL_INFO(dev)->gen < 5);
if (WARN_ON(pll == NULL))
return;
@@ -1578,11 +1582,12 @@ static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
/* PCH only available on ILK+ */
- BUG_ON(dev_priv->info->gen < 5);
+ BUG_ON(INTEL_INFO(dev)->gen < 5);
if (WARN_ON(pll == NULL))
return;
@@ -1617,7 +1622,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
uint32_t reg, val, pipeconf_val;
/* PCH only available on ILK+ */
- BUG_ON(dev_priv->info->gen < 5);
+ BUG_ON(INTEL_INFO(dev)->gen < 5);
/* Make sure PCH DPLL is enabled */
assert_shared_dpll_enabled(dev_priv,
@@ -1670,7 +1675,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
u32 val, pipeconf_val;
/* PCH only available on ILK+ */
- BUG_ON(dev_priv->info->gen < 5);
+ BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
@@ -1744,21 +1749,16 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
/**
* intel_enable_pipe - enable a pipe, asserting requirements
- * @dev_priv: i915 private structure
- * @pipe: pipe to enable
- * @pch_port: on ILK+, is this pipe driving a PCH port or not
+ * @crtc: crtc responsible for the pipe
*
- * Enable @pipe, making sure that various hardware specific requirements
+ * Enable @crtc's pipe, making sure that various hardware specific requirements
* are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
- *
- * @pipe should be %PIPE_A or %PIPE_B.
- *
- * Will wait until the pipe is actually running (i.e. first vblank) before
- * returning.
*/
-static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
- bool pch_port, bool dsi)
+static void intel_enable_pipe(struct intel_crtc *crtc)
{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum pipe pch_transcoder;
@@ -1780,12 +1780,12 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
* need the check.
*/
if (!HAS_PCH_SPLIT(dev_priv->dev))
- if (dsi)
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
else {
- if (pch_port) {
+ if (crtc->config.has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
assert_fdi_tx_pll_enabled(dev_priv,
@@ -1796,11 +1796,24 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
- if (val & PIPECONF_ENABLE)
+ if (val & PIPECONF_ENABLE) {
+ WARN_ON(!(pipe == PIPE_A &&
+ dev_priv->quirks & QUIRK_PIPEA_FORCE));
return;
+ }
I915_WRITE(reg, val | PIPECONF_ENABLE);
- intel_wait_for_vblank(dev_priv->dev, pipe);
+ POSTING_READ(reg);
+
+ /*
+ * There's no guarantee the pipe will really start running now. It
+ * depends on the Gen, the output type and the relative order between
+ * pipe and plane enabling. Avoid waiting on HSW+ since it's not
+ * necessary.
+ * TODO: audit the previous gens.
+ */
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ intel_wait_for_vblank(dev_priv->dev, pipe);
}
/**
@@ -1851,22 +1864,23 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
- u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
+ struct drm_device *dev = dev_priv->dev;
+ u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
I915_WRITE(reg, I915_READ(reg));
POSTING_READ(reg);
}
/**
- * intel_enable_primary_plane - enable the primary plane on a given pipe
+ * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
* @dev_priv: i915 private structure
* @plane: plane to enable
* @pipe: pipe being fed
*
* Enable @plane on @pipe, making sure that @pipe is running first.
*/
-static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
- enum plane plane, enum pipe pipe)
+static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
{
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@@ -1891,15 +1905,15 @@ static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
}
/**
- * intel_disable_primary_plane - disable the primary plane
+ * intel_disable_primary_hw_plane - disable the primary hardware plane
* @dev_priv: i915 private structure
* @plane: plane to disable
* @pipe: pipe consuming the data
*
* Disable @plane; should be an independent operation.
*/
-static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
- enum plane plane, enum pipe pipe)
+static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
{
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@@ -1929,6 +1943,14 @@ static bool need_vtd_wa(struct drm_device *dev)
return false;
}
+static int intel_align_height(struct drm_device *dev, int height, bool tiled)
+{
+ int tile_height;
+
+ tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
+ return ALIGN(height, tile_height);
+}
+
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -2025,8 +2047,114 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
}
}
-static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y)
+int intel_format_to_fourcc(int format)
+{
+ switch (format) {
+ case DISPPLANE_8BPP:
+ return DRM_FORMAT_C8;
+ case DISPPLANE_BGRX555:
+ return DRM_FORMAT_XRGB1555;
+ case DISPPLANE_BGRX565:
+ return DRM_FORMAT_RGB565;
+ default:
+ case DISPPLANE_BGRX888:
+ return DRM_FORMAT_XRGB8888;
+ case DISPPLANE_RGBX888:
+ return DRM_FORMAT_XBGR8888;
+ case DISPPLANE_BGRX101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DISPPLANE_RGBX101010:
+ return DRM_FORMAT_XBGR2101010;
+ }
+}
+
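The mapping above turns the DSPCNTR pixel-format bits into DRM fourcc codes. As a reminder of what those return values are, here is a minimal user-space sketch; fourcc_code() mirrors the drm_fourcc.h definition, and 'XR24' is DRM_FORMAT_XRGB8888.

#include <stdio.h>
#include <stdint.h>

#define fourcc_code(a, b, c, d) \
        ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
         ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

int main(void)
{
        uint32_t xrgb8888 = fourcc_code('X', 'R', '2', '4');    /* DRM_FORMAT_XRGB8888 */

        /* fourcc codes pack four ASCII characters into a u32 */
        printf("%c%c%c%c\n",
               (int)(xrgb8888 & 0xff), (int)((xrgb8888 >> 8) & 0xff),
               (int)((xrgb8888 >> 16) & 0xff), (int)((xrgb8888 >> 24) & 0xff));
        return 0;
}
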
+static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
+ struct intel_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_gem_object *obj = NULL;
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ u32 base = plane_config->base;
+
+ if (plane_config->size == 0)
+ return false;
+
+ obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
+ plane_config->size);
+ if (!obj)
+ return false;
+
+ if (plane_config->tiled) {
+ obj->tiling_mode = I915_TILING_X;
+ obj->stride = crtc->base.primary->fb->pitches[0];
+ }
+
+ mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
+ mode_cmd.width = crtc->base.primary->fb->width;
+ mode_cmd.height = crtc->base.primary->fb->height;
+ mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
+ &mode_cmd, obj)) {
+ DRM_DEBUG_KMS("intel fb init failed\n");
+ goto out_unref_obj;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG_KMS("plane fb obj %p\n", obj);
+ return true;
+
+out_unref_obj:
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+ return false;
+}
+
+static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
+ struct intel_plane_config *plane_config)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_crtc *c;
+ struct intel_crtc *i;
+ struct intel_framebuffer *fb;
+
+ if (!intel_crtc->base.primary->fb)
+ return;
+
+ if (intel_alloc_plane_obj(intel_crtc, plane_config))
+ return;
+
+ kfree(intel_crtc->base.primary->fb);
+ intel_crtc->base.primary->fb = NULL;
+
+ /*
+ * Failed to alloc the obj, check to see if we should share
+ * an fb with another CRTC instead
+ */
+ list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ i = to_intel_crtc(c);
+
+ if (c == &intel_crtc->base)
+ continue;
+
+ if (!i->active || !c->primary->fb)
+ continue;
+
+ fb = to_intel_framebuffer(c->primary->fb);
+ if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
+ drm_framebuffer_reference(c->primary->fb);
+ intel_crtc->base.primary->fb = c->primary->fb;
+ break;
+ }
+ }
+}
+
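intel_find_plane_obj() falls back to sharing: if the BIOS-programmed memory cannot be wrapped, it looks for another active CRTC whose framebuffer starts at the same GGTT offset and takes a reference on it. A minimal user-space sketch of that fallback, with plain structs and a refcount standing in for drm_framebuffer and drm_framebuffer_reference():

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct fb {
        uint32_t base;          /* start address of the scanout buffer */
        int refcount;
};

struct crtc {
        int active;
        struct fb *fb;
};

/* find an active CRTC whose fb starts at @base and share it */
static struct fb *find_shared_fb(struct crtc *crtcs, int n, uint32_t base)
{
        for (int i = 0; i < n; i++) {
                if (!crtcs[i].active || !crtcs[i].fb)
                        continue;
                if (crtcs[i].fb->base == base) {
                        crtcs[i].fb->refcount++;        /* "drm_framebuffer_reference()" */
                        return crtcs[i].fb;
                }
        }
        return NULL;
}

int main(void)
{
        struct fb bios_fb = { 0x20000, 1 };
        struct crtc crtcs[2] = { { 1, &bios_fb }, { 1, NULL } };
        struct fb *shared = find_shared_fb(crtcs, 2, 0x20000);

        printf("%s, refcount %d\n", shared ? "shared" : "not found",
               shared ? shared->refcount : 0);
        return 0;
}
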
+static int i9xx_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2125,8 +2253,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return 0;
}
-static int ironlake_update_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y)
+static int ironlake_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2230,7 +2359,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dev_priv->display.disable_fbc(dev);
intel_increase_pllclock(crtc);
- return dev_priv->display.update_plane(crtc, fb, x, y);
+ return dev_priv->display.update_primary_plane(crtc, fb, x, y);
}
void intel_display_handle_reset(struct drm_device *dev)
@@ -2267,11 +2396,13 @@ void intel_display_handle_reset(struct drm_device *dev)
/*
* FIXME: Once we have proper support for primary planes (and
* disabling them without disabling the entire crtc) allow again
- * a NULL crtc->fb.
+ * a NULL crtc->primary->fb.
*/
- if (intel_crtc->active && crtc->fb)
- dev_priv->display.update_plane(crtc, crtc->fb,
- crtc->x, crtc->y);
+ if (intel_crtc->active && crtc->primary->fb)
+ dev_priv->display.update_primary_plane(crtc,
+ crtc->primary->fb,
+ crtc->x,
+ crtc->y);
mutex_unlock(&crtc->mutex);
}
}
@@ -2299,31 +2430,23 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
return ret;
}
-static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_master_private *master_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long flags;
+ bool pending;
- if (!dev->primary->master)
- return;
+ if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+ intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ return false;
- master_priv = dev->primary->master->driver_priv;
- if (!master_priv->sarea_priv)
- return;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ pending = to_intel_crtc(crtc)->unpin_work != NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
- switch (intel_crtc->pipe) {
- case 0:
- master_priv->sarea_priv->pipeA_x = x;
- master_priv->sarea_priv->pipeA_y = y;
- break;
- case 1:
- master_priv->sarea_priv->pipeB_x = x;
- master_priv->sarea_priv->pipeB_y = y;
- break;
- default:
- break;
- }
+ return pending;
}
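
The relocated helper only samples unpin_work while holding dev->event_lock, so the answer is coherent even if it is stale by the time the caller acts on it. A user-space analogue of that check, with a pthread mutex standing in for the irqsave spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct crtc_state {
        pthread_mutex_t event_lock;
        void *unpin_work;               /* non-NULL while a flip is queued */
};

static bool has_pending_flip(struct crtc_state *c)
{
        bool pending;

        pthread_mutex_lock(&c->event_lock);
        pending = c->unpin_work != NULL;
        pthread_mutex_unlock(&c->event_lock);

        return pending;
}

int main(void)
{
        struct crtc_state c = { PTHREAD_MUTEX_INITIALIZER, NULL };

        return has_pending_flip(&c) ? 1 : 0;
}
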
static int
@@ -2336,6 +2459,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb;
int ret;
+ if (intel_crtc_has_pending_flip(crtc)) {
+ DRM_ERROR("pipe is still busy with an old pageflip\n");
+ return -EBUSY;
+ }
+
/* no fb bound */
if (!fb) {
DRM_ERROR("No FB bound\n");
@@ -2353,8 +2481,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
ret = intel_pin_and_fence_fb_obj(dev,
to_intel_framebuffer(fb)->obj,
NULL);
+ mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
- mutex_unlock(&dev->struct_mutex);
DRM_ERROR("pin & fence failed\n");
return ret;
}
@@ -2372,7 +2500,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
* whether the platform allows pfit disable with pipe active, and only
* then update the pipesrc and pfit state, even on the flip path.
*/
- if (i915_fastboot) {
+ if (i915.fastboot) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
@@ -2390,31 +2518,33 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
}
- ret = dev_priv->display.update_plane(crtc, fb, x, y);
+ ret = dev_priv->display.update_primary_plane(crtc, fb, x, y);
if (ret) {
+ mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("failed to update base address\n");
return ret;
}
- old_fb = crtc->fb;
- crtc->fb = fb;
+ old_fb = crtc->primary->fb;
+ crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
if (old_fb) {
if (intel_crtc->active && old_fb != fb)
intel_wait_for_vblank(dev, intel_crtc->pipe);
+ mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
}
+ mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
- intel_crtc_update_sarea_pos(crtc, x, y);
-
return 0;
}
@@ -2963,25 +3093,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}
-static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
- bool pending;
-
- if (i915_reset_in_progress(&dev_priv->gpu_error) ||
- intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
- return false;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- pending = to_intel_crtc(crtc)->unpin_work != NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return pending;
-}
-
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
struct intel_crtc *crtc;
@@ -3011,7 +3122,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (crtc->fb == NULL)
+ if (crtc->primary->fb == NULL)
return;
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
@@ -3020,7 +3131,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
!intel_crtc_has_pending_flip(crtc));
mutex_lock(&dev->struct_mutex);
- intel_finish_fb(crtc->fb);
+ intel_finish_fb(crtc->primary->fb);
mutex_unlock(&dev->struct_mutex);
}
@@ -3425,22 +3536,28 @@ static void intel_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct drm_plane *plane;
struct intel_plane *intel_plane;
- list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
+ drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+ intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe)
intel_plane_restore(&intel_plane->base);
+ }
}
static void intel_disable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct drm_plane *plane;
struct intel_plane *intel_plane;
- list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
+ drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+ intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe)
intel_plane_disable(&intel_plane->base);
+ }
}
void hsw_enable_ips(struct intel_crtc *crtc)
@@ -3587,9 +3704,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);
intel_update_watermarks(crtc);
- intel_enable_pipe(dev_priv, pipe,
- intel_crtc->config.has_pch_encoder, false);
- intel_enable_primary_plane(dev_priv, plane, pipe);
+ intel_enable_pipe(intel_crtc);
+ intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
@@ -3631,7 +3747,7 @@ static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- intel_enable_primary_plane(dev_priv, plane, pipe);
+ intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
@@ -3661,7 +3777,7 @@ static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
- intel_disable_primary_plane(dev_priv, plane, pipe);
+ intel_disable_primary_hw_plane(dev_priv, plane, pipe);
}
/*
@@ -3733,8 +3849,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_ddi_enable_transcoder_func(crtc);
intel_update_watermarks(crtc);
- intel_enable_pipe(dev_priv, pipe,
- intel_crtc->config.has_pch_encoder, false);
+ intel_enable_pipe(intel_crtc);
if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
@@ -3748,16 +3863,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
haswell_crtc_enable_planes(crtc);
-
- /*
- * There seems to be a race in PCH platform hw (at least on some
- * outputs) where an enabled pipe still completes any pageflip right
- * away (as if the pipe is off) instead of waiting for vblank. As soon
- * as the first vblank happend, everything works as expected. Hence just
- * wait for one vblank before returning to avoid strange things
- * happening.
- */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -3800,7 +3905,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
- intel_disable_primary_plane(dev_priv, plane, pipe);
+ intel_disable_primary_hw_plane(dev_priv, plane, pipe);
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
@@ -3972,6 +4077,117 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ if ((1 << (domain)) & (mask))
+
+enum intel_display_power_domain
+intel_display_port_power_domain(struct intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct intel_digital_port *intel_dig_port;
+
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_UNKNOWN:
+ /* Only DDI platforms should ever use this output type */
+ WARN_ON_ONCE(!HAS_DDI(dev));
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_EDP:
+ intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ return POWER_DOMAIN_PORT_DDI_A_4_LANES;
+ case PORT_B:
+ return POWER_DOMAIN_PORT_DDI_B_4_LANES;
+ case PORT_C:
+ return POWER_DOMAIN_PORT_DDI_C_4_LANES;
+ case PORT_D:
+ return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+ default:
+ WARN_ON_ONCE(1);
+ return POWER_DOMAIN_PORT_OTHER;
+ }
+ case INTEL_OUTPUT_ANALOG:
+ return POWER_DOMAIN_PORT_CRT;
+ case INTEL_OUTPUT_DSI:
+ return POWER_DOMAIN_PORT_DSI;
+ default:
+ return POWER_DOMAIN_PORT_OTHER;
+ }
+}
+
+static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
+ unsigned long mask;
+ enum transcoder transcoder;
+
+ transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
+
+ mask = BIT(POWER_DOMAIN_PIPE(pipe));
+ mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+ if (pfit_enabled)
+ mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ mask |= BIT(intel_display_port_power_domain(intel_encoder));
+
+ return mask;
+}
+
+void intel_display_set_init_power(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ if (dev_priv->power_domains.init_power_on == enable)
+ return;
+
+ if (enable)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ else
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ dev_priv->power_domains.init_power_on = enable;
+}
+
+static void modeset_update_crtc_power_domains(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
+ struct intel_crtc *crtc;
+
+ /*
+ * First get all needed power domains, then put all unneeded, to avoid
+ * any unnecessary toggling of the power wells.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ enum intel_display_power_domain domain;
+
+ if (!crtc->base.enabled)
+ continue;
+
+ pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
+
+ for_each_power_domain(domain, pipe_domains[crtc->pipe])
+ intel_display_power_get(dev_priv, domain);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ enum intel_display_power_domain domain;
+
+ for_each_power_domain(domain, crtc->enabled_power_domains)
+ intel_display_power_put(dev_priv, domain);
+
+ crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+ }
+
+ intel_display_set_init_power(dev_priv, false);
+}
+
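The comment in modeset_update_crtc_power_domains() is the heart of it: taking the new references before dropping the old ones keeps a still-needed power well from bouncing off and back on across a modeset. A user-space sketch of that ordering, with a refcount array standing in for the wells; the masks and domain count are illustrative.

#include <stdio.h>

#define NUM_DOMAINS 4

static int refcount[NUM_DOMAINS] = { 1, 1, 0, 0 };      /* old domains 0 and 1 already held */

static void domain_get(int d)
{
        if (refcount[d]++ == 0)
                printf("domain %d: power well ON\n", d);
}

static void domain_put(int d)
{
        if (--refcount[d] == 0)
                printf("domain %d: power well OFF\n", d);
}

int main(void)
{
        unsigned long old_mask = 0x3, new_mask = 0x6;   /* domain 1 stays in use */
        int d;

        for (d = 0; d < NUM_DOMAINS; d++)               /* get the new domains first ... */
                if (new_mask & (1UL << d))
                        domain_get(d);

        for (d = 0; d < NUM_DOMAINS; d++)               /* ... then put the old ones */
                if (old_mask & (1UL << d))
                        domain_put(d);

        /* domain 1 never sees a 1 -> 0 -> 1 transition */
        return 0;
}
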
int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
@@ -4088,9 +4304,8 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
/* Looks like the 200MHz CDclk freq doesn't work on some configs */
}
-static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
- unsigned modeset_pipes,
- struct intel_crtc_config *pipe_config)
+/* compute the max pixel clock for the new configuration */
+static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc;
@@ -4098,31 +4313,26 @@ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
- if (modeset_pipes & (1 << intel_crtc->pipe))
- max_pixclk = max(max_pixclk,
- pipe_config->adjusted_mode.crtc_clock);
- else if (intel_crtc->base.enabled)
+ if (intel_crtc->new_enabled)
max_pixclk = max(max_pixclk,
- intel_crtc->config.adjusted_mode.crtc_clock);
+ intel_crtc->new_config->adjusted_mode.crtc_clock);
}
return max_pixclk;
}
static void valleyview_modeset_global_pipes(struct drm_device *dev,
- unsigned *prepare_pipes,
- unsigned modeset_pipes,
- struct intel_crtc_config *pipe_config)
+ unsigned *prepare_pipes)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
- int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
- pipe_config);
+ int max_pixclk = intel_mode_max_pixclk(dev_priv);
int cur_cdclk = valleyview_cur_cdclk(dev_priv);
if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
return;
+ /* disable/enable all currently active pipes while we change cdclk */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head)
if (intel_crtc->base.enabled)
@@ -4132,12 +4342,13 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
static void valleyview_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
+ int max_pixclk = intel_mode_max_pixclk(dev_priv);
int cur_cdclk = valleyview_cur_cdclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
if (req_cdclk != cur_cdclk)
valleyview_set_cdclk(dev, req_cdclk);
+ modeset_update_crtc_power_domains(dev);
}
static void valleyview_crtc_enable(struct drm_crtc *crtc)
@@ -4175,8 +4386,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);
intel_update_watermarks(crtc);
- intel_enable_pipe(dev_priv, pipe, false, is_dsi);
- intel_enable_primary_plane(dev_priv, plane, pipe);
+ intel_enable_pipe(intel_crtc);
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
@@ -4213,8 +4425,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);
intel_update_watermarks(crtc);
- intel_enable_pipe(dev_priv, pipe, false, false);
- intel_enable_primary_plane(dev_priv, plane, pipe);
+ intel_enable_pipe(intel_crtc);
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
/* The fixup needs to happen before cursor is enabled */
if (IS_G4X(dev))
@@ -4270,8 +4483,9 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
- intel_disable_primary_plane(dev_priv, plane, pipe);
+ intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
intel_disable_pipe(dev_priv, pipe);
i9xx_pfit_disable(intel_crtc);
@@ -4365,11 +4579,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
- if (crtc->fb) {
+ if (crtc->primary->fb) {
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
mutex_unlock(&dev->struct_mutex);
- crtc->fb = NULL;
+ crtc->primary->fb = NULL;
}
/* Update computed state. */
@@ -4583,7 +4797,7 @@ retry:
static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
- pipe_config->ips_enabled = i915_enable_ips &&
+ pipe_config->ips_enabled = i915.enable_ips &&
hsw_crtc_supports_ips(crtc) &&
pipe_config->pipe_bpp <= 24;
}
@@ -4784,8 +4998,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes,
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
- if (i915_panel_use_ssc >= 0)
- return i915_panel_use_ssc != 0;
+ if (i915.panel_use_ssc >= 0)
+ return i915.panel_use_ssc != 0;
return dev_priv->vbt.lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
@@ -4844,7 +5058,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
crtc->lowfreq_avail = false;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
- reduced_clock && i915_powersave) {
+ reduced_clock && i915.powersave) {
I915_WRITE(FP1(pipe), fp2);
crtc->config.dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
@@ -5161,21 +5375,26 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
- uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
+ uint32_t crtc_vtotal, crtc_vblank_end;
+ int vsyncshift = 0;
/* We need to be careful not to change the adjusted mode, for otherwise
* the hw state checker will get angry at the mismatch. */
crtc_vtotal = adjusted_mode->crtc_vtotal;
crtc_vblank_end = adjusted_mode->crtc_vblank_end;
- if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
/* the chip adds 2 halflines automatically */
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
- vsyncshift = adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal / 2;
- } else {
- vsyncshift = 0;
+
+ if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+ vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
+ else
+ vsyncshift = adjusted_mode->crtc_hsync_start -
+ adjusted_mode->crtc_htotal / 2;
+ if (vsyncshift < 0)
+ vsyncshift += adjusted_mode->crtc_htotal;
}
if (INTEL_INFO(dev)->gen > 3)
@@ -5259,25 +5478,23 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
}
-static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_config *pipe_config)
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+ struct intel_crtc_config *pipe_config)
{
- struct drm_crtc *crtc = &intel_crtc->base;
-
- crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
- crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
- crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
- crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
+ mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
+ mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
+ mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
+ mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
- crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
- crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
- crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
- crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
+ mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
+ mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
+ mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
+ mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
- crtc->mode.flags = pipe_config->adjusted_mode.flags;
+ mode->flags = pipe_config->adjusted_mode.flags;
- crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
- crtc->mode.flags |= pipe_config->adjusted_mode.flags;
+ mode->clock = pipe_config->adjusted_mode.crtc_clock;
+ mode->flags |= pipe_config->adjusted_mode.flags;
}
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -5327,10 +5544,13 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (!IS_GEN2(dev) &&
- intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
- else
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (INTEL_INFO(dev)->gen < 4 ||
+ intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ else
+ pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
+ } else
pipeconf |= PIPECONF_PROGRESSIVE;
if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
@@ -5512,6 +5732,67 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = clock.dot / 5;
}
+static void i9xx_get_plane_config(struct intel_crtc *crtc,
+ struct intel_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, base, offset;
+ int pipe = crtc->pipe, plane = crtc->plane;
+ int fourcc, pixel_format;
+ int aligned_height;
+
+ crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
+ if (!crtc->base.primary->fb) {
+ DRM_DEBUG_KMS("failed to alloc fb\n");
+ return;
+ }
+
+ val = I915_READ(DSPCNTR(plane));
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ if (val & DISPPLANE_TILED)
+ plane_config->tiled = true;
+
+ pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
+ fourcc = intel_format_to_fourcc(pixel_format);
+ crtc->base.primary->fb->pixel_format = fourcc;
+ crtc->base.primary->fb->bits_per_pixel =
+ drm_format_plane_cpp(fourcc, 0) * 8;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (plane_config->tiled)
+ offset = I915_READ(DSPTILEOFF(plane));
+ else
+ offset = I915_READ(DSPLINOFF(plane));
+ base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+ } else {
+ base = I915_READ(DSPADDR(plane));
+ }
+ plane_config->base = base;
+
+ val = I915_READ(PIPESRC(pipe));
+ crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
+ crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
+
+ val = I915_READ(DSPSTRIDE(pipe));
+ crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
+
+ aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
+ plane_config->tiled);
+
+ plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height, PAGE_SIZE);
+
+ DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe, plane, crtc->base.primary->fb->width,
+ crtc->base.primary->fb->height,
+ crtc->base.primary->fb->bits_per_pixel, base,
+ crtc->base.primary->fb->pitches[0],
+ plane_config->size);
+
+}
+
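For a feel of the numbers this read-out produces, here is a user-space rerun of the size arithmetic with illustrative values (an X-tiled 1920x1080 XRGB8888 scanout with a 1920 * 4 byte stride; ALIGN() and PAGE_SIZE are local stand-ins for the kernel macros, and the real stride would come from DSPSTRIDE):

#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))
#define PAGE_SIZE       4096

int main(void)
{
        int width = 1920, height = 1080, cpp = 4;
        int stride = width * cpp;                       /* 7680 bytes */
        int aligned_height = ALIGN(height, 8);          /* X-tiled tile height on gen3+ */
        unsigned int size = ALIGN(stride * aligned_height, PAGE_SIZE);

        printf("stride %d, aligned height %d, size %u bytes\n",
               stride, aligned_height, size);           /* 7680, 1080, 8294400 */
        return 0;
}
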
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -5519,6 +5800,10 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
+ return false;
+
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -6180,7 +6465,7 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
* is 2.5%; use 5% for safety's sake.
*/
u32 bps = target_clock * bpp * 21 / 20;
- return bps / (link_bw * 8) + 1;
+ return DIV_ROUND_UP(bps, link_bw * 8);
}
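
Switching to DIV_ROUND_UP() also changes the result when bps happens to be an exact multiple of link_bw * 8: the old expression returned one lane too many in that case. A quick user-space check with made-up numbers (DIV_ROUND_UP() mirrors the kernel macro):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        int link_bw = 270000;                   /* illustrative link clock */
        int bps = 16 * link_bw * 8;             /* constructed as an exact multiple */

        printf("old: %d lanes\n", bps / (link_bw * 8) + 1);             /* 17 */
        printf("new: %d lanes\n", DIV_ROUND_UP(bps, link_bw * 8));      /* 16 */
        return 0;
}
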
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
@@ -6348,7 +6633,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
- if (is_lvds && has_reduced_clock && i915_powersave)
+ if (is_lvds && has_reduced_clock && i915.powersave)
intel_crtc->lowfreq_avail = true;
else
intel_crtc->lowfreq_avail = false;
@@ -6455,6 +6740,66 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
}
}
+static void ironlake_get_plane_config(struct intel_crtc *crtc,
+ struct intel_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, base, offset;
+ int pipe = crtc->pipe, plane = crtc->plane;
+ int fourcc, pixel_format;
+ int aligned_height;
+
+ crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
+ if (!crtc->base.primary->fb) {
+ DRM_DEBUG_KMS("failed to alloc fb\n");
+ return;
+ }
+
+ val = I915_READ(DSPCNTR(plane));
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ if (val & DISPPLANE_TILED)
+ plane_config->tiled = true;
+
+ pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
+ fourcc = intel_format_to_fourcc(pixel_format);
+ crtc->base.primary->fb->pixel_format = fourcc;
+ crtc->base.primary->fb->bits_per_pixel =
+ drm_format_plane_cpp(fourcc, 0) * 8;
+
+ base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ offset = I915_READ(DSPOFFSET(plane));
+ } else {
+ if (plane_config->tiled)
+ offset = I915_READ(DSPTILEOFF(plane));
+ else
+ offset = I915_READ(DSPLINOFF(plane));
+ }
+ plane_config->base = base;
+
+ val = I915_READ(PIPESRC(pipe));
+ crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
+ crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
+
+ val = I915_READ(DSPSTRIDE(pipe));
+ crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
+
+ aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
+ plane_config->tiled);
+
+ plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height, PAGE_SIZE);
+
+ DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe, plane, crtc->base.primary->fb->width,
+ crtc->base.primary->fb->height,
+ crtc->base.primary->fb->bits_per_pixel, base,
+ crtc->base.primary->fb->pitches[0],
+ plane_config->size);
+}
+
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -6629,6 +6974,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
uint32_t val;
+ unsigned long irqflags;
val = I915_READ(LCPLL_CTL);
@@ -6636,9 +6982,22 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
return;
- /* Make sure we're not on PC8 state before disabling PC8, otherwise
- * we'll hang the machine! */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ /*
+ * Make sure we're not on PC8 state before disabling PC8, otherwise
+ * we'll hang the machine. To prevent PC8 state, just enable force_wake.
+ *
+ * The other problem is that hsw_restore_lcpll() is called as part of
+ * the runtime PM resume sequence, so we can't just call
+ * gen6_gt_force_wake_get() because that function calls
+ * intel_runtime_pm_get(), and we can't change the runtime PM refcount
+ * while we are on the resume sequence. So to solve this problem we have
+ * to call special forcewake code that doesn't touch runtime PM and
+ * doesn't enable the forcewake delayed work.
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (dev_priv->uncore.forcewake_count++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6672,26 +7031,45 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ /* See the big comment above. */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (--dev_priv->uncore.forcewake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void hsw_enable_pc8_work(struct work_struct *__work)
+/*
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states. Our driver only allows PC8+ when going into runtime PM.
+ *
+ * The requirements for PC8+ are that all the outputs are disabled, the power
+ * well is disabled and most interrupts are disabled, and these are also
+ * requirements for runtime PM. When these conditions are met, we manually do
+ * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
+ * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
+ * hang the machine.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6. Notice that this happens even
+ * if we don't put the device in PCI D3 state (which is what currently happens
+ * because of the runtime PM support).
+ *
+ * For more, read "Display Sequences for Package C8" on the hardware
+ * documentation.
+ */
+void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv =
- container_of(to_delayed_work(__work), struct drm_i915_private,
- pc8.enable_work);
struct drm_device *dev = dev_priv->dev;
uint32_t val;
WARN_ON(!HAS_PC8(dev));
- if (dev_priv->pc8.enabled)
- return;
-
DRM_DEBUG_KMS("Enabling package C8+\n");
- dev_priv->pc8.enabled = true;
-
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
@@ -6699,51 +7077,21 @@ void hsw_enable_pc8_work(struct work_struct *__work)
}
lpt_disable_clkout_dp(dev);
- hsw_pc8_disable_interrupts(dev);
+ hsw_runtime_pm_disable_interrupts(dev);
hsw_disable_lcpll(dev_priv, true, true);
-
- intel_runtime_pm_put(dev_priv);
-}
-
-static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
-{
- WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
- WARN(dev_priv->pc8.disable_count < 1,
- "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
-
- dev_priv->pc8.disable_count--;
- if (dev_priv->pc8.disable_count != 0)
- return;
-
- schedule_delayed_work(&dev_priv->pc8.enable_work,
- msecs_to_jiffies(i915_pc8_timeout));
}
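
Note the symmetry between the two paths: whatever hsw_enable_pc8() tears down (clkout, interrupts, LCPLL), hsw_disable_pc8() below brings back in reverse order. A user-space sketch of that pairing; the step functions are illustrative stand-ins, not the driver's real helpers.

#include <stdio.h>

static void disable_clkout(void)        { puts("clkout off"); }
static void enable_clkout(void)         { puts("clkout on"); }
static void disable_interrupts(void)    { puts("irqs off"); }
static void restore_interrupts(void)    { puts("irqs on"); }
static void lcpll_to_fclk(void)         { puts("LCPLL -> Fclk"); }
static void fclk_to_lcpll(void)         { puts("Fclk -> LCPLL"); }

static void enter_pc8(void)
{
        disable_clkout();
        disable_interrupts();
        lcpll_to_fclk();
}

static void leave_pc8(void)             /* undo enter_pc8() in reverse */
{
        fclk_to_lcpll();
        restore_interrupts();
        enable_clkout();
}

int main(void)
{
        enter_pc8();
        leave_pc8();
        return 0;
}
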
-static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
+void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t val;
- WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
- WARN(dev_priv->pc8.disable_count < 0,
- "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
-
- dev_priv->pc8.disable_count++;
- if (dev_priv->pc8.disable_count != 1)
- return;
-
WARN_ON(!HAS_PC8(dev));
- cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
- if (!dev_priv->pc8.enabled)
- return;
-
DRM_DEBUG_KMS("Disabling package C8+\n");
- intel_runtime_pm_get(dev_priv);
-
hsw_restore_lcpll(dev_priv);
- hsw_pc8_restore_interrupts(dev);
+ hsw_runtime_pm_restore_interrupts(dev);
lpt_init_pch_refclk(dev);
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
@@ -6757,185 +7105,11 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
gen6_update_ring_freq(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
- dev_priv->pc8.enabled = false;
-}
-
-void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
-{
- if (!HAS_PC8(dev_priv->dev))
- return;
-
- mutex_lock(&dev_priv->pc8.lock);
- __hsw_enable_package_c8(dev_priv);
- mutex_unlock(&dev_priv->pc8.lock);
-}
-
-void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
-{
- if (!HAS_PC8(dev_priv->dev))
- return;
-
- mutex_lock(&dev_priv->pc8.lock);
- __hsw_disable_package_c8(dev_priv);
- mutex_unlock(&dev_priv->pc8.lock);
-}
-
-static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct intel_crtc *crtc;
- uint32_t val;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
- if (crtc->base.enabled)
- return false;
-
- /* This case is still possible since we have the i915.disable_power_well
- * parameter and also the KVMr or something else might be requesting the
- * power well. */
- val = I915_READ(HSW_PWR_WELL_DRIVER);
- if (val != 0) {
- DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
- return false;
- }
-
- return true;
-}
-
-/* Since we're called from modeset_global_resources there's no way to
- * symmetrically increase and decrease the refcount, so we use
- * dev_priv->pc8.requirements_met to track whether we already have the refcount
- * or not.
- */
-static void hsw_update_package_c8(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- bool allow;
-
- if (!HAS_PC8(dev_priv->dev))
- return;
-
- if (!i915_enable_pc8)
- return;
-
- mutex_lock(&dev_priv->pc8.lock);
-
- allow = hsw_can_enable_package_c8(dev_priv);
-
- if (allow == dev_priv->pc8.requirements_met)
- goto done;
-
- dev_priv->pc8.requirements_met = allow;
-
- if (allow)
- __hsw_enable_package_c8(dev_priv);
- else
- __hsw_disable_package_c8(dev_priv);
-
-done:
- mutex_unlock(&dev_priv->pc8.lock);
-}
-
-static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
-{
- if (!HAS_PC8(dev_priv->dev))
- return;
-
- mutex_lock(&dev_priv->pc8.lock);
- if (!dev_priv->pc8.gpu_idle) {
- dev_priv->pc8.gpu_idle = true;
- __hsw_enable_package_c8(dev_priv);
- }
- mutex_unlock(&dev_priv->pc8.lock);
-}
-
-static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
-{
- if (!HAS_PC8(dev_priv->dev))
- return;
-
- mutex_lock(&dev_priv->pc8.lock);
- if (dev_priv->pc8.gpu_idle) {
- dev_priv->pc8.gpu_idle = false;
- __hsw_disable_package_c8(dev_priv);
- }
- mutex_unlock(&dev_priv->pc8.lock);
-}
-
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- if ((1 << (domain)) & (mask))
-
-static unsigned long get_pipe_power_domains(struct drm_device *dev,
- enum pipe pipe, bool pfit_enabled)
-{
- unsigned long mask;
- enum transcoder transcoder;
-
- transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
-
- mask = BIT(POWER_DOMAIN_PIPE(pipe));
- mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
- if (pfit_enabled)
- mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
-
- return mask;
-}
-
-void intel_display_set_init_power(struct drm_device *dev, bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->power_domains.init_power_on == enable)
- return;
-
- if (enable)
- intel_display_power_get(dev, POWER_DOMAIN_INIT);
- else
- intel_display_power_put(dev, POWER_DOMAIN_INIT);
-
- dev_priv->power_domains.init_power_on = enable;
-}
-
-static void modeset_update_power_wells(struct drm_device *dev)
-{
- unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
- struct intel_crtc *crtc;
-
- /*
- * First get all needed power domains, then put all unneeded, to avoid
- * any unnecessary toggling of the power wells.
- */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
- enum intel_display_power_domain domain;
-
- if (!crtc->base.enabled)
- continue;
-
- pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
- crtc->pipe,
- crtc->config.pch_pfit.enabled);
-
- for_each_power_domain(domain, pipe_domains[crtc->pipe])
- intel_display_power_get(dev, domain);
- }
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
- enum intel_display_power_domain domain;
-
- for_each_power_domain(domain, crtc->enabled_power_domains)
- intel_display_power_put(dev, domain);
-
- crtc->enabled_power_domains = pipe_domains[crtc->pipe];
- }
-
- intel_display_set_init_power(dev, false);
}
static void haswell_modeset_global_resources(struct drm_device *dev)
{
- modeset_update_power_wells(dev);
- hsw_update_package_c8(dev);
+ modeset_update_crtc_power_domains(dev);
}
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
@@ -6985,6 +7159,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
+ return false;
+
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -7010,7 +7188,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
- if (!intel_display_power_enabled(dev,
+ if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
@@ -7038,7 +7216,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- if (intel_display_power_enabled(dev, pfit_domain))
+ if (intel_display_power_enabled(dev_priv, pfit_domain))
ironlake_get_pfit_config(crtc, pipe_config);
if (IS_HASWELL(dev))
@@ -7435,10 +7613,26 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
bool visible = base != 0;
if (intel_crtc->cursor_visible != visible) {
+ int16_t width = intel_crtc->cursor_width;
uint32_t cntl = I915_READ(CURCNTR(pipe));
if (base) {
cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
- cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ cntl |= MCURSOR_GAMMA_ENABLE;
+
+ switch (width) {
+ case 64:
+ cntl |= CURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= CURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= CURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
cntl |= pipe << 28; /* Connect to correct pipe */
} else {
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
@@ -7463,10 +7657,25 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
bool visible = base != 0;
if (intel_crtc->cursor_visible != visible) {
+ int16_t width = intel_crtc->cursor_width;
uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
if (base) {
cntl &= ~CURSOR_MODE;
- cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ cntl |= MCURSOR_GAMMA_ENABLE;
+ switch (width) {
+ case 64:
+ cntl |= CURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= CURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= CURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
} else {
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
@@ -7550,6 +7759,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj;
+ unsigned old_width;
uint32_t addr;
int ret;
@@ -7562,9 +7772,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto finish;
}
- /* Currently we only support 64x64 cursors */
- if (width != 64 || height != 64) {
- DRM_ERROR("we currently only support 64x64 cursors\n");
+ /* Check for which cursor types we support */
+ if (!((width == 64 && height == 64) ||
+ (width == 128 && height == 128 && !IS_GEN2(dev)) ||
+ (width == 256 && height == 256 && !IS_GEN2(dev)))) {
+ DRM_DEBUG("Cursor dimension not supported\n");
return -EINVAL;
}
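
The new check boils down to a small predicate: 64x64 everywhere, 128x128 and 256x256 only on gen3+, square sizes only. A user-space restatement of that rule, for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool cursor_size_ok(int width, int height, bool is_gen2)
{
        return (width == 64 && height == 64) ||
               (!is_gen2 && width == 128 && height == 128) ||
               (!is_gen2 && width == 256 && height == 256);
}

int main(void)
{
        printf("%d %d %d\n",
               cursor_size_ok(64, 64, true),            /* 1 */
               cursor_size_ok(256, 256, true),          /* 0: too big for gen2 */
               cursor_size_ok(128, 128, false));        /* 1 */
        return 0;
}
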
@@ -7573,18 +7785,18 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -ENOENT;
if (obj->base.size < width * height * 4) {
- DRM_ERROR("buffer is to small\n");
+ DRM_DEBUG_KMS("buffer is too small\n");
ret = -ENOMEM;
goto fail;
}
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
- if (!dev_priv->info->cursor_needs_physical) {
+ if (!INTEL_INFO(dev)->cursor_needs_physical) {
unsigned alignment;
if (obj->tiling_mode) {
- DRM_ERROR("cursor cannot be tiled\n");
+ DRM_DEBUG_KMS("cursor cannot be tiled\n");
ret = -EINVAL;
goto fail_locked;
}
@@ -7600,13 +7812,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
if (ret) {
- DRM_ERROR("failed to move cursor bo into the GTT\n");
+ DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
goto fail_locked;
}
ret = i915_gem_object_put_fence(obj);
if (ret) {
- DRM_ERROR("failed to release fence for cursor");
+ DRM_DEBUG_KMS("failed to release fence for cursor");
goto fail_unpin;
}
@@ -7617,7 +7829,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
- DRM_ERROR("failed to attach phys object\n");
+ DRM_DEBUG_KMS("failed to attach phys object\n");
goto fail_locked;
}
addr = obj->phys_obj->handle->busaddr;
@@ -7628,7 +7840,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
finish:
if (intel_crtc->cursor_bo) {
- if (dev_priv->info->cursor_needs_physical) {
+ if (INTEL_INFO(dev)->cursor_needs_physical) {
if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
@@ -7638,13 +7850,18 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
mutex_unlock(&dev->struct_mutex);
+ old_width = intel_crtc->cursor_width;
+
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
- if (intel_crtc->active)
+ if (intel_crtc->active) {
+ if (old_width != width)
+ intel_update_watermarks(crtc);
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+ }
return 0;
fail_unpin:
@@ -7690,10 +7907,10 @@ static struct drm_display_mode load_detect_mode = {
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
-static struct drm_framebuffer *
-intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_i915_gem_object *obj)
+struct drm_framebuffer *
+__intel_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
{
struct intel_framebuffer *intel_fb;
int ret;
@@ -7704,12 +7921,7 @@ intel_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto err;
-
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
- mutex_unlock(&dev->struct_mutex);
if (ret)
goto err;
@@ -7721,6 +7933,23 @@ err:
return ERR_PTR(ret);
}
+static struct drm_framebuffer *
+intel_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_framebuffer *fb;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+ fb = __intel_framebuffer_create(dev, mode_cmd, obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return fb;
+}
+
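The split above follows a common kernel pattern: one variant for callers that already hold the lock, plus a thin wrapper that takes and drops the lock itself (the driver spells the former with a double-underscore prefix). A user-space sketch of the same idea, with a pthread mutex standing in for struct_mutex and a stub create body:

#include <pthread.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

static int do_create(void)
{
        /* allocation and init of the framebuffer would go here */
        return 0;
}

static int create_fb_locked(void)       /* caller holds struct_mutex */
{
        return do_create();
}

static int create_fb(void)              /* takes the lock itself */
{
        int ret;

        pthread_mutex_lock(&struct_mutex);
        ret = create_fb_locked();
        pthread_mutex_unlock(&struct_mutex);

        return ret;
}

int main(void)
{
        return create_fb();
}
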
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
@@ -7766,14 +7995,16 @@ mode_fits_in_fbdev(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
- if (dev_priv->fbdev == NULL)
+ if (!dev_priv->fbdev)
return NULL;
- obj = dev_priv->fbdev->ifb.obj;
- if (obj == NULL)
+ if (!dev_priv->fbdev->fb)
return NULL;
- fb = &dev_priv->fbdev->ifb.base;
+ obj = dev_priv->fbdev->fb->obj;
+ BUG_ON(!obj);
+
+ fb = &dev_priv->fbdev->fb->base;
if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
fb->bits_per_pixel))
return NULL;
@@ -7855,6 +8086,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
to_intel_connector(connector)->new_encoder = intel_encoder;
intel_crtc = to_intel_crtc(crtc);
+ intel_crtc->new_enabled = true;
+ intel_crtc->new_config = &intel_crtc->config;
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
@@ -7878,21 +8111,28 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- mutex_unlock(&crtc->mutex);
- return false;
+ goto fail;
}
if (intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
- mutex_unlock(&crtc->mutex);
- return false;
+ goto fail;
}
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
return true;
+
+ fail:
+ intel_crtc->new_enabled = crtc->enabled;
+ if (intel_crtc->new_enabled)
+ intel_crtc->new_config = &intel_crtc->config;
+ else
+ intel_crtc->new_config = NULL;
+ mutex_unlock(&crtc->mutex);
+ return false;
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -7902,6 +8142,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
@@ -7910,6 +8151,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
if (old->load_detect_temp) {
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
+ intel_crtc->new_enabled = false;
+ intel_crtc->new_config = NULL;
intel_set_mode(crtc, NULL, 0, 0, NULL);
if (old->release_fb) {
@@ -8122,7 +8365,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
@@ -8153,7 +8396,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
if (HAS_PCH_SPLIT(dev))
@@ -8190,8 +8433,12 @@ void intel_mark_busy(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- hsw_package_c8_gpu_busy(dev_priv);
+ if (dev_priv->mm.busy)
+ return;
+
+ intel_runtime_pm_get(dev_priv);
i915_update_gfx_val(dev_priv);
+ dev_priv->mm.busy = true;
}
void intel_mark_idle(struct drm_device *dev)
@@ -8199,20 +8446,26 @@ void intel_mark_idle(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- hsw_package_c8_gpu_idle(dev_priv);
-
- if (!i915_powersave)
+ if (!dev_priv->mm.busy)
return;
+ dev_priv->mm.busy = false;
+
+ if (!i915.powersave)
+ goto out;
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (!crtc->fb)
+ if (!crtc->primary->fb)
continue;
intel_decrease_pllclock(crtc);
}
- if (dev_priv->info->gen >= 6)
+ if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);
+
+out:
+ intel_runtime_pm_put(dev_priv);
}
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -8221,14 +8474,14 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
- if (!i915_powersave)
+ if (!i915.powersave)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (!crtc->fb)
+ if (!crtc->primary->fb)
continue;
- if (to_intel_framebuffer(crtc->fb)->obj != obj)
+ if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
continue;
intel_increase_pllclock(crtc);
@@ -8284,7 +8537,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
static void do_intel_finish_page_flip(struct drm_device *dev,
struct drm_crtc *crtc)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
@@ -8325,7 +8578,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
do_intel_finish_page_flip(dev, crtc);
@@ -8333,7 +8586,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
do_intel_finish_page_flip(dev, crtc);
@@ -8341,7 +8594,7 @@ void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
@@ -8656,7 +8909,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *old_fb = crtc->fb;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
@@ -8664,7 +8917,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
int ret;
/* Can't change pixel format via MI display flips. */
- if (fb->pixel_format != crtc->fb->pixel_format)
+ if (fb->pixel_format != crtc->primary->fb->pixel_format)
return -EINVAL;
/*
@@ -8672,10 +8925,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 * Note that pitch changes could also affect these registers.
*/
if (INTEL_INFO(dev)->gen > 3 &&
- (fb->offsets[0] != crtc->fb->offsets[0] ||
- fb->pitches[0] != crtc->fb->pitches[0]))
+ (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
+ fb->pitches[0] != crtc->primary->fb->pitches[0]))
return -EINVAL;
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ goto out_hang;
+
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
@@ -8713,7 +8969,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
drm_gem_object_reference(&work->old_fb_obj->base);
drm_gem_object_reference(&obj->base);
- crtc->fb = fb;
+ crtc->primary->fb = fb;
work->pending_flip_obj = obj;
@@ -8736,7 +8992,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
- crtc->fb = old_fb;
+ crtc->primary->fb = old_fb;
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@@ -8750,6 +9006,13 @@ cleanup:
free_work:
kfree(work);
+ if (ret == -EIO) {
+out_hang:
+ intel_crtc_wait_for_pending_flips(crtc);
+ ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
+ if (ret == 0 && event)
+ drm_send_vblank_event(dev, intel_crtc->pipe, event);
+ }
return ret;
}
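In the flip path above, out_hang is a label placed inside the if (ret == -EIO) body, so the early goto for a terminally wedged GPU jumps straight into the fallback while the normal error path only reaches it on an -EIO result. A small standalone sketch of jumping into a conditional block like that (names and error values are placeholders):

#include <stdio.h>

static int do_flip(int gpu_wedged, int io_error)
{
        int ret = 0;

        if (gpu_wedged)
                goto out_hang;          /* skip the flip entirely */

        ret = io_error ? -5 : 0;        /* -5 stands in for -EIO from a failed flip */

        if (ret == -5) {
out_hang:
                /* fall back to a plain set_base-style update */
                printf("falling back to set_base\n");
                ret = 0;
        }
        return ret;
}

int main(void)
{
        printf("wedged:   %d\n", do_flip(1, 0));
        printf("io error: %d\n", do_flip(0, 1));
        printf("ok:       %d\n", do_flip(0, 0));
        return 0;
}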
@@ -8766,6 +9029,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
*/
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
{
+ struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -8780,6 +9044,16 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
encoder->new_crtc =
to_intel_crtc(encoder->base.crtc);
}
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ crtc->new_enabled = crtc->base.enabled;
+
+ if (crtc->new_enabled)
+ crtc->new_config = &crtc->config;
+ else
+ crtc->new_config = NULL;
+ }
}
/**
@@ -8789,6 +9063,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
*/
static void intel_modeset_commit_output_state(struct drm_device *dev)
{
+ struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -8801,6 +9076,11 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
base.head) {
encoder->base.crtc = &encoder->new_crtc->base;
}
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ crtc->base.enabled = crtc->new_enabled;
+ }
}
static void
@@ -8941,23 +9221,47 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}
-static bool check_encoder_cloning(struct drm_crtc *crtc)
+static bool encoders_cloneable(const struct intel_encoder *a,
+ const struct intel_encoder *b)
{
- int num_encoders = 0;
- bool uncloneable_encoders = false;
+ /* masks could be asymmetric, so check both ways */
+ return a == b || (a->cloneable & (1 << b->type) &&
+ b->cloneable & (1 << a->type));
+}
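Here cloneable is a bitmask of encoder types that may share a pipe with this encoder, and because the two masks are not required to agree, the test runs in both directions. A self-contained sketch of the same check, with made-up encoder types and masks:

#include <stdio.h>

enum encoder_type { TYPE_DAC = 0, TYPE_TMDS = 1, TYPE_LVDS = 2 };

struct encoder {
        enum encoder_type type;
        unsigned int cloneable;   /* bitmask of types this encoder tolerates */
};

static int encoders_cloneable(const struct encoder *a, const struct encoder *b)
{
        /* masks could be asymmetric, so check both ways */
        return a == b || ((a->cloneable & (1 << b->type)) &&
                          (b->cloneable & (1 << a->type)));
}

int main(void)
{
        struct encoder dac  = { TYPE_DAC,  1 << TYPE_TMDS };
        struct encoder tmds = { TYPE_TMDS, 1 << TYPE_DAC  };
        struct encoder lvds = { TYPE_LVDS, 0 };

        printf("dac+tmds: %d\n", encoders_cloneable(&dac, &tmds));  /* 1 */
        printf("dac+lvds: %d\n", encoders_cloneable(&dac, &lvds));  /* 0 */
        return 0;
}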
+
+static bool check_single_encoder_cloning(struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *source_encoder;
+
+ list_for_each_entry(source_encoder,
+ &dev->mode_config.encoder_list, base.head) {
+ if (source_encoder->new_crtc != crtc)
+ continue;
+
+ if (!encoders_cloneable(encoder, source_encoder))
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_encoder_cloning(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
- list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
- base.head) {
- if (&encoder->new_crtc->base != crtc)
+ list_for_each_entry(encoder,
+ &dev->mode_config.encoder_list, base.head) {
+ if (encoder->new_crtc != crtc)
continue;
- num_encoders++;
- if (!encoder->cloneable)
- uncloneable_encoders = true;
+ if (!check_single_encoder_cloning(crtc, encoder))
+ return false;
}
- return !(num_encoders > 1 && uncloneable_encoders);
+ return true;
}
static struct intel_crtc_config *
@@ -8971,7 +9275,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
int plane_bpp, ret = -EINVAL;
bool retry = true;
- if (!check_encoder_cloning(crtc)) {
+ if (!check_encoder_cloning(to_intel_crtc(crtc))) {
DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
return ERR_PTR(-EINVAL);
}
@@ -9127,29 +9431,22 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
*prepare_pipes |= 1 << encoder->new_crtc->pipe;
}
- /* Check for any pipes that will be fully disabled ... */
+ /* Check for pipes that will be enabled/disabled ... */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
- bool used = false;
-
- /* Don't try to disable disabled crtcs. */
- if (!intel_crtc->base.enabled)
+ if (intel_crtc->base.enabled == intel_crtc->new_enabled)
continue;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
- if (encoder->new_crtc == intel_crtc)
- used = true;
- }
-
- if (!used)
+ if (!intel_crtc->new_enabled)
*disable_pipes |= 1 << intel_crtc->pipe;
+ else
+ *prepare_pipes |= 1 << intel_crtc->pipe;
}
/* set_mode is also used to update properties on live display pipes. */
intel_crtc = to_intel_crtc(crtc);
- if (crtc->enabled)
+ if (intel_crtc->new_enabled)
*prepare_pipes |= 1 << intel_crtc->pipe;
/*
@@ -9208,10 +9505,13 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
intel_modeset_commit_output_state(dev);
- /* Update computed state. */
+ /* Double check state. */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
- intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+ WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
+ WARN_ON(intel_crtc->new_config &&
+ intel_crtc->new_config != &intel_crtc->config);
+ WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
}
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -9380,10 +9680,8 @@ intel_pipe_config_compare(struct drm_device *dev,
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- if (!HAS_DDI(dev)) {
- PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
- }
+ PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
@@ -9471,7 +9769,7 @@ check_encoder_state(struct drm_device *dev)
static void
check_crtc_state(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_crtc_config pipe_config;
@@ -9539,7 +9837,7 @@ check_crtc_state(struct drm_device *dev)
static void
check_shared_dpll_state(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_dpll_hw_state dpll_hw_state;
int i;
@@ -9612,7 +9910,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode;
struct intel_crtc_config *pipe_config = NULL;
struct intel_crtc *intel_crtc;
@@ -9643,6 +9941,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
}
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
"[modeset]");
+ to_intel_crtc(crtc)->new_config = pipe_config;
}
/*
@@ -9653,8 +9952,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* adjusted_mode bits in the crtc directly.
*/
if (IS_VALLEYVIEW(dev)) {
- valleyview_modeset_global_pipes(dev, &prepare_pipes,
- modeset_pipes, pipe_config);
+ valleyview_modeset_global_pipes(dev, &prepare_pipes);
/* may have added more to prepare_pipes than we should */
prepare_pipes &= ~disable_pipes;
@@ -9676,6 +9974,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
/* mode_set/enable/disable functions rely on a correct pipe
* config. */
to_intel_crtc(crtc)->config = *pipe_config;
+ to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
/*
* Calculate and store various constants which
@@ -9734,7 +10033,7 @@ static int intel_set_mode(struct drm_crtc *crtc,
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
- intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
}
#undef for_each_intel_crtc_masked
@@ -9746,16 +10045,24 @@ static void intel_set_config_free(struct intel_set_config *config)
kfree(config->save_connector_encoders);
kfree(config->save_encoder_crtcs);
+ kfree(config->save_crtc_enabled);
kfree(config);
}
static int intel_set_config_save_state(struct drm_device *dev,
struct intel_set_config *config)
{
+ struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
int count;
+ config->save_crtc_enabled =
+ kcalloc(dev->mode_config.num_crtc,
+ sizeof(bool), GFP_KERNEL);
+ if (!config->save_crtc_enabled)
+ return -ENOMEM;
+
config->save_encoder_crtcs =
kcalloc(dev->mode_config.num_encoder,
sizeof(struct drm_crtc *), GFP_KERNEL);
@@ -9773,6 +10080,11 @@ static int intel_set_config_save_state(struct drm_device *dev,
* restored, not the drivers personal bookkeeping.
*/
count = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ config->save_crtc_enabled[count++] = crtc->enabled;
+ }
+
+ count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
config->save_encoder_crtcs[count++] = encoder->crtc;
}
@@ -9788,11 +10100,22 @@ static int intel_set_config_save_state(struct drm_device *dev,
static void intel_set_config_restore_state(struct drm_device *dev,
struct intel_set_config *config)
{
+ struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
int count;
count = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ crtc->new_enabled = config->save_crtc_enabled[count++];
+
+ if (crtc->new_enabled)
+ crtc->new_config = &crtc->config;
+ else
+ crtc->new_config = NULL;
+ }
+
+ count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->new_crtc =
to_intel_crtc(config->save_encoder_crtcs[count++]);
@@ -9834,13 +10157,13 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
* and then just flip_or_move it */
if (is_crtc_connector_off(set)) {
config->mode_changed = true;
- } else if (set->crtc->fb != set->fb) {
+ } else if (set->crtc->primary->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
- if (set->crtc->fb == NULL) {
+ if (set->crtc->primary->fb == NULL) {
struct intel_crtc *intel_crtc =
to_intel_crtc(set->crtc);
- if (intel_crtc->active && i915_fastboot) {
+ if (intel_crtc->active && i915.fastboot) {
DRM_DEBUG_KMS("crtc has no fb, will flip\n");
config->fb_changed = true;
} else {
@@ -9850,7 +10173,7 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
} else if (set->fb == NULL) {
config->mode_changed = true;
} else if (set->fb->pixel_format !=
- set->crtc->fb->pixel_format) {
+ set->crtc->primary->fb->pixel_format) {
config->mode_changed = true;
} else {
config->fb_changed = true;
@@ -9876,9 +10199,9 @@ intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_mode_set *set,
struct intel_set_config *config)
{
- struct drm_crtc *new_crtc;
struct intel_connector *connector;
struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
int ro;
/* The upper layers ensure that we either disable a crtc or have a list
@@ -9921,6 +10244,8 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Update crtc of enabled connectors. */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
+ struct drm_crtc *new_crtc;
+
if (!connector->new_encoder)
continue;
@@ -9971,9 +10296,58 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
/* Now we've also updated encoder->new_crtc for all encoders. */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ crtc->new_enabled = false;
+
+ list_for_each_entry(encoder,
+ &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->new_crtc == crtc) {
+ crtc->new_enabled = true;
+ break;
+ }
+ }
+
+ if (crtc->new_enabled != crtc->base.enabled) {
+ DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
+ crtc->new_enabled ? "en" : "dis");
+ config->mode_changed = true;
+ }
+
+ if (crtc->new_enabled)
+ crtc->new_config = &crtc->config;
+ else
+ crtc->new_config = NULL;
+ }
+
return 0;
}
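The staging loop just added derives each CRTC's new_enabled flag purely from which encoders point at it and requests a full mode switch whenever that differs from the current state. A condensed sketch of that derivation, with plain arrays standing in for the DRM mode_config lists:

#include <stdio.h>
#include <stdbool.h>

struct crtc { bool enabled, new_enabled; };
struct encoder { struct crtc *new_crtc; };

static bool stage_crtc_state(struct crtc *crtcs, int ncrtc,
                             struct encoder *encoders, int nenc)
{
        bool mode_changed = false;

        for (int c = 0; c < ncrtc; c++) {
                crtcs[c].new_enabled = false;

                for (int e = 0; e < nenc; e++) {
                        if (encoders[e].new_crtc == &crtcs[c]) {
                                crtcs[c].new_enabled = true;
                                break;
                        }
                }

                if (crtcs[c].new_enabled != crtcs[c].enabled)
                        mode_changed = true;    /* full mode switch needed */
        }
        return mode_changed;
}

int main(void)
{
        struct crtc crtcs[2] = { { true, false }, { false, false } };
        struct encoder encoders[1] = { { &crtcs[1] } };

        printf("mode_changed = %d\n", stage_crtc_state(crtcs, 2, encoders, 1));
        return 0;
}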
+static void disable_crtc_nofb(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
+ pipe_name(crtc->pipe));
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ if (connector->new_encoder &&
+ connector->new_encoder->new_crtc == crtc)
+ connector->new_encoder = NULL;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ if (encoder->new_crtc == crtc)
+ encoder->new_crtc = NULL;
+ }
+
+ crtc->new_enabled = false;
+ crtc->new_config = NULL;
+}
+
static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
@@ -10012,7 +10386,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
save_set.mode = &set->crtc->mode;
save_set.x = set->crtc->x;
save_set.y = set->crtc->y;
- save_set.fb = set->crtc->fb;
+ save_set.fb = set->crtc->primary->fb;
/* Compute whether we need a full modeset, only an fb base update or no
* change at all. In the future we might also check whether only the
@@ -10040,7 +10414,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
* flipping, so increasing its cost here shouldn't be a big
* deal).
*/
- if (i915_fastboot && ret == 0)
+ if (i915.fastboot && ret == 0)
intel_modeset_check_state(set->crtc->dev);
}
@@ -10050,6 +10424,15 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
fail:
intel_set_config_restore_state(dev, config);
+ /*
+ * HACK: if the pipe was on, but we didn't have a framebuffer,
+ * force the pipe off to avoid oopsing in the modeset code
+ * due to fb==NULL. This should only happen during boot since
+ * we don't yet reconstruct the FB from the hardware state.
+ */
+ if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
+ disable_crtc_nofb(to_intel_crtc(save_set.crtc));
+
/* Try to restore the config */
if (config->mode_changed &&
intel_set_mode(save_set.crtc, save_set.mode,
@@ -10174,7 +10557,7 @@ static void intel_shared_dpll_init(struct drm_device *dev)
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int i;
@@ -10184,6 +10567,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+ if (IS_GEN2(dev)) {
+ intel_crtc->max_cursor_width = GEN2_CURSOR_WIDTH;
+ intel_crtc->max_cursor_height = GEN2_CURSOR_HEIGHT;
+ } else {
+ intel_crtc->max_cursor_width = CURSOR_WIDTH;
+ intel_crtc->max_cursor_height = CURSOR_HEIGHT;
+ }
+ dev->mode_config.cursor_width = intel_crtc->max_cursor_width;
+ dev->mode_config.cursor_height = intel_crtc->max_cursor_height;
+
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
for (i = 0; i < 256; i++) {
intel_crtc->lut_r[i] = i;
@@ -10255,12 +10648,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
list_for_each_entry(source_encoder,
&dev->mode_config.encoder_list, base.head) {
-
- if (encoder == source_encoder)
- index_mask |= (1 << entry);
-
- /* Intel hw has only one MUX where encoders could be cloned. */
- if (encoder->cloneable && source_encoder->cloneable)
+ if (encoders_cloneable(encoder, source_encoder))
index_mask |= (1 << entry);
entry++;
@@ -10279,8 +10667,7 @@ static bool has_edp_a(struct drm_device *dev)
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_GEN5(dev) &&
- (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
+ if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
@@ -10433,18 +10820,13 @@ static void intel_setup_outputs(struct drm_device *dev)
drm_helper_move_panel_connectors_to_head(dev);
}
-void intel_framebuffer_fini(struct intel_framebuffer *fb)
-{
- drm_framebuffer_cleanup(&fb->base);
- WARN_ON(!fb->obj->framebuffer_references--);
- drm_gem_object_unreference_unlocked(&fb->obj->base);
-}
-
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- intel_framebuffer_fini(intel_fb);
+ drm_framebuffer_cleanup(fb);
+ WARN_ON(!intel_fb->obj->framebuffer_references--);
+ drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
kfree(intel_fb);
}
@@ -10463,12 +10845,12 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
-int intel_framebuffer_init(struct drm_device *dev,
- struct intel_framebuffer *intel_fb,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_i915_gem_object *obj)
+static int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *intel_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
{
- int aligned_height, tile_height;
+ int aligned_height;
int pitch_limit;
int ret;
@@ -10562,9 +10944,8 @@ int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
- tile_height = IS_GEN2(dev) ? 16 : 8;
- aligned_height = ALIGN(mode_cmd->height,
- obj->tiling_mode ? tile_height : 1);
+ aligned_height = intel_align_height(dev, mode_cmd->height,
+ obj->tiling_mode);
/* FIXME drm helper for size checks (especially planar formats)? */
if (obj->base.size < aligned_height * mode_cmd->pitches[0])
return -EINVAL;
@@ -10624,32 +11005,40 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.get_plane_config = ironlake_get_plane_config;
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = haswell_crtc_off;
- dev_priv->display.update_plane = ironlake_update_plane;
+ dev_priv->display.update_primary_plane =
+ ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+ dev_priv->display.get_plane_config = ironlake_get_plane_config;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
- dev_priv->display.update_plane = ironlake_update_plane;
+ dev_priv->display.update_primary_plane =
+ ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_plane_config = i9xx_get_plane_config;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
- dev_priv->display.update_plane = i9xx_update_plane;
+ dev_priv->display.update_primary_plane =
+ i9xx_update_primary_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_plane_config = i9xx_get_plane_config;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
- dev_priv->display.update_plane = i9xx_update_plane;
+ dev_priv->display.update_primary_plane =
+ i9xx_update_primary_plane;
}
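intel_init_display() picks one set of display hooks per platform and stores them as function pointers, so later code calls through dev_priv->display without caring about the generation. A reduced sketch of that dispatch-table pattern, with invented ops and platforms:

#include <stdio.h>

struct display_ops {
        void (*crtc_enable)(int pipe);
        void (*update_primary_plane)(int pipe);
};

static void gen_a_crtc_enable(int pipe)   { printf("gen A: enable pipe %d\n", pipe); }
static void gen_a_update_plane(int pipe)  { printf("gen A: update plane %d\n", pipe); }
static void gen_b_crtc_enable(int pipe)   { printf("gen B: enable pipe %d\n", pipe); }
static void gen_b_update_plane(int pipe)  { printf("gen B: update plane %d\n", pipe); }

static void init_display(struct display_ops *ops, int is_gen_b)
{
        if (is_gen_b) {
                ops->crtc_enable = gen_b_crtc_enable;
                ops->update_primary_plane = gen_b_update_plane;
        } else {
                ops->crtc_enable = gen_a_crtc_enable;
                ops->update_primary_plane = gen_a_update_plane;
        }
}

int main(void)
{
        struct display_ops ops;

        init_display(&ops, 1);
        ops.crtc_enable(0);             /* callers never check the platform */
        ops.update_primary_plane(0);
        return 0;
}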
/* Returns the core display clock speed */
@@ -10839,6 +11228,9 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Acer Aspire 5336 */
+ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -10869,6 +11261,7 @@ static void i915_disable_vga(struct drm_device *dev)
u8 sr1;
u32 vga_reg = i915_vgacntrl_reg(dev);
+ /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(SR01, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
@@ -10901,7 +11294,9 @@ void intel_modeset_suspend_hw(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i, j, ret;
+ int sprite, ret;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
drm_mode_config_init(dev);
@@ -10938,13 +11333,13 @@ void intel_modeset_init(struct drm_device *dev)
INTEL_INFO(dev)->num_pipes,
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
- for_each_pipe(i) {
- intel_crtc_init(dev, i);
- for (j = 0; j < dev_priv->num_plane; j++) {
- ret = intel_plane_init(dev, i, j);
+ for_each_pipe(pipe) {
+ intel_crtc_init(dev, pipe);
+ for_each_sprite(pipe, sprite) {
+ ret = intel_plane_init(dev, pipe, sprite);
if (ret)
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
- pipe_name(i), sprite_name(i, j), ret);
+ pipe_name(pipe), sprite_name(pipe, sprite), ret);
}
}
@@ -10960,6 +11355,33 @@ void intel_modeset_init(struct drm_device *dev)
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
+
+ mutex_lock(&dev->mode_config.mutex);
+ intel_modeset_setup_hw_state(dev, false);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ if (!crtc->active)
+ continue;
+
+ /*
+ * Note that reserving the BIOS fb up front prevents us
+ * from stuffing other stolen allocations like the ring
+ * on top. This prevents some ugliness at boot time, and
+ * can even allow for smooth boot transitions if the BIOS
+ * fb is large enough for the active pipe configuration.
+ */
+ if (dev_priv->display.get_plane_config) {
+ dev_priv->display.get_plane_config(crtc,
+ &crtc->plane_config);
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_plane_obj(crtc, &crtc->plane_config);
+ }
+ }
}
static void
@@ -11097,6 +11519,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
encoder->base.crtc = NULL;
}
}
+ if (crtc->active) {
+ /*
+ * We start out with underrun reporting disabled to avoid races.
+ * For correct bookkeeping mark this on active crtcs.
+ *
+ * No protection against concurrent access is required - at
+ * worst a fifo underrun happens which also sets this to false.
+ */
+ crtc->cpu_fifo_underrun_disabled = true;
+ crtc->pch_fifo_underrun_disabled = true;
+ }
}
static void intel_sanitize_encoder(struct intel_encoder *encoder)
@@ -11142,11 +11575,21 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
-void i915_redisable_vga(struct drm_device *dev)
+void i915_redisable_vga_power_on(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg = i915_vgacntrl_reg(dev);
+ if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ i915_disable_vga(dev);
+ }
+}
+
+void i915_redisable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
/* This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
@@ -11154,14 +11597,10 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
- if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
- (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
+ if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
- if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
- DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- i915_disable_vga(dev);
- }
+ i915_redisable_vga_power_on(dev);
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -11265,9 +11704,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
- if (crtc->active && i915_fastboot) {
- intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
-
+ if (crtc->active && i915.fastboot) {
+ intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
crtc->base.base.id);
drm_mode_debug_printmodeline(&crtc->base.mode);
@@ -11313,7 +11751,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
dev_priv->pipe_to_crtc_mapping[pipe];
__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
- crtc->fb);
+ crtc->primary->fb);
}
} else {
intel_modeset_update_staged_output_state(dev);
@@ -11324,14 +11762,44 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
void intel_modeset_gem_init(struct drm_device *dev)
{
+ struct drm_crtc *c;
+ struct intel_framebuffer *fb;
+
+ mutex_lock(&dev->struct_mutex);
+ intel_init_gt_powersave(dev);
+ mutex_unlock(&dev->struct_mutex);
+
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_config_reset(dev);
- intel_modeset_setup_hw_state(dev, false);
- mutex_unlock(&dev->mode_config.mutex);
+ /*
+ * Make sure any fbs we allocated at startup are properly
+ * pinned & fenced. When we do the allocation it's too early
+ * for this.
+ */
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ if (!c->primary->fb)
+ continue;
+
+ fb = to_intel_framebuffer(c->primary->fb);
+ if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
+ DRM_ERROR("failed to pin boot fb on pipe %d\n",
+ to_intel_crtc(c)->pipe);
+ drm_framebuffer_unreference(c->primary->fb);
+ c->primary->fb = NULL;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+void intel_connector_unregister(struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+
+ intel_panel_destroy_backlight(connector);
+ drm_sysfs_connector_remove(connector);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -11359,7 +11827,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
- if (!crtc->fb)
+ if (!crtc->primary->fb)
continue;
intel_increase_pllclock(crtc);
@@ -11378,13 +11846,19 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* destroy the backlight and sysfs files before encoders/connectors */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- intel_panel_destroy_backlight(connector);
- drm_sysfs_connector_remove(connector);
+ struct intel_connector *intel_connector;
+
+ intel_connector = to_intel_connector(connector);
+ intel_connector->unregister(intel_connector);
}
drm_mode_config_cleanup(dev);
intel_cleanup_overlay(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_cleanup_gt_powersave(dev);
+ mutex_unlock(&dev->struct_mutex);
}
/*
@@ -11412,12 +11886,24 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
- pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
+ if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
+ DRM_ERROR("failed to read control word\n");
+ return -EIO;
+ }
+
+ if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
+ return 0;
+
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
- pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
+
+ if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
+ DRM_ERROR("failed to write control word\n");
+ return -EIO;
+ }
+
return 0;
}
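The reworked intel_modeset_vga_set_state() now checks both PCI accesses and bails out early when the VGA-disable bit already matches the requested state before doing the read-modify-write. A generic sketch of that early-out pattern, with a plain variable standing in for the bridge register:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define VGA_DISABLE (1u << 1)   /* placeholder bit position */

static uint16_t fake_reg = VGA_DISABLE;

static int vga_set_state(bool enable)
{
        uint16_t ctrl = fake_reg;                 /* pci_read_config_word() stand-in */

        /* nothing to do if the bit already matches the requested state */
        if (!!(ctrl & VGA_DISABLE) == !enable)
                return 0;

        if (enable)
                ctrl &= ~VGA_DISABLE;
        else
                ctrl |= VGA_DISABLE;

        fake_reg = ctrl;                          /* pci_write_config_word() stand-in */
        return 0;
}

int main(void)
{
        vga_set_state(true);
        printf("reg = 0x%04x\n", fake_reg);       /* VGA-disable bit cleared */
        vga_set_state(true);                      /* second call is a no-op */
        return 0;
}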
@@ -11467,7 +11953,7 @@ struct intel_display_error_state {
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
int transcoders[] = {
TRANSCODER_A,
@@ -11489,7 +11975,8 @@ intel_display_capture_error_state(struct drm_device *dev)
for_each_pipe(i) {
error->pipe[i].power_domain_on =
- intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
+ intel_display_power_enabled_sw(dev_priv,
+ POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
@@ -11527,7 +12014,7 @@ intel_display_capture_error_state(struct drm_device *dev)
enum transcoder cpu_transcoder = transcoders[i];
error->transcoder[i].power_domain_on =
- intel_display_power_enabled_sw(dev,
+ intel_display_power_enabled_sw(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2688f6d64bb9..a0dad1a2f819 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -91,18 +91,25 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
}
static void intel_dp_link_down(struct intel_dp *intel_dp);
+static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
+static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+ struct drm_device *dev = intel_dp->attached_connector->base.dev;
switch (max_link_bw) {
case DP_LINK_BW_1_62:
case DP_LINK_BW_2_7:
break;
case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
- max_link_bw = DP_LINK_BW_2_7;
+ if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
+ intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
+ max_link_bw = DP_LINK_BW_5_4;
+ else
+ max_link_bw = DP_LINK_BW_2_7;
break;
default:
WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
@@ -294,7 +301,7 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
-static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
+static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -302,12 +309,13 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
-static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
+static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
+ return !dev_priv->pm.suspended &&
+ (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}
static void
@@ -319,7 +327,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return;
- if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
+ if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
WARN(1, "eDP powered off while attempting aux channel communication.\n");
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
I915_READ(_pp_stat_reg(intel_dp)),
@@ -351,31 +359,46 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
return status;
}
-static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
- int index)
+static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- /* The clock divider is based off the hrawclk,
- * and would like to run at 2MHz. So, take the
- * hrawclk value and divide by 2 and use that
- *
- * Note that PCH attached eDP panels should use a 125MHz input
- * clock divider.
+ /*
+ * The clock divider is based off the hrawclk, and would like to run at
+ * 2MHz. So, take the hrawclk value and divide by 2 and use that
*/
- if (IS_VALLEYVIEW(dev)) {
- return index ? 0 : 100;
- } else if (intel_dig_port->port == PORT_A) {
- if (index)
- return 0;
- if (HAS_DDI(dev))
- return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
- else if (IS_GEN6(dev) || IS_GEN7(dev))
+ return index ? 0 : intel_hrawclk(dev) / 2;
+}
+
+static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ if (index)
+ return 0;
+
+ if (intel_dig_port->port == PORT_A) {
+ if (IS_GEN6(dev) || IS_GEN7(dev))
return 200; /* SNB & IVB eDP input clock at 400Mhz */
else
return 225; /* eDP input clock at 450Mhz */
+ } else {
+ return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+ }
+}
+
+static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (intel_dig_port->port == PORT_A) {
+ if (index)
+ return 0;
+ return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
switch (index) {
@@ -383,13 +406,46 @@ static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
case 1: return 72;
default: return 0;
}
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else {
return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
- } else {
- return index ? 0 :intel_hrawclk(dev) / 2;
}
}
+static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ return index ? 0 : 100;
+}
+
+static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t aux_clock_divider)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ uint32_t precharge, timeout;
+
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
+ if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
+ timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+ else
+ timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
+ return DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ timeout |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+}
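i9xx_get_aux_send_ctl() only assembles a 32-bit AUX control value by OR-ing flag bits with shifted fields. A generic sketch of that kind of register packing; the bit positions below are placeholders for illustration, not the real AUX_CH_CTL layout:

#include <stdio.h>
#include <stdint.h>

/* placeholder layout: bits 0-4 clock divider, 5-9 message size, 10-12 precharge */
#define CTL_CLOCK_DIV_SHIFT    0
#define CTL_MSG_SIZE_SHIFT     5
#define CTL_PRECHARGE_SHIFT    10
#define CTL_SEND_BUSY          (1u << 31)
#define CTL_DONE               (1u << 30)
#define CTL_INTERRUPT          (1u << 29)

static uint32_t pack_send_ctl(int has_irq, unsigned msg_size,
                              unsigned precharge, unsigned clock_div)
{
        return CTL_SEND_BUSY |
               CTL_DONE |
               (has_irq ? CTL_INTERRUPT : 0) |
               (msg_size << CTL_MSG_SIZE_SHIFT) |
               (precharge << CTL_PRECHARGE_SHIFT) |
               (clock_div << CTL_CLOCK_DIV_SHIFT);
}

int main(void)
{
        printf("ctl = 0x%08x\n", pack_send_ctl(1, 5, 3, 0x1f));
        return 0;
}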
+
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
@@ -403,9 +459,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
- int try, precharge, clock = 0;
+ int try, clock = 0;
bool has_aux_irq = HAS_AUX_IRQ(dev);
- uint32_t timeout;
+ bool vdd;
+
+ vdd = _edp_panel_vdd_on(intel_dp);
/* dp aux is extremely sensitive to irq latency, hence request the
* lowest possible wakeup latency and so prevent the cpu from going into
@@ -415,16 +473,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
intel_dp_check_edp(intel_dp);
- if (IS_GEN6(dev))
- precharge = 3;
- else
- precharge = 5;
-
- if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
- timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
- else
- timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
-
intel_aux_display_runtime_get(dev_priv);
/* Try to wait for any previous AUX channel activity */
@@ -448,7 +496,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
goto out;
}
- while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
+ while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+ u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+ has_aux_irq,
+ send_bytes,
+ aux_clock_divider);
+
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
@@ -457,16 +510,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
pack_aux(send + i, send_bytes - i));
/* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl,
- DP_AUX_CH_CTL_SEND_BUSY |
- (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
- timeout |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
+ I915_WRITE(ch_ctl, send_ctl);
status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
@@ -525,246 +569,140 @@ out:
pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
intel_aux_display_runtime_put(dev_priv);
+ if (vdd)
+ edp_panel_vdd_off(intel_dp, false);
+
return ret;
}
-/* Write data to the aux channel in native mode */
-static int
-intel_dp_aux_native_write(struct intel_dp *intel_dp,
- uint16_t address, uint8_t *send, int send_bytes)
+#define HEADER_SIZE 4
+static ssize_t
+intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
+ struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
+ uint8_t txbuf[20], rxbuf[20];
+ size_t txsize, rxsize;
int ret;
- uint8_t msg[20];
- int msg_bytes;
- uint8_t ack;
- int retry;
- if (WARN_ON(send_bytes > 16))
- return -E2BIG;
+ txbuf[0] = msg->request << 4;
+ txbuf[1] = msg->address >> 8;
+ txbuf[2] = msg->address & 0xff;
+ txbuf[3] = msg->size - 1;
- intel_dp_check_edp(intel_dp);
- msg[0] = DP_AUX_NATIVE_WRITE << 4;
- msg[1] = address >> 8;
- msg[2] = address & 0xff;
- msg[3] = send_bytes - 1;
- memcpy(&msg[4], send, send_bytes);
- msg_bytes = send_bytes + 4;
- for (retry = 0; retry < 7; retry++) {
- ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
- if (ret < 0)
- return ret;
- ack >>= 4;
- if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
- return send_bytes;
- else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
- usleep_range(400, 500);
- else
- return -EIO;
- }
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ txsize = HEADER_SIZE + msg->size;
+ rxsize = 1;
- DRM_ERROR("too many retries, giving up\n");
- return -EIO;
-}
+ if (WARN_ON(txsize > 20))
+ return -E2BIG;
-/* Write a single byte to the aux channel in native mode */
-static int
-intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
- uint16_t address, uint8_t byte)
-{
- return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
-}
+ memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
-/* read bytes from a native aux channel */
-static int
-intel_dp_aux_native_read(struct intel_dp *intel_dp,
- uint16_t address, uint8_t *recv, int recv_bytes)
-{
- uint8_t msg[4];
- int msg_bytes;
- uint8_t reply[20];
- int reply_bytes;
- uint8_t ack;
- int ret;
- int retry;
+ ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+
+ /* Return payload size. */
+ ret = msg->size;
+ }
+ break;
- if (WARN_ON(recv_bytes > 19))
- return -E2BIG;
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ txsize = HEADER_SIZE;
+ rxsize = msg->size + 1;
- intel_dp_check_edp(intel_dp);
- msg[0] = DP_AUX_NATIVE_READ << 4;
- msg[1] = address >> 8;
- msg[2] = address & 0xff;
- msg[3] = recv_bytes - 1;
-
- msg_bytes = 4;
- reply_bytes = recv_bytes + 1;
-
- for (retry = 0; retry < 7; retry++) {
- ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
- reply, reply_bytes);
- if (ret == 0)
- return -EPROTO;
- if (ret < 0)
- return ret;
- ack = reply[0] >> 4;
- if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
- memcpy(recv, reply + 1, ret - 1);
- return ret - 1;
+ if (WARN_ON(rxsize > 20))
+ return -E2BIG;
+
+ ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+ /*
+ * Assume happy day, and copy the data. The caller is
+ * expected to check msg->reply before touching it.
+ *
+ * Return payload size.
+ */
+ ret--;
+ memcpy(msg->buffer, rxbuf + 1, ret);
}
- else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
- usleep_range(400, 500);
- else
- return -EIO;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
}
- DRM_ERROR("too many retries, giving up\n");
- return -EIO;
+ return ret;
}
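intel_dp_aux_transfer() serialises each message into a four-byte header, the request nibble in the top of byte 0, a 16-bit address in bytes 1 and 2 as used above, and the transfer length minus one in byte 3, with write payloads appended after it. A standalone sketch mirroring that packing; the aux_msg struct is a simplified stand-in, not the drm_dp_aux_msg definition:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define HEADER_SIZE 4

struct aux_msg {
        uint8_t  request;       /* native/I2C read or write request nibble */
        uint16_t address;
        uint8_t  buffer[16];
        size_t   size;
};

static size_t pack_aux_write(const struct aux_msg *msg, uint8_t *txbuf)
{
        txbuf[0] = msg->request << 4;
        txbuf[1] = msg->address >> 8;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;

        /* writes carry the payload right after the header */
        memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
        return HEADER_SIZE + msg->size;
}

int main(void)
{
        struct aux_msg msg = { .request = 0x8,      /* a native write, for example */
                               .address = 0x0202,
                               .buffer  = { 0xde, 0xad },
                               .size    = 2 };
        uint8_t txbuf[20];
        size_t n = pack_aux_write(&msg, txbuf);

        for (size_t i = 0; i < n; i++)
                printf("%02x ", txbuf[i]);
        printf("\n");
        return 0;
}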
-static int
-intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
- uint8_t write_byte, uint8_t *read_byte)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- struct intel_dp *intel_dp = container_of(adapter,
- struct intel_dp,
- adapter);
- uint16_t address = algo_data->address;
- uint8_t msg[5];
- uint8_t reply[2];
- unsigned retry;
- int msg_bytes;
- int reply_bytes;
+static void
+intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ enum port port = intel_dig_port->port;
+ const char *name = NULL;
int ret;
- ironlake_edp_panel_vdd_on(intel_dp);
- intel_dp_check_edp(intel_dp);
- /* Set up the command byte */
- if (mode & MODE_I2C_READ)
- msg[0] = DP_AUX_I2C_READ << 4;
- else
- msg[0] = DP_AUX_I2C_WRITE << 4;
-
- if (!(mode & MODE_I2C_STOP))
- msg[0] |= DP_AUX_I2C_MOT << 4;
-
- msg[1] = address >> 8;
- msg[2] = address;
-
- switch (mode) {
- case MODE_I2C_WRITE:
- msg[3] = 0;
- msg[4] = write_byte;
- msg_bytes = 5;
- reply_bytes = 1;
+ switch (port) {
+ case PORT_A:
+ intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
+ name = "DPDDC-A";
+ break;
+ case PORT_B:
+ intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
+ name = "DPDDC-B";
+ break;
+ case PORT_C:
+ intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
+ name = "DPDDC-C";
break;
- case MODE_I2C_READ:
- msg[3] = 0;
- msg_bytes = 4;
- reply_bytes = 2;
+ case PORT_D:
+ intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
+ name = "DPDDC-D";
break;
default:
- msg_bytes = 3;
- reply_bytes = 1;
- break;
+ BUG();
}
- /*
- * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
- * required to retry at least seven times upon receiving AUX_DEFER
- * before giving up the AUX transaction.
- */
- for (retry = 0; retry < 7; retry++) {
- ret = intel_dp_aux_ch(intel_dp,
- msg, msg_bytes,
- reply, reply_bytes);
- if (ret < 0) {
- DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
- goto out;
- }
+ if (!HAS_DDI(dev))
+ intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
- switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
- case DP_AUX_NATIVE_REPLY_ACK:
- /* I2C-over-AUX Reply field is only valid
- * when paired with AUX ACK.
- */
- break;
- case DP_AUX_NATIVE_REPLY_NACK:
- DRM_DEBUG_KMS("aux_ch native nack\n");
- ret = -EREMOTEIO;
- goto out;
- case DP_AUX_NATIVE_REPLY_DEFER:
- /*
- * For now, just give more slack to branch devices. We
- * could check the DPCD for I2C bit rate capabilities,
- * and if available, adjust the interval. We could also
- * be more careful with DP-to-Legacy adapters where a
- * long legacy cable may force very low I2C bit rates.
- */
- if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT)
- usleep_range(500, 600);
- else
- usleep_range(300, 400);
- continue;
- default:
- DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
- reply[0]);
- ret = -EREMOTEIO;
- goto out;
- }
+ intel_dp->aux.name = name;
+ intel_dp->aux.dev = dev->dev;
+ intel_dp->aux.transfer = intel_dp_aux_transfer;
- switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
- case DP_AUX_I2C_REPLY_ACK:
- if (mode == MODE_I2C_READ) {
- *read_byte = reply[1];
- }
- ret = reply_bytes - 1;
- goto out;
- case DP_AUX_I2C_REPLY_NACK:
- DRM_DEBUG_KMS("aux_i2c nack\n");
- ret = -EREMOTEIO;
- goto out;
- case DP_AUX_I2C_REPLY_DEFER:
- DRM_DEBUG_KMS("aux_i2c defer\n");
- udelay(100);
- break;
- default:
- DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
- ret = -EREMOTEIO;
- goto out;
- }
- }
+ DRM_DEBUG_KMS("registering %s bus for %s\n", name,
+ connector->base.kdev->kobj.name);
- DRM_ERROR("too many retries, giving up\n");
- ret = -EREMOTEIO;
+ ret = drm_dp_aux_register_i2c_bus(&intel_dp->aux);
+ if (ret < 0) {
+ DRM_ERROR("drm_dp_aux_register_i2c_bus() for %s failed (%d)\n",
+ name, ret);
+ return;
+ }
-out:
- ironlake_edp_panel_vdd_off(intel_dp, false);
- return ret;
+ ret = sysfs_create_link(&connector->base.kdev->kobj,
+ &intel_dp->aux.ddc.dev.kobj,
+ intel_dp->aux.ddc.dev.kobj.name);
+ if (ret < 0) {
+ DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
+ drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
+ }
}
-static int
-intel_dp_i2c_init(struct intel_dp *intel_dp,
- struct intel_connector *intel_connector, const char *name)
+static void
+intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
- int ret;
-
- DRM_DEBUG_KMS("i2c_init %s\n", name);
- intel_dp->algo.running = false;
- intel_dp->algo.address = 0;
- intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
-
- memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
- intel_dp->adapter.owner = THIS_MODULE;
- intel_dp->adapter.class = I2C_CLASS_DDC;
- strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
- intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
- intel_dp->adapter.algo_data = &intel_dp->algo;
- intel_dp->adapter.dev.parent = intel_connector->base.kdev;
+ struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
- ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
- return ret;
+ sysfs_remove_link(&intel_connector->base.kdev->kobj,
+ intel_dp->aux.ddc.dev.kobj.name);
+ intel_connector_unregister(intel_connector);
}
static void
@@ -812,9 +750,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
- int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ /* Conveniently, the link BW constants become indices with a shift...*/
+ int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
int bpp, mode_rate;
- static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+ static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
int link_avail, link_clock;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
@@ -855,8 +794,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
- for (clock = 0; clock <= max_clock; clock++) {
- for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = 0; clock <= max_clock; clock++) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
@@ -1015,16 +954,16 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
ironlake_set_pll_cpu_edp(intel_dp);
}
-#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
-#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
+#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
-#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
-#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
-static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
@@ -1049,24 +988,41 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Wait complete\n");
}
-static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+static void wait_panel_on(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power on\n");
- ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+ wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
-static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+static void wait_panel_off(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power off time\n");
- ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+ wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
-static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power cycle\n");
- ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+
+ /* When we disable the VDD override bit last we have to do the manual
+ * wait. */
+ wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
+ intel_dp->panel_power_cycle_delay);
+
+ wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+static void wait_backlight_on(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
+ intel_dp->backlight_on_delay);
}
+static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
+ intel_dp->backlight_off_delay);
+}
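wait_backlight_on() and edp_wait_backlight_off() sleep only for whatever portion of the required delay has not already elapsed since the recorded timestamp, rather than a fixed msleep(). A user-space sketch of that "remaining time since a timestamp" helper, using CLOCK_MONOTONIC in place of jiffies:

#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Sleep only for the part of min_ms that has not yet elapsed since 'since'. */
static void wait_remaining_ms(long since, long min_ms)
{
        long elapsed = now_ms() - since;

        if (elapsed >= min_ms)
                return;

        struct timespec req = {
                .tv_sec  = (min_ms - elapsed) / 1000,
                .tv_nsec = ((min_ms - elapsed) % 1000) * 1000000L,
        };
        nanosleep(&req, NULL);
}

int main(void)
{
        long last_power_on = now_ms();

        /* pretend some work happened, then honour a 50 ms backlight-on delay */
        wait_remaining_ms(last_power_on, 50);
        printf("backlight may be enabled now\n");
        return 0;
}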
/* Read the current pp_control value, unlocking the register if it
* is locked
@@ -1084,30 +1040,28 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
return control;
}
-void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
+ bool need_to_disable = !intel_dp->want_panel_vdd;
if (!is_edp(intel_dp))
- return;
-
- WARN(intel_dp->want_panel_vdd,
- "eDP VDD already requested on\n");
+ return false;
intel_dp->want_panel_vdd = true;
- if (ironlake_edp_have_panel_vdd(intel_dp))
- return;
+ if (edp_have_panel_vdd(intel_dp))
+ return need_to_disable;
intel_runtime_pm_get(dev_priv);
DRM_DEBUG_KMS("Turning eDP VDD on\n");
- if (!ironlake_edp_have_panel_power(intel_dp))
- ironlake_wait_panel_power_cycle(intel_dp);
+ if (!edp_have_panel_power(intel_dp))
+ wait_panel_power_cycle(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
@@ -1122,13 +1076,24 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
/*
* If the panel wasn't on, delay before accessing aux channel
*/
- if (!ironlake_edp_have_panel_power(intel_dp)) {
+ if (!edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP was not running\n");
msleep(intel_dp->panel_power_up_delay);
}
+
+ return need_to_disable;
+}
+
+void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+ if (is_edp(intel_dp)) {
+ bool vdd = _edp_panel_vdd_on(intel_dp);
+
+ WARN(!vdd, "eDP VDD already requested on\n");
+ }
}
-static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
+static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1137,7 +1102,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+ if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
DRM_DEBUG_KMS("Turning eDP VDD off\n");
pp = ironlake_get_pp_control(intel_dp);
@@ -1154,24 +1119,24 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
if ((pp & POWER_TARGET_ON) == 0)
- msleep(intel_dp->panel_power_cycle_delay);
+ intel_dp->last_power_cycle = jiffies;
intel_runtime_pm_put(dev_priv);
}
}
-static void ironlake_panel_vdd_work(struct work_struct *__work)
+static void edp_panel_vdd_work(struct work_struct *__work)
{
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
mutex_lock(&dev->mode_config.mutex);
- ironlake_panel_vdd_off_sync(intel_dp);
+ edp_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
-void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
return;
@@ -1181,7 +1146,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
intel_dp->want_panel_vdd = false;
if (sync) {
- ironlake_panel_vdd_off_sync(intel_dp);
+ edp_panel_vdd_off_sync(intel_dp);
} else {
/*
* Queue the timer to fire a long
@@ -1193,7 +1158,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
}
}
-void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+void intel_edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1205,12 +1170,12 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power on\n");
- if (ironlake_edp_have_panel_power(intel_dp)) {
+ if (edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP power already on\n");
return;
}
- ironlake_wait_panel_power_cycle(intel_dp);
+ wait_panel_power_cycle(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
@@ -1228,7 +1193,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- ironlake_wait_panel_on(intel_dp);
+ wait_panel_on(intel_dp);
+ intel_dp->last_power_on = jiffies;
if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1237,7 +1203,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
}
-void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+void intel_edp_panel_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1249,27 +1215,31 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
+ edp_wait_backlight_off(intel_dp);
+
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
- pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+ EDP_BLC_ENABLE);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ intel_dp->want_panel_vdd = false;
+
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- intel_dp->want_panel_vdd = false;
-
- ironlake_wait_panel_off(intel_dp);
+ intel_dp->last_power_cycle = jiffies;
+ wait_panel_off(intel_dp);
/* We got a reference when we enabled the VDD. */
intel_runtime_pm_put(dev_priv);
}
-void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -1287,7 +1257,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
* link. So delay a bit to make sure the image is solid before
* allowing it to appear.
*/
- msleep(intel_dp->backlight_on_delay);
+ wait_backlight_on(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
@@ -1299,7 +1269,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
intel_panel_enable_backlight(intel_dp->attached_connector);
}
-void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1319,7 +1289,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- msleep(intel_dp->backlight_off_delay);
+ intel_dp->last_backlight_off = jiffies;
}
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1383,8 +1353,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
return;
if (mode != DRM_MODE_DPMS_ON) {
- ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
- DP_SET_POWER_D3);
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D3);
if (ret != 1)
DRM_DEBUG_DRIVER("failed to write sink power state\n");
} else {
@@ -1393,9 +1363,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
* time to wake up.
*/
for (i = 0; i < 3; i++) {
- ret = intel_dp_aux_native_write_1(intel_dp,
- DP_SET_POWER,
- DP_SET_POWER_D0);
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D0);
if (ret == 1)
break;
msleep(1);
@@ -1410,7 +1379,14 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 tmp = I915_READ(intel_dp->output_reg);
+ enum intel_display_power_domain power_domain;
+ u32 tmp;
+
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
+ tmp = I915_READ(intel_dp->output_reg);
if (!(tmp & DP_PORT_EN))
return false;
@@ -1604,19 +1580,19 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
+ uint32_t aux_clock_divider;
int precharge = 0x3;
int msg_size = 5; /* Header(4) + Message(1) */
+ aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
- intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
- DP_PSR_ENABLE &
- ~DP_PSR_MAIN_LINK_ACTIVE);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
- intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
- DP_PSR_ENABLE |
- DP_PSR_MAIN_LINK_ACTIVE);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
/* Setup AUX registers */
I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
@@ -1659,7 +1635,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
dev_priv->psr.source_ok = false;
@@ -1675,7 +1651,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- if (!i915_enable_psr) {
+ if (!i915.enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
return false;
}
@@ -1692,7 +1668,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- obj = to_intel_framebuffer(crtc->fb)->obj;
+ obj = to_intel_framebuffer(crtc->primary->fb)->obj;
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
@@ -1791,10 +1767,10 @@ static void intel_disable_dp(struct intel_encoder *encoder)
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
- ironlake_edp_panel_vdd_on(intel_dp);
- ironlake_edp_backlight_off(intel_dp);
+ intel_edp_panel_vdd_on(intel_dp);
+ intel_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
- ironlake_edp_panel_off(intel_dp);
+ intel_edp_panel_off(intel_dp);
/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
@@ -1824,11 +1800,11 @@ static void intel_enable_dp(struct intel_encoder *encoder)
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
- ironlake_edp_panel_vdd_on(intel_dp);
+ intel_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
+ intel_edp_panel_on(intel_dp);
+ edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
@@ -1838,14 +1814,14 @@ static void g4x_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_enable_dp(encoder);
- ironlake_edp_backlight_on(intel_dp);
+ intel_edp_backlight_on(intel_dp);
}
static void vlv_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- ironlake_edp_backlight_on(intel_dp);
+ intel_edp_backlight_on(intel_dp);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@ -1927,26 +1903,25 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
+ *
+ * Sinks are *supposed* to come up within 1ms from an off state, but we're also
+ * supposed to retry 3 times per the spec.
*/
-static bool
-intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
- uint8_t *recv, int recv_bytes)
+static ssize_t
+intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size)
{
- int ret, i;
+ ssize_t ret;
+ int i;
- /*
- * Sinks are *supposed* to come up within 1ms from an off state,
- * but we're also supposed to retry 3 times per the spec.
- */
for (i = 0; i < 3; i++) {
- ret = intel_dp_aux_native_read(intel_dp, address, recv,
- recv_bytes);
- if (ret == recv_bytes)
- return true;
+ ret = drm_dp_dpcd_read(aux, offset, buffer, size);
+ if (ret == size)
+ return ret;
msleep(1);
}
- return false;
+ return ret;
}
/*
@@ -1956,10 +1931,10 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
- return intel_dp_aux_native_read_retry(intel_dp,
- DP_LANE0_1_STATUS,
- link_status,
- DP_LINK_STATUS_SIZE);
+ return intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_LANE0_1_STATUS,
+ link_status,
+ DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
/*
@@ -2473,8 +2448,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
len = intel_dp->lane_count + 1;
}
- ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
- buf, len);
+ ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
+ buf, len);
return ret == len;
}
@@ -2503,9 +2478,8 @@ intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
I915_WRITE(intel_dp->output_reg, *DP);
POSTING_READ(intel_dp->output_reg);
- ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
- intel_dp->train_set,
- intel_dp->lane_count);
+ ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
+ intel_dp->train_set, intel_dp->lane_count);
return ret == intel_dp->lane_count;
}
@@ -2561,11 +2535,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
link_config[1] = intel_dp->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
- intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
+ drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
DP |= DP_PORT_EN;
@@ -2638,10 +2612,15 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
+ uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+
+ /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
+ if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
+ training_pattern = DP_TRAINING_PATTERN_3;
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, &DP,
- DP_TRAINING_PATTERN_2 |
+ training_pattern |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to start channel equalization\n");
return;
@@ -2668,7 +2647,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
- DP_TRAINING_PATTERN_2 |
+ training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
@@ -2684,7 +2663,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
- DP_TRAINING_PATTERN_2 |
+ training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
@@ -2803,8 +2782,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
- if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
- sizeof(intel_dp->dpcd)) == 0)
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
+ sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
@@ -2817,15 +2796,23 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
/* Check if the panel supports PSR */
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
if (is_edp(intel_dp)) {
- intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
- intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
+ intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
+ intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
dev_priv->psr.sink_support = true;
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
}
}
+ /* Training Pattern 3 support */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
+ intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
+ intel_dp->use_tps3 = true;
+ DRM_DEBUG_KMS("DisplayPort TPS3 supported\n");
+ } else
+ intel_dp->use_tps3 = false;
+
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -2833,9 +2820,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
return true; /* no per-port downstream info */
- if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
- intel_dp->downstream_ports,
- DP_MAX_DOWNSTREAM_PORTS) == 0)
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
+ intel_dp->downstream_ports,
+ DP_MAX_DOWNSTREAM_PORTS) < 0)
return false; /* downstream port status fetch failed */
return true;
@@ -2849,38 +2836,61 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
- ironlake_edp_panel_vdd_on(intel_dp);
+ intel_edp_panel_vdd_on(intel_dp);
- if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
- if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
- ironlake_edp_panel_vdd_off(intel_dp, false);
+ edp_panel_vdd_off(intel_dp, false);
}
-static bool
-intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
- int ret;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
+ u8 buf[1];
- ret = intel_dp_aux_native_read_retry(intel_dp,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector, 1);
- if (!ret)
- return false;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
+ return -EAGAIN;
- return true;
+ if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
+ return -ENOTTY;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+ DP_TEST_SINK_START) < 0)
+ return -EAGAIN;
+
+ /* Wait 2 vblanks to be sure we will have the correct CRC value */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
+ return -EAGAIN;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
+ return 0;
+}
+
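intel_dp_sink_crc() returns 0 on success and fills @crc with the six CRC bytes read starting at DP_TEST_CRC_R_CR (the function is also declared in intel_drv.h further down). A hypothetical caller — for example a debugfs dump, not part of this patch — might use it like this:

/* Hypothetical caller: fetch and log the sink CRC of an eDP panel. */
static void example_log_sink_crc(struct intel_dp *intel_dp)
{
	u8 crc[6];

	if (intel_dp_sink_crc(intel_dp, crc) == 0)
		DRM_DEBUG_KMS("sink CRC: %02x%02x %02x%02x %02x%02x\n",
			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
}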
+static bool
+intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+ return intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector, 1) == 1;
}
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
/* NAK by default */
- intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
/*
@@ -2919,9 +2929,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
/* Clear interrupt source */
- intel_dp_aux_native_write_1(intel_dp,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector);
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector);
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
intel_dp_handle_test_request(intel_dp);
@@ -2956,15 +2966,17 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
uint8_t reg;
- if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
- &reg, 1))
+
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
+ &reg, 1) < 0)
return connector_status_unknown;
+
return DP_GET_SINK_COUNT(reg) ? connector_status_connected
: connector_status_disconnected;
}
/* If no HPD, poke DDC gently */
- if (drm_probe_ddc(&intel_dp->adapter))
+ if (drm_probe_ddc(&intel_dp->aux.ddc))
return connector_status_connected;
/* Well we tried, say unknown for unreliable port types */
@@ -3106,10 +3118,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum drm_connector_status status;
+ enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
intel_runtime_pm_get(dev_priv);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
@@ -3128,7 +3144,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
- edid = intel_dp_get_edid(connector, &intel_dp->adapter);
+ edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
@@ -3140,21 +3156,32 @@ intel_dp_detect(struct drm_connector *connector, bool force)
status = connector_status_connected;
out:
+ intel_display_power_put(dev_priv, power_domain);
+
intel_runtime_pm_put(dev_priv);
+
return status;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
+ intel_display_power_put(dev_priv, power_domain);
if (ret)
return ret;
@@ -3175,15 +3202,25 @@ static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
struct edid *edid;
bool has_audio = false;
- edid = intel_dp_get_edid(connector, &intel_dp->adapter);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
+ intel_display_power_put(dev_priv, power_domain);
+
return has_audio;
}
@@ -3298,12 +3335,12 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- i2c_del_adapter(&intel_dp->adapter);
+ drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
mutex_lock(&dev->mode_config.mutex);
- ironlake_panel_vdd_off_sync(intel_dp);
+ edp_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
kfree(intel_dig_port);
@@ -3402,6 +3439,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
}
}
+static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
+{
+ intel_dp->last_power_cycle = jiffies;
+ intel_dp->last_power_on = jiffies;
+ intel_dp->last_backlight_off = jiffies;
+}
+
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp,
@@ -3524,10 +3568,17 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}
- /* And finally store the new values in the power sequencer. */
+ /*
+ * And finally store the new values in the power sequencer. The
+ * backlight delays are set to 1 because we do manual waits on them. For
+ * T8, even BSpec recommends doing it. For T9, if we don't do this,
+ * we'll end up waiting for the backlight off delay twice: once when we
+ * do the manual sleep, and once when we disable the panel and wait for
+ * the PP_STATUS bit to become zero.
+ */
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
- (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
- pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
@@ -3562,14 +3613,14 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
}
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
- struct intel_connector *intel_connector)
+ struct intel_connector *intel_connector,
+ struct edp_power_seq *power_seq)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = NULL;
- struct edp_power_seq power_seq = { 0 };
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
@@ -3577,12 +3628,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!is_edp(intel_dp))
return true;
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-
/* Cache DPCD and EDID for edp. */
- ironlake_edp_panel_vdd_on(intel_dp);
+ intel_edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, false);
+ edp_panel_vdd_off(intel_dp, false);
if (has_dpcd) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -3596,10 +3645,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
/* We now know it's not a ghost, init power sequence regs. */
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
- edid = drm_get_edid(connector, &intel_dp->adapter);
+ mutex_lock(&dev->mode_config.mutex);
+ edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
@@ -3629,8 +3678,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (fixed_mode)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
+ mutex_unlock(&dev->mode_config.mutex);
- intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector);
return true;
@@ -3646,8 +3696,20 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
- const char *name = NULL;
- int type, error;
+ struct edp_power_seq power_seq = { 0 };
+ int type;
+
+ /* intel_dp vfuncs */
+ if (IS_VALLEYVIEW(dev))
+ intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+ else if (HAS_PCH_SPLIT(dev))
+ intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+ else
+ intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
+
+ intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
@@ -3677,7 +3739,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0;
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- ironlake_panel_vdd_work);
+ edp_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
@@ -3686,61 +3748,41 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_dp_connector_unregister;
- intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
- if (HAS_DDI(dev)) {
- switch (intel_dig_port->port) {
- case PORT_A:
- intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
- break;
- case PORT_B:
- intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
- break;
- case PORT_C:
- intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
- break;
- case PORT_D:
- intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
- break;
- default:
- BUG();
- }
- }
-
- /* Set up the DDC bus. */
+ /* Set up the hotplug pin. */
switch (port) {
case PORT_A:
intel_encoder->hpd_pin = HPD_PORT_A;
- name = "DPDDC-A";
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
- name = "DPDDC-B";
break;
case PORT_C:
intel_encoder->hpd_pin = HPD_PORT_C;
- name = "DPDDC-C";
break;
case PORT_D:
intel_encoder->hpd_pin = HPD_PORT_D;
- name = "DPDDC-D";
break;
default:
BUG();
}
- error = intel_dp_i2c_init(intel_dp, intel_connector, name);
- WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
- error, port_name(port));
+ if (is_edp(intel_dp)) {
+ intel_dp_init_panel_power_timestamps(intel_dp);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ }
+
+ intel_dp_aux_init(intel_dp, intel_connector);
intel_dp->psr_setup_done = false;
- if (!intel_edp_init_connector(intel_dp, intel_connector)) {
- i2c_del_adapter(&intel_dp->adapter);
+ if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
+ drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
mutex_lock(&dev->mode_config.mutex);
- ironlake_panel_vdd_off_sync(intel_dp);
+ edp_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
drm_sysfs_connector_remove(connector);
@@ -3806,7 +3848,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- intel_encoder->cloneable = false;
+ intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_dp_hot_plug;
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fbfaaba5cc3b..0542de982260 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -78,6 +78,12 @@
#define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */
+/* Maximum cursor sizes */
+#define GEN2_CURSOR_WIDTH 64
+#define GEN2_CURSOR_HEIGHT 64
+#define CURSOR_WIDTH 256
+#define CURSOR_HEIGHT 256
+
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
@@ -110,9 +116,10 @@ struct intel_framebuffer {
struct intel_fbdev {
struct drm_fb_helper helper;
- struct intel_framebuffer ifb;
+ struct intel_framebuffer *fb;
struct list_head fbdev_list;
struct drm_display_mode *our_mode;
+ int preferred_bpp;
};
struct intel_encoder {
@@ -124,11 +131,7 @@ struct intel_encoder {
struct intel_crtc *new_crtc;
int type;
- /*
- * Intel hw has only one MUX where encoders could be clone, hence a
- * simple flag is enough to compute the possible_clones mask.
- */
- bool cloneable;
+ unsigned int cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
@@ -187,6 +190,14 @@ struct intel_connector {
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
+ /*
+ * Removes all interfaces through which the connector is accessible
+ * (like sysfs and debugfs entries), so that no new operations can be
+ * started on the connector. Also makes sure all currently pending
+ * operations finish before returning.
+ */
+ void (*unregister)(struct intel_connector *);
+
/* Panel info for eDP and LVDS */
struct intel_panel panel;
@@ -210,6 +221,12 @@ typedef struct dpll {
int p;
} intel_clock_t;
+struct intel_plane_config {
+ bool tiled;
+ int size;
+ u32 base;
+};
+
struct intel_crtc_config {
/**
* quirks - bitfield with hw state readout quirks
@@ -356,9 +373,13 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
+ int16_t max_cursor_width, max_cursor_height;
bool cursor_visible;
+ struct intel_plane_config plane_config;
struct intel_crtc_config config;
+ struct intel_crtc_config *new_config;
+ bool new_enabled;
uint32_t ddi_pll_sel;
@@ -475,8 +496,7 @@ struct intel_dp {
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
- struct i2c_adapter adapter;
- struct i2c_algo_dp_aux_data algo;
+ struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@@ -485,8 +505,22 @@ struct intel_dp {
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
+ unsigned long last_power_cycle;
+ unsigned long last_power_on;
+ unsigned long last_backlight_off;
bool psr_setup_done;
+ bool use_tps3;
struct intel_connector *attached_connector;
+
+ uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
+ /*
+ * This function returns the value we have to program the AUX_CTL
+ * register with to kick off an AUX transaction.
+ */
+ uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t aux_clock_divider);
};
struct intel_digital_port {
@@ -540,6 +574,7 @@ struct intel_unpin_work {
struct intel_set_config {
struct drm_encoder **save_connector_encoders;
struct drm_crtc **save_encoder_crtcs;
+ bool *save_crtc_enabled;
bool fb_changed;
bool mode_changed;
@@ -584,6 +619,8 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
/* i915_irq.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable);
+bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
@@ -591,8 +628,8 @@ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void hsw_pc8_disable_interrupts(struct drm_device *dev);
-void hsw_pc8_restore_interrupts(struct drm_device *dev);
+void hsw_runtime_pm_disable_interrupts(struct drm_device *dev);
+void hsw_runtime_pm_restore_interrupts(struct drm_device *dev);
/* intel_crt.c */
@@ -664,11 +701,10 @@ int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
-int intel_framebuffer_init(struct drm_device *dev,
- struct intel_framebuffer *ifb,
+struct drm_framebuffer *
+__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
-void intel_framebuffer_fini(struct intel_framebuffer *fb);
void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
@@ -696,9 +732,8 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int bpp,
unsigned int pitch);
void intel_display_handle_reset(struct drm_device *dev);
-void hsw_enable_pc8_work(struct work_struct *__work);
-void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
-void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
+void hsw_enable_pc8(struct drm_i915_private *dev_priv);
+void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
@@ -708,8 +743,13 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
-void intel_display_set_init_power(struct drm_device *dev, bool enable);
+void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
+enum intel_display_power_domain
+intel_display_port_power_domain(struct intel_encoder *intel_encoder);
int valleyview_get_vco(struct drm_i915_private *dev_priv);
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+ struct intel_crtc_config *pipe_config);
+int intel_format_to_fourcc(int format);
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -721,15 +761,15 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
-void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
-void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
-void ironlake_edp_panel_on(struct intel_dp *intel_dp);
-void ironlake_edp_panel_off(struct intel_dp *intel_dp);
-void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+void intel_edp_backlight_on(struct intel_dp *intel_dp);
+void intel_edp_backlight_off(struct intel_dp *intel_dp);
+void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void intel_edp_panel_on(struct intel_dp *intel_dp);
+void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_edp_psr_update(struct drm_device *dev);
@@ -808,7 +848,8 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
/* intel_panel.c */
int intel_panel_init(struct intel_panel *panel,
- struct drm_display_mode *fixed_mode);
+ struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *downclock_mode);
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
@@ -845,18 +886,19 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
-int intel_power_domains_init(struct drm_device *dev);
-void intel_power_domains_remove(struct drm_device *dev);
-bool intel_display_power_enabled(struct drm_device *dev,
+int intel_power_domains_init(struct drm_i915_private *);
+void intel_power_domains_remove(struct drm_i915_private *);
+bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-bool intel_display_power_enabled_sw(struct drm_device *dev,
+bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_display_power_get(struct drm_device *dev,
+void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_display_power_put(struct drm_device *dev,
+void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_power_domains_init_hw(struct drm_device *dev);
-void intel_set_power_well(struct drm_device *dev, bool enable);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
+void intel_init_gt_powersave(struct drm_device *dev);
+void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index fabbf0d895cf..33656647f8bc 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -243,11 +243,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ enum intel_display_power_domain power_domain;
u32 port, func;
enum pipe p;
DRM_DEBUG_KMS("\n");
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
/* XXX: this only works for one DSI output */
for (p = PIPE_A; p <= PIPE_B; p++) {
port = I915_READ(MIPI_PORT_CTRL(p));
@@ -488,8 +493,19 @@ static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+ struct intel_encoder *intel_encoder = &intel_dsi->base;
+ enum intel_display_power_domain power_domain;
+ enum drm_connector_status connector_status;
+ struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
+
DRM_DEBUG_KMS("\n");
- return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+
+ intel_display_power_get(dev_priv, power_domain);
+ connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+ intel_display_power_put(dev_priv, power_domain);
+
+ return connector_status;
}
static int intel_dsi_get_modes(struct drm_connector *connector)
@@ -586,6 +602,7 @@ bool intel_dsi_init(struct drm_device *dev)
intel_encoder->get_config = intel_dsi_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
dsi = &intel_dsi_devices[i];
@@ -603,7 +620,7 @@ bool intel_dsi_init(struct drm_device *dev)
intel_encoder->type = INTEL_OUTPUT_DSI;
intel_encoder->crtc_mask = (1 << 0); /* XXX */
- intel_encoder->cloneable = false;
+ intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -624,7 +641,7 @@ bool intel_dsi_init(struct drm_device *dev)
}
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
return true;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index eeff998e52ef..7fe3feedfe03 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -477,6 +477,7 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->compute_config = intel_dvo_compute_config;
intel_encoder->mode_set = intel_dvo_mode_set;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
@@ -521,14 +522,15 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
- intel_encoder->cloneable = true;
+ intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
+ (1 << INTEL_OUTPUT_DVO);
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
- intel_encoder->cloneable = false;
+ intel_encoder->cloneable = 0;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 39eac9937a4a..b4d44e62f0c7 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -62,6 +62,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
+ struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
@@ -93,18 +94,22 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
- DRM_ERROR("failed to pin fb: %d\n", ret);
+ DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_unref;
}
- ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
- if (ret)
+ fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
goto out_unpin;
+ }
+
+ ifbdev->fb = to_intel_framebuffer(fb);
return 0;
out_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
@@ -116,23 +121,26 @@ static int intelfb_create(struct drm_fb_helper *helper,
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
- struct intel_framebuffer *intel_fb = &ifbdev->ifb;
+ struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
int size, ret;
+ bool prealloc = false;
mutex_lock(&dev->struct_mutex);
- if (!intel_fb->obj) {
+ if (!intel_fb || WARN_ON(!intel_fb->obj)) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
goto out_unlock;
+ intel_fb = ifbdev->fb;
} else {
DRM_DEBUG_KMS("re-using BIOS fb\n");
+ prealloc = true;
sizes->fb_width = intel_fb->base.width;
sizes->fb_height = intel_fb->base.height;
}
@@ -148,7 +156,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->par = helper;
- fb = &ifbdev->ifb.base;
+ fb = &ifbdev->fb->base;
ifbdev->helper.fb = fb;
ifbdev->helper.fbdev = info;
@@ -194,7 +202,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (ifbdev->ifb.obj->stolen)
+ if (ifbdev->fb->obj->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -208,7 +216,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
return 0;
out_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
out_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -236,7 +244,193 @@ static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = intel_crtc->lut_b[regno] << 8;
}
+static struct drm_fb_helper_crtc *
+intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
+{
+ int i;
+
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
+ return &fb_helper->crtc_info[i];
+
+ return NULL;
+}
+
+/*
+ * Try to read the BIOS display configuration and use it for the initial
+ * fb configuration.
+ *
+ * The BIOS or boot loader will generally create an initial display
+ * configuration for us that includes some set of active pipes and displays.
+ * This routine tries to figure out which pipes and connectors are active
+ * and stuffs them into the crtcs and modes array given to us by the
+ * drm_fb_helper code.
+ *
+ * The overall sequence is:
+ * intel_fbdev_init - from driver load
+ * intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data
+ * drm_fb_helper_init - build fb helper structs
+ * drm_fb_helper_single_add_all_connectors - more fb helper structs
+ * intel_fbdev_initial_config - apply the config
+ * drm_fb_helper_initial_config - call ->probe then register_framebuffer()
+ * drm_setup_crtcs - build crtc config for fbdev
+ * intel_fb_initial_config - find active connectors etc
+ * drm_fb_helper_single_fb_probe - set up fbdev
+ * intelfb_create - re-use or alloc fb, build out fbdev structs
+ *
+ * Note that we don't give special consideration to whether we could actually
+ * switch to the selected modes without a full modeset. E.g. when the display
+ * is in VGA mode we need to recalculate watermarks and set a new high-res
+ * framebuffer anyway.
+ */
+static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **crtcs,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int i, j;
+ bool *save_enabled;
+ bool fallback = true;
+ int num_connectors_enabled = 0;
+ int num_connectors_detected = 0;
+
+ /*
+ * If the user specified any force options, just bail here
+ * and use that config.
+ */
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_connector *connector;
+
+ fb_conn = fb_helper->connector_info[i];
+ connector = fb_conn->connector;
+
+ if (!enabled[i])
+ continue;
+
+ if (connector->force != DRM_FORCE_UNSPECIFIED)
+ return false;
+ }
+
+ save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
+ GFP_KERNEL);
+ if (!save_enabled)
+ return false;
+
+ memcpy(save_enabled, enabled, dev->mode_config.num_connector);
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *new_crtc;
+
+ fb_conn = fb_helper->connector_info[i];
+ connector = fb_conn->connector;
+
+ if (connector->status == connector_status_connected)
+ num_connectors_detected++;
+
+ if (!enabled[i]) {
+ DRM_DEBUG_KMS("connector %d not enabled, skipping\n",
+ connector->base.id);
+ continue;
+ }
+
+ encoder = connector->encoder;
+ if (!encoder || WARN_ON(!encoder->crtc)) {
+ DRM_DEBUG_KMS("connector %d has no encoder or crtc, skipping\n",
+ connector->base.id);
+ enabled[i] = false;
+ continue;
+ }
+
+ num_connectors_enabled++;
+
+ new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
+
+ /*
+ * Make sure we're not trying to drive multiple connectors
+ * with a single CRTC, since our cloning support may not
+ * match the BIOS.
+ */
+ for (j = 0; j < fb_helper->connector_count; j++) {
+ if (crtcs[j] == new_crtc) {
+ DRM_DEBUG_KMS("fallback: cloned configuration\n");
+ fallback = true;
+ goto out;
+ }
+ }
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_conn->connector->base.id);
+
+ /* go for command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
+
+ /* try for preferred next */
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+ fb_conn->connector->base.id);
+ modes[i] = drm_has_preferred_mode(fb_conn, width,
+ height);
+ }
+
+ /* last resort: use current mode */
+ if (!modes[i]) {
+ /*
+ * IMPORTANT: We want to use the adjusted mode (i.e.
+ * after the panel fitter upscaling) as the initial
+ * config, not the input mode, which is what crtc->mode
+ * usually contains. But since our current fastboot
+ * code puts a mode derived from the post-pfit timings
+ * into crtc->mode this works out correctly. We don't
+ * use hwmode anywhere right now, so use it for this
+ * since the fb helper layer wants a pointer to
+ * something we own.
+ */
+ intel_mode_from_pipe_config(&encoder->crtc->hwmode,
+ &to_intel_crtc(encoder->crtc)->config);
+ modes[i] = &encoder->crtc->hwmode;
+ }
+ crtcs[i] = new_crtc;
+
+ DRM_DEBUG_KMS("connector %s on crtc %d: %s\n",
+ drm_get_connector_name(connector),
+ encoder->crtc->base.id,
+ modes[i]->name);
+
+ fallback = false;
+ }
+
+ /*
+ * If the BIOS didn't enable everything it could, fall back to the
+ * fbdev helper library's default behaviour of lighting up as many
+ * outputs as possible.
+ */
+ if (num_connectors_enabled != num_connectors_detected &&
+ num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
+ DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
+ DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
+ num_connectors_detected);
+ fallback = true;
+ }
+
+out:
+ if (fallback) {
+ DRM_DEBUG_KMS("Not using firmware configuration\n");
+ memcpy(enabled, save_enabled, dev->mode_config.num_connector);
+ kfree(save_enabled);
+ return false;
+ }
+
+ kfree(save_enabled);
+ return true;
+}
+
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .initial_config = intel_fb_initial_config,
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
.fb_probe = intelfb_create,
@@ -258,8 +452,139 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&ifbdev->helper);
- drm_framebuffer_unregister_private(&ifbdev->ifb.base);
- intel_framebuffer_fini(&ifbdev->ifb);
+ drm_framebuffer_unregister_private(&ifbdev->fb->base);
+ drm_framebuffer_remove(&ifbdev->fb->base);
+}
+
+/*
+ * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
+ * The core display code will have read out the current plane configuration,
+ * so we use that to figure out if there's an object for us to use as the
+ * fb, and if so, we re-use it for the fbdev configuration.
+ *
+ * Note we only support a single fb shared across pipes for boot (mostly for
+ * fbcon), so we just find the biggest and use that.
+ */
+static bool intel_fbdev_init_bios(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
+{
+ struct intel_framebuffer *fb = NULL;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+ struct intel_plane_config *plane_config = NULL;
+ unsigned int max_size = 0;
+
+ if (!i915.fastboot)
+ return false;
+
+ /* Find the largest fb */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->active || !crtc->primary->fb) {
+ DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
+ pipe_name(intel_crtc->pipe));
+ continue;
+ }
+
+ if (intel_crtc->plane_config.size > max_size) {
+ DRM_DEBUG_KMS("found possible fb from plane %c\n",
+ pipe_name(intel_crtc->pipe));
+ plane_config = &intel_crtc->plane_config;
+ fb = to_intel_framebuffer(crtc->primary->fb);
+ max_size = plane_config->size;
+ }
+ }
+
+ if (!fb) {
+ DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
+ goto out;
+ }
+
+ /* Now make sure all the pipes will fit into it */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ unsigned int cur_size;
+
+ intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->active) {
+ DRM_DEBUG_KMS("pipe %c not active, skipping\n",
+ pipe_name(intel_crtc->pipe));
+ continue;
+ }
+
+ DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
+ pipe_name(intel_crtc->pipe));
+
+ /*
+ * See if the plane fb we found above will fit on this
+ * pipe. Note we need to use the selected fb's pitch and bpp
+ * rather than the current pipe's, since they differ.
+ */
+ cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay;
+ cur_size = cur_size * fb->base.bits_per_pixel / 8;
+ if (fb->base.pitches[0] < cur_size) {
+ DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
+ pipe_name(intel_crtc->pipe),
+ cur_size, fb->base.pitches[0]);
+ plane_config = NULL;
+ fb = NULL;
+ break;
+ }
+
+ cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay;
+ cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1);
+ cur_size *= fb->base.pitches[0];
+ DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
+ pipe_name(intel_crtc->pipe),
+ intel_crtc->config.adjusted_mode.crtc_hdisplay,
+ intel_crtc->config.adjusted_mode.crtc_vdisplay,
+ fb->base.bits_per_pixel,
+ cur_size);
+
+ if (cur_size > max_size) {
+ DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
+ pipe_name(intel_crtc->pipe),
+ cur_size, max_size);
+ plane_config = NULL;
+ fb = NULL;
+ break;
+ }
+
+ DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
+ pipe_name(intel_crtc->pipe),
+ max_size, cur_size);
+ }
+
+ if (!fb) {
+ DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
+ goto out;
+ }
+
+ ifbdev->preferred_bpp = fb->base.bits_per_pixel;
+ ifbdev->fb = fb;
+
+ drm_framebuffer_reference(&ifbdev->fb->base);
+
+ /* Final pass to check if any active pipes don't have fbs */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->active)
+ continue;
+
+ WARN(!crtc->primary->fb,
+ "re-used BIOS config but lost an fb on crtc %d\n",
+ crtc->base.id);
+ }
+
+ DRM_DEBUG_KMS("using BIOS fb for initial console\n");
+ return true;
+
+out:
+
+ return false;
}
int intel_fbdev_init(struct drm_device *dev)
@@ -268,21 +593,25 @@ int intel_fbdev_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
- if (!ifbdev)
+ if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
+ return -ENODEV;
+
+ ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+ if (ifbdev == NULL)
return -ENOMEM;
- dev_priv->fbdev = ifbdev;
ifbdev->helper.funcs = &intel_fb_helper_funcs;
+ if (!intel_fbdev_init_bios(dev, ifbdev))
+ ifbdev->preferred_bpp = 32;
ret = drm_fb_helper_init(dev, &ifbdev->helper,
- INTEL_INFO(dev)->num_pipes,
- 4);
+ INTEL_INFO(dev)->num_pipes, 4);
if (ret) {
kfree(ifbdev);
return ret;
}
+ dev_priv->fbdev = ifbdev;
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
return 0;
@@ -291,9 +620,10 @@ int intel_fbdev_init(struct drm_device *dev)
void intel_fbdev_initial_config(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_fbdev *ifbdev = dev_priv->fbdev;
/* Due to peculiar init order wrt to hpd handling this is separate. */
- drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
+ drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp);
}
void intel_fbdev_fini(struct drm_device *dev)
@@ -322,7 +652,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
* been restored from swap. If the object is stolen however, it will be
* full of whatever garbage was left in there.
*/
- if (state == FBINFO_STATE_RUNNING && ifbdev->ifb.obj->stolen)
+ if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
memset_io(info->screen_base, 0, info->screen_size);
fb_set_suspend(info, state);
@@ -331,7 +661,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+ if (dev_priv->fbdev)
+ drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
void intel_fbdev_restore_mode(struct drm_device *dev)
@@ -339,7 +670,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
int ret;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (!dev_priv->fbdev)
return;
drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ee3181ebcc92..b0413e190625 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -113,7 +113,8 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
}
static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
- enum transcoder cpu_transcoder)
+ enum transcoder cpu_transcoder,
+ struct drm_i915_private *dev_priv)
{
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -296,7 +297,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
u32 val = I915_READ(ctl_reg);
data_reg = hsw_infoframe_data_reg(type,
- intel_crtc->config.cpu_transcoder);
+ intel_crtc->config.cpu_transcoder,
+ dev_priv);
if (data_reg == 0)
return;
@@ -423,7 +425,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
- u32 port;
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -447,18 +449,6 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
return;
}
- switch (intel_dig_port->port) {
- case PORT_B:
- port = VIDEO_DIP_PORT_B;
- break;
- case PORT_C:
- port = VIDEO_DIP_PORT_C;
- break;
- default:
- BUG();
- return;
- }
-
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
@@ -489,7 +479,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port;
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -505,21 +495,6 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
return;
}
- switch (intel_dig_port->port) {
- case PORT_B:
- port = VIDEO_DIP_PORT_B;
- break;
- case PORT_C:
- port = VIDEO_DIP_PORT_C;
- break;
- case PORT_D:
- port = VIDEO_DIP_PORT_D;
- break;
- default:
- BUG();
- return;
- }
-
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
@@ -692,8 +667,13 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ enum intel_display_power_domain power_domain;
u32 tmp;
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (!(tmp & SDVO_ENABLE))
@@ -868,6 +848,30 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+ int count = 0, count_hdmi = 0;
+
+ if (!HAS_PCH_SPLIT(dev))
+ return false;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ if (encoder->new_crtc != crtc)
+ continue;
+
+ count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
+ count++;
+ }
+
+ /*
+ * HDMI 12bpc affects the clocks, so it's only possible
+ * when not cloning with other encoder types.
+ */
+ return count_hdmi > 0 && count_hdmi == count;
+}
+
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
@@ -900,7 +904,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
- clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
+ clock_12bpc <= portclock_limit &&
+ hdmi_12bpc_possible(encoder->new_crtc)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -934,11 +939,15 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = dev->dev_private;
struct edid *edid;
+ enum intel_display_power_domain power_domain;
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
@@ -966,31 +975,48 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_encoder->type = INTEL_OUTPUT_HDMI;
}
+ intel_display_power_put(dev_priv, power_domain);
+
return status;
}
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ int ret;
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(connector,
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ ret = intel_ddc_get_modes(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static bool
intel_hdmi_detect_audio(struct drm_connector *connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ enum intel_display_power_domain power_domain;
struct edid *edid;
bool has_audio = false;
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
@@ -1000,6 +1026,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
kfree(edid);
}
+ intel_display_power_put(dev_priv, power_domain);
+
return has_audio;
}
@@ -1261,6 +1289,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
intel_hdmi_add_properties(intel_hdmi, connector);
@@ -1314,7 +1343,14 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->type = INTEL_OUTPUT_HDMI;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- intel_encoder->cloneable = false;
+ intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
+ /*
+ * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
+ * to work on real hardware. And since g4x can send infoframes to
+ * only one port anyway, nothing is lost by allowing it.
+ */
+ if (IS_G4X(dev))
+ intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8bcb93a2a9f6..f1ecf916474a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -848,8 +848,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
/* use the module option value if specified */
- if (i915_lvds_channel_mode > 0)
- return i915_lvds_channel_mode == 2;
+ if (i915.lvds_channel_mode > 0)
+ return i915.lvds_channel_mode == 2;
if (dmi_check_system(intel_dual_link_lvds))
return true;
@@ -899,6 +899,7 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_display_mode *fixed_mode = NULL;
+ struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
struct drm_crtc *crtc;
u32 lvds;
@@ -957,11 +958,12 @@ void intel_lvds_init(struct drm_device *dev)
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
- intel_encoder->cloneable = false;
+ intel_encoder->cloneable = 0;
if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
else if (IS_GEN4(dev))
@@ -1000,6 +1002,7 @@ void intel_lvds_init(struct drm_device *dev)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
+ mutex_lock(&dev->mode_config.mutex);
edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
@@ -1032,15 +1035,14 @@ void intel_lvds_init(struct drm_device *dev)
fixed_mode = drm_mode_duplicate(dev, scan);
if (fixed_mode) {
- intel_connector->panel.downclock_mode =
+ downclock_mode =
intel_find_panel_downclock(dev,
fixed_mode, connector);
- if (intel_connector->panel.downclock_mode !=
- NULL && i915_lvds_downclock) {
+ if (downclock_mode != NULL &&
+ i915.lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = true;
dev_priv->lvds_downclock =
- intel_connector->panel.
downclock_mode->clock;
DRM_DEBUG_KMS("LVDS downclock is found"
" in EDID. Normal clock %dKhz, "
@@ -1094,6 +1096,8 @@ void intel_lvds_init(struct drm_device *dev)
goto failed;
out:
+ mutex_unlock(&dev->mode_config.mutex);
+
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
@@ -1116,17 +1120,17 @@ out:
}
drm_sysfs_connector_add(connector);
- intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
return;
failed:
+ mutex_unlock(&dev->mode_config.mutex);
+
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- if (fixed_mode)
- drm_mode_destroy(dev, fixed_mode);
kfree(lvds_encoder);
kfree(lvds_connector);
return;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a759ecdb7a6e..d8adc9104dca 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -189,7 +189,7 @@ struct intel_overlay {
static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->dev->dev_private;
struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -212,7 +212,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
void (*tail)(struct intel_overlay *))
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
@@ -262,7 +262,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
bool load_polyphase_filter)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@@ -293,7 +293,7 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
struct drm_i915_gem_object *obj = overlay->old_vid_bo;
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
overlay->old_vid_bo = NULL;
@@ -306,7 +306,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
overlay->vid_bo = NULL;
@@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
@@ -388,7 +388,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
@@ -606,14 +606,14 @@ static void update_colorkey(struct intel_overlay *overlay,
{
u32 key = overlay->color_key;
- switch (overlay->crtc->base.fb->bits_per_pixel) {
+ switch (overlay->crtc->base.primary->fb->bits_per_pixel) {
case 8:
iowrite32(0, &regs->DCLRKV);
iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
break;
case 16:
- if (overlay->crtc->base.fb->depth == 15) {
+ if (overlay->crtc->base.primary->fb->depth == 15) {
iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
&regs->DCLRKM);
@@ -782,7 +782,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
return 0;
out_unpin:
- i915_gem_object_unpin(new_bo);
+ i915_gem_object_ggtt_unpin(new_bo);
return ret;
}
@@ -834,7 +834,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 pfit_control = I915_READ(PFIT_CONTROL);
u32 ratio;
@@ -1026,7 +1026,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_overlay_put_image *put_image_rec = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
@@ -1076,7 +1076,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
if (new_bo->tiling_mode) {
- DRM_ERROR("buffer used for overlay image can not be tiled\n");
+ DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
}
@@ -1226,7 +1226,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_overlay_attrs *attrs = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct overlay_registers __iomem *regs;
int ret;
@@ -1311,7 +1311,7 @@ out_unlock:
void intel_setup_overlay(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
struct overlay_registers __iomem *regs;
@@ -1349,7 +1349,7 @@ void intel_setup_overlay(struct drm_device *dev)
}
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
+ ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
@@ -1386,7 +1386,7 @@ void intel_setup_overlay(struct drm_device *dev)
out_unpin_bo:
if (!OVERLAY_NEEDS_PHYSICAL(dev))
- i915_gem_object_unpin(reg_bo);
+ i915_gem_object_ggtt_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
out_free:
@@ -1397,7 +1397,7 @@ out_free:
void intel_cleanup_overlay(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv->overlay)
return;
@@ -1421,7 +1421,7 @@ struct intel_overlay_error_state {
static struct overlay_registers __iomem *
intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
{
- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->dev->dev_private;
struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -1447,7 +1447,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay = dev_priv->overlay;
struct intel_overlay_error_state *error;
struct overlay_registers __iomem *regs;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 079ea38f14d9..cb058408c70e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -33,8 +33,6 @@
#include <linux/moduleparam.h>
#include "intel_drv.h"
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
-
void
intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -325,13 +323,6 @@ out:
pipe_config->gmch_pfit.lvds_border_bits = border;
}
-static int i915_panel_invert_brightness;
-MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
- "(-1 force normal, 0 machine defaults, 1 force inversion), please "
- "report PCI device ID, subsystem vendor and subsystem device ID "
- "to dri-devel@lists.freedesktop.org, if your machine needs it. "
- "It will then be included in an upcoming module version.");
-module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val)
{
@@ -341,10 +332,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
WARN_ON(panel->backlight.max == 0);
- if (i915_panel_invert_brightness < 0)
+ if (i915.invert_brightness < 0)
return val;
- if (i915_panel_invert_brightness > 0 ||
+ if (i915.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
return panel->backlight.max - val;
}
@@ -810,13 +801,13 @@ intel_panel_detect(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Assume that the BIOS does not lie through the OpRegion... */
- if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
+ if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
}
- switch (i915_panel_ignore_lid) {
+ switch (i915.panel_ignore_lid) {
case -2:
return connector_status_connected;
case -1:
@@ -1199,9 +1190,11 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
}
int intel_panel_init(struct intel_panel *panel,
- struct drm_display_mode *fixed_mode)
+ struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *downclock_mode)
{
panel->fixed_mode = fixed_mode;
+ panel->downclock_mode = downclock_mode;
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e1fc35a72656..5874716774a7 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -92,12 +92,12 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
- int plane, i;
+ int i;
u32 fbc_ctl;
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
@@ -109,7 +109,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
- plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
@@ -120,7 +119,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= plane;
+ fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
}
@@ -135,7 +134,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
- DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ",
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
@@ -150,21 +149,23 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
u32 dpfc_ctl;
- dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
- I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
/* enable it... */
- I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
@@ -220,22 +221,20 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
u32 dpfc_ctl;
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- dpfc_ctl &= DPFC_RESERVED;
- dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- /* Set persistent mode for front-buffer rendering, ala X. */
- dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
dpfc_ctl |= obj->fence_reg;
- I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
@@ -278,24 +277,31 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
- I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
+ dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
- I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
- IVB_DPFC_CTL_FENCE_EN |
- intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
} else {
- /* WaFbcAsynchFlipDisableFbcQueue:hsw */
- I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
- HSW_BYPASS_FBC_QUEUE);
+ /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+ I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
+ I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+ HSW_FBCQ_DIS);
}
I915_WRITE(SNB_DPFC_CTL_SA,
@@ -330,11 +336,11 @@ static void intel_fbc_work_fn(struct work_struct *__work)
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
- if (work->crtc->fb == work->fb) {
+ if (work->crtc->primary->fb == work->fb) {
dev_priv->display.enable_fbc(work->crtc);
dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->fbc.fb_id = work->crtc->fb->base.id;
+ dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
dev_priv->fbc.y = work->crtc->y;
}
@@ -387,7 +393,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc)
}
work->crtc = crtc;
- work->fb = crtc->fb;
+ work->fb = crtc->primary->fb;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
dev_priv->fbc.fbc_work = work;
@@ -466,7 +472,7 @@ void intel_update_fbc(struct drm_device *dev)
return;
}
- if (!i915_powersave) {
+ if (!i915.powersave) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
return;
@@ -493,25 +499,25 @@ void intel_update_fbc(struct drm_device *dev)
}
}
- if (!crtc || crtc->fb == NULL) {
+ if (!crtc || crtc->primary->fb == NULL) {
if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
DRM_DEBUG_KMS("no output, disabling\n");
goto out_disable;
}
intel_crtc = to_intel_crtc(crtc);
- fb = crtc->fb;
+ fb = crtc->primary->fb;
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
adjusted_mode = &intel_crtc->config.adjusted_mode;
- if (i915_enable_fbc < 0 &&
+ if (i915.enable_fbc < 0 &&
INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
DRM_DEBUG_KMS("disabled per chip default\n");
goto out_disable;
}
- if (!i915_enable_fbc) {
+ if (!i915.enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
@@ -537,7 +543,7 @@ void intel_update_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
- if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
+ if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
intel_crtc->plane != PLANE_A) {
if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
DRM_DEBUG_KMS("plane not A, disabling compression\n");
@@ -617,7 +623,7 @@ out_disable:
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
tmp = I915_READ(CLKCFG);
@@ -656,7 +662,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u16 ddrpll, csipll;
ddrpll = I915_READ16(DDRMPLL1);
@@ -1035,7 +1041,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
crtc = single_enabled_crtc(dev);
if (crtc) {
const struct drm_display_mode *adjusted_mode;
- int pixel_size = crtc->fb->bits_per_pixel / 8;
+ int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
int clock;
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
@@ -1115,7 +1121,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
- pixel_size = crtc->fb->bits_per_pixel / 8;
+ pixel_size = crtc->primary->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -1128,9 +1134,9 @@ static bool g4x_compute_wm0(struct drm_device *dev,
*plane_wm = display->max_wm;
/* Use the large buffer method to calculate cursor watermark */
- line_time_us = ((htotal * 1000) / clock);
+ line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * 64 * pixel_size;
+ entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@@ -1202,9 +1208,9 @@ static bool g4x_compute_srwm(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
- pixel_size = crtc->fb->bits_per_pixel / 8;
+ pixel_size = crtc->primary->fb->bits_per_pixel / 8;
- line_time_us = (htotal * 1000) / clock;
+ line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
@@ -1216,7 +1222,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
*display_wm = entries + display->guard_size;
/* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
+ entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
@@ -1241,7 +1247,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
return false;
clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
- pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
+ pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
entries = (clock / 1000) * pixel_size;
*plane_prec_mult = (entries > 256) ?
@@ -1433,11 +1439,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
- int pixel_size = crtc->fb->bits_per_pixel / 8;
+ int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
- line_time_us = ((htotal * 1000) / clock);
+ line_time_us = max(htotal * 1000 / clock, 1);
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
@@ -1451,7 +1457,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
entries, srwm);
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * 64;
+ pixel_size * to_intel_crtc(crtc)->cursor_width;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1506,7 +1512,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->fb->bits_per_pixel / 8;
+ int cpp = crtc->primary->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1522,7 +1528,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->fb->bits_per_pixel / 8;
+ int cpp = crtc->primary->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1559,11 +1565,11 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
- int pixel_size = enabled->fb->bits_per_pixel / 8;
+ int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
- line_time_us = (htotal * 1000) / clock;
+ line_time_us = max(htotal * 1000 / clock, 1);
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
@@ -1886,7 +1892,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
}
/* Calculate the maximum FBC watermark */
-static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
+static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
{
/* max that registers can hold */
if (INTEL_INFO(dev)->gen >= 8)
@@ -1895,7 +1901,7 @@ static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
return 15;
}
-static void ilk_compute_wm_maximums(struct drm_device *dev,
+static void ilk_compute_wm_maximums(const struct drm_device *dev,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
@@ -1948,7 +1954,7 @@ static bool ilk_validate_wm_level(int level,
return ret;
}
-static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
int level,
const struct ilk_pipe_wm_parameters *p,
struct intel_wm_level *result)
@@ -2079,7 +2085,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
}
}
-static void intel_setup_wm_latency(struct drm_device *dev)
+static void ilk_setup_wm_latency(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2111,10 +2117,10 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
if (p->active) {
p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
- p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
+ p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
- p->cur.horiz_pixels = 64;
+ p->cur.horiz_pixels = intel_crtc->cursor_width;
/* TODO: for now, assume primary and cursor planes are always enabled. */
p->pri.enabled = true;
p->cur.enabled = true;
@@ -2123,7 +2129,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
config->num_pipes_active += intel_crtc_active(crtc);
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
struct intel_plane *intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe)
@@ -2140,7 +2146,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
struct intel_pipe_wm *pipe_wm)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_i915_private *dev_priv = dev->dev_private;
int level, max_level = ilk_wm_max_level(dev);
/* LP0 watermark maximums depend on this pipe alone */
struct intel_wm_config config = {
@@ -2738,7 +2744,7 @@ intel_alloc_context_page(struct drm_device *dev)
return NULL;
}
- ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -2753,7 +2759,7 @@ intel_alloc_context_page(struct drm_device *dev)
return ctx;
err_unpin:
- i915_gem_object_unpin(ctx);
+ i915_gem_object_ggtt_unpin(ctx);
err_unref:
drm_gem_object_unreference(&ctx->base);
return NULL;
@@ -2901,9 +2907,9 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- limits = dev_priv->rps.max_delay << 24;
- if (val <= dev_priv->rps.min_delay)
- limits |= dev_priv->rps.min_delay << 16;
+ limits = dev_priv->rps.max_freq_softlimit << 24;
+ if (val <= dev_priv->rps.min_freq_softlimit)
+ limits |= dev_priv->rps.min_freq_softlimit << 16;
return limits;
}
@@ -2915,26 +2921,26 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
case LOW_POWER:
- if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+ if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
new_power = BETWEEN;
break;
case BETWEEN:
- if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+ if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
new_power = LOW_POWER;
- else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+ else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
new_power = HIGH_POWER;
break;
case HIGH_POWER:
- if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+ if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
new_power = BETWEEN;
break;
}
/* Max/min bins are special */
- if (val == dev_priv->rps.min_delay)
+ if (val == dev_priv->rps.min_freq_softlimit)
new_power = LOW_POWER;
- if (val == dev_priv->rps.max_delay)
+ if (val == dev_priv->rps.max_freq_softlimit)
new_power = HIGH_POWER;
if (new_power == dev_priv->rps.power)
return;
@@ -3000,41 +3006,113 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
dev_priv->rps.last_adj = 0;
}
+static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
+{
+ u32 mask = 0;
+
+ if (val > dev_priv->rps.min_freq_softlimit)
+ mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ if (val < dev_priv->rps.max_freq_softlimit)
+ mask |= GEN6_PM_RP_UP_THRESHOLD;
+
+ /* IVB and SNB hard hang on a looping batchbuffer
+ * if GEN6_PM_UP_EI_EXPIRED is masked.
+ */
+ if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ return ~mask;
+}
+
+/* gen6_set_rps is called to update the frequency request, but should also be
+ * called when the range (min_freq_softlimit and max_freq_softlimit) is
+ * modified so that we can update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_delay);
- WARN_ON(val < dev_priv->rps.min_delay);
-
- if (val == dev_priv->rps.cur_delay)
- return;
+ WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+ WARN_ON(val < dev_priv->rps.min_freq_softlimit);
- gen6_set_rps_thresholds(dev_priv, val);
+ /* min/max delay may still have been modified, so be sure to
+ * write the limits value.
+ */
+ if (val != dev_priv->rps.cur_freq) {
+ gen6_set_rps_thresholds(dev_priv, val);
- if (IS_HASWELL(dev))
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(val));
- else
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(val) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
+ if (IS_HASWELL(dev))
+ I915_WRITE(GEN6_RPNSWREQ,
+ HSW_FREQUENCY(val));
+ else
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ }
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- gen6_rps_limits(dev_priv, val));
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
POSTING_READ(GEN6_RPNSWREQ);
- dev_priv->rps.cur_delay = val;
-
+ dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(val * 50);
}
+/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
+ *
+ * If Gfx is Idle, then
+ * 1. Mask Turbo interrupts
+ * 2. Bring up Gfx clock
+ * 3. Change the freq to Rpn and wait till P-Unit updates freq
+ * 4. Clear the Force GFX CLK ON bit so that Gfx can go down
+ * 5. Unmask Turbo interrupts
+ */
+static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
+{
+ /*
+ * When we are idle, drop to the min voltage state.
+ */
+
+ if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
+ return;
+
+ /* Mask turbo interrupts so that they will not come in between */
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+
+ /* Bring up the Gfx clock */
+ I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
+ I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
+ VLV_GFX_CLK_FORCE_ON_BIT);
+
+ if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
+ I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
+ DRM_ERROR("GFX_CLK_ON request timed out\n");
+ return;
+ }
+
+ dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
+
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
+ dev_priv->rps.min_freq_softlimit);
+
+ if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
+ & GENFREQSTATUS) == 0, 5))
+ DRM_ERROR("timed out waiting for Punit\n");
+
+ /* Release the Gfx clock */
+ I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
+ I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
+ ~VLV_GFX_CLK_FORCE_ON_BIT);
+
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+}
+
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -3042,9 +3120,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+ vlv_set_rps_idle(dev_priv);
else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3057,9 +3135,9 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3070,21 +3148,20 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_delay);
- WARN_ON(val < dev_priv->rps.min_delay);
+ WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+ WARN_ON(val < dev_priv->rps.min_freq_softlimit);
DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
- dev_priv->rps.cur_delay,
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ dev_priv->rps.cur_freq,
vlv_gpu_freq(dev_priv, val), val);
- if (val == dev_priv->rps.cur_delay)
- return;
-
- vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+ if (val != dev_priv->rps.cur_freq)
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
- dev_priv->rps.cur_delay = val;
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+ dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
@@ -3093,7 +3170,8 @@ static void gen6_disable_rps_interrupts(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
+ I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
+ ~dev_priv->pm_rps_events);
/* Complete PM interrupt masking here doesn't race with the rps work
* item again unmasking PM interrupts because that is using a different
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
@@ -3103,7 +3181,7 @@ static void gen6_disable_rps_interrupts(struct drm_device *dev)
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
- I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+ I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
}
static void gen6_disable_rps(struct drm_device *dev)
@@ -3123,25 +3201,14 @@ static void valleyview_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
gen6_disable_rps_interrupts(dev);
-
- if (dev_priv->vlv_pctx) {
- drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
- dev_priv->vlv_pctx = NULL;
- }
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
- if (IS_GEN6(dev))
- DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
-
- if (IS_HASWELL(dev))
- DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
-
DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
}
int intel_enable_rc6(const struct drm_device *dev)
@@ -3151,44 +3218,28 @@ int intel_enable_rc6(const struct drm_device *dev)
return 0;
/* Respect the kernel parameter if it is set */
- if (i915_enable_rc6 >= 0)
- return i915_enable_rc6;
+ if (i915.enable_rc6 >= 0)
+ return i915.enable_rc6;
/* Disable RC6 on Ironlake */
if (INTEL_INFO(dev)->gen == 5)
return 0;
- if (IS_HASWELL(dev))
- return INTEL_RC6_ENABLE;
-
- /* snb/ivb have more than one rc6 state. */
- if (INTEL_INFO(dev)->gen == 6)
- return INTEL_RC6_ENABLE;
+ if (IS_IVYBRIDGE(dev))
+ return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
- return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+ return INTEL_RC6_ENABLE;
}
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 enabled_intrs;
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
- snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
- I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+ snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
-
- /* only unmask PM interrupts we need. Mask all others. */
- enabled_intrs = GEN6_PM_RPS_EVENTS;
-
- /* IVB and SNB hard hangs on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- */
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
- enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
-
- I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
}
static void gen8_enable_rps(struct drm_device *dev)
@@ -3222,10 +3273,10 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 3: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+ intel_print_rc6_info(dev, rc6_mask);
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_EI_MODE(1) |
- rc6_mask);
+ GEN6_RC_CTL_EI_MODE(1) |
+ rc6_mask);
/* 4 Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
@@ -3235,8 +3286,8 @@ static void gen8_enable_rps(struct drm_device *dev)
/* Docs recommend 900MHz, and 300 MHz respectively */
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- dev_priv->rps.max_delay << 24 |
- dev_priv->rps.min_delay << 16);
+ dev_priv->rps.max_freq_softlimit << 24 |
+ dev_priv->rps.min_freq_softlimit << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
@@ -3269,7 +3320,7 @@ static void gen6_enable_rps(struct drm_device *dev)
struct intel_ring_buffer *ring;
u32 rp_state_cap;
u32 gt_perf_status;
- u32 rc6vids, pcu_mbox, rc6_mask = 0;
+ u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
int i, ret;
@@ -3295,13 +3346,23 @@ static void gen6_enable_rps(struct drm_device *dev)
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- /* In units of 50MHz */
- dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
- dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
- dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
- dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
- dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
- dev_priv->rps.cur_delay = 0;
+ /* All of these values are in units of 50MHz */
+ dev_priv->rps.cur_freq = 0;
+ /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
+ dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+ dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+ dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+ /* XXX: only BYT has a special efficient freq */
+ dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+ /* hw_max = RP0 until we check for overclocking */
+ dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3350,21 +3411,19 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
- if (!ret) {
- pcu_mbox = 0;
- ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
- if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
- DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
- (dev_priv->rps.max_delay & 0xff) * 50,
- (pcu_mbox & 0xff) * 50);
- dev_priv->rps.hw_max = pcu_mbox & 0xff;
- }
- } else {
+ if (ret)
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
+
+ ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+ if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+ DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
+ (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
+ (pcu_mbox & 0xff) * 50);
+ dev_priv->rps.max_freq = pcu_mbox & 0xff;
}
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
gen6_enable_rps_interrupts(dev);
@@ -3420,9 +3479,9 @@ void gen6_update_ring_freq(struct drm_device *dev)
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
+ for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
gpu_freq--) {
- int diff = dev_priv->rps.max_delay - gpu_freq;
+ int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
if (INTEL_INFO(dev)->gen >= 8) {
@@ -3485,6 +3544,15 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
+/* Check that the pctx buffer wasn't moved under us. */
+static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
+{
+ unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+ WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
+ dev_priv->vlv_pctx->stolen->start);
+}
+
static void valleyview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3529,6 +3597,17 @@ out:
dev_priv->vlv_pctx = pctx;
}
+static void valleyview_cleanup_pctx(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (WARN_ON(!dev_priv->vlv_pctx))
+ return;
+
+ drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
+ dev_priv->vlv_pctx = NULL;
+}
+
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3538,6 +3617,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ valleyview_check_pctx(dev_priv);
+
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
gtfifodbg);
@@ -3588,32 +3669,39 @@ static void valleyview_enable_rps(struct drm_device *dev)
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
- dev_priv->rps.cur_delay = (val >> 8) & 0xff;
+ dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
- dev_priv->rps.cur_delay);
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ dev_priv->rps.cur_freq);
- dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
- dev_priv->rps.hw_max = dev_priv->rps.max_delay;
+ dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
+ dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
- dev_priv->rps.max_delay);
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ dev_priv->rps.max_freq);
- dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
+ dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
- dev_priv->rps.rpe_delay);
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
- dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
+ dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
- dev_priv->rps.min_delay);
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ dev_priv->rps.min_freq);
+
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
- dev_priv->rps.rpe_delay);
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
gen6_enable_rps_interrupts(dev);
@@ -3625,13 +3713,13 @@ void ironlake_teardown_rc6(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->ips.renderctx) {
- i915_gem_object_unpin(dev_priv->ips.renderctx);
+ i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
dev_priv->ips.renderctx = NULL;
}
if (dev_priv->ips.pwrctx) {
- i915_gem_object_unpin(dev_priv->ips.pwrctx);
+ i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
dev_priv->ips.pwrctx = NULL;
}
@@ -3823,9 +3911,10 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
unsigned long val;
- if (dev_priv->info->gen != 5)
+ if (INTEL_INFO(dev)->gen != 5)
return 0;
spin_lock_irq(&mchdev_lock);
@@ -3854,6 +3943,7 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
+ struct drm_device *dev = dev_priv->dev;
static const struct v_table {
u16 vd; /* in .1 mil */
u16 vm; /* in .1 mil */
@@ -3987,7 +4077,7 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{ 16000, 14875, },
{ 16125, 15000, },
};
- if (dev_priv->info->is_mobile)
+ if (INTEL_INFO(dev)->is_mobile)
return v_table[pxvid].vm;
else
return v_table[pxvid].vd;
@@ -4030,7 +4120,9 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
- if (dev_priv->info->gen != 5)
+ struct drm_device *dev = dev_priv->dev;
+
+ if (INTEL_INFO(dev)->gen != 5)
return;
spin_lock_irq(&mchdev_lock);
@@ -4047,7 +4139,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
assert_spin_locked(&mchdev_lock);
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
@@ -4079,9 +4171,10 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
unsigned long val;
- if (dev_priv->info->gen != 5)
+ if (INTEL_INFO(dev)->gen != 5)
return 0;
spin_lock_irq(&mchdev_lock);
@@ -4270,6 +4363,7 @@ void intel_gpu_ips_teardown(void)
i915_mch_dev = NULL;
spin_unlock_irq(&mchdev_lock);
}
+
static void intel_init_emon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4341,6 +4435,18 @@ static void intel_init_emon(struct drm_device *dev)
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
+void intel_init_gt_powersave(struct drm_device *dev)
+{
+ if (IS_VALLEYVIEW(dev))
+ valleyview_setup_pctx(dev);
+}
+
+void intel_cleanup_gt_powersave(struct drm_device *dev)
+{
+ if (IS_VALLEYVIEW(dev))
+ valleyview_cleanup_pctx(dev);
+}
+
void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4395,8 +4501,6 @@ void intel_enable_gt_powersave(struct drm_device *dev)
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
- if (IS_VALLEYVIEW(dev))
- valleyview_setup_pctx(dev);
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@@ -4587,6 +4691,17 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE,
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN6_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
ilk_init_lp_watermarks(dev);
I915_WRITE(CACHE_MODE_0,
@@ -4607,17 +4722,24 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*
- * Also apply WaDisableVDSUnitClockGating:snb and
- * WaDisableRCPBUnitClockGating:snb.
+ * WaDisableRCCUnitClockGating:snb
+ * WaDisableRCPBUnitClockGating:snb
*/
I915_WRITE(GEN6_UCGCTL2,
- GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
- /* Bspec says we need to always set all mask bits. */
- I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
- _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
+ /* WaStripsFansDisableFastClipPerformanceFix:snb */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
+
+ /*
+ * Bspec says:
+ * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
+ * 3DSTATE_SF number of SF output attributes is more than 16."
+ */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
/*
* According to the spec the following bits should be
@@ -4643,11 +4765,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
g4x_disable_trickle_feed(dev);
- /* The default value should be 0x200 according to docs, but the two
- * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
- I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
- I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
-
cpt_init_clock_gating(dev);
gen6_check_mch_setup(dev);
@@ -4657,14 +4774,17 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
reg &= ~GEN7_FF_SCHED_MASK;
reg |= GEN7_FF_TS_SCHED_HW;
reg |= GEN7_FF_VS_SCHED_HW;
reg |= GEN7_FF_DS_SCHED_HW;
- if (IS_HASWELL(dev_priv->dev))
- reg &= ~GEN7_FF_VS_REF_CNT_FFME;
-
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
@@ -4702,7 +4822,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
static void gen8_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe i;
+ enum pipe pipe;
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
@@ -4711,8 +4831,19 @@ static void gen8_init_clock_gating(struct drm_device *dev)
/* FIXME(BDW): Check all the w/a, some might only apply to
* pre-production hw. */
- WARN(!i915_preliminary_hw_support,
- "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
+ /* WaDisablePartialInstShootdown:bdw */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+
+ /* WaDisableThreadStallDopClockGating:bdw */
+ /* FIXME: Unclear whether we really need this on production bdw. */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+
+ /*
+ * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
+ * pre-production hardware
+ */
I915_WRITE(HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
I915_WRITE(HALF_SLICE_CHICKEN3,
@@ -4736,10 +4867,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
- for_each_pipe(i) {
- I915_WRITE(CHICKEN_PIPESL_1(i),
- I915_READ(CHICKEN_PIPESL_1(i) |
- DPRS_MASK_VBLANK_SRD));
+ for_each_pipe(pipe) {
+ I915_WRITE(CHICKEN_PIPESL_1(pipe),
+ I915_READ(CHICKEN_PIPESL_1(pipe)) |
+ BDW_DPRS_MASK_VBLANK_SRD);
}
/* Use Force Non-Coherent whenever executing a 3D context. This is a
@@ -4755,6 +4886,28 @@ static void gen8_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_FF_THREAD_MODE,
I915_READ(GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
+ I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+
+ /* WaDisableSDEUnitClockGating:bdw */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /* Wa4x4STCOptimizationDisable:bdw */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
static void haswell_init_clock_gating(struct drm_device *dev)
@@ -4763,21 +4916,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
ilk_init_lp_watermarks(dev);
- /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
- * This implements the WaDisableRCZUnitClockGating:hsw workaround.
- */
- I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
- /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
- I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
- GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
- /* WaApplyL3ControlAndL3ChickenMode:hsw */
- I915_WRITE(GEN7_L3CNTLREG1,
- GEN7_WA_FOR_GEN7_L3_CONTROL);
- I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
-
/* L3 caching of data atomics doesn't work -- disable it. */
I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
I915_WRITE(HSW_ROW_CHICKEN3,
@@ -4789,12 +4927,28 @@ static void haswell_init_clock_gating(struct drm_device *dev)
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/* WaVSRefCountFullforceMissDisable:hsw */
- gen7_setup_fixed_func_scheduler(dev_priv);
+ I915_WRITE(GEN7_FF_THREAD_MODE,
+ I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
+
+ /* enable HiZ Raw Stall Optimization */
+ I915_WRITE(CACHE_MODE_0_GEN7,
+ _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
/* WaDisable4x2SubspanOptimization:hsw */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -4827,9 +4981,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
- else
- I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
- _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
@@ -4843,31 +4994,24 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
- else
+ else {
+ /* must write both registers */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-
+ }
/* WaForceL3Serialization:ivb */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
- /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
- * gating disable must be set. Failure to set it results in
- * flickering pixels due to Z write ordering failures after
- * some amount of runtime in the Mesa "fire" demo, and Unigine
- * Sanctuary and Tropics, and apparently anything else with
- * alpha test or pixel discard.
- *
- * According to the spec, bit 11 (RCCUNIT) must also be set,
- * but we didn't debug actual testcases to find it out.
- *
+ /*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:ivb workaround.
*/
I915_WRITE(GEN6_UCGCTL2,
- GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
/* This is required by WaCatErrorRejectionIssue:ivb */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
@@ -4876,13 +5020,29 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
g4x_disable_trickle_feed(dev);
- /* WaVSRefCountFullforceMissDisable:ivb */
gen7_setup_fixed_func_scheduler(dev_priv);
+ if (0) { /* causes HiZ corruption on ivb:gt1 */
+ /* enable HiZ Raw Stall Optimization */
+ I915_WRITE(CACHE_MODE_0_GEN7,
+ _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
+ }
+
/* WaDisable4x2SubspanOptimization:ivb */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
@@ -4904,13 +5064,11 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
switch ((val >> 6) & 3) {
case 0:
- dev_priv->mem_freq = 800;
- break;
case 1:
- dev_priv->mem_freq = 1066;
+ dev_priv->mem_freq = 800;
break;
case 2:
- dev_priv->mem_freq = 1333;
+ dev_priv->mem_freq = 1066;
break;
case 3:
dev_priv->mem_freq = 1333;
@@ -4929,19 +5087,12 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
+ /* WaPsdDispatchEnable:vlv */
/* WaDisablePSDDualDispatchEnable:vlv */
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
- /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
- I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
- GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
- /* WaApplyL3ControlAndL3ChickenMode:vlv */
- I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
- I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
-
/* WaForceL3Serialization:vlv */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
@@ -4955,51 +5106,39 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
- * gating disable must be set. Failure to set it results in
- * flickering pixels due to Z write ordering failures after
- * some amount of runtime in the Mesa "fire" demo, and Unigine
- * Sanctuary and Tropics, and apparently anything else with
- * alpha test or pixel discard.
- *
- * According to the spec, bit 11 (RCCUNIT) must also be set,
- * but we didn't debug actual testcases to find it out.
- *
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:vlv workaround.
- *
- * Also apply WaDisableVDSUnitClockGating:vlv and
- * WaDisableRCPBUnitClockGating:vlv.
*/
I915_WRITE(GEN6_UCGCTL2,
- GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
- GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+ /* WaDisableL3Bank2xClockGate:vlv */
I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ /*
+ * BSpec says this must be set, even though
+ * WaDisable4x2SubspanOptimization isn't listed for VLV.
+ */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/*
+ * WaIncreaseL3CreditsForVLVB0:vlv
+ * This is the hardware default actually.
+ */
+ I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
+
+ /*
* WaDisableVLVClockGating_VBIIssue:vlv
* Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
- I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
-
- /* Conservative clock gating settings for now */
- I915_WRITE(0x9400, 0xffffffff);
- I915_WRITE(0x9404, 0xffffffff);
- I915_WRITE(0x9408, 0xffffffff);
- I915_WRITE(0x940c, 0xffffffff);
- I915_WRITE(0x9410, 0xffffffff);
- I915_WRITE(0x9414, 0xffffffff);
- I915_WRITE(0x9418, 0xffffffff);
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -5114,19 +5253,16 @@ void intel_suspend_hw(struct drm_device *dev)
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
-static bool hsw_power_well_enabled(struct drm_device *dev,
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
-bool intel_display_power_enabled_sw(struct drm_device *dev,
+bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
power_domains = &dev_priv->power_domains;
@@ -5134,15 +5270,17 @@ bool intel_display_power_enabled_sw(struct drm_device *dev,
return power_domains->domain_use_count[domain];
}
-bool intel_display_power_enabled(struct drm_device *dev,
+bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
bool is_enabled;
int i;
+ if (dev_priv->pm.suspended)
+ return false;
+
power_domains = &dev_priv->power_domains;
is_enabled = true;
@@ -5152,7 +5290,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
if (power_well->always_on)
continue;
- if (!power_well->is_enabled(dev, power_well)) {
+ if (!power_well->ops->is_enabled(dev_priv, power_well)) {
is_enabled = false;
break;
}
@@ -5162,6 +5300,12 @@ bool intel_display_power_enabled(struct drm_device *dev,
return is_enabled;
}
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -5198,10 +5342,17 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
}
}
+static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
+{
+ assert_spin_locked(&dev->vbl_lock);
+
+ dev->vblank[pipe].last = 0;
+}
+
static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- enum pipe p;
+ enum pipe pipe;
unsigned long irqflags;
/*
@@ -5212,21 +5363,18 @@ static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
* FIXME: Should we do this in general in drm_vblank_post_modeset?
*/
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for_each_pipe(p)
- if (p != PIPE_A)
- dev->vblank[p].last = 0;
+ for_each_pipe(pipe)
+ if (pipe != PIPE_A)
+ reset_vblank_counter(dev, pipe);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-static void hsw_set_power_well(struct drm_device *dev,
+static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
uint32_t tmp;
- WARN_ON(dev_priv->pc8.enabled);
-
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5255,55 +5403,229 @@ static void hsw_set_power_well(struct drm_device *dev,
}
}
-static void __intel_power_well_get(struct drm_device *dev,
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
- if (!power_well->count++ && power_well->set) {
- hsw_disable_package_c8(dev_priv);
- power_well->set(dev, power_well, true);
- }
+ /*
+ * We're taking over the BIOS, so clear any requests made by it since
+ * the driver is in charge now.
+ */
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, true);
}
-static void __intel_power_well_put(struct drm_device *dev,
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ hsw_set_power_well(dev_priv, power_well, false);
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return true;
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ enum punit_power_well power_well_id = power_well->data;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
+ PUNIT_PWRGT_PWR_GATE(power_well_id);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl &= ~mask;
+ ctrl |= state;
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+ if (wait_for(COND, 100))
+ DRM_ERROR("timout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int power_well_id = power_well->data;
+ bool enabled = false;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
+ state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+ if (state == ctrl)
+ enabled = true;
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ WARN_ON(ctrl != state);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return enabled;
+}
- WARN_ON(!power_well->count);
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization we need to defer enabling hotplug
+ * processing until fbdev is set up.
+ */
+ if (dev_priv->enable_hotplug_processing)
+ intel_hpd_init(dev_priv->dev);
+
+ i915_redisable_vga_power_on(dev_priv->dev);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum pipe pipe;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ for_each_pipe(pipe)
+ __intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ spin_lock_irq(&dev->vbl_lock);
+ for_each_pipe(pipe)
+ reset_vblank_counter(dev, pipe);
+ spin_unlock_irq(&dev->vbl_lock);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
- if (!--power_well->count && power_well->set &&
- i915_disable_power_well) {
- power_well->set(dev, power_well, false);
- hsw_enable_package_c8(dev_priv);
+static void check_power_well_state(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
+
+ if (power_well->always_on || !i915.disable_power_well) {
+ if (!enabled)
+ goto mismatch;
+
+ return;
}
+
+ if (enabled != (power_well->count > 0))
+ goto mismatch;
+
+ return;
+
+mismatch:
+ WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
+ power_well->name, power_well->always_on, enabled,
+ power_well->count, i915.disable_power_well);
}
-void intel_display_power_get(struct drm_device *dev,
+void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
+ intel_runtime_pm_get(dev_priv);
+
power_domains = &dev_priv->power_domains;
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, BIT(domain), power_domains)
- __intel_power_well_get(dev, power_well);
+ for_each_power_well(i, power_well, BIT(domain), power_domains) {
+ if (!power_well->count++) {
+ DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+ power_well->ops->enable(dev_priv, power_well);
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
power_domains->domain_use_count[domain]++;
mutex_unlock(&power_domains->lock);
}
-void intel_display_power_put(struct drm_device *dev,
+void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
@@ -5315,10 +5637,20 @@ void intel_display_power_put(struct drm_device *dev,
WARN_ON(!power_domains->domain_use_count[domain]);
power_domains->domain_use_count[domain]--;
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
- __intel_power_well_put(dev, power_well);
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ WARN_ON(!power_well->count);
+
+ if (!--power_well->count && i915.disable_power_well) {
+ DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ power_well->ops->disable(dev_priv, power_well);
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
mutex_unlock(&power_domains->lock);
+
+ intel_runtime_pm_put(dev_priv);
}
static struct i915_power_domains *hsw_pwr;
@@ -5333,7 +5665,7 @@ void i915_request_power_well(void)
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
- intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
@@ -5347,29 +5679,99 @@ void i915_release_power_well(void)
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
- intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_INIT))
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
+ HSW_ALWAYS_ON_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
+#define BDW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
+#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+ .sync_hw = i9xx_always_on_power_well_noop,
+ .enable = i9xx_always_on_power_well_noop,
+ .disable = i9xx_always_on_power_well_noop,
+ .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
static struct i915_power_well i9xx_always_on_power_well[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
},
};
+static const struct i915_power_well_ops hsw_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
- .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
- .is_enabled = hsw_power_well_enabled,
- .set = hsw_set_power_well,
+ .domains = HSW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
},
};
@@ -5378,12 +5780,83 @@ static struct i915_power_well bdw_power_wells[] = {
.name = "always-on",
.always_on = 1,
.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
- .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
- .is_enabled = hsw_power_well_enabled,
- .set = hsw_set_power_well,
+ .domains = BDW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_display_power_well_enable,
+ .disable = vlv_display_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_power_well_enable,
+ .disable = vlv_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well vlv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DISP2D,
+ .ops = &vlv_display_power_well_ops,
+ },
+ {
+ .name = "dpio-common",
+ .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .ops = &vlv_dpio_power_well_ops,
+ },
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
};
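
The power-well rework in the hunks above replaces the old per-well set()/is_enabled callbacks with constant ops vtables (sync_hw/enable/disable/is_enabled) that the HSW/BDW, VLV and always-on well tables all point at. A minimal standalone sketch of that dispatch pattern follows; the types and the fake hardware state are illustrative stand-ins, not the driver's real structures.

    #include <stdbool.h>
    #include <stdio.h>

    struct pw;                              /* stand-in for struct i915_power_well */

    struct pw_ops {                         /* stand-in for i915_power_well_ops */
            void (*sync_hw)(struct pw *pw);
            void (*enable)(struct pw *pw);
            void (*disable)(struct pw *pw);
            bool (*is_enabled)(struct pw *pw);
    };

    struct pw {
            const char *name;
            int count;                      /* use count, like power_well->count */
            bool hw_on;                     /* pretend hardware state */
            const struct pw_ops *ops;
    };

    static void fake_enable(struct pw *pw)     { pw->hw_on = true; }
    static void fake_disable(struct pw *pw)    { pw->hw_on = false; }
    static bool fake_is_enabled(struct pw *pw) { return pw->hw_on; }
    /* re-apply the use count to the "hardware", as the sync_hw hooks above do */
    static void fake_sync_hw(struct pw *pw)    { pw->hw_on = pw->count > 0; }

    static const struct pw_ops fake_ops = {
            .sync_hw    = fake_sync_hw,
            .enable     = fake_enable,
            .disable    = fake_disable,
            .is_enabled = fake_is_enabled,
    };

    /* get/put mirror intel_display_power_get/put: enable on 0->1, disable on 1->0 */
    static void pw_get(struct pw *pw)
    {
            if (!pw->count++)
                    pw->ops->enable(pw);
    }

    static void pw_put(struct pw *pw)
    {
            if (!--pw->count)
                    pw->ops->disable(pw);
    }

    int main(void)
    {
            struct pw well = { .name = "display", .ops = &fake_ops };

            pw_get(&well);
            printf("%s on: %d\n", well.name, well.ops->is_enabled(&well));
            pw_put(&well);
            printf("%s on: %d\n", well.name, well.ops->is_enabled(&well));
            return 0;
    }

Keeping the vtables const and per-platform is what lets hsw_power_wells, bdw_power_wells and vlv_power_wells stay pure data tables.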
@@ -5392,9 +5865,8 @@ static struct i915_power_well bdw_power_wells[] = {
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})
-int intel_power_domains_init(struct drm_device *dev)
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
mutex_init(&power_domains->lock);
@@ -5403,12 +5875,14 @@ int intel_power_domains_init(struct drm_device *dev)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv->dev)) {
set_power_wells(power_domains, hsw_power_wells);
hsw_pwr = power_domains;
- } else if (IS_BROADWELL(dev)) {
+ } else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
hsw_pwr = power_domains;
+ } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+ set_power_wells(power_domains, vlv_power_wells);
} else {
set_power_wells(power_domains, i9xx_always_on_power_well);
}
@@ -5416,58 +5890,38 @@ int intel_power_domains_init(struct drm_device *dev)
return 0;
}
-void intel_power_domains_remove(struct drm_device *dev)
+void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
hsw_pwr = NULL;
}
-static void intel_power_domains_resume(struct drm_device *dev)
+static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
- if (power_well->set)
- power_well->set(dev, power_well, power_well->count > 0);
- }
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
+ power_well->ops->sync_hw(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
}
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-void intel_power_domains_init_hw(struct drm_device *dev)
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* For now, we need the power well to be always enabled. */
- intel_display_set_init_power(dev, true);
- intel_power_domains_resume(dev);
-
- if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
- return;
-
- /* We're taking over the BIOS, so clear any requests made by it since
- * the driver is in charge now. */
- if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
- I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+ intel_display_set_init_power(dev_priv, true);
+ intel_power_domains_resume(dev_priv);
}
-/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
- hsw_disable_package_c8(dev_priv);
+ intel_runtime_pm_get(dev_priv);
}
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
- hsw_enable_package_c8(dev_priv);
+ intel_runtime_pm_put(dev_priv);
}
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
@@ -5499,8 +5953,6 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
- dev_priv->pm.suspended = false;
-
if (!HAS_RUNTIME_PM(dev))
return;
@@ -5509,6 +5961,8 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
pm_runtime_mark_last_busy(device);
pm_runtime_use_autosuspend(device);
+
+ pm_runtime_put_autosuspend(device);
}
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
@@ -5560,7 +6014,7 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- intel_setup_wm_latency(dev);
+ ilk_setup_wm_latency(dev);
if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
@@ -5731,13 +6185,9 @@ void intel_pm_setup(struct drm_device *dev)
mutex_init(&dev_priv->rps.hw_lock);
- mutex_init(&dev_priv->pc8.lock);
- dev_priv->pc8.requirements_met = false;
- dev_priv->pc8.gpu_idle = false;
- dev_priv->pc8.irqs_disabled = false;
- dev_priv->pc8.enabled = false;
- dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
- INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
+
+ dev_priv->pm.suspended = false;
+ dev_priv->pm.irqs_disabled = false;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31b36c5ac894..6bc68bdcf433 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -406,17 +406,24 @@ gen8_render_ring_flush(struct intel_ring_buffer *ring,
static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
I915_WRITE_TAIL(ring, value);
}
-u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
+u64 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
- RING_ACTHD(ring->mmio_base) : ACTHD;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u64 acthd;
+
+ if (INTEL_INFO(ring->dev)->gen >= 8)
+ acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
+ RING_ACTHD_UDW(ring->mmio_base));
+ else if (INTEL_INFO(ring->dev)->gen >= 4)
+ acthd = I915_READ(RING_ACTHD(ring->mmio_base));
+ else
+ acthd = I915_READ(ACTHD);
- return I915_READ(acthd_reg);
+ return acthd;
}
static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
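
The intel_ring_get_active_head() change just above widens ACTHD to a u64 on gen8 by combining RING_ACTHD with RING_ACTHD_UDW via I915_READ64_2x32. As a rough, self-contained illustration of how a 64-bit value split across two 32-bit registers can be read without tearing (re-check the upper half after reading the lower half), here is a sketch with fake registers; it is not the driver's macro, just the general technique.

    #include <stdint.h>
    #include <stdio.h>

    /* Fake 64-bit hardware counter exposed as two 32-bit "registers". */
    static volatile uint32_t reg_lo, reg_hi;

    static uint32_t read32(volatile uint32_t *reg) { return *reg; }

    /*
     * If the upper dword changes between reads, the lower dword wrapped in
     * between, so retry until a consistent upper/lower pair is observed.
     */
    static uint64_t read64_2x32(volatile uint32_t *lo, volatile uint32_t *hi)
    {
            uint32_t upper, lower, old_upper;

            upper = read32(hi);
            do {
                    old_upper = upper;
                    lower = read32(lo);
                    upper = read32(hi);
            } while (upper != old_upper);

            return ((uint64_t)upper << 32) | lower;
    }

    int main(void)
    {
            reg_hi = 0x1;
            reg_lo = 0xfffffff0;
            printf("0x%llx\n", (unsigned long long)read64_2x32(&reg_lo, &reg_hi));
            return 0;
    }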
@@ -433,22 +440,24 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
static int init_ring_common(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = ring->obj;
int ret = 0;
u32 head;
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
- if (I915_NEED_GFX_HWS(dev))
- intel_ring_setup_status_page(ring);
- else
- ring_setup_phys_status_page(ring);
-
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);
+ if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
+ DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+
+ if (I915_NEED_GFX_HWS(dev))
+ intel_ring_setup_status_page(ring);
+ else
+ ring_setup_phys_status_page(ring);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
@@ -531,9 +540,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
goto err;
}
- i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+ ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+ if (ret)
+ goto err_unref;
- ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
if (ret)
goto err_unref;
@@ -549,7 +560,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
return 0;
err_unpin:
- i915_gem_object_unpin(ring->scratch.obj);
+ i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
drm_gem_object_unreference(&ring->scratch.obj->base);
err:
@@ -562,14 +573,15 @@ static int init_render_ring(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
- if (INTEL_INFO(dev)->gen > 3)
+ /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
+ if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
- * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
*/
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
@@ -625,7 +637,7 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen >= 5) {
kunmap(sg_page(ring->scratch.obj->pages->sgl));
- i915_gem_object_unpin(ring->scratch.obj);
+ i915_gem_object_ggtt_unpin(ring->scratch.obj);
}
drm_gem_object_unreference(&ring->scratch.obj->base);
@@ -809,8 +821,11 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page. */
- if (!lazy_coherency)
- intel_ring_get_active_head(ring);
+ if (!lazy_coherency) {
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ POSTING_READ(RING_ACTHD(ring->mmio_base));
+ }
+
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
@@ -842,7 +857,7 @@ static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
if (!dev->irq_enabled)
@@ -860,7 +875,7 @@ static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -873,7 +888,7 @@ static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
if (!dev->irq_enabled)
@@ -894,7 +909,7 @@ static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -910,7 +925,7 @@ static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
if (!dev->irq_enabled)
@@ -931,7 +946,7 @@ static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -946,7 +961,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 mmio = 0;
/* The ring status page addresses are no longer next to the rest of
@@ -977,9 +992,19 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
- /* Flush the TLB for this page */
- if (INTEL_INFO(dev)->gen >= 6) {
+ /*
+ * Flush the TLB for this page
+ *
+ * FIXME: These two bits have disappeared on gen8, so a question
+ * arises: do we still need this and if so how should we go about
+ * invalidating the TLB?
+ */
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
u32 reg = RING_INSTPM(ring->mmio_base);
+
+ /* ring should be idle before issuing a sync flush */
+ WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
@@ -1029,7 +1054,7 @@ static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
if (!dev->irq_enabled)
@@ -1054,7 +1079,7 @@ static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1253,7 +1278,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
return;
kunmap(sg_page(obj->pages->sgl));
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
}
@@ -1271,12 +1296,13 @@ static int init_status_page(struct intel_ring_buffer *ring)
goto err;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ if (ret)
+ goto err_unref;
- ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
- if (ret != 0) {
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+ if (ret)
goto err_unref;
- }
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
@@ -1293,7 +1319,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
return 0;
err_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
err:
@@ -1356,7 +1382,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj;
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
goto err_unref;
@@ -1385,12 +1411,14 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
+ i915_cmd_parser_init_ring(ring);
+
return 0;
err_unmap:
iounmap(ring->virtual_start);
err_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
ring->obj = NULL;
@@ -1418,7 +1446,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
iounmap(ring->virtual_start);
- i915_gem_object_unpin(ring->obj);
+ i915_gem_object_ggtt_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
ring->obj = NULL;
ring->preallocated_lazy_request = NULL;
@@ -1430,28 +1458,16 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
cleanup_status_page(ring);
}
-static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
-{
- int ret;
-
- ret = i915_wait_seqno(ring, seqno);
- if (!ret)
- i915_gem_retire_requests_ring(ring);
-
- return ret;
-}
-
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
struct drm_i915_gem_request *request;
- u32 seqno = 0;
+ u32 seqno = 0, tail;
int ret;
- i915_gem_retire_requests_ring(ring);
-
if (ring->last_retired_head != -1) {
ring->head = ring->last_retired_head;
ring->last_retired_head = -1;
+
ring->space = ring_space(ring);
if (ring->space >= n)
return 0;
@@ -1468,6 +1484,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
space += ring->size;
if (space >= n) {
seqno = request->seqno;
+ tail = request->tail;
break;
}
@@ -1482,15 +1499,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
if (seqno == 0)
return -ENOSPC;
- ret = intel_ring_wait_seqno(ring, seqno);
+ ret = i915_wait_seqno(ring, seqno);
if (ret)
return ret;
- if (WARN_ON(ring->last_retired_head == -1))
- return -ENOSPC;
-
- ring->head = ring->last_retired_head;
- ring->last_retired_head = -1;
+ ring->head = tail;
ring->space = ring_space(ring);
if (WARN_ON(ring->space < n))
return -ENOSPC;
@@ -1528,7 +1541,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
return 0;
}
- if (dev->primary->master) {
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
+ dev->primary->master) {
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
@@ -1632,7 +1646,7 @@ static int __intel_ring_prepare(struct intel_ring_buffer *ring,
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
int ret;
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
@@ -1694,7 +1708,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
/* Every tail move must follow the sequence below */
@@ -1869,7 +1883,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
int intel_init_render_ring_buffer(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
ring->name = "render ring";
@@ -1954,7 +1968,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return -ENOMEM;
}
- ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to ping batch bo\n");
@@ -1970,7 +1984,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
@@ -2038,7 +2052,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
ring->name = "bsd ring";
@@ -2101,7 +2115,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
ring->name = "blitter ring";
@@ -2141,7 +2155,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
ring->name = "video enhancement ring";
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0b243ce33714..270a6a973438 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -33,6 +33,8 @@ struct intel_hw_status_page {
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
@@ -41,12 +43,14 @@ enum intel_ring_hangcheck_action {
HANGCHECK_HUNG,
};
+#define HANGCHECK_SCORE_RING_HUNG 31
+
struct intel_ring_hangcheck {
- bool deadlock;
+ u64 acthd;
u32 seqno;
- u32 acthd;
int score;
enum intel_ring_hangcheck_action action;
+ bool deadlock;
};
struct intel_ring_buffer {
@@ -162,6 +166,38 @@ struct intel_ring_buffer {
u32 gtt_offset;
volatile u32 *cpu_page;
} scratch;
+
+ /*
+ * Tables of commands the command parser needs to know about
+ * for this ring.
+ */
+ const struct drm_i915_cmd_table *cmd_tables;
+ int cmd_table_count;
+
+ /*
+ * Table of registers allowed in commands that read/write registers.
+ */
+ const u32 *reg_table;
+ int reg_count;
+
+ /*
+ * Table of registers allowed in commands that read/write registers, but
+ * only from the DRM master.
+ */
+ const u32 *master_reg_table;
+ int master_reg_count;
+
+ /*
+ * Returns the bitmask for the length field of the specified command.
+ * Return 0 for an unrecognized/invalid command.
+ *
+ * If the command parser finds an entry for a command in the ring's
+ * cmd_tables, it gets the command's length based on the table entry.
+ * If not, it calls this function to determine the per-ring length field
+ * encoding for the command (i.e. certain opcode ranges use certain bits
+ * to encode the command length in the header).
+ */
+ u32 (*get_cmd_length_mask)(u32 cmd_header);
};
static inline bool
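
The new get_cmd_length_mask() hook documented in the struct above maps a command header to the bitmask covering its length field, with different opcode ranges using different encodings. A hypothetical example of such a helper is sketched below; the client codes and masks are invented for illustration and do not correspond to real i915 command encodings.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical per-ring length-mask helper; ranges and masks are made up. */
    static uint32_t example_cmd_length_mask(uint32_t cmd_header)
    {
            uint32_t client = cmd_header >> 29;     /* pretend the top 3 bits select a client */

            switch (client) {
            case 0x0:                               /* pretend: MI-style commands, 6-bit length */
                    return 0x3f;
            case 0x3:                               /* pretend: 3D-style commands, 8-bit length */
                    return 0xff;
            default:
                    return 0;                       /* unrecognized command: no mask */
            }
    }

    int main(void)
    {
            uint32_t header = 0x61000004;           /* made-up command header */
            uint32_t mask = example_cmd_length_mask(header);

            if (mask)
                    printf("length field: %u dwords\n", header & mask);
            else
                    printf("unrecognized command\n");
            return 0;
    }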
@@ -256,7 +292,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
-u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 95bdfb3c431c..d27155adf5db 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1461,7 +1461,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
u32 temp;
bool input1, input2;
int i;
- u8 status;
+ bool success;
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0) {
@@ -1475,12 +1475,12 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe);
- status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+ success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
* A lot of SDVO devices fail to notify of sync, but it's
* a given if the status is a success, we succeeded.
*/
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ if (success && !input1) {
DRM_DEBUG_KMS("First %s output reported failure to "
"sync\n", SDVO_NAME(intel_sdvo));
}
@@ -2382,24 +2382,62 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
}
static void
+intel_sdvo_connector_unregister(struct intel_connector *intel_connector)
+{
+ struct drm_connector *drm_connector;
+ struct intel_sdvo *sdvo_encoder;
+
+ drm_connector = &intel_connector->base;
+ sdvo_encoder = intel_attached_sdvo(&intel_connector->base);
+
+ sysfs_remove_link(&drm_connector->kdev->kobj,
+ sdvo_encoder->ddc.dev.kobj.name);
+ intel_connector_unregister(intel_connector);
+}
+
+static int
intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
struct intel_sdvo *encoder)
{
- drm_connector_init(encoder->base.base.dev,
- &connector->base.base,
+ struct drm_connector *drm_connector;
+ int ret;
+
+ drm_connector = &connector->base.base;
+ ret = drm_connector_init(encoder->base.base.dev,
+ drm_connector,
&intel_sdvo_connector_funcs,
connector->base.base.connector_type);
+ if (ret < 0)
+ return ret;
- drm_connector_helper_add(&connector->base.base,
+ drm_connector_helper_add(drm_connector,
&intel_sdvo_connector_helper_funcs);
connector->base.base.interlace_allowed = 1;
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
+ connector->base.unregister = intel_sdvo_connector_unregister;
intel_connector_attach_encoder(&connector->base, &encoder->base);
- drm_sysfs_connector_add(&connector->base.base);
+ ret = drm_sysfs_connector_add(drm_connector);
+ if (ret < 0)
+ goto err1;
+
+ ret = sysfs_create_link(&encoder->ddc.dev.kobj,
+ &drm_connector->kdev->kobj,
+ encoder->ddc.dev.kobj.name);
+ if (ret < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ drm_sysfs_connector_remove(drm_connector);
+err1:
+ drm_connector_cleanup(drm_connector);
+
+ return ret;
}
static void
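
intel_sdvo_connector_init() above now returns an error code and unwinds its partially-completed setup through the err2/err1 labels, and each caller frees the connector when it fails. A generic, standalone sketch of that goto-unwind idiom, with invented step functions, is shown here.

    #include <stdio.h>

    struct widget { int registered; int linked; };

    static int register_widget(struct widget *w)    { w->registered = 1; return 0; }
    static void unregister_widget(struct widget *w) { w->registered = 0; }
    static int link_widget(struct widget *w)        { (void)w; return -1; /* pretend failure */ }

    static int widget_init(struct widget *w)
    {
            int ret;

            ret = register_widget(w);
            if (ret < 0)
                    return ret;

            ret = link_widget(w);
            if (ret < 0)
                    goto err_unregister;    /* undo earlier steps in reverse order */

            w->linked = 1;
            return 0;

    err_unregister:
            unregister_widget(w);
            return ret;
    }

    int main(void)
    {
            struct widget w = { 0, 0 };

            printf("widget_init: %d\n", widget_init(&w));
            printf("registered after failure: %d\n", w.registered);
            return 0;
    }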
@@ -2459,7 +2497,11 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo->is_hdmi = true;
}
- intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
+
if (intel_sdvo->is_hdmi)
intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
@@ -2490,7 +2532,10 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
intel_sdvo->is_tv = true;
- intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
goto err;
@@ -2534,8 +2579,11 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_sdvo_connector_init(intel_sdvo_connector,
- intel_sdvo);
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
+
return true;
}
@@ -2566,7 +2614,11 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
+
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
@@ -2980,7 +3032,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
* simplistic anyway to express such constraints, so just give up on
* cloning for SDVO encoders.
*/
- intel_sdvo->base.cloneable = false;
+ intel_sdvo->base.cloneable = 0;
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
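The reworked intel_sdvo_connector_init() above is a textbook case of staged setup with goto-based unwind: connector init, sysfs registration and the DDC symlink each check their return value, and a failure at step N jumps to a label that tears down steps N-1 back to 1 in reverse order. Below is a minimal, self-contained userspace sketch of that pattern; the resources and the file path are made up for illustration and are not part of the i915 code.

/* Illustrative only: staged setup with goto-based unwind (userspace C). */
#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *buf;
	FILE *log;

	buf = malloc(64);			/* step 1 */
	if (!buf)
		return -1;

	log = fopen("/tmp/example.log", "w");	/* step 2 (hypothetical path) */
	if (!log)
		goto err1;			/* undo step 1 only */

	if (fputs("ready\n", log) < 0)		/* step 3 */
		goto err2;			/* undo steps 2 and 1 */

	fclose(log);
	free(buf);
	return 0;

err2:
	fclose(log);
err1:
	free(buf);
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}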
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 716a3c9c0751..336ae6c602f2 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -124,9 +124,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
crtc_w--;
crtc_h--;
- I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
- I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
-
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
obj->tiling_mode,
@@ -134,6 +131,9 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
+ I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
+ I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+
if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
else
@@ -293,15 +293,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
- I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
+ I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -472,15 +472,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
- I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
- I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
+ I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+
if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
else
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 22cf0f4ba248..bafe92e317d5 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1189,8 +1189,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_disable_pipestat(dev_priv, 0,
- PIPE_HOTPLUG_INTERRUPT_ENABLE |
- PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ PIPE_HOTPLUG_INTERRUPT_STATUS |
+ PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -1266,8 +1266,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0,
- PIPE_HOTPLUG_INTERRUPT_ENABLE |
- PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ PIPE_HOTPLUG_INTERRUPT_STATUS |
+ PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -1536,9 +1536,14 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
/*
* If the device type is not TV, continue.
*/
- if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
- p_child->old.device_type != DEVICE_TYPE_TV)
+ switch (p_child->old.device_type) {
+ case DEVICE_TYPE_INT_TV:
+ case DEVICE_TYPE_TV:
+ case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+ break;
+ default:
continue;
+ }
/* Only when the addin_offset is non-zero, it is regarded
* as present.
*/
@@ -1634,13 +1639,13 @@ intel_tv_init(struct drm_device *dev)
intel_encoder->disable = intel_disable_tv;
intel_encoder->get_hw_state = intel_tv_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- intel_encoder->cloneable = false;
+ intel_encoder->cloneable = 0;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
- intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 87df68f5f504..f729dc71d5be 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -40,6 +40,12 @@
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+static void
+assert_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+ WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+ "Device suspended\n");
+}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
@@ -83,14 +89,14 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
__gen6_gt_wait_for_thread_c0(dev_priv);
}
-static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
}
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
+static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
int fw_engine)
{
u32 forcewake_ack;
@@ -136,14 +142,16 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
gen6_gt_check_fifodbg(dev_priv);
}
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
+static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
int fw_engine)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
- gen6_gt_check_fifodbg(dev_priv);
+
+ if (IS_GEN7(dev_priv->dev))
+ gen6_gt_check_fifodbg(dev_priv);
}
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -251,16 +259,16 @@ void vlv_force_wake_get(struct drm_i915_private *dev_priv,
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (FORCEWAKE_RENDER & fw_engine) {
- if (dev_priv->uncore.fw_rendercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_RENDER);
- }
- if (FORCEWAKE_MEDIA & fw_engine) {
- if (dev_priv->uncore.fw_mediacount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_MEDIA);
- }
+
+ if (fw_engine & FORCEWAKE_RENDER &&
+ dev_priv->uncore.fw_rendercount++ != 0)
+ fw_engine &= ~FORCEWAKE_RENDER;
+ if (fw_engine & FORCEWAKE_MEDIA &&
+ dev_priv->uncore.fw_mediacount++ != 0)
+ fw_engine &= ~FORCEWAKE_MEDIA;
+
+ if (fw_engine)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -272,46 +280,89 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (FORCEWAKE_RENDER & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_rendercount == 0);
- if (--dev_priv->uncore.fw_rendercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_RENDER);
+ if (fw_engine & FORCEWAKE_RENDER) {
+ WARN_ON(!dev_priv->uncore.fw_rendercount);
+ if (--dev_priv->uncore.fw_rendercount != 0)
+ fw_engine &= ~FORCEWAKE_RENDER;
}
- if (FORCEWAKE_MEDIA & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_mediacount == 0);
- if (--dev_priv->uncore.fw_mediacount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_MEDIA);
+ if (fw_engine & FORCEWAKE_MEDIA) {
+ WARN_ON(!dev_priv->uncore.fw_mediacount);
+ if (--dev_priv->uncore.fw_mediacount != 0)
+ fw_engine &= ~FORCEWAKE_MEDIA;
}
+ if (fw_engine)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void gen6_force_wake_work(struct work_struct *work)
+static void gen6_force_wake_timer(unsigned long arg)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
+ struct drm_i915_private *dev_priv = (void *)arg;
unsigned long irqflags;
+ assert_device_not_suspended(dev_priv);
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ WARN_ON(!dev_priv->uncore.forcewake_count);
+
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ intel_runtime_pm_put(dev_priv);
}
-static void intel_uncore_forcewake_reset(struct drm_device *dev)
+static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
- if (IS_VALLEYVIEW(dev)) {
+ del_timer_sync(&dev_priv->uncore.force_wake_timer);
+
+ /* Hold uncore.lock across reset to prevent any register access
+ * with forcewake not set correctly
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (IS_VALLEYVIEW(dev))
vlv_force_wake_reset(dev_priv);
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
__gen6_gt_force_wake_reset(dev_priv);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- __gen6_gt_force_wake_mt_reset(dev_priv);
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
+ __gen7_gt_force_wake_mt_reset(dev_priv);
+
+ if (restore) { /* If reset with a user forcewake, try to restore */
+ unsigned fw = 0;
+
+ if (IS_VALLEYVIEW(dev)) {
+ if (dev_priv->uncore.fw_rendercount)
+ fw |= FORCEWAKE_RENDER;
+
+ if (dev_priv->uncore.fw_mediacount)
+ fw |= FORCEWAKE_MEDIA;
+ } else {
+ if (dev_priv->uncore.forcewake_count)
+ fw = FORCEWAKE_ALL;
+ }
+
+ if (fw)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
+
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ dev_priv->uncore.fifo_count =
+ __raw_i915_read32(dev_priv, GTFIFOCTL) &
+ GT_FIFO_FREE_ENTRIES_MASK;
+ } else {
+ dev_priv->uncore.forcewake_count = 0;
+ dev_priv->uncore.fw_rendercount = 0;
+ dev_priv->uncore.fw_mediacount = 0;
}
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
void intel_uncore_early_sanitize(struct drm_device *dev)
@@ -337,7 +388,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
- intel_uncore_forcewake_reset(dev);
+ intel_uncore_forcewake_reset(dev, false);
}
void intel_uncore_sanitize(struct drm_device *dev)
@@ -354,7 +405,9 @@ void intel_uncore_sanitize(struct drm_device *dev)
mutex_lock(&dev_priv->rps.hw_lock);
reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
- if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
+ if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) |
+ PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) |
+ PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D)))
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -393,25 +446,40 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
+ bool delayed = false;
if (!dev_priv->uncore.funcs.force_wake_put)
return;
/* Redirect to VLV specific routine */
- if (IS_VALLEYVIEW(dev_priv->dev))
- return vlv_force_wake_put(dev_priv, fw_engine);
+ if (IS_VALLEYVIEW(dev_priv->dev)) {
+ vlv_force_wake_put(dev_priv, fw_engine);
+ goto out;
+ }
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ WARN_ON(!dev_priv->uncore.forcewake_count);
+
if (--dev_priv->uncore.forcewake_count == 0) {
dev_priv->uncore.forcewake_count++;
- mod_delayed_work(dev_priv->wq,
- &dev_priv->uncore.force_wake_work,
- 1);
+ delayed = true;
+ mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
+ jiffies + 1);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
- intel_runtime_pm_put(dev_priv);
+out:
+ if (!delayed)
+ intel_runtime_pm_put(dev_priv);
+}
+
+void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->uncore.funcs.force_wake_get)
+ return;
+
+ WARN_ON(dev_priv->uncore.forcewake_count > 0);
}
/* We give fast paths for the really cool registers */
@@ -446,16 +514,10 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
}
}
-static void
-assert_device_not_suspended(struct drm_i915_private *dev_priv)
-{
- WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
- "Device suspended\n");
-}
-
#define REG_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
+ assert_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
#define REG_READ_FOOTER \
@@ -484,14 +546,13 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- FORCEWAKE_ALL); \
+ if (dev_priv->uncore.forcewake_count == 0 && \
+ NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ FORCEWAKE_ALL); \
val = __raw_i915_read##x(dev_priv, reg); \
- if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- FORCEWAKE_ALL); \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ FORCEWAKE_ALL); \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
@@ -502,27 +563,19 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
unsigned fwengine = 0; \
- unsigned *fwcount; \
REG_READ_HEADER(x); \
- if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
- fwengine = FORCEWAKE_RENDER; \
- fwcount = &dev_priv->uncore.fw_rendercount; \
- } \
- else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
- fwengine = FORCEWAKE_MEDIA; \
- fwcount = &dev_priv->uncore.fw_mediacount; \
+ if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
} \
- if (fwengine != 0) { \
- if ((*fwcount)++ == 0) \
- (dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
- fwengine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- if (--(*fwcount) == 0) \
- (dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
- fwengine); \
- } else { \
- val = __raw_i915_read##x(dev_priv, reg); \
- } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
REG_READ_FOOTER; \
}
@@ -554,6 +607,7 @@ __gen4_read(64)
#define REG_WRITE_HEADER \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ assert_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
#define REG_WRITE_FOOTER \
@@ -584,7 +638,6 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- assert_device_not_suspended(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
@@ -600,7 +653,6 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- assert_device_not_suspended(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
@@ -634,16 +686,17 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
REG_WRITE_HEADER; \
- if (__needs_put) { \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- FORCEWAKE_ALL); \
- } \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (__needs_put) { \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- FORCEWAKE_ALL); \
+ if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ FORCEWAKE_ALL); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ FORCEWAKE_ALL); \
+ } else { \
+ __raw_i915_write##x(dev_priv, reg, val); \
} \
REG_WRITE_FOOTER; \
}
@@ -681,15 +734,17 @@ void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
- gen6_force_wake_work);
+ setup_timer(&dev_priv->uncore.force_wake_timer,
+ gen6_force_wake_timer, (unsigned long)dev_priv);
+
+ intel_uncore_early_sanitize(dev);
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
+ dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
@@ -703,16 +758,16 @@ void intel_uncore_init(struct drm_device *dev)
* forcewake being disabled.
*/
mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
+ __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
+ __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
if (ecobus & FORCEWAKE_MT_ENABLE) {
dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_mt_get;
+ __gen7_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_mt_put;
+ __gen7_gt_force_wake_mt_put;
} else {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
@@ -792,12 +847,9 @@ void intel_uncore_init(struct drm_device *dev)
void intel_uncore_fini(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- flush_delayed_work(&dev_priv->uncore.force_wake_work);
-
/* Paranoia: make sure we have disabled everything before we exit. */
intel_uncore_sanitize(dev);
+ intel_uncore_forcewake_reset(dev, false);
}
static const struct register_whitelist {
@@ -814,7 +866,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
- int i;
+ int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (entry->offset == reg->offset &&
@@ -825,6 +877,8 @@ int i915_reg_read_ioctl(struct drm_device *dev,
if (i == ARRAY_SIZE(whitelist))
return -EINVAL;
+ intel_runtime_pm_get(dev_priv);
+
switch (entry->size) {
case 8:
reg->val = I915_READ64(reg->offset);
@@ -840,10 +894,13 @@ int i915_reg_read_ioctl(struct drm_device *dev,
break;
default:
WARN_ON(1);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- return 0;
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
}
int i915_get_reset_stats_ioctl(struct drm_device *dev,
@@ -852,6 +909,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reset_stats *args = data;
struct i915_ctx_hang_stats *hs;
+ struct i915_hw_context *ctx;
int ret;
if (args->flags || args->pad)
@@ -864,11 +922,12 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
if (ret)
return ret;
- hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
- if (IS_ERR(hs)) {
+ ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
+ if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(hs);
+ return PTR_ERR(ctx);
}
+ hs = &ctx->hang_stats;
if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
@@ -944,12 +1003,6 @@ static int gen6_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- unsigned long irqflags;
-
- /* Hold uncore.lock across reset to prevent any register access
- * with forcewake not set correctly
- */
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* Reset the chip */
@@ -962,18 +1015,8 @@ static int gen6_do_reset(struct drm_device *dev)
/* Spin waiting for the device to ack the reset request */
ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
- intel_uncore_forcewake_reset(dev);
+ intel_uncore_forcewake_reset(dev, true);
- /* If reset with a user forcewake, try to restore, otherwise turn it off */
- if (dev_priv->uncore.forcewake_count)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
- else
- dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
-
- /* Restore fifo count */
- dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return ret;
}
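The thread running through the intel_uncore.c hunks is reference counting of forcewake: accessors bump a per-domain count, only the 0 -> 1 transition actually wakes the hardware, and the final put is deferred (now via a pinned timer) so bursts of MMIO do not toggle the wake state on every access. The sketch below models just that refcount shape in plain userspace C; the names are invented and none of this is i915 API.

/* Illustrative only: refcounted "wake" where only 0<->1 transitions touch hw. */
#include <pthread.h>
#include <stdio.h>

struct fake_domain {
	pthread_mutex_t lock;
	unsigned int count;
};

static void hw_wake(void)  { puts("hw: wake");  }	/* stands in for force_wake_get */
static void hw_sleep(void) { puts("hw: sleep"); }	/* stands in for force_wake_put */

static void domain_get(struct fake_domain *d)
{
	pthread_mutex_lock(&d->lock);
	if (d->count++ == 0)		/* only the 0 -> 1 transition wakes hw */
		hw_wake();
	pthread_mutex_unlock(&d->lock);
}

static void domain_put(struct fake_domain *d)
{
	pthread_mutex_lock(&d->lock);
	if (--d->count == 0)		/* only the 1 -> 0 transition sleeps hw */
		hw_sleep();
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct fake_domain d = { PTHREAD_MUTEX_INITIALIZER, 0 };

	domain_get(&d);		/* wakes */
	domain_get(&d);		/* no-op, already awake */
	domain_put(&d);		/* no-op, still referenced */
	domain_put(&d);		/* sleeps */
	return 0;
}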
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 968374776db9..a034ed408252 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -29,7 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
int i;
if (!crtc->enabled)
@@ -742,7 +742,7 @@ static int mga_crtc_do_set_base(struct drm_crtc *crtc,
mgag200_bo_unreserve(bo);
}
- mga_fb = to_mga_framebuffer(crtc->fb);
+ mga_fb = to_mga_framebuffer(crtc->primary->fb);
obj = mga_fb->obj;
bo = gem_to_mga_bo(obj);
@@ -805,7 +805,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
/* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
};
- bppshift = mdev->bpp_shifts[(crtc->fb->bits_per_pixel >> 3) - 1];
+ bppshift = mdev->bpp_shifts[(crtc->primary->fb->bits_per_pixel >> 3) - 1];
switch (mdev->type) {
case G200_SE_A:
@@ -843,12 +843,12 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
break;
}
- switch (crtc->fb->bits_per_pixel) {
+ switch (crtc->primary->fb->bits_per_pixel) {
case 8:
dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
break;
case 16:
- if (crtc->fb->depth == 15)
+ if (crtc->primary->fb->depth == 15)
dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
else
dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
@@ -896,8 +896,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
WREG_SEQ(3, 0);
WREG_SEQ(4, 0xe);
- pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
- if (crtc->fb->bits_per_pixel == 24)
+ pitch = crtc->primary->fb->pitches[0] / (crtc->primary->fb->bits_per_pixel / 8);
+ if (crtc->primary->fb->bits_per_pixel == 24)
pitch = (pitch * 3) >> (4 - bppshift);
else
pitch = pitch >> (4 - bppshift);
@@ -974,7 +974,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
((vdisplay & 0xc00) >> 7) |
((vsyncstart & 0xc00) >> 5) |
((vdisplay & 0x400) >> 3);
- if (crtc->fb->bits_per_pixel == 24)
+ if (crtc->primary->fb->bits_per_pixel == 24)
ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
else
ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
@@ -1034,9 +1034,9 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
u32 bpp;
u32 mb;
- if (crtc->fb->bits_per_pixel > 16)
+ if (crtc->primary->fb->bits_per_pixel > 16)
bpp = 32;
- else if (crtc->fb->bits_per_pixel > 8)
+ else if (crtc->primary->fb->bits_per_pixel > 8)
bpp = 16;
else
bpp = 8;
@@ -1277,8 +1277,8 @@ static void mga_crtc_disable(struct drm_crtc *crtc)
int ret;
DRM_DEBUG_KMS("\n");
mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->fb) {
- struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb);
+ if (crtc->primary->fb) {
+ struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->primary->fb);
struct drm_gem_object *obj = mga_fb->obj;
struct mgag200_bo *bo = gem_to_mga_bo(obj);
ret = mgag200_bo_reserve(bo, false);
@@ -1287,7 +1287,7 @@ static void mga_crtc_disable(struct drm_crtc *crtc)
mgag200_bo_push_sysram(bo);
mgag200_bo_unreserve(bo);
}
- crtc->fb = NULL;
+ crtc->primary->fb = NULL;
}
/* These provide the minimum set of functions required to handle a CRTC */
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index adb5166a5dfd..5a00e90696de 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -259,7 +259,9 @@ int mgag200_mm_init(struct mga_device *mdev)
ret = ttm_bo_device_init(&mdev->ttm.bdev,
mdev->ttm.bo_global_ref.ref.object,
- &mgag200_bo_driver, DRM_FILE_PAGE_OFFSET,
+ &mgag200_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -324,7 +326,6 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
}
mgabo->bo.bdev = &mdev->ttm.bdev;
- mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index c69d1e07a3a6..b6984971ce0c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,7 +3,7 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on MSM_IOMMU
- depends on (ARCH_MSM && ARCH_MSM8960) || (ARM && COMPILE_TEST)
+ depends on ARCH_MSM8960 || (ARM && COMPILE_TEST)
select DRM_KMS_HELPER
select SHMEM
select TMPFS
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 4f977a593bea..5e1e6b0cd8ac 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -7,6 +7,7 @@ msm-y := \
adreno/adreno_gpu.o \
adreno/a3xx_gpu.o \
hdmi/hdmi.o \
+ hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
hdmi/hdmi_connector.o \
hdmi/hdmi_i2c.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 461df93e825e..f20fbde5dc49 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -35,7 +35,11 @@
A3XX_INT0_CP_AHB_ERROR_HALT | \
A3XX_INT0_UCHE_OOB_ACCESS)
-static struct platform_device *a3xx_pdev;
+
+static bool hang_debug = false;
+MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
+module_param_named(hang_debug, hang_debug, bool, 0600);
+static void a3xx_dump(struct msm_gpu *gpu);
static void a3xx_me_init(struct msm_gpu *gpu)
{
@@ -291,6 +295,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
static void a3xx_recover(struct msm_gpu *gpu)
{
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a3xx_dump(gpu);
gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
@@ -311,27 +318,18 @@ static void a3xx_destroy(struct msm_gpu *gpu)
ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
#endif
- put_device(&a3xx_gpu->pdev->dev);
kfree(a3xx_gpu);
}
static void a3xx_idle(struct msm_gpu *gpu)
{
- unsigned long t;
-
/* wait for ringbuffer to drain: */
adreno_idle(gpu);
- t = jiffies + ADRENO_IDLE_TIMEOUT;
-
/* then wait for GPU to finish: */
- do {
- uint32_t rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
- if (!(rbbm_status & A3XX_RBBM_STATUS_GPU_BUSY))
- return;
- } while(time_before(jiffies, t));
-
- DRM_ERROR("timeout waiting for %s to idle!\n", gpu->name);
+ if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
+ A3XX_RBBM_STATUS_GPU_BUSY)))
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
/* TODO maybe we need to reset GPU here to recover from hang? */
}
@@ -352,7 +350,6 @@ static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
return IRQ_HANDLED;
}
-#ifdef CONFIG_DEBUG_FS
static const unsigned int a3xx_registers[] = {
0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
@@ -392,11 +389,18 @@ static const unsigned int a3xx_registers[] = {
0x303c, 0x303c, 0x305e, 0x305f,
};
+#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
+ struct drm_device *dev = gpu->dev;
int i;
adreno_show(gpu, m);
+
+ mutex_lock(&dev->struct_mutex);
+
+ gpu->funcs->pm_resume(gpu);
+
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
@@ -412,9 +416,36 @@ static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
}
}
+
+ gpu->funcs->pm_suspend(gpu);
+
+ mutex_unlock(&dev->struct_mutex);
}
#endif
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+static void a3xx_dump(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump(gpu);
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A3XX_RBBM_STATUS));
+
+ /* dump these out in a form that can be parsed by demsm: */
+ printk("IO:region %s 00000000 00020000\n", gpu->name);
+ for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
+ uint32_t start = a3xx_registers[i];
+ uint32_t end = a3xx_registers[i+1];
+ uint32_t addr;
+
+ for (addr = start; addr <= end; addr++) {
+ uint32_t val = gpu_read(gpu, addr);
+ printk("IO:R %08x %08x\n", addr<<2, val);
+ }
+ }
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -439,7 +470,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
struct a3xx_gpu *a3xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
- struct platform_device *pdev = a3xx_pdev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config;
int ret;
@@ -460,7 +492,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a3xx_gpu->base;
gpu = &adreno_gpu->base;
- get_device(&pdev->dev);
a3xx_gpu->pdev = pdev;
gpu->fast_rate = config->fast_rate;
@@ -522,17 +553,24 @@ fail:
# include <mach/kgsl.h>
#endif
-static int a3xx_probe(struct platform_device *pdev)
+static void set_gpu_pdev(struct drm_device *dev,
+ struct platform_device *pdev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ priv->gpu_pdev = pdev;
+}
+
+static int a3xx_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
#ifdef CONFIG_OF
- struct device_node *child, *node = pdev->dev.of_node;
+ struct device_node *child, *node = dev->of_node;
u32 val;
int ret;
ret = of_property_read_u32(node, "qcom,chipid", &val);
if (ret) {
- dev_err(&pdev->dev, "could not find chipid: %d\n", ret);
+ dev_err(dev, "could not find chipid: %d\n", ret);
return ret;
}
@@ -548,7 +586,7 @@ static int a3xx_probe(struct platform_device *pdev)
for_each_child_of_node(child, pwrlvl) {
ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
if (ret) {
- dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret);
+ dev_err(dev, "could not find gpu-freq: %d\n", ret);
return ret;
}
config.fast_rate = max(config.fast_rate, val);
@@ -558,12 +596,12 @@ static int a3xx_probe(struct platform_device *pdev)
}
if (!config.fast_rate) {
- dev_err(&pdev->dev, "could not find clk rates\n");
+ dev_err(dev, "could not find clk rates\n");
return -ENXIO;
}
#else
- struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+ struct kgsl_device_platform_data *pdata = dev->platform_data;
uint32_t version = socinfo_get_version();
if (cpu_is_apq8064ab()) {
config.fast_rate = 450000000;
@@ -609,14 +647,30 @@ static int a3xx_probe(struct platform_device *pdev)
config.bus_scale_table = pdata->bus_scale_table;
# endif
#endif
- pdev->dev.platform_data = &config;
- a3xx_pdev = pdev;
+ dev->platform_data = &config;
+ set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
}
+static void a3xx_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ set_gpu_pdev(dev_get_drvdata(master), NULL);
+}
+
+static const struct component_ops a3xx_ops = {
+ .bind = a3xx_bind,
+ .unbind = a3xx_unbind,
+};
+
+static int a3xx_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &a3xx_ops);
+}
+
static int a3xx_remove(struct platform_device *pdev)
{
- a3xx_pdev = NULL;
+ component_del(&pdev->dev, &a3xx_ops);
return 0;
}
@@ -624,7 +678,6 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,kgsl-3d0" },
{}
};
-MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver a3xx_driver = {
.probe = a3xx_probe,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index d321099abdd4..28ca8cd8b09e 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -73,6 +73,12 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_GMEM_SIZE:
*value = adreno_gpu->gmem;
return 0;
+ case MSM_PARAM_CHIP_ID:
+ *value = adreno_gpu->rev.patchid |
+ (adreno_gpu->rev.minor << 8) |
+ (adreno_gpu->rev.major << 16) |
+ (adreno_gpu->rev.core << 24);
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
@@ -225,19 +231,11 @@ void adreno_flush(struct msm_gpu *gpu)
void adreno_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t rptr, wptr = get_wptr(gpu->rb);
- unsigned long t;
-
- t = jiffies + ADRENO_IDLE_TIMEOUT;
-
- /* then wait for CP to drain ringbuffer: */
- do {
- rptr = adreno_gpu->memptrs->rptr;
- if (rptr == wptr)
- return;
- } while(time_before(jiffies, t));
+ uint32_t wptr = get_wptr(gpu->rb);
- DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ /* wait for CP to drain ringbuffer: */
+ if (spin_until(adreno_gpu->memptrs->rptr == wptr))
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
/* TODO maybe we need to reset GPU here to recover from hang? */
}
@@ -260,22 +258,37 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
}
#endif
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+void adreno_dump(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t freedwords;
- unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
- do {
- uint32_t size = gpu->rb->size / 4;
- uint32_t wptr = get_wptr(gpu->rb);
- uint32_t rptr = adreno_gpu->memptrs->rptr;
- freedwords = (rptr + (size - 1) - wptr) % size;
-
- if (time_after(jiffies, t)) {
- DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
- break;
- }
- } while(freedwords < ndwords);
+
+ printk("revision: %d (%d.%d.%d.%d)\n",
+ adreno_gpu->info->revn, adreno_gpu->rev.core,
+ adreno_gpu->rev.major, adreno_gpu->rev.minor,
+ adreno_gpu->rev.patchid);
+
+ printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
+ gpu->submitted_fence);
+ printk("rptr: %d\n", adreno_gpu->memptrs->rptr);
+ printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
+ printk("rb wptr: %d\n", get_wptr(gpu->rb));
+
+}
+
+static uint32_t ring_freewords(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ uint32_t size = gpu->rb->size / 4;
+ uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t rptr = adreno_gpu->memptrs->rptr;
+ return (rptr + (size - 1) - wptr) % size;
+}
+
+void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+{
+ if (spin_until(ring_freewords(gpu) >= ndwords))
+ DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
}
static const char *iommu_ports[] = {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index ca11ea4da165..63c36ce33020 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -76,7 +76,20 @@ struct adreno_platform_config {
#endif
};
-#define ADRENO_IDLE_TIMEOUT (20 * 1000)
+#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+#define spin_until(X) ({ \
+ int __ret = -ETIMEDOUT; \
+ unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
+ do { \
+ if (X) { \
+ __ret = 0; \
+ break; \
+ } \
+ } while (time_before(jiffies, __t)); \
+ __ret; \
+})
+
static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
@@ -114,6 +127,7 @@ void adreno_idle(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
+void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
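The new spin_until() macro centralises the driver's bounded polling: it evaluates the condition repeatedly until it holds or ADRENO_IDLE_TIMEOUT worth of jiffies has passed, yielding 0 on success and -ETIMEDOUT otherwise. A rough userspace analogue of the same idea, using a monotonic clock instead of jiffies, could look like the following; the macro name, the timeout and the fake "hardware" condition are all illustrative.

/* Illustrative only: poll a condition with a deadline (userspace C). */
#include <errno.h>
#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Evaluates to 0 once cond becomes true, or -ETIMEDOUT after timeout_ms. */
#define poll_until(cond, timeout_ms) ({				\
	long long __deadline = now_ms() + (timeout_ms);		\
	int __ret = -ETIMEDOUT;					\
	do {							\
		if (cond) {					\
			__ret = 0;				\
			break;					\
		}						\
	} while (now_ms() < __deadline);			\
	__ret;							\
})

int main(void)
{
	long long start = now_ms();

	/* Pretend the "hardware" goes idle 50 ms after start. */
	if (poll_until(now_ms() - start > 50, 1000))
		fprintf(stderr, "timeout waiting for idle\n");
	else
		printf("idle after ~%lld ms\n", now_ms() - start);
	return 0;
}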
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 6f1588aa9071..ae750f6928c1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -17,8 +17,6 @@
#include "hdmi.h"
-static struct platform_device *hdmi_pdev;
-
void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
{
uint32_t ctrl = 0;
@@ -67,7 +65,7 @@ void hdmi_destroy(struct kref *kref)
if (hdmi->i2c)
hdmi_i2c_destroy(hdmi->i2c);
- put_device(&hdmi->pdev->dev);
+ platform_set_drvdata(hdmi->pdev, NULL);
}
/* initialize connector */
@@ -75,7 +73,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
{
struct hdmi *hdmi = NULL;
struct msm_drm_private *priv = dev->dev_private;
- struct platform_device *pdev = hdmi_pdev;
+ struct platform_device *pdev = priv->hdmi_pdev;
struct hdmi_platform_config *config;
int i, ret;
@@ -95,13 +93,13 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
kref_init(&hdmi->refcount);
- get_device(&pdev->dev);
-
hdmi->dev = dev;
hdmi->pdev = pdev;
hdmi->config = config;
hdmi->encoder = encoder;
+ hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
/* not sure about which phy maps to which msm.. probably I miss some */
if (config->phy_init)
hdmi->phy = config->phy_init(hdmi);
@@ -228,6 +226,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector;
+ platform_set_drvdata(pdev, hdmi);
+
return hdmi;
fail:
@@ -249,17 +249,24 @@ fail:
#include <linux/of_gpio.h>
-static int hdmi_dev_probe(struct platform_device *pdev)
+static void set_hdmi_pdev(struct drm_device *dev,
+ struct platform_device *pdev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ priv->hdmi_pdev = pdev;
+}
+
+static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
static struct hdmi_platform_config config = {};
#ifdef CONFIG_OF
- struct device_node *of_node = pdev->dev.of_node;
+ struct device_node *of_node = dev->of_node;
int get_gpio(const char *name)
{
int gpio = of_get_named_gpio(of_node, name, 0);
if (gpio < 0) {
- dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n",
+ dev_err(dev, "failed to get gpio: %s (%d)\n",
name, gpio);
gpio = -1;
}
@@ -305,7 +312,7 @@ static int hdmi_dev_probe(struct platform_device *pdev)
config.ddc_data_gpio = 71;
config.hpd_gpio = 72;
config.mux_en_gpio = -1;
- config.mux_sel_gpio = 13 + NR_GPIO_IRQS;
+ config.mux_sel_gpio = -1;
} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
@@ -336,14 +343,30 @@ static int hdmi_dev_probe(struct platform_device *pdev)
config.mux_sel_gpio = -1;
}
#endif
- pdev->dev.platform_data = &config;
- hdmi_pdev = pdev;
+ dev->platform_data = &config;
+ set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
}
+static void hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ set_hdmi_pdev(dev_get_drvdata(master), NULL);
+}
+
+static const struct component_ops hdmi_ops = {
+ .bind = hdmi_bind,
+ .unbind = hdmi_unbind,
+};
+
+static int hdmi_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &hdmi_ops);
+}
+
static int hdmi_dev_remove(struct platform_device *pdev)
{
- hdmi_pdev = NULL;
+ component_del(&pdev->dev, &hdmi_ops);
return 0;
}
@@ -351,7 +374,6 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,hdmi-tx" },
{}
};
-MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver hdmi_driver = {
.probe = hdmi_dev_probe,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 41b29add70b1..9fafee6a3e43 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -22,6 +22,7 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/hdmi.h>
#include "msm_drv.h"
#include "hdmi.xml.h"
@@ -30,6 +31,12 @@
struct hdmi_phy;
struct hdmi_platform_config;
+struct hdmi_audio {
+ bool enabled;
+ struct hdmi_audio_infoframe infoframe;
+ int rate;
+};
+
struct hdmi {
struct kref refcount;
@@ -38,6 +45,13 @@ struct hdmi {
const struct hdmi_platform_config *config;
+ /* audio state: */
+ struct hdmi_audio audio;
+
+ /* video state: */
+ bool power_on;
+ unsigned long int pixclock;
+
void __iomem *mmio;
struct regulator *hpd_regs[2];
@@ -132,6 +146,17 @@ struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi);
/*
+ * audio:
+ */
+
+int hdmi_audio_update(struct hdmi *hdmi);
+int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
+ uint32_t num_of_channels, uint32_t channel_allocation,
+ uint32_t level_shift, bool down_mix);
+void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
+
+
+/*
* hdmi bridge:
*/
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
new file mode 100644
index 000000000000..872485f60134
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/hdmi.h>
+#include "hdmi.h"
+
+
+/* Supported HDMI Audio channels */
+#define MSM_HDMI_AUDIO_CHANNEL_2 0
+#define MSM_HDMI_AUDIO_CHANNEL_4 1
+#define MSM_HDMI_AUDIO_CHANNEL_6 2
+#define MSM_HDMI_AUDIO_CHANNEL_8 3
+
+/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
+static int nchannels[] = { 2, 4, 6, 8 };
+
+/* Supported HDMI Audio sample rates */
+#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
+#define MSM_HDMI_SAMPLE_RATE_44_1KHZ 1
+#define MSM_HDMI_SAMPLE_RATE_48KHZ 2
+#define MSM_HDMI_SAMPLE_RATE_88_2KHZ 3
+#define MSM_HDMI_SAMPLE_RATE_96KHZ 4
+#define MSM_HDMI_SAMPLE_RATE_176_4KHZ 5
+#define MSM_HDMI_SAMPLE_RATE_192KHZ 6
+#define MSM_HDMI_SAMPLE_RATE_MAX 7
+
+
+struct hdmi_msm_audio_acr {
+ uint32_t n; /* N parameter for clock regeneration */
+ uint32_t cts; /* CTS parameter for clock regeneration */
+};
+
+struct hdmi_msm_audio_arcs {
+ unsigned long int pixclock;
+ struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
+};
+
+#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { (1000 * (pclk)), __VA_ARGS__ }
+
+/* Audio constants lookup table for hdmi_msm_audio_acr_setup */
+/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
+static const struct hdmi_msm_audio_arcs acr_lut[] = {
+ /* 25.200MHz */
+ HDMI_MSM_AUDIO_ARCS(25200, {
+ {4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
+ {12288, 25200}, {25088, 28000}, {24576, 25200} }),
+ /* 27.000MHz */
+ HDMI_MSM_AUDIO_ARCS(27000, {
+ {4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
+ {12288, 27000}, {25088, 30000}, {24576, 27000} }),
+ /* 27.027MHz */
+ HDMI_MSM_AUDIO_ARCS(27030, {
+ {4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
+ {12288, 27027}, {25088, 30030}, {24576, 27027} }),
+ /* 74.250MHz */
+ HDMI_MSM_AUDIO_ARCS(74250, {
+ {4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
+ {12288, 74250}, {25088, 82500}, {24576, 74250} }),
+ /* 148.500MHz */
+ HDMI_MSM_AUDIO_ARCS(148500, {
+ {4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
+ {12288, 148500}, {25088, 165000}, {24576, 148500} }),
+};
+
+static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(acr_lut); i++) {
+ const struct hdmi_msm_audio_arcs *arcs = &acr_lut[i];
+ if (arcs->pixclock == pixclock)
+ return arcs;
+ }
+
+ return NULL;
+}
+
+int hdmi_audio_update(struct hdmi *hdmi)
+{
+ struct hdmi_audio *audio = &hdmi->audio;
+ struct hdmi_audio_infoframe *info = &audio->infoframe;
+ const struct hdmi_msm_audio_arcs *arcs = NULL;
+ bool enabled = audio->enabled;
+ uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
+ uint32_t infofrm_ctrl, audio_config;
+
+ DBG("audio: enabled=%d, channels=%d, channel_allocation=0x%x, "
+ "level_shift_value=%d, downmix_inhibit=%d, rate=%d",
+ audio->enabled, info->channels, info->channel_allocation,
+ info->level_shift_value, info->downmix_inhibit, audio->rate);
+ DBG("video: power_on=%d, pixclock=%lu", hdmi->power_on, hdmi->pixclock);
+
+ if (enabled && !(hdmi->power_on && hdmi->pixclock)) {
+ DBG("disabling audio: no video");
+ enabled = false;
+ }
+
+ if (enabled) {
+ arcs = get_arcs(hdmi->pixclock);
+ if (!arcs) {
+ DBG("disabling audio: unsupported pixclock: %lu",
+ hdmi->pixclock);
+ enabled = false;
+ }
+ }
+
+ /* Read first before writing */
+ acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
+ vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+ aud_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_AUDIO_PKT_CTRL1);
+ infofrm_ctrl = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ audio_config = hdmi_read(hdmi, REG_HDMI_AUDIO_CFG);
+
+ /* Clear N/CTS selection bits */
+ acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SELECT__MASK;
+
+ if (enabled) {
+ uint32_t n, cts, multiplier;
+ enum hdmi_acr_cts select;
+ uint8_t buf[14];
+
+ n = arcs->lut[audio->rate].n;
+ cts = arcs->lut[audio->rate].cts;
+
+ if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) {
+ multiplier = 4;
+ n >>= 2; /* divide N by 4 and use multiplier */
+ } else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate)) {
+ multiplier = 2;
+ n >>= 1; /* divide N by 2 and use multiplier */
+ } else {
+ multiplier = 1;
+ }
+
+ DBG("n=%u, cts=%u, multiplier=%u", n, cts, multiplier);
+
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SOURCE;
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY;
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_N_MULTIPLIER(multiplier);
+
+ if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate))
+ select = ACR_48;
+ else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate))
+ select = ACR_44;
+ else /* default to 32k */
+ select = ACR_32;
+
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SELECT(select);
+
+ hdmi_write(hdmi, REG_HDMI_ACR_0(select - 1),
+ HDMI_ACR_0_CTS(cts));
+ hdmi_write(hdmi, REG_HDMI_ACR_1(select - 1),
+ HDMI_ACR_1_N(n));
+
+ hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL2,
+ COND(info->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
+ HDMI_AUDIO_PKT_CTRL2_OVERRIDE);
+
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_CONT;
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SEND;
+
+ /* configure infoframe: */
+ hdmi_audio_infoframe_pack(info, buf, sizeof(buf));
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
+ (buf[3] << 0) | (buf[4] << 8) |
+ (buf[5] << 16) | (buf[6] << 24));
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
+ (buf[7] << 0) | (buf[8] << 8));
+
+ hdmi_write(hdmi, REG_HDMI_GC, 0);
+
+ vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_ENABLE;
+ vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
+
+ aud_pkt_ctrl |= HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
+
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
+
+ audio_config &= ~HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
+ audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
+ audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
+ } else {
+ hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
+ acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
+ acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
+ vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
+ vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
+ aud_pkt_ctrl &= ~HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
+ audio_config &= ~HDMI_AUDIO_CFG_ENGINE_ENABLE;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_ACR_PKT_CTRL, acr_pkt_ctrl);
+ hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_ctrl);
+ hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL1, aud_pkt_ctrl);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, infofrm_ctrl);
+
+ hdmi_write(hdmi, REG_HDMI_AUD_INT,
+ COND(enabled, HDMI_AUD_INT_AUD_FIFO_URUN_INT) |
+ COND(enabled, HDMI_AUD_INT_AUD_SAM_DROP_INT));
+
+ hdmi_write(hdmi, REG_HDMI_AUDIO_CFG, audio_config);
+
+
+ DBG("audio %sabled", enabled ? "en" : "dis");
+
+ return 0;
+}
+
+int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
+ uint32_t num_of_channels, uint32_t channel_allocation,
+ uint32_t level_shift, bool down_mix)
+{
+ struct hdmi_audio *audio;
+
+ if (!hdmi)
+ return -ENXIO;
+
+ audio = &hdmi->audio;
+
+ if (num_of_channels >= ARRAY_SIZE(nchannels))
+ return -EINVAL;
+
+ audio->enabled = enabled;
+ audio->infoframe.channels = nchannels[num_of_channels];
+ audio->infoframe.channel_allocation = channel_allocation;
+ audio->infoframe.level_shift_value = level_shift;
+ audio->infoframe.downmix_inhibit = down_mix;
+
+ return hdmi_audio_update(hdmi);
+}
+
+void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
+{
+ struct hdmi_audio *audio;
+
+ if (!hdmi)
+ return;
+
+ audio = &hdmi->audio;
+
+ if ((rate < 0) || (rate >= MSM_HDMI_SAMPLE_RATE_MAX))
+ return;
+
+ audio->rate = rate;
+ hdmi_audio_update(hdmi);
+}
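The heart of hdmi_audio_update() is the ACR (audio clock regeneration) programming: pick the N/CTS pair for the current pixel clock from acr_lut, then for 88.2/96 kHz halve N and for 176.4/192 kHz quarter it, making up the difference with the N multiplier field. The standalone snippet below redoes only that arithmetic so it can be checked in isolation; the starting values are taken from the 74.25 MHz row above (192 kHz entry) and everything else is illustrative.

/* Illustrative only: N/CTS scaling for high audio sample rates. */
#include <stdio.h>

enum rate { R48K, R96K, R192K };

int main(void)
{
	/* 74.25 MHz pixel clock, 192 kHz entry of the table above. */
	unsigned int n = 24576, cts = 74250;
	enum rate rate = R192K;
	unsigned int multiplier;

	if (rate == R192K) {
		multiplier = 4;
		n >>= 2;	/* divide N by 4 and use the multiplier */
	} else if (rate == R96K) {
		multiplier = 2;
		n >>= 1;	/* divide N by 2 and use the multiplier */
	} else {
		multiplier = 1;
	}

	/* Hardware still sees the base-rate N (6144 here) per ACR packet. */
	printf("n=%u cts=%u multiplier=%u\n", n, cts, multiplier);
	return 0;
}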
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 7d10e55403c6..f6cf745c249e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -19,11 +19,7 @@
struct hdmi_bridge {
struct drm_bridge base;
-
struct hdmi *hdmi;
- bool power_on;
-
- unsigned long int pixclock;
};
#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
@@ -52,8 +48,8 @@ static void power_on(struct drm_bridge *bridge)
}
if (config->pwr_clk_cnt > 0) {
- DBG("pixclock: %lu", hdmi_bridge->pixclock);
- ret = clk_set_rate(hdmi->pwr_clks[0], hdmi_bridge->pixclock);
+ DBG("pixclock: %lu", hdmi->pixclock);
+ ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
if (ret) {
dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
config->pwr_clk_names[0], ret);
@@ -102,12 +98,13 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
DBG("power up");
- if (!hdmi_bridge->power_on) {
+ if (!hdmi->power_on) {
power_on(bridge);
- hdmi_bridge->power_on = true;
+ hdmi->power_on = true;
+ hdmi_audio_update(hdmi);
}
- phy->funcs->powerup(phy, hdmi_bridge->pixclock);
+ phy->funcs->powerup(phy, hdmi->pixclock);
hdmi_set_mode(hdmi, true);
}
@@ -129,9 +126,10 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
hdmi_set_mode(hdmi, false);
phy->funcs->powerdown(phy);
- if (hdmi_bridge->power_on) {
+ if (hdmi->power_on) {
power_off(bridge);
- hdmi_bridge->power_on = false;
+ hdmi->power_on = false;
+ hdmi_audio_update(hdmi);
}
}
@@ -146,7 +144,7 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
mode = adjusted_mode;
- hdmi_bridge->pixclock = mode->clock * 1000;
+ hdmi->pixclock = mode->clock * 1000;
hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
@@ -194,9 +192,7 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
DBG("frame_ctrl=%08x", frame_ctrl);
hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
- // TODO until we have audio, this might be safest:
- if (hdmi->hdmi_mode)
- hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
+ hdmi_audio_update(hdmi);
}
static const struct drm_bridge_funcs hdmi_bridge_funcs = {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 84c5b13b33c9..3e6c0f3ed592 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -120,7 +120,7 @@ static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
/* grab reference to incoming scanout fb: */
drm_framebuffer_reference(new_fb);
- mdp4_crtc->base.fb = new_fb;
+ mdp4_crtc->base.primary->fb = new_fb;
mdp4_crtc->fb = new_fb;
if (old_fb)
@@ -182,7 +182,7 @@ static void pageflip_cb(struct msm_fence_cb *cb)
struct mdp4_crtc *mdp4_crtc =
container_of(cb, struct mdp4_crtc, pageflip_cb);
struct drm_crtc *crtc = &mdp4_crtc->base;
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
if (!fb)
return;
@@ -348,14 +348,14 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mode->type, mode->flags);
/* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->fb);
+ drm_framebuffer_reference(crtc->primary->fb);
- ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+ ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
- drm_framebuffer_unreference(crtc->fb);
+ drm_framebuffer_unreference(crtc->primary->fb);
dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
mdp4_crtc->name, ret);
return ret;
@@ -368,7 +368,7 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
/* take data from pipe: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
- crtc->fb->pitches[0]);
+ crtc->primary->fb->pitches[0]);
mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
MDP4_DMA_DST_SIZE_WIDTH(0) |
MDP4_DMA_DST_SIZE_HEIGHT(0));
@@ -378,7 +378,7 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
- crtc->fb->pitches[0]);
+ crtc->primary->fb->pitches[0]);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
@@ -388,8 +388,8 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
}
- update_fb(crtc, crtc->fb);
- update_scanout(crtc, crtc->fb);
+ update_fb(crtc, crtc->primary->fb);
+ update_scanout(crtc, crtc->primary->fb);
return 0;
}
@@ -420,19 +420,19 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
int ret;
/* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->fb);
+ drm_framebuffer_reference(crtc->primary->fb);
- ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
+ ret = mdp4_plane_mode_set(plane, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
- drm_framebuffer_unreference(crtc->fb);
+ drm_framebuffer_unreference(crtc->primary->fb);
return ret;
}
- update_fb(crtc, crtc->fb);
- update_scanout(crtc, crtc->fb);
+ update_fb(crtc, crtc->primary->fb);
+ update_scanout(crtc, crtc->primary->fb);
return 0;
}
@@ -740,6 +740,9 @@ void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
+ /* don't actually detach our primary plane: */
+ if (to_mdp4_crtc(crtc)->plane == plane)
+ return;
set_attach(crtc, mdp4_plane_pipe(plane), NULL);
}
@@ -791,7 +794,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
- drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 1e893dd13859..66f33dba1ebb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -222,6 +222,7 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct mdp4_plane *mdp4_plane;
int ret;
+ enum drm_plane_type type;
mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
if (!mdp4_plane) {
@@ -237,9 +238,10 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
ARRAY_SIZE(mdp4_plane->formats));
- drm_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
- mdp4_plane->formats, mdp4_plane->nformats,
- private_plane);
+ type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+ mdp4_plane->formats, mdp4_plane->nformats,
+ type);
mdp4_plane_install_properties(plane, &plane->base);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index f2794021f086..6ea10bdb6e8f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -102,7 +102,7 @@ static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
/* grab reference to incoming scanout fb: */
drm_framebuffer_reference(new_fb);
- mdp5_crtc->base.fb = new_fb;
+ mdp5_crtc->base.primary->fb = new_fb;
mdp5_crtc->fb = new_fb;
if (old_fb)
@@ -289,14 +289,14 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
mode->type, mode->flags);
/* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->fb);
+ drm_framebuffer_reference(crtc->primary->fb);
- ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->fb,
+ ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
- drm_framebuffer_unreference(crtc->fb);
+ drm_framebuffer_unreference(crtc->primary->fb);
dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
mdp5_crtc->name, ret);
return ret;
@@ -306,8 +306,8 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
- update_fb(crtc, crtc->fb);
- update_scanout(crtc, crtc->fb);
+ update_fb(crtc, crtc->primary->fb);
+ update_scanout(crtc, crtc->primary->fb);
return 0;
}
@@ -338,19 +338,19 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
int ret;
/* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->fb);
+ drm_framebuffer_reference(crtc->primary->fb);
- ret = mdp5_plane_mode_set(plane, crtc, crtc->fb,
+ ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
- drm_framebuffer_unreference(crtc->fb);
+ drm_framebuffer_unreference(crtc->primary->fb);
return ret;
}
- update_fb(crtc, crtc->fb);
- update_scanout(crtc, crtc->fb);
+ update_fb(crtc, crtc->primary->fb);
+ update_scanout(crtc, crtc->primary->fb);
return 0;
}
@@ -524,6 +524,9 @@ void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
+ /* don't actually detach our primary plane: */
+ if (to_mdp5_crtc(crtc)->plane == plane)
+ return;
set_attach(crtc, mdp5_plane_pipe(plane), NULL);
}
@@ -559,7 +562,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
- drm_crtc_init(dev, crtc, &mdp5_crtc_funcs);
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 0ac8bb5e7e85..47f7bbb9c15a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -358,6 +358,7 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
int ret;
+ enum drm_plane_type type;
mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
if (!mdp5_plane) {
@@ -373,9 +374,10 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats));
- drm_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
- mdp5_plane->formats, mdp5_plane->nformats,
- private_plane);
+ type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+ mdp5_plane->formats, mdp5_plane->nformats,
+ type);
mdp5_plane_install_properties(plane, &plane->base);
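Both mdp4 and mdp5 now derive a drm_plane_type from the old private_plane flag, register the plane with drm_universal_plane_init(), and hand the primary plane to drm_crtc_init_with_planes(); that is why the crtc->fb accesses earlier in this series become crtc->primary->fb. The following minimal sketch (illustrative only, not msm code; all foo_* names are hypothetical) shows the shape of that conversion:

#include <drm/drm_crtc.h>
#include <linux/slab.h>

static const uint32_t foo_formats[] = { DRM_FORMAT_XRGB8888 };

static struct drm_plane *foo_plane_init(struct drm_device *dev,
					const struct drm_plane_funcs *funcs,
					bool private_plane)
{
	struct drm_plane *plane;
	enum drm_plane_type type;
	int ret;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	/* private_plane was the old drm_plane_init() argument; it maps
	 * directly onto the new plane type enum. */
	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;

	ret = drm_universal_plane_init(dev, plane, 0xff, funcs,
				       foo_formats, ARRAY_SIZE(foo_formats),
				       type);
	if (ret) {
		kfree(plane);
		return ERR_PTR(ret);
	}
	return plane;
}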
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 3be48f7c36be..03455b64a245 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -101,7 +101,8 @@ void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
.count = 1,
};
mdp_irq_register(mdp_kms, &wait.irq);
- wait_event(wait_event, (wait.count <= 0));
+ wait_event_timeout(wait_event, (wait.count <= 0),
+ msecs_to_jiffies(100));
mdp_irq_unregister(mdp_kms, &wait.irq);
}
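The mdp_irq_wait() change above swaps an unbounded wait_event() for wait_event_timeout() capped at 100 ms, so a missed or lost interrupt can no longer block the caller indefinitely. A minimal sketch of the same bounded-wait idiom (illustrative only; foo_wq and foo_done are hypothetical, and the waker is expected to set foo_done and call wake_up(&foo_wq)):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
static bool foo_done;

static int foo_wait(void)
{
	long left;

	/* block until foo_done becomes true, but never longer than 100 ms */
	left = wait_event_timeout(foo_wq, foo_done, msecs_to_jiffies(100));
	if (left == 0) {
		pr_warn("foo: timed out waiting for irq\n");
		return -ETIMEDOUT;
	}
	return 0;
}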
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index e6adafc7eff3..f9de156b9e65 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -56,6 +56,10 @@ static char *vram;
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU");
module_param(vram, charp, 0);
+/*
+ * Util/helpers:
+ */
+
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname)
{
@@ -143,6 +147,8 @@ static int msm_unload(struct drm_device *dev)
priv->vram.paddr, &attrs);
}
+ component_unbind_all(dev->dev, dev);
+
dev->dev_private = NULL;
kfree(priv);
@@ -175,6 +181,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
struct msm_kms *kms;
int ret;
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(dev->dev, "failed to allocate private data\n");
@@ -226,6 +233,13 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
(uint32_t)(priv->vram.paddr + size));
}
+ platform_set_drvdata(pdev, dev);
+
+ /* Bind all our sub-components: */
+ ret = component_bind_all(dev->dev, dev);
+ if (ret)
+ return ret;
+
switch (get_mdp_ver(pdev)) {
case 4:
kms = mdp4_kms_init(dev);
@@ -281,8 +295,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
goto fail;
}
- platform_set_drvdata(pdev, dev);
-
#ifdef CONFIG_DRM_MSM_FBDEV
priv->fbdev = msm_fbdev_init(dev);
#endif
@@ -311,7 +323,6 @@ static void load_gpu(struct drm_device *dev)
gpu = NULL;
/* not fatal */
}
- mutex_unlock(&dev->struct_mutex);
if (gpu) {
int ret;
@@ -321,10 +332,16 @@ static void load_gpu(struct drm_device *dev)
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
+ } else {
+ /* give inactive pm a chance to kick in: */
+ msm_gpu_retire(gpu);
}
+
}
priv->gpu = gpu;
+
+ mutex_unlock(&dev->struct_mutex);
}
static int msm_open(struct drm_device *dev, struct drm_file *file)
@@ -647,6 +664,12 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_gem_new *args = data;
+
+ if (args->flags & ~MSM_BO_FLAGS) {
+ DRM_ERROR("invalid flags: %08x\n", args->flags);
+ return -EINVAL;
+ }
+
return msm_gem_new_handle(dev, file, args->size,
args->flags, &args->handle);
}
@@ -660,6 +683,11 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
int ret;
+ if (args->op & ~MSM_PREP_FLAGS) {
+ DRM_ERROR("invalid op: %08x\n", args->op);
+ return -EINVAL;
+ }
+
obj = drm_gem_object_lookup(dev, file, args->handle);
if (!obj)
return -ENOENT;
@@ -714,7 +742,14 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_wait_fence *args = data;
- return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
+
+ if (args->pad) {
+ DRM_ERROR("invalid pad: %08x\n", args->pad);
+ return -EINVAL;
+ }
+
+ return msm_wait_fence_interruptable(dev, args->fence,
+ &TS(args->timeout));
}
static const struct drm_ioctl_desc msm_ioctls[] = {
@@ -819,18 +854,110 @@ static const struct dev_pm_ops msm_pm_ops = {
};
/*
+ * Componentized driver support:
+ */
+
+#ifdef CONFIG_OF
+/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
+ * (or probably any other).. so probably some room for some helpers
+ */
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int msm_drm_add_components(struct device *master, struct master *m)
+{
+ struct device_node *np = master->of_node;
+ unsigned i;
+ int ret;
+
+ for (i = 0; ; i++) {
+ struct device_node *node;
+
+ node = of_parse_phandle(np, "connectors", i);
+ if (!node)
+ break;
+
+ ret = component_master_add_child(m, compare_of, node);
+ of_node_put(node);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+#else
+static int compare_dev(struct device *dev, void *data)
+{
+ return dev == data;
+}
+
+static int msm_drm_add_components(struct device *master, struct master *m)
+{
+ /* For non-DT case, it kinda sucks. We don't actually have a way
+ * to know whether or not we are waiting for certain devices (or if
+ * they are simply not present). But for non-DT we only need to
+ * care about apq8064/apq8060/etc (all mdp4/a3xx):
+ */
+ static const char *devnames[] = {
+ "hdmi_msm.0", "kgsl-3d0.0",
+ };
+ int i;
+
+ DBG("Adding components..");
+
+ for (i = 0; i < ARRAY_SIZE(devnames); i++) {
+ struct device *dev;
+ int ret;
+
+ dev = bus_find_device_by_name(&platform_bus_type,
+ NULL, devnames[i]);
+ if (!dev) {
+ dev_info(master, "still waiting for %s\n", devnames[i]);
+ return -EPROBE_DEFER;
+ }
+
+ ret = component_master_add_child(m, compare_dev, dev);
+ if (ret) {
+ DBG("could not add child: %d", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static int msm_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&msm_driver, to_platform_device(dev));
+}
+
+static void msm_drm_unbind(struct device *dev)
+{
+ drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+}
+
+static const struct component_master_ops msm_drm_ops = {
+ .add_components = msm_drm_add_components,
+ .bind = msm_drm_bind,
+ .unbind = msm_drm_unbind,
+};
+
+/*
* Platform driver:
*/
static int msm_pdev_probe(struct platform_device *pdev)
{
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- return drm_platform_init(&msm_driver, pdev);
+ return component_master_add(&pdev->dev, &msm_drm_ops);
}
static int msm_pdev_remove(struct platform_device *pdev)
{
- drm_put_dev(platform_get_drvdata(pdev));
+ component_master_del(&pdev->dev, &msm_drm_ops);
return 0;
}
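With the changes above, msm_pdev_probe() only registers a component master; the DRM device is created in msm_drm_bind() once msm_drm_add_components() has located every sub-device (HDMI, GPU) and each of those has registered itself with the component framework. For reference, a minimal sketch of the sub-device side of that handshake, using a hypothetical foo platform driver (the real hdmi/gpu drivers differ in detail):

#include <linux/component.h>
#include <linux/platform_device.h>

static int foo_bind(struct device *dev, struct device *master, void *data)
{
	/* 'data' is whatever the master passed to component_bind_all();
	 * in the msm case that is the struct drm_device.  Hook this
	 * sub-device into it here. */
	return 0;
}

static void foo_unbind(struct device *dev, struct device *master, void *data)
{
	/* undo foo_bind() */
}

static const struct component_ops foo_ops = {
	.bind   = foo_bind,
	.unbind = foo_unbind,
};

static int foo_probe(struct platform_device *pdev)
{
	/* defer real setup to foo_bind(), which runs when the master binds */
	return component_add(&pdev->dev, &foo_ops);
}

static int foo_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &foo_ops);
	return 0;
}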
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 3d63269c5b29..9d10ee0b5aac 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -22,6 +22,7 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
+#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -69,6 +70,9 @@ struct msm_drm_private {
struct msm_kms *kms;
+ /* subordinate devices, if present: */
+ struct platform_device *hdmi_pdev, *gpu_pdev;
+
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5423e914e491..1f1f4cffdaed 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -23,7 +23,6 @@
* Cmdstream submission:
*/
-#define BO_INVALID_FLAGS ~(MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID 0x8000
#define BO_LOCKED 0x4000
@@ -77,7 +76,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
goto out_unlock;
}
- if (submit_bo.flags & BO_INVALID_FLAGS) {
+ if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
@@ -369,6 +368,18 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
+ /* validate input from userspace: */
+ switch (submit_cmd.type) {
+ case MSM_SUBMIT_CMD_BUF:
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ break;
+ default:
+ DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = submit_bo(submit, submit_cmd.submit_idx,
&msm_obj, &iova, NULL);
if (ret)
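The msm_drv.c and msm_gem_submit.c hunks above tighten validation of ioctl arguments: unknown flag bits, op bits, pad fields and command types are rejected with -EINVAL before any work is done, which keeps those bits available for future UAPI extensions. A minimal sketch of the idiom (illustrative only; FOO_VALID_FLAGS and struct drm_foo_args are hypothetical, not msm UAPI):

#include <drm/drmP.h>

#define FOO_FLAG_READ   0x01	/* hypothetical */
#define FOO_FLAG_WRITE  0x02	/* hypothetical */
#define FOO_VALID_FLAGS (FOO_FLAG_READ | FOO_FLAG_WRITE)

struct drm_foo_args {		/* hypothetical UAPI struct */
	__u32 flags;
	__u32 handle;
};

static int foo_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_foo_args *args = data;

	/* reject any bit userspace sets that the kernel does not know about */
	if (args->flags & ~FOO_VALID_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/* ... proceed with validated input ... */
	return 0;
}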
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 0cfe3f426ee4..3e667ca1f2b9 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -154,9 +154,18 @@ static int disable_axi(struct msm_gpu *gpu)
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
+ struct drm_device *dev = gpu->dev;
int ret;
- DBG("%s", gpu->name);
+ DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (gpu->active_cnt++ > 0)
+ return 0;
+
+ if (WARN_ON(gpu->active_cnt <= 0))
+ return -EINVAL;
ret = enable_pwrrail(gpu);
if (ret)
@@ -175,9 +184,18 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
+ struct drm_device *dev = gpu->dev;
int ret;
- DBG("%s", gpu->name);
+ DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (--gpu->active_cnt > 0)
+ return 0;
+
+ if (WARN_ON(gpu->active_cnt < 0))
+ return -EINVAL;
ret = disable_axi(gpu);
if (ret)
@@ -195,6 +213,55 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
}
/*
+ * Inactivity detection (for suspend):
+ */
+
+static void inactive_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
+ struct drm_device *dev = gpu->dev;
+
+ if (gpu->inactive)
+ return;
+
+ DBG("%s: inactive!\n", gpu->name);
+ mutex_lock(&dev->struct_mutex);
+ if (!(msm_gpu_active(gpu) || gpu->inactive)) {
+ disable_axi(gpu);
+ disable_clk(gpu);
+ gpu->inactive = true;
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void inactive_handler(unsigned long data)
+{
+ struct msm_gpu *gpu = (struct msm_gpu *)data;
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+
+ queue_work(priv->wq, &gpu->inactive_work);
+}
+
+/* cancel inactive timer and make sure we are awake: */
+static void inactive_cancel(struct msm_gpu *gpu)
+{
+ DBG("%s", gpu->name);
+ del_timer(&gpu->inactive_timer);
+ if (gpu->inactive) {
+ enable_clk(gpu);
+ enable_axi(gpu);
+ gpu->inactive = false;
+ }
+}
+
+static void inactive_start(struct msm_gpu *gpu)
+{
+ DBG("%s", gpu->name);
+ mod_timer(&gpu->inactive_timer,
+ round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
+}
+
+/*
* Hangcheck detection for locked gpu:
*/
@@ -206,7 +273,10 @@ static void recover_worker(struct work_struct *work)
dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
mutex_lock(&dev->struct_mutex);
- gpu->funcs->recover(gpu);
+ if (msm_gpu_active(gpu)) {
+ inactive_cancel(gpu);
+ gpu->funcs->recover(gpu);
+ }
mutex_unlock(&dev->struct_mutex);
msm_gpu_retire(gpu);
@@ -281,6 +351,9 @@ static void retire_worker(struct work_struct *work)
}
mutex_unlock(&dev->struct_mutex);
+
+ if (!msm_gpu_active(gpu))
+ inactive_start(gpu);
}
/* call from irq handler to schedule work to retire bo's */
@@ -302,6 +375,8 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
gpu->submitted_fence = submit->fence;
+ inactive_cancel(gpu);
+
ret = gpu->funcs->submit(gpu, submit, ctx);
priv->lastctx = ctx;
@@ -357,11 +432,15 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
+ gpu->inactive = true;
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
+ INIT_WORK(&gpu->inactive_work, inactive_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
+ setup_timer(&gpu->inactive_timer, inactive_handler,
+ (unsigned long)gpu);
setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
(unsigned long)gpu);
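The msm_gpu.c additions above combine two mechanisms: msm_gpu_pm_resume()/_suspend() now reference-count active_cnt under struct_mutex, and an inactivity timer re-armed after each retire queues a worker that gates clocks once the GPU has been idle for roughly four frames. A minimal sketch of that timer-plus-worker idle pattern, with hypothetical foo_* types and helpers (setup_timer() matches the timer API of this kernel era):

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

struct foo_gpu {			/* hypothetical */
	struct mutex lock;
	struct timer_list idle_timer;
	struct work_struct idle_work;
	bool powered;
};

static void foo_idle_work(struct work_struct *work)
{
	struct foo_gpu *gpu = container_of(work, struct foo_gpu, idle_work);

	/* worker context: may sleep, so taking locks and gating clocks is safe */
	mutex_lock(&gpu->lock);
	if (gpu->powered) {
		/* gate clocks / busses here */
		gpu->powered = false;
	}
	mutex_unlock(&gpu->lock);
}

static void foo_idle_timer(unsigned long data)
{
	struct foo_gpu *gpu = (struct foo_gpu *)data;

	/* timer (softirq) context: cannot sleep, so only queue the work */
	schedule_work(&gpu->idle_work);
}

static void foo_gpu_init_idle(struct foo_gpu *gpu)
{
	mutex_init(&gpu->lock);
	INIT_WORK(&gpu->idle_work, foo_idle_work);
	setup_timer(&gpu->idle_timer, foo_idle_timer, (unsigned long)gpu);
}

static void foo_gpu_mark_idle_soon(struct foo_gpu *gpu)
{
	/* re-armed after every retire; ~66 ms is about four 60 Hz frames */
	mod_timer(&gpu->idle_timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(66)));
}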
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 458db8c64c28..fad27008922f 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -72,6 +72,10 @@ struct msm_gpu {
uint32_t submitted_fence;
+ /* is gpu powered/active? */
+ int active_cnt;
+ bool inactive;
+
/* worker for handling active-list retiring: */
struct work_struct retire_work;
@@ -91,7 +95,12 @@ struct msm_gpu {
uint32_t bsc;
#endif
- /* Hang Detction: */
+ /* Hang and Inactivity Detection:
+ */
+#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
+#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
+ struct timer_list inactive_timer;
+ struct work_struct inactive_work;
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
@@ -99,6 +108,11 @@ struct msm_gpu {
struct work_struct recover_work;
};
+static inline bool msm_gpu_active(struct msm_gpu *gpu)
+{
+ return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+}
+
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
msm_writel(data, gpu->mmio + (reg << 2));
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 7cf787d697b1..637c29a33127 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -11,7 +11,7 @@ config DRM_NOUVEAU
select FB
select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
- select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
+ select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
select MXM_WMI if ACPI && X86
@@ -19,7 +19,6 @@ config DRM_NOUVEAU
# Similar to i915, we need to select ACPI_VIDEO and it's dependencies
select BACKLIGHT_LCD_SUPPORT if ACPI && X86
select BACKLIGHT_CLASS_DEVICE if ACPI && X86
- select VIDEO_OUTPUT_CONTROL if ACPI && X86
select INPUT if ACPI && X86
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index d310c195bdfe..b7d216264775 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -48,6 +48,7 @@ nouveau-y += core/subdev/bios/therm.o
nouveau-y += core/subdev/bios/vmap.o
nouveau-y += core/subdev/bios/volt.o
nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bios/P0260.o
nouveau-y += core/subdev/bus/hwsq.o
nouveau-y += core/subdev/bus/nv04.o
nouveau-y += core/subdev/bus/nv31.o
@@ -77,6 +78,7 @@ nouveau-y += core/subdev/devinit/nv98.o
nouveau-y += core/subdev/devinit/nva3.o
nouveau-y += core/subdev/devinit/nvaf.o
nouveau-y += core/subdev/devinit/nvc0.o
+nouveau-y += core/subdev/devinit/gm107.o
nouveau-y += core/subdev/fb/base.o
nouveau-y += core/subdev/fb/nv04.o
nouveau-y += core/subdev/fb/nv10.o
@@ -100,6 +102,7 @@ nouveau-y += core/subdev/fb/nvaa.o
nouveau-y += core/subdev/fb/nvaf.o
nouveau-y += core/subdev/fb/nvc0.o
nouveau-y += core/subdev/fb/nve0.o
+nouveau-y += core/subdev/fb/gm107.o
nouveau-y += core/subdev/fb/ramnv04.o
nouveau-y += core/subdev/fb/ramnv10.o
nouveau-y += core/subdev/fb/ramnv1a.o
@@ -114,6 +117,7 @@ nouveau-y += core/subdev/fb/ramnva3.o
nouveau-y += core/subdev/fb/ramnvaa.o
nouveau-y += core/subdev/fb/ramnvc0.o
nouveau-y += core/subdev/fb/ramnve0.o
+nouveau-y += core/subdev/fb/ramgm107.o
nouveau-y += core/subdev/fb/sddr3.o
nouveau-y += core/subdev/fb/gddr5.o
nouveau-y += core/subdev/gpio/base.o
@@ -136,7 +140,8 @@ nouveau-y += core/subdev/instmem/base.o
nouveau-y += core/subdev/instmem/nv04.o
nouveau-y += core/subdev/instmem/nv40.o
nouveau-y += core/subdev/instmem/nv50.o
-nouveau-y += core/subdev/ltcg/nvc0.o
+nouveau-y += core/subdev/ltcg/gf100.o
+nouveau-y += core/subdev/ltcg/gm107.o
nouveau-y += core/subdev/mc/base.o
nouveau-y += core/subdev/mc/nv04.o
nouveau-y += core/subdev/mc/nv40.o
@@ -170,6 +175,7 @@ nouveau-y += core/subdev/therm/nva3.o
nouveau-y += core/subdev/therm/nvd0.o
nouveau-y += core/subdev/timer/base.o
nouveau-y += core/subdev/timer/nv04.o
+nouveau-y += core/subdev/timer/gk20a.o
nouveau-y += core/subdev/vm/base.o
nouveau-y += core/subdev/vm/nv04.o
nouveau-y += core/subdev/vm/nv41.o
@@ -206,6 +212,7 @@ nouveau-y += core/engine/device/nv40.o
nouveau-y += core/engine/device/nv50.o
nouveau-y += core/engine/device/nvc0.o
nouveau-y += core/engine/device/nve0.o
+nouveau-y += core/engine/device/gm100.o
nouveau-y += core/engine/disp/base.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
@@ -216,6 +223,7 @@ nouveau-y += core/engine/disp/nva3.o
nouveau-y += core/engine/disp/nvd0.o
nouveau-y += core/engine/disp/nve0.o
nouveau-y += core/engine/disp/nvf0.o
+nouveau-y += core/engine/disp/gm107.o
nouveau-y += core/engine/disp/dacnv50.o
nouveau-y += core/engine/disp/dport.o
nouveau-y += core/engine/disp/hdanva3.o
@@ -242,13 +250,14 @@ nouveau-y += core/engine/graph/ctxnv40.o
nouveau-y += core/engine/graph/ctxnv50.o
nouveau-y += core/engine/graph/ctxnvc0.o
nouveau-y += core/engine/graph/ctxnvc1.o
-nouveau-y += core/engine/graph/ctxnvc3.o
+nouveau-y += core/engine/graph/ctxnvc4.o
nouveau-y += core/engine/graph/ctxnvc8.o
nouveau-y += core/engine/graph/ctxnvd7.o
nouveau-y += core/engine/graph/ctxnvd9.o
nouveau-y += core/engine/graph/ctxnve4.o
nouveau-y += core/engine/graph/ctxnvf0.o
nouveau-y += core/engine/graph/ctxnv108.o
+nouveau-y += core/engine/graph/ctxgm107.o
nouveau-y += core/engine/graph/nv04.o
nouveau-y += core/engine/graph/nv10.o
nouveau-y += core/engine/graph/nv20.o
@@ -261,13 +270,14 @@ nouveau-y += core/engine/graph/nv40.o
nouveau-y += core/engine/graph/nv50.o
nouveau-y += core/engine/graph/nvc0.o
nouveau-y += core/engine/graph/nvc1.o
-nouveau-y += core/engine/graph/nvc3.o
+nouveau-y += core/engine/graph/nvc4.o
nouveau-y += core/engine/graph/nvc8.o
nouveau-y += core/engine/graph/nvd7.o
nouveau-y += core/engine/graph/nvd9.o
nouveau-y += core/engine/graph/nve4.o
nouveau-y += core/engine/graph/nvf0.o
nouveau-y += core/engine/graph/nv108.o
+nouveau-y += core/engine/graph/gm107.o
nouveau-y += core/engine/mpeg/nv31.o
nouveau-y += core/engine/mpeg/nv40.o
nouveau-y += core/engine/mpeg/nv44.o
diff --git a/drivers/gpu/drm/nouveau/core/core/namedb.c b/drivers/gpu/drm/nouveau/core/core/namedb.c
index 1ce95a8709df..0594a599f6fb 100644
--- a/drivers/gpu/drm/nouveau/core/core/namedb.c
+++ b/drivers/gpu/drm/nouveau/core/core/namedb.c
@@ -167,7 +167,7 @@ int
nouveau_namedb_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, u32 pclass,
- struct nouveau_oclass *sclass, u32 engcls,
+ struct nouveau_oclass *sclass, u64 engcls,
int length, void **pobject)
{
struct nouveau_namedb *namedb;
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
index 313380ce632d..dee5d1235e9b 100644
--- a/drivers/gpu/drm/nouveau/core/core/parent.c
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -49,7 +49,7 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
mask = nv_parent(parent)->engine;
while (mask) {
- int i = ffsll(mask) - 1;
+ int i = __ffs64(mask);
if (nv_iclass(parent, NV_CLIENT_CLASS))
engine = nv_engine(nv_client(parent)->device);
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index dd01c6c435d6..18c8c7245b73 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -131,8 +131,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- mmio_base = pci_resource_start(device->pdev, 0);
- mmio_size = pci_resource_len(device->pdev, 0);
+ mmio_base = nv_device_resource_start(device, 0);
+ mmio_size = nv_device_resource_len(device, 0);
/* translate api disable mask into internal mapping */
disable = args->debug0;
@@ -185,6 +185,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
case 0x0e0:
case 0x0f0:
case 0x100: device->card_type = NV_E0; break;
+ case 0x110: device->card_type = GM100; break;
default:
break;
}
@@ -208,6 +209,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
case NV_C0:
case NV_D0: ret = nvc0_identify(device); break;
case NV_E0: ret = nve0_identify(device); break;
+ case GM100: ret = gm100_identify(device); break;
default:
ret = -EINVAL;
break;
@@ -446,6 +448,72 @@ nouveau_device_dtor(struct nouveau_object *object)
nouveau_engine_destroy(&device->base);
}
+resource_size_t
+nv_device_resource_start(struct nouveau_device *device, unsigned int bar)
+{
+ if (nv_device_is_pci(device)) {
+ return pci_resource_start(device->pdev, bar);
+ } else {
+ struct resource *res;
+ res = platform_get_resource(device->platformdev,
+ IORESOURCE_MEM, bar);
+ if (!res)
+ return 0;
+ return res->start;
+ }
+}
+
+resource_size_t
+nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
+{
+ if (nv_device_is_pci(device)) {
+ return pci_resource_len(device->pdev, bar);
+ } else {
+ struct resource *res;
+ res = platform_get_resource(device->platformdev,
+ IORESOURCE_MEM, bar);
+ if (!res)
+ return 0;
+ return resource_size(res);
+ }
+}
+
+dma_addr_t
+nv_device_map_page(struct nouveau_device *device, struct page *page)
+{
+ dma_addr_t ret;
+
+ if (nv_device_is_pci(device)) {
+ ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(device->pdev, ret))
+ ret = 0;
+ } else {
+ ret = page_to_phys(page);
+ }
+
+ return ret;
+}
+
+void
+nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
+{
+ if (nv_device_is_pci(device))
+ pci_unmap_page(device->pdev, addr, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+}
+
+int
+nv_device_get_irq(struct nouveau_device *device, bool stall)
+{
+ if (nv_device_is_pci(device)) {
+ return device->pdev->irq;
+ } else {
+ return platform_get_irq_byname(device->platformdev,
+ stall ? "stall" : "nonstall");
+ }
+}
+
static struct nouveau_oclass
nouveau_device_oclass = {
.handle = NV_ENGINE(DEVICE, 0x00),
@@ -457,8 +525,8 @@ nouveau_device_oclass = {
};
int
-nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
- const char *cfg, const char *dbg,
+nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
+ const char *sname, const char *cfg, const char *dbg,
int length, void **pobject)
{
struct nouveau_device *device;
@@ -476,7 +544,14 @@ nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
if (ret)
goto done;
- device->pdev = pdev;
+ switch (type) {
+ case NOUVEAU_BUS_PCI:
+ device->pdev = dev;
+ break;
+ case NOUVEAU_BUS_PLATFORM:
+ device->platformdev = dev;
+ break;
+ }
device->handle = name;
device->cfgopt = cfg;
device->dbgopt = dbg;
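nouveau_device_create_() above now takes a bus type, and helpers such as nv_device_resource_start()/_len() hide whether a BAR comes from the PCI core or from a platform_device resource, which is what lets the same code drive both discrete boards and SoC GPUs. A condensed sketch of the same dispatch, with a hypothetical foo_device wrapper (not nouveau's actual structures):

#include <linux/pci.h>
#include <linux/platform_device.h>

struct foo_device {				/* hypothetical wrapper */
	struct pci_dev *pdev;			/* set when probed via PCI */
	struct platform_device *platformdev;	/* set when probed via DT/platform */
};

static bool foo_device_is_pci(struct foo_device *dev)
{
	return dev->pdev != NULL;
}

static resource_size_t foo_resource_start(struct foo_device *dev,
					  unsigned int bar)
{
	struct resource *res;

	if (foo_device_is_pci(dev))
		return pci_resource_start(dev->pdev, bar);

	res = platform_get_resource(dev->platformdev, IORESOURCE_MEM, bar);
	return res ? res->start : 0;
}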
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
new file mode 100644
index 000000000000..d258c21c4a22
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/ltcg.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+#include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>
+#include <engine/perfmon.h>
+
+int
+gm100_identify(struct nouveau_device *device)
+{
+ switch (device->chipset) {
+ case 0x117:
+ device->cname = "GM107";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+#if 0
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+#endif
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gm107_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+#if 0
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gm107_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+#endif
+ break;
+ default:
+ nv_fatal(device, "unknown Maxwell chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index 32113b08c4d5..0a51ff4e9e00 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -60,7 +60,7 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x05:
device->cname = "NV05";
@@ -78,7 +78,7 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown RIVA chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 744f15d7e131..e008de8b51b0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -60,7 +60,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x15:
device->cname = "NV15";
@@ -79,7 +79,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x16:
device->cname = "NV16";
@@ -98,7 +98,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x1a:
device->cname = "nForce";
@@ -117,7 +117,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x11:
device->cname = "NV11";
@@ -136,7 +136,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x17:
device->cname = "NV17";
@@ -155,7 +155,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x1f:
device->cname = "nForce2";
@@ -174,7 +174,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x18:
device->cname = "NV18";
@@ -193,7 +193,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown Celsius chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 27ba61fb2710..7b629a3aed05 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -63,7 +63,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x25:
device->cname = "NV25";
@@ -82,7 +82,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x28:
device->cname = "NV28";
@@ -101,7 +101,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x2a:
device->cname = "NV2A";
@@ -120,7 +120,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown Kelvin chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index fd47ace67543..7dfddd5a1908 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -63,7 +63,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x35:
device->cname = "NV35";
@@ -82,7 +82,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x31:
device->cname = "NV31";
@@ -102,7 +102,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x36:
device->cname = "NV36";
@@ -122,7 +122,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x34:
device->cname = "NV34";
@@ -142,7 +142,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown Rankine chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 08b88591ed60..7c1ce6cf4f1f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -70,7 +70,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x41:
@@ -93,7 +93,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x42:
@@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x43:
@@ -139,7 +139,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x45:
@@ -162,7 +162,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x47:
@@ -185,7 +185,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x49:
@@ -208,7 +208,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x4b:
@@ -231,7 +231,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x44:
@@ -254,7 +254,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x46:
@@ -277,7 +277,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x4a:
@@ -300,7 +300,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x4c:
@@ -323,7 +323,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x4e:
@@ -346,7 +346,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x63:
@@ -369,7 +369,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x67:
@@ -392,7 +392,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x68:
@@ -415,7 +415,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index 81d5c26643d5..66499fa0f758 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -79,7 +79,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv50_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv50_perfmon_oclass;
break;
case 0x84:
@@ -107,7 +107,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0x86:
@@ -135,7 +135,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0x92:
@@ -163,7 +163,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0x94:
@@ -191,7 +191,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0x96:
@@ -219,7 +219,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0x98:
@@ -247,7 +247,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0xa0:
@@ -275,7 +275,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva0_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0xaa:
@@ -303,7 +303,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0xac:
@@ -331,7 +331,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0xa3:
@@ -361,7 +361,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
break;
case 0xa5:
@@ -390,7 +390,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
break;
case 0xa8:
@@ -419,7 +419,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
break;
case 0xaf:
@@ -448,7 +448,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index b7d66b59f43d..2075b3027052 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -70,7 +70,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -86,7 +86,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xc4:
@@ -102,7 +102,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -112,13 +112,13 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xc3:
@@ -134,7 +134,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -144,12 +144,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xce:
@@ -165,7 +165,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -175,13 +175,13 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xcf:
@@ -197,7 +197,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -207,13 +207,13 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xc1:
@@ -229,7 +229,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -244,7 +244,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xc8:
@@ -260,7 +260,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -276,7 +276,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xd9:
@@ -292,7 +292,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -307,7 +307,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvd0_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xd7:
@@ -323,7 +323,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -336,7 +336,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvd0_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 987edbc30a09..9784cbf8a9d2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -70,7 +70,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -81,7 +81,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
@@ -103,7 +103,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -114,7 +114,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
@@ -136,7 +136,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -147,7 +147,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
@@ -169,7 +169,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -180,7 +180,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
@@ -204,7 +204,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -215,7 +215,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 1bd4c63369c1..3ca2d25b7f5e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -273,7 +273,7 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
.outp = outp,
.head = head,
}, *dp = &_dp;
- const u32 bw_list[] = { 270000, 162000, 0 };
+ const u32 bw_list[] = { 540000, 270000, 162000, 0 };
const u32 *link_bw = bw_list;
u8 hdr, cnt, len;
u32 data;
@@ -312,6 +312,14 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
ERR("failed to read DPCD\n");
}
+ /* bring capabilities within encoder limits */
+ if ((dp->dpcd[2] & 0x1f) > dp->outp->dpconf.link_nr) {
+ dp->dpcd[2] &= ~0x1f;
+ dp->dpcd[2] |= dp->outp->dpconf.link_nr;
+ }
+ if (dp->dpcd[1] > dp->outp->dpconf.link_bw)
+ dp->dpcd[1] = dp->outp->dpconf.link_bw;
+
/* adjust required bandwidth for 8B/10B coding overhead */
datarate = (datarate / 8) * 10;
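The dport.c hunk above makes two changes to link training: 540000 (the 5.4Gb/s rate) joins the list of candidate link bandwidths, and the lane-count and link-rate fields read back from the sink's DPCD are clamped to the limits the encoder reports in dpconf before a configuration is chosen. A minimal standalone sketch of that clamp, with invented stand-ins for the DPCD buffer and the encoder limits:

/* editor's sketch, not part of the patch: clamping sink DPCD
 * capabilities to the encoder's limits; the types are invented */
#include <stdint.h>
#include <stdio.h>

struct encoder_limits {
	uint8_t link_nr;        /* max lane count the encoder can drive */
	uint8_t link_bw;        /* max link-rate code (0x06/0x0a/0x14) */
};

static void clamp_dpcd(uint8_t dpcd[3], const struct encoder_limits *conf)
{
	/* DPCD[2] bits 4:0 hold the sink's maximum lane count */
	if ((dpcd[2] & 0x1f) > conf->link_nr) {
		dpcd[2] &= ~0x1f;
		dpcd[2] |= conf->link_nr;
	}

	/* DPCD[1] holds the sink's maximum link-rate code */
	if (dpcd[1] > conf->link_bw)
		dpcd[1] = conf->link_bw;
}

int main(void)
{
	uint8_t dpcd[3] = { 0x12, 0x14, 0x84 };  /* sink: 5.4Gb/s, 4 lanes */
	struct encoder_limits conf = { .link_nr = 2, .link_bw = 0x0a };

	clamp_dpcd(dpcd, &conf);
	printf("negotiable: bw code 0x%02x, %d lane(s)\n",
	       dpcd[1], dpcd[2] & 0x1f);
	return 0;
}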
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
new file mode 100644
index 000000000000..cf6f59677b74
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+gm107_disp_sclass[] = {
+ { GM107_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { GM107_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { GM107_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { GM107_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { GM107_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+gm107_disp_base_oclass[] = {
+ { GM107_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = gm107_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+ priv->sclass = gm107_disp_sclass;
+ priv->head.nr = heads;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.dp = &nvd0_sor_dp_func;
+ return 0;
+}
+
+struct nouveau_oclass *
+gm107_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x07),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm107_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .mthd.core = &nve0_disp_mast_mthd_chan,
+ .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .mthd.prev = -0x020000,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 7cf8b1348632..6c89af792889 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include <engine/disp.h>
+#include "priv.h"
#include <core/event.h>
#include <core/class.h>
@@ -138,13 +138,13 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nv04_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_disp_oclass = &(struct nouveau_disp_impl) {
+ .base.handle = NV_ENGINE(DISP, 0x04),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_disp_ctor,
.dtor = _nouveau_disp_dtor,
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
-};
+}.base;
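The nv04.c hunk above is the template for every display oclass conversion in this patch: the exported per-chipset struct nouveau_oclass becomes a pointer taken at the .base member of a larger implementation structure built as a compound literal, which is why the device tables earlier in the diff now assign nv04_disp_oclass and friends without the leading ampersand. A minimal standalone sketch of the idiom, using invented type and symbol names rather than the driver's own:

/* editor's sketch, not part of the patch: base class at offset zero,
 * exported through a pointer into a static compound literal */
#include <stdio.h>

struct base_class {
	unsigned handle;
	const char *name;
};

struct disp_impl {
	struct base_class base;   /* must stay the first member */
	int head_count;           /* implementation-private detail */
};

/* callers only see the base pointer; the implementation is recovered
 * by casting, which is safe because base sits at offset zero */
struct base_class *demo_disp_oclass = &(struct disp_impl) {
	.base.handle = 0x0004,
	.base.name   = "DISP",
	.head_count  = 2,
}.base;

int main(void)
{
	const struct disp_impl *impl =
		(const struct disp_impl *)demo_disp_oclass;

	printf("%s: handle 0x%04x, %d head(s)\n", demo_disp_oclass->name,
	       demo_disp_oclass->handle, impl->head_count);
	return 0;
}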
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 9ad722e4e087..9a0cab9c3adb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -26,8 +26,7 @@
#include <core/parent.h>
#include <core/handle.h>
#include <core/class.h>
-
-#include <engine/disp.h>
+#include <core/enum.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -227,6 +226,177 @@ nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
* EVO master channel object
******************************************************************************/
+static void
+nv50_disp_mthd_list(struct nv50_disp_priv *priv, int debug, u32 base, int c,
+ const struct nv50_disp_mthd_list *list, int inst)
+{
+ struct nouveau_object *disp = nv_object(priv);
+ int i;
+
+ for (i = 0; list->data[i].mthd; i++) {
+ if (list->data[i].addr) {
+ u32 next = nv_rd32(priv, list->data[i].addr + base + 0);
+ u32 prev = nv_rd32(priv, list->data[i].addr + base + c);
+ u32 mthd = list->data[i].mthd + (list->mthd * inst);
+ const char *name = list->data[i].name;
+ char mods[16];
+
+ if (prev != next)
+ snprintf(mods, sizeof(mods), "-> 0x%08x", next);
+ else
+ snprintf(mods, sizeof(mods), "%13c", ' ');
+
+ nv_printk_(disp, debug, "\t0x%04x: 0x%08x %s%s%s\n",
+ mthd, prev, mods, name ? " // " : "",
+ name ? name : "");
+ }
+ }
+}
+
+void
+nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
+ const struct nv50_disp_mthd_chan *chan)
+{
+ struct nouveau_object *disp = nv_object(priv);
+ const struct nv50_disp_impl *impl = (void *)disp->oclass;
+ const struct nv50_disp_mthd_list *list;
+ int i, j;
+
+ if (debug > nv_subdev(priv)->debug)
+ return;
+
+ for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
+ u32 base = head * chan->addr;
+ for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
+ const char *cname = chan->name;
+ const char *sname = "";
+ char cname_[16], sname_[16];
+
+ if (chan->addr) {
+ snprintf(cname_, sizeof(cname_), "%s %d",
+ chan->name, head);
+ cname = cname_;
+ }
+
+ if (chan->data[i].nr > 1) {
+ snprintf(sname_, sizeof(sname_), " - %s %d",
+ chan->data[i].name, j);
+ sname = sname_;
+ }
+
+ nv_printk_(disp, debug, "%s%s:\n", cname, sname);
+ nv50_disp_mthd_list(priv, debug, base, impl->mthd.prev,
+ list, j);
+ }
+ }
+}
+
+const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x610bb8 },
+ { 0x0088, 0x610b9c },
+ { 0x008c, 0x000000 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_dac = {
+ .mthd = 0x0080,
+ .addr = 0x000008,
+ .data = {
+ { 0x0400, 0x610b58 },
+ { 0x0404, 0x610bdc },
+ { 0x0420, 0x610828 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_sor = {
+ .mthd = 0x0040,
+ .addr = 0x000008,
+ .data = {
+ { 0x0600, 0x610b70 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_pior = {
+ .mthd = 0x0040,
+ .addr = 0x000008,
+ .data = {
+ { 0x0700, 0x610b80 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_head = {
+ .mthd = 0x0400,
+ .addr = 0x000540,
+ .data = {
+ { 0x0800, 0x610ad8 },
+ { 0x0804, 0x610ad0 },
+ { 0x0808, 0x610a48 },
+ { 0x080c, 0x610a78 },
+ { 0x0810, 0x610ac0 },
+ { 0x0814, 0x610af8 },
+ { 0x0818, 0x610b00 },
+ { 0x081c, 0x610ae8 },
+ { 0x0820, 0x610af0 },
+ { 0x0824, 0x610b08 },
+ { 0x0828, 0x610b10 },
+ { 0x082c, 0x610a68 },
+ { 0x0830, 0x610a60 },
+ { 0x0834, 0x000000 },
+ { 0x0838, 0x610a40 },
+ { 0x0840, 0x610a24 },
+ { 0x0844, 0x610a2c },
+ { 0x0848, 0x610aa8 },
+ { 0x084c, 0x610ab0 },
+ { 0x0860, 0x610a84 },
+ { 0x0864, 0x610a90 },
+ { 0x0868, 0x610b18 },
+ { 0x086c, 0x610b20 },
+ { 0x0870, 0x610ac8 },
+ { 0x0874, 0x610a38 },
+ { 0x0880, 0x610a58 },
+ { 0x0884, 0x610a9c },
+ { 0x08a0, 0x610a70 },
+ { 0x08a4, 0x610a50 },
+ { 0x08a8, 0x610ae0 },
+ { 0x08c0, 0x610b28 },
+ { 0x08c4, 0x610b30 },
+ { 0x08c8, 0x610b40 },
+ { 0x08d4, 0x610b38 },
+ { 0x08d8, 0x610b48 },
+ { 0x08dc, 0x610b50 },
+ { 0x0900, 0x610a18 },
+ { 0x0904, 0x610ab8 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nv50_disp_mast_mthd_chan = {
+ .name = "Core",
+ .addr = 0x000000,
+ .data = {
+ { "Global", 1, &nv50_disp_mast_mthd_base },
+ { "DAC", 3, &nv50_disp_mast_mthd_dac },
+ { "SOR", 2, &nv50_disp_mast_mthd_sor },
+ { "PIOR", 3, &nv50_disp_mast_mthd_pior },
+ { "HEAD", 2, &nv50_disp_mast_mthd_head },
+ {}
+ }
+};
+
static int
nv50_disp_mast_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -323,6 +493,56 @@ nv50_disp_mast_ofuncs = {
* EVO sync channel objects
******************************************************************************/
+static const struct nv50_disp_mthd_list
+nv50_disp_sync_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x0008c4 },
+ { 0x0088, 0x0008d0 },
+ { 0x008c, 0x0008dc },
+ { 0x0090, 0x0008e4 },
+ { 0x0094, 0x610884 },
+ { 0x00a0, 0x6108a0 },
+ { 0x00a4, 0x610878 },
+ { 0x00c0, 0x61086c },
+ { 0x00e0, 0x610858 },
+ { 0x00e4, 0x610860 },
+ { 0x00e8, 0x6108ac },
+ { 0x00ec, 0x6108b4 },
+ { 0x0100, 0x610894 },
+ { 0x0110, 0x6108bc },
+ { 0x0114, 0x61088c },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_sync_mthd_image = {
+ .mthd = 0x0400,
+ .addr = 0x000000,
+ .data = {
+ { 0x0800, 0x6108f0 },
+ { 0x0804, 0x6108fc },
+ { 0x0808, 0x61090c },
+ { 0x080c, 0x610914 },
+ { 0x0810, 0x610904 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nv50_disp_sync_mthd_chan = {
+ .name = "Base",
+ .addr = 0x000540,
+ .data = {
+ { "Global", 1, &nv50_disp_sync_mthd_base },
+ { "Image", 2, &nv50_disp_sync_mthd_image },
+ {}
+ }
+};
+
static int
nv50_disp_sync_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -362,6 +582,44 @@ nv50_disp_sync_ofuncs = {
* EVO overlay channel objects
******************************************************************************/
+const struct nv50_disp_mthd_list
+nv50_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x0009a0 },
+ { 0x0088, 0x0009c0 },
+ { 0x008c, 0x0009c8 },
+ { 0x0090, 0x6109b4 },
+ { 0x0094, 0x610970 },
+ { 0x00a0, 0x610998 },
+ { 0x00a4, 0x610964 },
+ { 0x00c0, 0x610958 },
+ { 0x00e0, 0x6109a8 },
+ { 0x00e4, 0x6109d0 },
+ { 0x00e8, 0x6109d8 },
+ { 0x0100, 0x61094c },
+ { 0x0104, 0x610984 },
+ { 0x0108, 0x61098c },
+ { 0x0800, 0x6109f8 },
+ { 0x0808, 0x610a08 },
+ { 0x080c, 0x610a10 },
+ { 0x0810, 0x610a00 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nv50_disp_ovly_mthd_chan = {
+ .name = "Overlay",
+ .addr = 0x000540,
+ .data = {
+ { "Global", 1, &nv50_disp_ovly_mthd_base },
+ {}
+ }
+};
+
static int
nv50_disp_ovly_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -782,25 +1040,78 @@ nv50_disp_cclass = {
* Display engine implementation
******************************************************************************/
-static void
-nv50_disp_intr_error(struct nv50_disp_priv *priv)
-{
- u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
- u32 addr, data;
- int chid;
-
- for (chid = 0; chid < 5; chid++) {
- if (!(channels & (1 << chid)))
- continue;
+static const struct nouveau_enum
+nv50_disp_intr_error_type[] = {
+ { 3, "ILLEGAL_MTHD" },
+ { 4, "INVALID_VALUE" },
+ { 5, "INVALID_STATE" },
+ { 7, "INVALID_HANDLE" },
+ {}
+};
- nv_wr32(priv, 0x610020, 0x00010000 << chid);
- addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
- data = nv_rd32(priv, 0x610084 + (chid * 0x08));
- nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+static const struct nouveau_enum
+nv50_disp_intr_error_code[] = {
+ { 0x00, "" },
+ {}
+};
- nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
- chid, addr & 0xffc, data, addr);
+static void
+nv50_disp_intr_error(struct nv50_disp_priv *priv, int chid)
+{
+ struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
+ u32 data = nv_rd32(priv, 0x610084 + (chid * 0x08));
+ u32 addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+ u32 code = (addr & 0x00ff0000) >> 16;
+ u32 type = (addr & 0x00007000) >> 12;
+ u32 mthd = (addr & 0x00000ffc);
+ const struct nouveau_enum *ec, *et;
+ char ecunk[6], etunk[6];
+
+ et = nouveau_enum_find(nv50_disp_intr_error_type, type);
+ if (!et)
+ snprintf(etunk, sizeof(etunk), "UNK%02X", type);
+
+ ec = nouveau_enum_find(nv50_disp_intr_error_code, code);
+ if (!ec)
+ snprintf(ecunk, sizeof(ecunk), "UNK%02X", code);
+
+ nv_error(priv, "%s [%s] chid %d mthd 0x%04x data 0x%08x\n",
+ et ? et->name : etunk, ec ? ec->name : ecunk,
+ chid, mthd, data);
+
+ if (chid == 0) {
+ switch (mthd) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
+ impl->mthd.core);
+ break;
+ default:
+ break;
+ }
+ } else
+ if (chid <= 2) {
+ switch (mthd) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
+ impl->mthd.base);
+ break;
+ default:
+ break;
+ }
+ } else
+ if (chid <= 4) {
+ switch (mthd) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 3,
+ impl->mthd.ovly);
+ break;
+ default:
+ break;
+ }
}
+
+ nv_wr32(priv, 0x610020, 0x00010000 << chid);
+ nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
}
static u16
@@ -1241,12 +1552,14 @@ nv50_disp_intr_supervisor(struct work_struct *work)
{
struct nv50_disp_priv *priv =
container_of(work, struct nv50_disp_priv, supervisor);
+ struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
u32 super = nv_rd32(priv, 0x610030);
int head;
nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
if (priv->super & 0x00000010) {
+ nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
for (head = 0; head < priv->head.nr; head++) {
if (!(super & (0x00000020 << head)))
continue;
@@ -1290,9 +1603,10 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
u32 intr0 = nv_rd32(priv, 0x610020);
u32 intr1 = nv_rd32(priv, 0x610024);
- if (intr0 & 0x001f0000) {
- nv50_disp_intr_error(priv);
- intr0 &= ~0x001f0000;
+ while (intr0 & 0x001f0000) {
+ u32 chid = __ffs(intr0 & 0x001f0000) - 16;
+ nv50_disp_intr_error(priv, chid);
+ intr0 &= ~(0x00010000 << chid);
}
if (intr1 & 0x00000004) {
@@ -1346,13 +1660,17 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nv50_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x50),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_disp_ctor,
.dtor = _nouveau_disp_dtor,
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
-};
+ .mthd.core = &nv50_disp_mast_mthd_chan,
+ .mthd.base = &nv50_disp_sync_mthd_chan,
+ .mthd.ovly = &nv50_disp_ovly_mthd_chan,
+ .mthd.prev = 0x000004,
+}.base.base;
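Most of the nv50.c additions above are data: an nv50_disp_mthd_chan describes one channel type, each entry in its data[] names a block (Global, DAC, SOR, PIOR, HEAD) with an instance count and a pointer to an nv50_disp_mthd_list of method/register pairs, and nv50_disp_mthd_chan()/nv50_disp_mthd_list() walk those tables to print the armed and previous value of every method when a channel faults or a supervisor interrupt fires. A compilable sketch of that walk, with fabricated register reads and deliberately tiny invented tables, shows the iteration order:

/* editor's sketch, not part of the patch: iteration order for the
 * mthd_chan/mthd_list tables; register reads are faked and the
 * tables below are invented miniatures */
#include <stdint.h>
#include <stdio.h>

struct mthd_list {
	uint32_t mthd;                          /* method stride per instance */
	uint32_t addr;                          /* register stride per instance */
	struct { uint32_t mthd, addr; } data[8];
};

struct mthd_chan {
	const char *name;
	uint32_t addr;                          /* register stride per head */
	struct {
		const char *name;
		int nr;
		const struct mthd_list *mthd;
	} data[8];
};

static uint32_t fake_rd32(uint32_t reg) { return reg ^ 0x5a5a5a5a; }

static void dump_chan(const struct mthd_chan *chan, int head, int prev)
{
	for (int i = 0; chan->data[i].mthd; i++) {
		const struct mthd_list *list = chan->data[i].mthd;
		uint32_t base = head * chan->addr;

		for (int j = 0; j < chan->data[i].nr; j++, base += list->addr) {
			printf("%s - %s %d:\n", chan->name, chan->data[i].name, j);
			for (int k = 0; list->data[k].mthd; k++) {
				uint32_t next = fake_rd32(list->data[k].addr + base);
				uint32_t cur  = fake_rd32(list->data[k].addr + base + prev);
				printf("\t0x%04x: 0x%08x -> 0x%08x\n",
				       (unsigned)(list->data[k].mthd + list->mthd * j),
				       (unsigned)cur, (unsigned)next);
			}
		}
	}
}

int main(void)
{
	static const struct mthd_list dac = {
		.mthd = 0x0080, .addr = 0x000008,
		.data = { { 0x0400, 0x610b58 }, { 0x0404, 0x610bdc }, { 0 } },
	};
	static const struct mthd_chan core = {
		.name = "Core", .addr = 0x000000,
		.data = { { "DAC", 3, &dac }, { 0 } },
	};

	dump_chan(&core, 0, 0x000004);
	return 0;
}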
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index d31d426ea1f6..48d59db47f0d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -8,9 +8,19 @@
#include <core/event.h>
#include <engine/dmaobj.h>
-#include <engine/disp.h>
#include "dport.h"
+#include "priv.h"
+
+struct nv50_disp_impl {
+ struct nouveau_disp_impl base;
+ struct {
+ const struct nv50_disp_mthd_chan *core;
+ const struct nv50_disp_mthd_chan *base;
+ const struct nv50_disp_mthd_chan *ovly;
+ int prev;
+ } mthd;
+};
struct nv50_disp_priv {
struct nouveau_disp base;
@@ -124,21 +134,60 @@ struct nv50_disp_pioc {
struct nv50_disp_chan base;
};
+struct nv50_disp_mthd_list {
+ u32 mthd;
+ u32 addr;
+ struct {
+ u32 mthd;
+ u32 addr;
+ const char *name;
+ } data[];
+};
+
+struct nv50_disp_mthd_chan {
+ const char *name;
+ u32 addr;
+ struct {
+ const char *name;
+ int nr;
+ const struct nv50_disp_mthd_list *mthd;
+ } data[];
+};
+
extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior;
extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image;
extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
+ const struct nv50_disp_mthd_chan *);
void nv50_disp_intr_supervisor(struct work_struct *);
void nv50_disp_intr(struct nouveau_subdev *);
+extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac;
+extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head;
+extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan;
+extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan;
extern struct nouveau_omthds nv84_disp_base_omthds[];
+extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan;
+
extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior;
extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan;
extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
extern struct nouveau_omthds nvd0_disp_base_omthds[];
@@ -147,4 +196,7 @@ extern struct nouveau_oclass nvd0_disp_cclass;
void nvd0_disp_intr_supervisor(struct work_struct *);
void nvd0_disp_intr(struct nouveau_subdev *);
+extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index ef9ce300a496..98c5b19bc2b0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -29,6 +29,179 @@
#include "nv50.h"
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+const struct nv50_disp_mthd_list
+nv84_disp_mast_mthd_dac = {
+ .mthd = 0x0080,
+ .addr = 0x000008,
+ .data = {
+ { 0x0400, 0x610b58 },
+ { 0x0404, 0x610bdc },
+ { 0x0420, 0x610bc4 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nv84_disp_mast_mthd_head = {
+ .mthd = 0x0400,
+ .addr = 0x000540,
+ .data = {
+ { 0x0800, 0x610ad8 },
+ { 0x0804, 0x610ad0 },
+ { 0x0808, 0x610a48 },
+ { 0x080c, 0x610a78 },
+ { 0x0810, 0x610ac0 },
+ { 0x0814, 0x610af8 },
+ { 0x0818, 0x610b00 },
+ { 0x081c, 0x610ae8 },
+ { 0x0820, 0x610af0 },
+ { 0x0824, 0x610b08 },
+ { 0x0828, 0x610b10 },
+ { 0x082c, 0x610a68 },
+ { 0x0830, 0x610a60 },
+ { 0x0834, 0x000000 },
+ { 0x0838, 0x610a40 },
+ { 0x0840, 0x610a24 },
+ { 0x0844, 0x610a2c },
+ { 0x0848, 0x610aa8 },
+ { 0x084c, 0x610ab0 },
+ { 0x085c, 0x610c5c },
+ { 0x0860, 0x610a84 },
+ { 0x0864, 0x610a90 },
+ { 0x0868, 0x610b18 },
+ { 0x086c, 0x610b20 },
+ { 0x0870, 0x610ac8 },
+ { 0x0874, 0x610a38 },
+ { 0x0878, 0x610c50 },
+ { 0x0880, 0x610a58 },
+ { 0x0884, 0x610a9c },
+ { 0x089c, 0x610c68 },
+ { 0x08a0, 0x610a70 },
+ { 0x08a4, 0x610a50 },
+ { 0x08a8, 0x610ae0 },
+ { 0x08c0, 0x610b28 },
+ { 0x08c4, 0x610b30 },
+ { 0x08c8, 0x610b40 },
+ { 0x08d4, 0x610b38 },
+ { 0x08d8, 0x610b48 },
+ { 0x08dc, 0x610b50 },
+ { 0x0900, 0x610a18 },
+ { 0x0904, 0x610ab8 },
+ { 0x0910, 0x610c70 },
+ { 0x0914, 0x610c78 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nv84_disp_mast_mthd_chan = {
+ .name = "Core",
+ .addr = 0x000000,
+ .data = {
+ { "Global", 1, &nv50_disp_mast_mthd_base },
+ { "DAC", 3, &nv84_disp_mast_mthd_dac },
+ { "SOR", 2, &nv50_disp_mast_mthd_sor },
+ { "PIOR", 3, &nv50_disp_mast_mthd_pior },
+ { "HEAD", 2, &nv84_disp_mast_mthd_head },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nv84_disp_sync_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x0008c4 },
+ { 0x0088, 0x0008d0 },
+ { 0x008c, 0x0008dc },
+ { 0x0090, 0x0008e4 },
+ { 0x0094, 0x610884 },
+ { 0x00a0, 0x6108a0 },
+ { 0x00a4, 0x610878 },
+ { 0x00c0, 0x61086c },
+ { 0x00c4, 0x610800 },
+ { 0x00c8, 0x61080c },
+ { 0x00cc, 0x610818 },
+ { 0x00e0, 0x610858 },
+ { 0x00e4, 0x610860 },
+ { 0x00e8, 0x6108ac },
+ { 0x00ec, 0x6108b4 },
+ { 0x00fc, 0x610824 },
+ { 0x0100, 0x610894 },
+ { 0x0104, 0x61082c },
+ { 0x0110, 0x6108bc },
+ { 0x0114, 0x61088c },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nv84_disp_sync_mthd_chan = {
+ .name = "Base",
+ .addr = 0x000540,
+ .data = {
+ { "Global", 1, &nv84_disp_sync_mthd_base },
+ { "Image", 2, &nv50_disp_sync_mthd_image },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nv84_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x6109a0 },
+ { 0x0088, 0x6109c0 },
+ { 0x008c, 0x6109c8 },
+ { 0x0090, 0x6109b4 },
+ { 0x0094, 0x610970 },
+ { 0x00a0, 0x610998 },
+ { 0x00a4, 0x610964 },
+ { 0x00c0, 0x610958 },
+ { 0x00e0, 0x6109a8 },
+ { 0x00e4, 0x6109d0 },
+ { 0x00e8, 0x6109d8 },
+ { 0x0100, 0x61094c },
+ { 0x0104, 0x610984 },
+ { 0x0108, 0x61098c },
+ { 0x0800, 0x6109f8 },
+ { 0x0808, 0x610a08 },
+ { 0x080c, 0x610a10 },
+ { 0x0810, 0x610a00 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nv84_disp_ovly_mthd_chan = {
+ .name = "Overlay",
+ .addr = 0x000540,
+ .data = {
+ { "Global", 1, &nv84_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
static struct nouveau_oclass
nv84_disp_sclass[] = {
{ NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
@@ -59,6 +232,10 @@ nv84_disp_base_oclass[] = {
{}
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
static int
nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -91,13 +268,17 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nv84_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0x82),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv84_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x82),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_disp_ctor,
.dtor = _nouveau_disp_dtor,
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
-};
+ .mthd.core = &nv84_disp_mast_mthd_chan,
+ .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.ovly = &nv84_disp_ovly_mthd_chan,
+ .mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index a518543c00ab..6844061c7e04 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -29,6 +29,38 @@
#include "nv50.h"
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+const struct nv50_disp_mthd_list
+nv94_disp_mast_mthd_sor = {
+ .mthd = 0x0040,
+ .addr = 0x000008,
+ .data = {
+ { 0x0600, 0x610794 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nv94_disp_mast_mthd_chan = {
+ .name = "Core",
+ .addr = 0x000000,
+ .data = {
+ { "Global", 1, &nv50_disp_mast_mthd_base },
+ { "DAC", 3, &nv84_disp_mast_mthd_dac },
+ { "SOR", 4, &nv94_disp_mast_mthd_sor },
+ { "PIOR", 3, &nv50_disp_mast_mthd_pior },
+ { "HEAD", 2, &nv84_disp_mast_mthd_head },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
static struct nouveau_oclass
nv94_disp_sclass[] = {
{ NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
@@ -59,6 +91,10 @@ nv94_disp_base_oclass[] = {
{}
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
static int
nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -92,13 +128,17 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nv94_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0x88),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv94_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x88),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv94_disp_ctor,
.dtor = _nouveau_disp_dtor,
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
-};
+ .mthd.core = &nv94_disp_mast_mthd_chan,
+ .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.ovly = &nv84_disp_ovly_mthd_chan,
+ .mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 6cf8eefac368..88c96241c02a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -29,6 +29,55 @@
#include "nv50.h"
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nva0_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x6109a0 },
+ { 0x0088, 0x6109c0 },
+ { 0x008c, 0x6109c8 },
+ { 0x0090, 0x6109b4 },
+ { 0x0094, 0x610970 },
+ { 0x00a0, 0x610998 },
+ { 0x00a4, 0x610964 },
+ { 0x00b0, 0x610c98 },
+ { 0x00b4, 0x610ca4 },
+ { 0x00b8, 0x610cac },
+ { 0x00c0, 0x610958 },
+ { 0x00e0, 0x6109a8 },
+ { 0x00e4, 0x6109d0 },
+ { 0x00e8, 0x6109d8 },
+ { 0x0100, 0x61094c },
+ { 0x0104, 0x610984 },
+ { 0x0108, 0x61098c },
+ { 0x0800, 0x6109f8 },
+ { 0x0808, 0x610a08 },
+ { 0x080c, 0x610a10 },
+ { 0x0810, 0x610a00 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nva0_disp_ovly_mthd_chan = {
+ .name = "Overlay",
+ .addr = 0x000540,
+ .data = {
+ { "Global", 1, &nva0_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
static struct nouveau_oclass
nva0_disp_sclass[] = {
{ NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
@@ -45,6 +94,10 @@ nva0_disp_base_oclass[] = {
{}
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
static int
nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,13 +130,17 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nva0_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0x83),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nva0_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x83),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nva0_disp_ctor,
.dtor = _nouveau_disp_dtor,
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
-};
+ .mthd.core = &nv84_disp_mast_mthd_chan,
+ .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.ovly = &nva0_disp_ovly_mthd_chan,
+ .mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index 6ad6dcece43b..46cb2ce0e82a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -29,6 +29,10 @@
#include "nv50.h"
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
static struct nouveau_oclass
nva3_disp_sclass[] = {
{ NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
@@ -60,6 +64,10 @@ nva3_disp_base_oclass[] = {
{}
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
static int
nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -94,13 +102,17 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nva3_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0x85),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nva3_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x85),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nva3_disp_ctor,
.dtor = _nouveau_disp_dtor,
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
-};
+ .mthd.core = &nv94_disp_mast_mthd_chan,
+ .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.ovly = &nv84_disp_ovly_mthd_chan,
+ .mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 1c5e4e8b2c82..7762665ad8fd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -124,6 +124,146 @@ nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
* EVO master channel object
******************************************************************************/
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x660080 },
+ { 0x0084, 0x660084 },
+ { 0x0088, 0x660088 },
+ { 0x008c, 0x000000 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_dac = {
+ .mthd = 0x0020,
+ .addr = 0x000020,
+ .data = {
+ { 0x0180, 0x660180 },
+ { 0x0184, 0x660184 },
+ { 0x0188, 0x660188 },
+ { 0x0190, 0x660190 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_sor = {
+ .mthd = 0x0020,
+ .addr = 0x000020,
+ .data = {
+ { 0x0200, 0x660200 },
+ { 0x0204, 0x660204 },
+ { 0x0208, 0x660208 },
+ { 0x0210, 0x660210 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_pior = {
+ .mthd = 0x0020,
+ .addr = 0x000020,
+ .data = {
+ { 0x0300, 0x660300 },
+ { 0x0304, 0x660304 },
+ { 0x0308, 0x660308 },
+ { 0x0310, 0x660310 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_head = {
+ .mthd = 0x0300,
+ .addr = 0x000300,
+ .data = {
+ { 0x0400, 0x660400 },
+ { 0x0404, 0x660404 },
+ { 0x0408, 0x660408 },
+ { 0x040c, 0x66040c },
+ { 0x0410, 0x660410 },
+ { 0x0414, 0x660414 },
+ { 0x0418, 0x660418 },
+ { 0x041c, 0x66041c },
+ { 0x0420, 0x660420 },
+ { 0x0424, 0x660424 },
+ { 0x0428, 0x660428 },
+ { 0x042c, 0x66042c },
+ { 0x0430, 0x660430 },
+ { 0x0434, 0x660434 },
+ { 0x0438, 0x660438 },
+ { 0x0440, 0x660440 },
+ { 0x0444, 0x660444 },
+ { 0x0448, 0x660448 },
+ { 0x044c, 0x66044c },
+ { 0x0450, 0x660450 },
+ { 0x0454, 0x660454 },
+ { 0x0458, 0x660458 },
+ { 0x045c, 0x66045c },
+ { 0x0460, 0x660460 },
+ { 0x0468, 0x660468 },
+ { 0x046c, 0x66046c },
+ { 0x0470, 0x660470 },
+ { 0x0474, 0x660474 },
+ { 0x0480, 0x660480 },
+ { 0x0484, 0x660484 },
+ { 0x048c, 0x66048c },
+ { 0x0490, 0x660490 },
+ { 0x0494, 0x660494 },
+ { 0x0498, 0x660498 },
+ { 0x04b0, 0x6604b0 },
+ { 0x04b8, 0x6604b8 },
+ { 0x04bc, 0x6604bc },
+ { 0x04c0, 0x6604c0 },
+ { 0x04c4, 0x6604c4 },
+ { 0x04c8, 0x6604c8 },
+ { 0x04d0, 0x6604d0 },
+ { 0x04d4, 0x6604d4 },
+ { 0x04e0, 0x6604e0 },
+ { 0x04e4, 0x6604e4 },
+ { 0x04e8, 0x6604e8 },
+ { 0x04ec, 0x6604ec },
+ { 0x04f0, 0x6604f0 },
+ { 0x04f4, 0x6604f4 },
+ { 0x04f8, 0x6604f8 },
+ { 0x04fc, 0x6604fc },
+ { 0x0500, 0x660500 },
+ { 0x0504, 0x660504 },
+ { 0x0508, 0x660508 },
+ { 0x050c, 0x66050c },
+ { 0x0510, 0x660510 },
+ { 0x0514, 0x660514 },
+ { 0x0518, 0x660518 },
+ { 0x051c, 0x66051c },
+ { 0x052c, 0x66052c },
+ { 0x0530, 0x660530 },
+ { 0x054c, 0x66054c },
+ { 0x0550, 0x660550 },
+ { 0x0554, 0x660554 },
+ { 0x0558, 0x660558 },
+ { 0x055c, 0x66055c },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nvd0_disp_mast_mthd_chan = {
+ .name = "Core",
+ .addr = 0x000000,
+ .data = {
+ { "Global", 1, &nvd0_disp_mast_mthd_base },
+ { "DAC", 3, &nvd0_disp_mast_mthd_dac },
+ { "SOR", 8, &nvd0_disp_mast_mthd_sor },
+ { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
+ { "HEAD", 4, &nvd0_disp_mast_mthd_head },
+ {}
+ }
+};
+
static int
nvd0_disp_mast_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -216,6 +356,81 @@ nvd0_disp_mast_ofuncs = {
* EVO sync channel objects
******************************************************************************/
+static const struct nv50_disp_mthd_list
+nvd0_disp_sync_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x661080 },
+ { 0x0084, 0x661084 },
+ { 0x0088, 0x661088 },
+ { 0x008c, 0x66108c },
+ { 0x0090, 0x661090 },
+ { 0x0094, 0x661094 },
+ { 0x00a0, 0x6610a0 },
+ { 0x00a4, 0x6610a4 },
+ { 0x00c0, 0x6610c0 },
+ { 0x00c4, 0x6610c4 },
+ { 0x00c8, 0x6610c8 },
+ { 0x00cc, 0x6610cc },
+ { 0x00e0, 0x6610e0 },
+ { 0x00e4, 0x6610e4 },
+ { 0x00e8, 0x6610e8 },
+ { 0x00ec, 0x6610ec },
+ { 0x00fc, 0x6610fc },
+ { 0x0100, 0x661100 },
+ { 0x0104, 0x661104 },
+ { 0x0108, 0x661108 },
+ { 0x010c, 0x66110c },
+ { 0x0110, 0x661110 },
+ { 0x0114, 0x661114 },
+ { 0x0118, 0x661118 },
+ { 0x011c, 0x66111c },
+ { 0x0130, 0x661130 },
+ { 0x0134, 0x661134 },
+ { 0x0138, 0x661138 },
+ { 0x013c, 0x66113c },
+ { 0x0140, 0x661140 },
+ { 0x0144, 0x661144 },
+ { 0x0148, 0x661148 },
+ { 0x014c, 0x66114c },
+ { 0x0150, 0x661150 },
+ { 0x0154, 0x661154 },
+ { 0x0158, 0x661158 },
+ { 0x015c, 0x66115c },
+ { 0x0160, 0x661160 },
+ { 0x0164, 0x661164 },
+ { 0x0168, 0x661168 },
+ { 0x016c, 0x66116c },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+nvd0_disp_sync_mthd_image = {
+ .mthd = 0x0400,
+ .addr = 0x000400,
+ .data = {
+ { 0x0400, 0x661400 },
+ { 0x0404, 0x661404 },
+ { 0x0408, 0x661408 },
+ { 0x040c, 0x66140c },
+ { 0x0410, 0x661410 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nvd0_disp_sync_mthd_chan = {
+ .name = "Base",
+ .addr = 0x001000,
+ .data = {
+ { "Global", 1, &nvd0_disp_sync_mthd_base },
+ { "Image", 2, &nvd0_disp_sync_mthd_image },
+ {}
+ }
+};
+
static int
nvd0_disp_sync_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -256,6 +471,68 @@ nvd0_disp_sync_ofuncs = {
* EVO overlay channel objects
******************************************************************************/
+static const struct nv50_disp_mthd_list
+nvd0_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .data = {
+ { 0x0080, 0x665080 },
+ { 0x0084, 0x665084 },
+ { 0x0088, 0x665088 },
+ { 0x008c, 0x66508c },
+ { 0x0090, 0x665090 },
+ { 0x0094, 0x665094 },
+ { 0x00a0, 0x6650a0 },
+ { 0x00a4, 0x6650a4 },
+ { 0x00b0, 0x6650b0 },
+ { 0x00b4, 0x6650b4 },
+ { 0x00b8, 0x6650b8 },
+ { 0x00c0, 0x6650c0 },
+ { 0x00e0, 0x6650e0 },
+ { 0x00e4, 0x6650e4 },
+ { 0x00e8, 0x6650e8 },
+ { 0x0100, 0x665100 },
+ { 0x0104, 0x665104 },
+ { 0x0108, 0x665108 },
+ { 0x010c, 0x66510c },
+ { 0x0110, 0x665110 },
+ { 0x0118, 0x665118 },
+ { 0x011c, 0x66511c },
+ { 0x0120, 0x665120 },
+ { 0x0124, 0x665124 },
+ { 0x0130, 0x665130 },
+ { 0x0134, 0x665134 },
+ { 0x0138, 0x665138 },
+ { 0x013c, 0x66513c },
+ { 0x0140, 0x665140 },
+ { 0x0144, 0x665144 },
+ { 0x0148, 0x665148 },
+ { 0x014c, 0x66514c },
+ { 0x0150, 0x665150 },
+ { 0x0154, 0x665154 },
+ { 0x0158, 0x665158 },
+ { 0x015c, 0x66515c },
+ { 0x0160, 0x665160 },
+ { 0x0164, 0x665164 },
+ { 0x0168, 0x665168 },
+ { 0x016c, 0x66516c },
+ { 0x0400, 0x665400 },
+ { 0x0408, 0x665408 },
+ { 0x040c, 0x66540c },
+ { 0x0410, 0x665410 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nvd0_disp_ovly_mthd_chan = {
+ .name = "Overlay",
+ .addr = 0x001000,
+ .data = {
+ { "Global", 1, &nvd0_disp_ovly_mthd_base },
+ {}
+ }
+};
+
static int
nvd0_disp_ovly_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -897,19 +1174,22 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
{
struct nv50_disp_priv *priv =
container_of(work, struct nv50_disp_priv, supervisor);
+ struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
u32 mask[4];
int head;
- nv_debug(priv, "supervisor %08x\n", priv->super);
+ nv_debug(priv, "supervisor %d\n", ffs(priv->super));
for (head = 0; head < priv->head.nr; head++) {
mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
}
if (priv->super & 0x00000001) {
+ nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
for (head = 0; head < priv->head.nr; head++) {
if (!(mask[head] & 0x00001000))
continue;
+ nv_debug(priv, "supervisor 1.0 - head %d\n", head);
nvd0_disp_intr_unk1_0(priv, head);
}
} else
@@ -917,16 +1197,19 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
for (head = 0; head < priv->head.nr; head++) {
if (!(mask[head] & 0x00001000))
continue;
+ nv_debug(priv, "supervisor 2.0 - head %d\n", head);
nvd0_disp_intr_unk2_0(priv, head);
}
for (head = 0; head < priv->head.nr; head++) {
if (!(mask[head] & 0x00010000))
continue;
+ nv_debug(priv, "supervisor 2.1 - head %d\n", head);
nvd0_disp_intr_unk2_1(priv, head);
}
for (head = 0; head < priv->head.nr; head++) {
if (!(mask[head] & 0x00001000))
continue;
+ nv_debug(priv, "supervisor 2.2 - head %d\n", head);
nvd0_disp_intr_unk2_2(priv, head);
}
} else
@@ -934,6 +1217,7 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
for (head = 0; head < priv->head.nr; head++) {
if (!(mask[head] & 0x00001000))
continue;
+ nv_debug(priv, "supervisor 3.0 - head %d\n", head);
nvd0_disp_intr_unk4_0(priv, head);
}
}
@@ -943,6 +1227,53 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
nv_wr32(priv, 0x6101d0, 0x80000000);
}
+static void
+nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
+{
+ const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
+ u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
+ u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
+ u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
+
+ nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
+ "0x%08x 0x%08x\n",
+ chid, (mthd & 0x0000ffc), data, mthd, unkn);
+
+ if (chid == 0) {
+ switch (mthd) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
+ impl->mthd.core);
+ break;
+ default:
+ break;
+ }
+ } else
+ if (chid <= 4) {
+ switch (mthd) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
+ impl->mthd.base);
+ break;
+ default:
+ break;
+ }
+ } else
+ if (chid <= 8) {
+ switch (mthd) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
+ impl->mthd.ovly);
+ break;
+ default:
+ break;
+ }
+ }
+
+ nv_wr32(priv, 0x61009c, (1 << chid));
+ nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
+}
+
void
nvd0_disp_intr(struct nouveau_subdev *subdev)
{
@@ -959,18 +1290,8 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
if (intr & 0x00000002) {
u32 stat = nv_rd32(priv, 0x61009c);
int chid = ffs(stat) - 1;
- if (chid >= 0) {
- u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
- u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
- u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
-
- nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
- "0x%08x 0x%08x\n",
- chid, (mthd & 0x0000ffc), data, mthd, unkn);
- nv_wr32(priv, 0x61009c, (1 << chid));
- nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
- }
-
+ if (chid >= 0)
+ nvd0_disp_intr_error(priv, chid);
intr &= ~0x00000002;
}
@@ -1035,13 +1356,17 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nvd0_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0x90),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nvd0_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x90),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvd0_disp_ctor,
.dtor = _nouveau_disp_dtor,
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
-};
+ .mthd.core = &nvd0_disp_mast_mthd_chan,
+ .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.ovly = &nvd0_disp_ovly_mthd_chan,
+ .mthd.prev = -0x020000,
+}.base.base;
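Both interrupt paths reworked above now treat the per-channel error status as a bitmask and service one channel at a time: nv50_disp_intr() loops with __ffs() over bits 16-20 of register 0x610020, while nvd0_disp_intr() hands the faulting chid to the new nvd0_disp_intr_error(), which decodes the method, dumps the matching mthd table and acks only that channel. A standalone sketch of that pattern, with a made-up mask layout and a portable stand-in for __ffs():

/* editor's sketch, not part of the patch: draining a per-channel
 * error mask one bit at a time; the mask layout here is made up */
#include <stdint.h>
#include <stdio.h>

/* portable stand-in for the kernel's __ffs(): index of lowest set bit */
static int lowest_bit(uint32_t v)
{
	int n = 0;

	while (!(v & 1)) {
		v >>= 1;
		n++;
	}
	return n;
}

static void handle_error(int chid)
{
	printf("chid %d: read mthd/data, dump method state, ack channel\n", chid);
}

int main(void)
{
	uint32_t intr = 0x00150000;       /* pretend chids 0, 2 and 4 faulted */

	/* bits 20:16 flag per-channel errors in this fictional layout */
	while (intr & 0x001f0000) {
		int chid = lowest_bit(intr & 0x001f0000) - 16;

		handle_error(chid);
		intr &= ~(0x00010000u << chid); /* clear only this channel */
	}
	return 0;
}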
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index ab63f32c00b2..44e0b8f34c1a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -29,6 +29,175 @@
#include "nv50.h"
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nve0_disp_mast_mthd_head = {
+ .mthd = 0x0300,
+ .addr = 0x000300,
+ .data = {
+ { 0x0400, 0x660400 },
+ { 0x0404, 0x660404 },
+ { 0x0408, 0x660408 },
+ { 0x040c, 0x66040c },
+ { 0x0410, 0x660410 },
+ { 0x0414, 0x660414 },
+ { 0x0418, 0x660418 },
+ { 0x041c, 0x66041c },
+ { 0x0420, 0x660420 },
+ { 0x0424, 0x660424 },
+ { 0x0428, 0x660428 },
+ { 0x042c, 0x66042c },
+ { 0x0430, 0x660430 },
+ { 0x0434, 0x660434 },
+ { 0x0438, 0x660438 },
+ { 0x0440, 0x660440 },
+ { 0x0444, 0x660444 },
+ { 0x0448, 0x660448 },
+ { 0x044c, 0x66044c },
+ { 0x0450, 0x660450 },
+ { 0x0454, 0x660454 },
+ { 0x0458, 0x660458 },
+ { 0x045c, 0x66045c },
+ { 0x0460, 0x660460 },
+ { 0x0468, 0x660468 },
+ { 0x046c, 0x66046c },
+ { 0x0470, 0x660470 },
+ { 0x0474, 0x660474 },
+ { 0x047c, 0x66047c },
+ { 0x0480, 0x660480 },
+ { 0x0484, 0x660484 },
+ { 0x0488, 0x660488 },
+ { 0x048c, 0x66048c },
+ { 0x0490, 0x660490 },
+ { 0x0494, 0x660494 },
+ { 0x0498, 0x660498 },
+ { 0x04a0, 0x6604a0 },
+ { 0x04b0, 0x6604b0 },
+ { 0x04b8, 0x6604b8 },
+ { 0x04bc, 0x6604bc },
+ { 0x04c0, 0x6604c0 },
+ { 0x04c4, 0x6604c4 },
+ { 0x04c8, 0x6604c8 },
+ { 0x04d0, 0x6604d0 },
+ { 0x04d4, 0x6604d4 },
+ { 0x04e0, 0x6604e0 },
+ { 0x04e4, 0x6604e4 },
+ { 0x04e8, 0x6604e8 },
+ { 0x04ec, 0x6604ec },
+ { 0x04f0, 0x6604f0 },
+ { 0x04f4, 0x6604f4 },
+ { 0x04f8, 0x6604f8 },
+ { 0x04fc, 0x6604fc },
+ { 0x0500, 0x660500 },
+ { 0x0504, 0x660504 },
+ { 0x0508, 0x660508 },
+ { 0x050c, 0x66050c },
+ { 0x0510, 0x660510 },
+ { 0x0514, 0x660514 },
+ { 0x0518, 0x660518 },
+ { 0x051c, 0x66051c },
+ { 0x0520, 0x660520 },
+ { 0x0524, 0x660524 },
+ { 0x052c, 0x66052c },
+ { 0x0530, 0x660530 },
+ { 0x054c, 0x66054c },
+ { 0x0550, 0x660550 },
+ { 0x0554, 0x660554 },
+ { 0x0558, 0x660558 },
+ { 0x055c, 0x66055c },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nve0_disp_mast_mthd_chan = {
+ .name = "Core",
+ .addr = 0x000000,
+ .data = {
+ { "Global", 1, &nvd0_disp_mast_mthd_base },
+ { "DAC", 3, &nvd0_disp_mast_mthd_dac },
+ { "SOR", 8, &nvd0_disp_mast_mthd_sor },
+ { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
+ { "HEAD", 4, &nve0_disp_mast_mthd_head },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nve0_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .data = {
+ { 0x0080, 0x665080 },
+ { 0x0084, 0x665084 },
+ { 0x0088, 0x665088 },
+ { 0x008c, 0x66508c },
+ { 0x0090, 0x665090 },
+ { 0x0094, 0x665094 },
+ { 0x00a0, 0x6650a0 },
+ { 0x00a4, 0x6650a4 },
+ { 0x00b0, 0x6650b0 },
+ { 0x00b4, 0x6650b4 },
+ { 0x00b8, 0x6650b8 },
+ { 0x00c0, 0x6650c0 },
+ { 0x00c4, 0x6650c4 },
+ { 0x00e0, 0x6650e0 },
+ { 0x00e4, 0x6650e4 },
+ { 0x00e8, 0x6650e8 },
+ { 0x0100, 0x665100 },
+ { 0x0104, 0x665104 },
+ { 0x0108, 0x665108 },
+ { 0x010c, 0x66510c },
+ { 0x0110, 0x665110 },
+ { 0x0118, 0x665118 },
+ { 0x011c, 0x66511c },
+ { 0x0120, 0x665120 },
+ { 0x0124, 0x665124 },
+ { 0x0130, 0x665130 },
+ { 0x0134, 0x665134 },
+ { 0x0138, 0x665138 },
+ { 0x013c, 0x66513c },
+ { 0x0140, 0x665140 },
+ { 0x0144, 0x665144 },
+ { 0x0148, 0x665148 },
+ { 0x014c, 0x66514c },
+ { 0x0150, 0x665150 },
+ { 0x0154, 0x665154 },
+ { 0x0158, 0x665158 },
+ { 0x015c, 0x66515c },
+ { 0x0160, 0x665160 },
+ { 0x0164, 0x665164 },
+ { 0x0168, 0x665168 },
+ { 0x016c, 0x66516c },
+ { 0x0400, 0x665400 },
+ { 0x0404, 0x665404 },
+ { 0x0408, 0x665408 },
+ { 0x040c, 0x66540c },
+ { 0x0410, 0x665410 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nve0_disp_ovly_mthd_chan = {
+ .name = "Overlay",
+ .addr = 0x001000,
+ .data = {
+ { "Global", 1, &nve0_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
static struct nouveau_oclass
nve0_disp_sclass[] = {
{ NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
@@ -45,6 +214,10 @@ nve0_disp_base_oclass[] = {
{}
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
static int
nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,13 +250,17 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nve0_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0x91),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nve0_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x91),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_disp_ctor,
.dtor = _nouveau_disp_dtor,
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
-};
+ .mthd.core = &nve0_disp_mast_mthd_chan,
+ .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .mthd.prev = -0x020000,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 05fee10e0c97..482585d375fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -29,6 +29,10 @@
#include "nv50.h"
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
static struct nouveau_oclass
nvf0_disp_sclass[] = {
{ NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
@@ -45,6 +49,10 @@ nvf0_disp_base_oclass[] = {
{}
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
static int
nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,13 +85,17 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nvf0_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0x92),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nvf0_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x92),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvf0_disp_ctor,
.dtor = _nouveau_disp_dtor,
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
-};
+ .mthd.core = &nve0_disp_mast_mthd_chan,
+ .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .mthd.prev = -0x020000,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
new file mode 100644
index 000000000000..cc3c7a4ca747
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
@@ -0,0 +1,10 @@
+#ifndef __NVKM_DISP_PRIV_H__
+#define __NVKM_DISP_PRIV_H__
+
+#include <engine/disp.h>
+
+struct nouveau_disp_impl {
+ struct nouveau_oclass base;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
index 944e73ac485c..1cfb3bb90131 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -53,6 +53,9 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
case NVF0_DISP_MAST_CLASS:
case NVF0_DISP_SYNC_CLASS:
case NVF0_DISP_OVLY_CLASS:
+ case GM107_DISP_MAST_CLASS:
+ case GM107_DISP_SYNC_CLASS:
+ case GM107_DISP_OVLY_CLASS:
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
index 5e077e4ed7f6..2914646c8709 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
@@ -119,7 +119,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
device->chipset, falcon->addr >> 12);
- ret = request_firmware(&fw, name, &device->pdev->dev);
+ ret = request_firmware(&fw, name, nv_device_base(device));
if (ret == 0) {
falcon->code.data = vmemdup(fw->data, fw->size);
falcon->code.size = fw->size;
@@ -138,7 +138,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
device->chipset, falcon->addr >> 12);
- ret = request_firmware(&fw, name, &device->pdev->dev);
+ ret = request_firmware(&fw, name, nv_device_base(device));
if (ret) {
nv_error(falcon, "unable to load firmware data\n");
return ret;
@@ -153,7 +153,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
device->chipset, falcon->addr >> 12);
- ret = request_firmware(&fw, name, &device->pdev->dev);
+ ret = request_firmware(&fw, name, nv_device_base(device));
if (ret) {
nv_error(falcon, "unable to load firmware code\n");
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index d3ec436d9cb5..6f9041ced9a2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -86,7 +86,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
}
/* map fifo control registers */
- chan->user = ioremap(pci_resource_start(device->pdev, bar) + addr +
+ chan->user = ioremap(nv_device_resource_start(device, bar) + addr +
(chan->chid * size), size);
if (!chan->user)
return -EFAULT;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index b22a33f0702d..fa1e719872b7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -41,8 +41,16 @@
struct nvc0_fifo_priv {
struct nouveau_fifo base;
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
+
+ struct work_struct fault;
+ u64 mask;
+
+ struct {
+ struct nouveau_gpuobj *mem[2];
+ int active;
+ wait_queue_head_t wait;
+ } runlist;
+
struct {
struct nouveau_gpuobj *mem;
struct nouveau_vma bar;
@@ -58,6 +66,11 @@ struct nvc0_fifo_base {
struct nvc0_fifo_chan {
struct nouveau_fifo_chan base;
+ enum {
+ STOPPED,
+ RUNNING,
+ KILLED
+ } state;
};
/*******************************************************************************
@@ -65,29 +78,33 @@ struct nvc0_fifo_chan {
******************************************************************************/
static void
-nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
+nvc0_fifo_runlist_update(struct nvc0_fifo_priv *priv)
{
struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_gpuobj *cur;
int i, p;
mutex_lock(&nv_subdev(priv)->mutex);
- cur = priv->playlist[priv->cur_playlist];
- priv->cur_playlist = !priv->cur_playlist;
+ cur = priv->runlist.mem[priv->runlist.active];
+ priv->runlist.active = !priv->runlist.active;
for (i = 0, p = 0; i < 128; i++) {
- if (!(nv_rd32(priv, 0x003004 + (i * 8)) & 1))
- continue;
- nv_wo32(cur, p + 0, i);
- nv_wo32(cur, p + 4, 0x00000004);
- p += 8;
+ struct nvc0_fifo_chan *chan = (void *)priv->base.channel[i];
+ if (chan && chan->state == RUNNING) {
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000004);
+ p += 8;
+ }
}
bar->flush(bar);
nv_wr32(priv, 0x002270, cur->addr >> 12);
nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
- if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
- nv_error(priv, "playlist update failed\n");
+
+ if (wait_event_timeout(priv->runlist.wait,
+ !(nv_rd32(priv, 0x00227c) & 0x00100000),
+ msecs_to_jiffies(2000)) == 0)
+ nv_error(priv, "runlist update timeout\n");
mutex_unlock(&nv_subdev(priv)->mutex);
}
@@ -239,30 +256,32 @@ nvc0_fifo_chan_init(struct nouveau_object *object)
return ret;
nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
- nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
- nvc0_fifo_playlist_update(priv);
+
+ if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
+ nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
+ nvc0_fifo_runlist_update(priv);
+ }
+
return 0;
}
+static void nvc0_fifo_intr_engine(struct nvc0_fifo_priv *priv);
+
static int
nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
struct nvc0_fifo_priv *priv = (void *)object->engine;
struct nvc0_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
- u32 mask, engine;
- nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
- nvc0_fifo_playlist_update(priv);
- mask = nv_rd32(priv, 0x0025a4);
- for (engine = 0; mask && engine < 16; engine++) {
- if (!(mask & (1 << engine)))
- continue;
- nv_mask(priv, 0x0025a8 + (engine * 4), 0x00000000, 0x00000000);
- mask &= ~(1 << engine);
+ if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
+ nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
+ nvc0_fifo_runlist_update(priv);
}
- nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
+ nvc0_fifo_intr_engine(priv);
+
+ nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
return nouveau_fifo_channel_fini(&chan->base, suspend);
}
@@ -345,11 +364,177 @@ nvc0_fifo_cclass = {
* PFIFO engine
******************************************************************************/
-static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
+static inline int
+nvc0_fifo_engidx(struct nvc0_fifo_priv *priv, u32 engn)
+{
+ switch (engn) {
+ case NVDEV_ENGINE_GR : engn = 0; break;
+ case NVDEV_ENGINE_BSP : engn = 1; break;
+ case NVDEV_ENGINE_PPP : engn = 2; break;
+ case NVDEV_ENGINE_VP : engn = 3; break;
+ case NVDEV_ENGINE_COPY0: engn = 4; break;
+ case NVDEV_ENGINE_COPY1: engn = 5; break;
+ default:
+ return -1;
+ }
+
+ return engn;
+}
+
+static inline struct nouveau_engine *
+nvc0_fifo_engine(struct nvc0_fifo_priv *priv, u32 engn)
+{
+ switch (engn) {
+ case 0: engn = NVDEV_ENGINE_GR; break;
+ case 1: engn = NVDEV_ENGINE_BSP; break;
+ case 2: engn = NVDEV_ENGINE_PPP; break;
+ case 3: engn = NVDEV_ENGINE_VP; break;
+ case 4: engn = NVDEV_ENGINE_COPY0; break;
+ case 5: engn = NVDEV_ENGINE_COPY1; break;
+ default:
+ return NULL;
+ }
+
+ return nouveau_engine(priv, engn);
+}
+
+static void
+nvc0_fifo_recover_work(struct work_struct *work)
+{
+ struct nvc0_fifo_priv *priv = container_of(work, typeof(*priv), fault);
+ struct nouveau_object *engine;
+ unsigned long flags;
+ u32 engn, engm = 0;
+ u64 mask, todo;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ mask = priv->mask;
+ priv->mask = 0ULL;
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+
+ for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
+ engm |= 1 << nvc0_fifo_engidx(priv, engn);
+ nv_mask(priv, 0x002630, engm, engm);
+
+ for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
+ if ((engine = (void *)nouveau_engine(priv, engn))) {
+ nv_ofuncs(engine)->fini(engine, false);
+ WARN_ON(nv_ofuncs(engine)->init(engine));
+ }
+ }
+
+ nvc0_fifo_runlist_update(priv);
+ nv_wr32(priv, 0x00262c, engm);
+ nv_mask(priv, 0x002630, engm, 0x00000000);
+}
+
+static void
+nvc0_fifo_recover(struct nvc0_fifo_priv *priv, struct nouveau_engine *engine,
+ struct nvc0_fifo_chan *chan)
+{
+ struct nouveau_object *engobj = nv_object(engine);
+ u32 chid = chan->base.chid;
+ unsigned long flags;
+
+ nv_error(priv, "%s engine fault on channel %d, recovering...\n",
+ nv_subdev(engine)->name, chid);
+
+ nv_mask(priv, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
+ chan->state = KILLED;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ priv->mask |= 1ULL << nv_engidx(engobj);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ schedule_work(&priv->fault);
+}
+
+static int
+nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
+{
+ struct nvc0_fifo_chan *chan = NULL;
+ struct nouveau_handle *bind;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ if (likely(chid >= priv->base.min && chid <= priv->base.max))
+ chan = (void *)priv->base.channel[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+ if (likely(bind)) {
+ if (!mthd || !nv_call(bind->object, mthd, data))
+ ret = 0;
+ nouveau_namedb_put(bind);
+ }
+
+out:
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return ret;
+}
+
+static const struct nouveau_enum
+nvc0_fifo_sched_reason[] = {
+ { 0x0a, "CTXSW_TIMEOUT" },
+ {}
+};
+
+static void
+nvc0_fifo_intr_sched_ctxsw(struct nvc0_fifo_priv *priv)
+{
+ struct nouveau_engine *engine;
+ struct nvc0_fifo_chan *chan;
+ u32 engn;
+
+ for (engn = 0; engn < 6; engn++) {
+ u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
+ u32 busy = (stat & 0x80000000);
+ u32 save = (stat & 0x00100000); /* maybe? */
+ u32 unk0 = (stat & 0x00040000);
+ u32 unk1 = (stat & 0x00001000);
+ u32 chid = (stat & 0x0000007f);
+ (void)save;
+
+ if (busy && unk0 && unk1) {
+ if (!(chan = (void *)priv->base.channel[chid]))
+ continue;
+ if (!(engine = nvc0_fifo_engine(priv, engn)))
+ continue;
+ nvc0_fifo_recover(priv, engine, chan);
+ }
+ }
+}
+
+static void
+nvc0_fifo_intr_sched(struct nvc0_fifo_priv *priv)
+{
+ u32 intr = nv_rd32(priv, 0x00254c);
+ u32 code = intr & 0x000000ff;
+ const struct nouveau_enum *en;
+ char enunk[6] = "";
+
+ en = nouveau_enum_find(nvc0_fifo_sched_reason, code);
+ if (!en)
+ snprintf(enunk, sizeof(enunk), "UNK%02x", code);
+
+ nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
+
+ switch (code) {
+ case 0x0a:
+ nvc0_fifo_intr_sched_ctxsw(priv);
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct nouveau_enum
+nvc0_fifo_fault_engine[] = {
{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
- { 0x03, "PEEPHOLE" },
- { 0x04, "BAR1" },
- { 0x05, "BAR3" },
+ { 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
+ { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
{ 0x10, "PBSP", NULL, NVDEV_ENGINE_BSP },
{ 0x11, "PPPP", NULL, NVDEV_ENGINE_PPP },
@@ -361,7 +546,8 @@ static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
{}
};
-static const struct nouveau_enum nvc0_fifo_fault_reason[] = {
+static const struct nouveau_enum
+nvc0_fifo_fault_reason[] = {
{ 0x00, "PT_NOT_PRESENT" },
{ 0x01, "PT_TOO_SHORT" },
{ 0x02, "PAGE_NOT_PRESENT" },
@@ -374,7 +560,8 @@ static const struct nouveau_enum nvc0_fifo_fault_reason[] = {
{}
};
-static const struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
+static const struct nouveau_enum
+nvc0_fifo_fault_hubclient[] = {
{ 0x01, "PCOPY0" },
{ 0x02, "PCOPY1" },
{ 0x04, "DISPATCH" },
@@ -392,7 +579,8 @@ static const struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
{}
};
-static const struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
+static const struct nouveau_enum
+nvc0_fifo_fault_gpcclient[] = {
{ 0x01, "TEX" },
{ 0x0c, "ESETUP" },
{ 0x0e, "CTXCTL" },
@@ -400,92 +588,92 @@ static const struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
{}
};
-static const struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
-/* { 0x00008000, "" } seen with null ib push */
- { 0x00200000, "ILLEGAL_MTHD" },
- { 0x00800000, "EMPTY_SUBC" },
- {}
-};
-
static void
-nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
+nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit)
{
u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+ u32 gpc = (stat & 0x1f000000) >> 24;
u32 client = (stat & 0x00001f00) >> 8;
- const struct nouveau_enum *en;
- struct nouveau_engine *engine;
- struct nouveau_object *engctx = NULL;
-
- switch (unit) {
- case 3: /* PEEPHOLE */
- nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
- break;
- case 4: /* BAR1 */
- nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
- break;
- case 5: /* BAR3 */
- nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
- break;
- default:
- break;
+ u32 write = (stat & 0x00000080);
+ u32 hub = (stat & 0x00000040);
+ u32 reason = (stat & 0x0000000f);
+ struct nouveau_object *engctx = NULL, *object;
+ struct nouveau_engine *engine = NULL;
+ const struct nouveau_enum *er, *eu, *ec;
+ char erunk[6] = "";
+ char euunk[6] = "";
+ char ecunk[6] = "";
+ char gpcid[3] = "";
+
+ er = nouveau_enum_find(nvc0_fifo_fault_reason, reason);
+ if (!er)
+ snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
+
+ eu = nouveau_enum_find(nvc0_fifo_fault_engine, unit);
+ if (eu) {
+ switch (eu->data2) {
+ case NVDEV_SUBDEV_BAR:
+ nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+ break;
+ case NVDEV_SUBDEV_INSTMEM:
+ nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+ break;
+ case NVDEV_ENGINE_IFB:
+ nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
+ break;
+ default:
+ engine = nouveau_engine(priv, eu->data2);
+ if (engine)
+ engctx = nouveau_engctx_get(engine, inst);
+ break;
+ }
+ } else {
+ snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
}
- nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
- "write" : "read", (u64)vahi << 32 | valo);
- nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
- pr_cont("] from ");
- en = nouveau_enum_print(nvc0_fifo_fault_unit, unit);
- if (stat & 0x00000040) {
- pr_cont("/");
- nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
+ if (hub) {
+ ec = nouveau_enum_find(nvc0_fifo_fault_hubclient, client);
} else {
- pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
- nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
+ ec = nouveau_enum_find(nvc0_fifo_fault_gpcclient, client);
+ snprintf(gpcid, sizeof(gpcid), "%d", gpc);
}
- if (en && en->data2) {
- engine = nouveau_engine(priv, en->data2);
- if (engine)
- engctx = nouveau_engctx_get(engine, inst);
-
+ if (!ec)
+ snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
+
+ nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
+ "channel 0x%010llx [%s]\n", write ? "write" : "read",
+ (u64)vahi << 32 | valo, er ? er->name : erunk,
+ eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
+ ec ? ec->name : ecunk, (u64)inst << 12,
+ nouveau_client_name(engctx));
+
+ object = engctx;
+ while (object) {
+ switch (nv_mclass(object)) {
+ case NVC0_CHANNEL_IND_CLASS:
+ nvc0_fifo_recover(priv, engine, (void *)object);
+ break;
+ }
+ object = object->parent;
}
- pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
- nouveau_client_name(engctx));
nouveau_engctx_put(engctx);
}
-static int
-nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
-{
- struct nvc0_fifo_chan *chan = NULL;
- struct nouveau_handle *bind;
- unsigned long flags;
- int ret = -EINVAL;
-
- spin_lock_irqsave(&priv->base.lock, flags);
- if (likely(chid >= priv->base.min && chid <= priv->base.max))
- chan = (void *)priv->base.channel[chid];
- if (unlikely(!chan))
- goto out;
-
- bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
- if (likely(bind)) {
- if (!mthd || !nv_call(bind->object, mthd, data))
- ret = 0;
- nouveau_namedb_put(bind);
- }
-
-out:
- spin_unlock_irqrestore(&priv->base.lock, flags);
- return ret;
-}
+static const struct nouveau_bitfield
+nvc0_fifo_pbdma_intr[] = {
+/* { 0x00008000, "" } seen with null ib push */
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
static void
-nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
+nvc0_fifo_intr_pbdma(struct nvc0_fifo_priv *priv, int unit)
{
u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
@@ -501,11 +689,11 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
}
if (show) {
- nv_error(priv, "SUBFIFO%d:", unit);
- nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
+ nv_error(priv, "PBDMA%d:", unit);
+ nouveau_bitfield_print(nvc0_fifo_pbdma_intr, show);
pr_cont("\n");
nv_error(priv,
- "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
unit, chid,
nouveau_client_name_for_fifo_chid(&priv->base, chid),
subc, mthd, data);
@@ -516,6 +704,56 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
}
static void
+nvc0_fifo_intr_runlist(struct nvc0_fifo_priv *priv)
+{
+ u32 intr = nv_rd32(priv, 0x002a00);
+
+ if (intr & 0x10000000) {
+ wake_up(&priv->runlist.wait);
+ nv_wr32(priv, 0x002a00, 0x10000000);
+ intr &= ~0x10000000;
+ }
+
+ if (intr) {
+ nv_error(priv, "RUNLIST 0x%08x\n", intr);
+ nv_wr32(priv, 0x002a00, intr);
+ }
+}
+
+static void
+nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
+{
+ u32 intr = nv_rd32(priv, 0x0025a8 + (engn * 0x04));
+ u32 inte = nv_rd32(priv, 0x002628);
+ u32 unkn;
+
+ for (unkn = 0; unkn < 8; unkn++) {
+ u32 ints = (intr >> (unkn * 0x04)) & inte;
+ if (ints & 0x1) {
+ nouveau_event_trigger(priv->base.uevent, 0);
+ ints &= ~1;
+ }
+ if (ints) {
+ nv_error(priv, "ENGINE %d %d %01x", engn, unkn, ints);
+ nv_mask(priv, 0x002628, ints, 0);
+ }
+ }
+
+ nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+}
+
+static void
+nvc0_fifo_intr_engine(struct nvc0_fifo_priv *priv)
+{
+ u32 mask = nv_rd32(priv, 0x0025a4);
+ while (mask) {
+ u32 unit = __ffs(mask);
+ nvc0_fifo_intr_engine_unit(priv, unit);
+ mask &= ~(1 << unit);
+ }
+}
+
+static void
nvc0_fifo_intr(struct nouveau_subdev *subdev)
{
struct nvc0_fifo_priv *priv = (void *)subdev;
@@ -530,8 +768,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00000100) {
- u32 intr = nv_rd32(priv, 0x00254c);
- nv_warn(priv, "INTR 0x00000100: 0x%08x\n", intr);
+ nvc0_fifo_intr_sched(priv);
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
@@ -551,52 +788,41 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x10000000) {
- u32 units = nv_rd32(priv, 0x00259c);
- u32 u = units;
-
- while (u) {
- int i = ffs(u) - 1;
- nvc0_fifo_isr_vm_fault(priv, i);
- u &= ~(1 << i);
+ u32 mask = nv_rd32(priv, 0x00259c);
+ while (mask) {
+ u32 unit = __ffs(mask);
+ nvc0_fifo_intr_fault(priv, unit);
+ nv_wr32(priv, 0x00259c, (1 << unit));
+ mask &= ~(1 << unit);
}
-
- nv_wr32(priv, 0x00259c, units);
stat &= ~0x10000000;
}
if (stat & 0x20000000) {
- u32 units = nv_rd32(priv, 0x0025a0);
- u32 u = units;
-
- while (u) {
- int i = ffs(u) - 1;
- nvc0_fifo_isr_subfifo_intr(priv, i);
- u &= ~(1 << i);
+ u32 mask = nv_rd32(priv, 0x0025a0);
+ while (mask) {
+ u32 unit = __ffs(mask);
+ nvc0_fifo_intr_pbdma(priv, unit);
+ nv_wr32(priv, 0x0025a0, (1 << unit));
+ mask &= ~(1 << unit);
}
-
- nv_wr32(priv, 0x0025a0, units);
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
- u32 intr0 = nv_rd32(priv, 0x0025a4);
- u32 intr1 = nv_mask(priv, 0x002a00, 0x00000000, 0x00000);
- nv_debug(priv, "INTR 0x40000000: 0x%08x 0x%08x\n",
- intr0, intr1);
+ nvc0_fifo_intr_runlist(priv);
stat &= ~0x40000000;
}
if (stat & 0x80000000) {
- u32 intr = nv_mask(priv, 0x0025a8, 0x00000000, 0x00000000);
- nouveau_event_trigger(priv->base.uevent, 0);
- nv_debug(priv, "INTR 0x80000000: 0x%08x\n", intr);
+ nvc0_fifo_intr_engine(priv);
stat &= ~0x80000000;
}
if (stat) {
- nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+ nv_error(priv, "INTR 0x%08x\n", stat);
+ nv_mask(priv, 0x002140, stat, 0x00000000);
nv_wr32(priv, 0x002100, stat);
- nv_wr32(priv, 0x002140, 0);
}
}
@@ -627,16 +853,20 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ INIT_WORK(&priv->fault, nvc0_fifo_recover_work);
+
ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
- &priv->playlist[0]);
+ &priv->runlist.mem[0]);
if (ret)
return ret;
ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
- &priv->playlist[1]);
+ &priv->runlist.mem[1]);
if (ret)
return ret;
+ init_waitqueue_head(&priv->runlist.wait);
+
ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
&priv->user.mem);
if (ret)
@@ -665,8 +895,8 @@ nvc0_fifo_dtor(struct nouveau_object *object)
nouveau_gpuobj_unmap(&priv->user.bar);
nouveau_gpuobj_ref(NULL, &priv->user.mem);
- nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
- nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+ nouveau_gpuobj_ref(NULL, &priv->runlist.mem[0]);
+ nouveau_gpuobj_ref(NULL, &priv->runlist.mem[1]);
nouveau_fifo_destroy(&priv->base);
}
@@ -685,9 +915,9 @@ nvc0_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x002204, 0xffffffff);
priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
- nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
+ nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
- /* assign engines to subfifos */
+ /* assign engines to PBDMAs */
if (priv->spoon_nr >= 3) {
nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
@@ -697,7 +927,7 @@ nvc0_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
}
- /* PSUBFIFO[n] */
+ /* PBDMA[n] */
for (i = 0; i < priv->spoon_nr; i++) {
nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
@@ -707,10 +937,9 @@ nvc0_fifo_init(struct nouveau_object *object)
nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
- nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0x3fffffff);
- nv_wr32(priv, 0x002628, 0x00000001); /* makes mthd 0x20 work */
+ nv_wr32(priv, 0x002140, 0x7fffffff);
+ nv_wr32(priv, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 54c1b5b471cd..a9a1a9c9f9f2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -60,10 +60,15 @@ static const struct {
struct nve0_fifo_engn {
struct nouveau_gpuobj *runlist[2];
int cur_runlist;
+ wait_queue_head_t wait;
};
struct nve0_fifo_priv {
struct nouveau_fifo base;
+
+ struct work_struct fault;
+ u64 mask;
+
struct nve0_fifo_engn engine[FIFO_ENGINE_NR];
struct {
struct nouveau_gpuobj *mem;
@@ -81,6 +86,11 @@ struct nve0_fifo_base {
struct nve0_fifo_chan {
struct nouveau_fifo_chan base;
u32 engine;
+ enum {
+ STOPPED,
+ RUNNING,
+ KILLED
+ } state;
};
/*******************************************************************************
@@ -93,7 +103,6 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
struct nouveau_bar *bar = nouveau_bar(priv);
struct nve0_fifo_engn *engn = &priv->engine[engine];
struct nouveau_gpuobj *cur;
- u32 match = (engine << 16) | 0x00000001;
int i, p;
mutex_lock(&nv_subdev(priv)->mutex);
@@ -101,18 +110,21 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
engn->cur_runlist = !engn->cur_runlist;
for (i = 0, p = 0; i < priv->base.max; i++) {
- u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
- if (ctrl != match)
- continue;
- nv_wo32(cur, p + 0, i);
- nv_wo32(cur, p + 4, 0x00000000);
- p += 8;
+ struct nve0_fifo_chan *chan = (void *)priv->base.channel[i];
+ if (chan && chan->state == RUNNING && chan->engine == engine) {
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000000);
+ p += 8;
+ }
}
bar->flush(bar);
nv_wr32(priv, 0x002270, cur->addr >> 12);
nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
- if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000))
+
+ if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 +
+ (engine * 0x08)) & 0x00100000),
+ msecs_to_jiffies(2000)) == 0)
nv_error(priv, "runlist %d update timeout\n", engine);
mutex_unlock(&nv_subdev(priv)->mutex);
}
@@ -129,9 +141,11 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW :
+ return 0;
case NVDEV_ENGINE_COPY0:
case NVDEV_ENGINE_COPY1:
case NVDEV_ENGINE_COPY2:
+ nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
return 0;
case NVDEV_ENGINE_GR : addr = 0x0210; break;
case NVDEV_ENGINE_BSP : addr = 0x0270; break;
@@ -279,9 +293,13 @@ nve0_fifo_chan_init(struct nouveau_object *object)
nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
- nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
- nve0_fifo_runlist_update(priv, chan->engine);
- nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+
+ if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
+ nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+ nve0_fifo_runlist_update(priv, chan->engine);
+ nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+ }
+
return 0;
}
@@ -292,10 +310,12 @@ nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
struct nve0_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
- nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
- nve0_fifo_runlist_update(priv, chan->engine);
- nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
+ if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
+ nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
+ nve0_fifo_runlist_update(priv, chan->engine);
+ }
+ nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
return nouveau_fifo_channel_fini(&chan->base, suspend);
}
@@ -377,14 +397,211 @@ nve0_fifo_cclass = {
* PFIFO engine
******************************************************************************/
-static const struct nouveau_enum nve0_fifo_sched_reason[] = {
+static inline int
+nve0_fifo_engidx(struct nve0_fifo_priv *priv, u32 engn)
+{
+ switch (engn) {
+ case NVDEV_ENGINE_GR :
+ case NVDEV_ENGINE_COPY2: engn = 0; break;
+ case NVDEV_ENGINE_BSP : engn = 1; break;
+ case NVDEV_ENGINE_PPP : engn = 2; break;
+ case NVDEV_ENGINE_VP : engn = 3; break;
+ case NVDEV_ENGINE_COPY0: engn = 4; break;
+ case NVDEV_ENGINE_COPY1: engn = 5; break;
+ case NVDEV_ENGINE_VENC : engn = 6; break;
+ default:
+ return -1;
+ }
+
+ return engn;
+}
+
+static inline struct nouveau_engine *
+nve0_fifo_engine(struct nve0_fifo_priv *priv, u32 engn)
+{
+ if (engn >= ARRAY_SIZE(fifo_engine))
+ return NULL;
+ return nouveau_engine(priv, fifo_engine[engn].subdev);
+}
+
+static void
+nve0_fifo_recover_work(struct work_struct *work)
+{
+ struct nve0_fifo_priv *priv = container_of(work, typeof(*priv), fault);
+ struct nouveau_object *engine;
+ unsigned long flags;
+ u32 engn, engm = 0;
+ u64 mask, todo;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ mask = priv->mask;
+ priv->mask = 0ULL;
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+
+ for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
+ engm |= 1 << nve0_fifo_engidx(priv, engn);
+ nv_mask(priv, 0x002630, engm, engm);
+
+ for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
+ if ((engine = (void *)nouveau_engine(priv, engn))) {
+ nv_ofuncs(engine)->fini(engine, false);
+ WARN_ON(nv_ofuncs(engine)->init(engine));
+ }
+ nve0_fifo_runlist_update(priv, nve0_fifo_engidx(priv, engn));
+ }
+
+ nv_wr32(priv, 0x00262c, engm);
+ nv_mask(priv, 0x002630, engm, 0x00000000);
+}
+
+static void
+nve0_fifo_recover(struct nve0_fifo_priv *priv, struct nouveau_engine *engine,
+ struct nve0_fifo_chan *chan)
+{
+ struct nouveau_object *engobj = nv_object(engine);
+ u32 chid = chan->base.chid;
+ unsigned long flags;
+
+ nv_error(priv, "%s engine fault on channel %d, recovering...\n",
+ nv_subdev(engine)->name, chid);
+
+ nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
+ chan->state = KILLED;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ priv->mask |= 1ULL << nv_engidx(engobj);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ schedule_work(&priv->fault);
+}
+
+static int
+nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
+{
+ struct nve0_fifo_chan *chan = NULL;
+ struct nouveau_handle *bind;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ if (likely(chid >= priv->base.min && chid <= priv->base.max))
+ chan = (void *)priv->base.channel[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+ if (likely(bind)) {
+ if (!mthd || !nv_call(bind->object, mthd, data))
+ ret = 0;
+ nouveau_namedb_put(bind);
+ }
+
+out:
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return ret;
+}
+
+static const struct nouveau_enum
+nve0_fifo_bind_reason[] = {
+ { 0x01, "BIND_NOT_UNBOUND" },
+ { 0x02, "SNOOP_WITHOUT_BAR1" },
+ { 0x03, "UNBIND_WHILE_RUNNING" },
+ { 0x05, "INVALID_RUNLIST" },
+ { 0x06, "INVALID_CTX_TGT" },
+ { 0x0b, "UNBIND_WHILE_PARKED" },
+ {}
+};
+
+static void
+nve0_fifo_intr_bind(struct nve0_fifo_priv *priv)
+{
+ u32 intr = nv_rd32(priv, 0x00252c);
+ u32 code = intr & 0x000000ff;
+ const struct nouveau_enum *en;
+ char enunk[6] = "";
+
+ en = nouveau_enum_find(nve0_fifo_bind_reason, code);
+ if (!en)
+ snprintf(enunk, sizeof(enunk), "UNK%02x", code);
+
+ nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk);
+}
+
+static const struct nouveau_enum
+nve0_fifo_sched_reason[] = {
{ 0x0a, "CTXSW_TIMEOUT" },
{}
};
-static const struct nouveau_enum nve0_fifo_fault_engine[] = {
+static void
+nve0_fifo_intr_sched_ctxsw(struct nve0_fifo_priv *priv)
+{
+ struct nouveau_engine *engine;
+ struct nve0_fifo_chan *chan;
+ u32 engn;
+
+ for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
+ u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
+ u32 busy = (stat & 0x80000000);
+ u32 next = (stat & 0x07ff0000) >> 16;
+ u32 chsw = (stat & 0x00008000);
+ u32 save = (stat & 0x00004000);
+ u32 load = (stat & 0x00002000);
+ u32 prev = (stat & 0x000007ff);
+ u32 chid = load ? next : prev;
+ (void)save;
+
+ if (busy && chsw) {
+ if (!(chan = (void *)priv->base.channel[chid]))
+ continue;
+ if (!(engine = nve0_fifo_engine(priv, engn)))
+ continue;
+ nve0_fifo_recover(priv, engine, chan);
+ }
+ }
+}
+
+static void
+nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
+{
+ u32 intr = nv_rd32(priv, 0x00254c);
+ u32 code = intr & 0x000000ff;
+ const struct nouveau_enum *en;
+ char enunk[6] = "";
+
+ en = nouveau_enum_find(nve0_fifo_sched_reason, code);
+ if (!en)
+ snprintf(enunk, sizeof(enunk), "UNK%02x", code);
+
+ nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
+
+ switch (code) {
+ case 0x0a:
+ nve0_fifo_intr_sched_ctxsw(priv);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
+{
+ u32 stat = nv_rd32(priv, 0x00256c);
+ nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
+ nv_wr32(priv, 0x00256c, stat);
+}
+
+static void
+nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv)
+{
+ u32 stat = nv_rd32(priv, 0x00259c);
+ nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
+}
+
+static const struct nouveau_enum
+nve0_fifo_fault_engine[] = {
{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
- { 0x03, "IFB" },
+ { 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
@@ -402,7 +619,8 @@ static const struct nouveau_enum nve0_fifo_fault_engine[] = {
{}
};
-static const struct nouveau_enum nve0_fifo_fault_reason[] = {
+static const struct nouveau_enum
+nve0_fifo_fault_reason[] = {
{ 0x00, "PDE" },
{ 0x01, "PDE_SIZE" },
{ 0x02, "PTE" },
@@ -422,7 +640,8 @@ static const struct nouveau_enum nve0_fifo_fault_reason[] = {
{}
};
-static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+static const struct nouveau_enum
+nve0_fifo_fault_hubclient[] = {
{ 0x00, "VIP" },
{ 0x01, "CE0" },
{ 0x02, "CE1" },
@@ -458,7 +677,8 @@ static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
{}
};
-static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+static const struct nouveau_enum
+nve0_fifo_fault_gpcclient[] = {
{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
@@ -483,6 +703,82 @@ static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
{}
};
+static void
+nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
+{
+ u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
+ u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
+ u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
+ u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+ u32 gpc = (stat & 0x1f000000) >> 24;
+ u32 client = (stat & 0x00001f00) >> 8;
+ u32 write = (stat & 0x00000080);
+ u32 hub = (stat & 0x00000040);
+ u32 reason = (stat & 0x0000000f);
+ struct nouveau_object *engctx = NULL, *object;
+ struct nouveau_engine *engine = NULL;
+ const struct nouveau_enum *er, *eu, *ec;
+ char erunk[6] = "";
+ char euunk[6] = "";
+ char ecunk[6] = "";
+ char gpcid[3] = "";
+
+ er = nouveau_enum_find(nve0_fifo_fault_reason, reason);
+ if (!er)
+ snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
+
+ eu = nouveau_enum_find(nve0_fifo_fault_engine, unit);
+ if (eu) {
+ switch (eu->data2) {
+ case NVDEV_SUBDEV_BAR:
+ nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+ break;
+ case NVDEV_SUBDEV_INSTMEM:
+ nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+ break;
+ case NVDEV_ENGINE_IFB:
+ nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
+ break;
+ default:
+ engine = nouveau_engine(priv, eu->data2);
+ if (engine)
+ engctx = nouveau_engctx_get(engine, inst);
+ break;
+ }
+ } else {
+ snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
+ }
+
+ if (hub) {
+ ec = nouveau_enum_find(nve0_fifo_fault_hubclient, client);
+ } else {
+ ec = nouveau_enum_find(nve0_fifo_fault_gpcclient, client);
+ snprintf(gpcid, sizeof(gpcid), "%d", gpc);
+ }
+
+ if (!ec)
+ snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
+
+ nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
+ "channel 0x%010llx [%s]\n", write ? "write" : "read",
+ (u64)vahi << 32 | valo, er ? er->name : erunk,
+ eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
+ ec ? ec->name : ecunk, (u64)inst << 12,
+ nouveau_client_name(engctx));
+
+ object = engctx;
+ while (object) {
+ switch (nv_mclass(object)) {
+ case NVE0_CHANNEL_IND_CLASS:
+ nve0_fifo_recover(priv, engine, (void *)object);
+ break;
+ }
+ object = object->parent;
+ }
+
+ nouveau_engctx_put(engctx);
+}
+
static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
{ 0x00000001, "MEMREQ" },
{ 0x00000002, "MEMACK_TIMEOUT" },
@@ -518,104 +814,6 @@ static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
};
static void
-nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
-{
- u32 intr = nv_rd32(priv, 0x00254c);
- u32 code = intr & 0x000000ff;
- nv_error(priv, "SCHED_ERROR [");
- nouveau_enum_print(nve0_fifo_sched_reason, code);
- pr_cont("]\n");
-}
-
-static void
-nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
-{
- u32 stat = nv_rd32(priv, 0x00256c);
- nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
- nv_wr32(priv, 0x00256c, stat);
-}
-
-static void
-nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv)
-{
- u32 stat = nv_rd32(priv, 0x00259c);
- nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
-}
-
-static void
-nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
-{
- u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
- u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
- u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
- u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
- u32 client = (stat & 0x00001f00) >> 8;
- struct nouveau_engine *engine = NULL;
- struct nouveau_object *engctx = NULL;
- const struct nouveau_enum *en;
- const char *name = "unknown";
-
- nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
- "write" : "read", (u64)vahi << 32 | valo);
- nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
- pr_cont("] from ");
- en = nouveau_enum_print(nve0_fifo_fault_engine, unit);
- if (stat & 0x00000040) {
- pr_cont("/");
- nouveau_enum_print(nve0_fifo_fault_hubclient, client);
- } else {
- pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
- nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
- }
-
- if (en && en->data2) {
- if (en->data2 == NVDEV_SUBDEV_BAR) {
- nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
- name = "BAR1";
- } else
- if (en->data2 == NVDEV_SUBDEV_INSTMEM) {
- nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
- name = "BAR3";
- } else {
- engine = nouveau_engine(priv, en->data2);
- if (engine) {
- engctx = nouveau_engctx_get(engine, inst);
- name = nouveau_client_name(engctx);
- }
- }
- }
- pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12, name);
-
- nouveau_engctx_put(engctx);
-}
-
-static int
-nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
-{
- struct nve0_fifo_chan *chan = NULL;
- struct nouveau_handle *bind;
- unsigned long flags;
- int ret = -EINVAL;
-
- spin_lock_irqsave(&priv->base.lock, flags);
- if (likely(chid >= priv->base.min && chid <= priv->base.max))
- chan = (void *)priv->base.channel[chid];
- if (unlikely(!chan))
- goto out;
-
- bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
- if (likely(bind)) {
- if (!mthd || !nv_call(bind->object, mthd, data))
- ret = 0;
- nouveau_namedb_put(bind);
- }
-
-out:
- spin_unlock_irqrestore(&priv->base.lock, flags);
- return ret;
-}
-
-static void
nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
{
u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
@@ -647,6 +845,24 @@ nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
}
static void
+nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
+{
+ u32 mask = nv_rd32(priv, 0x002a00);
+ while (mask) {
+ u32 engn = __ffs(mask);
+ wake_up(&priv->engine[engn].wait);
+ nv_wr32(priv, 0x002a00, 1 << engn);
+ mask &= ~(1 << engn);
+ }
+}
+
+static void
+nve0_fifo_intr_engine(struct nve0_fifo_priv *priv)
+{
+ nouveau_event_trigger(priv->base.uevent, 0);
+}
+
+static void
nve0_fifo_intr(struct nouveau_subdev *subdev)
{
struct nve0_fifo_priv *priv = (void *)subdev;
@@ -654,8 +870,7 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
u32 stat = nv_rd32(priv, 0x002100) & mask;
if (stat & 0x00000001) {
- u32 stat = nv_rd32(priv, 0x00252c);
- nv_error(priv, "BIND_ERROR 0x%08x\n", stat);
+ nve0_fifo_intr_bind(priv);
nv_wr32(priv, 0x002100, 0x00000001);
stat &= ~0x00000001;
}
@@ -697,55 +912,42 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x10000000) {
- u32 units = nv_rd32(priv, 0x00259c);
- u32 u = units;
-
- while (u) {
- int i = ffs(u) - 1;
- nve0_fifo_intr_fault(priv, i);
- u &= ~(1 << i);
+ u32 mask = nv_rd32(priv, 0x00259c);
+ while (mask) {
+ u32 unit = __ffs(mask);
+ nve0_fifo_intr_fault(priv, unit);
+ nv_wr32(priv, 0x00259c, (1 << unit));
+ mask &= ~(1 << unit);
}
-
- nv_wr32(priv, 0x00259c, units);
stat &= ~0x10000000;
}
if (stat & 0x20000000) {
u32 mask = nv_rd32(priv, 0x0025a0);
- u32 temp = mask;
-
- while (temp) {
- u32 unit = ffs(temp) - 1;
+ while (mask) {
+ u32 unit = __ffs(mask);
nve0_fifo_intr_pbdma(priv, unit);
- temp &= ~(1 << unit);
+ nv_wr32(priv, 0x0025a0, (1 << unit));
+ mask &= ~(1 << unit);
}
-
- nv_wr32(priv, 0x0025a0, mask);
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
- u32 mask = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
-
- while (mask) {
- u32 engn = ffs(mask) - 1;
- /* runlist event, not currently used */
- mask &= ~(1 << engn);
- }
-
+ nve0_fifo_intr_runlist(priv);
stat &= ~0x40000000;
}
if (stat & 0x80000000) {
- nouveau_event_trigger(priv->base.uevent, 0);
+ nve0_fifo_intr_engine(priv);
nv_wr32(priv, 0x002100, 0x80000000);
stat &= ~0x80000000;
}
if (stat) {
- nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+ nv_error(priv, "INTR 0x%08x\n", stat);
+ nv_mask(priv, 0x002140, stat, 0x00000000);
nv_wr32(priv, 0x002100, stat);
- nv_wr32(priv, 0x002140, 0);
}
}
@@ -802,9 +1004,8 @@ nve0_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
- nv_wr32(priv, 0x002a00, 0xffffffff);
nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0x3fffffff);
+ nv_wr32(priv, 0x002140, 0x7fffffff);
return 0;
}
@@ -840,6 +1041,8 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ INIT_WORK(&priv->fault, nve0_fifo_recover_work);
+
for (i = 0; i < FIFO_ENGINE_NR; i++) {
ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
0, &priv->engine[i].runlist[0]);
@@ -850,10 +1053,12 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
0, &priv->engine[i].runlist[1]);
if (ret)
return ret;
+
+ init_waitqueue_head(&priv->engine[i].wait);
}
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
new file mode 100644
index 000000000000..1dc37b1ddbfa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
@@ -0,0 +1,989 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "ctxnvc0.h"
+
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+gm107_grctx_init_icmd_0[] = {
+ { 0x001000, 1, 0x01, 0x00000004 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x0000a9, 1, 0x01, 0x0000ffff },
+ { 0x000038, 1, 0x01, 0x0fac6881 },
+ { 0x00003d, 1, 0x01, 0x00000001 },
+ { 0x0000e8, 8, 0x01, 0x00000400 },
+ { 0x000078, 8, 0x01, 0x00000300 },
+ { 0x000050, 1, 0x01, 0x00000011 },
+ { 0x000058, 8, 0x01, 0x00000008 },
+ { 0x000208, 8, 0x01, 0x00000001 },
+ { 0x000081, 1, 0x01, 0x00000001 },
+ { 0x000085, 1, 0x01, 0x00000004 },
+ { 0x000088, 1, 0x01, 0x00000400 },
+ { 0x000090, 1, 0x01, 0x00000300 },
+ { 0x000098, 1, 0x01, 0x00001001 },
+ { 0x0000e3, 1, 0x01, 0x00000001 },
+ { 0x0000da, 1, 0x01, 0x00000001 },
+ { 0x0000f8, 1, 0x01, 0x00000003 },
+ { 0x0000fa, 1, 0x01, 0x00000001 },
+ { 0x0000b1, 2, 0x01, 0x00000001 },
+ { 0x00009f, 4, 0x01, 0x0000ffff },
+ { 0x0000a8, 1, 0x01, 0x0000ffff },
+ { 0x0000ad, 1, 0x01, 0x0000013e },
+ { 0x0000e1, 1, 0x01, 0x00000010 },
+ { 0x000290, 16, 0x01, 0x00000000 },
+ { 0x0003b0, 16, 0x01, 0x00000000 },
+ { 0x0002a0, 16, 0x01, 0x00000000 },
+ { 0x000420, 16, 0x01, 0x00000000 },
+ { 0x0002b0, 16, 0x01, 0x00000000 },
+ { 0x000430, 16, 0x01, 0x00000000 },
+ { 0x0002c0, 16, 0x01, 0x00000000 },
+ { 0x0004d0, 16, 0x01, 0x00000000 },
+ { 0x000720, 16, 0x01, 0x00000000 },
+ { 0x0008c0, 16, 0x01, 0x00000000 },
+ { 0x000890, 16, 0x01, 0x00000000 },
+ { 0x0008e0, 16, 0x01, 0x00000000 },
+ { 0x0008a0, 16, 0x01, 0x00000000 },
+ { 0x0008f0, 16, 0x01, 0x00000000 },
+ { 0x00094c, 1, 0x01, 0x000000ff },
+ { 0x00094d, 1, 0x01, 0xffffffff },
+ { 0x00094e, 1, 0x01, 0x00000002 },
+ { 0x0002f2, 2, 0x01, 0x00000001 },
+ { 0x0002f5, 1, 0x01, 0x00000001 },
+ { 0x0002f7, 1, 0x01, 0x00000001 },
+ { 0x000303, 1, 0x01, 0x00000001 },
+ { 0x0002e6, 1, 0x01, 0x00000001 },
+ { 0x000466, 1, 0x01, 0x00000052 },
+ { 0x000301, 1, 0x01, 0x3f800000 },
+ { 0x000304, 1, 0x01, 0x30201000 },
+ { 0x000305, 1, 0x01, 0x70605040 },
+ { 0x000306, 1, 0x01, 0xb8a89888 },
+ { 0x000307, 1, 0x01, 0xf8e8d8c8 },
+ { 0x00030a, 1, 0x01, 0x00ffff00 },
+ { 0x0000de, 1, 0x01, 0x00000001 },
+ { 0x00030b, 1, 0x01, 0x0000001a },
+ { 0x00030c, 1, 0x01, 0x00000001 },
+ { 0x000318, 1, 0x01, 0x00000001 },
+ { 0x000340, 1, 0x01, 0x00000000 },
+ { 0x00037d, 1, 0x01, 0x00000006 },
+ { 0x0003a0, 1, 0x01, 0x00000002 },
+ { 0x0003aa, 1, 0x01, 0x00000001 },
+ { 0x0003a9, 1, 0x01, 0x00000001 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000383, 1, 0x01, 0x00000011 },
+ { 0x000360, 1, 0x01, 0x00000040 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x00037a, 1, 0x01, 0x00000012 },
+ { 0x000619, 1, 0x01, 0x00000003 },
+ { 0x000811, 1, 0x01, 0x00000003 },
+ { 0x000812, 1, 0x01, 0x00000004 },
+ { 0x000813, 1, 0x01, 0x00000006 },
+ { 0x000814, 1, 0x01, 0x00000008 },
+ { 0x000815, 1, 0x01, 0x0000000b },
+ { 0x000800, 6, 0x01, 0x00000001 },
+ { 0x000632, 1, 0x01, 0x00000001 },
+ { 0x000633, 1, 0x01, 0x00000002 },
+ { 0x000634, 1, 0x01, 0x00000003 },
+ { 0x000635, 1, 0x01, 0x00000004 },
+ { 0x000654, 1, 0x01, 0x3f800000 },
+ { 0x000657, 1, 0x01, 0x3f800000 },
+ { 0x000655, 2, 0x01, 0x3f800000 },
+ { 0x0006cd, 1, 0x01, 0x3f800000 },
+ { 0x0007f5, 1, 0x01, 0x3f800000 },
+ { 0x0007dc, 1, 0x01, 0x39291909 },
+ { 0x0007dd, 1, 0x01, 0x79695949 },
+ { 0x0007de, 1, 0x01, 0xb9a99989 },
+ { 0x0007df, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007e8, 1, 0x01, 0x00003210 },
+ { 0x0007e9, 1, 0x01, 0x00007654 },
+ { 0x0007ea, 1, 0x01, 0x00000098 },
+ { 0x0007ec, 1, 0x01, 0x39291909 },
+ { 0x0007ed, 1, 0x01, 0x79695949 },
+ { 0x0007ee, 1, 0x01, 0xb9a99989 },
+ { 0x0007ef, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007f0, 1, 0x01, 0x00003210 },
+ { 0x0007f1, 1, 0x01, 0x00007654 },
+ { 0x0007f2, 1, 0x01, 0x00000098 },
+ { 0x0005a5, 1, 0x01, 0x00000001 },
+ { 0x0005d0, 1, 0x01, 0x20181008 },
+ { 0x0005d1, 1, 0x01, 0x40383028 },
+ { 0x0005d2, 1, 0x01, 0x60585048 },
+ { 0x0005d3, 1, 0x01, 0x80787068 },
+ { 0x000980, 128, 0x01, 0x00000000 },
+ { 0x000468, 1, 0x01, 0x00000004 },
+ { 0x00046c, 1, 0x01, 0x00000001 },
+ { 0x000470, 96, 0x01, 0x00000000 },
+ { 0x000510, 16, 0x01, 0x3f800000 },
+ { 0x000520, 1, 0x01, 0x000002b6 },
+ { 0x000529, 1, 0x01, 0x00000001 },
+ { 0x000530, 16, 0x01, 0xffff0000 },
+ { 0x000550, 32, 0x01, 0xffff0000 },
+ { 0x000585, 1, 0x01, 0x0000003f },
+ { 0x000576, 1, 0x01, 0x00000003 },
+ { 0x00057b, 1, 0x01, 0x00000059 },
+ { 0x000586, 1, 0x01, 0x00000040 },
+ { 0x000582, 2, 0x01, 0x00000080 },
+ { 0x000595, 1, 0x01, 0x00400040 },
+ { 0x000596, 1, 0x01, 0x00000492 },
+ { 0x000597, 1, 0x01, 0x08080203 },
+ { 0x0005ad, 1, 0x01, 0x00000008 },
+ { 0x000598, 1, 0x01, 0x00020001 },
+ { 0x0005c2, 1, 0x01, 0x00000001 },
+ { 0x000638, 2, 0x01, 0x00000001 },
+ { 0x00063a, 1, 0x01, 0x00000002 },
+ { 0x00063b, 2, 0x01, 0x00000001 },
+ { 0x00063d, 1, 0x01, 0x00000002 },
+ { 0x00063e, 1, 0x01, 0x00000001 },
+ { 0x0008b8, 8, 0x01, 0x00000001 },
+ { 0x000900, 8, 0x01, 0x00000001 },
+ { 0x000908, 8, 0x01, 0x00000002 },
+ { 0x000910, 16, 0x01, 0x00000001 },
+ { 0x000920, 8, 0x01, 0x00000002 },
+ { 0x000928, 8, 0x01, 0x00000001 },
+ { 0x000662, 1, 0x01, 0x00000001 },
+ { 0x000648, 9, 0x01, 0x00000001 },
+ { 0x000658, 1, 0x01, 0x0000000f },
+ { 0x0007ff, 1, 0x01, 0x0000000a },
+ { 0x00066a, 1, 0x01, 0x40000000 },
+ { 0x00066b, 1, 0x01, 0x10000000 },
+ { 0x00066c, 2, 0x01, 0xffff0000 },
+ { 0x0007af, 2, 0x01, 0x00000008 },
+ { 0x0007f6, 1, 0x01, 0x00000001 },
+ { 0x0006b2, 1, 0x01, 0x00000055 },
+ { 0x0007ad, 1, 0x01, 0x00000003 },
+ { 0x000971, 1, 0x01, 0x00000008 },
+ { 0x000972, 1, 0x01, 0x00000040 },
+ { 0x000973, 1, 0x01, 0x0000012c },
+ { 0x00097c, 1, 0x01, 0x00000040 },
+ { 0x000975, 1, 0x01, 0x00000020 },
+ { 0x000976, 1, 0x01, 0x00000001 },
+ { 0x000977, 1, 0x01, 0x00000020 },
+ { 0x000978, 1, 0x01, 0x00000001 },
+ { 0x000957, 1, 0x01, 0x00000003 },
+ { 0x00095e, 1, 0x01, 0x20164010 },
+ { 0x00095f, 1, 0x01, 0x00000020 },
+ { 0x000a0d, 1, 0x01, 0x00000006 },
+ { 0x00097d, 1, 0x01, 0x0000000c },
+ { 0x000683, 1, 0x01, 0x00000006 },
+ { 0x000687, 1, 0x01, 0x003fffff },
+ { 0x0006a0, 1, 0x01, 0x00000005 },
+ { 0x000840, 1, 0x01, 0x00400008 },
+ { 0x000841, 1, 0x01, 0x08000080 },
+ { 0x000842, 1, 0x01, 0x00400008 },
+ { 0x000843, 1, 0x01, 0x08000080 },
+ { 0x000818, 8, 0x01, 0x00000000 },
+ { 0x000848, 16, 0x01, 0x00000000 },
+ { 0x000738, 1, 0x01, 0x00000000 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ab, 1, 0x01, 0x00000002 },
+ { 0x0006ac, 1, 0x01, 0x00000080 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x0006bb, 1, 0x01, 0x000000cf },
+ { 0x0006ce, 1, 0x01, 0x2a712488 },
+ { 0x000739, 1, 0x01, 0x4085c000 },
+ { 0x00073a, 1, 0x01, 0x00000080 },
+ { 0x000786, 1, 0x01, 0x80000100 },
+ { 0x00073c, 1, 0x01, 0x00010100 },
+ { 0x00073d, 1, 0x01, 0x02800000 },
+ { 0x000787, 1, 0x01, 0x000000cf },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x000836, 1, 0x01, 0x00000001 },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x000833, 1, 0x01, 0x04444480 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c1b0, 8, 0x01, 0x0000000f },
+ { 0x00c1b8, 1, 0x01, 0x0fac6881 },
+ { 0x00c1b9, 1, 0x01, 0x00fac688 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000002 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000008 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x000813, 1, 0x01, 0x00000006 },
+ { 0x000814, 1, 0x01, 0x00000008 },
+ { 0x000818, 8, 0x01, 0x00000000 },
+ { 0x000848, 16, 0x01, 0x00000000 },
+ { 0x000738, 1, 0x01, 0x00000000 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000001 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+gm107_grctx_pack_icmd[] = {
+ { gm107_grctx_init_icmd_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_b097_0[] = {
+ { 0x000800, 8, 0x40, 0x00000000 },
+ { 0x000804, 8, 0x40, 0x00000000 },
+ { 0x000808, 8, 0x40, 0x00000400 },
+ { 0x00080c, 8, 0x40, 0x00000300 },
+ { 0x000810, 1, 0x04, 0x000000cf },
+ { 0x000850, 7, 0x40, 0x00000000 },
+ { 0x000814, 8, 0x40, 0x00000040 },
+ { 0x000818, 8, 0x40, 0x00000001 },
+ { 0x00081c, 8, 0x40, 0x00000000 },
+ { 0x000820, 8, 0x40, 0x00000000 },
+ { 0x001c00, 16, 0x10, 0x00000000 },
+ { 0x001c04, 16, 0x10, 0x00000000 },
+ { 0x001c08, 16, 0x10, 0x00000000 },
+ { 0x001c0c, 16, 0x10, 0x00000000 },
+ { 0x001d00, 16, 0x10, 0x00000000 },
+ { 0x001d04, 16, 0x10, 0x00000000 },
+ { 0x001d08, 16, 0x10, 0x00000000 },
+ { 0x001d0c, 16, 0x10, 0x00000000 },
+ { 0x001f00, 16, 0x08, 0x00000000 },
+ { 0x001f04, 16, 0x08, 0x00000000 },
+ { 0x001f80, 16, 0x08, 0x00000000 },
+ { 0x001f84, 16, 0x08, 0x00000000 },
+ { 0x002000, 1, 0x04, 0x00000000 },
+ { 0x002040, 1, 0x04, 0x00000011 },
+ { 0x002080, 1, 0x04, 0x00000020 },
+ { 0x0020c0, 1, 0x04, 0x00000030 },
+ { 0x002100, 1, 0x04, 0x00000040 },
+ { 0x002140, 1, 0x04, 0x00000051 },
+ { 0x00200c, 6, 0x40, 0x00000001 },
+ { 0x002010, 1, 0x04, 0x00000000 },
+ { 0x002050, 1, 0x04, 0x00000000 },
+ { 0x002090, 1, 0x04, 0x00000001 },
+ { 0x0020d0, 1, 0x04, 0x00000002 },
+ { 0x002110, 1, 0x04, 0x00000003 },
+ { 0x002150, 1, 0x04, 0x00000004 },
+ { 0x000380, 4, 0x20, 0x00000000 },
+ { 0x000384, 4, 0x20, 0x00000000 },
+ { 0x000388, 4, 0x20, 0x00000000 },
+ { 0x00038c, 4, 0x20, 0x00000000 },
+ { 0x000700, 4, 0x10, 0x00000000 },
+ { 0x000704, 4, 0x10, 0x00000000 },
+ { 0x000708, 4, 0x10, 0x00000000 },
+ { 0x002800, 128, 0x04, 0x00000000 },
+ { 0x000a00, 16, 0x20, 0x00000000 },
+ { 0x000a04, 16, 0x20, 0x00000000 },
+ { 0x000a08, 16, 0x20, 0x00000000 },
+ { 0x000a0c, 16, 0x20, 0x00000000 },
+ { 0x000a10, 16, 0x20, 0x00000000 },
+ { 0x000a14, 16, 0x20, 0x00000000 },
+ { 0x000c00, 16, 0x10, 0x00000000 },
+ { 0x000c04, 16, 0x10, 0x00000000 },
+ { 0x000c08, 16, 0x10, 0x00000000 },
+ { 0x000c0c, 16, 0x10, 0x3f800000 },
+ { 0x000d00, 8, 0x08, 0xffff0000 },
+ { 0x000d04, 8, 0x08, 0xffff0000 },
+ { 0x000e00, 16, 0x10, 0x00000000 },
+ { 0x000e04, 16, 0x10, 0xffff0000 },
+ { 0x000e08, 16, 0x10, 0xffff0000 },
+ { 0x000d40, 4, 0x08, 0x00000000 },
+ { 0x000d44, 4, 0x08, 0x00000000 },
+ { 0x001e00, 8, 0x20, 0x00000001 },
+ { 0x001e04, 8, 0x20, 0x00000001 },
+ { 0x001e08, 8, 0x20, 0x00000002 },
+ { 0x001e0c, 8, 0x20, 0x00000001 },
+ { 0x001e10, 8, 0x20, 0x00000001 },
+ { 0x001e14, 8, 0x20, 0x00000002 },
+ { 0x001e18, 8, 0x20, 0x00000001 },
+ { 0x001480, 8, 0x10, 0x00000000 },
+ { 0x001484, 8, 0x10, 0x00000000 },
+ { 0x001488, 8, 0x10, 0x00000000 },
+ { 0x003400, 128, 0x04, 0x00000000 },
+ { 0x00030c, 1, 0x04, 0x00000001 },
+ { 0x001944, 1, 0x04, 0x00000000 },
+ { 0x001514, 1, 0x04, 0x00000000 },
+ { 0x000d68, 1, 0x04, 0x0000ffff },
+ { 0x00121c, 1, 0x04, 0x0fac6881 },
+ { 0x000fac, 1, 0x04, 0x00000001 },
+ { 0x001538, 1, 0x04, 0x00000001 },
+ { 0x000fe0, 2, 0x04, 0x00000000 },
+ { 0x000fe8, 1, 0x04, 0x00000014 },
+ { 0x000fec, 1, 0x04, 0x00000040 },
+ { 0x000ff0, 1, 0x04, 0x00000000 },
+ { 0x00179c, 1, 0x04, 0x00000000 },
+ { 0x001228, 1, 0x04, 0x00000400 },
+ { 0x00122c, 1, 0x04, 0x00000300 },
+ { 0x001230, 1, 0x04, 0x00010001 },
+ { 0x0007f8, 1, 0x04, 0x00000000 },
+ { 0x0015b4, 1, 0x04, 0x00000001 },
+ { 0x0015cc, 1, 0x04, 0x00000000 },
+ { 0x001534, 1, 0x04, 0x00000000 },
+ { 0x000754, 1, 0x04, 0x00000001 },
+ { 0x000fb0, 1, 0x04, 0x00000000 },
+ { 0x0015d0, 1, 0x04, 0x00000000 },
+ { 0x00153c, 1, 0x04, 0x00000000 },
+ { 0x0016b4, 1, 0x04, 0x00000003 },
+ { 0x000fbc, 4, 0x04, 0x0000ffff },
+ { 0x000df8, 2, 0x04, 0x00000000 },
+ { 0x001948, 1, 0x04, 0x00000000 },
+ { 0x001970, 1, 0x04, 0x00000001 },
+ { 0x00161c, 1, 0x04, 0x000009f0 },
+ { 0x000dcc, 1, 0x04, 0x00000010 },
+ { 0x0015e4, 1, 0x04, 0x00000000 },
+ { 0x001160, 32, 0x04, 0x25e00040 },
+ { 0x001880, 32, 0x04, 0x00000000 },
+ { 0x000f84, 2, 0x04, 0x00000000 },
+ { 0x0017c8, 2, 0x04, 0x00000000 },
+ { 0x0017d0, 1, 0x04, 0x000000ff },
+ { 0x0017d4, 1, 0x04, 0xffffffff },
+ { 0x0017d8, 1, 0x04, 0x00000002 },
+ { 0x0017dc, 1, 0x04, 0x00000000 },
+ { 0x0015f4, 2, 0x04, 0x00000000 },
+ { 0x001434, 2, 0x04, 0x00000000 },
+ { 0x000d74, 1, 0x04, 0x00000000 },
+ { 0x0013a4, 1, 0x04, 0x00000000 },
+ { 0x001318, 1, 0x04, 0x00000001 },
+ { 0x001080, 2, 0x04, 0x00000000 },
+ { 0x001088, 2, 0x04, 0x00000001 },
+ { 0x001090, 1, 0x04, 0x00000000 },
+ { 0x001094, 1, 0x04, 0x00000001 },
+ { 0x001098, 1, 0x04, 0x00000000 },
+ { 0x00109c, 1, 0x04, 0x00000001 },
+ { 0x0010a0, 2, 0x04, 0x00000000 },
+ { 0x001644, 1, 0x04, 0x00000000 },
+ { 0x000748, 1, 0x04, 0x00000000 },
+ { 0x000de8, 1, 0x04, 0x00000000 },
+ { 0x001648, 1, 0x04, 0x00000000 },
+ { 0x0012a4, 1, 0x04, 0x00000000 },
+ { 0x001120, 4, 0x04, 0x00000000 },
+ { 0x001118, 1, 0x04, 0x00000000 },
+ { 0x00164c, 1, 0x04, 0x00000000 },
+ { 0x001658, 1, 0x04, 0x00000000 },
+ { 0x001910, 1, 0x04, 0x00000290 },
+ { 0x001518, 1, 0x04, 0x00000000 },
+ { 0x00165c, 1, 0x04, 0x00000001 },
+ { 0x001520, 1, 0x04, 0x00000000 },
+ { 0x001604, 1, 0x04, 0x00000000 },
+ { 0x001570, 1, 0x04, 0x00000000 },
+ { 0x0013b0, 2, 0x04, 0x3f800000 },
+ { 0x00020c, 1, 0x04, 0x00000000 },
+ { 0x001670, 1, 0x04, 0x30201000 },
+ { 0x001674, 1, 0x04, 0x70605040 },
+ { 0x001678, 1, 0x04, 0xb8a89888 },
+ { 0x00167c, 1, 0x04, 0xf8e8d8c8 },
+ { 0x00166c, 1, 0x04, 0x00000000 },
+ { 0x001680, 1, 0x04, 0x00ffff00 },
+ { 0x0012d0, 1, 0x04, 0x00000003 },
+ { 0x0012d4, 1, 0x04, 0x00000002 },
+ { 0x001684, 2, 0x04, 0x00000000 },
+ { 0x000dac, 2, 0x04, 0x00001b02 },
+ { 0x000db4, 1, 0x04, 0x00000000 },
+ { 0x00168c, 1, 0x04, 0x00000000 },
+ { 0x0015bc, 1, 0x04, 0x00000000 },
+ { 0x00156c, 1, 0x04, 0x00000000 },
+ { 0x00187c, 1, 0x04, 0x00000000 },
+ { 0x001110, 1, 0x04, 0x00000001 },
+ { 0x000dc0, 3, 0x04, 0x00000000 },
+ { 0x000f40, 5, 0x04, 0x00000000 },
+ { 0x001234, 1, 0x04, 0x00000000 },
+ { 0x001690, 1, 0x04, 0x00000000 },
+ { 0x000790, 5, 0x04, 0x00000000 },
+ { 0x00077c, 1, 0x04, 0x00000000 },
+ { 0x001000, 1, 0x04, 0x00000010 },
+ { 0x0010fc, 1, 0x04, 0x00000000 },
+ { 0x001290, 1, 0x04, 0x00000000 },
+ { 0x000218, 1, 0x04, 0x00000010 },
+ { 0x0012d8, 1, 0x04, 0x00000000 },
+ { 0x0012dc, 1, 0x04, 0x00000010 },
+ { 0x000d94, 1, 0x04, 0x00000001 },
+ { 0x00155c, 2, 0x04, 0x00000000 },
+ { 0x001564, 1, 0x04, 0x00000fff },
+ { 0x001574, 2, 0x04, 0x00000000 },
+ { 0x00157c, 1, 0x04, 0x000fffff },
+ { 0x001354, 1, 0x04, 0x00000000 },
+ { 0x001610, 1, 0x04, 0x00000012 },
+ { 0x001608, 2, 0x04, 0x00000000 },
+ { 0x00260c, 1, 0x04, 0x00000000 },
+ { 0x0007ac, 1, 0x04, 0x00000000 },
+ { 0x00162c, 1, 0x04, 0x00000003 },
+ { 0x000210, 1, 0x04, 0x00000000 },
+ { 0x000320, 1, 0x04, 0x00000000 },
+ { 0x000324, 6, 0x04, 0x3f800000 },
+ { 0x000750, 1, 0x04, 0x00000000 },
+ { 0x000760, 1, 0x04, 0x39291909 },
+ { 0x000764, 1, 0x04, 0x79695949 },
+ { 0x000768, 1, 0x04, 0xb9a99989 },
+ { 0x00076c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x000770, 1, 0x04, 0x30201000 },
+ { 0x000774, 1, 0x04, 0x70605040 },
+ { 0x000778, 1, 0x04, 0x00009080 },
+ { 0x000780, 1, 0x04, 0x39291909 },
+ { 0x000784, 1, 0x04, 0x79695949 },
+ { 0x000788, 1, 0x04, 0xb9a99989 },
+ { 0x00078c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x0007d0, 1, 0x04, 0x30201000 },
+ { 0x0007d4, 1, 0x04, 0x70605040 },
+ { 0x0007d8, 1, 0x04, 0x00009080 },
+ { 0x00037c, 1, 0x04, 0x00000001 },
+ { 0x000740, 2, 0x04, 0x00000000 },
+ { 0x002600, 1, 0x04, 0x00000000 },
+ { 0x001918, 1, 0x04, 0x00000000 },
+ { 0x00191c, 1, 0x04, 0x00000900 },
+ { 0x001920, 1, 0x04, 0x00000405 },
+ { 0x001308, 1, 0x04, 0x00000001 },
+ { 0x001924, 1, 0x04, 0x00000000 },
+ { 0x0013ac, 1, 0x04, 0x00000000 },
+ { 0x00192c, 1, 0x04, 0x00000001 },
+ { 0x00193c, 1, 0x04, 0x00002c1c },
+ { 0x000d7c, 1, 0x04, 0x00000000 },
+ { 0x000f8c, 1, 0x04, 0x00000000 },
+ { 0x0002c0, 1, 0x04, 0x00000001 },
+ { 0x001510, 1, 0x04, 0x00000000 },
+ { 0x001940, 1, 0x04, 0x00000000 },
+ { 0x000ff4, 2, 0x04, 0x00000000 },
+ { 0x00194c, 2, 0x04, 0x00000000 },
+ { 0x001968, 1, 0x04, 0x00000000 },
+ { 0x001590, 1, 0x04, 0x0000003f },
+ { 0x0007e8, 4, 0x04, 0x00000000 },
+ { 0x00196c, 1, 0x04, 0x00000011 },
+ { 0x0002e4, 1, 0x04, 0x0000b001 },
+ { 0x00036c, 2, 0x04, 0x00000000 },
+ { 0x00197c, 1, 0x04, 0x00000000 },
+ { 0x000fcc, 2, 0x04, 0x00000000 },
+ { 0x0002d8, 1, 0x04, 0x00000040 },
+ { 0x001980, 1, 0x04, 0x00000080 },
+ { 0x001504, 1, 0x04, 0x00000080 },
+ { 0x001984, 1, 0x04, 0x00000000 },
+ { 0x000f60, 1, 0x04, 0x00000000 },
+ { 0x000f64, 1, 0x04, 0x00400040 },
+ { 0x000f68, 1, 0x04, 0x00002212 },
+ { 0x000f6c, 1, 0x04, 0x08080203 },
+ { 0x001108, 1, 0x04, 0x00000008 },
+ { 0x000f70, 1, 0x04, 0x00080001 },
+ { 0x000ffc, 1, 0x04, 0x00000000 },
+ { 0x000300, 1, 0x04, 0x00000001 },
+ { 0x0013a8, 1, 0x04, 0x00000000 },
+ { 0x0012ec, 1, 0x04, 0x00000000 },
+ { 0x001310, 1, 0x04, 0x00000000 },
+ { 0x001314, 1, 0x04, 0x00000001 },
+ { 0x001380, 1, 0x04, 0x00000000 },
+ { 0x001384, 4, 0x04, 0x00000001 },
+ { 0x001394, 1, 0x04, 0x00000000 },
+ { 0x00139c, 1, 0x04, 0x00000000 },
+ { 0x001398, 1, 0x04, 0x00000000 },
+ { 0x001594, 1, 0x04, 0x00000000 },
+ { 0x001598, 4, 0x04, 0x00000001 },
+ { 0x000f54, 3, 0x04, 0x00000000 },
+ { 0x0019bc, 1, 0x04, 0x00000000 },
+ { 0x000f9c, 2, 0x04, 0x00000000 },
+ { 0x0012cc, 1, 0x04, 0x00000000 },
+ { 0x0012e8, 1, 0x04, 0x00000000 },
+ { 0x00130c, 1, 0x04, 0x00000001 },
+ { 0x001360, 8, 0x04, 0x00000000 },
+ { 0x00133c, 2, 0x04, 0x00000001 },
+ { 0x001344, 1, 0x04, 0x00000002 },
+ { 0x001348, 2, 0x04, 0x00000001 },
+ { 0x001350, 1, 0x04, 0x00000002 },
+ { 0x001358, 1, 0x04, 0x00000001 },
+ { 0x0012e4, 1, 0x04, 0x00000000 },
+ { 0x00131c, 4, 0x04, 0x00000000 },
+ { 0x0019c0, 1, 0x04, 0x00000000 },
+ { 0x001140, 1, 0x04, 0x00000000 },
+ { 0x000dd0, 1, 0x04, 0x00000000 },
+ { 0x000dd4, 1, 0x04, 0x00000001 },
+ { 0x0002f4, 1, 0x04, 0x00000000 },
+ { 0x0019c4, 1, 0x04, 0x00000000 },
+ { 0x0019c8, 1, 0x04, 0x00001500 },
+ { 0x00135c, 1, 0x04, 0x00000000 },
+ { 0x000f90, 1, 0x04, 0x00000000 },
+ { 0x0019e0, 8, 0x04, 0x00000001 },
+ { 0x0019cc, 1, 0x04, 0x00000001 },
+ { 0x0015b8, 1, 0x04, 0x00000000 },
+ { 0x001a00, 1, 0x04, 0x00001111 },
+ { 0x001a04, 7, 0x04, 0x00000000 },
+ { 0x000d6c, 2, 0x04, 0xffff0000 },
+ { 0x0010f8, 1, 0x04, 0x00001010 },
+ { 0x000d80, 5, 0x04, 0x00000000 },
+ { 0x000da0, 1, 0x04, 0x00000000 },
+ { 0x0007a4, 2, 0x04, 0x00000000 },
+ { 0x001508, 1, 0x04, 0x80000000 },
+ { 0x00150c, 1, 0x04, 0x40000000 },
+ { 0x001668, 1, 0x04, 0x00000000 },
+ { 0x000318, 2, 0x04, 0x00000008 },
+ { 0x000d9c, 1, 0x04, 0x00000001 },
+ { 0x000f14, 1, 0x04, 0x00000000 },
+ { 0x000374, 1, 0x04, 0x00000000 },
+ { 0x000378, 1, 0x04, 0x0000000c },
+ { 0x0007dc, 1, 0x04, 0x00000000 },
+ { 0x00074c, 1, 0x04, 0x00000055 },
+ { 0x001420, 1, 0x04, 0x00000003 },
+ { 0x001008, 1, 0x04, 0x00000008 },
+ { 0x00100c, 1, 0x04, 0x00000040 },
+ { 0x001010, 1, 0x04, 0x0000012c },
+ { 0x000d60, 1, 0x04, 0x00000040 },
+ { 0x001018, 1, 0x04, 0x00000020 },
+ { 0x00101c, 1, 0x04, 0x00000001 },
+ { 0x001020, 1, 0x04, 0x00000020 },
+ { 0x001024, 1, 0x04, 0x00000001 },
+ { 0x001444, 3, 0x04, 0x00000000 },
+ { 0x000360, 1, 0x04, 0x20164010 },
+ { 0x000364, 1, 0x04, 0x00000020 },
+ { 0x000368, 1, 0x04, 0x00000000 },
+ { 0x000da8, 1, 0x04, 0x00000030 },
+ { 0x000de4, 1, 0x04, 0x00000000 },
+ { 0x000204, 1, 0x04, 0x00000006 },
+ { 0x0002d0, 1, 0x04, 0x003fffff },
+ { 0x001220, 1, 0x04, 0x00000005 },
+ { 0x000fdc, 1, 0x04, 0x00000000 },
+ { 0x000f98, 1, 0x04, 0x00400008 },
+ { 0x001284, 1, 0x04, 0x08000080 },
+ { 0x001450, 1, 0x04, 0x00400008 },
+ { 0x001454, 1, 0x04, 0x08000080 },
+ { 0x000214, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+gm107_grctx_pack_mthd[] = {
+ { gm107_grctx_init_b097_0, 0xb097 },
+ { nvc0_grctx_init_902d_0, 0x902d },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_fe_0[] = {
+ { 0x404004, 8, 0x04, 0x00000000 },
+ { 0x404024, 1, 0x04, 0x0000e000 },
+ { 0x404028, 8, 0x04, 0x00000000 },
+ { 0x4040a8, 8, 0x04, 0x00000000 },
+ { 0x4040c8, 1, 0x04, 0xf800008f },
+ { 0x4040d0, 6, 0x04, 0x00000000 },
+ { 0x4040f8, 1, 0x04, 0x00000000 },
+ { 0x404100, 10, 0x04, 0x00000000 },
+ { 0x404130, 2, 0x04, 0x00000000 },
+ { 0x404150, 1, 0x04, 0x0000002e },
+ { 0x404154, 1, 0x04, 0x00000400 },
+ { 0x404158, 1, 0x04, 0x00000200 },
+ { 0x404164, 1, 0x04, 0x00000045 },
+ { 0x40417c, 2, 0x04, 0x00000000 },
+ { 0x404194, 1, 0x04, 0x01000700 },
+ { 0x4041a0, 4, 0x04, 0x00000000 },
+ { 0x404200, 4, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_ds_0[] = {
+ { 0x405800, 1, 0x04, 0x0f8001bf },
+ { 0x405830, 1, 0x04, 0x0aa01000 },
+ { 0x405834, 1, 0x04, 0x08000000 },
+ { 0x405838, 1, 0x04, 0x00000000 },
+ { 0x405854, 1, 0x04, 0x00000000 },
+ { 0x405870, 4, 0x04, 0x00000001 },
+ { 0x405a00, 2, 0x04, 0x00000000 },
+ { 0x405a18, 1, 0x04, 0x00000000 },
+ { 0x405a1c, 1, 0x04, 0x000000ff },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_pd_0[] = {
+ { 0x406020, 1, 0x04, 0x07410001 },
+ { 0x406028, 4, 0x04, 0x00000001 },
+ { 0x4064a8, 1, 0x04, 0x00000000 },
+ { 0x4064ac, 1, 0x04, 0x00003fff },
+ { 0x4064b0, 3, 0x04, 0x00000000 },
+ { 0x4064c0, 1, 0x04, 0x80400280 },
+ { 0x4064c4, 1, 0x04, 0x0400ffff },
+ { 0x4064c8, 1, 0x04, 0x018001ff },
+ { 0x4064cc, 9, 0x04, 0x00000000 },
+ { 0x4064fc, 1, 0x04, 0x0000022a },
+ { 0x406500, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_be_0[] = {
+ { 0x408800, 1, 0x04, 0x32802a3c },
+ { 0x408804, 1, 0x04, 0x00000040 },
+ { 0x408808, 1, 0x04, 0x1003e005 },
+ { 0x408840, 1, 0x04, 0x0000000b },
+ { 0x408900, 1, 0x04, 0xb080b801 },
+ { 0x408904, 1, 0x04, 0x63038001 },
+ { 0x408908, 1, 0x04, 0x02c8102f },
+ { 0x408980, 1, 0x04, 0x0000011d },
+ {}
+};
+
+static const struct nvc0_graph_pack
+gm107_grctx_pack_hub[] = {
+ { nvc0_grctx_init_main_0 },
+ { gm107_grctx_init_fe_0 },
+ { nvf0_grctx_init_pri_0 },
+ { nve4_grctx_init_memfmt_0 },
+ { gm107_grctx_init_ds_0 },
+ { nvf0_grctx_init_cwd_0 },
+ { gm107_grctx_init_pd_0 },
+ { nv108_grctx_init_rstr2d_0 },
+ { nve4_grctx_init_scc_0 },
+ { gm107_grctx_init_be_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_gpc_unk_0[] = {
+ { 0x418380, 1, 0x04, 0x00000056 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_gpc_unk_1[] = {
+ { 0x418600, 1, 0x04, 0x0000007f },
+ { 0x418684, 1, 0x04, 0x0000001f },
+ { 0x418700, 1, 0x04, 0x00000002 },
+ { 0x418704, 1, 0x04, 0x00000080 },
+ { 0x418708, 1, 0x04, 0x40000000 },
+ { 0x41870c, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_setup_0[] = {
+ { 0x418800, 1, 0x04, 0x7006863a },
+ { 0x418810, 1, 0x04, 0x00000000 },
+ { 0x418828, 1, 0x04, 0x00000044 },
+ { 0x418830, 1, 0x04, 0x10000001 },
+ { 0x4188d8, 1, 0x04, 0x00000008 },
+ { 0x4188e0, 1, 0x04, 0x01000000 },
+ { 0x4188e8, 5, 0x04, 0x00000000 },
+ { 0x4188fc, 1, 0x04, 0x20100058 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_gpc_unk_2[] = {
+ { 0x418d24, 1, 0x04, 0x00000000 },
+ { 0x418e00, 1, 0x04, 0x90000000 },
+ { 0x418e24, 1, 0x04, 0x00000000 },
+ { 0x418e28, 1, 0x04, 0x00000030 },
+ { 0x418e30, 1, 0x04, 0x00000000 },
+ { 0x418e34, 1, 0x04, 0x00010000 },
+ { 0x418e38, 1, 0x04, 0x00000000 },
+ { 0x418e40, 22, 0x04, 0x00000000 },
+ { 0x418ea0, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+gm107_grctx_pack_gpc[] = {
+ { gm107_grctx_init_gpc_unk_0 },
+ { nv108_grctx_init_prop_0 },
+ { gm107_grctx_init_gpc_unk_1 },
+ { gm107_grctx_init_setup_0 },
+ { nvc0_grctx_init_zcull_0 },
+ { nv108_grctx_init_crstr_0 },
+ { nve4_grctx_init_gpm_0 },
+ { gm107_grctx_init_gpc_unk_2 },
+ { nvc0_grctx_init_gcc_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_tex_0[] = {
+ { 0x419a00, 1, 0x04, 0x000300f0 },
+ { 0x419a04, 1, 0x04, 0x00000005 },
+ { 0x419a08, 1, 0x04, 0x00000421 },
+ { 0x419a0c, 1, 0x04, 0x00120000 },
+ { 0x419a10, 1, 0x04, 0x00000000 },
+ { 0x419a14, 1, 0x04, 0x00002200 },
+ { 0x419a1c, 1, 0x04, 0x0000c000 },
+ { 0x419a20, 1, 0x04, 0x20008a00 },
+ { 0x419a30, 1, 0x04, 0x00000001 },
+ { 0x419a3c, 1, 0x04, 0x00000002 },
+ { 0x419ac4, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_mpc_0[] = {
+ { 0x419c00, 1, 0x04, 0x0000001a },
+ { 0x419c04, 1, 0x04, 0x80000006 },
+ { 0x419c08, 1, 0x04, 0x00000002 },
+ { 0x419c20, 1, 0x04, 0x00000000 },
+ { 0x419c24, 1, 0x04, 0x00084210 },
+ { 0x419c28, 1, 0x04, 0x3efbefbe },
+ { 0x419c2c, 1, 0x04, 0x00000000 },
+ { 0x419c34, 1, 0x04, 0x01ff1ff3 },
+ { 0x419c3c, 1, 0x04, 0x00001919 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_l1c_0[] = {
+ { 0x419c84, 1, 0x04, 0x00000020 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_sm_0[] = {
+ { 0x419e04, 3, 0x04, 0x00000000 },
+ { 0x419e10, 1, 0x04, 0x00001c02 },
+ { 0x419e44, 1, 0x04, 0x00d3eff2 },
+ { 0x419e48, 1, 0x04, 0x00000000 },
+ { 0x419e4c, 1, 0x04, 0x0000007f },
+ { 0x419e50, 1, 0x04, 0x00000000 },
+ { 0x419e60, 4, 0x04, 0x00000000 },
+ { 0x419e74, 10, 0x04, 0x00000000 },
+ { 0x419eac, 1, 0x04, 0x0001cf8b },
+ { 0x419eb0, 1, 0x04, 0x00030300 },
+ { 0x419eb8, 1, 0x04, 0x00000000 },
+ { 0x419ef0, 24, 0x04, 0x00000000 },
+ { 0x419f68, 2, 0x04, 0x00000000 },
+ { 0x419f70, 1, 0x04, 0x00000020 },
+ { 0x419f78, 1, 0x04, 0x000003eb },
+ { 0x419f7c, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+gm107_grctx_pack_tpc[] = {
+ { nvd7_grctx_init_pe_0 },
+ { gm107_grctx_init_tex_0 },
+ { gm107_grctx_init_mpc_0 },
+ { gm107_grctx_init_l1c_0 },
+ { gm107_grctx_init_sm_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_cbm_0[] = {
+ { 0x41bec0, 1, 0x04, 0x00000000 },
+ { 0x41bec4, 1, 0x04, 0x01050000 },
+ { 0x41bee4, 1, 0x04, 0x00000000 },
+ { 0x41bef0, 1, 0x04, 0x000003ff },
+ { 0x41bef4, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_grctx_init_wwdx_0[] = {
+ { 0x41bf00, 1, 0x04, 0x0a418820 },
+ { 0x41bf04, 1, 0x04, 0x062080e6 },
+ { 0x41bf08, 1, 0x04, 0x020398a4 },
+ { 0x41bf0c, 1, 0x04, 0x0e629062 },
+ { 0x41bf10, 1, 0x04, 0x0a418820 },
+ { 0x41bf14, 1, 0x04, 0x000000e6 },
+ { 0x41bfd0, 1, 0x04, 0x00900103 },
+ { 0x41bfe0, 1, 0x04, 0x80000000 },
+ { 0x41bfe4, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+gm107_grctx_pack_ppc[] = {
+ { nve4_grctx_init_pes_0 },
+ { gm107_grctx_init_cbm_0 },
+ { gm107_grctx_init_wwdx_0 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
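+/* Each nvc0_graph_init entry above is { addr, count, pitch, data }: the
+ * loader writes 'data' to 'count' registers starting at 'addr' and stepping
+ * by 'pitch' (nvc0_graph_mmio() does this for the priv register lists; the
+ * icmd and mthd packs reuse the same table format but are submitted as init
+ * commands / object methods rather than direct priv writes).  An
+ * nvc0_graph_pack is a NULL-terminated list of such tables, optionally
+ * tagged with the object class the methods belong to (e.g. 0xb097 above).
+ */
+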
+static void
+gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+{
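+ /* Reserve the context buffers this chip needs: mmio_data(size, align,
+  * access) allocates one, and each mmio_list(reg, data, shift, buffer)
+  * entry records a register write of 'data' OR'd with the buffer's address
+  * shifted right by 'shift' (a shift of 0 writes 'data' unmodified),
+  * applied when the context is initialised.
+  */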
+ mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+ mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+ mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+
+ mmio_list(0x40800c, 0x00000000, 8, 1);
+ mmio_list(0x408010, 0x80000000, 0, 0);
+ mmio_list(0x419004, 0x00000000, 8, 1);
+ mmio_list(0x419008, 0x00000000, 0, 0);
+ mmio_list(0x4064cc, 0x80000000, 0, 0);
+ mmio_list(0x418e30, 0x80000000, 0, 0);
+
+ mmio_list(0x408004, 0x00000000, 8, 0);
+ mmio_list(0x408008, 0x80000030, 0, 0);
+ mmio_list(0x418e24, 0x00000000, 8, 0);
+ mmio_list(0x418e28, 0x80000030, 0, 0);
+
+ mmio_list(0x418810, 0x80000000, 12, 2);
+ mmio_list(0x419848, 0x10000000, 12, 2);
+ mmio_list(0x419c2c, 0x10000000, 12, 2);
+
+ mmio_list(0x405830, 0x0aa01000, 0, 0);
+ mmio_list(0x4064c4, 0x0400ffff, 0, 0);
+
+ /*XXX*/
+ mmio_list(0x5030c0, 0x00001540, 0, 0);
+ mmio_list(0x5030f4, 0x00000000, 0, 0);
+ mmio_list(0x5030e4, 0x00002000, 0, 0);
+ mmio_list(0x5030f8, 0x00003fc0, 0, 0);
+ mmio_list(0x418ea0, 0x07151540, 0, 0);
+
+ mmio_list(0x5032c0, 0x00001540, 0, 0);
+ mmio_list(0x5032f4, 0x00001fe0, 0, 0);
+ mmio_list(0x5032e4, 0x00002000, 0, 0);
+ mmio_list(0x5032f8, 0x00006fc0, 0, 0);
+ mmio_list(0x418ea4, 0x07151540, 0, 0);
+}
+
+static void
+gm107_grctx_generate_tpcid(struct nvc0_graph_priv *priv)
+{
+ int gpc, tpc, id;
+
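+ /* Hand out sequential TPC IDs, interleaving across GPCs so consecutive
+  * IDs land on different GPCs, and mirror each GPC's TPC count into its
+  * per-GPC registers.
+  */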
+ for (tpc = 0, id = 0; tpc < 4; tpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tpc < priv->tpc_nr[gpc]) {
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+ id++;
+ }
+
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+ }
+ }
+}
+
+static void
+gm107_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+{
+ struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ int i;
+
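+ /* Apply the static register lists for the hub, GPCs, zcull, TPCs and
+  * PPCs.
+  */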
+ nvc0_graph_mmio(priv, oclass->hub);
+ nvc0_graph_mmio(priv, oclass->gpc);
+ nvc0_graph_mmio(priv, oclass->zcull);
+ nvc0_graph_mmio(priv, oclass->tpc);
+ nvc0_graph_mmio(priv, oclass->ppc);
+
+ nv_wr32(priv, 0x404154, 0x00000000);
+
+ oclass->mods(priv, info);
+ oclass->unkn(priv);
+
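+ /* Configuration that depends on the GPC/TPC layout of this particular
+  * board (unit IDs, routing/mask registers).
+  */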
+ gm107_grctx_generate_tpcid(priv);
+ nvc0_grctx_generate_r406028(priv);
+ nve4_grctx_generate_r418bb8(priv);
+ nvc0_grctx_generate_r406800(priv);
+
+ nv_wr32(priv, 0x4064d0, 0x00000001);
+ for (i = 1; i < 8; i++)
+ nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+ nv_wr32(priv, 0x406500, 0x00000001);
+
+ nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+
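+ /* Parts with a single GPC program their TPC count into these fields,
+  * others program the GPC count.
+  */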
+ if (priv->gpc_nr == 1) {
+ nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
+ nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
+ } else {
+ nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
+ nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
+ }
+
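+ /* Replay the init-command and per-class method lists so their state is
+  * captured in the context image being generated.
+  */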
+ nvc0_graph_icmd(priv, oclass->icmd);
+ nv_wr32(priv, 0x404154, 0x00000400);
+ nvc0_graph_mthd(priv, oclass->mthd);
+
+ nv_mask(priv, 0x419e00, 0x00808080, 0x00808080);
+ nv_mask(priv, 0x419ccc, 0x80000000, 0x80000000);
+ nv_mask(priv, 0x419f80, 0x80000000, 0x80000000);
+ nv_mask(priv, 0x419f88, 0x80000000, 0x80000000);
+}
+
+struct nouveau_oclass *
+gm107_grctx_oclass = &(struct nvc0_grctx_oclass) {
+ .base.handle = NV_ENGCTX(GR, 0x08),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_context_ctor,
+ .dtor = nvc0_graph_context_dtor,
+ .init = _nouveau_graph_context_init,
+ .fini = _nouveau_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+ .main = gm107_grctx_generate_main,
+ .mods = gm107_grctx_generate_mods,
+ .unkn = nve4_grctx_generate_unkn,
+ .hub = gm107_grctx_pack_hub,
+ .gpc = gm107_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = gm107_grctx_pack_tpc,
+ .ppc = gm107_grctx_pack_ppc,
+ .icmd = gm107_grctx_pack_icmd,
+ .mthd = gm107_grctx_pack_mthd,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
index a86bd3352bf8..48351b4d6d6b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
@@ -22,10 +22,14 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "nvc0.h"
+#include "ctxnvc0.h"
-static struct nvc0_graph_init
-nv108_grctx_init_icmd[] = {
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+nv108_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x000039, 3, 0x01, 0x00000000 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
@@ -274,839 +278,14 @@ nv108_grctx_init_icmd[] = {
{}
};
-static struct nvc0_graph_init
-nv108_grctx_init_a197[] = {
- { 0x000800, 1, 0x04, 0x00000000 },
- { 0x000840, 1, 0x04, 0x00000000 },
- { 0x000880, 1, 0x04, 0x00000000 },
- { 0x0008c0, 1, 0x04, 0x00000000 },
- { 0x000900, 1, 0x04, 0x00000000 },
- { 0x000940, 1, 0x04, 0x00000000 },
- { 0x000980, 1, 0x04, 0x00000000 },
- { 0x0009c0, 1, 0x04, 0x00000000 },
- { 0x000804, 1, 0x04, 0x00000000 },
- { 0x000844, 1, 0x04, 0x00000000 },
- { 0x000884, 1, 0x04, 0x00000000 },
- { 0x0008c4, 1, 0x04, 0x00000000 },
- { 0x000904, 1, 0x04, 0x00000000 },
- { 0x000944, 1, 0x04, 0x00000000 },
- { 0x000984, 1, 0x04, 0x00000000 },
- { 0x0009c4, 1, 0x04, 0x00000000 },
- { 0x000808, 1, 0x04, 0x00000400 },
- { 0x000848, 1, 0x04, 0x00000400 },
- { 0x000888, 1, 0x04, 0x00000400 },
- { 0x0008c8, 1, 0x04, 0x00000400 },
- { 0x000908, 1, 0x04, 0x00000400 },
- { 0x000948, 1, 0x04, 0x00000400 },
- { 0x000988, 1, 0x04, 0x00000400 },
- { 0x0009c8, 1, 0x04, 0x00000400 },
- { 0x00080c, 1, 0x04, 0x00000300 },
- { 0x00084c, 1, 0x04, 0x00000300 },
- { 0x00088c, 1, 0x04, 0x00000300 },
- { 0x0008cc, 1, 0x04, 0x00000300 },
- { 0x00090c, 1, 0x04, 0x00000300 },
- { 0x00094c, 1, 0x04, 0x00000300 },
- { 0x00098c, 1, 0x04, 0x00000300 },
- { 0x0009cc, 1, 0x04, 0x00000300 },
- { 0x000810, 1, 0x04, 0x000000cf },
- { 0x000850, 1, 0x04, 0x00000000 },
- { 0x000890, 1, 0x04, 0x00000000 },
- { 0x0008d0, 1, 0x04, 0x00000000 },
- { 0x000910, 1, 0x04, 0x00000000 },
- { 0x000950, 1, 0x04, 0x00000000 },
- { 0x000990, 1, 0x04, 0x00000000 },
- { 0x0009d0, 1, 0x04, 0x00000000 },
- { 0x000814, 1, 0x04, 0x00000040 },
- { 0x000854, 1, 0x04, 0x00000040 },
- { 0x000894, 1, 0x04, 0x00000040 },
- { 0x0008d4, 1, 0x04, 0x00000040 },
- { 0x000914, 1, 0x04, 0x00000040 },
- { 0x000954, 1, 0x04, 0x00000040 },
- { 0x000994, 1, 0x04, 0x00000040 },
- { 0x0009d4, 1, 0x04, 0x00000040 },
- { 0x000818, 1, 0x04, 0x00000001 },
- { 0x000858, 1, 0x04, 0x00000001 },
- { 0x000898, 1, 0x04, 0x00000001 },
- { 0x0008d8, 1, 0x04, 0x00000001 },
- { 0x000918, 1, 0x04, 0x00000001 },
- { 0x000958, 1, 0x04, 0x00000001 },
- { 0x000998, 1, 0x04, 0x00000001 },
- { 0x0009d8, 1, 0x04, 0x00000001 },
- { 0x00081c, 1, 0x04, 0x00000000 },
- { 0x00085c, 1, 0x04, 0x00000000 },
- { 0x00089c, 1, 0x04, 0x00000000 },
- { 0x0008dc, 1, 0x04, 0x00000000 },
- { 0x00091c, 1, 0x04, 0x00000000 },
- { 0x00095c, 1, 0x04, 0x00000000 },
- { 0x00099c, 1, 0x04, 0x00000000 },
- { 0x0009dc, 1, 0x04, 0x00000000 },
- { 0x000820, 1, 0x04, 0x00000000 },
- { 0x000860, 1, 0x04, 0x00000000 },
- { 0x0008a0, 1, 0x04, 0x00000000 },
- { 0x0008e0, 1, 0x04, 0x00000000 },
- { 0x000920, 1, 0x04, 0x00000000 },
- { 0x000960, 1, 0x04, 0x00000000 },
- { 0x0009a0, 1, 0x04, 0x00000000 },
- { 0x0009e0, 1, 0x04, 0x00000000 },
- { 0x001c00, 1, 0x04, 0x00000000 },
- { 0x001c10, 1, 0x04, 0x00000000 },
- { 0x001c20, 1, 0x04, 0x00000000 },
- { 0x001c30, 1, 0x04, 0x00000000 },
- { 0x001c40, 1, 0x04, 0x00000000 },
- { 0x001c50, 1, 0x04, 0x00000000 },
- { 0x001c60, 1, 0x04, 0x00000000 },
- { 0x001c70, 1, 0x04, 0x00000000 },
- { 0x001c80, 1, 0x04, 0x00000000 },
- { 0x001c90, 1, 0x04, 0x00000000 },
- { 0x001ca0, 1, 0x04, 0x00000000 },
- { 0x001cb0, 1, 0x04, 0x00000000 },
- { 0x001cc0, 1, 0x04, 0x00000000 },
- { 0x001cd0, 1, 0x04, 0x00000000 },
- { 0x001ce0, 1, 0x04, 0x00000000 },
- { 0x001cf0, 1, 0x04, 0x00000000 },
- { 0x001c04, 1, 0x04, 0x00000000 },
- { 0x001c14, 1, 0x04, 0x00000000 },
- { 0x001c24, 1, 0x04, 0x00000000 },
- { 0x001c34, 1, 0x04, 0x00000000 },
- { 0x001c44, 1, 0x04, 0x00000000 },
- { 0x001c54, 1, 0x04, 0x00000000 },
- { 0x001c64, 1, 0x04, 0x00000000 },
- { 0x001c74, 1, 0x04, 0x00000000 },
- { 0x001c84, 1, 0x04, 0x00000000 },
- { 0x001c94, 1, 0x04, 0x00000000 },
- { 0x001ca4, 1, 0x04, 0x00000000 },
- { 0x001cb4, 1, 0x04, 0x00000000 },
- { 0x001cc4, 1, 0x04, 0x00000000 },
- { 0x001cd4, 1, 0x04, 0x00000000 },
- { 0x001ce4, 1, 0x04, 0x00000000 },
- { 0x001cf4, 1, 0x04, 0x00000000 },
- { 0x001c08, 1, 0x04, 0x00000000 },
- { 0x001c18, 1, 0x04, 0x00000000 },
- { 0x001c28, 1, 0x04, 0x00000000 },
- { 0x001c38, 1, 0x04, 0x00000000 },
- { 0x001c48, 1, 0x04, 0x00000000 },
- { 0x001c58, 1, 0x04, 0x00000000 },
- { 0x001c68, 1, 0x04, 0x00000000 },
- { 0x001c78, 1, 0x04, 0x00000000 },
- { 0x001c88, 1, 0x04, 0x00000000 },
- { 0x001c98, 1, 0x04, 0x00000000 },
- { 0x001ca8, 1, 0x04, 0x00000000 },
- { 0x001cb8, 1, 0x04, 0x00000000 },
- { 0x001cc8, 1, 0x04, 0x00000000 },
- { 0x001cd8, 1, 0x04, 0x00000000 },
- { 0x001ce8, 1, 0x04, 0x00000000 },
- { 0x001cf8, 1, 0x04, 0x00000000 },
- { 0x001c0c, 1, 0x04, 0x00000000 },
- { 0x001c1c, 1, 0x04, 0x00000000 },
- { 0x001c2c, 1, 0x04, 0x00000000 },
- { 0x001c3c, 1, 0x04, 0x00000000 },
- { 0x001c4c, 1, 0x04, 0x00000000 },
- { 0x001c5c, 1, 0x04, 0x00000000 },
- { 0x001c6c, 1, 0x04, 0x00000000 },
- { 0x001c7c, 1, 0x04, 0x00000000 },
- { 0x001c8c, 1, 0x04, 0x00000000 },
- { 0x001c9c, 1, 0x04, 0x00000000 },
- { 0x001cac, 1, 0x04, 0x00000000 },
- { 0x001cbc, 1, 0x04, 0x00000000 },
- { 0x001ccc, 1, 0x04, 0x00000000 },
- { 0x001cdc, 1, 0x04, 0x00000000 },
- { 0x001cec, 1, 0x04, 0x00000000 },
- { 0x001cfc, 2, 0x04, 0x00000000 },
- { 0x001d10, 1, 0x04, 0x00000000 },
- { 0x001d20, 1, 0x04, 0x00000000 },
- { 0x001d30, 1, 0x04, 0x00000000 },
- { 0x001d40, 1, 0x04, 0x00000000 },
- { 0x001d50, 1, 0x04, 0x00000000 },
- { 0x001d60, 1, 0x04, 0x00000000 },
- { 0x001d70, 1, 0x04, 0x00000000 },
- { 0x001d80, 1, 0x04, 0x00000000 },
- { 0x001d90, 1, 0x04, 0x00000000 },
- { 0x001da0, 1, 0x04, 0x00000000 },
- { 0x001db0, 1, 0x04, 0x00000000 },
- { 0x001dc0, 1, 0x04, 0x00000000 },
- { 0x001dd0, 1, 0x04, 0x00000000 },
- { 0x001de0, 1, 0x04, 0x00000000 },
- { 0x001df0, 1, 0x04, 0x00000000 },
- { 0x001d04, 1, 0x04, 0x00000000 },
- { 0x001d14, 1, 0x04, 0x00000000 },
- { 0x001d24, 1, 0x04, 0x00000000 },
- { 0x001d34, 1, 0x04, 0x00000000 },
- { 0x001d44, 1, 0x04, 0x00000000 },
- { 0x001d54, 1, 0x04, 0x00000000 },
- { 0x001d64, 1, 0x04, 0x00000000 },
- { 0x001d74, 1, 0x04, 0x00000000 },
- { 0x001d84, 1, 0x04, 0x00000000 },
- { 0x001d94, 1, 0x04, 0x00000000 },
- { 0x001da4, 1, 0x04, 0x00000000 },
- { 0x001db4, 1, 0x04, 0x00000000 },
- { 0x001dc4, 1, 0x04, 0x00000000 },
- { 0x001dd4, 1, 0x04, 0x00000000 },
- { 0x001de4, 1, 0x04, 0x00000000 },
- { 0x001df4, 1, 0x04, 0x00000000 },
- { 0x001d08, 1, 0x04, 0x00000000 },
- { 0x001d18, 1, 0x04, 0x00000000 },
- { 0x001d28, 1, 0x04, 0x00000000 },
- { 0x001d38, 1, 0x04, 0x00000000 },
- { 0x001d48, 1, 0x04, 0x00000000 },
- { 0x001d58, 1, 0x04, 0x00000000 },
- { 0x001d68, 1, 0x04, 0x00000000 },
- { 0x001d78, 1, 0x04, 0x00000000 },
- { 0x001d88, 1, 0x04, 0x00000000 },
- { 0x001d98, 1, 0x04, 0x00000000 },
- { 0x001da8, 1, 0x04, 0x00000000 },
- { 0x001db8, 1, 0x04, 0x00000000 },
- { 0x001dc8, 1, 0x04, 0x00000000 },
- { 0x001dd8, 1, 0x04, 0x00000000 },
- { 0x001de8, 1, 0x04, 0x00000000 },
- { 0x001df8, 1, 0x04, 0x00000000 },
- { 0x001d0c, 1, 0x04, 0x00000000 },
- { 0x001d1c, 1, 0x04, 0x00000000 },
- { 0x001d2c, 1, 0x04, 0x00000000 },
- { 0x001d3c, 1, 0x04, 0x00000000 },
- { 0x001d4c, 1, 0x04, 0x00000000 },
- { 0x001d5c, 1, 0x04, 0x00000000 },
- { 0x001d6c, 1, 0x04, 0x00000000 },
- { 0x001d7c, 1, 0x04, 0x00000000 },
- { 0x001d8c, 1, 0x04, 0x00000000 },
- { 0x001d9c, 1, 0x04, 0x00000000 },
- { 0x001dac, 1, 0x04, 0x00000000 },
- { 0x001dbc, 1, 0x04, 0x00000000 },
- { 0x001dcc, 1, 0x04, 0x00000000 },
- { 0x001ddc, 1, 0x04, 0x00000000 },
- { 0x001dec, 1, 0x04, 0x00000000 },
- { 0x001dfc, 1, 0x04, 0x00000000 },
- { 0x001f00, 1, 0x04, 0x00000000 },
- { 0x001f08, 1, 0x04, 0x00000000 },
- { 0x001f10, 1, 0x04, 0x00000000 },
- { 0x001f18, 1, 0x04, 0x00000000 },
- { 0x001f20, 1, 0x04, 0x00000000 },
- { 0x001f28, 1, 0x04, 0x00000000 },
- { 0x001f30, 1, 0x04, 0x00000000 },
- { 0x001f38, 1, 0x04, 0x00000000 },
- { 0x001f40, 1, 0x04, 0x00000000 },
- { 0x001f48, 1, 0x04, 0x00000000 },
- { 0x001f50, 1, 0x04, 0x00000000 },
- { 0x001f58, 1, 0x04, 0x00000000 },
- { 0x001f60, 1, 0x04, 0x00000000 },
- { 0x001f68, 1, 0x04, 0x00000000 },
- { 0x001f70, 1, 0x04, 0x00000000 },
- { 0x001f78, 1, 0x04, 0x00000000 },
- { 0x001f04, 1, 0x04, 0x00000000 },
- { 0x001f0c, 1, 0x04, 0x00000000 },
- { 0x001f14, 1, 0x04, 0x00000000 },
- { 0x001f1c, 1, 0x04, 0x00000000 },
- { 0x001f24, 1, 0x04, 0x00000000 },
- { 0x001f2c, 1, 0x04, 0x00000000 },
- { 0x001f34, 1, 0x04, 0x00000000 },
- { 0x001f3c, 1, 0x04, 0x00000000 },
- { 0x001f44, 1, 0x04, 0x00000000 },
- { 0x001f4c, 1, 0x04, 0x00000000 },
- { 0x001f54, 1, 0x04, 0x00000000 },
- { 0x001f5c, 1, 0x04, 0x00000000 },
- { 0x001f64, 1, 0x04, 0x00000000 },
- { 0x001f6c, 1, 0x04, 0x00000000 },
- { 0x001f74, 1, 0x04, 0x00000000 },
- { 0x001f7c, 2, 0x04, 0x00000000 },
- { 0x001f88, 1, 0x04, 0x00000000 },
- { 0x001f90, 1, 0x04, 0x00000000 },
- { 0x001f98, 1, 0x04, 0x00000000 },
- { 0x001fa0, 1, 0x04, 0x00000000 },
- { 0x001fa8, 1, 0x04, 0x00000000 },
- { 0x001fb0, 1, 0x04, 0x00000000 },
- { 0x001fb8, 1, 0x04, 0x00000000 },
- { 0x001fc0, 1, 0x04, 0x00000000 },
- { 0x001fc8, 1, 0x04, 0x00000000 },
- { 0x001fd0, 1, 0x04, 0x00000000 },
- { 0x001fd8, 1, 0x04, 0x00000000 },
- { 0x001fe0, 1, 0x04, 0x00000000 },
- { 0x001fe8, 1, 0x04, 0x00000000 },
- { 0x001ff0, 1, 0x04, 0x00000000 },
- { 0x001ff8, 1, 0x04, 0x00000000 },
- { 0x001f84, 1, 0x04, 0x00000000 },
- { 0x001f8c, 1, 0x04, 0x00000000 },
- { 0x001f94, 1, 0x04, 0x00000000 },
- { 0x001f9c, 1, 0x04, 0x00000000 },
- { 0x001fa4, 1, 0x04, 0x00000000 },
- { 0x001fac, 1, 0x04, 0x00000000 },
- { 0x001fb4, 1, 0x04, 0x00000000 },
- { 0x001fbc, 1, 0x04, 0x00000000 },
- { 0x001fc4, 1, 0x04, 0x00000000 },
- { 0x001fcc, 1, 0x04, 0x00000000 },
- { 0x001fd4, 1, 0x04, 0x00000000 },
- { 0x001fdc, 1, 0x04, 0x00000000 },
- { 0x001fe4, 1, 0x04, 0x00000000 },
- { 0x001fec, 1, 0x04, 0x00000000 },
- { 0x001ff4, 1, 0x04, 0x00000000 },
- { 0x001ffc, 2, 0x04, 0x00000000 },
- { 0x002040, 1, 0x04, 0x00000011 },
- { 0x002080, 1, 0x04, 0x00000020 },
- { 0x0020c0, 1, 0x04, 0x00000030 },
- { 0x002100, 1, 0x04, 0x00000040 },
- { 0x002140, 1, 0x04, 0x00000051 },
- { 0x00200c, 1, 0x04, 0x00000001 },
- { 0x00204c, 1, 0x04, 0x00000001 },
- { 0x00208c, 1, 0x04, 0x00000001 },
- { 0x0020cc, 1, 0x04, 0x00000001 },
- { 0x00210c, 1, 0x04, 0x00000001 },
- { 0x00214c, 1, 0x04, 0x00000001 },
- { 0x002010, 1, 0x04, 0x00000000 },
- { 0x002050, 1, 0x04, 0x00000000 },
- { 0x002090, 1, 0x04, 0x00000001 },
- { 0x0020d0, 1, 0x04, 0x00000002 },
- { 0x002110, 1, 0x04, 0x00000003 },
- { 0x002150, 1, 0x04, 0x00000004 },
- { 0x000380, 1, 0x04, 0x00000000 },
- { 0x0003a0, 1, 0x04, 0x00000000 },
- { 0x0003c0, 1, 0x04, 0x00000000 },
- { 0x0003e0, 1, 0x04, 0x00000000 },
- { 0x000384, 1, 0x04, 0x00000000 },
- { 0x0003a4, 1, 0x04, 0x00000000 },
- { 0x0003c4, 1, 0x04, 0x00000000 },
- { 0x0003e4, 1, 0x04, 0x00000000 },
- { 0x000388, 1, 0x04, 0x00000000 },
- { 0x0003a8, 1, 0x04, 0x00000000 },
- { 0x0003c8, 1, 0x04, 0x00000000 },
- { 0x0003e8, 1, 0x04, 0x00000000 },
- { 0x00038c, 1, 0x04, 0x00000000 },
- { 0x0003ac, 1, 0x04, 0x00000000 },
- { 0x0003cc, 1, 0x04, 0x00000000 },
- { 0x0003ec, 1, 0x04, 0x00000000 },
- { 0x000700, 1, 0x04, 0x00000000 },
- { 0x000710, 1, 0x04, 0x00000000 },
- { 0x000720, 1, 0x04, 0x00000000 },
- { 0x000730, 1, 0x04, 0x00000000 },
- { 0x000704, 1, 0x04, 0x00000000 },
- { 0x000714, 1, 0x04, 0x00000000 },
- { 0x000724, 1, 0x04, 0x00000000 },
- { 0x000734, 1, 0x04, 0x00000000 },
- { 0x000708, 1, 0x04, 0x00000000 },
- { 0x000718, 1, 0x04, 0x00000000 },
- { 0x000728, 1, 0x04, 0x00000000 },
- { 0x000738, 1, 0x04, 0x00000000 },
- { 0x002800, 128, 0x04, 0x00000000 },
- { 0x000a00, 1, 0x04, 0x00000000 },
- { 0x000a20, 1, 0x04, 0x00000000 },
- { 0x000a40, 1, 0x04, 0x00000000 },
- { 0x000a60, 1, 0x04, 0x00000000 },
- { 0x000a80, 1, 0x04, 0x00000000 },
- { 0x000aa0, 1, 0x04, 0x00000000 },
- { 0x000ac0, 1, 0x04, 0x00000000 },
- { 0x000ae0, 1, 0x04, 0x00000000 },
- { 0x000b00, 1, 0x04, 0x00000000 },
- { 0x000b20, 1, 0x04, 0x00000000 },
- { 0x000b40, 1, 0x04, 0x00000000 },
- { 0x000b60, 1, 0x04, 0x00000000 },
- { 0x000b80, 1, 0x04, 0x00000000 },
- { 0x000ba0, 1, 0x04, 0x00000000 },
- { 0x000bc0, 1, 0x04, 0x00000000 },
- { 0x000be0, 1, 0x04, 0x00000000 },
- { 0x000a04, 1, 0x04, 0x00000000 },
- { 0x000a24, 1, 0x04, 0x00000000 },
- { 0x000a44, 1, 0x04, 0x00000000 },
- { 0x000a64, 1, 0x04, 0x00000000 },
- { 0x000a84, 1, 0x04, 0x00000000 },
- { 0x000aa4, 1, 0x04, 0x00000000 },
- { 0x000ac4, 1, 0x04, 0x00000000 },
- { 0x000ae4, 1, 0x04, 0x00000000 },
- { 0x000b04, 1, 0x04, 0x00000000 },
- { 0x000b24, 1, 0x04, 0x00000000 },
- { 0x000b44, 1, 0x04, 0x00000000 },
- { 0x000b64, 1, 0x04, 0x00000000 },
- { 0x000b84, 1, 0x04, 0x00000000 },
- { 0x000ba4, 1, 0x04, 0x00000000 },
- { 0x000bc4, 1, 0x04, 0x00000000 },
- { 0x000be4, 1, 0x04, 0x00000000 },
- { 0x000a08, 1, 0x04, 0x00000000 },
- { 0x000a28, 1, 0x04, 0x00000000 },
- { 0x000a48, 1, 0x04, 0x00000000 },
- { 0x000a68, 1, 0x04, 0x00000000 },
- { 0x000a88, 1, 0x04, 0x00000000 },
- { 0x000aa8, 1, 0x04, 0x00000000 },
- { 0x000ac8, 1, 0x04, 0x00000000 },
- { 0x000ae8, 1, 0x04, 0x00000000 },
- { 0x000b08, 1, 0x04, 0x00000000 },
- { 0x000b28, 1, 0x04, 0x00000000 },
- { 0x000b48, 1, 0x04, 0x00000000 },
- { 0x000b68, 1, 0x04, 0x00000000 },
- { 0x000b88, 1, 0x04, 0x00000000 },
- { 0x000ba8, 1, 0x04, 0x00000000 },
- { 0x000bc8, 1, 0x04, 0x00000000 },
- { 0x000be8, 1, 0x04, 0x00000000 },
- { 0x000a0c, 1, 0x04, 0x00000000 },
- { 0x000a2c, 1, 0x04, 0x00000000 },
- { 0x000a4c, 1, 0x04, 0x00000000 },
- { 0x000a6c, 1, 0x04, 0x00000000 },
- { 0x000a8c, 1, 0x04, 0x00000000 },
- { 0x000aac, 1, 0x04, 0x00000000 },
- { 0x000acc, 1, 0x04, 0x00000000 },
- { 0x000aec, 1, 0x04, 0x00000000 },
- { 0x000b0c, 1, 0x04, 0x00000000 },
- { 0x000b2c, 1, 0x04, 0x00000000 },
- { 0x000b4c, 1, 0x04, 0x00000000 },
- { 0x000b6c, 1, 0x04, 0x00000000 },
- { 0x000b8c, 1, 0x04, 0x00000000 },
- { 0x000bac, 1, 0x04, 0x00000000 },
- { 0x000bcc, 1, 0x04, 0x00000000 },
- { 0x000bec, 1, 0x04, 0x00000000 },
- { 0x000a10, 1, 0x04, 0x00000000 },
- { 0x000a30, 1, 0x04, 0x00000000 },
- { 0x000a50, 1, 0x04, 0x00000000 },
- { 0x000a70, 1, 0x04, 0x00000000 },
- { 0x000a90, 1, 0x04, 0x00000000 },
- { 0x000ab0, 1, 0x04, 0x00000000 },
- { 0x000ad0, 1, 0x04, 0x00000000 },
- { 0x000af0, 1, 0x04, 0x00000000 },
- { 0x000b10, 1, 0x04, 0x00000000 },
- { 0x000b30, 1, 0x04, 0x00000000 },
- { 0x000b50, 1, 0x04, 0x00000000 },
- { 0x000b70, 1, 0x04, 0x00000000 },
- { 0x000b90, 1, 0x04, 0x00000000 },
- { 0x000bb0, 1, 0x04, 0x00000000 },
- { 0x000bd0, 1, 0x04, 0x00000000 },
- { 0x000bf0, 1, 0x04, 0x00000000 },
- { 0x000a14, 1, 0x04, 0x00000000 },
- { 0x000a34, 1, 0x04, 0x00000000 },
- { 0x000a54, 1, 0x04, 0x00000000 },
- { 0x000a74, 1, 0x04, 0x00000000 },
- { 0x000a94, 1, 0x04, 0x00000000 },
- { 0x000ab4, 1, 0x04, 0x00000000 },
- { 0x000ad4, 1, 0x04, 0x00000000 },
- { 0x000af4, 1, 0x04, 0x00000000 },
- { 0x000b14, 1, 0x04, 0x00000000 },
- { 0x000b34, 1, 0x04, 0x00000000 },
- { 0x000b54, 1, 0x04, 0x00000000 },
- { 0x000b74, 1, 0x04, 0x00000000 },
- { 0x000b94, 1, 0x04, 0x00000000 },
- { 0x000bb4, 1, 0x04, 0x00000000 },
- { 0x000bd4, 1, 0x04, 0x00000000 },
- { 0x000bf4, 1, 0x04, 0x00000000 },
- { 0x000c00, 1, 0x04, 0x00000000 },
- { 0x000c10, 1, 0x04, 0x00000000 },
- { 0x000c20, 1, 0x04, 0x00000000 },
- { 0x000c30, 1, 0x04, 0x00000000 },
- { 0x000c40, 1, 0x04, 0x00000000 },
- { 0x000c50, 1, 0x04, 0x00000000 },
- { 0x000c60, 1, 0x04, 0x00000000 },
- { 0x000c70, 1, 0x04, 0x00000000 },
- { 0x000c80, 1, 0x04, 0x00000000 },
- { 0x000c90, 1, 0x04, 0x00000000 },
- { 0x000ca0, 1, 0x04, 0x00000000 },
- { 0x000cb0, 1, 0x04, 0x00000000 },
- { 0x000cc0, 1, 0x04, 0x00000000 },
- { 0x000cd0, 1, 0x04, 0x00000000 },
- { 0x000ce0, 1, 0x04, 0x00000000 },
- { 0x000cf0, 1, 0x04, 0x00000000 },
- { 0x000c04, 1, 0x04, 0x00000000 },
- { 0x000c14, 1, 0x04, 0x00000000 },
- { 0x000c24, 1, 0x04, 0x00000000 },
- { 0x000c34, 1, 0x04, 0x00000000 },
- { 0x000c44, 1, 0x04, 0x00000000 },
- { 0x000c54, 1, 0x04, 0x00000000 },
- { 0x000c64, 1, 0x04, 0x00000000 },
- { 0x000c74, 1, 0x04, 0x00000000 },
- { 0x000c84, 1, 0x04, 0x00000000 },
- { 0x000c94, 1, 0x04, 0x00000000 },
- { 0x000ca4, 1, 0x04, 0x00000000 },
- { 0x000cb4, 1, 0x04, 0x00000000 },
- { 0x000cc4, 1, 0x04, 0x00000000 },
- { 0x000cd4, 1, 0x04, 0x00000000 },
- { 0x000ce4, 1, 0x04, 0x00000000 },
- { 0x000cf4, 1, 0x04, 0x00000000 },
- { 0x000c08, 1, 0x04, 0x00000000 },
- { 0x000c18, 1, 0x04, 0x00000000 },
- { 0x000c28, 1, 0x04, 0x00000000 },
- { 0x000c38, 1, 0x04, 0x00000000 },
- { 0x000c48, 1, 0x04, 0x00000000 },
- { 0x000c58, 1, 0x04, 0x00000000 },
- { 0x000c68, 1, 0x04, 0x00000000 },
- { 0x000c78, 1, 0x04, 0x00000000 },
- { 0x000c88, 1, 0x04, 0x00000000 },
- { 0x000c98, 1, 0x04, 0x00000000 },
- { 0x000ca8, 1, 0x04, 0x00000000 },
- { 0x000cb8, 1, 0x04, 0x00000000 },
- { 0x000cc8, 1, 0x04, 0x00000000 },
- { 0x000cd8, 1, 0x04, 0x00000000 },
- { 0x000ce8, 1, 0x04, 0x00000000 },
- { 0x000cf8, 1, 0x04, 0x00000000 },
- { 0x000c0c, 1, 0x04, 0x3f800000 },
- { 0x000c1c, 1, 0x04, 0x3f800000 },
- { 0x000c2c, 1, 0x04, 0x3f800000 },
- { 0x000c3c, 1, 0x04, 0x3f800000 },
- { 0x000c4c, 1, 0x04, 0x3f800000 },
- { 0x000c5c, 1, 0x04, 0x3f800000 },
- { 0x000c6c, 1, 0x04, 0x3f800000 },
- { 0x000c7c, 1, 0x04, 0x3f800000 },
- { 0x000c8c, 1, 0x04, 0x3f800000 },
- { 0x000c9c, 1, 0x04, 0x3f800000 },
- { 0x000cac, 1, 0x04, 0x3f800000 },
- { 0x000cbc, 1, 0x04, 0x3f800000 },
- { 0x000ccc, 1, 0x04, 0x3f800000 },
- { 0x000cdc, 1, 0x04, 0x3f800000 },
- { 0x000cec, 1, 0x04, 0x3f800000 },
- { 0x000cfc, 1, 0x04, 0x3f800000 },
- { 0x000d00, 1, 0x04, 0xffff0000 },
- { 0x000d08, 1, 0x04, 0xffff0000 },
- { 0x000d10, 1, 0x04, 0xffff0000 },
- { 0x000d18, 1, 0x04, 0xffff0000 },
- { 0x000d20, 1, 0x04, 0xffff0000 },
- { 0x000d28, 1, 0x04, 0xffff0000 },
- { 0x000d30, 1, 0x04, 0xffff0000 },
- { 0x000d38, 1, 0x04, 0xffff0000 },
- { 0x000d04, 1, 0x04, 0xffff0000 },
- { 0x000d0c, 1, 0x04, 0xffff0000 },
- { 0x000d14, 1, 0x04, 0xffff0000 },
- { 0x000d1c, 1, 0x04, 0xffff0000 },
- { 0x000d24, 1, 0x04, 0xffff0000 },
- { 0x000d2c, 1, 0x04, 0xffff0000 },
- { 0x000d34, 1, 0x04, 0xffff0000 },
- { 0x000d3c, 1, 0x04, 0xffff0000 },
- { 0x000e00, 1, 0x04, 0x00000000 },
- { 0x000e10, 1, 0x04, 0x00000000 },
- { 0x000e20, 1, 0x04, 0x00000000 },
- { 0x000e30, 1, 0x04, 0x00000000 },
- { 0x000e40, 1, 0x04, 0x00000000 },
- { 0x000e50, 1, 0x04, 0x00000000 },
- { 0x000e60, 1, 0x04, 0x00000000 },
- { 0x000e70, 1, 0x04, 0x00000000 },
- { 0x000e80, 1, 0x04, 0x00000000 },
- { 0x000e90, 1, 0x04, 0x00000000 },
- { 0x000ea0, 1, 0x04, 0x00000000 },
- { 0x000eb0, 1, 0x04, 0x00000000 },
- { 0x000ec0, 1, 0x04, 0x00000000 },
- { 0x000ed0, 1, 0x04, 0x00000000 },
- { 0x000ee0, 1, 0x04, 0x00000000 },
- { 0x000ef0, 1, 0x04, 0x00000000 },
- { 0x000e04, 1, 0x04, 0xffff0000 },
- { 0x000e14, 1, 0x04, 0xffff0000 },
- { 0x000e24, 1, 0x04, 0xffff0000 },
- { 0x000e34, 1, 0x04, 0xffff0000 },
- { 0x000e44, 1, 0x04, 0xffff0000 },
- { 0x000e54, 1, 0x04, 0xffff0000 },
- { 0x000e64, 1, 0x04, 0xffff0000 },
- { 0x000e74, 1, 0x04, 0xffff0000 },
- { 0x000e84, 1, 0x04, 0xffff0000 },
- { 0x000e94, 1, 0x04, 0xffff0000 },
- { 0x000ea4, 1, 0x04, 0xffff0000 },
- { 0x000eb4, 1, 0x04, 0xffff0000 },
- { 0x000ec4, 1, 0x04, 0xffff0000 },
- { 0x000ed4, 1, 0x04, 0xffff0000 },
- { 0x000ee4, 1, 0x04, 0xffff0000 },
- { 0x000ef4, 1, 0x04, 0xffff0000 },
- { 0x000e08, 1, 0x04, 0xffff0000 },
- { 0x000e18, 1, 0x04, 0xffff0000 },
- { 0x000e28, 1, 0x04, 0xffff0000 },
- { 0x000e38, 1, 0x04, 0xffff0000 },
- { 0x000e48, 1, 0x04, 0xffff0000 },
- { 0x000e58, 1, 0x04, 0xffff0000 },
- { 0x000e68, 1, 0x04, 0xffff0000 },
- { 0x000e78, 1, 0x04, 0xffff0000 },
- { 0x000e88, 1, 0x04, 0xffff0000 },
- { 0x000e98, 1, 0x04, 0xffff0000 },
- { 0x000ea8, 1, 0x04, 0xffff0000 },
- { 0x000eb8, 1, 0x04, 0xffff0000 },
- { 0x000ec8, 1, 0x04, 0xffff0000 },
- { 0x000ed8, 1, 0x04, 0xffff0000 },
- { 0x000ee8, 1, 0x04, 0xffff0000 },
- { 0x000ef8, 1, 0x04, 0xffff0000 },
- { 0x000d40, 1, 0x04, 0x00000000 },
- { 0x000d48, 1, 0x04, 0x00000000 },
- { 0x000d50, 1, 0x04, 0x00000000 },
- { 0x000d58, 1, 0x04, 0x00000000 },
- { 0x000d44, 1, 0x04, 0x00000000 },
- { 0x000d4c, 1, 0x04, 0x00000000 },
- { 0x000d54, 1, 0x04, 0x00000000 },
- { 0x000d5c, 1, 0x04, 0x00000000 },
- { 0x001e00, 1, 0x04, 0x00000001 },
- { 0x001e20, 1, 0x04, 0x00000001 },
- { 0x001e40, 1, 0x04, 0x00000001 },
- { 0x001e60, 1, 0x04, 0x00000001 },
- { 0x001e80, 1, 0x04, 0x00000001 },
- { 0x001ea0, 1, 0x04, 0x00000001 },
- { 0x001ec0, 1, 0x04, 0x00000001 },
- { 0x001ee0, 1, 0x04, 0x00000001 },
- { 0x001e04, 1, 0x04, 0x00000001 },
- { 0x001e24, 1, 0x04, 0x00000001 },
- { 0x001e44, 1, 0x04, 0x00000001 },
- { 0x001e64, 1, 0x04, 0x00000001 },
- { 0x001e84, 1, 0x04, 0x00000001 },
- { 0x001ea4, 1, 0x04, 0x00000001 },
- { 0x001ec4, 1, 0x04, 0x00000001 },
- { 0x001ee4, 1, 0x04, 0x00000001 },
- { 0x001e08, 1, 0x04, 0x00000002 },
- { 0x001e28, 1, 0x04, 0x00000002 },
- { 0x001e48, 1, 0x04, 0x00000002 },
- { 0x001e68, 1, 0x04, 0x00000002 },
- { 0x001e88, 1, 0x04, 0x00000002 },
- { 0x001ea8, 1, 0x04, 0x00000002 },
- { 0x001ec8, 1, 0x04, 0x00000002 },
- { 0x001ee8, 1, 0x04, 0x00000002 },
- { 0x001e0c, 1, 0x04, 0x00000001 },
- { 0x001e2c, 1, 0x04, 0x00000001 },
- { 0x001e4c, 1, 0x04, 0x00000001 },
- { 0x001e6c, 1, 0x04, 0x00000001 },
- { 0x001e8c, 1, 0x04, 0x00000001 },
- { 0x001eac, 1, 0x04, 0x00000001 },
- { 0x001ecc, 1, 0x04, 0x00000001 },
- { 0x001eec, 1, 0x04, 0x00000001 },
- { 0x001e10, 1, 0x04, 0x00000001 },
- { 0x001e30, 1, 0x04, 0x00000001 },
- { 0x001e50, 1, 0x04, 0x00000001 },
- { 0x001e70, 1, 0x04, 0x00000001 },
- { 0x001e90, 1, 0x04, 0x00000001 },
- { 0x001eb0, 1, 0x04, 0x00000001 },
- { 0x001ed0, 1, 0x04, 0x00000001 },
- { 0x001ef0, 1, 0x04, 0x00000001 },
- { 0x001e14, 1, 0x04, 0x00000002 },
- { 0x001e34, 1, 0x04, 0x00000002 },
- { 0x001e54, 1, 0x04, 0x00000002 },
- { 0x001e74, 1, 0x04, 0x00000002 },
- { 0x001e94, 1, 0x04, 0x00000002 },
- { 0x001eb4, 1, 0x04, 0x00000002 },
- { 0x001ed4, 1, 0x04, 0x00000002 },
- { 0x001ef4, 1, 0x04, 0x00000002 },
- { 0x001e18, 1, 0x04, 0x00000001 },
- { 0x001e38, 1, 0x04, 0x00000001 },
- { 0x001e58, 1, 0x04, 0x00000001 },
- { 0x001e78, 1, 0x04, 0x00000001 },
- { 0x001e98, 1, 0x04, 0x00000001 },
- { 0x001eb8, 1, 0x04, 0x00000001 },
- { 0x001ed8, 1, 0x04, 0x00000001 },
- { 0x001ef8, 1, 0x04, 0x00000001 },
- { 0x003400, 128, 0x04, 0x00000000 },
- { 0x00030c, 1, 0x04, 0x00000001 },
- { 0x001944, 1, 0x04, 0x00000000 },
- { 0x001514, 1, 0x04, 0x00000000 },
- { 0x000d68, 1, 0x04, 0x0000ffff },
- { 0x00121c, 1, 0x04, 0x0fac6881 },
- { 0x000fac, 1, 0x04, 0x00000001 },
- { 0x001538, 1, 0x04, 0x00000001 },
- { 0x000fe0, 2, 0x04, 0x00000000 },
- { 0x000fe8, 1, 0x04, 0x00000014 },
- { 0x000fec, 1, 0x04, 0x00000040 },
- { 0x000ff0, 1, 0x04, 0x00000000 },
- { 0x00179c, 1, 0x04, 0x00000000 },
- { 0x001228, 1, 0x04, 0x00000400 },
- { 0x00122c, 1, 0x04, 0x00000300 },
- { 0x001230, 1, 0x04, 0x00010001 },
- { 0x0007f8, 1, 0x04, 0x00000000 },
- { 0x0015b4, 1, 0x04, 0x00000001 },
- { 0x0015cc, 1, 0x04, 0x00000000 },
- { 0x001534, 1, 0x04, 0x00000000 },
- { 0x000fb0, 1, 0x04, 0x00000000 },
- { 0x0015d0, 1, 0x04, 0x00000000 },
- { 0x00153c, 1, 0x04, 0x00000000 },
- { 0x0016b4, 1, 0x04, 0x00000003 },
- { 0x000fbc, 4, 0x04, 0x0000ffff },
- { 0x000df8, 2, 0x04, 0x00000000 },
- { 0x001948, 1, 0x04, 0x00000000 },
- { 0x001970, 1, 0x04, 0x00000001 },
- { 0x00161c, 1, 0x04, 0x000009f0 },
- { 0x000dcc, 1, 0x04, 0x00000010 },
- { 0x00163c, 1, 0x04, 0x00000000 },
- { 0x0015e4, 1, 0x04, 0x00000000 },
- { 0x001160, 32, 0x04, 0x25e00040 },
- { 0x001880, 32, 0x04, 0x00000000 },
- { 0x000f84, 2, 0x04, 0x00000000 },
- { 0x0017c8, 2, 0x04, 0x00000000 },
- { 0x0017d0, 1, 0x04, 0x000000ff },
- { 0x0017d4, 1, 0x04, 0xffffffff },
- { 0x0017d8, 1, 0x04, 0x00000002 },
- { 0x0017dc, 1, 0x04, 0x00000000 },
- { 0x0015f4, 2, 0x04, 0x00000000 },
- { 0x001434, 2, 0x04, 0x00000000 },
- { 0x000d74, 1, 0x04, 0x00000000 },
- { 0x000dec, 1, 0x04, 0x00000001 },
- { 0x0013a4, 1, 0x04, 0x00000000 },
- { 0x001318, 1, 0x04, 0x00000001 },
- { 0x001644, 1, 0x04, 0x00000000 },
- { 0x000748, 1, 0x04, 0x00000000 },
- { 0x000de8, 1, 0x04, 0x00000000 },
- { 0x001648, 1, 0x04, 0x00000000 },
- { 0x0012a4, 1, 0x04, 0x00000000 },
- { 0x001120, 4, 0x04, 0x00000000 },
- { 0x001118, 1, 0x04, 0x00000000 },
- { 0x00164c, 1, 0x04, 0x00000000 },
- { 0x001658, 1, 0x04, 0x00000000 },
- { 0x001910, 1, 0x04, 0x00000290 },
- { 0x001518, 1, 0x04, 0x00000000 },
- { 0x00165c, 1, 0x04, 0x00000001 },
- { 0x001520, 1, 0x04, 0x00000000 },
- { 0x001604, 1, 0x04, 0x00000000 },
- { 0x001570, 1, 0x04, 0x00000000 },
- { 0x0013b0, 2, 0x04, 0x3f800000 },
- { 0x00020c, 1, 0x04, 0x00000000 },
- { 0x001670, 1, 0x04, 0x30201000 },
- { 0x001674, 1, 0x04, 0x70605040 },
- { 0x001678, 1, 0x04, 0xb8a89888 },
- { 0x00167c, 1, 0x04, 0xf8e8d8c8 },
- { 0x00166c, 1, 0x04, 0x00000000 },
- { 0x001680, 1, 0x04, 0x00ffff00 },
- { 0x0012d0, 1, 0x04, 0x00000003 },
- { 0x0012d4, 1, 0x04, 0x00000002 },
- { 0x001684, 2, 0x04, 0x00000000 },
- { 0x000dac, 2, 0x04, 0x00001b02 },
- { 0x000db4, 1, 0x04, 0x00000000 },
- { 0x00168c, 1, 0x04, 0x00000000 },
- { 0x0015bc, 1, 0x04, 0x00000000 },
- { 0x00156c, 1, 0x04, 0x00000000 },
- { 0x00187c, 1, 0x04, 0x00000000 },
- { 0x001110, 1, 0x04, 0x00000001 },
- { 0x000dc0, 3, 0x04, 0x00000000 },
- { 0x001234, 1, 0x04, 0x00000000 },
- { 0x001690, 1, 0x04, 0x00000000 },
- { 0x0012ac, 1, 0x04, 0x00000001 },
- { 0x0002c4, 1, 0x04, 0x00000000 },
- { 0x000790, 5, 0x04, 0x00000000 },
- { 0x00077c, 1, 0x04, 0x00000000 },
- { 0x001000, 1, 0x04, 0x00000010 },
- { 0x0010fc, 1, 0x04, 0x00000000 },
- { 0x001290, 1, 0x04, 0x00000000 },
- { 0x000218, 1, 0x04, 0x00000010 },
- { 0x0012d8, 1, 0x04, 0x00000000 },
- { 0x0012dc, 1, 0x04, 0x00000010 },
- { 0x000d94, 1, 0x04, 0x00000001 },
- { 0x00155c, 2, 0x04, 0x00000000 },
- { 0x001564, 1, 0x04, 0x00000fff },
- { 0x001574, 2, 0x04, 0x00000000 },
- { 0x00157c, 1, 0x04, 0x000fffff },
- { 0x001354, 1, 0x04, 0x00000000 },
- { 0x001610, 1, 0x04, 0x00000012 },
- { 0x001608, 2, 0x04, 0x00000000 },
- { 0x00260c, 1, 0x04, 0x00000000 },
- { 0x0007ac, 1, 0x04, 0x00000000 },
- { 0x00162c, 1, 0x04, 0x00000003 },
- { 0x000210, 1, 0x04, 0x00000000 },
- { 0x000320, 1, 0x04, 0x00000000 },
- { 0x000324, 6, 0x04, 0x3f800000 },
- { 0x000750, 1, 0x04, 0x00000000 },
- { 0x000760, 1, 0x04, 0x39291909 },
- { 0x000764, 1, 0x04, 0x79695949 },
- { 0x000768, 1, 0x04, 0xb9a99989 },
- { 0x00076c, 1, 0x04, 0xf9e9d9c9 },
- { 0x000770, 1, 0x04, 0x30201000 },
- { 0x000774, 1, 0x04, 0x70605040 },
- { 0x000778, 1, 0x04, 0x00009080 },
- { 0x000780, 1, 0x04, 0x39291909 },
- { 0x000784, 1, 0x04, 0x79695949 },
- { 0x000788, 1, 0x04, 0xb9a99989 },
- { 0x00078c, 1, 0x04, 0xf9e9d9c9 },
- { 0x0007d0, 1, 0x04, 0x30201000 },
- { 0x0007d4, 1, 0x04, 0x70605040 },
- { 0x0007d8, 1, 0x04, 0x00009080 },
- { 0x00037c, 1, 0x04, 0x00000001 },
- { 0x000740, 2, 0x04, 0x00000000 },
- { 0x002600, 1, 0x04, 0x00000000 },
- { 0x001918, 1, 0x04, 0x00000000 },
- { 0x00191c, 1, 0x04, 0x00000900 },
- { 0x001920, 1, 0x04, 0x00000405 },
- { 0x001308, 1, 0x04, 0x00000001 },
- { 0x001924, 1, 0x04, 0x00000000 },
- { 0x0013ac, 1, 0x04, 0x00000000 },
- { 0x00192c, 1, 0x04, 0x00000001 },
- { 0x00193c, 1, 0x04, 0x00002c1c },
- { 0x000d7c, 1, 0x04, 0x00000000 },
- { 0x000f8c, 1, 0x04, 0x00000000 },
- { 0x0002c0, 1, 0x04, 0x00000001 },
- { 0x001510, 1, 0x04, 0x00000000 },
- { 0x001940, 1, 0x04, 0x00000000 },
- { 0x000ff4, 2, 0x04, 0x00000000 },
- { 0x00194c, 2, 0x04, 0x00000000 },
- { 0x001968, 1, 0x04, 0x00000000 },
- { 0x001590, 1, 0x04, 0x0000003f },
- { 0x0007e8, 4, 0x04, 0x00000000 },
- { 0x00196c, 1, 0x04, 0x00000011 },
- { 0x0002e4, 1, 0x04, 0x0000b001 },
- { 0x00036c, 2, 0x04, 0x00000000 },
- { 0x00197c, 1, 0x04, 0x00000000 },
- { 0x000fcc, 2, 0x04, 0x00000000 },
- { 0x0002d8, 1, 0x04, 0x00000040 },
- { 0x001980, 1, 0x04, 0x00000080 },
- { 0x001504, 1, 0x04, 0x00000080 },
- { 0x001984, 1, 0x04, 0x00000000 },
- { 0x000300, 1, 0x04, 0x00000001 },
- { 0x0013a8, 1, 0x04, 0x00000000 },
- { 0x0012ec, 1, 0x04, 0x00000000 },
- { 0x001310, 1, 0x04, 0x00000000 },
- { 0x001314, 1, 0x04, 0x00000001 },
- { 0x001380, 1, 0x04, 0x00000000 },
- { 0x001384, 4, 0x04, 0x00000001 },
- { 0x001394, 1, 0x04, 0x00000000 },
- { 0x00139c, 1, 0x04, 0x00000000 },
- { 0x001398, 1, 0x04, 0x00000000 },
- { 0x001594, 1, 0x04, 0x00000000 },
- { 0x001598, 4, 0x04, 0x00000001 },
- { 0x000f54, 3, 0x04, 0x00000000 },
- { 0x0019bc, 1, 0x04, 0x00000000 },
- { 0x000f9c, 2, 0x04, 0x00000000 },
- { 0x0012cc, 1, 0x04, 0x00000000 },
- { 0x0012e8, 1, 0x04, 0x00000000 },
- { 0x00130c, 1, 0x04, 0x00000001 },
- { 0x001360, 8, 0x04, 0x00000000 },
- { 0x00133c, 2, 0x04, 0x00000001 },
- { 0x001344, 1, 0x04, 0x00000002 },
- { 0x001348, 2, 0x04, 0x00000001 },
- { 0x001350, 1, 0x04, 0x00000002 },
- { 0x001358, 1, 0x04, 0x00000001 },
- { 0x0012e4, 1, 0x04, 0x00000000 },
- { 0x00131c, 4, 0x04, 0x00000000 },
- { 0x0019c0, 1, 0x04, 0x00000000 },
- { 0x001140, 1, 0x04, 0x00000000 },
- { 0x0019c4, 1, 0x04, 0x00000000 },
- { 0x0019c8, 1, 0x04, 0x00001500 },
- { 0x00135c, 1, 0x04, 0x00000000 },
- { 0x000f90, 1, 0x04, 0x00000000 },
- { 0x0019e0, 8, 0x04, 0x00000001 },
- { 0x0019cc, 1, 0x04, 0x00000001 },
- { 0x0015b8, 1, 0x04, 0x00000000 },
- { 0x001a00, 1, 0x04, 0x00001111 },
- { 0x001a04, 7, 0x04, 0x00000000 },
- { 0x000d6c, 2, 0x04, 0xffff0000 },
- { 0x0010f8, 1, 0x04, 0x00001010 },
- { 0x000d80, 5, 0x04, 0x00000000 },
- { 0x000da0, 1, 0x04, 0x00000000 },
- { 0x0007a4, 2, 0x04, 0x00000000 },
- { 0x001508, 1, 0x04, 0x80000000 },
- { 0x00150c, 1, 0x04, 0x40000000 },
- { 0x001668, 1, 0x04, 0x00000000 },
- { 0x000318, 2, 0x04, 0x00000008 },
- { 0x000d9c, 1, 0x04, 0x00000001 },
- { 0x000ddc, 1, 0x04, 0x00000002 },
- { 0x000374, 1, 0x04, 0x00000000 },
- { 0x000378, 1, 0x04, 0x00000020 },
- { 0x0007dc, 1, 0x04, 0x00000000 },
- { 0x00074c, 1, 0x04, 0x00000055 },
- { 0x001420, 1, 0x04, 0x00000003 },
- { 0x0017bc, 2, 0x04, 0x00000000 },
- { 0x0017c4, 1, 0x04, 0x00000001 },
- { 0x001008, 1, 0x04, 0x00000008 },
- { 0x00100c, 1, 0x04, 0x00000040 },
- { 0x001010, 1, 0x04, 0x0000012c },
- { 0x000d60, 1, 0x04, 0x00000040 },
- { 0x00075c, 1, 0x04, 0x00000003 },
- { 0x001018, 1, 0x04, 0x00000020 },
- { 0x00101c, 1, 0x04, 0x00000001 },
- { 0x001020, 1, 0x04, 0x00000020 },
- { 0x001024, 1, 0x04, 0x00000001 },
- { 0x001444, 3, 0x04, 0x00000000 },
- { 0x000360, 1, 0x04, 0x20164010 },
- { 0x000364, 1, 0x04, 0x00000020 },
- { 0x000368, 1, 0x04, 0x00000000 },
- { 0x000de4, 1, 0x04, 0x00000000 },
- { 0x000204, 1, 0x04, 0x00000006 },
- { 0x000208, 1, 0x04, 0x00000000 },
- { 0x0002cc, 2, 0x04, 0x003fffff },
- { 0x001220, 1, 0x04, 0x00000005 },
- { 0x000fdc, 1, 0x04, 0x00000000 },
- { 0x000f98, 1, 0x04, 0x00400008 },
- { 0x001284, 1, 0x04, 0x08000080 },
- { 0x001450, 1, 0x04, 0x00400008 },
- { 0x001454, 1, 0x04, 0x08000080 },
- { 0x000214, 1, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nv108_grctx_pack_icmd[] = {
+ { nv108_grctx_init_icmd_0 },
{}
};
-static struct nvc0_graph_init
-nv108_grctx_init_unk40xx[] = {
+static const struct nvc0_graph_init
+nv108_grctx_init_fe_0[] = {
{ 0x404004, 8, 0x04, 0x00000000 },
{ 0x404024, 1, 0x04, 0x0000e000 },
{ 0x404028, 8, 0x04, 0x00000000 },
@@ -1132,8 +311,8 @@ nv108_grctx_init_unk40xx[] = {
{}
};
-static struct nvc0_graph_init
-nv108_grctx_init_unk58xx[] = {
+static const struct nvc0_graph_init
+nv108_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x0f8000bf },
{ 0x405830, 1, 0x04, 0x02180648 },
{ 0x405834, 1, 0x04, 0x08000000 },
@@ -1146,8 +325,10 @@ nv108_grctx_init_unk58xx[] = {
{}
};
-static struct nvc0_graph_init
-nv108_grctx_init_unk64xx[] = {
+static const struct nvc0_graph_init
+nv108_grctx_init_pd_0[] = {
+ { 0x406020, 1, 0x04, 0x034103c1 },
+ { 0x406028, 4, 0x04, 0x00000001 },
{ 0x4064a8, 1, 0x04, 0x00000000 },
{ 0x4064ac, 1, 0x04, 0x00003fff },
{ 0x4064b0, 3, 0x04, 0x00000000 },
@@ -1159,8 +340,8 @@ nv108_grctx_init_unk64xx[] = {
{}
};
-static struct nvc0_graph_init
-nv108_grctx_init_unk78xx[] = {
+const struct nvc0_graph_init
+nv108_grctx_init_rstr2d_0[] = {
{ 0x407804, 1, 0x04, 0x00000063 },
{ 0x40780c, 1, 0x04, 0x0a418820 },
{ 0x407810, 1, 0x04, 0x062080e6 },
@@ -1172,8 +353,8 @@ nv108_grctx_init_unk78xx[] = {
{}
};
-static struct nvc0_graph_init
-nv108_grctx_init_unk88xx[] = {
+static const struct nvc0_graph_init
+nv108_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x32802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x1003e005 },
@@ -1185,9 +366,23 @@ nv108_grctx_init_unk88xx[] = {
{}
};
-static struct nvc0_graph_init
-nv108_grctx_init_gpc_0[] = {
- { 0x418380, 1, 0x04, 0x00000016 },
+static const struct nvc0_graph_pack
+nv108_grctx_pack_hub[] = {
+ { nvc0_grctx_init_main_0 },
+ { nv108_grctx_init_fe_0 },
+ { nvf0_grctx_init_pri_0 },
+ { nve4_grctx_init_memfmt_0 },
+ { nv108_grctx_init_ds_0 },
+ { nvf0_grctx_init_cwd_0 },
+ { nv108_grctx_init_pd_0 },
+ { nv108_grctx_init_rstr2d_0 },
+ { nve4_grctx_init_scc_0 },
+ { nv108_grctx_init_be_0 },
+ {}
+};
+
+const struct nvc0_graph_init
+nv108_grctx_init_prop_0[] = {
{ 0x418400, 1, 0x04, 0x38005e00 },
{ 0x418404, 1, 0x04, 0x71e0ffff },
{ 0x41840c, 1, 0x04, 0x00001008 },
@@ -1196,11 +391,21 @@ nv108_grctx_init_gpc_0[] = {
{ 0x418450, 6, 0x04, 0x00000000 },
{ 0x418468, 1, 0x04, 0x00000001 },
{ 0x41846c, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nv108_grctx_init_gpc_unk_1[] = {
{ 0x418600, 1, 0x04, 0x0000007f },
{ 0x418684, 1, 0x04, 0x0000001f },
{ 0x418700, 1, 0x04, 0x00000002 },
{ 0x418704, 2, 0x04, 0x00000080 },
{ 0x41870c, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nv108_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x7006863a },
{ 0x418808, 1, 0x04, 0x00000000 },
{ 0x41880c, 1, 0x04, 0x00000030 },
@@ -1211,10 +416,11 @@ nv108_grctx_init_gpc_0[] = {
{ 0x4188e0, 1, 0x04, 0x01000000 },
{ 0x4188e8, 5, 0x04, 0x00000000 },
{ 0x4188fc, 1, 0x04, 0x20100058 },
- { 0x41891c, 1, 0x04, 0x00ff00ff },
- { 0x418924, 1, 0x04, 0x00000000 },
- { 0x418928, 1, 0x04, 0x00ffff00 },
- { 0x41892c, 1, 0x04, 0x0000ff00 },
+ {}
+};
+
+const struct nvc0_graph_init
+nv108_grctx_init_crstr_0[] = {
{ 0x418b00, 1, 0x04, 0x0000001e },
{ 0x418b08, 1, 0x04, 0x0a418820 },
{ 0x418b0c, 1, 0x04, 0x062080e6 },
@@ -1223,24 +429,36 @@ nv108_grctx_init_gpc_0[] = {
{ 0x418b18, 1, 0x04, 0x0a418820 },
{ 0x418b1c, 1, 0x04, 0x000000e6 },
{ 0x418bb8, 1, 0x04, 0x00000103 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nv108_grctx_init_gpm_0[] = {
{ 0x418c08, 1, 0x04, 0x00000001 },
{ 0x418c10, 8, 0x04, 0x00000000 },
{ 0x418c40, 1, 0x04, 0xffffffff },
{ 0x418c6c, 1, 0x04, 0x00000001 },
{ 0x418c80, 1, 0x04, 0x2020000c },
{ 0x418c8c, 1, 0x04, 0x00000001 },
- { 0x418d24, 1, 0x04, 0x00000000 },
- { 0x419000, 1, 0x04, 0x00000780 },
- { 0x419004, 2, 0x04, 0x00000000 },
- { 0x419014, 1, 0x04, 0x00000004 },
{}
};
-static struct nvc0_graph_init
-nv108_grctx_init_tpc[] = {
- { 0x419848, 1, 0x04, 0x00000000 },
- { 0x419864, 1, 0x04, 0x00000129 },
- { 0x419888, 1, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nv108_grctx_pack_gpc[] = {
+ { nvc0_grctx_init_gpc_unk_0 },
+ { nv108_grctx_init_prop_0 },
+ { nv108_grctx_init_gpc_unk_1 },
+ { nv108_grctx_init_setup_0 },
+ { nvc0_grctx_init_zcull_0 },
+ { nv108_grctx_init_crstr_0 },
+ { nv108_grctx_init_gpm_0 },
+ { nvf0_grctx_init_gpc_unk_2 },
+ { nvc0_grctx_init_gcc_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nv108_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000100f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000421 },
@@ -1251,14 +469,11 @@ nv108_grctx_init_tpc[] = {
{ 0x419a20, 1, 0x04, 0x00000800 },
{ 0x419a30, 1, 0x04, 0x00000001 },
{ 0x419ac4, 1, 0x04, 0x0037f440 },
- { 0x419c00, 1, 0x04, 0x0000001a },
- { 0x419c04, 1, 0x04, 0x80000006 },
- { 0x419c08, 1, 0x04, 0x00000002 },
- { 0x419c20, 1, 0x04, 0x00000000 },
- { 0x419c24, 1, 0x04, 0x00084210 },
- { 0x419c28, 1, 0x04, 0x3efbefbe },
- { 0x419ce8, 1, 0x04, 0x00000000 },
- { 0x419cf4, 1, 0x04, 0x00000203 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nv108_grctx_init_sm_0[] = {
{ 0x419e04, 1, 0x04, 0x00000000 },
{ 0x419e08, 1, 0x04, 0x0000001d },
{ 0x419e0c, 1, 0x04, 0x00000000 },
@@ -1272,7 +487,7 @@ nv108_grctx_init_tpc[] = {
{ 0x419e68, 1, 0x04, 0x00000002 },
{ 0x419e6c, 12, 0x04, 0x00000000 },
{ 0x419eac, 1, 0x04, 0x00001f8f },
- { 0x419eb0, 1, 0x04, 0x0db00da0 },
+ { 0x419eb0, 1, 0x04, 0x0db00d2f },
{ 0x419eb8, 1, 0x04, 0x00000000 },
{ 0x419ec8, 1, 0x04, 0x0001304f },
{ 0x419f30, 4, 0x04, 0x00000000 },
@@ -1285,25 +500,37 @@ nv108_grctx_init_tpc[] = {
{}
};
-static struct nvc0_graph_init
-nv108_grctx_init_unk[] = {
- { 0x41be24, 1, 0x04, 0x00000006 },
+static const struct nvc0_graph_pack
+nv108_grctx_pack_tpc[] = {
+ { nvd7_grctx_init_pe_0 },
+ { nv108_grctx_init_tex_0 },
+ { nvf0_grctx_init_mpc_0 },
+ { nvf0_grctx_init_l1c_0 },
+ { nv108_grctx_init_sm_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nv108_grctx_init_cbm_0[] = {
{ 0x41bec0, 1, 0x04, 0x10000000 },
{ 0x41bec4, 1, 0x04, 0x00037f7f },
{ 0x41bee4, 1, 0x04, 0x00000000 },
{ 0x41bef0, 1, 0x04, 0x000003ff },
- { 0x41bf00, 1, 0x04, 0x0a418820 },
- { 0x41bf04, 1, 0x04, 0x062080e6 },
- { 0x41bf08, 1, 0x04, 0x020398a4 },
- { 0x41bf0c, 1, 0x04, 0x0e629062 },
- { 0x41bf10, 1, 0x04, 0x0a418820 },
- { 0x41bf14, 1, 0x04, 0x000000e6 },
- { 0x41bfd0, 1, 0x04, 0x00900103 },
- { 0x41bfe0, 1, 0x04, 0x00400001 },
- { 0x41bfe4, 1, 0x04, 0x00000000 },
{}
};
+static const struct nvc0_graph_pack
+nv108_grctx_pack_ppc[] = {
+ { nve4_grctx_init_pes_0 },
+ { nv108_grctx_init_cbm_0 },
+ { nvd7_grctx_init_wwdx_0 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
static void
nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
@@ -1346,47 +573,6 @@ nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
mmio_list(0x17e920, 0x00090d08, 0, 0);
}
-static struct nvc0_graph_init *
-nv108_grctx_init_hub[] = {
- nvc0_grctx_init_base,
- nv108_grctx_init_unk40xx,
- nvf0_grctx_init_unk44xx,
- nve4_grctx_init_unk46xx,
- nve4_grctx_init_unk47xx,
- nv108_grctx_init_unk58xx,
- nvf0_grctx_init_unk5bxx,
- nvf0_grctx_init_unk60xx,
- nv108_grctx_init_unk64xx,
- nv108_grctx_init_unk78xx,
- nve4_grctx_init_unk80xx,
- nv108_grctx_init_unk88xx,
- NULL
-};
-
-struct nvc0_graph_init *
-nv108_grctx_init_gpc[] = {
- nv108_grctx_init_gpc_0,
- nvc0_grctx_init_gpc_1,
- nv108_grctx_init_tpc,
- nv108_grctx_init_unk,
- NULL
-};
-
-struct nvc0_graph_init
-nv108_grctx_init_mthd_magic[] = {
- { 0x3410, 1, 0x04, 0x8e0e2006 },
- { 0x3414, 1, 0x04, 0x00000038 },
- {}
-};
-
-static struct nvc0_graph_mthd
-nv108_grctx_init_mthd[] = {
- { 0xa197, nv108_grctx_init_a197, },
- { 0x902d, nvc0_grctx_init_902d, },
- { 0x902d, nv108_grctx_init_mthd_magic, },
- {}
-};
-
struct nouveau_oclass *
nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x08),
@@ -1398,11 +584,14 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
- .main = nve4_grctx_generate_main,
- .mods = nv108_grctx_generate_mods,
- .unkn = nve4_grctx_generate_unkn,
- .hub = nv108_grctx_init_hub,
- .gpc = nv108_grctx_init_gpc,
- .icmd = nv108_grctx_init_icmd,
- .mthd = nv108_grctx_init_mthd,
+ .main = nve4_grctx_generate_main,
+ .mods = nv108_grctx_generate_mods,
+ .unkn = nve4_grctx_generate_unkn,
+ .hub = nv108_grctx_pack_hub,
+ .gpc = nv108_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = nv108_grctx_pack_tpc,
+ .ppc = nv108_grctx_pack_ppc,
+ .icmd = nv108_grctx_pack_icmd,
+ .mthd = nvf0_grctx_pack_mthd,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index fe67415c3e17..833a96508c4e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -22,10 +22,14 @@
* Authors: Ben Skeggs
*/
-#include "nvc0.h"
+#include "ctxnvc0.h"
-struct nvc0_graph_init
-nvc0_grctx_init_icmd[] = {
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+nvc0_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
{ 0x000038, 1, 0x01, 0x0fac6881 },
@@ -140,8 +144,7 @@ nvc0_grctx_init_icmd[] = {
{ 0x000586, 1, 0x01, 0x00000040 },
{ 0x000582, 2, 0x01, 0x00000080 },
{ 0x0005c2, 1, 0x01, 0x00000001 },
- { 0x000638, 1, 0x01, 0x00000001 },
- { 0x000639, 1, 0x01, 0x00000001 },
+ { 0x000638, 2, 0x01, 0x00000001 },
{ 0x00063a, 1, 0x01, 0x00000002 },
{ 0x00063b, 2, 0x01, 0x00000001 },
{ 0x00063d, 1, 0x01, 0x00000002 },
@@ -201,15 +204,13 @@ nvc0_grctx_init_icmd[] = {
{ 0x000787, 1, 0x01, 0x000000cf },
{ 0x00078c, 1, 0x01, 0x00000008 },
{ 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 1, 0x01, 0x00000001 },
- { 0x000795, 2, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
{ 0x000797, 1, 0x01, 0x000000cf },
{ 0x000836, 1, 0x01, 0x00000001 },
{ 0x00079a, 1, 0x01, 0x00000002 },
{ 0x000833, 1, 0x01, 0x04444480 },
{ 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 1, 0x01, 0x00000001 },
- { 0x0007a4, 2, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
{ 0x000831, 1, 0x01, 0x00000004 },
{ 0x00080c, 1, 0x01, 0x00000002 },
{ 0x00080d, 2, 0x01, 0x00000100 },
@@ -235,14 +236,12 @@ nvc0_grctx_init_icmd[] = {
{ 0x0006b1, 1, 0x01, 0x00000011 },
{ 0x00078c, 1, 0x01, 0x00000008 },
{ 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 1, 0x01, 0x00000001 },
- { 0x000795, 2, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
{ 0x000797, 1, 0x01, 0x000000cf },
{ 0x00079a, 1, 0x01, 0x00000002 },
{ 0x000833, 1, 0x01, 0x04444480 },
{ 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 1, 0x01, 0x00000001 },
- { 0x0007a4, 2, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
{ 0x000831, 1, 0x01, 0x00000004 },
{ 0x01e100, 1, 0x01, 0x00000001 },
{ 0x001000, 1, 0x01, 0x00000014 },
@@ -267,8 +266,14 @@ nvc0_grctx_init_icmd[] = {
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_9097[] = {
+const struct nvc0_graph_pack
+nvc0_grctx_pack_icmd[] = {
+ { nvc0_grctx_init_icmd_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc0_grctx_init_9097_0[] = {
{ 0x000800, 8, 0x40, 0x00000000 },
{ 0x000804, 8, 0x40, 0x00000000 },
{ 0x000808, 8, 0x40, 0x00000400 },
@@ -516,8 +521,7 @@ nvc0_grctx_init_9097[] = {
{ 0x001350, 1, 0x04, 0x00000002 },
{ 0x001358, 1, 0x04, 0x00000001 },
{ 0x0012e4, 1, 0x04, 0x00000000 },
- { 0x00131c, 1, 0x04, 0x00000000 },
- { 0x001320, 3, 0x04, 0x00000000 },
+ { 0x00131c, 4, 0x04, 0x00000000 },
{ 0x0019c0, 1, 0x04, 0x00000000 },
{ 0x001140, 1, 0x04, 0x00000000 },
{ 0x0019c4, 1, 0x04, 0x00000000 },
@@ -571,8 +575,8 @@ nvc0_grctx_init_9097[] = {
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_902d[] = {
+const struct nvc0_graph_init
+nvc0_grctx_init_902d_0[] = {
{ 0x000200, 1, 0x04, 0x000000cf },
{ 0x000204, 1, 0x04, 0x00000001 },
{ 0x000208, 1, 0x04, 0x00000020 },
@@ -590,8 +594,8 @@ nvc0_grctx_init_902d[] = {
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_9039[] = {
+const struct nvc0_graph_init
+nvc0_grctx_init_9039_0[] = {
{ 0x00030c, 3, 0x04, 0x00000000 },
{ 0x000320, 1, 0x04, 0x00000000 },
{ 0x000238, 2, 0x04, 0x00000000 },
@@ -599,8 +603,8 @@ nvc0_grctx_init_9039[] = {
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_90c0[] = {
+const struct nvc0_graph_init
+nvc0_grctx_init_90c0_0[] = {
{ 0x00270c, 8, 0x20, 0x00000000 },
{ 0x00030c, 1, 0x04, 0x00000001 },
{ 0x001944, 1, 0x04, 0x00000000 },
@@ -617,38 +621,44 @@ nvc0_grctx_init_90c0[] = {
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_base[] = {
+const struct nvc0_graph_pack
+nvc0_grctx_pack_mthd[] = {
+ { nvc0_grctx_init_9097_0, 0x9097 },
+ { nvc0_grctx_init_902d_0, 0x902d },
+ { nvc0_grctx_init_9039_0, 0x9039 },
+ { nvc0_grctx_init_90c0_0, 0x90c0 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_main_0[] = {
{ 0x400204, 2, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_unk40xx[] = {
- { 0x404004, 10, 0x04, 0x00000000 },
+const struct nvc0_graph_init
+nvc0_grctx_init_fe_0[] = {
+ { 0x404004, 11, 0x04, 0x00000000 },
{ 0x404044, 1, 0x04, 0x00000000 },
- { 0x404094, 1, 0x04, 0x00000000 },
- { 0x404098, 12, 0x04, 0x00000000 },
+ { 0x404094, 13, 0x04, 0x00000000 },
{ 0x4040c8, 1, 0x04, 0xf0000087 },
{ 0x4040d0, 6, 0x04, 0x00000000 },
{ 0x4040e8, 1, 0x04, 0x00001000 },
{ 0x4040f8, 1, 0x04, 0x00000000 },
- { 0x404130, 1, 0x04, 0x00000000 },
- { 0x404134, 1, 0x04, 0x00000000 },
+ { 0x404130, 2, 0x04, 0x00000000 },
{ 0x404138, 1, 0x04, 0x20000040 },
{ 0x404150, 1, 0x04, 0x0000002e },
{ 0x404154, 1, 0x04, 0x00000400 },
{ 0x404158, 1, 0x04, 0x00000200 },
{ 0x404164, 1, 0x04, 0x00000055 },
{ 0x404168, 1, 0x04, 0x00000000 },
- { 0x404174, 1, 0x04, 0x00000000 },
- { 0x404178, 2, 0x04, 0x00000000 },
+ { 0x404174, 3, 0x04, 0x00000000 },
{ 0x404200, 8, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_unk44xx[] = {
+const struct nvc0_graph_init
+nvc0_grctx_init_pri_0[] = {
{ 0x404404, 14, 0x04, 0x00000000 },
{ 0x404460, 2, 0x04, 0x00000000 },
{ 0x404468, 1, 0x04, 0x00ffffff },
@@ -658,8 +668,8 @@ nvc0_grctx_init_unk44xx[] = {
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_unk46xx[] = {
+const struct nvc0_graph_init
+nvc0_grctx_init_memfmt_0[] = {
{ 0x404604, 1, 0x04, 0x00000015 },
{ 0x404608, 1, 0x04, 0x00000000 },
{ 0x40460c, 1, 0x04, 0x00002e00 },
@@ -674,19 +684,14 @@ nvc0_grctx_init_unk46xx[] = {
{ 0x4046a0, 1, 0x04, 0x007f0080 },
{ 0x4046a4, 18, 0x04, 0x00000000 },
{ 0x4046f0, 2, 0x04, 0x00000000 },
- {}
-};
-
-struct nvc0_graph_init
-nvc0_grctx_init_unk47xx[] = {
{ 0x404700, 13, 0x04, 0x00000000 },
{ 0x404734, 1, 0x04, 0x00000100 },
{ 0x404738, 8, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_unk58xx[] = {
+static const struct nvc0_graph_init
+nvc0_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x078000bf },
{ 0x405830, 1, 0x04, 0x02180000 },
{ 0x405834, 2, 0x04, 0x00000000 },
@@ -697,23 +702,18 @@ nvc0_grctx_init_unk58xx[] = {
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_unk60xx[] = {
+static const struct nvc0_graph_init
+nvc0_grctx_init_pd_0[] = {
{ 0x406020, 1, 0x04, 0x000103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
- {}
-};
-
-struct nvc0_graph_init
-nvc0_grctx_init_unk64xx[] = {
{ 0x4064a8, 1, 0x04, 0x00000000 },
{ 0x4064ac, 1, 0x04, 0x00003fff },
{ 0x4064b4, 2, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_unk78xx[] = {
+const struct nvc0_graph_init
+nvc0_grctx_init_rstr2d_0[] = {
{ 0x407804, 1, 0x04, 0x00000023 },
{ 0x40780c, 1, 0x04, 0x0a418820 },
{ 0x407810, 1, 0x04, 0x062080e6 },
@@ -725,8 +725,8 @@ nvc0_grctx_init_unk78xx[] = {
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_unk80xx[] = {
+const struct nvc0_graph_init
+nvc0_grctx_init_scc_0[] = {
{ 0x408000, 2, 0x04, 0x00000000 },
{ 0x408008, 1, 0x04, 0x00000018 },
{ 0x40800c, 2, 0x04, 0x00000000 },
@@ -736,8 +736,8 @@ nvc0_grctx_init_unk80xx[] = {
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_rop[] = {
+static const struct nvc0_graph_init
+nvc0_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x02802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x0003e00d },
@@ -748,9 +748,28 @@ nvc0_grctx_init_rop[] = {
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_gpc_0[] = {
+const struct nvc0_graph_pack
+nvc0_grctx_pack_hub[] = {
+ { nvc0_grctx_init_main_0 },
+ { nvc0_grctx_init_fe_0 },
+ { nvc0_grctx_init_pri_0 },
+ { nvc0_grctx_init_memfmt_0 },
+ { nvc0_grctx_init_ds_0 },
+ { nvc0_grctx_init_pd_0 },
+ { nvc0_grctx_init_rstr2d_0 },
+ { nvc0_grctx_init_scc_0 },
+ { nvc0_grctx_init_be_0 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_gpc_unk_0[] = {
{ 0x418380, 1, 0x04, 0x00000016 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_prop_0[] = {
{ 0x418400, 1, 0x04, 0x38004e00 },
{ 0x418404, 1, 0x04, 0x71e0ffff },
{ 0x418408, 1, 0x04, 0x00000000 },
@@ -760,6 +779,11 @@ nvc0_grctx_init_gpc_0[] = {
{ 0x418450, 6, 0x04, 0x00000000 },
{ 0x418468, 1, 0x04, 0x00000001 },
{ 0x41846c, 2, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_gpc_unk_1[] = {
{ 0x418600, 1, 0x04, 0x0000001f },
{ 0x418684, 1, 0x04, 0x0000000f },
{ 0x418700, 1, 0x04, 0x00000002 },
@@ -767,6 +791,11 @@ nvc0_grctx_init_gpc_0[] = {
{ 0x418708, 1, 0x04, 0x00000000 },
{ 0x41870c, 1, 0x04, 0x07c80000 },
{ 0x418710, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc0_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x0006860a },
{ 0x418808, 3, 0x04, 0x00000000 },
{ 0x418828, 1, 0x04, 0x00008442 },
@@ -775,10 +804,20 @@ nvc0_grctx_init_gpc_0[] = {
{ 0x4188e0, 1, 0x04, 0x01000000 },
{ 0x4188e8, 5, 0x04, 0x00000000 },
{ 0x4188fc, 1, 0x04, 0x00100000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_zcull_0[] = {
{ 0x41891c, 1, 0x04, 0x00ff00ff },
{ 0x418924, 1, 0x04, 0x00000000 },
{ 0x418928, 1, 0x04, 0x00ffff00 },
{ 0x41892c, 1, 0x04, 0x0000ff00 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_crstr_0[] = {
{ 0x418b00, 1, 0x04, 0x00000000 },
{ 0x418b08, 1, 0x04, 0x0a418820 },
{ 0x418b0c, 1, 0x04, 0x062080e6 },
@@ -787,18 +826,41 @@ nvc0_grctx_init_gpc_0[] = {
{ 0x418b18, 1, 0x04, 0x0a418820 },
{ 0x418b1c, 1, 0x04, 0x000000e6 },
{ 0x418bb8, 1, 0x04, 0x00000103 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_gpm_0[] = {
{ 0x418c08, 1, 0x04, 0x00000001 },
{ 0x418c10, 8, 0x04, 0x00000000 },
{ 0x418c80, 1, 0x04, 0x20200004 },
{ 0x418c8c, 1, 0x04, 0x00000001 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_gcc_0[] = {
{ 0x419000, 1, 0x04, 0x00000780 },
{ 0x419004, 2, 0x04, 0x00000000 },
{ 0x419014, 1, 0x04, 0x00000004 },
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_gpc_1[] = {
+const struct nvc0_graph_pack
+nvc0_grctx_pack_gpc[] = {
+ { nvc0_grctx_init_gpc_unk_0 },
+ { nvc0_grctx_init_prop_0 },
+ { nvc0_grctx_init_gpc_unk_1 },
+ { nvc0_grctx_init_setup_0 },
+ { nvc0_grctx_init_zcull_0 },
+ { nvc0_grctx_init_crstr_0 },
+ { nvc0_grctx_init_gpm_0 },
+ { nvc0_grctx_init_gcc_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc0_grctx_init_zcullr_0[] = {
{ 0x418a00, 3, 0x04, 0x00000000 },
{ 0x418a0c, 1, 0x04, 0x00010000 },
{ 0x418a10, 3, 0x04, 0x00000000 },
@@ -826,19 +888,35 @@ nvc0_grctx_init_gpc_1[] = {
{}
};
-struct nvc0_graph_init
-nvc0_grctx_init_tpc[] = {
+const struct nvc0_graph_pack
+nvc0_grctx_pack_zcull[] = {
+ { nvc0_grctx_init_zcullr_0 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_pe_0[] = {
{ 0x419818, 1, 0x04, 0x00000000 },
{ 0x41983c, 1, 0x04, 0x00038bc7 },
{ 0x419848, 1, 0x04, 0x00000000 },
{ 0x419864, 1, 0x04, 0x0000012a },
{ 0x419888, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc0_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000001f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000023 },
{ 0x419a0c, 1, 0x04, 0x00020000 },
{ 0x419a10, 1, 0x04, 0x00000000 },
{ 0x419a14, 1, 0x04, 0x00000200 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_wwdx_0[] = {
{ 0x419b00, 1, 0x04, 0x0a418820 },
{ 0x419b04, 1, 0x04, 0x062080e6 },
{ 0x419b08, 1, 0x04, 0x020398a4 },
@@ -848,15 +926,35 @@ nvc0_grctx_init_tpc[] = {
{ 0x419bd0, 1, 0x04, 0x00900103 },
{ 0x419be0, 1, 0x04, 0x00000001 },
{ 0x419be4, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_mpc_0[] = {
{ 0x419c00, 1, 0x04, 0x00000002 },
{ 0x419c04, 1, 0x04, 0x00000006 },
{ 0x419c08, 1, 0x04, 0x00000002 },
{ 0x419c20, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc0_grctx_init_l1c_0[] = {
{ 0x419cb0, 1, 0x04, 0x00060048 },
{ 0x419ce8, 1, 0x04, 0x00000000 },
{ 0x419cf4, 1, 0x04, 0x00000183 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_grctx_init_tpccs_0[] = {
{ 0x419d20, 1, 0x04, 0x02180000 },
{ 0x419d24, 1, 0x04, 0x00001fff },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc0_grctx_init_sm_0[] = {
{ 0x419e04, 3, 0x04, 0x00000000 },
{ 0x419e10, 1, 0x04, 0x00000002 },
{ 0x419e44, 1, 0x04, 0x001beff2 },
@@ -868,6 +966,22 @@ nvc0_grctx_init_tpc[] = {
{}
};
+const struct nvc0_graph_pack
+nvc0_grctx_pack_tpc[] = {
+ { nvc0_grctx_init_pe_0 },
+ { nvc0_grctx_init_tex_0 },
+ { nvc0_grctx_init_wwdx_0 },
+ { nvc0_grctx_init_mpc_0 },
+ { nvc0_grctx_init_l1c_0 },
+ { nvc0_grctx_init_tpccs_0 },
+ { nvc0_grctx_init_sm_0 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
void
nvc0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
@@ -1055,14 +1169,14 @@ void
nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
- int i;
nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
- for (i = 0; oclass->hub[i]; i++)
- nvc0_graph_mmio(priv, oclass->hub[i]);
- for (i = 0; oclass->gpc[i]; i++)
- nvc0_graph_mmio(priv, oclass->gpc[i]);
+ nvc0_graph_mmio(priv, oclass->hub);
+ nvc0_graph_mmio(priv, oclass->gpc);
+ nvc0_graph_mmio(priv, oclass->zcull);
+ nvc0_graph_mmio(priv, oclass->tpc);
+ nvc0_graph_mmio(priv, oclass->ppc);
nv_wr32(priv, 0x404154, 0x00000000);
@@ -1182,46 +1296,6 @@ done:
return ret;
}
-struct nvc0_graph_init *
-nvc0_grctx_init_hub[] = {
- nvc0_grctx_init_base,
- nvc0_grctx_init_unk40xx,
- nvc0_grctx_init_unk44xx,
- nvc0_grctx_init_unk46xx,
- nvc0_grctx_init_unk47xx,
- nvc0_grctx_init_unk58xx,
- nvc0_grctx_init_unk60xx,
- nvc0_grctx_init_unk64xx,
- nvc0_grctx_init_unk78xx,
- nvc0_grctx_init_unk80xx,
- nvc0_grctx_init_rop,
- NULL
-};
-
-static struct nvc0_graph_init *
-nvc0_grctx_init_gpc[] = {
- nvc0_grctx_init_gpc_0,
- nvc0_grctx_init_gpc_1,
- nvc0_grctx_init_tpc,
- NULL
-};
-
-struct nvc0_graph_init
-nvc0_grctx_init_mthd_magic[] = {
- { 0x3410, 1, 0x04, 0x00000000 },
- {}
-};
-
-struct nvc0_graph_mthd
-nvc0_grctx_init_mthd[] = {
- { 0x9097, nvc0_grctx_init_9097, },
- { 0x902d, nvc0_grctx_init_902d, },
- { 0x9039, nvc0_grctx_init_9039, },
- { 0x90c0, nvc0_grctx_init_90c0, },
- { 0x902d, nvc0_grctx_init_mthd_magic, },
- {}
-};
-
struct nouveau_oclass *
nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc0),
@@ -1233,11 +1307,13 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
- .main = nvc0_grctx_generate_main,
- .mods = nvc0_grctx_generate_mods,
- .unkn = nvc0_grctx_generate_unkn,
- .hub = nvc0_grctx_init_hub,
- .gpc = nvc0_grctx_init_gpc,
- .icmd = nvc0_grctx_init_icmd,
- .mthd = nvc0_grctx_init_mthd,
+ .main = nvc0_grctx_generate_main,
+ .mods = nvc0_grctx_generate_mods,
+ .unkn = nvc0_grctx_generate_unkn,
+ .hub = nvc0_grctx_pack_hub,
+ .gpc = nvc0_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = nvc0_grctx_pack_tpc,
+ .icmd = nvc0_grctx_pack_icmd,
+ .mthd = nvc0_grctx_pack_mthd,
}.base;
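
The change to nvc0_grctx_generate_main() above is the heart of this conversion: instead of looping over NULL-terminated arrays of nvc0_graph_init pointers, the generator now hands whole nvc0_graph_pack tables to nvc0_graph_mmio(), one per unit group (hub, gpc, zcull, tpc, ppc). A minimal sketch of how such a walker expands the tables is shown below; the field names and widths of nvc0_graph_init/nvc0_graph_pack and the helper's internals live in nvc0.h rather than in this hunk, so they are inferred here from the { addr, count, pitch, data } entry format and should be read as an approximation, not the kernel implementation.

/*
 * Sketch only: struct layouts inferred from the { addr, count, pitch, data }
 * tables above; the real definitions are in nvc0.h.
 */
struct nvc0_graph_init {
	u32 addr;   /* first register offset */
	u32 count;  /* number of consecutive registers */
	u32 pitch;  /* byte stride between them */
	u32 data;   /* value written to each */
};

struct nvc0_graph_pack {
	const struct nvc0_graph_init *init;
	u32 type;   /* object class, used by the mthd/icmd packs */
};

static void
example_graph_mmio(struct nvc0_graph_priv *priv,
		   const struct nvc0_graph_pack *pack)
{
	const struct nvc0_graph_pack *p;
	const struct nvc0_graph_init *init;
	u32 i;

	/* pack lists end with a NULL init pointer, init lists with the
	 * all-zero {} sentinel visible at the end of every table above */
	for (p = pack; p->init; p++)
		for (init = p->init; init->count; init++)
			for (i = 0; i < init->count; i++)
				nv_wr32(priv, init->addr + i * init->pitch,
					init->data);
}

Under this reading, an entry such as { 0x404004, 8, 0x04, 0x00000000 } simply zeroes the eight registers 0x404004 through 0x404020, which is why adjacent single-register lines with equal values keep collapsing into one entry throughout the diff.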
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
new file mode 100644
index 000000000000..9c815d1f99ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
@@ -0,0 +1,170 @@
+#ifndef __NVKM_GRCTX_NVC0_H__
+#define __NVKM_GRCTX_NVC0_H__
+
+#include "nvc0.h"
+
+struct nvc0_grctx {
+ struct nvc0_graph_priv *priv;
+ struct nvc0_graph_data *data;
+ struct nvc0_graph_mmio *mmio;
+ int buffer_nr;
+ u64 buffer[4];
+ u64 addr;
+};
+
+struct nvc0_grctx_oclass {
+ struct nouveau_oclass base;
+ /* main context generation function */
+ void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *);
+ /* context-specific modify-on-first-load list generation function */
+ void (*mods)(struct nvc0_graph_priv *, struct nvc0_grctx *);
+ void (*unkn)(struct nvc0_graph_priv *);
+ /* mmio context data */
+ const struct nvc0_graph_pack *hub;
+ const struct nvc0_graph_pack *gpc;
+ const struct nvc0_graph_pack *zcull;
+ const struct nvc0_graph_pack *tpc;
+ const struct nvc0_graph_pack *ppc;
+ /* indirect context data, generated with icmds/mthds */
+ const struct nvc0_graph_pack *icmd;
+ const struct nvc0_graph_pack *mthd;
+};
+
+#define mmio_data(s,a,p) do { \
+ info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \
+ info->addr = info->buffer[info->buffer_nr++] + (s); \
+ info->data->size = (s); \
+ info->data->align = (a); \
+ info->data->access = (p); \
+ info->data++; \
+} while(0)
+
+#define mmio_list(r,d,s,b) do { \
+ info->mmio->addr = (r); \
+ info->mmio->data = (d); \
+ info->mmio->shift = (s); \
+ info->mmio->buffer = (b); \
+ info->mmio++; \
+ nv_wr32(priv, (r), (d) | ((s) ? (info->buffer[(b)] >> (s)) : 0)); \
+} while(0)
+
+extern struct nouveau_oclass *nvc0_grctx_oclass;
+int nvc0_grctx_generate(struct nvc0_graph_priv *);
+void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *);
+void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *);
+void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *);
+void nvc0_grctx_generate_r4060a8(struct nvc0_graph_priv *);
+void nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *);
+void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *);
+
+extern struct nouveau_oclass *nvc1_grctx_oclass;
+void nvc1_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *);
+
+extern struct nouveau_oclass *nvc4_grctx_oclass;
+extern struct nouveau_oclass *nvc8_grctx_oclass;
+extern struct nouveau_oclass *nvd7_grctx_oclass;
+extern struct nouveau_oclass *nvd9_grctx_oclass;
+
+extern struct nouveau_oclass *nve4_grctx_oclass;
+void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nve4_grctx_generate_unkn(struct nvc0_graph_priv *);
+void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *);
+
+extern struct nouveau_oclass *nvf0_grctx_oclass;
+extern struct nouveau_oclass *nv108_grctx_oclass;
+extern struct nouveau_oclass *gm107_grctx_oclass;
+
+/* context init value lists */
+
+extern const struct nvc0_graph_pack nvc0_grctx_pack_icmd[];
+
+extern const struct nvc0_graph_pack nvc0_grctx_pack_mthd[];
+extern const struct nvc0_graph_init nvc0_grctx_init_902d_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_9039_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_90c0_0[];
+
+extern const struct nvc0_graph_pack nvc0_grctx_pack_hub[];
+extern const struct nvc0_graph_init nvc0_grctx_init_main_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_fe_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_pri_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_memfmt_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_rstr2d_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_scc_0[];
+
+extern const struct nvc0_graph_pack nvc0_grctx_pack_gpc[];
+extern const struct nvc0_graph_init nvc0_grctx_init_gpc_unk_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_prop_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_gpc_unk_1[];
+extern const struct nvc0_graph_init nvc0_grctx_init_zcull_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_crstr_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_gpm_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_gcc_0[];
+
+extern const struct nvc0_graph_pack nvc0_grctx_pack_zcull[];
+
+extern const struct nvc0_graph_pack nvc0_grctx_pack_tpc[];
+extern const struct nvc0_graph_init nvc0_grctx_init_pe_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_wwdx_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_mpc_0[];
+extern const struct nvc0_graph_init nvc0_grctx_init_tpccs_0[];
+
+extern const struct nvc0_graph_init nvc4_grctx_init_tex_0[];
+extern const struct nvc0_graph_init nvc4_grctx_init_l1c_0[];
+extern const struct nvc0_graph_init nvc4_grctx_init_sm_0[];
+
+extern const struct nvc0_graph_init nvc1_grctx_init_9097_0[];
+
+extern const struct nvc0_graph_init nvc1_grctx_init_gpm_0[];
+
+extern const struct nvc0_graph_init nvc1_grctx_init_pe_0[];
+extern const struct nvc0_graph_init nvc1_grctx_init_wwdx_0[];
+extern const struct nvc0_graph_init nvc1_grctx_init_tpccs_0[];
+
+extern const struct nvc0_graph_init nvc8_grctx_init_9197_0[];
+extern const struct nvc0_graph_init nvc8_grctx_init_9297_0[];
+
+extern const struct nvc0_graph_pack nvd9_grctx_pack_icmd[];
+
+extern const struct nvc0_graph_pack nvd9_grctx_pack_mthd[];
+
+extern const struct nvc0_graph_init nvd9_grctx_init_fe_0[];
+extern const struct nvc0_graph_init nvd9_grctx_init_be_0[];
+
+extern const struct nvc0_graph_init nvd9_grctx_init_prop_0[];
+extern const struct nvc0_graph_init nvd9_grctx_init_gpc_unk_1[];
+extern const struct nvc0_graph_init nvd9_grctx_init_crstr_0[];
+
+extern const struct nvc0_graph_init nvd9_grctx_init_sm_0[];
+
+extern const struct nvc0_graph_init nvd7_grctx_init_pe_0[];
+
+extern const struct nvc0_graph_init nvd7_grctx_init_wwdx_0[];
+
+extern const struct nvc0_graph_init nve4_grctx_init_memfmt_0[];
+extern const struct nvc0_graph_init nve4_grctx_init_ds_0[];
+extern const struct nvc0_graph_init nve4_grctx_init_scc_0[];
+
+extern const struct nvc0_graph_init nve4_grctx_init_gpm_0[];
+
+extern const struct nvc0_graph_init nve4_grctx_init_pes_0[];
+
+extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[];
+
+extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[];
+extern const struct nvc0_graph_init nvf0_grctx_init_cwd_0[];
+
+extern const struct nvc0_graph_init nvf0_grctx_init_gpc_unk_2[];
+
+extern const struct nvc0_graph_init nvf0_grctx_init_mpc_0[];
+extern const struct nvc0_graph_init nvf0_grctx_init_l1c_0[];
+
+extern const struct nvc0_graph_init nv108_grctx_init_rstr2d_0[];
+
+extern const struct nvc0_graph_init nv108_grctx_init_prop_0[];
+extern const struct nvc0_graph_init nv108_grctx_init_crstr_0[];
+
+
+#endif
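
Beyond the pack tables, the new header also centralizes the mmio_data()/mmio_list() macros that the per-chipset *_generate_mods() functions (such as nv108_grctx_generate_mods() earlier in this patch) are built from: mmio_data() carves a slice out of the context buffer and records its size, alignment and access flags, while mmio_list() writes a register immediately and appends it to the patch list so the same value, optionally combined with a shifted buffer address, is replayed into every new channel context. A hedged usage sketch follows; the buffer sizes, flag combinations and the 0x418810 entry are placeholders chosen for illustration, and only the 0x17e920 write is taken from the nv108 list above.

static void
example_grctx_generate_mods(struct nvc0_graph_priv *priv,
			    struct nvc0_grctx *info)
{
	/* reserve context-buffer slices: size, alignment, access flags
	 * (the sizes and flag mix here are made up for the example) */
	mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
	mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW);

	/* plain register/value pair: written now, replayed on context load */
	mmio_list(0x17e920, 0x00090d08, 0, 0);

	/* value OR'd with (info->buffer[1] >> 8), per the macro definition */
	mmio_list(0x418810, 0x80000000, 8, 1);
}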
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
index 71b4283f7fad..24a92c569c0a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
@@ -22,10 +22,14 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "nvc0.h"
+#include "ctxnvc0.h"
-static struct nvc0_graph_init
-nvc1_grctx_init_icmd[] = {
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+nvc1_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
{ 0x000038, 1, 0x01, 0x0fac6881 },
@@ -141,8 +145,7 @@ nvc1_grctx_init_icmd[] = {
{ 0x000586, 1, 0x01, 0x00000040 },
{ 0x000582, 2, 0x01, 0x00000080 },
{ 0x0005c2, 1, 0x01, 0x00000001 },
- { 0x000638, 1, 0x01, 0x00000001 },
- { 0x000639, 1, 0x01, 0x00000001 },
+ { 0x000638, 2, 0x01, 0x00000001 },
{ 0x00063a, 1, 0x01, 0x00000002 },
{ 0x00063b, 2, 0x01, 0x00000001 },
{ 0x00063d, 1, 0x01, 0x00000002 },
@@ -202,15 +205,13 @@ nvc1_grctx_init_icmd[] = {
{ 0x000787, 1, 0x01, 0x000000cf },
{ 0x00078c, 1, 0x01, 0x00000008 },
{ 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 1, 0x01, 0x00000001 },
- { 0x000795, 2, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
{ 0x000797, 1, 0x01, 0x000000cf },
{ 0x000836, 1, 0x01, 0x00000001 },
{ 0x00079a, 1, 0x01, 0x00000002 },
{ 0x000833, 1, 0x01, 0x04444480 },
{ 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 1, 0x01, 0x00000001 },
- { 0x0007a4, 2, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
{ 0x000831, 1, 0x01, 0x00000004 },
{ 0x00080c, 1, 0x01, 0x00000002 },
{ 0x00080d, 2, 0x01, 0x00000100 },
@@ -236,14 +237,12 @@ nvc1_grctx_init_icmd[] = {
{ 0x0006b1, 1, 0x01, 0x00000011 },
{ 0x00078c, 1, 0x01, 0x00000008 },
{ 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 1, 0x01, 0x00000001 },
- { 0x000795, 2, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
{ 0x000797, 1, 0x01, 0x000000cf },
{ 0x00079a, 1, 0x01, 0x00000002 },
{ 0x000833, 1, 0x01, 0x04444480 },
{ 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 1, 0x01, 0x00000001 },
- { 0x0007a4, 2, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
{ 0x000831, 1, 0x01, 0x00000004 },
{ 0x01e100, 1, 0x01, 0x00000001 },
{ 0x001000, 1, 0x01, 0x00000014 },
@@ -268,8 +267,14 @@ nvc1_grctx_init_icmd[] = {
{}
};
-struct nvc0_graph_init
-nvc1_grctx_init_9097[] = {
+static const struct nvc0_graph_pack
+nvc1_grctx_pack_icmd[] = {
+ { nvc1_grctx_init_icmd_0 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc1_grctx_init_9097_0[] = {
{ 0x000800, 8, 0x40, 0x00000000 },
{ 0x000804, 8, 0x40, 0x00000000 },
{ 0x000808, 8, 0x40, 0x00000400 },
@@ -516,8 +521,7 @@ nvc1_grctx_init_9097[] = {
{ 0x001350, 1, 0x04, 0x00000002 },
{ 0x001358, 1, 0x04, 0x00000001 },
{ 0x0012e4, 1, 0x04, 0x00000000 },
- { 0x00131c, 1, 0x04, 0x00000000 },
- { 0x001320, 3, 0x04, 0x00000000 },
+ { 0x00131c, 4, 0x04, 0x00000000 },
{ 0x0019c0, 1, 0x04, 0x00000000 },
{ 0x001140, 1, 0x04, 0x00000000 },
{ 0x0019c4, 1, 0x04, 0x00000000 },
@@ -571,15 +575,25 @@ nvc1_grctx_init_9097[] = {
{}
};
-static struct nvc0_graph_init
-nvc1_grctx_init_9197[] = {
+static const struct nvc0_graph_init
+nvc1_grctx_init_9197_0[] = {
{ 0x003400, 128, 0x04, 0x00000000 },
{ 0x0002e4, 1, 0x04, 0x0000b001 },
{}
};
-static struct nvc0_graph_init
-nvc1_grctx_init_unk58xx[] = {
+static const struct nvc0_graph_pack
+nvc1_grctx_pack_mthd[] = {
+ { nvc1_grctx_init_9097_0, 0x9097 },
+ { nvc1_grctx_init_9197_0, 0x9197 },
+ { nvc0_grctx_init_902d_0, 0x902d },
+ { nvc0_grctx_init_9039_0, 0x9039 },
+ { nvc0_grctx_init_90c0_0, 0x90c0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc1_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x0f8000bf },
{ 0x405830, 1, 0x04, 0x02180218 },
{ 0x405834, 2, 0x04, 0x00000000 },
@@ -590,8 +604,20 @@ nvc1_grctx_init_unk58xx[] = {
{}
};
-static struct nvc0_graph_init
-nvc1_grctx_init_rop[] = {
+static const struct nvc0_graph_init
+nvc1_grctx_init_pd_0[] = {
+ { 0x406020, 1, 0x04, 0x000103c1 },
+ { 0x406028, 4, 0x04, 0x00000001 },
+ { 0x4064a8, 1, 0x04, 0x00000000 },
+ { 0x4064ac, 1, 0x04, 0x00003fff },
+ { 0x4064b4, 2, 0x04, 0x00000000 },
+ { 0x4064c0, 1, 0x04, 0x80140078 },
+ { 0x4064c4, 1, 0x04, 0x0086ffff },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc1_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x02802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x1003e005 },
@@ -602,25 +628,22 @@ nvc1_grctx_init_rop[] = {
{}
};
-static struct nvc0_graph_init
-nvc1_grctx_init_gpc_0[] = {
- { 0x418380, 1, 0x04, 0x00000016 },
- { 0x418400, 1, 0x04, 0x38004e00 },
- { 0x418404, 1, 0x04, 0x71e0ffff },
- { 0x418408, 1, 0x04, 0x00000000 },
- { 0x41840c, 1, 0x04, 0x00001008 },
- { 0x418410, 1, 0x04, 0x0fff0fff },
- { 0x418414, 1, 0x04, 0x00200fff },
- { 0x418450, 6, 0x04, 0x00000000 },
- { 0x418468, 1, 0x04, 0x00000001 },
- { 0x41846c, 2, 0x04, 0x00000000 },
- { 0x418600, 1, 0x04, 0x0000001f },
- { 0x418684, 1, 0x04, 0x0000000f },
- { 0x418700, 1, 0x04, 0x00000002 },
- { 0x418704, 1, 0x04, 0x00000080 },
- { 0x418708, 1, 0x04, 0x00000000 },
- { 0x41870c, 1, 0x04, 0x07c80000 },
- { 0x418710, 1, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nvc1_grctx_pack_hub[] = {
+ { nvc0_grctx_init_main_0 },
+ { nvc0_grctx_init_fe_0 },
+ { nvc0_grctx_init_pri_0 },
+ { nvc0_grctx_init_memfmt_0 },
+ { nvc1_grctx_init_ds_0 },
+ { nvc1_grctx_init_pd_0 },
+ { nvc0_grctx_init_rstr2d_0 },
+ { nvc0_grctx_init_scc_0 },
+ { nvc1_grctx_init_be_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc1_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x0006860a },
{ 0x418808, 3, 0x04, 0x00000000 },
{ 0x418828, 1, 0x04, 0x00008442 },
@@ -629,69 +652,44 @@ nvc1_grctx_init_gpc_0[] = {
{ 0x4188e0, 1, 0x04, 0x01000000 },
{ 0x4188e8, 5, 0x04, 0x00000000 },
{ 0x4188fc, 1, 0x04, 0x00100018 },
- { 0x41891c, 1, 0x04, 0x00ff00ff },
- { 0x418924, 1, 0x04, 0x00000000 },
- { 0x418928, 1, 0x04, 0x00ffff00 },
- { 0x41892c, 1, 0x04, 0x0000ff00 },
- { 0x418a00, 3, 0x04, 0x00000000 },
- { 0x418a0c, 1, 0x04, 0x00010000 },
- { 0x418a10, 3, 0x04, 0x00000000 },
- { 0x418a20, 3, 0x04, 0x00000000 },
- { 0x418a2c, 1, 0x04, 0x00010000 },
- { 0x418a30, 3, 0x04, 0x00000000 },
- { 0x418a40, 3, 0x04, 0x00000000 },
- { 0x418a4c, 1, 0x04, 0x00010000 },
- { 0x418a50, 3, 0x04, 0x00000000 },
- { 0x418a60, 3, 0x04, 0x00000000 },
- { 0x418a6c, 1, 0x04, 0x00010000 },
- { 0x418a70, 3, 0x04, 0x00000000 },
- { 0x418a80, 3, 0x04, 0x00000000 },
- { 0x418a8c, 1, 0x04, 0x00010000 },
- { 0x418a90, 3, 0x04, 0x00000000 },
- { 0x418aa0, 3, 0x04, 0x00000000 },
- { 0x418aac, 1, 0x04, 0x00010000 },
- { 0x418ab0, 3, 0x04, 0x00000000 },
- { 0x418ac0, 3, 0x04, 0x00000000 },
- { 0x418acc, 1, 0x04, 0x00010000 },
- { 0x418ad0, 3, 0x04, 0x00000000 },
- { 0x418ae0, 3, 0x04, 0x00000000 },
- { 0x418aec, 1, 0x04, 0x00010000 },
- { 0x418af0, 3, 0x04, 0x00000000 },
- { 0x418b00, 1, 0x04, 0x00000000 },
- { 0x418b08, 1, 0x04, 0x0a418820 },
- { 0x418b0c, 1, 0x04, 0x062080e6 },
- { 0x418b10, 1, 0x04, 0x020398a4 },
- { 0x418b14, 1, 0x04, 0x0e629062 },
- { 0x418b18, 1, 0x04, 0x0a418820 },
- { 0x418b1c, 1, 0x04, 0x000000e6 },
- { 0x418bb8, 1, 0x04, 0x00000103 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc1_grctx_init_gpm_0[] = {
{ 0x418c08, 1, 0x04, 0x00000001 },
{ 0x418c10, 8, 0x04, 0x00000000 },
{ 0x418c6c, 1, 0x04, 0x00000001 },
{ 0x418c80, 1, 0x04, 0x20200004 },
{ 0x418c8c, 1, 0x04, 0x00000001 },
- { 0x419000, 1, 0x04, 0x00000780 },
- { 0x419004, 2, 0x04, 0x00000000 },
- { 0x419014, 1, 0x04, 0x00000004 },
{}
};
-static struct nvc0_graph_init
-nvc1_grctx_init_tpc[] = {
+static const struct nvc0_graph_pack
+nvc1_grctx_pack_gpc[] = {
+ { nvc0_grctx_init_gpc_unk_0 },
+ { nvc0_grctx_init_prop_0 },
+ { nvc0_grctx_init_gpc_unk_1 },
+ { nvc1_grctx_init_setup_0 },
+ { nvc0_grctx_init_zcull_0 },
+ { nvc0_grctx_init_crstr_0 },
+ { nvc1_grctx_init_gpm_0 },
+ { nvc0_grctx_init_gcc_0 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc1_grctx_init_pe_0[] = {
{ 0x419818, 1, 0x04, 0x00000000 },
{ 0x41983c, 1, 0x04, 0x00038bc7 },
{ 0x419848, 1, 0x04, 0x00000000 },
{ 0x419864, 1, 0x04, 0x00000129 },
{ 0x419888, 1, 0x04, 0x00000000 },
- { 0x419a00, 1, 0x04, 0x000001f0 },
- { 0x419a04, 1, 0x04, 0x00000001 },
- { 0x419a08, 1, 0x04, 0x00000023 },
- { 0x419a0c, 1, 0x04, 0x00020000 },
- { 0x419a10, 1, 0x04, 0x00000000 },
- { 0x419a14, 1, 0x04, 0x00000200 },
- { 0x419a1c, 1, 0x04, 0x00000000 },
- { 0x419a20, 1, 0x04, 0x00000800 },
- { 0x419ac4, 1, 0x04, 0x0007f440 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc1_grctx_init_wwdx_0[] = {
{ 0x419b00, 1, 0x04, 0x0a418820 },
{ 0x419b04, 1, 0x04, 0x062080e6 },
{ 0x419b08, 1, 0x04, 0x020398a4 },
@@ -701,28 +699,33 @@ nvc1_grctx_init_tpc[] = {
{ 0x419bd0, 1, 0x04, 0x00900103 },
{ 0x419be0, 1, 0x04, 0x00400001 },
{ 0x419be4, 1, 0x04, 0x00000000 },
- { 0x419c00, 1, 0x04, 0x00000002 },
- { 0x419c04, 1, 0x04, 0x00000006 },
- { 0x419c08, 1, 0x04, 0x00000002 },
- { 0x419c20, 1, 0x04, 0x00000000 },
- { 0x419cb0, 1, 0x04, 0x00020048 },
- { 0x419ce8, 1, 0x04, 0x00000000 },
- { 0x419cf4, 1, 0x04, 0x00000183 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc1_grctx_init_tpccs_0[] = {
{ 0x419d20, 1, 0x04, 0x12180000 },
{ 0x419d24, 1, 0x04, 0x00001fff },
{ 0x419d44, 1, 0x04, 0x02180218 },
- { 0x419e04, 3, 0x04, 0x00000000 },
- { 0x419e10, 1, 0x04, 0x00000002 },
- { 0x419e44, 1, 0x04, 0x001beff2 },
- { 0x419e48, 1, 0x04, 0x00000000 },
- { 0x419e4c, 1, 0x04, 0x0000000f },
- { 0x419e50, 17, 0x04, 0x00000000 },
- { 0x419e98, 1, 0x04, 0x00000000 },
- { 0x419ee0, 1, 0x04, 0x00011110 },
- { 0x419f30, 11, 0x04, 0x00000000 },
{}
};
+static const struct nvc0_graph_pack
+nvc1_grctx_pack_tpc[] = {
+ { nvc1_grctx_init_pe_0 },
+ { nvc4_grctx_init_tex_0 },
+ { nvc1_grctx_init_wwdx_0 },
+ { nvc0_grctx_init_mpc_0 },
+ { nvc4_grctx_init_l1c_0 },
+ { nvc1_grctx_init_tpccs_0 },
+ { nvc4_grctx_init_sm_0 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
void
nvc1_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
@@ -771,41 +774,6 @@ nvc1_grctx_generate_unkn(struct nvc0_graph_priv *priv)
nv_mask(priv, 0x419c00, 0x00000008, 0x00000008);
}
-static struct nvc0_graph_init *
-nvc1_grctx_init_hub[] = {
- nvc0_grctx_init_base,
- nvc0_grctx_init_unk40xx,
- nvc0_grctx_init_unk44xx,
- nvc0_grctx_init_unk46xx,
- nvc0_grctx_init_unk47xx,
- nvc1_grctx_init_unk58xx,
- nvc0_grctx_init_unk60xx,
- nvc0_grctx_init_unk64xx,
- nvc0_grctx_init_unk78xx,
- nvc0_grctx_init_unk80xx,
- nvc1_grctx_init_rop,
- NULL
-};
-
-struct nvc0_graph_init *
-nvc1_grctx_init_gpc[] = {
- nvc1_grctx_init_gpc_0,
- nvc0_grctx_init_gpc_1,
- nvc1_grctx_init_tpc,
- NULL
-};
-
-static struct nvc0_graph_mthd
-nvc1_grctx_init_mthd[] = {
- { 0x9097, nvc1_grctx_init_9097, },
- { 0x9197, nvc1_grctx_init_9197, },
- { 0x902d, nvc0_grctx_init_902d, },
- { 0x9039, nvc0_grctx_init_9039, },
- { 0x90c0, nvc0_grctx_init_90c0, },
- { 0x902d, nvc0_grctx_init_mthd_magic, },
- {}
-};
-
struct nouveau_oclass *
nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc1),
@@ -817,11 +785,13 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) {
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
- .main = nvc0_grctx_generate_main,
- .mods = nvc1_grctx_generate_mods,
- .unkn = nvc1_grctx_generate_unkn,
- .hub = nvc1_grctx_init_hub,
- .gpc = nvc1_grctx_init_gpc,
- .icmd = nvc1_grctx_init_icmd,
- .mthd = nvc1_grctx_init_mthd,
+ .main = nvc0_grctx_generate_main,
+ .mods = nvc1_grctx_generate_mods,
+ .unkn = nvc1_grctx_generate_unkn,
+ .hub = nvc1_grctx_pack_hub,
+ .gpc = nvc1_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = nvc1_grctx_pack_tpc,
+ .icmd = nvc1_grctx_pack_icmd,
+ .mthd = nvc1_grctx_pack_mthd,
}.base;
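
Note how nvc1 already benefits from the split: its TPC pack pulls nvc4_grctx_init_tex_0, nvc0_grctx_init_mpc_0 and friends straight from other chipsets' files, which is exactly what the extern declarations in ctxnvc0.h enable. As a purely hypothetical illustration of the pattern (the chipset name and the particular mix are invented, and the single tex entry is lifted from the nvc4 list), a later variant that only needed its own texture setup could be wired up as:

/* hypothetical chipset "nvXX" -- not part of this patch */
static const struct nvc0_graph_init
nvXX_grctx_init_tex_0[] = {
	{ 0x419a00, 1, 0x04, 0x000001f0 },
	{}
};

static const struct nvc0_graph_pack
nvXX_grctx_pack_tpc[] = {
	{ nvc0_grctx_init_pe_0 },      /* shared, from ctxnvc0.c */
	{ nvXX_grctx_init_tex_0 },     /* chipset-specific */
	{ nvc0_grctx_init_wwdx_0 },
	{ nvc0_grctx_init_mpc_0 },
	{ nvc4_grctx_init_l1c_0 },
	{ nvc0_grctx_init_tpccs_0 },
	{ nvc4_grctx_init_sm_0 },
	{}
};

Everything else (hub, gpc, icmd, mthd) could then point at the existing nvc0 packs in the oclass, so the new file stays a few dozen lines instead of another thousand-line register dump.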
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc3.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
index 8f237b3bd8c6..e11ed5538193 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
@@ -22,15 +22,14 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "nvc0.h"
+#include "ctxnvc0.h"
-static struct nvc0_graph_init
-nvc3_grctx_init_tpc[] = {
- { 0x419818, 1, 0x04, 0x00000000 },
- { 0x41983c, 1, 0x04, 0x00038bc7 },
- { 0x419848, 1, 0x04, 0x00000000 },
- { 0x419864, 1, 0x04, 0x0000012a },
- { 0x419888, 1, 0x04, 0x00000000 },
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+const struct nvc0_graph_init
+nvc4_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000001f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000023 },
@@ -40,24 +39,19 @@ nvc3_grctx_init_tpc[] = {
{ 0x419a1c, 1, 0x04, 0x00000000 },
{ 0x419a20, 1, 0x04, 0x00000800 },
{ 0x419ac4, 1, 0x04, 0x0007f440 },
- { 0x419b00, 1, 0x04, 0x0a418820 },
- { 0x419b04, 1, 0x04, 0x062080e6 },
- { 0x419b08, 1, 0x04, 0x020398a4 },
- { 0x419b0c, 1, 0x04, 0x0e629062 },
- { 0x419b10, 1, 0x04, 0x0a418820 },
- { 0x419b14, 1, 0x04, 0x000000e6 },
- { 0x419bd0, 1, 0x04, 0x00900103 },
- { 0x419be0, 1, 0x04, 0x00000001 },
- { 0x419be4, 1, 0x04, 0x00000000 },
- { 0x419c00, 1, 0x04, 0x00000002 },
- { 0x419c04, 1, 0x04, 0x00000006 },
- { 0x419c08, 1, 0x04, 0x00000002 },
- { 0x419c20, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc4_grctx_init_l1c_0[] = {
{ 0x419cb0, 1, 0x04, 0x00020048 },
{ 0x419ce8, 1, 0x04, 0x00000000 },
{ 0x419cf4, 1, 0x04, 0x00000183 },
- { 0x419d20, 1, 0x04, 0x02180000 },
- { 0x419d24, 1, 0x04, 0x00001fff },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc4_grctx_init_sm_0[] = {
{ 0x419e04, 3, 0x04, 0x00000000 },
{ 0x419e10, 1, 0x04, 0x00000002 },
{ 0x419e44, 1, 0x04, 0x001beff2 },
@@ -70,16 +64,24 @@ nvc3_grctx_init_tpc[] = {
{}
};
-struct nvc0_graph_init *
-nvc3_grctx_init_gpc[] = {
- nvc0_grctx_init_gpc_0,
- nvc0_grctx_init_gpc_1,
- nvc3_grctx_init_tpc,
- NULL
+static const struct nvc0_graph_pack
+nvc4_grctx_pack_tpc[] = {
+ { nvc0_grctx_init_pe_0 },
+ { nvc4_grctx_init_tex_0 },
+ { nvc0_grctx_init_wwdx_0 },
+ { nvc0_grctx_init_mpc_0 },
+ { nvc4_grctx_init_l1c_0 },
+ { nvc0_grctx_init_tpccs_0 },
+ { nvc4_grctx_init_sm_0 },
+ {}
};
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
struct nouveau_oclass *
-nvc3_grctx_oclass = &(struct nvc0_grctx_oclass) {
+nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc3),
.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_graph_context_ctor,
@@ -89,11 +91,13 @@ nvc3_grctx_oclass = &(struct nvc0_grctx_oclass) {
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
- .main = nvc0_grctx_generate_main,
- .mods = nvc0_grctx_generate_mods,
- .unkn = nvc0_grctx_generate_unkn,
- .hub = nvc0_grctx_init_hub,
- .gpc = nvc3_grctx_init_gpc,
- .icmd = nvc0_grctx_init_icmd,
- .mthd = nvc0_grctx_init_mthd,
+ .main = nvc0_grctx_generate_main,
+ .mods = nvc0_grctx_generate_mods,
+ .unkn = nvc0_grctx_generate_unkn,
+ .hub = nvc0_grctx_pack_hub,
+ .gpc = nvc0_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = nvc4_grctx_pack_tpc,
+ .icmd = nvc0_grctx_pack_icmd,
+ .mthd = nvc0_grctx_pack_mthd,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
index d0d4ce3c4892..feebd58dfe8d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
@@ -22,10 +22,14 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "nvc0.h"
+#include "ctxnvc0.h"
-static struct nvc0_graph_init
-nvc8_grctx_init_icmd[] = {
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+nvc8_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
{ 0x000038, 1, 0x01, 0x0fac6881 },
@@ -141,8 +145,7 @@ nvc8_grctx_init_icmd[] = {
{ 0x000586, 1, 0x01, 0x00000040 },
{ 0x000582, 2, 0x01, 0x00000080 },
{ 0x0005c2, 1, 0x01, 0x00000001 },
- { 0x000638, 1, 0x01, 0x00000001 },
- { 0x000639, 1, 0x01, 0x00000001 },
+ { 0x000638, 2, 0x01, 0x00000001 },
{ 0x00063a, 1, 0x01, 0x00000002 },
{ 0x00063b, 2, 0x01, 0x00000001 },
{ 0x00063d, 1, 0x01, 0x00000002 },
@@ -203,15 +206,13 @@ nvc8_grctx_init_icmd[] = {
{ 0x000787, 1, 0x01, 0x000000cf },
{ 0x00078c, 1, 0x01, 0x00000008 },
{ 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 1, 0x01, 0x00000001 },
- { 0x000795, 2, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
{ 0x000797, 1, 0x01, 0x000000cf },
{ 0x000836, 1, 0x01, 0x00000001 },
{ 0x00079a, 1, 0x01, 0x00000002 },
{ 0x000833, 1, 0x01, 0x04444480 },
{ 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 1, 0x01, 0x00000001 },
- { 0x0007a4, 2, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
{ 0x000831, 1, 0x01, 0x00000004 },
{ 0x00080c, 1, 0x01, 0x00000002 },
{ 0x00080d, 2, 0x01, 0x00000100 },
@@ -237,14 +238,12 @@ nvc8_grctx_init_icmd[] = {
{ 0x0006b1, 1, 0x01, 0x00000011 },
{ 0x00078c, 1, 0x01, 0x00000008 },
{ 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 1, 0x01, 0x00000001 },
- { 0x000795, 2, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
{ 0x000797, 1, 0x01, 0x000000cf },
{ 0x00079a, 1, 0x01, 0x00000002 },
{ 0x000833, 1, 0x01, 0x04444480 },
{ 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 1, 0x01, 0x00000001 },
- { 0x0007a4, 2, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
{ 0x000831, 1, 0x01, 0x00000004 },
{ 0x01e100, 1, 0x01, 0x00000001 },
{ 0x001000, 1, 0x01, 0x00000014 },
@@ -269,58 +268,20 @@ nvc8_grctx_init_icmd[] = {
{}
};
-static struct nvc0_graph_init
-nvc8_grctx_init_tpc[] = {
- { 0x419818, 1, 0x04, 0x00000000 },
- { 0x41983c, 1, 0x04, 0x00038bc7 },
- { 0x419848, 1, 0x04, 0x00000000 },
- { 0x419864, 1, 0x04, 0x0000012a },
- { 0x419888, 1, 0x04, 0x00000000 },
- { 0x419a00, 1, 0x04, 0x000001f0 },
- { 0x419a04, 1, 0x04, 0x00000001 },
- { 0x419a08, 1, 0x04, 0x00000023 },
- { 0x419a0c, 1, 0x04, 0x00020000 },
- { 0x419a10, 1, 0x04, 0x00000000 },
- { 0x419a14, 1, 0x04, 0x00000200 },
- { 0x419a1c, 1, 0x04, 0x00000000 },
- { 0x419a20, 1, 0x04, 0x00000800 },
- { 0x419b00, 1, 0x04, 0x0a418820 },
- { 0x419b04, 1, 0x04, 0x062080e6 },
- { 0x419b08, 1, 0x04, 0x020398a4 },
- { 0x419b0c, 1, 0x04, 0x0e629062 },
- { 0x419b10, 1, 0x04, 0x0a418820 },
- { 0x419b14, 1, 0x04, 0x000000e6 },
- { 0x419bd0, 1, 0x04, 0x00900103 },
- { 0x419be0, 1, 0x04, 0x00000001 },
- { 0x419be4, 1, 0x04, 0x00000000 },
- { 0x419c00, 1, 0x04, 0x00000002 },
- { 0x419c04, 1, 0x04, 0x00000006 },
- { 0x419c08, 1, 0x04, 0x00000002 },
- { 0x419c20, 1, 0x04, 0x00000000 },
- { 0x419cb0, 1, 0x04, 0x00060048 },
- { 0x419ce8, 1, 0x04, 0x00000000 },
- { 0x419cf4, 1, 0x04, 0x00000183 },
- { 0x419d20, 1, 0x04, 0x02180000 },
- { 0x419d24, 1, 0x04, 0x00001fff },
- { 0x419e04, 3, 0x04, 0x00000000 },
- { 0x419e10, 1, 0x04, 0x00000002 },
- { 0x419e44, 1, 0x04, 0x001beff2 },
- { 0x419e48, 1, 0x04, 0x00000000 },
- { 0x419e4c, 1, 0x04, 0x0000000f },
- { 0x419e50, 17, 0x04, 0x00000000 },
- { 0x419e98, 1, 0x04, 0x00000000 },
- { 0x419f50, 2, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nvc8_grctx_pack_icmd[] = {
+ { nvc8_grctx_init_icmd_0 },
{}
};
-struct nvc0_graph_init
-nvc8_grctx_init_9197[] = {
+const struct nvc0_graph_init
+nvc8_grctx_init_9197_0[] = {
{ 0x0002e4, 1, 0x04, 0x0000b001 },
{}
};
-struct nvc0_graph_init
-nvc8_grctx_init_9297[] = {
+const struct nvc0_graph_init
+nvc8_grctx_init_9297_0[] = {
{ 0x003400, 128, 0x04, 0x00000000 },
{ 0x00036c, 2, 0x04, 0x00000000 },
{ 0x0007a4, 2, 0x04, 0x00000000 },
@@ -329,26 +290,47 @@ nvc8_grctx_init_9297[] = {
{}
};
-static struct nvc0_graph_mthd
-nvc8_grctx_init_mthd[] = {
- { 0x9097, nvc1_grctx_init_9097, },
- { 0x9197, nvc8_grctx_init_9197, },
- { 0x9297, nvc8_grctx_init_9297, },
- { 0x902d, nvc0_grctx_init_902d, },
- { 0x9039, nvc0_grctx_init_9039, },
- { 0x90c0, nvc0_grctx_init_90c0, },
- { 0x902d, nvc0_grctx_init_mthd_magic, },
+static const struct nvc0_graph_pack
+nvc8_grctx_pack_mthd[] = {
+ { nvc1_grctx_init_9097_0, 0x9097 },
+ { nvc8_grctx_init_9197_0, 0x9197 },
+ { nvc8_grctx_init_9297_0, 0x9297 },
+ { nvc0_grctx_init_902d_0, 0x902d },
+ { nvc0_grctx_init_9039_0, 0x9039 },
+ { nvc0_grctx_init_90c0_0, 0x90c0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc8_grctx_init_setup_0[] = {
+ { 0x418800, 1, 0x04, 0x0006860a },
+ { 0x418808, 3, 0x04, 0x00000000 },
+ { 0x418828, 1, 0x04, 0x00008442 },
+ { 0x418830, 1, 0x04, 0x00000001 },
+ { 0x4188d8, 1, 0x04, 0x00000008 },
+ { 0x4188e0, 1, 0x04, 0x01000000 },
+ { 0x4188e8, 5, 0x04, 0x00000000 },
+ { 0x4188fc, 1, 0x04, 0x20100000 },
{}
};
-static struct nvc0_graph_init *
-nvc8_grctx_init_gpc[] = {
- nvc0_grctx_init_gpc_0,
- nvc0_grctx_init_gpc_1,
- nvc8_grctx_init_tpc,
- NULL
+static const struct nvc0_graph_pack
+nvc8_grctx_pack_gpc[] = {
+ { nvc0_grctx_init_gpc_unk_0 },
+ { nvc0_grctx_init_prop_0 },
+ { nvc0_grctx_init_gpc_unk_1 },
+ { nvc8_grctx_init_setup_0 },
+ { nvc0_grctx_init_zcull_0 },
+ { nvc0_grctx_init_crstr_0 },
+ { nvc0_grctx_init_gpm_0 },
+ { nvc0_grctx_init_gcc_0 },
+ {}
};
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
struct nouveau_oclass *
nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc8),
@@ -360,11 +342,13 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) {
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
- .main = nvc0_grctx_generate_main,
- .mods = nvc0_grctx_generate_mods,
- .unkn = nvc0_grctx_generate_unkn,
- .hub = nvc0_grctx_init_hub,
- .gpc = nvc8_grctx_init_gpc,
- .icmd = nvc8_grctx_init_icmd,
- .mthd = nvc8_grctx_init_mthd,
+ .main = nvc0_grctx_generate_main,
+ .mods = nvc0_grctx_generate_mods,
+ .unkn = nvc0_grctx_generate_unkn,
+ .hub = nvc0_grctx_pack_hub,
+ .gpc = nvc8_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = nvc0_grctx_pack_tpc,
+ .icmd = nvc8_grctx_pack_icmd,
+ .mthd = nvc8_grctx_pack_mthd,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
index c4740d528532..1dbc8d7f2e86 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
@@ -22,33 +22,14 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "nvc0.h"
+#include "ctxnvc0.h"
-struct nvc0_graph_init
-nvd7_grctx_init_unk40xx[] = {
- { 0x404004, 10, 0x04, 0x00000000 },
- { 0x404044, 1, 0x04, 0x00000000 },
- { 0x404094, 1, 0x04, 0x00000000 },
- { 0x404098, 12, 0x04, 0x00000000 },
- { 0x4040c8, 1, 0x04, 0xf0000087 },
- { 0x4040d0, 6, 0x04, 0x00000000 },
- { 0x4040e8, 1, 0x04, 0x00001000 },
- { 0x4040f8, 1, 0x04, 0x00000000 },
- { 0x404130, 1, 0x04, 0x00000000 },
- { 0x404134, 1, 0x04, 0x00000000 },
- { 0x404138, 1, 0x04, 0x20000040 },
- { 0x404150, 1, 0x04, 0x0000002e },
- { 0x404154, 1, 0x04, 0x00000400 },
- { 0x404158, 1, 0x04, 0x00000200 },
- { 0x404164, 1, 0x04, 0x00000055 },
- { 0x404168, 1, 0x04, 0x00000000 },
- { 0x404178, 2, 0x04, 0x00000000 },
- { 0x404200, 8, 0x04, 0x00000000 },
- {}
-};
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
-static struct nvc0_graph_init
-nvd7_grctx_init_unk58xx[] = {
+static const struct nvc0_graph_init
+nvd7_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x0f8000bf },
{ 0x405830, 1, 0x04, 0x02180324 },
{ 0x405834, 1, 0x04, 0x08000000 },
@@ -60,8 +41,10 @@ nvd7_grctx_init_unk58xx[] = {
{}
};
-static struct nvc0_graph_init
-nvd7_grctx_init_unk64xx[] = {
+static const struct nvc0_graph_init
+nvd7_grctx_init_pd_0[] = {
+ { 0x406020, 1, 0x04, 0x000103c1 },
+ { 0x406028, 4, 0x04, 0x00000001 },
{ 0x4064a8, 1, 0x04, 0x00000000 },
{ 0x4064ac, 1, 0x04, 0x00003fff },
{ 0x4064b4, 3, 0x04, 0x00000000 },
@@ -71,22 +54,22 @@ nvd7_grctx_init_unk64xx[] = {
{}
};
-static struct nvc0_graph_init
-nvd7_grctx_init_gpc_0[] = {
- { 0x418380, 1, 0x04, 0x00000016 },
- { 0x418400, 1, 0x04, 0x38004e00 },
- { 0x418404, 1, 0x04, 0x71e0ffff },
- { 0x41840c, 1, 0x04, 0x00001008 },
- { 0x418410, 1, 0x04, 0x0fff0fff },
- { 0x418414, 1, 0x04, 0x02200fff },
- { 0x418450, 6, 0x04, 0x00000000 },
- { 0x418468, 1, 0x04, 0x00000001 },
- { 0x41846c, 2, 0x04, 0x00000000 },
- { 0x418600, 1, 0x04, 0x0000001f },
- { 0x418684, 1, 0x04, 0x0000000f },
- { 0x418700, 1, 0x04, 0x00000002 },
- { 0x418704, 1, 0x04, 0x00000080 },
- { 0x418708, 3, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nvd7_grctx_pack_hub[] = {
+ { nvc0_grctx_init_main_0 },
+ { nvd9_grctx_init_fe_0 },
+ { nvc0_grctx_init_pri_0 },
+ { nvc0_grctx_init_memfmt_0 },
+ { nvd7_grctx_init_ds_0 },
+ { nvd7_grctx_init_pd_0 },
+ { nvc0_grctx_init_rstr2d_0 },
+ { nvc0_grctx_init_scc_0 },
+ { nvd9_grctx_init_be_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd7_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x7006860a },
{ 0x418808, 3, 0x04, 0x00000000 },
{ 0x418828, 1, 0x04, 0x00008442 },
@@ -95,34 +78,32 @@ nvd7_grctx_init_gpc_0[] = {
{ 0x4188e0, 1, 0x04, 0x01000000 },
{ 0x4188e8, 5, 0x04, 0x00000000 },
{ 0x4188fc, 1, 0x04, 0x20100018 },
- { 0x41891c, 1, 0x04, 0x00ff00ff },
- { 0x418924, 1, 0x04, 0x00000000 },
- { 0x418928, 1, 0x04, 0x00ffff00 },
- { 0x41892c, 1, 0x04, 0x0000ff00 },
- { 0x418b00, 1, 0x04, 0x00000006 },
- { 0x418b08, 1, 0x04, 0x0a418820 },
- { 0x418b0c, 1, 0x04, 0x062080e6 },
- { 0x418b10, 1, 0x04, 0x020398a4 },
- { 0x418b14, 1, 0x04, 0x0e629062 },
- { 0x418b18, 1, 0x04, 0x0a418820 },
- { 0x418b1c, 1, 0x04, 0x000000e6 },
- { 0x418bb8, 1, 0x04, 0x00000103 },
- { 0x418c08, 1, 0x04, 0x00000001 },
- { 0x418c10, 8, 0x04, 0x00000000 },
- { 0x418c6c, 1, 0x04, 0x00000001 },
- { 0x418c80, 1, 0x04, 0x20200004 },
- { 0x418c8c, 1, 0x04, 0x00000001 },
- { 0x419000, 1, 0x04, 0x00000780 },
- { 0x419004, 2, 0x04, 0x00000000 },
- { 0x419014, 1, 0x04, 0x00000004 },
{}
};
-static struct nvc0_graph_init
-nvd7_grctx_init_tpc[] = {
+static const struct nvc0_graph_pack
+nvd7_grctx_pack_gpc[] = {
+ { nvc0_grctx_init_gpc_unk_0 },
+ { nvd9_grctx_init_prop_0 },
+ { nvd9_grctx_init_gpc_unk_1 },
+ { nvd7_grctx_init_setup_0 },
+ { nvc0_grctx_init_zcull_0 },
+ { nvd9_grctx_init_crstr_0 },
+ { nvc1_grctx_init_gpm_0 },
+ { nvc0_grctx_init_gcc_0 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd7_grctx_init_pe_0[] = {
{ 0x419848, 1, 0x04, 0x00000000 },
{ 0x419864, 1, 0x04, 0x00000129 },
{ 0x419888, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd7_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000001f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000023 },
@@ -132,33 +113,46 @@ nvd7_grctx_init_tpc[] = {
{ 0x419a1c, 1, 0x04, 0x00008000 },
{ 0x419a20, 1, 0x04, 0x00000800 },
{ 0x419ac4, 1, 0x04, 0x0017f440 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd7_grctx_init_mpc_0[] = {
{ 0x419c00, 1, 0x04, 0x0000000a },
{ 0x419c04, 1, 0x04, 0x00000006 },
{ 0x419c08, 1, 0x04, 0x00000002 },
{ 0x419c20, 1, 0x04, 0x00000000 },
{ 0x419c24, 1, 0x04, 0x00084210 },
{ 0x419c28, 1, 0x04, 0x3efbefbe },
- { 0x419cb0, 1, 0x04, 0x00020048 },
- { 0x419ce8, 1, 0x04, 0x00000000 },
- { 0x419cf4, 1, 0x04, 0x00000183 },
- { 0x419e04, 3, 0x04, 0x00000000 },
- { 0x419e10, 1, 0x04, 0x00000002 },
- { 0x419e44, 1, 0x04, 0x001beff2 },
- { 0x419e48, 1, 0x04, 0x00000000 },
- { 0x419e4c, 1, 0x04, 0x0000000f },
- { 0x419e50, 17, 0x04, 0x00000000 },
- { 0x419e98, 1, 0x04, 0x00000000 },
- { 0x419ee0, 1, 0x04, 0x00010110 },
- { 0x419f30, 11, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nvd7_grctx_init_unk[] = {
+static const struct nvc0_graph_pack
+nvd7_grctx_pack_tpc[] = {
+ { nvd7_grctx_init_pe_0 },
+ { nvd7_grctx_init_tex_0 },
+ { nvd7_grctx_init_mpc_0 },
+ { nvc4_grctx_init_l1c_0 },
+ { nvd9_grctx_init_sm_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd7_grctx_init_pes_0[] = {
{ 0x41be24, 1, 0x04, 0x00000002 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd7_grctx_init_cbm_0[] = {
{ 0x41bec0, 1, 0x04, 0x12180000 },
{ 0x41bec4, 1, 0x04, 0x00003fff },
{ 0x41bee4, 1, 0x04, 0x03240218 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd7_grctx_init_wwdx_0[] = {
{ 0x41bf00, 1, 0x04, 0x0a418820 },
{ 0x41bf04, 1, 0x04, 0x062080e6 },
{ 0x41bf08, 1, 0x04, 0x020398a4 },
@@ -171,6 +165,18 @@ nvd7_grctx_init_unk[] = {
{}
};
+static const struct nvc0_graph_pack
+nvd7_grctx_pack_ppc[] = {
+ { nvd7_grctx_init_pes_0 },
+ { nvd7_grctx_init_cbm_0 },
+ { nvd7_grctx_init_wwdx_0 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
static void
nvd7_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
@@ -219,10 +225,11 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
- for (i = 0; oclass->hub[i]; i++)
- nvc0_graph_mmio(priv, oclass->hub[i]);
- for (i = 0; oclass->gpc[i]; i++)
- nvc0_graph_mmio(priv, oclass->gpc[i]);
+ nvc0_graph_mmio(priv, oclass->hub);
+ nvc0_graph_mmio(priv, oclass->gpc);
+ nvc0_graph_mmio(priv, oclass->zcull);
+ nvc0_graph_mmio(priv, oclass->tpc);
+ nvc0_graph_mmio(priv, oclass->ppc);
nv_wr32(priv, 0x404154, 0x00000000);
@@ -244,32 +251,6 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
}
-
-static struct nvc0_graph_init *
-nvd7_grctx_init_hub[] = {
- nvc0_grctx_init_base,
- nvd7_grctx_init_unk40xx,
- nvc0_grctx_init_unk44xx,
- nvc0_grctx_init_unk46xx,
- nvc0_grctx_init_unk47xx,
- nvd7_grctx_init_unk58xx,
- nvc0_grctx_init_unk60xx,
- nvd7_grctx_init_unk64xx,
- nvc0_grctx_init_unk78xx,
- nvc0_grctx_init_unk80xx,
- nvd9_grctx_init_rop,
- NULL
-};
-
-struct nvc0_graph_init *
-nvd7_grctx_init_gpc[] = {
- nvd7_grctx_init_gpc_0,
- nvc0_grctx_init_gpc_1,
- nvd7_grctx_init_tpc,
- nvd7_grctx_init_unk,
- NULL
-};
-
struct nouveau_oclass *
nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xd7),
@@ -281,11 +262,14 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) {
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
- .main = nvd7_grctx_generate_main,
- .mods = nvd7_grctx_generate_mods,
- .unkn = nve4_grctx_generate_unkn,
- .hub = nvd7_grctx_init_hub,
- .gpc = nvd7_grctx_init_gpc,
- .icmd = nvd9_grctx_init_icmd,
- .mthd = nvd9_grctx_init_mthd,
+ .main = nvd7_grctx_generate_main,
+ .mods = nvd7_grctx_generate_mods,
+ .unkn = nve4_grctx_generate_unkn,
+ .hub = nvd7_grctx_pack_hub,
+ .gpc = nvd7_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = nvd7_grctx_pack_tpc,
+ .ppc = nvd7_grctx_pack_ppc,
+ .icmd = nvd9_grctx_pack_icmd,
+ .mthd = nvd9_grctx_pack_mthd,
}.base;
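
Note on the pack replay used above: the reworked nvd7_grctx_generate_main() replaces the old per-array loops with single nvc0_graph_mmio(priv, oclass->hub) / ->gpc / ->tpc / ->ppc calls. The struct definitions behind that live in ctxnvc0.h / nvc0.h, which are not part of this hunk, so the following is only a user-space sketch of the assumed layout ({ addr, count, pitch, data } entries grouped into pack lists) and of how one pack would expand into individual register writes. The type and field names here are illustrative assumptions, not the driver's actual declarations; the table data is a subset copied from nvd7_grctx_pack_ppc above.

    #include <stdint.h>
    #include <stdio.h>

    /* assumed shape of one register-init entry: "count" registers starting
     * at "addr", spaced "pitch" bytes apart, all written with "data" */
    struct grctx_init {
            uint32_t addr;
            uint16_t count;
            uint16_t pitch;
            uint32_t data;
    };

    /* assumed shape of a pack element: an init list plus an optional class
     * id (only the mthd packs above use the second member, e.g. 0x9097) */
    struct grctx_pack {
            const struct grctx_init *init;
            uint32_t type;
    };

    /* data copied from nvd7_grctx_init_pes_0 / _cbm_0 above */
    static const struct grctx_init pes_0[] = {
            { 0x41be24, 1, 0x04, 0x00000002 },
            {}
    };

    static const struct grctx_init cbm_0[] = {
            { 0x41bec0, 1, 0x04, 0x12180000 },
            { 0x41bec4, 1, 0x04, 0x00003fff },
            { 0x41bee4, 1, 0x04, 0x03240218 },
            {}
    };

    static const struct grctx_pack ppc_pack[] = {
            { pes_0 },
            { cbm_0 },
            {}
    };

    /* stand-in for what nvc0_graph_mmio() is assumed to do: walk every init
     * list in the pack and expand each entry into its register writes */
    static void
    replay_pack(const struct grctx_pack *pack)
    {
            const struct grctx_pack *p;
            const struct grctx_init *init;
            uint32_t addr;
            int i;

            for (p = pack; p->init; p++) {
                    for (init = p->init; init->count; init++) {
                            addr = init->addr;
                            for (i = 0; i < init->count; i++) {
                                    printf("wr32 0x%06x <- 0x%08x\n",
                                           addr, init->data);
                                    addr += init->pitch;
                            }
                    }
            }
    }

    int
    main(void)
    {
            replay_pack(ppc_pack);  /* models nvc0_graph_mmio(priv, oclass->ppc) */
            return 0;
    }
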
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
index a1102cbf2fdc..c665fb7e4660 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
@@ -22,38 +22,14 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "nvc0.h"
+#include "ctxnvc0.h"
-struct nvc0_graph_init
-nvd9_grctx_init_90c0[] = {
- { 0x002700, 4, 0x40, 0x00000000 },
- { 0x002720, 4, 0x40, 0x00000000 },
- { 0x002704, 4, 0x40, 0x00000000 },
- { 0x002724, 4, 0x40, 0x00000000 },
- { 0x002708, 4, 0x40, 0x00000000 },
- { 0x002728, 4, 0x40, 0x00000000 },
- { 0x00270c, 8, 0x20, 0x00000000 },
- { 0x002710, 4, 0x40, 0x00014000 },
- { 0x002730, 4, 0x40, 0x00014000 },
- { 0x002714, 4, 0x40, 0x00000040 },
- { 0x002734, 4, 0x40, 0x00000040 },
- { 0x00030c, 1, 0x04, 0x00000001 },
- { 0x001944, 1, 0x04, 0x00000000 },
- { 0x000758, 1, 0x04, 0x00000100 },
- { 0x0002c4, 1, 0x04, 0x00000000 },
- { 0x000790, 5, 0x04, 0x00000000 },
- { 0x00077c, 1, 0x04, 0x00000000 },
- { 0x000204, 3, 0x04, 0x00000000 },
- { 0x000214, 1, 0x04, 0x00000000 },
- { 0x00024c, 1, 0x04, 0x00000000 },
- { 0x000d94, 1, 0x04, 0x00000001 },
- { 0x001608, 2, 0x04, 0x00000000 },
- { 0x001664, 1, 0x04, 0x00000000 },
- {}
-};
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
-struct nvc0_graph_init
-nvd9_grctx_init_icmd[] = {
+static const struct nvc0_graph_init
+nvd9_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
{ 0x000038, 1, 0x01, 0x0fac6881 },
@@ -171,8 +147,7 @@ nvd9_grctx_init_icmd[] = {
{ 0x000586, 1, 0x01, 0x00000040 },
{ 0x000582, 2, 0x01, 0x00000080 },
{ 0x0005c2, 1, 0x01, 0x00000001 },
- { 0x000638, 1, 0x01, 0x00000001 },
- { 0x000639, 1, 0x01, 0x00000001 },
+ { 0x000638, 2, 0x01, 0x00000001 },
{ 0x00063a, 1, 0x01, 0x00000002 },
{ 0x00063b, 2, 0x01, 0x00000001 },
{ 0x00063d, 1, 0x01, 0x00000002 },
@@ -233,15 +208,13 @@ nvd9_grctx_init_icmd[] = {
{ 0x000787, 1, 0x01, 0x000000cf },
{ 0x00078c, 1, 0x01, 0x00000008 },
{ 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 1, 0x01, 0x00000001 },
- { 0x000795, 2, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
{ 0x000797, 1, 0x01, 0x000000cf },
{ 0x000836, 1, 0x01, 0x00000001 },
{ 0x00079a, 1, 0x01, 0x00000002 },
{ 0x000833, 1, 0x01, 0x04444480 },
{ 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 1, 0x01, 0x00000001 },
- { 0x0007a4, 2, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
{ 0x000831, 1, 0x01, 0x00000004 },
{ 0x00080c, 1, 0x01, 0x00000002 },
{ 0x00080d, 2, 0x01, 0x00000100 },
@@ -267,14 +240,12 @@ nvd9_grctx_init_icmd[] = {
{ 0x0006b1, 1, 0x01, 0x00000011 },
{ 0x00078c, 1, 0x01, 0x00000008 },
{ 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 1, 0x01, 0x00000001 },
- { 0x000795, 2, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
{ 0x000797, 1, 0x01, 0x000000cf },
{ 0x00079a, 1, 0x01, 0x00000002 },
{ 0x000833, 1, 0x01, 0x04444480 },
{ 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 1, 0x01, 0x00000001 },
- { 0x0007a4, 2, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
{ 0x000831, 1, 0x01, 0x00000004 },
{ 0x01e100, 1, 0x01, 0x00000001 },
{ 0x001000, 1, 0x01, 0x00000014 },
@@ -299,18 +270,56 @@ nvd9_grctx_init_icmd[] = {
{}
};
-struct nvc0_graph_init
-nvd9_grctx_init_unk40xx[] = {
- { 0x404004, 11, 0x04, 0x00000000 },
+const struct nvc0_graph_pack
+nvd9_grctx_pack_icmd[] = {
+ { nvd9_grctx_init_icmd_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd9_grctx_init_90c0_0[] = {
+ { 0x002700, 8, 0x20, 0x00000000 },
+ { 0x002704, 8, 0x20, 0x00000000 },
+ { 0x002708, 8, 0x20, 0x00000000 },
+ { 0x00270c, 8, 0x20, 0x00000000 },
+ { 0x002710, 8, 0x20, 0x00014000 },
+ { 0x002714, 8, 0x20, 0x00000040 },
+ { 0x00030c, 1, 0x04, 0x00000001 },
+ { 0x001944, 1, 0x04, 0x00000000 },
+ { 0x000758, 1, 0x04, 0x00000100 },
+ { 0x0002c4, 1, 0x04, 0x00000000 },
+ { 0x000790, 5, 0x04, 0x00000000 },
+ { 0x00077c, 1, 0x04, 0x00000000 },
+ { 0x000204, 3, 0x04, 0x00000000 },
+ { 0x000214, 1, 0x04, 0x00000000 },
+ { 0x00024c, 1, 0x04, 0x00000000 },
+ { 0x000d94, 1, 0x04, 0x00000001 },
+ { 0x001608, 2, 0x04, 0x00000000 },
+ { 0x001664, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_pack
+nvd9_grctx_pack_mthd[] = {
+ { nvc1_grctx_init_9097_0, 0x9097 },
+ { nvc8_grctx_init_9197_0, 0x9197 },
+ { nvc8_grctx_init_9297_0, 0x9297 },
+ { nvc0_grctx_init_902d_0, 0x902d },
+ { nvc0_grctx_init_9039_0, 0x9039 },
+ { nvd9_grctx_init_90c0_0, 0x90c0 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd9_grctx_init_fe_0[] = {
+ { 0x404004, 10, 0x04, 0x00000000 },
{ 0x404044, 1, 0x04, 0x00000000 },
- { 0x404094, 1, 0x04, 0x00000000 },
- { 0x404098, 12, 0x04, 0x00000000 },
+ { 0x404094, 13, 0x04, 0x00000000 },
{ 0x4040c8, 1, 0x04, 0xf0000087 },
{ 0x4040d0, 6, 0x04, 0x00000000 },
{ 0x4040e8, 1, 0x04, 0x00001000 },
{ 0x4040f8, 1, 0x04, 0x00000000 },
- { 0x404130, 1, 0x04, 0x00000000 },
- { 0x404134, 1, 0x04, 0x00000000 },
+ { 0x404130, 2, 0x04, 0x00000000 },
{ 0x404138, 1, 0x04, 0x20000040 },
{ 0x404150, 1, 0x04, 0x0000002e },
{ 0x404154, 1, 0x04, 0x00000400 },
@@ -322,8 +331,8 @@ nvd9_grctx_init_unk40xx[] = {
{}
};
-static struct nvc0_graph_init
-nvd9_grctx_init_unk58xx[] = {
+static const struct nvc0_graph_init
+nvd9_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x0f8000bf },
{ 0x405830, 1, 0x04, 0x02180218 },
{ 0x405834, 1, 0x04, 0x08000000 },
@@ -335,8 +344,10 @@ nvd9_grctx_init_unk58xx[] = {
{}
};
-static struct nvc0_graph_init
-nvd9_grctx_init_unk64xx[] = {
+static const struct nvc0_graph_init
+nvd9_grctx_init_pd_0[] = {
+ { 0x406020, 1, 0x04, 0x000103c1 },
+ { 0x406028, 4, 0x04, 0x00000001 },
{ 0x4064a8, 1, 0x04, 0x00000000 },
{ 0x4064ac, 1, 0x04, 0x00003fff },
{ 0x4064b4, 3, 0x04, 0x00000000 },
@@ -345,21 +356,34 @@ nvd9_grctx_init_unk64xx[] = {
{}
};
-struct nvc0_graph_init
-nvd9_grctx_init_rop[] = {
+const struct nvc0_graph_init
+nvd9_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x02802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x1043e005 },
{ 0x408900, 1, 0x04, 0x3080b801 },
- { 0x408904, 1, 0x04, 0x1043e005 },
+ { 0x408904, 1, 0x04, 0x62000001 },
{ 0x408908, 1, 0x04, 0x00c8102f },
{ 0x408980, 1, 0x04, 0x0000011d },
{}
};
-static struct nvc0_graph_init
-nvd9_grctx_init_gpc_0[] = {
- { 0x418380, 1, 0x04, 0x00000016 },
+static const struct nvc0_graph_pack
+nvd9_grctx_pack_hub[] = {
+ { nvc0_grctx_init_main_0 },
+ { nvd9_grctx_init_fe_0 },
+ { nvc0_grctx_init_pri_0 },
+ { nvc0_grctx_init_memfmt_0 },
+ { nvd9_grctx_init_ds_0 },
+ { nvd9_grctx_init_pd_0 },
+ { nvc0_grctx_init_rstr2d_0 },
+ { nvc0_grctx_init_scc_0 },
+ { nvd9_grctx_init_be_0 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd9_grctx_init_prop_0[] = {
{ 0x418400, 1, 0x04, 0x38004e00 },
{ 0x418404, 1, 0x04, 0x71e0ffff },
{ 0x41840c, 1, 0x04, 0x00001008 },
@@ -368,11 +392,21 @@ nvd9_grctx_init_gpc_0[] = {
{ 0x418450, 6, 0x04, 0x00000000 },
{ 0x418468, 1, 0x04, 0x00000001 },
{ 0x41846c, 2, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd9_grctx_init_gpc_unk_1[] = {
{ 0x418600, 1, 0x04, 0x0000001f },
{ 0x418684, 1, 0x04, 0x0000000f },
{ 0x418700, 1, 0x04, 0x00000002 },
{ 0x418704, 1, 0x04, 0x00000080 },
{ 0x418708, 3, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd9_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x7006860a },
{ 0x418808, 3, 0x04, 0x00000000 },
{ 0x418828, 1, 0x04, 0x00008442 },
@@ -381,10 +415,11 @@ nvd9_grctx_init_gpc_0[] = {
{ 0x4188e0, 1, 0x04, 0x01000000 },
{ 0x4188e8, 5, 0x04, 0x00000000 },
{ 0x4188fc, 1, 0x04, 0x20100008 },
- { 0x41891c, 1, 0x04, 0x00ff00ff },
- { 0x418924, 1, 0x04, 0x00000000 },
- { 0x418928, 1, 0x04, 0x00ffff00 },
- { 0x41892c, 1, 0x04, 0x0000ff00 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd9_grctx_init_crstr_0[] = {
{ 0x418b00, 1, 0x04, 0x00000006 },
{ 0x418b08, 1, 0x04, 0x0a418820 },
{ 0x418b0c, 1, 0x04, 0x062080e6 },
@@ -393,24 +428,24 @@ nvd9_grctx_init_gpc_0[] = {
{ 0x418b18, 1, 0x04, 0x0a418820 },
{ 0x418b1c, 1, 0x04, 0x000000e6 },
{ 0x418bb8, 1, 0x04, 0x00000103 },
- { 0x418c08, 1, 0x04, 0x00000001 },
- { 0x418c10, 8, 0x04, 0x00000000 },
- { 0x418c6c, 1, 0x04, 0x00000001 },
- { 0x418c80, 1, 0x04, 0x20200004 },
- { 0x418c8c, 1, 0x04, 0x00000001 },
- { 0x419000, 1, 0x04, 0x00000780 },
- { 0x419004, 2, 0x04, 0x00000000 },
- { 0x419014, 1, 0x04, 0x00000004 },
{}
};
-static struct nvc0_graph_init
-nvd9_grctx_init_tpc[] = {
- { 0x419818, 1, 0x04, 0x00000000 },
- { 0x41983c, 1, 0x04, 0x00038bc7 },
- { 0x419848, 1, 0x04, 0x00000000 },
- { 0x419864, 1, 0x04, 0x00000129 },
- { 0x419888, 1, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nvd9_grctx_pack_gpc[] = {
+ { nvc0_grctx_init_gpc_unk_0 },
+ { nvd9_grctx_init_prop_0 },
+ { nvd9_grctx_init_gpc_unk_1 },
+ { nvd9_grctx_init_setup_0 },
+ { nvc0_grctx_init_zcull_0 },
+ { nvd9_grctx_init_crstr_0 },
+ { nvc1_grctx_init_gpm_0 },
+ { nvc0_grctx_init_gcc_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd9_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000001f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000023 },
@@ -420,27 +455,22 @@ nvd9_grctx_init_tpc[] = {
{ 0x419a1c, 1, 0x04, 0x00000000 },
{ 0x419a20, 1, 0x04, 0x00000800 },
{ 0x419ac4, 1, 0x04, 0x0017f440 },
- { 0x419b00, 1, 0x04, 0x0a418820 },
- { 0x419b04, 1, 0x04, 0x062080e6 },
- { 0x419b08, 1, 0x04, 0x020398a4 },
- { 0x419b0c, 1, 0x04, 0x0e629062 },
- { 0x419b10, 1, 0x04, 0x0a418820 },
- { 0x419b14, 1, 0x04, 0x000000e6 },
- { 0x419bd0, 1, 0x04, 0x00900103 },
- { 0x419be0, 1, 0x04, 0x00400001 },
- { 0x419be4, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd9_grctx_init_mpc_0[] = {
{ 0x419c00, 1, 0x04, 0x0000000a },
{ 0x419c04, 1, 0x04, 0x00000006 },
{ 0x419c08, 1, 0x04, 0x00000002 },
{ 0x419c20, 1, 0x04, 0x00000000 },
{ 0x419c24, 1, 0x04, 0x00084210 },
{ 0x419c28, 1, 0x04, 0x3cf3cf3c },
- { 0x419cb0, 1, 0x04, 0x00020048 },
- { 0x419ce8, 1, 0x04, 0x00000000 },
- { 0x419cf4, 1, 0x04, 0x00000183 },
- { 0x419d20, 1, 0x04, 0x12180000 },
- { 0x419d24, 1, 0x04, 0x00001fff },
- { 0x419d44, 1, 0x04, 0x02180218 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd9_grctx_init_sm_0[] = {
{ 0x419e04, 3, 0x04, 0x00000000 },
{ 0x419e10, 1, 0x04, 0x00000002 },
{ 0x419e44, 1, 0x04, 0x001beff2 },
@@ -453,47 +483,21 @@ nvd9_grctx_init_tpc[] = {
{}
};
-static struct nvc0_graph_init *
-nvd9_grctx_init_hub[] = {
- nvc0_grctx_init_base,
- nvd9_grctx_init_unk40xx,
- nvc0_grctx_init_unk44xx,
- nvc0_grctx_init_unk46xx,
- nvc0_grctx_init_unk47xx,
- nvd9_grctx_init_unk58xx,
- nvc0_grctx_init_unk60xx,
- nvd9_grctx_init_unk64xx,
- nvc0_grctx_init_unk78xx,
- nvc0_grctx_init_unk80xx,
- nvd9_grctx_init_rop,
- NULL
-};
-
-struct nvc0_graph_init *
-nvd9_grctx_init_gpc[] = {
- nvd9_grctx_init_gpc_0,
- nvc0_grctx_init_gpc_1,
- nvd9_grctx_init_tpc,
- NULL
-};
-
-struct nvc0_graph_init
-nvd9_grctx_init_mthd_magic[] = {
- { 0x3410, 1, 0x04, 0x80002006 },
+static const struct nvc0_graph_pack
+nvd9_grctx_pack_tpc[] = {
+ { nvc1_grctx_init_pe_0 },
+ { nvd9_grctx_init_tex_0 },
+ { nvc1_grctx_init_wwdx_0 },
+ { nvd9_grctx_init_mpc_0 },
+ { nvc4_grctx_init_l1c_0 },
+ { nvc1_grctx_init_tpccs_0 },
+ { nvd9_grctx_init_sm_0 },
{}
};
-struct nvc0_graph_mthd
-nvd9_grctx_init_mthd[] = {
- { 0x9097, nvc1_grctx_init_9097, },
- { 0x9197, nvc8_grctx_init_9197, },
- { 0x9297, nvc8_grctx_init_9297, },
- { 0x902d, nvc0_grctx_init_902d, },
- { 0x9039, nvc0_grctx_init_9039, },
- { 0x90c0, nvd9_grctx_init_90c0, },
- { 0x902d, nvd9_grctx_init_mthd_magic, },
- {}
-};
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
struct nouveau_oclass *
nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) {
@@ -506,11 +510,13 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) {
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
- .main = nvc0_grctx_generate_main,
- .mods = nvc1_grctx_generate_mods,
- .unkn = nvc1_grctx_generate_unkn,
- .hub = nvd9_grctx_init_hub,
- .gpc = nvd9_grctx_init_gpc,
- .icmd = nvd9_grctx_init_icmd,
- .mthd = nvd9_grctx_init_mthd,
+ .main = nvc0_grctx_generate_main,
+ .mods = nvc1_grctx_generate_mods,
+ .unkn = nvc1_grctx_generate_unkn,
+ .hub = nvd9_grctx_pack_hub,
+ .gpc = nvd9_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = nvd9_grctx_pack_tpc,
+ .icmd = nvd9_grctx_pack_icmd,
+ .mthd = nvd9_grctx_pack_mthd,
}.base;
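
Note on the one-line folds in the icmd tables: several hunks above merge adjacent entries that carry the same value into a single entry with a larger count (for example { 0x000794, 1 } followed by { 0x000795, 2 } becomes { 0x000794, 3 }, the icmd pitch being 0x01). Under the same assumed entry layout as the earlier sketch, the following standalone check expands both forms and verifies they yield an identical write sequence; it is a model for illustration only, not driver code.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct grctx_init {
            uint32_t addr;
            uint16_t count;
            uint16_t pitch;
            uint32_t data;
    };

    /* expand a {}-terminated init list into flat (addr, data) pairs */
    static int
    expand(const struct grctx_init *init, uint32_t *addrs, uint32_t *datas)
    {
            int n = 0, i;

            for (; init->count; init++) {
                    for (i = 0; i < init->count; i++) {
                            addrs[n] = init->addr + i * init->pitch;
                            datas[n] = init->data;
                            n++;
                    }
            }
            return n;
    }

    int
    main(void)
    {
            /* old form: two entries covering icmd 0x794 and 0x795..0x796 */
            static const struct grctx_init before[] = {
                    { 0x000794, 1, 0x01, 0x00000001 },
                    { 0x000795, 2, 0x01, 0x00000001 },
                    {}
            };
            /* new form: the single coalesced entry used in the hunks above */
            static const struct grctx_init after[] = {
                    { 0x000794, 3, 0x01, 0x00000001 },
                    {}
            };
            uint32_t a0[8], d0[8], a1[8], d1[8];
            int n0 = expand(before, a0, d0);
            int n1 = expand(after, a1, d1);
            int i;

            assert(n0 == n1);
            for (i = 0; i < n0; i++)
                    assert(a0[i] == a1[i] && d0[i] == d1[i]);
            printf("coalesced entry expands to the same %d writes\n", n0);
            return 0;
    }
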
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
index e2de73ee5eee..49a14b116a5f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
@@ -22,10 +22,14 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "nvc0.h"
+#include "ctxnvc0.h"
-struct nvc0_graph_init
-nve4_grctx_init_icmd[] = {
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+nve4_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x000039, 3, 0x01, 0x00000000 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
@@ -138,8 +142,7 @@ nve4_grctx_init_icmd[] = {
{ 0x000586, 1, 0x01, 0x00000040 },
{ 0x000582, 2, 0x01, 0x00000080 },
{ 0x0005c2, 1, 0x01, 0x00000001 },
- { 0x000638, 1, 0x01, 0x00000001 },
- { 0x000639, 1, 0x01, 0x00000001 },
+ { 0x000638, 2, 0x01, 0x00000001 },
{ 0x00063a, 1, 0x01, 0x00000002 },
{ 0x00063b, 2, 0x01, 0x00000001 },
{ 0x00063d, 1, 0x01, 0x00000002 },
@@ -197,15 +200,13 @@ nve4_grctx_init_icmd[] = {
{ 0x000787, 1, 0x01, 0x000000cf },
{ 0x00078c, 1, 0x01, 0x00000008 },
{ 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 1, 0x01, 0x00000001 },
- { 0x000795, 2, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
{ 0x000797, 1, 0x01, 0x000000cf },
{ 0x000836, 1, 0x01, 0x00000001 },
{ 0x00079a, 1, 0x01, 0x00000002 },
{ 0x000833, 1, 0x01, 0x04444480 },
{ 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 1, 0x01, 0x00000001 },
- { 0x0007a4, 2, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
{ 0x000831, 1, 0x01, 0x00000004 },
{ 0x000b07, 1, 0x01, 0x00000002 },
{ 0x000b08, 2, 0x01, 0x00000100 },
@@ -231,14 +232,12 @@ nve4_grctx_init_icmd[] = {
{ 0x0006b1, 1, 0x01, 0x00000011 },
{ 0x00078c, 1, 0x01, 0x00000008 },
{ 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 1, 0x01, 0x00000001 },
- { 0x000795, 2, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
{ 0x000797, 1, 0x01, 0x000000cf },
{ 0x00079a, 1, 0x01, 0x00000002 },
{ 0x000833, 1, 0x01, 0x04444480 },
{ 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 1, 0x01, 0x00000001 },
- { 0x0007a4, 2, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
{ 0x000831, 1, 0x01, 0x00000004 },
{ 0x01e100, 1, 0x01, 0x00000001 },
{ 0x001000, 1, 0x01, 0x00000008 },
@@ -273,8 +272,14 @@ nve4_grctx_init_icmd[] = {
{}
};
-struct nvc0_graph_init
-nve4_grctx_init_a097[] = {
+static const struct nvc0_graph_pack
+nve4_grctx_pack_icmd[] = {
+ { nve4_grctx_init_icmd_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_a097_0[] = {
{ 0x000800, 8, 0x40, 0x00000000 },
{ 0x000804, 8, 0x40, 0x00000000 },
{ 0x000808, 8, 0x40, 0x00000400 },
@@ -517,8 +522,7 @@ nve4_grctx_init_a097[] = {
{ 0x001350, 1, 0x04, 0x00000002 },
{ 0x001358, 1, 0x04, 0x00000001 },
{ 0x0012e4, 1, 0x04, 0x00000000 },
- { 0x00131c, 1, 0x04, 0x00000000 },
- { 0x001320, 3, 0x04, 0x00000000 },
+ { 0x00131c, 4, 0x04, 0x00000000 },
{ 0x0019c0, 1, 0x04, 0x00000000 },
{ 0x001140, 1, 0x04, 0x00000000 },
{ 0x0019c4, 1, 0x04, 0x00000000 },
@@ -574,19 +578,24 @@ nve4_grctx_init_a097[] = {
{}
};
-static struct nvc0_graph_init
-nve4_grctx_init_unk40xx[] = {
+static const struct nvc0_graph_pack
+nve4_grctx_pack_mthd[] = {
+ { nve4_grctx_init_a097_0, 0xa097 },
+ { nvc0_grctx_init_902d_0, 0x902d },
+ {}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_fe_0[] = {
{ 0x404010, 5, 0x04, 0x00000000 },
{ 0x404024, 1, 0x04, 0x0000e000 },
{ 0x404028, 1, 0x04, 0x00000000 },
- { 0x4040a8, 1, 0x04, 0x00000000 },
- { 0x4040ac, 7, 0x04, 0x00000000 },
+ { 0x4040a8, 8, 0x04, 0x00000000 },
{ 0x4040c8, 1, 0x04, 0xf800008f },
{ 0x4040d0, 6, 0x04, 0x00000000 },
{ 0x4040e8, 1, 0x04, 0x00001000 },
{ 0x4040f8, 1, 0x04, 0x00000000 },
- { 0x404130, 1, 0x04, 0x00000000 },
- { 0x404134, 1, 0x04, 0x00000000 },
+ { 0x404130, 2, 0x04, 0x00000000 },
{ 0x404138, 1, 0x04, 0x20000040 },
{ 0x404150, 1, 0x04, 0x0000002e },
{ 0x404154, 1, 0x04, 0x00000400 },
@@ -597,8 +606,8 @@ nve4_grctx_init_unk40xx[] = {
{}
};
-struct nvc0_graph_init
-nve4_grctx_init_unk46xx[] = {
+const struct nvc0_graph_init
+nve4_grctx_init_memfmt_0[] = {
{ 0x404604, 1, 0x04, 0x00000014 },
{ 0x404608, 1, 0x04, 0x00000000 },
{ 0x40460c, 1, 0x04, 0x00003fff },
@@ -614,11 +623,6 @@ nve4_grctx_init_unk46xx[] = {
{ 0x4046a0, 1, 0x04, 0x007f0080 },
{ 0x4046a4, 8, 0x04, 0x00000000 },
{ 0x4046c8, 3, 0x04, 0x00000000 },
- {}
-};
-
-struct nvc0_graph_init
-nve4_grctx_init_unk47xx[] = {
{ 0x404700, 3, 0x04, 0x00000000 },
{ 0x404718, 7, 0x04, 0x00000000 },
{ 0x404734, 1, 0x04, 0x00000100 },
@@ -628,8 +632,8 @@ nve4_grctx_init_unk47xx[] = {
{}
};
-struct nvc0_graph_init
-nve4_grctx_init_unk58xx[] = {
+const struct nvc0_graph_init
+nve4_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x0f8000bf },
{ 0x405830, 1, 0x04, 0x02180648 },
{ 0x405834, 1, 0x04, 0x08000000 },
@@ -641,22 +645,17 @@ nve4_grctx_init_unk58xx[] = {
{}
};
-static struct nvc0_graph_init
-nve4_grctx_init_unk5bxx[] = {
+static const struct nvc0_graph_init
+nve4_grctx_init_cwd_0[] = {
{ 0x405b00, 1, 0x04, 0x00000000 },
{ 0x405b10, 1, 0x04, 0x00001000 },
{}
};
-static struct nvc0_graph_init
-nve4_grctx_init_unk60xx[] = {
+static const struct nvc0_graph_init
+nve4_grctx_init_pd_0[] = {
{ 0x406020, 1, 0x04, 0x004103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
- {}
-};
-
-static struct nvc0_graph_init
-nve4_grctx_init_unk64xx[] = {
{ 0x4064a8, 1, 0x04, 0x00000000 },
{ 0x4064ac, 1, 0x04, 0x00003fff },
{ 0x4064b4, 2, 0x04, 0x00000000 },
@@ -668,14 +667,14 @@ nve4_grctx_init_unk64xx[] = {
{}
};
-static struct nvc0_graph_init
-nve4_grctx_init_unk70xx[] = {
+static const struct nvc0_graph_init
+nve4_grctx_init_sked_0[] = {
{ 0x407040, 1, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nve4_grctx_init_unk80xx[] = {
+const struct nvc0_graph_init
+nve4_grctx_init_scc_0[] = {
{ 0x408000, 2, 0x04, 0x00000000 },
{ 0x408008, 1, 0x04, 0x00000030 },
{ 0x40800c, 2, 0x04, 0x00000000 },
@@ -685,8 +684,8 @@ nve4_grctx_init_unk80xx[] = {
{}
};
-static struct nvc0_graph_init
-nve4_grctx_init_rop[] = {
+static const struct nvc0_graph_init
+nve4_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x02802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x1043e005 },
@@ -698,22 +697,24 @@ nve4_grctx_init_rop[] = {
{}
};
-static struct nvc0_graph_init
-nve4_grctx_init_gpc_0[] = {
- { 0x418380, 1, 0x04, 0x00000016 },
- { 0x418400, 1, 0x04, 0x38004e00 },
- { 0x418404, 1, 0x04, 0x71e0ffff },
- { 0x41840c, 1, 0x04, 0x00001008 },
- { 0x418410, 1, 0x04, 0x0fff0fff },
- { 0x418414, 1, 0x04, 0x02200fff },
- { 0x418450, 6, 0x04, 0x00000000 },
- { 0x418468, 1, 0x04, 0x00000001 },
- { 0x41846c, 2, 0x04, 0x00000000 },
- { 0x418600, 1, 0x04, 0x0000001f },
- { 0x418684, 1, 0x04, 0x0000000f },
- { 0x418700, 1, 0x04, 0x00000002 },
- { 0x418704, 1, 0x04, 0x00000080 },
- { 0x418708, 3, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nve4_grctx_pack_hub[] = {
+ { nvc0_grctx_init_main_0 },
+ { nve4_grctx_init_fe_0 },
+ { nvc0_grctx_init_pri_0 },
+ { nve4_grctx_init_memfmt_0 },
+ { nve4_grctx_init_ds_0 },
+ { nve4_grctx_init_cwd_0 },
+ { nve4_grctx_init_pd_0 },
+ { nve4_grctx_init_sked_0 },
+ { nvc0_grctx_init_rstr2d_0 },
+ { nve4_grctx_init_scc_0 },
+ { nve4_grctx_init_be_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x7006860a },
{ 0x418808, 3, 0x04, 0x00000000 },
{ 0x418828, 1, 0x04, 0x00000044 },
@@ -722,35 +723,35 @@ nve4_grctx_init_gpc_0[] = {
{ 0x4188e0, 1, 0x04, 0x01000000 },
{ 0x4188e8, 5, 0x04, 0x00000000 },
{ 0x4188fc, 1, 0x04, 0x20100018 },
- { 0x41891c, 1, 0x04, 0x00ff00ff },
- { 0x418924, 1, 0x04, 0x00000000 },
- { 0x418928, 1, 0x04, 0x00ffff00 },
- { 0x41892c, 1, 0x04, 0x0000ff00 },
- { 0x418b00, 1, 0x04, 0x00000006 },
- { 0x418b08, 1, 0x04, 0x0a418820 },
- { 0x418b0c, 1, 0x04, 0x062080e6 },
- { 0x418b10, 1, 0x04, 0x020398a4 },
- { 0x418b14, 1, 0x04, 0x0e629062 },
- { 0x418b18, 1, 0x04, 0x0a418820 },
- { 0x418b1c, 1, 0x04, 0x000000e6 },
- { 0x418bb8, 1, 0x04, 0x00000103 },
+ {}
+};
+
+const struct nvc0_graph_init
+nve4_grctx_init_gpm_0[] = {
{ 0x418c08, 1, 0x04, 0x00000001 },
{ 0x418c10, 8, 0x04, 0x00000000 },
{ 0x418c40, 1, 0x04, 0xffffffff },
{ 0x418c6c, 1, 0x04, 0x00000001 },
{ 0x418c80, 1, 0x04, 0x20200004 },
{ 0x418c8c, 1, 0x04, 0x00000001 },
- { 0x419000, 1, 0x04, 0x00000780 },
- { 0x419004, 2, 0x04, 0x00000000 },
- { 0x419014, 1, 0x04, 0x00000004 },
{}
};
-static struct nvc0_graph_init
-nve4_grctx_init_tpc[] = {
- { 0x419848, 1, 0x04, 0x00000000 },
- { 0x419864, 1, 0x04, 0x00000129 },
- { 0x419888, 1, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nve4_grctx_pack_gpc[] = {
+ { nvc0_grctx_init_gpc_unk_0 },
+ { nvd9_grctx_init_prop_0 },
+ { nvd9_grctx_init_gpc_unk_1 },
+ { nve4_grctx_init_setup_0 },
+ { nvc0_grctx_init_zcull_0 },
+ { nvd9_grctx_init_crstr_0 },
+ { nve4_grctx_init_gpm_0 },
+ { nvc0_grctx_init_gcc_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000000f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000021 },
@@ -761,14 +762,29 @@ nve4_grctx_init_tpc[] = {
{ 0x419a20, 1, 0x04, 0x00000800 },
{ 0x419a30, 1, 0x04, 0x00000001 },
{ 0x419ac4, 1, 0x04, 0x0037f440 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_mpc_0[] = {
{ 0x419c00, 1, 0x04, 0x0000000a },
{ 0x419c04, 1, 0x04, 0x80000006 },
{ 0x419c08, 1, 0x04, 0x00000002 },
{ 0x419c20, 1, 0x04, 0x00000000 },
{ 0x419c24, 1, 0x04, 0x00084210 },
{ 0x419c28, 1, 0x04, 0x3efbefbe },
+ {}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_l1c_0[] = {
{ 0x419ce8, 1, 0x04, 0x00000000 },
{ 0x419cf4, 1, 0x04, 0x00003203 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_sm_0[] = {
{ 0x419e04, 3, 0x04, 0x00000000 },
{ 0x419e10, 1, 0x04, 0x00000402 },
{ 0x419e44, 1, 0x04, 0x0013eff2 },
@@ -782,28 +798,46 @@ nve4_grctx_init_tpc[] = {
{ 0x419f58, 1, 0x04, 0x00000000 },
{ 0x419f70, 1, 0x04, 0x00000000 },
{ 0x419f78, 1, 0x04, 0x0000000b },
- { 0x419f7c, 1, 0x04, 0x0000027a },
+ { 0x419f7c, 1, 0x04, 0x0000027c },
+ {}
+};
+
+static const struct nvc0_graph_pack
+nve4_grctx_pack_tpc[] = {
+ { nvd7_grctx_init_pe_0 },
+ { nve4_grctx_init_tex_0 },
+ { nve4_grctx_init_mpc_0 },
+ { nve4_grctx_init_l1c_0 },
+ { nve4_grctx_init_sm_0 },
{}
};
-static struct nvc0_graph_init
-nve4_grctx_init_unk[] = {
+const struct nvc0_graph_init
+nve4_grctx_init_pes_0[] = {
{ 0x41be24, 1, 0x04, 0x00000006 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_cbm_0[] = {
{ 0x41bec0, 1, 0x04, 0x12180000 },
{ 0x41bec4, 1, 0x04, 0x00037f7f },
{ 0x41bee4, 1, 0x04, 0x06480430 },
- { 0x41bf00, 1, 0x04, 0x0a418820 },
- { 0x41bf04, 1, 0x04, 0x062080e6 },
- { 0x41bf08, 1, 0x04, 0x020398a4 },
- { 0x41bf0c, 1, 0x04, 0x0e629062 },
- { 0x41bf10, 1, 0x04, 0x0a418820 },
- { 0x41bf14, 1, 0x04, 0x000000e6 },
- { 0x41bfd0, 1, 0x04, 0x00900103 },
- { 0x41bfe0, 1, 0x04, 0x00400001 },
- { 0x41bfe4, 1, 0x04, 0x00000000 },
{}
};
+static const struct nvc0_graph_pack
+nve4_grctx_pack_ppc[] = {
+ { nve4_grctx_init_pes_0 },
+ { nve4_grctx_init_cbm_0 },
+ { nvd7_grctx_init_wwdx_0 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
static void
nve4_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
@@ -925,10 +959,11 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
- for (i = 0; oclass->hub[i]; i++)
- nvc0_graph_mmio(priv, oclass->hub[i]);
- for (i = 0; oclass->gpc[i]; i++)
- nvc0_graph_mmio(priv, oclass->gpc[i]);
+ nvc0_graph_mmio(priv, oclass->hub);
+ nvc0_graph_mmio(priv, oclass->gpc);
+ nvc0_graph_mmio(priv, oclass->zcull);
+ nvc0_graph_mmio(priv, oclass->tpc);
+ nvc0_graph_mmio(priv, oclass->ppc);
nv_wr32(priv, 0x404154, 0x00000000);
@@ -962,41 +997,6 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_mask(priv, 0x41be10, 0x00800000, 0x00800000);
}
-static struct nvc0_graph_init *
-nve4_grctx_init_hub[] = {
- nvc0_grctx_init_base,
- nve4_grctx_init_unk40xx,
- nvc0_grctx_init_unk44xx,
- nve4_grctx_init_unk46xx,
- nve4_grctx_init_unk47xx,
- nve4_grctx_init_unk58xx,
- nve4_grctx_init_unk5bxx,
- nve4_grctx_init_unk60xx,
- nve4_grctx_init_unk64xx,
- nve4_grctx_init_unk70xx,
- nvc0_grctx_init_unk78xx,
- nve4_grctx_init_unk80xx,
- nve4_grctx_init_rop,
- NULL
-};
-
-struct nvc0_graph_init *
-nve4_grctx_init_gpc[] = {
- nve4_grctx_init_gpc_0,
- nvc0_grctx_init_gpc_1,
- nve4_grctx_init_tpc,
- nve4_grctx_init_unk,
- NULL
-};
-
-static struct nvc0_graph_mthd
-nve4_grctx_init_mthd[] = {
- { 0xa097, nve4_grctx_init_a097, },
- { 0x902d, nvc0_grctx_init_902d, },
- { 0x902d, nvc0_grctx_init_mthd_magic, },
- {}
-};
-
struct nouveau_oclass *
nve4_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xe4),
@@ -1008,11 +1008,14 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) {
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
- .main = nve4_grctx_generate_main,
- .mods = nve4_grctx_generate_mods,
- .unkn = nve4_grctx_generate_unkn,
- .hub = nve4_grctx_init_hub,
- .gpc = nve4_grctx_init_gpc,
- .icmd = nve4_grctx_init_icmd,
- .mthd = nve4_grctx_init_mthd,
+ .main = nve4_grctx_generate_main,
+ .mods = nve4_grctx_generate_mods,
+ .unkn = nve4_grctx_generate_unkn,
+ .hub = nve4_grctx_pack_hub,
+ .gpc = nve4_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = nve4_grctx_pack_tpc,
+ .ppc = nve4_grctx_pack_ppc,
+ .icmd = nve4_grctx_pack_icmd,
+ .mthd = nve4_grctx_pack_mthd,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
index 44012c3da538..0fab95e49f53 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
@@ -22,10 +22,580 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "nvc0.h"
+#include "ctxnvc0.h"
-static struct nvc0_graph_init
-nvf0_grctx_init_unk40xx[] = {
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+nvf0_grctx_init_icmd_0[] = {
+ { 0x001000, 1, 0x01, 0x00000004 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x0000a9, 1, 0x01, 0x0000ffff },
+ { 0x000038, 1, 0x01, 0x0fac6881 },
+ { 0x00003d, 1, 0x01, 0x00000001 },
+ { 0x0000e8, 8, 0x01, 0x00000400 },
+ { 0x000078, 8, 0x01, 0x00000300 },
+ { 0x000050, 1, 0x01, 0x00000011 },
+ { 0x000058, 8, 0x01, 0x00000008 },
+ { 0x000208, 8, 0x01, 0x00000001 },
+ { 0x000081, 1, 0x01, 0x00000001 },
+ { 0x000085, 1, 0x01, 0x00000004 },
+ { 0x000088, 1, 0x01, 0x00000400 },
+ { 0x000090, 1, 0x01, 0x00000300 },
+ { 0x000098, 1, 0x01, 0x00001001 },
+ { 0x0000e3, 1, 0x01, 0x00000001 },
+ { 0x0000da, 1, 0x01, 0x00000001 },
+ { 0x0000f8, 1, 0x01, 0x00000003 },
+ { 0x0000fa, 1, 0x01, 0x00000001 },
+ { 0x00009f, 4, 0x01, 0x0000ffff },
+ { 0x0000b1, 1, 0x01, 0x00000001 },
+ { 0x0000ad, 1, 0x01, 0x0000013e },
+ { 0x0000e1, 1, 0x01, 0x00000010 },
+ { 0x000290, 16, 0x01, 0x00000000 },
+ { 0x0003b0, 16, 0x01, 0x00000000 },
+ { 0x0002a0, 16, 0x01, 0x00000000 },
+ { 0x000420, 16, 0x01, 0x00000000 },
+ { 0x0002b0, 16, 0x01, 0x00000000 },
+ { 0x000430, 16, 0x01, 0x00000000 },
+ { 0x0002c0, 16, 0x01, 0x00000000 },
+ { 0x0004d0, 16, 0x01, 0x00000000 },
+ { 0x000720, 16, 0x01, 0x00000000 },
+ { 0x0008c0, 16, 0x01, 0x00000000 },
+ { 0x000890, 16, 0x01, 0x00000000 },
+ { 0x0008e0, 16, 0x01, 0x00000000 },
+ { 0x0008a0, 16, 0x01, 0x00000000 },
+ { 0x0008f0, 16, 0x01, 0x00000000 },
+ { 0x00094c, 1, 0x01, 0x000000ff },
+ { 0x00094d, 1, 0x01, 0xffffffff },
+ { 0x00094e, 1, 0x01, 0x00000002 },
+ { 0x0002ec, 1, 0x01, 0x00000001 },
+ { 0x0002f2, 2, 0x01, 0x00000001 },
+ { 0x0002f5, 1, 0x01, 0x00000001 },
+ { 0x0002f7, 1, 0x01, 0x00000001 },
+ { 0x000303, 1, 0x01, 0x00000001 },
+ { 0x0002e6, 1, 0x01, 0x00000001 },
+ { 0x000466, 1, 0x01, 0x00000052 },
+ { 0x000301, 1, 0x01, 0x3f800000 },
+ { 0x000304, 1, 0x01, 0x30201000 },
+ { 0x000305, 1, 0x01, 0x70605040 },
+ { 0x000306, 1, 0x01, 0xb8a89888 },
+ { 0x000307, 1, 0x01, 0xf8e8d8c8 },
+ { 0x00030a, 1, 0x01, 0x00ffff00 },
+ { 0x00030b, 1, 0x01, 0x0000001a },
+ { 0x00030c, 1, 0x01, 0x00000001 },
+ { 0x000318, 1, 0x01, 0x00000001 },
+ { 0x000340, 1, 0x01, 0x00000000 },
+ { 0x000375, 1, 0x01, 0x00000001 },
+ { 0x00037d, 1, 0x01, 0x00000006 },
+ { 0x0003a0, 1, 0x01, 0x00000002 },
+ { 0x0003aa, 1, 0x01, 0x00000001 },
+ { 0x0003a9, 1, 0x01, 0x00000001 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000383, 1, 0x01, 0x00000011 },
+ { 0x000360, 1, 0x01, 0x00000040 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x00037a, 1, 0x01, 0x00000012 },
+ { 0x000619, 1, 0x01, 0x00000003 },
+ { 0x000811, 1, 0x01, 0x00000003 },
+ { 0x000812, 1, 0x01, 0x00000004 },
+ { 0x000813, 1, 0x01, 0x00000006 },
+ { 0x000814, 1, 0x01, 0x00000008 },
+ { 0x000815, 1, 0x01, 0x0000000b },
+ { 0x000800, 6, 0x01, 0x00000001 },
+ { 0x000632, 1, 0x01, 0x00000001 },
+ { 0x000633, 1, 0x01, 0x00000002 },
+ { 0x000634, 1, 0x01, 0x00000003 },
+ { 0x000635, 1, 0x01, 0x00000004 },
+ { 0x000654, 1, 0x01, 0x3f800000 },
+ { 0x000657, 1, 0x01, 0x3f800000 },
+ { 0x000655, 2, 0x01, 0x3f800000 },
+ { 0x0006cd, 1, 0x01, 0x3f800000 },
+ { 0x0007f5, 1, 0x01, 0x3f800000 },
+ { 0x0007dc, 1, 0x01, 0x39291909 },
+ { 0x0007dd, 1, 0x01, 0x79695949 },
+ { 0x0007de, 1, 0x01, 0xb9a99989 },
+ { 0x0007df, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007e8, 1, 0x01, 0x00003210 },
+ { 0x0007e9, 1, 0x01, 0x00007654 },
+ { 0x0007ea, 1, 0x01, 0x00000098 },
+ { 0x0007ec, 1, 0x01, 0x39291909 },
+ { 0x0007ed, 1, 0x01, 0x79695949 },
+ { 0x0007ee, 1, 0x01, 0xb9a99989 },
+ { 0x0007ef, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007f0, 1, 0x01, 0x00003210 },
+ { 0x0007f1, 1, 0x01, 0x00007654 },
+ { 0x0007f2, 1, 0x01, 0x00000098 },
+ { 0x0005a5, 1, 0x01, 0x00000001 },
+ { 0x000980, 128, 0x01, 0x00000000 },
+ { 0x000468, 1, 0x01, 0x00000004 },
+ { 0x00046c, 1, 0x01, 0x00000001 },
+ { 0x000470, 96, 0x01, 0x00000000 },
+ { 0x000510, 16, 0x01, 0x3f800000 },
+ { 0x000520, 1, 0x01, 0x000002b6 },
+ { 0x000529, 1, 0x01, 0x00000001 },
+ { 0x000530, 16, 0x01, 0xffff0000 },
+ { 0x000585, 1, 0x01, 0x0000003f },
+ { 0x000576, 1, 0x01, 0x00000003 },
+ { 0x00057b, 1, 0x01, 0x00000059 },
+ { 0x000586, 1, 0x01, 0x00000040 },
+ { 0x000582, 2, 0x01, 0x00000080 },
+ { 0x0005c2, 1, 0x01, 0x00000001 },
+ { 0x000638, 2, 0x01, 0x00000001 },
+ { 0x00063a, 1, 0x01, 0x00000002 },
+ { 0x00063b, 2, 0x01, 0x00000001 },
+ { 0x00063d, 1, 0x01, 0x00000002 },
+ { 0x00063e, 1, 0x01, 0x00000001 },
+ { 0x0008b8, 8, 0x01, 0x00000001 },
+ { 0x000900, 8, 0x01, 0x00000001 },
+ { 0x000908, 8, 0x01, 0x00000002 },
+ { 0x000910, 16, 0x01, 0x00000001 },
+ { 0x000920, 8, 0x01, 0x00000002 },
+ { 0x000928, 8, 0x01, 0x00000001 },
+ { 0x000662, 1, 0x01, 0x00000001 },
+ { 0x000648, 9, 0x01, 0x00000001 },
+ { 0x000658, 1, 0x01, 0x0000000f },
+ { 0x0007ff, 1, 0x01, 0x0000000a },
+ { 0x00066a, 1, 0x01, 0x40000000 },
+ { 0x00066b, 1, 0x01, 0x10000000 },
+ { 0x00066c, 2, 0x01, 0xffff0000 },
+ { 0x0007af, 2, 0x01, 0x00000008 },
+ { 0x0007f6, 1, 0x01, 0x00000001 },
+ { 0x00080b, 1, 0x01, 0x00000002 },
+ { 0x0006b2, 1, 0x01, 0x00000055 },
+ { 0x0007ad, 1, 0x01, 0x00000003 },
+ { 0x000937, 1, 0x01, 0x00000001 },
+ { 0x000971, 1, 0x01, 0x00000008 },
+ { 0x000972, 1, 0x01, 0x00000040 },
+ { 0x000973, 1, 0x01, 0x0000012c },
+ { 0x00097c, 1, 0x01, 0x00000040 },
+ { 0x000979, 1, 0x01, 0x00000003 },
+ { 0x000975, 1, 0x01, 0x00000020 },
+ { 0x000976, 1, 0x01, 0x00000001 },
+ { 0x000977, 1, 0x01, 0x00000020 },
+ { 0x000978, 1, 0x01, 0x00000001 },
+ { 0x000957, 1, 0x01, 0x00000003 },
+ { 0x00095e, 1, 0x01, 0x20164010 },
+ { 0x00095f, 1, 0x01, 0x00000020 },
+ { 0x000a0d, 1, 0x01, 0x00000006 },
+ { 0x00097d, 1, 0x01, 0x00000020 },
+ { 0x000683, 1, 0x01, 0x00000006 },
+ { 0x000685, 1, 0x01, 0x003fffff },
+ { 0x000687, 1, 0x01, 0x003fffff },
+ { 0x0006a0, 1, 0x01, 0x00000005 },
+ { 0x000840, 1, 0x01, 0x00400008 },
+ { 0x000841, 1, 0x01, 0x08000080 },
+ { 0x000842, 1, 0x01, 0x00400008 },
+ { 0x000843, 1, 0x01, 0x08000080 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ab, 1, 0x01, 0x00000002 },
+ { 0x0006ac, 1, 0x01, 0x00000080 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x0006bb, 1, 0x01, 0x000000cf },
+ { 0x0006ce, 1, 0x01, 0x2a712488 },
+ { 0x000739, 1, 0x01, 0x4085c000 },
+ { 0x00073a, 1, 0x01, 0x00000080 },
+ { 0x000786, 1, 0x01, 0x80000100 },
+ { 0x00073c, 1, 0x01, 0x00010100 },
+ { 0x00073d, 1, 0x01, 0x02800000 },
+ { 0x000787, 1, 0x01, 0x000000cf },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x000836, 1, 0x01, 0x00000001 },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x000833, 1, 0x01, 0x04444480 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c1b0, 8, 0x01, 0x0000000f },
+ { 0x00c1b8, 1, 0x01, 0x0fac6881 },
+ { 0x00c1b9, 1, 0x01, 0x00fac688 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x00c500, 1, 0x01, 0x00000003 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000002 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x000833, 1, 0x01, 0x04444480 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000008 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x000813, 1, 0x01, 0x00000006 },
+ { 0x000814, 1, 0x01, 0x00000008 },
+ { 0x000957, 1, 0x01, 0x00000003 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x00c500, 1, 0x01, 0x00000003 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000001 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+nvf0_grctx_pack_icmd[] = {
+ { nvf0_grctx_init_icmd_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvf0_grctx_init_a197_0[] = {
+ { 0x000800, 8, 0x40, 0x00000000 },
+ { 0x000804, 8, 0x40, 0x00000000 },
+ { 0x000808, 8, 0x40, 0x00000400 },
+ { 0x00080c, 8, 0x40, 0x00000300 },
+ { 0x000810, 1, 0x04, 0x000000cf },
+ { 0x000850, 7, 0x40, 0x00000000 },
+ { 0x000814, 8, 0x40, 0x00000040 },
+ { 0x000818, 8, 0x40, 0x00000001 },
+ { 0x00081c, 8, 0x40, 0x00000000 },
+ { 0x000820, 8, 0x40, 0x00000000 },
+ { 0x001c00, 16, 0x10, 0x00000000 },
+ { 0x001c04, 16, 0x10, 0x00000000 },
+ { 0x001c08, 16, 0x10, 0x00000000 },
+ { 0x001c0c, 16, 0x10, 0x00000000 },
+ { 0x001d00, 16, 0x10, 0x00000000 },
+ { 0x001d04, 16, 0x10, 0x00000000 },
+ { 0x001d08, 16, 0x10, 0x00000000 },
+ { 0x001d0c, 16, 0x10, 0x00000000 },
+ { 0x001f00, 16, 0x08, 0x00000000 },
+ { 0x001f04, 16, 0x08, 0x00000000 },
+ { 0x001f80, 16, 0x08, 0x00000000 },
+ { 0x001f84, 16, 0x08, 0x00000000 },
+ { 0x002000, 1, 0x04, 0x00000000 },
+ { 0x002040, 1, 0x04, 0x00000011 },
+ { 0x002080, 1, 0x04, 0x00000020 },
+ { 0x0020c0, 1, 0x04, 0x00000030 },
+ { 0x002100, 1, 0x04, 0x00000040 },
+ { 0x002140, 1, 0x04, 0x00000051 },
+ { 0x00200c, 6, 0x40, 0x00000001 },
+ { 0x002010, 1, 0x04, 0x00000000 },
+ { 0x002050, 1, 0x04, 0x00000000 },
+ { 0x002090, 1, 0x04, 0x00000001 },
+ { 0x0020d0, 1, 0x04, 0x00000002 },
+ { 0x002110, 1, 0x04, 0x00000003 },
+ { 0x002150, 1, 0x04, 0x00000004 },
+ { 0x000380, 4, 0x20, 0x00000000 },
+ { 0x000384, 4, 0x20, 0x00000000 },
+ { 0x000388, 4, 0x20, 0x00000000 },
+ { 0x00038c, 4, 0x20, 0x00000000 },
+ { 0x000700, 4, 0x10, 0x00000000 },
+ { 0x000704, 4, 0x10, 0x00000000 },
+ { 0x000708, 4, 0x10, 0x00000000 },
+ { 0x002800, 128, 0x04, 0x00000000 },
+ { 0x000a00, 16, 0x20, 0x00000000 },
+ { 0x000a04, 16, 0x20, 0x00000000 },
+ { 0x000a08, 16, 0x20, 0x00000000 },
+ { 0x000a0c, 16, 0x20, 0x00000000 },
+ { 0x000a10, 16, 0x20, 0x00000000 },
+ { 0x000a14, 16, 0x20, 0x00000000 },
+ { 0x000c00, 16, 0x10, 0x00000000 },
+ { 0x000c04, 16, 0x10, 0x00000000 },
+ { 0x000c08, 16, 0x10, 0x00000000 },
+ { 0x000c0c, 16, 0x10, 0x3f800000 },
+ { 0x000d00, 8, 0x08, 0xffff0000 },
+ { 0x000d04, 8, 0x08, 0xffff0000 },
+ { 0x000e00, 16, 0x10, 0x00000000 },
+ { 0x000e04, 16, 0x10, 0xffff0000 },
+ { 0x000e08, 16, 0x10, 0xffff0000 },
+ { 0x000d40, 4, 0x08, 0x00000000 },
+ { 0x000d44, 4, 0x08, 0x00000000 },
+ { 0x001e00, 8, 0x20, 0x00000001 },
+ { 0x001e04, 8, 0x20, 0x00000001 },
+ { 0x001e08, 8, 0x20, 0x00000002 },
+ { 0x001e0c, 8, 0x20, 0x00000001 },
+ { 0x001e10, 8, 0x20, 0x00000001 },
+ { 0x001e14, 8, 0x20, 0x00000002 },
+ { 0x001e18, 8, 0x20, 0x00000001 },
+ { 0x003400, 128, 0x04, 0x00000000 },
+ { 0x00030c, 1, 0x04, 0x00000001 },
+ { 0x001944, 1, 0x04, 0x00000000 },
+ { 0x001514, 1, 0x04, 0x00000000 },
+ { 0x000d68, 1, 0x04, 0x0000ffff },
+ { 0x00121c, 1, 0x04, 0x0fac6881 },
+ { 0x000fac, 1, 0x04, 0x00000001 },
+ { 0x001538, 1, 0x04, 0x00000001 },
+ { 0x000fe0, 2, 0x04, 0x00000000 },
+ { 0x000fe8, 1, 0x04, 0x00000014 },
+ { 0x000fec, 1, 0x04, 0x00000040 },
+ { 0x000ff0, 1, 0x04, 0x00000000 },
+ { 0x00179c, 1, 0x04, 0x00000000 },
+ { 0x001228, 1, 0x04, 0x00000400 },
+ { 0x00122c, 1, 0x04, 0x00000300 },
+ { 0x001230, 1, 0x04, 0x00010001 },
+ { 0x0007f8, 1, 0x04, 0x00000000 },
+ { 0x0015b4, 1, 0x04, 0x00000001 },
+ { 0x0015cc, 1, 0x04, 0x00000000 },
+ { 0x001534, 1, 0x04, 0x00000000 },
+ { 0x000fb0, 1, 0x04, 0x00000000 },
+ { 0x0015d0, 1, 0x04, 0x00000000 },
+ { 0x00153c, 1, 0x04, 0x00000000 },
+ { 0x0016b4, 1, 0x04, 0x00000003 },
+ { 0x000fbc, 4, 0x04, 0x0000ffff },
+ { 0x000df8, 2, 0x04, 0x00000000 },
+ { 0x001948, 1, 0x04, 0x00000000 },
+ { 0x001970, 1, 0x04, 0x00000001 },
+ { 0x00161c, 1, 0x04, 0x000009f0 },
+ { 0x000dcc, 1, 0x04, 0x00000010 },
+ { 0x00163c, 1, 0x04, 0x00000000 },
+ { 0x0015e4, 1, 0x04, 0x00000000 },
+ { 0x001160, 32, 0x04, 0x25e00040 },
+ { 0x001880, 32, 0x04, 0x00000000 },
+ { 0x000f84, 2, 0x04, 0x00000000 },
+ { 0x0017c8, 2, 0x04, 0x00000000 },
+ { 0x0017d0, 1, 0x04, 0x000000ff },
+ { 0x0017d4, 1, 0x04, 0xffffffff },
+ { 0x0017d8, 1, 0x04, 0x00000002 },
+ { 0x0017dc, 1, 0x04, 0x00000000 },
+ { 0x0015f4, 2, 0x04, 0x00000000 },
+ { 0x001434, 2, 0x04, 0x00000000 },
+ { 0x000d74, 1, 0x04, 0x00000000 },
+ { 0x000dec, 1, 0x04, 0x00000001 },
+ { 0x0013a4, 1, 0x04, 0x00000000 },
+ { 0x001318, 1, 0x04, 0x00000001 },
+ { 0x001644, 1, 0x04, 0x00000000 },
+ { 0x000748, 1, 0x04, 0x00000000 },
+ { 0x000de8, 1, 0x04, 0x00000000 },
+ { 0x001648, 1, 0x04, 0x00000000 },
+ { 0x0012a4, 1, 0x04, 0x00000000 },
+ { 0x001120, 4, 0x04, 0x00000000 },
+ { 0x001118, 1, 0x04, 0x00000000 },
+ { 0x00164c, 1, 0x04, 0x00000000 },
+ { 0x001658, 1, 0x04, 0x00000000 },
+ { 0x001910, 1, 0x04, 0x00000290 },
+ { 0x001518, 1, 0x04, 0x00000000 },
+ { 0x00165c, 1, 0x04, 0x00000001 },
+ { 0x001520, 1, 0x04, 0x00000000 },
+ { 0x001604, 1, 0x04, 0x00000000 },
+ { 0x001570, 1, 0x04, 0x00000000 },
+ { 0x0013b0, 2, 0x04, 0x3f800000 },
+ { 0x00020c, 1, 0x04, 0x00000000 },
+ { 0x001670, 1, 0x04, 0x30201000 },
+ { 0x001674, 1, 0x04, 0x70605040 },
+ { 0x001678, 1, 0x04, 0xb8a89888 },
+ { 0x00167c, 1, 0x04, 0xf8e8d8c8 },
+ { 0x00166c, 1, 0x04, 0x00000000 },
+ { 0x001680, 1, 0x04, 0x00ffff00 },
+ { 0x0012d0, 1, 0x04, 0x00000003 },
+ { 0x0012d4, 1, 0x04, 0x00000002 },
+ { 0x001684, 2, 0x04, 0x00000000 },
+ { 0x000dac, 2, 0x04, 0x00001b02 },
+ { 0x000db4, 1, 0x04, 0x00000000 },
+ { 0x00168c, 1, 0x04, 0x00000000 },
+ { 0x0015bc, 1, 0x04, 0x00000000 },
+ { 0x00156c, 1, 0x04, 0x00000000 },
+ { 0x00187c, 1, 0x04, 0x00000000 },
+ { 0x001110, 1, 0x04, 0x00000001 },
+ { 0x000dc0, 3, 0x04, 0x00000000 },
+ { 0x001234, 1, 0x04, 0x00000000 },
+ { 0x001690, 1, 0x04, 0x00000000 },
+ { 0x0012ac, 1, 0x04, 0x00000001 },
+ { 0x0002c4, 1, 0x04, 0x00000000 },
+ { 0x000790, 5, 0x04, 0x00000000 },
+ { 0x00077c, 1, 0x04, 0x00000000 },
+ { 0x001000, 1, 0x04, 0x00000010 },
+ { 0x0010fc, 1, 0x04, 0x00000000 },
+ { 0x001290, 1, 0x04, 0x00000000 },
+ { 0x000218, 1, 0x04, 0x00000010 },
+ { 0x0012d8, 1, 0x04, 0x00000000 },
+ { 0x0012dc, 1, 0x04, 0x00000010 },
+ { 0x000d94, 1, 0x04, 0x00000001 },
+ { 0x00155c, 2, 0x04, 0x00000000 },
+ { 0x001564, 1, 0x04, 0x00000fff },
+ { 0x001574, 2, 0x04, 0x00000000 },
+ { 0x00157c, 1, 0x04, 0x000fffff },
+ { 0x001354, 1, 0x04, 0x00000000 },
+ { 0x001610, 1, 0x04, 0x00000012 },
+ { 0x001608, 2, 0x04, 0x00000000 },
+ { 0x00260c, 1, 0x04, 0x00000000 },
+ { 0x0007ac, 1, 0x04, 0x00000000 },
+ { 0x00162c, 1, 0x04, 0x00000003 },
+ { 0x000210, 1, 0x04, 0x00000000 },
+ { 0x000320, 1, 0x04, 0x00000000 },
+ { 0x000324, 6, 0x04, 0x3f800000 },
+ { 0x000750, 1, 0x04, 0x00000000 },
+ { 0x000760, 1, 0x04, 0x39291909 },
+ { 0x000764, 1, 0x04, 0x79695949 },
+ { 0x000768, 1, 0x04, 0xb9a99989 },
+ { 0x00076c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x000770, 1, 0x04, 0x30201000 },
+ { 0x000774, 1, 0x04, 0x70605040 },
+ { 0x000778, 1, 0x04, 0x00009080 },
+ { 0x000780, 1, 0x04, 0x39291909 },
+ { 0x000784, 1, 0x04, 0x79695949 },
+ { 0x000788, 1, 0x04, 0xb9a99989 },
+ { 0x00078c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x0007d0, 1, 0x04, 0x30201000 },
+ { 0x0007d4, 1, 0x04, 0x70605040 },
+ { 0x0007d8, 1, 0x04, 0x00009080 },
+ { 0x00037c, 1, 0x04, 0x00000001 },
+ { 0x000740, 2, 0x04, 0x00000000 },
+ { 0x002600, 1, 0x04, 0x00000000 },
+ { 0x001918, 1, 0x04, 0x00000000 },
+ { 0x00191c, 1, 0x04, 0x00000900 },
+ { 0x001920, 1, 0x04, 0x00000405 },
+ { 0x001308, 1, 0x04, 0x00000001 },
+ { 0x001924, 1, 0x04, 0x00000000 },
+ { 0x0013ac, 1, 0x04, 0x00000000 },
+ { 0x00192c, 1, 0x04, 0x00000001 },
+ { 0x00193c, 1, 0x04, 0x00002c1c },
+ { 0x000d7c, 1, 0x04, 0x00000000 },
+ { 0x000f8c, 1, 0x04, 0x00000000 },
+ { 0x0002c0, 1, 0x04, 0x00000001 },
+ { 0x001510, 1, 0x04, 0x00000000 },
+ { 0x001940, 1, 0x04, 0x00000000 },
+ { 0x000ff4, 2, 0x04, 0x00000000 },
+ { 0x00194c, 2, 0x04, 0x00000000 },
+ { 0x001968, 1, 0x04, 0x00000000 },
+ { 0x001590, 1, 0x04, 0x0000003f },
+ { 0x0007e8, 4, 0x04, 0x00000000 },
+ { 0x00196c, 1, 0x04, 0x00000011 },
+ { 0x0002e4, 1, 0x04, 0x0000b001 },
+ { 0x00036c, 2, 0x04, 0x00000000 },
+ { 0x00197c, 1, 0x04, 0x00000000 },
+ { 0x000fcc, 2, 0x04, 0x00000000 },
+ { 0x0002d8, 1, 0x04, 0x00000040 },
+ { 0x001980, 1, 0x04, 0x00000080 },
+ { 0x001504, 1, 0x04, 0x00000080 },
+ { 0x001984, 1, 0x04, 0x00000000 },
+ { 0x000300, 1, 0x04, 0x00000001 },
+ { 0x0013a8, 1, 0x04, 0x00000000 },
+ { 0x0012ec, 1, 0x04, 0x00000000 },
+ { 0x001310, 1, 0x04, 0x00000000 },
+ { 0x001314, 1, 0x04, 0x00000001 },
+ { 0x001380, 1, 0x04, 0x00000000 },
+ { 0x001384, 4, 0x04, 0x00000001 },
+ { 0x001394, 1, 0x04, 0x00000000 },
+ { 0x00139c, 1, 0x04, 0x00000000 },
+ { 0x001398, 1, 0x04, 0x00000000 },
+ { 0x001594, 1, 0x04, 0x00000000 },
+ { 0x001598, 4, 0x04, 0x00000001 },
+ { 0x000f54, 3, 0x04, 0x00000000 },
+ { 0x0019bc, 1, 0x04, 0x00000000 },
+ { 0x000f9c, 2, 0x04, 0x00000000 },
+ { 0x0012cc, 1, 0x04, 0x00000000 },
+ { 0x0012e8, 1, 0x04, 0x00000000 },
+ { 0x00130c, 1, 0x04, 0x00000001 },
+ { 0x001360, 8, 0x04, 0x00000000 },
+ { 0x00133c, 2, 0x04, 0x00000001 },
+ { 0x001344, 1, 0x04, 0x00000002 },
+ { 0x001348, 2, 0x04, 0x00000001 },
+ { 0x001350, 1, 0x04, 0x00000002 },
+ { 0x001358, 1, 0x04, 0x00000001 },
+ { 0x0012e4, 1, 0x04, 0x00000000 },
+ { 0x00131c, 4, 0x04, 0x00000000 },
+ { 0x0019c0, 1, 0x04, 0x00000000 },
+ { 0x001140, 1, 0x04, 0x00000000 },
+ { 0x0019c4, 1, 0x04, 0x00000000 },
+ { 0x0019c8, 1, 0x04, 0x00001500 },
+ { 0x00135c, 1, 0x04, 0x00000000 },
+ { 0x000f90, 1, 0x04, 0x00000000 },
+ { 0x0019e0, 8, 0x04, 0x00000001 },
+ { 0x0019cc, 1, 0x04, 0x00000001 },
+ { 0x0015b8, 1, 0x04, 0x00000000 },
+ { 0x001a00, 1, 0x04, 0x00001111 },
+ { 0x001a04, 7, 0x04, 0x00000000 },
+ { 0x000d6c, 2, 0x04, 0xffff0000 },
+ { 0x0010f8, 1, 0x04, 0x00001010 },
+ { 0x000d80, 5, 0x04, 0x00000000 },
+ { 0x000da0, 1, 0x04, 0x00000000 },
+ { 0x0007a4, 2, 0x04, 0x00000000 },
+ { 0x001508, 1, 0x04, 0x80000000 },
+ { 0x00150c, 1, 0x04, 0x40000000 },
+ { 0x001668, 1, 0x04, 0x00000000 },
+ { 0x000318, 2, 0x04, 0x00000008 },
+ { 0x000d9c, 1, 0x04, 0x00000001 },
+ { 0x000ddc, 1, 0x04, 0x00000002 },
+ { 0x000374, 1, 0x04, 0x00000000 },
+ { 0x000378, 1, 0x04, 0x00000020 },
+ { 0x0007dc, 1, 0x04, 0x00000000 },
+ { 0x00074c, 1, 0x04, 0x00000055 },
+ { 0x001420, 1, 0x04, 0x00000003 },
+ { 0x0017bc, 2, 0x04, 0x00000000 },
+ { 0x0017c4, 1, 0x04, 0x00000001 },
+ { 0x001008, 1, 0x04, 0x00000008 },
+ { 0x00100c, 1, 0x04, 0x00000040 },
+ { 0x001010, 1, 0x04, 0x0000012c },
+ { 0x000d60, 1, 0x04, 0x00000040 },
+ { 0x00075c, 1, 0x04, 0x00000003 },
+ { 0x001018, 1, 0x04, 0x00000020 },
+ { 0x00101c, 1, 0x04, 0x00000001 },
+ { 0x001020, 1, 0x04, 0x00000020 },
+ { 0x001024, 1, 0x04, 0x00000001 },
+ { 0x001444, 3, 0x04, 0x00000000 },
+ { 0x000360, 1, 0x04, 0x20164010 },
+ { 0x000364, 1, 0x04, 0x00000020 },
+ { 0x000368, 1, 0x04, 0x00000000 },
+ { 0x000de4, 1, 0x04, 0x00000000 },
+ { 0x000204, 1, 0x04, 0x00000006 },
+ { 0x000208, 1, 0x04, 0x00000000 },
+ { 0x0002cc, 2, 0x04, 0x003fffff },
+ { 0x001220, 1, 0x04, 0x00000005 },
+ { 0x000fdc, 1, 0x04, 0x00000000 },
+ { 0x000f98, 1, 0x04, 0x00400008 },
+ { 0x001284, 1, 0x04, 0x08000080 },
+ { 0x001450, 1, 0x04, 0x00400008 },
+ { 0x001454, 1, 0x04, 0x08000080 },
+ { 0x000214, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_pack
+nvf0_grctx_pack_mthd[] = {
+ { nvf0_grctx_init_a197_0, 0xa197 },
+ { nvc0_grctx_init_902d_0, 0x902d },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvf0_grctx_init_fe_0[] = {
{ 0x404004, 8, 0x04, 0x00000000 },
{ 0x404024, 1, 0x04, 0x0000e000 },
{ 0x404028, 8, 0x04, 0x00000000 },
@@ -50,8 +620,8 @@ nvf0_grctx_init_unk40xx[] = {
{}
};
-struct nvc0_graph_init
-nvf0_grctx_init_unk44xx[] = {
+const struct nvc0_graph_init
+nvf0_grctx_init_pri_0[] = {
{ 0x404404, 12, 0x04, 0x00000000 },
{ 0x404438, 1, 0x04, 0x00000000 },
{ 0x404460, 2, 0x04, 0x00000000 },
@@ -62,23 +632,18 @@ nvf0_grctx_init_unk44xx[] = {
{}
};
-struct nvc0_graph_init
-nvf0_grctx_init_unk5bxx[] = {
+const struct nvc0_graph_init
+nvf0_grctx_init_cwd_0[] = {
{ 0x405b00, 1, 0x04, 0x00000000 },
{ 0x405b10, 1, 0x04, 0x00001000 },
{ 0x405b20, 1, 0x04, 0x04000000 },
{}
};
-struct nvc0_graph_init
-nvf0_grctx_init_unk60xx[] = {
+static const struct nvc0_graph_init
+nvf0_grctx_init_pd_0[] = {
{ 0x406020, 1, 0x04, 0x034103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
- {}
-};
-
-static struct nvc0_graph_init
-nvf0_grctx_init_unk64xx[] = {
{ 0x4064a8, 1, 0x04, 0x00000000 },
{ 0x4064ac, 1, 0x04, 0x00003fff },
{ 0x4064b0, 3, 0x04, 0x00000000 },
@@ -90,8 +655,8 @@ nvf0_grctx_init_unk64xx[] = {
{}
};
-static struct nvc0_graph_init
-nvf0_grctx_init_unk88xx[] = {
+static const struct nvc0_graph_init
+nvf0_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x12802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x1003e005 },
@@ -103,22 +668,23 @@ nvf0_grctx_init_unk88xx[] = {
{}
};
-static struct nvc0_graph_init
-nvf0_grctx_init_gpc_0[] = {
- { 0x418380, 1, 0x04, 0x00000016 },
- { 0x418400, 1, 0x04, 0x38004e00 },
- { 0x418404, 1, 0x04, 0x71e0ffff },
- { 0x41840c, 1, 0x04, 0x00001008 },
- { 0x418410, 1, 0x04, 0x0fff0fff },
- { 0x418414, 1, 0x04, 0x02200fff },
- { 0x418450, 6, 0x04, 0x00000000 },
- { 0x418468, 1, 0x04, 0x00000001 },
- { 0x41846c, 2, 0x04, 0x00000000 },
- { 0x418600, 1, 0x04, 0x0000001f },
- { 0x418684, 1, 0x04, 0x0000000f },
- { 0x418700, 1, 0x04, 0x00000002 },
- { 0x418704, 1, 0x04, 0x00000080 },
- { 0x418708, 3, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nvf0_grctx_pack_hub[] = {
+ { nvc0_grctx_init_main_0 },
+ { nvf0_grctx_init_fe_0 },
+ { nvf0_grctx_init_pri_0 },
+ { nve4_grctx_init_memfmt_0 },
+ { nve4_grctx_init_ds_0 },
+ { nvf0_grctx_init_cwd_0 },
+ { nvf0_grctx_init_pd_0 },
+ { nvc0_grctx_init_rstr2d_0 },
+ { nve4_grctx_init_scc_0 },
+ { nvf0_grctx_init_be_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvf0_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x7006860a },
{ 0x418808, 1, 0x04, 0x00000000 },
{ 0x41880c, 1, 0x04, 0x00000030 },
@@ -129,36 +695,31 @@ nvf0_grctx_init_gpc_0[] = {
{ 0x4188e0, 1, 0x04, 0x01000000 },
{ 0x4188e8, 5, 0x04, 0x00000000 },
{ 0x4188fc, 1, 0x04, 0x20100018 },
- { 0x41891c, 1, 0x04, 0x00ff00ff },
- { 0x418924, 1, 0x04, 0x00000000 },
- { 0x418928, 1, 0x04, 0x00ffff00 },
- { 0x41892c, 1, 0x04, 0x0000ff00 },
- { 0x418b00, 1, 0x04, 0x00000006 },
- { 0x418b08, 1, 0x04, 0x0a418820 },
- { 0x418b0c, 1, 0x04, 0x062080e6 },
- { 0x418b10, 1, 0x04, 0x020398a4 },
- { 0x418b14, 1, 0x04, 0x0e629062 },
- { 0x418b18, 1, 0x04, 0x0a418820 },
- { 0x418b1c, 1, 0x04, 0x000000e6 },
- { 0x418bb8, 1, 0x04, 0x00000103 },
- { 0x418c08, 1, 0x04, 0x00000001 },
- { 0x418c10, 8, 0x04, 0x00000000 },
- { 0x418c40, 1, 0x04, 0xffffffff },
- { 0x418c6c, 1, 0x04, 0x00000001 },
- { 0x418c80, 1, 0x04, 0x20200004 },
- { 0x418c8c, 1, 0x04, 0x00000001 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvf0_grctx_init_gpc_unk_2[] = {
{ 0x418d24, 1, 0x04, 0x00000000 },
- { 0x419000, 1, 0x04, 0x00000780 },
- { 0x419004, 2, 0x04, 0x00000000 },
- { 0x419014, 1, 0x04, 0x00000004 },
{}
};
-static struct nvc0_graph_init
-nvf0_grctx_init_tpc[] = {
- { 0x419848, 1, 0x04, 0x00000000 },
- { 0x419864, 1, 0x04, 0x00000129 },
- { 0x419888, 1, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nvf0_grctx_pack_gpc[] = {
+ { nvc0_grctx_init_gpc_unk_0 },
+ { nvd9_grctx_init_prop_0 },
+ { nvd9_grctx_init_gpc_unk_1 },
+ { nvf0_grctx_init_setup_0 },
+ { nvc0_grctx_init_zcull_0 },
+ { nvd9_grctx_init_crstr_0 },
+ { nve4_grctx_init_gpm_0 },
+ { nvf0_grctx_init_gpc_unk_2 },
+ { nvc0_grctx_init_gcc_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvf0_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000000f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000021 },
@@ -169,14 +730,29 @@ nvf0_grctx_init_tpc[] = {
{ 0x419a20, 1, 0x04, 0x00020800 },
{ 0x419a30, 1, 0x04, 0x00000001 },
{ 0x419ac4, 1, 0x04, 0x0037f440 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvf0_grctx_init_mpc_0[] = {
{ 0x419c00, 1, 0x04, 0x0000001a },
{ 0x419c04, 1, 0x04, 0x80000006 },
{ 0x419c08, 1, 0x04, 0x00000002 },
{ 0x419c20, 1, 0x04, 0x00000000 },
{ 0x419c24, 1, 0x04, 0x00084210 },
{ 0x419c28, 1, 0x04, 0x3efbefbe },
+ {}
+};
+
+const struct nvc0_graph_init
+nvf0_grctx_init_l1c_0[] = {
{ 0x419ce8, 1, 0x04, 0x00000000 },
{ 0x419cf4, 1, 0x04, 0x00000203 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvf0_grctx_init_sm_0[] = {
{ 0x419e04, 1, 0x04, 0x00000000 },
{ 0x419e08, 1, 0x04, 0x0000001d },
{ 0x419e0c, 1, 0x04, 0x00000000 },
@@ -189,8 +765,8 @@ nvf0_grctx_init_tpc[] = {
{ 0x419e5c, 3, 0x04, 0x00000000 },
{ 0x419e68, 1, 0x04, 0x00000002 },
{ 0x419e6c, 12, 0x04, 0x00000000 },
- { 0x419eac, 1, 0x04, 0x00001fcf },
- { 0x419eb0, 1, 0x04, 0x0db00da0 },
+ { 0x419eac, 1, 0x04, 0x00001f8f },
+ { 0x419eb0, 1, 0x04, 0x0db00d2f },
{ 0x419eb8, 1, 0x04, 0x00000000 },
{ 0x419ec8, 1, 0x04, 0x0001304f },
{ 0x419f30, 4, 0x04, 0x00000000 },
@@ -203,24 +779,36 @@ nvf0_grctx_init_tpc[] = {
{}
};
-static struct nvc0_graph_init
-nvf0_grctx_init_unk[] = {
- { 0x41be24, 1, 0x04, 0x00000006 },
+static const struct nvc0_graph_pack
+nvf0_grctx_pack_tpc[] = {
+ { nvd7_grctx_init_pe_0 },
+ { nvf0_grctx_init_tex_0 },
+ { nvf0_grctx_init_mpc_0 },
+ { nvf0_grctx_init_l1c_0 },
+ { nvf0_grctx_init_sm_0 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvf0_grctx_init_cbm_0[] = {
{ 0x41bec0, 1, 0x04, 0x10000000 },
{ 0x41bec4, 1, 0x04, 0x00037f7f },
{ 0x41bee4, 1, 0x04, 0x00000000 },
- { 0x41bf00, 1, 0x04, 0x0a418820 },
- { 0x41bf04, 1, 0x04, 0x062080e6 },
- { 0x41bf08, 1, 0x04, 0x020398a4 },
- { 0x41bf0c, 1, 0x04, 0x0e629062 },
- { 0x41bf10, 1, 0x04, 0x0a418820 },
- { 0x41bf14, 1, 0x04, 0x000000e6 },
- { 0x41bfd0, 1, 0x04, 0x00900103 },
- { 0x41bfe0, 1, 0x04, 0x00400001 },
- { 0x41bfe4, 1, 0x04, 0x00000000 },
{}
};
+static const struct nvc0_graph_pack
+nvf0_grctx_pack_ppc[] = {
+ { nve4_grctx_init_pes_0 },
+ { nvf0_grctx_init_cbm_0 },
+ { nvd7_grctx_init_wwdx_0 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
static void
nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
@@ -273,39 +861,6 @@ nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
mmio_list(0x17e920, 0x00090a05, 0, 0);
}
-static struct nvc0_graph_init *
-nvf0_grctx_init_hub[] = {
- nvc0_grctx_init_base,
- nvf0_grctx_init_unk40xx,
- nvf0_grctx_init_unk44xx,
- nve4_grctx_init_unk46xx,
- nve4_grctx_init_unk47xx,
- nve4_grctx_init_unk58xx,
- nvf0_grctx_init_unk5bxx,
- nvf0_grctx_init_unk60xx,
- nvf0_grctx_init_unk64xx,
- nve4_grctx_init_unk80xx,
- nvf0_grctx_init_unk88xx,
- NULL
-};
-
-struct nvc0_graph_init *
-nvf0_grctx_init_gpc[] = {
- nvf0_grctx_init_gpc_0,
- nvc0_grctx_init_gpc_1,
- nvf0_grctx_init_tpc,
- nvf0_grctx_init_unk,
- NULL
-};
-
-static struct nvc0_graph_mthd
-nvf0_grctx_init_mthd[] = {
- { 0xa197, nvc1_grctx_init_9097, },
- { 0x902d, nvc0_grctx_init_902d, },
- { 0x902d, nvc0_grctx_init_mthd_magic, },
- {}
-};
-
struct nouveau_oclass *
nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xf0),
@@ -317,11 +872,14 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
- .main = nve4_grctx_generate_main,
- .mods = nvf0_grctx_generate_mods,
- .unkn = nve4_grctx_generate_unkn,
- .hub = nvf0_grctx_init_hub,
- .gpc = nvf0_grctx_init_gpc,
- .icmd = nvc0_grctx_init_icmd,
- .mthd = nvf0_grctx_init_mthd,
+ .main = nve4_grctx_generate_main,
+ .mods = nvf0_grctx_generate_mods,
+ .unkn = nve4_grctx_generate_unkn,
+ .hub = nvf0_grctx_pack_hub,
+ .gpc = nvf0_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = nvf0_grctx_pack_tpc,
+ .ppc = nvf0_grctx_pack_ppc,
+ .icmd = nvf0_grctx_pack_icmd,
+ .mthd = nvf0_grctx_pack_mthd,
}.base;
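
For readers unfamiliar with the table format used throughout the context file above: each nvc0_graph_init entry appears to encode one burst of identical register writes, and the new nvc0_graph_pack arrays simply group those tables per unit (hub, gpc, tpc, ppc) for the context generator. A minimal sketch of how such a table could be applied follows; the { addr, count, pitch, data } field interpretation and the apply_init() helper are illustrative assumptions, not the driver's actual walker.

#include <stdint.h>

struct nvc0_graph_init {
	uint32_t addr;   /* first register offset */
	uint8_t  count;  /* number of registers to program */
	uint8_t  pitch;  /* byte stride between consecutive registers */
	uint32_t data;   /* value written to each register */
};

/* Hypothetical walker: for every entry, program 'count' registers
 * starting at 'addr', stepping by 'pitch' bytes, all with the same
 * 'data' value.  The empty terminator ({}) has count == 0 and ends
 * the table. */
static void apply_init(const struct nvc0_graph_init *init,
		       void (*wr32)(uint32_t addr, uint32_t data))
{
	for (; init->count; init++) {
		uint32_t addr = init->addr;
		int i;

		for (i = 0; i < init->count; i++, addr += init->pitch)
			wr32(addr, init->data);
	}
}
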
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
index e148961b8075..e37d8106ae1a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
@@ -228,7 +228,7 @@ mmctx_xfer:
and $r11 0x1f
cmpu b32 $r11 0x10
bra ne #mmctx_fini_wait
- mov $r10 2 // DONE_MMCTX
+ mov $r10 5 // DONE_MMCTX
call(wait_donez)
bra #mmctx_done
mmctx_stop:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
index 96cbcea3b2c9..2f7345f7fe07 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
@@ -78,7 +78,12 @@ error:
//
init:
clear b32 $r0
- mov $sp $r0
+
+ // setup stack
+ nv_iord($r1, NV_PGRAPH_GPCX_GPCCS_CAPS, 0)
+ extr $r1 $r1 9:17
+ shl b32 $r1 8
+ mov $sp $r1
// enable fifo access
mov $r2 NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO
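
As a quick reference for the stack setup added to the GPCCS init path above: the falcon reads NV_PGRAPH_GPCX_GPCCS_CAPS, extracts bits 9:17, and scales the result by 256 before loading $sp. A rough C sketch of that computation is below; only the bit positions and the shift come from the assembly itself, while treating those bits as the data-memory size in 256-byte units is an assumption made for illustration.

#include <stdint.h>

/* Sketch of the $sp value computed above: bits 9:17 of the CAPS
 * register (assumed here to report data-memory size in 256-byte
 * units), scaled by 256 so the stack begins at the top of data
 * memory rather than at address zero. */
static uint32_t gpccs_initial_sp(uint32_t caps)
{
	return ((caps >> 9) & 0x1ff) << 8;
}
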
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5
new file mode 100644
index 000000000000..e730603891d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#define NV_PGRAPH_GPCX_UNK__SIZE 0x00000002
+
+#define CHIPSET GK208
+#include "macros.fuc"
+
+.section #gm107_grgpc_data
+#define INCLUDE_DATA
+#include "com.fuc"
+#include "gpc.fuc"
+#undef INCLUDE_DATA
+
+.section #gm107_grgpc_code
+#define INCLUDE_CODE
+bra #init
+#include "com.fuc"
+#include "gpc.fuc"
+.align 256
+#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h
new file mode 100644
index 000000000000..6d53b67dd3c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h
@@ -0,0 +1,473 @@
+uint32_t gm107_grgpc_data[] = {
+/* 0x0000: gpc_mmio_list_head */
+ 0x0000006c,
+/* 0x0004: gpc_mmio_list_tail */
+/* 0x0004: tpc_mmio_list_head */
+ 0x0000006c,
+/* 0x0008: tpc_mmio_list_tail */
+/* 0x0008: unk_mmio_list_head */
+ 0x0000006c,
+/* 0x000c: unk_mmio_list_tail */
+ 0x0000006c,
+/* 0x0010: gpc_id */
+ 0x00000000,
+/* 0x0014: tpc_count */
+ 0x00000000,
+/* 0x0018: tpc_mask */
+ 0x00000000,
+/* 0x001c: unk_count */
+ 0x00000000,
+/* 0x0020: unk_mask */
+ 0x00000000,
+/* 0x0024: cmd_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t gm107_grgpc_code[] = {
+ 0x03140ef5,
+/* 0x0004: queue_put */
+ 0x9800d898,
+ 0x86f001d9,
+ 0xf489a408,
+ 0x020f0b1b,
+ 0x0002f87e,
+/* 0x001a: queue_put_next */
+ 0x98c400f8,
+ 0x0384b607,
+ 0xb6008dbb,
+ 0x8eb50880,
+ 0x018fb500,
+ 0xf00190b6,
+ 0xd9b50f94,
+/* 0x0037: queue_get */
+ 0xf400f801,
+ 0xd8980131,
+ 0x01d99800,
+ 0x0bf489a4,
+ 0x0789c421,
+ 0xbb0394b6,
+ 0x90b6009d,
+ 0x009e9808,
+ 0xb6019f98,
+ 0x84f00180,
+ 0x00d8b50f,
+/* 0x0063: queue_get_done */
+ 0xf80132f4,
+/* 0x0065: nv_rd32 */
+ 0xf0ecb200,
+ 0x00801fc9,
+ 0x0cf601ca,
+/* 0x0073: nv_rd32_wait */
+ 0x8c04bd00,
+ 0xcf01ca00,
+ 0xccc800cc,
+ 0xf61bf41f,
+ 0xec7e060a,
+ 0x008f0000,
+ 0xffcf01cb,
+/* 0x008f: nv_wr32 */
+ 0x8000f800,
+ 0xf601cc00,
+ 0x04bd000f,
+ 0xc9f0ecb2,
+ 0x1ec9f01f,
+ 0x01ca0080,
+ 0xbd000cf6,
+/* 0x00a9: nv_wr32_wait */
+ 0xca008c04,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f61b,
+/* 0x00b8: wait_donez */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x00cf: wait_donez_ne */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf61bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x00ec: wait_doneo */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x0103: wait_doneo_e */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf60bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x0120: mmctx_size */
+/* 0x0122: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0x1bf4efa4,
+ 0xf89fb2ec,
+/* 0x013d: mmctx_xfer */
+ 0xf094bd00,
+ 0x00800199,
+ 0x09f60237,
+ 0xbd04bd00,
+ 0x05bbfd94,
+ 0x800f0bf4,
+ 0xf601c400,
+ 0x04bd000b,
+/* 0x015f: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0xc6008018,
+ 0x000ef601,
+ 0x008004bd,
+ 0x0ff601c7,
+ 0xf004bd00,
+/* 0x017a: mmctx_multi_disabled */
+ 0xabc80199,
+ 0x10b4b600,
+ 0xc80cb9f0,
+ 0xe4b601ae,
+ 0x05befd11,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x0195: mmctx_exec_loop */
+/* 0x0195: mmctx_wait_free */
+ 0xc5008e04,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f60b,
+ 0x05e9fd00,
+ 0x01c80080,
+ 0xbd000ef6,
+ 0x04c0b604,
+ 0x1bf4cda4,
+ 0x02abc8df,
+/* 0x01bf: mmctx_fini_wait */
+ 0x8b1c1bf4,
+ 0xcf01c500,
+ 0xb4f000bb,
+ 0x10b4b01f,
+ 0x0af31bf4,
+ 0x00b87e05,
+ 0x250ef400,
+/* 0x01d8: mmctx_stop */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x12b9f00c,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x01ed: mmctx_stop_wait */
+ 0xc5008b04,
+ 0x00bbcf01,
+ 0xf412bbc8,
+/* 0x01fa: mmctx_done */
+ 0x94bdf61b,
+ 0x800199f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x020a: strand_wait */
+ 0xa0f900f8,
+ 0xb87e020a,
+ 0xa0fc0000,
+/* 0x0216: strand_pre */
+ 0x0c0900f8,
+ 0x024afc80,
+ 0xbd0009f6,
+ 0x020a7e04,
+/* 0x0227: strand_post */
+ 0x0900f800,
+ 0x4afc800d,
+ 0x0009f602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0238: strand_set */
+ 0xfc800f0c,
+ 0x0cf6024f,
+ 0x0c04bd00,
+ 0x4afc800b,
+ 0x000cf602,
+ 0xfc8004bd,
+ 0x0ef6024f,
+ 0x0c04bd00,
+ 0x4afc800a,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0268: strand_ctx_init */
+ 0x99f094bd,
+ 0x37008003,
+ 0x0009f602,
+ 0x167e04bd,
+ 0x030e0002,
+ 0x0002387e,
+ 0xfc80c4bd,
+ 0x0cf60247,
+ 0x0c04bd00,
+ 0x4afc8001,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x0c920002,
+ 0x46fc8001,
+ 0x000cf602,
+ 0x020c04bd,
+ 0x024afc80,
+ 0xbd000cf6,
+ 0x020a7e04,
+ 0x02277e00,
+ 0x42008800,
+ 0x20008902,
+ 0x0099cf02,
+/* 0x02c7: ctx_init_strand_loop */
+ 0xf608fe95,
+ 0x8ef6008e,
+ 0x808acf40,
+ 0xb606a5b6,
+ 0xeabb01a0,
+ 0x0480b600,
+ 0xf40192b6,
+ 0xe4b6e81b,
+ 0xf2efbc08,
+ 0x99f094bd,
+ 0x17008003,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x02f8: error */
+ 0xffb2e0f9,
+ 0x4098148e,
+ 0x00008f7e,
+ 0xffb2010f,
+ 0x409c1c8e,
+ 0x00008f7e,
+ 0x00f8e0fc,
+/* 0x0314: init */
+ 0x004104bd,
+ 0x0011cf42,
+ 0x010911e7,
+ 0xfe0814b6,
+ 0x02020014,
+ 0xf6120040,
+ 0x04bd0002,
+ 0xfe047241,
+ 0x00400010,
+ 0x0000f607,
+ 0x040204bd,
+ 0xf6040040,
+ 0x04bd0002,
+ 0x821031f4,
+ 0xcf018200,
+ 0x01030022,
+ 0xbb1f24f0,
+ 0x32b60432,
+ 0x0502b501,
+ 0x820603b5,
+ 0xcf018600,
+ 0x02b50022,
+ 0x0c308e04,
+ 0xbd24bd50,
+/* 0x0377: init_unk_loop */
+ 0x7e44bd34,
+ 0xb0000065,
+ 0x0bf400f6,
+ 0xbb010f0e,
+ 0x4ffd04f2,
+ 0x0130b605,
+/* 0x038c: init_unk_next */
+ 0xb60120b6,
+ 0x26b004e0,
+ 0xe21bf402,
+/* 0x0398: init_unk_done */
+ 0xb50703b5,
+ 0x00820804,
+ 0x22cf0201,
+ 0x9534bd00,
+ 0x00800825,
+ 0x05f601c0,
+ 0x8004bd00,
+ 0xf601c100,
+ 0x04bd0005,
+ 0x98000e98,
+ 0x207e010f,
+ 0x2fbb0001,
+ 0x003fbb00,
+ 0x98010e98,
+ 0x207e020f,
+ 0x0e980001,
+ 0x00effd05,
+ 0xbb002ebb,
+ 0x0e98003e,
+ 0x030f9802,
+ 0x0001207e,
+ 0xfd070e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0x800235b6,
+ 0xf601d300,
+ 0x04bd0003,
+ 0xb60825b6,
+ 0x20b60635,
+ 0x0130b601,
+ 0xb60824b6,
+ 0x2fb20834,
+ 0x0002687e,
+ 0x80003fbb,
+ 0xf6020100,
+ 0x04bd0003,
+ 0x29f024bd,
+ 0x3000801f,
+ 0x0002f602,
+/* 0x0436: main */
+ 0x31f404bd,
+ 0x0028f400,
+ 0x377e240d,
+ 0x01f40000,
+ 0x04e4b0f4,
+ 0xfe1d18f4,
+ 0x06020181,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x097e0018,
+ 0x0ef40005,
+/* 0x0465: main_not_ctx_xfer */
+ 0x10ef94d4,
+ 0x7e01f5f0,
+ 0xf40002f8,
+/* 0x0472: ih */
+ 0x80f9c70e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0x4a04bdf0,
+ 0xaacf0200,
+ 0x04abc400,
+ 0x0d1f0bf4,
+ 0x1a004e24,
+ 0x4f00eecf,
+ 0xffcf1900,
+ 0x00047e00,
+ 0x40010e00,
+ 0x0ef61d00,
+/* 0x04af: ih_no_fifo */
+ 0x4004bd00,
+ 0x0af60100,
+ 0xfc04bd00,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x32f480fc,
+/* 0x04cf: hub_barrier_done */
+ 0x0f01f800,
+ 0x040e9801,
+ 0xb204febb,
+ 0x94188eff,
+ 0x008f7e40,
+/* 0x04e3: ctx_redswitch */
+ 0x0f00f800,
+ 0x85008020,
+ 0x000ff601,
+ 0x080e04bd,
+/* 0x04f0: ctx_redswitch_delay */
+ 0xf401e2b6,
+ 0xf5f1fd1b,
+ 0xf5f10800,
+ 0x00800200,
+ 0x0ff60185,
+ 0xf804bd00,
+/* 0x0509: ctx_xfer */
+ 0x81008000,
+ 0x000ff602,
+ 0x11f404bd,
+ 0x04e37e07,
+/* 0x0519: ctx_xfer_not_load */
+ 0x02167e00,
+ 0x8024bd00,
+ 0xf60247fc,
+ 0x04bd0002,
+ 0xb6012cf0,
+ 0xfc800320,
+ 0x02f6024a,
+ 0xf004bd00,
+ 0xa5f001ac,
+ 0x00008b02,
+ 0x040c9850,
+ 0xbb0fc4b6,
+ 0x0c9800bc,
+ 0x010d9800,
+ 0x3d7e000e,
+ 0xacf00001,
+ 0x40008b01,
+ 0x040c9850,
+ 0xbb0fc4b6,
+ 0x0c9800bc,
+ 0x020d9801,
+ 0x4e060f98,
+ 0x3d7e0800,
+ 0xacf00001,
+ 0x04a5f001,
+ 0x5030008b,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x020c9800,
+ 0x98030d98,
+ 0x004e080f,
+ 0x013d7e02,
+ 0x020a7e00,
+ 0x0601f400,
+/* 0x05a3: ctx_xfer_post */
+ 0x7e0712f4,
+/* 0x05a7: ctx_xfer_done */
+ 0x7e000227,
+ 0xf80004cf,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
index 27dc1280dc10..31922707794f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
@@ -177,7 +177,7 @@ uint32_t nv108_grgpc_code[] = {
0xb4f000bb,
0x10b4b01f,
0x0af31bf4,
- 0x00b87e02,
+ 0x00b87e05,
0x250ef400,
/* 0x01d8: mmctx_stop */
0xb600abc8,
@@ -269,186 +269,186 @@ uint32_t nv108_grgpc_code[] = {
0x00008f7e,
0x00f8e0fc,
/* 0x0314: init */
- 0x04fe04bd,
- 0x40020200,
- 0x02f61200,
- 0x4104bd00,
- 0x10fe0465,
- 0x07004000,
- 0xbd0000f6,
- 0x40040204,
- 0x02f60400,
- 0xf404bd00,
- 0x00821031,
- 0x22cf0182,
- 0xf0010300,
- 0x32bb1f24,
- 0x0132b604,
- 0xb50502b5,
- 0x00820603,
- 0x22cf0186,
- 0x0402b500,
- 0x500c308e,
- 0x34bd24bd,
-/* 0x036a: init_unk_loop */
- 0x657e44bd,
- 0xf6b00000,
- 0x0e0bf400,
- 0xf2bb010f,
- 0x054ffd04,
-/* 0x037f: init_unk_next */
- 0xb60130b6,
- 0xe0b60120,
- 0x0126b004,
-/* 0x038b: init_unk_done */
- 0xb5e21bf4,
- 0x04b50703,
- 0x01008208,
- 0x0022cf02,
- 0x259534bd,
- 0xc0008008,
- 0x0005f601,
- 0x008004bd,
- 0x05f601c1,
- 0x9804bd00,
- 0x0f98000e,
- 0x01207e01,
- 0x002fbb00,
- 0x98003fbb,
- 0x0f98010e,
- 0x01207e02,
- 0x050e9800,
- 0xbb00effd,
- 0x3ebb002e,
- 0x020e9800,
- 0x7e030f98,
- 0x98000120,
- 0xeffd070e,
- 0x002ebb00,
- 0xb6003ebb,
- 0x00800235,
- 0x03f601d3,
- 0xb604bd00,
- 0x35b60825,
- 0x0120b606,
- 0xb60130b6,
- 0x34b60824,
- 0x7e2fb208,
- 0xbb000268,
- 0x0080003f,
- 0x03f60201,
- 0xbd04bd00,
- 0x1f29f024,
- 0x02300080,
- 0xbd0002f6,
-/* 0x0429: main */
- 0x0031f404,
- 0x0d0028f4,
- 0x00377e24,
- 0xf401f400,
- 0xf404e4b0,
- 0x81fe1d18,
- 0xbd060201,
- 0x0412fd20,
- 0xfd01e4b6,
- 0x18fe051e,
- 0x04fc7e00,
- 0xd40ef400,
-/* 0x0458: main_not_ctx_xfer */
- 0xf010ef94,
- 0xf87e01f5,
- 0x0ef40002,
-/* 0x0465: ih */
- 0xfe80f9c7,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x004a04bd,
- 0x00aacf02,
- 0xf404abc4,
- 0x240d1f0b,
- 0xcf1a004e,
- 0x004f00ee,
- 0x00ffcf19,
- 0x0000047e,
- 0x0040010e,
- 0x000ef61d,
-/* 0x04a2: ih_no_fifo */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x04c2: hub_barrier_done */
- 0x010f01f8,
- 0xbb040e98,
- 0xffb204fe,
- 0x4094188e,
- 0x00008f7e,
-/* 0x04d6: ctx_redswitch */
- 0x200f00f8,
- 0x01850080,
- 0xbd000ff6,
-/* 0x04e3: ctx_redswitch_delay */
- 0xb6080e04,
- 0x1bf401e2,
- 0x00f5f1fd,
- 0x00f5f108,
- 0x85008002,
+ 0x004104bd,
+ 0x0011cf42,
+ 0x010911e7,
+ 0xfe0814b6,
+ 0x02020014,
+ 0xf6120040,
+ 0x04bd0002,
+ 0xfe047241,
+ 0x00400010,
+ 0x0000f607,
+ 0x040204bd,
+ 0xf6040040,
+ 0x04bd0002,
+ 0x821031f4,
+ 0xcf018200,
+ 0x01030022,
+ 0xbb1f24f0,
+ 0x32b60432,
+ 0x0502b501,
+ 0x820603b5,
+ 0xcf018600,
+ 0x02b50022,
+ 0x0c308e04,
+ 0xbd24bd50,
+/* 0x0377: init_unk_loop */
+ 0x7e44bd34,
+ 0xb0000065,
+ 0x0bf400f6,
+ 0xbb010f0e,
+ 0x4ffd04f2,
+ 0x0130b605,
+/* 0x038c: init_unk_next */
+ 0xb60120b6,
+ 0x26b004e0,
+ 0xe21bf401,
+/* 0x0398: init_unk_done */
+ 0xb50703b5,
+ 0x00820804,
+ 0x22cf0201,
+ 0x9534bd00,
+ 0x00800825,
+ 0x05f601c0,
+ 0x8004bd00,
+ 0xf601c100,
+ 0x04bd0005,
+ 0x98000e98,
+ 0x207e010f,
+ 0x2fbb0001,
+ 0x003fbb00,
+ 0x98010e98,
+ 0x207e020f,
+ 0x0e980001,
+ 0x00effd05,
+ 0xbb002ebb,
+ 0x0e98003e,
+ 0x030f9802,
+ 0x0001207e,
+ 0xfd070e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0x800235b6,
+ 0xf601d300,
+ 0x04bd0003,
+ 0xb60825b6,
+ 0x20b60635,
+ 0x0130b601,
+ 0xb60824b6,
+ 0x2fb20834,
+ 0x0002687e,
+ 0x80003fbb,
+ 0xf6020100,
+ 0x04bd0003,
+ 0x29f024bd,
+ 0x3000801f,
+ 0x0002f602,
+/* 0x0436: main */
+ 0x31f404bd,
+ 0x0028f400,
+ 0x377e240d,
+ 0x01f40000,
+ 0x04e4b0f4,
+ 0xfe1d18f4,
+ 0x06020181,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x097e0018,
+ 0x0ef40005,
+/* 0x0465: main_not_ctx_xfer */
+ 0x10ef94d4,
+ 0x7e01f5f0,
+ 0xf40002f8,
+/* 0x0472: ih */
+ 0x80f9c70e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0x4a04bdf0,
+ 0xaacf0200,
+ 0x04abc400,
+ 0x0d1f0bf4,
+ 0x1a004e24,
+ 0x4f00eecf,
+ 0xffcf1900,
+ 0x00047e00,
+ 0x40010e00,
+ 0x0ef61d00,
+/* 0x04af: ih_no_fifo */
+ 0x4004bd00,
+ 0x0af60100,
+ 0xfc04bd00,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x32f480fc,
+/* 0x04cf: hub_barrier_done */
+ 0x0f01f800,
+ 0x040e9801,
+ 0xb204febb,
+ 0x94188eff,
+ 0x008f7e40,
+/* 0x04e3: ctx_redswitch */
+ 0x0f00f800,
+ 0x85008020,
0x000ff601,
- 0x00f804bd,
-/* 0x04fc: ctx_xfer */
- 0x02810080,
- 0xbd000ff6,
- 0x0711f404,
- 0x0004d67e,
-/* 0x050c: ctx_xfer_not_load */
- 0x0002167e,
- 0xfc8024bd,
- 0x02f60247,
+ 0x080e04bd,
+/* 0x04f0: ctx_redswitch_delay */
+ 0xf401e2b6,
+ 0xf5f1fd1b,
+ 0xf5f10800,
+ 0x00800200,
+ 0x0ff60185,
+ 0xf804bd00,
+/* 0x0509: ctx_xfer */
+ 0x81008000,
+ 0x000ff602,
+ 0x11f404bd,
+ 0x04e37e07,
+/* 0x0519: ctx_xfer_not_load */
+ 0x02167e00,
+ 0x8024bd00,
+ 0xf60247fc,
+ 0x04bd0002,
+ 0xb6012cf0,
+ 0xfc800320,
+ 0x02f6024a,
0xf004bd00,
- 0x20b6012c,
- 0x4afc8003,
- 0x0002f602,
- 0xacf004bd,
- 0x02a5f001,
- 0x5000008b,
+ 0xa5f001ac,
+ 0x00008b02,
+ 0x040c9850,
+ 0xbb0fc4b6,
+ 0x0c9800bc,
+ 0x010d9800,
+ 0x3d7e000e,
+ 0xacf00001,
+ 0x40008b01,
+ 0x040c9850,
+ 0xbb0fc4b6,
+ 0x0c9800bc,
+ 0x020d9801,
+ 0x4e060f98,
+ 0x3d7e0800,
+ 0xacf00001,
+ 0x04a5f001,
+ 0x5030008b,
0xb6040c98,
0xbcbb0fc4,
- 0x000c9800,
- 0x0e010d98,
- 0x013d7e00,
- 0x01acf000,
- 0x5040008b,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0x004e060f,
- 0x013d7e08,
- 0x01acf000,
- 0x8b04a5f0,
- 0x98503000,
- 0xc4b6040c,
- 0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x02004e08,
- 0x00013d7e,
- 0x00020a7e,
- 0xf40601f4,
-/* 0x0596: ctx_xfer_post */
- 0x277e0712,
-/* 0x059a: ctx_xfer_done */
- 0xc27e0002,
- 0x00f80004,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x020c9800,
+ 0x98030d98,
+ 0x004e080f,
+ 0x013d7e02,
+ 0x020a7e00,
+ 0x0601f400,
+/* 0x05a3: ctx_xfer_post */
+ 0x7e0712f4,
+/* 0x05a7: ctx_xfer_done */
+ 0x7e000227,
+ 0xf80004cf,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
index 0e7b01efae8d..325cc7b7b2fb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
@@ -192,7 +192,7 @@ uint32_t nvc0_grgpc_code[] = {
0x1fb4f000,
0xf410b4b0,
0xa7f0f01b,
- 0xd021f402,
+ 0xd021f405,
/* 0x0223: mmctx_stop */
0xc82b0ef4,
0xb4b600ab,
@@ -300,182 +300,182 @@ uint32_t nvc0_grgpc_code[] = {
0x21f440e3,
0xf8e0fc9d,
/* 0x03a1: init */
- 0xfe04bd00,
- 0x27f00004,
- 0x0007f102,
- 0x0003f012,
- 0xbd0002d0,
- 0xd517f104,
- 0x0010fe04,
- 0x070007f1,
+ 0xf104bd00,
+ 0xf0420017,
+ 0x11cf0013,
+ 0x0911e700,
+ 0x0814b601,
+ 0xf00014fe,
+ 0x07f10227,
+ 0x03f01200,
+ 0x0002d000,
+ 0x17f104bd,
+ 0x10fe04e6,
+ 0x0007f100,
+ 0x0003f007,
+ 0xbd0000d0,
+ 0x0427f004,
+ 0x040007f1,
0xd00003f0,
- 0x04bd0000,
- 0xf10427f0,
- 0xf0040007,
- 0x02d00003,
- 0xf404bd00,
- 0x27f11031,
- 0x23f08200,
- 0x0022cf01,
- 0xf00137f0,
- 0x32bb1f24,
- 0x0132b604,
- 0x80050280,
- 0x27f10603,
- 0x23f08600,
- 0x0022cf01,
- 0xf1040280,
- 0xf0010027,
- 0x22cf0223,
- 0x9534bd00,
- 0x07f10825,
- 0x03f0c000,
- 0x0005d001,
- 0x07f104bd,
- 0x03f0c100,
- 0x0005d001,
- 0x0e9804bd,
- 0x010f9800,
- 0x015021f5,
- 0xbb002fbb,
- 0x0e98003f,
- 0x020f9801,
- 0x015021f5,
- 0xfd050e98,
- 0x2ebb00ef,
- 0x003ebb00,
- 0xf10235b6,
- 0xf0d30007,
- 0x03d00103,
- 0xb604bd00,
- 0x35b60825,
- 0x0120b606,
- 0xb60130b6,
- 0x34b60824,
- 0x022fb908,
- 0x02d321f5,
- 0xf1003fbb,
- 0xf0010007,
- 0x03d00203,
- 0xbd04bd00,
- 0x1f29f024,
- 0x080007f1,
- 0xd00203f0,
0x04bd0002,
-/* 0x0498: main */
- 0xf40031f4,
- 0xd7f00028,
- 0x3921f41c,
- 0xb0f401f4,
- 0x18f404e4,
- 0x0181fe1e,
- 0xbd0627f0,
- 0x0412fd20,
- 0xfd01e4b6,
- 0x18fe051e,
- 0x8d21f500,
- 0xd30ef405,
-/* 0x04c8: main_not_ctx_xfer */
- 0xf010ef94,
- 0x21f501f5,
- 0x0ef4037e,
-/* 0x04d5: ih */
- 0xfe80f9c6,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0xa7f104bd,
- 0xa3f00200,
- 0x00aacf00,
- 0xf404abc4,
- 0xd7f02c0b,
- 0x00e7f11c,
- 0x00e3f01a,
- 0xf100eecf,
- 0xf01900f7,
- 0xffcf00f3,
- 0x0421f400,
- 0xf101e7f0,
- 0xf01d0007,
- 0x0ed00003,
-/* 0x0523: ih_no_fifo */
+ 0xf11031f4,
+ 0xf0820027,
+ 0x22cf0123,
+ 0x0137f000,
+ 0xbb1f24f0,
+ 0x32b60432,
+ 0x05028001,
+ 0xf1060380,
+ 0xf0860027,
+ 0x22cf0123,
+ 0x04028000,
+ 0x010027f1,
+ 0xcf0223f0,
+ 0x34bd0022,
+ 0xf1082595,
+ 0xf0c00007,
+ 0x05d00103,
0xf104bd00,
- 0xf0010007,
- 0x0ad00003,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x0547: hub_barrier_done */
- 0xf001f800,
- 0x0e9801f7,
- 0x04febb04,
- 0xf102ffb9,
- 0xf09418e7,
- 0x21f440e3,
-/* 0x055f: ctx_redswitch */
- 0xf000f89d,
- 0x07f120f7,
- 0x03f08500,
- 0x000fd001,
- 0xe7f004bd,
-/* 0x0571: ctx_redswitch_delay */
- 0x01e2b608,
- 0xf1fd1bf4,
- 0xf10800f5,
- 0xf10200f5,
+ 0xf0c10007,
+ 0x05d00103,
+ 0x9804bd00,
+ 0x0f98000e,
+ 0x5021f501,
+ 0x002fbb01,
+ 0x98003fbb,
+ 0x0f98010e,
+ 0x5021f502,
+ 0x050e9801,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x0235b600,
+ 0xd30007f1,
+ 0xd00103f0,
+ 0x04bd0003,
+ 0xb60825b6,
+ 0x20b60635,
+ 0x0130b601,
+ 0xb60824b6,
+ 0x2fb90834,
+ 0xd321f502,
+ 0x003fbb02,
+ 0x010007f1,
+ 0xd00203f0,
+ 0x04bd0003,
+ 0x29f024bd,
+ 0x0007f11f,
+ 0x0203f008,
+ 0xbd0002d0,
+/* 0x04a9: main */
+ 0x0031f404,
+ 0xf00028f4,
+ 0x21f41cd7,
+ 0xf401f439,
+ 0xf404e4b0,
+ 0x81fe1e18,
+ 0x0627f001,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x21f50018,
+ 0x0ef4059e,
+/* 0x04d9: main_not_ctx_xfer */
+ 0x10ef94d3,
+ 0xf501f5f0,
+ 0xf4037e21,
+/* 0x04e6: ih */
+ 0x80f9c60e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf02c0bf4,
+ 0xe7f11cd7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x0534: ih_no_fifo */
+ 0x010007f1,
+ 0xd00003f0,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xf480fc00,
+ 0x01f80032,
+/* 0x0558: hub_barrier_done */
+ 0x9801f7f0,
+ 0xfebb040e,
+ 0x02ffb904,
+ 0x9418e7f1,
+ 0xf440e3f0,
+ 0x00f89d21,
+/* 0x0570: ctx_redswitch */
+ 0xf120f7f0,
0xf0850007,
0x0fd00103,
- 0xf804bd00,
-/* 0x058d: ctx_xfer */
- 0x0007f100,
- 0x0203f081,
- 0xbd000fd0,
- 0x0711f404,
- 0x055f21f5,
-/* 0x05a0: ctx_xfer_not_load */
- 0x026a21f5,
- 0x07f124bd,
- 0x03f047fc,
- 0x0002d002,
- 0x2cf004bd,
- 0x0320b601,
- 0x4afc07f1,
- 0xd00203f0,
- 0x04bd0002,
+ 0xf004bd00,
+/* 0x0582: ctx_redswitch_delay */
+ 0xe2b608e7,
+ 0xfd1bf401,
+ 0x0800f5f1,
+ 0x0200f5f1,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x059e: ctx_xfer */
+ 0x07f100f8,
+ 0x03f08100,
+ 0x000fd002,
+ 0x11f404bd,
+ 0x7021f507,
+/* 0x05b1: ctx_xfer_not_load */
+ 0x6a21f505,
+ 0xf124bd02,
+ 0xf047fc07,
+ 0x02d00203,
+ 0xf004bd00,
+ 0x20b6012c,
+ 0xfc07f103,
+ 0x0203f04a,
+ 0xbd0002d0,
+ 0x01acf004,
+ 0xf102a5f0,
+ 0xf00000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98000c,
+ 0x00e7f001,
+ 0x016f21f5,
0xf001acf0,
- 0xb7f102a5,
- 0xb3f00000,
+ 0xb7f104a5,
+ 0xb3f04000,
0x040c9850,
0xbb0fc4b6,
0x0c9800bc,
- 0x010d9800,
- 0xf500e7f0,
- 0xf0016f21,
- 0xa5f001ac,
- 0x00b7f104,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0x21f5016f,
- 0x01f4025e,
- 0x0712f406,
-/* 0x0618: ctx_xfer_post */
- 0x027f21f5,
-/* 0x061c: ctx_xfer_done */
- 0x054721f5,
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x020d9801,
+ 0xf1060f98,
+ 0xf50800e7,
+ 0xf5016f21,
+ 0xf4025e21,
+ 0x12f40601,
+/* 0x0629: ctx_xfer_post */
+ 0x7f21f507,
+/* 0x062d: ctx_xfer_done */
+ 0x5821f502,
+ 0x0000f805,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
index 84dd32db28a0..d1504a4059c6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
@@ -196,7 +196,7 @@ uint32_t nvd7_grgpc_code[] = {
0x1fb4f000,
0xf410b4b0,
0xa7f0f01b,
- 0xd021f402,
+ 0xd021f405,
/* 0x0223: mmctx_stop */
0xc82b0ef4,
0xb4b600ab,
@@ -304,212 +304,212 @@ uint32_t nvd7_grgpc_code[] = {
0x21f440e3,
0xf8e0fc9d,
/* 0x03a1: init */
- 0xfe04bd00,
- 0x27f00004,
- 0x0007f102,
- 0x0003f012,
- 0xbd0002d0,
- 0x1f17f104,
- 0x0010fe05,
- 0x070007f1,
+ 0xf104bd00,
+ 0xf0420017,
+ 0x11cf0013,
+ 0x0911e700,
+ 0x0814b601,
+ 0xf00014fe,
+ 0x07f10227,
+ 0x03f01200,
+ 0x0002d000,
+ 0x17f104bd,
+ 0x10fe0530,
+ 0x0007f100,
+ 0x0003f007,
+ 0xbd0000d0,
+ 0x0427f004,
+ 0x040007f1,
0xd00003f0,
- 0x04bd0000,
- 0xf10427f0,
- 0xf0040007,
- 0x02d00003,
+ 0x04bd0002,
+ 0xf11031f4,
+ 0xf0820027,
+ 0x22cf0123,
+ 0x0137f000,
+ 0xbb1f24f0,
+ 0x32b60432,
+ 0x05028001,
+ 0xf1060380,
+ 0xf0860027,
+ 0x22cf0123,
+ 0x04028000,
+ 0x0c30e7f1,
+ 0xbd50e3f0,
+ 0xbd34bd24,
+/* 0x0421: init_unk_loop */
+ 0x6821f444,
+ 0xf400f6b0,
+ 0xf7f00f0b,
+ 0x04f2bb01,
+ 0xb6054ffd,
+/* 0x0436: init_unk_next */
+ 0x20b60130,
+ 0x04e0b601,
+ 0xf40126b0,
+/* 0x0442: init_unk_done */
+ 0x0380e21b,
+ 0x08048007,
+ 0x010027f1,
+ 0xcf0223f0,
+ 0x34bd0022,
+ 0xf1082595,
+ 0xf0c00007,
+ 0x05d00103,
+ 0xf104bd00,
+ 0xf0c10007,
+ 0x05d00103,
+ 0x9804bd00,
+ 0x0f98000e,
+ 0x5021f501,
+ 0x002fbb01,
+ 0x98003fbb,
+ 0x0f98010e,
+ 0x5021f502,
+ 0x050e9801,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x020e9800,
+ 0xf5030f98,
+ 0x98015021,
+ 0xeffd070e,
+ 0x002ebb00,
+ 0xb6003ebb,
+ 0x07f10235,
+ 0x03f0d300,
+ 0x0003d001,
+ 0x25b604bd,
+ 0x0635b608,
+ 0xb60120b6,
+ 0x24b60130,
+ 0x0834b608,
+ 0xf5022fb9,
+ 0xbb02d321,
+ 0x07f1003f,
+ 0x03f00100,
+ 0x0003d002,
+ 0x24bd04bd,
+ 0xf11f29f0,
+ 0xf0080007,
+ 0x02d00203,
+/* 0x04f3: main */
0xf404bd00,
- 0x27f11031,
- 0x23f08200,
- 0x0022cf01,
- 0xf00137f0,
- 0x32bb1f24,
- 0x0132b604,
- 0x80050280,
- 0x27f10603,
- 0x23f08600,
- 0x0022cf01,
- 0xf1040280,
- 0xf00c30e7,
- 0x24bd50e3,
- 0x44bd34bd,
-/* 0x0410: init_unk_loop */
- 0xb06821f4,
- 0x0bf400f6,
- 0x01f7f00f,
- 0xfd04f2bb,
- 0x30b6054f,
-/* 0x0425: init_unk_next */
- 0x0120b601,
- 0xb004e0b6,
- 0x1bf40126,
-/* 0x0431: init_unk_done */
- 0x070380e2,
- 0xf1080480,
- 0xf0010027,
- 0x22cf0223,
- 0x9534bd00,
- 0x07f10825,
- 0x03f0c000,
- 0x0005d001,
+ 0x28f40031,
+ 0x24d7f000,
+ 0xf43921f4,
+ 0xe4b0f401,
+ 0x1e18f404,
+ 0xf00181fe,
+ 0x20bd0627,
+ 0xb60412fd,
+ 0x1efd01e4,
+ 0x0018fe05,
+ 0x05e821f5,
+/* 0x0523: main_not_ctx_xfer */
+ 0x94d30ef4,
+ 0xf5f010ef,
+ 0x7e21f501,
+ 0xc60ef403,
+/* 0x0530: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x24d7f02c,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
+ 0xf00421f4,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x057e: ih_no_fifo */
0x07f104bd,
- 0x03f0c100,
- 0x0005d001,
- 0x0e9804bd,
- 0x010f9800,
- 0x015021f5,
- 0xbb002fbb,
- 0x0e98003f,
- 0x020f9801,
- 0x015021f5,
- 0xfd050e98,
- 0x2ebb00ef,
- 0x003ebb00,
- 0x98020e98,
- 0x21f5030f,
- 0x0e980150,
- 0x00effd07,
- 0xbb002ebb,
- 0x35b6003e,
- 0x0007f102,
- 0x0103f0d3,
- 0xbd0003d0,
- 0x0825b604,
- 0xb60635b6,
- 0x30b60120,
- 0x0824b601,
- 0xb90834b6,
- 0x21f5022f,
- 0x3fbb02d3,
- 0x0007f100,
- 0x0203f001,
- 0xbd0003d0,
- 0xf024bd04,
- 0x07f11f29,
- 0x03f00800,
- 0x0002d002,
-/* 0x04e2: main */
- 0x31f404bd,
- 0x0028f400,
- 0xf424d7f0,
- 0x01f43921,
- 0x04e4b0f4,
- 0xfe1e18f4,
- 0x27f00181,
- 0xfd20bd06,
- 0xe4b60412,
- 0x051efd01,
- 0xf50018fe,
- 0xf405d721,
-/* 0x0512: main_not_ctx_xfer */
- 0xef94d30e,
- 0x01f5f010,
- 0x037e21f5,
-/* 0x051f: ih */
- 0xf9c60ef4,
- 0x0188fe80,
- 0x90f980f9,
- 0xb0f9a0f9,
- 0xe0f9d0f9,
- 0x04bdf0f9,
- 0x0200a7f1,
- 0xcf00a3f0,
- 0xabc400aa,
- 0x2c0bf404,
- 0xf124d7f0,
- 0xf01a00e7,
- 0xeecf00e3,
- 0x00f7f100,
- 0x00f3f019,
- 0xf400ffcf,
- 0xe7f00421,
- 0x0007f101,
- 0x0003f01d,
- 0xbd000ed0,
-/* 0x056d: ih_no_fifo */
- 0x0007f104,
- 0x0003f001,
- 0xbd000ad0,
- 0xfcf0fc04,
- 0xfcd0fce0,
- 0xfca0fcb0,
- 0xfe80fc90,
- 0x80fc0088,
- 0xf80032f4,
-/* 0x0591: hub_barrier_done */
- 0x01f7f001,
- 0xbb040e98,
- 0xffb904fe,
- 0x18e7f102,
- 0x40e3f094,
- 0xf89d21f4,
-/* 0x05a9: ctx_redswitch */
- 0x20f7f000,
- 0x850007f1,
- 0xd00103f0,
- 0x04bd000f,
-/* 0x05bb: ctx_redswitch_delay */
- 0xb608e7f0,
- 0x1bf401e2,
- 0x00f5f1fd,
- 0x00f5f108,
- 0x0007f102,
+ 0x03f00100,
+ 0x000ad000,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x05a2: hub_barrier_done */
+ 0xf7f001f8,
+ 0x040e9801,
+ 0xb904febb,
+ 0xe7f102ff,
+ 0xe3f09418,
+ 0x9d21f440,
+/* 0x05ba: ctx_redswitch */
+ 0xf7f000f8,
+ 0x0007f120,
0x0103f085,
0xbd000fd0,
-/* 0x05d7: ctx_xfer */
- 0xf100f804,
- 0xf0810007,
- 0x0fd00203,
- 0xf404bd00,
- 0x21f50711,
-/* 0x05ea: ctx_xfer_not_load */
- 0x21f505a9,
- 0x24bd026a,
- 0x47fc07f1,
+ 0x08e7f004,
+/* 0x05cc: ctx_redswitch_delay */
+ 0xf401e2b6,
+ 0xf5f1fd1b,
+ 0xf5f10800,
+ 0x07f10200,
+ 0x03f08500,
+ 0x000fd001,
+ 0x00f804bd,
+/* 0x05e8: ctx_xfer */
+ 0x810007f1,
0xd00203f0,
- 0x04bd0002,
- 0xb6012cf0,
- 0x07f10320,
- 0x03f04afc,
- 0x0002d002,
- 0xacf004bd,
- 0x02a5f001,
- 0x0000b7f1,
- 0x9850b3f0,
- 0xc4b6040c,
- 0x00bcbb0f,
- 0x98000c98,
- 0xe7f0010d,
- 0x6f21f500,
- 0x01acf001,
- 0x4000b7f1,
+ 0x04bd000f,
+ 0xf50711f4,
+/* 0x05fb: ctx_xfer_not_load */
+ 0xf505ba21,
+ 0xbd026a21,
+ 0xfc07f124,
+ 0x0203f047,
+ 0xbd0002d0,
+ 0x012cf004,
+ 0xf10320b6,
+ 0xf04afc07,
+ 0x02d00203,
+ 0xf004bd00,
+ 0xa5f001ac,
+ 0x00b7f102,
+ 0x50b3f000,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x000c9800,
+ 0xf0010d98,
+ 0x21f500e7,
+ 0xacf0016f,
+ 0x00b7f101,
+ 0x50b3f040,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0x98020d98,
+ 0xe7f1060f,
+ 0x21f50800,
+ 0xacf0016f,
+ 0x04a5f001,
+ 0x3000b7f1,
0x9850b3f0,
0xc4b6040c,
0x00bcbb0f,
- 0x98010c98,
- 0x0f98020d,
- 0x00e7f106,
- 0x6f21f508,
- 0x01acf001,
- 0xf104a5f0,
- 0xf03000b7,
- 0x0c9850b3,
- 0x0fc4b604,
- 0x9800bcbb,
- 0x0d98020c,
- 0x080f9803,
- 0x0200e7f1,
- 0x016f21f5,
- 0x025e21f5,
- 0xf40601f4,
-/* 0x0686: ctx_xfer_post */
- 0x21f50712,
-/* 0x068a: ctx_xfer_done */
- 0x21f5027f,
- 0x00f80591,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x98020c98,
+ 0x0f98030d,
+ 0x00e7f108,
+ 0x6f21f502,
+ 0x5e21f501,
+ 0x0601f402,
+/* 0x0697: ctx_xfer_post */
+ 0xf50712f4,
+/* 0x069b: ctx_xfer_done */
+ 0xf5027f21,
+ 0xf805a221,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
index b6da800ee9c2..855b220378f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -196,7 +196,7 @@ uint32_t nve0_grgpc_code[] = {
0x1fb4f000,
0xf410b4b0,
0xa7f0f01b,
- 0xd021f402,
+ 0xd021f405,
/* 0x0223: mmctx_stop */
0xc82b0ef4,
0xb4b600ab,
@@ -304,212 +304,212 @@ uint32_t nve0_grgpc_code[] = {
0x21f440e3,
0xf8e0fc9d,
/* 0x03a1: init */
- 0xfe04bd00,
- 0x27f00004,
- 0x0007f102,
- 0x0003f012,
- 0xbd0002d0,
- 0x1f17f104,
- 0x0010fe05,
- 0x070007f1,
+ 0xf104bd00,
+ 0xf0420017,
+ 0x11cf0013,
+ 0x0911e700,
+ 0x0814b601,
+ 0xf00014fe,
+ 0x07f10227,
+ 0x03f01200,
+ 0x0002d000,
+ 0x17f104bd,
+ 0x10fe0530,
+ 0x0007f100,
+ 0x0003f007,
+ 0xbd0000d0,
+ 0x0427f004,
+ 0x040007f1,
0xd00003f0,
- 0x04bd0000,
- 0xf10427f0,
- 0xf0040007,
- 0x02d00003,
+ 0x04bd0002,
+ 0xf11031f4,
+ 0xf0820027,
+ 0x22cf0123,
+ 0x0137f000,
+ 0xbb1f24f0,
+ 0x32b60432,
+ 0x05028001,
+ 0xf1060380,
+ 0xf0860027,
+ 0x22cf0123,
+ 0x04028000,
+ 0x0c30e7f1,
+ 0xbd50e3f0,
+ 0xbd34bd24,
+/* 0x0421: init_unk_loop */
+ 0x6821f444,
+ 0xf400f6b0,
+ 0xf7f00f0b,
+ 0x04f2bb01,
+ 0xb6054ffd,
+/* 0x0436: init_unk_next */
+ 0x20b60130,
+ 0x04e0b601,
+ 0xf40126b0,
+/* 0x0442: init_unk_done */
+ 0x0380e21b,
+ 0x08048007,
+ 0x010027f1,
+ 0xcf0223f0,
+ 0x34bd0022,
+ 0xf1082595,
+ 0xf0c00007,
+ 0x05d00103,
+ 0xf104bd00,
+ 0xf0c10007,
+ 0x05d00103,
+ 0x9804bd00,
+ 0x0f98000e,
+ 0x5021f501,
+ 0x002fbb01,
+ 0x98003fbb,
+ 0x0f98010e,
+ 0x5021f502,
+ 0x050e9801,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x020e9800,
+ 0xf5030f98,
+ 0x98015021,
+ 0xeffd070e,
+ 0x002ebb00,
+ 0xb6003ebb,
+ 0x07f10235,
+ 0x03f0d300,
+ 0x0003d001,
+ 0x25b604bd,
+ 0x0635b608,
+ 0xb60120b6,
+ 0x24b60130,
+ 0x0834b608,
+ 0xf5022fb9,
+ 0xbb02d321,
+ 0x07f1003f,
+ 0x03f00100,
+ 0x0003d002,
+ 0x24bd04bd,
+ 0xf11f29f0,
+ 0xf0080007,
+ 0x02d00203,
+/* 0x04f3: main */
0xf404bd00,
- 0x27f11031,
- 0x23f08200,
- 0x0022cf01,
- 0xf00137f0,
- 0x32bb1f24,
- 0x0132b604,
- 0x80050280,
- 0x27f10603,
- 0x23f08600,
- 0x0022cf01,
- 0xf1040280,
- 0xf00c30e7,
- 0x24bd50e3,
- 0x44bd34bd,
-/* 0x0410: init_unk_loop */
- 0xb06821f4,
- 0x0bf400f6,
- 0x01f7f00f,
- 0xfd04f2bb,
- 0x30b6054f,
-/* 0x0425: init_unk_next */
- 0x0120b601,
- 0xb004e0b6,
- 0x1bf40126,
-/* 0x0431: init_unk_done */
- 0x070380e2,
- 0xf1080480,
- 0xf0010027,
- 0x22cf0223,
- 0x9534bd00,
- 0x07f10825,
- 0x03f0c000,
- 0x0005d001,
+ 0x28f40031,
+ 0x24d7f000,
+ 0xf43921f4,
+ 0xe4b0f401,
+ 0x1e18f404,
+ 0xf00181fe,
+ 0x20bd0627,
+ 0xb60412fd,
+ 0x1efd01e4,
+ 0x0018fe05,
+ 0x05e821f5,
+/* 0x0523: main_not_ctx_xfer */
+ 0x94d30ef4,
+ 0xf5f010ef,
+ 0x7e21f501,
+ 0xc60ef403,
+/* 0x0530: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x24d7f02c,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
+ 0xf00421f4,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x057e: ih_no_fifo */
0x07f104bd,
- 0x03f0c100,
- 0x0005d001,
- 0x0e9804bd,
- 0x010f9800,
- 0x015021f5,
- 0xbb002fbb,
- 0x0e98003f,
- 0x020f9801,
- 0x015021f5,
- 0xfd050e98,
- 0x2ebb00ef,
- 0x003ebb00,
- 0x98020e98,
- 0x21f5030f,
- 0x0e980150,
- 0x00effd07,
- 0xbb002ebb,
- 0x35b6003e,
- 0x0007f102,
- 0x0103f0d3,
- 0xbd0003d0,
- 0x0825b604,
- 0xb60635b6,
- 0x30b60120,
- 0x0824b601,
- 0xb90834b6,
- 0x21f5022f,
- 0x3fbb02d3,
- 0x0007f100,
- 0x0203f001,
- 0xbd0003d0,
- 0xf024bd04,
- 0x07f11f29,
- 0x03f00800,
- 0x0002d002,
-/* 0x04e2: main */
- 0x31f404bd,
- 0x0028f400,
- 0xf424d7f0,
- 0x01f43921,
- 0x04e4b0f4,
- 0xfe1e18f4,
- 0x27f00181,
- 0xfd20bd06,
- 0xe4b60412,
- 0x051efd01,
- 0xf50018fe,
- 0xf405d721,
-/* 0x0512: main_not_ctx_xfer */
- 0xef94d30e,
- 0x01f5f010,
- 0x037e21f5,
-/* 0x051f: ih */
- 0xf9c60ef4,
- 0x0188fe80,
- 0x90f980f9,
- 0xb0f9a0f9,
- 0xe0f9d0f9,
- 0x04bdf0f9,
- 0x0200a7f1,
- 0xcf00a3f0,
- 0xabc400aa,
- 0x2c0bf404,
- 0xf124d7f0,
- 0xf01a00e7,
- 0xeecf00e3,
- 0x00f7f100,
- 0x00f3f019,
- 0xf400ffcf,
- 0xe7f00421,
- 0x0007f101,
- 0x0003f01d,
- 0xbd000ed0,
-/* 0x056d: ih_no_fifo */
- 0x0007f104,
- 0x0003f001,
- 0xbd000ad0,
- 0xfcf0fc04,
- 0xfcd0fce0,
- 0xfca0fcb0,
- 0xfe80fc90,
- 0x80fc0088,
- 0xf80032f4,
-/* 0x0591: hub_barrier_done */
- 0x01f7f001,
- 0xbb040e98,
- 0xffb904fe,
- 0x18e7f102,
- 0x40e3f094,
- 0xf89d21f4,
-/* 0x05a9: ctx_redswitch */
- 0x20f7f000,
- 0x850007f1,
- 0xd00103f0,
- 0x04bd000f,
-/* 0x05bb: ctx_redswitch_delay */
- 0xb608e7f0,
- 0x1bf401e2,
- 0x00f5f1fd,
- 0x00f5f108,
- 0x0007f102,
+ 0x03f00100,
+ 0x000ad000,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x05a2: hub_barrier_done */
+ 0xf7f001f8,
+ 0x040e9801,
+ 0xb904febb,
+ 0xe7f102ff,
+ 0xe3f09418,
+ 0x9d21f440,
+/* 0x05ba: ctx_redswitch */
+ 0xf7f000f8,
+ 0x0007f120,
0x0103f085,
0xbd000fd0,
-/* 0x05d7: ctx_xfer */
- 0xf100f804,
- 0xf0810007,
- 0x0fd00203,
- 0xf404bd00,
- 0x21f50711,
-/* 0x05ea: ctx_xfer_not_load */
- 0x21f505a9,
- 0x24bd026a,
- 0x47fc07f1,
+ 0x08e7f004,
+/* 0x05cc: ctx_redswitch_delay */
+ 0xf401e2b6,
+ 0xf5f1fd1b,
+ 0xf5f10800,
+ 0x07f10200,
+ 0x03f08500,
+ 0x000fd001,
+ 0x00f804bd,
+/* 0x05e8: ctx_xfer */
+ 0x810007f1,
0xd00203f0,
- 0x04bd0002,
- 0xb6012cf0,
- 0x07f10320,
- 0x03f04afc,
- 0x0002d002,
- 0xacf004bd,
- 0x02a5f001,
- 0x0000b7f1,
- 0x9850b3f0,
- 0xc4b6040c,
- 0x00bcbb0f,
- 0x98000c98,
- 0xe7f0010d,
- 0x6f21f500,
- 0x01acf001,
- 0x4000b7f1,
+ 0x04bd000f,
+ 0xf50711f4,
+/* 0x05fb: ctx_xfer_not_load */
+ 0xf505ba21,
+ 0xbd026a21,
+ 0xfc07f124,
+ 0x0203f047,
+ 0xbd0002d0,
+ 0x012cf004,
+ 0xf10320b6,
+ 0xf04afc07,
+ 0x02d00203,
+ 0xf004bd00,
+ 0xa5f001ac,
+ 0x00b7f102,
+ 0x50b3f000,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x000c9800,
+ 0xf0010d98,
+ 0x21f500e7,
+ 0xacf0016f,
+ 0x00b7f101,
+ 0x50b3f040,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0x98020d98,
+ 0xe7f1060f,
+ 0x21f50800,
+ 0xacf0016f,
+ 0x04a5f001,
+ 0x3000b7f1,
0x9850b3f0,
0xc4b6040c,
0x00bcbb0f,
- 0x98010c98,
- 0x0f98020d,
- 0x00e7f106,
- 0x6f21f508,
- 0x01acf001,
- 0xf104a5f0,
- 0xf03000b7,
- 0x0c9850b3,
- 0x0fc4b604,
- 0x9800bcbb,
- 0x0d98020c,
- 0x080f9803,
- 0x0200e7f1,
- 0x016f21f5,
- 0x025e21f5,
- 0xf40601f4,
-/* 0x0686: ctx_xfer_post */
- 0x21f50712,
-/* 0x068a: ctx_xfer_done */
- 0x21f5027f,
- 0x00f80591,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x98020c98,
+ 0x0f98030d,
+ 0x00e7f108,
+ 0x6f21f502,
+ 0x5e21f501,
+ 0x0601f402,
+/* 0x0697: ctx_xfer_post */
+ 0xf50712f4,
+/* 0x069b: ctx_xfer_done */
+ 0xf5027f21,
+ 0xf805a221,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
index 6316ebaf5d9a..1b803197d28b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
@@ -196,7 +196,7 @@ uint32_t nvf0_grgpc_code[] = {
0x1fb4f000,
0xf410b4b0,
0xa7f0f01b,
- 0xd021f402,
+ 0xd021f405,
/* 0x0223: mmctx_stop */
0xc82b0ef4,
0xb4b600ab,
@@ -304,212 +304,212 @@ uint32_t nvf0_grgpc_code[] = {
0x21f440e3,
0xf8e0fc9d,
/* 0x03a1: init */
- 0xfe04bd00,
- 0x27f00004,
- 0x0007f102,
- 0x0003f012,
- 0xbd0002d0,
- 0x1f17f104,
- 0x0010fe05,
- 0x070007f1,
+ 0xf104bd00,
+ 0xf0420017,
+ 0x11cf0013,
+ 0x0911e700,
+ 0x0814b601,
+ 0xf00014fe,
+ 0x07f10227,
+ 0x03f01200,
+ 0x0002d000,
+ 0x17f104bd,
+ 0x10fe0530,
+ 0x0007f100,
+ 0x0003f007,
+ 0xbd0000d0,
+ 0x0427f004,
+ 0x040007f1,
0xd00003f0,
- 0x04bd0000,
- 0xf10427f0,
- 0xf0040007,
- 0x02d00003,
+ 0x04bd0002,
+ 0xf11031f4,
+ 0xf0820027,
+ 0x22cf0123,
+ 0x0137f000,
+ 0xbb1f24f0,
+ 0x32b60432,
+ 0x05028001,
+ 0xf1060380,
+ 0xf0860027,
+ 0x22cf0123,
+ 0x04028000,
+ 0x0c30e7f1,
+ 0xbd50e3f0,
+ 0xbd34bd24,
+/* 0x0421: init_unk_loop */
+ 0x6821f444,
+ 0xf400f6b0,
+ 0xf7f00f0b,
+ 0x04f2bb01,
+ 0xb6054ffd,
+/* 0x0436: init_unk_next */
+ 0x20b60130,
+ 0x04e0b601,
+ 0xf40226b0,
+/* 0x0442: init_unk_done */
+ 0x0380e21b,
+ 0x08048007,
+ 0x010027f1,
+ 0xcf0223f0,
+ 0x34bd0022,
+ 0xf1082595,
+ 0xf0c00007,
+ 0x05d00103,
+ 0xf104bd00,
+ 0xf0c10007,
+ 0x05d00103,
+ 0x9804bd00,
+ 0x0f98000e,
+ 0x5021f501,
+ 0x002fbb01,
+ 0x98003fbb,
+ 0x0f98010e,
+ 0x5021f502,
+ 0x050e9801,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x020e9800,
+ 0xf5030f98,
+ 0x98015021,
+ 0xeffd070e,
+ 0x002ebb00,
+ 0xb6003ebb,
+ 0x07f10235,
+ 0x03f0d300,
+ 0x0003d001,
+ 0x25b604bd,
+ 0x0635b608,
+ 0xb60120b6,
+ 0x24b60130,
+ 0x0834b608,
+ 0xf5022fb9,
+ 0xbb02d321,
+ 0x07f1003f,
+ 0x03f00100,
+ 0x0003d002,
+ 0x24bd04bd,
+ 0xf11f29f0,
+ 0xf0300007,
+ 0x02d00203,
+/* 0x04f3: main */
0xf404bd00,
- 0x27f11031,
- 0x23f08200,
- 0x0022cf01,
- 0xf00137f0,
- 0x32bb1f24,
- 0x0132b604,
- 0x80050280,
- 0x27f10603,
- 0x23f08600,
- 0x0022cf01,
- 0xf1040280,
- 0xf00c30e7,
- 0x24bd50e3,
- 0x44bd34bd,
-/* 0x0410: init_unk_loop */
- 0xb06821f4,
- 0x0bf400f6,
- 0x01f7f00f,
- 0xfd04f2bb,
- 0x30b6054f,
-/* 0x0425: init_unk_next */
- 0x0120b601,
- 0xb004e0b6,
- 0x1bf40226,
-/* 0x0431: init_unk_done */
- 0x070380e2,
- 0xf1080480,
- 0xf0010027,
- 0x22cf0223,
- 0x9534bd00,
- 0x07f10825,
- 0x03f0c000,
- 0x0005d001,
+ 0x28f40031,
+ 0x24d7f000,
+ 0xf43921f4,
+ 0xe4b0f401,
+ 0x1e18f404,
+ 0xf00181fe,
+ 0x20bd0627,
+ 0xb60412fd,
+ 0x1efd01e4,
+ 0x0018fe05,
+ 0x05e821f5,
+/* 0x0523: main_not_ctx_xfer */
+ 0x94d30ef4,
+ 0xf5f010ef,
+ 0x7e21f501,
+ 0xc60ef403,
+/* 0x0530: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x24d7f02c,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
+ 0xf00421f4,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x057e: ih_no_fifo */
0x07f104bd,
- 0x03f0c100,
- 0x0005d001,
- 0x0e9804bd,
- 0x010f9800,
- 0x015021f5,
- 0xbb002fbb,
- 0x0e98003f,
- 0x020f9801,
- 0x015021f5,
- 0xfd050e98,
- 0x2ebb00ef,
- 0x003ebb00,
- 0x98020e98,
- 0x21f5030f,
- 0x0e980150,
- 0x00effd07,
- 0xbb002ebb,
- 0x35b6003e,
- 0x0007f102,
- 0x0103f0d3,
- 0xbd0003d0,
- 0x0825b604,
- 0xb60635b6,
- 0x30b60120,
- 0x0824b601,
- 0xb90834b6,
- 0x21f5022f,
- 0x3fbb02d3,
- 0x0007f100,
- 0x0203f001,
- 0xbd0003d0,
- 0xf024bd04,
- 0x07f11f29,
- 0x03f03000,
- 0x0002d002,
-/* 0x04e2: main */
- 0x31f404bd,
- 0x0028f400,
- 0xf424d7f0,
- 0x01f43921,
- 0x04e4b0f4,
- 0xfe1e18f4,
- 0x27f00181,
- 0xfd20bd06,
- 0xe4b60412,
- 0x051efd01,
- 0xf50018fe,
- 0xf405d721,
-/* 0x0512: main_not_ctx_xfer */
- 0xef94d30e,
- 0x01f5f010,
- 0x037e21f5,
-/* 0x051f: ih */
- 0xf9c60ef4,
- 0x0188fe80,
- 0x90f980f9,
- 0xb0f9a0f9,
- 0xe0f9d0f9,
- 0x04bdf0f9,
- 0x0200a7f1,
- 0xcf00a3f0,
- 0xabc400aa,
- 0x2c0bf404,
- 0xf124d7f0,
- 0xf01a00e7,
- 0xeecf00e3,
- 0x00f7f100,
- 0x00f3f019,
- 0xf400ffcf,
- 0xe7f00421,
- 0x0007f101,
- 0x0003f01d,
- 0xbd000ed0,
-/* 0x056d: ih_no_fifo */
- 0x0007f104,
- 0x0003f001,
- 0xbd000ad0,
- 0xfcf0fc04,
- 0xfcd0fce0,
- 0xfca0fcb0,
- 0xfe80fc90,
- 0x80fc0088,
- 0xf80032f4,
-/* 0x0591: hub_barrier_done */
- 0x01f7f001,
- 0xbb040e98,
- 0xffb904fe,
- 0x18e7f102,
- 0x40e3f094,
- 0xf89d21f4,
-/* 0x05a9: ctx_redswitch */
- 0x20f7f000,
- 0x850007f1,
- 0xd00103f0,
- 0x04bd000f,
-/* 0x05bb: ctx_redswitch_delay */
- 0xb608e7f0,
- 0x1bf401e2,
- 0x00f5f1fd,
- 0x00f5f108,
- 0x0007f102,
+ 0x03f00100,
+ 0x000ad000,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x05a2: hub_barrier_done */
+ 0xf7f001f8,
+ 0x040e9801,
+ 0xb904febb,
+ 0xe7f102ff,
+ 0xe3f09418,
+ 0x9d21f440,
+/* 0x05ba: ctx_redswitch */
+ 0xf7f000f8,
+ 0x0007f120,
0x0103f085,
0xbd000fd0,
-/* 0x05d7: ctx_xfer */
- 0xf100f804,
- 0xf0810007,
- 0x0fd00203,
- 0xf404bd00,
- 0x21f50711,
-/* 0x05ea: ctx_xfer_not_load */
- 0x21f505a9,
- 0x24bd026a,
- 0x47fc07f1,
+ 0x08e7f004,
+/* 0x05cc: ctx_redswitch_delay */
+ 0xf401e2b6,
+ 0xf5f1fd1b,
+ 0xf5f10800,
+ 0x07f10200,
+ 0x03f08500,
+ 0x000fd001,
+ 0x00f804bd,
+/* 0x05e8: ctx_xfer */
+ 0x810007f1,
0xd00203f0,
- 0x04bd0002,
- 0xb6012cf0,
- 0x07f10320,
- 0x03f04afc,
- 0x0002d002,
- 0xacf004bd,
- 0x02a5f001,
- 0x0000b7f1,
- 0x9850b3f0,
- 0xc4b6040c,
- 0x00bcbb0f,
- 0x98000c98,
- 0xe7f0010d,
- 0x6f21f500,
- 0x01acf001,
- 0x4000b7f1,
+ 0x04bd000f,
+ 0xf50711f4,
+/* 0x05fb: ctx_xfer_not_load */
+ 0xf505ba21,
+ 0xbd026a21,
+ 0xfc07f124,
+ 0x0203f047,
+ 0xbd0002d0,
+ 0x012cf004,
+ 0xf10320b6,
+ 0xf04afc07,
+ 0x02d00203,
+ 0xf004bd00,
+ 0xa5f001ac,
+ 0x00b7f102,
+ 0x50b3f000,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x000c9800,
+ 0xf0010d98,
+ 0x21f500e7,
+ 0xacf0016f,
+ 0x00b7f101,
+ 0x50b3f040,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0x98020d98,
+ 0xe7f1060f,
+ 0x21f50800,
+ 0xacf0016f,
+ 0x04a5f001,
+ 0x3000b7f1,
0x9850b3f0,
0xc4b6040c,
0x00bcbb0f,
- 0x98010c98,
- 0x0f98020d,
- 0x00e7f106,
- 0x6f21f508,
- 0x01acf001,
- 0xf104a5f0,
- 0xf03000b7,
- 0x0c9850b3,
- 0x0fc4b604,
- 0x9800bcbb,
- 0x0d98020c,
- 0x080f9803,
- 0x0200e7f1,
- 0x016f21f5,
- 0x025e21f5,
- 0xf40601f4,
-/* 0x0686: ctx_xfer_post */
- 0x21f50712,
-/* 0x068a: ctx_xfer_done */
- 0x21f5027f,
- 0x00f80591,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x98020c98,
+ 0x0f98030d,
+ 0x00e7f108,
+ 0x6f21f502,
+ 0x5e21f501,
+ 0x0601f402,
+/* 0x0697: ctx_xfer_post */
+ 0xf50712f4,
+/* 0x069b: ctx_xfer_done */
+ 0xf5027f21,
+ 0xf805a221,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5
new file mode 100644
index 000000000000..27591b3086a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#define CHIPSET GK208
+#include "macros.fuc"
+
+.section #gm107_grhub_data
+#define INCLUDE_DATA
+#include "com.fuc"
+#include "hub.fuc"
+#undef INCLUDE_DATA
+
+.section #gm107_grhub_code
+#define INCLUDE_CODE
+bra #init
+#include "com.fuc"
+#include "hub.fuc"
+.align 256
+#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
new file mode 100644
index 000000000000..214dd16ec566
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
@@ -0,0 +1,916 @@
+uint32_t gm107_grhub_data[] = {
+/* 0x0000: hub_mmio_list_head */
+ 0x00000300,
+/* 0x0004: hub_mmio_list_tail */
+ 0x00000304,
+/* 0x0008: gpc_count */
+ 0x00000000,
+/* 0x000c: rop_count */
+ 0x00000000,
+/* 0x0010: cmd_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0058: ctx_current */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0100: chan_data */
+/* 0x0100: chan_mmio_count */
+ 0x00000000,
+/* 0x0104: chan_mmio_address */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0200: xfer_data */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0300: hub_mmio_list_base */
+ 0x0417e91c,
+};
+
+uint32_t gm107_grhub_code[] = {
+ 0x030e0ef5,
+/* 0x0004: queue_put */
+ 0x9800d898,
+ 0x86f001d9,
+ 0xf489a408,
+ 0x020f0b1b,
+ 0x0002f87e,
+/* 0x001a: queue_put_next */
+ 0x98c400f8,
+ 0x0384b607,
+ 0xb6008dbb,
+ 0x8eb50880,
+ 0x018fb500,
+ 0xf00190b6,
+ 0xd9b50f94,
+/* 0x0037: queue_get */
+ 0xf400f801,
+ 0xd8980131,
+ 0x01d99800,
+ 0x0bf489a4,
+ 0x0789c421,
+ 0xbb0394b6,
+ 0x90b6009d,
+ 0x009e9808,
+ 0xb6019f98,
+ 0x84f00180,
+ 0x00d8b50f,
+/* 0x0063: queue_get_done */
+ 0xf80132f4,
+/* 0x0065: nv_rd32 */
+ 0xf0ecb200,
+ 0x00801fc9,
+ 0x0cf601ca,
+/* 0x0073: nv_rd32_wait */
+ 0x8c04bd00,
+ 0xcf01ca00,
+ 0xccc800cc,
+ 0xf61bf41f,
+ 0xec7e060a,
+ 0x008f0000,
+ 0xffcf01cb,
+/* 0x008f: nv_wr32 */
+ 0x8000f800,
+ 0xf601cc00,
+ 0x04bd000f,
+ 0xc9f0ecb2,
+ 0x1ec9f01f,
+ 0x01ca0080,
+ 0xbd000cf6,
+/* 0x00a9: nv_wr32_wait */
+ 0xca008c04,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f61b,
+/* 0x00b8: wait_donez */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x00cf: wait_donez_ne */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf61bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x00ec: wait_doneo */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x0103: wait_doneo_e */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf60bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x0120: mmctx_size */
+/* 0x0122: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0x1bf4efa4,
+ 0xf89fb2ec,
+/* 0x013d: mmctx_xfer */
+ 0xf094bd00,
+ 0x00800199,
+ 0x09f60237,
+ 0xbd04bd00,
+ 0x05bbfd94,
+ 0x800f0bf4,
+ 0xf601c400,
+ 0x04bd000b,
+/* 0x015f: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0xc6008018,
+ 0x000ef601,
+ 0x008004bd,
+ 0x0ff601c7,
+ 0xf004bd00,
+/* 0x017a: mmctx_multi_disabled */
+ 0xabc80199,
+ 0x10b4b600,
+ 0xc80cb9f0,
+ 0xe4b601ae,
+ 0x05befd11,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x0195: mmctx_exec_loop */
+/* 0x0195: mmctx_wait_free */
+ 0xc5008e04,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f60b,
+ 0x05e9fd00,
+ 0x01c80080,
+ 0xbd000ef6,
+ 0x04c0b604,
+ 0x1bf4cda4,
+ 0x02abc8df,
+/* 0x01bf: mmctx_fini_wait */
+ 0x8b1c1bf4,
+ 0xcf01c500,
+ 0xb4f000bb,
+ 0x10b4b01f,
+ 0x0af31bf4,
+ 0x00b87e05,
+ 0x250ef400,
+/* 0x01d8: mmctx_stop */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x12b9f00c,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x01ed: mmctx_stop_wait */
+ 0xc5008b04,
+ 0x00bbcf01,
+ 0xf412bbc8,
+/* 0x01fa: mmctx_done */
+ 0x94bdf61b,
+ 0x800199f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x020a: strand_wait */
+ 0xa0f900f8,
+ 0xb87e020a,
+ 0xa0fc0000,
+/* 0x0216: strand_pre */
+ 0x0c0900f8,
+ 0x024afc80,
+ 0xbd0009f6,
+ 0x020a7e04,
+/* 0x0227: strand_post */
+ 0x0900f800,
+ 0x4afc800d,
+ 0x0009f602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0238: strand_set */
+ 0xfc800f0c,
+ 0x0cf6024f,
+ 0x0c04bd00,
+ 0x4afc800b,
+ 0x000cf602,
+ 0xfc8004bd,
+ 0x0ef6024f,
+ 0x0c04bd00,
+ 0x4afc800a,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0268: strand_ctx_init */
+ 0x99f094bd,
+ 0x37008003,
+ 0x0009f602,
+ 0x167e04bd,
+ 0x030e0002,
+ 0x0002387e,
+ 0xfc80c4bd,
+ 0x0cf60247,
+ 0x0c04bd00,
+ 0x4afc8001,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x0c920002,
+ 0x46fc8001,
+ 0x000cf602,
+ 0x020c04bd,
+ 0x024afc80,
+ 0xbd000cf6,
+ 0x020a7e04,
+ 0x02277e00,
+ 0x42008800,
+ 0x20008902,
+ 0x0099cf02,
+/* 0x02c7: ctx_init_strand_loop */
+ 0xf608fe95,
+ 0x8ef6008e,
+ 0x808acf40,
+ 0xb606a5b6,
+ 0xeabb01a0,
+ 0x0480b600,
+ 0xf40192b6,
+ 0xe4b6e81b,
+ 0xf2efbc08,
+ 0x99f094bd,
+ 0x17008003,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x02f8: error */
+ 0x02050080,
+ 0xbd000ff6,
+ 0x80010f04,
+ 0xf6030700,
+ 0x04bd000f,
+/* 0x030e: init */
+ 0x04bd00f8,
+ 0x410007fe,
+ 0x11cf4200,
+ 0x0911e700,
+ 0x0814b601,
+ 0x020014fe,
+ 0x12004002,
+ 0xbd0002f6,
+ 0x05c94104,
+ 0xbd0010fe,
+ 0x07004024,
+ 0xbd0002f6,
+ 0x20034204,
+ 0x01010080,
+ 0xbd0002f6,
+ 0x20044204,
+ 0x01010480,
+ 0xbd0002f6,
+ 0x200b4204,
+ 0x01010880,
+ 0xbd0002f6,
+ 0x200c4204,
+ 0x01011c80,
+ 0xbd0002f6,
+ 0x01039204,
+ 0x03090080,
+ 0xbd0003f6,
+ 0x87044204,
+ 0xf6040040,
+ 0x04bd0002,
+ 0x00400402,
+ 0x0002f603,
+ 0x31f404bd,
+ 0x96048e10,
+ 0x00657e40,
+ 0xc7feb200,
+ 0x01b590f1,
+ 0x1ff4f003,
+ 0x01020fb5,
+ 0x041fbb01,
+ 0x800112b6,
+ 0xf6010300,
+ 0x04bd0001,
+ 0x01040080,
+ 0xbd0001f6,
+ 0x01004104,
+ 0x627e020f,
+ 0x717e0006,
+ 0x100f0006,
+ 0x0006b37e,
+ 0x98000e98,
+ 0x207e010f,
+ 0x14950001,
+ 0xc0008008,
+ 0x0004f601,
+ 0x008004bd,
+ 0x04f601c1,
+ 0xb704bd00,
+ 0xbb130030,
+ 0xf5b6001f,
+ 0xd3008002,
+ 0x000ff601,
+ 0x15b604bd,
+ 0x0110b608,
+ 0xb20814b6,
+ 0x02687e1f,
+ 0x001fbb00,
+ 0x84020398,
+/* 0x041f: init_gpc */
+ 0xb8502000,
+ 0x0008044e,
+ 0x8f7e1fb2,
+ 0x4eb80000,
+ 0xbd00010c,
+ 0x008f7ef4,
+ 0x044eb800,
+ 0x8f7e0001,
+ 0x4eb80000,
+ 0x0f000100,
+ 0x008f7e02,
+ 0x004eb800,
+/* 0x044e: init_gpc_wait */
+ 0x657e0008,
+ 0xffc80000,
+ 0xf90bf41f,
+ 0x08044eb8,
+ 0x00657e00,
+ 0x001fbb00,
+ 0x800040b7,
+ 0xf40132b6,
+ 0x000fb41b,
+ 0x0006b37e,
+ 0x627e000f,
+ 0x00800006,
+ 0x01f60201,
+ 0xbd04bd00,
+ 0x1f19f014,
+ 0x02300080,
+ 0xbd0001f6,
+/* 0x0491: main */
+ 0x0031f404,
+ 0x0d0028f4,
+ 0x00377e10,
+ 0xf401f400,
+ 0x4001e4b1,
+ 0x00c71bf5,
+ 0x99f094bd,
+ 0x37008004,
+ 0x0009f602,
+ 0x008104bd,
+ 0x11cf02c0,
+ 0xc1008200,
+ 0x0022cf02,
+ 0xf41f13c8,
+ 0x23c8770b,
+ 0x550bf41f,
+ 0x12b220f9,
+ 0x99f094bd,
+ 0x37008007,
+ 0x0009f602,
+ 0x32f404bd,
+ 0x0231f401,
+ 0x0008367e,
+ 0x99f094bd,
+ 0x17008007,
+ 0x0009f602,
+ 0x20fc04bd,
+ 0x99f094bd,
+ 0x37008006,
+ 0x0009f602,
+ 0x31f404bd,
+ 0x08367e01,
+ 0xf094bd00,
+ 0x00800699,
+ 0x09f60217,
+ 0xf404bd00,
+/* 0x0522: chsw_prev_no_next */
+ 0x20f92f0e,
+ 0x32f412b2,
+ 0x0232f401,
+ 0x0008367e,
+ 0x008020fc,
+ 0x02f602c0,
+ 0xf404bd00,
+/* 0x053e: chsw_no_prev */
+ 0x23c8130e,
+ 0x0d0bf41f,
+ 0xf40131f4,
+ 0x367e0232,
+/* 0x054e: chsw_done */
+ 0x01020008,
+ 0x02c30080,
+ 0xbd0002f6,
+ 0xf094bd04,
+ 0x00800499,
+ 0x09f60217,
+ 0xf504bd00,
+/* 0x056b: main_not_ctx_switch */
+ 0xb0ff2a0e,
+ 0x1bf401e4,
+ 0x7ef2b20c,
+ 0xf40007d6,
+/* 0x057a: main_not_ctx_chan */
+ 0xe4b0400e,
+ 0x2c1bf402,
+ 0x99f094bd,
+ 0x37008007,
+ 0x0009f602,
+ 0x32f404bd,
+ 0x0232f401,
+ 0x0008367e,
+ 0x99f094bd,
+ 0x17008007,
+ 0x0009f602,
+ 0x0ef404bd,
+/* 0x05a9: main_not_ctx_save */
+ 0x10ef9411,
+ 0x7e01f5f0,
+ 0xf50002f8,
+/* 0x05b7: main_done */
+ 0xbdfede0e,
+ 0x1f29f024,
+ 0x02300080,
+ 0xbd0002f6,
+ 0xcc0ef504,
+/* 0x05c9: ih */
+ 0xfe80f9fe,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0x004a04bd,
+ 0x00aacf02,
+ 0xf404abc4,
+ 0x100d230b,
+ 0xcf1a004e,
+ 0x004f00ee,
+ 0x00ffcf19,
+ 0x0000047e,
+ 0x0400b0b7,
+ 0x0040010e,
+ 0x000ef61d,
+/* 0x060a: ih_no_fifo */
+ 0xabe404bd,
+ 0x0bf40100,
+ 0x4e100d0c,
+ 0x047e4001,
+/* 0x061a: ih_no_ctxsw */
+ 0xabe40000,
+ 0x0bf40400,
+ 0x01004b10,
+ 0x448ebfb2,
+ 0x8f7e4001,
+/* 0x062e: ih_no_fwmthd */
+ 0x044b0000,
+ 0xffb0bd01,
+ 0x0bf4b4ab,
+ 0x0700800c,
+ 0x000bf603,
+/* 0x0642: ih_no_other */
+ 0x004004bd,
+ 0x000af601,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x0662: ctx_4170s */
+ 0xf5f001f8,
+ 0x8effb210,
+ 0x7e404170,
+ 0xf800008f,
+/* 0x0671: ctx_4170w */
+ 0x41708e00,
+ 0x00657e40,
+ 0xf0ffb200,
+ 0x1bf410f4,
+/* 0x0683: ctx_redswitch */
+ 0x4e00f8f3,
+ 0xe5f00200,
+ 0x20e5f040,
+ 0x8010e5f0,
+ 0xf6018500,
+ 0x04bd000e,
+/* 0x069a: ctx_redswitch_delay */
+ 0xf2b6080f,
+ 0xfd1bf401,
+ 0x0400e5f1,
+ 0x0100e5f1,
+ 0x01850080,
+ 0xbd000ef6,
+/* 0x06b3: ctx_86c */
+ 0x8000f804,
+ 0xf6022300,
+ 0x04bd000f,
+ 0x148effb2,
+ 0x8f7e408a,
+ 0xffb20000,
+ 0x41a88c8e,
+ 0x00008f7e,
+/* 0x06d2: ctx_mem */
+ 0x008000f8,
+ 0x0ff60284,
+/* 0x06db: ctx_mem_wait */
+ 0x8f04bd00,
+ 0xcf028400,
+ 0xfffd00ff,
+ 0xf61bf405,
+/* 0x06ea: ctx_load */
+ 0x94bd00f8,
+ 0x800599f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0xb87e0c0a,
+ 0xf4bd0000,
+ 0x02890080,
+ 0xbd000ff6,
+ 0xc1008004,
+ 0x0002f602,
+ 0x008004bd,
+ 0x02f60283,
+ 0x0f04bd00,
+ 0x06d27e07,
+ 0xc0008000,
+ 0x0002f602,
+ 0x0bfe04bd,
+ 0x1f2af000,
+ 0xb60424b6,
+ 0x94bd0220,
+ 0x800899f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0x02810080,
+ 0xbd0002f6,
+ 0x0000d204,
+ 0x25f08000,
+ 0x88008002,
+ 0x0002f602,
+ 0x100104bd,
+ 0xf0020042,
+ 0x12fa0223,
+ 0xbd03f805,
+ 0x0899f094,
+ 0x02170080,
+ 0xbd0009f6,
+ 0x81019804,
+ 0x981814b6,
+ 0x25b68002,
+ 0x0512fd08,
+ 0xbd1601b5,
+ 0x0999f094,
+ 0x02370080,
+ 0xbd0009f6,
+ 0x81008004,
+ 0x0001f602,
+ 0x010204bd,
+ 0x02880080,
+ 0xbd0002f6,
+ 0x01004104,
+ 0xfa0613f0,
+ 0x03f80501,
+ 0x99f094bd,
+ 0x17008009,
+ 0x0009f602,
+ 0x94bd04bd,
+ 0x800599f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x07d6: ctx_chan */
+ 0xea7e00f8,
+ 0x0c0a0006,
+ 0x0000b87e,
+ 0xd27e050f,
+ 0x00f80006,
+/* 0x07e8: ctx_mmio_exec */
+ 0x80410398,
+ 0xf6028100,
+ 0x04bd0003,
+/* 0x07f6: ctx_mmio_loop */
+ 0x34c434bd,
+ 0x0e1bf4ff,
+ 0xf0020045,
+ 0x35fa0653,
+/* 0x0807: ctx_mmio_pull */
+ 0x9803f805,
+ 0x4f98804e,
+ 0x008f7e81,
+ 0x0830b600,
+ 0xf40112b6,
+/* 0x081a: ctx_mmio_done */
+ 0x0398df1b,
+ 0x81008016,
+ 0x0003f602,
+ 0x00b504bd,
+ 0x01004140,
+ 0xfa0613f0,
+ 0x03f80601,
+/* 0x0836: ctx_xfer */
+ 0x040e00f8,
+ 0x03020080,
+ 0xbd000ef6,
+/* 0x0841: ctx_xfer_idle */
+ 0x00008e04,
+ 0x00eecf03,
+ 0x2000e4f1,
+ 0xf4f51bf4,
+ 0x02f40611,
+/* 0x0855: ctx_xfer_pre */
+ 0x7e100f0c,
+ 0xf40006b3,
+/* 0x085e: ctx_xfer_pre_load */
+ 0x020f1b11,
+ 0x0006627e,
+ 0x0006717e,
+ 0x0006837e,
+ 0x627ef4bd,
+ 0xea7e0006,
+/* 0x0876: ctx_xfer_exec */
+ 0x01980006,
+ 0x8024bd16,
+ 0xf6010500,
+ 0x04bd0002,
+ 0x008e1fb2,
+ 0x8f7e41a5,
+ 0xfcf00000,
+ 0x022cf001,
+ 0xfd0124b6,
+ 0xffb205f2,
+ 0x41a5048e,
+ 0x00008f7e,
+ 0x0002167e,
+ 0xfc8024bd,
+ 0x02f60247,
+ 0xf004bd00,
+ 0x20b6012c,
+ 0x4afc8003,
+ 0x0002f602,
+ 0xacf004bd,
+ 0x06a5f001,
+ 0x0c98000b,
+ 0x010d9800,
+ 0x3d7e000e,
+ 0x080a0001,
+ 0x0000ec7e,
+ 0x00020a7e,
+ 0x0a1201f4,
+ 0x00b87e0c,
+ 0x7e050f00,
+ 0xf40006d2,
+/* 0x08f2: ctx_xfer_post */
+ 0x020f2d02,
+ 0x0006627e,
+ 0xb37ef4bd,
+ 0x277e0006,
+ 0x717e0002,
+ 0xf4bd0006,
+ 0x0006627e,
+ 0x981011f4,
+ 0x11fd4001,
+ 0x070bf405,
+ 0x0007e87e,
+/* 0x091c: ctx_xfer_no_post_mmio */
+/* 0x091c: ctx_xfer_done */
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
index 4750984bf380..64dfd75192bf 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
@@ -342,7 +342,7 @@ uint32_t nv108_grhub_code[] = {
0xb4f000bb,
0x10b4b01f,
0x0af31bf4,
- 0x00b87e02,
+ 0x00b87e05,
0x250ef400,
/* 0x01d8: mmctx_stop */
0xb600abc8,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index 132f684b1946..f8f7b278a13f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -361,7 +361,7 @@ uint32_t nvc0_grhub_code[] = {
0x1fb4f000,
0xf410b4b0,
0xa7f0f01b,
- 0xd021f402,
+ 0xd021f405,
/* 0x0223: mmctx_stop */
0xc82b0ef4,
0xb4b600ab,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
index 84af82418987..624215a005b0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
@@ -361,7 +361,7 @@ uint32_t nvd7_grhub_code[] = {
0x1fb4f000,
0xf410b4b0,
0xa7f0f01b,
- 0xd021f402,
+ 0xd021f405,
/* 0x0223: mmctx_stop */
0xc82b0ef4,
0xb4b600ab,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index 1c179bdd48cc..6547b3dfc7ed 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -361,7 +361,7 @@ uint32_t nve0_grhub_code[] = {
0x1fb4f000,
0xf410b4b0,
0xa7f0f01b,
- 0xd021f402,
+ 0xd021f405,
/* 0x0223: mmctx_stop */
0xc82b0ef4,
0xb4b600ab,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
index 229c0ae37228..a5aee5a4302f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
@@ -361,7 +361,7 @@ uint32_t nvf0_grhub_code[] = {
0x1fb4f000,
0xf410b4b0,
0xa7f0f01b,
- 0xd021f402,
+ 0xd021f405,
/* 0x0223: mmctx_stop */
0xc82b0ef4,
0xb4b600ab,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
index 6ffe28307dbd..a47d49db5232 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
@@ -132,6 +132,7 @@
#define NV_PGRAPH_GPCX_GPCCS_FIFO_CMD 0x41a068
#define NV_PGRAPH_GPCX_GPCCS_FIFO_ACK 0x41a074
#define NV_PGRAPH_GPCX_GPCCS_UNITS 0x41a608
+#define NV_PGRAPH_GPCX_GPCCS_CAPS 0x41a108
#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH 0x41a614
#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11 0x00000800
#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE 0x00000200
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
new file mode 100644
index 000000000000..21c5f31d607f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/P0260.h>
+
+#include "nvc0.h"
+#include "ctxnvc0.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+gm107_graph_sclass[] = {
+ { 0x902d, &nouveau_object_ofuncs },
+ { 0xa140, &nouveau_object_ofuncs },
+ { 0xb097, &nouveau_object_ofuncs },
+ { 0xb0c0, &nouveau_object_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+gm107_graph_init_main_0[] = {
+ { 0x400080, 1, 0x04, 0x003003c2 },
+ { 0x400088, 1, 0x04, 0x0001bfe7 },
+ { 0x40008c, 1, 0x04, 0x00060000 },
+ { 0x400090, 1, 0x04, 0x00000030 },
+ { 0x40013c, 1, 0x04, 0x003901f3 },
+ { 0x400140, 1, 0x04, 0x00000100 },
+ { 0x400144, 1, 0x04, 0x00000000 },
+ { 0x400148, 1, 0x04, 0x00000110 },
+ { 0x400138, 1, 0x04, 0x00000000 },
+ { 0x400130, 2, 0x04, 0x00000000 },
+ { 0x400124, 1, 0x04, 0x00000002 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_ds_0[] = {
+ { 0x405844, 1, 0x04, 0x00ffffff },
+ { 0x405850, 1, 0x04, 0x00000000 },
+ { 0x405900, 1, 0x04, 0x00000000 },
+ { 0x405908, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_scc_0[] = {
+ { 0x40803c, 1, 0x04, 0x00000010 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_sked_0[] = {
+ { 0x407010, 1, 0x04, 0x00000000 },
+ { 0x407040, 1, 0x04, 0x40440424 },
+ { 0x407048, 1, 0x04, 0x0000000a },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_prop_0[] = {
+ { 0x418408, 1, 0x04, 0x00000000 },
+ { 0x4184a0, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_setup_1[] = {
+ { 0x4188c8, 2, 0x04, 0x00000000 },
+ { 0x4188d0, 1, 0x04, 0x00010000 },
+ { 0x4188d4, 1, 0x04, 0x00010201 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_zcull_0[] = {
+ { 0x418910, 1, 0x04, 0x00010001 },
+ { 0x418914, 1, 0x04, 0x00000301 },
+ { 0x418918, 1, 0x04, 0x00800000 },
+ { 0x418930, 2, 0x04, 0x00000000 },
+ { 0x418980, 1, 0x04, 0x77777770 },
+ { 0x418984, 3, 0x04, 0x77777777 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_gpc_unk_1[] = {
+ { 0x418d00, 1, 0x04, 0x00000000 },
+ { 0x418f00, 1, 0x04, 0x00000400 },
+ { 0x418f08, 1, 0x04, 0x00000000 },
+ { 0x418e08, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_tpccs_0[] = {
+ { 0x419dc4, 1, 0x04, 0x00000000 },
+ { 0x419dc8, 1, 0x04, 0x00000501 },
+ { 0x419dd0, 1, 0x04, 0x00000000 },
+ { 0x419dd4, 1, 0x04, 0x00000100 },
+ { 0x419dd8, 1, 0x04, 0x00000001 },
+ { 0x419ddc, 1, 0x04, 0x00000002 },
+ { 0x419de0, 1, 0x04, 0x00000001 },
+ { 0x419d0c, 1, 0x04, 0x00000000 },
+ { 0x419d10, 1, 0x04, 0x00000014 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_tex_0[] = {
+ { 0x419ab0, 1, 0x04, 0x00000000 },
+ { 0x419ab8, 1, 0x04, 0x000000e7 },
+ { 0x419abc, 1, 0x04, 0x00000000 },
+ { 0x419acc, 1, 0x04, 0x000000ff },
+ { 0x419ac0, 1, 0x04, 0x00000000 },
+ { 0x419aa8, 2, 0x04, 0x00000000 },
+ { 0x419ad0, 2, 0x04, 0x00000000 },
+ { 0x419ae0, 2, 0x04, 0x00000000 },
+ { 0x419af0, 4, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_pe_0[] = {
+ { 0x419900, 1, 0x04, 0x000000ff },
+ { 0x41980c, 1, 0x04, 0x00000010 },
+ { 0x419844, 1, 0x04, 0x00000000 },
+ { 0x419838, 1, 0x04, 0x000000ff },
+ { 0x419850, 1, 0x04, 0x00000004 },
+ { 0x419854, 2, 0x04, 0x00000000 },
+ { 0x419894, 3, 0x04, 0x00100401 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_l1c_0[] = {
+ { 0x419c98, 1, 0x04, 0x00000000 },
+ { 0x419cc0, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_sm_0[] = {
+ { 0x419e30, 1, 0x04, 0x000000ff },
+ { 0x419e00, 1, 0x04, 0x00000000 },
+ { 0x419ea0, 1, 0x04, 0x00000000 },
+ { 0x419ee4, 1, 0x04, 0x00000000 },
+ { 0x419ea4, 1, 0x04, 0x00000100 },
+ { 0x419ea8, 1, 0x04, 0x01000000 },
+ { 0x419ee8, 1, 0x04, 0x00000091 },
+ { 0x419eb4, 1, 0x04, 0x00000000 },
+ { 0x419ebc, 2, 0x04, 0x00000000 },
+ { 0x419edc, 1, 0x04, 0x000c1810 },
+ { 0x419ed8, 1, 0x04, 0x00000000 },
+ { 0x419ee0, 1, 0x04, 0x00000000 },
+ { 0x419f74, 1, 0x04, 0x00005155 },
+ { 0x419f80, 4, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_l1c_1[] = {
+ { 0x419ccc, 2, 0x04, 0x00000000 },
+ { 0x419c80, 1, 0x04, 0x3f006022 },
+ { 0x419c88, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_pes_0[] = {
+ { 0x41be50, 1, 0x04, 0x000000ff },
+ { 0x41be04, 1, 0x04, 0x00000000 },
+ { 0x41be08, 1, 0x04, 0x00000004 },
+ { 0x41be0c, 1, 0x04, 0x00000008 },
+ { 0x41be10, 1, 0x04, 0x0e3b8bc7 },
+ { 0x41be14, 2, 0x04, 0x00000000 },
+ { 0x41be3c, 5, 0x04, 0x00100401 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_wwdx_0[] = {
+ { 0x41bfd4, 1, 0x04, 0x00800000 },
+ { 0x41bfdc, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_cbm_0[] = {
+ { 0x41becc, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_be_0[] = {
+ { 0x408890, 1, 0x04, 0x000000ff },
+ { 0x40880c, 1, 0x04, 0x00000000 },
+ { 0x408850, 1, 0x04, 0x00000004 },
+ { 0x408878, 1, 0x04, 0x00c81603 },
+ { 0x40887c, 1, 0x04, 0x80543432 },
+ { 0x408880, 1, 0x04, 0x0010581e },
+ { 0x408884, 1, 0x04, 0x00001205 },
+ { 0x408974, 1, 0x04, 0x000000ff },
+ { 0x408910, 9, 0x04, 0x00000000 },
+ { 0x408950, 1, 0x04, 0x00000000 },
+ { 0x408954, 1, 0x04, 0x0000ffff },
+ { 0x408958, 1, 0x04, 0x00000034 },
+ { 0x40895c, 1, 0x04, 0x8531a003 },
+ { 0x408960, 1, 0x04, 0x0561985a },
+ { 0x408964, 1, 0x04, 0x04e15c4f },
+ { 0x408968, 1, 0x04, 0x02808833 },
+ { 0x40896c, 1, 0x04, 0x01f02438 },
+ { 0x408970, 1, 0x04, 0x00012c00 },
+ { 0x408984, 1, 0x04, 0x00000000 },
+ { 0x408988, 1, 0x04, 0x08040201 },
+ { 0x40898c, 1, 0x04, 0x80402010 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gm107_graph_init_sm_1[] = {
+ { 0x419e5c, 1, 0x04, 0x00000000 },
+ { 0x419e58, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+gm107_graph_pack_mmio[] = {
+ { gm107_graph_init_main_0 },
+ { nvf0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvc0_graph_init_pd_0 },
+ { gm107_graph_init_ds_0 },
+ { gm107_graph_init_scc_0 },
+ { gm107_graph_init_sked_0 },
+ { nvf0_graph_init_cwd_0 },
+ { gm107_graph_init_prop_0 },
+ { nv108_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { gm107_graph_init_setup_1 },
+ { gm107_graph_init_zcull_0 },
+ { nvc0_graph_init_gpm_0 },
+ { gm107_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { gm107_graph_init_tpccs_0 },
+ { gm107_graph_init_tex_0 },
+ { gm107_graph_init_pe_0 },
+ { gm107_graph_init_l1c_0 },
+ { nvc0_graph_init_mpc_0 },
+ { gm107_graph_init_sm_0 },
+ { gm107_graph_init_l1c_1 },
+ { gm107_graph_init_pes_0 },
+ { gm107_graph_init_wwdx_0 },
+ { gm107_graph_init_cbm_0 },
+ { gm107_graph_init_be_0 },
+ { gm107_graph_init_sm_1 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+gm107_graph_init_bios(struct nvc0_graph_priv *priv)
+{
+ static const struct {
+ u32 ctrl;
+ u32 data;
+ } regs[] = {
+ { 0x419ed8, 0x419ee0 },
+ { 0x419ad0, 0x419ad4 },
+ { 0x419ae0, 0x419ae4 },
+ { 0x419af0, 0x419af4 },
+ { 0x419af8, 0x419afc },
+ };
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_P0260E infoE;
+ struct nvbios_P0260X infoX;
+ int E = -1, X;
+ u8 ver, hdr;
+
+ while (nvbios_P0260Ep(bios, ++E, &ver, &hdr, &infoE)) {
+		if (X = -1, E < ARRAY_SIZE(regs)) { /* comma operator: X restarts at -1 for each table entry */
+ nv_wr32(priv, regs[E].ctrl, infoE.data);
+ while (nvbios_P0260Xp(bios, ++X, &ver, &hdr, &infoX))
+ nv_wr32(priv, regs[E].data, infoX.data);
+ }
+ }
+}
+
+int
+gm107_graph_init(struct nouveau_object *object)
+{
+ struct nvc0_graph_oclass *oclass = (void *)object->oclass;
+ struct nvc0_graph_priv *priv = (void *)object;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+ u32 data[TPC_MAX / 8] = {};
+ u8 tpcnr[GPC_MAX];
+ int gpc, tpc, ppc, rop;
+ int ret, i;
+
+ ret = nouveau_graph_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
+ nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
+ nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
+ nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
+ nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+
+ nvc0_graph_mmio(priv, oclass->mmio);
+
+ gm107_graph_init_bios(priv);
+
+ nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
+ nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
+ nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
+ nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
+ priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ priv->tpc_total);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
+ nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
+
+ nv_wr32(priv, 0x400500, 0x00010001);
+
+ nv_wr32(priv, 0x400100, 0xffffffff);
+ nv_wr32(priv, 0x40013c, 0xffffffff);
+ nv_wr32(priv, 0x400124, 0x00000002);
+ nv_wr32(priv, 0x409c24, 0x000e0000);
+
+ nv_wr32(priv, 0x404000, 0xc0000000);
+ nv_wr32(priv, 0x404600, 0xc0000000);
+ nv_wr32(priv, 0x408030, 0xc0000000);
+ nv_wr32(priv, 0x404490, 0xc0000000);
+ nv_wr32(priv, 0x406018, 0xc0000000);
+ nv_wr32(priv, 0x407020, 0x40000000);
+ nv_wr32(priv, 0x405840, 0xc0000000);
+ nv_wr32(priv, 0x405844, 0x00ffffff);
+ nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < 2 /* priv->ppc_nr[gpc] */; ppc++)
+ nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+ }
+ nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
+ nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
+ nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+
+ nv_wr32(priv, 0x400108, 0xffffffff);
+ nv_wr32(priv, 0x400138, 0xffffffff);
+ nv_wr32(priv, 0x400118, 0xffffffff);
+ nv_wr32(priv, 0x400130, 0xffffffff);
+ nv_wr32(priv, 0x40011c, 0xffffffff);
+ nv_wr32(priv, 0x400134, 0xffffffff);
+
+ nv_wr32(priv, 0x400054, 0x2c350f63);
+ return nvc0_graph_init_ctxctl(priv);
+}
+
+#include "fuc/hubgm107.fuc5.h"
+
+static struct nvc0_graph_ucode
+gm107_graph_fecs_ucode = {
+ .code.data = gm107_grhub_code,
+ .code.size = sizeof(gm107_grhub_code),
+ .data.data = gm107_grhub_data,
+ .data.size = sizeof(gm107_grhub_data),
+};
+
+#include "fuc/gpcgm107.fuc5.h"
+
+static struct nvc0_graph_ucode
+gm107_graph_gpccs_ucode = {
+ .code.data = gm107_grgpc_code,
+ .code.size = sizeof(gm107_grgpc_code),
+ .data.data = gm107_grgpc_data,
+ .data.size = sizeof(gm107_grgpc_data),
+};
+
+struct nouveau_oclass *
+gm107_graph_oclass = &(struct nvc0_graph_oclass) {
+ .base.handle = NV_ENGINE(GR, 0x07),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_ctor,
+ .dtor = nvc0_graph_dtor,
+ .init = gm107_graph_init,
+ .fini = _nouveau_graph_fini,
+ },
+ .cclass = &gm107_grctx_oclass,
+ .sclass = gm107_graph_sclass,
+ .mmio = gm107_graph_pack_mmio,
+	.fecs.ucode = 0 ? &gm107_graph_fecs_ucode : NULL, /* 0 ? ... : NULL, so the built-in FECS ucode stays disabled for now */
+ .gpccs.ucode = &gm107_graph_gpccs_ucode,
+}.base;
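
The gm107 tables above, like every other chipset's, use the same four-column row format: base register, entry count, stride, and value. Each row is expanded into a strided run of identical writes by nvc0_graph_mmio() (reworked later in this patch to walk packs). A minimal stand-alone sketch of that expansion, with a renamed struct and a print callback standing in for nv_wr32(), since only the table shape is taken from the patch:

/* Expands one init-table row into MMIO writes, mirroring the walk in
 * nvc0_graph_mmio(): write data at addr, addr + pitch, ... for count
 * registers. */
#include <stdint.h>
#include <stdio.h>

struct graph_init_sketch {
	uint32_t addr;	/* first register */
	uint8_t  count;	/* number of registers */
	uint8_t  pitch;	/* byte stride between them */
	uint32_t data;	/* value written to each */
};

static void expand_init(const struct graph_init_sketch *init,
			void (*wr32)(uint32_t addr, uint32_t data))
{
	uint32_t next = init->addr + init->count * init->pitch;
	uint32_t addr = init->addr;

	while (addr < next) {
		wr32(addr, init->data);
		addr += init->pitch;
	}
}

static void print_wr32(uint32_t addr, uint32_t data)
{
	printf("wr32 0x%06x <- 0x%08x\n", (unsigned)addr, (unsigned)data);
}

int main(void)
{
	/* e.g. { 0x418984, 3, 0x04, 0x77777777 } from gm107_graph_init_zcull_0 */
	const struct graph_init_sketch row = { 0x418984, 3, 0x04, 0x77777777 };

	expand_init(&row, print_wr32);	/* writes 0x418984, 0x418988, 0x41898c */
	return 0;
}
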
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
index e1af65ead379..00ea1a089822 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
@@ -23,6 +23,7 @@
*/
#include "nvc0.h"
+#include "ctxnvc0.h"
/*******************************************************************************
* Graphics object classes
@@ -38,11 +39,11 @@ nv108_graph_sclass[] = {
};
/*******************************************************************************
- * PGRAPH engine/subdev functions
+ * PGRAPH register lists
******************************************************************************/
-static struct nvc0_graph_init
-nv108_graph_init_regs[] = {
+static const struct nvc0_graph_init
+nv108_graph_init_main_0[] = {
{ 0x400080, 1, 0x04, 0x003083c2 },
{ 0x400088, 1, 0x04, 0x0001bfe7 },
{ 0x40008c, 1, 0x04, 0x00000000 },
@@ -57,66 +58,46 @@ nv108_graph_init_regs[] = {
{}
};
-struct nvc0_graph_init
-nv108_graph_init_unk58xx[] = {
+static const struct nvc0_graph_init
+nv108_graph_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405900, 1, 0x04, 0x00000000 },
{ 0x405908, 1, 0x04, 0x00000000 },
- { 0x405928, 1, 0x04, 0x00000000 },
- { 0x40592c, 1, 0x04, 0x00000000 },
+ { 0x405928, 2, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nv108_graph_init_gpc[] = {
- { 0x418408, 1, 0x04, 0x00000000 },
- { 0x4184a0, 3, 0x04, 0x00000000 },
+const struct nvc0_graph_init
+nv108_graph_init_gpc_unk_0[] = {
{ 0x418604, 1, 0x04, 0x00000000 },
{ 0x418680, 1, 0x04, 0x00000000 },
{ 0x418714, 1, 0x04, 0x00000000 },
{ 0x418384, 2, 0x04, 0x00000000 },
- { 0x418814, 3, 0x04, 0x00000000 },
- { 0x418b04, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nv108_graph_init_setup_1[] = {
{ 0x4188c8, 2, 0x04, 0x00000000 },
{ 0x4188d0, 1, 0x04, 0x00010000 },
{ 0x4188d4, 1, 0x04, 0x00000201 },
- { 0x418910, 1, 0x04, 0x00010001 },
- { 0x418914, 1, 0x04, 0x00000301 },
- { 0x418918, 1, 0x04, 0x00800000 },
- { 0x418980, 1, 0x04, 0x77777770 },
- { 0x418984, 3, 0x04, 0x77777777 },
- { 0x418c04, 1, 0x04, 0x00000000 },
- { 0x418c64, 2, 0x04, 0x00000000 },
- { 0x418c88, 1, 0x04, 0x00000000 },
- { 0x418cb4, 2, 0x04, 0x00000000 },
- { 0x418d00, 1, 0x04, 0x00000000 },
- { 0x418d28, 2, 0x04, 0x00000000 },
- { 0x418f00, 1, 0x04, 0x00000400 },
- { 0x418f08, 1, 0x04, 0x00000000 },
- { 0x418f20, 2, 0x04, 0x00000000 },
- { 0x418e00, 1, 0x04, 0x00000000 },
- { 0x418e08, 1, 0x04, 0x00000000 },
- { 0x418e1c, 2, 0x04, 0x00000000 },
- { 0x41900c, 1, 0x04, 0x00000000 },
- { 0x419018, 1, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nv108_graph_init_tpc[] = {
- { 0x419d0c, 1, 0x04, 0x00000000 },
- { 0x419d10, 1, 0x04, 0x00000014 },
+static const struct nvc0_graph_init
+nv108_graph_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ac8, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
{ 0x419abc, 2, 0x04, 0x00000000 },
{ 0x419ab4, 1, 0x04, 0x00000000 },
{ 0x419aa8, 2, 0x04, 0x00000000 },
- { 0x41980c, 1, 0x04, 0x00000010 },
- { 0x419844, 1, 0x04, 0x00000000 },
- { 0x419850, 1, 0x04, 0x00000004 },
- { 0x419854, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nv108_graph_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419ca8, 1, 0x04, 0x00000000 },
{ 0x419cb0, 1, 0x04, 0x01000000 },
@@ -127,22 +108,47 @@ nv108_graph_init_tpc[] = {
{ 0x419cc0, 2, 0x04, 0x00000000 },
{ 0x419c80, 1, 0x04, 0x00000230 },
{ 0x419ccc, 2, 0x04, 0x00000000 },
- { 0x419c0c, 1, 0x04, 0x00000000 },
- { 0x419e00, 1, 0x04, 0x00000080 },
- { 0x419ea0, 1, 0x04, 0x00000000 },
- { 0x419ee4, 1, 0x04, 0x00000000 },
- { 0x419ea4, 1, 0x04, 0x00000100 },
- { 0x419ea8, 1, 0x04, 0x00000000 },
- { 0x419eb4, 1, 0x04, 0x00000000 },
- { 0x419ebc, 2, 0x04, 0x00000000 },
- { 0x419edc, 1, 0x04, 0x00000000 },
- { 0x419f00, 1, 0x04, 0x00000000 },
- { 0x419ed0, 1, 0x04, 0x00003234 },
- { 0x419f74, 1, 0x04, 0x00015555 },
- { 0x419f80, 4, 0x04, 0x00000000 },
{}
};
+static const struct nvc0_graph_pack
+nv108_graph_pack_mmio[] = {
+ { nv108_graph_init_main_0 },
+ { nvf0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvd9_graph_init_pd_0 },
+ { nv108_graph_init_ds_0 },
+ { nvc0_graph_init_scc_0 },
+ { nvf0_graph_init_sked_0 },
+ { nvf0_graph_init_cwd_0 },
+ { nvd9_graph_init_prop_0 },
+ { nv108_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { nv108_graph_init_setup_1 },
+ { nvc0_graph_init_zcull_0 },
+ { nvd9_graph_init_gpm_0 },
+ { nvf0_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { nve4_graph_init_tpccs_0 },
+ { nv108_graph_init_tex_0 },
+ { nve4_graph_init_pe_0 },
+ { nv108_graph_init_l1c_0 },
+ { nvc0_graph_init_mpc_0 },
+ { nvf0_graph_init_sm_0 },
+ { nvd7_graph_init_pes_0 },
+ { nvd7_graph_init_wwdx_0 },
+ { nvd7_graph_init_cbm_0 },
+ { nve4_graph_init_be_0 },
+ { nvc0_graph_init_fe_1 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
static int
nv108_graph_fini(struct nouveau_object *object, bool suspend)
{
@@ -180,25 +186,6 @@ nv108_graph_fini(struct nouveau_object *object, bool suspend)
return nouveau_graph_fini(&priv->base, suspend);
}
-static struct nvc0_graph_init *
-nv108_graph_init_mmio[] = {
- nv108_graph_init_regs,
- nvf0_graph_init_unk40xx,
- nvc0_graph_init_unk44xx,
- nvc0_graph_init_unk78xx,
- nvc0_graph_init_unk60xx,
- nvd9_graph_init_unk64xx,
- nv108_graph_init_unk58xx,
- nvc0_graph_init_unk80xx,
- nvf0_graph_init_unk70xx,
- nvf0_graph_init_unk5bxx,
- nv108_graph_init_gpc,
- nv108_graph_init_tpc,
- nve4_graph_init_unk,
- nve4_graph_init_unk88xx,
- NULL
-};
-
#include "fuc/hubnv108.fuc5.h"
static struct nvc0_graph_ucode
@@ -230,7 +217,7 @@ nv108_graph_oclass = &(struct nvc0_graph_oclass) {
},
.cclass = &nv108_grctx_oclass,
.sclass = nv108_graph_sclass,
- .mmio = nv108_graph_init_mmio,
+ .mmio = nv108_graph_pack_mmio,
.fecs.ucode = &nv108_graph_fecs_ucode,
.gpccs.ucode = &nv108_graph_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index b24559315903..d145e080899a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -349,7 +349,7 @@ nv20_graph_init(struct nouveau_object *object)
nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
/* begin RAM config */
- vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
+ vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 193a5de1b482..6477fbf6a550 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -484,7 +484,7 @@ nv40_graph_init(struct nouveau_object *object)
engine->tile_prog(engine, i);
/* begin RAM config */
- vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
+ vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
switch (nv_device(priv)->chipset) {
case 0x40:
nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 7a367c402978..2c7809e1a09b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -197,34 +197,35 @@ static const struct nouveau_bitfield nv50_pgraph_status[] = {
{ 0x00000080, "UNK7" },
{ 0x00000100, "CTXPROG" },
{ 0x00000200, "VFETCH" },
- { 0x00000400, "CCACHE_UNK4" },
- { 0x00000800, "STRMOUT_GSCHED_UNK5" },
- { 0x00001000, "UNK14XX" },
- { 0x00002000, "UNK24XX_CSCHED" },
- { 0x00004000, "UNK1CXX" },
+ { 0x00000400, "CCACHE_PREGEOM" },
+ { 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
+ { 0x00001000, "VCLIP" },
+ { 0x00002000, "RATTR_APLANE" },
+ { 0x00004000, "TRAST" },
{ 0x00008000, "CLIPID" },
{ 0x00010000, "ZCULL" },
{ 0x00020000, "ENG2D" },
- { 0x00040000, "UNK34XX" },
- { 0x00080000, "TPRAST" },
- { 0x00100000, "TPROP" },
- { 0x00200000, "TEX" },
- { 0x00400000, "TPVP" },
- { 0x00800000, "MP" },
+ { 0x00040000, "RMASK" },
+ { 0x00080000, "TPC_RAST" },
+ { 0x00100000, "TPC_PROP" },
+ { 0x00200000, "TPC_TEX" },
+ { 0x00400000, "TPC_GEOM" },
+ { 0x00800000, "TPC_MP" },
{ 0x01000000, "ROP" },
{}
};
static const char *const nv50_pgraph_vstatus_0[] = {
- "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
+ "VFETCH", "CCACHE", "PREGEOM", "POSTGEOM", "VATTR", "STRMOUT", "VCLIP",
+ NULL
};
static const char *const nv50_pgraph_vstatus_1[] = {
- "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL
+ "TPC_RAST", "TPC_PROP", "TPC_TEX", "TPC_GEOM", "TPC_MP", NULL
};
static const char *const nv50_pgraph_vstatus_2[] = {
- "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX",
+ "RATTR", "APLANE", "TRAST", "CLIPID", "ZCULL", "ENG2D", "RMASK",
"ROP", NULL
};
@@ -329,6 +330,15 @@ static const struct nouveau_bitfield nv50_mpc_traps[] = {
{}
};
+static const struct nouveau_bitfield nv50_tex_traps[] = {
+ { 0x00000001, "" }, /* any bit set? */
+ { 0x00000002, "FAULT" },
+ { 0x00000004, "STORAGE_TYPE_MISMATCH" },
+ { 0x00000008, "LINEAR_MISMATCH" },
+ { 0x00000020, "WRONG_MEMTYPE" },
+ {}
+};
+
static const struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
{ 0x00000001, "NOTIFY" },
{ 0x00000002, "IN" },
@@ -531,6 +541,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
nv_error(priv, "\t0x%08x: 0x%08x\n", r,
nv_rd32(priv, r));
+ if (ustatus) {
+ nv_error(priv, "%s - TP%d:", name, i);
+ nouveau_bitfield_print(nv50_tex_traps,
+ ustatus);
+ pr_cont("\n");
+ ustatus = 0;
+ }
}
break;
case 7: /* MP error */
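
The nv50_tex_traps table added above is decoded with nouveau_bitfield_print(), which walks a { mask, name } table and prints the name of every bit set in the status word. A compilable sketch of that decoding with local stand-ins for the nouveau helpers (struct layout and exact output format are assumptions, not copied from the driver):

#include <stdint.h>
#include <stdio.h>

struct bitfield_sketch { uint32_t mask; const char *name; };

static const struct bitfield_sketch tex_traps[] = {
	{ 0x00000002, "FAULT" },
	{ 0x00000004, "STORAGE_TYPE_MISMATCH" },
	{ 0x00000008, "LINEAR_MISMATCH" },
	{ 0x00000020, "WRONG_MEMTYPE" },
	{}
};

/* print the name of every set bit, then whatever is left over */
static void bitfield_print(const struct bitfield_sketch *bf, uint32_t value)
{
	for (; bf->mask; bf++) {
		if (value & bf->mask) {
			printf(" %s", bf->name);
			value &= ~bf->mask;
		}
	}
	if (value)
		printf(" (unknown bits 0x%08x)", (unsigned)value);
}

int main(void)
{
	printf("TP0:");
	bitfield_print(tex_traps, 0x00000006);	/* FAULT STORAGE_TYPE_MISMATCH */
	printf("\n");
	return 0;
}
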
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index a73ab209ea88..f3c7329da0a0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -23,6 +23,7 @@
*/
#include "nvc0.h"
+#include "ctxnvc0.h"
/*******************************************************************************
* Graphics object classes
@@ -146,11 +147,11 @@ nvc0_graph_context_dtor(struct nouveau_object *object)
}
/*******************************************************************************
- * PGRAPH engine/subdev functions
+ * PGRAPH register lists
******************************************************************************/
-struct nvc0_graph_init
-nvc0_graph_init_regs[] = {
+const struct nvc0_graph_init
+nvc0_graph_init_main_0[] = {
{ 0x400080, 1, 0x04, 0x003083c2 },
{ 0x400088, 1, 0x04, 0x00006fe7 },
{ 0x40008c, 1, 0x04, 0x00000000 },
@@ -165,95 +166,170 @@ nvc0_graph_init_regs[] = {
{}
};
-struct nvc0_graph_init
-nvc0_graph_init_unk40xx[] = {
+const struct nvc0_graph_init
+nvc0_graph_init_fe_0[] = {
{ 0x40415c, 1, 0x04, 0x00000000 },
{ 0x404170, 1, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvc0_graph_init_unk44xx[] = {
+const struct nvc0_graph_init
+nvc0_graph_init_pri_0[] = {
{ 0x404488, 2, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvc0_graph_init_unk78xx[] = {
+const struct nvc0_graph_init
+nvc0_graph_init_rstr2d_0[] = {
{ 0x407808, 1, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvc0_graph_init_unk60xx[] = {
+const struct nvc0_graph_init
+nvc0_graph_init_pd_0[] = {
{ 0x406024, 1, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvc0_graph_init_unk58xx[] = {
+const struct nvc0_graph_init
+nvc0_graph_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405908, 1, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvc0_graph_init_unk80xx[] = {
+const struct nvc0_graph_init
+nvc0_graph_init_scc_0[] = {
{ 0x40803c, 1, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvc0_graph_init_gpc[] = {
+const struct nvc0_graph_init
+nvc0_graph_init_prop_0[] = {
{ 0x4184a0, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_gpc_unk_0[] = {
{ 0x418604, 1, 0x04, 0x00000000 },
{ 0x418680, 1, 0x04, 0x00000000 },
{ 0x418714, 1, 0x04, 0x80000000 },
{ 0x418384, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_setup_0[] = {
{ 0x418814, 3, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_crstr_0[] = {
{ 0x418b04, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_setup_1[] = {
{ 0x4188c8, 1, 0x04, 0x80000000 },
{ 0x4188cc, 1, 0x04, 0x00000000 },
{ 0x4188d0, 1, 0x04, 0x00010000 },
{ 0x4188d4, 1, 0x04, 0x00000001 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_zcull_0[] = {
{ 0x418910, 1, 0x04, 0x00010001 },
{ 0x418914, 1, 0x04, 0x00000301 },
{ 0x418918, 1, 0x04, 0x00800000 },
{ 0x418980, 1, 0x04, 0x77777770 },
{ 0x418984, 3, 0x04, 0x77777777 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_gpm_0[] = {
{ 0x418c04, 1, 0x04, 0x00000000 },
{ 0x418c88, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
{ 0x418f08, 1, 0x04, 0x00000000 },
{ 0x418e00, 1, 0x04, 0x00000050 },
{ 0x418e08, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_gcc_0[] = {
{ 0x41900c, 1, 0x04, 0x00000000 },
{ 0x419018, 1, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nvc0_graph_init_tpc[] = {
+const struct nvc0_graph_init
+nvc0_graph_init_tpccs_0[] = {
{ 0x419d08, 2, 0x04, 0x00000000 },
{ 0x419d10, 1, 0x04, 0x00000014 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
{ 0x419abc, 2, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_pe_0[] = {
{ 0x41980c, 3, 0x04, 0x00000000 },
{ 0x419844, 1, 0x04, 0x00000000 },
{ 0x41984c, 1, 0x04, 0x00005bc5 },
{ 0x419850, 4, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419ca8, 1, 0x04, 0x80000000 },
{ 0x419cb4, 1, 0x04, 0x00000000 },
{ 0x419cb8, 1, 0x04, 0x00008bf4 },
{ 0x419cbc, 1, 0x04, 0x28137606 },
{ 0x419cc0, 2, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_wwdx_0[] = {
{ 0x419bd4, 1, 0x04, 0x00800000 },
{ 0x419bdc, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_tpccs_1[] = {
{ 0x419d2c, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc0_graph_init_mpc_0[] = {
{ 0x419c0c, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc0_graph_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000000 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ea4, 1, 0x04, 0x00000100 },
@@ -270,8 +346,8 @@ nvc0_graph_init_tpc[] = {
{}
};
-struct nvc0_graph_init
-nvc0_graph_init_unk88xx[] = {
+const struct nvc0_graph_init
+nvc0_graph_init_be_0[] = {
{ 0x40880c, 1, 0x04, 0x00000000 },
{ 0x408910, 9, 0x04, 0x00000000 },
{ 0x408950, 1, 0x04, 0x00000000 },
@@ -282,18 +358,64 @@ nvc0_graph_init_unk88xx[] = {
{}
};
-struct nvc0_graph_init
-nvc0_graph_tpc_0[] = {
- { 0x50405c, 1, 0x04, 0x00000001 },
+const struct nvc0_graph_init
+nvc0_graph_init_fe_1[] = {
+ { 0x4040f0, 1, 0x04, 0x00000000 },
{}
};
+const struct nvc0_graph_init
+nvc0_graph_init_pe_1[] = {
+ { 0x419880, 1, 0x04, 0x00000002 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+nvc0_graph_pack_mmio[] = {
+ { nvc0_graph_init_main_0 },
+ { nvc0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvc0_graph_init_pd_0 },
+ { nvc0_graph_init_ds_0 },
+ { nvc0_graph_init_scc_0 },
+ { nvc0_graph_init_prop_0 },
+ { nvc0_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { nvc0_graph_init_setup_1 },
+ { nvc0_graph_init_zcull_0 },
+ { nvc0_graph_init_gpm_0 },
+ { nvc0_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { nvc0_graph_init_tpccs_0 },
+ { nvc0_graph_init_tex_0 },
+ { nvc0_graph_init_pe_0 },
+ { nvc0_graph_init_l1c_0 },
+ { nvc0_graph_init_wwdx_0 },
+ { nvc0_graph_init_tpccs_1 },
+ { nvc0_graph_init_mpc_0 },
+ { nvc0_graph_init_sm_0 },
+ { nvc0_graph_init_be_0 },
+ { nvc0_graph_init_fe_1 },
+ { nvc0_graph_init_pe_1 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
void
-nvc0_graph_mmio(struct nvc0_graph_priv *priv, struct nvc0_graph_init *init)
+nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
{
- for (; init && init->count; init++) {
- u32 addr = init->addr, i;
- for (i = 0; i < init->count; i++) {
+ const struct nvc0_graph_pack *pack;
+ const struct nvc0_graph_init *init;
+
+ pack_for_each_init(init, pack, p) {
+ u32 next = init->addr + init->count * init->pitch;
+ u32 addr = init->addr;
+ while (addr < next) {
nv_wr32(priv, addr, init->data);
addr += init->pitch;
}
@@ -301,49 +423,53 @@ nvc0_graph_mmio(struct nvc0_graph_priv *priv, struct nvc0_graph_init *init)
}
void
-nvc0_graph_icmd(struct nvc0_graph_priv *priv, struct nvc0_graph_init *init)
+nvc0_graph_icmd(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
{
- u32 addr, data;
- int i, j;
+ const struct nvc0_graph_pack *pack;
+ const struct nvc0_graph_init *init;
+ u32 data = 0;
nv_wr32(priv, 0x400208, 0x80000000);
- for (i = 0; init->count; init++, i++) {
- if (!i || data != init->data) {
+
+ pack_for_each_init(init, pack, p) {
+ u32 next = init->addr + init->count * init->pitch;
+ u32 addr = init->addr;
+
+ if ((pack == p && init == p->init) || data != init->data) {
nv_wr32(priv, 0x400204, init->data);
data = init->data;
}
- addr = init->addr;
- for (j = 0; j < init->count; j++) {
+ while (addr < next) {
nv_wr32(priv, 0x400200, addr);
+ nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
addr += init->pitch;
- while (nv_rd32(priv, 0x400700) & 0x00000002) {}
}
}
+
nv_wr32(priv, 0x400208, 0x00000000);
}
void
-nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
+nvc0_graph_mthd(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
{
- struct nvc0_graph_mthd *mthd;
- struct nvc0_graph_init *init;
- int i = 0, j;
- u32 data;
-
- while ((mthd = &mthds[i++]) && (init = mthd->init)) {
- u32 addr = 0x80000000 | mthd->oclass;
- for (data = 0; init->count; init++) {
- if (init == mthd->init || data != init->data) {
- nv_wr32(priv, 0x40448c, init->data);
- data = init->data;
- }
+ const struct nvc0_graph_pack *pack;
+ const struct nvc0_graph_init *init;
+ u32 data = 0;
- addr = (addr & 0x8000ffff) | (init->addr << 14);
- for (j = 0; j < init->count; j++) {
- nv_wr32(priv, 0x404488, addr);
- addr += init->pitch << 14;
- }
+ pack_for_each_init(init, pack, p) {
+ u32 ctrl = 0x80000000 | pack->type;
+ u32 next = init->addr + init->count * init->pitch;
+ u32 addr = init->addr;
+
+ if ((pack == p && init == p->init) || data != init->data) {
+ nv_wr32(priv, 0x40448c, init->data);
+ data = init->data;
+ }
+
+ while (addr < next) {
+ nv_wr32(priv, 0x404488, ctrl | (addr << 14));
+ addr += init->pitch;
}
}
}
@@ -772,11 +898,12 @@ nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
static void
nvc0_graph_init_csdata(struct nvc0_graph_priv *priv,
- struct nvc0_graph_init *init,
+ const struct nvc0_graph_pack *pack,
u32 falcon, u32 starstar, u32 base)
{
- u32 addr = init->addr;
- u32 next = addr;
+ const struct nvc0_graph_pack *iter;
+ const struct nvc0_graph_init *init;
+ u32 addr = ~0, prev = ~0, xfer = 0;
u32 star, temp;
nv_wr32(priv, falcon + 0x01c0, 0x02000000 + starstar);
@@ -786,22 +913,28 @@ nvc0_graph_init_csdata(struct nvc0_graph_priv *priv,
star = temp;
nv_wr32(priv, falcon + 0x01c0, 0x01000000 + star);
- do {
- if (init->addr != next) {
- while (addr < next) {
- u32 nr = min((int)(next - addr) / 4, 32);
- nv_wr32(priv, falcon + 0x01c4,
- ((nr - 1) << 26) | (addr - base));
- addr += nr * 4;
- star += 4;
+ pack_for_each_init(init, iter, pack) {
+ u32 head = init->addr - base;
+ u32 tail = head + init->count * init->pitch;
+ while (head < tail) {
+ if (head != prev + 4 || xfer >= 32) {
+ if (xfer) {
+ u32 data = ((--xfer << 26) | addr);
+ nv_wr32(priv, falcon + 0x01c4, data);
+ star += 4;
+ }
+ addr = head;
+ xfer = 0;
}
- addr = next = init->addr;
+ prev = head;
+ xfer = xfer + 1;
+ head = head + init->pitch;
}
- next += init->count * 4;
- } while ((init++)->count);
+ }
+ nv_wr32(priv, falcon + 0x01c4, (--xfer << 26) | addr);
nv_wr32(priv, falcon + 0x01c0, 0x01000004 + starstar);
- nv_wr32(priv, falcon + 0x01c4, star);
+ nv_wr32(priv, falcon + 0x01c4, star + 4);
}
int
@@ -809,7 +942,6 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
{
struct nvc0_graph_oclass *oclass = (void *)nv_object(priv)->oclass;
struct nvc0_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
- struct nvc0_graph_init *init;
u32 r000260;
int i;
@@ -919,10 +1051,6 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x409184, oclass->fecs.ucode->code.data[i]);
}
- for (i = 0; (init = cclass->hub[i]); i++) {
- nvc0_graph_init_csdata(priv, init, 0x409000, 0x000, 0x000000);
- }
-
/* load GPC microcode */
nv_wr32(priv, 0x41a1c0, 0x01000000);
for (i = 0; i < oclass->gpccs.ucode->data.size / 4; i++)
@@ -936,12 +1064,11 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
}
nv_wr32(priv, 0x000260, r000260);
- if ((init = cclass->gpc[0]))
- nvc0_graph_init_csdata(priv, init, 0x41a000, 0x000, 0x418000);
- if ((init = cclass->gpc[2]))
- nvc0_graph_init_csdata(priv, init, 0x41a000, 0x004, 0x419800);
- if ((init = cclass->gpc[3]))
- nvc0_graph_init_csdata(priv, init, 0x41a000, 0x008, 0x41be00);
+ /* load register lists */
+ nvc0_graph_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
+ nvc0_graph_init_csdata(priv, cclass->gpc, 0x41a000, 0x000, 0x418000);
+ nvc0_graph_init_csdata(priv, cclass->tpc, 0x41a000, 0x004, 0x419800);
+ nvc0_graph_init_csdata(priv, cclass->ppc, 0x41a000, 0x008, 0x41be00);
/* start HUB ucode running, it'll init the GPCs */
nv_wr32(priv, 0x40910c, 0x00000000);
@@ -988,8 +1115,7 @@ nvc0_graph_init(struct nouveau_object *object)
nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
- for (i = 0; oclass->mmio[i]; i++)
- nvc0_graph_mmio(priv, oclass->mmio[i]);
+ nvc0_graph_mmio(priv, oclass->mmio);
memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
@@ -1091,10 +1217,10 @@ nvc0_graph_ctor_fw(struct nvc0_graph_priv *priv, const char *fwname,
int ret;
snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
- ret = request_firmware(&fw, f, &device->pdev->dev);
+ ret = request_firmware(&fw, f, nv_device_base(device));
if (ret) {
snprintf(f, sizeof(f), "nouveau/%s", fwname);
- ret = request_firmware(&fw, f, &device->pdev->dev);
+ ret = request_firmware(&fw, f, nv_device_base(device));
if (ret) {
nv_error(priv, "failed to load %s\n", fwname);
return ret;
@@ -1220,22 +1346,6 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nvc0_graph_init *
-nvc0_graph_init_mmio[] = {
- nvc0_graph_init_regs,
- nvc0_graph_init_unk40xx,
- nvc0_graph_init_unk44xx,
- nvc0_graph_init_unk78xx,
- nvc0_graph_init_unk60xx,
- nvc0_graph_init_unk58xx,
- nvc0_graph_init_unk80xx,
- nvc0_graph_init_gpc,
- nvc0_graph_init_tpc,
- nvc0_graph_init_unk88xx,
- nvc0_graph_tpc_0,
- NULL
-};
-
#include "fuc/hubnvc0.fuc.h"
struct nvc0_graph_ucode
@@ -1267,7 +1377,7 @@ nvc0_graph_oclass = &(struct nvc0_graph_oclass) {
},
.cclass = &nvc0_grctx_oclass,
.sclass = nvc0_graph_sclass,
- .mmio = nvc0_graph_init_mmio,
+ .mmio = nvc0_graph_pack_mmio,
.fecs.ucode = &nvc0_graph_fecs_ucode,
.gpccs.ucode = &nvc0_graph_gpccs_ucode,
}.base;
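
The nvc0_graph_init_csdata() rework above stops assuming each table entry already describes one contiguous run; it walks every init entry, coalesces adjacent dword offsets (relative to base) into transfers of at most 32 words, and emits each transfer as ((nr - 1) << 26) | offset. A stand-alone sketch of that coalescing, with printf standing in for the falcon writes and a trimmed entry struct whose field names are assumed from the tables:

#include <stdint.h>
#include <stdio.h>

struct init_sketch { uint32_t addr; uint8_t count; uint8_t pitch; };

static void emit(uint32_t xfer, uint32_t addr)
{
	printf("csdata: %u dwords from offset 0x%06x -> 0x%08x\n",
	       (unsigned)xfer, (unsigned)addr,
	       (unsigned)(((xfer - 1) << 26) | addr));
}

static void csdata_sketch(const struct init_sketch *init, int nr, uint32_t base)
{
	uint32_t addr = ~0u, prev = ~0u, xfer = 0;
	int i;

	for (i = 0; i < nr; i++) {
		uint32_t head = init[i].addr - base;
		uint32_t tail = head + init[i].count * init[i].pitch;

		while (head < tail) {
			/* flush when the run breaks or hits 32 words */
			if (head != prev + 4 || xfer >= 32) {
				if (xfer)
					emit(xfer, addr);
				addr = head;
				xfer = 0;
			}
			prev = head;
			xfer++;
			head += init[i].pitch;
		}
	}
	if (xfer)
		emit(xfer, addr);
}

int main(void)
{
	static const struct init_sketch list[] = {
		{ 0x418910, 1, 0x04 },	/* 0x910 */
		{ 0x418914, 1, 0x04 },	/* 0x914: contiguous, same transfer */
		{ 0x418980, 1, 0x04 },	/* gap: starts a new transfer */
		{ 0x418984, 3, 0x04 },	/* 0x984..0x98c */
	};
	csdata_sketch(list, 4, 0x418000);	/* 2 dwords @ 0x910, 4 @ 0x980 */
	return 0;
}
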
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index b0ab6de270b2..90d44616c876 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -45,6 +45,7 @@
#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
#define GPC_BCAST(r) (0x418000 + (r))
#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
+#define PPC_UNIT(t, m, r) (0x503000 + (t) * 0x8000 + (m) * 0x200 + (r))
#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
struct nvc0_graph_data {
@@ -102,8 +103,6 @@ struct nvc0_graph_chan {
} data[4];
};
-int nvc0_grctx_generate(struct nvc0_graph_priv *);
-
int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
struct nouveau_object **);
@@ -130,34 +129,14 @@ struct nvc0_graph_init {
u32 data;
};
-struct nvc0_graph_mthd {
- u16 oclass;
- struct nvc0_graph_init *init;
-};
-
-struct nvc0_grctx {
- struct nvc0_graph_priv *priv;
- struct nvc0_graph_data *data;
- struct nvc0_graph_mmio *mmio;
- int buffer_nr;
- u64 buffer[4];
- u64 addr;
+struct nvc0_graph_pack {
+ const struct nvc0_graph_init *init;
+ u32 type;
};
-struct nvc0_grctx_oclass {
- struct nouveau_oclass base;
- /* main context generation function */
- void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *);
- /* context-specific modify-on-first-load list generation function */
- void (*mods)(struct nvc0_graph_priv *, struct nvc0_grctx *);
- void (*unkn)(struct nvc0_graph_priv *);
- /* mmio context data */
- struct nvc0_graph_init **hub;
- struct nvc0_graph_init **gpc;
- /* indirect context data, generated with icmds/mthds */
- struct nvc0_graph_init *icmd;
- struct nvc0_graph_mthd *mthd;
-};
+#define pack_for_each_init(init, pack, head) \
+ for (pack = head; pack && pack->init; pack++) \
+ for (init = pack->init; init && init->count; init++)
struct nvc0_graph_ucode {
struct nvc0_graph_fuc code;
@@ -171,7 +150,7 @@ struct nvc0_graph_oclass {
struct nouveau_oclass base;
struct nouveau_oclass **cclass;
struct nouveau_oclass *sclass;
- struct nvc0_graph_init **mmio;
+ const struct nvc0_graph_pack *mmio;
struct {
struct nvc0_graph_ucode *ucode;
} fecs;
@@ -180,119 +159,72 @@ struct nvc0_graph_oclass {
} gpccs;
};
-void nvc0_graph_mmio(struct nvc0_graph_priv *, struct nvc0_graph_init *);
-void nvc0_graph_icmd(struct nvc0_graph_priv *, struct nvc0_graph_init *);
-void nvc0_graph_mthd(struct nvc0_graph_priv *, struct nvc0_graph_mthd *);
+void nvc0_graph_mmio(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
+void nvc0_graph_icmd(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
+void nvc0_graph_mthd(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
int nvc0_graph_init_ctxctl(struct nvc0_graph_priv *);
-extern struct nvc0_graph_init nvc0_graph_init_regs[];
-extern struct nvc0_graph_init nvc0_graph_init_unk40xx[];
-extern struct nvc0_graph_init nvc0_graph_init_unk44xx[];
-extern struct nvc0_graph_init nvc0_graph_init_unk78xx[];
-extern struct nvc0_graph_init nvc0_graph_init_unk60xx[];
-extern struct nvc0_graph_init nvc0_graph_init_unk58xx[];
-extern struct nvc0_graph_init nvc0_graph_init_unk80xx[];
-extern struct nvc0_graph_init nvc0_graph_init_gpc[];
-extern struct nvc0_graph_init nvc0_graph_init_unk88xx[];
-extern struct nvc0_graph_init nvc0_graph_tpc_0[];
-
-extern struct nvc0_graph_init nvc3_graph_init_unk58xx[];
-
-extern struct nvc0_graph_init nvd9_graph_init_unk58xx[];
-extern struct nvc0_graph_init nvd9_graph_init_unk64xx[];
-
-extern struct nvc0_graph_init nve4_graph_init_regs[];
-extern struct nvc0_graph_init nve4_graph_init_unk[];
-extern struct nvc0_graph_init nve4_graph_init_unk88xx[];
-
-extern struct nvc0_graph_init nvf0_graph_init_unk40xx[];
-extern struct nvc0_graph_init nvf0_graph_init_unk70xx[];
-extern struct nvc0_graph_init nvf0_graph_init_unk5bxx[];
-extern struct nvc0_graph_init nvf0_graph_init_tpc[];
-
-int nvc0_grctx_generate(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
-void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
-void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_r4060a8(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *);
-void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *);
-
-extern struct nouveau_oclass *nvc0_grctx_oclass;
-extern struct nvc0_graph_init *nvc0_grctx_init_hub[];
-extern struct nvc0_graph_init nvc0_grctx_init_base[];
-extern struct nvc0_graph_init nvc0_grctx_init_unk40xx[];
-extern struct nvc0_graph_init nvc0_grctx_init_unk44xx[];
-extern struct nvc0_graph_init nvc0_grctx_init_unk46xx[];
-extern struct nvc0_graph_init nvc0_grctx_init_unk47xx[];
-extern struct nvc0_graph_init nvc0_grctx_init_unk60xx[];
-extern struct nvc0_graph_init nvc0_grctx_init_unk64xx[];
-extern struct nvc0_graph_init nvc0_grctx_init_unk78xx[];
-extern struct nvc0_graph_init nvc0_grctx_init_unk80xx[];
-extern struct nvc0_graph_init nvc0_grctx_init_gpc_0[];
-extern struct nvc0_graph_init nvc0_grctx_init_gpc_1[];
-extern struct nvc0_graph_init nvc0_grctx_init_tpc[];
-extern struct nvc0_graph_init nvc0_grctx_init_icmd[];
-extern struct nvc0_graph_init nvd9_grctx_init_icmd[]; //
-
-extern struct nvc0_graph_mthd nvc0_grctx_init_mthd[];
-extern struct nvc0_graph_init nvc0_grctx_init_902d[];
-extern struct nvc0_graph_init nvc0_grctx_init_9039[];
-extern struct nvc0_graph_init nvc0_grctx_init_90c0[];
-extern struct nvc0_graph_init nvc0_grctx_init_mthd_magic[];
-
-void nvc1_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
-void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *);
-extern struct nouveau_oclass *nvc1_grctx_oclass;
-extern struct nvc0_graph_init nvc1_grctx_init_9097[];
-
-extern struct nouveau_oclass *nvc3_grctx_oclass;
-
-extern struct nouveau_oclass *nvc8_grctx_oclass;
-extern struct nvc0_graph_init nvc8_grctx_init_9197[];
-extern struct nvc0_graph_init nvc8_grctx_init_9297[];
-
-extern struct nouveau_oclass *nvd7_grctx_oclass;
-
-extern struct nouveau_oclass *nvd9_grctx_oclass;
-extern struct nvc0_graph_init nvd9_grctx_init_rop[];
-extern struct nvc0_graph_mthd nvd9_grctx_init_mthd[];
-
-void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
-void nve4_grctx_generate_unkn(struct nvc0_graph_priv *);
-extern struct nouveau_oclass *nve4_grctx_oclass;
-extern struct nvc0_graph_init nve4_grctx_init_unk46xx[];
-extern struct nvc0_graph_init nve4_grctx_init_unk47xx[];
-extern struct nvc0_graph_init nve4_grctx_init_unk58xx[];
-extern struct nvc0_graph_init nve4_grctx_init_unk80xx[];
-extern struct nvc0_graph_init nve4_grctx_init_unk90xx[];
-
-extern struct nouveau_oclass *nvf0_grctx_oclass;
-extern struct nvc0_graph_init nvf0_grctx_init_unk44xx[];
-extern struct nvc0_graph_init nvf0_grctx_init_unk5bxx[];
-extern struct nvc0_graph_init nvf0_grctx_init_unk60xx[];
-
-extern struct nouveau_oclass *nv108_grctx_oclass;
-
-#define mmio_data(s,a,p) do { \
- info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \
- info->addr = info->buffer[info->buffer_nr++] + (s); \
- info->data->size = (s); \
- info->data->align = (a); \
- info->data->access = (p); \
- info->data++; \
-} while(0)
-
-#define mmio_list(r,d,s,b) do { \
- info->mmio->addr = (r); \
- info->mmio->data = (d); \
- info->mmio->shift = (s); \
- info->mmio->buffer = (b); \
- info->mmio++; \
- nv_wr32(priv, (r), (d) | ((s) ? (info->buffer[(b)] >> (s)) : 0)); \
-} while(0)
+/* register init value lists */
+
+extern const struct nvc0_graph_init nvc0_graph_init_main_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_fe_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_pri_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_rstr2d_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_pd_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_ds_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_scc_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_prop_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_gpc_unk_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_setup_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_crstr_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_setup_1[];
+extern const struct nvc0_graph_init nvc0_graph_init_zcull_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_gpm_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_gpc_unk_1[];
+extern const struct nvc0_graph_init nvc0_graph_init_gcc_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_tpccs_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_tex_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_pe_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_l1c_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_wwdx_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_tpccs_1[];
+extern const struct nvc0_graph_init nvc0_graph_init_mpc_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_be_0[];
+extern const struct nvc0_graph_init nvc0_graph_init_fe_1[];
+extern const struct nvc0_graph_init nvc0_graph_init_pe_1[];
+
+extern const struct nvc0_graph_init nvc4_graph_init_ds_0[];
+extern const struct nvc0_graph_init nvc4_graph_init_tex_0[];
+extern const struct nvc0_graph_init nvc4_graph_init_sm_0[];
+
+extern const struct nvc0_graph_init nvc1_graph_init_gpc_unk_0[];
+extern const struct nvc0_graph_init nvc1_graph_init_setup_1[];
+
+extern const struct nvc0_graph_init nvd9_graph_init_pd_0[];
+extern const struct nvc0_graph_init nvd9_graph_init_ds_0[];
+extern const struct nvc0_graph_init nvd9_graph_init_prop_0[];
+extern const struct nvc0_graph_init nvd9_graph_init_gpm_0[];
+extern const struct nvc0_graph_init nvd9_graph_init_gpc_unk_1[];
+extern const struct nvc0_graph_init nvd9_graph_init_tex_0[];
+extern const struct nvc0_graph_init nvd9_graph_init_sm_0[];
+extern const struct nvc0_graph_init nvd9_graph_init_fe_1[];
+
+extern const struct nvc0_graph_init nvd7_graph_init_pes_0[];
+extern const struct nvc0_graph_init nvd7_graph_init_wwdx_0[];
+extern const struct nvc0_graph_init nvd7_graph_init_cbm_0[];
+
+extern const struct nvc0_graph_init nve4_graph_init_main_0[];
+extern const struct nvc0_graph_init nve4_graph_init_tpccs_0[];
+extern const struct nvc0_graph_init nve4_graph_init_pe_0[];
+extern const struct nvc0_graph_init nve4_graph_init_be_0[];
+
+extern const struct nvc0_graph_init nvf0_graph_init_fe_0[];
+extern const struct nvc0_graph_init nvf0_graph_init_sked_0[];
+extern const struct nvc0_graph_init nvf0_graph_init_cwd_0[];
+extern const struct nvc0_graph_init nvf0_graph_init_gpc_unk_1[];
+extern const struct nvc0_graph_init nvf0_graph_init_sm_0[];
+
+extern const struct nvc0_graph_init nv108_graph_init_gpc_unk_0[];
+
#endif
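
The split header now exports each register init list individually so the per-chipset files below can compose them into pack tables. A minimal sketch of the data layout these declarations rely on follows; the field widths and the exact pack members are assumptions for illustration, not something this patch states:

struct nvc0_graph_init {
	u32 addr;	/* offset of the first register in the run */
	u8  count;	/* number of registers in the run */
	u32 pitch;	/* byte stride between them (0x04 in the lists below) */
	u32 data;	/* value written to every register in the run */
};

struct nvc0_graph_pack {
	const struct nvc0_graph_init *init;	/* one init list per entry */
};

Each { 0x418604, 1, 0x04, 0x00000000 } entry therefore describes count registers starting at addr, pitch bytes apart, all programmed with data; an empty {} terminates an init list, and an empty pack entry terminates a pack table.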
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
index bc4a469b86cb..30cab0b2eba1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
@@ -23,6 +23,7 @@
*/
#include "nvc0.h"
+#include "ctxnvc0.h"
/*******************************************************************************
* Graphics object classes
@@ -39,94 +40,82 @@ nvc1_graph_sclass[] = {
};
/*******************************************************************************
- * PGRAPH engine/subdev functions
+ * PGRAPH register lists
******************************************************************************/
-static struct nvc0_graph_init
-nvc1_graph_init_gpc[] = {
- { 0x4184a0, 1, 0x04, 0x00000000 },
+const struct nvc0_graph_init
+nvc1_graph_init_gpc_unk_0[] = {
{ 0x418604, 1, 0x04, 0x00000000 },
{ 0x418680, 1, 0x04, 0x00000000 },
{ 0x418714, 1, 0x04, 0x00000000 },
{ 0x418384, 1, 0x04, 0x00000000 },
- { 0x418814, 3, 0x04, 0x00000000 },
- { 0x418b04, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc1_graph_init_setup_1[] = {
{ 0x4188c8, 2, 0x04, 0x00000000 },
{ 0x4188d0, 1, 0x04, 0x00010000 },
{ 0x4188d4, 1, 0x04, 0x00000001 },
- { 0x418910, 1, 0x04, 0x00010001 },
- { 0x418914, 1, 0x04, 0x00000301 },
- { 0x418918, 1, 0x04, 0x00800000 },
- { 0x418980, 1, 0x04, 0x77777770 },
- { 0x418984, 3, 0x04, 0x77777777 },
- { 0x418c04, 1, 0x04, 0x00000000 },
- { 0x418c88, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc1_graph_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
{ 0x418f08, 1, 0x04, 0x00000000 },
{ 0x418e00, 1, 0x04, 0x00000003 },
{ 0x418e08, 1, 0x04, 0x00000000 },
- { 0x41900c, 1, 0x04, 0x00000000 },
- { 0x419018, 1, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nvc1_graph_init_tpc[] = {
- { 0x419d08, 2, 0x04, 0x00000000 },
- { 0x419d10, 1, 0x04, 0x00000014 },
- { 0x419ab0, 1, 0x04, 0x00000000 },
- { 0x419ac8, 1, 0x04, 0x00000000 },
- { 0x419ab8, 1, 0x04, 0x000000e7 },
- { 0x419abc, 2, 0x04, 0x00000000 },
- { 0x41980c, 2, 0x04, 0x00000000 },
+static const struct nvc0_graph_init
+nvc1_graph_init_pe_0[] = {
+ { 0x41980c, 1, 0x04, 0x00000010 },
+ { 0x419810, 1, 0x04, 0x00000000 },
{ 0x419814, 1, 0x04, 0x00000004 },
{ 0x419844, 1, 0x04, 0x00000000 },
{ 0x41984c, 1, 0x04, 0x00005bc5 },
{ 0x419850, 4, 0x04, 0x00000000 },
{ 0x419880, 1, 0x04, 0x00000002 },
- { 0x419c98, 1, 0x04, 0x00000000 },
- { 0x419ca8, 1, 0x04, 0x80000000 },
- { 0x419cb4, 1, 0x04, 0x00000000 },
- { 0x419cb8, 1, 0x04, 0x00008bf4 },
- { 0x419cbc, 1, 0x04, 0x28137606 },
- { 0x419cc0, 2, 0x04, 0x00000000 },
- { 0x419bd4, 1, 0x04, 0x00800000 },
- { 0x419bdc, 1, 0x04, 0x00000000 },
- { 0x419d2c, 1, 0x04, 0x00000000 },
- { 0x419c0c, 1, 0x04, 0x00000000 },
- { 0x419e00, 1, 0x04, 0x00000000 },
- { 0x419ea0, 1, 0x04, 0x00000000 },
- { 0x419ea4, 1, 0x04, 0x00000100 },
- { 0x419ea8, 1, 0x04, 0x00001100 },
- { 0x419eac, 1, 0x04, 0x11100702 },
- { 0x419eb0, 1, 0x04, 0x00000003 },
- { 0x419eb4, 4, 0x04, 0x00000000 },
- { 0x419ec8, 1, 0x04, 0x0e063818 },
- { 0x419ecc, 1, 0x04, 0x0e060e06 },
- { 0x419ed0, 1, 0x04, 0x00003818 },
- { 0x419ed4, 1, 0x04, 0x011104f1 },
- { 0x419edc, 1, 0x04, 0x00000000 },
- { 0x419f00, 1, 0x04, 0x00000000 },
- { 0x419f2c, 1, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init *
-nvc1_graph_init_mmio[] = {
- nvc0_graph_init_regs,
- nvc0_graph_init_unk40xx,
- nvc0_graph_init_unk44xx,
- nvc0_graph_init_unk78xx,
- nvc0_graph_init_unk60xx,
- nvc3_graph_init_unk58xx,
- nvc0_graph_init_unk80xx,
- nvc1_graph_init_gpc,
- nvc1_graph_init_tpc,
- nvc0_graph_init_unk88xx,
- nvc0_graph_tpc_0,
- NULL
+static const struct nvc0_graph_pack
+nvc1_graph_pack_mmio[] = {
+ { nvc0_graph_init_main_0 },
+ { nvc0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvc0_graph_init_pd_0 },
+ { nvc4_graph_init_ds_0 },
+ { nvc0_graph_init_scc_0 },
+ { nvc0_graph_init_prop_0 },
+ { nvc1_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { nvc1_graph_init_setup_1 },
+ { nvc0_graph_init_zcull_0 },
+ { nvc0_graph_init_gpm_0 },
+ { nvc1_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { nvc0_graph_init_tpccs_0 },
+ { nvc4_graph_init_tex_0 },
+ { nvc1_graph_init_pe_0 },
+ { nvc0_graph_init_l1c_0 },
+ { nvc0_graph_init_wwdx_0 },
+ { nvc0_graph_init_tpccs_1 },
+ { nvc0_graph_init_mpc_0 },
+ { nvc4_graph_init_sm_0 },
+ { nvc0_graph_init_be_0 },
+ { nvc0_graph_init_fe_1 },
+ {}
};
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
struct nouveau_oclass *
nvc1_graph_oclass = &(struct nvc0_graph_oclass) {
.base.handle = NV_ENGINE(GR, 0xc1),
@@ -138,7 +127,7 @@ nvc1_graph_oclass = &(struct nvc0_graph_oclass) {
},
.cclass = &nvc1_grctx_oclass,
.sclass = nvc1_graph_sclass,
- .mmio = nvc1_graph_init_mmio,
+ .mmio = nvc1_graph_pack_mmio,
.fecs.ucode = &nvc0_graph_fecs_ucode,
.gpccs.ucode = &nvc0_graph_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc3.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc4.c
index d44b3b3ee800..e82e70c53132 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc4.c
@@ -23,13 +23,14 @@
*/
#include "nvc0.h"
+#include "ctxnvc0.h"
/*******************************************************************************
- * PGRAPH engine/subdev functions
+ * PGRAPH register lists
******************************************************************************/
-struct nvc0_graph_init
-nvc3_graph_init_unk58xx[] = {
+const struct nvc0_graph_init
+nvc4_graph_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405900, 1, 0x04, 0x00002834 },
@@ -37,29 +38,27 @@ nvc3_graph_init_unk58xx[] = {
{}
};
-static struct nvc0_graph_init
-nvc3_graph_init_tpc[] = {
- { 0x419d08, 2, 0x04, 0x00000000 },
- { 0x419d10, 1, 0x04, 0x00000014 },
+const struct nvc0_graph_init
+nvc4_graph_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ac8, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
{ 0x419abc, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvc4_graph_init_pe_0[] = {
{ 0x41980c, 3, 0x04, 0x00000000 },
{ 0x419844, 1, 0x04, 0x00000000 },
{ 0x41984c, 1, 0x04, 0x00005bc5 },
{ 0x419850, 4, 0x04, 0x00000000 },
{ 0x419880, 1, 0x04, 0x00000002 },
- { 0x419c98, 1, 0x04, 0x00000000 },
- { 0x419ca8, 1, 0x04, 0x80000000 },
- { 0x419cb4, 1, 0x04, 0x00000000 },
- { 0x419cb8, 1, 0x04, 0x00008bf4 },
- { 0x419cbc, 1, 0x04, 0x28137606 },
- { 0x419cc0, 2, 0x04, 0x00000000 },
- { 0x419bd4, 1, 0x04, 0x00800000 },
- { 0x419bdc, 1, 0x04, 0x00000000 },
- { 0x419d2c, 1, 0x04, 0x00000000 },
- { 0x419c0c, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvc4_graph_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000000 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ea4, 1, 0x04, 0x00000100 },
@@ -77,24 +76,43 @@ nvc3_graph_init_tpc[] = {
{}
};
-static struct nvc0_graph_init *
-nvc3_graph_init_mmio[] = {
- nvc0_graph_init_regs,
- nvc0_graph_init_unk40xx,
- nvc0_graph_init_unk44xx,
- nvc0_graph_init_unk78xx,
- nvc0_graph_init_unk60xx,
- nvc3_graph_init_unk58xx,
- nvc0_graph_init_unk80xx,
- nvc0_graph_init_gpc,
- nvc3_graph_init_tpc,
- nvc0_graph_init_unk88xx,
- nvc0_graph_tpc_0,
- NULL
+static const struct nvc0_graph_pack
+nvc4_graph_pack_mmio[] = {
+ { nvc0_graph_init_main_0 },
+ { nvc0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvc0_graph_init_pd_0 },
+ { nvc4_graph_init_ds_0 },
+ { nvc0_graph_init_scc_0 },
+ { nvc0_graph_init_prop_0 },
+ { nvc0_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { nvc0_graph_init_setup_1 },
+ { nvc0_graph_init_zcull_0 },
+ { nvc0_graph_init_gpm_0 },
+ { nvc0_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { nvc0_graph_init_tpccs_0 },
+ { nvc4_graph_init_tex_0 },
+ { nvc4_graph_init_pe_0 },
+ { nvc0_graph_init_l1c_0 },
+ { nvc0_graph_init_wwdx_0 },
+ { nvc0_graph_init_tpccs_1 },
+ { nvc0_graph_init_mpc_0 },
+ { nvc4_graph_init_sm_0 },
+ { nvc0_graph_init_be_0 },
+ { nvc0_graph_init_fe_1 },
+ {}
};
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
struct nouveau_oclass *
-nvc3_graph_oclass = &(struct nvc0_graph_oclass) {
+nvc4_graph_oclass = &(struct nvc0_graph_oclass) {
.base.handle = NV_ENGINE(GR, 0xc3),
.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_graph_ctor,
@@ -102,9 +120,9 @@ nvc3_graph_oclass = &(struct nvc0_graph_oclass) {
.init = nvc0_graph_init,
.fini = _nouveau_graph_fini,
},
- .cclass = &nvc3_grctx_oclass,
+ .cclass = &nvc4_grctx_oclass,
.sclass = nvc0_graph_sclass,
- .mmio = nvc3_graph_init_mmio,
+ .mmio = nvc4_graph_pack_mmio,
.fecs.ucode = &nvc0_graph_fecs_ucode,
.gpccs.ucode = &nvc0_graph_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
index 02845e567314..a6bf783e1256 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
@@ -23,6 +23,7 @@
*/
#include "nvc0.h"
+#include "ctxnvc0.h"
/*******************************************************************************
* Graphics object classes
@@ -40,58 +41,11 @@ nvc8_graph_sclass[] = {
};
/*******************************************************************************
- * PGRAPH engine/subdev functions
+ * PGRAPH register lists
******************************************************************************/
-static struct nvc0_graph_init
-nvc8_graph_init_gpc[] = {
- { 0x4184a0, 1, 0x04, 0x00000000 },
- { 0x418604, 1, 0x04, 0x00000000 },
- { 0x418680, 1, 0x04, 0x00000000 },
- { 0x418714, 1, 0x04, 0x80000000 },
- { 0x418384, 1, 0x04, 0x00000000 },
- { 0x418814, 3, 0x04, 0x00000000 },
- { 0x418b04, 1, 0x04, 0x00000000 },
- { 0x4188c8, 2, 0x04, 0x00000000 },
- { 0x4188d0, 1, 0x04, 0x00010000 },
- { 0x4188d4, 1, 0x04, 0x00000001 },
- { 0x418910, 1, 0x04, 0x00010001 },
- { 0x418914, 1, 0x04, 0x00000301 },
- { 0x418918, 1, 0x04, 0x00800000 },
- { 0x418980, 1, 0x04, 0x77777770 },
- { 0x418984, 3, 0x04, 0x77777777 },
- { 0x418c04, 1, 0x04, 0x00000000 },
- { 0x418c88, 1, 0x04, 0x00000000 },
- { 0x418d00, 1, 0x04, 0x00000000 },
- { 0x418f08, 1, 0x04, 0x00000000 },
- { 0x418e00, 1, 0x04, 0x00000050 },
- { 0x418e08, 1, 0x04, 0x00000000 },
- { 0x41900c, 1, 0x04, 0x00000000 },
- { 0x419018, 1, 0x04, 0x00000000 },
- {}
-};
-
-static struct nvc0_graph_init
-nvc8_graph_init_tpc[] = {
- { 0x419d08, 2, 0x04, 0x00000000 },
- { 0x419d10, 1, 0x04, 0x00000014 },
- { 0x419ab0, 1, 0x04, 0x00000000 },
- { 0x419ab8, 1, 0x04, 0x000000e7 },
- { 0x419abc, 2, 0x04, 0x00000000 },
- { 0x41980c, 3, 0x04, 0x00000000 },
- { 0x419844, 1, 0x04, 0x00000000 },
- { 0x41984c, 1, 0x04, 0x00005bc5 },
- { 0x419850, 4, 0x04, 0x00000000 },
- { 0x419c98, 1, 0x04, 0x00000000 },
- { 0x419ca8, 1, 0x04, 0x80000000 },
- { 0x419cb4, 1, 0x04, 0x00000000 },
- { 0x419cb8, 1, 0x04, 0x00008bf4 },
- { 0x419cbc, 1, 0x04, 0x28137606 },
- { 0x419cc0, 2, 0x04, 0x00000000 },
- { 0x419bd4, 1, 0x04, 0x00800000 },
- { 0x419bdc, 1, 0x04, 0x00000000 },
- { 0x419d2c, 1, 0x04, 0x00000000 },
- { 0x419c0c, 1, 0x04, 0x00000000 },
+static const struct nvc0_graph_init
+nvc8_graph_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000000 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ea4, 1, 0x04, 0x00000100 },
@@ -108,22 +62,42 @@ nvc8_graph_init_tpc[] = {
{}
};
-static struct nvc0_graph_init *
-nvc8_graph_init_mmio[] = {
- nvc0_graph_init_regs,
- nvc0_graph_init_unk40xx,
- nvc0_graph_init_unk44xx,
- nvc0_graph_init_unk78xx,
- nvc0_graph_init_unk60xx,
- nvc0_graph_init_unk58xx,
- nvc0_graph_init_unk80xx,
- nvc8_graph_init_gpc,
- nvc8_graph_init_tpc,
- nvc0_graph_init_unk88xx,
- nvc0_graph_tpc_0,
- NULL
+static const struct nvc0_graph_pack
+nvc8_graph_pack_mmio[] = {
+ { nvc0_graph_init_main_0 },
+ { nvc0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvc0_graph_init_pd_0 },
+ { nvc0_graph_init_ds_0 },
+ { nvc0_graph_init_scc_0 },
+ { nvc0_graph_init_prop_0 },
+ { nvc0_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { nvc1_graph_init_setup_1 },
+ { nvc0_graph_init_zcull_0 },
+ { nvc0_graph_init_gpm_0 },
+ { nvc0_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { nvc0_graph_init_tpccs_0 },
+ { nvc0_graph_init_tex_0 },
+ { nvc0_graph_init_pe_0 },
+ { nvc0_graph_init_l1c_0 },
+ { nvc0_graph_init_wwdx_0 },
+ { nvc0_graph_init_tpccs_1 },
+ { nvc0_graph_init_mpc_0 },
+ { nvc8_graph_init_sm_0 },
+ { nvc0_graph_init_be_0 },
+ { nvc0_graph_init_fe_1 },
+ { nvc0_graph_init_pe_1 },
+ {}
};
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
struct nouveau_oclass *
nvc8_graph_oclass = &(struct nvc0_graph_oclass) {
.base.handle = NV_ENGINE(GR, 0xc8),
@@ -135,7 +109,7 @@ nvc8_graph_oclass = &(struct nvc0_graph_oclass) {
},
.cclass = &nvc8_grctx_oclass,
.sclass = nvc8_graph_sclass,
- .mmio = nvc8_graph_init_mmio,
+ .mmio = nvc8_graph_pack_mmio,
.fecs.ucode = &nvc0_graph_fecs_ucode,
.gpccs.ucode = &nvc0_graph_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
index 5052d7ab4d72..2a6a94e2a041 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
@@ -23,6 +23,77 @@
*/
#include "nvc0.h"
+#include "ctxnvc0.h"
+
+/*******************************************************************************
+ * PGRAPH register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+nvd7_graph_init_pe_0[] = {
+ { 0x41980c, 1, 0x04, 0x00000010 },
+ { 0x419844, 1, 0x04, 0x00000000 },
+ { 0x41984c, 1, 0x04, 0x00005bc8 },
+ { 0x419850, 3, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd7_graph_init_pes_0[] = {
+ { 0x41be04, 1, 0x04, 0x00000000 },
+ { 0x41be08, 1, 0x04, 0x00000004 },
+ { 0x41be0c, 1, 0x04, 0x00000000 },
+ { 0x41be10, 1, 0x04, 0x003b8bc7 },
+ { 0x41be14, 2, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd7_graph_init_wwdx_0[] = {
+ { 0x41bfd4, 1, 0x04, 0x00800000 },
+ { 0x41bfdc, 1, 0x04, 0x00000000 },
+ { 0x41bff8, 2, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd7_graph_init_cbm_0[] = {
+ { 0x41becc, 1, 0x04, 0x00000000 },
+ { 0x41bee8, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+nvd7_graph_pack_mmio[] = {
+ { nvc0_graph_init_main_0 },
+ { nvc0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvd9_graph_init_pd_0 },
+ { nvd9_graph_init_ds_0 },
+ { nvc0_graph_init_scc_0 },
+ { nvd9_graph_init_prop_0 },
+ { nvc1_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { nvc1_graph_init_setup_1 },
+ { nvc0_graph_init_zcull_0 },
+ { nvd9_graph_init_gpm_0 },
+ { nvd9_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { nvc0_graph_init_tpccs_0 },
+ { nvd9_graph_init_tex_0 },
+ { nvd7_graph_init_pe_0 },
+ { nvc0_graph_init_l1c_0 },
+ { nvc0_graph_init_mpc_0 },
+ { nvd9_graph_init_sm_0 },
+ { nvd7_graph_init_pes_0 },
+ { nvd7_graph_init_wwdx_0 },
+ { nvd7_graph_init_cbm_0 },
+ { nvc0_graph_init_be_0 },
+ { nvd9_graph_init_fe_1 },
+ {}
+};
/*******************************************************************************
* PGRAPH engine/subdev functions
@@ -48,108 +119,6 @@ nvd7_graph_gpccs_ucode = {
.data.size = sizeof(nvd7_grgpc_data),
};
-static struct nvc0_graph_init
-nvd7_graph_init_gpc[] = {
- { 0x418408, 1, 0x04, 0x00000000 },
- { 0x4184a0, 1, 0x04, 0x00000000 },
- { 0x4184a4, 2, 0x04, 0x00000000 },
- { 0x418604, 1, 0x04, 0x00000000 },
- { 0x418680, 1, 0x04, 0x00000000 },
- { 0x418714, 1, 0x04, 0x00000000 },
- { 0x418384, 1, 0x04, 0x00000000 },
- { 0x418814, 3, 0x04, 0x00000000 },
- { 0x418b04, 1, 0x04, 0x00000000 },
- { 0x4188c8, 2, 0x04, 0x00000000 },
- { 0x4188d0, 1, 0x04, 0x00010000 },
- { 0x4188d4, 1, 0x04, 0x00000001 },
- { 0x418910, 1, 0x04, 0x00010001 },
- { 0x418914, 1, 0x04, 0x00000301 },
- { 0x418918, 1, 0x04, 0x00800000 },
- { 0x418980, 1, 0x04, 0x77777770 },
- { 0x418984, 3, 0x04, 0x77777777 },
- { 0x418c04, 1, 0x04, 0x00000000 },
- { 0x418c64, 1, 0x04, 0x00000000 },
- { 0x418c68, 1, 0x04, 0x00000000 },
- { 0x418c88, 1, 0x04, 0x00000000 },
- { 0x418cb4, 2, 0x04, 0x00000000 },
- { 0x418d00, 1, 0x04, 0x00000000 },
- { 0x418d28, 1, 0x04, 0x00000000 },
- { 0x418f00, 1, 0x04, 0x00000000 },
- { 0x418f08, 1, 0x04, 0x00000000 },
- { 0x418f20, 2, 0x04, 0x00000000 },
- { 0x418e00, 1, 0x04, 0x00000003 },
- { 0x418e08, 1, 0x04, 0x00000000 },
- { 0x418e1c, 1, 0x04, 0x00000000 },
- { 0x418e20, 1, 0x04, 0x00000000 },
- { 0x41900c, 1, 0x04, 0x00000000 },
- { 0x419018, 1, 0x04, 0x00000000 },
- {}
-};
-
-static struct nvc0_graph_init
-nvd7_graph_init_tpc[] = {
- { 0x419d08, 2, 0x04, 0x00000000 },
- { 0x419d10, 1, 0x04, 0x00000014 },
- { 0x419ab0, 1, 0x04, 0x00000000 },
- { 0x419ac8, 1, 0x04, 0x00000000 },
- { 0x419ab8, 1, 0x04, 0x000000e7 },
- { 0x419abc, 2, 0x04, 0x00000000 },
- { 0x419ab4, 1, 0x04, 0x00000000 },
- { 0x41980c, 1, 0x04, 0x00000010 },
- { 0x419844, 1, 0x04, 0x00000000 },
- { 0x41984c, 1, 0x04, 0x00005bc8 },
- { 0x419850, 2, 0x04, 0x00000000 },
- { 0x419c98, 1, 0x04, 0x00000000 },
- { 0x419ca8, 1, 0x04, 0x80000000 },
- { 0x419cb4, 1, 0x04, 0x00000000 },
- { 0x419cb8, 1, 0x04, 0x00008bf4 },
- { 0x419cbc, 1, 0x04, 0x28137606 },
- { 0x419cc0, 2, 0x04, 0x00000000 },
- { 0x419c0c, 1, 0x04, 0x00000000 },
- { 0x419e00, 1, 0x04, 0x00000000 },
- { 0x419ea0, 1, 0x04, 0x00000000 },
- { 0x419ea4, 1, 0x04, 0x00000100 },
- { 0x419ea8, 1, 0x04, 0x02001100 },
- { 0x419eac, 1, 0x04, 0x11100702 },
- { 0x419eb0, 1, 0x04, 0x00000003 },
- { 0x419eb4, 4, 0x04, 0x00000000 },
- { 0x419ec8, 1, 0x04, 0x0e063818 },
- { 0x419ecc, 1, 0x04, 0x0e060e06 },
- { 0x419ed0, 1, 0x04, 0x00003818 },
- { 0x419ed4, 1, 0x04, 0x011104f1 },
- { 0x419edc, 1, 0x04, 0x00000000 },
- { 0x419f00, 1, 0x04, 0x00000000 },
- { 0x419f2c, 1, 0x04, 0x00000000 },
- {}
-};
-
-static struct nvc0_graph_init
-nvd7_graph_init_tpc_0[] = {
- { 0x40402c, 1, 0x04, 0x00000000 },
- { 0x4040f0, 1, 0x04, 0x00000000 },
- { 0x404174, 1, 0x04, 0x00000000 },
- { 0x503018, 1, 0x04, 0x00000001 },
- {}
-};
-
-static struct nvc0_graph_init *
-nvd7_graph_init_mmio[] = {
- nvc0_graph_init_regs,
- nvc0_graph_init_unk40xx,
- nvc0_graph_init_unk44xx,
- nvc0_graph_init_unk78xx,
- nvc0_graph_init_unk60xx,
- nvd9_graph_init_unk64xx,
- nvd9_graph_init_unk58xx,
- nvc0_graph_init_unk80xx,
- nvd7_graph_init_gpc,
- nvd7_graph_init_tpc,
- nve4_graph_init_unk,
- nvc0_graph_init_unk88xx,
- nvd7_graph_init_tpc_0,
- NULL
-};
-
struct nouveau_oclass *
nvd7_graph_oclass = &(struct nvc0_graph_oclass) {
.base.handle = NV_ENGINE(GR, 0xd7),
@@ -161,7 +130,7 @@ nvd7_graph_oclass = &(struct nvc0_graph_oclass) {
},
.cclass = &nvd7_grctx_oclass,
.sclass = nvc8_graph_sclass,
- .mmio = nvd7_graph_init_mmio,
+ .mmio = nvd7_graph_pack_mmio,
.fecs.ucode = &nvd7_graph_fecs_ucode,
.gpccs.ucode = &nvd7_graph_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c
index 652098e0df3f..00fdf202fb92 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c
@@ -23,76 +23,70 @@
*/
#include "nvc0.h"
+#include "ctxnvc0.h"
/*******************************************************************************
- * PGRAPH engine/subdev functions
+ * PGRAPH register lists
******************************************************************************/
-struct nvc0_graph_init
-nvd9_graph_init_unk64xx[] = {
+const struct nvc0_graph_init
+nvd9_graph_init_pd_0[] = {
+ { 0x406024, 1, 0x04, 0x00000000 },
{ 0x4064f0, 3, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvd9_graph_init_unk58xx[] = {
+const struct nvc0_graph_init
+nvd9_graph_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405900, 1, 0x04, 0x00002834 },
{ 0x405908, 1, 0x04, 0x00000000 },
- { 0x405928, 1, 0x04, 0x00000000 },
- { 0x40592c, 1, 0x04, 0x00000000 },
+ { 0x405928, 2, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nvd9_graph_init_gpc[] = {
+const struct nvc0_graph_init
+nvd9_graph_init_prop_0[] = {
{ 0x418408, 1, 0x04, 0x00000000 },
- { 0x4184a0, 1, 0x04, 0x00000000 },
- { 0x4184a4, 2, 0x04, 0x00000000 },
- { 0x418604, 1, 0x04, 0x00000000 },
- { 0x418680, 1, 0x04, 0x00000000 },
- { 0x418714, 1, 0x04, 0x00000000 },
- { 0x418384, 1, 0x04, 0x00000000 },
- { 0x418814, 3, 0x04, 0x00000000 },
- { 0x418b04, 1, 0x04, 0x00000000 },
- { 0x4188c8, 2, 0x04, 0x00000000 },
- { 0x4188d0, 1, 0x04, 0x00010000 },
- { 0x4188d4, 1, 0x04, 0x00000001 },
- { 0x418910, 1, 0x04, 0x00010001 },
- { 0x418914, 1, 0x04, 0x00000301 },
- { 0x418918, 1, 0x04, 0x00800000 },
- { 0x418980, 1, 0x04, 0x77777770 },
- { 0x418984, 3, 0x04, 0x77777777 },
+ { 0x4184a0, 3, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd9_graph_init_gpm_0[] = {
{ 0x418c04, 1, 0x04, 0x00000000 },
- { 0x418c64, 1, 0x04, 0x00000000 },
- { 0x418c68, 1, 0x04, 0x00000000 },
+ { 0x418c64, 2, 0x04, 0x00000000 },
{ 0x418c88, 1, 0x04, 0x00000000 },
{ 0x418cb4, 2, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd9_graph_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
- { 0x418d28, 1, 0x04, 0x00000000 },
- { 0x418d2c, 1, 0x04, 0x00000000 },
+ { 0x418d28, 2, 0x04, 0x00000000 },
{ 0x418f00, 1, 0x04, 0x00000000 },
{ 0x418f08, 1, 0x04, 0x00000000 },
{ 0x418f20, 2, 0x04, 0x00000000 },
{ 0x418e00, 1, 0x04, 0x00000003 },
{ 0x418e08, 1, 0x04, 0x00000000 },
- { 0x418e1c, 1, 0x04, 0x00000000 },
- { 0x418e20, 1, 0x04, 0x00000000 },
- { 0x41900c, 1, 0x04, 0x00000000 },
- { 0x419018, 1, 0x04, 0x00000000 },
+ { 0x418e1c, 2, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nvd9_graph_init_tpc[] = {
- { 0x419d08, 2, 0x04, 0x00000000 },
- { 0x419d10, 1, 0x04, 0x00000014 },
+const struct nvc0_graph_init
+nvd9_graph_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ac8, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
{ 0x419abc, 2, 0x04, 0x00000000 },
{ 0x419ab4, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd9_graph_init_pe_0[] = {
{ 0x41980c, 1, 0x04, 0x00000010 },
{ 0x419810, 1, 0x04, 0x00000000 },
{ 0x419814, 1, 0x04, 0x00000004 },
@@ -100,20 +94,26 @@ nvd9_graph_init_tpc[] = {
{ 0x41984c, 1, 0x04, 0x0000a918 },
{ 0x419850, 4, 0x04, 0x00000000 },
{ 0x419880, 1, 0x04, 0x00000002 },
- { 0x419c98, 1, 0x04, 0x00000000 },
- { 0x419ca8, 1, 0x04, 0x80000000 },
- { 0x419cb4, 1, 0x04, 0x00000000 },
- { 0x419cb8, 1, 0x04, 0x00008bf4 },
- { 0x419cbc, 1, 0x04, 0x28137606 },
- { 0x419cc0, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd9_graph_init_wwdx_0[] = {
{ 0x419bd4, 1, 0x04, 0x00800000 },
{ 0x419bdc, 1, 0x04, 0x00000000 },
- { 0x419bf8, 1, 0x04, 0x00000000 },
- { 0x419bfc, 1, 0x04, 0x00000000 },
+ { 0x419bf8, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvd9_graph_init_tpccs_1[] = {
{ 0x419d2c, 1, 0x04, 0x00000000 },
- { 0x419d48, 1, 0x04, 0x00000000 },
- { 0x419d4c, 1, 0x04, 0x00000000 },
- { 0x419c0c, 1, 0x04, 0x00000000 },
+ { 0x419d48, 2, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvd9_graph_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000000 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ea4, 1, 0x04, 0x00000100 },
@@ -131,23 +131,49 @@ nvd9_graph_init_tpc[] = {
{}
};
-static struct nvc0_graph_init *
-nvd9_graph_init_mmio[] = {
- nvc0_graph_init_regs,
- nvc0_graph_init_unk40xx,
- nvc0_graph_init_unk44xx,
- nvc0_graph_init_unk78xx,
- nvc0_graph_init_unk60xx,
- nvd9_graph_init_unk64xx,
- nvd9_graph_init_unk58xx,
- nvc0_graph_init_unk80xx,
- nvd9_graph_init_gpc,
- nvd9_graph_init_tpc,
- nvc0_graph_init_unk88xx,
- nvc0_graph_tpc_0,
- NULL
+const struct nvc0_graph_init
+nvd9_graph_init_fe_1[] = {
+ { 0x40402c, 1, 0x04, 0x00000000 },
+ { 0x4040f0, 1, 0x04, 0x00000000 },
+ { 0x404174, 1, 0x04, 0x00000000 },
+ {}
};
+static const struct nvc0_graph_pack
+nvd9_graph_pack_mmio[] = {
+ { nvc0_graph_init_main_0 },
+ { nvc0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvd9_graph_init_pd_0 },
+ { nvd9_graph_init_ds_0 },
+ { nvc0_graph_init_scc_0 },
+ { nvd9_graph_init_prop_0 },
+ { nvc1_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { nvc1_graph_init_setup_1 },
+ { nvc0_graph_init_zcull_0 },
+ { nvd9_graph_init_gpm_0 },
+ { nvd9_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { nvc0_graph_init_tpccs_0 },
+ { nvd9_graph_init_tex_0 },
+ { nvd9_graph_init_pe_0 },
+ { nvc0_graph_init_l1c_0 },
+ { nvd9_graph_init_wwdx_0 },
+ { nvd9_graph_init_tpccs_1 },
+ { nvc0_graph_init_mpc_0 },
+ { nvd9_graph_init_sm_0 },
+ { nvc0_graph_init_be_0 },
+ { nvd9_graph_init_fe_1 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
struct nouveau_oclass *
nvd9_graph_oclass = &(struct nvc0_graph_oclass) {
.base.handle = NV_ENGINE(GR, 0xd9),
@@ -159,7 +185,7 @@ nvd9_graph_oclass = &(struct nvc0_graph_oclass) {
},
.cclass = &nvd9_grctx_oclass,
.sclass = nvc8_graph_sclass,
- .mmio = nvd9_graph_init_mmio,
+ .mmio = nvd9_graph_pack_mmio,
.fecs.ucode = &nvc0_graph_fecs_ucode,
.gpccs.ucode = &nvc0_graph_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
index 05ec09c88517..f7c011217175 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
@@ -23,6 +23,7 @@
*/
#include "nvc0.h"
+#include "ctxnvc0.h"
/*******************************************************************************
* Graphics object classes
@@ -38,11 +39,11 @@ nve4_graph_sclass[] = {
};
/*******************************************************************************
- * PGRAPH engine/subdev functions
+ * PGRAPH register lists
******************************************************************************/
-struct nvc0_graph_init
-nve4_graph_init_regs[] = {
+const struct nvc0_graph_init
+nve4_graph_init_main_0[] = {
{ 0x400080, 1, 0x04, 0x003083c2 },
{ 0x400088, 1, 0x04, 0x0001ffe7 },
{ 0x40008c, 1, 0x04, 0x00000000 },
@@ -57,81 +58,59 @@ nve4_graph_init_regs[] = {
{}
};
-static struct nvc0_graph_init
-nve4_graph_init_unk58xx[] = {
+static const struct nvc0_graph_init
+nve4_graph_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405900, 1, 0x04, 0x0000ff34 },
{ 0x405908, 1, 0x04, 0x00000000 },
- { 0x405928, 1, 0x04, 0x00000000 },
- { 0x40592c, 1, 0x04, 0x00000000 },
+ { 0x405928, 2, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nve4_graph_init_unk70xx[] = {
+static const struct nvc0_graph_init
+nve4_graph_init_sked_0[] = {
{ 0x407010, 1, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nve4_graph_init_unk5bxx[] = {
+static const struct nvc0_graph_init
+nve4_graph_init_cwd_0[] = {
{ 0x405b50, 1, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nve4_graph_init_gpc[] = {
- { 0x418408, 1, 0x04, 0x00000000 },
- { 0x4184a0, 1, 0x04, 0x00000000 },
- { 0x4184a4, 2, 0x04, 0x00000000 },
- { 0x418604, 1, 0x04, 0x00000000 },
- { 0x418680, 1, 0x04, 0x00000000 },
- { 0x418714, 1, 0x04, 0x00000000 },
- { 0x418384, 1, 0x04, 0x00000000 },
- { 0x418814, 3, 0x04, 0x00000000 },
- { 0x418b04, 1, 0x04, 0x00000000 },
- { 0x4188c8, 2, 0x04, 0x00000000 },
- { 0x4188d0, 1, 0x04, 0x00010000 },
- { 0x4188d4, 1, 0x04, 0x00000001 },
- { 0x418910, 1, 0x04, 0x00010001 },
- { 0x418914, 1, 0x04, 0x00000301 },
- { 0x418918, 1, 0x04, 0x00800000 },
- { 0x418980, 1, 0x04, 0x77777770 },
- { 0x418984, 3, 0x04, 0x77777777 },
- { 0x418c04, 1, 0x04, 0x00000000 },
- { 0x418c64, 1, 0x04, 0x00000000 },
- { 0x418c68, 1, 0x04, 0x00000000 },
- { 0x418c88, 1, 0x04, 0x00000000 },
- { 0x418cb4, 2, 0x04, 0x00000000 },
+static const struct nvc0_graph_init
+nve4_graph_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
- { 0x418d28, 1, 0x04, 0x00000000 },
- { 0x418d2c, 1, 0x04, 0x00000000 },
+ { 0x418d28, 2, 0x04, 0x00000000 },
{ 0x418f00, 1, 0x04, 0x00000000 },
{ 0x418f08, 1, 0x04, 0x00000000 },
{ 0x418f20, 2, 0x04, 0x00000000 },
{ 0x418e00, 1, 0x04, 0x00000060 },
{ 0x418e08, 1, 0x04, 0x00000000 },
- { 0x418e1c, 1, 0x04, 0x00000000 },
- { 0x418e20, 1, 0x04, 0x00000000 },
- { 0x41900c, 1, 0x04, 0x00000000 },
- { 0x419018, 1, 0x04, 0x00000000 },
+ { 0x418e1c, 2, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nve4_graph_init_tpc[] = {
+const struct nvc0_graph_init
+nve4_graph_init_tpccs_0[] = {
{ 0x419d0c, 1, 0x04, 0x00000000 },
{ 0x419d10, 1, 0x04, 0x00000014 },
- { 0x419ab0, 1, 0x04, 0x00000000 },
- { 0x419ac8, 1, 0x04, 0x00000000 },
- { 0x419ab8, 1, 0x04, 0x000000e7 },
- { 0x419abc, 2, 0x04, 0x00000000 },
- { 0x419ab4, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nve4_graph_init_pe_0[] = {
{ 0x41980c, 1, 0x04, 0x00000010 },
{ 0x419844, 1, 0x04, 0x00000000 },
{ 0x419850, 1, 0x04, 0x00000004 },
{ 0x419854, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nve4_graph_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419ca8, 1, 0x04, 0x00000000 },
{ 0x419cb0, 1, 0x04, 0x01000000 },
@@ -141,39 +120,25 @@ nve4_graph_init_tpc[] = {
{ 0x419cbc, 1, 0x04, 0x28137646 },
{ 0x419cc0, 2, 0x04, 0x00000000 },
{ 0x419c80, 1, 0x04, 0x00020232 },
- { 0x419c0c, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nve4_graph_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000000 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ee4, 1, 0x04, 0x00000000 },
{ 0x419ea4, 1, 0x04, 0x00000100 },
{ 0x419ea8, 1, 0x04, 0x00000000 },
- { 0x419eb4, 1, 0x04, 0x00000000 },
- { 0x419eb8, 3, 0x04, 0x00000000 },
+ { 0x419eb4, 4, 0x04, 0x00000000 },
{ 0x419edc, 1, 0x04, 0x00000000 },
{ 0x419f00, 1, 0x04, 0x00000000 },
{ 0x419f74, 1, 0x04, 0x00000555 },
{}
};
-struct nvc0_graph_init
-nve4_graph_init_unk[] = {
- { 0x41be04, 1, 0x04, 0x00000000 },
- { 0x41be08, 1, 0x04, 0x00000004 },
- { 0x41be0c, 1, 0x04, 0x00000000 },
- { 0x41be10, 1, 0x04, 0x003b8bc7 },
- { 0x41be14, 2, 0x04, 0x00000000 },
- { 0x41bfd4, 1, 0x04, 0x00800000 },
- { 0x41bfdc, 1, 0x04, 0x00000000 },
- { 0x41bff8, 1, 0x04, 0x00000000 },
- { 0x41bffc, 1, 0x04, 0x00000000 },
- { 0x41becc, 1, 0x04, 0x00000000 },
- { 0x41bee8, 1, 0x04, 0x00000000 },
- { 0x41beec, 1, 0x04, 0x00000000 },
- {}
-};
-
-struct nvc0_graph_init
-nve4_graph_init_unk88xx[] = {
+const struct nvc0_graph_init
+nve4_graph_init_be_0[] = {
{ 0x40880c, 1, 0x04, 0x00000000 },
{ 0x408850, 1, 0x04, 0x00000004 },
{ 0x408910, 9, 0x04, 0x00000000 },
@@ -186,6 +151,67 @@ nve4_graph_init_unk88xx[] = {
{}
};
+static const struct nvc0_graph_pack
+nve4_graph_pack_mmio[] = {
+ { nve4_graph_init_main_0 },
+ { nvc0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvd9_graph_init_pd_0 },
+ { nve4_graph_init_ds_0 },
+ { nvc0_graph_init_scc_0 },
+ { nve4_graph_init_sked_0 },
+ { nve4_graph_init_cwd_0 },
+ { nvd9_graph_init_prop_0 },
+ { nvc1_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { nvc1_graph_init_setup_1 },
+ { nvc0_graph_init_zcull_0 },
+ { nvd9_graph_init_gpm_0 },
+ { nve4_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { nve4_graph_init_tpccs_0 },
+ { nvd9_graph_init_tex_0 },
+ { nve4_graph_init_pe_0 },
+ { nve4_graph_init_l1c_0 },
+ { nvc0_graph_init_mpc_0 },
+ { nve4_graph_init_sm_0 },
+ { nvd7_graph_init_pes_0 },
+ { nvd7_graph_init_wwdx_0 },
+ { nvd7_graph_init_cbm_0 },
+ { nve4_graph_init_be_0 },
+ { nvc0_graph_init_fe_1 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve4_graph_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvc0_graph_priv *priv = (void *)object;
+
+ /*XXX: this is a nasty hack to power on gr on certain boards
+ * where therm has somehow left it disabled. Ideally we would
+ * know when this is needed, and why, but that has yet to be
+ * determined. For now, test for the particular mmio error that
+ * occurs in this situation, and then bash therm the way the
+ * nvidia driver does.
+ */
+ nv_mask(priv, 0x000200, 0x08001000, 0x08001000);
+ nv_rd32(priv, 0x000200);
+ if (nv_rd32(priv, 0x400700) == 0xbadf1000) {
+ nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
+ nv_rd32(priv, 0x000200);
+ nv_mask(priv, 0x020004, 0xc0000000, 0x40000000);
+ }
+
+ return nouveau_graph_fini(&priv->base, suspend);
+}
+
int
nve4_graph_init(struct nouveau_object *object)
{
@@ -210,8 +236,7 @@ nve4_graph_init(struct nouveau_object *object)
nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
- for (i = 0; oclass->mmio[i]; i++)
- nvc0_graph_mmio(priv, oclass->mmio[i]);
+ nvc0_graph_mmio(priv, oclass->mmio);
nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
@@ -298,25 +323,6 @@ nve4_graph_init(struct nouveau_object *object)
return nvc0_graph_init_ctxctl(priv);
}
-static struct nvc0_graph_init *
-nve4_graph_init_mmio[] = {
- nve4_graph_init_regs,
- nvc0_graph_init_unk40xx,
- nvc0_graph_init_unk44xx,
- nvc0_graph_init_unk78xx,
- nvc0_graph_init_unk60xx,
- nvd9_graph_init_unk64xx,
- nve4_graph_init_unk58xx,
- nvc0_graph_init_unk80xx,
- nve4_graph_init_unk70xx,
- nve4_graph_init_unk5bxx,
- nve4_graph_init_gpc,
- nve4_graph_init_tpc,
- nve4_graph_init_unk,
- nve4_graph_init_unk88xx,
- NULL
-};
-
#include "fuc/hubnve0.fuc.h"
static struct nvc0_graph_ucode
@@ -344,11 +350,11 @@ nve4_graph_oclass = &(struct nvc0_graph_oclass) {
.ctor = nvc0_graph_ctor,
.dtor = nvc0_graph_dtor,
.init = nve4_graph_init,
- .fini = _nouveau_graph_fini,
+ .fini = nve4_graph_fini,
},
.cclass = &nve4_grctx_oclass,
.sclass = nve4_graph_sclass,
- .mmio = nve4_graph_init_mmio,
+ .mmio = nve4_graph_pack_mmio,
.fecs.ucode = &nve4_graph_fecs_ucode,
.gpccs.ucode = &nve4_graph_gpccs_ucode,
}.base;
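
With the old nvc0_graph_init *[] arrays gone, nve4_graph_init above programs the whole register set through a single nvc0_graph_mmio(priv, oclass->mmio) call instead of looping over list pointers. A hedged sketch of what that walker presumably does with a pack table; the real helper may well use an iterator macro rather than these open-coded loops:

void
nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
{
	const struct nvc0_graph_pack *pack;
	const struct nvc0_graph_init *init;

	/* outer loop: one entry per init list; inner loop: one run per entry */
	for (pack = p; pack->init; pack++) {
		for (init = pack->init; init->count; init++) {
			u32 addr = init->addr;
			u32 next = init->addr + init->count * init->pitch;

			while (addr < next) {
				nv_wr32(priv, addr, init->data);
				addr += init->pitch;
			}
		}
	}
}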
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
index b1acb9939d95..c96762122b9b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
@@ -23,6 +23,7 @@
*/
#include "nvc0.h"
+#include "ctxnvc0.h"
/*******************************************************************************
* Graphics object classes
@@ -38,86 +39,57 @@ nvf0_graph_sclass[] = {
};
/*******************************************************************************
- * PGRAPH engine/subdev functions
+ * PGRAPH register lists
******************************************************************************/
-struct nvc0_graph_init
-nvf0_graph_init_unk40xx[] = {
+const struct nvc0_graph_init
+nvf0_graph_init_fe_0[] = {
{ 0x40415c, 1, 0x04, 0x00000000 },
{ 0x404170, 1, 0x04, 0x00000000 },
{ 0x4041b4, 1, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nvf0_graph_init_unk58xx[] = {
+static const struct nvc0_graph_init
+nvf0_graph_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405900, 1, 0x04, 0x0000ff00 },
{ 0x405908, 1, 0x04, 0x00000000 },
- { 0x405928, 1, 0x04, 0x00000000 },
- { 0x40592c, 1, 0x04, 0x00000000 },
+ { 0x405928, 2, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvf0_graph_init_unk70xx[] = {
+const struct nvc0_graph_init
+nvf0_graph_init_sked_0[] = {
{ 0x407010, 1, 0x04, 0x00000000 },
{ 0x407040, 1, 0x04, 0x80440424 },
{ 0x407048, 1, 0x04, 0x0000000a },
{}
};
-struct nvc0_graph_init
-nvf0_graph_init_unk5bxx[] = {
+const struct nvc0_graph_init
+nvf0_graph_init_cwd_0[] = {
{ 0x405b44, 1, 0x04, 0x00000000 },
{ 0x405b50, 1, 0x04, 0x00000000 },
{}
};
-static struct nvc0_graph_init
-nvf0_graph_init_gpc[] = {
- { 0x418408, 1, 0x04, 0x00000000 },
- { 0x4184a0, 1, 0x04, 0x00000000 },
- { 0x4184a4, 2, 0x04, 0x00000000 },
- { 0x418604, 1, 0x04, 0x00000000 },
- { 0x418680, 1, 0x04, 0x00000000 },
- { 0x418714, 1, 0x04, 0x00000000 },
- { 0x418384, 1, 0x04, 0x00000000 },
- { 0x418814, 3, 0x04, 0x00000000 },
- { 0x418b04, 1, 0x04, 0x00000000 },
- { 0x4188c8, 2, 0x04, 0x00000000 },
- { 0x4188d0, 1, 0x04, 0x00010000 },
- { 0x4188d4, 1, 0x04, 0x00000001 },
- { 0x418910, 1, 0x04, 0x00010001 },
- { 0x418914, 1, 0x04, 0x00000301 },
- { 0x418918, 1, 0x04, 0x00800000 },
- { 0x418980, 1, 0x04, 0x77777770 },
- { 0x418984, 3, 0x04, 0x77777777 },
- { 0x418c04, 1, 0x04, 0x00000000 },
- { 0x418c64, 1, 0x04, 0x00000000 },
- { 0x418c68, 1, 0x04, 0x00000000 },
- { 0x418c88, 1, 0x04, 0x00000000 },
- { 0x418cb4, 2, 0x04, 0x00000000 },
+const struct nvc0_graph_init
+nvf0_graph_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
- { 0x418d28, 1, 0x04, 0x00000000 },
- { 0x418d2c, 1, 0x04, 0x00000000 },
+ { 0x418d28, 2, 0x04, 0x00000000 },
{ 0x418f00, 1, 0x04, 0x00000400 },
{ 0x418f08, 1, 0x04, 0x00000000 },
- { 0x418f20, 1, 0x04, 0x00000000 },
- { 0x418f24, 1, 0x04, 0x00000000 },
+ { 0x418f20, 2, 0x04, 0x00000000 },
{ 0x418e00, 1, 0x04, 0x00000000 },
{ 0x418e08, 1, 0x04, 0x00000000 },
{ 0x418e1c, 2, 0x04, 0x00000000 },
- { 0x41900c, 1, 0x04, 0x00000000 },
- { 0x419018, 1, 0x04, 0x00000000 },
{}
};
-struct nvc0_graph_init
-nvf0_graph_init_tpc[] = {
- { 0x419d0c, 1, 0x04, 0x00000000 },
- { 0x419d10, 1, 0x04, 0x00000014 },
+static const struct nvc0_graph_init
+nvf0_graph_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ac8, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
@@ -125,10 +97,11 @@ nvf0_graph_init_tpc[] = {
{ 0x419abc, 2, 0x04, 0x00000000 },
{ 0x419ab4, 1, 0x04, 0x00000000 },
{ 0x419aa8, 2, 0x04, 0x00000000 },
- { 0x41980c, 1, 0x04, 0x00000010 },
- { 0x419844, 1, 0x04, 0x00000000 },
- { 0x419850, 1, 0x04, 0x00000004 },
- { 0x419854, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+nvf0_graph_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419ca8, 1, 0x04, 0x00000000 },
{ 0x419cb0, 1, 0x04, 0x01000000 },
@@ -139,7 +112,11 @@ nvf0_graph_init_tpc[] = {
{ 0x419cc0, 2, 0x04, 0x00000000 },
{ 0x419c80, 1, 0x04, 0x00020230 },
{ 0x419ccc, 2, 0x04, 0x00000000 },
- { 0x419c0c, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct nvc0_graph_init
+nvf0_graph_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000080 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ee4, 1, 0x04, 0x00000000 },
@@ -155,6 +132,44 @@ nvf0_graph_init_tpc[] = {
{}
};
+static const struct nvc0_graph_pack
+nvf0_graph_pack_mmio[] = {
+ { nve4_graph_init_main_0 },
+ { nvf0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvd9_graph_init_pd_0 },
+ { nvf0_graph_init_ds_0 },
+ { nvc0_graph_init_scc_0 },
+ { nvf0_graph_init_sked_0 },
+ { nvf0_graph_init_cwd_0 },
+ { nvd9_graph_init_prop_0 },
+ { nvc1_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { nvc1_graph_init_setup_1 },
+ { nvc0_graph_init_zcull_0 },
+ { nvd9_graph_init_gpm_0 },
+ { nvf0_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { nve4_graph_init_tpccs_0 },
+ { nvf0_graph_init_tex_0 },
+ { nve4_graph_init_pe_0 },
+ { nvf0_graph_init_l1c_0 },
+ { nvc0_graph_init_mpc_0 },
+ { nvf0_graph_init_sm_0 },
+ { nvd7_graph_init_pes_0 },
+ { nvd7_graph_init_wwdx_0 },
+ { nvd7_graph_init_cbm_0 },
+ { nve4_graph_init_be_0 },
+ { nvc0_graph_init_fe_1 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
static int
nvf0_graph_fini(struct nouveau_object *object, bool suspend)
{
@@ -192,25 +207,6 @@ nvf0_graph_fini(struct nouveau_object *object, bool suspend)
return nouveau_graph_fini(&priv->base, suspend);
}
-static struct nvc0_graph_init *
-nvf0_graph_init_mmio[] = {
- nve4_graph_init_regs,
- nvf0_graph_init_unk40xx,
- nvc0_graph_init_unk44xx,
- nvc0_graph_init_unk78xx,
- nvc0_graph_init_unk60xx,
- nvd9_graph_init_unk64xx,
- nvf0_graph_init_unk58xx,
- nvc0_graph_init_unk80xx,
- nvf0_graph_init_unk70xx,
- nvf0_graph_init_unk5bxx,
- nvf0_graph_init_gpc,
- nvf0_graph_init_tpc,
- nve4_graph_init_unk,
- nve4_graph_init_unk88xx,
- NULL
-};
-
#include "fuc/hubnvf0.fuc.h"
static struct nvc0_graph_ucode
@@ -242,7 +238,7 @@ nvf0_graph_oclass = &(struct nvc0_graph_oclass) {
},
.cclass = &nvf0_grctx_oclass,
.sclass = nvf0_graph_sclass,
- .mmio = nvf0_graph_init_mmio,
+ .mmio = nvf0_graph_pack_mmio,
.fecs.ucode = &nvf0_graph_fecs_ucode,
.gpccs.ucode = &nvf0_graph_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
index 5f6ede7c4892..92384759d2f5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
@@ -112,7 +112,7 @@ _nouveau_xtensa_init(struct nouveau_object *object)
snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
xtensa->addr >> 12);
- ret = request_firmware(&fw, name, &device->pdev->dev);
+ ret = request_firmware(&fw, name, nv_device_base(device));
if (ret) {
nv_warn(xtensa, "unable to load firmware %s\n", name);
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index e71a4325e670..9c0cd73462d9 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -258,6 +258,7 @@ struct nv04_display_scanoutpos {
* 9070: NVD0_DISP
* 9170: NVE0_DISP
* 9270: NVF0_DISP
+ * 9470: GM107_DISP
*/
#define NV50_DISP_CLASS 0x00005070
@@ -268,6 +269,7 @@ struct nv04_display_scanoutpos {
#define NVD0_DISP_CLASS 0x00009070
#define NVE0_DISP_CLASS 0x00009170
#define NVF0_DISP_CLASS 0x00009270
+#define GM107_DISP_CLASS 0x00009470
#define NV50_DISP_MTHD 0x00000000
#define NV50_DISP_MTHD_HEAD 0x00000003
@@ -342,6 +344,7 @@ struct nv50_display_class {
* 907a: NVD0_DISP_CURS
* 917a: NVE0_DISP_CURS
* 927a: NVF0_DISP_CURS
+ * 947a: GM107_DISP_CURS
*/
#define NV50_DISP_CURS_CLASS 0x0000507a
@@ -352,6 +355,7 @@ struct nv50_display_class {
#define NVD0_DISP_CURS_CLASS 0x0000907a
#define NVE0_DISP_CURS_CLASS 0x0000917a
#define NVF0_DISP_CURS_CLASS 0x0000927a
+#define GM107_DISP_CURS_CLASS 0x0000947a
struct nv50_display_curs_class {
u32 head;
@@ -365,6 +369,7 @@ struct nv50_display_curs_class {
* 907b: NVD0_DISP_OIMM
* 917b: NVE0_DISP_OIMM
 * 927b: NVF0_DISP_OIMM
+ * 947b: GM107_DISP_OIMM
*/
#define NV50_DISP_OIMM_CLASS 0x0000507b
@@ -375,6 +380,7 @@ struct nv50_display_curs_class {
#define NVD0_DISP_OIMM_CLASS 0x0000907b
#define NVE0_DISP_OIMM_CLASS 0x0000917b
#define NVF0_DISP_OIMM_CLASS 0x0000927b
+#define GM107_DISP_OIMM_CLASS 0x0000947b
struct nv50_display_oimm_class {
u32 head;
@@ -388,6 +394,7 @@ struct nv50_display_oimm_class {
* 907c: NVD0_DISP_SYNC
* 917c: NVE0_DISP_SYNC
* 927c: NVF0_DISP_SYNC
+ * 947c: GM107_DISP_SYNC
*/
#define NV50_DISP_SYNC_CLASS 0x0000507c
@@ -398,6 +405,7 @@ struct nv50_display_oimm_class {
#define NVD0_DISP_SYNC_CLASS 0x0000907c
#define NVE0_DISP_SYNC_CLASS 0x0000917c
#define NVF0_DISP_SYNC_CLASS 0x0000927c
+#define GM107_DISP_SYNC_CLASS 0x0000947c
struct nv50_display_sync_class {
u32 pushbuf;
@@ -412,6 +420,7 @@ struct nv50_display_sync_class {
* 907d: NVD0_DISP_MAST
* 917d: NVE0_DISP_MAST
* 927d: NVF0_DISP_MAST
+ * 947d: GM107_DISP_MAST
*/
#define NV50_DISP_MAST_CLASS 0x0000507d
@@ -422,6 +431,7 @@ struct nv50_display_sync_class {
#define NVD0_DISP_MAST_CLASS 0x0000907d
#define NVE0_DISP_MAST_CLASS 0x0000917d
#define NVF0_DISP_MAST_CLASS 0x0000927d
+#define GM107_DISP_MAST_CLASS 0x0000947d
struct nv50_display_mast_class {
u32 pushbuf;
@@ -435,6 +445,7 @@ struct nv50_display_mast_class {
* 907e: NVD0_DISP_OVLY
* 917e: NVE0_DISP_OVLY
* 927e: NVF0_DISP_OVLY
+ * 947e: GM107_DISP_OVLY
*/
#define NV50_DISP_OVLY_CLASS 0x0000507e
@@ -445,6 +456,7 @@ struct nv50_display_mast_class {
#define NVD0_DISP_OVLY_CLASS 0x0000907e
#define NVE0_DISP_OVLY_CLASS 0x0000917e
#define NVF0_DISP_OVLY_CLASS 0x0000927e
+#define GM107_DISP_OVLY_CLASS 0x0000947e
struct nv50_display_ovly_class {
u32 pushbuf;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 7b8ea221b00d..a8a9a9cf16cb 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -40,6 +40,7 @@ enum nv_subdev_type {
NVDEV_ENGINE_FIRST,
NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
+ NVDEV_ENGINE_IFB,
NVDEV_ENGINE_FIFO,
NVDEV_ENGINE_SW,
NVDEV_ENGINE_GR,
@@ -65,6 +66,7 @@ struct nouveau_device {
struct list_head head;
struct pci_dev *pdev;
+ struct platform_device *platformdev;
u64 handle;
const char *cfgopt;
@@ -84,6 +86,7 @@ struct nouveau_device {
NV_C0 = 0xc0,
NV_D0 = 0xd0,
NV_E0 = 0xe0,
+ GM100 = 0x110,
} card_type;
u32 chipset;
u32 crystal;
@@ -140,4 +143,32 @@ nv_device_match(struct nouveau_object *object, u16 dev, u16 ven, u16 sub)
device->pdev->subsystem_device == sub;
}
+static inline bool
+nv_device_is_pci(struct nouveau_device *device)
+{
+ return device->pdev != NULL;
+}
+
+static inline struct device *
+nv_device_base(struct nouveau_device *device)
+{
+ return nv_device_is_pci(device) ? &device->pdev->dev :
+ &device->platformdev->dev;
+}
+
+resource_size_t
+nv_device_resource_start(struct nouveau_device *device, unsigned int bar);
+
+resource_size_t
+nv_device_resource_len(struct nouveau_device *device, unsigned int bar);
+
+dma_addr_t
+nv_device_map_page(struct nouveau_device *device, struct page *page);
+
+void
+nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr);
+
+int
+nv_device_get_irq(struct nouveau_device *device, bool stall);
+
#endif
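
core/device.h grows bus-agnostic helpers so code that used to dereference device->pdev directly (BAR ioremaps, firmware loading, IRQ setup) keeps working when the device sits on a platform bus instead of PCI. The resource helpers are only declared here; as a rough illustration of the intended pattern, and not the series' actual implementation, they would dispatch on nv_device_is_pci() along these lines:

resource_size_t
nv_device_resource_start(struct nouveau_device *device, unsigned int bar)
{
	struct resource *res;

	if (nv_device_is_pci(device))
		return pci_resource_start(device->pdev, bar);

	/* assumes platform "BARs" are exposed as IORESOURCE_MEM entries */
	res = platform_get_resource(device->platformdev, IORESOURCE_MEM, bar);
	return res ? res->start : 0;
}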
diff --git a/drivers/gpu/drm/nouveau/core/include/core/namedb.h b/drivers/gpu/drm/nouveau/core/include/core/namedb.h
index 8897e0886085..f5b5fd8e1fc9 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/namedb.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/namedb.h
@@ -33,7 +33,7 @@ nv_namedb(void *obj)
int nouveau_namedb_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, u32 pclass,
- struct nouveau_oclass *, u32 engcls,
+ struct nouveau_oclass *, u64 engcls,
int size, void **);
int _nouveau_namedb_ctor(struct nouveau_object *, struct nouveau_object *,
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/device.h b/drivers/gpu/drm/nouveau/core/include/engine/device.h
index b3dd2c4c2f1e..672d3c8f4145 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/device.h
@@ -3,11 +3,20 @@
#include <core/device.h>
-#define nouveau_device_create(p,n,s,c,d,u) \
- nouveau_device_create_((p), (n), (s), (c), (d), sizeof(**u), (void **)u)
+struct platform_device;
-int nouveau_device_create_(struct pci_dev *, u64 name, const char *sname,
- const char *cfg, const char *dbg, int, void **);
+enum nv_bus_type {
+ NOUVEAU_BUS_PCI,
+ NOUVEAU_BUS_PLATFORM,
+};
+
+#define nouveau_device_create(p,t,n,s,c,d,u) \
+ nouveau_device_create_((void *)(p), (t), (n), (s), (c), (d), \
+ sizeof(**u), (void **)u)
+
+int nouveau_device_create_(void *, enum nv_bus_type type, u64 name,
+ const char *sname, const char *cfg, const char *dbg,
+ int, void **);
int nv04_identify(struct nouveau_device *);
int nv10_identify(struct nouveau_device *);
@@ -17,6 +26,7 @@ int nv40_identify(struct nouveau_device *);
int nv50_identify(struct nouveau_device *);
int nvc0_identify(struct nouveau_device *);
int nve0_identify(struct nouveau_device *);
+int gm100_identify(struct nouveau_device *);
struct nouveau_device *nouveau_device_find(u64 name);
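
The constructor macro now takes the bus type explicitly, so PCI and platform probe paths share one entry point. A hypothetical platform-side caller is sketched below; using pfdev->id as the 64-bit device name and threading the config/debug strings through as parameters are illustrative choices, not taken from this patch:

static int
example_platform_probe(struct platform_device *pfdev,
		       const char *config, const char *debug)
{
	struct nouveau_device *device;

	/* NOUVEAU_BUS_PLATFORM selects the platform_device branch inside
	 * nouveau_device_create_(); the pointer is passed as void *. */
	return nouveau_device_create(pfdev, NOUVEAU_BUS_PLATFORM,
				     pfdev->id, dev_name(&pfdev->dev),
				     config, debug, &device);
}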
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 4b21fabfbddb..fd0c68804de3 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -36,14 +36,15 @@ void _nouveau_disp_dtor(struct nouveau_object *);
#define _nouveau_disp_init _nouveau_engine_init
#define _nouveau_disp_fini _nouveau_engine_fini
-extern struct nouveau_oclass nv04_disp_oclass;
-extern struct nouveau_oclass nv50_disp_oclass;
-extern struct nouveau_oclass nv84_disp_oclass;
-extern struct nouveau_oclass nva0_disp_oclass;
-extern struct nouveau_oclass nv94_disp_oclass;
-extern struct nouveau_oclass nva3_disp_oclass;
-extern struct nouveau_oclass nvd0_disp_oclass;
-extern struct nouveau_oclass nve0_disp_oclass;
-extern struct nouveau_oclass nvf0_disp_oclass;
+extern struct nouveau_oclass *nv04_disp_oclass;
+extern struct nouveau_oclass *nv50_disp_oclass;
+extern struct nouveau_oclass *nv84_disp_oclass;
+extern struct nouveau_oclass *nva0_disp_oclass;
+extern struct nouveau_oclass *nv94_disp_oclass;
+extern struct nouveau_oclass *nva3_disp_oclass;
+extern struct nouveau_oclass *nvd0_disp_oclass;
+extern struct nouveau_oclass *nve0_disp_oclass;
+extern struct nouveau_oclass *nvf0_disp_oclass;
+extern struct nouveau_oclass *gm107_disp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 97705618de97..871edfdf3d5b 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -63,13 +63,14 @@ extern struct nouveau_oclass nv40_graph_oclass;
extern struct nouveau_oclass nv50_graph_oclass;
extern struct nouveau_oclass *nvc0_graph_oclass;
extern struct nouveau_oclass *nvc1_graph_oclass;
-extern struct nouveau_oclass *nvc3_graph_oclass;
+extern struct nouveau_oclass *nvc4_graph_oclass;
extern struct nouveau_oclass *nvc8_graph_oclass;
extern struct nouveau_oclass *nvd7_graph_oclass;
extern struct nouveau_oclass *nvd9_graph_oclass;
extern struct nouveau_oclass *nve4_graph_oclass;
extern struct nouveau_oclass *nvf0_graph_oclass;
extern struct nouveau_oclass *nv108_graph_oclass;
+extern struct nouveau_oclass *gm107_graph_oclass;
extern const struct nouveau_bitfield nv04_graph_nsource[];
extern struct nouveau_ofuncs nv04_graph_ofuncs;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h
new file mode 100644
index 000000000000..bba01ab1e049
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h
@@ -0,0 +1,23 @@
+#ifndef __NVBIOS_P0260_H__
+#define __NVBIOS_P0260_H__
+
+u32 nvbios_P0260Te(struct nouveau_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
+
+struct nvbios_P0260E {
+ u32 data;
+};
+
+u32 nvbios_P0260Ee(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_P0260Ep(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_P0260E *);
+
+struct nvbios_P0260X {
+ u32 data;
+};
+
+u32 nvbios_P0260Xe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_P0260Xp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_P0260X *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
index c1270548fd0d..a32feb3f3fb6 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
@@ -16,6 +16,7 @@ enum dcb_connector_type {
DCB_CONNECTOR_eDP = 0x47,
DCB_CONNECTOR_HDMI_0 = 0x60,
DCB_CONNECTOR_HDMI_1 = 0x61,
+ DCB_CONNECTOR_HDMI_C = 0x63,
DCB_CONNECTOR_DMS59_DP0 = 0x64,
DCB_CONNECTOR_DMS59_DP1 = 0x65,
DCB_CONNECTOR_NONE = 0xff
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
index c5e6d1e6ac1d..c086ac6d677d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
@@ -61,6 +61,6 @@ struct nvbios_ramcfg {
};
u8 nvbios_ramcfg_count(struct nouveau_bios *);
-u8 nvbios_ramcfg_index(struct nouveau_bios *);
+u8 nvbios_ramcfg_index(struct nouveau_subdev *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
index 083541dbe9c8..8dc5051df55d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
@@ -31,6 +31,12 @@ struct nouveau_therm_trip_point {
int hysteresis;
};
+enum nvbios_therm_fan_mode {
+ NVBIOS_THERM_FAN_TRIP = 0,
+ NVBIOS_THERM_FAN_LINEAR = 1,
+ NVBIOS_THERM_FAN_OTHER = 2,
+};
+
struct nvbios_therm_fan {
u16 pwm_freq;
@@ -40,6 +46,7 @@ struct nvbios_therm_fan {
u16 bump_period;
u16 slow_down_period;
+ enum nvbios_therm_fan_mode fan_mode;
struct nouveau_therm_trip_point trip[NOUVEAU_TEMP_FAN_TRIP_MAX];
u8 nr_fan_trip;
u8 linear_min_temp;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
index ed1ac68c38b3..e292271a84e4 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -9,6 +9,7 @@ struct nouveau_devinit {
bool post;
void (*meminit)(struct nouveau_devinit *);
int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
+ u32 (*mmio)(struct nouveau_devinit *, u32 addr);
};
static inline struct nouveau_devinit *
@@ -28,5 +29,6 @@ extern struct nouveau_oclass *nv98_devinit_oclass;
extern struct nouveau_oclass *nva3_devinit_oclass;
extern struct nouveau_oclass *nvaf_devinit_oclass;
extern struct nouveau_oclass *nvc0_devinit_oclass;
+extern struct nouveau_oclass *gm107_devinit_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index d7ecafbae1ca..58c7ccdebb01 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -105,6 +105,7 @@ extern struct nouveau_oclass *nvaa_fb_oclass;
extern struct nouveau_oclass *nvaf_fb_oclass;
extern struct nouveau_oclass *nvc0_fb_oclass;
extern struct nouveau_oclass *nve0_fb_oclass;
+extern struct nouveau_oclass *gm107_fb_oclass;
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
index a1985ed3d58d..c9c1950b7743 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
@@ -35,6 +35,7 @@ nouveau_ltcg(void *obj)
#define _nouveau_ltcg_init _nouveau_subdev_init
#define _nouveau_ltcg_fini _nouveau_subdev_fini
-extern struct nouveau_oclass nvc0_ltcg_oclass;
+extern struct nouveau_oclass *gf100_ltcg_oclass;
+extern struct nouveau_oclass *gm107_ltcg_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index 3c6738edd127..72b176831be6 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -12,6 +12,7 @@ struct nouveau_mc_intr {
struct nouveau_mc {
struct nouveau_subdev base;
bool use_msi;
+ unsigned int irq;
};
static inline struct nouveau_mc *
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index 69891d4a3fe7..d4a68179e586 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -31,7 +31,7 @@ struct nouveau_therm {
int (*pwm_ctrl)(struct nouveau_therm *, int line, bool);
int (*pwm_get)(struct nouveau_therm *, int line, u32 *, u32 *);
int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
- int (*pwm_clock)(struct nouveau_therm *);
+ int (*pwm_clock)(struct nouveau_therm *, int line);
int (*fan_get)(struct nouveau_therm *);
int (*fan_set)(struct nouveau_therm *, int);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
index 9ab70dfe5b02..db9be803a874 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -59,5 +59,6 @@ int nouveau_timer_create_(struct nouveau_object *, struct nouveau_engine *,
struct nouveau_oclass *, int size, void **);
extern struct nouveau_oclass nv04_timer_oclass;
+extern struct nouveau_oclass gk20a_timer_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index 191e739f30d1..d0ced94ca54c 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/firmware.h>
@@ -23,17 +24,6 @@
#include <asm/unaligned.h>
-static inline int
-ffsll(u64 mask)
-{
- int i;
- for (i = 0; i < 64; i++) {
- if (mask & (1ULL << i))
- return i + 1;
- }
- return 0;
-}
-
#ifndef ioread32_native
#ifdef __BIG_ENDIAN
#define ioread16_native ioread16be
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index 7098ddd54678..bdf594116f3f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -118,8 +118,8 @@ nouveau_bar_create_(struct nouveau_object *parent,
if (ret)
return ret;
- bar->iomem = ioremap(pci_resource_start(device->pdev, 3),
- pci_resource_len(device->pdev, 3));
+ bar->iomem = ioremap(nv_device_resource_start(device, 3),
+ nv_device_resource_len(device, 3));
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
index 090d594a21b3..f748ba49dfc8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -139,7 +139,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
/* BAR3 */
start = 0x0100000000ULL;
- limit = start + pci_resource_len(device->pdev, 3);
+ limit = start + nv_device_resource_len(device, 3);
ret = nouveau_vm_new(device, start, limit, start, &vm);
if (ret)
@@ -173,7 +173,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
/* BAR1 */
start = 0x0000000000ULL;
- limit = start + pci_resource_len(device->pdev, 1);
+ limit = start + nv_device_resource_len(device, 1);
ret = nouveau_vm_new(device, start, limit--, start, &vm);
if (ret)
@@ -231,7 +231,7 @@ static int
nv50_bar_init(struct nouveau_object *object)
{
struct nv50_bar_priv *priv = (void *)object;
- int ret;
+ int ret, i;
ret = nouveau_bar_init(&priv->base);
if (ret)
@@ -249,6 +249,8 @@ nv50_bar_init(struct nouveau_object *object)
nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
+ for (i = 0; i < 8; i++)
+ nv_wr32(priv, 0x001900 + (i * 4), 0x00000000);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index bac5e754de35..3f30db62e656 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -84,7 +84,6 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nouveau_device *device = nv_device(parent);
- struct pci_dev *pdev = device->pdev;
struct nvc0_bar_priv *priv;
struct nouveau_gpuobj *mem;
struct nouveau_vm *vm;
@@ -107,14 +106,14 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 3), 0, &vm);
+ ret = nouveau_vm_new(device, 0, nv_device_resource_len(device, 3), 0, &vm);
if (ret)
return ret;
atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
ret = nouveau_gpuobj_new(nv_object(priv), NULL,
- (pci_resource_len(pdev, 3) >> 12) * 8,
+ (nv_device_resource_len(device, 3) >> 12) * 8,
0x1000, NVOBJ_FLAG_ZERO_ALLOC,
&vm->pgt[0].obj[0]);
vm->pgt[0].refcount[0] = 1;
@@ -128,8 +127,8 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
- nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 3) - 1));
- nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));
+ nv_wo32(mem, 0x0208, lower_32_bits(nv_device_resource_len(device, 3) - 1));
+ nv_wo32(mem, 0x020c, upper_32_bits(nv_device_resource_len(device, 3) - 1));
/* BAR1 */
ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
@@ -143,7 +142,7 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 1), 0, &vm);
+ ret = nouveau_vm_new(device, 0, nv_device_resource_len(device, 1), 0, &vm);
if (ret)
return ret;
@@ -156,8 +155,8 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
- nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 1) - 1));
- nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 1) - 1));
+ nv_wo32(mem, 0x0208, lower_32_bits(nv_device_resource_len(device, 1) - 1));
+ nv_wo32(mem, 0x020c, upper_32_bits(nv_device_resource_len(device, 1) - 1));
priv->base.alloc = nouveau_bar_alloc;
priv->base.kmap = nvc0_bar_kmap;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/P0260.c b/drivers/gpu/drm/nouveau/core/subdev/bios/P0260.c
new file mode 100644
index 000000000000..199f4e5f7488
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/P0260.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
+#include <subdev/bios/P0260.h>
+
+u32
+nvbios_P0260Te(struct nouveau_bios *bios,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
+{
+ struct bit_entry bit_P;
+ u32 data = 0x00000000;
+
+ if (!bit_entry(bios, 'P', &bit_P)) {
+ if (bit_P.version == 2 && bit_P.length > 0x63)
+ data = nv_ro32(bios, bit_P.offset + 0x60);
+ if (data) {
+ *ver = nv_ro08(bios, data + 0);
+ switch (*ver) {
+ case 0x10:
+ *hdr = nv_ro08(bios, data + 1);
+ *cnt = nv_ro08(bios, data + 2);
+ *len = 4;
+ *xnr = nv_ro08(bios, data + 3);
+ *xsz = 4;
+ return data;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0x00000000;
+}
+
+u32
+nvbios_P0260Ee(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+ u8 hdr, cnt, xnr, xsz;
+ u32 data = nvbios_P0260Te(bios, ver, &hdr, &cnt, len, &xnr, &xsz);
+ if (data && idx < cnt)
+ return data + hdr + (idx * *len);
+ return 0x00000000;
+}
+
+u32
+nvbios_P0260Ep(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
+ struct nvbios_P0260E *info)
+{
+ u32 data = nvbios_P0260Ee(bios, idx, ver, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ case 0x10:
+ info->data = nv_ro32(bios, data);
+ return data;
+ default:
+ break;
+ }
+ return 0x00000000;
+}
+
+u32
+nvbios_P0260Xe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *xsz)
+{
+ u8 hdr, cnt, len, xnr;
+ u32 data = nvbios_P0260Te(bios, ver, &hdr, &cnt, &len, &xnr, xsz);
+ if (data && idx < xnr)
+ return data + hdr + (cnt * len) + (idx * *xsz);
+ return 0x00000000;
+}
+
+u32
+nvbios_P0260Xp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_P0260X *info)
+{
+ u32 data = nvbios_P0260Xe(bios, idx, ver, hdr);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ case 0x10:
+ info->data = nv_ro32(bios, data);
+ return data;
+ default:
+ break;
+ }
+ return 0x00000000;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index ef0c9c4a8cc3..e9df94f96d78 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -90,10 +90,26 @@ nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
int i;
if (device->card_type >= NV_50) {
- if ( device->card_type < NV_C0 ||
- !(nv_rd32(bios, 0x022500) & 0x00000001))
- addr = (u64)(nv_rd32(bios, 0x619f04) & 0xffffff00) << 8;
+ if (device->card_type >= NV_C0 && device->card_type < GM100) {
+ if (nv_rd32(bios, 0x022500) & 0x00000001)
+ return;
+ } else
+ if (device->card_type >= GM100) {
+ if (nv_rd32(bios, 0x021c04) & 0x00000001)
+ return;
+ }
+
+ addr = nv_rd32(bios, 0x619f04);
+ if (!(addr & 0x00000008)) {
+ nv_debug(bios, "... not enabled\n");
+ return;
+ }
+ if ( (addr & 0x00000003) != 1) {
+ nv_debug(bios, "... not in vram\n");
+ return;
+ }
+ addr = (u64)(addr >> 8) << 8;
if (!addr) {
addr = (u64)nv_rd32(bios, 0x001700) << 16;
addr += 0xf0000;
@@ -141,6 +157,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
pcireg = 0x001850;
access = nv_mask(bios, pcireg, 0x00000001, 0x00000000);
+ /* WARNING: PROM accesses should always be 32-bit aligned. Other
+ * accesses work on most chipsets but not on Kepler chipsets.
+ */
+
/* bail if no rom signature, with a workaround for a PROM reading
* issue on some chipsets. the first read after a period of
* inactivity returns the wrong result, so retry the first header
@@ -148,31 +168,32 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
*/
i = 16;
do {
- if (nv_rd08(bios, 0x300000) == 0x55)
+ if ((nv_rd32(bios, 0x300000) & 0xffff) == 0xaa55)
break;
} while (i--);
- if (!i || nv_rd08(bios, 0x300001) != 0xaa)
- goto out;
-
- /* additional check (see note below) - read PCI record header */
- pcir = nv_rd08(bios, 0x300018) |
- nv_rd08(bios, 0x300019) << 8;
- if (nv_rd08(bios, 0x300000 + pcir) != 'P' ||
- nv_rd08(bios, 0x300001 + pcir) != 'C' ||
- nv_rd08(bios, 0x300002 + pcir) != 'I' ||
- nv_rd08(bios, 0x300003 + pcir) != 'R')
+ if (!i)
goto out;
/* read entire bios image to system memory */
- bios->size = nv_rd08(bios, 0x300002) * 512;
+ bios->size = ((nv_rd32(bios, 0x300000) >> 16) & 0xff) * 512;
if (!bios->size)
goto out;
bios->data = kmalloc(bios->size, GFP_KERNEL);
if (bios->data) {
- for (i = 0; i < bios->size; i++)
- nv_wo08(bios, i, nv_rd08(bios, 0x300000 + i));
+ for (i = 0; i < bios->size; i += 4)
+ nv_wo32(bios, i, nv_rd32(bios, 0x300000 + i));
+ }
+
+ /* check the PCI record header */
+ pcir = nv_ro16(bios, 0x0018);
+ if (bios->data[pcir + 0] != 'P' ||
+ bios->data[pcir + 1] != 'C' ||
+ bios->data[pcir + 2] != 'I' ||
+ bios->data[pcir + 3] != 'R') {
+ bios->size = 0;
+ kfree(bios->data);
}
out:
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 2d9b9d7a7992..88606bfaf847 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -142,9 +142,36 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
if (*ver >= 0x40) {
u32 conf = nv_ro32(bios, dcb + 0x04);
switch (outp->type) {
+ case DCB_OUTPUT_DP:
+ switch (conf & 0x00e00000) {
+ case 0x00000000:
+ outp->dpconf.link_bw = 0x06;
+ break;
+ case 0x00200000:
+ outp->dpconf.link_bw = 0x0a;
+ break;
+ case 0x00400000:
+ default:
+ outp->dpconf.link_bw = 0x14;
+ break;
+ }
+
+ switch (conf & 0x0f000000) {
+ case 0x0f000000:
+ outp->dpconf.link_nr = 4;
+ break;
+ case 0x03000000:
+ outp->dpconf.link_nr = 2;
+ break;
+ case 0x01000000:
+ default:
+ outp->dpconf.link_nr = 1;
+ break;
+ }
+
+ /* fall-through... */
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
outp->link = (conf & 0x00000030) >> 4;
outp->sorconf.link = outp->link; /*XXX*/
outp->extdev = 0x00;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index de201baeb053..acaeaf79e3f0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -118,6 +118,8 @@ init_conn(struct nvbios_init *init)
static inline u32
init_nvreg(struct nvbios_init *init, u32 reg)
{
+ struct nouveau_devinit *devinit = nouveau_devinit(init->bios);
+
/* C51 (at least) sometimes has the lower bits set which the VBIOS
* interprets to mean that access needs to go through certain IO
* ports instead. The NVIDIA binary driver has been seen to access
@@ -147,6 +149,9 @@ init_nvreg(struct nvbios_init *init, u32 reg)
if (reg & ~0x00fffffc)
warn("unknown bits in register 0x%08x\n", reg);
+
+ if (devinit->mmio)
+ reg = devinit->mmio(devinit, reg);
return reg;
}
@@ -154,7 +159,7 @@ static u32
init_rd32(struct nvbios_init *init, u32 reg)
{
reg = init_nvreg(init, reg);
- if (init_exec(init))
+ if (reg != ~0 && init_exec(init))
return nv_rd32(init->subdev, reg);
return 0x00000000;
}
@@ -163,7 +168,7 @@ static void
init_wr32(struct nvbios_init *init, u32 reg, u32 val)
{
reg = init_nvreg(init, reg);
- if (init_exec(init))
+ if (reg != ~0 && init_exec(init))
nv_wr32(init->subdev, reg, val);
}
@@ -171,7 +176,7 @@ static u32
init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
{
reg = init_nvreg(init, reg);
- if (init_exec(init)) {
+ if (reg != ~0 && init_exec(init)) {
u32 tmp = nv_rd32(init->subdev, reg);
nv_wr32(init->subdev, reg, (tmp & ~mask) | val);
return tmp;
@@ -410,7 +415,7 @@ init_ram_restrict(struct nvbios_init *init)
* in case *not* re-reading the strap causes similar breakage.
*/
if (!init->ramcfg || init->bios->version.major < 0x70)
- init->ramcfg = 0x80000000 | nvbios_ramcfg_index(init->bios);
+ init->ramcfg = 0x80000000 | nvbios_ramcfg_index(init->subdev);
return (init->ramcfg & 0x7fffffff);
}
@@ -845,9 +850,8 @@ init_idx_addr_latched(struct nvbios_init *init)
u32 data = nv_ro32(bios, init->offset + 13);
u8 count = nv_ro08(bios, init->offset + 17);
- trace("INDEX_ADDRESS_LATCHED\t"
- "R[0x%06x] : R[0x%06x]\n\tCTRL &= 0x%08x |= 0x%08x\n",
- creg, dreg, mask, data);
+ trace("INDEX_ADDRESS_LATCHED\tR[0x%06x] : R[0x%06x]\n", creg, dreg);
+ trace("\tCTRL &= 0x%08x |= 0x%08x\n", mask, data);
init->offset += 18;
while (count--) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
index 991aedda999b..6c401f70ab99 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
@@ -27,9 +27,9 @@
#include <subdev/bios/ramcfg.h>
static u8
-nvbios_ramcfg_strap(struct nouveau_bios *bios)
+nvbios_ramcfg_strap(struct nouveau_subdev *subdev)
{
- return (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+ return (nv_rd32(subdev, 0x101000) & 0x0000003c) >> 2;
}
u8
@@ -48,9 +48,10 @@ nvbios_ramcfg_count(struct nouveau_bios *bios)
}
u8
-nvbios_ramcfg_index(struct nouveau_bios *bios)
+nvbios_ramcfg_index(struct nouveau_subdev *subdev)
{
- u8 strap = nvbios_ramcfg_strap(bios);
+ struct nouveau_bios *bios = nouveau_bios(subdev);
+ u8 strap = nvbios_ramcfg_strap(subdev);
u32 xlat = 0x00000000;
struct bit_entry bit_M;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
index 22ac6dbd6c8f..d15854094078 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -164,6 +164,7 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
i = 0;
fan->nr_fan_trip = 0;
+ fan->fan_mode = NVBIOS_THERM_FAN_OTHER;
while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
s16 value = nv_ro16(bios, entry + 1);
@@ -174,6 +175,8 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
break;
case 0x24:
fan->nr_fan_trip++;
+ if (fan->fan_mode > NVBIOS_THERM_FAN_TRIP)
+ fan->fan_mode = NVBIOS_THERM_FAN_TRIP;
cur_trip = &fan->trip[fan->nr_fan_trip - 1];
cur_trip->hysteresis = value & 0xf;
cur_trip->temp = (value & 0xff0) >> 4;
@@ -194,11 +197,19 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
fan->slow_down_period = value;
break;
case 0x46:
+ if (fan->fan_mode > NVBIOS_THERM_FAN_LINEAR)
+ fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
fan->linear_min_temp = nv_ro08(bios, entry + 1);
fan->linear_max_temp = nv_ro08(bios, entry + 2);
break;
}
}
+ /* starting from fermi, fan management is always linear */
+ if (nv_device(bios)->card_type >= NV_C0 &&
+ fan->fan_mode == NVBIOS_THERM_FAN_OTHER) {
+ fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
index 8fa34e8152c2..239acfe876c3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -96,5 +96,6 @@ nouveau_devinit_create_(struct nouveau_object *parent,
devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false);
devinit->meminit = impl->meminit;
devinit->pll_set = impl->pll_set;
+ devinit->mmio = impl->mmio;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
index 6b56a0f4cb40..4fe49cf4c99a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
@@ -24,6 +24,8 @@
*
*/
+#include <core/device.h>
+
#define NV04_PFB_BOOT_0 0x00100000
# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000
@@ -60,10 +62,10 @@
# define NV10_PFB_REFCTRL_VALID_1 (1 << 31)
static inline struct io_mapping *
-fbmem_init(struct pci_dev *pdev)
+fbmem_init(struct nouveau_device *dev)
{
- return io_mapping_create_wc(pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1));
+ return io_mapping_create_wc(nv_device_resource_start(dev, 1),
+ nv_device_resource_len(dev, 1));
}
static inline void
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
new file mode 100644
index 000000000000..c69bc7f54e37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+gm107_devinit_disable(struct nouveau_devinit *devinit)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r021c00 = nv_rd32(priv, 0x021c00);
+ u32 r021c04 = nv_rd32(priv, 0x021c04);
+ u64 disable = 0ULL;
+
+ if (r021c00 & 0x00000001)
+ disable |= (1ULL << NVDEV_ENGINE_COPY0);
+ if (r021c00 & 0x00000004)
+ disable |= (1ULL << NVDEV_ENGINE_COPY2);
+ if (r021c04 & 0x00000001)
+ disable |= (1ULL << NVDEV_ENGINE_DISP);
+
+ return disable;
+}
+
+struct nouveau_oclass *
+gm107_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x07),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_devinit_ctor,
+ .dtor = _nouveau_devinit_dtor,
+ .init = nv50_devinit_init,
+ .fini = _nouveau_devinit_fini,
+ },
+ .pll_set = nvc0_devinit_pll_set,
+ .disable = gm107_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index 7037eae46e44..052ad690b468 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -38,7 +38,7 @@ nv04_devinit_meminit(struct nouveau_devinit *devinit)
int i;
/* Map the framebuffer aperture */
- fb = fbmem_init(nv_device(priv)->pdev);
+ fb = fbmem_init(nv_device(priv));
if (!fb) {
nv_error(priv, "failed to map fb\n");
return;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
index 98b7e6780dc7..4a19c10e5178 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -53,7 +53,7 @@ nv05_devinit_meminit(struct nouveau_devinit *devinit)
int i, v;
/* Map the framebuffer aperture */
- fb = fbmem_init(nv_device(priv)->pdev);
+ fb = fbmem_init(nv_device(priv));
if (!fb) {
nv_error(priv, "failed to map fb\n");
return;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 32b3d2131a7f..3b8d657da279 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -46,7 +46,7 @@ nv10_devinit_meminit(struct nouveau_devinit *devinit)
mem_width_count = 2;
/* Map the framebuffer aperture */
- fb = fbmem_init(nv_device(priv)->pdev);
+ fb = fbmem_init(nv_device(priv));
if (!fb) {
nv_error(priv, "failed to map fb\n");
return;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
index 4689ba303b0b..04bc9732644c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -37,7 +37,7 @@ nv20_devinit_meminit(struct nouveau_devinit *devinit)
struct io_mapping *fb;
/* Map the framebuffer aperture */
- fb = fbmem_init(nv_device(priv)->pdev);
+ fb = fbmem_init(nv_device(priv));
if (!fb) {
nv_error(priv, "failed to map fb\n");
return;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
index 141c27e9f182..51d5076333ec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
@@ -5,6 +5,7 @@
struct nv50_devinit_priv {
struct nouveau_devinit base;
+ u32 r001540;
};
int nv50_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
@@ -15,4 +16,6 @@ int nv50_devinit_pll_set(struct nouveau_devinit *, u32, u32);
int nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+int nvc0_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
index 6dedf1dad7f7..006cf348bda7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
@@ -81,6 +81,55 @@ nva3_devinit_disable(struct nouveau_devinit *devinit)
return disable;
}
+static u32
+nva3_devinit_mmio_part[] = {
+ 0x100720, 0x1008bc, 4,
+ 0x100a20, 0x100adc, 4,
+ 0x100d80, 0x100ddc, 4,
+ 0x110000, 0x110f9c, 4,
+ 0x111000, 0x11103c, 8,
+ 0x111080, 0x1110fc, 4,
+ 0x111120, 0x1111fc, 4,
+ 0x111300, 0x1114bc, 4,
+ 0,
+};
+
+static u32
+nva3_devinit_mmio(struct nouveau_devinit *devinit, u32 addr)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 *mmio = nva3_devinit_mmio_part;
+
+ /* the init tables on some boards have INIT_RAM_RESTRICT_ZM_REG_GROUP
+ * instructions which touch registers that may not even exist on
+ * some configurations (Quadro 400), which causes the register
+ * interface to screw up for some amount of time after attempting to
+ * write to one of these, and results in all sorts of things going
+ * horribly wrong.
+ *
+ * the binary driver avoids touching these registers at all, however,
+ * the video bios doesn't care and does what the scripts say. it's
+ * presumed that the io-port access to priv registers isn't affected
+ * by the screw-up bug mentioned above.
+ *
+ * really, a new opcode should've been invented to handle these
+ * requirements, but whatever, it's too late for that now.
+ */
+ while (mmio[0]) {
+ if (addr >= mmio[0] && addr <= mmio[1]) {
+ u32 part = (addr / mmio[2]) & 7;
+ if (!priv->r001540)
+ priv->r001540 = nv_rd32(priv, 0x001540);
+ if (part >= hweight8((priv->r001540 >> 16) & 0xff))
+ return ~0;
+ return addr;
+ }
+ mmio += 3;
+ }
+
+ return addr;
+}
+
struct nouveau_oclass *
nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0xa3),
@@ -92,4 +141,5 @@ nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nva3_devinit_pll_set,
.disable = nva3_devinit_disable,
+ .mmio = nva3_devinit_mmio,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
index fa7e63766b1b..30c765747eea 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
@@ -24,7 +24,7 @@
#include "nv50.h"
-static int
+int
nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
{
struct nv50_devinit_priv *priv = (void *)devinit;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
index 822a2fbf44a5..f0e8683ad840 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
@@ -11,6 +11,7 @@ struct nouveau_devinit_impl {
void (*meminit)(struct nouveau_devinit *);
int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
u64 (*disable)(struct nouveau_devinit *);
+ u32 (*mmio)(struct nouveau_devinit *, u32);
};
#define nouveau_devinit_create(p,e,o,d) \
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gm107.c
new file mode 100644
index 000000000000..c4840aedc2dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gm107.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+struct nouveau_oclass *
+gm107_fb_oclass = &(struct nouveau_fb_impl) {
+ .base.handle = NV_SUBDEV(FB, 0x07),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_fb_ctor,
+ .dtor = nvc0_fb_dtor,
+ .init = nvc0_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+ .memtype = nvc0_fb_memtype_valid,
+ .ram = &gm107_ram_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index cbc7f00c1278..1fc55c1e91a1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -250,10 +250,8 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (priv->r100c08_page) {
- priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(device->pdev, priv->r100c08))
+ priv->r100c08 = nv_device_map_page(device, priv->r100c08_page);
+ if (!priv->r100c08)
nv_warn(priv, "failed 0x100c08 page map\n");
} else {
nv_warn(priv, "failed 0x100c08 page alloc\n");
@@ -270,8 +268,7 @@ nv50_fb_dtor(struct nouveau_object *object)
struct nv50_fb_priv *priv = (void *)object;
if (priv->r100c08_page) {
- pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ nv_device_unmap_page(device, priv->r100c08);
__free_page(priv->r100c08_page);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 45470e1f0385..0670ae33ee45 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -70,8 +70,7 @@ nvc0_fb_dtor(struct nouveau_object *object)
struct nvc0_fb_priv *priv = (void *)object;
if (priv->r100c10_page) {
- pci_unmap_page(device->pdev, priv->r100c10, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ nv_device_unmap_page(device, priv->r100c10);
__free_page(priv->r100c10_page);
}
@@ -94,10 +93,8 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (priv->r100c10_page) {
- priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page,
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(device->pdev, priv->r100c10))
+ priv->r100c10 = nv_device_map_page(device, priv->r100c10_page);
+ if (!priv->r100c10)
return -EFAULT;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
index 9e1931eb746f..705a06d755ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
@@ -18,12 +18,14 @@ int nvc0_fb_init(struct nouveau_object *);
bool nvc0_fb_memtype_valid(struct nouveau_fb *, u32);
-#define nvc0_ram_create(p,e,o,d) \
- nvc0_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvc0_ram_create(p,e,o,m,d) \
+ nvc0_ram_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
int nvc0_ram_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
+ struct nouveau_oclass *, u32, int, void **);
int nvc0_ram_get(struct nouveau_fb *, u64, u32, u32, u32,
struct nouveau_mem **);
void nvc0_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+int nve0_ram_init(struct nouveau_object*);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index edaf95dee612..da74c889aed4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -32,6 +32,7 @@ extern struct nouveau_oclass nva3_ram_oclass;
extern struct nouveau_oclass nvaa_ram_oclass;
extern struct nouveau_oclass nvc0_ram_oclass;
extern struct nouveau_oclass nve0_ram_oclass;
+extern struct nouveau_oclass gm107_ram_oclass;
int nouveau_sddr3_calc(struct nouveau_ram *ram);
int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramgm107.c
new file mode 100644
index 000000000000..4c6363595c79
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramgm107.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+struct gm107_ram {
+ struct nouveau_ram base;
+};
+
+static int
+gm107_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct gm107_ram *ram;
+ int ret;
+
+ ret = nvc0_ram_create(parent, engine, oclass, 0x021c14, &ram);
+ *pobject = nv_object(ram);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+gm107_ram_oclass = {
+ .handle = 0,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm107_ram_ctor,
+ .dtor = _nouveau_ram_dtor,
+ .init = nve0_ram_init,
+ .fini = _nouveau_ram_fini,
+ }
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index c7fdb3a9e88b..ef91b6e893af 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -91,7 +91,7 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
} while (perfE.memory < freq);
/* locate specific data set for the attached memory */
- strap = nvbios_ramcfg_index(bios);
+ strap = nvbios_ramcfg_index(nv_subdev(pfb));
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
index f4ae8aa46a25..6eb97f16fbda 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -98,7 +98,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
}
/* locate specific data set for the attached memory */
- strap = nvbios_ramcfg_index(bios);
+ strap = nvbios_ramcfg_index(nv_subdev(pfb));
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
return -EINVAL;
@@ -335,21 +335,23 @@ nva3_ram_init(struct nouveau_object *object)
/* prepare for ddr link training, and load training patterns */
switch (ram->base.type) {
case NV_MEM_TYPE_DDR3: {
- static const u32 pattern[16] = {
- 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
- 0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
- 0x33333333, 0x55555555, 0x77777777, 0x66666666,
- 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
- };
-
- nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/
- nv_wr32(pfb, 0x1005a8, 0x0000ffff);
- nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
- for (i = 0; i < 0x30; i++) {
- nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
- nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
- nv_wr32(pfb, 0x10f900, pattern[i % 16]);
- nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+ if (nv_device(pfb)->chipset == 0xa8) {
+ static const u32 pattern[16] = {
+ 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
+ 0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
+ 0x33333333, 0x55555555, 0x77777777, 0x66666666,
+ 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
+ };
+
+ nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/
+ nv_wr32(pfb, 0x1005a8, 0x0000ffff);
+ nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
+ for (i = 0; i < 0x30; i++) {
+ nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
+ nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
+ nv_wr32(pfb, 0x10f900, pattern[i % 16]);
+ nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+ }
}
}
break;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 0391b824ee76..8edc92224c84 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -152,7 +152,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
}
/* locate specific data set for the attached memory */
- strap = nvbios_ramcfg_index(bios);
+ strap = nvbios_ramcfg_index(nv_subdev(pfb));
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
return -EINVAL;
@@ -505,7 +505,8 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
int
nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int size, void **pobject)
+ struct nouveau_oclass *oclass, u32 maskaddr, int size,
+ void **pobject)
{
struct nouveau_fb *pfb = nouveau_fb(parent);
struct nouveau_bios *bios = nouveau_bios(pfb);
@@ -513,7 +514,7 @@ nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
u32 parts = nv_rd32(pfb, 0x022438);
- u32 pmask = nv_rd32(pfb, 0x022554);
+ u32 pmask = nv_rd32(pfb, maskaddr);
u32 bsize = nv_rd32(pfb, 0x10f20c);
u32 offset, length;
bool uniform = true;
@@ -630,7 +631,7 @@ nvc0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_ram *ram;
int ret;
- ret = nvc0_ram_create(parent, engine, oclass, &ram);
+ ret = nvc0_ram_create(parent, engine, oclass, 0x022554, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index 3257c522a021..16752192cf87 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -950,10 +950,11 @@ nve0_ram_calc_data(struct nouveau_fb *pfb, u32 freq,
}
/* locate specific data set for the attached memory */
+ strap = nvbios_ramcfg_index(nv_subdev(pfb));
ram->base.ramcfg.data = nvbios_rammapSp(bios, ram->base.rammap.data,
ram->base.rammap.version,
- ram->base.rammap.size, cnt, len,
- nvbios_ramcfg_index(bios),
+ ram->base.rammap.size,
+ cnt, len, strap,
&ram->base.ramcfg.version,
&ram->base.ramcfg.size,
&data->bios);
@@ -1123,7 +1124,7 @@ nve0_ram_tidy(struct nouveau_fb *pfb)
ram_exec(fuc, false);
}
-static int
+int
nve0_ram_init(struct nouveau_object *object)
{
struct nouveau_fb *pfb = (void *)object->parent;
@@ -1226,7 +1227,7 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret, i;
u32 tmp;
- ret = nvc0_ram_create(parent, engine, oclass, &ram);
+ ret = nvc0_ram_create(parent, engine, oclass, 0x022554, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index c4c1d415e7fe..2ef774731629 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -46,7 +46,8 @@ nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
u8 unk0 = !!(data & 0x02000000);
u8 unk1 = !!(data & 0x04000000);
u32 val = (unk1 << 16) | unk0;
- u32 reg = regs[line >> 4]; line &= 0x0f;
+ u32 reg = regs[line >> 4];
+ u32 lsh = line & 0x0f;
if ( func == DCB_GPIO_UNUSED ||
(match != DCB_GPIO_UNUSED && match != func))
@@ -54,7 +55,7 @@ nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
gpio->set(gpio, 0, func, line, defs);
- nv_mask(priv, reg, 0x00010001 << line, val << line);
+ nv_mask(priv, reg, 0x00010001 << lsh, val << lsh);
}
}
@@ -79,7 +80,7 @@ nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
if (nv50_gpio_location(line, &reg, &shift))
return -EINVAL;
- nv_mask(gpio, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
+ nv_mask(gpio, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index c33c03d2f4af..378e05b88e6f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -111,7 +111,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
snprintf(port->adapter.name, sizeof(port->adapter.name),
"nouveau-%s-%d", device->name, index);
port->adapter.owner = THIS_MODULE;
- port->adapter.dev.parent = &device->pdev->dev;
+ port->adapter.dev.parent = nv_device_base(device);
port->index = index;
port->func = func;
i2c_set_adapdata(&port->adapter, i2c);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index ec0b9661d614..8803809f9fc5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -50,7 +50,6 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nouveau_device *device = nv_device(parent);
- struct pci_dev *pdev = device->pdev;
struct nv04_instmem_priv *priv;
int ret, bar, vs;
@@ -60,13 +59,13 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
/* map bar */
- if (pci_resource_len(pdev, 2))
+ if (nv_device_resource_len(device, 2))
bar = 2;
else
bar = 3;
- priv->iomem = ioremap(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar));
+ priv->iomem = ioremap(nv_device_resource_start(device, bar),
+ nv_device_resource_len(device, bar));
if (!priv->iomem) {
nv_error(priv, "unable to map PRAMIN BAR\n");
return -EFAULT;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c
index cce65cc56514..f2f3338a967a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c
@@ -22,44 +22,35 @@
* Authors: Ben Skeggs
*/
-#include <subdev/ltcg.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
-struct nvc0_ltcg_priv {
- struct nouveau_ltcg base;
- u32 part_nr;
- u32 subp_nr;
- u32 num_tags;
- u32 tag_base;
- struct nouveau_mm tags;
- struct nouveau_mm_node *tag_ram;
-};
+#include "gf100.h"
static void
-nvc0_ltcg_subp_isr(struct nvc0_ltcg_priv *priv, int unit, int subp)
+gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
{
- u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
- u32 stat = nv_rd32(priv, subp_base + 0x020);
+ u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400);
+ u32 stat = nv_rd32(priv, base + 0x020);
if (stat) {
- nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", unit, subp, stat);
- nv_wr32(priv, subp_base + 0x020, stat);
+ nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", ltc, lts, stat);
+ nv_wr32(priv, base + 0x020, stat);
}
}
static void
-nvc0_ltcg_intr(struct nouveau_subdev *subdev)
+gf100_ltcg_intr(struct nouveau_subdev *subdev)
{
- struct nvc0_ltcg_priv *priv = (void *)subdev;
- u32 units;
-
- units = nv_rd32(priv, 0x00017c);
- while (units) {
- u32 subp, unit = ffs(units) - 1;
- for (subp = 0; subp < priv->subp_nr; subp++)
- nvc0_ltcg_subp_isr(priv, unit, subp);
- units &= ~(1 << unit);
+ struct gf100_ltcg_priv *priv = (void *)subdev;
+ u32 mask;
+
+ mask = nv_rd32(priv, 0x00017c);
+ while (mask) {
+ u32 lts, ltc = __ffs(mask);
+ for (lts = 0; lts < priv->lts_nr; lts++)
+ gf100_ltcg_lts_isr(priv, ltc, lts);
+ mask &= ~(1 << ltc);
}
/* we do something horribly wrong and upset PMFB a lot, so mask off
@@ -68,11 +59,11 @@ nvc0_ltcg_intr(struct nouveau_subdev *subdev)
nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
}
-static int
-nvc0_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
+int
+gf100_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
struct nouveau_mm_node **pnode)
{
- struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+ struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
int ret;
ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
@@ -82,18 +73,18 @@ nvc0_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
return ret;
}
-static void
-nvc0_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode)
+void
+gf100_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode)
{
- struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+ struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
nouveau_mm_free(&priv->tags, pnode);
}
static void
-nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
+gf100_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
{
- struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+ struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
u32 last = first + count - 1;
int p, i;
@@ -104,16 +95,16 @@ nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */
/* wait until it's finished with clearing */
- for (p = 0; p < priv->part_nr; ++p) {
- for (i = 0; i < priv->subp_nr; ++i)
+ for (p = 0; p < priv->ltc_nr; ++p) {
+ for (i = 0; i < priv->lts_nr; ++i)
nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
}
}
/* TODO: Figure out tag memory details and drop the over-cautious allocation.
*/
-static int
-nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
+int
+gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv)
{
u32 tag_size, tag_margin, tag_align;
int ret;
@@ -124,7 +115,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
- tag_align = priv->part_nr * 0x800;
+ tag_align = priv->ltc_nr * 0x800;
tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
/* 4 part 4 sub: 0x2000 bytes for 56 tags */
@@ -157,11 +148,11 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
}
static int
-nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nvc0_ltcg_priv *priv;
+ struct gf100_ltcg_priv *priv;
struct nouveau_fb *pfb = nouveau_fb(parent);
u32 parts, mask;
int ret, i;
@@ -175,27 +166,27 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
mask = nv_rd32(priv, 0x022554);
for (i = 0; i < parts; i++) {
if (!(mask & (1 << i)))
- priv->part_nr++;
+ priv->ltc_nr++;
}
- priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
+ priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28;
- ret = nvc0_ltcg_init_tag_ram(pfb, priv);
+ ret = gf100_ltcg_init_tag_ram(pfb, priv);
if (ret)
return ret;
- priv->base.tags_alloc = nvc0_ltcg_tags_alloc;
- priv->base.tags_free = nvc0_ltcg_tags_free;
- priv->base.tags_clear = nvc0_ltcg_tags_clear;
+ priv->base.tags_alloc = gf100_ltcg_tags_alloc;
+ priv->base.tags_free = gf100_ltcg_tags_free;
+ priv->base.tags_clear = gf100_ltcg_tags_clear;
- nv_subdev(priv)->intr = nvc0_ltcg_intr;
+ nv_subdev(priv)->intr = gf100_ltcg_intr;
return 0;
}
-static void
-nvc0_ltcg_dtor(struct nouveau_object *object)
+void
+gf100_ltcg_dtor(struct nouveau_object *object)
{
struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
- struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+ struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent);
nouveau_mm_fini(&priv->tags);
@@ -205,10 +196,10 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
}
static int
-nvc0_ltcg_init(struct nouveau_object *object)
+gf100_ltcg_init(struct nouveau_object *object)
{
struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
- struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+ struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
int ret;
ret = nouveau_ltcg_init(ltcg);
@@ -216,20 +207,20 @@ nvc0_ltcg_init(struct nouveau_object *object)
return ret;
nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
- nv_wr32(priv, 0x17e8d8, priv->part_nr);
+ nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
if (nv_device(ltcg)->card_type >= NV_E0)
- nv_wr32(priv, 0x17e000, priv->part_nr);
+ nv_wr32(priv, 0x17e000, priv->ltc_nr);
nv_wr32(priv, 0x17e8d4, priv->tag_base);
return 0;
}
-struct nouveau_oclass
-nvc0_ltcg_oclass = {
+struct nouveau_oclass *
+gf100_ltcg_oclass = &(struct nouveau_oclass) {
.handle = NV_SUBDEV(LTCG, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_ltcg_ctor,
- .dtor = nvc0_ltcg_dtor,
- .init = nvc0_ltcg_init,
+ .ctor = gf100_ltcg_ctor,
+ .dtor = gf100_ltcg_dtor,
+ .init = gf100_ltcg_init,
.fini = _nouveau_ltcg_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h
new file mode 100644
index 000000000000..87b10b8412ea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_LTCG_PRIV_GF100_H__
+#define __NVKM_LTCG_PRIV_GF100_H__
+
+#include <subdev/ltcg.h>
+
+struct gf100_ltcg_priv {
+ struct nouveau_ltcg base;
+ u32 ltc_nr;
+ u32 lts_nr;
+ u32 num_tags;
+ u32 tag_base;
+ struct nouveau_mm tags;
+ struct nouveau_mm_node *tag_ram;
+};
+
+void gf100_ltcg_dtor(struct nouveau_object *);
+int gf100_ltcg_init_tag_ram(struct nouveau_fb *, struct gf100_ltcg_priv *);
+int gf100_ltcg_tags_alloc(struct nouveau_ltcg *, u32, struct nouveau_mm_node **);
+void gf100_ltcg_tags_free(struct nouveau_ltcg *, struct nouveau_mm_node **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c
new file mode 100644
index 000000000000..e79d0e81de40
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+
+#include "gf100.h"
+
+static void
+gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
+{
+ u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400);
+ u32 stat = nv_rd32(priv, base + 0x00c);
+
+ if (stat) {
+ nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", ltc, lts, stat);
+ nv_wr32(priv, base + 0x00c, stat);
+ }
+}
+
+static void
+gm107_ltcg_intr(struct nouveau_subdev *subdev)
+{
+ struct gf100_ltcg_priv *priv = (void *)subdev;
+ u32 mask;
+
+ mask = nv_rd32(priv, 0x00017c);
+ while (mask) {
+ u32 lts, ltc = __ffs(mask);
+ for (lts = 0; lts < priv->lts_nr; lts++)
+ gm107_ltcg_lts_isr(priv, ltc, lts);
+ mask &= ~(1 << ltc);
+ }
+
+ /* we do something horribly wrong and upset PMFB a lot, so mask off
+ * interrupts from it after the first one until it's fixed
+ */
+ nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
+}
+
+static void
+gm107_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
+{
+ struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
+ u32 last = first + count - 1;
+ int p, i;
+
+ BUG_ON((first > last) || (last >= priv->num_tags));
+
+ nv_wr32(priv, 0x17e270, first);
+ nv_wr32(priv, 0x17e274, last);
+ nv_wr32(priv, 0x17e26c, 0x4); /* trigger clear */
+
+ /* wait until it's finished with clearing */
+ for (p = 0; p < priv->ltc_nr; ++p) {
+ for (i = 0; i < priv->lts_nr; ++i)
+ nv_wait(priv, 0x14046c + p * 0x2000 + i * 0x200, ~0, 0);
+ }
+}
+
+static int
+gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct gf100_ltcg_priv *priv;
+ struct nouveau_fb *pfb = nouveau_fb(parent);
+ u32 parts, mask;
+ int ret, i;
+
+ ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ parts = nv_rd32(priv, 0x022438);
+ mask = nv_rd32(priv, 0x021c14);
+ for (i = 0; i < parts; i++) {
+ if (!(mask & (1 << i)))
+ priv->ltc_nr++;
+ }
+ priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28;
+
+ ret = gf100_ltcg_init_tag_ram(pfb, priv);
+ if (ret)
+ return ret;
+
+ priv->base.tags_alloc = gf100_ltcg_tags_alloc;
+ priv->base.tags_free = gf100_ltcg_tags_free;
+ priv->base.tags_clear = gm107_ltcg_tags_clear;
+
+ nv_subdev(priv)->intr = gm107_ltcg_intr;
+ return 0;
+}
+
+static int
+gm107_ltcg_init(struct nouveau_object *object)
+{
+ struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+ struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
+ int ret;
+
+ ret = nouveau_ltcg_init(ltcg);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x17e27c, priv->ltc_nr);
+ nv_wr32(priv, 0x17e278, priv->tag_base);
+ return 0;
+}
+
+struct nouveau_oclass *
+gm107_ltcg_oclass = &(struct nouveau_oclass) {
+ .handle = NV_SUBDEV(LTCG, 0xff),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm107_ltcg_ctor,
+ .dtor = gf100_ltcg_dtor,
+ .init = gm107_ltcg_init,
+ .fini = _nouveau_ltcg_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index b4b9943773bc..8a5555192fa5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -93,7 +93,7 @@ _nouveau_mc_dtor(struct nouveau_object *object)
{
struct nouveau_device *device = nv_device(object);
struct nouveau_mc *pmc = (void *)object;
- free_irq(device->pdev->irq, pmc);
+ free_irq(pmc->irq, pmc);
if (pmc->use_msi)
pci_disable_msi(device->pdev);
nouveau_subdev_destroy(&pmc->base);
@@ -114,33 +114,44 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- switch (device->pdev->device & 0x0ff0) {
- case 0x00f0:
- case 0x02e0:
- /* BR02? NFI how these would be handled yet exactly */
- break;
- default:
- switch (device->chipset) {
- case 0xaa: break; /* reported broken, nv also disable it */
- default:
- pmc->use_msi = true;
+ if (nv_device_is_pci(device))
+ switch (device->pdev->device & 0x0ff0) {
+ case 0x00f0:
+ case 0x02e0:
+ /* BR02? no idea yet exactly how these should be handled */
break;
+ default:
+ switch (device->chipset) {
+ case 0xaa:
+ /* reported broken, the nv driver also disables it */
+ break;
+ default:
+ pmc->use_msi = true;
+ break;
}
- }
- pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", pmc->use_msi);
- if (pmc->use_msi && oclass->msi_rearm) {
- pmc->use_msi = pci_enable_msi(device->pdev) == 0;
- if (pmc->use_msi) {
- nv_info(pmc, "MSI interrupts enabled\n");
- oclass->msi_rearm(pmc);
+ pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI",
+ pmc->use_msi);
+
+ if (pmc->use_msi && oclass->msi_rearm) {
+ pmc->use_msi = pci_enable_msi(device->pdev) == 0;
+ if (pmc->use_msi) {
+ nv_info(pmc, "MSI interrupts enabled\n");
+ oclass->msi_rearm(pmc);
+ }
+ } else {
+ pmc->use_msi = false;
}
- } else {
- pmc->use_msi = false;
}
- ret = request_irq(device->pdev->irq, nouveau_mc_intr,
- IRQF_SHARED, "nouveau", pmc);
+ ret = nv_device_get_irq(device, true);
+ if (ret < 0)
+ return ret;
+ pmc->irq = ret;
+
+ ret = request_irq(pmc->irq, nouveau_mc_intr, IRQF_SHARED, "nouveau",
+ pmc);
+
if (ret < 0)
return ret;
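
The mc/base.c changes above stop reaching into device->pdev->irq and instead resolve the interrupt line once through nv_device_get_irq(), cache it in pmc->irq, and use that cached value for both request_irq() and free_irq(), so the same path works for PCI and platform devices. The snippet below is a minimal, hypothetical sketch of that cache-then-use pattern; example_dev, example_isr and example_attach_irq() are illustrative names, while request_irq()/free_irq() are the standard kernel APIs used in the hunk.

    #include <linux/interrupt.h>

    struct example_dev {
            int irq;                        /* resolved once; the bus type no longer matters */
    };

    static irqreturn_t example_isr(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int example_attach_irq(struct example_dev *edev, int irq)
    {
            if (irq < 0)                    /* e.g. what nv_device_get_irq() reported */
                    return irq;
            edev->irq = irq;                /* cache the number for later teardown */
            return request_irq(edev->irq, example_isr, IRQF_SHARED, "example", edev);
    }

    static void example_detach_irq(struct example_dev *edev)
    {
            free_irq(edev->irq, edev);      /* mirrors the _nouveau_mc_dtor() change */
    }
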
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index 13c5af88a601..51fcf7960417 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -96,7 +96,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
acpi_handle handle;
int rev;
- handle = ACPI_HANDLE(&device->pdev->dev);
+ handle = ACPI_HANDLE(nv_device_base(device));
if (!handle)
return false;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index 80e584a1bd1c..9ad01da6eacb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -110,16 +110,18 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
poll = false;
break;
case NOUVEAU_THERM_CTRL_AUTO:
- if (priv->fan->bios.nr_fan_trip) {
+ switch(priv->fan->bios.fan_mode) {
+ case NVBIOS_THERM_FAN_TRIP:
duty = nouveau_therm_update_trip(therm);
- } else
- if (priv->fan->bios.linear_min_temp ||
- priv->fan->bios.linear_max_temp) {
+ break;
+ case NVBIOS_THERM_FAN_LINEAR:
duty = nouveau_therm_update_linear(therm);
- } else {
+ break;
+ case NVBIOS_THERM_FAN_OTHER:
if (priv->cstate)
duty = priv->cstate;
poll = false;
+ break;
}
immd = false;
break;
@@ -179,7 +181,7 @@ nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
/* do not allow automatic fan management if the thermal sensor is
* not available */
- if (priv->mode == NOUVEAU_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
+ if (mode == NOUVEAU_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
return -EINVAL;
if (priv->mode == mode)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 95f6129eeede..016990a8252c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -54,8 +54,10 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
/* check that we're not already at the target duty cycle */
duty = fan->get(therm);
- if (duty == target)
- goto done;
+ if (duty == target) {
+ spin_unlock_irqrestore(&fan->lock, flags);
+ return 0;
+ }
/* smooth out the fanspeed increase/decrease */
if (!immediate && duty >= 0) {
@@ -73,8 +75,15 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
nv_debug(therm, "FAN update: %d\n", duty);
ret = fan->set(therm, duty);
- if (ret)
- goto done;
+ if (ret) {
+ spin_unlock_irqrestore(&fan->lock, flags);
+ return ret;
+ }
+
+ /* fan speed updated, drop the fan lock before grabbing the
+ * alarm-scheduling lock and risking a deadlock
+ */
+ spin_unlock_irqrestore(&fan->lock, flags);
/* schedule next fan update, if not at target speed already */
if (list_empty(&fan->alarm.head) && target != duty) {
@@ -92,8 +101,6 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
}
-done:
- spin_unlock_irqrestore(&fan->lock, flags);
return ret;
}
@@ -185,11 +192,8 @@ nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
priv->fan->bios.max_duty = 100;
priv->fan->bios.bump_period = 500;
priv->fan->bios.slow_down_period = 2000;
-/*XXX: talk to mupuf */
-#if 0
priv->fan->bios.linear_min_temp = 40;
priv->fan->bios.linear_max_temp = 85;
-#endif
}
static void
@@ -235,7 +239,8 @@ nouveau_therm_fan_ctor(struct nouveau_therm *therm)
/* attempt to locate a drivable fan, and determine control method */
ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
if (ret == 0) {
- if (func.log[0] & DCB_GPIO_LOG_DIR_IN) {
+ /* FIXME: is this really the place to perform such checks? */
+ if (func.line != 16 && func.log[0] & DCB_GPIO_LOG_DIR_IN) {
nv_debug(therm, "GPIO_FAN is in input mode\n");
ret = -EINVAL;
} else {
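
The fan.c hunks above replace the single unlock at the old "done" label with early unlocks, so that fan->lock is released before the next alarm is scheduled through ptimer->alarm(), avoiding the lock-ordering deadlock the new comment describes. A minimal sketch of that drop-one-lock-before-taking-the-next pattern, with made-up names (state_lock, schedule_alarm, update_state) standing in for the nouveau ones:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(state_lock);            /* plays the role of fan->lock */

    static void schedule_alarm(void)               /* stand-in: takes its own internal lock */
    {
    }

    static int update_state(int target, int *state)
    {
            unsigned long flags;

            spin_lock_irqsave(&state_lock, flags);
            if (*state == target) {
                    spin_unlock_irqrestore(&state_lock, flags);
                    return 0;                      /* early unlock instead of "goto done" */
            }
            *state = target;
            spin_unlock_irqrestore(&state_lock, flags);

            schedule_alarm();                      /* safe: state_lock is no longer held */
            return 0;
    }
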
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
index 5f71db8e8992..9a5c07340263 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
@@ -67,7 +67,7 @@ nouveau_fanpwm_set(struct nouveau_therm *therm, int percent)
if (priv->base.bios.pwm_freq) {
divs = 1;
if (therm->pwm_clock)
- divs = therm->pwm_clock(therm);
+ divs = therm->pwm_clock(therm, priv->func.line);
divs /= priv->base.bios.pwm_freq;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
index 8cf7597a2182..321db927d638 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -93,7 +93,7 @@ nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
}
int
-nv50_fan_pwm_clock(struct nouveau_therm *therm)
+nv50_fan_pwm_clock(struct nouveau_therm *therm, int line)
{
int chipset = nv_device(therm)->chipset;
int crystal = nv_device(therm)->crystal;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 4dd4f81ae873..43fec17ea540 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -32,10 +32,12 @@ static int
pwm_info(struct nouveau_therm *therm, int line)
{
u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
+
switch (gpio & 0x000000c0) {
case 0x00000000: /* normal mode, possibly pwm forced off by us */
case 0x00000040: /* nvio special */
switch (gpio & 0x0000001f) {
+ case 0x00: return 2;
case 0x19: return 1;
case 0x1c: return 0;
default:
@@ -56,8 +58,9 @@ nvd0_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
int indx = pwm_info(therm, line);
if (indx < 0)
return indx;
-
- nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
+ else if (indx < 2)
+ nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
+ /* nothing to do for indx == 2, it seems hardwired to PTHERM */
return 0;
}
@@ -67,10 +70,15 @@ nvd0_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
int indx = pwm_info(therm, line);
if (indx < 0)
return indx;
-
- if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
- *divs = nv_rd32(therm, 0x00e114 + (indx * 8));
- *duty = nv_rd32(therm, 0x00e118 + (indx * 8));
+ else if (indx < 2) {
+ if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
+ *divs = nv_rd32(therm, 0x00e114 + (indx * 8));
+ *duty = nv_rd32(therm, 0x00e118 + (indx * 8));
+ return 0;
+ }
+ } else if (indx == 2) {
+ *divs = nv_rd32(therm, 0x0200d8) & 0x1fff;
+ *duty = nv_rd32(therm, 0x0200dc) & 0x1fff;
return 0;
}
@@ -83,16 +91,26 @@ nvd0_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
int indx = pwm_info(therm, line);
if (indx < 0)
return indx;
-
- nv_wr32(therm, 0x00e114 + (indx * 8), divs);
- nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
+ else if (indx < 2) {
+ nv_wr32(therm, 0x00e114 + (indx * 8), divs);
+ nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
+ } else if (indx == 2) {
+ nv_mask(therm, 0x0200d8, 0x1fff, divs); /* keep the high bits */
+ nv_wr32(therm, 0x0200dc, duty | 0x40000000);
+ }
return 0;
}
static int
-nvd0_fan_pwm_clock(struct nouveau_therm *therm)
+nvd0_fan_pwm_clock(struct nouveau_therm *therm, int line)
{
- return (nv_device(therm)->crystal * 1000) / 20;
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return 0;
+ else if (indx < 2)
+ return (nv_device(therm)->crystal * 1000) / 20;
+ else
+ return nv_device(therm)->crystal * 1000 / 10;
}
static int
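
The nvd0.c hunks above add a third PWM controller (indx == 2) that lives at 0x0200d8/0x0200dc, appears hardwired to PTHERM, and runs from a different base clock: crystal * 1000 / 20 for the first two controllers versus crystal * 1000 / 10 for the third. As a rough worked example (assuming the crystal field is expressed in kHz, with 27000 used purely for illustration):

    pwm_clock(indx < 2)  = 27000 * 1000 / 20 = 1,350,000 Hz
    pwm_clock(indx == 2) = 27000 * 1000 / 10 = 2,700,000 Hz

fanpwm.c then derives the divider from whichever clock applies, divs = pwm_clock / pwm_freq, which is why pwm_clock() now needs to know the GPIO line.
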
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 96f8f95693ce..916fca5c7816 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -143,7 +143,7 @@ void nv40_therm_intr(struct nouveau_subdev *);
int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
-int nv50_fan_pwm_clock(struct nouveau_therm *);
+int nv50_fan_pwm_clock(struct nouveau_therm *, int);
int nv84_temp_get(struct nouveau_therm *therm);
int nv84_therm_fini(struct nouveau_object *object, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c
new file mode 100644
index 000000000000..37484db1f7fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+static int
+gk20a_timer_init(struct nouveau_object *object)
+{
+ struct nv04_timer_priv *priv = (void *)object;
+ u32 hi = upper_32_bits(priv->suspend_time);
+ u32 lo = lower_32_bits(priv->suspend_time);
+ int ret;
+
+ ret = nouveau_timer_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_debug(priv, "time low : 0x%08x\n", lo);
+ nv_debug(priv, "time high : 0x%08x\n", hi);
+
+ /* restore the time before suspend */
+ nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
+ nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
+ return 0;
+}
+
+struct nouveau_oclass
+gk20a_timer_oclass = {
+ .handle = NV_SUBDEV(TIMER, 0xff),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_timer_ctor,
+ .dtor = nv04_timer_dtor,
+ .init = gk20a_timer_init,
+ .fini = nv04_timer_fini,
+ }
+};
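
gk20a_timer_init() above restores the 64-bit timestamp saved across suspend by splitting it into two 32-bit register writes with the standard upper_32_bits()/lower_32_bits() helpers. A small self-contained sketch of that split, and of how a later read would be reassembled; split_time()/join_time() are illustrative names:

    #include <linux/kernel.h>               /* upper_32_bits(), lower_32_bits() */
    #include <linux/types.h>

    static void split_time(u64 t, u32 *hi, u32 *lo)
    {
            *hi = upper_32_bits(t);         /* written to NV04_PTIMER_TIME_1 in the hunk */
            *lo = lower_32_bits(t);         /* written to NV04_PTIMER_TIME_0 */
    }

    static u64 join_time(u32 hi, u32 lo)
    {
            return ((u64)hi << 32) | lo;    /* how the two halves recombine */
    }
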
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index c0bdd10358d7..240ed0b983a9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -22,22 +22,7 @@
* Authors: Ben Skeggs
*/
-#include <subdev/timer.h>
-
-#define NV04_PTIMER_INTR_0 0x009100
-#define NV04_PTIMER_INTR_EN_0 0x009140
-#define NV04_PTIMER_NUMERATOR 0x009200
-#define NV04_PTIMER_DENOMINATOR 0x009210
-#define NV04_PTIMER_TIME_0 0x009400
-#define NV04_PTIMER_TIME_1 0x009410
-#define NV04_PTIMER_ALARM_0 0x009420
-
-struct nv04_timer_priv {
- struct nouveau_timer base;
- struct list_head alarms;
- spinlock_t lock;
- u64 suspend_time;
-};
+#include "nv04.h"
static u64
nv04_timer_read(struct nouveau_timer *ptimer)
@@ -142,35 +127,14 @@ nv04_timer_intr(struct nouveau_subdev *subdev)
}
}
-static int
-nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv04_timer_priv *priv;
- int ret;
-
- ret = nouveau_timer_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.base.intr = nv04_timer_intr;
- priv->base.read = nv04_timer_read;
- priv->base.alarm = nv04_timer_alarm;
- priv->base.alarm_cancel = nv04_timer_alarm_cancel;
- priv->suspend_time = 0;
-
- INIT_LIST_HEAD(&priv->alarms);
- spin_lock_init(&priv->lock);
- return 0;
-}
-
-static void
-nv04_timer_dtor(struct nouveau_object *object)
+int
+nv04_timer_fini(struct nouveau_object *object, bool suspend)
{
struct nv04_timer_priv *priv = (void *)object;
- return nouveau_timer_destroy(&priv->base);
+ if (suspend)
+ priv->suspend_time = nv04_timer_read(&priv->base);
+ nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
+ return nouveau_timer_fini(&priv->base, suspend);
}
static int
@@ -257,14 +221,35 @@ nv04_timer_init(struct nouveau_object *object)
return 0;
}
-static int
-nv04_timer_fini(struct nouveau_object *object, bool suspend)
+void
+nv04_timer_dtor(struct nouveau_object *object)
{
struct nv04_timer_priv *priv = (void *)object;
- if (suspend)
- priv->suspend_time = nv04_timer_read(&priv->base);
- nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
- return nouveau_timer_fini(&priv->base, suspend);
+ return nouveau_timer_destroy(&priv->base);
+}
+
+int
+nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_timer_priv *priv;
+ int ret;
+
+ ret = nouveau_timer_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.intr = nv04_timer_intr;
+ priv->base.read = nv04_timer_read;
+ priv->base.alarm = nv04_timer_alarm;
+ priv->base.alarm_cancel = nv04_timer_alarm_cancel;
+ priv->suspend_time = 0;
+
+ INIT_LIST_HEAD(&priv->alarms);
+ spin_lock_init(&priv->lock);
+ return 0;
}
struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h
new file mode 100644
index 000000000000..4bc152697c37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h
@@ -0,0 +1,27 @@
+#ifndef __NVKM_TIMER_NV04_H__
+#define __NVKM_TIMER_NV04_H__
+
+#include "priv.h"
+
+#define NV04_PTIMER_INTR_0 0x009100
+#define NV04_PTIMER_INTR_EN_0 0x009140
+#define NV04_PTIMER_NUMERATOR 0x009200
+#define NV04_PTIMER_DENOMINATOR 0x009210
+#define NV04_PTIMER_TIME_0 0x009400
+#define NV04_PTIMER_TIME_1 0x009410
+#define NV04_PTIMER_ALARM_0 0x009420
+
+struct nv04_timer_priv {
+ struct nouveau_timer base;
+ struct list_head alarms;
+ spinlock_t lock;
+ u64 suspend_time;
+};
+
+int nv04_timer_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nv04_timer_dtor(struct nouveau_object *);
+int nv04_timer_fini(struct nouveau_object *, bool);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/core/subdev/timer/priv.h
new file mode 100644
index 000000000000..799dae3f2300
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/priv.h
@@ -0,0 +1,6 @@
+#ifndef __NVKM_TIMER_PRIV_H__
+#define __NVKM_TIMER_PRIV_H__
+
+#include <subdev/timer.h>
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0e3270c3ffd2..41be3424c906 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -239,7 +239,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
struct drm_device *dev = crtc->dev;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
/* Calculate our timings */
int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
@@ -574,7 +574,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
regp->CRTC[NV_CIO_CRE_86] = 0x1;
}
- regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8;
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->primary->fb->depth + 1) / 8;
/* Enable slaved mode (called MODE_TV in nv4ref.h) */
if (lvds_output || tmds_output || tv_output)
regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
@@ -588,7 +588,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
- if (crtc->fb->depth == 16)
+ if (crtc->primary->fb->depth == 16)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
if (nv_device(drm->device)->chipset >= 0x11)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
@@ -609,7 +609,7 @@ static int
nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
struct nv04_display *disp = nv04_display(crtc->dev);
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
@@ -808,7 +808,7 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
* mark the lut values as dirty by setting depth==0, and it'll be
* uploaded on the first mode_set_base()
*/
- if (!nv_crtc->base.fb) {
+ if (!nv_crtc->base.primary->fb) {
nv_crtc->lut.depth = 0;
return;
}
@@ -832,7 +832,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
NV_DEBUG(drm, "index %d\n", nv_crtc->index);
/* no fb bound */
- if (!atomic && !crtc->fb) {
+ if (!atomic && !crtc->primary->fb) {
NV_DEBUG(drm, "No FB bound\n");
return 0;
}
@@ -844,8 +844,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
drm_fb = passed_fb;
fb = nouveau_framebuffer(passed_fb);
} else {
- drm_fb = crtc->fb;
- fb = nouveau_framebuffer(crtc->fb);
+ drm_fb = crtc->primary->fb;
+ fb = nouveau_framebuffer(crtc->primary->fb);
}
nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -857,9 +857,9 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
/* Update the framebuffer format. */
regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
- regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8;
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->primary->fb->depth + 1) / 8;
regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
- if (crtc->fb->depth == 16)
+ if (crtc->primary->fb->depth == 16)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
@@ -1048,7 +1048,7 @@ nouveau_crtc_set_config(struct drm_mode_set *set)
/* get a pm reference here */
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -EACCES)
return ret;
ret = drm_crtc_helper_set_config(set);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 7fdc51e2a571..a2d669b4acf2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -415,7 +415,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
/* Output property. */
if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
(nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
- encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
+ encoder->crtc->primary->fb->depth > connector->display_info.bpc * 3)) {
if (nv_device(drm->device)->chipset == 0x11)
regp->dither = savep->dither | 0x00010000;
else {
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 900fae01793e..b13f441c6431 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -97,6 +97,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
case NV_C0:
case NV_D0:
case NV_E0:
+ case GM100:
return 0x906e;
}
@@ -139,7 +140,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
/* destroy channel object, all children will be killed too */
if (chan->chan) {
- abi16->handles &= ~(1 << (chan->chan->handle & 0xffff));
+ abi16->handles &= ~(1ULL << (chan->chan->handle & 0xffff));
nouveau_channel_del(&chan->chan);
}
@@ -179,12 +180,21 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = device->chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
- getparam->value = dev->pdev->vendor;
+ if (nv_device_is_pci(device))
+ getparam->value = dev->pdev->vendor;
+ else
+ getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
- getparam->value = dev->pdev->device;
+ if (nv_device_is_pci(device))
+ getparam->value = dev->pdev->device;
+ else
+ getparam->value = 0;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
+ if (!nv_device_is_pci(device))
+ getparam->value = 3;
+ else
if (drm_pci_device_is_agp(dev))
getparam->value = 0;
else
@@ -270,8 +280,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
return nouveau_abi16_put(abi16, -EINVAL);
/* allocate "abi16 channel" data and make up a handle for it */
- init->channel = ffsll(~abi16->handles);
- if (!init->channel--)
+ init->channel = __ffs64(~abi16->handles);
+ if (~abi16->handles == 0)
return nouveau_abi16_put(abi16, -ENOSPC);
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
@@ -280,7 +290,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
INIT_LIST_HEAD(&chan->notifiers);
list_add(&chan->head, &abi16->channels);
- abi16->handles |= (1 << init->channel);
+ abi16->handles |= (1ULL << init->channel);
/* create channel object and initialise dma and fence management */
ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
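
The nouveau_abi16.c hunks above fix the channel-handle bitmap for handles above 31: the shifts become 1ULL so they are performed in 64 bits, __ffs64() replaces ffsll(), and a full bitmap is detected via ~abi16->handles == 0. A minimal sketch of that allocation scheme (alloc_handle()/free_handle() are made-up names; the full-map check is done before looking for a free bit here, since __ffs64() is only defined for a non-zero argument):

    #include <linux/bitops.h>               /* __ffs64() */
    #include <linux/errno.h>
    #include <linux/types.h>

    static int alloc_handle(u64 *handles)
    {
            unsigned long bit;

            if (~*handles == 0)             /* all 64 handles in use */
                    return -ENOSPC;
            bit = __ffs64(~*handles);       /* lowest clear bit */
            *handles |= 1ULL << bit;        /* 1ULL keeps bits 32..63 usable */
            return bit;
    }

    static void free_handle(u64 *handles, unsigned int bit)
    {
            *handles &= ~(1ULL << bit);     /* matches the chan_fini() change */
    }
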
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index 2953c4e91e1a..51666daddb94 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -75,7 +75,7 @@ nouveau_agp_enabled(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
- if (!drm_pci_device_is_agp(dev) || !dev->agp)
+ if (!dev->pdev || !drm_pci_device_is_agp(dev) || !dev->agp)
return false;
if (drm->agp.stat == UNKNOWN) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 4c3feaaa1037..8268a4ccac15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1474,9 +1474,12 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
case 0:
entry->dpconf.link_bw = 162000;
break;
- default:
+ case 1:
entry->dpconf.link_bw = 270000;
break;
+ default:
+ entry->dpconf.link_bw = 540000;
+ break;
}
switch ((conf & 0x0f000000) >> 24) {
case 0xf:
@@ -2069,6 +2072,10 @@ nouveau_bios_init(struct drm_device *dev)
struct nvbios *bios = &drm->vbios;
int ret;
+ /* only relevant for PCI devices */
+ if (!dev->pdev)
+ return 0;
+
if (!NVInitVBIOS(dev))
return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4aed1714b9ab..b6dc85c614be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1255,7 +1255,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
/* fallthrough, tiled memory */
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = pci_resource_start(dev->pdev, 1);
+ mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1);
mem->bus.is_iomem = true;
if (nv_device(drm->device)->card_type >= NV_50) {
struct nouveau_bar *bar = nouveau_bar(drm->device);
@@ -1293,7 +1293,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_device *device = nv_device(drm->device);
- u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
+ u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT;
int ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1331,6 +1331,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
+ struct nouveau_device *device;
struct drm_device *dev;
unsigned i;
int r;
@@ -1348,6 +1349,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
drm = nouveau_bdev(ttm->bdev);
+ device = nv_device(drm->device);
dev = drm->dev;
#if __OS_HAS_AGP
@@ -1368,13 +1370,12 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
for (i = 0; i < ttm->num_pages; i++) {
- ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
+ ttm_dma->dma_address[i] = nv_device_map_page(device,
+ ttm->pages[i]);
+ if (!ttm_dma->dma_address[i]) {
while (--i) {
- pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ nv_device_unmap_page(device,
+ ttm_dma->dma_address[i]);
ttm_dma->dma_address[i] = 0;
}
ttm_pool_unpopulate(ttm);
@@ -1389,6 +1390,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
+ struct nouveau_device *device;
struct drm_device *dev;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1397,6 +1399,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
drm = nouveau_bdev(ttm->bdev);
+ device = nv_device(drm->device);
dev = drm->dev;
#if __OS_HAS_AGP
@@ -1415,8 +1418,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
if (ttm_dma->dma_address[i]) {
- pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ nv_device_unmap_page(device, ttm_dma->dma_address[i]);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index cc5152be2cf1..ccb6b452d6d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -154,7 +154,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
* nfi why this exists, it came from the -nv ddx.
*/
args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
- args.start = pci_resource_start(device->pdev, 1);
+ args.start = nv_device_resource_start(device, 1);
args.limit = args.start + limit;
} else {
args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1674882d60d5..d07ce028af51 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -255,7 +255,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
}
ret = pm_runtime_get_sync(connector->dev->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -EACCES)
return conn_status;
i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
@@ -960,7 +960,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort;
case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
case DCB_CONNECTOR_HDMI_0 :
- case DCB_CONNECTOR_HDMI_1 : return DRM_MODE_CONNECTOR_HDMIA;
+ case DCB_CONNECTOR_HDMI_1 :
+ case DCB_CONNECTOR_HDMI_C : return DRM_MODE_CONNECTOR_HDMIA;
default:
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 24011596af43..3ff030dc1ee3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -105,7 +105,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
if (retry) ndelay(crtc->linedur_ns);
} while (retry--);
- *hpos = calc(args.hblanks, args.hblanke, args.htotal, args.hline);
+ *hpos = args.hline;
*vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
if (stime) *stime = ns_to_ktime(args.time[0]);
if (etime) *etime = ns_to_ktime(args.time[1]);
@@ -419,6 +419,7 @@ int
nouveau_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_device *device = nouveau_dev(dev);
struct nouveau_display *disp;
int ret, gen;
@@ -459,7 +460,7 @@ nouveau_display_create(struct drm_device *dev)
}
dev->mode_config.funcs = &nouveau_mode_config_funcs;
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
+ dev->mode_config.fb_base = nv_device_resource_start(device, 1);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@@ -488,6 +489,7 @@ nouveau_display_create(struct drm_device *dev)
if (drm->vbios.dcb.entries) {
static const u16 oclass[] = {
+ GM107_DISP_CLASS,
NVF0_DISP_CLASS,
NVE0_DISP_CLASS,
NVD0_DISP_CLASS,
@@ -569,7 +571,7 @@ nouveau_display_suspend(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
- nouveau_fb = nouveau_framebuffer(crtc->fb);
+ nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
if (!nouveau_fb || !nouveau_fb->nvbo)
continue;
@@ -596,7 +598,7 @@ nouveau_display_repin(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
- nouveau_fb = nouveau_framebuffer(crtc->fb);
+ nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
if (!nouveau_fb || !nouveau_fb->nvbo)
continue;
@@ -693,7 +695,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
+ struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
struct nouveau_page_flip_state *s;
struct nouveau_channel *chan = drm->channel;
@@ -767,7 +769,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
goto fail_unreserve;
/* Update the crtc struct and cleanup */
- crtc->fb = fb;
+ crtc->primary->fb = fb;
nouveau_bo_fence(old_bo, fence);
ttm_bo_unreserve(&old_bo->bo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 4ee702ac8907..ddd83756b9a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -33,6 +33,7 @@
#include <core/client.h>
#include <core/gpuobj.h>
#include <core/class.h>
+#include <core/option.h>
#include <engine/device.h>
#include <engine/disp.h>
@@ -81,7 +82,7 @@ module_param_named(runpm, nouveau_runtime_pm, int, 0400);
static struct drm_driver driver;
static u64
-nouveau_name(struct pci_dev *pdev)
+nouveau_pci_name(struct pci_dev *pdev)
{
u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
name |= pdev->bus->number << 16;
@@ -89,15 +90,30 @@ nouveau_name(struct pci_dev *pdev)
return name | PCI_FUNC(pdev->devfn);
}
+static u64
+nouveau_platform_name(struct platform_device *platformdev)
+{
+ return platformdev->id;
+}
+
+static u64
+nouveau_name(struct drm_device *dev)
+{
+ if (dev->pdev)
+ return nouveau_pci_name(dev->pdev);
+ else
+ return nouveau_platform_name(dev->platformdev);
+}
+
static int
-nouveau_cli_create(struct pci_dev *pdev, const char *name,
+nouveau_cli_create(u64 name, const char *sname,
int size, void **pcli)
{
struct nouveau_cli *cli;
int ret;
*pcli = NULL;
- ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
+ ret = nouveau_client_create_(sname, name, nouveau_config,
nouveau_debug, size, pcli);
cli = *pcli;
if (ret) {
@@ -281,7 +297,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
remove_conflicting_framebuffers(aper, "nouveaufb", boot);
kfree(aper);
- ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
+ ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI,
+ nouveau_pci_name(pdev), pci_name(pdev),
nouveau_config, nouveau_debug, &device);
if (ret)
return ret;
@@ -300,22 +317,27 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
static void
-nouveau_get_hdmi_dev(struct drm_device *dev)
+nouveau_get_hdmi_dev(struct nouveau_drm *drm)
{
- struct nouveau_drm *drm = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = drm->dev->pdev;
+
+ if (!pdev) {
+ DRM_INFO("not a PCI device; no HDMI\n");
+ drm->hdmi_device = NULL;
+ return;
+ }
/* subfunction one is a hdmi audio device? */
drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
if (!drm->hdmi_device) {
- DRM_INFO("hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1);
+ NV_DEBUG(drm, "hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1);
return;
}
if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) {
- DRM_INFO("possible hdmi device not audio %d\n", drm->hdmi_device->class);
+ NV_DEBUG(drm, "possible hdmi device not audio %d\n", drm->hdmi_device->class);
pci_dev_put(drm->hdmi_device);
drm->hdmi_device = NULL;
return;
@@ -330,22 +352,24 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
struct nouveau_drm *drm;
int ret;
- ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
+ ret = nouveau_cli_create(nouveau_name(dev), "DRM", sizeof(*drm),
+ (void **)&drm);
if (ret)
return ret;
dev->dev_private = drm;
drm->dev = dev;
+ nouveau_client(drm)->debug = nouveau_dbgopt(nouveau_debug, "DRM");
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
- nouveau_get_hdmi_dev(dev);
+ nouveau_get_hdmi_dev(drm);
/* make sure AGP controller is in a consistent state before we
* (possibly) execute vbios init tables (see nouveau_agp.h)
*/
- if (drm_pci_device_is_agp(dev) && dev->agp) {
+ if (pdev && drm_pci_device_is_agp(dev) && dev->agp) {
/* dummy device object, doesn't init anything, but allows
* agp code access to registers
*/
@@ -486,13 +510,13 @@ nouveau_drm_remove(struct pci_dev *pdev)
}
static int
-nouveau_do_suspend(struct drm_device *dev)
+nouveau_do_suspend(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
int ret;
- if (dev->mode_config.num_crtc) {
+ if (dev->mode_config.num_crtc && !runtime) {
NV_INFO(drm, "suspending display...\n");
ret = nouveau_display_suspend(dev);
if (ret)
@@ -566,7 +590,7 @@ int nouveau_pmops_suspend(struct device *dev)
if (drm_dev->mode_config.num_crtc)
nouveau_fbcon_set_suspend(drm_dev, 1);
- ret = nouveau_do_suspend(drm_dev);
+ ret = nouveau_do_suspend(drm_dev, false);
if (ret)
return ret;
@@ -646,7 +670,7 @@ static int nouveau_pmops_freeze(struct device *dev)
if (drm_dev->mode_config.num_crtc)
nouveau_fbcon_set_suspend(drm_dev, 1);
- ret = nouveau_do_suspend(drm_dev);
+ ret = nouveau_do_suspend(drm_dev, false);
return ret;
}
@@ -671,7 +695,6 @@ static int nouveau_pmops_thaw(struct device *dev)
static int
nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
- struct pci_dev *pdev = dev->pdev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
char name[32], tmpname[TASK_COMM_LEN];
@@ -679,13 +702,15 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
/* need to bring up power immediately if opening device */
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -EACCES)
return ret;
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
- ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
+ ret = nouveau_cli_create(nouveau_name(dev), name, sizeof(*cli),
+ (void **)&cli);
+
if (ret)
goto out_suspend;
@@ -762,7 +787,7 @@ long nouveau_drm_ioctl(struct file *filp,
dev = file_priv->minor->dev;
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -EACCES)
return ret;
ret = drm_ioctl(filp, cmd, arg);
@@ -882,7 +907,7 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
nouveau_switcheroo_optimus_dsm();
- ret = nouveau_do_suspend(drm_dev);
+ ret = nouveau_do_suspend(drm_dev, true);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3cold);
@@ -908,8 +933,6 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev);
- if (drm_dev->mode_config.num_crtc)
- nouveau_display_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
nv_mask(device, 0x88488, (1 << 25), (1 << 25));
@@ -980,6 +1003,25 @@ nouveau_drm_pci_driver = {
.driver.pm = &nouveau_pm_ops,
};
+int nouveau_drm_platform_probe(struct platform_device *pdev)
+{
+ struct nouveau_device *device;
+ int ret;
+
+ ret = nouveau_device_create(pdev, NOUVEAU_BUS_PLATFORM,
+ nouveau_platform_name(pdev),
+ dev_name(&pdev->dev), nouveau_config,
+ nouveau_debug, &device);
+
+ ret = drm_platform_init(&driver, pdev);
+ if (ret) {
+ nouveau_object_ref(NULL, (struct nouveau_object **)&device);
+ return ret;
+ }
+
+ return ret;
+}
+
static int __init
nouveau_drm_init(void)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 23ca7a517246..7efbafaf7c1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -161,10 +161,7 @@ int nouveau_pmops_resume(struct device *);
#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
#define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args)
-#define NV_DEBUG(cli, fmt, args...) do { \
- if (drm_debug & DRM_UT_DRIVER) \
- nv_info((cli), fmt, ##args); \
-} while (0)
+#define NV_DEBUG(cli, fmt, args...) nv_debug((cli), fmt, ##args)
extern int nouveau_modeset;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 7903e0ed3c75..64a42cfd3717 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -528,10 +528,10 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->fbcon) {
console_lock();
- if (state == 0)
+ if (state == 1)
nouveau_fbcon_save_disable_accel(dev);
fb_set_suspend(drm->fbcon->helper.fbdev, state);
- if (state == 1)
+ if (state == 0)
nouveau_fbcon_restore_accel(dev);
console_unlock();
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 27c3fd89e8ce..c90c0dc0afe8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -228,8 +228,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo = NULL;
int ret = 0;
- drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
-
if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 4aff04fa483c..19fd767bab10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -383,8 +383,9 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
long value;
int ret;
- if (strict_strtol(buf, 10, &value) == -EINVAL)
- return -EINVAL;
+ ret = kstrtol(buf, 10, &value);
+ if (ret)
+ return ret;
ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value);
if (ret)
@@ -587,18 +588,14 @@ nouveau_hwmon_init(struct drm_device *dev)
/* set the default attributes */
ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup);
- if (ret) {
- if (ret)
- goto error;
- }
+ if (ret)
+ goto error;
/* if the card has a working thermal sensor */
if (therm->temp_get(therm) >= 0) {
ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
- if (ret) {
- if (ret)
- goto error;
- }
+ if (ret)
+ goto error;
}
/* if the card has a pwm fan */
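
The hwmon hunks above switch from strict_strtol(), whose result the old code only compared against -EINVAL, to kstrtol(), and propagate whatever error it returns (they also drop the doubled "if (ret)" tests further down). A minimal sketch of that error handling; parse_decimal() is an illustrative name:

    #include <linux/kernel.h>               /* kstrtol() */

    static int parse_decimal(const char *buf, long *out)
    {
            int ret = kstrtol(buf, 10, out);        /* 0 on success, -EINVAL/-ERANGE on failure */

            if (ret)
                    return ret;                     /* pass the exact error back to the caller */
            return 0;
    }
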
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 89201a17ce75..75dda2b07176 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -30,7 +30,7 @@
static inline struct drm_device *
drm_device(struct device *d)
{
- return pci_get_drvdata(to_pci_dev(d));
+ return dev_get_drvdata(d);
}
#define snappendf(p,r,f,a...) do { \
@@ -132,9 +132,10 @@ nouveau_sysfs_fini(struct drm_device *dev)
{
struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_device *device = nv_device(drm->device);
if (sysfs->ctrl) {
- device_remove_file(&dev->pdev->dev, &dev_attr_pstate);
+ device_remove_file(nv_device_base(device), &dev_attr_pstate);
nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL);
}
@@ -146,6 +147,7 @@ int
nouveau_sysfs_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_device *device = nv_device(drm->device);
struct nouveau_sysfs *sysfs;
int ret;
@@ -156,7 +158,7 @@ nouveau_sysfs_init(struct drm_device *dev)
ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL,
NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl);
if (ret == 0)
- device_create_file(&dev->pdev->dev, &dev_attr_pstate);
+ device_create_file(nv_device_base(device), &dev_attr_pstate);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index d45d50da978f..ab0228f640a5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -354,21 +354,26 @@ int
nouveau_ttm_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
+ struct nouveau_device *device = nv_device(drm->device);
u32 bits;
int ret;
bits = nouveau_vmmgr(drm->device)->dma_bits;
- if ( drm->agp.stat == ENABLED ||
- !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
- bits = 32;
-
- ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
- if (ret)
- return ret;
-
- ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
- if (ret)
- pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+ if (nv_device_is_pci(device)) {
+ if (drm->agp.stat == ENABLED ||
+ !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
+ bits = 32;
+
+ ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
+ if (ret)
+ return ret;
+
+ ret = pci_set_consistent_dma_mask(dev->pdev,
+ DMA_BIT_MASK(bits));
+ if (ret)
+ pci_set_consistent_dma_mask(dev->pdev,
+ DMA_BIT_MASK(32));
+ }
ret = nouveau_ttm_global_init(drm);
if (ret)
@@ -376,7 +381,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
ret = ttm_bo_device_init(&drm->ttm.bdev,
drm->ttm.bo_global_ref.ref.object,
- &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
+ &nouveau_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
bits <= 32 ? true : false);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
@@ -394,8 +401,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
- drm->ttm.mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 1),
- pci_resource_len(dev->pdev, 1));
+ drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(device, 1),
+ nv_device_resource_len(device, 1));
/* GART init */
if (drm->agp.stat != ENABLED) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 471347edc27e..fb84da3cb50d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -84,6 +84,11 @@ nouveau_vga_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
bool runtime = false;
+
+ /* only relevant for PCI devices */
+ if (!dev->pdev)
+ return;
+
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
if (nouveau_runtime_pm == 1)
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2dccafc6e9db..58af547b0b93 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -651,7 +651,7 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
nv_connector = nouveau_crtc_connector_get(nv_crtc);
connector = &nv_connector->base;
if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+ if (nv_crtc->base.primary->fb->depth > connector->display_info.bpc * 3)
mode = DITHERING_MODE_DYNAMIC2X2;
} else {
mode = nv_connector->dithering_mode;
@@ -785,7 +785,8 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
if (update) {
nv50_display_flip_stop(crtc);
- nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+ nv50_display_flip_next(crtc, crtc->primary->fb,
+ NULL, 1);
}
}
@@ -1028,7 +1029,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
}
nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
- nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+ nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
}
static bool
@@ -1042,7 +1043,7 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
static int
nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
struct nv50_head *head = nv50_head(crtc);
int ret;
@@ -1139,7 +1140,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
nv50_crtc_set_dither(nv_crtc, false);
nv50_crtc_set_scale(nv_crtc, false);
nv50_crtc_set_color_vibrance(nv_crtc, false);
- nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+ nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false);
return 0;
}
@@ -1151,7 +1152,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
- if (!crtc->fb) {
+ if (!crtc->primary->fb) {
NV_DEBUG(drm, "No FB bound\n");
return 0;
}
@@ -1161,8 +1162,8 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
nv50_display_flip_stop(crtc);
- nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
- nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+ nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, true);
+ nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 912759daf562..86f4ead0441d 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -37,7 +37,7 @@ struct omap_connector {
void copy_timings_omap_to_drm(struct drm_display_mode *mode,
struct omap_video_timings *timings)
{
- mode->clock = timings->pixel_clock;
+ mode->clock = timings->pixelclock / 1000;
mode->hdisplay = timings->x_res;
mode->hsync_start = mode->hdisplay + timings->hfp;
@@ -68,7 +68,7 @@ void copy_timings_omap_to_drm(struct drm_display_mode *mode,
void copy_timings_drm_to_omap(struct omap_video_timings *timings,
struct drm_display_mode *mode)
{
- timings->pixel_clock = mode->clock;
+ timings->pixelclock = mode->clock * 1000;
timings->x_res = mode->hdisplay;
timings->hfp = mode->hsync_start - mode->hdisplay;
@@ -220,7 +220,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
if (!r) {
/* check if vrefresh is still valid */
new_mode = drm_mode_duplicate(dev, mode);
- new_mode->clock = timings.pixel_clock;
+ new_mode->clock = timings.pixelclock / 1000;
new_mode->vrefresh = 0;
if (mode->vrefresh == drm_mode_vrefresh(new_mode))
ret = MODE_OK;
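
The omap_connector.c hunk above tracks the omapdss rename of pixel_clock to pixelclock, which also changes the unit: drm_display_mode::clock is in kHz, while the new pixelclock field is in Hz, hence the * 1000 and / 1000 factors. Two trivial helpers make the conversion explicit (hypothetical names, added here only to illustrate the units; e.g. a 148500 kHz mode becomes pixelclock = 148500000 Hz):

    static inline unsigned long drm_khz_to_hz(int drm_clock_khz)
    {
            return (unsigned long)drm_clock_khz * 1000;     /* copy_timings_drm_to_omap() direction */
    }

    static inline int hz_to_drm_khz(unsigned long pixelclock_hz)
    {
            return pixelclock_hz / 1000;                    /* copy_timings_omap_to_drm() direction */
    }
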
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 4313bb0a49a6..355157e4f78d 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -245,7 +245,7 @@ static int omap_crtc_mode_set(struct drm_crtc *crtc,
copy_timings_drm_to_omap(&omap_crtc->timings, mode);
omap_crtc->full_update = true;
- return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+ return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16,
@@ -273,7 +273,7 @@ static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_plane *plane = omap_crtc->plane;
struct drm_display_mode *mode = &crtc->mode;
- return omap_plane_mode_set(plane, crtc, crtc->fb,
+ return omap_plane_mode_set(plane, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16,
@@ -308,14 +308,14 @@ static void page_flip_worker(struct work_struct *work)
struct drm_gem_object *bo;
mutex_lock(&crtc->mutex);
- omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+ omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
crtc->x << 16, crtc->y << 16,
mode->hdisplay << 16, mode->vdisplay << 16,
vblank_cb, crtc);
mutex_unlock(&crtc->mutex);
- bo = omap_framebuffer_bo(crtc->fb, 0);
+ bo = omap_framebuffer_bo(crtc->primary->fb, 0);
drm_gem_object_unreference_unlocked(bo);
}
@@ -336,9 +336,10 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_plane *primary = crtc->primary;
struct drm_gem_object *bo;
- DBG("%d -> %d (event=%p)", crtc->fb ? crtc->fb->base.id : -1,
+ DBG("%d -> %d (event=%p)", primary->fb ? primary->fb->base.id : -1,
fb->base.id, event);
if (omap_crtc->old_fb) {
@@ -347,7 +348,7 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
}
omap_crtc->event = event;
- crtc->fb = fb;
+ primary->fb = fb;
/*
* Hold a reference temporarily until the crtc is updated
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index f466c4aaee94..d2b8c49bfb4a 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -306,13 +306,14 @@ struct drm_connector *omap_framebuffer_get_next_connector(
struct drm_connector *connector = from;
if (!from)
- return list_first_entry(connector_list, typeof(*from), head);
+ return list_first_entry_or_null(connector_list, typeof(*from),
+ head);
list_for_each_entry_from(connector, connector_list, head) {
if (connector != from) {
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
- if (crtc && crtc->fb == fb)
+ if (crtc && crtc->primary->fb == fb)
return connector;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 5aec3e81fe24..c8d972763889 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -153,24 +153,24 @@ static struct {
static void evict_entry(struct drm_gem_object *obj,
enum tiler_fmt fmt, struct usergart_entry *entry)
{
- if (obj->dev->dev_mapping) {
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int n = usergart[fmt].height;
- size_t size = PAGE_SIZE * n;
- loff_t off = mmap_offset(obj) +
- (entry->obj_pgoff << PAGE_SHIFT);
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
- if (m > 1) {
- int i;
- /* if stride > than PAGE_SIZE then sparse mapping: */
- for (i = n; i > 0; i--) {
- unmap_mapping_range(obj->dev->dev_mapping,
- off, PAGE_SIZE, 1);
- off += PAGE_SIZE * m;
- }
- } else {
- unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int n = usergart[fmt].height;
+ size_t size = PAGE_SIZE * n;
+ loff_t off = mmap_offset(obj) +
+ (entry->obj_pgoff << PAGE_SHIFT);
+ const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+
+ if (m > 1) {
+ int i;
+ /* if stride > PAGE_SIZE then sparse mapping: */
+ for (i = n; i > 0; i--) {
+ unmap_mapping_range(obj->dev->anon_inode->i_mapping,
+ off, PAGE_SIZE, 1);
+ off += PAGE_SIZE * m;
}
+ } else {
+ unmap_mapping_range(obj->dev->anon_inode->i_mapping,
+ off, size, 1);
}
entry->obj = NULL;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 3e0f13d1bc84..4ec874da5668 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -16,4 +16,18 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_LD9040
+ tristate "LD9040 RGB/SPI panel"
+ depends on DRM && DRM_PANEL
+ depends on OF
+ select SPI
+ select VIDEOMODE_HELPERS
+
+config DRM_PANEL_S6E8AA0
+ tristate "S6E8AA0 DSI video mode panel"
+ depends on DRM && DRM_PANEL
+ depends on OF
+ select DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
+
endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index af9dfa235b94..8b929212fad7 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1 +1,3 @@
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o
+obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
new file mode 100644
index 000000000000..1f1f8371a199
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -0,0 +1,376 @@
+/*
+ * ld9040 AMOLED LCD drm_panel driver.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * Derived from drivers/video/backlight/ld9040.c
+ *
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+/* Manufacturer Command Set */
+#define MCS_MANPWR 0xb0
+#define MCS_ELVSS_ON 0xb1
+#define MCS_USER_SETTING 0xf0
+#define MCS_DISPCTL 0xf2
+#define MCS_GTCON 0xf7
+#define MCS_PANEL_CONDITION 0xf8
+#define MCS_GAMMA_SET1 0xf9
+#define MCS_GAMMA_CTRL 0xfb
+
+/* array of gamma tables for gamma value 2.2 */
+static u8 const ld9040_gammas[25][22] = {
+ { 0xf9, 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30, 0x00, 0xaf, 0xc0,
+ 0xb8, 0xcd, 0x00, 0x3d, 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44 },
+ { 0xf9, 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c, 0x00, 0xaf, 0xbf,
+ 0xb6, 0xcb, 0x00, 0x4b, 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52 },
+ { 0xf9, 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41, 0x00, 0xb0, 0xbe,
+ 0xb5, 0xc9, 0x00, 0x51, 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57 },
+ { 0xf9, 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46, 0x00, 0xb1, 0xbc,
+ 0xb5, 0xc8, 0x00, 0x56, 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d },
+ { 0xf9, 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b, 0x00, 0xb3, 0xbc,
+ 0xb4, 0xc7, 0x00, 0x5c, 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62 },
+ { 0xf9, 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f, 0x00, 0xb4, 0xbb,
+ 0xb3, 0xc7, 0x00, 0x60, 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67 },
+ { 0xf9, 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53, 0x00, 0xb5, 0xbb,
+ 0xb3, 0xc6, 0x00, 0x65, 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c },
+ { 0xf9, 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57, 0x00, 0xb5, 0xbb,
+ 0xb0, 0xc5, 0x00, 0x6a, 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70 },
+ { 0xf9, 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b, 0x00, 0xb5, 0xba,
+ 0xb1, 0xc4, 0x00, 0x6e, 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75 },
+ { 0xf9, 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f, 0x00, 0xb5, 0xba,
+ 0xb0, 0xc3, 0x00, 0x72, 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a },
+ { 0xf9, 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62, 0x00, 0xb6, 0xba,
+ 0xaf, 0xc3, 0x00, 0x76, 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e },
+ { 0xf9, 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65, 0x00, 0xb7, 0xb8,
+ 0xaf, 0xc3, 0x00, 0x7a, 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81 },
+ { 0xf9, 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69, 0x00, 0xb8, 0xb9,
+ 0xae, 0xc1, 0x00, 0x7f, 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85 },
+ { 0xf9, 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c, 0x00, 0xb8, 0xb8,
+ 0xae, 0xc1, 0x00, 0x82, 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89 },
+ { 0xf9, 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f, 0x00, 0xb8, 0xb8,
+ 0xad, 0xc0, 0x00, 0x86, 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d },
+ { 0xf9, 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72, 0x00, 0xb8, 0xb8,
+ 0xac, 0xbf, 0x00, 0x8a, 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91 },
+ { 0xf9, 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75, 0x00, 0xb9, 0xb8,
+ 0xab, 0xbe, 0x00, 0x8e, 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94 },
+ { 0xf9, 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77, 0x00, 0xb9, 0xb7,
+ 0xab, 0xbe, 0x00, 0x90, 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97 },
+ { 0xf9, 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a, 0x00, 0xb9, 0xb7,
+ 0xaa, 0xbd, 0x00, 0x94, 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a },
+ { 0xf9, 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d, 0x00, 0xb9, 0xb6,
+ 0xaa, 0xbb, 0x00, 0x97, 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d },
+ { 0xf9, 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80, 0x00, 0xb8, 0xb6,
+ 0xaa, 0xbc, 0x00, 0x9a, 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0 },
+ { 0xf9, 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84, 0x00, 0xb9, 0xb7,
+ 0xa8, 0xbc, 0x00, 0x9d, 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4 },
+ { 0xf9, 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86, 0x00, 0xb8, 0xb5,
+ 0xa8, 0xbc, 0x00, 0xa0, 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7 },
+ { 0xf9, 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89, 0x00, 0xb7, 0xb6,
+ 0xa8, 0xba, 0x00, 0xa4, 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa },
+ { 0xf9, 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91, 0x00, 0xb2, 0xb4,
+ 0xaa, 0xbb, 0x00, 0xac, 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3 },
+};
+
+struct ld9040 {
+ struct device *dev;
+ struct drm_panel panel;
+
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+ u32 power_on_delay;
+ u32 reset_delay;
+ struct videomode vm;
+ u32 width_mm;
+ u32 height_mm;
+
+ int brightness;
+
+ /* This field is tested by functions directly accessing the bus before
+ * a transfer; the transfer is skipped if the field is set. In case of a
+ * transfer failure or an unexpected response the field is set to the
+ * error value. This construct eliminates many error checks in
+ * higher-level functions.
+ */
+ int error;
+};
+
+#define panel_to_ld9040(p) container_of(p, struct ld9040, panel)
+
+static int ld9040_clear_error(struct ld9040 *ctx)
+{
+ int ret = ctx->error;
+
+ ctx->error = 0;
+ return ret;
+}
+
+static int ld9040_spi_write_word(struct ld9040 *ctx, u16 data)
+{
+ struct spi_device *spi = to_spi_device(ctx->dev);
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = &data,
+ };
+ struct spi_message msg;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(spi, &msg);
+}
+
+static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
+{
+ int ret = 0;
+
+ if (ctx->error < 0 || len == 0)
+ return;
+
+ dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);
+ ret = ld9040_spi_write_word(ctx, *data);
+
+ while (!ret && --len) {
+ ++data;
+ ret = ld9040_spi_write_word(ctx, *data | 0x100);
+ }
+
+ if (ret) {
+ dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
+ data);
+ ctx->error = ret;
+ }
+
+ usleep_range(300, 310);
+}
+
+#define ld9040_dcs_write_seq_static(ctx, seq...) \
+({\
+ static const u8 d[] = { seq };\
+ ld9040_dcs_write(ctx, d, ARRAY_SIZE(d));\
+})
+
+static void ld9040_brightness_set(struct ld9040 *ctx)
+{
+ ld9040_dcs_write(ctx, ld9040_gammas[ctx->brightness],
+ ARRAY_SIZE(ld9040_gammas[ctx->brightness]));
+
+ ld9040_dcs_write_seq_static(ctx, MCS_GAMMA_CTRL, 0x02, 0x5a);
+}
+
+static void ld9040_init(struct ld9040 *ctx)
+{
+ ld9040_dcs_write_seq_static(ctx, MCS_USER_SETTING, 0x5a, 0x5a);
+ ld9040_dcs_write_seq_static(ctx, MCS_PANEL_CONDITION,
+ 0x05, 0x65, 0x96, 0x71, 0x7d, 0x19, 0x3b, 0x0d,
+ 0x19, 0x7e, 0x0d, 0xe2, 0x00, 0x00, 0x7e, 0x7d,
+ 0x07, 0x07, 0x20, 0x20, 0x20, 0x02, 0x02);
+ ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL,
+ 0x02, 0x08, 0x08, 0x10, 0x10);
+ ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04);
+ ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16);
+ ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00);
+ ld9040_brightness_set(ctx);
+ ld9040_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+ ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int ld9040_power_on(struct ld9040 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ msleep(ctx->power_on_delay);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ msleep(ctx->reset_delay);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ msleep(ctx->reset_delay);
+
+ return 0;
+}
+
+static int ld9040_power_off(struct ld9040 *ctx)
+{
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int ld9040_disable(struct drm_panel *panel)
+{
+ struct ld9040 *ctx = panel_to_ld9040(panel);
+
+ msleep(120);
+ ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
+ ld9040_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
+ msleep(40);
+
+ ld9040_clear_error(ctx);
+
+ return ld9040_power_off(ctx);
+}
+
+static int ld9040_enable(struct drm_panel *panel)
+{
+ struct ld9040 *ctx = panel_to_ld9040(panel);
+ int ret;
+
+ ret = ld9040_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ld9040_init(ctx);
+
+ ret = ld9040_clear_error(ctx);
+
+ if (ret < 0)
+ ld9040_disable(panel);
+
+ return ret;
+}
+
+static int ld9040_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct ld9040 *ctx = panel_to_ld9040(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_ERROR("failed to create a new display mode\n");
+ return 0;
+ }
+
+ drm_display_mode_from_videomode(&ctx->vm, mode);
+ mode->width_mm = ctx->width_mm;
+ mode->height_mm = ctx->height_mm;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ld9040_drm_funcs = {
+ .disable = ld9040_disable,
+ .enable = ld9040_enable,
+ .get_modes = ld9040_get_modes,
+};
+
+static int ld9040_parse_dt(struct ld9040 *ctx)
+{
+ struct device *dev = ctx->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_get_videomode(np, &ctx->vm, 0);
+ if (ret < 0)
+ return ret;
+
+ of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
+ of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
+ of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
+ of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
+
+ return 0;
+}
+
+static int ld9040_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ld9040 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ctx);
+
+ ctx->dev = dev;
+ ctx->brightness = ARRAY_SIZE(ld9040_gammas) - 1;
+
+ ret = ld9040_parse_dt(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset");
+ if (IS_ERR(ctx->reset_gpio)) {
+ dev_err(dev, "cannot get reset-gpios %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+ ret = gpiod_direction_output(ctx->reset_gpio, 1);
+ if (ret < 0) {
+ dev_err(dev, "cannot configure reset-gpios %d\n", ret);
+ return ret;
+ }
+
+ spi->bits_per_word = 9;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(dev, "spi setup failed.\n");
+ return ret;
+ }
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &ld9040_drm_funcs;
+
+ return drm_panel_add(&ctx->panel);
+}
+
+static int ld9040_remove(struct spi_device *spi)
+{
+ struct ld9040 *ctx = spi_get_drvdata(spi);
+
+ ld9040_power_off(ctx);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id ld9040_of_match[] = {
+ { .compatible = "samsung,ld9040" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ld9040_of_match);
+
+static struct spi_driver ld9040_driver = {
+ .probe = ld9040_probe,
+ .remove = ld9040_remove,
+ .driver = {
+ .name = "ld9040",
+ .owner = THIS_MODULE,
+ .of_match_table = ld9040_of_match,
+ },
+};
+module_spi_driver(ld9040_driver);
+
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("ld9040 LCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
new file mode 100644
index 000000000000..35941d2412b8
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -0,0 +1,1069 @@
+/*
+ * MIPI-DSI based s6e8aa0 AMOLED LCD 5.3 inch panel driver.
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ *
+ * Inki Dae, <inki.dae@samsung.com>
+ * Donghwa Lee, <dh09.lee@samsung.com>
+ * Joongmock Shin <jmock.shin@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Tomasz Figa <t.figa@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#define LDI_MTP_LENGTH 24
+#define GAMMA_LEVEL_NUM 25
+#define GAMMA_TABLE_LEN 26
+
+#define PANELCTL_SS_MASK (1 << 5)
+#define PANELCTL_SS_1_800 (0 << 5)
+#define PANELCTL_SS_800_1 (1 << 5)
+#define PANELCTL_GTCON_MASK (7 << 2)
+#define PANELCTL_GTCON_110 (6 << 2)
+#define PANELCTL_GTCON_111 (7 << 2)
+
+#define PANELCTL_CLK1_CON_MASK (7 << 3)
+#define PANELCTL_CLK1_000 (0 << 3)
+#define PANELCTL_CLK1_001 (1 << 3)
+#define PANELCTL_CLK2_CON_MASK (7 << 0)
+#define PANELCTL_CLK2_000 (0 << 0)
+#define PANELCTL_CLK2_001 (1 << 0)
+
+#define PANELCTL_INT1_CON_MASK (7 << 3)
+#define PANELCTL_INT1_000 (0 << 3)
+#define PANELCTL_INT1_001 (1 << 3)
+#define PANELCTL_INT2_CON_MASK (7 << 0)
+#define PANELCTL_INT2_000 (0 << 0)
+#define PANELCTL_INT2_001 (1 << 0)
+
+#define PANELCTL_BICTL_CON_MASK (7 << 3)
+#define PANELCTL_BICTL_000 (0 << 3)
+#define PANELCTL_BICTL_001 (1 << 3)
+#define PANELCTL_BICTLB_CON_MASK (7 << 0)
+#define PANELCTL_BICTLB_000 (0 << 0)
+#define PANELCTL_BICTLB_001 (1 << 0)
+
+#define PANELCTL_EM_CLK1_CON_MASK (7 << 3)
+#define PANELCTL_EM_CLK1_110 (6 << 3)
+#define PANELCTL_EM_CLK1_111 (7 << 3)
+#define PANELCTL_EM_CLK1B_CON_MASK (7 << 0)
+#define PANELCTL_EM_CLK1B_110 (6 << 0)
+#define PANELCTL_EM_CLK1B_111 (7 << 0)
+
+#define PANELCTL_EM_CLK2_CON_MASK (7 << 3)
+#define PANELCTL_EM_CLK2_110 (6 << 3)
+#define PANELCTL_EM_CLK2_111 (7 << 3)
+#define PANELCTL_EM_CLK2B_CON_MASK (7 << 0)
+#define PANELCTL_EM_CLK2B_110 (6 << 0)
+#define PANELCTL_EM_CLK2B_111 (7 << 0)
+
+#define PANELCTL_EM_INT1_CON_MASK (7 << 3)
+#define PANELCTL_EM_INT1_000 (0 << 3)
+#define PANELCTL_EM_INT1_001 (1 << 3)
+#define PANELCTL_EM_INT2_CON_MASK (7 << 0)
+#define PANELCTL_EM_INT2_000 (0 << 0)
+#define PANELCTL_EM_INT2_001 (1 << 0)
+
+#define AID_DISABLE (0x4)
+#define AID_1 (0x5)
+#define AID_2 (0x6)
+#define AID_3 (0x7)
+
+typedef u8 s6e8aa0_gamma_table[GAMMA_TABLE_LEN];
+
+struct s6e8aa0_variant {
+ u8 version;
+ const s6e8aa0_gamma_table *gamma_tables;
+};
+
+struct s6e8aa0 {
+ struct device *dev;
+ struct drm_panel panel;
+
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+ u32 power_on_delay;
+ u32 reset_delay;
+ u32 init_delay;
+ bool flip_horizontal;
+ bool flip_vertical;
+ struct videomode vm;
+ u32 width_mm;
+ u32 height_mm;
+
+ u8 version;
+ u8 id;
+ const struct s6e8aa0_variant *variant;
+ int brightness;
+
+ /* This field is tested by functions directly accessing the DSI bus before
+ * a transfer; the transfer is skipped if the field is set. In case of a
+ * transfer failure or an unexpected response the field is set to the
+ * error value. This construct eliminates many error checks in
+ * higher-level functions.
+ */
+ int error;
+};
+
+#define panel_to_s6e8aa0(p) container_of(p, struct s6e8aa0, panel)
+
+static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
+{
+ int ret = ctx->error;
+
+ ctx->error = 0;
+ return ret;
+}
+
+static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->error < 0)
+ return;
+
+ ret = mipi_dsi_dcs_write(dsi, dsi->channel, data, len);
+ if (ret < 0) {
+ dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
+ data);
+ ctx->error = ret;
+ }
+}
+
+static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->error < 0)
+ return ctx->error;
+
+ ret = mipi_dsi_dcs_read(dsi, dsi->channel, cmd, data, len);
+ if (ret < 0) {
+ dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
+ ctx->error = ret;
+ }
+
+ return ret;
+}
+
+#define s6e8aa0_dcs_write_seq(ctx, seq...) \
+({\
+ const u8 d[] = { seq };\
+ BUILD_BUG_ON_MSG(ARRAY_SIZE(d) > 64, "DCS sequence too big for stack");\
+ s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
+})
+
+#define s6e8aa0_dcs_write_seq_static(ctx, seq...) \
+({\
+ static const u8 d[] = { seq };\
+ s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
+})
+
+static void s6e8aa0_apply_level_1_key(struct s6e8aa0 *ctx)
+{
+ s6e8aa0_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a);
+}
+
+static void s6e8aa0_panel_cond_set_v142(struct s6e8aa0 *ctx)
+{
+ static const u8 aids[] = {
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x60, 0x80, 0xA0
+ };
+ u8 aid = aids[ctx->id >> 5];
+ u8 cfg = 0x3d;
+ u8 clk_con = 0xc8;
+ u8 int_con = 0x08;
+ u8 bictl_con = 0x48;
+ u8 em_clk1_con = 0xff;
+ u8 em_clk2_con = 0xff;
+ u8 em_int_con = 0xc8;
+
+ if (ctx->flip_vertical) {
+ /* GTCON */
+ cfg &= ~(PANELCTL_GTCON_MASK);
+ cfg |= (PANELCTL_GTCON_110);
+ }
+
+ if (ctx->flip_horizontal) {
+ /* SS */
+ cfg &= ~(PANELCTL_SS_MASK);
+ cfg |= (PANELCTL_SS_1_800);
+ }
+
+ if (ctx->flip_horizontal || ctx->flip_vertical) {
+ /* CLK1,2_CON */
+ clk_con &= ~(PANELCTL_CLK1_CON_MASK |
+ PANELCTL_CLK2_CON_MASK);
+ clk_con |= (PANELCTL_CLK1_000 | PANELCTL_CLK2_001);
+
+ /* INT1,2_CON */
+ int_con &= ~(PANELCTL_INT1_CON_MASK |
+ PANELCTL_INT2_CON_MASK);
+ int_con |= (PANELCTL_INT1_000 | PANELCTL_INT2_001);
+
+ /* BICTL,B_CON */
+ bictl_con &= ~(PANELCTL_BICTL_CON_MASK |
+ PANELCTL_BICTLB_CON_MASK);
+ bictl_con |= (PANELCTL_BICTL_000 |
+ PANELCTL_BICTLB_001);
+
+ /* EM_CLK1,1B_CON */
+ em_clk1_con &= ~(PANELCTL_EM_CLK1_CON_MASK |
+ PANELCTL_EM_CLK1B_CON_MASK);
+ em_clk1_con |= (PANELCTL_EM_CLK1_110 |
+ PANELCTL_EM_CLK1B_110);
+
+ /* EM_CLK2,2B_CON */
+ em_clk2_con &= ~(PANELCTL_EM_CLK2_CON_MASK |
+ PANELCTL_EM_CLK2B_CON_MASK);
+ em_clk2_con |= (PANELCTL_EM_CLK2_110 |
+ PANELCTL_EM_CLK2B_110);
+
+ /* EM_INT1,2_CON */
+ em_int_con &= ~(PANELCTL_EM_INT1_CON_MASK |
+ PANELCTL_EM_INT2_CON_MASK);
+ em_int_con |= (PANELCTL_EM_INT1_000 |
+ PANELCTL_EM_INT2_001);
+ }
+
+ s6e8aa0_dcs_write_seq(ctx,
+ 0xf8, cfg, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00,
+ 0x3c, 0x78, 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00,
+ 0x00, 0x20, aid, 0x08, 0x6e, 0x00, 0x00, 0x00,
+ 0x02, 0x07, 0x07, 0x23, 0x23, 0xc0, clk_con, int_con,
+ bictl_con, 0xc1, 0x00, 0xc1, em_clk1_con, em_clk2_con,
+ em_int_con);
+}
+
+static void s6e8aa0_panel_cond_set(struct s6e8aa0 *ctx)
+{
+ if (ctx->version < 142)
+ s6e8aa0_dcs_write_seq_static(ctx,
+ 0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x94, 0x00,
+ 0x3c, 0x78, 0x10, 0x27, 0x08, 0x6e, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x08, 0x6e, 0x00, 0x00, 0x00,
+ 0x00, 0x07, 0x07, 0x23, 0x6e, 0xc0, 0xc1, 0x01,
+ 0x81, 0xc1, 0x00, 0xc3, 0xf6, 0xf6, 0xc1
+ );
+ else
+ s6e8aa0_panel_cond_set_v142(ctx);
+}
+
+static void s6e8aa0_display_condition_set(struct s6e8aa0 *ctx)
+{
+ s6e8aa0_dcs_write_seq_static(ctx, 0xf2, 0x80, 0x03, 0x0d);
+}
+
+static void s6e8aa0_etc_source_control(struct s6e8aa0 *ctx)
+{
+ s6e8aa0_dcs_write_seq_static(ctx, 0xf6, 0x00, 0x02, 0x00);
+}
+
+static void s6e8aa0_etc_pentile_control(struct s6e8aa0 *ctx)
+{
+ static const u8 pent32[] = {
+ 0xb6, 0x0c, 0x02, 0x03, 0x32, 0xc0, 0x44, 0x44, 0xc0, 0x00
+ };
+
+ static const u8 pent142[] = {
+ 0xb6, 0x0c, 0x02, 0x03, 0x32, 0xff, 0x44, 0x44, 0xc0, 0x00
+ };
+
+ if (ctx->version < 142)
+ s6e8aa0_dcs_write(ctx, pent32, ARRAY_SIZE(pent32));
+ else
+ s6e8aa0_dcs_write(ctx, pent142, ARRAY_SIZE(pent142));
+}
+
+static void s6e8aa0_etc_power_control(struct s6e8aa0 *ctx)
+{
+ static const u8 pwr142[] = {
+ 0xf4, 0xcf, 0x0a, 0x12, 0x10, 0x1e, 0x33, 0x02
+ };
+
+ static const u8 pwr32[] = {
+ 0xf4, 0xcf, 0x0a, 0x15, 0x10, 0x19, 0x33, 0x02
+ };
+
+ if (ctx->version < 142)
+ s6e8aa0_dcs_write(ctx, pwr32, ARRAY_SIZE(pwr32));
+ else
+ s6e8aa0_dcs_write(ctx, pwr142, ARRAY_SIZE(pwr142));
+}
+
+static void s6e8aa0_etc_elvss_control(struct s6e8aa0 *ctx)
+{
+ u8 id = ctx->id ? 0 : 0x95;
+
+ s6e8aa0_dcs_write_seq(ctx, 0xb1, 0x04, id);
+}
+
+static void s6e8aa0_elvss_nvm_set_v142(struct s6e8aa0 *ctx)
+{
+ u8 br;
+
+ switch (ctx->brightness) {
+ case 0 ... 6: /* 30cd ~ 100cd */
+ br = 0xdf;
+ break;
+ case 7 ... 11: /* 120cd ~ 150cd */
+ br = 0xdd;
+ break;
+ case 12 ... 15: /* 180cd ~ 210cd */
+ default:
+ br = 0xd9;
+ break;
+ case 16 ... 24: /* 240cd ~ 300cd */
+ br = 0xd0;
+ break;
+ }
+
+ s6e8aa0_dcs_write_seq(ctx, 0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e,
+ 0xc4, 0x0f, 0x40, 0x41, br, 0x00, 0x60, 0x19);
+}
+
+static void s6e8aa0_elvss_nvm_set(struct s6e8aa0 *ctx)
+{
+ if (ctx->version < 142)
+ s6e8aa0_dcs_write_seq_static(ctx,
+ 0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e, 0xc4, 0x07,
+ 0x40, 0x41, 0xc1, 0x00, 0x60, 0x19);
+ else
+ s6e8aa0_elvss_nvm_set_v142(ctx);
+}
+
+static void s6e8aa0_apply_level_2_key(struct s6e8aa0 *ctx)
+{
+ s6e8aa0_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a);
+}
+
+static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v142[GAMMA_LEVEL_NUM] = {
+ {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x62, 0x55, 0x55,
+ 0xaf, 0xb1, 0xb1, 0xbd, 0xce, 0xb7, 0x9a, 0xb1,
+ 0x90, 0xb2, 0xc4, 0xae, 0x00, 0x60, 0x00, 0x40,
+ 0x00, 0x70,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x74, 0x68, 0x69,
+ 0xb8, 0xc1, 0xb7, 0xbd, 0xcd, 0xb8, 0x93, 0xab,
+ 0x88, 0xb4, 0xc4, 0xb1, 0x00, 0x6b, 0x00, 0x4d,
+ 0x00, 0x7d,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x95, 0x8a, 0x89,
+ 0xb4, 0xc6, 0xb2, 0xc5, 0xd2, 0xbf, 0x90, 0xa8,
+ 0x85, 0xb5, 0xc4, 0xb3, 0x00, 0x7b, 0x00, 0x5d,
+ 0x00, 0x8f,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9f, 0x98, 0x92,
+ 0xb3, 0xc4, 0xb0, 0xbc, 0xcc, 0xb4, 0x91, 0xa6,
+ 0x87, 0xb5, 0xc5, 0xb4, 0x00, 0x87, 0x00, 0x6a,
+ 0x00, 0x9e,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x99, 0x93, 0x8b,
+ 0xb2, 0xc2, 0xb0, 0xbd, 0xce, 0xb4, 0x90, 0xa6,
+ 0x87, 0xb3, 0xc3, 0xb2, 0x00, 0x8d, 0x00, 0x70,
+ 0x00, 0xa4,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xa5, 0x99,
+ 0xb2, 0xc2, 0xb0, 0xbb, 0xcd, 0xb1, 0x93, 0xa7,
+ 0x8a, 0xb2, 0xc1, 0xb0, 0x00, 0x92, 0x00, 0x75,
+ 0x00, 0xaa,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xa0, 0x93,
+ 0xb6, 0xc4, 0xb4, 0xb5, 0xc8, 0xaa, 0x94, 0xa9,
+ 0x8c, 0xb2, 0xc0, 0xb0, 0x00, 0x97, 0x00, 0x7a,
+ 0x00, 0xaf,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xa7, 0x96,
+ 0xb3, 0xc2, 0xb0, 0xba, 0xcb, 0xb0, 0x94, 0xa8,
+ 0x8c, 0xb0, 0xbf, 0xaf, 0x00, 0x9f, 0x00, 0x83,
+ 0x00, 0xb9,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9d, 0xa2, 0x90,
+ 0xb6, 0xc5, 0xb3, 0xb8, 0xc9, 0xae, 0x94, 0xa8,
+ 0x8d, 0xaf, 0xbd, 0xad, 0x00, 0xa4, 0x00, 0x88,
+ 0x00, 0xbf,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xac, 0x97,
+ 0xb4, 0xc4, 0xb1, 0xbb, 0xcb, 0xb2, 0x93, 0xa7,
+ 0x8d, 0xae, 0xbc, 0xad, 0x00, 0xa7, 0x00, 0x8c,
+ 0x00, 0xc3,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa2, 0xa9, 0x93,
+ 0xb6, 0xc5, 0xb2, 0xba, 0xc9, 0xb0, 0x93, 0xa7,
+ 0x8d, 0xae, 0xbb, 0xac, 0x00, 0xab, 0x00, 0x90,
+ 0x00, 0xc8,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9e, 0xa6, 0x8f,
+ 0xb7, 0xc6, 0xb3, 0xb8, 0xc8, 0xb0, 0x93, 0xa6,
+ 0x8c, 0xae, 0xbb, 0xad, 0x00, 0xae, 0x00, 0x93,
+ 0x00, 0xcc,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb4, 0x9c,
+ 0xb3, 0xc3, 0xaf, 0xb7, 0xc7, 0xaf, 0x93, 0xa6,
+ 0x8c, 0xaf, 0xbc, 0xad, 0x00, 0xb1, 0x00, 0x97,
+ 0x00, 0xcf,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xb1, 0x98,
+ 0xb1, 0xc2, 0xab, 0xba, 0xc9, 0xb2, 0x93, 0xa6,
+ 0x8d, 0xae, 0xba, 0xab, 0x00, 0xb5, 0x00, 0x9b,
+ 0x00, 0xd4,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xae, 0x94,
+ 0xb2, 0xc3, 0xac, 0xbb, 0xca, 0xb4, 0x91, 0xa4,
+ 0x8a, 0xae, 0xba, 0xac, 0x00, 0xb8, 0x00, 0x9e,
+ 0x00, 0xd8,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb7, 0x9c,
+ 0xae, 0xc0, 0xa9, 0xba, 0xc9, 0xb3, 0x92, 0xa5,
+ 0x8b, 0xad, 0xb9, 0xab, 0x00, 0xbb, 0x00, 0xa1,
+ 0x00, 0xdc,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb4, 0x97,
+ 0xb0, 0xc1, 0xaa, 0xb9, 0xc8, 0xb2, 0x92, 0xa5,
+ 0x8c, 0xae, 0xb9, 0xab, 0x00, 0xbe, 0x00, 0xa4,
+ 0x00, 0xdf,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
+ 0xb0, 0xc2, 0xab, 0xbb, 0xc9, 0xb3, 0x91, 0xa4,
+ 0x8b, 0xad, 0xb8, 0xaa, 0x00, 0xc1, 0x00, 0xa8,
+ 0x00, 0xe2,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
+ 0xae, 0xbf, 0xa8, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
+ 0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xc4, 0x00, 0xab,
+ 0x00, 0xe6,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb6, 0x98,
+ 0xaf, 0xc0, 0xa8, 0xb8, 0xc7, 0xb2, 0x93, 0xa5,
+ 0x8d, 0xad, 0xb7, 0xa9, 0x00, 0xc7, 0x00, 0xae,
+ 0x00, 0xe9,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
+ 0xaf, 0xc1, 0xa9, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
+ 0x8b, 0xad, 0xb7, 0xaa, 0x00, 0xc9, 0x00, 0xb0,
+ 0x00, 0xec,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
+ 0xac, 0xbe, 0xa6, 0xbb, 0xc9, 0xb4, 0x90, 0xa3,
+ 0x8a, 0xad, 0xb7, 0xa9, 0x00, 0xcc, 0x00, 0xb4,
+ 0x00, 0xf0,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xb0, 0x91,
+ 0xae, 0xc0, 0xa6, 0xba, 0xc8, 0xb4, 0x91, 0xa4,
+ 0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xcf, 0x00, 0xb7,
+ 0x00, 0xf3,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb8, 0x98,
+ 0xab, 0xbd, 0xa4, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
+ 0x8b, 0xac, 0xb6, 0xa8, 0x00, 0xd1, 0x00, 0xb9,
+ 0x00, 0xf6,
+ }, {
+ 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb5, 0x95,
+ 0xa9, 0xbc, 0xa1, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
+ 0x8a, 0xad, 0xb6, 0xa8, 0x00, 0xd6, 0x00, 0xbf,
+ 0x00, 0xfc,
+ },
+};
+
+static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v96[GAMMA_LEVEL_NUM] = {
+ {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+ 0xdf, 0x1f, 0xd7, 0xdc, 0xb7, 0xe1, 0xc0, 0xaf,
+ 0xc4, 0xd2, 0xd0, 0xcf, 0x00, 0x4d, 0x00, 0x40,
+ 0x00, 0x5f,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+ 0xd5, 0x35, 0xcf, 0xdc, 0xc1, 0xe1, 0xbf, 0xb3,
+ 0xc1, 0xd2, 0xd1, 0xce, 0x00, 0x53, 0x00, 0x46,
+ 0x00, 0x67,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+ 0xd2, 0x64, 0xcf, 0xdb, 0xc6, 0xe1, 0xbd, 0xb3,
+ 0xbd, 0xd2, 0xd2, 0xce, 0x00, 0x59, 0x00, 0x4b,
+ 0x00, 0x6e,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+ 0xd0, 0x7c, 0xcf, 0xdb, 0xc9, 0xe0, 0xbc, 0xb4,
+ 0xbb, 0xcf, 0xd1, 0xcc, 0x00, 0x5f, 0x00, 0x50,
+ 0x00, 0x75,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+ 0xd0, 0x8e, 0xd1, 0xdb, 0xcc, 0xdf, 0xbb, 0xb6,
+ 0xb9, 0xd0, 0xd1, 0xcd, 0x00, 0x63, 0x00, 0x54,
+ 0x00, 0x7a,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+ 0xd1, 0x9e, 0xd5, 0xda, 0xcd, 0xdd, 0xbb, 0xb7,
+ 0xb9, 0xce, 0xce, 0xc9, 0x00, 0x68, 0x00, 0x59,
+ 0x00, 0x81,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+ 0xd0, 0xa5, 0xd6, 0xda, 0xcf, 0xdd, 0xbb, 0xb7,
+ 0xb8, 0xcc, 0xcd, 0xc7, 0x00, 0x6c, 0x00, 0x5c,
+ 0x00, 0x86,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xfe,
+ 0xd0, 0xae, 0xd7, 0xd9, 0xd0, 0xdb, 0xb9, 0xb6,
+ 0xb5, 0xca, 0xcc, 0xc5, 0x00, 0x74, 0x00, 0x63,
+ 0x00, 0x90,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf9,
+ 0xcf, 0xb0, 0xd6, 0xd9, 0xd1, 0xdb, 0xb9, 0xb6,
+ 0xb4, 0xca, 0xcb, 0xc5, 0x00, 0x77, 0x00, 0x66,
+ 0x00, 0x94,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf7,
+ 0xcf, 0xb3, 0xd7, 0xd8, 0xd1, 0xd9, 0xb7, 0xb6,
+ 0xb3, 0xc9, 0xca, 0xc3, 0x00, 0x7b, 0x00, 0x69,
+ 0x00, 0x99,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfd, 0x2f, 0xf7,
+ 0xdf, 0xb5, 0xd6, 0xd8, 0xd1, 0xd8, 0xb6, 0xb5,
+ 0xb2, 0xca, 0xcb, 0xc4, 0x00, 0x7e, 0x00, 0x6c,
+ 0x00, 0x9d,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfa, 0x2f, 0xf5,
+ 0xce, 0xb6, 0xd5, 0xd7, 0xd2, 0xd8, 0xb6, 0xb4,
+ 0xb0, 0xc7, 0xc9, 0xc1, 0x00, 0x84, 0x00, 0x71,
+ 0x00, 0xa5,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf7, 0x2f, 0xf2,
+ 0xce, 0xb9, 0xd5, 0xd8, 0xd2, 0xd8, 0xb4, 0xb4,
+ 0xaf, 0xc7, 0xc9, 0xc1, 0x00, 0x87, 0x00, 0x73,
+ 0x00, 0xa8,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf5, 0x2f, 0xf0,
+ 0xdf, 0xba, 0xd5, 0xd7, 0xd2, 0xd7, 0xb4, 0xb4,
+ 0xaf, 0xc5, 0xc7, 0xbf, 0x00, 0x8a, 0x00, 0x76,
+ 0x00, 0xac,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf2, 0x2f, 0xed,
+ 0xce, 0xbb, 0xd4, 0xd6, 0xd2, 0xd6, 0xb5, 0xb4,
+ 0xaf, 0xc5, 0xc7, 0xbf, 0x00, 0x8c, 0x00, 0x78,
+ 0x00, 0xaf,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x2f, 0xeb,
+ 0xcd, 0xbb, 0xd2, 0xd7, 0xd3, 0xd6, 0xb3, 0xb4,
+ 0xae, 0xc5, 0xc6, 0xbe, 0x00, 0x91, 0x00, 0x7d,
+ 0x00, 0xb6,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xee, 0x2f, 0xea,
+ 0xce, 0xbd, 0xd4, 0xd6, 0xd2, 0xd5, 0xb2, 0xb3,
+ 0xad, 0xc3, 0xc4, 0xbb, 0x00, 0x94, 0x00, 0x7f,
+ 0x00, 0xba,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xec, 0x2f, 0xe8,
+ 0xce, 0xbe, 0xd3, 0xd6, 0xd3, 0xd5, 0xb2, 0xb2,
+ 0xac, 0xc3, 0xc5, 0xbc, 0x00, 0x96, 0x00, 0x81,
+ 0x00, 0xbd,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xeb, 0x2f, 0xe7,
+ 0xce, 0xbf, 0xd3, 0xd6, 0xd2, 0xd5, 0xb1, 0xb2,
+ 0xab, 0xc2, 0xc4, 0xbb, 0x00, 0x99, 0x00, 0x83,
+ 0x00, 0xc0,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x5f, 0xe9,
+ 0xca, 0xbf, 0xd3, 0xd5, 0xd2, 0xd4, 0xb2, 0xb2,
+ 0xab, 0xc1, 0xc4, 0xba, 0x00, 0x9b, 0x00, 0x85,
+ 0x00, 0xc3,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xea, 0x5f, 0xe8,
+ 0xee, 0xbf, 0xd2, 0xd5, 0xd2, 0xd4, 0xb1, 0xb2,
+ 0xab, 0xc1, 0xc2, 0xb9, 0x00, 0x9d, 0x00, 0x87,
+ 0x00, 0xc6,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe9, 0x5f, 0xe7,
+ 0xcd, 0xbf, 0xd2, 0xd6, 0xd2, 0xd4, 0xb1, 0xb2,
+ 0xab, 0xbe, 0xc0, 0xb7, 0x00, 0xa1, 0x00, 0x8a,
+ 0x00, 0xca,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x61, 0xe6,
+ 0xcd, 0xbf, 0xd1, 0xd6, 0xd3, 0xd4, 0xaf, 0xb0,
+ 0xa9, 0xbe, 0xc1, 0xb7, 0x00, 0xa3, 0x00, 0x8b,
+ 0x00, 0xce,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x62, 0xe5,
+ 0xcc, 0xc0, 0xd0, 0xd6, 0xd2, 0xd4, 0xaf, 0xb1,
+ 0xa9, 0xbd, 0xc0, 0xb6, 0x00, 0xa5, 0x00, 0x8d,
+ 0x00, 0xd0,
+ }, {
+ 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe7, 0x7f, 0xe3,
+ 0xcc, 0xc1, 0xd0, 0xd5, 0xd3, 0xd3, 0xae, 0xaf,
+ 0xa8, 0xbe, 0xc0, 0xb7, 0x00, 0xa8, 0x00, 0x90,
+ 0x00, 0xd3,
+ }
+};
+
+static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v32[GAMMA_LEVEL_NUM] = {
+ {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0x72, 0x5e, 0x6b,
+ 0xa1, 0xa7, 0x9a, 0xb4, 0xcb, 0xb8, 0x92, 0xac,
+ 0x97, 0xb4, 0xc3, 0xb5, 0x00, 0x4e, 0x00, 0x37,
+ 0x00, 0x58,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0x85, 0x71, 0x7d,
+ 0xa6, 0xb6, 0xa1, 0xb5, 0xca, 0xba, 0x93, 0xac,
+ 0x98, 0xb2, 0xc0, 0xaf, 0x00, 0x59, 0x00, 0x43,
+ 0x00, 0x64,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xa4, 0x94, 0x9e,
+ 0xa0, 0xbb, 0x9c, 0xc3, 0xd2, 0xc6, 0x93, 0xaa,
+ 0x95, 0xb7, 0xc2, 0xb4, 0x00, 0x65, 0x00, 0x50,
+ 0x00, 0x74,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa1, 0xa6,
+ 0xa0, 0xb9, 0x9b, 0xc3, 0xd1, 0xc8, 0x90, 0xa6,
+ 0x90, 0xbb, 0xc3, 0xb7, 0x00, 0x6f, 0x00, 0x5b,
+ 0x00, 0x80,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xa6, 0x9d, 0x9f,
+ 0x9f, 0xb8, 0x9a, 0xc7, 0xd5, 0xcc, 0x90, 0xa5,
+ 0x8f, 0xb8, 0xc1, 0xb6, 0x00, 0x74, 0x00, 0x60,
+ 0x00, 0x85,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb3, 0xae, 0xae,
+ 0x9e, 0xb7, 0x9a, 0xc8, 0xd6, 0xce, 0x91, 0xa6,
+ 0x90, 0xb6, 0xc0, 0xb3, 0x00, 0x78, 0x00, 0x65,
+ 0x00, 0x8a,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa9, 0xa8,
+ 0xa3, 0xb9, 0x9e, 0xc4, 0xd3, 0xcb, 0x94, 0xa6,
+ 0x90, 0xb6, 0xbf, 0xb3, 0x00, 0x7c, 0x00, 0x69,
+ 0x00, 0x8e,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xaf, 0xaf, 0xa9,
+ 0xa5, 0xbc, 0xa2, 0xc7, 0xd5, 0xcd, 0x93, 0xa5,
+ 0x8f, 0xb4, 0xbd, 0xb1, 0x00, 0x83, 0x00, 0x70,
+ 0x00, 0x96,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xab, 0xa3,
+ 0xaa, 0xbf, 0xa7, 0xc5, 0xd3, 0xcb, 0x93, 0xa5,
+ 0x8f, 0xb2, 0xbb, 0xb0, 0x00, 0x86, 0x00, 0x74,
+ 0x00, 0x9b,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xb5, 0xab,
+ 0xab, 0xc0, 0xa9, 0xc7, 0xd4, 0xcc, 0x94, 0xa4,
+ 0x8f, 0xb1, 0xbb, 0xaf, 0x00, 0x8a, 0x00, 0x77,
+ 0x00, 0x9e,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb2, 0xa7,
+ 0xae, 0xc2, 0xab, 0xc5, 0xd3, 0xca, 0x93, 0xa4,
+ 0x8f, 0xb1, 0xba, 0xae, 0x00, 0x8d, 0x00, 0x7b,
+ 0x00, 0xa2,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xaf, 0xa3,
+ 0xb0, 0xc3, 0xae, 0xc4, 0xd1, 0xc8, 0x93, 0xa4,
+ 0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x8f, 0x00, 0x7d,
+ 0x00, 0xa5,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbd, 0xaf,
+ 0xae, 0xc1, 0xab, 0xc2, 0xd0, 0xc6, 0x94, 0xa4,
+ 0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x92, 0x00, 0x80,
+ 0x00, 0xa8,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xb9, 0xac,
+ 0xad, 0xc1, 0xab, 0xc4, 0xd1, 0xc7, 0x95, 0xa4,
+ 0x90, 0xb0, 0xb9, 0xad, 0x00, 0x95, 0x00, 0x84,
+ 0x00, 0xac,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb6, 0xa7,
+ 0xaf, 0xc2, 0xae, 0xc5, 0xd1, 0xc7, 0x93, 0xa3,
+ 0x8e, 0xb0, 0xb9, 0xad, 0x00, 0x98, 0x00, 0x86,
+ 0x00, 0xaf,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbf, 0xaf,
+ 0xad, 0xc1, 0xab, 0xc3, 0xd0, 0xc6, 0x94, 0xa3,
+ 0x8f, 0xaf, 0xb8, 0xac, 0x00, 0x9a, 0x00, 0x89,
+ 0x00, 0xb2,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xbc, 0xac,
+ 0xaf, 0xc2, 0xad, 0xc2, 0xcf, 0xc4, 0x94, 0xa3,
+ 0x90, 0xaf, 0xb8, 0xad, 0x00, 0x9c, 0x00, 0x8b,
+ 0x00, 0xb5,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
+ 0xb1, 0xc4, 0xaf, 0xc3, 0xcf, 0xc5, 0x94, 0xa3,
+ 0x8f, 0xae, 0xb7, 0xac, 0x00, 0x9f, 0x00, 0x8e,
+ 0x00, 0xb8,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
+ 0xaf, 0xc2, 0xad, 0xc1, 0xce, 0xc3, 0x95, 0xa3,
+ 0x90, 0xad, 0xb6, 0xab, 0x00, 0xa2, 0x00, 0x91,
+ 0x00, 0xbb,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xbe, 0xac,
+ 0xb1, 0xc4, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa4,
+ 0x91, 0xad, 0xb6, 0xab, 0x00, 0xa4, 0x00, 0x93,
+ 0x00, 0xbd,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
+ 0xb3, 0xc5, 0xb2, 0xc1, 0xcd, 0xc2, 0x95, 0xa3,
+ 0x90, 0xad, 0xb6, 0xab, 0x00, 0xa6, 0x00, 0x95,
+ 0x00, 0xc0,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
+ 0xb0, 0xc3, 0xaf, 0xc2, 0xce, 0xc2, 0x94, 0xa2,
+ 0x90, 0xac, 0xb6, 0xab, 0x00, 0xa8, 0x00, 0x98,
+ 0x00, 0xc3,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xb8, 0xa5,
+ 0xb3, 0xc5, 0xb2, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
+ 0x90, 0xad, 0xb6, 0xab, 0x00, 0xaa, 0x00, 0x9a,
+ 0x00, 0xc5,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xc0, 0xac,
+ 0xb0, 0xc3, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa2,
+ 0x90, 0xac, 0xb5, 0xa9, 0x00, 0xac, 0x00, 0x9c,
+ 0x00, 0xc8,
+ }, {
+ 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbd, 0xa8,
+ 0xaf, 0xc2, 0xaf, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
+ 0x90, 0xac, 0xb5, 0xaa, 0x00, 0xb1, 0x00, 0xa1,
+ 0x00, 0xcc,
+ },
+};
+
+static const struct s6e8aa0_variant s6e8aa0_variants[] = {
+ {
+ .version = 32,
+ .gamma_tables = s6e8aa0_gamma_tables_v32,
+ }, {
+ .version = 96,
+ .gamma_tables = s6e8aa0_gamma_tables_v96,
+ }, {
+ .version = 142,
+ .gamma_tables = s6e8aa0_gamma_tables_v142,
+ }, {
+ .version = 210,
+ .gamma_tables = s6e8aa0_gamma_tables_v142,
+ }
+};
+
+static void s6e8aa0_brightness_set(struct s6e8aa0 *ctx)
+{
+ const u8 *gamma;
+
+ if (ctx->error)
+ return;
+
+ gamma = ctx->variant->gamma_tables[ctx->brightness];
+
+ if (ctx->version >= 142)
+ s6e8aa0_elvss_nvm_set(ctx);
+
+ s6e8aa0_dcs_write(ctx, gamma, GAMMA_TABLE_LEN);
+
+ /* update gamma table. */
+ s6e8aa0_dcs_write_seq_static(ctx, 0xf7, 0x03);
+}
+
+static void s6e8aa0_panel_init(struct s6e8aa0 *ctx)
+{
+ s6e8aa0_apply_level_1_key(ctx);
+ s6e8aa0_apply_level_2_key(ctx);
+ msleep(20);
+
+ s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(40);
+
+ s6e8aa0_panel_cond_set(ctx);
+ s6e8aa0_display_condition_set(ctx);
+ s6e8aa0_brightness_set(ctx);
+ s6e8aa0_etc_source_control(ctx);
+ s6e8aa0_etc_pentile_control(ctx);
+ s6e8aa0_elvss_nvm_set(ctx);
+ s6e8aa0_etc_power_control(ctx);
+ s6e8aa0_etc_elvss_control(ctx);
+ msleep(ctx->init_delay);
+}
+
+static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx,
+ int size)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ u8 buf[] = {size, 0};
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+ .tx_len = sizeof(buf),
+ .tx_buf = buf
+ };
+ int ret;
+
+ if (ctx->error < 0)
+ return;
+
+ if (!ops || !ops->transfer)
+ ret = -EIO;
+ else
+ ret = ops->transfer(dsi->host, &msg);
+
+ if (ret < 0) {
+ dev_err(ctx->dev,
+ "error %d setting maximum return packet size to %d\n",
+ ret, size);
+ ctx->error = ret;
+ }
+}
+
+static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
+{
+ u8 id[3];
+ int ret, i;
+
+ ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
+ if (ret < 0 || ret < ARRAY_SIZE(id) || id[0] == 0x00) {
+ dev_err(ctx->dev, "read id failed\n");
+ ctx->error = -EIO;
+ return;
+ }
+
+ dev_info(ctx->dev, "ID: 0x%2x, 0x%2x, 0x%2x\n", id[0], id[1], id[2]);
+
+ for (i = 0; i < ARRAY_SIZE(s6e8aa0_variants); ++i) {
+ if (id[1] == s6e8aa0_variants[i].version)
+ break;
+ }
+ if (i >= ARRAY_SIZE(s6e8aa0_variants)) {
+ dev_err(ctx->dev, "unsupported display version %d\n", id[1]);
+ ctx->error = -EINVAL;
+ return;
+ }
+
+ ctx->variant = &s6e8aa0_variants[i];
+ ctx->version = id[1];
+ ctx->id = id[2];
+}
+
+static void s6e8aa0_set_sequence(struct s6e8aa0 *ctx)
+{
+ s6e8aa0_set_maximum_return_packet_size(ctx, 3);
+ s6e8aa0_read_mtp_id(ctx);
+ s6e8aa0_panel_init(ctx);
+ s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int s6e8aa0_power_on(struct s6e8aa0 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ msleep(ctx->power_on_delay);
+
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+
+ msleep(ctx->reset_delay);
+
+ return 0;
+}
+
+static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
+{
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int s6e8aa0_disable(struct drm_panel *panel)
+{
+ struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
+
+ s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
+ s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
+ msleep(40);
+
+ s6e8aa0_clear_error(ctx);
+
+ return s6e8aa0_power_off(ctx);
+}
+
+static int s6e8aa0_enable(struct drm_panel *panel)
+{
+ struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
+ int ret;
+
+ ret = s6e8aa0_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ s6e8aa0_set_sequence(ctx);
+ ret = ctx->error;
+
+ if (ret < 0)
+ s6e8aa0_disable(panel);
+
+ return ret;
+}
+
+static int s6e8aa0_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_ERROR("failed to create a new display mode\n");
+ return 0;
+ }
+
+ drm_display_mode_from_videomode(&ctx->vm, mode);
+ mode->width_mm = ctx->width_mm;
+ mode->height_mm = ctx->height_mm;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
+ .disable = s6e8aa0_disable,
+ .enable = s6e8aa0_enable,
+ .get_modes = s6e8aa0_get_modes,
+};
+
+static int s6e8aa0_parse_dt(struct s6e8aa0 *ctx)
+{
+ struct device *dev = ctx->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_get_videomode(np, &ctx->vm, 0);
+ if (ret < 0)
+ return ret;
+
+ of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
+ of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
+ of_property_read_u32(np, "init-delay", &ctx->init_delay);
+ of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
+ of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
+
+ ctx->flip_horizontal = of_property_read_bool(np, "flip-horizontal");
+ ctx->flip_vertical = of_property_read_bool(np, "flip-vertical");
+
+ return 0;
+}
+
+static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e8aa0 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
+ | MIPI_DSI_MODE_VIDEO_HFP | MIPI_DSI_MODE_VIDEO_HBP
+ | MIPI_DSI_MODE_VIDEO_HSA | MIPI_DSI_MODE_EOT_PACKET
+ | MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
+
+ ret = s6e8aa0_parse_dt(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset");
+ if (IS_ERR(ctx->reset_gpio)) {
+ dev_err(dev, "cannot get reset-gpios %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+ ret = gpiod_direction_output(ctx->reset_gpio, 1);
+ if (ret < 0) {
+ dev_err(dev, "cannot configure reset-gpios %d\n", ret);
+ return ret;
+ }
+
+ ctx->brightness = GAMMA_LEVEL_NUM - 1;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &s6e8aa0_drm_funcs;
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0)
+ drm_panel_remove(&ctx->panel);
+
+ return ret;
+}
+
+static int s6e8aa0_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e8aa0 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id s6e8aa0_of_match[] = {
+ { .compatible = "samsung,s6e8aa0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s6e8aa0_of_match);
+
+static struct mipi_dsi_driver s6e8aa0_driver = {
+ .probe = s6e8aa0_probe,
+ .remove = s6e8aa0_remove,
+ .driver = {
+ .name = "panel_s6e8aa0",
+ .owner = THIS_MODULE,
+ .of_match_table = s6e8aa0_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e8aa0_driver);
+
+MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joongmock Shin <jmock.shin@samsung.com>");
+MODULE_AUTHOR("Eunchul Kim <chulspro.kim@samsung.com>");
+MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("MIPI-DSI based s6e8aa0 AMOLED LCD Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 59d52ca2c67f..309f29e9234a 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -22,9 +22,8 @@
*/
#include <linux/backlight.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -44,9 +43,6 @@ struct panel_desc {
} size;
};
-/* TODO: convert to gpiod_*() API once it's been merged */
-#define GPIO_ACTIVE_LOW (1 << 0)
-
struct panel_simple {
struct drm_panel base;
bool enabled;
@@ -57,8 +53,7 @@ struct panel_simple {
struct regulator *supply;
struct i2c_adapter *ddc;
- unsigned long enable_gpio_flags;
- int enable_gpio;
+ struct gpio_desc *enable_gpio;
};
static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
@@ -110,12 +105,8 @@ static int panel_simple_disable(struct drm_panel *panel)
backlight_update_status(p->backlight);
}
- if (gpio_is_valid(p->enable_gpio)) {
- if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
- gpio_set_value(p->enable_gpio, 1);
- else
- gpio_set_value(p->enable_gpio, 0);
- }
+ if (p->enable_gpio)
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
p->enabled = false;
@@ -137,12 +128,8 @@ static int panel_simple_enable(struct drm_panel *panel)
return err;
}
- if (gpio_is_valid(p->enable_gpio)) {
- if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
- gpio_set_value(p->enable_gpio, 0);
- else
- gpio_set_value(p->enable_gpio, 1);
- }
+ if (p->enable_gpio)
+ gpiod_set_value_cansleep(p->enable_gpio, 1);
if (p->backlight) {
p->backlight->props.power = FB_BLANK_UNBLANK;
@@ -185,7 +172,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
{
struct device_node *backlight, *ddc;
struct panel_simple *panel;
- enum of_gpio_flags flags;
int err;
panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
@@ -199,29 +185,20 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
if (IS_ERR(panel->supply))
return PTR_ERR(panel->supply);
- panel->enable_gpio = of_get_named_gpio_flags(dev->of_node,
- "enable-gpios", 0,
- &flags);
- if (gpio_is_valid(panel->enable_gpio)) {
- unsigned int value;
-
- if (flags & OF_GPIO_ACTIVE_LOW)
- panel->enable_gpio_flags |= GPIO_ACTIVE_LOW;
-
- err = gpio_request(panel->enable_gpio, "enable");
- if (err < 0) {
- dev_err(dev, "failed to request GPIO#%u: %d\n",
- panel->enable_gpio, err);
+ panel->enable_gpio = devm_gpiod_get(dev, "enable");
+ if (IS_ERR(panel->enable_gpio)) {
+ err = PTR_ERR(panel->enable_gpio);
+ if (err != -ENOENT) {
+ dev_err(dev, "failed to request GPIO: %d\n", err);
return err;
}
- value = (panel->enable_gpio_flags & GPIO_ACTIVE_LOW) != 0;
-
- err = gpio_direction_output(panel->enable_gpio, value);
+ panel->enable_gpio = NULL;
+ } else {
+ err = gpiod_direction_output(panel->enable_gpio, 0);
if (err < 0) {
- dev_err(dev, "failed to setup GPIO%u: %d\n",
- panel->enable_gpio, err);
- goto free_gpio;
+ dev_err(dev, "failed to setup GPIO: %d\n", err);
+ return err;
}
}
@@ -230,10 +207,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->backlight = of_find_backlight_by_node(backlight);
of_node_put(backlight);
- if (!panel->backlight) {
- err = -EPROBE_DEFER;
- goto free_gpio;
- }
+ if (!panel->backlight)
+ return -EPROBE_DEFER;
}
ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
@@ -265,9 +240,6 @@ free_ddc:
free_backlight:
if (panel->backlight)
put_device(&panel->backlight->dev);
-free_gpio:
- if (gpio_is_valid(panel->enable_gpio))
- gpio_free(panel->enable_gpio);
return err;
}
@@ -287,11 +259,6 @@ static int panel_simple_remove(struct device *dev)
if (panel->backlight)
put_device(&panel->backlight->dev);
- if (gpio_is_valid(panel->enable_gpio))
- gpio_free(panel->enable_gpio);
-
- regulator_disable(panel->supply);
-
return 0;
}
@@ -361,6 +328,28 @@ static const struct panel_desc chunghwa_claa101wb01 = {
},
};
+static const struct drm_display_mode lg_lp129qe_mode = {
+ .clock = 285250,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 48,
+ .hsync_end = 2560 + 48 + 32,
+ .htotal = 2560 + 48 + 32 + 80,
+ .vdisplay = 1700,
+ .vsync_start = 1700 + 3,
+ .vsync_end = 1700 + 3 + 10,
+ .vtotal = 1700 + 3 + 10 + 36,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc lg_lp129qe = {
+ .modes = &lg_lp129qe_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 272,
+ .height = 181,
+ },
+};
+
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -394,6 +383,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "chunghwa,claa101wb01",
.data = &chunghwa_claa101wb01
}, {
+ .compatible = "lg,lp129qe",
+ .data = &lg_lp129qe,
+ }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
@@ -433,10 +425,65 @@ static struct platform_driver panel_simple_platform_driver = {
struct panel_desc_dsi {
struct panel_desc desc;
+ unsigned long flags;
enum mipi_dsi_pixel_format format;
unsigned int lanes;
};
+static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
+ .clock = 71000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 32,
+ .hsync_end = 800 + 32 + 1,
+ .htotal = 800 + 32 + 1 + 57,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 28,
+ .vsync_end = 1280 + 28 + 1,
+ .vtotal = 1280 + 28 + 1 + 14,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
+ .desc = {
+ .modes = &lg_ld070wx3_sl01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 94,
+ .height = 151,
+ },
+ },
+ .flags = MIPI_DSI_MODE_VIDEO,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
+static const struct drm_display_mode lg_lh500wx1_sd03_mode = {
+ .clock = 67000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 12,
+ .hsync_end = 720 + 12 + 4,
+ .htotal = 720 + 12 + 4 + 112,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 8,
+ .vsync_end = 1280 + 8 + 4,
+ .vtotal = 1280 + 8 + 4 + 12,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi lg_lh500wx1_sd03 = {
+ .desc = {
+ .modes = &lg_lh500wx1_sd03_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 62,
+ .height = 110,
+ },
+ },
+ .flags = MIPI_DSI_MODE_VIDEO,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
static const struct drm_display_mode panasonic_vvx10f004b00_mode = {
.clock = 157200,
.hdisplay = 1920,
@@ -459,12 +506,19 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.height = 136,
},
},
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
.format = MIPI_DSI_FMT_RGB888,
.lanes = 4,
};
static const struct of_device_id dsi_of_match[] = {
{
+ .compatible = "lg,ld070wx3-sl01",
+ .data = &lg_ld070wx3_sl01
+ }, {
+ .compatible = "lg,lh500wx1-sd03",
+ .data = &lg_lh500wx1_sd03
+ }, {
.compatible = "panasonic,vvx10f004b00",
.data = &panasonic_vvx10f004b00
}, {
@@ -489,6 +543,7 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
if (err < 0)
return err;
+ dsi->mode_flags = desc->flags;
dsi->format = desc->format;
dsi->lanes = desc->lanes;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 798bde2e5881..41bdd174657e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -527,7 +527,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
bool recreate_primary = false;
int ret;
int surf_id;
- if (!crtc->fb) {
+ if (!crtc->primary->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -536,7 +536,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
qfb = to_qxl_framebuffer(old_fb);
old_bo = gem_to_qxl_bo(qfb->obj);
}
- qfb = to_qxl_framebuffer(crtc->fb);
+ qfb = to_qxl_framebuffer(crtc->primary->fb);
bo = gem_to_qxl_bo(qfb->obj);
if (!m)
/* and do we care? */
@@ -609,14 +609,14 @@ static void qxl_crtc_disable(struct drm_crtc *crtc)
struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct qxl_device *qdev = dev->dev_private;
- if (crtc->fb) {
- struct qxl_framebuffer *qfb = to_qxl_framebuffer(crtc->fb);
+ if (crtc->primary->fb) {
+ struct qxl_framebuffer *qfb = to_qxl_framebuffer(crtc->primary->fb);
struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
int ret;
ret = qxl_bo_reserve(bo, false);
qxl_bo_unpin(bo);
qxl_bo_unreserve(bo);
- crtc->fb = NULL;
+ crtc->primary->fb = NULL;
}
qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 0, 0, 0);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 8691c76c5ef0..b95f144f0b49 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -82,8 +82,6 @@ int qxl_bo_create(struct qxl_device *qdev,
enum ttm_bo_type type;
int r;
- if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
- qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
if (kernel)
type = ttm_bo_type_kernel;
else
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 821ab7b9409b..14e776f1d14e 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -349,7 +349,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
qxl_fence_add_release_locked(&qbo->fence, release->id);
ttm_bo_add_to_lru(bo);
- ww_mutex_unlock(&bo->resv->lock);
+ __ttm_bo_unreserve(bo);
entry->reserved = false;
}
spin_unlock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index c7e7e6590c2b..d52c27527b9a 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -433,6 +433,7 @@ static int qxl_sync_obj_flush(void *sync_obj)
static void qxl_sync_obj_unref(void **sync_obj)
{
+ *sync_obj = NULL;
}
static void *qxl_sync_obj_ref(void *sync_obj)
@@ -493,7 +494,9 @@ int qxl_ttm_init(struct qxl_device *qdev)
/* No other users of address space so set it to 0 */
r = ttm_bo_device_init(&qdev->mman.bdev,
qdev->mman.bo_global_ref.ref.object,
- &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0);
+ &qxl_bo_driver,
+ qdev->ddev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET, 0);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
@@ -518,8 +521,6 @@ int qxl_ttm_init(struct qxl_device *qdev)
((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
DRM_INFO("qxl: %uM of Surface memory size\n",
(unsigned)qdev->surfaceram_size / (1024 * 1024));
- if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
- qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
r = qxl_ttm_debugfs_init(qdev);
if (r) {
DRM_ERROR("Failed to init debugfs\n");
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 306364a1ecda..09433534dc47 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
- ci_dpm.o dce6_afmt.o
+ ci_dpm.o dce6_afmt.o radeon_vm.o
# add async DMA block
radeon-y += \
@@ -99,6 +99,12 @@ radeon-y += \
uvd_v3_1.o \
uvd_v4_2.o
+# add VCE block
+radeon-y += \
+ radeon_vce.o \
+ vce_v1_0.o \
+ vce_v2_0.o \
+
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
radeon-$(CONFIG_ACPI) += radeon_acpi.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index daa4dd375ab1..fb187c78978f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1106,7 +1106,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
int r;
/* no fb bound */
- if (!atomic && !crtc->fb) {
+ if (!atomic && !crtc->primary->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -1116,8 +1116,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
target_fb = fb;
}
else {
- radeon_fb = to_radeon_framebuffer(crtc->fb);
- target_fb = crtc->fb;
+ radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ target_fb = crtc->primary->fb;
}
/* If atomic, assume fb object is pinned & idle & fenced and
@@ -1316,7 +1316,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
/* set pageflip to happen anywhere in vblank interval */
WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
- if (!atomic && fb && fb != crtc->fb) {
+ if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
@@ -1350,7 +1350,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
int r;
/* no fb bound */
- if (!atomic && !crtc->fb) {
+ if (!atomic && !crtc->primary->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -1360,8 +1360,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
target_fb = fb;
}
else {
- radeon_fb = to_radeon_framebuffer(crtc->fb);
- target_fb = crtc->fb;
+ radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ target_fb = crtc->primary->fb;
}
obj = radeon_fb->obj;
@@ -1485,7 +1485,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
/* set pageflip to happen anywhere in vblank interval */
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
- if (!atomic && fb && fb != crtc->fb) {
+ if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
@@ -1972,12 +1972,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->fb) {
+ if (crtc->primary->fb) {
int r;
struct radeon_framebuffer *radeon_fb;
struct radeon_bo *rbo;
- radeon_fb = to_radeon_framebuffer(crtc->fb);
+ radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r))
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 4ad7643fce5f..8b0ab170cef9 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -142,101 +142,69 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
return recv_bytes;
}
-static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
- u16 address, u8 *send, u8 send_bytes, u8 delay)
-{
- struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
- int ret;
- u8 msg[20];
- int msg_bytes = send_bytes + 4;
- u8 ack;
- unsigned retry;
-
- if (send_bytes > 16)
- return -1;
+#define HEADER_SIZE 4
- msg[0] = address;
- msg[1] = address >> 8;
- msg[2] = DP_AUX_NATIVE_WRITE << 4;
- msg[3] = (msg_bytes << 4) | (send_bytes - 1);
- memcpy(&msg[4], send, send_bytes);
-
- for (retry = 0; retry < 7; retry++) {
- ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
- msg, msg_bytes, NULL, 0, delay, &ack);
- if (ret == -EBUSY)
- continue;
- else if (ret < 0)
- return ret;
- ack >>= 4;
- if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
- return send_bytes;
- else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
- usleep_range(400, 500);
- else
- return -EIO;
- }
-
- return -EIO;
-}
-
-static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
- u16 address, u8 *recv, int recv_bytes, u8 delay)
+static ssize_t
+radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
- struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
- u8 msg[4];
- int msg_bytes = 4;
- u8 ack;
+ struct radeon_i2c_chan *chan =
+ container_of(aux, struct radeon_i2c_chan, aux);
int ret;
- unsigned retry;
-
- msg[0] = address;
- msg[1] = address >> 8;
- msg[2] = DP_AUX_NATIVE_READ << 4;
- msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
-
- for (retry = 0; retry < 7; retry++) {
- ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
- msg, msg_bytes, recv, recv_bytes, delay, &ack);
- if (ret == -EBUSY)
- continue;
- else if (ret < 0)
- return ret;
- ack >>= 4;
- if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
- return ret;
- else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
- usleep_range(400, 500);
- else if (ret == 0)
- return -EPROTO;
- else
- return -EIO;
+ u8 tx_buf[20];
+ size_t tx_size;
+ u8 ack, delay = 0;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ tx_buf[0] = msg->address & 0xff;
+ tx_buf[1] = msg->address >> 8;
+ tx_buf[2] = msg->request << 4;
+ tx_buf[3] = msg->size - 1;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ tx_size = HEADER_SIZE + msg->size;
+ tx_buf[3] |= tx_size << 4;
+ memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size);
+ ret = radeon_process_aux_ch(chan,
+ tx_buf, tx_size, NULL, 0, delay, &ack);
+ if (ret >= 0)
+ /* Return payload size. */
+ ret = msg->size;
+ break;
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ tx_size = HEADER_SIZE;
+ tx_buf[3] |= tx_size << 4;
+ ret = radeon_process_aux_ch(chan,
+ tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- return -EIO;
-}
+ if (ret > 0)
+ msg->reply = ack >> 4;
-static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
- u16 reg, u8 val)
-{
- radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
+ return ret;
}
-static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector,
- u16 reg)
+void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
{
- u8 val = 0;
-
- radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
- return val;
+ dig_connector->dp_i2c_bus->aux.dev = radeon_connector->base.kdev;
+ dig_connector->dp_i2c_bus->aux.transfer = radeon_dp_aux_transfer;
}
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
+ struct radeon_i2c_chan *auxch = i2c_get_adapdata(adapter);
u16 address = algo_data->address;
u8 msg[5];
u8 reply[2];
@@ -246,34 +214,30 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
int ret;
u8 ack;
- /* Set up the command byte */
- if (mode & MODE_I2C_READ)
- msg[2] = DP_AUX_I2C_READ << 4;
- else
- msg[2] = DP_AUX_I2C_WRITE << 4;
-
- if (!(mode & MODE_I2C_STOP))
- msg[2] |= DP_AUX_I2C_MOT << 4;
-
+ /* Set up the address */
msg[0] = address;
msg[1] = address >> 8;
- switch (mode) {
- case MODE_I2C_WRITE:
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ) {
+ msg[2] = DP_AUX_I2C_READ << 4;
+ msg_bytes = 4;
+ msg[3] = msg_bytes << 4;
+ } else {
+ msg[2] = DP_AUX_I2C_WRITE << 4;
msg_bytes = 5;
msg[3] = msg_bytes << 4;
msg[4] = write_byte;
- break;
- case MODE_I2C_READ:
- msg_bytes = 4;
- msg[3] = msg_bytes << 4;
- break;
- default:
- msg_bytes = 4;
- msg[3] = 3 << 4;
- break;
}
+ /* special handling for start/stop */
+ if (mode & (MODE_I2C_START | MODE_I2C_STOP))
+ msg[3] = 3 << 4;
+
+ /* Set MOT bit for all but stop */
+ if ((mode & MODE_I2C_STOP) == 0)
+ msg[2] |= DP_AUX_I2C_MOT << 4;
+
for (retry = 0; retry < 7; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
@@ -472,11 +436,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
- if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
+ if (drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_SINK_OUI, buf, 3))
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
- if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
+ if (drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_BRANCH_OUI, buf, 3))
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
}
@@ -487,8 +451,8 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
u8 msg[DP_DPCD_SIZE];
int ret, i;
- ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
- DP_DPCD_SIZE, 0);
+ ret = drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_DPCD_REV, msg,
+ DP_DPCD_SIZE);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
DRM_DEBUG_KMS("DPCD: ");
@@ -510,6 +474,7 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
u8 tmp;
@@ -517,9 +482,15 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
if (!ASIC_IS_DCE4(rdev))
return panel_mode;
+ if (!radeon_connector->con_priv)
+ return panel_mode;
+
+ dig_connector = radeon_connector->con_priv;
+
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
- tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+ drm_dp_dpcd_readb(&dig_connector->dp_i2c_bus->aux,
+ DP_EDP_CONFIGURATION_CAP, &tmp);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
@@ -529,7 +500,8 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
/* eDP */
- tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+ drm_dp_dpcd_readb(&dig_connector->dp_i2c_bus->aux,
+ DP_EDP_CONFIGURATION_CAP, &tmp);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
}
@@ -577,37 +549,42 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
return MODE_OK;
}
-static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
- u8 link_status[DP_LINK_STATUS_SIZE])
-{
- int ret;
- ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
- link_status, DP_LINK_STATUS_SIZE, 100);
- if (ret <= 0) {
- return false;
- }
-
- DRM_DEBUG_KMS("link status %6ph\n", link_status);
- return true;
-}
-
bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
{
u8 link_status[DP_LINK_STATUS_SIZE];
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
- if (!radeon_dp_get_link_status(radeon_connector, link_status))
+ if (drm_dp_dpcd_read_link_status(&dig->dp_i2c_bus->aux, link_status) <= 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
return true;
}
+void radeon_dp_set_rx_power_state(struct drm_connector *connector,
+ u8 power_state)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
+
+ if (!radeon_connector->con_priv)
+ return;
+
+ dig_connector = radeon_connector->con_priv;
+
+ /* power up/down the sink */
+ if (dig_connector->dpcd[0] >= 0x11) {
+ drm_dp_dpcd_writeb(&dig_connector->dp_i2c_bus->aux,
+ DP_SET_POWER, power_state);
+ usleep_range(1000, 2000);
+ }
+}
+
+
struct radeon_dp_link_train_info {
struct radeon_device *rdev;
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
int enc_id;
int dp_clock;
int dp_lane_count;
@@ -617,6 +594,7 @@ struct radeon_dp_link_train_info {
u8 link_status[DP_LINK_STATUS_SIZE];
u8 tries;
bool use_dpencoder;
+ struct drm_dp_aux *aux;
};
static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
@@ -627,8 +605,8 @@ static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
0, dp_info->train_set[0]); /* sets all lanes at once */
/* set the vs/emph on the sink */
- radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET,
- dp_info->train_set, dp_info->dp_lane_count, 0);
+ drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET,
+ dp_info->train_set, dp_info->dp_lane_count);
}
static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
@@ -663,7 +641,7 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
}
/* enable training pattern on the sink */
- radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp);
+ drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp);
}
static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
@@ -673,34 +651,30 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
u8 tmp;
/* power up the sink */
- if (dp_info->dpcd[0] >= 0x11) {
- radeon_write_dpcd_reg(dp_info->radeon_connector,
- DP_SET_POWER, DP_SET_POWER_D0);
- usleep_range(1000, 2000);
- }
+ radeon_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0);
/* possibly enable downspread on the sink */
if (dp_info->dpcd[3] & 0x1)
- radeon_write_dpcd_reg(dp_info->radeon_connector,
- DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
+ drm_dp_dpcd_writeb(dp_info->aux,
+ DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
else
- radeon_write_dpcd_reg(dp_info->radeon_connector,
- DP_DOWNSPREAD_CTRL, 0);
+ drm_dp_dpcd_writeb(dp_info->aux,
+ DP_DOWNSPREAD_CTRL, 0);
if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
(dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
- radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+ drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
}
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
+ drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp);
/* set the link rate on the sink */
tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
- radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
+ drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp);
/* start training on the source */
if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
@@ -711,9 +685,9 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
dp_info->dp_clock, dp_info->enc_id, 0);
/* disable the training pattern on the sink */
- radeon_write_dpcd_reg(dp_info->radeon_connector,
- DP_TRAINING_PATTERN_SET,
- DP_TRAINING_PATTERN_DISABLE);
+ drm_dp_dpcd_writeb(dp_info->aux,
+ DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
return 0;
}
@@ -723,9 +697,9 @@ static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info
udelay(400);
/* disable the training pattern on the sink */
- radeon_write_dpcd_reg(dp_info->radeon_connector,
- DP_TRAINING_PATTERN_SET,
- DP_TRAINING_PATTERN_DISABLE);
+ drm_dp_dpcd_writeb(dp_info->aux,
+ DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
/* disable the training pattern on the source */
if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
@@ -757,7 +731,8 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
while (1) {
drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+ if (drm_dp_dpcd_read_link_status(dp_info->aux,
+ dp_info->link_status) <= 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -819,7 +794,8 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
while (1) {
drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+ if (drm_dp_dpcd_read_link_status(dp_info->aux,
+ dp_info->link_status) <= 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -902,7 +878,7 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
else
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
- tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
+ drm_dp_dpcd_readb(&dig_connector->dp_i2c_bus->aux, DP_MAX_LANE_COUNT, &tmp);
if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
dp_info.tp3_supported = true;
else
@@ -912,9 +888,9 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
dp_info.rdev = rdev;
dp_info.encoder = encoder;
dp_info.connector = connector;
- dp_info.radeon_connector = radeon_connector;
dp_info.dp_lane_count = dig_connector->dp_lane_count;
dp_info.dp_clock = dig_connector->dp_clock;
+ dp_info.aux = &dig_connector->dp_i2c_bus->aux;
if (radeon_dp_link_train_init(&dp_info))
goto done;
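
Note on the atombios_dp.c hunks above: the driver drops its private DPCD read/write wrappers and routes everything through the shared drm_dp_aux helpers, with radeon_dp_aux_transfer() as the low-level hook. Below is a minimal sketch of that contract for reference, using hypothetical example_* names (only the drm_dp_* calls, the struct drm_dp_aux/drm_dp_aux_msg fields, and the DPCD defines are the real shared API); it is not part of the patch.

#include <linux/device.h>
#include <drm/drm_dp_helper.h>

/* hypothetical low-level channel; radeon's equivalent is radeon_process_aux_ch() */
extern ssize_t example_hw_xfer(u8 request, u32 address, void *buf, size_t size);

static ssize_t example_aux_transfer(struct drm_dp_aux *aux,
				    struct drm_dp_aux_msg *msg)
{
	ssize_t ret = example_hw_xfer(msg->request, msg->address,
				      msg->buffer, msg->size);

	if (ret >= 0)
		msg->reply = DP_AUX_NATIVE_REPLY_ACK;	/* report the sink's reply */
	return ret;	/* bytes transferred, or a negative error code */
}

static void example_aux_init(struct drm_dp_aux *aux, struct device *dev)
{
	/* the two fields radeon_dp_aux_init() fills in above */
	aux->dev = dev;
	aux->transfer = example_aux_transfer;
}

static u8 example_read_dpcd_rev(struct drm_dp_aux *aux)
{
	u8 rev = 0;

	/* one-byte DPCD read built by the drm core on top of .transfer */
	drm_dp_dpcd_readb(aux, DP_DPCD_REV, &rev);
	return rev;
}
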
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 607dc14d195e..e6eb5097597f 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1633,10 +1633,16 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector = NULL;
struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
+ bool travis_quirk = false;
if (connector) {
radeon_connector = to_radeon_connector(connector);
radeon_dig_connector = radeon_connector->con_priv;
+ if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+ ENCODER_OBJECT_ID_TRAVIS) &&
+ (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+ !ASIC_IS_DCE5(rdev))
+ travis_quirk = true;
}
switch (mode) {
@@ -1657,17 +1663,13 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
}
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
} else if (ASIC_IS_DCE4(rdev)) {
/* setup and enable the encoder */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
- /* enable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
} else {
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1675,68 +1677,56 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
ATOM_TRANSMITTER_ACTION_POWER_ON);
radeon_dig_connector->edp_on = true;
}
+ }
+ /* enable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+ /* DP_SET_POWER_D0 is set in radeon_dp_link_train */
radeon_dp_link_train(encoder, connector);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ atombios_dig_transmitter_setup(encoder,
+ ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ if (ext_encoder)
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (ASIC_IS_DCE4(rdev)) {
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector)
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+ }
+ if (ext_encoder)
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder,
+ ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) &&
+ connector && !travis_quirk)
+ radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
+ if (ASIC_IS_DCE4(rdev)) {
/* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_transmitter_setup(encoder,
+ ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
} else {
/* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_transmitter_setup(encoder,
+ ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
- if (ASIC_IS_DCE4(rdev))
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+ if (travis_quirk)
+ radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
radeon_dig_connector->edp_on = false;
}
}
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
- break;
- }
-}
-
-static void
-radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
- struct drm_encoder *ext_encoder,
- int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- default:
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
- } else
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
- } else
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
break;
}
}
@@ -1747,7 +1737,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
@@ -1807,9 +1796,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
return;
}
- if (ext_encoder)
- radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);
-
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
}
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index ea103ccdf4bd..f81d7ca134db 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2601,6 +2601,10 @@ int btc_dpm_init(struct radeon_device *rdev)
pi->min_vddc_in_table = 0;
pi->max_vddc_in_table = 0;
+ ret = r600_get_platform_caps(rdev);
+ if (ret)
+ return ret;
+
ret = rv7xx_parse_power_table(rdev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 8d49104ca6c2..cad89a977527 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -172,6 +172,8 @@ extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern int ci_mc_load_microcode(struct radeon_device *rdev);
+extern void cik_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable);
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
struct atom_voltage_table_entry *voltage_table,
@@ -746,6 +748,14 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
+ if (rps->vce_active) {
+ rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+ rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
+ } else {
+ rps->evclk = 0;
+ rps->ecclk = 0;
+ }
+
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ci_dpm_vblank_too_short(rdev))
disable_mclk_switching = true;
@@ -804,6 +814,13 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
sclk = ps->performance_levels[0].sclk;
}
+ if (rps->vce_active) {
+ if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
+ sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
+ if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
+ mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
+ }
+
ps->performance_levels[0].sclk = sclk;
ps->performance_levels[0].mclk = mclk;
@@ -3468,7 +3485,6 @@ static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
0 : -EINVAL;
}
-#if 0
static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -3501,6 +3517,7 @@ static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
0 : -EINVAL;
}
+#if 0
static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -3587,7 +3604,6 @@ static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
return ci_enable_uvd_dpm(rdev, !gate);
}
-#if 0
static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
{
u8 i;
@@ -3608,15 +3624,15 @@ static int ci_update_vce_dpm(struct radeon_device *rdev,
struct radeon_ps *radeon_current_state)
{
struct ci_power_info *pi = ci_get_pi(rdev);
- bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
- bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
int ret = 0;
u32 tmp;
- if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
- if (new_vce_clock_non_zero) {
- pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
+ if (radeon_current_state->evclk != radeon_new_state->evclk) {
+ if (radeon_new_state->evclk) {
+ /* turn the clocks on when encoding */
+ cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
+ pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
tmp = RREG32_SMC(DPM_TABLE_475);
tmp &= ~VceBootLevel_MASK;
tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
@@ -3624,12 +3640,16 @@ static int ci_update_vce_dpm(struct radeon_device *rdev,
ret = ci_enable_vce_dpm(rdev, true);
} else {
+ /* turn the clocks off when not encoding */
+ cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
+
ret = ci_enable_vce_dpm(rdev, false);
}
}
return ret;
}
+#if 0
static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
return ci_enable_samu_dpm(rdev, gate);
@@ -4752,13 +4772,13 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
return ret;
}
-#if 0
+
ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
if (ret) {
DRM_ERROR("ci_update_vce_dpm failed\n");
return ret;
}
-#endif
+
ret = ci_update_sclk_t(rdev);
if (ret) {
DRM_ERROR("ci_update_sclk_t failed\n");
@@ -4959,9 +4979,6 @@ static int ci_parse_power_table(struct radeon_device *rdev)
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
- rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
@@ -4998,6 +5015,21 @@ static int ci_parse_power_table(struct radeon_device *rdev)
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
}
rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+ /* fill in the vce power states */
+ for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
+ u32 sclk, mclk;
+ clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+ sclk |= clock_info->ci.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+ mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+ rdev->pm.dpm.vce_states[i].sclk = sclk;
+ rdev->pm.dpm.vce_states[i].mclk = mclk;
+ }
+
return 0;
}
@@ -5077,17 +5109,25 @@ int ci_dpm_init(struct radeon_device *rdev)
ci_dpm_fini(rdev);
return ret;
}
- ret = ci_parse_power_table(rdev);
+
+ ret = r600_get_platform_caps(rdev);
if (ret) {
ci_dpm_fini(rdev);
return ret;
}
+
ret = r600_parse_extended_power_table(rdev);
if (ret) {
ci_dpm_fini(rdev);
return ret;
}
+ ret = ci_parse_power_table(rdev);
+ if (ret) {
+ ci_dpm_fini(rdev);
+ return ret;
+ }
+
pi->dll_default_on = false;
pi->sram_end = SMC_RAM_END;
@@ -5120,6 +5160,7 @@ int ci_dpm_init(struct radeon_device *rdev)
pi->caps_sclk_throttle_low_notification = false;
pi->caps_uvd_dpm = true;
+ pi->caps_vce_dpm = true;
ci_get_leakage_voltages(rdev);
ci_patch_dependency_tables_with_leakage(rdev);
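
The ci_dpm.c hunks above wire VCE into DPM: states with vce_active get their evclk/ecclk filled from the selected VCE level, the first performance level is clamped up to that level's sclk/mclk, and ci_update_vce_dpm() now toggles VCE clockgating and VCE DPM whenever evclk changes across a state switch. A small sketch of the clamping rule follows, written as a hypothetical helper (the example_* name is an assumption; the rdev->pm.dpm.vce_level and vce_states fields are the ones used in the hunks):

static void example_clamp_clocks_for_vce(struct radeon_device *rdev,
					 u32 *sclk, u32 *mclk)
{
	unsigned level = rdev->pm.dpm.vce_level;

	/* never run the engine/memory clocks below what the VCE state needs */
	if (*sclk < rdev->pm.dpm.vce_states[level].sclk)
		*sclk = rdev->pm.dpm.vce_states[level].sclk;
	if (*mclk < rdev->pm.dpm.vce_states[level].mclk)
		*mclk = rdev->pm.dpm.vce_states[level].mclk;
}
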
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index bbb17841a9e5..745143c2358f 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -75,6 +75,7 @@ extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
extern int cik_sdma_resume(struct radeon_device *rdev);
extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
extern void cik_sdma_fini(struct radeon_device *rdev);
+extern void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable);
static void cik_rlc_stop(struct radeon_device *rdev);
static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
@@ -1095,7 +1096,7 @@ static const u32 spectre_golden_registers[] =
0x8a14, 0xf000003f, 0x00000007,
0x8b24, 0xffffffff, 0x00ffffff,
0x28350, 0x3f3f3fff, 0x00000082,
- 0x28355, 0x0000003f, 0x00000000,
+ 0x28354, 0x0000003f, 0x00000000,
0x3e78, 0x00000001, 0x00000002,
0x913c, 0xffff03df, 0x00000004,
0xc768, 0x00000008, 0x00000008,
@@ -2028,6 +2029,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 5:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 6:
@@ -2048,6 +2050,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 9:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
break;
case 10:
@@ -2070,6 +2073,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 13:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
break;
case 14:
@@ -2092,6 +2096,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 27:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
break;
case 28:
@@ -2246,6 +2251,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 5:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 6:
@@ -2266,6 +2272,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 9:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
break;
case 10:
@@ -2288,6 +2295,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 13:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
break;
case 14:
@@ -2310,6 +2318,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 27:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
break;
case 28:
@@ -2466,6 +2475,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 5:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 6:
@@ -2486,6 +2496,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 9:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
break;
case 10:
@@ -2508,6 +2519,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 13:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
break;
case 14:
@@ -2530,6 +2542,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 27:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
break;
case 28:
@@ -2592,6 +2605,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 5:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 6:
@@ -2612,6 +2626,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 9:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
break;
case 10:
@@ -2634,6 +2649,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 13:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
break;
case 14:
@@ -2656,6 +2672,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 27:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
break;
case 28:
@@ -2812,6 +2829,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 5:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 6:
@@ -2827,11 +2845,13 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
TILE_SPLIT(split_equal_to_row_size));
break;
case 8:
- gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
+ gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P2);
break;
case 9:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2));
break;
case 10:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
@@ -2853,6 +2873,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 13:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
break;
case 14:
@@ -2875,7 +2896,8 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
break;
case 27:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2));
break;
case 28:
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
@@ -4030,8 +4052,6 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
WREG32(CP_RB0_BASE, rb_addr);
WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));
- ring->rptr = RREG32(CP_RB0_RPTR);
-
/* start the ring */
cik_cp_gfx_start(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
@@ -4589,8 +4609,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
rdev->ring[idx].wptr = 0;
mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
- rdev->ring[idx].rptr = RREG32(CP_HQD_PQ_RPTR);
- mqd->queue_state.cp_hqd_pq_rptr = rdev->ring[idx].rptr;
+ mqd->queue_state.cp_hqd_pq_rptr = RREG32(CP_HQD_PQ_RPTR);
/* set the vmid for the queue */
mqd->queue_state.cp_hqd_vmid = 0;
@@ -5120,11 +5139,9 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
if (!(reset_mask & (RADEON_RESET_GFX |
RADEON_RESET_COMPUTE |
RADEON_RESET_CP))) {
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
@@ -6144,6 +6161,10 @@ void cik_update_cg(struct radeon_device *rdev,
cik_enable_hdp_mgcg(rdev, enable);
cik_enable_hdp_ls(rdev, enable);
}
+
+ if (block & RADEON_CG_BLOCK_VCE) {
+ vce_v2_0_enable_mgcg(rdev, enable);
+ }
}
static void cik_init_cg(struct radeon_device *rdev)
@@ -6521,8 +6542,8 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
buffer[count++] = cpu_to_le32(0x00000000);
break;
case CHIP_HAWAII:
- buffer[count++] = 0x3a00161a;
- buffer[count++] = 0x0000002e;
+ buffer[count++] = cpu_to_le32(0x3a00161a);
+ buffer[count++] = cpu_to_le32(0x0000002e);
break;
default:
buffer[count++] = cpu_to_le32(0x00000000);
@@ -7493,6 +7514,20 @@ restart_ih:
/* reset addr and status */
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
+ case 167: /* VCE */
+ DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
+ switch (src_data) {
+ case 0:
+ radeon_fence_process(rdev, TN_RING_TYPE_VCE1_INDEX);
+ break;
+ case 1:
+ radeon_fence_process(rdev, TN_RING_TYPE_VCE2_INDEX);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
case 176: /* GFX RB CP_INT */
case 177: /* GFX IB CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
@@ -7792,6 +7827,22 @@ static int cik_startup(struct radeon_device *rdev)
if (r)
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+ r = radeon_vce_resume(rdev);
+ if (!r) {
+ r = vce_v2_0_resume(rdev);
+ if (!r)
+ r = radeon_fence_driver_start_ring(rdev,
+ TN_RING_TYPE_VCE1_INDEX);
+ if (!r)
+ r = radeon_fence_driver_start_ring(rdev,
+ TN_RING_TYPE_VCE2_INDEX);
+ }
+ if (r) {
+ dev_err(rdev->dev, "VCE init error (%d).\n", r);
+ rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+ rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+ }
+
/* Enable IRQ */
if (!rdev->irq.installed) {
r = radeon_irq_kms_init(rdev);
@@ -7867,6 +7918,23 @@ static int cik_startup(struct radeon_device *rdev)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
+ r = -ENOENT;
+
+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+ if (ring->ring_size)
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+ VCE_CMD_NO_OP);
+
+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+ if (ring->ring_size)
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+ VCE_CMD_NO_OP);
+
+ if (!r)
+ r = vce_v1_0_init(rdev);
+ else if (r != -ENOENT)
+ DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -7938,6 +8006,7 @@ int cik_suspend(struct radeon_device *rdev)
cik_sdma_enable(rdev, false);
uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
+ radeon_vce_suspend(rdev);
cik_fini_pg(rdev);
cik_fini_cg(rdev);
cik_irq_suspend(rdev);
@@ -8070,6 +8139,17 @@ int cik_init(struct radeon_device *rdev)
r600_ring_init(rdev, ring, 4096);
}
+ r = radeon_vce_init(rdev);
+ if (!r) {
+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 4096);
+
+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 4096);
+ }
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -8131,6 +8211,7 @@ void cik_fini(struct radeon_device *rdev)
radeon_irq_kms_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ radeon_vce_fini(rdev);
cik_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
@@ -8869,6 +8950,41 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
return r;
}
+int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
+{
+ int r, i;
+ struct atom_clock_dividers dividers;
+ u32 tmp;
+
+ r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ ecclk, false, &dividers);
+ if (r)
+ return r;
+
+ for (i = 0; i < 100; i++) {
+ if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
+ break;
+ mdelay(10);
+ }
+ if (i == 100)
+ return -ETIMEDOUT;
+
+ tmp = RREG32_SMC(CG_ECLK_CNTL);
+ tmp &= ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK);
+ tmp |= dividers.post_divider;
+ WREG32_SMC(CG_ECLK_CNTL, tmp);
+
+ for (i = 0; i < 100; i++) {
+ if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
+ break;
+ mdelay(10);
+ }
+ if (i == 100)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
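
cik_set_vce_clocks() above programs the ECLK divider and polls CG_ECLK_STATUS both before and after the write. A sketch of that repeated wait, factored into a hypothetical helper (the register and bit names are the ones added in cikd.h below; the helper itself is not in the patch): poll up to roughly one second in 10 ms steps, then give up with -ETIMEDOUT.

static int example_wait_for_eclk(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
			return 0;	/* ECLK reported stable */
		mdelay(10);
	}
	return -ETIMEDOUT;	/* ~1 s without the status bit */
}
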
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 94626ea90fa5..89b4afa5041c 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -369,8 +369,6 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
ring->wptr = 0;
WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
- ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
-
/* enable DMA RB */
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
@@ -713,11 +711,9 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
mask = RADEON_RESET_DMA1;
if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
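
Both cik.c and cik_sdma.c above drop the radeon_ring_force_activity() call and pass rdev to radeon_ring_lockup_update(). The is_lockup hooks therefore converge on the shape sketched below; the example_* wrapper and its ring_reset_bits parameter are assumptions, only the two radeon_ring_* calls are the real API as used in the hunks.

static bool example_ring_is_lockup(struct radeon_device *rdev,
				   struct radeon_ring *ring,
				   u32 reset_mask, u32 ring_reset_bits)
{
	if (!(reset_mask & ring_reset_bits)) {
		/* engine looks healthy: just refresh the lockup bookkeeping */
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	/* engine flagged for reset: check whether the ring actually stalled */
	return radeon_ring_test_lockup(rdev, ring);
}
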
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 98bae9d7b74d..213873270d5f 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -203,6 +203,12 @@
#define CTF_TEMP_MASK 0x0003fe00
#define CTF_TEMP_SHIFT 9
+#define CG_ECLK_CNTL 0xC05000AC
+# define ECLK_DIVIDER_MASK 0x7f
+# define ECLK_DIR_CNTL_EN (1 << 8)
+#define CG_ECLK_STATUS 0xC05000B0
+# define ECLK_STATUS (1 << 0)
+
#define CG_SPLL_FUNC_CNTL 0xC0500140
#define SPLL_RESET (1 << 0)
#define SPLL_PWRON (1 << 1)
@@ -2010,4 +2016,47 @@
/* UVD CTX indirect */
#define UVD_CGC_MEM_CTRL 0xC0
+/* VCE */
+
+#define VCE_VCPU_CACHE_OFFSET0 0x20024
+#define VCE_VCPU_CACHE_SIZE0 0x20028
+#define VCE_VCPU_CACHE_OFFSET1 0x2002c
+#define VCE_VCPU_CACHE_SIZE1 0x20030
+#define VCE_VCPU_CACHE_OFFSET2 0x20034
+#define VCE_VCPU_CACHE_SIZE2 0x20038
+#define VCE_RB_RPTR2 0x20178
+#define VCE_RB_WPTR2 0x2017c
+#define VCE_RB_RPTR 0x2018c
+#define VCE_RB_WPTR 0x20190
+#define VCE_CLOCK_GATING_A 0x202f8
+# define CGC_CLK_GATE_DLY_TIMER_MASK (0xf << 0)
+# define CGC_CLK_GATE_DLY_TIMER(x) ((x) << 0)
+# define CGC_CLK_GATER_OFF_DLY_TIMER_MASK (0xff << 4)
+# define CGC_CLK_GATER_OFF_DLY_TIMER(x) ((x) << 4)
+# define CGC_UENC_WAIT_AWAKE (1 << 18)
+#define VCE_CLOCK_GATING_B 0x202fc
+#define VCE_CGTT_CLK_OVERRIDE 0x207a0
+#define VCE_UENC_CLOCK_GATING 0x207bc
+# define CLOCK_ON_DELAY_MASK (0xf << 0)
+# define CLOCK_ON_DELAY(x) ((x) << 0)
+# define CLOCK_OFF_DELAY_MASK (0xff << 4)
+# define CLOCK_OFF_DELAY(x) ((x) << 4)
+#define VCE_UENC_REG_CLOCK_GATING 0x207c0
+#define VCE_SYS_INT_EN 0x21300
+# define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3)
+#define VCE_LMI_CTRL2 0x21474
+#define VCE_LMI_CTRL 0x21498
+#define VCE_LMI_VM_CTRL 0x214a0
+#define VCE_LMI_SWAP_CNTL 0x214b4
+#define VCE_LMI_SWAP_CNTL1 0x214b8
+#define VCE_LMI_CACHE_CTRL 0x214f4
+
+#define VCE_CMD_NO_OP 0x00000000
+#define VCE_CMD_END 0x00000001
+#define VCE_CMD_IB 0x00000002
+#define VCE_CMD_FENCE 0x00000003
+#define VCE_CMD_TRAP 0x00000004
+#define VCE_CMD_IB_AUTO 0x00000005
+#define VCE_CMD_SEMAPHORE 0x00000006
+
#endif
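
The VCE register block added to cikd.h above defines both raw masks and shift helpers for the clock-gating delay fields. A usage sketch with illustrative values (the helper and whatever delays a caller picks are assumptions, not taken from the driver): clear the masked fields first, then OR in the new delays through the helper macros.

static void example_program_vce_uenc_cg(struct radeon_device *rdev,
					u32 on_delay, u32 off_delay)
{
	u32 tmp = RREG32(VCE_UENC_CLOCK_GATING);

	/* clear both delay fields, then set the requested values */
	tmp &= ~(CLOCK_ON_DELAY_MASK | CLOCK_OFF_DELAY_MASK);
	tmp |= CLOCK_ON_DELAY(on_delay) | CLOCK_OFF_DELAY(off_delay);
	WREG32(VCE_UENC_CLOCK_GATING, tmp);
}
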
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index cf783fc0ef21..5a9a5f4d7888 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2036,6 +2036,10 @@ int cypress_dpm_init(struct radeon_device *rdev)
pi->min_vddc_in_table = 0;
pi->max_vddc_in_table = 0;
+ ret = r600_get_platform_caps(rdev);
+ if (ret)
+ return ret;
+
ret = rv7xx_parse_power_table(rdev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 27b0ff16082e..b406546440da 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2990,8 +2990,6 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- ring->rptr = RREG32(CP_RB_RPTR);
-
evergreen_cp_start(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
@@ -3952,11 +3950,9 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
if (!(reset_mask & (RADEON_RESET_GFX |
RADEON_RESET_COMPUTE |
RADEON_RESET_CP))) {
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c7cac07f139b..5c8b358f9fba 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1165,7 +1165,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case DB_DEPTH_CONTROL:
track->db_depth_control = radeon_get_ib_value(p, idx);
@@ -1196,12 +1196,12 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
ib[idx] &= ~Z_ARRAY_MODE(0xf);
track->db_z_info &= ~Z_ARRAY_MODE(0xf);
- ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
- track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+ track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+ if (reloc->tiling_flags & RADEON_TILING_MACRO) {
unsigned bankw, bankh, mtaspect, tile_split;
- evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ evergreen_tiling_fields(reloc->tiling_flags,
&bankw, &bankh, &mtaspect,
&tile_split);
ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1237,7 +1237,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
track->db_z_read_offset = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->db_z_read_bo = reloc->robj;
track->db_dirty = true;
break;
@@ -1249,7 +1249,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
track->db_z_write_offset = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->db_z_write_bo = reloc->robj;
track->db_dirty = true;
break;
@@ -1261,7 +1261,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
track->db_s_read_offset = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->db_s_read_bo = reloc->robj;
track->db_dirty = true;
break;
@@ -1273,7 +1273,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
track->db_s_write_offset = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->db_s_write_bo = reloc->robj;
track->db_dirty = true;
break;
@@ -1297,7 +1297,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->vgt_strmout_bo[tmp] = reloc->robj;
track->streamout_dirty = true;
break;
@@ -1317,7 +1317,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
case CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
track->cb_dirty = true;
@@ -1381,8 +1381,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
}
track->cb_dirty = true;
break;
@@ -1399,8 +1399,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
}
track->cb_dirty = true;
break;
@@ -1461,10 +1461,10 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ if (reloc->tiling_flags & RADEON_TILING_MACRO) {
unsigned bankw, bankh, mtaspect, tile_split;
- evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ evergreen_tiling_fields(reloc->tiling_flags,
&bankw, &bankh, &mtaspect,
&tile_split);
ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1489,10 +1489,10 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ if (reloc->tiling_flags & RADEON_TILING_MACRO) {
unsigned bankw, bankh, mtaspect, tile_split;
- evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ evergreen_tiling_fields(reloc->tiling_flags,
&bankw, &bankh, &mtaspect,
&tile_split);
ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1520,7 +1520,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->cb_color_fmask_bo[tmp] = reloc->robj;
break;
case CB_COLOR0_CMASK:
@@ -1537,7 +1537,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->cb_color_cmask_bo[tmp] = reloc->robj;
break;
case CB_COLOR0_FMASK_SLICE:
@@ -1578,7 +1578,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
tmp = (reg - CB_COLOR0_BASE) / 0x3c;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->cb_color_bo[tmp] = reloc->robj;
track->cb_dirty = true;
break;
@@ -1594,7 +1594,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->cb_color_bo[tmp] = reloc->robj;
track->cb_dirty = true;
break;
@@ -1606,7 +1606,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
track->htile_offset = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->htile_bo = reloc->robj;
track->db_dirty = true;
break;
@@ -1723,7 +1723,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
if (p->rdev->family >= CHIP_CAYMAN) {
@@ -1737,7 +1737,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case CAYMAN_SX_SCATTER_EXPORT_BASE:
if (p->rdev->family < CHIP_CAYMAN) {
@@ -1751,7 +1751,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case SX_MISC:
track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
@@ -1836,7 +1836,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
(idx_value & 0xfffffff0) +
((u64)(tmp & 0xff) << 32);
@@ -1882,7 +1882,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
idx_value +
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
@@ -1909,7 +1909,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
idx_value +
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
@@ -1937,7 +1937,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
radeon_get_ib_value(p, idx+1) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -2027,7 +2027,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
- ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff);
r = evergreen_cs_track_check(p);
if (r) {
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
@@ -2049,7 +2049,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -2106,7 +2106,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
tmp = radeon_get_ib_value(p, idx) +
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
- offset = reloc->lobj.gpu_offset + tmp;
+ offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
@@ -2144,7 +2144,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
tmp = radeon_get_ib_value(p, idx+2) +
((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
- offset = reloc->lobj.gpu_offset + tmp;
+ offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
@@ -2174,7 +2174,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
}
- ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
}
break;
case PACKET3_EVENT_WRITE:
@@ -2190,7 +2190,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -2212,7 +2212,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -2234,7 +2234,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -2302,11 +2302,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
ib[idx+1+(i*8)+1] |=
- TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+ if (reloc->tiling_flags & RADEON_TILING_MACRO) {
unsigned bankw, bankh, mtaspect, tile_split;
- evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ evergreen_tiling_fields(reloc->tiling_flags,
&bankw, &bankh, &mtaspect,
&tile_split);
ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
@@ -2318,7 +2318,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
}
texture = reloc->robj;
- toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ toffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
/* tex mip base */
tex_dim = ib[idx+1+(i*8)+0] & 0x7;
@@ -2337,7 +2337,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
- moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
mipmap = reloc->robj;
}
@@ -2364,7 +2364,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
}
- offset64 = reloc->lobj.gpu_offset + offset;
+ offset64 = reloc->gpu_offset + offset;
ib[idx+1+(i*8)+0] = offset64;
ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
(upper_32_bits(offset64) & 0xff);
@@ -2445,7 +2445,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->lobj.gpu_offset;
+ offset += reloc->gpu_offset;
ib[idx+1] = offset;
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
@@ -2464,7 +2464,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->lobj.gpu_offset;
+ offset += reloc->gpu_offset;
ib[idx+3] = offset;
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
@@ -2493,7 +2493,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->lobj.gpu_offset;
+ offset += reloc->gpu_offset;
ib[idx+0] = offset;
ib[idx+1] = upper_32_bits(offset) & 0xff;
break;
@@ -2518,7 +2518,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->lobj.gpu_offset;
+ offset += reloc->gpu_offset;
ib[idx+1] = offset;
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else {
@@ -2542,7 +2542,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->lobj.gpu_offset;
+ offset += reloc->gpu_offset;
ib[idx+3] = offset;
ib[idx+4] = upper_32_bits(offset) & 0xff;
} else {
@@ -2717,7 +2717,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
p->idx += count + 7;
break;
/* linear */
@@ -2725,8 +2725,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
p->idx += count + 3;
break;
default:
@@ -2768,10 +2768,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
p->idx += 5;
break;
/* Copy L2T/T2L */
@@ -2781,22 +2781,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
/* tiled src, linear dst */
src_offset = radeon_get_ib_value(p, idx+1);
src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
dst_offset = radeon_get_ib_value(p, idx + 7);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
} else {
/* linear src, tiled dst */
src_offset = radeon_get_ib_value(p, idx+7);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
@@ -2827,10 +2827,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset + count, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
+ ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xffffffff);
+ ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
p->idx += 5;
break;
/* Copy L2L, partial */
@@ -2840,10 +2840,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("L2L Partial is cayman only !\n");
return -EINVAL;
}
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+ ib[idx+4] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
+ ib[idx+5] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
p->idx += 9;
break;
@@ -2876,12 +2876,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(dst2_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+3] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+4] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+ ib[idx+5] += upper_32_bits(dst2_reloc->gpu_offset) & 0xff;
+ ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
p->idx += 7;
break;
/* Copy L2T Frame to Field */
@@ -2916,10 +2916,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
p->idx += 10;
break;
/* Copy L2T/T2L, partial */
@@ -2932,16 +2932,16 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
/* detile bit */
if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
/* tiled src, linear dst */
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
} else {
/* linear src, tiled dst */
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
p->idx += 12;
break;
@@ -2978,10 +2978,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
p->idx += 10;
break;
/* Copy L2T/T2L (tile units) */
@@ -2992,22 +2992,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
/* tiled src, linear dst */
src_offset = radeon_get_ib_value(p, idx+1);
src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
dst_offset = radeon_get_ib_value(p, idx+7);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
} else {
/* linear src, tiled dst */
src_offset = radeon_get_ib_value(p, idx+7);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
@@ -3028,8 +3028,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("L2T, T2L Partial is cayman only !\n");
return -EINVAL;
}
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
- ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
+ ib[idx+4] += (u32)(dst_reloc->gpu_offset >> 8);
p->idx += 13;
break;
/* Copy L2T broadcast (tile units) */
@@ -3065,10 +3065,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
p->idx += 10;
break;
default:
@@ -3089,8 +3089,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
p->idx += 4;
break;
case DMA_PACKET_NOP:
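The evergreen_cs.c hunks above are a mechanical rename from reloc->lobj.gpu_offset to the flattened reloc->gpu_offset; the patching pattern they touch splits a 40-bit GPU address across two indirect-buffer dwords: the low 32 bits (dword-aligned for linear transfers, or shifted right by 8 for 256-byte-aligned tiled bases) plus the top 8 address bits in a following dword. A minimal standalone sketch of that split; the helper names here are illustrative, not from the driver:

#include <stdint.h>

/* Patch a linear destination/source: low 32 bits (dword aligned) in one
 * dword, the upper 8 address bits in the next one. */
static void patch_linear(uint32_t *ib_lo, uint32_t *ib_hi, uint64_t gpu_offset)
{
        *ib_lo += (uint32_t)(gpu_offset & 0xfffffffc);   /* low 32 bits, dword aligned */
        *ib_hi += (uint32_t)(gpu_offset >> 32) & 0xff;   /* upper 8 address bits */
}

/* Patch a tiled base: the hardware takes it in 256-byte units. */
static void patch_tiled(uint32_t *ib_base, uint64_t gpu_offset)
{
        *ib_base += (uint32_t)(gpu_offset >> 8);
}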
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index a37b54436382..287fe966d7de 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -174,11 +174,9 @@ bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
if (!(reset_mask & RADEON_RESET_DMA)) {
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
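The *_is_lockup() hunks in this series all drop the radeon_ring_force_activity() call and pass rdev into radeon_ring_lockup_update(), which now records the read pointer and a timestamp in the atomics added to struct radeon_ring further down in radeon.h. A condensed sketch of the shape every per-ring lockup check takes after the change; this is not literal driver code, and busy_mask stands in for the per-ASIC reset bits:

static bool ring_is_lockup_sketch(struct radeon_device *rdev,
                                  struct radeon_ring *ring,
                                  u32 reset_mask, u32 busy_mask)
{
        if (!(reset_mask & busy_mask)) {
                /* engine idle: refresh last_rptr/last_activity, no lockup */
                radeon_ring_lockup_update(rdev, ring);
                return false;
        }
        /* engine busy: report a lockup only if the rptr stopped moving */
        return radeon_ring_test_lockup(rdev, ring);
}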
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 351db361239d..16ec9d56a234 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1338,13 +1338,11 @@ static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}
-#if 0
static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
return kv_notify_message_to_smu(rdev, enable ?
PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}
-#endif
static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
@@ -1389,7 +1387,6 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
return kv_enable_uvd_dpm(rdev, !gate);
}
-#if 0
static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
{
u8 i;
@@ -1414,6 +1411,9 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,
int ret;
if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
+ kv_dpm_powergate_vce(rdev, false);
+ /* turn the clocks on when encoding */
+ cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
if (pi->caps_stable_p_state)
pi->vce_boot_level = table->count - 1;
else
@@ -1436,11 +1436,13 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,
kv_enable_vce_dpm(rdev, true);
} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
kv_enable_vce_dpm(rdev, false);
+ /* turn the clocks off when not encoding */
+ cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
+ kv_dpm_powergate_vce(rdev, true);
}
return 0;
}
-#endif
static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
@@ -1575,11 +1577,16 @@ static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
pi->vce_power_gated = gate;
if (gate) {
- if (pi->caps_vce_pg)
+ if (pi->caps_vce_pg) {
+ /* XXX do we need a vce_v1_0_stop() ? */
kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
+ }
} else {
- if (pi->caps_vce_pg)
+ if (pi->caps_vce_pg) {
kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
+ vce_v2_0_resume(rdev);
+ vce_v1_0_start(rdev);
+ }
}
}
@@ -1768,7 +1775,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
{
struct kv_power_info *pi = kv_get_pi(rdev);
struct radeon_ps *new_ps = &pi->requested_rps;
- /*struct radeon_ps *old_ps = &pi->current_rps;*/
+ struct radeon_ps *old_ps = &pi->current_rps;
int ret;
if (pi->bapm_enable) {
@@ -1798,13 +1805,12 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
kv_set_enabled_levels(rdev);
kv_force_lowest_valid(rdev);
kv_unforce_levels(rdev);
-#if 0
+
ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
if (ret) {
DRM_ERROR("kv_update_vce_dpm failed\n");
return ret;
}
-#endif
kv_update_sclk_t(rdev);
}
} else {
@@ -1823,13 +1829,11 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
kv_program_nbps_index_settings(rdev, new_ps);
kv_freeze_sclk_dpm(rdev, false);
kv_set_enabled_levels(rdev);
-#if 0
ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
if (ret) {
DRM_ERROR("kv_update_vce_dpm failed\n");
return ret;
}
-#endif
kv_update_acp_boot_level(rdev);
kv_update_sclk_t(rdev);
kv_enable_nb_dpm(rdev);
@@ -2037,6 +2041,14 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_clock_and_voltage_limits *max_limits =
&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ if (new_rps->vce_active) {
+ new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+ new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
+ } else {
+ new_rps->evclk = 0;
+ new_rps->ecclk = 0;
+ }
+
mclk = max_limits->mclk;
sclk = min_sclk;
@@ -2056,6 +2068,11 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
sclk = stable_p_state_sclk;
}
+ if (new_rps->vce_active) {
+ if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
+ sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
+ }
+
ps->need_dfs_bypass = true;
for (i = 0; i < ps->num_levels; i++) {
@@ -2092,7 +2109,8 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
- pi->video_start = new_rps->dclk || new_rps->vclk;
+ pi->video_start = new_rps->dclk || new_rps->vclk ||
+ new_rps->evclk || new_rps->ecclk;
if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
@@ -2538,9 +2556,6 @@ static int kv_parse_power_table(struct radeon_device *rdev)
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
- rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
@@ -2577,6 +2592,19 @@ static int kv_parse_power_table(struct radeon_device *rdev)
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
}
rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+ /* fill in the vce power states */
+ for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
+ u32 sclk;
+ clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+ sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+ rdev->pm.dpm.vce_states[i].sclk = sclk;
+ rdev->pm.dpm.vce_states[i].mclk = 0;
+ }
+
return 0;
}
@@ -2590,6 +2618,10 @@ int kv_dpm_init(struct radeon_device *rdev)
return -ENOMEM;
rdev->pm.dpm.priv = pi;
+ ret = r600_get_platform_caps(rdev);
+ if (ret)
+ return ret;
+
ret = r600_parse_extended_power_table(rdev);
if (ret)
return ret;
@@ -2623,7 +2655,7 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->caps_fps = false; /* true? */
pi->caps_uvd_pg = true;
pi->caps_uvd_dpm = true;
- pi->caps_vce_pg = false;
+ pi->caps_vce_pg = false; /* XXX true */
pi->caps_samu_pg = false;
pi->caps_acp_pg = false;
pi->caps_stable_p_state = false;
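Taken together, the kv_dpm.c hunks un-#if0 the VCE DPM helpers and wire them into the power-state transition, adding power gating and clock gating around the encode clock. The ordering established by kv_update_vce_dpm() and kv_dpm_powergate_vce() above, condensed into one sketch (the functions named exist in the driver; the wrapper itself is only illustrative):

static void kv_vce_transition_sketch(struct radeon_device *rdev, bool start_encoding)
{
        if (start_encoding) {
                kv_dpm_powergate_vce(rdev, false);               /* ungate VCE power (VCEPowerON, resume, start) */
                cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false); /* clocks on while encoding */
                kv_enable_vce_dpm(rdev, true);                   /* then enable VCE DPM in the SMU */
        } else {
                kv_enable_vce_dpm(rdev, false);
                cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);  /* clocks off when not encoding */
                kv_dpm_powergate_vce(rdev, true);                /* gate VCE power again */
        }
}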
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index bf6300cfd62d..d246e043421a 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1642,8 +1642,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
ring = &rdev->ring[ridx[i]];
WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
- ring->rptr = ring->wptr = 0;
- WREG32(cp_rb_rptr[i], ring->rptr);
+ ring->wptr = 0;
+ WREG32(cp_rb_rptr[i], 0);
WREG32(cp_rb_wptr[i], ring->wptr);
mdelay(1);
@@ -1917,11 +1917,9 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
if (!(reset_mask & (RADEON_RESET_GFX |
RADEON_RESET_COMPUTE |
RADEON_RESET_CP))) {
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 7cf96b15377f..6378e0276691 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -248,8 +248,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
ring->wptr = 0;
WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
- ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
-
WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
ring->ready = true;
@@ -302,11 +300,9 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
mask = RADEON_RESET_DMA1;
if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index ca814276b075..004c931606c4 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -4025,9 +4025,6 @@ static int ni_parse_power_table(struct radeon_device *rdev)
power_info->pplib.ucNumStates, GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
- rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < power_info->pplib.ucNumStates; i++) {
power_state = (union pplib_power_state *)
@@ -4089,6 +4086,10 @@ int ni_dpm_init(struct radeon_device *rdev)
pi->min_vddc_in_table = 0;
pi->max_vddc_in_table = 0;
+ ret = r600_get_platform_caps(rdev);
+ if (ret)
+ return ret;
+
ret = ni_parse_power_table(rdev);
if (ret)
return ret;
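Both ni_dpm_init() here and kv_dpm_init() earlier drop the open-coded ulPlatformCaps / usBackbiasTime / usVoltageTime reads in favour of the shared r600_get_platform_caps() helper added later in this series (r600_dpm.c), so every dpm_init now starts the same way. A condensed sketch of the resulting ordering, shown with the NI parser; other ASICs substitute their own table parser:

static int dpm_init_common_sketch(struct radeon_device *rdev)
{
        int ret;

        ret = r600_get_platform_caps(rdev);   /* fills platform_caps + backbias/voltage response times */
        if (ret)
                return ret;

        return ni_parse_power_table(rdev);    /* the per-ASIC parser no longer reads them itself */
}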
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 3cc78bb66042..b6c32640df20 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1193,7 +1193,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
- ring->rptr = RREG32(RADEON_CP_RB_RPTR);
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1275,12 +1274,12 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
value = radeon_get_ib_value(p, idx);
tmp = value & 0x003fffff;
- tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+ tmp += (((u32)reloc->gpu_offset) >> 10);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_DST_TILE_MACRO;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ if (reloc->tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n");
radeon_cs_dump_packet(p, pkt);
@@ -1326,7 +1325,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
return r;
}
idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj;
@@ -1338,7 +1337,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt);
return r;
}
- ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+ ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = idx_value >> 24;
track->arrays[i + 1].esize &= 0x7F;
@@ -1352,7 +1351,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
return r;
}
idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].esize &= 0x7F;
@@ -1595,7 +1594,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -1608,7 +1607,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
track->cb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case RADEON_PP_TXOFFSET_0:
case RADEON_PP_TXOFFSET_1:
@@ -1622,16 +1621,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_TXO_MACRO_TILE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_TXO_MICRO_TILE_X2;
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
- ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = tmp + ((u32)reloc->gpu_offset);
} else
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -1649,7 +1648,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
}
track->textures[0].cube_info[i].offset = idx_value;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[0].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -1667,7 +1666,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
}
track->textures[1].cube_info[i].offset = idx_value;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[1].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -1685,7 +1684,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
}
track->textures[2].cube_info[i].offset = idx_value;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[2].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -1703,9 +1702,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
@@ -1773,7 +1772,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case RADEON_PP_CNTL:
{
@@ -1933,7 +1932,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt);
return r;
}
- ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
+ ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) {
return r;
@@ -1947,7 +1946,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
track->num_arrays = 1;
track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
@@ -2523,11 +2522,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
@@ -3223,12 +3220,12 @@ void r100_bandwidth_update(struct radeon_device *rdev)
if (rdev->mode_info.crtcs[0]->base.enabled) {
mode1 = &rdev->mode_info.crtcs[0]->base.mode;
- pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
+ pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8;
}
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
if (rdev->mode_info.crtcs[1]->base.enabled) {
mode2 = &rdev->mode_info.crtcs[1]->base.mode;
- pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
+ pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8;
}
}
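The r100.c hunks (and the r200.c/r300.c ones that follow) are the same rename from reloc->lobj.* to the flattened reloc->* fields defined later in radeon.h; the relocation logic itself is untouched. In simplified standalone form, the core of the pitch/offset patching done by r100_reloc_pitch_offset() amounts to the sketch below; the flag values are placeholders rather than the real RADEON_TILING_*/RADEON_DST_TILE_* bits, and the KEEP_TILING_FLAGS and microtile-blit checks visible above are omitted:

#include <stdint.h>

#define TILING_MACRO    (1u << 0)     /* placeholder bits, not the driver's values */
#define TILING_MICRO    (1u << 1)
#define DST_TILE_MACRO  (1u << 30)
#define DST_TILE_MICRO  (1u << 31)

/* Keep the 22-bit offset field of the submitted value, add the relocated
 * base in 1KB units, then fold the buffer's tiling flags into tile bits. */
static uint32_t patch_pitch_offset(uint32_t value, uint64_t gpu_offset,
                                   uint32_t tiling_flags)
{
        uint32_t tmp = value & 0x003fffff;

        tmp += ((uint32_t)gpu_offset) >> 10;
        if (tiling_flags & TILING_MACRO)
                tmp |= DST_TILE_MACRO;
        if (tiling_flags & TILING_MICRO)
                tmp |= DST_TILE_MICRO;
        return tmp;
}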
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index b3807edb1936..58f0473aa73f 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -185,7 +185,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -198,7 +198,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
track->cb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case R200_PP_TXOFFSET_0:
case R200_PP_TXOFFSET_1:
@@ -215,16 +215,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R200_TXO_MACRO_TILE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R200_TXO_MICRO_TILE;
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
- ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = tmp + ((u32)reloc->gpu_offset);
} else
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -268,7 +268,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
}
track->textures[i].cube_info[face - 1].offset = idx_value;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -287,9 +287,9 @@ int r200_packet0_check(struct radeon_cs_parser *p,
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
@@ -362,7 +362,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case RADEON_PP_CNTL:
{
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0b658b34b33a..206caf9700b7 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -640,7 +640,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->cb[i].robj = reloc->robj;
track->cb[i].offset = idx_value;
track->cb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case R300_ZB_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -653,7 +653,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case R300_TX_OFFSET_0:
case R300_TX_OFFSET_0+4:
@@ -682,16 +682,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
- ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+ ((idx_value & ~31) + (u32)reloc->gpu_offset);
} else {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_TXO_MACRO_TILE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_TXO_MICRO_TILE;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
- tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+ tmp = idx_value + ((u32)reloc->gpu_offset);
tmp |= tile_flags;
ib[idx] = tmp;
}
@@ -753,11 +753,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
@@ -838,11 +838,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_DEPTHMACROTILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_DEPTHMICROTILE_TILED;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
tmp = idx_value & ~(0x7 << 16);
@@ -1052,7 +1052,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case 0x4e0c:
/* RB3D_COLOR_CHANNEL_MASK */
@@ -1097,7 +1097,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->aa.robj = reloc->robj;
track->aa.offset = idx_value;
track->aa_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break;
case R300_RB3D_AARESOLVE_PITCH:
track->aa.pitch = idx_value & 0x3FFE;
@@ -1162,7 +1162,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt);
return r;
}
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) {
return r;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 647ef4079217..6e887d004eba 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1748,11 +1748,9 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
if (!(reset_mask & (RADEON_RESET_GFX |
RADEON_RESET_COMPUTE |
RADEON_RESET_CP))) {
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
@@ -2604,8 +2602,6 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- ring->rptr = RREG32(CP_RB_RPTR);
-
r600_cp_start(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 2812c7d1ae6f..12511bb5fd6f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1022,7 +1022,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case SQ_CONFIG:
track->sq_config = radeon_get_ib_value(p, idx);
@@ -1043,7 +1043,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_depth_info = radeon_get_ib_value(p, idx);
ib[idx] &= C_028010_ARRAY_MODE;
track->db_depth_info &= C_028010_ARRAY_MODE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ if (reloc->tiling_flags & RADEON_TILING_MACRO) {
ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
} else {
@@ -1084,9 +1084,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->vgt_strmout_bo[tmp] = reloc->robj;
- track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
+ track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset;
track->streamout_dirty = true;
break;
case VGT_STRMOUT_BUFFER_SIZE_0:
@@ -1105,7 +1105,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case R_028238_CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
@@ -1142,10 +1142,10 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ if (reloc->tiling_flags & RADEON_TILING_MACRO) {
ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ } else if (reloc->tiling_flags & RADEON_TILING_MICRO) {
ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
}
@@ -1214,7 +1214,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
track->cb_color_frag_bo[tmp] = reloc->robj;
track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
}
if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
track->cb_dirty = true;
@@ -1245,7 +1245,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
track->cb_color_tile_bo[tmp] = reloc->robj;
track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
}
if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
track->cb_dirty = true;
@@ -1281,10 +1281,10 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
tmp = (reg - CB_COLOR0_BASE) / 4;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
- track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
+ track->cb_color_bo_mc[tmp] = reloc->gpu_offset;
track->cb_dirty = true;
break;
case DB_DEPTH_BASE:
@@ -1295,9 +1295,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
track->db_offset = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->db_bo = reloc->robj;
- track->db_bo_mc = reloc->lobj.gpu_offset;
+ track->db_bo_mc = reloc->gpu_offset;
track->db_dirty = true;
break;
case DB_HTILE_DATA_BASE:
@@ -1308,7 +1308,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
track->htile_offset = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->htile_bo = reloc->robj;
track->db_dirty = true;
break;
@@ -1377,7 +1377,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
@@ -1386,7 +1386,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case SX_MISC:
track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
@@ -1672,7 +1672,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
(idx_value & 0xfffffff0) +
((u64)(tmp & 0xff) << 32);
@@ -1713,7 +1713,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
idx_value +
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
@@ -1765,7 +1765,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -1805,7 +1805,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
tmp = radeon_get_ib_value(p, idx) +
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
- offset = reloc->lobj.gpu_offset + tmp;
+ offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
@@ -1835,7 +1835,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
tmp = radeon_get_ib_value(p, idx+2) +
((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
- offset = reloc->lobj.gpu_offset + tmp;
+ offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
@@ -1861,7 +1861,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
}
- ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
}
break;
case PACKET3_EVENT_WRITE:
@@ -1877,7 +1877,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -1899,7 +1899,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
+ offset = reloc->gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -1964,11 +1964,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
}
- base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->tiling_flags & RADEON_TILING_MACRO)
ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ else if (reloc->tiling_flags & RADEON_TILING_MICRO)
ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
}
texture = reloc->robj;
@@ -1978,13 +1978,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
}
- mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
mipmap = reloc->robj;
r = r600_check_texture_resource(p, idx+(i*7)+1,
texture, mipmap,
base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
- reloc->lobj.tiling_flags);
+ reloc->tiling_flags);
if (r)
return r;
ib[idx+1+(i*7)+2] += base_offset;
@@ -2008,7 +2008,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
}
- offset64 = reloc->lobj.gpu_offset + offset;
+ offset64 = reloc->gpu_offset + offset;
ib[idx+1+(i*8)+0] = offset64;
ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
(upper_32_bits(offset64) & 0xff);
@@ -2118,7 +2118,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
}
break;
case PACKET3_SURFACE_BASE_UPDATE:
@@ -2151,7 +2151,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->lobj.gpu_offset;
+ offset += reloc->gpu_offset;
ib[idx+1] = offset;
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
@@ -2170,7 +2170,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->lobj.gpu_offset;
+ offset += reloc->gpu_offset;
ib[idx+3] = offset;
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
@@ -2199,7 +2199,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->lobj.gpu_offset;
+ offset += reloc->gpu_offset;
ib[idx+0] = offset;
ib[idx+1] = upper_32_bits(offset) & 0xff;
break;
@@ -2224,7 +2224,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->lobj.gpu_offset;
+ offset += reloc->gpu_offset;
ib[idx+1] = offset;
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else {
@@ -2248,7 +2248,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->lobj.gpu_offset;
+ offset += reloc->gpu_offset;
ib[idx+3] = offset;
ib[idx+4] = upper_32_bits(offset) & 0xff;
} else {
@@ -2505,14 +2505,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
p->idx += count + 5;
} else {
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
p->idx += count + 3;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
@@ -2539,22 +2539,22 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
/* tiled src, linear dst */
src_offset = radeon_get_ib_value(p, idx+1);
src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
dst_offset = radeon_get_ib_value(p, idx+5);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
- ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
} else {
/* linear src, tiled dst */
src_offset = radeon_get_ib_value(p, idx+5);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
- ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
p->idx += 7;
} else {
@@ -2564,10 +2564,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
p->idx += 5;
} else {
src_offset = radeon_get_ib_value(p, idx+2);
@@ -2575,10 +2575,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+ ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16;
p->idx += 4;
}
}
@@ -2610,8 +2610,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
p->idx += 4;
break;
case DMA_PACKET_NOP:
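The r600_cs.c hunks are again the lobj flattening; the idiom they touch most often patches a base register that takes its address in 256-byte units, so the checker simply adds the relocated offset shifted down by 8. A simplified standalone form of that pattern (the function name is illustrative):

#include <stdint.h>

/* CB_COLOR*_BASE, DB_DEPTH_BASE and friends store a 256-byte-aligned address,
 * hence the "+= (gpu_offset >> 8)" pattern seen throughout the checker above. */
static uint32_t patch_base_reg(uint32_t submitted, uint64_t gpu_offset)
{
        return submitted + (uint32_t)((gpu_offset >> 8) & 0xffffffff);
}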
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index b2d4c91e6272..53fcb28f5578 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -176,8 +176,6 @@ int r600_dma_resume(struct radeon_device *rdev)
ring->wptr = 0;
WREG32(DMA_RB_WPTR, ring->wptr << 2);
- ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
-
WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
ring->ready = true;
@@ -221,11 +219,9 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
u32 reset_mask = r600_gpu_check_soft_reset(rdev);
if (!(reset_mask & RADEON_RESET_DMA)) {
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index e4cc9b314ce9..cbf7e3269f84 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -834,6 +834,26 @@ static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependen
return 0;
}
+int r600_get_platform_caps(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+ rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+ rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+
+ return 0;
+}
+
/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
@@ -1043,7 +1063,15 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
1 + array->ucNumEntries * sizeof(VCEClockInfo));
+ ATOM_PPLIB_VCE_State_Table *states =
+ (ATOM_PPLIB_VCE_State_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+ 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
+ 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
+ ATOM_PPLIB_VCE_State_Record *state_entry;
+ VCEClockInfo *vce_clk;
u32 size = limits->numEntries *
sizeof(struct radeon_vce_clock_voltage_dependency_entry);
rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
@@ -1055,8 +1083,9 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
+ state_entry = &states->entries[0];
for (i = 0; i < limits->numEntries; i++) {
- VCEClockInfo *vce_clk = (VCEClockInfo *)
+ vce_clk = (VCEClockInfo *)
((u8 *)&array->entries[0] +
(entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
@@ -1068,6 +1097,23 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
}
+ for (i = 0; i < states->numEntries; i++) {
+ if (i >= RADEON_MAX_VCE_LEVELS)
+ break;
+ vce_clk = (VCEClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+ rdev->pm.dpm.vce_states[i].evclk =
+ le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+ rdev->pm.dpm.vce_states[i].ecclk =
+ le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+ rdev->pm.dpm.vce_states[i].clk_idx =
+ state_entry->ucClockInfoIndex & 0x3f;
+ rdev->pm.dpm.vce_states[i].pstate =
+ (state_entry->ucClockInfoIndex & 0xc0) >> 6;
+ state_entry = (ATOM_PPLIB_VCE_State_Record *)
+ ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
+ }
}
if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
ext_hdr->usUVDTableOffset) {
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 07eab2b04e81..46b9d2a03018 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -215,6 +215,8 @@ void r600_stop_dpm(struct radeon_device *rdev);
bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor);
+int r600_get_platform_caps(struct radeon_device *rdev);
+
int r600_parse_extended_power_table(struct radeon_device *rdev);
void r600_free_extended_power_table(struct radeon_device *rdev);
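The new VCE-state loop added to r600_parse_extended_power_table() above packs two values out of each ATOM_PPLIB_VCE_State_Record's ucClockInfoIndex byte: the low six bits index the clock-info array (later consumed as clk_idx in kv_parse_power_table()), and the top two bits carry the VCE performance state. Standalone, the decode is just the sketch below; the struct and function names are illustrative:

#include <stdint.h>

struct vce_state_sketch {
        uint8_t clk_idx;        /* index into the pplib clock-info array */
        uint8_t pstate;         /* VCE performance state */
};

/* Same bit layout as the r600_dpm.c hunk above: pstate in the two high bits,
 * clock-info index in the six low bits. */
static struct vce_state_sketch decode_vce_state(uint8_t clock_info_index)
{
        struct vce_state_sketch s = {
                .clk_idx = clock_info_index & 0x3f,                     /* low 6 bits */
                .pstate  = (uint8_t)((clock_info_index & 0xc0) >> 6),   /* high 2 bits */
        };
        return s;
}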
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index e887d027b6d0..f21db7a0b34d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -113,19 +113,16 @@ extern int radeon_hard_reset;
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
-/* max number of rings */
-#define RADEON_NUM_RINGS 6
-
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
/* internal ring indices */
/* r1xx+ has gfx CP ring */
-#define RADEON_RING_TYPE_GFX_INDEX 0
+#define RADEON_RING_TYPE_GFX_INDEX 0
/* cayman has 2 compute CP rings */
-#define CAYMAN_RING_TYPE_CP1_INDEX 1
-#define CAYMAN_RING_TYPE_CP2_INDEX 2
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
/* R600+ has an async dma ring */
#define R600_RING_TYPE_DMA_INDEX 3
@@ -133,7 +130,17 @@ extern int radeon_hard_reset;
#define CAYMAN_RING_TYPE_DMA1_INDEX 4
/* R600+ */
-#define R600_RING_TYPE_UVD_INDEX 5
+#define R600_RING_TYPE_UVD_INDEX 5
+
+/* TN+ */
+#define TN_RING_TYPE_VCE1_INDEX 6
+#define TN_RING_TYPE_VCE2_INDEX 7
+
+/* max number of rings */
+#define RADEON_NUM_RINGS 8
+
+/* number of hw syncs before falling back on blocking */
+#define RADEON_NUM_SYNCS 4
/* number of hw syncs before falling back on blocking */
#define RADEON_NUM_SYNCS 4
@@ -356,9 +363,8 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_locked(struct radeon_fence *fence);
-int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
@@ -450,6 +456,7 @@ struct radeon_bo {
/* Protected by gem.mutex */
struct list_head list;
/* Protected by tbo.reserved */
+ u32 initial_domain;
u32 placements[3];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
@@ -472,16 +479,6 @@ struct radeon_bo {
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
-struct radeon_bo_list {
- struct ttm_validate_buffer tv;
- struct radeon_bo *bo;
- uint64_t gpu_offset;
- bool written;
- unsigned domain;
- unsigned alt_domain;
- u32 tiling_flags;
-};
-
int radeon_gem_debugfs_init(struct radeon_device *rdev);
/* sub-allocation manager, it has to be protected by another lock.
@@ -789,7 +786,6 @@ struct radeon_ib {
struct radeon_ring {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
- unsigned rptr;
unsigned rptr_offs;
unsigned rptr_save_reg;
u64 next_rptr_gpu_addr;
@@ -799,8 +795,8 @@ struct radeon_ring {
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
- unsigned long last_activity;
- unsigned last_rptr;
+ atomic_t last_rptr;
+ atomic64_t last_activity;
uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask;
@@ -852,17 +848,22 @@ struct radeon_mec {
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
+struct radeon_vm_pt {
+ struct radeon_bo *bo;
+ uint64_t addr;
+};
+
struct radeon_vm {
- struct list_head list;
struct list_head va;
unsigned id;
/* contains the page directory */
- struct radeon_sa_bo *page_directory;
+ struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
+ unsigned max_pde_used;
/* array of page tables, one for each page directory entry */
- struct radeon_sa_bo **page_tables;
+ struct radeon_vm_pt *page_tables;
struct mutex mutex;
/* last fence for cs using this vm */
@@ -874,10 +875,7 @@ struct radeon_vm {
};
struct radeon_vm_manager {
- struct mutex lock;
- struct list_head lru_vm;
struct radeon_fence *active[RADEON_NUM_VM];
- struct radeon_sa_manager sa_manager;
uint32_t max_pfn;
/* number of VMIDs */
unsigned nvm;
@@ -953,8 +951,8 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *c
void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
-void radeon_ring_lockup_update(struct radeon_ring *ring);
+void radeon_ring_lockup_update(struct radeon_device *rdev,
+ struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
uint32_t **data);
@@ -980,9 +978,12 @@ void cayman_dma_fini(struct radeon_device *rdev);
struct radeon_cs_reloc {
struct drm_gem_object *gobj;
struct radeon_bo *robj;
- struct radeon_bo_list lobj;
+ struct ttm_validate_buffer tv;
+ uint64_t gpu_offset;
+ unsigned domain;
+ unsigned alt_domain;
+ uint32_t tiling_flags;
uint32_t handle;
- uint32_t flags;
};
struct radeon_cs_chunk {
@@ -1006,6 +1007,7 @@ struct radeon_cs_parser {
unsigned nrelocs;
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
+ struct radeon_cs_reloc *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
/* indices of various chunks */
@@ -1255,6 +1257,17 @@ enum radeon_dpm_event_src {
RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};
+#define RADEON_MAX_VCE_LEVELS 6
+
+enum radeon_vce_level {
+ RADEON_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
+ RADEON_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
+ RADEON_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
+ RADEON_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+ RADEON_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
+ RADEON_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
struct radeon_ps {
u32 caps; /* vbios flags */
u32 class; /* vbios flags */
@@ -1265,6 +1278,8 @@ struct radeon_ps {
/* VCE clocks */
u32 evclk;
u32 ecclk;
+ bool vce_active;
+ enum radeon_vce_level vce_level;
/* asic priv */
void *ps_priv;
};
@@ -1439,6 +1454,17 @@ enum radeon_dpm_forced_level {
RADEON_DPM_FORCED_LEVEL_HIGH = 2,
};
+struct radeon_vce_state {
+ /* vce clocks */
+ u32 evclk;
+ u32 ecclk;
+ /* gpu clocks */
+ u32 sclk;
+ u32 mclk;
+ u8 clk_idx;
+ u8 pstate;
+};
+
struct radeon_dpm {
struct radeon_ps *ps;
/* number of valid power states */
@@ -1451,6 +1477,9 @@ struct radeon_dpm {
struct radeon_ps *boot_ps;
/* default uvd power state */
struct radeon_ps *uvd_ps;
+ /* vce requirements */
+ struct radeon_vce_state vce_states[RADEON_MAX_VCE_LEVELS];
+ enum radeon_vce_level vce_level;
enum radeon_pm_state_type state;
enum radeon_pm_state_type user_state;
u32 platform_caps;
@@ -1476,6 +1505,7 @@ struct radeon_dpm {
/* special states active */
bool thermal_active;
bool uvd_active;
+ bool vce_active;
/* thermal handling */
struct radeon_dpm_thermal thermal;
/* forced levels */
@@ -1486,6 +1516,7 @@ struct radeon_dpm {
};
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
+void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable);
struct radeon_pm {
struct mutex mutex;
@@ -1591,6 +1622,45 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
unsigned cg_upll_func_cntl);
+/*
+ * VCE
+ */
+#define RADEON_MAX_VCE_HANDLES 16
+#define RADEON_VCE_STACK_SIZE (1024*1024)
+#define RADEON_VCE_HEAP_SIZE (4*1024*1024)
+
+struct radeon_vce {
+ struct radeon_bo *vcpu_bo;
+ uint64_t gpu_addr;
+ unsigned fw_version;
+ unsigned fb_version;
+ atomic_t handles[RADEON_MAX_VCE_HANDLES];
+ struct drm_file *filp[RADEON_MAX_VCE_HANDLES];
+ struct delayed_work idle_work;
+};
+
+int radeon_vce_init(struct radeon_device *rdev);
+void radeon_vce_fini(struct radeon_device *rdev);
+int radeon_vce_suspend(struct radeon_device *rdev);
+int radeon_vce_resume(struct radeon_device *rdev);
+int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence);
+int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence);
+void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
+void radeon_vce_note_usage(struct radeon_device *rdev);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_parse(struct radeon_cs_parser *p);
+bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+void radeon_vce_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+
struct r600_audio_pin {
int channels;
int rate;
@@ -1780,6 +1850,7 @@ struct radeon_asic {
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
+ int (*set_vce_clocks)(struct radeon_device *rdev, u32 evclk, u32 ecclk);
int (*get_temperature)(struct radeon_device *rdev);
} pm;
/* dynamic power management */
@@ -2041,6 +2112,8 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -2186,6 +2259,7 @@ struct radeon_device {
struct radeon_gem gem;
struct radeon_pm pm;
struct radeon_uvd uvd;
+ struct radeon_vce vce;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
@@ -2205,6 +2279,7 @@ struct radeon_device {
const struct firmware *sdma_fw; /* CIK SDMA firmware */
const struct firmware *smc_fw; /* SMC firmware */
const struct firmware *uvd_fw; /* UVD firmware */
+ const struct firmware *vce_fw; /* VCE firmware */
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
@@ -2229,6 +2304,10 @@ struct radeon_device {
/* virtual memory */
struct radeon_vm_manager vm_manager;
struct mutex gpu_clock_mutex;
+ /* memory stats */
+ atomic64_t vram_usage;
+ atomic64_t gtt_usage;
+ atomic64_t num_bytes_moved;
/* ACPI interface */
struct radeon_atif atif;
struct radeon_atcs atcs;
@@ -2639,6 +2718,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
+#define radeon_set_vce_clocks(rdev, ev, ec) (rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec))
#define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
@@ -2715,16 +2795,22 @@ extern void radeon_program_register_sequence(struct radeon_device *rdev,
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
-void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
-void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
+struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct list_head *head);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
+void radeon_vm_flush(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ int ring);
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
+int radeon_vm_update_page_directory(struct radeon_device *rdev,
+ struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
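
The three atomic64 counters added to struct radeon_device above (vram_usage, gtt_usage, num_bytes_moved) are plain lock-free statistics. The sketch below shows one plausible way such counters could be maintained when a buffer changes placement; the helper name radeon_mem_stats_account() and its callers are assumptions made for illustration only and are not part of this patch.

#include <linux/atomic.h>
#include <drm/radeon_drm.h>

/* Hypothetical helper, illustration only: account a buffer move against
 * the new lock-free counters in struct radeon_device. */
static void radeon_mem_stats_account(struct radeon_device *rdev,
				     u64 size, u32 domain, bool add)
{
	s64 delta = add ? (s64)size : -(s64)size;

	if (domain == RADEON_GEM_DOMAIN_VRAM)
		atomic64_add(delta, &rdev->vram_usage);
	else if (domain == RADEON_GEM_DOMAIN_GTT)
		atomic64_add(delta, &rdev->gtt_usage);
}

/* reading the counters needs no locking either, e.g. for debugfs output */
static u64 radeon_mem_stats_bytes_moved(struct radeon_device *rdev)
{
	return atomic64_read(&rdev->num_bytes_moved);
}
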
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index dda02bfc10a4..b8a24a75d4ff 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1987,6 +1987,19 @@ static struct radeon_asic_ring ci_dma_ring = {
.set_wptr = &cik_sdma_set_wptr,
};
+static struct radeon_asic_ring ci_vce_ring = {
+ .ib_execute = &radeon_vce_ib_execute,
+ .emit_fence = &radeon_vce_fence_emit,
+ .emit_semaphore = &radeon_vce_semaphore_emit,
+ .cs_parse = &radeon_vce_cs_parse,
+ .ring_test = &radeon_vce_ring_test,
+ .ib_test = &radeon_vce_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ .get_rptr = &vce_v1_0_get_rptr,
+ .get_wptr = &vce_v1_0_get_wptr,
+ .set_wptr = &vce_v1_0_set_wptr,
+};
+
static struct radeon_asic ci_asic = {
.init = &cik_init,
.fini = &cik_fini,
@@ -2015,6 +2028,8 @@ static struct radeon_asic ci_asic = {
[R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
[CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
+ [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
+ [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
},
.irq = {
.set = &cik_irq_set,
@@ -2061,6 +2076,7 @@ static struct radeon_asic ci_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &cik_set_uvd_clocks,
+ .set_vce_clocks = &cik_set_vce_clocks,
.get_temperature = &ci_get_temp,
},
.dpm = {
@@ -2117,6 +2133,8 @@ static struct radeon_asic kv_asic = {
[R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
[CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
+ [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
+ [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
},
.irq = {
.set = &cik_irq_set,
@@ -2163,6 +2181,7 @@ static struct radeon_asic kv_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &cik_set_uvd_clocks,
+ .set_vce_clocks = &cik_set_vce_clocks,
.get_temperature = &kv_get_temp,
},
.dpm = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index ae637cfda783..3d55a3a39e82 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -717,6 +717,7 @@ u32 cik_get_xclk(struct radeon_device *rdev);
uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk);
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
@@ -863,4 +864,17 @@ bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
/* uvd v4.2 */
int uvd_v4_2_resume(struct radeon_device *rdev);
+/* vce v1.0 */
+uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void vce_v1_0_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+int vce_v1_0_init(struct radeon_device *rdev);
+int vce_v1_0_start(struct radeon_device *rdev);
+
+/* vce v2.0 */
+int vce_v2_0_resume(struct radeon_device *rdev);
+
#endif
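
The new pm.set_vce_clocks callback and the radeon_set_vce_clocks() wrapper added above give power management a way to program the VCE clocks alongside the existing UVD hook. Below is a minimal sketch of how a DPM path might drive it; only the callback, the macro and the radeon_ps fields come from this patch, the caller itself is made up for illustration.

/* Sketch only: program VCE clocks for a power state, if the ASIC has the hook. */
static void example_program_vce_clocks(struct radeon_device *rdev,
				       struct radeon_ps *ps)
{
	if (!rdev->asic->pm.set_vce_clocks)
		return; /* ASIC without VCE clock control */

	if (ps->vce_active)
		radeon_set_vce_clocks(rdev, ps->evclk, ps->ecclk);
	else
		radeon_set_vce_clocks(rdev, 0, 0);
}
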
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 82d4f865546e..c566b486ca08 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -89,7 +89,7 @@ static void radeon_property_change_mode(struct drm_encoder *encoder)
if (crtc && crtc->enabled) {
drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ crtc->x, crtc->y, crtc->primary->fb);
}
}
@@ -1595,6 +1595,7 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
bool is_dp_bridge = false;
+ bool has_aux = false;
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
@@ -1672,7 +1673,9 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
else
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
- if (!radeon_dig_connector->dp_i2c_bus)
+ if (radeon_dig_connector->dp_i2c_bus)
+ has_aux = true;
+ else
DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
@@ -1895,7 +1898,9 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_dig_connector->dp_i2c_bus)
DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
- if (!radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus)
+ has_aux = true;
+ else
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
@@ -1939,7 +1944,9 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
/* add DP i2c bus */
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
- if (!radeon_dig_connector->dp_i2c_bus)
+ if (radeon_dig_connector->dp_i2c_bus)
+ has_aux = true;
+ else
DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
@@ -2000,6 +2007,10 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
+
+ if (has_aux)
+ radeon_dp_aux_init(radeon_connector);
+
return;
failed:
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index dfb5a1db87d4..2b6e0ebcc13a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -24,16 +24,59 @@
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
+#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"
+#define RADEON_CS_MAX_PRIORITY 32u
+#define RADEON_CS_NUM_BUCKETS (RADEON_CS_MAX_PRIORITY + 1)
+
+/* This is based on the bucket sort with O(n) time complexity.
+ * An item with priority "i" is added to bucket[i]. The lists are then
+ * concatenated in descending order.
+ */
+struct radeon_cs_buckets {
+ struct list_head bucket[RADEON_CS_NUM_BUCKETS];
+};
+
+static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
+{
+ unsigned i;
+
+ for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
+ INIT_LIST_HEAD(&b->bucket[i]);
+}
+
+static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
+ struct list_head *item, unsigned priority)
+{
+ /* Since buffers which appear sooner in the relocation list are
+ * likely to be used more often than buffers which appear later
+ * in the list, the sort mustn't change the ordering of buffers
+ * with the same priority, i.e. it must be stable.
+ */
+ list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
+}
+
+static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
+ struct list_head *out_list)
+{
+ unsigned i;
+
+ /* Connect the sorted buckets in the output list. */
+ for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
+ list_splice(&b->bucket[i], out_list);
+ }
+}
+
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
struct drm_device *ddev = p->rdev->ddev;
struct radeon_cs_chunk *chunk;
+ struct radeon_cs_buckets buckets;
unsigned i, j;
bool duplicate;
@@ -52,8 +95,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
if (p->relocs == NULL) {
return -ENOMEM;
}
+
+ radeon_cs_buckets_init(&buckets);
+
for (i = 0; i < p->nrelocs; i++) {
struct drm_radeon_cs_reloc *r;
+ unsigned priority;
duplicate = false;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
@@ -78,8 +125,14 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
- p->relocs[i].lobj.bo = p->relocs[i].robj;
- p->relocs[i].lobj.written = !!r->write_domain;
+
+ /* The userspace buffer priorities are from 0 to 15. A higher
+ * number means the buffer is more important.
+ * Also, the buffers used for write have a higher priority than
+ * the buffers used for read only, which doubles the range
+ * to 0 to 31. 32 is reserved for the kernel driver.
+ */
+ priority = (r->flags & 0xf) * 2 + !!r->write_domain;
/* the first reloc of a UVD job is the msg and that must be in
VRAM, also put everything into VRAM on AGP cards to avoid
@@ -87,29 +140,38 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
(i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
/* TODO: is this still needed for NI+ ? */
- p->relocs[i].lobj.domain =
+ p->relocs[i].domain =
RADEON_GEM_DOMAIN_VRAM;
- p->relocs[i].lobj.alt_domain =
+ p->relocs[i].alt_domain =
RADEON_GEM_DOMAIN_VRAM;
+ /* prioritize this over any other relocation */
+ priority = RADEON_CS_MAX_PRIORITY;
} else {
uint32_t domain = r->write_domain ?
r->write_domain : r->read_domains;
- p->relocs[i].lobj.domain = domain;
+ p->relocs[i].domain = domain;
if (domain == RADEON_GEM_DOMAIN_VRAM)
domain |= RADEON_GEM_DOMAIN_GTT;
- p->relocs[i].lobj.alt_domain = domain;
+ p->relocs[i].alt_domain = domain;
}
- p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
+ p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].handle = r->handle;
- radeon_bo_list_add_object(&p->relocs[i].lobj,
- &p->validated);
+ radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
+ priority);
}
- return radeon_bo_list_validate(&p->ticket, &p->validated, p->ring);
+
+ radeon_cs_buckets_get_list(&buckets, &p->validated);
+
+ if (p->cs_flags & RADEON_CS_USE_VM)
+ p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
+ &p->validated);
+
+ return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
}
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -147,6 +209,10 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
case RADEON_CS_RING_UVD:
p->ring = R600_RING_TYPE_UVD_INDEX;
break;
+ case RADEON_CS_RING_VCE:
+ /* TODO: only use the low priority ring for now */
+ p->ring = TN_RING_TYPE_VCE1_INDEX;
+ break;
}
return 0;
}
@@ -286,6 +352,16 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return 0;
}
+static int cmp_size_smaller_first(void *priv, struct list_head *a,
+ struct list_head *b)
+{
+ struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
+ struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+
+ /* Sort A before B if A is smaller. */
+ return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+}
+
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
@@ -299,6 +375,18 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
unsigned i;
if (!error) {
+ /* Sort the buffer list from the smallest to largest buffer,
+ * which affects the order of buffers in the LRU list.
+ * This ensures that the smallest buffers are added first
+ * to the LRU list, so they are likely to be evicted first
+ * later, instead of large buffers whose eviction is more
+ * expensive.
+ *
+ * This slightly lowers the number of bytes moved by TTM
+ * per frame under memory pressure.
+ */
+ list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
parser->ib.fence);
@@ -316,6 +404,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
kfree(parser->track);
kfree(parser->relocs);
kfree(parser->relocs_ptr);
+ kfree(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
@@ -343,6 +432,9 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
if (parser->ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_note_usage(rdev);
+ else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
+ (parser->ring == TN_RING_TYPE_VCE2_INDEX))
+ radeon_vce_note_usage(rdev);
radeon_cs_sync_rings(parser);
r = radeon_ib_schedule(rdev, &parser->ib, NULL);
@@ -352,24 +444,32 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
return r;
}
-static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
struct radeon_vm *vm)
{
- struct radeon_device *rdev = parser->rdev;
- struct radeon_bo_list *lobj;
- struct radeon_bo *bo;
- int r;
+ struct radeon_device *rdev = p->rdev;
+ int i, r;
- r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
- if (r) {
+ r = radeon_vm_update_page_directory(rdev, vm);
+ if (r)
return r;
- }
- list_for_each_entry(lobj, &parser->validated, tv.head) {
- bo = lobj->bo;
- r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
- if (r) {
+
+ r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+ &rdev->ring_tmp_bo.bo->tbo.mem);
+ if (r)
+ return r;
+
+ for (i = 0; i < p->nrelocs; i++) {
+ struct radeon_bo *bo;
+
+ /* ignore duplicates */
+ if (p->relocs_ptr[i] != &p->relocs[i])
+ continue;
+
+ bo = p->relocs[i].robj;
+ r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+ if (r)
return r;
- }
}
return 0;
}
@@ -401,20 +501,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if (parser->ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_note_usage(rdev);
- mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
- r = radeon_vm_alloc_pt(rdev, vm);
- if (r) {
- goto out;
- }
r = radeon_bo_vm_update_pte(parser, vm);
if (r) {
goto out;
}
radeon_cs_sync_rings(parser);
radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
- radeon_semaphore_sync_to(parser->ib.semaphore,
- radeon_vm_grab_id(rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
@@ -423,14 +516,8 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
r = radeon_ib_schedule(rdev, &parser->ib, NULL);
}
- if (!r) {
- radeon_vm_fence(rdev, vm, parser->ib.fence);
- }
-
out:
- radeon_vm_add_to_lru(rdev, vm);
mutex_unlock(&vm->mutex);
- mutex_unlock(&rdev->vm_manager.lock);
return r;
}
@@ -698,9 +785,9 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
/* FIXME: we assume reloc size is 4 dwords */
if (nomm) {
*cs_reloc = p->relocs;
- (*cs_reloc)->lobj.gpu_offset =
+ (*cs_reloc)->gpu_offset =
(u64)relocs_chunk->kdata[idx + 3] << 32;
- (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+ (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
} else
*cs_reloc = p->relocs_ptr[(idx / 4)];
return 0;
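
The relocation handling above replaces the old radeon_bo_list with a stable bucket sort keyed on a small priority value. The snippet below merely restates that priority formula in isolation with a few worked values; it is not additional driver code.

/* Priority as computed in radeon_cs_parser_relocs() above:
 *   userspace passes 0..15 in the low bits of r->flags,
 *   write relocations land one slot above the matching read-only slot,
 *   and 32 (RADEON_CS_MAX_PRIORITY) is reserved for the kernel (UVD msg).
 *
 *   flags = 0x0, read only -> 0
 *   flags = 0xf, read only -> 30
 *   flags = 0xf, write     -> 31
 */
static unsigned example_reloc_priority(uint32_t flags, uint32_t write_domain)
{
	return (flags & 0xf) * 2 + !!write_domain;
}
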
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 044bc98fb459..835516d2d257 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1191,14 +1191,12 @@ int radeon_device_init(struct radeon_device *rdev,
r = radeon_gem_init(rdev);
if (r)
return r;
- /* initialize vm here */
- mutex_init(&rdev->vm_manager.lock);
+
/* Adjust VM size here.
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
rdev->vm_manager.max_pfn = 1 << 20;
- INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
/* Set asic functions */
r = radeon_asic_init(rdev);
@@ -1426,7 +1424,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
+ struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
struct radeon_bo *robj;
if (rfb == NULL || rfb->obj == NULL) {
@@ -1445,10 +1443,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
- mutex_lock(&rdev->ring_lock);
/* wait for gpu to finish processing current batch */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
- r = radeon_fence_wait_empty_locked(rdev, i);
+ r = radeon_fence_wait_empty(rdev, i);
if (r) {
/* delay GPU reset to resume */
force_completion = true;
@@ -1457,7 +1454,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
if (force_completion) {
radeon_fence_driver_force_completion(rdev);
}
- mutex_unlock(&rdev->ring_lock);
radeon_save_bios_scratch_regs(rdev);
@@ -1555,10 +1551,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
/* reset hpd state */
radeon_hpd_init(rdev);
/* blat the mode back in */
- drm_helper_resume_force_mode(dev);
- /* turn on display hw */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ if (fbcon) {
+ drm_helper_resume_force_mode(dev);
+ /* turn on display hw */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
}
drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index fbd8b930f2be..386cfa4c194d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -34,6 +34,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <linux/gcd.h>
+
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -369,7 +371,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->rdev = rdev;
work->crtc_id = radeon_crtc->crtc_id;
- old_radeon_fb = to_radeon_framebuffer(crtc->fb);
+ old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
obj = old_radeon_fb->obj;
@@ -460,7 +462,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
/* update crtc fb */
- crtc->fb = fb;
+ crtc->primary->fb = fb;
r = drm_vblank_get(dev, radeon_crtc->crtc_id);
if (r) {
@@ -792,6 +794,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+ drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
@@ -799,66 +802,57 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
}
/* avivo */
-static void avivo_get_fb_div(struct radeon_pll *pll,
- u32 target_clock,
- u32 post_div,
- u32 ref_div,
- u32 *fb_div,
- u32 *frac_fb_div)
-{
- u32 tmp = post_div * ref_div;
- tmp *= target_clock;
- *fb_div = tmp / pll->reference_freq;
- *frac_fb_div = tmp % pll->reference_freq;
-
- if (*fb_div > pll->max_feedback_div)
- *fb_div = pll->max_feedback_div;
- else if (*fb_div < pll->min_feedback_div)
- *fb_div = pll->min_feedback_div;
-}
-
-static u32 avivo_get_post_div(struct radeon_pll *pll,
- u32 target_clock)
+/**
+ * avivo_reduce_ratio - fractional number reduction
+ *
+ * @nom: numerator
+ * @den: denominator
+ * @nom_min: minimum value for the numerator
+ * @den_min: minimum value for the denominator
+ *
+ * Find the greatest common divisor and apply it to both numerator and
+ * denominator, but make sure that numerator and denominator are at least
+ * as large as their minimum values.
+ */
+static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
+ unsigned nom_min, unsigned den_min)
{
- u32 vco, post_div, tmp;
-
- if (pll->flags & RADEON_PLL_USE_POST_DIV)
- return pll->post_div;
-
- if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
- if (pll->flags & RADEON_PLL_IS_LCD)
- vco = pll->lcd_pll_out_min;
- else
- vco = pll->pll_out_min;
- } else {
- if (pll->flags & RADEON_PLL_IS_LCD)
- vco = pll->lcd_pll_out_max;
- else
- vco = pll->pll_out_max;
+ unsigned tmp;
+
+ /* reduce the numbers to a simpler ratio */
+ tmp = gcd(*nom, *den);
+ *nom /= tmp;
+ *den /= tmp;
+
+ /* make sure the numerator is large enough */
+ if (*nom < nom_min) {
+ tmp = (nom_min + *nom - 1) / *nom;
+ *nom *= tmp;
+ *den *= tmp;
}
- post_div = vco / target_clock;
- tmp = vco % target_clock;
-
- if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
- if (tmp)
- post_div++;
- } else {
- if (!tmp)
- post_div--;
+ /* make sure the denominator is large enough */
+ if (*den < den_min) {
+ tmp = (den_min + *den - 1) / *den;
+ *nom *= tmp;
+ *den *= tmp;
}
-
- if (post_div > pll->max_post_div)
- post_div = pll->max_post_div;
- else if (post_div < pll->min_post_div)
- post_div = pll->min_post_div;
-
- return post_div;
}
-#define MAX_TOLERANCE 10
-
+/**
+ * radeon_compute_pll_avivo - compute PLL parameters
+ *
+ * @pll: information about the PLL
+ * @dot_clock_p: resulting pixel clock
+ * @fb_div_p: resulting feedback divider
+ * @frac_fb_div_p: fractional part of the feedback divider
+ * @ref_div_p: resulting reference divider
+ * @post_div_p: resulting post divider
+ *
+ * Try to calculate the PLL parameters to generate the given frequency:
+ * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
+ */
void radeon_compute_pll_avivo(struct radeon_pll *pll,
u32 freq,
u32 *dot_clock_p,
@@ -867,53 +861,123 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
u32 *ref_div_p,
u32 *post_div_p)
{
- u32 target_clock = freq / 10;
- u32 post_div = avivo_get_post_div(pll, target_clock);
- u32 ref_div = pll->min_ref_div;
- u32 fb_div = 0, frac_fb_div = 0, tmp;
+ unsigned fb_div_min, fb_div_max, fb_div;
+ unsigned post_div_min, post_div_max, post_div;
+ unsigned ref_div_min, ref_div_max, ref_div;
+ unsigned post_div_best, diff_best;
+ unsigned nom, den, tmp;
- if (pll->flags & RADEON_PLL_USE_REF_DIV)
- ref_div = pll->reference_div;
+ /* determine allowed feedback divider range */
+ fb_div_min = pll->min_feedback_div;
+ fb_div_max = pll->max_feedback_div;
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
- avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
- frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
- if (frac_fb_div >= 5) {
- frac_fb_div -= 5;
- frac_fb_div = frac_fb_div / 10;
- frac_fb_div++;
+ fb_div_min *= 10;
+ fb_div_max *= 10;
+ }
+
+ /* determine allowed ref divider range */
+ if (pll->flags & RADEON_PLL_USE_REF_DIV)
+ ref_div_min = pll->reference_div;
+ else
+ ref_div_min = pll->min_ref_div;
+ ref_div_max = pll->max_ref_div;
+
+ /* determine allowed post divider range */
+ if (pll->flags & RADEON_PLL_USE_POST_DIV) {
+ post_div_min = pll->post_div;
+ post_div_max = pll->post_div;
+ } else {
+ unsigned target_clock = freq / 10;
+ unsigned vco_min, vco_max;
+
+ if (pll->flags & RADEON_PLL_IS_LCD) {
+ vco_min = pll->lcd_pll_out_min;
+ vco_max = pll->lcd_pll_out_max;
+ } else {
+ vco_min = pll->pll_out_min;
+ vco_max = pll->pll_out_max;
}
- if (frac_fb_div >= 10) {
- fb_div++;
- frac_fb_div = 0;
+
+ post_div_min = vco_min / target_clock;
+ if ((target_clock * post_div_min) < vco_min)
+ ++post_div_min;
+ if (post_div_min < pll->min_post_div)
+ post_div_min = pll->min_post_div;
+
+ post_div_max = vco_max / target_clock;
+ if ((target_clock * post_div_max) > vco_max)
+ --post_div_max;
+ if (post_div_max > pll->max_post_div)
+ post_div_max = pll->max_post_div;
+ }
+
+ /* represent the searched ratio as fractional number */
+ nom = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ? freq : freq / 10;
+ den = pll->reference_freq;
+
+ /* reduce the numbers to a simpler ratio */
+ avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min);
+
+ /* now search for a post divider */
+ if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
+ post_div_best = post_div_min;
+ else
+ post_div_best = post_div_max;
+ diff_best = ~0;
+
+ for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
+ unsigned diff = abs(den - den / post_div * post_div);
+ if (diff < diff_best || (diff == diff_best &&
+ !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {
+
+ post_div_best = post_div;
+ diff_best = diff;
}
+ }
+ post_div = post_div_best;
+
+ /* get matching reference and feedback divider */
+ ref_div = max(den / post_div, 1u);
+ fb_div = nom;
+
+ /* we're almost done, but reference and feedback
+ divider might be too large now */
+
+ tmp = ref_div;
+
+ if (fb_div > fb_div_max) {
+ ref_div = ref_div * fb_div_max / fb_div;
+ fb_div = fb_div_max;
+ }
+
+ if (ref_div > ref_div_max) {
+ ref_div = ref_div_max;
+ fb_div = nom * ref_div_max / tmp;
+ }
+
+ /* reduce the numbers to a simpler ratio once more */
+ /* this also makes sure that the reference divider is large enough */
+ avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
+
+ /* and finally save the result */
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ *fb_div_p = fb_div / 10;
+ *frac_fb_div_p = fb_div % 10;
} else {
- while (ref_div <= pll->max_ref_div) {
- avivo_get_fb_div(pll, target_clock, post_div, ref_div,
- &fb_div, &frac_fb_div);
- if (frac_fb_div >= (pll->reference_freq / 2))
- fb_div++;
- frac_fb_div = 0;
- tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
- tmp = (tmp * 10000) / target_clock;
-
- if (tmp > (10000 + MAX_TOLERANCE))
- ref_div++;
- else if (tmp >= (10000 - MAX_TOLERANCE))
- break;
- else
- ref_div++;
- }
+ *fb_div_p = fb_div;
+ *frac_fb_div_p = 0;
}
- *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
- (ref_div * post_div * 10);
- *fb_div_p = fb_div;
- *frac_fb_div_p = frac_fb_div;
+ *dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
+ (pll->reference_freq * *frac_fb_div_p)) /
+ (ref_div * post_div * 10);
*ref_div_p = ref_div;
*post_div_p = post_div;
- DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
- *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
+
+ DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+ freq, *dot_clock_p, *fb_div_p, *frac_fb_div_p,
+ ref_div, post_div);
}
/* pre-avivo */
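
radeon_compute_pll_avivo() now works on the reduced fraction freq/reference_freq instead of iterating over reference dividers. The standalone sketch below mirrors the avivo_reduce_ratio() step with a worked example; the numbers are arbitrary and only illustrate the reduction.

#include <linux/gcd.h>
#include <linux/kernel.h>

/* Same behaviour as avivo_reduce_ratio() above, shown in isolation.
 * Example: nom = 1485, den = 270, nom_min = 4, den_min = 1
 *   gcd(1485, 270) = 135  ->  nom = 11, den = 2
 *   both results already meet their minimum, so the target ratio
 *   freq / reference_freq is represented as 11 / 2 before a post
 *   divider is chosen. */
static void example_reduce_ratio(unsigned *nom, unsigned *den,
				 unsigned nom_min, unsigned den_min)
{
	unsigned tmp = gcd(*nom, *den);

	*nom /= tmp;
	*den /= tmp;

	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}
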
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index f633c2782170..d0eba48dd74e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -79,9 +79,11 @@
* 2.35.0 - Add CIK macrotile mode array query
* 2.36.0 - Fix CIK DCE tiling setup
* 2.37.0 - allow GS ring setup on r6xx/r7xx
+ * 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
+ * CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 37
+#define KMS_DRIVER_MINOR 38
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index c37cb79a9489..a77b1c13ea43 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -288,7 +288,6 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
* @rdev: radeon device pointer
* @target_seq: sequence number(s) we want to wait for
* @intr: use interruptible sleep
- * @lock_ring: whether the ring should be locked or not
*
* Wait for the requested sequence number(s) to be written by any ring
* (all asics). Sequence number array is indexed by ring id.
@@ -299,7 +298,7 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
* -EDEADLK is returned when a GPU lockup has been detected.
*/
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
- bool intr, bool lock_ring)
+ bool intr)
{
uint64_t last_seq[RADEON_NUM_RINGS];
bool signaled;
@@ -358,9 +357,6 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
if (i != RADEON_NUM_RINGS)
continue;
- if (lock_ring)
- mutex_lock(&rdev->ring_lock);
-
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i])
continue;
@@ -378,14 +374,9 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
/* remember that we need a reset */
rdev->needs_reset = true;
- if (lock_ring)
- mutex_unlock(&rdev->ring_lock);
wake_up_all(&rdev->fence_queue);
return -EDEADLK;
}
-
- if (lock_ring)
- mutex_unlock(&rdev->ring_lock);
}
}
return 0;
@@ -416,7 +407,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
return 0;
- r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
+ r = radeon_fence_wait_seq(fence->rdev, seq, intr);
if (r)
return r;
@@ -464,7 +455,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
if (num_rings == 0)
return -ENOENT;
- r = radeon_fence_wait_seq(rdev, seq, intr, true);
+ r = radeon_fence_wait_seq(rdev, seq, intr);
if (r) {
return r;
}
@@ -472,37 +463,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
}
/**
- * radeon_fence_wait_locked - wait for a fence to signal
- *
- * @fence: radeon fence object
- *
- * Wait for the requested fence to signal (all asics).
- * Returns 0 if the fence has passed, error for all other cases.
- */
-int radeon_fence_wait_locked(struct radeon_fence *fence)
-{
- uint64_t seq[RADEON_NUM_RINGS] = {};
- int r;
-
- if (fence == NULL) {
- WARN(1, "Querying an invalid fence : %p !\n", fence);
- return -EINVAL;
- }
-
- seq[fence->ring] = fence->seq;
- if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
- return 0;
-
- r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
- if (r)
- return r;
-
- fence->seq = RADEON_FENCE_SIGNALED_SEQ;
- return 0;
-}
-
-/**
- * radeon_fence_wait_next_locked - wait for the next fence to signal
+ * radeon_fence_wait_next - wait for the next fence to signal
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
@@ -511,7 +472,7 @@ int radeon_fence_wait_locked(struct radeon_fence *fence)
* Returns 0 if the next fence has passed, error for all other cases.
* Caller must hold ring lock.
*/
-int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
@@ -521,11 +482,11 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
already the last emitted fence */
return -ENOENT;
}
- return radeon_fence_wait_seq(rdev, seq, false, false);
+ return radeon_fence_wait_seq(rdev, seq, false);
}
/**
- * radeon_fence_wait_empty_locked - wait for all fences to signal
+ * radeon_fence_wait_empty - wait for all fences to signal
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
@@ -534,7 +495,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
* Returns 0 if the fences have passed, error for all other cases.
* Caller must hold ring lock.
*/
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
int r;
@@ -543,7 +504,7 @@ int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
if (!seq[ring])
return 0;
- r = radeon_fence_wait_seq(rdev, seq, false, false);
+ r = radeon_fence_wait_seq(rdev, seq, false);
if (r) {
if (r == -EDEADLK)
return -EDEADLK;
@@ -794,7 +755,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
- r = radeon_fence_wait_empty_locked(rdev, ring);
+ r = radeon_fence_wait_empty(rdev, ring);
if (r) {
/* no need to trigger GPU reset as we are unloading */
radeon_fence_driver_force_completion(rdev);
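
With the _locked suffixes gone, callers that need every ring to drain simply loop over RADEON_NUM_RINGS without taking the ring mutex around the wait, as the suspend and fini paths above now do. A condensed sketch of that pattern follows; the function name is made up for illustration.

/* Sketch only: drain all rings, forcing completion if a wait fails. */
static void example_drain_all_rings(struct radeon_device *rdev)
{
	bool force_completion = false;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		if (radeon_fence_wait_empty(rdev, i))
			force_completion = true; /* e.g. -EDEADLK on lockup */
	}
	if (force_completion)
		radeon_fence_driver_force_completion(rdev);
}
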
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a8f9b463bf2a..2e723651069b 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -28,8 +28,6 @@
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
-#include "radeon_reg.h"
-#include "radeon_trace.h"
/*
* GART
@@ -394,959 +392,3 @@ void radeon_gart_fini(struct radeon_device *rdev)
radeon_dummy_page_fini(rdev);
}
-
-/*
- * GPUVM
- * GPUVM is similar to the legacy gart on older asics, however
- * rather than there being a single global gart table
- * for the entire GPU, there are multiple VM page tables active
- * at any given time. The VM page tables can contain a mix
- * vram pages and system memory pages and system memory pages
- * can be mapped as snooped (cached system pages) or unsnooped
- * (uncached system pages).
- * Each VM has an ID associated with it and there is a page table
- * associated with each VMID. When execting a command buffer,
- * the kernel tells the the ring what VMID to use for that command
- * buffer. VMIDs are allocated dynamically as commands are submitted.
- * The userspace drivers maintain their own address space and the kernel
- * sets up their pages tables accordingly when they submit their
- * command buffers and a VMID is assigned.
- * Cayman/Trinity support up to 8 active VMs at any given time;
- * SI supports 16.
- */
-
-/*
- * vm helpers
- *
- * TODO bind a default page at vm initialization for default address
- */
-
-/**
- * radeon_vm_num_pde - return the number of page directory entries
- *
- * @rdev: radeon_device pointer
- *
- * Calculate the number of page directory entries (cayman+).
- */
-static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
-{
- return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
-}
-
-/**
- * radeon_vm_directory_size - returns the size of the page directory in bytes
- *
- * @rdev: radeon_device pointer
- *
- * Calculate the size of the page directory in bytes (cayman+).
- */
-static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
-{
- return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
-}
-
-/**
- * radeon_vm_manager_init - init the vm manager
- *
- * @rdev: radeon_device pointer
- *
- * Init the vm manager (cayman+).
- * Returns 0 for success, error for failure.
- */
-int radeon_vm_manager_init(struct radeon_device *rdev)
-{
- struct radeon_vm *vm;
- struct radeon_bo_va *bo_va;
- int r;
- unsigned size;
-
- if (!rdev->vm_manager.enabled) {
- /* allocate enough for 2 full VM pts */
- size = radeon_vm_directory_size(rdev);
- size += rdev->vm_manager.max_pfn * 8;
- size *= 2;
- r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- RADEON_GPU_PAGE_ALIGN(size),
- RADEON_VM_PTB_ALIGN_SIZE,
- RADEON_GEM_DOMAIN_VRAM);
- if (r) {
- dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
- (rdev->vm_manager.max_pfn * 8) >> 10);
- return r;
- }
-
- r = radeon_asic_vm_init(rdev);
- if (r)
- return r;
-
- rdev->vm_manager.enabled = true;
-
- r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
- if (r)
- return r;
- }
-
- /* restore page table */
- list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
- if (vm->page_directory == NULL)
- continue;
-
- list_for_each_entry(bo_va, &vm->va, vm_list) {
- bo_va->valid = false;
- }
- }
- return 0;
-}
-
-/**
- * radeon_vm_free_pt - free the page table for a specific vm
- *
- * @rdev: radeon_device pointer
- * @vm: vm to unbind
- *
- * Free the page table of a specific vm (cayman+).
- *
- * Global and local mutex must be lock!
- */
-static void radeon_vm_free_pt(struct radeon_device *rdev,
- struct radeon_vm *vm)
-{
- struct radeon_bo_va *bo_va;
- int i;
-
- if (!vm->page_directory)
- return;
-
- list_del_init(&vm->list);
- radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-
- list_for_each_entry(bo_va, &vm->va, vm_list) {
- bo_va->valid = false;
- }
-
- if (vm->page_tables == NULL)
- return;
-
- for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
- radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
-
- kfree(vm->page_tables);
-}
-
-/**
- * radeon_vm_manager_fini - tear down the vm manager
- *
- * @rdev: radeon_device pointer
- *
- * Tear down the VM manager (cayman+).
- */
-void radeon_vm_manager_fini(struct radeon_device *rdev)
-{
- struct radeon_vm *vm, *tmp;
- int i;
-
- if (!rdev->vm_manager.enabled)
- return;
-
- mutex_lock(&rdev->vm_manager.lock);
- /* free all allocated page tables */
- list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
- mutex_lock(&vm->mutex);
- radeon_vm_free_pt(rdev, vm);
- mutex_unlock(&vm->mutex);
- }
- for (i = 0; i < RADEON_NUM_VM; ++i) {
- radeon_fence_unref(&rdev->vm_manager.active[i]);
- }
- radeon_asic_vm_fini(rdev);
- mutex_unlock(&rdev->vm_manager.lock);
-
- radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
- radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
- rdev->vm_manager.enabled = false;
-}
-
-/**
- * radeon_vm_evict - evict page table to make room for new one
- *
- * @rdev: radeon_device pointer
- * @vm: VM we want to allocate something for
- *
- * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
- * Returns 0 for success, -ENOMEM for failure.
- *
- * Global and local mutex must be locked!
- */
-static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- struct radeon_vm *vm_evict;
-
- if (list_empty(&rdev->vm_manager.lru_vm))
- return -ENOMEM;
-
- vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
- struct radeon_vm, list);
- if (vm_evict == vm)
- return -ENOMEM;
-
- mutex_lock(&vm_evict->mutex);
- radeon_vm_free_pt(rdev, vm_evict);
- mutex_unlock(&vm_evict->mutex);
- return 0;
-}
-
-/**
- * radeon_vm_alloc_pt - allocates a page table for a VM
- *
- * @rdev: radeon_device pointer
- * @vm: vm to bind
- *
- * Allocate a page table for the requested vm (cayman+).
- * Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
- */
-int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- unsigned pd_size, pd_entries, pts_size;
- struct radeon_ib ib;
- int r;
-
- if (vm == NULL) {
- return -EINVAL;
- }
-
- if (vm->page_directory != NULL) {
- return 0;
- }
-
- pd_size = radeon_vm_directory_size(rdev);
- pd_entries = radeon_vm_num_pdes(rdev);
-
-retry:
- r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
- &vm->page_directory, pd_size,
- RADEON_VM_PTB_ALIGN_SIZE, false);
- if (r == -ENOMEM) {
- r = radeon_vm_evict(rdev, vm);
- if (r)
- return r;
- goto retry;
-
- } else if (r) {
- return r;
- }
-
- vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
-
- /* Initially clear the page directory */
- r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
- NULL, pd_entries * 2 + 64);
- if (r) {
- radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
- return r;
- }
-
- ib.length_dw = 0;
-
- radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
- 0, pd_entries, 0, 0);
-
- radeon_semaphore_sync_to(ib.semaphore, vm->fence);
- r = radeon_ib_schedule(rdev, &ib, NULL);
- if (r) {
- radeon_ib_free(rdev, &ib);
- radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
- return r;
- }
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(ib.fence);
- radeon_ib_free(rdev, &ib);
- radeon_fence_unref(&vm->last_flush);
-
- /* allocate page table array */
- pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
- vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
-
- if (vm->page_tables == NULL) {
- DRM_ERROR("Cannot allocate memory for page table array\n");
- radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * radeon_vm_add_to_lru - add VMs page table to LRU list
- *
- * @rdev: radeon_device pointer
- * @vm: vm to add to LRU
- *
- * Add the allocated page table to the LRU list (cayman+).
- *
- * Global mutex must be locked!
- */
-void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- list_del_init(&vm->list);
- list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-}
-
-/**
- * radeon_vm_grab_id - allocate the next free VMID
- *
- * @rdev: radeon_device pointer
- * @vm: vm to allocate id for
- * @ring: ring we want to submit job to
- *
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
- *
- * Global and local mutex must be locked!
- */
-struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
- struct radeon_vm *vm, int ring)
-{
- struct radeon_fence *best[RADEON_NUM_RINGS] = {};
- unsigned choices[2] = {};
- unsigned i;
-
- /* check if the id is still valid */
- if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
- return NULL;
-
- /* we definately need to flush */
- radeon_fence_unref(&vm->last_flush);
-
- /* skip over VMID 0, since it is the system VM */
- for (i = 1; i < rdev->vm_manager.nvm; ++i) {
- struct radeon_fence *fence = rdev->vm_manager.active[i];
-
- if (fence == NULL) {
- /* found a free one */
- vm->id = i;
- trace_radeon_vm_grab_id(vm->id, ring);
- return NULL;
- }
-
- if (radeon_fence_is_earlier(fence, best[fence->ring])) {
- best[fence->ring] = fence;
- choices[fence->ring == ring ? 0 : 1] = i;
- }
- }
-
- for (i = 0; i < 2; ++i) {
- if (choices[i]) {
- vm->id = choices[i];
- trace_radeon_vm_grab_id(vm->id, ring);
- return rdev->vm_manager.active[choices[i]];
- }
- }
-
- /* should never happen */
- BUG();
- return NULL;
-}
-
-/**
- * radeon_vm_fence - remember fence for vm
- *
- * @rdev: radeon_device pointer
- * @vm: vm we want to fence
- * @fence: fence to remember
- *
- * Fence the vm (cayman+).
- * Set the fence used to protect page table and id.
- *
- * Global and local mutex must be locked!
- */
-void radeon_vm_fence(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_fence *fence)
-{
- radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
- rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
-
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(fence);
-
- radeon_fence_unref(&vm->last_id_use);
- vm->last_id_use = radeon_fence_ref(fence);
-}
-
-/**
- * radeon_vm_bo_find - find the bo_va for a specific vm & bo
- *
- * @vm: requested vm
- * @bo: requested buffer object
- *
- * Find @bo inside the requested vm (cayman+).
- * Search inside the @bos vm list for the requested vm
- * Returns the found bo_va or NULL if none is found
- *
- * Object has to be reserved!
- */
-struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
- struct radeon_bo *bo)
-{
- struct radeon_bo_va *bo_va;
-
- list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->vm == vm) {
- return bo_va;
- }
- }
- return NULL;
-}
-
-/**
- * radeon_vm_bo_add - add a bo to a specific vm
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
- *
- * Add @bo into the requested vm (cayman+).
- * Add @bo to the list of bos associated with the vm
- * Returns newly added bo_va or NULL for failure
- *
- * Object has to be reserved!
- */
-struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo)
-{
- struct radeon_bo_va *bo_va;
-
- bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (bo_va == NULL) {
- return NULL;
- }
- bo_va->vm = vm;
- bo_va->bo = bo;
- bo_va->soffset = 0;
- bo_va->eoffset = 0;
- bo_va->flags = 0;
- bo_va->valid = false;
- bo_va->ref_count = 1;
- INIT_LIST_HEAD(&bo_va->bo_list);
- INIT_LIST_HEAD(&bo_va->vm_list);
-
- mutex_lock(&vm->mutex);
- list_add(&bo_va->vm_list, &vm->va);
- list_add_tail(&bo_va->bo_list, &bo->va);
- mutex_unlock(&vm->mutex);
-
- return bo_va;
-}
-
-/**
- * radeon_vm_bo_set_addr - set bos virtual address inside a vm
- *
- * @rdev: radeon_device pointer
- * @bo_va: bo_va to store the address
- * @soffset: requested offset of the buffer in the VM address space
- * @flags: attributes of pages (read/write/valid/etc.)
- *
- * Set offset of @bo_va (cayman+).
- * Validate and set the offset requested within the vm address space.
- * Returns 0 for success, error for failure.
- *
- * Object has to be reserved!
- */
-int radeon_vm_bo_set_addr(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va,
- uint64_t soffset,
- uint32_t flags)
-{
- uint64_t size = radeon_bo_size(bo_va->bo);
- uint64_t eoffset, last_offset = 0;
- struct radeon_vm *vm = bo_va->vm;
- struct radeon_bo_va *tmp;
- struct list_head *head;
- unsigned last_pfn;
-
- if (soffset) {
- /* make sure object fit at this offset */
- eoffset = soffset + size;
- if (soffset >= eoffset) {
- return -EINVAL;
- }
-
- last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
- if (last_pfn > rdev->vm_manager.max_pfn) {
- dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
- last_pfn, rdev->vm_manager.max_pfn);
- return -EINVAL;
- }
-
- } else {
- eoffset = last_pfn = 0;
- }
-
- mutex_lock(&vm->mutex);
- head = &vm->va;
- last_offset = 0;
- list_for_each_entry(tmp, &vm->va, vm_list) {
- if (bo_va == tmp) {
- /* skip over currently modified bo */
- continue;
- }
-
- if (soffset >= last_offset && eoffset <= tmp->soffset) {
- /* bo can be added before this one */
- break;
- }
- if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
- /* bo and tmp overlap, invalid offset */
- dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
- bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
- (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
- mutex_unlock(&vm->mutex);
- return -EINVAL;
- }
- last_offset = tmp->eoffset;
- head = &tmp->vm_list;
- }
-
- bo_va->soffset = soffset;
- bo_va->eoffset = eoffset;
- bo_va->flags = flags;
- bo_va->valid = false;
- list_move(&bo_va->vm_list, head);
-
- mutex_unlock(&vm->mutex);
- return 0;
-}
-
-/**
- * radeon_vm_map_gart - get the physical address of a gart page
- *
- * @rdev: radeon_device pointer
- * @addr: the unmapped addr
- *
- * Look up the physical address of the page that the pte resolves
- * to (cayman+).
- * Returns the physical address of the page.
- */
-uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
-{
- uint64_t result;
-
- /* page table offset */
- result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
- /* in case cpu page size != gpu page size*/
- result |= addr & (~PAGE_MASK);
-
- return result;
-}
-
-/**
- * radeon_vm_page_flags - translate page flags to what the hw uses
- *
- * @flags: flags comming from userspace
- *
- * Translate the flags the userspace ABI uses to hw flags.
- */
-static uint32_t radeon_vm_page_flags(uint32_t flags)
-{
- uint32_t hw_flags = 0;
- hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
- hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
- hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- hw_flags |= R600_PTE_SYSTEM;
- hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
- }
- return hw_flags;
-}
-
-/**
- * radeon_vm_update_pdes - make sure that page directory is valid
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory (cayman+).
- * Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
- */
-static int radeon_vm_update_pdes(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_ib *ib,
- uint64_t start, uint64_t end)
-{
- static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
-
- uint64_t last_pde = ~0, last_pt = ~0;
- unsigned count = 0;
- uint64_t pt_idx;
- int r;
-
- start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
- end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
-
- /* walk over the address space and update the page directory */
- for (pt_idx = start; pt_idx <= end; ++pt_idx) {
- uint64_t pde, pt;
-
- if (vm->page_tables[pt_idx])
- continue;
-
-retry:
- r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
- &vm->page_tables[pt_idx],
- RADEON_VM_PTE_COUNT * 8,
- RADEON_GPU_PAGE_SIZE, false);
-
- if (r == -ENOMEM) {
- r = radeon_vm_evict(rdev, vm);
- if (r)
- return r;
- goto retry;
- } else if (r) {
- return r;
- }
-
- pde = vm->pd_gpu_addr + pt_idx * 8;
-
- pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
-
- if (((last_pde + 8 * count) != pde) ||
- ((last_pt + incr * count) != pt)) {
-
- if (count) {
- radeon_asic_vm_set_page(rdev, ib, last_pde,
- last_pt, count, incr,
- R600_PTE_VALID);
-
- count *= RADEON_VM_PTE_COUNT;
- radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
- count, 0, 0);
- }
-
- count = 1;
- last_pde = pde;
- last_pt = pt;
- } else {
- ++count;
- }
- }
-
- if (count) {
- radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
- incr, R600_PTE_VALID);
-
- count *= RADEON_VM_PTE_COUNT;
- radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
- count, 0, 0);
- }
-
- return 0;
-}
-
-/**
- * radeon_vm_update_ptes - make sure that page tables are valid
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @dst: destination address to map to
- * @flags: mapping flags
- *
- * Update the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
- */
-static void radeon_vm_update_ptes(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_ib *ib,
- uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags)
-{
- static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
-
- uint64_t last_pte = ~0, last_dst = ~0;
- unsigned count = 0;
- uint64_t addr;
-
- start = start / RADEON_GPU_PAGE_SIZE;
- end = end / RADEON_GPU_PAGE_SIZE;
-
- /* walk over the address space and update the page tables */
- for (addr = start; addr < end; ) {
- uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
- unsigned nptes;
- uint64_t pte;
-
- if ((addr & ~mask) == (end & ~mask))
- nptes = end - addr;
- else
- nptes = RADEON_VM_PTE_COUNT - (addr & mask);
-
- pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
- pte += (addr & mask) * 8;
-
- if ((last_pte + 8 * count) != pte) {
-
- if (count) {
- radeon_asic_vm_set_page(rdev, ib, last_pte,
- last_dst, count,
- RADEON_GPU_PAGE_SIZE,
- flags);
- }
-
- count = nptes;
- last_pte = pte;
- last_dst = dst;
- } else {
- count += nptes;
- }
-
- addr += nptes;
- dst += nptes * RADEON_GPU_PAGE_SIZE;
- }
-
- if (count) {
- radeon_asic_vm_set_page(rdev, ib, last_pte,
- last_dst, count,
- RADEON_GPU_PAGE_SIZE, flags);
- }
-}
-
-/**
- * radeon_vm_bo_update - map a bo into the vm page table
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
- * @mem: ttm mem
- *
- * Fill in the page table entries for @bo (cayman+).
- * Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved & global and local mutex must be locked!
- */
-int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- struct ttm_mem_reg *mem)
-{
- struct radeon_ib ib;
- struct radeon_bo_va *bo_va;
- unsigned nptes, npdes, ndw;
- uint64_t addr;
- int r;
-
- /* nothing to do if vm isn't bound */
- if (vm->page_directory == NULL)
- return 0;
-
- bo_va = radeon_vm_bo_find(vm, bo);
- if (bo_va == NULL) {
- dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
- return -EINVAL;
- }
-
- if (!bo_va->soffset) {
- dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
- bo, vm);
- return -EINVAL;
- }
-
- if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
- return 0;
-
- bo_va->flags &= ~RADEON_VM_PAGE_VALID;
- bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
- if (mem) {
- addr = mem->start << PAGE_SHIFT;
- if (mem->mem_type != TTM_PL_SYSTEM) {
- bo_va->flags |= RADEON_VM_PAGE_VALID;
- bo_va->valid = true;
- }
- if (mem->mem_type == TTM_PL_TT) {
- bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
- } else {
- addr += rdev->vm_manager.vram_base_offset;
- }
- } else {
- addr = 0;
- bo_va->valid = false;
- }
-
- trace_radeon_vm_bo_update(bo_va);
-
- nptes = radeon_bo_ngpu_pages(bo);
-
- /* assume two extra pdes in case the mapping overlaps the borders */
- npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
-
- /* padding, etc. */
- ndw = 64;
-
- if (RADEON_VM_BLOCK_SIZE > 11)
- /* reserve space for one header for every 2k dwords */
- ndw += (nptes >> 11) * 4;
- else
- /* reserve space for one header for
- every (1 << BLOCK_SIZE) entries */
- ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
-
- /* reserve space for pte addresses */
- ndw += nptes * 2;
-
- /* reserve space for one header for every 2k dwords */
- ndw += (npdes >> 11) * 4;
-
- /* reserve space for pde addresses */
- ndw += npdes * 2;
-
- /* reserve space for clearing new page tables */
- ndw += npdes * 2 * RADEON_VM_PTE_COUNT;
-
- /* update too big for an IB */
- if (ndw > 0xfffff)
- return -ENOMEM;
-
- r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
- if (r)
- return r;
- ib.length_dw = 0;
-
- r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
- if (r) {
- radeon_ib_free(rdev, &ib);
- return r;
- }
-
- radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
- addr, radeon_vm_page_flags(bo_va->flags));
-
- radeon_semaphore_sync_to(ib.semaphore, vm->fence);
- r = radeon_ib_schedule(rdev, &ib, NULL);
- if (r) {
- radeon_ib_free(rdev, &ib);
- return r;
- }
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(ib.fence);
- radeon_ib_free(rdev, &ib);
- radeon_fence_unref(&vm->last_flush);
-
- return 0;
-}
-
-/**
- * radeon_vm_bo_rmv - remove a bo to a specific vm
- *
- * @rdev: radeon_device pointer
- * @bo_va: requested bo_va
- *
- * Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
- *
- * Object have to be reserved!
- */
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va)
-{
- int r = 0;
-
- mutex_lock(&rdev->vm_manager.lock);
- mutex_lock(&bo_va->vm->mutex);
- if (bo_va->soffset) {
- r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
- }
- mutex_unlock(&rdev->vm_manager.lock);
- list_del(&bo_va->vm_list);
- mutex_unlock(&bo_va->vm->mutex);
- list_del(&bo_va->bo_list);
-
- kfree(bo_va);
- return r;
-}
-
-/**
- * radeon_vm_bo_invalidate - mark the bo as invalid
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
- *
- * Mark @bo as invalid (cayman+).
- */
-void radeon_vm_bo_invalidate(struct radeon_device *rdev,
- struct radeon_bo *bo)
-{
- struct radeon_bo_va *bo_va;
-
- list_for_each_entry(bo_va, &bo->va, bo_list) {
- bo_va->valid = false;
- }
-}
-
-/**
- * radeon_vm_init - initialize a vm instance
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- *
- * Init @vm fields (cayman+).
- */
-void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- vm->id = 0;
- vm->fence = NULL;
- vm->last_flush = NULL;
- vm->last_id_use = NULL;
- mutex_init(&vm->mutex);
- INIT_LIST_HEAD(&vm->list);
- INIT_LIST_HEAD(&vm->va);
-}
-
-/**
- * radeon_vm_fini - tear down a vm instance
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- *
- * Tear down @vm (cayman+).
- * Unbind the VM and remove all bos from the vm bo list
- */
-void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- struct radeon_bo_va *bo_va, *tmp;
- int r;
-
- mutex_lock(&rdev->vm_manager.lock);
- mutex_lock(&vm->mutex);
- radeon_vm_free_pt(rdev, vm);
- mutex_unlock(&rdev->vm_manager.lock);
-
- if (!list_empty(&vm->va)) {
- dev_err(rdev->dev, "still active bo inside vm\n");
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
- list_del_init(&bo_va->vm_list);
- r = radeon_bo_reserve(bo_va->bo, false);
- if (!r) {
- list_del_init(&bo_va->bo_list);
- radeon_bo_unreserve(bo_va->bo);
- kfree(bo_va);
- }
- }
- radeon_fence_unref(&vm->fence);
- radeon_fence_unref(&vm->last_flush);
- radeon_fence_unref(&vm->last_id_use);
- mutex_unlock(&vm->mutex);
-}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index b96c819024b3..d09650c1d720 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -344,18 +344,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, &cur_placement, true);
- switch (cur_placement) {
- case TTM_PL_VRAM:
- args->domain = RADEON_GEM_DOMAIN_VRAM;
- break;
- case TTM_PL_TT:
- args->domain = RADEON_GEM_DOMAIN_GTT;
- break;
- case TTM_PL_SYSTEM:
- args->domain = RADEON_GEM_DOMAIN_CPU;
- default:
- break;
- }
+ args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
@@ -533,6 +522,42 @@ out:
return r;
}
+int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_op *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_bo *robj;
+ int r;
+
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ return -ENOENT;
+ }
+ robj = gem_to_radeon_bo(gobj);
+ r = radeon_bo_reserve(robj, false);
+ if (unlikely(r))
+ goto out;
+
+ switch (args->op) {
+ case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
+ args->value = robj->initial_domain;
+ break;
+ case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
+ robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
+ RADEON_GEM_DOMAIN_GTT |
+ RADEON_GEM_DOMAIN_CPU);
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ radeon_bo_unreserve(robj);
+out:
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+}
+
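For reference, the new ioctl is driven from userspace roughly as sketched below. This is only an illustration, not part of the patch; it assumes the matching UAPI additions from this series (struct drm_radeon_gem_op, RADEON_GEM_OP_GET_INITIAL_DOMAIN and the DRM_RADEON_GEM_OP command index in radeon_drm.h) plus libdrm's drmCommandWriteRead().

/* Hypothetical userspace probe of a BO's initial placement domain. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>            /* drmCommandWriteRead() */
#include <radeon_drm.h>         /* DRM_RADEON_GEM_OP, struct drm_radeon_gem_op */

/* Returns the RADEON_GEM_DOMAIN_* mask the buffer was created with,
 * or 0 if the query fails. */
static uint64_t radeon_query_initial_domain(int fd, uint32_t handle)
{
        struct drm_radeon_gem_op args;

        memset(&args, 0, sizeof(args));
        args.handle = handle;
        args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

        if (drmCommandWriteRead(fd, DRM_RADEON_GEM_OP, &args, sizeof(args)))
                return 0;
        return args.value;
}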
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 66ed3ea71440..3e49342a20e6 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -441,6 +441,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case RADEON_CS_RING_UVD:
*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
break;
+ case RADEON_CS_RING_VCE:
+ *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
+ break;
default:
return -EINVAL;
}
@@ -485,6 +488,27 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
else
*value = rdev->pm.default_sclk * 10;
break;
+ case RADEON_INFO_VCE_FW_VERSION:
+ *value = rdev->vce.fw_version;
+ break;
+ case RADEON_INFO_VCE_FB_VERSION:
+ *value = rdev->vce.fb_version;
+ break;
+ case RADEON_INFO_NUM_BYTES_MOVED:
+ value = (uint32_t*)&value64;
+ value_size = sizeof(uint64_t);
+ value64 = atomic64_read(&rdev->num_bytes_moved);
+ break;
+ case RADEON_INFO_VRAM_USAGE:
+ value = (uint32_t*)&value64;
+ value_size = sizeof(uint64_t);
+ value64 = atomic64_read(&rdev->vram_usage);
+ break;
+ case RADEON_INFO_GTT_USAGE:
+ value = (uint32_t*)&value64;
+ value_size = sizeof(uint64_t);
+ value64 = atomic64_read(&rdev->gtt_usage);
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
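The three new requests return 64-bit counters through the pointer passed in drm_radeon_info.value. A hedged userspace sketch, not part of the patch, assuming the RADEON_INFO_* defines added by this series are visible in radeon_drm.h:

/* Hypothetical userspace query of the new memory statistics. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>            /* drmCommandWriteRead() */
#include <radeon_drm.h>         /* DRM_RADEON_INFO, struct drm_radeon_info */

static int radeon_query_u64(int fd, uint32_t request, uint64_t *out)
{
        struct drm_radeon_info info;

        memset(&info, 0, sizeof(info));
        info.request = request;
        info.value = (uintptr_t)out;    /* kernel copies the result here */

        return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}

/* e.g.: uint64_t vram; radeon_query_u64(fd, RADEON_INFO_VRAM_USAGE, &vram); */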
@@ -543,7 +567,9 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
- radeon_vm_init(rdev, &fpriv->vm);
+ r = radeon_vm_init(rdev, &fpriv->vm);
+ if (r)
+ return r;
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r)
@@ -624,6 +650,7 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
if (rdev->cmask_filp == file_priv)
rdev->cmask_filp = NULL;
radeon_uvd_free_handles(rdev, file_priv);
+ radeon_vce_free_handles(rdev, file_priv);
}
/*
@@ -818,5 +845,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 0b158f98d287..cafb1ccf2ec3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -385,7 +385,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
DRM_DEBUG_KMS("\n");
/* no fb bound */
- if (!atomic && !crtc->fb) {
+ if (!atomic && !crtc->primary->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -395,8 +395,8 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
target_fb = fb;
}
else {
- radeon_fb = to_radeon_framebuffer(crtc->fb);
- target_fb = crtc->fb;
+ radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ target_fb = crtc->primary->fb;
}
switch (target_fb->bits_per_pixel) {
@@ -444,7 +444,7 @@ retry:
* We don't shutdown the display controller because new buffer
* will end up in same spot.
*/
- if (!atomic && fb && fb != crtc->fb) {
+ if (!atomic && fb && fb != crtc->primary->fb) {
struct radeon_bo *old_rbo;
unsigned long nsize, osize;
@@ -555,7 +555,7 @@ retry:
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
- if (!atomic && fb && fb != crtc->fb) {
+ if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
@@ -599,7 +599,7 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
}
}
- switch (crtc->fb->bits_per_pixel) {
+ switch (crtc->primary->fb->bits_per_pixel) {
case 8:
format = 2;
break;
@@ -1087,12 +1087,12 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
static void radeon_crtc_disable(struct drm_crtc *crtc)
{
radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->fb) {
+ if (crtc->primary->fb) {
int r;
struct radeon_framebuffer *radeon_fb;
struct radeon_bo *rbo;
- radeon_fb = to_radeon_framebuffer(crtc->fb);
+ radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r))
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 402dbe32c234..832d9fa1a4c4 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -192,6 +192,7 @@ struct radeon_i2c_chan {
struct i2c_algo_dp_aux_data dp;
} algo;
struct radeon_i2c_bus_rec rec;
+ struct drm_dp_aux aux;
};
/* mostly for macs, but really any system without connector tables */
@@ -690,6 +691,9 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
+extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
+ u8 power_state);
+extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
extern void radeon_atom_encoder_init(struct radeon_device *rdev);
extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 08595cf90b01..19bec0dbfa38 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -56,11 +56,36 @@ static void radeon_bo_clear_va(struct radeon_bo *bo)
}
}
+static void radeon_update_memory_usage(struct radeon_bo *bo,
+ unsigned mem_type, int sign)
+{
+ struct radeon_device *rdev = bo->rdev;
+ u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
+
+ switch (mem_type) {
+ case TTM_PL_TT:
+ if (sign > 0)
+ atomic64_add(size, &rdev->gtt_usage);
+ else
+ atomic64_sub(size, &rdev->gtt_usage);
+ break;
+ case TTM_PL_VRAM:
+ if (sign > 0)
+ atomic64_add(size, &rdev->vram_usage);
+ else
+ atomic64_sub(size, &rdev->vram_usage);
+ break;
+ }
+}
+
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct radeon_bo *bo;
bo = container_of(tbo, struct radeon_bo, tbo);
+
+ radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
@@ -79,7 +104,7 @@ bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
- u32 c = 0;
+ u32 c = 0, i;
rbo->placement.fpfn = 0;
rbo->placement.lpfn = 0;
@@ -106,6 +131,17 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
rbo->placement.num_busy_placement = c;
+
+ /*
+ * Use two-ended allocation depending on the buffer size to
+ * improve fragmentation quality.
+ * 512KB was measured to be the optimal threshold.
+ */
+ if (rbo->tbo.mem.size > 512 * 1024) {
+ for (i = 0; i < c; i++) {
+ rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
+ }
+ }
}
int radeon_bo_create(struct radeon_device *rdev,
@@ -120,7 +156,6 @@ int radeon_bo_create(struct radeon_device *rdev,
size = ALIGN(size, PAGE_SIZE);
- rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
if (kernel) {
type = ttm_bo_type_kernel;
} else if (sg) {
@@ -145,6 +180,9 @@ int radeon_bo_create(struct radeon_device *rdev,
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
INIT_LIST_HEAD(&bo->va);
+ bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
+ RADEON_GEM_DOMAIN_GTT |
+ RADEON_GEM_DOMAIN_CPU);
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
@@ -338,39 +376,105 @@ void radeon_bo_fini(struct radeon_device *rdev)
arch_phys_wc_del(rdev->mc.vram_mtrr);
}
-void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
- struct list_head *head)
+/* Returns how many bytes TTM can move per IB.
+ */
+static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
- if (lobj->written) {
- list_add(&lobj->tv.head, head);
- } else {
- list_add_tail(&lobj->tv.head, head);
- }
+ u64 real_vram_size = rdev->mc.real_vram_size;
+ u64 vram_usage = atomic64_read(&rdev->vram_usage);
+
+ /* This function is based on the current VRAM usage.
+ *
+ * - If all of VRAM is free, allow relocating the number of bytes that
+ * is equal to 1/4 of the size of VRAM for this IB.
+ *
+ * - If more than one half of VRAM is occupied, only allow relocating
+ * 1 MB of data for this IB.
+ *
+ * - From 0 to one half of used VRAM, the threshold decreases
+ * linearly.
+ * __________________
+ * 1/4 of -|\ |
+ * VRAM | \ |
+ * | \ |
+ * | \ |
+ * | \ |
+ * | \ |
+ * | \ |
+ * | \________|1 MB
+ * |----------------|
+ * VRAM 0 % 100 %
+ * used used
+ *
+ * Note: It's a threshold, not a limit. The threshold must be crossed
+ * for buffer relocations to stop, so any buffer of an arbitrary size
+ * can be moved as long as the threshold isn't crossed before
+ * the relocation takes place. We don't want to disable buffer
+ * relocations completely.
+ *
+ * The idea is that buffers should be placed in VRAM at creation time
+ * and TTM should only do a minimum number of relocations during
+ * command submission. In practice, you need to submit at least
+ * a dozen IBs to move all buffers to VRAM if they are in GTT.
+ *
+ * Also, things can get pretty crazy under memory pressure and actual
+ * VRAM usage can change a lot, so playing safe even at 50% does
+ * consistently increase performance.
+ */
+
+ u64 half_vram = real_vram_size >> 1;
+ u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
+ u64 bytes_moved_threshold = half_free_vram >> 1;
+ return max(bytes_moved_threshold, 1024*1024ull);
}
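The comment above describes a piecewise-linear budget; the arithmetic can be checked in isolation. A standalone sketch (plain C, not driver code) with two example fill levels of a hypothetical 2 GB card:

/* Standalone check of the threshold formula used above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t threshold_for_moves(uint64_t real_vram_size, uint64_t vram_usage)
{
        uint64_t half_vram = real_vram_size >> 1;
        uint64_t half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
        uint64_t threshold = half_free_vram >> 1;

        return threshold > 1024 * 1024 ? threshold : 1024 * 1024;
}

int main(void)
{
        uint64_t vram = 2048ULL << 20;  /* 2 GB of VRAM */

        /* empty VRAM: 1/4 of VRAM, i.e. 512 MB, may be moved per IB */
        printf("%llu MB\n", (unsigned long long)(threshold_for_moves(vram, 0) >> 20));
        /* half (or more) of VRAM used: clamped to the 1 MB floor */
        printf("%llu MB\n", (unsigned long long)(threshold_for_moves(vram, vram / 2) >> 20));
        return 0;
}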
-int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
+int radeon_bo_list_validate(struct radeon_device *rdev,
+ struct ww_acquire_ctx *ticket,
struct list_head *head, int ring)
{
- struct radeon_bo_list *lobj;
+ struct radeon_cs_reloc *lobj;
struct radeon_bo *bo;
- u32 domain;
int r;
+ u64 bytes_moved = 0, initial_bytes_moved;
+ u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
r = ttm_eu_reserve_buffers(ticket, head);
if (unlikely(r != 0)) {
return r;
}
+
list_for_each_entry(lobj, head, tv.head) {
- bo = lobj->bo;
+ bo = lobj->robj;
if (!bo->pin_count) {
- domain = lobj->domain;
-
+ u32 domain = lobj->domain;
+ u32 current_domain =
+ radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+ /* Check if this buffer will be moved and don't move it
+ * if we have moved too many buffers for this IB already.
+ *
+ * Note that this allows moving at least one buffer of
+ * any size, because it doesn't take the current "bo"
+ * into account. We don't want to disallow buffer moves
+ * completely.
+ */
+ if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+ (domain & current_domain) == 0 && /* will be moved */
+ bytes_moved > bytes_moved_threshold) {
+ /* don't move it */
+ domain = current_domain;
+ }
+
retry:
radeon_ttm_placement_from_domain(bo, domain);
if (ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_force_into_uvd_segment(bo);
- r = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
+
+ initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
+ initial_bytes_moved;
+
if (unlikely(r)) {
if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
domain = lobj->alt_domain;
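The decision of whether to honour a requested placement or leave a buffer where it is boils down to three conditions. A minimal sketch with illustrative domain bits (these are stand-ins, not the real RADEON_GEM_DOMAIN_* values):

/* Standalone sketch of the per-IB move throttling decision above. */
#include <stdbool.h>
#include <stdint.h>

#define DOMAIN_CPU      (1u << 0)
#define DOMAIN_GTT      (1u << 1)
#define DOMAIN_VRAM     (1u << 2)

static uint32_t pick_domain(uint32_t wanted, uint32_t current,
                            uint64_t bytes_moved, uint64_t threshold)
{
        /* a move happens when the current placement isn't CPU and doesn't
         * overlap the requested domain at all */
        bool would_move = current != DOMAIN_CPU && (wanted & current) == 0;

        /* once the byte budget is spent, keep the buffer where it is */
        if (would_move && bytes_moved > threshold)
                return current;
        return wanted;
}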
@@ -564,14 +668,23 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *new_mem)
{
struct radeon_bo *rbo;
+
if (!radeon_ttm_bo_is_radeon_bo(bo))
return;
+
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 1);
radeon_vm_bo_invalidate(rbo->rdev, rbo);
+
+ /* update statistics */
+ if (!new_mem)
+ return;
+
+ radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
+ radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 209b11150263..9e7b25a0629d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -138,9 +138,8 @@ extern int radeon_bo_evict_vram(struct radeon_device *rdev);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
-extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
- struct list_head *head);
-extern int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
+extern int radeon_bo_list_validate(struct radeon_device *rdev,
+ struct ww_acquire_ctx *ticket,
struct list_head *head, int ring);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
@@ -151,7 +150,7 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
+ struct ttm_mem_reg *new_mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
@@ -181,7 +180,7 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align, bool block);
+ unsigned size, unsigned align);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
struct radeon_sa_bo **sa_bo,
struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8e8153e471c2..ee738a524639 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -260,7 +260,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
if (!ring->ready) {
continue;
}
- r = radeon_fence_wait_empty_locked(rdev, i);
+ r = radeon_fence_wait_empty(rdev, i);
if (r) {
/* needs a GPU reset dont reset here */
mutex_unlock(&rdev->ring_lock);
@@ -826,6 +826,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
/* no need to reprogram if nothing changed unless we are on BTC+ */
if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
+ /* vce just modifies an existing state so force a change */
+ if (ps->vce_active != rdev->pm.dpm.vce_active)
+ goto force;
if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
/* for pre-BTC and APUs if the num crtcs changed but state is the same,
* all we need to do is update the display configuration.
@@ -862,16 +865,21 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
}
}
+force:
if (radeon_dpm == 1) {
printk("switching from power state:\n");
radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
printk("switching to power state:\n");
radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
}
+
mutex_lock(&rdev->ddev->struct_mutex);
down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
+ /* update whether vce is active */
+ ps->vce_active = rdev->pm.dpm.vce_active;
+
ret = radeon_dpm_pre_set_power_state(rdev);
if (ret)
goto done;
@@ -888,7 +896,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
for (i = 0; i < RADEON_NUM_RINGS; i++) {
struct radeon_ring *ring = &rdev->ring[i];
if (ring->ready)
- radeon_fence_wait_empty_locked(rdev, i);
+ radeon_fence_wait_empty(rdev, i);
}
/* program the new power state */
@@ -935,8 +943,6 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
if (enable) {
mutex_lock(&rdev->pm.mutex);
rdev->pm.dpm.uvd_active = true;
- /* disable this for now */
-#if 0
if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -946,7 +952,6 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
else
-#endif
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
rdev->pm.dpm.state = dpm_state;
mutex_unlock(&rdev->pm.mutex);
@@ -960,6 +965,23 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
}
}
+void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
+{
+ if (enable) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
+ mutex_unlock(&rdev->pm.mutex);
+ } else {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.dpm.vce_active = false;
+ mutex_unlock(&rdev->pm.mutex);
+ }
+
+ radeon_pm_compute_clocks(rdev);
+}
+
static void radeon_pm_suspend_old(struct radeon_device *rdev)
{
mutex_lock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 15e44a7281ab..f8050f5429e2 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -63,7 +63,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
{
int r;
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
if (r) {
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
@@ -145,6 +145,13 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
return r;
}
+ /* grab a vm id if necessary */
+ if (ib->vm) {
+ struct radeon_fence *vm_id_fence;
+ vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
+ radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
+ }
+
/* sync with other rings */
r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
if (r) {
@@ -153,11 +160,9 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
return r;
}
- /* if we can't remember our last VM flush then flush now! */
- /* XXX figure out why we have to flush for every IB */
- if (ib->vm /*&& !ib->vm->last_flush*/) {
- radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
- }
+ if (ib->vm)
+ radeon_vm_flush(rdev, ib->vm, ib->ring);
+
if (const_ib) {
radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
@@ -172,10 +177,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
if (const_ib) {
const_ib->fence = radeon_fence_ref(ib->fence);
}
- /* we just flushed the VM, remember that */
- if (ib->vm && !ib->vm->last_flush) {
- ib->vm->last_flush = radeon_fence_ref(ib->fence);
- }
+
+ if (ib->vm)
+ radeon_vm_fence(rdev, ib->vm, ib->fence);
+
radeon_ring_unlock_commit(rdev, ring);
return 0;
}
@@ -257,6 +262,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
r = radeon_ib_test(rdev, i, ring);
if (r) {
ring->ready = false;
+ rdev->needs_reset = false;
if (i == RADEON_RING_TYPE_GFX_INDEX) {
/* oh, oh, that's really bad */
@@ -342,13 +348,17 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
*/
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
- ring->rptr = radeon_ring_get_rptr(rdev, ring);
+ uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
+
/* This works because ring_size is a power of 2 */
- ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
+ ring->ring_free_dw = rptr + (ring->ring_size / 4);
ring->ring_free_dw -= ring->wptr;
ring->ring_free_dw &= ring->ptr_mask;
if (!ring->ring_free_dw) {
+ /* this is an empty ring */
ring->ring_free_dw = ring->ring_size / 4;
+ /* update lockup info to avoid false positive */
+ radeon_ring_lockup_update(rdev, ring);
}
}
@@ -372,19 +382,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
/* Align requested size with padding so unlock_commit can
* pad safely */
radeon_ring_free_size(rdev, ring);
- if (ring->ring_free_dw == (ring->ring_size / 4)) {
- /* This is an empty ring update lockup info to avoid
- * false positive.
- */
- radeon_ring_lockup_update(ring);
- }
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
while (ndw > (ring->ring_free_dw - 1)) {
radeon_ring_free_size(rdev, ring);
if (ndw < ring->ring_free_dw) {
break;
}
- r = radeon_fence_wait_next_locked(rdev, ring->idx);
+ r = radeon_fence_wait_next(rdev, ring->idx);
if (r)
return r;
}
@@ -478,39 +482,17 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *rin
}
/**
- * radeon_ring_force_activity - add some nop packets to the ring
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Add some nop packets to the ring to force activity (all asics).
- * Used for lockup detection to see if the rptr is advancing.
- */
-void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- int r;
-
- radeon_ring_free_size(rdev, ring);
- if (ring->rptr == ring->wptr) {
- r = radeon_ring_alloc(rdev, ring, 1);
- if (!r) {
- radeon_ring_write(ring, ring->nop);
- radeon_ring_commit(rdev, ring);
- }
- }
-}
-
-/**
* radeon_ring_lockup_update - update lockup variables
*
* @ring: radeon_ring structure holding ring information
*
* Update the last rptr value and timestamp (all asics).
*/
-void radeon_ring_lockup_update(struct radeon_ring *ring)
+void radeon_ring_lockup_update(struct radeon_device *rdev,
+ struct radeon_ring *ring)
{
- ring->last_rptr = ring->rptr;
- ring->last_activity = jiffies;
+ atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
+ atomic64_set(&ring->last_activity, jiffies_64);
}
/**
@@ -518,40 +500,23 @@ void radeon_ring_lockup_update(struct radeon_ring *ring)
* @rdev: radeon device structure
* @ring: radeon_ring structure holding ring information
*
- * We don't need to initialize the lockup tracking information as we will either
- * have CP rptr to a different value of jiffies wrap around which will force
- * initialization of the lockup tracking informations.
- *
- * A possible false positivie is if we get call after while and last_cp_rptr ==
- * the current CP rptr, even if it's unlikely it might happen. To avoid this
- * if the elapsed time since last call is bigger than 2 second than we return
- * false and update the tracking information. Due to this the caller must call
- * radeon_ring_test_lockup several time in less than 2sec for lockup to be reported
- * the fencing code should be cautious about that.
- *
- * Caller should write to the ring to force CP to do something so we don't get
- * false positive when CP is just gived nothing to do.
- *
- **/
+ */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- unsigned long cjiffies, elapsed;
+ uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
+ uint64_t last = atomic64_read(&ring->last_activity);
+ uint64_t elapsed;
- cjiffies = jiffies;
- if (!time_after(cjiffies, ring->last_activity)) {
- /* likely a wrap around */
- radeon_ring_lockup_update(ring);
+ if (rptr != atomic_read(&ring->last_rptr)) {
+ /* ring is still working, no lockup */
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- ring->rptr = radeon_ring_get_rptr(rdev, ring);
- if (ring->rptr != ring->last_rptr) {
- /* CP is still working no lockup */
- radeon_ring_lockup_update(ring);
- return false;
- }
- elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
+
+ elapsed = jiffies_to_msecs(jiffies_64 - last);
if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
- dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+ dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n",
+ ring->idx, elapsed);
return true;
}
/* give a chance to the GPU ... */
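The reworked lockup test reduces to: remember the last read pointer and when it was seen; if it has not advanced for longer than the timeout, report a stall. A freestanding sketch of that state machine (plain C, leaving out the atomics the driver uses for cross-thread safety):

/* Standalone sketch of the rptr-based lockup detection above. */
#include <stdbool.h>
#include <stdint.h>

struct ring_watch {
        uint32_t last_rptr;     /* read pointer seen at the last update */
        uint64_t last_ms;       /* timestamp of that update */
};

static void watch_update(struct ring_watch *w, uint32_t rptr, uint64_t now_ms)
{
        w->last_rptr = rptr;
        w->last_ms = now_ms;
}

static bool watch_test_lockup(struct ring_watch *w, uint32_t rptr,
                              uint64_t now_ms, uint64_t timeout_ms)
{
        if (rptr != w->last_rptr) {
                /* the ring made progress; remember it, no lockup */
                watch_update(w, rptr, now_ms);
                return false;
        }
        /* a timeout of 0 disables detection, as radeon_lockup_timeout does */
        return timeout_ms && (now_ms - w->last_ms) >= timeout_ms;
}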
@@ -709,7 +674,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
if (radeon_debugfs_ring_init(rdev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return 0;
}
@@ -780,8 +745,6 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
ring->wptr, ring->wptr);
- seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n",
- ring->rptr, ring->rptr);
seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
ring->last_semaphore_signal_addr);
seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
@@ -814,6 +777,8 @@ static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
+static int si_vce1_index = TN_RING_TYPE_VCE1_INDEX;
+static int si_vce2_index = TN_RING_TYPE_VCE2_INDEX;
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
@@ -822,6 +787,8 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
{"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
{"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
+ {"radeon_ring_vce1", radeon_debugfs_ring_info, 0, &si_vce1_index},
+ {"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
};
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c0625805cdd7..adcf3e2f07da 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -312,7 +312,7 @@ static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align, bool block)
+ unsigned size, unsigned align)
{
struct radeon_fence *fences[RADEON_NUM_RINGS];
unsigned tries[RADEON_NUM_RINGS];
@@ -353,14 +353,11 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
r = radeon_fence_wait_any(rdev, fences, false);
spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for block */
- if (r == -ENOENT && block) {
+ if (r == -ENOENT) {
r = wait_event_interruptible_locked(
sa_manager->wq,
radeon_sa_event(sa_manager, size, align)
);
-
- } else if (r == -ENOENT) {
- r = -ENOMEM;
}
} while (!r);
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 9006b32d5eed..dbd6bcde92de 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -42,7 +42,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
return -ENOMEM;
}
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
- 8 * RADEON_NUM_SYNCS, 8, true);
+ 8 * RADEON_NUM_SYNCS, 8);
if (r) {
kfree(*semaphore);
*semaphore = NULL;
@@ -147,7 +147,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
if (++count > RADEON_NUM_SYNCS) {
/* not enough room, wait manually */
- radeon_fence_wait_locked(fence);
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ return r;
continue;
}
@@ -161,7 +163,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
/* signaling wasn't successful wait manually */
radeon_ring_undo(&rdev->ring[i]);
- radeon_fence_wait_locked(fence);
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ return r;
continue;
}
@@ -169,7 +173,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
/* waiting wasn't successful wait manually */
radeon_ring_undo(&rdev->ring[i]);
- radeon_fence_wait_locked(fence);
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ return r;
continue;
}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 12e8099a0823..3a13e0d1055c 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -257,20 +257,36 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_fence **fence)
{
+ uint32_t handle = ring->idx ^ 0xdeafbeef;
int r;
if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
- r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+ r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
if (r) {
DRM_ERROR("Failed to get dummy create msg\n");
return r;
}
- r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence);
+ r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
if (r) {
DRM_ERROR("Failed to get dummy destroy msg\n");
return r;
}
+
+ } else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
+ ring->idx == TN_RING_TYPE_VCE2_INDEX) {
+ r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
+ if (r) {
+ DRM_ERROR("Failed to get dummy create msg\n");
+ return r;
+ }
+
+ r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
+ if (r) {
+ DRM_ERROR("Failed to get dummy destroy msg\n");
+ return r;
+ }
+
} else {
r = radeon_ring_lock(rdev, ring, 64);
if (r) {
@@ -486,6 +502,16 @@ out_cleanup:
printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
+static bool radeon_test_sync_possible(struct radeon_ring *ringA,
+ struct radeon_ring *ringB)
+{
+ if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
+ ringB->idx == TN_RING_TYPE_VCE1_INDEX)
+ return false;
+
+ return true;
+}
+
void radeon_test_syncing(struct radeon_device *rdev)
{
int i, j, k;
@@ -500,6 +526,9 @@ void radeon_test_syncing(struct radeon_device *rdev)
if (!ringB->ready)
continue;
+ if (!radeon_test_sync_possible(ringA, ringB))
+ continue;
+
DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
radeon_test_ring_sync(rdev, ringA, ringB);
@@ -511,6 +540,12 @@ void radeon_test_syncing(struct radeon_device *rdev)
if (!ringC->ready)
continue;
+ if (!radeon_test_sync_possible(ringA, ringC))
+ continue;
+
+ if (!radeon_test_sync_possible(ringB, ringC))
+ continue;
+
DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 040a2a10ea17..c8a8a5144ec1 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -406,8 +406,14 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (r) {
memcpy:
r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ if (r) {
+ return r;
+ }
}
- return r;
+
+ /* update statistics */
+ atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
+ return 0;
}
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -701,7 +707,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&rdev->mman.bdev,
rdev->mman.bo_global_ref.ref.object,
- &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
+ &radeon_bo_driver,
+ rdev->ddev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
rdev->need_dma32);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -742,7 +750,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
DRM_INFO("radeon: %uM of GTT memory ready.\n",
(unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
- rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
r = radeon_ttm_debugfs_init(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 3e6804b2b2ef..5748bdaeacce 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -455,7 +455,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
}
reloc = p->relocs_ptr[(idx / 4)];
- start = reloc->lobj.gpu_offset;
+ start = reloc->gpu_offset;
end = start + radeon_bo_size(reloc->robj);
start += offset;
@@ -807,8 +807,7 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
(rdev->pm.dpm.hd != hd)) {
rdev->pm.dpm.sd = sd;
rdev->pm.dpm.hd = hd;
- /* disable this for now */
- /*streams_changed = true;*/
+ streams_changed = true;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
new file mode 100644
index 000000000000..76e9904bc537
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -0,0 +1,699 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "sid.h"
+
+/* 1 second timeout */
+#define VCE_IDLE_TIMEOUT_MS 1000
+
+/* Firmware Names */
+#define FIRMWARE_BONAIRE "radeon/BONAIRE_vce.bin"
+
+MODULE_FIRMWARE(FIRMWARE_BONAIRE);
+
+static void radeon_vce_idle_work_handler(struct work_struct *work);
+
+/**
+ * radeon_vce_init - allocate memory, load vce firmware
+ *
+ * @rdev: radeon_device pointer
+ *
+ * First step to get VCE online, allocate memory and load the firmware
+ */
+int radeon_vce_init(struct radeon_device *rdev)
+{
+ static const char *fw_version = "[ATI LIB=VCEFW,";
+ static const char *fb_version = "[ATI LIB=VCEFWSTATS,";
+ unsigned long size;
+ const char *fw_name, *c;
+ uint8_t start, mid, end;
+ int i, r;
+
+ INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler);
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ fw_name = FIRMWARE_BONAIRE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
+ if (r) {
+ dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
+ fw_name);
+ return r;
+ }
+
+ /* search for firmware version */
+
+ size = rdev->vce_fw->size - strlen(fw_version) - 9;
+ c = rdev->vce_fw->data;
+ for (;size > 0; --size, ++c)
+ if (strncmp(c, fw_version, strlen(fw_version)) == 0)
+ break;
+
+ if (size == 0)
+ return -EINVAL;
+
+ c += strlen(fw_version);
+ if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3)
+ return -EINVAL;
+
+ /* search for feedback version */
+
+ size = rdev->vce_fw->size - strlen(fb_version) - 3;
+ c = rdev->vce_fw->data;
+ for (;size > 0; --size, ++c)
+ if (strncmp(c, fb_version, strlen(fb_version)) == 0)
+ break;
+
+ if (size == 0)
+ return -EINVAL;
+
+ c += strlen(fb_version);
+ if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
+ return -EINVAL;
+
+ DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n",
+ start, mid, end, rdev->vce.fb_version);
+
+ rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
+
+ /* we can only work with this fw version for now */
+ if (rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8)))
+ return -EINVAL;
+
+ /* allocate firmware, stack and heap BO */
+
+ size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
+ RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
+ return r;
+ }
+
+ r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
+ if (r) {
+ radeon_bo_unref(&rdev->vce.vcpu_bo);
+ dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
+ return r;
+ }
+
+ r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->vce.gpu_addr);
+ radeon_bo_unreserve(rdev->vce.vcpu_bo);
+ if (r) {
+ radeon_bo_unref(&rdev->vce.vcpu_bo);
+ dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r);
+ return r;
+ }
+
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+ atomic_set(&rdev->vce.handles[i], 0);
+ rdev->vce.filp[i] = NULL;
+ }
+
+ return 0;
+}
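The firmware handshake above is a plain substring scan over the image followed by an sscanf of the numeric tail. A self-contained sketch of the same idea (plain C, working on a caller-supplied buffer instead of rdev->vce_fw->data):

/* Standalone sketch of the "[ATI LIB=VCEFW,xx.yy.zz]" version scan. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int vce_fw_version(const char *data, size_t len,
                          uint8_t *start, uint8_t *mid, uint8_t *end)
{
        static const char tag[] = "[ATI LIB=VCEFW,";
        size_t tag_len = strlen(tag);
        size_t i;

        for (i = 0; i + tag_len + 9 <= len; i++) {
                char buf[16];
                size_t n = len - (i + tag_len);

                if (memcmp(data + i, tag, tag_len))
                        continue;

                /* copy the numeric tail into a NUL-terminated scratch buffer */
                if (n > sizeof(buf) - 1)
                        n = sizeof(buf) - 1;
                memcpy(buf, data + i + tag_len, n);
                buf[n] = '\0';

                if (sscanf(buf, "%2hhu.%2hhu.%2hhu]", start, mid, end) == 3)
                        return 0;
        }
        return -1;      /* tag not found or malformed */
}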
+
+/**
+ * radeon_vce_fini - free memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Last step on VCE teardown, free firmware memory
+ */
+void radeon_vce_fini(struct radeon_device *rdev)
+{
+ if (rdev->vce.vcpu_bo == NULL)
+ return;
+
+ radeon_bo_unref(&rdev->vce.vcpu_bo);
+
+ release_firmware(rdev->vce_fw);
+}
+
+/**
+ * radeon_vce_suspend - unpin VCE fw memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ */
+int radeon_vce_suspend(struct radeon_device *rdev)
+{
+ int i;
+
+ if (rdev->vce.vcpu_bo == NULL)
+ return 0;
+
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
+ if (atomic_read(&rdev->vce.handles[i]))
+ break;
+
+ if (i == RADEON_MAX_VCE_HANDLES)
+ return 0;
+
+ /* TODO: suspending running encoding sessions isn't supported */
+ return -EINVAL;
+}
+
+/**
+ * radeon_vce_resume - pin VCE fw memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ */
+int radeon_vce_resume(struct radeon_device *rdev)
+{
+ void *cpu_addr;
+ int r;
+
+ if (rdev->vce.vcpu_bo == NULL)
+ return -EINVAL;
+
+ r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
+ return r;
+ }
+
+ r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->vce.vcpu_bo);
+ dev_err(rdev->dev, "(%d) VCE map failed\n", r);
+ return r;
+ }
+
+ memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);
+
+ radeon_bo_kunmap(rdev->vce.vcpu_bo);
+
+ radeon_bo_unreserve(rdev->vce.vcpu_bo);
+
+ return 0;
+}
+
+/**
+ * radeon_vce_idle_work_handler - power off VCE
+ *
+ * @work: pointer to work structure
+ *
+ * Power off VCE when it's not used any more
+ */
+static void radeon_vce_idle_work_handler(struct work_struct *work)
+{
+ struct radeon_device *rdev =
+ container_of(work, struct radeon_device, vce.idle_work.work);
+
+ if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) &&
+ (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) {
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ radeon_dpm_enable_vce(rdev, false);
+ } else {
+ radeon_set_vce_clocks(rdev, 0, 0);
+ }
+ } else {
+ schedule_delayed_work(&rdev->vce.idle_work,
+ msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
+ }
+}
+
+/**
+ * radeon_vce_note_usage - power up VCE
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Make sure VCE is powered up when we want to use it
+ */
+void radeon_vce_note_usage(struct radeon_device *rdev)
+{
+ bool streams_changed = false;
+ bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);
+ set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
+ msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
+
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ /* XXX figure out if the streams changed */
+ streams_changed = false;
+ }
+
+ if (set_clocks || streams_changed) {
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ radeon_dpm_enable_vce(rdev, true);
+ } else {
+ radeon_set_vce_clocks(rdev, 53300, 40000);
+ }
+ }
+}
+
+/**
+ * radeon_vce_free_handles - free still open VCE handles
+ *
+ * @rdev: radeon_device pointer
+ * @filp: drm file pointer
+ *
+ * Close all VCE handles still open by this file pointer
+ */
+void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp)
+{
+ int i, r;
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+ uint32_t handle = atomic_read(&rdev->vce.handles[i]);
+ if (!handle || rdev->vce.filp[i] != filp)
+ continue;
+
+ radeon_vce_note_usage(rdev);
+
+ r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX,
+ handle, NULL);
+ if (r)
+ DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
+
+ rdev->vce.filp[i] = NULL;
+ atomic_set(&rdev->vce.handles[i], 0);
+ }
+}
+
+/**
+ * radeon_vce_get_create_msg - generate a VCE create msg
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring we should submit the msg to
+ * @handle: VCE session handle to use
+ * @fence: optional fence to return
+ *
+ * Open up a stream for HW test
+ */
+int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence)
+{
+ const unsigned ib_size_dw = 1024;
+ struct radeon_ib ib;
+ uint64_t dummy;
+ int i, r;
+
+ r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ dummy = ib.gpu_addr + 1024;
+
+ /* stitch together a VCE create msg */
+ ib.length_dw = 0;
+ ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
+ ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
+ ib.ptr[ib.length_dw++] = handle;
+
+ ib.ptr[ib.length_dw++] = 0x00000030; /* len */
+ ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
+ ib.ptr[ib.length_dw++] = 0x00000000;
+ ib.ptr[ib.length_dw++] = 0x00000042;
+ ib.ptr[ib.length_dw++] = 0x0000000a;
+ ib.ptr[ib.length_dw++] = 0x00000001;
+ ib.ptr[ib.length_dw++] = 0x00000080;
+ ib.ptr[ib.length_dw++] = 0x00000060;
+ ib.ptr[ib.length_dw++] = 0x00000100;
+ ib.ptr[ib.length_dw++] = 0x00000100;
+ ib.ptr[ib.length_dw++] = 0x0000000c;
+ ib.ptr[ib.length_dw++] = 0x00000000;
+
+ ib.ptr[ib.length_dw++] = 0x00000014; /* len */
+ ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
+ ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
+ ib.ptr[ib.length_dw++] = dummy;
+ ib.ptr[ib.length_dw++] = 0x00000001;
+
+ for (i = ib.length_dw; i < ib_size_dw; ++i)
+ ib.ptr[i] = 0x0;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ }
+
+ if (fence)
+ *fence = radeon_fence_ref(ib.fence);
+
+ radeon_ib_free(rdev, &ib);
+
+ return r;
+}
+
+/**
+ * radeon_vce_get_destroy_msg - generate a VCE destroy msg
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring we should submit the msg to
+ * @handle: VCE session handle to use
+ * @fence: optional fence to return
+ *
+ * Close a stream for HW test, or if userspace failed to do so
+ */
+int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence)
+{
+ const unsigned ib_size_dw = 1024;
+ struct radeon_ib ib;
+ uint64_t dummy;
+ int i, r;
+
+ r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ dummy = ib.gpu_addr + 1024;
+
+ /* stitch together a VCE destroy msg */
+ ib.length_dw = 0;
+ ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
+ ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
+ ib.ptr[ib.length_dw++] = handle;
+
+ ib.ptr[ib.length_dw++] = 0x00000014; /* len */
+ ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
+ ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
+ ib.ptr[ib.length_dw++] = dummy;
+ ib.ptr[ib.length_dw++] = 0x00000001;
+
+ ib.ptr[ib.length_dw++] = 0x00000008; /* len */
+ ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */
+
+ for (i = ib.length_dw; i < ib_size_dw; ++i)
+ ib.ptr[i] = 0x0;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ }
+
+ if (fence)
+ *fence = radeon_fence_ref(ib.fence);
+
+ radeon_ib_free(rdev, &ib);
+
+ return r;
+}
+
+/**
+ * radeon_vce_cs_reloc - command submission relocation
+ *
+ * @p: parser context
+ * @lo: address of lower dword
+ * @hi: address of higher dword
+ *
+ * Patch relocation inside command stream with real buffer address
+ */
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ uint64_t offset;
+ unsigned idx;
+
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ offset = radeon_get_ib_value(p, lo);
+ idx = radeon_get_ib_value(p, hi);
+
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+
+ offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
+
+ p->ib.ptr[lo] = offset & 0xFFFFFFFF;
+ p->ib.ptr[hi] = offset >> 32;
+
+ return 0;
+}
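The relocation patch itself is just a 64-bit address split across two command stream dwords; a trivial sketch of that step (not the driver code, which also resolves the buffer through the relocation chunk first):

/* Standalone sketch of the lo/hi address patching done above. */
#include <stdint.h>

static void patch_reloc(uint32_t *ib, unsigned lo, unsigned hi, uint64_t gpu_addr)
{
        ib[lo] = (uint32_t)(gpu_addr & 0xFFFFFFFF);     /* low dword */
        ib[hi] = (uint32_t)(gpu_addr >> 32);            /* high dword */
}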
+
+/**
+ * radeon_vce_cs_parse - parse and validate the command stream
+ *
+ * @p: parser context
+ *
+ */
+int radeon_vce_cs_parse(struct radeon_cs_parser *p)
+{
+ uint32_t handle = 0;
+ bool destroy = false;
+ int i, r;
+
+ while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
+ uint32_t len = radeon_get_ib_value(p, p->idx);
+ uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
+
+ if ((len < 8) || (len & 3)) {
+ DRM_ERROR("invalid VCE command length (%d)!\n", len);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case 0x00000001: // session
+ handle = radeon_get_ib_value(p, p->idx + 2);
+ break;
+
+ case 0x00000002: // task info
+ case 0x01000001: // create
+ case 0x04000001: // config extension
+ case 0x04000002: // pic control
+ case 0x04000005: // rate control
+ case 0x04000007: // motion estimation
+ case 0x04000008: // rdo
+ break;
+
+ case 0x03000001: // encode
+ r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
+ if (r)
+ return r;
+
+ r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
+ if (r)
+ return r;
+ break;
+
+ case 0x02000001: // destroy
+ destroy = true;
+ break;
+
+ case 0x05000001: // context buffer
+ case 0x05000004: // video bitstream buffer
+ case 0x05000005: // feedback buffer
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
+ if (r)
+ return r;
+ break;
+
+ default:
+ DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
+ return -EINVAL;
+ }
+
+ p->idx += len / 4;
+ }
+
+ if (destroy) {
+ /* IB contains a destroy msg, free the handle */
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
+ atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
+
+ return 0;
+ }
+
+ /* create or encode, validate the handle */
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+ if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+ return 0;
+ }
+
+ /* handle not found try to alloc a new one */
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+ if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+ p->rdev->vce.filp[i] = p->filp;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("No more free VCE handles!\n");
+ return -EINVAL;
+}
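Session handles are tracked lock-free: a create claims the first zero slot with a compare-and-swap, and a destroy swaps the handle back to zero. A minimal C11 sketch of that scheme (the driver uses the kernel's atomic_cmpxchg() on rdev->vce.handles instead):

/* Standalone sketch of the lock-free handle bookkeeping used above. */
#include <stdatomic.h>
#include <stdint.h>

#define MAX_HANDLES 16

static _Atomic uint32_t handles[MAX_HANDLES];

/* Claim a free slot for @handle; returns the slot index or -1 if full. */
static int handle_alloc(uint32_t handle)
{
        for (int i = 0; i < MAX_HANDLES; i++) {
                uint32_t expected = 0;

                if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
                        return i;
        }
        return -1;
}

/* Release @handle wherever it is stored; harmless if it was never allocated. */
static void handle_free(uint32_t handle)
{
        for (int i = 0; i < MAX_HANDLES; i++) {
                uint32_t expected = handle;

                atomic_compare_exchange_strong(&handles[i], &expected, 0);
        }
}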
+
+/**
+ * radeon_vce_semaphore_emit - emit a semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: engine to use
+ * @semaphore: address of semaphore
+ * @emit_wait: true=emit wait, false=emit signal
+ *
+ */
+bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, VCE_CMD_SEMAPHORE);
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+ radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
+ if (!emit_wait)
+ radeon_ring_write(ring, VCE_CMD_END);
+
+ return true;
+}
+
+/**
+ * radeon_vce_ib_execute - execute indirect buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ib: the IB to execute
+ *
+ */
+void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ radeon_ring_write(ring, VCE_CMD_IB);
+ radeon_ring_write(ring, ib->gpu_addr);
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ radeon_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * radeon_vce_fence_emit - add a fence command to the ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: the fence
+ *
+ */
+void radeon_vce_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ radeon_ring_write(ring, VCE_CMD_FENCE);
+ radeon_ring_write(ring, addr);
+ radeon_ring_write(ring, upper_32_bits(addr));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, VCE_CMD_TRAP);
+ radeon_ring_write(ring, VCE_CMD_END);
+}
+
+/**
+ * radeon_vce_ring_test - test if VCE ring is working
+ *
+ * @rdev: radeon_device pointer
+ * @ring: the engine to test on
+ *
+ */
+int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
+ unsigned i;
+ int r;
+
+ r = radeon_ring_lock(rdev, ring, 16);
+ if (r) {
+ DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, VCE_CMD_END);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (vce_v1_0_get_rptr(rdev, ring) != rptr)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed\n",
+ ring->idx);
+ r = -ETIMEDOUT;
+ }
+
+ return r;
+}
+
+/**
+ * radeon_vce_ib_test - test if VCE IBs are working
+ *
+ * @rdev: radeon_device pointer
+ * @ring: the engine to test on
+ *
+ */
+int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_fence *fence = NULL;
+ int r;
+
+ r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
+ if (r) {
+ DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
+ if (r) {
+ DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ } else {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ }
+error:
+ radeon_fence_unref(&fence);
+ return r;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
new file mode 100644
index 000000000000..2aae6ce49d32
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -0,0 +1,966 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_trace.h"
+
+/*
+ * GPUVM
+ * GPUVM is similar to the legacy gart on older asics, however
+ * rather than there being a single global gart table
+ * for the entire GPU, there are multiple VM page tables active
+ * at any given time. The VM page tables can contain a mix of
+ * vram pages and system memory pages, and system memory pages
+ * can be mapped as snooped (cached system pages) or unsnooped
+ * (uncached system pages).
+ * Each VM has an ID associated with it and there is a page table
+ * associated with each VMID. When executing a command buffer,
+ * the kernel tells the ring what VMID to use for that command
+ * buffer. VMIDs are allocated dynamically as commands are submitted.
+ * The userspace drivers maintain their own address space and the kernel
+ * sets up their page tables accordingly when they submit their
+ * command buffers and a VMID is assigned.
+ * Cayman/Trinity support up to 8 active VMs at any given time;
+ * SI supports 16.
+ */
+
+/**
+ * radeon_vm_num_pdes - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
+{
+ return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
+}
+
+/**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+ return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
+}
+
+/**
+ * radeon_vm_manager_init - init the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init the vm manager (cayman+).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->vm_manager.enabled) {
+ r = radeon_asic_vm_init(rdev);
+ if (r)
+ return r;
+
+ rdev->vm_manager.enabled = true;
+ }
+ return 0;
+}
+
+/**
+ * radeon_vm_manager_fini - tear down the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the VM manager (cayman+).
+ */
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ if (!rdev->vm_manager.enabled)
+ return;
+
+ for (i = 0; i < RADEON_NUM_VM; ++i)
+ radeon_fence_unref(&rdev->vm_manager.active[i]);
+ radeon_asic_vm_fini(rdev);
+ rdev->vm_manager.enabled = false;
+}
+
+/**
+ * radeon_vm_get_bos - add the vm BOs to a validation list
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm providing the BOs
+ * @head: head of validation list
+ *
+ * Add the page directory to the list of BOs to
+ * validate for command submission (cayman+).
+ */
+struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct list_head *head)
+{
+ struct radeon_cs_reloc *list;
+ unsigned i, idx, size;
+
+ size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
+ list = kmalloc(size, GFP_KERNEL);
+ if (!list)
+ return NULL;
+
+ /* add the vm page table to the list */
+ list[0].gobj = NULL;
+ list[0].robj = vm->page_directory;
+ list[0].domain = RADEON_GEM_DOMAIN_VRAM;
+ list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM;
+ list[0].tv.bo = &vm->page_directory->tbo;
+ list[0].tiling_flags = 0;
+ list[0].handle = 0;
+ list_add(&list[0].tv.head, head);
+
+ for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
+ if (!vm->page_tables[i].bo)
+ continue;
+
+ list[idx].gobj = NULL;
+ list[idx].robj = vm->page_tables[i].bo;
+ list[idx].domain = RADEON_GEM_DOMAIN_VRAM;
+ list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM;
+ list[idx].tv.bo = &list[idx].robj->tbo;
+ list[idx].tiling_flags = 0;
+ list[idx].handle = 0;
+ list_add(&list[idx++].tv.head, head);
+ }
+
+ return list;
+}
+
+/**
+ * radeon_vm_grab_id - allocate the next free VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ *
+ * Allocate an id for the vm (cayman+).
+ * Returns the fence we need to sync to (if any).
+ *
+ * Global and local mutex must be locked!
+ */
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+ struct radeon_vm *vm, int ring)
+{
+ struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+ unsigned choices[2] = {};
+ unsigned i;
+
+ /* check if the id is still valid */
+ if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
+ return NULL;
+
+ /* we definitely need to flush */
+ radeon_fence_unref(&vm->last_flush);
+
+ /* skip over VMID 0, since it is the system VM */
+ for (i = 1; i < rdev->vm_manager.nvm; ++i) {
+ struct radeon_fence *fence = rdev->vm_manager.active[i];
+
+ if (fence == NULL) {
+ /* found a free one */
+ vm->id = i;
+ trace_radeon_vm_grab_id(vm->id, ring);
+ return NULL;
+ }
+
+ if (radeon_fence_is_earlier(fence, best[fence->ring])) {
+ best[fence->ring] = fence;
+ choices[fence->ring == ring ? 0 : 1] = i;
+ }
+ }
+
+ for (i = 0; i < 2; ++i) {
+ if (choices[i]) {
+ vm->id = choices[i];
+ trace_radeon_vm_grab_id(vm->id, ring);
+ return rdev->vm_manager.active[choices[i]];
+ }
+ }
+
+ /* should never happen */
+ BUG();
+ return NULL;
+}
+
+/**
+ * radeon_vm_flush - hardware flush the vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to flush
+ * @ring: ring to use for flush
+ *
+ * Flush the vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_flush(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ int ring)
+{
+ uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+
+ /* if we can't remember our last VM flush then flush now! */
+ /* XXX figure out why we have to flush all the time */
+ if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
+ vm->pd_gpu_addr = pd_addr;
+ radeon_ring_vm_flush(rdev, ring, vm);
+ }
+}
+
+/**
+ * radeon_vm_fence - remember fence for vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to fence
+ * @fence: fence to remember
+ *
+ * Fence the vm (cayman+).
+ * Set the fence used to protect page table and id.
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_fence(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_fence *fence)
+{
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(fence);
+
+ radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+ rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+
+ radeon_fence_unref(&vm->last_id_use);
+ vm->last_id_use = radeon_fence_ref(fence);
+
+ /* we just flushed the VM, remember that */
+ if (!vm->last_flush)
+ vm->last_flush = radeon_fence_ref(fence);
+}
+
+/**
+ * radeon_vm_bo_find - find the bo_va for a specific vm & bo
+ *
+ * @vm: requested vm
+ * @bo: requested buffer object
+ *
+ * Find @bo inside the requested vm (cayman+).
+ * Search the @bo's vm list for the requested vm.
+ * Returns the found bo_va or NULL if none is found.
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ list_for_each_entry(bo_va, &bo->va, bo_list) {
+ if (bo_va->vm == vm) {
+ return bo_va;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * radeon_vm_bo_add - add a bo to a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ *
+ * Add @bo into the requested vm (cayman+).
+ * Add @bo to the list of bos associated with the vm
+ * Returns newly added bo_va or NULL for failure
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (bo_va == NULL) {
+ return NULL;
+ }
+ bo_va->vm = vm;
+ bo_va->bo = bo;
+ bo_va->soffset = 0;
+ bo_va->eoffset = 0;
+ bo_va->flags = 0;
+ bo_va->valid = false;
+ bo_va->ref_count = 1;
+ INIT_LIST_HEAD(&bo_va->bo_list);
+ INIT_LIST_HEAD(&bo_va->vm_list);
+
+ mutex_lock(&vm->mutex);
+ list_add(&bo_va->vm_list, &vm->va);
+ list_add_tail(&bo_va->bo_list, &bo->va);
+ mutex_unlock(&vm->mutex);
+
+ return bo_va;
+}
+
+/**
+ * radeon_vm_clear_bo - initially clear the page dir/table
+ *
+ * @rdev: radeon_device pointer
+ * @bo: bo to clear
+ *
+ * Clear the newly allocated page directory or page table by filling it
+ * with zero entries, using the DMA ring.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int radeon_vm_clear_bo(struct radeon_device *rdev,
+ struct radeon_bo *bo)
+{
+ struct ttm_validate_buffer tv;
+ struct ww_acquire_ctx ticket;
+ struct list_head head;
+ struct radeon_ib ib;
+ unsigned entries;
+ uint64_t addr;
+ int r;
+
+ memset(&tv, 0, sizeof(tv));
+ tv.bo = &bo->tbo;
+
+ INIT_LIST_HEAD(&head);
+ list_add(&tv.head, &head);
+
+ r = ttm_eu_reserve_buffers(&ticket, &head);
+ if (r)
+ return r;
+
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ if (r)
+ goto error;
+
+ addr = radeon_bo_gpu_offset(bo);
+ entries = radeon_bo_size(bo) / 8;
+
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
+ NULL, entries * 2 + 64);
+ if (r)
+ goto error;
+
+ ib.length_dw = 0;
+
+ radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r)
+ goto error;
+
+ ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
+ radeon_ib_free(rdev, &ib);
+
+ return 0;
+
+error:
+ ttm_eu_backoff_reservation(&ticket, &head);
+ return r;
+}
+
+/**
+ * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to store the address
+ * @soffset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Set offset of @bo_va (cayman+).
+ * Validate and set the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va,
+ uint64_t soffset,
+ uint32_t flags)
+{
+ uint64_t size = radeon_bo_size(bo_va->bo);
+ uint64_t eoffset, last_offset = 0;
+ struct radeon_vm *vm = bo_va->vm;
+ struct radeon_bo_va *tmp;
+ struct list_head *head;
+ unsigned last_pfn, pt_idx;
+ int r;
+
+ if (soffset) {
+ /* make sure the object fits at this offset */
+ eoffset = soffset + size;
+ if (soffset >= eoffset) {
+ return -EINVAL;
+ }
+
+ last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+ if (last_pfn > rdev->vm_manager.max_pfn) {
+ dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ last_pfn, rdev->vm_manager.max_pfn);
+ return -EINVAL;
+ }
+
+ } else {
+ eoffset = last_pfn = 0;
+ }
+
+ mutex_lock(&vm->mutex);
+ head = &vm->va;
+ last_offset = 0;
+ list_for_each_entry(tmp, &vm->va, vm_list) {
+ if (bo_va == tmp) {
+ /* skip over currently modified bo */
+ continue;
+ }
+
+ if (soffset >= last_offset && eoffset <= tmp->soffset) {
+ /* bo can be added before this one */
+ break;
+ }
+ if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
+ /* bo and tmp overlap, invalid offset */
+ dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+ bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
+ (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+ mutex_unlock(&vm->mutex);
+ return -EINVAL;
+ }
+ last_offset = tmp->eoffset;
+ head = &tmp->vm_list;
+ }
+
+ bo_va->soffset = soffset;
+ bo_va->eoffset = eoffset;
+ bo_va->flags = flags;
+ bo_va->valid = false;
+ list_move(&bo_va->vm_list, head);
+
+ soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+ eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+ if (eoffset > vm->max_pde_used)
+ vm->max_pde_used = eoffset;
+
+ radeon_bo_unreserve(bo_va->bo);
+
+ /* walk over the address space and allocate the page tables */
+ for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
+ struct radeon_bo *pt;
+
+ if (vm->page_tables[pt_idx].bo)
+ continue;
+
+ /* drop mutex to allocate and clear page table */
+ mutex_unlock(&vm->mutex);
+
+ r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
+ RADEON_GPU_PAGE_SIZE, false,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
+ if (r)
+ return r;
+
+ r = radeon_vm_clear_bo(rdev, pt);
+ if (r) {
+ radeon_bo_unref(&pt);
+ radeon_bo_reserve(bo_va->bo, false);
+ return r;
+ }
+
+ /* acquire the mutex again */
+ mutex_lock(&vm->mutex);
+ if (vm->page_tables[pt_idx].bo) {
+ /* someone else allocated the pt in the meantime */
+ mutex_unlock(&vm->mutex);
+ radeon_bo_unref(&pt);
+ mutex_lock(&vm->mutex);
+ continue;
+ }
+
+ vm->page_tables[pt_idx].addr = 0;
+ vm->page_tables[pt_idx].bo = pt;
+ }
+
+ mutex_unlock(&vm->mutex);
+ return radeon_bo_reserve(bo_va->bo, false);
+}
+
+/**
+ * radeon_vm_map_gart - get the physical address of a gart page
+ *
+ * @rdev: radeon_device pointer
+ * @addr: the unmapped addr
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
+ */
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
+{
+ uint64_t result;
+
+ /* page table offset */
+ result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
+
+ /* in case cpu page size != gpu page size */
+ result |= addr & (~PAGE_MASK);
+
+ return result;
+}
+
+/**
+ * radeon_vm_page_flags - translate page flags to what the hw uses
+ *
+ * @flags: flags coming from userspace
+ *
+ * Translate the flags the userspace ABI uses to hw flags.
+ */
+static uint32_t radeon_vm_page_flags(uint32_t flags)
+{
+ uint32_t hw_flags = 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ hw_flags |= R600_PTE_SYSTEM;
+ hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+ }
+ return hw_flags;
+}
+
+/**
+ * radeon_vm_update_page_directory - make sure that the page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Update the page directory so that it points at the currently
+ * allocated page tables (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int radeon_vm_update_page_directory(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+
+ uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+ uint64_t last_pde = ~0, last_pt = ~0;
+ unsigned count = 0, pt_idx, ndw;
+ struct radeon_ib ib;
+ int r;
+
+ /* padding, etc. */
+ ndw = 64;
+
+ /* assume the worst case */
+ ndw += vm->max_pde_used * 12;
+
+ /* update too big for an IB */
+ if (ndw > 0xfffff)
+ return -ENOMEM;
+
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+ if (r)
+ return r;
+ ib.length_dw = 0;
+
+ /* walk over the address space and update the page directory */
+ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
+ struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
+ uint64_t pde, pt;
+
+ if (bo == NULL)
+ continue;
+
+ pt = radeon_bo_gpu_offset(bo);
+ if (vm->page_tables[pt_idx].addr == pt)
+ continue;
+ vm->page_tables[pt_idx].addr = pt;
+
+ pde = pd_addr + pt_idx * 8;
+ if (((last_pde + 8 * count) != pde) ||
+ ((last_pt + incr * count) != pt)) {
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, &ib, last_pde,
+ last_pt, count, incr,
+ R600_PTE_VALID);
+ }
+
+ count = 1;
+ last_pde = pde;
+ last_pt = pt;
+ } else {
+ ++count;
+ }
+ }
+
+ if (count)
+ radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
+ incr, R600_PTE_VALID);
+
+ if (ib.length_dw != 0) {
+ radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(ib.fence);
+ radeon_fence_unref(&vm->last_flush);
+ }
+ radeon_ib_free(rdev, &ib);
+
+ return 0;
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @ib: indirect buffer that receives the page table update commands
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_ib *ib,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
+{
+ static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+
+ uint64_t last_pte = ~0, last_dst = ~0;
+ unsigned count = 0;
+ uint64_t addr;
+
+ start = start / RADEON_GPU_PAGE_SIZE;
+ end = end / RADEON_GPU_PAGE_SIZE;
+
+ /* walk over the address space and update the page tables */
+ for (addr = start; addr < end; ) {
+ uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+ unsigned nptes;
+ uint64_t pte;
+
+ if ((addr & ~mask) == (end & ~mask))
+ nptes = end - addr;
+ else
+ nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+ pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo);
+ pte += (addr & mask) * 8;
+
+ if ((last_pte + 8 * count) != pte) {
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
+ last_dst, count,
+ RADEON_GPU_PAGE_SIZE,
+ flags);
+ }
+
+ count = nptes;
+ last_pte = pte;
+ last_dst = dst;
+ } else {
+ count += nptes;
+ }
+
+ addr += nptes;
+ dst += nptes * RADEON_GPU_PAGE_SIZE;
+ }
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
+ last_dst, count,
+ RADEON_GPU_PAGE_SIZE, flags);
+ }
+}
+
+/**
+ * radeon_vm_bo_update - map a bo into the vm page table
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @mem: ttm memory region the bo is placed in (NULL to clear the mapping)
+ *
+ * Fill in the page table entries for @bo (cayman+).
+ * Returns 0 for success, -EINVAL for failure.
+ *
+ * Object has to be reserved and the vm mutex must be locked!
+ */
+int radeon_vm_bo_update(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem)
+{
+ struct radeon_ib ib;
+ struct radeon_bo_va *bo_va;
+ unsigned nptes, ndw;
+ uint64_t addr;
+ int r;
+
+ bo_va = radeon_vm_bo_find(vm, bo);
+ if (bo_va == NULL) {
+ dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+ return -EINVAL;
+ }
+
+ if (!bo_va->soffset) {
+ dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
+ bo, vm);
+ return -EINVAL;
+ }
+
+ if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
+ return 0;
+
+ bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+ bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+ if (mem) {
+ addr = mem->start << PAGE_SHIFT;
+ if (mem->mem_type != TTM_PL_SYSTEM) {
+ bo_va->flags |= RADEON_VM_PAGE_VALID;
+ bo_va->valid = true;
+ }
+ if (mem->mem_type == TTM_PL_TT) {
+ bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+ } else {
+ addr += rdev->vm_manager.vram_base_offset;
+ }
+ } else {
+ addr = 0;
+ bo_va->valid = false;
+ }
+
+ trace_radeon_vm_bo_update(bo_va);
+
+ nptes = radeon_bo_ngpu_pages(bo);
+
+ /* padding, etc. */
+ ndw = 64;
+
+ if (RADEON_VM_BLOCK_SIZE > 11)
+ /* reserve space for one header for every 2k dwords */
+ ndw += (nptes >> 11) * 4;
+ else
+ /* reserve space for one header for
+ every (1 << BLOCK_SIZE) entries */
+ ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
+
+ /* reserve space for pte addresses */
+ ndw += nptes * 2;
+
+ /* update too big for an IB */
+ if (ndw > 0xfffff)
+ return -ENOMEM;
+
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+ if (r)
+ return r;
+ ib.length_dw = 0;
+
+ radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
+ addr, radeon_vm_page_flags(bo_va->flags));
+
+ radeon_semaphore_sync_to(ib.semaphore, vm->fence);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(ib.fence);
+ radeon_ib_free(rdev, &ib);
+ radeon_fence_unref(&vm->last_flush);
+
+ return 0;
+}
+
+/**
+ * radeon_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: requested bo_va
+ *
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
+ * remove the ptes for @bo_va in the page table.
+ * Returns 0 for success.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va)
+{
+ int r = 0;
+
+ mutex_lock(&bo_va->vm->mutex);
+ if (bo_va->soffset)
+ r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&bo_va->vm->mutex);
+ list_del(&bo_va->bo_list);
+
+ kfree(bo_va);
+ return r;
+}
+
+/**
+ * radeon_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @rdev: radeon_device pointer
+ * @bo: radeon buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ list_for_each_entry(bo_va, &bo->va, bo_list) {
+ bo_va->valid = false;
+ }
+}
+
+/**
+ * radeon_vm_init - initialize a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm fields (cayman+).
+ */
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ unsigned pd_size, pd_entries, pts_size;
+ int r;
+
+ vm->id = 0;
+ vm->fence = NULL;
+ vm->last_flush = NULL;
+ vm->last_id_use = NULL;
+ mutex_init(&vm->mutex);
+ INIT_LIST_HEAD(&vm->va);
+
+ pd_size = radeon_vm_directory_size(rdev);
+ pd_entries = radeon_vm_num_pdes(rdev);
+
+ /* allocate page table array */
+ pts_size = pd_entries * sizeof(struct radeon_vm_pt);
+ vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+ if (vm->page_tables == NULL) {
+ DRM_ERROR("Cannot allocate memory for page table array\n");
+ return -ENOMEM;
+ }
+
+ r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false,
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &vm->page_directory);
+ if (r)
+ return r;
+
+ r = radeon_vm_clear_bo(rdev, vm->page_directory);
+ if (r) {
+ radeon_bo_unref(&vm->page_directory);
+ vm->page_directory = NULL;
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * radeon_vm_fini - tear down a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list
+ */
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int i, r;
+
+ if (!list_empty(&vm->va)) {
+ dev_err(rdev->dev, "still active bo inside vm\n");
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+ list_del_init(&bo_va->vm_list);
+ r = radeon_bo_reserve(bo_va->bo, false);
+ if (!r) {
+ list_del_init(&bo_va->bo_list);
+ radeon_bo_unreserve(bo_va->bo);
+ kfree(bo_va);
+ }
+ }
+
+ for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+ radeon_bo_unref(&vm->page_tables[i].bo);
+ kfree(vm->page_tables);
+
+ radeon_bo_unref(&vm->page_directory);
+
+ radeon_fence_unref(&vm->fence);
+ radeon_fence_unref(&vm->last_flush);
+ radeon_fence_unref(&vm->last_id_use);
+
+ mutex_destroy(&vm->mutex);
+}
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 8512085b0aef..02f7710de470 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -807,9 +807,6 @@ static int rs780_parse_power_table(struct radeon_device *rdev)
power_info->pplib.ucNumStates, GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
- rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < power_info->pplib.ucNumStates; i++) {
power_state = (union pplib_power_state *)
@@ -859,6 +856,10 @@ int rs780_dpm_init(struct radeon_device *rdev)
return -ENOMEM;
rdev->pm.dpm.priv = pi;
+ ret = r600_get_platform_caps(rdev);
+ if (ret)
+ return ret;
+
ret = rs780_parse_power_table(rdev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index bebf31c4d841..e7045b085715 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1891,9 +1891,6 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
power_info->pplib.ucNumStates, GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
- rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < power_info->pplib.ucNumStates; i++) {
power_state = (union pplib_power_state *)
@@ -1943,6 +1940,10 @@ int rv6xx_dpm_init(struct radeon_device *rdev)
return -ENOMEM;
rdev->pm.dpm.priv = pi;
+ ret = r600_get_platform_caps(rdev);
+ if (ret)
+ return ret;
+
ret = rv6xx_parse_power_table(rdev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index b5f63f5e22a3..da041a43d82e 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2281,9 +2281,6 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
power_info->pplib.ucNumStates, GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
- rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < power_info->pplib.ucNumStates; i++) {
power_state = (union pplib_power_state *)
@@ -2361,6 +2358,10 @@ int rv770_dpm_init(struct radeon_device *rdev)
pi->min_vddc_in_table = 0;
pi->max_vddc_in_table = 0;
+ ret = r600_get_platform_caps(rdev);
+ if (ret)
+ return ret;
+
ret = rv7xx_parse_power_table(rdev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 9a124d0608b3..d589475fe9e6 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3434,8 +3434,6 @@ static int si_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
- ring->rptr = RREG32(CP_RB0_RPTR);
-
/* ring1 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
@@ -3460,8 +3458,6 @@ static int si_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
- ring->rptr = RREG32(CP_RB1_RPTR);
-
/* ring2 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
@@ -3486,8 +3482,6 @@ static int si_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
- ring->rptr = RREG32(CP_RB2_RPTR);
-
/* start the rings */
si_cp_start(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
@@ -3872,11 +3866,9 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
if (!(reset_mask & (RADEON_RESET_GFX |
RADEON_RESET_COMPUTE |
RADEON_RESET_CP))) {
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 59be2cfcbb47..cf0fdad8c278 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -49,11 +49,9 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
mask = RADEON_RESET_DMA1;
if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0a2f5b4bca43..9a3567bedaae 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6271,9 +6271,6 @@ static int si_parse_power_table(struct radeon_device *rdev)
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
- rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
@@ -6350,6 +6347,10 @@ int si_dpm_init(struct radeon_device *rdev)
pi->min_vddc_in_table = 0;
pi->max_vddc_in_table = 0;
+ ret = r600_get_platform_caps(rdev);
+ if (ret)
+ return ret;
+
ret = si_parse_power_table(rdev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 9239a6d29128..683532f84931 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1798,4 +1798,51 @@
#define DMA_PACKET_CONSTANT_FILL 0xd
#define DMA_PACKET_NOP 0xf
+#define VCE_STATUS 0x20004
+#define VCE_VCPU_CNTL 0x20014
+#define VCE_CLK_EN (1 << 0)
+#define VCE_VCPU_CACHE_OFFSET0 0x20024
+#define VCE_VCPU_CACHE_SIZE0 0x20028
+#define VCE_VCPU_CACHE_OFFSET1 0x2002c
+#define VCE_VCPU_CACHE_SIZE1 0x20030
+#define VCE_VCPU_CACHE_OFFSET2 0x20034
+#define VCE_VCPU_CACHE_SIZE2 0x20038
+#define VCE_SOFT_RESET 0x20120
+#define VCE_ECPU_SOFT_RESET (1 << 0)
+#define VCE_FME_SOFT_RESET (1 << 2)
+#define VCE_RB_BASE_LO2 0x2016c
+#define VCE_RB_BASE_HI2 0x20170
+#define VCE_RB_SIZE2 0x20174
+#define VCE_RB_RPTR2 0x20178
+#define VCE_RB_WPTR2 0x2017c
+#define VCE_RB_BASE_LO 0x20180
+#define VCE_RB_BASE_HI 0x20184
+#define VCE_RB_SIZE 0x20188
+#define VCE_RB_RPTR 0x2018c
+#define VCE_RB_WPTR 0x20190
+#define VCE_CLOCK_GATING_A 0x202f8
+#define VCE_CLOCK_GATING_B 0x202fc
+#define VCE_UENC_CLOCK_GATING 0x205bc
+#define VCE_UENC_REG_CLOCK_GATING 0x205c0
+#define VCE_FW_REG_STATUS 0x20e10
+# define VCE_FW_REG_STATUS_BUSY (1 << 0)
+# define VCE_FW_REG_STATUS_PASS (1 << 3)
+# define VCE_FW_REG_STATUS_DONE (1 << 11)
+#define VCE_LMI_FW_START_KEYSEL 0x20e18
+#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20
+#define VCE_LMI_CTRL2 0x20e74
+#define VCE_LMI_CTRL 0x20e98
+#define VCE_LMI_VM_CTRL 0x20ea0
+#define VCE_LMI_SWAP_CNTL 0x20eb4
+#define VCE_LMI_SWAP_CNTL1 0x20eb8
+#define VCE_LMI_CACHE_CTRL 0x20ef4
+
+#define VCE_CMD_NO_OP 0x00000000
+#define VCE_CMD_END 0x00000001
+#define VCE_CMD_IB 0x00000002
+#define VCE_CMD_FENCE 0x00000003
+#define VCE_CMD_TRAP 0x00000004
+#define VCE_CMD_IB_AUTO 0x00000005
+#define VCE_CMD_SEMAPHORE 0x00000006
+
#endif
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 8b47b3cd0357..3f0e8d7b8dbe 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1484,9 +1484,6 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
- rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
@@ -1772,6 +1769,10 @@ int sumo_dpm_init(struct radeon_device *rdev)
sumo_construct_boot_and_acpi_state(rdev);
+ ret = r600_get_platform_caps(rdev);
+ if (ret)
+ return ret;
+
ret = sumo_parse_power_table(rdev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2da0e17eb960..2a2822c03329 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1694,9 +1694,6 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
- rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
@@ -1895,6 +1892,10 @@ int trinity_dpm_init(struct radeon_device *rdev)
trinity_construct_boot_state(rdev);
+ ret = r600_get_platform_caps(rdev);
+ if (ret)
+ return ret;
+
ret = trinity_parse_power_table(rdev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index d4a68af1a279..0a243f0e5d68 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -262,7 +262,7 @@ int uvd_v1_0_start(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(UVD_RBC_RB_RPTR, 0x0);
- ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
+ ring->wptr = RREG32(UVD_RBC_RB_RPTR);
WREG32(UVD_RBC_RB_WPTR, ring->wptr);
/* set the ring address */
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
new file mode 100644
index 000000000000..b44d9c842f7b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "sid.h"
+
+/**
+ * vce_v1_0_get_rptr - get read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+ return RREG32(VCE_RB_RPTR);
+ else
+ return RREG32(VCE_RB_RPTR2);
+}
+
+/**
+ * vce_v1_0_get_wptr - get write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+ return RREG32(VCE_RB_WPTR);
+ else
+ return RREG32(VCE_RB_WPTR2);
+}
+
+/**
+ * vce_v1_0_set_wptr - set write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+void vce_v1_0_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+ WREG32(VCE_RB_WPTR, ring->wptr);
+ else
+ WREG32(VCE_RB_WPTR2, ring->wptr);
+}
+
+/**
+ * vce_v1_0_start - start VCE block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup and start the VCE block
+ */
+int vce_v1_0_start(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int i, j, r;
+
+ /* set BUSY flag */
+ WREG32_P(VCE_STATUS, 1, ~1);
+
+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+ WREG32(VCE_RB_RPTR, ring->wptr);
+ WREG32(VCE_RB_WPTR, ring->wptr);
+ WREG32(VCE_RB_BASE_LO, ring->gpu_addr);
+ WREG32(VCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(VCE_RB_SIZE, ring->ring_size / 4);
+
+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+ WREG32(VCE_RB_RPTR2, ring->wptr);
+ WREG32(VCE_RB_WPTR2, ring->wptr);
+ WREG32(VCE_RB_BASE_LO2, ring->gpu_addr);
+ WREG32(VCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(VCE_RB_SIZE2, ring->ring_size / 4);
+
+ WREG32_P(VCE_VCPU_CNTL, VCE_CLK_EN, ~VCE_CLK_EN);
+
+ WREG32_P(VCE_SOFT_RESET,
+ VCE_ECPU_SOFT_RESET |
+ VCE_FME_SOFT_RESET, ~(
+ VCE_ECPU_SOFT_RESET |
+ VCE_FME_SOFT_RESET));
+
+ mdelay(100);
+
+ WREG32_P(VCE_SOFT_RESET, 0, ~(
+ VCE_ECPU_SOFT_RESET |
+ VCE_FME_SOFT_RESET));
+
+ for (i = 0; i < 10; ++i) {
+ uint32_t status;
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(VCE_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
+
+ DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+ WREG32_P(VCE_SOFT_RESET, VCE_ECPU_SOFT_RESET, ~VCE_ECPU_SOFT_RESET);
+ mdelay(10);
+ WREG32_P(VCE_SOFT_RESET, 0, ~VCE_ECPU_SOFT_RESET);
+ mdelay(10);
+ r = -1;
+ }
+
+ /* clear BUSY flag */
+ WREG32_P(VCE_STATUS, 0, ~1);
+
+ if (r) {
+ DRM_ERROR("VCE not responding, giving up!!!\n");
+ return r;
+ }
+
+ return 0;
+}
+
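+/**
+ * vce_v1_0_init - initialize and test the VCE rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Start the VCE block and run the ring test on both VCE rings,
+ * marking them ready on success.
+ * Returns 0 on success, negative error code on failure.
+ */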
+int vce_v1_0_init(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ r = vce_v1_0_start(rdev);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+ ring->ready = true;
+ r = radeon_ring_test(rdev, TN_RING_TYPE_VCE1_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+ ring->ready = true;
+ r = radeon_ring_test(rdev, TN_RING_TYPE_VCE2_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ DRM_INFO("VCE initialized successfully.\n");
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
new file mode 100644
index 000000000000..1ac7bb825a1b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "cikd.h"
+
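+/*
+ * Software controlled clock gating: directly set or clear the VCE
+ * clock gating register bits depending on @gated.
+ */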
+static void vce_v2_0_set_sw_cg(struct radeon_device *rdev, bool gated)
+{
+ u32 tmp;
+
+ if (gated) {
+ tmp = RREG32(VCE_CLOCK_GATING_B);
+ tmp |= 0xe70000;
+ WREG32(VCE_CLOCK_GATING_B, tmp);
+
+ tmp = RREG32(VCE_UENC_CLOCK_GATING);
+ tmp |= 0xff000000;
+ WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3fc;
+ WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+
+ WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
+ } else {
+ tmp = RREG32(VCE_CLOCK_GATING_B);
+ tmp |= 0xe7;
+ tmp &= ~0xe70000;
+ WREG32(VCE_CLOCK_GATING_B, tmp);
+
+ tmp = RREG32(VCE_UENC_CLOCK_GATING);
+ tmp |= 0x1fe000;
+ tmp &= ~0xff000000;
+ WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+ tmp |= 0x3fc;
+ WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+ }
+}
+
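+/*
+ * Dynamic clock gating variant: adjust the VCE clock gating registers
+ * for hardware controlled gating depending on @gated.
+ */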
+static void vce_v2_0_set_dyn_cg(struct radeon_device *rdev, bool gated)
+{
+ u32 orig, tmp;
+
+ tmp = RREG32(VCE_CLOCK_GATING_B);
+ tmp &= ~0x00060006;
+ if (gated) {
+ tmp |= 0xe10000;
+ } else {
+ tmp |= 0xe1;
+ tmp &= ~0xe10000;
+ }
+ WREG32(VCE_CLOCK_GATING_B, tmp);
+
+ orig = tmp = RREG32(VCE_UENC_CLOCK_GATING);
+ tmp &= ~0x1fe000;
+ tmp &= ~0xff000000;
+ if (tmp != orig)
+ WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+ orig = tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3fc;
+ if (tmp != orig)
+ WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+
+ if (gated)
+ WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
+}
+
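+/* disable clock gating by setting the clock gating override bits */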
+static void vce_v2_0_disable_cg(struct radeon_device *rdev)
+{
+ WREG32(VCE_CGTT_CLK_OVERRIDE, 7);
+}
+
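+/**
+ * vce_v2_0_enable_mgcg - enable/disable medium grain clock gating
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable or disable clock gating
+ *
+ * Pick between the software and the dynamic clock gating method
+ * (currently hardwired to dynamic) and apply it, or disable clock
+ * gating entirely when MGCG is not supported or not requested.
+ */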
+void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable)
+{
+ bool sw_cg = false;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_VCE_MGCG)) {
+ if (sw_cg)
+ vce_v2_0_set_sw_cg(rdev, true);
+ else
+ vce_v2_0_set_dyn_cg(rdev, true);
+ } else {
+ vce_v2_0_disable_cg(rdev);
+
+ if (sw_cg)
+ vce_v2_0_set_sw_cg(rdev, false);
+ else
+ vce_v2_0_set_dyn_cg(rdev, false);
+ }
+}
+
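+/* program the initial clock gating delay timers and enable bits */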
+static void vce_v2_0_init_cg(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32(VCE_CLOCK_GATING_A);
+ tmp &= ~(CGC_CLK_GATE_DLY_TIMER_MASK | CGC_CLK_GATER_OFF_DLY_TIMER_MASK);
+ tmp |= (CGC_CLK_GATE_DLY_TIMER(0) | CGC_CLK_GATER_OFF_DLY_TIMER(4));
+ tmp |= CGC_UENC_WAIT_AWAKE;
+ WREG32(VCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(VCE_UENC_CLOCK_GATING);
+ tmp &= ~(CLOCK_ON_DELAY_MASK | CLOCK_OFF_DELAY_MASK);
+ tmp |= (CLOCK_ON_DELAY(0) | CLOCK_OFF_DELAY(4));
+ WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(VCE_CLOCK_GATING_B);
+ tmp |= 0x10;
+ tmp &= ~0x100000;
+ WREG32(VCE_CLOCK_GATING_B, tmp);
+}
+
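+/**
+ * vce_v2_0_resume - program the VCE memory controller interface
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the LMI and map the firmware, stack and heap regions into
+ * the VCPU caches, then initialize clock gating.
+ * Returns 0 on success.
+ */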
+int vce_v2_0_resume(struct radeon_device *rdev)
+{
+ uint64_t addr = rdev->vce.gpu_addr;
+ uint32_t size;
+
+ WREG32_P(VCE_CLOCK_GATING_A, 0, ~(1 << 16));
+ WREG32_P(VCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+ WREG32_P(VCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+ WREG32(VCE_CLOCK_GATING_B, 0xf7);
+
+ WREG32(VCE_LMI_CTRL, 0x00398000);
+ WREG32_P(VCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+ WREG32(VCE_LMI_SWAP_CNTL, 0);
+ WREG32(VCE_LMI_SWAP_CNTL1, 0);
+ WREG32(VCE_LMI_VM_CTRL, 0);
+
+ size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size);
+ WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
+ WREG32(VCE_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = RADEON_VCE_STACK_SIZE;
+ WREG32(VCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
+ WREG32(VCE_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = RADEON_VCE_HEAP_SIZE;
+ WREG32(VCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
+ WREG32(VCE_VCPU_CACHE_SIZE2, size);
+
+ WREG32_P(VCE_LMI_CTRL2, 0x0, ~0x100);
+
+ WREG32_P(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN,
+ ~VCE_SYS_INT_TRAP_INTERRUPT_EN);
+
+ vce_v2_0_init_cg(rdev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index fbf4be316d0b..299267db2898 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -299,7 +299,7 @@ static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
- rcar_du_plane_compute_base(rcrtc->plane, crtc->fb);
+ rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb);
rcar_du_plane_update_base(rcrtc->plane);
}
@@ -358,10 +358,10 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
const struct rcar_du_format_info *format;
int ret;
- format = rcar_du_format_info(crtc->fb->pixel_format);
+ format = rcar_du_format_info(crtc->primary->fb->pixel_format);
if (format == NULL) {
dev_dbg(rcdu->dev, "mode_set: unsupported format %08x\n",
- crtc->fb->pixel_format);
+ crtc->primary->fb->pixel_format);
ret = -EINVAL;
goto error;
}
@@ -377,7 +377,7 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
rcrtc->plane->width = mode->hdisplay;
rcrtc->plane->height = mode->vdisplay;
- rcar_du_plane_compute_base(rcrtc->plane, crtc->fb);
+ rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb);
rcrtc->outputs = 0;
@@ -510,7 +510,7 @@ static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- crtc->fb = fb;
+ crtc->primary->fb = fb;
rcar_du_crtc_update_base(rcrtc);
if (event) {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index fbeabd9a281f..a87edfac111f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -248,7 +248,10 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
continue;
}
- rcar_du_encoder_init(rcdu, pdata->type, pdata->output, pdata);
+ ret = rcar_du_encoder_init(rcdu, pdata->type, pdata->output,
+ pdata);
+ if (ret < 0)
+ return ret;
}
/* Set the possible CRTCs and possible clones. There's always at least
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0428076f1ce8..e9e5e6d368cc 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -173,7 +173,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
if (scrtc->started)
return;
- format = shmob_drm_format_info(crtc->fb->pixel_format);
+ format = shmob_drm_format_info(crtc->primary->fb->pixel_format);
if (WARN_ON(format == NULL))
return;
@@ -247,7 +247,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
lcdc_write(sdev, LDDDSR, value);
/* Setup planes. */
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
if (plane->crtc == crtc)
shmob_drm_plane_setup(plane);
}
@@ -303,7 +303,7 @@ static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
int x, int y)
{
struct drm_crtc *crtc = &scrtc->crtc;
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
struct shmob_drm_device *sdev = crtc->dev->dev_private;
struct drm_gem_cma_object *gem;
unsigned int bpp;
@@ -382,15 +382,15 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
const struct shmob_drm_format_info *format;
void *cache;
- format = shmob_drm_format_info(crtc->fb->pixel_format);
+ format = shmob_drm_format_info(crtc->primary->fb->pixel_format);
if (format == NULL) {
dev_dbg(sdev->dev, "mode_set: unsupported format %08x\n",
- crtc->fb->pixel_format);
+ crtc->primary->fb->pixel_format);
return -EINVAL;
}
scrtc->format = format;
- scrtc->line_size = crtc->fb->pitches[0];
+ scrtc->line_size = crtc->primary->fb->pitches[0];
if (sdev->meram) {
/* Enable MERAM cache if configured. We need to de-init
@@ -402,7 +402,7 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
}
cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata,
- crtc->fb->pitches[0],
+ crtc->primary->fb->pitches[0],
adjusted_mode->vdisplay,
format->meram,
&scrtc->line_size);
@@ -489,7 +489,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- crtc->fb = fb;
+ crtc->primary->fb = fb;
shmob_drm_crtc_update_base(scrtc);
if (event) {
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 8d220afbd85f..d43f21bb4596 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -11,6 +11,8 @@ tegra-drm-y := \
hdmi.o \
mipi-phy.o \
dsi.o \
+ sor.o \
+ dpaux.o \
gr2d.o \
gr3d.o
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
index e38e5967d77b..71cef5c13dc8 100644
--- a/drivers/gpu/drm/tegra/bus.c
+++ b/drivers/gpu/drm/tegra/bus.c
@@ -63,7 +63,7 @@ int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
return 0;
err_free:
- drm_dev_free(drm);
+ drm_dev_unref(drm);
return ret;
}
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 9336006b475d..36c717af6cf9 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -235,14 +235,14 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
if (!dc->event)
return;
- bo = tegra_fb_get_plane(crtc->fb, 0);
+ bo = tegra_fb_get_plane(crtc->primary->fb, 0);
/* check if new start address has been latched */
tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
- if (base == bo->paddr + crtc->fb->offsets[0]) {
+ if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
spin_lock_irqsave(&drm->event_lock, flags);
drm_send_vblank_event(drm, dc->pipe, dc->event);
drm_vblank_put(drm, dc->pipe);
@@ -284,7 +284,7 @@ static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
tegra_dc_set_base(dc, 0, 0, fb);
- crtc->fb = fb;
+ crtc->primary->fb = fb;
return 0;
}
@@ -645,7 +645,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *adjusted,
int x, int y, struct drm_framebuffer *old_fb)
{
- struct tegra_bo *bo = tegra_fb_get_plane(crtc->fb, 0);
+ struct tegra_bo *bo = tegra_fb_get_plane(crtc->primary->fb, 0);
struct tegra_dc *dc = to_tegra_dc(crtc);
struct tegra_dc_window window;
unsigned long div, value;
@@ -682,9 +682,9 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
window.dst.y = 0;
window.dst.w = mode->hdisplay;
window.dst.h = mode->vdisplay;
- window.format = tegra_dc_format(crtc->fb->pixel_format);
- window.bits_per_pixel = crtc->fb->bits_per_pixel;
- window.stride[0] = crtc->fb->pitches[0];
+ window.format = tegra_dc_format(crtc->primary->fb->pixel_format);
+ window.bits_per_pixel = crtc->primary->fb->bits_per_pixel;
+ window.stride[0] = crtc->primary->fb->pitches[0];
window.base[0] = bo->paddr;
err = tegra_dc_setup_window(dc, 0, &window);
@@ -699,7 +699,7 @@ static int tegra_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
{
struct tegra_dc *dc = to_tegra_dc(crtc);
- return tegra_dc_set_base(dc, x, y, crtc->fb);
+ return tegra_dc_set_base(dc, x, y, crtc->primary->fb);
}
static void tegra_crtc_prepare(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 3c2c0ea1cd87..c94101494826 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -118,6 +118,7 @@
#define DC_DISP_DISP_WIN_OPTIONS 0x402
#define HDMI_ENABLE (1 << 30)
#define DSI_ENABLE (1 << 29)
+#define SOR_ENABLE (1 << 25)
#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
new file mode 100644
index 000000000000..d536ed381fbd
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -0,0 +1,544 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
+
+#include "dpaux.h"
+#include "drm.h"
+
+static DEFINE_MUTEX(dpaux_lock);
+static LIST_HEAD(dpaux_list);
+
+struct tegra_dpaux {
+ struct drm_dp_aux aux;
+ struct device *dev;
+
+ void __iomem *regs;
+ int irq;
+
+ struct tegra_output *output;
+
+ struct reset_control *rst;
+ struct clk *clk_parent;
+ struct clk *clk;
+
+ struct regulator *vdd;
+
+ struct completion complete;
+ struct list_head list;
+};
+
+static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
+{
+ return container_of(aux, struct tegra_dpaux, aux);
+}
+
+static inline unsigned long tegra_dpaux_readl(struct tegra_dpaux *dpaux,
+ unsigned long offset)
+{
+ return readl(dpaux->regs + (offset << 2));
+}
+
+static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
+ unsigned long value,
+ unsigned long offset)
+{
+ writel(value, dpaux->regs + (offset << 2));
+}
+
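+/* pack the outgoing bytes into the 32-bit AUX write data registers */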
+static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer,
+ size_t size)
+{
+ unsigned long offset = DPAUX_DP_AUXDATA_WRITE(0);
+ size_t i, j;
+
+ for (i = 0; i < size; i += 4) {
+ size_t num = min_t(size_t, size - i, 4);
+ unsigned long value = 0;
+
+ for (j = 0; j < num; j++)
+ value |= buffer[i + j] << (j * 8);
+
+ tegra_dpaux_writel(dpaux, value, offset++);
+ }
+}
+
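+/* unpack the received bytes from the 32-bit AUX read data registers */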
+static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer,
+ size_t size)
+{
+ unsigned long offset = DPAUX_DP_AUXDATA_READ(0);
+ size_t i, j;
+
+ for (i = 0; i < size; i += 4) {
+ size_t num = min_t(size_t, size - i, 4);
+ unsigned long value;
+
+ value = tegra_dpaux_readl(dpaux, offset++);
+
+ for (j = 0; j < num; j++)
+ buffer[i + j] = value >> (j * 8);
+ }
+}
+
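+/*
+ * Perform a single native or I2C-over-AUX transaction described by @msg.
+ * Returns the number of bytes transferred or a negative error code.
+ */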
+static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ unsigned long value = DPAUX_DP_AUXCTL_TRANSACTREQ;
+ unsigned long timeout = msecs_to_jiffies(250);
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
+ unsigned long status;
+ ssize_t ret = 0;
+
+ if (msg->size < 1 || msg->size > 16)
+ return -EINVAL;
+
+ tegra_dpaux_writel(dpaux, msg->address, DPAUX_DP_AUXADDR);
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_I2C_WRITE:
+ if (msg->request & DP_AUX_I2C_MOT)
+ value = DPAUX_DP_AUXCTL_CMD_MOT_WR;
+ else
+ value = DPAUX_DP_AUXCTL_CMD_I2C_WR;
+
+ break;
+
+ case DP_AUX_I2C_READ:
+ if (msg->request & DP_AUX_I2C_MOT)
+ value = DPAUX_DP_AUXCTL_CMD_MOT_RD;
+ else
+ value = DPAUX_DP_AUXCTL_CMD_I2C_RD;
+
+ break;
+
+ case DP_AUX_I2C_STATUS:
+ if (msg->request & DP_AUX_I2C_MOT)
+ value = DPAUX_DP_AUXCTL_CMD_MOT_RQ;
+ else
+ value = DPAUX_DP_AUXCTL_CMD_I2C_RQ;
+
+ break;
+
+ case DP_AUX_NATIVE_WRITE:
+ value = DPAUX_DP_AUXCTL_CMD_AUX_WR;
+ break;
+
+ case DP_AUX_NATIVE_READ:
+ value = DPAUX_DP_AUXCTL_CMD_AUX_RD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ value |= DPAUX_DP_AUXCTL_CMDLEN(msg->size - 1);
+ tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL);
+
+ if ((msg->request & DP_AUX_I2C_READ) == 0) {
+ tegra_dpaux_write_fifo(dpaux, msg->buffer, msg->size);
+ ret = msg->size;
+ }
+
+ /* start transaction */
+ value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXCTL);
+ value |= DPAUX_DP_AUXCTL_TRANSACTREQ;
+ tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL);
+
+ status = wait_for_completion_timeout(&dpaux->complete, timeout);
+ if (!status)
+ return -ETIMEDOUT;
+
+ /* read status and clear errors */
+ value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
+ tegra_dpaux_writel(dpaux, 0xf00, DPAUX_DP_AUXSTAT);
+
+ if (value & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR)
+ return -ETIMEDOUT;
+
+ if ((value & DPAUX_DP_AUXSTAT_RX_ERROR) ||
+ (value & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR) ||
+ (value & DPAUX_DP_AUXSTAT_NO_STOP_ERROR))
+ return -EIO;
+
+ switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
+ case 0x00:
+ msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+ break;
+
+ case 0x01:
+ msg->reply = DP_AUX_NATIVE_REPLY_NACK;
+ break;
+
+ case 0x02:
+ msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+ break;
+
+ case 0x04:
+ msg->reply = DP_AUX_I2C_REPLY_NACK;
+ break;
+
+ case 0x08:
+ msg->reply = DP_AUX_I2C_REPLY_DEFER;
+ break;
+ }
+
+ if (msg->reply == DP_AUX_NATIVE_REPLY_ACK) {
+ if (msg->request & DP_AUX_I2C_READ) {
+ size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
+
+ if (WARN_ON(count != msg->size))
+ count = min_t(size_t, count, msg->size);
+
+ tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
+ ret = count;
+ }
+ }
+
+ return ret;
+}
+
+static irqreturn_t tegra_dpaux_irq(int irq, void *data)
+{
+ struct tegra_dpaux *dpaux = data;
+ irqreturn_t ret = IRQ_HANDLED;
+ unsigned long value;
+
+ /* clear interrupts */
+ value = tegra_dpaux_readl(dpaux, DPAUX_INTR_AUX);
+ tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);
+
+ if (value & DPAUX_INTR_PLUG_EVENT) {
+ if (dpaux->output)
+ drm_helper_hpd_irq_event(dpaux->output->connector.dev);
+ }
+
+ if (value & DPAUX_INTR_UNPLUG_EVENT) {
+ if (dpaux->output)
+ drm_helper_hpd_irq_event(dpaux->output->connector.dev);
+ }
+
+ if (value & DPAUX_INTR_IRQ_EVENT) {
+ /* TODO: handle this */
+ }
+
+ if (value & DPAUX_INTR_AUX_DONE)
+ complete(&dpaux->complete);
+
+ return ret;
+}
+
+static int tegra_dpaux_probe(struct platform_device *pdev)
+{
+ struct tegra_dpaux *dpaux;
+ struct resource *regs;
+ unsigned long value;
+ int err;
+
+ dpaux = devm_kzalloc(&pdev->dev, sizeof(*dpaux), GFP_KERNEL);
+ if (!dpaux)
+ return -ENOMEM;
+
+ init_completion(&dpaux->complete);
+ INIT_LIST_HEAD(&dpaux->list);
+ dpaux->dev = &pdev->dev;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dpaux->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(dpaux->regs))
+ return PTR_ERR(dpaux->regs);
+
+ dpaux->irq = platform_get_irq(pdev, 0);
+ if (dpaux->irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ return -ENXIO;
+ }
+
+ dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
+ if (IS_ERR(dpaux->rst))
+ return PTR_ERR(dpaux->rst);
+
+ dpaux->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dpaux->clk))
+ return PTR_ERR(dpaux->clk);
+
+ err = clk_prepare_enable(dpaux->clk);
+ if (err < 0)
+ return err;
+
+ reset_control_deassert(dpaux->rst);
+
+ dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ if (IS_ERR(dpaux->clk_parent))
+ return PTR_ERR(dpaux->clk_parent);
+
+ err = clk_prepare_enable(dpaux->clk_parent);
+ if (err < 0)
+ return err;
+
+ err = clk_set_rate(dpaux->clk_parent, 270000000);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
+ err);
+ return err;
+ }
+
+ dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(dpaux->vdd))
+ return PTR_ERR(dpaux->vdd);
+
+ err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
+ dev_name(dpaux->dev), dpaux);
+ if (err < 0) {
+ dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
+ dpaux->irq, err);
+ return err;
+ }
+
+ dpaux->aux.transfer = tegra_dpaux_transfer;
+ dpaux->aux.dev = &pdev->dev;
+
+ err = drm_dp_aux_register_i2c_bus(&dpaux->aux);
+ if (err < 0)
+ return err;
+
+ /* enable and clear all interrupts */
+ value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
+ DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
+ tegra_dpaux_writel(dpaux, value, DPAUX_INTR_EN_AUX);
+ tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);
+
+ mutex_lock(&dpaux_lock);
+ list_add_tail(&dpaux->list, &dpaux_list);
+ mutex_unlock(&dpaux_lock);
+
+ platform_set_drvdata(pdev, dpaux);
+
+ return 0;
+}
+
+static int tegra_dpaux_remove(struct platform_device *pdev)
+{
+ struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
+
+ drm_dp_aux_unregister_i2c_bus(&dpaux->aux);
+
+ mutex_lock(&dpaux_lock);
+ list_del(&dpaux->list);
+ mutex_unlock(&dpaux_lock);
+
+ clk_disable_unprepare(dpaux->clk_parent);
+ reset_control_assert(dpaux->rst);
+ clk_disable_unprepare(dpaux->clk);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_dpaux_of_match[] = {
+ { .compatible = "nvidia,tegra124-dpaux", },
+ { },
+};
+
+struct platform_driver tegra_dpaux_driver = {
+ .driver = {
+ .name = "tegra-dpaux",
+ .of_match_table = tegra_dpaux_of_match,
+ },
+ .probe = tegra_dpaux_probe,
+ .remove = tegra_dpaux_remove,
+};
+
+struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np)
+{
+ struct tegra_dpaux *dpaux;
+
+ mutex_lock(&dpaux_lock);
+
+ list_for_each_entry(dpaux, &dpaux_list, list)
+ if (np == dpaux->dev->of_node) {
+ mutex_unlock(&dpaux_lock);
+ return dpaux;
+ }
+
+ mutex_unlock(&dpaux_lock);
+
+ return NULL;
+}
+
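+/*
+ * Attaching enables the pad supply and then polls the HPD status for up to
+ * 250 ms until the sink reports presence; detaching disables the supply and
+ * waits for HPD to deassert before dropping the output reference.
+ */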
+int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
+{
+ unsigned long timeout;
+ int err;
+
+ dpaux->output = output;
+
+ err = regulator_enable(dpaux->vdd);
+ if (err < 0)
+ return err;
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ enum drm_connector_status status;
+
+ status = tegra_dpaux_detect(dpaux);
+ if (status == connector_status_connected)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
+{
+ unsigned long timeout;
+ int err;
+
+ err = regulator_disable(dpaux->vdd);
+ if (err < 0)
+ return err;
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ enum drm_connector_status status;
+
+ status = tegra_dpaux_detect(dpaux);
+ if (status == connector_status_disconnected) {
+ dpaux->output = NULL;
+ return 0;
+ }
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux)
+{
+ unsigned long value;
+
+ value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
+
+ if (value & DPAUX_DP_AUXSTAT_HPD_STATUS)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+int tegra_dpaux_enable(struct tegra_dpaux *dpaux)
+{
+ unsigned long value;
+
+ value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_MODE_AUX;
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+
+ value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+ value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+
+ return 0;
+}
+
+int tegra_dpaux_disable(struct tegra_dpaux *dpaux)
+{
+ unsigned long value;
+
+ value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+ value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+
+ return 0;
+}
+
+int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding)
+{
+ int err;
+
+ err = drm_dp_dpcd_writeb(&dpaux->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+ encoding);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
+ u8 pattern)
+{
+ u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
+ u8 status[DP_LINK_STATUS_SIZE], values[4];
+ unsigned int i;
+ int err;
+
+ err = drm_dp_dpcd_writeb(&dpaux->aux, DP_TRAINING_PATTERN_SET, pattern);
+ if (err < 0)
+ return err;
+
+ if (tp == DP_TRAINING_PATTERN_DISABLE)
+ return 0;
+
+ for (i = 0; i < link->num_lanes; i++)
+ values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
+ DP_TRAIN_PRE_EMPHASIS_0 |
+ DP_TRAIN_MAX_SWING_REACHED |
+ DP_TRAIN_VOLTAGE_SWING_400;
+
+ err = drm_dp_dpcd_write(&dpaux->aux, DP_TRAINING_LANE0_SET, values,
+ link->num_lanes);
+ if (err < 0)
+ return err;
+
+ usleep_range(500, 1000);
+
+ err = drm_dp_dpcd_read_link_status(&dpaux->aux, status);
+ if (err < 0)
+ return err;
+
+ switch (tp) {
+ case DP_TRAINING_PATTERN_1:
+ if (!drm_dp_clock_recovery_ok(status, link->num_lanes))
+ return -EAGAIN;
+
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ if (!drm_dp_channel_eq_ok(status, link->num_lanes))
+ return -EAGAIN;
+
+ break;
+
+ default:
+ dev_err(dpaux->dev, "unsupported training pattern %u\n", tp);
+ return -EINVAL;
+ }
+
+ err = drm_dp_dpcd_writeb(&dpaux->aux, DP_EDP_CONFIGURATION_SET, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tegra/dpaux.h b/drivers/gpu/drm/tegra/dpaux.h
new file mode 100644
index 000000000000..4f5bf10fdff9
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dpaux.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DRM_TEGRA_DPAUX_H
+#define DRM_TEGRA_DPAUX_H
+
+#define DPAUX_CTXSW 0x00
+
+#define DPAUX_INTR_EN_AUX 0x01
+#define DPAUX_INTR_AUX 0x05
+#define DPAUX_INTR_AUX_DONE (1 << 3)
+#define DPAUX_INTR_IRQ_EVENT (1 << 2)
+#define DPAUX_INTR_UNPLUG_EVENT (1 << 1)
+#define DPAUX_INTR_PLUG_EVENT (1 << 0)
+
+#define DPAUX_DP_AUXDATA_WRITE(x) (0x09 + ((x) << 2))
+#define DPAUX_DP_AUXDATA_READ(x) (0x19 + ((x) << 2))
+#define DPAUX_DP_AUXADDR 0x29
+
+#define DPAUX_DP_AUXCTL 0x2d
+#define DPAUX_DP_AUXCTL_TRANSACTREQ (1 << 16)
+#define DPAUX_DP_AUXCTL_CMD_AUX_RD (9 << 12)
+#define DPAUX_DP_AUXCTL_CMD_AUX_WR (8 << 12)
+#define DPAUX_DP_AUXCTL_CMD_MOT_RQ (6 << 12)
+#define DPAUX_DP_AUXCTL_CMD_MOT_RD (5 << 12)
+#define DPAUX_DP_AUXCTL_CMD_MOT_WR (4 << 12)
+#define DPAUX_DP_AUXCTL_CMD_I2C_RQ (2 << 12)
+#define DPAUX_DP_AUXCTL_CMD_I2C_RD (1 << 12)
+#define DPAUX_DP_AUXCTL_CMD_I2C_WR (0 << 12)
+#define DPAUX_DP_AUXCTL_CMDLEN(x) ((x) & 0xff)
+
+#define DPAUX_DP_AUXSTAT 0x31
+#define DPAUX_DP_AUXSTAT_HPD_STATUS (1 << 28)
+#define DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK (0xf0000)
+#define DPAUX_DP_AUXSTAT_NO_STOP_ERROR (1 << 11)
+#define DPAUX_DP_AUXSTAT_SINKSTAT_ERROR (1 << 10)
+#define DPAUX_DP_AUXSTAT_RX_ERROR (1 << 9)
+#define DPAUX_DP_AUXSTAT_TIMEOUT_ERROR (1 << 8)
+#define DPAUX_DP_AUXSTAT_REPLY_MASK (0xff)
+
+#define DPAUX_DP_AUX_SINKSTAT_LO 0x35
+#define DPAUX_DP_AUX_SINKSTAT_HI 0x39
+
+#define DPAUX_HPD_CONFIG 0x3d
+#define DPAUX_HPD_CONFIG_UNPLUG_MIN_TIME(x) (((x) & 0xffff) << 16)
+#define DPAUX_HPD_CONFIG_PLUG_MIN_TIME(x) ((x) & 0xffff)
+
+#define DPAUX_HPD_IRQ_CONFIG 0x41
+#define DPAUX_HPD_IRQ_CONFIG_MIN_LOW_TIME(x) ((x) & 0xffff)
+
+#define DPAUX_DP_AUX_CONFIG 0x45
+
+#define DPAUX_HYBRID_PADCTL 0x49
+#define DPAUX_HYBRID_PADCTL_AUX_CMH(x) (((x) & 0x3) << 12)
+#define DPAUX_HYBRID_PADCTL_AUX_DRVZ(x) (((x) & 0x7) << 8)
+#define DPAUX_HYBRID_PADCTL_AUX_DRVI(x) (((x) & 0x3f) << 2)
+#define DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV (1 << 1)
+#define DPAUX_HYBRID_PADCTL_MODE_I2C (1 << 0)
+#define DPAUX_HYBRID_PADCTL_MODE_AUX (0 << 0)
+
+#define DPAUX_HYBRID_SPARE 0x4d
+#define DPAUX_HYBRID_SPARE_PAD_POWER_DOWN (1 << 0)
+
+#define DPAUX_SCRATCH_REG0 0x51
+#define DPAUX_SCRATCH_REG1 0x55
+#define DPAUX_SCRATCH_REG2 0x59
+
+#endif
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index c71594754f46..6f5b6e2f552e 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -665,6 +665,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra114-hdmi", },
{ .compatible = "nvidia,tegra114-gr3d", },
{ .compatible = "nvidia,tegra124-dc", },
+ { .compatible = "nvidia,tegra124-sor", },
{ /* sentinel */ }
};
@@ -691,14 +692,22 @@ static int __init host1x_drm_init(void)
if (err < 0)
goto unregister_dc;
- err = platform_driver_register(&tegra_hdmi_driver);
+ err = platform_driver_register(&tegra_sor_driver);
if (err < 0)
goto unregister_dsi;
- err = platform_driver_register(&tegra_gr2d_driver);
+ err = platform_driver_register(&tegra_hdmi_driver);
+ if (err < 0)
+ goto unregister_sor;
+
+ err = platform_driver_register(&tegra_dpaux_driver);
if (err < 0)
goto unregister_hdmi;
+ err = platform_driver_register(&tegra_gr2d_driver);
+ if (err < 0)
+ goto unregister_dpaux;
+
err = platform_driver_register(&tegra_gr3d_driver);
if (err < 0)
goto unregister_gr2d;
@@ -707,8 +716,12 @@ static int __init host1x_drm_init(void)
unregister_gr2d:
platform_driver_unregister(&tegra_gr2d_driver);
+unregister_dpaux:
+ platform_driver_unregister(&tegra_dpaux_driver);
unregister_hdmi:
platform_driver_unregister(&tegra_hdmi_driver);
+unregister_sor:
+ platform_driver_unregister(&tegra_sor_driver);
unregister_dsi:
platform_driver_unregister(&tegra_dsi_driver);
unregister_dc:
@@ -723,7 +736,9 @@ static void __exit host1x_drm_exit(void)
{
platform_driver_unregister(&tegra_gr3d_driver);
platform_driver_unregister(&tegra_gr2d_driver);
+ platform_driver_unregister(&tegra_dpaux_driver);
platform_driver_unregister(&tegra_hdmi_driver);
+ platform_driver_unregister(&tegra_sor_driver);
platform_driver_unregister(&tegra_dsi_driver);
platform_driver_unregister(&tegra_dc_driver);
host1x_driver_unregister(&host1x_drm_driver);
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index bf1cac7658f8..126332c3ecbb 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -179,12 +179,14 @@ struct tegra_output_ops {
int (*check_mode)(struct tegra_output *output,
struct drm_display_mode *mode,
enum drm_mode_status *status);
+ enum drm_connector_status (*detect)(struct tegra_output *output);
};
enum tegra_output_type {
TEGRA_OUTPUT_RGB,
TEGRA_OUTPUT_HDMI,
TEGRA_OUTPUT_DSI,
+ TEGRA_OUTPUT_EDP,
};
struct tegra_output {
@@ -265,6 +267,22 @@ extern int tegra_output_remove(struct tegra_output *output);
extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
extern int tegra_output_exit(struct tegra_output *output);
+/* from dpaux.c */
+
+struct tegra_dpaux;
+struct drm_dp_link;
+struct drm_dp_aux;
+
+struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np);
+enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux);
+int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output);
+int tegra_dpaux_detach(struct tegra_dpaux *dpaux);
+int tegra_dpaux_enable(struct tegra_dpaux *dpaux);
+int tegra_dpaux_disable(struct tegra_dpaux *dpaux);
+int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding);
+int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
+ u8 pattern);
+
/* from fb.c */
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
unsigned int index);
@@ -278,7 +296,9 @@ extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
extern struct platform_driver tegra_dc_driver;
extern struct platform_driver tegra_dsi_driver;
+extern struct platform_driver tegra_sor_driver;
extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dpaux_driver;
extern struct platform_driver tegra_gr2d_driver;
extern struct platform_driver tegra_gr3d_driver;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index d452faab0235..0e599f0417c0 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1,23 +1,9 @@
/*
* Copyright (C) 2013 NVIDIA Corporation
*
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index 00e79c1f448c..1db5cc24ea91 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -1,23 +1,9 @@
/*
* Copyright (C) 2013 NVIDIA Corporation
*
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#ifndef DRM_TEGRA_DSI_H
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ef853e558036..bcf9895cef9f 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -8,14 +8,9 @@
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#include <linux/dma-buf.h>
@@ -394,6 +389,18 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
return -EINVAL;
}
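+/*
+ * The backing memory appears to stay kernel-mapped (bo->vaddr) for the
+ * lifetime of the buffer object, so vmap can simply hand out that address
+ * and vunmap has nothing to release.
+ */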
+static void *tegra_gem_prime_vmap(struct dma_buf *buf)
+{
+ struct drm_gem_object *gem = buf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+
+ return bo->vaddr;
+}
+
+static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
+{
+}
+
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.map_dma_buf = tegra_gem_prime_map_dma_buf,
.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
@@ -403,6 +410,8 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.kmap = tegra_gem_prime_kmap,
.kunmap = tegra_gem_prime_kunmap,
.mmap = tegra_gem_prime_mmap,
+ .vmap = tegra_gem_prime_vmap,
+ .vunmap = tegra_gem_prime_vunmap,
};
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index ffd4f792b410..2f3fe96c5154 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -3,17 +3,9 @@
*
* Copyright (c) 2012-2013, NVIDIA Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#ifndef __HOST1X_GEM_H
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 7ec4259ffded..2c7ca748edf5 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -1,17 +1,9 @@
/*
* Copyright (c) 2012-2013, NVIDIA Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/tegra/mipi-phy.c b/drivers/gpu/drm/tegra/mipi-phy.c
index e2c4aedaee78..486d19d589c8 100644
--- a/drivers/gpu/drm/tegra/mipi-phy.c
+++ b/drivers/gpu/drm/tegra/mipi-phy.c
@@ -1,23 +1,9 @@
/*
* Copyright (C) 2013 NVIDIA Corporation
*
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#include <linux/errno.h>
diff --git a/drivers/gpu/drm/tegra/mipi-phy.h b/drivers/gpu/drm/tegra/mipi-phy.h
index d3591694432d..012ea8ac36d7 100644
--- a/drivers/gpu/drm/tegra/mipi-phy.h
+++ b/drivers/gpu/drm/tegra/mipi-phy.h
@@ -1,23 +1,9 @@
/*
* Copyright (C) 2013 NVIDIA Corporation
*
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#ifndef DRM_TEGRA_MIPI_PHY_H
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 57cecbd18ca8..a3e4f1eca6f7 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -77,6 +77,9 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
struct tegra_output *output = connector_to_output(connector);
enum drm_connector_status status = connector_status_unknown;
+ if (output->ops->detect)
+ return output->ops->detect(output);
+
if (gpio_is_valid(output->hpd_gpio)) {
if (gpio_get_value(output->hpd_gpio) == 0)
status = connector_status_disconnected;
@@ -292,6 +295,11 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
encoder = DRM_MODE_ENCODER_DSI;
break;
+ case TEGRA_OUTPUT_EDP:
+ connector = DRM_MODE_CONNECTOR_eDP;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ break;
+
default:
connector = DRM_MODE_CONNECTOR_Unknown;
encoder = DRM_MODE_ENCODER_NONE;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
new file mode 100644
index 000000000000..49ef5729f435
--- /dev/null
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -0,0 +1,1092 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/tegra-powergate.h>
+
+#include <drm/drm_dp_helper.h>
+
+#include "dc.h"
+#include "drm.h"
+#include "sor.h"
+
+struct tegra_sor {
+ struct host1x_client client;
+ struct tegra_output output;
+ struct device *dev;
+
+ void __iomem *regs;
+
+ struct reset_control *rst;
+ struct clk *clk_parent;
+ struct clk *clk_safe;
+ struct clk *clk_dp;
+ struct clk *clk;
+
+ struct tegra_dpaux *dpaux;
+
+ bool enabled;
+};
+
+static inline struct tegra_sor *
+host1x_client_to_sor(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_sor, client);
+}
+
+static inline struct tegra_sor *to_sor(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_sor, output);
+}
+
+static inline unsigned long tegra_sor_readl(struct tegra_sor *sor,
+ unsigned long offset)
+{
+ return readl(sor->regs + (offset << 2));
+}
+
+static inline void tegra_sor_writel(struct tegra_sor *sor, unsigned long value,
+ unsigned long offset)
+{
+ writel(value, sor->regs + (offset << 2));
+}
+
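+/*
+ * Fast link training: program fixed drive current, pre-emphasis and
+ * post-cursor settings for all lanes, then step the link through training
+ * pattern 1, training pattern 2 and finally pattern disable via the AUX
+ * channel, checking clock recovery and channel equalization along the way.
+ */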
+static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
+ struct drm_dp_link *link)
+{
+ unsigned long value;
+ unsigned int i;
+ u8 pattern;
+ int err;
+
+ /* setup lane parameters */
+ value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) |
+ SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
+ SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
+ SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
+ tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT_0);
+
+ value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
+ SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
+ SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
+ SOR_LANE_PREEMPHASIS_LANE0(0x0f);
+ tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS_0);
+
+ value = SOR_LANE_POST_CURSOR_LANE3(0x00) |
+ SOR_LANE_POST_CURSOR_LANE2(0x00) |
+ SOR_LANE_POST_CURSOR_LANE1(0x00) |
+ SOR_LANE_POST_CURSOR_LANE0(0x00);
+ tegra_sor_writel(sor, value, SOR_LANE_POST_CURSOR_0);
+
+ /* disable LVDS mode */
+ tegra_sor_writel(sor, 0, SOR_LVDS);
+
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+ value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+ value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
+ SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+
+ usleep_range(10, 100);
+
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
+ SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+
+ err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B);
+ if (err < 0)
+ return err;
+
+ for (i = 0, value = 0; i < link->num_lanes; i++) {
+ unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
+ SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN1;
+ value = (value << 8) | lane;
+ }
+
+ tegra_sor_writel(sor, value, SOR_DP_TPG);
+
+ pattern = DP_TRAINING_PATTERN_1;
+
+ err = tegra_dpaux_train(sor->dpaux, link, pattern);
+ if (err < 0)
+ return err;
+
+ value = tegra_sor_readl(sor, SOR_DP_SPARE_0);
+ value |= SOR_DP_SPARE_SEQ_ENABLE;
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+ value |= SOR_DP_SPARE_MACRO_SOR_CLK;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE_0);
+
+ for (i = 0, value = 0; i < link->num_lanes; i++) {
+ unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
+ SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN2;
+ value = (value << 8) | lane;
+ }
+
+ tegra_sor_writel(sor, value, SOR_DP_TPG);
+
+ pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2;
+
+ err = tegra_dpaux_train(sor->dpaux, link, pattern);
+ if (err < 0)
+ return err;
+
+ for (i = 0, value = 0; i < link->num_lanes; i++) {
+ unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
+ SOR_DP_TPG_SCRAMBLER_GALIOS |
+ SOR_DP_TPG_PATTERN_NONE;
+ value = (value << 8) | lane;
+ }
+
+ tegra_sor_writel(sor, value, SOR_DP_TPG);
+
+ pattern = DP_TRAINING_PATTERN_DISABLE;
+
+ err = tegra_dpaux_train(sor->dpaux, link, pattern);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
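+/*
+ * The (super) state registers are shadowed; writing 0, 1, 0 to the update
+ * register presumably acts as a trigger that latches the new settings into
+ * the active state.
+ */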
+static void tegra_sor_super_update(struct tegra_sor *sor)
+{
+ tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
+ tegra_sor_writel(sor, 1, SOR_SUPER_STATE_0);
+ tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
+}
+
+static void tegra_sor_update(struct tegra_sor *sor)
+{
+ tegra_sor_writel(sor, 0, SOR_STATE_0);
+ tegra_sor_writel(sor, 1, SOR_STATE_0);
+ tegra_sor_writel(sor, 0, SOR_STATE_0);
+}
+
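+/*
+ * Several SOR state machines are kicked off by setting a TRIGGER bit that
+ * the hardware appears to clear once the request has been latched, so the
+ * helpers below poll for the bit to read back as zero (with a timeout) to
+ * detect completion.
+ */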
+static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
+{
+ unsigned long value;
+
+ value = tegra_sor_readl(sor, SOR_PWM_DIV);
+ value &= ~SOR_PWM_DIV_MASK;
+ value |= 0x400; /* period */
+ tegra_sor_writel(sor, value, SOR_PWM_DIV);
+
+ value = tegra_sor_readl(sor, SOR_PWM_CTL);
+ value &= ~SOR_PWM_CTL_DUTY_CYCLE_MASK;
+ value |= 0x400; /* duty cycle */
+ value &= ~SOR_PWM_CTL_CLK_SEL; /* clock source: PCLK */
+ value |= SOR_PWM_CTL_TRIGGER;
+ tegra_sor_writel(sor, value, SOR_PWM_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_PWM_CTL);
+ if ((value & SOR_PWM_CTL_TRIGGER) == 0)
+ return 0;
+
+ usleep_range(25, 100);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_sor_attach(struct tegra_sor *sor)
+{
+ unsigned long value, timeout;
+
+ /* wake up in normal mode */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE;
+ value |= SOR_SUPER_STATE_MODE_NORMAL;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_super_update(sor);
+
+ /* attach */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value |= SOR_SUPER_STATE_ATTACHED;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_super_update(sor);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_TEST);
+ if ((value & SOR_TEST_ATTACHED) != 0)
+ return 0;
+
+ usleep_range(25, 100);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_sor_wakeup(struct tegra_sor *sor)
+{
+ struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc);
+ unsigned long value, timeout;
+
+ /* enable display controller outputs */
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ /* wait for head to wake up */
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_TEST);
+ value &= SOR_TEST_HEAD_MODE_MASK;
+
+ if (value == SOR_TEST_HEAD_MODE_AWAKE)
+ return 0;
+
+ usleep_range(25, 100);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_sor_power_up(struct tegra_sor *sor, unsigned long timeout)
+{
+ unsigned long value;
+
+ value = tegra_sor_readl(sor, SOR_PWR);
+ value |= SOR_PWR_TRIGGER | SOR_PWR_NORMAL_STATE_PU;
+ tegra_sor_writel(sor, value, SOR_PWR);
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_PWR);
+ if ((value & SOR_PWR_TRIGGER) == 0)
+ return 0;
+
+ usleep_range(25, 100);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_output_sor_enable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct drm_display_mode *mode = &dc->base.mode;
+ unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
+ struct tegra_sor *sor = to_sor(output);
+ unsigned long value;
+ int err;
+
+ if (sor->enabled)
+ return 0;
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0)
+ return err;
+
+ reset_control_deassert(sor->rst);
+
+ if (sor->dpaux) {
+ err = tegra_dpaux_enable(sor->dpaux);
+ if (err < 0)
+ dev_err(sor->dev, "failed to enable DP: %d\n", err);
+ }
+
+ err = clk_set_parent(sor->clk, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL_2);
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, SOR_PLL_3);
+ value |= SOR_PLL_3_PLL_VDD_MODE_V3_3;
+ tegra_sor_writel(sor, value, SOR_PLL_3);
+
+ value = SOR_PLL_0_ICHPMP(0xf) | SOR_PLL_0_VCOCAP_RST |
+ SOR_PLL_0_PLLREG_LEVEL_V45 | SOR_PLL_0_RESISTOR_EXT;
+ tegra_sor_writel(sor, value, SOR_PLL_0);
+
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ value |= SOR_PLL_2_SEQ_PLLCAPPD;
+ value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
+ value |= SOR_PLL_2_LVDS_ENABLE;
+ tegra_sor_writel(sor, value, SOR_PLL_2);
+
+ value = SOR_PLL_1_TERM_COMPOUT | SOR_PLL_1_TMDS_TERM;
+ tegra_sor_writel(sor, value, SOR_PLL_1);
+
+ while (true) {
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ if ((value & SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ value &= ~SOR_PLL_2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL_2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL_2);
+
+ /*
+ * Power-up sequence: start from a safe link speed, then step through the
+ * PLL, bandgap, I/O rail and lane power-up steps below.
+ */
+
+ /* set safe link bandwidth (1.62 Gbps) */
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62;
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ /* step 1 */
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL_2_PORT_POWERDOWN |
+ SOR_PLL_2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL_2);
+
+ value = tegra_sor_readl(sor, SOR_PLL_0);
+ value |= SOR_PLL_0_VCOPD | SOR_PLL_0_POWER_OFF;
+ tegra_sor_writel(sor, value, SOR_PLL_0);
+
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+
+ /* step 2 */
+ err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power on I/O rail: %d\n", err);
+ return err;
+ }
+
+ usleep_range(5, 100);
+
+ /* step 3 */
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL_2);
+
+ usleep_range(20, 100);
+
+ /* step 4 */
+ value = tegra_sor_readl(sor, SOR_PLL_0);
+ value &= ~SOR_PLL_0_POWER_OFF;
+ value &= ~SOR_PLL_0_VCOPD;
+ tegra_sor_writel(sor, value, SOR_PLL_0);
+
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
+ tegra_sor_writel(sor, value, SOR_PLL_2);
+
+ usleep_range(200, 1000);
+
+ /* step 5 */
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ value &= ~SOR_PLL_2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL_2);
+
+ /* switch to DP clock */
+ err = clk_set_parent(sor->clk, sor->clk_dp);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
+
+ /* power up the DP lanes (XXX: parameterize based on link?) */
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+ value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+ value |= SOR_DP_LINKCTL_LANE_COUNT(4);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ SOR_LANE_SEQ_CTL_POWER_STATE_UP;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ while (true) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ /* set link bandwidth (2.7 Gbps, XXX: parameterize based on link?) */
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ /* set linkctl */
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+ value |= SOR_DP_LINKCTL_ENABLE;
+
+ value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
+ value |= SOR_DP_LINKCTL_TU_SIZE(59); /* XXX: don't hardcode? */
+
+ value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+
+ for (i = 0, value = 0; i < 4; i++) {
+ unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
+ SOR_DP_TPG_SCRAMBLER_GALIOS |
+ SOR_DP_TPG_PATTERN_NONE;
+ value = (value << 8) | lane;
+ }
+
+ tegra_sor_writel(sor, value, SOR_DP_TPG);
+
+ value = tegra_sor_readl(sor, SOR_DP_CONFIG_0);
+ value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
+ value |= SOR_DP_CONFIG_WATERMARK(14); /* XXX: don't hardcode? */
+
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(47); /* XXX: don't hardcode? */
+
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(9); /* XXX: don't hardcode? */
+
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY; /* XXX: don't hardcode? */
+
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
+ value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE; /* XXX: don't hardcode? */
+ tegra_sor_writel(sor, value, SOR_DP_CONFIG_0);
+
+ value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+ value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
+ value |= 137; /* XXX: don't hardcode? */
+ tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+
+ value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+ value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
+ value |= 2368; /* XXX: don't hardcode? */
+ tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+
+ /* enable pad calibration logic */
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+
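+ /*
+ * If an AUX channel is attached, probe the sink's DPCD, power up and
+ * configure the link and run the fast link-training sequence before
+ * powering up the SOR itself.
+ */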
+ if (sor->dpaux) {
+ /* FIXME: properly convert to struct drm_dp_aux */
+ struct drm_dp_aux *aux = (struct drm_dp_aux *)sor->dpaux;
+ struct drm_dp_link link;
+ u8 rate, lanes;
+
+ err = drm_dp_link_probe(aux, &link);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to probe eDP link: %d\n",
+ err);
+ return err;
+ }
+
+ err = drm_dp_link_power_up(aux, &link);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power up eDP link: %d\n",
+ err);
+ return err;
+ }
+
+ err = drm_dp_link_configure(aux, &link);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to configure eDP link: %d\n",
+ err);
+ return err;
+ }
+
+ rate = drm_dp_link_rate_to_bw_code(link.rate);
+ lanes = link.num_lanes;
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+ value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+ value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
+
+ if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
+
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+
+ /* disable training pattern generator */
+
+ for (i = 0, value = 0; i < link.num_lanes; i++) {
+ unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
+ SOR_DP_TPG_SCRAMBLER_GALIOS |
+ SOR_DP_TPG_PATTERN_NONE;
+ value = (value << 8) | lane;
+ }
+
+ tegra_sor_writel(sor, value, SOR_DP_TPG);
+
+ err = tegra_sor_dp_train_fast(sor, &link);
+ if (err < 0) {
+ dev_err(sor->dev, "DP fast link training failed: %d\n",
+ err);
+ return err;
+ }
+
+ dev_dbg(sor->dev, "fast link training succeeded\n");
+ }
+
+ err = tegra_sor_power_up(sor, 250);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+ return err;
+ }
+
+ /* start display controller in continuous mode */
+ value = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS);
+ value |= WRITE_MUX;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_ACCESS);
+
+ tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS);
+ tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
+
+ value = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS);
+ value &= ~WRITE_MUX;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_ACCESS);
+
+ /*
+ * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
+ * raster, associate with display controller)
+ */
+ value = SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 |
+ SOR_STATE_ASY_VSYNCPOL |
+ SOR_STATE_ASY_HSYNCPOL |
+ SOR_STATE_ASY_PROTOCOL_DP_A |
+ SOR_STATE_ASY_CRC_MODE_COMPLETE |
+ SOR_STATE_ASY_OWNER(dc->pipe + 1);
+ tegra_sor_writel(sor, value, SOR_STATE_1);
+
+ /*
+ * TODO: The video timing programming below doesn't seem to match the
+ * register definitions.
+ */
+
+ value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE_1(0));
+
+ vse = mode->vsync_end - mode->vsync_start - 1;
+ hse = mode->hsync_end - mode->hsync_start - 1;
+
+ value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE_2(0));
+
+ vbe = vse + (mode->vsync_start - mode->vdisplay);
+ hbe = hse + (mode->hsync_start - mode->hdisplay);
+
+ value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE_3(0));
+
+ vbs = vbe + mode->vdisplay;
+ hbs = hbe + mode->hdisplay;
+
+ value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0));
+
+ /* XXX interlaced mode */
+ tegra_sor_writel(sor, 0x00000001, SOR_HEAD_STATE_5(0));
+
+ /* CSTM (LVDS, link A/B, upper) */
+ value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
+ SOR_CSTM_UPPER;
+ tegra_sor_writel(sor, value, SOR_CSTM);
+
+ /* PWM setup */
+ err = tegra_sor_setup_pwm(sor, 250);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to setup PWM: %d\n", err);
+ return err;
+ }
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= SOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_sor_update(sor);
+
+ err = tegra_sor_attach(sor);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+ return err;
+ }
+
+ err = tegra_sor_wakeup(sor);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable DC: %d\n", err);
+ return err;
+ }
+
+ sor->enabled = true;
+
+ return 0;
+}
+
+static int tegra_sor_detach(struct tegra_sor *sor)
+{
+ unsigned long value, timeout;
+
+ /* switch to safe mode */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value &= ~SOR_SUPER_STATE_MODE_NORMAL;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_super_update(sor);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_PWR);
+ if (value & SOR_PWR_MODE_SAFE)
+ break;
+ }
+
+ if ((value & SOR_PWR_MODE_SAFE) == 0)
+ return -ETIMEDOUT;
+
+ /* go to sleep */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_super_update(sor);
+
+ /* detach */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value &= ~SOR_SUPER_STATE_ATTACHED;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_super_update(sor);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_TEST);
+ if ((value & SOR_TEST_ATTACHED) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_TEST_ATTACHED) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int tegra_sor_power_down(struct tegra_sor *sor)
+{
+ unsigned long value, timeout;
+ int err;
+
+ value = tegra_sor_readl(sor, SOR_PWR);
+ value &= ~SOR_PWR_NORMAL_STATE_PU;
+ value |= SOR_PWR_TRIGGER;
+ tegra_sor_writel(sor, value, SOR_PWR);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_PWR);
+ if ((value & SOR_PWR_TRIGGER) == 0)
+ return 0;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_PWR_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ err = clk_set_parent(sor->clk, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+
+ /* stop lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ value |= SOR_PLL_2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL_2);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, SOR_PLL_0);
+ value |= SOR_PLL_0_POWER_OFF;
+ value |= SOR_PLL_0_VCOPD;
+ tegra_sor_writel(sor, value, SOR_PLL_0);
+
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ value |= SOR_PLL_2_SEQ_PLLCAPPD;
+ value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
+ tegra_sor_writel(sor, value, SOR_PLL_2);
+
+ usleep_range(20, 100);
+
+ return 0;
+}
+
+static int tegra_output_sor_disable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct tegra_sor *sor = to_sor(output);
+ unsigned long value;
+ int err;
+
+ if (!sor->enabled)
+ return 0;
+
+ err = tegra_sor_detach(sor);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+ return err;
+ }
+
+ tegra_sor_writel(sor, 0, SOR_STATE_1);
+ tegra_sor_update(sor);
+
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ /*
+ * XXX: We can't do this here because it causes the SOR to go
+ * into an erroneous state and the output will look scrambled
+ * the next time it is enabled. Presumably this is because we
+ * should be doing this only on the next VBLANK. A possible
+ * solution would be to queue a "power-off" event to trigger
+ * this code to be run during the next VBLANK.
+ */
+ /*
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+ */
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~SOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ }
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+ return err;
+ }
+
+ if (sor->dpaux) {
+ err = tegra_dpaux_disable(sor->dpaux);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to disable DP: %d\n", err);
+ return err;
+ }
+ }
+
+ err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
+ return err;
+ }
+
+ reset_control_assert(sor->rst);
+ clk_disable_unprepare(sor->clk);
+
+ sor->enabled = false;
+
+ return 0;
+}
+
+static int tegra_output_sor_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ struct tegra_sor *sor = to_sor(output);
+ int err;
+
+ /* round half the pixel clock up to the next MHz */
+ pclk = DIV_ROUND_UP(pclk / 2, 1000000) * 1000000;
+
+ err = clk_set_parent(clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+ return err;
+ }
+
+ err = clk_set_rate(sor->clk_parent, pclk);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to set base clock rate to %lu Hz\n",
+ pclk * 2);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_output_sor_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ /*
+ * FIXME: For now, always assume that the mode is okay.
+ */
+
+ *status = MODE_OK;
+
+ return 0;
+}
+
+static enum drm_connector_status
+tegra_output_sor_detect(struct tegra_output *output)
+{
+ struct tegra_sor *sor = to_sor(output);
+
+ if (sor->dpaux)
+ return tegra_dpaux_detect(sor->dpaux);
+
+ return connector_status_unknown;
+}
+
+static const struct tegra_output_ops sor_ops = {
+ .enable = tegra_output_sor_enable,
+ .disable = tegra_output_sor_disable,
+ .setup_clock = tegra_output_sor_setup_clock,
+ .check_mode = tegra_output_sor_check_mode,
+ .detect = tegra_output_sor_detect,
+};
+
+static int tegra_sor_init(struct host1x_client *client)
+{
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ int err;
+
+ if (!sor->dpaux)
+ return -ENODEV;
+
+ sor->output.type = TEGRA_OUTPUT_EDP;
+
+ sor->output.dev = sor->dev;
+ sor->output.ops = &sor_ops;
+
+ err = tegra_output_init(tegra->drm, &sor->output);
+ if (err < 0) {
+ dev_err(sor->dev, "output setup failed: %d\n", err);
+ return err;
+ }
+
+ if (sor->dpaux) {
+ err = tegra_dpaux_attach(sor->dpaux, &sor->output);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to attach DP: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_sor_exit(struct host1x_client *client)
+{
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ int err;
+
+ err = tegra_output_disable(&sor->output);
+ if (err < 0) {
+ dev_err(sor->dev, "output failed to disable: %d\n", err);
+ return err;
+ }
+
+ if (sor->dpaux) {
+ err = tegra_dpaux_detach(sor->dpaux);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to detach DP: %d\n", err);
+ return err;
+ }
+ }
+
+ err = tegra_output_exit(&sor->output);
+ if (err < 0) {
+ dev_err(sor->dev, "output cleanup failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops sor_client_ops = {
+ .init = tegra_sor_init,
+ .exit = tegra_sor_exit,
+};
+
+static int tegra_sor_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct tegra_sor *sor;
+ struct resource *regs;
+ int err;
+
+ sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
+ if (!sor)
+ return -ENOMEM;
+
+ sor->output.dev = sor->dev = &pdev->dev;
+
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
+ if (np) {
+ sor->dpaux = tegra_dpaux_find_by_of_node(np);
+ of_node_put(np);
+
+ if (!sor->dpaux)
+ return -EPROBE_DEFER;
+ }
+
+ err = tegra_output_probe(&sor->output);
+ if (err < 0)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sor->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(sor->regs))
+ return PTR_ERR(sor->regs);
+
+ sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+ if (IS_ERR(sor->rst))
+ return PTR_ERR(sor->rst);
+
+ sor->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sor->clk))
+ return PTR_ERR(sor->clk);
+
+ sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ if (IS_ERR(sor->clk_parent))
+ return PTR_ERR(sor->clk_parent);
+
+ err = clk_prepare_enable(sor->clk_parent);
+ if (err < 0)
+ return err;
+
+ sor->clk_safe = devm_clk_get(&pdev->dev, "safe");
+ if (IS_ERR(sor->clk_safe))
+ return PTR_ERR(sor->clk_safe);
+
+ err = clk_prepare_enable(sor->clk_safe);
+ if (err < 0)
+ return err;
+
+ sor->clk_dp = devm_clk_get(&pdev->dev, "dp");
+ if (IS_ERR(sor->clk_dp))
+ return PTR_ERR(sor->clk_dp);
+
+ err = clk_prepare_enable(sor->clk_dp);
+ if (err < 0)
+ return err;
+
+ INIT_LIST_HEAD(&sor->client.list);
+ sor->client.ops = &sor_client_ops;
+ sor->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&sor->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, sor);
+
+ return 0;
+}
+
+static int tegra_sor_remove(struct platform_device *pdev)
+{
+ struct tegra_sor *sor = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&sor->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ clk_disable_unprepare(sor->clk_parent);
+ clk_disable_unprepare(sor->clk_safe);
+ clk_disable_unprepare(sor->clk_dp);
+ clk_disable_unprepare(sor->clk);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_sor_of_match[] = {
+ { .compatible = "nvidia,tegra124-sor", },
+ { },
+};
+
+struct platform_driver tegra_sor_driver = {
+ .driver = {
+ .name = "tegra-sor",
+ .of_match_table = tegra_sor_of_match,
+ },
+ .probe = tegra_sor_probe,
+ .remove = tegra_sor_remove,
+};
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
new file mode 100644
index 000000000000..f4156d54cd05
--- /dev/null
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DRM_TEGRA_SOR_H
+#define DRM_TEGRA_SOR_H
+
+#define SOR_CTXSW 0x00
+
+#define SOR_SUPER_STATE_0 0x01
+
+#define SOR_SUPER_STATE_1 0x02
+#define SOR_SUPER_STATE_ATTACHED (1 << 3)
+#define SOR_SUPER_STATE_MODE_NORMAL (1 << 2)
+#define SOR_SUPER_STATE_HEAD_MODE_MASK (3 << 0)
+#define SOR_SUPER_STATE_HEAD_MODE_AWAKE (2 << 0)
+#define SOR_SUPER_STATE_HEAD_MODE_SNOOZE (1 << 0)
+#define SOR_SUPER_STATE_HEAD_MODE_SLEEP (0 << 0)
+
+#define SOR_STATE_0 0x03
+
+#define SOR_STATE_1 0x04
+#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17)
+#define SOR_STATE_ASY_VSYNCPOL (1 << 13)
+#define SOR_STATE_ASY_HSYNCPOL (1 << 12)
+#define SOR_STATE_ASY_PROTOCOL_MASK (0xf << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM (0xf << 8)
+#define SOR_STATE_ASY_PROTOCOL_DP_A (0x8 << 8)
+#define SOR_STATE_ASY_PROTOCOL_DP_B (0x9 << 8)
+#define SOR_STATE_ASY_PROTOCOL_LVDS (0x0 << 8)
+#define SOR_STATE_ASY_CRC_MODE_MASK (0x3 << 6)
+#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
+#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
+#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
+#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
+
+#define SOR_HEAD_STATE_0(x) (0x05 + (x))
+#define SOR_HEAD_STATE_1(x) (0x07 + (x))
+#define SOR_HEAD_STATE_2(x) (0x09 + (x))
+#define SOR_HEAD_STATE_3(x) (0x0b + (x))
+#define SOR_HEAD_STATE_4(x) (0x0d + (x))
+#define SOR_HEAD_STATE_5(x) (0x0f + (x))
+#define SOR_CRC_CNTRL 0x11
+#define SOR_DP_DEBUG_MVID 0x12
+
+#define SOR_CLK_CNTRL 0x13
+#define SOR_CLK_CNTRL_DP_LINK_SPEED_MASK (0x1f << 2)
+#define SOR_CLK_CNTRL_DP_LINK_SPEED(x) (((x) & 0x1f) << 2)
+#define SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62 (0x06 << 2)
+#define SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70 (0x0a << 2)
+#define SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40 (0x14 << 2)
+#define SOR_CLK_CNTRL_DP_CLK_SEL_MASK (3 << 0)
+#define SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK (0 << 0)
+#define SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK (1 << 0)
+#define SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK (2 << 0)
+#define SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK (3 << 0)
+
+#define SOR_CAP 0x14
+
+#define SOR_PWR 0x15
+#define SOR_PWR_TRIGGER (1 << 31)
+#define SOR_PWR_MODE_SAFE (1 << 28)
+#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
+
+#define SOR_TEST 0x16
+#define SOR_TEST_ATTACHED (1 << 10)
+#define SOR_TEST_HEAD_MODE_MASK (3 << 8)
+#define SOR_TEST_HEAD_MODE_AWAKE (2 << 8)
+
+#define SOR_PLL_0 0x17
+#define SOR_PLL_0_ICHPMP_MASK (0xf << 24)
+#define SOR_PLL_0_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL_0_VCOCAP_MASK (0xf << 8)
+#define SOR_PLL_0_VCOCAP(x) (((x) & 0xf) << 8)
+#define SOR_PLL_0_VCOCAP_RST SOR_PLL_0_VCOCAP(3)
+#define SOR_PLL_0_PLLREG_MASK (0x3 << 6)
+#define SOR_PLL_0_PLLREG_LEVEL(x) (((x) & 0x3) << 6)
+#define SOR_PLL_0_PLLREG_LEVEL_V25 SOR_PLL_0_PLLREG_LEVEL(0)
+#define SOR_PLL_0_PLLREG_LEVEL_V15 SOR_PLL_0_PLLREG_LEVEL(1)
+#define SOR_PLL_0_PLLREG_LEVEL_V35 SOR_PLL_0_PLLREG_LEVEL(2)
+#define SOR_PLL_0_PLLREG_LEVEL_V45 SOR_PLL_0_PLLREG_LEVEL(3)
+#define SOR_PLL_0_PULLDOWN (1 << 5)
+#define SOR_PLL_0_RESISTOR_EXT (1 << 4)
+#define SOR_PLL_0_VCOPD (1 << 2)
+#define SOR_PLL_0_POWER_OFF (1 << 0)
+
+#define SOR_PLL_1 0x18
+/* XXX: read-only bit? */
+#define SOR_PLL_1_TERM_COMPOUT (1 << 15)
+#define SOR_PLL_1_TMDS_TERM (1 << 8)
+
+#define SOR_PLL_2 0x19
+#define SOR_PLL_2_LVDS_ENABLE (1 << 25)
+#define SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE (1 << 24)
+#define SOR_PLL_2_PORT_POWERDOWN (1 << 23)
+#define SOR_PLL_2_BANDGAP_POWERDOWN (1 << 22)
+#define SOR_PLL_2_POWERDOWN_OVERRIDE (1 << 18)
+#define SOR_PLL_2_SEQ_PLLCAPPD (1 << 17)
+
+#define SOR_PLL_3 0x1a
+#define SOR_PLL_3_PLL_VDD_MODE_V1_8 (0 << 13)
+#define SOR_PLL_3_PLL_VDD_MODE_V3_3 (1 << 13)
+
+#define SOR_CSTM 0x1b
+#define SOR_CSTM_LVDS (1 << 16)
+#define SOR_CSTM_LINK_ACT_B (1 << 15)
+#define SOR_CSTM_LINK_ACT_A (1 << 14)
+#define SOR_CSTM_UPPER (1 << 11)
+
+#define SOR_LVDS 0x1c
+#define SOR_CRC_A 0x1d
+#define SOR_CRC_B 0x1e
+#define SOR_BLANK 0x1f
+#define SOR_SEQ_CTL 0x20
+
+#define SOR_LANE_SEQ_CTL 0x21
+#define SOR_LANE_SEQ_CTL_TRIGGER (1 << 31)
+#define SOR_LANE_SEQ_CTL_SEQUENCE_UP (0 << 20)
+#define SOR_LANE_SEQ_CTL_SEQUENCE_DOWN (1 << 20)
+#define SOR_LANE_SEQ_CTL_POWER_STATE_UP (0 << 16)
+#define SOR_LANE_SEQ_CTL_POWER_STATE_DOWN (1 << 16)
+
+#define SOR_SEQ_INST(x) (0x22 + (x))
+
+#define SOR_PWM_DIV 0x32
+#define SOR_PWM_DIV_MASK 0xffffff
+
+#define SOR_PWM_CTL 0x33
+#define SOR_PWM_CTL_TRIGGER (1 << 31)
+#define SOR_PWM_CTL_CLK_SEL (1 << 30)
+#define SOR_PWM_CTL_DUTY_CYCLE_MASK 0xffffff
+
+#define SOR_VCRC_A_0 0x34
+#define SOR_VCRC_A_1 0x35
+#define SOR_VCRC_B_0 0x36
+#define SOR_VCRC_B_1 0x37
+#define SOR_CCRC_A_0 0x38
+#define SOR_CCRC_A_1 0x39
+#define SOR_CCRC_B_0 0x3a
+#define SOR_CCRC_B_1 0x3b
+#define SOR_EDATA_A_0 0x3c
+#define SOR_EDATA_A_1 0x3d
+#define SOR_EDATA_B_0 0x3e
+#define SOR_EDATA_B_1 0x3f
+#define SOR_COUNT_A_0 0x40
+#define SOR_COUNT_A_1 0x41
+#define SOR_COUNT_B_0 0x42
+#define SOR_COUNT_B_1 0x43
+#define SOR_DEBUG_A_0 0x44
+#define SOR_DEBUG_A_1 0x45
+#define SOR_DEBUG_B_0 0x46
+#define SOR_DEBUG_B_1 0x47
+#define SOR_TRIG 0x48
+#define SOR_MSCHECK 0x49
+#define SOR_XBAR_CTRL 0x4a
+#define SOR_XBAR_POL 0x4b
+
+#define SOR_DP_LINKCTL_0 0x4c
+#define SOR_DP_LINKCTL_LANE_COUNT_MASK (0x1f << 16)
+#define SOR_DP_LINKCTL_LANE_COUNT(x) (((1 << (x)) - 1) << 16)
+#define SOR_DP_LINKCTL_ENHANCED_FRAME (1 << 14)
+#define SOR_DP_LINKCTL_TU_SIZE_MASK (0x7f << 2)
+#define SOR_DP_LINKCTL_TU_SIZE(x) (((x) & 0x7f) << 2)
+#define SOR_DP_LINKCTL_ENABLE (1 << 0)
+
+#define SOR_DP_LINKCTL_1 0x4d
+
+#define SOR_LANE_DRIVE_CURRENT_0 0x4e
+#define SOR_LANE_DRIVE_CURRENT_1 0x4f
+#define SOR_LANE4_DRIVE_CURRENT_0 0x50
+#define SOR_LANE4_DRIVE_CURRENT_1 0x51
+#define SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24)
+#define SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16)
+#define SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8)
+#define SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0)
+
+#define SOR_LANE_PREEMPHASIS_0 0x52
+#define SOR_LANE_PREEMPHASIS_1 0x53
+#define SOR_LANE4_PREEMPHASIS_0 0x54
+#define SOR_LANE4_PREEMPHASIS_1 0x55
+#define SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24)
+#define SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16)
+#define SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8)
+#define SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0)
+
+#define SOR_LANE_POST_CURSOR_0 0x56
+#define SOR_LANE_POST_CURSOR_1 0x57
+#define SOR_LANE_POST_CURSOR_LANE3(x) (((x) & 0xff) << 24)
+#define SOR_LANE_POST_CURSOR_LANE2(x) (((x) & 0xff) << 16)
+#define SOR_LANE_POST_CURSOR_LANE1(x) (((x) & 0xff) << 8)
+#define SOR_LANE_POST_CURSOR_LANE0(x) (((x) & 0xff) << 0)
+
+#define SOR_DP_CONFIG_0 0x58
+#define SOR_DP_CONFIG_DISPARITY_NEGATIVE (1 << 31)
+#define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE (1 << 26)
+#define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY (1 << 24)
+#define SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK (0xf << 16)
+#define SOR_DP_CONFIG_ACTIVE_SYM_FRAC(x) (((x) & 0xf) << 16)
+#define SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK (0x7f << 8)
+#define SOR_DP_CONFIG_ACTIVE_SYM_COUNT(x) (((x) & 0x7f) << 8)
+#define SOR_DP_CONFIG_WATERMARK_MASK (0x3f << 0)
+#define SOR_DP_CONFIG_WATERMARK(x) (((x) & 0x3f) << 0)
+
+#define SOR_DP_CONFIG_1 0x59
+#define SOR_DP_MN_0 0x5a
+#define SOR_DP_MN_1 0x5b
+
+#define SOR_DP_PADCTL_0 0x5c
+#define SOR_DP_PADCTL_PAD_CAL_PD (1 << 23)
+#define SOR_DP_PADCTL_TX_PU_ENABLE (1 << 22)
+#define SOR_DP_PADCTL_TX_PU_MASK (0xff << 8)
+#define SOR_DP_PADCTL_TX_PU(x) (((x) & 0xff) << 8)
+#define SOR_DP_PADCTL_CM_TXD_3 (1 << 7)
+#define SOR_DP_PADCTL_CM_TXD_2 (1 << 6)
+#define SOR_DP_PADCTL_CM_TXD_1 (1 << 5)
+#define SOR_DP_PADCTL_CM_TXD_0 (1 << 4)
+#define SOR_DP_PADCTL_PD_TXD_3 (1 << 3)
+#define SOR_DP_PADCTL_PD_TXD_0 (1 << 2)
+#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
+#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
+
+#define SOR_DP_PADCTL_1 0x5d
+
+#define SOR_DP_DEBUG_0 0x5e
+#define SOR_DP_DEBUG_1 0x5f
+
+#define SOR_DP_SPARE_0 0x60
+#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2)
+#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1)
+#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0)
+
+#define SOR_DP_SPARE_1 0x61
+#define SOR_DP_AUDIO_CTRL 0x62
+
+#define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63
+#define SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK (0x01ffff << 0)
+
+#define SOR_DP_AUDIO_VBLANK_SYMBOLS 0x64
+#define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0)
+
+#define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_0 0x66
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_1 0x67
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_2 0x68
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_3 0x69
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_4 0x6a
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_5 0x6b
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_6 0x6c
+
+#define SOR_DP_TPG 0x6d
+#define SOR_DP_TPG_CHANNEL_CODING (1 << 6)
+#define SOR_DP_TPG_SCRAMBLER_MASK (3 << 4)
+#define SOR_DP_TPG_SCRAMBLER_FIBONACCI (2 << 4)
+#define SOR_DP_TPG_SCRAMBLER_GALIOS (1 << 4)
+#define SOR_DP_TPG_SCRAMBLER_NONE (0 << 4)
+#define SOR_DP_TPG_PATTERN_MASK (0xf << 0)
+#define SOR_DP_TPG_PATTERN_HBR2 (0x8 << 0)
+#define SOR_DP_TPG_PATTERN_CSTM (0x7 << 0)
+#define SOR_DP_TPG_PATTERN_PRBS7 (0x6 << 0)
+#define SOR_DP_TPG_PATTERN_SBLERRRATE (0x5 << 0)
+#define SOR_DP_TPG_PATTERN_D102 (0x4 << 0)
+#define SOR_DP_TPG_PATTERN_TRAIN3 (0x3 << 0)
+#define SOR_DP_TPG_PATTERN_TRAIN2 (0x2 << 0)
+#define SOR_DP_TPG_PATTERN_TRAIN1 (0x1 << 0)
+#define SOR_DP_TPG_PATTERN_NONE (0x0 << 0)
+
+#define SOR_DP_TPG_CONFIG 0x6e
+#define SOR_DP_LQ_CSTM_0 0x6f
+#define SOR_DP_LQ_CSTM_1 0x70
+#define SOR_DP_LQ_CSTM_2 0x71
+
+#endif
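
The mask/value macro pairs above (e.g. SOR_DP_LINKCTL_TU_SIZE_MASK and
SOR_DP_LINKCTL_TU_SIZE()) are meant for read-modify-write updates of single
register fields. A minimal sketch, assuming <linux/types.h> and a cached
register value; the helper name is illustrative only and not part of the patch:

	static u32 sor_set_tu_size(u32 linkctl, unsigned int tu_size)
	{
		/* clear the old TU size field, then insert the new value */
		linkctl &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
		linkctl |= SOR_DP_LINKCTL_TU_SIZE(tu_size);
		return linkctl;
	}
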
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index d36efc13b16f..d642d4a02134 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -74,7 +74,7 @@ static void set_scanout(struct drm_crtc *crtc, int n)
drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]);
drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
}
- tilcdc_crtc->scanout[n] = crtc->fb;
+ tilcdc_crtc->scanout[n] = crtc->primary->fb;
drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
tilcdc_crtc->dirty &= ~stat[n];
pm_runtime_put_sync(dev->dev);
@@ -84,7 +84,7 @@ static void update_scanout(struct drm_crtc *crtc)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_gem_cma_object *gem;
unsigned int depth, bpp;
@@ -159,7 +159,7 @@ static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
return -EBUSY;
}
- crtc->fb = fb;
+ crtc->primary->fb = fb;
tilcdc_crtc->event = event;
update_scanout(crtc);
@@ -339,7 +339,7 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
if (priv->rev == 2) {
unsigned int depth, bpp;
- drm_fb_get_bpp_depth(crtc->fb->pixel_format, &depth, &bpp);
+ drm_fb_get_bpp_depth(crtc->primary->fb->pixel_format, &depth, &bpp);
switch (bpp) {
case 16:
break;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 214b7992a3aa..4ab9f7171c4f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int ret;
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, 0);
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
@@ -443,7 +443,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_add_to_lru(bo);
}
- ww_mutex_unlock(&bo->resv->lock);
+ __ttm_bo_unreserve(bo);
}
kref_get(&bo->list_kref);
@@ -494,7 +494,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
- ww_mutex_unlock(&bo->resv->lock);
+ __ttm_bo_unreserve(bo);
spin_unlock(&glob->lru_lock);
ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return ret;
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, 0);
/*
* We raced, and lost, someone else holds the reservation now,
@@ -532,7 +532,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
spin_unlock(&bdev->fence_lock);
if (ret || unlikely(list_empty(&bo->ddestroy))) {
- ww_mutex_unlock(&bo->resv->lock);
+ __ttm_bo_unreserve(bo);
spin_unlock(&glob->lru_lock);
return ret;
}
@@ -577,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+ ret = __ttm_bo_reserve(entry, false, true, false, 0);
if (remove_all && ret) {
spin_unlock(&glob->lru_lock);
- ret = ttm_bo_reserve_nolru(entry, false, false,
- false, 0);
+ ret = __ttm_bo_reserve(entry, false, false,
+ false, 0);
spin_lock(&glob->lru_lock);
}
@@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, 0);
if (!ret)
break;
}
@@ -1451,6 +1451,7 @@ EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
+ struct address_space *mapping,
uint64_t file_page_offset,
bool need_dma32)
{
@@ -1472,7 +1473,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
0x10000000);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
- bdev->dev_mapping = NULL;
+ bdev->dev_mapping = mapping;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
bdev->val_seq = 0;
@@ -1629,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) {
- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, 0);
if (!ret)
break;
}
@@ -1696,7 +1697,7 @@ out:
* already swapped buffer.
*/
- ww_mutex_unlock(&bo->resv->lock);
+ __ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -1730,10 +1731,10 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
return -ERESTARTSYS;
if (!ww_mutex_is_locked(&bo->resv->lock))
goto out_unlock;
- ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
+ ret = __ttm_bo_reserve(bo, true, false, false, NULL);
if (unlikely(ret != 0))
goto out_unlock;
- ww_mutex_unlock(&bo->resv->lock);
+ __ttm_bo_unreserve(bo);
out_unlock:
mutex_unlock(&bo->wu_mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index c58eba33bd5f..bd850c9f4bca 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -55,6 +55,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
struct drm_mm_node *node = NULL;
+ enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
unsigned long lpfn;
int ret;
@@ -66,11 +67,15 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
if (!node)
return -ENOMEM;
+ if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN)
+ aflags = DRM_MM_CREATE_TOP;
+
spin_lock(&rman->lock);
- ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
- mem->page_alignment,
+ ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
+ mem->page_alignment, 0,
placement->fpfn, lpfn,
- DRM_MM_SEARCH_BEST);
+ DRM_MM_SEARCH_BEST,
+ aflags);
spin_unlock(&rman->lock);
if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 479e9418e3d7..e8dac8758528 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -46,7 +46,7 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
ttm_bo_add_to_lru(bo);
entry->removed = false;
}
- ww_mutex_unlock(&bo->resv->lock);
+ __ttm_bo_unreserve(bo);
}
}
@@ -140,8 +140,8 @@ retry:
if (entry->reserved)
continue;
- ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
- ticket);
+ ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
+ ticket);
if (ret == -EDEADLK) {
/* uh oh, we lost out, drop every reservation and try
@@ -224,7 +224,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
ttm_bo_add_to_lru(bo);
- ww_mutex_unlock(&bo->resv->lock);
+ __ttm_bo_unreserve(bo);
entry->reserved = false;
}
spin_unlock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 53b51c4e671a..d2a053352789 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -270,6 +270,52 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
}
EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
+/**
+ * ttm_ref_object_exists - Check whether a caller holds a valid ref object
+ * on (i.e. has opened) a base object.
+ *
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller.
+ * @base: Pointer to a struct base object.
+ *
+ * Checks whether the caller identified by @tfile has put a valid USAGE
+ * reference object on the base object identified by @base.
+ */
+bool ttm_ref_object_exists(struct ttm_object_file *tfile,
+ struct ttm_base_object *base)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+ struct drm_hash_item *hash;
+ struct ttm_ref_object *ref;
+
+ rcu_read_lock();
+ if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
+ goto out_false;
+
+ /*
+ * Verify that the ref object is really pointing to our base object.
+ * Our base object could actually be dead, and the ref object pointing
+ * to another base object with the same handle.
+ */
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ if (unlikely(base != ref->obj))
+ goto out_false;
+
+ /*
+ * Verify that the ref->obj pointer was actually valid!
+ */
+ rmb();
+ if (unlikely(atomic_read(&ref->kref.refcount) == 0))
+ goto out_false;
+
+ rcu_read_unlock();
+ return true;
+
+ out_false:
+ rcu_read_unlock();
+ return false;
+}
+EXPORT_SYMBOL(ttm_ref_object_exists);
+
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
enum ttm_ref_type ref_type, bool *existed)
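
The new ttm_ref_object_exists() is intended for exactly the kind of access
check the vmwgfx verify_access hunk further down in this patch performs. A
minimal sketch of a caller, assuming a driver-side object that embeds a
struct ttm_base_object (the function name my_verify_access is hypothetical):

	/* Grant access only if @tfile has opened (holds a USAGE ref on) @base. */
	static int my_verify_access(struct ttm_object_file *tfile,
				    struct ttm_base_object *base)
	{
		if (ttm_ref_object_exists(tfile, base))
			return 0;

		return -EPERM;
	}
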
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dbadd49e4c4a..377176372da8 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -421,7 +421,7 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
clips[i].x2 - clips[i].x1,
clips[i].y2 - clips[i].y1);
if (ret)
- goto unlock;
+ break;
}
if (ufb->obj->base.import_attach) {
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 0394811251bd..c041cd73f399 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -60,7 +60,7 @@ int udl_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- args->pitch = args->width * ((args->bpp + 1) / 8);
+ args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
args->size = args->pitch * args->height;
return udl_gem_create(file, dev,
args->size, &args->handle);
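
The pitch change fixes a rounding bug: (bpp + 1) / 8 only rounds up when bpp
is exactly one below a multiple of 8, whereas DIV_ROUND_UP(bpp, 8) rounds up
for any remainder. Worked values (chosen for illustration only):

	/* bpp = 16: (16 + 1) / 8 = 2,  DIV_ROUND_UP(16, 8) = 2  -> unchanged
	 * bpp = 24: (24 + 1) / 8 = 3,  DIV_ROUND_UP(24, 8) = 3  -> unchanged
	 * bpp = 12: (12 + 1) / 8 = 1,  DIV_ROUND_UP(12, 8) = 2  -> old pitch too small
	 */
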
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 2ae1eb7d1635..cddc4fcf35cf 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -310,7 +310,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
- struct udl_framebuffer *ufb = to_udl_fb(crtc->fb);
+ struct udl_framebuffer *ufb = to_udl_fb(crtc->primary->fb);
struct udl_device *udl = dev->dev_private;
char *buf;
char *wrptr;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 1e80152674b5..8bb26dcd9eae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -117,10 +117,10 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
(void) vmw_context_binding_state_kill
(&container_of(res, struct vmw_user_context, res)->cbs);
(void) vmw_gb_context_destroy(res);
+ mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
- mutex_unlock(&dev_priv->binding_mutex);
mutex_unlock(&dev_priv->cmdbuf_mutex);
return;
}
@@ -462,7 +462,6 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
@@ -474,7 +473,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(vmw_user_context_size == 0))
vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
@@ -521,7 +520,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
out_err:
vmw_resource_unreference(&res);
out_unlock:
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index a75840211b3c..70ddce8358b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -52,11 +52,10 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
struct ttm_placement *placement,
bool interruptible)
{
- struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
int ret;
- ret = ttm_write_lock(&vmaster->lock, interruptible);
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
@@ -71,7 +70,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
ttm_bo_unreserve(bo);
err:
- ttm_write_unlock(&vmaster->lock);
+ ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -95,12 +94,11 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
{
- struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement *placement;
int ret;
- ret = ttm_write_lock(&vmaster->lock, interruptible);
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
@@ -143,7 +141,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
err_unreserve:
ttm_bo_unreserve(bo);
err:
- ttm_write_unlock(&vmaster->lock);
+ ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -198,7 +196,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
{
- struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
int ret = 0;
@@ -209,7 +206,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
placement = vmw_vram_placement;
placement.lpfn = bo->num_pages;
- ret = ttm_write_lock(&vmaster->lock, interruptible);
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
@@ -232,7 +229,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
ttm_bo_unreserve(bo);
err_unlock:
- ttm_write_unlock(&vmaster->lock);
+ ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0083cbf99edf..4a223bbea3b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -142,11 +142,11 @@
static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
@@ -159,29 +159,28 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
vmw_fence_obj_signaled_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_FENCE_EVENT,
- vmw_fence_event_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 /* These allow direct access to the framebuffers; mark as master only */
VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
@@ -194,19 +193,19 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_MASTER | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_CREATE_SHADER,
vmw_shader_define_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SHADER,
vmw_shader_destroy_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
vmw_gb_surface_define_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
vmw_gb_surface_reference_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_dmabuf_synccpu_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -606,6 +605,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
+ ttm_lock_init(&dev_priv->reservation_sem);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
@@ -722,7 +722,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ret = ttm_bo_device_init(&dev_priv->bdev,
dev_priv->bo_global_ref.ref.object,
- &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
+ &vmw_bo_driver,
+ dev->anon_inode->i_mapping,
+ VMWGFX_FILE_PAGE_OFFSET,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
@@ -969,7 +971,6 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
goto out_no_shman;
file_priv->driver_priv = vmw_fp;
- dev_priv->bdev.dev_mapping = dev->dev_mapping;
return 0;
@@ -980,12 +981,70 @@ out_no_tfile:
return ret;
}
-static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+static struct vmw_master *vmw_master_check(struct drm_device *dev,
+ struct drm_file *file_priv,
+ unsigned int flags)
+{
+ int ret;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct vmw_master *vmaster;
+
+ if (file_priv->minor->type != DRM_MINOR_LEGACY ||
+ !(flags & DRM_AUTH))
+ return NULL;
+
+ ret = mutex_lock_interruptible(&dev->master_mutex);
+ if (unlikely(ret != 0))
+ return ERR_PTR(-ERESTARTSYS);
+
+ if (file_priv->is_master) {
+ mutex_unlock(&dev->master_mutex);
+ return NULL;
+ }
+
+ /*
+ * Check if we were previously master, but now dropped.
+ */
+ if (vmw_fp->locked_master) {
+ mutex_unlock(&dev->master_mutex);
+ DRM_ERROR("Dropped master trying to access ioctl that "
+ "requires authentication.\n");
+ return ERR_PTR(-EACCES);
+ }
+ mutex_unlock(&dev->master_mutex);
+
+ /*
+ * Taking the drm_global_mutex after the TTM lock might deadlock
+ */
+ if (!(flags & DRM_UNLOCKED)) {
+ DRM_ERROR("Refusing locked ioctl access.\n");
+ return ERR_PTR(-EDEADLK);
+ }
+
+ /*
+ * Take the TTM lock. Possibly sleep waiting for the authenticating
+ * master to become master again, or for a SIGTERM if the
+ * authenticating master exits.
+ */
+ vmaster = vmw_master(file_priv->master);
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ vmaster = ERR_PTR(ret);
+
+ return vmaster;
+}
+
+static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg,
+ long (*ioctl_func)(struct file *, unsigned int,
+ unsigned long))
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
unsigned int nr = DRM_IOCTL_NR(cmd);
+ struct vmw_master *vmaster;
+ unsigned int flags;
+ long ret;
/*
* Do extra checking on driver private ioctls.
@@ -994,18 +1053,44 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
const struct drm_ioctl_desc *ioctl =
- &vmw_ioctls[nr - DRM_COMMAND_BASE];
+ &vmw_ioctls[nr - DRM_COMMAND_BASE];
if (unlikely(ioctl->cmd_drv != cmd)) {
DRM_ERROR("Invalid command format, ioctl %d\n",
nr - DRM_COMMAND_BASE);
return -EINVAL;
}
+ flags = ioctl->flags;
+ } else if (!drm_ioctl_flags(nr, &flags))
+ return -EINVAL;
+
+ vmaster = vmw_master_check(dev, file_priv, flags);
+ if (unlikely(IS_ERR(vmaster))) {
+ DRM_INFO("IOCTL ERROR %d\n", nr);
+ return PTR_ERR(vmaster);
}
- return drm_ioctl(filp, cmd, arg);
+ ret = ioctl_func(filp, cmd, arg);
+ if (vmaster)
+ ttm_read_unlock(&vmaster->lock);
+
+ return ret;
+}
+
+static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}
+#ifdef CONFIG_COMPAT
+static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
+}
+#endif
+
static void vmw_lastclose(struct drm_device *dev)
{
struct drm_crtc *crtc;
@@ -1174,12 +1259,11 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
{
struct vmw_private *dev_priv =
container_of(nb, struct vmw_private, pm_nb);
- struct vmw_master *vmaster = dev_priv->active_master;
switch (val) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
- ttm_suspend_lock(&vmaster->lock);
+ ttm_suspend_lock(&dev_priv->reservation_sem);
/**
* This empties VRAM and unbinds all GMR bindings.
@@ -1193,7 +1277,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
case PM_POST_RESTORE:
- ttm_suspend_unlock(&vmaster->lock);
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
break;
case PM_RESTORE_PREPARE:
@@ -1314,14 +1398,14 @@ static const struct file_operations vmwgfx_driver_fops = {
.poll = vmw_fops_poll,
.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
- .compat_ioctl = drm_compat_ioctl,
+ .compat_ioctl = vmw_compat_ioctl,
#endif
.llseek = noop_llseek,
};
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
- DRIVER_MODESET | DRIVER_PRIME,
+ DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
.load = vmw_driver_load,
.unload = vmw_driver_unload,
.lastclose = vmw_lastclose,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 07831554dad7..6b252a887ae2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20140228"
+#define VMWGFX_DRIVER_DATE "20140325"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 5
+#define VMWGFX_DRIVER_MINOR 6
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -487,6 +487,11 @@ struct vmw_private {
uint32_t num_3d_resources;
/*
+ * Replace this with an rwsem as soon as we have down_xx_interruptible()
+ */
+ struct ttm_lock reservation_sem;
+
+ /*
* Query processing. These members
* are protected by the cmdbuf mutex.
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index efb575a7996c..931490b9cfed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2712,7 +2712,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
/*
@@ -2729,7 +2728,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
@@ -2745,6 +2744,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
vmw_kms_cursor_post_execbuf(dev_priv);
out_unlock:
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ed5ce2a41bbf..a89ad938eacf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -147,7 +147,7 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
}
if (!vmw_kms_validate_mode_vram(vmw_priv,
- info->fix.line_length,
+ var->xres * var->bits_per_pixel/8,
var->yoffset + var->yres)) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
return -EINVAL;
@@ -162,6 +162,8 @@ static int vmw_fb_set_par(struct fb_info *info)
struct vmw_private *vmw_priv = par->vmw_priv;
int ret;
+ info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
+
ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
info->fix.line_length,
par->bpp, par->depth);
@@ -177,6 +179,7 @@ static int vmw_fb_set_par(struct fb_info *info)
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
+ vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
}
@@ -377,14 +380,13 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- /* interuptable? */
- ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
- if (unlikely(ret != 0))
- return ret;
+ (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
- if (!vmw_bo)
+ if (!vmw_bo) {
+ ret = -ENOMEM;
goto err_unlock;
+ }
ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
&ne_placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 47b70949bf3a..37881ecf5d7a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -226,7 +226,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_present_arg *arg =
(struct drm_vmw_present_arg *)data;
struct vmw_surface *surface;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_vmw_rect __user *clips_ptr;
struct drm_vmw_rect *clips = NULL;
struct drm_framebuffer *fb;
@@ -271,7 +270,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
}
vfb = vmw_framebuffer_to_vfb(fb);
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
goto out_no_ttm_lock;
@@ -291,7 +290,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
vmw_surface_unreference(&surface);
out_no_surface:
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
drm_framebuffer_unreference(fb);
out_no_fb:
@@ -311,7 +310,6 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *)
(unsigned long)arg->fence_rep;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_vmw_rect __user *clips_ptr;
struct drm_vmw_rect *clips = NULL;
struct drm_framebuffer *fb;
@@ -361,7 +359,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_no_ttm_lock;
}
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
goto out_no_ttm_lock;
@@ -369,7 +367,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
vfb, user_fence_rep,
clips, num_clips);
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
drm_framebuffer_unreference(fb);
out_no_fb:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8a650413dea5..a2dde5ad8138 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -468,7 +468,7 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
- if (crtc->fb != &framebuffer->base)
+ if (crtc->primary->fb != &framebuffer->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
@@ -596,7 +596,6 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
struct drm_clip_rect norect;
@@ -611,7 +610,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
drm_modeset_lock_all(dev_priv->dev);
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) {
drm_modeset_unlock_all(dev_priv->dev);
return ret;
@@ -632,7 +631,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
flags, color,
clips, num_clips, inc, NULL);
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev);
@@ -883,7 +882,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->fb != &framebuffer->base)
+ if (crtc->primary->fb != &framebuffer->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
@@ -954,7 +953,6 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
struct drm_clip_rect norect;
@@ -962,7 +960,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
drm_modeset_lock_all(dev_priv->dev);
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) {
drm_modeset_unlock_all(dev_priv->dev);
return ret;
@@ -989,7 +987,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
clips, num_clips, increment, NULL);
}
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev);
@@ -1245,7 +1243,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->fb != &vfb->base)
+ if (crtc->primary->fb != &vfb->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
@@ -1382,7 +1380,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->fb != &vfb->base)
+ if (crtc->primary->fb != &vfb->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
@@ -1725,7 +1723,7 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
uint32_t page_flip_flags)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct drm_framebuffer *old_fb = crtc->fb;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
struct drm_file *file_priv ;
struct vmw_fence_obj *fence = NULL;
@@ -1743,7 +1741,7 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
return -EINVAL;
- crtc->fb = fb;
+ crtc->primary->fb = fb;
/* do a full screen dirty update */
clips.x1 = clips.y1 = 0;
@@ -1783,7 +1781,7 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
return ret;
out_no_fence:
- crtc->fb = old_fb;
+ crtc->primary->fb = old_fb;
return ret;
}
@@ -2022,7 +2020,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
void __user *user_rects;
struct drm_vmw_rect *rects;
unsigned rects_size;
@@ -2030,7 +2027,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
int i;
struct drm_mode_config *mode_config = &dev->mode_config;
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
@@ -2072,6 +2069,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
out_free:
kfree(rects);
out_unlock:
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a055a26819c2..b2b9bd23aeee 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -93,7 +93,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
if (crtc == NULL)
return 0;
- fb = entry->base.crtc.fb;
+ fb = entry->base.crtc.primary->fb;
return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
fb->bits_per_pixel, fb->depth);
@@ -101,7 +101,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
if (!list_empty(&lds->active)) {
entry = list_entry(lds->active.next, typeof(*entry), active);
- fb = entry->base.crtc.fb;
+ fb = entry->base.crtc.primary->fb;
vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
fb->bits_per_pixel, fb->depth);
@@ -259,7 +259,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
connector->encoder = NULL;
encoder->crtc = NULL;
- crtc->fb = NULL;
+ crtc->primary->fb = NULL;
crtc->enabled = false;
vmw_ldu_del_active(dev_priv, ldu);
@@ -280,7 +280,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
vmw_fb_off(dev_priv);
- crtc->fb = fb;
+ crtc->primary->fb = fb;
encoder->crtc = crtc;
connector->encoder = encoder;
crtc->x = set->x;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9757b57f8388..01d68f0a69dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -538,8 +538,13 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
return -EPERM;
vmw_user_bo = vmw_user_dma_buffer(bo);
- return (vmw_user_bo->prime.base.tfile == tfile ||
- vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
+
+ /* Check that the caller has opened the object. */
+ if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
+ return 0;
+
+ DRM_ERROR("Could not grant buffer access.\n");
+ return -EPERM;
}
/**
@@ -676,10 +681,9 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
struct vmw_dma_buffer *dma_buf;
uint32_t handle;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
@@ -696,7 +700,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -873,7 +877,6 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *tmp;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
/*
@@ -884,7 +887,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
if (unlikely(vmw_user_stream_size == 0))
vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
@@ -932,7 +935,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
out_err:
vmw_resource_unreference(&res);
out_unlock:
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -985,14 +988,13 @@ int vmw_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_dma_buffer *dma_buf;
int ret;
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
@@ -1004,7 +1006,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 22406c8651ea..a95d3a0cabe4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -307,7 +307,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
connector->encoder = NULL;
encoder->crtc = NULL;
- crtc->fb = NULL;
+ crtc->primary->fb = NULL;
crtc->x = 0;
crtc->y = 0;
crtc->enabled = false;
@@ -368,7 +368,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
connector->encoder = NULL;
encoder->crtc = NULL;
- crtc->fb = NULL;
+ crtc->primary->fb = NULL;
crtc->x = 0;
crtc->y = 0;
crtc->enabled = false;
@@ -381,7 +381,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
connector->encoder = encoder;
encoder->crtc = crtc;
crtc->mode = *mode;
- crtc->fb = fb;
+ crtc->primary->fb = fb;
crtc->x = set->x;
crtc->y = set->y;
crtc->enabled = true;
@@ -572,5 +572,5 @@ void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
BUG_ON(!sou->base.is_implicit);
dev_priv->sou_priv->implicit_fb =
- vmw_framebuffer_to_vfb(sou->base.crtc.fb);
+ vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index ee3856578a12..c1559eeaffe9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -449,7 +449,6 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_shader_create_arg *arg =
(struct drm_vmw_shader_create_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_dma_buffer *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
@@ -487,14 +486,14 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
goto out_bad_arg;
}
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
goto out_bad_arg;
ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
shader_type, tfile, &arg->shader_handle);
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
vmw_dmabuf_unreference(&buffer);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index e7af580ab977..4ecdbf3e59da 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -36,11 +36,13 @@
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
* @size: TTM accounting size for the surface.
+ * @master: master of the creating client. Used for security checks.
*/
struct vmw_user_surface {
struct ttm_prime_object prime;
struct vmw_surface srf;
uint32_t size;
+ struct drm_master *master;
};
/**
@@ -624,6 +626,8 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_private *dev_priv = srf->res.dev_priv;
uint32_t size = user_srf->size;
+ if (user_srf->master)
+ drm_master_put(&user_srf->master);
kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
@@ -697,7 +701,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct vmw_surface_offset *cur_offset;
uint32_t num_sizes;
uint32_t size;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
const struct svga3d_surface_desc *desc;
if (unlikely(vmw_user_surface_size == 0))
@@ -723,7 +726,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
@@ -820,6 +823,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL;
+ if (drm_is_primary_client(file_priv))
+ user_srf->master = drm_master_get(file_priv->master);
/**
* From this point, the generic resource management functions
@@ -862,7 +867,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
rep->sid = user_srf->prime.base.hash.key;
vmw_resource_unreference(&res);
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
return 0;
out_no_copy:
kfree(srf->offsets);
@@ -873,7 +878,81 @@ out_no_sizes:
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+
+static int
+vmw_surface_handle_reference(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ uint32_t u_handle,
+ enum drm_vmw_handle_type handle_type,
+ struct ttm_base_object **base_p)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_user_surface *user_srf;
+ uint32_t handle;
+ struct ttm_base_object *base;
+ int ret;
+
+ if (handle_type == DRM_VMW_HANDLE_PRIME) {
+ ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
+ if (unlikely(ret != 0))
+ return ret;
+ } else {
+ if (unlikely(drm_is_render_client(file_priv))) {
+ DRM_ERROR("Render client refused legacy "
+ "surface reference.\n");
+ return -EACCES;
+ }
+ handle = u_handle;
+ }
+
+ ret = -EINVAL;
+ base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
+ goto out_no_lookup;
+ }
+
+ if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
+ DRM_ERROR("Referenced object is not a surface.\n");
+ goto out_bad_resource;
+ }
+
+ if (handle_type != DRM_VMW_HANDLE_PRIME) {
+ user_srf = container_of(base, struct vmw_user_surface,
+ prime.base);
+
+ /*
+ * Make sure the surface creator has the same
+ * authenticating master.
+ */
+ if (drm_is_primary_client(file_priv) &&
+ user_srf->master != file_priv->master) {
+ DRM_ERROR("Trying to reference surface outside of"
+ " master domain.\n");
+ ret = -EACCES;
+ goto out_bad_resource;
+ }
+
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a surface.\n");
+ goto out_bad_resource;
+ }
+ }
+
+ *base_p = base;
+ return 0;
+
+out_bad_resource:
+ ttm_base_object_unref(&base);
+out_no_lookup:
+ if (handle_type == DRM_VMW_HANDLE_PRIME)
+ (void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+
return ret;
}
@@ -898,27 +977,16 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct vmw_user_surface *user_srf;
struct drm_vmw_size __user *user_sizes;
struct ttm_base_object *base;
- int ret = -EINVAL;
-
- base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
- if (unlikely(base == NULL)) {
- DRM_ERROR("Could not find surface to reference.\n");
- return -EINVAL;
- }
+ int ret;
- if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
- goto out_bad_resource;
+ ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
+ req->handle_type, &base);
+ if (unlikely(ret != 0))
+ return ret;
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
- TTM_REF_USAGE, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a surface.\n");
- goto out_no_reference;
- }
-
rep->flags = srf->flags;
rep->format = srf->format;
memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
@@ -931,10 +999,10 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
+ ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
ret = -EFAULT;
}
-out_bad_resource:
-out_no_reference:
+
ttm_base_object_unref(&base);
return ret;
@@ -1173,7 +1241,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
uint32_t size;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
const struct svga3d_surface_desc *desc;
uint32_t backup_handle;
@@ -1189,7 +1256,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = ttm_read_lock(&vmaster->lock, true);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
@@ -1228,6 +1295,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL;
+ if (drm_is_primary_client(file_priv))
+ user_srf->master = drm_master_get(file_priv->master);
/**
* From this point, the generic resource management functions
@@ -1283,12 +1352,12 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
vmw_resource_unreference(&res);
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
return 0;
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
- ttm_read_unlock(&vmaster->lock);
+ ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -1315,14 +1384,10 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
uint32_t backup_handle;
int ret = -EINVAL;
- base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
- if (unlikely(base == NULL)) {
- DRM_ERROR("Could not find surface to reference.\n");
- return -EINVAL;
- }
-
- if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
- goto out_bad_resource;
+ ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
+ req->handle_type, &base);
+ if (unlikely(ret != 0))
+ return ret;
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
@@ -1331,13 +1396,6 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
goto out_bad_resource;
}
- ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
- TTM_REF_USAGE, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a GB surface.\n");
- goto out_bad_resource;
- }
-
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
&backup_handle);
@@ -1346,8 +1404,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a GB surface "
"backup buffer.\n");
- (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- req->sid,
+ (void) ttm_ref_object_base_unref(tfile, base->hash.key,
TTM_REF_USAGE);
goto out_bad_resource;
}
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index bfb09d802abd..b10550ee1d89 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -102,6 +102,7 @@ u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
return (u32)atomic_add_return(incrs, &sp->max_val);
}
+EXPORT_SYMBOL(host1x_syncpt_incr_max);
/*
* Write cached syncpoint and waitbase values to hardware.
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index f7220011a00b..7af9d0b5dea1 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -175,6 +175,15 @@ config HID_PRODIKEYS
multimedia keyboard, but will lack support for the musical keyboard
and some additional multimedia keys.
+config HID_CP2112
+ tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
+ depends on USB_HID && I2C && GPIOLIB
+ ---help---
+ Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge.
+	  This is a HID device driver which registers an i2c adapter and a
+	  gpiochip to expose the SMBus and GPIO functions of the CP2112. The
+ customizable USB descriptor fields are exposed as sysfs attributes.
+
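As a rough illustration of what the registered i2c adapter makes possible, the sketch below reads one byte-data register from an SMBus slave through the standard Linux i2c-dev interface; the adapter node (/dev/i2c-3), the slave address (0x50) and the register (0x00) are placeholders, not values taken from this patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
        union i2c_smbus_data data;
        struct i2c_smbus_ioctl_data args = {
                .read_write = I2C_SMBUS_READ,
                .command = 0x00,                /* placeholder slave register */
                .size = I2C_SMBUS_BYTE_DATA,
                .data = &data,
        };
        int fd = open("/dev/i2c-3", O_RDWR);    /* placeholder adapter node */

        if (fd < 0)
                return 1;
        if (ioctl(fd, I2C_SLAVE, 0x50) < 0 ||   /* placeholder slave address */
            ioctl(fd, I2C_SMBUS, &args) < 0) {
                close(fd);
                return 1;
        }
        printf("reg 0x00 = 0x%02x\n", data.byte);
        close(fd);
        return 0;
}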
config HID_CYPRESS
tristate "Cypress mouse and barcode readers" if EXPERT
depends on HID
@@ -608,25 +617,27 @@ config HID_SAMSUNG
Support for Samsung InfraRed remote control or keyboards.
config HID_SONY
- tristate "Sony PS2/3 accessories"
+ tristate "Sony PS2/3/4 accessories"
depends on USB_HID
depends on NEW_LEDS
depends on LEDS_CLASS
+ select POWER_SUPPLY
---help---
Support for
* Sony PS3 6-axis controllers
+ * Sony PS4 DualShock 4 controllers
* Buzz controllers
  * Sony PS3 Blu-ray Disc Remote Control (Bluetooth)
* Logitech Harmony adapter for Sony Playstation 3 (Bluetooth)
config SONY_FF
- bool "Sony PS2/3 accessories force feedback support"
+ bool "Sony PS2/3/4 accessories force feedback support"
depends on HID_SONY
select INPUT_FF_MEMLESS
---help---
- Say Y here if you have a Sony PS2/3 accessory and want to enable force
- feedback support for it.
+ Say Y here if you have a Sony PS2/3/4 accessory and want to enable
+ force feedback support for it.
config HID_SPEEDLINK
tristate "Speedlink VAD Cezanne mouse support"
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 30e44318f87f..fc712dde02a4 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
+obj-$(CONFIG_HID_CP2112) += hid-cp2112.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
obj-$(CONFIG_HID_DRAGONRISE) += hid-dr.o
obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index cc32a6f96c64..9e8064205bc7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1248,6 +1248,11 @@ void hid_output_report(struct hid_report *report, __u8 *data)
}
EXPORT_SYMBOL_GPL(hid_output_report);
+static int hid_report_len(struct hid_report *report)
+{
+ return ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
+}
+
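For reference, the helper mirrors the arithmetic hid_alloc_report_buf() has so far carried inline: the report size in bits is rounded up to whole bytes, one byte is added when a nonzero report ID is present, and seven bytes of slack cover implement() working on 8-byte chunks. For a hypothetical 20-bit report with ID 3 this gives ((20 - 1) >> 3) + 1 + 1 + 7 = 11 bytes.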
/*
* Allocator for buffer that is going to be passed to hid_output_report()
*/
@@ -1258,7 +1263,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
* of implement() working on 8 byte chunks
*/
- int len = ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
+ int len = hid_report_len(report);
return kmalloc(len, flags);
}
@@ -1314,6 +1319,41 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
return report;
}
+/*
+ * Implement a generic .request() callback, using .raw_request().
+ * DO NOT USE in HID drivers directly; go through hid_hw_request() instead.
+ */
+void __hid_request(struct hid_device *hid, struct hid_report *report,
+ int reqtype)
+{
+ char *buf;
+ int ret;
+ int len;
+
+ buf = hid_alloc_report_buf(report, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ len = hid_report_len(report);
+
+ if (reqtype == HID_REQ_SET_REPORT)
+ hid_output_report(report, buf);
+
+ ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
+ report->type, reqtype);
+ if (ret < 0) {
+ dbg_hid("unable to complete request: %d\n", ret);
+ goto out;
+ }
+
+ if (reqtype == HID_REQ_GET_REPORT)
+ hid_input_report(hid, report->type, buf, ret, 0);
+
+out:
+ kfree(buf);
+}
+EXPORT_SYMBOL_GPL(__hid_request);
+
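Drivers are expected to reach this path through the hid_hw_request() wrapper rather than by calling __hid_request() directly; a GET_REPORT issued this way is fed back through hid_input_report() and therefore arrives via the driver's usual event callbacks. A minimal driver-side sketch, with a purely illustrative feature report ID (the mt_set_input_mode() code touched later in this diff follows the same lookup-then-request pattern):

/* EXAMPLE_FEATURE_REPORT_ID is a placeholder, not a real device's ID. */
#define EXAMPLE_FEATURE_REPORT_ID       0x06

static void example_refresh_feature(struct hid_device *hdev)
{
        struct hid_report_enum *re = &hdev->report_enum[HID_FEATURE_REPORT];
        struct hid_report *report = re->report_id_hash[EXAMPLE_FEATURE_REPORT_ID];

        if (report)
                hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
}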
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
int interrupt)
{
@@ -1693,6 +1733,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
@@ -1782,6 +1823,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
@@ -2278,6 +2320,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
{ }
};
@@ -2433,6 +2476,14 @@ int hid_add_device(struct hid_device *hdev)
return -ENODEV;
/*
+ * Check for the mandatory transport channel.
+ */
+ if (!hdev->ll_driver->raw_request) {
+ hid_err(hdev, "transport driver missing .raw_request()\n");
+ return -EINVAL;
+ }
+
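The check above turns a missing control channel into a probe-time failure, so every transport driver must now provide the callback even when the hardware has nothing to service it with; a no-op stub along the lines of the mousevsc one added further down in this diff is enough (the function name here is illustrative):

static int example_ll_raw_request(struct hid_device *hid,
                                  unsigned char reportnum, __u8 *buf,
                                  size_t len, unsigned char rtype,
                                  int reqtype)
{
        /* No real control channel on this transport; do nothing. */
        return 0;
}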
+ /*
* Read the device report descriptor once and use as template
* for the driver-specific modifications.
*/
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
new file mode 100644
index 000000000000..56be85a9a77c
--- /dev/null
+++ b/drivers/hid/hid-cp2112.c
@@ -0,0 +1,1073 @@
+/*
+ * hid-cp2112.c - Silicon Labs HID USB to SMBus master bridge
+ * Copyright (c) 2013,2014 Uplogix, Inc.
+ * David Barksdale <dbarksdale@uplogix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * The Silicon Labs CP2112 chip is a USB HID device which provides an
+ * SMBus controller for talking to slave devices and 8 GPIO pins. The
+ * host communicates with the CP2112 via raw HID reports.
+ *
+ * Data Sheet:
+ * http://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf
+ * Programming Interface Specification:
+ * http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf
+ */
+
+#include <linux/gpio.h>
+#include <linux/hid.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/nls.h>
+#include <linux/usb/ch9.h>
+#include "hid-ids.h"
+
+enum {
+ CP2112_GPIO_CONFIG = 0x02,
+ CP2112_GPIO_GET = 0x03,
+ CP2112_GPIO_SET = 0x04,
+ CP2112_GET_VERSION_INFO = 0x05,
+ CP2112_SMBUS_CONFIG = 0x06,
+ CP2112_DATA_READ_REQUEST = 0x10,
+ CP2112_DATA_WRITE_READ_REQUEST = 0x11,
+ CP2112_DATA_READ_FORCE_SEND = 0x12,
+ CP2112_DATA_READ_RESPONSE = 0x13,
+ CP2112_DATA_WRITE_REQUEST = 0x14,
+ CP2112_TRANSFER_STATUS_REQUEST = 0x15,
+ CP2112_TRANSFER_STATUS_RESPONSE = 0x16,
+ CP2112_CANCEL_TRANSFER = 0x17,
+ CP2112_LOCK_BYTE = 0x20,
+ CP2112_USB_CONFIG = 0x21,
+ CP2112_MANUFACTURER_STRING = 0x22,
+ CP2112_PRODUCT_STRING = 0x23,
+ CP2112_SERIAL_STRING = 0x24,
+};
+
+enum {
+ STATUS0_IDLE = 0x00,
+ STATUS0_BUSY = 0x01,
+ STATUS0_COMPLETE = 0x02,
+ STATUS0_ERROR = 0x03,
+};
+
+enum {
+ STATUS1_TIMEOUT_NACK = 0x00,
+ STATUS1_TIMEOUT_BUS = 0x01,
+ STATUS1_ARBITRATION_LOST = 0x02,
+ STATUS1_READ_INCOMPLETE = 0x03,
+ STATUS1_WRITE_INCOMPLETE = 0x04,
+ STATUS1_SUCCESS = 0x05,
+};
+
+struct cp2112_smbus_config_report {
+ u8 report; /* CP2112_SMBUS_CONFIG */
+ __be32 clock_speed; /* Hz */
+ u8 device_address; /* Stored in the upper 7 bits */
+ u8 auto_send_read; /* 1 = enabled, 0 = disabled */
+ __be16 write_timeout; /* ms, 0 = no timeout */
+ __be16 read_timeout; /* ms, 0 = no timeout */
+ u8 scl_low_timeout; /* 1 = enabled, 0 = disabled */
+ __be16 retry_time; /* # of retries, 0 = no limit */
+} __packed;
+
+struct cp2112_usb_config_report {
+ u8 report; /* CP2112_USB_CONFIG */
+ __le16 vid; /* Vendor ID */
+ __le16 pid; /* Product ID */
+ u8 max_power; /* Power requested in 2mA units */
+ u8 power_mode; /* 0x00 = bus powered
+ 0x01 = self powered & regulator off
+ 0x02 = self powered & regulator on */
+ u8 release_major;
+ u8 release_minor;
+ u8 mask; /* What fields to program */
+} __packed;
+
+struct cp2112_read_req_report {
+ u8 report; /* CP2112_DATA_READ_REQUEST */
+ u8 slave_address;
+ __be16 length;
+} __packed;
+
+struct cp2112_write_read_req_report {
+ u8 report; /* CP2112_DATA_WRITE_READ_REQUEST */
+ u8 slave_address;
+ __be16 length;
+ u8 target_address_length;
+ u8 target_address[16];
+} __packed;
+
+struct cp2112_write_req_report {
+ u8 report; /* CP2112_DATA_WRITE_REQUEST */
+ u8 slave_address;
+ u8 length;
+ u8 data[61];
+} __packed;
+
+struct cp2112_force_read_report {
+ u8 report; /* CP2112_DATA_READ_FORCE_SEND */
+ __be16 length;
+} __packed;
+
+struct cp2112_xfer_status_report {
+ u8 report; /* CP2112_TRANSFER_STATUS_RESPONSE */
+ u8 status0; /* STATUS0_* */
+ u8 status1; /* STATUS1_* */
+ __be16 retries;
+ __be16 length;
+} __packed;
+
+struct cp2112_string_report {
+ u8 dummy; /* force .string to be aligned */
+ u8 report; /* CP2112_*_STRING */
+ u8 length; /* length in bytes of everything after .report */
+ u8 type; /* USB_DT_STRING */
+ wchar_t string[30]; /* UTF16_LITTLE_ENDIAN string */
+} __packed;
+
+/* Number of times to request transfer status before giving up waiting for a
+ transfer to complete. This may need to be changed if SMBUS clock, retries,
+ or read/write/scl_low timeout settings are changed. */
+static const int XFER_STATUS_RETRIES = 10;
+
+/* Time in ms to wait for a CP2112_DATA_READ_RESPONSE or
+ CP2112_TRANSFER_STATUS_RESPONSE. */
+static const int RESPONSE_TIMEOUT = 50;
+
+static const struct hid_device_id cp2112_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, cp2112_devices);
+
+struct cp2112_device {
+ struct i2c_adapter adap;
+ struct hid_device *hdev;
+ wait_queue_head_t wait;
+ u8 read_data[61];
+ u8 read_length;
+ int xfer_status;
+ atomic_t read_avail;
+ atomic_t xfer_avail;
+ struct gpio_chip gc;
+};
+
+static int gpio_push_pull = 0xFF;
+module_param(gpio_push_pull, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(gpio_push_pull, "GPIO push-pull configuration bitmask");
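Given the 0xFF default and the way the value is copied straight into the GPIO configuration report, each bit presumably selects push-pull (set) or open-drain (clear) for the corresponding pin, so loading with modprobe hid-cp2112 gpio_push_pull=0x0f would, under that assumption, drive GPIO0-3 push-pull and leave GPIO4-7 open-drain; the per-bit meaning is inferred from the CP2112 programming interface rather than stated anywhere in this patch.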
+
+static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct cp2112_device *dev = container_of(chip, struct cp2112_device,
+ gc);
+ struct hid_device *hdev = dev->hdev;
+ u8 buf[5];
+ int ret;
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ sizeof(buf), HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != sizeof(buf)) {
+ hid_err(hdev, "error requesting GPIO config: %d\n", ret);
+ return ret;
+ }
+
+ buf[1] &= ~(1 << offset);
+ buf[2] = gpio_push_pull;
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret < 0) {
+ hid_err(hdev, "error setting GPIO config: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct cp2112_device *dev = container_of(chip, struct cp2112_device,
+ gc);
+ struct hid_device *hdev = dev->hdev;
+ u8 buf[3];
+ int ret;
+
+ buf[0] = CP2112_GPIO_SET;
+ buf[1] = value ? 0xff : 0;
+ buf[2] = 1 << offset;
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret < 0)
+ hid_err(hdev, "error setting GPIO values: %d\n", ret);
+}
+
+static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct cp2112_device *dev = container_of(chip, struct cp2112_device,
+ gc);
+ struct hid_device *hdev = dev->hdev;
+ u8 buf[2];
+ int ret;
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret != sizeof(buf)) {
+ hid_err(hdev, "error requesting GPIO values: %d\n", ret);
+ return ret;
+ }
+
+ return (buf[1] >> offset) & 1;
+}
+
+static int cp2112_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct cp2112_device *dev = container_of(chip, struct cp2112_device,
+ gc);
+ struct hid_device *hdev = dev->hdev;
+ u8 buf[5];
+ int ret;
+
+ cp2112_gpio_set(chip, offset, value);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ sizeof(buf), HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != sizeof(buf)) {
+ hid_err(hdev, "error requesting GPIO config: %d\n", ret);
+ return ret;
+ }
+
+ buf[1] |= 1 << offset;
+ buf[2] = gpio_push_pull;
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret < 0) {
+ hid_err(hdev, "error setting GPIO config: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
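The four callbacks above are all gpiolib needs; once the chip is registered in cp2112_probe(), an in-kernel consumer can claim any of the eight lines through the legacy gpio API. A rough sketch, assuming the dynamically assigned base has already been discovered by the caller (the function name, label and pin choice are made up for illustration):

/* Illustrative consumer, not part of the driver: pulse one CP2112 pin. */
static int cp2112_example_pulse(int base)
{
        int gpio = base + 2;                    /* arbitrary pin choice */
        int ret = gpio_request(gpio, "cp2112-example");

        if (ret)
                return ret;
        ret = gpio_direction_output(gpio, 1);   /* drive high */
        if (!ret)
                gpio_set_value_cansleep(gpio, 0);  /* chip is can_sleep */
        gpio_free(gpio);
        return ret;
}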
+static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
+ u8 *data, size_t count, unsigned char report_type)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, report_number, buf, count,
+ report_type, HID_REQ_GET_REPORT);
+ memcpy(data, buf, count);
+ kfree(buf);
+ return ret;
+}
+
+static int cp2112_hid_output(struct hid_device *hdev, u8 *data, size_t count,
+ unsigned char report_type)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(data, count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (report_type == HID_OUTPUT_REPORT)
+ ret = hid_hw_output_report(hdev, buf, count);
+ else
+ ret = hid_hw_raw_request(hdev, buf[0], buf, count, report_type,
+ HID_REQ_SET_REPORT);
+
+ kfree(buf);
+ return ret;
+}
+
+static int cp2112_wait(struct cp2112_device *dev, atomic_t *avail)
+{
+ int ret = 0;
+
+ /* We have sent either a CP2112_TRANSFER_STATUS_REQUEST or a
+ * CP2112_DATA_READ_FORCE_SEND and we are waiting for the response to
+ * come in via cp2112_raw_event or to time out. There will only be one of
+ * these in flight at any one time. The timeout is extremely large and is
+ * a last resort if the CP2112 has died. If we do time out we don't expect
+ * to receive the response (a late one would cause data races), and there
+ * is nothing we could do about that anyway.
+ */
+ ret = wait_event_interruptible_timeout(dev->wait,
+ atomic_read(avail), msecs_to_jiffies(RESPONSE_TIMEOUT));
+ if (-ERESTARTSYS == ret)
+ return ret;
+ if (!ret)
+ return -ETIMEDOUT;
+
+ atomic_set(avail, 0);
+ return 0;
+}
+
+static int cp2112_xfer_status(struct cp2112_device *dev)
+{
+ struct hid_device *hdev = dev->hdev;
+ u8 buf[2];
+ int ret;
+
+ buf[0] = CP2112_TRANSFER_STATUS_REQUEST;
+ buf[1] = 0x01;
+ atomic_set(&dev->xfer_avail, 0);
+
+ ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT);
+ if (ret < 0) {
+ hid_warn(hdev, "Error requesting status: %d\n", ret);
+ return ret;
+ }
+
+ ret = cp2112_wait(dev, &dev->xfer_avail);
+ if (ret)
+ return ret;
+
+ return dev->xfer_status;
+}
+
+static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
+{
+ struct hid_device *hdev = dev->hdev;
+ struct cp2112_force_read_report report;
+ int ret;
+
+ report.report = CP2112_DATA_READ_FORCE_SEND;
+ report.length = cpu_to_be16(size);
+
+ atomic_set(&dev->read_avail, 0);
+
+ ret = cp2112_hid_output(hdev, &report.report, sizeof(report),
+ HID_OUTPUT_REPORT);
+ if (ret < 0) {
+ hid_warn(hdev, "Error requesting data: %d\n", ret);
+ return ret;
+ }
+
+ ret = cp2112_wait(dev, &dev->read_avail);
+ if (ret)
+ return ret;
+
+ hid_dbg(hdev, "read %d of %zd bytes requested\n",
+ dev->read_length, size);
+
+ if (size > dev->read_length)
+ size = dev->read_length;
+
+ memcpy(data, dev->read_data, size);
+ return dev->read_length;
+}
+
+static int cp2112_read_req(void *buf, u8 slave_address, u16 length)
+{
+ struct cp2112_read_req_report *report = buf;
+
+ if (length < 1 || length > 512)
+ return -EINVAL;
+
+ report->report = CP2112_DATA_READ_REQUEST;
+ report->slave_address = slave_address << 1;
+ report->length = cpu_to_be16(length);
+ return sizeof(*report);
+}
+
+static int cp2112_write_read_req(void *buf, u8 slave_address, u16 length,
+ u8 command, u8 *data, u8 data_length)
+{
+ struct cp2112_write_read_req_report *report = buf;
+
+ if (length < 1 || length > 512
+ || data_length > sizeof(report->target_address) - 1)
+ return -EINVAL;
+
+ report->report = CP2112_DATA_WRITE_READ_REQUEST;
+ report->slave_address = slave_address << 1;
+ report->length = cpu_to_be16(length);
+ report->target_address_length = data_length + 1;
+ report->target_address[0] = command;
+ memcpy(&report->target_address[1], data, data_length);
+ return data_length + 6;
+}
+
+static int cp2112_write_req(void *buf, u8 slave_address, u8 command, u8 *data,
+ u8 data_length)
+{
+ struct cp2112_write_req_report *report = buf;
+
+ if (data_length > sizeof(report->data) - 1)
+ return -EINVAL;
+
+ report->report = CP2112_DATA_WRITE_REQUEST;
+ report->slave_address = slave_address << 1;
+ report->length = data_length + 1;
+ report->data[0] = command;
+ memcpy(&report->data[1], data, data_length);
+ return data_length + 4;
+}
+
+static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data;
+ struct hid_device *hdev = dev->hdev;
+ u8 buf[64];
+ __be16 word;
+ ssize_t count;
+ size_t read_length = 0;
+ unsigned int retries;
+ int ret;
+
+ hid_dbg(hdev, "%s addr 0x%x flags 0x%x cmd 0x%x size %d\n",
+ read_write == I2C_SMBUS_WRITE ? "write" : "read",
+ addr, flags, command, size);
+
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ read_length = 1;
+
+ if (I2C_SMBUS_READ == read_write)
+ count = cp2112_read_req(buf, addr, read_length);
+ else
+ count = cp2112_write_req(buf, addr, data->byte, NULL,
+ 0);
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ read_length = 1;
+
+ if (I2C_SMBUS_READ == read_write)
+ count = cp2112_write_read_req(buf, addr, read_length,
+ command, NULL, 0);
+ else
+ count = cp2112_write_req(buf, addr, command,
+ &data->byte, 1);
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ read_length = 2;
+ word = cpu_to_be16(data->word);
+
+ if (I2C_SMBUS_READ == read_write)
+ count = cp2112_write_read_req(buf, addr, read_length,
+ command, NULL, 0);
+ else
+ count = cp2112_write_req(buf, addr, command,
+ (u8 *)&word, 2);
+ break;
+ case I2C_SMBUS_PROC_CALL:
+ size = I2C_SMBUS_WORD_DATA;
+ read_write = I2C_SMBUS_READ;
+ read_length = 2;
+ word = cpu_to_be16(data->word);
+
+ count = cp2112_write_read_req(buf, addr, read_length, command,
+ (u8 *)&word, 2);
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ size = I2C_SMBUS_BLOCK_DATA;
+ /* fallthrough */
+ case I2C_SMBUS_BLOCK_DATA:
+ if (I2C_SMBUS_READ == read_write) {
+ count = cp2112_write_read_req(buf, addr,
+ I2C_SMBUS_BLOCK_MAX,
+ command, NULL, 0);
+ } else {
+ count = cp2112_write_req(buf, addr, command,
+ data->block,
+ data->block[0] + 1);
+ }
+ break;
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ size = I2C_SMBUS_BLOCK_DATA;
+ read_write = I2C_SMBUS_READ;
+
+ count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX,
+ command, data->block,
+ data->block[0] + 1);
+ break;
+ default:
+ hid_warn(hdev, "Unsupported transaction %d\n", size);
+ return -EOPNOTSUPP;
+ }
+
+ if (count < 0)
+ return count;
+
+ ret = hid_hw_power(hdev, PM_HINT_FULLON);
+ if (ret < 0) {
+ hid_err(hdev, "power management error: %d\n", ret);
+ return ret;
+ }
+
+ ret = cp2112_hid_output(hdev, buf, count, HID_OUTPUT_REPORT);
+ if (ret < 0) {
+ hid_warn(hdev, "Error starting transaction: %d\n", ret);
+ goto power_normal;
+ }
+
+ for (retries = 0; retries < XFER_STATUS_RETRIES; ++retries) {
+ ret = cp2112_xfer_status(dev);
+ if (-EBUSY == ret)
+ continue;
+ if (ret < 0)
+ goto power_normal;
+ break;
+ }
+
+ if (XFER_STATUS_RETRIES <= retries) {
+ hid_warn(hdev, "Transfer timed out, cancelling.\n");
+ buf[0] = CP2112_CANCEL_TRANSFER;
+ buf[1] = 0x01;
+
+ ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT);
+ if (ret < 0)
+ hid_warn(hdev, "Error cancelling transaction: %d\n",
+ ret);
+
+ ret = -ETIMEDOUT;
+ goto power_normal;
+ }
+
+ if (I2C_SMBUS_WRITE == read_write) {
+ ret = 0;
+ goto power_normal;
+ }
+
+ if (I2C_SMBUS_BLOCK_DATA == size)
+ read_length = ret;
+
+ ret = cp2112_read(dev, buf, read_length);
+ if (ret < 0)
+ goto power_normal;
+ if (ret != read_length) {
+ hid_warn(hdev, "short read: %d < %zd\n", ret, read_length);
+ ret = -EIO;
+ goto power_normal;
+ }
+
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ case I2C_SMBUS_BYTE_DATA:
+ data->byte = buf[0];
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ data->word = be16_to_cpup((__be16 *)buf);
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (read_length > I2C_SMBUS_BLOCK_MAX) {
+ ret = -EPROTO;
+ goto power_normal;
+ }
+
+ memcpy(data->block, buf, read_length);
+ break;
+ }
+
+ ret = 0;
+power_normal:
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+ hid_dbg(hdev, "transfer finished: %d\n", ret);
+ return ret;
+}
+
+static u32 cp2112_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK |
+ I2C_FUNC_SMBUS_PROC_CALL |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
+}
+
+static const struct i2c_algorithm smbus_algorithm = {
+ .smbus_xfer = cp2112_xfer,
+ .functionality = cp2112_functionality,
+};
+
+static int cp2112_get_usb_config(struct hid_device *hdev,
+ struct cp2112_usb_config_report *cfg)
+{
+ int ret;
+
+ ret = cp2112_hid_get(hdev, CP2112_USB_CONFIG, (u8 *)cfg, sizeof(*cfg),
+ HID_FEATURE_REPORT);
+ if (ret != sizeof(*cfg)) {
+ hid_err(hdev, "error reading usb config: %d\n", ret);
+ if (ret < 0)
+ return ret;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int cp2112_set_usb_config(struct hid_device *hdev,
+ struct cp2112_usb_config_report *cfg)
+{
+ int ret;
+
+ BUG_ON(cfg->report != CP2112_USB_CONFIG);
+
+ ret = cp2112_hid_output(hdev, (u8 *)cfg, sizeof(*cfg),
+ HID_FEATURE_REPORT);
+ if (ret != sizeof(*cfg)) {
+ hid_err(hdev, "error writing usb config: %d\n", ret);
+ if (ret < 0)
+ return ret;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void chmod_sysfs_attrs(struct hid_device *hdev);
+
+#define CP2112_CONFIG_ATTR(name, store, format, ...) \
+static ssize_t name##_store(struct device *kdev, \
+ struct device_attribute *attr, const char *buf, \
+ size_t count) \
+{ \
+ struct hid_device *hdev = container_of(kdev, struct hid_device, dev); \
+ struct cp2112_usb_config_report cfg; \
+ int ret = cp2112_get_usb_config(hdev, &cfg); \
+ if (ret) \
+ return ret; \
+ store; \
+ ret = cp2112_set_usb_config(hdev, &cfg); \
+ if (ret) \
+ return ret; \
+ chmod_sysfs_attrs(hdev); \
+ return count; \
+} \
+static ssize_t name##_show(struct device *kdev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct hid_device *hdev = container_of(kdev, struct hid_device, dev); \
+ struct cp2112_usb_config_report cfg; \
+ int ret = cp2112_get_usb_config(hdev, &cfg); \
+ if (ret) \
+ return ret; \
+ return scnprintf(buf, PAGE_SIZE, format, ##__VA_ARGS__); \
+} \
+static DEVICE_ATTR_RW(name);
+
+CP2112_CONFIG_ATTR(vendor_id, ({
+ u16 vid;
+
+ if (sscanf(buf, "%hi", &vid) != 1)
+ return -EINVAL;
+
+ cfg.vid = cpu_to_le16(vid);
+ cfg.mask = 0x01;
+}), "0x%04x\n", le16_to_cpu(cfg.vid));
+
+CP2112_CONFIG_ATTR(product_id, ({
+ u16 pid;
+
+ if (sscanf(buf, "%hi", &pid) != 1)
+ return -EINVAL;
+
+ cfg.pid = cpu_to_le16(pid);
+ cfg.mask = 0x02;
+}), "0x%04x\n", le16_to_cpu(cfg.pid));
+
+CP2112_CONFIG_ATTR(max_power, ({
+ int mA;
+
+ if (sscanf(buf, "%i", &mA) != 1)
+ return -EINVAL;
+
+ cfg.max_power = (mA + 1) / 2;
+ cfg.mask = 0x04;
+}), "%u mA\n", cfg.max_power * 2);
+
+CP2112_CONFIG_ATTR(power_mode, ({
+ if (sscanf(buf, "%hhi", &cfg.power_mode) != 1)
+ return -EINVAL;
+
+ cfg.mask = 0x08;
+}), "%u\n", cfg.power_mode);
+
+CP2112_CONFIG_ATTR(release_version, ({
+ if (sscanf(buf, "%hhi.%hhi", &cfg.release_major, &cfg.release_minor)
+ != 2)
+ return -EINVAL;
+
+ cfg.mask = 0x10;
+}), "%u.%u\n", cfg.release_major, cfg.release_minor);
+
+#undef CP2112_CONFIG_ATTR
+
+struct cp2112_pstring_attribute {
+ struct device_attribute attr;
+ unsigned char report;
+};
+
+static ssize_t pstr_store(struct device *kdev,
+ struct device_attribute *kattr, const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = container_of(kdev, struct hid_device, dev);
+ struct cp2112_pstring_attribute *attr =
+ container_of(kattr, struct cp2112_pstring_attribute, attr);
+ struct cp2112_string_report report;
+ int ret;
+
+ memset(&report, 0, sizeof(report));
+
+ ret = utf8s_to_utf16s(buf, count, UTF16_LITTLE_ENDIAN,
+ report.string, ARRAY_SIZE(report.string));
+ report.report = attr->report;
+ report.length = ret * sizeof(report.string[0]) + 2;
+ report.type = USB_DT_STRING;
+
+ ret = cp2112_hid_output(hdev, &report.report, report.length + 1,
+ HID_FEATURE_REPORT);
+ if (ret != report.length + 1) {
+ hid_err(hdev, "error writing %s string: %d\n", kattr->attr.name,
+ ret);
+ if (ret < 0)
+ return ret;
+ return -EIO;
+ }
+
+ chmod_sysfs_attrs(hdev);
+ return count;
+}
+
+static ssize_t pstr_show(struct device *kdev,
+ struct device_attribute *kattr, char *buf)
+{
+ struct hid_device *hdev = container_of(kdev, struct hid_device, dev);
+ struct cp2112_pstring_attribute *attr =
+ container_of(kattr, struct cp2112_pstring_attribute, attr);
+ struct cp2112_string_report report;
+ u8 length;
+ int ret;
+
+ ret = cp2112_hid_get(hdev, attr->report, &report.report,
+ sizeof(report) - 1, HID_FEATURE_REPORT);
+ if (ret < 3) {
+ hid_err(hdev, "error reading %s string: %d\n", kattr->attr.name,
+ ret);
+ if (ret < 0)
+ return ret;
+ return -EIO;
+ }
+
+ if (report.length < 2) {
+ hid_err(hdev, "invalid %s string length: %d\n",
+ kattr->attr.name, report.length);
+ return -EIO;
+ }
+
+ length = report.length > ret - 1 ? ret - 1 : report.length;
+ length = (length - 2) / sizeof(report.string[0]);
+ ret = utf16s_to_utf8s(report.string, length, UTF16_LITTLE_ENDIAN, buf,
+ PAGE_SIZE - 1);
+ buf[ret++] = '\n';
+ return ret;
+}
+
+#define CP2112_PSTR_ATTR(name, _report) \
+static struct cp2112_pstring_attribute dev_attr_##name = { \
+ .attr = __ATTR(name, (S_IWUSR | S_IRUGO), pstr_show, pstr_store), \
+ .report = _report, \
+};
+
+CP2112_PSTR_ATTR(manufacturer, CP2112_MANUFACTURER_STRING);
+CP2112_PSTR_ATTR(product, CP2112_PRODUCT_STRING);
+CP2112_PSTR_ATTR(serial, CP2112_SERIAL_STRING);
+
+#undef CP2112_PSTR_ATTR
+
+static const struct attribute_group cp2112_attr_group = {
+ .attrs = (struct attribute *[]){
+ &dev_attr_vendor_id.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_max_power.attr,
+ &dev_attr_power_mode.attr,
+ &dev_attr_release_version.attr,
+ &dev_attr_manufacturer.attr.attr,
+ &dev_attr_product.attr.attr,
+ &dev_attr_serial.attr.attr,
+ NULL
+ }
+};
+
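Once cp2112_probe() registers this group on the HID device, the attributes normally show up under the device's sysfs directory, for example /sys/bus/hid/devices/0003:10C4:EA90.0001/vendor_id (the instance suffix varies); reading an attribute prints the current descriptor field, and writing one (echo 0x10c4 > vendor_id, say) programs it, after which the lock-byte handling below adjusts the file mode to reflect which fields have been programmed. The path is only a typical example, not something fixed by this patch.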
+/* Chmoding our sysfs attributes is simply a way to expose which fields in the
+ * PROM have already been programmed. We do not depend on this preventing
+ * writing to these attributes since the CP2112 will simply ignore writes to
+ * already-programmed fields. This is why there is no sense in fixing this
+ * racy behaviour.
+ */
+static void chmod_sysfs_attrs(struct hid_device *hdev)
+{
+ struct attribute **attr;
+ u8 buf[2];
+ int ret;
+
+ ret = cp2112_hid_get(hdev, CP2112_LOCK_BYTE, buf, sizeof(buf),
+ HID_FEATURE_REPORT);
+ if (ret != sizeof(buf)) {
+ hid_err(hdev, "error reading lock byte: %d\n", ret);
+ return;
+ }
+
+ for (attr = cp2112_attr_group.attrs; *attr; ++attr) {
+ umode_t mode = (buf[1] & 1) ? S_IWUSR | S_IRUGO : S_IRUGO;
+ ret = sysfs_chmod_file(&hdev->dev.kobj, *attr, mode);
+ if (ret < 0)
+ hid_err(hdev, "error chmoding sysfs file %s\n",
+ (*attr)->name);
+ buf[1] >>= 1;
+ }
+}
+
+static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct cp2112_device *dev;
+ u8 buf[3];
+ struct cp2112_smbus_config_report config;
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "hw open failed\n");
+ goto err_hid_stop;
+ }
+
+ ret = hid_hw_power(hdev, PM_HINT_FULLON);
+ if (ret < 0) {
+ hid_err(hdev, "power management error: %d\n", ret);
+ goto err_hid_close;
+ }
+
+ ret = cp2112_hid_get(hdev, CP2112_GET_VERSION_INFO, buf, sizeof(buf),
+ HID_FEATURE_REPORT);
+ if (ret != sizeof(buf)) {
+ hid_err(hdev, "error requesting version\n");
+ if (ret >= 0)
+ ret = -EIO;
+ goto err_power_normal;
+ }
+
+ hid_info(hdev, "Part Number: 0x%02X Device Version: 0x%02X\n",
+ buf[1], buf[2]);
+
+ ret = cp2112_hid_get(hdev, CP2112_SMBUS_CONFIG, (u8 *)&config,
+ sizeof(config), HID_FEATURE_REPORT);
+ if (ret != sizeof(config)) {
+ hid_err(hdev, "error requesting SMBus config\n");
+ if (ret >= 0)
+ ret = -EIO;
+ goto err_power_normal;
+ }
+
+ config.retry_time = cpu_to_be16(1);
+
+ ret = cp2112_hid_output(hdev, (u8 *)&config, sizeof(config),
+ HID_FEATURE_REPORT);
+ if (ret != sizeof(config)) {
+ hid_err(hdev, "error setting SMBus config\n");
+ if (ret >= 0)
+ ret = -EIO;
+ goto err_power_normal;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err_power_normal;
+ }
+
+ hid_set_drvdata(hdev, (void *)dev);
+ dev->hdev = hdev;
+ dev->adap.owner = THIS_MODULE;
+ dev->adap.class = I2C_CLASS_HWMON;
+ dev->adap.algo = &smbus_algorithm;
+ dev->adap.algo_data = dev;
+ dev->adap.dev.parent = &hdev->dev;
+ snprintf(dev->adap.name, sizeof(dev->adap.name),
+ "CP2112 SMBus Bridge on hiddev%d", hdev->minor);
+ init_waitqueue_head(&dev->wait);
+
+ hid_device_io_start(hdev);
+ ret = i2c_add_adapter(&dev->adap);
+ hid_device_io_stop(hdev);
+
+ if (ret) {
+ hid_err(hdev, "error registering i2c adapter\n");
+ goto err_free_dev;
+ }
+
+ hid_dbg(hdev, "adapter registered\n");
+
+ dev->gc.label = "cp2112_gpio";
+ dev->gc.direction_input = cp2112_gpio_direction_input;
+ dev->gc.direction_output = cp2112_gpio_direction_output;
+ dev->gc.set = cp2112_gpio_set;
+ dev->gc.get = cp2112_gpio_get;
+ dev->gc.base = -1;
+ dev->gc.ngpio = 8;
+ dev->gc.can_sleep = 1;
+ dev->gc.dev = &hdev->dev;
+
+ ret = gpiochip_add(&dev->gc);
+ if (ret < 0) {
+ hid_err(hdev, "error registering gpio chip\n");
+ goto err_free_i2c;
+ }
+
+ ret = sysfs_create_group(&hdev->dev.kobj, &cp2112_attr_group);
+ if (ret < 0) {
+ hid_err(hdev, "error creating sysfs attrs\n");
+ goto err_gpiochip_remove;
+ }
+
+ chmod_sysfs_attrs(hdev);
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+
+ return ret;
+
+err_gpiochip_remove:
+ if (gpiochip_remove(&dev->gc) < 0)
+ hid_err(hdev, "error removing gpio chip\n");
+err_free_i2c:
+ i2c_del_adapter(&dev->adap);
+err_free_dev:
+ kfree(dev);
+err_power_normal:
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+err_hid_close:
+ hid_hw_close(hdev);
+err_hid_stop:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void cp2112_remove(struct hid_device *hdev)
+{
+ struct cp2112_device *dev = hid_get_drvdata(hdev);
+
+ sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
+ if (gpiochip_remove(&dev->gc))
+ hid_err(hdev, "unable to remove gpio chip\n");
+ i2c_del_adapter(&dev->adap);
+ /* i2c_del_adapter has finished removing all i2c devices from our
+ * adapter. Well behaved devices should no longer call our cp2112_xfer
+ * and should have waited for any pending calls to finish. It has also
+ * waited for device_unregister(&adap->dev) to complete. Therefore we
+ * can safely free our struct cp2112_device.
+ */
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+ kfree(dev);
+}
+
+static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct cp2112_device *dev = hid_get_drvdata(hdev);
+ struct cp2112_xfer_status_report *xfer = (void *)data;
+
+ switch (data[0]) {
+ case CP2112_TRANSFER_STATUS_RESPONSE:
+ hid_dbg(hdev, "xfer status: %02x %02x %04x %04x\n",
+ xfer->status0, xfer->status1,
+ be16_to_cpu(xfer->retries), be16_to_cpu(xfer->length));
+
+ switch (xfer->status0) {
+ case STATUS0_IDLE:
+ dev->xfer_status = -EAGAIN;
+ break;
+ case STATUS0_BUSY:
+ dev->xfer_status = -EBUSY;
+ break;
+ case STATUS0_COMPLETE:
+ dev->xfer_status = be16_to_cpu(xfer->length);
+ break;
+ case STATUS0_ERROR:
+ switch (xfer->status1) {
+ case STATUS1_TIMEOUT_NACK:
+ case STATUS1_TIMEOUT_BUS:
+ dev->xfer_status = -ETIMEDOUT;
+ break;
+ default:
+ dev->xfer_status = -EIO;
+ break;
+ }
+ break;
+ default:
+ dev->xfer_status = -EINVAL;
+ break;
+ }
+
+ atomic_set(&dev->xfer_avail, 1);
+ break;
+ case CP2112_DATA_READ_RESPONSE:
+ hid_dbg(hdev, "read response: %02x %02x\n", data[1], data[2]);
+
+ dev->read_length = data[2];
+ if (dev->read_length > sizeof(dev->read_data))
+ dev->read_length = sizeof(dev->read_data);
+
+ memcpy(dev->read_data, &data[3], dev->read_length);
+ atomic_set(&dev->read_avail, 1);
+ break;
+ default:
+ hid_err(hdev, "unknown report\n");
+
+ return 0;
+ }
+
+ wake_up_interruptible(&dev->wait);
+ return 1;
+}
+
+static struct hid_driver cp2112_driver = {
+ .name = "cp2112",
+ .id_table = cp2112_devices,
+ .probe = cp2112_probe,
+ .remove = cp2112_remove,
+ .raw_event = cp2112_raw_event,
+};
+
+module_hid_driver(cp2112_driver);
+MODULE_DESCRIPTION("Silicon Labs HID USB to SMBus master bridge");
+MODULE_AUTHOR("David Barksdale <dbarksdale@uplogix.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index c24908f14934..f52dbcb7133b 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -460,12 +460,22 @@ static void mousevsc_hid_stop(struct hid_device *hid)
{
}
+static int mousevsc_hid_raw_request(struct hid_device *hid,
+ unsigned char report_num,
+ __u8 *buf, size_t len,
+ unsigned char rtype,
+ int reqtype)
+{
+ return 0;
+}
+
static struct hid_ll_driver mousevsc_ll_driver = {
.parse = mousevsc_hid_parse,
.open = mousevsc_hid_open,
.close = mousevsc_hid_close,
.start = mousevsc_hid_start,
.stop = mousevsc_hid_stop,
+ .raw_request = mousevsc_hid_raw_request,
};
static struct hid_driver mousevsc_hid_driver;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 22f28d6b33a8..bd221263c739 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -67,6 +67,9 @@
#define USB_VENDOR_ID_ALPS 0x0433
#define USB_DEVICE_ID_IBM_GAMEPAD 0x1101
+#define USB_VENDOR_ID_ANTON 0x1130
+#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
+
#define USB_VENDOR_ID_APPLE 0x05ac
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
@@ -242,6 +245,7 @@
#define USB_VENDOR_ID_CYGNAL 0x10c4
#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9
+#define USB_DEVICE_ID_CYGNAL_CP2112 0xea90
#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
@@ -616,6 +620,7 @@
#define USB_VENDOR_ID_MICROSOFT 0x045e
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
+#define USB_DEVICE_ID_MS_OFFICE_KB 0x0048
#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
#define USB_DEVICE_ID_MS_NE4K 0x00db
#define USB_DEVICE_ID_MS_NE4K_JP 0x00dc
@@ -961,4 +966,7 @@
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */
+#define USB_DEVICE_ID_RI_KA_WEBMAIL 0x1320 /* Webmail Notifier */
+
#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a713e6211419..e7e8b19a9284 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -350,9 +350,9 @@ static int hidinput_get_battery_property(struct power_supply *psy,
ret = -ENOMEM;
break;
}
- ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
- buf, 2,
- dev->battery_report_type);
+ ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2,
+ dev->battery_report_type,
+ HID_REQ_GET_REPORT);
if (ret != 2) {
ret = -ENODATA;
@@ -789,10 +789,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x199: map_key_clear(KEY_CHAT); break;
case 0x19c: map_key_clear(KEY_LOGOFF); break;
case 0x19e: map_key_clear(KEY_COFFEE); break;
+ case 0x1a3: map_key_clear(KEY_NEXT); break;
+ case 0x1a4: map_key_clear(KEY_PREVIOUS); break;
case 0x1a6: map_key_clear(KEY_HELP); break;
case 0x1a7: map_key_clear(KEY_DOCUMENTS); break;
case 0x1ab: map_key_clear(KEY_SPELLCHECK); break;
case 0x1ae: map_key_clear(KEY_KEYBOARD); break;
+ case 0x1b4: map_key_clear(KEY_FILE); break;
case 0x1b6: map_key_clear(KEY_IMAGES); break;
case 0x1b7: map_key_clear(KEY_AUDIO); break;
case 0x1b8: map_key_clear(KEY_VIDEO); break;
@@ -1150,7 +1153,7 @@ static void hidinput_led_worker(struct work_struct *work)
led_work);
struct hid_field *field;
struct hid_report *report;
- int len;
+ int len, ret;
__u8 *buf;
field = hidinput_get_led_field(hid);
@@ -1184,7 +1187,10 @@ static void hidinput_led_worker(struct work_struct *work)
hid_output_report(report, buf);
/* synchronous output report */
- hid->hid_output_raw_report(hid, buf, len, HID_OUTPUT_REPORT);
+ ret = hid_hw_output_report(hid, buf, len);
+ if (ret == -ENOSYS)
+ hid_hw_raw_request(hid, report->id, buf, len, HID_OUTPUT_REPORT,
+ HID_REQ_SET_REPORT);
kfree(buf);
}
@@ -1263,10 +1269,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid)
}
input_set_drvdata(input_dev, hid);
- if (hid->ll_driver->hidinput_input_event)
- input_dev->event = hid->ll_driver->hidinput_input_event;
- else if (hid->ll_driver->request || hid->hid_output_raw_report)
- input_dev->event = hidinput_input_event;
+ input_dev->event = hidinput_input_event;
input_dev->open = hidinput_open;
input_dev->close = hidinput_close;
input_dev->setkeycode = hidinput_setkeycode;
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 9fe9d4ac3114..a976f48263f6 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -692,7 +692,8 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
- ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret >= 0) {
/* insert a little delay of 10 jiffies ~ 40ms */
@@ -704,7 +705,8 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
buf[1] = 0xB2;
get_random_bytes(&buf[2], 2);
- ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
}
}
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index f45279c3b11a..486dbde2ba2d 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -44,14 +44,6 @@ static const char kbd_descriptor[] = {
0x19, 0xE0, /* USAGE_MINIMUM (Left Control) */
0x29, 0xE7, /* USAGE_MAXIMUM (Right GUI) */
0x81, 0x02, /* INPUT (Data,Var,Abs) */
- 0x95, 0x05, /* REPORT COUNT (5) */
- 0x05, 0x08, /* USAGE PAGE (LED page) */
- 0x19, 0x01, /* USAGE MINIMUM (1) */
- 0x29, 0x05, /* USAGE MAXIMUM (5) */
- 0x91, 0x02, /* OUTPUT (Data, Variable, Absolute) */
- 0x95, 0x01, /* REPORT COUNT (1) */
- 0x75, 0x03, /* REPORT SIZE (3) */
- 0x91, 0x01, /* OUTPUT (Constant) */
0x95, 0x06, /* REPORT_COUNT (6) */
0x75, 0x08, /* REPORT_SIZE (8) */
0x15, 0x00, /* LOGICAL_MINIMUM (0) */
@@ -60,6 +52,18 @@ static const char kbd_descriptor[] = {
0x19, 0x00, /* USAGE_MINIMUM (no event) */
0x2A, 0xFF, 0x00, /* USAGE_MAXIMUM (reserved) */
0x81, 0x00, /* INPUT (Data,Ary,Abs) */
+ 0x85, 0x0e, /* REPORT_ID (14) */
+ 0x05, 0x08, /* USAGE PAGE (LED page) */
+ 0x95, 0x05, /* REPORT COUNT (5) */
+ 0x75, 0x01, /* REPORT SIZE (1) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
+ 0x19, 0x01, /* USAGE MINIMUM (1) */
+ 0x29, 0x05, /* USAGE MAXIMUM (5) */
+ 0x91, 0x02, /* OUTPUT (Data, Variable, Absolute) */
+ 0x95, 0x01, /* REPORT COUNT (1) */
+ 0x75, 0x03, /* REPORT SIZE (3) */
+ 0x91, 0x01, /* OUTPUT (Constant) */
0xC0
};
@@ -189,9 +193,6 @@ static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = {
static struct hid_ll_driver logi_dj_ll_driver;
-static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
- size_t count,
- unsigned char report_type);
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
@@ -258,7 +259,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
}
dj_hiddev->ll_driver = &logi_dj_ll_driver;
- dj_hiddev->hid_output_raw_report = logi_dj_output_hidraw_report;
dj_hiddev->dev.parent = &djrcv_hdev->dev;
dj_hiddev->bus = BUS_USB;
@@ -540,14 +540,35 @@ static void logi_dj_ll_close(struct hid_device *hid)
dbg_hid("%s:%s\n", __func__, hid->phys);
}
-static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
- size_t count,
- unsigned char report_type)
+static int logi_dj_ll_raw_request(struct hid_device *hid,
+ unsigned char reportnum, __u8 *buf,
+ size_t count, unsigned char report_type,
+ int reqtype)
{
- /* Called by hid raw to send data */
- dbg_hid("%s\n", __func__);
+ struct dj_device *djdev = hid->driver_data;
+ struct dj_receiver_dev *djrcv_dev = djdev->dj_receiver_dev;
+ u8 *out_buf;
+ int ret;
- return 0;
+ if (buf[0] != REPORT_TYPE_LEDS)
+ return -EINVAL;
+
+ out_buf = kzalloc(DJREPORT_SHORT_LENGTH, GFP_ATOMIC);
+ if (!out_buf)
+ return -ENOMEM;
+
+ if (count < DJREPORT_SHORT_LENGTH - 2)
+ count = DJREPORT_SHORT_LENGTH - 2;
+
+ out_buf[0] = REPORT_ID_DJ_SHORT;
+ out_buf[1] = djdev->device_index;
+ memcpy(out_buf + 2, buf, count);
+
+ ret = hid_hw_raw_request(djrcv_dev->hdev, out_buf[0], out_buf,
+ DJREPORT_SHORT_LENGTH, report_type, reqtype);
+
+ kfree(out_buf);
+ return ret;
}
static void rdcat(char *rdesc, unsigned int *rsize, const char *data, unsigned int size)
@@ -613,58 +634,6 @@ static int logi_dj_ll_parse(struct hid_device *hid)
return retval;
}
-static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
- unsigned int code, int value)
-{
- /* Sent by the input layer to handle leds and Force Feedback */
- struct hid_device *dj_hiddev = input_get_drvdata(dev);
- struct dj_device *dj_dev = dj_hiddev->driver_data;
-
- struct dj_receiver_dev *djrcv_dev =
- dev_get_drvdata(dj_hiddev->dev.parent);
- struct hid_device *dj_rcv_hiddev = djrcv_dev->hdev;
- struct hid_report_enum *output_report_enum;
-
- struct hid_field *field;
- struct hid_report *report;
- unsigned char *data;
- int offset;
-
- dbg_hid("%s: %s, type:%d | code:%d | value:%d\n",
- __func__, dev->phys, type, code, value);
-
- if (type != EV_LED)
- return -1;
-
- offset = hidinput_find_field(dj_hiddev, type, code, &field);
-
- if (offset == -1) {
- dev_warn(&dev->dev, "event field not found\n");
- return -1;
- }
- hid_set_field(field, offset, value);
-
- data = hid_alloc_report_buf(field->report, GFP_ATOMIC);
- if (!data) {
- dev_warn(&dev->dev, "failed to allocate report buf memory\n");
- return -1;
- }
-
- hid_output_report(field->report, &data[0]);
-
- output_report_enum = &dj_rcv_hiddev->report_enum[HID_OUTPUT_REPORT];
- report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
- hid_set_field(report->field[0], 0, dj_dev->device_index);
- hid_set_field(report->field[0], 1, REPORT_TYPE_LEDS);
- hid_set_field(report->field[0], 2, data[1]);
-
- hid_hw_request(dj_rcv_hiddev, report, HID_REQ_SET_REPORT);
-
- kfree(data);
-
- return 0;
-}
-
static int logi_dj_ll_start(struct hid_device *hid)
{
dbg_hid("%s\n", __func__);
@@ -683,7 +652,7 @@ static struct hid_ll_driver logi_dj_ll_driver = {
.stop = logi_dj_ll_stop,
.open = logi_dj_ll_open,
.close = logi_dj_ll_close,
- .hidinput_input_event = logi_dj_ll_input_event,
+ .raw_request = logi_dj_ll_raw_request,
};
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 3b43d1cfa936..ecc2cbf300cc 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -538,8 +538,8 @@ static int magicmouse_probe(struct hid_device *hdev,
* but there seems to be no other way of switching the mode.
* Thus the super-ugly hacky success check below.
*/
- ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
- HID_FEATURE_REPORT);
+ ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret != -EIO && ret != sizeof(feature)) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 404a3a8a82f1..6fd58175a291 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -62,9 +62,48 @@ static int ms_ergonomy_kb_quirk(struct hid_input *hi, struct hid_usage *usage,
{
struct input_dev *input = hi->input;
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) {
+ switch (usage->hid & HID_USAGE) {
+ /*
+ * Microsoft uses these 2 reserved usage ids for 2 keys on
+ * the MS office kb labelled "Office Home" and "Task Pane".
+ */
+ case 0x29d:
+ ms_map_key_clear(KEY_PROG1);
+ return 1;
+ case 0x29e:
+ ms_map_key_clear(KEY_PROG2);
+ return 1;
+ }
+ return 0;
+ }
+
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_MSVENDOR)
+ return 0;
+
switch (usage->hid & HID_USAGE) {
case 0xfd06: ms_map_key_clear(KEY_CHAT); break;
case 0xfd07: ms_map_key_clear(KEY_PHONE); break;
+ case 0xff00:
+ /* Special keypad keys */
+ ms_map_key_clear(KEY_KPEQUAL);
+ set_bit(KEY_KPLEFTPAREN, input->keybit);
+ set_bit(KEY_KPRIGHTPAREN, input->keybit);
+ break;
+ case 0xff01:
+ /* Scroll wheel */
+ hid_map_usage_clear(hi, usage, bit, max, EV_REL, REL_WHEEL);
+ break;
+ case 0xff02:
+ /*
+ * This byte contains a copy of the modifier keys byte of a
+ * standard hid keyboard report, as sent by interface 0
+ * (this usage is found on interface 1).
+ *
+ * This byte only gets sent when another key in the same report
+ * changes state, and as such is useless; ignore it.
+ */
+ return -1;
case 0xff05:
set_bit(EV_REP, input->evbit);
ms_map_key_clear(KEY_F13);
@@ -83,6 +122,9 @@ static int ms_ergonomy_kb_quirk(struct hid_input *hi, struct hid_usage *usage,
static int ms_presenter_8k_quirk(struct hid_input *hi, struct hid_usage *usage,
unsigned long **bit, int *max)
{
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_MSVENDOR)
+ return 0;
+
set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
case 0xfd08: ms_map_key_clear(KEY_FORWARD); break;
@@ -102,9 +144,6 @@ static int ms_input_mapping(struct hid_device *hdev, struct hid_input *hi,
{
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
- if ((usage->hid & HID_USAGE_PAGE) != HID_UP_MSVENDOR)
- return 0;
-
if (quirks & MS_ERGONOMY) {
int ret = ms_ergonomy_kb_quirk(hi, usage, bit, max);
if (ret)
@@ -134,14 +173,39 @@ static int ms_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct input_dev *input;
if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
!usage->type)
return 0;
+ input = field->hidinput->input;
+
/* Handling MS keyboards special buttons */
+ if (quirks & MS_ERGONOMY && usage->hid == (HID_UP_MSVENDOR | 0xff00)) {
+ /* Special keypad keys */
+ input_report_key(input, KEY_KPEQUAL, value & 0x01);
+ input_report_key(input, KEY_KPLEFTPAREN, value & 0x02);
+ input_report_key(input, KEY_KPRIGHTPAREN, value & 0x04);
+ return 1;
+ }
+
+ if (quirks & MS_ERGONOMY && usage->hid == (HID_UP_MSVENDOR | 0xff01)) {
+ /* Scroll wheel */
+ int step = ((value & 0x60) >> 5) + 1;
+
+ switch (value & 0x1f) {
+ case 0x01:
+ input_report_rel(input, REL_WHEEL, step);
+ break;
+ case 0x1f:
+ input_report_rel(input, REL_WHEEL, -step);
+ break;
+ }
+ return 1;
+ }
+
if (quirks & MS_ERGONOMY && usage->hid == (HID_UP_MSVENDOR | 0xff05)) {
- struct input_dev *input = field->hidinput->input;
static unsigned int last_key = 0;
unsigned int key = 0;
switch (value) {
@@ -194,6 +258,8 @@ err_free:
static const struct hid_device_id ms_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV),
.driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB),
+ .driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K),
.driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 221d503f1c24..35278e43c7a4 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -68,6 +68,9 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_HOVERING (1 << 11)
#define MT_QUIRK_CONTACT_CNT_ACCURATE (1 << 12)
+#define MT_INPUTMODE_TOUCHSCREEN 0x02
+#define MT_INPUTMODE_TOUCHPAD 0x03
+
struct mt_slot {
__s32 x, y, cx, cy, p, w, h;
__s32 contactid; /* the device ContactID assigned to this slot */
@@ -84,6 +87,7 @@ struct mt_class {
__s32 sn_pressure; /* Signal/noise ratio for pressure events */
__u8 maxcontacts;
bool is_indirect; /* true for touchpads */
+ bool export_all_inputs; /* do not ignore mouse, keyboards, etc... */
};
struct mt_fields {
@@ -100,11 +104,11 @@ struct mt_device {
int cc_value_index; /* contact count value index in the field */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
- unsigned pen_report_id; /* the report ID of the pen device */
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
__s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
-1 if non-existent */
+ __u8 inputmode_value; /* InputMode HID feature value */
__u8 num_received; /* how many contacts we received */
__u8 num_expected; /* expected last contact index */
__u8 maxcontacts;
@@ -128,16 +132,17 @@ static void mt_post_parse(struct mt_device *td);
#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0005
#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0006
#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0007
-#define MT_CLS_DUAL_NSMU_CONTACTID 0x0008
+/* reserved 0x0008 */
#define MT_CLS_INRANGE_CONTACTNUMBER 0x0009
#define MT_CLS_NSMU 0x000a
-#define MT_CLS_DUAL_CONTACT_NUMBER 0x0010
-#define MT_CLS_DUAL_CONTACT_ID 0x0011
+/* reserved 0x0010 */
+/* reserved 0x0011 */
#define MT_CLS_WIN_8 0x0012
+#define MT_CLS_EXPORT_ALL_INPUTS 0x0013
/* vendor specific classes */
#define MT_CLS_3M 0x0101
-#define MT_CLS_CYPRESS 0x0102
+/* reserved 0x0102 */
#define MT_CLS_EGALAX 0x0103
#define MT_CLS_EGALAX_SERIAL 0x0104
#define MT_CLS_TOPSEED 0x0105
@@ -189,28 +194,18 @@ static struct mt_class mt_classes[] = {
.quirks = MT_QUIRK_VALID_IS_INRANGE |
MT_QUIRK_SLOT_IS_CONTACTNUMBER,
.maxcontacts = 2 },
- { .name = MT_CLS_DUAL_NSMU_CONTACTID,
- .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
- MT_QUIRK_SLOT_IS_CONTACTID,
- .maxcontacts = 2 },
{ .name = MT_CLS_INRANGE_CONTACTNUMBER,
.quirks = MT_QUIRK_VALID_IS_INRANGE |
MT_QUIRK_SLOT_IS_CONTACTNUMBER },
- { .name = MT_CLS_DUAL_CONTACT_NUMBER,
- .quirks = MT_QUIRK_ALWAYS_VALID |
- MT_QUIRK_CONTACT_CNT_ACCURATE |
- MT_QUIRK_SLOT_IS_CONTACTNUMBER,
- .maxcontacts = 2 },
- { .name = MT_CLS_DUAL_CONTACT_ID,
- .quirks = MT_QUIRK_ALWAYS_VALID |
- MT_QUIRK_CONTACT_CNT_ACCURATE |
- MT_QUIRK_SLOT_IS_CONTACTID,
- .maxcontacts = 2 },
{ .name = MT_CLS_WIN_8,
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_IGNORE_DUPLICATES |
MT_QUIRK_HOVERING |
MT_QUIRK_CONTACT_CNT_ACCURATE },
+ { .name = MT_CLS_EXPORT_ALL_INPUTS,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE,
+ .export_all_inputs = true },
/*
* vendor specific classes
@@ -223,10 +218,6 @@ static struct mt_class mt_classes[] = {
.sn_height = 128,
.maxcontacts = 60,
},
- { .name = MT_CLS_CYPRESS,
- .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
- MT_QUIRK_CYPRESS,
- .maxcontacts = 10 },
{ .name = MT_CLS_EGALAX,
.quirks = MT_QUIRK_SLOT_IS_CONTACTID |
MT_QUIRK_VALID_IS_INRANGE,
@@ -360,45 +351,6 @@ static void mt_store_field(struct hid_usage *usage, struct mt_device *td,
f->usages[f->length++] = usage->hid;
}
-static int mt_pen_input_mapping(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- struct mt_device *td = hid_get_drvdata(hdev);
-
- td->pen_report_id = field->report->id;
-
- return 0;
-}
-
-static int mt_pen_input_mapped(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- return 0;
-}
-
-static int mt_pen_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
-{
- /* let hid-input handle it */
- return 0;
-}
-
-static void mt_pen_report(struct hid_device *hid, struct hid_report *report)
-{
- struct hid_field *field = report->field[0];
-
- input_sync(field->hidinput->input);
-}
-
-static void mt_pen_input_configured(struct hid_device *hdev,
- struct hid_input *hi)
-{
- /* force BTN_STYLUS to allow tablet matching in udev */
- __set_bit(BTN_STYLUS, hi->input->keybit);
-}
-
static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
@@ -415,8 +367,10 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
* Model touchscreens providing buttons as touchpads.
*/
if (field->application == HID_DG_TOUCHPAD ||
- (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON)
+ (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
td->mt_flags |= INPUT_MT_POINTER;
+ td->inputmode_value = MT_INPUTMODE_TOUCHPAD;
+ }
if (usage->usage_index)
prev_usage = &field->usage[usage->usage_index - 1];
@@ -776,28 +730,52 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- /* Only map fields from TouchScreen or TouchPad collections.
- * We need to ignore fields that belong to other collections
- * such as Mouse that might have the same GenericDesktop usages. */
- if (field->application != HID_DG_TOUCHSCREEN &&
+ struct mt_device *td = hid_get_drvdata(hdev);
+
+ /*
+ * If mtclass.export_all_inputs is not set, only map fields from
+ * TouchScreen or TouchPad collections. We need to ignore fields
+ * that belong to other collections such as Mouse that might have
+ * the same GenericDesktop usages.
+ */
+ if (!td->mtclass.export_all_inputs &&
+ field->application != HID_DG_TOUCHSCREEN &&
field->application != HID_DG_PEN &&
field->application != HID_DG_TOUCHPAD)
return -1;
+ /*
+ * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
+ * for the stylus.
+ */
if (field->physical == HID_DG_STYLUS)
- return mt_pen_input_mapping(hdev, hi, field, usage, bit, max);
+ return 0;
- return mt_touch_input_mapping(hdev, hi, field, usage, bit, max);
+ if (field->application == HID_DG_TOUCHSCREEN ||
+ field->application == HID_DG_TOUCHPAD)
+ return mt_touch_input_mapping(hdev, hi, field, usage, bit, max);
+
+ /* let hid-core decide for the others */
+ return 0;
}
static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
+ /*
+ * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
+ * for the stylus.
+ */
if (field->physical == HID_DG_STYLUS)
- return mt_pen_input_mapped(hdev, hi, field, usage, bit, max);
+ return 0;
- return mt_touch_input_mapped(hdev, hi, field, usage, bit, max);
+ if (field->application == HID_DG_TOUCHSCREEN ||
+ field->application == HID_DG_TOUCHPAD)
+ return mt_touch_input_mapped(hdev, hi, field, usage, bit, max);
+
+ /* let hid-core decide for the others */
+ return 0;
}
static int mt_event(struct hid_device *hid, struct hid_field *field,
@@ -808,25 +786,22 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
if (field->report->id == td->mt_report_id)
return mt_touch_event(hid, field, usage, value);
- if (field->report->id == td->pen_report_id)
- return mt_pen_event(hid, field, usage, value);
-
- /* ignore other reports */
- return 1;
+ return 0;
}
static void mt_report(struct hid_device *hid, struct hid_report *report)
{
struct mt_device *td = hid_get_drvdata(hid);
+ struct hid_field *field = report->field[0];
if (!(hid->claimed & HID_CLAIMED_INPUT))
return;
if (report->id == td->mt_report_id)
- mt_touch_report(hid, report);
+ return mt_touch_report(hid, report);
- if (report->id == td->pen_report_id)
- mt_pen_report(hid, report);
+ if (field && field->hidinput && field->hidinput->input)
+ input_sync(field->hidinput->input);
}
static void mt_set_input_mode(struct hid_device *hdev)
@@ -841,7 +816,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
re = &(hdev->report_enum[HID_FEATURE_REPORT]);
r = re->report_id_hash[td->inputmode];
if (r) {
- r->field[0]->value[td->inputmode_index] = 0x02;
+ r->field[0]->value[td->inputmode_index] = td->inputmode_value;
hid_hw_request(hdev, r, HID_REQ_SET_REPORT);
}
}
@@ -907,13 +882,49 @@ static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
struct mt_device *td = hid_get_drvdata(hdev);
char *name;
const char *suffix = NULL;
+ struct hid_field *field = hi->report->field[0];
if (hi->report->id == td->mt_report_id)
mt_touch_input_configured(hdev, hi);
+ /*
+ * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
+ * for the stylus. Check this first, and then rely on the application
+ * field.
+ */
if (hi->report->field[0]->physical == HID_DG_STYLUS) {
suffix = "Pen";
- mt_pen_input_configured(hdev, hi);
+ /* force BTN_STYLUS to allow tablet matching in udev */
+ __set_bit(BTN_STYLUS, hi->input->keybit);
+ } else {
+ switch (field->application) {
+ case HID_GD_KEYBOARD:
+ suffix = "Keyboard";
+ break;
+ case HID_GD_KEYPAD:
+ suffix = "Keypad";
+ break;
+ case HID_GD_MOUSE:
+ suffix = "Mouse";
+ break;
+ case HID_DG_STYLUS:
+ suffix = "Pen";
+ /* force BTN_STYLUS to allow tablet matching in udev */
+ __set_bit(BTN_STYLUS, hi->input->keybit);
+ break;
+ case HID_DG_TOUCHSCREEN:
+ /* we do not set suffix = "Touchscreen" */
+ break;
+ case HID_GD_SYSTEM_CONTROL:
+ suffix = "System Control";
+ break;
+ case HID_CP_CONSUMER_CONTROL:
+ suffix = "Consumer Control";
+ break;
+ default:
+ suffix = "UNKNOWN";
+ break;
+ }
}
if (suffix) {
@@ -973,9 +984,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
td->mtclass = *mtclass;
td->inputmode = -1;
td->maxcontact_report_id = -1;
+ td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
td->cc_index = -1;
td->mt_report_id = -1;
- td->pen_report_id = -1;
hid_set_drvdata(hdev, td);
td->fields = devm_kzalloc(&hdev->dev, sizeof(struct mt_fields),
@@ -1034,6 +1045,12 @@ static void mt_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+/*
+ * This list contains only:
+ * - VID/PID of products not working with the default multitouch handling
+ * - 2 generic rules.
+ * So there is no point in adding here any device with MT_CLS_DEFAULT.
+ */
static const struct hid_device_id mt_devices[] = {
/* 3M panels */
@@ -1047,33 +1064,25 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_3M,
USB_DEVICE_ID_3M3266) },
- /* ActionStar panels */
- { .driver_data = MT_CLS_NSMU,
- MT_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
- USB_DEVICE_ID_ACTIONSTAR_1011) },
+ /* Anton devices */
+ { .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
+ MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
+ USB_DEVICE_ID_ANTON_TOUCH_PAD) },
/* Atmel panels */
{ .driver_data = MT_CLS_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_ATMEL,
- USB_DEVICE_ID_ATMEL_MULTITOUCH) },
- { .driver_data = MT_CLS_SERIAL,
- MT_USB_DEVICE(USB_VENDOR_ID_ATMEL,
USB_DEVICE_ID_ATMEL_MXT_DIGITIZER) },
/* Baanto multitouch devices */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_BAANTO,
USB_DEVICE_ID_BAANTO_MT_190W2) },
+
/* Cando panels */
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
- { .driver_data = MT_CLS_DUAL_CONTACT_NUMBER,
- MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
- USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
- { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
- MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
- USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
@@ -1088,16 +1097,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
USB_DEVICE_ID_CVTOUCH_SCREEN) },
- /* Cypress panel */
- { .driver_data = MT_CLS_CYPRESS,
- HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS,
- USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
-
- /* Data Modul easyMaxTouch */
- { .driver_data = MT_CLS_DEFAULT,
- MT_USB_DEVICE(USB_VENDOR_ID_DATA_MODUL,
- USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH) },
-
/* eGalax devices (resistive) */
{ .driver_data = MT_CLS_EGALAX,
MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
@@ -1156,11 +1155,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
- /* Elo TouchSystems IntelliTouch Plus panel */
- { .driver_data = MT_CLS_DUAL_CONTACT_ID,
- MT_USB_DEVICE(USB_VENDOR_ID_ELO,
- USB_DEVICE_ID_ELO_TS2515) },
-
/* Flatfrog Panels */
{ .driver_data = MT_CLS_FLATFROG,
MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
@@ -1209,37 +1203,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
- /* Ideacom panel */
- { .driver_data = MT_CLS_SERIAL,
- MT_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
- USB_DEVICE_ID_IDEACOM_IDC6650) },
- { .driver_data = MT_CLS_SERIAL,
- MT_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
- USB_DEVICE_ID_IDEACOM_IDC6651) },
-
/* Ilitek dual touch panel */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
USB_DEVICE_ID_ILITEK_MULTITOUCH) },
- /* IRTOUCH panels */
- { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
- MT_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS,
- USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
-
- /* LG Display panels */
- { .driver_data = MT_CLS_DEFAULT,
- MT_USB_DEVICE(USB_VENDOR_ID_LG,
- USB_DEVICE_ID_LG_MULTITOUCH) },
-
- /* Lumio panels */
- { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
- MT_USB_DEVICE(USB_VENDOR_ID_LUMIO,
- USB_DEVICE_ID_CRYSTALTOUCH) },
- { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
- MT_USB_DEVICE(USB_VENDOR_ID_LUMIO,
- USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
-
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
@@ -1251,11 +1219,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_TURBOX,
USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
- /* Nexio panels */
- { .driver_data = MT_CLS_DEFAULT,
- MT_USB_DEVICE(USB_VENDOR_ID_NEXIO,
- USB_DEVICE_ID_NEXIO_MULTITOUCH_420)},
-
/* Panasonic panels */
{ .driver_data = MT_CLS_PANASONIC,
MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
@@ -1269,11 +1232,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
USB_DEVICE_ID_NOVATEK_PCT) },
- /* PenMount panels */
- { .driver_data = MT_CLS_CONFIDENCE,
- MT_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
- USB_DEVICE_ID_PENMOUNT_PCI) },
-
/* PixArt optical touch screen */
{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
@@ -1287,44 +1245,18 @@ static const struct hid_device_id mt_devices[] = {
/* PixCir-based panels */
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
- MT_USB_DEVICE(USB_VENDOR_ID_HANVON,
- USB_DEVICE_ID_HANVON_MULTITOUCH) },
- { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
/* Quanta-based panels */
{ .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
MT_USB_DEVICE(USB_VENDOR_ID_QUANTA,
- USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
- { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
- MT_USB_DEVICE(USB_VENDOR_ID_QUANTA,
USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001) },
- { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
- MT_USB_DEVICE(USB_VENDOR_ID_QUANTA,
- USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) },
-
- /* SiS panels */
- { .driver_data = MT_CLS_DEFAULT,
- HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH,
- USB_DEVICE_ID_SIS9200_TOUCH) },
- { .driver_data = MT_CLS_DEFAULT,
- HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH,
- USB_DEVICE_ID_SIS817_TOUCH) },
- { .driver_data = MT_CLS_DEFAULT,
- HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH,
- USB_DEVICE_ID_SIS1030_TOUCH) },
/* Stantum panels */
{ .driver_data = MT_CLS_CONFIDENCE,
- MT_USB_DEVICE(USB_VENDOR_ID_STANTUM,
- USB_DEVICE_ID_MTP)},
- { .driver_data = MT_CLS_CONFIDENCE,
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
USB_DEVICE_ID_MTP_STM)},
- { .driver_data = MT_CLS_DEFAULT,
- MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
- USB_DEVICE_ID_MTP_SITRONIX)},
/* TopSeed panels */
{ .driver_data = MT_CLS_TOPSEED,
@@ -1383,11 +1315,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR2) },
- /* Zytronic panels */
- { .driver_data = MT_CLS_SERIAL,
- MT_USB_DEVICE(USB_VENDOR_ID_ZYTRONIC,
- USB_DEVICE_ID_ZYTRONIC_ZXY100) },
-
/* Generic MT device */
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
index 59d5eb1e742c..cf1a9f1c1217 100644
--- a/drivers/hid/hid-picolcd_cir.c
+++ b/drivers/hid/hid-picolcd_cir.c
@@ -114,7 +114,7 @@ int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
rdev->priv = data;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rdev, RC_BIT_ALL);
rdev->open = picolcd_cir_open;
rdev->close = picolcd_cir_close;
rdev->input_name = data->hdev->name;
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 7ed828056414..91fab975063c 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -624,7 +624,8 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
/* Setup sound card */
- err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
+ err = snd_card_new(&pm->pk->hdev->dev, index[dev], id[dev],
+ THIS_MODULE, 0, &card);
if (err < 0) {
pk_error("failed to create pc-midi sound card\n");
err = -ENOMEM;
@@ -660,8 +661,6 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT,
&pcmidi_in_ops);
- snd_card_set_dev(card, &pm->pk->hdev->dev);
-
/* create sysfs variables */
err = device_create_file(&pm->pk->hdev->dev,
sysfs_device_attr_channel);
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 9c22e14c57f0..5182031f7b52 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -56,9 +56,9 @@ struct sensor_hub_pending {
* @dyn_callback_lock: spin lock to protect callback list
* @hid_sensor_hub_client_devs: Stores all MFD cells for a hub instance.
* @hid_sensor_client_cnt: Number of MFD cells, (no of sensors attached).
+ * @ref_cnt: Number of MFD clients that have opened this device
*/
struct sensor_hub_data {
- struct hid_sensor_hub_device *hsdev;
struct mutex mutex;
spinlock_t lock;
struct sensor_hub_pending pending;
@@ -67,6 +67,7 @@ struct sensor_hub_data {
struct mfd_cell *hid_sensor_hub_client_devs;
int hid_sensor_client_cnt;
unsigned long quirks;
+ int ref_cnt;
};
/**
@@ -79,6 +80,7 @@ struct sensor_hub_data {
struct hid_sensor_hub_callbacks_list {
struct list_head list;
u32 usage_id;
+ struct hid_sensor_hub_device *hsdev;
struct hid_sensor_hub_callbacks *usage_callback;
void *priv;
};
@@ -97,20 +99,18 @@ static struct hid_report *sensor_hub_report(int id, struct hid_device *hdev,
return NULL;
}
-static int sensor_hub_get_physical_device_count(
- struct hid_report_enum *report_enum)
+static int sensor_hub_get_physical_device_count(struct hid_device *hdev)
{
- struct hid_report *report;
- struct hid_field *field;
- int cnt = 0;
+ int i;
+ int count = 0;
- list_for_each_entry(report, &report_enum->report_list, list) {
- field = report->field[0];
- if (report->maxfield && field && field->physical)
- cnt++;
+ for (i = 0; i < hdev->maxcollection; ++i) {
+ struct hid_collection *collection = &hdev->collection[i];
+ if (collection->type == HID_COLLECTION_PHYSICAL)
+ ++count;
}
- return cnt;
+ return count;
}
static void sensor_hub_fill_attr_info(
@@ -128,15 +128,23 @@ static void sensor_hub_fill_attr_info(
static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
struct hid_device *hdev,
- u32 usage_id, void **priv)
+ u32 usage_id,
+ int collection_index,
+ struct hid_sensor_hub_device **hsdev,
+ void **priv)
{
struct hid_sensor_hub_callbacks_list *callback;
struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
spin_lock(&pdata->dyn_callback_lock);
list_for_each_entry(callback, &pdata->dyn_callback_list, list)
- if (callback->usage_id == usage_id) {
+ if (callback->usage_id == usage_id &&
+ (collection_index >=
+ callback->hsdev->start_collection_index) &&
+ (collection_index <
+ callback->hsdev->end_collection_index)) {
*priv = callback->priv;
+ *hsdev = callback->hsdev;
spin_unlock(&pdata->dyn_callback_lock);
return callback->usage_callback;
}
@@ -154,7 +162,8 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
spin_lock(&pdata->dyn_callback_lock);
list_for_each_entry(callback, &pdata->dyn_callback_list, list)
- if (callback->usage_id == usage_id) {
+ if (callback->usage_id == usage_id &&
+ callback->hsdev == hsdev) {
spin_unlock(&pdata->dyn_callback_lock);
return -EINVAL;
}
@@ -163,6 +172,7 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
spin_unlock(&pdata->dyn_callback_lock);
return -ENOMEM;
}
+ callback->hsdev = hsdev;
callback->usage_callback = usage_callback;
callback->usage_id = usage_id;
callback->priv = NULL;
@@ -181,7 +191,8 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
spin_lock(&pdata->dyn_callback_lock);
list_for_each_entry(callback, &pdata->dyn_callback_list, list)
- if (callback->usage_id == usage_id) {
+ if (callback->usage_id == usage_id &&
+ callback->hsdev == hsdev) {
list_del(&callback->list);
kfree(callback);
break;
@@ -260,13 +271,12 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
spin_lock_irqsave(&data->lock, flags);
data->pending.status = true;
+ spin_unlock_irqrestore(&data->lock, flags);
report = sensor_hub_report(report_id, hsdev->hdev, HID_INPUT_REPORT);
- if (!report) {
- spin_unlock_irqrestore(&data->lock, flags);
+ if (!report)
goto err_free;
- }
+
hid_hw_request(hsdev->hdev, report, HID_REQ_GET_REPORT);
- spin_unlock_irqrestore(&data->lock, flags);
wait_for_completion_interruptible_timeout(&data->pending.ready, HZ*5);
switch (data->pending.raw_size) {
case 1:
@@ -291,6 +301,28 @@ err_free:
}
EXPORT_SYMBOL_GPL(sensor_hub_input_attr_get_raw_value);
+int hid_sensor_get_usage_index(struct hid_sensor_hub_device *hsdev,
+ u32 report_id, int field_index, u32 usage_id)
+{
+ struct hid_report *report;
+ struct hid_field *field;
+ int i;
+
+ report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
+ if (!report || (field_index >= report->maxfield))
+ goto done_proc;
+
+ field = report->field[field_index];
+ for (i = 0; i < field->maxusage; ++i) {
+ if (field->usage[i].hid == usage_id)
+ return field->usage[i].usage_index;
+ }
+
+done_proc:
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(hid_sensor_get_usage_index);
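For reference, a minimal sketch of how an MFD client driver might call the new hid_sensor_get_usage_index() helper (illustrative only, not part of the patch; the report id, field index and usage id below are hypothetical placeholders):

#include <linux/hid-sensor-hub.h>

/*
 * Illustrative only: return the usage index of a (made-up) usage id inside
 * field 0 of feature report 0x01, or a negative errno if it is not there.
 */
static int example_locate_usage(struct hid_sensor_hub_device *hsdev)
{
	int index = hid_sensor_get_usage_index(hsdev, 0x01, 0, 0x200301);

	if (index < 0)
		return index;	/* -EINVAL: usage not present in that field */

	return index;		/* offset of the usage within the field */
}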
+
int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
u8 type,
u32 usage_id,
@@ -298,8 +330,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
struct hid_sensor_hub_attribute_info *info)
{
int ret = -1;
- int i, j;
- int collection_index = -1;
+ int i;
struct hid_report *report;
struct hid_field *field;
struct hid_report_enum *report_enum;
@@ -313,44 +344,31 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
info->units = -1;
info->unit_expo = -1;
- for (i = 0; i < hdev->maxcollection; ++i) {
- struct hid_collection *collection = &hdev->collection[i];
- if (usage_id == collection->usage) {
- collection_index = i;
- break;
- }
- }
- if (collection_index == -1)
- goto err_ret;
-
report_enum = &hdev->report_enum[type];
list_for_each_entry(report, &report_enum->report_list, list) {
for (i = 0; i < report->maxfield; ++i) {
field = report->field[i];
- if (field->physical == usage_id &&
- field->logical == attr_usage_id) {
- sensor_hub_fill_attr_info(info, i, report->id,
- field);
- ret = 0;
- } else {
- for (j = 0; j < field->maxusage; ++j) {
- if (field->usage[j].hid ==
- attr_usage_id &&
- field->usage[j].collection_index ==
- collection_index) {
- sensor_hub_fill_attr_info(info,
- i, report->id, field);
- ret = 0;
- break;
- }
+ if (field->maxusage) {
+ if (field->physical == usage_id &&
+ (field->logical == attr_usage_id ||
+ field->usage[0].hid ==
+ attr_usage_id) &&
+ (field->usage[0].collection_index >=
+ hsdev->start_collection_index) &&
+ (field->usage[0].collection_index <
+ hsdev->end_collection_index)) {
+
+ sensor_hub_fill_attr_info(info, i,
+ report->id,
+ field);
+ ret = 0;
+ break;
}
}
- if (ret == 0)
- break;
}
+
}
-err_ret:
return ret;
}
EXPORT_SYMBOL_GPL(sensor_hub_input_get_attribute_info);
@@ -366,7 +384,7 @@ static int sensor_hub_suspend(struct hid_device *hdev, pm_message_t message)
list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
if (callback->usage_callback->suspend)
callback->usage_callback->suspend(
- pdata->hsdev, callback->priv);
+ callback->hsdev, callback->priv);
}
spin_unlock(&pdata->dyn_callback_lock);
@@ -383,7 +401,7 @@ static int sensor_hub_resume(struct hid_device *hdev)
list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
if (callback->usage_callback->resume)
callback->usage_callback->resume(
- pdata->hsdev, callback->priv);
+ callback->hsdev, callback->priv);
}
spin_unlock(&pdata->dyn_callback_lock);
@@ -410,6 +428,7 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
struct hid_sensor_hub_callbacks *callback = NULL;
struct hid_collection *collection = NULL;
void *priv = NULL;
+ struct hid_sensor_hub_device *hsdev = NULL;
hid_dbg(hdev, "sensor_hub_raw_event report id:0x%x size:%d type:%d\n",
report->id, size, report->type);
@@ -444,23 +463,26 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
report->field[i]->usage->collection_index];
hid_dbg(hdev, "collection->usage %x\n",
collection->usage);
- callback = sensor_hub_get_callback(pdata->hsdev->hdev,
- report->field[i]->physical,
- &priv);
+
+ callback = sensor_hub_get_callback(hdev,
+ report->field[i]->physical,
+ report->field[i]->usage[0].collection_index,
+ &hsdev, &priv);
+
if (callback && callback->capture_sample) {
if (report->field[i]->logical)
- callback->capture_sample(pdata->hsdev,
+ callback->capture_sample(hsdev,
report->field[i]->logical, sz, ptr,
callback->pdev);
else
- callback->capture_sample(pdata->hsdev,
+ callback->capture_sample(hsdev,
report->field[i]->usage->hid, sz, ptr,
callback->pdev);
}
ptr += sz;
}
if (callback && collection && callback->send_event)
- callback->send_event(pdata->hsdev, collection->usage,
+ callback->send_event(hsdev, collection->usage,
callback->pdev);
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -473,7 +495,7 @@ int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev)
struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
mutex_lock(&data->mutex);
- if (!hsdev->ref_cnt) {
+ if (!data->ref_cnt) {
ret = hid_hw_open(hsdev->hdev);
if (ret) {
hid_err(hsdev->hdev, "failed to open hid device\n");
@@ -481,7 +503,7 @@ int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev)
return ret;
}
}
- hsdev->ref_cnt++;
+ data->ref_cnt++;
mutex_unlock(&data->mutex);
return ret;
@@ -493,8 +515,8 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
mutex_lock(&data->mutex);
- hsdev->ref_cnt--;
- if (!hsdev->ref_cnt)
+ data->ref_cnt--;
+ if (!data->ref_cnt)
hid_hw_close(hsdev->hdev);
mutex_unlock(&data->mutex);
}
@@ -541,26 +563,19 @@ static int sensor_hub_probe(struct hid_device *hdev,
struct sensor_hub_data *sd;
int i;
char *name;
- struct hid_report *report;
- struct hid_report_enum *report_enum;
- struct hid_field *field;
int dev_cnt;
+ struct hid_sensor_hub_device *hsdev;
+ struct hid_sensor_hub_device *last_hsdev = NULL;
sd = devm_kzalloc(&hdev->dev, sizeof(*sd), GFP_KERNEL);
if (!sd) {
hid_err(hdev, "cannot allocate Sensor data\n");
return -ENOMEM;
}
- sd->hsdev = devm_kzalloc(&hdev->dev, sizeof(*sd->hsdev), GFP_KERNEL);
- if (!sd->hsdev) {
- hid_err(hdev, "cannot allocate hid_sensor_hub_device\n");
- return -ENOMEM;
- }
+
hid_set_drvdata(hdev, sd);
sd->quirks = id->driver_data;
- sd->hsdev->hdev = hdev;
- sd->hsdev->vendor_id = hdev->vendor;
- sd->hsdev->product_id = hdev->product;
+
spin_lock_init(&sd->lock);
spin_lock_init(&sd->dyn_callback_lock);
mutex_init(&sd->mutex);
@@ -578,9 +593,8 @@ static int sensor_hub_probe(struct hid_device *hdev,
}
INIT_LIST_HEAD(&sd->dyn_callback_list);
sd->hid_sensor_client_cnt = 0;
- report_enum = &hdev->report_enum[HID_INPUT_REPORT];
- dev_cnt = sensor_hub_get_physical_device_count(report_enum);
+ dev_cnt = sensor_hub_get_physical_device_count(hdev);
if (dev_cnt > HID_MAX_PHY_DEVICES) {
hid_err(hdev, "Invalid Physical device count\n");
ret = -EINVAL;
@@ -594,42 +608,63 @@ static int sensor_hub_probe(struct hid_device *hdev,
ret = -ENOMEM;
goto err_stop_hw;
}
- list_for_each_entry(report, &report_enum->report_list, list) {
- hid_dbg(hdev, "Report id:%x\n", report->id);
- field = report->field[0];
- if (report->maxfield && field &&
- field->physical) {
+
+ for (i = 0; i < hdev->maxcollection; ++i) {
+ struct hid_collection *collection = &hdev->collection[i];
+
+ if (collection->type == HID_COLLECTION_PHYSICAL) {
+
+ hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
+ if (!hsdev) {
+ hid_err(hdev, "cannot allocate hid_sensor_hub_device\n");
+ ret = -ENOMEM;
+ goto err_no_mem;
+ }
+ hsdev->hdev = hdev;
+ hsdev->vendor_id = hdev->vendor;
+ hsdev->product_id = hdev->product;
+ hsdev->start_collection_index = i;
+ if (last_hsdev)
+ last_hsdev->end_collection_index = i;
+ last_hsdev = hsdev;
name = kasprintf(GFP_KERNEL, "HID-SENSOR-%x",
- field->physical);
+ collection->usage);
if (name == NULL) {
hid_err(hdev, "Failed MFD device name\n");
ret = -ENOMEM;
- goto err_free_names;
+ goto err_no_mem;
}
sd->hid_sensor_hub_client_devs[
- sd->hid_sensor_client_cnt].id = PLATFORM_DEVID_AUTO;
+ sd->hid_sensor_client_cnt].id =
+ PLATFORM_DEVID_AUTO;
sd->hid_sensor_hub_client_devs[
sd->hid_sensor_client_cnt].name = name;
sd->hid_sensor_hub_client_devs[
sd->hid_sensor_client_cnt].platform_data =
- sd->hsdev;
+ hsdev;
sd->hid_sensor_hub_client_devs[
sd->hid_sensor_client_cnt].pdata_size =
- sizeof(*sd->hsdev);
- hid_dbg(hdev, "Adding %s:%p\n", name, sd);
+ sizeof(*hsdev);
+ hid_dbg(hdev, "Adding %s:%d\n", name,
+ hsdev->start_collection_index);
sd->hid_sensor_client_cnt++;
}
}
+ if (last_hsdev)
+ last_hsdev->end_collection_index = i;
+
ret = mfd_add_devices(&hdev->dev, 0, sd->hid_sensor_hub_client_devs,
sd->hid_sensor_client_cnt, NULL, 0, NULL);
if (ret < 0)
- goto err_free_names;
+ goto err_no_mem;
return ret;
-err_free_names:
- for (i = 0; i < sd->hid_sensor_client_cnt ; ++i)
+err_no_mem:
+ for (i = 0; i < sd->hid_sensor_client_cnt; ++i) {
kfree(sd->hid_sensor_hub_client_devs[i].name);
+ kfree(sd->hid_sensor_hub_client_devs[i].platform_data);
+ }
kfree(sd->hid_sensor_hub_client_devs);
err_stop_hw:
hid_hw_stop(hdev);
@@ -651,8 +686,10 @@ static void sensor_hub_remove(struct hid_device *hdev)
complete(&data->pending.ready);
spin_unlock_irqrestore(&data->lock, flags);
mfd_remove_devices(&hdev->dev);
- for (i = 0; i < data->hid_sensor_client_cnt ; ++i)
+ for (i = 0; i < data->hid_sensor_client_cnt; ++i) {
kfree(data->hid_sensor_hub_client_devs[i].name);
+ kfree(data->hid_sensor_hub_client_devs[i].platform_data);
+ }
kfree(data->hid_sensor_hub_client_devs);
hid_set_drvdata(hdev, NULL);
mutex_destroy(&data->mutex);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 2f19b15f47f2..69204afea7a4 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -17,7 +17,8 @@
* any later version.
*/
-/* NOTE: in order for the Sony PS3 BD Remote Control to be found by
+/*
+ * NOTE: in order for the Sony PS3 BD Remote Control to be found by
* a Bluetooth host, the key combination Start+Enter has to be kept pressed
* for about 7 seconds with the Bluetooth Host Controller in discovering mode.
*
@@ -28,8 +29,11 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/usb.h>
#include <linux/leds.h>
+#include <linux/power_supply.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/input/mt.h>
#include "hid-ids.h"
@@ -41,8 +45,13 @@
#define DUALSHOCK4_CONTROLLER_USB BIT(5)
#define DUALSHOCK4_CONTROLLER_BT BIT(6)
-#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB)
-#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER_USB | DUALSHOCK4_CONTROLLER_USB)
+#define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
+#define DUALSHOCK4_CONTROLLER (DUALSHOCK4_CONTROLLER_USB |\
+ DUALSHOCK4_CONTROLLER_BT)
+#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER | BUZZ_CONTROLLER |\
+ DUALSHOCK4_CONTROLLER)
+#define SONY_BATTERY_SUPPORT (SIXAXIS_CONTROLLER | DUALSHOCK4_CONTROLLER)
+#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER | DUALSHOCK4_CONTROLLER)
#define MAX_LEDS 4
@@ -74,7 +83,8 @@ static const u8 sixaxis_rdesc_fixup2[] = {
0xb1, 0x02, 0xc0, 0xc0,
};
-/* The default descriptor doesn't provide mapping for the accelerometers
+/*
+ * The default descriptor doesn't provide mapping for the accelerometers
* or orientation sensors. This fixed descriptor maps the accelerometers
* to usage values 0x40, 0x41 and 0x42 and maps the orientation sensors
* to usage values 0x43, 0x44 and 0x45.
@@ -333,6 +343,217 @@ static u8 dualshock4_usb_rdesc[] = {
0xC0 /* End Collection */
};
+/*
+ * The default behavior of the Dualshock 4 is to send reports using report
+ * type 1 when running over Bluetooth. However, as soon as it receives a
+ * report of type 17 to set the LEDs or rumble, it starts returning its state
+ * in report 17 instead of 1. Since report 17 is undefined in the default HID
+ * descriptor, the button and axis definitions must be moved to report 17 or
+ * the HID layer won't process the received input once a report is sent.
+ */
+static u8 dualshock4_bt_rdesc[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x05, /* Usage (Gamepad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x0A, /* Report Count (9), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x06, 0x04, 0xFF, /* Usage Page (FF04h), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x09, 0x24, /* Usage (24h), */
+ 0x95, 0x24, /* Report Count (36), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA3, /* Report ID (163), */
+ 0x09, 0x25, /* Usage (25h), */
+ 0x95, 0x30, /* Report Count (48), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x05, /* Report ID (5), */
+ 0x09, 0x26, /* Usage (26h), */
+ 0x95, 0x28, /* Report Count (40), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x06, /* Report ID (6), */
+ 0x09, 0x27, /* Usage (27h), */
+ 0x95, 0x34, /* Report Count (52), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x07, /* Report ID (7), */
+ 0x09, 0x28, /* Usage (28h), */
+ 0x95, 0x30, /* Report Count (48), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x08, /* Report ID (8), */
+ 0x09, 0x29, /* Usage (29h), */
+ 0x95, 0x2F, /* Report Count (47), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x06, 0x03, 0xFF, /* Usage Page (FF03h), */
+ 0x85, 0x03, /* Report ID (3), */
+ 0x09, 0x21, /* Usage (21h), */
+ 0x95, 0x26, /* Report Count (38), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x04, /* Report ID (4), */
+ 0x09, 0x22, /* Usage (22h), */
+ 0x95, 0x2E, /* Report Count (46), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xF0, /* Report ID (240), */
+ 0x09, 0x47, /* Usage (47h), */
+ 0x95, 0x3F, /* Report Count (63), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xF1, /* Report ID (241), */
+ 0x09, 0x48, /* Usage (48h), */
+ 0x95, 0x3F, /* Report Count (63), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xF2, /* Report ID (242), */
+ 0x09, 0x49, /* Usage (49h), */
+ 0x95, 0x0F, /* Report Count (15), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x11, /* Report ID (17), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0x20, /* Usage (20h), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x09, 0x32, /* Usage (Z), */
+ 0x09, 0x35, /* Usage (Rz), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x04, /* Report Count (4), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x39, /* Usage (Hat Switch), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x07, /* Logical Maximum (7), */
+ 0x75, 0x04, /* Report Size (4), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x42, /* Input (Variable, Null State), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x0E, /* Report Count (14), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x06, /* Report Size (6), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x33, /* Usage (Rx), */
+ 0x09, 0x34, /* Usage (Ry), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0x20, /* Usage (20h), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x19, 0x40, /* Usage Minimum (40h), */
+ 0x29, 0x42, /* Usage Maximum (42h), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x19, 0x43, /* Usage Minimum (43h), */
+ 0x29, 0x45, /* Usage Maximum (45h), */
+ 0x16, 0xFF, 0xBF, /* Logical Minimum (-16385), */
+ 0x26, 0x00, 0x40, /* Logical Maximum (16384), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0x20, /* Usage (20h), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x31, /* Report Count (51), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x21, /* Usage (21h), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x4D, /* Report Count (77), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x85, 0x12, /* Report ID (18), */
+ 0x09, 0x22, /* Usage (22h), */
+ 0x95, 0x8D, /* Report Count (141), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x23, /* Usage (23h), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x85, 0x13, /* Report ID (19), */
+ 0x09, 0x24, /* Usage (24h), */
+ 0x95, 0xCD, /* Report Count (205), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x25, /* Usage (25h), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x85, 0x14, /* Report ID (20), */
+ 0x09, 0x26, /* Usage (26h), */
+ 0x96, 0x0D, 0x01, /* Report Count (269), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x27, /* Usage (27h), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x85, 0x15, /* Report ID (21), */
+ 0x09, 0x28, /* Usage (28h), */
+ 0x96, 0x4D, 0x01, /* Report Count (333), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x29, /* Usage (29h), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x85, 0x16, /* Report ID (22), */
+ 0x09, 0x2A, /* Usage (2Ah), */
+ 0x96, 0x8D, 0x01, /* Report Count (397), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x2B, /* Usage (2Bh), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x85, 0x17, /* Report ID (23), */
+ 0x09, 0x2C, /* Usage (2Ch), */
+ 0x96, 0xCD, 0x01, /* Report Count (461), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x2D, /* Usage (2Dh), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x85, 0x18, /* Report ID (24), */
+ 0x09, 0x2E, /* Usage (2Eh), */
+ 0x96, 0x0D, 0x02, /* Report Count (525), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x2F, /* Usage (2Fh), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x85, 0x19, /* Report ID (25), */
+ 0x09, 0x30, /* Usage (30h), */
+ 0x96, 0x22, 0x02, /* Report Count (546), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x31, /* Usage (31h), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x06, 0x80, 0xFF, /* Usage Page (FF80h), */
+ 0x85, 0x82, /* Report ID (130), */
+ 0x09, 0x22, /* Usage (22h), */
+ 0x95, 0x3F, /* Report Count (63), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x83, /* Report ID (131), */
+ 0x09, 0x23, /* Usage (23h), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x84, /* Report ID (132), */
+ 0x09, 0x24, /* Usage (24h), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x90, /* Report ID (144), */
+ 0x09, 0x30, /* Usage (30h), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x91, /* Report ID (145), */
+ 0x09, 0x31, /* Usage (31h), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x92, /* Report ID (146), */
+ 0x09, 0x32, /* Usage (32h), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x93, /* Report ID (147), */
+ 0x09, 0x33, /* Usage (33h), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA0, /* Report ID (160), */
+ 0x09, 0x40, /* Usage (40h), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA4, /* Report ID (164), */
+ 0x09, 0x44, /* Usage (44h), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0 /* End Collection */
+};
+
static __u8 ps3remote_rdesc[] = {
0x05, 0x01, /* GUsagePage Generic Desktop */
0x09, 0x05, /* LUsage 0x05 [Game Pad] */
@@ -450,7 +671,8 @@ static const unsigned int ps3remote_keymap_remote_buttons[] = {
};
static const unsigned int buzz_keymap[] = {
- /* The controller has 4 remote buzzers, each with one LED and 5
+ /*
+ * The controller has 4 remote buzzers, each with one LED and 5
* buttons.
*
* We use the mapping chosen by the controller, which is:
@@ -488,19 +710,35 @@ static const unsigned int buzz_keymap[] = {
[20] = BTN_TRIGGER_HAPPY20,
};
+static enum power_supply_property sony_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_STATUS,
+};
+
+static DEFINE_SPINLOCK(sony_dev_list_lock);
+static LIST_HEAD(sony_device_list);
+
struct sony_sc {
+ spinlock_t lock;
+ struct list_head list_node;
struct hid_device *hdev;
struct led_classdev *leds[MAX_LEDS];
- struct hid_report *output_report;
unsigned long quirks;
struct work_struct state_worker;
+ struct power_supply battery;
#ifdef CONFIG_SONY_FF
__u8 left;
__u8 right;
#endif
+ __u8 mac_address[6];
__u8 worker_initialized;
+ __u8 cable_state;
+ __u8 battery_charging;
+ __u8 battery_capacity;
__u8 led_state[MAX_LEDS];
__u8 led_count;
};
@@ -578,6 +816,10 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Using modified Dualshock 4 report descriptor with gyroscope axes\n");
rdesc = dualshock4_usb_rdesc;
*rsize = sizeof(dualshock4_usb_rdesc);
+ } else if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) && *rsize == 357) {
+ hid_info(hdev, "Using modified Dualshock 4 Bluetooth report descriptor\n");
+ rdesc = dualshock4_bt_rdesc;
+ *rsize = sizeof(dualshock4_bt_rdesc);
}
/* The HID descriptor exposed over BT has a trailing zero byte */
@@ -601,20 +843,127 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
+static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
+{
+ static const __u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 };
+ unsigned long flags;
+ __u8 cable_state, battery_capacity, battery_charging;
+
+ /*
+ * The sixaxis is charging if the battery value is 0xee
+ * and it is fully charged if the value is 0xef.
+ * It does not report the actual level while charging, so the
+ * capacity is reported as 100% while charging is in progress.
+ */
+ if (rd[30] >= 0xee) {
+ battery_capacity = 100;
+ battery_charging = !(rd[30] & 0x01);
+ } else {
+ __u8 index = rd[30] <= 5 ? rd[30] : 5;
+ battery_capacity = sixaxis_battery_capacity[index];
+ battery_charging = 0;
+ }
+ cable_state = !(rd[31] & 0x04);
+
+ spin_lock_irqsave(&sc->lock, flags);
+ sc->cable_state = cable_state;
+ sc->battery_capacity = battery_capacity;
+ sc->battery_charging = battery_charging;
+ spin_unlock_irqrestore(&sc->lock, flags);
+}
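For reference, a standalone sketch of the rd[30] battery decoding above (illustrative only, not part of the patch; the sample values are made up):

#include <stdio.h>

static const unsigned int capacity_tbl[] = { 0, 1, 25, 50, 75, 100 };

static void decode(unsigned int b)
{
	unsigned int capacity, charging;

	if (b >= 0xee) {
		capacity = 100;
		charging = !(b & 0x01);	/* 0xee = charging, 0xef = charged */
	} else {
		capacity = capacity_tbl[b <= 5 ? b : 5];
		charging = 0;
	}
	printf("raw 0x%02x -> %u%%%s\n", b, capacity,
	       charging ? " (charging)" : "");
}

int main(void)
{
	decode(0x03);	/* prints: raw 0x03 -> 50% */
	decode(0xee);	/* prints: raw 0xee -> 100% (charging) */
	decode(0xef);	/* prints: raw 0xef -> 100% */
	return 0;
}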
+
+static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
+{
+ struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
+ struct hid_input, list);
+ struct input_dev *input_dev = hidinput->input;
+ unsigned long flags;
+ int n, offset;
+ __u8 cable_state, battery_capacity, battery_charging;
+
+ /*
+ * Battery and touchpad data starts at byte 30 in the USB report and
+ * 32 in the Bluetooth report.
+ */
+ offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 30 : 32;
+
+ /*
+ * The lower 4 bits of byte 30 contain the battery level
+ * and the 5th bit contains the USB cable state.
+ */
+ cable_state = (rd[offset] >> 4) & 0x01;
+ battery_capacity = rd[offset] & 0x0F;
+
+ /*
+ * When a USB power source is connected the battery level ranges from
+ * 0 to 10, and when running on battery power it ranges from 0 to 9.
+ * A battery level above 10 when plugged in means charge completed.
+ */
+ if (!cable_state || battery_capacity > 10)
+ battery_charging = 0;
+ else
+ battery_charging = 1;
+
+ if (!cable_state)
+ battery_capacity++;
+ if (battery_capacity > 10)
+ battery_capacity = 10;
+
+ battery_capacity *= 10;
+
+ spin_lock_irqsave(&sc->lock, flags);
+ sc->cable_state = cable_state;
+ sc->battery_capacity = battery_capacity;
+ sc->battery_charging = battery_charging;
+ spin_unlock_irqrestore(&sc->lock, flags);
+
+ offset += 5;
+
+ /*
+ * The Dualshock 4 multi-touch trackpad data starts at offset 35 on USB
+ * and 37 on Bluetooth.
+ * The first 7 bits of the first byte are a counter and bit 8 is a touch
+ * indicator that is 0 when pressed and 1 when not pressed.
+ * The next 3 bytes are two 12 bit touch coordinates, X and Y.
+ * The data for the second touch is in the same format and immediatly
+ * follows the data for the first.
+ */
+ for (n = 0; n < 2; n++) {
+ __u16 x, y;
+
+ x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
+ y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
+
+ input_mt_slot(input_dev, n);
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
+ !(rd[offset] >> 7));
+ input_report_abs(input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+
+ offset += 4;
+ }
+}
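For reference, a standalone sketch of the 12-bit touch coordinate unpacking above (illustrative only, not part of the patch; the four record bytes are made up):

#include <stdio.h>

int main(void)
{
	/* one 4-byte touch record: id/active byte plus packed 12-bit X and Y */
	unsigned char rec[4] = { 0x05, 0x34, 0xd2, 0x4b };
	unsigned int x, y;

	x = rec[1] | ((rec[2] & 0x0f) << 8);		/* 0x234 = 564  */
	y = ((rec[2] & 0xf0) >> 4) | (rec[3] << 4);	/* 0x4bd = 1213 */

	printf("x=%u y=%u active=%d\n", x, y, !(rec[0] >> 7));
	return 0;
}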
+
static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
__u8 *rd, int size)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
- /* Sixaxis HID report has acclerometers/gyro with MSByte first, this
+ /*
+ * Sixaxis HID report has accelerometers/gyro with MSByte first, this
* has to be BYTE_SWAPPED before passing up to joystick interface
*/
- if ((sc->quirks & (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)) &&
- rd[0] == 0x01 && size == 49) {
+ if ((sc->quirks & SIXAXIS_CONTROLLER) && rd[0] == 0x01 && size == 49) {
swap(rd[41], rd[42]);
swap(rd[43], rd[44]);
swap(rd[45], rd[46]);
swap(rd[47], rd[48]);
+
+ sixaxis_parse_report(sc, rd, size);
+ } else if (((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && rd[0] == 0x01 &&
+ size == 64) || ((sc->quirks & DUALSHOCK4_CONTROLLER_BT)
+ && rd[0] == 0x11 && size == 78)) {
+ dualshock4_parse_report(sc, rd, size);
}
return 0;
@@ -657,45 +1006,6 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
}
/*
- * The Sony Sixaxis does not handle HID Output Reports on the Interrupt EP
- * like it should according to usbhid/hid-core.c::usbhid_output_raw_report()
- * so we need to override that forcing HID Output Reports on the Control EP.
- *
- * There is also another issue about HID Output Reports via USB, the Sixaxis
- * does not want the report_id as part of the data packet, so we have to
- * discard buf[0] when sending the actual control message, even for numbered
- * reports, humpf!
- */
-static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
- size_t count, unsigned char report_type)
-{
- struct usb_interface *intf = to_usb_interface(hid->dev.parent);
- struct usb_device *dev = interface_to_usbdev(intf);
- struct usb_host_interface *interface = intf->cur_altsetting;
- int report_id = buf[0];
- int ret;
-
- if (report_type == HID_OUTPUT_REPORT) {
- /* Don't send the Report ID */
- buf++;
- count--;
- }
-
- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- HID_REQ_SET_REPORT,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- ((report_type + 1) << 8) | report_id,
- interface->desc.bInterfaceNumber, buf, count,
- USB_CTRL_SET_TIMEOUT);
-
- /* Count also the Report ID, in case of an Output report. */
- if (ret > 0 && report_type == HID_OUTPUT_REPORT)
- ret++;
-
- return ret;
-}
-
-/*
* Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller
* to "operational". Without this, the ps3 controller will not report any
* events.
@@ -708,7 +1018,8 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
if (!buf)
return -ENOMEM;
- ret = hdev->hid_get_raw_report(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT);
+ ret = hid_hw_raw_request(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
if (ret < 0)
hid_err(hdev, "can't set operational mode\n");
@@ -721,7 +1032,20 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
static int sixaxis_set_operational_bt(struct hid_device *hdev)
{
unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
- return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
+ return hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+}
+
+/*
+ * Requesting feature report 0x02 in Bluetooth mode changes the state of the
+ * controller so that it sends full input reports of type 0x11.
+ */
+static int dualshock4_set_operational_bt(struct hid_device *hdev)
+{
+ __u8 buf[37] = { 0 };
+
+ return hid_hw_raw_request(hdev, 0x02, buf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
}
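For reference, an equivalent userspace sketch (illustrative only, not part of the patch): the same GET_FEATURE request for report 0x02 can be issued through hidraw to switch a Bluetooth-connected DualShock 4 to the full 0x11 input reports; /dev/hidraw0 is a placeholder device node:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

int main(void)
{
	unsigned char buf[37] = { 0x02 };	/* buf[0] selects the report number */
	int fd = open("/dev/hidraw0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, HIDIOCGFEATURE(sizeof(buf)), buf) < 0)
		perror("HIDIOCGFEATURE");
	close(fd);
	return 0;
}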
static void buzz_set_leds(struct hid_device *hdev, const __u8 *leds)
@@ -751,8 +1075,7 @@ static void sony_set_leds(struct hid_device *hdev, const __u8 *leds, int count)
if (drv_data->quirks & BUZZ_CONTROLLER && count == 4) {
buzz_set_leds(hdev, leds);
- } else if ((drv_data->quirks & SIXAXIS_CONTROLLER_USB) ||
- (drv_data->quirks & DUALSHOCK4_CONTROLLER_USB)) {
+ } else {
for (n = 0; n < count; n++)
drv_data->led_state[n] = leds[n];
schedule_work(&drv_data->state_worker);
@@ -792,7 +1115,6 @@ static enum led_brightness sony_led_get_brightness(struct led_classdev *led)
struct sony_sc *drv_data;
int n;
- int on = 0;
drv_data = hid_get_drvdata(hdev);
if (!drv_data) {
@@ -801,13 +1123,11 @@ static enum led_brightness sony_led_get_brightness(struct led_classdev *led)
}
for (n = 0; n < drv_data->led_count; n++) {
- if (led == drv_data->leds[n]) {
- on = !!(drv_data->led_state[n]);
- break;
- }
+ if (led == drv_data->leds[n])
+ return drv_data->led_state[n];
}
- return on ? LED_FULL : LED_OFF;
+ return LED_OFF;
}
static void sony_leds_remove(struct hid_device *hdev)
@@ -857,7 +1177,7 @@ static int sony_leds_init(struct hid_device *hdev)
/* Validate expected report characteristics. */
if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
return -ENODEV;
- } else if (drv_data->quirks & DUALSHOCK4_CONTROLLER_USB) {
+ } else if (drv_data->quirks & DUALSHOCK4_CONTROLLER) {
drv_data->led_count = 3;
max_brightness = 255;
use_colors = 1;
@@ -871,9 +1191,11 @@ static int sony_leds_init(struct hid_device *hdev)
name_fmt = "%s::sony%d";
}
- /* Clear LEDs as we have no way of reading their initial state. This is
+ /*
+ * Clear LEDs as we have no way of reading their initial state. This is
* only relevant if the driver is loaded after somebody actively set the
- * LEDs to on */
+ * LEDs to on.
+ */
sony_set_leds(hdev, initial_values, drv_data->led_count);
name_sz = strlen(dev_name(&hdev->dev)) + name_len + 1;
@@ -943,29 +1265,45 @@ static void sixaxis_state_worker(struct work_struct *work)
buf[10] |= sc->led_state[2] << 3;
buf[10] |= sc->led_state[3] << 4;
- sc->hdev->hid_output_raw_report(sc->hdev, buf, sizeof(buf),
- HID_OUTPUT_REPORT);
+ hid_hw_raw_request(sc->hdev, 0x01, buf, sizeof(buf), HID_OUTPUT_REPORT,
+ HID_REQ_SET_REPORT);
}
static void dualshock4_state_worker(struct work_struct *work)
{
struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
struct hid_device *hdev = sc->hdev;
- struct hid_report *report = sc->output_report;
- __s32 *value = report->field[0]->value;
+ int offset;
- value[0] = 0x03;
+ __u8 buf[78] = { 0 };
+
+ if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
+ buf[0] = 0x05;
+ buf[1] = 0x03;
+ offset = 4;
+ } else {
+ buf[0] = 0x11;
+ buf[1] = 0xB0;
+ buf[3] = 0x0F;
+ offset = 6;
+ }
#ifdef CONFIG_SONY_FF
- value[3] = sc->right;
- value[4] = sc->left;
+ buf[offset++] = sc->right;
+ buf[offset++] = sc->left;
+#else
+ offset += 2;
#endif
- value[5] = sc->led_state[0];
- value[6] = sc->led_state[1];
- value[7] = sc->led_state[2];
+ buf[offset++] = sc->led_state[0];
+ buf[offset++] = sc->led_state[1];
+ buf[offset++] = sc->led_state[2];
- hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+ if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
+ hid_hw_output_report(hdev, buf, 32);
+ else
+ hid_hw_raw_request(hdev, 0x11, buf, 78,
+ HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
}
#ifdef CONFIG_SONY_FF
@@ -1000,35 +1338,247 @@ static int sony_init_ff(struct hid_device *hdev)
{
return 0;
}
+
#endif
-static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size)
+static int sony_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
{
- struct list_head *head, *list;
- struct hid_report *report;
+ struct sony_sc *sc = container_of(psy, struct sony_sc, battery);
+ unsigned long flags;
+ int ret = 0;
+ u8 battery_charging, battery_capacity, cable_state;
+
+ spin_lock_irqsave(&sc->lock, flags);
+ battery_charging = sc->battery_charging;
+ battery_capacity = sc->battery_capacity;
+ cable_state = sc->cable_state;
+ spin_unlock_irqrestore(&sc->lock, flags);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = battery_capacity;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (battery_charging)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ if (battery_capacity == 100 && cable_state)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int sony_battery_probe(struct sony_sc *sc)
+{
+ static atomic_t power_id_seq = ATOMIC_INIT(0);
+ unsigned long power_id;
struct hid_device *hdev = sc->hdev;
+ int ret;
+
+ /*
+ * Set the default battery level to 100% to avoid low battery warnings
+ * if the battery is polled before the first device report is received.
+ */
+ sc->battery_capacity = 100;
+
+ power_id = (unsigned long)atomic_inc_return(&power_id_seq);
+
+ sc->battery.properties = sony_battery_props;
+ sc->battery.num_properties = ARRAY_SIZE(sony_battery_props);
+ sc->battery.get_property = sony_battery_get_property;
+ sc->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ sc->battery.use_for_apm = 0;
+ sc->battery.name = kasprintf(GFP_KERNEL, "sony_controller_battery_%lu",
+ power_id);
+ if (!sc->battery.name)
+ return -ENOMEM;
- list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
+ ret = power_supply_register(&hdev->dev, &sc->battery);
+ if (ret) {
+ hid_err(hdev, "Unable to register battery device\n");
+ goto err_free;
+ }
- list_for_each(head, list) {
- report = list_entry(head, struct hid_report, list);
+ power_supply_powers(&sc->battery, &hdev->dev);
+ return 0;
- if (report->id == req_id) {
- if (report->size < req_size) {
- hid_err(hdev, "Output report 0x%02x (%i bits) is smaller than requested size (%i bits)\n",
- req_id, report->size, req_size);
- return -EINVAL;
- }
- sc->output_report = report;
- return 0;
+err_free:
+ kfree(sc->battery.name);
+ sc->battery.name = NULL;
+ return ret;
+}
+
+static void sony_battery_remove(struct sony_sc *sc)
+{
+ if (!sc->battery.name)
+ return;
+
+ power_supply_unregister(&sc->battery);
+ kfree(sc->battery.name);
+ sc->battery.name = NULL;
+}
+
+static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
+ int w, int h)
+{
+ struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
+ struct hid_input, list);
+ struct input_dev *input_dev = hidinput->input;
+ int ret;
+
+ ret = input_mt_init_slots(input_dev, touch_count, 0);
+ if (ret < 0) {
+ hid_err(sc->hdev, "Unable to initialize multi-touch slots\n");
+ return ret;
+ }
+
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, w, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, h, 0, 0);
+
+ return 0;
+}
+
+/*
+ * If a controller is plugged in via USB while already connected via Bluetooth,
+ * it will show up as two devices. A global list of connected controllers and
+ * their MAC addresses is maintained to ensure that a device is only connected
+ * once.
+ */
+static int sony_check_add_dev_list(struct sony_sc *sc)
+{
+ struct sony_sc *entry;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&sony_dev_list_lock, flags);
+
+ list_for_each_entry(entry, &sony_device_list, list_node) {
+ ret = memcmp(sc->mac_address, entry->mac_address,
+ sizeof(sc->mac_address));
+ if (!ret) {
+ ret = -EEXIST;
+ hid_info(sc->hdev, "controller with MAC address %pMR already connected\n",
+ sc->mac_address);
+ goto unlock;
}
}
- hid_err(hdev, "Unable to locate output report 0x%02x\n", req_id);
+ ret = 0;
+ list_add(&(sc->list_node), &sony_device_list);
- return -EINVAL;
+unlock:
+ spin_unlock_irqrestore(&sony_dev_list_lock, flags);
+ return ret;
+}
+
+static void sony_remove_dev_list(struct sony_sc *sc)
+{
+ unsigned long flags;
+
+ if (sc->list_node.next) {
+ spin_lock_irqsave(&sony_dev_list_lock, flags);
+ list_del(&(sc->list_node));
+ spin_unlock_irqrestore(&sony_dev_list_lock, flags);
+ }
}
+static int sony_get_bt_devaddr(struct sony_sc *sc)
+{
+ int ret;
+
+ /* HIDP stores the device MAC address as a string in the uniq field. */
+ ret = strlen(sc->hdev->uniq);
+ if (ret != 17)
+ return -EINVAL;
+
+ ret = sscanf(sc->hdev->uniq,
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ &sc->mac_address[5], &sc->mac_address[4], &sc->mac_address[3],
+ &sc->mac_address[2], &sc->mac_address[1], &sc->mac_address[0]);
+
+ if (ret != 6)
+ return -EINVAL;
+
+ return 0;
+}
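For reference, a standalone sketch of the uniq-string parsing done in sony_get_bt_devaddr() (illustrative only, not part of the patch; the address is made up). The bytes are stored least-significant first, matching the order used elsewhere in the driver:

#include <stdio.h>

int main(void)
{
	unsigned char mac[6];
	const char *uniq = "00:19:c1:55:aa:10";	/* example HIDP uniq string */

	if (sscanf(uniq, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		   &mac[5], &mac[4], &mac[3], &mac[2], &mac[1], &mac[0]) != 6)
		return 1;

	/* mac[0] is now 0x10 and mac[5] is 0x00 (reversed byte order) */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[5], mac[4], mac[3], mac[2], mac[1], mac[0]);
	return 0;
}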
+
+static int sony_check_add(struct sony_sc *sc)
+{
+ int n, ret;
+
+ if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) ||
+ (sc->quirks & SIXAXIS_CONTROLLER_BT)) {
+ /*
+ * sony_get_bt_devaddr() attempts to parse the Bluetooth MAC
+ * address from the uniq string where HIDP stores it.
+ * As uniq cannot be guaranteed to be a MAC address in all cases,
+ * a failure of this function should not prevent the connection.
+ */
+ if (sony_get_bt_devaddr(sc) < 0) {
+ hid_warn(sc->hdev, "UNIQ does not contain a MAC address; duplicate check skipped\n");
+ return 0;
+ }
+ } else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
+ __u8 buf[7];
+
+ /*
+ * The MAC address of a DS4 controller connected via USB can be
+ * retrieved with feature report 0x81. The address begins at
+ * offset 1.
+ */
+ ret = hid_hw_raw_request(sc->hdev, 0x81, buf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+
+ if (ret != 7) {
+ hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n");
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
+ } else if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
+ __u8 buf[18];
+
+ /*
+ * The MAC address of a Sixaxis controller connected via USB can
+ * be retrieved with feature report 0xf2. The address begins at
+ * offset 4.
+ */
+ ret = hid_hw_raw_request(sc->hdev, 0xf2, buf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+
+ if (ret != 18) {
+ hid_err(sc->hdev, "failed to retrieve feature report 0xf2 with the Sixaxis MAC address\n");
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ /*
+ * The Sixaxis device MAC in the report is big-endian and must
+ * be byte-swapped.
+ */
+ for (n = 0; n < 6; n++)
+ sc->mac_address[5-n] = buf[4+n];
+ } else {
+ return 0;
+ }
+
+ return sony_check_add_dev_list(sc);
+}
+
static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
@@ -1066,17 +1616,48 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
- hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
+ /*
+ * The Sony Sixaxis does not handle HID Output Reports on the
+ * Interrupt EP like it could, so we need to force HID Output
+ * Reports to use HID_REQ_SET_REPORT on the Control EP.
+ *
+ * There is also another issue about HID Output Reports via USB,
+ * the Sixaxis does not want the report_id as part of the data
+ * packet, so we have to discard buf[0] when sending the actual
+ * control message, even for numbered reports, humpf!
+ */
+ hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
+ hdev->quirks |= HID_QUIRK_SKIP_OUTPUT_REPORT_ID;
ret = sixaxis_set_operational_usb(hdev);
-
sc->worker_initialized = 1;
INIT_WORK(&sc->state_worker, sixaxis_state_worker);
- }
- else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
+ } else if (sc->quirks & SIXAXIS_CONTROLLER_BT) {
+ /*
+ * The Sixaxis wants output reports sent on the ctrl endpoint
+ * when connected via Bluetooth.
+ */
+ hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
ret = sixaxis_set_operational_bt(hdev);
- else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- /* Report 5 (31 bytes) is used to send data to the controller via USB */
- ret = sony_set_output_report(sc, 0x05, 248);
+ sc->worker_initialized = 1;
+ INIT_WORK(&sc->state_worker, sixaxis_state_worker);
+ } else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
+ if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
+ /*
+ * The DualShock 4 wants output reports sent on the ctrl
+ * endpoint when connected via Bluetooth.
+ */
+ hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
+ ret = dualshock4_set_operational_bt(hdev);
+ if (ret < 0) {
+ hid_err(hdev, "failed to set the Dualshock 4 operational mode\n");
+ goto err_stop;
+ }
+ }
+ /*
+ * The Dualshock 4 touchpad supports 2 touches and has a
+ * resolution of 1920x940.
+ */
+ ret = sony_register_touchpad(sc, 2, 1920, 940);
if (ret < 0)
goto err_stop;
@@ -1089,22 +1670,46 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret < 0)
goto err_stop;
+ ret = sony_check_add(sc);
+ if (ret < 0)
+ goto err_stop;
+
if (sc->quirks & SONY_LED_SUPPORT) {
ret = sony_leds_init(hdev);
if (ret < 0)
goto err_stop;
}
+ if (sc->quirks & SONY_BATTERY_SUPPORT) {
+ ret = sony_battery_probe(sc);
+ if (ret < 0)
+ goto err_stop;
+
+ /* Open the device to receive reports with battery info */
+ ret = hid_hw_open(hdev);
+ if (ret < 0) {
+ hid_err(hdev, "hw open failed\n");
+ goto err_stop;
+ }
+ }
+
if (sc->quirks & SONY_FF_SUPPORT) {
ret = sony_init_ff(hdev);
if (ret < 0)
- goto err_stop;
+ goto err_close;
}
return 0;
+err_close:
+ hid_hw_close(hdev);
err_stop:
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(hdev);
+ if (sc->quirks & SONY_BATTERY_SUPPORT)
+ sony_battery_remove(sc);
+ if (sc->worker_initialized)
+ cancel_work_sync(&sc->state_worker);
+ sony_remove_dev_list(sc);
hid_hw_stop(hdev);
return ret;
}
@@ -1118,6 +1723,15 @@ static void sony_remove(struct hid_device *hdev)
if (sc->worker_initialized)
cancel_work_sync(&sc->state_worker);
+ if (sc->quirks & SONY_BATTERY_SUPPORT) {
+ hid_hw_close(hdev);
+ sony_battery_remove(sc);
+ }
+
+ if (sc->worker_initialized)
+ cancel_work_sync(&sc->state_worker);
+
+ sony_remove_dev_list(sc);
hid_hw_stop(hdev);
}
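
All of the driver updates in this patch funnel raw transfers through the same two transport helpers, hid_hw_raw_request() and hid_hw_output_report(), instead of the old hid_output_raw_report hook. As a rough illustrative sketch (the report IDs and buffer size below are hypothetical, not taken from any driver in this series), a device driver now does:

#include <linux/hid.h>
#include <linux/slab.h>

/* Illustrative only: fetch a feature report, then send an output report. */
static int example_hid_io(struct hid_device *hdev)
{
	__u8 *buf;
	int ret;

	buf = kzalloc(8, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* GET_REPORT of a hypothetical feature report 0x01 over the control pipe */
	ret = hid_hw_raw_request(hdev, 0x01, buf, 8,
				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	if (ret < 0)
		goto out;

	/* Plain output report; byte 0 carries the report ID */
	buf[0] = 0x02;
	ret = hid_hw_output_report(hdev, buf, 8);
out:
	kfree(buf);
	return ret;
}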
diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c
index 99342cfa0ea2..a97c78845f7b 100644
--- a/drivers/hid/hid-thingm.c
+++ b/drivers/hid/hid-thingm.c
@@ -48,8 +48,8 @@ static int blink1_send_command(struct blink1_data *data,
buf[0], buf[1], buf[2], buf[3], buf[4],
buf[5], buf[6], buf[7], buf[8]);
- ret = data->hdev->hid_output_raw_report(data->hdev, buf,
- BLINK1_CMD_SIZE, HID_FEATURE_REPORT);
+ ret = hid_hw_raw_request(data->hdev, buf[0], buf, BLINK1_CMD_SIZE,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
return ret < 0 ? ret : 0;
}
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 60c75dcbbdb8..902013ec041b 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -128,8 +128,8 @@ static void wacom_set_image(struct hid_device *hdev, const char *image,
rep_data[0] = WAC_CMD_ICON_START_STOP;
rep_data[1] = 0;
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
+ ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret < 0)
goto err;
@@ -143,15 +143,15 @@ static void wacom_set_image(struct hid_device *hdev, const char *image,
rep_data[j + 3] = p[(i << 6) + j];
rep_data[2] = i;
- ret = hdev->hid_output_raw_report(hdev, rep_data, 67,
- HID_FEATURE_REPORT);
+ ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 67,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
}
rep_data[0] = WAC_CMD_ICON_START_STOP;
rep_data[1] = 0;
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
+ ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
err:
return;
@@ -183,7 +183,8 @@ static void wacom_leds_set_brightness(struct led_classdev *led_dev,
buf[3] = value;
/* use fixed brightness for OLEDs */
buf[4] = 0x08;
- hdev->hid_output_raw_report(hdev, buf, 9, HID_FEATURE_REPORT);
+ hid_hw_raw_request(hdev, buf[0], buf, 9, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
kfree(buf);
}
@@ -339,8 +340,8 @@ static void wacom_set_features(struct hid_device *hdev, u8 speed)
rep_data[0] = 0x03 ; rep_data[1] = 0x00;
limit = 3;
do {
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
+ ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
} while (ret < 0 && limit-- > 0);
if (ret >= 0) {
@@ -352,8 +353,9 @@ static void wacom_set_features(struct hid_device *hdev, u8 speed)
rep_data[1] = 0x00;
limit = 3;
do {
- ret = hdev->hid_output_raw_report(hdev,
- rep_data, 2, HID_FEATURE_REPORT);
+ ret = hid_hw_raw_request(hdev, rep_data[0],
+ rep_data, 2, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
} while (ret < 0 && limit-- > 0);
if (ret >= 0) {
@@ -378,8 +380,8 @@ static void wacom_set_features(struct hid_device *hdev, u8 speed)
rep_data[0] = 0x03;
rep_data[1] = wdata->features;
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
+ ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret >= 0)
wdata->high_speed = speed;
break;
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index abb20db2b443..d00391418d1a 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -28,14 +28,14 @@ static int wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
__u8 *buf;
int ret;
- if (!hdev->hid_output_raw_report)
+ if (!hdev->ll_driver->output_report)
return -ENODEV;
buf = kmemdup(buffer, count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = hdev->hid_output_raw_report(hdev, buf, count, HID_OUTPUT_REPORT);
+ ret = hid_hw_output_report(hdev, buf, count);
kfree(buf);
return ret;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index ab24ce2eb28f..9c2d7c23f296 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -123,10 +123,6 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
dev = hidraw_table[minor]->hid;
- if (!dev->hid_output_raw_report) {
- ret = -ENODEV;
- goto out;
- }
if (count > HID_MAX_BUFFER_SIZE) {
hid_warn(dev, "pid %d passed too large report\n",
@@ -153,7 +149,21 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
goto out_free;
}
- ret = dev->hid_output_raw_report(dev, buf, count, report_type);
+ if ((report_type == HID_OUTPUT_REPORT) &&
+ !(dev->quirks & HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP)) {
+ ret = hid_hw_output_report(dev, buf, count);
+ /*
+ * Compatibility with the old implementations of USB-HID and I2C-HID:
+ * if the device does not support receiving output reports on an
+ * interrupt endpoint, fall back to the SET_REPORT HID command.
+ */
+ if (ret != -ENOSYS)
+ goto out_free;
+ }
+
+ ret = hid_hw_raw_request(dev, buf[0], buf, count, report_type,
+ HID_REQ_SET_REPORT);
+
out_free:
kfree(buf);
out:
@@ -189,7 +199,7 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
dev = hidraw_table[minor]->hid;
- if (!dev->hid_get_raw_report) {
+ if (!dev->ll_driver->raw_request) {
ret = -ENODEV;
goto out;
}
@@ -216,14 +226,15 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
/*
* Read the first byte from the user. This is the report number,
- * which is passed to dev->hid_get_raw_report().
+ * which is passed to hid_hw_raw_request().
*/
if (copy_from_user(&report_number, buffer, 1)) {
ret = -EFAULT;
goto out_free;
}
- ret = dev->hid_get_raw_report(dev, report_number, buf, count, report_type);
+ ret = hid_hw_raw_request(dev, report_number, buf, count, report_type,
+ HID_REQ_GET_REPORT);
if (ret < 0)
goto out_free;
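
Seen from user space, the conventions enforced here are unchanged: byte 0 of the buffer handed to a hidraw node is the report number (0 if the device uses unnumbered reports), and feature reports go through the HIDIOCSFEATURE/HIDIOCGFEATURE ioctls. A minimal hypothetical example (the device node and report layout are invented for illustration):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

int main(void)
{
	unsigned char buf[5];
	int fd = open("/dev/hidraw0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Output report: routed to hid_hw_output_report(), or to the
	 * SET_REPORT fallback implemented above. */
	memset(buf, 0, sizeof(buf));
	buf[0] = 0x02;		/* report number */
	buf[1] = 0xff;		/* payload */
	if (write(fd, buf, sizeof(buf)) < 0)
		perror("write");

	/* Feature report: always a SET_REPORT control request */
	buf[0] = 0x03;
	if (ioctl(fd, HIDIOCSFEATURE(sizeof(buf)), buf) < 0)
		perror("HIDIOCSFEATURE");

	close(fd);
	return 0;
}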
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 42eebd14de1f..b50860db92f1 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -25,6 +25,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/err.h>
@@ -256,18 +257,27 @@ static int i2c_hid_get_report(struct i2c_client *client, u8 reportType,
return 0;
}
-static int i2c_hid_set_report(struct i2c_client *client, u8 reportType,
- u8 reportID, unsigned char *buf, size_t data_len)
+/**
+ * i2c_hid_set_or_send_report: forward an incoming report to the device
+ * @client: the i2c_client of the device
+ * @reportType: 0x03 for HID_FEATURE_REPORT; 0x02 for HID_OUTPUT_REPORT
+ * @reportID: the report ID
+ * @buf: the actual data to transfer, without the report ID
+ * @len: size of buf
+ * @use_data: true: use SET_REPORT HID command, false: send plain OUTPUT report
+ */
+static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
+ u8 reportID, unsigned char *buf, size_t data_len, bool use_data)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
u8 *args = ihid->argsbuf;
- const struct i2c_hid_cmd * hidcmd = &hid_set_report_cmd;
+ const struct i2c_hid_cmd *hidcmd;
int ret;
u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
- /* hidraw already checked that data_len < HID_MAX_BUFFER_SIZE */
+ /* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
u16 size = 2 /* size */ +
(reportID ? 1 : 0) /* reportID */ +
data_len /* buf */;
@@ -278,6 +288,9 @@ static int i2c_hid_set_report(struct i2c_client *client, u8 reportType,
i2c_hid_dbg(ihid, "%s\n", __func__);
+ if (!use_data && maxOutputLength == 0)
+ return -ENOSYS;
+
if (reportID >= 0x0F) {
args[index++] = reportID;
reportID = 0x0F;
@@ -287,9 +300,10 @@ static int i2c_hid_set_report(struct i2c_client *client, u8 reportType,
* use the data register for feature reports or if the device does not
* support the output register
*/
- if (reportType == 0x03 || maxOutputLength == 0) {
+ if (use_data) {
args[index++] = dataRegister & 0xFF;
args[index++] = dataRegister >> 8;
+ hidcmd = &hid_set_report_cmd;
} else {
args[index++] = outputRegister & 0xFF;
args[index++] = outputRegister >> 8;
@@ -454,10 +468,18 @@ static void i2c_hid_init_reports(struct hid_device *hid)
return;
}
+ /*
+ * The device must be powered on while we fetch initial reports
+ * from it.
+ */
+ pm_runtime_get_sync(&client->dev);
+
list_for_each_entry(report,
&hid->report_enum[HID_FEATURE_REPORT].report_list, list)
i2c_hid_init_report(report, inbuf, ihid->bufsize);
+ pm_runtime_put(&client->dev);
+
kfree(inbuf);
}
@@ -550,7 +572,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
}
static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
- size_t count, unsigned char report_type)
+ size_t count, unsigned char report_type, bool use_data)
{
struct i2c_client *client = hid->driver_data;
int report_id = buf[0];
@@ -564,9 +586,9 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
count--;
}
- ret = i2c_hid_set_report(client,
+ ret = i2c_hid_set_or_send_report(client,
report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
- report_id, buf, count);
+ report_id, buf, count, use_data);
if (report_id && ret >= 0)
ret++; /* add report_id to the number of transfered bytes */
@@ -574,34 +596,27 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
return ret;
}
-static void i2c_hid_request(struct hid_device *hid, struct hid_report *rep,
- int reqtype)
+static int i2c_hid_output_report(struct hid_device *hid, __u8 *buf,
+ size_t count)
{
- struct i2c_client *client = hid->driver_data;
- char *buf;
- int ret;
- int len = i2c_hid_get_report_length(rep) - 2;
-
- buf = hid_alloc_report_buf(rep, GFP_KERNEL);
- if (!buf)
- return;
+ return i2c_hid_output_raw_report(hid, buf, count, HID_OUTPUT_REPORT,
+ false);
+}
+static int i2c_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
+{
switch (reqtype) {
case HID_REQ_GET_REPORT:
- ret = i2c_hid_get_raw_report(hid, rep->id, buf, len, rep->type);
- if (ret < 0)
- dev_err(&client->dev, "%s: unable to get report: %d\n",
- __func__, ret);
- else
- hid_input_report(hid, rep->type, buf, ret, 0);
- break;
+ return i2c_hid_get_raw_report(hid, reportnum, buf, len, rtype);
case HID_REQ_SET_REPORT:
- hid_output_report(rep, buf);
- i2c_hid_output_raw_report(hid, buf, len, rep->type);
- break;
+ if (buf[0] != reportnum)
+ return -EINVAL;
+ return i2c_hid_output_raw_report(hid, buf, len, rtype, true);
+ default:
+ return -EIO;
}
-
- kfree(buf);
}
static int i2c_hid_parse(struct hid_device *hid)
@@ -703,8 +718,8 @@ static int i2c_hid_open(struct hid_device *hid)
mutex_lock(&i2c_hid_open_mut);
if (!hid->open++) {
- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
- if (ret) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
hid->open--;
goto done;
}
@@ -712,7 +727,7 @@ static int i2c_hid_open(struct hid_device *hid)
}
done:
mutex_unlock(&i2c_hid_open_mut);
- return ret;
+ return ret < 0 ? ret : 0;
}
static void i2c_hid_close(struct hid_device *hid)
@@ -729,7 +744,7 @@ static void i2c_hid_close(struct hid_device *hid)
clear_bit(I2C_HID_STARTED, &ihid->flags);
/* Save some power */
- i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ pm_runtime_put(&client->dev);
}
mutex_unlock(&i2c_hid_open_mut);
}
@@ -738,19 +753,18 @@ static int i2c_hid_power(struct hid_device *hid, int lvl)
{
struct i2c_client *client = hid->driver_data;
struct i2c_hid *ihid = i2c_get_clientdata(client);
- int ret = 0;
i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
switch (lvl) {
case PM_HINT_FULLON:
- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ pm_runtime_get_sync(&client->dev);
break;
case PM_HINT_NORMAL:
- ret = i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ pm_runtime_put(&client->dev);
break;
}
- return ret;
+ return 0;
}
static struct hid_ll_driver i2c_hid_ll_driver = {
@@ -760,7 +774,8 @@ static struct hid_ll_driver i2c_hid_ll_driver = {
.open = i2c_hid_open,
.close = i2c_hid_close,
.power = i2c_hid_power,
- .request = i2c_hid_request,
+ .output_report = i2c_hid_output_report,
+ .raw_request = i2c_hid_raw_request,
};
static int i2c_hid_init_irq(struct i2c_client *client)
@@ -973,13 +988,17 @@ static int i2c_hid_probe(struct i2c_client *client,
if (ret < 0)
goto err;
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
ret = i2c_hid_fetch_hid_descriptor(ihid);
if (ret < 0)
- goto err;
+ goto err_pm;
ret = i2c_hid_init_irq(client);
if (ret < 0)
- goto err;
+ goto err_pm;
hid = hid_allocate_device();
if (IS_ERR(hid)) {
@@ -991,8 +1010,6 @@ static int i2c_hid_probe(struct i2c_client *client,
hid->driver_data = client;
hid->ll_driver = &i2c_hid_ll_driver;
- hid->hid_get_raw_report = i2c_hid_get_raw_report;
- hid->hid_output_raw_report = i2c_hid_output_raw_report;
hid->dev.parent = &client->dev;
ACPI_COMPANION_SET(&hid->dev, ACPI_COMPANION(&client->dev));
hid->bus = BUS_I2C;
@@ -1010,6 +1027,7 @@ static int i2c_hid_probe(struct i2c_client *client,
goto err_mem_free;
}
+ pm_runtime_put(&client->dev);
return 0;
err_mem_free:
@@ -1018,6 +1036,10 @@ err_mem_free:
err_irq:
free_irq(client->irq, ihid);
+err_pm:
+ pm_runtime_put_noidle(&client->dev);
+ pm_runtime_disable(&client->dev);
+
err:
i2c_hid_free_buffers(ihid);
kfree(ihid);
@@ -1029,6 +1051,11 @@ static int i2c_hid_remove(struct i2c_client *client)
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid;
+ pm_runtime_get_sync(&client->dev);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
hid = ihid->hid;
hid_destroy_device(hid);
@@ -1074,7 +1101,31 @@ static int i2c_hid_resume(struct device *dev)
}
#endif
-static SIMPLE_DEV_PM_OPS(i2c_hid_pm, i2c_hid_suspend, i2c_hid_resume);
+#ifdef CONFIG_PM_RUNTIME
+static int i2c_hid_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ disable_irq(client->irq);
+ return 0;
+}
+
+static int i2c_hid_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ enable_irq(client->irq);
+ i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops i2c_hid_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
+ SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
+ NULL)
+};
static const struct i2c_device_id i2c_hid_id_table[] = {
{ "hid", 0 },
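
The pm_runtime_* calls scattered through the i2c-hid changes above follow the standard runtime PM usage pattern; condensed into a hedged skeleton (the example_* function names are placeholders, only the pm_runtime_* API is real):

#include <linux/pm_runtime.h>

static int example_probe(struct device *dev)
{
	/* Mark the device active and enable runtime PM before any I/O. */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/* ... initialize the hardware while the usage count is held ... */

	pm_runtime_put(dev);		/* allow runtime suspend when idle */
	return 0;
}

static int example_do_io(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* resume if suspended */

	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	/* ... talk to the hardware ... */
	pm_runtime_put(dev);
	return 0;
}

static void example_remove(struct device *dev)
{
	pm_runtime_get_sync(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_put_noidle(dev);
}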
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index cedc6da93c19..0d078c32db4f 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -244,12 +244,35 @@ static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
return count;
}
+static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
+ size_t count)
+{
+ return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
+}
+
+static int uhid_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
+{
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ return uhid_hid_get_raw(hid, reportnum, buf, len, rtype);
+ case HID_REQ_SET_REPORT:
+ /* TODO: implement proper SET_REPORT functionality */
+ return -ENOSYS;
+ default:
+ return -EIO;
+ }
+}
+
static struct hid_ll_driver uhid_hid_driver = {
.start = uhid_hid_start,
.stop = uhid_hid_stop,
.open = uhid_hid_open,
.close = uhid_hid_close,
.parse = uhid_hid_parse,
+ .output_report = uhid_hid_output_report,
+ .raw_request = uhid_raw_request,
};
#ifdef CONFIG_COMPAT
@@ -377,8 +400,6 @@ static int uhid_dev_create(struct uhid_device *uhid,
hid->uniq[63] = 0;
hid->ll_driver = &uhid_hid_driver;
- hid->hid_get_raw_report = uhid_hid_get_raw;
- hid->hid_output_raw_report = uhid_hid_output_raw;
hid->bus = ev->u.create.bus;
hid->vendor = ev->u.create.vendor;
hid->product = ev->u.create.product;
@@ -407,6 +428,67 @@ err_free:
return ret;
}
+static int uhid_dev_create2(struct uhid_device *uhid,
+ const struct uhid_event *ev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ if (uhid->running)
+ return -EALREADY;
+
+ uhid->rd_size = ev->u.create2.rd_size;
+ if (uhid->rd_size <= 0 || uhid->rd_size > HID_MAX_DESCRIPTOR_SIZE)
+ return -EINVAL;
+
+ uhid->rd_data = kmalloc(uhid->rd_size, GFP_KERNEL);
+ if (!uhid->rd_data)
+ return -ENOMEM;
+
+ memcpy(uhid->rd_data, ev->u.create2.rd_data, uhid->rd_size);
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid)) {
+ ret = PTR_ERR(hid);
+ goto err_free;
+ }
+
+ strncpy(hid->name, ev->u.create2.name, 127);
+ hid->name[127] = 0;
+ strncpy(hid->phys, ev->u.create2.phys, 63);
+ hid->phys[63] = 0;
+ strncpy(hid->uniq, ev->u.create2.uniq, 63);
+ hid->uniq[63] = 0;
+
+ hid->ll_driver = &uhid_hid_driver;
+ hid->bus = ev->u.create2.bus;
+ hid->vendor = ev->u.create2.vendor;
+ hid->product = ev->u.create2.product;
+ hid->version = ev->u.create2.version;
+ hid->country = ev->u.create2.country;
+ hid->driver_data = uhid;
+ hid->dev.parent = uhid_misc.this_device;
+
+ uhid->hid = hid;
+ uhid->running = true;
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ hid_err(hid, "Cannot register HID device\n");
+ goto err_hid;
+ }
+
+ return 0;
+
+err_hid:
+ hid_destroy_device(hid);
+ uhid->hid = NULL;
+ uhid->running = false;
+err_free:
+ kfree(uhid->rd_data);
+ return ret;
+}
+
static int uhid_dev_destroy(struct uhid_device *uhid)
{
if (!uhid->running)
@@ -435,6 +517,17 @@ static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
return 0;
}
+static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
+{
+ if (!uhid->running)
+ return -EINVAL;
+
+ hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
+ min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);
+
+ return 0;
+}
+
static int uhid_dev_feature_answer(struct uhid_device *uhid,
struct uhid_event *ev)
{
@@ -571,12 +664,18 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
case UHID_CREATE:
ret = uhid_dev_create(uhid, &uhid->input_buf);
break;
+ case UHID_CREATE2:
+ ret = uhid_dev_create2(uhid, &uhid->input_buf);
+ break;
case UHID_DESTROY:
ret = uhid_dev_destroy(uhid);
break;
case UHID_INPUT:
ret = uhid_dev_input(uhid, &uhid->input_buf);
break;
+ case UHID_INPUT2:
+ ret = uhid_dev_input2(uhid, &uhid->input_buf);
+ break;
case UHID_FEATURE_ANSWER:
ret = uhid_dev_feature_answer(uhid, &uhid->input_buf);
break;
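
From the daemon side, UHID_CREATE2 and UHID_INPUT2 are used like the older UHID_CREATE/UHID_INPUT requests, except that the report descriptor and the input data travel inline in the event instead of behind a user pointer. A hypothetical user-space sketch (the descriptor bytes and IDs are placeholders):

#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/input.h>
#include <linux/uhid.h>

static int uhid_write(int fd, const struct uhid_event *ev)
{
	return write(fd, ev, sizeof(*ev)) == (ssize_t)sizeof(*ev) ? 0 : -1;
}

int main(void)
{
	/* Placeholder descriptor: one vendor-defined 2-byte input report. */
	static const unsigned char rdesc[] = {
		0x06, 0x00, 0xff,	/* Usage Page (Vendor Defined) */
		0x09, 0x01,		/* Usage (1) */
		0xa1, 0x01,		/* Collection (Application) */
		0x15, 0x00,		/*   Logical Minimum (0) */
		0x26, 0xff, 0x00,	/*   Logical Maximum (255) */
		0x75, 0x08,		/*   Report Size (8) */
		0x95, 0x02,		/*   Report Count (2) */
		0x09, 0x01,		/*   Usage (1) */
		0x81, 0x02,		/*   Input (Data, Variable, Absolute) */
		0xc0			/* End Collection */
	};
	struct uhid_event ev;
	int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;

	/* UHID_CREATE2: the descriptor is copied straight out of the event. */
	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_CREATE2;
	strcpy((char *)ev.u.create2.name, "example-uhid-device");
	ev.u.create2.rd_size = sizeof(rdesc);
	memcpy(ev.u.create2.rd_data, rdesc, sizeof(rdesc));
	ev.u.create2.bus = BUS_USB;
	ev.u.create2.vendor = 0x1234;
	ev.u.create2.product = 0x5678;
	if (uhid_write(fd, &ev))
		return 1;

	/* UHID_INPUT2: inject a 2-byte input report, again with inline data. */
	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_INPUT2;
	ev.u.input2.size = 2;
	ev.u.input2.data[0] = 0x01;
	ev.u.input2.data[1] = 0x02;
	uhid_write(fd, &ev);

	close(fd);
	return 0;
}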
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 44df131d390a..7b88f4cb9902 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -884,52 +884,66 @@ static int usbhid_get_raw_report(struct hid_device *hid,
return ret;
}
-static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count,
- unsigned char report_type)
+static int usbhid_set_raw_report(struct hid_device *hid, unsigned int reportnum,
+ __u8 *buf, size_t count, unsigned char rtype)
{
struct usbhid_device *usbhid = hid->driver_data;
struct usb_device *dev = hid_to_usb_dev(hid);
struct usb_interface *intf = usbhid->intf;
struct usb_host_interface *interface = intf->cur_altsetting;
- int ret;
+ int ret, skipped_report_id = 0;
- if (usbhid->urbout && report_type != HID_FEATURE_REPORT) {
- int actual_length;
- int skipped_report_id = 0;
+ /* Byte 0 is the report number. Report data starts at byte 1. */
+ if ((rtype == HID_OUTPUT_REPORT) &&
+ (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORT_ID))
+ buf[0] = 0;
+ else
+ buf[0] = reportnum;
+
+ if (buf[0] == 0x0) {
+ /* Don't send the Report ID */
+ buf++;
+ count--;
+ skipped_report_id = 1;
+ }
- if (buf[0] == 0x0) {
- /* Don't send the Report ID */
- buf++;
- count--;
- skipped_report_id = 1;
- }
- ret = usb_interrupt_msg(dev, usbhid->urbout->pipe,
- buf, count, &actual_length,
- USB_CTRL_SET_TIMEOUT);
- /* return the number of bytes transferred */
- if (ret == 0) {
- ret = actual_length;
- /* count also the report id */
- if (skipped_report_id)
- ret++;
- }
- } else {
- int skipped_report_id = 0;
- int report_id = buf[0];
- if (buf[0] == 0x0) {
- /* Don't send the Report ID */
- buf++;
- count--;
- skipped_report_id = 1;
- }
- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
HID_REQ_SET_REPORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- ((report_type + 1) << 8) | report_id,
+ ((rtype + 1) << 8) | reportnum,
interface->desc.bInterfaceNumber, buf, count,
USB_CTRL_SET_TIMEOUT);
- /* count also the report id, if this was a numbered report. */
- if (ret > 0 && skipped_report_id)
+ /* count also the report id, if this was a numbered report. */
+ if (ret > 0 && skipped_report_id)
+ ret++;
+
+ return ret;
+}
+
+static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ struct usb_device *dev = hid_to_usb_dev(hid);
+ int actual_length, skipped_report_id = 0, ret;
+
+ if (!usbhid->urbout)
+ return -ENOSYS;
+
+ if (buf[0] == 0x0) {
+ /* Don't send the Report ID */
+ buf++;
+ count--;
+ skipped_report_id = 1;
+ }
+
+ ret = usb_interrupt_msg(dev, usbhid->urbout->pipe,
+ buf, count, &actual_length,
+ USB_CTRL_SET_TIMEOUT);
+ /* return the number of bytes transferred */
+ if (ret == 0) {
+ ret = actual_length;
+ /* count also the report id */
+ if (skipped_report_id)
ret++;
}
@@ -1200,6 +1214,20 @@ static void usbhid_request(struct hid_device *hid, struct hid_report *rep, int r
}
}
+static int usbhid_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
+{
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ return usbhid_get_raw_report(hid, reportnum, buf, len, rtype);
+ case HID_REQ_SET_REPORT:
+ return usbhid_set_raw_report(hid, reportnum, buf, len, rtype);
+ default:
+ return -EIO;
+ }
+}
+
static int usbhid_idle(struct hid_device *hid, int report, int idle,
int reqtype)
{
@@ -1223,6 +1251,8 @@ static struct hid_ll_driver usb_hid_driver = {
.power = usbhid_power,
.request = usbhid_request,
.wait = usbhid_wait_io,
+ .raw_request = usbhid_raw_request,
+ .output_report = usbhid_output_report,
.idle = usbhid_idle,
};
@@ -1253,8 +1283,6 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
usb_set_intfdata(intf, hid);
hid->ll_driver = &usb_hid_driver;
- hid->hid_get_raw_report = usbhid_get_raw_report;
- hid->hid_output_raw_report = usbhid_output_raw_report;
hid->ff_init = hid_pidff_init;
#ifdef CONFIG_USB_HIDDEV
hid->hiddev_connect = hiddev_connect;
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index 0a74b5661186..5e4dfa4cfe22 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
channel_mgmt.o ring_buffer.o
-hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o
+hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 69ea36f07b4d..602ca86a6488 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
+#include <linux/uio.h>
#include "hyperv_vmbus.h"
@@ -554,14 +555,14 @@ EXPORT_SYMBOL_GPL(vmbus_close);
*
* Mainly used by Hyper-V drivers.
*/
-int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
+int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u64 requestid,
enum vmbus_packet_type type, u32 flags)
{
struct vmpacket_descriptor desc;
u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
- struct scatterlist bufferlist[3];
+ struct kvec bufferlist[3];
u64 aligned_data = 0;
int ret;
bool signal = false;
@@ -575,11 +576,12 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
desc.len8 = (u16)(packetlen_aligned >> 3);
desc.trans_id = requestid;
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, sizeof(struct vmpacket_descriptor));
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
+ bufferlist[0].iov_base = &desc;
+ bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
@@ -605,7 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
- struct scatterlist bufferlist[3];
+ struct kvec bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
@@ -637,11 +639,12 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
desc.range[i].pfn = pagebuffers[i].pfn;
}
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, descsize);
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
+ bufferlist[0].iov_base = &desc;
+ bufferlist[0].iov_len = descsize;
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
@@ -665,7 +668,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
- struct scatterlist bufferlist[3];
+ struct kvec bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
@@ -700,11 +703,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
pfncount * sizeof(u64));
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, descsize);
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
+ bufferlist[0].iov_base = &desc;
+ bufferlist[0].iov_len = descsize;
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 7e17a5495e02..7e6d78dc9437 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1171,7 +1171,8 @@ static int dm_thread_func(void *dm_dev)
int t;
while (!kthread_should_stop()) {
- t = wait_for_completion_timeout(&dm_device.config_event, 1*HZ);
+ t = wait_for_completion_interruptible_timeout(
+ &dm_device.config_event, 1*HZ);
/*
* The host expects us to post information on the memory
* pressure every second.
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
new file mode 100644
index 000000000000..eaaa3d843b80
--- /dev/null
+++ b/drivers/hv/hv_fcopy.c
@@ -0,0 +1,414 @@
+/*
+ * An implementation of file copy service.
+ *
+ * Copyright (C) 2014, Microsoft, Inc.
+ *
+ * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/nls.h>
+#include <linux/workqueue.h>
+#include <linux/cdev.h>
+#include <linux/hyperv.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include "hyperv_vmbus.h"
+
+#define WIN8_SRV_MAJOR 1
+#define WIN8_SRV_MINOR 1
+#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+
+/*
+ * Global state maintained for transaction that is being processed.
+ * For a class of integration services, including the "file copy service",
+ * the specified protocol is a "request/response" protocol which means that
+ * there can only be a single outstanding transaction from the host at any
+ * given point in time. We use this to simplify memory management in this
+ * driver - we cache and process only one message at a time.
+ *
+ * While the request/response protocol is guaranteed by the host, we further
+ * ensure this by serializing packet processing in this driver - we do not
+ * read additional packets from the VMBus until the current packet is fully
+ * handled.
+ *
+ * The transaction "active" state is set when we receive a request from the
+ * host, and we clean up this state when the transaction is completed -
+ * that is, when we send our response back. When the transaction active state is
+ * set, we defer handling incoming packets.
+ */
+
+static struct {
+ bool active; /* transaction status - active or not */
+ int recv_len; /* number of bytes received. */
+ struct hv_fcopy_hdr *fcopy_msg; /* current message */
+ struct hv_start_fcopy message; /* sent to daemon */
+ struct vmbus_channel *recv_channel; /* chn we got the request */
+ u64 recv_req_id; /* request ID. */
+ void *fcopy_context; /* for the channel callback */
+ struct semaphore read_sema;
+} fcopy_transaction;
+
+static bool opened; /* currently device opened */
+
+/*
+ * Before we can accept copy messages from the host, we need
+ * to handshake with the user level daemon. This state tracks
+ * if we are in the handshake phase.
+ */
+static bool in_hand_shake = true;
+static void fcopy_send_data(void);
+static void fcopy_respond_to_host(int error);
+static void fcopy_work_func(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(fcopy_work, fcopy_work_func);
+static u8 *recv_buffer;
+
+static void fcopy_work_func(struct work_struct *dummy)
+{
+ /*
+ * If the timer fires, the user-mode component has not responded;
+ * process the pending transaction.
+ */
+ fcopy_respond_to_host(HV_E_FAIL);
+}
+
+static int fcopy_handle_handshake(u32 version)
+{
+ switch (version) {
+ case FCOPY_CURRENT_VERSION:
+ break;
+ default:
+ /*
+ * For now we will fail the registration.
+ * If and when we have multiple versions to
+ * deal with, we will be backward compatible.
+ * We will add this code when needed.
+ */
+ return -EINVAL;
+ }
+ pr_info("FCP: user-mode registering done. Daemon version: %d\n",
+ version);
+ fcopy_transaction.active = false;
+ if (fcopy_transaction.fcopy_context)
+ hv_fcopy_onchannelcallback(fcopy_transaction.fcopy_context);
+ in_hand_shake = false;
+ return 0;
+}
+
+static void fcopy_send_data(void)
+{
+ struct hv_start_fcopy *smsg_out = &fcopy_transaction.message;
+ int operation = fcopy_transaction.fcopy_msg->operation;
+ struct hv_start_fcopy *smsg_in;
+
+ /*
+ * The strings sent from the host are encoded in utf16;
+ * convert them to utf8 strings.
+ * The host assures us that the utf16 strings will not exceed
+ * the max lengths specified. We will however, reserve room
+ * the max lengths specified. We will, however, reserve room
+ * for the string terminating character - in the utf16s_to_utf8s()
+ * function we limit the size of the buffer where the converted
+ * string is placed to W_MAX_PATH - 1 to guarantee
+ */
+
+ switch (operation) {
+ case START_FILE_COPY:
+ memset(smsg_out, 0, sizeof(struct hv_start_fcopy));
+ smsg_out->hdr.operation = operation;
+ smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
+
+ utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)smsg_out->file_name, W_MAX_PATH - 1);
+
+ utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)smsg_out->path_name, W_MAX_PATH - 1);
+
+ smsg_out->copy_flags = smsg_in->copy_flags;
+ smsg_out->file_size = smsg_in->file_size;
+ break;
+
+ default:
+ break;
+ }
+ up(&fcopy_transaction.read_sema);
+ return;
+}
+
+/*
+ * Send a response back to the host.
+ */
+
+static void
+fcopy_respond_to_host(int error)
+{
+ struct icmsg_hdr *icmsghdr;
+ u32 buf_len;
+ struct vmbus_channel *channel;
+ u64 req_id;
+
+ /*
+ * Copy the global state for completing the transaction. Only one
+ * transaction can be active at a time; this is guaranteed by the
+ * file copy protocol implemented by the host and further enforced
+ * by the "transaction active" state we maintain.
+ */
+
+ buf_len = fcopy_transaction.recv_len;
+ channel = fcopy_transaction.recv_channel;
+ req_id = fcopy_transaction.recv_req_id;
+
+ fcopy_transaction.active = false;
+
+ icmsghdr = (struct icmsg_hdr *)
+ &recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (channel->onchannel_callback == NULL)
+ /*
+ * We have raced with the util driver being unloaded;
+ * silently return.
+ */
+ return;
+
+ icmsghdr->status = error;
+ icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+ vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+void hv_fcopy_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+ struct hv_fcopy_hdr *fcopy_msg;
+ struct icmsg_hdr *icmsghdr;
+ struct icmsg_negotiate *negop = NULL;
+ int util_fw_version;
+ int fcopy_srv_version;
+
+ if (fcopy_transaction.active) {
+ /*
+ * We will defer processing this callback until
+ * the current transaction is complete.
+ */
+ fcopy_transaction.fcopy_context = context;
+ return;
+ }
+
+ vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ &requestid);
+ if (recvlen <= 0)
+ return;
+
+ icmsghdr = (struct icmsg_hdr *)&recv_buffer[
+ sizeof(struct vmbuspipe_hdr)];
+ if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ util_fw_version = UTIL_FW_VERSION;
+ fcopy_srv_version = WIN8_SRV_VERSION;
+ vmbus_prep_negotiate_resp(icmsghdr, negop, recv_buffer,
+ util_fw_version, fcopy_srv_version);
+ } else {
+ fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+
+ /*
+ * Stash away this global state for completing the
+ * transaction; note transactions are serialized.
+ */
+
+ fcopy_transaction.active = true;
+ fcopy_transaction.recv_len = recvlen;
+ fcopy_transaction.recv_channel = channel;
+ fcopy_transaction.recv_req_id = requestid;
+ fcopy_transaction.fcopy_msg = fcopy_msg;
+
+ /*
+ * Send the information to the user-level daemon.
+ */
+ fcopy_send_data();
+ schedule_delayed_work(&fcopy_work, 5*HZ);
+ return;
+ }
+ icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+ vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+/*
+ * Create a char device that can support read/write for passing
+ * the payload.
+ */
+
+static ssize_t fcopy_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ void *src;
+ size_t copy_size;
+ int operation;
+
+ /*
+ * Wait until there is something to be read.
+ */
+ if (down_interruptible(&fcopy_transaction.read_sema))
+ return -EINTR;
+
+ /*
+ * The channel may be rescinded; in this case, we will wake up the
+ * thread blocked on the semaphore and use the opened
+ * state to correctly handle this case.
+ */
+ if (!opened)
+ return -ENODEV;
+
+ operation = fcopy_transaction.fcopy_msg->operation;
+
+ if (operation == START_FILE_COPY) {
+ src = &fcopy_transaction.message;
+ copy_size = sizeof(struct hv_start_fcopy);
+ if (count < copy_size)
+ return 0;
+ } else {
+ src = fcopy_transaction.fcopy_msg;
+ copy_size = sizeof(struct hv_do_fcopy);
+ if (count < copy_size)
+ return 0;
+ }
+ if (copy_to_user(buf, src, copy_size))
+ return -EFAULT;
+
+ return copy_size;
+}
+
+static ssize_t fcopy_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int response = 0;
+
+ if (count != sizeof(int))
+ return -EINVAL;
+
+ if (copy_from_user(&response, buf, sizeof(int)))
+ return -EFAULT;
+
+ if (in_hand_shake) {
+ if (fcopy_handle_handshake(response))
+ return -EINVAL;
+ return sizeof(int);
+ }
+
+ /*
+ * Complete the transaction by forwarding the result
+ * to the host. But first, cancel the timeout.
+ */
+ if (cancel_delayed_work_sync(&fcopy_work))
+ fcopy_respond_to_host(response);
+
+ return sizeof(int);
+}
+
+static int fcopy_open(struct inode *inode, struct file *f)
+{
+ /*
+ * The user level daemon that will open this device is
+ * really an extension of this driver. We can have only one
+ * active open at a time.
+ */
+ if (opened)
+ return -EBUSY;
+
+ /*
+ * The daemon is alive; setup the state.
+ */
+ opened = true;
+ return 0;
+}
+
+static int fcopy_release(struct inode *inode, struct file *f)
+{
+ /*
+ * The daemon has exited; reset the state.
+ */
+ in_hand_shake = true;
+ opened = false;
+ return 0;
+}
+
+
+static const struct file_operations fcopy_fops = {
+ .read = fcopy_read,
+ .write = fcopy_write,
+ .release = fcopy_release,
+ .open = fcopy_open,
+};
+
+static struct miscdevice fcopy_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vmbus/hv_fcopy",
+ .fops = &fcopy_fops,
+};
+
+static int fcopy_dev_init(void)
+{
+ return misc_register(&fcopy_misc);
+}
+
+static void fcopy_dev_deinit(void)
+{
+
+ /*
+ * The device is going away - perhaps because the
+ * host has rescinded the channel. Setup state so that
+ * host has rescinded the channel. Set up state so that the
+ * user-level daemon can gracefully exit if it is blocked
+ */
+ opened = false;
+ /*
+ * Signal the semaphore as the device is
+ * going away.
+ */
+ up(&fcopy_transaction.read_sema);
+ misc_deregister(&fcopy_misc);
+}
+
+int hv_fcopy_init(struct hv_util_service *srv)
+{
+ recv_buffer = srv->recv_buffer;
+
+ /*
+ * When this driver loads, the user level daemon that
+ * processes the host requests may not yet be running.
+ * Defer processing channel callbacks until the daemon
+ * has registered.
+ */
+ fcopy_transaction.active = true;
+ sema_init(&fcopy_transaction.read_sema, 0);
+
+ return fcopy_dev_init();
+}
+
+void hv_fcopy_deinit(void)
+{
+ cancel_delayed_work_sync(&fcopy_work);
+ fcopy_dev_deinit();
+}
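
A user-level daemon is expected to sit on the other end of this device; a heavily trimmed sketch of that side of the protocol, assuming the fcopy message definitions exported through linux/hyperv.h and omitting all error handling and the actual file I/O:

#include <unistd.h>
#include <fcntl.h>
#include <linux/hyperv.h>

int main(void)
{
	int fd = open("/dev/vmbus/hv_fcopy", O_RDWR);
	int version = FCOPY_CURRENT_VERSION;
	union {
		struct hv_fcopy_hdr hdr;
		struct hv_start_fcopy start;
		struct hv_do_fcopy copy;
	} msg;

	if (fd < 0)
		return 1;

	/* Handshake: register with the driver before any copy request. */
	if (write(fd, &version, sizeof(version)) != sizeof(version))
		return 1;

	for (;;) {
		int status = 0;		/* 0 on success, HV_E_FAIL otherwise */

		/* Blocks on the read semaphore until the host sends a request. */
		if (read(fd, &msg, sizeof(msg)) <= 0)
			break;

		switch (msg.hdr.operation) {
		case START_FILE_COPY:
			/* create the target file from msg.start ... */
			break;
		case WRITE_TO_FILE:
			/* write msg.copy data at the given offset ... */
			break;
		default:
			break;
		}

		/* One int back completes the transaction before the timeout. */
		if (write(fd, &status, sizeof(status)) != sizeof(status))
			break;
	}

	close(fd);
	return 0;
}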
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 09988b289622..ea852537307e 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -113,7 +113,7 @@ kvp_register(int reg_value)
kvp_msg->kvp_hdr.operation = reg_value;
strcpy(version, HV_DRV_VERSION);
msg->len = sizeof(struct hv_kvp_msg);
- cn_netlink_send(msg, 0, GFP_ATOMIC);
+ cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
kfree(msg);
}
}
@@ -435,7 +435,7 @@ kvp_send_key(struct work_struct *dummy)
}
msg->len = sizeof(struct hv_kvp_msg);
- cn_netlink_send(msg, 0, GFP_ATOMIC);
+ cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
kfree(msg);
return;
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 0c3546224376..34f14fddb666 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -98,7 +98,7 @@ static void vss_send_op(struct work_struct *dummy)
vss_msg->vss_hdr.operation = op;
msg->len = sizeof(struct hv_vss_msg);
- cn_netlink_send(msg, 0, GFP_ATOMIC);
+ cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
kfree(msg);
return;
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 62dfd246b948..dd761806f0e8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -28,6 +28,7 @@
#include <linux/reboot.h>
#include <linux/hyperv.h>
+#include "hyperv_vmbus.h"
#define SD_MAJOR 3
#define SD_MINOR 0
@@ -82,6 +83,12 @@ static struct hv_util_service util_vss = {
.util_deinit = hv_vss_deinit,
};
+static struct hv_util_service util_fcopy = {
+ .util_cb = hv_fcopy_onchannelcallback,
+ .util_init = hv_fcopy_init,
+ .util_deinit = hv_fcopy_deinit,
+};
+
static void perform_shutdown(struct work_struct *dummy)
{
orderly_poweroff(true);
@@ -401,6 +408,10 @@ static const struct hv_vmbus_device_id id_table[] = {
{ HV_VSS_GUID,
.driver_data = (unsigned long)&util_vss
},
+ /* File copy GUID */
+ { HV_FCOPY_GUID,
+ .driver_data = (unsigned long)&util_fcopy
+ },
{ },
};
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index e05517616a06..860134da8039 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -559,8 +559,8 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, void *buffer,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
- struct scatterlist *sglist,
- u32 sgcount, bool *signal);
+ struct kvec *kv_list,
+ u32 kv_count, bool *signal);
int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
u32 buflen);
@@ -669,5 +669,9 @@ int vmbus_set_event(struct vmbus_channel *channel);
void vmbus_on_event(unsigned long data);
+int hv_fcopy_init(struct hv_util_service *);
+void hv_fcopy_deinit(void);
+void hv_fcopy_onchannelcallback(void *);
+
#endif /* _HYPERV_VMBUS_H */
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 26c93cf9f6be..15db66b74141 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
+#include <linux/uio.h>
#include "hyperv_vmbus.h"
@@ -387,23 +388,20 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
*
*/
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct scatterlist *sglist, u32 sgcount, bool *signal)
+ struct kvec *kv_list, u32 kv_count, bool *signal)
{
int i = 0;
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 totalbytes_towrite = 0;
- struct scatterlist *sg;
u32 next_write_location;
u32 old_write;
u64 prev_indices = 0;
unsigned long flags;
- for_each_sg(sglist, sg, sgcount, i)
- {
- totalbytes_towrite += sg->length;
- }
+ for (i = 0; i < kv_count; i++)
+ totalbytes_towrite += kv_list[i].iov_len;
totalbytes_towrite += sizeof(u64);
@@ -427,12 +425,11 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
old_write = next_write_location;
- for_each_sg(sglist, sg, sgcount, i)
- {
+ for (i = 0; i < kv_count; i++) {
next_write_location = hv_copyto_ringbuffer(outring_info,
next_write_location,
- sg_virt(sg),
- sg->length);
+ kv_list[i].iov_base,
+ kv_list[i].iov_len);
}
/* Set previous packet start */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 077bb1bdac34..8e53a3c2607e 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -25,7 +25,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
@@ -44,6 +43,12 @@ static struct tasklet_struct msg_dpc;
static struct completion probe_event;
static int irq;
+struct resource hyperv_mmio = {
+ .name = "hyperv mmio",
+ .flags = IORESOURCE_MEM,
+};
+EXPORT_SYMBOL_GPL(hyperv_mmio);
+
static int vmbus_exists(void)
{
if (hv_acpi_dev == NULL)
@@ -558,9 +563,6 @@ static struct bus_type hv_bus = {
.dev_groups = vmbus_groups,
};
-static const char *driver_name = "hyperv";
-
-
struct onmessage_work_context {
struct work_struct work;
struct hv_message msg;
@@ -619,7 +621,7 @@ static void vmbus_on_msg_dpc(unsigned long data)
}
}
-static irqreturn_t vmbus_isr(int irq, void *dev_id)
+static void vmbus_isr(void)
{
int cpu = smp_processor_id();
void *page_addr;
@@ -629,7 +631,7 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
page_addr = hv_context.synic_event_page[cpu];
if (page_addr == NULL)
- return IRQ_NONE;
+ return;
event = (union hv_synic_event_flags *)page_addr +
VMBUS_MESSAGE_SINT;
@@ -665,28 +667,8 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
/* Check if there are actual msgs to be processed */
- if (msg->header.message_type != HVMSG_NONE) {
- handled = true;
+ if (msg->header.message_type != HVMSG_NONE)
tasklet_schedule(&msg_dpc);
- }
-
- if (handled)
- return IRQ_HANDLED;
- else
- return IRQ_NONE;
-}
-
-/*
- * vmbus interrupt flow handler:
- * vmbus interrupts can concurrently occur on multiple CPUs and
- * can be handled concurrently.
- */
-
-static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
-{
- kstat_incr_irqs_this_cpu(irq, desc);
-
- desc->action->handler(irq, desc->action->dev_id);
}
/*
@@ -715,25 +697,7 @@ static int vmbus_bus_init(int irq)
if (ret)
goto err_cleanup;
- ret = request_irq(irq, vmbus_isr, 0, driver_name, hv_acpi_dev);
-
- if (ret != 0) {
- pr_err("Unable to request IRQ %d\n",
- irq);
- goto err_unregister;
- }
-
- /*
- * Vmbus interrupts can be handled concurrently on
- * different CPUs. Establish an appropriate interrupt flow
- * handler that can support this model.
- */
- irq_set_handler(irq, vmbus_flow_handler);
-
- /*
- * Register our interrupt handler.
- */
- hv_register_vmbus_handler(irq, vmbus_isr);
+ hv_setup_vmbus_irq(vmbus_isr);
ret = hv_synic_alloc();
if (ret)
@@ -753,9 +717,8 @@ static int vmbus_bus_init(int irq)
err_alloc:
hv_synic_free();
- free_irq(irq, hv_acpi_dev);
+ hv_remove_vmbus_irq();
-err_unregister:
bus_unregister(&hv_bus);
err_cleanup:
@@ -886,18 +849,21 @@ void vmbus_device_unregister(struct hv_device *device_obj)
/*
- * VMBUS is an acpi enumerated device. Get the the IRQ information
- * from DSDT.
+ * VMBUS is an acpi enumerated device. Get the information we
+ * need from DSDT.
*/
-static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
+static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+ irq = res->data.irq.interrupts[0];
+ break;
- if (res->type == ACPI_RESOURCE_TYPE_IRQ) {
- struct acpi_resource_irq *irqp;
- irqp = &res->data.irq;
-
- *((unsigned int *)irq) = irqp->interrupts[0];
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ hyperv_mmio.start = res->data.address64.minimum;
+ hyperv_mmio.end = res->data.address64.maximum;
+ break;
}
return AE_OK;
@@ -906,18 +872,34 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
static int vmbus_acpi_add(struct acpi_device *device)
{
acpi_status result;
+ int ret_val = -ENODEV;
hv_acpi_dev = device;
result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
- vmbus_walk_resources, &irq);
+ vmbus_walk_resources, NULL);
- if (ACPI_FAILURE(result)) {
- complete(&probe_event);
- return -ENODEV;
+ if (ACPI_FAILURE(result))
+ goto acpi_walk_err;
+ /*
+ * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
+ * has the mmio ranges. Get that.
+ */
+ if (device->parent) {
+ result = acpi_walk_resources(device->parent->handle,
+ METHOD_NAME__CRS,
+ vmbus_walk_resources, NULL);
+
+ if (ACPI_FAILURE(result))
+ goto acpi_walk_err;
+ if (hyperv_mmio.start && hyperv_mmio.end)
+ request_resource(&iomem_resource, &hyperv_mmio);
}
+ ret_val = 0;
+
+acpi_walk_err:
complete(&probe_event);
- return 0;
+ return ret_val;
}
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
@@ -947,7 +929,6 @@ static int __init hv_acpi_init(void)
/*
* Get irq resources first.
*/
-
ret = acpi_bus_register_driver(&vmbus_acpi_driver);
if (ret)
@@ -978,8 +959,7 @@ cleanup:
static void __exit vmbus_exit(void)
{
-
- free_irq(irq, hv_acpi_dev);
+ hv_remove_vmbus_irq();
vmbus_free_channels();
bus_unregister(&hv_bus);
hv_cleanup();
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5ce43d8dfa98..bc196f49ec53 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -111,22 +111,6 @@ config SENSORS_AD7418
This driver can also be built as a module. If so, the module
will be called ad7418.
-config SENSORS_ADCXX
- tristate "National Semiconductor ADCxxxSxxx"
- depends on SPI_MASTER
- help
- If you say yes here you get support for the National Semiconductor
- ADC<bb><c>S<sss> chip family, where
- * bb is the resolution in number of bits (8, 10, 12)
- * c is the number of channels (1, 2, 4, 8)
- * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500
- kSPS and 101 for 1 MSPS)
-
- Examples : ADC081S101, ADC124S501, ...
-
- This driver can also be built as a module. If so, the module
- will be called adcxx.
-
config SENSORS_ADM1021
tristate "Analog Devices ADM1021 and compatibles"
depends on I2C
@@ -296,8 +280,8 @@ config SENSORS_K10TEMP
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
the AMD Family 10h and all revisions of the AMD Family 11h,
- 12h (Llano), 14h (Brazos), 15h (Bulldozer/Trinity) and
- 16h (Kabini) microarchitectures.
+ 12h (Llano), 14h (Brazos), 15h (Bulldozer/Trinity/Kaveri) and
+ 16h (Kabini/Mullins) microarchitectures.
This driver can also be built as a module. If so, the module
will be called k10temp.
@@ -312,6 +296,31 @@ config SENSORS_FAM15H_POWER
This driver can also be built as a module. If so, the module
will be called fam15h_power.
+config SENSORS_APPLESMC
+ tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
+ depends on INPUT && X86
+ select NEW_LEDS
+ select LEDS_CLASS
+ select INPUT_POLLDEV
+ default n
+ help
+ This driver provides support for the Apple System Management
+ Controller, which provides an accelerometer (Apple Sudden Motion
+ Sensor), light sensors, temperature sensors, keyboard backlight
+ control and fan control.
+
+	  Only Intel-based Apple computers are supported (MacBook Pro,
+	  MacBook, MacMini).
+
+ Data from the different sensors, keyboard backlight control and fan
+ control are accessible via sysfs.
+
+ This driver also provides an absolute input class device, allowing
+ the laptop to act as a pinball machine-esque joystick.
+
+ Say Y here if you have an applicable laptop and want to experience
+ the awesome power of applesmc.
+
config SENSORS_ASB100
tristate "Asus ASB100 Bach"
depends on X86 && I2C
@@ -435,6 +444,12 @@ config SENSORS_F75375S
This driver can also be built as a module. If so, the module
will be called f75375s.
+config SENSORS_MC13783_ADC
+ tristate "Freescale MC13783/MC13892 ADC"
+ depends on MFD_MC13XXX
+ help
+	  Support for the A/D converter on MC13783 and MC13892 PMICs.
+
config SENSORS_FSCHMD
tristate "Fujitsu Siemens Computers sensor chips"
depends on X86 && I2C
@@ -451,26 +466,6 @@ config SENSORS_FSCHMD
This driver can also be built as a module. If so, the module
will be called fschmd.
-config SENSORS_G760A
- tristate "GMT G760A"
- depends on I2C
- help
- If you say yes here you get support for Global Mixed-mode
- Technology Inc G760A fan speed PWM controller chips.
-
- This driver can also be built as a module. If so, the module
- will be called g760a.
-
-config SENSORS_G762
- tristate "GMT G762 and G763"
- depends on I2C
- help
- If you say yes here you get support for Global Mixed-mode
- Technology Inc G762 and G763 fan speed PWM controller chips.
-
- This driver can also be built as a module. If so, the module
- will be called g762.
-
config SENSORS_GL518SM
tristate "Genesys Logic GL518SM"
depends on I2C
@@ -492,6 +487,26 @@ config SENSORS_GL520SM
This driver can also be built as a module. If so, the module
will be called gl520sm.
+config SENSORS_G760A
+ tristate "GMT G760A"
+ depends on I2C
+ help
+ If you say yes here you get support for Global Mixed-mode
+ Technology Inc G760A fan speed PWM controller chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called g760a.
+
+config SENSORS_G762
+ tristate "GMT G762 and G763"
+ depends on I2C
+ help
+ If you say yes here you get support for Global Mixed-mode
+ Technology Inc G762 and G763 fan speed PWM controller chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called g762.
+
config SENSORS_GPIO_FAN
tristate "GPIO fan"
depends on GPIOLIB
@@ -511,24 +526,6 @@ config SENSORS_HIH6130
This driver can also be built as a module. If so, the module
will be called hih6130.
-config SENSORS_HTU21
- tristate "Measurement Specialties HTU21D humidity/temperature sensors"
- depends on I2C
- help
- If you say yes here you get support for the Measurement Specialties
- HTU21D humidity and temperature sensors.
-
- This driver can also be built as a module. If so, the module
- will be called htu21.
-
-config SENSORS_CORETEMP
- tristate "Intel Core/Core2/Atom temperature sensor"
- depends on X86
- help
- If you say yes here you get support for the temperature
- sensor inside your CPU. Most of the family 6 CPUs
- are supported. Check Documentation/hwmon/coretemp for details.
-
config SENSORS_IBMAEM
tristate "IBM Active Energy Manager temperature/power sensors and control"
select IPMI_SI
@@ -566,6 +563,14 @@ config SENSORS_IIO_HWMON
for those channels specified in the map. This map can be provided
either via platform data or the device tree bindings.
+config SENSORS_CORETEMP
+ tristate "Intel Core/Core2/Atom temperature sensor"
+ depends on X86
+ help
+ If you say yes here you get support for the temperature
+ sensor inside your CPU. Most of the family 6 CPUs
+ are supported. Check Documentation/hwmon/coretemp for details.
+
config SENSORS_IT87
tristate "ITE IT87xx and compatibles"
depends on !PPC
@@ -614,6 +619,219 @@ config SENSORS_LINEAGE
This driver can also be built as a module. If so, the module
will be called lineage-pem.
+config SENSORS_LTC2945
+ tristate "Linear Technology LTC2945"
+ depends on I2C
+ select REGMAP_I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC2945
+ I2C System Monitor.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2945.
+
+config SENSORS_LTC4151
+ tristate "Linear Technology LTC4151"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4151
+ High Voltage I2C Current and Voltage Monitor interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4151.
+
+config SENSORS_LTC4215
+ tristate "Linear Technology LTC4215"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4215
+ Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4215.
+
+config SENSORS_LTC4222
+ tristate "Linear Technology LTC4222"
+ depends on I2C
+ select REGMAP_I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4222
+ Dual Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4222.
+
+config SENSORS_LTC4245
+ tristate "Linear Technology LTC4245"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4245
+ Multiple Supply Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4245.
+
+config SENSORS_LTC4260
+ tristate "Linear Technology LTC4260"
+ depends on I2C
+ select REGMAP_I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4260
+ Positive Voltage Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4260.
+
+config SENSORS_LTC4261
+ tristate "Linear Technology LTC4261"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4261
+ Negative Voltage Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4261.
+
+config SENSORS_MAX1111
+ tristate "Maxim MAX1111 Serial 8-bit ADC chip and compatibles"
+ depends on SPI_MASTER
+ help
+ Say y here to support Maxim's MAX1110, MAX1111, MAX1112, and MAX1113
+ ADC chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max1111.
+
+config SENSORS_MAX16065
+ tristate "Maxim MAX16065 System Manager and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for hardware monitoring
+ capabilities of the following Maxim System Manager chips.
+ MAX16065
+ MAX16066
+ MAX16067
+ MAX16068
+ MAX16070
+ MAX16071
+
+ This driver can also be built as a module. If so, the module
+ will be called max16065.
+
+config SENSORS_MAX1619
+ tristate "Maxim MAX1619 sensor chip"
+ depends on I2C
+ help
+ If you say yes here you get support for the MAX1619 sensor chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called max1619.
+
+config SENSORS_MAX1668
+ tristate "Maxim MAX1668 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for MAX1668, MAX1989 and
+ MAX1805 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max1668.
+
+config SENSORS_MAX197
+ tristate "Maxim MAX197 and compatibles"
+ help
+ Support for the Maxim MAX197 A/D converter.
+ Support will include, but not be limited to, the MAX197 and MAX199.
+
+ This driver can also be built as a module. If so, the module
+ will be called max197.
+
+config SENSORS_MAX6639
+ tristate "Maxim MAX6639 sensor chip"
+ depends on I2C
+ help
+ If you say yes here you get support for the MAX6639
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6639.
+
+config SENSORS_MAX6642
+ tristate "Maxim MAX6642 sensor chip"
+ depends on I2C
+ help
+ If you say yes here you get support for the MAX6642 sensor chip.
+ The MAX6642 is an SMBus-Compatible Remote/Local Temperature Sensor
+ with Overtemperature Alarm from Maxim.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6642.
+
+config SENSORS_MAX6650
+ tristate "Maxim MAX6650 sensor chip"
+ depends on I2C
+ help
+ If you say yes here you get support for the MAX6650 / MAX6651
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6650.
+
+config SENSORS_MAX6697
+ tristate "Maxim MAX6697 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for MAX6581, MAX6602, MAX6622,
+ MAX6636, MAX6689, MAX6693, MAX6694, MAX6697, MAX6698, and MAX6699
+ temperature sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6697.
+
+config SENSORS_HTU21
+ tristate "Measurement Specialties HTU21D humidity/temperature sensors"
+ depends on I2C
+ help
+ If you say yes here you get support for the Measurement Specialties
+ HTU21D humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called htu21.
+
+config SENSORS_MCP3021
+ tristate "Microchip MCP3021 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for the MCP3021 and MCP3221.
+ The MCP3021 is an A/D converter (ADC) with 10-bit resolution, and
+ the MCP3221 has 12-bit resolution.
+
+ This driver can also be built as a module. If so, the module
+ will be called mcp3021.
+
+config SENSORS_ADCXX
+ tristate "National Semiconductor ADCxxxSxxx"
+ depends on SPI_MASTER
+ help
+ If you say yes here you get support for the National Semiconductor
+ ADC<bb><c>S<sss> chip family, where
+ * bb is the resolution in number of bits (8, 10, 12)
+ * c is the number of channels (1, 2, 4, 8)
+ * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500
+ kSPS and 101 for 1 MSPS)
+
+ Examples : ADC081S101, ADC124S501, ...
+
+ This driver can also be built as a module. If so, the module
+ will be called adcxx.
+
config SENSORS_LM63
tristate "National Semiconductor LM63 and compatibles"
depends on I2C
@@ -776,50 +994,6 @@ config SENSORS_LM93
This driver can also be built as a module. If so, the module
will be called lm93.
-config SENSORS_LTC4151
- tristate "Linear Technology LTC4151"
- depends on I2C
- default n
- help
- If you say yes here you get support for Linear Technology LTC4151
- High Voltage I2C Current and Voltage Monitor interface.
-
- This driver can also be built as a module. If so, the module will
- be called ltc4151.
-
-config SENSORS_LTC4215
- tristate "Linear Technology LTC4215"
- depends on I2C
- default n
- help
- If you say yes here you get support for Linear Technology LTC4215
- Hot Swap Controller I2C interface.
-
- This driver can also be built as a module. If so, the module will
- be called ltc4215.
-
-config SENSORS_LTC4245
- tristate "Linear Technology LTC4245"
- depends on I2C
- default n
- help
- If you say yes here you get support for Linear Technology LTC4245
- Multiple Supply Hot Swap Controller I2C interface.
-
- This driver can also be built as a module. If so, the module will
- be called ltc4245.
-
-config SENSORS_LTC4261
- tristate "Linear Technology LTC4261"
- depends on I2C
- default n
- help
- If you say yes here you get support for Linear Technology LTC4261
- Negative Voltage Hot Swap Controller I2C interface.
-
- This driver can also be built as a module. If so, the module will
- be called ltc4261.
-
config SENSORS_LM95234
tristate "National Semiconductor LM95234"
depends on I2C
@@ -849,125 +1023,33 @@ config SENSORS_LM95245
This driver can also be built as a module. If so, the module
will be called lm95245.
-config SENSORS_MAX1111
- tristate "Maxim MAX1111 Serial 8-bit ADC chip and compatibles"
- depends on SPI_MASTER
- help
- Say y here to support Maxim's MAX1110, MAX1111, MAX1112, and MAX1113
- ADC chips.
-
- This driver can also be built as a module. If so, the module
- will be called max1111.
-
-config SENSORS_MAX16065
- tristate "Maxim MAX16065 System Manager and compatibles"
- depends on I2C
- help
- If you say yes here you get support for hardware monitoring
- capabilities of the following Maxim System Manager chips.
- MAX16065
- MAX16066
- MAX16067
- MAX16068
- MAX16070
- MAX16071
-
- This driver can also be built as a module. If so, the module
- will be called max16065.
-
-config SENSORS_MAX1619
- tristate "Maxim MAX1619 sensor chip"
- depends on I2C
- help
- If you say yes here you get support for MAX1619 sensor chip.
-
- This driver can also be built as a module. If so, the module
- will be called max1619.
-
-config SENSORS_MAX1668
- tristate "Maxim MAX1668 and compatibles"
- depends on I2C
- help
- If you say yes here you get support for MAX1668, MAX1989 and
- MAX1805 chips.
-
- This driver can also be built as a module. If so, the module
- will be called max1668.
-
-config SENSORS_MAX197
- tristate "Maxim MAX197 and compatibles"
- help
- Support for the Maxim MAX197 A/D converter.
- Support will include, but not be limited to, MAX197, and MAX199.
-
- This driver can also be built as a module. If so, the module
- will be called max197.
-
-config SENSORS_MAX6639
- tristate "Maxim MAX6639 sensor chip"
- depends on I2C
- help
- If you say yes here you get support for the MAX6639
- sensor chips.
-
- This driver can also be built as a module. If so, the module
- will be called max6639.
-
-config SENSORS_MAX6642
- tristate "Maxim MAX6642 sensor chip"
- depends on I2C
- help
- If you say yes here you get support for MAX6642 sensor chip.
- MAX6642 is a SMBus-Compatible Remote/Local Temperature Sensor
- with Overtemperature Alarm from Maxim.
-
- This driver can also be built as a module. If so, the module
- will be called max6642.
-
-config SENSORS_MAX6650
- tristate "Maxim MAX6650 sensor chip"
- depends on I2C
- help
- If you say yes here you get support for the MAX6650 / MAX6651
- sensor chips.
-
- This driver can also be built as a module. If so, the module
- will be called max6650.
-
-config SENSORS_MAX6697
- tristate "Maxim MAX6697 and compatibles"
- depends on I2C
- help
- If you say yes here you get support for MAX6581, MAX6602, MAX6622,
- MAX6636, MAX6689, MAX6693, MAX6694, MAX6697, MAX6698, and MAX6699
- temperature sensor chips.
-
- This driver can also be built as a module. If so, the module
- will be called max6697.
-
-config SENSORS_MCP3021
- tristate "Microchip MCP3021 and compatibles"
- depends on I2C
+config SENSORS_PC87360
+ tristate "National Semiconductor PC87360 family"
+ depends on !PPC
+ select HWMON_VID
help
- If you say yes here you get support for MCP3021 and MCP3221.
- The MCP3021 is a A/D converter (ADC) with 10-bit and the MCP3221
- with 12-bit resolution.
+ If you say yes here you get access to the hardware monitoring
+ functions of the National Semiconductor PC8736x Super-I/O chips.
+ The PC87360, PC87363 and PC87364 only have fan monitoring and
+ control. The PC87365 and PC87366 additionally have voltage and
+ temperature monitoring.
This driver can also be built as a module. If so, the module
- will be called mcp3021.
+ will be called pc87360.
-config SENSORS_NCT6775
- tristate "Nuvoton NCT6775F and compatibles"
+config SENSORS_PC87427
+ tristate "National Semiconductor PC87427"
depends on !PPC
- select HWMON_VID
help
- If you say yes here you get support for the hardware monitoring
- functionality of the Nuvoton NCT6775F, NCT6776F, NCT6779D
- and compatible Super-I/O chips. This driver replaces the
- w83627ehf driver for NCT6775F and NCT6776F.
+ If you say yes here you get access to the hardware monitoring
+ functions of the National Semiconductor PC87427 Super-I/O chip.
+ The chip has two distinct logical devices, one for fan speed
+ monitoring and control, and one for voltage and temperature
+ monitoring. Fan speed monitoring and control are supported, as
+ well as temperature monitoring. Voltages aren't supported yet.
This driver can also be built as a module. If so, the module
- will be called nct6775.
+ will be called pc87427.
config SENSORS_NTC_THERMISTOR
tristate "NTC thermistor support"
@@ -983,33 +1065,18 @@ config SENSORS_NTC_THERMISTOR
This driver can also be built as a module. If so, the module
will be called ntc-thermistor.
-config SENSORS_PC87360
- tristate "National Semiconductor PC87360 family"
+config SENSORS_NCT6775
+ tristate "Nuvoton NCT6775F and compatibles"
depends on !PPC
select HWMON_VID
help
- If you say yes here you get access to the hardware monitoring
- functions of the National Semiconductor PC8736x Super-I/O chips.
- The PC87360, PC87363 and PC87364 only have fan monitoring and
- control. The PC87365 and PC87366 additionally have voltage and
- temperature monitoring.
-
- This driver can also be built as a module. If so, the module
- will be called pc87360.
-
-config SENSORS_PC87427
- tristate "National Semiconductor PC87427"
- depends on !PPC
- help
- If you say yes here you get access to the hardware monitoring
- functions of the National Semiconductor PC87427 Super-I/O chip.
- The chip has two distinct logical devices, one for fan speed
- monitoring and control, and one for voltage and temperature
- monitoring. Fan speed monitoring and control are supported, as
- well as temperature monitoring. Voltages aren't supported yet.
+ If you say yes here you get support for the hardware monitoring
+ functionality of the Nuvoton NCT6775F, NCT6776F, NCT6779D
+ and compatible Super-I/O chips. This driver replaces the
+ w83627ehf driver for NCT6775F and NCT6776F.
This driver can also be built as a module. If so, the module
- will be called pc87427.
+ will be called nct6775.
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
@@ -1074,21 +1141,6 @@ config SENSORS_SIS5595
This driver can also be built as a module. If so, the module
will be called sis5595.
-config SENSORS_SMM665
- tristate "Summit Microelectronics SMM665"
- depends on I2C
- default n
- help
- If you say yes here you get support for the hardware monitoring
- features of the Summit Microelectronics SMM665/SMM665B Six-Channel
- Active DC Output Controller / Monitor.
-
- Other supported chips are SMM465, SMM665C, SMM764, and SMM766.
- Support for those chips is untested.
-
- This driver can also be built as a module. If so, the module will
- be called smm665.
-
config SENSORS_DME1737
tristate "SMSC DME1737, SCH311x and compatibles"
depends on I2C && !PPC
@@ -1210,6 +1262,31 @@ config SENSORS_SCH5636
This driver can also be built as a module. If so, the module
will be called sch5636.
+config SENSORS_SMM665
+ tristate "Summit Microelectronics SMM665"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for the hardware monitoring
+ features of the Summit Microelectronics SMM665/SMM665B Six-Channel
+ Active DC Output Controller / Monitor.
+
+ Other supported chips are SMM465, SMM665C, SMM764, and SMM766.
+ Support for those chips is untested.
+
+ This driver can also be built as a module. If so, the module will
+ be called smm665.
+
+config SENSORS_ADC128D818
+ tristate "Texas Instruments ADC128D818"
+ depends on I2C
+ help
+ If you say yes here you get support for the Texas Instruments
+ ADC128D818 System Monitor with Temperature Sensor chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called adc128d818.
+
config SENSORS_ADS1015
tristate "Texas Instruments ADS1015"
depends on I2C
@@ -1525,37 +1602,6 @@ config SENSORS_ULTRA45
This driver provides support for the Ultra45 workstation environmental
sensors.
-config SENSORS_APPLESMC
- tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
- depends on INPUT && X86
- select NEW_LEDS
- select LEDS_CLASS
- select INPUT_POLLDEV
- default n
- help
- This driver provides support for the Apple System Management
- Controller, which provides an accelerometer (Apple Sudden Motion
- Sensor), light sensors, temperature sensors, keyboard backlight
- control and fan control.
-
- Only Intel-based Apple's computers are supported (MacBook Pro,
- MacBook, MacMini).
-
- Data from the different sensors, keyboard backlight control and fan
- control are accessible via sysfs.
-
- This driver also provides an absolute input class device, allowing
- the laptop to act as a pinball machine-esque joystick.
-
- Say Y here if you have an applicable laptop and want to experience
- the awesome power of applesmc.
-
-config SENSORS_MC13783_ADC
- tristate "Freescale MC13783/MC13892 ADC"
- depends on MFD_MC13XXX
- help
- Support for the A/D converter on MC13783 and MC13892 PMIC.
-
if ACPI
comment "ACPI drivers"
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index ec7cde06eb52..c48f9873ac73 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
obj-$(CONFIG_SENSORS_AD7314) += ad7314.o
obj-$(CONFIG_SENSORS_AD7414) += ad7414.o
obj-$(CONFIG_SENSORS_AD7418) += ad7418.o
+obj-$(CONFIG_SENSORS_ADC128D818) += adc128d818.o
obj-$(CONFIG_SENSORS_ADCXX) += adcxx.o
obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o
obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
@@ -95,9 +96,12 @@ obj-$(CONFIG_SENSORS_LM93) += lm93.o
obj-$(CONFIG_SENSORS_LM95234) += lm95234.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
+obj-$(CONFIG_SENSORS_LTC2945) += ltc2945.o
obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
+obj-$(CONFIG_SENSORS_LTC4222) += ltc4222.o
obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
+obj-$(CONFIG_SENSORS_LTC4260) += ltc4260.o
obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX16065) += max16065.o
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
new file mode 100644
index 000000000000..5ffd81f19d01
--- /dev/null
+++ b/drivers/hwmon/adc128d818.c
@@ -0,0 +1,491 @@
+/*
+ * Driver for TI ADC128D818 System Monitor with Temperature Sensor
+ *
+ * Copyright (c) 2014 Guenter Roeck
+ *
+ * Derived from lm80.c
+ * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
+ * and Philip Edelbrock <phil@netroedge.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+
+/* Addresses to scan
+ * The chip also supports addresses 0x35..0x37. Don't scan those addresses
+ * since they are also used by some EEPROMs, which may result in false
+ * positives.
+ */
+static const unsigned short normal_i2c[] = {
+ 0x1d, 0x1e, 0x1f, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END };
+
+/* registers */
+#define ADC128_REG_IN_MAX(nr) (0x2a + (nr) * 2)
+#define ADC128_REG_IN_MIN(nr) (0x2b + (nr) * 2)
+#define ADC128_REG_IN(nr) (0x20 + (nr))
+
+#define ADC128_REG_TEMP 0x27
+#define ADC128_REG_TEMP_MAX 0x38
+#define ADC128_REG_TEMP_HYST 0x39
+
+#define ADC128_REG_CONFIG 0x00
+#define ADC128_REG_ALARM 0x01
+#define ADC128_REG_MASK 0x03
+#define ADC128_REG_CONV_RATE 0x07
+#define ADC128_REG_ONESHOT 0x09
+#define ADC128_REG_SHUTDOWN 0x0a
+#define ADC128_REG_CONFIG_ADV 0x0b
+#define ADC128_REG_BUSY_STATUS 0x0c
+
+#define ADC128_REG_MAN_ID 0x3e
+#define ADC128_REG_DEV_ID 0x3f
+
+struct adc128_data {
+ struct i2c_client *client;
+ struct regulator *regulator;
+ int vref; /* Reference voltage in mV */
+ struct mutex update_lock;
+ bool valid; /* true if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+
+ u16 in[3][7]; /* Register value, normalized to 12 bit
+ * 0: input voltage
+ * 1: min limit
+ * 2: max limit
+ */
+ s16 temp[3]; /* Register value, normalized to 9 bit
+ * 0: sensor 1: limit 2: hyst
+ */
+ u8 alarms; /* alarm register value */
+};
+
+static struct adc128_data *adc128_update_device(struct device *dev)
+{
+ struct adc128_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ struct adc128_data *ret = data;
+ int i, rv;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ for (i = 0; i < 7; i++) {
+ rv = i2c_smbus_read_word_swapped(client,
+ ADC128_REG_IN(i));
+ if (rv < 0)
+ goto abort;
+ data->in[0][i] = rv >> 4;
+
+ rv = i2c_smbus_read_byte_data(client,
+ ADC128_REG_IN_MIN(i));
+ if (rv < 0)
+ goto abort;
+ data->in[1][i] = rv << 4;
+
+ rv = i2c_smbus_read_byte_data(client,
+ ADC128_REG_IN_MAX(i));
+ if (rv < 0)
+ goto abort;
+ data->in[2][i] = rv << 4;
+ }
+
+ rv = i2c_smbus_read_word_swapped(client, ADC128_REG_TEMP);
+ if (rv < 0)
+ goto abort;
+ data->temp[0] = rv >> 7;
+
+ rv = i2c_smbus_read_byte_data(client, ADC128_REG_TEMP_MAX);
+ if (rv < 0)
+ goto abort;
+ data->temp[1] = rv << 1;
+
+ rv = i2c_smbus_read_byte_data(client, ADC128_REG_TEMP_HYST);
+ if (rv < 0)
+ goto abort;
+ data->temp[2] = rv << 1;
+
+ rv = i2c_smbus_read_byte_data(client, ADC128_REG_ALARM);
+ if (rv < 0)
+ goto abort;
+ data->alarms |= rv;
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+ goto done;
+
+abort:
+ ret = ERR_PTR(rv);
+ data->valid = false;
+done:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static ssize_t adc128_show_in(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adc128_data *data = adc128_update_device(dev);
+ int index = to_sensor_dev_attr_2(attr)->index;
+ int nr = to_sensor_dev_attr_2(attr)->nr;
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = DIV_ROUND_CLOSEST(data->in[index][nr] * data->vref, 4095);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t adc128_set_in(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adc128_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr_2(attr)->index;
+ int nr = to_sensor_dev_attr_2(attr)->nr;
+ u8 reg, regval;
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ /* 10 mV LSB on limit registers */
+ regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255);
+ data->in[index][nr] = regval << 4;
+ reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr);
+ i2c_smbus_write_byte_data(data->client, reg, regval);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t adc128_show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adc128_data *data = adc128_update_device(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ int temp;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ temp = (data->temp[index] << 7) >> 7; /* sign extend */
+ return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */
+}
+
+static ssize_t adc128_set_temp(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adc128_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ long val;
+ int err;
+ s8 regval;
+
+ err = kstrtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ data->temp[index] = regval << 1;
+ i2c_smbus_write_byte_data(data->client,
+ index == 1 ? ADC128_REG_TEMP_MAX
+ : ADC128_REG_TEMP_HYST,
+ regval);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t adc128_show_alarm(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adc128_data *data = adc128_update_device(dev);
+ int mask = 1 << to_sensor_dev_attr(attr)->index;
+ u8 alarms;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /*
+ * Clear an alarm after reporting it to user space. If it is still
+ * active, the next update sequence will set the alarm bit again.
+ */
+ alarms = data->alarms;
+ data->alarms &= ~mask;
+
+ return sprintf(buf, "%u\n", !!(alarms & mask));
+}
+
+static SENSOR_DEVICE_ATTR_2(in0_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 0, 0);
+static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 0, 1);
+static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 0, 2);
+
+static SENSOR_DEVICE_ATTR_2(in1_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 1, 0);
+static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 1, 1);
+static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 1, 2);
+
+static SENSOR_DEVICE_ATTR_2(in2_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 2, 0);
+static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 2, 1);
+static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 2, 2);
+
+static SENSOR_DEVICE_ATTR_2(in3_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 3, 0);
+static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 3, 1);
+static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 3, 2);
+
+static SENSOR_DEVICE_ATTR_2(in4_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 4, 0);
+static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 4, 1);
+static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 4, 2);
+
+static SENSOR_DEVICE_ATTR_2(in5_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 5, 0);
+static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 5, 1);
+static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 5, 2);
+
+static SENSOR_DEVICE_ATTR_2(in6_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 6, 0);
+static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 6, 1);
+static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 6, 2);
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, adc128_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+ adc128_show_temp, adc128_set_temp, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
+ adc128_show_temp, adc128_set_temp, 2);
+
+static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, adc128_show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, adc128_show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, adc128_show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, adc128_show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, adc128_show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, adc128_show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, adc128_show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, adc128_show_alarm, NULL, 7);
+
+static struct attribute *adc128_attrs[] = {
+ &sensor_dev_attr_in0_min.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in2_min.dev_attr.attr,
+ &sensor_dev_attr_in3_min.dev_attr.attr,
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in5_min.dev_attr.attr,
+ &sensor_dev_attr_in6_min.dev_attr.attr,
+ &sensor_dev_attr_in0_max.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in2_max.dev_attr.attr,
+ &sensor_dev_attr_in3_max.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
+ &sensor_dev_attr_in5_max.dev_attr.attr,
+ &sensor_dev_attr_in6_max.dev_attr.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_in0_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_alarm.dev_attr.attr,
+ &sensor_dev_attr_in3_alarm.dev_attr.attr,
+ &sensor_dev_attr_in4_alarm.dev_attr.attr,
+ &sensor_dev_attr_in5_alarm.dev_attr.attr,
+ &sensor_dev_attr_in6_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(adc128);
+
+static int adc128_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ int man_id, dev_id;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ man_id = i2c_smbus_read_byte_data(client, ADC128_REG_MAN_ID);
+ dev_id = i2c_smbus_read_byte_data(client, ADC128_REG_DEV_ID);
+ if (man_id != 0x01 || dev_id != 0x09)
+ return -ENODEV;
+
+ /* Check unused bits for confirmation */
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_CONFIG) & 0xf4)
+ return -ENODEV;
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_CONV_RATE) & 0xfe)
+ return -ENODEV;
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_ONESHOT) & 0xfe)
+ return -ENODEV;
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_SHUTDOWN) & 0xfe)
+ return -ENODEV;
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_CONFIG_ADV) & 0xf8)
+ return -ENODEV;
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_BUSY_STATUS) & 0xfc)
+ return -ENODEV;
+
+ strlcpy(info->type, "adc128d818", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int adc128_init_client(struct adc128_data *data)
+{
+ struct i2c_client *client = data->client;
+ int err;
+
+ /*
+ * Reset chip to defaults.
+ * This makes most other initializations unnecessary.
+ */
+ err = i2c_smbus_write_byte_data(client, ADC128_REG_CONFIG, 0x80);
+ if (err)
+ return err;
+
+ /* Start monitoring */
+ err = i2c_smbus_write_byte_data(client, ADC128_REG_CONFIG, 0x01);
+ if (err)
+ return err;
+
+ /* If external vref is selected, configure the chip to use it */
+ if (data->regulator) {
+ err = i2c_smbus_write_byte_data(client,
+ ADC128_REG_CONFIG_ADV, 0x01);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int adc128_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct regulator *regulator;
+ struct device *hwmon_dev;
+ struct adc128_data *data;
+ int err, vref;
+
+ data = devm_kzalloc(dev, sizeof(struct adc128_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* vref is optional. If specified, it is used as the chip reference voltage */
+ regulator = devm_regulator_get_optional(dev, "vref");
+ if (!IS_ERR(regulator)) {
+ data->regulator = regulator;
+ err = regulator_enable(regulator);
+ if (err < 0)
+ return err;
+ vref = regulator_get_voltage(regulator);
+ if (vref < 0) {
+ err = vref;
+ goto error;
+ }
+ data->vref = DIV_ROUND_CLOSEST(vref, 1000);
+ } else {
+ data->vref = 2560; /* 2.56V, in mV */
+ }
+
+ data->client = client;
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Initialize the chip */
+ err = adc128_init_client(data);
+ if (err < 0)
+ goto error;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, adc128_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (data->regulator)
+ regulator_disable(data->regulator);
+ return err;
+}
+
+static int adc128_remove(struct i2c_client *client)
+{
+ struct adc128_data *data = i2c_get_clientdata(client);
+
+ if (data->regulator)
+ regulator_disable(data->regulator);
+
+ return 0;
+}
+
+static const struct i2c_device_id adc128_id[] = {
+ { "adc128d818", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adc128_id);
+
+static struct i2c_driver adc128_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "adc128d818",
+ },
+ .probe = adc128_probe,
+ .remove = adc128_remove,
+ .id_table = adc128_id,
+ .detect = adc128_detect,
+ .address_list = normal_i2c,
+};
+
+module_i2c_driver(adc128_driver);
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("Driver for ADC128D818");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 29dd9f746dfa..3eb4281689b5 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -79,9 +79,11 @@ enum chips {
/* Each client has this additional data */
struct adm1021_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
enum chips type;
+ const struct attribute_group *groups[3];
+
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
char low_power; /* !=0 if device in low power mode */
@@ -101,7 +103,6 @@ static int adm1021_probe(struct i2c_client *client,
static int adm1021_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void adm1021_init_client(struct i2c_client *client);
-static int adm1021_remove(struct i2c_client *client);
static struct adm1021_data *adm1021_update_device(struct device *dev);
/* (amalysh) read only mode, otherwise any limit's writing confuse BIOS */
@@ -128,7 +129,6 @@ static struct i2c_driver adm1021_driver = {
.name = "adm1021",
},
.probe = adm1021_probe,
- .remove = adm1021_remove,
.id_table = adm1021_id,
.detect = adm1021_detect,
.address_list = normal_i2c,
@@ -182,8 +182,8 @@ static ssize_t set_temp_max(struct device *dev,
const char *buf, size_t count)
{
int index = to_sensor_dev_attr(devattr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct adm1021_data *data = i2c_get_clientdata(client);
+ struct adm1021_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long temp;
int err;
@@ -207,8 +207,8 @@ static ssize_t set_temp_min(struct device *dev,
const char *buf, size_t count)
{
int index = to_sensor_dev_attr(devattr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct adm1021_data *data = i2c_get_clientdata(client);
+ struct adm1021_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long temp;
int err;
@@ -238,8 +238,8 @@ static ssize_t set_low_power(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct adm1021_data *data = i2c_get_clientdata(client);
+ struct adm1021_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
char low_power;
unsigned long val;
int err;
@@ -412,15 +412,15 @@ static int adm1021_detect(struct i2c_client *client,
static int adm1021_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct adm1021_data *data;
- int err;
+ struct device *hwmon_dev;
- data = devm_kzalloc(&client->dev, sizeof(struct adm1021_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct adm1021_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
data->type = id->driver_data;
mutex_init(&data->update_lock);
@@ -428,29 +428,14 @@ static int adm1021_probe(struct i2c_client *client,
if (data->type != lm84 && !read_only)
adm1021_init_client(client);
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &adm1021_group);
- if (err)
- return err;
-
- if (data->type != lm84) {
- err = sysfs_create_group(&client->dev.kobj, &adm1021_min_group);
- if (err)
- goto error;
- }
+ data->groups[0] = &adm1021_group;
+ if (data->type != lm84)
+ data->groups[1] = &adm1021_min_group;
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error;
- }
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
- return 0;
-
-error:
- sysfs_remove_group(&client->dev.kobj, &adm1021_min_group);
- sysfs_remove_group(&client->dev.kobj, &adm1021_group);
- return err;
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static void adm1021_init_client(struct i2c_client *client)
@@ -462,21 +447,10 @@ static void adm1021_init_client(struct i2c_client *client)
i2c_smbus_write_byte_data(client, ADM1021_REG_CONV_RATE_W, 0x04);
}
-static int adm1021_remove(struct i2c_client *client)
-{
- struct adm1021_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &adm1021_min_group);
- sysfs_remove_group(&client->dev.kobj, &adm1021_group);
-
- return 0;
-}
-
static struct adm1021_data *adm1021_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct adm1021_data *data = i2c_get_clientdata(client);
+ struct adm1021_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
mutex_lock(&data->update_lock);
@@ -484,7 +458,7 @@ static struct adm1021_data *adm1021_update_device(struct device *dev)
|| !data->valid) {
int i;
- dev_dbg(&client->dev, "Starting adm1021 update\n");
+ dev_dbg(dev, "Starting adm1021 update\n");
for (i = 0; i < 2; i++) {
data->temp[i] = 1000 *
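
Because the adm1021 hunks above interleave removed and added lines, the reworked probe() is easier to read in one piece. The following is a reconstruction from the added lines (any context line the diff elides may be missing); the point of the conversion is that devm_hwmon_device_register_with_groups() ties the hwmon device and its sysfs groups to the i2c client's lifetime, which is why both the remove() callback and the manual sysfs_remove_group() error unwinding could be deleted:

static int adm1021_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct adm1021_data *data;
	struct device *hwmon_dev;

	data = devm_kzalloc(dev, sizeof(struct adm1021_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->client = client;
	data->type = id->driver_data;
	mutex_init(&data->update_lock);

	if (data->type != lm84 && !read_only)
		adm1021_init_client(client);

	/* groups[] comes zeroed from devm_kzalloc(), so it stays NULL-terminated */
	data->groups[0] = &adm1021_group;
	if (data->type != lm84)
		data->groups[1] = &adm1021_min_group;

	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
							   data, data->groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}
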
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 8d9f2a0e8efe..71463689d163 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -1115,7 +1115,6 @@ asc7621_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -ENOMEM;
i2c_set_clientdata(client, data);
- data->valid = 0;
mutex_init(&data->update_lock);
/* Initialize the asc7621 chip */
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index ddff02e3e66f..6edce42c61d5 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -353,8 +353,6 @@ static int atxp1_probe(struct i2c_client *new_client,
data->vrm = vid_which_vrm();
i2c_set_clientdata(new_client, data);
- data->valid = 0;
-
mutex_init(&data->update_lock);
/* Register sysfs hooks */
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index bbb0b0d463f7..6d02e3b06375 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -94,6 +94,8 @@ struct temp_data {
bool valid;
struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
+ struct attribute *attrs[TOTAL_ATTRS + 1];
+ struct attribute_group attr_group;
struct mutex update_lock;
};
@@ -114,12 +116,6 @@ struct pdev_entry {
static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);
-static ssize_t show_name(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- return sprintf(buf, "%s\n", DRVNAME);
-}
-
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -393,20 +389,10 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
return adjust_tjmax(c, id, dev);
}
-static int create_name_attr(struct platform_data *pdata,
- struct device *dev)
-{
- sysfs_attr_init(&pdata->name_attr.attr);
- pdata->name_attr.attr.name = "name";
- pdata->name_attr.attr.mode = S_IRUGO;
- pdata->name_attr.show = show_name;
- return device_create_file(dev, &pdata->name_attr);
-}
-
static int create_core_attrs(struct temp_data *tdata, struct device *dev,
int attr_no)
{
- int err, i;
+ int i;
static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
struct device_attribute *devattr, char *buf) = {
show_label, show_crit_alarm, show_temp, show_tjmax,
@@ -424,16 +410,10 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
tdata->sd_attrs[i].index = attr_no;
- err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
- if (err)
- goto exit_free;
+ tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
}
- return 0;
-
-exit_free:
- while (--i >= 0)
- device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
- return err;
+ tdata->attr_group.attrs = tdata->attrs;
+ return sysfs_create_group(&dev->kobj, &tdata->attr_group);
}
@@ -548,7 +528,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
pdata->core_data[attr_no] = tdata;
/* Create sysfs interfaces */
- err = create_core_attrs(tdata, &pdev->dev, attr_no);
+ err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
if (err)
goto exit_free;
@@ -573,14 +553,12 @@ static void coretemp_add_core(unsigned int cpu, int pkg_flag)
}
static void coretemp_remove_core(struct platform_data *pdata,
- struct device *dev, int indx)
+ int indx)
{
- int i;
struct temp_data *tdata = pdata->core_data[indx];
/* Remove the sysfs attributes */
- for (i = 0; i < tdata->attr_size; i++)
- device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
+ sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);
kfree(pdata->core_data[indx]);
pdata->core_data[indx] = NULL;
@@ -588,34 +566,20 @@ static void coretemp_remove_core(struct platform_data *pdata,
static int coretemp_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct platform_data *pdata;
- int err;
/* Initialize the per-package data structures */
- pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- err = create_name_attr(pdata, &pdev->dev);
- if (err)
- goto exit_free;
-
pdata->phys_proc_id = pdev->id;
platform_set_drvdata(pdev, pdata);
- pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
- if (IS_ERR(pdata->hwmon_dev)) {
- err = PTR_ERR(pdata->hwmon_dev);
- dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
- goto exit_name;
- }
- return 0;
-
-exit_name:
- device_remove_file(&pdev->dev, &pdata->name_attr);
-exit_free:
- kfree(pdata);
- return err;
+ pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
+ pdata, NULL);
+ return PTR_ERR_OR_ZERO(pdata->hwmon_dev);
}
static int coretemp_remove(struct platform_device *pdev)
@@ -625,11 +589,8 @@ static int coretemp_remove(struct platform_device *pdev)
for (i = MAX_CORE_DATA - 1; i >= 0; --i)
if (pdata->core_data[i])
- coretemp_remove_core(pdata, &pdev->dev, i);
+ coretemp_remove_core(pdata, i);
- device_remove_file(&pdev->dev, &pdata->name_attr);
- hwmon_device_unregister(pdata->hwmon_dev);
- kfree(pdata);
return 0;
}
@@ -777,7 +738,7 @@ static void put_core_offline(unsigned int cpu)
return;
if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
- coretemp_remove_core(pdata, &pdev->dev, indx);
+ coretemp_remove_core(pdata, indx);
/*
* If a HT sibling of a core is taken offline, but another HT sibling
@@ -849,20 +810,20 @@ static int __init coretemp_init(void)
if (err)
goto exit;
- get_online_cpus();
+ cpu_notifier_register_begin();
for_each_online_cpu(i)
get_core_online(i);
#ifndef CONFIG_HOTPLUG_CPU
if (list_empty(&pdev_list)) {
- put_online_cpus();
+ cpu_notifier_register_done();
err = -ENODEV;
goto exit_driver_unreg;
}
#endif
- register_hotcpu_notifier(&coretemp_cpu_notifier);
- put_online_cpus();
+ __register_hotcpu_notifier(&coretemp_cpu_notifier);
+ cpu_notifier_register_done();
return 0;
#ifndef CONFIG_HOTPLUG_CPU
@@ -877,8 +838,8 @@ static void __exit coretemp_exit(void)
{
struct pdev_entry *p, *n;
- get_online_cpus();
- unregister_hotcpu_notifier(&coretemp_cpu_notifier);
+ cpu_notifier_register_begin();
+ __unregister_hotcpu_notifier(&coretemp_cpu_notifier);
mutex_lock(&pdev_list_mutex);
list_for_each_entry_safe(p, n, &pdev_list, list) {
platform_device_unregister(p->pdev);
@@ -886,7 +847,7 @@ static void __exit coretemp_exit(void)
kfree(p);
}
mutex_unlock(&pdev_list_mutex);
- put_online_cpus();
+ cpu_notifier_register_done();
platform_driver_unregister(&coretemp_driver);
}
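
The coretemp conversion replaces per-attribute device_create_file() calls with one sysfs group per core. sysfs_create_group() walks the group's attrs pointer array until it hits a NULL entry, which is why struct temp_data gains TOTAL_ATTRS + 1 slots: assuming the structure is zero-allocated, as it is elsewhere in this driver, the unused trailing slot is the terminator. A sketch of how the tail of create_core_attrs() reads after the hunks above (the per-attribute name setup is elided and the loop bound is taken from the pre-existing code):

	for (i = 0; i < tdata->attr_size; i++) {
		/* ... attr_name formatting and sysfs_attr_init() as before ... */
		tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
		tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
		tdata->sd_attrs[i].index = attr_no;
		tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
	}
	tdata->attr_group.attrs = tdata->attrs;
	return sysfs_create_group(&dev->kobj, &tdata->attr_group);
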
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 2c137b26acb4..fd892dd48e4c 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -349,7 +349,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
dev_dbg(&client->dev, "reg 0x%02x, err %d\n",
REG_FAN_CONF1, status);
mutex_unlock(&data->update_lock);
- return -EIO;
+ return status;
}
status &= 0x9F;
status |= (new_range_bits << 5);
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 1a8aa1265262..32f5132c4652 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -1648,7 +1648,7 @@ static void __exit f71805f_exit(void)
platform_driver_unregister(&f71805f_driver);
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("F71805F/F71872F hardware monitoring driver");
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index e176a43af63d..a26c385a435b 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -22,6 +22,7 @@
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/string.h>
#define HWMON_ID_PREFIX "hwmon"
#define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
@@ -99,6 +100,10 @@ hwmon_device_register_with_groups(struct device *dev, const char *name,
struct hwmon_device *hwdev;
int err, id;
+ /* Do not accept invalid characters in hwmon name attribute */
+ if (name && (!strlen(name) || strpbrk(name, "-* \t\n")))
+ return ERR_PTR(-EINVAL);
+
id = ida_simple_get(&hwmon_ida, 0, 0, GFP_KERNEL);
if (id < 0)
return ERR_PTR(id);
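
The new check in hwmon_device_register_with_groups() rejects a non-NULL name that is empty or contains '-', '*', space, tab or newline, presumably because those characters are awkward for userspace consumers of the hwmon name attribute such as libsensors. A standalone illustration of the same test, compilable outside the kernel:

#include <stdio.h>
#include <string.h>

/* Mirrors the kernel check: NULL is still accepted, otherwise the name
 * must be non-empty and free of '-', '*', space, tab and newline. */
static int hwmon_name_is_valid(const char *name)
{
	if (!name)
		return 1;
	return strlen(name) != 0 && strpbrk(name, "-* \t\n") == NULL;
}

int main(void)
{
	const char *samples[] = { "adc128d818", "iio_hwmon", "bad-name",
				  "also bad", "" };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("\"%s\" -> %s\n", samples[i],
		       hwmon_name_is_valid(samples[i]) ? "accepted" : "rejected");
	return 0;
}
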
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 708081b68c6f..9fbb1b1fdff3 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -31,6 +31,7 @@ struct iio_hwmon_state {
int num_channels;
struct device *hwmon_dev;
struct attribute_group attr_group;
+ const struct attribute_group *groups[2];
struct attribute **attrs;
};
@@ -56,19 +57,6 @@ static ssize_t iio_hwmon_read_val(struct device *dev,
return sprintf(buf, "%d\n", result);
}
-static ssize_t show_name(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- const char *name = "iio_hwmon";
-
- if (dev->of_node && dev->of_node->name)
- name = dev->of_node->name;
-
- return sprintf(buf, "%s\n", name);
-}
-
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-
static int iio_hwmon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -78,6 +66,10 @@ static int iio_hwmon_probe(struct platform_device *pdev)
int in_i = 1, temp_i = 1, curr_i = 1;
enum iio_chan_type type;
struct iio_channel *channels;
+ const char *name = "iio_hwmon";
+
+ if (dev->of_node && dev->of_node->name)
+ name = dev->of_node->name;
channels = iio_channel_get_all(dev);
if (IS_ERR(channels))
@@ -96,7 +88,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->num_channels++;
st->attrs = devm_kzalloc(dev,
- sizeof(*st->attrs) * (st->num_channels + 2),
+ sizeof(*st->attrs) * (st->num_channels + 1),
GFP_KERNEL);
if (st->attrs == NULL) {
ret = -ENOMEM;
@@ -144,22 +136,18 @@ static int iio_hwmon_probe(struct platform_device *pdev)
a->index = i;
st->attrs[i] = &a->dev_attr.attr;
}
- st->attrs[st->num_channels] = &dev_attr_name.attr;
- st->attr_group.attrs = st->attrs;
- platform_set_drvdata(pdev, st);
- ret = sysfs_create_group(&dev->kobj, &st->attr_group);
- if (ret < 0)
- goto error_release_channels;
- st->hwmon_dev = hwmon_device_register(dev);
+ st->attr_group.attrs = st->attrs;
+ st->groups[0] = &st->attr_group;
+ st->hwmon_dev = hwmon_device_register_with_groups(dev, name, st,
+ st->groups);
if (IS_ERR(st->hwmon_dev)) {
ret = PTR_ERR(st->hwmon_dev);
- goto error_remove_group;
+ goto error_release_channels;
}
+ platform_set_drvdata(pdev, st);
return 0;
-error_remove_group:
- sysfs_remove_group(&dev->kobj, &st->attr_group);
error_release_channels:
iio_channel_release_all(channels);
return ret;
@@ -170,7 +158,6 @@ static int iio_hwmon_remove(struct platform_device *pdev)
struct iio_hwmon_state *st = platform_get_drvdata(pdev);
hwmon_device_unregister(st->hwmon_dev);
- sysfs_remove_group(&pdev->dev.kobj, &st->attr_group);
iio_channel_release_all(st->channels);
return 0;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 70749fc15a4f..a327fd3402a7 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -11,6 +11,7 @@
* similar parts. The other devices are supported by different drivers.
*
* Supports: IT8603E Super I/O chip w/LPC interface
+ * IT8623E Super I/O chip w/LPC interface
* IT8705F Super I/O chip w/LPC interface
* IT8712F Super I/O chip w/LPC interface
* IT8716F Super I/O chip w/LPC interface
@@ -147,7 +148,8 @@ static inline void superio_exit(void)
#define IT8772E_DEVID 0x8772
#define IT8782F_DEVID 0x8782
#define IT8783E_DEVID 0x8783
-#define IT8306E_DEVID 0x8603
+#define IT8603E_DEVID 0x8603
+#define IT8623E_DEVID 0x8623
#define IT87_ACT_REG 0x30
#define IT87_BASE_REG 0x60
@@ -1431,7 +1433,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2);
-/* special AVCC3 IT8306E in9 */
+/* special AVCC3 IT8603E in9 */
static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 0);
static ssize_t show_name(struct device *dev, struct device_attribute
@@ -1766,7 +1768,8 @@ static int __init it87_find(unsigned short *address,
case IT8783E_DEVID:
sio_data->type = it8783;
break;
- case IT8306E_DEVID:
+ case IT8603E_DEVID:
+ case IT8623E_DEVID:
sio_data->type = it8603;
break;
case 0xffff: /* No device at all */
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index a183e488db78..7488e36809c8 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -28,7 +28,6 @@
#include <linux/hwmon.h>
struct jz4740_hwmon {
- struct resource *mem;
void __iomem *base;
int irq;
@@ -106,6 +105,7 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
{
int ret;
struct jz4740_hwmon *hwmon;
+ struct resource *mem;
hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
@@ -120,25 +120,10 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
return hwmon->irq;
}
- hwmon->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!hwmon->mem) {
- dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
- return -ENOENT;
- }
-
- hwmon->mem = devm_request_mem_region(&pdev->dev, hwmon->mem->start,
- resource_size(hwmon->mem), pdev->name);
- if (!hwmon->mem) {
- dev_err(&pdev->dev, "Failed to request mmio memory region\n");
- return -EBUSY;
- }
-
- hwmon->base = devm_ioremap_nocache(&pdev->dev, hwmon->mem->start,
- resource_size(hwmon->mem));
- if (!hwmon->base) {
- dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
- return -EBUSY;
- }
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hwmon->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(hwmon->base))
+ return PTR_ERR(hwmon->base);
init_completion(&hwmon->read_completion);
mutex_init(&hwmon->lock);
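
In the jz4740-hwmon hunk above, devm_ioremap_resource() subsumes the three hand-rolled steps it replaces: it validates the resource, calls devm_request_mem_region() and devm_ioremap(), logs its own dev_err() on failure, and returns an ERR_PTR(), so a single IS_ERR() check covers every former error path. The post-patch sequence, repeated here with comments for readability:

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);	/* may be NULL */
	hwmon->base = devm_ioremap_resource(&pdev->dev, mem);	/* checks mem, requests and maps it */
	if (IS_ERR(hwmon->base))
		return PTR_ERR(hwmon->base);	/* -EINVAL, -EBUSY or -ENOMEM from the helper */
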
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index baf375b5ab0d..f7b46f68ef43 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -212,6 +212,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index b4ad598feb6c..848b9611151f 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -155,8 +155,9 @@ enum chips { lm63, lm64, lm96163 };
*/
struct lm63_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
+ const struct attribute_group *groups[5];
char valid; /* zero until following fields are valid */
char lut_valid; /* zero until lut fields are valid */
unsigned long last_updated; /* in jiffies */
@@ -218,9 +219,9 @@ static inline int lut_temp_to_reg(struct lm63_data *data, long val)
* Update the lookup table register cache.
* client->update_lock must be held when calling this function.
*/
-static void lm63_update_lut(struct i2c_client *client)
+static void lm63_update_lut(struct lm63_data *data)
{
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct i2c_client *client = data->client;
int i;
if (time_after(jiffies, data->lut_last_updated + 5 * HZ) ||
@@ -241,8 +242,8 @@ static void lm63_update_lut(struct i2c_client *client)
static struct lm63_data *lm63_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct lm63_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long next_update;
mutex_lock(&data->update_lock);
@@ -310,7 +311,7 @@ static struct lm63_data *lm63_update_device(struct device *dev)
data->valid = 1;
}
- lm63_update_lut(client);
+ lm63_update_lut(data);
mutex_unlock(&data->update_lock);
@@ -321,18 +322,17 @@ static struct lm63_data *lm63_update_device(struct device *dev)
* Trip points in the lookup table should be in ascending order for both
* temperatures and PWM output values.
*/
-static int lm63_lut_looks_bad(struct i2c_client *client)
+static int lm63_lut_looks_bad(struct device *dev, struct lm63_data *data)
{
- struct lm63_data *data = i2c_get_clientdata(client);
int i;
mutex_lock(&data->update_lock);
- lm63_update_lut(client);
+ lm63_update_lut(data);
for (i = 1; i < data->lut_size; i++) {
if (data->pwm1[1 + i - 1] > data->pwm1[1 + i]
|| data->temp8[3 + i - 1] > data->temp8[3 + i]) {
- dev_warn(&client->dev,
+ dev_warn(dev,
"Lookup table doesn't look sane (check entries %d and %d)\n",
i, i + 1);
break;
@@ -358,8 +358,8 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
static ssize_t set_fan(struct device *dev, struct device_attribute *dummy,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct lm63_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int err;
@@ -399,8 +399,8 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct lm63_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int nr = attr->index;
unsigned long val;
int err;
@@ -435,8 +435,8 @@ static ssize_t set_pwm1_enable(struct device *dev,
struct device_attribute *dummy,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct lm63_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int err;
@@ -450,7 +450,7 @@ static ssize_t set_pwm1_enable(struct device *dev,
* Only let the user switch to automatic mode if the lookup table
* looks sane.
*/
- if (val == 2 && lm63_lut_looks_bad(client))
+ if (val == 2 && lm63_lut_looks_bad(dev, data))
return -EPERM;
mutex_lock(&data->update_lock);
@@ -461,7 +461,7 @@ static ssize_t set_pwm1_enable(struct device *dev,
else
data->config_fan &= ~0x20;
i2c_smbus_write_byte_data(client, LM63_REG_CONFIG_FAN,
- data->config_fan);
+ data->config_fan);
mutex_unlock(&data->update_lock);
return count;
}
@@ -505,8 +505,8 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct lm63_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int nr = attr->index;
long val;
int err;
@@ -579,8 +579,8 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
};
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct lm63_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long val;
int err;
int nr = attr->index;
@@ -635,8 +635,8 @@ static ssize_t set_temp2_crit_hyst(struct device *dev,
struct device_attribute *dummy,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct lm63_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long val;
int err;
long hyst;
@@ -657,11 +657,11 @@ static ssize_t set_temp2_crit_hyst(struct device *dev,
* Set conversion rate.
* client->update_lock must be held when calling this function.
*/
-static void lm63_set_convrate(struct i2c_client *client, struct lm63_data *data,
- unsigned int interval)
+static void lm63_set_convrate(struct lm63_data *data, unsigned int interval)
{
- int i;
+ struct i2c_client *client = data->client;
unsigned int update_interval;
+ int i;
/* Shift calculations to avoid rounding errors */
interval <<= 6;
@@ -689,8 +689,7 @@ static ssize_t set_update_interval(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct lm63_data *data = dev_get_drvdata(dev);
unsigned long val;
int err;
@@ -699,7 +698,7 @@ static ssize_t set_update_interval(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- lm63_set_convrate(client, data, clamp_val(val, 0, 100000));
+ lm63_set_convrate(data, clamp_val(val, 0, 100000));
mutex_unlock(&data->update_lock);
return count;
@@ -708,8 +707,7 @@ static ssize_t set_update_interval(struct device *dev,
static ssize_t show_type(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct lm63_data *data = dev_get_drvdata(dev);
return sprintf(buf, data->trutherm ? "1\n" : "2\n");
}
@@ -717,8 +715,8 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr,
static ssize_t set_type(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct lm63_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int ret;
u8 reg;
@@ -915,6 +913,15 @@ static struct attribute *lm63_attributes[] = {
NULL
};
+static struct attribute *lm63_attributes_temp2_type[] = {
+ &dev_attr_temp2_type.attr,
+ NULL
+};
+
+static const struct attribute_group lm63_group_temp2_type = {
+ .attrs = lm63_attributes_temp2_type,
+};
+
static struct attribute *lm63_attributes_extra_lut[] = {
&sensor_dev_attr_pwm1_auto_point9_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point9_temp.dev_attr.attr,
@@ -946,8 +953,7 @@ static umode_t lm63_attribute_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct i2c_client *client = to_i2c_client(dev);
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct lm63_data *data = dev_get_drvdata(dev);
if (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr
&& (data->kind == lm64 ||
@@ -1026,9 +1032,10 @@ static int lm63_detect(struct i2c_client *client,
* Ideally we shouldn't have to initialize anything, since the BIOS
* should have taken care of everything
*/
-static void lm63_init_client(struct i2c_client *client)
+static void lm63_init_client(struct lm63_data *data)
{
- struct lm63_data *data = i2c_get_clientdata(client);
+ struct i2c_client *client = data->client;
+ struct device *dev = &client->dev;
u8 convrate;
data->config = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG1);
@@ -1037,7 +1044,7 @@ static void lm63_init_client(struct i2c_client *client)
/* Start converting if needed */
if (data->config & 0x40) { /* standby */
- dev_dbg(&client->dev, "Switching to operational mode\n");
+ dev_dbg(dev, "Switching to operational mode\n");
data->config &= 0xA7;
i2c_smbus_write_byte_data(client, LM63_REG_CONFIG1,
data->config);
@@ -1090,13 +1097,13 @@ static void lm63_init_client(struct i2c_client *client)
/* Show some debug info about the LM63 configuration */
if (data->kind == lm63)
- dev_dbg(&client->dev, "Alert/tach pin configured for %s\n",
+ dev_dbg(dev, "Alert/tach pin configured for %s\n",
(data->config & 0x04) ? "tachometer input" :
"alert output");
- dev_dbg(&client->dev, "PWM clock %s kHz, output frequency %u Hz\n",
+ dev_dbg(dev, "PWM clock %s kHz, output frequency %u Hz\n",
(data->config_fan & 0x08) ? "1.4" : "360",
((data->config_fan & 0x08) ? 700 : 180000) / data->pwm1_freq);
- dev_dbg(&client->dev, "PWM output active %s, %s mode\n",
+ dev_dbg(dev, "PWM output active %s, %s mode\n",
(data->config_fan & 0x10) ? "low" : "high",
(data->config_fan & 0x20) ? "manual" : "auto");
}
@@ -1104,15 +1111,16 @@ static void lm63_init_client(struct i2c_client *client)
static int lm63_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
struct lm63_data *data;
- int err;
+ int groups = 0;
- data = devm_kzalloc(&client->dev, sizeof(struct lm63_data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm63_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
- data->valid = 0;
+ data->client = client;
mutex_init(&data->update_lock);
/* Set the device type */
@@ -1121,59 +1129,21 @@ static int lm63_probe(struct i2c_client *client,
data->temp2_offset = 16000;
/* Initialize chip */
- lm63_init_client(client);
+ lm63_init_client(data);
/* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &lm63_group);
- if (err)
- return err;
- if (data->config & 0x04) { /* tachometer enabled */
- err = sysfs_create_group(&client->dev.kobj, &lm63_group_fan1);
- if (err)
- goto exit_remove_files;
- }
- if (data->kind == lm96163) {
- err = device_create_file(&client->dev, &dev_attr_temp2_type);
- if (err)
- goto exit_remove_files;
-
- err = sysfs_create_group(&client->dev.kobj,
- &lm63_group_extra_lut);
- if (err)
- goto exit_remove_files;
- }
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
+ data->groups[groups++] = &lm63_group;
+ if (data->config & 0x04) /* tachometer enabled */
+ data->groups[groups++] = &lm63_group_fan1;
- return 0;
-
-exit_remove_files:
- sysfs_remove_group(&client->dev.kobj, &lm63_group);
- sysfs_remove_group(&client->dev.kobj, &lm63_group_fan1);
if (data->kind == lm96163) {
- device_remove_file(&client->dev, &dev_attr_temp2_type);
- sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut);
+ data->groups[groups++] = &lm63_group_temp2_type;
+ data->groups[groups++] = &lm63_group_extra_lut;
}
- return err;
-}
-
-static int lm63_remove(struct i2c_client *client)
-{
- struct lm63_data *data = i2c_get_clientdata(client);
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm63_group);
- sysfs_remove_group(&client->dev.kobj, &lm63_group_fan1);
- if (data->kind == lm96163) {
- device_remove_file(&client->dev, &dev_attr_temp2_type);
- sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut);
- }
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
/*
@@ -1194,7 +1164,6 @@ static struct i2c_driver lm63_driver = {
.name = "lm63",
},
.probe = lm63_probe,
- .remove = lm63_remove,
.id_table = lm63_id,
.detect = lm63_detect,
.address_list = normal_i2c,
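
For reference, the lm63 conversion above (and most of the conversions that follow) reduces to one pattern: keep the i2c_client pointer in the driver data instead of using i2c_set_clientdata(), collect the sysfs attribute groups in a NULL-terminated array, and let devm_hwmon_device_register_with_groups() create and tear everything down. A minimal sketch of that pattern follows; the "foo" names, the config bit and the placeholder attribute groups are illustrative only, not code from any one driver:

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

static struct attribute *foo_attrs[] = { NULL };	/* placeholder group */
static const struct attribute_group foo_group = { .attrs = foo_attrs };
static struct attribute *foo_fan_attrs[] = { NULL };	/* optional group */
static const struct attribute_group foo_group_fan = { .attrs = foo_fan_attrs };

struct foo_data {
	struct i2c_client *client;	/* replaces i2c_get_clientdata() */
	const struct attribute_group *groups[3];	/* 2 groups max + NULL */
	struct mutex update_lock;
	u8 config;			/* read from the chip in a real driver */
};

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct device *hwmon_dev;
	struct foo_data *data;
	int idx = 0;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->client = client;
	mutex_init(&data->update_lock);

	data->groups[idx++] = &foo_group;	/* always present */
	if (data->config & 0x04)		/* optional feature, chip dependent */
		data->groups[idx++] = &foo_group_fan;

	/*
	 * The hwmon core instantiates the attributes from data->groups on the
	 * hwmon device and removes them on unbind, so the driver needs no
	 * .remove callback and no error unwinding after this call.
	 */
	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
							   data, data->groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}
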
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index f17beb5e6dd6..502771c06fd9 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -348,7 +348,6 @@ static int lm77_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -ENOMEM;
i2c_set_clientdata(client, data);
- data->valid = 0;
mutex_init(&data->update_lock);
/* Initialize the LM77 chip */
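
The "- data->valid = 0;" removal here, and the matching removals in the lm83, lm87, lm92, lm93, lm95241 and lm95245 hunks below, all rely on the same fact: kzalloc()/devm_kzalloc() return zero-filled memory, so clearing individual fields right after the allocation is redundant. Sketch (data and its fields stand for any of these drivers):

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	/*
	 * Already guaranteed here, without explicit assignments:
	 *   data->valid == 0, data->model == 0, ...
	 */
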
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index eba89aac3ece..bd0a1ebbf867 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -112,7 +112,7 @@ static inline long TEMP_FROM_REG(u16 temp)
*/
struct lm80_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
char error; /* !=0 if error occurred during last update */
char valid; /* !=0 if following fields are valid */
@@ -140,7 +140,6 @@ static int lm80_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm80_init_client(struct i2c_client *client);
-static int lm80_remove(struct i2c_client *client);
static struct lm80_data *lm80_update_device(struct device *dev);
static int lm80_read_value(struct i2c_client *client, u8 reg);
static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value);
@@ -162,7 +161,6 @@ static struct i2c_driver lm80_driver = {
.name = "lm80",
},
.probe = lm80_probe,
- .remove = lm80_remove,
.id_table = lm80_id,
.detect = lm80_detect,
.address_list = normal_i2c,
@@ -191,8 +189,8 @@ static ssize_t set_in_##suffix(struct device *dev, \
struct device_attribute *attr, const char *buf, size_t count) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
- struct i2c_client *client = to_i2c_client(dev); \
- struct lm80_data *data = i2c_get_clientdata(client); \
+ struct lm80_data *data = dev_get_drvdata(dev); \
+ struct i2c_client *client = data->client; \
long val; \
int err = kstrtol(buf, 10, &val); \
if (err < 0) \
@@ -235,8 +233,8 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct lm80_data *data = i2c_get_clientdata(client);
+ struct lm80_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int err = kstrtoul(buf, 10, &val);
if (err < 0)
@@ -259,8 +257,8 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct lm80_data *data = i2c_get_clientdata(client);
+ struct lm80_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long min, val;
u8 reg;
int err = kstrtoul(buf, 10, &val);
@@ -286,7 +284,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
data->fan_div[nr] = 3;
break;
default:
- dev_err(&client->dev,
+ dev_err(dev,
"fan_div value %ld not supported. Choose one of 1, 2, 4 or 8!\n",
val);
mutex_unlock(&data->update_lock);
@@ -332,8 +330,8 @@ show_temp(os_hyst, temp_os_hyst);
static ssize_t set_temp_##suffix(struct device *dev, \
struct device_attribute *attr, const char *buf, size_t count) \
{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct lm80_data *data = i2c_get_clientdata(client); \
+ struct lm80_data *data = dev_get_drvdata(dev); \
+ struct i2c_client *client = data->client; \
long val; \
int err = kstrtol(buf, 10, &val); \
if (err < 0) \
@@ -440,7 +438,7 @@ static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 13);
* Real code
*/
-static struct attribute *lm80_attributes[] = {
+static struct attribute *lm80_attrs[] = {
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
@@ -487,10 +485,7 @@ static struct attribute *lm80_attributes[] = {
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
NULL
};
-
-static const struct attribute_group lm80_group = {
- .attrs = lm80_attributes,
-};
+ATTRIBUTE_GROUPS(lm80);
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
@@ -541,14 +536,15 @@ static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
static int lm80_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
struct lm80_data *data;
- int err;
- data = devm_kzalloc(&client->dev, sizeof(struct lm80_data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the LM80 chip */
@@ -558,32 +554,10 @@ static int lm80_probe(struct i2c_client *client,
data->fan_min[0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
data->fan_min[1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &lm80_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error_remove;
- }
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, lm80_groups);
- return 0;
-
-error_remove:
- sysfs_remove_group(&client->dev.kobj, &lm80_group);
- return err;
-}
-
-static int lm80_remove(struct i2c_client *client)
-{
- struct lm80_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm80_group);
-
- return 0;
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static int lm80_read_value(struct i2c_client *client, u8 reg)
@@ -614,8 +588,8 @@ static void lm80_init_client(struct i2c_client *client)
static struct lm80_data *lm80_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm80_data *data = i2c_get_clientdata(client);
+ struct lm80_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int i;
int rv;
int prev_rv;
@@ -627,7 +601,7 @@ static struct lm80_data *lm80_update_device(struct device *dev)
lm80_init_client(client);
if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
- dev_dbg(&client->dev, "Starting lm80 update\n");
+ dev_dbg(dev, "Starting lm80 update\n");
for (i = 0; i <= 6; i++) {
rv = lm80_read_value(client, LM80_REG_IN(i));
if (rv < 0)
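
The ATTRIBUTE_GROUPS(lm80) line above is what allows the attribute array to be renamed to lm80_attrs and lm80_groups to be handed straight to devm_hwmon_device_register_with_groups(); the macro (from <linux/sysfs.h>) expands to roughly the following, and the lm95241, lm95245, ltc2945, ltc4215 and ltc4222 hunks below use it the same way:

/* Approximate expansion of ATTRIBUTE_GROUPS(lm80) */
static const struct attribute_group lm80_group = {
	.attrs = lm80_attrs,		/* the macro mandates the _attrs suffix */
};

static const struct attribute_group *lm80_groups[] = {
	&lm80_group,
	NULL,				/* NULL terminator expected by the hwmon core */
};
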
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index abd270243ba7..be02155788c3 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -349,7 +349,6 @@ static int lm83_probe(struct i2c_client *new_client,
return -ENOMEM;
i2c_set_clientdata(new_client, data);
- data->valid = 0;
mutex_init(&data->update_lock);
/*
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 4c5f20231c1a..ba1d83d48056 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -903,7 +903,6 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -ENOMEM;
i2c_set_clientdata(client, data);
- data->valid = 0;
mutex_init(&data->update_lock);
/* Initialize the LM87 chip */
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 701e952ae523..c9ff08dbe10c 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -365,7 +365,9 @@ enum lm90_temp11_reg_index {
*/
struct lm90_data {
+ struct i2c_client *client;
struct device *hwmon_dev;
+ const struct attribute_group *groups[6];
struct mutex update_lock;
struct regulator *regulator;
char valid; /* zero until following fields are valid */
@@ -513,8 +515,8 @@ static void lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
static struct lm90_data *lm90_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm90_data *data = i2c_get_clientdata(client);
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long next_update;
mutex_lock(&data->update_lock);
@@ -793,8 +795,8 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
};
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct lm90_data *data = i2c_get_clientdata(client);
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int nr = attr->index;
long val;
int err;
@@ -860,8 +862,8 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
};
struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct lm90_data *data = i2c_get_clientdata(client);
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int nr = attr->nr;
int index = attr->index;
long val;
@@ -922,8 +924,8 @@ static ssize_t show_temphyst(struct device *dev,
static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm90_data *data = i2c_get_clientdata(client);
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long val;
int err;
int temp;
@@ -976,8 +978,8 @@ static ssize_t set_update_interval(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm90_data *data = i2c_get_clientdata(client);
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int err;
@@ -1057,6 +1059,15 @@ static const struct attribute_group lm90_group = {
.attrs = lm90_attributes,
};
+static struct attribute *lm90_temp2_offset_attributes[] = {
+ &sensor_dev_attr_temp2_offset.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group lm90_temp2_offset_group = {
+ .attrs = lm90_temp2_offset_attributes,
+};
+
/*
* Additional attributes for devices with emergency sensors
*/
@@ -1393,22 +1404,6 @@ static int lm90_detect(struct i2c_client *client,
return 0;
}
-static void lm90_remove_files(struct i2c_client *client, struct lm90_data *data)
-{
- struct device *dev = &client->dev;
-
- if (data->flags & LM90_HAVE_TEMP3)
- sysfs_remove_group(&dev->kobj, &lm90_temp3_group);
- if (data->flags & LM90_HAVE_EMERGENCY_ALARM)
- sysfs_remove_group(&dev->kobj, &lm90_emergency_alarm_group);
- if (data->flags & LM90_HAVE_EMERGENCY)
- sysfs_remove_group(&dev->kobj, &lm90_emergency_group);
- if (data->flags & LM90_HAVE_OFFSET)
- device_remove_file(dev, &sensor_dev_attr_temp2_offset.dev_attr);
- device_remove_file(dev, &dev_attr_pec);
- sysfs_remove_group(&dev->kobj, &lm90_group);
-}
-
static void lm90_restore_conf(struct i2c_client *client, struct lm90_data *data)
{
/* Restore initial configuration */
@@ -1418,10 +1413,9 @@ static void lm90_restore_conf(struct i2c_client *client, struct lm90_data *data)
data->config_orig);
}
-static void lm90_init_client(struct i2c_client *client)
+static void lm90_init_client(struct i2c_client *client, struct lm90_data *data)
{
u8 config, convrate;
- struct lm90_data *data = i2c_get_clientdata(client);
if (lm90_read_reg(client, LM90_REG_R_CONVRATE, &convrate) < 0) {
dev_warn(&client->dev, "Failed to read convrate register!\n");
@@ -1519,6 +1513,7 @@ static int lm90_probe(struct i2c_client *client,
struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
struct lm90_data *data;
struct regulator *regulator;
+ int groups = 0;
int err;
regulator = devm_regulator_get(dev, "vcc");
@@ -1527,15 +1522,15 @@ static int lm90_probe(struct i2c_client *client,
err = regulator_enable(regulator);
if (err < 0) {
- dev_err(&client->dev,
- "Failed to enable regulator: %d\n", err);
+ dev_err(dev, "Failed to enable regulator: %d\n", err);
return err;
}
- data = devm_kzalloc(&client->dev, sizeof(struct lm90_data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm90_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -1562,44 +1557,34 @@ static int lm90_probe(struct i2c_client *client,
data->max_convrate = lm90_params[data->kind].max_convrate;
/* Initialize the LM90 chip */
- lm90_init_client(client);
+ lm90_init_client(client, data);
/* Register sysfs hooks */
- err = sysfs_create_group(&dev->kobj, &lm90_group);
- if (err)
- goto exit_restore;
+ data->groups[groups++] = &lm90_group;
+
+ if (data->flags & LM90_HAVE_OFFSET)
+ data->groups[groups++] = &lm90_temp2_offset_group;
+
+ if (data->flags & LM90_HAVE_EMERGENCY)
+ data->groups[groups++] = &lm90_emergency_group;
+
+ if (data->flags & LM90_HAVE_EMERGENCY_ALARM)
+ data->groups[groups++] = &lm90_emergency_alarm_group;
+
+ if (data->flags & LM90_HAVE_TEMP3)
+ data->groups[groups++] = &lm90_temp3_group;
+
if (client->flags & I2C_CLIENT_PEC) {
err = device_create_file(dev, &dev_attr_pec);
if (err)
- goto exit_remove_files;
- }
- if (data->flags & LM90_HAVE_OFFSET) {
- err = device_create_file(dev,
- &sensor_dev_attr_temp2_offset.dev_attr);
- if (err)
- goto exit_remove_files;
- }
- if (data->flags & LM90_HAVE_EMERGENCY) {
- err = sysfs_create_group(&dev->kobj, &lm90_emergency_group);
- if (err)
- goto exit_remove_files;
- }
- if (data->flags & LM90_HAVE_EMERGENCY_ALARM) {
- err = sysfs_create_group(&dev->kobj,
- &lm90_emergency_alarm_group);
- if (err)
- goto exit_remove_files;
- }
- if (data->flags & LM90_HAVE_TEMP3) {
- err = sysfs_create_group(&dev->kobj, &lm90_temp3_group);
- if (err)
- goto exit_remove_files;
+ goto exit_restore;
}
- data->hwmon_dev = hwmon_device_register(dev);
+ data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
+ goto exit_remove_pec;
}
if (client->irq) {
@@ -1618,8 +1603,8 @@ static int lm90_probe(struct i2c_client *client,
exit_unregister:
hwmon_device_unregister(data->hwmon_dev);
-exit_remove_files:
- lm90_remove_files(client, data);
+exit_remove_pec:
+ device_remove_file(dev, &dev_attr_pec);
exit_restore:
lm90_restore_conf(client, data);
regulator_disable(data->regulator);
@@ -1632,7 +1617,7 @@ static int lm90_remove(struct i2c_client *client)
struct lm90_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
- lm90_remove_files(client, data);
+ device_remove_file(&client->dev, &dev_attr_pec);
lm90_restore_conf(client, data);
regulator_disable(data->regulator);
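
lm90 is the one driver in this series that cannot drop its remove() path entirely: the PEC attribute is created on the i2c client device rather than on the hwmon device, and the chip configuration and the vcc regulator must be restored on unbind, so the non-devm hwmon_device_register_with_groups() is kept and unwound by hand. The groups[6] array added to struct lm90_data relies on the zeroed allocation for its terminator; a sketch of the sizing assumption:

	/*
	 * At most five groups can be selected (base, temp2_offset, emergency,
	 * emergency_alarm, temp3); devm_kzalloc() zeroes struct lm90_data, so
	 * the unused sixth slot is already NULL and terminates the list.
	 */
	const struct attribute_group *groups[6];
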
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 9d0e87a4f0cb..b9022db6511a 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -380,7 +380,6 @@ static int lm92_probe(struct i2c_client *new_client,
return -ENOMEM;
i2c_set_clientdata(new_client, data);
- data->valid = 0;
mutex_init(&data->update_lock);
/* Initialize the chipset */
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 6f1c6c0dbaf5..adf23165a6a7 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -2754,7 +2754,6 @@ static int lm93_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
/* housekeeping */
- data->valid = 0;
data->update = update;
mutex_init(&data->update_lock);
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 4b68fb2a31d7..cdf19adaec79 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -89,7 +89,7 @@ static const u8 lm95241_reg_address[] = {
/* Client data (each client gets its own) */
struct lm95241_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
unsigned long last_updated, interval; /* in jiffies */
char valid; /* zero until following fields are valid */
@@ -113,8 +113,8 @@ static int temp_from_reg_unsigned(u8 val_h, u8 val_l)
static struct lm95241_data *lm95241_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
mutex_lock(&data->update_lock);
@@ -122,7 +122,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
!data->valid) {
int i;
- dev_dbg(&client->dev, "Updating lm95241 data.\n");
+ dev_dbg(dev, "Updating lm95241 data.\n");
for (i = 0; i < ARRAY_SIZE(lm95241_reg_address); i++)
data->temp[i]
= i2c_smbus_read_byte_data(client,
@@ -153,8 +153,7 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr,
static ssize_t show_type(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE - 1,
data->model & to_sensor_dev_attr(attr)->index ? "1\n" : "2\n");
@@ -163,8 +162,8 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr,
static ssize_t set_type(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int shift;
u8 mask = to_sensor_dev_attr(attr)->index;
@@ -201,8 +200,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
static ssize_t show_min(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE - 1,
data->config & to_sensor_dev_attr(attr)->index ?
@@ -212,8 +210,7 @@ static ssize_t show_min(struct device *dev, struct device_attribute *attr,
static ssize_t set_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
long val;
if (kstrtol(buf, 10, &val) < 0)
@@ -229,7 +226,8 @@ static ssize_t set_min(struct device *dev, struct device_attribute *attr,
data->config &= ~to_sensor_dev_attr(attr)->index;
data->valid = 0;
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
+ i2c_smbus_write_byte_data(data->client, LM95241_REG_RW_CONFIG,
+ data->config);
mutex_unlock(&data->update_lock);
@@ -239,8 +237,7 @@ static ssize_t set_min(struct device *dev, struct device_attribute *attr,
static ssize_t show_max(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE - 1,
data->config & to_sensor_dev_attr(attr)->index ?
@@ -250,8 +247,7 @@ static ssize_t show_max(struct device *dev, struct device_attribute *attr,
static ssize_t set_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
long val;
if (kstrtol(buf, 10, &val) < 0)
@@ -267,7 +263,8 @@ static ssize_t set_max(struct device *dev, struct device_attribute *attr,
data->config &= ~to_sensor_dev_attr(attr)->index;
data->valid = 0;
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
+ i2c_smbus_write_byte_data(data->client, LM95241_REG_RW_CONFIG,
+ data->config);
mutex_unlock(&data->update_lock);
@@ -286,8 +283,7 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
@@ -316,7 +312,7 @@ static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max, set_max,
static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
set_interval);
-static struct attribute *lm95241_attributes[] = {
+static struct attribute *lm95241_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
@@ -329,10 +325,7 @@ static struct attribute *lm95241_attributes[] = {
&dev_attr_update_interval.attr,
NULL
};
-
-static const struct attribute_group lm95241_group = {
- .attrs = lm95241_attributes,
-};
+ATTRIBUTE_GROUPS(lm95241);
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm95241_detect(struct i2c_client *new_client,
@@ -366,14 +359,11 @@ static int lm95241_detect(struct i2c_client *new_client,
return 0;
}
-static void lm95241_init_client(struct i2c_client *client)
+static void lm95241_init_client(struct i2c_client *client,
+ struct lm95241_data *data)
{
- struct lm95241_data *data = i2c_get_clientdata(client);
-
data->interval = HZ; /* 1 sec default */
- data->valid = 0;
data->config = CFG_CR0076;
- data->model = 0;
data->trutherm = (TT_OFF << TT1_SHIFT) | (TT_OFF << TT2_SHIFT);
i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
@@ -385,49 +375,27 @@ static void lm95241_init_client(struct i2c_client *client)
data->model);
}
-static int lm95241_probe(struct i2c_client *new_client,
+static int lm95241_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct lm95241_data *data;
- int err;
+ struct device *hwmon_dev;
- data = devm_kzalloc(&new_client->dev, sizeof(struct lm95241_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm95241_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(new_client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the LM95241 chip */
- lm95241_init_client(new_client);
+ lm95241_init_client(client, data);
- /* Register sysfs hooks */
- err = sysfs_create_group(&new_client->dev.kobj, &lm95241_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
-exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &lm95241_group);
- return err;
-}
-
-static int lm95241_remove(struct i2c_client *client)
-{
- struct lm95241_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm95241_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ lm95241_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
/* Driver data (common to all clients) */
@@ -444,7 +412,6 @@ static struct i2c_driver lm95241_driver = {
.name = DEVNAME,
},
.probe = lm95241_probe,
- .remove = lm95241_remove,
.id_table = lm95241_id,
.detect = lm95241_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index a6c85f0ff8f3..0ae0dfdafdff 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -115,7 +115,7 @@ static const u8 lm95245_reg_address[] = {
/* Client data (each client gets its own) */
struct lm95245_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
unsigned long last_updated; /* in jiffies */
unsigned long interval; /* in msecs */
@@ -140,8 +140,8 @@ static int temp_from_reg_signed(u8 val_h, u8 val_l)
static struct lm95245_data *lm95245_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
mutex_lock(&data->update_lock);
@@ -149,7 +149,6 @@ static struct lm95245_data *lm95245_update_device(struct device *dev)
+ msecs_to_jiffies(data->interval)) || !data->valid) {
int i;
- dev_dbg(&client->dev, "Updating lm95245 data.\n");
for (i = 0; i < ARRAY_SIZE(lm95245_reg_address); i++)
data->regs[i]
= i2c_smbus_read_byte_data(client,
@@ -249,9 +248,9 @@ static ssize_t show_limit(struct device *dev, struct device_attribute *attr,
static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
+ struct i2c_client *client = data->client;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
@@ -272,27 +271,38 @@ static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
return count;
}
+static ssize_t show_crit_hyst(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95245_data *data = lm95245_update_device(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ int hyst = data->regs[index] - data->regs[8];
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", hyst * 1000);
+}
+
static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ struct i2c_client *client = data->client;
unsigned long val;
+ int hyst, limit;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
- val /= 1000;
-
- val = clamp_val(val, 0, 31);
-
mutex_lock(&data->update_lock);
- data->valid = 0;
+ limit = i2c_smbus_read_byte_data(client, lm95245_reg_address[index]);
+ hyst = limit - val / 1000;
+ hyst = clamp_val(hyst, 0, 31);
+ data->regs[8] = hyst;
/* shared crit hysteresis */
i2c_smbus_write_byte_data(client, LM95245_REG_RW_COMMON_HYSTERESIS,
- val);
+ hyst);
mutex_unlock(&data->update_lock);
@@ -302,8 +312,7 @@ static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
static ssize_t show_type(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE - 1,
data->config2 & CFG2_REMOTE_TT ? "1\n" : "2\n");
@@ -312,8 +321,8 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr,
static ssize_t set_type(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
@@ -359,8 +368,8 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
@@ -378,16 +387,15 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_limit,
set_limit, 6);
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
- set_crit_hyst, 8);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_crit_hyst,
+ set_crit_hyst, 6);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL,
STATUS1_LOC);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_limit,
set_limit, 7);
-static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
- set_crit_hyst, 8);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_crit_hyst, NULL, 7);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL,
STATUS1_RTCRIT);
static SENSOR_DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type,
@@ -398,7 +406,7 @@ static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL,
static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
set_interval);
-static struct attribute *lm95245_attributes[] = {
+static struct attribute *lm95245_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
@@ -412,10 +420,7 @@ static struct attribute *lm95245_attributes[] = {
&dev_attr_update_interval.attr,
NULL
};
-
-static const struct attribute_group lm95245_group = {
- .attrs = lm95245_attributes,
-};
+ATTRIBUTE_GROUPS(lm95245);
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm95245_detect(struct i2c_client *new_client,
@@ -436,11 +441,9 @@ static int lm95245_detect(struct i2c_client *new_client,
return 0;
}
-static void lm95245_init_client(struct i2c_client *client)
+static void lm95245_init_client(struct i2c_client *client,
+ struct lm95245_data *data)
{
- struct lm95245_data *data = i2c_get_clientdata(client);
-
- data->valid = 0;
data->interval = lm95245_read_conversion_rate(client);
data->config1 = i2c_smbus_read_byte_data(client,
@@ -456,49 +459,27 @@ static void lm95245_init_client(struct i2c_client *client)
}
}
-static int lm95245_probe(struct i2c_client *new_client,
+static int lm95245_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct lm95245_data *data;
- int err;
+ struct device *hwmon_dev;
- data = devm_kzalloc(&new_client->dev, sizeof(struct lm95245_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm95245_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(new_client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the LM95245 chip */
- lm95245_init_client(new_client);
-
- /* Register sysfs hooks */
- err = sysfs_create_group(&new_client->dev.kobj, &lm95245_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
-exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &lm95245_group);
- return err;
-}
+ lm95245_init_client(client, data);
-static int lm95245_remove(struct i2c_client *client)
-{
- struct lm95245_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm95245_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ lm95245_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
/* Driver data (common to all clients) */
@@ -514,7 +495,6 @@ static struct i2c_driver lm95245_driver = {
.name = DEVNAME,
},
.probe = lm95245_probe,
- .remove = lm95245_remove,
.id_table = lm95245_id,
.detect = lm95245_detect,
.address_list = normal_i2c,
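
The crit_hyst rework above follows from how the chip stores hysteresis: a single shared COMMON_HYSTERESIS offset (regs[8], clamped to 0..31 degrees C) is subtracted from each critical limit, so the sysfs value is derived rather than stored. A worked example under that layout (illustrative numbers, millidegrees as in the sysfs ABI):

	/* read: limit register = 110, common hysteresis = 10
	 *       temp1_crit_hyst = (110 - 10) * 1000 = 100000 mdegC */
	hyst_mc = (data->regs[index] - data->regs[8]) * 1000;

	/* write 95000 to temp1_crit_hyst with a 110 degC limit:
	 *       offset = clamp(110 - 95000 / 1000, 0, 31) = 15 degC.
	 * Because the offset is shared, this also moves temp2_crit_hyst,
	 * which is why temp2_crit_hyst is now exposed read-only. */
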
diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c
new file mode 100644
index 000000000000..c104cc32989d
--- /dev/null
+++ b/drivers/hwmon/ltc2945.c
@@ -0,0 +1,519 @@
+/*
+ * Driver for Linear Technology LTC2945 I2C Power Monitor
+ *
+ * Copyright (c) 2014 Guenter Roeck
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+
+/* chip registers */
+#define LTC2945_CONTROL 0x00
+#define LTC2945_ALERT 0x01
+#define LTC2945_STATUS 0x02
+#define LTC2945_FAULT 0x03
+#define LTC2945_POWER_H 0x05
+#define LTC2945_MAX_POWER_H 0x08
+#define LTC2945_MIN_POWER_H 0x0b
+#define LTC2945_MAX_POWER_THRES_H 0x0e
+#define LTC2945_MIN_POWER_THRES_H 0x11
+#define LTC2945_SENSE_H 0x14
+#define LTC2945_MAX_SENSE_H 0x16
+#define LTC2945_MIN_SENSE_H 0x18
+#define LTC2945_MAX_SENSE_THRES_H 0x1a
+#define LTC2945_MIN_SENSE_THRES_H 0x1c
+#define LTC2945_VIN_H 0x1e
+#define LTC2945_MAX_VIN_H 0x20
+#define LTC2945_MIN_VIN_H 0x22
+#define LTC2945_MAX_VIN_THRES_H 0x24
+#define LTC2945_MIN_VIN_THRES_H 0x26
+#define LTC2945_ADIN_H 0x28
+#define LTC2945_MAX_ADIN_H 0x2a
+#define LTC2945_MIN_ADIN_H 0x2c
+#define LTC2945_MAX_ADIN_THRES_H 0x2e
+#define LTC2945_MIN_ADIN_THRES_H 0x30
+#define LTC2945_MIN_ADIN_THRES_L 0x31
+
+/* Fault register bits */
+
+#define FAULT_ADIN_UV (1 << 0)
+#define FAULT_ADIN_OV (1 << 1)
+#define FAULT_VIN_UV (1 << 2)
+#define FAULT_VIN_OV (1 << 3)
+#define FAULT_SENSE_UV (1 << 4)
+#define FAULT_SENSE_OV (1 << 5)
+#define FAULT_POWER_UV (1 << 6)
+#define FAULT_POWER_OV (1 << 7)
+
+/* Control register bits */
+
+#define CONTROL_MULT_SELECT (1 << 0)
+#define CONTROL_TEST_MODE (1 << 4)
+
+static inline bool is_power_reg(u8 reg)
+{
+ return reg < LTC2945_SENSE_H;
+}
+
+/* Return the value from the given register in uW, mV, or mA */
+static long long ltc2945_reg_to_val(struct device *dev, u8 reg)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int control;
+ u8 buf[3];
+ long long val;
+ int ret;
+
+ ret = regmap_bulk_read(regmap, reg, buf,
+ is_power_reg(reg) ? 3 : 2);
+ if (ret < 0)
+ return ret;
+
+ if (is_power_reg(reg)) {
+ /* power */
+ val = (buf[0] << 16) + (buf[1] << 8) + buf[2];
+ } else {
+ /* current, voltage */
+ val = (buf[0] << 4) + (buf[1] >> 4);
+ }
+
+ switch (reg) {
+ case LTC2945_POWER_H:
+ case LTC2945_MAX_POWER_H:
+ case LTC2945_MIN_POWER_H:
+ case LTC2945_MAX_POWER_THRES_H:
+ case LTC2945_MIN_POWER_THRES_H:
+ /*
+ * Convert to uW by assuming current is measured with
+	 * a 1 mOhm sense resistor, similar to current
+ * measurements.
+ * Control register bit 0 selects if voltage at SENSE+/VDD
+ * or voltage at ADIN is used to measure power.
+ */
+ ret = regmap_read(regmap, LTC2945_CONTROL, &control);
+ if (ret < 0)
+ return ret;
+ if (control & CONTROL_MULT_SELECT) {
+ /* 25 mV * 25 uV = 0.625 uV resolution. */
+ val *= 625LL;
+ } else {
+ /* 0.5 mV * 25 uV = 0.0125 uV resolution. */
+ val = (val * 25LL) >> 1;
+ }
+ break;
+ case LTC2945_VIN_H:
+ case LTC2945_MAX_VIN_H:
+ case LTC2945_MIN_VIN_H:
+ case LTC2945_MAX_VIN_THRES_H:
+ case LTC2945_MIN_VIN_THRES_H:
+ /* 25 mV resolution. Convert to mV. */
+ val *= 25;
+ break;
+ case LTC2945_ADIN_H:
+ case LTC2945_MAX_ADIN_H:
+ case LTC2945_MIN_ADIN_THRES_H:
+ case LTC2945_MAX_ADIN_THRES_H:
+ case LTC2945_MIN_ADIN_H:
+ /* 0.5mV resolution. Convert to mV. */
+ val = val >> 1;
+ break;
+ case LTC2945_SENSE_H:
+ case LTC2945_MAX_SENSE_H:
+ case LTC2945_MIN_SENSE_H:
+ case LTC2945_MAX_SENSE_THRES_H:
+ case LTC2945_MIN_SENSE_THRES_H:
+ /*
+ * 25 uV resolution. Convert to current as measured with
+	 * a 1 mOhm sense resistor, in mA. If a different sense
+ * resistor is installed, calculate the actual current by
+ * dividing the reported current by the sense resistor value
+ * in mOhm.
+ */
+ val *= 25;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return val;
+}
+
+static int ltc2945_val_to_reg(struct device *dev, u8 reg,
+ unsigned long val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int control;
+ int ret;
+
+ switch (reg) {
+ case LTC2945_POWER_H:
+ case LTC2945_MAX_POWER_H:
+ case LTC2945_MIN_POWER_H:
+ case LTC2945_MAX_POWER_THRES_H:
+ case LTC2945_MIN_POWER_THRES_H:
+ /*
+ * Convert to register value by assuming current is measured
+	 * with a 1 mOhm sense resistor, similar to current
+ * measurements.
+ * Control register bit 0 selects if voltage at SENSE+/VDD
+ * or voltage at ADIN is used to measure power, which in turn
+ * determines register calculations.
+ */
+ ret = regmap_read(regmap, LTC2945_CONTROL, &control);
+ if (ret < 0)
+ return ret;
+ if (control & CONTROL_MULT_SELECT) {
+ /* 25 mV * 25 uV = 0.625 uV resolution. */
+ val = DIV_ROUND_CLOSEST(val, 625);
+ } else {
+ /*
+ * 0.5 mV * 25 uV = 0.0125 uV resolution.
+ * Divide first to avoid overflow;
+ * accept loss of accuracy.
+ */
+ val = DIV_ROUND_CLOSEST(val, 25) * 2;
+ }
+ break;
+ case LTC2945_VIN_H:
+ case LTC2945_MAX_VIN_H:
+ case LTC2945_MIN_VIN_H:
+ case LTC2945_MAX_VIN_THRES_H:
+ case LTC2945_MIN_VIN_THRES_H:
+ /* 25 mV resolution. */
+ val /= 25;
+ break;
+ case LTC2945_ADIN_H:
+ case LTC2945_MAX_ADIN_H:
+ case LTC2945_MIN_ADIN_THRES_H:
+ case LTC2945_MAX_ADIN_THRES_H:
+ case LTC2945_MIN_ADIN_H:
+ /* 0.5mV resolution. */
+ val *= 2;
+ break;
+ case LTC2945_SENSE_H:
+ case LTC2945_MAX_SENSE_H:
+ case LTC2945_MIN_SENSE_H:
+ case LTC2945_MAX_SENSE_THRES_H:
+ case LTC2945_MIN_SENSE_THRES_H:
+ /*
+ * 25 uV resolution. Convert to current as measured with
+	 * a 1 mOhm sense resistor, in mA. If a different sense
+ * resistor is installed, calculate the actual current by
+ * dividing the reported current by the sense resistor value
+ * in mOhm.
+ */
+ val = DIV_ROUND_CLOSEST(val, 25);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return val;
+}
+
+static ssize_t ltc2945_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ long long value;
+
+ value = ltc2945_reg_to_val(dev, attr->index);
+ if (value < 0)
+ return value;
+ return snprintf(buf, PAGE_SIZE, "%lld\n", value);
+}
+
+static ssize_t ltc2945_set_value(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u8 reg = attr->index;
+ unsigned long val;
+ u8 regbuf[3];
+ int num_regs;
+ int regval;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ /* convert to register value, then clamp and write result */
+ regval = ltc2945_val_to_reg(dev, reg, val);
+ if (is_power_reg(reg)) {
+ regval = clamp_val(regval, 0, 0xffffff);
+ regbuf[0] = regval >> 16;
+ regbuf[1] = (regval >> 8) & 0xff;
+ regbuf[2] = regval;
+ num_regs = 3;
+ } else {
+ regval = clamp_val(regval, 0, 0xfff) << 4;
+ regbuf[0] = regval >> 8;
+ regbuf[1] = regval & 0xff;
+ num_regs = 2;
+ }
+ ret = regmap_bulk_write(regmap, reg, regbuf, num_regs);
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t ltc2945_reset_history(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u8 reg = attr->index;
+ int num_regs = is_power_reg(reg) ? 3 : 2;
+ u8 buf_min[3] = { 0xff, 0xff, 0xff };
+ u8 buf_max[3] = { 0, 0, 0 };
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val != 1)
+ return -EINVAL;
+
+ ret = regmap_update_bits(regmap, LTC2945_CONTROL, CONTROL_TEST_MODE,
+ CONTROL_TEST_MODE);
+
+ /* Reset minimum */
+ ret = regmap_bulk_write(regmap, reg, buf_min, num_regs);
+ if (ret)
+ return ret;
+
+ switch (reg) {
+ case LTC2945_MIN_POWER_H:
+ reg = LTC2945_MAX_POWER_H;
+ break;
+ case LTC2945_MIN_SENSE_H:
+ reg = LTC2945_MAX_SENSE_H;
+ break;
+ case LTC2945_MIN_VIN_H:
+ reg = LTC2945_MAX_VIN_H;
+ break;
+ case LTC2945_MIN_ADIN_H:
+ reg = LTC2945_MAX_ADIN_H;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ /* Reset maximum */
+ ret = regmap_bulk_write(regmap, reg, buf_max, num_regs);
+
+ /* Try resetting test mode even if there was an error */
+ regmap_update_bits(regmap, LTC2945_CONTROL, CONTROL_TEST_MODE, 0);
+
+ return ret ? : count;
+}
+
+static ssize_t ltc2945_show_bool(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int fault;
+ int ret;
+
+ ret = regmap_read(regmap, LTC2945_FAULT, &fault);
+ if (ret < 0)
+ return ret;
+
+ fault &= attr->index;
+ if (fault) /* Clear reported faults in chip register */
+ regmap_update_bits(regmap, LTC2945_FAULT, attr->index, 0);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", !!fault);
+}
+
+/* Input voltages */
+
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_VIN_H);
+static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MIN_VIN_THRES_H);
+static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MAX_VIN_THRES_H);
+static SENSOR_DEVICE_ATTR(in1_lowest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MIN_VIN_H);
+static SENSOR_DEVICE_ATTR(in1_highest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MAX_VIN_H);
+static SENSOR_DEVICE_ATTR(in1_reset_history, S_IWUSR, NULL,
+ ltc2945_reset_history, LTC2945_MIN_VIN_H);
+
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_ADIN_H);
+static SENSOR_DEVICE_ATTR(in2_min, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MIN_ADIN_THRES_H);
+static SENSOR_DEVICE_ATTR(in2_max, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MAX_ADIN_THRES_H);
+static SENSOR_DEVICE_ATTR(in2_lowest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MIN_ADIN_H);
+static SENSOR_DEVICE_ATTR(in2_highest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MAX_ADIN_H);
+static SENSOR_DEVICE_ATTR(in2_reset_history, S_IWUSR, NULL,
+ ltc2945_reset_history, LTC2945_MIN_ADIN_H);
+
+/* Voltage alarms */
+
+static SENSOR_DEVICE_ATTR(in1_min_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_VIN_UV);
+static SENSOR_DEVICE_ATTR(in1_max_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_VIN_OV);
+static SENSOR_DEVICE_ATTR(in2_min_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_ADIN_UV);
+static SENSOR_DEVICE_ATTR(in2_max_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_ADIN_OV);
+
+/* Currents (via sense resistor) */
+
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_SENSE_H);
+static SENSOR_DEVICE_ATTR(curr1_min, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MIN_SENSE_THRES_H);
+static SENSOR_DEVICE_ATTR(curr1_max, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MAX_SENSE_THRES_H);
+static SENSOR_DEVICE_ATTR(curr1_lowest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MIN_SENSE_H);
+static SENSOR_DEVICE_ATTR(curr1_highest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MAX_SENSE_H);
+static SENSOR_DEVICE_ATTR(curr1_reset_history, S_IWUSR, NULL,
+ ltc2945_reset_history, LTC2945_MIN_SENSE_H);
+
+/* Current alarms */
+
+static SENSOR_DEVICE_ATTR(curr1_min_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_SENSE_UV);
+static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_SENSE_OV);
+
+/* Power */
+
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_POWER_H);
+static SENSOR_DEVICE_ATTR(power1_min, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MIN_POWER_THRES_H);
+static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MAX_POWER_THRES_H);
+static SENSOR_DEVICE_ATTR(power1_input_lowest, S_IRUGO, ltc2945_show_value,
+ NULL, LTC2945_MIN_POWER_H);
+static SENSOR_DEVICE_ATTR(power1_input_highest, S_IRUGO, ltc2945_show_value,
+ NULL, LTC2945_MAX_POWER_H);
+static SENSOR_DEVICE_ATTR(power1_reset_history, S_IWUSR, NULL,
+ ltc2945_reset_history, LTC2945_MIN_POWER_H);
+
+/* Power alarms */
+
+static SENSOR_DEVICE_ATTR(power1_min_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_POWER_UV);
+static SENSOR_DEVICE_ATTR(power1_max_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_POWER_OV);
+
+static struct attribute *ltc2945_attrs[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in1_lowest.dev_attr.attr,
+ &sensor_dev_attr_in1_highest.dev_attr.attr,
+ &sensor_dev_attr_in1_reset_history.dev_attr.attr,
+ &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_min.dev_attr.attr,
+ &sensor_dev_attr_in2_max.dev_attr.attr,
+ &sensor_dev_attr_in2_lowest.dev_attr.attr,
+ &sensor_dev_attr_in2_highest.dev_attr.attr,
+ &sensor_dev_attr_in2_reset_history.dev_attr.attr,
+ &sensor_dev_attr_in2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_max_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_min.dev_attr.attr,
+ &sensor_dev_attr_curr1_max.dev_attr.attr,
+ &sensor_dev_attr_curr1_lowest.dev_attr.attr,
+ &sensor_dev_attr_curr1_highest.dev_attr.attr,
+ &sensor_dev_attr_curr1_reset_history.dev_attr.attr,
+ &sensor_dev_attr_curr1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_power1_input.dev_attr.attr,
+ &sensor_dev_attr_power1_min.dev_attr.attr,
+ &sensor_dev_attr_power1_max.dev_attr.attr,
+ &sensor_dev_attr_power1_input_lowest.dev_attr.attr,
+ &sensor_dev_attr_power1_input_highest.dev_attr.attr,
+ &sensor_dev_attr_power1_reset_history.dev_attr.attr,
+ &sensor_dev_attr_power1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_power1_max_alarm.dev_attr.attr,
+
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc2945);
+
+static struct regmap_config ltc2945_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LTC2945_MIN_ADIN_THRES_L,
+};
+
+static int ltc2945_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &ltc2945_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Clear faults */
+ regmap_write(regmap, LTC2945_FAULT, 0x00);
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ regmap,
+ ltc2945_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id ltc2945_id[] = {
+ {"ltc2945", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, ltc2945_id);
+
+static struct i2c_driver ltc2945_driver = {
+ .driver = {
+ .name = "ltc2945",
+ },
+ .probe = ltc2945_probe,
+ .id_table = ltc2945_id,
+};
+
+module_i2c_driver(ltc2945_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("LTC2945 driver");
+MODULE_LICENSE("GPL");
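
All scaling in ltc2945_reg_to_val() above assumes a 1 mOhm sense resistor, as the comments note. A few worked numbers using the same constants as the code (illustrative raw readings, variable names are placeholders):

	/* SENSE: 25 uV/LSB across 1 mOhm; raw 1024 -> 1024 * 25 = 25600 mA
	 * (with a 2 mOhm shunt the real current is 25600 / 2 = 12800 mA). */
	curr_ma = raw_sense * 25;

	/* VIN: 25 mV/LSB; raw 480 -> 480 * 25 = 12000 mV. */
	vin_mv = raw_vin * 25;

	/* POWER: with the 25 mV channel as multiplicand, one count equals
	 * 25 mV * 25 uV / 1 mOhm = 625 uW, hence "val *= 625LL"; with the
	 * 0.5 mV ADIN channel selected, one count equals 12.5 uW, hence
	 * "val = (val * 25LL) >> 1". */
	power_uw = raw_power * 625LL;
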
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index 8a142960d69e..c8a9bd9b050f 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -33,7 +33,7 @@ enum ltc4215_cmd {
};
struct ltc4215_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
bool valid;
@@ -45,8 +45,8 @@ struct ltc4215_data {
static struct ltc4215_data *ltc4215_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ltc4215_data *data = i2c_get_clientdata(client);
+ struct ltc4215_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
s32 val;
int i;
@@ -214,7 +214,7 @@ static SENSOR_DEVICE_ATTR(in2_min_alarm, S_IRUGO, ltc4215_show_alarm, NULL,
* Finally, construct an array of pointers to members of the above objects,
* as required for sysfs_create_group()
*/
-static struct attribute *ltc4215_attributes[] = {
+static struct attribute *ltc4215_attrs[] = {
&sensor_dev_attr_curr1_input.dev_attr.attr,
&sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
@@ -229,57 +229,33 @@ static struct attribute *ltc4215_attributes[] = {
NULL,
};
-
-static const struct attribute_group ltc4215_group = {
- .attrs = ltc4215_attributes,
-};
+ATTRIBUTE_GROUPS(ltc4215);
static int ltc4215_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
struct ltc4215_data *data;
- int ret;
+ struct device *hwmon_dev;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the LTC4215 chip */
i2c_smbus_write_byte_data(client, LTC4215_FAULT, 0x00);
- /* Register sysfs hooks */
- ret = sysfs_create_group(&client->dev.kobj, &ltc4215_group);
- if (ret)
- return ret;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out_hwmon_device_register;
- }
-
- return 0;
-
-out_hwmon_device_register:
- sysfs_remove_group(&client->dev.kobj, &ltc4215_group);
- return ret;
-}
-
-static int ltc4215_remove(struct i2c_client *client)
-{
- struct ltc4215_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ltc4215_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ ltc4215_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id ltc4215_id[] = {
@@ -294,7 +270,6 @@ static struct i2c_driver ltc4215_driver = {
.name = "ltc4215",
},
.probe = ltc4215_probe,
- .remove = ltc4215_remove,
.id_table = ltc4215_id,
};
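
In the new ltc4222 driver below, the measurement registers appear to hold a left-justified 10-bit ADC result in a big-endian byte pair, which is what the ">> 6" in ltc4222_get_value() extracts before scaling. A worked example with illustrative raw bytes:

	/* raw bytes 0x80 0x40 -> 0x8040; ADC data sits in bits [15:6] */
	val = ((0x80 << 8) + 0x40) >> 6;			/* 513 counts */

	/* ADIN,   1.25 mV/count:  DIV_ROUND_CLOSEST(513 * 5, 4)   = 641 mV   */
	/* SOURCE, 31.25 mV/count: DIV_ROUND_CLOSEST(513 * 125, 4) = 16031 mV */
	/* SENSE,  62.5 uV/count across an assumed 1 mOhm shunt:
	 *         DIV_ROUND_CLOSEST(513 * 125, 2) = 32063 mA */
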
diff --git a/drivers/hwmon/ltc4222.c b/drivers/hwmon/ltc4222.c
new file mode 100644
index 000000000000..07c25653659f
--- /dev/null
+++ b/drivers/hwmon/ltc4222.c
@@ -0,0 +1,237 @@
+/*
+ * Driver for Linear Technology LTC4222 Dual Hot Swap controller
+ *
+ * Copyright (c) 2014 Guenter Roeck
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+
+/* chip registers */
+
+#define LTC4222_CONTROL1 0xd0
+#define LTC4222_ALERT1 0xd1
+#define LTC4222_STATUS1 0xd2
+#define LTC4222_FAULT1 0xd3
+#define LTC4222_CONTROL2 0xd4
+#define LTC4222_ALERT2 0xd5
+#define LTC4222_STATUS2 0xd6
+#define LTC4222_FAULT2 0xd7
+#define LTC4222_SOURCE1 0xd8
+#define LTC4222_SOURCE2 0xda
+#define LTC4222_ADIN1 0xdc
+#define LTC4222_ADIN2 0xde
+#define LTC4222_SENSE1 0xe0
+#define LTC4222_SENSE2 0xe2
+#define LTC4222_ADC_CONTROL 0xe4
+
+/*
+ * Fault register bits
+ */
+#define FAULT_OV BIT(0)
+#define FAULT_UV BIT(1)
+#define FAULT_OC BIT(2)
+#define FAULT_POWER_BAD BIT(3)
+#define FAULT_FET_BAD BIT(5)
+
+/* Return the value from the given register in mV or mA */
+static int ltc4222_get_value(struct device *dev, u8 reg)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int val;
+ u8 buf[2];
+ int ret;
+
+ ret = regmap_bulk_read(regmap, reg, buf, 2);
+ if (ret < 0)
+ return ret;
+
+ val = ((buf[0] << 8) + buf[1]) >> 6;
+
+ switch (reg) {
+ case LTC4222_ADIN1:
+ case LTC4222_ADIN2:
+ /* 1.25 mV resolution. Convert to mV. */
+ val = DIV_ROUND_CLOSEST(val * 5, 4);
+ break;
+ case LTC4222_SOURCE1:
+ case LTC4222_SOURCE2:
+ /* 31.25 mV resolution. Convert to mV. */
+ val = DIV_ROUND_CLOSEST(val * 125, 4);
+ break;
+ case LTC4222_SENSE1:
+ case LTC4222_SENSE2:
+ /*
+ * 62.5 uV resolution. Convert to current as measured with
+ * a 1 mOhm sense resistor, in mA. If a different sense
+ * resistor is installed, calculate the actual current by
+ * dividing the reported current by the sense resistor value
+ * in mOhm.
+ */
+ val = DIV_ROUND_CLOSEST(val * 125, 2);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return val;
+}
+
+static ssize_t ltc4222_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int value;
+
+ value = ltc4222_get_value(dev, attr->index);
+ if (value < 0)
+ return value;
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t ltc4222_show_bool(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int fault;
+ int ret;
+
+ ret = regmap_read(regmap, attr->nr, &fault);
+ if (ret < 0)
+ return ret;
+ fault &= attr->index;
+ if (fault) /* Clear reported faults in chip register */
+ regmap_update_bits(regmap, attr->nr, attr->index, 0);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", !!fault);
+}
+
+/* Voltages */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_SOURCE1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_ADIN1);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_SOURCE2);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_ADIN2);
+
+/*
+ * Voltage alarms
+ * UV/OV faults are associated with the input voltage, and the power-bad
+ * and FET faults are associated with the output voltage.
+ */
+static SENSOR_DEVICE_ATTR_2(in1_min_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT1, FAULT_UV);
+static SENSOR_DEVICE_ATTR_2(in1_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT1, FAULT_OV);
+static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT1, FAULT_POWER_BAD | FAULT_FET_BAD);
+
+static SENSOR_DEVICE_ATTR_2(in3_min_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT2, FAULT_UV);
+static SENSOR_DEVICE_ATTR_2(in3_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT2, FAULT_OV);
+static SENSOR_DEVICE_ATTR_2(in4_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT2, FAULT_POWER_BAD | FAULT_FET_BAD);
+
+/* Current (via sense resistor) */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_SENSE1);
+static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_SENSE2);
+
+/* Overcurrent alarm */
+static SENSOR_DEVICE_ATTR_2(curr1_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT1, FAULT_OC);
+static SENSOR_DEVICE_ATTR_2(curr2_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT2, FAULT_OC);
+
+static struct attribute *ltc4222_attrs[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_alarm.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in4_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_curr2_input.dev_attr.attr,
+ &sensor_dev_attr_curr2_max_alarm.dev_attr.attr,
+
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc4222);
+
+static struct regmap_config ltc4222_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LTC4222_ADC_CONTROL,
+};
+
+static int ltc4222_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &ltc4222_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Clear faults */
+ regmap_write(regmap, LTC4222_FAULT1, 0x00);
+ regmap_write(regmap, LTC4222_FAULT2, 0x00);
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ regmap,
+ ltc4222_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id ltc4222_id[] = {
+ {"ltc4222", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, ltc4222_id);
+
+static struct i2c_driver ltc4222_driver = {
+ .driver = {
+ .name = "ltc4222",
+ },
+ .probe = ltc4222_probe,
+ .id_table = ltc4222_id,
+};
+
+module_i2c_driver(ltc4222_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("LTC4222 driver");
+MODULE_LICENSE("GPL");
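As a cross-check of the ltc4222_get_value() arithmetic above, here is a small standalone sketch of the decode path: the chip returns a 10-bit result left-justified in two bytes, and the scale factors match the comments in the driver. The raw bytes are made up, and the macro only mimics the kernel's DIV_ROUND_CLOSEST() for positive values.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned char buf[2] = { 0x2a, 0x80 };			/* example readback */
	unsigned int code = ((buf[0] << 8) + buf[1]) >> 6;	/* 10-bit code = 170 */

	printf("ADIN:   %u mV\n", DIV_ROUND_CLOSEST(code * 5, 4));   /* 1.25 mV/LSB  -> 213 */
	printf("SOURCE: %u mV\n", DIV_ROUND_CLOSEST(code * 125, 4)); /* 31.25 mV/LSB -> 5313 */
	printf("SENSE:  %u mA\n", DIV_ROUND_CLOSEST(code * 125, 2)); /* 62.5 uV/LSB, 1 mOhm -> 10625 */
	return 0;
}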
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index d4172933ce4f..681b5b7b3c3b 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -95,7 +95,6 @@ static void ltc4245_update_gpios(struct device *dev)
* readings as stale by setting them to -EAGAIN
*/
if (time_after(jiffies, data->last_updated + 5 * HZ)) {
- dev_dbg(&client->dev, "Marking GPIOs invalid\n");
for (i = 0; i < ARRAY_SIZE(data->gpios); i++)
data->gpios[i] = -EAGAIN;
}
@@ -141,8 +140,6 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- dev_dbg(&client->dev, "Starting ltc4245 update\n");
-
/* Read control registers -- 0x00 to 0x07 */
for (i = 0; i < ARRAY_SIZE(data->cregs); i++) {
val = i2c_smbus_read_byte_data(client, i);
@@ -470,19 +467,15 @@ static void ltc4245_sysfs_add_groups(struct ltc4245_data *data)
static bool ltc4245_use_extra_gpios(struct i2c_client *client)
{
struct ltc4245_platform_data *pdata = dev_get_platdata(&client->dev);
-#ifdef CONFIG_OF
struct device_node *np = client->dev.of_node;
-#endif
/* prefer platform data */
if (pdata)
return pdata->use_extra_gpios;
-#ifdef CONFIG_OF
/* fallback on OF */
if (of_find_property(np, "ltc4245,use-extra-gpios", NULL))
return true;
-#endif
return false;
}
@@ -512,24 +505,10 @@ static int ltc4245_probe(struct i2c_client *client,
/* Add sysfs hooks */
ltc4245_sysfs_add_groups(data);
- hwmon_dev = hwmon_device_register_with_groups(&client->dev,
- client->name, data,
- data->groups);
- if (IS_ERR(hwmon_dev))
- return PTR_ERR(hwmon_dev);
-
- i2c_set_clientdata(client, hwmon_dev);
-
- return 0;
-}
-
-static int ltc4245_remove(struct i2c_client *client)
-{
- struct device *hwmon_dev = i2c_get_clientdata(client);
-
- hwmon_device_unregister(hwmon_dev);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ data->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id ltc4245_id[] = {
@@ -544,7 +523,6 @@ static struct i2c_driver ltc4245_driver = {
.name = "ltc4245",
},
.probe = ltc4245_probe,
- .remove = ltc4245_remove,
.id_table = ltc4245_id,
};
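The ltc4245 hunk above also drops the #ifdef CONFIG_OF guards: dev.of_node is always present in struct device (it is simply NULL without OF), and of_find_property() has a stub for !CONFIG_OF, so the DT fallback can be written unconditionally. A sketch of the platform-data-first pattern, with foo_* as placeholder names:

static bool foo_use_extra_gpios(struct i2c_client *client)
{
	struct foo_platform_data *pdata = dev_get_platdata(&client->dev);

	/* Board-file platform data wins over the device tree */
	if (pdata)
		return pdata->use_extra_gpios;

	/* of_find_property() returns NULL if there is no node or no OF
	 * support, so no #ifdef is needed around the DT fallback. */
	return of_find_property(client->dev.of_node,
				"foo,use-extra-gpios", NULL) != NULL;
}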
diff --git a/drivers/hwmon/ltc4260.c b/drivers/hwmon/ltc4260.c
new file mode 100644
index 000000000000..453a250d9df5
--- /dev/null
+++ b/drivers/hwmon/ltc4260.c
@@ -0,0 +1,200 @@
+/*
+ * Driver for Linear Technology LTC4260 I2C Positive Voltage Hot Swap Controller
+ *
+ * Copyright (c) 2014 Guenter Roeck
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+
+/* chip registers */
+#define LTC4260_CONTROL 0x00
+#define LTC4260_ALERT 0x01
+#define LTC4260_STATUS 0x02
+#define LTC4260_FAULT 0x03
+#define LTC4260_SENSE 0x04
+#define LTC4260_SOURCE 0x05
+#define LTC4260_ADIN 0x06
+
+/*
+ * Fault register bits
+ */
+#define FAULT_OV (1 << 0)
+#define FAULT_UV (1 << 1)
+#define FAULT_OC (1 << 2)
+#define FAULT_POWER_BAD (1 << 3)
+#define FAULT_FET_SHORT (1 << 5)
+
+/* Return the value read from the given register, in mV or mA */
+static int ltc4260_get_value(struct device *dev, u8 reg)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case LTC4260_ADIN:
+ /* 10 mV resolution. Convert to mV. */
+ val = val * 10;
+ break;
+ case LTC4260_SOURCE:
+ /* 400 mV resolution. Convert to mV. */
+ val = val * 400;
+ break;
+ case LTC4260_SENSE:
+ /*
+ * 300 uV resolution. Convert to current as measured with
+ * a 1 mOhm sense resistor, in mA. If a different sense
+ * resistor is installed, calculate the actual current by
+ * dividing the reported current by the sense resistor value
+ * in mOhm.
+ */
+ val = val * 300;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return val;
+}
+
+static ssize_t ltc4260_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int value;
+
+ value = ltc4260_get_value(dev, attr->index);
+ if (value < 0)
+ return value;
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t ltc4260_show_bool(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int fault;
+ int ret;
+
+ ret = regmap_read(regmap, LTC4260_FAULT, &fault);
+ if (ret < 0)
+ return ret;
+
+ fault &= attr->index;
+ if (fault) /* Clear reported faults in chip register */
+ regmap_update_bits(regmap, LTC4260_FAULT, attr->index, 0);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", !!fault);
+}
+
+/* Voltages */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc4260_show_value, NULL,
+ LTC4260_SOURCE);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc4260_show_value, NULL,
+ LTC4260_ADIN);
+
+/*
+ * Voltage alarms
+ * UV/OV faults are associated with the input voltage, and the POWER BAD and
+ * FET SHORT faults are associated with the output voltage.
+ */
+static SENSOR_DEVICE_ATTR(in1_min_alarm, S_IRUGO, ltc4260_show_bool, NULL,
+ FAULT_UV);
+static SENSOR_DEVICE_ATTR(in1_max_alarm, S_IRUGO, ltc4260_show_bool, NULL,
+ FAULT_OV);
+static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, ltc4260_show_bool, NULL,
+ FAULT_POWER_BAD | FAULT_FET_SHORT);
+
+/* Current (via sense resistor) */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4260_show_value, NULL,
+ LTC4260_SENSE);
+
+/* Overcurrent alarm */
+static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO, ltc4260_show_bool, NULL,
+ FAULT_OC);
+
+static struct attribute *ltc4260_attrs[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
+
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc4260);
+
+static struct regmap_config ltc4260_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LTC4260_ADIN,
+};
+
+static int ltc4260_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &ltc4260_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Clear faults */
+ regmap_write(regmap, LTC4260_FAULT, 0x00);
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ regmap,
+ ltc4260_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id ltc4260_id[] = {
+ {"ltc4260", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, ltc4260_id);
+
+static struct i2c_driver ltc4260_driver = {
+ .driver = {
+ .name = "ltc4260",
+ },
+ .probe = ltc4260_probe,
+ .id_table = ltc4260_id,
+};
+
+module_i2c_driver(ltc4260_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("LTC4260 driver");
+MODULE_LICENSE("GPL");
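Both new drivers rely on ATTRIBUTE_GROUPS() to produce the *_groups pointer array that devm_hwmon_device_register_with_groups() expects. Roughly, ATTRIBUTE_GROUPS(foo) expands to the following (simplified sketch of the sysfs.h macro, not a verbatim copy):

static const struct attribute_group foo_group = {
	.attrs = foo_attrs,
};

static const struct attribute_group *foo_groups[] = {
	&foo_group,
	NULL,
};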
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 6638e997f83f..4c23afe113e2 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -273,7 +273,6 @@ static int max1619_probe(struct i2c_client *new_client,
return -ENOMEM;
i2c_set_clientdata(new_client, data);
- data->valid = 0;
mutex_init(&data->update_lock);
/* Initialize the MAX1619 chip */
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 029b65e6c589..e3ed0a5b6d94 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -66,7 +66,8 @@ MODULE_PARM_DESC(read_only, "Don't set any values, read only mode");
enum chips { max1668, max1805, max1989 };
struct max1668_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
+ const struct attribute_group *groups[3];
enum chips type;
struct mutex update_lock;
@@ -82,8 +83,8 @@ struct max1668_data {
static struct max1668_data *max1668_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max1668_data *data = i2c_get_clientdata(client);
+ struct max1668_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct max1668_data *ret = data;
s32 val;
int i;
@@ -205,8 +206,8 @@ static ssize_t set_temp_max(struct device *dev,
const char *buf, size_t count)
{
int index = to_sensor_dev_attr(devattr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct max1668_data *data = i2c_get_clientdata(client);
+ struct max1668_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long temp;
int ret;
@@ -216,10 +217,11 @@ static ssize_t set_temp_max(struct device *dev,
mutex_lock(&data->update_lock);
data->temp_max[index] = clamp_val(temp/1000, -128, 127);
- if (i2c_smbus_write_byte_data(client,
+ ret = i2c_smbus_write_byte_data(client,
MAX1668_REG_LIMH_WR(index),
- data->temp_max[index]))
- count = -EIO;
+ data->temp_max[index]);
+ if (ret < 0)
+ count = ret;
mutex_unlock(&data->update_lock);
return count;
@@ -230,8 +232,8 @@ static ssize_t set_temp_min(struct device *dev,
const char *buf, size_t count)
{
int index = to_sensor_dev_attr(devattr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct max1668_data *data = i2c_get_clientdata(client);
+ struct max1668_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long temp;
int ret;
@@ -241,10 +243,11 @@ static ssize_t set_temp_min(struct device *dev,
mutex_lock(&data->update_lock);
data->temp_min[index] = clamp_val(temp/1000, -128, 127);
- if (i2c_smbus_write_byte_data(client,
+ ret = i2c_smbus_write_byte_data(client,
MAX1668_REG_LIML_WR(index),
- data->temp_min[index]))
- count = -EIO;
+ data->temp_min[index]);
+ if (ret < 0)
+ count = ret;
mutex_unlock(&data->update_lock);
return count;
@@ -405,60 +408,29 @@ static int max1668_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
struct max1668_data *data;
- int err;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
- data = devm_kzalloc(&client->dev, sizeof(struct max1668_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct max1668_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
data->type = id->driver_data;
mutex_init(&data->update_lock);
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &max1668_group_common);
- if (err)
- return err;
-
- if (data->type == max1668 || data->type == max1989) {
- err = sysfs_create_group(&client->dev.kobj,
- &max1668_group_unique);
- if (err)
- goto error_sysrem0;
- }
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error_sysrem1;
- }
-
- return 0;
-
-error_sysrem1:
+ /* sysfs hooks */
+ data->groups[0] = &max1668_group_common;
if (data->type == max1668 || data->type == max1989)
- sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
-error_sysrem0:
- sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
- return err;
-}
-
-static int max1668_remove(struct i2c_client *client)
-{
- struct max1668_data *data = i2c_get_clientdata(client);
+ data->groups[1] = &max1668_group_unique;
- hwmon_device_unregister(data->hwmon_dev);
- if (data->type == max1668 || data->type == max1989)
- sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
-
- sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id max1668_id[] = {
@@ -476,7 +448,6 @@ static struct i2c_driver max1668_driver = {
.name = "max1668",
},
.probe = max1668_probe,
- .remove = max1668_remove,
.id_table = max1668_id,
.detect = max1668_detect,
.address_list = max1668_addr_list,
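The max1668 conversion keeps its chip-dependent attributes by filling a fixed, NULL-terminated array in the private data instead of creating groups one by one. A sketch of that pattern (foo_* names and the has_extra flag are placeholders, and foo_group_common/foo_group_extra are assumed to exist):

struct foo_data {
	struct i2c_client *client;
	const struct attribute_group *groups[3];	/* common, optional, NULL */
};

static void foo_fill_groups(struct foo_data *data, bool has_extra)
{
	data->groups[0] = &foo_group_common;
	if (has_extra)
		data->groups[1] = &foo_group_extra;
	/* groups[2] stays NULL (zeroed by devm_kzalloc) and terminates the list */
}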
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 066e587a18a5..70650de2cbd1 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -80,7 +80,7 @@ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
* Client data (each client gets its own)
*/
struct max6639_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -104,8 +104,8 @@ struct max6639_data {
static struct max6639_data *max6639_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
+ struct max6639_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct max6639_data *ret = data;
int i;
int status_reg;
@@ -191,9 +191,8 @@ static ssize_t show_temp_fault(struct device *dev,
static ssize_t show_temp_max(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", (data->temp_therm[attr->index] * 1000));
}
@@ -202,9 +201,9 @@ static ssize_t set_temp_max(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -224,9 +223,8 @@ static ssize_t set_temp_max(struct device *dev,
static ssize_t show_temp_crit(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", (data->temp_alert[attr->index] * 1000));
}
@@ -235,9 +233,9 @@ static ssize_t set_temp_crit(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -258,9 +256,8 @@ static ssize_t show_temp_emergency(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", (data->temp_ot[attr->index] * 1000));
}
@@ -269,9 +266,9 @@ static ssize_t set_temp_emergency(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -291,9 +288,8 @@ static ssize_t set_temp_emergency(struct device *dev,
static ssize_t show_pwm(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->pwm[attr->index] * 255 / 120);
}
@@ -302,9 +298,9 @@ static ssize_t set_pwm(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -378,7 +374,7 @@ static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 4);
-static struct attribute *max6639_attributes[] = {
+static struct attribute *max6639_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp1_fault.dev_attr.attr,
@@ -403,10 +399,7 @@ static struct attribute *max6639_attributes[] = {
&sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr,
NULL
};
-
-static const struct attribute_group max6639_group = {
- .attrs = max6639_attributes,
-};
+ATTRIBUTE_GROUPS(max6639);
/*
* returns respective index in rpm_ranges table
@@ -424,9 +417,9 @@ static int rpm_range_to_reg(int range)
return 1; /* default: 4000 RPM */
}
-static int max6639_init_client(struct i2c_client *client)
+static int max6639_init_client(struct i2c_client *client,
+ struct max6639_data *data)
{
- struct max6639_data *data = i2c_get_clientdata(client);
struct max6639_platform_data *max6639_info =
dev_get_platdata(&client->dev);
int i;
@@ -545,50 +538,27 @@ static int max6639_detect(struct i2c_client *client,
static int max6639_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct max6639_data *data;
+ struct device *hwmon_dev;
int err;
- data = devm_kzalloc(&client->dev, sizeof(struct max6639_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct max6639_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the max6639 chip */
- err = max6639_init_client(client);
+ err = max6639_init_client(client, data);
if (err < 0)
return err;
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &max6639_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error_remove;
- }
-
- dev_info(&client->dev, "temperature sensor and fan control found\n");
-
- return 0;
-
-error_remove:
- sysfs_remove_group(&client->dev.kobj, &max6639_group);
- return err;
-}
-
-static int max6639_remove(struct i2c_client *client)
-{
- struct max6639_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &max6639_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ max6639_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
#ifdef CONFIG_PM_SLEEP
@@ -622,9 +592,7 @@ static const struct i2c_device_id max6639_id[] = {
MODULE_DEVICE_TABLE(i2c, max6639_id);
-static const struct dev_pm_ops max6639_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(max6639_suspend, max6639_resume)
-};
+static SIMPLE_DEV_PM_OPS(max6639_pm_ops, max6639_suspend, max6639_resume);
static struct i2c_driver max6639_driver = {
.class = I2C_CLASS_HWMON,
@@ -633,7 +601,6 @@ static struct i2c_driver max6639_driver = {
.pm = &max6639_pm_ops,
},
.probe = max6639_probe,
- .remove = max6639_remove,
.id_table = max6639_id,
.detect = max6639_detect,
.address_list = normal_i2c,
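The max6639 PM change is purely cosmetic: SIMPLE_DEV_PM_OPS() wraps the same SET_SYSTEM_SLEEP_PM_OPS() initializer the driver open-coded before. With CONFIG_PM_SLEEP enabled it amounts to roughly this (sketch; foo_* are placeholders):

static const struct dev_pm_ops foo_pm_ops = {
	.suspend  = foo_suspend,
	.resume   = foo_resume,
	.freeze   = foo_suspend,
	.thaw     = foo_resume,
	.poweroff = foo_suspend,
	.restore  = foo_resume,
};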
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 0cafc390db4d..162a520f4bd6 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -105,38 +105,13 @@ module_param(clock, int, S_IRUGO);
#define DIV_FROM_REG(reg) (1 << (reg & 7))
-static int max6650_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int max6650_init_client(struct i2c_client *client);
-static int max6650_remove(struct i2c_client *client);
-static struct max6650_data *max6650_update_device(struct device *dev);
-
-/*
- * Driver data (common to all clients)
- */
-
-static const struct i2c_device_id max6650_id[] = {
- { "max6650", 1 },
- { "max6651", 4 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, max6650_id);
-
-static struct i2c_driver max6650_driver = {
- .driver = {
- .name = "max6650",
- },
- .probe = max6650_probe,
- .remove = max6650_remove,
- .id_table = max6650_id,
-};
-
/*
* Client data (each client gets its own)
*/
struct max6650_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
+ const struct attribute_group *groups[3];
struct mutex update_lock;
int nr_fans;
char valid; /* zero until following fields are valid */
@@ -151,6 +126,51 @@ struct max6650_data {
u8 alarm;
};
+static const u8 tach_reg[] = {
+ MAX6650_REG_TACH0,
+ MAX6650_REG_TACH1,
+ MAX6650_REG_TACH2,
+ MAX6650_REG_TACH3,
+};
+
+static struct max6650_data *max6650_update_device(struct device *dev)
+{
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ data->speed = i2c_smbus_read_byte_data(client,
+ MAX6650_REG_SPEED);
+ data->config = i2c_smbus_read_byte_data(client,
+ MAX6650_REG_CONFIG);
+ for (i = 0; i < data->nr_fans; i++) {
+ data->tach[i] = i2c_smbus_read_byte_data(client,
+ tach_reg[i]);
+ }
+ data->count = i2c_smbus_read_byte_data(client,
+ MAX6650_REG_COUNT);
+ data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);
+
+ /*
+ * Alarms are cleared on read in case the condition that
+ * caused the alarm is removed. Keep the value latched here
+ * so it can be reported through the different alarm files.
+ */
+ data->alarm |= i2c_smbus_read_byte_data(client,
+ MAX6650_REG_ALARM);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
char *buf)
{
@@ -235,8 +255,8 @@ static ssize_t get_target(struct device *dev, struct device_attribute *devattr,
static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6650_data *data = i2c_get_clientdata(client);
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int kscale, ktach;
unsigned long rpm;
int err;
@@ -304,8 +324,8 @@ static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr,
static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6650_data *data = i2c_get_clientdata(client);
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long pwm;
int err;
@@ -350,8 +370,8 @@ static ssize_t get_enable(struct device *dev, struct device_attribute *devattr,
static ssize_t set_enable(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6650_data *data = i2c_get_clientdata(client);
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int max6650_modes[3] = {0, 3, 2};
unsigned long mode;
int err;
@@ -400,8 +420,8 @@ static ssize_t get_div(struct device *dev, struct device_attribute *devattr,
static ssize_t set_div(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6650_data *data = i2c_get_clientdata(client);
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long div;
int err;
@@ -446,7 +466,7 @@ static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max6650_data *data = max6650_update_device(dev);
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = data->client;
int alarm = 0;
if (data->alarm & attr->index) {
@@ -484,7 +504,8 @@ static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct i2c_client *client = to_i2c_client(dev);
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u8 alarm_en = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN);
struct device_attribute *devattr;
@@ -519,7 +540,7 @@ static struct attribute *max6650_attrs[] = {
NULL
};
-static struct attribute_group max6650_attr_grp = {
+static const struct attribute_group max6650_group = {
.attrs = max6650_attrs,
.is_visible = max6650_attrs_visible,
};
@@ -531,7 +552,7 @@ static struct attribute *max6651_attrs[] = {
NULL
};
-static const struct attribute_group max6651_attr_grp = {
+static const struct attribute_group max6651_group = {
.attrs = max6651_attrs,
};
@@ -539,74 +560,17 @@ static const struct attribute_group max6651_attr_grp = {
* Real code
*/
-static int max6650_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct max6650_data *data;
- int err;
-
- data = devm_kzalloc(&client->dev, sizeof(struct max6650_data),
- GFP_KERNEL);
- if (!data) {
- dev_err(&client->dev, "out of memory.\n");
- return -ENOMEM;
- }
-
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
- data->nr_fans = id->driver_data;
-
- /*
- * Initialize the max6650 chip
- */
- err = max6650_init_client(client);
- if (err)
- return err;
-
- err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp);
- if (err)
- return err;
- /* 3 additional fan inputs for the MAX6651 */
- if (data->nr_fans == 4) {
- err = sysfs_create_group(&client->dev.kobj, &max6651_attr_grp);
- if (err)
- goto err_remove;
- }
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (!IS_ERR(data->hwmon_dev))
- return 0;
-
- err = PTR_ERR(data->hwmon_dev);
- dev_err(&client->dev, "error registering hwmon device.\n");
- if (data->nr_fans == 4)
- sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
-err_remove:
- sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
- return err;
-}
-
-static int max6650_remove(struct i2c_client *client)
+static int max6650_init_client(struct max6650_data *data,
+ struct i2c_client *client)
{
- struct max6650_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- if (data->nr_fans == 4)
- sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
- sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
- return 0;
-}
-
-static int max6650_init_client(struct i2c_client *client)
-{
- struct max6650_data *data = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
int config;
int err = -EIO;
config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);
if (config < 0) {
- dev_err(&client->dev, "Error reading config, aborting.\n");
+ dev_err(dev, "Error reading config, aborting.\n");
return err;
}
@@ -620,11 +584,11 @@ static int max6650_init_client(struct i2c_client *client)
config |= MAX6650_CFG_V12;
break;
default:
- dev_err(&client->dev, "illegal value for fan_voltage (%d)\n",
+ dev_err(dev, "illegal value for fan_voltage (%d)\n",
fan_voltage);
}
- dev_info(&client->dev, "Fan voltage is set to %dV.\n",
+ dev_info(dev, "Fan voltage is set to %dV.\n",
(config & MAX6650_CFG_V12) ? 12 : 5);
switch (prescaler) {
@@ -650,11 +614,10 @@ static int max6650_init_client(struct i2c_client *client)
| MAX6650_CFG_PRESCALER_16;
break;
default:
- dev_err(&client->dev, "illegal value for prescaler (%d)\n",
- prescaler);
+ dev_err(dev, "illegal value for prescaler (%d)\n", prescaler);
}
- dev_info(&client->dev, "Prescaler is set to %d.\n",
+ dev_info(dev, "Prescaler is set to %d.\n",
1 << (config & MAX6650_CFG_PRESCALER_MASK));
/*
@@ -664,17 +627,17 @@ static int max6650_init_client(struct i2c_client *client)
*/
if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) {
- dev_dbg(&client->dev, "Change mode to open loop, full off.\n");
+ dev_dbg(dev, "Change mode to open loop, full off.\n");
config = (config & ~MAX6650_CFG_MODE_MASK)
| MAX6650_CFG_MODE_OPEN_LOOP;
if (i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, 255)) {
- dev_err(&client->dev, "DAC write error, aborting.\n");
+ dev_err(dev, "DAC write error, aborting.\n");
return err;
}
}
if (i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, config)) {
- dev_err(&client->dev, "Config write error, aborting.\n");
+ dev_err(dev, "Config write error, aborting.\n");
return err;
}
@@ -684,51 +647,55 @@ static int max6650_init_client(struct i2c_client *client)
return 0;
}
-static const u8 tach_reg[] = {
- MAX6650_REG_TACH0,
- MAX6650_REG_TACH1,
- MAX6650_REG_TACH2,
- MAX6650_REG_TACH3,
-};
-
-static struct max6650_data *max6650_update_device(struct device *dev)
+static int max6650_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
- int i;
- struct i2c_client *client = to_i2c_client(dev);
- struct max6650_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->update_lock);
+ struct device *dev = &client->dev;
+ struct max6650_data *data;
+ struct device *hwmon_dev;
+ int err;
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- data->speed = i2c_smbus_read_byte_data(client,
- MAX6650_REG_SPEED);
- data->config = i2c_smbus_read_byte_data(client,
- MAX6650_REG_CONFIG);
- for (i = 0; i < data->nr_fans; i++) {
- data->tach[i] = i2c_smbus_read_byte_data(client,
- tach_reg[i]);
- }
- data->count = i2c_smbus_read_byte_data(client,
- MAX6650_REG_COUNT);
- data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);
+ data = devm_kzalloc(dev, sizeof(struct max6650_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- /*
- * Alarms are cleared on read in case the condition that
- * caused the alarm is removed. Keep the value latched here
- * for providing the register through different alarm files.
- */
- data->alarm |= i2c_smbus_read_byte_data(client,
- MAX6650_REG_ALARM);
+ data->client = client;
+ mutex_init(&data->update_lock);
+ data->nr_fans = id->driver_data;
- data->last_updated = jiffies;
- data->valid = 1;
- }
+ /*
+ * Initialize the max6650 chip
+ */
+ err = max6650_init_client(data, client);
+ if (err)
+ return err;
- mutex_unlock(&data->update_lock);
+ data->groups[0] = &max6650_group;
+ /* 3 additional fan inputs for the MAX6651 */
+ if (data->nr_fans == 4)
+ data->groups[1] = &max6651_group;
- return data;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+ client->name, data,
+ data->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
+static const struct i2c_device_id max6650_id[] = {
+ { "max6650", 1 },
+ { "max6651", 4 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max6650_id);
+
+static struct i2c_driver max6650_driver = {
+ .driver = {
+ .name = "max6650",
+ },
+ .probe = max6650_probe,
+ .id_table = max6650_id,
+};
+
module_i2c_driver(max6650_driver);
MODULE_AUTHOR("Hans J. Koch");
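max6650 keeps its alarm attributes conditional through the group's .is_visible() hook, which now fetches the client from drvdata instead of assuming dev is the i2c client. The general shape of such a hook (foo_* names and foo_attr_to_alarm_bit() are hypothetical helpers):

static umode_t foo_attrs_visible(struct kobject *kobj, struct attribute *a,
				 int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct foo_data *data = dev_get_drvdata(dev);

	/* The attribute is silently skipped when its alarm is not enabled */
	if (!(data->alarm_en & foo_attr_to_alarm_bit(a)))
		return 0;

	return a->mode;		/* keep the permissions declared in the attr */
}

static const struct attribute_group foo_group = {
	.attrs = foo_attrs,
	.is_visible = foo_attrs_visible,
};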
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 330fe117e219..988181e4cfcd 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1225,7 +1225,7 @@ static int pc87360_probe(struct platform_device *pdev)
int i;
struct pc87360_data *data;
int err = 0;
- const char *name = "pc87360";
+ const char *name;
int use_thermistors = 0;
struct device *dev = &pdev->dev;
@@ -1233,13 +1233,14 @@ static int pc87360_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- data->fannr = 2;
- data->innr = 0;
- data->tempnr = 0;
-
switch (devid) {
+ default:
+ name = "pc87360";
+ data->fannr = 2;
+ break;
case 0xe8:
name = "pc87363";
+ data->fannr = 2;
break;
case 0xe4:
name = "pc87364";
@@ -1260,7 +1261,6 @@ static int pc87360_probe(struct platform_device *pdev)
}
data->name = name;
- data->valid = 0;
mutex_init(&data->lock);
mutex_init(&data->update_lock);
platform_set_drvdata(pdev, data);
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index de3c152a1d9a..e24ed521051a 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -1,9 +1,9 @@
/*
* Hardware monitoring driver for LTC2974, LTC2977, LTC2978, LTC3880,
- * and LTC3883
+ * LTC3883, and LTM4676
*
* Copyright (c) 2011 Ericsson AB.
- * Copyright (c) 2013 Guenter Roeck
+ * Copyright (c) 2013, 2014 Guenter Roeck
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
@@ -28,7 +24,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
+enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883, ltm4676 };
/* Common for all chips */
#define LTC2978_MFR_VOUT_PEAK 0xdd
@@ -45,7 +41,7 @@ enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
#define LTC2974_MFR_IOUT_PEAK 0xd7
#define LTC2974_MFR_IOUT_MIN 0xd8
-/* LTC3880 and LTC3883 */
+/* LTC3880, LTC3883, and LTM4676 */
#define LTC3880_MFR_IOUT_PEAK 0xd7
#define LTC3880_MFR_CLEAR_PEAKS 0xe3
#define LTC3880_MFR_TEMPERATURE2_PEAK 0xf4
@@ -53,7 +49,8 @@ enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
/* LTC3883 only */
#define LTC3883_MFR_IIN_PEAK 0xe1
-#define LTC2974_ID 0x0212
+#define LTC2974_ID_REV1 0x0212
+#define LTC2974_ID_REV2 0x0213
#define LTC2977_ID 0x0130
#define LTC2978_ID_REV1 0x0121
#define LTC2978_ID_REV2 0x0122
@@ -62,6 +59,8 @@ enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
#define LTC3880_ID_MASK 0xff00
#define LTC3883_ID 0x4300
#define LTC3883_ID_MASK 0xff00
+#define LTM4676_ID 0x4480 /* datasheet claims 0x440X */
+#define LTM4676_ID_MASK 0xfff0
#define LTC2974_NUM_PAGES 4
#define LTC2978_NUM_PAGES 8
@@ -370,6 +369,7 @@ static const struct i2c_device_id ltc2978_id[] = {
{"ltc2978", ltc2978},
{"ltc3880", ltc3880},
{"ltc3883", ltc3883},
+ {"ltm4676", ltm4676},
{}
};
MODULE_DEVICE_TABLE(i2c, ltc2978_id);
@@ -394,7 +394,7 @@ static int ltc2978_probe(struct i2c_client *client,
if (chip_id < 0)
return chip_id;
- if (chip_id == LTC2974_ID) {
+ if (chip_id == LTC2974_ID_REV1 || chip_id == LTC2974_ID_REV2) {
data->id = ltc2974;
} else if (chip_id == LTC2977_ID) {
data->id = ltc2977;
@@ -405,6 +405,8 @@ static int ltc2978_probe(struct i2c_client *client,
data->id = ltc3880;
} else if ((chip_id & LTC3883_ID_MASK) == LTC3883_ID) {
data->id = ltc3883;
+ } else if ((chip_id & LTM4676_ID_MASK) == LTM4676_ID) {
+ data->id = ltm4676;
} else {
dev_err(&client->dev, "Unsupported chip ID 0x%x\n", chip_id);
return -ENODEV;
@@ -458,6 +460,7 @@ static int ltc2978_probe(struct i2c_client *client,
}
break;
case ltc3880:
+ case ltm4676:
info->read_word_data = ltc3880_read_word_data;
info->pages = LTC3880_NUM_PAGES;
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
@@ -500,5 +503,5 @@ static struct i2c_driver ltc2978_driver = {
module_i2c_driver(ltc2978_driver);
MODULE_AUTHOR("Guenter Roeck");
-MODULE_DESCRIPTION("PMBus driver for LTC2974, LTC2978, LTC3880, and LTC3883");
+MODULE_DESCRIPTION("PMBus driver for LTC2974, LTC2978, LTC3880, LTC3883, and LTM4676");
MODULE_LICENSE("GPL");
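For the new LTM4676 entry above, the ID word is matched under a mask so the revision nibble is ignored. A short worked example (the readback value 0x4487 is made up):

	u16 chip_id = 0x4487;	/* hypothetical MFR_SPECIAL_ID readback */

	/* (0x4487 & 0xfff0) == 0x4480, so this matches LTM4676_ID */
	if ((chip_id & LTM4676_ID_MASK) == LTM4676_ID)
		data->id = ltm4676;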
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
index d9e1b7de78da..4ef5802df6d8 100644
--- a/drivers/hwmon/smm665.c
+++ b/drivers/hwmon/smm665.c
@@ -222,7 +222,7 @@ static int smm665_read_adc(struct smm665_data *data, int adc)
rv = i2c_smbus_read_word_swapped(client, 0);
if (rv < 0) {
dev_dbg(&client->dev, "Failed to read ADC value: error %d", rv);
- return -1;
+ return rv;
}
/*
* Validate/verify readback adc channel (in bit 11..14).
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index 38944e94f65f..8df43c51de2c 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -319,7 +319,7 @@ static int __init via_cputemp_init(void)
if (err)
goto exit;
- get_online_cpus();
+ cpu_notifier_register_begin();
for_each_online_cpu(i) {
struct cpuinfo_x86 *c = &cpu_data(i);
@@ -339,14 +339,14 @@ static int __init via_cputemp_init(void)
#ifndef CONFIG_HOTPLUG_CPU
if (list_empty(&pdev_list)) {
- put_online_cpus();
+ cpu_notifier_register_done();
err = -ENODEV;
goto exit_driver_unreg;
}
#endif
- register_hotcpu_notifier(&via_cputemp_cpu_notifier);
- put_online_cpus();
+ __register_hotcpu_notifier(&via_cputemp_cpu_notifier);
+ cpu_notifier_register_done();
return 0;
#ifndef CONFIG_HOTPLUG_CPU
@@ -361,8 +361,8 @@ static void __exit via_cputemp_exit(void)
{
struct pdev_entry *p, *n;
- get_online_cpus();
- unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
+ cpu_notifier_register_begin();
+ __unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
mutex_lock(&pdev_list_mutex);
list_for_each_entry_safe(p, n, &pdev_list, list) {
platform_device_unregister(p->pdev);
@@ -370,7 +370,7 @@ static void __exit via_cputemp_exit(void)
kfree(p);
}
mutex_unlock(&pdev_list_mutex);
- put_online_cpus();
+ cpu_notifier_register_done();
platform_driver_unregister(&via_cputemp_driver);
}
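The via-cputemp change swaps get_online_cpus()/put_online_cpus() plus register_hotcpu_notifier() for the cpu_notifier_register_begin()/done() pair with the double-underscore register call, so the online-CPU walk and the notifier registration happen under one lock and no hotplug event can slip in between. Sketch of the pattern (foo_* names are hypothetical):

static int __init foo_init(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		foo_device_add(cpu);		/* per-CPU setup, hypothetical */

	/* Must use the __ variant: the registration lock is already held */
	__register_hotcpu_notifier(&foo_cpu_notifier);

	cpu_notifier_register_done();
	return 0;
}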
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index df585808adb6..4068db4d9580 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -1376,7 +1376,6 @@ w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -ENOMEM;
i2c_set_clientdata(client, data);
- data->valid = 0;
mutex_init(&data->update_lock);
err = w83792d_detect_subclients(client);
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 6384b268f590..ac3043122011 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -188,12 +188,8 @@ static int w83l785ts_probe(struct i2c_client *client,
return -ENOMEM;
i2c_set_clientdata(client, data);
- data->valid = 0;
mutex_init(&data->update_lock);
- /* Default values in case the first read fails (unlikely). */
- data->temp[1] = data->temp[0] = 0;
-
/*
* Initialize the W83L785TS chip
* Nothing yet, assume it is already started.
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index de17c5593d97..c94db1c5e353 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -110,6 +110,7 @@ config I2C_I801
Wellsburg (PCH)
Coleto Creek (PCH)
Wildcat Point-LP (PCH)
+ BayTrail (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -375,6 +376,13 @@ config I2C_BLACKFIN_TWI_CLK_KHZ
help
The unit of the TWI clock is kHz.
+config I2C_CADENCE
+ tristate "Cadence I2C Controller"
+ depends on ARCH_ZYNQ
+ help
+ Say yes here to select the Cadence I2C Host Controller. This
+ controller is used, for example, by Xilinx Zynq.
+
config I2C_CBUS_GPIO
tristate "CBUS I2C driver"
depends on GPIOLIB
@@ -432,6 +440,13 @@ config I2C_DESIGNWARE_PCI
This driver can also be built as a module. If so, the module
will be called i2c-designware-pci.
+config I2C_EFM32
+ tristate "EFM32 I2C controller"
+ depends on ARCH_EFM32 || COMPILE_TEST
+ help
+ This driver supports the I2C block found in Energy Micro's EFM32
+ SoCs.
+
config I2C_EG20T
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C"
depends on PCI
@@ -527,7 +542,7 @@ config I2C_MPC
config I2C_MV64XXX
tristate "Marvell mv64xxx I2C Controller"
- depends on (MV64X60 || PLAT_ORION || ARCH_SUNXI)
+ depends on MV64X60 || PLAT_ORION || ARCH_SUNXI
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Marvell 64xxx line of host bridges.
@@ -648,6 +663,16 @@ config I2C_PXA_SLAVE
is necessary for systems where the PXA may be a target on the
I2C bus.
+config I2C_QUP
+ tristate "Qualcomm QUP based I2C controller"
+ depends on ARCH_QCOM
+ help
+ If you say yes to this option, support will be included for the
+ built-in I2C interface on the Qualcomm SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-qup.
+
config I2C_RIIC
tristate "Renesas RIIC adapter"
depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -936,7 +961,7 @@ config I2C_ACORN
config I2C_ELEKTOR
tristate "Elektor ISA card"
- depends on ISA && HAS_IOPORT && BROKEN_ON_SMP
+ depends on ISA && HAS_IOPORT_MAP && BROKEN_ON_SMP
select I2C_ALGOPCF
help
This supports the PCF8584 ISA bus I2C adapter. Say Y if you own
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index a08931fe73e1..18d18ff9db93 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_I2C_AT91) += i2c-at91.o
obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o
obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
+obj-$(CONFIG_I2C_CADENCE) += i2c-cadence.o
obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o
obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
@@ -41,6 +42,7 @@ obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o
i2c-designware-platform-objs := i2c-designware-platdrv.o
obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
i2c-designware-pci-objs := i2c-designware-pcidrv.o
+obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o
obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o
obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o
obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
@@ -63,6 +65,7 @@ obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
+obj-$(CONFIG_I2C_QUP) += i2c-qup.o
obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 7d60d3a1f621..451e305f7971 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -494,7 +494,7 @@ static struct i2c_adapter ali1535_adapter = {
.algo = &smbus_algorithm,
};
-static DEFINE_PCI_DEVICE_TABLE(ali1535_ids) = {
+static const struct pci_device_id ali1535_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ },
};
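The DEFINE_PCI_DEVICE_TABLE() removals here and in the following bus drivers are mechanical: the macro was only shorthand for a const table, so the open-coded form is (sketch, reusing the same ALi ID as above):

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
	{ }				/* all-zero terminating entry */
};
MODULE_DEVICE_TABLE(pci, foo_ids);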
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 4611e4754a67..98a1c97739ba 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -416,7 +416,7 @@ static void ali1563_remove(struct pci_dev *dev)
ali1563_shutdown(dev);
}
-static DEFINE_PCI_DEVICE_TABLE(ali1563_id_table) = {
+static const struct pci_device_id ali1563_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) },
{},
};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 4823206a4870..2fa21ce9682b 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -476,7 +476,7 @@ static struct i2c_adapter ali15x3_adapter = {
.algo = &smbus_algorithm,
};
-static DEFINE_PCI_DEVICE_TABLE(ali15x3_ids) = {
+static const struct pci_device_id ali15x3_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 819d3c1062a7..a16f72891358 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -307,7 +307,7 @@ static const char* chipname[] = {
"nVidia nForce", "AMD8111",
};
-static DEFINE_PCI_DEVICE_TABLE(amd756_ids) = {
+static const struct pci_device_id amd756_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B),
.driver_data = AMD756 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413),
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index f3d4d79855b5..95a80a8f81b5 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -414,7 +414,7 @@ static const struct i2c_algorithm smbus_algorithm = {
};
-static DEFINE_PCI_DEVICE_TABLE(amd8111_ids) = {
+static const struct pci_device_id amd8111_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 843d01268ae9..e95f9ba96790 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -32,7 +32,7 @@
#include <linux/slab.h>
#include <linux/platform_data/dma-atmel.h>
-#define TWI_CLK_HZ 100000 /* max 400 Kbits/s */
+#define DEFAULT_TWI_CLK_HZ 100000 /* max 400 Kbits/s */
#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
@@ -711,6 +711,7 @@ static int at91_twi_probe(struct platform_device *pdev)
struct resource *mem;
int rc;
u32 phy_addr;
+ u32 bus_clk_rate;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -756,13 +757,18 @@ static int at91_twi_probe(struct platform_device *pdev)
dev->use_dma = true;
}
- at91_calc_twi_clock(dev, TWI_CLK_HZ);
+ rc = of_property_read_u32(dev->dev->of_node, "clock-frequency",
+ &bus_clk_rate);
+ if (rc)
+ bus_clk_rate = DEFAULT_TWI_CLK_HZ;
+
+ at91_calc_twi_clock(dev, bus_clk_rate);
at91_init_twi_bus(dev);
snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
i2c_set_adapdata(&dev->adapter, dev);
dev->adapter.owner = THIS_MODULE;
- dev->adapter.class = I2C_CLASS_HWMON;
+ dev->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
dev->adapter.algo = &at91_twi_algorithm;
dev->adapter.dev.parent = dev->dev;
dev->adapter.nr = pdev->id;
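The at91 hunk reads the optional "clock-frequency" property and falls back to the old fixed default when it is absent; of_property_read_u32() returns a negative error in that case, which is simply treated as "use the default". A minimal sketch of the same pattern (dev is assumed to be a struct device * with an of_node):

	u32 bus_clk_rate;

	if (of_property_read_u32(dev->of_node, "clock-frequency",
				 &bus_clk_rate))
		bus_clk_rate = 100000;	/* default: standard-mode 100 kHz */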
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 77df97b932af..c60719577fc3 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -219,7 +219,7 @@ static const struct i2c_algorithm bcm2835_i2c_algo = {
static int bcm2835_i2c_probe(struct platform_device *pdev)
{
struct bcm2835_i2c_dev *i2c_dev;
- struct resource *mem, *requested, *irq;
+ struct resource *mem, *irq;
u32 bus_clk_rate, divider;
int ret;
struct i2c_adapter *adap;
@@ -234,25 +234,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
init_completion(&i2c_dev->completion);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "No mem resource\n");
- return -ENODEV;
- }
-
- requested = devm_request_mem_region(&pdev->dev, mem->start,
- resource_size(mem),
- dev_name(&pdev->dev));
- if (!requested) {
- dev_err(&pdev->dev, "Could not claim register region\n");
- return -EBUSY;
- }
-
- i2c_dev->regs = devm_ioremap(&pdev->dev, mem->start,
- resource_size(mem));
- if (!i2c_dev->regs) {
- dev_err(&pdev->dev, "Could not map registers\n");
- return -ENOMEM;
- }
+ i2c_dev->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(i2c_dev->regs))
+ return PTR_ERR(i2c_dev->regs);
i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c_dev->clk)) {
@@ -295,7 +279,7 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
adap = &i2c_dev->adapter;
i2c_set_adapdata(adap, i2c_dev);
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON;
+ adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name));
adap->algo = &bcm2835_i2c_algo;
adap->dev.parent = &pdev->dev;
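The bcm2835 cleanup collapses the get-resource / request-region / ioremap sequence into devm_ioremap_resource(), which performs all three steps, prints its own error messages, and returns an ERR_PTR() on failure (a NULL resource is handled as well). Sketch:

	struct resource *mem;
	void __iomem *regs;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(regs))
		return PTR_ERR(regs);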
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 3b9bd9a3f2b0..e6d5162b6379 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -21,10 +21,10 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/i2c/bfin_twi.h>
-#include <asm/blackfin.h>
-#include <asm/portmux.h>
#include <asm/irq.h>
+#include <asm/portmux.h>
#include <asm/bfin_twi.h>
/* SMBus mode*/
@@ -65,7 +65,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
/* Transmit next data */
while (iface->writeNum > 0 &&
(read_FIFO_STAT(iface) & XMTSTAT) != XMT_FULL) {
- SSYNC();
write_XMT_DATA8(iface, *(iface->transPtr++));
iface->writeNum--;
}
@@ -248,7 +247,6 @@ static irqreturn_t bfin_twi_interrupt_entry(int irq, void *dev_id)
/* Clear interrupt status */
write_INT_STAT(iface, twi_int_status);
bfin_twi_handle_interrupt(iface, twi_int_status);
- SSYNC();
}
spin_unlock_irqrestore(&iface->lock, flags);
return IRQ_HANDLED;
@@ -294,9 +292,7 @@ static int bfin_twi_do_master_xfer(struct i2c_adapter *adap,
* discarded before start a new operation.
*/
write_FIFO_CTL(iface, 0x3);
- SSYNC();
write_FIFO_CTL(iface, 0);
- SSYNC();
if (pmsg->flags & I2C_M_RD)
iface->read_write = I2C_SMBUS_READ;
@@ -306,7 +302,6 @@ static int bfin_twi_do_master_xfer(struct i2c_adapter *adap,
if (iface->writeNum > 0) {
write_XMT_DATA8(iface, *(iface->transPtr++));
iface->writeNum--;
- SSYNC();
}
}
@@ -315,7 +310,6 @@ static int bfin_twi_do_master_xfer(struct i2c_adapter *adap,
/* Interrupt mask . Enable XMT, RCV interrupt */
write_INT_MASK(iface, MCOMP | MERR | RCVSERV | XMTSERV);
- SSYNC();
if (pmsg->len <= 255)
write_MASTER_CTL(iface, pmsg->len << 6);
@@ -329,7 +323,6 @@ static int bfin_twi_do_master_xfer(struct i2c_adapter *adap,
(iface->msg_num > 1 ? RSTART : 0) |
((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) |
((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0));
- SSYNC();
while (!iface->result) {
if (!wait_for_completion_timeout(&iface->complete,
@@ -453,7 +446,6 @@ int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr,
* start a new operation.
*/
write_FIFO_CTL(iface, 0x3);
- SSYNC();
write_FIFO_CTL(iface, 0);
/* clear int stat */
@@ -461,7 +453,6 @@ int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr,
/* Set Transmit device address */
write_MASTER_ADDR(iface, addr);
- SSYNC();
switch (iface->cur_mode) {
case TWI_I2C_MODE_STANDARDSUB:
@@ -469,7 +460,6 @@ int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr,
write_INT_MASK(iface, MCOMP | MERR |
((iface->read_write == I2C_SMBUS_READ) ?
RCVSERV : XMTSERV));
- SSYNC();
if (iface->writeNum + 1 <= 255)
write_MASTER_CTL(iface, (iface->writeNum + 1) << 6);
@@ -484,7 +474,6 @@ int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr,
case TWI_I2C_MODE_COMBINED:
write_XMT_DATA8(iface, iface->command);
write_INT_MASK(iface, MCOMP | MERR | RCVSERV | XMTSERV);
- SSYNC();
if (iface->writeNum > 0)
write_MASTER_CTL(iface, (iface->writeNum + 1) << 6);
@@ -531,7 +520,6 @@ int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr,
write_INT_MASK(iface, MCOMP | MERR |
((iface->read_write == I2C_SMBUS_READ) ?
RCVSERV : XMTSERV));
- SSYNC();
/* Master enable */
write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
@@ -539,7 +527,6 @@ int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr,
((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0));
break;
}
- SSYNC();
while (!iface->result) {
if (!wait_for_completion_timeout(&iface->complete,
@@ -669,7 +656,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name));
p_adap->algo = &bfin_twi_algorithm;
p_adap->algo_data = iface;
- p_adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ p_adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED;
p_adap->dev.parent = &pdev->dev;
p_adap->timeout = 5 * HZ;
p_adap->retries = 3;
@@ -704,7 +691,6 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
/* Enable TWI */
write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA);
- SSYNC();
rc = i2c_add_numbered_adapter(p_adap);
if (rc < 0) {
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
new file mode 100644
index 000000000000..63f3f03ecc9b
--- /dev/null
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -0,0 +1,905 @@
+/*
+ * I2C bus driver for the Cadence I2C controller.
+ *
+ * Copyright (C) 2009 - 2014 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it
+ * and/or modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * either version 2 of the License, or (at your option) any
+ * later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/* Register offsets for the I2C device. */
+#define CDNS_I2C_CR_OFFSET 0x00 /* Control Register, RW */
+#define CDNS_I2C_SR_OFFSET 0x04 /* Status Register, RO */
+#define CDNS_I2C_ADDR_OFFSET 0x08 /* I2C Address Register, RW */
+#define CDNS_I2C_DATA_OFFSET 0x0C /* I2C Data Register, RW */
+#define CDNS_I2C_ISR_OFFSET 0x10 /* IRQ Status Register, RW */
+#define CDNS_I2C_XFER_SIZE_OFFSET 0x14 /* Transfer Size Register, RW */
+#define CDNS_I2C_TIME_OUT_OFFSET 0x1C /* Time Out Register, RW */
+#define CDNS_I2C_IER_OFFSET 0x24 /* IRQ Enable Register, WO */
+#define CDNS_I2C_IDR_OFFSET 0x28 /* IRQ Disable Register, WO */
+
+/* Control Register Bit mask definitions */
+#define CDNS_I2C_CR_HOLD BIT(4) /* Hold Bus bit */
+#define CDNS_I2C_CR_ACK_EN BIT(3)
+#define CDNS_I2C_CR_NEA BIT(2)
+#define CDNS_I2C_CR_MS BIT(1)
+/* Read or Write Master transfer 0 = Transmitter, 1 = Receiver */
+#define CDNS_I2C_CR_RW BIT(0)
+/* 1 = Auto init FIFO to zeroes */
+#define CDNS_I2C_CR_CLR_FIFO BIT(6)
+#define CDNS_I2C_CR_DIVA_SHIFT 14
+#define CDNS_I2C_CR_DIVA_MASK (3 << CDNS_I2C_CR_DIVA_SHIFT)
+#define CDNS_I2C_CR_DIVB_SHIFT 8
+#define CDNS_I2C_CR_DIVB_MASK (0x3f << CDNS_I2C_CR_DIVB_SHIFT)
+
+/* Status Register Bit mask definitions */
+#define CDNS_I2C_SR_BA BIT(8)
+#define CDNS_I2C_SR_RXDV BIT(5)
+
+/*
+ * I2C Address Register Bit mask definitions
+ * Normal addressing mode uses [6:0] bits. Extended addressing mode uses [9:0]
+ * bits. A write access to this register always initiates a transfer if the I2C
+ * is in master mode.
+ */
+#define CDNS_I2C_ADDR_MASK 0x000003FF /* I2C Address Mask */
+
+/*
+ * I2C Interrupt Registers Bit mask definitions
+ * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
+ * bit definitions.
+ */
+#define CDNS_I2C_IXR_ARB_LOST BIT(9)
+#define CDNS_I2C_IXR_RX_UNF BIT(7)
+#define CDNS_I2C_IXR_TX_OVF BIT(6)
+#define CDNS_I2C_IXR_RX_OVF BIT(5)
+#define CDNS_I2C_IXR_SLV_RDY BIT(4)
+#define CDNS_I2C_IXR_TO BIT(3)
+#define CDNS_I2C_IXR_NACK BIT(2)
+#define CDNS_I2C_IXR_DATA BIT(1)
+#define CDNS_I2C_IXR_COMP BIT(0)
+
+#define CDNS_I2C_IXR_ALL_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \
+ CDNS_I2C_IXR_RX_UNF | \
+ CDNS_I2C_IXR_TX_OVF | \
+ CDNS_I2C_IXR_RX_OVF | \
+ CDNS_I2C_IXR_SLV_RDY | \
+ CDNS_I2C_IXR_TO | \
+ CDNS_I2C_IXR_NACK | \
+ CDNS_I2C_IXR_DATA | \
+ CDNS_I2C_IXR_COMP)
+
+#define CDNS_I2C_IXR_ERR_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \
+ CDNS_I2C_IXR_RX_UNF | \
+ CDNS_I2C_IXR_TX_OVF | \
+ CDNS_I2C_IXR_RX_OVF | \
+ CDNS_I2C_IXR_NACK)
+
+#define CDNS_I2C_ENABLED_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \
+ CDNS_I2C_IXR_RX_UNF | \
+ CDNS_I2C_IXR_TX_OVF | \
+ CDNS_I2C_IXR_RX_OVF | \
+ CDNS_I2C_IXR_NACK | \
+ CDNS_I2C_IXR_DATA | \
+ CDNS_I2C_IXR_COMP)
+
+#define CDNS_I2C_TIMEOUT msecs_to_jiffies(1000)
+
+#define CDNS_I2C_FIFO_DEPTH 16
+/* FIFO depth at which the DATA interrupt occurs */
+#define CDNS_I2C_DATA_INTR_DEPTH (CDNS_I2C_FIFO_DEPTH - 2)
+#define CDNS_I2C_MAX_TRANSFER_SIZE 255
+/* Transfer size in multiples of data interrupt depth */
+#define CDNS_I2C_TRANSFER_SIZE (CDNS_I2C_MAX_TRANSFER_SIZE - 3)
+
+#define DRIVER_NAME "cdns-i2c"
+
+#define CDNS_I2C_SPEED_MAX 400000
+#define CDNS_I2C_SPEED_DEFAULT 100000
+
+#define CDNS_I2C_DIVA_MAX 4
+#define CDNS_I2C_DIVB_MAX 64
+
+#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
+#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
+
+/**
+ * struct cdns_i2c - I2C device private data structure
+ * @membase: Base address of the I2C device
+ * @adap: I2C adapter instance
+ * @p_msg: Message pointer
+ * @err_status: Error status in Interrupt Status Register
+ * @xfer_done: Transfer complete status
+ * @p_send_buf: Pointer to transmit buffer
+ * @p_recv_buf: Pointer to receive buffer
+ * @suspended: Flag holding the device's PM status
+ * @send_count: Number of bytes still expected to send
+ * @recv_count: Number of bytes still expected to receive
+ * @irq: IRQ number
+ * @input_clk: Input clock to I2C controller
+ * @i2c_clk: Maximum I2C clock speed
+ * @bus_hold_flag: Flag used in repeated start for clearing HOLD bit
+ * @clk: Pointer to struct clk
+ * @clk_rate_change_nb: Notifier block for clock rate changes
+ */
+struct cdns_i2c {
+ void __iomem *membase;
+ struct i2c_adapter adap;
+ struct i2c_msg *p_msg;
+ int err_status;
+ struct completion xfer_done;
+ unsigned char *p_send_buf;
+ unsigned char *p_recv_buf;
+ u8 suspended;
+ unsigned int send_count;
+ unsigned int recv_count;
+ int irq;
+ unsigned long input_clk;
+ unsigned int i2c_clk;
+ unsigned int bus_hold_flag;
+ struct clk *clk;
+ struct notifier_block clk_rate_change_nb;
+};
+
+#define to_cdns_i2c(_nb) container_of(_nb, struct cdns_i2c, \
+ clk_rate_change_nb)
+
+/**
+ * cdns_i2c_clear_bus_hold() - Clear bus hold bit
+ * @id: Pointer to driver data struct
+ *
+ * Helper to clear the controller's bus hold bit.
+ */
+static void cdns_i2c_clear_bus_hold(struct cdns_i2c *id)
+{
+ u32 reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ if (reg & CDNS_I2C_CR_HOLD)
+ cdns_i2c_writereg(reg & ~CDNS_I2C_CR_HOLD, CDNS_I2C_CR_OFFSET);
+}
+
+/**
+ * cdns_i2c_isr - Interrupt handler for the I2C device
+ * @irq: irq number for the I2C device
+ * @ptr: void pointer to cdns_i2c structure
+ *
+ * This function handles the data interrupt, transfer complete interrupt and
+ * the error interrupts of the I2C device.
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
+{
+ unsigned int isr_status, avail_bytes;
+ unsigned int bytes_to_recv, bytes_to_send;
+ struct cdns_i2c *id = ptr;
+ /* Signal completion only after everything is updated */
+ int done_flag = 0;
+ irqreturn_t status = IRQ_NONE;
+
+ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+
+ /* Handling nack and arbitration lost interrupt */
+ if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) {
+ done_flag = 1;
+ status = IRQ_HANDLED;
+ }
+
+ /* Handling Data interrupt */
+ if ((isr_status & CDNS_I2C_IXR_DATA) &&
+ (id->recv_count >= CDNS_I2C_DATA_INTR_DEPTH)) {
+ /* Always read data interrupt threshold bytes */
+ bytes_to_recv = CDNS_I2C_DATA_INTR_DEPTH;
+ id->recv_count -= CDNS_I2C_DATA_INTR_DEPTH;
+ avail_bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+
+ /*
+		 * If the transfer size register value is zero, check the
+		 * remaining bytes and update the transfer size register.
+ */
+ if (!avail_bytes) {
+ if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
+ cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
+ CDNS_I2C_XFER_SIZE_OFFSET);
+ else
+ cdns_i2c_writereg(id->recv_count,
+ CDNS_I2C_XFER_SIZE_OFFSET);
+ }
+
+ /* Process the data received */
+ while (bytes_to_recv--)
+ *(id->p_recv_buf)++ =
+ cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+
+ if (!id->bus_hold_flag &&
+ (id->recv_count <= CDNS_I2C_FIFO_DEPTH))
+ cdns_i2c_clear_bus_hold(id);
+
+ status = IRQ_HANDLED;
+ }
+
+ /* Handling Transfer Complete interrupt */
+ if (isr_status & CDNS_I2C_IXR_COMP) {
+ if (!id->p_recv_buf) {
+ /*
+			 * If the device is sending data and there is further
+			 * data to be sent, calculate the available space in
+			 * the FIFO and fill it with that many bytes.
+ */
+ if (id->send_count) {
+ avail_bytes = CDNS_I2C_FIFO_DEPTH -
+ cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+ if (id->send_count > avail_bytes)
+ bytes_to_send = avail_bytes;
+ else
+ bytes_to_send = id->send_count;
+
+ while (bytes_to_send--) {
+ cdns_i2c_writereg(
+ (*(id->p_send_buf)++),
+ CDNS_I2C_DATA_OFFSET);
+ id->send_count--;
+ }
+ } else {
+ /*
+				 * Signal completion of the transaction and
+				 * clear the bus hold bit if there are no
+				 * further messages to be processed.
+ */
+ done_flag = 1;
+ }
+ if (!id->send_count && !id->bus_hold_flag)
+ cdns_i2c_clear_bus_hold(id);
+ } else {
+ if (!id->bus_hold_flag)
+ cdns_i2c_clear_bus_hold(id);
+ /*
+			 * If the device is receiving data, read the data
+			 * present in the FIFO and signal completion of the
+			 * transaction.
+ */
+ while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
+ CDNS_I2C_SR_RXDV) {
+ *(id->p_recv_buf)++ =
+ cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+ id->recv_count--;
+ }
+ done_flag = 1;
+ }
+
+ status = IRQ_HANDLED;
+ }
+
+ /* Update the status for errors */
+ id->err_status = isr_status & CDNS_I2C_IXR_ERR_INTR_MASK;
+ if (id->err_status)
+ status = IRQ_HANDLED;
+
+ cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
+
+ if (done_flag)
+ complete(&id->xfer_done);
+
+ return status;
+}
+
+/**
+ * cdns_i2c_mrecv - Prepare and start a master receive operation
+ * @id: pointer to the i2c device structure
+ */
+static void cdns_i2c_mrecv(struct cdns_i2c *id)
+{
+ unsigned int ctrl_reg;
+ unsigned int isr_status;
+
+ id->p_recv_buf = id->p_msg->buf;
+ id->recv_count = id->p_msg->len;
+
+ /* Put the controller in master receive mode and clear the FIFO */
+ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ ctrl_reg |= CDNS_I2C_CR_RW | CDNS_I2C_CR_CLR_FIFO;
+
+ if (id->p_msg->flags & I2C_M_RECV_LEN)
+ id->recv_count = I2C_SMBUS_BLOCK_MAX + 1;
+
+ /*
+	 * Check the message size against the FIFO depth and set the
+	 * 'hold bus' bit if it exceeds the FIFO depth.
+ */
+ if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
+ ctrl_reg |= CDNS_I2C_CR_HOLD;
+
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+ /* Clear the interrupts in interrupt status register */
+ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+ cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
+
+ /*
+	 * Check the number of bytes to receive against the maximum
+	 * transfer size. Program the transfer size register with the
+	 * byte count if it fits, or with the maximum transfer size
+	 * otherwise. Enable the interrupts.
+ */
+ if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
+ cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
+ CDNS_I2C_XFER_SIZE_OFFSET);
+ else
+ cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET);
+	/* Clear the bus hold flag if the bytes to receive fit in the FIFO */
+ if (!id->bus_hold_flag &&
+ ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
+ (id->recv_count <= CDNS_I2C_FIFO_DEPTH))
+ cdns_i2c_clear_bus_hold(id);
+ /* Set the slave address in address register - triggers operation */
+ cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
+ CDNS_I2C_ADDR_OFFSET);
+ cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
+}
+
+/**
+ * cdns_i2c_msend - Prepare and start a master send operation
+ * @id: pointer to the i2c device
+ */
+static void cdns_i2c_msend(struct cdns_i2c *id)
+{
+ unsigned int avail_bytes;
+ unsigned int bytes_to_send;
+ unsigned int ctrl_reg;
+ unsigned int isr_status;
+
+ id->p_recv_buf = NULL;
+ id->p_send_buf = id->p_msg->buf;
+ id->send_count = id->p_msg->len;
+
+ /* Set the controller in Master transmit mode and clear the FIFO. */
+ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ ctrl_reg &= ~CDNS_I2C_CR_RW;
+ ctrl_reg |= CDNS_I2C_CR_CLR_FIFO;
+
+ /*
+	 * Check the message size against the FIFO depth and set the
+	 * 'hold bus' bit if it exceeds the FIFO depth.
+ */
+ if (id->send_count > CDNS_I2C_FIFO_DEPTH)
+ ctrl_reg |= CDNS_I2C_CR_HOLD;
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+ /* Clear the interrupts in interrupt status register. */
+ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+ cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
+
+ /*
+ * Calculate the space available in FIFO. Check the message length
+ * against the space available, and fill the FIFO accordingly.
+ * Enable the interrupts.
+ */
+ avail_bytes = CDNS_I2C_FIFO_DEPTH -
+ cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+
+ if (id->send_count > avail_bytes)
+ bytes_to_send = avail_bytes;
+ else
+ bytes_to_send = id->send_count;
+
+ while (bytes_to_send--) {
+ cdns_i2c_writereg((*(id->p_send_buf)++), CDNS_I2C_DATA_OFFSET);
+ id->send_count--;
+ }
+
+ /*
+ * Clear the bus hold flag if there is no more data
+ * and if it is the last message.
+ */
+ if (!id->bus_hold_flag && !id->send_count)
+ cdns_i2c_clear_bus_hold(id);
+ /* Set the slave address in address register - triggers operation. */
+ cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
+ CDNS_I2C_ADDR_OFFSET);
+
+ cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
+}
+
+/**
+ * cdns_i2c_master_reset - Reset the interface
+ * @adap: pointer to the i2c adapter driver instance
+ *
+ * This function cleans up the FIFOs, clears the hold bit and the status
+ * registers, and disables the interrupts.
+ */
+static void cdns_i2c_master_reset(struct i2c_adapter *adap)
+{
+ struct cdns_i2c *id = adap->algo_data;
+ u32 regval;
+
+ /* Disable the interrupts */
+ cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET);
+ /* Clear the hold bit and fifos */
+ regval = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ regval &= ~CDNS_I2C_CR_HOLD;
+ regval |= CDNS_I2C_CR_CLR_FIFO;
+ cdns_i2c_writereg(regval, CDNS_I2C_CR_OFFSET);
+	/* Reset the transfer size register to zero */
+ cdns_i2c_writereg(0, CDNS_I2C_XFER_SIZE_OFFSET);
+	/* Clear the interrupt status register */
+ regval = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+ cdns_i2c_writereg(regval, CDNS_I2C_ISR_OFFSET);
+ /* Clear the status register */
+ regval = cdns_i2c_readreg(CDNS_I2C_SR_OFFSET);
+ cdns_i2c_writereg(regval, CDNS_I2C_SR_OFFSET);
+}
+
+static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
+ struct i2c_adapter *adap)
+{
+ int ret;
+ u32 reg;
+
+ id->p_msg = msg;
+ id->err_status = 0;
+ reinit_completion(&id->xfer_done);
+
+ /* Check for the TEN Bit mode on each msg */
+ reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ if (msg->flags & I2C_M_TEN) {
+ if (reg & CDNS_I2C_CR_NEA)
+ cdns_i2c_writereg(reg & ~CDNS_I2C_CR_NEA,
+ CDNS_I2C_CR_OFFSET);
+ } else {
+ if (!(reg & CDNS_I2C_CR_NEA))
+ cdns_i2c_writereg(reg | CDNS_I2C_CR_NEA,
+ CDNS_I2C_CR_OFFSET);
+ }
+
+ /* Check for the R/W flag on each msg */
+ if (msg->flags & I2C_M_RD)
+ cdns_i2c_mrecv(id);
+ else
+ cdns_i2c_msend(id);
+
+ /* Wait for the signal of completion */
+ ret = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
+ if (!ret) {
+ cdns_i2c_master_reset(adap);
+ dev_err(id->adap.dev.parent,
+ "timeout waiting on completion\n");
+ return -ETIMEDOUT;
+ }
+
+ cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK,
+ CDNS_I2C_IDR_OFFSET);
+
+ /* If it is bus arbitration error, try again */
+ if (id->err_status & CDNS_I2C_IXR_ARB_LOST)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * cdns_i2c_master_xfer - The main i2c transfer function
+ * @adap: pointer to the i2c adapter driver instance
+ * @msgs: pointer to the i2c message structure
+ * @num: the number of messages to transfer
+ *
+ * Initiates the send/recv activity based on the transfer message received.
+ *
+ * Return: number of msgs processed on success, negative error otherwise
+ */
+static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ int ret, count;
+ u32 reg;
+ struct cdns_i2c *id = adap->algo_data;
+
+ /* Check if the bus is free */
+ if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA)
+ return -EAGAIN;
+
+ /*
+ * Set the flag to one when multiple messages are to be
+ * processed with a repeated start.
+ */
+ if (num > 1) {
+ id->bus_hold_flag = 1;
+ reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ reg |= CDNS_I2C_CR_HOLD;
+ cdns_i2c_writereg(reg, CDNS_I2C_CR_OFFSET);
+ } else {
+ id->bus_hold_flag = 0;
+ }
+
+ /* Process the msg one by one */
+ for (count = 0; count < num; count++, msgs++) {
+ if (count == (num - 1))
+ id->bus_hold_flag = 0;
+
+ ret = cdns_i2c_process_msg(id, msgs, adap);
+ if (ret)
+ return ret;
+
+ /* Report the other error interrupts to application */
+ if (id->err_status) {
+ cdns_i2c_master_reset(adap);
+
+ if (id->err_status & CDNS_I2C_IXR_NACK)
+ return -ENXIO;
+
+ return -EIO;
+ }
+ }
+
+ return num;
+}
+
+/**
+ * cdns_i2c_func - Returns the supported features of the I2C driver
+ * @adap: pointer to the i2c adapter structure
+ *
+ * Return: 32 bit value, each bit corresponding to a feature
+ */
+static u32 cdns_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR |
+ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+ I2C_FUNC_SMBUS_BLOCK_DATA;
+}
+
+static const struct i2c_algorithm cdns_i2c_algo = {
+ .master_xfer = cdns_i2c_master_xfer,
+ .functionality = cdns_i2c_func,
+};
+
+/**
+ * cdns_i2c_calc_divs - Calculate clock dividers
+ * @f: I2C clock frequency
+ * @input_clk: Input clock frequency
+ * @a: First divider (return value)
+ * @b: Second divider (return value)
+ *
+ * f is used as both an input and an output: on entry it holds the target I2C
+ * frequency, and on exit it holds the actually achieved I2C frequency.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int cdns_i2c_calc_divs(unsigned long *f, unsigned long input_clk,
+ unsigned int *a, unsigned int *b)
+{
+ unsigned long fscl = *f, best_fscl = *f, actual_fscl, temp;
+ unsigned int div_a, div_b, calc_div_a = 0, calc_div_b = 0;
+ unsigned int last_error, current_error;
+
+ /* calculate (divisor_a+1) x (divisor_b+1) */
+ temp = input_clk / (22 * fscl);
+
+ /*
+	 * If the calculated value is zero or exceeds the maximum divider
+	 * product, the fscl input is out of range. Return an error.
+ */
+ if (!temp || (temp > (CDNS_I2C_DIVA_MAX * CDNS_I2C_DIVB_MAX)))
+ return -EINVAL;
+
+ last_error = -1;
+ for (div_a = 0; div_a < CDNS_I2C_DIVA_MAX; div_a++) {
+ div_b = DIV_ROUND_UP(input_clk, 22 * fscl * (div_a + 1));
+
+ if ((div_b < 1) || (div_b > CDNS_I2C_DIVB_MAX))
+ continue;
+ div_b--;
+
+ actual_fscl = input_clk / (22 * (div_a + 1) * (div_b + 1));
+
+ if (actual_fscl > fscl)
+ continue;
+
+ current_error = ((actual_fscl > fscl) ? (actual_fscl - fscl) :
+ (fscl - actual_fscl));
+
+ if (last_error > current_error) {
+ calc_div_a = div_a;
+ calc_div_b = div_b;
+ best_fscl = actual_fscl;
+ last_error = current_error;
+ }
+ }
+
+ *a = calc_div_a;
+ *b = calc_div_b;
+ *f = best_fscl;
+
+ return 0;
+}
+
+/**
+ * cdns_i2c_setclk - Set the serial clock rate for the I2C device
+ * @clk_in: I2C clock input frequency in Hz
+ * @id: Pointer to the I2C device structure
+ *
+ * The device must be idle rather than busy transferring data before setting
+ * these device options.
+ * The data rate is set by values in the control register.
+ * The formula for determining the correct register values is
+ * Fscl = Fpclk/(22 x (divisor_a+1) x (divisor_b+1))
+ * See the hardware data sheet for a full explanation of setting the serial
+ * clock rate. The clock cannot be faster than the input clock divided
+ * by 22. The two most common clock rates are 100 kHz and 400 kHz.
+ *
+ * Return: 0 on success, negative error otherwise
+ */
+static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
+{
+ unsigned int div_a, div_b;
+ unsigned int ctrl_reg;
+ int ret = 0;
+ unsigned long fscl = id->i2c_clk;
+
+ ret = cdns_i2c_calc_divs(&fscl, clk_in, &div_a, &div_b);
+ if (ret)
+ return ret;
+
+ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ ctrl_reg &= ~(CDNS_I2C_CR_DIVA_MASK | CDNS_I2C_CR_DIVB_MASK);
+ ctrl_reg |= ((div_a << CDNS_I2C_CR_DIVA_SHIFT) |
+ (div_b << CDNS_I2C_CR_DIVB_SHIFT));
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+ return 0;
+}
+
+/**
+ * cdns_i2c_clk_notifier_cb - Clock rate change callback
+ * @nb: Pointer to notifier block
+ * @event: Notification reason
+ * @data: Pointer to notification data object
+ *
+ * This function is called when the cdns_i2c input clock frequency changes.
+ * The callback checks whether a valid bus frequency can be generated after the
+ * change. If so, the change is acknowledged, otherwise the change is aborted.
+ * New dividers are written to the HW in the pre- or post-change
+ * notification, depending on the scaling direction.
+ *
+ * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
+ *		to acknowledge the change, NOTIFY_DONE if the notification is
+ * considered irrelevant.
+ */
+static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
+ event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct cdns_i2c *id = to_cdns_i2c(nb);
+
+ if (id->suspended)
+ return NOTIFY_OK;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ {
+ unsigned long input_clk = ndata->new_rate;
+ unsigned long fscl = id->i2c_clk;
+ unsigned int div_a, div_b;
+ int ret;
+
+ ret = cdns_i2c_calc_divs(&fscl, input_clk, &div_a, &div_b);
+ if (ret) {
+ dev_warn(id->adap.dev.parent,
+ "clock rate change rejected\n");
+ return NOTIFY_STOP;
+ }
+
+ /* scale up */
+ if (ndata->new_rate > ndata->old_rate)
+ cdns_i2c_setclk(ndata->new_rate, id);
+
+ return NOTIFY_OK;
+ }
+ case POST_RATE_CHANGE:
+ id->input_clk = ndata->new_rate;
+ /* scale down */
+ if (ndata->new_rate < ndata->old_rate)
+ cdns_i2c_setclk(ndata->new_rate, id);
+ return NOTIFY_OK;
+ case ABORT_RATE_CHANGE:
+ /* scale up */
+ if (ndata->new_rate > ndata->old_rate)
+ cdns_i2c_setclk(ndata->old_rate, id);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+/**
+ * cdns_i2c_suspend - Suspend method for the driver
+ * @_dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ *
+ * Return: 0 always
+ */
+static int __maybe_unused cdns_i2c_suspend(struct device *_dev)
+{
+ struct platform_device *pdev = container_of(_dev,
+ struct platform_device, dev);
+ struct cdns_i2c *xi2c = platform_get_drvdata(pdev);
+
+ clk_disable(xi2c->clk);
+ xi2c->suspended = 1;
+
+ return 0;
+}
+
+/**
+ * cdns_i2c_resume - Resume from suspend
+ * @_dev: Address of the device structure
+ *
+ * Resume operation after suspend.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int __maybe_unused cdns_i2c_resume(struct device *_dev)
+{
+ struct platform_device *pdev = container_of(_dev,
+ struct platform_device, dev);
+ struct cdns_i2c *xi2c = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_enable(xi2c->clk);
+ if (ret) {
+ dev_err(_dev, "Cannot enable clock.\n");
+ return ret;
+ }
+
+ xi2c->suspended = 0;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cdns_i2c_dev_pm_ops, cdns_i2c_suspend,
+ cdns_i2c_resume);
+
+/**
+ * cdns_i2c_probe - Platform registration call
+ * @pdev: Handle to the platform device structure
+ *
+ * This function does all the memory allocation and registration for the i2c
+ * device. The user can switch to 10-bit address mode using the ioctl
+ * call with the I2C_TENBIT option.
+ *
+ * Return: 0 on success, negative error otherwise
+ */
+static int cdns_i2c_probe(struct platform_device *pdev)
+{
+ struct resource *r_mem;
+ struct cdns_i2c *id;
+ int ret;
+
+ id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, id);
+
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ id->membase = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(id->membase))
+ return PTR_ERR(id->membase);
+
+ id->irq = platform_get_irq(pdev, 0);
+
+ id->adap.dev.of_node = pdev->dev.of_node;
+ id->adap.algo = &cdns_i2c_algo;
+ id->adap.timeout = CDNS_I2C_TIMEOUT;
+ id->adap.retries = 3; /* Default retry value. */
+ id->adap.algo_data = id;
+ id->adap.dev.parent = &pdev->dev;
+ init_completion(&id->xfer_done);
+ snprintf(id->adap.name, sizeof(id->adap.name),
+ "Cadence I2C at %08lx", (unsigned long)r_mem->start);
+
+ id->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(id->clk)) {
+ dev_err(&pdev->dev, "input clock not found.\n");
+ return PTR_ERR(id->clk);
+ }
+ ret = clk_prepare_enable(id->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb;
+ if (clk_notifier_register(id->clk, &id->clk_rate_change_nb))
+ dev_warn(&pdev->dev, "Unable to register clock notifier.\n");
+ id->input_clk = clk_get_rate(id->clk);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &id->i2c_clk);
+ if (ret || (id->i2c_clk > CDNS_I2C_SPEED_MAX))
+ id->i2c_clk = CDNS_I2C_SPEED_DEFAULT;
+
+ cdns_i2c_writereg(CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS,
+ CDNS_I2C_CR_OFFSET);
+
+ ret = cdns_i2c_setclk(id->input_clk, id);
+ if (ret) {
+ dev_err(&pdev->dev, "invalid SCL clock: %u Hz\n", id->i2c_clk);
+ ret = -EINVAL;
+ goto err_clk_dis;
+ }
+
+ ret = devm_request_irq(&pdev->dev, id->irq, cdns_i2c_isr, 0,
+ DRIVER_NAME, id);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot get irq %d\n", id->irq);
+ goto err_clk_dis;
+ }
+
+ ret = i2c_add_adapter(&id->adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "reg adap failed: %d\n", ret);
+ goto err_clk_dis;
+ }
+
+ dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
+ id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
+
+ return 0;
+
+err_clk_dis:
+ clk_disable_unprepare(id->clk);
+ return ret;
+}
+
+/**
+ * cdns_i2c_remove - Unregister the device after releasing the resources
+ * @pdev: Handle to the platform device structure
+ *
+ * This function frees all the resources allocated to the device.
+ *
+ * Return: 0 always
+ */
+static int cdns_i2c_remove(struct platform_device *pdev)
+{
+ struct cdns_i2c *id = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&id->adap);
+ clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
+ clk_disable_unprepare(id->clk);
+
+ return 0;
+}
+
+static const struct of_device_id cdns_i2c_of_match[] = {
+ { .compatible = "cdns,i2c-r1p10", },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, cdns_i2c_of_match);
+
+static struct platform_driver cdns_i2c_drv = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cdns_i2c_of_match,
+ .pm = &cdns_i2c_dev_pm_ops,
+ },
+ .probe = cdns_i2c_probe,
+ .remove = cdns_i2c_remove,
+};
+
+module_platform_driver(cdns_i2c_drv);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Cadence I2C bus driver");
+MODULE_LICENSE("GPL");
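For reference, the divider search in cdns_i2c_calc_divs() above can be exercised outside the kernel. Below is a minimal user-space sketch (not part of the patch) of the same Fscl = Fpclk / (22 x (div_a + 1) x (div_b + 1)) search; the main() wrapper and the example clock values are illustrative assumptions only.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long input_clk = 111111111;	/* example input clock, ~111 MHz */
	unsigned long fscl = 400000;		/* requested bus rate, 400 kHz */
	unsigned long best_fscl = 0, best_err = ~0UL;
	unsigned int div_a, best_a = 0, best_b = 0;

	for (div_a = 0; div_a < 4; div_a++) {
		unsigned long div_b = DIV_ROUND_UP(input_clk,
						   22 * fscl * (div_a + 1));
		unsigned long actual;

		if (div_b < 1 || div_b > 64)
			continue;
		div_b--;

		actual = input_clk / (22 * (div_a + 1) * (div_b + 1));
		if (actual > fscl)
			continue;	/* never exceed the requested rate */

		if (fscl - actual < best_err) {
			best_err = fscl - actual;
			best_fscl = actual;
			best_a = div_a;
			best_b = div_b;
		}
	}

	printf("div_a=%u div_b=%u -> %lu Hz\n", best_a, best_b, best_fscl);
	return 0;
}

With the example values this picks div_a=0, div_b=12, giving roughly 388.5 kHz, the closest rate that does not exceed the 400 kHz request.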
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index af0b5830303d..389bc68c55ad 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -712,7 +712,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON;
+ adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name));
adap->algo = &i2c_davinci_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 14c4b30d4ccc..22e92c3d3d07 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -218,7 +218,7 @@ i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
*
* If your hardware is free from tHD;STA issue, try this one.
*/
- return (ic_clk * tSYMBOL + 5000) / 10000 - 8 + offset;
+ return (ic_clk * tSYMBOL + 500000) / 1000000 - 8 + offset;
else
/*
* Conditional expression:
@@ -234,7 +234,8 @@ i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
* The reason why we need to take into account "tf" here,
* is the same as described in i2c_dw_scl_lcnt().
*/
- return (ic_clk * (tSYMBOL + tf) + 5000) / 10000 - 3 + offset;
+ return (ic_clk * (tSYMBOL + tf) + 500000) / 1000000
+ - 3 + offset;
}
static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
@@ -250,7 +251,7 @@ static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
* account the fall time of SCL signal (tf). Default tf value
* should be 0.3 us, for safety.
*/
- return ((ic_clk * (tLOW + tf) + 5000) / 10000) - 1 + offset;
+ return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset;
}
static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
@@ -287,6 +288,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
u32 input_clock_khz;
u32 hcnt, lcnt;
u32 reg;
+ u32 sda_falling_time, scl_falling_time;
input_clock_khz = dev->get_clk_rate_khz(dev);
@@ -308,15 +310,18 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
/* set standard and fast speed deviders for high/low periods */
+ sda_falling_time = dev->sda_falling_time ?: 300; /* ns */
+ scl_falling_time = dev->scl_falling_time ?: 300; /* ns */
+
/* Standard-mode */
hcnt = i2c_dw_scl_hcnt(input_clock_khz,
- 40, /* tHD;STA = tHIGH = 4.0 us */
- 3, /* tf = 0.3 us */
+ 4000, /* tHD;STA = tHIGH = 4.0 us */
+ sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
lcnt = i2c_dw_scl_lcnt(input_clock_khz,
- 47, /* tLOW = 4.7 us */
- 3, /* tf = 0.3 us */
+ 4700, /* tLOW = 4.7 us */
+ scl_falling_time,
0); /* No offset */
/* Allow platforms to specify the ideal HCNT and LCNT values */
@@ -330,13 +335,13 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
/* Fast-mode */
hcnt = i2c_dw_scl_hcnt(input_clock_khz,
- 6, /* tHD;STA = tHIGH = 0.6 us */
- 3, /* tf = 0.3 us */
+ 600, /* tHD;STA = tHIGH = 0.6 us */
+ sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
lcnt = i2c_dw_scl_lcnt(input_clock_khz,
- 13, /* tLOW = 1.3 us */
- 3, /* tf = 0.3 us */
+ 1300, /* tLOW = 1.3 us */
+ scl_falling_time,
0); /* No offset */
if (dev->fs_hcnt && dev->fs_lcnt) {
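The hunks above change the HCNT/LCNT math from timings expressed in hundreds of nanoseconds (40, 47, 3, with a +5000/10000 rounding step) to plain nanoseconds, with optional per-board SDA/SCL fall times. A minimal stand-alone sketch of the new LCNT arithmetic follows (illustrative only; the scl_lcnt() helper name and the example clock are assumptions, not kernel API).

#include <stdio.h>
#include <stdint.h>

/*
 * Same arithmetic as i2c_dw_scl_lcnt() after this patch: ic_clk is in kHz
 * and the timings are in ns, so ic_clk * t / 1e6 is the cycle count; the
 * +500000 rounds to the nearest integer before the divide.
 */
static uint32_t scl_lcnt(uint32_t ic_clk_khz, uint32_t tlow_ns,
			 uint32_t tf_ns, int offset)
{
	return ((ic_clk_khz * (tlow_ns + tf_ns) + 500000) / 1000000)
		- 1 + offset;
}

int main(void)
{
	/* Standard mode on a 100 MHz IC clock: tLOW = 4.7 us, tf = 0.3 us */
	printf("SS LCNT = %u\n", (unsigned)scl_lcnt(100000, 4700, 300, 0));
	/* Fast mode: tLOW = 1.3 us, tf = 0.3 us */
	printf("FS LCNT = %u\n", (unsigned)scl_lcnt(100000, 1300, 300, 0));
	return 0;
}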
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index e8a756537ed0..d66b6cbc9edc 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -99,6 +99,8 @@ struct dw_i2c_dev {
unsigned int rx_fifo_depth;
int rx_outstanding;
u32 sda_hold_time;
+ u32 sda_falling_time;
+ u32 scl_falling_time;
u16 ss_hcnt;
u16 ss_lcnt;
u16 fs_hcnt;
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index f6ed06c966ee..85056c22d21e 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -54,6 +54,16 @@ enum dw_pci_ctl_id_t {
medfield_3,
medfield_4,
medfield_5,
+
+ baytrail,
+};
+
+struct dw_scl_sda_cfg {
+ u32 ss_hcnt;
+ u32 fs_hcnt;
+ u32 ss_lcnt;
+ u32 fs_lcnt;
+ u32 sda_hold;
};
struct dw_pci_controller {
@@ -62,12 +72,29 @@ struct dw_pci_controller {
u32 tx_fifo_depth;
u32 rx_fifo_depth;
u32 clk_khz;
+ u32 functionality;
+ struct dw_scl_sda_cfg *scl_sda_cfg;
};
#define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \
DW_IC_CON_SLAVE_DISABLE | \
DW_IC_CON_RESTART_EN)
+#define DW_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \
+ I2C_FUNC_SMBUS_BYTE | \
+ I2C_FUNC_SMBUS_BYTE_DATA | \
+ I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_I2C_BLOCK)
+
+/* BayTrail HCNT/LCNT/SDA hold time */
+static struct dw_scl_sda_cfg byt_config = {
+ .ss_hcnt = 0x200,
+ .fs_hcnt = 0x55,
+ .ss_lcnt = 0x200,
+ .fs_lcnt = 0x99,
+ .sda_hold = 0x6,
+};
+
static struct dw_pci_controller dw_pci_controllers[] = {
[moorestown_0] = {
.bus_num = 0,
@@ -132,75 +159,40 @@ static struct dw_pci_controller dw_pci_controllers[] = {
.rx_fifo_depth = 32,
.clk_khz = 25000,
},
+ [baytrail] = {
+ .bus_num = -1,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 100000,
+ .functionality = I2C_FUNC_10BIT_ADDR,
+ .scl_sda_cfg = &byt_config,
+ },
};
static struct i2c_algorithm i2c_dw_algo = {
.master_xfer = i2c_dw_xfer,
.functionality = i2c_dw_func,
};
+#ifdef CONFIG_PM
static int i2c_dw_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
- struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
- int err;
-
-
- i2c_dw_disable(i2c);
-
- err = pci_save_state(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_save_state failed\n");
- return err;
- }
-
- err = pci_set_power_state(pdev, PCI_D3hot);
- if (err) {
- dev_err(&pdev->dev, "pci_set_power_state failed\n");
- return err;
- }
+ i2c_dw_disable(pci_get_drvdata(pdev));
return 0;
}
static int i2c_dw_pci_resume(struct device *dev)
{
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
- struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
- int err;
- u32 enabled;
-
- enabled = i2c_dw_is_enabled(i2c);
- if (enabled)
- return 0;
-
- err = pci_set_power_state(pdev, PCI_D0);
- if (err) {
- dev_err(&pdev->dev, "pci_set_power_state() failed\n");
- return err;
- }
-
- pci_restore_state(pdev);
-
- i2c_dw_init(i2c);
- return 0;
-}
-static int i2c_dw_pci_runtime_idle(struct device *dev)
-{
- int err = pm_schedule_suspend(dev, 500);
- dev_dbg(dev, "runtime_idle called\n");
-
- if (err != 0)
- return 0;
- return -EBUSY;
+ return i2c_dw_init(pci_get_drvdata(pdev));
}
+#endif
-static const struct dev_pm_ops i2c_dw_pm_ops = {
- .resume = i2c_dw_pci_resume,
- .suspend = i2c_dw_pci_suspend,
- SET_RUNTIME_PM_OPS(i2c_dw_pci_suspend, i2c_dw_pci_resume,
- i2c_dw_pci_runtime_idle)
-};
+static UNIVERSAL_DEV_PM_OPS(i2c_dw_pm_ops, i2c_dw_pci_suspend,
+ i2c_dw_pci_resume, NULL);
static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
{
@@ -214,6 +206,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
struct i2c_adapter *adap;
int r;
struct dw_pci_controller *controller;
+ struct dw_scl_sda_cfg *cfg;
if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) {
dev_err(&pdev->dev, "%s: invalid driver data %ld\n", __func__,
@@ -247,13 +240,18 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
dev->base = pcim_iomap_table(pdev)[0];
dev->dev = &pdev->dev;
- dev->functionality =
- I2C_FUNC_I2C |
- I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK;
+ dev->functionality = controller->functionality |
+ DW_DEFAULT_FUNCTIONALITY;
+
dev->master_cfg = controller->bus_cfg;
+ if (controller->scl_sda_cfg) {
+ cfg = controller->scl_sda_cfg;
+ dev->ss_hcnt = cfg->ss_hcnt;
+ dev->fs_hcnt = cfg->fs_hcnt;
+ dev->ss_lcnt = cfg->ss_lcnt;
+ dev->fs_lcnt = cfg->fs_lcnt;
+ dev->sda_hold_time = cfg->sda_hold;
+ }
pci_set_drvdata(pdev, dev);
@@ -270,8 +268,8 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
adap->algo = &i2c_dw_algo;
adap->dev.parent = &pdev->dev;
adap->nr = controller->bus_num;
- snprintf(adap->name, sizeof(adap->name), "i2c-designware-pci-%d",
- adap->nr);
+
+ snprintf(adap->name, sizeof(adap->name), "i2c-designware-pci");
r = devm_request_irq(&pdev->dev, pdev->irq, i2c_dw_isr, IRQF_SHARED,
adap->name, dev);
@@ -290,6 +288,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
@@ -309,7 +308,7 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
/* work with hotplug and coldplug */
MODULE_ALIAS("i2c_designware-pci");
-static DEFINE_PCI_DEVICE_TABLE(i2_designware_pci_ids) = {
+static const struct pci_device_id i2_designware_pci_ids[] = {
/* Moorestown */
{ PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
{ PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
@@ -321,6 +320,14 @@ static DEFINE_PCI_DEVICE_TABLE(i2_designware_pci_ids) = {
{ PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
{ PCI_VDEVICE(INTEL, 0x082D), medfield_1 },
{ PCI_VDEVICE(INTEL, 0x082E), medfield_2 },
+ /* Baytrail */
+ { PCI_VDEVICE(INTEL, 0x0F41), baytrail },
+ { PCI_VDEVICE(INTEL, 0x0F42), baytrail },
+ { PCI_VDEVICE(INTEL, 0x0F43), baytrail },
+ { PCI_VDEVICE(INTEL, 0x0F44), baytrail },
+ { PCI_VDEVICE(INTEL, 0x0F45), baytrail },
+ { PCI_VDEVICE(INTEL, 0x0F46), baytrail },
+ { PCI_VDEVICE(INTEL, 0x0F47), baytrail },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index d0bdac0498ce..9c7802614342 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -159,6 +159,13 @@ static int dw_i2c_probe(struct platform_device *pdev)
"i2c-sda-hold-time-ns", &ht);
dev->sda_hold_time = div_u64((u64)ic_clk * ht + 500000,
1000000);
+
+ of_property_read_u32(pdev->dev.of_node,
+ "i2c-sda-falling-time-ns",
+ &dev->sda_falling_time);
+ of_property_read_u32(pdev->dev.of_node,
+ "i2c-scl-falling-time-ns",
+ &dev->scl_falling_time);
}
dev->functionality =
@@ -195,7 +202,7 @@ static int dw_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON;
+ adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "Synopsys DesignWare I2C adapter",
sizeof(adap->name));
adap->algo = &i2c_dw_algo;
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
new file mode 100644
index 000000000000..777ed409a24a
--- /dev/null
+++ b/drivers/i2c/busses/i2c-efm32.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (C) 2014 Uwe Kleine-Koenig for Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+
+#define DRIVER_NAME "efm32-i2c"
+
+#define MASK_VAL(mask, val) ((val << __ffs(mask)) & mask)
+
+#define REG_CTRL 0x00
+#define REG_CTRL_EN 0x00001
+#define REG_CTRL_SLAVE 0x00002
+#define REG_CTRL_AUTOACK 0x00004
+#define REG_CTRL_AUTOSE 0x00008
+#define REG_CTRL_AUTOSN 0x00010
+#define REG_CTRL_ARBDIS 0x00020
+#define REG_CTRL_GCAMEN 0x00040
+#define REG_CTRL_CLHR__MASK 0x00300
+#define REG_CTRL_BITO__MASK 0x03000
+#define REG_CTRL_BITO_OFF 0x00000
+#define REG_CTRL_BITO_40PCC 0x01000
+#define REG_CTRL_BITO_80PCC 0x02000
+#define REG_CTRL_BITO_160PCC 0x03000
+#define REG_CTRL_GIBITO 0x08000
+#define REG_CTRL_CLTO__MASK 0x70000
+#define REG_CTRL_CLTO_OFF 0x00000
+
+#define REG_CMD 0x04
+#define REG_CMD_START 0x00001
+#define REG_CMD_STOP 0x00002
+#define REG_CMD_ACK 0x00004
+#define REG_CMD_NACK 0x00008
+#define REG_CMD_CONT 0x00010
+#define REG_CMD_ABORT 0x00020
+#define REG_CMD_CLEARTX 0x00040
+#define REG_CMD_CLEARPC 0x00080
+
+#define REG_STATE 0x08
+#define REG_STATE_BUSY 0x00001
+#define REG_STATE_MASTER 0x00002
+#define REG_STATE_TRANSMITTER 0x00004
+#define REG_STATE_NACKED 0x00008
+#define REG_STATE_BUSHOLD 0x00010
+#define REG_STATE_STATE__MASK 0x000e0
+#define REG_STATE_STATE_IDLE 0x00000
+#define REG_STATE_STATE_WAIT 0x00020
+#define REG_STATE_STATE_START 0x00040
+#define REG_STATE_STATE_ADDR 0x00060
+#define REG_STATE_STATE_ADDRACK 0x00080
+#define REG_STATE_STATE_DATA 0x000a0
+#define REG_STATE_STATE_DATAACK 0x000c0
+
+#define REG_STATUS 0x0c
+#define REG_STATUS_PSTART 0x00001
+#define REG_STATUS_PSTOP 0x00002
+#define REG_STATUS_PACK 0x00004
+#define REG_STATUS_PNACK 0x00008
+#define REG_STATUS_PCONT 0x00010
+#define REG_STATUS_PABORT 0x00020
+#define REG_STATUS_TXC 0x00040
+#define REG_STATUS_TXBL 0x00080
+#define REG_STATUS_RXDATAV 0x00100
+
+#define REG_CLKDIV 0x10
+#define REG_CLKDIV_DIV__MASK 0x001ff
+#define REG_CLKDIV_DIV(div) MASK_VAL(REG_CLKDIV_DIV__MASK, (div))
+
+#define REG_SADDR 0x14
+#define REG_SADDRMASK 0x18
+#define REG_RXDATA 0x1c
+#define REG_RXDATAP 0x20
+#define REG_TXDATA 0x24
+#define REG_IF 0x28
+#define REG_IF_START 0x00001
+#define REG_IF_RSTART 0x00002
+#define REG_IF_ADDR 0x00004
+#define REG_IF_TXC 0x00008
+#define REG_IF_TXBL 0x00010
+#define REG_IF_RXDATAV 0x00020
+#define REG_IF_ACK 0x00040
+#define REG_IF_NACK 0x00080
+#define REG_IF_MSTOP 0x00100
+#define REG_IF_ARBLOST 0x00200
+#define REG_IF_BUSERR 0x00400
+#define REG_IF_BUSHOLD 0x00800
+#define REG_IF_TXOF 0x01000
+#define REG_IF_RXUF 0x02000
+#define REG_IF_BITO 0x04000
+#define REG_IF_CLTO 0x08000
+#define REG_IF_SSTOP 0x10000
+
+#define REG_IFS 0x2c
+#define REG_IFC 0x30
+#define REG_IFC__MASK 0x1ffcf
+
+#define REG_IEN 0x34
+
+#define REG_ROUTE 0x38
+#define REG_ROUTE_SDAPEN 0x00001
+#define REG_ROUTE_SCLPEN 0x00002
+#define REG_ROUTE_LOCATION__MASK 0x00700
+#define REG_ROUTE_LOCATION(n) MASK_VAL(REG_ROUTE_LOCATION__MASK, (n))
+
+struct efm32_i2c_ddata {
+ struct i2c_adapter adapter;
+
+ struct clk *clk;
+ void __iomem *base;
+ unsigned int irq;
+ u8 location;
+ unsigned long frequency;
+
+ /* transfer data */
+ struct completion done;
+ struct i2c_msg *msgs;
+ size_t num_msgs;
+ size_t current_word, current_msg;
+ int retval;
+};
+
+static u32 efm32_i2c_read32(struct efm32_i2c_ddata *ddata, unsigned offset)
+{
+ return readl(ddata->base + offset);
+}
+
+static void efm32_i2c_write32(struct efm32_i2c_ddata *ddata,
+ unsigned offset, u32 value)
+{
+ writel(value, ddata->base + offset);
+}
+
+static void efm32_i2c_send_next_msg(struct efm32_i2c_ddata *ddata)
+{
+ struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
+
+ efm32_i2c_write32(ddata, REG_CMD, REG_CMD_START);
+ efm32_i2c_write32(ddata, REG_TXDATA, cur_msg->addr << 1 |
+ (cur_msg->flags & I2C_M_RD ? 1 : 0));
+}
+
+static void efm32_i2c_send_next_byte(struct efm32_i2c_ddata *ddata)
+{
+ struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
+
+ if (ddata->current_word >= cur_msg->len) {
+ /* cur_msg completely transferred */
+ ddata->current_word = 0;
+ ddata->current_msg += 1;
+
+ if (ddata->current_msg >= ddata->num_msgs) {
+ efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
+ complete(&ddata->done);
+ } else {
+ efm32_i2c_send_next_msg(ddata);
+ }
+ } else {
+ efm32_i2c_write32(ddata, REG_TXDATA,
+ cur_msg->buf[ddata->current_word++]);
+ }
+}
+
+static void efm32_i2c_recv_next_byte(struct efm32_i2c_ddata *ddata)
+{
+ struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
+
+ cur_msg->buf[ddata->current_word] = efm32_i2c_read32(ddata, REG_RXDATA);
+ ddata->current_word += 1;
+ if (ddata->current_word >= cur_msg->len) {
+ /* cur_msg completely transferred */
+ ddata->current_word = 0;
+ ddata->current_msg += 1;
+
+ efm32_i2c_write32(ddata, REG_CMD, REG_CMD_NACK);
+
+ if (ddata->current_msg >= ddata->num_msgs) {
+ efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
+ complete(&ddata->done);
+ } else {
+ efm32_i2c_send_next_msg(ddata);
+ }
+ } else {
+ efm32_i2c_write32(ddata, REG_CMD, REG_CMD_ACK);
+ }
+}
+
+static irqreturn_t efm32_i2c_irq(int irq, void *dev_id)
+{
+ struct efm32_i2c_ddata *ddata = dev_id;
+ struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
+ u32 irqflag = efm32_i2c_read32(ddata, REG_IF);
+ u32 state = efm32_i2c_read32(ddata, REG_STATE);
+
+ efm32_i2c_write32(ddata, REG_IFC, irqflag & REG_IFC__MASK);
+
+ switch (state & REG_STATE_STATE__MASK) {
+ case REG_STATE_STATE_IDLE:
+ /* arbitration lost? */
+ ddata->retval = -EAGAIN;
+ complete(&ddata->done);
+ break;
+ case REG_STATE_STATE_WAIT:
+ /*
+		 * This shouldn't happen. Reset the hardware
+		 * state and get out.
+ */
+ ddata->retval = -EIO;
+ efm32_i2c_write32(ddata, REG_CMD,
+ REG_CMD_STOP | REG_CMD_ABORT |
+ REG_CMD_CLEARTX | REG_CMD_CLEARPC);
+ complete(&ddata->done);
+ break;
+ case REG_STATE_STATE_START:
+ /* "caller" is expected to send an address */
+ break;
+ case REG_STATE_STATE_ADDR:
+ /* wait for Ack or NAck of slave */
+ break;
+ case REG_STATE_STATE_ADDRACK:
+ if (state & REG_STATE_NACKED) {
+ efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
+ ddata->retval = -ENXIO;
+ complete(&ddata->done);
+ } else if (cur_msg->flags & I2C_M_RD) {
+ /* wait for slave to send first data byte */
+ } else {
+ efm32_i2c_send_next_byte(ddata);
+ }
+ break;
+ case REG_STATE_STATE_DATA:
+ if (cur_msg->flags & I2C_M_RD) {
+ efm32_i2c_recv_next_byte(ddata);
+ } else {
+ /* wait for Ack or Nack of slave */
+ }
+ break;
+ case REG_STATE_STATE_DATAACK:
+ if (state & REG_STATE_NACKED) {
+ efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
+ complete(&ddata->done);
+ } else {
+ efm32_i2c_send_next_byte(ddata);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int efm32_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct efm32_i2c_ddata *ddata = i2c_get_adapdata(adap);
+ int ret;
+
+ if (ddata->msgs)
+ return -EBUSY;
+
+ ddata->msgs = msgs;
+ ddata->num_msgs = num;
+ ddata->current_word = 0;
+ ddata->current_msg = 0;
+ ddata->retval = -EIO;
+
+ reinit_completion(&ddata->done);
+
+ dev_dbg(&ddata->adapter.dev, "state: %08x, status: %08x\n",
+ efm32_i2c_read32(ddata, REG_STATE),
+ efm32_i2c_read32(ddata, REG_STATUS));
+
+ efm32_i2c_send_next_msg(ddata);
+
+ wait_for_completion(&ddata->done);
+
+ if (ddata->current_msg >= ddata->num_msgs)
+ ret = ddata->num_msgs;
+ else
+ ret = ddata->retval;
+
+ return ret;
+}
+
+static u32 efm32_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm efm32_i2c_algo = {
+ .master_xfer = efm32_i2c_master_xfer,
+ .functionality = efm32_i2c_functionality,
+};
+
+static u32 efm32_i2c_get_configured_location(struct efm32_i2c_ddata *ddata)
+{
+ u32 reg = efm32_i2c_read32(ddata, REG_ROUTE);
+
+ return (reg & REG_ROUTE_LOCATION__MASK) >>
+ __ffs(REG_ROUTE_LOCATION__MASK);
+}
+
+static int efm32_i2c_probe(struct platform_device *pdev)
+{
+ struct efm32_i2c_ddata *ddata;
+ struct resource *res;
+ unsigned long rate;
+ struct device_node *np = pdev->dev.of_node;
+ u32 location, frequency;
+ int ret;
+ u32 clkdiv;
+
+ if (!np)
+ return -EINVAL;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata) {
+ dev_dbg(&pdev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, ddata);
+
+ init_completion(&ddata->done);
+ strlcpy(ddata->adapter.name, pdev->name, sizeof(ddata->adapter.name));
+ ddata->adapter.owner = THIS_MODULE;
+ ddata->adapter.algo = &efm32_i2c_algo;
+ ddata->adapter.dev.parent = &pdev->dev;
+ ddata->adapter.dev.of_node = pdev->dev.of_node;
+ i2c_set_adapdata(&ddata->adapter, ddata);
+
+ ddata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ddata->clk)) {
+ ret = PTR_ERR(ddata->clk);
+ dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to determine base address\n");
+ return -ENODEV;
+ }
+
+ if (resource_size(res) < 0x42) {
+ dev_err(&pdev->dev, "memory resource too small\n");
+ return -EINVAL;
+ }
+
+ ddata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(&pdev->dev, "failed to get irq (%d)\n", ret);
+ if (!ret)
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ddata->irq = ret;
+
+ ret = clk_prepare_enable(ddata->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable clock (%d)\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "efm32,location", &location);
+ if (!ret) {
+ dev_dbg(&pdev->dev, "using location %u\n", location);
+ } else {
+ /* default to location configured in hardware */
+ location = efm32_i2c_get_configured_location(ddata);
+
+ dev_info(&pdev->dev, "fall back to location %u\n", location);
+ }
+
+ ddata->location = location;
+
+ ret = of_property_read_u32(np, "clock-frequency", &frequency);
+ if (!ret) {
+ dev_dbg(&pdev->dev, "using frequency %u\n", frequency);
+ } else {
+ frequency = 100000;
+ dev_info(&pdev->dev, "defaulting to 100 kHz\n");
+ }
+ ddata->frequency = frequency;
+
+ rate = clk_get_rate(ddata->clk);
+ if (!rate) {
+ dev_err(&pdev->dev, "there is no input clock available\n");
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
+ clkdiv = DIV_ROUND_UP(rate, 8 * ddata->frequency) - 1;
+ if (clkdiv >= 0x200) {
+ dev_err(&pdev->dev,
+ "input clock too fast (%lu) to divide down to bus freq (%lu)",
+ rate, ddata->frequency);
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
+
+ dev_dbg(&pdev->dev, "input clock = %lu, bus freq = %lu, clkdiv = %lu\n",
+ rate, ddata->frequency, (unsigned long)clkdiv);
+ efm32_i2c_write32(ddata, REG_CLKDIV, REG_CLKDIV_DIV(clkdiv));
+
+ efm32_i2c_write32(ddata, REG_ROUTE, REG_ROUTE_SDAPEN |
+ REG_ROUTE_SCLPEN |
+ REG_ROUTE_LOCATION(ddata->location));
+
+ efm32_i2c_write32(ddata, REG_CTRL, REG_CTRL_EN |
+ REG_CTRL_BITO_160PCC | 0 * REG_CTRL_GIBITO);
+
+ efm32_i2c_write32(ddata, REG_IFC, REG_IFC__MASK);
+ efm32_i2c_write32(ddata, REG_IEN, REG_IF_TXC | REG_IF_ACK | REG_IF_NACK
+ | REG_IF_ARBLOST | REG_IF_BUSERR | REG_IF_RXDATAV);
+
+ /* to make bus idle */
+ efm32_i2c_write32(ddata, REG_CMD, REG_CMD_ABORT);
+
+ ret = request_irq(ddata->irq, efm32_i2c_irq, 0, DRIVER_NAME, ddata);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request irq (%d)\n", ret);
+ return ret;
+ }
+
+ ret = i2c_add_adapter(&ddata->adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add i2c adapter (%d)\n", ret);
+ free_irq(ddata->irq, ddata);
+
+err_disable_clk:
+ clk_disable_unprepare(ddata->clk);
+ }
+ return ret;
+}
+
+static int efm32_i2c_remove(struct platform_device *pdev)
+{
+ struct efm32_i2c_ddata *ddata = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&ddata->adapter);
+ free_irq(ddata->irq, ddata);
+ clk_disable_unprepare(ddata->clk);
+
+ return 0;
+}
+
+static const struct of_device_id efm32_i2c_dt_ids[] = {
+ {
+ .compatible = "energymicro,efm32-i2c",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, efm32_i2c_dt_ids);
+
+static struct platform_driver efm32_i2c_driver = {
+ .probe = efm32_i2c_probe,
+ .remove = efm32_i2c_remove,
+
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = efm32_i2c_dt_ids,
+ },
+};
+module_platform_driver(efm32_i2c_driver);
+
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_DESCRIPTION("EFM32 i2c driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
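The probe routine above derives CLKDIV as DIV_ROUND_UP(rate, 8 * frequency) - 1 and rejects anything that does not fit in the 9-bit field. A minimal user-space sketch of that calculation follows (illustrative only; the 14 MHz example rate is an assumption, and the factor of 8 simply mirrors the driver's approximation of the bus period).

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rate = 14000000;	/* example peripheral clock, 14 MHz */
	unsigned long freq = 100000;	/* requested bus frequency, 100 kHz */
	unsigned long clkdiv = DIV_ROUND_UP(rate, 8 * freq) - 1;

	if (clkdiv >= 0x200) {
		printf("input clock too fast for the requested bus frequency\n");
		return 1;
	}

	printf("clkdiv = %lu -> roughly %lu Hz\n",
	       clkdiv, rate / (8 * (clkdiv + 1)));
	return 0;
}

With the example values this yields clkdiv = 17, i.e. a bus rate of about 97 kHz, just under the requested 100 kHz.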
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index e08e458bab02..ff775ac29e49 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -186,7 +186,7 @@ static DEFINE_MUTEX(pch_mutex);
#define PCI_DEVICE_ID_ML7223_I2C 0x8010
#define PCI_DEVICE_ID_ML7831_I2C 0x8817
-static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = {
+static const struct pci_device_id pch_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, },
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 9fd711c03dd2..00af0a0a3361 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -566,7 +566,7 @@ static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c,
static int exynos5_i2c_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
- struct exynos5_i2c *i2c = (struct exynos5_i2c *)adap->algo_data;
+ struct exynos5_i2c *i2c = adap->algo_data;
int i = 0, ret = 0, stop = 0;
if (i2c->suspended) {
@@ -715,6 +715,7 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int exynos5_i2c_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -745,6 +746,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(exynos5_i2c_dev_pm_ops, exynos5_i2c_suspend_noirq,
exynos5_i2c_resume_noirq);
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index d9f7e186a4c7..02d2d4abb9dd 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -94,6 +94,9 @@ static int of_i2c_gpio_get_pins(struct device_node *np,
*sda_pin = of_get_gpio(np, 0);
*scl_pin = of_get_gpio(np, 1);
+ if (*sda_pin == -EPROBE_DEFER || *scl_pin == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
if (!gpio_is_valid(*sda_pin) || !gpio_is_valid(*scl_pin)) {
pr_err("%s: invalid GPIO pins, sda=%d/scl=%d\n",
np->full_name, *sda_pin, *scl_pin);
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index e248257fe517..14d2b76de25f 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -104,7 +104,7 @@ static struct i2c_adapter hydra_adap = {
.algo_data = &hydra_bit_data,
};
-static DEFINE_PCI_DEVICE_TABLE(hydra_ids) = {
+static const struct pci_device_id hydra_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 349c2d35e792..6777cd6f8776 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -60,6 +60,7 @@
Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
Coleto Creek (PCH) 0x23b0 32 hard yes yes yes
Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
+ BayTrail (SOC) 0x0f12 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -161,6 +162,7 @@
STATUS_ERROR_FLAGS)
/* Older devices have their ID defined in <linux/pci_ids.h> */
+#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
@@ -789,7 +791,7 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = i801_func,
};
-static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
+static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) },
@@ -822,6 +824,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 8ce4f517fc56..984492553e95 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -182,7 +182,7 @@ struct ismt_priv {
/**
* ismt_ids - PCI device IDs supported by this driver
*/
-static DEFINE_PCI_DEVICE_TABLE(ismt_ids) = {
+static const struct pci_device_id ismt_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index d52d84937ad3..540ea692bf79 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/mv643xx_i2c.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -97,7 +98,6 @@ enum {
enum {
MV64XXX_I2C_ACTION_INVALID,
MV64XXX_I2C_ACTION_CONTINUE,
- MV64XXX_I2C_ACTION_SEND_START,
MV64XXX_I2C_ACTION_SEND_RESTART,
MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
MV64XXX_I2C_ACTION_SEND_ADDR_1,
@@ -148,6 +148,8 @@ struct mv64xxx_i2c_data {
bool offload_enabled;
/* 5us delay in order to avoid repeated start timing violation */
bool errata_delay;
+ struct reset_control *rstc;
+ bool irq_clear_inverted;
};
static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = {
@@ -176,11 +178,6 @@ mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
{
u32 dir = 0;
- drv_data->msg = msg;
- drv_data->byte_posn = 0;
- drv_data->bytes_left = msg->len;
- drv_data->aborting = 0;
- drv_data->rc = 0;
drv_data->cntl_bits = MV64XXX_I2C_REG_CONTROL_ACK |
MV64XXX_I2C_REG_CONTROL_INTEN | MV64XXX_I2C_REG_CONTROL_TWSIEN;
@@ -206,11 +203,6 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
if (!drv_data->offload_enabled)
return -EOPNOTSUPP;
- drv_data->msg = msg;
- drv_data->byte_posn = 0;
- drv_data->bytes_left = msg->len;
- drv_data->aborting = 0;
- drv_data->rc = 0;
/* Only regular transactions can be offloaded */
if ((msg->flags & ~(I2C_M_TEN | I2C_M_RD)) != 0)
return -EINVAL;
@@ -419,6 +411,23 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
}
}
+static void mv64xxx_i2c_send_start(struct mv64xxx_i2c_data *drv_data)
+{
+ drv_data->msg = drv_data->msgs;
+ drv_data->byte_posn = 0;
+ drv_data->bytes_left = drv_data->msg->len;
+ drv_data->aborting = 0;
+ drv_data->rc = 0;
+
+ /* Can we offload this msg ? */
+ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
+ /* No, switch to standard path */
+ mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+ writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
+ drv_data->reg_base + drv_data->reg_offsets.control);
+ }
+}
+
static void
mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
{
@@ -435,14 +444,8 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->msgs++;
drv_data->num_msgs--;
- if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
- drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
- writel(drv_data->cntl_bits,
- drv_data->reg_base + drv_data->reg_offsets.control);
+ mv64xxx_i2c_send_start(drv_data);
- /* Setup for the next message */
- mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
- }
if (drv_data->errata_delay)
udelay(5);
@@ -459,16 +462,6 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->reg_base + drv_data->reg_offsets.control);
break;
- case MV64XXX_I2C_ACTION_SEND_START:
- /* Can we offload this msg ? */
- if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
- /* No, switch to standard path */
- mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
- writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
- drv_data->reg_base + drv_data->reg_offsets.control);
- }
- break;
-
case MV64XXX_I2C_ACTION_SEND_ADDR_1:
writel(drv_data->addr1,
drv_data->reg_base + drv_data->reg_offsets.data);
@@ -566,6 +559,11 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
status = readl(drv_data->reg_base + drv_data->reg_offsets.status);
mv64xxx_i2c_fsm(drv_data, status);
mv64xxx_i2c_do_action(drv_data);
+
+ if (drv_data->irq_clear_inverted)
+ writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_IFLG,
+ drv_data->reg_base + drv_data->reg_offsets.control);
+
rc = IRQ_HANDLED;
}
spin_unlock_irqrestore(&drv_data->lock, flags);
@@ -626,12 +624,11 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
spin_lock_irqsave(&drv_data->lock, flags);
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
drv_data->send_stop = is_last;
drv_data->block = 1;
- mv64xxx_i2c_do_action(drv_data);
+ mv64xxx_i2c_send_start(drv_data);
spin_unlock_irqrestore(&drv_data->lock, flags);
mv64xxx_i2c_wait_for_completion(drv_data);
@@ -685,6 +682,7 @@ static const struct i2c_algorithm mv64xxx_i2c_algo = {
*/
static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
{ .compatible = "allwinner,sun4i-i2c", .data = &mv64xxx_i2c_regs_sun4i},
+ { .compatible = "allwinner,sun6i-a31-i2c", .data = &mv64xxx_i2c_regs_sun4i},
{ .compatible = "marvell,mv64xxx-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
{ .compatible = "marvell,mv78230-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
{ .compatible = "marvell,mv78230-a0-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
@@ -759,6 +757,16 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
}
drv_data->irq = irq_of_parse_and_map(np, 0);
+ drv_data->rstc = devm_reset_control_get_optional(dev, NULL);
+ if (IS_ERR(drv_data->rstc)) {
+ if (PTR_ERR(drv_data->rstc) == -EPROBE_DEFER) {
+ rc = -EPROBE_DEFER;
+ goto out;
+ }
+ } else {
+ reset_control_deassert(drv_data->rstc);
+ }
+
/* Its not yet defined how timeouts will be specified in device tree.
* So hard code the value to 1 second.
*/
@@ -783,6 +791,10 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
drv_data->offload_enabled = false;
drv_data->errata_delay = true;
}
+
+ if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
+ drv_data->irq_clear_inverted = true;
+
out:
return rc;
#endif
@@ -845,13 +857,13 @@ mv64xxx_i2c_probe(struct platform_device *pd)
}
if (drv_data->irq < 0) {
rc = -ENXIO;
- goto exit_clk;
+ goto exit_reset;
}
drv_data->adapter.dev.parent = &pd->dev;
drv_data->adapter.algo = &mv64xxx_i2c_algo;
drv_data->adapter.owner = THIS_MODULE;
- drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED;
drv_data->adapter.nr = pd->id;
drv_data->adapter.dev.of_node = pd->dev.of_node;
platform_set_drvdata(pd, drv_data);
@@ -865,7 +877,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
dev_err(&drv_data->adapter.dev,
"mv64xxx: Can't register intr handler irq%d: %d\n",
drv_data->irq, rc);
- goto exit_clk;
+ goto exit_reset;
} else if ((rc = i2c_add_numbered_adapter(&drv_data->adapter)) != 0) {
dev_err(&drv_data->adapter.dev,
"mv64xxx: Can't add i2c adapter, rc: %d\n", -rc);
@@ -876,6 +888,9 @@ mv64xxx_i2c_probe(struct platform_device *pd)
exit_free_irq:
free_irq(drv_data->irq, drv_data);
+exit_reset:
+ if (!IS_ERR_OR_NULL(drv_data->rstc))
+ reset_control_assert(drv_data->rstc);
exit_clk:
#if defined(CONFIG_HAVE_CLK)
/* Not all platforms have a clk */
@@ -894,6 +909,8 @@ mv64xxx_i2c_remove(struct platform_device *dev)
i2c_del_adapter(&drv_data->adapter);
free_irq(drv_data->irq, drv_data);
+ if (!IS_ERR_OR_NULL(drv_data->rstc))
+ reset_control_assert(drv_data->rstc);
#if defined(CONFIG_HAVE_CLK)
/* Not all platforms have a clk */
if (!IS_ERR(drv_data->clk)) {
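
The optional reset support added to i2c-mv64xxx above treats only -EPROBE_DEFER as fatal; any other error from the lookup is taken to mean that no reset line is described, and the tear-down paths re-assert the line guarded by IS_ERR_OR_NULL(). A minimal sketch of that lookup step under those assumptions (the function name is hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_attach_reset(struct device *dev, struct reset_control **rstc)
{
	*rstc = devm_reset_control_get_optional(dev, NULL);
	if (IS_ERR(*rstc)) {
		/* Defer if the reset controller has not probed yet... */
		if (PTR_ERR(*rstc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		/* ...otherwise assume no reset line is wired up. */
		return 0;
	}
	/* Release the block from reset before touching its registers. */
	reset_control_deassert(*rstc);
	return 0;
}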
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 0cde4e6ab2b2..7170fc892829 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -806,7 +806,6 @@ static int mxs_i2c_probe(struct platform_device *pdev)
struct mxs_i2c_dev *i2c;
struct i2c_adapter *adap;
struct resource *res;
- resource_size_t res_size;
int err, irq;
i2c = devm_kzalloc(dev, sizeof(struct mxs_i2c_dev), GFP_KERNEL);
@@ -819,18 +818,13 @@ static int mxs_i2c_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
-
- if (!res || irq < 0)
- return -ENOENT;
+ i2c->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(i2c->regs))
+ return PTR_ERR(i2c->regs);
- res_size = resource_size(res);
- if (!devm_request_mem_region(dev, res->start, res_size, res->name))
- return -EBUSY;
-
- i2c->regs = devm_ioremap_nocache(dev, res->start, res_size);
- if (!i2c->regs)
- return -EBUSY;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
err = devm_request_irq(dev, irq, mxs_i2c_isr, 0, dev_name(dev), i2c);
if (err)
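
The i2c-mxs change above replaces the open-coded request_mem_region()/ioremap() pair with devm_ioremap_resource(), which also validates a NULL resource, and propagates the platform_get_irq() error code instead of flattening it to -ENOENT. A minimal sketch of that probe idiom, with driver-specific names omitted:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);	/* handles res == NULL */
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagate the real error code */

	return 0;
}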
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 0038c451095c..ee3a76c7ae97 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -306,7 +306,7 @@ static struct i2c_algorithm smbus_algorithm = {
};
-static DEFINE_PCI_DEVICE_TABLE(nforce2_ids) = {
+static const struct pci_device_id nforce2_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) },
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 4443613514ee..28cbe1b2a2ec 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -111,22 +111,6 @@ enum i2c_freq_mode {
};
/**
- * struct nmk_i2c_controller - client specific controller configuration
- * @clk_freq: clock frequency for the operation mode
- * @tft: Tx FIFO Threshold in bytes
- * @rft: Rx FIFO Threshold in bytes
- * @timeout Slave response timeout(ms)
- * @sm: speed mode
- */
-struct nmk_i2c_controller {
- u32 clk_freq;
- unsigned char tft;
- unsigned char rft;
- int timeout;
- enum i2c_freq_mode sm;
-};
-
-/**
* struct i2c_vendor_data - per-vendor variations
* @has_mtdws: variant has the MTDWS bit
* @fifodepth: variant FIFO depth
@@ -174,12 +158,15 @@ struct i2c_nmk_client {
* @irq: interrupt line for the controller.
* @virtbase: virtual io memory area.
* @clk: hardware i2c block clock.
- * @cfg: machine provided controller configuration.
* @cli: holder of client specific data.
+ * @clk_freq: clock frequency for the operation mode
+ * @tft: Tx FIFO Threshold in bytes
+ * @rft: Rx FIFO Threshold in bytes
+ * @timeout: Slave response timeout (ms)

+ * @sm: speed mode
* @stop: stop condition.
* @xfer_complete: acknowledge completion for a I2C message.
* @result: controller propogated result.
- * @busy: Busy doing transfer.
*/
struct nmk_i2c_dev {
struct i2c_vendor_data *vendor;
@@ -188,12 +175,15 @@ struct nmk_i2c_dev {
int irq;
void __iomem *virtbase;
struct clk *clk;
- struct nmk_i2c_controller cfg;
struct i2c_nmk_client cli;
+ u32 clk_freq;
+ unsigned char tft;
+ unsigned char rft;
+ int timeout;
+ enum i2c_freq_mode sm;
int stop;
struct completion xfer_complete;
int result;
- bool busy;
};
/* controller's abort causes */
@@ -386,7 +376,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* slsu = cycles / (1000000000 / f) + 1
*/
ns = DIV_ROUND_UP_ULL(1000000000ULL, i2c_clk);
- switch (dev->cfg.sm) {
+ switch (dev->sm) {
case I2C_FREQ_MODE_FAST:
case I2C_FREQ_MODE_FAST_PLUS:
slsu = DIV_ROUND_UP(100, ns); /* Fast */
@@ -409,7 +399,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* 2 whereas it is 3 for fast and fastplus mode of
* operation. TODO - high speed support.
*/
- div = (dev->cfg.clk_freq > 100000) ? 3 : 2;
+ div = (dev->clk_freq > 100000) ? 3 : 2;
/*
* generate the mask for baud rate counters. The controller
@@ -419,7 +409,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* so set brcr1 to 0.
*/
brcr1 = 0 << 16;
- brcr2 = (i2c_clk/(dev->cfg.clk_freq * div)) & 0xffff;
+ brcr2 = (i2c_clk/(dev->clk_freq * div)) & 0xffff;
/* set the baud rate counter register */
writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
@@ -430,7 +420,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* TODO - support for fast mode plus (up to 1Mb/s)
* and high speed (up to 3.4 Mb/s)
*/
- if (dev->cfg.sm > I2C_FREQ_MODE_FAST) {
+ if (dev->sm > I2C_FREQ_MODE_FAST) {
dev_err(&dev->adev->dev,
"do not support this mode defaulting to std. mode\n");
brcr2 = i2c_clk/(100000 * 2) & 0xffff;
@@ -438,11 +428,11 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
writel(I2C_FREQ_MODE_STANDARD << 4,
dev->virtbase + I2C_CR);
}
- writel(dev->cfg.sm << 4, dev->virtbase + I2C_CR);
+ writel(dev->sm << 4, dev->virtbase + I2C_CR);
/* set the Tx and Rx FIFO threshold */
- writel(dev->cfg.tft, dev->virtbase + I2C_TFTR);
- writel(dev->cfg.rft, dev->virtbase + I2C_RFTR);
+ writel(dev->tft, dev->virtbase + I2C_TFTR);
+ writel(dev->rft, dev->virtbase + I2C_RFTR);
}
/**
@@ -674,28 +664,13 @@ static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags)
static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msgs[], int num_msgs)
{
- int status;
+ int status = 0;
int i;
struct nmk_i2c_dev *dev = i2c_get_adapdata(i2c_adap);
int j;
- dev->busy = true;
-
pm_runtime_get_sync(&dev->adev->dev);
- status = clk_prepare_enable(dev->clk);
- if (status) {
- dev_err(&dev->adev->dev, "can't prepare_enable clock\n");
- goto out_clk;
- }
-
- /* Optionaly enable pins to be muxed in and configured */
- pinctrl_pm_select_default_state(&dev->adev->dev);
-
- status = init_hw(dev);
- if (status)
- goto out;
-
/* Attempt three times to send the message queue */
for (j = 0; j < 3; j++) {
/* setup the i2c controller */
@@ -716,16 +691,8 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
break;
}
-out:
- clk_disable_unprepare(dev->clk);
-out_clk:
- /* Optionally let pins go into idle state */
- pinctrl_pm_select_idle_state(&dev->adev->dev);
-
pm_runtime_put_sync(&dev->adev->dev);
- dev->busy = false;
-
/* return the no. messages processed */
if (status)
return status;
@@ -909,22 +876,15 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-
-#ifdef CONFIG_PM
-static int nmk_i2c_suspend(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int nmk_i2c_suspend_late(struct device *dev)
{
- struct amba_device *adev = to_amba_device(dev);
- struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
-
- if (nmk_i2c->busy)
- return -EBUSY;
-
pinctrl_pm_select_sleep_state(dev);
return 0;
}
-static int nmk_i2c_resume(struct device *dev)
+static int nmk_i2c_resume_early(struct device *dev)
{
/* First go to the default state */
pinctrl_pm_select_default_state(dev);
@@ -933,19 +893,48 @@ static int nmk_i2c_resume(struct device *dev)
return 0;
}
-#else
-#define nmk_i2c_suspend NULL
-#define nmk_i2c_resume NULL
#endif
-/*
- * We use noirq so that we suspend late and resume before the wakeup interrupt
- * to ensure that we do the !pm_runtime_suspended() check in resume before
- * there has been a regular pm runtime resume (via pm_runtime_get_sync()).
- */
+#ifdef CONFIG_PM
+static int nmk_i2c_runtime_suspend(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+
+ clk_disable_unprepare(nmk_i2c->clk);
+ pinctrl_pm_select_idle_state(dev);
+ return 0;
+}
+
+static int nmk_i2c_runtime_resume(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+ int ret;
+
+ ret = clk_prepare_enable(nmk_i2c->clk);
+ if (ret) {
+ dev_err(dev, "can't prepare_enable clock\n");
+ return ret;
+ }
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = init_hw(nmk_i2c);
+ if (ret) {
+ clk_disable_unprepare(nmk_i2c->clk);
+ pinctrl_pm_select_idle_state(dev);
+ }
+
+ return ret;
+}
+#endif
+
static const struct dev_pm_ops nmk_i2c_pm = {
- .suspend_noirq = nmk_i2c_suspend,
- .resume_noirq = nmk_i2c_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(nmk_i2c_suspend_late, nmk_i2c_resume_early)
+ SET_PM_RUNTIME_PM_OPS(nmk_i2c_runtime_suspend,
+ nmk_i2c_runtime_resume,
+ NULL)
};
static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap)
@@ -958,118 +947,98 @@ static const struct i2c_algorithm nmk_i2c_algo = {
.functionality = nmk_i2c_functionality
};
-static struct nmk_i2c_controller u8500_i2c = {
- .tft = 1, /* Tx FIFO threshold */
- .rft = 8, /* Rx FIFO threshold */
- .clk_freq = 400000, /* fast mode operation */
- .timeout = 200, /* Slave response timeout(ms) */
- .sm = I2C_FREQ_MODE_FAST,
-};
-
static void nmk_i2c_of_probe(struct device_node *np,
- struct nmk_i2c_controller *pdata)
+ struct nmk_i2c_dev *nmk)
{
- of_property_read_u32(np, "clock-frequency", &pdata->clk_freq);
+ /* Default to 100 kHz if no frequency is given in the node */
+ if (of_property_read_u32(np, "clock-frequency", &nmk->clk_freq))
+ nmk->clk_freq = 100000;
/* This driver only supports 'standard' and 'fast' modes of operation. */
- if (pdata->clk_freq <= 100000)
- pdata->sm = I2C_FREQ_MODE_STANDARD;
+ if (nmk->clk_freq <= 100000)
+ nmk->sm = I2C_FREQ_MODE_STANDARD;
else
- pdata->sm = I2C_FREQ_MODE_FAST;
+ nmk->sm = I2C_FREQ_MODE_FAST;
+ nmk->tft = 1; /* Tx FIFO threshold */
+ nmk->rft = 8; /* Rx FIFO threshold */
+ nmk->timeout = 200; /* Slave response timeout (ms) */
}
static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
- struct nmk_i2c_controller *pdata = dev_get_platdata(&adev->dev);
struct device_node *np = adev->dev.of_node;
struct nmk_i2c_dev *dev;
struct i2c_adapter *adap;
struct i2c_vendor_data *vendor = id->data;
u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1;
- if (!pdata) {
- if (np) {
- pdata = devm_kzalloc(&adev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
- /* Provide the default configuration as a base. */
- memcpy(pdata, &u8500_i2c, sizeof(struct nmk_i2c_controller));
- nmk_i2c_of_probe(np, pdata);
- } else
- /* No i2c configuration found, using the default. */
- pdata = &u8500_i2c;
+ dev = devm_kzalloc(&adev->dev, sizeof(struct nmk_i2c_dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&adev->dev, "cannot allocate memory\n");
+ ret = -ENOMEM;
+ goto err_no_mem;
}
+ dev->vendor = vendor;
+ dev->adev = adev;
+ nmk_i2c_of_probe(np, dev);
- if (pdata->tft > max_fifo_threshold) {
+ if (dev->tft > max_fifo_threshold) {
dev_warn(&adev->dev, "requested TX FIFO threshold %u, adjusted down to %u\n",
- pdata->tft, max_fifo_threshold);
- pdata->tft = max_fifo_threshold;
+ dev->tft, max_fifo_threshold);
+ dev->tft = max_fifo_threshold;
}
- if (pdata->rft > max_fifo_threshold) {
+ if (dev->rft > max_fifo_threshold) {
dev_warn(&adev->dev, "requested RX FIFO threshold %u, adjusted down to %u\n",
- pdata->rft, max_fifo_threshold);
- pdata->rft = max_fifo_threshold;
+ dev->rft, max_fifo_threshold);
+ dev->rft = max_fifo_threshold;
}
- dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&adev->dev, "cannot allocate memory\n");
- ret = -ENOMEM;
- goto err_no_mem;
- }
- dev->vendor = vendor;
- dev->busy = false;
- dev->adev = adev;
amba_set_drvdata(adev, dev);
- /* Select default pin state */
- pinctrl_pm_select_default_state(&adev->dev);
- /* If possible, let's go to idle until the first transfer */
- pinctrl_pm_select_idle_state(&adev->dev);
-
- dev->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
- if (!dev->virtbase) {
+ dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
+ resource_size(&adev->res));
+ if (!dev->virtbase) {
ret = -ENOMEM;
- goto err_no_ioremap;
+ goto err_no_mem;
}
dev->irq = adev->irq[0];
- ret = request_irq(dev->irq, i2c_irq_handler, 0,
+ ret = devm_request_irq(&adev->dev, dev->irq, i2c_irq_handler, 0,
DRIVER_NAME, dev);
if (ret) {
dev_err(&adev->dev, "cannot claim the irq %d\n", dev->irq);
- goto err_irq;
+ goto err_no_mem;
}
pm_suspend_ignore_children(&adev->dev, true);
- dev->clk = clk_get(&adev->dev, NULL);
+ dev->clk = devm_clk_get(&adev->dev, NULL);
if (IS_ERR(dev->clk)) {
dev_err(&adev->dev, "could not get i2c clock\n");
ret = PTR_ERR(dev->clk);
- goto err_no_clk;
+ goto err_no_mem;
+ }
+
+ ret = clk_prepare_enable(dev->clk);
+ if (ret) {
+ dev_err(&adev->dev, "can't prepare_enable clock\n");
+ goto err_no_mem;
}
+ init_hw(dev);
+
adap = &dev->adap;
adap->dev.of_node = np;
adap->dev.parent = &adev->dev;
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED;
adap->algo = &nmk_i2c_algo;
- adap->timeout = msecs_to_jiffies(pdata->timeout);
+ adap->timeout = msecs_to_jiffies(dev->timeout);
snprintf(adap->name, sizeof(adap->name),
"Nomadik I2C at %pR", &adev->res);
- /* fetch the controller configuration from machine */
- dev->cfg.clk_freq = pdata->clk_freq;
- dev->cfg.tft = pdata->tft;
- dev->cfg.rft = pdata->rft;
- dev->cfg.sm = pdata->sm;
-
i2c_set_adapdata(adap, dev);
dev_info(&adev->dev,
@@ -1079,21 +1048,15 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
ret = i2c_add_adapter(adap);
if (ret) {
dev_err(&adev->dev, "failed to add adapter\n");
- goto err_add_adap;
+ goto err_no_adap;
}
pm_runtime_put(&adev->dev);
return 0;
- err_add_adap:
- clk_put(dev->clk);
- err_no_clk:
- free_irq(dev->irq, dev);
- err_irq:
- iounmap(dev->virtbase);
- err_no_ioremap:
- kfree(dev);
+ err_no_adap:
+ clk_disable_unprepare(dev->clk);
err_no_mem:
return ret;
@@ -1110,13 +1073,9 @@ static int nmk_i2c_remove(struct amba_device *adev)
clear_all_interrupts(dev);
/* disable the controller */
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
- free_irq(dev->irq, dev);
- iounmap(dev->virtbase);
+ clk_disable_unprepare(dev->clk);
if (res)
release_mem_region(res->start, resource_size(res));
- clk_put(dev->clk);
- pm_runtime_disable(&adev->dev);
- kfree(dev);
return 0;
}
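
The i2c-nomadik rework above moves clock handling and hardware init out of the transfer path: the runtime PM callbacks now own clk_prepare_enable() and init_hw(), while the late system-sleep hooks only switch pinctrl states. A minimal sketch of that dev_pm_ops layout, with stubbed callback bodies and hypothetical names (the generic SET_RUNTIME_PM_OPS helper is used here for the sketch):

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend_late(struct device *dev)    { return 0; }
static int __maybe_unused example_resume_early(struct device *dev)    { return 0; }
static int __maybe_unused example_runtime_suspend(struct device *dev) { return 0; }
static int __maybe_unused example_runtime_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(example_suspend_late, example_resume_early)
	SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};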
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 80e06fa45720..1f6369f14fb6 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -246,7 +246,7 @@ static const struct i2c_algorithm ocores_algorithm = {
static struct i2c_adapter ocores_adapter = {
.owner = THIS_MODULE,
.name = "i2c-ocores",
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED,
.algo = &ocores_algorithm,
};
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 90dcc2eaac5f..85f8eac9ba18 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -636,7 +636,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
int r;
r = pm_runtime_get_sync(dev->dev);
- if (IS_ERR_VALUE(r))
+ if (r < 0)
goto out;
r = omap_i2c_wait_for_bb(dev);
@@ -1155,7 +1155,7 @@ omap_i2c_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(dev->dev);
r = pm_runtime_get_sync(dev->dev);
- if (IS_ERR_VALUE(r))
+ if (r < 0)
goto err_free_mem;
/*
@@ -1238,7 +1238,7 @@ omap_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON;
+ adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
adap->algo = &omap_i2c_algo;
adap->dev.parent = &pdev->dev;
@@ -1276,7 +1276,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
ret = pm_runtime_get_sync(&pdev->dev);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index 615f632c846f..7a9dce43e115 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -401,7 +401,7 @@ static void pasemi_smb_remove(struct pci_dev *dev)
kfree(smbus);
}
-static DEFINE_PCI_DEVICE_TABLE(pasemi_smb_ids) = {
+static const struct pci_device_id pasemi_smb_ids[] = {
{ PCI_DEVICE(0x1959, 0xa003) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 39dd8ec60dfd..a6f54ba27e2a 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -540,7 +540,7 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = piix4_func,
};
-static DEFINE_PCI_DEVICE_TABLE(piix4_ids) = {
+static const struct pci_device_id piix4_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 9639be86e53f..417464e9ea2a 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -148,7 +148,7 @@ static void ce4100_i2c_remove(struct pci_dev *dev)
kfree(sds);
}
-static DEFINE_PCI_DEVICE_TABLE(ce4100_i2c_devices) = {
+static const struct pci_device_id ce4100_i2c_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
{ },
};
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
new file mode 100644
index 000000000000..1b4cf14f1106
--- /dev/null
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -0,0 +1,768 @@
+/*
+ * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, Sony Mobile Communications AB.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+/* QUP Registers */
+#define QUP_CONFIG 0x000
+#define QUP_STATE 0x004
+#define QUP_IO_MODE 0x008
+#define QUP_SW_RESET 0x00c
+#define QUP_OPERATIONAL 0x018
+#define QUP_ERROR_FLAGS 0x01c
+#define QUP_ERROR_FLAGS_EN 0x020
+#define QUP_HW_VERSION 0x030
+#define QUP_MX_OUTPUT_CNT 0x100
+#define QUP_OUT_FIFO_BASE 0x110
+#define QUP_MX_WRITE_CNT 0x150
+#define QUP_MX_INPUT_CNT 0x200
+#define QUP_MX_READ_CNT 0x208
+#define QUP_IN_FIFO_BASE 0x218
+#define QUP_I2C_CLK_CTL 0x400
+#define QUP_I2C_STATUS 0x404
+
+/* QUP States and reset values */
+#define QUP_RESET_STATE 0
+#define QUP_RUN_STATE 1
+#define QUP_PAUSE_STATE 3
+#define QUP_STATE_MASK 3
+
+#define QUP_STATE_VALID BIT(2)
+#define QUP_I2C_MAST_GEN BIT(4)
+
+#define QUP_OPERATIONAL_RESET 0x000ff0
+#define QUP_I2C_STATUS_RESET 0xfffffc
+
+/* QUP OPERATIONAL FLAGS */
+#define QUP_I2C_NACK_FLAG BIT(3)
+#define QUP_OUT_NOT_EMPTY BIT(4)
+#define QUP_IN_NOT_EMPTY BIT(5)
+#define QUP_OUT_FULL BIT(6)
+#define QUP_OUT_SVC_FLAG BIT(8)
+#define QUP_IN_SVC_FLAG BIT(9)
+#define QUP_MX_OUTPUT_DONE BIT(10)
+#define QUP_MX_INPUT_DONE BIT(11)
+
+/* I2C mini core related values */
+#define QUP_CLOCK_AUTO_GATE BIT(13)
+#define I2C_MINI_CORE (2 << 8)
+#define I2C_N_VAL 15
+/* Most significant word offset in FIFO port */
+#define QUP_MSW_SHIFT (I2C_N_VAL + 1)
+
+/* Packing/Unpacking words in FIFOs, and IO modes */
+#define QUP_OUTPUT_BLK_MODE (1 << 10)
+#define QUP_INPUT_BLK_MODE (1 << 12)
+#define QUP_UNPACK_EN BIT(14)
+#define QUP_PACK_EN BIT(15)
+
+#define QUP_REPACK_EN (QUP_UNPACK_EN | QUP_PACK_EN)
+
+#define QUP_OUTPUT_BLOCK_SIZE(x)(((x) >> 0) & 0x03)
+#define QUP_OUTPUT_FIFO_SIZE(x) (((x) >> 2) & 0x07)
+#define QUP_INPUT_BLOCK_SIZE(x) (((x) >> 5) & 0x03)
+#define QUP_INPUT_FIFO_SIZE(x) (((x) >> 7) & 0x07)
+
+/* QUP tags */
+#define QUP_TAG_START (1 << 8)
+#define QUP_TAG_DATA (2 << 8)
+#define QUP_TAG_STOP (3 << 8)
+#define QUP_TAG_REC (4 << 8)
+
+/* Status, Error flags */
+#define I2C_STATUS_WR_BUFFER_FULL BIT(0)
+#define I2C_STATUS_BUS_ACTIVE BIT(8)
+#define I2C_STATUS_ERROR_MASK 0x38000fc
+#define QUP_STATUS_ERROR_FLAGS 0x7c
+
+#define QUP_READ_LIMIT 256
+
+struct qup_i2c_dev {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+ struct clk *clk;
+ struct clk *pclk;
+ struct i2c_adapter adap;
+
+ int clk_ctl;
+ int out_fifo_sz;
+ int in_fifo_sz;
+ int out_blk_sz;
+ int in_blk_sz;
+
+ unsigned long one_byte_t;
+
+ struct i2c_msg *msg;
+ /* Current position in user message buffer */
+ int pos;
+ /* I2C protocol errors */
+ u32 bus_err;
+ /* QUP core errors */
+ u32 qup_err;
+
+ struct completion xfer;
+};
+
+static irqreturn_t qup_i2c_interrupt(int irq, void *dev)
+{
+ struct qup_i2c_dev *qup = dev;
+ u32 bus_err;
+ u32 qup_err;
+ u32 opflags;
+
+ bus_err = readl(qup->base + QUP_I2C_STATUS);
+ qup_err = readl(qup->base + QUP_ERROR_FLAGS);
+ opflags = readl(qup->base + QUP_OPERATIONAL);
+
+ if (!qup->msg) {
+ /* Clear Error interrupt */
+ writel(QUP_RESET_STATE, qup->base + QUP_STATE);
+ return IRQ_HANDLED;
+ }
+
+ bus_err &= I2C_STATUS_ERROR_MASK;
+ qup_err &= QUP_STATUS_ERROR_FLAGS;
+
+ if (qup_err) {
+ /* Clear Error interrupt */
+ writel(qup_err, qup->base + QUP_ERROR_FLAGS);
+ goto done;
+ }
+
+ if (bus_err) {
+ /* Clear Error interrupt */
+ writel(QUP_RESET_STATE, qup->base + QUP_STATE);
+ goto done;
+ }
+
+ if (opflags & QUP_IN_SVC_FLAG)
+ writel(QUP_IN_SVC_FLAG, qup->base + QUP_OPERATIONAL);
+
+ if (opflags & QUP_OUT_SVC_FLAG)
+ writel(QUP_OUT_SVC_FLAG, qup->base + QUP_OPERATIONAL);
+
+done:
+ qup->qup_err = qup_err;
+ qup->bus_err = bus_err;
+ complete(&qup->xfer);
+ return IRQ_HANDLED;
+}
+
+static int qup_i2c_poll_state_mask(struct qup_i2c_dev *qup,
+ u32 req_state, u32 req_mask)
+{
+ int retries = 1;
+ u32 state;
+
+ /*
+ * State transition takes 3 AHB clock cycles + 3 I2C master clock
+ * cycles. So retry once after a 1 us delay.
+ */
+ do {
+ state = readl(qup->base + QUP_STATE);
+
+ if (state & QUP_STATE_VALID &&
+ (state & req_mask) == req_state)
+ return 0;
+
+ udelay(1);
+ } while (retries--);
+
+ return -ETIMEDOUT;
+}
+
+static int qup_i2c_poll_state(struct qup_i2c_dev *qup, u32 req_state)
+{
+ return qup_i2c_poll_state_mask(qup, req_state, QUP_STATE_MASK);
+}
+
+static int qup_i2c_poll_state_valid(struct qup_i2c_dev *qup)
+{
+ return qup_i2c_poll_state_mask(qup, 0, 0);
+}
+
+static int qup_i2c_poll_state_i2c_master(struct qup_i2c_dev *qup)
+{
+ return qup_i2c_poll_state_mask(qup, QUP_I2C_MAST_GEN, QUP_I2C_MAST_GEN);
+}
+
+static int qup_i2c_change_state(struct qup_i2c_dev *qup, u32 state)
+{
+ if (qup_i2c_poll_state_valid(qup) != 0)
+ return -EIO;
+
+ writel(state, qup->base + QUP_STATE);
+
+ if (qup_i2c_poll_state(qup, state) != 0)
+ return -EIO;
+ return 0;
+}
+
+static int qup_i2c_wait_writeready(struct qup_i2c_dev *qup)
+{
+ unsigned long timeout;
+ u32 opflags;
+ u32 status;
+
+ timeout = jiffies + HZ;
+
+ for (;;) {
+ opflags = readl(qup->base + QUP_OPERATIONAL);
+ status = readl(qup->base + QUP_I2C_STATUS);
+
+ if (!(opflags & QUP_OUT_NOT_EMPTY) &&
+ !(status & I2C_STATUS_BUS_ACTIVE))
+ return 0;
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ usleep_range(qup->one_byte_t, qup->one_byte_t * 2);
+ }
+}
+
+static void qup_i2c_set_write_mode(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ /* Number of entries to shift out, including the start */
+ int total = msg->len + 1;
+
+ if (total < qup->out_fifo_sz) {
+ /* FIFO mode */
+ writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE);
+ writel(total, qup->base + QUP_MX_WRITE_CNT);
+ } else {
+ /* BLOCK mode (transfer data on chunks) */
+ writel(QUP_OUTPUT_BLK_MODE | QUP_REPACK_EN,
+ qup->base + QUP_IO_MODE);
+ writel(total, qup->base + QUP_MX_OUTPUT_CNT);
+ }
+}
+
+static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ u32 addr = msg->addr << 1;
+ u32 qup_tag;
+ u32 opflags;
+ int idx;
+ u32 val;
+
+ if (qup->pos == 0) {
+ val = QUP_TAG_START | addr;
+ idx = 1;
+ } else {
+ val = 0;
+ idx = 0;
+ }
+
+ while (qup->pos < msg->len) {
+ /* Check that there's space in the FIFO for our pair */
+ opflags = readl(qup->base + QUP_OPERATIONAL);
+ if (opflags & QUP_OUT_FULL)
+ break;
+
+ if (qup->pos == msg->len - 1)
+ qup_tag = QUP_TAG_STOP;
+ else
+ qup_tag = QUP_TAG_DATA;
+
+ if (idx & 1)
+ val |= (qup_tag | msg->buf[qup->pos]) << QUP_MSW_SHIFT;
+ else
+ val = qup_tag | msg->buf[qup->pos];
+
+ /* Write out the pair and the last odd value */
+ if (idx & 1 || qup->pos == msg->len - 1)
+ writel(val, qup->base + QUP_OUT_FIFO_BASE);
+
+ qup->pos++;
+ idx++;
+ }
+}
+
+static int qup_i2c_write_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ unsigned long left;
+ int ret;
+
+ qup->msg = msg;
+ qup->pos = 0;
+
+ enable_irq(qup->irq);
+
+ qup_i2c_set_write_mode(qup, msg);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto err;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+
+ do {
+ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ if (ret)
+ goto err;
+
+ qup_i2c_issue_write(qup, msg);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto err;
+
+ left = wait_for_completion_timeout(&qup->xfer, HZ);
+ if (!left) {
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ if (qup->bus_err || qup->qup_err) {
+ if (qup->bus_err & QUP_I2C_NACK_FLAG)
+ dev_err(qup->dev, "NACK from %x\n", msg->addr);
+ ret = -EIO;
+ goto err;
+ }
+ } while (qup->pos < msg->len);
+
+ /* Wait for the outstanding data in the fifo to drain */
+ ret = qup_i2c_wait_writeready(qup);
+
+err:
+ disable_irq(qup->irq);
+ qup->msg = NULL;
+
+ return ret;
+}
+
+static void qup_i2c_set_read_mode(struct qup_i2c_dev *qup, int len)
+{
+ if (len < qup->in_fifo_sz) {
+ /* FIFO mode */
+ writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE);
+ writel(len, qup->base + QUP_MX_READ_CNT);
+ } else {
+ /* BLOCK mode (transfer data on chunks) */
+ writel(QUP_INPUT_BLK_MODE | QUP_REPACK_EN,
+ qup->base + QUP_IO_MODE);
+ writel(len, qup->base + QUP_MX_INPUT_CNT);
+ }
+}
+
+static void qup_i2c_issue_read(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ u32 addr, len, val;
+
+ addr = (msg->addr << 1) | 1;
+
+ /* 0 is used to specify a length 256 (QUP_READ_LIMIT) */
+ len = (msg->len == QUP_READ_LIMIT) ? 0 : msg->len;
+
+ val = ((QUP_TAG_REC | len) << QUP_MSW_SHIFT) | QUP_TAG_START | addr;
+ writel(val, qup->base + QUP_OUT_FIFO_BASE);
+}
+
+
+static void qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ u32 opflags;
+ u32 val = 0;
+ int idx;
+
+ for (idx = 0; qup->pos < msg->len; idx++) {
+ if ((idx & 1) == 0) {
+ /* Check that the FIFO has data */
+ opflags = readl(qup->base + QUP_OPERATIONAL);
+ if (!(opflags & QUP_IN_NOT_EMPTY))
+ break;
+
+ /* Read one FIFO word, which carries two data bytes */
+ val = readl(qup->base + QUP_IN_FIFO_BASE);
+
+ msg->buf[qup->pos++] = val & 0xFF;
+ } else {
+ msg->buf[qup->pos++] = val >> QUP_MSW_SHIFT;
+ }
+ }
+}
+
+static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ unsigned long left;
+ int ret;
+
+ /*
+ * The QUP block will issue a NACK and STOP on the bus when reaching
+ * the end of the read; the read length is specified in a single byte,
+ * which limits a single read to 256 (QUP_READ_LIMIT) bytes.
+ */
+ if (msg->len > QUP_READ_LIMIT) {
+ dev_err(qup->dev, "HW not capable of reads over %d bytes\n",
+ QUP_READ_LIMIT);
+ return -EINVAL;
+ }
+
+ qup->msg = msg;
+ qup->pos = 0;
+
+ enable_irq(qup->irq);
+
+ qup_i2c_set_read_mode(qup, msg->len);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto err;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+
+ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ if (ret)
+ goto err;
+
+ qup_i2c_issue_read(qup, msg);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto err;
+
+ do {
+ left = wait_for_completion_timeout(&qup->xfer, HZ);
+ if (!left) {
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ if (qup->bus_err || qup->qup_err) {
+ if (qup->bus_err & QUP_I2C_NACK_FLAG)
+ dev_err(qup->dev, "NACK from %x\n", msg->addr);
+ ret = -EIO;
+ goto err;
+ }
+
+ qup_i2c_read_fifo(qup, msg);
+ } while (qup->pos < msg->len);
+
+err:
+ disable_irq(qup->irq);
+ qup->msg = NULL;
+
+ return ret;
+}
+
+static int qup_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
+ int ret, idx;
+
+ ret = pm_runtime_get_sync(qup->dev);
+ if (ret < 0)
+ goto out;
+
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = qup_i2c_poll_state(qup, QUP_RESET_STATE);
+ if (ret)
+ goto out;
+
+ /* Configure QUP as I2C mini core */
+ writel(I2C_MINI_CORE | I2C_N_VAL, qup->base + QUP_CONFIG);
+
+ for (idx = 0; idx < num; idx++) {
+ if (msgs[idx].len == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (qup_i2c_poll_state_i2c_master(qup)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (msgs[idx].flags & I2C_M_RD)
+ ret = qup_i2c_read_one(qup, &msgs[idx]);
+ else
+ ret = qup_i2c_write_one(qup, &msgs[idx]);
+
+ if (ret)
+ break;
+
+ ret = qup_i2c_change_state(qup, QUP_RESET_STATE);
+ if (ret)
+ break;
+ }
+
+ if (ret == 0)
+ ret = num;
+out:
+
+ pm_runtime_mark_last_busy(qup->dev);
+ pm_runtime_put_autosuspend(qup->dev);
+
+ return ret;
+}
+
+static u32 qup_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm qup_i2c_algo = {
+ .master_xfer = qup_i2c_xfer,
+ .functionality = qup_i2c_func,
+};
+
+static void qup_i2c_enable_clocks(struct qup_i2c_dev *qup)
+{
+ clk_prepare_enable(qup->clk);
+ clk_prepare_enable(qup->pclk);
+}
+
+static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
+{
+ u32 config;
+
+ qup_i2c_change_state(qup, QUP_RESET_STATE);
+ clk_disable_unprepare(qup->clk);
+ config = readl(qup->base + QUP_CONFIG);
+ config |= QUP_CLOCK_AUTO_GATE;
+ writel(config, qup->base + QUP_CONFIG);
+ clk_disable_unprepare(qup->pclk);
+}
+
+static int qup_i2c_probe(struct platform_device *pdev)
+{
+ static const int blk_sizes[] = {4, 16, 32};
+ struct device_node *node = pdev->dev.of_node;
+ struct qup_i2c_dev *qup;
+ unsigned long one_bit_t;
+ struct resource *res;
+ u32 io_mode, hw_ver, size;
+ int ret, fs_div, hs_div;
+ int src_clk_freq;
+ u32 clk_freq = 100000;
+
+ qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL);
+ if (!qup)
+ return -ENOMEM;
+
+ qup->dev = &pdev->dev;
+ init_completion(&qup->xfer);
+ platform_set_drvdata(pdev, qup);
+
+ of_property_read_u32(node, "clock-frequency", &clk_freq);
+
+ /* We support frequencies up to Fast mode (400 kHz) */
+ if (!clk_freq || clk_freq > 400000) {
+ dev_err(qup->dev, "clock frequency not supported %d\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ qup->base = devm_ioremap_resource(qup->dev, res);
+ if (IS_ERR(qup->base))
+ return PTR_ERR(qup->base);
+
+ qup->irq = platform_get_irq(pdev, 0);
+ if (qup->irq < 0) {
+ dev_err(qup->dev, "No IRQ defined\n");
+ return qup->irq;
+ }
+
+ qup->clk = devm_clk_get(qup->dev, "core");
+ if (IS_ERR(qup->clk)) {
+ dev_err(qup->dev, "Could not get core clock\n");
+ return PTR_ERR(qup->clk);
+ }
+
+ qup->pclk = devm_clk_get(qup->dev, "iface");
+ if (IS_ERR(qup->pclk)) {
+ dev_err(qup->dev, "Could not get iface clock\n");
+ return PTR_ERR(qup->pclk);
+ }
+
+ qup_i2c_enable_clocks(qup);
+
+ /*
+ * Bootloaders might leave a pending interrupt on certain QUPs,
+ * so we reset the core before registering for interrupts.
+ */
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = qup_i2c_poll_state_valid(qup);
+ if (ret)
+ goto fail;
+
+ ret = devm_request_irq(qup->dev, qup->irq, qup_i2c_interrupt,
+ IRQF_TRIGGER_HIGH, "i2c_qup", qup);
+ if (ret) {
+ dev_err(qup->dev, "Request %d IRQ failed\n", qup->irq);
+ goto fail;
+ }
+ disable_irq(qup->irq);
+
+ hw_ver = readl(qup->base + QUP_HW_VERSION);
+ dev_dbg(qup->dev, "Revision %x\n", hw_ver);
+
+ io_mode = readl(qup->base + QUP_IO_MODE);
+
+ /*
+ * The block/fifo size w.r.t. 'actual data' is 1/2 due to 'tag'
+ * associated with each byte written/received
+ */
+ size = QUP_OUTPUT_BLOCK_SIZE(io_mode);
+ if (size >= ARRAY_SIZE(blk_sizes)) {
+ ret = -EIO;
+ goto fail;
+ }
+ qup->out_blk_sz = blk_sizes[size] / 2;
+
+ size = QUP_INPUT_BLOCK_SIZE(io_mode);
+ if (size >= ARRAY_SIZE(blk_sizes)) {
+ ret = -EIO;
+ goto fail;
+ }
+ qup->in_blk_sz = blk_sizes[size] / 2;
+
+ size = QUP_OUTPUT_FIFO_SIZE(io_mode);
+ qup->out_fifo_sz = qup->out_blk_sz * (2 << size);
+
+ size = QUP_INPUT_FIFO_SIZE(io_mode);
+ qup->in_fifo_sz = qup->in_blk_sz * (2 << size);
+
+ src_clk_freq = clk_get_rate(qup->clk);
+ fs_div = ((src_clk_freq / clk_freq) / 2) - 3;
+ hs_div = 3;
+ qup->clk_ctl = (hs_div << 8) | (fs_div & 0xff);
+
+ /*
+ * Time it takes for a byte to be clocked out on the bus.
+ * Each byte takes 9 clock cycles (8 bits + 1 ack).
+ */
+ one_bit_t = (USEC_PER_SEC / clk_freq) + 1;
+ qup->one_byte_t = one_bit_t * 9;
+
+ dev_dbg(qup->dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
+ qup->in_blk_sz, qup->in_fifo_sz,
+ qup->out_blk_sz, qup->out_fifo_sz);
+
+ i2c_set_adapdata(&qup->adap, qup);
+ qup->adap.algo = &qup_i2c_algo;
+ qup->adap.dev.parent = qup->dev;
+ qup->adap.dev.of_node = pdev->dev.of_node;
+ strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
+
+ ret = i2c_add_adapter(&qup->adap);
+ if (ret)
+ goto fail;
+
+ pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(qup->dev);
+ pm_runtime_set_active(qup->dev);
+ pm_runtime_enable(qup->dev);
+ return 0;
+
+fail:
+ qup_i2c_disable_clocks(qup);
+ return ret;
+}
+
+static int qup_i2c_remove(struct platform_device *pdev)
+{
+ struct qup_i2c_dev *qup = platform_get_drvdata(pdev);
+
+ disable_irq(qup->irq);
+ qup_i2c_disable_clocks(qup);
+ i2c_del_adapter(&qup->adap);
+ pm_runtime_disable(qup->dev);
+ pm_runtime_set_suspended(qup->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int qup_i2c_pm_suspend_runtime(struct device *device)
+{
+ struct qup_i2c_dev *qup = dev_get_drvdata(device);
+
+ dev_dbg(device, "pm_runtime: suspending...\n");
+ qup_i2c_disable_clocks(qup);
+ return 0;
+}
+
+static int qup_i2c_pm_resume_runtime(struct device *device)
+{
+ struct qup_i2c_dev *qup = dev_get_drvdata(device);
+
+ dev_dbg(device, "pm_runtime: resuming...\n");
+ qup_i2c_enable_clocks(qup);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int qup_i2c_suspend(struct device *device)
+{
+ qup_i2c_pm_suspend_runtime(device);
+ return 0;
+}
+
+static int qup_i2c_resume(struct device *device)
+{
+ qup_i2c_pm_resume_runtime(device);
+ pm_runtime_mark_last_busy(device);
+ pm_request_autosuspend(device);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops qup_i2c_qup_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(
+ qup_i2c_suspend,
+ qup_i2c_resume)
+ SET_RUNTIME_PM_OPS(
+ qup_i2c_pm_suspend_runtime,
+ qup_i2c_pm_resume_runtime,
+ NULL)
+};
+
+static const struct of_device_id qup_i2c_dt_match[] = {
+ { .compatible = "qcom,i2c-qup-v1.1.1" },
+ { .compatible = "qcom,i2c-qup-v2.1.1" },
+ { .compatible = "qcom,i2c-qup-v2.2.1" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qup_i2c_dt_match);
+
+static struct platform_driver qup_i2c_driver = {
+ .probe = qup_i2c_probe,
+ .remove = qup_i2c_remove,
+ .driver = {
+ .name = "i2c_qup",
+ .owner = THIS_MODULE,
+ .pm = &qup_i2c_qup_pm_ops,
+ .of_match_table = qup_i2c_dt_match,
+ },
+};
+
+module_platform_driver(qup_i2c_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c_qup");
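
In the new i2c-qup driver above, every FIFO word carries two 16-bit entries, each a QUP tag combined with an address or data byte, which is why the block/FIFO sizes are halved for "actual data". A small sketch of how qup_i2c_issue_write() packs the first word of a transfer, reusing the QUP_TAG_* and QUP_MSW_SHIFT definitions from this file (the address 0x50 and byte 0xA5 are illustrative only):

#include <linux/types.h>

static u32 example_pack_first_fifo_word(void)
{
	u32 lsw = QUP_TAG_START | (0x50 << 1);	/* entry 0: START + write address */
	u32 msw = QUP_TAG_DATA | 0xA5;		/* entry 1: first payload byte */

	/* Two entries per 32-bit word written to QUP_OUT_FIFO_BASE. */
	return (msw << QUP_MSW_SHIFT) | lsw;
}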
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 0282d4d42805..d4fa8eba6e9d 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -638,6 +638,7 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
{ .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,i2c-r8a7790", .data = (void *)I2C_RCAR_GEN2 },
+ { .compatible = "renesas,i2c-r8a7791", .data = (void *)I2C_RCAR_GEN2 },
{},
};
MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
@@ -691,7 +692,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
adap = &priv->adap;
adap->nr = pdev->id;
adap->algo = &rcar_i2c_algo;
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED;
adap->retries = 3;
adap->dev.parent = dev;
adap->dev.of_node = dev->of_node;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 684d21e71e4a..ae4491062e41 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -601,6 +601,31 @@ static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id)
return IRQ_HANDLED;
}
+/*
+ * Disable the bus so that we won't get any interrupts from now on, or try
+ * to drive any lines. This is the default state when we don't have
+ * anything to send/receive.
+ *
+ * If there is an event on the bus, or we have a pre-existing event at
+ * kernel boot time, we may not notice the event and the I2C controller
+ * will lock the bus with the I2C clock line low indefinitely.
+ */
+static inline void s3c24xx_i2c_disable_bus(struct s3c24xx_i2c *i2c)
+{
+ unsigned long tmp;
+
+ /* Stop driving the I2C pins */
+ tmp = readl(i2c->regs + S3C2410_IICSTAT);
+ tmp &= ~S3C2410_IICSTAT_TXRXEN;
+ writel(tmp, i2c->regs + S3C2410_IICSTAT);
+
+ /* We don't expect any interrupts now, and don't want to send acks */
+ tmp = readl(i2c->regs + S3C2410_IICCON);
+ tmp &= ~(S3C2410_IICCON_IRQEN | S3C2410_IICCON_IRQPEND |
+ S3C2410_IICCON_ACKEN);
+ writel(tmp, i2c->regs + S3C2410_IICCON);
+}
+
/* s3c24xx_i2c_set_master
*
@@ -735,7 +760,11 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
s3c24xx_i2c_wait_idle(i2c);
+ s3c24xx_i2c_disable_bus(i2c);
+
out:
+ i2c->state = STATE_IDLE;
+
return ret;
}
@@ -1004,7 +1033,6 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
{
- unsigned long iicon = S3C2410_IICCON_IRQEN | S3C2410_IICCON_ACKEN;
struct s3c2410_platform_i2c *pdata;
unsigned int freq;
@@ -1018,12 +1046,12 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
dev_info(i2c->dev, "slave address 0x%02x\n", pdata->slave_addr);
- writel(iicon, i2c->regs + S3C2410_IICCON);
+ writel(0, i2c->regs + S3C2410_IICCON);
+ writel(0, i2c->regs + S3C2410_IICSTAT);
/* we need to work out the divisors for the clock... */
if (s3c24xx_i2c_clockrate(i2c, &freq) != 0) {
- writel(0, i2c->regs + S3C2410_IICCON);
dev_err(i2c->dev, "cannot meet bus frequency required\n");
return -EINVAL;
}
@@ -1031,7 +1059,8 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
/* todo - check that the i2c lines aren't being dragged anywhere */
dev_info(i2c->dev, "bus frequency set to %d KHz\n", freq);
- dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02lx\n", iicon);
+ dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02x\n",
+ readl(i2c->regs + S3C2410_IICCON));
return 0;
}
@@ -1106,7 +1135,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &s3c24xx_i2c_algorithm;
i2c->adap.retries = 2;
- i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED;
i2c->tx_setup = 50;
init_waitqueue_head(&i2c->wait);
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 6784f7f527a4..8e3be7ed0586 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -312,7 +312,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
goto out;
}
adap = &siic->adapter;
- adap->class = I2C_CLASS_HWMON;
+ adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
siic->base = devm_ioremap_resource(&pdev->dev, mem_res);
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 79fd96a04386..ac9bc33acef4 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -369,7 +369,7 @@ static struct i2c_adapter sis5595_adapter = {
.algo = &smbus_algorithm,
};
-static DEFINE_PCI_DEVICE_TABLE(sis5595_ids) = {
+static const struct pci_device_id sis5595_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 19b8505d0cdd..c6366733008d 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -510,7 +510,7 @@ static struct i2c_adapter sis630_adapter = {
.retries = 3
};
-static DEFINE_PCI_DEVICE_TABLE(sis630_ids) = {
+static const struct pci_device_id sis630_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_964) },
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index f8aa0c29f02b..8dc2fc5f74ff 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -244,7 +244,7 @@ static struct i2c_adapter sis96x_adapter = {
.algo = &smbus_algorithm,
};
-static DEFINE_PCI_DEVICE_TABLE(sis96x_ids) = {
+static const struct pci_device_id sis96x_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 9cf715d69551..872016196ef3 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -574,7 +574,7 @@ static irqreturn_t st_i2c_isr_thread(int irq, void *data)
writel_relaxed(it, i2c_dev->base + SSC_IEN);
st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG);
- c->result = -EIO;
+ c->result = -EAGAIN;
break;
default:
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 5b80ef310841..29b1fb778943 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -911,7 +911,7 @@ static int stu300_probe(struct platform_device *pdev)
adap = &dev->adapter;
adap->owner = THIS_MODULE;
/* DDC class but actually often used for more generic I2C */
- adap->class = I2C_CLASS_DDC;
+ adap->class = I2C_CLASS_DDC | I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
sizeof(adap->name));
adap->nr = bus_nr;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 9704537aee3c..00f04cb5b4eb 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -794,7 +794,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
i2c_dev->adapter.owner = THIS_MODULE;
- i2c_dev->adapter.class = I2C_CLASS_HWMON;
+ i2c_dev->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter",
sizeof(i2c_dev->adapter.name));
i2c_dev->adapter.algo = &tegra_i2c_algo;
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 49d7f14b9d27..f4a1ed757612 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -88,7 +88,7 @@ static struct i2c_adapter vt586b_adapter = {
};
-static DEFINE_PCI_DEVICE_TABLE(vt586b_ids) = {
+static const struct pci_device_id vt586b_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 40d36df678de..6841200b6e50 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -442,7 +442,7 @@ release_region:
return error;
}
-static DEFINE_PCI_DEVICE_TABLE(vt596_ids) = {
+static const struct pci_device_id vt596_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3),
.driver_data = SMBBA1 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3),
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 28107502517f..7731f1795869 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -684,7 +684,7 @@ static const struct i2c_algorithm xiic_algorithm = {
static struct i2c_adapter xiic_adapter = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED,
.algo = &xiic_algorithm,
};
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 2d1d2c5653fb..cb66f9586f76 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -556,7 +556,7 @@ static struct platform_driver scx200_pci_driver = {
.remove = scx200_remove,
};
-static DEFINE_PCI_DEVICE_TABLE(scx200_isa) = {
+static const struct pci_device_id scx200_isa[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) },
{ 0, }
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 5fb80b8962a2..7c7f4b856bad 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -48,10 +48,13 @@
#include <linux/rwsem.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
+#include <linux/jump_label.h>
#include <asm/uaccess.h>
#include "i2c-core.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/i2c.h>
/* core_lock protects i2c_adapter_idr, and guarantees
that device detection, deletion of detected devices, and attach_adapter
@@ -62,6 +65,18 @@ static DEFINE_IDR(i2c_adapter_idr);
static struct device_type i2c_client_type;
static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
+static struct static_key i2c_trace_msg = STATIC_KEY_INIT_FALSE;
+
+void i2c_transfer_trace_reg(void)
+{
+ static_key_slow_inc(&i2c_trace_msg);
+}
+
+void i2c_transfer_trace_unreg(void)
+{
+ static_key_slow_dec(&i2c_trace_msg);
+}
+
/* ------------------------------------------------------------------------- */
static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
@@ -1686,6 +1701,7 @@ static void __exit i2c_exit(void)
class_compat_unregister(i2c_adapter_compat_class);
#endif
bus_unregister(&i2c_bus_type);
+ tracepoint_synchronize_unregister();
}
/* We must initialize early, because some subsystems register i2c drivers
@@ -1716,6 +1732,19 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
unsigned long orig_jiffies;
int ret, try;
+ /* i2c_trace_msg gets enabled when tracepoint i2c_transfer gets
+ * enabled. This is an efficient way of keeping the for-loop from
+ * being executed when not needed.
+ */
+ if (static_key_false(&i2c_trace_msg)) {
+ int i;
+ for (i = 0; i < num; i++)
+ if (msgs[i].flags & I2C_M_RD)
+ trace_i2c_read(adap, &msgs[i], i);
+ else
+ trace_i2c_write(adap, &msgs[i], i);
+ }
+
/* Retry automatically on arbitration loss */
orig_jiffies = jiffies;
for (ret = 0, try = 0; try <= adap->retries; try++) {
@@ -1726,6 +1755,14 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
break;
}
+ if (static_key_false(&i2c_trace_msg)) {
+ int i;
+ for (i = 0; i < ret; i++)
+ if (msgs[i].flags & I2C_M_RD)
+ trace_i2c_reply(adap, &msgs[i], i);
+ trace_i2c_result(adap, i, ret);
+ }
+
return ret;
}
EXPORT_SYMBOL(__i2c_transfer);
@@ -1941,6 +1978,13 @@ static int i2c_detect_address(struct i2c_client *temp_client,
struct i2c_client *client;
/* Detection succeeded, instantiate the device */
+ if (adapter->class & I2C_CLASS_DEPRECATED)
+ dev_warn(&adapter->dev,
+ "This adapter will soon drop class based instantiation of devices. "
+ "Please make sure client 0x%02x gets instantiated by other means. "
+ "Check 'Documentation/i2c/instantiating-devices' for details.\n",
+ info.addr);
+
dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n",
info.type, info.addr);
client = i2c_new_device(adapter, &info);
@@ -2521,6 +2565,14 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
int try;
s32 res;
+ /* If enabled, the following two tracepoints are conditional on
+ * read_write and protocol.
+ */
+ trace_smbus_write(adapter, addr, flags, read_write,
+ command, protocol, data);
+ trace_smbus_read(adapter, addr, flags, read_write,
+ command, protocol);
+
flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB;
if (adapter->algo->smbus_xfer) {
@@ -2541,15 +2593,24 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
i2c_unlock_adapter(adapter);
if (res != -EOPNOTSUPP || !adapter->algo->master_xfer)
- return res;
+ goto trace;
/*
* Fall back to i2c_smbus_xfer_emulated if the adapter doesn't
* implement native support for the SMBus operation.
*/
}
- return i2c_smbus_xfer_emulated(adapter, addr, flags, read_write,
- command, protocol, data);
+ res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write,
+ command, protocol, data);
+
+trace:
+ /* If enabled, the reply tracepoint is conditional on read_write. */
+ trace_smbus_reply(adapter, addr, flags, read_write,
+ command, protocol, data);
+ trace_smbus_result(adapter, addr, flags, read_write,
+ command, protocol, res);
+
+ return res;
}
EXPORT_SYMBOL(i2c_smbus_xfer);
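
The tracing added to __i2c_transfer() above is gated by a static key, so the per-message loops compile down to a patched-out branch while the i2c tracepoints are disabled. A minimal sketch of that jump-label pattern from the same kernel era (names hypothetical):

#include <linux/jump_label.h>
#include <linux/printk.h>

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

/* Wired up as the tracepoint's register/unregister callbacks. */
static void example_trace_reg(void)
{
	static_key_slow_inc(&example_key);
}

static void example_trace_unreg(void)
{
	static_key_slow_dec(&example_key);
}

static void example_hot_path(void)
{
	/* A NOP until the key has been incremented at least once. */
	if (static_key_false(&example_key))
		pr_debug("tracing enabled, doing the extra per-message work\n");
}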
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 8e1939f564f4..a43220c2e3d9 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -196,6 +196,53 @@ static struct cpuidle_state snb_cstates[] = {
.enter = NULL }
};
+static struct cpuidle_state byt_cstates[] = {
+ {
+ .name = "C1-BYT",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle },
+ {
+ .name = "C1E-BYT",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 15,
+ .target_residency = 30,
+ .enter = &intel_idle },
+ {
+ .name = "C6N-BYT",
+ .desc = "MWAIT 0x58",
+ .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 40,
+ .target_residency = 275,
+ .enter = &intel_idle },
+ {
+ .name = "C6S-BYT",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 560,
+ .enter = &intel_idle },
+ {
+ .name = "C7-BYT",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+ .target_residency = 1500,
+ .enter = &intel_idle },
+ {
+ .name = "C7S-BYT",
+ .desc = "MWAIT 0x64",
+ .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 20000,
+ .enter = &intel_idle },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state ivb_cstates[] = {
{
.name = "C1-IVB",
@@ -236,6 +283,105 @@ static struct cpuidle_state ivb_cstates[] = {
.enter = NULL }
};
+static struct cpuidle_state ivt_cstates[] = {
+ {
+ .name = "C1-IVT",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle },
+ {
+ .name = "C1E-IVT",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 80,
+ .enter = &intel_idle },
+ {
+ .name = "C3-IVT",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 59,
+ .target_residency = 156,
+ .enter = &intel_idle },
+ {
+ .name = "C6-IVT",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 82,
+ .target_residency = 300,
+ .enter = &intel_idle },
+ {
+ .enter = NULL }
+};
+
+static struct cpuidle_state ivt_cstates_4s[] = {
+ {
+ .name = "C1-IVT-4S",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle },
+ {
+ .name = "C1E-IVT-4S",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 250,
+ .enter = &intel_idle },
+ {
+ .name = "C3-IVT-4S",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 59,
+ .target_residency = 300,
+ .enter = &intel_idle },
+ {
+ .name = "C6-IVT-4S",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 84,
+ .target_residency = 400,
+ .enter = &intel_idle },
+ {
+ .enter = NULL }
+};
+
+static struct cpuidle_state ivt_cstates_8s[] = {
+ {
+ .name = "C1-IVT-8S",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle },
+ {
+ .name = "C1E-IVT-8S",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 500,
+ .enter = &intel_idle },
+ {
+ .name = "C3-IVT-8S",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 59,
+ .target_residency = 600,
+ .enter = &intel_idle },
+ {
+ .name = "C6-IVT-8S",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 88,
+ .target_residency = 700,
+ .enter = &intel_idle },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state hsw_cstates[] = {
{
.name = "C1-HSW",
@@ -464,11 +610,21 @@ static const struct idle_cpu idle_cpu_snb = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_byt = {
+ .state_table = byt_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
static const struct idle_cpu idle_cpu_ivb = {
.state_table = ivb_cstates,
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_ivt = {
+ .state_table = ivt_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
static const struct idle_cpu idle_cpu_hsw = {
.state_table = hsw_cstates,
.disable_promotion_to_c1e = true,
@@ -494,8 +650,10 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x2f, idle_cpu_nehalem),
ICPU(0x2a, idle_cpu_snb),
ICPU(0x2d, idle_cpu_snb),
+ ICPU(0x36, idle_cpu_atom),
+ ICPU(0x37, idle_cpu_byt),
ICPU(0x3a, idle_cpu_ivb),
- ICPU(0x3e, idle_cpu_ivb),
+ ICPU(0x3e, idle_cpu_ivt),
ICPU(0x3c, idle_cpu_hsw),
ICPU(0x3f, idle_cpu_hsw),
ICPU(0x45, idle_cpu_hsw),
@@ -572,6 +730,39 @@ static void intel_idle_cpuidle_devices_uninit(void)
free_percpu(intel_idle_cpuidle_devices);
return;
}
+
+/*
+ * intel_idle_state_table_update()
+ *
+ * Update the default state_table for this CPU-id
+ *
+ * Currently used to access tuned IVT multi-socket targets
+ * Assumption: num_sockets == (max_package_num + 1)
+ */
+void intel_idle_state_table_update(void)
+{
+ /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
+ if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
+ int cpu, package_num, num_sockets = 1;
+
+ for_each_online_cpu(cpu) {
+ package_num = topology_physical_package_id(cpu);
+ if (package_num + 1 > num_sockets) {
+ num_sockets = package_num + 1;
+
+ if (num_sockets > 4) {
+ cpuidle_state_table = ivt_cstates_8s;
+ return;
+ }
+ }
+ }
+
+ if (num_sockets > 2)
+ cpuidle_state_table = ivt_cstates_4s;
+ /* else, 1 and 2 socket systems use default ivt_cstates */
+ }
+ return;
+}
+
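As a quick illustration of the socket-count assumption described in the comment above (num_sockets == max_package_num + 1) and of how it selects among the three IVT tables, here is a minimal stand-alone sketch. It is not kernel code; the package_ids values are made up.

#include <stdio.h>

/* Maps a set of per-CPU package ids onto the 1-2 / 3-4 / >4 socket tables. */
static const char *ivt_table_for(const int *package_ids, int ncpus)
{
	int i, num_sockets = 1;

	for (i = 0; i < ncpus; i++)
		if (package_ids[i] + 1 > num_sockets)
			num_sockets = package_ids[i] + 1;

	if (num_sockets > 4)
		return "ivt_cstates_8s";
	if (num_sockets > 2)
		return "ivt_cstates_4s";
	return "ivt_cstates";	/* 1 and 2 socket systems keep the default */
}

int main(void)
{
	int ids[] = { 0, 0, 1, 1, 2, 2 };	/* three packages -> three sockets */

	printf("%s\n", ivt_table_for(ids, 6));	/* prints ivt_cstates_4s */
	return 0;
}
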
/*
* intel_idle_cpuidle_driver_init()
* allocate, initialize cpuidle_states
@@ -581,10 +772,12 @@ static int __init intel_idle_cpuidle_driver_init(void)
int cstate;
struct cpuidle_driver *drv = &intel_idle_driver;
+ intel_idle_state_table_update();
+
drv->state_count = 1;
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
- int num_substates, mwait_hint, mwait_cstate, mwait_substate;
+ int num_substates, mwait_hint, mwait_cstate;
if (cpuidle_state_table[cstate].enter == NULL)
break;
@@ -597,14 +790,13 @@ static int __init intel_idle_cpuidle_driver_init(void)
mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
- mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
- /* does the state exist in CPUID.MWAIT? */
+ /* number of sub-states for this state in CPUID.MWAIT */
num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
& MWAIT_SUBSTATE_MASK;
- /* if sub-state in table is not enumerated by CPUID */
- if ((mwait_substate + 1) > num_substates)
+ /* if NO sub-states for this state in CPUID, skip it */
+ if (num_substates == 0)
continue;
if (((mwait_cstate + 1) > 2) &&
@@ -681,14 +873,19 @@ static int __init intel_idle_init(void)
if (intel_idle_cpuidle_devices == NULL)
return -ENOMEM;
+ cpu_notifier_register_begin();
+
for_each_online_cpu(i) {
retval = intel_idle_cpu_init(i);
if (retval) {
+ cpu_notifier_register_done();
cpuidle_unregister_driver(&intel_idle_driver);
return retval;
}
}
- register_cpu_notifier(&cpu_hotplug_notifier);
+ __register_cpu_notifier(&cpu_hotplug_notifier);
+
+ cpu_notifier_register_done();
return 0;
}
@@ -698,10 +895,13 @@ static void __exit intel_idle_exit(void)
intel_idle_cpuidle_devices_uninit();
cpuidle_unregister_driver(&intel_idle_driver);
+ cpu_notifier_register_begin();
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
- unregister_cpu_notifier(&cpu_hotplug_notifier);
+ __unregister_cpu_notifier(&cpu_hotplug_notifier);
+
+ cpu_notifier_register_done();
return;
}
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index bfec313492b3..a7e68c81f89d 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -451,9 +451,9 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##_axis, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = AXIS_##_axis, \
.scan_type = { \
.sign = 's', \
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 2209f28441e9..d86196cfe4b4 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -155,6 +155,16 @@ config MCP3422
This driver can also be built as a module. If so, the module will be
called mcp3422.
+config MEN_Z188_ADC
+ tristate "MEN 16z188 ADC IP Core support"
+ depends on MCB
+ help
+ Say yes here to enable support for the MEN 16z188 ADC IP-Core on an
+ MCB carrier.
+
+ This driver can also be built as a module. If so, the module will be
+ called men_z188_adc.
+
config NAU7802
tristate "Nuvoton NAU7802 ADC driver"
depends on I2C
@@ -183,6 +193,16 @@ config TI_AM335X_ADC
Say yes here to build support for Texas Instruments ADC
driver which is also a MFD client.
+config TWL4030_MADC
+ tristate "TWL4030 MADC (Monitoring A/D Converter)"
+ depends on TWL4030_CORE
+ help
+ This driver provides support for Triton TWL4030-MADC. The
+ driver supports both RT and SW conversion methods.
+
+ This driver can also be built as a module. If so, the module will be
+ called twl4030-madc.
+
config TWL6030_GPADC
tristate "TWL6030 GPADC (General Purpose A/D Converter) Support"
depends on TWL4030_CORE
@@ -197,6 +217,16 @@ config TWL6030_GPADC
This driver can also be built as a module. If so, the module will be
called twl6030-gpadc.
+config VF610_ADC
+ tristate "Freescale vf610 ADC driver"
+ depends on OF
+ help
+ Say yes here to add support for the Vybrid board analog-to-digital
+ converter. Since the same IP is used on i.MX6SLX, the driver also
+ supports i.MX6SLX.
+
+ This driver can also be built as a module. If so, the module will be
+ called vf610_adc.
+
config VIPERBOARD_ADC
tristate "Viperboard ADC support"
depends on MFD_VIPERBOARD && USB
@@ -204,4 +234,17 @@ config VIPERBOARD_ADC
Say yes here to access the ADC part of the Nano River
Technologies Viperboard.
+config XILINX_XADC
+ tristate "Xilinx XADC driver"
+ depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+ depends on HAS_IOMEM
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to have support for the Xilinx XADC. The driver supports
+ both the ZYNQ interface to the XADC and the AXI-XADC interface.
+
+ The driver can also be built as a module. If so, the module will be
+ called xilinx-xadc.
+
endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index ba9a10a24cd0..ab346d88c688 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -17,8 +17,13 @@ obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MCP320X) += mcp320x.o
obj-$(CONFIG_MCP3422) += mcp3422.o
+obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
+obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL6030_GPADC) += twl6030-gpadc.o
+obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
+xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
+obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 360259266d4f..9cf3229a7272 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -8,17 +8,11 @@
* based on linux/drivers/acron/char/pcf8583.c
* Copyright (C) 2000 Russell King
*
+ * Driver for max1363 and similar chips.
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * max1363.c
- *
- * Partial support for max1363 and similar chips.
- *
- * Not currently implemented.
- *
- * - Control of internal reference.
*/
#include <linux/interrupt.h>
@@ -1253,7 +1247,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11604] = {
.bits = 8,
- .int_vref_mv = 4098,
+ .int_vref_mv = 4096,
.mode_list = max1238_mode_list,
.num_modes = ARRAY_SIZE(max1238_mode_list),
.default_mode = s0to11,
@@ -1313,7 +1307,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11610] = {
.bits = 10,
- .int_vref_mv = 4098,
+ .int_vref_mv = 4096,
.mode_list = max1238_mode_list,
.num_modes = ARRAY_SIZE(max1238_mode_list),
.default_mode = s0to11,
@@ -1373,7 +1367,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11616] = {
.bits = 12,
- .int_vref_mv = 4098,
+ .int_vref_mv = 4096,
.mode_list = max1238_mode_list,
.num_modes = ARRAY_SIZE(max1238_mode_list),
.default_mode = s0to11,
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
new file mode 100644
index 000000000000..6989c16aec2b
--- /dev/null
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -0,0 +1,172 @@
+/*
+ * MEN 16z188 Analog to Digital Converter
+ *
+ * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
+ * Author: Johannes Thumshirn <johannes.thumshirn@men.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mcb.h>
+#include <linux/io.h>
+#include <linux/iio/iio.h>
+
+#define Z188_ADC_MAX_CHAN 8
+#define Z188_ADC_GAIN 0x0700000
+#define Z188_MODE_VOLTAGE BIT(27)
+#define Z188_CFG_AUTO 0x1
+#define Z188_CTRL_REG 0x40
+
+#define ADC_DATA(x) (((x) >> 2) & 0x7ffffc)
+#define ADC_OVR(x) ((x) & 0x1)
+
+struct z188_adc {
+ struct resource *mem;
+ void __iomem *base;
+};
+
+#define Z188_ADC_CHANNEL(idx) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+}
+
+static const struct iio_chan_spec z188_adc_iio_channels[] = {
+ Z188_ADC_CHANNEL(0),
+ Z188_ADC_CHANNEL(1),
+ Z188_ADC_CHANNEL(2),
+ Z188_ADC_CHANNEL(3),
+ Z188_ADC_CHANNEL(4),
+ Z188_ADC_CHANNEL(5),
+ Z188_ADC_CHANNEL(6),
+ Z188_ADC_CHANNEL(7),
+};
+
+static int z188_iio_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long info)
+{
+ struct z188_adc *adc = iio_priv(iio_dev);
+ int ret;
+ u16 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ tmp = readw(adc->base + chan->channel * 4);
+
+ if (ADC_OVR(tmp)) {
+ dev_info(&iio_dev->dev,
+ "Oversampling error on ADC channel %d\n",
+ chan->channel);
+ return -EIO;
+ }
+ *val = ADC_DATA(tmp);
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct iio_info z188_adc_info = {
+ .read_raw = &z188_iio_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static void men_z188_config_channels(void __iomem *addr)
+{
+ int i;
+ u32 cfg;
+ u32 ctl;
+
+ ctl = readl(addr + Z188_CTRL_REG);
+ ctl |= Z188_CFG_AUTO;
+ writel(ctl, addr + Z188_CTRL_REG);
+
+ for (i = 0; i < Z188_ADC_MAX_CHAN; i++) {
+ cfg = readl(addr + i);
+ cfg &= ~Z188_ADC_GAIN;
+ cfg |= Z188_MODE_VOLTAGE;
+ writel(cfg, addr + i);
+ }
+}
+
+static int men_z188_probe(struct mcb_device *dev,
+ const struct mcb_device_id *id)
+{
+ struct z188_adc *adc;
+ struct iio_dev *indio_dev;
+ struct resource *mem;
+
+ indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ indio_dev->name = "z188-adc";
+ indio_dev->dev.parent = &dev->dev;
+ indio_dev->info = &z188_adc_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = z188_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(z188_adc_iio_channels);
+
+ mem = mcb_request_mem(dev, "z188-adc");
+ if (!mem)
+ return -ENOMEM;
+
+ adc->base = ioremap(mem->start, resource_size(mem));
+ if (adc->base == NULL)
+ goto err;
+
+ men_z188_config_channels(adc->base);
+
+ adc->mem = mem;
+ mcb_set_drvdata(dev, indio_dev);
+
+ return iio_device_register(indio_dev);
+
+err:
+ mcb_release_mem(mem);
+ return -ENXIO;
+}
+
+static void men_z188_remove(struct mcb_device *dev)
+{
+ struct iio_dev *indio_dev = mcb_get_drvdata(dev);
+ struct z188_adc *adc = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iounmap(adc->base);
+ mcb_release_mem(adc->mem);
+}
+
+static const struct mcb_device_id men_z188_ids[] = {
+ { .device = 0xbc },
+};
+MODULE_DEVICE_TABLE(mcb, men_z188_ids);
+
+static struct mcb_driver men_z188_driver = {
+ .driver = {
+ .name = "z188-adc",
+ .owner = THIS_MODULE,
+ },
+ .probe = men_z188_probe,
+ .remove = men_z188_remove,
+ .id_table = men_z188_ids,
+};
+module_mcb_driver(men_z188_driver);
+
+MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IIO ADC driver for MEN 16z188 ADC Core");
+MODULE_ALIAS("mcb:16z188");
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 31e786e3999b..a4db3026bec6 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -13,7 +13,6 @@
* GNU General Public License for more details.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 4c583e471339..7de1c4c87942 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -29,7 +29,6 @@
*
*/
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -47,20 +46,84 @@
#include <linux/gfp.h>
#include <linux/err.h>
-/*
+#include <linux/iio/iio.h>
+
+/**
* struct twl4030_madc_data - a container for madc info
- * @dev - pointer to device structure for madc
- * @lock - mutex protecting this data structure
- * @requests - Array of request struct corresponding to SW1, SW2 and RT
- * @imr - Interrupt mask register of MADC
- * @isr - Interrupt status register of MADC
+ * @dev: Pointer to device structure for madc
+ * @lock: Mutex protecting this data structure
+ * @requests: Array of request struct corresponding to SW1, SW2 and RT
+ * @use_second_irq: IRQ selection (main or co-processor)
+ * @imr: Interrupt mask register of MADC
+ * @isr: Interrupt status register of MADC
*/
struct twl4030_madc_data {
struct device *dev;
struct mutex lock; /* mutex protecting this data structure */
struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];
- int imr;
- int isr;
+ bool use_second_irq;
+ u8 imr;
+ u8 isr;
+};
+
+static int twl4030_madc_read(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct twl4030_madc_data *madc = iio_priv(iio_dev);
+ struct twl4030_madc_request req;
+ int ret;
+
+ req.method = madc->use_second_irq ? TWL4030_MADC_SW2 : TWL4030_MADC_SW1;
+
+ req.channels = BIT(chan->channel);
+ req.active = false;
+ req.func_cb = NULL;
+ req.type = TWL4030_MADC_WAIT;
+ req.raw = !(mask == IIO_CHAN_INFO_PROCESSED);
+ req.do_avg = (mask == IIO_CHAN_INFO_AVERAGE_RAW);
+
+ ret = twl4030_madc_conversion(&req);
+ if (ret < 0)
+ return ret;
+
+ *val = req.rbuf[chan->channel];
+
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info twl4030_madc_iio_info = {
+ .read_raw = &twl4030_madc_read,
+ .driver_module = THIS_MODULE,
+};
+
+#define TWL4030_ADC_CHANNEL(_channel, _type, _name) { \
+ .type = _type, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_AVERAGE_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED), \
+ .datasheet_name = _name, \
+ .indexed = 1, \
+}
+
+static const struct iio_chan_spec twl4030_madc_iio_channels[] = {
+ TWL4030_ADC_CHANNEL(0, IIO_VOLTAGE, "ADCIN0"),
+ TWL4030_ADC_CHANNEL(1, IIO_TEMP, "ADCIN1"),
+ TWL4030_ADC_CHANNEL(2, IIO_VOLTAGE, "ADCIN2"),
+ TWL4030_ADC_CHANNEL(3, IIO_VOLTAGE, "ADCIN3"),
+ TWL4030_ADC_CHANNEL(4, IIO_VOLTAGE, "ADCIN4"),
+ TWL4030_ADC_CHANNEL(5, IIO_VOLTAGE, "ADCIN5"),
+ TWL4030_ADC_CHANNEL(6, IIO_VOLTAGE, "ADCIN6"),
+ TWL4030_ADC_CHANNEL(7, IIO_VOLTAGE, "ADCIN7"),
+ TWL4030_ADC_CHANNEL(8, IIO_VOLTAGE, "ADCIN8"),
+ TWL4030_ADC_CHANNEL(9, IIO_VOLTAGE, "ADCIN9"),
+ TWL4030_ADC_CHANNEL(10, IIO_CURRENT, "ADCIN10"),
+ TWL4030_ADC_CHANNEL(11, IIO_VOLTAGE, "ADCIN11"),
+ TWL4030_ADC_CHANNEL(12, IIO_VOLTAGE, "ADCIN12"),
+ TWL4030_ADC_CHANNEL(13, IIO_VOLTAGE, "ADCIN13"),
+ TWL4030_ADC_CHANNEL(14, IIO_VOLTAGE, "ADCIN14"),
+ TWL4030_ADC_CHANNEL(15, IIO_VOLTAGE, "ADCIN15"),
};
static struct twl4030_madc_data *twl4030_madc;
@@ -91,17 +154,16 @@ twl4030_divider_ratios[16] = {
};
-/*
- * Conversion table from -3 to 55 degree Celcius
- */
-static int therm_tbl[] = {
-30800, 29500, 28300, 27100,
-26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900,
-17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100,
-11600, 11200, 10800, 10400, 10000, 9630, 9280, 8950, 8620, 8310,
-8020, 7730, 7460, 7200, 6950, 6710, 6470, 6250, 6040, 5830,
-5640, 5450, 5260, 5090, 4920, 4760, 4600, 4450, 4310, 4170,
-4040, 3910, 3790, 3670, 3550
+/* Conversion table from -3 to 55 degrees Celsius */
+static int twl4030_therm_tbl[] = {
+ 30800, 29500, 28300, 27100,
+ 26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700,
+ 17900, 17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100,
+ 12600, 12100, 11600, 11200, 10800, 10400, 10000, 9630, 9280,
+ 8950, 8620, 8310, 8020, 7730, 7460, 7200, 6950, 6710,
+ 6470, 6250, 6040, 5830, 5640, 5450, 5260, 5090, 4920,
+ 4760, 4600, 4450, 4310, 4170, 4040, 3910, 3790, 3670,
+ 3550
};
/*
@@ -133,37 +195,32 @@ const struct twl4030_madc_conversion_method twl4030_conversion_methods[] = {
},
};
-/*
- * Function to read a particular channel value.
- * @madc - pointer to struct twl4030_madc_data
- * @reg - lsb of ADC Channel
- * If the i2c read fails it returns an error else returns 0.
+/**
+ * twl4030_madc_channel_raw_read() - Function to read a particular channel value
+ * @madc: pointer to struct twl4030_madc_data
+ * @reg: lsb of ADC Channel
+ *
+ * Return: 0 on success, an error code otherwise.
*/
static int twl4030_madc_channel_raw_read(struct twl4030_madc_data *madc, u8 reg)
{
- u8 msb, lsb;
+ u16 val;
int ret;
/*
* For each ADC channel, we have MSB and LSB register pair. MSB address
* is always LSB address+1. reg parameter is the address of LSB register
*/
- ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &msb, reg + 1);
+ ret = twl_i2c_read_u16(TWL4030_MODULE_MADC, &val, reg);
if (ret) {
- dev_err(madc->dev, "unable to read MSB register 0x%X\n",
- reg + 1);
- return ret;
- }
- ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &lsb, reg);
- if (ret) {
- dev_err(madc->dev, "unable to read LSB register 0x%X\n", reg);
+ dev_err(madc->dev, "unable to read register 0x%X\n", reg);
return ret;
}
- return (int)(((msb << 8) | lsb) >> 6);
+ return (int)(val >> 6);
}
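On the raw-read change above: the MADC conversion result is left-justified across the LSB/MSB register pair, so the 10-bit value is recovered with a right shift by 6 whether the pair is read byte by byte or through the 16-bit helper (assuming, as the unchanged shift implies, that the helper places the byte at the LSB address in the low byte). A throwaway user-space check with arbitrary sample bytes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* arbitrary sample register contents: LSB at reg, MSB at reg + 1 */
	uint8_t lsb = 0x80, msb = 0x93;

	/* old code: combine the two u8 reads, then drop the 6 pad bits */
	int old_way = ((msb << 8) | lsb) >> 6;

	/* new code: one 16-bit read (LSB assumed in the low byte), same shift */
	uint16_t val = (uint16_t)((msb << 8) | lsb);
	int new_way = val >> 6;

	assert(old_way == new_way);
	printf("10-bit conversion result: %d\n", new_way);	/* 590 */
	return 0;
}
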
/*
- * Return battery temperature
+ * Return battery temperature in degrees Celsius
* Or < 0 on failure.
*/
static int twl4030battery_temperature(int raw_volt)
@@ -172,18 +229,18 @@ static int twl4030battery_temperature(int raw_volt)
int temp, curr, volt, res, ret;
volt = (raw_volt * TEMP_STEP_SIZE) / TEMP_PSR_R;
- /* Getting and calculating the supply current in micro ampers */
+ /* Getting and calculating the supply current in micro amperes */
ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val,
REG_BCICTL2);
if (ret < 0)
return ret;
+
curr = ((val & TWL4030_BCI_ITHEN) + 1) * 10;
/* Getting and calculating the thermistor resistance in ohms */
res = volt * 1000 / curr;
/* calculating temperature */
for (temp = 58; temp >= 0; temp--) {
- int actual = therm_tbl[temp];
-
+ int actual = twl4030_therm_tbl[temp];
if ((actual - res) >= 0)
break;
}
@@ -205,11 +262,12 @@ static int twl4030battery_current(int raw_volt)
else /* slope of 0.88 mV/mA */
return (raw_volt * CURR_STEP_SIZE) / CURR_PSR_R2;
}
+
/*
* Function to read channel values
* @madc - pointer to twl4030_madc_data struct
* @reg_base - Base address of the first channel
- * @Channels - 16 bit bitmap. If the bit is set, channel value is read
+ * @Channels - 16 bit bitmap. If the bit is set, channel's value is read
* @buf - The channel values are stored here. if read fails error
* @raw - Return raw values without conversion
* value is stored
@@ -220,17 +278,17 @@ static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
long channels, int *buf,
bool raw)
{
- int count = 0, count_req = 0, i;
+ int count = 0;
+ int i;
u8 reg;
for_each_set_bit(i, &channels, TWL4030_MADC_MAX_CHANNELS) {
- reg = reg_base + 2 * i;
+ reg = reg_base + (2 * i);
buf[i] = twl4030_madc_channel_raw_read(madc, reg);
if (buf[i] < 0) {
- dev_err(madc->dev,
- "Unable to read register 0x%X\n", reg);
- count_req++;
- continue;
+ dev_err(madc->dev, "Unable to read register 0x%X\n",
+ reg);
+ return buf[i];
}
if (raw) {
count++;
@@ -241,7 +299,7 @@ static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
buf[i] = twl4030battery_current(buf[i]);
if (buf[i] < 0) {
dev_err(madc->dev, "err reading current\n");
- count_req++;
+ return buf[i];
} else {
count++;
buf[i] = buf[i] - 750;
@@ -251,7 +309,7 @@ static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
buf[i] = twl4030battery_temperature(buf[i]);
if (buf[i] < 0) {
dev_err(madc->dev, "err reading temperature\n");
- count_req++;
+ return buf[i];
} else {
buf[i] -= 3;
count++;
@@ -272,8 +330,6 @@ static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
twl4030_divider_ratios[i].numerator);
}
}
- if (count_req)
- dev_err(madc->dev, "%d channel conversion failed\n", count_req);
return count;
}
@@ -297,13 +353,13 @@ static int twl4030_madc_enable_irq(struct twl4030_madc_data *madc, u8 id)
madc->imr);
return ret;
}
+
val &= ~(1 << id);
ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr);
if (ret) {
dev_err(madc->dev,
"unable to write imr register 0x%X\n", madc->imr);
return ret;
-
}
return 0;
@@ -366,7 +422,7 @@ static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
continue;
ret = twl4030_madc_disable_irq(madc, i);
if (ret < 0)
- dev_dbg(madc->dev, "Disable interrupt failed%d\n", i);
+ dev_dbg(madc->dev, "Disable interrupt failed %d\n", i);
madc->requests[i].result_pending = 1;
}
for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
@@ -448,21 +504,17 @@ static int twl4030_madc_start_conversion(struct twl4030_madc_data *madc,
{
const struct twl4030_madc_conversion_method *method;
int ret = 0;
+
+ if (conv_method != TWL4030_MADC_SW1 && conv_method != TWL4030_MADC_SW2)
+ return -ENOTSUPP;
+
method = &twl4030_conversion_methods[conv_method];
- switch (conv_method) {
- case TWL4030_MADC_SW1:
- case TWL4030_MADC_SW2:
- ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
- TWL4030_MADC_SW_START, method->ctrl);
- if (ret) {
- dev_err(madc->dev,
- "unable to write ctrl register 0x%X\n",
- method->ctrl);
- return ret;
- }
- break;
- default:
- break;
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, TWL4030_MADC_SW_START,
+ method->ctrl);
+ if (ret) {
+ dev_err(madc->dev, "unable to write ctrl register 0x%X\n",
+ method->ctrl);
+ return ret;
}
return 0;
@@ -513,7 +565,6 @@ static int twl4030_madc_wait_conversion_ready(struct twl4030_madc_data *madc,
int twl4030_madc_conversion(struct twl4030_madc_request *req)
{
const struct twl4030_madc_conversion_method *method;
- u8 ch_msb, ch_lsb;
int ret;
if (!req || !twl4030_madc)
@@ -529,38 +580,22 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
ret = -EBUSY;
goto out;
}
- ch_msb = (req->channels >> 8) & 0xff;
- ch_lsb = req->channels & 0xff;
method = &twl4030_conversion_methods[req->method];
/* Select channels to be converted */
- ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_msb, method->sel + 1);
- if (ret) {
- dev_err(twl4030_madc->dev,
- "unable to write sel register 0x%X\n", method->sel + 1);
- goto out;
- }
- ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel);
+ ret = twl_i2c_write_u16(TWL4030_MODULE_MADC, req->channels, method->sel);
if (ret) {
dev_err(twl4030_madc->dev,
- "unable to write sel register 0x%X\n", method->sel + 1);
+ "unable to write sel register 0x%X\n", method->sel);
goto out;
}
/* Select averaging for all channels if do_avg is set */
if (req->do_avg) {
- ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
- ch_msb, method->avg + 1);
+ ret = twl_i2c_write_u16(TWL4030_MODULE_MADC, req->channels,
+ method->avg);
if (ret) {
dev_err(twl4030_madc->dev,
"unable to write avg register 0x%X\n",
- method->avg + 1);
- goto out;
- }
- ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
- ch_lsb, method->avg);
- if (ret) {
- dev_err(twl4030_madc->dev,
- "unable to write sel reg 0x%X\n",
- method->sel + 1);
+ method->avg);
goto out;
}
}
@@ -601,10 +636,6 @@ out:
}
EXPORT_SYMBOL_GPL(twl4030_madc_conversion);
-/*
- * Return channel value
- * Or < 0 on failure.
- */
int twl4030_get_madc_conversion(int channel_no)
{
struct twl4030_madc_request req;
@@ -625,20 +656,25 @@ int twl4030_get_madc_conversion(int channel_no)
}
EXPORT_SYMBOL_GPL(twl4030_get_madc_conversion);
-/*
+/**
+ * twl4030_madc_set_current_generator() - setup bias current
+ *
+ * @madc: pointer to twl4030_madc_data struct
+ * @chan: can be one of the two values:
+ * TWL4030_BCI_ITHEN
+ * Enables bias current for main battery type reading
+ * TWL4030_BCI_TYPEN
+ * Enables bias current for main battery temperature sensing
+ * @on: enable or disable chan.
+ *
* Function to enable or disable bias current for
* main battery type reading or temperature sensing
- * @madc - pointer to twl4030_madc_data struct
- * @chan - can be one of the two values
- * TWL4030_BCI_ITHEN - Enables bias current for main battery type reading
- * TWL4030_BCI_TYPEN - Enables bias current for main battery temperature
- * sensing
- * @on - enable or disable chan.
*/
static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
int chan, int on)
{
int ret;
+ int regmask;
u8 regval;
ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
@@ -648,10 +684,13 @@ static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
TWL4030_BCI_BCICTL1);
return ret;
}
+
+ regmask = chan ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
if (on)
- regval |= chan ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
+ regval |= regmask;
else
- regval &= chan ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN;
+ regval &= ~regmask;
+
ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
regval, TWL4030_BCI_BCICTL1);
if (ret) {
@@ -666,7 +705,7 @@ static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
/*
* Function that sets MADC software power on bit to enable MADC
* @madc - pointer to twl4030_madc_data struct
- * @on - Enable or disable MADC software powen on bit.
+ * @on - Enable or disable MADC software power on bit.
* returns error if i2c read/write fails else 0
*/
static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
@@ -702,31 +741,52 @@ static int twl4030_madc_probe(struct platform_device *pdev)
{
struct twl4030_madc_data *madc;
struct twl4030_madc_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret;
+ struct device_node *np = pdev->dev.of_node;
+ int irq, ret;
u8 regval;
+ struct iio_dev *iio_dev = NULL;
- if (!pdata) {
- dev_err(&pdev->dev, "platform_data not available\n");
+ if (!pdata && !np) {
+ dev_err(&pdev->dev, "neither platform data nor Device Tree node available\n");
return -EINVAL;
}
- madc = kzalloc(sizeof(*madc), GFP_KERNEL);
- if (!madc)
+
+ iio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*madc));
+ if (!iio_dev) {
+ dev_err(&pdev->dev, "failed allocating iio device\n");
return -ENOMEM;
+ }
+ madc = iio_priv(iio_dev);
madc->dev = &pdev->dev;
+ iio_dev->name = dev_name(&pdev->dev);
+ iio_dev->dev.parent = &pdev->dev;
+ iio_dev->dev.of_node = pdev->dev.of_node;
+ iio_dev->info = &twl4030_madc_iio_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+ iio_dev->channels = twl4030_madc_iio_channels;
+ iio_dev->num_channels = ARRAY_SIZE(twl4030_madc_iio_channels);
+
/*
* Phoenix provides 2 interrupt lines. The first one is connected to
* the OMAP. The other one can be connected to the other processor such
* as modem. Hence two separate ISR and IMR registers.
*/
- madc->imr = (pdata->irq_line == 1) ?
- TWL4030_MADC_IMR1 : TWL4030_MADC_IMR2;
- madc->isr = (pdata->irq_line == 1) ?
- TWL4030_MADC_ISR1 : TWL4030_MADC_ISR2;
+ if (pdata)
+ madc->use_second_irq = (pdata->irq_line != 1);
+ else
+ madc->use_second_irq = of_property_read_bool(np,
+ "ti,system-uses-second-madc-irq");
+
+ madc->imr = madc->use_second_irq ? TWL4030_MADC_IMR2 :
+ TWL4030_MADC_IMR1;
+ madc->isr = madc->use_second_irq ? TWL4030_MADC_ISR2 :
+ TWL4030_MADC_ISR1;
+
ret = twl4030_madc_set_power(madc, 1);
if (ret < 0)
- goto err_power;
+ return ret;
ret = twl4030_madc_set_current_generator(madc, 0, 1);
if (ret < 0)
goto err_current_generator;
@@ -768,46 +828,63 @@ static int twl4030_madc_probe(struct platform_device *pdev)
}
}
- platform_set_drvdata(pdev, madc);
+ platform_set_drvdata(pdev, iio_dev);
mutex_init(&madc->lock);
- ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL,
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
twl4030_madc_threaded_irq_handler,
IRQF_TRIGGER_RISING, "twl4030_madc", madc);
if (ret) {
- dev_dbg(&pdev->dev, "could not request irq\n");
+ dev_err(&pdev->dev, "could not request irq\n");
goto err_i2c;
}
twl4030_madc = madc;
+
+ ret = iio_device_register(iio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register iio device\n");
+ goto err_i2c;
+ }
+
return 0;
+
err_i2c:
twl4030_madc_set_current_generator(madc, 0, 0);
err_current_generator:
twl4030_madc_set_power(madc, 0);
-err_power:
- kfree(madc);
-
return ret;
}
static int twl4030_madc_remove(struct platform_device *pdev)
{
- struct twl4030_madc_data *madc = platform_get_drvdata(pdev);
+ struct iio_dev *iio_dev = platform_get_drvdata(pdev);
+ struct twl4030_madc_data *madc = iio_priv(iio_dev);
+
+ iio_device_unregister(iio_dev);
- free_irq(platform_get_irq(pdev, 0), madc);
twl4030_madc_set_current_generator(madc, 0, 0);
twl4030_madc_set_power(madc, 0);
- kfree(madc);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id twl_madc_of_match[] = {
+ { .compatible = "ti,twl4030-madc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl_madc_of_match);
+#endif
+
static struct platform_driver twl4030_madc_driver = {
.probe = twl4030_madc_probe,
.remove = twl4030_madc_remove,
.driver = {
.name = "twl4030_madc",
.owner = THIS_MODULE,
- },
+ .of_match_table = of_match_ptr(twl_madc_of_match),
+ },
};
module_platform_driver(twl4030_madc_driver);
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index 53a24ebb92c3..15282f148b3b 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -28,7 +28,6 @@
* 02110-1301 USA
*
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
new file mode 100644
index 000000000000..44799eb5930e
--- /dev/null
+++ b/drivers/iio/adc/vf610_adc.c
@@ -0,0 +1,711 @@
+/*
+ * Freescale Vybrid vf610 ADC driver
+ *
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of_platform.h>
+#include <linux/err.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/driver.h>
+
+/* This will be the driver name the kernel reports */
+#define DRIVER_NAME "vf610-adc"
+
+/* Vybrid/IMX ADC registers */
+#define VF610_REG_ADC_HC0 0x00
+#define VF610_REG_ADC_HC1 0x04
+#define VF610_REG_ADC_HS 0x08
+#define VF610_REG_ADC_R0 0x0c
+#define VF610_REG_ADC_R1 0x10
+#define VF610_REG_ADC_CFG 0x14
+#define VF610_REG_ADC_GC 0x18
+#define VF610_REG_ADC_GS 0x1c
+#define VF610_REG_ADC_CV 0x20
+#define VF610_REG_ADC_OFS 0x24
+#define VF610_REG_ADC_CAL 0x28
+#define VF610_REG_ADC_PCTL 0x30
+
+/* Configuration register field define */
+#define VF610_ADC_MODE_BIT8 0x00
+#define VF610_ADC_MODE_BIT10 0x04
+#define VF610_ADC_MODE_BIT12 0x08
+#define VF610_ADC_MODE_MASK 0x0c
+#define VF610_ADC_BUSCLK2_SEL 0x01
+#define VF610_ADC_ALTCLK_SEL 0x02
+#define VF610_ADC_ADACK_SEL 0x03
+#define VF610_ADC_ADCCLK_MASK 0x03
+#define VF610_ADC_CLK_DIV2 0x20
+#define VF610_ADC_CLK_DIV4 0x40
+#define VF610_ADC_CLK_DIV8 0x60
+#define VF610_ADC_CLK_MASK 0x60
+#define VF610_ADC_ADLSMP_LONG 0x10
+#define VF610_ADC_ADSTS_MASK 0x300
+#define VF610_ADC_ADLPC_EN 0x80
+#define VF610_ADC_ADHSC_EN 0x400
+#define VF610_ADC_REFSEL_VALT 0x100
+#define VF610_ADC_REFSEL_VBG 0x1000
+#define VF610_ADC_ADTRG_HARD 0x2000
+#define VF610_ADC_AVGS_8 0x4000
+#define VF610_ADC_AVGS_16 0x8000
+#define VF610_ADC_AVGS_32 0xC000
+#define VF610_ADC_AVGS_MASK 0xC000
+#define VF610_ADC_OVWREN 0x10000
+
+/* General control register field define */
+#define VF610_ADC_ADACKEN 0x1
+#define VF610_ADC_DMAEN 0x2
+#define VF610_ADC_ACREN 0x4
+#define VF610_ADC_ACFGT 0x8
+#define VF610_ADC_ACFE 0x10
+#define VF610_ADC_AVGEN 0x20
+#define VF610_ADC_ADCON 0x40
+#define VF610_ADC_CAL 0x80
+
+/* Other field define */
+#define VF610_ADC_ADCHC(x) ((x) & 0xF)
+#define VF610_ADC_AIEN (0x1 << 7)
+#define VF610_ADC_CONV_DISABLE 0x1F
+#define VF610_ADC_HS_COCO0 0x1
+#define VF610_ADC_CALF 0x2
+#define VF610_ADC_TIMEOUT msecs_to_jiffies(100)
+
+enum clk_sel {
+ VF610_ADCIOC_BUSCLK_SET,
+ VF610_ADCIOC_ALTCLK_SET,
+ VF610_ADCIOC_ADACK_SET,
+};
+
+enum vol_ref {
+ VF610_ADCIOC_VR_VREF_SET,
+ VF610_ADCIOC_VR_VALT_SET,
+ VF610_ADCIOC_VR_VBG_SET,
+};
+
+enum average_sel {
+ VF610_ADC_SAMPLE_1,
+ VF610_ADC_SAMPLE_4,
+ VF610_ADC_SAMPLE_8,
+ VF610_ADC_SAMPLE_16,
+ VF610_ADC_SAMPLE_32,
+};
+
+struct vf610_adc_feature {
+ enum clk_sel clk_sel;
+ enum vol_ref vol_ref;
+
+ int clk_div;
+ int sample_rate;
+ int res_mode;
+
+ bool lpm;
+ bool calibration;
+ bool ovwren;
+};
+
+struct vf610_adc {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+
+ u32 vref_uv;
+ u32 value;
+ struct regulator *vref;
+ struct vf610_adc_feature adc_feature;
+
+ struct completion completion;
+};
+
+#define VF610_ADC_CHAN(_idx, _chan_type) { \
+ .type = (_chan_type), \
+ .indexed = 1, \
+ .channel = (_idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+}
+
+static const struct iio_chan_spec vf610_adc_iio_channels[] = {
+ VF610_ADC_CHAN(0, IIO_VOLTAGE),
+ VF610_ADC_CHAN(1, IIO_VOLTAGE),
+ VF610_ADC_CHAN(2, IIO_VOLTAGE),
+ VF610_ADC_CHAN(3, IIO_VOLTAGE),
+ VF610_ADC_CHAN(4, IIO_VOLTAGE),
+ VF610_ADC_CHAN(5, IIO_VOLTAGE),
+ VF610_ADC_CHAN(6, IIO_VOLTAGE),
+ VF610_ADC_CHAN(7, IIO_VOLTAGE),
+ VF610_ADC_CHAN(8, IIO_VOLTAGE),
+ VF610_ADC_CHAN(9, IIO_VOLTAGE),
+ VF610_ADC_CHAN(10, IIO_VOLTAGE),
+ VF610_ADC_CHAN(11, IIO_VOLTAGE),
+ VF610_ADC_CHAN(12, IIO_VOLTAGE),
+ VF610_ADC_CHAN(13, IIO_VOLTAGE),
+ VF610_ADC_CHAN(14, IIO_VOLTAGE),
+ VF610_ADC_CHAN(15, IIO_VOLTAGE),
+ /* sentinel */
+};
+
+/*
+ * ADC sample frequency, unit is ADCK cycles.
+ * ADC clk source is ipg clock, which is the same as bus clock.
+ *
+ * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
+ * SFCAdder: fixed to 6 ADCK cycles
+ * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
+ * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
+ * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
+ *
+ * By default, 12 bit resolution mode is enabled and the clock source is
+ * set to the ipg clock, which gives the frequency list below:
+ */
+static const u32 vf610_sample_freq_avail[5] =
+{1941176, 559332, 286957, 145374, 73171};
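The vf610_sample_freq_avail[] values above follow from the conversion-time formula in the comment: with 12 bit mode (BCT = 25), the 3-cycle long-sample adder and the fixed 6-cycle SFC adder, an assumed 66 MHz ipg/bus clock (board dependent, an assumption here) reproduces the table to within rounding. A quick stand-alone check:

#include <stdio.h>

int main(void)
{
	/* assumed ipg/bus ADCK rate; the exact value is board dependent */
	const unsigned long adck_hz = 66000000;
	const unsigned int avg[] = { 1, 4, 8, 16, 32 };
	unsigned int i;

	for (i = 0; i < 5; i++) {
		/* SFCAdder + AverageNum * (BCT + LSTAdder), 12 bit mode */
		unsigned long cycles = 6 + avg[i] * (25 + 3);

		printf("avg %2u -> %lu Hz\n", avg[i], adck_hz / cycles);
	}
	/* prints approximately the vf610_sample_freq_avail[] entries */
	return 0;
}
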
+
+static inline void vf610_adc_cfg_init(struct vf610_adc *info)
+{
+ /* set default Configuration for ADC controller */
+ info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET;
+ info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET;
+
+ info->adc_feature.calibration = true;
+ info->adc_feature.ovwren = true;
+
+ info->adc_feature.clk_div = 1;
+ info->adc_feature.res_mode = 12;
+ info->adc_feature.sample_rate = 1;
+ info->adc_feature.lpm = true;
+}
+
+static void vf610_adc_cfg_post_set(struct vf610_adc *info)
+{
+ struct vf610_adc_feature *adc_feature = &info->adc_feature;
+ int cfg_data = 0;
+ int gc_data = 0;
+
+ switch (adc_feature->clk_sel) {
+ case VF610_ADCIOC_ALTCLK_SET:
+ cfg_data |= VF610_ADC_ALTCLK_SEL;
+ break;
+ case VF610_ADCIOC_ADACK_SET:
+ cfg_data |= VF610_ADC_ADACK_SEL;
+ break;
+ default:
+ break;
+ }
+
+ /* low power set for calibration */
+ cfg_data |= VF610_ADC_ADLPC_EN;
+
+ /* enable high speed for calibration */
+ cfg_data |= VF610_ADC_ADHSC_EN;
+
+ /* voltage reference */
+ switch (adc_feature->vol_ref) {
+ case VF610_ADCIOC_VR_VREF_SET:
+ break;
+ case VF610_ADCIOC_VR_VALT_SET:
+ cfg_data |= VF610_ADC_REFSEL_VALT;
+ break;
+ case VF610_ADCIOC_VR_VBG_SET:
+ cfg_data |= VF610_ADC_REFSEL_VBG;
+ break;
+ default:
+ dev_err(info->dev, "error voltage reference\n");
+ }
+
+ /* data overwrite enable */
+ if (adc_feature->ovwren)
+ cfg_data |= VF610_ADC_OVWREN;
+
+ writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
+ writel(gc_data, info->regs + VF610_REG_ADC_GC);
+}
+
+static void vf610_adc_calibration(struct vf610_adc *info)
+{
+ int adc_gc, hc_cfg;
+ int timeout;
+
+ if (!info->adc_feature.calibration)
+ return;
+
+ /* enable calibration interrupt */
+ hc_cfg = VF610_ADC_AIEN | VF610_ADC_CONV_DISABLE;
+ writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
+
+ adc_gc = readl(info->regs + VF610_REG_ADC_GC);
+ writel(adc_gc | VF610_ADC_CAL, info->regs + VF610_REG_ADC_GC);
+
+ timeout = wait_for_completion_timeout
+ (&info->completion, VF610_ADC_TIMEOUT);
+ if (timeout == 0)
+ dev_err(info->dev, "Timeout for adc calibration\n");
+
+ adc_gc = readl(info->regs + VF610_REG_ADC_GS);
+ if (adc_gc & VF610_ADC_CALF)
+ dev_err(info->dev, "ADC calibration failed\n");
+
+ info->adc_feature.calibration = false;
+}
+
+static void vf610_adc_cfg_set(struct vf610_adc *info)
+{
+ struct vf610_adc_feature *adc_feature = &(info->adc_feature);
+ int cfg_data;
+
+ cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
+
+ /* low power configuration */
+ cfg_data &= ~VF610_ADC_ADLPC_EN;
+ if (adc_feature->lpm)
+ cfg_data |= VF610_ADC_ADLPC_EN;
+
+ /* disable high speed */
+ cfg_data &= ~VF610_ADC_ADHSC_EN;
+
+ writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
+}
+
+static void vf610_adc_sample_set(struct vf610_adc *info)
+{
+ struct vf610_adc_feature *adc_feature = &(info->adc_feature);
+ int cfg_data, gc_data;
+
+ cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
+ gc_data = readl(info->regs + VF610_REG_ADC_GC);
+
+ /* resolution mode */
+ cfg_data &= ~VF610_ADC_MODE_MASK;
+ switch (adc_feature->res_mode) {
+ case 8:
+ cfg_data |= VF610_ADC_MODE_BIT8;
+ break;
+ case 10:
+ cfg_data |= VF610_ADC_MODE_BIT10;
+ break;
+ case 12:
+ cfg_data |= VF610_ADC_MODE_BIT12;
+ break;
+ default:
+ dev_err(info->dev, "error resolution mode\n");
+ break;
+ }
+
+ /* clock select and clock divider */
+ cfg_data &= ~(VF610_ADC_CLK_MASK | VF610_ADC_ADCCLK_MASK);
+ switch (adc_feature->clk_div) {
+ case 1:
+ break;
+ case 2:
+ cfg_data |= VF610_ADC_CLK_DIV2;
+ break;
+ case 4:
+ cfg_data |= VF610_ADC_CLK_DIV4;
+ break;
+ case 8:
+ cfg_data |= VF610_ADC_CLK_DIV8;
+ break;
+ case 16:
+ switch (adc_feature->clk_sel) {
+ case VF610_ADCIOC_BUSCLK_SET:
+ cfg_data |= VF610_ADC_BUSCLK2_SEL | VF610_ADC_CLK_DIV8;
+ break;
+ default:
+ dev_err(info->dev, "error clk divider\n");
+ break;
+ }
+ break;
+ }
+
+ /* Use the short sample mode */
+ cfg_data &= ~(VF610_ADC_ADLSMP_LONG | VF610_ADC_ADSTS_MASK);
+
+ /* update hardware average selection */
+ cfg_data &= ~VF610_ADC_AVGS_MASK;
+ gc_data &= ~VF610_ADC_AVGEN;
+ switch (adc_feature->sample_rate) {
+ case VF610_ADC_SAMPLE_1:
+ break;
+ case VF610_ADC_SAMPLE_4:
+ gc_data |= VF610_ADC_AVGEN;
+ break;
+ case VF610_ADC_SAMPLE_8:
+ gc_data |= VF610_ADC_AVGEN;
+ cfg_data |= VF610_ADC_AVGS_8;
+ break;
+ case VF610_ADC_SAMPLE_16:
+ gc_data |= VF610_ADC_AVGEN;
+ cfg_data |= VF610_ADC_AVGS_16;
+ break;
+ case VF610_ADC_SAMPLE_32:
+ gc_data |= VF610_ADC_AVGEN;
+ cfg_data |= VF610_ADC_AVGS_32;
+ break;
+ default:
+ dev_err(info->dev,
+ "error hardware sample average select\n");
+ }
+
+ writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
+ writel(gc_data, info->regs + VF610_REG_ADC_GC);
+}
+
+static void vf610_adc_hw_init(struct vf610_adc *info)
+{
+ /* CFG: Feature set */
+ vf610_adc_cfg_post_set(info);
+ vf610_adc_sample_set(info);
+
+ /* adc calibration */
+ vf610_adc_calibration(info);
+
+ /* CFG: power and speed set */
+ vf610_adc_cfg_set(info);
+}
+
+static int vf610_adc_read_data(struct vf610_adc *info)
+{
+ int result;
+
+ result = readl(info->regs + VF610_REG_ADC_R0);
+
+ switch (info->adc_feature.res_mode) {
+ case 8:
+ result &= 0xFF;
+ break;
+ case 10:
+ result &= 0x3FF;
+ break;
+ case 12:
+ result &= 0xFFF;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
+{
+ struct vf610_adc *info = (struct vf610_adc *)dev_id;
+ int coco;
+
+ coco = readl(info->regs + VF610_REG_ADC_HS);
+ if (coco & VF610_ADC_HS_COCO0) {
+ info->value = vf610_adc_read_data(info);
+ complete(&info->completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171");
+
+static struct attribute *vf610_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group vf610_attribute_group = {
+ .attrs = vf610_attributes,
+};
+
+static int vf610_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct vf610_adc *info = iio_priv(indio_dev);
+ unsigned int hc_cfg;
+ long ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ reinit_completion(&info->completion);
+
+ hc_cfg = VF610_ADC_ADCHC(chan->channel);
+ hc_cfg |= VF610_ADC_AIEN;
+ writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
+ ret = wait_for_completion_interruptible_timeout
+ (&info->completion, VF610_ADC_TIMEOUT);
+ if (ret == 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return -ETIMEDOUT;
+ }
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+
+ *val = info->value;
+ mutex_unlock(&indio_dev->mlock);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = info->vref_uv / 1000;
+ *val2 = info->adc_feature.res_mode;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = vf610_sample_freq_avail[info->adc_feature.sample_rate];
+ *val2 = 0;
+ return IIO_VAL_INT;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
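Because IIO_CHAN_INFO_SCALE above reports vref in millivolts divided by 2^resolution, a user-space reader converts a sample to millivolts by multiplying in_voltageX_raw with the shared in_voltage_scale attribute. A minimal sketch; the iio:device0 index and channel 0 are illustrative assumptions:

#include <stdio.h>

static double read_sysfs_double(const char *path)
{
	FILE *f = fopen(path, "r");
	double v = 0.0;

	if (f) {
		if (fscanf(f, "%lf", &v) != 1)
			v = 0.0;
		fclose(f);
	}
	return v;
}

int main(void)
{
	/* device index and channel number depend on the system */
	const char *dev = "/sys/bus/iio/devices/iio:device0";
	char path[128];
	double raw, scale;

	snprintf(path, sizeof(path), "%s/in_voltage0_raw", dev);
	raw = read_sysfs_double(path);
	snprintf(path, sizeof(path), "%s/in_voltage_scale", dev);
	scale = read_sysfs_double(path);

	printf("channel 0: %.3f mV\n", raw * scale);
	return 0;
}
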
+
+static int vf610_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct vf610_adc *info = iio_priv(indio_dev);
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ for (i = 0;
+ i < ARRAY_SIZE(vf610_sample_freq_avail);
+ i++)
+ if (val == vf610_sample_freq_avail[i]) {
+ info->adc_feature.sample_rate = i;
+ vf610_adc_sample_set(info);
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int vf610_adc_reg_access(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval)
+{
+ struct vf610_adc *info = iio_priv(indio_dev);
+
+ if ((readval == NULL) ||
+ (!(reg % 4) || (reg > VF610_REG_ADC_PCTL)))
+ return -EINVAL;
+
+ *readval = readl(info->regs + reg);
+
+ return 0;
+}
+
+static const struct iio_info vf610_adc_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &vf610_read_raw,
+ .write_raw = &vf610_write_raw,
+ .debugfs_reg_access = &vf610_adc_reg_access,
+ .attrs = &vf610_attribute_group,
+};
+
+static const struct of_device_id vf610_adc_match[] = {
+ { .compatible = "fsl,vf610-adc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vf610_adc_match);
+
+static int vf610_adc_probe(struct platform_device *pdev)
+{
+ struct vf610_adc *info;
+ struct iio_dev *indio_dev;
+ struct resource *mem;
+ int irq;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct vf610_adc));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "Failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ info = iio_priv(indio_dev);
+ info->dev = &pdev->dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(info->regs))
+ return PTR_ERR(info->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(info->dev, irq,
+ vf610_adc_isr, 0,
+ dev_name(&pdev->dev), info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed requesting irq, irq = %d\n", irq);
+ return ret;
+ }
+
+ info->clk = devm_clk_get(&pdev->dev, "adc");
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed getting clock, err = %ld\n",
+ PTR_ERR(info->clk));
+ ret = PTR_ERR(info->clk);
+ return ret;
+ }
+
+ info->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(info->vref))
+ return PTR_ERR(info->vref);
+
+ ret = regulator_enable(info->vref);
+ if (ret)
+ return ret;
+
+ info->vref_uv = regulator_get_voltage(info->vref);
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ init_completion(&info->completion);
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &vf610_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = vf610_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vf610_adc_iio_channels);
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not prepare or enable the clock.\n");
+ goto error_adc_clk_enable;
+ }
+
+ vf610_adc_cfg_init(info);
+ vf610_adc_hw_init(info);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register the device.\n");
+ goto error_iio_device_register;
+ }
+
+ return 0;
+
+
+error_iio_device_register:
+ clk_disable_unprepare(info->clk);
+error_adc_clk_enable:
+ regulator_disable(info->vref);
+
+ return ret;
+}
+
+static int vf610_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct vf610_adc *info = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(info->vref);
+ clk_disable_unprepare(info->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vf610_adc_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct vf610_adc *info = iio_priv(indio_dev);
+ int hc_cfg;
+
+ /* ADC controller enters stop mode */
+ hc_cfg = readl(info->regs + VF610_REG_ADC_HC0);
+ hc_cfg |= VF610_ADC_CONV_DISABLE;
+ writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
+
+ clk_disable_unprepare(info->clk);
+ regulator_disable(info->vref);
+
+ return 0;
+}
+
+static int vf610_adc_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct vf610_adc *info = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(info->vref);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret)
+ return ret;
+
+ vf610_adc_hw_init(info);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vf610_adc_pm_ops,
+ vf610_adc_suspend,
+ vf610_adc_resume);
+
+static struct platform_driver vf610_adc_driver = {
+ .probe = vf610_adc_probe,
+ .remove = vf610_adc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = vf610_adc_match,
+ .pm = &vf610_adc_pm_ops,
+ },
+};
+
+module_platform_driver(vf610_adc_driver);
+
+MODULE_AUTHOR("Fugang Duan <B38611@freescale.com>");
+MODULE_DESCRIPTION("Freescale VF610 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
index d0add8f9416b..9acf6b6d705b 100644
--- a/drivers/iio/adc/viperboard_adc.c
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -139,8 +139,6 @@ static int vprbrd_adc_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, indio_dev);
-
return 0;
}
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
new file mode 100644
index 000000000000..ab52be29141b
--- /dev/null
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -0,0 +1,1333 @@
+/*
+ * Xilinx XADC driver
+ *
+ * Copyright 2013-2014 Analog Devices Inc.
+ * Author: Lars-Peter Clauen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ *
+ * Documentation for the parts can be found at:
+ * - XADC hardmacro: Xilinx UG480
+ * - ZYNQ XADC interface: Xilinx UG585
+ * - AXI XADC interface: Xilinx PG019
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "xilinx-xadc.h"
+
+static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
+
+/* ZYNQ register definitions */
+#define XADC_ZYNQ_REG_CFG 0x00
+#define XADC_ZYNQ_REG_INTSTS 0x04
+#define XADC_ZYNQ_REG_INTMSK 0x08
+#define XADC_ZYNQ_REG_STATUS 0x0c
+#define XADC_ZYNQ_REG_CFIFO 0x10
+#define XADC_ZYNQ_REG_DFIFO 0x14
+#define XADC_ZYNQ_REG_CTL 0x18
+
+#define XADC_ZYNQ_CFG_ENABLE BIT(31)
+#define XADC_ZYNQ_CFG_CFIFOTH_MASK (0xf << 20)
+#define XADC_ZYNQ_CFG_CFIFOTH_OFFSET 20
+#define XADC_ZYNQ_CFG_DFIFOTH_MASK (0xf << 16)
+#define XADC_ZYNQ_CFG_DFIFOTH_OFFSET 16
+#define XADC_ZYNQ_CFG_WEDGE BIT(13)
+#define XADC_ZYNQ_CFG_REDGE BIT(12)
+#define XADC_ZYNQ_CFG_TCKRATE_MASK (0x3 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV2 (0x0 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV4 (0x1 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV8 (0x2 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV16 (0x3 << 8)
+#define XADC_ZYNQ_CFG_IGAP_MASK 0x1f
+#define XADC_ZYNQ_CFG_IGAP(x) (x)
+
+#define XADC_ZYNQ_INT_CFIFO_LTH BIT(9)
+#define XADC_ZYNQ_INT_DFIFO_GTH BIT(8)
+#define XADC_ZYNQ_INT_ALARM_MASK 0xff
+#define XADC_ZYNQ_INT_ALARM_OFFSET 0
+
+#define XADC_ZYNQ_STATUS_CFIFO_LVL_MASK (0xf << 16)
+#define XADC_ZYNQ_STATUS_CFIFO_LVL_OFFSET 16
+#define XADC_ZYNQ_STATUS_DFIFO_LVL_MASK (0xf << 12)
+#define XADC_ZYNQ_STATUS_DFIFO_LVL_OFFSET 12
+#define XADC_ZYNQ_STATUS_CFIFOF BIT(11)
+#define XADC_ZYNQ_STATUS_CFIFOE BIT(10)
+#define XADC_ZYNQ_STATUS_DFIFOF BIT(9)
+#define XADC_ZYNQ_STATUS_DFIFOE BIT(8)
+#define XADC_ZYNQ_STATUS_OT BIT(7)
+#define XADC_ZYNQ_STATUS_ALM(x) BIT(x)
+
+#define XADC_ZYNQ_CTL_RESET BIT(4)
+
+#define XADC_ZYNQ_CMD_NOP 0x00
+#define XADC_ZYNQ_CMD_READ 0x01
+#define XADC_ZYNQ_CMD_WRITE 0x02
+
+#define XADC_ZYNQ_CMD(cmd, addr, data) (((cmd) << 26) | ((addr) << 16) | (data))
+
+/* AXI register definitions */
+#define XADC_AXI_REG_RESET 0x00
+#define XADC_AXI_REG_STATUS 0x04
+#define XADC_AXI_REG_ALARM_STATUS 0x08
+#define XADC_AXI_REG_CONVST 0x0c
+#define XADC_AXI_REG_XADC_RESET 0x10
+#define XADC_AXI_REG_GIER 0x5c
+#define XADC_AXI_REG_IPISR 0x60
+#define XADC_AXI_REG_IPIER 0x68
+#define XADC_AXI_ADC_REG_OFFSET 0x200
+
+#define XADC_AXI_RESET_MAGIC 0xa
+#define XADC_AXI_GIER_ENABLE BIT(31)
+
+#define XADC_AXI_INT_EOS BIT(4)
+#define XADC_AXI_INT_ALARM_MASK 0x3c0f
+
+#define XADC_FLAGS_BUFFERED BIT(0)
+
+static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
+ uint32_t val)
+{
+ writel(val, xadc->base + reg);
+}
+
+static void xadc_read_reg(struct xadc *xadc, unsigned int reg,
+ uint32_t *val)
+{
+ *val = readl(xadc->base + reg);
+}
+
+/*
+ * The ZYNQ interface uses two asynchronous FIFOs for communication with the
+ * XADC. Reads and writes to the XADC register are performed by submitting a
+ * request to the command FIFO (CFIFO), once the request has been completed the
+ * result can be read from the data FIFO (DFIFO). The method currently used in
+ * this driver is to submit the request for a read/write operation, then go to
+ * sleep and wait for an interrupt that signals that a response is available in
+ * the data FIFO.
+ */
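To make the command encoding concrete: a register read is queued as a READ command word followed by a NOP, and xadc_zynq_read_adc_reg() below then pulls two words back out of the data FIFO, the second of which carries the register value. Each word is packed as (cmd << 26) | (addr << 16) | data. A stand-alone illustration with an arbitrary register address:

#include <stdint.h>
#include <stdio.h>

#define CMD_NOP  0x00
#define CMD_READ 0x01
#define CMD(cmd, addr, data) (((uint32_t)(cmd) << 26) | ((addr) << 16) | (data))

int main(void)
{
	uint32_t cmd[2];

	cmd[0] = CMD(CMD_READ, 0x03, 0);	/* 0x03 is just an example address */
	cmd[1] = CMD(CMD_NOP, 0, 0);		/* second word shifts the reply out */

	printf("CFIFO words: 0x%08x 0x%08x\n",
	       (unsigned int)cmd[0], (unsigned int)cmd[1]);	/* 0x04030000 0x00000000 */
	return 0;
}
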
+
+static void xadc_zynq_write_fifo(struct xadc *xadc, uint32_t *cmd,
+ unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++)
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFIFO, cmd[i]);
+}
+
+static void xadc_zynq_drain_fifo(struct xadc *xadc)
+{
+ uint32_t status, tmp;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);
+
+ while (!(status & XADC_ZYNQ_STATUS_DFIFOE)) {
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);
+ }
+}
+
+static void xadc_zynq_update_intmsk(struct xadc *xadc, unsigned int mask,
+ unsigned int val)
+{
+ xadc->zynq_intmask &= ~mask;
+ xadc->zynq_intmask |= val;
+
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK,
+ xadc->zynq_intmask | xadc->zynq_masked_alarm);
+}
+
+static int xadc_zynq_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ uint32_t cmd[1];
+ uint32_t tmp;
+ int ret;
+
+ spin_lock_irq(&xadc->lock);
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
+ XADC_ZYNQ_INT_DFIFO_GTH);
+
+ reinit_completion(&xadc->completion);
+
+ cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_WRITE, reg, val);
+ xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
+ tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
+ tmp |= 0 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
+
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
+ spin_unlock_irq(&xadc->lock);
+
+ ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
+ if (ret == 0)
+ ret = -EIO;
+ else
+ ret = 0;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);
+
+ return ret;
+}
+
+static int xadc_zynq_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ uint32_t cmd[2];
+ uint32_t resp, tmp;
+ int ret;
+
+ cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_READ, reg, 0);
+ cmd[1] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_NOP, 0, 0);
+
+ spin_lock_irq(&xadc->lock);
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
+ XADC_ZYNQ_INT_DFIFO_GTH);
+ xadc_zynq_drain_fifo(xadc);
+ reinit_completion(&xadc->completion);
+
+ xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
+ tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
+ tmp |= 1 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
+
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
+ spin_unlock_irq(&xadc->lock);
+ ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
+ if (ret == 0)
+ ret = -EIO;
+ if (ret < 0)
+ return ret;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);
+
+ *val = resp & 0xffff;
+
+ return 0;
+}
+
+static unsigned int xadc_zynq_transform_alarm(unsigned int alarm)
+{
+ return ((alarm & 0x80) >> 4) |
+ ((alarm & 0x78) << 1) |
+ (alarm & 0x07);
+}
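+
+/*
+ * Example of the mapping above (added for illustration): judging by the
+ * XADC_ALARM_* definitions, the ZYNQ status reports the over-temperature alarm
+ * in bit 7 and the VCCBRAM/VCCPINT/VCCPAUX/VCCO_DDR alarms in bits 3 to 6. The
+ * transform moves the former to bit 3 and the latter to bits 4 to 7, which is
+ * the layout xadc_handle_events() expects; bits 0 to 2 are left untouched.
+ */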
+
+/*
+ * The ZYNQ threshold interrupts are level sensitive. Since we can't make the
+ * threshold condition go away from within the interrupt handler, as soon as a
+ * threshold condition is present we would enter the interrupt handler again
+ * and again. To work around this we mask all active threshold interrupts in
+ * the interrupt handler and schedule a delayed work item. In this work item we
+ * poll the interrupt status and only unmask the interrupt again once it is
+ * inactive.
+ */
+static void xadc_zynq_unmask_worker(struct work_struct *work)
+{
+ struct xadc *xadc = container_of(work, struct xadc, zynq_unmask_work.work);
+ unsigned int misc_sts, unmask;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &misc_sts);
+
+ misc_sts &= XADC_ZYNQ_INT_ALARM_MASK;
+
+ spin_lock_irq(&xadc->lock);
+
+ /* Clear those bits which are not active anymore */
+ unmask = (xadc->zynq_masked_alarm ^ misc_sts) & xadc->zynq_masked_alarm;
+ xadc->zynq_masked_alarm &= misc_sts;
+
+ /* Also clear those which are masked out anyway */
+ xadc->zynq_masked_alarm &= ~xadc->zynq_intmask;
+
+ /* Clear the interrupts before we unmask them */
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, unmask);
+
+ xadc_zynq_update_intmsk(xadc, 0, 0);
+
+ spin_unlock_irq(&xadc->lock);
+
+ /* if still pending some alarm re-trigger the timer */
+ if (xadc->zynq_masked_alarm) {
+ schedule_delayed_work(&xadc->zynq_unmask_work,
+ msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
+ }
+}
+
+static irqreturn_t xadc_zynq_threaded_interrupt_handler(int irq, void *devid)
+{
+ struct iio_dev *indio_dev = devid;
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int alarm;
+
+ spin_lock_irq(&xadc->lock);
+ alarm = xadc->zynq_alarm;
+ xadc->zynq_alarm = 0;
+ spin_unlock_irq(&xadc->lock);
+
+ xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
+
+ /* unmask the required interrupts in timer. */
+ schedule_delayed_work(&xadc->zynq_unmask_work,
+ msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
+{
+ struct iio_dev *indio_dev = devid;
+ struct xadc *xadc = iio_priv(indio_dev);
+ irqreturn_t ret = IRQ_HANDLED;
+ uint32_t status;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
+
+ status &= ~(xadc->zynq_intmask | xadc->zynq_masked_alarm);
+
+ if (!status)
+ return IRQ_NONE;
+
+ spin_lock(&xadc->lock);
+
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);
+
+ if (status & XADC_ZYNQ_INT_DFIFO_GTH) {
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
+ XADC_ZYNQ_INT_DFIFO_GTH);
+ complete(&xadc->completion);
+ }
+
+ status &= XADC_ZYNQ_INT_ALARM_MASK;
+ if (status) {
+ xadc->zynq_alarm |= status;
+ xadc->zynq_masked_alarm |= status;
+ /*
+ * mask the current event interrupt,
+ * unmask it when the interrupt is no more active.
+ */
+ xadc_zynq_update_intmsk(xadc, 0, 0);
+ ret = IRQ_WAKE_THREAD;
+ }
+ spin_unlock(&xadc->lock);
+
+ return ret;
+}
+
+#define XADC_ZYNQ_TCK_RATE_MAX 50000000
+#define XADC_ZYNQ_IGAP_DEFAULT 20
+
+static int xadc_zynq_setup(struct platform_device *pdev,
+ struct iio_dev *indio_dev, int irq)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long pcap_rate;
+ unsigned int tck_div;
+ unsigned int div;
+ unsigned int igap;
+ unsigned int tck_rate;
+
+ /* TODO: Figure out how to make igap and tck_rate configurable */
+ igap = XADC_ZYNQ_IGAP_DEFAULT;
+ tck_rate = XADC_ZYNQ_TCK_RATE_MAX;
+
+ xadc->zynq_intmask = ~0;
+
+ pcap_rate = clk_get_rate(xadc->clk);
+
+ if (tck_rate > XADC_ZYNQ_TCK_RATE_MAX)
+ tck_rate = XADC_ZYNQ_TCK_RATE_MAX;
+ if (tck_rate > pcap_rate / 2) {
+ div = 2;
+ } else {
+ div = pcap_rate / tck_rate;
+ if (pcap_rate / div > XADC_ZYNQ_TCK_RATE_MAX)
+ div++;
+ }
+
+ if (div <= 3)
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV2;
+ else if (div <= 7)
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV4;
+ else if (div <= 15)
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV8;
+ else
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV16;
+
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, XADC_ZYNQ_CTL_RESET);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, 0);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, ~0);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK, xadc->zynq_intmask);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, XADC_ZYNQ_CFG_ENABLE |
+ XADC_ZYNQ_CFG_REDGE | XADC_ZYNQ_CFG_WEDGE |
+ tck_div | XADC_ZYNQ_CFG_IGAP(igap));
+
+ return 0;
+}
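+
+/*
+ * Worked example for the divider selection above (hypothetical clock rates):
+ * with a 100 MHz PCAP clock and the 50 MHz TCK limit the code picks div = 2,
+ * i.e. XADC_ZYNQ_CFG_TCKRATE_DIV2 and a 50 MHz TCK; a 200 MHz PCAP clock gives
+ * div = 4 and thus XADC_ZYNQ_CFG_TCKRATE_DIV4, again a 50 MHz TCK.
+ */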
+
+static unsigned long xadc_zynq_get_dclk_rate(struct xadc *xadc)
+{
+ unsigned int div;
+ uint32_t val;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &val);
+
+ switch (val & XADC_ZYNQ_CFG_TCKRATE_MASK) {
+ case XADC_ZYNQ_CFG_TCKRATE_DIV4:
+ div = 4;
+ break;
+ case XADC_ZYNQ_CFG_TCKRATE_DIV8:
+ div = 8;
+ break;
+ case XADC_ZYNQ_CFG_TCKRATE_DIV16:
+ div = 16;
+ break;
+ default:
+ div = 2;
+ break;
+ }
+
+ return clk_get_rate(xadc->clk) / div;
+}
+
+static void xadc_zynq_update_alarm(struct xadc *xadc, unsigned int alarm)
+{
+ unsigned long flags;
+ uint32_t status;
+
+ /* Move OT to bit 7 */
+ alarm = ((alarm & 0x08) << 4) | ((alarm & 0xf0) >> 1) | (alarm & 0x07);
+
+ spin_lock_irqsave(&xadc->lock, flags);
+
+ /* Clear previous interrupts if any. */
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status & alarm);
+
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_ALARM_MASK,
+ ~alarm & XADC_ZYNQ_INT_ALARM_MASK);
+
+ spin_unlock_irqrestore(&xadc->lock, flags);
+}
+
+static const struct xadc_ops xadc_zynq_ops = {
+ .read = xadc_zynq_read_adc_reg,
+ .write = xadc_zynq_write_adc_reg,
+ .setup = xadc_zynq_setup,
+ .get_dclk_rate = xadc_zynq_get_dclk_rate,
+ .interrupt_handler = xadc_zynq_interrupt_handler,
+ .threaded_interrupt_handler = xadc_zynq_threaded_interrupt_handler,
+ .update_alarm = xadc_zynq_update_alarm,
+};
+
+static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ uint32_t val32;
+
+ xadc_read_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, &val32);
+ *val = val32 & 0xffff;
+
+ return 0;
+}
+
+static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ xadc_write_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, val);
+
+ return 0;
+}
+
+static int xadc_axi_setup(struct platform_device *pdev,
+ struct iio_dev *indio_dev, int irq)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+
+ xadc_write_reg(xadc, XADC_AXI_REG_RESET, XADC_AXI_RESET_MAGIC);
+ xadc_write_reg(xadc, XADC_AXI_REG_GIER, XADC_AXI_GIER_ENABLE);
+
+ return 0;
+}
+
+static irqreturn_t xadc_axi_interrupt_handler(int irq, void *devid)
+{
+ struct iio_dev *indio_dev = devid;
+ struct xadc *xadc = iio_priv(indio_dev);
+ uint32_t status, mask;
+ unsigned int events;
+
+ xadc_read_reg(xadc, XADC_AXI_REG_IPISR, &status);
+ xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &mask);
+ status &= mask;
+
+ if (!status)
+ return IRQ_NONE;
+
+ if ((status & XADC_AXI_INT_EOS) && xadc->trigger)
+ iio_trigger_poll(xadc->trigger, 0);
+
+ if (status & XADC_AXI_INT_ALARM_MASK) {
+ /*
+ * The order of the bits in the AXI-XADC status register does
+ * not match the order of the bits in the XADC alarm enable
+ * register. xadc_handle_events() expects the events to be in
+ * the same order as the XADC alarm enable register.
+ */
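+		/*
+		 * Illustration (inferred from XADC_AXI_INT_ALARM_MASK, not
+		 * from the original patch text): the AXI IP seems to report
+		 * the over-temperature alarm in bit 0, the temp/VCCINT/VCCAUX
+		 * alarms in bits 1 to 3 and the remaining alarms in bits 10 to
+		 * 13; the shifts below fold these into the generic bit 0 to 7
+		 * alarm layout.
+		 */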
+ events = (status & 0x000e) >> 1;
+ events |= (status & 0x0001) << 3;
+ events |= (status & 0x3c00) >> 6;
+ xadc_handle_events(indio_dev, events);
+ }
+
+ xadc_write_reg(xadc, XADC_AXI_REG_IPISR, status);
+
+ return IRQ_HANDLED;
+}
+
+static void xadc_axi_update_alarm(struct xadc *xadc, unsigned int alarm)
+{
+ uint32_t val;
+ unsigned long flags;
+
+ /*
+ * The order of the bits in the AXI-XADC status register does not match
+ * the order of the bits in the XADC alarm enable register. We get
+ * passed the alarm mask in the same order as in the XADC alarm enable
+ * register.
+ */
+ alarm = ((alarm & 0x07) << 1) | ((alarm & 0x08) >> 3) |
+ ((alarm & 0xf0) << 6);
+
+ spin_lock_irqsave(&xadc->lock, flags);
+ xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
+ val &= ~XADC_AXI_INT_ALARM_MASK;
+ val |= alarm;
+ xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
+ spin_unlock_irqrestore(&xadc->lock, flags);
+}
+
+static unsigned long xadc_axi_get_dclk(struct xadc *xadc)
+{
+ return clk_get_rate(xadc->clk);
+}
+
+static const struct xadc_ops xadc_axi_ops = {
+ .read = xadc_axi_read_adc_reg,
+ .write = xadc_axi_write_adc_reg,
+ .setup = xadc_axi_setup,
+ .get_dclk_rate = xadc_axi_get_dclk,
+ .update_alarm = xadc_axi_update_alarm,
+ .interrupt_handler = xadc_axi_interrupt_handler,
+ .flags = XADC_FLAGS_BUFFERED,
+};
+
+static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t mask, uint16_t val)
+{
+ uint16_t tmp;
+ int ret;
+
+ ret = _xadc_read_adc_reg(xadc, reg, &tmp);
+ if (ret)
+ return ret;
+
+ return _xadc_write_adc_reg(xadc, reg, (tmp & ~mask) | val);
+}
+
+static int xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t mask, uint16_t val)
+{
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+ ret = _xadc_update_adc_reg(xadc, reg, mask, val);
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
+
+static unsigned long xadc_get_dclk_rate(struct xadc *xadc)
+{
+ return xadc->ops->get_dclk_rate(xadc);
+}
+
+static int xadc_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *mask)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int n;
+
+ n = bitmap_weight(mask, indio_dev->masklength);
+
+ kfree(xadc->data);
+ xadc->data = kcalloc(n, sizeof(*xadc->data), GFP_KERNEL);
+ if (!xadc->data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static unsigned int xadc_scan_index_to_channel(unsigned int scan_index)
+{
+ switch (scan_index) {
+ case 5:
+ return XADC_REG_VCCPINT;
+ case 6:
+ return XADC_REG_VCCPAUX;
+ case 7:
+ return XADC_REG_VCCO_DDR;
+ case 8:
+ return XADC_REG_TEMP;
+ case 9:
+ return XADC_REG_VCCINT;
+ case 10:
+ return XADC_REG_VCCAUX;
+ case 11:
+ return XADC_REG_VPVN;
+ case 12:
+ return XADC_REG_VREFP;
+ case 13:
+ return XADC_REG_VREFN;
+ case 14:
+ return XADC_REG_VCCBRAM;
+ default:
+ return XADC_REG_VAUX(scan_index - 16);
+ }
+}
+
+static irqreturn_t xadc_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int chan;
+ int i, j;
+
+ if (!xadc->data)
+ goto out;
+
+ j = 0;
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ chan = xadc_scan_index_to_channel(i);
+ xadc_read_adc_reg(xadc, chan, &xadc->data[j]);
+ j++;
+ }
+
+ iio_push_to_buffers(indio_dev, xadc->data);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
+{
+ struct xadc *xadc = iio_trigger_get_drvdata(trigger);
+ unsigned long flags;
+ unsigned int convst;
+ unsigned int val;
+ int ret = 0;
+
+ mutex_lock(&xadc->mutex);
+
+ if (state) {
+ /* Only one of the two triggers can be active at a time. */
+ if (xadc->trigger != NULL) {
+ ret = -EBUSY;
+ goto err_out;
+ } else {
+ xadc->trigger = trigger;
+ if (trigger == xadc->convst_trigger)
+ convst = XADC_CONF0_EC;
+ else
+ convst = 0;
+ }
+ ret = _xadc_update_adc_reg(xadc, XADC_REG_CONF0, XADC_CONF0_EC,
+ convst);
+ if (ret)
+ goto err_out;
+ } else {
+ xadc->trigger = NULL;
+ }
+
+ spin_lock_irqsave(&xadc->lock, flags);
+ xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
+ xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
+ if (state)
+ val |= XADC_AXI_INT_EOS;
+ else
+ val &= ~XADC_AXI_INT_EOS;
+ xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
+ spin_unlock_irqrestore(&xadc->lock, flags);
+
+err_out:
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
+
+static const struct iio_trigger_ops xadc_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &xadc_trigger_set_state,
+};
+
+static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
+ const char *name)
+{
+ struct iio_trigger *trig;
+ int ret;
+
+ trig = iio_trigger_alloc("%s%d-%s", indio_dev->name,
+ indio_dev->id, name);
+ if (trig == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ trig->dev.parent = indio_dev->dev.parent;
+ trig->ops = &xadc_trigger_ops;
+ iio_trigger_set_drvdata(trig, iio_priv(indio_dev));
+
+ ret = iio_trigger_register(trig);
+ if (ret)
+ goto error_free_trig;
+
+ return trig;
+
+error_free_trig:
+ iio_trigger_free(trig);
+ return ERR_PTR(ret);
+}
+
+static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
+{
+ uint16_t val;
+
+ switch (seq_mode) {
+ case XADC_CONF1_SEQ_SIMULTANEOUS:
+ case XADC_CONF1_SEQ_INDEPENDENT:
+ val = XADC_CONF2_PD_ADC_B;
+ break;
+ default:
+ val = 0;
+ break;
+ }
+
+ return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_PD_MASK,
+ val);
+}
+
+static int xadc_get_seq_mode(struct xadc *xadc, unsigned long scan_mode)
+{
+ unsigned int aux_scan_mode = scan_mode >> 16;
+
+ if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_DUAL)
+ return XADC_CONF1_SEQ_SIMULTANEOUS;
+
+ if ((aux_scan_mode & 0xff00) == 0 ||
+ (aux_scan_mode & 0x00ff) == 0)
+ return XADC_CONF1_SEQ_CONTINUOUS;
+
+ return XADC_CONF1_SEQ_SIMULTANEOUS;
+}
+
+static int xadc_postdisable(struct iio_dev *indio_dev)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long scan_mask;
+ int ret;
+ int i;
+
+ scan_mask = 1; /* Run calibration as part of the sequence */
+ for (i = 0; i < indio_dev->num_channels; i++)
+ scan_mask |= BIT(indio_dev->channels[i].scan_index);
+
+ /* Enable all channels and calibration */
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
+ if (ret)
+ return ret;
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
+ if (ret)
+ return ret;
+
+ ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
+ XADC_CONF1_SEQ_CONTINUOUS);
+ if (ret)
+ return ret;
+
+ return xadc_power_adc_b(xadc, XADC_CONF1_SEQ_CONTINUOUS);
+}
+
+static int xadc_preenable(struct iio_dev *indio_dev)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long scan_mask;
+ int seq_mode;
+ int ret;
+
+ ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
+ XADC_CONF1_SEQ_DEFAULT);
+ if (ret)
+ goto err;
+
+ scan_mask = *indio_dev->active_scan_mask;
+ seq_mode = xadc_get_seq_mode(xadc, scan_mask);
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
+ if (ret)
+ goto err;
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
+ if (ret)
+ goto err;
+
+ ret = xadc_power_adc_b(xadc, seq_mode);
+ if (ret)
+ goto err;
+
+ ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
+ seq_mode);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ xadc_postdisable(indio_dev);
+ return ret;
+}
+
+static struct iio_buffer_setup_ops xadc_buffer_ops = {
+ .preenable = &xadc_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
+ .postdisable = &xadc_postdisable,
+};
+
+static int xadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int div;
+ uint16_t val16;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+ ret = xadc_read_adc_reg(xadc, chan->address, &val16);
+ if (ret < 0)
+ return ret;
+
+ val16 >>= 4;
+ if (chan->scan_type.sign == 'u')
+ *val = val16;
+ else
+ *val = sign_extend32(val16, 11);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ /*
+ * V = (val * 3.0) / 4096 for the supply monitoring channels
+ * listed below, V = (val * 1.0) / 4096 for the other inputs.
+ */
+ switch (chan->address) {
+ case XADC_REG_VCCINT:
+ case XADC_REG_VCCAUX:
+ case XADC_REG_VCCBRAM:
+ case XADC_REG_VCCPINT:
+ case XADC_REG_VCCPAUX:
+ case XADC_REG_VCCO_DDR:
+ *val = 3000;
+ break;
+ default:
+ *val = 1000;
+ break;
+ }
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ /* Temp in C = (val * 503.975) / 4096 - 273.15 */
+ *val = 503975;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only the temperature channel has an offset */
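+		/*
+		 * Worked example (illustration only): user space computes the
+		 * temperature as (raw + offset) * scale. With offset =
+		 * -((273150 << 12) / 503975) ~= -2219 and scale = 503.975 /
+		 * 4096 ~= 0.12304 degrees C per LSB, a raw code of 2534 gives
+		 * about (2534 - 2219) * 0.12304 ~= 38.8 degrees C.
+		 */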
+ *val = -((273150 << 12) / 503975);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
+ if (ret)
+ return ret;
+
+ div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
+ if (div < 2)
+ div = 2;
+
+ *val = xadc_get_dclk_rate(xadc) / div / 26;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int xadc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long clk_rate = xadc_get_dclk_rate(xadc);
+ unsigned int div;
+
+ if (info != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ if (val <= 0)
+ return -EINVAL;
+
+ /* Max. 150 kSPS */
+ if (val > 150000)
+ val = 150000;
+
+ val *= 26;
+
+ /* Min 1MHz */
+ if (val < 1000000)
+ val = 1000000;
+
+ /*
+ * We want to round down, but only if we do not exceed the 150 kSPS
+ * limit.
+ */
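+	/*
+	 * Worked example (hypothetical clock rate): with a 104 MHz DCLK and a
+	 * requested 100 kSPS, val becomes 100000 * 26 = 2.6 MHz and div =
+	 * 104 MHz / 2.6 MHz = 40. Since 104 MHz / 40 / 26 = 100 kSPS does not
+	 * exceed the 150 kSPS limit, div is not incremented and stays within
+	 * the valid range of 2 to 0xff.
+	 */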
+ div = clk_rate / val;
+ if (clk_rate / div / 26 > 150000)
+ div++;
+ if (div < 2)
+ div = 2;
+ else if (div > 0xff)
+ div = 0xff;
+
+ return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_DIV_MASK,
+ div << XADC_CONF2_DIV_OFFSET);
+}
+
+static const struct iio_event_spec xadc_temp_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
+
+/* Separate values for upper and lower thresholds, but only a shared enable */
+static const struct iio_event_spec xadc_voltage_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define XADC_CHAN_TEMP(_chan, _scan_index, _addr) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = (_chan), \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = xadc_temp_events, \
+ .num_event_specs = ARRAY_SIZE(xadc_temp_events), \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+#define XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_chan), \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = (_alarm) ? xadc_voltage_events : NULL, \
+ .num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+ .extend_name = _ext, \
+}
+
+static const struct iio_chan_spec xadc_channels[] = {
+ XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
+ XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
+ XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
+ XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
+ XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
+ XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
+ XADC_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
+ XADC_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
+ XADC_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
+ XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
+ XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
+ XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
+ XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
+ XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
+ XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
+ XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
+ XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
+ XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
+ XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
+ XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
+ XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
+ XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
+ XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
+ XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
+ XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
+ XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
+};
+
+static const struct iio_info xadc_info = {
+ .read_raw = &xadc_read_raw,
+ .write_raw = &xadc_write_raw,
+ .read_event_config = &xadc_read_event_config,
+ .write_event_config = &xadc_write_event_config,
+ .read_event_value = &xadc_read_event_value,
+ .write_event_value = &xadc_write_event_value,
+ .update_scan_mode = &xadc_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct of_device_id xadc_of_match_table[] = {
+ { .compatible = "xlnx,zynq-xadc-1.00.a", (void *)&xadc_zynq_ops },
+ { .compatible = "xlnx,axi-xadc-1.00.a", (void *)&xadc_axi_ops },
+ { },
+};
+MODULE_DEVICE_TABLE(of, xadc_of_match_table);
+
+static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
+ unsigned int *conf)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ struct iio_chan_spec *channels, *chan;
+ struct device_node *chan_node, *child;
+ unsigned int num_channels;
+ const char *external_mux;
+ u32 ext_mux_chan;
+ u32 reg;
+ int ret;
+
+ *conf = 0;
+
+ ret = of_property_read_string(np, "xlnx,external-mux", &external_mux);
+ if (ret < 0 || strcasecmp(external_mux, "none") == 0)
+ xadc->external_mux_mode = XADC_EXTERNAL_MUX_NONE;
+ else if (strcasecmp(external_mux, "single") == 0)
+ xadc->external_mux_mode = XADC_EXTERNAL_MUX_SINGLE;
+ else if (strcasecmp(external_mux, "dual") == 0)
+ xadc->external_mux_mode = XADC_EXTERNAL_MUX_DUAL;
+ else
+ return -EINVAL;
+
+ if (xadc->external_mux_mode != XADC_EXTERNAL_MUX_NONE) {
+ ret = of_property_read_u32(np, "xlnx,external-mux-channel",
+ &ext_mux_chan);
+ if (ret < 0)
+ return ret;
+
+ if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_SINGLE) {
+ if (ext_mux_chan == 0)
+ ext_mux_chan = XADC_REG_VPVN;
+ else if (ext_mux_chan <= 16)
+ ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
+ else
+ return -EINVAL;
+ } else {
+ if (ext_mux_chan > 0 && ext_mux_chan <= 8)
+ ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
+ else
+ return -EINVAL;
+ }
+
+ *conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
+ }
+
+ channels = kmemdup(xadc_channels, sizeof(xadc_channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ num_channels = 9;
+ chan = &channels[9];
+
+ chan_node = of_get_child_by_name(np, "xlnx,channels");
+ if (chan_node) {
+ for_each_child_of_node(chan_node, child) {
+ if (num_channels >= ARRAY_SIZE(xadc_channels)) {
+ of_node_put(child);
+ break;
+ }
+
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret || reg > 16)
+ continue;
+
+ if (of_property_read_bool(child, "xlnx,bipolar"))
+ chan->scan_type.sign = 's';
+
+ if (reg == 0) {
+ chan->scan_index = 11;
+ chan->address = XADC_REG_VPVN;
+ } else {
+ chan->scan_index = 15 + reg;
+ chan->address = XADC_REG_VAUX(reg - 1);
+ }
+ num_channels++;
+ chan++;
+ }
+ }
+ of_node_put(chan_node);
+
+ indio_dev->num_channels = num_channels;
+ indio_dev->channels = krealloc(channels, sizeof(*channels) *
+ num_channels, GFP_KERNEL);
+ /* If we can't resize the channels array, just use the original */
+ if (!indio_dev->channels)
+ indio_dev->channels = channels;
+
+ return 0;
+}
+
+static int xadc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct iio_dev *indio_dev;
+ unsigned int bipolar_mask;
+ struct resource *mem;
+ unsigned int conf0;
+ struct xadc *xadc;
+ int ret;
+ int irq;
+ int i;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ id = of_match_node(xadc_of_match_table, pdev->dev.of_node);
+ if (!id)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*xadc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ xadc = iio_priv(indio_dev);
+ xadc->ops = id->data;
+ init_completion(&xadc->completion);
+ mutex_init(&xadc->mutex);
+ spin_lock_init(&xadc->lock);
+ INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xadc->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(xadc->base))
+ return PTR_ERR(xadc->base);
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->name = "xadc";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &xadc_info;
+
+ ret = xadc_parse_dt(indio_dev, pdev->dev.of_node, &conf0);
+ if (ret)
+ goto err_device_free;
+
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
+ ret = iio_triggered_buffer_setup(indio_dev,
+ &iio_pollfunc_store_time, &xadc_trigger_handler,
+ &xadc_buffer_ops);
+ if (ret)
+ goto err_device_free;
+
+ xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
+ if (IS_ERR(xadc->convst_trigger)) {
+ ret = PTR_ERR(xadc->convst_trigger);
+ goto err_triggered_buffer_cleanup;
+ }
+ xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
+ "samplerate");
+ if (IS_ERR(xadc->samplerate_trigger)) {
+ ret = PTR_ERR(xadc->samplerate_trigger);
+ goto err_free_convst_trigger;
+ }
+ }
+
+ xadc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(xadc->clk)) {
+ ret = PTR_ERR(xadc->clk);
+ goto err_free_samplerate_trigger;
+ }
+ clk_prepare_enable(xadc->clk);
+
+ ret = xadc->ops->setup(pdev, indio_dev, irq);
+ if (ret)
+ goto err_free_samplerate_trigger;
+
+ ret = request_threaded_irq(irq, xadc->ops->interrupt_handler,
+ xadc->ops->threaded_interrupt_handler,
+ 0, dev_name(&pdev->dev), indio_dev);
+ if (ret)
+ goto err_clk_disable_unprepare;
+
+ for (i = 0; i < 16; i++)
+ xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
+ &xadc->threshold[i]);
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
+ if (ret)
+ goto err_free_irq;
+
+ bipolar_mask = 0;
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ if (indio_dev->channels[i].scan_type.sign == 's')
+ bipolar_mask |= BIT(indio_dev->channels[i].scan_index);
+ }
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
+ if (ret)
+ goto err_free_irq;
+ ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
+ bipolar_mask >> 16);
+ if (ret)
+ goto err_free_irq;
+
+ /* Disable all alarms */
+ xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
+ XADC_CONF1_ALARM_MASK);
+
+ /* Set thresholds to min/max */
+ for (i = 0; i < 16; i++) {
+ /*
+ * Set max voltage threshold and both temperature thresholds to
+ * 0xffff, min voltage threshold to 0.
+ */
+ if (i % 8 < 4 || i == 7)
+ xadc->threshold[i] = 0xffff;
+ else
+ xadc->threshold[i] = 0;
+ xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
+ xadc->threshold[i]);
+ }
+
+ /* Go to non-buffered mode */
+ xadc_postdisable(indio_dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_free_irq;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+err_free_irq:
+ free_irq(irq, indio_dev);
+err_free_samplerate_trigger:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_trigger_free(xadc->samplerate_trigger);
+err_free_convst_trigger:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_trigger_free(xadc->convst_trigger);
+err_triggered_buffer_cleanup:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_triggered_buffer_cleanup(indio_dev);
+err_clk_disable_unprepare:
+ clk_disable_unprepare(xadc->clk);
+err_device_free:
+ kfree(indio_dev->channels);
+
+ return ret;
+}
+
+static int xadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct xadc *xadc = iio_priv(indio_dev);
+ int irq = platform_get_irq(pdev, 0);
+
+ iio_device_unregister(indio_dev);
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
+ iio_trigger_free(xadc->samplerate_trigger);
+ iio_trigger_free(xadc->convst_trigger);
+ iio_triggered_buffer_cleanup(indio_dev);
+ }
+ free_irq(irq, indio_dev);
+ clk_disable_unprepare(xadc->clk);
+ cancel_delayed_work(&xadc->zynq_unmask_work);
+ kfree(xadc->data);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+static struct platform_driver xadc_driver = {
+ .probe = xadc_probe,
+ .remove = xadc_remove,
+ .driver = {
+ .name = "xadc",
+ .owner = THIS_MODULE,
+ .of_match_table = xadc_of_match_table,
+ },
+};
+module_platform_driver(xadc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Xilinx XADC IIO driver");
diff --git a/drivers/iio/adc/xilinx-xadc-events.c b/drivers/iio/adc/xilinx-xadc-events.c
new file mode 100644
index 000000000000..3e7f0d7a80c3
--- /dev/null
+++ b/drivers/iio/adc/xilinx-xadc-events.c
@@ -0,0 +1,254 @@
+/*
+ * Xilinx XADC driver
+ *
+ * Copyright 2013 Analog Devices Inc.
+ * Author: Lars-Peter Clauen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+
+#include "xilinx-xadc.h"
+
+static const struct iio_chan_spec *xadc_event_to_channel(
+ struct iio_dev *indio_dev, unsigned int event)
+{
+ switch (event) {
+ case XADC_THRESHOLD_OT_MAX:
+ case XADC_THRESHOLD_TEMP_MAX:
+ return &indio_dev->channels[0];
+ case XADC_THRESHOLD_VCCINT_MAX:
+ case XADC_THRESHOLD_VCCAUX_MAX:
+ return &indio_dev->channels[event];
+ default:
+ return &indio_dev->channels[event-1];
+ }
+}
+
+static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event)
+{
+ const struct iio_chan_spec *chan;
+ unsigned int offset;
+
+ /* Temperature threshold error; we don't handle this yet */
+ if (event == 0)
+ return;
+
+ if (event < 4)
+ offset = event;
+ else
+ offset = event + 4;
+
+ chan = xadc_event_to_channel(indio_dev, event);
+
+ if (chan->type == IIO_TEMP) {
+ /*
+ * The temperature channel only supports over-temperature
+ * events.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ } else {
+ /*
+ * For other channels we don't know whether it is an upper or
+ * lower threshold event. Userspace will have to check the
+ * channel value if it wants to know.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER),
+ iio_get_time_ns());
+ }
+}
+
+void xadc_handle_events(struct iio_dev *indio_dev, unsigned long events)
+{
+ unsigned int i;
+
+ for_each_set_bit(i, &events, 8)
+ xadc_handle_event(indio_dev, i);
+}
+
+static unsigned xadc_get_threshold_offset(const struct iio_chan_spec *chan,
+ enum iio_event_direction dir)
+{
+ unsigned int offset;
+
+ if (chan->type == IIO_TEMP) {
+ offset = XADC_THRESHOLD_OT_MAX;
+ } else {
+ if (chan->channel < 2)
+ offset = chan->channel + 1;
+ else
+ offset = chan->channel + 6;
+ }
+
+ if (dir == IIO_EV_DIR_FALLING)
+ offset += 4;
+
+ return offset;
+}
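+
+/*
+ * Example of the mapping above (illustration only): VCCINT (channel 0) maps to
+ * XADC_THRESHOLD_VCCINT_MAX (0x1) for rising events and, with the +4 offset,
+ * to XADC_THRESHOLD_VCCINT_MIN (0x5) for falling events; VCCBRAM (channel 2)
+ * maps to XADC_THRESHOLD_VCCBRAM_MAX (0x8) and XADC_THRESHOLD_VCCBRAM_MIN
+ * (0xc).
+ */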
+
+static unsigned int xadc_get_alarm_mask(const struct iio_chan_spec *chan)
+{
+ if (chan->type == IIO_TEMP) {
+ return XADC_ALARM_OT_MASK;
+ } else {
+ switch (chan->channel) {
+ case 0:
+ return XADC_ALARM_VCCINT_MASK;
+ case 1:
+ return XADC_ALARM_VCCAUX_MASK;
+ case 2:
+ return XADC_ALARM_VCCBRAM_MASK;
+ case 3:
+ return XADC_ALARM_VCCPINT_MASK;
+ case 4:
+ return XADC_ALARM_VCCPAUX_MASK;
+ case 5:
+ return XADC_ALARM_VCCODDR_MASK;
+ default:
+ /* We will never get here */
+ return 0;
+ }
+ }
+}
+
+int xadc_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+
+ return (bool)(xadc->alarm_mask & xadc_get_alarm_mask(chan));
+}
+
+int xadc_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ unsigned int alarm = xadc_get_alarm_mask(chan);
+ struct xadc *xadc = iio_priv(indio_dev);
+ uint16_t cfg, old_cfg;
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+
+ if (state)
+ xadc->alarm_mask |= alarm;
+ else
+ xadc->alarm_mask &= ~alarm;
+
+ xadc->ops->update_alarm(xadc, xadc->alarm_mask);
+
+ ret = _xadc_read_adc_reg(xadc, XADC_REG_CONF1, &cfg);
+ if (ret)
+ goto err_out;
+
+ old_cfg = cfg;
+ cfg |= XADC_CONF1_ALARM_MASK;
+ cfg &= ~((xadc->alarm_mask & 0xf0) << 4); /* bram, pint, paux, ddr */
+ cfg &= ~((xadc->alarm_mask & 0x08) >> 3); /* ot */
+ cfg &= ~((xadc->alarm_mask & 0x07) << 1); /* temp, vccint, vccaux */
+ if (old_cfg != cfg)
+ ret = _xadc_write_adc_reg(xadc, XADC_REG_CONF1, cfg);
+
+err_out:
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
+
+/* Register value is msb aligned, the lower 4 bits are ignored */
+#define XADC_THRESHOLD_VALUE_SHIFT 4
+
+int xadc_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2)
+{
+ unsigned int offset = xadc_get_threshold_offset(chan, dir);
+ struct xadc *xadc = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = xadc->threshold[offset];
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ *val = xadc->temp_hysteresis;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val >>= XADC_THRESHOLD_VALUE_SHIFT;
+
+ return IIO_VAL_INT;
+}
+
+int xadc_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int val, int val2)
+{
+ unsigned int offset = xadc_get_threshold_offset(chan, dir);
+ struct xadc *xadc = iio_priv(indio_dev);
+ int ret = 0;
+
+ val <<= XADC_THRESHOLD_VALUE_SHIFT;
+
+ if (val < 0 || val > 0xffff)
+ return -EINVAL;
+
+ mutex_lock(&xadc->mutex);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ xadc->threshold[offset] = val;
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ xadc->temp_hysteresis = val;
+ break;
+ default:
+ mutex_unlock(&xadc->mutex);
+ return -EINVAL;
+ }
+
+ if (chan->type == IIO_TEMP) {
+ /*
+ * According to the datasheet we need to set the lower 4 bits to
+ * 0x3, otherwise 125 degrees Celsius will be used as the
+ * threshold.
+ */
+ val |= 0x3;
+
+ /*
+ * Since we store the hysteresis as a value relative to the
+ * threshold, but the hardware expects an absolute value, we
+ * need to recalculate it whenever the hysteresis or the
+ * threshold changes.
+ */
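+		/*
+		 * For example (illustrative numbers): with a rising threshold
+		 * of 0xc800 and a hysteresis of 0x0800, the falling threshold
+		 * register at offset + 4 is programmed with 0xc000; if the
+		 * hysteresis were larger than the threshold it would be
+		 * clamped to 0.
+		 */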
+ if (xadc->threshold[offset] < xadc->temp_hysteresis)
+ xadc->threshold[offset + 4] = 0;
+ else
+ xadc->threshold[offset + 4] = xadc->threshold[offset] -
+ xadc->temp_hysteresis;
+ ret = _xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(offset + 4),
+ xadc->threshold[offset + 4]);
+ if (ret)
+ goto out_unlock;
+ }
+
+ if (info == IIO_EV_INFO_VALUE)
+ ret = _xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(offset), val);
+
+out_unlock:
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
new file mode 100644
index 000000000000..c7487e8d7f80
--- /dev/null
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -0,0 +1,209 @@
+/*
+ * Xilinx XADC driver
+ *
+ * Copyright 2013 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __IIO_XILINX_XADC__
+#define __IIO_XILINX_XADC__
+
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+struct iio_dev;
+struct clk;
+struct xadc_ops;
+struct platform_device;
+
+void xadc_handle_events(struct iio_dev *indio_dev, unsigned long events);
+
+int xadc_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir);
+int xadc_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state);
+int xadc_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2);
+int xadc_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int val, int val2);
+
+enum xadc_external_mux_mode {
+ XADC_EXTERNAL_MUX_NONE,
+ XADC_EXTERNAL_MUX_SINGLE,
+ XADC_EXTERNAL_MUX_DUAL,
+};
+
+struct xadc {
+ void __iomem *base;
+ struct clk *clk;
+
+ const struct xadc_ops *ops;
+
+ uint16_t threshold[16];
+ uint16_t temp_hysteresis;
+ unsigned int alarm_mask;
+
+ uint16_t *data;
+
+ struct iio_trigger *trigger;
+ struct iio_trigger *convst_trigger;
+ struct iio_trigger *samplerate_trigger;
+
+ enum xadc_external_mux_mode external_mux_mode;
+
+ unsigned int zynq_alarm;
+ unsigned int zynq_masked_alarm;
+ unsigned int zynq_intmask;
+ struct delayed_work zynq_unmask_work;
+
+ struct mutex mutex;
+ spinlock_t lock;
+
+ struct completion completion;
+};
+
+struct xadc_ops {
+ int (*read)(struct xadc *, unsigned int, uint16_t *);
+ int (*write)(struct xadc *, unsigned int, uint16_t);
+ int (*setup)(struct platform_device *pdev, struct iio_dev *indio_dev,
+ int irq);
+ void (*update_alarm)(struct xadc *, unsigned int);
+ unsigned long (*get_dclk_rate)(struct xadc *);
+ irqreturn_t (*interrupt_handler)(int, void *);
+ irqreturn_t (*threaded_interrupt_handler)(int, void *);
+
+ unsigned int flags;
+};
+
+static inline int _xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ lockdep_assert_held(&xadc->mutex);
+ return xadc->ops->read(xadc, reg, val);
+}
+
+static inline int _xadc_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ lockdep_assert_held(&xadc->mutex);
+ return xadc->ops->write(xadc, reg, val);
+}
+
+static inline int xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+ ret = _xadc_read_adc_reg(xadc, reg, val);
+ mutex_unlock(&xadc->mutex);
+ return ret;
+}
+
+static inline int xadc_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+ ret = _xadc_write_adc_reg(xadc, reg, val);
+ mutex_unlock(&xadc->mutex);
+ return ret;
+}
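+
+/*
+ * Note (added for clarity, not part of the original patch): the underscore
+ * variants above are for callers that already hold xadc->mutex and need to
+ * combine several register accesses into one critical section, e.g.
+ * _xadc_update_adc_reg(); the plain variants take and release the mutex around
+ * a single access.
+ */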
+
+/* XADC hardmacro register definitions */
+#define XADC_REG_TEMP 0x00
+#define XADC_REG_VCCINT 0x01
+#define XADC_REG_VCCAUX 0x02
+#define XADC_REG_VPVN 0x03
+#define XADC_REG_VREFP 0x04
+#define XADC_REG_VREFN 0x05
+#define XADC_REG_VCCBRAM 0x06
+
+#define XADC_REG_VCCPINT 0x0d
+#define XADC_REG_VCCPAUX 0x0e
+#define XADC_REG_VCCO_DDR 0x0f
+#define XADC_REG_VAUX(x) (0x10 + (x))
+
+#define XADC_REG_MAX_TEMP 0x20
+#define XADC_REG_MAX_VCCINT 0x21
+#define XADC_REG_MAX_VCCAUX 0x22
+#define XADC_REG_MAX_VCCBRAM 0x23
+#define XADC_REG_MIN_TEMP 0x24
+#define XADC_REG_MIN_VCCINT 0x25
+#define XADC_REG_MIN_VCCAUX 0x26
+#define XADC_REG_MIN_VCCBRAM 0x27
+#define XADC_REG_MAX_VCCPINT 0x28
+#define XADC_REG_MAX_VCCPAUX 0x29
+#define XADC_REG_MAX_VCCO_DDR 0x2a
+#define XADC_REG_MIN_VCCPINT 0x2b
+#define XADC_REG_MIN_VCCPAUX 0x2c
+#define XADC_REG_MIN_VCCO_DDR 0x2d
+
+#define XADC_REG_CONF0 0x40
+#define XADC_REG_CONF1 0x41
+#define XADC_REG_CONF2 0x42
+#define XADC_REG_SEQ(x) (0x48 + (x))
+#define XADC_REG_INPUT_MODE(x) (0x4c + (x))
+#define XADC_REG_THRESHOLD(x) (0x50 + (x))
+
+#define XADC_REG_FLAG 0x3f
+
+#define XADC_CONF0_EC BIT(9)
+#define XADC_CONF0_ACQ BIT(8)
+#define XADC_CONF0_MUX BIT(11)
+#define XADC_CONF0_CHAN(x) (x)
+
+#define XADC_CONF1_SEQ_MASK (0xf << 12)
+#define XADC_CONF1_SEQ_DEFAULT (0 << 12)
+#define XADC_CONF1_SEQ_SINGLE_PASS (1 << 12)
+#define XADC_CONF1_SEQ_CONTINUOUS (2 << 12)
+#define XADC_CONF1_SEQ_SINGLE_CHANNEL (3 << 12)
+#define XADC_CONF1_SEQ_SIMULTANEOUS (4 << 12)
+#define XADC_CONF1_SEQ_INDEPENDENT (8 << 12)
+#define XADC_CONF1_ALARM_MASK 0x0f0f
+
+#define XADC_CONF2_DIV_MASK 0xff00
+#define XADC_CONF2_DIV_OFFSET 8
+
+#define XADC_CONF2_PD_MASK (0x3 << 4)
+#define XADC_CONF2_PD_NONE (0x0 << 4)
+#define XADC_CONF2_PD_ADC_B (0x2 << 4)
+#define XADC_CONF2_PD_BOTH (0x3 << 4)
+
+#define XADC_ALARM_TEMP_MASK BIT(0)
+#define XADC_ALARM_VCCINT_MASK BIT(1)
+#define XADC_ALARM_VCCAUX_MASK BIT(2)
+#define XADC_ALARM_OT_MASK BIT(3)
+#define XADC_ALARM_VCCBRAM_MASK BIT(4)
+#define XADC_ALARM_VCCPINT_MASK BIT(5)
+#define XADC_ALARM_VCCPAUX_MASK BIT(6)
+#define XADC_ALARM_VCCODDR_MASK BIT(7)
+
+#define XADC_THRESHOLD_TEMP_MAX 0x0
+#define XADC_THRESHOLD_VCCINT_MAX 0x1
+#define XADC_THRESHOLD_VCCAUX_MAX 0x2
+#define XADC_THRESHOLD_OT_MAX 0x3
+#define XADC_THRESHOLD_TEMP_MIN 0x4
+#define XADC_THRESHOLD_VCCINT_MIN 0x5
+#define XADC_THRESHOLD_VCCAUX_MIN 0x6
+#define XADC_THRESHOLD_OT_MIN 0x7
+#define XADC_THRESHOLD_VCCBRAM_MAX 0x8
+#define XADC_THRESHOLD_VCCPINT_MAX 0x9
+#define XADC_THRESHOLD_VCCPAUX_MAX 0xa
+#define XADC_THRESHOLD_VCCODDR_MAX 0xb
+#define XADC_THRESHOLD_VCCBRAM_MIN 0xc
+#define XADC_THRESHOLD_VCCPINT_MIN 0xd
+#define XADC_THRESHOLD_VCCPAUX_MIN 0xe
+#define XADC_THRESHOLD_VCCODDR_MIN 0xf
+
+#endif
diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c
index 2d9c6f8c06db..eb46e728aa2e 100644
--- a/drivers/iio/buffer_cb.c
+++ b/drivers/iio/buffer_cb.c
@@ -46,10 +46,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
struct iio_channel *chan;
cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
- if (cb_buff == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ if (cb_buff == NULL)
+ return ERR_PTR(-ENOMEM);
iio_buffer_init(&cb_buff->buffer);
@@ -91,7 +89,6 @@ error_release_channels:
iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
kfree(cb_buff);
-error_ret:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 7dcf83998e6f..dbefbdaf7cd1 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -38,29 +38,40 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
if (state) {
if (sensor_hub_device_open(st->hsdev))
return -EIO;
- state_val =
- HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM;
- report_val =
- HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM;
-
+ state_val = hid_sensor_get_usage_index(st->hsdev,
+ st->power_state.report_id,
+ st->power_state.index,
+ HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM);
+ report_val = hid_sensor_get_usage_index(st->hsdev,
+ st->report_state.report_id,
+ st->report_state.index,
+ HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
} else {
sensor_hub_device_close(st->hsdev);
- state_val =
- HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM;
- report_val =
- HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM;
+ state_val = hid_sensor_get_usage_index(st->hsdev,
+ st->power_state.report_id,
+ st->power_state.index,
+ HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM);
+ report_val = hid_sensor_get_usage_index(st->hsdev,
+ st->report_state.report_id,
+ st->report_state.index,
+ HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM);
}
-
st->data_ready = state;
- state_val += st->power_state.logical_minimum;
- report_val += st->report_state.logical_minimum;
- sensor_hub_set_feature(st->hsdev, st->power_state.report_id,
+
+ if (state_val >= 0) {
+ state_val += st->power_state.logical_minimum;
+ sensor_hub_set_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
(s32)state_val);
+ }
- sensor_hub_set_feature(st->hsdev, st->report_state.report_id,
+ if (report_val >= 0) {
+ report_val += st->report_state.logical_minimum;
+ sensor_hub_set_feature(st->hsdev, st->report_state.report_id,
st->report_state.index,
(s32)report_val);
+ }
return 0;
}
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index d0505fd22ef4..fa2810032968 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -92,7 +92,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
ad7303_write(st, chan->channel, st->dac_cache[chan->channel]);
mutex_unlock(&indio_dev->mlock);
- return ret ? ret : len;
+ return len;
}
static int ad7303_get_vref(struct ad7303_state *st,
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index de76e6a34c1e..9a82a7255ebb 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -19,7 +19,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 7d9f5c31d2fc..43d14588448d 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -15,7 +15,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 463c4d9da79e..e116bd8dd0e4 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -12,4 +12,14 @@ config DHT11
Other sensors should work as well as long as they speak the
same protocol.
+config SI7005
+ tristate "SI7005 relative humidity and temperature sensor"
+ depends on I2C
+ help
+ Say yes here to build support for the Silabs Si7005 relative
+ humidity and temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called si7005.
+
endmenu
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index d5d36c0c95f9..e3f3d942e646 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_DHT11) += dht11.o
+obj-$(CONFIG_SI7005) += si7005.o
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
new file mode 100644
index 000000000000..bdd586e6d955
--- /dev/null
+++ b/drivers/iio/humidity/si7005.c
@@ -0,0 +1,189 @@
+/*
+ * si7005.c - Support for Silabs Si7005 humidity and temperature sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x40)
+ *
+ * TODO: heater, fast mode, processed mode (temp. / linearity compensation)
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define SI7005_STATUS 0x00
+#define SI7005_DATA 0x01 /* 16-bit, MSB */
+#define SI7005_CONFIG 0x03
+#define SI7005_ID 0x11
+
+#define SI7005_STATUS_NRDY BIT(0)
+#define SI7005_CONFIG_TEMP BIT(4)
+#define SI7005_CONFIG_START BIT(0)
+
+#define SI7005_ID_7005 0x50
+#define SI7005_ID_7015 0xf0
+
+struct si7005_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 config;
+};
+
+static int si7005_read_measurement(struct si7005_data *data, bool temp)
+{
+ int tries = 50;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = i2c_smbus_write_byte_data(data->client, SI7005_CONFIG,
+ data->config | SI7005_CONFIG_START |
+ (temp ? SI7005_CONFIG_TEMP : 0));
+ if (ret < 0)
+ goto done;
+
+ while (tries-- > 0) {
+ msleep(20);
+ ret = i2c_smbus_read_byte_data(data->client, SI7005_STATUS);
+ if (ret < 0)
+ goto done;
+ if (!(ret & SI7005_STATUS_NRDY))
+ break;
+ }
+ if (tries < 0) {
+ ret = -EIO;
+ goto done;
+ }
+
+ ret = i2c_smbus_read_word_swapped(data->client, SI7005_DATA);
+
+done:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int si7005_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct si7005_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = si7005_read_measurement(data, chan->type == IIO_TEMP);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_TEMP) {
+ *val = 7;
+ *val2 = 812500;
+ } else {
+ *val = 3;
+ *val2 = 906250;
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ if (chan->type == IIO_TEMP)
+ *val = -50 * 32 * 4;
+ else
+ *val = -24 * 16 * 16;
+ return IIO_VAL_INT;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
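+
+/*
+ * Illustration (not from the original patch): user space combines the values
+ * above as processed = (raw + offset) * scale. For humidity this evaluates to
+ * (raw - 6144) * 3.90625, i.e. (raw / 256 - 24) * 1000, and for temperature to
+ * (raw - 6400) * 7.8125, i.e. (raw / 128 - 50) * 1000, which presumably mirror
+ * the conversion formulas from the Si7005 datasheet.
+ */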
+
+static const struct iio_chan_spec si7005_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ }
+};
+
+static const struct iio_info si7005_info = {
+ .read_raw = si7005_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int si7005_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct si7005_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &si7005_info;
+
+ indio_dev->channels = si7005_channels;
+ indio_dev->num_channels = ARRAY_SIZE(si7005_channels);
+
+ ret = i2c_smbus_read_byte_data(client, SI7005_ID);
+ if (ret < 0)
+ return ret;
+ if (ret != SI7005_ID_7005 && ret != SI7005_ID_7015)
+ return -ENODEV;
+
+ ret = i2c_smbus_read_byte_data(client, SI7005_CONFIG);
+ if (ret < 0)
+ return ret;
+ data->config = ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id si7005_id[] = {
+ { "si7005", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si7005_id);
+
+static struct i2c_driver si7005_driver = {
+ .driver = {
+ .name = "si7005",
+ .owner = THIS_MODULE,
+ },
+ .probe = si7005_probe,
+ .id_table = si7005_id,
+};
+module_i2c_driver(si7005_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Silabs Si7005 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 663e88a1a3c1..2b0e45133e9d 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -25,6 +25,8 @@ config ADIS16480
Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
ADIS16485, ADIS16488 inertial sensors.
+source "drivers/iio/imu/inv_mpu6050/Kconfig"
+
endmenu
config IIO_ADIS_LIB
@@ -38,5 +40,3 @@ config IIO_ADIS_LIB_BUFFER
help
A set of buffer helper functions for the Analog Devices ADIS* device
family.
-
-source "drivers/iio/imu/inv_mpu6050/Kconfig"
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 7c582f7ae34e..433583b6f800 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -281,7 +281,7 @@ static ssize_t adis16400_write_frequency(struct device *dev,
st->variant->set_freq(st, val);
mutex_unlock(&indio_dev->mlock);
- return ret ? ret : len;
+ return len;
}
/* Power down the device */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index df7f1e1157ae..cb9f96b446a5 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
@@ -117,7 +116,7 @@ int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
return result;
if (en) {
- /* Wait for output stablize */
+ /* Wait for output to stabilize */
msleep(INV_MPU6050_TEMP_UP_TIME);
if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
/* switch internal clock to PLL */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index f38395529a44..0ab382be1e64 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -126,35 +126,35 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_SAMPLE_RATE_DIV 0x19
#define INV_MPU6050_REG_CONFIG 0x1A
#define INV_MPU6050_REG_GYRO_CONFIG 0x1B
-#define INV_MPU6050_REG_ACCEL_CONFIG 0x1C
+#define INV_MPU6050_REG_ACCEL_CONFIG 0x1C
#define INV_MPU6050_REG_FIFO_EN 0x23
-#define INV_MPU6050_BIT_ACCEL_OUT 0x08
-#define INV_MPU6050_BITS_GYRO_OUT 0x70
+#define INV_MPU6050_BIT_ACCEL_OUT 0x08
+#define INV_MPU6050_BITS_GYRO_OUT 0x70
#define INV_MPU6050_REG_INT_ENABLE 0x38
-#define INV_MPU6050_BIT_DATA_RDY_EN 0x01
-#define INV_MPU6050_BIT_DMP_INT_EN 0x02
+#define INV_MPU6050_BIT_DATA_RDY_EN 0x01
+#define INV_MPU6050_BIT_DMP_INT_EN 0x02
#define INV_MPU6050_REG_RAW_ACCEL 0x3B
#define INV_MPU6050_REG_TEMPERATURE 0x41
#define INV_MPU6050_REG_RAW_GYRO 0x43
#define INV_MPU6050_REG_USER_CTRL 0x6A
-#define INV_MPU6050_BIT_FIFO_RST 0x04
-#define INV_MPU6050_BIT_DMP_RST 0x08
-#define INV_MPU6050_BIT_I2C_MST_EN 0x20
-#define INV_MPU6050_BIT_FIFO_EN 0x40
-#define INV_MPU6050_BIT_DMP_EN 0x80
+#define INV_MPU6050_BIT_FIFO_RST 0x04
+#define INV_MPU6050_BIT_DMP_RST 0x08
+#define INV_MPU6050_BIT_I2C_MST_EN 0x20
+#define INV_MPU6050_BIT_FIFO_EN 0x40
+#define INV_MPU6050_BIT_DMP_EN 0x80
#define INV_MPU6050_REG_PWR_MGMT_1 0x6B
-#define INV_MPU6050_BIT_H_RESET 0x80
-#define INV_MPU6050_BIT_SLEEP 0x40
-#define INV_MPU6050_BIT_CLK_MASK 0x7
+#define INV_MPU6050_BIT_H_RESET 0x80
+#define INV_MPU6050_BIT_SLEEP 0x40
+#define INV_MPU6050_BIT_CLK_MASK 0x7
#define INV_MPU6050_REG_PWR_MGMT_2 0x6C
-#define INV_MPU6050_BIT_PWR_ACCL_STBY 0x38
-#define INV_MPU6050_BIT_PWR_GYRO_STBY 0x07
+#define INV_MPU6050_BIT_PWR_ACCL_STBY 0x38
+#define INV_MPU6050_BIT_PWR_GYRO_STBY 0x07
#define INV_MPU6050_REG_FIFO_COUNT_H 0x72
#define INV_MPU6050_REG_FIFO_R_W 0x74
@@ -180,10 +180,10 @@ struct inv_mpu6050_state {
/* init parameters */
#define INV_MPU6050_INIT_FIFO_RATE 50
-#define INV_MPU6050_TIME_STAMP_TOR 5
-#define INV_MPU6050_MAX_FIFO_RATE 1000
-#define INV_MPU6050_MIN_FIFO_RATE 4
-#define INV_MPU6050_ONE_K_HZ 1000
+#define INV_MPU6050_TIME_STAMP_TOR 5
+#define INV_MPU6050_MAX_FIFO_RATE 1000
+#define INV_MPU6050_MIN_FIFO_RATE 4
+#define INV_MPU6050_ONE_K_HZ 1000
/* scan element definition */
enum inv_mpu6050_scan {
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 429517117eff..0cd306a72a6e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index c67d83bdc8f0..e108f2a9d827 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -264,7 +264,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
- goto error_ret;
+ return ret;
attrcount++;
ret = __iio_add_chan_devattr("type",
chan,
@@ -275,7 +275,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
- goto error_ret;
+ return ret;
attrcount++;
if (chan->type != IIO_TIMESTAMP)
ret = __iio_add_chan_devattr("en",
@@ -296,10 +296,9 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
- goto error_ret;
+ return ret;
attrcount++;
ret = attrcount;
-error_ret:
return ret;
}
@@ -553,13 +552,13 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
if (indio_dev->setup_ops->predisable) {
ret = indio_dev->setup_ops->predisable(indio_dev);
if (ret)
- goto error_ret;
+ return ret;
}
indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable) {
ret = indio_dev->setup_ops->postdisable(indio_dev);
if (ret)
- goto error_ret;
+ return ret;
}
}
/* Keep a copy of current setup to allow roll back */
@@ -613,7 +612,7 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
else {
kfree(compound_mask);
ret = -EINVAL;
- goto error_ret;
+ return ret;
}
}
} else {
@@ -696,13 +695,10 @@ error_run_postdisable:
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
-
if (insert_buffer)
iio_buffer_deactivate(insert_buffer);
indio_dev->active_scan_mask = old_mask;
kfree(compound_mask);
-error_ret:
-
return ret;
}
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index acc911a836ca..ede16aec20fb 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -540,7 +540,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
enum iio_shared_by shared_by)
{
int ret = 0;
- char *name_format = NULL;
+ char *name = NULL;
char *full_postfix;
sysfs_attr_init(&dev_attr->attr);
@@ -558,7 +558,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
->channel2],
postfix);
} else {
- if (chan->extend_name == NULL)
+ if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
full_postfix = kstrdup(postfix, GFP_KERNEL);
else
full_postfix = kasprintf(GFP_KERNEL,
@@ -572,16 +572,15 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
if (chan->differential) { /* Differential can not have modifier */
switch (shared_by) {
case IIO_SHARED_BY_ALL:
- name_format = kasprintf(GFP_KERNEL, "%s", full_postfix);
+ name = kasprintf(GFP_KERNEL, "%s", full_postfix);
break;
case IIO_SHARED_BY_DIR:
- name_format = kasprintf(GFP_KERNEL, "%s_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s",
iio_direction[chan->output],
full_postfix);
break;
case IIO_SHARED_BY_TYPE:
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
iio_chan_type_name_spec[chan->type],
@@ -593,8 +592,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
ret = -EINVAL;
goto error_free_full_postfix;
}
- name_format
- = kasprintf(GFP_KERNEL,
+ name = kasprintf(GFP_KERNEL,
"%s_%s%d-%s%d_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
@@ -607,16 +605,15 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
} else { /* Single ended */
switch (shared_by) {
case IIO_SHARED_BY_ALL:
- name_format = kasprintf(GFP_KERNEL, "%s", full_postfix);
+ name = kasprintf(GFP_KERNEL, "%s", full_postfix);
break;
case IIO_SHARED_BY_DIR:
- name_format = kasprintf(GFP_KERNEL, "%s_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s",
iio_direction[chan->output],
full_postfix);
break;
case IIO_SHARED_BY_TYPE:
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
full_postfix);
@@ -624,33 +621,24 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
case IIO_SEPARATE:
if (chan->indexed)
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
chan->channel,
full_postfix);
else
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
full_postfix);
break;
}
}
- if (name_format == NULL) {
+ if (name == NULL) {
ret = -ENOMEM;
goto error_free_full_postfix;
}
- dev_attr->attr.name = kasprintf(GFP_KERNEL,
- name_format,
- chan->channel,
- chan->channel2);
- if (dev_attr->attr.name == NULL) {
- ret = -ENOMEM;
- goto error_free_name_format;
- }
+ dev_attr->attr.name = name;
if (readfunc) {
dev_attr->attr.mode |= S_IRUGO;
@@ -661,8 +649,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
dev_attr->attr.mode |= S_IWUSR;
dev_attr->store = writefunc;
}
-error_free_name_format:
- kfree(name_format);
+
error_free_full_postfix:
kfree(full_postfix);
@@ -692,10 +679,8 @@ int __iio_add_chan_devattr(const char *postfix,
struct iio_dev_attr *iio_attr, *t;
iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
- if (iio_attr == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ if (iio_attr == NULL)
+ return -ENOMEM;
ret = __iio_device_attr_init(&iio_attr->dev_attr,
postfix, chan,
readfunc, writefunc, shared_by);
@@ -720,7 +705,6 @@ error_device_attr_deinit:
__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
kfree(iio_attr);
-error_ret:
return ret;
}
@@ -1134,7 +1118,7 @@ int iio_device_register(struct iio_dev *indio_dev)
if (ret) {
dev_err(indio_dev->dev.parent,
"Failed to register debugfs interfaces\n");
- goto error_ret;
+ return ret;
}
ret = iio_device_register_sysfs(indio_dev);
if (ret) {
@@ -1175,7 +1159,6 @@ error_free_sysfs:
iio_device_unregister_sysfs(indio_dev);
error_unreg_debugfs:
iio_device_unregister_debugfs(indio_dev);
-error_ret:
return ret;
}
EXPORT_SYMBOL(iio_device_register);
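
The __iio_device_attr_init() rework above drops the intermediate name_format string and formats chan->channel / chan->channel2 directly, so the final sysfs attribute name is built in a single kasprintf() call. A minimal userspace-style sketch of the same single-pass construction, using asprintf() and made-up direction/type/postfix values purely for illustration:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Illustrative stand-ins for iio_direction[], iio_chan_type_name_spec[]
	 * and the attribute postfix; these values are assumptions, not kernel data. */
	const char *dir = "in";
	const char *type = "voltage";
	const char *postfix = "raw";
	int channel = 0;
	char *name;

	/* One formatting pass, analogous to the indexed IIO_SEPARATE case */
	if (asprintf(&name, "%s_%s%d_%s", dir, type, channel, postfix) < 0)
		return 1;
	printf("%s\n", name);	/* prints "in_voltage0_raw" */
	free(name);
	return 0;
}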
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index c9c1419fe6e0..ea6e06b9c7d4 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -40,6 +40,7 @@ struct iio_event_interface {
struct list_head dev_attr_list;
unsigned long flags;
struct attribute_group group;
+ struct mutex read_lock;
};
/**
@@ -47,16 +48,17 @@ struct iio_event_interface {
* @indio_dev: IIO device structure
* @ev_code: What event
* @timestamp: When the event occurred
+ *
+ * Note: The caller must make sure that this function is not running
+ * concurrently for the same indio_dev more than once.
**/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
struct iio_event_interface *ev_int = indio_dev->event_interface;
struct iio_event_data ev;
- unsigned long flags;
int copied;
/* Does anyone care? */
- spin_lock_irqsave(&ev_int->wait.lock, flags);
if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
ev.id = ev_code;
@@ -64,9 +66,8 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
copied = kfifo_put(&ev_int->det_events, ev);
if (copied != 0)
- wake_up_locked_poll(&ev_int->wait, POLLIN);
+ wake_up_poll(&ev_int->wait, POLLIN);
}
- spin_unlock_irqrestore(&ev_int->wait.lock, flags);
return 0;
}
@@ -87,10 +88,8 @@ static unsigned int iio_event_poll(struct file *filep,
poll_wait(filep, &ev_int->wait, wait);
- spin_lock_irq(&ev_int->wait.lock);
if (!kfifo_is_empty(&ev_int->det_events))
events = POLLIN | POLLRDNORM;
- spin_unlock_irq(&ev_int->wait.lock);
return events;
}
@@ -111,31 +110,40 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
if (count < sizeof(struct iio_event_data))
return -EINVAL;
- spin_lock_irq(&ev_int->wait.lock);
- if (kfifo_is_empty(&ev_int->det_events)) {
- if (filep->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- goto error_unlock;
- }
- /* Blocking on device; waiting for something to be there */
- ret = wait_event_interruptible_locked_irq(ev_int->wait,
+ do {
+ if (kfifo_is_empty(&ev_int->det_events)) {
+ if (filep->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(ev_int->wait,
!kfifo_is_empty(&ev_int->det_events) ||
indio_dev->info == NULL);
- if (ret)
- goto error_unlock;
- if (indio_dev->info == NULL) {
- ret = -ENODEV;
- goto error_unlock;
+ if (ret)
+ return ret;
+ if (indio_dev->info == NULL)
+ return -ENODEV;
}
- /* Single access device so no one else can get the data */
- }
- ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+ if (mutex_lock_interruptible(&ev_int->read_lock))
+ return -ERESTARTSYS;
+ ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+ mutex_unlock(&ev_int->read_lock);
-error_unlock:
- spin_unlock_irq(&ev_int->wait.lock);
+ if (ret)
+ return ret;
+
+ /*
+ * If we couldn't read anything from the fifo (a different
+	 * thread might have been faster) we return -EAGAIN if
+ * the file descriptor is non-blocking, otherwise we go back to
+ * sleep and wait for more data to arrive.
+ */
+ if (copied == 0 && (filep->f_flags & O_NONBLOCK))
+ return -EAGAIN;
- return ret ? ret : copied;
+ } while (copied == 0);
+
+ return copied;
}
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
@@ -143,15 +151,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
struct iio_dev *indio_dev = filep->private_data;
struct iio_event_interface *ev_int = indio_dev->event_interface;
- spin_lock_irq(&ev_int->wait.lock);
- __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- /*
- * In order to maintain a clean state for reopening,
- * clear out any awaiting events. The mask will prevent
- * any new __iio_push_event calls running.
- */
- kfifo_reset_out(&ev_int->det_events);
- spin_unlock_irq(&ev_int->wait.lock);
+ clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
iio_device_put(indio_dev);
@@ -174,22 +174,20 @@ int iio_event_getfd(struct iio_dev *indio_dev)
if (ev_int == NULL)
return -ENODEV;
- spin_lock_irq(&ev_int->wait.lock);
- if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
- spin_unlock_irq(&ev_int->wait.lock);
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags))
return -EBUSY;
- }
- spin_unlock_irq(&ev_int->wait.lock);
+
iio_device_get(indio_dev);
fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
indio_dev, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
- spin_lock_irq(&ev_int->wait.lock);
- __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- spin_unlock_irq(&ev_int->wait.lock);
+ clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
iio_device_put(indio_dev);
+ } else {
+ kfifo_reset_out(&ev_int->det_events);
}
+
return fd;
}
@@ -366,32 +364,31 @@ static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SEPARATE, &chan->event_spec[i].mask_separate);
if (ret < 0)
- goto error_ret;
+ return ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_TYPE,
&chan->event_spec[i].mask_shared_by_type);
if (ret < 0)
- goto error_ret;
+ return ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_DIR,
&chan->event_spec[i].mask_shared_by_dir);
if (ret < 0)
- goto error_ret;
+ return ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_ALL,
&chan->event_spec[i].mask_shared_by_all);
if (ret < 0)
- goto error_ret;
+ return ret;
attrcount += ret;
}
ret = attrcount;
-error_ret:
return ret;
}
@@ -425,6 +422,7 @@ static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
INIT_KFIFO(ev_int->det_events);
init_waitqueue_head(&ev_int->wait);
+ mutex_init(&ev_int->read_lock);
}
static const char *iio_event_group_name = "events";
@@ -440,10 +438,8 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
indio_dev->event_interface =
kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
- if (indio_dev->event_interface == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ if (indio_dev->event_interface == NULL)
+ return -ENOMEM;
INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
@@ -489,8 +485,6 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
error_free_setup_event_lines:
iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
kfree(indio_dev->event_interface);
-error_ret:
-
return ret;
}
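
The event chrdev rework above replaces the waitqueue-lock based single-reader path with a wait_event_interruptible() loop and a read_lock mutex around kfifo_to_user(): a blocking read() now sleeps until an event is queued, while an O_NONBLOCK reader gets -EAGAIN when the fifo is empty. A minimal userspace consumer of that interface might look like the sketch below; the device node path is an assumption, while struct iio_event_data and IIO_GET_EVENT_FD_IOCTL come from the <linux/iio/events.h> uapi header.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/iio/events.h>

int main(void)
{
	const char *dev = "/dev/iio:device0";	/* assumed device node */
	struct iio_event_data ev;
	int fd, event_fd;
	ssize_t n;

	fd = open(dev, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask the IIO core for the per-device event file descriptor */
	if (ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}
	close(fd);	/* the event fd holds its own device reference */

	for (;;) {
		/* Blocking read: sleeps until an event is pushed */
		n = read(event_fd, &ev, sizeof(ev));
		if (n == sizeof(ev))
			printf("event id 0x%llx at %lld ns\n",
			       (unsigned long long)ev.id,
			       (long long)ev.timestamp);
		else if (n < 0 && errno != EINTR)
			break;	/* EAGAIN only when opened O_NONBLOCK */
	}
	close(event_fd);
	return 0;
}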
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 766fab24b720..3383b025f62e 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -62,10 +62,9 @@ int iio_trigger_register(struct iio_trigger *trig_info)
int ret;
trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
- if (trig_info->id < 0) {
- ret = trig_info->id;
- goto error_ret;
- }
+ if (trig_info->id < 0)
+ return trig_info->id;
+
/* Set the name used for the sysfs directory etc */
dev_set_name(&trig_info->dev, "trigger%ld",
(unsigned long) trig_info->id);
@@ -83,7 +82,6 @@ int iio_trigger_register(struct iio_trigger *trig_info)
error_unregister_id:
ida_simple_remove(&iio_trigger_ida, trig_info->id);
-error_ret:
return ret;
}
EXPORT_SYMBOL(iio_trigger_register);
@@ -234,13 +232,12 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
ret = trig->ops->set_trigger_state(trig, false);
if (ret)
- goto error_ret;
+ return ret;
}
iio_trigger_put_irq(trig, pf->irq);
free_irq(pf->irq, pf);
module_put(pf->indio_dev->info->driver_module);
-error_ret:
return ret;
}
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index d12b2a0dbfbc..c89740d4748f 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -73,6 +73,20 @@ config HID_SENSOR_ALS
Say yes here to build support for the HID SENSOR
Ambient light sensor.
+config HID_SENSOR_PROX
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID PROX"
+ help
+ Say yes here to build support for the HID SENSOR
+ Proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-sensor-prox.
+
config SENSORS_LM3533
tristate "LM3533 ambient light sensor"
depends on MFD_LM3533
@@ -90,6 +104,18 @@ config SENSORS_LM3533
changes. The ALS-control output values can be set per zone for the
three current output channels.
+config LTR501
+ tristate "LTR-501ALS-01 light sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for the Lite-On LTR-501ALS-01
+ ambient light and proximity sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called ltr501.
+
config TCS3472
tristate "TAOS TCS3472 color light-to-digital converter"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 60e35ac07ff0..3eb36e5151fa 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -9,7 +9,9 @@ obj-$(CONFIG_CM32181) += cm32181.o
obj-$(CONFIG_CM36651) += cm36651.o
obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
+obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
+obj-$(CONFIG_LTR501) += ltr501.o
obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
obj-$(CONFIG_TCS3472) += tcs3472.o
obj-$(CONFIG_TSL4531) += tsl4531.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index f3068477b466..09ad5f1ce539 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
@@ -120,7 +119,6 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct adjd_s311_data *data = iio_priv(indio_dev);
s64 time_ns = iio_get_time_ns();
- int len = 0;
int i, j = 0;
int ret = adjd_s311_req_data(indio_dev);
@@ -135,7 +133,6 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
goto done;
data->buffer[j++] = ret & ADJD_S311_DATA_MASK;
- len += 2;
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, time_ns);
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
new file mode 100644
index 000000000000..1894ab196f97
--- /dev/null
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -0,0 +1,375 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+#define CHANNEL_SCAN_INDEX_PRESENCE 0
+
+struct prox_state {
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info prox_attr;
+ u32 human_presence;
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec prox_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .modified = 1,
+ .channel2 = IIO_NO_MOD,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_PRESENCE,
+ }
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void prox_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+ int channel, int size)
+{
+ channels[channel].scan_type.sign = 's';
+ /* Real storage bits will change based on the report desc. */
+ channels[channel].scan_type.realbits = size * 8;
+ /* Maximum size of a sample to capture is u32 */
+ channels[channel].scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/* Channel read_raw handler */
+static int prox_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct prox_state *prox_state = iio_priv(indio_dev);
+ int report_id = -1;
+ u32 address;
+ int ret;
+ int ret_type;
+
+ *val = 0;
+ *val2 = 0;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->scan_index) {
+ case CHANNEL_SCAN_INDEX_PRESENCE:
+ report_id = prox_state->prox_attr.report_id;
+ address =
+ HID_USAGE_SENSOR_HUMAN_PRESENCE;
+ break;
+ default:
+ report_id = -1;
+ break;
+ }
+ if (report_id >= 0)
+ *val = sensor_hub_input_attr_get_raw_value(
+ prox_state->common_attributes.hsdev,
+ HID_USAGE_SENSOR_PROX, address,
+ report_id);
+ else {
+ *val = 0;
+ return -EINVAL;
+ }
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = prox_state->prox_attr.units;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = hid_sensor_convert_exponent(
+ prox_state->prox_attr.unit_expo);
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_read_samp_freq_value(
+ &prox_state->common_attributes, val, val2);
+ ret_type = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_read_raw_hyst_value(
+ &prox_state->common_attributes, val, val2);
+ ret_type = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret_type = -EINVAL;
+ break;
+ }
+
+ return ret_type;
+}
+
+/* Channel write_raw handler */
+static int prox_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct prox_state *prox_state = iio_priv(indio_dev);
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_write_samp_freq_value(
+ &prox_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_write_raw_hyst_value(
+ &prox_state->common_attributes, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info prox_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &prox_read_raw,
+ .write_raw = &prox_write_raw,
+};
+
+/* Function to push data to buffer */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+ int len)
+{
+ dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+ iio_push_to_buffers(indio_dev, data);
+}
+
+/* Callback handler to send event after all samples are received and captured */
+static int prox_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct prox_state *prox_state = iio_priv(indio_dev);
+
+ dev_dbg(&indio_dev->dev, "prox_proc_event [%d]\n",
+ prox_state->common_attributes.data_ready);
+ if (prox_state->common_attributes.data_ready)
+ hid_sensor_push_data(indio_dev,
+ &prox_state->human_presence,
+ sizeof(prox_state->human_presence));
+
+ return 0;
+}
+
+/* Capture samples in local storage */
+static int prox_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ size_t raw_len, char *raw_data,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct prox_state *prox_state = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_HUMAN_PRESENCE:
+ prox_state->human_presence = *(u32 *)raw_data;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* Parse report which is specific to a usage id */
+static int prox_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec *channels,
+ unsigned usage_id,
+ struct prox_state *st)
+{
+ int ret;
+
+ ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_HUMAN_PRESENCE,
+ &st->prox_attr);
+ if (ret < 0)
+ return ret;
+ prox_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_PRESENCE,
+ st->prox_attr.size);
+
+ dev_dbg(&pdev->dev, "prox %x:%x\n", st->prox_attr.index,
+ st->prox_attr.report_id);
+
+	/* Set Sensitivity field ids when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_PRESENCE,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+ return ret;
+}
+
+/* Function to initialize the processing for usage id */
+static int hid_prox_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ static const char *name = "prox";
+ struct iio_dev *indio_dev;
+ struct prox_state *prox_state;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_chan_spec *channels;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct prox_state));
+ if (!indio_dev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, indio_dev);
+
+ prox_state = iio_priv(indio_dev);
+ prox_state->common_attributes.hsdev = hsdev;
+ prox_state->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev, HID_USAGE_SENSOR_PROX,
+ &prox_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes\n");
+ return ret;
+ }
+
+ channels = kmemdup(prox_channels, sizeof(prox_channels), GFP_KERNEL);
+ if (!channels) {
+ dev_err(&pdev->dev, "failed to duplicate channels\n");
+ return -ENOMEM;
+ }
+
+ ret = prox_parse_report(pdev, hsdev, channels,
+ HID_USAGE_SENSOR_PROX, prox_state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup attributes\n");
+ goto error_free_dev_mem;
+ }
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels =
+ ARRAY_SIZE(prox_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &prox_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+ goto error_free_dev_mem;
+ }
+ prox_state->common_attributes.data_ready = false;
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+ &prox_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "trigger setup failed\n");
+ goto error_unreg_buffer_funcs;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "device register failed\n");
+ goto error_remove_trigger;
+ }
+
+ prox_state->callbacks.send_event = prox_proc_event;
+ prox_state->callbacks.capture_sample = prox_capture_sample;
+ prox_state->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_PROX,
+ &prox_state->callbacks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "callback reg failed\n");
+ goto error_iio_unreg;
+ }
+
+ return ret;
+
+error_iio_unreg:
+ iio_device_unregister(indio_dev);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&prox_state->common_attributes);
+error_unreg_buffer_funcs:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+ kfree(indio_dev->channels);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_prox_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct prox_state *prox_state = iio_priv(indio_dev);
+
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PROX);
+ iio_device_unregister(indio_dev);
+ hid_sensor_remove_trigger(&prox_state->common_attributes);
+ iio_triggered_buffer_cleanup(indio_dev);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+static struct platform_device_id hid_prox_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200011",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_prox_ids);
+
+static struct platform_driver hid_prox_platform_driver = {
+ .id_table = hid_prox_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = hid_prox_probe,
+ .remove = hid_prox_remove,
+};
+module_platform_driver(hid_prox_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Proximity");
+MODULE_AUTHOR("Archana Patni <archana.patni@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
new file mode 100644
index 000000000000..62b7072af4de
--- /dev/null
+++ b/drivers/iio/light/ltr501.c
@@ -0,0 +1,445 @@
+/*
+ * ltr501.c - Support for Lite-On LTR501 ambient light and proximity sensor
+ *
+ * Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * 7-bit I2C slave address 0x23
+ *
+ * TODO: interrupt, threshold, measurement rate, IR LED characteristics
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define LTR501_DRV_NAME "ltr501"
+
+#define LTR501_ALS_CONTR 0x80 /* ALS operation mode, SW reset */
+#define LTR501_PS_CONTR 0x81 /* PS operation mode */
+#define LTR501_PART_ID 0x86
+#define LTR501_MANUFAC_ID 0x87
+#define LTR501_ALS_DATA1 0x88 /* 16-bit, little endian */
+#define LTR501_ALS_DATA0 0x8a /* 16-bit, little endian */
+#define LTR501_ALS_PS_STATUS 0x8c
+#define LTR501_PS_DATA 0x8d /* 16-bit, little endian */
+
+#define LTR501_ALS_CONTR_SW_RESET BIT(2)
+#define LTR501_CONTR_PS_GAIN_MASK (BIT(3) | BIT(2))
+#define LTR501_CONTR_PS_GAIN_SHIFT 2
+#define LTR501_CONTR_ALS_GAIN_MASK BIT(3)
+#define LTR501_CONTR_ACTIVE BIT(1)
+
+#define LTR501_STATUS_ALS_RDY BIT(2)
+#define LTR501_STATUS_PS_RDY BIT(0)
+
+#define LTR501_PS_DATA_MASK 0x7ff
+
+struct ltr501_data {
+ struct i2c_client *client;
+ struct mutex lock_als, lock_ps;
+ u8 als_contr, ps_contr;
+};
+
+static int ltr501_drdy(struct ltr501_data *data, u8 drdy_mask)
+{
+ int tries = 100;
+ int ret;
+
+ while (tries--) {
+ ret = i2c_smbus_read_byte_data(data->client,
+ LTR501_ALS_PS_STATUS);
+ if (ret < 0)
+ return ret;
+ if ((ret & drdy_mask) == drdy_mask)
+ return 0;
+ msleep(25);
+ }
+
+ dev_err(&data->client->dev, "ltr501_drdy() failed, data not ready\n");
+ return -EIO;
+}
+
+static int ltr501_read_als(struct ltr501_data *data, __le16 buf[2])
+{
+ int ret = ltr501_drdy(data, LTR501_STATUS_ALS_RDY);
+ if (ret < 0)
+ return ret;
+	/* always read both ALS channels in the given order */
+ return i2c_smbus_read_i2c_block_data(data->client,
+ LTR501_ALS_DATA1, 2 * sizeof(__le16), (u8 *) buf);
+}
+
+static int ltr501_read_ps(struct ltr501_data *data)
+{
+ int ret = ltr501_drdy(data, LTR501_STATUS_PS_RDY);
+ if (ret < 0)
+ return ret;
+ return i2c_smbus_read_word_data(data->client, LTR501_PS_DATA);
+}
+
+#define LTR501_INTENSITY_CHANNEL(_idx, _addr, _mod, _shared) { \
+ .type = IIO_INTENSITY, \
+ .modified = 1, \
+ .address = (_addr), \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = (_shared), \
+ .scan_index = (_idx), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+static const struct iio_chan_spec ltr501_channels[] = {
+ LTR501_INTENSITY_CHANNEL(0, LTR501_ALS_DATA0, IIO_MOD_LIGHT_BOTH, 0),
+ LTR501_INTENSITY_CHANNEL(1, LTR501_ALS_DATA1, IIO_MOD_LIGHT_IR,
+ BIT(IIO_CHAN_INFO_SCALE)),
+ {
+ .type = IIO_PROXIMITY,
+ .address = LTR501_PS_DATA,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 11,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static const int ltr501_ps_gain[4][2] = {
+ {1, 0}, {0, 250000}, {0, 125000}, {0, 62500}
+};
+
+static int ltr501_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ltr501_data *data = iio_priv(indio_dev);
+ __le16 buf[2];
+ int ret, i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ mutex_lock(&data->lock_als);
+ ret = ltr501_read_als(data, buf);
+ mutex_unlock(&data->lock_als);
+ if (ret < 0)
+ return ret;
+ *val = le16_to_cpu(chan->address == LTR501_ALS_DATA1 ?
+ buf[0] : buf[1]);
+ return IIO_VAL_INT;
+ case IIO_PROXIMITY:
+ mutex_lock(&data->lock_ps);
+ ret = ltr501_read_ps(data);
+ mutex_unlock(&data->lock_ps);
+ if (ret < 0)
+ return ret;
+ *val = ret & LTR501_PS_DATA_MASK;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ if (data->als_contr & LTR501_CONTR_ALS_GAIN_MASK) {
+ *val = 0;
+ *val2 = 5000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ } else {
+ *val = 1;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ }
+ case IIO_PROXIMITY:
+ i = (data->ps_contr & LTR501_CONTR_PS_GAIN_MASK) >>
+ LTR501_CONTR_PS_GAIN_SHIFT;
+ *val = ltr501_ps_gain[i][0];
+ *val2 = ltr501_ps_gain[i][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ }
+ return -EINVAL;
+}
+
+static int ltr501_get_ps_gain_index(int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ltr501_ps_gain); i++)
+ if (val == ltr501_ps_gain[i][0] && val2 == ltr501_ps_gain[i][1])
+ return i;
+
+ return -1;
+}
+
+static int ltr501_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ltr501_data *data = iio_priv(indio_dev);
+ int i;
+
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ if (val == 0 && val2 == 5000)
+ data->als_contr |= LTR501_CONTR_ALS_GAIN_MASK;
+ else if (val == 1 && val2 == 0)
+ data->als_contr &= ~LTR501_CONTR_ALS_GAIN_MASK;
+ else
+ return -EINVAL;
+ return i2c_smbus_write_byte_data(data->client,
+ LTR501_ALS_CONTR, data->als_contr);
+ case IIO_PROXIMITY:
+ i = ltr501_get_ps_gain_index(val, val2);
+ if (i < 0)
+ return -EINVAL;
+ data->ps_contr &= ~LTR501_CONTR_PS_GAIN_MASK;
+ data->ps_contr |= i << LTR501_CONTR_PS_GAIN_SHIFT;
+ return i2c_smbus_write_byte_data(data->client,
+ LTR501_PS_CONTR, data->ps_contr);
+ default:
+ return -EINVAL;
+ }
+ }
+ return -EINVAL;
+}
+
+static IIO_CONST_ATTR(in_proximity_scale_available, "1 0.25 0.125 0.0625");
+static IIO_CONST_ATTR(in_intensity_scale_available, "1 0.005");
+
+static struct attribute *ltr501_attributes[] = {
+ &iio_const_attr_in_proximity_scale_available.dev_attr.attr,
+ &iio_const_attr_in_intensity_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ltr501_attribute_group = {
+ .attrs = ltr501_attributes,
+};
+
+static const struct iio_info ltr501_info = {
+ .read_raw = ltr501_read_raw,
+ .write_raw = ltr501_write_raw,
+ .attrs = &ltr501_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int ltr501_write_contr(struct i2c_client *client, u8 als_val, u8 ps_val)
+{
+ int ret = i2c_smbus_write_byte_data(client, LTR501_ALS_CONTR, als_val);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client, LTR501_PS_CONTR, ps_val);
+}
+
+static irqreturn_t ltr501_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ltr501_data *data = iio_priv(indio_dev);
+ u16 buf[8];
+ __le16 als_buf[2];
+ u8 mask = 0;
+ int j = 0;
+ int ret;
+
+ memset(buf, 0, sizeof(buf));
+
+ /* figure out which data needs to be ready */
+ if (test_bit(0, indio_dev->active_scan_mask) ||
+ test_bit(1, indio_dev->active_scan_mask))
+ mask |= LTR501_STATUS_ALS_RDY;
+ if (test_bit(2, indio_dev->active_scan_mask))
+ mask |= LTR501_STATUS_PS_RDY;
+
+ ret = ltr501_drdy(data, mask);
+ if (ret < 0)
+ goto done;
+
+ if (mask & LTR501_STATUS_ALS_RDY) {
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ LTR501_ALS_DATA1, sizeof(als_buf), (u8 *) als_buf);
+ if (ret < 0)
+			goto done;
+ if (test_bit(0, indio_dev->active_scan_mask))
+ buf[j++] = le16_to_cpu(als_buf[1]);
+ if (test_bit(1, indio_dev->active_scan_mask))
+ buf[j++] = le16_to_cpu(als_buf[0]);
+ }
+
+ if (mask & LTR501_STATUS_PS_RDY) {
+ ret = i2c_smbus_read_word_data(data->client, LTR501_PS_DATA);
+ if (ret < 0)
+ goto done;
+ buf[j++] = ret & LTR501_PS_DATA_MASK;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ltr501_init(struct ltr501_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, LTR501_ALS_CONTR);
+ if (ret < 0)
+ return ret;
+ data->als_contr = ret | LTR501_CONTR_ACTIVE;
+
+ ret = i2c_smbus_read_byte_data(data->client, LTR501_PS_CONTR);
+ if (ret < 0)
+ return ret;
+ data->ps_contr = ret | LTR501_CONTR_ACTIVE;
+
+ return ltr501_write_contr(data->client, data->als_contr,
+ data->ps_contr);
+}
+
+static int ltr501_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ltr501_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock_als);
+ mutex_init(&data->lock_ps);
+
+ ret = i2c_smbus_read_byte_data(data->client, LTR501_PART_ID);
+ if (ret < 0)
+ return ret;
+ if ((ret >> 4) != 0x8)
+ return -ENODEV;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ltr501_info;
+ indio_dev->channels = ltr501_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ltr501_channels);
+ indio_dev->name = LTR501_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = ltr501_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ltr501_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_buffer;
+
+ return 0;
+
+error_unreg_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+static int ltr501_powerdown(struct ltr501_data *data)
+{
+ return ltr501_write_contr(data->client,
+ data->als_contr & ~LTR501_CONTR_ACTIVE,
+ data->ps_contr & ~LTR501_CONTR_ACTIVE);
+}
+
+static int ltr501_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ ltr501_powerdown(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ltr501_suspend(struct device *dev)
+{
+ struct ltr501_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+ return ltr501_powerdown(data);
+}
+
+static int ltr501_resume(struct device *dev)
+{
+ struct ltr501_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return ltr501_write_contr(data->client, data->als_contr,
+ data->ps_contr);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ltr501_pm_ops, ltr501_suspend, ltr501_resume);
+
+static const struct i2c_device_id ltr501_id[] = {
+ { "ltr501", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ltr501_id);
+
+static struct i2c_driver ltr501_driver = {
+ .driver = {
+ .name = LTR501_DRV_NAME,
+ .pm = &ltr501_pm_ops,
+ .owner = THIS_MODULE,
+ },
+ .probe = ltr501_probe,
+ .remove = ltr501_remove,
+ .id_table = ltr501_id,
+};
+
+module_i2c_driver(ltr501_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Lite-On LTR501 ambient light and proximity sensor driver");
+MODULE_LICENSE("GPL");
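
Given the channel definitions above, the ltr501 driver exposes its proximity reading as in_proximity_raw with a separate in_proximity_scale (the available factors are advertised via in_proximity_scale_available). A small sketch of a userspace reader that combines raw value and scale, assuming the part enumerates as iio:device0:

#include <stdio.h>

/* Read a single numeric value from a sysfs attribute */
static int read_sysfs_double(const char *path, double *val)
{
	FILE *f = fopen(path, "r");
	int ok;

	if (!f)
		return -1;
	ok = (fscanf(f, "%lf", val) == 1);
	fclose(f);
	return ok ? 0 : -1;
}

int main(void)
{
	/* Device index is an assumption; match it via the "name" attribute */
	const char *base = "/sys/bus/iio/devices/iio:device0";
	char path[128];
	double raw, scale;

	snprintf(path, sizeof(path), "%s/in_proximity_raw", base);
	if (read_sysfs_double(path, &raw))
		return 1;
	snprintf(path, sizeof(path), "%s/in_proximity_scale", base);
	if (read_sysfs_double(path, &scale))
		return 1;
	printf("proximity: %g (raw %.0f, scale %g)\n", raw * scale, raw, scale);
	return 0;
}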
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 887fecf1f9bb..fe063a0a21cd 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -179,7 +179,6 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct tcs3472_data *data = iio_priv(indio_dev);
- int len = 0;
int i, j = 0;
int ret = tcs3472_req_data(data);
@@ -194,7 +193,6 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
goto done;
data->buffer[j++] = ret;
- len += 2;
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 05423543f89d..74866d1efd1b 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -513,6 +513,7 @@ static int ak8975_probe(struct i2c_client *client,
indio_dev->channels = ak8975_channels;
indio_dev->num_channels = ARRAY_SIZE(ak8975_channels);
indio_dev->info = &ak8975_info;
+ indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
err = iio_device_register(indio_dev);
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index f66955fb3509..8b77782474d7 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -183,9 +183,17 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = 1000;
- return IIO_VAL_INT_PLUS_MICRO;
+ switch (chan->type) {
+ case IIO_MAGN:
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 1000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
i = data->ctrl_reg1 >> MAG3110_CTRL_DR_SHIFT;
*val = mag3110_samp_freq[i][0];
@@ -270,7 +278,8 @@ static const struct iio_chan_spec mag3110_channels[] = {
MAG3110_CHANNEL(Z, 2),
{
.type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 3,
.scan_type = {
.sign = 's',
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index a8b9cae5c173..d88ff17fedb2 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -5,6 +5,20 @@
menu "Pressure sensors"
+config HID_SENSOR_PRESS
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID PRESS"
+ help
+ Say yes here to build support for the HID SENSOR
+	  Pressure driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hid-sensor-press.
+
config MPL3115
tristate "Freescale MPL3115A2 pressure sensor driver"
depends on I2C
@@ -26,7 +40,7 @@ config IIO_ST_PRESS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics pressure
- sensors: LPS001WP, LPS331AP.
+ sensors: LPS001WP, LPS25H, LPS331AP.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 42bb9fcf5436..4a57bf65b04b 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -3,6 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
obj-$(CONFIG_MPL3115) += mpl3115.o
obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
st_pressure-y := st_pressure_core.o
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
new file mode 100644
index 000000000000..e0e6409aa94e
--- /dev/null
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -0,0 +1,376 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+#define CHANNEL_SCAN_INDEX_PRESSURE 0
+
+struct press_state {
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info press_attr;
+ u32 press_data;
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec press_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .modified = 1,
+ .channel2 = IIO_NO_MOD,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_PRESSURE,
+ }
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void press_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+ int channel, int size)
+{
+ channels[channel].scan_type.sign = 's';
+ /* Real storage bits will change based on the report desc. */
+ channels[channel].scan_type.realbits = size * 8;
+ /* Maximum size of a sample to capture is u32 */
+ channels[channel].scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/* Channel read_raw handler */
+static int press_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct press_state *press_state = iio_priv(indio_dev);
+ int report_id = -1;
+ u32 address;
+ int ret;
+ int ret_type;
+
+ *val = 0;
+ *val2 = 0;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->scan_index) {
+ case CHANNEL_SCAN_INDEX_PRESSURE:
+ report_id = press_state->press_attr.report_id;
+ address =
+ HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE;
+ break;
+ default:
+ report_id = -1;
+ break;
+ }
+ if (report_id >= 0)
+ *val = sensor_hub_input_attr_get_raw_value(
+ press_state->common_attributes.hsdev,
+ HID_USAGE_SENSOR_PRESSURE, address,
+ report_id);
+ else {
+ *val = 0;
+ return -EINVAL;
+ }
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = press_state->press_attr.units;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = hid_sensor_convert_exponent(
+ press_state->press_attr.unit_expo);
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_read_samp_freq_value(
+ &press_state->common_attributes, val, val2);
+ ret_type = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_read_raw_hyst_value(
+ &press_state->common_attributes, val, val2);
+ ret_type = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret_type = -EINVAL;
+ break;
+ }
+
+ return ret_type;
+}
+
+/* Channel write_raw handler */
+static int press_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct press_state *press_state = iio_priv(indio_dev);
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_write_samp_freq_value(
+ &press_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_write_raw_hyst_value(
+ &press_state->common_attributes, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info press_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &press_read_raw,
+ .write_raw = &press_write_raw,
+};
+
+/* Function to push data to buffer */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+ int len)
+{
+ dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+ iio_push_to_buffers(indio_dev, data);
+}
+
+/* Callback handler to send event after all samples are received and captured */
+static int press_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct press_state *press_state = iio_priv(indio_dev);
+
+ dev_dbg(&indio_dev->dev, "press_proc_event [%d]\n",
+ press_state->common_attributes.data_ready);
+ if (press_state->common_attributes.data_ready)
+ hid_sensor_push_data(indio_dev,
+ &press_state->press_data,
+ sizeof(press_state->press_data));
+
+ return 0;
+}
+
+/* Capture samples in local storage */
+static int press_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ size_t raw_len, char *raw_data,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct press_state *press_state = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE:
+ press_state->press_data = *(u32 *)raw_data;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* Parse report which is specific to a usage id */
+static int press_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec *channels,
+ unsigned usage_id,
+ struct press_state *st)
+{
+ int ret;
+
+ ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE,
+ &st->press_attr);
+ if (ret < 0)
+ return ret;
+ press_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_PRESSURE,
+ st->press_attr.size);
+
+ dev_dbg(&pdev->dev, "press %x:%x\n", st->press_attr.index,
+ st->press_attr.report_id);
+
+	/* Set Sensitivity field ids when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ATMOSPHERIC_PRESSURE,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+ return ret;
+}
+
+/* Function to initialize the processing for usage id */
+static int hid_press_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ static const char *name = "press";
+ struct iio_dev *indio_dev;
+ struct press_state *press_state;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_chan_spec *channels;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct press_state));
+ if (!indio_dev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, indio_dev);
+
+ press_state = iio_priv(indio_dev);
+ press_state->common_attributes.hsdev = hsdev;
+ press_state->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev,
+ HID_USAGE_SENSOR_PRESSURE,
+ &press_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes\n");
+ return ret;
+ }
+
+ channels = kmemdup(press_channels, sizeof(press_channels), GFP_KERNEL);
+ if (!channels) {
+ dev_err(&pdev->dev, "failed to duplicate channels\n");
+ return -ENOMEM;
+ }
+
+ ret = press_parse_report(pdev, hsdev, channels,
+ HID_USAGE_SENSOR_PRESSURE, press_state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup attributes\n");
+ goto error_free_dev_mem;
+ }
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels =
+ ARRAY_SIZE(press_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &press_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+ goto error_free_dev_mem;
+ }
+ press_state->common_attributes.data_ready = false;
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+ &press_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "trigger setup failed\n");
+ goto error_unreg_buffer_funcs;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "device register failed\n");
+ goto error_remove_trigger;
+ }
+
+ press_state->callbacks.send_event = press_proc_event;
+ press_state->callbacks.capture_sample = press_capture_sample;
+ press_state->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_PRESSURE,
+ &press_state->callbacks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "callback reg failed\n");
+ goto error_iio_unreg;
+ }
+
+ return ret;
+
+error_iio_unreg:
+ iio_device_unregister(indio_dev);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&press_state->common_attributes);
+error_unreg_buffer_funcs:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+ kfree(indio_dev->channels);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_press_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct press_state *press_state = iio_priv(indio_dev);
+
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PRESSURE);
+ iio_device_unregister(indio_dev);
+ hid_sensor_remove_trigger(&press_state->common_attributes);
+ iio_triggered_buffer_cleanup(indio_dev);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+static struct platform_device_id hid_press_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200031",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_press_ids);
+
+static struct platform_driver hid_press_platform_driver = {
+ .id_table = hid_press_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = hid_press_probe,
+ .remove = hid_press_remove,
+};
+module_platform_driver(hid_press_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Pressure");
+MODULE_AUTHOR("Archana Patni <archana.patni@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index ac8c8ab723e5..ba6d0c520e63 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -77,7 +77,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct mpl3115_data *data = iio_priv(indio_dev);
- s32 tmp = 0;
+ __be32 tmp = 0;
int ret;
switch (mask) {
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index 049c21acf1f0..242943c0c4e4 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -15,6 +15,7 @@
#include <linux/iio/common/st_sensors.h>
#define LPS001WP_PRESS_DEV_NAME "lps001wp"
+#define LPS25H_PRESS_DEV_NAME "lps25h"
#define LPS331AP_PRESS_DEV_NAME "lps331ap"
/**
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 58083f9d51c5..7418768ed49c 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -40,6 +40,9 @@
/* FULLSCALE */
#define ST_PRESS_FS_AVL_1260MB 1260
+#define ST_PRESS_1_OUT_XL_ADDR 0x28
+#define ST_TEMP_1_OUT_L_ADDR 0x2b
+
/* CUSTOM VALUES FOR LPS331AP SENSOR */
#define ST_PRESS_LPS331AP_WAI_EXP 0xbb
#define ST_PRESS_LPS331AP_ODR_ADDR 0x20
@@ -62,8 +65,6 @@
#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
-#define ST_PRESS_LPS331AP_OUT_XL_ADDR 0x28
-#define ST_TEMP_LPS331AP_OUT_L_ADDR 0x2b
/* CUSTOM VALUES FOR LPS001WP SENSOR */
#define ST_PRESS_LPS001WP_WAI_EXP 0xba
@@ -80,11 +81,36 @@
#define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28
#define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a
-static const struct iio_chan_spec st_press_lps331ap_channels[] = {
+/* CUSTOM VALUES FOR LPS25H SENSOR */
+#define ST_PRESS_LPS25H_WAI_EXP 0xbd
+#define ST_PRESS_LPS25H_ODR_ADDR 0x20
+#define ST_PRESS_LPS25H_ODR_MASK 0x70
+#define ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL 0x01
+#define ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL 0x02
+#define ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL 0x03
+#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
+#define ST_PRESS_LPS25H_PW_ADDR 0x20
+#define ST_PRESS_LPS25H_PW_MASK 0x80
+#define ST_PRESS_LPS25H_FS_ADDR 0x00
+#define ST_PRESS_LPS25H_FS_MASK 0x00
+#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00
+#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
+#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
+#define ST_PRESS_LPS25H_BDU_ADDR 0x20
+#define ST_PRESS_LPS25H_BDU_MASK 0x04
+#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
+#define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01
+#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
+#define ST_PRESS_LPS25H_MULTIREAD_BIT true
+#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
+#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
+#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
+
+static const struct iio_chan_spec st_press_1_channels[] = {
{
.type = IIO_PRESSURE,
.channel2 = IIO_NO_MOD,
- .address = ST_PRESS_LPS331AP_OUT_XL_ADDR,
+ .address = ST_PRESS_1_OUT_XL_ADDR,
.scan_index = ST_SENSORS_SCAN_X,
.scan_type = {
.sign = 'u',
@@ -99,7 +125,7 @@ static const struct iio_chan_spec st_press_lps331ap_channels[] = {
{
.type = IIO_TEMP,
.channel2 = IIO_NO_MOD,
- .address = ST_TEMP_LPS331AP_OUT_L_ADDR,
+ .address = ST_TEMP_1_OUT_L_ADDR,
.scan_index = -1,
.scan_type = {
.sign = 'u',
@@ -156,8 +182,8 @@ static const struct st_sensors st_press_sensors[] = {
.sensors_supported = {
[0] = LPS331AP_PRESS_DEV_NAME,
},
- .ch = (struct iio_chan_spec *)st_press_lps331ap_channels,
- .num_ch = ARRAY_SIZE(st_press_lps331ap_channels),
+ .ch = (struct iio_chan_spec *)st_press_1_channels,
+ .num_ch = ARRAY_SIZE(st_press_1_channels),
.odr = {
.addr = ST_PRESS_LPS331AP_ODR_ADDR,
.mask = ST_PRESS_LPS331AP_ODR_MASK,
@@ -233,6 +259,53 @@ static const struct st_sensors st_press_sensors[] = {
.multi_read_bit = ST_PRESS_LPS001WP_MULTIREAD_BIT,
.bootime = 2,
},
+ {
+ .wai = ST_PRESS_LPS25H_WAI_EXP,
+ .sensors_supported = {
+ [0] = LPS25H_PRESS_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_press_1_channels,
+ .num_ch = ARRAY_SIZE(st_press_1_channels),
+ .odr = {
+ .addr = ST_PRESS_LPS25H_ODR_ADDR,
+ .mask = ST_PRESS_LPS25H_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL, },
+ { 7, ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL, },
+ { 13, ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL, },
+ { 25, ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_PRESS_LPS25H_PW_ADDR,
+ .mask = ST_PRESS_LPS25H_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .addr = ST_PRESS_LPS25H_FS_ADDR,
+ .mask = ST_PRESS_LPS25H_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_PRESS_FS_AVL_1260MB,
+ .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
+ .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
+ .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_PRESS_LPS25H_BDU_ADDR,
+ .mask = ST_PRESS_LPS25H_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
+ },
+ .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
+ .bootime = 2,
+ },
};
static int st_press_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 51eab7fcb194..3cd73e39b840 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -50,6 +50,7 @@ static int st_press_i2c_remove(struct i2c_client *client)
static const struct i2c_device_id st_press_id_table[] = {
{ LPS001WP_PRESS_DEV_NAME },
+ { LPS25H_PRESS_DEV_NAME },
{ LPS331AP_PRESS_DEV_NAME },
{},
};
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 27322af6d665..f45d430ec529 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -49,6 +49,7 @@ static int st_press_spi_remove(struct spi_device *spi)
static const struct spi_device_id st_press_id_table[] = {
{ LPS001WP_PRESS_DEV_NAME },
+ { LPS25H_PRESS_DEV_NAME },
{ LPS331AP_PRESS_DEV_NAME },
{},
};
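
Adding LPS25H support touches three places that must agree on the same device name: the register/ODR macro block and a new st_press_sensors[] entry in the core (reusing the renamed st_press_1_channels array, since LPS25H and LPS331AP share the same output register layout), plus the name in both the I2C and SPI id tables so either bus can bind the driver. A hedged sketch of how a probe-time lookup over such a catalog typically works — illustrative only, not the actual st_sensors core code:

#include <linux/string.h>
#include <linux/iio/common/st_sensors.h>

/* Illustrative: find the catalog entry whose supported-name list
 * contains the name the I2C/SPI core matched on. */
static const struct st_sensors *demo_find_sensor(const struct st_sensors *tbl,
						 int tbl_len,
						 int names_per_entry,
						 const char *name)
{
	int i, n;

	for (i = 0; i < tbl_len; i++)
		for (n = 0; n < names_per_entry; n++)
			if (tbl[i].sensors_supported[n] &&
			    !strcmp(tbl[i].sensors_supported[n], name))
				return &tbl[i];
	return NULL;
}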
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0601b9daf840..c3239170d8b7 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -349,23 +349,6 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
grh, &av->ah_attr);
}
-int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac)
-{
- struct cm_id_private *cm_id_priv;
-
- cm_id_priv = container_of(id, struct cm_id_private, id);
-
- if (smac != NULL)
- memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac));
-
- if (alt_smac != NULL)
- memcpy(cm_id_priv->alt_av.smac, alt_smac,
- sizeof(cm_id_priv->alt_av.smac));
-
- return 0;
-}
-EXPORT_SYMBOL(ib_update_cm_av);
-
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
struct cm_device *cm_dev;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 199958d9ddc8..42c3058e6e9c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1284,15 +1284,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
struct rdma_id_private *listen_id, *conn_id;
struct rdma_cm_event event;
int offset, ret;
- u8 smac[ETH_ALEN];
- u8 alt_smac[ETH_ALEN];
- u8 *psmac = smac;
- u8 *palt_smac = alt_smac;
- int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
- RDMA_TRANSPORT_IB) &&
- (rdma_port_get_link_layer(cm_id->device,
- ib_event->param.req_rcvd.port) ==
- IB_LINK_LAYER_ETHERNET));
listen_id = cm_id->context;
if (!cma_check_req_qp_type(&listen_id->id, ib_event))
@@ -1336,28 +1327,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
ret = conn_id->id.event_handler(&conn_id->id, &event);
if (ret)
goto err3;
-
- if (is_iboe) {
- if (ib_event->param.req_rcvd.primary_path != NULL)
- rdma_addr_find_smac_by_sgid(
- &ib_event->param.req_rcvd.primary_path->sgid,
- psmac, NULL);
- else
- psmac = NULL;
- if (ib_event->param.req_rcvd.alternate_path != NULL)
- rdma_addr_find_smac_by_sgid(
- &ib_event->param.req_rcvd.alternate_path->sgid,
- palt_smac, NULL);
- else
- palt_smac = NULL;
- }
/*
* Acquire mutex to prevent user executing rdma_destroy_id()
* while we're accessing the cm_id.
*/
mutex_lock(&lock);
- if (is_iboe)
- ib_update_cm_av(cm_id, psmac, palt_smac);
if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
(conn_id->id.qp_type != IB_QPT_UD))
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 4c837e66516b..ab31f136d04b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1022,12 +1022,21 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
mad_send_wr->send_buf.mad,
sge[0].length,
DMA_TO_DEVICE);
+ if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
+ return -ENOMEM;
+
mad_send_wr->header_mapping = sge[0].addr;
sge[1].addr = ib_dma_map_single(mad_agent->device,
ib_get_payload(mad_send_wr),
sge[1].length,
DMA_TO_DEVICE);
+ if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
+ ib_dma_unmap_single(mad_agent->device,
+ mad_send_wr->header_mapping,
+ sge[0].length, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
mad_send_wr->payload_mapping = sge[1].addr;
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
@@ -2590,6 +2599,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
sizeof *mad_priv -
sizeof mad_priv->header,
DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
+ sg_list.addr))) {
+ ret = -ENOMEM;
+ break;
+ }
mad_priv->header.mapping = sg_list.addr;
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
mad_priv->header.mad_list.mad_queue = recv_queue;
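
The mad.c hunks add ib_dma_mapping_error() checks after each ib_dma_map_single(), unwinding the first mapping when the second one fails rather than handing a bad bus address to the HCA. A minimal sketch of that check-and-unwind pattern, assuming a generic header/payload pair (the helper name and buffers are illustrative, not from the patch):

#include <rdma/ib_verbs.h>

/* Map two send buffers; undo the first mapping if the second fails. */
static int demo_map_send_buffers(struct ib_device *dev,
				 void *hdr, size_t hdr_len,
				 void *payload, size_t pay_len,
				 u64 *hdr_addr, u64 *pay_addr)
{
	*hdr_addr = ib_dma_map_single(dev, hdr, hdr_len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(dev, *hdr_addr)))
		return -ENOMEM;

	*pay_addr = ib_dma_map_single(dev, payload, pay_len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(dev, *pay_addr))) {
		ib_dma_unmap_single(dev, *hdr_addr, hdr_len, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	return 0;
}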
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index a84112322071..a3a2e9c1639b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -42,29 +42,29 @@
#include "uverbs.h"
-#define IB_UMEM_MAX_PAGE_CHUNK \
- ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \
- ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \
- (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
- struct ib_umem_chunk *chunk, *tmp;
+ struct scatterlist *sg;
+ struct page *page;
int i;
- list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
- ib_dma_unmap_sg(dev, chunk->page_list,
- chunk->nents, DMA_BIDIRECTIONAL);
- for (i = 0; i < chunk->nents; ++i) {
- struct page *page = sg_page(&chunk->page_list[i]);
+ if (umem->nmap > 0)
+ ib_dma_unmap_sg(dev, umem->sg_head.sgl,
+ umem->nmap,
+ DMA_BIDIRECTIONAL);
- if (umem->writable && dirty)
- set_page_dirty_lock(page);
- put_page(page);
- }
+ for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
- kfree(chunk);
+ page = sg_page(sg);
+ if (umem->writable && dirty)
+ set_page_dirty_lock(page);
+ put_page(page);
}
+
+ sg_free_table(&umem->sg_head);
+ return;
+
}
/**
@@ -81,15 +81,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
struct ib_umem *umem;
struct page **page_list;
struct vm_area_struct **vma_list;
- struct ib_umem_chunk *chunk;
unsigned long locked;
unsigned long lock_limit;
unsigned long cur_base;
unsigned long npages;
int ret;
- int off;
int i;
DEFINE_DMA_ATTRS(attrs);
+ struct scatterlist *sg, *sg_list_start;
+ int need_release = 0;
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
@@ -97,7 +97,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (!can_do_mlock())
return ERR_PTR(-EPERM);
- umem = kmalloc(sizeof *umem, GFP_KERNEL);
+ umem = kzalloc(sizeof *umem, GFP_KERNEL);
if (!umem)
return ERR_PTR(-ENOMEM);
@@ -117,8 +117,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
/* We assume the memory is from hugetlb until proved otherwise */
umem->hugetlb = 1;
- INIT_LIST_HEAD(&umem->chunk_list);
-
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list) {
kfree(umem);
@@ -147,7 +145,18 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
cur_base = addr & PAGE_MASK;
- ret = 0;
+ if (npages == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ need_release = 1;
+ sg_list_start = umem->sg_head.sgl;
+
while (npages) {
ret = get_user_pages(current, current->mm, cur_base,
min_t(unsigned long, npages,
@@ -157,54 +166,38 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (ret < 0)
goto out;
+ umem->npages += ret;
cur_base += ret * PAGE_SIZE;
npages -= ret;
- off = 0;
-
- while (ret) {
- chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
- min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
- GFP_KERNEL);
- if (!chunk) {
- ret = -ENOMEM;
- goto out;
- }
-
- chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
- sg_init_table(chunk->page_list, chunk->nents);
- for (i = 0; i < chunk->nents; ++i) {
- if (vma_list &&
- !is_vm_hugetlb_page(vma_list[i + off]))
- umem->hugetlb = 0;
- sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
- }
-
- chunk->nmap = ib_dma_map_sg_attrs(context->device,
- &chunk->page_list[0],
- chunk->nents,
- DMA_BIDIRECTIONAL,
- &attrs);
- if (chunk->nmap <= 0) {
- for (i = 0; i < chunk->nents; ++i)
- put_page(sg_page(&chunk->page_list[i]));
- kfree(chunk);
-
- ret = -ENOMEM;
- goto out;
- }
-
- ret -= chunk->nents;
- off += chunk->nents;
- list_add_tail(&chunk->list, &umem->chunk_list);
+ for_each_sg(sg_list_start, sg, ret, i) {
+ if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
+ umem->hugetlb = 0;
+
+ sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
}
- ret = 0;
+ /* preparing for next loop */
+ sg_list_start = sg;
}
+ umem->nmap = ib_dma_map_sg_attrs(context->device,
+ umem->sg_head.sgl,
+ umem->npages,
+ DMA_BIDIRECTIONAL,
+ &attrs);
+
+ if (umem->nmap <= 0) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = 0;
+
out:
if (ret < 0) {
- __ib_umem_release(context->device, umem, 0);
+ if (need_release)
+ __ib_umem_release(context->device, umem, 0);
kfree(umem);
} else
current->mm->pinned_vm = locked;
@@ -278,17 +271,16 @@ EXPORT_SYMBOL(ib_umem_release);
int ib_umem_page_count(struct ib_umem *umem)
{
- struct ib_umem_chunk *chunk;
int shift;
int i;
int n;
+ struct scatterlist *sg;
shift = ilog2(umem->page_size);
n = 0;
- list_for_each_entry(chunk, &umem->chunk_list, list)
- for (i = 0; i < chunk->nmap; ++i)
- n += sg_dma_len(&chunk->page_list[i]) >> shift;
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
+ n += sg_dma_len(sg) >> shift;
return n;
}
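
With the chunk list removed, an ib_umem now carries one scatter/gather table (sg_head) mapped in a single ib_dma_map_sg_attrs() call, and consumers walk the umem->nmap mapped entries with for_each_sg(). The driver conversions that follow all repeat the same walk; a hedged sketch of it, collecting per-page DMA addresses into a caller-supplied array (the helper is illustrative):

#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Flatten a pinned umem into an array of page-sized DMA addresses. */
static int demo_umem_to_pages(struct ib_umem *umem, u64 *pages, int max_pages)
{
	struct scatterlist *sg;
	int shift = ilog2(umem->page_size);
	int entry, i = 0, k, len;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			if (i >= max_pages)
				return -ENOSPC;
			pages[i++] = sg_dma_address(sg) + ((u64)k << shift);
		}
	}
	return i;		/* number of pages written */
}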
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 3ac795115438..92525f855d82 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1169,6 +1169,45 @@ int ib_dereg_mr(struct ib_mr *mr)
}
EXPORT_SYMBOL(ib_dereg_mr);
+struct ib_mr *ib_create_mr(struct ib_pd *pd,
+ struct ib_mr_init_attr *mr_init_attr)
+{
+ struct ib_mr *mr;
+
+ if (!pd->device->create_mr)
+ return ERR_PTR(-ENOSYS);
+
+ mr = pd->device->create_mr(pd, mr_init_attr);
+
+ if (!IS_ERR(mr)) {
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->uobject = NULL;
+ atomic_inc(&pd->usecnt);
+ atomic_set(&mr->usecnt, 0);
+ }
+
+ return mr;
+}
+EXPORT_SYMBOL(ib_create_mr);
+
+int ib_destroy_mr(struct ib_mr *mr)
+{
+ struct ib_pd *pd;
+ int ret;
+
+ if (atomic_read(&mr->usecnt))
+ return -EBUSY;
+
+ pd = mr->pd;
+ ret = mr->device->destroy_mr(mr);
+ if (!ret)
+ atomic_dec(&pd->usecnt);
+
+ return ret;
+}
+EXPORT_SYMBOL(ib_destroy_mr);
+
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
struct ib_mr *mr;
@@ -1398,3 +1437,11 @@ int ib_destroy_flow(struct ib_flow *flow_id)
return err;
}
EXPORT_SYMBOL(ib_destroy_flow);
+
+int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
+ struct ib_mr_status *mr_status)
+{
+ return mr->device->check_mr_status ?
+ mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
+}
+EXPORT_SYMBOL(ib_check_mr_status);
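
The new verbs follow the core's usual handling of optional provider methods: ib_create_mr() returns ERR_PTR(-ENOSYS) and ib_check_mr_status() returns -ENOSYS when the low-level driver leaves the hook NULL, and reference counting mirrors the other MR paths (pd->usecnt goes up on create and comes back down only when destroy succeeds). A minimal sketch of that optional-method convention, with a hypothetical device and op standing in for the real ib_device hooks:

#include <linux/errno.h>

/* Illustrative only: dispatch an optional op, reporting -ENOSYS
 * when the provider does not implement it. */
struct demo_device {
	int (*do_op)(struct demo_device *dev, int arg);
};

static int demo_do_op(struct demo_device *dev, int arg)
{
	return dev->do_op ? dev->do_op(dev, arg) : -ENOSYS;
}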
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 07eb3a8067d8..8af33cf1fc4e 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -431,9 +431,9 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 *pages;
u64 kva = 0;
int shift, n, len;
- int i, j, k;
+ int i, k, entry;
int err = 0;
- struct ib_umem_chunk *chunk;
+ struct scatterlist *sg;
struct c2_pd *c2pd = to_c2pd(pd);
struct c2_mr *c2mr;
@@ -452,10 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
shift = ffs(c2mr->umem->page_size) - 1;
-
- n = 0;
- list_for_each_entry(chunk, &c2mr->umem->chunk_list, list)
- n += chunk->nents;
+ n = c2mr->umem->nmap;
pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
if (!pages) {
@@ -464,14 +461,12 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
i = 0;
- list_for_each_entry(chunk, &c2mr->umem->chunk_list, list) {
- for (j = 0; j < chunk->nmap; ++j) {
- len = sg_dma_len(&chunk->page_list[j]) >> shift;
- for (k = 0; k < len; ++k) {
- pages[i++] =
- sg_dma_address(&chunk->page_list[j]) +
- (c2mr->umem->page_size * k);
- }
+ for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {
+ len = sg_dma_len(sg) >> shift;
+ for (k = 0; k < len; ++k) {
+ pages[i++] =
+ sg_dma_address(sg) +
+ (c2mr->umem->page_size * k);
}
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index d2283837d451..811b24a539c0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -618,14 +618,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
__be64 *pages;
int shift, n, len;
- int i, j, k;
+ int i, k, entry;
int err = 0;
- struct ib_umem_chunk *chunk;
struct iwch_dev *rhp;
struct iwch_pd *php;
struct iwch_mr *mhp;
struct iwch_reg_user_mr_resp uresp;
-
+ struct scatterlist *sg;
PDBG("%s ib_pd %p\n", __func__, pd);
php = to_iwch_pd(pd);
@@ -645,9 +644,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
shift = ffs(mhp->umem->page_size) - 1;
- n = 0;
- list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
- n += chunk->nents;
+ n = mhp->umem->nmap;
err = iwch_alloc_pbl(mhp, n);
if (err)
@@ -661,12 +658,10 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
i = n = 0;
- list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
- for (j = 0; j < chunk->nmap; ++j) {
- len = sg_dma_len(&chunk->page_list[j]) >> shift;
+ for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
+ len = sg_dma_len(sg) >> shift;
for (k = 0; k < len; ++k) {
- pages[i++] = cpu_to_be64(sg_dma_address(
- &chunk->page_list[j]) +
+ pages[i++] = cpu_to_be64(sg_dma_address(sg) +
mhp->umem->page_size * k);
if (i == PAGE_SIZE / sizeof *pages) {
err = iwch_write_pbl(mhp, pages, i, n);
@@ -676,7 +671,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
i = 0;
}
}
- }
+ }
if (i)
err = iwch_write_pbl(mhp, pages, i, n);
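
Like the cxgb4 version further down, iwch_reg_user_mr() batches the translated addresses into a single page of __be64 entries and flushes it through iwch_write_pbl() each time the page fills, so the PBL can be arbitrarily large while only one page of it is buffered at a time. A hedged sketch of that batching loop, with a made-up flush callback standing in for the adapter-specific write:

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

typedef int (*demo_pbl_flush_t)(void *ctx, __be64 *pages, int npages, int off);

/* Translate a umem into big-endian page addresses, flushing one
 * PAGE_SIZE batch at a time through the caller's callback. */
static int demo_write_pbl(struct ib_umem *umem, __be64 *pages,
			  demo_pbl_flush_t flush, void *ctx)
{
	struct scatterlist *sg;
	int shift = ilog2(umem->page_size);
	int entry, i = 0, n = 0, k, len, err;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
						 ((u64)k << shift));
			if (i == PAGE_SIZE / sizeof(*pages)) {
				err = flush(ctx, pages, i, n);
				if (err)
					return err;
				n += i;
				i = 0;
			}
		}
	}
	return i ? flush(ctx, pages, i, n) : 0;
}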
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index d286bdebe2ab..02436d5d0dab 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -98,9 +98,9 @@ int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
-static int peer2peer;
+static int peer2peer = 1;
module_param(peer2peer, int, 0644);
-MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
+MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");
static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
@@ -400,7 +400,8 @@ static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
n = dst_neigh_lookup(&rt->dst, &peer_ip);
if (!n)
return NULL;
- if (!our_interface(dev, n->dev)) {
+ if (!our_interface(dev, n->dev) &&
+ !(n->dev->flags & IFF_LOOPBACK)) {
dst_release(&rt->dst);
return NULL;
}
@@ -759,8 +760,9 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
ep->mpa_skb = skb;
c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
start_ep_timer(ep);
- state_set(&ep->com, MPA_REQ_SENT);
+ __state_set(&ep->com, MPA_REQ_SENT);
ep->mpa_attr.initiator = 1;
+ ep->snd_seq += mpalen;
return;
}
@@ -840,6 +842,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
+ ep->snd_seq += mpalen;
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
@@ -923,7 +926,8 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
skb_get(skb);
t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
ep->mpa_skb = skb;
- state_set(&ep->com, MPA_REP_SENT);
+ __state_set(&ep->com, MPA_REP_SENT);
+ ep->snd_seq += mpalen;
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
@@ -940,6 +944,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
+ mutex_lock(&ep->com.mutex);
dst_confirm(ep->dst);
/* setup the hwtid for this connection */
@@ -963,17 +968,18 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
send_mpa_req(ep, skb, 1);
else
send_mpa_req(ep, skb, mpa_rev);
-
+ mutex_unlock(&ep->com.mutex);
return 0;
}
-static void close_complete_upcall(struct c4iw_ep *ep)
+static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
struct iw_cm_event event;
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
+ event.status = status;
if (ep->com.cm_id) {
PDBG("close complete delivered ep %p cm_id %p tid %u\n",
ep, ep->com.cm_id, ep->hwtid);
@@ -987,7 +993,6 @@ static void close_complete_upcall(struct c4iw_ep *ep)
static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- close_complete_upcall(ep);
state_set(&ep->com, ABORTING);
set_bit(ABORT_CONN, &ep->com.history);
return send_abort(ep, skb, gfp);
@@ -1066,9 +1071,10 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
}
}
-static void connect_request_upcall(struct c4iw_ep *ep)
+static int connect_request_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
+ int ret;
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
@@ -1093,15 +1099,14 @@ static void connect_request_upcall(struct c4iw_ep *ep)
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
}
- if (state_read(&ep->parent_ep->com) != DEAD) {
- c4iw_get_ep(&ep->com);
- ep->parent_ep->com.cm_id->event_handler(
- ep->parent_ep->com.cm_id,
- &event);
- }
+ c4iw_get_ep(&ep->com);
+ ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
+ &event);
+ if (ret)
+ c4iw_put_ep(&ep->com);
set_bit(CONNREQ_UPCALL, &ep->com.history);
c4iw_put_ep(&ep->parent_ep->com);
- ep->parent_ep = NULL;
+ return ret;
}
static void established_upcall(struct c4iw_ep *ep)
@@ -1165,7 +1170,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* the connection.
*/
stop_ep_timer(ep);
- if (state_read(&ep->com) != MPA_REQ_SENT)
+ if (ep->com.state != MPA_REQ_SENT)
return;
/*
@@ -1240,7 +1245,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* start reply message including private data. And
* the MPA header is valid.
*/
- state_set(&ep->com, FPDU_MODE);
+ __state_set(&ep->com, FPDU_MODE);
ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
ep->mpa_attr.recv_marker_enabled = markers_enabled;
ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
@@ -1355,7 +1360,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
}
goto out;
err:
- state_set(&ep->com, ABORTING);
+ __state_set(&ep->com, ABORTING);
send_abort(ep, skb, GFP_KERNEL);
out:
connect_reply_upcall(ep, err);
@@ -1370,7 +1375,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- if (state_read(&ep->com) != MPA_REQ_WAIT)
+ if (ep->com.state != MPA_REQ_WAIT)
return;
/*
@@ -1400,7 +1405,6 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
return;
PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
- stop_ep_timer(ep);
mpa = (struct mpa_message *) ep->mpa_pkt;
/*
@@ -1492,10 +1496,18 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
ep->mpa_attr.p2p_type);
- state_set(&ep->com, MPA_REQ_RCVD);
+ __state_set(&ep->com, MPA_REQ_RCVD);
+ stop_ep_timer(ep);
/* drive upcall */
- connect_request_upcall(ep);
+ mutex_lock(&ep->parent_ep->com.mutex);
+ if (ep->parent_ep->com.state != DEAD) {
+ if (connect_request_upcall(ep))
+ abort_connection(ep, skb, GFP_KERNEL);
+ } else {
+ abort_connection(ep, skb, GFP_KERNEL);
+ }
+ mutex_unlock(&ep->parent_ep->com.mutex);
return;
}
@@ -1509,14 +1521,17 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
__u8 status = hdr->status;
ep = lookup_tid(t, tid);
+ if (!ep)
+ return 0;
PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
skb_pull(skb, sizeof(*hdr));
skb_trim(skb, dlen);
+ mutex_lock(&ep->com.mutex);
/* update RX credits */
update_rx_credits(ep, dlen);
- switch (state_read(&ep->com)) {
+ switch (ep->com.state) {
case MPA_REQ_SENT:
ep->rcv_seq += dlen;
process_mpa_reply(ep, skb);
@@ -1532,7 +1547,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
pr_err("%s Unexpected streaming data." \
" qpid %u ep %p state %d tid %u status %d\n",
__func__, ep->com.qp->wq.sq.qid, ep,
- state_read(&ep->com), ep->hwtid, status);
+ ep->com.state, ep->hwtid, status);
attrs.next_state = C4IW_QP_STATE_TERMINATE;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
@@ -1541,6 +1556,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
default:
break;
}
+ mutex_unlock(&ep->com.mutex);
return 0;
}
@@ -1647,6 +1663,15 @@ static inline int act_open_has_tid(int status)
status != CPL_ERR_ARP_MISS;
}
+/* Returns whether a CPL status conveys negative advice.
+ */
+static int is_neg_adv(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE ||
+ status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
#define ACT_OPEN_RETRY_COUNT 2
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
@@ -1835,7 +1860,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
status, status2errno(status));
- if (status == CPL_ERR_RTX_NEG_ADVICE) {
+ if (is_neg_adv(status)) {
printk(KERN_WARNING MOD "Connection problems for atid %u\n",
atid);
return 0;
@@ -2246,7 +2271,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
}
- close_complete_upcall(ep);
+ close_complete_upcall(ep, 0);
__state_set(&ep->com, DEAD);
release = 1;
disconnect = 0;
@@ -2265,15 +2290,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
-/*
- * Returns whether an ABORT_REQ_RSS message is a negative advice.
- */
-static int is_neg_adv_abort(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE;
-}
-
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss *req = cplhdr(skb);
@@ -2287,7 +2303,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int tid = GET_TID(req);
ep = lookup_tid(t, tid);
- if (is_neg_adv_abort(req->status)) {
+ if (is_neg_adv(req->status)) {
PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
ep->hwtid);
return 0;
@@ -2425,7 +2441,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
C4IW_QP_ATTR_NEXT_STATE,
&attrs, 1);
}
- close_complete_upcall(ep);
+ close_complete_upcall(ep, 0);
__state_set(&ep->com, DEAD);
release = 1;
break;
@@ -2500,22 +2516,28 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
- int err;
+ int err = 0;
+ int disconnect = 0;
struct c4iw_ep *ep = to_ep(cm_id);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- if (state_read(&ep->com) == DEAD) {
+ mutex_lock(&ep->com.mutex);
+ if (ep->com.state == DEAD) {
+ mutex_unlock(&ep->com.mutex);
c4iw_put_ep(&ep->com);
return -ECONNRESET;
}
set_bit(ULP_REJECT, &ep->com.history);
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+ BUG_ON(ep->com.state != MPA_REQ_RCVD);
if (mpa_rev == 0)
abort_connection(ep, NULL, GFP_KERNEL);
else {
err = send_mpa_reject(ep, pdata, pdata_len);
- err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+ disconnect = 1;
}
+ mutex_unlock(&ep->com.mutex);
+ if (disconnect)
+ err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
c4iw_put_ep(&ep->com);
return 0;
}
@@ -2530,12 +2552,14 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- if (state_read(&ep->com) == DEAD) {
+
+ mutex_lock(&ep->com.mutex);
+ if (ep->com.state == DEAD) {
err = -ECONNRESET;
goto err;
}
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+ BUG_ON(ep->com.state != MPA_REQ_RCVD);
BUG_ON(!qp);
set_bit(ULP_ACCEPT, &ep->com.history);
@@ -2604,14 +2628,16 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (err)
goto err1;
- state_set(&ep->com, FPDU_MODE);
+ __state_set(&ep->com, FPDU_MODE);
established_upcall(ep);
+ mutex_unlock(&ep->com.mutex);
c4iw_put_ep(&ep->com);
return 0;
err1:
ep->com.cm_id = NULL;
cm_id->rem_ref(cm_id);
err:
+ mutex_unlock(&ep->com.mutex);
c4iw_put_ep(&ep->com);
return err;
}
@@ -2980,7 +3006,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
rdev = &ep->com.dev->rdev;
if (c4iw_fatal_error(rdev)) {
fatal = 1;
- close_complete_upcall(ep);
+ close_complete_upcall(ep, -EIO);
ep->com.state = DEAD;
}
switch (ep->com.state) {
@@ -3022,7 +3048,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (close) {
if (abrupt) {
set_bit(EP_DISC_ABORT, &ep->com.history);
- close_complete_upcall(ep);
+ close_complete_upcall(ep, -ECONNRESET);
ret = send_abort(ep, NULL, gfp);
} else {
set_bit(EP_DISC_CLOSE, &ep->com.history);
@@ -3203,6 +3229,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
struct sk_buff *req_skb;
struct fw_ofld_connection_wr *req;
struct cpl_pass_accept_req *cpl = cplhdr(skb);
+ int ret;
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
@@ -3239,7 +3266,13 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req->cookie = (unsigned long)skb;
set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
- cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
+ ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
+ if (ret < 0) {
+ pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
+ ret);
+ kfree_skb(skb);
+ kfree_skb(req_skb);
+ }
}
/*
@@ -3346,13 +3379,13 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
pi = (struct port_info *)netdev_priv(pdev);
tx_chan = cxgb4_port_chan(pdev);
}
+ neigh_release(neigh);
if (!e) {
pr_err("%s - failed to allocate l2t entry!\n",
__func__);
goto free_dst;
}
- neigh_release(neigh);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
window = (__force u16) htons((__force u16)tcph->window);
@@ -3427,6 +3460,7 @@ static void process_timeout(struct c4iw_ep *ep)
&attrs, 1);
}
__state_set(&ep->com, ABORTING);
+ close_complete_upcall(ep, -ETIMEDOUT);
break;
default:
WARN(1, "%s unexpected state ep %p tid %u state %u\n",
@@ -3570,7 +3604,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
kfree_skb(skb);
return 0;
}
- if (is_neg_adv_abort(req->status)) {
+ if (is_neg_adv(req->status)) {
PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
ep->hwtid);
kfree_skb(skb);
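
A recurring change in these cm.c hunks is replacing the state_read()/state_set() accessors with explicit mutex_lock(&ep->com.mutex) around the endpoint state machine; where the follow-up work (such as c4iw_ep_disconnect(), which takes the same mutex) cannot run under the lock, the decision is recorded in a local flag and acted on after unlocking, as c4iw_reject_cr() now does. A minimal, self-contained sketch of that defer-until-unlock pattern (all names are illustrative):

#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_ep {			/* illustrative endpoint */
	struct mutex lock;
	bool dead;
};

static int demo_send_reject(struct demo_ep *ep)
{
	return 0;			/* cheap, safe under the lock */
}

static int demo_disconnect(struct demo_ep *ep)
{
	mutex_lock(&ep->lock);		/* would deadlock if still held */
	/* ... tear the connection down ... */
	mutex_unlock(&ep->lock);
	return 0;
}

static int demo_reject(struct demo_ep *ep)
{
	int disconnect = 0, err;

	mutex_lock(&ep->lock);
	if (ep->dead) {
		mutex_unlock(&ep->lock);
		return -ECONNRESET;
	}
	err = demo_send_reject(ep);
	disconnect = 1;			/* defer the part that relocks */
	mutex_unlock(&ep->lock);

	if (disconnect)
		err = demo_disconnect(ep);
	return err;
}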
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 88de3aa9c5b0..ce468e542428 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -365,8 +365,14 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {
- /*
- * drop peer2peer RTR reads.
+ /* If we have reached here because of async
+ * event or other error, and have egress error
+ * then drop
+ */
+ if (CQE_TYPE(hw_cqe) == 1)
+ goto next_cqe;
+
+ /* drop peer2peer RTR reads.
*/
if (CQE_WRID_STAG(hw_cqe) == 1)
goto next_cqe;
@@ -511,8 +517,18 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
*/
if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
- /*
- * If this is an unsolicited read response, then the read
+ /* If we have reached here because of async
+ * event or other error, and have egress error
+ * then drop
+ */
+ if (CQE_TYPE(hw_cqe) == 1) {
+ if (CQE_STATUS(hw_cqe))
+ t4_set_wq_in_error(wq);
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
+ /* If this is an unsolicited read response, then the read
* was generated by the kernel driver as part of peer-2-peer
* connection setup. So ignore the completion.
*/
@@ -603,7 +619,7 @@ proc_cqe:
*/
if (SQ_TYPE(hw_cqe)) {
int idx = CQE_WRID_SQ_IDX(hw_cqe);
- BUG_ON(idx > wq->sq.size);
+ BUG_ON(idx >= wq->sq.size);
/*
* Account for any unsignaled completions completed by
@@ -617,7 +633,7 @@ proc_cqe:
wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
else
wq->sq.in_use -= idx - wq->sq.cidx;
- BUG_ON(wq->sq.in_use < 0 && wq->sq.in_use < wq->sq.size);
+ BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
wq->sq.cidx = (uint16_t)idx;
PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
@@ -881,7 +897,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
/*
* Make actual HW queue 2x to avoid cdix_inc overflows.
*/
- hwentries = entries * 2;
+ hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
/*
* Make HW queue at least 64 entries so GTS updates aren't too
@@ -930,6 +946,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
if (!mm2)
goto err4;
+ memset(&uresp, 0, sizeof(uresp));
uresp.qid_mask = rhp->rdev.cqmask;
uresp.cqid = chp->cq.cqid;
uresp.size = chp->cq.size;
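
The added memset(&uresp, 0, sizeof(uresp)) matters because uresp is copied to user space: without it, structure padding and any members c4iw_create_cq() does not assign would leak kernel stack bytes. A hedged sketch of the safe pattern around ib_copy_to_udata() (the response layout is illustrative):

#include <linux/string.h>
#include <rdma/ib_verbs.h>

struct demo_create_resp {		/* illustrative ABI struct */
	__u64 key;
	__u32 size;
	__u32 reserved;
};

static int demo_reply_to_user(struct ib_udata *udata, u64 key, u32 size)
{
	struct demo_create_resp resp;

	memset(&resp, 0, sizeof(resp));	/* no uninitialized bytes reach userspace */
	resp.key = key;
	resp.size = size;
	return ib_copy_to_udata(udata, &resp, sizeof(resp));
}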
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 4a033853312e..9489a388376c 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -64,6 +64,10 @@ struct uld_ctx {
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
+#define DB_FC_RESUME_SIZE 64
+#define DB_FC_RESUME_DELAY 1
+#define DB_FC_DRAIN_THRESH 0
+
static struct dentry *c4iw_debugfs_root;
struct c4iw_debugfs_data {
@@ -282,7 +286,7 @@ static const struct file_operations stag_debugfs_fops = {
.llseek = default_llseek,
};
-static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};
+static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};
static int stats_show(struct seq_file *seq, void *v)
{
@@ -311,9 +315,10 @@ static int stats_show(struct seq_file *seq, void *v)
seq_printf(seq, " DB FULL: %10llu\n", dev->rdev.stats.db_full);
seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
seq_printf(seq, " DB DROP: %10llu\n", dev->rdev.stats.db_drop);
- seq_printf(seq, " DB State: %s Transitions %llu\n",
+ seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
db_state_str[dev->db_state],
- dev->rdev.stats.db_state_transitions);
+ dev->rdev.stats.db_state_transitions,
+ dev->rdev.stats.db_fc_interruptions);
seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
dev->rdev.stats.act_ofld_conn_fails);
@@ -643,6 +648,12 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
goto err4;
}
+ rdev->status_page = (struct t4_dev_status_page *)
+ __get_free_page(GFP_KERNEL);
+ if (!rdev->status_page) {
+ pr_err(MOD "error allocating status page\n");
+ goto err4;
+ }
return 0;
err4:
c4iw_rqtpool_destroy(rdev);
@@ -656,6 +667,7 @@ err1:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
+ free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
c4iw_destroy_resource(&rdev->resource);
@@ -703,18 +715,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
pr_info("%s: On-Chip Queues not supported on this device.\n",
pci_name(infop->pdev));
- if (!is_t4(infop->adapter_type)) {
- if (!allow_db_fc_on_t5) {
- db_fc_threshold = 100000;
- pr_info("DB Flow Control Disabled.\n");
- }
-
- if (!allow_db_coalescing_on_t5) {
- db_coalescing_threshold = -1;
- pr_info("DB Coalescing Disabled.\n");
- }
- }
-
devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
if (!devp) {
printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -749,6 +749,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
spin_lock_init(&devp->lock);
mutex_init(&devp->rdev.stats.lock);
mutex_init(&devp->db_mutex);
+ INIT_LIST_HEAD(&devp->db_fc_list);
if (c4iw_debugfs_root) {
devp->debugfs_root = debugfs_create_dir(
@@ -897,11 +898,13 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
}
opcode = *(u8 *)rsp;
- if (c4iw_handlers[opcode])
+ if (c4iw_handlers[opcode]) {
c4iw_handlers[opcode](dev, skb);
- else
+ } else {
pr_info("%s no handler opcode 0x%x...\n", __func__,
opcode);
+ kfree_skb(skb);
+ }
return 0;
nomem:
@@ -977,13 +980,16 @@ static int disable_qp_db(int id, void *p, void *data)
static void stop_queues(struct uld_ctx *ctx)
{
- spin_lock_irq(&ctx->dev->lock);
- if (ctx->dev->db_state == NORMAL) {
- ctx->dev->rdev.stats.db_state_transitions++;
- ctx->dev->db_state = FLOW_CONTROL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+ ctx->dev->rdev.stats.db_state_transitions++;
+ ctx->dev->db_state = STOPPED;
+ if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
- }
- spin_unlock_irq(&ctx->dev->lock);
+ else
+ ctx->dev->rdev.status_page->db_off = 1;
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
}
static int enable_qp_db(int id, void *p, void *data)
@@ -994,15 +1000,70 @@ static int enable_qp_db(int id, void *p, void *data)
return 0;
}
+static void resume_rc_qp(struct c4iw_qp *qp)
+{
+ spin_lock(&qp->lock);
+ t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc);
+ qp->wq.sq.wq_pidx_inc = 0;
+ t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc);
+ qp->wq.rq.wq_pidx_inc = 0;
+ spin_unlock(&qp->lock);
+}
+
+static void resume_a_chunk(struct uld_ctx *ctx)
+{
+ int i;
+ struct c4iw_qp *qp;
+
+ for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
+ qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
+ db_fc_entry);
+ list_del_init(&qp->db_fc_entry);
+ resume_rc_qp(qp);
+ if (list_empty(&ctx->dev->db_fc_list))
+ break;
+ }
+}
+
static void resume_queues(struct uld_ctx *ctx)
{
spin_lock_irq(&ctx->dev->lock);
- if (ctx->dev->qpcnt <= db_fc_threshold &&
- ctx->dev->db_state == FLOW_CONTROL) {
- ctx->dev->db_state = NORMAL;
- ctx->dev->rdev.stats.db_state_transitions++;
- idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
+ if (ctx->dev->db_state != STOPPED)
+ goto out;
+ ctx->dev->db_state = FLOW_CONTROL;
+ while (1) {
+ if (list_empty(&ctx->dev->db_fc_list)) {
+ WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
+ ctx->dev->db_state = NORMAL;
+ ctx->dev->rdev.stats.db_state_transitions++;
+ if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
+ idr_for_each(&ctx->dev->qpidr, enable_qp_db,
+ NULL);
+ } else {
+ ctx->dev->rdev.status_page->db_off = 0;
+ }
+ break;
+ } else {
+ if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
+ < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
+ DB_FC_DRAIN_THRESH)) {
+ resume_a_chunk(ctx);
+ }
+ if (!list_empty(&ctx->dev->db_fc_list)) {
+ spin_unlock_irq(&ctx->dev->lock);
+ if (DB_FC_RESUME_DELAY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(DB_FC_RESUME_DELAY);
+ }
+ spin_lock_irq(&ctx->dev->lock);
+ if (ctx->dev->db_state != FLOW_CONTROL)
+ break;
+ }
+ }
}
+out:
+ if (ctx->dev->db_state != NORMAL)
+ ctx->dev->rdev.stats.db_fc_interruptions++;
spin_unlock_irq(&ctx->dev->lock);
}
@@ -1028,12 +1089,12 @@ static int count_qps(int id, void *p, void *data)
return 0;
}
-static void deref_qps(struct qp_list qp_list)
+static void deref_qps(struct qp_list *qp_list)
{
int idx;
- for (idx = 0; idx < qp_list.idx; idx++)
- c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
+ for (idx = 0; idx < qp_list->idx; idx++)
+ c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
@@ -1044,17 +1105,22 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
for (idx = 0; idx < qp_list->idx; idx++) {
struct c4iw_qp *qp = qp_list->qps[idx];
+ spin_lock_irq(&qp->rhp->lock);
+ spin_lock(&qp->lock);
ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
qp->wq.sq.qid,
t4_sq_host_wq_pidx(&qp->wq),
t4_sq_wq_size(&qp->wq));
if (ret) {
- printk(KERN_ERR MOD "%s: Fatal error - "
+ pr_err(KERN_ERR MOD "%s: Fatal error - "
"DB overflow recovery failed - "
"error syncing SQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
+ spin_unlock(&qp->lock);
+ spin_unlock_irq(&qp->rhp->lock);
return;
}
+ qp->wq.sq.wq_pidx_inc = 0;
ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
qp->wq.rq.qid,
@@ -1062,12 +1128,17 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
t4_rq_wq_size(&qp->wq));
if (ret) {
- printk(KERN_ERR MOD "%s: Fatal error - "
+ pr_err(KERN_ERR MOD "%s: Fatal error - "
"DB overflow recovery failed - "
"error syncing RQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
+ spin_unlock(&qp->lock);
+ spin_unlock_irq(&qp->rhp->lock);
return;
}
+ qp->wq.rq.wq_pidx_inc = 0;
+ spin_unlock(&qp->lock);
+ spin_unlock_irq(&qp->rhp->lock);
/* Wait for the dbfifo to drain */
while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
@@ -1083,36 +1154,22 @@ static void recover_queues(struct uld_ctx *ctx)
struct qp_list qp_list;
int ret;
- /* lock out kernel db ringers */
- mutex_lock(&ctx->dev->db_mutex);
-
- /* put all queues in to recovery mode */
- spin_lock_irq(&ctx->dev->lock);
- ctx->dev->db_state = RECOVERY;
- ctx->dev->rdev.stats.db_state_transitions++;
- idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
- spin_unlock_irq(&ctx->dev->lock);
-
/* slow everybody down */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(1000));
- /* Wait for the dbfifo to completely drain. */
- while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(usecs_to_jiffies(10));
- }
-
/* flush the SGE contexts */
ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
if (ret) {
printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
pci_name(ctx->lldi.pdev));
- goto out;
+ return;
}
/* Count active queues so we can build a list of queues to recover */
spin_lock_irq(&ctx->dev->lock);
+ WARN_ON(ctx->dev->db_state != STOPPED);
+ ctx->dev->db_state = RECOVERY;
idr_for_each(&ctx->dev->qpidr, count_qps, &count);
qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
@@ -1120,7 +1177,7 @@ static void recover_queues(struct uld_ctx *ctx)
printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
pci_name(ctx->lldi.pdev));
spin_unlock_irq(&ctx->dev->lock);
- goto out;
+ return;
}
qp_list.idx = 0;
@@ -1133,29 +1190,13 @@ static void recover_queues(struct uld_ctx *ctx)
recover_lost_dbs(ctx, &qp_list);
/* we're almost done! deref the qps and clean up */
- deref_qps(qp_list);
+ deref_qps(&qp_list);
kfree(qp_list.qps);
- /* Wait for the dbfifo to completely drain again */
- while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(usecs_to_jiffies(10));
- }
-
- /* resume the queues */
spin_lock_irq(&ctx->dev->lock);
- if (ctx->dev->qpcnt > db_fc_threshold)
- ctx->dev->db_state = FLOW_CONTROL;
- else {
- ctx->dev->db_state = NORMAL;
- idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
- }
- ctx->dev->rdev.stats.db_state_transitions++;
+ WARN_ON(ctx->dev->db_state != RECOVERY);
+ ctx->dev->db_state = STOPPED;
spin_unlock_irq(&ctx->dev->lock);
-
-out:
- /* start up kernel db ringers again */
- mutex_unlock(&ctx->dev->db_mutex);
}
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
@@ -1165,9 +1206,7 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
switch (control) {
case CXGB4_CONTROL_DB_FULL:
stop_queues(ctx);
- mutex_lock(&ctx->dev->rdev.stats.lock);
ctx->dev->rdev.stats.db_full++;
- mutex_unlock(&ctx->dev->rdev.stats.lock);
break;
case CXGB4_CONTROL_DB_EMPTY:
resume_queues(ctx);
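
resume_queues() now drains the stalled queues in bounded chunks: it rings at most DB_FC_RESUME_SIZE queue pairs per pass, only while the doorbell FIFO has headroom, and between passes it drops the device lock and sleeps DB_FC_RESUME_DELAY jiffies, so a long backlog neither holds the lock for long nor refills the FIFO it is trying to drain. A sketch of that bounded-batch loop, with fifo_has_room() standing in for the cxgb4_dbfifo_count() threshold check (all names are illustrative):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#define DEMO_BATCH	64
#define DEMO_DELAY	1		/* jiffies between passes */

/* Illustrative only: resume queued entries a small batch at a time. */
static void demo_drain(spinlock_t *lock, struct list_head *pending,
		       bool (*fifo_has_room)(void),
		       void (*resume_one)(struct list_head *entry))
{
	struct list_head *entry;
	int i;

	spin_lock_irq(lock);
	while (!list_empty(pending)) {
		for (i = 0; i < DEMO_BATCH && !list_empty(pending); i++) {
			if (!fifo_has_room())
				break;
			entry = pending->next;
			list_del_init(entry);
			resume_one(entry);
		}
		spin_unlock_irq(lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(DEMO_DELAY);
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);
}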
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 23eaeabab93b..e872203c5424 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -109,6 +109,7 @@ struct c4iw_dev_ucontext {
enum c4iw_rdev_flags {
T4_FATAL_ERROR = (1<<0),
+ T4_STATUS_PAGE_DISABLED = (1<<1),
};
struct c4iw_stat {
@@ -130,6 +131,7 @@ struct c4iw_stats {
u64 db_empty;
u64 db_drop;
u64 db_state_transitions;
+ u64 db_fc_interruptions;
u64 tcam_full;
u64 act_ofld_conn_fails;
u64 pas_ofld_conn_fails;
@@ -150,6 +152,7 @@ struct c4iw_rdev {
unsigned long oc_mw_pa;
void __iomem *oc_mw_kva;
struct c4iw_stats stats;
+ struct t4_dev_status_page *status_page;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -211,7 +214,8 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
enum db_state {
NORMAL = 0,
FLOW_CONTROL = 1,
- RECOVERY = 2
+ RECOVERY = 2,
+ STOPPED = 3
};
struct c4iw_dev {
@@ -225,10 +229,10 @@ struct c4iw_dev {
struct mutex db_mutex;
struct dentry *debugfs_root;
enum db_state db_state;
- int qpcnt;
struct idr hwtid_idr;
struct idr atid_idr;
struct idr stid_idr;
+ struct list_head db_fc_list;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -369,6 +373,7 @@ struct c4iw_fr_page_list {
DEFINE_DMA_UNMAP_ADDR(mapping);
dma_addr_t dma_addr;
struct c4iw_dev *dev;
+ int pll_len;
};
static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
@@ -432,6 +437,7 @@ struct c4iw_qp_attributes {
struct c4iw_qp {
struct ib_qp ibqp;
+ struct list_head db_fc_entry;
struct c4iw_dev *rhp;
struct c4iw_ep *ep;
struct c4iw_qp_attributes attr;
@@ -441,6 +447,7 @@ struct c4iw_qp {
atomic_t refcnt;
wait_queue_head_t wait;
struct timer_list timer;
+ int sq_sig_all;
};
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 41b11951a30a..f9ca072a99ed 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -37,9 +37,9 @@
#include "iw_cxgb4.h"
-int use_dsgl = 1;
+int use_dsgl = 0;
module_param(use_dsgl, int, 0644);
-MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");
+MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=0)");
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
@@ -678,9 +678,9 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
__be64 *pages;
int shift, n, len;
- int i, j, k;
+ int i, k, entry;
int err = 0;
- struct ib_umem_chunk *chunk;
+ struct scatterlist *sg;
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mr *mhp;
@@ -710,10 +710,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
shift = ffs(mhp->umem->page_size) - 1;
- n = 0;
- list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
- n += chunk->nents;
-
+ n = mhp->umem->nmap;
err = alloc_pbl(mhp, n);
if (err)
goto err;
@@ -726,24 +723,22 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
i = n = 0;
- list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
- for (j = 0; j < chunk->nmap; ++j) {
- len = sg_dma_len(&chunk->page_list[j]) >> shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = cpu_to_be64(sg_dma_address(
- &chunk->page_list[j]) +
- mhp->umem->page_size * k);
- if (i == PAGE_SIZE / sizeof *pages) {
- err = write_pbl(&mhp->rhp->rdev,
- pages,
- mhp->attr.pbl_addr + (n << 3), i);
- if (err)
- goto pbl_done;
- n += i;
- i = 0;
- }
+ for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
+ len = sg_dma_len(sg) >> shift;
+ for (k = 0; k < len; ++k) {
+ pages[i++] = cpu_to_be64(sg_dma_address(sg) +
+ mhp->umem->page_size * k);
+ if (i == PAGE_SIZE / sizeof *pages) {
+ err = write_pbl(&mhp->rhp->rdev,
+ pages,
+ mhp->attr.pbl_addr + (n << 3), i);
+ if (err)
+ goto pbl_done;
+ n += i;
+ i = 0;
}
}
+ }
if (i)
err = write_pbl(&mhp->rhp->rdev, pages,
@@ -903,7 +898,11 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
dma_unmap_addr_set(c4pl, mapping, dma_addr);
c4pl->dma_addr = dma_addr;
c4pl->dev = dev;
- c4pl->ibpl.max_page_list_len = pll_len;
+ c4pl->pll_len = pll_len;
+
+ PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
+ __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
+ &c4pl->dma_addr);
return &c4pl->ibpl;
}
@@ -912,8 +911,12 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
+ PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
+ __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
+ &c4pl->dma_addr);
+
dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
- c4pl->ibpl.max_page_list_len,
+ c4pl->pll_len,
c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
kfree(c4pl);
}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 7e94c9a656a1..79429256023a 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -106,15 +106,56 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
{
struct c4iw_ucontext *context;
struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
+ static int warned;
+ struct c4iw_alloc_ucontext_resp uresp;
+ int ret = 0;
+ struct c4iw_mm_entry *mm = NULL;
PDBG("%s ibdev %p\n", __func__, ibdev);
context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
+ if (!context) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
+
+ if (udata->outlen < sizeof(uresp)) {
+ if (!warned++)
+ pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
+ rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
+ } else {
+ mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+ if (!mm) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ uresp.status_page_size = PAGE_SIZE;
+
+ spin_lock(&context->mmap_lock);
+ uresp.status_page_key = context->key;
+ context->key += PAGE_SIZE;
+ spin_unlock(&context->mmap_lock);
+
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_mm;
+
+ mm->key = uresp.status_page_key;
+ mm->addr = virt_to_phys(rhp->rdev.status_page);
+ mm->len = PAGE_SIZE;
+ insert_mmap(context, mm);
+ }
return &context->ibucontext;
+err_mm:
+ kfree(mm);
+err_free:
+ kfree(context);
+err:
+ return ERR_PTR(ret);
}
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
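
c4iw_alloc_ucontext() now wants to return a mmap key for the status page, but an older libcxgb4 supplies a smaller (or empty) response buffer, so the code compares udata->outlen against sizeof(uresp) and degrades to T4_STATUS_PAGE_DISABLED instead of failing the allocation. A hedged sketch of that backward-compatibility check (the response layout and helper are illustrative):

#include <linux/mm.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

struct demo_ucontext_resp {		/* illustrative ABI struct */
	__u64 status_page_key;
	__u32 status_page_size;
	__u32 reserved;
};

/* Returns 0 on success, 1 when userspace is too old for the feature,
 * or a negative errno if the copy itself fails. */
static int demo_fill_ucontext_resp(struct ib_udata *udata, u64 key)
{
	struct demo_ucontext_resp resp;

	if (udata->outlen < sizeof(resp))
		return 1;		/* downlevel library: run without it */

	memset(&resp, 0, sizeof(resp));
	resp.status_page_key = key;
	resp.status_page_size = PAGE_SIZE;
	return ib_copy_to_udata(udata, &resp, sizeof(resp));
}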
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 582936708e6e..cb76eb5eee1f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -638,6 +638,46 @@ void c4iw_qp_rem_ref(struct ib_qp *qp)
wake_up(&(to_c4iw_qp(qp)->wait));
}
+static void add_to_fc_list(struct list_head *head, struct list_head *entry)
+{
+ if (list_empty(entry))
+ list_add_tail(entry, head);
+}
+
+static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qhp->rhp->lock, flags);
+ spin_lock(&qhp->lock);
+ if (qhp->rhp->db_state == NORMAL) {
+ t4_ring_sq_db(&qhp->wq, inc);
+ } else {
+ add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+ qhp->wq.sq.wq_pidx_inc += inc;
+ }
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+ return 0;
+}
+
+static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qhp->rhp->lock, flags);
+ spin_lock(&qhp->lock);
+ if (qhp->rhp->db_state == NORMAL) {
+ t4_ring_rq_db(&qhp->wq, inc);
+ } else {
+ add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+ qhp->wq.rq.wq_pidx_inc += inc;
+ }
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+ return 0;
+}
+
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -675,7 +715,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_flags = 0;
if (wr->send_flags & IB_SEND_SOLICITED)
fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
- if (wr->send_flags & IB_SEND_SIGNALED)
+ if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
fw_flags |= FW_RI_COMPLETION_FLAG;
swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
switch (wr->opcode) {
@@ -736,7 +776,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
swsqe->idx = qhp->wq.sq.pidx;
swsqe->complete = 0;
- swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
+ swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
+ qhp->sq_sig_all;
swsqe->flushed = 0;
swsqe->wr_id = wr->wr_id;
@@ -750,9 +791,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
t4_sq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
}
- if (t4_wq_db_enabled(&qhp->wq))
+ if (!qhp->rhp->rdev.status_page->db_off) {
t4_ring_sq_db(&qhp->wq, idx);
- spin_unlock_irqrestore(&qhp->lock, flag);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ } else {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ring_kernel_sq_db(qhp, idx);
+ }
return err;
}
@@ -812,9 +857,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
wr = wr->next;
num_wrs--;
}
- if (t4_wq_db_enabled(&qhp->wq))
+ if (!qhp->rhp->rdev.status_page->db_off) {
t4_ring_rq_db(&qhp->wq, idx);
- spin_unlock_irqrestore(&qhp->lock, flag);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ } else {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ring_kernel_rq_db(qhp, idx);
+ }
return err;
}
@@ -1200,35 +1249,6 @@ out:
return ret;
}
-/*
- * Called by the library when the qp has user dbs disabled due to
- * a DB_FULL condition. This function will single-thread all user
- * DB rings to avoid overflowing the hw db-fifo.
- */
-static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
-{
- int delay = db_delay_usecs;
-
- mutex_lock(&qhp->rhp->db_mutex);
- do {
-
- /*
- * The interrupt threshold is dbfifo_int_thresh << 6. So
- * make sure we don't cross that and generate an interrupt.
- */
- if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
- (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
- writel(QID(qid) | PIDX(inc), qhp->wq.db);
- break;
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(usecs_to_jiffies(delay));
- delay = min(delay << 1, 2000);
- } while (1);
- mutex_unlock(&qhp->rhp->db_mutex);
- return 0;
-}
-
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
enum c4iw_qp_attr_mask mask,
struct c4iw_qp_attributes *attrs,
@@ -1278,11 +1298,11 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
if (mask & C4IW_QP_ATTR_SQ_DB) {
- ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
+ ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
goto out;
}
if (mask & C4IW_QP_ATTR_RQ_DB) {
- ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
+ ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
goto out;
}
@@ -1465,14 +1485,6 @@ out:
return ret;
}
-static int enable_qp_db(int id, void *p, void *data)
-{
- struct c4iw_qp *qp = p;
-
- t4_enable_wq_db(&qp->wq);
- return 0;
-}
-
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
struct c4iw_dev *rhp;
@@ -1490,22 +1502,15 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
wait_event(qhp->wait, !qhp->ep);
- spin_lock_irq(&rhp->lock);
- remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
- rhp->qpcnt--;
- BUG_ON(rhp->qpcnt < 0);
- if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
- rhp->rdev.stats.db_state_transitions++;
- rhp->db_state = NORMAL;
- idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
- }
- if (db_coalescing_threshold >= 0)
- if (rhp->qpcnt <= db_coalescing_threshold)
- cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
- spin_unlock_irq(&rhp->lock);
+ remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
atomic_dec(&qhp->refcnt);
wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
+ spin_lock_irq(&rhp->lock);
+ if (!list_empty(&qhp->db_fc_entry))
+ list_del_init(&qhp->db_fc_entry);
+ spin_unlock_irq(&rhp->lock);
+
ucontext = ib_qp->uobject ?
to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
destroy_qp(&rhp->rdev, &qhp->wq,
@@ -1516,14 +1521,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
return 0;
}
-static int disable_qp_db(int id, void *p, void *data)
-{
- struct c4iw_qp *qp = p;
-
- t4_disable_wq_db(&qp->wq);
- return 0;
-}
-
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct ib_udata *udata)
{
@@ -1533,7 +1530,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct c4iw_cq *schp;
struct c4iw_cq *rchp;
struct c4iw_create_qp_resp uresp;
- int sqsize, rqsize;
+ unsigned int sqsize, rqsize;
struct c4iw_ucontext *ucontext;
int ret;
struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
@@ -1605,25 +1602,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.enable_bind = 1;
qhp->attr.max_ord = 1;
qhp->attr.max_ird = 1;
+ qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
spin_lock_init(&qhp->lock);
mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
- spin_lock_irq(&rhp->lock);
- if (rhp->db_state != NORMAL)
- t4_disable_wq_db(&qhp->wq);
- rhp->qpcnt++;
- if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
- rhp->rdev.stats.db_state_transitions++;
- rhp->db_state = FLOW_CONTROL;
- idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
- }
- if (db_coalescing_threshold >= 0)
- if (rhp->qpcnt > db_coalescing_threshold)
- cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
- ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
- spin_unlock_irq(&rhp->lock);
+ ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret)
goto err2;
@@ -1709,6 +1694,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
+ INIT_LIST_HEAD(&qhp->db_fc_entry);
PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
__func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
qhp->wq.sq.qid);
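
The qp.c changes cache attrs->sq_sig_type == IB_SIGNAL_ALL_WR in qhp->sq_sig_all at create time and OR it into the per-WR signaling decision, so a QP created with "signal all work requests" produces a completion even when a particular send lacks IB_SEND_SIGNALED. A compact sketch of the resulting check (the helper name is illustrative):

#include <rdma/ib_verbs.h>

/* True when this send must produce a CQE: either the WR asks for one
 * or the QP was created with IB_SIGNAL_ALL_WR. */
static bool demo_wr_signaled(const struct ib_send_wr *wr, bool sq_sig_all)
{
	return sq_sig_all || (wr->send_flags & IB_SEND_SIGNALED);
}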
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e73ace739183..eeca8b1e6376 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -300,6 +300,7 @@ struct t4_sq {
u16 cidx;
u16 pidx;
u16 wq_pidx;
+ u16 wq_pidx_inc;
u16 flags;
short flush_cidx;
};
@@ -324,6 +325,7 @@ struct t4_rq {
u16 cidx;
u16 pidx;
u16 wq_pidx;
+ u16 wq_pidx_inc;
};
struct t4_wq {
@@ -609,3 +611,7 @@ static inline void t4_set_cq_in_error(struct t4_cq *cq)
((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}
#endif
+
+struct t4_dev_status_page {
+ u8 db_off;
+};
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
index 32b754c35ab7..11ccd276e5d9 100644
--- a/drivers/infiniband/hw/cxgb4/user.h
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -70,4 +70,9 @@ struct c4iw_create_qp_resp {
__u32 qid_mask;
__u32 flags;
};
+
+struct c4iw_alloc_ucontext_resp {
+ __u64 status_page_key;
+ __u32 status_page_size;
+};
#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index f08f6eaf3fa8..bd45e0f3923f 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -322,7 +322,7 @@ struct ehca_mr_pginfo {
} phy;
struct { /* type EHCA_MR_PGI_USER section */
struct ib_umem *region;
- struct ib_umem_chunk *next_chunk;
+ struct scatterlist *next_sg;
u64 next_nmap;
} usr;
struct { /* type EHCA_MR_PGI_FMR section */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 212150c25ea0..8cc837537768 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -283,6 +283,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
(my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
ehca_err(device, "Copy to udata failed.");
+ cq = ERR_PTR(-EFAULT);
goto create_cq_exit4;
}
}
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index bcfb0c183620..3488e8c9fcb4 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -400,10 +400,7 @@ reg_user_mr_fallback:
pginfo.num_hwpages = num_hwpages;
pginfo.u.usr.region = e_mr->umem;
pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
- pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
- (&e_mr->umem->chunk_list),
- list);
-
+ pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
&e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
@@ -1858,61 +1855,39 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
u64 *kpage)
{
int ret = 0;
- struct ib_umem_chunk *prev_chunk;
- struct ib_umem_chunk *chunk;
u64 pgaddr;
- u32 i = 0;
u32 j = 0;
int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
-
- /* loop over desired chunk entries */
- chunk = pginfo->u.usr.next_chunk;
- prev_chunk = pginfo->u.usr.next_chunk;
- list_for_each_entry_continue(
- chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
- for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
- pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
- << PAGE_SHIFT ;
- *kpage = pgaddr + (pginfo->next_hwpage *
- pginfo->hwpage_size);
- if ( !(*kpage) ) {
- ehca_gen_err("pgaddr=%llx "
- "chunk->page_list[i]=%llx "
- "i=%x next_hwpage=%llx",
- pgaddr, (u64)sg_dma_address(
- &chunk->page_list[i]),
- i, pginfo->next_hwpage);
- return -EFAULT;
- }
- (pginfo->hwpage_cnt)++;
- (pginfo->next_hwpage)++;
- kpage++;
- if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
- (pginfo->kpage_cnt)++;
- (pginfo->u.usr.next_nmap)++;
- pginfo->next_hwpage = 0;
- i++;
- }
- j++;
- if (j >= number) break;
+ struct scatterlist **sg = &pginfo->u.usr.next_sg;
+
+ while (*sg != NULL) {
+ pgaddr = page_to_pfn(sg_page(*sg))
+ << PAGE_SHIFT;
+ *kpage = pgaddr + (pginfo->next_hwpage *
+ pginfo->hwpage_size);
+ if (!(*kpage)) {
+ ehca_gen_err("pgaddr=%llx "
+ "sg_dma_address=%llx "
+ "entry=%llx next_hwpage=%llx",
+ pgaddr, (u64)sg_dma_address(*sg),
+ pginfo->u.usr.next_nmap,
+ pginfo->next_hwpage);
+ return -EFAULT;
}
- if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
- (j >= number)) {
- pginfo->u.usr.next_nmap = 0;
- prev_chunk = chunk;
- break;
- } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
- pginfo->u.usr.next_nmap = 0;
- prev_chunk = chunk;
- } else if (j >= number)
+ (pginfo->hwpage_cnt)++;
+ (pginfo->next_hwpage)++;
+ kpage++;
+ if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
+ (pginfo->kpage_cnt)++;
+ (pginfo->u.usr.next_nmap)++;
+ pginfo->next_hwpage = 0;
+ *sg = sg_next(*sg);
+ }
+ j++;
+ if (j >= number)
break;
- else
- prev_chunk = chunk;
}
- pginfo->u.usr.next_chunk =
- list_prepare_entry(prev_chunk,
- (&(pginfo->u.usr.region->chunk_list)),
- list);
+
return ret;
}
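
The rewrite above replaces the old chunk-list bookkeeping with a scatterlist cursor that the caller owns and the helper advances in place. A standalone sketch of that resumable-cursor pattern, using a plain linked list so it builds outside the kernel (all names here are illustrative):

#include <stddef.h>

struct demo_node {
	struct demo_node *next;
	unsigned long addr;
};

/* The caller keeps the cursor between calls; the helper advances it
 * through a double pointer, just like pginfo->u.usr.next_sg above. */
static int demo_fill_pages(struct demo_node **cursor,
			   unsigned long *out, int number)
{
	int filled = 0;

	while (*cursor && filled < number) {
		out[filled++] = (*cursor)->addr;
		*cursor = (*cursor)->next;      /* analogous to sg_next() */
	}
	return filled;                          /* resume here on the next call */
}
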
@@ -1920,20 +1895,19 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
* check given pages for contiguous layout
* last page addr is returned in prev_pgaddr for further check
*/
-static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
- int start_idx, int end_idx,
+static int ehca_check_kpages_per_ate(struct scatterlist **sg,
+ int num_pages,
u64 *prev_pgaddr)
{
- int t;
- for (t = start_idx; t <= end_idx; t++) {
- u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
+ for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
+ u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
if (ehca_debug_level >= 3)
ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
*(u64 *)__va(pgaddr));
if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
ehca_gen_err("uncontiguous page found pgaddr=%llx "
- "prev_pgaddr=%llx page_list_i=%x",
- pgaddr, *prev_pgaddr, t);
+ "prev_pgaddr=%llx entries_left_in_hwpage=%x",
+ pgaddr, *prev_pgaddr, num_pages);
return -EINVAL;
}
*prev_pgaddr = pgaddr;
@@ -1947,111 +1921,80 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
u64 *kpage)
{
int ret = 0;
- struct ib_umem_chunk *prev_chunk;
- struct ib_umem_chunk *chunk;
u64 pgaddr, prev_pgaddr;
- u32 i = 0;
u32 j = 0;
int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
int nr_kpages = kpages_per_hwpage;
+ struct scatterlist **sg = &pginfo->u.usr.next_sg;
+
+ while (*sg != NULL) {
- /* loop over desired chunk entries */
- chunk = pginfo->u.usr.next_chunk;
- prev_chunk = pginfo->u.usr.next_chunk;
- list_for_each_entry_continue(
- chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
- for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
- if (nr_kpages == kpages_per_hwpage) {
- pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i]))
- << PAGE_SHIFT );
- *kpage = pgaddr;
- if ( !(*kpage) ) {
- ehca_gen_err("pgaddr=%llx i=%x",
- pgaddr, i);
+ if (nr_kpages == kpages_per_hwpage) {
+ pgaddr = (page_to_pfn(sg_page(*sg))
+ << PAGE_SHIFT);
+ *kpage = pgaddr;
+ if (!(*kpage)) {
+ ehca_gen_err("pgaddr=%llx entry=%llx",
+ pgaddr, pginfo->u.usr.next_nmap);
+ ret = -EFAULT;
+ return ret;
+ }
+ /*
+ * The first page in a hwpage must be aligned;
+ * the first MR page is exempt from this rule.
+ */
+ if (pgaddr & (pginfo->hwpage_size - 1)) {
+ if (pginfo->hwpage_cnt) {
+ ehca_gen_err(
+ "invalid alignment "
+ "pgaddr=%llx entry=%llx "
+ "mr_pgsize=%llx",
+ pgaddr, pginfo->u.usr.next_nmap,
+ pginfo->hwpage_size);
ret = -EFAULT;
return ret;
}
- /*
- * The first page in a hwpage must be aligned;
- * the first MR page is exempt from this rule.
- */
- if (pgaddr & (pginfo->hwpage_size - 1)) {
- if (pginfo->hwpage_cnt) {
- ehca_gen_err(
- "invalid alignment "
- "pgaddr=%llx i=%x "
- "mr_pgsize=%llx",
- pgaddr, i,
- pginfo->hwpage_size);
- ret = -EFAULT;
- return ret;
- }
- /* first MR page */
- pginfo->kpage_cnt =
- (pgaddr &
- (pginfo->hwpage_size - 1)) >>
- PAGE_SHIFT;
- nr_kpages -= pginfo->kpage_cnt;
- *kpage = pgaddr &
- ~(pginfo->hwpage_size - 1);
- }
- if (ehca_debug_level >= 3) {
- u64 val = *(u64 *)__va(pgaddr);
- ehca_gen_dbg("kpage=%llx chunk_page=%llx "
- "value=%016llx",
- *kpage, pgaddr, val);
- }
- prev_pgaddr = pgaddr;
- i++;
- pginfo->kpage_cnt++;
- pginfo->u.usr.next_nmap++;
- nr_kpages--;
- if (!nr_kpages)
- goto next_kpage;
- continue;
+ /* first MR page */
+ pginfo->kpage_cnt =
+ (pgaddr &
+ (pginfo->hwpage_size - 1)) >>
+ PAGE_SHIFT;
+ nr_kpages -= pginfo->kpage_cnt;
+ *kpage = pgaddr &
+ ~(pginfo->hwpage_size - 1);
}
- if (i + nr_kpages > chunk->nmap) {
- ret = ehca_check_kpages_per_ate(
- chunk->page_list, i,
- chunk->nmap - 1, &prev_pgaddr);
- if (ret) return ret;
- pginfo->kpage_cnt += chunk->nmap - i;
- pginfo->u.usr.next_nmap += chunk->nmap - i;
- nr_kpages -= chunk->nmap - i;
- break;
+ if (ehca_debug_level >= 3) {
+ u64 val = *(u64 *)__va(pgaddr);
+ ehca_gen_dbg("kpage=%llx page=%llx "
+ "value=%016llx",
+ *kpage, pgaddr, val);
}
+ prev_pgaddr = pgaddr;
+ *sg = sg_next(*sg);
+ pginfo->kpage_cnt++;
+ pginfo->u.usr.next_nmap++;
+ nr_kpages--;
+ if (!nr_kpages)
+ goto next_kpage;
+ continue;
+ }
+
+ ret = ehca_check_kpages_per_ate(sg, nr_kpages,
+ &prev_pgaddr);
+ if (ret)
+ return ret;
+ pginfo->kpage_cnt += nr_kpages;
+ pginfo->u.usr.next_nmap += nr_kpages;
- ret = ehca_check_kpages_per_ate(chunk->page_list, i,
- i + nr_kpages - 1,
- &prev_pgaddr);
- if (ret) return ret;
- i += nr_kpages;
- pginfo->kpage_cnt += nr_kpages;
- pginfo->u.usr.next_nmap += nr_kpages;
next_kpage:
- nr_kpages = kpages_per_hwpage;
- (pginfo->hwpage_cnt)++;
- kpage++;
- j++;
- if (j >= number) break;
- }
- if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
- (j >= number)) {
- pginfo->u.usr.next_nmap = 0;
- prev_chunk = chunk;
- break;
- } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
- pginfo->u.usr.next_nmap = 0;
- prev_chunk = chunk;
- } else if (j >= number)
+ nr_kpages = kpages_per_hwpage;
+ (pginfo->hwpage_cnt)++;
+ kpage++;
+ j++;
+ if (j >= number)
break;
- else
- prev_chunk = chunk;
}
- pginfo->u.usr.next_chunk =
- list_prepare_entry(prev_chunk,
- (&(pginfo->u.usr.region->chunk_list)),
- list);
+
return ret;
}
@@ -2591,16 +2534,6 @@ static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
/* This is only a stub; nothing to be done here */
}
-static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
-{
- return sg->dma_address;
-}
-
-static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
-{
- return sg->length;
-}
-
static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
size_t size,
enum dma_data_direction dir)
@@ -2653,8 +2586,6 @@ struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
.unmap_page = ehca_dma_unmap_page,
.map_sg = ehca_dma_map_sg,
.unmap_sg = ehca_dma_unmap_sg,
- .dma_address = ehca_dma_address,
- .dma_len = ehca_dma_len,
.sync_single_for_cpu = ehca_dma_sync_single_for_cpu,
.sync_single_for_device = ehca_dma_sync_single_for_device,
.alloc_coherent = ehca_dma_alloc_coherent,
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 714293b78518..e2f9a51f4a38 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -326,7 +326,7 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
size_t count, loff_t *off)
{
u32 __iomem *piobuf;
- u32 plen, clen, pbufn;
+ u32 plen, pbufn, maxlen_reserve;
struct ipath_diag_pkt odp;
struct ipath_diag_xpkt dp;
u32 *tmpbuf = NULL;
@@ -335,51 +335,29 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
u64 val;
u32 l_state, lt_state; /* LinkState, LinkTrainingState */
- if (count < sizeof(odp)) {
- ret = -EINVAL;
- goto bail;
- }
if (count == sizeof(dp)) {
if (copy_from_user(&dp, data, sizeof(dp))) {
ret = -EFAULT;
goto bail;
}
- } else if (copy_from_user(&odp, data, sizeof(odp))) {
- ret = -EFAULT;
+ } else if (count == sizeof(odp)) {
+ if (copy_from_user(&odp, data, sizeof(odp))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ } else {
+ ret = -EINVAL;
goto bail;
}
- /*
- * Due to padding/alignment issues (lessened with new struct)
- * the old and new structs are the same length. We need to
- * disambiguate them, which we can do because odp.len has never
- * been less than the total of LRH+BTH+DETH so far, while
- * dp.unit (same offset) unit is unlikely to get that high.
- * Similarly, dp.data, the pointer to user at the same offset
- * as odp.unit, is almost certainly at least one (512byte)page
- * "above" NULL. The if-block below can be omitted if compatibility
- * between a new driver and older diagnostic code is unimportant.
- * compatibility the other direction (new diags, old driver) is
- * handled in the diagnostic code, with a warning.
- */
- if (dp.unit >= 20 && dp.data < 512) {
- /* very probable version mismatch. Fix it up */
- memcpy(&odp, &dp, sizeof(odp));
- /* We got a legacy dp, copy elements to dp */
- dp.unit = odp.unit;
- dp.data = odp.data;
- dp.len = odp.len;
- dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */
- }
-
/* send count must be an exact number of dwords */
if (dp.len & 3) {
ret = -EINVAL;
goto bail;
}
- clen = dp.len >> 2;
+ plen = dp.len >> 2;
dd = ipath_lookup(dp.unit);
if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
@@ -422,16 +400,22 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
goto bail;
}
- /* need total length before first word written */
- /* +1 word is for the qword padding */
- plen = sizeof(u32) + dp.len;
-
- if ((plen + 4) > dd->ipath_ibmaxlen) {
+ /*
+ * need total length before first word written, plus 2 Dwords. One Dword
+ * is for padding so we get the full user data when not aligned on
+ * a word boundary. The other Dword is to make sure we have room for the
+ * ICRC which gets tacked on later.
+ */
+ maxlen_reserve = 2 * sizeof(u32);
+ if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
- plen - 4, dd->ipath_ibmaxlen);
+ dp.len, dd->ipath_ibmaxlen);
ret = -EINVAL;
- goto bail; /* before writing pbc */
+ goto bail;
}
+
+ plen = sizeof(u32) + dp.len;
+
tmpbuf = vmalloc(plen);
if (!tmpbuf) {
dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
@@ -473,11 +457,11 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
*/
if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
ipath_flush_wc();
- __iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
+ __iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
ipath_flush_wc();
- __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
+ __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
} else
- __iowrite32_copy(piobuf + 2, tmpbuf, clen);
+ __iowrite32_copy(piobuf + 2, tmpbuf, plen);
ipath_flush_wc();
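
A small sketch of the new length check, kept outside the patch: reserving two dwords and subtracting on the right-hand side avoids the old pattern of adding to the packet length before comparing, which could wrap for very large values (the sketch assumes ibmaxlen is at least 8 bytes).

#include <stdbool.h>
#include <stdint.h>

static bool demo_diag_len_ok(uint32_t len, uint32_t ibmaxlen)
{
	const uint32_t maxlen_reserve = 2 * sizeof(uint32_t); /* padding + ICRC */

	return len <= ibmaxlen - maxlen_reserve;
}
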
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
index 644c2c74e054..123a8c053539 100644
--- a/drivers/infiniband/hw/ipath/ipath_dma.c
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -115,6 +115,10 @@ static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
ret = 0;
break;
}
+ sg->dma_address = addr + sg->offset;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
}
return ret;
}
@@ -126,21 +130,6 @@ static void ipath_unmap_sg(struct ib_device *dev,
BUG_ON(!valid_dma_direction(direction));
}
-static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
-{
- u64 addr = (u64) page_address(sg_page(sg));
-
- if (addr)
- addr += sg->offset;
- return addr;
-}
-
-static unsigned int ipath_sg_dma_len(struct ib_device *dev,
- struct scatterlist *sg)
-{
- return sg->length;
-}
-
static void ipath_sync_single_for_cpu(struct ib_device *dev,
u64 addr,
size_t size,
@@ -176,17 +165,15 @@ static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
}
struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
- ipath_mapping_error,
- ipath_dma_map_single,
- ipath_dma_unmap_single,
- ipath_dma_map_page,
- ipath_dma_unmap_page,
- ipath_map_sg,
- ipath_unmap_sg,
- ipath_sg_dma_address,
- ipath_sg_dma_len,
- ipath_sync_single_for_cpu,
- ipath_sync_single_for_device,
- ipath_dma_alloc_coherent,
- ipath_dma_free_coherent
+ .mapping_error = ipath_mapping_error,
+ .map_single = ipath_dma_map_single,
+ .unmap_single = ipath_dma_unmap_single,
+ .map_page = ipath_dma_map_page,
+ .unmap_page = ipath_dma_unmap_page,
+ .map_sg = ipath_map_sg,
+ .unmap_sg = ipath_unmap_sg,
+ .sync_single_for_cpu = ipath_sync_single_for_cpu,
+ .sync_single_for_device = ipath_sync_single_for_device,
+ .alloc_coherent = ipath_dma_alloc_coherent,
+ .free_coherent = ipath_dma_free_coherent
};
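
The conversion above is purely syntactic: positional initializers break silently when members are added, removed, or reordered, while designated initializers bind by name. A minimal sketch with an invented ops struct:

struct demo_ops {
	int  (*map)(void *buf, unsigned long len);
	void (*unmap)(void *buf);
	void (*sync)(void *buf);
};

static int  demo_map(void *buf, unsigned long len) { (void)buf; (void)len; return 0; }
static void demo_unmap(void *buf) { (void)buf; }
static void demo_sync(void *buf) { (void)buf; }

static const struct demo_ops demo_ops_table = {
	.map   = demo_map,      /* order no longer matters           */
	.unmap = demo_unmap,
	.sync  = demo_sync,     /* omitted members default to NULL   */
};
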
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index e346d3890a0e..5e61e9bff697 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -188,8 +188,8 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
struct ipath_mr *mr;
struct ib_umem *umem;
- struct ib_umem_chunk *chunk;
- int n, m, i;
+ int n, m, entry;
+ struct scatterlist *sg;
struct ib_mr *ret;
if (length == 0) {
@@ -202,10 +202,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (IS_ERR(umem))
return (void *) umem;
- n = 0;
- list_for_each_entry(chunk, &umem->chunk_list, list)
- n += chunk->nents;
-
+ n = umem->nmap;
mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
if (!mr) {
ret = ERR_PTR(-ENOMEM);
@@ -224,22 +221,20 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
m = 0;
n = 0;
- list_for_each_entry(chunk, &umem->chunk_list, list) {
- for (i = 0; i < chunk->nents; i++) {
- void *vaddr;
-
- vaddr = page_address(sg_page(&chunk->page_list[i]));
- if (!vaddr) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- mr->mr.map[m]->segs[n].vaddr = vaddr;
- mr->mr.map[m]->segs[n].length = umem->page_size;
- n++;
- if (n == IPATH_SEGSZ) {
- m++;
- n = 0;
- }
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ void *vaddr;
+
+ vaddr = page_address(sg_page(sg));
+ if (!vaddr) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ mr->mr.map[m]->segs[n].vaddr = vaddr;
+ mr->mr.map[m]->segs[n].length = umem->page_size;
+ n++;
+ if (n == IPATH_SEGSZ) {
+ m++;
+ n = 0;
}
}
ret = &mr->ibmr;
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 2f215b93db6b..0eb141c41416 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
continue;
slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
- if (slave_id >= dev->dev->num_slaves)
+ if (slave_id >= dev->dev->num_vfs + 1)
return;
tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
form_cache_ag = get_cached_alias_guid(dev, port_num,
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index d1f5f1dd77b0..56a593e0ae5d 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -61,6 +61,11 @@ struct cm_generic_msg {
__be32 remote_comm_id;
};
+struct cm_sidr_generic_msg {
+ struct ib_mad_hdr hdr;
+ __be32 request_id;
+};
+
struct cm_req_msg {
unsigned char unused[0x60];
union ib_gid primary_path_sgid;
@@ -69,28 +74,62 @@ struct cm_req_msg {
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
- struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
- msg->local_comm_id = cpu_to_be32(cm_id);
+ if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ struct cm_sidr_generic_msg *msg =
+ (struct cm_sidr_generic_msg *)mad;
+ msg->request_id = cpu_to_be32(cm_id);
+ } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+ pr_err("trying to set local_comm_id in SIDR_REP\n");
+ return;
+ } else {
+ struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+ msg->local_comm_id = cpu_to_be32(cm_id);
+ }
}
static u32 get_local_comm_id(struct ib_mad *mad)
{
- struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-
- return be32_to_cpu(msg->local_comm_id);
+ if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ struct cm_sidr_generic_msg *msg =
+ (struct cm_sidr_generic_msg *)mad;
+ return be32_to_cpu(msg->request_id);
+ } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+ pr_err("trying to set local_comm_id in SIDR_REP\n");
+ return -1;
+ } else {
+ struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+ return be32_to_cpu(msg->local_comm_id);
+ }
}
static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
- struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
- msg->remote_comm_id = cpu_to_be32(cm_id);
+ if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+ struct cm_sidr_generic_msg *msg =
+ (struct cm_sidr_generic_msg *)mad;
+ msg->request_id = cpu_to_be32(cm_id);
+ } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ pr_err("trying to set remote_comm_id in SIDR_REQ\n");
+ return;
+ } else {
+ struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+ msg->remote_comm_id = cpu_to_be32(cm_id);
+ }
}
static u32 get_remote_comm_id(struct ib_mad *mad)
{
- struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-
- return be32_to_cpu(msg->remote_comm_id);
+ if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+ struct cm_sidr_generic_msg *msg =
+ (struct cm_sidr_generic_msg *)mad;
+ return be32_to_cpu(msg->request_id);
+ } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ pr_err("trying to set remote_comm_id in SIDR_REQ\n");
+ return -1;
+ } else {
+ struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+ return be32_to_cpu(msg->remote_comm_id);
+ }
}
static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
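
For readers unfamiliar with the CM wire layout: SIDR REQ/REP MADs carry a single request_id immediately after the MAD header, while the other CM MADs carry local/remote comm ids, which is why each accessor above now dispatches on attr_id. A toy sketch of the same dispatch (offsets and attribute values are made up for the example):

#include <stdint.h>

enum { DEMO_ATTR_SIDR_REQ = 0x17, DEMO_ATTR_SIDR_REP = 0x18 };

struct demo_mad_hdr  { uint16_t attr_id; };
struct demo_cm_msg   { struct demo_mad_hdr hdr; uint32_t local_id; uint32_t remote_id; };
struct demo_sidr_msg { struct demo_mad_hdr hdr; uint32_t request_id; };

static uint32_t demo_get_local_id(const void *mad)
{
	const struct demo_mad_hdr *hdr = mad;

	if (hdr->attr_id == DEMO_ATTR_SIDR_REQ)
		return ((const struct demo_sidr_msg *)mad)->request_id;
	return ((const struct demo_cm_msg *)mad)->local_id;
}
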
@@ -282,19 +321,21 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
u32 sl_cm_id;
int pv_cm_id = -1;
- sl_cm_id = get_local_comm_id(mad);
-
if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
- mad->mad_hdr.attr_id == CM_REP_ATTR_ID) {
+ mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ sl_cm_id = get_local_comm_id(mad);
id = id_map_alloc(ibdev, slave_id, sl_cm_id);
if (IS_ERR(id)) {
mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
__func__, slave_id, sl_cm_id);
return PTR_ERR(id);
}
- } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) {
+ } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
return 0;
} else {
+ sl_cm_id = get_local_comm_id(mad);
id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
}
@@ -315,14 +356,18 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
}
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
- struct ib_mad *mad)
+ struct ib_mad *mad)
{
u32 pv_cm_id;
struct id_map_entry *id;
- if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) {
+ if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
union ib_gid gid;
+ if (!slave)
+ return 0;
+
gid = gid_from_req_msg(ibdev, mad);
*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
if (*slave < 0) {
@@ -341,7 +386,8 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
return -ENOENT;
}
- *slave = id->slave_id;
+ if (slave)
+ *slave = id->slave_id;
set_remote_comm_id(mad, id->sl_cm_id);
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cc40f08ca8f1..5f640814cc81 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -564,7 +564,7 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
}
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
- unsigned tail, struct mlx4_cqe *cqe)
+ unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
struct mlx4_ib_proxy_sqp_hdr *hdr;
@@ -574,12 +574,20 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
DMA_FROM_DEVICE);
hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
- wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
- wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
wc->dlid_path_bits = 0;
+ if (is_eth) {
+ wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
+ memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
+ memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
+ wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
+ } else {
+ wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
+ wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
+ }
+
return 0;
}
@@ -594,6 +602,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
struct mlx4_srq *msrq = NULL;
int is_send;
int is_error;
+ int is_eth;
u32 g_mlpath_rqpn;
u16 wqe_ctr;
unsigned tail = 0;
@@ -778,11 +787,15 @@ repoll:
break;
}
+ is_eth = (rdma_port_get_link_layer(wc->qp->device,
+ (*cur_qp)->port) ==
+ IB_LINK_LAYER_ETHERNET);
if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
if ((*cur_qp)->mlx4_ib_qp_type &
(MLX4_IB_QPT_PROXY_SMI_OWNER |
MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
- return use_tunnel_data(*cur_qp, cq, wc, tail, cqe);
+ return use_tunnel_data(*cur_qp, cq, wc, tail,
+ cqe, is_eth);
}
wc->slid = be16_to_cpu(cqe->rlid);
@@ -793,20 +806,21 @@ repoll:
wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
- if (rdma_port_get_link_layer(wc->qp->device,
- (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
+ if (is_eth) {
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
- else
- wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
- if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
- wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
- MLX4_CQE_VID_MASK;
+ if (be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_VLAN_PRESENT_MASK) {
+ wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
+ MLX4_CQE_VID_MASK;
+ } else {
+ wc->vlan_id = 0xffff;
+ }
+ memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
} else {
+ wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
wc->vlan_id = 0xffff;
}
- wc->wc_flags |= IB_WC_WITH_VLAN;
- memcpy(wc->smac, cqe->smac, ETH_ALEN);
- wc->wc_flags |= IB_WC_WITH_SMAC;
}
return 0;
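
A compact sketch of the branch this hunk introduces: on Ethernet (RoCE) completions the sl_vid word holds a 3-bit PCP plus a 12-bit VLAN id and the source MAC is reported, while on IB it holds a 4-bit SL. Field names below are stand-ins, not the real mlx4 CQE layout.

#include <stdint.h>
#include <string.h>

struct demo_cqe { uint16_t sl_vid; uint8_t smac[6]; int vlan_present; };
struct demo_wc  { uint8_t sl; uint16_t vlan_id; uint8_t smac[6]; };

static void demo_fill_wc(struct demo_wc *wc, const struct demo_cqe *cqe, int is_eth)
{
	if (is_eth) {
		wc->sl = cqe->sl_vid >> 13;                     /* 3-bit PCP  */
		wc->vlan_id = cqe->vlan_present ?
			      (cqe->sl_vid & 0x0fff) : 0xffff;  /* VID or none */
		memcpy(wc->smac, cqe->smac, 6);
	} else {
		wc->sl = cqe->sl_vid >> 12;                     /* 4-bit IB SL */
		wc->vlan_id = 0xffff;
	}
}
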
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 8aee4233b388..c51740986367 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -45,7 +45,6 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
struct mlx4_db *db)
{
struct mlx4_ib_user_db_page *page;
- struct ib_umem_chunk *chunk;
int err = 0;
mutex_lock(&context->db_page_mutex);
@@ -73,8 +72,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
list_add(&page->list, &context->db_page_list);
found:
- chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
- db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
+ db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
db->u.user_page = page;
++page->refcnt;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index f2a3f48107e7..fd36ec672632 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -467,6 +467,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
int ret = 0;
u16 tun_pkey_ix;
u16 cached_pkey;
+ u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
if (dest_qpt > IB_QPT_GSI)
return -EINVAL;
@@ -509,6 +510,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
* The driver will set the force loopback bit in post_send */
memset(&attr, 0, sizeof attr);
attr.port_num = port;
+ if (is_eth) {
+ memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
+ attr.ah_flags = IB_AH_GRH;
+ }
ah = ib_create_ah(tun_ctx->pd, &attr);
if (IS_ERR(ah))
return -ENOMEM;
@@ -540,11 +545,36 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
/* adjust tunnel data */
tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
- tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
- tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
+ if (is_eth) {
+ u16 vlan = 0;
+ if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
+ NULL)) {
+ /* VST mode */
+ if (vlan != wc->vlan_id)
+ /* Packet vlan is not the VST-assigned vlan.
+ * Drop the packet.
+ */
+ goto out;
+ else
+ /* Remove the vlan tag before forwarding
+ * the packet to the VF.
+ */
+ vlan = 0xffff;
+ } else {
+ vlan = wc->vlan_id;
+ }
+
+ tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
+ memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
+ memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
+ } else {
+ tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
+ tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
+ }
+
ib_dma_sync_single_for_device(&dev->ib_dev,
tun_qp->tx_ring[tun_tx_ix].buf.map,
sizeof (struct mlx4_rcv_tunnel_mad),
@@ -580,6 +610,41 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
int err;
int slave;
u8 *slave_id;
+ int is_eth = 0;
+
+ if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
+ is_eth = 0;
+ else
+ is_eth = 1;
+
+ if (is_eth) {
+ if (!(wc->wc_flags & IB_WC_GRH)) {
+ mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
+ return -EINVAL;
+ }
+ if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
+ mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
+ return -EINVAL;
+ }
+ if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
+ mlx4_ib_warn(ibdev, "failed matching grh\n");
+ return -ENOENT;
+ }
+ if (slave >= dev->dev->caps.sqp_demux) {
+ mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
+ slave, dev->dev->caps.sqp_demux);
+ return -ENOENT;
+ }
+
+ if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
+ return 0;
+
+ err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
+ if (err)
+ pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
+ slave, err);
+ return 0;
+ }
/* Initially assume that this mad is for us */
slave = mlx4_master_func_num(dev->dev);
@@ -1076,8 +1141,9 @@ static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
- enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
- u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad)
+ enum ib_qp_type dest_qpt, u16 pkey_index,
+ u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
+ u8 *s_mac, struct ib_mad *mad)
{
struct ib_sge list;
struct ib_send_wr wr, *bad_wr;
@@ -1166,6 +1232,9 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
wr.num_sge = 1;
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
+ if (s_mac)
+ memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
+
ret = ib_post_send(send_qp, &wr, &bad_wr);
out:
@@ -1174,6 +1243,22 @@ out:
return ret;
}
+static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
+{
+ if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
+ return slave;
+ return mlx4_get_base_gid_ix(dev->dev, slave, port);
+}
+
+static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
+ struct ib_ah_attr *ah_attr)
+{
+ if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
+ ah_attr->grh.sgid_index = slave;
+ else
+ ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
+}
+
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
@@ -1184,6 +1269,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
struct ib_ah_attr ah_attr;
u8 *slave_id;
int slave;
+ int port;
/* Get slave that sent this packet */
if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
@@ -1260,12 +1346,18 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
ah.ibah.device = ctx->ib_dev;
mlx4_ib_query_ah(&ah.ibah, &ah_attr);
- if ((ah_attr.ah_flags & IB_AH_GRH) &&
- (ah_attr.grh.sgid_index != slave)) {
- mlx4_ib_warn(ctx->ib_dev, "slave:%d accessed invalid sgid_index:%d\n",
- slave, ah_attr.grh.sgid_index);
+ if (ah_attr.ah_flags & IB_AH_GRH)
+ fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
+
+ port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num);
+ if (port < 0)
return;
- }
+ ah_attr.port_num = port;
+ memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
+ ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
+ /* if the slave has a default vlan, use it */
+ mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
+ &ah_attr.vlan_id, &ah_attr.sl);
mlx4_ib_send_to_wire(dev, slave, ctx->port,
is_proxy_qp0(dev, wc->src_qp, slave) ?
@@ -1273,7 +1365,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
be16_to_cpu(tunnel->hdr.pkey_index),
be32_to_cpu(tunnel->hdr.remote_qpn),
be32_to_cpu(tunnel->hdr.qkey),
- &ah_attr, &tunnel->mad);
+ &ah_attr, wc->smac, &tunnel->mad);
}
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
@@ -1850,7 +1942,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
ctx->port = port;
ctx->ib_dev = &dev->ib_dev;
- for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+ for (i = 0;
+ i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1));
+ i++) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev->dev, i);
+
+ if (!test_bit(port - 1, actv_ports.ports))
+ continue;
+
ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
if (ret) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f9c12e92fdd6..1b6dbe156a37 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1546,7 +1546,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
iboe = &ibdev->iboe;
spin_lock(&iboe->lock);
- for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+ for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
if ((netif_is_bond_master(real_dev) &&
(real_dev == iboe->masters[port - 1])) ||
(!netif_is_bond_master(real_dev) &&
@@ -1569,14 +1569,14 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
iboe = &ibdev->iboe;
- for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+ for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
if ((netif_is_bond_master(real_dev) &&
(real_dev == iboe->masters[port - 1])) ||
(!netif_is_bond_master(real_dev) &&
(real_dev == iboe->netdevs[port - 1])))
break;
- if ((port == 0) || (port > MLX4_MAX_PORTS))
+ if ((port == 0) || (port > ibdev->dev->caps.num_ports))
return 0;
else
return port;
@@ -1626,7 +1626,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
union ib_gid gid;
- if ((port == 0) || (port > MLX4_MAX_PORTS))
+ if ((port == 0) || (port > ibdev->dev->caps.num_ports))
return;
/* IPv4 gids */
@@ -1803,7 +1803,7 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
- char name[32];
+ char name[80];
int eq_per_port = 0;
int added_eqs = 0;
int total_eqs = 0;
@@ -1833,8 +1833,8 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
eq = 0;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
for (j = 0; j < eq_per_port; j++) {
- sprintf(name, "mlx4-ib-%d-%d@%s",
- i, j, dev->pdev->bus->name);
+ snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
+ i, j, dev->pdev->bus->name);
/* Set IRQ for specific name (per ring) */
if (mlx4_assign_eq(dev, name, NULL,
&ibdev->eq_table[eq])) {
@@ -1888,14 +1888,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
pr_info_once("%s", mlx4_ib_version);
- mlx4_foreach_non_ib_transport_port(i, dev)
- num_ports++;
-
- if (mlx4_is_mfunc(dev) && num_ports) {
- dev_err(&dev->pdev->dev, "RoCE is not supported over SRIOV as yet\n");
- return NULL;
- }
-
num_ports = 0;
mlx4_foreach_ib_transport_port(i, dev)
num_ports++;
@@ -2056,8 +2048,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
if (err)
ibdev->counters[i] = -1;
- } else
- ibdev->counters[i] = -1;
+ } else {
+ ibdev->counters[i] = -1;
+ }
}
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
@@ -2331,17 +2324,24 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
struct mlx4_dev *dev = ibdev->dev;
int i;
unsigned long flags;
+ struct mlx4_active_ports actv_ports;
+ unsigned int ports;
+ unsigned int first_port;
if (!mlx4_is_master(dev))
return;
- dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
+ actv_ports = mlx4_get_active_ports(dev, slave);
+ ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+ first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
+
+ dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
if (!dm) {
pr_err("failed to allocate memory for tunneling qp update\n");
goto out;
}
- for (i = 0; i < dev->caps.num_ports; i++) {
+ for (i = 0; i < ports; i++) {
dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
if (!dm[i]) {
pr_err("failed to allocate memory for tunneling qp update work struct\n");
@@ -2353,9 +2353,9 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
}
}
/* initialize or tear down tunnel QPs for the slave */
- for (i = 0; i < dev->caps.num_ports; i++) {
+ for (i = 0; i < ports; i++) {
INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
- dm[i]->port = i + 1;
+ dm[i]->port = first_port + i + 1;
dm[i]->slave = slave;
dm[i]->do_init = do_init;
dm[i]->dev = ibdev;
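
The hunk sizes the work array by the slave's active ports instead of all device ports and offsets the port number by the first active one. A freestanding sketch of that bitmap arithmetic, with a plain 32-bit word standing in for the kernel bitmap:

#include <stdint.h>

static unsigned int demo_weight(uint32_t bits)          /* like bitmap_weight() */
{
	unsigned int n = 0;

	for (; bits; bits &= bits - 1)
		n++;
	return n;
}

static unsigned int demo_first_bit(uint32_t bits)       /* like find_first_bit() */
{
	unsigned int i = 0;

	while (i < 32 && !(bits & (1u << i)))
		i++;
	return i;
}

/* ports = demo_weight(actv); first_port = demo_first_bit(actv);
 * work item i then targets port first_port + i + 1, as in the loop above. */
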
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 25b2cdff00f8..ed327e6c8fdc 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -215,8 +215,9 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
}
mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
spin_unlock(&dev->sm_lock);
- return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port,
- IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, mad);
+ return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
+ ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
+ &ah_attr, NULL, mad);
}
static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index a230683af940..f589522fddfd 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -241,6 +241,22 @@ struct mlx4_ib_proxy_sqp_hdr {
struct mlx4_rcv_tunnel_hdr tun;
} __packed;
+struct mlx4_roce_smac_vlan_info {
+ u64 smac;
+ int smac_index;
+ int smac_port;
+ u64 candidate_smac;
+ int candidate_smac_index;
+ int candidate_smac_port;
+ u16 vid;
+ int vlan_index;
+ int vlan_port;
+ u16 candidate_vid;
+ int candidate_vlan_index;
+ int candidate_vlan_port;
+ int update_vid;
+};
+
struct mlx4_ib_qp {
struct ib_qp ibqp;
struct mlx4_qp mqp;
@@ -273,8 +289,9 @@ struct mlx4_ib_qp {
struct list_head gid_list;
struct list_head steering_rules;
struct mlx4_ib_buf *sqp_proxy_rcv;
+ struct mlx4_roce_smac_vlan_info pri;
+ struct mlx4_roce_smac_vlan_info alt;
u64 reg_id;
-
};
struct mlx4_ib_srq {
@@ -720,9 +737,12 @@ void mlx4_ib_tunnels_update_work(struct work_struct *work);
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
enum ib_qp_type qpt, struct ib_wc *wc,
struct ib_grh *grh, struct ib_mad *mad);
+
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
- u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad);
+ u32 qkey, struct ib_ah_attr *attr, u8 *s_mac,
+ struct ib_mad *mad);
+
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index e471f089ff00..cb2a8727f3fb 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -90,11 +90,11 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem)
{
u64 *pages;
- struct ib_umem_chunk *chunk;
- int i, j, k;
+ int i, k, entry;
int n;
int len;
int err = 0;
+ struct scatterlist *sg;
pages = (u64 *) __get_free_page(GFP_KERNEL);
if (!pages)
@@ -102,26 +102,25 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
i = n = 0;
- list_for_each_entry(chunk, &umem->chunk_list, list)
- for (j = 0; j < chunk->nmap; ++j) {
- len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = sg_dma_address(&chunk->page_list[j]) +
- umem->page_size * k;
- /*
- * Be friendly to mlx4_write_mtt() and
- * pass it chunks of appropriate size.
- */
- if (i == PAGE_SIZE / sizeof (u64)) {
- err = mlx4_write_mtt(dev->dev, mtt, n,
- i, pages);
- if (err)
- goto out;
- n += i;
- i = 0;
- }
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ len = sg_dma_len(sg) >> mtt->page_shift;
+ for (k = 0; k < len; ++k) {
+ pages[i++] = sg_dma_address(sg) +
+ umem->page_size * k;
+ /*
+ * Be friendly to mlx4_write_mtt() and
+ * pass it chunks of appropriate size.
+ */
+ if (i == PAGE_SIZE / sizeof (u64)) {
+ err = mlx4_write_mtt(dev->dev, mtt, n,
+ i, pages);
+ if (err)
+ goto out;
+ n += i;
+ i = 0;
}
}
+ }
if (i)
err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);
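
The loop above is the usual "expand and batch" pattern: each DMA segment is expanded into page-sized addresses and flushed whenever the scratch page fills, so the MTT writer always sees reasonably sized chunks. A standalone sketch of the same pattern with invented names:

#include <stdint.h>

struct demo_seg { uint64_t dma_addr; uint32_t len; };

typedef int (*demo_write_fn)(int start, int npages, const uint64_t *pages);

static int demo_write_mtt(const struct demo_seg *segs, int nsegs,
			  uint32_t page_size, uint64_t *scratch,
			  int scratch_len, demo_write_fn write)
{
	int i = 0, n = 0, err;

	for (int s = 0; s < nsegs; s++) {
		uint32_t npages = segs[s].len / page_size;

		for (uint32_t k = 0; k < npages; k++) {
			scratch[i++] = segs[s].dma_addr + (uint64_t)k * page_size;
			if (i == scratch_len) {         /* flush a full batch */
				err = write(n, i, scratch);
				if (err)
					return err;
				n += i;
				i = 0;
			}
		}
	}
	return i ? write(n, i, scratch) : 0;            /* flush the tail */
}
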
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index d8f4d1fe8494..41308af4163c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -662,10 +662,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (!sqp)
return -ENOMEM;
qp = &sqp->qp;
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
} else {
qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
}
} else
qp = *caller_qp;
@@ -940,11 +944,32 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
{
struct mlx4_ib_cq *send_cq, *recv_cq;
- if (qp->state != IB_QPS_RESET)
+ if (qp->state != IB_QPS_RESET) {
if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
pr_warn("modify QP %06x to RESET failed.\n",
qp->mqp.qpn);
+ if (qp->pri.smac) {
+ mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
+ qp->pri.smac = 0;
+ }
+ if (qp->alt.smac) {
+ mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
+ qp->alt.smac = 0;
+ }
+ if (qp->pri.vid < 0x1000) {
+ mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
+ qp->pri.vid = 0xFFFF;
+ qp->pri.candidate_vid = 0xFFFF;
+ qp->pri.update_vid = 0;
+ }
+ if (qp->alt.vid < 0x1000) {
+ mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
+ qp->alt.vid = 0xFFFF;
+ qp->alt.candidate_vid = 0xFFFF;
+ qp->alt.update_vid = 0;
+ }
+ }
get_cqs(qp, &send_cq, &recv_cq);
@@ -1057,6 +1082,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
qp = kzalloc(sizeof *qp, GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
/* fall through */
case IB_QPT_UD:
{
@@ -1188,12 +1215,13 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
- u8 port)
+ struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
{
int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET;
int vidx;
int smac_index;
+ int err;
path->grh_mylmc = ah->src_path_bits & 0x7f;
@@ -1223,61 +1251,103 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
}
if (is_eth) {
- path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
- ((port - 1) << 6) | ((ah->sl & 7) << 3);
-
if (!(ah->ah_flags & IB_AH_GRH))
return -1;
- memcpy(path->dmac, ah->dmac, ETH_ALEN);
- path->ackto = MLX4_IB_LINK_TYPE_ETH;
- /* find the index into MAC table for IBoE */
- if (!is_zero_ether_addr((const u8 *)&smac)) {
- if (mlx4_find_cached_mac(dev->dev, port, smac,
- &smac_index))
- return -ENOENT;
- } else {
- smac_index = 0;
- }
-
- path->grh_mylmc &= 0x80 | smac_index;
+ path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+ ((port - 1) << 6) | ((ah->sl & 7) << 3);
path->feup |= MLX4_FEUP_FORCE_ETH_UP;
if (vlan_tag < 0x1000) {
- if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
- return -ENOENT;
-
- path->vlan_index = vidx;
- path->fl = 1 << 6;
+ if (smac_info->vid < 0x1000) {
+ /* both valid vlan ids */
+ if (smac_info->vid != vlan_tag) {
+ /* different VIDs. unreg old and reg new */
+ err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
+ if (err)
+ return err;
+ smac_info->candidate_vid = vlan_tag;
+ smac_info->candidate_vlan_index = vidx;
+ smac_info->candidate_vlan_port = port;
+ smac_info->update_vid = 1;
+ path->vlan_index = vidx;
+ } else {
+ path->vlan_index = smac_info->vlan_index;
+ }
+ } else {
+ /* no current vlan tag in qp */
+ err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
+ if (err)
+ return err;
+ smac_info->candidate_vid = vlan_tag;
+ smac_info->candidate_vlan_index = vidx;
+ smac_info->candidate_vlan_port = port;
+ smac_info->update_vid = 1;
+ path->vlan_index = vidx;
+ }
path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
+ path->fl = 1 << 6;
+ } else {
+ /* the QP currently has a vlan tag; unregister it once modify-qp succeeds */
+ if (smac_info->vid < 0x1000) {
+ smac_info->candidate_vid = 0xFFFF;
+ smac_info->update_vid = 1;
+ }
}
- } else
+
+ /* get smac_index for RoCE use.
+ * If no smac was yet assigned, register one.
+ * If one was already assigned, but the new mac differs,
+ * unregister the old one and register the new one.
+ */
+ if (!smac_info->smac || smac_info->smac != smac) {
+ /* register candidate now, unreg if needed, after success */
+ smac_index = mlx4_register_mac(dev->dev, port, smac);
+ if (smac_index >= 0) {
+ smac_info->candidate_smac_index = smac_index;
+ smac_info->candidate_smac = smac;
+ smac_info->candidate_smac_port = port;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ smac_index = smac_info->smac_index;
+ }
+
+ memcpy(path->dmac, ah->dmac, 6);
+ path->ackto = MLX4_IB_LINK_TYPE_ETH;
+ /* put MAC table smac index for IBoE */
+ path->grh_mylmc = (u8) (smac_index) | 0x80;
+ } else {
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
((port - 1) << 6) | ((ah->sl & 0xf) << 2);
+ }
return 0;
}
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
enum ib_qp_attr_mask qp_attr_mask,
+ struct mlx4_ib_qp *mqp,
struct mlx4_qp_path *path, u8 port)
{
return _mlx4_set_path(dev, &qp->ah_attr,
mlx4_mac_to_u64((u8 *)qp->smac),
(qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
- path, port);
+ path, &mqp->pri, port);
}
static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
const struct ib_qp_attr *qp,
enum ib_qp_attr_mask qp_attr_mask,
+ struct mlx4_ib_qp *mqp,
struct mlx4_qp_path *path, u8 port)
{
return _mlx4_set_path(dev, &qp->alt_ah_attr,
mlx4_mac_to_u64((u8 *)qp->alt_smac),
(qp_attr_mask & IB_QP_ALT_VID) ?
qp->alt_vlan_id : 0xffff,
- path, port);
+ path, &mqp->alt, port);
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
@@ -1292,6 +1362,37 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
}
}
+static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
+ struct mlx4_qp_context *context)
+{
+ struct net_device *ndev;
+ u64 u64_mac;
+ int smac_index;
+
+
+ ndev = dev->iboe.netdevs[qp->port - 1];
+ if (ndev) {
+ smac = ndev->dev_addr;
+ u64_mac = mlx4_mac_to_u64(smac);
+ } else {
+ u64_mac = dev->dev->caps.def_mac[qp->port];
+ }
+
+ context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
+ if (!qp->pri.smac) {
+ smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
+ if (smac_index >= 0) {
+ qp->pri.candidate_smac_index = smac_index;
+ qp->pri.candidate_smac = u64_mac;
+ qp->pri.candidate_smac_port = qp->port;
+ context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
+ } else {
+ return -ENOENT;
+ }
+ }
+ return 0;
+}
+
static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, enum ib_qp_state new_state)
@@ -1403,7 +1504,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_AV) {
- if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path,
+ if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
attr_mask & IB_QP_PORT ?
attr->port_num : qp->port))
goto out;
@@ -1426,7 +1527,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
dev->dev->caps.pkey_table_len[attr->alt_port_num])
goto out;
- if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path,
+ if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
+ &context->alt_path,
attr->alt_port_num))
goto out;
@@ -1532,6 +1634,20 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
context->pri_path.fl = 0x80;
context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
}
+ if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+ IB_LINK_LAYER_ETHERNET) {
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
+ qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
+ context->pri_path.feup = 1 << 7; /* don't fsm */
+ /* handle smac_index */
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
+ qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
+ qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
+ err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
+ if (err)
+ return -EINVAL;
+ }
+ }
}
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
@@ -1619,28 +1735,113 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
* If we moved a kernel QP to RESET, clean up all old CQ
* entries and reinitialize the QP.
*/
- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
- mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
- ibqp->srq ? to_msrq(ibqp->srq): NULL);
- if (send_cq != recv_cq)
- mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+ if (new_state == IB_QPS_RESET) {
+ if (!ibqp->uobject) {
+ mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
+ ibqp->srq ? to_msrq(ibqp->srq) : NULL);
+ if (send_cq != recv_cq)
+ mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+
+ qp->rq.head = 0;
+ qp->rq.tail = 0;
+ qp->sq.head = 0;
+ qp->sq.tail = 0;
+ qp->sq_next_wqe = 0;
+ if (qp->rq.wqe_cnt)
+ *qp->db.db = 0;
- qp->rq.head = 0;
- qp->rq.tail = 0;
- qp->sq.head = 0;
- qp->sq.tail = 0;
- qp->sq_next_wqe = 0;
- if (qp->rq.wqe_cnt)
- *qp->db.db = 0;
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ mlx4_ib_steer_qp_reg(dev, qp, 0);
+ }
+ if (qp->pri.smac) {
+ mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
+ qp->pri.smac = 0;
+ }
+ if (qp->alt.smac) {
+ mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
+ qp->alt.smac = 0;
+ }
+ if (qp->pri.vid < 0x1000) {
+ mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
+ qp->pri.vid = 0xFFFF;
+ qp->pri.candidate_vid = 0xFFFF;
+ qp->pri.update_vid = 0;
+ }
- if (qp->flags & MLX4_IB_QP_NETIF)
- mlx4_ib_steer_qp_reg(dev, qp, 0);
+ if (qp->alt.vid < 0x1000) {
+ mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
+ qp->alt.vid = 0xFFFF;
+ qp->alt.candidate_vid = 0xFFFF;
+ qp->alt.update_vid = 0;
+ }
}
-
out:
if (err && steer_qp)
mlx4_ib_steer_qp_reg(dev, qp, 0);
kfree(context);
+ if (qp->pri.candidate_smac) {
+ if (err) {
+ mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
+ } else {
+ if (qp->pri.smac)
+ mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
+ qp->pri.smac = qp->pri.candidate_smac;
+ qp->pri.smac_index = qp->pri.candidate_smac_index;
+ qp->pri.smac_port = qp->pri.candidate_smac_port;
+ }
+ qp->pri.candidate_smac = 0;
+ qp->pri.candidate_smac_index = 0;
+ qp->pri.candidate_smac_port = 0;
+ }
+ if (qp->alt.candidate_smac) {
+ if (err) {
+ mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
+ } else {
+ if (qp->alt.smac)
+ mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
+ qp->alt.smac = qp->alt.candidate_smac;
+ qp->alt.smac_index = qp->alt.candidate_smac_index;
+ qp->alt.smac_port = qp->alt.candidate_smac_port;
+ }
+ qp->alt.candidate_smac = 0;
+ qp->alt.candidate_smac_index = 0;
+ qp->alt.candidate_smac_port = 0;
+ }
+
+ if (qp->pri.update_vid) {
+ if (err) {
+ if (qp->pri.candidate_vid < 0x1000)
+ mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
+ qp->pri.candidate_vid);
+ } else {
+ if (qp->pri.vid < 0x1000)
+ mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
+ qp->pri.vid);
+ qp->pri.vid = qp->pri.candidate_vid;
+ qp->pri.vlan_port = qp->pri.candidate_vlan_port;
+ qp->pri.vlan_index = qp->pri.candidate_vlan_index;
+ }
+ qp->pri.candidate_vid = 0xFFFF;
+ qp->pri.update_vid = 0;
+ }
+
+ if (qp->alt.update_vid) {
+ if (err) {
+ if (qp->alt.candidate_vid < 0x1000)
+ mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
+ qp->alt.candidate_vid);
+ } else {
+ if (qp->alt.vid < 0x1000)
+ mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
+ qp->alt.vid);
+ qp->alt.vid = qp->alt.candidate_vid;
+ qp->alt.vlan_port = qp->alt.candidate_vlan_port;
+ qp->alt.vlan_index = qp->alt.candidate_vlan_index;
+ }
+ qp->alt.candidate_vid = 0xFFFF;
+ qp->alt.update_vid = 0;
+ }
+
return err;
}
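
The tail of this function applies a commit-or-rollback rule to the per-QP MAC and VLAN resources: the new resource is registered up front as a candidate and only promoted (with the old one released) once the firmware modify succeeds. A compact sketch of that idiom with invented types:

#include <stdbool.h>
#include <stdint.h>

struct demo_res { uint64_t cur, candidate; };

static void demo_commit(struct demo_res *r, bool modify_failed,
			void (*unregister)(uint64_t))
{
	if (!r->candidate)
		return;
	if (modify_failed) {
		unregister(r->candidate);       /* roll back the new one */
	} else {
		if (r->cur)
			unregister(r->cur);     /* retire the old one    */
		r->cur = r->candidate;          /* promote the candidate */
	}
	r->candidate = 0;
}
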
@@ -1842,9 +2043,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
{
struct ib_device *ib_dev = sqp->qp.ibqp.device;
struct mlx4_wqe_mlx_seg *mlx = wqe;
+ struct mlx4_wqe_ctrl_seg *ctrl = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
- struct net_device *ndev;
union ib_gid sgid;
u16 pkey;
int send_size;
@@ -1868,12 +2069,11 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
/* When multi-function is enabled, the ib_core gid
* indexes don't necessarily match the hw ones, so
* we must use our own cache */
- sgid.global.subnet_prefix =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- subnet_prefix;
- sgid.global.interface_id =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- guid_cache[ah->av.ib.gid_index];
+ err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
+ be32_to_cpu(ah->av.ib.port_pd) >> 24,
+ ah->av.ib.gid_index, &sgid.raw[0]);
+ if (err)
+ return err;
} else {
err = ib_get_cached_gid(ib_dev,
be32_to_cpu(ah->av.ib.port_pd) >> 24,
@@ -1882,7 +2082,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
return err;
}
- if (ah->av.eth.vlan != 0xffff) {
+ if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
is_vlan = 1;
}
@@ -1902,6 +2102,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
sqp->ud_header.grh.flow_label =
ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
+ if (is_eth)
+ memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
+ else {
if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
/* When multi-function is enabled, the ib_core gid
* indexes don't necessarily match the hw ones, so
@@ -1917,6 +2120,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
be32_to_cpu(ah->av.ib.port_pd) >> 24,
ah->av.ib.gid_index,
&sqp->ud_header.grh.source_gid);
+ }
memcpy(sqp->ud_header.grh.destination_gid.raw,
ah->av.ib.dgid, 16);
}
@@ -1949,16 +2153,23 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
if (is_eth) {
u8 *smac;
+ struct in6_addr in6;
+
u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
mlx->sched_prio = cpu_to_be16(pcp);
memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
/* FIXME: cache smac value? */
- ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
- if (!ndev)
- return -ENODEV;
- smac = ndev->dev_addr;
+ memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
+ memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
+ memcpy(&in6, sgid.raw, sizeof(in6));
+
+ if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
+ smac = to_mdev(sqp->qp.ibqp.device)->
+ iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+ else /* use the src mac of the tunnel */
+ smac = ah->av.eth.s_mac;
memcpy(sqp->ud_header.eth.smac_h, smac, 6);
if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
@@ -2190,6 +2401,8 @@ static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_
hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+ memcpy(hdr.mac, ah->av.eth.mac, 6);
+ hdr.vlan = ah->av.eth.vlan;
spc = MLX4_INLINE_ALIGN -
((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index db2ea31df832..5a38e43eca65 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -627,6 +627,7 @@ static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
int port;
struct kobject *p, *t;
struct mlx4_port *mport;
+ struct mlx4_active_ports actv_ports;
get_name(dev, name, slave, sizeof name);
@@ -649,7 +650,11 @@ static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
goto err_ports;
}
+ actv_ports = mlx4_get_active_ports(dev->dev, slave);
+
for (port = 1; port <= dev->dev->caps.num_ports; ++port) {
+ if (!test_bit(port - 1, actv_ports.ports))
+ continue;
err = add_port(dev, port, slave);
if (err)
goto err_add;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index b1705ce6eb88..62bb6b49dc1d 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -366,6 +366,38 @@ static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
mlx5_buf_free(&dev->mdev, &buf->buf);
}
+static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
+ struct ib_sig_err *item)
+{
+ u16 syndrome = be16_to_cpu(cqe->syndrome);
+
+#define GUARD_ERR (1 << 13)
+#define APPTAG_ERR (1 << 12)
+#define REFTAG_ERR (1 << 11)
+
+ if (syndrome & GUARD_ERR) {
+ item->err_type = IB_SIG_BAD_GUARD;
+ item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
+ item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
+ } else
+ if (syndrome & REFTAG_ERR) {
+ item->err_type = IB_SIG_BAD_REFTAG;
+ item->expected = be32_to_cpu(cqe->expected_reftag);
+ item->actual = be32_to_cpu(cqe->actual_reftag);
+ } else
+ if (syndrome & APPTAG_ERR) {
+ item->err_type = IB_SIG_BAD_APPTAG;
+ item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
+ item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
+ } else {
+ pr_err("Got signature completion error with bad syndrome %04x\n",
+ syndrome);
+ }
+
+ item->sig_err_offset = be64_to_cpu(cqe->err_offset);
+ item->key = be32_to_cpu(cqe->mkey);
+}
+
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_ib_qp **cur_qp,
struct ib_wc *wc)
@@ -375,6 +407,9 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_cqe64 *cqe64;
struct mlx5_core_qp *mqp;
struct mlx5_ib_wq *wq;
+ struct mlx5_sig_err_cqe *sig_err_cqe;
+ struct mlx5_core_mr *mmr;
+ struct mlx5_ib_mr *mr;
uint8_t opcode;
uint32_t qpn;
u16 wqe_ctr;
@@ -475,6 +510,33 @@ repoll:
}
}
break;
+ case MLX5_CQE_SIG_ERR:
+ sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
+
+ read_lock(&dev->mdev.priv.mr_table.lock);
+ mmr = __mlx5_mr_lookup(&dev->mdev,
+ mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+ if (unlikely(!mmr)) {
+ read_unlock(&dev->mdev.priv.mr_table.lock);
+ mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
+ cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
+ return -EINVAL;
+ }
+
+ mr = to_mibmr(mmr);
+ get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
+ mr->sig->sig_err_exists = true;
+ mr->sig->sigerr_count++;
+
+ mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
+ cq->mcq.cqn, mr->sig->err_item.key,
+ mr->sig->err_item.err_type,
+ mr->sig->err_item.sig_err_offset,
+ mr->sig->err_item.expected,
+ mr->sig->err_item.actual);
+
+ read_unlock(&dev->mdev.priv.mr_table.lock);
+ goto repoll;
}
return 0;
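
The new MLX5_CQE_SIG_ERR branch above only decodes the syndrome into mr->sig and keeps polling; nothing is reported through the work completion itself. A hedged sketch of the consumer side, assuming the ib_check_mr_status()/ib_mr_status interface that the core part of this series introduces (not shown here):

    #include <rdma/ib_verbs.h>

    /* After a transaction completes, ask the signature MR whether the
     * HCA recorded a T10-DIF mismatch for it.
     */
    static int report_sig_err(struct ib_mr *sig_mr)
    {
        struct ib_mr_status mr_status;
        int ret;

        ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
        if (ret)
            return ret;

        if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
            pr_err("sig error: type %d offset %llu key 0x%x\n",
                   mr_status.sig_err.err_type,
                   (unsigned long long)mr_status.sig_err.sig_err_offset,
                   mr_status.sig_err.key);
        return 0;
    }
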
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 256a23344f28..ece028fc47d6 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -47,7 +47,6 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
struct mlx5_db *db)
{
struct mlx5_ib_user_db_page *page;
- struct ib_umem_chunk *chunk;
int err = 0;
mutex_lock(&context->db_page_mutex);
@@ -75,8 +74,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
list_add(&page->list, &context->db_page_list);
found:
- chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
- db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
+ db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
db->u.user_page = page;
++page->refcnt;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index bf900579ac08..fa6dc870adae 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -273,6 +273,15 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (flags & MLX5_DEV_CAP_FLAG_XRC)
props->device_cap_flags |= IB_DEVICE_XRC;
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ if (flags & MLX5_DEV_CAP_FLAG_SIG_HAND_OVER) {
+ props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
+ /* At this stage no support for signature handover */
+ props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
+ IB_PROT_T10DIF_TYPE_2 |
+ IB_PROT_T10DIF_TYPE_3;
+ props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
+ IB_GUARD_T10DIF_CSUM;
+ }
props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
0xffffff;
@@ -1423,12 +1432,15 @@ static int init_one(struct pci_dev *pdev,
dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
+ dev->ib_dev.destroy_mr = mlx5_ib_destroy_mr;
dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
dev->ib_dev.process_mad = mlx5_ib_process_mad;
+ dev->ib_dev.create_mr = mlx5_ib_create_mr;
dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr;
dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
+ dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
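
With the query_device hunk above, a ULP can probe for the signature feature before allocating signature MRs. A minimal capability check, assuming the IB_DEVICE_SIGNATURE_HANDOVER flag and the sig_prot_cap/sig_guard_cap attribute fields that this series adds to struct ib_device_attr:

    #include <rdma/ib_verbs.h>

    /* True if the device offloads T10-DIF type 1 protection with CRC guards. */
    static bool supports_dif_type1_crc(struct ib_device *ibdev)
    {
        struct ib_device_attr attr;

        if (ib_query_device(ibdev, &attr))
            return false;

        return (attr.device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) &&
               (attr.sig_prot_cap & IB_PROT_T10DIF_TYPE_1) &&
               (attr.sig_guard_cap & IB_GUARD_T10DIF_CRC);
    }
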
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 3a5322870b96..8499aec94db6 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -44,16 +44,17 @@
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
int *ncont, int *order)
{
- struct ib_umem_chunk *chunk;
unsigned long tmp;
unsigned long m;
- int i, j, k;
+ int i, k;
u64 base = 0;
int p = 0;
int skip;
int mask;
u64 len;
u64 pfn;
+ struct scatterlist *sg;
+ int entry;
addr = addr >> PAGE_SHIFT;
tmp = (unsigned long)addr;
@@ -61,32 +62,31 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
skip = 1 << m;
mask = skip - 1;
i = 0;
- list_for_each_entry(chunk, &umem->chunk_list, list)
- for (j = 0; j < chunk->nmap; j++) {
- len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
- pfn = sg_dma_address(&chunk->page_list[j]) >> PAGE_SHIFT;
- for (k = 0; k < len; k++) {
- if (!(i & mask)) {
- tmp = (unsigned long)pfn;
- m = min(m, find_first_bit(&tmp, sizeof(tmp)));
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ pfn = sg_dma_address(sg) >> PAGE_SHIFT;
+ for (k = 0; k < len; k++) {
+ if (!(i & mask)) {
+ tmp = (unsigned long)pfn;
+ m = min(m, find_first_bit(&tmp, sizeof(tmp)));
+ skip = 1 << m;
+ mask = skip - 1;
+ base = pfn;
+ p = 0;
+ } else {
+ if (base + p != pfn) {
+ tmp = (unsigned long)p;
+ m = find_first_bit(&tmp, sizeof(tmp));
skip = 1 << m;
mask = skip - 1;
base = pfn;
p = 0;
- } else {
- if (base + p != pfn) {
- tmp = (unsigned long)p;
- m = find_first_bit(&tmp, sizeof(tmp));
- skip = 1 << m;
- mask = skip - 1;
- base = pfn;
- p = 0;
- }
}
- p++;
- i++;
}
+ p++;
+ i++;
}
+ }
if (i) {
m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
@@ -112,32 +112,32 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
{
int shift = page_shift - PAGE_SHIFT;
int mask = (1 << shift) - 1;
- struct ib_umem_chunk *chunk;
- int i, j, k;
+ int i, k;
u64 cur = 0;
u64 base;
int len;
+ struct scatterlist *sg;
+ int entry;
i = 0;
- list_for_each_entry(chunk, &umem->chunk_list, list)
- for (j = 0; j < chunk->nmap; j++) {
- len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
- base = sg_dma_address(&chunk->page_list[j]);
- for (k = 0; k < len; k++) {
- if (!(i & mask)) {
- cur = base + (k << PAGE_SHIFT);
- if (umr)
- cur |= 3;
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ base = sg_dma_address(sg);
+ for (k = 0; k < len; k++) {
+ if (!(i & mask)) {
+ cur = base + (k << PAGE_SHIFT);
+ if (umr)
+ cur |= 3;
- pas[i >> shift] = cpu_to_be64(cur);
- mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
- i >> shift, be64_to_cpu(pas[i >> shift]));
- } else
- mlx5_ib_dbg(dev, "=====> 0x%llx\n",
- base + (k << PAGE_SHIFT));
- i++;
- }
+ pas[i >> shift] = cpu_to_be64(cur);
+ mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
+ i >> shift, be64_to_cpu(pas[i >> shift]));
+ } else
+ mlx5_ib_dbg(dev, "=====> 0x%llx\n",
+ base + (k << PAGE_SHIFT));
+ i++;
}
+ }
}
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
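
The two mem.c hunks above drop the per-chunk page_list walk in favour of a single for_each_sg() pass over umem->sg_head.sgl; doorbell.c above and the mthca/nes conversions below follow the same pattern. A minimal sketch of the new iteration, assuming a DMA-mapped struct ib_umem with the sg_head/nmap layout used throughout this diff:

    #include <linux/scatterlist.h>
    #include <rdma/ib_umem.h>

    /* Sum the DMA-mapped length of a registered user memory region by
     * walking its single scatterlist instead of a list of chunks.
     */
    static u64 umem_dma_len(struct ib_umem *umem)
    {
        struct scatterlist *sg;
        u64 total = 0;
        int entry;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
            total += sg_dma_len(sg);
        return total;
    }
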
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 389e31965773..50541586e0a6 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -189,6 +189,9 @@ struct mlx5_ib_qp {
int create_type;
u32 pa_lkey;
+
+ /* Store signature errors */
+ bool signature_en;
};
struct mlx5_ib_cq_buf {
@@ -265,6 +268,7 @@ struct mlx5_ib_mr {
enum ib_wc_status status;
struct mlx5_ib_dev *dev;
struct mlx5_create_mkey_mbox_out out;
+ struct mlx5_core_sig_ctx *sig;
};
struct mlx5_ib_fast_reg_page_list {
@@ -396,6 +400,11 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
return container_of(mqp, struct mlx5_ib_qp, mqp);
}
+static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
+{
+ return container_of(mmr, struct mlx5_ib_mr, mmr);
+}
+
static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct mlx5_ib_pd, ibpd);
@@ -495,6 +504,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
+int mlx5_ib_destroy_mr(struct ib_mr *ibmr);
+struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
+ struct ib_mr_init_attr *mr_init_attr);
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
int max_page_list_len);
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
@@ -530,6 +542,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
+int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
+ struct ib_mr_status *mr_status);
static inline void init_query_mad(struct ib_smp *mad)
{
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 7c95ca1f0c25..81392b26d078 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -992,6 +992,122 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
return 0;
}
+struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
+ struct ib_mr_init_attr *mr_init_attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_create_mkey_mbox_in *in;
+ struct mlx5_ib_mr *mr;
+ int access_mode, err;
+ int ndescs = roundup(mr_init_attr->max_reg_descriptors, 4);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ in->seg.status = 1 << 6; /* free */
+ in->seg.xlt_oct_size = cpu_to_be32(ndescs);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+ access_mode = MLX5_ACCESS_MODE_MTT;
+
+ if (mr_init_attr->flags & IB_MR_SIGNATURE_EN) {
+ u32 psv_index[2];
+
+ in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
+ MLX5_MKEY_BSF_EN);
+ in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+ mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
+ if (!mr->sig) {
+ err = -ENOMEM;
+ goto err_free_in;
+ }
+
+ /* create mem & wire PSVs */
+ err = mlx5_core_create_psv(&dev->mdev, to_mpd(pd)->pdn,
+ 2, psv_index);
+ if (err)
+ goto err_free_sig;
+
+ access_mode = MLX5_ACCESS_MODE_KLM;
+ mr->sig->psv_memory.psv_idx = psv_index[0];
+ mr->sig->psv_wire.psv_idx = psv_index[1];
+
+ mr->sig->sig_status_checked = true;
+ mr->sig->sig_err_exists = false;
+ /* Next UMR, Arm SIGERR */
+ ++mr->sig->sigerr_count;
+ }
+
+ in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
+ err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in),
+ NULL, NULL, NULL);
+ if (err)
+ goto err_destroy_psv;
+
+ mr->ibmr.lkey = mr->mmr.key;
+ mr->ibmr.rkey = mr->mmr.key;
+ mr->umem = NULL;
+ kfree(in);
+
+ return &mr->ibmr;
+
+err_destroy_psv:
+ if (mr->sig) {
+ if (mlx5_core_destroy_psv(&dev->mdev,
+ mr->sig->psv_memory.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
+ mr->sig->psv_memory.psv_idx);
+ if (mlx5_core_destroy_psv(&dev->mdev,
+ mr->sig->psv_wire.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
+ mr->sig->psv_wire.psv_idx);
+ }
+err_free_sig:
+ kfree(mr->sig);
+err_free_in:
+ kfree(in);
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ int err;
+
+ if (mr->sig) {
+ if (mlx5_core_destroy_psv(&dev->mdev,
+ mr->sig->psv_memory.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
+ mr->sig->psv_memory.psv_idx);
+ if (mlx5_core_destroy_psv(&dev->mdev,
+ mr->sig->psv_wire.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
+ mr->sig->psv_wire.psv_idx);
+ kfree(mr->sig);
+ }
+
+ err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
+ mr->mmr.key, err);
+ return err;
+ }
+
+ kfree(mr);
+
+ return err;
+}
+
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
int max_page_list_len)
{
@@ -1077,3 +1193,44 @@ void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
kfree(mfrpl->ibfrpl.page_list);
kfree(mfrpl);
}
+
+int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
+ struct ib_mr_status *mr_status)
+{
+ struct mlx5_ib_mr *mmr = to_mmr(ibmr);
+ int ret = 0;
+
+ if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
+ pr_err("Invalid status check mask\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mr_status->fail_status = 0;
+ if (check_mask & IB_MR_CHECK_SIG_STATUS) {
+ if (!mmr->sig) {
+ ret = -EINVAL;
+ pr_err("signature status check requested on a non-signature enabled MR\n");
+ goto done;
+ }
+
+ mmr->sig->sig_status_checked = true;
+ if (!mmr->sig->sig_err_exists)
+ goto done;
+
+ if (ibmr->lkey == mmr->sig->err_item.key)
+ memcpy(&mr_status->sig_err, &mmr->sig->err_item,
+ sizeof(mr_status->sig_err));
+ else {
+ mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
+ mr_status->sig_err.sig_err_offset = 0;
+ mr_status->sig_err.key = mmr->sig->err_item.key;
+ }
+
+ mmr->sig->sig_err_exists = false;
+ mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
+ }
+
+done:
+ return ret;
+}
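
mlx5_ib_create_mr() above is reached through the new generic create_mr verb; a signature-enabled MR only needs IB_MR_SIGNATURE_EN in its init attributes, and the driver allocates the memory and wire PSVs internally. A hedged allocation sketch, assuming the ib_create_mr()/ib_mr_init_attr interface from the core part of this series:

    #include <rdma/ib_verbs.h>

    /* Allocate a signature-enabled MR on a PD; release it later with
     * ib_destroy_mr(), which reaches mlx5_ib_destroy_mr() above.
     */
    static struct ib_mr *alloc_sig_mr(struct ib_pd *pd)
    {
        struct ib_mr_init_attr mr_init_attr = {
            .max_reg_descriptors = 2,          /* data + protection KLMs */
            .flags               = IB_MR_SIGNATURE_EN,
        };

        return ib_create_mr(pd, &mr_init_attr);
    }
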
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7dfe8a1c84cf..ae788d27b93f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -256,8 +256,11 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
}
size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
-
- return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
+ if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
+ ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
+ return MLX5_SIG_WQE_SIZE;
+ else
+ return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
@@ -284,6 +287,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
sizeof(struct mlx5_wqe_inline_seg);
attr->cap.max_inline_data = qp->max_inline_data;
+ if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
+ qp->signature_en = true;
+
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
@@ -665,7 +671,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int err;
uuari = &dev->mdev.priv.uuari;
- if (init_attr->create_flags)
+ if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN)
return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
@@ -1771,6 +1777,27 @@ static __be64 frwr_mkey_mask(void)
return cpu_to_be64(result);
}
+static __be64 sig_mkey_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_SIGERR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE |
+ MLX5_MKEY_MASK_BSF_EN;
+
+ return cpu_to_be64(result);
+}
+
static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
struct ib_send_wr *wr, int li)
{
@@ -1826,7 +1853,7 @@ static u8 get_umr_flags(int acc)
(acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
(acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
(acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
- MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
+ MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}
static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
@@ -1838,7 +1865,8 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
return;
}
- seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags);
+ seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
+ MLX5_ACCESS_MODE_MTT;
*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
@@ -1954,6 +1982,342 @@ static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
return 0;
}
+static u16 prot_field_size(enum ib_signature_type type)
+{
+ switch (type) {
+ case IB_SIG_TYPE_T10_DIF:
+ return MLX5_DIF_SIZE;
+ default:
+ return 0;
+ }
+}
+
+static u8 bs_selector(int block_size)
+{
+ switch (block_size) {
+ case 512: return 0x1;
+ case 520: return 0x2;
+ case 4096: return 0x3;
+ case 4160: return 0x4;
+ case 1073741824: return 0x5;
+ default: return 0;
+ }
+}
+
+static int format_selector(struct ib_sig_attrs *attr,
+ struct ib_sig_domain *domain,
+ int *selector)
+{
+
+#define FORMAT_DIF_NONE 0
+#define FORMAT_DIF_CRC_INC 8
+#define FORMAT_DIF_CRC_NO_INC 12
+#define FORMAT_DIF_CSUM_INC 13
+#define FORMAT_DIF_CSUM_NO_INC 14
+
+ switch (domain->sig.dif.type) {
+ case IB_T10DIF_NONE:
+ /* No DIF */
+ *selector = FORMAT_DIF_NONE;
+ break;
+ case IB_T10DIF_TYPE1: /* Fall through */
+ case IB_T10DIF_TYPE2:
+ switch (domain->sig.dif.bg_type) {
+ case IB_T10DIF_CRC:
+ *selector = FORMAT_DIF_CRC_INC;
+ break;
+ case IB_T10DIF_CSUM:
+ *selector = FORMAT_DIF_CSUM_INC;
+ break;
+ default:
+ return 1;
+ }
+ break;
+ case IB_T10DIF_TYPE3:
+ switch (domain->sig.dif.bg_type) {
+ case IB_T10DIF_CRC:
+ *selector = domain->sig.dif.type3_inc_reftag ?
+ FORMAT_DIF_CRC_INC :
+ FORMAT_DIF_CRC_NO_INC;
+ break;
+ case IB_T10DIF_CSUM:
+ *selector = domain->sig.dif.type3_inc_reftag ?
+ FORMAT_DIF_CSUM_INC :
+ FORMAT_DIF_CSUM_NO_INC;
+ break;
+ default:
+ return 1;
+ }
+ break;
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mlx5_set_bsf(struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_bsf *bsf, u32 data_size)
+{
+ struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
+ struct mlx5_bsf_basic *basic = &bsf->basic;
+ struct ib_sig_domain *mem = &sig_attrs->mem;
+ struct ib_sig_domain *wire = &sig_attrs->wire;
+ int ret, selector;
+
+ switch (sig_attrs->mem.sig_type) {
+ case IB_SIG_TYPE_T10_DIF:
+ if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF)
+ return -EINVAL;
+
+ /* Input domain check byte mask */
+ basic->check_byte_mask = sig_attrs->check_mask;
+ if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
+ mem->sig.dif.type == wire->sig.dif.type) {
+ /* Same block structure */
+ basic->bsf_size_sbs = 1 << 4;
+ if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
+ basic->wire.copy_byte_mask = 0xff;
+ else
+ basic->wire.copy_byte_mask = 0x3f;
+ } else
+ basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
+
+ basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
+ basic->raw_data_size = cpu_to_be32(data_size);
+
+ ret = format_selector(sig_attrs, mem, &selector);
+ if (ret)
+ return -EINVAL;
+ basic->m_bfs_psv = cpu_to_be32(selector << 24 |
+ msig->psv_memory.psv_idx);
+
+ ret = format_selector(sig_attrs, wire, &selector);
+ if (ret)
+ return -EINVAL;
+ basic->w_bfs_psv = cpu_to_be32(selector << 24 |
+ msig->psv_wire.psv_idx);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+ void **seg, int *size)
+{
+ struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
+ struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
+ struct mlx5_bsf *bsf;
+ u32 data_len = wr->sg_list->length;
+ u32 data_key = wr->sg_list->lkey;
+ u64 data_va = wr->sg_list->addr;
+ int ret;
+ int wqe_size;
+
+ if (!wr->wr.sig_handover.prot) {
+ /**
+ * Source domain doesn't contain signature information
+ * So need construct:
+ * ------------------
+ * | data_klm |
+ * ------------------
+ * | BSF |
+ * ------------------
+ **/
+ struct mlx5_klm *data_klm = *seg;
+
+ data_klm->bcount = cpu_to_be32(data_len);
+ data_klm->key = cpu_to_be32(data_key);
+ data_klm->va = cpu_to_be64(data_va);
+ wqe_size = ALIGN(sizeof(*data_klm), 64);
+ } else {
+ /**
+ * Source domain contains signature information
+ * So need construct a strided block format:
+ * ---------------------------
+ * | stride_block_ctrl |
+ * ---------------------------
+ * | data_klm |
+ * ---------------------------
+ * | prot_klm |
+ * ---------------------------
+ * | BSF |
+ * ---------------------------
+ **/
+ struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
+ struct mlx5_stride_block_entry *data_sentry;
+ struct mlx5_stride_block_entry *prot_sentry;
+ u32 prot_key = wr->wr.sig_handover.prot->lkey;
+ u64 prot_va = wr->wr.sig_handover.prot->addr;
+ u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
+ int prot_size;
+
+ sblock_ctrl = *seg;
+ data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
+ prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
+
+ prot_size = prot_field_size(sig_attrs->mem.sig_type);
+ if (!prot_size) {
+ pr_err("Bad block size given: %u\n", block_size);
+ return -EINVAL;
+ }
+ sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
+ prot_size);
+ sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
+ sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
+ sblock_ctrl->num_entries = cpu_to_be16(2);
+
+ data_sentry->bcount = cpu_to_be16(block_size);
+ data_sentry->key = cpu_to_be32(data_key);
+ data_sentry->va = cpu_to_be64(data_va);
+ prot_sentry->bcount = cpu_to_be16(prot_size);
+ prot_sentry->key = cpu_to_be32(prot_key);
+
+ if (prot_key == data_key && prot_va == data_va) {
+ /**
+ * The data and protection are interleaved
+ * in a single memory region
+ **/
+ prot_sentry->va = cpu_to_be64(data_va + block_size);
+ prot_sentry->stride = cpu_to_be16(block_size + prot_size);
+ data_sentry->stride = prot_sentry->stride;
+ } else {
+ /* The data and protection are two different buffers */
+ prot_sentry->va = cpu_to_be64(prot_va);
+ data_sentry->stride = cpu_to_be16(block_size);
+ prot_sentry->stride = cpu_to_be16(prot_size);
+ }
+ wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
+ sizeof(*prot_sentry), 64);
+ }
+
+ *seg += wqe_size;
+ *size += wqe_size / 16;
+ if (unlikely((*seg == qp->sq.qend)))
+ *seg = mlx5_get_send_wqe(qp, 0);
+
+ bsf = *seg;
+ ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
+ if (ret)
+ return -EINVAL;
+
+ *seg += sizeof(*bsf);
+ *size += sizeof(*bsf) / 16;
+ if (unlikely((*seg == qp->sq.qend)))
+ *seg = mlx5_get_send_wqe(qp, 0);
+
+ return 0;
+}
+
+static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
+ struct ib_send_wr *wr, u32 nelements,
+ u32 length, u32 pdn)
+{
+ struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
+ u32 sig_key = sig_mr->rkey;
+ u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
+
+ memset(seg, 0, sizeof(*seg));
+
+ seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
+ MLX5_ACCESS_MODE_KLM;
+ seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
+ MLX5_MKEY_BSF_EN | pdn);
+ seg->len = cpu_to_be64(length);
+ seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
+ seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+}
+
+static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
+ struct ib_send_wr *wr, u32 nelements)
+{
+ memset(umr, 0, sizeof(*umr));
+
+ umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
+ umr->klm_octowords = get_klm_octo(nelements);
+ umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
+ umr->mkey_mask = sig_mkey_mask();
+}
+
+
+static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+ void **seg, int *size)
+{
+ struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
+ u32 pdn = get_pd(qp)->pdn;
+ u32 klm_oct_size;
+ int region_len, ret;
+
+ if (unlikely(wr->num_sge != 1) ||
+ unlikely(wr->wr.sig_handover.access_flags &
+ IB_ACCESS_REMOTE_ATOMIC) ||
+ unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
+ unlikely(!sig_mr->sig->sig_status_checked))
+ return -EINVAL;
+
+ /* length of the protected region, data + protection */
+ region_len = wr->sg_list->length;
+ if (wr->wr.sig_handover.prot)
+ region_len += wr->wr.sig_handover.prot->length;
+
+ /**
+ * KLM octoword size - if protection was provided
+ * then we use strided block format (3 octowords),
+ * else we use single KLM (1 octoword)
+ **/
+ klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;
+
+ set_sig_umr_segment(*seg, wr, klm_oct_size);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ if (unlikely((*seg == qp->sq.qend)))
+ *seg = mlx5_get_send_wqe(qp, 0);
+
+ set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ if (unlikely((*seg == qp->sq.qend)))
+ *seg = mlx5_get_send_wqe(qp, 0);
+
+ ret = set_sig_data_segment(wr, qp, seg, size);
+ if (ret)
+ return ret;
+
+ sig_mr->sig->sig_status_checked = false;
+ return 0;
+}
+
+static int set_psv_wr(struct ib_sig_domain *domain,
+ u32 psv_idx, void **seg, int *size)
+{
+ struct mlx5_seg_set_psv *psv_seg = *seg;
+
+ memset(psv_seg, 0, sizeof(*psv_seg));
+ psv_seg->psv_num = cpu_to_be32(psv_idx);
+ switch (domain->sig_type) {
+ case IB_SIG_TYPE_T10_DIF:
+ psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
+ domain->sig.dif.app_tag);
+ psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
+
+ *seg += sizeof(*psv_seg);
+ *size += sizeof(*psv_seg) / 16;
+ break;
+
+ default:
+ pr_err("Bad signature type given.\n");
+ return 1;
+ }
+
+ return 0;
+}
+
static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
{
@@ -2041,6 +2405,59 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
}
}
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ struct ib_send_wr *wr, int *idx,
+ int *size, int nreq)
+{
+ int err = 0;
+
+ if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ *seg = mlx5_get_send_wqe(qp, *idx);
+ *ctrl = *seg;
+ *(uint32_t *)(*seg + 8) = 0;
+ (*ctrl)->imm = send_ieth(wr);
+ (*ctrl)->fm_ce_se = qp->sq_signal_bits |
+ (wr->send_flags & IB_SEND_SIGNALED ?
+ MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+ (wr->send_flags & IB_SEND_SOLICITED ?
+ MLX5_WQE_CTRL_SOLICITED : 0);
+
+ *seg += sizeof(**ctrl);
+ *size = sizeof(**ctrl) / 16;
+
+ return err;
+}
+
+static void finish_wqe(struct mlx5_ib_qp *qp,
+ struct mlx5_wqe_ctrl_seg *ctrl,
+ u8 size, unsigned idx, u64 wr_id,
+ int nreq, u8 fence, u8 next_fence,
+ u32 mlx5_opcode)
+{
+ u8 opmod = 0;
+
+ ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
+ mlx5_opcode | ((u32)opmod << 24));
+ ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
+ ctrl->fm_ce_se |= fence;
+ qp->fm_cache = next_fence;
+ if (unlikely(qp->wq_sig))
+ ctrl->signature = wq_sig(ctrl);
+
+ qp->sq.wrid[idx] = wr_id;
+ qp->sq.w_list[idx].opcode = mlx5_opcode;
+ qp->sq.wqe_head[idx] = qp->sq.head + nreq;
+ qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+ qp->sq.w_list[idx].next = qp->sq.cur_post;
+}
+
+
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -2048,13 +2465,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_core_dev *mdev = &dev->mdev;
struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_mr *mr;
struct mlx5_wqe_data_seg *dpseg;
struct mlx5_wqe_xrc_seg *xrc;
struct mlx5_bf *bf = qp->bf;
int uninitialized_var(size);
void *qend = qp->sq.qend;
unsigned long flags;
- u32 mlx5_opcode;
unsigned idx;
int err = 0;
int inl = 0;
@@ -2063,7 +2480,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int nreq;
int i;
u8 next_fence = 0;
- u8 opmod = 0;
u8 fence;
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -2076,36 +2492,23 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out;
}
- if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
+ fence = qp->fm_cache;
+ num_sge = wr->num_sge;
+ if (unlikely(num_sge > qp->sq.max_gs)) {
mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
*bad_wr = wr;
goto out;
}
- fence = qp->fm_cache;
- num_sge = wr->num_sge;
- if (unlikely(num_sge > qp->sq.max_gs)) {
+ err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
+ if (err) {
mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
*bad_wr = wr;
goto out;
}
- idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
- seg = mlx5_get_send_wqe(qp, idx);
- ctrl = seg;
- *(uint32_t *)(seg + 8) = 0;
- ctrl->imm = send_ieth(wr);
- ctrl->fm_ce_se = qp->sq_signal_bits |
- (wr->send_flags & IB_SEND_SIGNALED ?
- MLX5_WQE_CTRL_CQ_UPDATE : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- MLX5_WQE_CTRL_SOLICITED : 0);
-
- seg += sizeof(*ctrl);
- size = sizeof(*ctrl) / 16;
-
switch (ibqp->qp_type) {
case IB_QPT_XRC_INI:
xrc = seg;
@@ -2158,6 +2561,73 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
num_sge = 0;
break;
+ case IB_WR_REG_SIG_MR:
+ qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
+ mr = to_mmr(wr->wr.sig_handover.sig_mr);
+
+ ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
+ err = set_sig_umr_wr(wr, qp, &seg, &size);
+ if (err) {
+ mlx5_ib_warn(dev, "\n");
+ *bad_wr = wr;
+ goto out;
+ }
+
+ finish_wqe(qp, ctrl, size, idx, wr->wr_id,
+ nreq, get_fence(fence, wr),
+ next_fence, MLX5_OPCODE_UMR);
+ /*
+ * SET_PSV WQEs are not signaled and solicited
+ * on error
+ */
+ wr->send_flags &= ~IB_SEND_SIGNALED;
+ wr->send_flags |= IB_SEND_SOLICITED;
+ err = begin_wqe(qp, &seg, &ctrl, wr,
+ &idx, &size, nreq);
+ if (err) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
+ mr->sig->psv_memory.psv_idx, &seg,
+ &size);
+ if (err) {
+ mlx5_ib_warn(dev, "\n");
+ *bad_wr = wr;
+ goto out;
+ }
+
+ finish_wqe(qp, ctrl, size, idx, wr->wr_id,
+ nreq, get_fence(fence, wr),
+ next_fence, MLX5_OPCODE_SET_PSV);
+ err = begin_wqe(qp, &seg, &ctrl, wr,
+ &idx, &size, nreq);
+ if (err) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
+ mr->sig->psv_wire.psv_idx, &seg,
+ &size);
+ if (err) {
+ mlx5_ib_warn(dev, "\n");
+ *bad_wr = wr;
+ goto out;
+ }
+
+ finish_wqe(qp, ctrl, size, idx, wr->wr_id,
+ nreq, get_fence(fence, wr),
+ next_fence, MLX5_OPCODE_SET_PSV);
+ num_sge = 0;
+ goto skip_psv;
+
default:
break;
}
@@ -2238,22 +2708,10 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
}
- mlx5_opcode = mlx5_ib_opcode[wr->opcode];
- ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
- mlx5_opcode |
- ((u32)opmod << 24));
- ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
- ctrl->fm_ce_se |= get_fence(fence, wr);
- qp->fm_cache = next_fence;
- if (unlikely(qp->wq_sig))
- ctrl->signature = wq_sig(ctrl);
-
- qp->sq.wrid[idx] = wr->wr_id;
- qp->sq.w_list[idx].opcode = mlx5_opcode;
- qp->sq.wqe_head[idx] = qp->sq.head + nreq;
- qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
- qp->sq.w_list[idx].next = qp->sq.cur_post;
-
+ finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+ get_fence(fence, wr), next_fence,
+ mlx5_ib_opcode[wr->opcode]);
+skip_psv:
if (0)
dump_wqe(qp, idx, size);
}
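
The IB_WR_REG_SIG_MR branch added to mlx5_ib_post_send() above expands one signature work request into a UMR WQE plus two SET_PSV WQEs. A hedged sketch of how a ULP might fill such a request for T10-DIF type 1 with CRC guards, using only the ib_sig_attrs/sig_handover fields that set_sig_umr_wr(), mlx5_set_bsf() and set_psv_wr() read; the helper name and the 512-byte interval are illustrative:

    #include <linux/string.h>
    #include <rdma/ib_verbs.h>

    /* Build (not post) a signature handover WR: data_sge describes the
     * unprotected payload, sig_mr is a signature-enabled MR, and the
     * wire domain asks the HCA to generate/verify DIF every 512 bytes.
     */
    static void build_sig_wr(struct ib_send_wr *wr, struct ib_sge *data_sge,
                             struct ib_mr *sig_mr, struct ib_sig_attrs *sig_attrs)
    {
        memset(sig_attrs, 0, sizeof(*sig_attrs));
        sig_attrs->check_mask = 0xff;                  /* check all PI bytes */
        sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
        sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;  /* no PI in memory */
        sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
        sig_attrs->wire.sig.dif.type = IB_T10DIF_TYPE1;
        sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
        sig_attrs->wire.sig.dif.pi_interval = 512;
        sig_attrs->wire.sig.dif.ref_tag = 0;           /* starting LBA */

        memset(wr, 0, sizeof(*wr));
        wr->opcode = IB_WR_REG_SIG_MR;
        wr->sg_list = data_sge;
        wr->num_sge = 1;
        wr->send_flags = IB_SEND_SIGNALED;
        wr->wr.sig_handover.sig_mr = sig_mr;
        wr->wr.sig_handover.sig_attrs = sig_attrs;
        wr->wr.sig_handover.prot = NULL;               /* no separate PI buffer */
        wr->wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
                                           IB_ACCESS_REMOTE_READ |
                                           IB_ACCESS_REMOTE_WRITE;
    }
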
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 5b71d43bd89c..415f8e1a54db 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -695,6 +695,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
mthca_free_cq(to_mdev(ibdev), cq);
+ err = -EFAULT;
goto err_free;
}
@@ -976,12 +977,12 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(pd->device);
- struct ib_umem_chunk *chunk;
+ struct scatterlist *sg;
struct mthca_mr *mr;
struct mthca_reg_mr ucmd;
u64 *pages;
int shift, n, len;
- int i, j, k;
+ int i, k, entry;
int err = 0;
int write_mtt_size;
@@ -1009,10 +1010,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
shift = ffs(mr->umem->page_size) - 1;
-
- n = 0;
- list_for_each_entry(chunk, &mr->umem->chunk_list, list)
- n += chunk->nents;
+ n = mr->umem->nmap;
mr->mtt = mthca_alloc_mtt(dev, n);
if (IS_ERR(mr->mtt)) {
@@ -1030,25 +1028,24 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
- list_for_each_entry(chunk, &mr->umem->chunk_list, list)
- for (j = 0; j < chunk->nmap; ++j) {
- len = sg_dma_len(&chunk->page_list[j]) >> shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = sg_dma_address(&chunk->page_list[j]) +
- mr->umem->page_size * k;
- /*
- * Be friendly to write_mtt and pass it chunks
- * of appropriate size.
- */
- if (i == write_mtt_size) {
- err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
- if (err)
- goto mtt_done;
- n += i;
- i = 0;
- }
+ for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+ len = sg_dma_len(sg) >> shift;
+ for (k = 0; k < len; ++k) {
+ pages[i++] = sg_dma_address(sg) +
+ mr->umem->page_size * k;
+ /*
+ * Be friendly to write_mtt and pass it chunks
+ * of appropriate size.
+ */
+ if (i == write_mtt_size) {
+ err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
+ if (err)
+ goto mtt_done;
+ n += i;
+ i = 0;
}
}
+ }
if (i)
err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
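
The mthca conversion above keeps its "be friendly to write_mtt" batching while moving to for_each_sg(): translated addresses are staged in a bounce buffer and flushed every write_mtt_size entries, with a final flush for the remainder. A stripped-down sketch of that accumulate-and-flush shape; flush_fn stands in for a helper like mthca_write_mtt() and is not a real API:

    #include <linux/types.h>

    /* Stage 64-bit entries in a small buffer and hand them to the
     * hardware writer in fixed-size batches, then flush the tail.
     */
    static int write_in_batches(u64 *staging, int batch_size,
                                const u64 *entries, int count,
                                int (*flush_fn)(const u64 *buf, int start, int len))
    {
        int i = 0, done = 0, err = 0, k;

        for (k = 0; k < count; ++k) {
            staging[i++] = entries[k];
            if (i == batch_size) {          /* batch full: flush it */
                err = flush_fn(staging, done, i);
                if (err)
                    return err;
                done += i;
                i = 0;
            }
        }
        if (i)                              /* flush the remainder */
            err = flush_fn(staging, done, i);
        return err;
    }
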
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 9c9f2f57e960..dfa9df484505 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -128,6 +128,7 @@ static void build_mpa_v1(struct nes_cm_node *, void *, u8);
static void build_rdma0_msg(struct nes_cm_node *, struct nes_qp **);
static void print_core(struct nes_cm_core *core);
+static void record_ird_ord(struct nes_cm_node *, u16, u16);
/* External CM API Interface */
/* instance of function pointers for client API */
@@ -317,7 +318,6 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
}
}
-
if (priv_data_len + mpa_hdr_len != len) {
nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
" complete (%x + %x != %x)\n",
@@ -356,25 +356,57 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
/* send reset */
return -EINVAL;
}
+ if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD)
+ cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
- if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
+ if (cm_node->mpav2_ird_ord != IETF_NO_IRD_ORD) {
/* responder */
- if (cm_node->ord_size > ird_size)
- cm_node->ord_size = ird_size;
- } else {
- /* initiator */
- if (cm_node->ord_size > ird_size)
- cm_node->ord_size = ird_size;
-
- if (cm_node->ird_size < ord_size) {
- /* no resources available */
- /* send terminate message */
- return -EINVAL;
+ if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
+ /* we are still negotiating */
+ if (ord_size > NES_MAX_IRD) {
+ cm_node->ird_size = NES_MAX_IRD;
+ } else {
+ cm_node->ird_size = ord_size;
+ if (ord_size == 0 &&
+ (rtr_ctrl_ord & IETF_RDMA0_READ)) {
+ cm_node->ird_size = 1;
+ nes_debug(NES_DBG_CM,
+ "%s: Remote peer doesn't support RDMA0_READ (ord=%u)\n",
+ __func__, ord_size);
+ }
+ }
+ if (ird_size > NES_MAX_ORD)
+ cm_node->ord_size = NES_MAX_ORD;
+ else
+ cm_node->ord_size = ird_size;
+ } else { /* initiator */
+ if (ord_size > NES_MAX_IRD) {
+ nes_debug(NES_DBG_CM,
+ "%s: Unable to support the requested (ord =%u)\n",
+ __func__, ord_size);
+ return -EINVAL;
+ }
+ cm_node->ird_size = ord_size;
+
+ if (ird_size > NES_MAX_ORD) {
+ cm_node->ord_size = NES_MAX_ORD;
+ } else {
+ if (ird_size == 0 &&
+ (rtr_ctrl_ord & IETF_RDMA0_READ)) {
+ nes_debug(NES_DBG_CM,
+ "%s: Remote peer doesn't support RDMA0_READ (ird=%u)\n",
+ __func__, ird_size);
+ return -EINVAL;
+ } else {
+ cm_node->ord_size = ird_size;
+ }
+ }
}
}
if (rtr_ctrl_ord & IETF_RDMA0_READ) {
cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+
} else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) {
cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
} else { /* Not supported RDMA0 operation */
@@ -514,6 +546,19 @@ static void print_core(struct nes_cm_core *core)
nes_debug(NES_DBG_CM, "-------------- end core ---------------\n");
}
+static void record_ird_ord(struct nes_cm_node *cm_node,
+ u16 conn_ird, u16 conn_ord)
+{
+ if (conn_ird > NES_MAX_IRD)
+ conn_ird = NES_MAX_IRD;
+
+ if (conn_ord > NES_MAX_ORD)
+ conn_ord = NES_MAX_ORD;
+
+ cm_node->ird_size = conn_ird;
+ cm_node->ord_size = conn_ord;
+}
+
/**
* cm_build_mpa_frame - build a MPA V1 frame or MPA V2 frame
*/
@@ -557,11 +602,13 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
/* initialize RTR msg */
- ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
- IETF_NO_IRD_ORD : cm_node->ird_size;
- ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
- IETF_NO_IRD_ORD : cm_node->ord_size;
-
+ if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
+ ctrl_ird = IETF_NO_IRD_ORD;
+ ctrl_ord = IETF_NO_IRD_ORD;
+ } else {
+ ctrl_ird = cm_node->ird_size & IETF_NO_IRD_ORD;
+ ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD;
+ }
ctrl_ird |= IETF_PEER_TO_PEER;
ctrl_ird |= IETF_FLPDU_ZERO_LEN;
@@ -610,7 +657,7 @@ static void build_rdma0_msg(struct nes_cm_node *cm_node, struct nes_qp **nesqp_a
struct nes_qp *nesqp = *nesqp_addr;
struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0];
- u64temp = (unsigned long)nesqp;
+ u64temp = (unsigned long)nesqp->nesuqp_addr;
u64temp |= NES_SW_CONTEXT_ALIGN >> 1;
set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp);
@@ -1409,8 +1456,9 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->mpa_frame_rev = mpa_version;
cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
- cm_node->ird_size = IETF_NO_IRD_ORD;
- cm_node->ord_size = IETF_NO_IRD_ORD;
+ cm_node->mpav2_ird_ord = 0;
+ cm_node->ird_size = 0;
+ cm_node->ord_size = 0;
nes_debug(NES_DBG_CM, "Make node addresses : loc = %pI4:%x, rem = %pI4:%x\n",
&cm_node->loc_addr, cm_node->loc_port,
@@ -3027,11 +3075,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
rem_ref_cm_node(cm_node->cm_core, cm_node);
return -ECONNRESET;
}
-
/* associate the node with the QP */
nesqp->cm_node = (void *)cm_node;
cm_node->nesqp = nesqp;
+
nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
atomic_inc(&cm_accepts);
@@ -3054,6 +3102,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (cm_node->mpa_frame_rev == IETF_MPA_V1)
mpa_frame_offset = 4;
+ if (cm_node->mpa_frame_rev == IETF_MPA_V1 ||
+ cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
+ record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
+ }
+
memcpy(mpa_v2_frame->priv_data, conn_param->private_data,
conn_param->private_data_len);
@@ -3117,7 +3170,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
nesqp->skip_lsmm = 1;
-
/* Cache the cm_id in the qp */
nesqp->cm_id = cm_id;
cm_node->cm_id = cm_id;
@@ -3154,7 +3206,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT));
nesqp->nesqp_context->ird_ord_sizes |=
- cpu_to_le32((u32)conn_param->ord);
+ cpu_to_le32((u32)cm_node->ord_size);
memset(&nes_quad, 0, sizeof(nes_quad));
nes_quad.DstIpAdrIndex =
@@ -3194,6 +3246,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
+ cm_event.ird = cm_node->ird_size;
+ cm_event.ord = cm_node->ord_size;
+
ret = cm_id->event_handler(cm_id, &cm_event);
attr.qp_state = IB_QPS_RTS;
nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
@@ -3290,14 +3345,8 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
/* cache the cm_id in the qp */
nesqp->cm_id = cm_id;
-
cm_id->provider_data = nesqp;
-
nesqp->private_data_len = conn_param->private_data_len;
- nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
- /* space for rdma0 read msg */
- if (conn_param->ord == 0)
- nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(1);
nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
nes_debug(NES_DBG_CM, "mpa private data len =%u\n",
@@ -3334,6 +3383,11 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return -ENOMEM;
}
+ record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
+ if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
+ cm_node->ord_size == 0)
+ cm_node->ord_size = 1;
+
cm_node->apbvt_set = apbvt_set;
nesqp->cm_node = cm_node;
cm_node->nesqp = nesqp;
@@ -3530,6 +3584,8 @@ static void cm_event_connected(struct nes_cm_event *event)
nesqp->nesqp_context->ird_ord_sizes |=
cpu_to_le32((u32)1 <<
NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
+ nesqp->nesqp_context->ird_ord_sizes |=
+ cpu_to_le32((u32)cm_node->ord_size);
/* Adjust tail for not having a LSMM */
/*nesqp->hwqp.sq_tail = 1;*/
@@ -3742,8 +3798,13 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
cm_event_raddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
cm_event.private_data = cm_node->mpa_frame_buf;
cm_event.private_data_len = (u8)cm_node->mpa_frame_size;
+ if (cm_node->mpa_frame_rev == IETF_MPA_V1) {
+ cm_event.ird = NES_MAX_IRD;
+ cm_event.ord = NES_MAX_ORD;
+ } else {
cm_event.ird = cm_node->ird_size;
cm_event.ord = cm_node->ord_size;
+ }
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret)
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 4646e6666087..522c99cd07c4 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -58,6 +58,8 @@
#define IETF_RDMA0_WRITE 0x8000
#define IETF_RDMA0_READ 0x4000
#define IETF_NO_IRD_ORD 0x3FFF
+#define NES_MAX_IRD 0x40
+#define NES_MAX_ORD 0x7F
enum ietf_mpa_flags {
IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
@@ -333,6 +335,7 @@ struct nes_cm_node {
enum mpa_frame_version mpa_frame_rev;
u16 ird_size;
u16 ord_size;
+ u16 mpav2_ird_ord;
u16 mpa_frame_size;
struct iw_cm_id *cm_id;
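
The nes_cm changes above clamp the negotiated RDMA read depths to the new NES_MAX_IRD/NES_MAX_ORD limits and track whether the peer advertised IETF_NO_IRD_ORD. A small worked example of the clamp that record_ird_ord() applies, with illustrative input values in the comment:

    #include <linux/kernel.h>       /* min_t() */

    #define NES_MAX_IRD 0x40        /* 64 inbound RDMA reads */
    #define NES_MAX_ORD 0x7F        /* 127 outbound RDMA reads */

    /* E.g. an application asking for ird = 128 and ord = 255 is
     * reduced to ird = 64 and ord = 127 before the MPA exchange.
     */
    static void clamp_ird_ord(u16 *ird, u16 *ord)
    {
        *ird = min_t(u16, *ird, NES_MAX_IRD);
        *ord = min_t(u16, *ord, NES_MAX_ORD);
    }
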
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
index 4926de744488..529c421bb15c 100644
--- a/drivers/infiniband/hw/nes/nes_user.h
+++ b/drivers/infiniband/hw/nes/nes_user.h
@@ -39,8 +39,8 @@
#include <linux/types.h>
-#define NES_ABI_USERSPACE_VER 1
-#define NES_ABI_KERNEL_VER 1
+#define NES_ABI_USERSPACE_VER 2
+#define NES_ABI_KERNEL_VER 2
/*
* Make sure that all structs defined in this file remain laid out so
@@ -78,6 +78,7 @@ struct nes_create_cq_req {
struct nes_create_qp_req {
__u64 user_wqe_buffers;
+ __u64 user_qp_buffer;
};
enum iwnes_memreg_type {
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 8308e3634767..218dd3574285 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1186,11 +1186,13 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
kfree(nesqp->allocated_buffer);
nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n");
- return NULL;
+ return ERR_PTR(-EFAULT);
}
if (req.user_wqe_buffers) {
virt_wqs = 1;
}
+ if (req.user_qp_buffer)
+ nesqp->nesuqp_addr = req.user_qp_buffer;
if ((ibpd->uobject) && (ibpd->uobject->context)) {
nesqp->user_mode = 1;
nes_ucontext = to_nesucontext(ibpd->uobject->context);
@@ -2307,7 +2309,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct ib_mr *ibmr = ERR_PTR(-EINVAL);
- struct ib_umem_chunk *chunk;
+ struct scatterlist *sg;
struct nes_ucontext *nes_ucontext;
struct nes_pbl *nespbl;
struct nes_mr *nesmr;
@@ -2315,7 +2317,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct nes_mem_reg_req req;
struct nes_vpbl vpbl;
struct nes_root_vpbl root_vpbl;
- int nmap_index, page_index;
+ int entry, page_index;
int page_count = 0;
int err, pbl_depth = 0;
int chunk_pages;
@@ -2330,6 +2332,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u16 pbl_count;
u8 single_page = 1;
u8 stag_key;
+ int first_page = 1;
region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
if (IS_ERR(region)) {
@@ -2380,128 +2383,125 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
nesmr->region = region;
- list_for_each_entry(chunk, &region->chunk_list, list) {
- nes_debug(NES_DBG_MR, "Chunk: nents = %u, nmap = %u .\n",
- chunk->nents, chunk->nmap);
- for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
- if (sg_dma_address(&chunk->page_list[nmap_index]) & ~PAGE_MASK) {
- ib_umem_release(region);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
- (unsigned int) sg_dma_address(&chunk->page_list[nmap_index]));
- ibmr = ERR_PTR(-EINVAL);
- kfree(nesmr);
- goto reg_user_mr_err;
- }
+ for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
+ if (sg_dma_address(sg) & ~PAGE_MASK) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
+ (unsigned int) sg_dma_address(sg));
+ ibmr = ERR_PTR(-EINVAL);
+ kfree(nesmr);
+ goto reg_user_mr_err;
+ }
- if (!sg_dma_len(&chunk->page_list[nmap_index])) {
- ib_umem_release(region);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- stag_index);
- nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
- ibmr = ERR_PTR(-EINVAL);
- kfree(nesmr);
- goto reg_user_mr_err;
- }
+ if (!sg_dma_len(sg)) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
+ nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
+ ibmr = ERR_PTR(-EINVAL);
+ kfree(nesmr);
+ goto reg_user_mr_err;
+ }
- region_length += sg_dma_len(&chunk->page_list[nmap_index]);
- chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12;
- region_length -= skip_pages << 12;
- for (page_index=skip_pages; page_index < chunk_pages; page_index++) {
- skip_pages = 0;
- if ((page_count!=0)&&(page_count<<12)-(region->offset&(4096-1))>=region->length)
- goto enough_pages;
- if ((page_count&0x01FF) == 0) {
- if (page_count >= 1024 * 512) {
+ region_length += sg_dma_len(sg);
+ chunk_pages = sg_dma_len(sg) >> 12;
+ region_length -= skip_pages << 12;
+ for (page_index = skip_pages; page_index < chunk_pages; page_index++) {
+ skip_pages = 0;
+ if ((page_count != 0) && (page_count<<12)-(region->offset&(4096-1)) >= region->length)
+ goto enough_pages;
+ if ((page_count&0x01FF) == 0) {
+ if (page_count >= 1024 * 512) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter,
+ nesadapter->allocated_mrs, stag_index);
+ kfree(nesmr);
+ ibmr = ERR_PTR(-E2BIG);
+ goto reg_user_mr_err;
+ }
+ if (root_pbl_index == 1) {
+ root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
+ 8192, &root_vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
+ root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
+ if (!root_vpbl.pbl_vbase) {
ib_umem_release(region);
- nes_free_resource(nesadapter,
- nesadapter->allocated_mrs, stag_index);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
kfree(nesmr);
- ibmr = ERR_PTR(-E2BIG);
+ ibmr = ERR_PTR(-ENOMEM);
goto reg_user_mr_err;
}
- if (root_pbl_index == 1) {
- root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
- 8192, &root_vpbl.pbl_pbase);
- nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
- root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
- if (!root_vpbl.pbl_vbase) {
- ib_umem_release(region);
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- stag_index);
- kfree(nesmr);
- ibmr = ERR_PTR(-ENOMEM);
- goto reg_user_mr_err;
- }
- root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
- GFP_KERNEL);
- if (!root_vpbl.leaf_vpbl) {
- ib_umem_release(region);
- pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
- root_vpbl.pbl_pbase);
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- stag_index);
- kfree(nesmr);
- ibmr = ERR_PTR(-ENOMEM);
- goto reg_user_mr_err;
- }
- root_vpbl.pbl_vbase[0].pa_low =
- cpu_to_le32((u32)vpbl.pbl_pbase);
- root_vpbl.pbl_vbase[0].pa_high =
- cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
- root_vpbl.leaf_vpbl[0] = vpbl;
- }
- vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
- &vpbl.pbl_pbase);
- nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
- vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
- if (!vpbl.pbl_vbase) {
+ root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
+ GFP_KERNEL);
+ if (!root_vpbl.leaf_vpbl) {
ib_umem_release(region);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- ibmr = ERR_PTR(-ENOMEM);
+ pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+ root_vpbl.pbl_pbase);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
kfree(nesmr);
+ ibmr = ERR_PTR(-ENOMEM);
goto reg_user_mr_err;
}
- if (1 <= root_pbl_index) {
- root_vpbl.pbl_vbase[root_pbl_index].pa_low =
- cpu_to_le32((u32)vpbl.pbl_pbase);
- root_vpbl.pbl_vbase[root_pbl_index].pa_high =
- cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
- root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
- }
- root_pbl_index++;
- cur_pbl_index = 0;
+ root_vpbl.pbl_vbase[0].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[0].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+ root_vpbl.leaf_vpbl[0] = vpbl;
}
- if (single_page) {
- if (page_count != 0) {
- if ((last_dma_addr+4096) !=
- (sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*4096)))
- single_page = 0;
- last_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*4096);
- } else {
- first_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*4096);
- last_dma_addr = first_dma_addr;
- }
+ vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
+ vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
+ if (!vpbl.pbl_vbase) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ ibmr = ERR_PTR(-ENOMEM);
+ kfree(nesmr);
+ goto reg_user_mr_err;
+ }
+ if (1 <= root_pbl_index) {
+ root_vpbl.pbl_vbase[root_pbl_index].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[root_pbl_index].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
+ root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
+ }
+ root_pbl_index++;
+ cur_pbl_index = 0;
+ }
+ if (single_page) {
+ if (page_count != 0) {
+ if ((last_dma_addr+4096) !=
+ (sg_dma_address(sg)+
+ (page_index*4096)))
+ single_page = 0;
+ last_dma_addr = sg_dma_address(sg)+
+ (page_index*4096);
+ } else {
+ first_dma_addr = sg_dma_address(sg)+
+ (page_index*4096);
+ last_dma_addr = first_dma_addr;
}
-
- vpbl.pbl_vbase[cur_pbl_index].pa_low =
- cpu_to_le32((u32)(sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*4096)));
- vpbl.pbl_vbase[cur_pbl_index].pa_high =
- cpu_to_le32((u32)((((u64)(sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*4096))) >> 32)));
- cur_pbl_index++;
- page_count++;
}
+
+ vpbl.pbl_vbase[cur_pbl_index].pa_low =
+ cpu_to_le32((u32)(sg_dma_address(sg)+
+ (page_index*4096)));
+ vpbl.pbl_vbase[cur_pbl_index].pa_high =
+ cpu_to_le32((u32)((((u64)(sg_dma_address(sg)+
+ (page_index*4096))) >> 32)));
+ cur_pbl_index++;
+ page_count++;
}
}
+
enough_pages:
nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x,"
" stag_key=0x%08x\n",
@@ -2613,25 +2613,28 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
nespbl->pbl_size, (unsigned long) nespbl->pbl_pbase,
(void *) nespbl->pbl_vbase, nespbl->user_base);
- list_for_each_entry(chunk, &region->chunk_list, list) {
- for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
- chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12;
- chunk_pages += (sg_dma_len(&chunk->page_list[nmap_index]) & (4096-1)) ? 1 : 0;
- nespbl->page = sg_page(&chunk->page_list[0]);
- for (page_index=0; page_index<chunk_pages; page_index++) {
- ((__le32 *)pbl)[0] = cpu_to_le32((u32)
- (sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*4096)));
- ((__le32 *)pbl)[1] = cpu_to_le32(((u64)
- (sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*4096)))>>32);
- nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
- (unsigned long long)*pbl,
- le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
- pbl++;
- }
+ for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
+ chunk_pages = sg_dma_len(sg) >> 12;
+ chunk_pages += (sg_dma_len(sg) & (4096-1)) ? 1 : 0;
+ if (first_page) {
+ nespbl->page = sg_page(sg);
+ first_page = 0;
+ }
+
+ for (page_index = 0; page_index < chunk_pages; page_index++) {
+ ((__le32 *)pbl)[0] = cpu_to_le32((u32)
+ (sg_dma_address(sg)+
+ (page_index*4096)));
+ ((__le32 *)pbl)[1] = cpu_to_le32(((u64)
+ (sg_dma_address(sg)+
+ (page_index*4096)))>>32);
+ nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
+ (unsigned long long)*pbl,
+ le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
+ pbl++;
}
}
+
if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
list_add_tail(&nespbl->list, &nes_ucontext->qp_reg_mem_list);
} else {
@@ -3134,9 +3137,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
" original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
original_last_aeq, nesqp->last_aeq);
- if ((!ret) ||
- ((original_last_aeq != NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) &&
- (ret))) {
+ if (!ret || original_last_aeq != NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
if (dont_wait) {
if (nesqp->cm_id && nesqp->hw_tcp_state != 0) {
nes_debug(NES_DBG_MOD_QP, "QP%u Queuing fake disconnect for QP refcount (%d),"
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 0eff7c44d76b..309b31c31ae1 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -184,5 +184,6 @@ struct nes_qp {
u8 pau_busy;
u8 pau_pending;
u8 pau_state;
+ __u64 nesuqp_addr;
};
#endif /* NES_VERBS_H */
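
The nes_modify_qp() hunk in nes_verbs.c above rewrites "(!ret) || ((A) && (ret))" as "!ret || A"; the two are equivalent by distribution, since !r || (A && r) == (!r || A) && (!r || r) == !r || A. A throwaway userspace check of that identity over all four input combinations:

    #include <assert.h>
    #include <stdbool.h>

    /* Exhaustively confirm the simplified condition matches the original
     * for every combination of ret and the last_aeq comparison.
     */
    int main(void)
    {
        for (int ret = 0; ret <= 1; ++ret)
            for (int aeq_differs = 0; aeq_differs <= 1; ++aeq_differs) {
                bool old_cond = !ret || (aeq_differs && ret);
                bool new_cond = !ret || aeq_differs;
                assert(old_cond == new_cond);
            }
        return 0;
    }
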
diff --git a/drivers/infiniband/hw/ocrdma/Makefile b/drivers/infiniband/hw/ocrdma/Makefile
index 06a5bed12e43..d1bfd4f4cdde 100644
--- a/drivers/infiniband/hw/ocrdma/Makefile
+++ b/drivers/infiniband/hw/ocrdma/Makefile
@@ -2,4 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/emulex/benet
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o
-ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o
+ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o ocrdma_stats.o
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 7c001b97b23f..19011dbb930f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -35,17 +35,27 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
#include <be_roce.h>
#include "ocrdma_sli.h"
-#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
+#define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u"
+
+#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
+#define OC_NAME_SH OCRDMA_NODE_DESC "(Skyhawk)"
+#define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)"
+
+#define OC_SKH_DEVICE_PF 0x720
+#define OC_SKH_DEVICE_VF 0x728
#define OCRDMA_MAX_AH 512
#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
+
struct ocrdma_dev_attr {
u8 fw_ver[32];
u32 vendor_id;
@@ -65,6 +75,7 @@ struct ocrdma_dev_attr {
int max_mr;
u64 max_mr_size;
u32 max_num_mr_pbl;
+ int max_mw;
int max_fmr;
int max_map_per_fmr;
int max_pages_per_frmr;
@@ -83,6 +94,12 @@ struct ocrdma_dev_attr {
u8 num_ird_pages;
};
+struct ocrdma_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+};
+
struct ocrdma_pbl {
void *va;
dma_addr_t pa;
@@ -148,6 +165,26 @@ struct ocrdma_mr {
struct ocrdma_hw_mr hwmr;
};
+struct ocrdma_stats {
+ u8 type;
+ struct ocrdma_dev *dev;
+};
+
+struct stats_mem {
+ struct ocrdma_mqe mqe;
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+ char *debugfs_mem;
+};
+
+struct phy_info {
+ u16 auto_speeds_supported;
+ u16 fixed_speeds_supported;
+ u16 phy_type;
+ u16 interface_type;
+};
+
struct ocrdma_dev {
struct ib_device ibdev;
struct ocrdma_dev_attr attr;
@@ -191,12 +228,30 @@ struct ocrdma_dev {
struct mqe_ctx mqe_ctx;
struct be_dev_info nic_info;
+ struct phy_info phy;
+ char model_number[32];
+ u32 hba_port_num;
struct list_head entry;
struct rcu_head rcu;
int id;
- struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG];
+ u64 stag_arr[OCRDMA_MAX_STAG];
u16 pvid;
+ u32 asic_id;
+
+ ulong last_stats_time;
+ struct mutex stats_lock; /* provide synch for debugfs operations */
+ struct stats_mem stats_mem;
+ struct ocrdma_stats rsrc_stats;
+ struct ocrdma_stats rx_stats;
+ struct ocrdma_stats wqe_stats;
+ struct ocrdma_stats tx_stats;
+ struct ocrdma_stats db_err_stats;
+ struct ocrdma_stats tx_qp_err_stats;
+ struct ocrdma_stats rx_qp_err_stats;
+ struct ocrdma_stats tx_dbg_stats;
+ struct ocrdma_stats rx_dbg_stats;
+ struct dentry *dir;
};
struct ocrdma_cq {
@@ -209,8 +264,8 @@ struct ocrdma_cq {
*/
u32 max_hw_cqe;
bool phase_change;
- bool armed, solicited;
- bool arm_needed;
+ bool deferred_arm, deferred_sol;
+ bool first_arm;
spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
* to cq polling
@@ -223,6 +278,7 @@ struct ocrdma_cq {
struct ocrdma_ucontext *ucontext;
dma_addr_t pa;
u32 len;
+ u32 cqe_cnt;
/* head of all qp's sq and rq for which cqes need to be flushed
* by the software.
@@ -232,7 +288,6 @@ struct ocrdma_cq {
struct ocrdma_pd {
struct ib_pd ibpd;
- struct ocrdma_dev *dev;
struct ocrdma_ucontext *uctx;
u32 id;
int num_dpp_qp;
@@ -317,10 +372,8 @@ struct ocrdma_qp {
bool dpp_enabled;
u8 *ird_q_va;
bool signaled;
- u16 db_cache;
};
-
struct ocrdma_ucontext {
struct ib_ucontext ibucontext;
@@ -385,13 +438,6 @@ static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}
-
-static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
-{
- return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
- qp->id < 128) ? 24 : 16);
-}
-
static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
int cqe_valid;
@@ -436,4 +482,40 @@ static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
return 0;
}
+static inline char *hca_name(struct ocrdma_dev *dev)
+{
+ switch (dev->nic_info.pdev->device) {
+ case OC_SKH_DEVICE_PF:
+ case OC_SKH_DEVICE_VF:
+ return OC_NAME_SH;
+ default:
+ return OC_NAME_UNKNOWN;
+ }
+}
+
+static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
+ int eqid)
+{
+ int indx;
+
+ for (indx = 0; indx < dev->eq_cnt; indx++) {
+ if (dev->eq_tbl[indx].q.id == eqid)
+ return indx;
+ }
+
+ return -EINVAL;
+}
+
+static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
+{
+ if (dev->nic_info.dev_family == 0xF && !dev->asic_id) {
+ pci_read_config_dword(
+ dev->nic_info.pdev,
+ OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id);
+ }
+
+ return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
+ OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
+}
+
#endif
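The inline helpers introduced above centralize device identification: hca_name() maps the Skyhawk PF/VF PCI device IDs to a printable name, and ocrdma_get_asic_type() lazily reads the ASIC ID dword from PCI config space and extracts the generation field. A hedged usage sketch of the check that replaces the old OCRDMA_GEN2_FAMILY comparisons later in this patch; the wrapper name is illustrative only:

/* Illustrative wrapper; callers in this patch open-code the comparison. */
static inline bool ocrdma_is_skyhawk_r(struct ocrdma_dev *dev)
{
	return ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
}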
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index fbac8eb44036..1554cca5712a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -28,7 +28,8 @@
#ifndef __OCRDMA_ABI_H__
#define __OCRDMA_ABI_H__
-#define OCRDMA_ABI_VERSION 1
+#define OCRDMA_ABI_VERSION 2
+#define OCRDMA_BE_ROCE_ABI_VERSION 1
/* user kernel communication data structures. */
struct ocrdma_alloc_ucontext_resp {
@@ -107,9 +108,7 @@ struct ocrdma_create_qp_uresp {
u32 db_sq_offset;
u32 db_rq_offset;
u32 db_shift;
- u64 rsvd1;
- u64 rsvd2;
- u64 rsvd3;
+ u64 rsvd[11];
} __packed;
struct ocrdma_create_srq_uresp {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 34071143006e..d4cc01f10c01 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -100,7 +100,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
- ah = kzalloc(sizeof *ah, GFP_ATOMIC);
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 1664d648cbfc..3bbf2010a821 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -32,7 +32,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_addr.h>
#include "ocrdma.h"
#include "ocrdma_hw.h"
@@ -243,6 +242,23 @@ static int ocrdma_get_mbx_errno(u32 status)
return err_num;
}
+char *port_speed_string(struct ocrdma_dev *dev)
+{
+ char *str = "";
+ u16 speeds_supported;
+
+ speeds_supported = dev->phy.fixed_speeds_supported |
+ dev->phy.auto_speeds_supported;
+ if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
+ str = "40Gbps ";
+ else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
+ str = "10Gbps ";
+ else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
+ str = "1Gbps ";
+
+ return str;
+}
+
static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
{
int err_num = -EINVAL;
@@ -332,6 +348,11 @@ static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
return mqe;
}
+static void *ocrdma_alloc_mqe(void)
+{
+ return kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
+}
+
static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
{
dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
@@ -364,8 +385,8 @@ static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
}
}
-static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
- int queue_type)
+static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
+ struct ocrdma_queue_info *q, int queue_type)
{
u8 opcode = 0;
int status;
@@ -444,7 +465,7 @@ mbx_err:
return status;
}
-static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
int irq;
@@ -574,6 +595,7 @@ static int ocrdma_create_mq(struct ocrdma_dev *dev)
if (status)
goto alloc_err;
+ dev->eq_tbl[0].cq_cnt++;
status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
if (status)
goto mbx_cq_free;
@@ -639,7 +661,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
{
struct ocrdma_qp *qp = NULL;
struct ocrdma_cq *cq = NULL;
- struct ib_event ib_evt;
+ struct ib_event ib_evt = { 0 };
int cq_event = 0;
int qp_event = 1;
int srq_event = 0;
@@ -664,6 +686,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
case OCRDMA_CQ_OVERRUN_ERROR:
ib_evt.element.cq = &cq->ibcq;
ib_evt.event = IB_EVENT_CQ_ERR;
+ cq_event = 1;
+ qp_event = 0;
break;
case OCRDMA_CQ_QPCAT_ERROR:
ib_evt.element.qp = &qp->ibqp;
@@ -725,6 +749,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
qp->srq->ibsrq.
srq_context);
} else if (dev_event) {
+ pr_err("%s: Fatal event received\n", dev->ibdev.name);
ib_dispatch_event(&ib_evt);
}
@@ -752,7 +777,6 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
}
}
-
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
/* async CQE processing */
@@ -799,8 +823,6 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
ocrdma_process_acqe(dev, cqe);
else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
ocrdma_process_mcqe(dev, cqe);
- else
- pr_err("%s() cqe->compl is not set.\n", __func__);
memset(cqe, 0, sizeof(struct ocrdma_mcqe));
ocrdma_mcq_inc_tail(dev);
}
@@ -858,16 +880,8 @@ static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
BUG();
cq = dev->cq_tbl[cq_idx];
- if (cq == NULL) {
- pr_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
+ if (cq == NULL)
return;
- }
- spin_lock_irqsave(&cq->cq_lock, flags);
- cq->armed = false;
- cq->solicited = false;
- spin_unlock_irqrestore(&cq->cq_lock, flags);
-
- ocrdma_ring_cq_db(dev, cq->id, false, false, 0);
if (cq->ibcq.comp_handler) {
spin_lock_irqsave(&cq->comp_handler_lock, flags);
@@ -892,27 +906,35 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
struct ocrdma_dev *dev = eq->dev;
struct ocrdma_eqe eqe;
struct ocrdma_eqe *ptr;
- u16 eqe_popped = 0;
u16 cq_id;
- while (1) {
+ int budget = eq->cq_cnt;
+
+ do {
ptr = ocrdma_get_eqe(eq);
eqe = *ptr;
ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
break;
- eqe_popped += 1;
+
ptr->id_valid = 0;
+ /* ring eq doorbell as soon as its consumed. */
+ ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
/* check whether its CQE or not. */
if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
ocrdma_cq_handler(dev, cq_id);
}
ocrdma_eq_inc_tail(eq);
- }
- ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
- /* Ring EQ doorbell with num_popped to 0 to enable interrupts again. */
- if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
- ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
+
+ /* There can be a stale EQE after the last bound CQ is
+ * destroyed. EQE valid and budget == 0 implies this.
+ */
+ if (budget)
+ budget--;
+
+ } while (budget);
+
+ ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
return IRQ_HANDLED;
}
@@ -949,7 +971,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
{
int status = 0;
u16 cqe_status, ext_status;
- struct ocrdma_mqe *rsp;
+ struct ocrdma_mqe *rsp_mqe;
+ struct ocrdma_mbx_rsp *rsp = NULL;
mutex_lock(&dev->mqe_ctx.lock);
ocrdma_post_mqe(dev, mqe);
@@ -958,23 +981,61 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
goto mbx_err;
cqe_status = dev->mqe_ctx.cqe_status;
ext_status = dev->mqe_ctx.ext_status;
- rsp = ocrdma_get_mqe_rsp(dev);
- ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
+ rsp_mqe = ocrdma_get_mqe_rsp(dev);
+ ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
+ if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
+ OCRDMA_MQE_HDR_EMB_SHIFT)
+ rsp = &mqe->u.rsp;
+
if (cqe_status || ext_status) {
- pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
- __func__,
- (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
- OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
+ pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
+ __func__, cqe_status, ext_status);
+ if (rsp) {
+ /* This is for embedded cmds. */
+ pr_err("opcode=0x%x, subsystem=0x%x\n",
+ (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
+ OCRDMA_MBX_RSP_OPCODE_SHIFT,
+ (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
+ OCRDMA_MBX_RSP_SUBSYS_SHIFT);
+ }
status = ocrdma_get_mbx_cqe_errno(cqe_status);
goto mbx_err;
}
- if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
+ /* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
+ if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
mbx_err:
mutex_unlock(&dev->mqe_ctx.lock);
return status;
}
+static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
+ void *payload_va)
+{
+ int status = 0;
+ struct ocrdma_mbx_rsp *rsp = payload_va;
+
+ if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
+ OCRDMA_MQE_HDR_EMB_SHIFT)
+ BUG();
+
+ status = ocrdma_mbx_cmd(dev, mqe);
+ if (!status)
+ /* For non embedded, only CQE failures are handled in
+ * ocrdma_mbx_cmd. We need to check for RSP errors.
+ */
+ if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
+ status = ocrdma_get_mbx_errno(rsp->status);
+
+ if (status)
+ pr_err("opcode=0x%x, subsystem=0x%x\n",
+ (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
+ OCRDMA_MBX_RSP_OPCODE_SHIFT,
+ (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
+ OCRDMA_MBX_RSP_SUBSYS_SHIFT);
+ return status;
+}
+
static void ocrdma_get_attr(struct ocrdma_dev *dev,
struct ocrdma_dev_attr *attr,
struct ocrdma_mbx_query_config *rsp)
@@ -985,6 +1046,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_qp =
(rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
+ attr->max_srq =
+ (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
attr->max_send_sge = ((rsp->max_write_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
@@ -1000,9 +1064,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
- attr->max_srq =
- (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
- OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
@@ -1015,6 +1076,7 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
+ attr->max_mw = rsp->max_mw;
attr->max_mr = rsp->max_mr;
attr->max_mr_size = ~0ull;
attr->max_fmr = 0;
@@ -1036,7 +1098,7 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_inline_data =
attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
sizeof(struct ocrdma_sge));
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
attr->ird = 1;
attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
@@ -1110,6 +1172,96 @@ mbx_err:
return status;
}
+int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
+{
+ struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
+ struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
+ struct ocrdma_rdma_stats_resp *old_stats = NULL;
+ int status;
+
+ old_stats = kzalloc(sizeof(*old_stats), GFP_KERNEL);
+ if (old_stats == NULL)
+ return -ENOMEM;
+
+ memset(mqe, 0, sizeof(*mqe));
+ mqe->hdr.pyld_len = dev->stats_mem.size;
+ mqe->hdr.spcl_sge_cnt_emb |=
+ (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+ OCRDMA_MQE_HDR_SGE_CNT_MASK;
+ mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
+ mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
+ mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;
+
+ /* Cache the old stats */
+ memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
+ memset(req, 0, dev->stats_mem.size);
+
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
+ OCRDMA_CMD_GET_RDMA_STATS,
+ OCRDMA_SUBSYS_ROCE,
+ dev->stats_mem.size);
+ if (reset)
+ req->reset_stats = reset;
+
+ status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
+ if (status)
+ /* Copy from cache, if mbox fails */
+ memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
+ else
+ ocrdma_le32_to_cpu(req, dev->stats_mem.size);
+
+ kfree(old_stats);
+ return status;
+}
+
+static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_dma_mem dma;
+ struct ocrdma_mqe *mqe;
+ struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
+ struct mgmt_hba_attribs *hba_attribs;
+
+ mqe = ocrdma_alloc_mqe();
+ if (!mqe)
+ return status;
+ memset(mqe, 0, sizeof(*mqe));
+
+ dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
+ dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
+ dma.size, &dma.pa, GFP_KERNEL);
+ if (!dma.va)
+ goto free_mqe;
+
+ mqe->hdr.pyld_len = dma.size;
+ mqe->hdr.spcl_sge_cnt_emb |=
+ (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+ OCRDMA_MQE_HDR_SGE_CNT_MASK;
+ mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
+ mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
+ mqe->u.nonemb_req.sge[0].len = dma.size;
+
+ memset(dma.va, 0, dma.size);
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
+ OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
+ OCRDMA_SUBSYS_COMMON,
+ dma.size);
+
+ status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
+ if (!status) {
+ ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
+ hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
+
+ dev->hba_port_num = hba_attribs->phy_port;
+ strncpy(dev->model_number,
+ hba_attribs->controller_model_number, 31);
+ }
+ dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
+free_mqe:
+ kfree(mqe);
+ return status;
+}
+
static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
{
int status = -ENOMEM;
@@ -1157,6 +1309,35 @@ mbx_err:
return status;
}
+static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_mqe *cmd;
+ struct ocrdma_get_phy_info_rsp *rsp;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
+ if (!cmd)
+ return status;
+
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+ OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
+ sizeof(*cmd));
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
+ dev->phy.phy_type = le16_to_cpu(rsp->phy_type);
+ dev->phy.auto_speeds_supported =
+ le16_to_cpu(rsp->auto_speeds_supported);
+ dev->phy.fixed_speeds_supported =
+ le16_to_cpu(rsp->fixed_speeds_supported);
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
int status = -ENOMEM;
@@ -1226,7 +1407,7 @@ static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
{
- int i ;
+ int i;
int status = 0;
int max_ah;
struct ocrdma_create_ah_tbl *cmd;
@@ -1357,12 +1538,10 @@ static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
int i;
mutex_lock(&dev->dev_lock);
- for (i = 0; i < dev->eq_cnt; i++) {
- if (dev->eq_tbl[i].q.id != eq_id)
- continue;
- dev->eq_tbl[i].cq_cnt -= 1;
- break;
- }
+ i = ocrdma_get_eq_table_index(dev, eq_id);
+ if (i == -EINVAL)
+ BUG();
+ dev->eq_tbl[i].cq_cnt -= 1;
mutex_unlock(&dev->dev_lock);
}
@@ -1380,7 +1559,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
__func__, dev->id, dev->attr.max_cqe, entries);
return -EINVAL;
}
- if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
+ if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
return -EINVAL;
if (dpp_cq) {
@@ -1417,6 +1596,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cq->eqn = ocrdma_bind_eq(dev);
cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
cqe_count = cq->len / cqe_size;
+ cq->cqe_cnt = cqe_count;
if (cqe_count > 1024) {
/* Set cnt to 3 to indicate more than 1024 cq entries */
cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
@@ -1439,7 +1619,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
}
/* shared eq between all the consumer cqs. */
cmd->cmd.eqn = cq->eqn;
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
if (dpp_cq)
cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
OCRDMA_CREATE_CQ_TYPE_SHIFT;
@@ -1484,12 +1664,9 @@ int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
(cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
OCRDMA_DESTROY_CQ_QID_MASK;
- ocrdma_unbind_eq(dev, cq->eqn);
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
- if (status)
- goto mbx_err;
+ ocrdma_unbind_eq(dev, cq->eqn);
dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
-mbx_err:
kfree(cmd);
return status;
}
@@ -2029,8 +2206,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
qp->rq_cq = cq;
- if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
- (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
+ if (pd->dpp_enabled && pd->num_dpp_qp) {
ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
dpp_cq_id);
}
@@ -2099,7 +2275,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
sizeof(cmd->params.dgid));
status = ocrdma_query_gid(&qp->dev->ibdev, 1,
- ah_attr->grh.sgid_index, &sgid);
+ ah_attr->grh.sgid_index, &sgid);
if (status)
return status;
@@ -2127,8 +2303,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
struct ocrdma_modify_qp *cmd,
- struct ib_qp_attr *attrs, int attr_mask,
- enum ib_qp_state old_qps)
+ struct ib_qp_attr *attrs, int attr_mask)
{
int status = 0;
@@ -2233,8 +2408,7 @@ pmtu_err:
}
int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
- struct ib_qp_attr *attrs, int attr_mask,
- enum ib_qp_state old_qps)
+ struct ib_qp_attr *attrs, int attr_mask)
{
int status = -ENOMEM;
struct ocrdma_modify_qp *cmd;
@@ -2257,7 +2431,7 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
OCRDMA_QP_PARAMS_STATE_MASK;
}
- status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
+ status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask);
if (status)
goto mbx_err;
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
@@ -2488,7 +2662,7 @@ static int ocrdma_create_eqs(struct ocrdma_dev *dev)
for (i = 0; i < num_eq; i++) {
status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
- OCRDMA_EQ_LEN);
+ OCRDMA_EQ_LEN);
if (status) {
status = -EINVAL;
break;
@@ -2533,6 +2707,13 @@ int ocrdma_init_hw(struct ocrdma_dev *dev)
status = ocrdma_mbx_create_ah_tbl(dev);
if (status)
goto conf_err;
+ status = ocrdma_mbx_get_phy_info(dev);
+ if (status)
+ goto conf_err;
+ status = ocrdma_mbx_get_ctrl_attribs(dev);
+ if (status)
+ goto conf_err;
+
return 0;
conf_err:
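The non-embedded mailbox path added above (ocrdma_nonemb_mbx_cmd() and its callers for RDMA stats and controller attributes) keeps the payload in a separate DMA buffer and points the MQE at it through a single SGE instead of embedding the command. A hedged sketch of that common SGE setup, using only fields that appear in this patch; the helper name is hypothetical:

/* Hypothetical helper: point a non-embedded MQE at a DMA payload buffer. */
static void ocrdma_fill_nonemb_mqe_sge(struct ocrdma_mqe *mqe,
				       dma_addr_t pa, u32 len)
{
	memset(mqe, 0, sizeof(*mqe));
	mqe->hdr.pyld_len = len;
	mqe->hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
				     OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe->u.nonemb_req.sge[0].pa_lo = (u32)(pa & 0xffffffff);
	mqe->u.nonemb_req.sge[0].pa_hi = (u32)upper_32_bits(pa);
	mqe->u.nonemb_req.sge[0].len = len;
}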
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index 82fe332ae6c6..e513f7293142 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -112,8 +112,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,
u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
u16 *dpp_credit_lmt);
int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *,
- struct ib_qp_attr *attrs, int attr_mask,
- enum ib_qp_state old_qps);
+ struct ib_qp_attr *attrs, int attr_mask);
int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *,
struct ocrdma_qp_params *param);
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *);
@@ -132,5 +131,8 @@ int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state,
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
void ocrdma_flush_qp(struct ocrdma_qp *);
+int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
+int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
+char *port_speed_string(struct ocrdma_dev *dev);
#endif /* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 1a8a945efa60..7c504e079744 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -39,10 +39,11 @@
#include "ocrdma_ah.h"
#include "be_roce.h"
#include "ocrdma_hw.h"
+#include "ocrdma_stats.h"
#include "ocrdma_abi.h"
-MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION);
-MODULE_DESCRIPTION("Emulex RoCE HCA Driver");
+MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
+MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
@@ -286,7 +287,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.process_mad = ocrdma_process_mad;
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
dev->ibdev.uverbs_cmd_mask |=
OCRDMA_UVERBS(CREATE_SRQ) |
OCRDMA_UVERBS(MODIFY_SRQ) |
@@ -338,9 +339,42 @@ static void ocrdma_free_resources(struct ocrdma_dev *dev)
kfree(dev->sgid_tbl);
}
+/* OCRDMA sysfs interface */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
+}
+
+static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%s", &dev->attr.fw_ver[0]);
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+
+static struct device_attribute *ocrdma_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_fw_ver
+};
+
+static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
+ device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
+}
+
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
- int status = 0;
+ int status = 0, i;
struct ocrdma_dev *dev;
dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
@@ -369,11 +403,25 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status)
goto alloc_err;
+ for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
+ if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
+ goto sysfs_err;
spin_lock(&ocrdma_devlist_lock);
list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
spin_unlock(&ocrdma_devlist_lock);
+ /* Init stats */
+ ocrdma_add_port_stats(dev);
+
+ pr_info("%s %s: %s \"%s\" port %d\n",
+ dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
+ port_speed_string(dev), dev->model_number,
+ dev->hba_port_num);
+ pr_info("%s ocrdma%d driver loaded successfully\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
return dev;
+sysfs_err:
+ ocrdma_remove_sysfiles(dev);
alloc_err:
ocrdma_free_resources(dev);
ocrdma_cleanup_hw(dev);
@@ -400,6 +448,9 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
/* first unregister with stack to stop all the active traffic
* of the registered clients.
*/
+ ocrdma_rem_port_stats(dev);
+ ocrdma_remove_sysfiles(dev);
+
ib_unregister_device(&dev->ibdev);
spin_lock(&ocrdma_devlist_lock);
@@ -437,7 +488,7 @@ static int ocrdma_close(struct ocrdma_dev *dev)
cur_qp = dev->qp_tbl;
for (i = 0; i < OCRDMA_MAX_QP; i++) {
qp = cur_qp[i];
- if (qp) {
+ if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
/* change the QP state to ERROR */
_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
@@ -478,6 +529,7 @@ static struct ocrdma_driver ocrdma_drv = {
.add = ocrdma_add,
.remove = ocrdma_remove,
.state_change_handler = ocrdma_event_handler,
+ .be_abi_version = OCRDMA_BE_ROCE_ABI_VERSION,
};
static void ocrdma_unregister_inet6addr_notifier(void)
@@ -487,10 +539,17 @@ static void ocrdma_unregister_inet6addr_notifier(void)
#endif
}
+static void ocrdma_unregister_inetaddr_notifier(void)
+{
+ unregister_inetaddr_notifier(&ocrdma_inetaddr_notifier);
+}
+
static int __init ocrdma_init_module(void)
{
int status;
+ ocrdma_init_debugfs();
+
status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier);
if (status)
return status;
@@ -498,13 +557,19 @@ static int __init ocrdma_init_module(void)
#if IS_ENABLED(CONFIG_IPV6)
status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
if (status)
- return status;
+ goto err_notifier6;
#endif
status = be_roce_register_driver(&ocrdma_drv);
if (status)
- ocrdma_unregister_inet6addr_notifier();
+ goto err_be_reg;
+ return 0;
+
+err_be_reg:
+ ocrdma_unregister_inet6addr_notifier();
+err_notifier6:
+ ocrdma_unregister_inetaddr_notifier();
return status;
}
@@ -512,6 +577,8 @@ static void __exit ocrdma_exit_module(void)
{
be_roce_unregister_driver(&ocrdma_drv);
ocrdma_unregister_inet6addr_notifier();
+ ocrdma_unregister_inetaddr_notifier();
+ ocrdma_rem_debugfs();
}
module_init(ocrdma_init_module);
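The init hunk above also replaces the early returns with a goto-based unwind, so the inetaddr notifier registered first is removed again when IPv6 notifier or be_roce registration fails. The general shape of that idiom, with purely illustrative names:

static int __init example_init(void)
{
	int status;

	status = register_step_a();
	if (status)
		return status;

	status = register_step_b();
	if (status)
		goto err_a;

	status = register_step_c();
	if (status)
		goto err_b;

	return 0;

err_b:
	unregister_step_b();
err_a:
	unregister_step_a();
	return status;
}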
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 60d5ac23ea80..96c9ee602ba4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -30,8 +30,16 @@
#define Bit(_b) (1 << (_b))
-#define OCRDMA_GEN1_FAMILY 0xB
-#define OCRDMA_GEN2_FAMILY 0x0F
+enum {
+ OCRDMA_ASIC_GEN_SKH_R = 0x04,
+ OCRDMA_ASIC_GEN_LANCER = 0x0B
+};
+
+enum {
+ OCRDMA_ASIC_REV_A0 = 0x00,
+ OCRDMA_ASIC_REV_B0 = 0x10,
+ OCRDMA_ASIC_REV_C0 = 0x20
+};
#define OCRDMA_SUBSYS_ROCE 10
enum {
@@ -64,6 +72,7 @@ enum {
OCRDMA_CMD_ATTACH_MCAST,
OCRDMA_CMD_DETACH_MCAST,
+ OCRDMA_CMD_GET_RDMA_STATS,
OCRDMA_CMD_MAX
};
@@ -74,12 +83,14 @@ enum {
OCRDMA_CMD_CREATE_CQ = 12,
OCRDMA_CMD_CREATE_EQ = 13,
OCRDMA_CMD_CREATE_MQ = 21,
+ OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32,
OCRDMA_CMD_GET_FW_VER = 35,
OCRDMA_CMD_DELETE_MQ = 53,
OCRDMA_CMD_DELETE_CQ = 54,
OCRDMA_CMD_DELETE_EQ = 55,
OCRDMA_CMD_GET_FW_CONFIG = 58,
- OCRDMA_CMD_CREATE_MQ_EXT = 90
+ OCRDMA_CMD_CREATE_MQ_EXT = 90,
+ OCRDMA_CMD_PHY_DETAILS = 102
};
enum {
@@ -103,7 +114,10 @@ enum {
OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ_OFFSET,
OCRDMA_DB_CQ_OFFSET = 0x120,
OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET,
- OCRDMA_DB_MQ_OFFSET = 0x140
+ OCRDMA_DB_MQ_OFFSET = 0x140,
+
+ OCRDMA_DB_SQ_SHIFT = 16,
+ OCRDMA_DB_RQ_SHIFT = 24
};
#define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
@@ -138,6 +152,10 @@ enum {
#define OCRDMA_MIN_Q_PAGE_SIZE (4096)
#define OCRDMA_MAX_Q_PAGES (8)
+#define OCRDMA_SLI_ASIC_ID_OFFSET 0x9C
+#define OCRDMA_SLI_ASIC_REV_MASK 0x000000FF
+#define OCRDMA_SLI_ASIC_GEN_NUM_MASK 0x0000FF00
+#define OCRDMA_SLI_ASIC_GEN_NUM_SHIFT 0x08
/*
# 0: 4K Bytes
# 1: 8K Bytes
@@ -562,6 +580,30 @@ enum {
OCRDMA_FN_MODE_RDMA = 0x4
};
+struct ocrdma_get_phy_info_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u16 phy_type;
+ u16 interface_type;
+ u32 misc_params;
+ u16 ext_phy_details;
+ u16 rsvd;
+ u16 auto_speeds_supported;
+ u16 fixed_speeds_supported;
+ u32 future_use[2];
+};
+
+enum {
+ OCRDMA_PHY_SPEED_ZERO = 0x0,
+ OCRDMA_PHY_SPEED_10MBPS = 0x1,
+ OCRDMA_PHY_SPEED_100MBPS = 0x2,
+ OCRDMA_PHY_SPEED_1GBPS = 0x4,
+ OCRDMA_PHY_SPEED_10GBPS = 0x8,
+ OCRDMA_PHY_SPEED_40GBPS = 0x20
+};
+
+
struct ocrdma_get_link_speed_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
@@ -590,7 +632,7 @@ enum {
enum {
OCRDMA_CREATE_CQ_VER2 = 2,
- OCRDMA_CREATE_CQ_VER3 = 3,
+ OCRDMA_CREATE_CQ_VER3 = 3,
OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF,
OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16,
@@ -1050,6 +1092,7 @@ enum {
OCRDMA_MODIFY_QP_RSP_MAX_ORD_MASK = 0xFFFF <<
OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT
};
+
struct ocrdma_modify_qp_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
@@ -1062,8 +1105,8 @@ struct ocrdma_query_qp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
-#define OCRDMA_QUERY_UP_QP_ID_SHIFT 0
-#define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF
+#define OCRDMA_QUERY_UP_QP_ID_SHIFT 0
+#define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF
u32 qp_id;
};
@@ -1703,4 +1746,208 @@ struct ocrdma_av {
u32 valid;
} __packed;
+struct ocrdma_rsrc_stats {
+ u32 dpp_pds;
+ u32 non_dpp_pds;
+ u32 rc_dpp_qps;
+ u32 uc_dpp_qps;
+ u32 ud_dpp_qps;
+ u32 rc_non_dpp_qps;
+ u32 rsvd;
+ u32 uc_non_dpp_qps;
+ u32 ud_non_dpp_qps;
+ u32 rsvd1;
+ u32 srqs;
+ u32 rbqs;
+ u32 r64K_nsmr;
+ u32 r64K_to_2M_nsmr;
+ u32 r2M_to_44M_nsmr;
+ u32 r44M_to_1G_nsmr;
+ u32 r1G_to_4G_nsmr;
+ u32 nsmr_count_4G_to_32G;
+ u32 r32G_to_64G_nsmr;
+ u32 r64G_to_128G_nsmr;
+ u32 r128G_to_higher_nsmr;
+ u32 embedded_nsmr;
+ u32 frmr;
+ u32 prefetch_qps;
+ u32 ondemand_qps;
+ u32 phy_mr;
+ u32 mw;
+ u32 rsvd2[7];
+};
+
+struct ocrdma_db_err_stats {
+ u32 sq_doorbell_errors;
+ u32 cq_doorbell_errors;
+ u32 rq_srq_doorbell_errors;
+ u32 cq_overflow_errors;
+ u32 rsvd[4];
+};
+
+struct ocrdma_wqe_stats {
+ u32 large_send_rc_wqes_lo;
+ u32 large_send_rc_wqes_hi;
+ u32 large_write_rc_wqes_lo;
+ u32 large_write_rc_wqes_hi;
+ u32 rsvd[4];
+ u32 read_wqes_lo;
+ u32 read_wqes_hi;
+ u32 frmr_wqes_lo;
+ u32 frmr_wqes_hi;
+ u32 mw_bind_wqes_lo;
+ u32 mw_bind_wqes_hi;
+ u32 invalidate_wqes_lo;
+ u32 invalidate_wqes_hi;
+ u32 rsvd1[2];
+ u32 dpp_wqe_drops;
+ u32 rsvd2[5];
+};
+
+struct ocrdma_tx_stats {
+ u32 send_pkts_lo;
+ u32 send_pkts_hi;
+ u32 write_pkts_lo;
+ u32 write_pkts_hi;
+ u32 read_pkts_lo;
+ u32 read_pkts_hi;
+ u32 read_rsp_pkts_lo;
+ u32 read_rsp_pkts_hi;
+ u32 ack_pkts_lo;
+ u32 ack_pkts_hi;
+ u32 send_bytes_lo;
+ u32 send_bytes_hi;
+ u32 write_bytes_lo;
+ u32 write_bytes_hi;
+ u32 read_req_bytes_lo;
+ u32 read_req_bytes_hi;
+ u32 read_rsp_bytes_lo;
+ u32 read_rsp_bytes_hi;
+ u32 ack_timeouts;
+ u32 rsvd[5];
+};
+
+
+struct ocrdma_tx_qp_err_stats {
+ u32 local_length_errors;
+ u32 local_protection_errors;
+ u32 local_qp_operation_errors;
+ u32 retry_count_exceeded_errors;
+ u32 rnr_retry_count_exceeded_errors;
+ u32 rsvd[3];
+};
+
+struct ocrdma_rx_stats {
+ u32 roce_frame_bytes_lo;
+ u32 roce_frame_bytes_hi;
+ u32 roce_frame_icrc_drops;
+ u32 roce_frame_payload_len_drops;
+ u32 ud_drops;
+ u32 qp1_drops;
+ u32 psn_error_request_packets;
+ u32 psn_error_resp_packets;
+ u32 rnr_nak_timeouts;
+ u32 rnr_nak_receives;
+ u32 roce_frame_rxmt_drops;
+ u32 nak_count_psn_sequence_errors;
+ u32 rc_drop_count_lookup_errors;
+ u32 rq_rnr_naks;
+ u32 srq_rnr_naks;
+ u32 roce_frames_lo;
+ u32 roce_frames_hi;
+ u32 rsvd;
+};
+
+struct ocrdma_rx_qp_err_stats {
+ u32 nak_invalid_requst_errors;
+ u32 nak_remote_operation_errors;
+ u32 nak_count_remote_access_errors;
+ u32 local_length_errors;
+ u32 local_protection_errors;
+ u32 local_qp_operation_errors;
+ u32 rsvd[2];
+};
+
+struct ocrdma_tx_dbg_stats {
+ u32 data[100];
+};
+
+struct ocrdma_rx_dbg_stats {
+ u32 data[200];
+};
+
+struct ocrdma_rdma_stats_req {
+ struct ocrdma_mbx_hdr hdr;
+ u8 reset_stats;
+ u8 rsvd[3];
+} __packed;
+
+struct ocrdma_rdma_stats_resp {
+ struct ocrdma_mbx_hdr hdr;
+ struct ocrdma_rsrc_stats act_rsrc_stats;
+ struct ocrdma_rsrc_stats th_rsrc_stats;
+ struct ocrdma_db_err_stats db_err_stats;
+ struct ocrdma_wqe_stats wqe_stats;
+ struct ocrdma_tx_stats tx_stats;
+ struct ocrdma_tx_qp_err_stats tx_qp_err_stats;
+ struct ocrdma_rx_stats rx_stats;
+ struct ocrdma_rx_qp_err_stats rx_qp_err_stats;
+ struct ocrdma_tx_dbg_stats tx_dbg_stats;
+ struct ocrdma_rx_dbg_stats rx_dbg_stats;
+} __packed;
+
+
+struct mgmt_hba_attribs {
+ u8 flashrom_version_string[32];
+ u8 manufacturer_name[32];
+ u32 supported_modes;
+ u32 rsvd0[3];
+ u8 ncsi_ver_string[12];
+ u32 default_extended_timeout;
+ u8 controller_model_number[32];
+ u8 controller_description[64];
+ u8 controller_serial_number[32];
+ u8 ip_version_string[32];
+ u8 firmware_version_string[32];
+ u8 bios_version_string[32];
+ u8 redboot_version_string[32];
+ u8 driver_version_string[32];
+ u8 fw_on_flash_version_string[32];
+ u32 functionalities_supported;
+ u16 max_cdblength;
+ u8 asic_revision;
+ u8 generational_guid[16];
+ u8 hba_port_count;
+ u16 default_link_down_timeout;
+ u8 iscsi_ver_min_max;
+ u8 multifunction_device;
+ u8 cache_valid;
+ u8 hba_status;
+ u8 max_domains_supported;
+ u8 phy_port;
+ u32 firmware_post_status;
+ u32 hba_mtu[8];
+ u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+ struct mgmt_hba_attribs hba_attribs;
+ u16 pci_vendor_id;
+ u16 pci_device_id;
+ u16 pci_sub_vendor_id;
+ u16 pci_sub_system_id;
+ u8 pci_bus_number;
+ u8 pci_device_number;
+ u8 pci_function_number;
+ u8 interface_type;
+ u64 unique_identifier;
+ u32 rsvd0[5];
+};
+
+struct ocrdma_get_ctrl_attribs_rsp {
+ struct ocrdma_mbx_hdr hdr;
+ struct mgmt_controller_attrib ctrl_attribs;
+};
+
+
#endif /* __OCRDMA_SLI_H__ */
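The firmware returns 64-bit counters in the structures above as _lo/_hi dword pairs; the convert_to_64bit() macro added in ocrdma.h recombines them before printing. A small hedged example (the function wrapper is illustrative only):

/* Illustrative: recombine one lo/hi pair from the TX stats block. */
static u64 ocrdma_tx_send_pkts(struct ocrdma_rdma_stats_resp *rdma_stats)
{
	struct ocrdma_tx_stats *tx = &rdma_stats->tx_stats;

	return convert_to_64bit(tx->send_pkts_lo, tx->send_pkts_hi);
}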
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
new file mode 100644
index 000000000000..6c54106f5e64
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -0,0 +1,623 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for *
+ * RoCE (RDMA over Converged Ethernet) adapters. *
+ * Copyright (C) 2008-2014 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#include <rdma/ib_addr.h>
+#include "ocrdma_stats.h"
+
+static struct dentry *ocrdma_dbgfs_dir;
+
+static int ocrdma_add_stat(char *start, char *pcur,
+ char *name, u64 count)
+{
+ char buff[128] = {0};
+ int cpy_len = 0;
+
+ snprintf(buff, 128, "%s: %llu\n", name, count);
+ cpy_len = strlen(buff);
+
+ if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) {
+ pr_err("%s: No space in stats buff\n", __func__);
+ return 0;
+ }
+
+ memcpy(pcur, buff, cpy_len);
+ return cpy_len;
+}
+
+static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
+{
+ struct stats_mem *mem = &dev->stats_mem;
+
+ /* Alloc mbox command mem*/
+ mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
+ sizeof(struct ocrdma_rdma_stats_resp));
+
+ mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (!mem->va) {
+ pr_err("%s: stats mbox allocation failed\n", __func__);
+ return false;
+ }
+
+ memset(mem->va, 0, mem->size);
+
+ /* Alloc debugfs mem */
+ mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
+ if (!mem->debugfs_mem) {
+ pr_err("%s: stats debugfs mem allocation failed\n", __func__);
+ return false;
+ }
+
+ return true;
+}
+
+static void ocrdma_release_stats_mem(struct ocrdma_dev *dev)
+{
+ struct stats_mem *mem = &dev->stats_mem;
+
+ if (mem->va)
+ dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
+ mem->va, mem->pa);
+ kfree(mem->debugfs_mem);
+}
+
+static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds",
+ (u64)rsrc_stats->dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds",
+ (u64)rsrc_stats->non_dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps",
+ (u64)rsrc_stats->rc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps",
+ (u64)rsrc_stats->uc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps",
+ (u64)rsrc_stats->ud_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps",
+ (u64)rsrc_stats->rc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps",
+ (u64)rsrc_stats->uc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps",
+ (u64)rsrc_stats->ud_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_srqs",
+ (u64)rsrc_stats->srqs);
+ pcur += ocrdma_add_stat(stats, pcur, "active_rbqs",
+ (u64)rsrc_stats->rbqs);
+ pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr",
+ (u64)rsrc_stats->r64K_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr",
+ (u64)rsrc_stats->r64K_to_2M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr",
+ (u64)rsrc_stats->r2M_to_44M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr",
+ (u64)rsrc_stats->r44M_to_1G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr",
+ (u64)rsrc_stats->r1G_to_4G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G",
+ (u64)rsrc_stats->nsmr_count_4G_to_32G);
+ pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr",
+ (u64)rsrc_stats->r32G_to_64G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr",
+ (u64)rsrc_stats->r64G_to_128G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr",
+ (u64)rsrc_stats->r128G_to_higher_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr",
+ (u64)rsrc_stats->embedded_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_frmr",
+ (u64)rsrc_stats->frmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps",
+ (u64)rsrc_stats->prefetch_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps",
+ (u64)rsrc_stats->ondemand_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr",
+ (u64)rsrc_stats->phy_mr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_mw",
+ (u64)rsrc_stats->mw);
+
+ /* Print the threshold stats */
+ rsrc_stats = &rdma_stats->th_rsrc_stats;
+
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds",
+ (u64)rsrc_stats->dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds",
+ (u64)rsrc_stats->non_dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps",
+ (u64)rsrc_stats->rc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps",
+ (u64)rsrc_stats->uc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps",
+ (u64)rsrc_stats->ud_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps",
+ (u64)rsrc_stats->rc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps",
+ (u64)rsrc_stats->uc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps",
+ (u64)rsrc_stats->ud_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs",
+ (u64)rsrc_stats->srqs);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs",
+ (u64)rsrc_stats->rbqs);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr",
+ (u64)rsrc_stats->r64K_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr",
+ (u64)rsrc_stats->r64K_to_2M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr",
+ (u64)rsrc_stats->r2M_to_44M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr",
+ (u64)rsrc_stats->r44M_to_1G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr",
+ (u64)rsrc_stats->r1G_to_4G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G",
+ (u64)rsrc_stats->nsmr_count_4G_to_32G);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr",
+ (u64)rsrc_stats->r32G_to_64G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr",
+ (u64)rsrc_stats->r64G_to_128G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr",
+ (u64)rsrc_stats->r128G_to_higher_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr",
+ (u64)rsrc_stats->embedded_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr",
+ (u64)rsrc_stats->frmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps",
+ (u64)rsrc_stats->prefetch_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps",
+ (u64)rsrc_stats->ondemand_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr",
+ (u64)rsrc_stats->phy_mr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_mw",
+ (u64)rsrc_stats->mw);
+ return stats;
+}
+
+static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat
+ (stats, pcur, "roce_frame_bytes",
+ convert_to_64bit(rx_stats->roce_frame_bytes_lo,
+ rx_stats->roce_frame_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops",
+ (u64)rx_stats->roce_frame_icrc_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops",
+ (u64)rx_stats->roce_frame_payload_len_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "ud_drops",
+ (u64)rx_stats->ud_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "qp1_drops",
+ (u64)rx_stats->qp1_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets",
+ (u64)rx_stats->psn_error_request_packets);
+ pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets",
+ (u64)rx_stats->psn_error_resp_packets);
+ pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts",
+ (u64)rx_stats->rnr_nak_timeouts);
+ pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives",
+ (u64)rx_stats->rnr_nak_receives);
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops",
+ (u64)rx_stats->roce_frame_rxmt_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors",
+ (u64)rx_stats->nak_count_psn_sequence_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors",
+ (u64)rx_stats->rc_drop_count_lookup_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks",
+ (u64)rx_stats->rq_rnr_naks);
+ pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks",
+ (u64)rx_stats->srq_rnr_naks);
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frames",
+ convert_to_64bit(rx_stats->roce_frames_lo,
+ rx_stats->roce_frames_hi));
+
+ return stats;
+}
+
+static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "send_pkts",
+ convert_to_64bit(tx_stats->send_pkts_lo,
+ tx_stats->send_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "write_pkts",
+ convert_to_64bit(tx_stats->write_pkts_lo,
+ tx_stats->write_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_pkts",
+ convert_to_64bit(tx_stats->read_pkts_lo,
+ tx_stats->read_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts",
+ convert_to_64bit(tx_stats->read_rsp_pkts_lo,
+ tx_stats->read_rsp_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "ack_pkts",
+ convert_to_64bit(tx_stats->ack_pkts_lo,
+ tx_stats->ack_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "send_bytes",
+ convert_to_64bit(tx_stats->send_bytes_lo,
+ tx_stats->send_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "write_bytes",
+ convert_to_64bit(tx_stats->write_bytes_lo,
+ tx_stats->write_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes",
+ convert_to_64bit(tx_stats->read_req_bytes_lo,
+ tx_stats->read_req_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes",
+ convert_to_64bit(tx_stats->read_rsp_bytes_lo,
+ tx_stats->read_rsp_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts",
+ (u64)tx_stats->ack_timeouts);
+
+ return stats;
+}
+
+static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes",
+ convert_to_64bit(wqe_stats->large_send_rc_wqes_lo,
+ wqe_stats->large_send_rc_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes",
+ convert_to_64bit(wqe_stats->large_write_rc_wqes_lo,
+ wqe_stats->large_write_rc_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_wqes",
+ convert_to_64bit(wqe_stats->read_wqes_lo,
+ wqe_stats->read_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes",
+ convert_to_64bit(wqe_stats->frmr_wqes_lo,
+ wqe_stats->frmr_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes",
+ convert_to_64bit(wqe_stats->mw_bind_wqes_lo,
+ wqe_stats->mw_bind_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes",
+ convert_to_64bit(wqe_stats->invalidate_wqes_lo,
+ wqe_stats->invalidate_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops",
+ (u64)wqe_stats->dpp_wqe_drops);
+ return stats;
+}
+
+static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors",
+ (u64)db_err_stats->sq_doorbell_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors",
+ (u64)db_err_stats->cq_doorbell_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors",
+ (u64)db_err_stats->rq_srq_doorbell_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors",
+ (u64)db_err_stats->cq_overflow_errors);
+ return stats;
+}
+
+static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rx_qp_err_stats *rx_qp_err_stats =
+ &rdma_stats->rx_qp_err_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
+ (u64)rx_qp_err_stats->nak_invalid_requst_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
+ (u64)rx_qp_err_stats->nak_remote_operation_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
+ (u64)rx_qp_err_stats->nak_count_remote_access_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
+ (u64)rx_qp_err_stats->local_length_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
+ (u64)rx_qp_err_stats->local_protection_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
+ (u64)rx_qp_err_stats->local_qp_operation_errors);
+ return stats;
+}
+
+static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_tx_qp_err_stats *tx_qp_err_stats =
+ &rdma_stats->tx_qp_err_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
+ (u64)tx_qp_err_stats->local_length_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
+ (u64)tx_qp_err_stats->local_protection_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
+ (u64)tx_qp_err_stats->local_qp_operation_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors",
+ (u64)tx_qp_err_stats->retry_count_exceeded_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors",
+ (u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors);
+ return stats;
+}
+
+static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
+{
+ int i;
+ char *pstats = dev->stats_mem.debugfs_mem;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_tx_dbg_stats *tx_dbg_stats =
+ &rdma_stats->tx_dbg_stats;
+
+ memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ for (i = 0; i < 100; i++)
+ pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
+ tx_dbg_stats->data[i]);
+
+ return dev->stats_mem.debugfs_mem;
+}
+
+static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
+{
+ int i;
+ char *pstats = dev->stats_mem.debugfs_mem;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rx_dbg_stats *rx_dbg_stats =
+ &rdma_stats->rx_dbg_stats;
+
+ memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ for (i = 0; i < 200; i++)
+ pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
+ rx_dbg_stats->data[i]);
+
+ return dev->stats_mem.debugfs_mem;
+}
+
+static void ocrdma_update_stats(struct ocrdma_dev *dev)
+{
+ ulong now = jiffies, secs;
+ int status = 0;
+
+ secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
+ if (secs) {
+ /* update */
+ status = ocrdma_mbx_rdma_stats(dev, false);
+ if (status)
+ pr_err("%s: stats mbox failed with status = %d\n",
+ __func__, status);
+ dev->last_stats_time = jiffies;
+ }
+}
+
+static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
+ size_t usr_buf_len, loff_t *ppos)
+{
+ struct ocrdma_stats *pstats = filp->private_data;
+ struct ocrdma_dev *dev = pstats->dev;
+ ssize_t status = 0;
+ char *data = NULL;
+
+ /* No partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ mutex_lock(&dev->stats_lock);
+
+ ocrdma_update_stats(dev);
+
+ switch (pstats->type) {
+ case OCRDMA_RSRC_STATS:
+ data = ocrdma_resource_stats(dev);
+ break;
+ case OCRDMA_RXSTATS:
+ data = ocrdma_rx_stats(dev);
+ break;
+ case OCRDMA_WQESTATS:
+ data = ocrdma_wqe_stats(dev);
+ break;
+ case OCRDMA_TXSTATS:
+ data = ocrdma_tx_stats(dev);
+ break;
+ case OCRDMA_DB_ERRSTATS:
+ data = ocrdma_db_errstats(dev);
+ break;
+ case OCRDMA_RXQP_ERRSTATS:
+ data = ocrdma_rxqp_errstats(dev);
+ break;
+ case OCRDMA_TXQP_ERRSTATS:
+ data = ocrdma_txqp_errstats(dev);
+ break;
+ case OCRDMA_TX_DBG_STATS:
+ data = ocrdma_tx_dbg_stats(dev);
+ break;
+ case OCRDMA_RX_DBG_STATS:
+ data = ocrdma_rx_dbg_stats(dev);
+ break;
+
+ default:
+ status = -EFAULT;
+ goto exit;
+ }
+
+ if (usr_buf_len < strlen(data)) {
+ status = -ENOSPC;
+ goto exit;
+ }
+
+ status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data,
+ strlen(data));
+exit:
+ mutex_unlock(&dev->stats_lock);
+ return status;
+}
+
+static int ocrdma_debugfs_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations ocrdma_dbg_ops = {
+ .owner = THIS_MODULE,
+ .open = ocrdma_debugfs_open,
+ .read = ocrdma_dbgfs_ops_read,
+};
+
+void ocrdma_add_port_stats(struct ocrdma_dev *dev)
+{
+ if (!ocrdma_dbgfs_dir)
+ return;
+
+ /* Create post stats base dir */
+ dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
+ if (!dev->dir)
+ goto err;
+
+ dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
+ dev->rsrc_stats.dev = dev;
+ if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
+ &dev->rsrc_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->rx_stats.type = OCRDMA_RXSTATS;
+ dev->rx_stats.dev = dev;
+ if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir,
+ &dev->rx_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->wqe_stats.type = OCRDMA_WQESTATS;
+ dev->wqe_stats.dev = dev;
+ if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir,
+ &dev->wqe_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->tx_stats.type = OCRDMA_TXSTATS;
+ dev->tx_stats.dev = dev;
+ if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir,
+ &dev->tx_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
+ dev->db_err_stats.dev = dev;
+ if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
+ &dev->db_err_stats, &ocrdma_dbg_ops))
+ goto err;
+
+
+ dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
+ dev->tx_qp_err_stats.dev = dev;
+ if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
+ &dev->tx_qp_err_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
+ dev->rx_qp_err_stats.dev = dev;
+ if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
+ &dev->rx_qp_err_stats, &ocrdma_dbg_ops))
+ goto err;
+
+
+ dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
+ dev->tx_dbg_stats.dev = dev;
+ if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
+ &dev->tx_dbg_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
+ dev->rx_dbg_stats.dev = dev;
+ if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
+ &dev->rx_dbg_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ /* Now create dma_mem for stats mbx command */
+ if (!ocrdma_alloc_stats_mem(dev))
+ goto err;
+
+ mutex_init(&dev->stats_lock);
+
+ return;
+err:
+ ocrdma_release_stats_mem(dev);
+ debugfs_remove_recursive(dev->dir);
+ dev->dir = NULL;
+}
+
+void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
+{
+ if (!dev->dir)
+ return;
+ mutex_destroy(&dev->stats_lock);
+ ocrdma_release_stats_mem(dev);
+ debugfs_remove(dev->dir);
+}
+
+void ocrdma_init_debugfs(void)
+{
+ /* Create base dir in debugfs root dir */
+ ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL);
+}
+
+void ocrdma_rem_debugfs(void)
+{
+ debugfs_remove_recursive(ocrdma_dbgfs_dir);
+}
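/*
 * Editor's sketch (not part of the patch): the debugfs pattern used by the
 * stats code above, reduced to one read-only file. Names such as demo_read
 * and demo_fops are illustrative; the real driver formats its buffers in
 * the ocrdma_*_stats() helpers and serializes readers with dev->stats_lock.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

static char demo_buf[128];

static ssize_t demo_read(struct file *file, char __user *ubuf,
			 size_t len, loff_t *ppos)
{
	int n = scnprintf(demo_buf, sizeof(demo_buf), "hits: %d\n", 42);

	/* copies at most n bytes, honouring *ppos like the driver does */
	return simple_read_from_buffer(ubuf, len, ppos, demo_buf, n);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.read  = demo_read,
};

/* at init: debugfs_create_file("demo_stats", S_IRUSR, parent, NULL, &demo_fops); */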
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
new file mode 100644
index 000000000000..5f5e20c46d7c
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -0,0 +1,54 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for *
+ * RoCE (RDMA over Converged Ethernet) adapters. *
+ * Copyright (C) 2008-2014 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#ifndef __OCRDMA_STATS_H__
+#define __OCRDMA_STATS_H__
+
+#include <linux/debugfs.h>
+#include "ocrdma.h"
+#include "ocrdma_hw.h"
+
+#define OCRDMA_MAX_DBGFS_MEM 4096
+
+enum OCRDMA_STATS_TYPE {
+ OCRDMA_RSRC_STATS,
+ OCRDMA_RXSTATS,
+ OCRDMA_WQESTATS,
+ OCRDMA_TXSTATS,
+ OCRDMA_DB_ERRSTATS,
+ OCRDMA_RXQP_ERRSTATS,
+ OCRDMA_TXQP_ERRSTATS,
+ OCRDMA_TX_DBG_STATS,
+ OCRDMA_RX_DBG_STATS
+};
+
+void ocrdma_rem_debugfs(void);
+void ocrdma_init_debugfs(void);
+void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
+void ocrdma_add_port_stats(struct ocrdma_dev *dev);
+
+#endif /* __OCRDMA_STATS_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index e0cc201be41a..edf6211d84b8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
dev = get_ocrdma_dev(ibdev);
memset(sgid, 0, sizeof(*sgid));
- if (index >= OCRDMA_MAX_SGID)
+ if (index > OCRDMA_MAX_SGID)
return -EINVAL;
memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -89,7 +89,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
attr->max_mr = dev->attr.max_mr;
- attr->max_mw = 0;
+ attr->max_mw = dev->attr.max_mw;
attr->max_pd = dev->attr.max_pd;
attr->atomic_cap = 0;
attr->max_fmr = 0;
@@ -144,7 +144,6 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
}
}
-
int ocrdma_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
@@ -267,7 +266,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
if (udata && uctx) {
pd->dpp_enabled =
- dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY;
+ ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
pd->num_dpp_qp =
pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
}
@@ -726,10 +725,10 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
u32 num_pbes)
{
struct ocrdma_pbe *pbe;
- struct ib_umem_chunk *chunk;
+ struct scatterlist *sg;
struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
struct ib_umem *umem = mr->umem;
- int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+ int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;
if (!mr->hwmr.num_pbes)
return;
@@ -739,39 +738,37 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
shift = ilog2(umem->page_size);
- list_for_each_entry(chunk, &umem->chunk_list, list) {
- /* get all the dma regions from the chunk. */
- for (i = 0; i < chunk->nmap; i++) {
- pages = sg_dma_len(&chunk->page_list[i]) >> shift;
- for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
- /* store the page address in pbe */
- pbe->pa_lo =
- cpu_to_le32(sg_dma_address
- (&chunk->page_list[i]) +
- (umem->page_size * pg_cnt));
- pbe->pa_hi =
- cpu_to_le32(upper_32_bits
- ((sg_dma_address
- (&chunk->page_list[i]) +
- umem->page_size * pg_cnt)));
- pbe_cnt += 1;
- total_num_pbes += 1;
- pbe++;
-
- /* if done building pbes, issue the mbx cmd. */
- if (total_num_pbes == num_pbes)
- return;
-
- /* if the given pbl is full storing the pbes,
- * move to next pbl.
- */
- if (pbe_cnt ==
- (mr->hwmr.pbl_size / sizeof(u64))) {
- pbl_tbl++;
- pbe = (struct ocrdma_pbe *)pbl_tbl->va;
- pbe_cnt = 0;
- }
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ pages = sg_dma_len(sg) >> shift;
+ for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+ /* store the page address in pbe */
+ pbe->pa_lo =
+ cpu_to_le32(sg_dma_address
+ (sg) +
+ (umem->page_size * pg_cnt));
+ pbe->pa_hi =
+ cpu_to_le32(upper_32_bits
+ ((sg_dma_address
+ (sg) +
+ umem->page_size * pg_cnt)));
+ pbe_cnt += 1;
+ total_num_pbes += 1;
+ pbe++;
+
+ /* if done building pbes, issue the mbx cmd. */
+ if (total_num_pbes == num_pbes)
+ return;
+
+ /* if the given pbl is full storing the pbes,
+ * move to next pbl.
+ */
+ if (pbe_cnt ==
+ (mr->hwmr.pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ pbe_cnt = 0;
}
+
}
}
}
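/*
 * Editor's sketch (not the driver function): the general shape of the new
 * scatterlist walk that replaces the removed ib_umem_chunk iteration above.
 * "consume_page" is a placeholder for whatever the caller does with each
 * DMA page address (here, writing PBEs).
 */
#include <linux/log2.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

static void walk_umem_pages(struct ib_umem *umem,
			    void (*consume_page)(u64 dma_addr))
{
	struct scatterlist *sg;
	unsigned int pg, npages;
	int entry;
	int shift = ilog2(umem->page_size);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		npages = sg_dma_len(sg) >> shift;
		for (pg = 0; pg < npages; pg++)
			consume_page(sg_dma_address(sg) +
				     ((u64)pg << shift));
	}
}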
@@ -840,8 +837,7 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
- if (mr->hwmr.fr_mr == 0)
- ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
/* it could be user registered memory. */
if (mr->umem)
@@ -910,6 +906,7 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
spin_lock_init(&cq->comp_handler_lock);
INIT_LIST_HEAD(&cq->sq_head);
INIT_LIST_HEAD(&cq->rq_head);
+ cq->first_arm = true;
if (ib_ctx) {
uctx = get_ocrdma_ucontext(ib_ctx);
@@ -927,9 +924,7 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
goto ctx_err;
}
cq->phase = OCRDMA_CQE_VALID;
- cq->arm_needed = true;
dev->cq_tbl[cq->id] = cq;
-
return &cq->ibcq;
ctx_err:
@@ -952,15 +947,52 @@ int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
return status;
}
+static void ocrdma_flush_cq(struct ocrdma_cq *cq)
+{
+ int cqe_cnt;
+ int valid_count = 0;
+ unsigned long flags;
+
+ struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
+ struct ocrdma_cqe *cqe = NULL;
+
+ cqe = cq->va;
+ cqe_cnt = cq->cqe_cnt;
+
+ /* Last irq might have scheduled a polling thread;
+ * sync up with it before hard flushing.
+ */
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ while (cqe_cnt) {
+ if (is_cqe_valid(cq, cqe))
+ valid_count++;
+ cqe++;
+ cqe_cnt--;
+ }
+ ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+}
+
int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
int status;
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_eq *eq = NULL;
struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
int pdid = 0;
+ u32 irq, indx;
- status = ocrdma_mbx_destroy_cq(dev, cq);
+ dev->cq_tbl[cq->id] = NULL;
+ indx = ocrdma_get_eq_table_index(dev, cq->eqn);
+ if (indx == -EINVAL)
+ BUG();
+ eq = &dev->eq_tbl[indx];
+ irq = ocrdma_get_irq(dev, eq);
+ synchronize_irq(irq);
+ ocrdma_flush_cq(cq);
+
+ status = ocrdma_mbx_destroy_cq(dev, cq);
if (cq->ucontext) {
pdid = cq->ucontext->cntxt_pd->id;
ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -969,7 +1001,6 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
ocrdma_get_db_addr(dev, pdid),
dev->nic_info.db_page_size);
}
- dev->cq_tbl[cq->id] = NULL;
kfree(cq);
return status;
@@ -1092,15 +1123,9 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
}
uresp.db_page_addr = usr_db;
uresp.db_page_size = dev->nic_info.db_page_size;
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
- uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
- uresp.db_shift = 24;
- } else {
- uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
- uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
- uresp.db_shift = 16;
- }
+ uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
+ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
+ uresp.db_shift = OCRDMA_DB_RQ_SHIFT;
if (qp->dpp_enabled) {
uresp.dpp_credit = dpp_credit_lmt;
@@ -1132,7 +1157,7 @@ err:
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
struct ocrdma_pd *pd)
{
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
qp->sq_db = dev->nic_info.db +
(pd->id * dev->nic_info.db_page_size) +
OCRDMA_DB_GEN2_SQ_OFFSET;
@@ -1182,7 +1207,6 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}
-
static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
struct ib_qp_init_attr *attrs)
{
@@ -1268,17 +1292,6 @@ gen_err:
return ERR_PTR(status);
}
-
-static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
-{
- if (qp->db_cache) {
- u32 val = qp->rq.dbid | (qp->db_cache <<
- ocrdma_get_num_posted_shift(qp));
- iowrite32(val, qp->rq_db);
- qp->db_cache = 0;
- }
-}
-
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask)
{
@@ -1296,9 +1309,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
*/
if (status < 0)
return status;
- status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
- if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR)
- ocrdma_flush_rq_db(qp);
+ status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
return status;
}
@@ -1510,7 +1521,7 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
int discard_cnt = 0;
u32 cur_getp, stop_getp;
struct ocrdma_cqe *cqe;
- u32 qpn = 0;
+ u32 qpn = 0, wqe_idx = 0;
spin_lock_irqsave(&cq->cq_lock, cq_flags);
@@ -1539,24 +1550,29 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
if (qpn == 0 || qpn != qp->id)
goto skip_cqe;
- /* mark cqe discarded so that it is not picked up later
- * in the poll_cq().
- */
- discard_cnt += 1;
- cqe->cmn.qpn = 0;
if (is_cqe_for_sq(cqe)) {
ocrdma_hwq_inc_tail(&qp->sq);
} else {
if (qp->srq) {
+ wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
+ OCRDMA_CQE_BUFTAG_SHIFT) &
+ qp->srq->rq.max_wqe_idx;
+ if (wqe_idx < 1)
+ BUG();
spin_lock_irqsave(&qp->srq->q_lock, flags);
ocrdma_hwq_inc_tail(&qp->srq->rq);
- ocrdma_srq_toggle_bit(qp->srq, cur_getp);
+ ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
spin_unlock_irqrestore(&qp->srq->q_lock, flags);
} else {
ocrdma_hwq_inc_tail(&qp->rq);
}
}
+ /* mark cqe discarded so that it is not picked up later
+ * in the poll_cq().
+ */
+ discard_cnt += 1;
+ cqe->cmn.qpn = 0;
skip_cqe:
cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
} while (cur_getp != stop_getp);
@@ -1659,7 +1675,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
(srq->pd->id * dev->nic_info.db_page_size);
uresp.db_page_size = dev->nic_info.db_page_size;
uresp.num_rqe_allocated = srq->rq.max_cnt;
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
uresp.db_shift = 24;
} else {
@@ -2009,15 +2025,15 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
fast_reg->size_sge =
get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
- mr = (struct ocrdma_mr *) (unsigned long) qp->dev->stag_arr[(hdr->lkey >> 8) &
- (OCRDMA_MAX_STAG - 1)];
+ mr = (struct ocrdma_mr *) (unsigned long)
+ qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
return 0;
}
static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
- u32 val = qp->sq.dbid | (1 << 16);
+ u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
iowrite32(val, qp->sq_db);
}
@@ -2122,12 +2138,9 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
- u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
+ u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
- if (qp->state != OCRDMA_QPS_INIT)
- iowrite32(val, qp->rq_db);
- else
- qp->db_cache++;
+ iowrite32(val, qp->rq_db);
}
static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
@@ -2213,7 +2226,7 @@ static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
if (row == srq->bit_fields_len)
BUG();
- return indx;
+ return indx + 1; /* Use from index 1 */
}
static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
@@ -2550,10 +2563,13 @@ static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
srq = get_ocrdma_srq(qp->ibqp.srq);
wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
- OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
+ OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
+ if (wqe_idx < 1)
+ BUG();
+
ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
spin_lock_irqsave(&srq->q_lock, flags);
- ocrdma_srq_toggle_bit(srq, wqe_idx);
+ ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
spin_unlock_irqrestore(&srq->q_lock, flags);
ocrdma_hwq_inc_tail(&srq->rq);
}
@@ -2705,10 +2721,18 @@ expand_cqe:
}
stop_cqe:
cq->getp = cur_getp;
- if (polled_hw_cqes || expand || stop) {
- ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
+ if (cq->deferred_arm) {
+ ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
+ polled_hw_cqes);
+ cq->deferred_arm = false;
+ cq->deferred_sol = false;
+ } else {
+ /* We need to pop the CQE. No need to arm */
+ ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
polled_hw_cqes);
+ cq->deferred_sol = false;
}
+
return i;
}
@@ -2780,30 +2804,28 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
u16 cq_id;
- u16 cur_getp;
- struct ocrdma_cqe *cqe;
unsigned long flags;
+ bool arm_needed = false, sol_needed = false;
cq_id = cq->id;
spin_lock_irqsave(&cq->cq_lock, flags);
if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
- cq->armed = true;
+ arm_needed = true;
if (cq_flags & IB_CQ_SOLICITED)
- cq->solicited = true;
-
- cur_getp = cq->getp;
- cqe = cq->va + cur_getp;
+ sol_needed = true;
- /* check whether any valid cqe exist or not, if not then safe to
- * arm. If cqe is not yet consumed, then let it get consumed and then
- * we arm it to avoid false interrupts.
- */
- if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
- cq->arm_needed = false;
- ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
+ if (cq->first_arm) {
+ ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
+ cq->first_arm = false;
+ goto skip_defer;
}
+ cq->deferred_arm = true;
+
+skip_defer:
+ cq->deferred_sol = sol_needed;
spin_unlock_irqrestore(&cq->cq_lock, flags);
+
return 0;
}
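/*
 * Editor's sketch (not the driver code) of the deferred-arm scheme the
 * hunks above introduce: the very first arm rings the doorbell right away,
 * later arm requests only set a flag, and the poll path re-arms while it
 * reports how many CQEs it consumed, avoiding spurious interrupts. The
 * ring_db callback and struct names are illustrative.
 */
#include <linux/types.h>

struct cq_arm_state {
	bool first_arm;
	bool deferred_arm;
	bool deferred_sol;
};

static void demo_arm_cq(struct cq_arm_state *cq, bool solicited,
			void (*ring_db)(bool arm, bool sol, int popped))
{
	if (cq->first_arm) {
		ring_db(true, solicited, 0);	/* nothing polled yet */
		cq->first_arm = false;
	} else {
		cq->deferred_arm = true;	/* next poll will arm */
	}
	cq->deferred_sol = solicited;
}

static void demo_poll_done(struct cq_arm_state *cq, int polled,
			   void (*ring_db)(bool arm, bool sol, int popped))
{
	ring_db(cq->deferred_arm, cq->deferred_sol, polled);
	cq->deferred_arm = false;
	cq->deferred_sol = false;
}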
@@ -2838,7 +2860,8 @@ struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
goto mbx_err;
mr->ibmr.rkey = mr->hwmr.lkey;
mr->ibmr.lkey = mr->hwmr.lkey;
- dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = mr;
+ dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
+ (unsigned long) mr;
return &mr->ibmr;
mbx_err:
ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 1946101419a3..c00ae093b6f8 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -868,8 +868,10 @@ struct qib_devdata {
/* last buffer for user use */
u32 lastctxt_piobuf;
- /* saturating counter of (non-port-specific) device interrupts */
- u32 int_counter;
+ /* reset value */
+ u64 z_int_counter;
+ /* percpu intcounter */
+ u64 __percpu *int_counter;
/* pio bufs allocated per ctxt */
u32 pbufsctxt;
@@ -1184,7 +1186,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *);
void qib_set_ctxtcnt(struct qib_devdata *);
int qib_create_ctxts(struct qib_devdata *dd);
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
-void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
+int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
@@ -1449,6 +1451,10 @@ void qib_nomsi(struct qib_devdata *);
void qib_nomsix(struct qib_devdata *);
void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
+/* interrupts for device */
+u64 qib_int_counter(struct qib_devdata *);
+/* interrupt for all devices */
+u64 qib_sps_ints(void);
/*
* dma_addr wrappers - all 0's invalid for hw
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 1686fd4bda87..5dfda4c5cc9c 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -546,7 +546,7 @@ static ssize_t qib_diagpkt_write(struct file *fp,
size_t count, loff_t *off)
{
u32 __iomem *piobuf;
- u32 plen, clen, pbufn;
+ u32 plen, pbufn, maxlen_reserve;
struct qib_diag_xpkt dp;
u32 *tmpbuf = NULL;
struct qib_devdata *dd;
@@ -590,15 +590,20 @@ static ssize_t qib_diagpkt_write(struct file *fp,
}
ppd = &dd->pport[dp.port - 1];
- /* need total length before first word written */
- /* +1 word is for the qword padding */
- plen = sizeof(u32) + dp.len;
- clen = dp.len >> 2;
-
- if ((plen + 4) > ppd->ibmaxlen) {
+ /*
+ * need total length before first word written, plus 2 Dwords. One Dword
+ * is for padding so we get the full user data when not aligned on
+ * a word boundary. The other Dword is to make sure we have room for the
+ * ICRC which gets tacked on later.
+ */
+ maxlen_reserve = 2 * sizeof(u32);
+ if (dp.len > ppd->ibmaxlen - maxlen_reserve) {
ret = -EINVAL;
- goto bail; /* before writing pbc */
+ goto bail;
}
+
+ plen = sizeof(u32) + dp.len;
+
tmpbuf = vmalloc(plen);
if (!tmpbuf) {
qib_devinfo(dd->pcidev,
@@ -638,11 +643,11 @@ static ssize_t qib_diagpkt_write(struct file *fp,
*/
if (dd->flags & QIB_PIO_FLUSH_WC) {
qib_flush_wc();
- qib_pio_copy(piobuf + 2, tmpbuf, clen - 1);
+ qib_pio_copy(piobuf + 2, tmpbuf, plen - 1);
qib_flush_wc();
- __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
+ __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
} else
- qib_pio_copy(piobuf + 2, tmpbuf, clen);
+ qib_pio_copy(piobuf + 2, tmpbuf, plen);
if (dd->flags & QIB_USE_SPCL_TRIG) {
u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
@@ -689,28 +694,23 @@ int qib_register_observer(struct qib_devdata *dd,
const struct diag_observer *op)
{
struct diag_observer_list_elt *olp;
- int ret = -EINVAL;
+ unsigned long flags;
if (!dd || !op)
- goto bail;
- ret = -ENOMEM;
+ return -EINVAL;
olp = vmalloc(sizeof *olp);
if (!olp) {
pr_err("vmalloc for observer failed\n");
- goto bail;
+ return -ENOMEM;
}
- if (olp) {
- unsigned long flags;
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- olp->op = op;
- olp->next = dd->diag_observer_list;
- dd->diag_observer_list = olp;
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
- ret = 0;
- }
-bail:
- return ret;
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ olp->op = op;
+ olp->next = dd->diag_observer_list;
+ dd->diag_observer_list = olp;
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+
+ return 0;
}
/* Remove all registered observers when device is closed */
diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c
index 2920bb39a65b..59fe092b4b0f 100644
--- a/drivers/infiniband/hw/qib/qib_dma.c
+++ b/drivers/infiniband/hw/qib/qib_dma.c
@@ -108,6 +108,10 @@ static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
ret = 0;
break;
}
+ sg->dma_address = addr + sg->offset;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
}
return ret;
}
@@ -119,21 +123,6 @@ static void qib_unmap_sg(struct ib_device *dev,
BUG_ON(!valid_dma_direction(direction));
}
-static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
-{
- u64 addr = (u64) page_address(sg_page(sg));
-
- if (addr)
- addr += sg->offset;
- return addr;
-}
-
-static unsigned int qib_sg_dma_len(struct ib_device *dev,
- struct scatterlist *sg)
-{
- return sg->length;
-}
-
static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
size_t size, enum dma_data_direction dir)
{
@@ -173,8 +162,6 @@ struct ib_dma_mapping_ops qib_dma_mapping_ops = {
.unmap_page = qib_dma_unmap_page,
.map_sg = qib_map_sg,
.unmap_sg = qib_unmap_sg,
- .dma_address = qib_sg_dma_address,
- .dma_len = qib_sg_dma_len,
.sync_single_for_cpu = qib_sync_single_for_cpu,
.sync_single_for_device = qib_sync_single_for_device,
.alloc_coherent = qib_dma_alloc_coherent,
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 275f247f9fca..b15e34eeef68 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1459,7 +1459,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
cused++;
else
cfree++;
- if (pusable && cfree && cused < inuse) {
+ if (cfree && cused < inuse) {
udd = dd;
inuse = cused;
}
@@ -1578,7 +1578,7 @@ static int do_qib_user_sdma_queue_create(struct file *fp)
struct qib_ctxtdata *rcd = fd->rcd;
struct qib_devdata *dd = rcd->dd;
- if (dd->flags & QIB_HAS_SEND_DMA)
+ if (dd->flags & QIB_HAS_SEND_DMA) {
fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
dd->unit,
@@ -1586,6 +1586,7 @@ static int do_qib_user_sdma_queue_create(struct file *fp)
fd->subctxt);
if (!fd->pq)
return -ENOMEM;
+ }
return 0;
}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index c61e2a92b3c1..cab610ccd50e 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -105,6 +105,7 @@ static int create_file(const char *name, umode_t mode,
static ssize_t driver_stats_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ qib_stats.sps_ints = qib_sps_ints();
return simple_read_from_buffer(buf, count, ppos, &qib_stats,
sizeof qib_stats);
}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 84e593d6007b..d68266ac7619 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1634,9 +1634,7 @@ static irqreturn_t qib_6120intr(int irq, void *data)
goto bail;
}
- qib_stats.sps_ints++;
- if (dd->int_counter != (u32) -1)
- dd->int_counter++;
+ this_cpu_inc(*dd->int_counter);
if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
@@ -1808,7 +1806,8 @@ static int qib_6120_setup_reset(struct qib_devdata *dd)
* isn't set.
*/
dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
- dd->int_counter = 0; /* so we check interrupts work again */
+ /* so we check interrupts work again */
+ dd->z_int_counter = qib_int_counter(dd);
val = dd->control | QLOGIC_IB_C_RESET;
writeq(val, &dd->kregbase[kr_control]);
mb(); /* prevent compiler re-ordering around actual reset */
@@ -3266,7 +3265,9 @@ static int init_6120_variables(struct qib_devdata *dd)
dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
- qib_init_pportdata(ppd, dd, 0, 1);
+ ret = qib_init_pportdata(ppd, dd, 0, 1);
+ if (ret)
+ goto bail;
ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
ppd->link_speed_supported = QIB_IB_SDR;
ppd->link_width_enabled = IB_WIDTH_4X;
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 454c2e7668fe..7dec89fdc124 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1962,10 +1962,7 @@ static irqreturn_t qib_7220intr(int irq, void *data)
goto bail;
}
- qib_stats.sps_ints++;
- if (dd->int_counter != (u32) -1)
- dd->int_counter++;
-
+ this_cpu_inc(*dd->int_counter);
if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
unlikely_7220_intr(dd, istat);
@@ -2120,7 +2117,8 @@ static int qib_setup_7220_reset(struct qib_devdata *dd)
* isn't set.
*/
dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
- dd->int_counter = 0; /* so we check interrupts work again */
+ /* so we check interrupts work again */
+ dd->z_int_counter = qib_int_counter(dd);
val = dd->control | QLOGIC_IB_C_RESET;
writeq(val, &dd->kregbase[kr_control]);
mb(); /* prevent compiler reordering around actual reset */
@@ -4061,7 +4059,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
init_waitqueue_head(&cpspec->autoneg_wait);
INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
- qib_init_pportdata(ppd, dd, 0, 1);
+ ret = qib_init_pportdata(ppd, dd, 0, 1);
+ if (ret)
+ goto bail;
ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index d1bd21319d7d..a7eb32517a04 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -3115,9 +3115,7 @@ static irqreturn_t qib_7322intr(int irq, void *data)
goto bail;
}
- qib_stats.sps_ints++;
- if (dd->int_counter != (u32) -1)
- dd->int_counter++;
+ this_cpu_inc(*dd->int_counter);
/* handle "errors" of various kinds first, device ahead of port */
if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
@@ -3186,9 +3184,7 @@ static irqreturn_t qib_7322pintr(int irq, void *data)
*/
return IRQ_HANDLED;
- qib_stats.sps_ints++;
- if (dd->int_counter != (u32) -1)
- dd->int_counter++;
+ this_cpu_inc(*dd->int_counter);
/* Clear the interrupt bit we expect to be set. */
qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
@@ -3215,9 +3211,7 @@ static irqreturn_t qib_7322bufavail(int irq, void *data)
*/
return IRQ_HANDLED;
- qib_stats.sps_ints++;
- if (dd->int_counter != (u32) -1)
- dd->int_counter++;
+ this_cpu_inc(*dd->int_counter);
/* Clear the interrupt bit we expect to be set. */
qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
@@ -3248,9 +3242,7 @@ static irqreturn_t sdma_intr(int irq, void *data)
*/
return IRQ_HANDLED;
- qib_stats.sps_ints++;
- if (dd->int_counter != (u32) -1)
- dd->int_counter++;
+ this_cpu_inc(*dd->int_counter);
/* Clear the interrupt bit we expect to be set. */
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
@@ -3277,9 +3269,7 @@ static irqreturn_t sdma_idle_intr(int irq, void *data)
*/
return IRQ_HANDLED;
- qib_stats.sps_ints++;
- if (dd->int_counter != (u32) -1)
- dd->int_counter++;
+ this_cpu_inc(*dd->int_counter);
/* Clear the interrupt bit we expect to be set. */
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
@@ -3306,9 +3296,7 @@ static irqreturn_t sdma_progress_intr(int irq, void *data)
*/
return IRQ_HANDLED;
- qib_stats.sps_ints++;
- if (dd->int_counter != (u32) -1)
- dd->int_counter++;
+ this_cpu_inc(*dd->int_counter);
/* Clear the interrupt bit we expect to be set. */
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
@@ -3336,9 +3324,7 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data)
*/
return IRQ_HANDLED;
- qib_stats.sps_ints++;
- if (dd->int_counter != (u32) -1)
- dd->int_counter++;
+ this_cpu_inc(*dd->int_counter);
/* Clear the interrupt bit we expect to be set. */
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
@@ -3723,7 +3709,8 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
dd->pport->cpspec->ibsymdelta = 0;
dd->pport->cpspec->iblnkerrdelta = 0;
dd->pport->cpspec->ibmalfdelta = 0;
- dd->int_counter = 0; /* so we check interrupts work again */
+ /* so we check interrupts work again */
+ dd->z_int_counter = qib_int_counter(dd);
/*
* Keep chip from being accessed until we are ready. Use
@@ -6557,7 +6544,11 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
}
dd->num_pports++;
- qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
+ ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
+ if (ret) {
+ dd->num_pports--;
+ goto bail;
+ }
ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
ppd->link_width_enabled = IB_WIDTH_4X;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 24e802f4ea2f..5b7aeb224a30 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -130,7 +130,6 @@ void qib_set_ctxtcnt(struct qib_devdata *dd)
int qib_create_ctxts(struct qib_devdata *dd)
{
unsigned i;
- int ret;
int local_node_id = pcibus_to_node(dd->pcidev->bus);
if (local_node_id < 0)
@@ -145,8 +144,7 @@ int qib_create_ctxts(struct qib_devdata *dd)
if (!dd->rcd) {
qib_dev_err(dd,
"Unable to allocate ctxtdata array, failing\n");
- ret = -ENOMEM;
- goto done;
+ return -ENOMEM;
}
/* create (one or more) kctxt */
@@ -163,15 +161,14 @@ int qib_create_ctxts(struct qib_devdata *dd)
if (!rcd) {
qib_dev_err(dd,
"Unable to allocate ctxtdata for Kernel ctxt, failing\n");
- ret = -ENOMEM;
- goto done;
+ kfree(dd->rcd);
+ dd->rcd = NULL;
+ return -ENOMEM;
}
rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
rcd->seq_cnt = 1;
}
- ret = 0;
-done:
- return ret;
+ return 0;
}
/*
@@ -233,7 +230,7 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
/*
* Common code for initializing the physical port structure.
*/
-void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
+int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
u8 hw_pidx, u8 port)
{
int size;
@@ -243,6 +240,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
spin_lock_init(&ppd->sdma_lock);
spin_lock_init(&ppd->lflags_lock);
+ spin_lock_init(&ppd->cc_shadow_lock);
init_waitqueue_head(&ppd->state_wait);
init_timer(&ppd->symerr_clear_timer);
@@ -250,8 +248,10 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
ppd->symerr_clear_timer.data = (unsigned long)ppd;
ppd->qib_wq = NULL;
-
- spin_lock_init(&ppd->cc_shadow_lock);
+ ppd->ibport_data.pmastats =
+ alloc_percpu(struct qib_pma_counters);
+ if (!ppd->ibport_data.pmastats)
+ return -ENOMEM;
if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
goto bail;
@@ -299,7 +299,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
goto bail_3;
}
- return;
+ return 0;
bail_3:
kfree(ppd->ccti_entries_shadow);
@@ -313,7 +313,7 @@ bail_1:
bail:
/* User is intentionally disabling the congestion control agent */
if (!qib_cc_table_size)
- return;
+ return 0;
if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
qib_cc_table_size = 0;
@@ -324,7 +324,7 @@ bail:
qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
port);
- return;
+ return 0;
}
static int init_pioavailregs(struct qib_devdata *dd)
@@ -525,6 +525,7 @@ static void enable_chip(struct qib_devdata *dd)
static void verify_interrupt(unsigned long opaque)
{
struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ u64 int_counter;
if (!dd)
return; /* being torn down */
@@ -533,7 +534,8 @@ static void verify_interrupt(unsigned long opaque)
* If we don't have a lid or any interrupts, let the user know and
* don't bother checking again.
*/
- if (dd->int_counter == 0) {
+ int_counter = qib_int_counter(dd) - dd->z_int_counter;
+ if (int_counter == 0) {
if (!dd->f_intr_fallback(dd))
dev_err(&dd->pcidev->dev,
"No interrupts detected, not usable.\n");
@@ -633,6 +635,12 @@ wq_error:
return -ENOMEM;
}
+static void qib_free_pportdata(struct qib_pportdata *ppd)
+{
+ free_percpu(ppd->ibport_data.pmastats);
+ ppd->ibport_data.pmastats = NULL;
+}
+
/**
* qib_init - do the actual initialization sequence on the chip
* @dd: the qlogic_ib device
@@ -920,6 +928,7 @@ static void qib_shutdown_device(struct qib_devdata *dd)
destroy_workqueue(ppd->qib_wq);
ppd->qib_wq = NULL;
}
+ qib_free_pportdata(ppd);
}
qib_update_eeprom_log(dd);
@@ -1079,9 +1088,34 @@ void qib_free_devdata(struct qib_devdata *dd)
#ifdef CONFIG_DEBUG_FS
qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
+ free_percpu(dd->int_counter);
ib_dealloc_device(&dd->verbs_dev.ibdev);
}
+u64 qib_int_counter(struct qib_devdata *dd)
+{
+ int cpu;
+ u64 int_counter = 0;
+
+ for_each_possible_cpu(cpu)
+ int_counter += *per_cpu_ptr(dd->int_counter, cpu);
+ return int_counter;
+}
+
+u64 qib_sps_ints(void)
+{
+ unsigned long flags;
+ struct qib_devdata *dd;
+ u64 sps_ints = 0;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ list_for_each_entry(dd, &qib_dev_list, list) {
+ sps_ints += qib_int_counter(dd);
+ }
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+ return sps_ints;
+}
+
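/*
 * Editor's sketch of the per-CPU counter pattern used above (struct and
 * function names are illustrative): interrupt handlers do a cheap
 * this_cpu_inc(), readers sum over all possible CPUs, and "reset" just
 * records a baseline that is subtracted later instead of zeroing anything.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct demo_dev {
	u64 __percpu *int_counter;
	u64 z_int_counter;		/* baseline recorded at reset time */
};

static u64 demo_int_counter(struct demo_dev *dd)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(dd->int_counter, cpu);
	return sum;
}

/* interrupt handler:  this_cpu_inc(*dd->int_counter);            */
/* chip reset:         dd->z_int_counter = demo_int_counter(dd);  */
/* reported value:     demo_int_counter(dd) - dd->z_int_counter;  */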
/*
* Allocate our primary per-unit data structure. Must be done via verbs
* allocator, because the verbs cleanup process both does cleanup and
@@ -1097,14 +1131,10 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
int ret;
dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
- if (!dd) {
- dd = ERR_PTR(-ENOMEM);
- goto bail;
- }
+ if (!dd)
+ return ERR_PTR(-ENOMEM);
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_ibdev_init(&dd->verbs_dev);
-#endif
+ INIT_LIST_HEAD(&dd->list);
idr_preload(GFP_KERNEL);
spin_lock_irqsave(&qib_devs_lock, flags);
@@ -1121,11 +1151,13 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
if (ret < 0) {
qib_early_err(&pdev->dev,
"Could not allocate unit ID: error %d\n", -ret);
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_ibdev_exit(&dd->verbs_dev);
-#endif
- ib_dealloc_device(&dd->verbs_dev.ibdev);
- dd = ERR_PTR(ret);
+ goto bail;
+ }
+ dd->int_counter = alloc_percpu(u64);
+ if (!dd->int_counter) {
+ ret = -ENOMEM;
+ qib_early_err(&pdev->dev,
+ "Could not allocate per-cpu int_counter\n");
goto bail;
}
@@ -1139,9 +1171,15 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
qib_early_err(&pdev->dev,
"Could not alloc cpulist info, cpu affinity might be wrong\n");
}
-
-bail:
+#ifdef CONFIG_DEBUG_FS
+ qib_dbg_ibdev_init(&dd->verbs_dev);
+#endif
return dd;
+bail:
+ if (!list_empty(&dd->list))
+ list_del_init(&dd->list);
+ ib_dealloc_device(&dd->verbs_dev.ibdev);
+ return ERR_PTR(ret);
}
/*
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index ccb119143d20..edad991d60ed 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1634,6 +1634,23 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
return reply((struct ib_smp *)pmp);
}
+static void qib_snapshot_pmacounters(
+ struct qib_ibport *ibp,
+ struct qib_pma_counters *pmacounters)
+{
+ struct qib_pma_counters *p;
+ int cpu;
+
+ memset(pmacounters, 0, sizeof(*pmacounters));
+ for_each_possible_cpu(cpu) {
+ p = per_cpu_ptr(ibp->pmastats, cpu);
+ pmacounters->n_unicast_xmit += p->n_unicast_xmit;
+ pmacounters->n_unicast_rcv += p->n_unicast_rcv;
+ pmacounters->n_multicast_xmit += p->n_multicast_xmit;
+ pmacounters->n_multicast_rcv += p->n_multicast_rcv;
+ }
+}
+
static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
@@ -1642,6 +1659,7 @@ static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
u64 swords, rwords, spkts, rpkts, xwait;
+ struct qib_pma_counters pma;
u8 port_select = p->port_select;
memset(pmp->data, 0, sizeof(pmp->data));
@@ -1664,10 +1682,17 @@ static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
p->port_rcv_data = cpu_to_be64(rwords);
p->port_xmit_packets = cpu_to_be64(spkts);
p->port_rcv_packets = cpu_to_be64(rpkts);
- p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
- p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
- p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
- p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);
+
+ qib_snapshot_pmacounters(ibp, &pma);
+
+ p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
+ - ibp->z_unicast_xmit);
+ p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
+ - ibp->z_unicast_rcv);
+ p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
+ - ibp->z_multicast_xmit);
+ p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
+ - ibp->z_multicast_rcv);
bail:
return reply((struct ib_smp *) pmp);
@@ -1795,6 +1820,7 @@ static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
u64 swords, rwords, spkts, rpkts, xwait;
+ struct qib_pma_counters pma;
qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
@@ -1810,17 +1836,19 @@ static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
ibp->z_port_rcv_packets = rpkts;
+ qib_snapshot_pmacounters(ibp, &pma);
+
if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
- ibp->n_unicast_xmit = 0;
+ ibp->z_unicast_xmit = pma.n_unicast_xmit;
if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
- ibp->n_unicast_rcv = 0;
+ ibp->z_unicast_rcv = pma.n_unicast_rcv;
if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
- ibp->n_multicast_xmit = 0;
+ ibp->z_multicast_xmit = pma.n_multicast_xmit;
if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
- ibp->n_multicast_rcv = 0;
+ ibp->z_multicast_rcv = pma.n_multicast_rcv;
return pma_get_portcounters_ext(pmp, ibdev, port);
}
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index e6687ded8210..9bbb55347cc1 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -232,8 +232,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
struct qib_mr *mr;
struct ib_umem *umem;
- struct ib_umem_chunk *chunk;
- int n, m, i;
+ struct scatterlist *sg;
+ int n, m, entry;
struct ib_mr *ret;
if (length == 0) {
@@ -246,9 +246,7 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (IS_ERR(umem))
return (void *) umem;
- n = 0;
- list_for_each_entry(chunk, &umem->chunk_list, list)
- n += chunk->nents;
+ n = umem->nmap;
mr = alloc_mr(n, pd);
if (IS_ERR(mr)) {
@@ -268,11 +266,10 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mr.page_shift = ilog2(umem->page_size);
m = 0;
n = 0;
- list_for_each_entry(chunk, &umem->chunk_list, list) {
- for (i = 0; i < chunk->nents; i++) {
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
void *vaddr;
- vaddr = page_address(sg_page(&chunk->page_list[i]));
+ vaddr = page_address(sg_page(sg));
if (!vaddr) {
ret = ERR_PTR(-EINVAL);
goto bail;
@@ -284,7 +281,6 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
m++;
n = 0;
}
- }
}
ret = &mr->ibmr;
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 3ab341320ead..2f2501890c4e 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -752,7 +752,7 @@ void qib_send_rc_ack(struct qib_qp *qp)
qib_flush_wc();
qib_sendbuf_done(dd, pbufn);
- ibp->n_unicast_xmit++;
+ this_cpu_inc(ibp->pmastats->n_unicast_xmit);
goto done;
queue_ack:
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 357b6cfcd46c..4c07a8b34ffe 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -703,6 +703,7 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
ohdr->bth[2] = cpu_to_be32(bth2);
+ this_cpu_inc(ibp->pmastats->n_unicast_xmit);
}
/**
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 3ad651c3356c..aaf7039f8ed2 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -280,11 +280,11 @@ int qib_make_ud_req(struct qib_qp *qp)
ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
if (ah_attr->dlid != QIB_PERMISSIVE_LID)
- ibp->n_multicast_xmit++;
+ this_cpu_inc(ibp->pmastats->n_multicast_xmit);
else
- ibp->n_unicast_xmit++;
+ this_cpu_inc(ibp->pmastats->n_unicast_xmit);
} else {
- ibp->n_unicast_xmit++;
+ this_cpu_inc(ibp->pmastats->n_unicast_xmit);
lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
if (unlikely(lid == ppd->lid)) {
/*
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 165aee2ca8a0..d2806cae234c 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -52,6 +52,17 @@
/* attempt to drain the queue for 5secs */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+/*
+ * track how many times a process opens this driver.
+ */
+static struct rb_root qib_user_sdma_rb_root = RB_ROOT;
+
+struct qib_user_sdma_rb_node {
+ struct rb_node node;
+ int refcount;
+ pid_t pid;
+};
+
struct qib_user_sdma_pkt {
struct list_head list; /* list element */
@@ -120,15 +131,60 @@ struct qib_user_sdma_queue {
/* dma page table */
struct rb_root dma_pages_root;
+ struct qib_user_sdma_rb_node *sdma_rb_node;
+
/* protect everything above... */
struct mutex lock;
};
+static struct qib_user_sdma_rb_node *
+qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
+{
+ struct qib_user_sdma_rb_node *sdma_rb_node;
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ sdma_rb_node = container_of(node,
+ struct qib_user_sdma_rb_node, node);
+ if (pid < sdma_rb_node->pid)
+ node = node->rb_left;
+ else if (pid > sdma_rb_node->pid)
+ node = node->rb_right;
+ else
+ return sdma_rb_node;
+ }
+ return NULL;
+}
+
+static int
+qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
+{
+ struct rb_node **node = &(root->rb_node);
+ struct rb_node *parent = NULL;
+ struct qib_user_sdma_rb_node *got;
+
+ while (*node) {
+ got = container_of(*node, struct qib_user_sdma_rb_node, node);
+ parent = *node;
+ if (new->pid < got->pid)
+ node = &((*node)->rb_left);
+ else if (new->pid > got->pid)
+ node = &((*node)->rb_right);
+ else
+ return 0;
+ }
+
+ rb_link_node(&new->node, parent, node);
+ rb_insert_color(&new->node, root);
+ return 1;
+}
+
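/*
 * Editor's sketch of how the pid-keyed rb node above is used (the real
 * driver does this in qib_user_sdma_queue_create/destroy further down):
 * look up the caller's pid, bump the refcount on a hit, otherwise allocate
 * and insert a fresh node. demo_get_rb_node is an illustrative name and
 * assumes <linux/slab.h> for kmalloc.
 */
static struct qib_user_sdma_rb_node *
demo_get_rb_node(struct rb_root *root, pid_t pid)
{
	struct qib_user_sdma_rb_node *node;

	node = qib_user_sdma_rb_search(root, pid);
	if (node) {
		node->refcount++;
		return node;
	}

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return NULL;

	node->refcount = 1;
	node->pid = pid;
	qib_user_sdma_rb_insert(root, node);
	return node;
}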
struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
struct qib_user_sdma_queue *pq =
kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
+ struct qib_user_sdma_rb_node *sdma_rb_node;
if (!pq)
goto done;
@@ -138,6 +194,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
pq->num_pending = 0;
pq->num_sending = 0;
pq->added = 0;
+ pq->sdma_rb_node = NULL;
INIT_LIST_HEAD(&pq->sent);
spin_lock_init(&pq->sent_lock);
@@ -163,8 +220,30 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
pq->dma_pages_root = RB_ROOT;
+ sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
+ current->pid);
+ if (sdma_rb_node) {
+ sdma_rb_node->refcount++;
+ } else {
+ int ret;
+ sdma_rb_node = kmalloc(sizeof(
+ struct qib_user_sdma_rb_node), GFP_KERNEL);
+ if (!sdma_rb_node)
+ goto err_rb;
+
+ sdma_rb_node->refcount = 1;
+ sdma_rb_node->pid = current->pid;
+
+ ret = qib_user_sdma_rb_insert(&qib_user_sdma_rb_root,
+ sdma_rb_node);
+ BUG_ON(ret == 0);
+ }
+ pq->sdma_rb_node = sdma_rb_node;
+
goto done;
+err_rb:
+ dma_pool_destroy(pq->header_cache);
err_slab:
kmem_cache_destroy(pq->pkt_slab);
err_kfree:
@@ -1020,8 +1099,13 @@ void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
if (!pq)
return;
- kmem_cache_destroy(pq->pkt_slab);
+ pq->sdma_rb_node->refcount--;
+ if (pq->sdma_rb_node->refcount == 0) {
+ rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
+ kfree(pq->sdma_rb_node);
+ }
dma_pool_destroy(pq->header_cache);
+ kmem_cache_destroy(pq->pkt_slab);
kfree(pq);
}
@@ -1241,26 +1325,52 @@ static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
struct qib_user_sdma_queue *pq,
struct list_head *pktlist, int count)
{
- int ret = 0;
unsigned long flags;
if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
return -ECOMM;
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- if (unlikely(!__qib_sdma_running(ppd))) {
- ret = -ECOMM;
- goto unlock;
+ /* non-blocking mode */
+ if (pq->sdma_rb_node->refcount > 1) {
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (unlikely(!__qib_sdma_running(ppd))) {
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ return -ECOMM;
+ }
+ pq->num_pending += count;
+ list_splice_tail_init(pktlist, &ppd->sdma_userpending);
+ qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ return 0;
}
+ /* In this case, descriptors from this process are not
+ * linked to the ppd pending queue and the interrupt handler
+ * won't touch this process, so it is OK to modify directly
+ * without taking the sdma lock.
+ */
+
pq->num_pending += count;
- list_splice_tail_init(pktlist, &ppd->sdma_userpending);
- qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
+ /*
+ * Blocking mode for a single-rail process: we must
+ * release/regain sdma_lock to give other processes a
+ * chance to make progress. This is important for
+ * performance.
+ */
+ do {
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (unlikely(!__qib_sdma_running(ppd))) {
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ return -ECOMM;
+ }
+ qib_user_sdma_send_desc(ppd, pktlist);
+ if (!list_empty(pktlist))
+ qib_sdma_make_progress(ppd);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ } while (!list_empty(pktlist));
-unlock:
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return ret;
+ return 0;
}
int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
@@ -1290,7 +1400,7 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
qib_user_sdma_queue_clean(ppd, pq);
while (dim) {
- int mxp = 8;
+ int mxp = 1;
int ndesc = 0;
ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 092b0bb1bb78..9bcfbd842980 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -662,7 +662,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
if (mcast == NULL)
goto drop;
- ibp->n_multicast_rcv++;
+ this_cpu_inc(ibp->pmastats->n_multicast_rcv);
list_for_each_entry_rcu(p, &mcast->qp_list, list)
qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
/*
@@ -678,8 +678,8 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
&rcd->lookaside_qp->refcount))
wake_up(
&rcd->lookaside_qp->wait);
- rcd->lookaside_qp = NULL;
- }
+ rcd->lookaside_qp = NULL;
+ }
}
if (!rcd->lookaside_qp) {
qp = qib_lookup_qpn(ibp, qp_num);
@@ -689,7 +689,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
rcd->lookaside_qpn = qp_num;
} else
qp = rcd->lookaside_qp;
- ibp->n_unicast_rcv++;
+ this_cpu_inc(ibp->pmastats->n_unicast_rcv);
qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
}
return;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index a01c7d2cf541..bfc8948fdd35 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -664,6 +664,13 @@ struct qib_opcode_stats_perctx {
struct qib_opcode_stats stats[128];
};
+struct qib_pma_counters {
+ u64 n_unicast_xmit; /* total unicast packets sent */
+ u64 n_unicast_rcv; /* total unicast packets received */
+ u64 n_multicast_xmit; /* total multicast packets sent */
+ u64 n_multicast_rcv; /* total multicast packets received */
+};
+
struct qib_ibport {
struct qib_qp __rcu *qp0;
struct qib_qp __rcu *qp1;
@@ -680,10 +687,11 @@ struct qib_ibport {
__be64 mkey;
__be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
u64 tid; /* TID for traps */
- u64 n_unicast_xmit; /* total unicast packets sent */
- u64 n_unicast_rcv; /* total unicast packets received */
- u64 n_multicast_xmit; /* total multicast packets sent */
- u64 n_multicast_rcv; /* total multicast packets received */
+ struct qib_pma_counters __percpu *pmastats;
+ u64 z_unicast_xmit; /* starting count for PMA */
+ u64 z_unicast_rcv; /* starting count for PMA */
+ u64 z_multicast_xmit; /* starting count for PMA */
+ u64 z_multicast_rcv; /* starting count for PMA */
u64 z_symbol_error_counter; /* starting count for PMA */
u64 z_link_error_recovery_counter; /* starting count for PMA */
u64 z_link_downed_counter; /* starting count for PMA */
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 16755cdab2c0..801a1d6937e4 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -286,7 +286,7 @@ iter_chunk:
err = iommu_map(pd->domain, va_start, pa_start,
size, flags);
if (err) {
- usnic_err("Failed to map va 0x%lx pa 0x%pa size 0x%zx with err %d\n",
+ usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
goto err_out;
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index dd03cfe596d6..25f195ef44b0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -5,7 +5,7 @@
* Copyright (C) 2004 Alex Aizman
* Copyright (C) 2005 Mike Christie
* Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
* maintained by openib-general@openib.org
*
* This software is available to you under a choice of one of two
@@ -82,6 +82,8 @@ static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
int iser_debug_level = 0;
+bool iser_pi_enable = false;
+int iser_pi_guard = 0;
MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
MODULE_LICENSE("Dual BSD/GPL");
@@ -91,6 +93,12 @@ MODULE_VERSION(DRV_VER);
module_param_named(debug_level, iser_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
+module_param_named(pi_enable, iser_pi_enable, bool, 0644);
+MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
+
+module_param_named(pi_guard, iser_pi_guard, int, 0644);
+MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:CRC)");
+
struct iser_global ig;
void
@@ -138,8 +146,8 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc)
{
- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
- struct iser_device *device = iser_conn->ib_conn->device;
+ struct iser_conn *ib_conn = task->conn->dd_data;
+ struct iser_device *device = ib_conn->device;
struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr;
@@ -153,7 +161,7 @@ int iser_initialize_task_headers(struct iscsi_task *task,
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = device->mr->lkey;
- iser_task->iser_conn = iser_conn;
+ iser_task->ib_conn = ib_conn;
return 0;
}
/**
@@ -176,6 +184,8 @@ iscsi_iser_task_init(struct iscsi_task *task)
iser_task->command_sent = 0;
iser_task_rdma_init(iser_task);
+ iser_task->sc = task->sc;
+
return 0;
}
@@ -278,10 +288,9 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_tx_desc *tx_desc = &iser_task->desc;
-
- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
- struct iser_device *device = iser_conn->ib_conn->device;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
+ struct iser_conn *ib_conn = task->conn->dd_data;
+ struct iser_device *device = ib_conn->device;
ib_dma_unmap_single(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
@@ -296,14 +305,25 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
}
}
+static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
+{
+ struct iscsi_iser_task *iser_task = task->dd_data;
+
+ if (iser_task->dir[ISER_DIR_IN])
+ return iser_check_task_pi_status(iser_task, ISER_DIR_IN,
+ sector);
+ else
+ return iser_check_task_pi_status(iser_task, ISER_DIR_OUT,
+ sector);
+}
+
static struct iscsi_cls_conn *
iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
{
struct iscsi_conn *conn;
struct iscsi_cls_conn *cls_conn;
- struct iscsi_iser_conn *iser_conn;
- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
+ cls_conn = iscsi_conn_setup(cls_session, 0, conn_idx);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
@@ -314,10 +334,6 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
*/
conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN;
- iser_conn = conn->dd_data;
- conn->dd_data = iser_conn;
- iser_conn->iscsi_conn = conn;
-
return cls_conn;
}
@@ -325,8 +341,7 @@ static void
iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
- struct iser_conn *ib_conn = iser_conn->ib_conn;
+ struct iser_conn *ib_conn = conn->dd_data;
iscsi_conn_teardown(cls_conn);
/*
@@ -335,7 +350,7 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
* we free it here.
*/
if (ib_conn) {
- ib_conn->iser_conn = NULL;
+ ib_conn->iscsi_conn = NULL;
iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
}
}
@@ -346,7 +361,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
int is_leading)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_iser_conn *iser_conn;
struct iscsi_session *session;
struct iser_conn *ib_conn;
struct iscsi_endpoint *ep;
@@ -373,11 +387,11 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
/* binds the iSER connection retrieved from the previously
* connected ep_handle to the iSCSI layer connection. exchanges
* connection pointers */
- iser_info("binding iscsi/iser conn %p %p to ib_conn %p\n",
- conn, conn->dd_data, ib_conn);
- iser_conn = conn->dd_data;
- ib_conn->iser_conn = iser_conn;
- iser_conn->ib_conn = ib_conn;
+ iser_info("binding iscsi conn %p to ib_conn %p\n", conn, ib_conn);
+
+ conn->dd_data = ib_conn;
+ ib_conn->iscsi_conn = conn;
+
iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */
return 0;
}
@@ -386,8 +400,7 @@ static void
iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
- struct iser_conn *ib_conn = iser_conn->ib_conn;
+ struct iser_conn *ib_conn = conn->dd_data;
/*
* Userspace may have goofed up and not bound the connection or
@@ -401,7 +414,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*/
iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
}
- iser_conn->ib_conn = NULL;
+ conn->dd_data = NULL;
}
static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
@@ -413,6 +426,17 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_host_free(shost);
}
+static inline unsigned int
+iser_dif_prot_caps(int prot_caps)
+{
+ return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ? SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION : 0) |
+ ((prot_caps & IB_PROT_T10DIF_TYPE_2) ? SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION : 0) |
+ ((prot_caps & IB_PROT_T10DIF_TYPE_3) ? SHOST_DIF_TYPE3_PROTECTION |
+ SHOST_DIX_TYPE3_PROTECTION : 0);
+}
+
static struct iscsi_cls_session *
iscsi_iser_session_create(struct iscsi_endpoint *ep,
uint16_t cmds_max, uint16_t qdepth,
@@ -437,8 +461,18 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
* older userspace tools (before 2.0-870) did not pass us
* the leading conn's ep so this will be NULL;
*/
- if (ep)
+ if (ep) {
ib_conn = ep->dd_data;
+ if (ib_conn->pi_support) {
+ u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap;
+
+ scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
+ if (iser_pi_guard)
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
+ else
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+ }
+ }
if (iscsi_host_add(shost,
ep ? ib_conn->device->ib_device->dma_device : NULL))
@@ -618,7 +652,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
struct iser_conn *ib_conn;
ib_conn = ep->dd_data;
- if (ib_conn->iser_conn)
+ if (ib_conn->iscsi_conn)
/*
* Must suspend xmit path if the ep is bound to the
* iscsi_conn, so we know we are not accessing the ib_conn
@@ -626,7 +660,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
*
* This may not be bound if the ep poll failed.
*/
- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+ iscsi_suspend_tx(ib_conn->iscsi_conn);
iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state);
@@ -732,6 +766,7 @@ static struct iscsi_transport iscsi_iser_transport = {
.xmit_task = iscsi_iser_task_xmit,
.cleanup_task = iscsi_iser_cleanup_task,
.alloc_pdu = iscsi_iser_pdu_alloc,
+ .check_protection = iscsi_iser_check_protection,
/* recovery */
.session_recovery_timedout = iscsi_session_recovery_timedout,
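Editor's note: the iser_dif_prot_caps() helper added above is just a bit translation from the device's T10-DIF capabilities to the SCSI host's DIF/DIX protection flags. A minimal user-space sketch of the same mapping follows; the flag values are local stand-ins for the IB_PROT_* and SHOST_* constants and are assumed for illustration only.

#include <stdio.h>

/* Illustrative stand-ins for the IB and SCSI-host protection flags
 * (values assumed for this sketch only, not taken from kernel headers). */
enum { PROT_T10DIF_TYPE_1 = 1 << 0, PROT_T10DIF_TYPE_2 = 1 << 1, PROT_T10DIF_TYPE_3 = 1 << 2 };
enum {
	DIF_TYPE1 = 1 << 0, DIF_TYPE2 = 1 << 1, DIF_TYPE3 = 1 << 2,
	DIX_TYPE1 = 1 << 4, DIX_TYPE2 = 1 << 5, DIX_TYPE3 = 1 << 6,
};

/* Same shape as iser_dif_prot_caps(): each device capability bit turns on
 * the matching DIF (target-side) and DIX (initiator-side) host flags. */
static unsigned int dif_prot_caps(int prot_caps)
{
	return ((prot_caps & PROT_T10DIF_TYPE_1) ? DIF_TYPE1 | DIX_TYPE1 : 0) |
	       ((prot_caps & PROT_T10DIF_TYPE_2) ? DIF_TYPE2 | DIX_TYPE2 : 0) |
	       ((prot_caps & PROT_T10DIF_TYPE_3) ? DIF_TYPE3 | DIX_TYPE3 : 0);
}

int main(void)
{
	/* A device reporting Type-1 and Type-3 support ends up advertising
	 * DIF/DIX Type-1 and Type-3 protection on the SCSI host. */
	printf("shost prot mask: 0x%x\n",
	       dif_prot_caps(PROT_T10DIF_TYPE_1 | PROT_T10DIF_TYPE_3));
	return 0;
}

Building the mask this way lets the single scsi_host_set_prot() call in the hunk above advertise exactly the DIF types the HCA reports.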
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 67914027c614..324129f80d40 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -8,7 +8,7 @@
*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
- * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -46,6 +46,8 @@
#include <linux/printk.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
@@ -67,7 +69,7 @@
#define DRV_NAME "iser"
#define PFX DRV_NAME ": "
-#define DRV_VER "1.1"
+#define DRV_VER "1.3"
#define iser_dbg(fmt, arg...) \
do { \
@@ -134,10 +136,21 @@
ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS)
+/* Max registration work requests per command */
+#define ISER_MAX_REG_WR_PER_CMD 5
+
+/* For signature commands we don't support DATAOUTs, so no need to make room for them */
+#define ISER_QP_SIG_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \
+ (1 + ISER_MAX_REG_WR_PER_CMD) + \
+ ISER_MAX_TX_MISC_PDUS + \
+ ISER_MAX_RX_MISC_PDUS)
+
#define ISER_VER 0x10
#define ISER_WSV 0x08
#define ISER_RSV 0x04
+#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
+
struct iser_hdr {
u8 flags;
u8 rsvd[3];
@@ -201,7 +214,6 @@ struct iser_data_buf {
/* fwd declarations */
struct iser_device;
struct iser_cq_desc;
-struct iscsi_iser_conn;
struct iscsi_iser_task;
struct iscsi_endpoint;
@@ -258,6 +270,7 @@ struct iscsi_iser_task;
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
+ struct ib_device_attr dev_attr;
struct ib_cq *rx_cq[ISER_MAX_CQ];
struct ib_cq *tx_cq[ISER_MAX_CQ];
struct ib_mr *mr;
@@ -277,17 +290,35 @@ struct iser_device {
enum iser_data_dir cmd_dir);
};
+#define ISER_CHECK_GUARD 0xc0
+#define ISER_CHECK_REFTAG 0x0f
+#define ISER_CHECK_APPTAG 0x30
+
+enum iser_reg_indicator {
+ ISER_DATA_KEY_VALID = 1 << 0,
+ ISER_PROT_KEY_VALID = 1 << 1,
+ ISER_SIG_KEY_VALID = 1 << 2,
+ ISER_FASTREG_PROTECTED = 1 << 3,
+};
+
+struct iser_pi_context {
+ struct ib_mr *prot_mr;
+ struct ib_fast_reg_page_list *prot_frpl;
+ struct ib_mr *sig_mr;
+};
+
struct fast_reg_descriptor {
struct list_head list;
/* For fast registration - FRWR */
struct ib_mr *data_mr;
struct ib_fast_reg_page_list *data_frpl;
- /* Valid for fast registration flag */
- bool valid;
+ struct iser_pi_context *pi_ctx;
+ /* registration indicators container */
+ u8 reg_indicators;
};
struct iser_conn {
- struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+ struct iscsi_conn *iscsi_conn;
struct iscsi_endpoint *ep;
enum iser_ib_conn_state state; /* rdma connection state */
atomic_t refcount;
@@ -310,6 +341,9 @@ struct iser_conn {
unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
+ bool pi_support;
+
+ /* Connection memory registration pool */
union {
struct {
struct ib_fmr_pool *pool; /* pool of IB FMRs */
@@ -319,24 +353,22 @@ struct iser_conn {
struct {
struct list_head pool;
int pool_size;
- } frwr;
- } fastreg;
-};
-
-struct iscsi_iser_conn {
- struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
- struct iser_conn *ib_conn; /* iSER IB conn */
+ } fastreg;
+ };
};
struct iscsi_iser_task {
struct iser_tx_desc desc;
- struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
enum iser_task_status status;
+ struct scsi_cmnd *sc;
int command_sent; /* set if command sent */
int dir[ISER_DIRS_NUM]; /* set if dir use*/
struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
+ struct iser_data_buf prot[ISER_DIRS_NUM]; /* prot desc */
+ struct iser_data_buf prot_copy[ISER_DIRS_NUM];/* prot copy */
};
struct iser_page_vec {
@@ -362,6 +394,8 @@ struct iser_global {
extern struct iser_global ig;
extern int iser_debug_level;
+extern bool iser_pi_enable;
+extern int iser_pi_guard;
/* allocate connection resources needed for rdma functionality */
int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
@@ -401,13 +435,15 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_free_rx_descriptors(struct iser_conn *ib_conn);
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
- enum iser_data_dir cmd_dir);
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_data_buf *mem_copy,
+ enum iser_data_dir cmd_dir);
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
-int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *task,
- enum iser_data_dir cmd_dir);
+int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *ib_conn,
struct sockaddr_in *src_addr,
@@ -420,8 +456,8 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
-void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir);
int iser_post_recvl(struct iser_conn *ib_conn);
int iser_post_recvm(struct iser_conn *ib_conn, int count);
@@ -432,12 +468,15 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir);
-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *data);
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session);
int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
void iser_free_fmr_pool(struct iser_conn *ib_conn);
-int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
-void iser_free_frwr_pool(struct iser_conn *ib_conn);
+int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max);
+void iser_free_fastreg_pool(struct iser_conn *ib_conn);
+u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir, sector_t *sector);
#endif
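Editor's note: ISER_QP_SIG_MAX_REQ_DTOS above is plain budgeting arithmetic for the send queue when T10-PI is enabled. The sketch below reproduces it; ISER_MAX_REG_WR_PER_CMD comes from the hunk, while the remaining constants are assumed values used only so the example computes something.

#include <stdio.h>

#define MAX_REG_WR_PER_CMD   5     /* from the hunk above */
#define DEF_XMIT_CMDS_MAX    512   /* assumed for this sketch */
#define MAX_TX_MISC_PDUS     15    /* assumed for this sketch */
#define MAX_RX_MISC_PDUS     15    /* assumed for this sketch */

int main(void)
{
	/* Each command may need one send WR plus up to MAX_REG_WR_PER_CMD
	 * registration WRs (data, protection and signature registrations
	 * plus local invalidates), so the signature-enabled QP is sized for
	 * cmds * (1 + regs) plus the miscellaneous control PDUs. */
	int sig_max_req_dtos = DEF_XMIT_CMDS_MAX * (1 + MAX_REG_WR_PER_CMD) +
			       MAX_TX_MISC_PDUS + MAX_RX_MISC_PDUS;

	printf("max_send_wr with T10-PI: %d\n", sig_max_req_dtos);
	return 0;
}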
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 334f34b1cd46..2e2d903db838 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -49,7 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
{
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_device *device = iser_task->iser_conn->ib_conn->device;
+ struct iser_device *device = iser_task->ib_conn->device;
struct iser_regd_buf *regd_buf;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -62,11 +62,22 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
if (err)
return err;
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];
+
+ err = iser_dma_map_task_data(iser_task,
+ pbuf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+ }
+
if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
iser_err("Total data length: %ld, less than EDTL: "
"%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
iser_task->data[ISER_DIR_IN].data_len, edtl,
- task->itt, iser_task->iser_conn);
+ task->itt, iser_task->ib_conn);
return -EINVAL;
}
@@ -99,7 +110,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_device *device = iser_task->iser_conn->ib_conn->device;
+ struct iser_device *device = iser_task->ib_conn->device;
struct iser_regd_buf *regd_buf;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -113,6 +124,17 @@ iser_prepare_write_cmd(struct iscsi_task *task,
if (err)
return err;
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];
+
+ err = iser_dma_map_task_data(iser_task,
+ pbuf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+ }
+
if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
iser_err("Total data length: %ld, less than EDTL: %d, "
"in WRITE cmd BHS itt: %d, conn: 0x%p\n",
@@ -327,7 +349,7 @@ free_login_buf:
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_conn *ib_conn = conn->dd_data;
struct iscsi_session *session = conn->session;
iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
@@ -340,19 +362,18 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
* response) and no posted send buffers left - they must have been
* consumed during previous login phases.
*/
- WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
- WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
+ WARN_ON(ib_conn->post_recv_buf_count != 1);
+ WARN_ON(atomic_read(&ib_conn->post_send_buf_count) != 0);
if (session->discovery_sess) {
iser_info("Discovery session, re-using login RX buffer\n");
return 0;
} else
iser_info("Normal session, posting batch of RX %d buffers\n",
- iser_conn->ib_conn->min_posted_rx);
+ ib_conn->min_posted_rx);
/* Initial post receive buffers */
- if (iser_post_recvm(iser_conn->ib_conn,
- iser_conn->ib_conn->min_posted_rx))
+ if (iser_post_recvm(ib_conn, ib_conn->min_posted_rx))
return -ENOMEM;
return 0;
@@ -364,11 +385,11 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task)
{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_conn *ib_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
unsigned long edtl;
int err;
- struct iser_data_buf *data_buf;
+ struct iser_data_buf *data_buf, *prot_buf;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
@@ -377,20 +398,28 @@ int iser_send_command(struct iscsi_conn *conn,
/* build the tx desc regd header and add it to the tx desc dto */
tx_desc->type = ISCSI_TX_SCSI_COMMAND;
- iser_create_send_desc(iser_conn->ib_conn, tx_desc);
+ iser_create_send_desc(ib_conn, tx_desc);
- if (hdr->flags & ISCSI_FLAG_CMD_READ)
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
data_buf = &iser_task->data[ISER_DIR_IN];
- else
+ prot_buf = &iser_task->prot[ISER_DIR_IN];
+ } else {
data_buf = &iser_task->data[ISER_DIR_OUT];
+ prot_buf = &iser_task->prot[ISER_DIR_OUT];
+ }
if (scsi_sg_count(sc)) { /* using a scatter list */
data_buf->buf = scsi_sglist(sc);
data_buf->size = scsi_sg_count(sc);
}
-
data_buf->data_len = scsi_bufflen(sc);
+ if (scsi_prot_sg_count(sc)) {
+ prot_buf->buf = scsi_prot_sglist(sc);
+ prot_buf->size = scsi_prot_sg_count(sc);
+ prot_buf->data_len = sc->prot_sdb->length;
+ }
+
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
err = iser_prepare_read_cmd(task, edtl);
if (err)
@@ -408,7 +437,7 @@ int iser_send_command(struct iscsi_conn *conn,
iser_task->status = ISER_TASK_STATUS_STARTED;
- err = iser_post_send(iser_conn->ib_conn, tx_desc);
+ err = iser_post_send(ib_conn, tx_desc);
if (!err)
return 0;
@@ -424,7 +453,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task,
struct iscsi_data *hdr)
{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_conn *ib_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = NULL;
struct iser_regd_buf *regd_buf;
@@ -473,7 +502,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
itt, buf_offset, data_seg_len);
- err = iser_post_send(iser_conn->ib_conn, tx_desc);
+ err = iser_post_send(ib_conn, tx_desc);
if (!err)
return 0;
@@ -486,19 +515,18 @@ send_data_out_error:
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task)
{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_conn *ib_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *mdesc = &iser_task->desc;
unsigned long data_seg_len;
int err = 0;
struct iser_device *device;
- struct iser_conn *ib_conn = iser_conn->ib_conn;
/* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL;
- iser_create_send_desc(iser_conn->ib_conn, mdesc);
+ iser_create_send_desc(ib_conn, mdesc);
- device = iser_conn->ib_conn->device;
+ device = ib_conn->device;
data_seg_len = ntoh24(task->hdr->dlength);
@@ -513,14 +541,13 @@ int iser_send_control(struct iscsi_conn *conn,
ib_conn->login_req_dma, task->data_count,
DMA_TO_DEVICE);
- memcpy(iser_conn->ib_conn->login_req_buf, task->data,
- task->data_count);
+ memcpy(ib_conn->login_req_buf, task->data, task->data_count);
ib_dma_sync_single_for_device(device->ib_device,
ib_conn->login_req_dma, task->data_count,
DMA_TO_DEVICE);
- tx_dsg->addr = iser_conn->ib_conn->login_req_dma;
+ tx_dsg->addr = ib_conn->login_req_dma;
tx_dsg->length = task->data_count;
tx_dsg->lkey = device->mr->lkey;
mdesc->num_sge = 2;
@@ -529,7 +556,7 @@ int iser_send_control(struct iscsi_conn *conn,
if (task == conn->login_task) {
iser_dbg("op %x dsl %lx, posting login rx buffer\n",
task->hdr->opcode, data_seg_len);
- err = iser_post_recvl(iser_conn->ib_conn);
+ err = iser_post_recvl(ib_conn);
if (err)
goto send_control_error;
err = iser_post_rx_bufs(conn, task->hdr);
@@ -537,7 +564,7 @@ int iser_send_control(struct iscsi_conn *conn,
goto send_control_error;
}
- err = iser_post_send(iser_conn->ib_conn, mdesc);
+ err = iser_post_send(ib_conn, mdesc);
if (!err)
return 0;
@@ -553,7 +580,6 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
unsigned long rx_xfer_len,
struct iser_conn *ib_conn)
{
- struct iscsi_iser_conn *conn = ib_conn->iser_conn;
struct iscsi_hdr *hdr;
u64 rx_dma;
int rx_buflen, outstanding, count, err;
@@ -575,17 +601,17 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
- iscsi_iser_recv(conn->iscsi_conn, hdr,
- rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);
+ iscsi_iser_recv(ib_conn->iscsi_conn, hdr, rx_desc->data,
+ rx_xfer_len - ISER_HEADERS_LEN);
ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
- rx_buflen, DMA_FROM_DEVICE);
+ rx_buflen, DMA_FROM_DEVICE);
/* decrementing conn->post_recv_buf_count only --after-- freeing the *
* task eliminates the need to worry on tasks which are completed in *
* parallel to the execution of iser_conn_term. So the code that waits *
* for the posted rx bufs refcount to become zero handles everything */
- conn->ib_conn->post_recv_buf_count--;
+ ib_conn->post_recv_buf_count--;
if (rx_dma == ib_conn->login_resp_dma)
return;
@@ -635,6 +661,9 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
iser_task->data[ISER_DIR_IN].data_len = 0;
iser_task->data[ISER_DIR_OUT].data_len = 0;
+ iser_task->prot[ISER_DIR_IN].data_len = 0;
+ iser_task->prot[ISER_DIR_OUT].data_len = 0;
+
memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
sizeof(struct iser_regd_buf));
memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
@@ -643,28 +672,63 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
- struct iser_device *device = iser_task->iser_conn->ib_conn->device;
- int is_rdma_aligned = 1;
+ struct iser_device *device = iser_task->ib_conn->device;
+ int is_rdma_data_aligned = 1;
+ int is_rdma_prot_aligned = 1;
+ int prot_count = scsi_prot_sg_count(iser_task->sc);
/* if we were reading, copy back to unaligned sglist,
* anyway dma_unmap and free the copy
*/
if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
- is_rdma_aligned = 0;
- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
+ is_rdma_data_aligned = 0;
+ iser_finalize_rdma_unaligned_sg(iser_task,
+ &iser_task->data[ISER_DIR_IN],
+ &iser_task->data_copy[ISER_DIR_IN],
+ ISER_DIR_IN);
}
+
if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
- is_rdma_aligned = 0;
- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
+ is_rdma_data_aligned = 0;
+ iser_finalize_rdma_unaligned_sg(iser_task,
+ &iser_task->data[ISER_DIR_OUT],
+ &iser_task->data_copy[ISER_DIR_OUT],
+ ISER_DIR_OUT);
+ }
+
+ if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_prot_aligned = 0;
+ iser_finalize_rdma_unaligned_sg(iser_task,
+ &iser_task->prot[ISER_DIR_IN],
+ &iser_task->prot_copy[ISER_DIR_IN],
+ ISER_DIR_IN);
+ }
+
+ if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_prot_aligned = 0;
+ iser_finalize_rdma_unaligned_sg(iser_task,
+ &iser_task->prot[ISER_DIR_OUT],
+ &iser_task->prot_copy[ISER_DIR_OUT],
+ ISER_DIR_OUT);
}
- if (iser_task->dir[ISER_DIR_IN])
+ if (iser_task->dir[ISER_DIR_IN]) {
device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
+ if (is_rdma_data_aligned)
+ iser_dma_unmap_task_data(iser_task,
+ &iser_task->data[ISER_DIR_IN]);
+ if (prot_count && is_rdma_prot_aligned)
+ iser_dma_unmap_task_data(iser_task,
+ &iser_task->prot[ISER_DIR_IN]);
+ }
- if (iser_task->dir[ISER_DIR_OUT])
+ if (iser_task->dir[ISER_DIR_OUT]) {
device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
-
- /* if the data was unaligned, it was already unmapped and then copied */
- if (is_rdma_aligned)
- iser_dma_unmap_task_data(iser_task);
+ if (is_rdma_data_aligned)
+ iser_dma_unmap_task_data(iser_task,
+ &iser_task->data[ISER_DIR_OUT]);
+ if (prot_count && is_rdma_prot_aligned)
+ iser_dma_unmap_task_data(iser_task,
+ &iser_task->prot[ISER_DIR_OUT]);
+ }
}
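Editor's note: iser_task_rdma_finalize() above now has to tear down up to four buffers (data and protection, in both directions), and the unmap versus copy-back decision depends on whether a bounce copy was used. A small standalone model of that decision follows; the struct and field names are local to this sketch.

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of the finalize decisions above: a buffer is unmapped
 * directly only when it was RDMA-aligned, i.e. no bounce copy was used;
 * bounced buffers are unmapped and freed by the copy-back path instead. */
struct task_model {
	bool used_dir;          /* dir[...] was set for this direction */
	bool data_bounced;      /* data_copy[...].copy_buf != NULL */
	bool prot_bounced;      /* prot_copy[...].copy_buf != NULL */
	int  prot_sg_count;     /* scsi_prot_sg_count(sc) */
};

static void finalize_dir(const struct task_model *t, const char *dir)
{
	if (!t->used_dir)
		return;
	printf("%s: unreg rdma mem\n", dir);
	if (!t->data_bounced)
		printf("%s: dma-unmap data sg\n", dir);
	if (t->prot_sg_count && !t->prot_bounced)
		printf("%s: dma-unmap protection sg\n", dir);
}

int main(void)
{
	/* An aligned READ that carried 8 protection SG entries. */
	struct task_model read_task = { true, false, false, 8 };

	finalize_dir(&read_task, "IN");
	return 0;
}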
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 1ce0c97d2ccb..47acd3ad3a17 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -45,13 +45,19 @@
* iser_start_rdma_unaligned_sg
*/
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *data,
+ struct iser_data_buf *data_copy,
enum iser_data_dir cmd_dir)
{
- int dma_nents;
- struct ib_device *dev;
+ struct ib_device *dev = iser_task->ib_conn->device->ib_device;
+ struct scatterlist *sgl = (struct scatterlist *)data->buf;
+ struct scatterlist *sg;
char *mem = NULL;
- struct iser_data_buf *data = &iser_task->data[cmd_dir];
- unsigned long cmd_data_len = data->data_len;
+ unsigned long cmd_data_len = 0;
+ int dma_nents, i;
+
+ for_each_sg(sgl, sg, data->size, i)
+ cmd_data_len += ib_sg_dma_len(dev, sg);
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
mem = (void *)__get_free_pages(GFP_ATOMIC,
@@ -61,17 +67,16 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
if (mem == NULL) {
iser_err("Failed to allocate mem size %d %d for copying sglist\n",
- data->size,(int)cmd_data_len);
+ data->size, (int)cmd_data_len);
return -ENOMEM;
}
if (cmd_dir == ISER_DIR_OUT) {
/* copy the unaligned sg the buffer which is used for RDMA */
- struct scatterlist *sgl = (struct scatterlist *)data->buf;
- struct scatterlist *sg;
int i;
char *p, *from;
+ sgl = (struct scatterlist *)data->buf;
p = mem;
for_each_sg(sgl, sg, data->size, i) {
from = kmap_atomic(sg_page(sg));
@@ -83,39 +88,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
}
}
- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
- iser_task->data_copy[cmd_dir].buf =
- &iser_task->data_copy[cmd_dir].sg_single;
- iser_task->data_copy[cmd_dir].size = 1;
+ sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
+ data_copy->buf = &data_copy->sg_single;
+ data_copy->size = 1;
+ data_copy->copy_buf = mem;
- iser_task->data_copy[cmd_dir].copy_buf = mem;
-
- dev = iser_task->iser_conn->ib_conn->device->ib_device;
- dma_nents = ib_dma_map_sg(dev,
- &iser_task->data_copy[cmd_dir].sg_single,
- 1,
+ dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
BUG_ON(dma_nents == 0);
- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
+ data_copy->dma_nents = dma_nents;
+ data_copy->data_len = cmd_data_len;
+
return 0;
}
/**
* iser_finalize_rdma_unaligned_sg
*/
+
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
+ struct iser_data_buf *data,
+ struct iser_data_buf *data_copy,
+ enum iser_data_dir cmd_dir)
{
struct ib_device *dev;
- struct iser_data_buf *mem_copy;
unsigned long cmd_data_len;
- dev = iser_task->iser_conn->ib_conn->device->ib_device;
- mem_copy = &iser_task->data_copy[cmd_dir];
+ dev = iser_task->ib_conn->device->ib_device;
- ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -127,10 +130,10 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
int i;
/* copy back read RDMA to unaligned sg */
- mem = mem_copy->copy_buf;
+ mem = data_copy->copy_buf;
- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
- sg_size = iser_task->data[ISER_DIR_IN].size;
+ sgl = (struct scatterlist *)data->buf;
+ sg_size = data->size;
p = mem;
for_each_sg(sgl, sg, sg_size, i) {
@@ -143,15 +146,15 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
}
}
- cmd_data_len = iser_task->data[cmd_dir].data_len;
+ cmd_data_len = data->data_len;
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
- free_pages((unsigned long)mem_copy->copy_buf,
+ free_pages((unsigned long)data_copy->copy_buf,
ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
else
- kfree(mem_copy->copy_buf);
+ kfree(data_copy->copy_buf);
- mem_copy->copy_buf = NULL;
+ data_copy->copy_buf = NULL;
}
#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
@@ -319,7 +322,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct ib_device *dev;
iser_task->dir[iser_dir] = 1;
- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+ dev = iser_task->ib_conn->device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
if (data->dma_nents == 0) {
@@ -329,31 +332,23 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
return 0;
}
-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *data)
{
struct ib_device *dev;
- struct iser_data_buf *data;
- dev = iser_task->iser_conn->ib_conn->device->ib_device;
-
- if (iser_task->dir[ISER_DIR_IN]) {
- data = &iser_task->data[ISER_DIR_IN];
- ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
- }
-
- if (iser_task->dir[ISER_DIR_OUT]) {
- data = &iser_task->data[ISER_DIR_OUT];
- ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
- }
+ dev = iser_task->ib_conn->device->ib_device;
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
}
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
struct ib_device *ibdev,
+ struct iser_data_buf *mem,
+ struct iser_data_buf *mem_copy,
enum iser_data_dir cmd_dir,
int aligned_len)
{
- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
+ struct iscsi_conn *iscsi_conn = iser_task->ib_conn->iscsi_conn;
iscsi_conn->fmr_unalign_cnt++;
iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
@@ -363,12 +358,12 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
iser_data_buf_dump(mem, ibdev);
/* unmap the command data before accessing it */
- iser_dma_unmap_task_data(iser_task);
+ iser_dma_unmap_task_data(iser_task, mem);
/* allocate copy buf, if we are writing, copy the */
/* unaligned scatterlist, dma map the copy */
- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
- return -ENOMEM;
+ if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
+ return -ENOMEM;
return 0;
}
@@ -382,7 +377,7 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
+ struct iser_conn *ib_conn = iser_task->ib_conn;
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
@@ -396,7 +391,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(iser_task, ibdev,
+ err = fall_to_bounce_buf(iser_task, ibdev, mem,
+ &iser_task->data_copy[cmd_dir],
cmd_dir, aligned_len);
if (err) {
iser_err("failed to allocate bounce buffer\n");
@@ -422,8 +418,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
(unsigned long)regd_buf->reg.va,
(unsigned long)regd_buf->reg.len);
} else { /* use FMR for multiple dma entries */
- iser_page_vec_build(mem, ib_conn->fastreg.fmr.page_vec, ibdev);
- err = iser_reg_page_vec(ib_conn, ib_conn->fastreg.fmr.page_vec,
+ iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
+ err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
&regd_buf->reg);
if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
@@ -431,12 +427,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
- ib_conn->fastreg.fmr.page_vec->data_size,
- ib_conn->fastreg.fmr.page_vec->length,
- ib_conn->fastreg.fmr.page_vec->offset);
- for (i = 0; i < ib_conn->fastreg.fmr.page_vec->length; i++)
+ ib_conn->fmr.page_vec->data_size,
+ ib_conn->fmr.page_vec->length,
+ ib_conn->fmr.page_vec->offset);
+ for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
iser_err("page_vec[%d] = 0x%llx\n", i,
- (unsigned long long) ib_conn->fastreg.fmr.page_vec->pages[i]);
+ (unsigned long long) ib_conn->fmr.page_vec->pages[i]);
}
if (err)
return err;
@@ -444,94 +440,280 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
return 0;
}
-static int iser_fast_reg_mr(struct fast_reg_descriptor *desc,
- struct iser_conn *ib_conn,
+static inline enum ib_t10_dif_type
+scsi2ib_prot_type(unsigned char prot_type)
+{
+ switch (prot_type) {
+ case SCSI_PROT_DIF_TYPE0:
+ return IB_T10DIF_NONE;
+ case SCSI_PROT_DIF_TYPE1:
+ return IB_T10DIF_TYPE1;
+ case SCSI_PROT_DIF_TYPE2:
+ return IB_T10DIF_TYPE2;
+ case SCSI_PROT_DIF_TYPE3:
+ return IB_T10DIF_TYPE3;
+ default:
+ return IB_T10DIF_NONE;
+ }
+}
+
+
+static int
+iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
+{
+ unsigned char scsi_ptype = scsi_get_prot_type(sc);
+
+ sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
+ sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
+ sig_attrs->mem.sig.dif.pi_interval = sc->device->sector_size;
+ sig_attrs->wire.sig.dif.pi_interval = sc->device->sector_size;
+
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_STRIP:
+ sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
+ sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
+ sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
+ 0xffffffff;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
+ sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
+ 0xffffffff;
+ sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
+ break;
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
+ sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
+ 0xffffffff;
+ sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
+ sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
+ 0xffffffff;
+ break;
+ default:
+ iser_err("Unsupported PI operation %d\n",
+ scsi_get_prot_op(sc));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+static int
+iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
+{
+ switch (scsi_get_prot_type(sc)) {
+ case SCSI_PROT_DIF_TYPE0:
+ *mask = 0x0;
+ break;
+ case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
+ *mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG;
+ break;
+ case SCSI_PROT_DIF_TYPE3:
+ *mask = ISER_CHECK_GUARD;
+ break;
+ default:
+ iser_err("Unsupported protection type %d\n",
+ scsi_get_prot_type(sc));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+ struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
+ struct ib_sge *prot_sge, struct ib_sge *sig_sge)
+{
+ struct iser_conn *ib_conn = iser_task->ib_conn;
+ struct iser_pi_context *pi_ctx = desc->pi_ctx;
+ struct ib_send_wr sig_wr, inv_wr;
+ struct ib_send_wr *bad_wr, *wr = NULL;
+ struct ib_sig_attrs sig_attrs;
+ int ret;
+ u32 key;
+
+ memset(&sig_attrs, 0, sizeof(sig_attrs));
+ ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
+ if (ret)
+ goto err;
+
+ ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
+ if (ret)
+ goto err;
+
+ if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
+ memset(&inv_wr, 0, sizeof(inv_wr));
+ inv_wr.opcode = IB_WR_LOCAL_INV;
+ inv_wr.wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+ wr = &inv_wr;
+ /* Bump the key */
+ key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
+ ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
+ }
+
+ memset(&sig_wr, 0, sizeof(sig_wr));
+ sig_wr.opcode = IB_WR_REG_SIG_MR;
+ sig_wr.wr_id = ISER_FASTREG_LI_WRID;
+ sig_wr.sg_list = data_sge;
+ sig_wr.num_sge = 1;
+ sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
+ sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+ if (scsi_prot_sg_count(iser_task->sc))
+ sig_wr.wr.sig_handover.prot = prot_sge;
+ sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+
+ if (!wr)
+ wr = &sig_wr;
+ else
+ wr->next = &sig_wr;
+
+ ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
+ if (ret) {
+ iser_err("reg_sig_mr failed, ret:%d\n", ret);
+ goto err;
+ }
+ desc->reg_indicators &= ~ISER_SIG_KEY_VALID;
+
+ sig_sge->lkey = pi_ctx->sig_mr->lkey;
+ sig_sge->addr = 0;
+ sig_sge->length = data_sge->length + prot_sge->length;
+ if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT ||
+ scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) {
+ sig_sge->length += (data_sge->length /
+ iser_task->sc->device->sector_size) * 8;
+ }
+
+ iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
+ sig_sge->addr, sig_sge->length,
+ sig_sge->lkey);
+err:
+ return ret;
+}
+
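Editor's note: the sig_sge length adjustment in iser_reg_sig_mr() above accounts for the 8-byte DIF tuple the HCA adds or removes per sector when the operation is WRITE_INSERT or READ_STRIP, since the protection bytes then exist only on the wire. A short sketch of that arithmetic, with an assumed 512-byte sector size:

#include <stdio.h>

int main(void)
{
	unsigned int sector_size = 512;          /* assumed, stands in for sc->device->sector_size */
	unsigned int data_len    = 64 * 1024;    /* example 64 KB transfer */
	unsigned int prot_len    = 0;            /* no memory-domain PI for INSERT/STRIP */

	/* Mirrors the sig_sge sizing above: the wire-side length grows by
	 * 8 bytes for every sector of data covered by the transfer. */
	unsigned int sig_len = data_len + prot_len +
			       (data_len / sector_size) * 8;

	printf("wire length: %u bytes (%u sectors + PI)\n",
	       sig_len, data_len / sector_size);
	return 0;
}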
+static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct iser_regd_buf *regd_buf,
- u32 offset, unsigned int data_size,
- unsigned int page_list_len)
+ struct iser_data_buf *mem,
+ enum iser_reg_indicator ind,
+ struct ib_sge *sge)
{
+ struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
+ struct iser_conn *ib_conn = iser_task->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+ struct ib_mr *mr;
+ struct ib_fast_reg_page_list *frpl;
struct ib_send_wr fastreg_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
u8 key;
- int ret;
+ int ret, offset, size, plen;
+
+ /* if there is a single dma entry, the dma mr suffices */
+ if (mem->dma_nents == 1) {
+ struct scatterlist *sg = (struct scatterlist *)mem->buf;
- if (!desc->valid) {
+ sge->lkey = device->mr->lkey;
+ sge->addr = ib_sg_dma_address(ibdev, &sg[0]);
+ sge->length = ib_sg_dma_len(ibdev, &sg[0]);
+
+ iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
+ sge->lkey, sge->addr, sge->length);
+ return 0;
+ }
+
+ if (ind == ISER_DATA_KEY_VALID) {
+ mr = desc->data_mr;
+ frpl = desc->data_frpl;
+ } else {
+ mr = desc->pi_ctx->prot_mr;
+ frpl = desc->pi_ctx->prot_frpl;
+ }
+
+ plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
+ &offset, &size);
+ if (plen * SIZE_4K < size) {
+ iser_err("fast reg page_list too short to hold this SG\n");
+ return -EINVAL;
+ }
+
+ if (!(desc->reg_indicators & ind)) {
memset(&inv_wr, 0, sizeof(inv_wr));
+ inv_wr.wr_id = ISER_FASTREG_LI_WRID;
inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.send_flags = IB_SEND_SIGNALED;
- inv_wr.ex.invalidate_rkey = desc->data_mr->rkey;
+ inv_wr.ex.invalidate_rkey = mr->rkey;
wr = &inv_wr;
/* Bump the key */
- key = (u8)(desc->data_mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(desc->data_mr, ++key);
+ key = (u8)(mr->rkey & 0x000000FF);
+ ib_update_fast_reg_key(mr, ++key);
}
/* Prepare FASTREG WR */
memset(&fastreg_wr, 0, sizeof(fastreg_wr));
+ fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
fastreg_wr.opcode = IB_WR_FAST_REG_MR;
- fastreg_wr.send_flags = IB_SEND_SIGNALED;
- fastreg_wr.wr.fast_reg.iova_start = desc->data_frpl->page_list[0] + offset;
- fastreg_wr.wr.fast_reg.page_list = desc->data_frpl;
- fastreg_wr.wr.fast_reg.page_list_len = page_list_len;
+ fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
+ fastreg_wr.wr.fast_reg.page_list = frpl;
+ fastreg_wr.wr.fast_reg.page_list_len = plen;
fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
- fastreg_wr.wr.fast_reg.length = data_size;
- fastreg_wr.wr.fast_reg.rkey = desc->data_mr->rkey;
+ fastreg_wr.wr.fast_reg.length = size;
+ fastreg_wr.wr.fast_reg.rkey = mr->rkey;
fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ);
- if (!wr) {
+ if (!wr)
wr = &fastreg_wr;
- atomic_inc(&ib_conn->post_send_buf_count);
- } else {
+ else
wr->next = &fastreg_wr;
- atomic_add(2, &ib_conn->post_send_buf_count);
- }
ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
if (ret) {
- if (bad_wr->next)
- atomic_sub(2, &ib_conn->post_send_buf_count);
- else
- atomic_dec(&ib_conn->post_send_buf_count);
iser_err("fast registration failed, ret:%d\n", ret);
return ret;
}
- desc->valid = false;
+ desc->reg_indicators &= ~ind;
- regd_buf->reg.mem_h = desc;
- regd_buf->reg.lkey = desc->data_mr->lkey;
- regd_buf->reg.rkey = desc->data_mr->rkey;
- regd_buf->reg.va = desc->data_frpl->page_list[0] + offset;
- regd_buf->reg.len = data_size;
- regd_buf->reg.is_mr = 1;
+ sge->lkey = mr->lkey;
+ sge->addr = frpl->page_list[0] + offset;
+ sge->length = size;
return ret;
}
/**
- * iser_reg_rdma_mem_frwr - Registers memory intended for RDMA,
+ * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
* using Fast Registration WR (if possible) obtaining rkey and va
*
* returns 0 on success, errno code on failure
*/
-int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
+int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
{
- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
+ struct iser_conn *ib_conn = iser_task->ib_conn;
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
- struct fast_reg_descriptor *desc;
- unsigned int data_size, page_list_len;
+ struct fast_reg_descriptor *desc = NULL;
+ struct ib_sge data_sge;
int err, aligned_len;
unsigned long flags;
- u32 offset;
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(iser_task, ibdev,
+ err = fall_to_bounce_buf(iser_task, ibdev, mem,
+ &iser_task->data_copy[cmd_dir],
cmd_dir, aligned_len);
if (err) {
iser_err("failed to allocate bounce buffer\n");
@@ -540,41 +722,79 @@ int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *iser_task,
mem = &iser_task->data_copy[cmd_dir];
}
- /* if there a single dma entry, dma mr suffices */
- if (mem->dma_nents == 1) {
- struct scatterlist *sg = (struct scatterlist *)mem->buf;
-
- regd_buf->reg.lkey = device->mr->lkey;
- regd_buf->reg.rkey = device->mr->rkey;
- regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
- regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
- regd_buf->reg.is_mr = 0;
- } else {
+ if (mem->dma_nents != 1 ||
+ scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
spin_lock_irqsave(&ib_conn->lock, flags);
- desc = list_first_entry(&ib_conn->fastreg.frwr.pool,
+ desc = list_first_entry(&ib_conn->fastreg.pool,
struct fast_reg_descriptor, list);
list_del(&desc->list);
spin_unlock_irqrestore(&ib_conn->lock, flags);
- page_list_len = iser_sg_to_page_vec(mem, device->ib_device,
- desc->data_frpl->page_list,
- &offset, &data_size);
-
- if (page_list_len * SIZE_4K < data_size) {
- iser_err("fast reg page_list too short to hold this SG\n");
- err = -EINVAL;
- goto err_reg;
+ regd_buf->reg.mem_h = desc;
+ }
+
+ err = iser_fast_reg_mr(iser_task, regd_buf, mem,
+ ISER_DATA_KEY_VALID, &data_sge);
+ if (err)
+ goto err_reg;
+
+ if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
+ struct ib_sge prot_sge, sig_sge;
+
+ memset(&prot_sge, 0, sizeof(prot_sge));
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ mem = &iser_task->prot[cmd_dir];
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+ err = fall_to_bounce_buf(iser_task, ibdev, mem,
+ &iser_task->prot_copy[cmd_dir],
+ cmd_dir, aligned_len);
+ if (err) {
+ iser_err("failed to allocate bounce buffer\n");
+ return err;
+ }
+ mem = &iser_task->prot_copy[cmd_dir];
+ }
+
+ err = iser_fast_reg_mr(iser_task, regd_buf, mem,
+ ISER_PROT_KEY_VALID, &prot_sge);
+ if (err)
+ goto err_reg;
}
- err = iser_fast_reg_mr(desc, ib_conn, regd_buf,
- offset, data_size, page_list_len);
- if (err)
- goto err_reg;
+ err = iser_reg_sig_mr(iser_task, desc, &data_sge,
+ &prot_sge, &sig_sge);
+ if (err) {
+ iser_err("Failed to register signature mr\n");
+ return err;
+ }
+ desc->reg_indicators |= ISER_FASTREG_PROTECTED;
+
+ regd_buf->reg.lkey = sig_sge.lkey;
+ regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
+ regd_buf->reg.va = sig_sge.addr;
+ regd_buf->reg.len = sig_sge.length;
+ regd_buf->reg.is_mr = 1;
+ } else {
+ if (desc) {
+ regd_buf->reg.rkey = desc->data_mr->rkey;
+ regd_buf->reg.is_mr = 1;
+ } else {
+ regd_buf->reg.rkey = device->mr->rkey;
+ regd_buf->reg.is_mr = 0;
+ }
+
+ regd_buf->reg.lkey = data_sge.lkey;
+ regd_buf->reg.va = data_sge.addr;
+ regd_buf->reg.len = data_sge.length;
}
return 0;
err_reg:
- spin_lock_irqsave(&ib_conn->lock, flags);
- list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool);
- spin_unlock_irqrestore(&ib_conn->lock, flags);
+ if (desc) {
+ spin_lock_irqsave(&ib_conn->lock, flags);
+ list_add_tail(&desc->list, &ib_conn->fastreg.pool);
+ spin_unlock_irqrestore(&ib_conn->lock, flags);
+ }
+
return err;
}
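Editor's note: both iser_reg_sig_mr() and iser_fast_reg_mr() above post a LOCAL_INV and bump the MR key before reusing a descriptor whose key is no longer marked valid. The sketch below models the key arithmetic as this editor understands ib_update_fast_reg_key(): the upper 24 bits (the MR index) are kept and only the low 8-bit key changes; it is a sketch, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

/* Models the "bump the key" step in the hunks above: take the low byte of
 * the current rkey, increment it (wrapping at 256), and keep the rest. */
static uint32_t bump_rkey(uint32_t rkey)
{
	uint8_t key = (uint8_t)(rkey & 0x000000FF);

	return (rkey & 0xFFFFFF00) | (uint8_t)(key + 1);
}

int main(void)
{
	uint32_t rkey = 0x12345678;

	printf("old rkey 0x%08x -> new rkey 0x%08x\n", rkey, bump_rkey(rkey));
	return 0;
}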
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ca37edef2791..32849f2becde 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
- * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -71,17 +71,14 @@ static void iser_event_handler(struct ib_event_handler *handler,
*/
static int iser_create_device_ib_res(struct iser_device *device)
{
- int i, j;
struct iser_cq_desc *cq_desc;
- struct ib_device_attr *dev_attr;
+ struct ib_device_attr *dev_attr = &device->dev_attr;
+ int ret, i, j;
- dev_attr = kmalloc(sizeof(*dev_attr), GFP_KERNEL);
- if (!dev_attr)
- return -ENOMEM;
-
- if (ib_query_device(device->ib_device, dev_attr)) {
+ ret = ib_query_device(device->ib_device, dev_attr);
+ if (ret) {
pr_warn("Query device failed for %s\n", device->ib_device->name);
- goto dev_attr_err;
+ return ret;
}
/* Assign function handles - based on FMR support */
@@ -94,14 +91,14 @@ static int iser_create_device_ib_res(struct iser_device *device)
device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
} else
if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
- iser_info("FRWR supported, using FRWR for registration\n");
- device->iser_alloc_rdma_reg_res = iser_create_frwr_pool;
- device->iser_free_rdma_reg_res = iser_free_frwr_pool;
- device->iser_reg_rdma_mem = iser_reg_rdma_mem_frwr;
- device->iser_unreg_rdma_mem = iser_unreg_mem_frwr;
+ iser_info("FastReg supported, using FastReg for registration\n");
+ device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
+ device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
+ device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
+ device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
} else {
- iser_err("IB device does not support FMRs nor FRWRs, can't register memory\n");
- goto dev_attr_err;
+ iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
+ return -1;
}
device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
@@ -158,7 +155,6 @@ static int iser_create_device_ib_res(struct iser_device *device)
if (ib_register_event_handler(&device->event_handler))
goto handler_err;
- kfree(dev_attr);
return 0;
handler_err:
@@ -178,8 +174,6 @@ pd_err:
kfree(device->cq_desc);
cq_desc_err:
iser_err("failed to allocate an IB resource\n");
-dev_attr_err:
- kfree(dev_attr);
return -1;
}
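Editor's note: the hunk above renames the FRWR handles to FastReg but keeps the same dispatch idea: probe the device once and install one set of registration callbacks for the lifetime of the connection. The sketch below models that selection with local stand-in capability bits and function pointers; it is not the kernel's probing logic.

#include <stdbool.h>
#include <stdio.h>

/* Local stand-ins: one bit for "device supports FMRs", one for the
 * memory-management extensions needed for fast registration. */
enum { CAP_FMR = 1 << 0, CAP_MEM_MGT_EXTENSIONS = 1 << 1 };

struct reg_ops {
	const char *name;
	int (*reg)(void);
	void (*unreg)(void);
};

static int reg_fmr(void)        { puts("register via FMR pool"); return 0; }
static void unreg_fmr(void)     { puts("unregister FMR"); }
static int reg_fastreg(void)    { puts("register via FastReg WR"); return 0; }
static void unreg_fastreg(void) { puts("unregister FastReg"); }

/* Prefer FMRs when available, fall back to FastReg, otherwise fail. */
static bool pick_reg_ops(unsigned int caps, struct reg_ops *ops)
{
	if (caps & CAP_FMR)
		*ops = (struct reg_ops){ "fmr", reg_fmr, unreg_fmr };
	else if (caps & CAP_MEM_MGT_EXTENSIONS)
		*ops = (struct reg_ops){ "fastreg", reg_fastreg, unreg_fastreg };
	else
		return false;	/* neither method: cannot register memory */
	return true;
}

int main(void)
{
	struct reg_ops ops;

	if (pick_reg_ops(CAP_MEM_MGT_EXTENSIONS, &ops))
		ops.reg();
	return 0;
}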
@@ -221,13 +215,13 @@ int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
struct ib_fmr_pool_param params;
int ret = -ENOMEM;
- ib_conn->fastreg.fmr.page_vec = kmalloc(sizeof(struct iser_page_vec) +
- (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
- GFP_KERNEL);
- if (!ib_conn->fastreg.fmr.page_vec)
+ ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
+ (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
+ GFP_KERNEL);
+ if (!ib_conn->fmr.page_vec)
return ret;
- ib_conn->fastreg.fmr.page_vec->pages = (u64 *)(ib_conn->fastreg.fmr.page_vec + 1);
+ ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);
params.page_shift = SHIFT_4K;
/* when the first/last SG element are not start/end *
@@ -243,16 +237,16 @@ int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ);
- ib_conn->fastreg.fmr.pool = ib_create_fmr_pool(device->pd, &params);
- if (!IS_ERR(ib_conn->fastreg.fmr.pool))
+ ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
+ if (!IS_ERR(ib_conn->fmr.pool))
return 0;
/* no FMR => no need for page_vec */
- kfree(ib_conn->fastreg.fmr.page_vec);
- ib_conn->fastreg.fmr.page_vec = NULL;
+ kfree(ib_conn->fmr.page_vec);
+ ib_conn->fmr.page_vec = NULL;
- ret = PTR_ERR(ib_conn->fastreg.fmr.pool);
- ib_conn->fastreg.fmr.pool = NULL;
+ ret = PTR_ERR(ib_conn->fmr.pool);
+ ib_conn->fmr.pool = NULL;
if (ret != -ENOSYS) {
iser_err("FMR allocation failed, err %d\n", ret);
return ret;
@@ -268,93 +262,173 @@ int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
void iser_free_fmr_pool(struct iser_conn *ib_conn)
{
iser_info("freeing conn %p fmr pool %p\n",
- ib_conn, ib_conn->fastreg.fmr.pool);
+ ib_conn, ib_conn->fmr.pool);
+
+ if (ib_conn->fmr.pool != NULL)
+ ib_destroy_fmr_pool(ib_conn->fmr.pool);
+
+ ib_conn->fmr.pool = NULL;
+
+ kfree(ib_conn->fmr.page_vec);
+ ib_conn->fmr.page_vec = NULL;
+}
+
+static int
+iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
+ bool pi_enable, struct fast_reg_descriptor *desc)
+{
+ int ret;
+
+ desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
+ ISCSI_ISER_SG_TABLESIZE + 1);
+ if (IS_ERR(desc->data_frpl)) {
+ ret = PTR_ERR(desc->data_frpl);
+ iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
+ ret);
+ return PTR_ERR(desc->data_frpl);
+ }
+
+ desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
+ if (IS_ERR(desc->data_mr)) {
+ ret = PTR_ERR(desc->data_mr);
+ iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
+ goto fast_reg_mr_failure;
+ }
+ desc->reg_indicators |= ISER_DATA_KEY_VALID;
+
+ if (pi_enable) {
+ struct ib_mr_init_attr mr_init_attr = {0};
+ struct iser_pi_context *pi_ctx = NULL;
- if (ib_conn->fastreg.fmr.pool != NULL)
- ib_destroy_fmr_pool(ib_conn->fastreg.fmr.pool);
+ desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
+ if (!desc->pi_ctx) {
+ iser_err("Failed to allocate pi context\n");
+ ret = -ENOMEM;
+ goto pi_ctx_alloc_failure;
+ }
+ pi_ctx = desc->pi_ctx;
+
+ pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_frpl)) {
+ ret = PTR_ERR(pi_ctx->prot_frpl);
+ iser_err("Failed to allocate prot frpl ret=%d\n",
+ ret);
+ goto prot_frpl_failure;
+ }
+
+ pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
+ ISCSI_ISER_SG_TABLESIZE + 1);
+ if (IS_ERR(pi_ctx->prot_mr)) {
+ ret = PTR_ERR(pi_ctx->prot_mr);
+ iser_err("Failed to allocate prot frmr ret=%d\n",
+ ret);
+ goto prot_mr_failure;
+ }
+ desc->reg_indicators |= ISER_PROT_KEY_VALID;
+
+ mr_init_attr.max_reg_descriptors = 2;
+ mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+ pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+ if (IS_ERR(pi_ctx->sig_mr)) {
+ ret = PTR_ERR(pi_ctx->sig_mr);
+ iser_err("Failed to allocate signature enabled mr err=%d\n",
+ ret);
+ goto sig_mr_failure;
+ }
+ desc->reg_indicators |= ISER_SIG_KEY_VALID;
+ }
+ desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
- ib_conn->fastreg.fmr.pool = NULL;
+ iser_dbg("Create fr_desc %p page_list %p\n",
+ desc, desc->data_frpl->page_list);
- kfree(ib_conn->fastreg.fmr.page_vec);
- ib_conn->fastreg.fmr.page_vec = NULL;
+ return 0;
+sig_mr_failure:
+ ib_dereg_mr(desc->pi_ctx->prot_mr);
+prot_mr_failure:
+ ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+prot_frpl_failure:
+ kfree(desc->pi_ctx);
+pi_ctx_alloc_failure:
+ ib_dereg_mr(desc->data_mr);
+fast_reg_mr_failure:
+ ib_free_fast_reg_page_list(desc->data_frpl);
+
+ return ret;
}
/**
- * iser_create_frwr_pool - Creates pool of fast_reg descriptors
+ * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
* for fast registration work requests.
* returns 0 on success, or errno code on failure
*/
-int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
+int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max)
{
struct iser_device *device = ib_conn->device;
struct fast_reg_descriptor *desc;
int i, ret;
- INIT_LIST_HEAD(&ib_conn->fastreg.frwr.pool);
- ib_conn->fastreg.frwr.pool_size = 0;
+ INIT_LIST_HEAD(&ib_conn->fastreg.pool);
+ ib_conn->fastreg.pool_size = 0;
for (i = 0; i < cmds_max; i++) {
- desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc) {
iser_err("Failed to allocate a new fast_reg descriptor\n");
ret = -ENOMEM;
goto err;
}
- desc->data_frpl = ib_alloc_fast_reg_page_list(device->ib_device,
- ISCSI_ISER_SG_TABLESIZE + 1);
- if (IS_ERR(desc->data_frpl)) {
- ret = PTR_ERR(desc->data_frpl);
- iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n", ret);
- goto fast_reg_page_failure;
+ ret = iser_create_fastreg_desc(device->ib_device, device->pd,
+ ib_conn->pi_support, desc);
+ if (ret) {
+ iser_err("Failed to create fastreg descriptor err=%d\n",
+ ret);
+ kfree(desc);
+ goto err;
}
- desc->data_mr = ib_alloc_fast_reg_mr(device->pd,
- ISCSI_ISER_SG_TABLESIZE + 1);
- if (IS_ERR(desc->data_mr)) {
- ret = PTR_ERR(desc->data_mr);
- iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
- goto fast_reg_mr_failure;
- }
- desc->valid = true;
- list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool);
- ib_conn->fastreg.frwr.pool_size++;
+ list_add_tail(&desc->list, &ib_conn->fastreg.pool);
+ ib_conn->fastreg.pool_size++;
}
return 0;
-fast_reg_mr_failure:
- ib_free_fast_reg_page_list(desc->data_frpl);
-fast_reg_page_failure:
- kfree(desc);
err:
- iser_free_frwr_pool(ib_conn);
+ iser_free_fastreg_pool(ib_conn);
return ret;
}
/**
- * iser_free_frwr_pool - releases the pool of fast_reg descriptors
+ * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
*/
-void iser_free_frwr_pool(struct iser_conn *ib_conn)
+void iser_free_fastreg_pool(struct iser_conn *ib_conn)
{
struct fast_reg_descriptor *desc, *tmp;
int i = 0;
- if (list_empty(&ib_conn->fastreg.frwr.pool))
+ if (list_empty(&ib_conn->fastreg.pool))
return;
- iser_info("freeing conn %p frwr pool\n", ib_conn);
+ iser_info("freeing conn %p fr pool\n", ib_conn);
- list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.frwr.pool, list) {
+ list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
list_del(&desc->list);
ib_free_fast_reg_page_list(desc->data_frpl);
ib_dereg_mr(desc->data_mr);
+ if (desc->pi_ctx) {
+ ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+ ib_dereg_mr(desc->pi_ctx->prot_mr);
+ ib_destroy_mr(desc->pi_ctx->sig_mr);
+ kfree(desc->pi_ctx);
+ }
kfree(desc);
++i;
}
- if (i < ib_conn->fastreg.frwr.pool_size)
+ if (i < ib_conn->fastreg.pool_size)
iser_warn("pool still has %d regions registered\n",
- ib_conn->fastreg.frwr.pool_size - i);
+ ib_conn->fastreg.pool_size - i);
}
/**
@@ -389,12 +463,17 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
init_attr.qp_context = (void *)ib_conn;
init_attr.send_cq = device->tx_cq[min_index];
init_attr.recv_cq = device->rx_cq[min_index];
- init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
init_attr.cap.max_send_sge = 2;
init_attr.cap.max_recv_sge = 1;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr.qp_type = IB_QPT_RC;
+ if (ib_conn->pi_support) {
+ init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS;
+ init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
+ } else {
+ init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
+ }
ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
if (ret)
@@ -591,6 +670,19 @@ static int iser_addr_handler(struct rdma_cm_id *cma_id)
ib_conn = (struct iser_conn *)cma_id->context;
ib_conn->device = device;
+ /* connection T10-PI support */
+ if (iser_pi_enable) {
+ if (!(device->dev_attr.device_cap_flags &
+ IB_DEVICE_SIGNATURE_HANDOVER)) {
+ iser_warn("T10-PI requested but not supported on %s, "
+ "continue without T10-PI\n",
+ ib_conn->device->ib_device->name);
+ ib_conn->pi_support = false;
+ } else {
+ ib_conn->pi_support = true;
+ }
+ }
+
ret = rdma_resolve_route(cma_id, 1000);
if (ret) {
iser_err("resolve route failed: %d\n", ret);
@@ -636,6 +728,11 @@ failure:
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
struct iser_conn *ib_conn;
+ struct ib_qp_attr attr;
+ struct ib_qp_init_attr init_attr;
+
+ (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
+ iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
ib_conn = (struct iser_conn *)cma_id->context;
ib_conn->state = ISER_CONN_UP;
@@ -653,9 +750,8 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
* terminated asynchronously from the iSCSI layer's perspective. */
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
ISER_CONN_TERMINATING)){
- if (ib_conn->iser_conn)
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
+ if (ib_conn->iscsi_conn)
+ iscsi_conn_failure(ib_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED);
else
iser_err("iscsi_iser connection isn't bound\n");
}
@@ -801,7 +897,7 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
page_list = page_vec->pages;
io_addr = page_list[0];
- mem = ib_fmr_pool_map_phys(ib_conn->fastreg.fmr.pool,
+ mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
page_list,
page_vec->length,
io_addr);
@@ -855,11 +951,11 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
reg->mem_h = NULL;
}
-void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
{
struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
+ struct iser_conn *ib_conn = iser_task->ib_conn;
struct fast_reg_descriptor *desc = reg->mem_h;
if (!reg->is_mr)
@@ -868,7 +964,7 @@ void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task,
reg->mem_h = NULL;
reg->is_mr = 0;
spin_lock_bh(&ib_conn->lock);
- list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool);
+ list_add_tail(&desc->list, &ib_conn->fastreg.pool);
spin_unlock_bh(&ib_conn->lock);
}
@@ -969,7 +1065,7 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
* perspective. */
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
ISER_CONN_TERMINATING))
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ iscsi_conn_failure(ib_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
/* no more non completed posts to the QP, complete the
@@ -993,18 +1089,16 @@ static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
if (wc.status == IB_WC_SUCCESS) {
if (wc.opcode == IB_WC_SEND)
iser_snd_completion(tx_desc, ib_conn);
- else if (wc.opcode == IB_WC_LOCAL_INV ||
- wc.opcode == IB_WC_FAST_REG_MR) {
- atomic_dec(&ib_conn->post_send_buf_count);
- continue;
- } else
+ else
iser_err("expected opcode %d got %d\n",
IB_WC_SEND, wc.opcode);
} else {
iser_err("tx id %llx status %d vend_err %x\n",
- wc.wr_id, wc.status, wc.vendor_err);
- atomic_dec(&ib_conn->post_send_buf_count);
- iser_handle_comp_error(tx_desc, ib_conn);
+ wc.wr_id, wc.status, wc.vendor_err);
+ if (wc.wr_id != ISER_FASTREG_LI_WRID) {
+ atomic_dec(&ib_conn->post_send_buf_count);
+ iser_handle_comp_error(tx_desc, ib_conn);
+ }
}
completed_tx++;
}
@@ -1022,8 +1116,12 @@ static void iser_cq_tasklet_fn(unsigned long data)
struct iser_rx_desc *desc;
unsigned long xfer_len;
struct iser_conn *ib_conn;
- int completed_tx, completed_rx;
- completed_tx = completed_rx = 0;
+ int completed_tx, completed_rx = 0;
+
+ /* Drain the tx CQ first, so that in the case where we have rx flushes and a
+ * successful tx completion we still go through completion error handling.
+ */
+ completed_tx = iser_drain_tx_cq(device, cq_index);
while (ib_poll_cq(cq, 1, &wc) == 1) {
desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
@@ -1051,7 +1149,6 @@ static void iser_cq_tasklet_fn(unsigned long data)
* " would not cause interrupts to be missed" */
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- completed_tx += iser_drain_tx_cq(device, cq_index);
iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}
@@ -1063,3 +1160,51 @@ static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
tasklet_schedule(&device->cq_tasklet[cq_index]);
}
+
+u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir, sector_t *sector)
+{
+ struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
+ struct fast_reg_descriptor *desc = reg->mem_h;
+ unsigned long sector_size = iser_task->sc->device->sector_size;
+ struct ib_mr_status mr_status;
+ int ret;
+
+ if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
+ desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
+ ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
+ IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ goto err;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ sector_t sector_off = mr_status.sig_err.sig_err_offset;
+
+ do_div(sector_off, sector_size + 8);
+ *sector = scsi_get_lba(iser_task->sc) + sector_off;
+
+ pr_err("PI error found type %d at sector %llx "
+ "expected %x vs actual %x\n",
+ mr_status.sig_err.err_type,
+ (unsigned long long)*sector,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ return 0x1;
+ case IB_SIG_BAD_REFTAG:
+ return 0x3;
+ case IB_SIG_BAD_APPTAG:
+ return 0x2;
+ }
+ }
+ }
+
+ return 0;
+err:
+ /* Not a lot we can do here, return ambiguous guard error */
+ return 0x1;
+}
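
The do_div() above turns the byte offset reported in the signature error into a logical-block offset: with T10-PI the protected buffer interleaves each data sector with an 8-byte protection tuple, so the divisor is sector_size + 8 and the result is added to the command's starting LBA. A minimal userspace sketch of that arithmetic, assuming 512-byte sectors; the helper name and the sample numbers are illustrative, not taken from the driver:

#include <stdio.h>
#include <stdint.h>

/* Byte offset of a signature error -> LBA of the failing sector, assuming
 * each sector is followed by an 8-byte T10-PI tuple in the protected buffer. */
static uint64_t pi_error_lba(uint64_t err_offset, uint64_t start_lba,
			     unsigned int sector_size)
{
	return start_lba + err_offset / (sector_size + 8);
}

int main(void)
{
	/* An error 2600 bytes into a transfer that began at LBA 100:
	 * 2600 / (512 + 8) = 5, so the bad tag sits at LBA 105. */
	printf("%llu\n", (unsigned long long)pi_error_lba(2600, 100, 512));
	return 0;
}
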
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 529b6bcdca7a..66a908bf3fb9 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -411,6 +411,8 @@ static void srp_path_rec_completion(int status,
static int srp_lookup_path(struct srp_target_port *target)
{
+ int ret;
+
target->path.numb_path = 1;
init_completion(&target->done);
@@ -431,7 +433,9 @@ static int srp_lookup_path(struct srp_target_port *target)
if (target->path_query_id < 0)
return target->path_query_id;
- wait_for_completion(&target->done);
+ ret = wait_for_completion_interruptible(&target->done);
+ if (ret < 0)
+ return ret;
if (target->status < 0)
shost_printk(KERN_WARNING, target->scsi_host,
@@ -710,7 +714,9 @@ static int srp_connect_target(struct srp_target_port *target)
ret = srp_send_req(target);
if (ret)
return ret;
- wait_for_completion(&target->done);
+ ret = wait_for_completion_interruptible(&target->done);
+ if (ret < 0)
+ return ret;
/*
* The CM event handling code will set status to
@@ -777,6 +783,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
* srp_claim_req - Take ownership of the scmnd associated with a request.
* @target: SRP target port.
* @req: SRP request.
+ * @sdev: If not NULL, only take ownership for this SCSI device.
* @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
* ownership of @req->scmnd if it equals @scmnd.
*
@@ -785,16 +792,17 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
*/
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
struct srp_request *req,
+ struct scsi_device *sdev,
struct scsi_cmnd *scmnd)
{
unsigned long flags;
spin_lock_irqsave(&target->lock, flags);
- if (!scmnd) {
+ if (req->scmnd &&
+ (!sdev || req->scmnd->device == sdev) &&
+ (!scmnd || req->scmnd == scmnd)) {
scmnd = req->scmnd;
req->scmnd = NULL;
- } else if (req->scmnd == scmnd) {
- req->scmnd = NULL;
} else {
scmnd = NULL;
}
@@ -821,9 +829,10 @@ static void srp_free_req(struct srp_target_port *target,
}
static void srp_finish_req(struct srp_target_port *target,
- struct srp_request *req, int result)
+ struct srp_request *req, struct scsi_device *sdev,
+ int result)
{
- struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
+ struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL);
if (scmnd) {
srp_free_req(target, req, scmnd, 0);
@@ -835,11 +844,20 @@ static void srp_finish_req(struct srp_target_port *target,
static void srp_terminate_io(struct srp_rport *rport)
{
struct srp_target_port *target = rport->lld_data;
+ struct Scsi_Host *shost = target->scsi_host;
+ struct scsi_device *sdev;
int i;
+ /*
+ * Invoking srp_terminate_io() while srp_queuecommand() is running
+ * is not safe. Hence the warning statement below.
+ */
+ shost_for_each_device(sdev, shost)
+ WARN_ON_ONCE(sdev->request_queue->request_fn_active);
+
for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &target->req_ring[i];
- srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
+ srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16);
}
}
@@ -876,7 +894,7 @@ static int srp_rport_reconnect(struct srp_rport *rport)
for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &target->req_ring[i];
- srp_finish_req(target, req, DID_RESET << 16);
+ srp_finish_req(target, req, NULL, DID_RESET << 16);
}
INIT_LIST_HEAD(&target->free_tx);
@@ -1284,7 +1302,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
complete(&target->tsk_mgmt_done);
} else {
req = &target->req_ring[rsp->tag];
- scmnd = srp_claim_req(target, req, NULL);
+ scmnd = srp_claim_req(target, req, NULL, NULL);
if (!scmnd) {
shost_printk(KERN_ERR, target->scsi_host,
"Null scmnd for RSP w/tag %016llx\n",
@@ -1804,8 +1822,10 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
shost_printk(KERN_WARNING, shost,
PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
else
- shost_printk(KERN_WARNING, shost,
- PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
+ shost_printk(KERN_WARNING, shost, PFX
+ "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
+ target->path.sgid.raw,
+ target->orig_dgid, reason);
} else
shost_printk(KERN_WARNING, shost,
" REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
@@ -1863,6 +1883,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
case IB_CM_TIMEWAIT_EXIT:
shost_printk(KERN_ERR, target->scsi_host,
PFX "connection closed\n");
+ comp = 1;
target->status = 0;
break;
@@ -1999,7 +2020,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
- if (!req || !srp_claim_req(target, req, scmnd))
+ if (!req || !srp_claim_req(target, req, NULL, scmnd))
return SUCCESS;
if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
SRP_TSK_ABORT_TASK) == 0)
@@ -2030,8 +2051,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &target->req_ring[i];
- if (req->scmnd && req->scmnd->device == scmnd->device)
- srp_finish_req(target, req, DID_RESET << 16);
+ srp_finish_req(target, req, scmnd->device, DID_RESET << 16);
}
return SUCCESS;
@@ -2612,6 +2632,8 @@ static ssize_t srp_create_target(struct device *dev,
target->tl_retry_count = 7;
target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
+ mutex_lock(&host->add_target_mutex);
+
ret = srp_parse_options(buf, target);
if (ret)
goto err;
@@ -2649,16 +2671,9 @@ static ssize_t srp_create_target(struct device *dev,
if (ret)
goto err_free_mem;
- ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
-
- shost_printk(KERN_DEBUG, target->scsi_host, PFX
- "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
- "service_id %016llx dgid %pI6\n",
- (unsigned long long) be64_to_cpu(target->id_ext),
- (unsigned long long) be64_to_cpu(target->ioc_guid),
- be16_to_cpu(target->path.pkey),
- (unsigned long long) be64_to_cpu(target->service_id),
- target->path.dgid.raw);
+ ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
+ if (ret)
+ goto err_free_mem;
ret = srp_create_target_ib(target);
if (ret)
@@ -2679,7 +2694,19 @@ static ssize_t srp_create_target(struct device *dev,
if (ret)
goto err_disconnect;
- return count;
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ be16_to_cpu(target->path.pkey),
+ be64_to_cpu(target->service_id),
+ target->path.sgid.raw, target->path.dgid.raw);
+
+ ret = count;
+
+out:
+ mutex_unlock(&host->add_target_mutex);
+ return ret;
err_disconnect:
srp_disconnect_target(target);
@@ -2695,8 +2722,7 @@ err_free_mem:
err:
scsi_host_put(target_host);
-
- return ret;
+ goto out;
}
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
@@ -2732,6 +2758,7 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
INIT_LIST_HEAD(&host->target_list);
spin_lock_init(&host->target_lock);
init_completion(&host->released);
+ mutex_init(&host->add_target_mutex);
host->srp_dev = device;
host->port = port;
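
The reworked srp_claim_req() earlier in this file folds the old two-branch logic into a single predicate: a request is claimed only if it still owns a command and that command passes the optional device and command filters, with NULL meaning "match anything". A hedged, self-contained sketch of the same ownership test; the types and names below are stand-ins, not the driver's, and the spinlock the driver holds around this test is omitted:

#include <stdio.h>
#include <stddef.h>

struct cmd { int device; };
struct req { struct cmd *scmnd; };

/* Claim the command only if it exists and passes both optional filters;
 * dev == 0 means "any device", cmd == NULL means "any command". */
static struct cmd *claim_req(struct req *req, int dev, struct cmd *cmd)
{
	struct cmd *claimed = NULL;

	if (req->scmnd &&
	    (!dev || req->scmnd->device == dev) &&
	    (!cmd || req->scmnd == cmd)) {
		claimed = req->scmnd;
		req->scmnd = NULL;	/* ownership moves to the caller */
	}
	return claimed;
}

int main(void)
{
	struct cmd c = { .device = 3 };
	struct req r = { .scmnd = &c };

	printf("%p\n", (void *)claim_req(&r, 4, NULL));	/* NULL: wrong device */
	printf("%p\n", (void *)claim_req(&r, 3, NULL));	/* &c: claimed */
	return 0;
}
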
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 575681063f38..aad27b7b4a46 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -105,6 +105,7 @@ struct srp_host {
spinlock_t target_lock;
struct completion released;
struct list_head list;
+ struct mutex add_target_mutex;
};
struct srp_request {
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index a06e12552886..ce953d895f5b 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -954,11 +954,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
error = input_ff_upload(dev, &effect, file);
+ if (error)
+ return error;
if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
return -EFAULT;
- return error;
+ return 0;
}
/* Multi-number variable-length handlers */
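
The evdev hunk above changes the force-feedback upload path so the effect id is copied back to userspace only after the upload has succeeded; a failed upload is reported immediately instead of being paired with a stale id or masked by a later -EFAULT. A small self-contained sketch of that ordering with stand-in helpers (upload() and copy_out() are not the input core's API):

#include <stdio.h>
#include <errno.h>

static int upload(int *id, int should_fail)
{
	if (should_fail)
		return -ENOSPC;		/* e.g. no free effect slot */
	*id = 7;
	return 0;
}

static int copy_out(int *user_id, int id)
{
	*user_id = id;			/* stands in for copy_to_user() */
	return 0;
}

/* Report the upload error first; propagate the id only on success. */
static int do_upload_ioctl(int *user_id, int should_fail)
{
	int id, error;

	error = upload(&id, should_fail);
	if (error)
		return error;

	if (copy_out(user_id, id))
		return -EFAULT;

	return 0;
}

int main(void)
{
	int user_id = -1;
	int ret;

	ret = do_upload_ioctl(&user_id, 1);
	printf("%d %d\n", ret, user_id);	/* -28 -1: error, id untouched */

	ret = do_upload_ioctl(&user_id, 0);
	printf("%d %d\n", ret, user_id);	/* 0 7: success, id copied out */
	return 0;
}
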
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index a673c9f3a0b9..76842d7dc2e3 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -151,6 +151,18 @@ config KEYBOARD_BFIN
To compile this driver as a module, choose M here: the
module will be called bf54x-keys.
+config KEYBOARD_CLPS711X
+ tristate "CLPS711X Keypad support"
+ depends on OF_GPIO && (ARCH_CLPS711X || COMPILE_TEST)
+ select INPUT_MATRIXKMAP
+ select INPUT_POLLDEV
+ help
+ Say Y here to enable the matrix keypad on the Cirrus Logic
+ CLPS711X CPUs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called clps711x-keypad.
+
config KEYBOARD_LKKBD
tristate "DECstation/VAXstation LK201/LK401 keyboard"
select SERIO
@@ -595,16 +607,6 @@ config KEYBOARD_TC3589X
To compile this driver as a module, choose M here: the
module will be called tc3589x-keypad.
-config KEYBOARD_TNETV107X
- tristate "TI TNETV107X keypad support"
- depends on ARCH_DAVINCI_TNETV107X
- select INPUT_MATRIXKMAP
- help
- Say Y here if you want to use the TNETV107X keypad.
-
- To compile this driver as a module, choose M here: the
- module will be called tnetv107x-keypad.
-
config KEYBOARD_TWL4030
tristate "TI TWL4030/TWL5030/TPS659x0 keypad support"
depends on TWL4030_CORE
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index a699b6172303..11cff7b84b47 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
+obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o
obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
@@ -53,7 +54,6 @@ obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
obj-$(CONFIG_KEYBOARD_TC3589X) += tc3589x-keypad.o
obj-$(CONFIG_KEYBOARD_TEGRA) += tegra-kbc.o
-obj-$(CONFIG_KEYBOARD_TNETV107X) += tnetv107x-keypad.o
obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o
diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c
new file mode 100644
index 000000000000..3955aecee44b
--- /dev/null
+++ b/drivers/input/keyboard/clps711x-keypad.c
@@ -0,0 +1,207 @@
+/*
+ * Cirrus Logic CLPS711X Keypad driver
+ *
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/input.h>
+#include <linux/input-polldev.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/clps711x.h>
+
+#define CLPS711X_KEYPAD_COL_COUNT 8
+
+struct clps711x_gpio_data {
+ struct gpio_desc *desc;
+ DECLARE_BITMAP(last_state, CLPS711X_KEYPAD_COL_COUNT);
+};
+
+struct clps711x_keypad_data {
+ struct regmap *syscon;
+ int row_count;
+ unsigned int row_shift;
+ struct clps711x_gpio_data *gpio_data;
+};
+
+static void clps711x_keypad_poll(struct input_polled_dev *dev)
+{
+ const unsigned short *keycodes = dev->input->keycode;
+ struct clps711x_keypad_data *priv = dev->private;
+ bool sync = false;
+ int col, row;
+
+ for (col = 0; col < CLPS711X_KEYPAD_COL_COUNT; col++) {
+ /* Assert column */
+ regmap_update_bits(priv->syscon, SYSCON_OFFSET,
+ SYSCON1_KBDSCAN_MASK,
+ SYSCON1_KBDSCAN(8 + col));
+
+ /* Scan rows */
+ for (row = 0; row < priv->row_count; row++) {
+ struct clps711x_gpio_data *data = &priv->gpio_data[row];
+ bool state, state1;
+
+ /* Read twice for protection against fluctuations */
+ do {
+ state = gpiod_get_value_cansleep(data->desc);
+ cond_resched();
+ state1 = gpiod_get_value_cansleep(data->desc);
+ } while (state != state1);
+
+ if (test_bit(col, data->last_state) != state) {
+ int code = MATRIX_SCAN_CODE(row, col,
+ priv->row_shift);
+
+ if (state) {
+ set_bit(col, data->last_state);
+ input_event(dev->input, EV_MSC,
+ MSC_SCAN, code);
+ } else {
+ clear_bit(col, data->last_state);
+ }
+
+ if (keycodes[code])
+ input_report_key(dev->input,
+ keycodes[code], state);
+ sync = true;
+ }
+ }
+
+ /* Set all columns to low */
+ regmap_update_bits(priv->syscon, SYSCON_OFFSET,
+ SYSCON1_KBDSCAN_MASK, SYSCON1_KBDSCAN(1));
+ }
+
+ if (sync)
+ input_sync(dev->input);
+}
+
+static int clps711x_keypad_probe(struct platform_device *pdev)
+{
+ struct clps711x_keypad_data *priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct input_polled_dev *poll_dev;
+ u32 poll_interval;
+ int i, err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->syscon =
+ syscon_regmap_lookup_by_compatible("cirrus,clps711x-syscon1");
+ if (IS_ERR(priv->syscon))
+ return PTR_ERR(priv->syscon);
+
+ priv->row_count = of_gpio_named_count(np, "row-gpios");
+ if (priv->row_count < 1)
+ return -EINVAL;
+
+ priv->gpio_data = devm_kzalloc(dev,
+ sizeof(*priv->gpio_data) * priv->row_count,
+ GFP_KERNEL);
+ if (!priv->gpio_data)
+ return -ENOMEM;
+
+ priv->row_shift = get_count_order(CLPS711X_KEYPAD_COL_COUNT);
+
+ for (i = 0; i < priv->row_count; i++) {
+ struct clps711x_gpio_data *data = &priv->gpio_data[i];
+
+ data->desc = devm_gpiod_get_index(dev, "row", i);
+ if (!data->desc)
+ return -EINVAL;
+
+ if (IS_ERR(data->desc))
+ return PTR_ERR(data->desc);
+
+ gpiod_direction_input(data->desc);
+ }
+
+ err = of_property_read_u32(np, "poll-interval", &poll_interval);
+ if (err)
+ return err;
+
+ poll_dev = input_allocate_polled_device();
+ if (!poll_dev)
+ return -ENOMEM;
+
+ poll_dev->private = priv;
+ poll_dev->poll = clps711x_keypad_poll;
+ poll_dev->poll_interval = poll_interval;
+ poll_dev->input->name = pdev->name;
+ poll_dev->input->dev.parent = dev;
+ poll_dev->input->id.bustype = BUS_HOST;
+ poll_dev->input->id.vendor = 0x0001;
+ poll_dev->input->id.product = 0x0001;
+ poll_dev->input->id.version = 0x0100;
+
+ err = matrix_keypad_build_keymap(NULL, NULL, priv->row_count,
+ CLPS711X_KEYPAD_COL_COUNT,
+ NULL, poll_dev->input);
+ if (err)
+ goto out_err;
+
+ input_set_capability(poll_dev->input, EV_MSC, MSC_SCAN);
+ if (of_property_read_bool(np, "autorepeat"))
+ __set_bit(EV_REP, poll_dev->input->evbit);
+
+ platform_set_drvdata(pdev, poll_dev);
+
+ /* Set all columns to low */
+ regmap_update_bits(priv->syscon, SYSCON_OFFSET, SYSCON1_KBDSCAN_MASK,
+ SYSCON1_KBDSCAN(1));
+
+ err = input_register_polled_device(poll_dev);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ input_free_polled_device(poll_dev);
+ return err;
+}
+
+static int clps711x_keypad_remove(struct platform_device *pdev)
+{
+ struct input_polled_dev *poll_dev = platform_get_drvdata(pdev);
+
+ input_unregister_polled_device(poll_dev);
+ input_free_polled_device(poll_dev);
+
+ return 0;
+}
+
+static struct of_device_id clps711x_keypad_of_match[] = {
+ { .compatible = "cirrus,clps711x-keypad", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clps711x_keypad_of_match);
+
+static struct platform_driver clps711x_keypad_driver = {
+ .driver = {
+ .name = "clps711x-keypad",
+ .owner = THIS_MODULE,
+ .of_match_table = clps711x_keypad_of_match,
+ },
+ .probe = clps711x_keypad_probe,
+ .remove = clps711x_keypad_remove,
+};
+module_platform_driver(clps711x_keypad_driver);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("Cirrus Logic CLPS711X Keypad driver");
+MODULE_LICENSE("GPL");
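
The poll routine in the new driver above samples each row GPIO twice and only accepts the value once two consecutive reads agree, a cheap guard against catching the line mid-transition. A hedged userspace sketch of that stable-read idea with a simulated bouncy input; every name here is illustrative. The driver additionally yields with cond_resched() between the two reads because the GPIO accessors may sleep; that detail is dropped here.

#include <stdio.h>
#include <stdbool.h>

static int noise;			/* the line bounces for a few reads */

static bool read_line(void)
{
	if (noise > 0) {
		noise--;
		return noise & 1;	/* alternating, unstable value */
	}
	return true;			/* settled high */
}

/* Keep sampling until two consecutive reads agree, then trust the value. */
static bool read_stable(void)
{
	bool a, b;

	do {
		a = read_line();
		b = read_line();
	} while (a != b);

	return a;
}

int main(void)
{
	noise = 5;
	printf("stable state: %d\n", read_stable());	/* 1 once settled */
	return 0;
}
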
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index cbf4f8038cba..97ec33572e56 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -439,7 +439,7 @@ static int imx_keypad_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no irq defined in platform data\n");
- return -EINVAL;
+ return irq;
}
input_dev = devm_input_allocate_device(&pdev->dev);
@@ -449,7 +449,7 @@ static int imx_keypad_probe(struct platform_device *pdev)
}
keypad = devm_kzalloc(&pdev->dev, sizeof(struct imx_keypad),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!keypad) {
dev_err(&pdev->dev, "not enough memory for driver data\n");
return -ENOMEM;
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 2c9f19ac35ea..80c6b0ef3fc8 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -19,10 +19,9 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/mutex.h>
-
-#include <linux/mfd/pm8xxx/core.h>
-#include <linux/mfd/pm8xxx/gpio.h>
-#include <linux/input/pmic8xxx-keypad.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/input/matrix_keypad.h>
#define PM8XXX_MAX_ROWS 18
#define PM8XXX_MAX_COLS 8
@@ -85,8 +84,10 @@
/**
* struct pmic8xxx_kp - internal keypad data structure
- * @pdata - keypad platform data pointer
+ * @num_cols - number of columns of keypad
+ * @num_rows - number of rows of keypad
* @input - input device pointer for keypad
+ * @regmap - regmap handle
* @key_sense_irq - key press/release irq number
* @key_stuck_irq - key stuck notification irq number
* @keycodes - array to hold the key codes
@@ -96,8 +97,10 @@
* @ctrl_reg - control register value
*/
struct pmic8xxx_kp {
- const struct pm8xxx_keypad_platform_data *pdata;
+ unsigned int num_rows;
+ unsigned int num_cols;
struct input_dev *input;
+ struct regmap *regmap;
int key_sense_irq;
int key_stuck_irq;
@@ -110,40 +113,13 @@ struct pmic8xxx_kp {
u8 ctrl_reg;
};
-static int pmic8xxx_kp_write_u8(struct pmic8xxx_kp *kp,
- u8 data, u16 reg)
-{
- int rc;
-
- rc = pm8xxx_writeb(kp->dev->parent, reg, data);
- return rc;
-}
-
-static int pmic8xxx_kp_read(struct pmic8xxx_kp *kp,
- u8 *data, u16 reg, unsigned num_bytes)
-{
- int rc;
-
- rc = pm8xxx_read_buf(kp->dev->parent, reg, data, num_bytes);
- return rc;
-}
-
-static int pmic8xxx_kp_read_u8(struct pmic8xxx_kp *kp,
- u8 *data, u16 reg)
-{
- int rc;
-
- rc = pmic8xxx_kp_read(kp, data, reg, 1);
- return rc;
-}
-
static u8 pmic8xxx_col_state(struct pmic8xxx_kp *kp, u8 col)
{
/* all keys pressed on that particular row? */
if (col == 0x00)
- return 1 << kp->pdata->num_cols;
+ return 1 << kp->num_cols;
else
- return col & ((1 << kp->pdata->num_cols) - 1);
+ return col & ((1 << kp->num_cols) - 1);
}
/*
@@ -161,9 +137,9 @@ static u8 pmic8xxx_col_state(struct pmic8xxx_kp *kp, u8 col)
static int pmic8xxx_chk_sync_read(struct pmic8xxx_kp *kp)
{
int rc;
- u8 scan_val;
+ unsigned int scan_val;
- rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN);
+ rc = regmap_read(kp->regmap, KEYP_SCAN, &scan_val);
if (rc < 0) {
dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc);
return rc;
@@ -171,7 +147,7 @@ static int pmic8xxx_chk_sync_read(struct pmic8xxx_kp *kp)
scan_val |= 0x1;
- rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
+ rc = regmap_write(kp->regmap, KEYP_SCAN, scan_val);
if (rc < 0) {
dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);
return rc;
@@ -187,31 +163,29 @@ static int pmic8xxx_kp_read_data(struct pmic8xxx_kp *kp, u16 *state,
u16 data_reg, int read_rows)
{
int rc, row;
- u8 new_data[PM8XXX_MAX_ROWS];
-
- rc = pmic8xxx_kp_read(kp, new_data, data_reg, read_rows);
- if (rc)
- return rc;
+ unsigned int val;
- for (row = 0; row < kp->pdata->num_rows; row++) {
- dev_dbg(kp->dev, "new_data[%d] = %d\n", row,
- new_data[row]);
- state[row] = pmic8xxx_col_state(kp, new_data[row]);
+ for (row = 0; row < read_rows; row++) {
+ rc = regmap_read(kp->regmap, data_reg, &val);
+ if (rc)
+ return rc;
+ dev_dbg(kp->dev, "%d = %d\n", row, val);
+ state[row] = pmic8xxx_col_state(kp, val);
}
- return rc;
+ return 0;
}
static int pmic8xxx_kp_read_matrix(struct pmic8xxx_kp *kp, u16 *new_state,
u16 *old_state)
{
int rc, read_rows;
- u8 scan_val;
+ unsigned int scan_val;
- if (kp->pdata->num_rows < PM8XXX_MIN_ROWS)
+ if (kp->num_rows < PM8XXX_MIN_ROWS)
read_rows = PM8XXX_MIN_ROWS;
else
- read_rows = kp->pdata->num_rows;
+ read_rows = kp->num_rows;
pmic8xxx_chk_sync_read(kp);
@@ -236,14 +210,14 @@ static int pmic8xxx_kp_read_matrix(struct pmic8xxx_kp *kp, u16 *new_state,
/* 4 * 32KHz clocks */
udelay((4 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);
- rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN);
+ rc = regmap_read(kp->regmap, KEYP_SCAN, &scan_val);
if (rc < 0) {
dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc);
return rc;
}
scan_val &= 0xFE;
- rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
+ rc = regmap_write(kp->regmap, KEYP_SCAN, scan_val);
if (rc < 0)
dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);
@@ -255,13 +229,13 @@ static void __pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, u16 *new_state,
{
int row, col, code;
- for (row = 0; row < kp->pdata->num_rows; row++) {
+ for (row = 0; row < kp->num_rows; row++) {
int bits_changed = new_state[row] ^ old_state[row];
if (!bits_changed)
continue;
- for (col = 0; col < kp->pdata->num_cols; col++) {
+ for (col = 0; col < kp->num_cols; col++) {
if (!(bits_changed & (1 << col)))
continue;
@@ -287,9 +261,9 @@ static bool pmic8xxx_detect_ghost_keys(struct pmic8xxx_kp *kp, u16 *new_state)
u16 check, row_state;
check = 0;
- for (row = 0; row < kp->pdata->num_rows; row++) {
+ for (row = 0; row < kp->num_rows; row++) {
row_state = (~new_state[row]) &
- ((1 << kp->pdata->num_cols) - 1);
+ ((1 << kp->num_cols) - 1);
if (hweight16(row_state) > 1) {
if (found_first == -1)
@@ -379,10 +353,10 @@ static irqreturn_t pmic8xxx_kp_stuck_irq(int irq, void *data)
static irqreturn_t pmic8xxx_kp_irq(int irq, void *data)
{
struct pmic8xxx_kp *kp = data;
- u8 ctrl_val, events;
+ unsigned int ctrl_val, events;
int rc;
- rc = pmic8xxx_kp_read(kp, &ctrl_val, KEYP_CTRL, 1);
+ rc = regmap_read(kp->regmap, KEYP_CTRL, &ctrl_val);
if (rc < 0) {
dev_err(kp->dev, "failed to read keyp_ctrl register\n");
return IRQ_HANDLED;
@@ -397,8 +371,13 @@ static irqreturn_t pmic8xxx_kp_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
+static int pmic8xxx_kpd_init(struct pmic8xxx_kp *kp,
+ struct platform_device *pdev)
{
+ const struct device_node *of_node = pdev->dev.of_node;
+ unsigned int scan_delay_ms;
+ unsigned int row_hold_ns;
+ unsigned int debounce_ms;
int bits, rc, cycles;
u8 scan_val = 0, ctrl_val = 0;
static const u8 row_bits[] = {
@@ -406,40 +385,69 @@ static int pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
};
/* Find column bits */
- if (kp->pdata->num_cols < KEYP_CTRL_SCAN_COLS_MIN)
+ if (kp->num_cols < KEYP_CTRL_SCAN_COLS_MIN)
bits = 0;
else
- bits = kp->pdata->num_cols - KEYP_CTRL_SCAN_COLS_MIN;
+ bits = kp->num_cols - KEYP_CTRL_SCAN_COLS_MIN;
ctrl_val = (bits & KEYP_CTRL_SCAN_COLS_BITS) <<
KEYP_CTRL_SCAN_COLS_SHIFT;
/* Find row bits */
- if (kp->pdata->num_rows < KEYP_CTRL_SCAN_ROWS_MIN)
+ if (kp->num_rows < KEYP_CTRL_SCAN_ROWS_MIN)
bits = 0;
else
- bits = row_bits[kp->pdata->num_rows - KEYP_CTRL_SCAN_ROWS_MIN];
+ bits = row_bits[kp->num_rows - KEYP_CTRL_SCAN_ROWS_MIN];
ctrl_val |= (bits << KEYP_CTRL_SCAN_ROWS_SHIFT);
- rc = pmic8xxx_kp_write_u8(kp, ctrl_val, KEYP_CTRL);
+ rc = regmap_write(kp->regmap, KEYP_CTRL, ctrl_val);
if (rc < 0) {
dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc);
return rc;
}
- bits = (kp->pdata->debounce_ms / 5) - 1;
+ if (of_property_read_u32(of_node, "scan-delay", &scan_delay_ms))
+ scan_delay_ms = MIN_SCAN_DELAY;
+
+ if (scan_delay_ms > MAX_SCAN_DELAY || scan_delay_ms < MIN_SCAN_DELAY ||
+ !is_power_of_2(scan_delay_ms)) {
+ dev_err(&pdev->dev, "invalid keypad scan time supplied\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(of_node, "row-hold", &row_hold_ns))
+ row_hold_ns = MIN_ROW_HOLD_DELAY;
+
+ if (row_hold_ns > MAX_ROW_HOLD_DELAY ||
+ row_hold_ns < MIN_ROW_HOLD_DELAY ||
+ ((row_hold_ns % MIN_ROW_HOLD_DELAY) != 0)) {
+ dev_err(&pdev->dev, "invalid keypad row hold time supplied\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(of_node, "debounce", &debounce_ms))
+ debounce_ms = MIN_DEBOUNCE_TIME;
+
+ if (((debounce_ms % 5) != 0) ||
+ debounce_ms > MAX_DEBOUNCE_TIME ||
+ debounce_ms < MIN_DEBOUNCE_TIME) {
+ dev_err(&pdev->dev, "invalid debounce time supplied\n");
+ return -EINVAL;
+ }
+
+ bits = (debounce_ms / 5) - 1;
scan_val |= (bits << KEYP_SCAN_DBOUNCE_SHIFT);
- bits = fls(kp->pdata->scan_delay_ms) - 1;
+ bits = fls(scan_delay_ms) - 1;
scan_val |= (bits << KEYP_SCAN_PAUSE_SHIFT);
/* Row hold time is a multiple of 32KHz cycles. */
- cycles = (kp->pdata->row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC;
+ cycles = (row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC;
scan_val |= (cycles << KEYP_SCAN_ROW_HOLD_SHIFT);
- rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
+ rc = regmap_write(kp->regmap, KEYP_SCAN, scan_val);
if (rc)
dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);
@@ -447,34 +455,13 @@ static int pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
}
-static int pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios,
- struct pmic8xxx_kp *kp, struct pm_gpio *gpio_config)
-{
- int rc, i;
-
- if (gpio_start < 0 || num_gpios < 0)
- return -EINVAL;
-
- for (i = 0; i < num_gpios; i++) {
- rc = pm8xxx_gpio_config(gpio_start + i, gpio_config);
- if (rc) {
- dev_err(kp->dev, "%s: FAIL pm8xxx_gpio_config():"
- "for PM GPIO [%d] rc=%d.\n",
- __func__, gpio_start + i, rc);
- return rc;
- }
- }
-
- return 0;
-}
-
static int pmic8xxx_kp_enable(struct pmic8xxx_kp *kp)
{
int rc;
kp->ctrl_reg |= KEYP_CTRL_KEYP_EN;
- rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
+ rc = regmap_write(kp->regmap, KEYP_CTRL, kp->ctrl_reg);
if (rc < 0)
dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc);
@@ -487,7 +474,7 @@ static int pmic8xxx_kp_disable(struct pmic8xxx_kp *kp)
kp->ctrl_reg &= ~KEYP_CTRL_KEYP_EN;
- rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
+ rc = regmap_write(kp->regmap, KEYP_CTRL, kp->ctrl_reg);
if (rc < 0)
return rc;
@@ -520,106 +507,62 @@ static void pmic8xxx_kp_close(struct input_dev *dev)
*/
static int pmic8xxx_kp_probe(struct platform_device *pdev)
{
- const struct pm8xxx_keypad_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
- const struct matrix_keymap_data *keymap_data;
+ unsigned int rows, cols;
+ bool repeat;
+ bool wakeup;
struct pmic8xxx_kp *kp;
int rc;
- u8 ctrl_val;
-
- struct pm_gpio kypd_drv = {
- .direction = PM_GPIO_DIR_OUT,
- .output_buffer = PM_GPIO_OUT_BUF_OPEN_DRAIN,
- .output_value = 0,
- .pull = PM_GPIO_PULL_NO,
- .vin_sel = PM_GPIO_VIN_S3,
- .out_strength = PM_GPIO_STRENGTH_LOW,
- .function = PM_GPIO_FUNC_1,
- .inv_int_pol = 1,
- };
-
- struct pm_gpio kypd_sns = {
- .direction = PM_GPIO_DIR_IN,
- .pull = PM_GPIO_PULL_UP_31P5,
- .vin_sel = PM_GPIO_VIN_S3,
- .out_strength = PM_GPIO_STRENGTH_NO,
- .function = PM_GPIO_FUNC_NORMAL,
- .inv_int_pol = 1,
- };
+ unsigned int ctrl_val;
+ rc = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols);
+ if (rc)
+ return rc;
- if (!pdata || !pdata->num_cols || !pdata->num_rows ||
- pdata->num_cols > PM8XXX_MAX_COLS ||
- pdata->num_rows > PM8XXX_MAX_ROWS ||
- pdata->num_cols < PM8XXX_MIN_COLS) {
+ if (cols > PM8XXX_MAX_COLS || rows > PM8XXX_MAX_ROWS ||
+ cols < PM8XXX_MIN_COLS) {
dev_err(&pdev->dev, "invalid platform data\n");
return -EINVAL;
}
- if (!pdata->scan_delay_ms ||
- pdata->scan_delay_ms > MAX_SCAN_DELAY ||
- pdata->scan_delay_ms < MIN_SCAN_DELAY ||
- !is_power_of_2(pdata->scan_delay_ms)) {
- dev_err(&pdev->dev, "invalid keypad scan time supplied\n");
- return -EINVAL;
- }
-
- if (!pdata->row_hold_ns ||
- pdata->row_hold_ns > MAX_ROW_HOLD_DELAY ||
- pdata->row_hold_ns < MIN_ROW_HOLD_DELAY ||
- ((pdata->row_hold_ns % MIN_ROW_HOLD_DELAY) != 0)) {
- dev_err(&pdev->dev, "invalid keypad row hold time supplied\n");
- return -EINVAL;
- }
-
- if (!pdata->debounce_ms ||
- ((pdata->debounce_ms % 5) != 0) ||
- pdata->debounce_ms > MAX_DEBOUNCE_TIME ||
- pdata->debounce_ms < MIN_DEBOUNCE_TIME) {
- dev_err(&pdev->dev, "invalid debounce time supplied\n");
- return -EINVAL;
- }
-
- keymap_data = pdata->keymap_data;
- if (!keymap_data) {
- dev_err(&pdev->dev, "no keymap data supplied\n");
- return -EINVAL;
- }
+ repeat = !of_property_read_bool(pdev->dev.of_node,
+ "linux,input-no-autorepeat");
+ wakeup = of_property_read_bool(pdev->dev.of_node,
+ "linux,keypad-wakeup");
- kp = kzalloc(sizeof(*kp), GFP_KERNEL);
+ kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
if (!kp)
return -ENOMEM;
+ kp->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!kp->regmap)
+ return -ENODEV;
+
platform_set_drvdata(pdev, kp);
- kp->pdata = pdata;
+ kp->num_rows = rows;
+ kp->num_cols = cols;
kp->dev = &pdev->dev;
- kp->input = input_allocate_device();
+ kp->input = devm_input_allocate_device(&pdev->dev);
if (!kp->input) {
dev_err(&pdev->dev, "unable to allocate input device\n");
- rc = -ENOMEM;
- goto err_alloc_device;
+ return -ENOMEM;
}
kp->key_sense_irq = platform_get_irq(pdev, 0);
if (kp->key_sense_irq < 0) {
dev_err(&pdev->dev, "unable to get keypad sense irq\n");
- rc = -ENXIO;
- goto err_get_irq;
+ return kp->key_sense_irq;
}
kp->key_stuck_irq = platform_get_irq(pdev, 1);
if (kp->key_stuck_irq < 0) {
dev_err(&pdev->dev, "unable to get keypad stuck irq\n");
- rc = -ENXIO;
- goto err_get_irq;
+ return kp->key_stuck_irq;
}
- kp->input->name = pdata->input_name ? : "PMIC8XXX keypad";
- kp->input->phys = pdata->input_phys_device ? : "pmic8xxx_keypad/input0";
-
- kp->input->dev.parent = &pdev->dev;
+ kp->input->name = "PMIC8XXX keypad";
+ kp->input->phys = "pmic8xxx_keypad/input0";
kp->input->id.bustype = BUS_I2C;
kp->input->id.version = 0x0001;
@@ -629,15 +572,15 @@ static int pmic8xxx_kp_probe(struct platform_device *pdev)
kp->input->open = pmic8xxx_kp_open;
kp->input->close = pmic8xxx_kp_close;
- rc = matrix_keypad_build_keymap(keymap_data, NULL,
+ rc = matrix_keypad_build_keymap(NULL, NULL,
PM8XXX_MAX_ROWS, PM8XXX_MAX_COLS,
kp->keycodes, kp->input);
if (rc) {
dev_err(&pdev->dev, "failed to build keymap\n");
- goto err_get_irq;
+ return rc;
}
- if (pdata->rep)
+ if (repeat)
__set_bit(EV_REP, kp->input->evbit);
input_set_capability(kp->input, EV_MSC, MSC_SCAN);
@@ -647,44 +590,32 @@ static int pmic8xxx_kp_probe(struct platform_device *pdev)
memset(kp->keystate, 0xff, sizeof(kp->keystate));
memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate));
- rc = pmic8xxx_kpd_init(kp);
+ rc = pmic8xxx_kpd_init(kp, pdev);
if (rc < 0) {
dev_err(&pdev->dev, "unable to initialize keypad controller\n");
- goto err_get_irq;
- }
-
- rc = pmic8xxx_kp_config_gpio(pdata->cols_gpio_start,
- pdata->num_cols, kp, &kypd_sns);
- if (rc < 0) {
- dev_err(&pdev->dev, "unable to configure keypad sense lines\n");
- goto err_gpio_config;
- }
-
- rc = pmic8xxx_kp_config_gpio(pdata->rows_gpio_start,
- pdata->num_rows, kp, &kypd_drv);
- if (rc < 0) {
- dev_err(&pdev->dev, "unable to configure keypad drive lines\n");
- goto err_gpio_config;
+ return rc;
}
- rc = request_any_context_irq(kp->key_sense_irq, pmic8xxx_kp_irq,
- IRQF_TRIGGER_RISING, "pmic-keypad", kp);
+ rc = devm_request_any_context_irq(&pdev->dev, kp->key_sense_irq,
+ pmic8xxx_kp_irq, IRQF_TRIGGER_RISING, "pmic-keypad",
+ kp);
if (rc < 0) {
dev_err(&pdev->dev, "failed to request keypad sense irq\n");
- goto err_get_irq;
+ return rc;
}
- rc = request_any_context_irq(kp->key_stuck_irq, pmic8xxx_kp_stuck_irq,
- IRQF_TRIGGER_RISING, "pmic-keypad-stuck", kp);
+ rc = devm_request_any_context_irq(&pdev->dev, kp->key_stuck_irq,
+ pmic8xxx_kp_stuck_irq, IRQF_TRIGGER_RISING,
+ "pmic-keypad-stuck", kp);
if (rc < 0) {
dev_err(&pdev->dev, "failed to request keypad stuck irq\n");
- goto err_req_stuck_irq;
+ return rc;
}
- rc = pmic8xxx_kp_read_u8(kp, &ctrl_val, KEYP_CTRL);
+ rc = regmap_read(kp->regmap, KEYP_CTRL, &ctrl_val);
if (rc < 0) {
dev_err(&pdev->dev, "failed to read KEYP_CTRL register\n");
- goto err_pmic_reg_read;
+ return rc;
}
kp->ctrl_reg = ctrl_val;
@@ -692,34 +623,10 @@ static int pmic8xxx_kp_probe(struct platform_device *pdev)
rc = input_register_device(kp->input);
if (rc < 0) {
dev_err(&pdev->dev, "unable to register keypad input device\n");
- goto err_pmic_reg_read;
+ return rc;
}
- device_init_wakeup(&pdev->dev, pdata->wakeup);
-
- return 0;
-
-err_pmic_reg_read:
- free_irq(kp->key_stuck_irq, kp);
-err_req_stuck_irq:
- free_irq(kp->key_sense_irq, kp);
-err_gpio_config:
-err_get_irq:
- input_free_device(kp->input);
-err_alloc_device:
- kfree(kp);
- return rc;
-}
-
-static int pmic8xxx_kp_remove(struct platform_device *pdev)
-{
- struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
-
- device_init_wakeup(&pdev->dev, 0);
- free_irq(kp->key_stuck_irq, kp);
- free_irq(kp->key_sense_irq, kp);
- input_unregister_device(kp->input);
- kfree(kp);
+ device_init_wakeup(&pdev->dev, wakeup);
return 0;
}
@@ -769,13 +676,20 @@ static int pmic8xxx_kp_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pm8xxx_kp_pm_ops,
pmic8xxx_kp_suspend, pmic8xxx_kp_resume);
+static const struct of_device_id pm8xxx_match_table[] = {
+ { .compatible = "qcom,pm8058-keypad" },
+ { .compatible = "qcom,pm8921-keypad" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_match_table);
+
static struct platform_driver pmic8xxx_kp_driver = {
.probe = pmic8xxx_kp_probe,
- .remove = pmic8xxx_kp_remove,
.driver = {
- .name = PM8XXX_KEYPAD_DEV_NAME,
+ .name = "pm8xxx-keypad",
.owner = THIS_MODULE,
.pm = &pm8xxx_kp_pm_ops,
+ .of_match_table = pm8xxx_match_table,
},
};
module_platform_driver(pmic8xxx_kp_driver);
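
Two of the new device-tree properties parsed above map straight onto register fields: the scan delay must be a power of two because only its log2 fits the pause field (bits = fls(scan_delay_ms) - 1), and the row-hold time is converted from nanoseconds into cycles of the keypad scan clock, which the surrounding comments describe as 32 kHz. A small sketch of both conversions; the 32768 Hz value and the sample inputs are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define KEYP_CLOCK_FREQ	32768UL		/* assumed keypad scan clock, Hz */
#define NSEC_PER_SEC	1000000000UL

/* fls()-style helper: 1-based index of the highest set bit, 0 if none. */
static int fls_u32(uint32_t v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned long scan_delay_ms = 32;	/* must be a power of two */
	unsigned long row_hold_ns = 122071;	/* just over 4 clock periods */

	printf("scan pause field: %d\n", fls_u32(scan_delay_ms) - 1);	/* 5 */
	printf("row hold cycles:  %lu\n",
	       row_hold_ns * KEYP_CLOCK_FREQ / NSEC_PER_SEC);		/* 4 */
	return 0;
}
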
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
deleted file mode 100644
index 086511c2121b..000000000000
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Texas Instruments TNETV107X Keypad Driver
- *
- * Copyright (C) 2010 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/input.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/module.h>
-
-#define BITS(x) (BIT(x) - 1)
-
-#define KEYPAD_ROWS 9
-#define KEYPAD_COLS 9
-
-#define DEBOUNCE_MIN 0x400ul
-#define DEBOUNCE_MAX 0x3ffffffful
-
-struct keypad_regs {
- u32 rev;
- u32 mode;
- u32 mask;
- u32 pol;
- u32 dclock;
- u32 rclock;
- u32 stable_cnt;
- u32 in_en;
- u32 out;
- u32 out_en;
- u32 in;
- u32 lock;
- u32 pres[3];
-};
-
-#define keypad_read(kp, reg) __raw_readl(&(kp)->regs->reg)
-#define keypad_write(kp, reg, val) __raw_writel(val, &(kp)->regs->reg)
-
-struct keypad_data {
- struct input_dev *input_dev;
- struct resource *res;
- struct keypad_regs __iomem *regs;
- struct clk *clk;
- struct device *dev;
- spinlock_t lock;
- int irq_press;
- int irq_release;
- int rows, cols, row_shift;
- int debounce_ms, active_low;
- u32 prev_keys[3];
- unsigned short keycodes[];
-};
-
-static irqreturn_t keypad_irq(int irq, void *data)
-{
- struct keypad_data *kp = data;
- int i, bit, val, row, col, code;
- unsigned long flags;
- u32 curr_keys[3];
- u32 change;
-
- spin_lock_irqsave(&kp->lock, flags);
-
- memset(curr_keys, 0, sizeof(curr_keys));
- if (irq == kp->irq_press)
- for (i = 0; i < 3; i++)
- curr_keys[i] = keypad_read(kp, pres[i]);
-
- for (i = 0; i < 3; i++) {
- change = curr_keys[i] ^ kp->prev_keys[i];
-
- while (change) {
- bit = fls(change) - 1;
- change ^= BIT(bit);
- val = curr_keys[i] & BIT(bit);
- bit += i * 32;
- row = bit / KEYPAD_COLS;
- col = bit % KEYPAD_COLS;
-
- code = MATRIX_SCAN_CODE(row, col, kp->row_shift);
- input_event(kp->input_dev, EV_MSC, MSC_SCAN, code);
- input_report_key(kp->input_dev, kp->keycodes[code],
- val);
- }
- }
- input_sync(kp->input_dev);
- memcpy(kp->prev_keys, curr_keys, sizeof(curr_keys));
-
- if (irq == kp->irq_press)
- keypad_write(kp, lock, 0); /* Allow hardware updates */
-
- spin_unlock_irqrestore(&kp->lock, flags);
-
- return IRQ_HANDLED;
-}
-
-static int keypad_start(struct input_dev *dev)
-{
- struct keypad_data *kp = input_get_drvdata(dev);
- unsigned long mask, debounce, clk_rate_khz;
- unsigned long flags;
-
- clk_enable(kp->clk);
- clk_rate_khz = clk_get_rate(kp->clk) / 1000;
-
- spin_lock_irqsave(&kp->lock, flags);
-
- /* Initialize device registers */
- keypad_write(kp, mode, 0);
-
- mask = BITS(kp->rows) << KEYPAD_COLS;
- mask |= BITS(kp->cols);
- keypad_write(kp, mask, ~mask);
-
- keypad_write(kp, pol, kp->active_low ? 0 : 0x3ffff);
- keypad_write(kp, stable_cnt, 3);
-
- debounce = kp->debounce_ms * clk_rate_khz;
- debounce = clamp(debounce, DEBOUNCE_MIN, DEBOUNCE_MAX);
- keypad_write(kp, dclock, debounce);
- keypad_write(kp, rclock, 4 * debounce);
-
- keypad_write(kp, in_en, 1);
-
- spin_unlock_irqrestore(&kp->lock, flags);
-
- return 0;
-}
-
-static void keypad_stop(struct input_dev *dev)
-{
- struct keypad_data *kp = input_get_drvdata(dev);
-
- synchronize_irq(kp->irq_press);
- synchronize_irq(kp->irq_release);
- clk_disable(kp->clk);
-}
-
-static int keypad_probe(struct platform_device *pdev)
-{
- const struct matrix_keypad_platform_data *pdata;
- const struct matrix_keymap_data *keymap_data;
- struct device *dev = &pdev->dev;
- struct keypad_data *kp;
- int error = 0, sz, row_shift;
- u32 rev = 0;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(dev, "cannot find device data\n");
- return -EINVAL;
- }
-
- keymap_data = pdata->keymap_data;
- if (!keymap_data) {
- dev_err(dev, "cannot find keymap data\n");
- return -EINVAL;
- }
-
- row_shift = get_count_order(pdata->num_col_gpios);
- sz = offsetof(struct keypad_data, keycodes);
- sz += (pdata->num_row_gpios << row_shift) * sizeof(kp->keycodes[0]);
- kp = kzalloc(sz, GFP_KERNEL);
- if (!kp) {
- dev_err(dev, "cannot allocate device info\n");
- return -ENOMEM;
- }
-
- kp->dev = dev;
- kp->rows = pdata->num_row_gpios;
- kp->cols = pdata->num_col_gpios;
- kp->row_shift = row_shift;
- platform_set_drvdata(pdev, kp);
- spin_lock_init(&kp->lock);
-
- kp->irq_press = platform_get_irq_byname(pdev, "press");
- kp->irq_release = platform_get_irq_byname(pdev, "release");
- if (kp->irq_press < 0 || kp->irq_release < 0) {
- dev_err(dev, "cannot determine device interrupts\n");
- error = -ENODEV;
- goto error_res;
- }
-
- kp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!kp->res) {
- dev_err(dev, "cannot determine register area\n");
- error = -ENODEV;
- goto error_res;
- }
-
- if (!request_mem_region(kp->res->start, resource_size(kp->res),
- pdev->name)) {
- dev_err(dev, "cannot claim register memory\n");
- kp->res = NULL;
- error = -EINVAL;
- goto error_res;
- }
-
- kp->regs = ioremap(kp->res->start, resource_size(kp->res));
- if (!kp->regs) {
- dev_err(dev, "cannot map register memory\n");
- error = -ENOMEM;
- goto error_map;
- }
-
- kp->clk = clk_get(dev, NULL);
- if (IS_ERR(kp->clk)) {
- dev_err(dev, "cannot claim device clock\n");
- error = PTR_ERR(kp->clk);
- goto error_clk;
- }
-
- error = request_threaded_irq(kp->irq_press, NULL, keypad_irq,
- IRQF_ONESHOT, dev_name(dev), kp);
- if (error < 0) {
- dev_err(kp->dev, "Could not allocate keypad press key irq\n");
- goto error_irq_press;
- }
-
- error = request_threaded_irq(kp->irq_release, NULL, keypad_irq,
- IRQF_ONESHOT, dev_name(dev), kp);
- if (error < 0) {
- dev_err(kp->dev, "Could not allocate keypad release key irq\n");
- goto error_irq_release;
- }
-
- kp->input_dev = input_allocate_device();
- if (!kp->input_dev) {
- dev_err(dev, "cannot allocate input device\n");
- error = -ENOMEM;
- goto error_input;
- }
-
- kp->input_dev->name = pdev->name;
- kp->input_dev->dev.parent = &pdev->dev;
- kp->input_dev->open = keypad_start;
- kp->input_dev->close = keypad_stop;
-
- clk_enable(kp->clk);
- rev = keypad_read(kp, rev);
- kp->input_dev->id.bustype = BUS_HOST;
- kp->input_dev->id.product = ((rev >> 8) & 0x07);
- kp->input_dev->id.version = ((rev >> 16) & 0xfff);
- clk_disable(kp->clk);
-
- error = matrix_keypad_build_keymap(keymap_data, NULL,
- kp->rows, kp->cols,
- kp->keycodes, kp->input_dev);
- if (error) {
- dev_err(dev, "Failed to build keymap\n");
- goto error_reg;
- }
-
- if (!pdata->no_autorepeat)
- kp->input_dev->evbit[0] |= BIT_MASK(EV_REP);
- input_set_capability(kp->input_dev, EV_MSC, MSC_SCAN);
-
- input_set_drvdata(kp->input_dev, kp);
-
- error = input_register_device(kp->input_dev);
- if (error < 0) {
- dev_err(dev, "Could not register input device\n");
- goto error_reg;
- }
-
- return 0;
-
-
-error_reg:
- input_free_device(kp->input_dev);
-error_input:
- free_irq(kp->irq_release, kp);
-error_irq_release:
- free_irq(kp->irq_press, kp);
-error_irq_press:
- clk_put(kp->clk);
-error_clk:
- iounmap(kp->regs);
-error_map:
- release_mem_region(kp->res->start, resource_size(kp->res));
-error_res:
- kfree(kp);
- return error;
-}
-
-static int keypad_remove(struct platform_device *pdev)
-{
- struct keypad_data *kp = platform_get_drvdata(pdev);
-
- free_irq(kp->irq_press, kp);
- free_irq(kp->irq_release, kp);
- input_unregister_device(kp->input_dev);
- clk_put(kp->clk);
- iounmap(kp->regs);
- release_mem_region(kp->res->start, resource_size(kp->res));
- kfree(kp);
-
- return 0;
-}
-
-static struct platform_driver keypad_driver = {
- .probe = keypad_probe,
- .remove = keypad_remove,
- .driver.name = "tnetv107x-keypad",
- .driver.owner = THIS_MODULE,
-};
-module_platform_driver(keypad_driver);
-
-MODULE_AUTHOR("Cyril Chemparathy");
-MODULE_DESCRIPTION("TNETV107X Keypad Driver");
-MODULE_ALIAS("platform:tnetv107x-keypad");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7904ab05527a..5928ea71dd69 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -156,7 +156,7 @@ config INPUT_MAX8925_ONKEY
config INPUT_MAX8997_HAPTIC
tristate "MAXIM MAX8997 haptic controller support"
- depends on PWM && HAVE_PWM && MFD_MAX8997
+ depends on PWM && MFD_MAX8997
select INPUT_FF_MEMLESS
help
This option enables device driver support for the haptic controller
@@ -269,7 +269,7 @@ config INPUT_COBALT_BTNS
config INPUT_WISTRON_BTNS
tristate "x86 Wistron laptop button interface"
- depends on X86 && !X86_64
+ depends on X86_32
select INPUT_POLLDEV
select INPUT_SPARSEKMAP
select NEW_LEDS
@@ -470,7 +470,7 @@ config INPUT_PCF8574
config INPUT_PWM_BEEPER
tristate "PWM beeper support"
- depends on PWM && HAVE_PWM
+ depends on PWM
help
Say Y here to get support for PWM based beeper devices.
@@ -666,4 +666,14 @@ config INPUT_IDEAPAD_SLIDEBAR
To compile this driver as a module, choose M here: the
module will be called ideapad_slidebar.
+config INPUT_SOC_BUTTON_ARRAY
+ tristate "Windows-compatible SoC Button Array"
+ depends on KEYBOARD_GPIO
+ help
+ Say Y here if you have a SoC-based tablet that originally shipped
+ with Windows 8.
+
+ To compile this driver as a module, choose M here: the
+ module will be called soc_button_array.
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index cda71fc52fb3..4955ad322a01 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o
obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o
+obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o
obj-$(CONFIG_INPUT_TWL4030_VIBRA) += twl4030-vibra.o
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index e204f26b0011..5a736397d9c8 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -51,6 +51,8 @@ struct ims_pcu_backlight {
#define IMS_PCU_BL_VERSION_LEN (9 + 1)
#define IMS_PCU_BL_RESET_REASON_LEN (2 + 1)
+#define IMS_PCU_PCU_B_DEVICE_ID 5
+
#define IMS_PCU_BUF_SIZE 128
struct ims_pcu {
@@ -68,6 +70,9 @@ struct ims_pcu {
char bl_version[IMS_PCU_BL_VERSION_LEN];
char reset_reason[IMS_PCU_BL_RESET_REASON_LEN];
int update_firmware_status;
+ u8 device_id;
+
+ u8 ofn_reg_addr;
struct usb_interface *ctrl_intf;
@@ -371,6 +376,8 @@ static void ims_pcu_destroy_gamepad(struct ims_pcu *pcu)
#define IMS_PCU_CMD_GET_DEVICE_ID 0xae
#define IMS_PCU_CMD_SPECIAL_INFO 0xb0
#define IMS_PCU_CMD_BOOTLOADER 0xb1 /* Pass data to bootloader */
+#define IMS_PCU_CMD_OFN_SET_CONFIG 0xb3
+#define IMS_PCU_CMD_OFN_GET_CONFIG 0xb4
/* PCU responses */
#define IMS_PCU_RSP_STATUS 0xc0
@@ -389,6 +396,9 @@ static void ims_pcu_destroy_gamepad(struct ims_pcu *pcu)
#define IMS_PCU_RSP_GET_DEVICE_ID 0xce
#define IMS_PCU_RSP_SPECIAL_INFO 0xd0
#define IMS_PCU_RSP_BOOTLOADER 0xd1 /* Bootloader response */
+#define IMS_PCU_RSP_OFN_SET_CONFIG 0xd2
+#define IMS_PCU_RSP_OFN_GET_CONFIG 0xd3
+
#define IMS_PCU_RSP_EVNT_BUTTONS 0xe0 /* Unsolicited, button state */
#define IMS_PCU_GAMEPAD_MASK 0x0001ff80UL /* Bits 7 through 16 */
@@ -1256,6 +1266,225 @@ static struct attribute_group ims_pcu_attr_group = {
.attrs = ims_pcu_attrs,
};
+/* Support for a separate OFN attribute group */
+
+#define OFN_REG_RESULT_OFFSET 2
+
+static int ims_pcu_read_ofn_config(struct ims_pcu *pcu, u8 addr, u8 *data)
+{
+ int error;
+ s16 result;
+
+ error = ims_pcu_execute_command(pcu, OFN_GET_CONFIG,
+ &addr, sizeof(addr));
+ if (error)
+ return error;
+
+ result = (s16)get_unaligned_le16(pcu->cmd_buf + OFN_REG_RESULT_OFFSET);
+ if (result < 0)
+ return -EIO;
+
+ /* We only need LSB */
+ *data = pcu->cmd_buf[OFN_REG_RESULT_OFFSET];
+ return 0;
+}
+
+static int ims_pcu_write_ofn_config(struct ims_pcu *pcu, u8 addr, u8 data)
+{
+ u8 buffer[] = { addr, data };
+ int error;
+ s16 result;
+
+ error = ims_pcu_execute_command(pcu, OFN_SET_CONFIG,
+ &buffer, sizeof(buffer));
+ if (error)
+ return error;
+
+ result = (s16)get_unaligned_le16(pcu->cmd_buf + OFN_REG_RESULT_OFFSET);
+ if (result < 0)
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t ims_pcu_ofn_reg_data_show(struct device *dev,
+ struct device_attribute *dattr,
+ char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct ims_pcu *pcu = usb_get_intfdata(intf);
+ int error;
+ u8 data;
+
+ mutex_lock(&pcu->cmd_mutex);
+ error = ims_pcu_read_ofn_config(pcu, pcu->ofn_reg_addr, &data);
+ mutex_unlock(&pcu->cmd_mutex);
+
+ if (error)
+ return error;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", data);
+}
+
+static ssize_t ims_pcu_ofn_reg_data_store(struct device *dev,
+ struct device_attribute *dattr,
+ const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct ims_pcu *pcu = usb_get_intfdata(intf);
+ int error;
+ u8 value;
+
+ error = kstrtou8(buf, 0, &value);
+ if (error)
+ return error;
+
+ mutex_lock(&pcu->cmd_mutex);
+ error = ims_pcu_write_ofn_config(pcu, pcu->ofn_reg_addr, value);
+ mutex_unlock(&pcu->cmd_mutex);
+
+ return error ?: count;
+}
+
+static DEVICE_ATTR(reg_data, S_IRUGO | S_IWUSR,
+ ims_pcu_ofn_reg_data_show, ims_pcu_ofn_reg_data_store);
+
+static ssize_t ims_pcu_ofn_reg_addr_show(struct device *dev,
+ struct device_attribute *dattr,
+ char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct ims_pcu *pcu = usb_get_intfdata(intf);
+ int error;
+
+ mutex_lock(&pcu->cmd_mutex);
+ error = scnprintf(buf, PAGE_SIZE, "%x\n", pcu->ofn_reg_addr);
+ mutex_unlock(&pcu->cmd_mutex);
+
+ return error;
+}
+
+static ssize_t ims_pcu_ofn_reg_addr_store(struct device *dev,
+ struct device_attribute *dattr,
+ const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct ims_pcu *pcu = usb_get_intfdata(intf);
+ int error;
+ u8 value;
+
+ error = kstrtou8(buf, 0, &value);
+ if (error)
+ return error;
+
+ mutex_lock(&pcu->cmd_mutex);
+ pcu->ofn_reg_addr = value;
+ mutex_unlock(&pcu->cmd_mutex);
+
+ return error ?: count;
+}
+
+static DEVICE_ATTR(reg_addr, S_IRUGO | S_IWUSR,
+ ims_pcu_ofn_reg_addr_show, ims_pcu_ofn_reg_addr_store);
+
+struct ims_pcu_ofn_bit_attribute {
+ struct device_attribute dattr;
+ u8 addr;
+ u8 nr;
+};
+
+static ssize_t ims_pcu_ofn_bit_show(struct device *dev,
+ struct device_attribute *dattr,
+ char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct ims_pcu *pcu = usb_get_intfdata(intf);
+ struct ims_pcu_ofn_bit_attribute *attr =
+ container_of(dattr, struct ims_pcu_ofn_bit_attribute, dattr);
+ int error;
+ u8 data;
+
+ mutex_lock(&pcu->cmd_mutex);
+ error = ims_pcu_read_ofn_config(pcu, attr->addr, &data);
+ mutex_unlock(&pcu->cmd_mutex);
+
+ if (error)
+ return error;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!(data & (1 << attr->nr)));
+}
+
+static ssize_t ims_pcu_ofn_bit_store(struct device *dev,
+ struct device_attribute *dattr,
+ const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct ims_pcu *pcu = usb_get_intfdata(intf);
+ struct ims_pcu_ofn_bit_attribute *attr =
+ container_of(dattr, struct ims_pcu_ofn_bit_attribute, dattr);
+ int error;
+ int value;
+ u8 data;
+
+ error = kstrtoint(buf, 0, &value);
+ if (error)
+ return error;
+
+ if (value > 1)
+ return -EINVAL;
+
+ mutex_lock(&pcu->cmd_mutex);
+
+ error = ims_pcu_read_ofn_config(pcu, attr->addr, &data);
+ if (!error) {
+ if (value)
+ data |= 1U << attr->nr;
+ else
+ data &= ~(1U << attr->nr);
+
+ error = ims_pcu_write_ofn_config(pcu, attr->addr, data);
+ }
+
+ mutex_unlock(&pcu->cmd_mutex);
+
+ return error ?: count;
+}
+
+#define IMS_PCU_OFN_BIT_ATTR(_field, _addr, _nr) \
+struct ims_pcu_ofn_bit_attribute ims_pcu_ofn_attr_##_field = { \
+ .dattr = __ATTR(_field, S_IWUSR | S_IRUGO, \
+ ims_pcu_ofn_bit_show, ims_pcu_ofn_bit_store), \
+ .addr = _addr, \
+ .nr = _nr, \
+}
+
+static IMS_PCU_OFN_BIT_ATTR(engine_enable, 0x60, 7);
+static IMS_PCU_OFN_BIT_ATTR(speed_enable, 0x60, 6);
+static IMS_PCU_OFN_BIT_ATTR(assert_enable, 0x60, 5);
+static IMS_PCU_OFN_BIT_ATTR(xyquant_enable, 0x60, 4);
+static IMS_PCU_OFN_BIT_ATTR(xyscale_enable, 0x60, 1);
+
+static IMS_PCU_OFN_BIT_ATTR(scale_x2, 0x63, 6);
+static IMS_PCU_OFN_BIT_ATTR(scale_y2, 0x63, 7);
+
+static struct attribute *ims_pcu_ofn_attrs[] = {
+ &dev_attr_reg_data.attr,
+ &dev_attr_reg_addr.attr,
+ &ims_pcu_ofn_attr_engine_enable.dattr.attr,
+ &ims_pcu_ofn_attr_speed_enable.dattr.attr,
+ &ims_pcu_ofn_attr_assert_enable.dattr.attr,
+ &ims_pcu_ofn_attr_xyquant_enable.dattr.attr,
+ &ims_pcu_ofn_attr_xyscale_enable.dattr.attr,
+ &ims_pcu_ofn_attr_scale_x2.dattr.attr,
+ &ims_pcu_ofn_attr_scale_y2.dattr.attr,
+ NULL
+};
+
+static struct attribute_group ims_pcu_ofn_attr_group = {
+ .name = "ofn",
+ .attrs = ims_pcu_ofn_attrs,
+};
+
static void ims_pcu_irq(struct urb *urb)
{
struct ims_pcu *pcu = urb->context;
@@ -1624,7 +1853,6 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
static atomic_t device_no = ATOMIC_INIT(0);
const struct ims_pcu_device_info *info;
- u8 device_id;
int error;
error = ims_pcu_get_device_info(pcu);
@@ -1633,7 +1861,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
return error;
}
- error = ims_pcu_identify_type(pcu, &device_id);
+ error = ims_pcu_identify_type(pcu, &pcu->device_id);
if (error) {
dev_err(pcu->dev,
"Failed to identify device, error: %d\n", error);
@@ -1645,9 +1873,9 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
return 0;
}
- if (device_id >= ARRAY_SIZE(ims_pcu_device_info) ||
- !ims_pcu_device_info[device_id].keymap) {
- dev_err(pcu->dev, "Device ID %d is not valid\n", device_id);
+ if (pcu->device_id >= ARRAY_SIZE(ims_pcu_device_info) ||
+ !ims_pcu_device_info[pcu->device_id].keymap) {
+ dev_err(pcu->dev, "Device ID %d is not valid\n", pcu->device_id);
/* Same as above, punt to userspace */
return 0;
}
@@ -1655,11 +1883,21 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
/* Device appears to be operable, complete initialization */
pcu->device_no = atomic_inc_return(&device_no) - 1;
+ /*
+ * PCU-B devices, both GEN_1 and GEN_2, do not have an OFN sensor
+ */
+ if (pcu->device_id != IMS_PCU_PCU_B_DEVICE_ID) {
+ error = sysfs_create_group(&pcu->dev->kobj,
+ &ims_pcu_ofn_attr_group);
+ if (error)
+ return error;
+ }
+
error = ims_pcu_setup_backlight(pcu);
if (error)
return error;
- info = &ims_pcu_device_info[device_id];
+ info = &ims_pcu_device_info[pcu->device_id];
error = ims_pcu_setup_buttons(pcu, info->keymap, info->keymap_len);
if (error)
goto err_destroy_backlight;
@@ -1674,10 +1912,10 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
return 0;
-err_destroy_backlight:
- ims_pcu_destroy_backlight(pcu);
err_destroy_buttons:
ims_pcu_destroy_buttons(pcu);
+err_destroy_backlight:
+ ims_pcu_destroy_backlight(pcu);
return error;
}
@@ -1691,6 +1929,10 @@ static void ims_pcu_destroy_application_mode(struct ims_pcu *pcu)
ims_pcu_destroy_gamepad(pcu);
ims_pcu_destroy_buttons(pcu);
ims_pcu_destroy_backlight(pcu);
+
+ if (pcu->device_id != IMS_PCU_PCU_B_DEVICE_ID)
+ sysfs_remove_group(&pcu->dev->kobj,
+ &ims_pcu_ofn_attr_group);
}
}
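
Each of the OFN bit attributes added above goes through the same read-modify-write: fetch the config register, set or clear one bit, write it back, all while holding the command mutex so concurrent sysfs writers cannot interleave. A hedged sketch of that pattern with a plain pthread mutex standing in for cmd_mutex and a fake register instead of the USB command round-trip; nothing below is the driver's API:

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static uint8_t fake_reg = 0x41;		/* simulated OFN config register */

static int read_cfg(uint8_t *val)  { *val = fake_reg; return 0; }
static int write_cfg(uint8_t val)  { fake_reg = val;  return 0; }

/* Set or clear a single bit with the whole read-modify-write serialized. */
static int set_cfg_bit(unsigned int nr, int on)
{
	uint8_t data;
	int err;

	pthread_mutex_lock(&cfg_lock);
	err = read_cfg(&data);
	if (!err) {
		if (on)
			data |= 1U << nr;
		else
			data &= ~(1U << nr);
		err = write_cfg(data);
	}
	pthread_mutex_unlock(&cfg_lock);

	return err;
}

int main(void)
{
	set_cfg_bit(7, 1);		/* e.g. an "engine enable" bit */
	set_cfg_bit(0, 0);
	printf("0x%02x\n", fake_reg);	/* 0xc0 */
	return 0;
}
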
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 17ccba88d636..ed8e5e8449d3 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -67,7 +67,7 @@ static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned
}
if (value > 20 && value < 32767)
- count = (IXP4XX_TIMER_FREQ / (value * 4)) - 1;
+ count = (ixp4xx_timer_freq / (value * 4)) - 1;
ixp4xx_spkr_control(pin, count);
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index b88b7cbf93e2..6a915ba31bba 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -142,7 +142,6 @@ static int pm8xxx_vib_play_effect(struct input_dev *dev, void *data,
}
static int pm8xxx_vib_probe(struct platform_device *pdev)
-
{
struct pm8xxx_vib *vib;
struct input_dev *input_dev;
@@ -214,12 +213,20 @@ static int pm8xxx_vib_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(pm8xxx_vib_pm_ops, pm8xxx_vib_suspend, NULL);
+static const struct of_device_id pm8xxx_vib_id_table[] = {
+ { .compatible = "qcom,pm8058-vib" },
+ { .compatible = "qcom,pm8921-vib" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_vib_id_table);
+
static struct platform_driver pm8xxx_vib_driver = {
.probe = pm8xxx_vib_probe,
.driver = {
.name = "pm8xxx-vib",
.owner = THIS_MODULE,
.pm = &pm8xxx_vib_pm_ops,
+ .of_match_table = pm8xxx_vib_id_table,
},
};
module_platform_driver(pm8xxx_vib_driver);
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 0e1a05f95858..1cb8fda7a166 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -19,8 +19,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/log2.h>
-
-#include <linux/input/pmic8xxx-pwrkey.h>
+#include <linux/of.h>
#define PON_CNTL_1 0x1C
#define PON_CNTL_PULL_UP BIT(7)
@@ -89,15 +88,15 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
unsigned int pon_cntl;
struct regmap *regmap;
struct pmic8xxx_pwrkey *pwrkey;
- const struct pm8xxx_pwrkey_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ u32 kpd_delay;
+ bool pull_up;
- if (!pdata) {
- dev_err(&pdev->dev, "power key platform data not supplied\n");
- return -EINVAL;
- }
+ if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
+ kpd_delay = 0;
- if (pdata->kpd_trigger_delay_us > 62500) {
+ pull_up = of_property_read_bool(pdev->dev.of_node, "pull-up");
+
+ if (kpd_delay > 62500) {
dev_err(&pdev->dev, "invalid power key trigger delay\n");
return -EINVAL;
}
@@ -125,7 +124,7 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
pwr->name = "pmic8xxx_pwrkey";
pwr->phys = "pmic8xxx_pwrkey/input0";
- delay = (pdata->kpd_trigger_delay_us << 10) / USEC_PER_SEC;
+ delay = (kpd_delay << 10) / USEC_PER_SEC;
delay = 1 + ilog2(delay);
err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
@@ -136,7 +135,7 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
pon_cntl &= ~PON_CNTL_TRIG_DELAY_MASK;
pon_cntl |= (delay & PON_CNTL_TRIG_DELAY_MASK);
- if (pdata->pull_up)
+ if (pull_up)
pon_cntl |= PON_CNTL_PULL_UP;
else
pon_cntl &= ~PON_CNTL_PULL_UP;
@@ -172,7 +171,7 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, pwrkey);
- device_init_wakeup(&pdev->dev, pdata->wakeup);
+ device_init_wakeup(&pdev->dev, 1);
return 0;
}
@@ -184,13 +183,21 @@ static int pmic8xxx_pwrkey_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id pm8xxx_pwr_key_id_table[] = {
+ { .compatible = "qcom,pm8058-pwrkey" },
+ { .compatible = "qcom,pm8921-pwrkey" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_pwr_key_id_table);
+
static struct platform_driver pmic8xxx_pwrkey_driver = {
.probe = pmic8xxx_pwrkey_probe,
.remove = pmic8xxx_pwrkey_remove,
.driver = {
- .name = PM8XXX_PWRKEY_DEV_NAME,
+ .name = "pm8xxx-pwrkey",
.owner = THIS_MODULE,
.pm = &pm8xxx_pwr_key_pm_ops,
+ .of_match_table = pm8xxx_pwr_key_id_table,
},
};
module_platform_driver(pmic8xxx_pwrkey_driver);
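The pwrkey conversion above drops the platform-data struct in favour of optional devicetree properties: a "debounce" value that falls back to 0 when absent and a boolean "pull-up". A minimal kernel-style sketch of the same pattern, written for a hypothetical driver, is shown below; only of_property_read_u32() and of_property_read_bool() are taken from the patch itself.

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_parse_dt(struct platform_device *pdev,
			    u32 *debounce_us, bool *pull_up)
{
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return -ENODEV;

	/* Optional property: missing means 0, as in the pwrkey probe. */
	if (of_property_read_u32(np, "debounce", debounce_us))
		*debounce_us = 0;

	/* Boolean property: presence means true. */
	*pull_up = of_property_read_bool(np, "pull-up");

	return 0;
}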
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index e8897c36d21b..e4104f9b2e6d 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -1,7 +1,8 @@
/*
* Power key driver for SiRF PrimaII
*
- * Copyright (c) 2013 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2013 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
*
* Licensed under GPLv2 or later.
*/
@@ -13,16 +14,41 @@
#include <linux/input.h>
#include <linux/rtc/sirfsoc_rtciobrg.h>
#include <linux/of.h>
+#include <linux/workqueue.h>
struct sirfsoc_pwrc_drvdata {
u32 pwrc_base;
struct input_dev *input;
+ struct delayed_work work;
};
#define PWRC_ON_KEY_BIT (1 << 0)
#define PWRC_INT_STATUS 0xc
#define PWRC_INT_MASK 0x10
+#define PWRC_PIN_STATUS 0x14
+#define PWRC_KEY_DETECT_UP_TIME 20 /* ms*/
+
+static int sirfsoc_pwrc_is_on_key_down(struct sirfsoc_pwrc_drvdata *pwrcdrv)
+{
+ u32 state = sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base +
+ PWRC_PIN_STATUS);
+ return !(state & PWRC_ON_KEY_BIT); /* ON_KEY is active low */
+}
+
+static void sirfsoc_pwrc_report_event(struct work_struct *work)
+{
+ struct sirfsoc_pwrc_drvdata *pwrcdrv =
+ container_of(work, struct sirfsoc_pwrc_drvdata, work.work);
+
+ if (sirfsoc_pwrc_is_on_key_down(pwrcdrv)) {
+ schedule_delayed_work(&pwrcdrv->work,
+ msecs_to_jiffies(PWRC_KEY_DETECT_UP_TIME));
+ } else {
+ input_event(pwrcdrv->input, EV_KEY, KEY_POWER, 0);
+ input_sync(pwrcdrv->input);
+ }
+}
static irqreturn_t sirfsoc_pwrc_isr(int irq, void *dev_id)
{
@@ -34,21 +60,44 @@ static irqreturn_t sirfsoc_pwrc_isr(int irq, void *dev_id)
sirfsoc_rtc_iobrg_writel(int_status & ~PWRC_ON_KEY_BIT,
pwrcdrv->pwrc_base + PWRC_INT_STATUS);
- /*
- * For a typical Linux system, we report KEY_SUSPEND to trigger apm-power.c
- * to queue a SUSPEND APM event
- */
- input_event(pwrcdrv->input, EV_PWR, KEY_SUSPEND, 1);
+ input_event(pwrcdrv->input, EV_KEY, KEY_POWER, 1);
input_sync(pwrcdrv->input);
-
- /*
- * Todo: report KEY_POWER event for Android platforms, Android PowerManager
- * will handle the suspend and powerdown/hibernation
- */
+ schedule_delayed_work(&pwrcdrv->work,
+ msecs_to_jiffies(PWRC_KEY_DETECT_UP_TIME));
return IRQ_HANDLED;
}
+static void sirfsoc_pwrc_toggle_interrupts(struct sirfsoc_pwrc_drvdata *pwrcdrv,
+ bool enable)
+{
+ u32 int_mask;
+
+ int_mask = sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base + PWRC_INT_MASK);
+ if (enable)
+ int_mask |= PWRC_ON_KEY_BIT;
+ else
+ int_mask &= ~PWRC_ON_KEY_BIT;
+ sirfsoc_rtc_iobrg_writel(int_mask, pwrcdrv->pwrc_base + PWRC_INT_MASK);
+}
+
+static int sirfsoc_pwrc_open(struct input_dev *input)
+{
+ struct sirfsoc_pwrc_drvdata *pwrcdrv = input_get_drvdata(input);
+
+ sirfsoc_pwrc_toggle_interrupts(pwrcdrv, true);
+
+ return 0;
+}
+
+static void sirfsoc_pwrc_close(struct input_dev *input)
+{
+ struct sirfsoc_pwrc_drvdata *pwrcdrv = input_get_drvdata(input);
+
+ sirfsoc_pwrc_toggle_interrupts(pwrcdrv, false);
+ cancel_delayed_work_sync(&pwrcdrv->work);
+}
+
static const struct of_device_id sirfsoc_pwrc_of_match[] = {
{ .compatible = "sirf,prima2-pwrc" },
{},
@@ -70,7 +119,7 @@ static int sirfsoc_pwrc_probe(struct platform_device *pdev)
}
/*
- * we can't use of_iomap because pwrc is not mapped in memory,
+ * We can't use of_iomap because pwrc is not mapped in memory,
* the so-called base address is only offset in rtciobrg
*/
error = of_property_read_u32(np, "reg", &pwrcdrv->pwrc_base);
@@ -86,11 +135,22 @@ static int sirfsoc_pwrc_probe(struct platform_device *pdev)
pwrcdrv->input->name = "sirfsoc pwrckey";
pwrcdrv->input->phys = "pwrc/input0";
- pwrcdrv->input->evbit[0] = BIT_MASK(EV_PWR);
+ pwrcdrv->input->evbit[0] = BIT_MASK(EV_KEY);
+ input_set_capability(pwrcdrv->input, EV_KEY, KEY_POWER);
+
+ INIT_DELAYED_WORK(&pwrcdrv->work, sirfsoc_pwrc_report_event);
+
+ pwrcdrv->input->open = sirfsoc_pwrc_open;
+ pwrcdrv->input->close = sirfsoc_pwrc_close;
+
+ input_set_drvdata(pwrcdrv->input, pwrcdrv);
+
+ /* Make sure the device is quiesced */
+ sirfsoc_pwrc_toggle_interrupts(pwrcdrv, false);
irq = platform_get_irq(pdev, 0);
error = devm_request_irq(&pdev->dev, irq,
- sirfsoc_pwrc_isr, IRQF_SHARED,
+ sirfsoc_pwrc_isr, 0,
"sirfsoc_pwrc_int", pwrcdrv);
if (error) {
dev_err(&pdev->dev, "unable to claim irq %d, error: %d\n",
@@ -98,11 +158,6 @@ static int sirfsoc_pwrc_probe(struct platform_device *pdev)
return error;
}
- sirfsoc_rtc_iobrg_writel(
- sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base + PWRC_INT_MASK) |
- PWRC_ON_KEY_BIT,
- pwrcdrv->pwrc_base + PWRC_INT_MASK);
-
error = input_register_device(pwrcdrv->input);
if (error) {
dev_err(&pdev->dev,
@@ -111,7 +166,7 @@ static int sirfsoc_pwrc_probe(struct platform_device *pdev)
return error;
}
- platform_set_drvdata(pdev, pwrcdrv);
+ dev_set_drvdata(&pdev->dev, pwrcdrv);
device_init_wakeup(&pdev->dev, 1);
return 0;
@@ -125,25 +180,25 @@ static int sirfsoc_pwrc_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int pwrc_resume(struct device *dev)
+static int sirfsoc_pwrc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sirfsoc_pwrc_drvdata *pwrcdrv = platform_get_drvdata(pdev);
+ struct sirfsoc_pwrc_drvdata *pwrcdrv = dev_get_drvdata(dev);
+ struct input_dev *input = pwrcdrv->input;
/*
* Do not mask pwrc interrupt as we want pwrc work as a wakeup source
* if users touch X_ONKEY_B, see arch/arm/mach-prima2/pm.c
*/
- sirfsoc_rtc_iobrg_writel(
- sirfsoc_rtc_iobrg_readl(
- pwrcdrv->pwrc_base + PWRC_INT_MASK) | PWRC_ON_KEY_BIT,
- pwrcdrv->pwrc_base + PWRC_INT_MASK);
+ mutex_lock(&input->mutex);
+ if (input->users)
+ sirfsoc_pwrc_toggle_interrupts(pwrcdrv, true);
+ mutex_unlock(&input->mutex);
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(sirfsoc_pwrc_pm_ops, NULL, pwrc_resume);
+static SIMPLE_DEV_PM_OPS(sirfsoc_pwrc_pm_ops, NULL, sirfsoc_pwrc_resume);
static struct platform_driver sirfsoc_pwrc_driver = {
.probe = sirfsoc_pwrc_probe,
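The SiRF hardware only interrupts on key-down, so the patch above reports the press from the ISR and detects the release by re-polling the pin from delayed work every 20 ms. A reduced kernel-style sketch of that scheme follows; the names and the poll hook are hypothetical stand-ins.

#include <linux/input.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_onkey {
	struct input_dev *input;
	struct delayed_work work;
	bool (*key_is_down)(struct example_onkey *onkey);	/* HW poll hook */
};

/* Runs every 20 ms after a press until the key reads "up" again. */
static void example_onkey_poll(struct work_struct *work)
{
	struct example_onkey *onkey =
		container_of(work, struct example_onkey, work.work);

	if (onkey->key_is_down(onkey)) {
		schedule_delayed_work(&onkey->work, msecs_to_jiffies(20));
	} else {
		input_report_key(onkey->input, KEY_POWER, 0);
		input_sync(onkey->input);
	}
}

/* The interrupt handler reports the press and arms the poll. */
static void example_onkey_pressed(struct example_onkey *onkey)
{
	input_report_key(onkey->input, KEY_POWER, 1);
	input_sync(onkey->input);
	schedule_delayed_work(&onkey->work, msecs_to_jiffies(20));
}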
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
new file mode 100644
index 000000000000..08ead2aaede5
--- /dev/null
+++ b/drivers/input/misc/soc_button_array.c
@@ -0,0 +1,218 @@
+/*
+ * Supports for the button array on SoC tablets originally running
+ * Windows 8.
+ *
+ * (C) Copyright 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/pnp.h>
+
+/*
+ * Definition of buttons on the tablet. The ACPI index of each button
+ * is defined in section 2.8.7.2 of "Windows ACPI Design Guide for SoC
+ * Platforms"
+ */
+#define MAX_NBUTTONS 5
+
+struct soc_button_info {
+ const char *name;
+ int acpi_index;
+ unsigned int event_type;
+ unsigned int event_code;
+ bool autorepeat;
+ bool wakeup;
+};
+
+/*
+ * Some of the buttons like volume up/down are auto repeat, while others
+ * are not. To support both, we register two platform devices, and put
+ * buttons into them based on whether the key should be auto repeat.
+ */
+#define BUTTON_TYPES 2
+
+struct soc_button_data {
+ struct platform_device *children[BUTTON_TYPES];
+};
+
+/*
+ * Get the Nth GPIO number from the ACPI object.
+ */
+static int soc_button_lookup_gpio(struct device *dev, int acpi_index)
+{
+ struct gpio_desc *desc;
+ int gpio;
+
+ desc = gpiod_get_index(dev, KBUILD_MODNAME, acpi_index);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ gpio = desc_to_gpio(desc);
+
+ gpiod_put(desc);
+
+ return gpio;
+}
+
+static struct platform_device *
+soc_button_device_create(struct pnp_dev *pdev,
+ const struct soc_button_info *button_info,
+ bool autorepeat)
+{
+ const struct soc_button_info *info;
+ struct platform_device *pd;
+ struct gpio_keys_button *gpio_keys;
+ struct gpio_keys_platform_data *gpio_keys_pdata;
+ int n_buttons = 0;
+ int gpio;
+ int error;
+
+ gpio_keys_pdata = devm_kzalloc(&pdev->dev,
+ sizeof(*gpio_keys_pdata) +
+ sizeof(*gpio_keys) * MAX_NBUTTONS,
+ GFP_KERNEL);
+ gpio_keys = (void *)(gpio_keys_pdata + 1);
+
+ for (info = button_info; info->name; info++) {
+ if (info->autorepeat != autorepeat)
+ continue;
+
+ gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
+ if (gpio < 0)
+ continue;
+
+ gpio_keys[n_buttons].type = info->event_type;
+ gpio_keys[n_buttons].code = info->event_code;
+ gpio_keys[n_buttons].gpio = gpio;
+ gpio_keys[n_buttons].active_low = 1;
+ gpio_keys[n_buttons].desc = info->name;
+ gpio_keys[n_buttons].wakeup = info->wakeup;
+ n_buttons++;
+ }
+
+ if (n_buttons == 0) {
+ error = -ENODEV;
+ goto err_free_mem;
+ }
+
+ gpio_keys_pdata->buttons = gpio_keys;
+ gpio_keys_pdata->nbuttons = n_buttons;
+ gpio_keys_pdata->rep = autorepeat;
+
+ pd = platform_device_alloc("gpio-keys", PLATFORM_DEVID_AUTO);
+ if (!pd) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ error = platform_device_add_data(pd, gpio_keys_pdata,
+ sizeof(*gpio_keys_pdata));
+ if (error)
+ goto err_free_pdev;
+
+ error = platform_device_add(pd);
+ if (error)
+ goto err_free_pdev;
+
+ return pd;
+
+err_free_pdev:
+ platform_device_put(pd);
+err_free_mem:
+ devm_kfree(&pdev->dev, gpio_keys_pdata);
+ return ERR_PTR(error);
+}
+
+static void soc_button_remove(struct pnp_dev *pdev)
+{
+ struct soc_button_data *priv = pnp_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < BUTTON_TYPES; i++)
+ if (priv->children[i])
+ platform_device_unregister(priv->children[i]);
+}
+
+static int soc_button_pnp_probe(struct pnp_dev *pdev,
+ const struct pnp_device_id *id)
+{
+ const struct soc_button_info *button_info = (void *)id->driver_data;
+ struct soc_button_data *priv;
+ struct platform_device *pd;
+ int i;
+ int error;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ pnp_set_drvdata(pdev, priv);
+
+ for (i = 0; i < BUTTON_TYPES; i++) {
+ pd = soc_button_device_create(pdev, button_info, i == 0);
+ if (IS_ERR(pd)) {
+ error = PTR_ERR(pd);
+ if (error != -ENODEV) {
+ soc_button_remove(pdev);
+ return error;
+ }
+ }
+
+ priv->children[i] = pd;
+ }
+
+ if (!priv->children[0] && !priv->children[1])
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct soc_button_info soc_button_PNP0C40[] = {
+ { "power", 0, EV_KEY, KEY_POWER, false, true },
+ { "home", 1, EV_KEY, KEY_HOME, false, true },
+ { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
+ { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
+ { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
+ { }
+};
+
+static const struct pnp_device_id soc_button_pnp_match[] = {
+ { .id = "PNP0C40", .driver_data = (long)soc_button_PNP0C40 },
+ { .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, soc_button_pnp_match);
+
+static struct pnp_driver soc_button_pnp_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = soc_button_pnp_match,
+ .probe = soc_button_pnp_probe,
+ .remove = soc_button_remove,
+};
+
+static int __init soc_button_init(void)
+{
+ return pnp_register_driver(&soc_button_pnp_driver);
+}
+
+static void __exit soc_button_exit(void)
+{
+ pnp_unregister_driver(&soc_button_pnp_driver);
+}
+
+module_init(soc_button_init);
+module_exit(soc_button_exit);
+
+MODULE_LICENSE("GPL");
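soc_button_device_create() above places the gpio_keys button array directly behind the platform-data header in a single allocation (gpio_keys = (void *)(gpio_keys_pdata + 1)). A small stand-alone illustration of that layout trick, using hypothetical userspace struct names, is given below.

#include <stdio.h>
#include <stdlib.h>

struct header { size_t nbuttons; };
struct button { int code; };

int main(void)
{
	size_t n = 5;
	/* One allocation covers the header and the trailing array. */
	struct header *hdr = calloc(1, sizeof(*hdr) + n * sizeof(struct button));
	struct button *btn;

	if (!hdr)
		return 1;

	btn = (struct button *)(hdr + 1);	/* array starts right after the header */
	hdr->nbuttons = n;
	for (size_t i = 0; i < n; i++)
		btn[i].code = (int)i;

	printf("%zu buttons, header at %p, first button at %p\n",
	       hdr->nbuttons, (void *)hdr, (void *)&btn[0]);
	free(hdr);	/* one free releases both */
	return 0;
}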
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 772835938a52..856936247500 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -20,6 +20,8 @@
* Author: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
*
* Changes/Revisions:
+ * 0.4 01/09/2014 (Benjamin Tissoires <benjamin.tissoires@redhat.com>)
+ * - add UI_GET_SYSNAME ioctl
* 0.3 09/04/2006 (Anssi Hannula <anssi.hannula@gmail.com>)
* - updated ff support for the changes in kernel interface
* - added MODULE_VERSION
@@ -670,6 +672,31 @@ static int uinput_ff_upload_from_user(const char __user *buffer,
__ret; \
})
+static int uinput_str_to_user(void __user *dest, const char *str,
+ unsigned int maxlen)
+{
+ char __user *p = dest;
+ int len, ret;
+
+ if (!str)
+ return -ENOENT;
+
+ if (maxlen == 0)
+ return -EINVAL;
+
+ len = strlen(str) + 1;
+ if (len > maxlen)
+ len = maxlen;
+
+ ret = copy_to_user(p, str, len);
+ if (ret)
+ return -EFAULT;
+
+ /* force terminating '\0' */
+ ret = put_user(0, p + len - 1);
+ return ret ? -EFAULT : len;
+}
+
static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
unsigned long arg, void __user *p)
{
@@ -679,6 +706,8 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
struct uinput_ff_erase ff_erase;
struct uinput_request *req;
char *phys;
+ const char *name;
+ unsigned int size;
retval = mutex_lock_interruptible(&udev->mutex);
if (retval)
@@ -693,51 +722,51 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
switch (cmd) {
case UI_DEV_CREATE:
retval = uinput_create_device(udev);
- break;
+ goto out;
case UI_DEV_DESTROY:
uinput_destroy_device(udev);
- break;
+ goto out;
case UI_SET_EVBIT:
retval = uinput_set_bit(arg, evbit, EV_MAX);
- break;
+ goto out;
case UI_SET_KEYBIT:
retval = uinput_set_bit(arg, keybit, KEY_MAX);
- break;
+ goto out;
case UI_SET_RELBIT:
retval = uinput_set_bit(arg, relbit, REL_MAX);
- break;
+ goto out;
case UI_SET_ABSBIT:
retval = uinput_set_bit(arg, absbit, ABS_MAX);
- break;
+ goto out;
case UI_SET_MSCBIT:
retval = uinput_set_bit(arg, mscbit, MSC_MAX);
- break;
+ goto out;
case UI_SET_LEDBIT:
retval = uinput_set_bit(arg, ledbit, LED_MAX);
- break;
+ goto out;
case UI_SET_SNDBIT:
retval = uinput_set_bit(arg, sndbit, SND_MAX);
- break;
+ goto out;
case UI_SET_FFBIT:
retval = uinput_set_bit(arg, ffbit, FF_MAX);
- break;
+ goto out;
case UI_SET_SWBIT:
retval = uinput_set_bit(arg, swbit, SW_MAX);
- break;
+ goto out;
case UI_SET_PROPBIT:
retval = uinput_set_bit(arg, propbit, INPUT_PROP_MAX);
- break;
+ goto out;
case UI_SET_PHYS:
if (udev->state == UIST_CREATED) {
@@ -753,18 +782,18 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
kfree(udev->dev->phys);
udev->dev->phys = phys;
- break;
+ goto out;
case UI_BEGIN_FF_UPLOAD:
retval = uinput_ff_upload_from_user(p, &ff_up);
if (retval)
- break;
+ goto out;
req = uinput_request_find(udev, ff_up.request_id);
if (!req || req->code != UI_FF_UPLOAD ||
!req->u.upload.effect) {
retval = -EINVAL;
- break;
+ goto out;
}
ff_up.retval = 0;
@@ -775,65 +804,77 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
memset(&ff_up.old, 0, sizeof(struct ff_effect));
retval = uinput_ff_upload_to_user(p, &ff_up);
- break;
+ goto out;
case UI_BEGIN_FF_ERASE:
if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
retval = -EFAULT;
- break;
+ goto out;
}
req = uinput_request_find(udev, ff_erase.request_id);
if (!req || req->code != UI_FF_ERASE) {
retval = -EINVAL;
- break;
+ goto out;
}
ff_erase.retval = 0;
ff_erase.effect_id = req->u.effect_id;
if (copy_to_user(p, &ff_erase, sizeof(ff_erase))) {
retval = -EFAULT;
- break;
+ goto out;
}
- break;
+ goto out;
case UI_END_FF_UPLOAD:
retval = uinput_ff_upload_from_user(p, &ff_up);
if (retval)
- break;
+ goto out;
req = uinput_request_find(udev, ff_up.request_id);
if (!req || req->code != UI_FF_UPLOAD ||
!req->u.upload.effect) {
retval = -EINVAL;
- break;
+ goto out;
}
req->retval = ff_up.retval;
uinput_request_done(udev, req);
- break;
+ goto out;
case UI_END_FF_ERASE:
if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
retval = -EFAULT;
- break;
+ goto out;
}
req = uinput_request_find(udev, ff_erase.request_id);
if (!req || req->code != UI_FF_ERASE) {
retval = -EINVAL;
- break;
+ goto out;
}
req->retval = ff_erase.retval;
uinput_request_done(udev, req);
- break;
+ goto out;
+ }
- default:
- retval = -EINVAL;
+ size = _IOC_SIZE(cmd);
+
+ /* Now check variable-length commands */
+ switch (cmd & ~IOCSIZE_MASK) {
+ case UI_GET_SYSNAME(0):
+ if (udev->state != UIST_CREATED) {
+ retval = -ENOENT;
+ goto out;
+ }
+ name = dev_name(&udev->dev->dev);
+ retval = uinput_str_to_user(p, name, size);
+ goto out;
}
+ retval = -EINVAL;
out:
mutex_unlock(&udev->mutex);
return retval;
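The new UI_GET_SYSNAME ioctl above encodes the caller's buffer size in the ioctl number and copies back the kernel name of the created uinput device. A minimal userspace sketch of how a caller might use it is shown below; device configuration and error handling are omitted.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/uinput.h>

int main(void)
{
	char sysname[64];
	int fd = open("/dev/uinput", O_RDWR);

	if (fd < 0)
		return 1;

	/*
	 * Real code would configure the device (UI_SET_EVBIT, ...) and
	 * issue UI_DEV_CREATE here; UI_GET_SYSNAME returns -ENOENT until
	 * the device has actually been created.
	 */

	if (ioctl(fd, UI_GET_SYSNAME(sizeof(sysname)), sysname) >= 0)
		printf("kernel name of the new device: %s\n", sysname);

	close(fd);
	return 0;
}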
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index b6505454bcc4..7b7add5061a5 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -277,6 +277,16 @@ static struct key_entry keymap_fs_amilo_pro_v3505[] __initdata = {
{ KE_END, 0 }
};
+static struct key_entry keymap_fs_amilo_pro_v8210[] __initdata = {
+ { KE_KEY, 0x01, {KEY_HELP} }, /* Fn+F1 */
+ { KE_KEY, 0x06, {KEY_DISPLAYTOGGLE} }, /* Fn+F4 */
+ { KE_BLUETOOTH, 0x30 }, /* Fn+F10 */
+ { KE_KEY, 0x31, {KEY_MAIL} }, /* mail button */
+ { KE_KEY, 0x36, {KEY_WWW} }, /* www button */
+ { KE_WIFI, 0x78 }, /* satellite dish button */
+ { KE_END, FE_WIFI_LED }
+};
+
static struct key_entry keymap_fujitsu_n3510[] __initdata = {
{ KE_KEY, 0x11, {KEY_PROG1} },
{ KE_KEY, 0x12, {KEY_PROG2} },
@@ -654,6 +664,15 @@ static const struct dmi_system_id dmi_ids[] __initconst = {
.driver_data = keymap_fs_amilo_pro_v3505
},
{
+ /* Fujitsu-Siemens Amilo Pro Edition V8210 */
+ .callback = dmi_matched,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro Series V8210"),
+ },
+ .driver_data = keymap_fs_amilo_pro_v8210
+ },
+ {
/* Fujitsu-Siemens Amilo M7400 */
.callback = dmi_matched,
.matches = {
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 800ca7dfafc2..ef234c9b2f2f 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -48,6 +48,7 @@ struct atp_info {
int yfact; /* Y multiplication factor */
int datalen; /* size of USB transfers */
void (*callback)(struct urb *); /* callback function */
+ int fuzz; /* fuzz touchpad generates */
};
static void atp_complete_geyser_1_2(struct urb *urb);
@@ -61,6 +62,7 @@ static const struct atp_info fountain_info = {
.yfact = 43,
.datalen = 81,
.callback = atp_complete_geyser_1_2,
+ .fuzz = 16,
};
static const struct atp_info geyser1_info = {
@@ -71,6 +73,7 @@ static const struct atp_info geyser1_info = {
.yfact = 43,
.datalen = 81,
.callback = atp_complete_geyser_1_2,
+ .fuzz = 16,
};
static const struct atp_info geyser2_info = {
@@ -81,6 +84,7 @@ static const struct atp_info geyser2_info = {
.yfact = 43,
.datalen = 64,
.callback = atp_complete_geyser_1_2,
+ .fuzz = 0,
};
static const struct atp_info geyser3_info = {
@@ -90,6 +94,7 @@ static const struct atp_info geyser3_info = {
.yfact = 64,
.datalen = 64,
.callback = atp_complete_geyser_3_4,
+ .fuzz = 0,
};
static const struct atp_info geyser4_info = {
@@ -99,6 +104,7 @@ static const struct atp_info geyser4_info = {
.yfact = 64,
.datalen = 64,
.callback = atp_complete_geyser_3_4,
+ .fuzz = 0,
};
#define ATP_DEVICE(prod, info) \
@@ -155,8 +161,11 @@ MODULE_DEVICE_TABLE(usb, atp_table);
#define ATP_XSENSORS 26
#define ATP_YSENSORS 16
-/* amount of fuzz this touchpad generates */
-#define ATP_FUZZ 16
+/*
+ * The largest possible bank of sensors with additional buffer of 4 extra values
+ * on either side, for an array of smoothed sensor values.
+ */
+#define ATP_SMOOTHSIZE 34
/* maximum pressure this driver will report */
#define ATP_PRESSURE 300
@@ -165,7 +174,13 @@ MODULE_DEVICE_TABLE(usb, atp_table);
* Threshold for the touchpad sensors. Any change less than ATP_THRESHOLD is
* ignored.
*/
-#define ATP_THRESHOLD 5
+#define ATP_THRESHOLD 5
+
+/*
+ * How far we'll bitshift our sensor values before averaging them. Mitigates
+ * rounding errors.
+ */
+#define ATP_SCALE 12
/* Geyser initialization constants */
#define ATP_GEYSER_MODE_READ_REQUEST_ID 1
@@ -203,11 +218,14 @@ struct atp {
bool valid; /* are the samples valid? */
bool size_detect_done;
bool overflow_warned;
+ int fingers_old; /* last reported finger count */
int x_old; /* last reported x/y, */
int y_old; /* used for smoothing */
signed char xy_cur[ATP_XSENSORS + ATP_YSENSORS];
signed char xy_old[ATP_XSENSORS + ATP_YSENSORS];
int xy_acc[ATP_XSENSORS + ATP_YSENSORS];
+ int smooth[ATP_SMOOTHSIZE];
+ int smooth_tmp[ATP_SMOOTHSIZE];
int idlecount; /* number of empty packets */
struct work_struct work;
};
@@ -326,10 +344,17 @@ static void atp_reinit(struct work_struct *work)
retval);
}
-static int atp_calculate_abs(int *xy_sensors, int nb_sensors, int fact,
- int *z, int *fingers)
+static int atp_calculate_abs(struct atp *dev, int offset, int nb_sensors,
+ int fact, int *z, int *fingers)
{
- int i;
+ int i, pass;
+
+ /*
+ * Use offset to point xy_sensors at the first value in dev->xy_acc
+ * for whichever dimension we're looking at this particular go-round.
+ */
+ int *xy_sensors = dev->xy_acc + offset;
+
/* values to calculate mean */
int pcum = 0, psum = 0;
int is_increasing = 0;
@@ -341,9 +366,6 @@ static int atp_calculate_abs(int *xy_sensors, int nb_sensors, int fact,
if (is_increasing)
is_increasing = 0;
- continue;
- }
-
/*
* Makes the finger detection more versatile. For example,
* two fingers with no gap will be detected. Also, my
@@ -358,27 +380,63 @@ static int atp_calculate_abs(int *xy_sensors, int nb_sensors, int fact,
*
* - Jason Parekh <jasonparekh@gmail.com>
*/
- if (i < 1 ||
+
+ } else if (i < 1 ||
(!is_increasing && xy_sensors[i - 1] < xy_sensors[i])) {
(*fingers)++;
is_increasing = 1;
} else if (i > 0 && (xy_sensors[i - 1] - xy_sensors[i] > threshold)) {
is_increasing = 0;
}
+ }
+
+ if (*fingers < 1) /* No need to continue if no fingers are found. */
+ return 0;
+ /*
+ * Use a smoothed version of sensor data for movement calculations, to
+ * combat noise without needing to rely so heavily on a threshold.
+ * This improves tracking.
+ *
+ * The smoothed array is bigger than the original so that the smoothing
+ * doesn't result in edge values being truncated.
+ */
+
+ memset(dev->smooth, 0, 4 * sizeof(dev->smooth[0]));
+ /* Pull base values, scaled up to help avoid truncation errors. */
+ for (i = 0; i < nb_sensors; i++)
+ dev->smooth[i + 4] = xy_sensors[i] << ATP_SCALE;
+ memset(&dev->smooth[nb_sensors + 4], 0, 4 * sizeof(dev->smooth[0]));
+
+ for (pass = 0; pass < 4; pass++) {
+ /* Handle edge. */
+ dev->smooth_tmp[0] = (dev->smooth[0] + dev->smooth[1]) / 2;
+
+ /* Average values with neighbors. */
+ for (i = 1; i < nb_sensors + 7; i++)
+ dev->smooth_tmp[i] = (dev->smooth[i - 1] +
+ dev->smooth[i] * 2 +
+ dev->smooth[i + 1]) / 4;
+
+ /* Handle other edge. */
+ dev->smooth_tmp[i] = (dev->smooth[i - 1] + dev->smooth[i]) / 2;
+
+ memcpy(dev->smooth, dev->smooth_tmp, sizeof(dev->smooth));
+ }
+
+ for (i = 0; i < nb_sensors + 8; i++) {
/*
- * Subtracts threshold so a high sensor that just passes the
- * threshold won't skew the calculated absolute coordinate.
- * Fixes an issue where slowly moving the mouse would
- * occasionally jump a number of pixels (slowly moving the
- * finger makes this issue most apparent.)
+ * Skip values if they're small enough to be truncated to 0
+ * by scale. Mostly noise.
*/
- pcum += (xy_sensors[i] - threshold) * i;
- psum += (xy_sensors[i] - threshold);
+ if ((dev->smooth[i] >> ATP_SCALE) > 0) {
+ pcum += dev->smooth[i] * i;
+ psum += dev->smooth[i];
+ }
}
if (psum > 0) {
- *z = psum;
+ *z = psum >> ATP_SCALE; /* Scale down pressure output. */
return pcum * fact / psum;
}
@@ -455,7 +513,7 @@ static void atp_detect_size(struct atp *dev)
input_set_abs_params(dev->input, ABS_X, 0,
(dev->info->xsensors_17 - 1) *
dev->info->xfact - 1,
- ATP_FUZZ, 0);
+ dev->info->fuzz, 0);
break;
}
}
@@ -471,7 +529,7 @@ static void atp_complete_geyser_1_2(struct urb *urb)
{
int x, y, x_z, y_z, x_f, y_f;
int retval, i, j;
- int key;
+ int key, fingers;
struct atp *dev = urb->context;
int status = atp_status_check(urb);
@@ -548,16 +606,18 @@ static void atp_complete_geyser_1_2(struct urb *urb)
dbg_dump("accumulator", dev->xy_acc);
- x = atp_calculate_abs(dev->xy_acc, ATP_XSENSORS,
+ x = atp_calculate_abs(dev, 0, ATP_XSENSORS,
dev->info->xfact, &x_z, &x_f);
- y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS,
+ y = atp_calculate_abs(dev, ATP_XSENSORS, ATP_YSENSORS,
dev->info->yfact, &y_z, &y_f);
key = dev->data[dev->info->datalen - 1] & ATP_STATUS_BUTTON;
- if (x && y) {
+ fingers = max(x_f, y_f);
+
+ if (x && y && fingers == dev->fingers_old) {
if (dev->x_old != -1) {
- x = (dev->x_old * 3 + x) >> 2;
- y = (dev->y_old * 3 + y) >> 2;
+ x = (dev->x_old * 7 + x) >> 3;
+ y = (dev->y_old * 7 + y) >> 3;
dev->x_old = x;
dev->y_old = y;
@@ -571,7 +631,7 @@ static void atp_complete_geyser_1_2(struct urb *urb)
input_report_abs(dev->input, ABS_Y, y);
input_report_abs(dev->input, ABS_PRESSURE,
min(ATP_PRESSURE, x_z + y_z));
- atp_report_fingers(dev->input, max(x_f, y_f));
+ atp_report_fingers(dev->input, fingers);
}
dev->x_old = x;
dev->y_old = y;
@@ -579,6 +639,7 @@ static void atp_complete_geyser_1_2(struct urb *urb)
} else if (!x && !y) {
dev->x_old = dev->y_old = -1;
+ dev->fingers_old = 0;
input_report_key(dev->input, BTN_TOUCH, 0);
input_report_abs(dev->input, ABS_PRESSURE, 0);
atp_report_fingers(dev->input, 0);
@@ -587,6 +648,10 @@ static void atp_complete_geyser_1_2(struct urb *urb)
memset(dev->xy_acc, 0, sizeof(dev->xy_acc));
}
+ if (fingers != dev->fingers_old)
+ dev->x_old = dev->y_old = -1;
+ dev->fingers_old = fingers;
+
input_report_key(dev->input, BTN_LEFT, key);
input_sync(dev->input);
@@ -604,7 +669,7 @@ static void atp_complete_geyser_3_4(struct urb *urb)
{
int x, y, x_z, y_z, x_f, y_f;
int retval, i, j;
- int key;
+ int key, fingers;
struct atp *dev = urb->context;
int status = atp_status_check(urb);
@@ -660,16 +725,19 @@ static void atp_complete_geyser_3_4(struct urb *urb)
dbg_dump("accumulator", dev->xy_acc);
- x = atp_calculate_abs(dev->xy_acc, ATP_XSENSORS,
+ x = atp_calculate_abs(dev, 0, ATP_XSENSORS,
dev->info->xfact, &x_z, &x_f);
- y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS,
+ y = atp_calculate_abs(dev, ATP_XSENSORS, ATP_YSENSORS,
dev->info->yfact, &y_z, &y_f);
+
key = dev->data[dev->info->datalen - 1] & ATP_STATUS_BUTTON;
- if (x && y) {
+ fingers = max(x_f, y_f);
+
+ if (x && y && fingers == dev->fingers_old) {
if (dev->x_old != -1) {
- x = (dev->x_old * 3 + x) >> 2;
- y = (dev->y_old * 3 + y) >> 2;
+ x = (dev->x_old * 7 + x) >> 3;
+ y = (dev->y_old * 7 + y) >> 3;
dev->x_old = x;
dev->y_old = y;
@@ -683,7 +751,7 @@ static void atp_complete_geyser_3_4(struct urb *urb)
input_report_abs(dev->input, ABS_Y, y);
input_report_abs(dev->input, ABS_PRESSURE,
min(ATP_PRESSURE, x_z + y_z));
- atp_report_fingers(dev->input, max(x_f, y_f));
+ atp_report_fingers(dev->input, fingers);
}
dev->x_old = x;
dev->y_old = y;
@@ -691,6 +759,7 @@ static void atp_complete_geyser_3_4(struct urb *urb)
} else if (!x && !y) {
dev->x_old = dev->y_old = -1;
+ dev->fingers_old = 0;
input_report_key(dev->input, BTN_TOUCH, 0);
input_report_abs(dev->input, ABS_PRESSURE, 0);
atp_report_fingers(dev->input, 0);
@@ -699,6 +768,10 @@ static void atp_complete_geyser_3_4(struct urb *urb)
memset(dev->xy_acc, 0, sizeof(dev->xy_acc));
}
+ if (fingers != dev->fingers_old)
+ dev->x_old = dev->y_old = -1;
+ dev->fingers_old = fingers;
+
input_report_key(dev->input, BTN_LEFT, key);
input_sync(dev->input);
@@ -843,10 +916,10 @@ static int atp_probe(struct usb_interface *iface,
input_set_abs_params(input_dev, ABS_X, 0,
(dev->info->xsensors - 1) * dev->info->xfact - 1,
- ATP_FUZZ, 0);
+ dev->info->fuzz, 0);
input_set_abs_params(input_dev, ABS_Y, 0,
(dev->info->ysensors - 1) * dev->info->yfact - 1,
- ATP_FUZZ, 0);
+ dev->info->fuzz, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, ATP_PRESSURE, 0, 0);
set_bit(EV_KEY, input_dev->evbit);
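The appletouch filter added above scales each accumulated sensor value up by 2^ATP_SCALE, pads the array with four zeros on either side, and runs four passes of a (1, 2, 1)/4 kernel before taking the weighted mean. The stand-alone approximation below walks that pipeline over toy data; the constants mirror the driver, but the sensor values are made up.

#include <stdio.h>
#include <string.h>

#define NSENSORS 8
#define PAD      4
#define SCALE    12
#define SMOOTHSZ (NSENSORS + 2 * PAD)

int main(void)
{
	int raw[NSENSORS] = { 0, 0, 6, 40, 38, 5, 0, 0 };
	int smooth[SMOOTHSZ] = { 0 }, tmp[SMOOTHSZ];
	long pcum = 0, psum = 0;
	int i, pass;

	/* Scale up to limit integer truncation during averaging. */
	for (i = 0; i < NSENSORS; i++)
		smooth[i + PAD] = raw[i] << SCALE;

	for (pass = 0; pass < 4; pass++) {
		tmp[0] = (smooth[0] + smooth[1]) / 2;		/* one edge */
		for (i = 1; i < SMOOTHSZ - 1; i++)
			tmp[i] = (smooth[i - 1] + 2 * smooth[i] + smooth[i + 1]) / 4;
		tmp[i] = (smooth[i - 1] + smooth[i]) / 2;	/* other edge */
		memcpy(smooth, tmp, sizeof(smooth));
	}

	for (i = 0; i < SMOOTHSZ; i++) {
		if (smooth[i] >> SCALE) {	/* drop values that scale back to 0 */
			pcum += (long)smooth[i] * i;
			psum += smooth[i];
		}
	}

	if (psum)
		printf("weighted position index: %ld, pressure analog: %ld\n",
		       pcum / psum, psum >> SCALE);
	return 0;
}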
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 4c842c320c2e..b604564dec5c 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -67,7 +67,6 @@ struct mousedev {
struct device dev;
struct cdev cdev;
bool exist;
- bool is_mixdev;
struct list_head mixdev_node;
bool opened_by_mixdev;
@@ -77,6 +76,9 @@ struct mousedev {
int old_x[4], old_y[4];
int frac_dx, frac_dy;
unsigned long touch;
+
+ int (*open_device)(struct mousedev *mousedev);
+ void (*close_device)(struct mousedev *mousedev);
};
enum mousedev_emul {
@@ -116,9 +118,6 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
static struct mousedev *mousedev_mix;
static LIST_HEAD(mousedev_mix_list);
-static void mixdev_open_devices(void);
-static void mixdev_close_devices(void);
-
#define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
#define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
@@ -428,9 +427,7 @@ static int mousedev_open_device(struct mousedev *mousedev)
if (retval)
return retval;
- if (mousedev->is_mixdev)
- mixdev_open_devices();
- else if (!mousedev->exist)
+ if (!mousedev->exist)
retval = -ENODEV;
else if (!mousedev->open++) {
retval = input_open_device(&mousedev->handle);
@@ -446,9 +443,7 @@ static void mousedev_close_device(struct mousedev *mousedev)
{
mutex_lock(&mousedev->mutex);
- if (mousedev->is_mixdev)
- mixdev_close_devices();
- else if (mousedev->exist && !--mousedev->open)
+ if (mousedev->exist && !--mousedev->open)
input_close_device(&mousedev->handle);
mutex_unlock(&mousedev->mutex);
@@ -459,21 +454,29 @@ static void mousedev_close_device(struct mousedev *mousedev)
* stream. Note that this function is called with mousedev_mix->mutex
* held.
*/
-static void mixdev_open_devices(void)
+static int mixdev_open_devices(struct mousedev *mixdev)
{
- struct mousedev *mousedev;
+ int error;
+
+ error = mutex_lock_interruptible(&mixdev->mutex);
+ if (error)
+ return error;
- if (mousedev_mix->open++)
- return;
+ if (!mixdev->open++) {
+ struct mousedev *mousedev;
- list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
- if (!mousedev->opened_by_mixdev) {
- if (mousedev_open_device(mousedev))
- continue;
+ list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+ if (!mousedev->opened_by_mixdev) {
+ if (mousedev_open_device(mousedev))
+ continue;
- mousedev->opened_by_mixdev = true;
+ mousedev->opened_by_mixdev = true;
+ }
}
}
+
+ mutex_unlock(&mixdev->mutex);
+ return 0;
}
/*
@@ -481,19 +484,22 @@ static void mixdev_open_devices(void)
* device. Note that this function is called with mousedev_mix->mutex
* held.
*/
-static void mixdev_close_devices(void)
+static void mixdev_close_devices(struct mousedev *mixdev)
{
- struct mousedev *mousedev;
+ mutex_lock(&mixdev->mutex);
- if (--mousedev_mix->open)
- return;
+ if (!--mixdev->open) {
+ struct mousedev *mousedev;
- list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
- if (mousedev->opened_by_mixdev) {
- mousedev->opened_by_mixdev = false;
- mousedev_close_device(mousedev);
+ list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+ if (mousedev->opened_by_mixdev) {
+ mousedev->opened_by_mixdev = false;
+ mousedev_close_device(mousedev);
+ }
}
}
+
+ mutex_unlock(&mixdev->mutex);
}
@@ -522,7 +528,7 @@ static int mousedev_release(struct inode *inode, struct file *file)
mousedev_detach_client(mousedev, client);
kfree(client);
- mousedev_close_device(mousedev);
+ mousedev->close_device(mousedev);
return 0;
}
@@ -550,7 +556,7 @@ static int mousedev_open(struct inode *inode, struct file *file)
client->mousedev = mousedev;
mousedev_attach_client(mousedev, client);
- error = mousedev_open_device(mousedev);
+ error = mousedev->open_device(mousedev);
if (error)
goto err_free_client;
@@ -861,16 +867,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
if (mixdev) {
dev_set_name(&mousedev->dev, "mice");
+
+ mousedev->open_device = mixdev_open_devices;
+ mousedev->close_device = mixdev_close_devices;
} else {
int dev_no = minor;
/* Normalize device number if it falls into legacy range */
if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
dev_no -= MOUSEDEV_MINOR_BASE;
dev_set_name(&mousedev->dev, "mouse%d", dev_no);
+
+ mousedev->open_device = mousedev_open_device;
+ mousedev->close_device = mousedev_close_device;
}
mousedev->exist = true;
- mousedev->is_mixdev = mixdev;
mousedev->handle.dev = input_get_device(dev);
mousedev->handle.name = dev_name(&mousedev->dev);
mousedev->handle.handler = handler;
@@ -919,7 +930,7 @@ static void mousedev_destroy(struct mousedev *mousedev)
device_del(&mousedev->dev);
mousedev_cleanup(mousedev);
input_free_minor(MINOR(mousedev->dev.devt));
- if (!mousedev->is_mixdev)
+ if (mousedev != mousedev_mix)
input_unregister_handle(&mousedev->handle);
put_device(&mousedev->dev);
}
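Instead of testing an is_mixdev flag on every open and close, the mousedev rework above gives each instance its own open_device/close_device callbacks at creation time. The toy illustration below shows that substitution with hypothetical names.

#include <stdio.h>

struct dev {
	const char *name;
	int (*open_device)(struct dev *d);
	void (*close_device)(struct dev *d);
};

static int plain_open(struct dev *d)   { printf("open %s\n", d->name); return 0; }
static void plain_close(struct dev *d) { printf("close %s\n", d->name); }
static int mix_open(struct dev *d)     { printf("open all devices behind %s\n", d->name); return 0; }
static void mix_close(struct dev *d)   { printf("close all devices behind %s\n", d->name); }

static void dev_init(struct dev *d, const char *name, int mixdev)
{
	d->name = name;
	/* Decide once at creation, not on every open/close. */
	d->open_device = mixdev ? mix_open : plain_open;
	d->close_device = mixdev ? mix_close : plain_close;
}

int main(void)
{
	struct dev mouse0, mice;

	dev_init(&mouse0, "mouse0", 0);
	dev_init(&mice, "mice", 1);

	mouse0.open_device(&mouse0);
	mice.open_device(&mice);
	mice.close_device(&mice);
	mouse0.close_device(&mouse0);
	return 0;
}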
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index aec54e283580..bc2d47431bdc 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -263,7 +263,7 @@ config SERIO_APBPS2
config SERIO_OLPC_APSP
tristate "OLPC AP-SP input support"
- depends on OF
+ depends on OLPC || COMPILE_TEST
help
Say Y here if you want support for the keyboard and touchpad included
in the OLPC XO-1.75 and XO-4 laptops.
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index d7a7e54f6465..852858e5d8d0 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -984,7 +984,7 @@ static void hp_sdc_exit(void)
free_irq(hp_sdc.irq, &hp_sdc);
write_unlock_irq(&hp_sdc.lock);
- del_timer(&hp_sdc.kicker);
+ del_timer_sync(&hp_sdc.kicker);
tasklet_kill(&hp_sdc.task);
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index a70aa555bbff..e7409c45bdd0 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -236,7 +236,7 @@ EXPORT_SYMBOL(sparse_keymap_setup);
* in an input device that was set up by sparse_keymap_setup().
* NOTE: It is safe to call this function while input device is
* still registered (however the drivers should care not to try to
- * use freed keymap and thus have to shut off interrups/polling
+ * use freed keymap and thus have to shut off interrupts/polling
* before freeing the keymap).
*/
void sparse_keymap_free(struct input_dev *dev)
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index caecffe8caff..858045694e9d 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -848,7 +848,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
gtco->inputdevice = input_dev;
/* Save interface information */
- gtco->usbdev = usb_get_dev(interface_to_usbdev(usbinterface));
+ gtco->usbdev = interface_to_usbdev(usbinterface);
gtco->intf = usbinterface;
/* Allocate some data for incoming reports */
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 07e9e82029d1..68edc9db2c64 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -514,15 +514,6 @@ config TOUCHSCREEN_MIGOR
To compile this driver as a module, choose M here: the
module will be called migor_ts.
-config TOUCHSCREEN_TNETV107X
- tristate "TI TNETV107X touchscreen support"
- depends on ARCH_DAVINCI_TNETV107X
- help
- Say Y here if you want to use the TNETV107X touchscreen.
-
- To compile this driver as a module, choose M here: the
- module will be called tnetv107x-ts.
-
config TOUCHSCREEN_TOUCHRIGHT
tristate "Touchright serial touchscreen"
select SERIO
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 62801f213346..4bc954b7c7c3 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -56,7 +56,6 @@ obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o
obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
-obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 412a85ec9ba5..f8815bebc9ef 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1,5 +1,7 @@
/*
* Copyright (C) 2012 Simon Budig, <simon.budig@kernelconcepts.de>
+ * Daniel Wagener <daniel.wagener@kernelconcepts.de> (M09 firmware support)
+ * Lothar Waßmann <LW@KARO-electronics.de> (DT support)
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -33,6 +35,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/input/mt.h>
#include <linux/input/edt-ft5x06.h>
@@ -45,6 +48,14 @@
#define WORK_REGISTER_NUM_X 0x33
#define WORK_REGISTER_NUM_Y 0x34
+#define M09_REGISTER_THRESHOLD 0x80
+#define M09_REGISTER_GAIN 0x92
+#define M09_REGISTER_OFFSET 0x93
+#define M09_REGISTER_NUM_X 0x94
+#define M09_REGISTER_NUM_Y 0x95
+
+#define NO_REGISTER 0xff
+
#define WORK_REGISTER_OPMODE 0x3c
#define FACTORY_REGISTER_OPMODE 0x01
@@ -59,12 +70,30 @@
#define EDT_RAW_DATA_RETRIES 100
#define EDT_RAW_DATA_DELAY 1 /* msec */
+enum edt_ver {
+ M06,
+ M09,
+};
+
+struct edt_reg_addr {
+ int reg_threshold;
+ int reg_report_rate;
+ int reg_gain;
+ int reg_offset;
+ int reg_num_x;
+ int reg_num_y;
+};
+
struct edt_ft5x06_ts_data {
struct i2c_client *client;
struct input_dev *input;
u16 num_x;
u16 num_y;
+ int reset_pin;
+ int irq_pin;
+ int wake_pin;
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debug_dir;
u8 *raw_buffer;
@@ -79,6 +108,9 @@ struct edt_ft5x06_ts_data {
int report_rate;
char name[EDT_NAME_LEN];
+
+ struct edt_reg_addr reg_addr;
+ enum edt_ver version;
};
static int edt_ft5x06_ts_readwrite(struct i2c_client *client,
@@ -136,33 +168,58 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
{
struct edt_ft5x06_ts_data *tsdata = dev_id;
struct device *dev = &tsdata->client->dev;
- u8 cmd = 0xf9;
- u8 rdbuf[26];
+ u8 cmd;
+ u8 rdbuf[29];
int i, type, x, y, id;
+ int offset, tplen, datalen;
int error;
+ switch (tsdata->version) {
+ case M06:
+ cmd = 0xf9; /* tell the controller to send touch data */
+ offset = 5; /* where the actual touch data starts */
+ tplen = 4; /* data comes in so called frames */
+ datalen = 26; /* how much bytes to listen for */
+ break;
+
+ case M09:
+ cmd = 0x02;
+ offset = 1;
+ tplen = 6;
+ datalen = 29;
+ break;
+
+ default:
+ goto out;
+ }
+
memset(rdbuf, 0, sizeof(rdbuf));
error = edt_ft5x06_ts_readwrite(tsdata->client,
sizeof(cmd), &cmd,
- sizeof(rdbuf), rdbuf);
+ datalen, rdbuf);
if (error) {
dev_err_ratelimited(dev, "Unable to fetch data, error: %d\n",
error);
goto out;
}
- if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa || rdbuf[2] != 26) {
- dev_err_ratelimited(dev, "Unexpected header: %02x%02x%02x!\n",
- rdbuf[0], rdbuf[1], rdbuf[2]);
- goto out;
- }
+ /* M09 does not send header or CRC */
+ if (tsdata->version == M06) {
+ if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa ||
+ rdbuf[2] != datalen) {
+ dev_err_ratelimited(dev,
+ "Unexpected header: %02x%02x%02x!\n",
+ rdbuf[0], rdbuf[1], rdbuf[2]);
+ goto out;
+ }
- if (!edt_ft5x06_ts_check_crc(tsdata, rdbuf, 26))
- goto out;
+ if (!edt_ft5x06_ts_check_crc(tsdata, rdbuf, datalen))
+ goto out;
+ }
for (i = 0; i < MAX_SUPPORT_POINTS; i++) {
- u8 *buf = &rdbuf[i * 4 + 5];
+ u8 *buf = &rdbuf[i * tplen + offset];
bool down;
type = buf[0] >> 6;
@@ -170,10 +227,14 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
if (type == TOUCH_EVENT_RESERVED)
continue;
+ /* M06 sometimes sends bogus coordinates in TOUCH_DOWN */
+ if (tsdata->version == M06 && type == TOUCH_EVENT_DOWN)
+ continue;
+
x = ((buf[0] << 8) | buf[1]) & 0x0fff;
y = ((buf[2] << 8) | buf[3]) & 0x0fff;
id = (buf[2] >> 4) & 0x0f;
- down = (type != TOUCH_EVENT_UP);
+ down = type != TOUCH_EVENT_UP;
input_mt_slot(tsdata->input, id);
input_mt_report_slot_state(tsdata->input, MT_TOOL_FINGER, down);
@@ -197,12 +258,25 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
{
u8 wrbuf[4];
- wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
- wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
- wrbuf[2] = value;
- wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2];
-
- return edt_ft5x06_ts_readwrite(tsdata->client, 4, wrbuf, 0, NULL);
+ switch (tsdata->version) {
+ case M06:
+ wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
+ wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
+ wrbuf[2] = value;
+ wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2];
+ return edt_ft5x06_ts_readwrite(tsdata->client, 4,
+ wrbuf, 0, NULL);
+ case M09:
+ wrbuf[0] = addr;
+ wrbuf[1] = value;
+
+ return edt_ft5x06_ts_readwrite(tsdata->client, 3,
+ wrbuf, 0, NULL);
+
+ default:
+ return -EINVAL;
+ }
}
static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
@@ -211,19 +285,36 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
u8 wrbuf[2], rdbuf[2];
int error;
- wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
- wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
- wrbuf[1] |= tsdata->factory_mode ? 0x80 : 0x40;
+ switch (tsdata->version) {
+ case M06:
+ wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
+ wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
+ wrbuf[1] |= tsdata->factory_mode ? 0x80 : 0x40;
- error = edt_ft5x06_ts_readwrite(tsdata->client, 2, wrbuf, 2, rdbuf);
- if (error)
- return error;
+ error = edt_ft5x06_ts_readwrite(tsdata->client, 2, wrbuf, 2,
+ rdbuf);
+ if (error)
+ return error;
- if ((wrbuf[0] ^ wrbuf[1] ^ rdbuf[0]) != rdbuf[1]) {
- dev_err(&tsdata->client->dev,
- "crc error: 0x%02x expected, got 0x%02x\n",
- wrbuf[0] ^ wrbuf[1] ^ rdbuf[0], rdbuf[1]);
- return -EIO;
+ if ((wrbuf[0] ^ wrbuf[1] ^ rdbuf[0]) != rdbuf[1]) {
+ dev_err(&tsdata->client->dev,
+ "crc error: 0x%02x expected, got 0x%02x\n",
+ wrbuf[0] ^ wrbuf[1] ^ rdbuf[0],
+ rdbuf[1]);
+ return -EIO;
+ }
+ break;
+
+ case M09:
+ wrbuf[0] = addr;
+ error = edt_ft5x06_ts_readwrite(tsdata->client, 1,
+ wrbuf, 1, rdbuf);
+ if (error)
+ return error;
+ break;
+
+ default:
+ return -EINVAL;
}
return rdbuf[0];
@@ -234,19 +325,21 @@ struct edt_ft5x06_attribute {
size_t field_offset;
u8 limit_low;
u8 limit_high;
- u8 addr;
+ u8 addr_m06;
+ u8 addr_m09;
};
-#define EDT_ATTR(_field, _mode, _addr, _limit_low, _limit_high) \
+#define EDT_ATTR(_field, _mode, _addr_m06, _addr_m09, \
+ _limit_low, _limit_high) \
struct edt_ft5x06_attribute edt_ft5x06_attr_##_field = { \
.dattr = __ATTR(_field, _mode, \
edt_ft5x06_setting_show, \
edt_ft5x06_setting_store), \
- .field_offset = \
- offsetof(struct edt_ft5x06_ts_data, _field), \
+ .field_offset = offsetof(struct edt_ft5x06_ts_data, _field), \
+ .addr_m06 = _addr_m06, \
+ .addr_m09 = _addr_m09, \
.limit_low = _limit_low, \
.limit_high = _limit_high, \
- .addr = _addr, \
}
static ssize_t edt_ft5x06_setting_show(struct device *dev,
@@ -257,10 +350,11 @@ static ssize_t edt_ft5x06_setting_show(struct device *dev,
struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
struct edt_ft5x06_attribute *attr =
container_of(dattr, struct edt_ft5x06_attribute, dattr);
- u8 *field = (u8 *)((char *)tsdata + attr->field_offset);
+ u8 *field = (u8 *)tsdata + attr->field_offset;
int val;
size_t count = 0;
int error = 0;
+ u8 addr;
mutex_lock(&tsdata->mutex);
@@ -269,15 +363,33 @@ static ssize_t edt_ft5x06_setting_show(struct device *dev,
goto out;
}
- val = edt_ft5x06_register_read(tsdata, attr->addr);
- if (val < 0) {
- error = val;
- dev_err(&tsdata->client->dev,
- "Failed to fetch attribute %s, error %d\n",
- dattr->attr.name, error);
+ switch (tsdata->version) {
+ case M06:
+ addr = attr->addr_m06;
+ break;
+
+ case M09:
+ addr = attr->addr_m09;
+ break;
+
+ default:
+ error = -ENODEV;
goto out;
}
+ if (addr != NO_REGISTER) {
+ val = edt_ft5x06_register_read(tsdata, addr);
+ if (val < 0) {
+ error = val;
+ dev_err(&tsdata->client->dev,
+ "Failed to fetch attribute %s, error %d\n",
+ dattr->attr.name, error);
+ goto out;
+ }
+ } else {
+ val = *field;
+ }
+
if (val != *field) {
dev_warn(&tsdata->client->dev,
"%s: read (%d) and stored value (%d) differ\n",
@@ -299,9 +411,10 @@ static ssize_t edt_ft5x06_setting_store(struct device *dev,
struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
struct edt_ft5x06_attribute *attr =
container_of(dattr, struct edt_ft5x06_attribute, dattr);
- u8 *field = (u8 *)((char *)tsdata + attr->field_offset);
+ u8 *field = (u8 *)tsdata + attr->field_offset;
unsigned int val;
int error;
+ u8 addr;
mutex_lock(&tsdata->mutex);
@@ -319,14 +432,29 @@ static ssize_t edt_ft5x06_setting_store(struct device *dev,
goto out;
}
- error = edt_ft5x06_register_write(tsdata, attr->addr, val);
- if (error) {
- dev_err(&tsdata->client->dev,
- "Failed to update attribute %s, error: %d\n",
- dattr->attr.name, error);
+ switch (tsdata->version) {
+ case M06:
+ addr = attr->addr_m06;
+ break;
+
+ case M09:
+ addr = attr->addr_m09;
+ break;
+
+ default:
+ error = -ENODEV;
goto out;
}
+ if (addr != NO_REGISTER) {
+ error = edt_ft5x06_register_write(tsdata, addr, val);
+ if (error) {
+ dev_err(&tsdata->client->dev,
+ "Failed to update attribute %s, error: %d\n",
+ dattr->attr.name, error);
+ goto out;
+ }
+ }
*field = val;
out:
@@ -334,12 +462,14 @@ out:
return error ?: count;
}
-static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN, 0, 31);
-static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET, 0, 31);
-static EDT_ATTR(threshold, S_IWUSR | S_IRUGO,
- WORK_REGISTER_THRESHOLD, 20, 80);
-static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO,
- WORK_REGISTER_REPORT_RATE, 3, 14);
+static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN,
+ M09_REGISTER_GAIN, 0, 31);
+static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET,
+ M09_REGISTER_OFFSET, 0, 31);
+static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
+ M09_REGISTER_THRESHOLD, 20, 80);
+static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
+ NO_REGISTER, 3, 14);
static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_gain.dattr.attr,
@@ -374,6 +504,9 @@ static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
}
/* mode register is 0x3c when in the work mode */
+ if (tsdata->version == M09)
+ goto m09_out;
+
error = edt_ft5x06_register_write(tsdata, WORK_REGISTER_OPMODE, 0x03);
if (error) {
dev_err(&client->dev,
@@ -406,12 +539,18 @@ err_out:
enable_irq(client->irq);
return error;
+
+m09_out:
+ dev_err(&client->dev, "No factory mode support for M09\n");
+ return -EINVAL;
+
}
static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
{
struct i2c_client *client = tsdata->client;
int retries = EDT_SWITCH_MODE_RETRIES;
+ struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
int ret;
int error;
@@ -444,13 +583,14 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
tsdata->raw_buffer = NULL;
/* restore parameters */
- edt_ft5x06_register_write(tsdata, WORK_REGISTER_THRESHOLD,
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold,
tsdata->threshold);
- edt_ft5x06_register_write(tsdata, WORK_REGISTER_GAIN,
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_gain,
tsdata->gain);
- edt_ft5x06_register_write(tsdata, WORK_REGISTER_OFFSET,
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset,
tsdata->offset);
- edt_ft5x06_register_write(tsdata, WORK_REGISTER_REPORT_RATE,
+ if (reg_addr->reg_report_rate)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate,
tsdata->report_rate);
enable_irq(client->irq);
@@ -479,7 +619,7 @@ static int edt_ft5x06_debugfs_mode_set(void *data, u64 mode)
if (mode != tsdata->factory_mode) {
retval = mode ? edt_ft5x06_factory_mode(tsdata) :
- edt_ft5x06_work_mode(tsdata);
+ edt_ft5x06_work_mode(tsdata);
}
mutex_unlock(&tsdata->mutex);
@@ -568,7 +708,6 @@ out:
return error ?: read;
};
-
static const struct file_operations debugfs_raw_data_fops = {
.open = simple_open,
.read = edt_ft5x06_debugfs_raw_data_read,
@@ -614,58 +753,100 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
#endif /* CONFIG_DEBUGFS */
-
-
static int edt_ft5x06_ts_reset(struct i2c_client *client,
- int reset_pin)
+ struct edt_ft5x06_ts_data *tsdata)
{
int error;
- if (gpio_is_valid(reset_pin)) {
+ if (gpio_is_valid(tsdata->wake_pin)) {
+ error = devm_gpio_request_one(&client->dev,
+ tsdata->wake_pin, GPIOF_OUT_INIT_LOW,
+ "edt-ft5x06 wake");
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to request GPIO %d as wake pin, error %d\n",
+ tsdata->wake_pin, error);
+ return error;
+ }
+
+ msleep(5);
+ gpio_set_value(tsdata->wake_pin, 1);
+ }
+ if (gpio_is_valid(tsdata->reset_pin)) {
/* this pulls reset down, enabling the low active reset */
- error = devm_gpio_request_one(&client->dev, reset_pin,
- GPIOF_OUT_INIT_LOW,
- "edt-ft5x06 reset");
+ error = devm_gpio_request_one(&client->dev,
+ tsdata->reset_pin, GPIOF_OUT_INIT_LOW,
+ "edt-ft5x06 reset");
if (error) {
dev_err(&client->dev,
"Failed to request GPIO %d as reset pin, error %d\n",
- reset_pin, error);
+ tsdata->reset_pin, error);
return error;
}
- mdelay(50);
- gpio_set_value(reset_pin, 1);
- mdelay(100);
+ msleep(5);
+ gpio_set_value(tsdata->reset_pin, 1);
+ msleep(300);
}
return 0;
}
static int edt_ft5x06_ts_identify(struct i2c_client *client,
- char *model_name,
- char *fw_version)
+ struct edt_ft5x06_ts_data *tsdata,
+ char *fw_version)
{
u8 rdbuf[EDT_NAME_LEN];
char *p;
int error;
+ char *model_name = tsdata->name;
+ /* see what we find if we assume it is a M06 *
+ * if we get less than EDT_NAME_LEN, we don't want
+ * to have garbage in there
+ */
+ memset(rdbuf, 0, sizeof(rdbuf));
error = edt_ft5x06_ts_readwrite(client, 1, "\xbb",
EDT_NAME_LEN - 1, rdbuf);
if (error)
return error;
- /* remove last '$' end marker */
- rdbuf[EDT_NAME_LEN - 1] = '\0';
- if (rdbuf[EDT_NAME_LEN - 2] == '$')
- rdbuf[EDT_NAME_LEN - 2] = '\0';
+ /* if we find something consistent, stay with that assumption
+ * at least M09 won't send 3 bytes here
+ */
+ if (!(strnicmp(rdbuf + 1, "EP0", 3))) {
+ tsdata->version = M06;
+
+ /* remove last '$' end marker */
+ rdbuf[EDT_NAME_LEN - 1] = '\0';
+ if (rdbuf[EDT_NAME_LEN - 2] == '$')
+ rdbuf[EDT_NAME_LEN - 2] = '\0';
+
+ /* look for Model/Version separator */
+ p = strchr(rdbuf, '*');
+ if (p)
+ *p++ = '\0';
+ strlcpy(model_name, rdbuf + 1, EDT_NAME_LEN);
+ strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
+ } else {
+ /* since there are only two versions around (M06, M09) */
+ tsdata->version = M09;
+
+ error = edt_ft5x06_ts_readwrite(client, 1, "\xA6",
+ 2, rdbuf);
+ if (error)
+ return error;
+
+ strlcpy(fw_version, rdbuf, 2);
- /* look for Model/Version separator */
- p = strchr(rdbuf, '*');
- if (p)
- *p++ = '\0';
+ error = edt_ft5x06_ts_readwrite(client, 1, "\xA8",
+ 1, rdbuf);
+ if (error)
+ return error;
- strlcpy(model_name, rdbuf + 1, EDT_NAME_LEN);
- strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
+ snprintf(model_name, EDT_NAME_LEN, "EP0%i%i0M09",
+ rdbuf[0] >> 4, rdbuf[0] & 0x0F);
+ }
return 0;
}
@@ -675,33 +856,104 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
pdata->name <= edt_ft5x06_attr_##name.limit_high) \
edt_ft5x06_register_write(tsdata, reg, pdata->name)
+#define EDT_GET_PROP(name, reg) { \
+ u32 val; \
+ if (of_property_read_u32(np, #name, &val) == 0) \
+ edt_ft5x06_register_write(tsdata, reg, val); \
+}
+
+static void edt_ft5x06_ts_get_dt_defaults(struct device_node *np,
+ struct edt_ft5x06_ts_data *tsdata)
+{
+ struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
+
+ EDT_GET_PROP(threshold, reg_addr->reg_threshold);
+ EDT_GET_PROP(gain, reg_addr->reg_gain);
+ EDT_GET_PROP(offset, reg_addr->reg_offset);
+}
+
static void
edt_ft5x06_ts_get_defaults(struct edt_ft5x06_ts_data *tsdata,
const struct edt_ft5x06_platform_data *pdata)
{
+ struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
+
if (!pdata->use_parameters)
return;
/* pick up defaults from the platform data */
- EDT_ATTR_CHECKSET(threshold, WORK_REGISTER_THRESHOLD);
- EDT_ATTR_CHECKSET(gain, WORK_REGISTER_GAIN);
- EDT_ATTR_CHECKSET(offset, WORK_REGISTER_OFFSET);
- EDT_ATTR_CHECKSET(report_rate, WORK_REGISTER_REPORT_RATE);
+ EDT_ATTR_CHECKSET(threshold, reg_addr->reg_threshold);
+ EDT_ATTR_CHECKSET(gain, reg_addr->reg_gain);
+ EDT_ATTR_CHECKSET(offset, reg_addr->reg_offset);
+ if (reg_addr->reg_report_rate != NO_REGISTER)
+ EDT_ATTR_CHECKSET(report_rate, reg_addr->reg_report_rate);
}
static void
edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
{
+ struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
+
tsdata->threshold = edt_ft5x06_register_read(tsdata,
- WORK_REGISTER_THRESHOLD);
- tsdata->gain = edt_ft5x06_register_read(tsdata, WORK_REGISTER_GAIN);
- tsdata->offset = edt_ft5x06_register_read(tsdata, WORK_REGISTER_OFFSET);
- tsdata->report_rate = edt_ft5x06_register_read(tsdata,
- WORK_REGISTER_REPORT_RATE);
- tsdata->num_x = edt_ft5x06_register_read(tsdata, WORK_REGISTER_NUM_X);
- tsdata->num_y = edt_ft5x06_register_read(tsdata, WORK_REGISTER_NUM_Y);
+ reg_addr->reg_threshold);
+ tsdata->gain = edt_ft5x06_register_read(tsdata, reg_addr->reg_gain);
+ tsdata->offset = edt_ft5x06_register_read(tsdata, reg_addr->reg_offset);
+ if (reg_addr->reg_report_rate != NO_REGISTER)
+ tsdata->report_rate = edt_ft5x06_register_read(tsdata,
+ reg_addr->reg_report_rate);
+ tsdata->num_x = edt_ft5x06_register_read(tsdata, reg_addr->reg_num_x);
+ tsdata->num_y = edt_ft5x06_register_read(tsdata, reg_addr->reg_num_y);
+}
+
+static void
+edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
+{
+ struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
+
+ switch (tsdata->version) {
+ case M06:
+ reg_addr->reg_threshold = WORK_REGISTER_THRESHOLD;
+ reg_addr->reg_report_rate = WORK_REGISTER_REPORT_RATE;
+ reg_addr->reg_gain = WORK_REGISTER_GAIN;
+ reg_addr->reg_offset = WORK_REGISTER_OFFSET;
+ reg_addr->reg_num_x = WORK_REGISTER_NUM_X;
+ reg_addr->reg_num_y = WORK_REGISTER_NUM_Y;
+ break;
+
+ case M09:
+ reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
+ reg_addr->reg_gain = M09_REGISTER_GAIN;
+ reg_addr->reg_offset = M09_REGISTER_OFFSET;
+ reg_addr->reg_num_x = M09_REGISTER_NUM_X;
+ reg_addr->reg_num_y = M09_REGISTER_NUM_Y;
+ break;
+ }
}
+#ifdef CONFIG_OF
+static int edt_ft5x06_i2c_ts_probe_dt(struct device *dev,
+ struct edt_ft5x06_ts_data *tsdata)
+{
+ struct device_node *np = dev->of_node;
+
+ /*
+ * irq_pin is not needed for DT setup.
+ * irq is associated via 'interrupts' property in DT
+ */
+ tsdata->irq_pin = -EINVAL;
+ tsdata->reset_pin = of_get_named_gpio(np, "reset-gpios", 0);
+ tsdata->wake_pin = of_get_named_gpio(np, "wake-gpios", 0);
+
+ return 0;
+}
+#else
+static inline int edt_ft5x06_i2c_ts_probe_dt(struct device *dev,
+ struct edt_ft5x06_ts_data *tsdata)
+{
+ return -ENODEV;
+}
+#endif
+
static int edt_ft5x06_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -714,32 +966,40 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
dev_dbg(&client->dev, "probing for EDT FT5x06 I2C\n");
+ tsdata = devm_kzalloc(&client->dev, sizeof(*tsdata), GFP_KERNEL);
+ if (!tsdata) {
+ dev_err(&client->dev, "failed to allocate driver data.\n");
+ return -ENOMEM;
+ }
+
if (!pdata) {
- dev_err(&client->dev, "no platform data?\n");
- return -EINVAL;
+ error = edt_ft5x06_i2c_ts_probe_dt(&client->dev, tsdata);
+ if (error) {
+ dev_err(&client->dev,
+ "DT probe failed and no platform data present\n");
+ return error;
+ }
+ } else {
+ tsdata->reset_pin = pdata->reset_pin;
+ tsdata->irq_pin = pdata->irq_pin;
+ tsdata->wake_pin = -EINVAL;
}
- error = edt_ft5x06_ts_reset(client, pdata->reset_pin);
+ error = edt_ft5x06_ts_reset(client, tsdata);
if (error)
return error;
- if (gpio_is_valid(pdata->irq_pin)) {
- error = devm_gpio_request_one(&client->dev, pdata->irq_pin,
- GPIOF_IN, "edt-ft5x06 irq");
+ if (gpio_is_valid(tsdata->irq_pin)) {
+ error = devm_gpio_request_one(&client->dev, tsdata->irq_pin,
+ GPIOF_IN, "edt-ft5x06 irq");
if (error) {
dev_err(&client->dev,
"Failed to request GPIO %d, error %d\n",
- pdata->irq_pin, error);
+ tsdata->irq_pin, error);
return error;
}
}
- tsdata = devm_kzalloc(&client->dev, sizeof(*tsdata), GFP_KERNEL);
- if (!tsdata) {
- dev_err(&client->dev, "failed to allocate driver data.\n");
- return -ENOMEM;
- }
-
input = devm_input_allocate_device(&client->dev);
if (!input) {
dev_err(&client->dev, "failed to allocate input device.\n");
@@ -751,13 +1011,19 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
tsdata->input = input;
tsdata->factory_mode = false;
- error = edt_ft5x06_ts_identify(client, tsdata->name, fw_version);
+ error = edt_ft5x06_ts_identify(client, tsdata, fw_version);
if (error) {
dev_err(&client->dev, "touchscreen probe failed\n");
return error;
}
- edt_ft5x06_ts_get_defaults(tsdata, pdata);
+ edt_ft5x06_ts_set_regs(tsdata);
+
+ if (!pdata)
+ edt_ft5x06_ts_get_dt_defaults(client->dev.of_node, tsdata);
+ else
+ edt_ft5x06_ts_get_defaults(tsdata, pdata);
+
edt_ft5x06_ts_get_parameters(tsdata);
dev_dbg(&client->dev,
@@ -787,10 +1053,10 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
input_set_drvdata(input, tsdata);
i2c_set_clientdata(client, tsdata);
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, edt_ft5x06_ts_isr,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->name, tsdata);
+ error = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ edt_ft5x06_ts_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->name, tsdata);
if (error) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
return error;
@@ -801,19 +1067,21 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
error = input_register_device(input);
- if (error) {
- sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
- return error;
- }
+ if (error)
+ goto err_remove_attrs;
edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev));
device_init_wakeup(&client->dev, 1);
dev_dbg(&client->dev,
- "EDT FT5x06 initialized: IRQ pin %d, Reset pin %d.\n",
- pdata->irq_pin, pdata->reset_pin);
+ "EDT FT5x06 initialized: IRQ %d, WAKE pin %d, Reset pin %d.\n",
+ client->irq, tsdata->wake_pin, tsdata->reset_pin);
return 0;
+
+err_remove_attrs:
+ sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
+ return error;
}
static int edt_ft5x06_ts_remove(struct i2c_client *client)
@@ -852,15 +1120,26 @@ static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops,
edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume);
static const struct i2c_device_id edt_ft5x06_ts_id[] = {
- { "edt-ft5x06", 0 },
- { }
+ { "edt-ft5x06", 0, },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, edt_ft5x06_ts_id);
+#ifdef CONFIG_OF
+static const struct of_device_id edt_ft5x06_of_match[] = {
+ { .compatible = "edt,edt-ft5206", },
+ { .compatible = "edt,edt-ft5306", },
+ { .compatible = "edt,edt-ft5406", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, edt_ft5x06_of_match);
+#endif
+
static struct i2c_driver edt_ft5x06_ts_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "edt_ft5x06",
+ .of_match_table = of_match_ptr(edt_ft5x06_of_match),
.pm = &edt_ft5x06_ts_pm_ops,
},
.id_table = edt_ft5x06_ts_id,
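
The edt-ft5x06 changes above replace hard-coded M06 work-register offsets with a per-model register map so the same driver can also serve M09 parts, which lack a report-rate register. A minimal standalone sketch of that pattern follows; the register offsets here are placeholders, not the chip's real map.

    #include <stdio.h>

    #define NO_REGISTER 0xff            /* sentinel: register absent on this model */

    enum model { M06, M09 };

    struct reg_addr {
        unsigned char reg_threshold;
        unsigned char reg_report_rate;  /* NO_REGISTER on M09 */
    };

    /* Fill the register map per model, like edt_ft5x06_ts_set_regs(). */
    static void set_regs(struct reg_addr *r, enum model m)
    {
        if (m == M06) {
            r->reg_threshold   = 0x26;  /* placeholder offsets */
            r->reg_report_rate = 0x08;
        } else {                        /* M09 has no report-rate register */
            r->reg_threshold   = 0x80;
            r->reg_report_rate = NO_REGISTER;
        }
    }

    int main(void)
    {
        struct reg_addr r;

        set_regs(&r, M09);
        if (r.reg_report_rate != NO_REGISTER)   /* mirrors the guard in the diff */
            printf("read report rate from 0x%02x\n", r.reg_report_rate);
        else
            printf("skip report rate on this model\n");
        return 0;
    }
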
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 5c342b3139e8..3c0f57efe7b1 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -134,7 +134,8 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
} else if (!ts->low_latency_req.dev) {
/* First contact, request 100 us latency. */
dev_pm_qos_add_ancestor_request(&ts->client->dev,
- &ts->low_latency_req, 100);
+ &ts->low_latency_req,
+ DEV_PM_QOS_RESUME_LATENCY, 100);
}
/* SYN_REPORT */
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
deleted file mode 100644
index c47827a26e3c..000000000000
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
- * Texas Instruments TNETV107X Touchscreen Driver
- *
- * Copyright (C) 2010 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/input.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/ctype.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-
-#include <mach/tnetv107x.h>
-
-#define TSC_PENUP_POLL (HZ / 5)
-#define IDLE_TIMEOUT 100 /* msec */
-
-/*
- * The first and last samples of a touch interval are usually garbage and need
- * to be filtered out with these devices. The following definitions control
- * the number of samples skipped.
- */
-#define TSC_HEAD_SKIP 1
-#define TSC_TAIL_SKIP 1
-#define TSC_SKIP (TSC_HEAD_SKIP + TSC_TAIL_SKIP + 1)
-#define TSC_SAMPLES (TSC_SKIP + 1)
-
-/* Register Offsets */
-struct tsc_regs {
- u32 rev;
- u32 tscm;
- u32 bwcm;
- u32 swc;
- u32 adcchnl;
- u32 adcdata;
- u32 chval[4];
-};
-
-/* TSC Mode Configuration Register (tscm) bits */
-#define WMODE BIT(0)
-#define TSKIND BIT(1)
-#define ZMEASURE_EN BIT(2)
-#define IDLE BIT(3)
-#define TSC_EN BIT(4)
-#define STOP BIT(5)
-#define ONE_SHOT BIT(6)
-#define SINGLE BIT(7)
-#define AVG BIT(8)
-#define AVGNUM(x) (((x) & 0x03) << 9)
-#define PVSTC(x) (((x) & 0x07) << 11)
-#define PON BIT(14)
-#define PONBG BIT(15)
-#define AFERST BIT(16)
-
-/* ADC DATA Capture Register bits */
-#define DATA_VALID BIT(16)
-
-/* Register Access Macros */
-#define tsc_read(ts, reg) __raw_readl(&(ts)->regs->reg)
-#define tsc_write(ts, reg, val) __raw_writel(val, &(ts)->regs->reg);
-#define tsc_set_bits(ts, reg, val) \
- tsc_write(ts, reg, tsc_read(ts, reg) | (val))
-#define tsc_clr_bits(ts, reg, val) \
- tsc_write(ts, reg, tsc_read(ts, reg) & ~(val))
-
-struct sample {
- int x, y, p;
-};
-
-struct tsc_data {
- struct input_dev *input_dev;
- struct resource *res;
- struct tsc_regs __iomem *regs;
- struct timer_list timer;
- spinlock_t lock;
- struct clk *clk;
- struct device *dev;
- int sample_count;
- struct sample samples[TSC_SAMPLES];
- int tsc_irq;
-};
-
-static int tsc_read_sample(struct tsc_data *ts, struct sample* sample)
-{
- int x, y, z1, z2, t, p = 0;
- u32 val;
-
- val = tsc_read(ts, chval[0]);
- if (val & DATA_VALID)
- x = val & 0xffff;
- else
- return -EINVAL;
-
- y = tsc_read(ts, chval[1]) & 0xffff;
- z1 = tsc_read(ts, chval[2]) & 0xffff;
- z2 = tsc_read(ts, chval[3]) & 0xffff;
-
- if (z1) {
- t = ((600 * x) * (z2 - z1));
- p = t / (u32) (z1 << 12);
- if (p < 0)
- p = 0;
- }
-
- sample->x = x;
- sample->y = y;
- sample->p = p;
-
- return 0;
-}
-
-static void tsc_poll(unsigned long data)
-{
- struct tsc_data *ts = (struct tsc_data *)data;
- unsigned long flags;
- int i, val, x, y, p;
-
- spin_lock_irqsave(&ts->lock, flags);
-
- if (ts->sample_count >= TSC_SKIP) {
- input_report_abs(ts->input_dev, ABS_PRESSURE, 0);
- input_report_key(ts->input_dev, BTN_TOUCH, 0);
- input_sync(ts->input_dev);
- } else if (ts->sample_count > 0) {
- /*
- * A touch event lasted less than our skip count. Salvage and
- * report anyway.
- */
- for (i = 0, val = 0; i < ts->sample_count; i++)
- val += ts->samples[i].x;
- x = val / ts->sample_count;
-
- for (i = 0, val = 0; i < ts->sample_count; i++)
- val += ts->samples[i].y;
- y = val / ts->sample_count;
-
- for (i = 0, val = 0; i < ts->sample_count; i++)
- val += ts->samples[i].p;
- p = val / ts->sample_count;
-
- input_report_abs(ts->input_dev, ABS_X, x);
- input_report_abs(ts->input_dev, ABS_Y, y);
- input_report_abs(ts->input_dev, ABS_PRESSURE, p);
- input_report_key(ts->input_dev, BTN_TOUCH, 1);
- input_sync(ts->input_dev);
- }
-
- ts->sample_count = 0;
-
- spin_unlock_irqrestore(&ts->lock, flags);
-}
-
-static irqreturn_t tsc_irq(int irq, void *dev_id)
-{
- struct tsc_data *ts = (struct tsc_data *)dev_id;
- struct sample *sample;
- int index;
-
- spin_lock(&ts->lock);
-
- index = ts->sample_count % TSC_SAMPLES;
- sample = &ts->samples[index];
- if (tsc_read_sample(ts, sample) < 0)
- goto out;
-
- if (++ts->sample_count >= TSC_SKIP) {
- index = (ts->sample_count - TSC_TAIL_SKIP - 1) % TSC_SAMPLES;
- sample = &ts->samples[index];
-
- input_report_abs(ts->input_dev, ABS_X, sample->x);
- input_report_abs(ts->input_dev, ABS_Y, sample->y);
- input_report_abs(ts->input_dev, ABS_PRESSURE, sample->p);
- if (ts->sample_count == TSC_SKIP)
- input_report_key(ts->input_dev, BTN_TOUCH, 1);
- input_sync(ts->input_dev);
- }
- mod_timer(&ts->timer, jiffies + TSC_PENUP_POLL);
-out:
- spin_unlock(&ts->lock);
- return IRQ_HANDLED;
-}
-
-static int tsc_start(struct input_dev *dev)
-{
- struct tsc_data *ts = input_get_drvdata(dev);
- unsigned long timeout = jiffies + msecs_to_jiffies(IDLE_TIMEOUT);
- u32 val;
-
- clk_enable(ts->clk);
-
- /* Go to idle mode, before any initialization */
- while (time_after(timeout, jiffies)) {
- if (tsc_read(ts, tscm) & IDLE)
- break;
- }
-
- if (time_before(timeout, jiffies)) {
- dev_warn(ts->dev, "timeout waiting for idle\n");
- clk_disable(ts->clk);
- return -EIO;
- }
-
- /* Configure TSC Control register*/
- val = (PONBG | PON | PVSTC(4) | ONE_SHOT | ZMEASURE_EN);
- tsc_write(ts, tscm, val);
-
- /* Bring TSC out of reset: Clear AFE reset bit */
- val &= ~(AFERST);
- tsc_write(ts, tscm, val);
-
- /* Configure all pins for hardware control*/
- tsc_write(ts, bwcm, 0);
-
- /* Finally enable the TSC */
- tsc_set_bits(ts, tscm, TSC_EN);
-
- return 0;
-}
-
-static void tsc_stop(struct input_dev *dev)
-{
- struct tsc_data *ts = input_get_drvdata(dev);
-
- tsc_clr_bits(ts, tscm, TSC_EN);
- synchronize_irq(ts->tsc_irq);
- del_timer_sync(&ts->timer);
- clk_disable(ts->clk);
-}
-
-static int tsc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct tsc_data *ts;
- int error = 0;
- u32 rev = 0;
-
- ts = kzalloc(sizeof(struct tsc_data), GFP_KERNEL);
- if (!ts) {
- dev_err(dev, "cannot allocate device info\n");
- return -ENOMEM;
- }
-
- ts->dev = dev;
- spin_lock_init(&ts->lock);
- setup_timer(&ts->timer, tsc_poll, (unsigned long)ts);
- platform_set_drvdata(pdev, ts);
-
- ts->tsc_irq = platform_get_irq(pdev, 0);
- if (ts->tsc_irq < 0) {
- dev_err(dev, "cannot determine device interrupt\n");
- error = -ENODEV;
- goto error_res;
- }
-
- ts->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!ts->res) {
- dev_err(dev, "cannot determine register area\n");
- error = -ENODEV;
- goto error_res;
- }
-
- if (!request_mem_region(ts->res->start, resource_size(ts->res),
- pdev->name)) {
- dev_err(dev, "cannot claim register memory\n");
- ts->res = NULL;
- error = -EINVAL;
- goto error_res;
- }
-
- ts->regs = ioremap(ts->res->start, resource_size(ts->res));
- if (!ts->regs) {
- dev_err(dev, "cannot map register memory\n");
- error = -ENOMEM;
- goto error_map;
- }
-
- ts->clk = clk_get(dev, NULL);
- if (IS_ERR(ts->clk)) {
- dev_err(dev, "cannot claim device clock\n");
- error = PTR_ERR(ts->clk);
- goto error_clk;
- }
-
- error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, IRQF_ONESHOT,
- dev_name(dev), ts);
- if (error < 0) {
- dev_err(ts->dev, "Could not allocate ts irq\n");
- goto error_irq;
- }
-
- ts->input_dev = input_allocate_device();
- if (!ts->input_dev) {
- dev_err(dev, "cannot allocate input device\n");
- error = -ENOMEM;
- goto error_input;
- }
- input_set_drvdata(ts->input_dev, ts);
-
- ts->input_dev->name = pdev->name;
- ts->input_dev->id.bustype = BUS_HOST;
- ts->input_dev->dev.parent = &pdev->dev;
- ts->input_dev->open = tsc_start;
- ts->input_dev->close = tsc_stop;
-
- clk_enable(ts->clk);
- rev = tsc_read(ts, rev);
- ts->input_dev->id.product = ((rev >> 8) & 0x07);
- ts->input_dev->id.version = ((rev >> 16) & 0xfff);
- clk_disable(ts->clk);
-
- __set_bit(EV_KEY, ts->input_dev->evbit);
- __set_bit(EV_ABS, ts->input_dev->evbit);
- __set_bit(BTN_TOUCH, ts->input_dev->keybit);
-
- input_set_abs_params(ts->input_dev, ABS_X, 0, 0xffff, 5, 0);
- input_set_abs_params(ts->input_dev, ABS_Y, 0, 0xffff, 5, 0);
- input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 4095, 128, 0);
-
- error = input_register_device(ts->input_dev);
- if (error < 0) {
- dev_err(dev, "failed input device registration\n");
- goto error_reg;
- }
-
- return 0;
-
-error_reg:
- input_free_device(ts->input_dev);
-error_input:
- free_irq(ts->tsc_irq, ts);
-error_irq:
- clk_put(ts->clk);
-error_clk:
- iounmap(ts->regs);
-error_map:
- release_mem_region(ts->res->start, resource_size(ts->res));
-error_res:
- kfree(ts);
-
- return error;
-}
-
-static int tsc_remove(struct platform_device *pdev)
-{
- struct tsc_data *ts = platform_get_drvdata(pdev);
-
- input_unregister_device(ts->input_dev);
- free_irq(ts->tsc_irq, ts);
- clk_put(ts->clk);
- iounmap(ts->regs);
- release_mem_region(ts->res->start, resource_size(ts->res));
- kfree(ts);
-
- return 0;
-}
-
-static struct platform_driver tsc_driver = {
- .probe = tsc_probe,
- .remove = tsc_remove,
- .driver.name = "tnetv107x-ts",
- .driver.owner = THIS_MODULE,
-};
-module_platform_driver(tsc_driver);
-
-MODULE_AUTHOR("Cyril Chemparathy");
-MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
-MODULE_ALIAS("platform:tnetv107x-ts");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 2175f3419002..01d30cedde46 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -29,10 +29,13 @@
#include <linux/sysfs.h>
#include <linux/input/mt.h>
#include <linux/platform_data/zforce_ts.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#define WAIT_TIMEOUT msecs_to_jiffies(1000)
#define FRAME_START 0xee
+#define FRAME_MAXSIZE 257
/* Offsets of the different parts of the payload the controller sends */
#define PAYLOAD_HEADER 0
@@ -64,7 +67,7 @@
#define RESPONSE_STATUS 0X1e
/*
- * Notifications are send by the touch controller without
+ * Notifications are sent by the touch controller without
* being requested by the driver and include for example
* touch indications
*/
@@ -103,8 +106,8 @@ struct zforce_point {
* @suspended device suspended
* @access_mutex serialize i2c-access, to keep multipart reads together
* @command_done completion to wait for the command result
- * @command_mutex serialize commands send to the ic
- * @command_waiting the id of the command that that is currently waiting
+ * @command_mutex serialize commands sent to the ic
+ * @command_waiting the id of the command that is currently waiting
* for a result
* @command_result returned result of the command
*/
@@ -235,7 +238,8 @@ static int zforce_scan_frequency(struct zforce_ts *ts, u16 idle, u16 finger,
(finger & 0xff), ((finger >> 8) & 0xff),
(stylus & 0xff), ((stylus >> 8) & 0xff) };
- dev_dbg(&client->dev, "set scan frequency to (idle: %d, finger: %d, stylus: %d)\n",
+ dev_dbg(&client->dev,
+ "set scan frequency to (idle: %d, finger: %d, stylus: %d)\n",
idle, finger, stylus);
return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
@@ -255,7 +259,7 @@ static int zforce_setconfig(struct zforce_ts *ts, char b1)
static int zforce_start(struct zforce_ts *ts)
{
struct i2c_client *client = ts->client;
- const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+ const struct zforce_ts_platdata *pdata = ts->pdata;
int ret;
dev_dbg(&client->dev, "starting device\n");
@@ -326,13 +330,14 @@ static int zforce_stop(struct zforce_ts *ts)
static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
{
struct i2c_client *client = ts->client;
- const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+ const struct zforce_ts_platdata *pdata = ts->pdata;
struct zforce_point point;
int count, i, num = 0;
count = payload[0];
if (count > ZFORCE_REPORT_POINTS) {
- dev_warn(&client->dev, "to many coordinates %d, expected max %d\n",
+ dev_warn(&client->dev,
+ "too many coordinates %d, expected max %d\n",
count, ZFORCE_REPORT_POINTS);
count = ZFORCE_REPORT_POINTS;
}
@@ -421,7 +426,7 @@ static int zforce_read_packet(struct zforce_ts *ts, u8 *buf)
goto unlock;
}
- if (buf[PAYLOAD_LENGTH] <= 0 || buf[PAYLOAD_LENGTH] > 255) {
+ if (buf[PAYLOAD_LENGTH] == 0) {
dev_err(&client->dev, "invalid payload length: %d\n",
buf[PAYLOAD_LENGTH]);
ret = -EIO;
@@ -471,9 +476,9 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
{
struct zforce_ts *ts = dev_id;
struct i2c_client *client = ts->client;
- const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+ const struct zforce_ts_platdata *pdata = ts->pdata;
int ret;
- u8 payload_buffer[512];
+ u8 payload_buffer[FRAME_MAXSIZE];
u8 *payload;
/*
@@ -494,8 +499,8 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
while (!gpio_get_value(pdata->gpio_int)) {
ret = zforce_read_packet(ts, payload_buffer);
if (ret < 0) {
- dev_err(&client->dev, "could not read packet, ret: %d\n",
- ret);
+ dev_err(&client->dev,
+ "could not read packet, ret: %d\n", ret);
break;
}
@@ -539,7 +544,8 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
payload[RESPONSE_DATA + 4];
ts->version_rev = (payload[RESPONSE_DATA + 7] << 8) |
payload[RESPONSE_DATA + 6];
- dev_dbg(&ts->client->dev, "Firmware Version %04x:%04x %04x:%04x\n",
+ dev_dbg(&ts->client->dev,
+ "Firmware Version %04x:%04x %04x:%04x\n",
ts->version_major, ts->version_minor,
ts->version_build, ts->version_rev);
@@ -552,7 +558,8 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
break;
default:
- dev_err(&ts->client->dev, "unrecognized response id: 0x%x\n",
+ dev_err(&ts->client->dev,
+ "unrecognized response id: 0x%x\n",
payload[RESPONSE_ID]);
break;
}
@@ -618,7 +625,8 @@ static int zforce_suspend(struct device *dev)
enable_irq_wake(client->irq);
} else if (input->users) {
- dev_dbg(&client->dev, "suspend without being a wakeup source\n");
+ dev_dbg(&client->dev,
+ "suspend without being a wakeup source\n");
ret = zforce_stop(ts);
if (ret)
@@ -684,6 +692,45 @@ static void zforce_reset(void *data)
gpio_set_value(ts->pdata->gpio_rst, 0);
}
+static struct zforce_ts_platdata *zforce_parse_dt(struct device *dev)
+{
+ struct zforce_ts_platdata *pdata;
+ struct device_node *np = dev->of_node;
+
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "failed to allocate platform data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->gpio_int = of_get_gpio(np, 0);
+ if (!gpio_is_valid(pdata->gpio_int)) {
+ dev_err(dev, "failed to get interrupt gpio\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata->gpio_rst = of_get_gpio(np, 1);
+ if (!gpio_is_valid(pdata->gpio_rst)) {
+ dev_err(dev, "failed to get reset gpio\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(np, "x-size", &pdata->x_max)) {
+ dev_err(dev, "failed to get x-size property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(np, "y-size", &pdata->y_max)) {
+ dev_err(dev, "failed to get y-size property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return pdata;
+}
+
static int zforce_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -692,8 +739,11 @@ static int zforce_probe(struct i2c_client *client,
struct input_dev *input_dev;
int ret;
- if (!pdata)
- return -EINVAL;
+ if (!pdata) {
+ pdata = zforce_parse_dt(&client->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ }
ts = devm_kzalloc(&client->dev, sizeof(struct zforce_ts), GFP_KERNEL);
if (!ts)
@@ -798,7 +848,7 @@ static int zforce_probe(struct i2c_client *client,
return ret;
}
- /* this gets the firmware version among other informations */
+ /* this gets the firmware version among other information */
ret = zforce_command_wait(ts, COMMAND_STATUS);
if (ret < 0) {
dev_err(&client->dev, "couldn't get status, %d\n", ret);
@@ -829,11 +879,20 @@ static struct i2c_device_id zforce_idtable[] = {
};
MODULE_DEVICE_TABLE(i2c, zforce_idtable);
+#ifdef CONFIG_OF
+static struct of_device_id zforce_dt_idtable[] = {
+ { .compatible = "neonode,zforce" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, zforce_dt_idtable);
+#endif
+
static struct i2c_driver zforce_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "zforce-ts",
.pm = &zforce_pm_ops,
+ .of_match_table = of_match_ptr(zforce_dt_idtable),
},
.probe = zforce_probe,
.id_table = zforce_idtable,
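
The zforce probe path above now falls back to parsing the device tree when no platform data is supplied, reporting failures through the kernel's ERR_PTR convention. A compressed, self-contained illustration of that convention; the macros are simplified re-statements, not the kernel headers, and the parse function is a stand-in for zforce_parse_dt().

    #include <errno.h>
    #include <stdio.h>

    /* Simplified versions of the kernel's err.h helpers. */
    #define MAX_ERRNO 4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define PTR_ERR(ptr)  ((long)(ptr))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    struct platdata { int x_max, y_max; };

    /* Stand-in for zforce_parse_dt(): fail when there is no DT node. */
    static struct platdata *parse_dt(int have_node)
    {
        static struct platdata pd = { .x_max = 800, .y_max = 600 };
        return have_node ? &pd : ERR_PTR(-ENOENT);
    }

    static long probe(struct platdata *pdata, int have_node)
    {
        if (!pdata) {                       /* no board-file data: try DT */
            pdata = parse_dt(have_node);
            if (IS_ERR(pdata))
                return PTR_ERR(pdata);      /* propagate -ENOENT etc. */
        }
        printf("using %dx%d\n", pdata->x_max, pdata->y_max);
        return 0;
    }

    int main(void)
    {
        printf("probe with DT: %ld\n", probe(NULL, 1));
        printf("probe without DT or pdata: %ld\n", probe(NULL, 0));
        return 0;
    }
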
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 79bbc21c1d01..df56e4c74a7e 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -207,7 +207,7 @@ config SHMOBILE_IOMMU
bool "IOMMU for Renesas IPMMU/IPMMUI"
default n
depends on ARM
- depends on SH_MOBILE || COMPILE_TEST
+ depends on ARCH_SHMOBILE || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
select SHMOBILE_IPMMU
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index faf0da4bb3a2..c949520bd196 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -963,7 +963,7 @@ static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
address &= ~(0xfffULL);
- cmd->data[0] = pasid & PASID_MASK;
+ cmd->data[0] = pasid;
cmd->data[1] = domid;
cmd->data[2] = lower_32_bits(address);
cmd->data[3] = upper_32_bits(address);
@@ -982,10 +982,10 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
address &= ~(0xfffULL);
cmd->data[0] = devid;
- cmd->data[0] |= (pasid & 0xff) << 16;
+ cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
cmd->data[0] |= (qdep & 0xff) << 24;
cmd->data[1] = devid;
- cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
+ cmd->data[1] |= (pasid & 0xff) << 16;
cmd->data[2] = lower_32_bits(address);
cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
cmd->data[3] = upper_32_bits(address);
@@ -1001,7 +1001,7 @@ static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
cmd->data[0] = devid;
if (gn) {
- cmd->data[1] = pasid & PASID_MASK;
+ cmd->data[1] = pasid;
cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
}
cmd->data[3] = tag & 0x1ff;
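
The amd_iommu.c hunk above swaps which PASID byte lands in each IOTLB-invalidation command word: after the change, data[0] carries PASID bits 15:8 at bit 16 and data[1] carries bits 7:0 at bit 16. A standalone bit-packing check of that layout, with the field positions taken from the diff itself rather than re-verified against the IOMMU specification:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack devid/pasid/qdep the way the patched build_inv_iotlb_pasid() does. */
    static void pack(uint32_t data[2], uint16_t devid, uint32_t pasid, uint8_t qdep)
    {
        data[0]  = devid;
        data[0] |= ((pasid >> 8) & 0xff) << 16;   /* PASID[15:8] */
        data[0] |= (uint32_t)(qdep & 0xff) << 24;
        data[1]  = devid;
        data[1] |= (pasid & 0xff) << 16;          /* PASID[7:0] */
    }

    int main(void)
    {
        uint32_t data[2];

        pack(data, 0x1234, 0xabcd, 3);
        printf("data[0]=%08x data[1]=%08x\n", data[0], data[1]);
        /* expected: data[0]=03ab1234, data[1]=00cd1234 */
        return 0;
    }
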
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 28b4bea7c109..b76c58dbe30c 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -150,7 +150,7 @@ int amd_iommus_present;
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;
-u32 amd_iommu_max_pasids __read_mostly = ~0;
+u32 amd_iommu_max_pasid __read_mostly = ~0;
bool amd_iommu_v2_present __read_mostly;
bool amd_iommu_pc_present __read_mostly;
@@ -1231,14 +1231,16 @@ static int iommu_init_pci(struct amd_iommu *iommu)
if (iommu_feature(iommu, FEATURE_GT)) {
int glxval;
- u32 pasids;
- u64 shift;
+ u32 max_pasid;
+ u64 pasmax;
- shift = iommu->features & FEATURE_PASID_MASK;
- shift >>= FEATURE_PASID_SHIFT;
- pasids = (1 << shift);
+ pasmax = iommu->features & FEATURE_PASID_MASK;
+ pasmax >>= FEATURE_PASID_SHIFT;
+ max_pasid = (1 << (pasmax + 1)) - 1;
- amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+ amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
+
+ BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
glxval = iommu->features & FEATURE_GLXVAL_MASK;
glxval >>= FEATURE_GLXVAL_SHIFT;
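
The init change above also reinterprets the PASmax capability field: the old code treated it as log2 of the number of PASIDs, while the new code derives the largest usable PASID value, (1 << (PASmax + 1)) - 1, and the BUG_ON checks that it still fits the 16-bit PASID_MASK. A quick arithmetic check of the two readings for an example field value of 15:

    #include <stdio.h>

    int main(void)
    {
        unsigned int pasmax  = 15;                        /* example field value */
        unsigned int old     = 1u << pasmax;              /* old: a count, 0x8000 */
        unsigned int new_max = (1u << (pasmax + 1)) - 1;  /* new: largest PASID, 0xffff */

        printf("old=%#x new=%#x fits 16-bit mask: %d\n",
               old, new_max, (new_max & ~0xffffu) == 0);
        return 0;
    }
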
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index e400fbe411de..f1a5abf11acf 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/irqreturn.h>
/*
* Maximum number of IOMMUs supported
@@ -98,7 +99,12 @@
#define FEATURE_GLXVAL_SHIFT 14
#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
-#define PASID_MASK 0x000fffff
+/* Note:
+ * The current driver only supports 16-bit PASIDs.
+ * Current hardware only implements up to 16-bit PASIDs,
+ * even though the spec says it could have up to 20 bits.
+ */
+#define PASID_MASK 0x0000ffff
/* MMIO status bits */
#define MMIO_STATUS_EVT_INT_MASK (1 << 1)
@@ -696,8 +702,8 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
*/
extern u32 amd_iommu_unmap_flush;
-/* Smallest number of PASIDs supported by any IOMMU in the system */
-extern u32 amd_iommu_max_pasids;
+/* Smallest max PASID supported by any IOMMU in the system */
+extern u32 amd_iommu_max_pasid;
extern bool amd_iommu_v2_present;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1d9ab39af29f..8b89e33a89fe 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -48,7 +48,7 @@
#include <asm/pgalloc.h>
/* Maximum number of stream IDs assigned to a single device */
-#define MAX_MASTER_STREAMIDS 8
+#define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128
@@ -60,6 +60,16 @@
#define ARM_SMMU_GR0(smmu) ((smmu)->base)
#define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize)
+/*
+ * SMMU global address space with conditional offset to access secure
+ * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
+ * nsGFSYNR0: 0x450)
+ */
+#define ARM_SMMU_GR0_NS(smmu) \
+ ((smmu)->base + \
+ ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
+ ? 0x400 : 0))
+
/* Page table bits */
#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53)
#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52)
@@ -351,6 +361,9 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
u32 features;
+
+#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+ u32 options;
int version;
u32 num_context_banks;
@@ -401,6 +414,29 @@ struct arm_smmu_domain {
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
+struct arm_smmu_option_prop {
+ u32 opt;
+ const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+ { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+ { 0, NULL},
+};
+
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+ int i = 0;
+ do {
+ if (of_property_read_bool(smmu->dev->of_node,
+ arm_smmu_options[i].prop)) {
+ smmu->options |= arm_smmu_options[i].opt;
+ dev_notice(smmu->dev, "option %s\n",
+ arm_smmu_options[i].prop);
+ }
+ } while (arm_smmu_options[++i].opt);
+}
+
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
struct device_node *dev_node)
{
@@ -614,16 +650,16 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
struct arm_smmu_device *smmu = dev;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
- if (!gfsr)
- return IRQ_NONE;
-
gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+ if (!gfsr)
+ return IRQ_NONE;
+
dev_err_ratelimited(smmu->dev,
"Unexpected global fault, this could be serious\n");
dev_err_ratelimited(smmu->dev,
@@ -642,7 +678,7 @@ static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
/* Ensure new page tables are visible to the hardware walker */
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
- dsb();
+ dsb(ishst);
} else {
/*
* If the SMMU can't walk tables in the CPU caches, treat them
@@ -990,9 +1026,8 @@ static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
/*
* Recursively free the page tables for this domain. We don't
- * care about speculative TLB filling, because the TLB will be
- * nuked next time this context bank is re-allocated and no devices
- * currently map to these tables.
+ * care about speculative TLB filling because the tables should
+ * not be active in any context bank at this point (SCTLR.M is 0).
*/
pgd = pgd_base;
for (i = 0; i < PTRS_PER_PGD; ++i) {
@@ -1218,7 +1253,7 @@ static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
unsigned long addr, unsigned long end,
- unsigned long pfn, int flags, int stage)
+ unsigned long pfn, int prot, int stage)
{
pte_t *pte, *start;
pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
@@ -1240,28 +1275,28 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
if (stage == 1) {
pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
- if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
+ if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pteval |= ARM_SMMU_PTE_AP_RDONLY;
- if (flags & IOMMU_CACHE)
+ if (prot & IOMMU_CACHE)
pteval |= (MAIR_ATTR_IDX_CACHE <<
ARM_SMMU_PTE_ATTRINDX_SHIFT);
} else {
pteval |= ARM_SMMU_PTE_HAP_FAULT;
- if (flags & IOMMU_READ)
+ if (prot & IOMMU_READ)
pteval |= ARM_SMMU_PTE_HAP_READ;
- if (flags & IOMMU_WRITE)
+ if (prot & IOMMU_WRITE)
pteval |= ARM_SMMU_PTE_HAP_WRITE;
- if (flags & IOMMU_CACHE)
+ if (prot & IOMMU_CACHE)
pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
else
pteval |= ARM_SMMU_PTE_MEMATTR_NC;
}
/* If no access, create a faulting entry to avoid TLB fills */
- if (flags & IOMMU_EXEC)
+ if (prot & IOMMU_EXEC)
pteval &= ~ARM_SMMU_PTE_XN;
- else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+ else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
pteval &= ~ARM_SMMU_PTE_PAGE;
pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1323,7 +1358,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
unsigned long addr, unsigned long end,
- phys_addr_t phys, int flags, int stage)
+ phys_addr_t phys, int prot, int stage)
{
int ret;
pmd_t *pmd;
@@ -1347,7 +1382,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
do {
next = pmd_addr_end(addr, end);
ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
- flags, stage);
+ prot, stage);
phys += next - addr;
} while (pmd++, addr = next, addr < end);
@@ -1356,7 +1391,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
unsigned long addr, unsigned long end,
- phys_addr_t phys, int flags, int stage)
+ phys_addr_t phys, int prot, int stage)
{
int ret = 0;
pud_t *pud;
@@ -1380,7 +1415,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
do {
next = pud_addr_end(addr, end);
ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
- flags, stage);
+ prot, stage);
phys += next - addr;
} while (pud++, addr = next, addr < end);
@@ -1389,7 +1424,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
unsigned long iova, phys_addr_t paddr,
- size_t size, int flags)
+ size_t size, int prot)
{
int ret, stage;
unsigned long end;
@@ -1397,7 +1432,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
pgd_t *pgd = root_cfg->pgd;
struct arm_smmu_device *smmu = root_cfg->smmu;
- unsigned long irqflags;
+ unsigned long flags;
if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
stage = 2;
@@ -1420,14 +1455,14 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
if (paddr & ~output_mask)
return -ERANGE;
- spin_lock_irqsave(&smmu_domain->lock, irqflags);
+ spin_lock_irqsave(&smmu_domain->lock, flags);
pgd += pgd_index(iova);
end = iova + size;
do {
unsigned long next = pgd_addr_end(iova, end);
ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
- flags, stage);
+ prot, stage);
if (ret)
goto out_unlock;
@@ -1436,13 +1471,13 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
} while (pgd++, iova != end);
out_unlock:
- spin_unlock_irqrestore(&smmu_domain->lock, irqflags);
+ spin_unlock_irqrestore(&smmu_domain->lock, flags);
return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int flags)
+ phys_addr_t paddr, size_t size, int prot)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
@@ -1453,7 +1488,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
if ((phys_addr_t)iova & ~smmu_domain->output_mask)
return -ERANGE;
- return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags);
+ return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -1597,9 +1632,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
int i = 0;
u32 reg;
- /* Clear Global FSR */
- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
- writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);
+ /* clear global FSR */
+ reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+ writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
/* Mark all SMRn as invalid and all S2CRn as bypass */
for (i = 0; i < smmu->num_mapping_groups; ++i) {
@@ -1619,7 +1654,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+ reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
/* Enable fault reporting */
reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
@@ -1638,7 +1673,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Push the button */
arm_smmu_tlb_sync(smmu);
- writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
+ writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
static int arm_smmu_id_size_to_bits(int size)
@@ -1885,6 +1920,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (err)
goto out_put_parent;
+ parse_driver_options(smmu);
+
if (smmu->version > 1 &&
smmu->num_context_banks != smmu->num_context_irqs) {
dev_err(dev,
@@ -1969,7 +2006,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
free_irq(smmu->irqs[i], smmu);
/* Turn the thing off */
- writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+ writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
return 0;
}
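
On Calxeda-style integrations the hunks above route a few global registers through ARM_SMMU_GR0_NS(), which adds a 0x400 offset only when the "calxeda,smmu-secure-config-access" option was parsed from the DT (per the comment in the hunk, the non-secure GFSR alias sits at 0x448, i.e. GFSR at 0x48 plus 0x400). A tiny standalone model of that conditional aliasing; the base address is invented for the demo.

    #include <stdint.h>
    #include <stdio.h>

    #define OPT_SECURE_CFG_ACCESS (1u << 0)

    struct smmu { uintptr_t base; uint32_t options; };

    /* Mirror of ARM_SMMU_GR0_NS(): the secure alias lives 0x400 above GR0. */
    static uintptr_t gr0_ns(const struct smmu *s)
    {
        return s->base + ((s->options & OPT_SECURE_CFG_ACCESS) ? 0x400 : 0);
    }

    int main(void)
    {
        struct smmu plain  = { .base = 0x2b400000, .options = 0 };
        struct smmu secure = { .base = 0x2b400000,
                               .options = OPT_SECURE_CFG_ACCESS };

        printf("GFSR at %#lx (plain), %#lx (secure alias)\n",
               (unsigned long)(gr0_ns(&plain) + 0x48),
               (unsigned long)(gr0_ns(&secure) + 0x48));
        return 0;
    }
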
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 158156543410..f445c10df8df 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -43,14 +43,24 @@
#include "irq_remapping.h"
-/* No locks are needed as DMA remapping hardware unit
- * list is constructed at boot time and hotplug of
- * these units are not supported by the architecture.
+/*
+ * Assumptions:
+ * 1) The hotplug framework guarantees that DMAR unit will be hot-added
+ * before IO devices managed by that unit.
+ * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
+ * after IO devices managed by that unit.
+ * 3) Hotplug events are rare.
+ *
+ * Locking rules for DMA and interrupt remapping related global data structures:
+ * 1) Use dmar_global_lock in process context
+ * 2) Use RCU in interrupt context
*/
+DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
+static int dmar_dev_scope_status = 1;
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
@@ -62,73 +72,20 @@ static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
* the very end.
*/
if (drhd->include_all)
- list_add_tail(&drhd->list, &dmar_drhd_units);
+ list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
else
- list_add(&drhd->list, &dmar_drhd_units);
+ list_add_rcu(&drhd->list, &dmar_drhd_units);
}
-static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
- struct pci_dev **dev, u16 segment)
-{
- struct pci_bus *bus;
- struct pci_dev *pdev = NULL;
- struct acpi_dmar_pci_path *path;
- int count;
-
- bus = pci_find_bus(segment, scope->bus);
- path = (struct acpi_dmar_pci_path *)(scope + 1);
- count = (scope->length - sizeof(struct acpi_dmar_device_scope))
- / sizeof(struct acpi_dmar_pci_path);
-
- while (count) {
- if (pdev)
- pci_dev_put(pdev);
- /*
- * Some BIOSes list non-exist devices in DMAR table, just
- * ignore it
- */
- if (!bus) {
- pr_warn("Device scope bus [%d] not found\n", scope->bus);
- break;
- }
- pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
- if (!pdev) {
- /* warning will be printed below */
- break;
- }
- path ++;
- count --;
- bus = pdev->subordinate;
- }
- if (!pdev) {
- pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, scope->bus, path->device, path->function);
- return 0;
- }
- if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
- pdev->subordinate) || (scope->entry_type == \
- ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
- pci_dev_put(pdev);
- pr_warn("Device scope type does not match for %s\n",
- pci_name(pdev));
- return -EINVAL;
- }
- *dev = pdev;
- return 0;
-}
-
-int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
- struct pci_dev ***devices, u16 segment)
+void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
struct acpi_dmar_device_scope *scope;
- void * tmp = start;
- int index;
- int ret;
*cnt = 0;
while (start < end) {
scope = start;
- if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
+ if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI ||
+ scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
(*cnt)++;
else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
@@ -138,43 +95,236 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
start += scope->length;
}
if (*cnt == 0)
- return 0;
+ return NULL;
- *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
- if (!*devices)
- return -ENOMEM;
+ return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
+}
- start = tmp;
- index = 0;
- while (start < end) {
+void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
+{
+ int i;
+ struct device *tmp_dev;
+
+ if (*devices && *cnt) {
+ for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
+ put_device(tmp_dev);
+ kfree(*devices);
+ }
+
+ *devices = NULL;
+ *cnt = 0;
+}
+
+/* Optimize out kzalloc()/kfree() for normal cases */
+static char dmar_pci_notify_info_buf[64];
+
+static struct dmar_pci_notify_info *
+dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
+{
+ int level = 0;
+ size_t size;
+ struct pci_dev *tmp;
+ struct dmar_pci_notify_info *info;
+
+ BUG_ON(dev->is_virtfn);
+
+ /* Only generate path[] for device addition event */
+ if (event == BUS_NOTIFY_ADD_DEVICE)
+ for (tmp = dev; tmp; tmp = tmp->bus->self)
+ level++;
+
+ size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+ if (size <= sizeof(dmar_pci_notify_info_buf)) {
+ info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
+ } else {
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info) {
+ pr_warn("Out of memory when allocating notify_info "
+ "for %s.\n", pci_name(dev));
+ if (dmar_dev_scope_status == 0)
+ dmar_dev_scope_status = -ENOMEM;
+ return NULL;
+ }
+ }
+
+ info->event = event;
+ info->dev = dev;
+ info->seg = pci_domain_nr(dev->bus);
+ info->level = level;
+ if (event == BUS_NOTIFY_ADD_DEVICE) {
+ for (tmp = dev, level--; tmp; tmp = tmp->bus->self) {
+ info->path[level].device = PCI_SLOT(tmp->devfn);
+ info->path[level].function = PCI_FUNC(tmp->devfn);
+ if (pci_is_root_bus(tmp->bus))
+ info->bus = tmp->bus->number;
+ }
+ }
+
+ return info;
+}
+
+static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
+{
+ if ((void *)info != dmar_pci_notify_info_buf)
+ kfree(info);
+}
+
+static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
+ struct acpi_dmar_pci_path *path, int count)
+{
+ int i;
+
+ if (info->bus != bus)
+ return false;
+ if (info->level != count)
+ return false;
+
+ for (i = 0; i < count; i++) {
+ if (path[i].device != info->path[i].device ||
+ path[i].function != info->path[i].function)
+ return false;
+ }
+
+ return true;
+}
+
+/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
+int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
+ void *start, void*end, u16 segment,
+ struct dmar_dev_scope *devices,
+ int devices_cnt)
+{
+ int i, level;
+ struct device *tmp, *dev = &info->dev->dev;
+ struct acpi_dmar_device_scope *scope;
+ struct acpi_dmar_pci_path *path;
+
+ if (segment != info->seg)
+ return 0;
+
+ for (; start < end; start += scope->length) {
scope = start;
- if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
- scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
- ret = dmar_parse_one_dev_scope(scope,
- &(*devices)[index], segment);
- if (ret) {
- dmar_free_dev_scope(devices, cnt);
- return ret;
- }
- index ++;
+ if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
+ scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
+ continue;
+
+ path = (struct acpi_dmar_pci_path *)(scope + 1);
+ level = (scope->length - sizeof(*scope)) / sizeof(*path);
+ if (!dmar_match_pci_path(info, scope->bus, path, level))
+ continue;
+
+ if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
+ (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
+ pr_warn("Device scope type does not match for %s\n",
+ pci_name(info->dev));
+ return -EINVAL;
}
- start += scope->length;
+
+ for_each_dev_scope(devices, devices_cnt, i, tmp)
+ if (tmp == NULL) {
+ devices[i].bus = info->dev->bus->number;
+ devices[i].devfn = info->dev->devfn;
+ rcu_assign_pointer(devices[i].dev,
+ get_device(dev));
+ return 1;
+ }
+ BUG_ON(i >= devices_cnt);
}
return 0;
}
-void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
+int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
+ struct dmar_dev_scope *devices, int count)
{
- if (*devices && *cnt) {
- while (--*cnt >= 0)
- pci_dev_put((*devices)[*cnt]);
- kfree(*devices);
- *devices = NULL;
- *cnt = 0;
+ int index;
+ struct device *tmp;
+
+ if (info->seg != segment)
+ return 0;
+
+ for_each_active_dev_scope(devices, count, index, tmp)
+ if (tmp == &info->dev->dev) {
+ rcu_assign_pointer(devices[index].dev, NULL);
+ synchronize_rcu();
+ put_device(tmp);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
+{
+ int ret = 0;
+ struct dmar_drhd_unit *dmaru;
+ struct acpi_dmar_hardware_unit *drhd;
+
+ for_each_drhd_unit(dmaru) {
+ if (dmaru->include_all)
+ continue;
+
+ drhd = container_of(dmaru->hdr,
+ struct acpi_dmar_hardware_unit, header);
+ ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
+ ((void *)drhd) + drhd->header.length,
+ dmaru->segment,
+ dmaru->devices, dmaru->devices_cnt);
+ if (ret != 0)
+ break;
}
+ if (ret >= 0)
+ ret = dmar_iommu_notify_scope_dev(info);
+ if (ret < 0 && dmar_dev_scope_status == 0)
+ dmar_dev_scope_status = ret;
+
+ return ret;
}
+static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
+{
+ struct dmar_drhd_unit *dmaru;
+
+ for_each_drhd_unit(dmaru)
+ if (dmar_remove_dev_scope(info, dmaru->segment,
+ dmaru->devices, dmaru->devices_cnt))
+ break;
+ dmar_iommu_notify_scope_dev(info);
+}
+
+static int dmar_pci_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(data);
+ struct dmar_pci_notify_info *info;
+
+ /* Only care about add/remove events for physical functions */
+ if (pdev->is_virtfn)
+ return NOTIFY_DONE;
+ if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
+ return NOTIFY_DONE;
+
+ info = dmar_alloc_pci_notify_info(pdev, action);
+ if (!info)
+ return NOTIFY_DONE;
+
+ down_write(&dmar_global_lock);
+ if (action == BUS_NOTIFY_ADD_DEVICE)
+ dmar_pci_bus_add_dev(info);
+ else if (action == BUS_NOTIFY_DEL_DEVICE)
+ dmar_pci_bus_del_dev(info);
+ up_write(&dmar_global_lock);
+
+ dmar_free_pci_notify_info(info);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block dmar_pci_bus_nb = {
+ .notifier_call = dmar_pci_bus_notifier,
+ .priority = INT_MIN,
+};
+
/**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
@@ -196,9 +346,18 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
dmaru->reg_base_addr = drhd->address;
dmaru->segment = drhd->segment;
dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
+ dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
+ ((void *)drhd) + drhd->header.length,
+ &dmaru->devices_cnt);
+ if (dmaru->devices_cnt && dmaru->devices == NULL) {
+ kfree(dmaru);
+ return -ENOMEM;
+ }
ret = alloc_iommu(dmaru);
if (ret) {
+ dmar_free_dev_scope(&dmaru->devices,
+ &dmaru->devices_cnt);
kfree(dmaru);
return ret;
}
@@ -215,19 +374,24 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
kfree(dmaru);
}
-static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
{
- struct acpi_dmar_hardware_unit *drhd;
-
- drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
-
- if (dmaru->include_all)
- return 0;
+ struct acpi_dmar_andd *andd = (void *)header;
+
+ /* Check for NUL termination within the designated length */
+ if (strnlen(andd->object_name, header->length - 8) == header->length - 8) {
+ WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+ "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ return -EINVAL;
+ }
+ pr_info("ANDD device: %x name: %s\n", andd->device_number,
+ andd->object_name);
- return dmar_parse_dev_scope((void *)(drhd + 1),
- ((void *)drhd) + drhd->header.length,
- &dmaru->devices_cnt, &dmaru->devices,
- drhd->segment);
+ return 0;
}
#ifdef CONFIG_ACPI_NUMA
@@ -293,6 +457,10 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
(unsigned long long)rhsa->base_address,
rhsa->proximity_domain);
break;
+ case ACPI_DMAR_TYPE_ANDD:
+ /* We don't print this here because we need to sanity-check
+ it first. So print it in dmar_parse_one_andd() instead. */
+ break;
}
}
@@ -378,6 +546,9 @@ parse_dmar_table(void)
ret = dmar_parse_one_rhsa(entry_header);
#endif
break;
+ case ACPI_DMAR_TYPE_ANDD:
+ ret = dmar_parse_one_andd(entry_header);
+ break;
default:
pr_warn("Unknown DMAR structure type %d\n",
entry_header->type);
@@ -394,14 +565,15 @@ parse_dmar_table(void)
return ret;
}
-static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
- struct pci_dev *dev)
+static int dmar_pci_device_match(struct dmar_dev_scope devices[],
+ int cnt, struct pci_dev *dev)
{
int index;
+ struct device *tmp;
while (dev) {
- for (index = 0; index < cnt; index++)
- if (dev == devices[index])
+ for_each_active_dev_scope(devices, cnt, index, tmp)
+ if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
return 1;
/* Check our parent */
@@ -414,11 +586,12 @@ static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
- struct dmar_drhd_unit *dmaru = NULL;
+ struct dmar_drhd_unit *dmaru;
struct acpi_dmar_hardware_unit *drhd;
dev = pci_physfn(dev);
+ rcu_read_lock();
for_each_drhd_unit(dmaru) {
drhd = container_of(dmaru->hdr,
struct acpi_dmar_hardware_unit,
@@ -426,44 +599,128 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
if (dmaru->include_all &&
drhd->segment == pci_domain_nr(dev->bus))
- return dmaru;
+ goto out;
if (dmar_pci_device_match(dmaru->devices,
dmaru->devices_cnt, dev))
- return dmaru;
+ goto out;
}
+ dmaru = NULL;
+out:
+ rcu_read_unlock();
- return NULL;
+ return dmaru;
}
-int __init dmar_dev_scope_init(void)
+static void __init dmar_acpi_insert_dev_scope(u8 device_number,
+ struct acpi_device *adev)
{
- static int dmar_dev_scope_initialized;
- struct dmar_drhd_unit *drhd;
- int ret = -ENODEV;
-
- if (dmar_dev_scope_initialized)
- return dmar_dev_scope_initialized;
+ struct dmar_drhd_unit *dmaru;
+ struct acpi_dmar_hardware_unit *drhd;
+ struct acpi_dmar_device_scope *scope;
+ struct device *tmp;
+ int i;
+ struct acpi_dmar_pci_path *path;
- if (list_empty(&dmar_drhd_units))
- goto fail;
+ for_each_drhd_unit(dmaru) {
+ drhd = container_of(dmaru->hdr,
+ struct acpi_dmar_hardware_unit,
+ header);
- list_for_each_entry(drhd, &dmar_drhd_units, list) {
- ret = dmar_parse_dev(drhd);
- if (ret)
- goto fail;
+ for (scope = (void *)(drhd + 1);
+ (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
+ scope = ((void *)scope) + scope->length) {
+ if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI)
+ continue;
+ if (scope->enumeration_id != device_number)
+ continue;
+
+ path = (void *)(scope + 1);
+ pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
+ dev_name(&adev->dev), dmaru->reg_base_addr,
+ scope->bus, path->device, path->function);
+ for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
+ if (tmp == NULL) {
+ dmaru->devices[i].bus = scope->bus;
+ dmaru->devices[i].devfn = PCI_DEVFN(path->device,
+ path->function);
+ rcu_assign_pointer(dmaru->devices[i].dev,
+ get_device(&adev->dev));
+ return;
+ }
+ BUG_ON(i >= dmaru->devices_cnt);
+ }
}
+ pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
+ device_number, dev_name(&adev->dev));
+}
- ret = dmar_parse_rmrr_atsr_dev();
- if (ret)
- goto fail;
+static int __init dmar_acpi_dev_scope_init(void)
+{
+ struct acpi_dmar_andd *andd;
+
+ if (dmar_tbl == NULL)
+ return -ENODEV;
- dmar_dev_scope_initialized = 1;
+ for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
+ ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
+ andd = ((void *)andd) + andd->header.length) {
+ if (andd->header.type == ACPI_DMAR_TYPE_ANDD) {
+ acpi_handle h;
+ struct acpi_device *adev;
+
+ if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
+ andd->object_name,
+ &h))) {
+ pr_err("Failed to find handle for ACPI object %s\n",
+ andd->object_name);
+ continue;
+ }
+ acpi_bus_get_device(h, &adev);
+ if (!adev) {
+ pr_err("Failed to get device for ACPI object %s\n",
+ andd->object_name);
+ continue;
+ }
+ dmar_acpi_insert_dev_scope(andd->device_number, adev);
+ }
+ }
return 0;
+}
-fail:
- dmar_dev_scope_initialized = ret;
- return ret;
+int __init dmar_dev_scope_init(void)
+{
+ struct pci_dev *dev = NULL;
+ struct dmar_pci_notify_info *info;
+
+ if (dmar_dev_scope_status != 1)
+ return dmar_dev_scope_status;
+
+ if (list_empty(&dmar_drhd_units)) {
+ dmar_dev_scope_status = -ENODEV;
+ } else {
+ dmar_dev_scope_status = 0;
+
+ dmar_acpi_dev_scope_init();
+
+ for_each_pci_dev(dev) {
+ if (dev->is_virtfn)
+ continue;
+
+ info = dmar_alloc_pci_notify_info(dev,
+ BUS_NOTIFY_ADD_DEVICE);
+ if (!info) {
+ return dmar_dev_scope_status;
+ } else {
+ dmar_pci_bus_add_dev(info);
+ dmar_free_pci_notify_info(info);
+ }
+ }
+
+ bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
+ }
+
+ return dmar_dev_scope_status;
}
@@ -557,6 +814,7 @@ int __init detect_intel_iommu(void)
{
int ret;
+ down_write(&dmar_global_lock);
ret = dmar_table_detect();
if (ret)
ret = check_zero_address();
@@ -574,6 +832,7 @@ int __init detect_intel_iommu(void)
}
early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
+ up_write(&dmar_global_lock);
return ret ? 1 : -ENODEV;
}
@@ -696,6 +955,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
iommu->agaw = agaw;
iommu->msagaw = msagaw;
+ iommu->segment = drhd->segment;
iommu->node = -1;
@@ -1386,10 +1646,15 @@ static int __init dmar_free_unused_resources(void)
if (irq_remapping_enabled || intel_iommu_enabled)
return 0;
+ if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
+ bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
+
+ down_write(&dmar_global_lock);
list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
list_del(&dmaru->list);
dmar_free_drhd(dmaru);
}
+ up_write(&dmar_global_lock);
return 0;
}
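
One detail worth noting in the dmar.c rework above: notify_info structures are usually small, so dmar_alloc_pci_notify_info() reuses a static 64-byte buffer and only falls back to kzalloc() for deep PCI paths, with the matching free checking which case it got. A self-contained sketch of that small-buffer idiom; sizes and types are illustrative, and, like the kernel code, it assumes callers are serialized.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char small_buf[64];   /* reused for the common, small case */

    /* Return small_buf when the request fits, else heap memory. */
    static void *info_alloc(size_t size)
    {
        if (size <= sizeof(small_buf))
            return memset(small_buf, 0, size);
        return calloc(1, size);
    }

    static void info_free(void *p)
    {
        if (p != small_buf)      /* only heap allocations are freed */
            free(p);
    }

    int main(void)
    {
        void *a = info_alloc(48);    /* served from the static buffer */
        void *b = info_alloc(4096);  /* falls back to the heap */

        printf("a %s the static buffer\n", a == small_buf ? "uses" : "does not use");
        info_free(a);
        info_free(b);
        return 0;
    }
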
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a22c86c867fa..69fa7da5e48b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, Intel Corporation.
+ * Copyright © 2006-2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -10,15 +10,11 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Author: Ashok Raj <ashok.raj@intel.com>
- * Author: Shaohua Li <shaohua.li@intel.com>
- * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- * Author: Fenghua Yu <fenghua.yu@intel.com>
+ * Authors: David Woodhouse <dwmw2@infradead.org>,
+ * Ashok Raj <ashok.raj@intel.com>,
+ * Shaohua Li <shaohua.li@intel.com>,
+ * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
*/
#include <linux/init.h>
@@ -33,6 +29,7 @@
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
+#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
@@ -372,14 +369,36 @@ struct dmar_domain {
struct device_domain_info {
struct list_head link; /* link to domain siblings */
struct list_head global; /* link to global list */
- int segment; /* PCI domain */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
- struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
+ struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
};
+struct dmar_rmrr_unit {
+ struct list_head list; /* list of rmrr units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ u64 base_address; /* reserved base address*/
+ u64 end_address; /* reserved end address */
+ struct dmar_dev_scope *devices; /* target devices */
+ int devices_cnt; /* target device count */
+};
+
+struct dmar_atsr_unit {
+ struct list_head list; /* list of ATSR units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ struct dmar_dev_scope *devices; /* target devices */
+ int devices_cnt; /* target device count */
+ u8 include_all:1; /* include all ports */
+};
+
+static LIST_HEAD(dmar_atsr_units);
+static LIST_HEAD(dmar_rmrr_units);
+
+#define for_each_rmrr_units(rmrr) \
+ list_for_each_entry(rmrr, &dmar_rmrr_units, list)
+
static void flush_unmaps_timeout(unsigned long data);
static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
@@ -389,6 +408,7 @@ struct deferred_flush_tables {
int next;
struct iova *iova[HIGH_WATER_MARK];
struct dmar_domain *domain[HIGH_WATER_MARK];
+ struct page *freelist[HIGH_WATER_MARK];
};
static struct deferred_flush_tables *deferred_flush;
@@ -402,7 +422,12 @@ static LIST_HEAD(unmaps_to_do);
static int timer_on;
static long list_size;
+static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
+static void domain_remove_one_dev_info(struct dmar_domain *domain,
+ struct device *dev);
+static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
+ struct device *dev);
#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
@@ -566,18 +591,31 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
- int i;
-
- i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ int i, found = 0;
- domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
+ domain->iommu_coherency = 1;
for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
+ found = 1;
if (!ecap_coherent(g_iommus[i]->ecap)) {
domain->iommu_coherency = 0;
break;
}
}
+ if (found)
+ return;
+
+ /* No hardware attached; use lowest common denominator */
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!ecap_coherent(iommu->ecap)) {
+ domain->iommu_coherency = 0;
+ break;
+ }
+ }
+ rcu_read_unlock();
}
static void domain_update_iommu_snooping(struct dmar_domain *domain)
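
The coherency hunk above changes domain_update_iommu_coherency() so that a domain with no IOMMU attached yet falls back to the lowest common denominator of every active IOMMU instead of defaulting to non-coherent. A small standalone rendering of that decision; the data layout is a stand-in, not the driver's structures.

    #include <stdbool.h>
    #include <stdio.h>

    /* coherent[i]: whether IOMMU i snoops CPU caches;
     * attached[i]: whether the domain currently uses IOMMU i. */
    static bool domain_coherency(const bool *coherent, const bool *attached, int n)
    {
        bool found = false;
        int i;

        for (i = 0; i < n; i++) {
            if (!attached[i])
                continue;
            found = true;
            if (!coherent[i])
                return false;
        }
        if (found)
            return true;

        /* Nothing attached yet: be conservative across all IOMMUs. */
        for (i = 0; i < n; i++)
            if (!coherent[i])
                return false;
        return true;
    }

    int main(void)
    {
        bool coherent[] = { true, false, true };
        bool none[]     = { false, false, false };

        printf("unattached domain coherent? %d\n",
               domain_coherency(coherent, none, 3));   /* 0: one IOMMU is not */
        return 0;
    }
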
@@ -606,12 +644,15 @@ static void domain_update_iommu_superpage(struct dmar_domain *domain)
}
/* set iommu_superpage to the smallest common denominator */
+ rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
mask &= cap_super_page_val(iommu->cap);
if (!mask) {
break;
}
}
+ rcu_read_unlock();
+
domain->iommu_superpage = fls(mask);
}
@@ -623,32 +664,56 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain_update_iommu_superpage(domain);
}
-static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
+static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
+ struct intel_iommu *iommu;
+ struct device *tmp;
+ struct pci_dev *ptmp, *pdev = NULL;
+ u16 segment;
int i;
- for_each_active_drhd_unit(drhd) {
- if (segment != drhd->segment)
+ if (dev_is_pci(dev)) {
+ pdev = to_pci_dev(dev);
+ segment = pci_domain_nr(pdev->bus);
+ } else if (ACPI_COMPANION(dev))
+ dev = &ACPI_COMPANION(dev)->dev;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (pdev && segment != drhd->segment)
continue;
- for (i = 0; i < drhd->devices_cnt; i++) {
- if (drhd->devices[i] &&
- drhd->devices[i]->bus->number == bus &&
- drhd->devices[i]->devfn == devfn)
- return drhd->iommu;
- if (drhd->devices[i] &&
- drhd->devices[i]->subordinate &&
- drhd->devices[i]->subordinate->number <= bus &&
- drhd->devices[i]->subordinate->busn_res.end >= bus)
- return drhd->iommu;
+ for_each_active_dev_scope(drhd->devices,
+ drhd->devices_cnt, i, tmp) {
+ if (tmp == dev) {
+ *bus = drhd->devices[i].bus;
+ *devfn = drhd->devices[i].devfn;
+ goto out;
+ }
+
+ if (!pdev || !dev_is_pci(tmp))
+ continue;
+
+ ptmp = to_pci_dev(tmp);
+ if (ptmp->subordinate &&
+ ptmp->subordinate->number <= pdev->bus->number &&
+ ptmp->subordinate->busn_res.end >= pdev->bus->number)
+ goto got_pdev;
}
- if (drhd->include_all)
- return drhd->iommu;
+ if (pdev && drhd->include_all) {
+ got_pdev:
+ *bus = pdev->bus->number;
+ *devfn = pdev->devfn;
+ goto out;
+ }
}
+ iommu = NULL;
+ out:
+ rcu_read_unlock();
- return NULL;
+ return iommu;
}
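A minimal sketch of the new lookup pattern, using only what this patch shows (the helper name below is illustrative, not part of the driver): device_to_iommu() now hands back the (bus, devfn) under which the IOMMU addresses the device, which for an ACPI-enumerated device comes from the DMAR device scope rather than from a pci_dev, and that pair is what locates the context entry.

static struct context_entry *sketch_context_for_dev(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	/* works for PCI and ACPI-enumerated devices alike */
	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	return device_to_context_entry(iommu, bus, devfn);
}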
static void domain_flush_cache(struct dmar_domain *domain,
@@ -748,7 +813,7 @@ out:
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
- unsigned long pfn, int target_level)
+ unsigned long pfn, int *target_level)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct dma_pte *parent, *pte = NULL;
@@ -763,14 +828,14 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
parent = domain->pgd;
- while (level > 0) {
+ while (1) {
void *tmp_page;
offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
- if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
+ if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
break;
- if (level == target_level)
+ if (level == *target_level)
break;
if (!dma_pte_present(pte)) {
@@ -791,10 +856,16 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
domain_flush_cache(domain, pte, sizeof(*pte));
}
}
+ if (level == 1)
+ break;
+
parent = phys_to_virt(dma_pte_addr(pte));
level--;
}
+ if (!*target_level)
+ *target_level = level;
+
return pte;
}
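A minimal sketch of the new calling convention (the helper below is illustrative only): passing *target_level == 0 asks the walk to stop at whatever mapping is present, and the level it stopped at is written back, so a caller can tell a superpage leaf from a 4KiB one.

static bool sketch_mapped_as_superpage(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int level = 0;	/* 0: stop wherever a mapping (or hole) is found */
	struct dma_pte *pte = pfn_to_dma_pte(domain, pfn, &level);

	return pte && dma_pte_present(pte) && level > 1;
}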
@@ -832,7 +903,7 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
}
/* clear last level pte, a tlb flush should be followed */
-static int dma_pte_clear_range(struct dmar_domain *domain,
+static void dma_pte_clear_range(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
@@ -862,8 +933,6 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
(void *)pte - (void *)first_pte);
} while (start_pfn && start_pfn <= last_pfn);
-
- return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
@@ -921,6 +990,123 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
}
}
+/* When a page at a given level is being unlinked from its parent, we don't
+ need to *modify* it at all. All we need to do is make a list of all the
+ pages which can be freed just as soon as we've flushed the IOTLB and we
+ know the hardware page-walk will no longer touch them.
+ The 'pte' argument is the *parent* PTE, pointing to the page that is to
+ be freed. */
+static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
+ int level, struct dma_pte *pte,
+ struct page *freelist)
+{
+ struct page *pg;
+
+ pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
+ pg->freelist = freelist;
+ freelist = pg;
+
+ if (level == 1)
+ return freelist;
+
+ for (pte = page_address(pg); !first_pte_in_page(pte); pte++) {
+ if (dma_pte_present(pte) && !dma_pte_superpage(pte))
+ freelist = dma_pte_list_pagetables(domain, level - 1,
+ pte, freelist);
+ }
+
+ return freelist;
+}
+
+static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
+ struct dma_pte *pte, unsigned long pfn,
+ unsigned long start_pfn,
+ unsigned long last_pfn,
+ struct page *freelist)
+{
+ struct dma_pte *first_pte = NULL, *last_pte = NULL;
+
+ pfn = max(start_pfn, pfn);
+ pte = &pte[pfn_level_offset(pfn, level)];
+
+ do {
+ unsigned long level_pfn;
+
+ if (!dma_pte_present(pte))
+ goto next;
+
+ level_pfn = pfn & level_mask(level);
+
+ /* If range covers entire pagetable, free it */
+ if (start_pfn <= level_pfn &&
+ last_pfn >= level_pfn + level_size(level) - 1) {
+ /* These subordinate page tables are going away entirely. Don't
+ bother to clear them; we're just going to *free* them. */
+ if (level > 1 && !dma_pte_superpage(pte))
+ freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
+
+ dma_clear_pte(pte);
+ if (!first_pte)
+ first_pte = pte;
+ last_pte = pte;
+ } else if (level > 1) {
+ /* Recurse down into a level that isn't *entirely* obsolete */
+ freelist = dma_pte_clear_level(domain, level - 1,
+ phys_to_virt(dma_pte_addr(pte)),
+ level_pfn, start_pfn, last_pfn,
+ freelist);
+ }
+next:
+ pfn += level_size(level);
+ } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
+
+ if (first_pte)
+ domain_flush_cache(domain, first_pte,
+ (void *)++last_pte - (void *)first_pte);
+
+ return freelist;
+}
+
+/* We can't just free the pages because the IOMMU may still be walking
+ the page tables, and may have cached the intermediate levels. The
+ pages can only be freed after the IOTLB flush has been done. */
+struct page *domain_unmap(struct dmar_domain *domain,
+ unsigned long start_pfn,
+ unsigned long last_pfn)
+{
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ struct page *freelist = NULL;
+
+ BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+ BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(start_pfn > last_pfn);
+
+ /* no lock needed here; nobody else touches this iova range */
+ freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
+ domain->pgd, 0, start_pfn, last_pfn, NULL);
+
+ /* free pgd */
+ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
+ struct page *pgd_page = virt_to_page(domain->pgd);
+ pgd_page->freelist = freelist;
+ freelist = pgd_page;
+
+ domain->pgd = NULL;
+ }
+
+ return freelist;
+}
+
+void dma_free_pagelist(struct page *freelist)
+{
+ struct page *pg;
+
+ while ((pg = freelist)) {
+ freelist = pg->freelist;
+ free_pgtable_page(page_address(pg));
+ }
+}
+
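A minimal sketch of the ordering these helpers exist for, assuming a single IOMMU for brevity (the function name is illustrative, not part of the patch): unmap first, flush the IOTLB so the hardware can no longer walk the old tables, and only then free the page-table pages.

static void sketch_unmap_flush_free(struct dmar_domain *domain,
				    struct intel_iommu *iommu,
				    unsigned long start_pfn,
				    unsigned long last_pfn)
{
	struct page *freelist;

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	/* IH is set only when no page-table pages are queued for freeing */
	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
			      last_pfn - start_pfn + 1, !freelist, 0);

	dma_free_pagelist(freelist);
}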
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
@@ -1030,7 +1216,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
break;
case DMA_TLB_PSI_FLUSH:
val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
- /* Note: always flush non-leaf currently */
+ /* IH bit is passed in as part of address */
val_iva = size_order | addr;
break;
default:
@@ -1069,13 +1255,14 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
(unsigned long long)DMA_TLB_IAIG(val));
}
-static struct device_domain_info *iommu_support_dev_iotlb(
- struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
+static struct device_domain_info *
+iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
+ u8 bus, u8 devfn)
{
int found = 0;
unsigned long flags;
struct device_domain_info *info;
- struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
+ struct pci_dev *pdev;
if (!ecap_dev_iotlb_support(iommu->ecap))
return NULL;
@@ -1091,34 +1278,35 @@ static struct device_domain_info *iommu_support_dev_iotlb(
}
spin_unlock_irqrestore(&device_domain_lock, flags);
- if (!found || !info->dev)
+ if (!found || !info->dev || !dev_is_pci(info->dev))
return NULL;
- if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
- return NULL;
+ pdev = to_pci_dev(info->dev);
- if (!dmar_find_matched_atsr_unit(info->dev))
+ if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
return NULL;
- info->iommu = iommu;
+ if (!dmar_find_matched_atsr_unit(pdev))
+ return NULL;
return info;
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
- if (!info)
+ if (!info || !dev_is_pci(info->dev))
return;
- pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
+ pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}
static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
- if (!info->dev || !pci_ats_enabled(info->dev))
+ if (!info->dev || !dev_is_pci(info->dev) ||
+ !pci_ats_enabled(to_pci_dev(info->dev)))
return;
- pci_disable_ats(info->dev);
+ pci_disable_ats(to_pci_dev(info->dev));
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -1130,24 +1318,31 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link) {
- if (!info->dev || !pci_ats_enabled(info->dev))
+ struct pci_dev *pdev;
+ if (!info->dev || !dev_is_pci(info->dev))
+ continue;
+
+ pdev = to_pci_dev(info->dev);
+ if (!pci_ats_enabled(pdev))
continue;
sid = info->bus << 8 | info->devfn;
- qdep = pci_ats_queue_depth(info->dev);
+ qdep = pci_ats_queue_depth(pdev);
qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- unsigned long pfn, unsigned int pages, int map)
+ unsigned long pfn, unsigned int pages, int ih, int map)
{
unsigned int mask = ilog2(__roundup_pow_of_two(pages));
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
BUG_ON(pages == 0);
+ if (ih)
+ ih = 1 << 6; /* IH (invalidation hint) is bit 6 of the IVA address */
/*
* Fallback to domain selective flush if no PSI support or the size is
* too big.
@@ -1158,7 +1353,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH);
else
- iommu->flush.flush_iotlb(iommu, did, addr, mask,
+ iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
DMA_TLB_PSI_FLUSH);
/*
@@ -1261,10 +1456,6 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return 0;
}
-
-static void domain_exit(struct dmar_domain *domain);
-static void vm_domain_exit(struct dmar_domain *domain);
-
static void free_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
@@ -1273,18 +1464,21 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if ((iommu->domains) && (iommu->domain_ids)) {
for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
+ /*
+ * Domain id 0 is reserved for invalid translation
+ * if hardware supports caching mode.
+ */
+ if (cap_caching_mode(iommu->cap) && i == 0)
+ continue;
+
domain = iommu->domains[i];
clear_bit(i, iommu->domain_ids);
spin_lock_irqsave(&domain->iommu_lock, flags);
count = --domain->iommu_count;
spin_unlock_irqrestore(&domain->iommu_lock, flags);
- if (count == 0) {
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
- vm_domain_exit(domain);
- else
- domain_exit(domain);
- }
+ if (count == 0)
+ domain_exit(domain);
}
}
@@ -1298,21 +1492,14 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
g_iommus[iommu->seq_id] = NULL;
- /* if all iommus are freed, free g_iommus */
- for (i = 0; i < g_num_of_iommus; i++) {
- if (g_iommus[i])
- break;
- }
-
- if (i == g_num_of_iommus)
- kfree(g_iommus);
-
/* free context mapping */
free_context_table(iommu);
}
-static struct dmar_domain *alloc_domain(void)
+static struct dmar_domain *alloc_domain(bool vm)
{
+ /* domain id for virtual machines; never written into a context entry */
+ static atomic_t vm_domid = ATOMIC_INIT(0);
struct dmar_domain *domain;
domain = alloc_domain_mem();
@@ -1320,8 +1507,15 @@ static struct dmar_domain *alloc_domain(void)
return NULL;
domain->nid = -1;
+ domain->iommu_count = 0;
memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
domain->flags = 0;
+ spin_lock_init(&domain->iommu_lock);
+ INIT_LIST_HEAD(&domain->devices);
+ if (vm) {
+ domain->id = atomic_inc_return(&vm_domid);
+ domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
+ }
return domain;
}
@@ -1345,6 +1539,7 @@ static int iommu_attach_domain(struct dmar_domain *domain,
}
domain->id = num;
+ domain->iommu_count++;
set_bit(num, iommu->domain_ids);
set_bit(iommu->seq_id, domain->iommu_bmp);
iommu->domains[num] = domain;
@@ -1358,22 +1553,16 @@ static void iommu_detach_domain(struct dmar_domain *domain,
{
unsigned long flags;
int num, ndomains;
- int found = 0;
spin_lock_irqsave(&iommu->lock, flags);
ndomains = cap_ndoms(iommu->cap);
for_each_set_bit(num, iommu->domain_ids, ndomains) {
if (iommu->domains[num] == domain) {
- found = 1;
+ clear_bit(num, iommu->domain_ids);
+ iommu->domains[num] = NULL;
break;
}
}
-
- if (found) {
- clear_bit(num, iommu->domain_ids);
- clear_bit(iommu->seq_id, domain->iommu_bmp);
- iommu->domains[num] = NULL;
- }
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -1445,8 +1634,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
unsigned long sagaw;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->iommu_lock);
-
domain_reserve_special_ranges(domain);
/* calculate AGAW */
@@ -1465,7 +1652,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
return -ENODEV;
}
domain->agaw = agaw;
- INIT_LIST_HEAD(&domain->devices);
if (ecap_coherent(iommu->ecap))
domain->iommu_coherency = 1;
@@ -1477,8 +1663,11 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
else
domain->iommu_snooping = 0;
- domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
- domain->iommu_count = 1;
+ if (intel_iommu_superpage)
+ domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
+ else
+ domain->iommu_superpage = 0;
+
domain->nid = iommu->node;
/* always allocate the top pgd */
@@ -1493,6 +1682,7 @@ static void domain_exit(struct dmar_domain *domain)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
+ struct page *freelist = NULL;
/* Domain 0 is reserved, so dont process it */
if (!domain)
@@ -1502,29 +1692,33 @@ static void domain_exit(struct dmar_domain *domain)
if (!intel_iommu_strict)
flush_unmaps_timeout(0);
+ /* remove associated devices */
domain_remove_dev_info(domain);
+
/* destroy iovas */
put_iova_domain(&domain->iovad);
- /* clear ptes */
- dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
- /* free page tables */
- dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+ freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+ /* clear attached or cached domains */
+ rcu_read_lock();
for_each_active_iommu(iommu, drhd)
- if (test_bit(iommu->seq_id, domain->iommu_bmp))
+ if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+ test_bit(iommu->seq_id, domain->iommu_bmp))
iommu_detach_domain(domain, iommu);
+ rcu_read_unlock();
+
+ dma_free_pagelist(freelist);
free_domain_mem(domain);
}
-static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
- u8 bus, u8 devfn, int translation)
+static int domain_context_mapping_one(struct dmar_domain *domain,
+ struct intel_iommu *iommu,
+ u8 bus, u8 devfn, int translation)
{
struct context_entry *context;
unsigned long flags;
- struct intel_iommu *iommu;
struct dma_pte *pgd;
unsigned long num;
unsigned long ndomains;
@@ -1539,10 +1733,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
translation != CONTEXT_TT_MULTI_LEVEL);
- iommu = device_to_iommu(segment, bus, devfn);
- if (!iommu)
- return -ENODEV;
-
context = device_to_context_entry(iommu, bus, devfn);
if (!context)
return -ENOMEM;
@@ -1600,7 +1790,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
context_set_domain_id(context, id);
if (translation != CONTEXT_TT_PASS_THROUGH) {
- info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
+ info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
translation = info ? CONTEXT_TT_DEV_IOTLB :
CONTEXT_TT_MULTI_LEVEL;
}
@@ -1650,27 +1840,32 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
}
static int
-domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
- int translation)
+domain_context_mapping(struct dmar_domain *domain, struct device *dev,
+ int translation)
{
int ret;
- struct pci_dev *tmp, *parent;
+ struct pci_dev *pdev, *tmp, *parent;
+ struct intel_iommu *iommu;
+ u8 bus, devfn;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return -ENODEV;
- ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
- pdev->bus->number, pdev->devfn,
+ ret = domain_context_mapping_one(domain, iommu, bus, devfn,
translation);
- if (ret)
+ if (ret || !dev_is_pci(dev))
return ret;
/* dependent device mapping */
+ pdev = to_pci_dev(dev);
tmp = pci_find_upstream_pcie_bridge(pdev);
if (!tmp)
return 0;
/* Secondary interface's bus number and devfn 0 */
parent = pdev->bus->self;
while (parent != tmp) {
- ret = domain_context_mapping_one(domain,
- pci_domain_nr(parent->bus),
+ ret = domain_context_mapping_one(domain, iommu,
parent->bus->number,
parent->devfn, translation);
if (ret)
@@ -1678,33 +1873,33 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
parent = parent->bus->self;
}
if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
- return domain_context_mapping_one(domain,
- pci_domain_nr(tmp->subordinate),
+ return domain_context_mapping_one(domain, iommu,
tmp->subordinate->number, 0,
translation);
else /* this is a legacy PCI bridge */
- return domain_context_mapping_one(domain,
- pci_domain_nr(tmp->bus),
+ return domain_context_mapping_one(domain, iommu,
tmp->bus->number,
tmp->devfn,
translation);
}
-static int domain_context_mapped(struct pci_dev *pdev)
+static int domain_context_mapped(struct device *dev)
{
int ret;
- struct pci_dev *tmp, *parent;
+ struct pci_dev *pdev, *tmp, *parent;
struct intel_iommu *iommu;
+ u8 bus, devfn;
- iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
- pdev->devfn);
+ iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
- ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
- if (!ret)
+ ret = device_context_mapped(iommu, bus, devfn);
+ if (!ret || !dev_is_pci(dev))
return ret;
+
/* dependent device mapping */
+ pdev = to_pci_dev(dev);
tmp = pci_find_upstream_pcie_bridge(pdev);
if (!tmp)
return ret;
@@ -1800,7 +1995,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
if (!pte) {
largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
- first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
+ first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
if (!pte)
return -ENOMEM;
/* It is large page*/
@@ -1899,14 +2094,13 @@ static inline void unlink_domain_info(struct device_domain_info *info)
list_del(&info->link);
list_del(&info->global);
if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
+ info->dev->archdata.iommu = NULL;
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
struct device_domain_info *info;
- unsigned long flags;
- struct intel_iommu *iommu;
+ unsigned long flags, flags2;
spin_lock_irqsave(&device_domain_lock, flags);
while (!list_empty(&domain->devices)) {
@@ -1916,10 +2110,23 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags);
iommu_disable_dev_iotlb(info);
- iommu = device_to_iommu(info->segment, info->bus, info->devfn);
- iommu_detach_dev(iommu, info->bus, info->devfn);
- free_devinfo_mem(info);
+ iommu_detach_dev(info->iommu, info->bus, info->devfn);
+ if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+ iommu_detach_dependent_devices(info->iommu, info->dev);
+ /* clear this iommu in iommu_bmp, update iommu count
+ * and capabilities
+ */
+ spin_lock_irqsave(&domain->iommu_lock, flags2);
+ if (test_and_clear_bit(info->iommu->seq_id,
+ domain->iommu_bmp)) {
+ domain->iommu_count--;
+ domain_update_iommu_cap(domain);
+ }
+ spin_unlock_irqrestore(&domain->iommu_lock, flags2);
+ }
+
+ free_devinfo_mem(info);
spin_lock_irqsave(&device_domain_lock, flags);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -1927,155 +2134,151 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
/*
* find_domain
- * Note: we use struct pci_dev->dev.archdata.iommu stores the info
+ * Note: we use struct device->archdata.iommu to store the info
*/
-static struct dmar_domain *
-find_domain(struct pci_dev *pdev)
+static struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
/* No lock here, assumes no domain exit in normal case */
- info = pdev->dev.archdata.iommu;
+ info = dev->archdata.iommu;
if (info)
return info->domain;
return NULL;
}
+static inline struct device_domain_info *
+dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
+{
+ struct device_domain_info *info;
+
+ list_for_each_entry(info, &device_domain_list, global)
+ if (info->iommu->segment == segment && info->bus == bus &&
+ info->devfn == devfn)
+ return info;
+
+ return NULL;
+}
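A minimal sketch of how this lookup is meant to be used (the helper is illustrative, not part of the patch): both callers below take device_domain_lock around the walk of device_domain_list.

static struct dmar_domain *sketch_domain_by_id(int segment, int bus, int devfn)
{
	struct device_domain_info *info;
	struct dmar_domain *domain = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	info = dmar_search_domain_by_dev_info(segment, bus, devfn);
	if (info)
		domain = info->domain;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}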
+
+static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
+ int bus, int devfn,
+ struct device *dev,
+ struct dmar_domain *domain)
+{
+ struct dmar_domain *found = NULL;
+ struct device_domain_info *info;
+ unsigned long flags;
+
+ info = alloc_devinfo_mem();
+ if (!info)
+ return NULL;
+
+ info->bus = bus;
+ info->devfn = devfn;
+ info->dev = dev;
+ info->domain = domain;
+ info->iommu = iommu;
+ if (!dev)
+ domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ if (dev)
+ found = find_domain(dev);
+ else {
+ struct device_domain_info *info2;
+ info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
+ if (info2)
+ found = info2->domain;
+ }
+ if (found) {
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ free_devinfo_mem(info);
+ /* Caller must free the original domain */
+ return found;
+ }
+
+ list_add(&info->link, &domain->devices);
+ list_add(&info->global, &device_domain_list);
+ if (dev)
+ dev->archdata.iommu = info;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return domain;
+}
+
/* domain is initialized */
-static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
+static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
- struct dmar_domain *domain, *found = NULL;
- struct intel_iommu *iommu;
- struct dmar_drhd_unit *drhd;
- struct device_domain_info *info, *tmp;
- struct pci_dev *dev_tmp;
+ struct dmar_domain *domain, *free = NULL;
+ struct intel_iommu *iommu = NULL;
+ struct device_domain_info *info;
+ struct pci_dev *dev_tmp = NULL;
unsigned long flags;
- int bus = 0, devfn = 0;
- int segment;
- int ret;
+ u8 bus, devfn, bridge_bus, bridge_devfn;
- domain = find_domain(pdev);
+ domain = find_domain(dev);
if (domain)
return domain;
- segment = pci_domain_nr(pdev->bus);
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 segment;
- dev_tmp = pci_find_upstream_pcie_bridge(pdev);
- if (dev_tmp) {
- if (pci_is_pcie(dev_tmp)) {
- bus = dev_tmp->subordinate->number;
- devfn = 0;
- } else {
- bus = dev_tmp->bus->number;
- devfn = dev_tmp->devfn;
- }
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &device_domain_list, global) {
- if (info->segment == segment &&
- info->bus == bus && info->devfn == devfn) {
- found = info->domain;
- break;
+ segment = pci_domain_nr(pdev->bus);
+ dev_tmp = pci_find_upstream_pcie_bridge(pdev);
+ if (dev_tmp) {
+ if (pci_is_pcie(dev_tmp)) {
+ bridge_bus = dev_tmp->subordinate->number;
+ bridge_devfn = 0;
+ } else {
+ bridge_bus = dev_tmp->bus->number;
+ bridge_devfn = dev_tmp->devfn;
}
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
- /* pcie-pci bridge already has a domain, uses it */
- if (found) {
- domain = found;
- goto found_domain;
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dmar_search_domain_by_dev_info(segment, bridge_bus, bridge_devfn);
+ if (info) {
+ iommu = info->iommu;
+ domain = info->domain;
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ /* pcie-pci bridge already has a domain, use it */
+ if (info)
+ goto found_domain;
}
}
- domain = alloc_domain();
- if (!domain)
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
goto error;
- /* Allocate new domain for the device */
- drhd = dmar_find_matched_drhd_unit(pdev);
- if (!drhd) {
- printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
- pci_name(pdev));
- free_domain_mem(domain);
- return NULL;
- }
- iommu = drhd->iommu;
-
- ret = iommu_attach_domain(domain, iommu);
- if (ret) {
+ /* Allocate and initialize new domain for the device */
+ domain = alloc_domain(false);
+ if (!domain)
+ goto error;
+ if (iommu_attach_domain(domain, iommu)) {
free_domain_mem(domain);
+ domain = NULL;
goto error;
}
-
- if (domain_init(domain, gaw)) {
- domain_exit(domain);
+ free = domain;
+ if (domain_init(domain, gaw))
goto error;
- }
/* register pcie-to-pci device */
if (dev_tmp) {
- info = alloc_devinfo_mem();
- if (!info) {
- domain_exit(domain);
+ domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn,
+ NULL, domain);
+ if (!domain)
goto error;
- }
- info->segment = segment;
- info->bus = bus;
- info->devfn = devfn;
- info->dev = NULL;
- info->domain = domain;
- /* This domain is shared by devices under p2p bridge */
- domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
-
- /* pcie-to-pci bridge already has a domain, uses it */
- found = NULL;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(tmp, &device_domain_list, global) {
- if (tmp->segment == segment &&
- tmp->bus == bus && tmp->devfn == devfn) {
- found = tmp->domain;
- break;
- }
- }
- if (found) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
- free_devinfo_mem(info);
- domain_exit(domain);
- domain = found;
- } else {
- list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- }
}
found_domain:
- info = alloc_devinfo_mem();
- if (!info)
- goto error;
- info->segment = segment;
- info->bus = pdev->bus->number;
- info->devfn = pdev->devfn;
- info->dev = pdev;
- info->domain = domain;
- spin_lock_irqsave(&device_domain_lock, flags);
- /* somebody is fast */
- found = find_domain(pdev);
- if (found != NULL) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
- if (found != domain) {
- domain_exit(domain);
- domain = found;
- }
- free_devinfo_mem(info);
- return domain;
- }
- list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- pdev->dev.archdata.iommu = info;
- spin_unlock_irqrestore(&device_domain_lock, flags);
- return domain;
+ domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
error:
- /* recheck it here, maybe others set it */
- return find_domain(pdev);
+ if (free != domain)
+ domain_exit(free);
+
+ return domain;
}
static int iommu_identity_mapping;
@@ -2109,14 +2312,14 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
DMA_PTE_READ|DMA_PTE_WRITE);
}
-static int iommu_prepare_identity_map(struct pci_dev *pdev,
+static int iommu_prepare_identity_map(struct device *dev,
unsigned long long start,
unsigned long long end)
{
struct dmar_domain *domain;
int ret;
- domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain)
return -ENOMEM;
@@ -2126,13 +2329,13 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
up to start with in si_domain */
if (domain == si_domain && hw_pass_through) {
printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
- pci_name(pdev), start, end);
+ dev_name(dev), start, end);
return 0;
}
printk(KERN_INFO
"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- pci_name(pdev), start, end);
+ dev_name(dev), start, end);
if (end < start) {
WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
@@ -2160,7 +2363,7 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
goto error;
/* context entry init */
- ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+ ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
if (ret)
goto error;
@@ -2172,12 +2375,12 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
- struct pci_dev *pdev)
+ struct device *dev)
{
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
return 0;
- return iommu_prepare_identity_map(pdev, rmrr->base_address,
- rmrr->end_address);
+ return iommu_prepare_identity_map(dev, rmrr->base_address,
+ rmrr->end_address);
}
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
@@ -2191,7 +2394,7 @@ static inline void iommu_prepare_isa(void)
return;
printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
- ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
+ ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
if (ret)
printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
@@ -2213,10 +2416,12 @@ static int __init si_domain_init(int hw)
struct intel_iommu *iommu;
int nid, ret = 0;
- si_domain = alloc_domain();
+ si_domain = alloc_domain(false);
if (!si_domain)
return -EFAULT;
+ si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+
for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
if (ret) {
@@ -2230,7 +2435,6 @@ static int __init si_domain_init(int hw)
return -EFAULT;
}
- si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
pr_debug("IOMMU: identity mapping domain is domain %d\n",
si_domain->id);
@@ -2252,16 +2456,14 @@ static int __init si_domain_init(int hw)
return 0;
}
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev);
-static int identity_mapping(struct pci_dev *pdev)
+static int identity_mapping(struct device *dev)
{
struct device_domain_info *info;
if (likely(!iommu_identity_mapping))
return 0;
- info = pdev->dev.archdata.iommu;
+ info = dev->archdata.iommu;
if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
return (info->domain == si_domain);
@@ -2269,111 +2471,112 @@ static int identity_mapping(struct pci_dev *pdev)
}
static int domain_add_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev,
- int translation)
+ struct device *dev, int translation)
{
- struct device_domain_info *info;
- unsigned long flags;
+ struct dmar_domain *ndomain;
+ struct intel_iommu *iommu;
+ u8 bus, devfn;
int ret;
- info = alloc_devinfo_mem();
- if (!info)
- return -ENOMEM;
-
- info->segment = pci_domain_nr(pdev->bus);
- info->bus = pdev->bus->number;
- info->devfn = pdev->devfn;
- info->dev = pdev;
- info->domain = domain;
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return -ENODEV;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- pdev->dev.archdata.iommu = info;
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+ if (ndomain != domain)
+ return -EBUSY;
- ret = domain_context_mapping(domain, pdev, translation);
+ ret = domain_context_mapping(domain, dev, translation);
if (ret) {
- spin_lock_irqsave(&device_domain_lock, flags);
- unlink_domain_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- free_devinfo_mem(info);
+ domain_remove_one_dev_info(domain, dev);
return ret;
}
return 0;
}
-static bool device_has_rmrr(struct pci_dev *dev)
+static bool device_has_rmrr(struct device *dev)
{
struct dmar_rmrr_unit *rmrr;
+ struct device *tmp;
int i;
+ rcu_read_lock();
for_each_rmrr_units(rmrr) {
- for (i = 0; i < rmrr->devices_cnt; i++) {
- /*
- * Return TRUE if this RMRR contains the device that
- * is passed in.
- */
- if (rmrr->devices[i] == dev)
+ /*
+ * Return TRUE if this RMRR contains the device that
+ * is passed in.
+ */
+ for_each_active_dev_scope(rmrr->devices,
+ rmrr->devices_cnt, i, tmp)
+ if (tmp == dev) {
+ rcu_read_unlock();
return true;
- }
+ }
}
+ rcu_read_unlock();
return false;
}
-static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+static int iommu_should_identity_map(struct device *dev, int startup)
{
- /*
- * We want to prevent any device associated with an RMRR from
- * getting placed into the SI Domain. This is done because
- * problems exist when devices are moved in and out of domains
- * and their respective RMRR info is lost. We exempt USB devices
- * from this process due to their usage of RMRRs that are known
- * to not be needed after BIOS hand-off to OS.
- */
- if (device_has_rmrr(pdev) &&
- (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
- return 0;
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
- if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
- return 1;
+ /*
+ * We want to prevent any device associated with an RMRR from
+ * getting placed into the SI Domain. This is done because
+ * problems exist when devices are moved in and out of domains
+ * and their respective RMRR info is lost. We exempt USB devices
+ * from this process due to their usage of RMRRs that are known
+ * to not be needed after BIOS hand-off to OS.
+ */
+ if (device_has_rmrr(dev) &&
+ (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
+ return 0;
- if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
- return 1;
+ if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+ return 1;
- if (!(iommu_identity_mapping & IDENTMAP_ALL))
- return 0;
+ if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
+ return 1;
- /*
- * We want to start off with all devices in the 1:1 domain, and
- * take them out later if we find they can't access all of memory.
- *
- * However, we can't do this for PCI devices behind bridges,
- * because all PCI devices behind the same bridge will end up
- * with the same source-id on their transactions.
- *
- * Practically speaking, we can't change things around for these
- * devices at run-time, because we can't be sure there'll be no
- * DMA transactions in flight for any of their siblings.
- *
- * So PCI devices (unless they're on the root bus) as well as
- * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
- * the 1:1 domain, just in _case_ one of their siblings turns out
- * not to be able to map all of memory.
- */
- if (!pci_is_pcie(pdev)) {
- if (!pci_is_root_bus(pdev->bus))
+ if (!(iommu_identity_mapping & IDENTMAP_ALL))
return 0;
- if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+
+ /*
+ * We want to start off with all devices in the 1:1 domain, and
+ * take them out later if we find they can't access all of memory.
+ *
+ * However, we can't do this for PCI devices behind bridges,
+ * because all PCI devices behind the same bridge will end up
+ * with the same source-id on their transactions.
+ *
+ * Practically speaking, we can't change things around for these
+ * devices at run-time, because we can't be sure there'll be no
+ * DMA transactions in flight for any of their siblings.
+ *
+ * So PCI devices (unless they're on the root bus) as well as
+ * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+ * the 1:1 domain, just in _case_ one of their siblings turns out
+ * not to be able to map all of memory.
+ */
+ if (!pci_is_pcie(pdev)) {
+ if (!pci_is_root_bus(pdev->bus))
+ return 0;
+ if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+ return 0;
+ } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
return 0;
- } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
+ } else {
+ if (device_has_rmrr(dev))
+ return 0;
+ }
- /*
+ /*
* At boot time, we don't yet know if devices will be 64-bit capable.
- * Assume that they will -- if they turn out not to be, then we can
+ * Assume that they will — if they turn out not to be, then we can
* take them out of the 1:1 domain later.
*/
if (!startup) {
@@ -2381,42 +2584,77 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
* If the device's dma_mask is less than the system's memory
* size then this is not a candidate for identity mapping.
*/
- u64 dma_mask = pdev->dma_mask;
+ u64 dma_mask = *dev->dma_mask;
- if (pdev->dev.coherent_dma_mask &&
- pdev->dev.coherent_dma_mask < dma_mask)
- dma_mask = pdev->dev.coherent_dma_mask;
+ if (dev->coherent_dma_mask &&
+ dev->coherent_dma_mask < dma_mask)
+ dma_mask = dev->coherent_dma_mask;
- return dma_mask >= dma_get_required_mask(&pdev->dev);
+ return dma_mask >= dma_get_required_mask(dev);
}
return 1;
}
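A worked example of the DMA-mask check above, assuming a machine with memory above 4GiB (the numbers are illustrative): a 32-bit-only device fails the comparison and is therefore kept out of, or later dropped from, the 1:1 domain.

/*
 * dev->dma_mask               = DMA_BIT_MASK(32) = 0x00000000ffffffff
 * dma_get_required_mask(dev)  = e.g. DMA_BIT_MASK(36)
 * dma_mask >= required        -> false -> no identity mapping for this device
 */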
+static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
+{
+ int ret;
+
+ if (!iommu_should_identity_map(dev, 1))
+ return 0;
+
+ ret = domain_add_dev_info(si_domain, dev,
+ hw ? CONTEXT_TT_PASS_THROUGH :
+ CONTEXT_TT_MULTI_LEVEL);
+ if (!ret)
+ pr_info("IOMMU: %s identity mapping for device %s\n",
+ hw ? "hardware" : "software", dev_name(dev));
+ else if (ret == -ENODEV)
+ /* device not associated with an iommu */
+ ret = 0;
+
+ return ret;
+}
+
+
static int __init iommu_prepare_static_identity_mapping(int hw)
{
struct pci_dev *pdev = NULL;
- int ret;
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ struct device *dev;
+ int i;
+ int ret = 0;
ret = si_domain_init(hw);
if (ret)
return -EFAULT;
for_each_pci_dev(pdev) {
- if (iommu_should_identity_map(pdev, 1)) {
- ret = domain_add_dev_info(si_domain, pdev,
- hw ? CONTEXT_TT_PASS_THROUGH :
- CONTEXT_TT_MULTI_LEVEL);
- if (ret) {
- /* device not associated with an iommu */
- if (ret == -ENODEV)
- continue;
- return ret;
+ ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
+ if (ret)
+ return ret;
+ }
+
+ for_each_active_iommu(iommu, drhd)
+ for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
+ struct acpi_device_physical_node *pn;
+ struct acpi_device *adev;
+
+ if (dev->bus != &acpi_bus_type)
+ continue;
+
+ adev = to_acpi_device(dev);
+ mutex_lock(&adev->physical_node_lock);
+ list_for_each_entry(pn, &adev->physical_node_list, node) {
+ ret = dev_prepare_static_identity_mapping(pn->dev, hw);
+ if (ret)
+ break;
}
- pr_info("IOMMU: %s identity mapping for device %s\n",
- hw ? "hardware" : "software", pci_name(pdev));
+ mutex_unlock(&adev->physical_node_lock);
+ if (ret)
+ return ret;
}
- }
return 0;
}
@@ -2425,7 +2663,7 @@ static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
- struct pci_dev *pdev;
+ struct device *dev;
struct intel_iommu *iommu;
int i, ret;
@@ -2461,7 +2699,7 @@ static int __init init_dmars(void)
sizeof(struct deferred_flush_tables), GFP_KERNEL);
if (!deferred_flush) {
ret = -ENOMEM;
- goto error;
+ goto free_g_iommus;
}
for_each_active_iommu(iommu, drhd) {
@@ -2469,7 +2707,7 @@ static int __init init_dmars(void)
ret = iommu_init_domains(iommu);
if (ret)
- goto error;
+ goto free_iommu;
/*
* TBD:
@@ -2479,7 +2717,7 @@ static int __init init_dmars(void)
ret = iommu_alloc_root_entry(iommu);
if (ret) {
printk(KERN_ERR "IOMMU: allocate root entry failed\n");
- goto error;
+ goto free_iommu;
}
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
@@ -2548,7 +2786,7 @@ static int __init init_dmars(void)
ret = iommu_prepare_static_identity_mapping(hw_pass_through);
if (ret) {
printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
- goto error;
+ goto free_iommu;
}
}
/*
@@ -2567,15 +2805,10 @@ static int __init init_dmars(void)
*/
printk(KERN_INFO "IOMMU: Setting RMRR:\n");
for_each_rmrr_units(rmrr) {
- for (i = 0; i < rmrr->devices_cnt; i++) {
- pdev = rmrr->devices[i];
- /*
- * some BIOS lists non-exist devices in DMAR
- * table.
- */
- if (!pdev)
- continue;
- ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+ /* some BIOSes list non-existent devices in the DMAR table. */
+ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+ i, dev) {
+ ret = iommu_prepare_rmrr_dev(rmrr, dev);
if (ret)
printk(KERN_ERR
"IOMMU: mapping reserved region failed\n");
@@ -2606,7 +2839,7 @@ static int __init init_dmars(void)
ret = dmar_set_interrupt(iommu);
if (ret)
- goto error;
+ goto free_iommu;
iommu_set_root_entry(iommu);
@@ -2615,17 +2848,20 @@ static int __init init_dmars(void)
ret = iommu_enable_translation(iommu);
if (ret)
- goto error;
+ goto free_iommu;
iommu_disable_protect_mem_regions(iommu);
}
return 0;
-error:
+
+free_iommu:
for_each_active_iommu(iommu, drhd)
free_dmar_iommu(iommu);
kfree(deferred_flush);
+free_g_iommus:
kfree(g_iommus);
+error:
return ret;
}
@@ -2634,7 +2870,6 @@ static struct iova *intel_alloc_iova(struct device *dev,
struct dmar_domain *domain,
unsigned long nrpages, uint64_t dma_mask)
{
- struct pci_dev *pdev = to_pci_dev(dev);
struct iova *iova = NULL;
/* Restrict dma_mask to the width that the iommu can handle */
@@ -2654,34 +2889,31 @@ static struct iova *intel_alloc_iova(struct device *dev,
iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
if (unlikely(!iova)) {
printk(KERN_ERR "Allocating %ld-page iova for %s failed",
- nrpages, pci_name(pdev));
+ nrpages, dev_name(dev));
return NULL;
}
return iova;
}
-static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
{
struct dmar_domain *domain;
int ret;
- domain = get_domain_for_dev(pdev,
- DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain) {
- printk(KERN_ERR
- "Allocating domain for %s failed", pci_name(pdev));
+ printk(KERN_ERR "Allocating domain for %s failed",
+ dev_name(dev));
return NULL;
}
/* make sure context mapping is ok */
- if (unlikely(!domain_context_mapped(pdev))) {
- ret = domain_context_mapping(domain, pdev,
- CONTEXT_TT_MULTI_LEVEL);
+ if (unlikely(!domain_context_mapped(dev))) {
+ ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
if (ret) {
- printk(KERN_ERR
- "Domain context map for %s failed",
- pci_name(pdev));
+ printk(KERN_ERR "Domain context map for %s failed",
+ dev_name(dev));
return NULL;
}
}
@@ -2689,51 +2921,46 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
return domain;
}
-static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
{
struct device_domain_info *info;
/* No lock here, assumes no domain exit in normal case */
- info = dev->dev.archdata.iommu;
+ info = dev->archdata.iommu;
if (likely(info))
return info->domain;
return __get_valid_domain_for_dev(dev);
}
-static int iommu_dummy(struct pci_dev *pdev)
+static int iommu_dummy(struct device *dev)
{
- return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+ return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
-/* Check if the pdev needs to go through non-identity map and unmap process.*/
+/* Check if the dev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
- struct pci_dev *pdev;
int found;
- if (unlikely(!dev_is_pci(dev)))
- return 1;
-
- pdev = to_pci_dev(dev);
- if (iommu_dummy(pdev))
+ if (iommu_dummy(dev))
return 1;
if (!iommu_identity_mapping)
return 0;
- found = identity_mapping(pdev);
+ found = identity_mapping(dev);
if (found) {
- if (iommu_should_identity_map(pdev, 0))
+ if (iommu_should_identity_map(dev, 0))
return 1;
else {
/*
* 32 bit DMA is removed from si_domain and fall back
* to non-identity mapping.
*/
- domain_remove_one_dev_info(si_domain, pdev);
+ domain_remove_one_dev_info(si_domain, dev);
printk(KERN_INFO "32bit %s uses non-identity mapping\n",
- pci_name(pdev));
+ dev_name(dev));
return 0;
}
} else {
@@ -2741,15 +2968,15 @@ static int iommu_no_mapping(struct device *dev)
* In case of a detached 64 bit DMA device from vm, the device
* is put into si_domain for identity mapping.
*/
- if (iommu_should_identity_map(pdev, 0)) {
+ if (iommu_should_identity_map(dev, 0)) {
int ret;
- ret = domain_add_dev_info(si_domain, pdev,
+ ret = domain_add_dev_info(si_domain, dev,
hw_pass_through ?
CONTEXT_TT_PASS_THROUGH :
CONTEXT_TT_MULTI_LEVEL);
if (!ret) {
printk(KERN_INFO "64bit %s uses identity mapping\n",
- pci_name(pdev));
+ dev_name(dev));
return 1;
}
}
@@ -2758,10 +2985,9 @@ static int iommu_no_mapping(struct device *dev)
return 0;
}
-static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
size_t size, int dir, u64 dma_mask)
{
- struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
phys_addr_t start_paddr;
struct iova *iova;
@@ -2772,17 +2998,17 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(hwdev))
+ if (iommu_no_mapping(dev))
return paddr;
- domain = get_valid_domain_for_dev(pdev);
+ domain = get_valid_domain_for_dev(dev);
if (!domain)
return 0;
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
- iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
+ iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
if (!iova)
goto error;
@@ -2808,7 +3034,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
+ iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
else
iommu_flush_write_buffer(iommu);
@@ -2820,7 +3046,7 @@ error:
if (iova)
__free_iova(&domain->iovad, iova);
printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
- pci_name(pdev), size, (unsigned long long)paddr, dir);
+ dev_name(dev), size, (unsigned long long)paddr, dir);
return 0;
}
@@ -2830,7 +3056,7 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
struct dma_attrs *attrs)
{
return __intel_map_single(dev, page_to_phys(page) + offset, size,
- dir, to_pci_dev(dev)->dma_mask);
+ dir, *dev->dma_mask);
}
static void flush_unmaps(void)
@@ -2860,13 +3086,16 @@ static void flush_unmaps(void)
/* On real hardware multiple invalidations are expensive */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, domain->id,
- iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
+ iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
+ !deferred_flush[i].freelist[j], 0);
else {
mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
}
__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
+ if (deferred_flush[i].freelist[j])
+ dma_free_pagelist(deferred_flush[i].freelist[j]);
}
deferred_flush[i].next = 0;
}
@@ -2883,7 +3112,7 @@ static void flush_unmaps_timeout(unsigned long data)
spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
-static void add_unmap(struct dmar_domain *dom, struct iova *iova)
+static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
{
unsigned long flags;
int next, iommu_id;
@@ -2899,6 +3128,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
next = deferred_flush[iommu_id].next;
deferred_flush[iommu_id].domain[next] = dom;
deferred_flush[iommu_id].iova[next] = iova;
+ deferred_flush[iommu_id].freelist[next] = freelist;
deferred_flush[iommu_id].next++;
if (!timer_on) {
@@ -2913,16 +3143,16 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- struct pci_dev *pdev = to_pci_dev(dev);
struct dmar_domain *domain;
unsigned long start_pfn, last_pfn;
struct iova *iova;
struct intel_iommu *iommu;
+ struct page *freelist;
if (iommu_no_mapping(dev))
return;
- domain = find_domain(pdev);
+ domain = find_domain(dev);
BUG_ON(!domain);
iommu = domain_get_iommu(domain);
@@ -2936,21 +3166,18 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
pr_debug("Device %s unmapping: pfn %lx-%lx\n",
- pci_name(pdev), start_pfn, last_pfn);
+ dev_name(dev), start_pfn, last_pfn);
- /* clear the whole page */
- dma_pte_clear_range(domain, start_pfn, last_pfn);
-
- /* free page tables */
- dma_pte_free_pagetable(domain, start_pfn, last_pfn);
+ freelist = domain_unmap(domain, start_pfn, last_pfn);
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- last_pfn - start_pfn + 1, 0);
+ last_pfn - start_pfn + 1, !freelist, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
+ dma_free_pagelist(freelist);
} else {
- add_unmap(domain, iova);
+ add_unmap(domain, iova, freelist);
/*
* queue up the release of the unmap to save the 1/6th of the
* cpu used up by the iotlb flush operation...
@@ -2958,7 +3185,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
}
}
-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
+static void *intel_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
@@ -2968,10 +3195,10 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
size = PAGE_ALIGN(size);
order = get_order(size);
- if (!iommu_no_mapping(hwdev))
+ if (!iommu_no_mapping(dev))
flags &= ~(GFP_DMA | GFP_DMA32);
- else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
- if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
+ else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
flags |= GFP_DMA;
else
flags |= GFP_DMA32;
@@ -2982,16 +3209,16 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
return NULL;
memset(vaddr, 0, size);
- *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+ *dma_handle = __intel_map_single(dev, virt_to_bus(vaddr), size,
DMA_BIDIRECTIONAL,
- hwdev->coherent_dma_mask);
+ dev->coherent_dma_mask);
if (*dma_handle)
return vaddr;
free_pages((unsigned long)vaddr, order);
return NULL;
}
-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
int order;
@@ -2999,24 +3226,24 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
size = PAGE_ALIGN(size);
order = get_order(size);
- intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
+ intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long)vaddr, order);
}
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
unsigned long start_pfn, last_pfn;
struct iova *iova;
struct intel_iommu *iommu;
+ struct page *freelist;
- if (iommu_no_mapping(hwdev))
+ if (iommu_no_mapping(dev))
return;
- domain = find_domain(pdev);
+ domain = find_domain(dev);
BUG_ON(!domain);
iommu = domain_get_iommu(domain);
@@ -3029,19 +3256,16 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
start_pfn = mm_to_dma_pfn(iova->pfn_lo);
last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
- /* clear the whole page */
- dma_pte_clear_range(domain, start_pfn, last_pfn);
-
- /* free page tables */
- dma_pte_free_pagetable(domain, start_pfn, last_pfn);
+ freelist = domain_unmap(domain, start_pfn, last_pfn);
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- last_pfn - start_pfn + 1, 0);
+ last_pfn - start_pfn + 1, !freelist, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
+ dma_free_pagelist(freelist);
} else {
- add_unmap(domain, iova);
+ add_unmap(domain, iova, freelist);
/*
* queue up the release of the unmap to save the 1/6th of the
* cpu used up by the iotlb flush operation...
@@ -3063,11 +3287,10 @@ static int intel_nontranslate_map_sg(struct device *hddev,
return nelems;
}
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
int i;
- struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
size_t size = 0;
int prot = 0;
@@ -3078,10 +3301,10 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(hwdev))
- return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
+ if (iommu_no_mapping(dev))
+ return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
- domain = get_valid_domain_for_dev(pdev);
+ domain = get_valid_domain_for_dev(dev);
if (!domain)
return 0;
@@ -3090,8 +3313,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
for_each_sg(sglist, sg, nelems, i)
size += aligned_nrpages(sg->offset, sg->length);
- iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
- pdev->dma_mask);
+ iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
+ *dev->dma_mask);
if (!iova) {
sglist->dma_length = 0;
return 0;
@@ -3124,7 +3347,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
else
iommu_flush_write_buffer(iommu);
@@ -3259,29 +3482,28 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quir
static void __init init_no_remapping_devices(void)
{
struct dmar_drhd_unit *drhd;
+ struct device *dev;
+ int i;
for_each_drhd_unit(drhd) {
if (!drhd->include_all) {
- int i;
- for (i = 0; i < drhd->devices_cnt; i++)
- if (drhd->devices[i] != NULL)
- break;
- /* ignore DMAR unit if no pci devices exist */
+ for_each_active_dev_scope(drhd->devices,
+ drhd->devices_cnt, i, dev)
+ break;
+ /* ignore DMAR unit if no devices exist */
if (i == drhd->devices_cnt)
drhd->ignored = 1;
}
}
for_each_active_drhd_unit(drhd) {
- int i;
if (drhd->include_all)
continue;
- for (i = 0; i < drhd->devices_cnt; i++)
- if (drhd->devices[i] &&
- !IS_GFX_DEVICE(drhd->devices[i]))
+ for_each_active_dev_scope(drhd->devices,
+ drhd->devices_cnt, i, dev)
+ if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
break;
-
if (i < drhd->devices_cnt)
continue;
@@ -3291,11 +3513,9 @@ static void __init init_no_remapping_devices(void)
intel_iommu_gfx_mapped = 1;
} else {
drhd->ignored = 1;
- for (i = 0; i < drhd->devices_cnt; i++) {
- if (!drhd->devices[i])
- continue;
- drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
- }
+ for_each_active_dev_scope(drhd->devices,
+ drhd->devices_cnt, i, dev)
+ dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
}
}
@@ -3438,13 +3658,6 @@ static void __init init_iommu_pm_ops(void)
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */
-LIST_HEAD(dmar_rmrr_units);
-
-static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
-{
- list_add(&rmrr->list, &dmar_rmrr_units);
-}
-
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
@@ -3459,25 +3672,19 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
rmrr = (struct acpi_dmar_reserved_memory *)header;
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
+ rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
+ ((void *)rmrr) + rmrr->header.length,
+ &rmrru->devices_cnt);
+ if (rmrru->devices_cnt && rmrru->devices == NULL) {
+ kfree(rmrru);
+ return -ENOMEM;
+ }
- dmar_register_rmrr_unit(rmrru);
- return 0;
-}
-
-static int __init
-rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
-{
- struct acpi_dmar_reserved_memory *rmrr;
+ list_add(&rmrru->list, &dmar_rmrr_units);
- rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
- return dmar_parse_dev_scope((void *)(rmrr + 1),
- ((void *)rmrr) + rmrr->header.length,
- &rmrru->devices_cnt, &rmrru->devices,
- rmrr->segment);
+ return 0;
}
-static LIST_HEAD(dmar_atsr_units);
-
int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
struct acpi_dmar_atsr *atsr;
@@ -3490,26 +3697,21 @@ int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
atsru->hdr = hdr;
atsru->include_all = atsr->flags & 0x1;
+ if (!atsru->include_all) {
+ atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
+ (void *)atsr + atsr->header.length,
+ &atsru->devices_cnt);
+ if (atsru->devices_cnt && atsru->devices == NULL) {
+ kfree(atsru);
+ return -ENOMEM;
+ }
+ }
- list_add(&atsru->list, &dmar_atsr_units);
+ list_add_rcu(&atsru->list, &dmar_atsr_units);
return 0;
}
-static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
-{
- struct acpi_dmar_atsr *atsr;
-
- if (atsru->include_all)
- return 0;
-
- atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- return dmar_parse_dev_scope((void *)(atsr + 1),
- (void *)atsr + atsr->header.length,
- &atsru->devices_cnt, &atsru->devices,
- atsr->segment);
-}
-
static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
@@ -3535,62 +3737,97 @@ static void intel_iommu_free_dmars(void)
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
- int i;
+ int i, ret = 1;
struct pci_bus *bus;
+ struct pci_dev *bridge = NULL;
+ struct device *tmp;
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
dev = pci_physfn(dev);
-
- list_for_each_entry(atsru, &dmar_atsr_units, list) {
- atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- if (atsr->segment == pci_domain_nr(dev->bus))
- goto found;
- }
-
- return 0;
-
-found:
for (bus = dev->bus; bus; bus = bus->parent) {
- struct pci_dev *bridge = bus->self;
-
+ bridge = bus->self;
if (!bridge || !pci_is_pcie(bridge) ||
pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
return 0;
-
- if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
- for (i = 0; i < atsru->devices_cnt; i++)
- if (atsru->devices[i] == bridge)
- return 1;
+ if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
break;
- }
}
+ if (!bridge)
+ return 0;
- if (atsru->include_all)
- return 1;
+ rcu_read_lock();
+ list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ if (atsr->segment != pci_domain_nr(dev->bus))
+ continue;
- return 0;
+ for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
+ if (tmp == &bridge->dev)
+ goto out;
+
+ if (atsru->include_all)
+ goto out;
+ }
+ ret = 0;
+out:
+ rcu_read_unlock();
+
+ return ret;
}
-int __init dmar_parse_rmrr_atsr_dev(void)
+int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
- struct dmar_rmrr_unit *rmrr;
- struct dmar_atsr_unit *atsr;
int ret = 0;
+ struct dmar_rmrr_unit *rmrru;
+ struct dmar_atsr_unit *atsru;
+ struct acpi_dmar_atsr *atsr;
+ struct acpi_dmar_reserved_memory *rmrr;
- list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
- ret = rmrr_parse_dev(rmrr);
- if (ret)
- return ret;
+ if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
+ return 0;
+
+ list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
+ rmrr = container_of(rmrru->hdr,
+ struct acpi_dmar_reserved_memory, header);
+ if (info->event == BUS_NOTIFY_ADD_DEVICE) {
+ ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
+ ((void *)rmrr) + rmrr->header.length,
+ rmrr->segment, rmrru->devices,
+ rmrru->devices_cnt);
+ if (ret > 0)
+ break;
+ else if(ret < 0)
+ return ret;
+ } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+ if (dmar_remove_dev_scope(info, rmrr->segment,
+ rmrru->devices, rmrru->devices_cnt))
+ break;
+ }
}
- list_for_each_entry(atsr, &dmar_atsr_units, list) {
- ret = atsr_parse_dev(atsr);
- if (ret)
- return ret;
+ list_for_each_entry(atsru, &dmar_atsr_units, list) {
+ if (atsru->include_all)
+ continue;
+
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ if (info->event == BUS_NOTIFY_ADD_DEVICE) {
+ ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
+ (void *)atsr + atsr->header.length,
+ atsr->segment, atsru->devices,
+ atsru->devices_cnt);
+ if (ret > 0)
+ break;
+ else if(ret < 0)
+ return ret;
+ } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+ if (dmar_remove_dev_scope(info, atsr->segment,
+ atsru->devices, atsru->devices_cnt))
+ break;
+ }
}
- return ret;
+ return 0;
}
/*
@@ -3603,24 +3840,26 @@ static int device_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
- struct pci_dev *pdev = to_pci_dev(dev);
struct dmar_domain *domain;
- if (iommu_no_mapping(dev))
+ if (iommu_dummy(dev))
return 0;
- domain = find_domain(pdev);
- if (!domain)
+ if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
+ action != BUS_NOTIFY_DEL_DEVICE)
return 0;
- if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
- domain_remove_one_dev_info(domain, pdev);
+ domain = find_domain(dev);
+ if (!domain)
+ return 0;
- if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
- !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
- list_empty(&domain->devices))
- domain_exit(domain);
- }
+ down_read(&dmar_global_lock);
+ domain_remove_one_dev_info(domain, dev);
+ if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+ list_empty(&domain->devices))
+ domain_exit(domain);
+ up_read(&dmar_global_lock);
return 0;
}
@@ -3629,6 +3868,75 @@ static struct notifier_block device_nb = {
.notifier_call = device_notifier,
};
+static int intel_iommu_memory_notifier(struct notifier_block *nb,
+ unsigned long val, void *v)
+{
+ struct memory_notify *mhp = v;
+ unsigned long long start, end;
+ unsigned long start_vpfn, last_vpfn;
+
+ switch (val) {
+ case MEM_GOING_ONLINE:
+ start = mhp->start_pfn << PAGE_SHIFT;
+ end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
+ if (iommu_domain_identity_map(si_domain, start, end)) {
+ pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
+ start, end);
+ return NOTIFY_BAD;
+ }
+ break;
+
+ case MEM_OFFLINE:
+ case MEM_CANCEL_ONLINE:
+ start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+ last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
+ while (start_vpfn <= last_vpfn) {
+ struct iova *iova;
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ struct page *freelist;
+
+ iova = find_iova(&si_domain->iovad, start_vpfn);
+ if (iova == NULL) {
+ pr_debug("dmar: failed get IOVA for PFN %lx\n",
+ start_vpfn);
+ break;
+ }
+
+ iova = split_and_remove_iova(&si_domain->iovad, iova,
+ start_vpfn, last_vpfn);
+ if (iova == NULL) {
+ pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
+ start_vpfn, last_vpfn);
+ return NOTIFY_BAD;
+ }
+
+ freelist = domain_unmap(si_domain, iova->pfn_lo,
+ iova->pfn_hi);
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd)
+ iommu_flush_iotlb_psi(iommu, si_domain->id,
+ iova->pfn_lo,
+ iova->pfn_hi - iova->pfn_lo + 1,
+ !freelist, 0);
+ rcu_read_unlock();
+ dma_free_pagelist(freelist);
+
+ start_vpfn = iova->pfn_hi + 1;
+ free_iova_mem(iova);
+ }
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block intel_iommu_memory_nb = {
+ .notifier_call = intel_iommu_memory_notifier,
+ .priority = 0
+};
+
int __init intel_iommu_init(void)
{
int ret = -ENODEV;
@@ -3638,6 +3946,13 @@ int __init intel_iommu_init(void)
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
+ if (iommu_init_mempool()) {
+ if (force_on)
+ panic("tboot: Failed to initialize iommu memory\n");
+ return -ENOMEM;
+ }
+
+ down_write(&dmar_global_lock);
if (dmar_table_init()) {
if (force_on)
panic("tboot: Failed to initialize DMAR table\n");
@@ -3660,12 +3975,6 @@ int __init intel_iommu_init(void)
if (no_iommu || dmar_disabled)
goto out_free_dmar;
- if (iommu_init_mempool()) {
- if (force_on)
- panic("tboot: Failed to initialize iommu memory\n");
- goto out_free_dmar;
- }
-
if (list_empty(&dmar_rmrr_units))
printk(KERN_INFO "DMAR: No RMRR found\n");
@@ -3675,7 +3984,7 @@ int __init intel_iommu_init(void)
if (dmar_init_reserved_ranges()) {
if (force_on)
panic("tboot: Failed to reserve iommu ranges\n");
- goto out_free_mempool;
+ goto out_free_reserved_range;
}
init_no_remapping_devices();
@@ -3687,6 +3996,7 @@ int __init intel_iommu_init(void)
printk(KERN_ERR "IOMMU: dmar init failed\n");
goto out_free_reserved_range;
}
+ up_write(&dmar_global_lock);
printk(KERN_INFO
"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
@@ -3699,8 +4009,9 @@ int __init intel_iommu_init(void)
init_iommu_pm_ops();
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
-
bus_register_notifier(&pci_bus_type, &device_nb);
+ if (si_domain && !hw_pass_through)
+ register_memory_notifier(&intel_iommu_memory_nb);
intel_iommu_enabled = 1;
@@ -3708,21 +4019,23 @@ int __init intel_iommu_init(void)
out_free_reserved_range:
put_iova_domain(&reserved_iova_list);
-out_free_mempool:
- iommu_exit_mempool();
out_free_dmar:
intel_iommu_free_dmars();
+ up_write(&dmar_global_lock);
+ iommu_exit_mempool();
return ret;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
- struct pci_dev *pdev)
+ struct device *dev)
{
- struct pci_dev *tmp, *parent;
+ struct pci_dev *tmp, *parent, *pdev;
- if (!iommu || !pdev)
+ if (!iommu || !dev || !dev_is_pci(dev))
return;
+ pdev = to_pci_dev(dev);
+
/* dependent device detach */
tmp = pci_find_upstream_pcie_bridge(pdev);
/* Secondary interface's bus number and devfn 0 */
@@ -3743,29 +4056,28 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev)
+ struct device *dev)
{
struct device_domain_info *info, *tmp;
struct intel_iommu *iommu;
unsigned long flags;
int found = 0;
+ u8 bus, devfn;
- iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
- pdev->devfn);
+ iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return;
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry_safe(info, tmp, &domain->devices, link) {
- if (info->segment == pci_domain_nr(pdev->bus) &&
- info->bus == pdev->bus->number &&
- info->devfn == pdev->devfn) {
+ if (info->iommu == iommu && info->bus == bus &&
+ info->devfn == devfn) {
unlink_domain_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
iommu_disable_dev_iotlb(info);
iommu_detach_dev(iommu, info->bus, info->devfn);
- iommu_detach_dependent_devices(iommu, pdev);
+ iommu_detach_dependent_devices(iommu, dev);
free_devinfo_mem(info);
spin_lock_irqsave(&device_domain_lock, flags);
@@ -3780,8 +4092,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
* owned by this domain, clear this iommu in iommu_bmp
* update iommu count and coherency
*/
- if (iommu == device_to_iommu(info->segment, info->bus,
- info->devfn))
+ if (info->iommu == iommu)
found = 1;
}
@@ -3805,67 +4116,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
}
}
-static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
-{
- struct device_domain_info *info;
- struct intel_iommu *iommu;
- unsigned long flags1, flags2;
-
- spin_lock_irqsave(&device_domain_lock, flags1);
- while (!list_empty(&domain->devices)) {
- info = list_entry(domain->devices.next,
- struct device_domain_info, link);
- unlink_domain_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags1);
-
- iommu_disable_dev_iotlb(info);
- iommu = device_to_iommu(info->segment, info->bus, info->devfn);
- iommu_detach_dev(iommu, info->bus, info->devfn);
- iommu_detach_dependent_devices(iommu, info->dev);
-
- /* clear this iommu in iommu_bmp, update iommu count
- * and capabilities
- */
- spin_lock_irqsave(&domain->iommu_lock, flags2);
- if (test_and_clear_bit(iommu->seq_id,
- domain->iommu_bmp)) {
- domain->iommu_count--;
- domain_update_iommu_cap(domain);
- }
- spin_unlock_irqrestore(&domain->iommu_lock, flags2);
-
- free_devinfo_mem(info);
- spin_lock_irqsave(&device_domain_lock, flags1);
- }
- spin_unlock_irqrestore(&device_domain_lock, flags1);
-}
-
-/* domain id for virtual machine, it won't be set in context */
-static atomic_t vm_domid = ATOMIC_INIT(0);
-
-static struct dmar_domain *iommu_alloc_vm_domain(void)
-{
- struct dmar_domain *domain;
-
- domain = alloc_domain_mem();
- if (!domain)
- return NULL;
-
- domain->id = atomic_inc_return(&vm_domid);
- domain->nid = -1;
- memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
- domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
-
- return domain;
-}
-
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->iommu_lock);
-
domain_reserve_special_ranges(domain);
/* calculate AGAW */
@@ -3873,9 +4128,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
adjust_width = guestwidth_to_adjustwidth(guest_width);
domain->agaw = width_to_agaw(adjust_width);
- INIT_LIST_HEAD(&domain->devices);
-
- domain->iommu_count = 0;
domain->iommu_coherency = 0;
domain->iommu_snooping = 0;
domain->iommu_superpage = 0;
@@ -3890,53 +4142,11 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
return 0;
}
-static void iommu_free_vm_domain(struct dmar_domain *domain)
-{
- unsigned long flags;
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- unsigned long i;
- unsigned long ndomains;
-
- for_each_active_iommu(iommu, drhd) {
- ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(i, iommu->domain_ids, ndomains) {
- if (iommu->domains[i] == domain) {
- spin_lock_irqsave(&iommu->lock, flags);
- clear_bit(i, iommu->domain_ids);
- iommu->domains[i] = NULL;
- spin_unlock_irqrestore(&iommu->lock, flags);
- break;
- }
- }
- }
-}
-
-static void vm_domain_exit(struct dmar_domain *domain)
-{
- /* Domain 0 is reserved, so dont process it */
- if (!domain)
- return;
-
- vm_domain_remove_all_dev_info(domain);
- /* destroy iovas */
- put_iova_domain(&domain->iovad);
-
- /* clear ptes */
- dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
- /* free page tables */
- dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
- iommu_free_vm_domain(domain);
- free_domain_mem(domain);
-}
-
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain;
- dmar_domain = iommu_alloc_vm_domain();
+ dmar_domain = alloc_domain(true);
if (!dmar_domain) {
printk(KERN_ERR
"intel_iommu_domain_init: dmar_domain == NULL\n");
@@ -3945,7 +4155,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
printk(KERN_ERR
"intel_iommu_domain_init() failed\n");
- vm_domain_exit(dmar_domain);
+ domain_exit(dmar_domain);
return -ENOMEM;
}
domain_update_iommu_cap(dmar_domain);
@@ -3963,33 +4173,32 @@ static void intel_iommu_domain_destroy(struct iommu_domain *domain)
struct dmar_domain *dmar_domain = domain->priv;
domain->priv = NULL;
- vm_domain_exit(dmar_domain);
+ domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct dmar_domain *dmar_domain = domain->priv;
- struct pci_dev *pdev = to_pci_dev(dev);
struct intel_iommu *iommu;
int addr_width;
+ u8 bus, devfn;
- /* normally pdev is not mapped */
- if (unlikely(domain_context_mapped(pdev))) {
+ /* normally dev is not mapped */
+ if (unlikely(domain_context_mapped(dev))) {
struct dmar_domain *old_domain;
- old_domain = find_domain(pdev);
+ old_domain = find_domain(dev);
if (old_domain) {
if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
- domain_remove_one_dev_info(old_domain, pdev);
+ domain_remove_one_dev_info(old_domain, dev);
else
domain_remove_dev_info(old_domain);
}
}
- iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
- pdev->devfn);
+ iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
@@ -4021,16 +4230,15 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
dmar_domain->agaw--;
}
- return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+ return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct dmar_domain *dmar_domain = domain->priv;
- struct pci_dev *pdev = to_pci_dev(dev);
- domain_remove_one_dev_info(dmar_domain, pdev);
+ domain_remove_one_dev_info(dmar_domain, dev);
}
static int intel_iommu_map(struct iommu_domain *domain,
@@ -4072,18 +4280,51 @@ static int intel_iommu_map(struct iommu_domain *domain,
}
static size_t intel_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+ unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = domain->priv;
- int order;
+ struct page *freelist = NULL;
+ struct intel_iommu *iommu;
+ unsigned long start_pfn, last_pfn;
+ unsigned int npages;
+ int iommu_id, num, ndomains, level = 0;
+
+ /* Cope with horrid API which requires us to unmap more than the
+ size argument if it happens to be a large-page mapping.
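+ For example, an unmap request that lands inside a 2MiB superpage
+ (level 2, i.e. 9 extra offset bits) is rounded up below to cover the
+ whole 2MiB region. */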
+ if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
+ BUG();
+
+ if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
+ size = VTD_PAGE_SIZE << level_to_offset_bits(level);
+
+ start_pfn = iova >> VTD_PAGE_SHIFT;
+ last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
+
+ freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
+
+ npages = last_pfn - start_pfn + 1;
+
+ for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
+ iommu = g_iommus[iommu_id];
+
+ /*
+ * find bit position of dmar_domain
+ */
+ ndomains = cap_ndoms(iommu->cap);
+ for_each_set_bit(num, iommu->domain_ids, ndomains) {
+ if (iommu->domains[num] == dmar_domain)
+ iommu_flush_iotlb_psi(iommu, num, start_pfn,
+ npages, !freelist, 0);
+ }
+
+ }
- order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
- (iova + size - 1) >> VTD_PAGE_SHIFT);
+ dma_free_pagelist(freelist);
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
- return PAGE_SIZE << order;
+ return size;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4091,9 +4332,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
{
struct dmar_domain *dmar_domain = domain->priv;
struct dma_pte *pte;
+ int level = 0;
u64 phys = 0;
- pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
+ pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
if (pte)
phys = dma_pte_addr(pte);
@@ -4121,9 +4363,9 @@ static int intel_iommu_add_device(struct device *dev)
struct pci_dev *bridge, *dma_pdev = NULL;
struct iommu_group *group;
int ret;
+ u8 bus, devfn;
- if (!device_to_iommu(pci_domain_nr(pdev->bus),
- pdev->bus->number, pdev->devfn))
+ if (!device_to_iommu(dev, &bus, &devfn))
return -ENODEV;
bridge = pci_find_upstream_pcie_bridge(pdev);
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index ef5f65dbafe9..9b174893f0f5 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -38,6 +38,17 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
+/*
+ * Lock ordering:
+ * ->dmar_global_lock
+ * ->irq_2_ir_lock
+ * ->qi->q_lock
+ * ->iommu->register_lock
+ * Note:
+ * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
+ * in a single-threaded environment with interrupts disabled, so no need to take
+ * the dmar_global_lock.
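+ *
+ * An illustrative nesting for a path that needs both locks, implied by
+ * the ordering above (a sketch, not a quote of any one function):
+ *
+ *	down_read(&dmar_global_lock);
+ *	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ *	... update the interrupt remapping table ...
+ *	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ *	up_read(&dmar_global_lock);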
+ */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static int __init parse_ioapics_under_ir(void);
@@ -307,12 +318,14 @@ static int set_ioapic_sid(struct irte *irte, int apic)
if (!irte)
return -1;
+ down_read(&dmar_global_lock);
for (i = 0; i < MAX_IO_APICS; i++) {
if (ir_ioapic[i].id == apic) {
sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
break;
}
}
+ up_read(&dmar_global_lock);
if (sid == 0) {
pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
@@ -332,12 +345,14 @@ static int set_hpet_sid(struct irte *irte, u8 id)
if (!irte)
return -1;
+ down_read(&dmar_global_lock);
for (i = 0; i < MAX_HPET_TBS; i++) {
if (ir_hpet[i].id == id) {
sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
break;
}
}
+ up_read(&dmar_global_lock);
if (sid == 0) {
pr_warning("Failed to set source-id of HPET block (%d)\n", id);
@@ -794,10 +809,16 @@ static int __init parse_ioapics_under_ir(void)
static int __init ir_dev_scope_init(void)
{
+ int ret;
+
if (!irq_remapping_enabled)
return 0;
- return dmar_dev_scope_init();
+ down_write(&dmar_global_lock);
+ ret = dmar_dev_scope_init();
+ up_write(&dmar_global_lock);
+
+ return ret;
}
rootfs_initcall(ir_dev_scope_init);
@@ -878,23 +899,27 @@ static int intel_setup_ioapic_entry(int irq,
struct io_apic_irq_attr *attr)
{
int ioapic_id = mpc_ioapic_id(attr->ioapic);
- struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
+ struct intel_iommu *iommu;
struct IR_IO_APIC_route_entry *entry;
struct irte irte;
int index;
+ down_read(&dmar_global_lock);
+ iommu = map_ioapic_to_ir(ioapic_id);
if (!iommu) {
pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
- return -ENODEV;
- }
-
- entry = (struct IR_IO_APIC_route_entry *)route_entry;
-
- index = alloc_irte(iommu, irq, 1);
- if (index < 0) {
- pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
- return -ENOMEM;
+ index = -ENODEV;
+ } else {
+ index = alloc_irte(iommu, irq, 1);
+ if (index < 0) {
+ pr_warn("Failed to allocate IRTE for ioapic %d\n",
+ ioapic_id);
+ index = -ENOMEM;
+ }
}
+ up_read(&dmar_global_lock);
+ if (index < 0)
+ return index;
prepare_irte(&irte, vector, destination);
@@ -913,6 +938,7 @@ static int intel_setup_ioapic_entry(int irq,
irte.avail, irte.vector, irte.dest_id,
irte.sid, irte.sq, irte.svt);
+ entry = (struct IR_IO_APIC_route_entry *)route_entry;
memset(entry, 0, sizeof(*entry));
entry->index2 = (index >> 15) & 0x1;
@@ -1043,20 +1069,23 @@ static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
struct intel_iommu *iommu;
int index;
+ down_read(&dmar_global_lock);
iommu = map_dev_to_ir(dev);
if (!iommu) {
printk(KERN_ERR
"Unable to map PCI %s to iommu\n", pci_name(dev));
- return -ENOENT;
+ index = -ENOENT;
+ } else {
+ index = alloc_irte(iommu, irq, nvec);
+ if (index < 0) {
+ printk(KERN_ERR
+ "Unable to allocate %d IRTE for PCI %s\n",
+ nvec, pci_name(dev));
+ index = -ENOSPC;
+ }
}
+ up_read(&dmar_global_lock);
- index = alloc_irte(iommu, irq, nvec);
- if (index < 0) {
- printk(KERN_ERR
- "Unable to allocate %d IRTE for PCI %s\n", nvec,
- pci_name(dev));
- return -ENOSPC;
- }
return index;
}
@@ -1064,33 +1093,40 @@ static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
int index, int sub_handle)
{
struct intel_iommu *iommu;
+ int ret = -ENOENT;
+ down_read(&dmar_global_lock);
iommu = map_dev_to_ir(pdev);
- if (!iommu)
- return -ENOENT;
- /*
- * setup the mapping between the irq and the IRTE
- * base index, the sub_handle pointing to the
- * appropriate interrupt remap table entry.
- */
- set_irte_irq(irq, iommu, index, sub_handle);
+ if (iommu) {
+ /*
+ * setup the mapping between the irq and the IRTE
+ * base index, the sub_handle pointing to the
+ * appropriate interrupt remap table entry.
+ */
+ set_irte_irq(irq, iommu, index, sub_handle);
+ ret = 0;
+ }
+ up_read(&dmar_global_lock);
- return 0;
+ return ret;
}
static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
{
- struct intel_iommu *iommu = map_hpet_to_ir(id);
+ int ret = -1;
+ struct intel_iommu *iommu;
int index;
- if (!iommu)
- return -1;
-
- index = alloc_irte(iommu, irq, 1);
- if (index < 0)
- return -1;
+ down_read(&dmar_global_lock);
+ iommu = map_hpet_to_ir(id);
+ if (iommu) {
+ index = alloc_irte(iommu, irq, 1);
+ if (index >= 0)
+ ret = 0;
+ }
+ up_read(&dmar_global_lock);
- return 0;
+ return ret;
}
struct irq_remap_ops intel_irq_remap_ops = {
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 67da6cff74e8..f6b17e6af2fb 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -342,19 +342,30 @@ __is_range_overlap(struct rb_node *node,
return 0;
}
+static inline struct iova *
+alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
+{
+ struct iova *iova;
+
+ iova = alloc_iova_mem();
+ if (iova) {
+ iova->pfn_lo = pfn_lo;
+ iova->pfn_hi = pfn_hi;
+ }
+
+ return iova;
+}
+
static struct iova *
__insert_new_range(struct iova_domain *iovad,
unsigned long pfn_lo, unsigned long pfn_hi)
{
struct iova *iova;
- iova = alloc_iova_mem();
- if (!iova)
- return iova;
+ iova = alloc_and_init_iova(pfn_lo, pfn_hi);
+ if (iova)
+ iova_insert_rbtree(&iovad->rbroot, iova);
- iova->pfn_hi = pfn_hi;
- iova->pfn_lo = pfn_lo;
- iova_insert_rbtree(&iovad->rbroot, iova);
return iova;
}
@@ -433,3 +444,44 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
}
spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
+
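+/*
+ * split_and_remove_iova - carve [pfn_lo, pfn_hi] out of an existing iova
+ *
+ * Removes @iova from @iovad and re-inserts any part of it that lies
+ * outside [pfn_lo, pfn_hi] as new nodes. Returns @iova, now covering
+ * only the carved-out range, or NULL if allocating a split node failed
+ * (in which case the tree is left untouched).
+ */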
+struct iova *
+split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
+ unsigned long pfn_lo, unsigned long pfn_hi)
+{
+ unsigned long flags;
+ struct iova *prev = NULL, *next = NULL;
+
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ if (iova->pfn_lo < pfn_lo) {
+ prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
+ if (prev == NULL)
+ goto error;
+ }
+ if (iova->pfn_hi > pfn_hi) {
+ next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
+ if (next == NULL)
+ goto error;
+ }
+
+ __cached_rbnode_delete_update(iovad, iova);
+ rb_erase(&iova->node, &iovad->rbroot);
+
+ if (prev) {
+ iova_insert_rbtree(&iovad->rbroot, prev);
+ iova->pfn_lo = pfn_lo;
+ }
+ if (next) {
+ iova_insert_rbtree(&iovad->rbroot, next);
+ iova->pfn_hi = pfn_hi;
+ }
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+ return iova;
+
+error:
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ if (prev)
+ free_iova_mem(prev);
+ return NULL;
+}
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index bcd78a720630..7fcbfc498fa9 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -23,6 +23,9 @@
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_iommu.h>
+#include <linux/of_irq.h>
#include <asm/cacheflush.h>
@@ -146,13 +149,10 @@ static int iommu_enable(struct omap_iommu *obj)
struct platform_device *pdev = to_platform_device(obj->dev);
struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (!pdata)
- return -EINVAL;
-
if (!arch_iommu)
return -ENODEV;
- if (pdata->deassert_reset) {
+ if (pdata && pdata->deassert_reset) {
err = pdata->deassert_reset(pdev, pdata->reset_name);
if (err) {
dev_err(obj->dev, "deassert_reset failed: %d\n", err);
@@ -172,14 +172,11 @@ static void iommu_disable(struct omap_iommu *obj)
struct platform_device *pdev = to_platform_device(obj->dev);
struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (!pdata)
- return;
-
arch_iommu->disable(obj);
pm_runtime_put_sync(obj->dev);
- if (pdata->assert_reset)
+ if (pdata && pdata->assert_reset)
pdata->assert_reset(pdev, pdata->reset_name);
}
@@ -523,7 +520,8 @@ static void flush_iopte_range(u32 *first, u32 *last)
static void iopte_free(u32 *iopte)
{
/* Note: freed iopte's must be clean ready for re-use */
- kmem_cache_free(iopte_cachep, iopte);
+ if (iopte)
+ kmem_cache_free(iopte_cachep, iopte);
}
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
@@ -863,7 +861,7 @@ static int device_match_by_alias(struct device *dev, void *data)
**/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
- int err = -ENOMEM;
+ int err;
struct device *dev;
struct omap_iommu *obj;
@@ -871,7 +869,7 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
(void *)name,
device_match_by_alias);
if (!dev)
- return NULL;
+ return ERR_PTR(-ENODEV);
obj = to_iommu(dev);
@@ -890,8 +888,10 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
goto err_enable;
flush_iotlb_all(obj);
- if (!try_module_get(obj->owner))
+ if (!try_module_get(obj->owner)) {
+ err = -ENODEV;
goto err_module;
+ }
spin_unlock(&obj->iommu_lock);
@@ -940,17 +940,41 @@ static int omap_iommu_probe(struct platform_device *pdev)
struct omap_iommu *obj;
struct resource *res;
struct iommu_platform_data *pdata = pdev->dev.platform_data;
+ struct device_node *of = pdev->dev.of_node;
- obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
+ obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
if (!obj)
return -ENOMEM;
- obj->nr_tlb_entries = pdata->nr_tlb_entries;
- obj->name = pdata->name;
+ if (of) {
+ obj->name = dev_name(&pdev->dev);
+ obj->nr_tlb_entries = 32;
+ err = of_property_read_u32(of, "ti,#tlb-entries",
+ &obj->nr_tlb_entries);
+ if (err && err != -EINVAL)
+ return err;
+ if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
+ return -EINVAL;
+ /*
+ * da_start and da_end are needed for omap-iovmm, so hardcode
+ * these values as used by OMAP3 ISP - the only user of
+ * omap-iovmm
+ */
+ obj->da_start = 0;
+ obj->da_end = 0xfffff000;
+ if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
+ obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
+ } else {
+ obj->nr_tlb_entries = pdata->nr_tlb_entries;
+ obj->name = pdata->name;
+ obj->da_start = pdata->da_start;
+ obj->da_end = pdata->da_end;
+ }
+ if (obj->da_end <= obj->da_start)
+ return -EINVAL;
+
obj->dev = &pdev->dev;
obj->ctx = (void *)obj + sizeof(*obj);
- obj->da_start = pdata->da_start;
- obj->da_end = pdata->da_end;
spin_lock_init(&obj->iommu_lock);
mutex_init(&obj->mmap_lock);
@@ -958,33 +982,18 @@ static int omap_iommu_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&obj->mmap);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -ENODEV;
- goto err_mem;
- }
-
- res = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
- if (!res) {
- err = -EIO;
- goto err_mem;
- }
-
- obj->regbase = ioremap(res->start, resource_size(res));
- if (!obj->regbase) {
- err = -ENOMEM;
- goto err_ioremap;
- }
+ obj->regbase = devm_ioremap_resource(obj->dev, res);
+ if (IS_ERR(obj->regbase))
+ return PTR_ERR(obj->regbase);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = -ENODEV;
- goto err_irq;
- }
- err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
- dev_name(&pdev->dev), obj);
+ if (irq < 0)
+ return -ENODEV;
+
+ err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
+ dev_name(obj->dev), obj);
if (err < 0)
- goto err_irq;
+ return err;
platform_set_drvdata(pdev, obj);
pm_runtime_irq_safe(obj->dev);
@@ -992,42 +1001,34 @@ static int omap_iommu_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "%s registered\n", obj->name);
return 0;
-
-err_irq:
- iounmap(obj->regbase);
-err_ioremap:
- release_mem_region(res->start, resource_size(res));
-err_mem:
- kfree(obj);
- return err;
}
static int omap_iommu_remove(struct platform_device *pdev)
{
- int irq;
- struct resource *res;
struct omap_iommu *obj = platform_get_drvdata(pdev);
iopgtable_clear_entry_all(obj);
- irq = platform_get_irq(pdev, 0);
- free_irq(irq, obj);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
- iounmap(obj->regbase);
-
pm_runtime_disable(obj->dev);
dev_info(&pdev->dev, "%s removed\n", obj->name);
- kfree(obj);
return 0;
}
+static struct of_device_id omap_iommu_of_match[] = {
+ { .compatible = "ti,omap2-iommu" },
+ { .compatible = "ti,omap4-iommu" },
+ { .compatible = "ti,dra7-iommu" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_iommu_of_match);
+
static struct platform_driver omap_iommu_driver = {
.probe = omap_iommu_probe,
.remove = omap_iommu_remove,
.driver = {
.name = "omap-iommu",
+ .of_match_table = of_match_ptr(omap_iommu_of_match),
},
};
@@ -1253,6 +1254,49 @@ static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
+static int omap_iommu_add_device(struct device *dev)
+{
+ struct omap_iommu_arch_data *arch_data;
+ struct device_node *np;
+
+ /*
+ * Allocate the archdata iommu structure for DT-based devices.
+ *
+ * TODO: Simplify this when removing non-DT support completely from the
+ * IOMMU users.
+ */
+ if (!dev->of_node)
+ return 0;
+
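+	/*
+	 * A client device node is expected to reference its IOMMU through
+	 * an "iommus" phandle, e.g. iommus = <&mmu_isp>; (the label here
+	 * is only illustrative).
+	 */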
+ np = of_parse_phandle(dev->of_node, "iommus", 0);
+ if (!np)
+ return 0;
+
+ arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
+ if (!arch_data) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ arch_data->name = kstrdup(dev_name(dev), GFP_KERNEL);
+ dev->archdata.iommu = arch_data;
+
+ of_node_put(np);
+
+ return 0;
+}
+
+static void omap_iommu_remove_device(struct device *dev)
+{
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+
+ if (!dev->of_node || !arch_data)
+ return;
+
+ kfree(arch_data->name);
+ kfree(arch_data);
+}
+
static struct iommu_ops omap_iommu_ops = {
.domain_init = omap_iommu_domain_init,
.domain_destroy = omap_iommu_domain_destroy,
@@ -1262,6 +1306,8 @@ static struct iommu_ops omap_iommu_ops = {
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
.domain_has_cap = omap_iommu_domain_has_cap,
+ .add_device = omap_iommu_add_device,
+ .remove_device = omap_iommu_remove_device,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 120084206602..ea920c3e94ff 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -52,6 +52,8 @@ struct omap_iommu {
void *ctx; /* iommu context: registres saved area */
u32 da_start;
u32 da_end;
+
+ int has_bus_err_back;
};
struct cr_regs {
@@ -130,6 +132,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_READ_CAM 0x68
#define MMU_READ_RAM 0x6c
#define MMU_EMU_FAULT_AD 0x70
+#define MMU_GP_REG 0x88
#define MMU_REG_SIZE 256
@@ -163,6 +166,8 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_RAM_MIXED_MASK (1 << MMU_RAM_MIXED_SHIFT)
#define MMU_RAM_MIXED MMU_RAM_MIXED_MASK
+#define MMU_GP_REG_BUS_ERR_BACK_EN 0x1
+
/*
* utilities for super page(16MB, 1MB, 64KB and 4KB)
*/
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
index d745094a69dd..5e1ea3b0bf16 100644
--- a/drivers/iommu/omap-iommu2.c
+++ b/drivers/iommu/omap-iommu2.c
@@ -98,6 +98,9 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
iommu_write_reg(obj, pa, MMU_TTB);
+ if (obj->has_bus_err_back)
+ iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
+
__iommu_set_twl(obj, true);
return 0;
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index 7a3b928fad1c..464acda0bbc4 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -343,7 +343,7 @@ static int shmobile_iommu_add_device(struct device *dev)
mapping = archdata->iommu_mapping;
if (!mapping) {
mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
- L1_LEN << 20, 0);
+ L1_LEN << 20);
if (IS_ERR(mapping))
return PTR_ERR(mapping);
archdata->iommu_mapping = mapping;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 61ffdca96e25..d770f7406631 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -39,6 +39,14 @@ config IMGPDC_IRQ
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+config CLPS711X_IRQCHIP
+ bool
+ depends on ARCH_CLPS711X
+ select IRQ_DOMAIN
+ select MULTI_IRQ_HANDLER
+ select SPARSE_IRQ
+ default y
+
config ORION_IRQCHIP
bool
select IRQ_DOMAIN
@@ -69,3 +77,11 @@ config VERSATILE_FPGA_IRQ_NR
config XTENSA_MX
bool
select IRQ_DOMAIN
+
+config IRQ_CROSSBAR
+ bool
+ help
+ Support for a CROSSBAR IP that precedes the main interrupt controller.
+ The primary irqchip invokes the crossbar's callback, which in turn
+ allocates a free IRQ and configures the IP. Thus the peripheral
+ interrupts are routed to one of the free irqchip interrupt lines.
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 5194afb39e78..f180f8d5fb7b 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -10,8 +10,10 @@ obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
obj-$(CONFIG_METAG) += irq-metag-ext.o
obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
obj-$(CONFIG_ARCH_MOXART) += irq-moxart.o
+obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o
obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
+obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
@@ -26,3 +28,4 @@ obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
+obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index 40e6440348ff..f8636a650cf6 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -17,7 +17,6 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <asm/mach/irq.h>
#include "irqchip.h"
@@ -81,7 +80,7 @@ static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);
if (unlikely(!cascade_irq))
- do_bad_IRQ(irq, desc);
+ handle_bad_irq(irq, desc);
else
generic_handle_irq(cascade_irq);
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 540956465ed2..41be897df8d5 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -42,6 +43,7 @@
#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
+#define ARMADA_375_PPI_CAUSE (0x10)
#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc)
@@ -352,7 +354,63 @@ static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static asmlinkage void __exception_irq_entry
+#ifdef CONFIG_PCI_MSI
+static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
+{
+ u32 msimask, msinr;
+
+ msimask = readl_relaxed(per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+ & PCI_MSI_DOORBELL_MASK;
+
+ writel(~msimask, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+ for (msinr = PCI_MSI_DOORBELL_START;
+ msinr < PCI_MSI_DOORBELL_END; msinr++) {
+ int irq;
+
+ if (!(msimask & BIT(msinr)))
+ continue;
+
+ irq = irq_find_mapping(armada_370_xp_msi_domain,
+ msinr - 16);
+
+ if (is_chained)
+ generic_handle_irq(irq);
+ else
+ handle_IRQ(irq, regs);
+ }
+}
+#else
+static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
+#endif
+
+static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned long irqmap, irqn;
+ unsigned int cascade_irq;
+
+ chained_irq_enter(chip, desc);
+
+ irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
+
+ if (irqmap & BIT(0)) {
+ armada_370_xp_handle_msi_irq(NULL, true);
+ irqmap &= ~BIT(0);
+ }
+
+ for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
+ cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
+ generic_handle_irq(cascade_irq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
u32 irqstat, irqnr;
@@ -372,31 +430,9 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
continue;
}
-#ifdef CONFIG_PCI_MSI
/* MSI handling */
- if (irqnr == 1) {
- u32 msimask, msinr;
-
- msimask = readl_relaxed(per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
- & PCI_MSI_DOORBELL_MASK;
-
- writel(~msimask, per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
-
- for (msinr = PCI_MSI_DOORBELL_START;
- msinr < PCI_MSI_DOORBELL_END; msinr++) {
- int irq;
-
- if (!(msimask & BIT(msinr)))
- continue;
-
- irq = irq_find_mapping(armada_370_xp_msi_domain,
- msinr - 16);
- handle_IRQ(irq, regs);
- }
- }
-#endif
+ if (irqnr == 1)
+ armada_370_xp_handle_msi_irq(regs, false);
#ifdef CONFIG_SMP
/* IPI Handling */
@@ -427,6 +463,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
struct device_node *parent)
{
struct resource main_int_res, per_cpu_int_res;
+ int parent_irq;
u32 control;
BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -455,8 +492,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
BUG_ON(!armada_370_xp_mpic_domain);
- irq_set_default_host(armada_370_xp_mpic_domain);
-
#ifdef CONFIG_SMP
armada_xp_mpic_smp_cpu_init();
@@ -472,7 +507,14 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
armada_370_xp_msi_init(node, main_int_res.start);
- set_handle_irq(armada_370_xp_handle_irq);
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (parent_irq <= 0) {
+ irq_set_default_host(armada_370_xp_mpic_domain);
+ set_handle_irq(armada_370_xp_handle_irq);
+ } else {
+ irq_set_chained_handler(parent_irq,
+ armada_370_xp_mpic_handle_cascade_irq);
+ }
return 0;
}
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index 1693b8e7f26a..5916d6cdafa1 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -95,7 +95,7 @@ struct armctrl_ic {
};
static struct armctrl_ic intc __read_mostly;
-static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
+static void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs);
static void armctrl_mask_irq(struct irq_data *d)
@@ -196,7 +196,7 @@ static void armctrl_handle_shortcut(int bank, struct pt_regs *regs,
handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
}
-static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
+static void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs)
{
u32 stat, irq;
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
new file mode 100644
index 000000000000..33340dc97d1d
--- /dev/null
+++ b/drivers/irqchip/irq-clps711x.c
@@ -0,0 +1,243 @@
+/*
+ * CLPS711X IRQ driver
+ *
+ * Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+#define CLPS711X_INTSR1 (0x0240)
+#define CLPS711X_INTMR1 (0x0280)
+#define CLPS711X_BLEOI (0x0600)
+#define CLPS711X_MCEOI (0x0640)
+#define CLPS711X_TEOI (0x0680)
+#define CLPS711X_TC1EOI (0x06c0)
+#define CLPS711X_TC2EOI (0x0700)
+#define CLPS711X_RTCEOI (0x0740)
+#define CLPS711X_UMSEOI (0x0780)
+#define CLPS711X_COEOI (0x07c0)
+#define CLPS711X_INTSR2 (0x1240)
+#define CLPS711X_INTMR2 (0x1280)
+#define CLPS711X_SRXEOF (0x1600)
+#define CLPS711X_KBDEOI (0x1700)
+#define CLPS711X_INTSR3 (0x2240)
+#define CLPS711X_INTMR3 (0x2280)
+
+static const struct {
+#define CLPS711X_FLAG_EN (1 << 0)
+#define CLPS711X_FLAG_FIQ (1 << 1)
+ unsigned int flags;
+ phys_addr_t eoi;
+} clps711x_irqs[] = {
+ [1] = { CLPS711X_FLAG_FIQ, CLPS711X_BLEOI, },
+ [3] = { CLPS711X_FLAG_FIQ, CLPS711X_MCEOI, },
+ [4] = { CLPS711X_FLAG_EN, CLPS711X_COEOI, },
+ [5] = { CLPS711X_FLAG_EN, },
+ [6] = { CLPS711X_FLAG_EN, },
+ [7] = { CLPS711X_FLAG_EN, },
+ [8] = { CLPS711X_FLAG_EN, CLPS711X_TC1EOI, },
+ [9] = { CLPS711X_FLAG_EN, CLPS711X_TC2EOI, },
+ [10] = { CLPS711X_FLAG_EN, CLPS711X_RTCEOI, },
+ [11] = { CLPS711X_FLAG_EN, CLPS711X_TEOI, },
+ [12] = { CLPS711X_FLAG_EN, },
+ [13] = { CLPS711X_FLAG_EN, },
+ [14] = { CLPS711X_FLAG_EN, CLPS711X_UMSEOI, },
+ [15] = { CLPS711X_FLAG_EN, CLPS711X_SRXEOF, },
+ [16] = { CLPS711X_FLAG_EN, CLPS711X_KBDEOI, },
+ [17] = { CLPS711X_FLAG_EN, },
+ [18] = { CLPS711X_FLAG_EN, },
+ [28] = { CLPS711X_FLAG_EN, },
+ [29] = { CLPS711X_FLAG_EN, },
+ [32] = { CLPS711X_FLAG_FIQ, },
+};
+
+static struct {
+ void __iomem *base;
+ void __iomem *intmr[3];
+ void __iomem *intsr[3];
+ struct irq_domain *domain;
+ struct irq_domain_ops ops;
+} *clps711x_intc;
+
+static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
+{
+ u32 irqnr, irqstat;
+
+ do {
+ irqstat = readw_relaxed(clps711x_intc->intmr[0]) &
+ readw_relaxed(clps711x_intc->intsr[0]);
+ if (irqstat) {
+ irqnr = irq_find_mapping(clps711x_intc->domain,
+ fls(irqstat) - 1);
+ handle_IRQ(irqnr, regs);
+ }
+
+ irqstat = readw_relaxed(clps711x_intc->intmr[1]) &
+ readw_relaxed(clps711x_intc->intsr[1]);
+ if (irqstat) {
+ irqnr = irq_find_mapping(clps711x_intc->domain,
+ fls(irqstat) - 1 + 16);
+ handle_IRQ(irqnr, regs);
+ }
+ } while (irqstat);
+}
+
+static void clps711x_intc_eoi(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hwirq].eoi);
+}
+
+static void clps711x_intc_mask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ void __iomem *intmr = clps711x_intc->intmr[hwirq / 16];
+ u32 tmp;
+
+ tmp = readl_relaxed(intmr);
+ tmp &= ~(1 << (hwirq % 16));
+ writel_relaxed(tmp, intmr);
+}
+
+static void clps711x_intc_unmask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ void __iomem *intmr = clps711x_intc->intmr[hwirq / 16];
+ u32 tmp;
+
+ tmp = readl_relaxed(intmr);
+ tmp |= 1 << (hwirq % 16);
+ writel_relaxed(tmp, intmr);
+}
+
+static struct irq_chip clps711x_intc_chip = {
+ .name = "clps711x-intc",
+ .irq_eoi = clps711x_intc_eoi,
+ .irq_mask = clps711x_intc_mask,
+ .irq_unmask = clps711x_intc_unmask,
+};
+
+static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_flow_handler_t handler = handle_level_irq;
+ unsigned int flags = IRQF_VALID | IRQF_PROBE;
+
+ if (!clps711x_irqs[hw].flags)
+ return 0;
+
+ if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) {
+ handler = handle_bad_irq;
+ flags |= IRQF_NOAUTOEN;
+ } else if (clps711x_irqs[hw].eoi) {
+ handler = handle_fasteoi_irq;
+ }
+
+ /* Clear down pending interrupt */
+ if (clps711x_irqs[hw].eoi)
+ writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi);
+
+ irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler);
+ set_irq_flags(virq, flags);
+
+ return 0;
+}
+
+static int __init _clps711x_intc_init(struct device_node *np,
+ phys_addr_t base, resource_size_t size)
+{
+ int err;
+
+ clps711x_intc = kzalloc(sizeof(*clps711x_intc), GFP_KERNEL);
+ if (!clps711x_intc)
+ return -ENOMEM;
+
+ clps711x_intc->base = ioremap(base, size);
+ if (!clps711x_intc->base) {
+ err = -ENOMEM;
+ goto out_kfree;
+ }
+
+ clps711x_intc->intsr[0] = clps711x_intc->base + CLPS711X_INTSR1;
+ clps711x_intc->intmr[0] = clps711x_intc->base + CLPS711X_INTMR1;
+ clps711x_intc->intsr[1] = clps711x_intc->base + CLPS711X_INTSR2;
+ clps711x_intc->intmr[1] = clps711x_intc->base + CLPS711X_INTMR2;
+ clps711x_intc->intsr[2] = clps711x_intc->base + CLPS711X_INTSR3;
+ clps711x_intc->intmr[2] = clps711x_intc->base + CLPS711X_INTMR3;
+
+ /* Mask all interrupts */
+ writel_relaxed(0, clps711x_intc->intmr[0]);
+ writel_relaxed(0, clps711x_intc->intmr[1]);
+ writel_relaxed(0, clps711x_intc->intmr[2]);
+
+ err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id());
+ if (IS_ERR_VALUE(err))
+ goto out_iounmap;
+
+ clps711x_intc->ops.map = clps711x_intc_irq_map;
+ clps711x_intc->ops.xlate = irq_domain_xlate_onecell;
+ clps711x_intc->domain =
+ irq_domain_add_legacy(np, ARRAY_SIZE(clps711x_irqs),
+ 0, 0, &clps711x_intc->ops, NULL);
+ if (!clps711x_intc->domain) {
+ err = -ENOMEM;
+ goto out_irqfree;
+ }
+
+ irq_set_default_host(clps711x_intc->domain);
+ set_handle_irq(clps711x_irqh);
+
+#ifdef CONFIG_FIQ
+ init_FIQ(0);
+#endif
+
+ return 0;
+
+out_irqfree:
+ irq_free_descs(0, ARRAY_SIZE(clps711x_irqs));
+
+out_iounmap:
+ iounmap(clps711x_intc->base);
+
+out_kfree:
+ kfree(clps711x_intc);
+
+ return err;
+}
+
+void __init clps711x_intc_init(phys_addr_t base, resource_size_t size)
+{
+ BUG_ON(_clps711x_intc_init(NULL, base, size));
+}
+
+#ifdef CONFIG_IRQCHIP
+static int __init clps711x_intc_init_dt(struct device_node *np,
+ struct device_node *parent)
+{
+ struct resource res;
+ int err;
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ return err;
+
+ return _clps711x_intc_init(np, res.start, resource_size(&res));
+}
+IRQCHIP_DECLARE(clps711x, "cirrus,clps711x-intc", clps711x_intc_init_dt);
+#endif
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
new file mode 100644
index 000000000000..fc817d28d1fe
--- /dev/null
+++ b/drivers/irqchip/irq-crossbar.c
@@ -0,0 +1,208 @@
+/*
+ * drivers/irqchip/irq-crossbar.c
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sricharan R <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/irqchip/arm-gic.h>
+
+#define IRQ_FREE -1
+#define GIC_IRQ_START 32
+
+/*
+ * @int_max: maximum number of supported interrupts
+ * @irq_map: array of interrupts to crossbar number mapping
+ * @crossbar_base: crossbar base address
+ * @register_offsets: offsets for each irq number
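+ * @write: routine used to program one crossbar register; the access
+ *	width (byte/word/long) is chosen from the ti,reg-size property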
+ */
+struct crossbar_device {
+ uint int_max;
+ uint *irq_map;
+ void __iomem *crossbar_base;
+ int *register_offsets;
+ void (*write) (int, int);
+};
+
+static struct crossbar_device *cb;
+
+static inline void crossbar_writel(int irq_no, int cb_no)
+{
+ writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
+}
+
+static inline void crossbar_writew(int irq_no, int cb_no)
+{
+ writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
+}
+
+static inline void crossbar_writeb(int irq_no, int cb_no)
+{
+ writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
+}
+
+static inline int allocate_free_irq(int cb_no)
+{
+ int i;
+
+ for (i = 0; i < cb->int_max; i++) {
+ if (cb->irq_map[i] == IRQ_FREE) {
+ cb->irq_map[i] = cb_no;
+ return i;
+ }
+ }
+
+ return -ENODEV;
+}
+
+static int crossbar_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ cb->write(hw - GIC_IRQ_START, cb->irq_map[hw - GIC_IRQ_START]);
+ return 0;
+}
+
+static void crossbar_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_hw_number_t hw = irq_get_irq_data(irq)->hwirq;
+
+ if (hw > GIC_IRQ_START)
+ cb->irq_map[hw - GIC_IRQ_START] = IRQ_FREE;
+}
+
+static int crossbar_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ unsigned long ret;
+
+ ret = allocate_free_irq(intspec[1]);
+
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ *out_hwirq = ret + GIC_IRQ_START;
+ return 0;
+}
+
+const struct irq_domain_ops routable_irq_domain_ops = {
+ .map = crossbar_domain_map,
+ .unmap = crossbar_domain_unmap,
+ .xlate = crossbar_domain_xlate
+};
+
+static int __init crossbar_of_init(struct device_node *node)
+{
+ int i, size, max, reserved = 0, entry;
+ const __be32 *irqsr;
+
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
+
+ if (!cb)
+ return -ENOMEM;
+
+ cb->crossbar_base = of_iomap(node, 0);
+ if (!cb->crossbar_base)
+ goto err1;
+
+ of_property_read_u32(node, "ti,max-irqs", &max);
+ cb->irq_map = kzalloc(max * sizeof(int), GFP_KERNEL);
+ if (!cb->irq_map)
+ goto err2;
+
+ cb->int_max = max;
+
+ for (i = 0; i < max; i++)
+ cb->irq_map[i] = IRQ_FREE;
+
+ /* Get and mark reserved irqs */
+ irqsr = of_get_property(node, "ti,irqs-reserved", &size);
+ if (irqsr) {
+ size /= sizeof(__be32);
+
+ for (i = 0; i < size; i++) {
+ of_property_read_u32_index(node,
+ "ti,irqs-reserved",
+ i, &entry);
+ if (entry >= max) {
+ pr_err("Invalid reserved entry\n");
+ goto err3;
+ }
+ cb->irq_map[entry] = 0;
+ }
+ }
+
+ cb->register_offsets = kzalloc(max * sizeof(int), GFP_KERNEL);
+ if (!cb->register_offsets)
+ goto err3;
+
+ of_property_read_u32(node, "ti,reg-size", &size);
+
+ switch (size) {
+ case 1:
+ cb->write = crossbar_writeb;
+ break;
+ case 2:
+ cb->write = crossbar_writew;
+ break;
+ case 4:
+ cb->write = crossbar_writel;
+ break;
+ default:
+ pr_err("Invalid reg-size property\n");
+ goto err4;
+ break;
+ }
+
+ /*
+ * Register offsets are not linear because of the
+ * reserved IRQs, so find and store the offsets once.
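+ *
+ * For example, with a 4-byte reg-size and entry 2 reserved, entry 3
+ * ends up at offset 8 rather than 12.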
+ */
+ for (i = 0; i < max; i++) {
+ if (!cb->irq_map[i])
+ continue;
+
+ cb->register_offsets[i] = reserved;
+ reserved += size;
+ }
+
+ register_routable_domain_ops(&routable_irq_domain_ops);
+ return 0;
+
+err4:
+ kfree(cb->register_offsets);
+err3:
+ kfree(cb->irq_map);
+err2:
+ iounmap(cb->crossbar_base);
+err1:
+ kfree(cb);
+ return -ENOMEM;
+}
+
+static const struct of_device_id crossbar_match[] __initconst = {
+ { .compatible = "ti,irq-crossbar" },
+ {}
+};
+
+int __init irqcrossbar_init(void)
+{
+ struct device_node *np;
+ np = of_find_matching_node(NULL, crossbar_match);
+ if (!np)
+ return -ENODEV;
+
+ crossbar_of_init(np);
+ return 0;
+}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 341c6016812d..4300b6606f5e 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -50,7 +50,7 @@
union gic_base {
void __iomem *common_base;
- void __percpu __iomem **percpu_base;
+ void __percpu * __iomem *percpu_base;
};
struct gic_chip_data {
@@ -279,7 +279,7 @@ static int gic_set_wake(struct irq_data *d, unsigned int on)
#define gic_set_wake NULL
#endif
-static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
u32 irqstat, irqnr;
struct gic_chip_data *gic = &gic_data[0];
@@ -648,7 +648,7 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
#endif
#ifdef CONFIG_SMP
-void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
int cpu;
unsigned long flags, map = 0;
@@ -661,9 +661,9 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
/*
* Ensure that stores to Normal memory are visible to the
- * other CPUs before issuing the IPI.
+ * other CPUs before they observe us issuing the IPI.
*/
- dsb();
+ dmb(ishst);
/* this always happens on GIC0 */
writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
@@ -824,16 +824,25 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_and_handler(irq, &gic_chip,
handle_fasteoi_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+ gic_routable_irq_domain_ops->map(d, irq, hw);
}
irq_set_chip_data(irq, d->host_data);
return 0;
}
+static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+ gic_routable_irq_domain_ops->unmap(d, irq);
+}
+
static int gic_irq_domain_xlate(struct irq_domain *d,
struct device_node *controller,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{
+ unsigned long ret = 0;
+
if (d->of_node != controller)
return -EINVAL;
if (intsize < 3)
@@ -843,11 +852,20 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
*out_hwirq = intspec[1] + 16;
/* For SPIs, we need to add 16 more to get the GIC irq ID number */
- if (!intspec[0])
- *out_hwirq += 16;
+ if (!intspec[0]) {
+ ret = gic_routable_irq_domain_ops->xlate(d, controller,
+ intspec,
+ intsize,
+ out_hwirq,
+ out_type);
+
+ if (IS_ERR_VALUE(ret))
+ return ret;
+ }
*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
- return 0;
+
+ return ret;
}
#ifdef CONFIG_SMP
@@ -869,11 +887,43 @@ static struct notifier_block gic_cpu_notifier = {
};
#endif
-const struct irq_domain_ops gic_irq_domain_ops = {
+static const struct irq_domain_ops gic_irq_domain_ops = {
.map = gic_irq_domain_map,
+ .unmap = gic_irq_domain_unmap,
.xlate = gic_irq_domain_xlate,
};
+/* Default functions for routable irq domain */
+static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ return 0;
+}
+
+static void gic_routable_irq_domain_unmap(struct irq_domain *d,
+ unsigned int irq)
+{
+}
+
+static int gic_routable_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ *out_hwirq += 16;
+ return 0;
+}
+
+const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
+ .map = gic_routable_irq_domain_map,
+ .unmap = gic_routable_irq_domain_unmap,
+ .xlate = gic_routable_irq_domain_xlate,
+};
+
+const struct irq_domain_ops *gic_routable_irq_domain_ops =
+ &gic_default_routable_irq_domain_ops;
+
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base,
u32 percpu_offset, struct device_node *node)
@@ -881,6 +931,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
irq_hw_number_t hwirq_base;
struct gic_chip_data *gic;
int gic_irqs, irq_base, i;
+ int nr_routable_irqs;
BUG_ON(gic_nr >= MAX_GIC_NR);
@@ -946,14 +997,25 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic->gic_irqs = gic_irqs;
gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
- irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
- if (IS_ERR_VALUE(irq_base)) {
- WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
- irq_start);
- irq_base = irq_start;
+
+ if (of_property_read_u32(node, "arm,routable-irqs",
+ &nr_routable_irqs)) {
+ irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
+ numa_node_id());
+ if (IS_ERR_VALUE(irq_base)) {
+ WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
+ irq_start);
+ irq_base = irq_start;
+ }
+
+ gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+ hwirq_base, &gic_irq_domain_ops, gic);
+ } else {
+ gic->domain = irq_domain_add_linear(node, nr_routable_irqs,
+ &gic_irq_domain_ops,
+ gic);
}
- gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
- hwirq_base, &gic_irq_domain_ops, gic);
+
if (WARN_ON(!gic->domain))
return;
@@ -974,7 +1036,8 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
#ifdef CONFIG_OF
static int gic_cnt __initdata;
-int __init gic_of_init(struct device_node *node, struct device_node *parent)
+static int __init
+gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *cpu_base;
void __iomem *dist_base;
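The gic_routable_irq_domain_ops pointer introduced above follows a common kernel idiom: a set of harmless default ops published through a plain pointer, which a platform crossbar driver may repoint before the GIC initializes so that SPI numbers can be remapped. A minimal sketch of that idiom, with illustrative names (routable_ops, crossbar_register) that are not part of the patch:

struct routable_ops {
	int (*map)(unsigned int irq);
	void (*unmap)(unsigned int irq);
};

/* No-op defaults used when no crossbar driver is present. */
static int default_map(unsigned int irq) { return 0; }
static void default_unmap(unsigned int irq) { }

static const struct routable_ops default_routable_ops = {
	.map	= default_map,
	.unmap	= default_unmap,
};

/* Callers always go through this pointer ... */
const struct routable_ops *routable_ops = &default_routable_ops;

/* ... and a crossbar driver can install its own ops early in boot. */
void crossbar_register(const struct routable_ops *ops)
{
	routable_ops = ops;
}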
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 2cb7cd0bc2f5..1c3e2c9b46ba 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -22,7 +22,7 @@
#include <linux/of_irq.h>
#include <asm/exception.h>
-#include <asm/mach/irq.h>
+#include <asm/hardirq.h>
#include "irqchip.h"
@@ -194,8 +194,7 @@ static struct mmp_intc_conf mmp2_conf = {
.conf_mask = 0x7f,
};
-static asmlinkage void __exception_irq_entry
-mmp_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
{
int irq, hwirq;
@@ -207,8 +206,7 @@ mmp_handle_irq(struct pt_regs *regs)
handle_IRQ(irq, regs);
}
-static asmlinkage void __exception_irq_entry
-mmp2_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
{
int irq, hwirq;
diff --git a/drivers/irqchip/irq-moxart.c b/drivers/irqchip/irq-moxart.c
index 5552fc2bf28a..00b3cc908f76 100644
--- a/drivers/irqchip/irq-moxart.c
+++ b/drivers/irqchip/irq-moxart.c
@@ -44,7 +44,7 @@ struct moxart_irq_data {
static struct moxart_irq_data intc;
-static asmlinkage void __exception_irq_entry handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry handle_irq(struct pt_regs *regs)
{
u32 irqstat;
int hwirq;
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index 8e41be62812e..e25f246cd2fb 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -30,7 +30,7 @@
static struct irq_domain *orion_irq_domain;
-static asmlinkage void
+static void
__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
{
struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
index 3a070c587ed9..581eefe331ae 100644
--- a/drivers/irqchip/irq-sirfsoc.c
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -47,7 +47,7 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
}
-static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
{
void __iomem *base = sirfsoc_irqdomain->host_data;
u32 irqstat, irqnr;
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index a5438d889245..6fcef4a95a18 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -36,18 +36,16 @@
static void __iomem *sun4i_irq_base;
static struct irq_domain *sun4i_irq_domain;
-static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);
+static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);
static void sun4i_irq_ack(struct irq_data *irqd)
{
unsigned int irq = irqd_to_hwirq(irqd);
- unsigned int irq_off = irq % 32;
- int reg = irq / 32;
- u32 val;
- val = readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg));
- writel(val | (1 << irq_off),
- sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg));
+ if (irq != 0)
+ return; /* Only IRQ 0 / the ENMI needs to be acked */
+
+ writel(BIT(0), sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0));
}
static void sun4i_irq_mask(struct irq_data *irqd)
@@ -76,16 +74,16 @@ static void sun4i_irq_unmask(struct irq_data *irqd)
static struct irq_chip sun4i_irq_chip = {
.name = "sun4i_irq",
- .irq_ack = sun4i_irq_ack,
+ .irq_eoi = sun4i_irq_ack,
.irq_mask = sun4i_irq_mask,
.irq_unmask = sun4i_irq_unmask,
+ .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
};
static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw)
{
- irq_set_chip_and_handler(virq, &sun4i_irq_chip,
- handle_level_irq);
+ irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq);
set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
return 0;
@@ -109,7 +107,7 @@ static int __init sun4i_of_init(struct device_node *node,
writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(1));
writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(2));
- /* Mask all the interrupts */
+ /* Unmask all the interrupts, ENABLE_REG(x) is used for masking */
writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(0));
writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(1));
writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(2));
@@ -134,16 +132,30 @@ static int __init sun4i_of_init(struct device_node *node,
return 0;
}
-IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-ic", sun4i_of_init);
+IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-a10-ic", sun4i_of_init);
-static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
{
u32 irq, hwirq;
+ /*
+ * hwirq == 0 can mean one of 3 things:
+ * 1) no more irqs pending
+ * 2) irq 0 pending
+ * 3) spurious irq
+ * So if we immediately get a reading of 0, check the irq-pending reg
+ * to differentiate between 2 and 3. We only do this once to avoid
+ * the extra check in the common case of 1 happening after having
+ * read the vector-reg once.
+ */
hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
- while (hwirq != 0) {
+ if (hwirq == 0 &&
+ !(readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0)) & BIT(0)))
+ return;
+
+ do {
irq = irq_find_mapping(sun4i_irq_domain, hwirq);
handle_IRQ(irq, regs);
hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
- }
+ } while (hwirq != 0);
}
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
new file mode 100644
index 000000000000..12f547a44ae4
--- /dev/null
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -0,0 +1,208 @@
+/*
+ * Allwinner A20/A31 SoCs NMI IRQ chip driver.
+ *
+ * Carlo Caione <carlo.caione@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/irqchip/chained_irq.h>
+#include "irqchip.h"
+
+#define SUNXI_NMI_SRC_TYPE_MASK 0x00000003
+
+enum {
+ SUNXI_SRC_TYPE_LEVEL_LOW = 0,
+ SUNXI_SRC_TYPE_EDGE_FALLING,
+ SUNXI_SRC_TYPE_LEVEL_HIGH,
+ SUNXI_SRC_TYPE_EDGE_RISING,
+};
+
+struct sunxi_sc_nmi_reg_offs {
+ u32 ctrl;
+ u32 pend;
+ u32 enable;
+};
+
+static struct sunxi_sc_nmi_reg_offs sun7i_reg_offs = {
+ .ctrl = 0x00,
+ .pend = 0x04,
+ .enable = 0x08,
+};
+
+static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
+ .ctrl = 0x00,
+ .pend = 0x04,
+ .enable = 0x34,
+};
+
+static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
+ u32 val)
+{
+ irq_reg_writel(val, gc->reg_base + off);
+}
+
+static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
+{
+ return irq_reg_readl(gc->reg_base + off);
+}
+
+static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned int virq = irq_find_mapping(domain, 0);
+
+ chained_irq_enter(chip, desc);
+ generic_handle_irq(virq);
+ chained_irq_exit(chip, desc);
+}
+
+static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct irq_chip_type *ct = gc->chip_types;
+ u32 src_type_reg;
+ u32 ctrl_off = ct->regs.type;
+ unsigned int src_type;
+ unsigned int i;
+
+ irq_gc_lock(gc);
+
+ switch (flow_type & IRQF_TRIGGER_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+ src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ src_type = SUNXI_SRC_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_NONE:
+ case IRQ_TYPE_LEVEL_LOW:
+ src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
+ break;
+ default:
+ irq_gc_unlock(gc);
+ pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
+ __func__, data->irq);
+ return -EBADR;
+ }
+
+ irqd_set_trigger_type(data, flow_type);
+ irq_setup_alt_chip(data, flow_type);
+
+ for (i = 0; i <= gc->num_ct; i++, ct++)
+ if (ct->type & flow_type)
+ ctrl_off = ct->regs.type;
+
+ src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
+ src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
+ src_type_reg |= src_type;
+ sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);
+
+ irq_gc_unlock(gc);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
+ struct sunxi_sc_nmi_reg_offs *reg_offs)
+{
+ struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ unsigned int irq;
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ int ret;
+
+
+ domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("%s: Could not register interrupt domain.\n", node->name);
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name,
+ handle_fasteoi_irq, clr, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%s: Could not allocate generic interrupt chip.\n",
+ node->name);
+ goto fail_irqd_remove;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ pr_err("%s: unable to parse irq\n", node->name);
+ ret = -EINVAL;
+ goto fail_irqd_remove;
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = of_iomap(node, 0);
+ if (!gc->reg_base) {
+ pr_err("%s: unable to map resource\n", node->name);
+ ret = -ENOMEM;
+ goto fail_irqd_remove;
+ }
+
+ gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
+ gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
+ gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+ gc->chip_types[0].regs.ack = reg_offs->pend;
+ gc->chip_types[0].regs.mask = reg_offs->enable;
+ gc->chip_types[0].regs.type = reg_offs->ctrl;
+
+ gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
+ gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[1].chip.irq_set_type = sunxi_sc_nmi_set_type;
+ gc->chip_types[1].regs.ack = reg_offs->pend;
+ gc->chip_types[1].regs.mask = reg_offs->enable;
+ gc->chip_types[1].regs.type = reg_offs->ctrl;
+ gc->chip_types[1].handler = handle_edge_irq;
+
+ sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
+ sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);
+
+ irq_set_handler_data(irq, domain);
+ irq_set_chained_handler(irq, sunxi_sc_nmi_handle_irq);
+
+ return 0;
+
+fail_irqd_remove:
+ irq_domain_remove(domain);
+
+ return ret;
+}
+
+static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
+}
+IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
+
+static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
+}
+IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 8e21ae0bab46..37dab0b472cd 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -57,6 +57,7 @@
/**
* struct vic_device - VIC PM device
+ * @parent_irq: The parent IRQ number of the VIC if cascaded, or 0.
* @irq: The IRQ number for the base of the VIC.
* @base: The register base for the VIC.
* @valid_sources: A bitmask of valid interrupts
@@ -224,11 +225,22 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
return handled;
}
+static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc)
+{
+ u32 stat, hwirq;
+ struct vic_device *vic = irq_desc_get_handler_data(desc);
+
+ while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
+ hwirq = ffs(stat) - 1;
+ generic_handle_irq(irq_find_mapping(vic->domain, hwirq));
+ }
+}
+
/*
* Keep iterating over all registered VIC's until there are no pending
* interrupts.
*/
-static asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
{
int i, handled;
@@ -246,6 +258,7 @@ static struct irq_domain_ops vic_irqdomain_ops = {
/**
* vic_register() - Register a VIC.
* @base: The base address of the VIC.
+ * @parent_irq: The parent IRQ if cascaded, else 0.
* @irq: The base IRQ for the VIC.
* @valid_sources: bitmask of valid interrupts
* @resume_sources: bitmask of interrupts allowed for resume sources.
@@ -257,7 +270,8 @@ static struct irq_domain_ops vic_irqdomain_ops = {
*
* This also configures the IRQ domain for the VIC.
*/
-static void __init vic_register(void __iomem *base, unsigned int irq,
+static void __init vic_register(void __iomem *base, unsigned int parent_irq,
+ unsigned int irq,
u32 valid_sources, u32 resume_sources,
struct device_node *node)
{
@@ -273,15 +287,25 @@ static void __init vic_register(void __iomem *base, unsigned int irq,
v->base = base;
v->valid_sources = valid_sources;
v->resume_sources = resume_sources;
- v->irq = irq;
set_handle_irq(vic_handle_irq);
vic_id++;
+
+ if (parent_irq) {
+ irq_set_handler_data(parent_irq, v);
+ irq_set_chained_handler(parent_irq, vic_handle_irq_cascaded);
+ }
+
v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
&vic_irqdomain_ops, v);
/* create an IRQ mapping for each valid IRQ */
for (i = 0; i < fls(valid_sources); i++)
if (valid_sources & (1 << i))
irq_create_mapping(v->domain, i);
+ /* If no base IRQ was passed, figure out our allocated base */
+ if (irq)
+ v->irq = irq;
+ else
+ v->irq = irq_find_mapping(v->domain, 0);
}
static void vic_ack_irq(struct irq_data *d)
@@ -409,10 +433,10 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
- vic_register(base, irq_start, vic_sources, 0, node);
+ vic_register(base, 0, irq_start, vic_sources, 0, node);
}
-void __init __vic_init(void __iomem *base, int irq_start,
+void __init __vic_init(void __iomem *base, int parent_irq, int irq_start,
u32 vic_sources, u32 resume_sources,
struct device_node *node)
{
@@ -449,7 +473,7 @@ void __init __vic_init(void __iomem *base, int irq_start,
vic_init2(base);
- vic_register(base, irq_start, vic_sources, resume_sources, node);
+ vic_register(base, parent_irq, irq_start, vic_sources, resume_sources, node);
}
/**
@@ -462,8 +486,30 @@ void __init __vic_init(void __iomem *base, int irq_start,
void __init vic_init(void __iomem *base, unsigned int irq_start,
u32 vic_sources, u32 resume_sources)
{
- __vic_init(base, irq_start, vic_sources, resume_sources, NULL);
+ __vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL);
+}
+
+/**
+ * vic_init_cascaded() - initialise a cascaded vectored interrupt controller
+ * @base: iomem base address
+ * @parent_irq: the parent IRQ we're cascaded off
+ * @irq_start: starting interrupt number, must be multiple of 32
+ * @vic_sources: bitmask of interrupt sources to allow
+ * @resume_sources: bitmask of interrupt sources to allow for resume
+ *
+ * This returns the base for the new interrupts or negative on error.
+ */
+int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
+ u32 vic_sources, u32 resume_sources)
+{
+ struct vic_device *v;
+
+ v = &vic_devices[vic_id];
+ __vic_init(base, parent_irq, 0, vic_sources, resume_sources, NULL);
+ /* Return our acquired base */
+ return v->irq;
}
+EXPORT_SYMBOL_GPL(vic_init_cascaded);
#ifdef CONFIG_OF
int __init vic_of_init(struct device_node *node, struct device_node *parent)
@@ -485,7 +531,7 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent)
/*
* Passing 0 as first IRQ makes the simple domain allocate descriptors
*/
- __vic_init(regs, 0, interrupt_mask, wakeup_mask, node);
+ __vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node);
return 0;
}
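vic_init_cascaded() above lets a board register a secondary VIC behind a parent interrupt and learn the dynamically allocated Linux IRQ base from the return value. A sketch of how board code might use it follows; the base address, the parent IRQ number and the assumption that the prototype is exported via <linux/irqchip/arm-vic.h> are illustrative, not taken from the patch:

#include <linux/io.h>
#include <linux/irqchip/arm-vic.h>	/* assumed location of the prototype */

#define BOARD_VIC_PHYS		0x10140000	/* illustrative base address */
#define BOARD_VIC_PARENT_IRQ	32		/* illustrative cascade line */

static int __init board_vic_init(void)
{
	void __iomem *base = ioremap(BOARD_VIC_PHYS, 0x1000);
	int irq_base;

	if (!base)
		return -ENOMEM;

	/* All 32 sources valid, none marked as resume sources. */
	irq_base = vic_init_cascaded(base, BOARD_VIC_PARENT_IRQ,
				     0xffffffff, 0);
	if (irq_base < 0)
		return irq_base;

	/* Linux IRQ numbers for this VIC start at irq_base. */
	return 0;
}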
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index 1846e7d66681..eb6e91efdec8 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -178,8 +178,7 @@ static struct irq_domain_ops vt8500_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static asmlinkage
-void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
{
u32 stat, i;
int irqnr, virq;
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index f693f1bc1348..e1c2f9632893 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -122,7 +122,7 @@ static int xtensa_mx_irq_retrigger(struct irq_data *d)
static int xtensa_mx_irq_set_affinity(struct irq_data *d,
const struct cpumask *dest, bool force)
{
- unsigned mask = 1u << cpumask_any(dest);
+ unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask);
set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
return 0;
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
index 8ed04c4a43ee..ceb3a4318f73 100644
--- a/drivers/irqchip/irq-zevio.c
+++ b/drivers/irqchip/irq-zevio.c
@@ -50,7 +50,7 @@ static void zevio_irq_ack(struct irq_data *irqd)
readl(gc->reg_base + regs->ack);
}
-static asmlinkage void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
{
int irqnr;
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index f496afce29de..cad3e2495552 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -10,8 +10,7 @@
#include <linux/init.h>
#include <linux/of_irq.h>
-
-#include "irqchip.h"
+#include <linux/irqchip.h>
/*
* This special of_device_id is the sentinel at the end of the
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c
index b4147c0b14b7..c3a1b061838d 100644
--- a/drivers/isdn/act2000/module.c
+++ b/drivers/isdn/act2000/module.c
@@ -796,7 +796,7 @@ static void __exit act2000_exit(void)
act2000_card *last;
while (card) {
unregister_card(card);
- del_timer(&card->ptimer);
+ del_timer_sync(&card->ptimer);
card = card->next;
}
card = cards;
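The switch from del_timer() to del_timer_sync() here (and in the later i4l and sc teardown paths) matters on SMP: del_timer() only removes a pending timer, while del_timer_sync() also waits for a handler already executing on another CPU, so the callback can no longer touch the card once teardown proceeds. A minimal sketch of that ordering, with illustrative names (my_card, my_card_teardown):

#include <linux/timer.h>
#include <linux/slab.h>

struct my_card {
	struct timer_list timer;
	/* ... driver state the timer callback dereferences ... */
};

static void my_card_teardown(struct my_card *card)
{
	/*
	 * del_timer_sync() guarantees the callback is neither pending nor
	 * running anywhere once it returns, so freeing card is now safe.
	 */
	del_timer_sync(&card->timer);
	kfree(card);
}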
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index fb4f1bac0133..1c5dc345e7c5 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -86,12 +86,13 @@ isdn_divert_read(struct file *file, char __user *buf, size_t count, loff_t *off)
struct divert_info *inf;
int len;
- if (!*((struct divert_info **) file->private_data)) {
+ if (!(inf = *((struct divert_info **) file->private_data))) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- interruptible_sleep_on(&(rd_queue));
+ wait_event_interruptible(rd_queue, (inf =
+ *((struct divert_info **) file->private_data)));
}
- if (!(inf = *((struct divert_info **) file->private_data)))
+ if (!inf)
return (0);
inf->usage_cnt--; /* new usage count */
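This conversion, and the matching ones in the hisax, hysdn, i4l and pcbit files below, replaces interruptible_sleep_on() because that interface can lose a wakeup arriving between the caller's check and the actual sleep. wait_event_interruptible() re-evaluates its condition after queueing the task, which closes that window. A minimal sketch of the pattern, with illustrative names (my_queue, data_ready):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_queue);
static int data_ready;

static int my_read_wait(void)
{
	/*
	 * Racy, removed pattern:
	 *	if (!data_ready)
	 *		interruptible_sleep_on(&my_queue);
	 * A wakeup between the test and the sleep is missed for good.
	 *
	 * wait_event_interruptible() tests data_ready again after putting
	 * the task on the queue, so the wakeup cannot be lost.
	 */
	return wait_event_interruptible(my_queue, data_ready);
}

static void my_data_arrived(void)
{
	data_ready = 1;
	wake_up_interruptible(&my_queue);
}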
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 2be1c8a3bb5f..d8ef64da26f1 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -509,7 +509,8 @@ static void
set_arcofi(struct IsdnCardState *cs, int bc) {
cs->dc.isac.arcofi_bc = bc;
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_COP_5);
- interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(cs->dc.isac.arcofi_wait,
+ cs->dc.isac.arcofi_state == ARCOFI_NOP);
}
static int
@@ -528,7 +529,8 @@ check_arcofi(struct IsdnCardState *cs)
}
cs->dc.isac.arcofi_bc = 0;
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_VERSION);
- interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(cs->dc.isac.arcofi_wait,
+ cs->dc.isac.arcofi_state == ARCOFI_NOP);
if (!test_and_clear_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags)) {
debugl1(cs, "Arcofi response received %d bytes", cs->dc.isac.mon_rxp);
p = cs->dc.isac.mon_rx;
@@ -595,7 +597,8 @@ check_arcofi(struct IsdnCardState *cs)
Elsa_Types[cs->subtyp],
cs->hw.elsa.base + 8);
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_XOP_0);
- interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(cs->dc.isac.arcofi_wait,
+ cs->dc.isac.arcofi_state == ARCOFI_NOP);
return (1);
}
return (0);
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
index 3f84dd8f1757..a2a358c1dc8e 100644
--- a/drivers/isdn/hisax/elsa_ser.c
+++ b/drivers/isdn/hisax/elsa_ser.c
@@ -573,7 +573,8 @@ modem_l2l1(struct PStack *st, int pr, void *arg)
test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
bcs->cs->dc.isac.arcofi_bc = st->l1.bc;
arcofi_fsm(bcs->cs, ARCOFI_START, &ARCOFI_XOP_0);
- interruptible_sleep_on(&bcs->cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(bcs->cs->dc.isac.arcofi_wait,
+ bcs->cs->dc.isac.arcofi_state == ARCOFI_NOP);
bcs->cs->hw.elsa.MFlag = 1;
} else {
printk(KERN_WARNING "ElsaSer: unknown pr %x\n", pr);
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index b61e8d5e84ad..7b5fd8fb1761 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -175,14 +175,15 @@ hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
int len;
hysdn_card *card = PDE_DATA(file_inode(file));
- if (!*((struct log_data **) file->private_data)) {
+ if (!(inf = *((struct log_data **) file->private_data))) {
struct procdata *pd = card->proclog;
if (file->f_flags & O_NONBLOCK)
return (-EAGAIN);
- interruptible_sleep_on(&(pd->rd_queue));
+ wait_event_interruptible(pd->rd_queue, (inf =
+ *((struct log_data **) file->private_data)));
}
- if (!(inf = *((struct log_data **) file->private_data)))
+ if (!inf)
return (0);
inf->usage_cnt--; /* new usage count */
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 9bb12ba3191f..9b856e1890d1 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -777,7 +777,8 @@ isdn_readbchan(int di, int channel, u_char *buf, u_char *fp, int len, wait_queue
return 0;
if (skb_queue_empty(&dev->drv[di]->rpqueue[channel])) {
if (sleep)
- interruptible_sleep_on(sleep);
+ wait_event_interruptible(*sleep,
+ !skb_queue_empty(&dev->drv[di]->rpqueue[channel]));
else
return 0;
}
@@ -1072,7 +1073,8 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
retval = -EAGAIN;
goto out;
}
- interruptible_sleep_on(&(dev->info_waitq));
+ wait_event_interruptible(dev->info_waitq,
+ file->private_data);
}
p = isdn_statstr();
file->private_data = NULL;
@@ -1128,7 +1130,8 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
retval = -EAGAIN;
goto out;
}
- interruptible_sleep_on(&(dev->drv[drvidx]->st_waitq));
+ wait_event_interruptible(dev->drv[drvidx]->st_waitq,
+ dev->drv[drvidx]->stavail);
}
if (dev->drv[drvidx]->interface->readstat) {
if (count > dev->drv[drvidx]->stavail)
@@ -1188,8 +1191,8 @@ isdn_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
goto out;
}
chidx = isdn_minor2chan(minor);
- while ((retval = isdn_writebuf_stub(drvidx, chidx, buf, count)) == 0)
- interruptible_sleep_on(&dev->drv[drvidx]->snd_waitq[chidx]);
+ wait_event_interruptible(dev->drv[drvidx]->snd_waitq[chidx],
+ (retval = isdn_writebuf_stub(drvidx, chidx, buf, count)));
goto out;
}
if (minor <= ISDN_MINOR_CTRLMAX) {
@@ -2378,7 +2381,7 @@ static void __exit isdn_exit(void)
}
isdn_tty_exit();
unregister_chrdev(ISDN_MAJOR, "isdn");
- del_timer(&dev->timer);
+ del_timer_sync(&dev->timer);
/* call vfree with interrupts enabled, else it will hang */
vfree(dev);
printk(KERN_NOTICE "ISDN-subsystem unloaded\n");
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 38ceac5053a0..a5da511e3c9a 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -378,10 +378,15 @@ isdn_ppp_release(int min, struct file *file)
is->slcomp = NULL;
#endif
#ifdef CONFIG_IPPP_FILTER
- kfree(is->pass_filter);
- is->pass_filter = NULL;
- kfree(is->active_filter);
- is->active_filter = NULL;
+ if (is->pass_filter) {
+ sk_unattached_filter_destroy(is->pass_filter);
+ is->pass_filter = NULL;
+ }
+
+ if (is->active_filter) {
+ sk_unattached_filter_destroy(is->active_filter);
+ is->active_filter = NULL;
+ }
#endif
/* TODO: if this was the previous master: link the stuff to the new master */
@@ -629,25 +634,41 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_IPPP_FILTER
case PPPIOCSPASS:
{
+ struct sock_fprog fprog;
struct sock_filter *code;
- int len = get_filter(argp, &code);
+ int err, len = get_filter(argp, &code);
+
if (len < 0)
return len;
- kfree(is->pass_filter);
- is->pass_filter = code;
- is->pass_len = len;
- break;
+
+ fprog.len = len;
+ fprog.filter = code;
+
+ if (is->pass_filter)
+ sk_unattached_filter_destroy(is->pass_filter);
+ err = sk_unattached_filter_create(&is->pass_filter, &fprog);
+ kfree(code);
+
+ return err;
}
case PPPIOCSACTIVE:
{
+ struct sock_fprog fprog;
struct sock_filter *code;
- int len = get_filter(argp, &code);
+ int err, len = get_filter(argp, &code);
+
if (len < 0)
return len;
- kfree(is->active_filter);
- is->active_filter = code;
- is->active_len = len;
- break;
+
+ fprog.len = len;
+ fprog.filter = code;
+
+ if (is->active_filter)
+ sk_unattached_filter_destroy(is->active_filter);
+ err = sk_unattached_filter_create(&is->active_filter, &fprog);
+ kfree(code);
+
+ return err;
}
#endif /* CONFIG_IPPP_FILTER */
default:
@@ -1147,14 +1168,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *
}
if (is->pass_filter
- && sk_run_filter(skb, is->pass_filter) == 0) {
+ && SK_RUN_FILTER(is->pass_filter, skb) == 0) {
if (is->debug & 0x2)
printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
kfree_skb(skb);
return;
}
if (!(is->active_filter
- && sk_run_filter(skb, is->active_filter) == 0)) {
+ && SK_RUN_FILTER(is->active_filter, skb) == 0)) {
if (is->debug & 0x2)
printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
lp->huptimer = 0;
@@ -1293,14 +1314,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
}
if (ipt->pass_filter
- && sk_run_filter(skb, ipt->pass_filter) == 0) {
+ && SK_RUN_FILTER(ipt->pass_filter, skb) == 0) {
if (ipt->debug & 0x4)
printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
kfree_skb(skb);
goto unlock;
}
if (!(ipt->active_filter
- && sk_run_filter(skb, ipt->active_filter) == 0)) {
+ && SK_RUN_FILTER(ipt->active_filter, skb) == 0)) {
if (ipt->debug & 0x4)
printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
lp->huptimer = 0;
@@ -1490,9 +1511,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
}
drop |= is->pass_filter
- && sk_run_filter(skb, is->pass_filter) == 0;
+ && SK_RUN_FILTER(is->pass_filter, skb) == 0;
drop |= is->active_filter
- && sk_run_filter(skb, is->active_filter) == 0;
+ && SK_RUN_FILTER(is->active_filter, skb) == 0;
skb_push(skb, IPPP_MAX_HEADER - 4);
return drop;
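The ioctl and packet paths above move from storing raw sock_filter arrays and calling sk_run_filter() to handing the program to the BPF core with sk_unattached_filter_create(), which validates it, and running it via SK_RUN_FILTER(). A minimal sketch of that lifecycle as used in this kernel series; my_filter, my_set_filter and my_frame_passes are illustrative names:

#include <linux/filter.h>
#include <linux/skbuff.h>

static struct sk_filter *my_filter;

static int my_set_filter(struct sock_filter *insns, unsigned short len)
{
	struct sock_fprog fprog = {
		.len	= len,
		.filter	= insns,
	};

	if (my_filter) {
		sk_unattached_filter_destroy(my_filter);
		my_filter = NULL;
	}

	/* The core copies the program, so insns may be freed by the caller. */
	return sk_unattached_filter_create(&my_filter, &fprog);
}

static bool my_frame_passes(struct sk_buff *skb)
{
	/* A filter return value of 0 means "drop the frame". */
	return !my_filter || SK_RUN_FILTER(my_filter, skb) != 0;
}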
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 02125e6a9109..5a4da94aefb0 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[] =
static void
isdnloop_fake_err(isdnloop_card *card)
{
- char buf[60];
+ char buf[64];
- sprintf(buf, "E%s", card->omsg);
+ snprintf(buf, sizeof(buf), "E%s", card->omsg);
isdnloop_fake(card, buf, -1);
isdnloop_fake(card, "NAK", -1);
}
@@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card *card)
case 7:
/* 0x;EAZ */
p += 3;
+ if (strlen(p) >= sizeof(card->eazlist[0]))
+ break;
strcpy(card->eazlist[ch - 1], p);
break;
case 8:
@@ -1070,6 +1072,12 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
return -EBUSY;
if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
return -EFAULT;
+
+ for (i = 0; i < 3; i++) {
+ if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&card->isdnloop_lock, flags);
switch (sdef.ptype) {
case ISDN_PTYPE_EURO:
@@ -1127,7 +1135,7 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
{
ulong a;
int i;
- char cbuf[60];
+ char cbuf[80];
isdn_ctrl cmd;
isdnloop_cdef cdef;
@@ -1192,7 +1200,6 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
break;
if ((c->arg & 255) < ISDNLOOP_BCH) {
char *p;
- char dial[50];
char dcode[4];
a = c->arg;
@@ -1204,10 +1211,10 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
} else
/* Normal Dial */
strcpy(dcode, "CAL");
- strcpy(dial, p);
- sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
- dcode, dial, c->parm.setup.si1,
- c->parm.setup.si2, c->parm.setup.eazmsn);
+ snprintf(cbuf, sizeof(cbuf),
+ "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
+ dcode, p, c->parm.setup.si1,
+ c->parm.setup.si2, c->parm.setup.eazmsn);
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;
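Two hardening patterns are applied in isdnloop above: every sprintf() of caller-influenced text is bounded with snprintf(), and fixed-size strings coming from user space are checked for a terminating NUL before strcpy()/strlen() can run past them. A small sketch of both, with illustrative struct and field names:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>

struct my_setup {
	char num[3][32];		/* copied verbatim from user space */
};

static int my_validate_setup(const struct my_setup *sdef)
{
	int i;

	for (i = 0; i < 3; i++) {
		/* Reject any entry without a NUL inside the array bounds. */
		if (!memchr(sdef->num[i], 0, sizeof(sdef->num[i])))
			return -EINVAL;
	}
	return 0;
}

static void my_build_cmd(char *buf, size_t buflen, int ch, const char *msg)
{
	/* snprintf() truncates instead of overflowing the destination. */
	snprintf(buf, buflen, "%02d;E%s\n", ch, msg);
}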
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c
index 1eaf62273903..f02cc506fbfa 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/isdn/pcbit/drv.c
@@ -796,6 +796,7 @@ static void set_running_timeout(unsigned long ptr)
#endif
dev = (struct pcbit_dev *) ptr;
+ dev->l2_state = L2_DOWN;
wake_up_interruptible(&dev->set_running_wq);
}
@@ -818,7 +819,8 @@ static int set_protocol_running(struct pcbit_dev *dev)
add_timer(&dev->set_running_timer);
- interruptible_sleep_on(&dev->set_running_wq);
+ wait_event(dev->set_running_wq, dev->l2_state == L2_RUNNING ||
+ dev->l2_state == L2_DOWN);
del_timer(&dev->set_running_timer);
@@ -842,8 +844,6 @@ static int set_protocol_running(struct pcbit_dev *dev)
printk(KERN_DEBUG "pcbit: initialization failed\n");
printk(KERN_DEBUG "pcbit: firmware not loaded\n");
- dev->l2_state = L2_DOWN;
-
#ifdef DEBUG
printk(KERN_DEBUG "Bank3 = %02x\n",
readb(dev->sh_mem + BANK3));
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index 92acc81f844d..d6f19b168e8a 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -390,8 +390,8 @@ static void __exit sc_exit(void)
/*
* kill the timers
*/
- del_timer(&(sc_adapter[i]->reset_timer));
- del_timer(&(sc_adapter[i]->stat_timer));
+ del_timer_sync(&(sc_adapter[i]->reset_timer));
+ del_timer_sync(&(sc_adapter[i]->stat_timer));
/*
* Tell I4L we're toast
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 72156c123033..6de9dfbf61c1 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -416,12 +416,12 @@ config LEDS_MC13783
depends on MFD_MC13XXX
help
This option enable support for on-chip LED drivers found
- on Freescale Semiconductor MC13783/MC13892 PMIC.
+ on Freescale Semiconductor MC13783/MC13892/MC34708 PMIC.
config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
depends on LEDS_CLASS
- depends on ARCH_KIRKWOOD
+ depends on ARCH_KIRKWOOD || MACH_KIRKWOOD
default y
help
This option enable support for the dual-GPIO LED found on the
@@ -431,7 +431,7 @@ config LEDS_NS2
config LEDS_NETXBIG
tristate "LED support for Big Network series LEDs"
depends on LEDS_CLASS
- depends on ARCH_KIRKWOOD
+ depends on ARCH_KIRKWOOD || MACH_KIRKWOOD
default y
help
This option enable support for LEDs found on the LaCie 2Big
@@ -474,7 +474,7 @@ config LEDS_LM355x
config LEDS_OT200
tristate "LED support for the Bachmann OT200"
- depends on LEDS_CLASS && HAS_IOMEM
+ depends on LEDS_CLASS && HAS_IOMEM && (X86_32 || COMPILE_TEST)
help
This option enables support for the LEDs on the Bachmann OT200.
Say Y to enable LEDs on the Bachmann OT200.
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index ce8921a753a3..71b40d3bf776 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -39,9 +39,11 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
led_cdev->blink_delay_on = delay_on;
led_cdev->blink_delay_off = delay_off;
- /* never on - don't blink */
- if (!delay_on)
+ /* never on - just set to off */
+ if (!delay_on) {
+ __led_set_brightness(led_cdev, LED_OFF);
return;
+ }
/* never off - just set to brightness */
if (!delay_off) {
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index e387f41a9cb7..c3734f10fdd5 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
@@ -220,9 +219,12 @@ void led_trigger_unregister(struct led_trigger *trig)
{
struct led_classdev *led_cdev;
+ if (list_empty_careful(&trig->next_trig))
+ return;
+
/* Remove from the list of led triggers */
down_write(&triggers_list_lock);
- list_del(&trig->next_trig);
+ list_del_init(&trig->next_trig);
up_write(&triggers_list_lock);
/* Remove anyone actively using this trigger */
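The unregister path above becomes safe for a trigger that was never registered, or is unregistered twice: list_empty_careful() spots a node that is still (or again) self-linked, and list_del_init() re-initializes the node on removal so a repeat call takes the early return. A minimal sketch of that idiom with illustrative names; the node must have been set up with INIT_LIST_HEAD for the check to be meaningful:

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

struct my_item {
	struct list_head node;		/* INIT_LIST_HEAD() at allocation */
};

static void my_register(struct my_item *item)
{
	spin_lock(&my_lock);
	list_add_tail(&item->node, &my_list);
	spin_unlock(&my_lock);
}

static void my_unregister(struct my_item *item)
{
	/* Never registered, or already unregistered: nothing to do. */
	if (list_empty_careful(&item->node))
		return;

	spin_lock(&my_lock);
	list_del_init(&item->node);	/* leaves the node self-linked */
	spin_unlock(&my_lock);
}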
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 5f588c0a376e..d1e1bca90d11 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -11,7 +11,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index 7e311a120b11..86b5bdb0c773 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/workqueue.h>
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
index 6de216a89a0c..70c74a7f0dfe 100644
--- a/drivers/leds/leds-asic3.c
+++ b/drivers/leds/leds-asic3.c
@@ -7,7 +7,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/slab.h>
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index 66d0a57db221..d0452b099aee 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -18,7 +18,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
@@ -444,7 +443,7 @@ static void led_work(struct work_struct *work)
{
int ret;
struct blinkm_led *led;
- struct blinkm_data *data ;
+ struct blinkm_data *data;
struct blinkm_work *blm_work = work_to_blmwork(work);
led = blm_work->blinkm_led;
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index d93e2455da5c..f58a354428e3 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -19,7 +19,7 @@ MODULE_AUTHOR("Márton Németh <nm127@freemail.hu>");
MODULE_DESCRIPTION("Clevo mail LED driver");
MODULE_LICENSE("GPL");
-static bool __initdata nodetect;
+static bool nodetect;
module_param_named(nodetect, nodetect, bool, 0);
MODULE_PARM_DESC(nodetect, "Skip DMI hardware detection");
@@ -153,7 +153,7 @@ static struct led_classdev clevo_mail_led = {
.flags = LED_CORE_SUSPENDRESUME,
};
-static int clevo_mail_led_probe(struct platform_device *pdev)
+static int __init clevo_mail_led_probe(struct platform_device *pdev)
{
return led_classdev_register(&pdev->dev, &clevo_mail_led);
}
@@ -165,7 +165,6 @@ static int clevo_mail_led_remove(struct platform_device *pdev)
}
static struct platform_driver clevo_mail_led_driver = {
- .probe = clevo_mail_led_probe,
.remove = clevo_mail_led_remove,
.driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 8abcb66db01c..910339d86edf 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -3,7 +3,6 @@
*
* Control the Cobalt Qube/RaQ front LED
*/
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/leds.h>
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index 2a4b87f8091a..35dffb100388 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/workqueue.h>
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
index 865d4faf874a..01486adc7f8b 100644
--- a/drivers/leds/leds-da9052.c
+++ b/drivers/leds/leds-da9052.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/workqueue.h>
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index b4d5a44cc41b..2b4dc738dcd6 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -16,7 +16,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/module.h>
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 78b0e273a903..57ff20fecf57 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -11,7 +11,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/leds.h>
@@ -204,6 +203,9 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
led.default_state = LEDS_GPIO_DEFSTATE_OFF;
}
+ if (of_get_property(child, "retain-state-suspended", NULL))
+ led.retain_state_suspended = 1;
+
ret = create_gpio_led(&led, &priv->leds[priv->num_leds++],
&pdev->dev, NULL);
if (ret < 0) {
@@ -224,6 +226,8 @@ static const struct of_device_id of_gpio_leds_match[] = {
{ .compatible = "gpio-leds", },
{},
};
+
+MODULE_DEVICE_TABLE(of, of_gpio_leds_match);
#else /* CONFIG_OF_GPIO */
static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
{
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index 366b6055e330..d61a98896c71 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <asm/hd64461.h>
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index 027ede73b80d..e2c642c1169b 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/leds.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 2ec34cfcedce..8ca197af2864 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 4ade66a2d9d4..cb5ed82994ba 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index bf006f4e44a0..ca85724ab138 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -13,7 +13,6 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -347,9 +346,9 @@ static void lp5562_write_program_memory(struct lp55xx_chip *chip,
/* check the size of program count */
static inline bool _is_pc_overflow(struct lp55xx_predef_pattern *ptn)
{
- return (ptn->size_r >= LP5562_PROGRAM_LENGTH ||
- ptn->size_g >= LP5562_PROGRAM_LENGTH ||
- ptn->size_b >= LP5562_PROGRAM_LENGTH);
+ return ptn->size_r >= LP5562_PROGRAM_LENGTH ||
+ ptn->size_g >= LP5562_PROGRAM_LENGTH ||
+ ptn->size_b >= LP5562_PROGRAM_LENGTH;
}
static int lp5562_run_predef_led_pattern(struct lp55xx_chip *chip, int mode)
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 3417e5be7b57..059f5b1f3553 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -17,7 +17,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/workqueue.h>
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index ca87a1b4a0db..f1db88e25138 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -1,5 +1,5 @@
/*
- * LEDs driver for Freescale MC13783/MC13892
+ * LEDs driver for Freescale MC13783/MC13892/MC34708
*
* Copyright (C) 2010 Philippe Rétornaz
*
@@ -17,57 +17,56 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
+#include <linux/of.h>
#include <linux/workqueue.h>
#include <linux/mfd/mc13xxx.h>
-#define MC13XXX_REG_LED_CONTROL(x) (51 + (x))
-
struct mc13xxx_led_devtype {
int led_min;
int led_max;
int num_regs;
+ u32 ledctrl_base;
};
struct mc13xxx_led {
struct led_classdev cdev;
struct work_struct work;
- struct mc13xxx *master;
enum led_brightness new_brightness;
int id;
+ struct mc13xxx_leds *leds;
};
struct mc13xxx_leds {
+ struct mc13xxx *master;
struct mc13xxx_led_devtype *devtype;
int num_leds;
- struct mc13xxx_led led[0];
+ struct mc13xxx_led *led;
};
+static unsigned int mc13xxx_max_brightness(int id)
+{
+ if (id >= MC13783_LED_MD && id <= MC13783_LED_KP)
+ return 0x0f;
+ else if (id >= MC13783_LED_R1 && id <= MC13783_LED_B3)
+ return 0x1f;
+
+ return 0x3f;
+}
+
static void mc13xxx_led_work(struct work_struct *work)
{
struct mc13xxx_led *led = container_of(work, struct mc13xxx_led, work);
- int reg, mask, value, bank, off, shift;
+ struct mc13xxx_leds *leds = led->leds;
+ unsigned int reg, bank, off, shift;
switch (led->id) {
case MC13783_LED_MD:
- reg = MC13XXX_REG_LED_CONTROL(2);
- shift = 9;
- mask = 0x0f;
- value = led->new_brightness >> 4;
- break;
case MC13783_LED_AD:
- reg = MC13XXX_REG_LED_CONTROL(2);
- shift = 13;
- mask = 0x0f;
- value = led->new_brightness >> 4;
- break;
case MC13783_LED_KP:
- reg = MC13XXX_REG_LED_CONTROL(2);
- shift = 17;
- mask = 0x0f;
- value = led->new_brightness >> 4;
+ reg = 2;
+ shift = 9 + (led->id - MC13783_LED_MD) * 4;
break;
case MC13783_LED_R1:
case MC13783_LED_G1:
@@ -80,44 +79,35 @@ static void mc13xxx_led_work(struct work_struct *work)
case MC13783_LED_B3:
off = led->id - MC13783_LED_R1;
bank = off / 3;
- reg = MC13XXX_REG_LED_CONTROL(3) + bank;
+ reg = 3 + bank;
shift = (off - bank * 3) * 5 + 6;
- value = led->new_brightness >> 3;
- mask = 0x1f;
break;
case MC13892_LED_MD:
- reg = MC13XXX_REG_LED_CONTROL(0);
- shift = 3;
- mask = 0x3f;
- value = led->new_brightness >> 2;
- break;
case MC13892_LED_AD:
- reg = MC13XXX_REG_LED_CONTROL(0);
- shift = 15;
- mask = 0x3f;
- value = led->new_brightness >> 2;
- break;
case MC13892_LED_KP:
- reg = MC13XXX_REG_LED_CONTROL(1);
- shift = 3;
- mask = 0x3f;
- value = led->new_brightness >> 2;
+ reg = (led->id - MC13892_LED_MD) / 2;
+ shift = 3 + (led->id - MC13892_LED_MD) * 12;
break;
case MC13892_LED_R:
case MC13892_LED_G:
case MC13892_LED_B:
off = led->id - MC13892_LED_R;
bank = off / 2;
- reg = MC13XXX_REG_LED_CONTROL(2) + bank;
+ reg = 2 + bank;
shift = (off - bank * 2) * 12 + 3;
- value = led->new_brightness >> 2;
- mask = 0x3f;
+ break;
+ case MC34708_LED_R:
+ case MC34708_LED_G:
+ reg = 0;
+ shift = 3 + (led->id - MC34708_LED_R) * 12;
break;
default:
BUG();
}
- mc13xxx_reg_rmw(led->master, reg, mask << shift, value << shift);
+ mc13xxx_reg_rmw(leds->master, leds->devtype->ledctrl_base + reg,
+ mc13xxx_max_brightness(led->id) << shift,
+ led->new_brightness << shift);
}
static void mc13xxx_led_set(struct led_classdev *led_cdev,
@@ -130,47 +120,121 @@ static void mc13xxx_led_set(struct led_classdev *led_cdev,
schedule_work(&led->work);
}
-static int __init mc13xxx_led_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
+ struct platform_device *pdev)
{
- struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct mc13xxx *mcdev = dev_get_drvdata(pdev->dev.parent);
- struct mc13xxx_led_devtype *devtype =
- (struct mc13xxx_led_devtype *)pdev->id_entry->driver_data;
- struct mc13xxx_leds *leds;
- int i, id, num_leds, ret = -ENODATA;
- u32 reg, init_led = 0;
+ struct mc13xxx_leds *leds = platform_get_drvdata(pdev);
+ struct mc13xxx_leds_platform_data *pdata;
+ struct device_node *parent, *child;
+ struct device *dev = &pdev->dev;
+ int i = 0, ret = -ENODATA;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ of_node_get(dev->parent->of_node);
+
+ parent = of_find_node_by_name(dev->parent->of_node, "leds");
+ if (!parent)
+ goto out_node_put;
- if (!pdata) {
- dev_err(&pdev->dev, "Missing platform data\n");
- return -ENODEV;
+ ret = of_property_read_u32_array(parent, "led-control",
+ pdata->led_control,
+ leds->devtype->num_regs);
+ if (ret)
+ goto out_node_put;
+
+ pdata->num_leds = of_get_child_count(parent);
+
+ pdata->led = devm_kzalloc(dev, pdata->num_leds * sizeof(*pdata->led),
+ GFP_KERNEL);
+ if (!pdata->led) {
+ ret = -ENOMEM;
+ goto out_node_put;
}
- num_leds = pdata->num_leds;
+ for_each_child_of_node(parent, child) {
+ const char *str;
+ u32 tmp;
- if ((num_leds < 1) ||
- (num_leds > (devtype->led_max - devtype->led_min + 1))) {
- dev_err(&pdev->dev, "Invalid LED count %d\n", num_leds);
- return -EINVAL;
+ if (of_property_read_u32(child, "reg", &tmp))
+ continue;
+ pdata->led[i].id = leds->devtype->led_min + tmp;
+
+ if (!of_property_read_string(child, "label", &str))
+ pdata->led[i].name = str;
+ if (!of_property_read_string(child, "linux,default-trigger",
+ &str))
+ pdata->led[i].default_trigger = str;
+
+ i++;
}
- leds = devm_kzalloc(&pdev->dev, num_leds * sizeof(struct mc13xxx_led) +
- sizeof(struct mc13xxx_leds), GFP_KERNEL);
+ pdata->num_leds = i;
+ ret = i > 0 ? 0 : -ENODATA;
+
+out_node_put:
+ of_node_put(parent);
+
+ return ret ? ERR_PTR(ret) : pdata;
+}
+#else
+static inline struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
+ struct platform_device *pdev)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif
+
+static int __init mc13xxx_led_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(dev);
+ struct mc13xxx *mcdev = dev_get_drvdata(dev->parent);
+ struct mc13xxx_led_devtype *devtype =
+ (struct mc13xxx_led_devtype *)pdev->id_entry->driver_data;
+ struct mc13xxx_leds *leds;
+ int i, id, ret = -ENODATA;
+ u32 init_led = 0;
+
+ leds = devm_kzalloc(dev, sizeof(*leds), GFP_KERNEL);
if (!leds)
return -ENOMEM;
leds->devtype = devtype;
- leds->num_leds = num_leds;
+ leds->master = mcdev;
platform_set_drvdata(pdev, leds);
+ if (dev->parent->of_node) {
+ pdata = mc13xxx_led_probe_dt(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ } else if (!pdata)
+ return -ENODATA;
+
+ leds->num_leds = pdata->num_leds;
+
+ if ((leds->num_leds < 1) ||
+ (leds->num_leds > (devtype->led_max - devtype->led_min + 1))) {
+ dev_err(dev, "Invalid LED count %d\n", leds->num_leds);
+ return -EINVAL;
+ }
+
+ leds->led = devm_kzalloc(dev, leds->num_leds * sizeof(*leds->led),
+ GFP_KERNEL);
+ if (!leds->led)
+ return -ENOMEM;
+
for (i = 0; i < devtype->num_regs; i++) {
- reg = pdata->led_control[i];
- WARN_ON(reg >= (1 << 24));
- ret = mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), reg);
+ ret = mc13xxx_reg_write(mcdev, leds->devtype->ledctrl_base + i,
+ pdata->led_control[i]);
if (ret)
return ret;
}
- for (i = 0; i < num_leds; i++) {
+ for (i = 0; i < leds->num_leds; i++) {
const char *name, *trig;
ret = -EINVAL;
@@ -180,30 +244,29 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev)
trig = pdata->led[i].default_trigger;
if ((id > devtype->led_max) || (id < devtype->led_min)) {
- dev_err(&pdev->dev, "Invalid ID %i\n", id);
+ dev_err(dev, "Invalid ID %i\n", id);
break;
}
if (init_led & (1 << id)) {
- dev_warn(&pdev->dev,
- "LED %i already initialized\n", id);
+ dev_warn(dev, "LED %i already initialized\n", id);
break;
}
init_led |= 1 << id;
leds->led[i].id = id;
- leds->led[i].master = mcdev;
+ leds->led[i].leds = leds;
leds->led[i].cdev.name = name;
leds->led[i].cdev.default_trigger = trig;
+ leds->led[i].cdev.flags = LED_CORE_SUSPENDRESUME;
leds->led[i].cdev.brightness_set = mc13xxx_led_set;
- leds->led[i].cdev.brightness = LED_OFF;
+ leds->led[i].cdev.max_brightness = mc13xxx_max_brightness(id);
INIT_WORK(&leds->led[i].work, mc13xxx_led_work);
- ret = led_classdev_register(pdev->dev.parent,
- &leds->led[i].cdev);
+ ret = led_classdev_register(dev->parent, &leds->led[i].cdev);
if (ret) {
- dev_err(&pdev->dev, "Failed to register LED %i\n", id);
+ dev_err(dev, "Failed to register LED %i\n", id);
break;
}
}
@@ -219,7 +282,6 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev)
static int mc13xxx_led_remove(struct platform_device *pdev)
{
- struct mc13xxx *mcdev = dev_get_drvdata(pdev->dev.parent);
struct mc13xxx_leds *leds = platform_get_drvdata(pdev);
int i;
@@ -228,9 +290,6 @@ static int mc13xxx_led_remove(struct platform_device *pdev)
cancel_work_sync(&leds->led[i].work);
}
- for (i = 0; i < leds->devtype->num_regs; i++)
- mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), 0);
-
return 0;
}
@@ -238,17 +297,27 @@ static const struct mc13xxx_led_devtype mc13783_led_devtype = {
.led_min = MC13783_LED_MD,
.led_max = MC13783_LED_B3,
.num_regs = 6,
+ .ledctrl_base = 51,
};
static const struct mc13xxx_led_devtype mc13892_led_devtype = {
.led_min = MC13892_LED_MD,
.led_max = MC13892_LED_B,
.num_regs = 4,
+ .ledctrl_base = 51,
+};
+
+static const struct mc13xxx_led_devtype mc34708_led_devtype = {
+ .led_min = MC34708_LED_R,
+ .led_max = MC34708_LED_G,
+ .num_regs = 1,
+ .ledctrl_base = 54,
};
static const struct platform_device_id mc13xxx_led_id_table[] = {
{ "mc13783-led", (kernel_ulong_t)&mc13783_led_devtype, },
{ "mc13892-led", (kernel_ulong_t)&mc13892_led_devtype, },
+ { "mc34708-led", (kernel_ulong_t)&mc34708_led_devtype, },
{ }
};
MODULE_DEVICE_TABLE(platform, mc13xxx_led_id_table);
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 2f9f141084ba..e97f443a6e07 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -21,7 +21,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index c7a4230233ea..efa625883c83 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -23,7 +23,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio.h>
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c
index 98cae529373f..c9d906098466 100644
--- a/drivers/leds/leds-ot200.c
+++ b/drivers/leds/leds-ot200.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/leds.h>
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 605047428b5a..7d0aaed1e23a 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/fb.h>
@@ -84,6 +83,15 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
(sizeof(struct led_pwm_data) * num_leds);
}
+static void led_pwm_cleanup(struct led_pwm_priv *priv)
+{
+ while (priv->num_leds--) {
+ led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
+ if (priv->leds[priv->num_leds].can_sleep)
+ cancel_work_sync(&priv->leds[priv->num_leds].work);
+ }
+}
+
static int led_pwm_create_of(struct platform_device *pdev,
struct led_pwm_priv *priv)
{
@@ -131,8 +139,7 @@ static int led_pwm_create_of(struct platform_device *pdev,
return 0;
err:
- while (priv->num_leds--)
- led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
+ led_pwm_cleanup(priv);
return ret;
}
@@ -200,8 +207,8 @@ static int led_pwm_probe(struct platform_device *pdev)
return 0;
err:
- while (i--)
- led_classdev_unregister(&priv->leds[i].cdev);
+ priv->num_leds = i;
+ led_pwm_cleanup(priv);
return ret;
}
@@ -209,13 +216,8 @@ err:
static int led_pwm_remove(struct platform_device *pdev)
{
struct led_pwm_priv *priv = platform_get_drvdata(pdev);
- int i;
- for (i = 0; i < priv->num_leds; i++) {
- led_classdev_unregister(&priv->leds[i].cdev);
- if (priv->leds[i].can_sleep)
- cancel_work_sync(&priv->leds[i].work);
- }
+ led_pwm_cleanup(priv);
return 0;
}
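The leds-pwm rework above funnels all unwinding through one led_pwm_cleanup() helper: the probe error path records how many LEDs were actually set up (priv->num_leds = i) and then calls the same routine that remove() uses, so the partially-initialized and fully-initialized cases cannot drift apart. A compact sketch of the shape of that pattern, with illustrative names:

#include <linux/types.h>
#include <linux/errno.h>

struct my_priv {
	int num_items;
	bool up[8];			/* illustrative per-item state */
};

static void my_cleanup(struct my_priv *priv)
{
	/* Tears down exactly the items that were brought up, last first. */
	while (priv->num_items--)
		priv->up[priv->num_items] = false;
}

static int my_probe(struct my_priv *priv, int total, int fail_at)
{
	int i;

	for (i = 0; i < total; i++) {
		if (i == fail_at)
			goto err;
		priv->up[i] = true;
	}
	priv->num_items = total;
	return 0;
err:
	priv->num_items = i;	/* record how far probe got ... */
	my_cleanup(priv);	/* ... and reuse the remove-time helper */
	return -ENODEV;
}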
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 98174e7240ee..28988b7b4fab 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -12,7 +12,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/gpio.h>
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 5b8f938a8d73..2eb3ef62962b 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -63,7 +63,7 @@ MODULE_LICENSE("GPL");
/*
* PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives.
*/
-static DEFINE_PCI_DEVICE_TABLE(ich7_lpc_pci_id) = {
+static const struct pci_device_id ich7_lpc_pci_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_30) },
@@ -78,7 +78,7 @@ static int __init ss4200_led_dmi_callback(const struct dmi_system_id *id)
return 1;
}
-static bool __initdata nodetect;
+static bool nodetect;
module_param_named(nodetect, nodetect, bool, 0);
MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index 0a1a13f3a6a5..e72c974142d0 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/leds.h>
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 3f75fd22fd49..4133ffe29015 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/err.h>
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 118335eccc56..1c3ee9fcaf34 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -26,6 +26,7 @@
#include <linux/percpu.h>
#include <linux/syscore_ops.h>
#include <linux/rwsem.h>
+#include <linux/cpu.h>
#include "../leds.h"
#define MAX_NAME_LEN 8
@@ -92,6 +93,26 @@ static struct syscore_ops ledtrig_cpu_syscore_ops = {
.resume = ledtrig_cpu_syscore_resume,
};
+static int ledtrig_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ ledtrig_cpu(CPU_LED_START);
+ break;
+ case CPU_DYING:
+ ledtrig_cpu(CPU_LED_STOP);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ledtrig_cpu_nb = {
+ .notifier_call = ledtrig_cpu_notify,
+};
+
static int __init ledtrig_cpu_init(void)
{
int cpu;
@@ -113,6 +134,7 @@ static int __init ledtrig_cpu_init(void)
}
register_syscore_ops(&ledtrig_cpu_syscore_ops);
+ register_cpu_notifier(&ledtrig_cpu_nb);
pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n");
@@ -124,6 +146,8 @@ static void __exit ledtrig_cpu_exit(void)
{
int cpu;
+ unregister_cpu_notifier(&ledtrig_cpu_nb);
+
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index bfb39bb56ef1..e8b55c3a6170 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -887,7 +887,7 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
* _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
* they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
*/
-static void do_set_pte(struct lg_cpu *cpu, int idx,
+static void __guest_set_pte(struct lg_cpu *cpu, int idx,
unsigned long vaddr, pte_t gpte)
{
/* Look up the matching shadow page directory entry. */
@@ -960,13 +960,13 @@ void guest_set_pte(struct lg_cpu *cpu,
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
if (cpu->lg->pgdirs[i].pgdir)
- do_set_pte(cpu, i, vaddr, gpte);
+ __guest_set_pte(cpu, i, vaddr, gpte);
} else {
/* Is this page table one we have a shadow for? */
int pgdir = find_pgdir(cpu->lg, gpgdir);
if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
/* If so, do the update. */
- do_set_pte(cpu, pgdir, vaddr, gpte);
+ __guest_set_pte(cpu, pgdir, vaddr, gpte);
}
}
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 04a50498f257..9e9c56758a08 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -38,7 +38,7 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/machdep.h>
@@ -193,8 +193,7 @@ static int adb_scan_bus(void)
break;
noMovement = 0;
- }
- else {
+ } else {
/*
* No devices left at address i; move the
* one(s) we moved to `highFree' back to i.
@@ -263,7 +262,7 @@ adb_reset_bus(void)
/*
* notify clients before sleep
*/
-static int adb_suspend(struct platform_device *dev, pm_message_t state)
+static int __adb_suspend(struct platform_device *dev, pm_message_t state)
{
adb_got_sleep = 1;
/* We need to get a lock on the probe thread */
@@ -276,10 +275,25 @@ static int adb_suspend(struct platform_device *dev, pm_message_t state)
return 0;
}
+static int adb_suspend(struct device *dev)
+{
+ return __adb_suspend(to_platform_device(dev), PMSG_SUSPEND);
+}
+
+static int adb_freeze(struct device *dev)
+{
+ return __adb_suspend(to_platform_device(dev), PMSG_FREEZE);
+}
+
+static int adb_poweroff(struct device *dev)
+{
+ return __adb_suspend(to_platform_device(dev), PMSG_HIBERNATE);
+}
+
/*
* reset bus after sleep
*/
-static int adb_resume(struct platform_device *dev)
+static int __adb_resume(struct platform_device *dev)
{
adb_got_sleep = 0;
up(&adb_probe_mutex);
@@ -287,6 +301,11 @@ static int adb_resume(struct platform_device *dev)
return 0;
}
+
+static int adb_resume(struct device *dev)
+{
+ return __adb_resume(to_platform_device(dev));
+}
#endif /* CONFIG_PM */
static int __init adb_init(void)
@@ -502,7 +521,7 @@ void
adb_input(unsigned char *buf, int nb, int autopoll)
{
int i, id;
- static int dump_adb_input = 0;
+ static int dump_adb_input;
unsigned long flags;
void (*handler)(unsigned char *, int, int);
@@ -624,8 +643,7 @@ do_adb_query(struct adb_request *req)
{
int ret = -EINVAL;
- switch(req->data[1])
- {
+ switch(req->data[1]) {
case ADB_QUERY_GETDEVINFO:
if (req->nbytes < 3)
break;
@@ -697,7 +715,7 @@ static ssize_t adb_read(struct file *file, char __user *buf,
int ret = 0;
struct adbdev_state *state = file->private_data;
struct adb_request *req;
- DECLARE_WAITQUEUE(wait,current);
+ DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
if (count < 2)
@@ -794,8 +812,8 @@ static ssize_t adb_write(struct file *file, const char __user *buf,
}
/* Special case for ADB_BUSRESET request, all others are sent to
the controller */
- else if ((req->data[0] == ADB_PACKET)&&(count > 1)
- &&(req->data[1] == ADB_BUSRESET)) {
+ else if ((req->data[0] == ADB_PACKET) && (count > 1)
+ && (req->data[1] == ADB_BUSRESET)) {
ret = do_adb_reset_bus();
up(&adb_probe_mutex);
atomic_dec(&state->n_pending);
@@ -831,14 +849,25 @@ static const struct file_operations adb_fops = {
.release = adb_release,
};
+#ifdef CONFIG_PM
+static const struct dev_pm_ops adb_dev_pm_ops = {
+ .suspend = adb_suspend,
+ .resume = adb_resume,
+ /* Hibernate hooks */
+ .freeze = adb_freeze,
+ .thaw = adb_resume,
+ .poweroff = adb_poweroff,
+ .restore = adb_resume,
+};
+#endif
+
static struct platform_driver adb_pfdrv = {
.driver = {
.name = "adb",
- },
#ifdef CONFIG_PM
- .suspend = adb_suspend,
- .resume = adb_resume,
+ .pm = &adb_dev_pm_ops,
#endif
+ },
};
static struct platform_device adb_pfdev = {
diff --git a/drivers/mcb/Kconfig b/drivers/mcb/Kconfig
new file mode 100644
index 000000000000..e9a6976e1010
--- /dev/null
+++ b/drivers/mcb/Kconfig
@@ -0,0 +1,31 @@
+#
+# MEN Chameleon Bus (MCB) support
+#
+
+menuconfig MCB
+ tristate "MCB support"
+ default n
+ depends on HAS_IOMEM
+ help
+
+ The MCB (MEN Chameleon Bus) is a bus specific to MEN Mikroelektronik
+ FPGA based devices. It is used to identify MCB based IP-Cores within
+ an FPGA and provide the necessary framework for instantiating drivers
+ for these devices.
+
+ If built as a module, the module is called mcb.ko
+
+if MCB
+config MCB_PCI
+ tristate "PCI based MCB carrier"
+ default n
+ depends on PCI
+ help
+
+ This is an MCB carrier on a PCI device. Both PCI attached on-board
+ FPGAs as well as CompactPCI attached MCB FPGAs are supported with
+ this driver.
+
+ If built as a module, the module is called mcb-pci.ko
+
+endif # MCB
diff --git a/drivers/mcb/Makefile b/drivers/mcb/Makefile
new file mode 100644
index 000000000000..1ae141311def
--- /dev/null
+++ b/drivers/mcb/Makefile
@@ -0,0 +1,7 @@
+
+obj-$(CONFIG_MCB) += mcb.o
+
+mcb-y += mcb-core.o
+mcb-y += mcb-parse.o
+
+obj-$(CONFIG_MCB_PCI) += mcb-pci.o
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
new file mode 100644
index 000000000000..bbe12932d404
--- /dev/null
+++ b/drivers/mcb/mcb-core.c
@@ -0,0 +1,414 @@
+/*
+ * MEN Chameleon Bus.
+ *
+ * Copyright (C) 2013 MEN Mikroelektronik GmbH (www.men.de)
+ * Author: Johannes Thumshirn <johannes.thumshirn@men.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/mcb.h>
+
+static DEFINE_IDA(mcb_ida);
+
+static const struct mcb_device_id *mcb_match_id(const struct mcb_device_id *ids,
+ struct mcb_device *dev)
+{
+ if (ids) {
+ while (ids->device) {
+ if (ids->device == dev->id)
+ return ids;
+ ids++;
+ }
+ }
+
+ return NULL;
+}
+
+static int mcb_match(struct device *dev, struct device_driver *drv)
+{
+ struct mcb_driver *mdrv = to_mcb_driver(drv);
+ struct mcb_device *mdev = to_mcb_device(dev);
+ const struct mcb_device_id *found_id;
+
+ found_id = mcb_match_id(mdrv->id_table, mdev);
+ if (found_id)
+ return 1;
+
+ return 0;
+}
+
+static int mcb_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mcb_device *mdev = to_mcb_device(dev);
+ int ret;
+
+ ret = add_uevent_var(env, "MODALIAS=mcb:16z%03d", mdev->id);
+ if (ret)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int mcb_probe(struct device *dev)
+{
+ struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
+ struct mcb_device *mdev = to_mcb_device(dev);
+ const struct mcb_device_id *found_id;
+
+ found_id = mcb_match_id(mdrv->id_table, mdev);
+ if (!found_id)
+ return -ENODEV;
+
+ return mdrv->probe(mdev, found_id);
+}
+
+static int mcb_remove(struct device *dev)
+{
+ struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
+ struct mcb_device *mdev = to_mcb_device(dev);
+
+ mdrv->remove(mdev);
+
+ put_device(&mdev->dev);
+
+ return 0;
+}
+
+static void mcb_shutdown(struct device *dev)
+{
+ struct mcb_device *mdev = to_mcb_device(dev);
+ struct mcb_driver *mdrv = mdev->driver;
+
+ if (mdrv && mdrv->shutdown)
+ mdrv->shutdown(mdev);
+}
+
+static struct bus_type mcb_bus_type = {
+ .name = "mcb",
+ .match = mcb_match,
+ .uevent = mcb_uevent,
+ .probe = mcb_probe,
+ .remove = mcb_remove,
+ .shutdown = mcb_shutdown,
+};
+
+/**
+ * __mcb_register_driver() - Register a @mcb_driver with the system
+ * @drv: The @mcb_driver
+ * @owner: The @mcb_driver's module
+ * @mod_name: The name of the @mcb_driver's module
+ *
+ * Register a @mcb_driver with the system. Sanity-checks that both the
+ * .probe and .remove callbacks are provided by the driver.
+ */
+int __mcb_register_driver(struct mcb_driver *drv, struct module *owner,
+ const char *mod_name)
+{
+ if (!drv->probe || !drv->remove)
+ return -EINVAL;
+
+ drv->driver.owner = owner;
+ drv->driver.bus = &mcb_bus_type;
+ drv->driver.mod_name = mod_name;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__mcb_register_driver);
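As a usage illustration (not part of this patch), a minimal client driver for the new bus could look roughly like the sketch below. Only interfaces visible in this file are used; the callback prototypes and the struct mcb_driver/mcb_device_id layouts are inferred from mcb_match_id(), mcb_probe() and mcb_remove() above, and all foo_* names are hypothetical.

/*
 * Hypothetical MCB client driver, using only interfaces shown in this file.
 * Callback prototypes are inferred from mcb_probe()/mcb_remove() above.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mcb.h>

static int foo_probe(struct mcb_device *mdev, const struct mcb_device_id *id)
{
	dev_info(&mdev->dev, "found IP core 16z%03d\n", mdev->id);
	return 0;
}

static void foo_remove(struct mcb_device *mdev)
{
	/* tear down whatever foo_probe() set up */
}

static const struct mcb_device_id foo_ids[] = {
	{ .device = 0x123 },	/* matched against mcb_device->id in mcb_match_id() */
	{ /* sentinel */ }
};

static struct mcb_driver foo_driver = {
	.driver		= { .name = "foo" },
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_ids,
};

static int __init foo_init(void)
{
	return __mcb_register_driver(&foo_driver, THIS_MODULE, KBUILD_MODNAME);
}

static void __exit foo_exit(void)
{
	mcb_unregister_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");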
+
+/**
+ * mcb_unregister_driver() - Unregister a @mcb_driver from the system
+ * @drv: The @mcb_driver
+ *
+ * Unregister a @mcb_driver from the system.
+ */
+void mcb_unregister_driver(struct mcb_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(mcb_unregister_driver);
+
+static void mcb_release_dev(struct device *dev)
+{
+ struct mcb_device *mdev = to_mcb_device(dev);
+
+ mcb_bus_put(mdev->bus);
+ kfree(mdev);
+}
+
+/**
+ * mcb_device_register() - Register a mcb_device
+ * @bus: The @mcb_bus of the device
+ * @dev: The @mcb_device
+ *
+ * Register a specific @mcb_device on a @mcb_bus and with the system itself.
+ */
+int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
+{
+ int ret;
+ int device_id;
+
+ device_initialize(&dev->dev);
+ dev->dev.bus = &mcb_bus_type;
+ dev->dev.parent = bus->dev.parent;
+ dev->dev.release = mcb_release_dev;
+
+ device_id = dev->id;
+ dev_set_name(&dev->dev, "mcb%d-16z%03d-%d:%d:%d",
+ bus->bus_nr, device_id, dev->inst, dev->group, dev->var);
+
+ ret = device_add(&dev->dev);
+ if (ret < 0) {
+ pr_err("Failed registering device 16z%03d on bus mcb%d (%d)\n",
+ device_id, bus->bus_nr, ret);
+ goto out;
+ }
+
+ return 0;
+
+out:
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mcb_device_register);
+
+/**
+ * mcb_alloc_bus() - Allocate a new @mcb_bus
+ *
+ * Allocate a new @mcb_bus.
+ */
+struct mcb_bus *mcb_alloc_bus(void)
+{
+ struct mcb_bus *bus;
+ int bus_nr;
+
+ bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL);
+ if (!bus)
+ return NULL;
+
+ bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
+ if (bus_nr < 0) {
+ kfree(bus);
+ return ERR_PTR(bus_nr);
+ }
+
+ INIT_LIST_HEAD(&bus->children);
+ bus->bus_nr = bus_nr;
+
+ return bus;
+}
+EXPORT_SYMBOL_GPL(mcb_alloc_bus);
+
+static int __mcb_devices_unregister(struct device *dev, void *data)
+{
+ device_unregister(dev);
+ return 0;
+}
+
+static void mcb_devices_unregister(struct mcb_bus *bus)
+{
+ bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_devices_unregister);
+}
+
+/**
+ * mcb_release_bus() - Free a @mcb_bus
+ * @bus: The @mcb_bus to release
+ *
+ * Release an allocated @mcb_bus from the system.
+ */
+void mcb_release_bus(struct mcb_bus *bus)
+{
+ mcb_devices_unregister(bus);
+
+ ida_simple_remove(&mcb_ida, bus->bus_nr);
+
+ kfree(bus);
+}
+EXPORT_SYMBOL_GPL(mcb_release_bus);
+
+/**
+ * mcb_bus_get() - Increment refcnt
+ * @bus: The @mcb_bus
+ *
+ * Get a @mcb_bus' ref
+ */
+struct mcb_bus *mcb_bus_get(struct mcb_bus *bus)
+{
+ if (bus)
+ get_device(&bus->dev);
+
+ return bus;
+}
+EXPORT_SYMBOL_GPL(mcb_bus_get);
+
+/**
+ * mcb_bus_put() - Decrement refcnt
+ * @bus: The @mcb_bus
+ *
+ * Release a @mcb_bus' ref
+ */
+void mcb_bus_put(struct mcb_bus *bus)
+{
+ if (bus)
+ put_device(&bus->dev);
+}
+EXPORT_SYMBOL_GPL(mcb_bus_put);
+
+/**
+ * mcb_alloc_dev() - Allocate a device
+ * @bus: The @mcb_bus the device is part of
+ *
+ * Allocate a @mcb_device and attach it to @bus.
+ */
+struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus)
+{
+ struct mcb_device *dev;
+
+ dev = kzalloc(sizeof(struct mcb_device), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ INIT_LIST_HEAD(&dev->bus_list);
+ dev->bus = bus;
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(mcb_alloc_dev);
+
+/**
+ * mcb_free_dev() - Free @mcb_device
+ * @dev: The device to free
+ *
+ * Free a @mcb_device
+ */
+void mcb_free_dev(struct mcb_device *dev)
+{
+ kfree(dev);
+}
+EXPORT_SYMBOL_GPL(mcb_free_dev);
+
+static int __mcb_bus_add_devices(struct device *dev, void *data)
+{
+ struct mcb_device *mdev = to_mcb_device(dev);
+ int retval;
+
+ if (mdev->is_added)
+ return 0;
+
+ retval = device_attach(dev);
+ if (retval < 0)
+ dev_err(dev, "Error adding device (%d)\n", retval);
+
+ mdev->is_added = true;
+
+ return 0;
+}
+
+static int __mcb_bus_add_child(struct device *dev, void *data)
+{
+ struct mcb_device *mdev = to_mcb_device(dev);
+ struct mcb_bus *child;
+
+ BUG_ON(!mdev->is_added);
+ child = mdev->subordinate;
+
+ if (child)
+ mcb_bus_add_devices(child);
+
+ return 0;
+}
+
+/**
+ * mcb_bus_add_devices() - Add devices in the bus' internal device list
+ * @bus: The @mcb_bus whose devices should be added
+ *
+ * Add devices in the bus' internal device list to the system.
+ */
+void mcb_bus_add_devices(const struct mcb_bus *bus)
+{
+ bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_devices);
+ bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_child);
+}
+EXPORT_SYMBOL_GPL(mcb_bus_add_devices);
+
+/**
+ * mcb_request_mem() - Request memory
+ * @dev: The @mcb_device the memory is for
+ * @name: The name for the memory reference.
+ *
+ * Request memory for a @mcb_device. If @name is NULL the driver name will
+ * be used.
+ */
+struct resource *mcb_request_mem(struct mcb_device *dev, const char *name)
+{
+ struct resource *mem;
+ u32 size;
+
+ if (!name)
+ name = dev->dev.driver->name;
+
+ size = resource_size(&dev->mem);
+
+ mem = request_mem_region(dev->mem.start, size, name);
+ if (!mem)
+ return ERR_PTR(-EBUSY);
+
+ return mem;
+}
+EXPORT_SYMBOL_GPL(mcb_request_mem);
+
+/**
+ * mcb_release_mem() - Release memory requested by device
+ * @mem: The memory resource to be released
+ *
+ * Release memory that was previously requested via mcb_request_mem().
+ */
+void mcb_release_mem(struct resource *mem)
+{
+ u32 size;
+
+ size = resource_size(mem);
+ release_mem_region(mem->start, size);
+}
+EXPORT_SYMBOL_GPL(mcb_release_mem);
+
+/**
+ * mcb_get_irq() - Get device's IRQ number
+ * @dev: The @mcb_device the IRQ is for
+ *
+ * Get the IRQ number of a given @mcb_device.
+ */
+int mcb_get_irq(struct mcb_device *dev)
+{
+ struct resource *irq = &dev->irq;
+
+ return irq->start;
+}
+EXPORT_SYMBOL_GPL(mcb_get_irq);
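Continuing the hypothetical foo driver sketched after __mcb_register_driver() above, a probe implementation would typically pair mcb_request_mem()/mcb_release_mem() with an ioremap of the returned resource and use mcb_get_irq() for the interrupt position. A rough sketch, with error paths trimmed and all names invented:

/* Hypothetical probe body for the foo driver, error paths trimmed. */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/mcb.h>

static int foo_probe(struct mcb_device *mdev, const struct mcb_device_id *id)
{
	struct resource *mem;
	void __iomem *regs;
	int irq;

	mem = mcb_request_mem(mdev, NULL);	/* NULL: fall back to the driver name */
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	regs = ioremap(mem->start, resource_size(mem));
	if (!regs) {
		mcb_release_mem(mem);
		return -ENOMEM;
	}

	irq = mcb_get_irq(mdev);	/* start of the IRQ resource, see chameleon_parse_gdd() */

	/* ... program the IP core through regs, hook up irq, save driver state ... */
	return 0;
}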
+
+static int mcb_init(void)
+{
+ return bus_register(&mcb_bus_type);
+}
+
+static void mcb_exit(void)
+{
+ bus_unregister(&mcb_bus_type);
+}
+
+/* mcb must be initialized after PCI but before the chameleon drivers.
+ * That means we must use some initcall between subsys_initcall and
+ * device_initcall.
+ */
+fs_initcall(mcb_init);
+module_exit(mcb_exit);
+
+MODULE_DESCRIPTION("MEN Chameleon Bus Driver");
+MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h
new file mode 100644
index 000000000000..f956ef26c0ce
--- /dev/null
+++ b/drivers/mcb/mcb-internal.h
@@ -0,0 +1,118 @@
+#ifndef __MCB_INTERNAL
+#define __MCB_INTERNAL
+
+#include <linux/types.h>
+
+#define PCI_VENDOR_ID_MEN 0x1a88
+#define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45
+#define CHAMELEON_FILENAME_LEN 12
+#define CHAMELEONV2_MAGIC 0xabce
+
+enum chameleon_descriptor_type {
+ CHAMELEON_DTYPE_GENERAL = 0x0,
+ CHAMELEON_DTYPE_BRIDGE = 0x1,
+ CHAMELEON_DTYPE_CPU = 0x2,
+ CHAMELEON_DTYPE_BAR = 0x3,
+ CHAMELEON_DTYPE_END = 0xf,
+};
+
+enum chameleon_bus_type {
+ CHAMELEON_BUS_WISHBONE,
+ CHAMELEON_BUS_AVALON,
+ CHAMELEON_BUS_LPC,
+ CHAMELEON_BUS_ISA,
+};
+
+/**
+ * struct chameleon_fpga_header
+ *
+ * @revision: Revision of Chameleon table in FPGA
+ * @model: Chameleon table model ASCII char
+ * @minor: Revision minor
+ * @bus_type: Bus type (usually %CHAMELEON_BUS_WISHBONE)
+ * @magic: Chameleon header magic number (0xabce for version 2)
+ * @reserved: Reserved
+ * @filename: Filename of FPGA bitstream
+ */
+struct chameleon_fpga_header {
+ u8 revision;
+ char model;
+ u8 minor;
+ u8 bus_type;
+ u16 magic;
+ u16 reserved;
+ /* This one has no '\0' at the end!!! */
+ char filename[CHAMELEON_FILENAME_LEN];
+} __packed;
+#define HEADER_MAGIC_OFFSET 0x4
+
+/**
+ * struct chameleon_gdd - Chameleon General Device Descriptor
+ *
+ * @irq: the position in the FPGA's IRQ controller vector
+ * @rev: the revision of the variant's implementation
+ * @var: the variant of the IP core
+ * @dev: the device ID of the IP core
+ * @dtype: device descriptor type
+ * @bar: BAR offset that must be added to module offset
+ * @inst: the instance number of the device, 0 is first instance
+ * @group: the group the device belongs to (0 = no group)
+ * @reserved: reserved
+ * @offset: beginning of the address window of desired module
+ * @size: size of the module's address window
+ */
+struct chameleon_gdd {
+ __le32 reg1;
+ __le32 reg2;
+ __le32 offset;
+ __le32 size;
+} __packed;
+
+/* GDD Register 1 fields */
+#define GDD_IRQ(x) ((x) & 0x1f)
+#define GDD_REV(x) (((x) >> 5) & 0x3f)
+#define GDD_VAR(x) (((x) >> 11) & 0x3f)
+#define GDD_DEV(x) (((x) >> 18) & 0x3ff)
+#define GDD_DTY(x) (((x) >> 28) & 0xf)
+
+/* GDD Register 2 fields */
+#define GDD_BAR(x) ((x) & 0x7)
+#define GDD_INS(x) (((x) >> 3) & 0x3f)
+#define GDD_GRP(x) (((x) >> 9) & 0x3f)
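Purely for illustration (the helper name and the register values are invented), the two packed GDD registers decode with the macros above as follows; compare chameleon_parse_gdd() in mcb-parse.c, which performs the same unpacking for real:

/* Illustration only: unpacking a GDD register pair with the macros above. */
#include <linux/types.h>
#include <linux/printk.h>

static void chameleon_show_gdd(u32 reg1, u32 reg2)
{
	pr_debug("irq=%u rev=%u var=%u dev=16z%03u dtype=%u\n",
		 GDD_IRQ(reg1), GDD_REV(reg1), GDD_VAR(reg1),
		 GDD_DEV(reg1), GDD_DTY(reg1));
	pr_debug("bar=%u inst=%u group=%u\n",
		 GDD_BAR(reg2), GDD_INS(reg2), GDD_GRP(reg2));
}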
+
+/**
+ * struct chameleon_bdd - Chameleon Bridge Device Descriptor
+ *
+ * @irq: the position in the FPGA's IRQ controller vector
+ * @rev: the revision of the variant's implementation
+ * @var: the variant of the IP core
+ * @dev: the device ID of the IP core
+ * @dtype: device descriptor type
+ * @bar: BAR offset that must be added to module offset
+ * @inst: the instance number of the device, 0 is first instance
+ * @dbar: destination bar from the bus _behind_ the bridge
+ * @chamoff: offset within the BAR of the source bus
+ * @offset:
+ * @size:
+ */
+struct chameleon_bdd {
+ unsigned int irq:6;
+ unsigned int rev:6;
+ unsigned int var:6;
+ unsigned int dev:10;
+ unsigned int dtype:4;
+ unsigned int bar:3;
+ unsigned int inst:6;
+ unsigned int dbar:3;
+ unsigned int group:6;
+ unsigned int reserved:14;
+ u32 chamoff;
+ u32 offset;
+ u32 size;
+} __packed;
+
+int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
+ void __iomem *base);
+
+#endif
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
new file mode 100644
index 000000000000..d1278b5f3028
--- /dev/null
+++ b/drivers/mcb/mcb-parse.c
@@ -0,0 +1,159 @@
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/mcb.h>
+
+#include "mcb-internal.h"
+
+struct mcb_parse_priv {
+ phys_addr_t mapbase;
+ void __iomem *base;
+};
+
+#define for_each_chameleon_cell(dtype, p) \
+ for ((dtype) = get_next_dtype((p)); \
+ (dtype) != CHAMELEON_DTYPE_END; \
+ (dtype) = get_next_dtype((p)))
+
+static inline uint32_t get_next_dtype(void __iomem *p)
+{
+ uint32_t dtype;
+
+ dtype = readl(p);
+ return dtype >> 28;
+}
+
+static int chameleon_parse_bdd(struct mcb_bus *bus,
+ phys_addr_t mapbase,
+ void __iomem *base)
+{
+ return 0;
+}
+
+static int chameleon_parse_gdd(struct mcb_bus *bus,
+ phys_addr_t mapbase,
+ void __iomem *base)
+{
+ struct chameleon_gdd __iomem *gdd =
+ (struct chameleon_gdd __iomem *) base;
+ struct mcb_device *mdev;
+ u32 offset;
+ u32 size;
+ int ret;
+ __le32 reg1;
+ __le32 reg2;
+
+ mdev = mcb_alloc_dev(bus);
+ if (!mdev)
+ return -ENOMEM;
+
+ reg1 = readl(&gdd->reg1);
+ reg2 = readl(&gdd->reg2);
+ offset = readl(&gdd->offset);
+ size = readl(&gdd->size);
+
+ mdev->id = GDD_DEV(reg1);
+ mdev->rev = GDD_REV(reg1);
+ mdev->var = GDD_VAR(reg1);
+ mdev->bar = GDD_BAR(reg1);
+ mdev->group = GDD_GRP(reg2);
+ mdev->inst = GDD_INS(reg2);
+
+ pr_debug("Found a 16z%03d\n", mdev->id);
+
+ mdev->irq.start = GDD_IRQ(reg1);
+ mdev->irq.end = GDD_IRQ(reg1);
+ mdev->irq.flags = IORESOURCE_IRQ;
+
+ mdev->mem.start = mapbase + offset;
+ mdev->mem.end = mdev->mem.start + size - 1;
+ mdev->mem.flags = IORESOURCE_MEM;
+
+ mdev->is_added = false;
+
+ ret = mcb_device_register(bus, mdev);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ mcb_free_dev(mdev);
+
+ return ret;
+}
+
+int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
+ void __iomem *base)
+{
+ char __iomem *p = base;
+ struct chameleon_fpga_header *header;
+ uint32_t dtype;
+ int num_cells = 0;
+ int ret = 0;
+ u32 hsize;
+
+ hsize = sizeof(struct chameleon_fpga_header);
+
+ header = kzalloc(hsize, GFP_KERNEL);
+ if (!header)
+ return -ENOMEM;
+
+ /* Extract header information */
+ memcpy_fromio(header, p, hsize);
+ /* We only support chameleon v2 at the moment */
+ header->magic = le16_to_cpu(header->magic);
+ if (header->magic != CHAMELEONV2_MAGIC) {
+ pr_err("Unsupported chameleon version 0x%x\n",
+ header->magic);
+ kfree(header);
+ return -ENODEV;
+ }
+ p += hsize;
+
+ pr_debug("header->revision = %d\n", header->revision);
+ pr_debug("header->model = 0x%x ('%c')\n", header->model,
+ header->model);
+ pr_debug("header->minor = %d\n", header->minor);
+ pr_debug("header->bus_type = 0x%x\n", header->bus_type);
+
+ pr_debug("header->magic = 0x%x\n", header->magic);
+ pr_debug("header->filename = \"%.*s\"\n", CHAMELEON_FILENAME_LEN,
+ header->filename);
+
+ for_each_chameleon_cell(dtype, p) {
+ switch (dtype) {
+ case CHAMELEON_DTYPE_GENERAL:
+ ret = chameleon_parse_gdd(bus, mapbase, p);
+ if (ret < 0)
+ goto out;
+ p += sizeof(struct chameleon_gdd);
+ break;
+ case CHAMELEON_DTYPE_BRIDGE:
+ chameleon_parse_bdd(bus, mapbase, p);
+ p += sizeof(struct chameleon_bdd);
+ break;
+ case CHAMELEON_DTYPE_END:
+ break;
+ default:
+ pr_err("Invalid chameleon descriptor type 0x%x\n",
+ dtype);
+ ret = -EINVAL;
+ goto out;
+ }
+ num_cells++;
+ }
+
+ if (num_cells == 0)
+ num_cells = -EINVAL;
+
+ kfree(header);
+ return num_cells;
+
+out:
+ kfree(header);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chameleon_parse_cells);
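chameleon_parse_cells() is what a carrier driver calls once the chameleon table is mapped; mcb-pci.c below is the in-tree user, but a hypothetical carrier on another bus (the enum in mcb-internal.h also lists LPC and ISA) would follow the same pattern. A sketch under those assumptions, with simplified error handling and an invented foo_carrier_setup() name:

/* Hypothetical non-PCI carrier; mapbase/base come from the carrier's own bus. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mcb.h>
#include "mcb-internal.h"	/* chameleon_parse_cells() is internal to drivers/mcb */

static int foo_carrier_setup(struct device *carrier,
			     phys_addr_t mapbase, void __iomem *base)
{
	struct mcb_bus *bus;
	int num_cells;

	bus = mcb_alloc_bus();
	if (IS_ERR_OR_NULL(bus))
		return bus ? PTR_ERR(bus) : -ENOMEM;

	num_cells = chameleon_parse_cells(bus, mapbase, base);
	if (num_cells < 0) {
		mcb_release_bus(bus);
		return num_cells;
	}

	dev_dbg(carrier, "found %d cells\n", num_cells);
	mcb_bus_add_devices(bus);	/* attach drivers to the devices just registered */

	return 0;
}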
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
new file mode 100644
index 000000000000..99c742cbfb5b
--- /dev/null
+++ b/drivers/mcb/mcb-pci.c
@@ -0,0 +1,114 @@
+/*
+ * MEN Chameleon Bus.
+ *
+ * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
+ * Author: Johannes Thumshirn <johannes.thumshirn@men.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mcb.h>
+
+#include "mcb-internal.h"
+
+struct priv {
+ struct mcb_bus *bus;
+ void __iomem *base;
+};
+
+static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct priv *priv;
+ phys_addr_t mapbase;
+ int ret;
+ int num_cells;
+ unsigned long flags;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return -ENODEV;
+ }
+
+ mapbase = pci_resource_start(pdev, 0);
+ if (!mapbase) {
+ dev_err(&pdev->dev, "No PCI resource\n");
+ goto err_start;
+ }
+
+ ret = pci_request_region(pdev, 0, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request PCI BARs\n");
+ goto err_start;
+ }
+
+ priv->base = pci_iomap(pdev, 0, 0);
+ if (!priv->base) {
+ dev_err(&pdev->dev, "Cannot ioremap\n");
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ flags = pci_resource_flags(pdev, 0);
+ if (flags & IORESOURCE_IO) {
+ ret = -ENOTSUPP;
+ dev_err(&pdev->dev,
+ "IO mapped PCI devices are not supported\n");
+ goto err_ioremap;
+ }
+
+ pci_set_drvdata(pdev, priv);
+
+ priv->bus = mcb_alloc_bus();
+
+ ret = chameleon_parse_cells(priv->bus, mapbase, priv->base);
+ if (ret < 0)
+ goto err_drvdata;
+ num_cells = ret;
+
+ dev_dbg(&pdev->dev, "Found %d cells\n", num_cells);
+
+ mcb_bus_add_devices(priv->bus);
+
+ return 0;
+
+err_drvdata:
+ pci_iounmap(pdev, priv->base);
+err_ioremap:
+ pci_release_region(pdev, 0);
+err_start:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void mcb_pci_remove(struct pci_dev *pdev)
+{
+ struct priv *priv = pci_get_drvdata(pdev);
+
+ mcb_release_bus(priv->bus);
+}
+
+static const struct pci_device_id mcb_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEN, PCI_DEVICE_ID_MEN_CHAMELEON) },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, mcb_pci_tbl);
+
+static struct pci_driver mcb_pci_driver = {
+ .name = "mcb-pci",
+ .id_table = mcb_pci_tbl,
+ .probe = mcb_pci_probe,
+ .remove = mcb_pci_remove,
+};
+
+module_pci_driver(mcb_pci_driver);
+
+MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MCB over PCI support");
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 95ad936e6048..5bdedf6df153 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -285,6 +285,17 @@ config DM_CACHE_CLEANER
A simple cache policy that writes back all data to the
origin. Used when decommissioning a dm-cache.
+config DM_ERA
+ tristate "Era target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM
+ default n
+ select DM_PERSISTENT_DATA
+ select DM_BIO_PRISON
+ ---help---
+ dm-era tracks which parts of a block device are written to
+ over time. Useful for maintaining cache coherency when using
+ vendor snapshots.
+
config DM_MIRROR
tristate "Mirror target"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f26d83292579..a2da532b1c2b 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -14,6 +14,7 @@ dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
dm-cache-mq-y += dm-cache-policy-mq.o
dm-cache-cleaner-y += dm-cache-policy-cleaner.o
+dm-era-y += dm-era-target.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o
@@ -53,6 +54,7 @@ obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
+obj-$(CONFIG_DM_ERA) += dm-era.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 2638417b19aa..4d200883c505 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -24,11 +24,3 @@ config BCACHE_CLOSURES_DEBUG
Keeps all active closures in a linked list and provides a debugfs
interface to list them, which makes it possible to see asynchronous
operations that get stuck.
-
-# cgroup code needs to be updated:
-#
-#config CGROUP_BCACHE
-# bool "Cgroup controls for bcache"
-# depends on BCACHE && BLK_CGROUP
-# ---help---
-# TODO
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index c0d37d082443..443d03fbac47 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -78,12 +78,6 @@ uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);
- if (CACHE_SYNC(&ca->set->sb)) {
- ca->need_save_prio = max(ca->need_save_prio,
- bucket_disk_gen(b));
- WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX);
- }
-
return ret;
}
@@ -120,51 +114,45 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
mutex_unlock(&c->bucket_lock);
}
-/* Allocation */
+/*
+ * Background allocation thread: scans for buckets to be invalidated,
+ * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
+ * then optionally issues discard commands to the newly free buckets, then puts
+ * them on the various freelists.
+ */
static inline bool can_inc_bucket_gen(struct bucket *b)
{
- return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
- bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
+ return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}
-bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
+bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
- BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));
-
- if (CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) {
- unsigned i;
-
- for (i = 0; i < RESERVE_NONE; i++)
- if (!fifo_full(&ca->free[i]))
- goto add;
+ BUG_ON(!ca->set->gc_mark_valid);
- return false;
- }
-add:
- b->prio = 0;
-
- if (can_inc_bucket_gen(b) &&
- fifo_push(&ca->unused, b - ca->buckets)) {
- atomic_inc(&b->pin);
- return true;
- }
-
- return false;
-}
-
-static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
-{
- return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
+ return (!GC_MARK(b) ||
+ GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
!atomic_read(&b->pin) &&
can_inc_bucket_gen(b);
}
-static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
+void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
+ lockdep_assert_held(&ca->set->bucket_lock);
+ BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);
+
+ if (GC_SECTORS_USED(b))
+ trace_bcache_invalidate(ca, b - ca->buckets);
+
bch_inc_gen(ca, b);
b->prio = INITIAL_PRIO;
atomic_inc(&b->pin);
+}
+
+static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
+{
+ __bch_invalidate_one_bucket(ca, b);
+
fifo_push(&ca->free_inc, b - ca->buckets);
}
@@ -195,20 +183,7 @@ static void invalidate_buckets_lru(struct cache *ca)
ca->heap.used = 0;
for_each_bucket(b, ca) {
- /*
- * If we fill up the unused list, if we then return before
- * adding anything to the free_inc list we'll skip writing
- * prios/gens and just go back to allocating from the unused
- * list:
- */
- if (fifo_full(&ca->unused))
- return;
-
- if (!can_invalidate_bucket(ca, b))
- continue;
-
- if (!GC_SECTORS_USED(b) &&
- bch_bucket_add_unused(ca, b))
+ if (!bch_can_invalidate_bucket(ca, b))
continue;
if (!heap_full(&ca->heap))
@@ -233,7 +208,7 @@ static void invalidate_buckets_lru(struct cache *ca)
return;
}
- invalidate_one_bucket(ca, b);
+ bch_invalidate_one_bucket(ca, b);
}
}
@@ -249,8 +224,8 @@ static void invalidate_buckets_fifo(struct cache *ca)
b = ca->buckets + ca->fifo_last_bucket++;
- if (can_invalidate_bucket(ca, b))
- invalidate_one_bucket(ca, b);
+ if (bch_can_invalidate_bucket(ca, b))
+ bch_invalidate_one_bucket(ca, b);
if (++checked >= ca->sb.nbuckets) {
ca->invalidate_needs_gc = 1;
@@ -274,8 +249,8 @@ static void invalidate_buckets_random(struct cache *ca)
b = ca->buckets + n;
- if (can_invalidate_bucket(ca, b))
- invalidate_one_bucket(ca, b);
+ if (bch_can_invalidate_bucket(ca, b))
+ bch_invalidate_one_bucket(ca, b);
if (++checked >= ca->sb.nbuckets / 2) {
ca->invalidate_needs_gc = 1;
@@ -287,8 +262,7 @@ static void invalidate_buckets_random(struct cache *ca)
static void invalidate_buckets(struct cache *ca)
{
- if (ca->invalidate_needs_gc)
- return;
+ BUG_ON(ca->invalidate_needs_gc);
switch (CACHE_REPLACEMENT(&ca->sb)) {
case CACHE_REPLACEMENT_LRU:
@@ -301,8 +275,6 @@ static void invalidate_buckets(struct cache *ca)
invalidate_buckets_random(ca);
break;
}
-
- trace_bcache_alloc_invalidate(ca);
}
#define allocator_wait(ca, cond) \
@@ -350,17 +322,10 @@ static int bch_allocator_thread(void *arg)
* possibly issue discards to them, then we add the bucket to
* the free list:
*/
- while (1) {
+ while (!fifo_empty(&ca->free_inc)) {
long bucket;
- if ((!atomic_read(&ca->set->prio_blocked) ||
- !CACHE_SYNC(&ca->set->sb)) &&
- !fifo_empty(&ca->unused))
- fifo_pop(&ca->unused, bucket);
- else if (!fifo_empty(&ca->free_inc))
- fifo_pop(&ca->free_inc, bucket);
- else
- break;
+ fifo_pop(&ca->free_inc, bucket);
if (ca->discard) {
mutex_unlock(&ca->set->bucket_lock);
@@ -371,6 +336,7 @@ static int bch_allocator_thread(void *arg)
}
allocator_wait(ca, bch_allocator_push(ca, bucket));
+ wake_up(&ca->set->btree_cache_wait);
wake_up(&ca->set->bucket_wait);
}
@@ -380,9 +346,9 @@ static int bch_allocator_thread(void *arg)
* them to the free_inc list:
*/
+retry_invalidate:
allocator_wait(ca, ca->set->gc_mark_valid &&
- (ca->need_save_prio > 64 ||
- !ca->invalidate_needs_gc));
+ !ca->invalidate_needs_gc);
invalidate_buckets(ca);
/*
@@ -390,13 +356,28 @@ static int bch_allocator_thread(void *arg)
* new stuff to them:
*/
allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
- if (CACHE_SYNC(&ca->set->sb) &&
- (!fifo_empty(&ca->free_inc) ||
- ca->need_save_prio > 64))
+ if (CACHE_SYNC(&ca->set->sb)) {
+ /*
+ * This could deadlock if an allocation with a btree
+ * node locked ever blocked - having the btree node
+ * locked would block garbage collection, but here we're
+ * waiting on garbage collection before we invalidate
+ * and free anything.
+ *
+ * But this should be safe since the btree code always
+ * uses btree_check_reserve() before allocating now, and
+ * if it fails it blocks without btree nodes locked.
+ */
+ if (!fifo_full(&ca->free_inc))
+ goto retry_invalidate;
+
bch_prio_write(ca);
+ }
}
}
+/* Allocation */
+
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
DEFINE_WAIT(w);
@@ -408,8 +389,10 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
fifo_pop(&ca->free[reserve], r))
goto out;
- if (!wait)
+ if (!wait) {
+ trace_bcache_alloc_fail(ca, reserve);
return -1;
+ }
do {
prepare_to_wait(&ca->set->bucket_wait, &w,
@@ -425,6 +408,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
out:
wake_up_process(ca->alloc_thread);
+ trace_bcache_alloc(ca, reserve);
+
if (expensive_debug_checks(ca->set)) {
size_t iter;
long i;
@@ -438,8 +423,6 @@ out:
BUG_ON(i == r);
fifo_for_each(i, &ca->free_inc, iter)
BUG_ON(i == r);
- fifo_for_each(i, &ca->unused, iter)
- BUG_ON(i == r);
}
b = ca->buckets + r;
@@ -461,17 +444,19 @@ out:
return r;
}
+void __bch_bucket_free(struct cache *ca, struct bucket *b)
+{
+ SET_GC_MARK(b, 0);
+ SET_GC_SECTORS_USED(b, 0);
+}
+
void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
unsigned i;
- for (i = 0; i < KEY_PTRS(k); i++) {
- struct bucket *b = PTR_BUCKET(c, k, i);
-
- SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
- SET_GC_SECTORS_USED(b, 0);
- bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
- }
+ for (i = 0; i < KEY_PTRS(k); i++)
+ __bch_bucket_free(PTR_CACHE(c, k, i),
+ PTR_BUCKET(c, k, i));
}
int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
@@ -709,25 +694,3 @@ int bch_cache_allocator_start(struct cache *ca)
ca->alloc_thread = k;
return 0;
}
-
-int bch_cache_allocator_init(struct cache *ca)
-{
- /*
- * Reserve:
- * Prio/gen writes first
- * Then 8 for btree allocations
- * Then half for the moving garbage collector
- */
-#if 0
- ca->watermark[WATERMARK_PRIO] = 0;
-
- ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);
-
- ca->watermark[WATERMARK_MOVINGGC] = 8 +
- ca->watermark[WATERMARK_METADATA];
-
- ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
- ca->watermark[WATERMARK_MOVINGGC];
-#endif
- return 0;
-}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index a4c7306ff43d..82c9c5d35251 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -195,9 +195,7 @@ struct bucket {
atomic_t pin;
uint16_t prio;
uint8_t gen;
- uint8_t disk_gen;
uint8_t last_gc; /* Most out of date gen in the btree */
- uint8_t gc_gen;
uint16_t gc_mark; /* Bitfield used by GC. See below for field */
};
@@ -207,9 +205,9 @@ struct bucket {
*/
BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
-#define GC_MARK_RECLAIMABLE 0
-#define GC_MARK_DIRTY 1
-#define GC_MARK_METADATA 2
+#define GC_MARK_RECLAIMABLE 1
+#define GC_MARK_DIRTY 2
+#define GC_MARK_METADATA 3
#define GC_SECTORS_USED_SIZE 13
#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
@@ -426,14 +424,9 @@ struct cache {
* their new gen to disk. After prio_write() finishes writing the new
* gens/prios, they'll be moved to the free list (and possibly discarded
* in the process)
- *
- * unused: GC found nothing pointing into these buckets (possibly
- * because all the data they contained was overwritten), so we only
- * need to discard them before they can be moved to the free list.
*/
DECLARE_FIFO(long, free)[RESERVE_NR];
DECLARE_FIFO(long, free_inc);
- DECLARE_FIFO(long, unused);
size_t fifo_last_bucket;
@@ -443,12 +436,6 @@ struct cache {
DECLARE_HEAP(struct bucket *, heap);
/*
- * max(gen - disk_gen) for all buckets. When it gets too big we have to
- * call prio_write() to keep gens from wrapping.
- */
- uint8_t need_save_prio;
-
- /*
* If nonzero, we know we aren't going to find any buckets to invalidate
* until a gc finishes - otherwise we could pointlessly burn a ton of
* cpu
@@ -562,19 +549,16 @@ struct cache_set {
struct list_head btree_cache_freed;
/* Number of elements in btree_cache + btree_cache_freeable lists */
- unsigned bucket_cache_used;
+ unsigned btree_cache_used;
/*
* If we need to allocate memory for a new btree node and that
* allocation fails, we can cannibalize another node in the btree cache
- * to satisfy the allocation. However, only one thread can be doing this
- * at a time, for obvious reasons - try_harder and try_wait are
- * basically a lock for this that we can wait on asynchronously. The
- * btree_root() macro releases the lock when it returns.
+ * to satisfy the allocation - lock to guarantee only one thread does
+ * this at a time:
*/
- struct task_struct *try_harder;
- wait_queue_head_t try_wait;
- uint64_t try_harder_start;
+ wait_queue_head_t btree_cache_wait;
+ struct task_struct *btree_cache_alloc_lock;
/*
* When we free a btree node, we increment the gen of the bucket the
@@ -603,7 +587,7 @@ struct cache_set {
uint16_t min_prio;
/*
- * max(gen - gc_gen) for all buckets. When it gets too big we have to gc
+ * max(gen - last_gc) for all buckets. When it gets too big we have to gc
* to keep gens from wrapping around.
*/
uint8_t need_gc;
@@ -628,6 +612,8 @@ struct cache_set {
/* Number of moving GC bios in flight */
struct semaphore moving_in_flight;
+ struct workqueue_struct *moving_gc_wq;
+
struct btree *root;
#ifdef CONFIG_BCACHE_DEBUG
@@ -667,7 +653,6 @@ struct cache_set {
struct time_stats btree_gc_time;
struct time_stats btree_split_time;
struct time_stats btree_read_time;
- struct time_stats try_harder_time;
atomic_long_t cache_read_races;
atomic_long_t writeback_keys_done;
@@ -850,9 +835,6 @@ static inline bool cached_dev_get(struct cached_dev *dc)
/*
* bucket_gc_gen() returns the difference between the bucket's current gen and
* the oldest gen of any pointer into that bucket in the btree (last_gc).
- *
- * bucket_disk_gen() returns the difference between the current gen and the gen
- * on disk; they're both used to make sure gens don't wrap around.
*/
static inline uint8_t bucket_gc_gen(struct bucket *b)
@@ -860,13 +842,7 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
return b->gen - b->last_gc;
}
-static inline uint8_t bucket_disk_gen(struct bucket *b)
-{
- return b->gen - b->disk_gen;
-}
-
#define BUCKET_GC_GEN_MAX 96U
-#define BUCKET_DISK_GEN_MAX 64U
#define kobj_attribute_write(n, fn) \
static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
@@ -899,11 +875,14 @@ void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
-bool bch_bucket_add_unused(struct cache *, struct bucket *);
-long bch_bucket_alloc(struct cache *, unsigned, bool);
+bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
+void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
+
+void __bch_bucket_free(struct cache *, struct bucket *);
void bch_bucket_free(struct cache_set *, struct bkey *);
+long bch_bucket_alloc(struct cache *, unsigned, bool);
int __bch_bucket_alloc_set(struct cache_set *, unsigned,
struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
@@ -954,13 +933,10 @@ int bch_open_buckets_alloc(struct cache_set *);
void bch_open_buckets_free(struct cache_set *);
int bch_cache_allocator_start(struct cache *ca);
-int bch_cache_allocator_init(struct cache *ca);
void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
void bch_request_exit(void);
int bch_request_init(void);
-void bch_btree_exit(void);
-int bch_btree_init(void);
#endif /* _BCACHE_H */
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 3f74b4b0747b..545416415305 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -23,8 +23,8 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
for (k = i->start; k < bset_bkey_last(i); k = next) {
next = bkey_next(k);
- printk(KERN_ERR "block %u key %li/%u: ", set,
- (uint64_t *) k - i->d, i->keys);
+ printk(KERN_ERR "block %u key %u/%u: ", set,
+ (unsigned) ((u64 *) k - i->d), i->keys);
if (b->ops->key_dump)
b->ops->key_dump(b, k);
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 003260f4ddf6..5f6728d5d4dd 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -478,6 +478,12 @@ static inline void bch_keylist_init(struct keylist *l)
l->top_p = l->keys_p = l->inline_keys;
}
+static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k)
+{
+ l->keys = k;
+ l->top = bkey_next(k);
+}
+
static inline void bch_keylist_push(struct keylist *l)
{
l->top = bkey_next(l->top);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5f9c2a665ca5..7347b6100961 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -68,15 +68,11 @@
* alloc_bucket() cannot fail. This should be true but is not completely
* obvious.
*
- * Make sure all allocations get charged to the root cgroup
- *
* Plugging?
*
* If data write is less than hard sector size of ssd, round up offset in open
* bucket to the next whole sector
*
- * Also lookup by cgroup in get_open_bucket()
- *
* Superblock needs to be fleshed out for multiple cache devices
*
* Add a sysfs tunable for the number of writeback IOs in flight
@@ -97,8 +93,6 @@
#define PTR_HASH(c, k) \
(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
-static struct workqueue_struct *btree_io_wq;
-
#define insert_lock(s, b) ((b)->level <= (s)->lock)
/*
@@ -123,7 +117,7 @@ static struct workqueue_struct *btree_io_wq;
({ \
int _r, l = (b)->level - 1; \
bool _w = l <= (op)->lock; \
- struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \
+ struct btree *_child = bch_btree_node_get((b)->c, op, key, l, _w);\
if (!IS_ERR(_child)) { \
_child->parent = (b); \
_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
@@ -152,17 +146,12 @@ static struct workqueue_struct *btree_io_wq;
_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
} \
rw_unlock(_w, _b); \
+ bch_cannibalize_unlock(c); \
if (_r == -EINTR) \
schedule(); \
- bch_cannibalize_unlock(c); \
- if (_r == -ENOSPC) { \
- wait_event((c)->try_wait, \
- !(c)->try_harder); \
- _r = -EINTR; \
- } \
} while (_r == -EINTR); \
\
- finish_wait(&(c)->bucket_wait, &(op)->wait); \
+ finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
_r; \
})
@@ -171,6 +160,20 @@ static inline struct bset *write_block(struct btree *b)
return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
}
+static void bch_btree_init_next(struct btree *b)
+{
+ /* If not a leaf node, always sort */
+ if (b->level && b->keys.nsets)
+ bch_btree_sort(&b->keys, &b->c->sort);
+ else
+ bch_btree_sort_lazy(&b->keys, &b->c->sort);
+
+ if (b->written < btree_blocks(b))
+ bch_bset_init_next(&b->keys, write_block(b),
+ bset_magic(&b->c->sb));
+
+}
+
/* Btree key manipulation */
void bkey_put(struct cache_set *c, struct bkey *k)
@@ -352,8 +355,7 @@ static void __btree_node_write_done(struct closure *cl)
btree_complete_write(b, w);
if (btree_node_dirty(b))
- queue_delayed_work(btree_io_wq, &b->work,
- msecs_to_jiffies(30000));
+ schedule_delayed_work(&b->work, 30 * HZ);
closure_return_with_destructor(cl, btree_node_write_unlock);
}
@@ -442,10 +444,12 @@ static void do_btree_node_write(struct btree *b)
}
}
-void bch_btree_node_write(struct btree *b, struct closure *parent)
+void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
struct bset *i = btree_bset_last(b);
+ lockdep_assert_held(&b->write_lock);
+
trace_bcache_btree_write(b);
BUG_ON(current->bio_list);
@@ -469,23 +473,24 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
b->written += set_blocks(i, block_bytes(b->c));
+}
- /* If not a leaf node, always sort */
- if (b->level && b->keys.nsets)
- bch_btree_sort(&b->keys, &b->c->sort);
- else
- bch_btree_sort_lazy(&b->keys, &b->c->sort);
+void bch_btree_node_write(struct btree *b, struct closure *parent)
+{
+ unsigned nsets = b->keys.nsets;
+
+ lockdep_assert_held(&b->lock);
+
+ __bch_btree_node_write(b, parent);
/*
* do verify if there was more than one set initially (i.e. we did a
* sort) and we sorted down to a single set:
*/
- if (i != b->keys.set->data && !b->keys.nsets)
+ if (nsets && !b->keys.nsets)
bch_btree_verify(b);
- if (b->written < btree_blocks(b))
- bch_bset_init_next(&b->keys, write_block(b),
- bset_magic(&b->c->sb));
+ bch_btree_init_next(b);
}
static void bch_btree_node_write_sync(struct btree *b)
@@ -493,7 +498,11 @@ static void bch_btree_node_write_sync(struct btree *b)
struct closure cl;
closure_init_stack(&cl);
+
+ mutex_lock(&b->write_lock);
bch_btree_node_write(b, &cl);
+ mutex_unlock(&b->write_lock);
+
closure_sync(&cl);
}
@@ -501,11 +510,10 @@ static void btree_node_write_work(struct work_struct *w)
{
struct btree *b = container_of(to_delayed_work(w), struct btree, work);
- rw_lock(true, b, b->level);
-
+ mutex_lock(&b->write_lock);
if (btree_node_dirty(b))
- bch_btree_node_write(b, NULL);
- rw_unlock(true, b);
+ __bch_btree_node_write(b, NULL);
+ mutex_unlock(&b->write_lock);
}
static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
@@ -513,11 +521,13 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
struct bset *i = btree_bset_last(b);
struct btree_write *w = btree_current_write(b);
+ lockdep_assert_held(&b->write_lock);
+
BUG_ON(!b->written);
BUG_ON(!i->keys);
if (!btree_node_dirty(b))
- queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
+ schedule_delayed_work(&b->work, 30 * HZ);
set_btree_node_dirty(b);
@@ -548,7 +558,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
#define mca_reserve(c) (((c->root && c->root->level) \
? c->root->level : 1) * 8 + 16)
#define mca_can_free(c) \
- max_t(int, 0, c->bucket_cache_used - mca_reserve(c))
+ max_t(int, 0, c->btree_cache_used - mca_reserve(c))
static void mca_data_free(struct btree *b)
{
@@ -556,7 +566,7 @@ static void mca_data_free(struct btree *b)
bch_btree_keys_free(&b->keys);
- b->c->bucket_cache_used--;
+ b->c->btree_cache_used--;
list_move(&b->list, &b->c->btree_cache_freed);
}
@@ -581,7 +591,7 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
ilog2(b->c->btree_pages),
btree_order(k)),
gfp)) {
- b->c->bucket_cache_used++;
+ b->c->btree_cache_used++;
list_move(&b->list, &b->c->btree_cache);
} else {
list_move(&b->list, &b->c->btree_cache_freed);
@@ -597,6 +607,8 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
init_rwsem(&b->lock);
lockdep_set_novalidate_class(&b->lock);
+ mutex_init(&b->write_lock);
+ lockdep_set_novalidate_class(&b->write_lock);
INIT_LIST_HEAD(&b->list);
INIT_DELAYED_WORK(&b->work, btree_node_write_work);
b->c = c;
@@ -630,8 +642,12 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush)
up(&b->io_mutex);
}
+ mutex_lock(&b->write_lock);
if (btree_node_dirty(b))
- bch_btree_node_write_sync(b);
+ __bch_btree_node_write(b, &cl);
+ mutex_unlock(&b->write_lock);
+
+ closure_sync(&cl);
/* wait for any in flight btree write */
down(&b->io_mutex);
@@ -654,7 +670,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
if (c->shrinker_disabled)
return SHRINK_STOP;
- if (c->try_harder)
+ if (c->btree_cache_alloc_lock)
return SHRINK_STOP;
/* Return -1 if we can't do anything right now */
@@ -686,7 +702,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
}
}
- for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
+ for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
if (list_empty(&c->btree_cache))
goto out;
@@ -715,7 +731,7 @@ static unsigned long bch_mca_count(struct shrinker *shrink,
if (c->shrinker_disabled)
return 0;
- if (c->try_harder)
+ if (c->btree_cache_alloc_lock)
return 0;
return mca_can_free(c) * c->btree_pages;
@@ -819,17 +835,30 @@ out:
return b;
}
-static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
+static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
+{
+ struct task_struct *old;
+
+ old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
+ if (old && old != current) {
+ if (op)
+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
+ TASK_UNINTERRUPTIBLE);
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
+ struct bkey *k)
{
struct btree *b;
trace_bcache_btree_cache_cannibalize(c);
- if (!c->try_harder) {
- c->try_harder = current;
- c->try_harder_start = local_clock();
- } else if (c->try_harder != current)
- return ERR_PTR(-ENOSPC);
+ if (mca_cannibalize_lock(c, op))
+ return ERR_PTR(-EINTR);
list_for_each_entry_reverse(b, &c->btree_cache, list)
if (!mca_reap(b, btree_order(k), false))
@@ -839,6 +868,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
if (!mca_reap(b, btree_order(k), true))
return b;
+ WARN(1, "btree cache cannibalize failed\n");
return ERR_PTR(-ENOMEM);
}
@@ -850,14 +880,14 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
*/
static void bch_cannibalize_unlock(struct cache_set *c)
{
- if (c->try_harder == current) {
- bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
- c->try_harder = NULL;
- wake_up(&c->try_wait);
+ if (c->btree_cache_alloc_lock == current) {
+ c->btree_cache_alloc_lock = NULL;
+ wake_up(&c->btree_cache_wait);
}
}
-static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
+static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
+ struct bkey *k, int level)
{
struct btree *b;
@@ -920,7 +950,7 @@ err:
if (b)
rw_unlock(true, b);
- b = mca_cannibalize(c, k);
+ b = mca_cannibalize(c, op, k);
if (!IS_ERR(b))
goto out;
@@ -936,8 +966,8 @@ err:
* The btree node will have either a read or a write lock held, depending on
* level and op->lock.
*/
-struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
- int level, bool write)
+struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
+ struct bkey *k, int level, bool write)
{
int i = 0;
struct btree *b;
@@ -951,7 +981,7 @@ retry:
return ERR_PTR(-EAGAIN);
mutex_lock(&c->bucket_lock);
- b = mca_alloc(c, k, level);
+ b = mca_alloc(c, op, k, level);
mutex_unlock(&c->bucket_lock);
if (!b)
@@ -997,7 +1027,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
struct btree *b;
mutex_lock(&c->bucket_lock);
- b = mca_alloc(c, k, level);
+ b = mca_alloc(c, NULL, k, level);
mutex_unlock(&c->bucket_lock);
if (!IS_ERR_OR_NULL(b)) {
@@ -1010,46 +1040,41 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
static void btree_node_free(struct btree *b)
{
- unsigned i;
-
trace_bcache_btree_node_free(b);
BUG_ON(b == b->c->root);
+ mutex_lock(&b->write_lock);
+
if (btree_node_dirty(b))
btree_complete_write(b, btree_current_write(b));
clear_bit(BTREE_NODE_dirty, &b->flags);
+ mutex_unlock(&b->write_lock);
+
cancel_delayed_work(&b->work);
mutex_lock(&b->c->bucket_lock);
-
- for (i = 0; i < KEY_PTRS(&b->key); i++) {
- BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));
-
- bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
- PTR_BUCKET(b->c, &b->key, i));
- }
-
bch_bucket_free(b->c, &b->key);
mca_bucket_free(b);
mutex_unlock(&b->c->bucket_lock);
}
-struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
+struct btree *bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+ int level)
{
BKEY_PADDED(key) k;
struct btree *b = ERR_PTR(-EAGAIN);
mutex_lock(&c->bucket_lock);
retry:
- if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
+ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, op != NULL))
goto err;
bkey_put(c, &k.key);
SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
- b = mca_alloc(c, &k.key, level);
+ b = mca_alloc(c, op, &k.key, level);
if (IS_ERR(b))
goto err_free;
@@ -1075,12 +1100,15 @@ err:
return b;
}
-static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
+static struct btree *btree_node_alloc_replacement(struct btree *b,
+ struct btree_op *op)
{
- struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
+ struct btree *n = bch_btree_node_alloc(b->c, op, b->level);
if (!IS_ERR_OR_NULL(n)) {
+ mutex_lock(&n->write_lock);
bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
bkey_copy_key(&n->key, &b->key);
+ mutex_unlock(&n->write_lock);
}
return n;
@@ -1090,43 +1118,47 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
unsigned i;
+ mutex_lock(&b->c->bucket_lock);
+
+ atomic_inc(&b->c->prio_blocked);
+
bkey_copy(k, &b->key);
bkey_copy_key(k, &ZERO_KEY);
- for (i = 0; i < KEY_PTRS(k); i++) {
- uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1;
-
- SET_PTR_GEN(k, i, g);
- }
+ for (i = 0; i < KEY_PTRS(k); i++)
+ SET_PTR_GEN(k, i,
+ bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
+ PTR_BUCKET(b->c, &b->key, i)));
- atomic_inc(&b->c->prio_blocked);
+ mutex_unlock(&b->c->bucket_lock);
}
static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
struct cache_set *c = b->c;
struct cache *ca;
- unsigned i, reserve = c->root->level * 2 + 1;
- int ret = 0;
+ unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
mutex_lock(&c->bucket_lock);
for_each_cache(ca, c, i)
if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
if (op)
- prepare_to_wait(&c->bucket_wait, &op->wait,
+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
TASK_UNINTERRUPTIBLE);
- ret = -EINTR;
- break;
+ mutex_unlock(&c->bucket_lock);
+ return -EINTR;
}
mutex_unlock(&c->bucket_lock);
- return ret;
+
+ return mca_cannibalize_lock(b->c, op);
}
/* Garbage collection */
-uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
+static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
+ struct bkey *k)
{
uint8_t stale = 0;
unsigned i;
@@ -1146,8 +1178,8 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
g = PTR_BUCKET(c, k, i);
- if (gen_after(g->gc_gen, PTR_GEN(k, i)))
- g->gc_gen = PTR_GEN(k, i);
+ if (gen_after(g->last_gc, PTR_GEN(k, i)))
+ g->last_gc = PTR_GEN(k, i);
if (ptr_stale(c, k, i)) {
stale = max(stale, ptr_stale(c, k, i));
@@ -1163,6 +1195,8 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
SET_GC_MARK(g, GC_MARK_METADATA);
else if (KEY_DIRTY(k))
SET_GC_MARK(g, GC_MARK_DIRTY);
+ else if (!GC_MARK(g))
+ SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
/* guard against overflow */
SET_GC_SECTORS_USED(g, min_t(unsigned,
@@ -1177,6 +1211,26 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
+void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
+{
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i) &&
+ !ptr_stale(c, k, i)) {
+ struct bucket *b = PTR_BUCKET(c, k, i);
+
+ b->gen = PTR_GEN(k, i);
+
+ if (level && bkey_cmp(k, &ZERO_KEY))
+ b->prio = BTREE_PRIO;
+ else if (!level && b->prio == BTREE_PRIO)
+ b->prio = INITIAL_PRIO;
+ }
+
+ __bch_btree_mark_key(c, level, k);
+}
+
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
uint8_t stale = 0;
@@ -1230,14 +1284,19 @@ static int bch_btree_insert_node(struct btree *, struct btree_op *,
struct keylist *, atomic_t *, struct bkey *);
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- struct keylist *keylist, struct gc_stat *gc,
- struct gc_merge_info *r)
+ struct gc_stat *gc, struct gc_merge_info *r)
{
unsigned i, nodes = 0, keys = 0, blocks;
struct btree *new_nodes[GC_MERGE_NODES];
+ struct keylist keylist;
struct closure cl;
struct bkey *k;
+ bch_keylist_init(&keylist);
+
+ if (btree_check_reserve(b, NULL))
+ return 0;
+
memset(new_nodes, 0, sizeof(new_nodes));
closure_init_stack(&cl);
@@ -1252,11 +1311,23 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
return 0;
for (i = 0; i < nodes; i++) {
- new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
+ new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
if (IS_ERR_OR_NULL(new_nodes[i]))
goto out_nocoalesce;
}
+ /*
+ * We have to check the reserve here, after we've allocated our new
+ * nodes, to make sure the insert below will succeed - we also check
+ * before as an optimization to potentially avoid a bunch of expensive
+ * allocs/sorts
+ */
+ if (btree_check_reserve(b, NULL))
+ goto out_nocoalesce;
+
+ for (i = 0; i < nodes; i++)
+ mutex_lock(&new_nodes[i]->write_lock);
+
for (i = nodes - 1; i > 0; --i) {
struct bset *n1 = btree_bset_first(new_nodes[i]);
struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
@@ -1315,28 +1386,34 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
n2->keys -= keys;
- if (__bch_keylist_realloc(keylist,
+ if (__bch_keylist_realloc(&keylist,
bkey_u64s(&new_nodes[i]->key)))
goto out_nocoalesce;
bch_btree_node_write(new_nodes[i], &cl);
- bch_keylist_add(keylist, &new_nodes[i]->key);
+ bch_keylist_add(&keylist, &new_nodes[i]->key);
}
- for (i = 0; i < nodes; i++) {
- if (__bch_keylist_realloc(keylist, bkey_u64s(&r[i].b->key)))
- goto out_nocoalesce;
+ for (i = 0; i < nodes; i++)
+ mutex_unlock(&new_nodes[i]->write_lock);
- make_btree_freeing_key(r[i].b, keylist->top);
- bch_keylist_push(keylist);
- }
+ closure_sync(&cl);
/* We emptied out this node */
BUG_ON(btree_bset_first(new_nodes[0])->keys);
btree_node_free(new_nodes[0]);
rw_unlock(true, new_nodes[0]);
- closure_sync(&cl);
+ for (i = 0; i < nodes; i++) {
+ if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
+ goto out_nocoalesce;
+
+ make_btree_freeing_key(r[i].b, keylist.top);
+ bch_keylist_push(&keylist);
+ }
+
+ bch_btree_insert_node(b, op, &keylist, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&keylist));
for (i = 0; i < nodes; i++) {
btree_node_free(r[i].b);
@@ -1345,22 +1422,22 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
r[i].b = new_nodes[i];
}
- bch_btree_insert_node(b, op, keylist, NULL, NULL);
- BUG_ON(!bch_keylist_empty(keylist));
-
memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
r[nodes - 1].b = ERR_PTR(-EINTR);
trace_bcache_btree_gc_coalesce(nodes);
gc->nodes--;
+ bch_keylist_free(&keylist);
+
/* Invalidated our iterator */
return -EINTR;
out_nocoalesce:
closure_sync(&cl);
+ bch_keylist_free(&keylist);
- while ((k = bch_keylist_pop(keylist)))
+ while ((k = bch_keylist_pop(&keylist)))
if (!bkey_cmp(k, &ZERO_KEY))
atomic_dec(&b->c->prio_blocked);
@@ -1372,6 +1449,42 @@ out_nocoalesce:
return 0;
}
+static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
+ struct btree *replace)
+{
+ struct keylist keys;
+ struct btree *n;
+
+ if (btree_check_reserve(b, NULL))
+ return 0;
+
+ n = btree_node_alloc_replacement(replace, NULL);
+
+ /* recheck reserve after allocating replacement node */
+ if (btree_check_reserve(b, NULL)) {
+ btree_node_free(n);
+ rw_unlock(true, n);
+ return 0;
+ }
+
+ bch_btree_node_write_sync(n);
+
+ bch_keylist_init(&keys);
+ bch_keylist_add(&keys, &n->key);
+
+ make_btree_freeing_key(replace, keys.top);
+ bch_keylist_push(&keys);
+
+ bch_btree_insert_node(b, op, &keys, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&keys));
+
+ btree_node_free(replace);
+ rw_unlock(true, n);
+
+ /* Invalidated our iterator */
+ return -EINTR;
+}
+
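
Editor's note: btree_gc_rewrite_node() factors the previously open-coded rewrite path out of btree_gc_recurse(): allocate a replacement node, re-check the reserve, write the replacement, insert its key together with a freeing key for the old node, then return -EINTR because the caller's iterator is now invalid. The control flow reduced to a hypothetical skeleton with stubbed helpers, not the real bcache API:

#include <errno.h>
#include <stdio.h>

/* Toy stand-ins for nodes and the helpers the real function calls. */
struct node { int id; };

static int  check_reserve(void)                        { return 0; /* 0 == enough */ }
static void alloc_replacement(struct node *n, int id)  { n->id = id; }
static void write_node(struct node *n)                 { printf("write node %d\n", n->id); }
static void insert_swap_keys(struct node *new, struct node *old)
{
	printf("insert key for %d + freeing key for %d\n", new->id, old->id);
}
static void free_node(struct node *n)                  { printf("free node %d\n", n->id); }

/* Mirrors the shape of btree_gc_rewrite_node(): bail out cheaply if the
 * reserve looks short, re-check after allocating, then swap the nodes and
 * signal that the caller's iterator is invalid. */
static int rewrite_node(struct node *old)
{
	struct node new;

	if (check_reserve())
		return 0;			/* nothing to do, keep iterating */

	alloc_replacement(&new, old->id + 100);

	if (check_reserve()) {			/* recheck after the alloc */
		free_node(&new);
		return 0;
	}

	write_node(&new);
	insert_swap_keys(&new, old);
	free_node(old);

	return -EINTR;				/* iterator invalidated */
}

int main(void)
{
	struct node old = { .id = 7 };

	printf("ret = %d\n", rewrite_node(&old));
	return 0;
}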
static unsigned btree_gc_count_keys(struct btree *b)
{
struct bkey *k;
@@ -1387,26 +1500,23 @@ static unsigned btree_gc_count_keys(struct btree *b)
static int btree_gc_recurse(struct btree *b, struct btree_op *op,
struct closure *writes, struct gc_stat *gc)
{
- unsigned i;
int ret = 0;
bool should_rewrite;
- struct btree *n;
struct bkey *k;
- struct keylist keys;
struct btree_iter iter;
struct gc_merge_info r[GC_MERGE_NODES];
- struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
+ struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
- bch_keylist_init(&keys);
bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
- for (i = 0; i < GC_MERGE_NODES; i++)
- r[i].b = ERR_PTR(-EINTR);
+ for (i = r; i < r + ARRAY_SIZE(r); i++)
+ i->b = ERR_PTR(-EINTR);
while (1) {
k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
if (k) {
- r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
+ r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
+ true);
if (IS_ERR(r->b)) {
ret = PTR_ERR(r->b);
break;
@@ -1414,7 +1524,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
r->keys = btree_gc_count_keys(r->b);
- ret = btree_gc_coalesce(b, op, &keys, gc, r);
+ ret = btree_gc_coalesce(b, op, gc, r);
if (ret)
break;
}
@@ -1424,32 +1534,10 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
if (!IS_ERR(last->b)) {
should_rewrite = btree_gc_mark_node(last->b, gc);
- if (should_rewrite &&
- !btree_check_reserve(b, NULL)) {
- n = btree_node_alloc_replacement(last->b,
- false);
-
- if (!IS_ERR_OR_NULL(n)) {
- bch_btree_node_write_sync(n);
- bch_keylist_add(&keys, &n->key);
-
- make_btree_freeing_key(last->b,
- keys.top);
- bch_keylist_push(&keys);
-
- btree_node_free(last->b);
-
- bch_btree_insert_node(b, op, &keys,
- NULL, NULL);
- BUG_ON(!bch_keylist_empty(&keys));
-
- rw_unlock(true, last->b);
- last->b = n;
-
- /* Invalidated our iterator */
- ret = -EINTR;
+ if (should_rewrite) {
+ ret = btree_gc_rewrite_node(b, op, last->b);
+ if (ret)
break;
- }
}
if (last->b->level) {
@@ -1464,8 +1552,10 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
* Must flush leaf nodes before gc ends, since replace
* operations aren't journalled
*/
+ mutex_lock(&last->b->write_lock);
if (btree_node_dirty(last->b))
bch_btree_node_write(last->b, writes);
+ mutex_unlock(&last->b->write_lock);
rw_unlock(true, last->b);
}
@@ -1478,15 +1568,15 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
}
}
- for (i = 0; i < GC_MERGE_NODES; i++)
- if (!IS_ERR_OR_NULL(r[i].b)) {
- if (btree_node_dirty(r[i].b))
- bch_btree_node_write(r[i].b, writes);
- rw_unlock(true, r[i].b);
+ for (i = r; i < r + ARRAY_SIZE(r); i++)
+ if (!IS_ERR_OR_NULL(i->b)) {
+ mutex_lock(&i->b->write_lock);
+ if (btree_node_dirty(i->b))
+ bch_btree_node_write(i->b, writes);
+ mutex_unlock(&i->b->write_lock);
+ rw_unlock(true, i->b);
}
- bch_keylist_free(&keys);
-
return ret;
}
@@ -1499,10 +1589,11 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
should_rewrite = btree_gc_mark_node(b, gc);
if (should_rewrite) {
- n = btree_node_alloc_replacement(b, false);
+ n = btree_node_alloc_replacement(b, NULL);
if (!IS_ERR_OR_NULL(n)) {
bch_btree_node_write_sync(n);
+
bch_btree_set_root(n);
btree_node_free(b);
rw_unlock(true, n);
@@ -1511,6 +1602,8 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
}
}
+ __bch_btree_mark_key(b->c, b->level + 1, &b->key);
+
if (b->level) {
ret = btree_gc_recurse(b, op, writes, gc);
if (ret)
@@ -1538,9 +1631,9 @@ static void btree_gc_start(struct cache_set *c)
for_each_cache(ca, c, i)
for_each_bucket(b, ca) {
- b->gc_gen = b->gen;
+ b->last_gc = b->gen;
if (!atomic_read(&b->pin)) {
- SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_MARK(b, 0);
SET_GC_SECTORS_USED(b, 0);
}
}
@@ -1548,7 +1641,7 @@ static void btree_gc_start(struct cache_set *c)
mutex_unlock(&c->bucket_lock);
}
-size_t bch_btree_gc_finish(struct cache_set *c)
+static size_t bch_btree_gc_finish(struct cache_set *c)
{
size_t available = 0;
struct bucket *b;
@@ -1561,11 +1654,6 @@ size_t bch_btree_gc_finish(struct cache_set *c)
c->gc_mark_valid = 1;
c->need_gc = 0;
- if (c->root)
- for (i = 0; i < KEY_PTRS(&c->root->key); i++)
- SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i),
- GC_MARK_METADATA);
-
for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
GC_MARK_METADATA);
@@ -1605,15 +1693,15 @@ size_t bch_btree_gc_finish(struct cache_set *c)
SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
for_each_bucket(b, ca) {
- b->last_gc = b->gc_gen;
c->need_gc = max(c->need_gc, bucket_gc_gen(b));
- if (!atomic_read(&b->pin) &&
- GC_MARK(b) == GC_MARK_RECLAIMABLE) {
+ if (atomic_read(&b->pin))
+ continue;
+
+ BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
+
+ if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
available++;
- if (!GC_SECTORS_USED(b))
- bch_bucket_add_unused(ca, b);
- }
}
}
@@ -1705,36 +1793,16 @@ int bch_gc_thread_start(struct cache_set *c)
/* Initial partial gc */
-static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
- unsigned long **seen)
+static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
int ret = 0;
- unsigned i;
struct bkey *k, *p = NULL;
- struct bucket *g;
struct btree_iter iter;
- for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
- for (i = 0; i < KEY_PTRS(k); i++) {
- if (!ptr_available(b->c, k, i))
- continue;
-
- g = PTR_BUCKET(b->c, k, i);
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
+ bch_initial_mark_key(b->c, b->level, k);
- if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i),
- seen[PTR_DEV(k, i)]) ||
- !ptr_stale(b->c, k, i)) {
- g->gen = PTR_GEN(k, i);
-
- if (b->level)
- g->prio = BTREE_PRIO;
- else if (g->prio == BTREE_PRIO)
- g->prio = INITIAL_PRIO;
- }
- }
-
- btree_mark_key(b, k);
- }
+ bch_initial_mark_key(b->c, b->level + 1, &b->key);
if (b->level) {
bch_btree_iter_init(&b->keys, &iter, NULL);
@@ -1746,40 +1814,58 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
btree_node_prefetch(b->c, k, b->level - 1);
if (p)
- ret = btree(check_recurse, p, b, op, seen);
+ ret = btree(check_recurse, p, b, op);
p = k;
} while (p && !ret);
}
- return 0;
+ return ret;
}
int bch_btree_check(struct cache_set *c)
{
- int ret = -ENOMEM;
- unsigned i;
- unsigned long *seen[MAX_CACHES_PER_SET];
struct btree_op op;
- memset(seen, 0, sizeof(seen));
bch_btree_op_init(&op, SHRT_MAX);
- for (i = 0; c->cache[i]; i++) {
- size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
- seen[i] = kmalloc(n, GFP_KERNEL);
- if (!seen[i])
- goto err;
+ return btree_root(check_recurse, c, &op);
+}
+
+void bch_initial_gc_finish(struct cache_set *c)
+{
+ struct cache *ca;
+ struct bucket *b;
+ unsigned i;
+
+ bch_btree_gc_finish(c);
- /* Disables the seen array until prio_read() uses it too */
- memset(seen[i], 0xFF, n);
+ mutex_lock(&c->bucket_lock);
+
+ /*
+ * We need to put some unused buckets directly on the prio freelist in
+ * order to get the allocator thread started - it needs freed buckets in
+ * order to rewrite the prios and gens, and it needs to rewrite prios
+ * and gens in order to free buckets.
+ *
+ * This is only safe for buckets that have no live data in them, which
+ * there should always be some of.
+ */
+ for_each_cache(ca, c, i) {
+ for_each_bucket(b, ca) {
+ if (fifo_full(&ca->free[RESERVE_PRIO]))
+ break;
+
+ if (bch_can_invalidate_bucket(ca, b) &&
+ !GC_MARK(b)) {
+ __bch_invalidate_one_bucket(ca, b);
+ fifo_push(&ca->free[RESERVE_PRIO],
+ b - ca->buckets);
+ }
+ }
}
- ret = btree_root(check_recurse, c, &op, seen);
-err:
- for (i = 0; i < MAX_CACHES_PER_SET; i++)
- kfree(seen[i]);
- return ret;
+ mutex_unlock(&c->bucket_lock);
}
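
Editor's note: the new bch_initial_gc_finish() bootstraps the allocator by pushing a handful of demonstrably unused buckets straight onto the RESERVE_PRIO freelist, because the allocator thread needs free buckets before it can rewrite prios and gens, and needs to rewrite prios and gens before it can free buckets. The bootstrap loop as a small sketch around a fixed-size FIFO; constants and names here are invented for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NBUCKETS 16
#define RESERVE  4

struct bucket { bool pinned; unsigned mark; };	/* mark == 0: no live data */

/* Trivial bounded FIFO standing in for ca->free[RESERVE_PRIO]. */
struct fifo { size_t data[RESERVE]; size_t used; };

static bool fifo_full(const struct fifo *f)     { return f->used == RESERVE; }
static void fifo_push(struct fifo *f, size_t v) { f->data[f->used++] = v; }

static bool can_invalidate(const struct bucket *b)
{
	return !b->pinned && b->mark == 0;	/* only safe with no live data */
}

int main(void)
{
	struct bucket buckets[NBUCKETS] = { { .pinned = true } };
	struct fifo prio_free = { .used = 0 };
	size_t i;

	/* Seed the reserve with unused buckets so the allocator can start. */
	for (i = 0; i < NBUCKETS && !fifo_full(&prio_free); i++)
		if (can_invalidate(&buckets[i]))
			fifo_push(&prio_free, i);

	printf("seeded %zu prio buckets\n", prio_free.used);
	return 0;
}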
/* Btree insertion */
@@ -1871,11 +1957,14 @@ static int btree_split(struct btree *b, struct btree_op *op,
closure_init_stack(&cl);
bch_keylist_init(&parent_keys);
- if (!b->level &&
- btree_check_reserve(b, op))
- return -EINTR;
+ if (btree_check_reserve(b, op)) {
+ if (!b->level)
+ return -EINTR;
+ else
+ WARN(1, "insufficient reserve for split\n");
+ }
- n1 = btree_node_alloc_replacement(b, true);
+ n1 = btree_node_alloc_replacement(b, op);
if (IS_ERR(n1))
goto err;
@@ -1887,16 +1976,19 @@ static int btree_split(struct btree *b, struct btree_op *op,
trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
- n2 = bch_btree_node_alloc(b->c, b->level, true);
+ n2 = bch_btree_node_alloc(b->c, op, b->level);
if (IS_ERR(n2))
goto err_free1;
if (!b->parent) {
- n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
+ n3 = bch_btree_node_alloc(b->c, op, b->level + 1);
if (IS_ERR(n3))
goto err_free2;
}
+ mutex_lock(&n1->write_lock);
+ mutex_lock(&n2->write_lock);
+
bch_btree_insert_keys(n1, op, insert_keys, replace_key);
/*
@@ -1923,45 +2015,45 @@ static int btree_split(struct btree *b, struct btree_op *op,
bch_keylist_add(&parent_keys, &n2->key);
bch_btree_node_write(n2, &cl);
+ mutex_unlock(&n2->write_lock);
rw_unlock(true, n2);
} else {
trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
+ mutex_lock(&n1->write_lock);
bch_btree_insert_keys(n1, op, insert_keys, replace_key);
}
bch_keylist_add(&parent_keys, &n1->key);
bch_btree_node_write(n1, &cl);
+ mutex_unlock(&n1->write_lock);
if (n3) {
/* Depth increases, make a new root */
+ mutex_lock(&n3->write_lock);
bkey_copy_key(&n3->key, &MAX_KEY);
bch_btree_insert_keys(n3, op, &parent_keys, NULL);
bch_btree_node_write(n3, &cl);
+ mutex_unlock(&n3->write_lock);
closure_sync(&cl);
bch_btree_set_root(n3);
rw_unlock(true, n3);
-
- btree_node_free(b);
} else if (!b->parent) {
/* Root filled up but didn't need to be split */
closure_sync(&cl);
bch_btree_set_root(n1);
-
- btree_node_free(b);
} else {
/* Split a non root node */
closure_sync(&cl);
make_btree_freeing_key(b, parent_keys.top);
bch_keylist_push(&parent_keys);
- btree_node_free(b);
-
bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
BUG_ON(!bch_keylist_empty(&parent_keys));
}
+ btree_node_free(b);
rw_unlock(true, n1);
bch_time_stats_update(&b->c->btree_split_time, start_time);
@@ -1976,7 +2068,7 @@ err_free1:
btree_node_free(n1);
rw_unlock(true, n1);
err:
- WARN(1, "bcache: btree split failed");
+ WARN(1, "bcache: btree split failed (level %u)", b->level);
if (n3 == ERR_PTR(-EAGAIN) ||
n2 == ERR_PTR(-EAGAIN) ||
@@ -1991,33 +2083,54 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
atomic_t *journal_ref,
struct bkey *replace_key)
{
+ struct closure cl;
+
BUG_ON(b->level && replace_key);
+ closure_init_stack(&cl);
+
+ mutex_lock(&b->write_lock);
+
+ if (write_block(b) != btree_bset_last(b) &&
+ b->keys.last_set_unwritten)
+ bch_btree_init_next(b); /* just wrote a set */
+
if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
- if (current->bio_list) {
- op->lock = b->c->root->level + 1;
- return -EAGAIN;
- } else if (op->lock <= b->c->root->level) {
- op->lock = b->c->root->level + 1;
- return -EINTR;
- } else {
- /* Invalidated all iterators */
- int ret = btree_split(b, op, insert_keys, replace_key);
+ mutex_unlock(&b->write_lock);
+ goto split;
+ }
- return bch_keylist_empty(insert_keys) ?
- 0 : ret ?: -EINTR;
- }
- } else {
- BUG_ON(write_block(b) != btree_bset_last(b));
+ BUG_ON(write_block(b) != btree_bset_last(b));
- if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
- if (!b->level)
- bch_btree_leaf_dirty(b, journal_ref);
- else
- bch_btree_node_write_sync(b);
- }
+ if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
+ if (!b->level)
+ bch_btree_leaf_dirty(b, journal_ref);
+ else
+ bch_btree_node_write(b, &cl);
+ }
- return 0;
+ mutex_unlock(&b->write_lock);
+
+ /* wait for btree node write if necessary, after unlock */
+ closure_sync(&cl);
+
+ return 0;
+split:
+ if (current->bio_list) {
+ op->lock = b->c->root->level + 1;
+ return -EAGAIN;
+ } else if (op->lock <= b->c->root->level) {
+ op->lock = b->c->root->level + 1;
+ return -EINTR;
+ } else {
+ /* Invalidated all iterators */
+ int ret = btree_split(b, op, insert_keys, replace_key);
+
+ if (bch_keylist_empty(insert_keys))
+ return 0;
+ else if (!ret)
+ return -EINTR;
+ return ret;
}
}
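
Editor's note: the split fallthrough in bch_btree_insert_node() replaces the dense return expression "bch_keylist_empty(insert_keys) ? 0 : ret ?: -EINTR" (which uses the GNU "?:" shorthand) with explicit branches. The two forms map results identically; a tiny standalone check with made-up values, no bcache types involved:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Old style: GNU "a ?: b" means "a ? a : b". */
static int old_form(bool keys_empty, int ret)
{
	return keys_empty ? 0 : ret ?: -EINTR;
}

/* New style: the same mapping spelled out. */
static int new_form(bool keys_empty, int ret)
{
	if (keys_empty)
		return 0;
	else if (!ret)
		return -EINTR;
	return ret;
}

int main(void)
{
	const int rets[] = { 0, -EAGAIN, -EINTR };
	int e, i;

	for (e = 0; e < 2; e++)
		for (i = 0; i < 3; i++)
			printf("empty=%d ret=%d -> old=%d new=%d\n",
			       e, rets[i],
			       old_form(e, rets[i]), new_form(e, rets[i]));
	return 0;
}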
@@ -2403,18 +2516,3 @@ void bch_keybuf_init(struct keybuf *buf)
spin_lock_init(&buf->lock);
array_allocator_init(&buf->freelist);
}
-
-void bch_btree_exit(void)
-{
- if (btree_io_wq)
- destroy_workqueue(btree_io_wq);
-}
-
-int __init bch_btree_init(void)
-{
- btree_io_wq = create_singlethread_workqueue("bch_btree_io");
- if (!btree_io_wq)
- return -ENOMEM;
-
- return 0;
-}
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index af065e97e55c..91dfa5e69685 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -127,6 +127,8 @@ struct btree {
struct cache_set *c;
struct btree *parent;
+ struct mutex write_lock;
+
unsigned long flags;
uint16_t written; /* would be nice to kill */
uint8_t level;
@@ -236,11 +238,13 @@ static inline void rw_unlock(bool w, struct btree *b)
}
void bch_btree_node_read_done(struct btree *);
+void __bch_btree_node_write(struct btree *, struct closure *);
void bch_btree_node_write(struct btree *, struct closure *);
void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
-struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
+struct btree *bch_btree_node_alloc(struct cache_set *, struct btree_op *, int);
+struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *,
+ struct bkey *, int, bool);
int bch_btree_insert_check_key(struct btree *, struct btree_op *,
struct bkey *);
@@ -248,10 +252,10 @@ int bch_btree_insert(struct cache_set *, struct keylist *,
atomic_t *, struct bkey *);
int bch_gc_thread_start(struct cache_set *);
-size_t bch_btree_gc_finish(struct cache_set *);
+void bch_initial_gc_finish(struct cache_set *);
void bch_moving_gc(struct cache_set *);
int bch_btree_check(struct cache_set *);
-uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
+void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
static inline void wake_up_gc(struct cache_set *c)
{
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 416d1a3e028e..3a0de4cf9771 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -194,9 +194,9 @@ err:
mutex_unlock(&b->c->bucket_lock);
bch_extent_to_text(buf, sizeof(buf), k);
btree_bug(b,
-"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+ g->prio, g->gen, g->last_gc, GC_MARK(g));
return true;
}
@@ -308,6 +308,16 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
return NULL;
}
+static void bch_subtract_dirty(struct bkey *k,
+ struct cache_set *c,
+ uint64_t offset,
+ int sectors)
+{
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+ offset, -sectors);
+}
+
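
Editor's note: bch_subtract_dirty() replaces a GCC nested function (subtract_dirty() defined inside bch_extent_insert_fixup()) that captured "c" from the enclosing scope; the standalone static helper takes the cache_set explicitly, which is plain portable C. The same transformation in miniature, with invented names:

#include <stdio.h>

struct ctx { long dirty_sectors; };

/* Portable replacement for a GCC nested function: pass the captured
 * context explicitly instead of relying on the enclosing stack frame. */
static void subtract_dirty(struct ctx *c, int sectors)
{
	c->dirty_sectors -= sectors;
}

int main(void)
{
	struct ctx c = { .dirty_sectors = 100 };

	subtract_dirty(&c, 25);
	printf("dirty sectors left: %ld\n", c.dirty_sectors);
	return 0;
}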
static bool bch_extent_insert_fixup(struct btree_keys *b,
struct bkey *insert,
struct btree_iter *iter,
@@ -315,13 +325,6 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
{
struct cache_set *c = container_of(b, struct btree, keys)->c;
- void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
- {
- if (KEY_DIRTY(k))
- bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
- offset, -sectors);
- }
-
uint64_t old_offset;
unsigned old_size, sectors_found = 0;
@@ -398,7 +401,8 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
struct bkey *top;
- subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
+ bch_subtract_dirty(k, c, KEY_START(insert),
+ KEY_SIZE(insert));
if (bkey_written(b, k)) {
/*
@@ -448,7 +452,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
}
}
- subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
+ bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
}
check_failed:
@@ -499,9 +503,9 @@ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
if (mutex_trylock(&b->c->bucket_lock)) {
if (b->c->gc_mark_valid &&
- ((GC_MARK(g) != GC_MARK_DIRTY &&
- KEY_DIRTY(k)) ||
- GC_MARK(g) == GC_MARK_METADATA))
+ (!GC_MARK(g) ||
+ GC_MARK(g) == GC_MARK_METADATA ||
+ (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
goto err;
if (g->prio == BTREE_PRIO)
@@ -515,9 +519,9 @@ err:
mutex_unlock(&b->c->bucket_lock);
bch_extent_to_text(buf, sizeof(buf), k);
btree_bug(b,
-"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+ g->prio, g->gen, g->last_gc, GC_MARK(g));
return true;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 18039affc306..59e82021b5bb 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -237,8 +237,14 @@ bsearch:
for (i = 0; i < ca->sb.njournal_buckets; i++)
if (ja->seq[i] > seq) {
seq = ja->seq[i];
- ja->cur_idx = ja->discard_idx =
- ja->last_idx = i;
+ /*
+ * When journal_reclaim() goes to allocate for
+ * the first time, it'll use the bucket after
+ * ja->cur_idx
+ */
+ ja->cur_idx = i;
+ ja->last_idx = ja->discard_idx = (i + 1) %
+ ca->sb.njournal_buckets;
}
}
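
Editor's note: journal_read() now leaves ja->cur_idx at the newest journal bucket found and points last_idx and discard_idx at the next slot, wrapping modulo sb.njournal_buckets, so the first journal_reclaim() allocation lands after cur_idx instead of on top of it. A quick illustration of the wrap, with illustrative constants:

#include <stdio.h>

int main(void)
{
	const unsigned njournal_buckets = 8;
	unsigned newest;

	/* Whichever slot held the newest sequence number, start reclaiming
	 * (and discarding) from the slot after it, wrapping at the end. */
	for (newest = 6; newest < 8; newest++)
		printf("cur_idx=%u -> last_idx=discard_idx=%u\n",
		       newest, (newest + 1) % njournal_buckets);
	return 0;
}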
@@ -288,16 +294,11 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
k = bkey_next(k)) {
unsigned j;
- for (j = 0; j < KEY_PTRS(k); j++) {
- struct bucket *g = PTR_BUCKET(c, k, j);
- atomic_inc(&g->pin);
+ for (j = 0; j < KEY_PTRS(k); j++)
+ if (ptr_available(c, k, j))
+ atomic_inc(&PTR_BUCKET(c, k, j)->pin);
- if (g->prio == BTREE_PRIO &&
- !ptr_stale(c, k, j))
- g->prio = INITIAL_PRIO;
- }
-
- __bch_btree_mark_key(c, 0, k);
+ bch_initial_mark_key(c, 0, k);
}
}
}
@@ -312,8 +313,6 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
struct keylist keylist;
- bch_keylist_init(&keylist);
-
list_for_each_entry(i, list, list) {
BUG_ON(i->pin && atomic_read(i->pin) != 1);
@@ -326,8 +325,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
k = bkey_next(k)) {
trace_bcache_journal_replay_key(k);
- bkey_copy(keylist.top, k);
- bch_keylist_push(&keylist);
+ bch_keylist_init_single(&keylist, k);
ret = bch_btree_insert(s, &keylist, i->pin, NULL);
if (ret)
@@ -383,16 +381,15 @@ retry:
b = best;
if (b) {
- rw_lock(true, b, b->level);
-
+ mutex_lock(&b->write_lock);
if (!btree_current_write(b)->journal) {
- rw_unlock(true, b);
+ mutex_unlock(&b->write_lock);
/* We raced */
goto retry;
}
- bch_btree_node_write(b, NULL);
- rw_unlock(true, b);
+ __bch_btree_node_write(b, NULL);
+ mutex_unlock(&b->write_lock);
}
}
@@ -536,6 +533,7 @@ void bch_journal_next(struct journal *j)
atomic_set(&fifo_back(&j->pin), 1);
j->cur->data->seq = ++j->seq;
+ j->cur->dirty = false;
j->cur->need_write = false;
j->cur->data->keys = 0;
@@ -731,7 +729,10 @@ static void journal_write_work(struct work_struct *work)
struct cache_set,
journal.work);
spin_lock(&c->journal.lock);
- journal_try_write(c);
+ if (c->journal.cur->dirty)
+ journal_try_write(c);
+ else
+ spin_unlock(&c->journal.lock);
}
/*
@@ -761,7 +762,8 @@ atomic_t *bch_journal(struct cache_set *c,
if (parent) {
closure_wait(&w->wait, parent);
journal_try_write(c);
- } else if (!w->need_write) {
+ } else if (!w->dirty) {
+ w->dirty = true;
schedule_delayed_work(&c->journal.work,
msecs_to_jiffies(c->journal_delay_ms));
spin_unlock(&c->journal.lock);
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index 9180c4465075..e3c39457afbb 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -95,6 +95,7 @@ struct journal_write {
struct cache_set *c;
struct closure_waitlist wait;
+ bool dirty;
bool need_write;
};
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 9eb60d102de8..cd7490311e51 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -24,12 +24,10 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
moving_gc_keys);
unsigned i;
- for (i = 0; i < KEY_PTRS(k); i++) {
- struct bucket *g = PTR_BUCKET(c, k, i);
-
- if (GC_MOVE(g))
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i) &&
+ GC_MOVE(PTR_BUCKET(c, k, i)))
return true;
- }
return false;
}
@@ -115,7 +113,7 @@ static void write_moving(struct closure *cl)
closure_call(&op->cl, bch_data_insert, NULL, cl);
}
- continue_at(cl, write_moving_finish, system_wq);
+ continue_at(cl, write_moving_finish, op->wq);
}
static void read_moving_submit(struct closure *cl)
@@ -125,7 +123,7 @@ static void read_moving_submit(struct closure *cl)
bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
- continue_at(cl, write_moving, system_wq);
+ continue_at(cl, write_moving, io->op.wq);
}
static void read_moving(struct cache_set *c)
@@ -160,6 +158,7 @@ static void read_moving(struct cache_set *c)
io->w = w;
io->op.inode = KEY_INODE(&w->key);
io->op.c = c;
+ io->op.wq = c->moving_gc_wq;
moving_init(io);
bio = &io->bio.bio;
@@ -216,7 +215,10 @@ void bch_moving_gc(struct cache_set *c)
ca->heap.used = 0;
for_each_bucket(b, ca) {
- if (!GC_SECTORS_USED(b))
+ if (GC_MARK(b) == GC_MARK_METADATA ||
+ !GC_SECTORS_USED(b) ||
+ GC_SECTORS_USED(b) == ca->sb.bucket_size ||
+ atomic_read(&b->pin))
continue;
if (!heap_full(&ca->heap)) {
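
Editor's note: bch_moving_gc() now skips buckets that are metadata, completely empty, completely full, or pinned, instead of only skipping empty ones, so the copygc heap holds only buckets that are actually worth moving. The predicate written out as a hypothetical standalone check; GC_MARK_METADATA and the field layout below are illustrative, not bcache's real definitions:

#include <stdbool.h>
#include <stdio.h>

#define GC_MARK_METADATA 3	/* illustrative value */

struct bucket { unsigned mark, sectors_used; int pin; };

static bool worth_moving(const struct bucket *b, unsigned bucket_size)
{
	return b->mark != GC_MARK_METADATA &&	  /* never move metadata */
	       b->sectors_used != 0 &&		  /* nothing to copy */
	       b->sectors_used != bucket_size &&  /* already fully used */
	       !b->pin;				  /* in use right now */
}

int main(void)
{
	struct bucket half = { .mark = 0, .sectors_used = 512,  .pin = 0 };
	struct bucket full = { .mark = 0, .sectors_used = 1024, .pin = 0 };

	printf("half-full: %d, full: %d\n",
	       worth_moving(&half, 1024), worth_moving(&full, 1024));
	return 0;
}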
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 5d5d031cf381..15fff4f68a7c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -12,11 +12,9 @@
#include "request.h"
#include "writeback.h"
-#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
-#include "blk-cgroup.h"
#include <trace/events/bcache.h>
@@ -27,171 +25,13 @@ struct kmem_cache *bch_search_cache;
static void bch_data_insert_start(struct closure *);
-/* Cgroup interface */
-
-#ifdef CONFIG_CGROUP_BCACHE
-static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
-
-static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
-{
- struct cgroup_subsys_state *css;
- return cgroup &&
- (css = cgroup_subsys_state(cgroup, bcache_subsys_id))
- ? container_of(css, struct bch_cgroup, css)
- : &bcache_default_cgroup;
-}
-
-struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
-{
- struct cgroup_subsys_state *css = bio->bi_css
- ? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
- : task_subsys_state(current, bcache_subsys_id);
-
- return css
- ? container_of(css, struct bch_cgroup, css)
- : &bcache_default_cgroup;
-}
-
-static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- char __user *buf, size_t nbytes, loff_t *ppos)
-{
- char tmp[1024];
- int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
- cgroup_to_bcache(cgrp)->cache_mode + 1);
-
- if (len < 0)
- return len;
-
- return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
-}
-
-static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buf)
-{
- int v = bch_read_string_list(buf, bch_cache_modes);
- if (v < 0)
- return v;
-
- cgroup_to_bcache(cgrp)->cache_mode = v - 1;
- return 0;
-}
-
-static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
-{
- return cgroup_to_bcache(cgrp)->verify;
-}
-
-static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
-{
- cgroup_to_bcache(cgrp)->verify = val;
- return 0;
-}
-
-static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
-{
- struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
- return atomic_read(&bcachecg->stats.cache_hits);
-}
-
-static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
-{
- struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
- return atomic_read(&bcachecg->stats.cache_misses);
-}
-
-static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
- struct cftype *cft)
-{
- struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
- return atomic_read(&bcachecg->stats.cache_bypass_hits);
-}
-
-static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
- struct cftype *cft)
-{
- struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
- return atomic_read(&bcachecg->stats.cache_bypass_misses);
-}
-
-static struct cftype bch_files[] = {
- {
- .name = "cache_mode",
- .read = cache_mode_read,
- .write_string = cache_mode_write,
- },
- {
- .name = "verify",
- .read_u64 = bch_verify_read,
- .write_u64 = bch_verify_write,
- },
- {
- .name = "cache_hits",
- .read_u64 = bch_cache_hits_read,
- },
- {
- .name = "cache_misses",
- .read_u64 = bch_cache_misses_read,
- },
- {
- .name = "cache_bypass_hits",
- .read_u64 = bch_cache_bypass_hits_read,
- },
- {
- .name = "cache_bypass_misses",
- .read_u64 = bch_cache_bypass_misses_read,
- },
- { } /* terminate */
-};
-
-static void init_bch_cgroup(struct bch_cgroup *cg)
-{
- cg->cache_mode = -1;
-}
-
-static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
-{
- struct bch_cgroup *cg;
-
- cg = kzalloc(sizeof(*cg), GFP_KERNEL);
- if (!cg)
- return ERR_PTR(-ENOMEM);
- init_bch_cgroup(cg);
- return &cg->css;
-}
-
-static void bcachecg_destroy(struct cgroup *cgroup)
-{
- struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
- kfree(cg);
-}
-
-struct cgroup_subsys bcache_subsys = {
- .create = bcachecg_create,
- .destroy = bcachecg_destroy,
- .subsys_id = bcache_subsys_id,
- .name = "bcache",
- .module = THIS_MODULE,
-};
-EXPORT_SYMBOL_GPL(bcache_subsys);
-#endif
-
static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
-#ifdef CONFIG_CGROUP_BCACHE
- int r = bch_bio_to_cgroup(bio)->cache_mode;
- if (r >= 0)
- return r;
-#endif
return BDEV_CACHE_MODE(&dc->sb);
}
static bool verify(struct cached_dev *dc, struct bio *bio)
{
-#ifdef CONFIG_CGROUP_BCACHE
- if (bch_bio_to_cgroup(bio)->verify)
- return true;
-#endif
return dc->verify;
}
@@ -248,7 +88,7 @@ static void bch_data_insert_keys(struct closure *cl)
atomic_dec_bug(journal_ref);
if (!op->insert_data_done)
- continue_at(cl, bch_data_insert_start, bcache_wq);
+ continue_at(cl, bch_data_insert_start, op->wq);
bch_keylist_free(&op->insert_keys);
closure_return(cl);
@@ -297,7 +137,7 @@ static void bch_data_invalidate(struct closure *cl)
op->insert_data_done = true;
bio_put(bio);
out:
- continue_at(cl, bch_data_insert_keys, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, op->wq);
}
static void bch_data_insert_error(struct closure *cl)
@@ -340,7 +180,7 @@ static void bch_data_insert_endio(struct bio *bio, int error)
if (op->writeback)
op->error = error;
else if (!op->replace)
- set_closure_fn(cl, bch_data_insert_error, bcache_wq);
+ set_closure_fn(cl, bch_data_insert_error, op->wq);
else
set_closure_fn(cl, NULL, NULL);
}
@@ -376,7 +216,7 @@ static void bch_data_insert_start(struct closure *cl)
if (bch_keylist_realloc(&op->insert_keys,
3 + (op->csum ? 1 : 0),
op->c))
- continue_at(cl, bch_data_insert_keys, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, op->wq);
k = op->insert_keys.top;
bkey_init(k);
@@ -413,7 +253,7 @@ static void bch_data_insert_start(struct closure *cl)
} while (n != bio);
op->insert_data_done = true;
- continue_at(cl, bch_data_insert_keys, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, op->wq);
err:
/* bch_alloc_sectors() blocks if s->writeback = true */
BUG_ON(op->writeback);
@@ -442,7 +282,7 @@ err:
bio_put(bio);
if (!bch_keylist_empty(&op->insert_keys))
- continue_at(cl, bch_data_insert_keys, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, op->wq);
else
closure_return(cl);
}
@@ -824,6 +664,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->iop.error = 0;
s->iop.flags = 0;
s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+ s->iop.wq = bcache_wq;
return s;
}
@@ -1203,22 +1044,13 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
static int flash_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
{
- struct bio_vec bv;
- struct bvec_iter iter;
-
- /* Zero fill bio */
+ unsigned bytes = min(sectors, bio_sectors(bio)) << 9;
- bio_for_each_segment(bv, bio, iter) {
- unsigned j = min(bv.bv_len >> 9, sectors);
-
- void *p = kmap(bv.bv_page);
- memset(p + bv.bv_offset, 0, j << 9);
- kunmap(bv.bv_page);
-
- sectors -= j;
- }
+ swap(bio->bi_iter.bi_size, bytes);
+ zero_fill_bio(bio);
+ swap(bio->bi_iter.bi_size, bytes);
- bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
+ bio_advance(bio, bytes);
if (!bio->bi_iter.bi_size)
return MAP_DONE;
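
Editor's note: flash_dev_cache_miss() drops the per-segment kmap/memset loop and instead temporarily shrinks bio->bi_iter.bi_size to the number of bytes to clear, calls zero_fill_bio(), restores the size, and then advances the bio. The same "shrink the length field, run a whole-buffer helper, restore" trick on a plain buffer, with hypothetical helper names:

#include <stdio.h>
#include <string.h>

struct buf { char *data; size_t len; };

/* Stand-in for zero_fill_bio(): clears everything the buffer spans. */
static void zero_fill(struct buf *b)
{
	memset(b->data, 0, b->len);
}

#define SWAP(a, b) do { size_t _t = (a); (a) = (b); (b) = _t; } while (0)

int main(void)
{
	char data[8] = "AAAAAAA";
	struct buf b = { .data = data, .len = sizeof(data) };
	size_t bytes = 3;		/* only the first 3 bytes are a "miss" */

	SWAP(b.len, bytes);		/* pretend the buffer is 3 bytes long */
	zero_fill(&b);			/* clears just those 3 bytes */
	SWAP(b.len, bytes);		/* restore the real length */

	printf("first byte: %d, untouched tail: %s\n", data[0], data + 3);
	return 0;
}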
@@ -1313,9 +1145,6 @@ void bch_flash_dev_request_init(struct bcache_device *d)
void bch_request_exit(void)
{
-#ifdef CONFIG_CGROUP_BCACHE
- cgroup_unload_subsys(&bcache_subsys);
-#endif
if (bch_search_cache)
kmem_cache_destroy(bch_search_cache);
}
@@ -1326,11 +1155,5 @@ int __init bch_request_init(void)
if (!bch_search_cache)
return -ENOMEM;
-#ifdef CONFIG_CGROUP_BCACHE
- cgroup_load_subsys(&bcache_subsys);
- init_bch_cgroup(&bcache_default_cgroup);
-
- cgroup_add_cftypes(&bcache_subsys, bch_files);
-#endif
return 0;
}
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 39f21dbedc38..1ff36875c2b3 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -1,12 +1,11 @@
#ifndef _BCACHE_REQUEST_H_
#define _BCACHE_REQUEST_H_
-#include <linux/cgroup.h>
-
struct data_insert_op {
struct closure cl;
struct cache_set *c;
struct bio *bio;
+ struct workqueue_struct *wq;
unsigned inode;
uint16_t write_point;
@@ -41,20 +40,4 @@ void bch_flash_dev_request_init(struct bcache_device *d);
extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
-struct bch_cgroup {
-#ifdef CONFIG_CGROUP_BCACHE
- struct cgroup_subsys_state css;
-#endif
- /*
- * We subtract one from the index into bch_cache_modes[], so that
- * default == -1; this makes it so the rest match up with d->cache_mode,
- * and we use d->cache_mode if cgrp->cache_mode < 0
- */
- short cache_mode;
- bool verify;
- struct cache_stat_collector stats;
-};
-
-struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
-
#endif /* _BCACHE_REQUEST_H_ */
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 84d0782f702e..0ca072c20d0d 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -201,9 +201,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
mark_cache_stats(&dc->accounting.collector, hit, bypass);
mark_cache_stats(&c->accounting.collector, hit, bypass);
-#ifdef CONFIG_CGROUP_BCACHE
- mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
-#endif
}
void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 24a3a1546caa..926ded8ccbf5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -541,9 +541,6 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
closure_sync(cl);
}
-#define buckets_free(c) "free %zu, free_inc %zu, unused %zu", \
- fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused)
-
void bch_prio_write(struct cache *ca)
{
int i;
@@ -554,10 +551,6 @@ void bch_prio_write(struct cache *ca)
lockdep_assert_held(&ca->set->bucket_lock);
- for (b = ca->buckets;
- b < ca->buckets + ca->sb.nbuckets; b++)
- b->disk_gen = b->gen;
-
ca->disk_buckets->seq++;
atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
@@ -601,14 +594,17 @@ void bch_prio_write(struct cache *ca)
mutex_lock(&ca->set->bucket_lock);
- ca->need_save_prio = 0;
-
/*
* Don't want the old priorities to get garbage collected until after we
* finish writing the new ones, and they're journalled
*/
- for (i = 0; i < prio_buckets(ca); i++)
+ for (i = 0; i < prio_buckets(ca); i++) {
+ if (ca->prio_last_buckets[i])
+ __bch_bucket_free(ca,
+ &ca->buckets[ca->prio_last_buckets[i]]);
+
ca->prio_last_buckets[i] = ca->prio_buckets[i];
+ }
}
static void prio_read(struct cache *ca, uint64_t bucket)
@@ -639,7 +635,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
}
b->prio = le16_to_cpu(d->prio);
- b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen;
+ b->gen = b->last_gc = d->gen;
}
}
@@ -843,6 +839,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
q->limits.max_segment_size = UINT_MAX;
q->limits.max_segments = BIO_MAX_PAGES;
q->limits.max_discard_sectors = UINT_MAX;
+ q->limits.discard_granularity = 512;
q->limits.io_min = block_size;
q->limits.logical_block_size = block_size;
q->limits.physical_block_size = block_size;
@@ -1355,6 +1352,8 @@ static void cache_set_free(struct closure *cl)
bch_bset_sort_state_free(&c->sort);
free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
+ if (c->moving_gc_wq)
+ destroy_workqueue(c->moving_gc_wq);
if (c->bio_split)
bioset_free(c->bio_split);
if (c->fill_iter)
@@ -1395,14 +1394,21 @@ static void cache_set_flush(struct closure *cl)
list_add(&c->root->list, &c->btree_cache);
/* Should skip this if we're unregistering because of an error */
- list_for_each_entry(b, &c->btree_cache, list)
+ list_for_each_entry(b, &c->btree_cache, list) {
+ mutex_lock(&b->write_lock);
if (btree_node_dirty(b))
- bch_btree_node_write(b, NULL);
+ __bch_btree_node_write(b, NULL);
+ mutex_unlock(&b->write_lock);
+ }
for_each_cache(ca, c, i)
if (ca->alloc_thread)
kthread_stop(ca->alloc_thread);
+ cancel_delayed_work_sync(&c->journal.work);
+ /* flush last journal entry if needed */
+ c->journal.work.work.func(&c->journal.work.work);
+
closure_return(cl);
}
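
Editor's note: cache_set_flush() now writes dirty btree nodes under each node's write_lock via __bch_btree_node_write(), cancels the pending journal delayed work, and then invokes the work function once directly so the last journal entry is flushed before teardown. The "cancel, then run once synchronously" shape, sketched in userspace terms with a hypothetical work structure rather than the kernel workqueue API:

#include <stdbool.h>
#include <stdio.h>

struct work { void (*func)(struct work *); bool pending; };

static void cancel_work_sync(struct work *w) { w->pending = false; }

static void journal_write_work(struct work *w)
{
	(void)w;
	printf("flushing last journal entry\n");
}

int main(void)
{
	struct work w = { .func = journal_write_work, .pending = true };

	cancel_work_sync(&w);	/* nothing will run later on its own ... */
	w.func(&w);		/* ... so run the flush once, right now */
	return 0;
}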
@@ -1485,14 +1491,13 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
sema_init(&c->sb_write_mutex, 1);
mutex_init(&c->bucket_lock);
- init_waitqueue_head(&c->try_wait);
+ init_waitqueue_head(&c->btree_cache_wait);
init_waitqueue_head(&c->bucket_wait);
sema_init(&c->uuid_write_mutex, 1);
spin_lock_init(&c->btree_gc_time.lock);
spin_lock_init(&c->btree_split_time.lock);
spin_lock_init(&c->btree_read_time.lock);
- spin_lock_init(&c->try_harder_time.lock);
bch_moving_init_cache_set(c);
@@ -1517,6 +1522,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
!(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
!(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
!(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
+ !(c->moving_gc_wq = create_workqueue("bcache_gc")) ||
bch_journal_alloc(c) ||
bch_btree_cache_alloc(c) ||
bch_open_buckets_alloc(c) ||
@@ -1580,7 +1586,7 @@ static void run_cache_set(struct cache_set *c)
goto err;
err = "error reading btree root";
- c->root = bch_btree_node_get(c, k, j->btree_level, true);
+ c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true);
if (IS_ERR_OR_NULL(c->root))
goto err;
@@ -1596,7 +1602,7 @@ static void run_cache_set(struct cache_set *c)
goto err;
bch_journal_mark(c, &journal);
- bch_btree_gc_finish(c);
+ bch_initial_gc_finish(c);
pr_debug("btree_check() done");
/*
@@ -1638,7 +1644,7 @@ static void run_cache_set(struct cache_set *c)
ca->sb.d[j] = ca->sb.first_bucket + j;
}
- bch_btree_gc_finish(c);
+ bch_initial_gc_finish(c);
err = "error starting allocator thread";
for_each_cache(ca, c, i)
@@ -1655,12 +1661,14 @@ static void run_cache_set(struct cache_set *c)
goto err;
err = "cannot allocate new btree root";
- c->root = bch_btree_node_alloc(c, 0, true);
+ c->root = bch_btree_node_alloc(c, NULL, 0);
if (IS_ERR_OR_NULL(c->root))
goto err;
+ mutex_lock(&c->root->write_lock);
bkey_copy_key(&c->root->key, &MAX_KEY);
bch_btree_node_write(c->root, &cl);
+ mutex_unlock(&c->root->write_lock);
bch_btree_set_root(c->root);
rw_unlock(true, c->root);
@@ -1782,7 +1790,6 @@ void bch_cache_release(struct kobject *kobj)
vfree(ca->buckets);
free_heap(&ca->heap);
- free_fifo(&ca->unused);
free_fifo(&ca->free_inc);
for (i = 0; i < RESERVE_NR; i++)
@@ -1819,7 +1826,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
- !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
!init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
!(ca->buckets = vzalloc(sizeof(struct bucket) *
ca->sb.nbuckets)) ||
@@ -1834,13 +1840,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
for_each_bucket(b, ca)
atomic_set(&b->pin, 0);
- if (bch_cache_allocator_init(ca))
- goto err;
-
return 0;
-err:
- kobject_put(&ca->kobj);
- return -ENOMEM;
}
static void register_cache(struct cache_sb *sb, struct page *sb_page,
@@ -1869,7 +1869,10 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
goto err;
+ mutex_lock(&bch_register_lock);
err = register_cache_set(ca);
+ mutex_unlock(&bch_register_lock);
+
if (err)
goto err;
@@ -1931,8 +1934,6 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!try_module_get(THIS_MODULE))
return -EBUSY;
- mutex_lock(&bch_register_lock);
-
if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
!(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
goto err;
@@ -1965,7 +1966,9 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!dc)
goto err_close;
+ mutex_lock(&bch_register_lock);
register_bdev(sb, sb_page, bdev, dc);
+ mutex_unlock(&bch_register_lock);
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
@@ -1978,7 +1981,6 @@ out:
put_page(sb_page);
kfree(sb);
kfree(path);
- mutex_unlock(&bch_register_lock);
module_put(THIS_MODULE);
return ret;
@@ -2057,7 +2059,6 @@ static void bcache_exit(void)
{
bch_debug_exit();
bch_request_exit();
- bch_btree_exit();
if (bcache_kobj)
kobject_put(bcache_kobj);
if (bcache_wq)
@@ -2087,7 +2088,6 @@ static int __init bcache_init(void)
if (!(bcache_wq = create_workqueue("bcache")) ||
!(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
sysfs_create_files(bcache_kobj, files) ||
- bch_btree_init() ||
bch_request_init() ||
bch_debug_init(bcache_kobj))
goto err;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index d8458d477a12..b3ff57d61dde 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -54,7 +54,6 @@ sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);
-sysfs_time_stats_attribute(try_harder, ms, us);
read_attribute(btree_nodes);
read_attribute(btree_used_percent);
@@ -406,7 +405,7 @@ struct bset_stats_op {
struct bset_stats stats;
};
-static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
+static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);
@@ -424,7 +423,7 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
memset(&op, 0, sizeof(op));
bch_btree_op_init(&op.op, -1);
- ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, btree_bset_stats);
+ ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
if (ret < 0)
return ret;
@@ -442,81 +441,81 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
op.stats.floats, op.stats.failed);
}
-SHOW(__bch_cache_set)
+static unsigned bch_root_usage(struct cache_set *c)
{
- unsigned root_usage(struct cache_set *c)
- {
- unsigned bytes = 0;
- struct bkey *k;
- struct btree *b;
- struct btree_iter iter;
+ unsigned bytes = 0;
+ struct bkey *k;
+ struct btree *b;
+ struct btree_iter iter;
- goto lock_root;
+ goto lock_root;
- do {
- rw_unlock(false, b);
+ do {
+ rw_unlock(false, b);
lock_root:
- b = c->root;
- rw_lock(false, b, b->level);
- } while (b != c->root);
+ b = c->root;
+ rw_lock(false, b, b->level);
+ } while (b != c->root);
- for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
- bytes += bkey_bytes(k);
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+ bytes += bkey_bytes(k);
- rw_unlock(false, b);
+ rw_unlock(false, b);
- return (bytes * 100) / btree_bytes(c);
- }
+ return (bytes * 100) / btree_bytes(c);
+}
- size_t cache_size(struct cache_set *c)
- {
- size_t ret = 0;
- struct btree *b;
+static size_t bch_cache_size(struct cache_set *c)
+{
+ size_t ret = 0;
+ struct btree *b;
- mutex_lock(&c->bucket_lock);
- list_for_each_entry(b, &c->btree_cache, list)
- ret += 1 << (b->keys.page_order + PAGE_SHIFT);
+ mutex_lock(&c->bucket_lock);
+ list_for_each_entry(b, &c->btree_cache, list)
+ ret += 1 << (b->keys.page_order + PAGE_SHIFT);
- mutex_unlock(&c->bucket_lock);
- return ret;
- }
-
- unsigned cache_max_chain(struct cache_set *c)
- {
- unsigned ret = 0;
- struct hlist_head *h;
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
- mutex_lock(&c->bucket_lock);
+static unsigned bch_cache_max_chain(struct cache_set *c)
+{
+ unsigned ret = 0;
+ struct hlist_head *h;
- for (h = c->bucket_hash;
- h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
- h++) {
- unsigned i = 0;
- struct hlist_node *p;
+ mutex_lock(&c->bucket_lock);
- hlist_for_each(p, h)
- i++;
+ for (h = c->bucket_hash;
+ h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
+ h++) {
+ unsigned i = 0;
+ struct hlist_node *p;
- ret = max(ret, i);
- }
+ hlist_for_each(p, h)
+ i++;
- mutex_unlock(&c->bucket_lock);
- return ret;
+ ret = max(ret, i);
}
- unsigned btree_used(struct cache_set *c)
- {
- return div64_u64(c->gc_stats.key_bytes * 100,
- (c->gc_stats.nodes ?: 1) * btree_bytes(c));
- }
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
- unsigned average_key_size(struct cache_set *c)
- {
- return c->gc_stats.nkeys
- ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
- : 0;
- }
+static unsigned bch_btree_used(struct cache_set *c)
+{
+ return div64_u64(c->gc_stats.key_bytes * 100,
+ (c->gc_stats.nodes ?: 1) * btree_bytes(c));
+}
+static unsigned bch_average_key_size(struct cache_set *c)
+{
+ return c->gc_stats.nkeys
+ ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
+ : 0;
+}
+
+SHOW(__bch_cache_set)
+{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
sysfs_print(synchronous, CACHE_SYNC(&c->sb));
@@ -524,21 +523,20 @@ lock_root:
sysfs_hprint(bucket_size, bucket_bytes(c));
sysfs_hprint(block_size, block_bytes(c));
sysfs_print(tree_depth, c->root->level);
- sysfs_print(root_usage_percent, root_usage(c));
+ sysfs_print(root_usage_percent, bch_root_usage(c));
- sysfs_hprint(btree_cache_size, cache_size(c));
- sysfs_print(btree_cache_max_chain, cache_max_chain(c));
+ sysfs_hprint(btree_cache_size, bch_cache_size(c));
+ sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);
sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
- sysfs_print_time_stats(&c->try_harder_time, try_harder, ms, us);
- sysfs_print(btree_used_percent, btree_used(c));
+ sysfs_print(btree_used_percent, bch_btree_used(c));
sysfs_print(btree_nodes, c->gc_stats.nodes);
- sysfs_hprint(average_key_size, average_key_size(c));
+ sysfs_hprint(average_key_size, bch_average_key_size(c));
sysfs_print(cache_read_races,
atomic_long_read(&c->cache_read_races));
@@ -709,7 +707,6 @@ static struct attribute *bch_cache_set_internal_files[] = {
sysfs_time_stats_attribute_list(btree_split, sec, us)
sysfs_time_stats_attribute_list(btree_sort, ms, us)
sysfs_time_stats_attribute_list(btree_read, ms, us)
- sysfs_time_stats_attribute_list(try_harder, ms, us)
&sysfs_btree_nodes,
&sysfs_btree_used_percent,
@@ -761,7 +758,9 @@ SHOW(__bch_cache)
int cmp(const void *l, const void *r)
{ return *((uint16_t *) r) - *((uint16_t *) l); }
- size_t n = ca->sb.nbuckets, i, unused, btree;
+ struct bucket *b;
+ size_t n = ca->sb.nbuckets, i;
+ size_t unused = 0, available = 0, dirty = 0, meta = 0;
uint64_t sum = 0;
/* Compute 31 quantiles */
uint16_t q[31], *p, *cached;
@@ -772,6 +771,17 @@ SHOW(__bch_cache)
return -ENOMEM;
mutex_lock(&ca->set->bucket_lock);
+ for_each_bucket(b, ca) {
+ if (!GC_SECTORS_USED(b))
+ unused++;
+ if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
+ available++;
+ if (GC_MARK(b) == GC_MARK_DIRTY)
+ dirty++;
+ if (GC_MARK(b) == GC_MARK_METADATA)
+ meta++;
+ }
+
for (i = ca->sb.first_bucket; i < n; i++)
p[i] = ca->buckets[i].prio;
mutex_unlock(&ca->set->bucket_lock);
@@ -786,10 +796,7 @@ SHOW(__bch_cache)
while (cached < p + n &&
*cached == BTREE_PRIO)
- cached++;
-
- btree = cached - p;
- n -= btree;
+ cached++, n--;
for (i = 0; i < n; i++)
sum += INITIAL_PRIO - cached[i];
@@ -805,12 +812,16 @@ SHOW(__bch_cache)
ret = scnprintf(buf, PAGE_SIZE,
"Unused: %zu%%\n"
+ "Clean: %zu%%\n"
+ "Dirty: %zu%%\n"
"Metadata: %zu%%\n"
"Average: %llu\n"
"Sectors per Q: %zu\n"
"Quantiles: [",
unused * 100 / (size_t) ca->sb.nbuckets,
- btree * 100 / (size_t) ca->sb.nbuckets, sum,
+ available * 100 / (size_t) ca->sb.nbuckets,
+ dirty * 100 / (size_t) ca->sb.nbuckets,
+ meta * 100 / (size_t) ca->sb.nbuckets, sum,
n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));
for (i = 0; i < ARRAY_SIZE(q); i++)
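
Editor's note: the priority_stats output in sysfs now counts buckets by GC mark in a single pass (unused, clean/reclaimable, dirty, metadata) and reports each as a percentage of nbuckets, rather than deriving only a metadata share from the prio sort. A sketch of that classification pass; the mark values and structure below are invented for illustration:

#include <stdio.h>

enum { MARK_NONE, MARK_RECLAIMABLE, MARK_DIRTY, MARK_METADATA };

struct bucket { int mark; unsigned sectors_used; };

int main(void)
{
	struct bucket buckets[] = {
		{ MARK_METADATA, 512 }, { MARK_DIRTY, 300 },
		{ MARK_RECLAIMABLE, 10 }, { MARK_NONE, 0 },
	};
	size_t n = sizeof(buckets) / sizeof(buckets[0]);
	size_t unused = 0, avail = 0, dirty = 0, meta = 0, i;

	for (i = 0; i < n; i++) {
		if (!buckets[i].sectors_used)            unused++;
		if (buckets[i].mark == MARK_RECLAIMABLE) avail++;
		if (buckets[i].mark == MARK_DIRTY)       dirty++;
		if (buckets[i].mark == MARK_METADATA)    meta++;
	}

	printf("Unused: %zu%%  Clean: %zu%%  Dirty: %zu%%  Metadata: %zu%%\n",
	       unused * 100 / n, avail * 100 / n,
	       dirty * 100 / n, meta * 100 / n);
	return 0;
}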
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index adbc3df17a80..b7820b0d2621 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -45,7 +45,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_compact);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_set_root);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_invalidate);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_invalidate);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_fail);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4195a01b1535..9a8e66ae04f5 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1988,7 +1988,6 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
if (mddev->bitmap_info.file) {
struct file *f = mddev->bitmap_info.file;
mddev->bitmap_info.file = NULL;
- restore_bitmap_write_access(f);
fput(f);
}
} else {
diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h
index bed4ad4e1b7c..aac0e2df06be 100644
--- a/drivers/md/dm-cache-block-types.h
+++ b/drivers/md/dm-cache-block-types.h
@@ -19,7 +19,6 @@
typedef dm_block_t __bitwise__ dm_oblock_t;
typedef uint32_t __bitwise__ dm_cblock_t;
-typedef dm_block_t __bitwise__ dm_dblock_t;
static inline dm_oblock_t to_oblock(dm_block_t b)
{
@@ -41,14 +40,4 @@ static inline uint32_t from_cblock(dm_cblock_t b)
return (__force uint32_t) b;
}
-static inline dm_dblock_t to_dblock(dm_block_t b)
-{
- return (__force dm_dblock_t) b;
-}
-
-static inline dm_block_t from_dblock(dm_dblock_t b)
-{
- return (__force dm_block_t) b;
-}
-
#endif /* DM_CACHE_BLOCK_TYPES_H */
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 9ef0752e8a08..4ead4ba60656 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -109,7 +109,7 @@ struct dm_cache_metadata {
dm_block_t discard_root;
sector_t discard_block_size;
- dm_dblock_t discard_nr_blocks;
+ dm_oblock_t discard_nr_blocks;
sector_t data_block_size;
dm_cblock_t cache_blocks;
@@ -120,6 +120,12 @@ struct dm_cache_metadata {
unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
size_t policy_hint_size;
struct dm_cache_statistics stats;
+
+ /*
+ * Reading the space map root can fail, so we read it into this
+ * buffer before the superblock is locked and updated.
+ */
+ __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};
/*-------------------------------------------------------------------
@@ -260,11 +266,31 @@ static void __setup_mapping_info(struct dm_cache_metadata *cmd)
}
}
+static int __save_sm_root(struct dm_cache_metadata *cmd)
+{
+ int r;
+ size_t metadata_len;
+
+ r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+ if (r < 0)
+ return r;
+
+ return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
+ metadata_len);
+}
+
+static void __copy_sm_root(struct dm_cache_metadata *cmd,
+ struct cache_disk_superblock *disk_super)
+{
+ memcpy(&disk_super->metadata_space_map_root,
+ &cmd->metadata_space_map_root,
+ sizeof(cmd->metadata_space_map_root));
+}
+
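
Editor's note: the dm-cache metadata commit path is split into a fallible step, __save_sm_root(), which serializes the space-map root into a preallocated buffer before the superblock is locked, and an infallible memcpy, __copy_sm_root(), done while it is held, so a failure can no longer leave a half-updated superblock. A generic two-phase sketch with made-up types rather than the dm persistent-data API:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define ROOT_SIZE 32

struct metadata {
	char saved_root[ROOT_SIZE];	/* staging buffer, filled up front */
};

/* Phase 1: may fail, so run it before taking the superblock lock. */
static int save_root(struct metadata *md, const char *root, size_t len)
{
	if (len > sizeof(md->saved_root))
		return -ENOSPC;
	memcpy(md->saved_root, root, len);
	return 0;
}

/* Phase 2: cannot fail; safe to run with the superblock "locked". */
static void copy_root(const struct metadata *md, char *superblock_field)
{
	memcpy(superblock_field, md->saved_root, sizeof(md->saved_root));
}

int main(void)
{
	struct metadata md = { .saved_root = { 0 } };
	char sb_field[ROOT_SIZE] = { 0 };

	if (save_root(&md, "space-map-root-bytes", 21))
		return 1;		/* bail out before touching the superblock */

	copy_root(&md, sb_field);	/* commit cannot fail at this point */
	printf("superblock root: %s\n", sb_field);
	return 0;
}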
static int __write_initial_superblock(struct dm_cache_metadata *cmd)
{
int r;
struct dm_block *sblock;
- size_t metadata_len;
struct cache_disk_superblock *disk_super;
sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
@@ -272,12 +298,16 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
- r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+ r = dm_tm_pre_commit(cmd->tm);
if (r < 0)
return r;
- r = dm_tm_pre_commit(cmd->tm);
- if (r < 0)
+ /*
+ * dm_sm_copy_root() can fail. So we need to do it before we start
+ * updating the superblock.
+ */
+ r = __save_sm_root(cmd);
+ if (r)
return r;
r = superblock_lock_zero(cmd, &sblock);
@@ -293,16 +323,13 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
disk_super->policy_hint_size = 0;
- r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
- metadata_len);
- if (r < 0)
- goto bad_locked;
+ __copy_sm_root(cmd, disk_super);
disk_super->mapping_root = cpu_to_le64(cmd->root);
disk_super->hint_root = cpu_to_le64(cmd->hint_root);
disk_super->discard_root = cpu_to_le64(cmd->discard_root);
disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
- disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
+ disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks));
disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
disk_super->cache_blocks = cpu_to_le32(0);
@@ -313,10 +340,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
disk_super->write_misses = cpu_to_le32(0);
return dm_tm_commit(cmd->tm, sblock);
-
-bad_locked:
- dm_bm_unlock(sblock);
- return r;
}
static int __format_metadata(struct dm_cache_metadata *cmd)
@@ -496,7 +519,7 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd,
cmd->hint_root = le64_to_cpu(disk_super->hint_root);
cmd->discard_root = le64_to_cpu(disk_super->discard_root);
cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
- cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
+ cmd->discard_nr_blocks = to_oblock(le64_to_cpu(disk_super->discard_nr_blocks));
cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
@@ -530,8 +553,9 @@ static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
disk_super = dm_block_data(sblock);
update_flags(disk_super, mutator);
read_superblock_fields(cmd, disk_super);
+ dm_bm_unlock(sblock);
- return dm_bm_flush_and_unlock(cmd->bm, sblock);
+ return dm_bm_flush(cmd->bm);
}
static int __begin_transaction(struct dm_cache_metadata *cmd)
@@ -559,7 +583,6 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
flags_mutator mutator)
{
int r;
- size_t metadata_len;
struct cache_disk_superblock *disk_super;
struct dm_block *sblock;
@@ -577,8 +600,8 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
if (r < 0)
return r;
- r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
- if (r < 0)
+ r = __save_sm_root(cmd);
+ if (r)
return r;
r = superblock_lock(cmd, &sblock);
@@ -594,7 +617,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
disk_super->hint_root = cpu_to_le64(cmd->hint_root);
disk_super->discard_root = cpu_to_le64(cmd->discard_root);
disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
- disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
+ disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks));
disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
@@ -605,13 +628,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
-
- r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
- metadata_len);
- if (r < 0) {
- dm_bm_unlock(sblock);
- return r;
- }
+ __copy_sm_root(cmd, disk_super);
return dm_tm_commit(cmd->tm, sblock);
}
@@ -771,15 +788,15 @@ out:
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
sector_t discard_block_size,
- dm_dblock_t new_nr_entries)
+ dm_oblock_t new_nr_entries)
{
int r;
down_write(&cmd->root_lock);
r = dm_bitset_resize(&cmd->discard_info,
cmd->discard_root,
- from_dblock(cmd->discard_nr_blocks),
- from_dblock(new_nr_entries),
+ from_oblock(cmd->discard_nr_blocks),
+ from_oblock(new_nr_entries),
false, &cmd->discard_root);
if (!r) {
cmd->discard_block_size = discard_block_size;
@@ -792,28 +809,28 @@ int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
return r;
}
-static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
+static int __set_discard(struct dm_cache_metadata *cmd, dm_oblock_t b)
{
return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
- from_dblock(b), &cmd->discard_root);
+ from_oblock(b), &cmd->discard_root);
}
-static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
+static int __clear_discard(struct dm_cache_metadata *cmd, dm_oblock_t b)
{
return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
- from_dblock(b), &cmd->discard_root);
+ from_oblock(b), &cmd->discard_root);
}
-static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b,
+static int __is_discarded(struct dm_cache_metadata *cmd, dm_oblock_t b,
bool *is_discarded)
{
return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root,
- from_dblock(b), &cmd->discard_root,
+ from_oblock(b), &cmd->discard_root,
is_discarded);
}
static int __discard(struct dm_cache_metadata *cmd,
- dm_dblock_t dblock, bool discard)
+ dm_oblock_t dblock, bool discard)
{
int r;
@@ -826,7 +843,7 @@ static int __discard(struct dm_cache_metadata *cmd,
}
int dm_cache_set_discard(struct dm_cache_metadata *cmd,
- dm_dblock_t dblock, bool discard)
+ dm_oblock_t dblock, bool discard)
{
int r;
@@ -844,8 +861,8 @@ static int __load_discards(struct dm_cache_metadata *cmd,
dm_block_t b;
bool discard;
- for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
- dm_dblock_t dblock = to_dblock(b);
+ for (b = 0; b < from_oblock(cmd->discard_nr_blocks); b++) {
+ dm_oblock_t dblock = to_oblock(b);
if (cmd->clean_when_opened) {
r = __is_discarded(cmd, dblock, &discard);
@@ -1228,22 +1245,12 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
return 0;
}
-int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
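+/* Callback for policy_walk_mappings(): persists the hint for one cblock. */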
+static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint)
{
+ struct dm_cache_metadata *cmd = context;
+ __le32 value = cpu_to_le32(hint);
int r;
- down_write(&cmd->root_lock);
- r = begin_hints(cmd, policy);
- up_write(&cmd->root_lock);
-
- return r;
-}
-
-static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
- uint32_t hint)
-{
- int r;
- __le32 value = cpu_to_le32(hint);
__dm_bless_for_disk(&value);
r = dm_array_set_value(&cmd->hint_info, cmd->hint_root,
@@ -1253,16 +1260,25 @@ static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
return r;
}
-int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
- uint32_t hint)
+static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
int r;
- if (!hints_array_initialized(cmd))
- return 0;
+ r = begin_hints(cmd, policy);
+ if (r) {
+ DMERR("begin_hints failed");
+ return r;
+ }
+
+ return policy_walk_mappings(policy, save_hint, cmd);
+}
+
+int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+{
+ int r;
down_write(&cmd->root_lock);
- r = save_hint(cmd, cblock, hint);
+ r = write_hints(cmd, policy);
up_write(&cmd->root_lock);
return r;
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index cd906f14f98d..cd70a78623a3 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -72,14 +72,14 @@ dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
sector_t discard_block_size,
- dm_dblock_t new_nr_entries);
+ dm_oblock_t new_nr_entries);
typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
- dm_dblock_t dblock, bool discarded);
+ dm_oblock_t dblock, bool discarded);
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
load_discard_fn fn, void *context);
-int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
+int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_oblock_t dblock, bool discard);
int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
@@ -128,14 +128,7 @@ void dm_cache_dump(struct dm_cache_metadata *cmd);
* rather than querying the policy for each cblock, we let it walk its data
* structures and fill in the hints in whatever order it wishes.
*/
-
-int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
-
-/*
- * requests hints for every cblock and stores in the metadata device.
- */
-int dm_cache_save_hint(struct dm_cache_metadata *cmd,
- dm_cblock_t cblock, uint32_t hint);
+int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
/*
* Query method. Are all the blocks in the cache clean?
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 074b9c8e4cf0..1bf4a71919ec 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -237,9 +237,8 @@ struct cache {
/*
* origin_blocks entries, discarded if set.
*/
- dm_dblock_t discard_nr_blocks;
+ dm_oblock_t discard_nr_blocks;
unsigned long *discard_bitset;
- uint32_t discard_block_size; /* a power of 2 times sectors per block */
/*
* Rather than reconstructing the table line for the status we just
@@ -526,48 +525,33 @@ static dm_block_t block_div(dm_block_t b, uint32_t n)
return b;
}
-static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
-{
- uint32_t discard_blocks = cache->discard_block_size;
- dm_block_t b = from_oblock(oblock);
-
- if (!block_size_is_power_of_two(cache))
- discard_blocks = discard_blocks / cache->sectors_per_block;
- else
- discard_blocks >>= cache->sectors_per_block_shift;
-
- b = block_div(b, discard_blocks);
-
- return to_dblock(b);
-}
-
-static void set_discard(struct cache *cache, dm_dblock_t b)
+static void set_discard(struct cache *cache, dm_oblock_t b)
{
unsigned long flags;
atomic_inc(&cache->stats.discard_count);
spin_lock_irqsave(&cache->lock, flags);
- set_bit(from_dblock(b), cache->discard_bitset);
+ set_bit(from_oblock(b), cache->discard_bitset);
spin_unlock_irqrestore(&cache->lock, flags);
}
-static void clear_discard(struct cache *cache, dm_dblock_t b)
+static void clear_discard(struct cache *cache, dm_oblock_t b)
{
unsigned long flags;
spin_lock_irqsave(&cache->lock, flags);
- clear_bit(from_dblock(b), cache->discard_bitset);
+ clear_bit(from_oblock(b), cache->discard_bitset);
spin_unlock_irqrestore(&cache->lock, flags);
}
-static bool is_discarded(struct cache *cache, dm_dblock_t b)
+static bool is_discarded(struct cache *cache, dm_oblock_t b)
{
int r;
unsigned long flags;
spin_lock_irqsave(&cache->lock, flags);
- r = test_bit(from_dblock(b), cache->discard_bitset);
+ r = test_bit(from_oblock(b), cache->discard_bitset);
spin_unlock_irqrestore(&cache->lock, flags);
return r;
@@ -579,8 +563,7 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
unsigned long flags;
spin_lock_irqsave(&cache->lock, flags);
- r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
- cache->discard_bitset);
+ r = test_bit(from_oblock(b), cache->discard_bitset);
spin_unlock_irqrestore(&cache->lock, flags);
return r;
@@ -705,7 +688,7 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
check_if_tick_bio_needed(cache, bio);
remap_to_origin(cache, bio);
if (bio_data_dir(bio) == WRITE)
- clear_discard(cache, oblock_to_dblock(cache, oblock));
+ clear_discard(cache, oblock);
}
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
@@ -715,7 +698,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
remap_to_cache(cache, bio, cblock);
if (bio_data_dir(bio) == WRITE) {
set_dirty(cache, oblock, cblock);
- clear_discard(cache, oblock_to_dblock(cache, oblock));
+ clear_discard(cache, oblock);
}
}
@@ -1288,14 +1271,14 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
- cache->discard_block_size);
+ cache->sectors_per_block);
dm_block_t end_block = bio_end_sector(bio);
dm_block_t b;
- end_block = block_div(end_block, cache->discard_block_size);
+ end_block = block_div(end_block, cache->sectors_per_block);
for (b = start_block; b < end_block; b++)
- set_discard(cache, to_dblock(b));
+ set_discard(cache, to_oblock(b));
bio_endio(bio, 0);
}
@@ -2171,35 +2154,6 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
return 0;
}
-/*
- * We want the discard block size to be a power of two, at least the size
- * of the cache block size, and have no more than 2^14 discard blocks
- * across the origin.
- */
-#define MAX_DISCARD_BLOCKS (1 << 14)
-
-static bool too_many_discard_blocks(sector_t discard_block_size,
- sector_t origin_size)
-{
- (void) sector_div(origin_size, discard_block_size);
-
- return origin_size > MAX_DISCARD_BLOCKS;
-}
-
-static sector_t calculate_discard_block_size(sector_t cache_block_size,
- sector_t origin_size)
-{
- sector_t discard_block_size;
-
- discard_block_size = roundup_pow_of_two(cache_block_size);
-
- if (origin_size)
- while (too_many_discard_blocks(discard_block_size, origin_size))
- discard_block_size *= 2;
-
- return discard_block_size;
-}
-
#define DEFAULT_MIGRATION_THRESHOLD 2048
static int cache_create(struct cache_args *ca, struct cache **result)
@@ -2321,16 +2275,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)
}
clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
- cache->discard_block_size =
- calculate_discard_block_size(cache->sectors_per_block,
- cache->origin_sectors);
- cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
- cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
+ cache->discard_nr_blocks = cache->origin_blocks;
+ cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks));
if (!cache->discard_bitset) {
*error = "could not allocate discard bitset";
goto bad;
}
- clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
+ clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks));
cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(cache->copier)) {
@@ -2614,16 +2565,16 @@ static int write_discard_bitset(struct cache *cache)
{
unsigned i, r;
- r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
- cache->discard_nr_blocks);
+ r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block,
+ cache->origin_blocks);
if (r) {
DMERR("could not resize on-disk discard bitset");
return r;
}
- for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
- r = dm_cache_set_discard(cache->cmd, to_dblock(i),
- is_discarded(cache, to_dblock(i)));
+ for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) {
+ r = dm_cache_set_discard(cache->cmd, to_oblock(i),
+ is_discarded(cache, to_oblock(i)));
if (r)
return r;
}
@@ -2631,30 +2582,6 @@ static int write_discard_bitset(struct cache *cache)
return 0;
}
-static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
- uint32_t hint)
-{
- struct cache *cache = context;
- return dm_cache_save_hint(cache->cmd, cblock, hint);
-}
-
-static int write_hints(struct cache *cache)
-{
- int r;
-
- r = dm_cache_begin_hints(cache->cmd, cache->policy);
- if (r) {
- DMERR("dm_cache_begin_hints failed");
- return r;
- }
-
- r = policy_walk_mappings(cache->policy, save_hint, cache);
- if (r)
- DMERR("policy_walk_mappings failed");
-
- return r;
-}
-
/*
* returns true on success
*/
@@ -2672,7 +2599,7 @@ static bool sync_metadata(struct cache *cache)
save_stats(cache);
- r3 = write_hints(cache);
+ r3 = dm_cache_write_hints(cache->cmd, cache->policy);
if (r3)
DMERR("could not write hints");
@@ -2720,16 +2647,14 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
}
static int load_discard(void *context, sector_t discard_block_size,
- dm_dblock_t dblock, bool discard)
+ dm_oblock_t oblock, bool discard)
{
struct cache *cache = context;
- /* FIXME: handle mis-matched block size */
-
if (discard)
- set_discard(cache, dblock);
+ set_discard(cache, oblock);
else
- clear_discard(cache, dblock);
+ clear_discard(cache, oblock);
return 0;
}
@@ -3120,8 +3045,8 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
/*
* FIXME: these limits may be incompatible with the cache device
*/
- limits->max_discard_sectors = cache->discard_block_size * 1024;
- limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+ limits->max_discard_sectors = cache->sectors_per_block;
+ limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT;
}
static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -3145,7 +3070,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
new file mode 100644
index 000000000000..414dad4cb49b
--- /dev/null
+++ b/drivers/md/dm-era-target.c
@@ -0,0 +1,1746 @@
+#include "dm.h"
+#include "persistent-data/dm-transaction-manager.h"
+#include "persistent-data/dm-bitset.h"
+#include "persistent-data/dm-space-map.h"
+
+#include <linux/dm-io.h>
+#include <linux/dm-kcopyd.h>
+#include <linux/init.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define DM_MSG_PREFIX "era"
+
+#define SUPERBLOCK_LOCATION 0
+#define SUPERBLOCK_MAGIC 2126579579
+#define SUPERBLOCK_CSUM_XOR 146538381
+#define MIN_ERA_VERSION 1
+#define MAX_ERA_VERSION 1
+#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION
+#define MIN_BLOCK_SIZE 8
+
+/*----------------------------------------------------------------
+ * Writeset
+ *--------------------------------------------------------------*/
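+/*
+ * A writeset records which blocks of the origin device have been
+ * written to during the current era. It is held both in core (for
+ * fast lookups on the io path) and on disk as a dm_bitset; when an
+ * era rolls over, the on-disk bitset is archived into the writeset
+ * tree, keyed by era.
+ */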
+struct writeset_metadata {
+ uint32_t nr_bits;
+ dm_block_t root;
+};
+
+struct writeset {
+ struct writeset_metadata md;
+
+ /*
+	 * An in-core copy of the bits, so we avoid constantly doing
+	 * lookups on disk.
+ */
+ unsigned long *bits;
+};
+
+/*
+ * This does not free the on-disk bitset; that is normally done after it
+ * has been digested into the era array.
+ */
+static void writeset_free(struct writeset *ws)
+{
+ vfree(ws->bits);
+}
+
+static int setup_on_disk_bitset(struct dm_disk_bitset *info,
+ unsigned nr_bits, dm_block_t *root)
+{
+ int r;
+
+ r = dm_bitset_empty(info, root);
+ if (r)
+ return r;
+
+ return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
+}
+
+static size_t bitset_size(unsigned nr_bits)
+{
+ return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
+}
+
+/*
+ * Allocates memory for the in core bitset.
+ */
+static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
+{
+ ws->md.nr_bits = nr_blocks;
+ ws->md.root = INVALID_WRITESET_ROOT;
+ ws->bits = vzalloc(bitset_size(nr_blocks));
+ if (!ws->bits) {
+ DMERR("%s: couldn't allocate in memory bitset", __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Wipes the in-core bitset, and creates a new on disk bitset.
+ */
+static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws)
+{
+ int r;
+
+ memset(ws->bits, 0, bitset_size(ws->md.nr_bits));
+
+ r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
+ if (r) {
+ DMERR("%s: setup_on_disk_bitset failed", __func__);
+ return r;
+ }
+
+ return 0;
+}
+
+static bool writeset_marked(struct writeset *ws, dm_block_t block)
+{
+ return test_bit(block, ws->bits);
+}
+
+static int writeset_marked_on_disk(struct dm_disk_bitset *info,
+ struct writeset_metadata *m, dm_block_t block,
+ bool *result)
+{
+ dm_block_t old = m->root;
+
+ /*
+ * The bitset was flushed when it was archived, so we know there'll
+ * be no change to the root.
+ */
+ int r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
+ if (r) {
+ DMERR("%s: dm_bitset_test_bit failed", __func__);
+ return r;
+ }
+
+ BUG_ON(m->root != old);
+
+ return r;
+}
+
+/*
+ * Returns < 0 on error, 0 if the bit wasn't previously set, 1 if it was.
+ */
+static int writeset_test_and_set(struct dm_disk_bitset *info,
+ struct writeset *ws, uint32_t block)
+{
+ int r;
+
+ if (!test_and_set_bit(block, ws->bits)) {
+ r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
+ if (r) {
+ /* FIXME: fail mode */
+ return r;
+ }
+
+ return 0;
+ }
+
+ return 1;
+}
+
+/*----------------------------------------------------------------
+ * On disk metadata layout
+ *--------------------------------------------------------------*/
+#define SPACE_MAP_ROOT_SIZE 128
+#define UUID_LEN 16
+
+struct writeset_disk {
+ __le32 nr_bits;
+ __le64 root;
+} __packed;
+
+struct superblock_disk {
+ __le32 csum;
+ __le32 flags;
+ __le64 blocknr;
+
+ __u8 uuid[UUID_LEN];
+ __le64 magic;
+ __le32 version;
+
+ __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+
+ __le32 data_block_size;
+ __le32 metadata_block_size;
+ __le32 nr_blocks;
+
+ __le32 current_era;
+ struct writeset_disk current_writeset;
+
+ /*
+ * Only these two fields are valid within the metadata snapshot.
+ */
+ __le64 writeset_tree_root;
+ __le64 era_array_root;
+
+ __le64 metadata_snap;
+} __packed;
+
+/*----------------------------------------------------------------
+ * Superblock validation
+ *--------------------------------------------------------------*/
+static void sb_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t sb_block_size)
+{
+ struct superblock_disk *disk = dm_block_data(b);
+
+ disk->blocknr = cpu_to_le64(dm_block_location(b));
+ disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags,
+ sb_block_size - sizeof(__le32),
+ SUPERBLOCK_CSUM_XOR));
+}
+
+static int check_metadata_version(struct superblock_disk *disk)
+{
+ uint32_t metadata_version = le32_to_cpu(disk->version);
+ if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) {
+ DMERR("Era metadata version %u found, but only versions between %u and %u supported.",
+ metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sb_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t sb_block_size)
+{
+ struct superblock_disk *disk = dm_block_data(b);
+ __le32 csum_le;
+
+ if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) {
+ DMERR("sb_check failed: blocknr %llu: wanted %llu",
+ le64_to_cpu(disk->blocknr),
+ (unsigned long long)dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) {
+ DMERR("sb_check failed: magic %llu: wanted %llu",
+ le64_to_cpu(disk->magic),
+ (unsigned long long) SUPERBLOCK_MAGIC);
+ return -EILSEQ;
+ }
+
+ csum_le = cpu_to_le32(dm_bm_checksum(&disk->flags,
+ sb_block_size - sizeof(__le32),
+ SUPERBLOCK_CSUM_XOR));
+ if (csum_le != disk->csum) {
+ DMERR("sb_check failed: csum %u: wanted %u",
+ le32_to_cpu(csum_le), le32_to_cpu(disk->csum));
+ return -EILSEQ;
+ }
+
+ return check_metadata_version(disk);
+}
+
+static struct dm_block_validator sb_validator = {
+ .name = "superblock",
+ .prepare_for_write = sb_prepare_for_write,
+ .check = sb_check
+};
+
+/*----------------------------------------------------------------
+ * Low level metadata handling
+ *--------------------------------------------------------------*/
+#define DM_ERA_METADATA_BLOCK_SIZE 4096
+#define DM_ERA_METADATA_CACHE_SIZE 64
+#define ERA_MAX_CONCURRENT_LOCKS 5
+
+struct era_metadata {
+ struct block_device *bdev;
+ struct dm_block_manager *bm;
+ struct dm_space_map *sm;
+ struct dm_transaction_manager *tm;
+
+ dm_block_t block_size;
+ uint32_t nr_blocks;
+
+ uint32_t current_era;
+
+ /*
+ * We preallocate 2 writesets. When an era rolls over we
+ * switch between them. This means the allocation is done at
+ * preresume time, rather than on the io path.
+ */
+ struct writeset writesets[2];
+ struct writeset *current_writeset;
+
+ dm_block_t writeset_tree_root;
+ dm_block_t era_array_root;
+
+ struct dm_disk_bitset bitset_info;
+ struct dm_btree_info writeset_tree_info;
+ struct dm_array_info era_array_info;
+
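+	/* SUPERBLOCK_LOCATION here means no metadata snapshot is currently held. */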
+ dm_block_t metadata_snap;
+
+ /*
+ * A flag that is set whenever a writeset has been archived.
+ */
+ bool archived_writesets;
+
+ /*
+ * Reading the space map root can fail, so we read it into this
+ * buffer before the superblock is locked and updated.
+ */
+ __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+};
+
+static int superblock_read_lock(struct era_metadata *md,
+ struct dm_block **sblock)
+{
+ return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+static int superblock_lock_zero(struct era_metadata *md,
+ struct dm_block **sblock)
+{
+ return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+static int superblock_lock(struct era_metadata *md,
+ struct dm_block **sblock)
+{
+ return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+/* FIXME: duplication with cache and thin */
+static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
+{
+ int r;
+ unsigned i;
+ struct dm_block *b;
+ __le64 *data_le, zero = cpu_to_le64(0);
+ unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+
+ /*
+ * We can't use a validator here - it may be all zeroes.
+ */
+ r = dm_bm_read_lock(bm, SUPERBLOCK_LOCATION, NULL, &b);
+ if (r)
+ return r;
+
+ data_le = dm_block_data(b);
+ *result = true;
+ for (i = 0; i < sb_block_size; i++) {
+ if (data_le[i] != zero) {
+ *result = false;
+ break;
+ }
+ }
+
+ return dm_bm_unlock(b);
+}
+
+/*----------------------------------------------------------------*/
+
+static void ws_pack(const struct writeset_metadata *core, struct writeset_disk *disk)
+{
+ disk->nr_bits = cpu_to_le32(core->nr_bits);
+ disk->root = cpu_to_le64(core->root);
+}
+
+static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata *core)
+{
+ core->nr_bits = le32_to_cpu(disk->nr_bits);
+ core->root = le64_to_cpu(disk->root);
+}
+
+static void ws_inc(void *context, const void *value)
+{
+ struct era_metadata *md = context;
+ struct writeset_disk ws_d;
+ dm_block_t b;
+
+ memcpy(&ws_d, value, sizeof(ws_d));
+ b = le64_to_cpu(ws_d.root);
+
+ dm_tm_inc(md->tm, b);
+}
+
+static void ws_dec(void *context, const void *value)
+{
+ struct era_metadata *md = context;
+ struct writeset_disk ws_d;
+ dm_block_t b;
+
+ memcpy(&ws_d, value, sizeof(ws_d));
+ b = le64_to_cpu(ws_d.root);
+
+ dm_bitset_del(&md->bitset_info, b);
+}
+
+static int ws_eq(void *context, const void *value1, const void *value2)
+{
+ return !memcmp(value1, value2, sizeof(struct writeset_metadata));
+}
+
+/*----------------------------------------------------------------*/
+
+static void setup_writeset_tree_info(struct era_metadata *md)
+{
+ struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;
+ md->writeset_tree_info.tm = md->tm;
+ md->writeset_tree_info.levels = 1;
+ vt->context = md;
+ vt->size = sizeof(struct writeset_disk);
+ vt->inc = ws_inc;
+ vt->dec = ws_dec;
+ vt->equal = ws_eq;
+}
+
+static void setup_era_array_info(struct era_metadata *md)
+{
+ struct dm_btree_value_type vt;
+ vt.context = NULL;
+ vt.size = sizeof(__le32);
+ vt.inc = NULL;
+ vt.dec = NULL;
+ vt.equal = NULL;
+
+ dm_array_info_init(&md->era_array_info, md->tm, &vt);
+}
+
+static void setup_infos(struct era_metadata *md)
+{
+ dm_disk_bitset_init(md->tm, &md->bitset_info);
+ setup_writeset_tree_info(md);
+ setup_era_array_info(md);
+}
+
+/*----------------------------------------------------------------*/
+
+static int create_fresh_metadata(struct era_metadata *md)
+{
+ int r;
+
+ r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION,
+ &md->tm, &md->sm);
+ if (r < 0) {
+ DMERR("dm_tm_create_with_sm failed");
+ return r;
+ }
+
+ setup_infos(md);
+
+ r = dm_btree_empty(&md->writeset_tree_info, &md->writeset_tree_root);
+ if (r) {
+ DMERR("couldn't create new writeset tree");
+ goto bad;
+ }
+
+ r = dm_array_empty(&md->era_array_info, &md->era_array_root);
+ if (r) {
+ DMERR("couldn't create era array");
+ goto bad;
+ }
+
+ return 0;
+
+bad:
+ dm_sm_destroy(md->sm);
+ dm_tm_destroy(md->tm);
+
+ return r;
+}
+
+static int save_sm_root(struct era_metadata *md)
+{
+ int r;
+ size_t metadata_len;
+
+ r = dm_sm_root_size(md->sm, &metadata_len);
+ if (r < 0)
+ return r;
+
+ return dm_sm_copy_root(md->sm, &md->metadata_space_map_root,
+ metadata_len);
+}
+
+static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk)
+{
+ memcpy(&disk->metadata_space_map_root,
+ &md->metadata_space_map_root,
+ sizeof(md->metadata_space_map_root));
+}
+
+/*
+ * Writes a superblock, including the static fields that don't get updated
+ * with every commit (possible optimisation here). 'md' should be fully
+ * constructed when this is called.
+ */
+static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk)
+{
+ disk->magic = cpu_to_le64(SUPERBLOCK_MAGIC);
+ disk->flags = cpu_to_le32(0ul);
+
+ /* FIXME: can't keep blanking the uuid (uuid is currently unused though) */
+ memset(disk->uuid, 0, sizeof(disk->uuid));
+ disk->version = cpu_to_le32(MAX_ERA_VERSION);
+
+ copy_sm_root(md, disk);
+
+ disk->data_block_size = cpu_to_le32(md->block_size);
+ disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ disk->nr_blocks = cpu_to_le32(md->nr_blocks);
+ disk->current_era = cpu_to_le32(md->current_era);
+
+ ws_pack(&md->current_writeset->md, &disk->current_writeset);
+ disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root);
+ disk->era_array_root = cpu_to_le64(md->era_array_root);
+ disk->metadata_snap = cpu_to_le64(md->metadata_snap);
+}
+
+static int write_superblock(struct era_metadata *md)
+{
+ int r;
+ struct dm_block *sblock;
+ struct superblock_disk *disk;
+
+ r = save_sm_root(md);
+ if (r) {
+ DMERR("%s: save_sm_root failed", __func__);
+ return r;
+ }
+
+ r = superblock_lock_zero(md, &sblock);
+ if (r)
+ return r;
+
+ disk = dm_block_data(sblock);
+ prepare_superblock(md, disk);
+
+ return dm_tm_commit(md->tm, sblock);
+}
+
+/*
+ * Assumes block_size and the infos are set.
+ */
+static int format_metadata(struct era_metadata *md)
+{
+ int r;
+
+ r = create_fresh_metadata(md);
+ if (r)
+ return r;
+
+ r = write_superblock(md);
+ if (r) {
+ dm_sm_destroy(md->sm);
+ dm_tm_destroy(md->tm);
+ return r;
+ }
+
+ return 0;
+}
+
+static int open_metadata(struct era_metadata *md)
+{
+ int r;
+ struct dm_block *sblock;
+ struct superblock_disk *disk;
+
+ r = superblock_read_lock(md, &sblock);
+ if (r) {
+ DMERR("couldn't read_lock superblock");
+ return r;
+ }
+
+ disk = dm_block_data(sblock);
+ r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
+ disk->metadata_space_map_root,
+ sizeof(disk->metadata_space_map_root),
+ &md->tm, &md->sm);
+ if (r) {
+ DMERR("dm_tm_open_with_sm failed");
+ goto bad;
+ }
+
+ setup_infos(md);
+
+ md->block_size = le32_to_cpu(disk->data_block_size);
+ md->nr_blocks = le32_to_cpu(disk->nr_blocks);
+ md->current_era = le32_to_cpu(disk->current_era);
+
+ md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
+ md->era_array_root = le64_to_cpu(disk->era_array_root);
+ md->metadata_snap = le64_to_cpu(disk->metadata_snap);
+ md->archived_writesets = true;
+
+ return dm_bm_unlock(sblock);
+
+bad:
+ dm_bm_unlock(sblock);
+ return r;
+}
+
+static int open_or_format_metadata(struct era_metadata *md,
+ bool may_format)
+{
+ int r;
+ bool unformatted = false;
+
+ r = superblock_all_zeroes(md->bm, &unformatted);
+ if (r)
+ return r;
+
+ if (unformatted)
+ return may_format ? format_metadata(md) : -EPERM;
+
+ return open_metadata(md);
+}
+
+static int create_persistent_data_objects(struct era_metadata *md,
+ bool may_format)
+{
+ int r;
+
+ md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE,
+ DM_ERA_METADATA_CACHE_SIZE,
+ ERA_MAX_CONCURRENT_LOCKS);
+ if (IS_ERR(md->bm)) {
+ DMERR("could not create block manager");
+ return PTR_ERR(md->bm);
+ }
+
+ r = open_or_format_metadata(md, may_format);
+ if (r)
+ dm_block_manager_destroy(md->bm);
+
+ return r;
+}
+
+static void destroy_persistent_data_objects(struct era_metadata *md)
+{
+ dm_sm_destroy(md->sm);
+ dm_tm_destroy(md->tm);
+ dm_block_manager_destroy(md->bm);
+}
+
+/*
+ * This waits until all era_map threads have picked up the new filter.
+ */
+static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset)
+{
+ rcu_assign_pointer(md->current_writeset, new_writeset);
+ synchronize_rcu();
+}
+
+/*----------------------------------------------------------------
+ * Writesets get 'digested' into the main era array.
+ *
+ * We're using a coroutine here so the worker thread can do the digestion,
+ * thus avoiding synchronisation of the metadata. Digesting a whole
+ * writeset in one go would cause too much latency.
+ *--------------------------------------------------------------*/
+struct digest {
+ uint32_t era;
+ unsigned nr_bits, current_bit;
+ struct writeset_metadata writeset;
+ __le32 value;
+ struct dm_disk_bitset info;
+
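+	/* The next step to run; set to NULL once there is nothing left to digest. */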
+ int (*step)(struct era_metadata *, struct digest *);
+};
+
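+/*
+ * The digest cycles through three steps: look up the oldest archived
+ * writeset, transcribe its marked bits into the era array, and then
+ * remove it from the writeset tree.
+ */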
+static int metadata_digest_lookup_writeset(struct era_metadata *md,
+ struct digest *d);
+
+static int metadata_digest_remove_writeset(struct era_metadata *md,
+ struct digest *d)
+{
+ int r;
+ uint64_t key = d->era;
+
+ r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root,
+ &key, &md->writeset_tree_root);
+ if (r) {
+ DMERR("%s: dm_btree_remove failed", __func__);
+ return r;
+ }
+
+ d->step = metadata_digest_lookup_writeset;
+ return 0;
+}
+
+#define INSERTS_PER_STEP 100
+
+static int metadata_digest_transcribe_writeset(struct era_metadata *md,
+ struct digest *d)
+{
+ int r;
+ bool marked;
+ unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
+
+ for (b = d->current_bit; b < e; b++) {
+ r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
+ if (r) {
+ DMERR("%s: writeset_marked_on_disk failed", __func__);
+ return r;
+ }
+
+ if (!marked)
+ continue;
+
+ __dm_bless_for_disk(&d->value);
+ r = dm_array_set_value(&md->era_array_info, md->era_array_root,
+ b, &d->value, &md->era_array_root);
+ if (r) {
+ DMERR("%s: dm_array_set_value failed", __func__);
+ return r;
+ }
+ }
+
+ if (b == d->nr_bits)
+ d->step = metadata_digest_remove_writeset;
+ else
+ d->current_bit = b;
+
+ return 0;
+}
+
+static int metadata_digest_lookup_writeset(struct era_metadata *md,
+ struct digest *d)
+{
+ int r;
+ uint64_t key;
+ struct writeset_disk disk;
+
+ r = dm_btree_find_lowest_key(&md->writeset_tree_info,
+ md->writeset_tree_root, &key);
+ if (r < 0)
+ return r;
+
+ d->era = key;
+
+ r = dm_btree_lookup(&md->writeset_tree_info,
+ md->writeset_tree_root, &key, &disk);
+ if (r) {
+ if (r == -ENODATA) {
+ d->step = NULL;
+ return 0;
+ }
+
+ DMERR("%s: dm_btree_lookup failed", __func__);
+ return r;
+ }
+
+ ws_unpack(&disk, &d->writeset);
+ d->value = cpu_to_le32(key);
+
+ d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
+ d->current_bit = 0;
+ d->step = metadata_digest_transcribe_writeset;
+
+ return 0;
+}
+
+static int metadata_digest_start(struct era_metadata *md, struct digest *d)
+{
+ if (d->step)
+ return 0;
+
+ memset(d, 0, sizeof(*d));
+
+ /*
+ * We initialise another bitset info to avoid any caching side
+ * effects with the previous one.
+ */
+ dm_disk_bitset_init(md->tm, &d->info);
+ d->step = metadata_digest_lookup_writeset;
+
+ return 0;
+}
+
+/*----------------------------------------------------------------
+ * High level metadata interface. Target methods should use these, and not
+ * the lower level ones.
+ *--------------------------------------------------------------*/
+static struct era_metadata *metadata_open(struct block_device *bdev,
+ sector_t block_size,
+ bool may_format)
+{
+ int r;
+ struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL);
+
+ if (!md)
+ return NULL;
+
+ md->bdev = bdev;
+ md->block_size = block_size;
+
+ md->writesets[0].md.root = INVALID_WRITESET_ROOT;
+ md->writesets[1].md.root = INVALID_WRITESET_ROOT;
+ md->current_writeset = &md->writesets[0];
+
+ r = create_persistent_data_objects(md, may_format);
+ if (r) {
+ kfree(md);
+ return ERR_PTR(r);
+ }
+
+ return md;
+}
+
+static void metadata_close(struct era_metadata *md)
+{
+ destroy_persistent_data_objects(md);
+ kfree(md);
+}
+
+static bool valid_nr_blocks(dm_block_t n)
+{
+ /*
+ * dm_bitset restricts us to 2^32. test_bit & co. restrict us
+ * further to 2^31 - 1
+ */
+ return n < (1ull << 31);
+}
+
+static int metadata_resize(struct era_metadata *md, void *arg)
+{
+ int r;
+ dm_block_t *new_size = arg;
+ __le32 value;
+
+ if (!valid_nr_blocks(*new_size)) {
+ DMERR("Invalid number of origin blocks %llu",
+ (unsigned long long) *new_size);
+ return -EINVAL;
+ }
+
+ writeset_free(&md->writesets[0]);
+ writeset_free(&md->writesets[1]);
+
+ r = writeset_alloc(&md->writesets[0], *new_size);
+ if (r) {
+ DMERR("%s: writeset_alloc failed for writeset 0", __func__);
+ return r;
+ }
+
+ r = writeset_alloc(&md->writesets[1], *new_size);
+ if (r) {
+ DMERR("%s: writeset_alloc failed for writeset 1", __func__);
+ return r;
+ }
+
+ value = cpu_to_le32(0u);
+ __dm_bless_for_disk(&value);
+ r = dm_array_resize(&md->era_array_info, md->era_array_root,
+ md->nr_blocks, *new_size,
+ &value, &md->era_array_root);
+ if (r) {
+ DMERR("%s: dm_array_resize failed", __func__);
+ return r;
+ }
+
+ md->nr_blocks = *new_size;
+ return 0;
+}
+
+static int metadata_era_archive(struct era_metadata *md)
+{
+ int r;
+ uint64_t keys[1];
+ struct writeset_disk value;
+
+ r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
+ &md->current_writeset->md.root);
+ if (r) {
+ DMERR("%s: dm_bitset_flush failed", __func__);
+ return r;
+ }
+
+ ws_pack(&md->current_writeset->md, &value);
+ md->current_writeset->md.root = INVALID_WRITESET_ROOT;
+
+ keys[0] = md->current_era;
+ __dm_bless_for_disk(&value);
+ r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root,
+ keys, &value, &md->writeset_tree_root);
+ if (r) {
+ DMERR("%s: couldn't insert writeset into btree", __func__);
+ /* FIXME: fail mode */
+ return r;
+ }
+
+ md->archived_writesets = true;
+
+ return 0;
+}
+
+static struct writeset *next_writeset(struct era_metadata *md)
+{
+ return (md->current_writeset == &md->writesets[0]) ?
+ &md->writesets[1] : &md->writesets[0];
+}
+
+static int metadata_new_era(struct era_metadata *md)
+{
+ int r;
+ struct writeset *new_writeset = next_writeset(md);
+
+ r = writeset_init(&md->bitset_info, new_writeset);
+ if (r) {
+ DMERR("%s: writeset_init failed", __func__);
+ return r;
+ }
+
+ swap_writeset(md, new_writeset);
+ md->current_era++;
+
+ return 0;
+}
+
+static int metadata_era_rollover(struct era_metadata *md)
+{
+ int r;
+
+ if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
+ r = metadata_era_archive(md);
+ if (r) {
+			DMERR("%s: metadata_era_archive failed", __func__);
+ /* FIXME: fail mode? */
+ return r;
+ }
+ }
+
+ r = metadata_new_era(md);
+ if (r) {
+ DMERR("%s: new era failed", __func__);
+ /* FIXME: fail mode */
+ return r;
+ }
+
+ return 0;
+}
+
+static bool metadata_current_marked(struct era_metadata *md, dm_block_t block)
+{
+ bool r;
+ struct writeset *ws;
+
+ rcu_read_lock();
+ ws = rcu_dereference(md->current_writeset);
+ r = writeset_marked(ws, block);
+ rcu_read_unlock();
+
+ return r;
+}
+
+static int metadata_commit(struct era_metadata *md)
+{
+ int r;
+ struct dm_block *sblock;
+
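+	/*
+	 * Only flush the current writeset's bitset if it has a valid
+	 * on-disk root (INVALID_WRITESET_ROOT aliases SUPERBLOCK_LOCATION).
+	 */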
+ if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) {
+ r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
+ &md->current_writeset->md.root);
+ if (r) {
+ DMERR("%s: bitset flush failed", __func__);
+ return r;
+ }
+ }
+
+ r = save_sm_root(md);
+ if (r) {
+ DMERR("%s: save_sm_root failed", __func__);
+ return r;
+ }
+
+ r = dm_tm_pre_commit(md->tm);
+ if (r) {
+ DMERR("%s: pre commit failed", __func__);
+ return r;
+ }
+
+ r = superblock_lock(md, &sblock);
+ if (r) {
+ DMERR("%s: superblock lock failed", __func__);
+ return r;
+ }
+
+ prepare_superblock(md, dm_block_data(sblock));
+
+ return dm_tm_commit(md->tm, sblock);
+}
+
+static int metadata_checkpoint(struct era_metadata *md)
+{
+ /*
+ * For now we just rollover, but later I want to put a check in to
+ * avoid this if the filter is still pretty fresh.
+ */
+ return metadata_era_rollover(md);
+}
+
+/*
+ * Metadata snapshots allow userland to access era data.
+ */
+static int metadata_take_snap(struct era_metadata *md)
+{
+ int r, inc;
+ struct dm_block *clone;
+
+ if (md->metadata_snap != SUPERBLOCK_LOCATION) {
+ DMERR("%s: metadata snapshot already exists", __func__);
+ return -EINVAL;
+ }
+
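+	/*
+	 * Roll over so the current writeset gets archived into the
+	 * writeset tree, which is one of the two fields valid within
+	 * the metadata snapshot.
+	 */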
+ r = metadata_era_rollover(md);
+ if (r) {
+ DMERR("%s: era rollover failed", __func__);
+ return r;
+ }
+
+ r = metadata_commit(md);
+ if (r) {
+		DMERR("%s: commit failed", __func__);
+ return r;
+ }
+
+ r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION);
+ if (r) {
+ DMERR("%s: couldn't increment superblock", __func__);
+ return r;
+ }
+
+ r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION,
+ &sb_validator, &clone, &inc);
+ if (r) {
+ DMERR("%s: couldn't shadow superblock", __func__);
+ dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION);
+ return r;
+ }
+ BUG_ON(!inc);
+
+ r = dm_sm_inc_block(md->sm, md->writeset_tree_root);
+ if (r) {
+ DMERR("%s: couldn't inc writeset tree root", __func__);
+ dm_tm_unlock(md->tm, clone);
+ return r;
+ }
+
+ r = dm_sm_inc_block(md->sm, md->era_array_root);
+ if (r) {
+ DMERR("%s: couldn't inc era tree root", __func__);
+ dm_sm_dec_block(md->sm, md->writeset_tree_root);
+ dm_tm_unlock(md->tm, clone);
+ return r;
+ }
+
+ md->metadata_snap = dm_block_location(clone);
+
+ r = dm_tm_unlock(md->tm, clone);
+ if (r) {
+ DMERR("%s: couldn't unlock clone", __func__);
+ md->metadata_snap = SUPERBLOCK_LOCATION;
+ return r;
+ }
+
+ return 0;
+}
+
+static int metadata_drop_snap(struct era_metadata *md)
+{
+ int r;
+ dm_block_t location;
+ struct dm_block *clone;
+ struct superblock_disk *disk;
+
+ if (md->metadata_snap == SUPERBLOCK_LOCATION) {
+ DMERR("%s: no snap to drop", __func__);
+ return -EINVAL;
+ }
+
+ r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone);
+ if (r) {
+ DMERR("%s: couldn't read lock superblock clone", __func__);
+ return r;
+ }
+
+ /*
+ * Whatever happens now we'll commit with no record of the metadata
+ * snap.
+ */
+ md->metadata_snap = SUPERBLOCK_LOCATION;
+
+ disk = dm_block_data(clone);
+ r = dm_btree_del(&md->writeset_tree_info,
+ le64_to_cpu(disk->writeset_tree_root));
+ if (r) {
+ DMERR("%s: error deleting writeset tree clone", __func__);
+ dm_tm_unlock(md->tm, clone);
+ return r;
+ }
+
+ r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root));
+ if (r) {
+ DMERR("%s: error deleting era array clone", __func__);
+ dm_tm_unlock(md->tm, clone);
+ return r;
+ }
+
+ location = dm_block_location(clone);
+ dm_tm_unlock(md->tm, clone);
+
+ return dm_sm_dec_block(md->sm, location);
+}
+
+struct metadata_stats {
+ dm_block_t used;
+ dm_block_t total;
+ dm_block_t snap;
+ uint32_t era;
+};
+
+static int metadata_get_stats(struct era_metadata *md, void *ptr)
+{
+ int r;
+ struct metadata_stats *s = ptr;
+ dm_block_t nr_free, nr_total;
+
+ r = dm_sm_get_nr_free(md->sm, &nr_free);
+ if (r) {
+ DMERR("dm_sm_get_nr_free returned %d", r);
+ return r;
+ }
+
+ r = dm_sm_get_nr_blocks(md->sm, &nr_total);
+ if (r) {
+		DMERR("dm_sm_get_nr_blocks returned %d", r);
+ return r;
+ }
+
+ s->used = nr_total - nr_free;
+ s->total = nr_total;
+ s->snap = md->metadata_snap;
+ s->era = md->current_era;
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+struct era {
+ struct dm_target *ti;
+ struct dm_target_callbacks callbacks;
+
+ struct dm_dev *metadata_dev;
+ struct dm_dev *origin_dev;
+
+ dm_block_t nr_blocks;
+ uint32_t sectors_per_block;
+ int sectors_per_block_shift;
+ struct era_metadata *md;
+
+ struct workqueue_struct *wq;
+ struct work_struct worker;
+
+ spinlock_t deferred_lock;
+ struct bio_list deferred_bios;
+
+ spinlock_t rpc_lock;
+ struct list_head rpc_calls;
+
+ struct digest digest;
+ atomic_t suspended;
+};
+
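+/*
+ * Metadata operations issued by the target methods are funnelled
+ * through the worker via one of these; exactly one of fn0 (no
+ * argument) or fn1 (takes arg) is set.
+ */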
+struct rpc {
+ struct list_head list;
+
+ int (*fn0)(struct era_metadata *);
+ int (*fn1)(struct era_metadata *, void *);
+ void *arg;
+ int result;
+
+ struct completion complete;
+};
+
+/*----------------------------------------------------------------
+ * Remapping.
+ *---------------------------------------------------------------*/
+static bool block_size_is_power_of_two(struct era *era)
+{
+ return era->sectors_per_block_shift >= 0;
+}
+
+static dm_block_t get_block(struct era *era, struct bio *bio)
+{
+ sector_t block_nr = bio->bi_iter.bi_sector;
+
+ if (!block_size_is_power_of_two(era))
+ (void) sector_div(block_nr, era->sectors_per_block);
+ else
+ block_nr >>= era->sectors_per_block_shift;
+
+ return block_nr;
+}
+
+static void remap_to_origin(struct era *era, struct bio *bio)
+{
+ bio->bi_bdev = era->origin_dev->bdev;
+}
+
+/*----------------------------------------------------------------
+ * Worker thread
+ *--------------------------------------------------------------*/
+static void wake_worker(struct era *era)
+{
+ if (!atomic_read(&era->suspended))
+ queue_work(era->wq, &era->worker);
+}
+
+static void process_old_eras(struct era *era)
+{
+ int r;
+
+ if (!era->digest.step)
+ return;
+
+ r = era->digest.step(era->md, &era->digest);
+ if (r < 0) {
+ DMERR("%s: digest step failed, stopping digestion", __func__);
+ era->digest.step = NULL;
+
+ } else if (era->digest.step)
+ wake_worker(era);
+}
+
+static void process_deferred_bios(struct era *era)
+{
+ int r;
+ struct bio_list deferred_bios, marked_bios;
+ struct bio *bio;
+ bool commit_needed = false;
+ bool failed = false;
+
+ bio_list_init(&deferred_bios);
+ bio_list_init(&marked_bios);
+
+ spin_lock(&era->deferred_lock);
+ bio_list_merge(&deferred_bios, &era->deferred_bios);
+ bio_list_init(&era->deferred_bios);
+ spin_unlock(&era->deferred_lock);
+
+ while ((bio = bio_list_pop(&deferred_bios))) {
+ r = writeset_test_and_set(&era->md->bitset_info,
+ era->md->current_writeset,
+ get_block(era, bio));
+ if (r < 0) {
+ /*
+			 * This is bad news: we need to roll back.
+ * FIXME: finish.
+ */
+ failed = true;
+
+ } else if (r == 0)
+ commit_needed = true;
+
+ bio_list_add(&marked_bios, bio);
+ }
+
+ if (commit_needed) {
+ r = metadata_commit(era->md);
+ if (r)
+ failed = true;
+ }
+
+ if (failed)
+ while ((bio = bio_list_pop(&marked_bios)))
+ bio_io_error(bio);
+ else
+ while ((bio = bio_list_pop(&marked_bios)))
+ generic_make_request(bio);
+}
+
+static void process_rpc_calls(struct era *era)
+{
+ int r;
+ bool need_commit = false;
+ struct list_head calls;
+ struct rpc *rpc, *tmp;
+
+ INIT_LIST_HEAD(&calls);
+ spin_lock(&era->rpc_lock);
+ list_splice_init(&era->rpc_calls, &calls);
+ spin_unlock(&era->rpc_lock);
+
+ list_for_each_entry_safe(rpc, tmp, &calls, list) {
+ rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
+ need_commit = true;
+ }
+
+ if (need_commit) {
+ r = metadata_commit(era->md);
+ if (r)
+ list_for_each_entry_safe(rpc, tmp, &calls, list)
+ rpc->result = r;
+ }
+
+ list_for_each_entry_safe(rpc, tmp, &calls, list)
+ complete(&rpc->complete);
+}
+
+static void kick_off_digest(struct era *era)
+{
+ if (era->md->archived_writesets) {
+ era->md->archived_writesets = false;
+ metadata_digest_start(era->md, &era->digest);
+ }
+}
+
+static void do_work(struct work_struct *ws)
+{
+ struct era *era = container_of(ws, struct era, worker);
+
+ kick_off_digest(era);
+ process_old_eras(era);
+ process_deferred_bios(era);
+ process_rpc_calls(era);
+}
+
+static void defer_bio(struct era *era, struct bio *bio)
+{
+ spin_lock(&era->deferred_lock);
+ bio_list_add(&era->deferred_bios, bio);
+ spin_unlock(&era->deferred_lock);
+
+ wake_worker(era);
+}
+
+/*
+ * Make an rpc call to the worker to change the metadata.
+ */
+static int perform_rpc(struct era *era, struct rpc *rpc)
+{
+ rpc->result = 0;
+ init_completion(&rpc->complete);
+
+ spin_lock(&era->rpc_lock);
+ list_add(&rpc->list, &era->rpc_calls);
+ spin_unlock(&era->rpc_lock);
+
+ wake_worker(era);
+ wait_for_completion(&rpc->complete);
+
+ return rpc->result;
+}
+
+static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
+{
+ struct rpc rpc;
+ rpc.fn0 = fn;
+ rpc.fn1 = NULL;
+
+ return perform_rpc(era, &rpc);
+}
+
+static int in_worker1(struct era *era,
+ int (*fn)(struct era_metadata *, void *), void *arg)
+{
+ struct rpc rpc;
+ rpc.fn0 = NULL;
+ rpc.fn1 = fn;
+ rpc.arg = arg;
+
+ return perform_rpc(era, &rpc);
+}
+
+static void start_worker(struct era *era)
+{
+ atomic_set(&era->suspended, 0);
+}
+
+static void stop_worker(struct era *era)
+{
+ atomic_set(&era->suspended, 1);
+ flush_workqueue(era->wq);
+}
+
+/*----------------------------------------------------------------
+ * Target methods
+ *--------------------------------------------------------------*/
+static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ return bdi_congested(&q->backing_dev_info, bdi_bits);
+}
+
+static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
+{
+ struct era *era = container_of(cb, struct era, callbacks);
+ return dev_is_congested(era->origin_dev, bdi_bits);
+}
+
+static void era_destroy(struct era *era)
+{
+ metadata_close(era->md);
+
+ if (era->wq)
+ destroy_workqueue(era->wq);
+
+ if (era->origin_dev)
+ dm_put_device(era->ti, era->origin_dev);
+
+ if (era->metadata_dev)
+ dm_put_device(era->ti, era->metadata_dev);
+
+ kfree(era);
+}
+
+static dm_block_t calc_nr_blocks(struct era *era)
+{
+ return dm_sector_div_up(era->ti->len, era->sectors_per_block);
+}
+
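+/*
+ * The data block size must be a positive multiple of MIN_BLOCK_SIZE
+ * (8 sectors, i.e. 4KiB).
+ */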
+static bool valid_block_size(dm_block_t block_size)
+{
+ bool greater_than_zero = block_size > 0;
+ bool multiple_of_min_block_size = (block_size & (MIN_BLOCK_SIZE - 1)) == 0;
+
+ return greater_than_zero && multiple_of_min_block_size;
+}
+
+/*
+ * <metadata dev> <data dev> <data block size (sectors)>
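+ * e.g. a dm table line of "0 <origin size in sectors> era /dev/meta /dev/origin 128".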
+ */
+static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r;
+ char dummy;
+ struct era *era;
+ struct era_metadata *md;
+
+ if (argc != 3) {
+ ti->error = "Invalid argument count";
+ return -EINVAL;
+ }
+
+ era = kzalloc(sizeof(*era), GFP_KERNEL);
+ if (!era) {
+ ti->error = "Error allocating era structure";
+ return -ENOMEM;
+ }
+
+ era->ti = ti;
+
+ r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev);
+ if (r) {
+ ti->error = "Error opening metadata device";
+ era_destroy(era);
+ return -EINVAL;
+ }
+
+ r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev);
+ if (r) {
+ ti->error = "Error opening data device";
+ era_destroy(era);
+ return -EINVAL;
+ }
+
+ r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
+ if (r != 1) {
+ ti->error = "Error parsing block size";
+ era_destroy(era);
+ return -EINVAL;
+ }
+
+ r = dm_set_target_max_io_len(ti, era->sectors_per_block);
+ if (r) {
+ ti->error = "could not set max io len";
+ era_destroy(era);
+ return -EINVAL;
+ }
+
+ if (!valid_block_size(era->sectors_per_block)) {
+ ti->error = "Invalid block size";
+ era_destroy(era);
+ return -EINVAL;
+ }
+ if (era->sectors_per_block & (era->sectors_per_block - 1))
+ era->sectors_per_block_shift = -1;
+ else
+ era->sectors_per_block_shift = __ffs(era->sectors_per_block);
+
+ md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
+ if (IS_ERR(md)) {
+ ti->error = "Error reading metadata";
+ era_destroy(era);
+ return PTR_ERR(md);
+ }
+ era->md = md;
+
+ era->nr_blocks = calc_nr_blocks(era);
+
+ r = metadata_resize(era->md, &era->nr_blocks);
+ if (r) {
+ ti->error = "couldn't resize metadata";
+ era_destroy(era);
+ return -ENOMEM;
+ }
+
+ era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+ if (!era->wq) {
+ ti->error = "could not create workqueue for metadata object";
+ era_destroy(era);
+ return -ENOMEM;
+ }
+ INIT_WORK(&era->worker, do_work);
+
+ spin_lock_init(&era->deferred_lock);
+ bio_list_init(&era->deferred_bios);
+
+ spin_lock_init(&era->rpc_lock);
+ INIT_LIST_HEAD(&era->rpc_calls);
+
+ ti->private = era;
+ ti->num_flush_bios = 1;
+ ti->flush_supported = true;
+
+ ti->num_discard_bios = 1;
+ ti->discards_supported = true;
+ era->callbacks.congested_fn = era_is_congested;
+ dm_table_add_target_callbacks(ti->table, &era->callbacks);
+
+ return 0;
+}
+
+static void era_dtr(struct dm_target *ti)
+{
+ era_destroy(ti->private);
+}
+
+static int era_map(struct dm_target *ti, struct bio *bio)
+{
+ struct era *era = ti->private;
+ dm_block_t block = get_block(era, bio);
+
+ /*
+	 * All bios get remapped to the origin device. We do this now,
+	 * but the bio may not get issued until later, depending on
+	 * whether the block is marked in this era.
+ */
+ remap_to_origin(era, bio);
+
+ /*
+ * REQ_FLUSH bios carry no data, so we're not interested in them.
+ */
+ if (!(bio->bi_rw & REQ_FLUSH) &&
+ (bio_data_dir(bio) == WRITE) &&
+ !metadata_current_marked(era->md, block)) {
+ defer_bio(era, bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static void era_postsuspend(struct dm_target *ti)
+{
+ int r;
+ struct era *era = ti->private;
+
+ r = in_worker0(era, metadata_era_archive);
+ if (r) {
+ DMERR("%s: couldn't archive current era", __func__);
+ /* FIXME: fail mode */
+ }
+
+ stop_worker(era);
+}
+
+static int era_preresume(struct dm_target *ti)
+{
+ int r;
+ struct era *era = ti->private;
+ dm_block_t new_size = calc_nr_blocks(era);
+
+ if (era->nr_blocks != new_size) {
+ r = in_worker1(era, metadata_resize, &new_size);
+ if (r)
+ return r;
+
+ era->nr_blocks = new_size;
+ }
+
+ start_worker(era);
+
+ r = in_worker0(era, metadata_new_era);
+ if (r) {
+		DMERR("%s: metadata_new_era failed", __func__);
+ return r;
+ }
+
+ return 0;
+}
+
+/*
+ * Status format:
+ *
+ * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
+ * <current era> <held metadata root | '-'>
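+ * e.g. "8 23/1024 12 -" when no metadata snapshot is held.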
+ */
+static void era_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+{
+ int r;
+ struct era *era = ti->private;
+ ssize_t sz = 0;
+ struct metadata_stats stats;
+ char buf[BDEVNAME_SIZE];
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ r = in_worker1(era, metadata_get_stats, &stats);
+ if (r)
+ goto err;
+
+ DMEMIT("%u %llu/%llu %u",
+ (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
+ (unsigned long long) stats.used,
+ (unsigned long long) stats.total,
+ (unsigned) stats.era);
+
+ if (stats.snap != SUPERBLOCK_LOCATION)
+ DMEMIT(" %llu", stats.snap);
+ else
+ DMEMIT(" -");
+ break;
+
+ case STATUSTYPE_TABLE:
+ format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
+ DMEMIT("%s ", buf);
+ format_dev_t(buf, era->origin_dev->bdev->bd_dev);
+ DMEMIT("%s %u", buf, era->sectors_per_block);
+ break;
+ }
+
+ return;
+
+err:
+ DMEMIT("Error");
+}
+
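+/*
+ * Messages: "checkpoint", "take_metadata_snap" and "drop_metadata_snap",
+ * sent with, e.g., "dmsetup message <dev> 0 checkpoint".
+ */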
+static int era_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+ struct era *era = ti->private;
+
+ if (argc != 1) {
+ DMERR("incorrect number of message arguments");
+ return -EINVAL;
+ }
+
+ if (!strcasecmp(argv[0], "checkpoint"))
+ return in_worker0(era, metadata_checkpoint);
+
+ if (!strcasecmp(argv[0], "take_metadata_snap"))
+ return in_worker0(era, metadata_take_snap);
+
+ if (!strcasecmp(argv[0], "drop_metadata_snap"))
+ return in_worker0(era, metadata_drop_snap);
+
+ DMERR("unsupported message '%s'", argv[0]);
+ return -EINVAL;
+}
+
+static sector_t get_dev_size(struct dm_dev *dev)
+{
+ return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+static int era_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct era *era = ti->private;
+ return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
+}
+
+static int era_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct era *era = ti->private;
+ struct request_queue *q = bdev_get_queue(era->origin_dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = era->origin_dev->bdev;
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct era *era = ti->private;
+ uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
+
+ /*
+ * If the system-determined stacked limits are compatible with the
+ * era device's blocksize (io_opt is a factor) do not override them.
+ */
+ if (io_opt_sectors < era->sectors_per_block ||
+ do_div(io_opt_sectors, era->sectors_per_block)) {
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
+ }
+}
+
+/*----------------------------------------------------------------*/
+
+static struct target_type era_target = {
+ .name = "era",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = era_ctr,
+ .dtr = era_dtr,
+ .map = era_map,
+ .postsuspend = era_postsuspend,
+ .preresume = era_preresume,
+ .status = era_status,
+ .message = era_message,
+ .iterate_devices = era_iterate_devices,
+ .merge = era_merge,
+ .io_hints = era_io_hints
+};
+
+static int __init dm_era_init(void)
+{
+ int r;
+
+ r = dm_register_target(&era_target);
+ if (r) {
+ DMERR("era target registration failed: %d", r);
+ return r;
+ }
+
+ return 0;
+}
+
+static void __exit dm_era_exit(void)
+{
+ dm_unregister_target(&era_target);
+}
+
+module_init(dm_era_init);
+module_exit(dm_era_exit);
+
+MODULE_DESCRIPTION(DM_NAME " era target");
+MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 08d9a207259a..b428c0ae63d5 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -66,7 +66,7 @@ static int dm_ulog_sendto_server(struct dm_ulog_request *tfr)
msg->seq = tfr->seq;
msg->len = sizeof(struct dm_ulog_request) + tfr->data_size;
- r = cn_netlink_send(msg, 0, gfp_any());
+ r = cn_netlink_send(msg, 0, 0, gfp_any());
return r;
}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 422a9fdeb53e..aa009e865871 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -93,10 +93,6 @@ struct multipath {
unsigned pg_init_count; /* Number of times pg_init called */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
- unsigned queue_size;
- struct work_struct process_queued_ios;
- struct list_head queued_ios;
-
struct work_struct trigger_event;
/*
@@ -121,9 +117,9 @@ typedef int (*action_fn) (struct pgpath *pgpath);
static struct kmem_cache *_mpio_cache;
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
-static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
+static int __pgpath_busy(struct pgpath *pgpath);
/*-----------------------------------------------
@@ -195,11 +191,9 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
- INIT_LIST_HEAD(&m->queued_ios);
spin_lock_init(&m->lock);
m->queue_io = 1;
m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
- INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
@@ -256,13 +250,21 @@ static void clear_mapinfo(struct multipath *m, union map_info *info)
* Path selection
*-----------------------------------------------*/
-static void __pg_init_all_paths(struct multipath *m)
+static int __pg_init_all_paths(struct multipath *m)
{
struct pgpath *pgpath;
unsigned long pg_init_delay = 0;
+ if (m->pg_init_in_progress || m->pg_init_disabled)
+ return 0;
+
m->pg_init_count++;
m->pg_init_required = 0;
+
+ /* Check here to reset pg_init_required */
+ if (!m->current_pg)
+ return 0;
+
if (m->pg_init_delay_retry)
pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
@@ -274,6 +276,7 @@ static void __pg_init_all_paths(struct multipath *m)
pg_init_delay))
m->pg_init_in_progress++;
}
+ return m->pg_init_in_progress;
}
static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
@@ -365,19 +368,26 @@ failed:
*/
static int __must_push_back(struct multipath *m)
{
- return (m->queue_if_no_path != m->saved_queue_if_no_path &&
- dm_noflush_suspending(m->ti));
+ return (m->queue_if_no_path ||
+ (m->queue_if_no_path != m->saved_queue_if_no_path &&
+ dm_noflush_suspending(m->ti)));
}
-static int map_io(struct multipath *m, struct request *clone,
- union map_info *map_context, unsigned was_queued)
+#define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required)
+
+/*
+ * Map cloned requests
+ */
+static int multipath_map(struct dm_target *ti, struct request *clone,
+ union map_info *map_context)
{
- int r = DM_MAPIO_REMAPPED;
+ struct multipath *m = (struct multipath *) ti->private;
+ int r = DM_MAPIO_REQUEUE;
size_t nr_bytes = blk_rq_bytes(clone);
unsigned long flags;
struct pgpath *pgpath;
struct block_device *bdev;
- struct dm_mpath_io *mpio = map_context->ptr;
+ struct dm_mpath_io *mpio;
spin_lock_irqsave(&m->lock, flags);
@@ -388,38 +398,33 @@ static int map_io(struct multipath *m, struct request *clone,
pgpath = m->current_pgpath;
- if (was_queued)
- m->queue_size--;
-
- if (m->pg_init_required) {
- if (!m->pg_init_in_progress)
- queue_work(kmultipathd, &m->process_queued_ios);
- r = DM_MAPIO_REQUEUE;
- } else if ((pgpath && m->queue_io) ||
- (!pgpath && m->queue_if_no_path)) {
- /* Queue for the daemon to resubmit */
- list_add_tail(&clone->queuelist, &m->queued_ios);
- m->queue_size++;
- if (!m->queue_io)
- queue_work(kmultipathd, &m->process_queued_ios);
- pgpath = NULL;
- r = DM_MAPIO_SUBMITTED;
- } else if (pgpath) {
- bdev = pgpath->path.dev->bdev;
- clone->q = bdev_get_queue(bdev);
- clone->rq_disk = bdev->bd_disk;
- } else if (__must_push_back(m))
- r = DM_MAPIO_REQUEUE;
- else
- r = -EIO; /* Failed */
+ if (!pgpath) {
+ if (!__must_push_back(m))
+ r = -EIO; /* Failed */
+ goto out_unlock;
+ }
+ if (!pg_ready(m)) {
+ __pg_init_all_paths(m);
+ goto out_unlock;
+ }
+ if (set_mapinfo(m, map_context) < 0)
+ /* ENOMEM, requeue */
+ goto out_unlock;
+ bdev = pgpath->path.dev->bdev;
+ clone->q = bdev_get_queue(bdev);
+ clone->rq_disk = bdev->bd_disk;
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ mpio = map_context->ptr;
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
-
- if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
- pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
+ if (pgpath->pg->ps.type->start_io)
+ pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
+ &pgpath->path,
nr_bytes);
+ r = DM_MAPIO_REMAPPED;
+out_unlock:
spin_unlock_irqrestore(&m->lock, flags);
return r;
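
A note on the rewritten request path: multipath_map() now makes one decision under m->lock: fail the request, return DM_MAPIO_REQUEUE (after kicking __pg_init_all_paths(), so the block layer retries once path initialisation finishes), or remap to the chosen path. The snippet below is a minimal userspace model of just that decision order, with the set_mapinfo()/ENOMEM branch omitted; the struct and helper names are hypothetical stand-ins, only the flag logic is taken from the hunk above.

#include <stdio.h>

/* Verdicts modelled on the DM return codes used in the hunk above. */
enum map_verdict { MAP_FAIL_EIO, MAP_REQUEUE, MAP_REMAPPED };

struct mpath_flags {                    /* hypothetical flattened view of struct multipath */
        int has_pgpath;                 /* m->current_pgpath != NULL */
        int queue_if_no_path;
        int saved_queue_if_no_path;
        int noflush_suspending;         /* dm_noflush_suspending(m->ti) */
        int queue_io;
        int pg_init_required;
};

static int must_push_back(const struct mpath_flags *m)
{
        return m->queue_if_no_path ||
               (m->queue_if_no_path != m->saved_queue_if_no_path &&
                m->noflush_suspending);
}

static enum map_verdict map_decision(const struct mpath_flags *m)
{
        if (!m->has_pgpath)
                return must_push_back(m) ? MAP_REQUEUE : MAP_FAIL_EIO;
        if (m->queue_io || m->pg_init_required) /* i.e. !pg_ready(m) */
                return MAP_REQUEUE;             /* pg_init kicked, block layer retries */
        return MAP_REMAPPED;
}

int main(void)
{
        struct mpath_flags initialising = { .has_pgpath = 1, .pg_init_required = 1 };
        struct mpath_flags ready        = { .has_pgpath = 1 };

        printf("%d %d\n", map_decision(&initialising), map_decision(&ready)); /* 1 2 */
        return 0;
}
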
@@ -440,76 +445,14 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
else
m->saved_queue_if_no_path = queue_if_no_path;
m->queue_if_no_path = queue_if_no_path;
- if (!m->queue_if_no_path && m->queue_size)
- queue_work(kmultipathd, &m->process_queued_ios);
+ if (!m->queue_if_no_path)
+ dm_table_run_md_queue_async(m->ti->table);
spin_unlock_irqrestore(&m->lock, flags);
return 0;
}
-/*-----------------------------------------------------------------
- * The multipath daemon is responsible for resubmitting queued ios.
- *---------------------------------------------------------------*/
-
-static void dispatch_queued_ios(struct multipath *m)
-{
- int r;
- unsigned long flags;
- union map_info *info;
- struct request *clone, *n;
- LIST_HEAD(cl);
-
- spin_lock_irqsave(&m->lock, flags);
- list_splice_init(&m->queued_ios, &cl);
- spin_unlock_irqrestore(&m->lock, flags);
-
- list_for_each_entry_safe(clone, n, &cl, queuelist) {
- list_del_init(&clone->queuelist);
-
- info = dm_get_rq_mapinfo(clone);
-
- r = map_io(m, clone, info, 1);
- if (r < 0) {
- clear_mapinfo(m, info);
- dm_kill_unmapped_request(clone, r);
- } else if (r == DM_MAPIO_REMAPPED)
- dm_dispatch_request(clone);
- else if (r == DM_MAPIO_REQUEUE) {
- clear_mapinfo(m, info);
- dm_requeue_unmapped_request(clone);
- }
- }
-}
-
-static void process_queued_ios(struct work_struct *work)
-{
- struct multipath *m =
- container_of(work, struct multipath, process_queued_ios);
- struct pgpath *pgpath = NULL;
- unsigned must_queue = 1;
- unsigned long flags;
-
- spin_lock_irqsave(&m->lock, flags);
-
- if (!m->current_pgpath)
- __choose_pgpath(m, 0);
-
- pgpath = m->current_pgpath;
-
- if ((pgpath && !m->queue_io) ||
- (!pgpath && !m->queue_if_no_path))
- must_queue = 0;
-
- if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
- !m->pg_init_disabled)
- __pg_init_all_paths(m);
-
- spin_unlock_irqrestore(&m->lock, flags);
- if (!must_queue)
- dispatch_queued_ios(m);
-}
-
/*
* An event is triggered whenever a path is taken out of use.
* Includes path failure and PG bypass.
@@ -972,27 +915,6 @@ static void multipath_dtr(struct dm_target *ti)
}
/*
- * Map cloned requests
- */
-static int multipath_map(struct dm_target *ti, struct request *clone,
- union map_info *map_context)
-{
- int r;
- struct multipath *m = (struct multipath *) ti->private;
-
- if (set_mapinfo(m, map_context) < 0)
- /* ENOMEM, requeue */
- return DM_MAPIO_REQUEUE;
-
- clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
- r = map_io(m, clone, map_context, 0);
- if (r < 0 || r == DM_MAPIO_REQUEUE)
- clear_mapinfo(m, map_context);
-
- return r;
-}
-
-/*
* Take a path out of use.
*/
static int fail_path(struct pgpath *pgpath)
@@ -1054,9 +976,9 @@ static int reinstate_path(struct pgpath *pgpath)
pgpath->is_active = 1;
- if (!m->nr_valid_paths++ && m->queue_size) {
+ if (!m->nr_valid_paths++) {
m->current_pgpath = NULL;
- queue_work(kmultipathd, &m->process_queued_ios);
+ dm_table_run_md_queue_async(m->ti->table);
} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
m->pg_init_in_progress++;
@@ -1252,11 +1174,12 @@ static void pg_init_done(void *data, int errors)
/* Activations of other paths are still on going */
goto out;
- if (!m->pg_init_required)
- m->queue_io = 0;
-
- m->pg_init_delay_retry = delay_retry;
- queue_work(kmultipathd, &m->process_queued_ios);
+ if (m->pg_init_required) {
+ m->pg_init_delay_retry = delay_retry;
+ if (__pg_init_all_paths(m))
+ goto out;
+ }
+ m->queue_io = 0;
/*
* Wake up any thread waiting to suspend.
@@ -1272,8 +1195,11 @@ static void activate_path(struct work_struct *work)
struct pgpath *pgpath =
container_of(work, struct pgpath, activate_path.work);
- scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
- pg_init_done, pgpath);
+ if (pgpath->is_active)
+ scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
+ pg_init_done, pgpath);
+ else
+ pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}
static int noretry_error(int error)
@@ -1433,7 +1359,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
/* Features */
if (type == STATUSTYPE_INFO)
- DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
+ DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
else {
DMEMIT("%u ", m->queue_if_no_path +
(m->pg_init_retries > 0) * 2 +
@@ -1552,7 +1478,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
}
if (argc != 2) {
- DMWARN("Unrecognised multipath message received.");
+ DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
goto out;
}
@@ -1570,7 +1496,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
else if (!strcasecmp(argv[0], "fail_path"))
action = fail_path;
else {
- DMWARN("Unrecognised multipath message received.");
+ DMWARN("Unrecognised multipath message received: %s", argv[0]);
goto out;
}
@@ -1632,8 +1558,17 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
r = err;
}
- if (r == -ENOTCONN && !fatal_signal_pending(current))
- queue_work(kmultipathd, &m->process_queued_ios);
+ if (r == -ENOTCONN && !fatal_signal_pending(current)) {
+ spin_lock_irqsave(&m->lock, flags);
+ if (!m->current_pg) {
+ /* Path status changed, redo selection */
+ __choose_pgpath(m, 0);
+ }
+ if (m->pg_init_required)
+ __pg_init_all_paths(m);
+ spin_unlock_irqrestore(&m->lock, flags);
+ dm_table_run_md_queue_async(m->ti->table);
+ }
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
@@ -1684,7 +1619,7 @@ static int multipath_busy(struct dm_target *ti)
spin_lock_irqsave(&m->lock, flags);
/* pg_init in progress, requeue until done */
- if (m->pg_init_in_progress) {
+ if (!pg_ready(m)) {
busy = 1;
goto out;
}
@@ -1737,7 +1672,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 6, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 6a7f2b83a126..50601ec7017a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -945,7 +945,7 @@ bool dm_table_request_based(struct dm_table *t)
return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
}
-int dm_table_alloc_md_mempools(struct dm_table *t)
+static int dm_table_alloc_md_mempools(struct dm_table *t)
{
unsigned type = dm_table_get_type(t);
unsigned per_bio_data_size = 0;
@@ -1618,6 +1618,25 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
}
EXPORT_SYMBOL(dm_table_get_md);
+void dm_table_run_md_queue_async(struct dm_table *t)
+{
+ struct mapped_device *md;
+ struct request_queue *queue;
+ unsigned long flags;
+
+ if (!dm_table_request_based(t))
+ return;
+
+ md = dm_table_get_md(t);
+ queue = dm_get_md_queue(md);
+ if (queue) {
+ spin_lock_irqsave(queue->queue_lock, flags);
+ blk_run_queue_async(queue);
+ spin_unlock_irqrestore(queue->queue_lock, flags);
+ }
+}
+EXPORT_SYMBOL(dm_table_run_md_queue_async);
+
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
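
dm_table_run_md_queue_async() replaces the per-target process_queued_ios work item: a request-based target that has just cleared the condition behind its DM_MAPIO_REQUEUE returns simply asks the block layer to re-run the mapped device's request queue. Since blk_run_queue_async() only schedules the queue run, the helper is callable from atomic context, which is why queue_if_no_path() above can invoke it under m->lock. A hedged, illustrative caller (the surrounding target code is a placeholder):

/* Illustrative only: a request-based target nudging its md queue after
 * clearing whatever condition was causing DM_MAPIO_REQUEUE returns. */
static void example_unblock_io(struct dm_target *ti)
{
        /* ... update target state under the target's own lock ... */

        /* Only schedules an async queue run, so atomic context is fine. */
        dm_table_run_md_queue_async(ti->table);
}
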
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index fb9efc829182..b086a945edcb 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -192,6 +192,13 @@ struct dm_pool_metadata {
* operation possible in this state is the closing of the device.
*/
bool fail_io:1;
+
+ /*
+ * Reading the space map roots can fail, so we read it into these
+ * buffers before the superblock is locked and updated.
+ */
+ __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
+ __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};
struct dm_thin_device {
@@ -431,26 +438,53 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
pmd->details_info.value_type.equal = NULL;
}
+static int save_sm_roots(struct dm_pool_metadata *pmd)
+{
+ int r;
+ size_t len;
+
+ r = dm_sm_root_size(pmd->metadata_sm, &len);
+ if (r < 0)
+ return r;
+
+ r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
+ if (r < 0)
+ return r;
+
+ r = dm_sm_root_size(pmd->data_sm, &len);
+ if (r < 0)
+ return r;
+
+ return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
+}
+
+static void copy_sm_roots(struct dm_pool_metadata *pmd,
+ struct thin_disk_superblock *disk)
+{
+ memcpy(&disk->metadata_space_map_root,
+ &pmd->metadata_space_map_root,
+ sizeof(pmd->metadata_space_map_root));
+
+ memcpy(&disk->data_space_map_root,
+ &pmd->data_space_map_root,
+ sizeof(pmd->data_space_map_root));
+}
+
static int __write_initial_superblock(struct dm_pool_metadata *pmd)
{
int r;
struct dm_block *sblock;
- size_t metadata_len, data_len;
struct thin_disk_superblock *disk_super;
sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
if (bdev_size > THIN_METADATA_MAX_SECTORS)
bdev_size = THIN_METADATA_MAX_SECTORS;
- r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
- if (r < 0)
- return r;
-
- r = dm_sm_root_size(pmd->data_sm, &data_len);
+ r = dm_sm_commit(pmd->data_sm);
if (r < 0)
return r;
- r = dm_sm_commit(pmd->data_sm);
+ r = save_sm_roots(pmd);
if (r < 0)
return r;
@@ -471,15 +505,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
disk_super->trans_id = 0;
disk_super->held_root = 0;
- r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
- metadata_len);
- if (r < 0)
- goto bad_locked;
-
- r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
- data_len);
- if (r < 0)
- goto bad_locked;
+ copy_sm_roots(pmd, disk_super);
disk_super->data_mapping_root = cpu_to_le64(pmd->root);
disk_super->device_details_root = cpu_to_le64(pmd->details_root);
@@ -488,10 +514,6 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
return dm_tm_commit(pmd->tm, sblock);
-
-bad_locked:
- dm_bm_unlock(sblock);
- return r;
}
static int __format_metadata(struct dm_pool_metadata *pmd)
@@ -769,6 +791,10 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
if (r < 0)
return r;
+ r = save_sm_roots(pmd);
+ if (r < 0)
+ return r;
+
r = superblock_lock(pmd, &sblock);
if (r)
return r;
@@ -780,21 +806,9 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
disk_super->trans_id = cpu_to_le64(pmd->trans_id);
disk_super->flags = cpu_to_le32(pmd->flags);
- r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
- metadata_len);
- if (r < 0)
- goto out_locked;
-
- r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
- data_len);
- if (r < 0)
- goto out_locked;
+ copy_sm_roots(pmd, disk_super);
return dm_tm_commit(pmd->tm, sblock);
-
-out_locked:
- dm_bm_unlock(sblock);
- return r;
}
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index be70d38745f7..53728be84dee 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -12,9 +12,11 @@
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
+#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/rbtree.h>
#define DM_MSG_PREFIX "thin"
@@ -178,12 +180,10 @@ struct pool {
unsigned ref_count;
spinlock_t lock;
- struct bio_list deferred_bios;
struct bio_list deferred_flush_bios;
struct list_head prepared_mappings;
struct list_head prepared_discards;
-
- struct bio_list retry_on_resume_list;
+ struct list_head active_thins;
struct dm_deferred_set *shared_read_ds;
struct dm_deferred_set *all_io_ds;
@@ -220,6 +220,7 @@ struct pool_c {
* Target context for a thin.
*/
struct thin_c {
+ struct list_head list;
struct dm_dev *pool_dev;
struct dm_dev *origin_dev;
dm_thin_id dev_id;
@@ -227,6 +228,10 @@ struct thin_c {
struct pool *pool;
struct dm_thin_device *td;
bool requeue_mode:1;
+ spinlock_t lock;
+ struct bio_list deferred_bio_list;
+ struct bio_list retry_on_resume_list;
+ struct rb_root sort_bio_list; /* sorted list of deferred bios */
};
/*----------------------------------------------------------------*/
@@ -287,9 +292,9 @@ static void cell_defer_no_holder_no_free(struct thin_c *tc,
struct pool *pool = tc->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->lock, flags);
- dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_lock_irqsave(&tc->lock, flags);
+ dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
+ spin_unlock_irqrestore(&tc->lock, flags);
wake_worker(pool);
}
@@ -368,6 +373,7 @@ struct dm_thin_endio_hook {
struct dm_deferred_entry *shared_read_entry;
struct dm_deferred_entry *all_io_entry;
struct dm_thin_new_mapping *overwrite_mapping;
+ struct rb_node rb_node;
};
static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -378,30 +384,22 @@ static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
bio_list_init(&bios);
- spin_lock_irqsave(&tc->pool->lock, flags);
+ spin_lock_irqsave(&tc->lock, flags);
bio_list_merge(&bios, master);
bio_list_init(master);
- spin_unlock_irqrestore(&tc->pool->lock, flags);
+ spin_unlock_irqrestore(&tc->lock, flags);
- while ((bio = bio_list_pop(&bios))) {
- struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
-
- if (h->tc == tc)
- bio_endio(bio, DM_ENDIO_REQUEUE);
- else
- bio_list_add(master, bio);
- }
+ while ((bio = bio_list_pop(&bios)))
+ bio_endio(bio, DM_ENDIO_REQUEUE);
}
static void requeue_io(struct thin_c *tc)
{
- struct pool *pool = tc->pool;
-
- requeue_bio_list(tc, &pool->deferred_bios);
- requeue_bio_list(tc, &pool->retry_on_resume_list);
+ requeue_bio_list(tc, &tc->deferred_bio_list);
+ requeue_bio_list(tc, &tc->retry_on_resume_list);
}
-static void error_retry_list(struct pool *pool)
+static void error_thin_retry_list(struct thin_c *tc)
{
struct bio *bio;
unsigned long flags;
@@ -409,15 +407,25 @@ static void error_retry_list(struct pool *pool)
bio_list_init(&bios);
- spin_lock_irqsave(&pool->lock, flags);
- bio_list_merge(&bios, &pool->retry_on_resume_list);
- bio_list_init(&pool->retry_on_resume_list);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_lock_irqsave(&tc->lock, flags);
+ bio_list_merge(&bios, &tc->retry_on_resume_list);
+ bio_list_init(&tc->retry_on_resume_list);
+ spin_unlock_irqrestore(&tc->lock, flags);
while ((bio = bio_list_pop(&bios)))
bio_io_error(bio);
}
+static void error_retry_list(struct pool *pool)
+{
+ struct thin_c *tc;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tc, &pool->active_thins, list)
+ error_thin_retry_list(tc);
+ rcu_read_unlock();
+}
+
/*
* This section of code contains the logic for processing a thin device's IO.
* Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -608,9 +616,9 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
struct pool *pool = tc->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->lock, flags);
- cell_release(pool, cell, &pool->deferred_bios);
- spin_unlock_irqrestore(&tc->pool->lock, flags);
+ spin_lock_irqsave(&tc->lock, flags);
+ cell_release(pool, cell, &tc->deferred_bio_list);
+ spin_unlock_irqrestore(&tc->lock, flags);
wake_worker(pool);
}
@@ -623,9 +631,9 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
struct pool *pool = tc->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->lock, flags);
- cell_release_no_holder(pool, cell, &pool->deferred_bios);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_lock_irqsave(&tc->lock, flags);
+ cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
+ spin_unlock_irqrestore(&tc->lock, flags);
wake_worker(pool);
}
@@ -1001,12 +1009,11 @@ static void retry_on_resume(struct bio *bio)
{
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
- struct pool *pool = tc->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->lock, flags);
- bio_list_add(&pool->retry_on_resume_list, bio);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_lock_irqsave(&tc->lock, flags);
+ bio_list_add(&tc->retry_on_resume_list, bio);
+ spin_unlock_irqrestore(&tc->lock, flags);
}
static bool should_error_unserviceable_bio(struct pool *pool)
@@ -1363,38 +1370,111 @@ static int need_commit_due_to_time(struct pool *pool)
jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
}
-static void process_deferred_bios(struct pool *pool)
+#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
+#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
+
+static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
+{
+ struct rb_node **rbp, *parent;
+ struct dm_thin_endio_hook *pbd;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
+
+ rbp = &tc->sort_bio_list.rb_node;
+ parent = NULL;
+ while (*rbp) {
+ parent = *rbp;
+ pbd = thin_pbd(parent);
+
+ if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
+ rbp = &(*rbp)->rb_left;
+ else
+ rbp = &(*rbp)->rb_right;
+ }
+
+ pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+ rb_link_node(&pbd->rb_node, parent, rbp);
+ rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
+}
+
+static void __extract_sorted_bios(struct thin_c *tc)
+{
+ struct rb_node *node;
+ struct dm_thin_endio_hook *pbd;
+ struct bio *bio;
+
+ for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
+ pbd = thin_pbd(node);
+ bio = thin_bio(pbd);
+
+ bio_list_add(&tc->deferred_bio_list, bio);
+ rb_erase(&pbd->rb_node, &tc->sort_bio_list);
+ }
+
+ WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
+}
+
+static void __sort_thin_deferred_bios(struct thin_c *tc)
+{
+ struct bio *bio;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
+ bio_list_merge(&bios, &tc->deferred_bio_list);
+ bio_list_init(&tc->deferred_bio_list);
+
+ /* Sort deferred_bio_list using rb-tree */
+ while ((bio = bio_list_pop(&bios)))
+ __thin_bio_rb_add(tc, bio);
+
+ /*
+ * Transfer the sorted bios in sort_bio_list back to
+ * deferred_bio_list to allow lockless submission of
+ * all bios.
+ */
+ __extract_sorted_bios(tc);
+}
+
+static void process_thin_deferred_bios(struct thin_c *tc)
{
+ struct pool *pool = tc->pool;
unsigned long flags;
struct bio *bio;
struct bio_list bios;
+ struct blk_plug plug;
+
+ if (tc->requeue_mode) {
+ requeue_bio_list(tc, &tc->deferred_bio_list);
+ return;
+ }
bio_list_init(&bios);
- spin_lock_irqsave(&pool->lock, flags);
- bio_list_merge(&bios, &pool->deferred_bios);
- bio_list_init(&pool->deferred_bios);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_lock_irqsave(&tc->lock, flags);
- while ((bio = bio_list_pop(&bios))) {
- struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
- struct thin_c *tc = h->tc;
+ if (bio_list_empty(&tc->deferred_bio_list)) {
+ spin_unlock_irqrestore(&tc->lock, flags);
+ return;
+ }
- if (tc->requeue_mode) {
- bio_endio(bio, DM_ENDIO_REQUEUE);
- continue;
- }
+ __sort_thin_deferred_bios(tc);
+
+ bio_list_merge(&bios, &tc->deferred_bio_list);
+ bio_list_init(&tc->deferred_bio_list);
+ spin_unlock_irqrestore(&tc->lock, flags);
+
+ blk_start_plug(&plug);
+ while ((bio = bio_list_pop(&bios))) {
/*
* If we've got no free new_mapping structs, and processing
* this bio might require one, we pause until there are some
* prepared mappings to process.
*/
if (ensure_next_mapping(pool)) {
- spin_lock_irqsave(&pool->lock, flags);
- bio_list_merge(&pool->deferred_bios, &bios);
- spin_unlock_irqrestore(&pool->lock, flags);
-
+ spin_lock_irqsave(&tc->lock, flags);
+ bio_list_add(&tc->deferred_bio_list, bio);
+ bio_list_merge(&tc->deferred_bio_list, &bios);
+ spin_unlock_irqrestore(&tc->lock, flags);
break;
}
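
The per-thin rb-tree above exists purely to drain deferred_bio_list in ascending bi_sector order before submission. As a rough userspace analogue of the same idea, one can sort a batch of deferred requests by start sector with qsort(); the fake_bio type and sample data are hypothetical, only the sort-by-sector intent mirrors __sort_thin_deferred_bios().

#include <stdio.h>
#include <stdlib.h>

struct fake_bio {                       /* hypothetical stand-in for struct bio */
        unsigned long long bi_sector;
};

static int by_sector(const void *a, const void *b)
{
        const struct fake_bio *x = a, *y = b;

        return (x->bi_sector > y->bi_sector) - (x->bi_sector < y->bi_sector);
}

int main(void)
{
        struct fake_bio deferred[] = { {4096}, {0}, {1024}, {65536}, {512} };
        size_t i, n = sizeof(deferred) / sizeof(deferred[0]);

        /* Submitting in sector order keeps the data device seeing
         * mostly sequential I/O, which is the point of the rb-tree. */
        qsort(deferred, n, sizeof(deferred[0]), by_sector);

        for (i = 0; i < n; i++)
                printf("%llu\n", deferred[i].bi_sector);
        return 0;
}
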
@@ -1403,6 +1483,20 @@ static void process_deferred_bios(struct pool *pool)
else
pool->process_bio(tc, bio);
}
+ blk_finish_plug(&plug);
+}
+
+static void process_deferred_bios(struct pool *pool)
+{
+ unsigned long flags;
+ struct bio *bio;
+ struct bio_list bios;
+ struct thin_c *tc;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tc, &pool->active_thins, list)
+ process_thin_deferred_bios(tc);
+ rcu_read_unlock();
/*
* If there are any deferred flush bios, we must commit
@@ -1634,9 +1728,9 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
unsigned long flags;
struct pool *pool = tc->pool;
- spin_lock_irqsave(&pool->lock, flags);
- bio_list_add(&pool->deferred_bios, bio);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_lock_irqsave(&tc->lock, flags);
+ bio_list_add(&tc->deferred_bio_list, bio);
+ spin_unlock_irqrestore(&tc->lock, flags);
wake_worker(pool);
}
@@ -1757,26 +1851,29 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
- int r;
- unsigned long flags;
struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
+ struct request_queue *q;
- spin_lock_irqsave(&pt->pool->lock, flags);
- r = !bio_list_empty(&pt->pool->retry_on_resume_list);
- spin_unlock_irqrestore(&pt->pool->lock, flags);
+ if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
+ return 1;
- if (!r) {
- struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
- r = bdi_congested(&q->backing_dev_info, bdi_bits);
- }
-
- return r;
+ q = bdev_get_queue(pt->data_dev->bdev);
+ return bdi_congested(&q->backing_dev_info, bdi_bits);
}
-static void __requeue_bios(struct pool *pool)
+static void requeue_bios(struct pool *pool)
{
- bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
- bio_list_init(&pool->retry_on_resume_list);
+ unsigned long flags;
+ struct thin_c *tc;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tc, &pool->active_thins, list) {
+ spin_lock_irqsave(&tc->lock, flags);
+ bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
+ bio_list_init(&tc->retry_on_resume_list);
+ spin_unlock_irqrestore(&tc->lock, flags);
+ }
+ rcu_read_unlock();
}
/*----------------------------------------------------------------
@@ -1957,12 +2054,11 @@ static struct pool *pool_create(struct mapped_device *pool_md,
INIT_WORK(&pool->worker, do_worker);
INIT_DELAYED_WORK(&pool->waker, do_waker);
spin_lock_init(&pool->lock);
- bio_list_init(&pool->deferred_bios);
bio_list_init(&pool->deferred_flush_bios);
INIT_LIST_HEAD(&pool->prepared_mappings);
INIT_LIST_HEAD(&pool->prepared_discards);
+ INIT_LIST_HEAD(&pool->active_thins);
pool->low_water_triggered = false;
- bio_list_init(&pool->retry_on_resume_list);
pool->shared_read_ds = dm_deferred_set_create();
if (!pool->shared_read_ds) {
@@ -2507,8 +2603,8 @@ static void pool_resume(struct dm_target *ti)
spin_lock_irqsave(&pool->lock, flags);
pool->low_water_triggered = false;
- __requeue_bios(pool);
spin_unlock_irqrestore(&pool->lock, flags);
+ requeue_bios(pool);
do_waker(&pool->waker.work);
}
@@ -2947,7 +3043,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 11, 0},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2968,6 +3064,12 @@ static struct target_type pool_target = {
static void thin_dtr(struct dm_target *ti)
{
struct thin_c *tc = ti->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tc->pool->lock, flags);
+ list_del_rcu(&tc->list);
+ spin_unlock_irqrestore(&tc->pool->lock, flags);
+ synchronize_rcu();
mutex_lock(&dm_thin_pool_table.mutex);
@@ -3014,6 +3116,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -ENOMEM;
goto out_unlock;
}
+ spin_lock_init(&tc->lock);
+ bio_list_init(&tc->deferred_bio_list);
+ bio_list_init(&tc->retry_on_resume_list);
+ tc->sort_bio_list = RB_ROOT;
if (argc == 3) {
r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
@@ -3085,6 +3191,17 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
mutex_unlock(&dm_thin_pool_table.mutex);
+ spin_lock(&tc->pool->lock);
+ list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
+ spin_unlock(&tc->pool->lock);
+ /*
+ * This synchronize_rcu() call is needed here, otherwise we risk a
+ * wake_worker() call finding no bios to process because the newly
+ * added tc is not yet visible to the RCU list walk. With it, bios
+ * queued to the new device are picked up immediately rather than
+ * waiting for the periodic commit to call wake_worker().
+ */
+ synchronize_rcu();
+
return 0;
bad_target_max_io_len:
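
The pool's thin devices now live on the RCU-protected active_thins list: thin_ctr()/thin_dtr() modify it under pool->lock with list_add_tail_rcu()/list_del_rcu() plus synchronize_rcu(), while the worker only takes rcu_read_lock() to walk it. A hedged kernel-style sketch of the reader side, modelled on process_deferred_bios() earlier in this diff (the callback is a placeholder):

/* Illustrative reader: safe against a concurrent thin_ctr()/thin_dtr(). */
static void for_each_active_thin(struct pool *pool,
                                 void (*fn)(struct thin_c *tc))
{
        struct thin_c *tc;

        rcu_read_lock();
        list_for_each_entry_rcu(tc, &pool->active_thins, list)
                fn(tc);         /* must not sleep inside the RCU read section */
        rcu_read_unlock();
}
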
@@ -3250,7 +3367,7 @@ static int thin_iterate_devices(struct dm_target *ti,
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 11, 0},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8c53b09b9a2c..455e64916498 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -94,13 +94,6 @@ struct dm_rq_clone_bio_info {
struct bio clone;
};
-union map_info *dm_get_mapinfo(struct bio *bio)
-{
- if (bio && bio->bi_private)
- return &((struct dm_target_io *)bio->bi_private)->info;
- return NULL;
-}
-
union map_info *dm_get_rq_mapinfo(struct request *rq)
{
if (rq && rq->end_io_data)
@@ -475,6 +468,11 @@ sector_t dm_get_size(struct mapped_device *md)
return get_capacity(md->disk);
}
+struct request_queue *dm_get_md_queue(struct mapped_device *md)
+{
+ return md->queue;
+}
+
struct dm_stats *dm_get_stats(struct mapped_device *md)
{
return &md->stats;
@@ -760,7 +758,7 @@ static void dec_pending(struct dm_io *io, int error)
static void clone_endio(struct bio *bio, int error)
{
int r = 0;
- struct dm_target_io *tio = bio->bi_private;
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
@@ -794,7 +792,8 @@ static void clone_endio(struct bio *bio, int error)
*/
static void end_clone_bio(struct bio *clone, int error)
{
- struct dm_rq_clone_bio_info *info = clone->bi_private;
+ struct dm_rq_clone_bio_info *info =
+ container_of(clone, struct dm_rq_clone_bio_info, clone);
struct dm_rq_target_io *tio = info->tio;
struct bio *bio = info->orig;
unsigned int nr_bytes = info->orig->bi_iter.bi_size;
@@ -1120,7 +1119,6 @@ static void __map_bio(struct dm_target_io *tio)
struct dm_target *ti = tio->ti;
clone->bi_end_io = clone_endio;
- clone->bi_private = tio;
/*
* Map the clone. If r == 0 we don't need to do
@@ -1195,7 +1193,6 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
tio->io = ci->io;
tio->ti = ti;
- memset(&tio->info, 0, sizeof(tio->info));
tio->target_bio_nr = target_bio_nr;
return tio;
@@ -1530,7 +1527,6 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
info->orig = bio_orig;
info->tio = tio;
bio->bi_end_io = end_clone_bio;
- bio->bi_private = info;
return 0;
}
@@ -2172,7 +2168,7 @@ static struct dm_table *__unbind(struct mapped_device *md)
return NULL;
dm_table_event_callback(map, NULL, NULL);
- rcu_assign_pointer(md->map, NULL);
+ RCU_INIT_POINTER(md->map, NULL);
dm_sync_table(md);
return map;
@@ -2873,8 +2869,6 @@ static const struct block_device_operations dm_blk_dops = {
.owner = THIS_MODULE
};
-EXPORT_SYMBOL(dm_get_mapinfo);
-
/*
* module hooks
*/
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index c4569f02f50f..ed76126aac54 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -73,7 +73,6 @@ unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
bool dm_table_supports_discards(struct dm_table *t);
-int dm_table_alloc_md_mempools(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
@@ -189,6 +188,7 @@ int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
+struct request_queue *dm_get_md_queue(struct mapped_device *md);
struct dm_stats *dm_get_stats(struct mapped_device *md);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4ad5cc4e63e8..8fda38d23e38 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5181,32 +5181,6 @@ static int restart_array(struct mddev *mddev)
return 0;
}
-/* similar to deny_write_access, but accounts for our holding a reference
- * to the file ourselves */
-static int deny_bitmap_write_access(struct file * file)
-{
- struct inode *inode = file->f_mapping->host;
-
- spin_lock(&inode->i_lock);
- if (atomic_read(&inode->i_writecount) > 1) {
- spin_unlock(&inode->i_lock);
- return -ETXTBSY;
- }
- atomic_set(&inode->i_writecount, -1);
- spin_unlock(&inode->i_lock);
-
- return 0;
-}
-
-void restore_bitmap_write_access(struct file *file)
-{
- struct inode *inode = file->f_mapping->host;
-
- spin_lock(&inode->i_lock);
- atomic_set(&inode->i_writecount, 1);
- spin_unlock(&inode->i_lock);
-}
-
static void md_clean(struct mddev *mddev)
{
mddev->array_sectors = 0;
@@ -5427,7 +5401,6 @@ static int do_md_stop(struct mddev * mddev, int mode,
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
- restore_bitmap_write_access(mddev->bitmap_info.file);
fput(mddev->bitmap_info.file);
mddev->bitmap_info.file = NULL;
}
@@ -5979,7 +5952,7 @@ abort_export:
static int set_bitmap_file(struct mddev *mddev, int fd)
{
- int err;
+ int err = 0;
if (mddev->pers) {
if (!mddev->pers->quiesce)
@@ -5991,6 +5964,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
if (fd >= 0) {
+ struct inode *inode;
if (mddev->bitmap)
return -EEXIST; /* cannot add when bitmap is present */
mddev->bitmap_info.file = fget(fd);
@@ -6001,10 +5975,21 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
return -EBADF;
}
- err = deny_bitmap_write_access(mddev->bitmap_info.file);
- if (err) {
+ inode = mddev->bitmap_info.file->f_mapping->host;
+ if (!S_ISREG(inode->i_mode)) {
+ printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
+ mdname(mddev));
+ err = -EBADF;
+ } else if (!(mddev->bitmap_info.file->f_mode & FMODE_WRITE)) {
+ printk(KERN_ERR "%s: error: bitmap file must open for write\n",
+ mdname(mddev));
+ err = -EBADF;
+ } else if (atomic_read(&inode->i_writecount) != 1) {
printk(KERN_ERR "%s: error: bitmap file is already in use\n",
mdname(mddev));
+ err = -EBUSY;
+ }
+ if (err) {
fput(mddev->bitmap_info.file);
mddev->bitmap_info.file = NULL;
return err;
@@ -6027,10 +6012,8 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
- if (mddev->bitmap_info.file) {
- restore_bitmap_write_access(mddev->bitmap_info.file);
+ if (mddev->bitmap_info.file)
fput(mddev->bitmap_info.file);
- }
mddev->bitmap_info.file = NULL;
}
@@ -7182,11 +7165,14 @@ static int md_seq_open(struct inode *inode, struct file *file)
return error;
}
+static int md_unloading;
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *seq = filp->private_data;
int mask;
+ if (md_unloading)
+ return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
poll_wait(filp, &md_event_waiters, wait);
/* always allow read */
@@ -8672,6 +8658,7 @@ static __exit void md_exit(void)
{
struct mddev *mddev;
struct list_head *tmp;
+ int delay = 1;
blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
@@ -8680,7 +8667,19 @@ static __exit void md_exit(void)
unregister_blkdev(mdp_major, "mdp");
unregister_reboot_notifier(&md_notifier);
unregister_sysctl_table(raid_table_header);
+
+ /* We cannot unload the modules while some process is
+ * waiting for us in select() or poll() - wake them up
+ */
+ md_unloading = 1;
+ while (waitqueue_active(&md_event_waiters)) {
+ /* not safe to leave yet */
+ wake_up(&md_event_waiters);
+ msleep(delay);
+ delay += delay;
+ }
remove_proc_entry("mdstat", NULL);
+
for_each_mddev(mddev, tmp) {
export_array(mddev);
mddev->hold_active = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 07bba96de260..a49d991f3fe1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -605,7 +605,6 @@ extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
-extern void restore_bitmap_write_access(struct file *file);
extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c
index cd9a86d4cdf0..36f7cc2c7109 100644
--- a/drivers/md/persistent-data/dm-bitset.c
+++ b/drivers/md/persistent-data/dm-bitset.c
@@ -65,7 +65,7 @@ int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
int r;
__le64 value;
- if (!info->current_index_set)
+ if (!info->current_index_set || !info->dirty)
return 0;
value = cpu_to_le64(info->current_bits);
@@ -77,6 +77,8 @@ int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
return r;
info->current_index_set = false;
+ info->dirty = false;
+
return 0;
}
EXPORT_SYMBOL_GPL(dm_bitset_flush);
@@ -94,6 +96,8 @@ static int read_bits(struct dm_disk_bitset *info, dm_block_t root,
info->current_bits = le64_to_cpu(value);
info->current_index_set = true;
info->current_index = array_index;
+ info->dirty = false;
+
return 0;
}
@@ -126,6 +130,8 @@ int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
return r;
set_bit(b, (unsigned long *) &info->current_bits);
+ info->dirty = true;
+
return 0;
}
EXPORT_SYMBOL_GPL(dm_bitset_set_bit);
@@ -141,6 +147,8 @@ int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
return r;
clear_bit(b, (unsigned long *) &info->current_bits);
+ info->dirty = true;
+
return 0;
}
EXPORT_SYMBOL_GPL(dm_bitset_clear_bit);
diff --git a/drivers/md/persistent-data/dm-bitset.h b/drivers/md/persistent-data/dm-bitset.h
index e1b9bea14aa1..c2287d672ef5 100644
--- a/drivers/md/persistent-data/dm-bitset.h
+++ b/drivers/md/persistent-data/dm-bitset.h
@@ -71,6 +71,7 @@ struct dm_disk_bitset {
uint64_t current_bits;
bool current_index_set:1;
+ bool dirty:1;
};
/*
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 455f79279a16..087411c95ffc 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -595,25 +595,14 @@ int dm_bm_unlock(struct dm_block *b)
}
EXPORT_SYMBOL_GPL(dm_bm_unlock);
-int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
- struct dm_block *superblock)
+int dm_bm_flush(struct dm_block_manager *bm)
{
- int r;
-
if (bm->read_only)
return -EPERM;
- r = dm_bufio_write_dirty_buffers(bm->bufio);
- if (unlikely(r)) {
- dm_bm_unlock(superblock);
- return r;
- }
-
- dm_bm_unlock(superblock);
-
return dm_bufio_write_dirty_buffers(bm->bufio);
}
-EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
+EXPORT_SYMBOL_GPL(dm_bm_flush);
void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
{
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index 13cd58e1fe69..1b95dfc17786 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -105,8 +105,7 @@ int dm_bm_unlock(struct dm_block *b);
*
* This method always blocks.
*/
-int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
- struct dm_block *superblock);
+int dm_bm_flush(struct dm_block_manager *bm);
/*
* Request data is prefetched into the cache.
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 81da1a26042e..3bc30a0ae3d6 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -154,7 +154,7 @@ int dm_tm_pre_commit(struct dm_transaction_manager *tm)
if (r < 0)
return r;
- return 0;
+ return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
@@ -164,8 +164,9 @@ int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
return -EWOULDBLOCK;
wipe_shadow_table(tm);
+ dm_bm_unlock(root);
- return dm_bm_flush_and_unlock(tm->bm, root);
+ return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_commit);
diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h
index b5b139076ca5..2772ed2a781a 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.h
+++ b/drivers/md/persistent-data/dm-transaction-manager.h
@@ -38,18 +38,17 @@ struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transac
/*
* We use a 2-phase commit here.
*
- * i) In the first phase the block manager is told to start flushing, and
- * the changes to the space map are written to disk. You should interrogate
- * your particular space map to get detail of its root node etc. to be
- * included in your superblock.
+ * i) Make all changes for the transaction *except* for the superblock.
+ * Then call dm_tm_pre_commit() to flush them to disk.
*
- * ii) @root will be committed last. You shouldn't use more than the
- * first 512 bytes of @root if you wish the transaction to survive a power
- * failure. You *must* have a write lock held on @root for both stage (i)
- * and (ii). The commit will drop the write lock.
+ * ii) Lock your superblock. Update. Then call dm_tm_commit() which will
+ * unlock the superblock and flush it. No other blocks should be updated
+ * during this period. Care should be taken to never unlock a partially
+ * updated superblock; perform any operations that could fail *before* you
+ * take the superblock lock.
*/
int dm_tm_pre_commit(struct dm_transaction_manager *tm);
-int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root);
+int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *superblock);
/*
* These methods are the only way to get hold of a writeable block.
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4a6ca1cb2e78..56e24c072b62 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -97,6 +97,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
struct pool_info *pi = data;
struct r1bio *r1_bio;
struct bio *bio;
+ int need_pages;
int i, j;
r1_bio = r1bio_pool_alloc(gfp_flags, pi);
@@ -119,15 +120,15 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* RESYNC_PAGES for each bio.
*/
if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
- j = pi->raid_disks;
+ need_pages = pi->raid_disks;
else
- j = 1;
- while(j--) {
+ need_pages = 1;
+ for (j = 0; j < need_pages; j++) {
bio = r1_bio->bios[j];
bio->bi_vcnt = RESYNC_PAGES;
if (bio_alloc_pages(bio, gfp_flags))
- goto out_free_bio;
+ goto out_free_pages;
}
/* If not user-requests, copy the page pointers to all bios */
if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
@@ -141,6 +142,14 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
return r1_bio;
+out_free_pages:
+ while (--j >= 0) {
+ struct bio_vec *bv;
+
+ bio_for_each_segment_all(bv, r1_bio->bios[j], i)
+ __free_page(bv->bv_page);
+ }
+
out_free_bio:
while (++j < pi->raid_disks)
bio_put(r1_bio->bios[j]);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 16f5c21963db..25247a852912 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -679,14 +679,9 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
init_stripe(sh, sector, previous);
atomic_inc(&sh->count);
}
- } else {
+ } else if (!atomic_inc_not_zero(&sh->count)) {
spin_lock(&conf->device_lock);
- if (atomic_read(&sh->count)) {
- BUG_ON(!list_empty(&sh->lru)
- && !test_bit(STRIPE_EXPANDING, &sh->state)
- && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
- );
- } else {
+ if (!atomic_read(&sh->count)) {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
BUG_ON(list_empty(&sh->lru) &&
@@ -4552,6 +4547,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
int remaining;
+ DEFINE_WAIT(w);
+ bool do_prepare;
if (unlikely(bi->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bi);
@@ -4575,15 +4572,18 @@ static void make_request(struct mddev *mddev, struct bio * bi)
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
+ prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
- DEFINE_WAIT(w);
int previous;
int seq;
+ do_prepare = false;
retry:
seq = read_seqcount_begin(&conf->gen_lock);
previous = 0;
- prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
+ if (do_prepare)
+ prepare_to_wait(&conf->wait_for_overlap, &w,
+ TASK_UNINTERRUPTIBLE);
if (unlikely(conf->reshape_progress != MaxSector)) {
/* spinlock is needed as reshape_progress may be
* 64bit on a 32bit platform, and so it might be
@@ -4604,6 +4604,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
: logical_sector >= conf->reshape_safe) {
spin_unlock_irq(&conf->device_lock);
schedule();
+ do_prepare = true;
goto retry;
}
}
@@ -4640,6 +4641,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if (must_retry) {
release_stripe(sh);
schedule();
+ do_prepare = true;
goto retry;
}
}
@@ -4663,8 +4665,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
prepare_to_wait(&conf->wait_for_overlap,
&w, TASK_INTERRUPTIBLE);
if (logical_sector >= mddev->suspend_lo &&
- logical_sector < mddev->suspend_hi)
+ logical_sector < mddev->suspend_hi) {
schedule();
+ do_prepare = true;
+ }
goto retry;
}
@@ -4677,9 +4681,9 @@ static void make_request(struct mddev *mddev, struct bio * bi)
md_wakeup_thread(mddev->thread);
release_stripe(sh);
schedule();
+ do_prepare = true;
goto retry;
}
- finish_wait(&conf->wait_for_overlap, &w);
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((bi->bi_rw & REQ_SYNC) &&
@@ -4689,10 +4693,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
} else {
/* cannot get stripe for read-ahead, just give-up */
clear_bit(BIO_UPTODATE, &bi->bi_flags);
- finish_wait(&conf->wait_for_overlap, &w);
break;
}
}
+ finish_wait(&conf->wait_for_overlap, &w);
remaining = raid5_dec_bi_active_stripes(bi);
if (remaining == 0) {
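
The raid5 change above hoists prepare_to_wait() out of the per-stripe loop and re-arms the wait entry only after the task has actually slept, tracked by do_prepare, with a single finish_wait() at the end. A hedged kernel-style sketch of that pattern in isolation (the wait queue, condition and work helpers are placeholders):

/* Illustrative only: re-arm the wait entry just after actually sleeping. */
static void example_wait_loop(void)
{
        DEFINE_WAIT(w);
        bool do_prepare;
        int i;

        prepare_to_wait(&my_waitq, &w, TASK_UNINTERRUPTIBLE);
        for (i = 0; i < MY_NR_UNITS; i++) {     /* MY_NR_UNITS is a placeholder */
                do_prepare = false;
retry:
                if (do_prepare)
                        prepare_to_wait(&my_waitq, &w, TASK_UNINTERRUPTIBLE);
                if (!my_resources_ready(i)) {   /* placeholder retry condition */
                        schedule();
                        do_prepare = true;
                        goto retry;
                }
                my_process_unit(i);             /* placeholder per-unit work */
        }
        finish_wait(&my_waitq, &w);
}
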
diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c
index 0bb4430535f9..2408d7e9451e 100644
--- a/drivers/media/common/siano/smsdvb-debugfs.c
+++ b/drivers/media/common/siano/smsdvb-debugfs.c
@@ -1,6 +1,6 @@
/***********************************************************************
*
- * Copyright(c) 2013 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright(c) 2013 Mauro Carvalho Chehab
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c
index b8c5cad78537..6d7c0c858bd0 100644
--- a/drivers/media/common/siano/smsir.c
+++ b/drivers/media/common/siano/smsir.c
@@ -88,7 +88,7 @@ int sms_ir_init(struct smscore_device_t *coredev)
dev->priv = coredev;
dev->driver_type = RC_DRIVER_IR_RAW;
- dev->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(dev, RC_BIT_ALL);
dev->map_name = sms_get_board(board_id)->rc_codes;
dev->driver_name = MODULE_NAME;
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index f19a2ccd1e4b..1bdc0e7e8b79 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -257,6 +257,7 @@
#define USB_PID_TERRATEC_T5 0x10a1
#define USB_PID_NOXON_DAB_STICK 0x00b3
#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0
+#define USB_PID_NOXON_DAB_STICK_REV3 0x00b4
#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
#define USB_PID_PINNACLE_PCTV2000E 0x022c
#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 1f925e856974..6ce435ac866f 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -1279,7 +1279,7 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
switch(tvp->cmd) {
case DTV_ENUM_DELSYS:
ncaps = 0;
- while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) {
+ while (ncaps < MAX_DELSYS && fe->ops.delsys[ncaps]) {
tvp->u.buffer.data[ncaps] = fe->ops.delsys[ncaps];
ncaps++;
}
@@ -1596,7 +1596,7 @@ static int dvbv5_set_delivery_system(struct dvb_frontend *fe,
* supported
*/
ncaps = 0;
- while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) {
+ while (ncaps < MAX_DELSYS && fe->ops.delsys[ncaps]) {
if (fe->ops.delsys[ncaps] == desired_system) {
c->delivery_system = desired_system;
dev_dbg(fe->dvb->device,
@@ -1628,7 +1628,7 @@ static int dvbv5_set_delivery_system(struct dvb_frontend *fe,
* of the desired system
*/
ncaps = 0;
- while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) {
+ while (ncaps < MAX_DELSYS && fe->ops.delsys[ncaps]) {
if (dvbv3_type(fe->ops.delsys[ncaps]) == type)
delsys = fe->ops.delsys[ncaps];
ncaps++;
@@ -1703,7 +1703,7 @@ static int dvbv3_set_delivery_system(struct dvb_frontend *fe)
* DVBv3 standard
*/
ncaps = 0;
- while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) {
+ while (ncaps < MAX_DELSYS && fe->ops.delsys[ncaps]) {
if (dvbv3_type(fe->ops.delsys[ncaps]) != DVBV3_UNKNOWN) {
delsys = fe->ops.delsys[ncaps];
break;
@@ -1882,6 +1882,8 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
c->lna = tvp->u.data;
if (fe->ops.set_lna)
r = fe->ops.set_lna(fe);
+ if (r < 0)
+ c->lna = LNA_AUTO;
break;
default:
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index dd12a1ebda82..025fc5496bfc 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -441,7 +441,7 @@ config DVB_RTL2830
config DVB_RTL2832
tristate "Realtek RTL2832 DVB-T"
- depends on DVB_CORE && I2C
+ depends on DVB_CORE && I2C && I2C_MUX
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Say Y when you want to support this frontend.
@@ -650,6 +650,8 @@ config DVB_TUNER_DIB0090
comment "SEC control devices for DVB-S"
depends on DVB_CORE
+source "drivers/media/dvb-frontends/drx39xyj/Kconfig"
+
config DVB_LNBP21
tristate "LNBP21/LNBH24 SEC controllers"
depends on DVB_CORE && I2C
@@ -733,14 +735,6 @@ config DVB_IX2505V
help
A DVB-S tuner module. Say Y when you want to support this frontend.
-config DVB_IT913X_FE
- tristate "it913x frontend and it9137 tuner"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- A DVB-T tuner module.
- Say Y when you want to support this frontend.
-
config DVB_M88RS2000
tristate "M88RS2000 DVB-S demodulator and tuner"
depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index 0c75a6aafb9d..282aba2fe8db 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -92,13 +92,13 @@ obj-$(CONFIG_DVB_HD29L2) += hd29l2.o
obj-$(CONFIG_DVB_DS3000) += ds3000.o
obj-$(CONFIG_DVB_TS2020) += ts2020.o
obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
+obj-$(CONFIG_DVB_DRX39XYJ) += drx39xyj/
obj-$(CONFIG_DVB_MB86A20S) += mb86a20s.o
obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
obj-$(CONFIG_DVB_STV0367) += stv0367.o
obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o
obj-$(CONFIG_DVB_DRXK) += drxk.o
obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
-obj-$(CONFIG_DVB_IT913X_FE) += it913x-fe.o
obj-$(CONFIG_DVB_A8293) += a8293.o
obj-$(CONFIG_DVB_TDA10071) += tda10071.o
obj-$(CONFIG_DVB_RTL2830) += rtl2830.o
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 65728c25ea05..be4bec2a9640 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -989,10 +989,62 @@ err:
return ret;
}
+static int af9033_pid_filter_ctrl(struct dvb_frontend *fe, int onoff)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+
+ dev_dbg(&state->i2c->dev, "%s: onoff=%d\n", __func__, onoff);
+
+ ret = af9033_wr_reg_mask(state, 0x80f993, onoff, 0x01);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_pid_filter(struct dvb_frontend *fe, int index, u16 pid, int onoff)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+ u8 wbuf[2] = {(pid >> 0) & 0xff, (pid >> 8) & 0xff};
+
+ dev_dbg(&state->i2c->dev, "%s: index=%d pid=%04x onoff=%d\n",
+ __func__, index, pid, onoff);
+
+ if (pid > 0x1fff)
+ return 0;
+
+ ret = af9033_wr_regs(state, 0x80f996, wbuf, 2);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg(state, 0x80f994, onoff);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg(state, 0x80f995, index);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
static struct dvb_frontend_ops af9033_ops;
struct dvb_frontend *af9033_attach(const struct af9033_config *config,
- struct i2c_adapter *i2c)
+ struct i2c_adapter *i2c,
+ struct af9033_ops *ops)
{
int ret;
struct af9033_state *state;
@@ -1067,6 +1119,11 @@ struct dvb_frontend *af9033_attach(const struct af9033_config *config,
memcpy(&state->fe.ops, &af9033_ops, sizeof(struct dvb_frontend_ops));
state->fe.demodulator_priv = state;
+ if (ops) {
+ ops->pid_filter = af9033_pid_filter;
+ ops->pid_filter_ctrl = af9033_pid_filter_ctrl;
+ }
+
return &state->fe;
err:
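
af9033_attach() now takes a third argument through which the demodulator hands its PID-filter hooks back to the caller. A hedged sketch of the bridge-side usage follows; the config, i2c adapter and ops storage are assumed to be provided by the caller, and only the af9033 calls and af9033_ops fields come from this diff:

/* Illustrative bridge-side attach; error handling trimmed. */
static struct dvb_frontend *example_attach_demod(const struct af9033_config *cfg,
                                                 struct i2c_adapter *i2c,
                                                 struct af9033_ops *ops)
{
        struct dvb_frontend *fe = af9033_attach(cfg, i2c, ops);

        if (fe && ops->pid_filter_ctrl) {
                ops->pid_filter_ctrl(fe, 1);            /* enable the PID filter */
                ops->pid_filter(fe, 0, 0x0000, 1);      /* pass PID 0 in slot 0 */
        }
        return fe;
}
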
diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
index c286e8f1ec02..539f4db678b8 100644
--- a/drivers/media/dvb-frontends/af9033.h
+++ b/drivers/media/dvb-frontends/af9033.h
@@ -78,16 +78,42 @@ struct af9033_config {
};
+struct af9033_ops {
+ int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
+ int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
+ int onoff);
+};
+
+
#if IS_ENABLED(CONFIG_DVB_AF9033)
-extern struct dvb_frontend *af9033_attach(const struct af9033_config *config,
- struct i2c_adapter *i2c);
+extern
+struct dvb_frontend *af9033_attach(const struct af9033_config *config,
+ struct i2c_adapter *i2c,
+ struct af9033_ops *ops);
+
#else
-static inline struct dvb_frontend *af9033_attach(
- const struct af9033_config *config, struct i2c_adapter *i2c)
+static inline
+struct dvb_frontend *af9033_attach(const struct af9033_config *config,
+ struct i2c_adapter *i2c,
+ struct af9033_ops *ops)
{
pr_warn("%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
+
+static inline int af9033_pid_filter_ctrl(struct dvb_frontend *fe, int onoff)
+{
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int af9033_pid_filter(struct dvb_frontend *fe, int index, u16 pid,
+ int onoff)
+{
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
#endif
#endif /* AF9033_H */
diff --git a/drivers/media/dvb-frontends/drx39xyj/Kconfig b/drivers/media/dvb-frontends/drx39xyj/Kconfig
new file mode 100644
index 000000000000..15628eb5cf0c
--- /dev/null
+++ b/drivers/media/dvb-frontends/drx39xyj/Kconfig
@@ -0,0 +1,7 @@
+config DVB_DRX39XYJ
+ tristate "Micronas DRX-J demodulator"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ An ATSC 8VSB and QAM64/256 demodulator module. Say Y when you want
+ to support this frontend.
diff --git a/drivers/media/dvb-frontends/drx39xyj/Makefile b/drivers/media/dvb-frontends/drx39xyj/Makefile
new file mode 100644
index 000000000000..672e07774955
--- /dev/null
+++ b/drivers/media/dvb-frontends/drx39xyj/Makefile
@@ -0,0 +1,6 @@
+drx39xyj-objs := drxj.o
+
+obj-$(CONFIG_DVB_DRX39XYJ) += drx39xyj.o
+
+ccflags-y += -I$(srctree)/drivers/media/dvb-core/
+ccflags-y += -I$(srctree)/drivers/media/tuners/
diff --git a/drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h b/drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h
new file mode 100644
index 000000000000..5b5421f70388
--- /dev/null
+++ b/drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h
@@ -0,0 +1,139 @@
+/*
+ I2C API, implementation depends on board specifics
+
+ Copyright (c), 2004-2005,2007-2010 Trident Microsystems, Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Trident Microsystems nor Hauppauge Computer Works
+ nor the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ This module encapsulates I2C access. In some applications several devices
+ share one I2C bus. If these devices have the same I2C address some kind
+ of "switch" must be implemented to ensure error-free communication with
+ one device. In case such a "switch" is used, the device ID can be used
+ to implement control over this "switch".
+*/
+
+#ifndef __BSPI2C_H__
+#define __BSPI2C_H__
+
+#include "bsp_types.h"
+
+/*
+ * This structure contains the I2C address, the device ID and a user_data pointer.
+ * The user_data pointer can be used for application specific purposes.
+ */
+struct i2c_device_addr {
+ u16 i2c_addr; /* The I2C address of the device. */
+ u16 i2c_dev_id; /* The device identifier. */
+ void *user_data; /* User data pointer */
+};
+
+
+/**
+* \def IS_I2C_10BIT( addr )
+* \brief Determine whether I2C address 'addr' is a 10-bit address or not.
+* \param addr The I2C address.
+* \return int.
+* \retval 0 if the address is not a 10-bit I2C address.
+* \retval 1 if the address is a 10-bit I2C address.
+*/
+#define IS_I2C_10BIT(addr) \
+ (((addr) & 0xF8) == 0xF0)
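+
+/*
+ * Illustrative examples (arbitrary address values): standard I2C reserves
+ * the bit pattern 1111 0xx for 10-bit addressing, so IS_I2C_10BIT(0xF2)
+ * evaluates to 1, while IS_I2C_10BIT(0xA0) evaluates to 0.
+ */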
+
+/*------------------------------------------------------------------------------
+Exported FUNCTIONS
+------------------------------------------------------------------------------*/
+
+/**
+* \fn drxbsp_i2c_init()
+* \brief Initialize I2C communication module.
+* \return drx_status_t Return status.
+* \retval 0 Initialization successful.
+* \retval -EIO Initialization failed.
+*/
+ drx_status_t drxbsp_i2c_init(void);
+
+/**
+* \fn drxbsp_i2c_term()
+* \brief Terminate I2C communication module.
+* \return drx_status_t Return status.
+* \retval 0 Termination successful.
+* \retval -EIO Termination failed.
+*/
+ drx_status_t drxbsp_i2c_term(void);
+
+/**
+* \fn drx_status_t drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr,
+* u16 w_count,
+* u8 *w_data,
+* struct i2c_device_addr *r_dev_addr,
+* u16 r_count,
+* u8 *r_data)
+* \brief Read and/or write count bytes from I2C bus, store them in data[].
+* \param w_dev_addr The device i2c address and the device ID to write to
+* \param w_count The number of bytes to write
+* \param w_data The array containing the data to write
+* \param r_dev_addr The device i2c address and the device ID to read from
+* \param r_count The number of bytes to read
+* \param r_data The array in which to store the read data
+* \return drx_status_t Return status.
+* \retval 0 Success.
+* \retval -EIO Failure.
+* \retval -EINVAL Parameter 'w_count' is not zero but parameter
+* 'w_data' contains NULL.
+* Idem for 'r_count' and 'r_data'.
+* Both w_dev_addr and r_dev_addr are NULL.
+*
+* This function must implement an atomic write and/or read action on the I2C bus.
+* No other process may use the I2C bus when this function is executing.
+* The critical section of this function runs from and including the I2C
+* write, up to and including the I2C read action.
+*
+* The device ID can be useful if several devices share an I2C address.
+* It can be used to control a "switch" on the I2C bus to the correct device.
+*/
+ drx_status_t drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
+ u16 w_count,
+ u8 *w_data,
+ struct i2c_device_addr *r_dev_addr,
+ u16 r_count, u8 *r_data);
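+
+/*
+ * Illustrative call patterns (a sketch only; 'dev', 'wbuf' and 'rbuf' are
+ * hypothetical variables):
+ *
+ *   // write-only transfer: send 2 bytes to 'dev'
+ *   drxbsp_i2c_write_read(&dev, 2, wbuf, NULL, 0, NULL);
+ *
+ *   // write a register address, then read 4 bytes back from the same device
+ *   drxbsp_i2c_write_read(&dev, 2, wbuf, &dev, 4, rbuf);
+ */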
+
+/**
+* \fn drxbsp_i2c_error_text()
+* \brief Returns a human readable error.
+* Counter part of numerical drx_i2c_error_g.
+*
+* \return char* Pointer to human readable error text.
+*/
+ char *drxbsp_i2c_error_text(void);
+
+/**
+* \var drx_i2c_error_g;
+* \brief I2C specific error codes, platform dependent.
+*/
+ extern int drx_i2c_error_g;
+
+#endif /* __BSPI2C_H__ */
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx39xxj.h b/drivers/media/dvb-frontends/drx39xyj/drx39xxj.h
new file mode 100644
index 000000000000..cfd0b96b6939
--- /dev/null
+++ b/drivers/media/dvb-frontends/drx39xyj/drx39xxj.h
@@ -0,0 +1,45 @@
+/*
+ * Driver for Micronas DRX39xx family (drx3933j)
+ *
+ * Written by Devin Heitmueller <devin.heitmueller@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef DRX39XXJ_H
+#define DRX39XXJ_H
+
+#include <linux/dvb/frontend.h>
+#include "dvb_frontend.h"
+#include "drx_driver.h"
+
+struct drx39xxj_state {
+ struct i2c_adapter *i2c;
+ struct drx_demod_instance *demod;
+ struct dvb_frontend frontend;
+ unsigned int i2c_gate_open:1;
+ const struct firmware *fw;
+};
+
+#if IS_ENABLED(CONFIG_DVB_DRX39XYJ)
+struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c)
+{
+ return NULL;
+}
+#endif
+
+#endif /* DRX39XXJ_H */
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h
new file mode 100644
index 000000000000..354ec07eae87
--- /dev/null
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h
@@ -0,0 +1,256 @@
+/*
+ Copyright (c), 2004-2005,2007-2010 Trident Microsystems, Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Trident Microsystems nor Hauppauge Computer Works
+ nor the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*******************************************************************************
+* FILENAME: $Id: drx_dap_fasi.h,v 1.5 2009/07/07 14:21:40 justin Exp $
+*
+* DESCRIPTION:
+* Part of DRX driver.
+* Data access protocol: Fast Access Sequential Interface (fasi)
+* Fast access, because of short addressing format (16 instead of 32 bits addr)
+* Sequential, because of I2C.
+*
+* USAGE:
+* Include.
+*
+* NOTES:
+*
+*
+*******************************************************************************/
+
+/*-------- compilation control switches --------------------------------------*/
+
+#ifndef __DRX_DAP_FASI_H__
+#define __DRX_DAP_FASI_H__
+
+/*-------- Required includes -------------------------------------------------*/
+
+#include "drx_driver.h"
+
+/*-------- Defines, configuring the API --------------------------------------*/
+
+/********************************************
+* Allowed address formats
+********************************************/
+
+/*
+* Comments about short/long addressing format:
+*
+* The DAP FASI offers long address format (4 bytes) and short address format
+* (2 bytes). The DAP can operate in 3 modes:
+* (1) only short
+* (2) only long
+* (3) both long and short but short preferred and long only when necessary
+*
+* These modes must be selected compile time via compile switches.
+* Compile switch settings for the different modes:
+* (1) DRXDAPFASI_LONG_ADDR_ALLOWED=0, DRXDAPFASI_SHORT_ADDR_ALLOWED=1
+* (2) DRXDAPFASI_LONG_ADDR_ALLOWED=1, DRXDAPFASI_SHORT_ADDR_ALLOWED=0
+* (3) DRXDAPFASI_LONG_ADDR_ALLOWED=1, DRXDAPFASI_SHORT_ADDR_ALLOWED=1
+*
+* The default setting will be (3) both long and short.
+* The default setting will need no compile switches.
+* The default setting must be overridden if compile switches are already
+* defined.
+*
+*/
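+
+/*
+ * For example (a sketch of the override mechanism described above), building
+ * the DAP in long-address-only mode (2) amounts to defining, before this
+ * header is included (for instance via -D compiler flags):
+ *
+ *   #define DRXDAPFASI_LONG_ADDR_ALLOWED  1
+ *   #define DRXDAPFASI_SHORT_ADDR_ALLOWED 0
+ */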
+
+/* set default */
+#if !defined(DRXDAPFASI_LONG_ADDR_ALLOWED)
+#define DRXDAPFASI_LONG_ADDR_ALLOWED 1
+#endif
+
+/* set default */
+#if !defined(DRXDAPFASI_SHORT_ADDR_ALLOWED)
+#define DRXDAPFASI_SHORT_ADDR_ALLOWED 1
+#endif
+
+/* check */
+#if ((DRXDAPFASI_LONG_ADDR_ALLOWED == 0) && \
+ (DRXDAPFASI_SHORT_ADDR_ALLOWED == 0))
+#error At least one of short- or long-addressing format must be allowed.
+*; /* illegal statement to force compiler error */
+#endif
+
+/********************************************
+* Single/master multi master setting
+********************************************/
+/*
+* Comments about SINGLE MASTER/MULTI MASTER modes:
+*
+* Consider the two sides: 1) the master and 2) the slave.
+*
+* Master:
+* Single/multimaster operation set via DRXDAP_SINGLE_MASTER compile switch
+* + single master mode means no use of repeated starts
+* + multi master mode means use of repeated starts
+* Default is single master.
+* Default can be overridden by setting the compile switch DRXDAP_SINGLE_MASTER.
+*
+* Slave:
+* Single/multi master selected via the flags in the FASI protocol.
+* + single master means remember memory address between i2c packets
+* + multimaster means flush memory address between i2c packets
+* Default is single master; DAP FASI silently changes a multi-master setting
+* into a single master setting. This cannot be overridden.
+*
+*/
+/* set default */
+#ifndef DRXDAP_SINGLE_MASTER
+#define DRXDAP_SINGLE_MASTER 0
+#endif
+
+/********************************************
+* Chunk/mode checking
+********************************************/
+/*
+* Comments about DRXDAP_MAX_WCHUNKSIZE in single or multi master mode and
+* in combination with short and long addressing format. All text below
+* assumes long addressing format. The table also includes information
+* for the short addressing format.
+*
+* In single master mode, data can be written by sending the register address
+* first, then two or four bytes of data in the next packet.
+* Because the device address plus a register address equals five bytes,
+* the minimum chunk size must be five.
+* If ten-bit I2C device addresses are used, the minimum chunk size must be six,
+* because the I2C device address will then occupy two bytes when writing.
+*
+* Data in single master mode is transferred as follows:
+* <S> <devW> a0 a1 a2 a3 <P>
+* <S> <devW> d0 d1 [d2 d3] <P>
+* ..
+* or
+* ..
+* <S> <devW> a0 a1 a2 a3 <P>
+* <S> <devR> --- <P>
+*
+* In multi-master mode, the data must immediately follow the address (an I2C
+* stop resets the internal address), and hence the minimum chunk size is
+* 1 <I2C address> + 4 (register address) + 2 (data to send) = 7 bytes (8 if
+* 10-bit I2C device addresses are used).
+*
+* Whether a 7-bit or a 10-bit I2C device address is used is a runtime parameter.
+* The other parameters can be limited via compile time switches.
+*
+*-------------------------------------------------------------------------------
+*
+* Minimum chunk size table (in bytes):
+*
+* +----------------+----------------+
+* | 7b i2c addr | 10b i2c addr |
+* +----------------+----------------+
+* | single | multi | single | multi |
+* ------+--------+-------+--------+-------+
+* short | 3 | 5 | 4 | 6 |
+* long | 5 | 7 | 6 | 8 |
+* ------+--------+-------+--------+-------+
+*
+*/
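+
+/*
+ * For example (following the table above): with 7-bit I2C device addresses,
+ * long addressing format and single master mode, the smallest legal value is
+ * 5 bytes (1 device-address byte + 4 register-address bytes), so
+ *
+ *   #define DRXDAP_MAX_WCHUNKSIZE 5
+ *
+ * would pass the checks below, while any smaller value triggers an #error.
+ */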
+
+/* set default */
+#if !defined(DRXDAP_MAX_WCHUNKSIZE)
+#define DRXDAP_MAX_WCHUNKSIZE 254
+#endif
+
+/* check */
+#if ((DRXDAPFASI_LONG_ADDR_ALLOWED == 0) && (DRXDAPFASI_SHORT_ADDR_ALLOWED == 1))
+#if DRXDAP_SINGLE_MASTER
+#define DRXDAP_MAX_WCHUNKSIZE_MIN 3
+#else
+#define DRXDAP_MAX_WCHUNKSIZE_MIN 5
+#endif
+#else
+#if DRXDAP_SINGLE_MASTER
+#define DRXDAP_MAX_WCHUNKSIZE_MIN 5
+#else
+#define DRXDAP_MAX_WCHUNKSIZE_MIN 7
+#endif
+#endif
+
+#if DRXDAP_MAX_WCHUNKSIZE < DRXDAP_MAX_WCHUNKSIZE_MIN
+#if ((DRXDAPFASI_LONG_ADDR_ALLOWED == 0) && (DRXDAPFASI_SHORT_ADDR_ALLOWED == 1))
+#if DRXDAP_SINGLE_MASTER
+#error DRXDAP_MAX_WCHUNKSIZE must be at least 3 in single master mode
+*; /* illegal statement to force compiler error */
+#else
+#error DRXDAP_MAX_WCHUNKSIZE must be at least 5 in multi master mode
+*; /* illegal statement to force compiler error */
+#endif
+#else
+#if DRXDAP_SINGLE_MASTER
+#error DRXDAP_MAX_WCHUNKSIZE must be at least 5 in single master mode
+*; /* illegal statement to force compiler error */
+#else
+#error DRXDAP_MAX_WCHUNKSIZE must be at least 7 in multi master mode
+*; /* illegal statement to force compiler error */
+#endif
+#endif
+#endif
+
+/* set default */
+#if !defined(DRXDAP_MAX_RCHUNKSIZE)
+#define DRXDAP_MAX_RCHUNKSIZE 254
+#endif
+
+/* check */
+#if DRXDAP_MAX_RCHUNKSIZE < 2
+#error DRXDAP_MAX_RCHUNKSIZE must be at least 2
+*; /* illegal statement to force compiler error */
+#endif
+
+/* check */
+#if DRXDAP_MAX_RCHUNKSIZE & 1
+#error DRXDAP_MAX_RCHUNKSIZE must be even
+*; /* illegal statement to force compiler error */
+#endif
+
+/*-------- Public API functions ----------------------------------------------*/
+
+extern struct drx_access_func drx_dap_fasi_funct_g;
+
+#define DRXDAP_FASI_RMW 0x10000000
+#define DRXDAP_FASI_BROADCAST 0x20000000
+#define DRXDAP_FASI_CLEARCRC 0x80000000
+#define DRXDAP_FASI_SINGLE_MASTER 0xC0000000
+#define DRXDAP_FASI_MULTI_MASTER 0x40000000
+#define DRXDAP_FASI_SMM_SWITCH 0x40000000 /* single/multi master switch */
+#define DRXDAP_FASI_MODEFLAGS 0xC0000000
+#define DRXDAP_FASI_FLAGS 0xF0000000
+
+#define DRXDAP_FASI_ADDR2BLOCK(addr) (((addr)>>22)&0x3F)
+#define DRXDAP_FASI_ADDR2BANK(addr) (((addr)>>16)&0x3F)
+#define DRXDAP_FASI_ADDR2OFFSET(addr) ((addr)&0x7FFF)
+
+#define DRXDAP_FASI_SHORT_FORMAT(addr) (((addr) & 0xFC30FF80) == 0)
+#define DRXDAP_FASI_LONG_FORMAT(addr) (((addr) & 0xFC30FF80) != 0)
+#define DRXDAP_FASI_OFFSET_TOO_LARGE(addr) (((addr) & 0x00008000) != 0)
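+
+/*
+ * Illustrative decode (example address value only): for addr = 0x00050023
+ * the macros above yield block 0, bank 0x05 and offset 0x0023, and
+ * DRXDAP_FASI_SHORT_FORMAT(0x00050023) is true, i.e. the address fits the
+ * short (2-byte) addressing format.
+ */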
+
+#endif /* __DRX_DAP_FASI_H__ */
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
new file mode 100644
index 000000000000..9076bf21cc8a
--- /dev/null
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
@@ -0,0 +1,2343 @@
+/*
+ Copyright (c), 2004-2005,2007-2010 Trident Microsystems, Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Trident Microsystems nor Hauppauge Computer Works
+ nor the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef __DRXDRIVER_H__
+#define __DRXDRIVER_H__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+
+/*
+ * This structure contains the I2C address, the device ID and a user_data pointer.
+ * The user_data pointer can be used for application specific purposes.
+ */
+struct i2c_device_addr {
+ u16 i2c_addr; /* The I2C address of the device. */
+ u16 i2c_dev_id; /* The device identifier. */
+ void *user_data; /* User data pointer */
+};
+
+/**
+* \def IS_I2C_10BIT( addr )
+* \brief Determine whether I2C address 'addr' is a 10-bit address or not.
+* \param addr The I2C address.
+* \return int.
+* \retval 0 if the address is not a 10-bit I2C address.
+* \retval 1 if the address is a 10-bit I2C address.
+*/
+#define IS_I2C_10BIT(addr) \
+ (((addr) & 0xF8) == 0xF0)
+
+/*------------------------------------------------------------------------------
+Exported FUNCTIONS
+------------------------------------------------------------------------------*/
+
+/**
+* \fn drxbsp_i2c_init()
+* \brief Initialize I2C communication module.
+* \return int Return status.
+* \retval 0 Initialization successful.
+* \retval -EIO Initialization failed.
+*/
+int drxbsp_i2c_init(void);
+
+/**
+* \fn drxbsp_i2c_term()
+* \brief Terminate I2C communication module.
+* \return int Return status.
+* \retval 0 Termination successful.
+* \retval -EIO Termination failed.
+*/
+int drxbsp_i2c_term(void);
+
+/**
+* \fn int drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr,
+* u16 w_count,
+* u8 * wData,
+* struct i2c_device_addr *r_dev_addr,
+* u16 r_count,
+* u8 * r_data)
+* \brief Read and/or write count bytes from I2C bus, store them in data[].
+* \param w_dev_addr The device i2c address and the device ID to write to
+* \param w_count The number of bytes to write
+* \param wData The array containing the data to write
+* \param r_dev_addr The device i2c address and the device ID to read from
+* \param r_count The number of bytes to read
+* \param r_data The array in which to store the read data
+* \return int Return status.
+* \retval 0 Success.
+* \retval -EIO Failure.
+* \retval -EINVAL Parameter 'w_count' is not zero but parameter
+* 'wData' contains NULL.
+* Idem for 'r_count' and 'r_data'.
+* Both w_dev_addr and r_dev_addr are NULL.
+*
+* This function must implement an atomic write and/or read action on the I2C bus.
+* No other process may use the I2C bus when this function is executing.
+* The critical section of this function runs from and including the I2C
+* write, up to and including the I2C read action.
+*
+* The device ID can be useful if several devices share an I2C address.
+* It can be used to control a "switch" on the I2C bus to the correct device.
+*/
+int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
+ u16 w_count,
+ u8 *wData,
+ struct i2c_device_addr *r_dev_addr,
+ u16 r_count, u8 *r_data);
+
+/**
+* \fn drxbsp_i2c_error_text()
+* \brief Returns a human readable error.
+* Counter part of numerical drx_i2c_error_g.
+*
+* \return char* Pointer to human readable error text.
+*/
+char *drxbsp_i2c_error_text(void);
+
+/**
+* \var drx_i2c_error_g;
+* \brief I2C specific error codes, platform dependent.
+*/
+extern int drx_i2c_error_g;
+
+#define TUNER_MODE_SUB0 0x0001 /* for sub-mode (e.g. RF-AGC setting) */
+#define TUNER_MODE_SUB1 0x0002 /* for sub-mode (e.g. RF-AGC setting) */
+#define TUNER_MODE_SUB2 0x0004 /* for sub-mode (e.g. RF-AGC setting) */
+#define TUNER_MODE_SUB3 0x0008 /* for sub-mode (e.g. RF-AGC setting) */
+#define TUNER_MODE_SUB4 0x0010 /* for sub-mode (e.g. RF-AGC setting) */
+#define TUNER_MODE_SUB5 0x0020 /* for sub-mode (e.g. RF-AGC setting) */
+#define TUNER_MODE_SUB6 0x0040 /* for sub-mode (e.g. RF-AGC setting) */
+#define TUNER_MODE_SUB7 0x0080 /* for sub-mode (e.g. RF-AGC setting) */
+
+#define TUNER_MODE_DIGITAL 0x0100 /* for digital channel (e.g. DVB-T) */
+#define TUNER_MODE_ANALOG 0x0200 /* for analog channel (e.g. PAL) */
+#define TUNER_MODE_SWITCH 0x0400 /* during channel switch & scanning */
+#define TUNER_MODE_LOCK 0x0800 /* after tuner has locked */
+#define TUNER_MODE_6MHZ 0x1000 /* for 6MHz bandwidth channels */
+#define TUNER_MODE_7MHZ 0x2000 /* for 7MHz bandwidth channels */
+#define TUNER_MODE_8MHZ 0x4000 /* for 8MHz bandwidth channels */
+
+#define TUNER_MODE_SUB_MAX 8
+#define TUNER_MODE_SUBALL (TUNER_MODE_SUB0 | TUNER_MODE_SUB1 | \
+ TUNER_MODE_SUB2 | TUNER_MODE_SUB3 | \
+ TUNER_MODE_SUB4 | TUNER_MODE_SUB5 | \
+ TUNER_MODE_SUB6 | TUNER_MODE_SUB7)
+
+
+enum tuner_lock_status {
+ TUNER_LOCKED,
+ TUNER_NOT_LOCKED
+};
+
+struct tuner_common {
+ char *name; /* Tuner brand & type name */
+ s32 min_freq_rf; /* Lowest RF input frequency, in kHz */
+ s32 max_freq_rf; /* Highest RF input frequency, in kHz */
+
+ u8 sub_mode; /* Index to sub-mode in use */
+ char ***sub_mode_descriptions; /* Pointer to description of sub-modes */
+ u8 sub_modes; /* Number of available sub-modes */
+
+ /* The following fields will be either 0, NULL or false and do not need
+ initialisation */
+ void *self_check; /* gives proof of initialization */
+ bool programmed; /* only valid if self_check is OK */
+ s32 r_ffrequency; /* only valid if programmed */
+ s32 i_ffrequency; /* only valid if programmed */
+
+ void *my_user_data; /* pointer to associated demod instance */
+ u16 my_capabilities; /* value for storing application flags */
+};
+
+struct tuner_instance;
+
+typedef int(*tuner_open_func_t) (struct tuner_instance *tuner);
+typedef int(*tuner_close_func_t) (struct tuner_instance *tuner);
+
+typedef int(*tuner_set_frequency_func_t) (struct tuner_instance *tuner,
+ u32 mode,
+ s32
+ frequency);
+
+typedef int(*tuner_get_frequency_func_t) (struct tuner_instance *tuner,
+ u32 mode,
+ s32 *
+ r_ffrequency,
+ s32 *
+ i_ffrequency);
+
+typedef int(*tuner_lock_status_func_t) (struct tuner_instance *tuner,
+ enum tuner_lock_status *
+ lock_stat);
+
+typedef int(*tune_ri2c_write_read_func_t) (struct tuner_instance *tuner,
+ struct i2c_device_addr *
+ w_dev_addr, u16 w_count,
+ u8 *wData,
+ struct i2c_device_addr *
+ r_dev_addr, u16 r_count,
+ u8 *r_data);
+
+struct tuner_ops {
+ tuner_open_func_t open_func;
+ tuner_close_func_t close_func;
+ tuner_set_frequency_func_t set_frequency_func;
+ tuner_get_frequency_func_t get_frequency_func;
+ tuner_lock_status_func_t lock_status_func;
+ tune_ri2c_write_read_func_t i2c_write_read_func;
+
+};
+
+struct tuner_instance {
+ struct i2c_device_addr my_i2c_dev_addr;
+ struct tuner_common *my_common_attr;
+ void *my_ext_attr;
+ struct tuner_ops *my_funct;
+};
+
+int drxbsp_tuner_set_frequency(struct tuner_instance *tuner,
+ u32 mode,
+ s32 frequency);
+
+int drxbsp_tuner_get_frequency(struct tuner_instance *tuner,
+ u32 mode,
+ s32 *r_ffrequency,
+ s32 *i_ffrequency);
+
+int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
+ struct i2c_device_addr *w_dev_addr,
+ u16 w_count,
+ u8 *wData,
+ struct i2c_device_addr *r_dev_addr,
+ u16 r_count, u8 *r_data);
+
+/**************
+*
+* This section configures the DRX Data Access Protocols (DAPs).
+*
+**************/
+
+/**
+* \def DRXDAP_SINGLE_MASTER
+* \brief Enable I2C single or I2C multimaster mode on host.
+*
+* Set to 1 to enable single master mode
+* Set to 0 to enable multi master mode
+*
+* The actual DAP implementation may be restricted to only one of the modes.
+* A compiler warning or error will be generated if the DAP implementation
+* overrides or cannot handle the mode defined below.
+*
+*/
+#ifndef DRXDAP_SINGLE_MASTER
+#define DRXDAP_SINGLE_MASTER 1
+#endif
+
+/**
+* \def DRXDAP_MAX_WCHUNKSIZE
+* \brief Defines maximum chunksize of an i2c write action by host.
+*
+* This indicates the maximum size of data the I2C device driver is able to
+* write at a time. This includes I2C device address and register addressing.
+*
+* This maximum size may be restricted by the actual DAP implementation.
+* A compiler warning or error will be generated if the DAP implementation
+* overrides or cannot handle the chunksize defined below.
+*
+* Beware that the DAP uses DRXDAP_MAX_WCHUNKSIZE to create a temporary data
+* buffer. Do not undefine it or choose a value that is too large, unless your
+* system is able to handle a stack buffer of that size.
+*
+*/
+#ifndef DRXDAP_MAX_WCHUNKSIZE
+#define DRXDAP_MAX_WCHUNKSIZE 60
+#endif
+
+/**
+* \def DRXDAP_MAX_RCHUNKSIZE
+* \brief Defines maximum chunksize of an i2c read action by host.
+*
+* This indicates the maximum size of data the I2C device driver is able to read
+* at a time. Minimum value is 2. Also, the read chunk size must be even.
+*
+* This maximum size may be restricted by the actual DAP implementation.
+* A compiler warning or error will be generated if the DAP implementation
+* overrides or cannot handle the chunksize defined below.
+*
+*/
+#ifndef DRXDAP_MAX_RCHUNKSIZE
+#define DRXDAP_MAX_RCHUNKSIZE 60
+#endif
+
+/**************
+*
+* This section describes drxdriver defines.
+*
+**************/
+
+/**
+* \def DRX_UNKNOWN
+* \brief Generic UNKNOWN value for DRX enumerated types.
+*
+* Used to indicate that the parameter value is unknown or not yet initialized.
+*/
+#ifndef DRX_UNKNOWN
+#define DRX_UNKNOWN (254)
+#endif
+
+/**
+* \def DRX_AUTO
+* \brief Generic AUTO value for DRX enumerated types.
+*
+* Used to instruct the driver to automatically determine the value of the
+* parameter.
+*/
+#ifndef DRX_AUTO
+#define DRX_AUTO (255)
+#endif
+
+/**************
+*
+* This section describes flag definitions for the device capabilities.
+*
+**************/
+
+/**
+* \brief LNA capability flag
+*
+* Device has a Low Noise Amplifier
+*
+*/
+#define DRX_CAPABILITY_HAS_LNA (1UL << 0)
+/**
+* \brief OOB-RX capability flag
+*
+* Device has OOB-RX
+*
+*/
+#define DRX_CAPABILITY_HAS_OOBRX (1UL << 1)
+/**
+* \brief ATV capability flag
+*
+* Device has ATV
+*
+*/
+#define DRX_CAPABILITY_HAS_ATV (1UL << 2)
+/**
+* \brief DVB-T capability flag
+*
+* Device has DVB-T
+*
+*/
+#define DRX_CAPABILITY_HAS_DVBT (1UL << 3)
+/**
+* \brief ITU-B capability flag
+*
+* Device has ITU-B
+*
+*/
+#define DRX_CAPABILITY_HAS_ITUB (1UL << 4)
+/**
+* \brief Audio capability flag
+*
+* Device has Audio
+*
+*/
+#define DRX_CAPABILITY_HAS_AUD (1UL << 5)
+/**
+* \brief SAW switch capability flag
+*
+* Device has SAW switch
+*
+*/
+#define DRX_CAPABILITY_HAS_SAWSW (1UL << 6)
+/**
+* \brief GPIO1 capability flag
+*
+* Device has GPIO1
+*
+*/
+#define DRX_CAPABILITY_HAS_GPIO1 (1UL << 7)
+/**
+* \brief GPIO2 capability flag
+*
+* Device has GPIO2
+*
+*/
+#define DRX_CAPABILITY_HAS_GPIO2 (1UL << 8)
+/**
+* \brief IRQN capability flag
+*
+* Device has IRQN
+*
+*/
+#define DRX_CAPABILITY_HAS_IRQN (1UL << 9)
+/**
+* \brief 8VSB capability flag
+*
+* Device has 8VSB
+*
+*/
+#define DRX_CAPABILITY_HAS_8VSB (1UL << 10)
+/**
+* \brief SMA-TX capability flag
+*
+* Device has SMATX
+*
+*/
+#define DRX_CAPABILITY_HAS_SMATX (1UL << 11)
+/**
+* \brief SMA-RX capability flag
+*
+* Device has SMARX
+*
+*/
+#define DRX_CAPABILITY_HAS_SMARX (1UL << 12)
+/**
+* \brief ITU-A/C capability flag
+*
+* Device has ITU-A/C
+*
+*/
+#define DRX_CAPABILITY_HAS_ITUAC (1UL << 13)
+
+/*-------------------------------------------------------------------------
+MACROS
+-------------------------------------------------------------------------*/
+/* Macros to stringify the version number */
+#define DRX_VERSIONSTRING(MAJOR, MINOR, PATCH) \
+ DRX_VERSIONSTRING_HELP(MAJOR)"." \
+ DRX_VERSIONSTRING_HELP(MINOR)"." \
+ DRX_VERSIONSTRING_HELP(PATCH)
+#define DRX_VERSIONSTRING_HELP(NUM) #NUM
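+
+/*
+ * For example, DRX_VERSIONSTRING(1, 2, 3) expands to "1" "." "2" "." "3",
+ * which the compiler concatenates into the string literal "1.2.3".
+ */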
+
+/**
+* \brief Macro to create byte array elements from 16 bit integers.
+* This macro is used to create byte arrays for block writes.
+* Block writes speed up I2C traffic between host and demod.
+* The macro takes care of the required byte order in a 16-bit word.
+* x->lowbyte(x), highbyte(x)
+*/
+#define DRX_16TO8(x) ((u8) (((u16)x) & 0xFF)), \
+ ((u8)((((u16)x)>>8)&0xFF))
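+
+/*
+ * For example, { DRX_16TO8(0x1234) } expands to { 0x34, 0x12 }: the low byte
+ * is emitted first, followed by the high byte.
+ */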
+
+/**
+* \brief Macro to sign extend signed 9 bit value to signed 16 bit value
+*/
+#define DRX_S9TOS16(x) ((((u16)x)&0x100) ? ((s16)((u16)(x)|0xFF00)) : (x))
+
+/**
+* \brief Macro to sign extend a signed 24 bit value to a signed 32 bit value
+*/
+#define DRX_S24TODRXFREQ(x) ((((u32) x) & 0x00800000UL) ? \
+ ((s32) \
+ (((u32) x) | 0xFF000000)) : \
+ ((s32) x))
+
+/**
+* \brief Macro to convert 16 bit register value to a s32
+*/
+#define DRX_U16TODRXFREQ(x) ((x & 0x8000) ? \
+ ((s32) \
+ (((u32) x) | 0xFFFF0000)) : \
+ ((s32) x))
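+
+/*
+ * Worked examples of the conversion macros above:
+ *   DRX_S9TOS16(0x1FF)         == (s16)0xFFFF     == -1
+ *   DRX_S24TODRXFREQ(0x800000) == (s32)0xFF800000 == -8388608
+ *   DRX_U16TODRXFREQ(0x8000)   == (s32)0xFFFF8000 == -32768
+ */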
+
+/*-------------------------------------------------------------------------
+ENUM
+-------------------------------------------------------------------------*/
+
+/**
+* \enum enum drx_standard
+* \brief Modulation standards.
+*/
+enum drx_standard {
+ DRX_STANDARD_DVBT = 0, /**< Terrestrial DVB-T. */
+ DRX_STANDARD_8VSB, /**< Terrestrial 8VSB. */
+ DRX_STANDARD_NTSC, /**< Terrestrial/Cable analog NTSC. */
+ DRX_STANDARD_PAL_SECAM_BG,
+ /**< Terrestrial analog PAL/SECAM B/G */
+ DRX_STANDARD_PAL_SECAM_DK,
+ /**< Terrestrial analog PAL/SECAM D/K */
+ DRX_STANDARD_PAL_SECAM_I,
+ /**< Terrestrial analog PAL/SECAM I */
+ DRX_STANDARD_PAL_SECAM_L,
+ /**< Terrestrial analog PAL/SECAM L
+ with negative modulation */
+ DRX_STANDARD_PAL_SECAM_LP,
+ /**< Terrestrial analog PAL/SECAM L
+ with positive modulation */
+ DRX_STANDARD_ITU_A, /**< Cable ITU ANNEX A. */
+ DRX_STANDARD_ITU_B, /**< Cable ITU ANNEX B. */
+ DRX_STANDARD_ITU_C, /**< Cable ITU ANNEX C. */
+ DRX_STANDARD_ITU_D, /**< Cable ITU ANNEX D. */
+ DRX_STANDARD_FM, /**< Terrestrial/Cable FM radio */
+ DRX_STANDARD_DTMB, /**< Terrestrial DTMB standard (China)*/
+ DRX_STANDARD_UNKNOWN = DRX_UNKNOWN,
+ /**< Standard unknown. */
+ DRX_STANDARD_AUTO = DRX_AUTO
+ /**< Autodetect standard. */
+};
+
+/**
+* \enum drx_substandard
+* \brief Modulation sub-standards.
+*/
+enum drx_substandard {
+ DRX_SUBSTANDARD_MAIN = 0, /**< Main subvariant of standard */
+ DRX_SUBSTANDARD_ATV_BG_SCANDINAVIA,
+ DRX_SUBSTANDARD_ATV_DK_POLAND,
+ DRX_SUBSTANDARD_ATV_DK_CHINA,
+ DRX_SUBSTANDARD_UNKNOWN = DRX_UNKNOWN,
+ /**< Sub-standard unknown. */
+ DRX_SUBSTANDARD_AUTO = DRX_AUTO
+ /**< Auto (default) sub-standard */
+};
+
+/**
+* \enum enum drx_bandwidth
+* \brief Channel bandwidth or channel spacing.
+*/
+enum drx_bandwidth {
+ DRX_BANDWIDTH_8MHZ = 0, /**< Bandwidth 8 MHz. */
+ DRX_BANDWIDTH_7MHZ, /**< Bandwidth 7 MHz. */
+ DRX_BANDWIDTH_6MHZ, /**< Bandwidth 6 MHz. */
+ DRX_BANDWIDTH_UNKNOWN = DRX_UNKNOWN,
+ /**< Bandwidth unknown. */
+ DRX_BANDWIDTH_AUTO = DRX_AUTO
+ /**< Auto Set Bandwidth */
+};
+
+/**
+* \enum enum drx_mirror
+* \brief Indicate if channel spectrum is mirrored or not.
+*/
+enum drx_mirror {
+ DRX_MIRROR_NO = 0, /**< Spectrum is not mirrored. */
+ DRX_MIRROR_YES, /**< Spectrum is mirrored. */
+ DRX_MIRROR_UNKNOWN = DRX_UNKNOWN,
+ /**< Unknown if spectrum is mirrored. */
+ DRX_MIRROR_AUTO = DRX_AUTO
+ /**< Autodetect if spectrum is mirrored. */
+};
+
+/**
+* \enum enum drx_modulation
+* \brief Constellation type of the channel.
+*/
+enum drx_modulation {
+ DRX_CONSTELLATION_BPSK = 0, /**< Modulation is BPSK. */
+ DRX_CONSTELLATION_QPSK, /**< Constellation is QPSK. */
+ DRX_CONSTELLATION_PSK8, /**< Constellation is PSK8. */
+ DRX_CONSTELLATION_QAM16, /**< Constellation is QAM16. */
+ DRX_CONSTELLATION_QAM32, /**< Constellation is QAM32. */
+ DRX_CONSTELLATION_QAM64, /**< Constellation is QAM64. */
+ DRX_CONSTELLATION_QAM128, /**< Constellation is QAM128. */
+ DRX_CONSTELLATION_QAM256, /**< Constellation is QAM256. */
+ DRX_CONSTELLATION_QAM512, /**< Constellation is QAM512. */
+ DRX_CONSTELLATION_QAM1024, /**< Constellation is QAM1024. */
+ DRX_CONSTELLATION_QPSK_NR, /**< Constellation is QPSK_NR */
+ DRX_CONSTELLATION_UNKNOWN = DRX_UNKNOWN,
+ /**< Constellation unknown. */
+ DRX_CONSTELLATION_AUTO = DRX_AUTO
+ /**< Autodetect constellation. */
+};
+
+/**
+* \enum enum drx_hierarchy
+* \brief Hierarchy of the channel.
+*/
+enum drx_hierarchy {
+ DRX_HIERARCHY_NONE = 0, /**< Non-hierarchical channel. */
+ DRX_HIERARCHY_ALPHA1, /**< Hierarchical channel, alpha=1. */
+ DRX_HIERARCHY_ALPHA2, /**< Hierarchical channel, alpha=2. */
+ DRX_HIERARCHY_ALPHA4, /**< Hierarchical channel, alpha=4. */
+ DRX_HIERARCHY_UNKNOWN = DRX_UNKNOWN,
+ /**< Hierarchy unknown. */
+ DRX_HIERARCHY_AUTO = DRX_AUTO
+ /**< Autodetect hierarchy. */
+};
+
+/**
+* \enum enum drx_priority
+* \brief Channel priority in case of hierarchical transmission.
+*/
+enum drx_priority {
+ DRX_PRIORITY_LOW = 0, /**< Low priority channel. */
+ DRX_PRIORITY_HIGH, /**< High priority channel. */
+ DRX_PRIORITY_UNKNOWN = DRX_UNKNOWN
+ /**< Priority unknown. */
+};
+
+/**
+* \enum drx_coderate
+* \brief Channel code rate.
+*/
+enum drx_coderate {
+ DRX_CODERATE_1DIV2 = 0, /**< Code rate 1/2. */
+ DRX_CODERATE_2DIV3, /**< Code rate 2/3. */
+ DRX_CODERATE_3DIV4, /**< Code rate 3/4. */
+ DRX_CODERATE_5DIV6, /**< Code rate 5/6. */
+ DRX_CODERATE_7DIV8, /**< Code rate 7/8. */
+ DRX_CODERATE_UNKNOWN = DRX_UNKNOWN,
+ /**< Code rate unknown. */
+ DRX_CODERATE_AUTO = DRX_AUTO
+ /**< Autodetect code rate. */
+};
+
+/**
+* \enum enum drx_guard
+* \brief Guard interval of a channel.
+*/
+enum drx_guard {
+ DRX_GUARD_1DIV32 = 0, /**< Guard interval 1/32nd. */
+ DRX_GUARD_1DIV16, /**< Guard interval 1/16th. */
+ DRX_GUARD_1DIV8, /**< Guard interval 1/8th. */
+ DRX_GUARD_1DIV4, /**< Guard interval 1/4th. */
+ DRX_GUARD_UNKNOWN = DRX_UNKNOWN,
+ /**< Guard interval unknown. */
+ DRX_GUARD_AUTO = DRX_AUTO
+ /**< Autodetect guard interval. */
+};
+
+/**
+* \enum enum drx_fft_mode
+* \brief FFT mode.
+*/
+enum drx_fft_mode {
+ DRX_FFTMODE_2K = 0, /**< 2K FFT mode. */
+ DRX_FFTMODE_4K, /**< 4K FFT mode. */
+ DRX_FFTMODE_8K, /**< 8K FFT mode. */
+ DRX_FFTMODE_UNKNOWN = DRX_UNKNOWN,
+ /**< FFT mode unknown. */
+ DRX_FFTMODE_AUTO = DRX_AUTO
+ /**< Autodetect FFT mode. */
+};
+
+/**
+* \enum enum drx_classification
+* \brief Channel classification.
+*/
+enum drx_classification {
+ DRX_CLASSIFICATION_GAUSS = 0, /**< Gaussian noise. */
+ DRX_CLASSIFICATION_HVY_GAUSS, /**< Heavy Gaussian noise. */
+ DRX_CLASSIFICATION_COCHANNEL, /**< Co-channel. */
+ DRX_CLASSIFICATION_STATIC, /**< Static echo. */
+ DRX_CLASSIFICATION_MOVING, /**< Moving echo. */
+ DRX_CLASSIFICATION_ZERODB, /**< Zero dB echo. */
+ DRX_CLASSIFICATION_UNKNOWN = DRX_UNKNOWN,
+ /**< Unknown classification */
+ DRX_CLASSIFICATION_AUTO = DRX_AUTO
+ /**< Autodetect classification. */
+};
+
+/**
+* \enum drx_interleave_mode
+* \brief Interleave modes
+*/
+enum drx_interleave_mode {
+ DRX_INTERLEAVEMODE_I128_J1 = 0,
+ DRX_INTERLEAVEMODE_I128_J1_V2,
+ DRX_INTERLEAVEMODE_I128_J2,
+ DRX_INTERLEAVEMODE_I64_J2,
+ DRX_INTERLEAVEMODE_I128_J3,
+ DRX_INTERLEAVEMODE_I32_J4,
+ DRX_INTERLEAVEMODE_I128_J4,
+ DRX_INTERLEAVEMODE_I16_J8,
+ DRX_INTERLEAVEMODE_I128_J5,
+ DRX_INTERLEAVEMODE_I8_J16,
+ DRX_INTERLEAVEMODE_I128_J6,
+ DRX_INTERLEAVEMODE_RESERVED_11,
+ DRX_INTERLEAVEMODE_I128_J7,
+ DRX_INTERLEAVEMODE_RESERVED_13,
+ DRX_INTERLEAVEMODE_I128_J8,
+ DRX_INTERLEAVEMODE_RESERVED_15,
+ DRX_INTERLEAVEMODE_I12_J17,
+ DRX_INTERLEAVEMODE_I5_J4,
+ DRX_INTERLEAVEMODE_B52_M240,
+ DRX_INTERLEAVEMODE_B52_M720,
+ DRX_INTERLEAVEMODE_B52_M48,
+ DRX_INTERLEAVEMODE_B52_M0,
+ DRX_INTERLEAVEMODE_UNKNOWN = DRX_UNKNOWN,
+ /**< Unknown interleave mode */
+ DRX_INTERLEAVEMODE_AUTO = DRX_AUTO
+ /**< Autodetect interleave mode */
+};
+
+/**
+* \enum enum drx_carrier_mode
+* \brief Channel Carrier Mode.
+*/
+enum drx_carrier_mode {
+ DRX_CARRIER_MULTI = 0, /**< Multi carrier mode */
+ DRX_CARRIER_SINGLE, /**< Single carrier mode */
+ DRX_CARRIER_UNKNOWN = DRX_UNKNOWN,
+ /**< Carrier mode unknown. */
+ DRX_CARRIER_AUTO = DRX_AUTO /**< Autodetect carrier mode */
+};
+
+/**
+* \enum enum drx_frame_mode
+* \brief Channel Frame Mode.
+*/
+enum drx_frame_mode {
+ DRX_FRAMEMODE_420 = 0, /**< 420 with variable PN */
+ DRX_FRAMEMODE_595, /**< 595 */
+ DRX_FRAMEMODE_945, /**< 945 with variable PN */
+ DRX_FRAMEMODE_420_FIXED_PN,
+ /**< 420 with fixed PN */
+ DRX_FRAMEMODE_945_FIXED_PN,
+ /**< 945 with fixed PN */
+ DRX_FRAMEMODE_UNKNOWN = DRX_UNKNOWN,
+ /**< Frame mode unknown. */
+ DRX_FRAMEMODE_AUTO = DRX_AUTO
+ /**< Autodetect frame mode */
+};
+
+/**
+* \enum enum drx_tps_frame
+* \brief Frame number in current super-frame.
+*/
+enum drx_tps_frame {
+ DRX_TPS_FRAME1 = 0, /**< TPS frame 1. */
+ DRX_TPS_FRAME2, /**< TPS frame 2. */
+ DRX_TPS_FRAME3, /**< TPS frame 3. */
+ DRX_TPS_FRAME4, /**< TPS frame 4. */
+ DRX_TPS_FRAME_UNKNOWN = DRX_UNKNOWN
+ /**< TPS frame unknown. */
+};
+
+/**
+* \enum enum drx_ldpc
+* \brief TPS LDPC .
+*/
+enum drx_ldpc {
+ DRX_LDPC_0_4 = 0, /**< LDPC 0.4 */
+ DRX_LDPC_0_6, /**< LDPC 0.6 */
+ DRX_LDPC_0_8, /**< LDPC 0.8 */
+ DRX_LDPC_UNKNOWN = DRX_UNKNOWN,
+ /**< LDPC unknown. */
+ DRX_LDPC_AUTO = DRX_AUTO /**< Autodetect LDPC */
+};
+
+/**
+* \enum enum drx_pilot_mode
+* \brief Pilot modes in DTMB.
+*/
+enum drx_pilot_mode {
+ DRX_PILOT_ON = 0, /**< Pilot On */
+ DRX_PILOT_OFF, /**< Pilot Off */
+ DRX_PILOT_UNKNOWN = DRX_UNKNOWN,
+ /**< Pilot unknown. */
+ DRX_PILOT_AUTO = DRX_AUTO /**< Autodetect Pilot */
+};
+
+/**
+ * enum drxu_code_action - indicate if firmware has to be uploaded or verified.
+ * @UCODE_UPLOAD: Upload the microcode image to device
+ * @UCODE_VERIFY: Compare microcode image with code on device
+ */
+enum drxu_code_action {
+ UCODE_UPLOAD,
+ UCODE_VERIFY
+};
+
+/**
+* \enum drx_lock_status
+* \brief Used to reflect the current lock status of the demodulator.
+*
+* The generic lock states have device dependent semantics:
+*
+* - DRX_NEVER_LOCK:    Device will never lock on this signal
+* - DRX_NOT_LOCKED:    Device has no lock at all
+* - DRX_LOCK_STATE_1 .. DRX_LOCK_STATE_9: Generic lock states
+* - DRX_LOCKED:        Device is in lock
+*/
+
+enum drx_lock_status {
+ DRX_NEVER_LOCK = 0,
+ DRX_NOT_LOCKED,
+ DRX_LOCK_STATE_1,
+ DRX_LOCK_STATE_2,
+ DRX_LOCK_STATE_3,
+ DRX_LOCK_STATE_4,
+ DRX_LOCK_STATE_5,
+ DRX_LOCK_STATE_6,
+ DRX_LOCK_STATE_7,
+ DRX_LOCK_STATE_8,
+ DRX_LOCK_STATE_9,
+ DRX_LOCKED
+};
+
+/**
+* \enum drx_uio
+* \brief Used to address a User IO (UIO).
+*/
+enum drx_uio {
+ DRX_UIO1,
+ DRX_UIO2,
+ DRX_UIO3,
+ DRX_UIO4,
+ DRX_UIO5,
+ DRX_UIO6,
+ DRX_UIO7,
+ DRX_UIO8,
+ DRX_UIO9,
+ DRX_UIO10,
+ DRX_UIO11,
+ DRX_UIO12,
+ DRX_UIO13,
+ DRX_UIO14,
+ DRX_UIO15,
+ DRX_UIO16,
+ DRX_UIO17,
+ DRX_UIO18,
+ DRX_UIO19,
+ DRX_UIO20,
+ DRX_UIO21,
+ DRX_UIO22,
+ DRX_UIO23,
+ DRX_UIO24,
+ DRX_UIO25,
+ DRX_UIO26,
+ DRX_UIO27,
+ DRX_UIO28,
+ DRX_UIO29,
+ DRX_UIO30,
+ DRX_UIO31,
+ DRX_UIO32,
+ DRX_UIO_MAX = DRX_UIO32
+};
+
+/**
+* \enum drxuio_mode
+* \brief Used to configure the modus operandi of a UIO.
+*
+* DRX_UIO_MODE_FIRMWARE is an old uio mode.
+* It is replaced by the modes DRX_UIO_MODE_FIRMWARE0 .. DRX_UIO_MODE_FIRMWARE9.
+* To be backward compatible DRX_UIO_MODE_FIRMWARE is equivalent to
+* DRX_UIO_MODE_FIRMWARE0.
+*/
+enum drxuio_mode {
+ DRX_UIO_MODE_DISABLE = 0x01,
+ /**< not used, pin is configured as input */
+ DRX_UIO_MODE_READWRITE = 0x02,
+ /**< used for read/write by application */
+ DRX_UIO_MODE_FIRMWARE = 0x04,
+ /**< controlled by firmware, function 0 */
+ DRX_UIO_MODE_FIRMWARE0 = DRX_UIO_MODE_FIRMWARE,
+ /**< same as above */
+ DRX_UIO_MODE_FIRMWARE1 = 0x08,
+ /**< controlled by firmware, function 1 */
+ DRX_UIO_MODE_FIRMWARE2 = 0x10,
+ /**< controlled by firmware, function 2 */
+ DRX_UIO_MODE_FIRMWARE3 = 0x20,
+ /**< controlled by firmware, function 3 */
+ DRX_UIO_MODE_FIRMWARE4 = 0x40,
+ /**< controlled by firmware, function 4 */
+ DRX_UIO_MODE_FIRMWARE5 = 0x80
+ /**< controlled by firmware, function 5 */
+};
+
+/**
+* \enum drxoob_downstream_standard
+* \brief Used to select the OOB standard.
+*
+* Based on ANSI 55-1 and 55-2
+*/
+enum drxoob_downstream_standard {
+ DRX_OOB_MODE_A = 0,
+ /**< ANSI 55-1 */
+ DRX_OOB_MODE_B_GRADE_A,
+ /**< ANSI 55-2 A */
+ DRX_OOB_MODE_B_GRADE_B
+ /**< ANSI 55-2 B */
+};
+
+/*-------------------------------------------------------------------------
+STRUCTS
+-------------------------------------------------------------------------*/
+
+/*============================================================================*/
+/*============================================================================*/
+/*== CTRL CFG related data structures ========================================*/
+/*============================================================================*/
+/*============================================================================*/
+
+#ifndef DRX_CFG_BASE
+#define DRX_CFG_BASE 0
+#endif
+
+#define DRX_CFG_MPEG_OUTPUT (DRX_CFG_BASE + 0) /* MPEG TS output */
+#define DRX_CFG_PKTERR (DRX_CFG_BASE + 1) /* Packet Error */
+#define DRX_CFG_SYMCLK_OFFS (DRX_CFG_BASE + 2) /* Symbol Clk Offset */
+#define DRX_CFG_SMA (DRX_CFG_BASE + 3) /* Smart Antenna */
+#define DRX_CFG_PINSAFE (DRX_CFG_BASE + 4) /* Pin safe mode */
+#define DRX_CFG_SUBSTANDARD (DRX_CFG_BASE + 5) /* substandard */
+#define DRX_CFG_AUD_VOLUME (DRX_CFG_BASE + 6) /* volume */
+#define DRX_CFG_AUD_RDS (DRX_CFG_BASE + 7) /* rds */
+#define DRX_CFG_AUD_AUTOSOUND (DRX_CFG_BASE + 8) /* ASS & ASC */
+#define DRX_CFG_AUD_ASS_THRES (DRX_CFG_BASE + 9) /* ASS Thresholds */
+#define DRX_CFG_AUD_DEVIATION (DRX_CFG_BASE + 10) /* Deviation */
+#define DRX_CFG_AUD_PRESCALE (DRX_CFG_BASE + 11) /* Prescale */
+#define DRX_CFG_AUD_MIXER (DRX_CFG_BASE + 12) /* Mixer */
+#define DRX_CFG_AUD_AVSYNC (DRX_CFG_BASE + 13) /* AVSync */
+#define DRX_CFG_AUD_CARRIER (DRX_CFG_BASE + 14) /* Audio carriers */
+#define DRX_CFG_I2S_OUTPUT (DRX_CFG_BASE + 15) /* I2S output */
+#define DRX_CFG_ATV_STANDARD (DRX_CFG_BASE + 16) /* ATV standard */
+#define DRX_CFG_SQI_SPEED (DRX_CFG_BASE + 17) /* SQI speed */
+#define DRX_CTRL_CFG_MAX (DRX_CFG_BASE + 18) /* never to be used */
+
+#define DRX_CFG_PINS_SAFE_MODE DRX_CFG_PINSAFE
+/*============================================================================*/
+/*============================================================================*/
+/*== CTRL related data structures ============================================*/
+/*============================================================================*/
+/*============================================================================*/
+
+/**
+ * struct drxu_code_info - Parameters for microcode upload and verify.
+ *
+ * @mc_file: microcode file name
+ *
+ * Used by DRX_CTRL_LOAD_UCODE and DRX_CTRL_VERIFY_UCODE
+ */
+struct drxu_code_info {
+ char *mc_file;
+};
+
+/**
+* \struct drx_mc_version_rec_t
+* \brief Microcode version record
+* Version numbers are stored in BCD format, as usual:
+* o major number = bits 31-20 (first three nibbles of MSW)
+* o minor number = bits 19-16 (fourth nibble of MSW)
+* o patch number = bits 15-0 (remaining nibbles in LSW)
+*
+* The device type indicates for which the device is meant. It is based on the
+* JTAG ID, using everything except the bond ID and the metal fix.
+*
+* Special values:
+* - mc_dev_type == 0 => any device allowed
+* - mc_base_version == 0.0.0 => full microcode (mc_version is the version)
+* - mc_base_version != 0.0.0 => patch microcode, the base microcode version
+* (mc_version is the version)
+*/
+#define AUX_VER_RECORD 0x8000
+
+struct drx_mc_version_rec {
+ u16 aux_type; /* type of aux data - 0x8000 for version record */
+ u32 mc_dev_type; /* device type, based on JTAG ID */
+ u32 mc_version; /* version of microcode */
+ u32 mc_base_version; /* in case of patch: the original microcode version */
+};
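+
+/*
+ * Illustrative decode of the BCD layout described above (local variable
+ * names are hypothetical; only the bit positions matter):
+ *
+ *   major = (mc_version >> 20) & 0xFFF;
+ *   minor = (mc_version >> 16) & 0xF;
+ *   patch = mc_version & 0xFFFF;
+ */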
+
+/*========================================*/
+
+/**
+* \struct drx_filter_info_t
+* \brief Parameters for loading filter coefficients
+*
+* Used by DRX_CTRL_LOAD_FILTER
+*/
+struct drx_filter_info {
+ u8 *data_re;
+ /**< pointer to coefficients for RE */
+ u8 *data_im;
+ /**< pointer to coefficients for IM */
+ u16 size_re;
+ /**< size of coefficients for RE */
+ u16 size_im;
+ /**< size of coefficients for IM */
+};
+
+/*========================================*/
+
+/**
+* \struct drx_channel
+* \brief The set of parameters describing a single channel.
+*
+* Used by DRX_CTRL_SET_CHANNEL and DRX_CTRL_GET_CHANNEL.
+* Only certain fields need to be used for a specific standard.
+*
+*/
+struct drx_channel {
+ s32 frequency;
+ /**< frequency in kHz */
+ enum drx_bandwidth bandwidth;
+ /**< bandwidth */
+ enum drx_mirror mirror; /**< mirrored or not on RF */
+ enum drx_modulation constellation;
+ /**< constellation */
+ enum drx_hierarchy hierarchy;
+ /**< hierarchy */
+ enum drx_priority priority; /**< priority */
+ enum drx_coderate coderate; /**< coderate */
+ enum drx_guard guard; /**< guard interval */
+ enum drx_fft_mode fftmode; /**< fftmode */
+ enum drx_classification classification;
+ /**< classification */
+ u32 symbolrate;
+ /**< symbolrate in symbols/sec */
+ enum drx_interleave_mode interleavemode;
+ /**< interleaveMode QAM */
+ enum drx_ldpc ldpc; /**< ldpc */
+ enum drx_carrier_mode carrier; /**< carrier */
+ enum drx_frame_mode framemode;
+ /**< frame mode */
+ enum drx_pilot_mode pilot; /**< pilot mode */
+};
+
+/*========================================*/
+
+enum drx_cfg_sqi_speed {
+ DRX_SQI_SPEED_FAST = 0,
+ DRX_SQI_SPEED_MEDIUM,
+ DRX_SQI_SPEED_SLOW,
+ DRX_SQI_SPEED_UNKNOWN = DRX_UNKNOWN
+};
+
+/*========================================*/
+
+/**
+* \struct drx_complex
+* \brief A complex number.
+*
+* Used by DRX_CTRL_CONSTEL.
+*/
+struct drx_complex {
+ s16 im;
+ /**< Imaginary part. */
+ s16 re;
+ /**< Real part. */
+};
+
+/*========================================*/
+
+/**
+* \struct drx_frequency_plan
+* \brief Array element of a frequency plan.
+*
+* Used by DRX_CTRL_SCAN_INIT.
+*/
+struct drx_frequency_plan {
+ s32 first;
+ /**< First centre frequency in this band */
+ s32 last;
+ /**< Last centre frequency in this band */
+ s32 step;
+ /**< Stepping frequency in this band */
+ enum drx_bandwidth bandwidth;
+ /**< Bandwidth within this frequency band */
+ u16 ch_number;
+ /**< First channel number in this band, or first
+ index in ch_names */
+ char **ch_names;
+ /**< Optional list of channel names in this
+ band */
+};
+
+/*========================================*/
+
+/**
+* \struct drx_scan_param
+* \brief Parameters for channel scan.
+*
+* Used by DRX_CTRL_SCAN_INIT.
+*/
+struct drx_scan_param {
+ struct drx_frequency_plan *frequency_plan;
+ /**< Frequency plan (array)*/
+ u16 frequency_plan_size; /**< Number of bands */
+ u32 num_tries; /**< Max channels tried */
+ s32 skip; /**< Minimum frequency step to take
+ after a channel is found */
+ void *ext_params; /**< Standard specific params */
+};
+
+/*========================================*/
+
+/**
+* \brief Scan commands.
+* Used by scanning algorithms.
+*/
+enum drx_scan_command {
+ DRX_SCAN_COMMAND_INIT = 0,/**< Initialize scanning */
+ DRX_SCAN_COMMAND_NEXT, /**< Next scan */
+ DRX_SCAN_COMMAND_STOP /**< Stop scanning */
+};
+
+/*========================================*/
+
+/**
+* \brief Inner scan function prototype.
+*/
+typedef int(*drx_scan_func_t) (void *scan_context,
+ enum drx_scan_command scan_command,
+ struct drx_channel *scan_channel,
+ bool *get_next_channel);
+
+/*========================================*/
+
+/**
+* \struct drxtps_info
+* \brief TPS information, DVB-T specific.
+*
+* Used by DRX_CTRL_TPS_INFO.
+*/
+ struct drxtps_info {
+ enum drx_fft_mode fftmode; /**< Fft mode */
+ enum drx_guard guard; /**< Guard interval */
+ enum drx_modulation constellation;
+ /**< Constellation */
+ enum drx_hierarchy hierarchy;
+ /**< Hierarchy */
+ enum drx_coderate high_coderate;
+ /**< High code rate */
+ enum drx_coderate low_coderate;
+ /**< Low code rate */
+ enum drx_tps_frame frame; /**< Tps frame */
+ u8 length; /**< Length */
+ u16 cell_id; /**< Cell id */
+ };
+
+/*========================================*/
+
+/**
+* \brief Power mode of device.
+*
+* Used by DRX_CTRL_SET_POWER_MODE.
+*/
+ enum drx_power_mode {
+ DRX_POWER_UP = 0,
+ /**< Generic , Power Up Mode */
+ DRX_POWER_MODE_1,
+ /**< Device specific , Power Up Mode */
+ DRX_POWER_MODE_2,
+ /**< Device specific , Power Up Mode */
+ DRX_POWER_MODE_3,
+ /**< Device specific , Power Up Mode */
+ DRX_POWER_MODE_4,
+ /**< Device specific , Power Up Mode */
+ DRX_POWER_MODE_5,
+ /**< Device specific , Power Up Mode */
+ DRX_POWER_MODE_6,
+ /**< Device specific , Power Up Mode */
+ DRX_POWER_MODE_7,
+ /**< Device specific , Power Up Mode */
+ DRX_POWER_MODE_8,
+ /**< Device specific , Power Up Mode */
+
+ DRX_POWER_MODE_9,
+ /**< Device specific , Power Down Mode */
+ DRX_POWER_MODE_10,
+ /**< Device specific , Power Down Mode */
+ DRX_POWER_MODE_11,
+ /**< Device specific , Power Down Mode */
+ DRX_POWER_MODE_12,
+ /**< Device specific , Power Down Mode */
+ DRX_POWER_MODE_13,
+ /**< Device specific , Power Down Mode */
+ DRX_POWER_MODE_14,
+ /**< Device specific , Power Down Mode */
+ DRX_POWER_MODE_15,
+ /**< Device specific , Power Down Mode */
+ DRX_POWER_MODE_16,
+ /**< Device specific , Power Down Mode */
+ DRX_POWER_DOWN = 255
+ /**< Generic , Power Down Mode */
+ };
+
+/*========================================*/
+
+/**
+* \enum drx_module
+* \brief Software module identification.
+*
+* Used by DRX_CTRL_VERSION.
+*/
+ enum drx_module {
+ DRX_MODULE_DEVICE,
+ DRX_MODULE_MICROCODE,
+ DRX_MODULE_DRIVERCORE,
+ DRX_MODULE_DEVICEDRIVER,
+ DRX_MODULE_DAP,
+ DRX_MODULE_BSP_I2C,
+ DRX_MODULE_BSP_TUNER,
+ DRX_MODULE_BSP_HOST,
+ DRX_MODULE_UNKNOWN
+ };
+
+/**
+* \struct drx_version
+* \brief Version information of one software module.
+*
+* Used by DRX_CTRL_VERSION.
+*/
+ struct drx_version {
+ enum drx_module module_type;
+ /**< Type identifier of the module */
+ char *module_name;
+ /**< Name or description of module */
+ u16 v_major; /**< Major version number */
+ u16 v_minor; /**< Minor version number */
+ u16 v_patch; /**< Patch version number */
+ char *v_string; /**< Version as text string */
+ };
+
+/**
+* \struct drx_version_list
+* \brief List element of a NULL-terminated, linked list of version information.
+*
+* Used by DRX_CTRL_VERSION.
+*/
+struct drx_version_list {
+ struct drx_version *version;/**< Version information */
+ struct drx_version_list *next;
+ /**< Next list element */
+};
+
+/*========================================*/
+
+/**
+* \brief Parameters needed to configure a UIO.
+*
+* Used by DRX_CTRL_UIO_CFG.
+*/
+ struct drxuio_cfg {
+ enum drx_uio uio;
+ /**< UIO identifier */
+ enum drxuio_mode mode;
+ /**< UIO operational mode */
+ };
+
+/*========================================*/
+
+/**
+* \brief Parameters needed to read from or write to a UIO.
+*
+* Used by DRX_CTRL_UIO_READ and DRX_CTRL_UIO_WRITE.
+*/
+ struct drxuio_data {
+ enum drx_uio uio;
+ /**< UIO identifier */
+ bool value;
+ /**< UIO value (true=1, false=0) */
+ };
+
+/*========================================*/
+
+/**
+* \brief Parameters needed to configure OOB.
+*
+* Used by DRX_CTRL_SET_OOB.
+*/
+ struct drxoob {
+ s32 frequency; /**< Frequency in kHz */
+ enum drxoob_downstream_standard standard;
+ /**< OOB standard */
+ bool spectrum_inverted; /**< If true, then spectrum
+ is inverted */
+ };
+
+/*========================================*/
+
+/**
+* \brief Metrics from OOB.
+*
+* Used by DRX_CTRL_GET_OOB.
+*/
+ struct drxoob_status {
+ s32 frequency; /**< Frequency in kHz */
+ enum drx_lock_status lock; /**< Lock status */
+ u32 mer; /**< MER */
+ s32 symbol_rate_offset; /**< Symbolrate offset in ppm */
+ };
+
+/*========================================*/
+
+/**
+* \brief Device dependent configuration data.
+*
+* Used by DRX_CTRL_SET_CFG and DRX_CTRL_GET_CFG.
+* A sort of nested drx_ctrl() functionality for device specific controls.
+*/
+ struct drx_cfg {
+ u32 cfg_type;
+ /**< Function identifier */
+ void *cfg_data;
+ /**< Function data */
+ };
+
+/*========================================*/
+
+/**
+* \enum drxmpeg_str_width
+* \brief MStart width [nr of MCLK cycles] for serial MPEG output.
+*/
+
+ enum drxmpeg_str_width {
+ DRX_MPEG_STR_WIDTH_1,
+ DRX_MPEG_STR_WIDTH_8
+ };
+
+/* CTRL CFG MPEG output */
+/**
+* \struct drx_cfg_mpeg_output
+* \brief Configuration parameters for MPEG output control.
+*
+* Used by DRX_CFG_MPEG_OUTPUT, in combination with DRX_CTRL_SET_CFG and
+* DRX_CTRL_GET_CFG.
+*/
+
+ struct drx_cfg_mpeg_output {
+ bool enable_mpeg_output;/**< If true, enable MPEG output */
+ bool insert_rs_byte; /**< If true, insert RS byte */
+ bool enable_parallel; /**< If true, parallel out otherwise
+ serial */
+ bool invert_data; /**< If true, invert DATA signals */
+ bool invert_err; /**< If true, invert ERR signal */
+ bool invert_str; /**< If true, invert STR signals */
+ bool invert_val; /**< If true, invert VAL signals */
+ bool invert_clk; /**< If true, invert CLK signals */
+ bool static_clk; /**< If true, static MPEG clockrate
+ will be used, otherwise clockrate
+ will adapt to the bitrate of the
+ TS */
+ u32 bitrate; /**< Maximum bitrate in b/s in case
+ static clockrate is selected */
+ enum drxmpeg_str_width width_str;
+ /**< MPEG start width */
+ };
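+
+/*
+ * Illustrative use (a sketch; the field values are example settings only):
+ * the MPEG output configuration is passed as nested configuration data,
+ * e.g.
+ *
+ *   struct drx_cfg_mpeg_output mpeg_cfg = { 0 };
+ *   struct drx_cfg cfg = { DRX_CFG_MPEG_OUTPUT, &mpeg_cfg };
+ *
+ *   mpeg_cfg.enable_mpeg_output = true;
+ *   mpeg_cfg.enable_parallel = false;   // serial TS output
+ *   // ... then hand 'cfg' to DRX_CTRL_SET_CFG
+ */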
+
+
+/*========================================*/
+
+/**
+* \struct drxi2c_data
+* \brief Data for I2C via the 2nd, 3rd, etc. I2C port.
+*
+* Used by DRX_CTRL_I2C_READWRITE.
+* If port_nr is equal to the primary port_nr, BSPI2C will be used.
+*
+*/
+ struct drxi2c_data {
+ u16 port_nr; /**< I2C port number */
+ struct i2c_device_addr *w_dev_addr;
+ /**< Write device address */
+ u16 w_count; /**< Size of write data in bytes */
+ u8 *wData; /**< Pointer to write data */
+ struct i2c_device_addr *r_dev_addr;
+ /**< Read device address */
+ u16 r_count; /**< Size of data to read in bytes */
+ u8 *r_data; /**< Pointer to read buffer */
+ };
+
+/*========================================*/
+
+/**
+* \enum drx_aud_standard
+* \brief Audio standard identifier.
+*
+* Used by DRX_CTRL_SET_AUD.
+*/
+ enum drx_aud_standard {
+ DRX_AUD_STANDARD_BTSC, /**< set BTSC standard (USA) */
+ DRX_AUD_STANDARD_A2, /**< set A2-Korea FM Stereo */
+ DRX_AUD_STANDARD_EIAJ, /**< set to Japanese FM Stereo */
+ DRX_AUD_STANDARD_FM_STEREO,/**< set to FM-Stereo Radio */
+ DRX_AUD_STANDARD_M_MONO, /**< for 4.5 MHz mono detected */
+ DRX_AUD_STANDARD_D_K_MONO, /**< for 6.5 MHz mono detected */
+ DRX_AUD_STANDARD_BG_FM, /**< set BG_FM standard */
+ DRX_AUD_STANDARD_D_K1, /**< set D_K1 standard */
+ DRX_AUD_STANDARD_D_K2, /**< set D_K2 standard */
+ DRX_AUD_STANDARD_D_K3, /**< set D_K3 standard */
+ DRX_AUD_STANDARD_BG_NICAM_FM,
+ /**< set BG_NICAM_FM standard */
+ DRX_AUD_STANDARD_L_NICAM_AM,
+ /**< set L_NICAM_AM standard */
+ DRX_AUD_STANDARD_I_NICAM_FM,
+ /**< set I_NICAM_FM standard */
+ DRX_AUD_STANDARD_D_K_NICAM_FM,
+ /**< set D_K_NICAM_FM standard */
+ DRX_AUD_STANDARD_NOT_READY,/**< used to detect audio standard */
+ DRX_AUD_STANDARD_AUTO = DRX_AUTO,
+ /**< Automatic Standard Detection */
+ DRX_AUD_STANDARD_UNKNOWN = DRX_UNKNOWN
+ /**< used as auto and for readback */
+ };
+
+/* CTRL_AUD_GET_STATUS - struct drx_aud_status */
+/**
+* \enum drx_aud_nicam_status
+* \brief Status of NICAM carrier.
+*/
+ enum drx_aud_nicam_status {
+ DRX_AUD_NICAM_DETECTED = 0,
+ /**< NICAM carrier detected */
+ DRX_AUD_NICAM_NOT_DETECTED,
+ /**< NICAM carrier not detected */
+ DRX_AUD_NICAM_BAD /**< NICAM carrier bad quality */
+ };
+
+/**
+* \struct drx_aud_status
+* \brief Audio status characteristics.
+*/
+ struct drx_aud_status {
+ bool stereo; /**< stereo detection */
+ bool carrier_a; /**< carrier A detected */
+ bool carrier_b; /**< carrier B detected */
+ bool sap; /**< sap / bilingual detection */
+ bool rds; /**< RDS data array present */
+ enum drx_aud_nicam_status nicam_status;
+ /**< status of NICAM carrier */
+ s8 fm_ident; /**< FM Identification value */
+ };
+
+/* CTRL_AUD_READ_RDS - struct drx_cfg_aud_rds */
+
+/**
+* \struct drx_cfg_aud_rds
+* \brief Raw RDS data array.
+*/
+ struct drx_cfg_aud_rds {
+ bool valid; /**< RDS data validation */
+ u16 data[18]; /**< data from one RDS data array */
+ };
+
+/* DRX_CFG_AUD_VOLUME - struct drx_cfg_aud_volume - set/get */
+/**
+* \enum drx_aud_avc_mode
+* \brief Automatic volume control configuration.
+*/
+ enum drx_aud_avc_mode {
+ DRX_AUD_AVC_OFF, /**< Automatic volume control off */
+ DRX_AUD_AVC_DECAYTIME_8S, /**< level volume in 8 seconds */
+ DRX_AUD_AVC_DECAYTIME_4S, /**< level volume in 4 seconds */
+ DRX_AUD_AVC_DECAYTIME_2S, /**< level volume in 2 seconds */
+ DRX_AUD_AVC_DECAYTIME_20MS/**< level volume in 20 millisec */
+ };
+
+/**
+* \enum drx_aud_avc_max_gain
+* \brief Automatic volume control max gain in audio baseband.
+*/
+ enum drx_aud_avc_max_gain {
+ DRX_AUD_AVC_MAX_GAIN_0DB, /**< maximum AVC gain 0 dB */
+ DRX_AUD_AVC_MAX_GAIN_6DB, /**< maximum AVC gain 6 dB */
+ DRX_AUD_AVC_MAX_GAIN_12DB /**< maximum AVC gain 12 dB */
+ };
+
+/**
+* \enum drx_aud_avc_max_atten
+* \brief Automatic volume control max attenuation in audio baseband.
+*/
+ enum drx_aud_avc_max_atten {
+ DRX_AUD_AVC_MAX_ATTEN_12DB,
+ /**< maximum AVC attenuation 12 dB */
+ DRX_AUD_AVC_MAX_ATTEN_18DB,
+ /**< maximum AVC attenuation 18 dB */
+ DRX_AUD_AVC_MAX_ATTEN_24DB/**< maximum AVC attenuation 24 dB */
+ };
+/**
+* \struct drx_cfg_aud_volume
+* \brief Audio volume configuration.
+*/
+ struct drx_cfg_aud_volume {
+ bool mute; /**< mute overrides volume setting */
+ s16 volume; /**< volume, range -114 to 12 dB */
+ enum drx_aud_avc_mode avc_mode; /**< AVC auto volume control mode */
+ u16 avc_ref_level; /**< AVC reference level */
+ enum drx_aud_avc_max_gain avc_max_gain;
+ /**< AVC max gain selection */
+ enum drx_aud_avc_max_atten avc_max_atten;
+ /**< AVC max attenuation selection */
+ s16 strength_left; /**< quasi-peak, left speaker */
+ s16 strength_right; /**< quasi-peak, right speaker */
+ };
+
+/* DRX_CFG_I2S_OUTPUT - struct drx_cfg_i2s_output - set/get */
+/**
+* \enum drxi2s_mode
+* \brief I2S output mode.
+*/
+ enum drxi2s_mode {
+ DRX_I2S_MODE_MASTER, /**< I2S is in master mode */
+ DRX_I2S_MODE_SLAVE /**< I2S is in slave mode */
+ };
+
+/**
+* \enum drxi2s_word_length
+* \brief Width of I2S data.
+*/
+ enum drxi2s_word_length {
+ DRX_I2S_WORDLENGTH_32 = 0,/**< I2S data is 32 bit wide */
+ DRX_I2S_WORDLENGTH_16 = 1 /**< I2S data is 16 bit wide */
+ };
+
+/**
+* \enum drxi2s_format
+* \brief Data wordstrobe alignment for I2S.
+*/
+ enum drxi2s_format {
+ DRX_I2S_FORMAT_WS_WITH_DATA,
+ /**< I2S data and wordstrobe are aligned */
+ DRX_I2S_FORMAT_WS_ADVANCED
+ /**< I2S data one cycle after wordstrobe */
+ };
+
+/**
+* \enum drxi2s_polarity
+* \brief Polarity of I2S data.
+*/
+ enum drxi2s_polarity {
+ DRX_I2S_POLARITY_RIGHT,/**< wordstrobe - right high, left low */
+ DRX_I2S_POLARITY_LEFT /**< wordstrobe - right low, left high */
+ };
+
+/**
+* \struct drx_cfg_i2s_output
+* \brief I2S output configuration.
+*/
+ struct drx_cfg_i2s_output {
+ bool output_enable; /**< I2S output enable */
+ u32 frequency; /**< range from 8000-48000 Hz */
+ enum drxi2s_mode mode; /**< I2S mode, master or slave */
+ enum drxi2s_word_length word_length;
+ /**< I2S wordlength, 16 or 32 bits */
+ enum drxi2s_polarity polarity;/**< I2S wordstrobe polarity */
+ enum drxi2s_format format; /**< I2S wordstrobe delay to data */
+ };
+
+/* ------------------------------expert interface-----------------------------*/
+/**
+* \enum drx_aud_fm_deemphasis
+* \brief Setting for FM de-emphasis in the audio demodulator.
+*
+*/
+ enum drx_aud_fm_deemphasis {
+ DRX_AUD_FM_DEEMPH_50US,
+ DRX_AUD_FM_DEEMPH_75US,
+ DRX_AUD_FM_DEEMPH_OFF
+ };
+
+/**
+* \enum drx_cfg_aud_deviation
+* \brief Setting for deviation mode in the audio demodulator.
+*
+*/
+ enum drx_cfg_aud_deviation {
+ DRX_AUD_DEVIATION_NORMAL,
+ DRX_AUD_DEVIATION_HIGH
+ };
+
+/**
+* \enum drx_no_carrier_option
+* \brief Output selection at no carrier: mute or noise.
+*
+*/
+ enum drx_no_carrier_option {
+ DRX_NO_CARRIER_MUTE,
+ DRX_NO_CARRIER_NOISE
+ };
+
+/**
+* \enum drx_cfg_aud_auto_sound
+* \brief Automatic sound selection.
+*/
+ enum drx_cfg_aud_auto_sound {
+ DRX_AUD_AUTO_SOUND_OFF = 0,
+ DRX_AUD_AUTO_SOUND_SELECT_ON_CHANGE_ON,
+ DRX_AUD_AUTO_SOUND_SELECT_ON_CHANGE_OFF
+ };
+
+/**
+* \struct drx_cfg_aud_ass_thres
+* \brief Automatic sound select thresholds.
+*/
+ struct drx_cfg_aud_ass_thres {
+ u16 a2; /* A2 Threshold for ASS configuration */
+ u16 btsc; /* BTSC Threshold for ASS configuration */
+ u16 nicam; /* Nicam Threshold for ASS configuration */
+ };
+
+/**
+* \struct drx_aud_carrier
+* \brief Carrier detection related parameters
+*/
+ struct drx_aud_carrier {
+ u16 thres; /* carrier detection threshold for primary carrier (A) */
+ enum drx_no_carrier_option opt; /* Mute or noise at no carrier detection (A) */
+ s32 shift; /* DC level of incoming signal (A) */
+ s32 dco; /* frequency adjustment (A) */
+ };
+
+/**
+* \struct drx_cfg_aud_carriers
+* \brief Carrier A and B combined into one struct.
+*/
+ struct drx_cfg_aud_carriers {
+ struct drx_aud_carrier a;
+ struct drx_aud_carrier b;
+ };
+
+/**
+* \enum drx_aud_i2s_src
+* \brief Selection of audio source.
+*/
+ enum drx_aud_i2s_src {
+ DRX_AUD_SRC_MONO,
+ DRX_AUD_SRC_STEREO_OR_AB,
+ DRX_AUD_SRC_STEREO_OR_A,
+ DRX_AUD_SRC_STEREO_OR_B
+ };
+
+/**
+* \enum drx_aud_i2s_matrix
+* \brief Used for selecting I2S output.
+*/
+ enum drx_aud_i2s_matrix {
+ DRX_AUD_I2S_MATRIX_A_MONO,
+ /**< A sound only, stereo or mono */
+ DRX_AUD_I2S_MATRIX_B_MONO,
+ /**< B sound only, stereo or mono */
+ DRX_AUD_I2S_MATRIX_STEREO,
+ /**< A+B sound, transparent */
+ DRX_AUD_I2S_MATRIX_MONO /**< A+B mixed to mono sum, (L+R)/2 */
+ };
+
+/**
+* \enum drx_aud_fm_matrix
+* \brief Setting for the FM matrix in the audio demodulator.
+*
+*/
+ enum drx_aud_fm_matrix {
+ DRX_AUD_FM_MATRIX_NO_MATRIX,
+ DRX_AUD_FM_MATRIX_GERMAN,
+ DRX_AUD_FM_MATRIX_KOREAN,
+ DRX_AUD_FM_MATRIX_SOUND_A,
+ DRX_AUD_FM_MATRIX_SOUND_B
+ };
+
+/**
+* \struct drx_cfg_aud_mixer
+* \brief Mixer settings
+*/
+struct drx_cfg_aud_mixer {
+ enum drx_aud_i2s_src source_i2s;
+ enum drx_aud_i2s_matrix matrix_i2s;
+ enum drx_aud_fm_matrix matrix_fm;
+};
+
+/**
+* \enum drx_cfg_aud_av_sync
+* \brief Audio/video synchronization, interacts with I2S mode.
+* AUTO_1 and AUTO_2 are for automatic video standard detection with preference
+* for NTSC or Monochrome, because the frequencies are too close (59.94 & 60 Hz)
+*/
+ enum drx_cfg_aud_av_sync {
+ DRX_AUD_AVSYNC_OFF,/**< audio/video synchronization is off */
+ DRX_AUD_AVSYNC_NTSC,
+ /**< it is an NTSC system */
+ DRX_AUD_AVSYNC_MONOCHROME,
+ /**< it is a MONOCHROME system */
+ DRX_AUD_AVSYNC_PAL_SECAM
+ /**< it is a PAL/SECAM system */
+ };
+
+/**
+* \struct drx_cfg_aud_prescale
+* \brief Prescalers
+*/
+struct drx_cfg_aud_prescale {
+ u16 fm_deviation;
+ s16 nicam_gain;
+};
+
+/**
+* \struct drx_aud_beep
+* \brief Beep
+*/
+struct drx_aud_beep {
+ s16 volume; /* dB */
+ u16 frequency; /* Hz */
+ bool mute;
+};
+
+/**
+* \enum drx_aud_btsc_detect
+* \brief BTSC detection mode.
+*/
+ enum drx_aud_btsc_detect {
+ DRX_BTSC_STEREO,
+ DRX_BTSC_MONO_AND_SAP
+ };
+
+/**
+* \struct drx_aud_data
+* \brief Audio data structure
+*/
+struct drx_aud_data {
+ /* audio storage */
+ bool audio_is_active;
+ enum drx_aud_standard audio_standard;
+ struct drx_cfg_i2s_output i2sdata;
+ struct drx_cfg_aud_volume volume;
+ enum drx_cfg_aud_auto_sound auto_sound;
+ struct drx_cfg_aud_ass_thres ass_thresholds;
+ struct drx_cfg_aud_carriers carriers;
+ struct drx_cfg_aud_mixer mixer;
+ enum drx_cfg_aud_deviation deviation;
+ enum drx_cfg_aud_av_sync av_sync;
+ struct drx_cfg_aud_prescale prescale;
+ enum drx_aud_fm_deemphasis deemph;
+ enum drx_aud_btsc_detect btsc_detect;
+ /* rds */
+ u16 rds_data_counter;
+ bool rds_data_present;
+};
+
+/**
+* \enum drx_qam_lock_range
+* \brief QAM lock range mode
+*/
+ enum drx_qam_lock_range {
+ DRX_QAM_LOCKRANGE_NORMAL,
+ DRX_QAM_LOCKRANGE_EXTENDED
+ };
+
+/*============================================================================*/
+/*============================================================================*/
+/*== Data access structures ==================================================*/
+/*============================================================================*/
+/*============================================================================*/
+
+/* Address on device */
+ typedef u32 dr_xaddr_t, *pdr_xaddr_t;
+
+/* Protocol specific flags */
+ typedef u32 dr_xflags_t, *pdr_xflags_t;
+
+/* Write block of data to device */
+ typedef int(*drx_write_block_func_t) (struct i2c_device_addr *dev_addr, /* address of I2C device */
+ u32 addr, /* address of register/memory */
+ u16 datasize, /* size of data in bytes */
+ u8 *data, /* data to send */
+ u32 flags);
+
+/* Read block of data from device */
+ typedef int(*drx_read_block_func_t) (struct i2c_device_addr *dev_addr, /* address of I2C device */
+ u32 addr, /* address of register/memory */
+ u16 datasize, /* size of data in bytes */
+ u8 *data, /* receive buffer */
+ u32 flags);
+
+/* Write 8-bit value to device */
+ typedef int(*drx_write_reg8func_t) (struct i2c_device_addr *dev_addr, /* address of I2C device */
+ u32 addr, /* address of register/memory */
+ u8 data, /* data to send */
+ u32 flags);
+
+/* Read 8-bit value from device */
+ typedef int(*drx_read_reg8func_t) (struct i2c_device_addr *dev_addr, /* address of I2C device */
+ u32 addr, /* address of register/memory */
+ u8 *data, /* receive buffer */
+ u32 flags);
+
+/* Read-modify-write 8-bit value on device */
+ typedef int(*drx_read_modify_write_reg8func_t) (struct i2c_device_addr *dev_addr, /* address of I2C device */
+ u32 waddr, /* write address of register */
+ u32 raddr, /* read address of register */
+ u8 wdata, /* data to write */
+ u8 *rdata); /* data to read */
+
+/* Write 16-bit value to device */
+ typedef int(*drx_write_reg16func_t) (struct i2c_device_addr *dev_addr, /* address of I2C device */
+ u32 addr, /* address of register/memory */
+ u16 data, /* data to send */
+ u32 flags);
+
+/* Read 16-bit value from device */
+ typedef int(*drx_read_reg16func_t) (struct i2c_device_addr *dev_addr, /* address of I2C device */
+ u32 addr, /* address of register/memory */
+ u16 *data, /* receive buffer */
+ u32 flags);
+
+/* Read-modify-write 16-bit value on device */
+ typedef int(*drx_read_modify_write_reg16func_t) (struct i2c_device_addr *dev_addr, /* address of I2C device */
+ u32 waddr, /* write address of register */
+ u32 raddr, /* read address of register */
+ u16 wdata, /* data to write */
+ u16 *rdata); /* data to read */
+
+/* Write 32-bit value to device */
+ typedef int(*drx_write_reg32func_t) (struct i2c_device_addr *dev_addr, /* address of I2C device */
+ u32 addr, /* address of register/memory */
+ u32 data, /* data to send */
+ u32 flags);
+
+/* Read 32-bit value from device */
+ typedef int(*drx_read_reg32func_t) (struct i2c_device_addr *dev_addr, /* address of I2C device */
+ u32 addr, /* address of register/memory */
+ u32 *data, /* receive buffer */
+ u32 flags);
+
+/* Read-modify-write 32-bit value on device */
+ typedef int(*drx_read_modify_write_reg32func_t) (struct i2c_device_addr *dev_addr, /* address of I2C device */
+ u32 waddr, /* write address of register */
+ u32 raddr, /* read address of register */
+ u32 wdata, /* data to write */
+ u32 *rdata); /* data to read */
+
+/**
+* \struct drx_access_func
+* \brief Interface to an access protocol.
+*/
+struct drx_access_func {
+ drx_write_block_func_t write_block_func;
+ drx_read_block_func_t read_block_func;
+ drx_write_reg8func_t write_reg8func;
+ drx_read_reg8func_t read_reg8func;
+ drx_read_modify_write_reg8func_t read_modify_write_reg8func;
+ drx_write_reg16func_t write_reg16func;
+ drx_read_reg16func_t read_reg16func;
+ drx_read_modify_write_reg16func_t read_modify_write_reg16func;
+ drx_write_reg32func_t write_reg32func;
+ drx_read_reg32func_t read_reg32func;
+ drx_read_modify_write_reg32func_t read_modify_write_reg32func;
+};
+
+/* Register address and data for register dump function */
+struct drx_reg_dump {
+ u32 address;
+ u32 data;
+};
+
+/*============================================================================*/
+/*============================================================================*/
+/*== Demod instance data structures ==========================================*/
+/*============================================================================*/
+/*============================================================================*/
+
+/**
+* \struct drx_common_attr
+* \brief Set of common attributes, shared by all DRX devices.
+*/
+ struct drx_common_attr {
+ /* Microcode (firmware) attributes */
+ char *microcode_file; /**< microcode filename */
+ bool verify_microcode;
+ /**< Use microcode verify or not. */
+ struct drx_mc_version_rec mcversion;
+ /**< Version record of microcode from file */
+
+ /* Clocks and tuner attributes */
+ s32 intermediate_freq;
+ /**< IF, if no tuner instance is used (kHz) */
+ s32 sys_clock_freq;
+ /**< System clock frequency (kHz) */
+ s32 osc_clock_freq;
+ /**< Oscillator clock frequency. (kHz) */
+ s16 osc_clock_deviation;
+ /**< Oscillator clock deviation. (ppm) */
+ bool mirror_freq_spect;
+ /**< Mirror IF frequency spectrum or not.*/
+
+ /* Initial MPEG output attributes */
+ struct drx_cfg_mpeg_output mpeg_cfg;
+ /**< MPEG configuration */
+
+ bool is_opened; /**< if true instance is already opened. */
+
+ /* Channel scan */
+ struct drx_scan_param *scan_param;
+ /**< scan parameters */
+ u16 scan_freq_plan_index;
+ /**< next index in freq plan */
+ s32 scan_next_frequency;
+ /**< next freq to scan */
+ bool scan_ready; /**< scan ready flag */
+ u32 scan_max_channels;/**< number of channels in freqplan */
+ u32 scan_channels_scanned;
+ /**< number of channels scanned */
+ /* Channel scan - inner loop: demod related */
+ drx_scan_func_t scan_function;
+ /**< function to check channel */
+ /* Channel scan - inner loop: SYSObj related */
+ void *scan_context; /**< Context Pointer of SYSObj */
+ /* Channel scan - parameters for default DTV scan function in core driver */
+ u16 scan_demod_lock_timeout;
+ /**< millisecs to wait for lock */
+ enum drx_lock_status scan_desired_lock;
+ /**< lock requirement for channel found */
+ /* scan_active can be used by SetChannel to decide how to program the tuner,
+ fast or slow (but stable). Usually fast during scan. */
+ bool scan_active; /**< true when scan routines are active */
+
+ /* Power management */
+ enum drx_power_mode current_power_mode;
+ /**< current power management mode */
+
+ /* Tuner */
+ u8 tuner_port_nr; /**< number of the I2C port to which the tuner is connected */
+ s32 tuner_min_freq_rf;
+ /**< minimum RF input frequency, in kHz */
+ s32 tuner_max_freq_rf;
+ /**< maximum RF input frequency, in kHz */
+ bool tuner_rf_agc_pol; /**< if true invert RF AGC polarity */
+ bool tuner_if_agc_pol; /**< if true invert IF AGC polarity */
+ bool tuner_slow_mode; /**< if true tuner uses slow mode */
+
+ struct drx_channel current_channel;
+ /**< current channel parameters */
+ enum drx_standard current_standard;
+ /**< current standard selection */
+ enum drx_standard prev_standard;
+ /**< previous standard selection */
+ enum drx_standard di_cache_standard;
+ /**< standard in DI cache if available */
+ bool use_bootloader; /**< use bootloader in open */
+ u32 capabilities; /**< capabilities flags */
+ u32 product_id; /**< product ID inc. metal fix number */
+ };
+
+/*
+* Generic functions for DRX devices.
+*/
+
+struct drx_demod_instance;
+
+/**
+* \struct struct drx_demod_instance * \brief Top structure of demodulator instance.
+*/
+struct drx_demod_instance {
+ struct i2c_device_addr *my_i2c_dev_addr;
+ /**< i2c address and device identifier */
+ struct drx_common_attr *my_common_attr;
+ /**< common DRX attributes */
+ void *my_ext_attr; /**< device specific attributes */
+ /* generic demodulator data */
+
+ struct i2c_adapter *i2c;
+ const struct firmware *firmware;
+};
+
+/*-------------------------------------------------------------------------
+MACROS
+Conversion from enum values to human readable form.
+-------------------------------------------------------------------------*/
+
+/* standard */
+
+#define DRX_STR_STANDARD(x) ( \
+ (x == DRX_STANDARD_DVBT) ? "DVB-T" : \
+ (x == DRX_STANDARD_8VSB) ? "8VSB" : \
+ (x == DRX_STANDARD_NTSC) ? "NTSC" : \
+ (x == DRX_STANDARD_PAL_SECAM_BG) ? "PAL/SECAM B/G" : \
+ (x == DRX_STANDARD_PAL_SECAM_DK) ? "PAL/SECAM D/K" : \
+ (x == DRX_STANDARD_PAL_SECAM_I) ? "PAL/SECAM I" : \
+ (x == DRX_STANDARD_PAL_SECAM_L) ? "PAL/SECAM L" : \
+ (x == DRX_STANDARD_PAL_SECAM_LP) ? "PAL/SECAM LP" : \
+ (x == DRX_STANDARD_ITU_A) ? "ITU-A" : \
+ (x == DRX_STANDARD_ITU_B) ? "ITU-B" : \
+ (x == DRX_STANDARD_ITU_C) ? "ITU-C" : \
+ (x == DRX_STANDARD_ITU_D) ? "ITU-D" : \
+ (x == DRX_STANDARD_FM) ? "FM" : \
+ (x == DRX_STANDARD_DTMB) ? "DTMB" : \
+ (x == DRX_STANDARD_AUTO) ? "Auto" : \
+ (x == DRX_STANDARD_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+
+/* channel */
+
+#define DRX_STR_BANDWIDTH(x) ( \
+ (x == DRX_BANDWIDTH_8MHZ) ? "8 MHz" : \
+ (x == DRX_BANDWIDTH_7MHZ) ? "7 MHz" : \
+ (x == DRX_BANDWIDTH_6MHZ) ? "6 MHz" : \
+ (x == DRX_BANDWIDTH_AUTO) ? "Auto" : \
+ (x == DRX_BANDWIDTH_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+#define DRX_STR_FFTMODE(x) ( \
+ (x == DRX_FFTMODE_2K) ? "2k" : \
+ (x == DRX_FFTMODE_4K) ? "4k" : \
+ (x == DRX_FFTMODE_8K) ? "8k" : \
+ (x == DRX_FFTMODE_AUTO) ? "Auto" : \
+ (x == DRX_FFTMODE_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+#define DRX_STR_GUARD(x) ( \
+ (x == DRX_GUARD_1DIV32) ? "1/32nd" : \
+ (x == DRX_GUARD_1DIV16) ? "1/16th" : \
+ (x == DRX_GUARD_1DIV8) ? "1/8th" : \
+ (x == DRX_GUARD_1DIV4) ? "1/4th" : \
+ (x == DRX_GUARD_AUTO) ? "Auto" : \
+ (x == DRX_GUARD_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+#define DRX_STR_CONSTELLATION(x) ( \
+ (x == DRX_CONSTELLATION_BPSK) ? "BPSK" : \
+ (x == DRX_CONSTELLATION_QPSK) ? "QPSK" : \
+ (x == DRX_CONSTELLATION_PSK8) ? "PSK8" : \
+ (x == DRX_CONSTELLATION_QAM16) ? "QAM16" : \
+ (x == DRX_CONSTELLATION_QAM32) ? "QAM32" : \
+ (x == DRX_CONSTELLATION_QAM64) ? "QAM64" : \
+ (x == DRX_CONSTELLATION_QAM128) ? "QAM128" : \
+ (x == DRX_CONSTELLATION_QAM256) ? "QAM256" : \
+ (x == DRX_CONSTELLATION_QAM512) ? "QAM512" : \
+ (x == DRX_CONSTELLATION_QAM1024) ? "QAM1024" : \
+ (x == DRX_CONSTELLATION_QPSK_NR) ? "QPSK_NR" : \
+ (x == DRX_CONSTELLATION_AUTO) ? "Auto" : \
+ (x == DRX_CONSTELLATION_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+#define DRX_STR_CODERATE(x) ( \
+ (x == DRX_CODERATE_1DIV2) ? "1/2nd" : \
+ (x == DRX_CODERATE_2DIV3) ? "2/3rd" : \
+ (x == DRX_CODERATE_3DIV4) ? "3/4th" : \
+ (x == DRX_CODERATE_5DIV6) ? "5/6th" : \
+ (x == DRX_CODERATE_7DIV8) ? "7/8th" : \
+ (x == DRX_CODERATE_AUTO) ? "Auto" : \
+ (x == DRX_CODERATE_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+#define DRX_STR_HIERARCHY(x) ( \
+ (x == DRX_HIERARCHY_NONE) ? "None" : \
+ (x == DRX_HIERARCHY_ALPHA1) ? "Alpha=1" : \
+ (x == DRX_HIERARCHY_ALPHA2) ? "Alpha=2" : \
+ (x == DRX_HIERARCHY_ALPHA4) ? "Alpha=4" : \
+ (x == DRX_HIERARCHY_AUTO) ? "Auto" : \
+ (x == DRX_HIERARCHY_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+#define DRX_STR_PRIORITY(x) ( \
+ (x == DRX_PRIORITY_LOW) ? "Low" : \
+ (x == DRX_PRIORITY_HIGH) ? "High" : \
+ (x == DRX_PRIORITY_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+#define DRX_STR_MIRROR(x) ( \
+ (x == DRX_MIRROR_NO) ? "Normal" : \
+ (x == DRX_MIRROR_YES) ? "Mirrored" : \
+ (x == DRX_MIRROR_AUTO) ? "Auto" : \
+ (x == DRX_MIRROR_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+#define DRX_STR_CLASSIFICATION(x) ( \
+ (x == DRX_CLASSIFICATION_GAUSS) ? "Gaussian" : \
+ (x == DRX_CLASSIFICATION_HVY_GAUSS) ? "Heavy Gaussian" : \
+ (x == DRX_CLASSIFICATION_COCHANNEL) ? "Co-channel" : \
+ (x == DRX_CLASSIFICATION_STATIC) ? "Static echo" : \
+ (x == DRX_CLASSIFICATION_MOVING) ? "Moving echo" : \
+ (x == DRX_CLASSIFICATION_ZERODB) ? "Zero dB echo" : \
+ (x == DRX_CLASSIFICATION_UNKNOWN) ? "Unknown" : \
+ (x == DRX_CLASSIFICATION_AUTO) ? "Auto" : \
+ "(Invalid)")
+
+#define DRX_STR_INTERLEAVEMODE(x) ( \
+ (x == DRX_INTERLEAVEMODE_I128_J1) ? "I128_J1" : \
+ (x == DRX_INTERLEAVEMODE_I128_J1_V2) ? "I128_J1_V2" : \
+ (x == DRX_INTERLEAVEMODE_I128_J2) ? "I128_J2" : \
+ (x == DRX_INTERLEAVEMODE_I64_J2) ? "I64_J2" : \
+ (x == DRX_INTERLEAVEMODE_I128_J3) ? "I128_J3" : \
+ (x == DRX_INTERLEAVEMODE_I32_J4) ? "I32_J4" : \
+ (x == DRX_INTERLEAVEMODE_I128_J4) ? "I128_J4" : \
+ (x == DRX_INTERLEAVEMODE_I16_J8) ? "I16_J8" : \
+ (x == DRX_INTERLEAVEMODE_I128_J5) ? "I128_J5" : \
+ (x == DRX_INTERLEAVEMODE_I8_J16) ? "I8_J16" : \
+ (x == DRX_INTERLEAVEMODE_I128_J6) ? "I128_J6" : \
+ (x == DRX_INTERLEAVEMODE_RESERVED_11) ? "Reserved 11" : \
+ (x == DRX_INTERLEAVEMODE_I128_J7) ? "I128_J7" : \
+ (x == DRX_INTERLEAVEMODE_RESERVED_13) ? "Reserved 13" : \
+ (x == DRX_INTERLEAVEMODE_I128_J8) ? "I128_J8" : \
+ (x == DRX_INTERLEAVEMODE_RESERVED_15) ? "Reserved 15" : \
+ (x == DRX_INTERLEAVEMODE_I12_J17) ? "I12_J17" : \
+ (x == DRX_INTERLEAVEMODE_I5_J4) ? "I5_J4" : \
+ (x == DRX_INTERLEAVEMODE_B52_M240) ? "B52_M240" : \
+ (x == DRX_INTERLEAVEMODE_B52_M720) ? "B52_M720" : \
+ (x == DRX_INTERLEAVEMODE_B52_M48) ? "B52_M48" : \
+ (x == DRX_INTERLEAVEMODE_B52_M0) ? "B52_M0" : \
+ (x == DRX_INTERLEAVEMODE_UNKNOWN) ? "Unknown" : \
+ (x == DRX_INTERLEAVEMODE_AUTO) ? "Auto" : \
+ "(Invalid)")
+
+#define DRX_STR_LDPC(x) ( \
+ (x == DRX_LDPC_0_4) ? "0.4" : \
+ (x == DRX_LDPC_0_6) ? "0.6" : \
+ (x == DRX_LDPC_0_8) ? "0.8" : \
+ (x == DRX_LDPC_AUTO) ? "Auto" : \
+ (x == DRX_LDPC_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+
+#define DRX_STR_CARRIER(x) ( \
+ (x == DRX_CARRIER_MULTI) ? "Multi" : \
+ (x == DRX_CARRIER_SINGLE) ? "Single" : \
+ (x == DRX_CARRIER_AUTO) ? "Auto" : \
+ (x == DRX_CARRIER_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+
+#define DRX_STR_FRAMEMODE(x) ( \
+ (x == DRX_FRAMEMODE_420) ? "420" : \
+ (x == DRX_FRAMEMODE_595) ? "595" : \
+ (x == DRX_FRAMEMODE_945) ? "945" : \
+ (x == DRX_FRAMEMODE_420_FIXED_PN) ? "420 with fixed PN" : \
+ (x == DRX_FRAMEMODE_945_FIXED_PN) ? "945 with fixed PN" : \
+ (x == DRX_FRAMEMODE_AUTO) ? "Auto" : \
+ (x == DRX_FRAMEMODE_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+
+#define DRX_STR_PILOT(x) ( \
+ (x == DRX_PILOT_ON) ? "On" : \
+ (x == DRX_PILOT_OFF) ? "Off" : \
+ (x == DRX_PILOT_AUTO) ? "Auto" : \
+ (x == DRX_PILOT_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+/* TPS */
+
+#define DRX_STR_TPS_FRAME(x) ( \
+ (x == DRX_TPS_FRAME1) ? "Frame1" : \
+ (x == DRX_TPS_FRAME2) ? "Frame2" : \
+ (x == DRX_TPS_FRAME3) ? "Frame3" : \
+ (x == DRX_TPS_FRAME4) ? "Frame4" : \
+ (x == DRX_TPS_FRAME_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+
+/* lock status */
+
+#define DRX_STR_LOCKSTATUS(x) ( \
+ (x == DRX_NEVER_LOCK) ? "Never" : \
+ (x == DRX_NOT_LOCKED) ? "No" : \
+ (x == DRX_LOCKED) ? "Locked" : \
+ (x == DRX_LOCK_STATE_1) ? "Lock state 1" : \
+ (x == DRX_LOCK_STATE_2) ? "Lock state 2" : \
+ (x == DRX_LOCK_STATE_3) ? "Lock state 3" : \
+ (x == DRX_LOCK_STATE_4) ? "Lock state 4" : \
+ (x == DRX_LOCK_STATE_5) ? "Lock state 5" : \
+ (x == DRX_LOCK_STATE_6) ? "Lock state 6" : \
+ (x == DRX_LOCK_STATE_7) ? "Lock state 7" : \
+ (x == DRX_LOCK_STATE_8) ? "Lock state 8" : \
+ (x == DRX_LOCK_STATE_9) ? "Lock state 9" : \
+ "(Invalid)")
+
+/* version information , modules */
+#define DRX_STR_MODULE(x) ( \
+ (x == DRX_MODULE_DEVICE) ? "Device" : \
+ (x == DRX_MODULE_MICROCODE) ? "Microcode" : \
+ (x == DRX_MODULE_DRIVERCORE) ? "CoreDriver" : \
+ (x == DRX_MODULE_DEVICEDRIVER) ? "DeviceDriver" : \
+ (x == DRX_MODULE_BSP_I2C) ? "BSP I2C" : \
+ (x == DRX_MODULE_BSP_TUNER) ? "BSP Tuner" : \
+ (x == DRX_MODULE_BSP_HOST) ? "BSP Host" : \
+ (x == DRX_MODULE_DAP) ? "Data Access Protocol" : \
+ (x == DRX_MODULE_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+
+#define DRX_STR_POWER_MODE(x) ( \
+ (x == DRX_POWER_UP) ? "DRX_POWER_UP " : \
+ (x == DRX_POWER_MODE_1) ? "DRX_POWER_MODE_1" : \
+ (x == DRX_POWER_MODE_2) ? "DRX_POWER_MODE_2" : \
+ (x == DRX_POWER_MODE_3) ? "DRX_POWER_MODE_3" : \
+ (x == DRX_POWER_MODE_4) ? "DRX_POWER_MODE_4" : \
+ (x == DRX_POWER_MODE_5) ? "DRX_POWER_MODE_5" : \
+ (x == DRX_POWER_MODE_6) ? "DRX_POWER_MODE_6" : \
+ (x == DRX_POWER_MODE_7) ? "DRX_POWER_MODE_7" : \
+ (x == DRX_POWER_MODE_8) ? "DRX_POWER_MODE_8" : \
+ (x == DRX_POWER_MODE_9) ? "DRX_POWER_MODE_9" : \
+ (x == DRX_POWER_MODE_10) ? "DRX_POWER_MODE_10" : \
+ (x == DRX_POWER_MODE_11) ? "DRX_POWER_MODE_11" : \
+ (x == DRX_POWER_MODE_12) ? "DRX_POWER_MODE_12" : \
+ (x == DRX_POWER_MODE_13) ? "DRX_POWER_MODE_13" : \
+ (x == DRX_POWER_MODE_14) ? "DRX_POWER_MODE_14" : \
+ (x == DRX_POWER_MODE_15) ? "DRX_POWER_MODE_15" : \
+ (x == DRX_POWER_MODE_16) ? "DRX_POWER_MODE_16" : \
+ (x == DRX_POWER_DOWN) ? "DRX_POWER_DOWN " : \
+ "(Invalid)")
+
+#define DRX_STR_OOB_STANDARD(x) ( \
+ (x == DRX_OOB_MODE_A) ? "ANSI 55-1 " : \
+ (x == DRX_OOB_MODE_B_GRADE_A) ? "ANSI 55-2 A" : \
+ (x == DRX_OOB_MODE_B_GRADE_B) ? "ANSI 55-2 B" : \
+ "(Invalid)")
+
+#define DRX_STR_AUD_STANDARD(x) ( \
+ (x == DRX_AUD_STANDARD_BTSC) ? "BTSC" : \
+ (x == DRX_AUD_STANDARD_A2) ? "A2" : \
+ (x == DRX_AUD_STANDARD_EIAJ) ? "EIAJ" : \
+ (x == DRX_AUD_STANDARD_FM_STEREO) ? "FM Stereo" : \
+ (x == DRX_AUD_STANDARD_AUTO) ? "Auto" : \
+ (x == DRX_AUD_STANDARD_M_MONO) ? "M-Standard Mono" : \
+ (x == DRX_AUD_STANDARD_D_K_MONO) ? "D/K Mono FM" : \
+ (x == DRX_AUD_STANDARD_BG_FM) ? "B/G-Dual Carrier FM (A2)" : \
+ (x == DRX_AUD_STANDARD_D_K1) ? "D/K1-Dual Carrier FM" : \
+ (x == DRX_AUD_STANDARD_D_K2) ? "D/K2-Dual Carrier FM" : \
+ (x == DRX_AUD_STANDARD_D_K3) ? "D/K3-Dual Carrier FM" : \
+ (x == DRX_AUD_STANDARD_BG_NICAM_FM) ? "B/G-NICAM-FM" : \
+ (x == DRX_AUD_STANDARD_L_NICAM_AM) ? "L-NICAM-AM" : \
+ (x == DRX_AUD_STANDARD_I_NICAM_FM) ? "I-NICAM-FM" : \
+ (x == DRX_AUD_STANDARD_D_K_NICAM_FM) ? "D/K-NICAM-FM" : \
+ (x == DRX_AUD_STANDARD_UNKNOWN) ? "Unknown" : \
+ "(Invalid)")
+#define DRX_STR_AUD_STEREO(x) ( \
+ (x == true) ? "Stereo" : \
+ (x == false) ? "Mono" : \
+ "(Invalid)")
+
+#define DRX_STR_AUD_SAP(x) ( \
+ (x == true) ? "Present" : \
+ (x == false) ? "Not present" : \
+ "(Invalid)")
+
+#define DRX_STR_AUD_CARRIER(x) ( \
+ (x == true) ? "Present" : \
+ (x == false) ? "Not present" : \
+ "(Invalid)")
+
+#define DRX_STR_AUD_RDS(x) ( \
+ (x == true) ? "Available" : \
+ (x == false) ? "Not Available" : \
+ "(Invalid)")
+
+#define DRX_STR_AUD_NICAM_STATUS(x) ( \
+ (x == DRX_AUD_NICAM_DETECTED) ? "Detected" : \
+ (x == DRX_AUD_NICAM_NOT_DETECTED) ? "Not detected" : \
+ (x == DRX_AUD_NICAM_BAD) ? "Bad" : \
+ "(Invalid)")
+
+#define DRX_STR_RDS_VALID(x) ( \
+ (x == true) ? "Valid" : \
+ (x == false) ? "Not Valid" : \
+ "(Invalid)")
+
+/*-------------------------------------------------------------------------
+Access macros
+-------------------------------------------------------------------------*/
+
+/**
+* \brief Create a compilable reference to a demod attribute field
+* \param d pointer to demod instance
+*
+* Used as main reference to an attribute field.
+* Used by both macro implementation and function implementation.
+* These macros are defined to avoid duplication of code in macro and function
+* definitions that handle access of demod common or extended attributes.
+*
+*/
+
+#define DRX_ATTR_MCRECORD(d) ((d)->my_common_attr->mcversion)
+#define DRX_ATTR_MIRRORFREQSPECT(d) ((d)->my_common_attr->mirror_freq_spect)
+#define DRX_ATTR_CURRENTPOWERMODE(d)((d)->my_common_attr->current_power_mode)
+#define DRX_ATTR_ISOPENED(d) ((d)->my_common_attr->is_opened)
+#define DRX_ATTR_USEBOOTLOADER(d) ((d)->my_common_attr->use_bootloader)
+#define DRX_ATTR_CURRENTSTANDARD(d) ((d)->my_common_attr->current_standard)
+#define DRX_ATTR_PREVSTANDARD(d) ((d)->my_common_attr->prev_standard)
+#define DRX_ATTR_CACHESTANDARD(d) ((d)->my_common_attr->di_cache_standard)
+#define DRX_ATTR_CURRENTCHANNEL(d) ((d)->my_common_attr->current_channel)
+#define DRX_ATTR_MICROCODE(d) ((d)->my_common_attr->microcode_file)
+#define DRX_ATTR_VERIFYMICROCODE(d) ((d)->my_common_attr->verify_microcode)
+#define DRX_ATTR_CAPABILITIES(d) ((d)->my_common_attr->capabilities)
+#define DRX_ATTR_PRODUCTID(d) ((d)->my_common_attr->product_id)
+#define DRX_ATTR_INTERMEDIATEFREQ(d) ((d)->my_common_attr->intermediate_freq)
+#define DRX_ATTR_SYSCLOCKFREQ(d) ((d)->my_common_attr->sys_clock_freq)
+#define DRX_ATTR_TUNERRFAGCPOL(d) ((d)->my_common_attr->tuner_rf_agc_pol)
+#define DRX_ATTR_TUNERIFAGCPOL(d) ((d)->my_common_attr->tuner_if_agc_pol)
+#define DRX_ATTR_TUNERSLOWMODE(d) ((d)->my_common_attr->tuner_slow_mode)
+#define DRX_ATTR_TUNERSPORTNR(d) ((d)->my_common_attr->tuner_port_nr)
+#define DRX_ATTR_I2CADDR(d) ((d)->my_i2c_dev_addr->i2c_addr)
+#define DRX_ATTR_I2CDEVID(d) ((d)->my_i2c_dev_addr->i2c_dev_id)
+#define DRX_ISMCVERTYPE(x) ((x) == AUX_VER_RECORD)
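+
+/*
+ * Illustrative use only, not part of the driver: the DRX_ATTR_* macros
+ * expand to plain struct members, so they can be used as lvalues as well
+ * as rvalues. 'demod' is assumed to be a struct drx_demod_instance pointer
+ * with my_common_attr filled in:
+ *
+ *   if (!DRX_ATTR_ISOPENED(demod))
+ *           DRX_ATTR_CURRENTPOWERMODE(demod) = DRX_POWER_UP;
+ */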
+
+/**************************/
+
+/* Macros with device-specific handling are converted to CFG functions */
+
+#define DRX_ACCESSMACRO_SET(demod, value, cfg_name, data_type) \
+ do { \
+ struct drx_cfg config; \
+ data_type cfg_data; \
+ config.cfg_type = cfg_name; \
+ config.cfg_data = &cfg_data; \
+ cfg_data = value; \
+ drx_ctrl(demod, DRX_CTRL_SET_CFG, &config); \
+ } while (0)
+
+#define DRX_ACCESSMACRO_GET(demod, value, cfg_name, data_type, error_value) \
+ do { \
+ int cfg_status; \
+ struct drx_cfg config; \
+ data_type cfg_data; \
+ config.cfg_type = cfg_name; \
+ config.cfg_data = &cfg_data; \
+ cfg_status = drx_ctrl(demod, DRX_CTRL_GET_CFG, &config); \
+ if (cfg_status == 0) { \
+ value = cfg_data; \
+ } else { \
+ value = (data_type)error_value; \
+ } \
+ } while (0)
+
+/* Configuration functions for usage by Access (XS) Macros */
+
+#ifndef DRX_XS_CFG_BASE
+#define DRX_XS_CFG_BASE (500)
+#endif
+
+#define DRX_XS_CFG_PRESET (DRX_XS_CFG_BASE + 0)
+#define DRX_XS_CFG_AUD_BTSC_DETECT (DRX_XS_CFG_BASE + 1)
+#define DRX_XS_CFG_QAM_LOCKRANGE (DRX_XS_CFG_BASE + 2)
+
+/* Access Macros with device-specific handling */
+
+#define DRX_SET_PRESET(d, x) \
+ DRX_ACCESSMACRO_SET((d), (x), DRX_XS_CFG_PRESET, char*)
+#define DRX_GET_PRESET(d, x) \
+ DRX_ACCESSMACRO_GET((d), (x), DRX_XS_CFG_PRESET, char*, "ERROR")
+
+#define DRX_SET_AUD_BTSC_DETECT(d, x) DRX_ACCESSMACRO_SET((d), (x), \
+ DRX_XS_CFG_AUD_BTSC_DETECT, enum drx_aud_btsc_detect)
+#define DRX_GET_AUD_BTSC_DETECT(d, x) DRX_ACCESSMACRO_GET((d), (x), \
+ DRX_XS_CFG_AUD_BTSC_DETECT, enum drx_aud_btsc_detect, DRX_UNKNOWN)
+
+#define DRX_SET_QAM_LOCKRANGE(d, x) DRX_ACCESSMACRO_SET((d), (x), \
+ DRX_XS_CFG_QAM_LOCKRANGE, enum drx_qam_lock_range)
+#define DRX_GET_QAM_LOCKRANGE(d, x) DRX_ACCESSMACRO_GET((d), (x), \
+ DRX_XS_CFG_QAM_LOCKRANGE, enum drx_qam_lock_range, DRX_UNKNOWN)
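+
+/*
+ * Illustrative use only, not part of the driver: DRX_GET_QAM_LOCKRANGE()
+ * builds a struct drx_cfg with cfg_type DRX_XS_CFG_QAM_LOCKRANGE, lets
+ * drx_ctrl(demod, DRX_CTRL_GET_CFG, &config) fill in the data, and falls
+ * back to DRX_UNKNOWN when the call fails:
+ *
+ *   enum drx_qam_lock_range lock_range;
+ *   DRX_GET_QAM_LOCKRANGE(demod, lock_range);
+ */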
+
+/**
+* \brief Macro to check if std is an ATV standard
+* \retval true std is an ATV standard
+* \retval false std is not an ATV standard
+*/
+#define DRX_ISATVSTD(std) (((std) == DRX_STANDARD_PAL_SECAM_BG) || \
+ ((std) == DRX_STANDARD_PAL_SECAM_DK) || \
+ ((std) == DRX_STANDARD_PAL_SECAM_I) || \
+ ((std) == DRX_STANDARD_PAL_SECAM_L) || \
+ ((std) == DRX_STANDARD_PAL_SECAM_LP) || \
+ ((std) == DRX_STANDARD_NTSC) || \
+ ((std) == DRX_STANDARD_FM))
+
+/**
+* \brief Macro to check if std is a QAM standard
+* \retval true std is a QAM standard
+* \retval false std is not a QAM standard
+*/
+#define DRX_ISQAMSTD(std) (((std) == DRX_STANDARD_ITU_A) || \
+ ((std) == DRX_STANDARD_ITU_B) || \
+ ((std) == DRX_STANDARD_ITU_C) || \
+ ((std) == DRX_STANDARD_ITU_D))
+
+/**
+* \brief Macro to check if std is VSB standard
+* \retval true std is VSB standard
+* \retval false std is not VSB standard
+*/
+#define DRX_ISVSBSTD(std) ((std) == DRX_STANDARD_8VSB)
+
+/**
+* \brief Macro to check if std is DVBT standard
+* \retval true std is DVBT standard
+* \retval false std is not DVBT standard
+*/
+#define DRX_ISDVBTSTD(std) ((std) == DRX_STANDARD_DVBT)
+
+/*-------------------------------------------------------------------------
+THE END
+-------------------------------------------------------------------------*/
+#endif /* __DRXDRIVER_H__ */
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver_version.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver_version.h
new file mode 100644
index 000000000000..ff05a4ffb190
--- /dev/null
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver_version.h
@@ -0,0 +1,72 @@
+/*
+ *******************************************************************************
+ * WARNING - THIS FILE HAS BEEN GENERATED - DO NOT CHANGE
+ *
+ * Filename: drx_driver_version.h
+ * Generated on: Mon Jan 18 12:09:23 2010
+ * Generated by: IDF:x 1.3.0
+ * Generated from: ../../../device/drxj/version
+ * Output start: [entry point]
+ *
+ * filename last modified re-use
+ *
+ Copyright (c), 2004-2005,2007-2010 Trident Microsystems, Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Trident Microsystems nor Hauppauge Computer Works
+ nor the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* -----------------------------------------------------
+ * version.idf Mon Jan 18 11:56:10 2010 -
+ *
+ */
+
+#ifndef __DRX_DRIVER_VERSION__H__
+#define __DRX_DRIVER_VERSION__H__ INCLUDED
+
+#ifdef _REGISTERTABLE_
+#include <registertable.h>
+ extern register_table_t drx_driver_version[];
+ extern register_table_info_t drx_driver_version_info[];
+#endif /* _REGISTERTABLE_ */
+
+/*
+ *==============================================================================
+ * VERSION
+ * version@/var/cvs/projects/drxj.cvsroot/hostcode/drxdriver/device/drxj
+ *==============================================================================
+ */
+
+#define VERSION__A 0x0
+#define VERSION_MAJOR 1
+#define VERSION_MINOR 0
+#define VERSION_PATCH 56
+
+#endif /* __DRX_DRIVER_VERSION__H__ */
+/*
+ * End of file (drx_driver_version.h)
+ *******************************************************************************
+ */
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
new file mode 100644
index 000000000000..9482954fd453
--- /dev/null
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -0,0 +1,12400 @@
+/*
+ Copyright (c), 2004-2005,2007-2010 Trident Microsystems, Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Trident Microsystems nor Hauppauge Computer Works
+ nor the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ DRXJ specific implementation of DRX driver
+ authors: Dragan Savic, Milos Nikolic, Mihajlo Katona, Tao Ding, Paul Janssen
+
+ The Linux DVB Driver for Micronas DRX39xx family (drx3933j) was
+ written by Devin Heitmueller <devin.heitmueller@kernellabs.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+/*-----------------------------------------------------------------------------
+INCLUDE FILES
+----------------------------------------------------------------------------*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/div64.h>
+
+#include "dvb_frontend.h"
+#include "drx39xxj.h"
+
+#include "drxj.h"
+#include "drxj_map.h"
+
+/*============================================================================*/
+/*=== DEFINES ================================================================*/
+/*============================================================================*/
+
+#define DRX39XX_MAIN_FIRMWARE "dvb-fe-drxj-mc-1.0.8.fw"
+
+/**
+* \brief Maximum u32 value.
+*/
+#ifndef MAX_U32
+#define MAX_U32 ((u32) (0xFFFFFFFFL))
+#endif
+
+/* Customer configurable hardware settings, etc */
+#ifndef MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH
+#define MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH 0x02
+#endif
+
+#ifndef MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH
+#define MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH 0x02
+#endif
+
+#ifndef MPEG_OUTPUT_CLK_DRIVE_STRENGTH
+#define MPEG_OUTPUT_CLK_DRIVE_STRENGTH 0x06
+#endif
+
+#ifndef OOB_CRX_DRIVE_STRENGTH
+#define OOB_CRX_DRIVE_STRENGTH 0x02
+#endif
+
+#ifndef OOB_DRX_DRIVE_STRENGTH
+#define OOB_DRX_DRIVE_STRENGTH 0x02
+#endif
+/**** START DJCOMBO patches to DRXJ registermap constants *********************/
+/**** registermap 200706071303 from drxj **************************************/
+#define ATV_TOP_CR_AMP_TH_FM 0x0
+#define ATV_TOP_CR_AMP_TH_L 0xA
+#define ATV_TOP_CR_AMP_TH_LP 0xA
+#define ATV_TOP_CR_AMP_TH_BG 0x8
+#define ATV_TOP_CR_AMP_TH_DK 0x8
+#define ATV_TOP_CR_AMP_TH_I 0x8
+#define ATV_TOP_CR_CONT_CR_D_MN 0x18
+#define ATV_TOP_CR_CONT_CR_D_FM 0x0
+#define ATV_TOP_CR_CONT_CR_D_L 0x20
+#define ATV_TOP_CR_CONT_CR_D_LP 0x20
+#define ATV_TOP_CR_CONT_CR_D_BG 0x18
+#define ATV_TOP_CR_CONT_CR_D_DK 0x18
+#define ATV_TOP_CR_CONT_CR_D_I 0x18
+#define ATV_TOP_CR_CONT_CR_I_MN 0x80
+#define ATV_TOP_CR_CONT_CR_I_FM 0x0
+#define ATV_TOP_CR_CONT_CR_I_L 0x80
+#define ATV_TOP_CR_CONT_CR_I_LP 0x80
+#define ATV_TOP_CR_CONT_CR_I_BG 0x80
+#define ATV_TOP_CR_CONT_CR_I_DK 0x80
+#define ATV_TOP_CR_CONT_CR_I_I 0x80
+#define ATV_TOP_CR_CONT_CR_P_MN 0x4
+#define ATV_TOP_CR_CONT_CR_P_FM 0x0
+#define ATV_TOP_CR_CONT_CR_P_L 0x4
+#define ATV_TOP_CR_CONT_CR_P_LP 0x4
+#define ATV_TOP_CR_CONT_CR_P_BG 0x4
+#define ATV_TOP_CR_CONT_CR_P_DK 0x4
+#define ATV_TOP_CR_CONT_CR_P_I 0x4
+#define ATV_TOP_CR_OVM_TH_MN 0xA0
+#define ATV_TOP_CR_OVM_TH_FM 0x0
+#define ATV_TOP_CR_OVM_TH_L 0xA0
+#define ATV_TOP_CR_OVM_TH_LP 0xA0
+#define ATV_TOP_CR_OVM_TH_BG 0xA0
+#define ATV_TOP_CR_OVM_TH_DK 0xA0
+#define ATV_TOP_CR_OVM_TH_I 0xA0
+#define ATV_TOP_EQU0_EQU_C0_FM 0x0
+#define ATV_TOP_EQU0_EQU_C0_L 0x3
+#define ATV_TOP_EQU0_EQU_C0_LP 0x3
+#define ATV_TOP_EQU0_EQU_C0_BG 0x7
+#define ATV_TOP_EQU0_EQU_C0_DK 0x0
+#define ATV_TOP_EQU0_EQU_C0_I 0x3
+#define ATV_TOP_EQU1_EQU_C1_FM 0x0
+#define ATV_TOP_EQU1_EQU_C1_L 0x1F6
+#define ATV_TOP_EQU1_EQU_C1_LP 0x1F6
+#define ATV_TOP_EQU1_EQU_C1_BG 0x197
+#define ATV_TOP_EQU1_EQU_C1_DK 0x198
+#define ATV_TOP_EQU1_EQU_C1_I 0x1F6
+#define ATV_TOP_EQU2_EQU_C2_FM 0x0
+#define ATV_TOP_EQU2_EQU_C2_L 0x28
+#define ATV_TOP_EQU2_EQU_C2_LP 0x28
+#define ATV_TOP_EQU2_EQU_C2_BG 0xC5
+#define ATV_TOP_EQU2_EQU_C2_DK 0xB0
+#define ATV_TOP_EQU2_EQU_C2_I 0x28
+#define ATV_TOP_EQU3_EQU_C3_FM 0x0
+#define ATV_TOP_EQU3_EQU_C3_L 0x192
+#define ATV_TOP_EQU3_EQU_C3_LP 0x192
+#define ATV_TOP_EQU3_EQU_C3_BG 0x12E
+#define ATV_TOP_EQU3_EQU_C3_DK 0x18E
+#define ATV_TOP_EQU3_EQU_C3_I 0x192
+#define ATV_TOP_STD_MODE_MN 0x0
+#define ATV_TOP_STD_MODE_FM 0x1
+#define ATV_TOP_STD_MODE_L 0x0
+#define ATV_TOP_STD_MODE_LP 0x0
+#define ATV_TOP_STD_MODE_BG 0x0
+#define ATV_TOP_STD_MODE_DK 0x0
+#define ATV_TOP_STD_MODE_I 0x0
+#define ATV_TOP_STD_VID_POL_MN 0x0
+#define ATV_TOP_STD_VID_POL_FM 0x0
+#define ATV_TOP_STD_VID_POL_L 0x2
+#define ATV_TOP_STD_VID_POL_LP 0x2
+#define ATV_TOP_STD_VID_POL_BG 0x0
+#define ATV_TOP_STD_VID_POL_DK 0x0
+#define ATV_TOP_STD_VID_POL_I 0x0
+#define ATV_TOP_VID_AMP_MN 0x380
+#define ATV_TOP_VID_AMP_FM 0x0
+#define ATV_TOP_VID_AMP_L 0xF50
+#define ATV_TOP_VID_AMP_LP 0xF50
+#define ATV_TOP_VID_AMP_BG 0x380
+#define ATV_TOP_VID_AMP_DK 0x394
+#define ATV_TOP_VID_AMP_I 0x3D8
+#define IQM_CF_OUT_ENA_OFDM__M 0x4
+#define IQM_FS_ADJ_SEL_B_QAM 0x1
+#define IQM_FS_ADJ_SEL_B_OFF 0x0
+#define IQM_FS_ADJ_SEL_B_VSB 0x2
+#define IQM_RC_ADJ_SEL_B_OFF 0x0
+#define IQM_RC_ADJ_SEL_B_QAM 0x1
+#define IQM_RC_ADJ_SEL_B_VSB 0x2
+/**** END DJCOMBO patches to DRXJ registermap *********************************/
+
+#include "drx_driver_version.h"
+
+/* #define DRX_DEBUG */
+#ifdef DRX_DEBUG
+#include <stdio.h>
+#endif
+
+/*-----------------------------------------------------------------------------
+ENUMS
+----------------------------------------------------------------------------*/
+
+/*-----------------------------------------------------------------------------
+DEFINES
+----------------------------------------------------------------------------*/
+#ifndef DRXJ_WAKE_UP_KEY
+#define DRXJ_WAKE_UP_KEY (demod->my_i2c_dev_addr->i2c_addr)
+#endif
+
+/**
+* \def DRXJ_DEF_I2C_ADDR
+* \brief Default I2C address of a demodulator instance.
+*/
+#define DRXJ_DEF_I2C_ADDR (0x52)
+
+/**
+* \def DRXJ_DEF_DEMOD_DEV_ID
+* \brief Default device identifier of a demodulator instance.
+*/
+#define DRXJ_DEF_DEMOD_DEV_ID (1)
+
+/**
+* \def DRXJ_SCAN_TIMEOUT
+* \brief Timeout value for waiting on demod lock during channel scan (millisec).
+*/
+#define DRXJ_SCAN_TIMEOUT 1000
+
+/**
+* \def HI_I2C_DELAY
+* \brief HI timing delay for I2C timing (in nanoseconds)
+*
+* Used to compute HI_CFG_DIV
+*/
+#define HI_I2C_DELAY 42
+
+/**
+* \def HI_I2C_BRIDGE_DELAY
+* \brief HI timing delay for I2C timing (in nanoseconds)
+*
+* Used to compute HI_CFG_BDL
+*/
+#define HI_I2C_BRIDGE_DELAY 750
+
+/**
+* \brief Time Window for MER and SER Measurement in Units of Segment duration.
+*/
+#define VSB_TOP_MEASUREMENT_PERIOD 64
+#define SYMBOLS_PER_SEGMENT 832
+
+/**
+* \brief bit rate and segment rate constants used for SER and BER.
+*/
+/* values taken from the QAM microcode */
+#define DRXJ_QAM_SL_SIG_POWER_QAM_UNKNOWN 0
+#define DRXJ_QAM_SL_SIG_POWER_QPSK 32768
+#define DRXJ_QAM_SL_SIG_POWER_QAM8 24576
+#define DRXJ_QAM_SL_SIG_POWER_QAM16 40960
+#define DRXJ_QAM_SL_SIG_POWER_QAM32 20480
+#define DRXJ_QAM_SL_SIG_POWER_QAM64 43008
+#define DRXJ_QAM_SL_SIG_POWER_QAM128 20992
+#define DRXJ_QAM_SL_SIG_POWER_QAM256 43520
+/**
+* \brief Minimum supported symbol rate.
+*/
+#ifndef DRXJ_QAM_SYMBOLRATE_MIN
+#define DRXJ_QAM_SYMBOLRATE_MIN (520000)
+#endif
+
+/**
+* \brief Maximum supported symbol rate.
+*/
+#ifndef DRXJ_QAM_SYMBOLRATE_MAX
+#define DRXJ_QAM_SYMBOLRATE_MAX (7233000)
+#endif
+
+/**
+* \def DRXJ_QAM_MAX_WAITTIME
+* \brief Maximal wait time for QAM auto constellation in ms
+*/
+#ifndef DRXJ_QAM_MAX_WAITTIME
+#define DRXJ_QAM_MAX_WAITTIME 900
+#endif
+
+#ifndef DRXJ_QAM_FEC_LOCK_WAITTIME
+#define DRXJ_QAM_FEC_LOCK_WAITTIME 150
+#endif
+
+#ifndef DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME
+#define DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME 200
+#endif
+
+/**
+* \brief SCU status and result codes.
+*/
+#define DRX_SCU_READY 0
+#define DRXJ_MAX_WAITTIME 100 /* ms */
+#define FEC_RS_MEASUREMENT_PERIOD 12894 /* 1 sec */
+#define FEC_RS_MEASUREMENT_PRESCALE 1 /* n sec */
+
+/**
+* \def DRXJ_AUD_MAX_FM_DEVIATION
+* \brief Needed for calculation of prescale feature in AUD
+*/
+#ifndef DRXJ_AUD_MAX_FM_DEVIATION
+#define DRXJ_AUD_MAX_FM_DEVIATION 100 /* kHz */
+#endif
+
+/**
+* \brief Needed for calculation of NICAM prescale feature in AUD
+*/
+#ifndef DRXJ_AUD_MAX_NICAM_PRESCALE
+#define DRXJ_AUD_MAX_NICAM_PRESCALE (9) /* dB */
+#endif
+
+/**
+* \brief Needed for calculation of NICAM prescale feature in AUD
+*/
+#ifndef DRXJ_AUD_MAX_WAITTIME
+#define DRXJ_AUD_MAX_WAITTIME 250 /* ms */
+#endif
+
+/* ATV config changed flags */
+#define DRXJ_ATV_CHANGED_COEF (0x00000001UL)
+#define DRXJ_ATV_CHANGED_PEAK_FLT (0x00000008UL)
+#define DRXJ_ATV_CHANGED_NOISE_FLT (0x00000010UL)
+#define DRXJ_ATV_CHANGED_OUTPUT (0x00000020UL)
+#define DRXJ_ATV_CHANGED_SIF_ATT (0x00000040UL)
+
+/* UIO define */
+#define DRX_UIO_MODE_FIRMWARE_SMA DRX_UIO_MODE_FIRMWARE0
+#define DRX_UIO_MODE_FIRMWARE_SAW DRX_UIO_MODE_FIRMWARE1
+
+/*
+ * MICROCODE RELATED DEFINES
+ */
+
+/* Magic word for checking correct endianness of microcode data */
+#define DRX_UCODE_MAGIC_WORD ((((u16)'H')<<8)+((u16)'L'))
+
+/* CRC flag in ucode header, flags field. */
+#define DRX_UCODE_CRC_FLAG (0x0001)
+
+/*
+ * Maximum size of buffer used to verify the microcode.
+ * Must be an even number
+ */
+#define DRX_UCODE_MAX_BUF_SIZE (DRXDAP_MAX_RCHUNKSIZE)
+
+#if DRX_UCODE_MAX_BUF_SIZE & 1
+#error DRX_UCODE_MAX_BUF_SIZE must be an even number
+#endif
+
+/*
+ * Power mode macros
+ */
+
+#define DRX_ISPOWERDOWNMODE(mode) ((mode == DRX_POWER_MODE_9) || \
+ (mode == DRX_POWER_MODE_10) || \
+ (mode == DRX_POWER_MODE_11) || \
+ (mode == DRX_POWER_MODE_12) || \
+ (mode == DRX_POWER_MODE_13) || \
+ (mode == DRX_POWER_MODE_14) || \
+ (mode == DRX_POWER_MODE_15) || \
+ (mode == DRX_POWER_MODE_16) || \
+ (mode == DRX_POWER_DOWN))
+
+/* Pin safe mode macro */
+#define DRXJ_PIN_SAFE_MODE 0x0000
+/*============================================================================*/
+/*=== GLOBAL VARIABLES =======================================================*/
+/*============================================================================*/
+
+/**
+* \brief Temporary register definitions.
+* (register definitions that are not yet available in register master)
+*/
+
+/******************************************************************************/
+/* Audio block 0x103 is write only. To avoid shadowing, the driver accesses */
+/* the RAM addresses directly. These must be treated as READ ONLY to avoid */
+/* problems: writing to the interface addresses does more than just write */
+/* the RAM locations. */
+/******************************************************************************/
+/**
+* \brief RAM location of MODUS registers
+*/
+#define AUD_DEM_RAM_MODUS_HI__A 0x10204A3
+#define AUD_DEM_RAM_MODUS_HI__M 0xF000
+
+#define AUD_DEM_RAM_MODUS_LO__A 0x10204A4
+#define AUD_DEM_RAM_MODUS_LO__M 0x0FFF
+
+/**
+* \brief RAM location of I2S config registers
+*/
+#define AUD_DEM_RAM_I2S_CONFIG1__A 0x10204B1
+#define AUD_DEM_RAM_I2S_CONFIG2__A 0x10204B2
+
+/**
+* \brief RAM location of DCO config registers
+*/
+#define AUD_DEM_RAM_DCO_B_HI__A 0x1020461
+#define AUD_DEM_RAM_DCO_B_LO__A 0x1020462
+#define AUD_DEM_RAM_DCO_A_HI__A 0x1020463
+#define AUD_DEM_RAM_DCO_A_LO__A 0x1020464
+
+/**
+* \brief RAM location of Threshold registers
+*/
+#define AUD_DEM_RAM_NICAM_THRSHLD__A 0x102045A
+#define AUD_DEM_RAM_A2_THRSHLD__A 0x10204BB
+#define AUD_DEM_RAM_BTSC_THRSHLD__A 0x10204A6
+
+/**
+* \brief RAM location of Carrier Threshold registers
+*/
+#define AUD_DEM_RAM_CM_A_THRSHLD__A 0x10204AF
+#define AUD_DEM_RAM_CM_B_THRSHLD__A 0x10204B0
+
+/**
+* \brief FM Matrix register fix
+*/
+#ifdef AUD_DEM_WR_FM_MATRIX__A
+#undef AUD_DEM_WR_FM_MATRIX__A
+#endif
+#define AUD_DEM_WR_FM_MATRIX__A 0x105006F
+
+/*============================================================================*/
+/**
+* \brief Defines required for audio
+*/
+#define AUD_VOLUME_ZERO_DB 115
+#define AUD_VOLUME_DB_MIN -60
+#define AUD_VOLUME_DB_MAX 12
+#define AUD_CARRIER_STRENGTH_QP_0DB 0x4000
+#define AUD_CARRIER_STRENGTH_QP_0DB_LOG10T100 421
+#define AUD_MAX_AVC_REF_LEVEL 15
+#define AUD_I2S_FREQUENCY_MAX 48000UL
+#define AUD_I2S_FREQUENCY_MIN 12000UL
+#define AUD_RDS_ARRAY_SIZE 18
+
+/**
+* \brief Needed for calculation of prescale feature in AUD
+*/
+#ifndef DRX_AUD_MAX_FM_DEVIATION
+#define DRX_AUD_MAX_FM_DEVIATION (100) /* kHz */
+#endif
+
+/**
+* \brief Needed for calculation of NICAM prescale feature in AUD
+*/
+#ifndef DRX_AUD_MAX_NICAM_PRESCALE
+#define DRX_AUD_MAX_NICAM_PRESCALE (9) /* dB */
+#endif
+
+/*============================================================================*/
+/* Values for I2S Master/Slave pin configurations */
+#define SIO_PDR_I2S_CL_CFG_MODE__MASTER 0x0004
+#define SIO_PDR_I2S_CL_CFG_DRIVE__MASTER 0x0008
+#define SIO_PDR_I2S_CL_CFG_MODE__SLAVE 0x0004
+#define SIO_PDR_I2S_CL_CFG_DRIVE__SLAVE 0x0000
+
+#define SIO_PDR_I2S_DA_CFG_MODE__MASTER 0x0003
+#define SIO_PDR_I2S_DA_CFG_DRIVE__MASTER 0x0008
+#define SIO_PDR_I2S_DA_CFG_MODE__SLAVE 0x0003
+#define SIO_PDR_I2S_DA_CFG_DRIVE__SLAVE 0x0008
+
+#define SIO_PDR_I2S_WS_CFG_MODE__MASTER 0x0004
+#define SIO_PDR_I2S_WS_CFG_DRIVE__MASTER 0x0008
+#define SIO_PDR_I2S_WS_CFG_MODE__SLAVE 0x0004
+#define SIO_PDR_I2S_WS_CFG_DRIVE__SLAVE 0x0000
+
+/*============================================================================*/
+/*=== REGISTER ACCESS MACROS =================================================*/
+/*============================================================================*/
+
+/**
+* This macro is used to create byte arrays for block writes.
+* Block writes speed up I2C traffic between host and demod.
+* The macro takes care of the required byte order in a 16-bit word.
+* x -> lowbyte(x), highbyte(x)
+*/
+#define DRXJ_16TO8(x) ((u8) (((u16)x) & 0xFF)), \
+ ((u8)((((u16)x)>>8)&0xFF))
+/**
+* This macro is used to convert a byte array to a 16-bit register value for block reads.
+* Block reads speed up I2C traffic between host and demod.
+* The macro takes care of the required byte order in a 16-bit word.
+*/
+#define DRXJ_8TO16(x) ((u16) (x[0] | (x[1] << 8)))
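+
+/*
+ * Example (illustrative only): for the 16-bit value 0x1234 the two macros
+ * give
+ *
+ *   u8 block[] = { DRXJ_16TO8(0x1234) };   // { 0x34, 0x12 }
+ *   u16 value = DRXJ_8TO16(block);         // 0x1234
+ *
+ * i.e. the low byte is sent first on the I2C bus.
+ */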
+
+/*============================================================================*/
+/*=== MISC DEFINES ===========================================================*/
+/*============================================================================*/
+
+/*============================================================================*/
+/*=== HI COMMAND RELATED DEFINES =============================================*/
+/*============================================================================*/
+
+/**
+* \brief General maximum number of retries for ucode command interfaces
+*/
+#define DRXJ_MAX_RETRIES (100)
+
+/*============================================================================*/
+/*=== STANDARD RELATED MACROS ================================================*/
+/*============================================================================*/
+
+#define DRXJ_ISATVSTD(std) ((std == DRX_STANDARD_PAL_SECAM_BG) || \
+ (std == DRX_STANDARD_PAL_SECAM_DK) || \
+ (std == DRX_STANDARD_PAL_SECAM_I) || \
+ (std == DRX_STANDARD_PAL_SECAM_L) || \
+ (std == DRX_STANDARD_PAL_SECAM_LP) || \
+ (std == DRX_STANDARD_NTSC) || \
+ (std == DRX_STANDARD_FM))
+
+#define DRXJ_ISQAMSTD(std) ((std == DRX_STANDARD_ITU_A) || \
+ (std == DRX_STANDARD_ITU_B) || \
+ (std == DRX_STANDARD_ITU_C) || \
+ (std == DRX_STANDARD_ITU_D))
+
+/*-----------------------------------------------------------------------------
+GLOBAL VARIABLES
+----------------------------------------------------------------------------*/
+/*
+ * DRXJ DAP structures
+ */
+
+static int drxdap_fasi_read_block(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 datasize,
+ u8 *data, u32 flags);
+
+
+static int drxj_dap_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
+ u32 waddr,
+ u32 raddr,
+ u16 wdata, u16 *rdata);
+
+static int drxj_dap_read_reg16(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 *data, u32 flags);
+
+static int drxdap_fasi_read_reg32(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u32 *data, u32 flags);
+
+static int drxdap_fasi_write_block(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 datasize,
+ u8 *data, u32 flags);
+
+static int drxj_dap_write_reg16(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 data, u32 flags);
+
+static int drxdap_fasi_write_reg32(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u32 data, u32 flags);
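+
+/*
+ * Illustrative sketch only (the table name is hypothetical; whether and how
+ * the driver actually builds such a table, and which handlers fill the
+ * remaining slots, is not shown here): the prototypes above match the
+ * function pointer fields of struct drx_access_func, e.g.
+ *
+ *   static struct drx_access_func drxj_dap_sketch = {
+ *           .write_block_func            = drxdap_fasi_write_block,
+ *           .read_block_func             = drxdap_fasi_read_block,
+ *           .write_reg16func             = drxj_dap_write_reg16,
+ *           .read_reg16func              = drxj_dap_read_reg16,
+ *           .read_modify_write_reg16func = drxj_dap_read_modify_write_reg16,
+ *           .write_reg32func             = drxdap_fasi_write_reg32,
+ *           .read_reg32func              = drxdap_fasi_read_reg32,
+ *   };
+ */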
+
+static struct drxj_data drxj_data_g = {
+ false, /* has_lna : true if LNA (aka PGA) present */
+ false, /* has_oob : true if OOB supported */
+ false, /* has_ntsc: true if NTSC supported */
+ false, /* has_btsc: true if BTSC supported */
+ false, /* has_smatx: true if SMA_TX pin is available */
+ false, /* has_smarx: true if SMA_RX pin is available */
+ false, /* has_gpio : true if GPIO pin is available */
+ false, /* has_irqn : true if IRQN pin is available */
+ 0, /* mfx A1/A2/A... */
+
+ /* tuner settings */
+ false, /* tuner mirrors RF signal */
+ /* standard/channel settings */
+ DRX_STANDARD_UNKNOWN, /* current standard */
+ DRX_CONSTELLATION_AUTO, /* constellation */
+ 0, /* frequency in KHz */
+ DRX_BANDWIDTH_UNKNOWN, /* curr_bandwidth */
+ DRX_MIRROR_NO, /* mirror */
+
+ /* signal quality information: */
+ /* default values taken from the QAM Programming guide */
+ /* fec_bits_desired should not be less than 4000000 */
+ 4000000, /* fec_bits_desired */
+ 5, /* fec_vd_plen */
+ 4, /* qam_vd_prescale */
+ 0xFFFF, /* qamVDPeriod */
+ 204 * 8, /* fec_rs_plen annex A */
+ 1, /* fec_rs_prescale */
+ FEC_RS_MEASUREMENT_PERIOD, /* fec_rs_period */
+ true, /* reset_pkt_err_acc */
+ 0, /* pkt_err_acc_start */
+
+ /* HI configuration */
+ 0, /* hi_cfg_timing_div */
+ 0, /* hi_cfg_bridge_delay */
+ 0, /* hi_cfg_wake_up_key */
+ 0, /* hi_cfg_ctrl */
+ 0, /* HICfgTimeout */
+ /* UIO configuration */
+ DRX_UIO_MODE_DISABLE, /* uio_sma_rx_mode */
+ DRX_UIO_MODE_DISABLE, /* uio_sma_tx_mode */
+ DRX_UIO_MODE_DISABLE, /* uioASELMode */
+ DRX_UIO_MODE_DISABLE, /* uio_irqn_mode */
+ /* FS setting */
+ 0UL, /* iqm_fs_rate_ofs */
+ false, /* pos_image */
+ /* RC setting */
+ 0UL, /* iqm_rc_rate_ofs */
+ /* AUD information */
+/* false, * flagSetAUDdone */
+/* false, * detectedRDS */
+/* true, * flagASDRequest */
+/* false, * flagHDevClear */
+/* false, * flagHDevSet */
+/* (u16) 0xFFF, * rdsLastCount */
+
+ /* ATV configuration */
+ 0UL, /* flags cfg changes */
+ /* shadow of ATV_TOP_EQU0__A */
+ {-5,
+ ATV_TOP_EQU0_EQU_C0_FM,
+ ATV_TOP_EQU0_EQU_C0_L,
+ ATV_TOP_EQU0_EQU_C0_LP,
+ ATV_TOP_EQU0_EQU_C0_BG,
+ ATV_TOP_EQU0_EQU_C0_DK,
+ ATV_TOP_EQU0_EQU_C0_I},
+ /* shadow of ATV_TOP_EQU1__A */
+ {-50,
+ ATV_TOP_EQU1_EQU_C1_FM,
+ ATV_TOP_EQU1_EQU_C1_L,
+ ATV_TOP_EQU1_EQU_C1_LP,
+ ATV_TOP_EQU1_EQU_C1_BG,
+ ATV_TOP_EQU1_EQU_C1_DK,
+ ATV_TOP_EQU1_EQU_C1_I},
+ /* shadow of ATV_TOP_EQU2__A */
+ {210,
+ ATV_TOP_EQU2_EQU_C2_FM,
+ ATV_TOP_EQU2_EQU_C2_L,
+ ATV_TOP_EQU2_EQU_C2_LP,
+ ATV_TOP_EQU2_EQU_C2_BG,
+ ATV_TOP_EQU2_EQU_C2_DK,
+ ATV_TOP_EQU2_EQU_C2_I},
+ /* shadow of ATV_TOP_EQU3__A */
+ {-160,
+ ATV_TOP_EQU3_EQU_C3_FM,
+ ATV_TOP_EQU3_EQU_C3_L,
+ ATV_TOP_EQU3_EQU_C3_LP,
+ ATV_TOP_EQU3_EQU_C3_BG,
+ ATV_TOP_EQU3_EQU_C3_DK,
+ ATV_TOP_EQU3_EQU_C3_I},
+ false, /* flag: true=bypass */
+ ATV_TOP_VID_PEAK__PRE, /* shadow of ATV_TOP_VID_PEAK__A */
+ ATV_TOP_NOISE_TH__PRE, /* shadow of ATV_TOP_NOISE_TH__A */
+ true, /* flag CVBS output enable */
+ false, /* flag SIF output enable */
+ DRXJ_SIF_ATTENUATION_0DB, /* current SIF att setting */
+ { /* qam_rf_agc_cfg */
+ DRX_STANDARD_ITU_B, /* standard */
+ DRX_AGC_CTRL_AUTO, /* ctrl_mode */
+ 0, /* output_level */
+ 0, /* min_output_level */
+ 0xFFFF, /* max_output_level */
+ 0x0000, /* speed */
+ 0x0000, /* top */
+ 0x0000 /* c.o.c. */
+ },
+ { /* qam_if_agc_cfg */
+ DRX_STANDARD_ITU_B, /* standard */
+ DRX_AGC_CTRL_AUTO, /* ctrl_mode */
+ 0, /* output_level */
+ 0, /* min_output_level */
+ 0xFFFF, /* max_output_level */
+ 0x0000, /* speed */
+ 0x0000, /* top (don't care) */
+ 0x0000 /* c.o.c. (don't care) */
+ },
+ { /* vsb_rf_agc_cfg */
+ DRX_STANDARD_8VSB, /* standard */
+ DRX_AGC_CTRL_AUTO, /* ctrl_mode */
+ 0, /* output_level */
+ 0, /* min_output_level */
+ 0xFFFF, /* max_output_level */
+ 0x0000, /* speed */
+ 0x0000, /* top (don't care) */
+ 0x0000 /* c.o.c. (don't care) */
+ },
+ { /* vsb_if_agc_cfg */
+ DRX_STANDARD_8VSB, /* standard */
+ DRX_AGC_CTRL_AUTO, /* ctrl_mode */
+ 0, /* output_level */
+ 0, /* min_output_level */
+ 0xFFFF, /* max_output_level */
+ 0x0000, /* speed */
+ 0x0000, /* top (don't care) */
+ 0x0000 /* c.o.c. (don't care) */
+ },
+ 0, /* qam_pga_cfg */
+ 0, /* vsb_pga_cfg */
+ { /* qam_pre_saw_cfg */
+ DRX_STANDARD_ITU_B, /* standard */
+ 0, /* reference */
+ false /* use_pre_saw */
+ },
+ { /* vsb_pre_saw_cfg */
+ DRX_STANDARD_8VSB, /* standard */
+ 0, /* reference */
+ false /* use_pre_saw */
+ },
+
+ /* Version information */
+#ifndef _CH_
+ {
+ "01234567890", /* human readable version microcode */
+ "01234567890" /* human readable version device specific code */
+ },
+ {
+ { /* struct drx_version for microcode */
+ DRX_MODULE_UNKNOWN,
+ (char *)(NULL),
+ 0,
+ 0,
+ 0,
+ (char *)(NULL)
+ },
+ { /* struct drx_version for device specific code */
+ DRX_MODULE_UNKNOWN,
+ (char *)(NULL),
+ 0,
+ 0,
+ 0,
+ (char *)(NULL)
+ }
+ },
+ {
+ { /* struct drx_version_list for microcode */
+ (struct drx_version *) (NULL),
+ (struct drx_version_list *) (NULL)
+ },
+ { /* struct drx_version_list for device specific code */
+ (struct drx_version *) (NULL),
+ (struct drx_version_list *) (NULL)
+ }
+ },
+#endif
+ false, /* smart_ant_inverted */
+ /* Tracking filter setting for OOB */
+ {
+ 12000,
+ 9300,
+ 6600,
+ 5280,
+ 3700,
+ 3000,
+ 2000,
+ 0},
+ false, /* oob_power_on */
+ 0, /* mpeg_ts_static_bitrate */
+ false, /* disable_te_ihandling */
+ false, /* bit_reverse_mpeg_outout */
+ DRXJ_MPEGOUTPUT_CLOCK_RATE_AUTO, /* mpeg_output_clock_rate */
+ DRXJ_MPEG_START_WIDTH_1CLKCYC, /* mpeg_start_width */
+
+ /* Pre SAW & Agc configuration for ATV */
+ {
+ DRX_STANDARD_NTSC, /* standard */
+ 7, /* reference */
+ true /* use_pre_saw */
+ },
+ { /* ATV RF-AGC */
+ DRX_STANDARD_NTSC, /* standard */
+ DRX_AGC_CTRL_AUTO, /* ctrl_mode */
+ 0, /* output_level */
+ 0, /* min_output_level (d.c.) */
+ 0, /* max_output_level (d.c.) */
+ 3, /* speed */
+ 9500, /* top */
+ 4000 /* cut-off current */
+ },
+ { /* ATV IF-AGC */
+ DRX_STANDARD_NTSC, /* standard */
+ DRX_AGC_CTRL_AUTO, /* ctrl_mode */
+ 0, /* output_level */
+ 0, /* min_output_level (d.c.) */
+ 0, /* max_output_level (d.c.) */
+ 3, /* speed */
+ 2400, /* top */
+ 0 /* c.o.c. (d.c.) */
+ },
+ 140, /* ATV PGA config */
+ 0, /* curr_symbol_rate */
+
+ false, /* pdr_safe_mode */
+ SIO_PDR_GPIO_CFG__PRE, /* pdr_safe_restore_val_gpio */
+ SIO_PDR_VSYNC_CFG__PRE, /* pdr_safe_restore_val_v_sync */
+ SIO_PDR_SMA_RX_CFG__PRE, /* pdr_safe_restore_val_sma_rx */
+ SIO_PDR_SMA_TX_CFG__PRE, /* pdr_safe_restore_val_sma_tx */
+
+ 4, /* oob_pre_saw */
+ DRXJ_OOB_LO_POW_MINUS10DB, /* oob_lo_pow */
+ {
+ false /* aud_data, only first member */
+ },
+};
+
+/**
+* \var drxj_default_addr_g
+* \brief Default I2C address and device identifier.
+*/
+static struct i2c_device_addr drxj_default_addr_g = {
+ DRXJ_DEF_I2C_ADDR, /* i2c address */
+ DRXJ_DEF_DEMOD_DEV_ID /* device id */
+};
+
+/**
+* \var drxj_default_comm_attr_g
+* \brief Default common attributes of a drxj demodulator instance.
+*/
+static struct drx_common_attr drxj_default_comm_attr_g = {
+ NULL, /* ucode file */
+ true, /* ucode verify switch */
+ {0}, /* version record */
+
+ 44000, /* IF in kHz in case no tuner instance is used */
+ (151875 - 0), /* system clock frequency in kHz */
+ 0, /* oscillator frequency kHz */
+ 0, /* oscillator deviation in ppm, signed */
+ false, /* If true mirror frequency spectrum */
+ {
+ /* MPEG output configuration */
+	 true, /* If true, enable MPEG output */
+ false, /* If true, insert RS byte */
+ false, /* If true, parallel out otherwise serial */
+ false, /* If true, invert DATA signals */
+ false, /* If true, invert ERR signal */
+ false, /* If true, invert STR signals */
+ false, /* If true, invert VAL signals */
+ false, /* If true, invert CLK signals */
+ true, /* If true, static MPEG clockrate will
+ be used, otherwise clockrate will
+ adapt to the bitrate of the TS */
+ 19392658UL, /* Maximum bitrate in b/s in case
+ static clockrate is selected */
+ DRX_MPEG_STR_WIDTH_1 /* MPEG Start width in clock cycles */
+ },
+	/* The initialisations below can be omitted; they require no user input and
+	   are initially 0, NULL or false. The compiler will initialize them to these
+	   values when omitted. */
+ false, /* is_opened */
+
+ /* SCAN */
+ NULL, /* no scan params yet */
+ 0, /* current scan index */
+ 0, /* next scan frequency */
+ false, /* scan ready flag */
+ 0, /* max channels to scan */
+ 0, /* nr of channels scanned */
+ NULL, /* default scan function */
+ NULL, /* default context pointer */
+ 0, /* millisec to wait for demod lock */
+ DRXJ_DEMOD_LOCK, /* desired lock */
+ false,
+
+ /* Power management */
+ DRX_POWER_UP,
+
+ /* Tuner */
+	1, /* number of the I2C port to which the tuner is connected */
+ 0L, /* minimum RF input frequency, in kHz */
+ 0L, /* maximum RF input frequency, in kHz */
+ false, /* Rf Agc Polarity */
+ false, /* If Agc Polarity */
+ false, /* tuner slow mode */
+
+ { /* current channel (all 0) */
+ 0UL /* channel.frequency */
+ },
+ DRX_STANDARD_UNKNOWN, /* current standard */
+ DRX_STANDARD_UNKNOWN, /* previous standard */
+ DRX_STANDARD_UNKNOWN, /* di_cache_standard */
+ false, /* use_bootloader */
+ 0UL, /* capabilities */
+ 0 /* mfx */
+};
+
+/**
+* \var drxj_default_demod_g
+* \brief Default drxj demodulator instance.
+*/
+static struct drx_demod_instance drxj_default_demod_g = {
+ &drxj_default_addr_g, /* i2c address & device id */
+ &drxj_default_comm_attr_g, /* demod common attributes */
+ &drxj_data_g /* demod device specific attributes */
+};
+
+/**
+* \brief Default audio data structure for a DRXJ demodulator instance.
+*
+* This structure is DRXJ specific.
+*
+*/
+static struct drx_aud_data drxj_default_aud_data_g = {
+ false, /* audio_is_active */
+ DRX_AUD_STANDARD_AUTO, /* audio_standard */
+
+ /* i2sdata */
+ {
+ false, /* output_enable */
+ 48000, /* frequency */
+ DRX_I2S_MODE_MASTER, /* mode */
+ DRX_I2S_WORDLENGTH_32, /* word_length */
+ DRX_I2S_POLARITY_RIGHT, /* polarity */
+ DRX_I2S_FORMAT_WS_WITH_DATA /* format */
+ },
+ /* volume */
+ {
+ true, /* mute; */
+ 0, /* volume */
+ DRX_AUD_AVC_OFF, /* avc_mode */
+ 0, /* avc_ref_level */
+ DRX_AUD_AVC_MAX_GAIN_12DB, /* avc_max_gain */
+ DRX_AUD_AVC_MAX_ATTEN_24DB, /* avc_max_atten */
+ 0, /* strength_left */
+ 0 /* strength_right */
+ },
+ DRX_AUD_AUTO_SOUND_SELECT_ON_CHANGE_ON, /* auto_sound */
+ /* ass_thresholds */
+ {
+ 440, /* A2 */
+ 12, /* BTSC */
+ 700, /* NICAM */
+ },
+ /* carrier */
+ {
+ /* a */
+ {
+ 42, /* thres */
+ DRX_NO_CARRIER_NOISE, /* opt */
+ 0, /* shift */
+ 0 /* dco */
+ },
+ /* b */
+ {
+ 42, /* thres */
+ DRX_NO_CARRIER_MUTE, /* opt */
+ 0, /* shift */
+ 0 /* dco */
+ },
+
+ },
+ /* mixer */
+ {
+ DRX_AUD_SRC_STEREO_OR_A, /* source_i2s */
+ DRX_AUD_I2S_MATRIX_STEREO, /* matrix_i2s */
+ DRX_AUD_FM_MATRIX_SOUND_A /* matrix_fm */
+ },
+ DRX_AUD_DEVIATION_NORMAL, /* deviation */
+ DRX_AUD_AVSYNC_OFF, /* av_sync */
+
+ /* prescale */
+ {
+ DRX_AUD_MAX_FM_DEVIATION, /* fm_deviation */
+ DRX_AUD_MAX_NICAM_PRESCALE /* nicam_gain */
+ },
+ DRX_AUD_FM_DEEMPH_75US, /* deemph */
+ DRX_BTSC_STEREO, /* btsc_detect */
+ 0, /* rds_data_counter */
+ false /* rds_data_present */
+};
+
+/*-----------------------------------------------------------------------------
+STRUCTURES
+----------------------------------------------------------------------------*/
+struct drxjeq_stat {
+ u16 eq_mse;
+ u8 eq_mode;
+ u8 eq_ctrl;
+ u8 eq_stat;
+};
+
+/* HI command */
+struct drxj_hi_cmd {
+ u16 cmd;
+ u16 param1;
+ u16 param2;
+ u16 param3;
+ u16 param4;
+ u16 param5;
+ u16 param6;
+};
+
+/*============================================================================*/
+/*=== MICROCODE RELATED STRUCTURES ===========================================*/
+/*============================================================================*/
+
+/**
+ * struct drxu_code_block_hdr - Structure of the microcode block headers
+ *
+ * @addr: Destination address of the data in this block
+ * @size:	Size of the block data following this header, counted in
+ *		16-bit words
+ * @flags:	Flags for this block (among others, whether @CRC is valid)
+ * @CRC:	CRC value of the data block, only valid if the CRC flag is
+ *		set.
+ */
+struct drxu_code_block_hdr {
+ u32 addr;
+ u16 size;
+ u16 flags;
+ u16 CRC;
+};
+
+/*-----------------------------------------------------------------------------
+FUNCTIONS
+----------------------------------------------------------------------------*/
+/* Some prototypes */
+static int
+hi_command(struct i2c_device_addr *dev_addr,
+ const struct drxj_hi_cmd *cmd, u16 *result);
+
+static int
+ctrl_lock_status(struct drx_demod_instance *demod, enum drx_lock_status *lock_stat);
+
+static int
+ctrl_power_mode(struct drx_demod_instance *demod, enum drx_power_mode *mode);
+
+static int power_down_aud(struct drx_demod_instance *demod);
+
+static int
+ctrl_set_cfg_pre_saw(struct drx_demod_instance *demod, struct drxj_cfg_pre_saw *pre_saw);
+
+static int
+ctrl_set_cfg_afe_gain(struct drx_demod_instance *demod, struct drxj_cfg_afe_gain *afe_gain);
+
+/*============================================================================*/
+/*============================================================================*/
+/*== HELPER FUNCTIONS ==*/
+/*============================================================================*/
+/*============================================================================*/
+
+
+/*============================================================================*/
+
+/*
+* \fn u32 frac28(u32 N, u32 D)
+* \brief Compute: (1<<28)*N/D
+* \param N 32 bits
+* \param D 32 bits
+* \return (1<<28)*N/D
+* This function is used to avoid floating-point calculations as they may
+* not be present on the target platform.
+*
+* frac28 performs an unsigned 28/28-bit division to a 32-bit fixed-point
+* fraction used for setting the Frequency Shifter registers.
+* N and D can hold numbers up to 28 bits wide.
+* The 4-bit integer part and the 28-bit fractional part are calculated.
+*
+* Usage condition: ((1<<28)*n)/d < ((1<<32)-1) => (n/d) < 15.999
+*
+* N: 0...(1<<28)-1 = 268435455
+* D: 0...(1<<28)-1
+* Q: 0...(1<<32)-1
+*/
+static u32 frac28(u32 N, u32 D)
+{
+ int i = 0;
+ u32 Q1 = 0;
+ u32 R0 = 0;
+
+ R0 = (N % D) << 4; /* 32-28 == 4 shifts possible at max */
+ Q1 = N / D; /* integer part, only the 4 least significant bits
+ will be visible in the result */
+
+ /* division using radix 16, 7 nibbles in the result */
+ for (i = 0; i < 7; i++) {
+ Q1 = (Q1 << 4) | R0 / D;
+ R0 = (R0 % D) << 4;
+ }
+ /* rounding */
+ if ((R0 >> 3) >= D)
+ Q1++;
+
+ return Q1;
+}
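+
+/*
+* A quick sanity check of frac28(), shown only as a hedged illustration
+* (values worked out by hand, not taken from any vendor documentation):
+*
+*   frac28(1, 2) == 0x08000000  (exactly (1<<28)/2)
+*   frac28(1, 3) == 0x05555555  ((1<<28)/3 = 89478485.33, rounded to nearest)
+*
+* Per the usage condition above, N/D must stay below 15.999, otherwise the
+* 32-bit result would overflow.
+*/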
+
+/**
+* \fn u32 log1_times100( u32 x)
+* \brief Compute: 100*log10(x)
+* \param x 32 bits
+* \return 100*log10(x)
+*
+* 100*log10(x)
+* = 100*(log2(x)/log2(10)))
+* = (100*(2^15)*log2(x))/((2^15)*log2(10))
+* = ((200*(2^15)*log2(x))/((2^15)*log2(10)))/2
+* = ((200*(2^15)*(log2(x/y)+log2(y)))/((2^15)*log2(10)))/2
+* = ((200*(2^15)*log2(x/y))+(200*(2^15)*log2(y)))/((2^15)*log2(10)))/2
+*
+* where y = 2^k and 1<= (x/y) < 2
+*/
+
+static u32 log1_times100(u32 x)
+{
+ static const u8 scale = 15;
+ static const u8 index_width = 5;
+ /*
+	   log2lut[n] = (1<<scale) * 200 * log2( 1.0 + ( (1.0/(1<<index_width)) * n ))
+	   0 <= n < ((1<<index_width)+1)
+ */
+
+ static const u32 log2lut[] = {
+ 0, /* 0.000000 */
+ 290941, /* 290941.300628 */
+ 573196, /* 573196.476418 */
+ 847269, /* 847269.179851 */
+ 1113620, /* 1113620.489452 */
+ 1372674, /* 1372673.576986 */
+ 1624818, /* 1624817.752104 */
+ 1870412, /* 1870411.981536 */
+ 2109788, /* 2109787.962654 */
+ 2343253, /* 2343252.817465 */
+ 2571091, /* 2571091.461923 */
+ 2793569, /* 2793568.696416 */
+ 3010931, /* 3010931.055901 */
+ 3223408, /* 3223408.452106 */
+ 3431216, /* 3431215.635215 */
+ 3634553, /* 3634553.498355 */
+ 3833610, /* 3833610.244726 */
+ 4028562, /* 4028562.434393 */
+ 4219576, /* 4219575.925308 */
+ 4406807, /* 4406806.721144 */
+ 4590402, /* 4590401.736809 */
+ 4770499, /* 4770499.491025 */
+ 4947231, /* 4947230.734179 */
+ 5120719, /* 5120719.018555 */
+ 5291081, /* 5291081.217197 */
+ 5458428, /* 5458427.996830 */
+ 5622864, /* 5622864.249668 */
+ 5784489, /* 5784489.488298 */
+ 5943398, /* 5943398.207380 */
+ 6099680, /* 6099680.215452 */
+ 6253421, /* 6253420.939751 */
+ 6404702, /* 6404701.706649 */
+ 6553600, /* 6553600.000000 */
+ };
+
+ u8 i = 0;
+ u32 y = 0;
+ u32 d = 0;
+ u32 k = 0;
+ u32 r = 0;
+
+ if (x == 0)
+ return 0;
+
+ /* Scale x (normalize) */
+ /* computing y in log(x/y) = log(x) - log(y) */
+ if ((x & (((u32) (-1)) << (scale + 1))) == 0) {
+ for (k = scale; k > 0; k--) {
+ if (x & (((u32) 1) << scale))
+ break;
+ x <<= 1;
+ }
+ } else {
+ for (k = scale; k < 31; k++) {
+ if ((x & (((u32) (-1)) << (scale + 1))) == 0)
+ break;
+ x >>= 1;
+ }
+ }
+ /*
+ Now x has binary point between bit[scale] and bit[scale-1]
+ and 1.0 <= x < 2.0 */
+
+	/* correction for division: log(x) = log(x/y)+log(y) */
+ y = k * ((((u32) 1) << scale) * 200);
+
+ /* remove integer part */
+ x &= ((((u32) 1) << scale) - 1);
+ /* get index */
+ i = (u8) (x >> (scale - index_width));
+ /* compute delta (x-a) */
+ d = x & ((((u32) 1) << (scale - index_width)) - 1);
+ /* compute log, multiplication ( d* (.. )) must be within range ! */
+ y += log2lut[i] +
+ ((d * (log2lut[i + 1] - log2lut[i])) >> (scale - index_width));
+	/* Convert to log10() */
+ y /= 108853; /* (log2(10) << scale) */
+ r = (y >> 1);
+ /* rounding */
+ if (y & ((u32)1))
+ r++;
+
+ return r;
+
+}
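+
+/*
+* A minimal sanity check for log1_times100(), given only as a hedged
+* illustration (values worked out by hand, not reference data):
+*
+*   log1_times100(2)    == 30   (100*log10(2)    = 30.1)
+*   log1_times100(1000) == 300  (100*log10(1000) = 300.0)
+*
+* The piecewise-linear interpolation over the 33-entry table keeps the error
+* well below one unit of the returned value.
+*/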
+
+/**
+* \fn u32 frac_times1e6( u32 N, u32 D)
+* \brief Compute: (N/D) * 1000000.
+* \param N numerator, 32 bits.
+* \param D denominator, 32 bits.
+* \return u32
+* \retval ((N/D) * 1000000), 32 bits
+*
+* No check on D=0!
+*/
+static u32 frac_times1e6(u32 N, u32 D)
+{
+ u32 remainder = 0;
+ u32 frac = 0;
+
+ /*
+ frac = (N * 1000000) / D
+ To let it fit in a 32 bits computation:
+ frac = (N * (1000000 >> 4)) / (D >> 4)
+	   This would cause a problem in case D < 16 (division by 0).
+	   So we do it in a more elaborate way, as shown below.
+ */
+ frac = (((u32) N) * (1000000 >> 4)) / D;
+ frac <<= 4;
+ remainder = (((u32) N) * (1000000 >> 4)) % D;
+ remainder <<= 4;
+ frac += remainder / D;
+ remainder = remainder % D;
+ if ((remainder * 2) > D)
+ frac++;
+
+ return frac;
+}
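+
+/*
+* A brief illustration of frac_times1e6() (hand-computed sanity values, not
+* reference data):
+*
+*   frac_times1e6(1, 2) == 500000
+*   frac_times1e6(1, 3) == 333333  (333333.33, rounded to nearest)
+*
+* The >>4 / <<4 split keeps the intermediate products within 32 bits while
+* still rounding the final result to the nearest integer.
+*/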
+
+/*============================================================================*/
+
+
+/**
+* \brief Values for the NICAM prescaler gain, converted from dB to integer
+* and rounded. The formula used is: 16*10^(prescale_gain[dB]/20).
+*
+*/
+static const u16 nicam_presc_table_val[43] = {
+ 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4,
+ 5, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16,
+ 18, 20, 23, 25, 28, 32, 36, 40, 45,
+ 51, 57, 64, 71, 80, 90, 101, 113, 127
+};
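+
+/*
+* A quick check of the formula above (a hedged observation, not taken from
+* vendor documentation): 16*10^(0/20) = 16, the value at index 24, and
+* 16*10^(18/20) = 127.1 ~ 127, the last entry. The table therefore appears
+* to cover prescaler gains from -24 dB (index 0, value 1) up to +18 dB
+* (index 42) in 1 dB steps.
+*/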
+
+/*============================================================================*/
+/*== END HELPER FUNCTIONS ==*/
+/*============================================================================*/
+
+/*============================================================================*/
+/*============================================================================*/
+/*== DRXJ DAP FUNCTIONS ==*/
+/*============================================================================*/
+/*============================================================================*/
+
+/*
+ This layer takes care of some device specific register access protocols:
+ -conversion to short address format
+ -access to audio block
+ This layer is placed between the drx_dap_fasi and the rest of the drxj
+ specific implementation. This layer can use address map knowledge whereas
+ dap_fasi may not use memory map knowledge.
+
+ * For audio, currently only 16-bit read and write register access is
+   supported. More is not needed. RMW and 32- or 8-bit accesses on audio
+   registers will have undefined behaviour. Flags (RMW, CRC reset, broadcast,
+   single/multi master) will be ignored.
+
+   TODO: check whether ignoring single/multi-master is OK for AUD access.
+*/
+
+#define DRXJ_ISAUDWRITE(addr) (((((addr)>>16)&1) == 1) ? true : false)
+#define DRXJ_DAP_AUDTRIF_TIMEOUT 80 /* millisec */
+/*============================================================================*/
+
+/**
+* \fn bool is_handled_by_aud_tr_if( u32 addr )
+* \brief Check if this address is handled by the audio token ring interface.
+* \param addr
+* \return bool
+* \retval true Yes, handled by audio token ring interface
+* \retval false No, not handled by audio token ring interface
+*
+*/
+static
+bool is_handled_by_aud_tr_if(u32 addr)
+{
+ bool retval = false;
+
+ if ((DRXDAP_FASI_ADDR2BLOCK(addr) == 4) &&
+ (DRXDAP_FASI_ADDR2BANK(addr) > 1) &&
+ (DRXDAP_FASI_ADDR2BANK(addr) < 6)) {
+ retval = true;
+ }
+
+ return retval;
+}
+
+/*============================================================================*/
+
+int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
+ u16 w_count,
+ u8 *wData,
+ struct i2c_device_addr *r_dev_addr,
+ u16 r_count, u8 *r_data)
+{
+ struct drx39xxj_state *state;
+ struct i2c_msg msg[2];
+ unsigned int num_msgs;
+
+ if (w_dev_addr == NULL) {
+ /* Read only */
+ state = r_dev_addr->user_data;
+ msg[0].addr = r_dev_addr->i2c_addr >> 1;
+ msg[0].flags = I2C_M_RD;
+ msg[0].buf = r_data;
+ msg[0].len = r_count;
+ num_msgs = 1;
+ } else if (r_dev_addr == NULL) {
+ /* Write only */
+ state = w_dev_addr->user_data;
+ msg[0].addr = w_dev_addr->i2c_addr >> 1;
+ msg[0].flags = 0;
+ msg[0].buf = wData;
+ msg[0].len = w_count;
+ num_msgs = 1;
+ } else {
+ /* Both write and read */
+ state = w_dev_addr->user_data;
+ msg[0].addr = w_dev_addr->i2c_addr >> 1;
+ msg[0].flags = 0;
+ msg[0].buf = wData;
+ msg[0].len = w_count;
+ msg[1].addr = r_dev_addr->i2c_addr >> 1;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = r_data;
+ msg[1].len = r_count;
+ num_msgs = 2;
+ }
+
+ if (state->i2c == NULL) {
+ pr_err("i2c was zero, aborting\n");
+ return 0;
+ }
+ if (i2c_transfer(state->i2c, msg, num_msgs) != num_msgs) {
+ pr_warn("drx3933: I2C write/read failed\n");
+ return -EREMOTEIO;
+ }
+
+#ifdef DJH_DEBUG
+ if (w_dev_addr == NULL || r_dev_addr == NULL)
+ return 0;
+
+ state = w_dev_addr->user_data;
+
+ if (state->i2c == NULL)
+ return 0;
+
+ msg[0].addr = w_dev_addr->i2c_addr;
+ msg[0].flags = 0;
+ msg[0].buf = wData;
+ msg[0].len = w_count;
+ msg[1].addr = r_dev_addr->i2c_addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = r_data;
+ msg[1].len = r_count;
+ num_msgs = 2;
+
+ pr_debug("drx3933 i2c operation addr=%x i2c=%p, wc=%x rc=%x\n",
+ w_dev_addr->i2c_addr, state->i2c, w_count, r_count);
+
+ if (i2c_transfer(state->i2c, msg, 2) != 2) {
+ pr_warn("drx3933: I2C write/read failed\n");
+ return -EREMOTEIO;
+ }
+#endif
+ return 0;
+}
+
+/*============================================================================*/
+
+/******************************
+*
+* int drxdap_fasi_read_block (
+* struct i2c_device_addr *dev_addr, -- address of I2C device
+* u32 addr, -- address of chip register/memory
+* u16 datasize, -- number of bytes to read
+* u8 *data, -- data to receive
+* u32 flags) -- special device flags
+*
+* Read block data from chip address. Because the chip is word oriented,
+* the number of bytes to read must be even.
+*
+* Make sure that the buffer to receive the data is large enough.
+*
+* Although this function expects an even number of bytes, it is still byte
+* oriented, and the data read back is NOT translated to the endianness of
+* the target platform.
+*
+* Output:
+* - 0 if reading was successful
+* in that case: data read is in *data.
+* - -EIO if anything went wrong
+*
+******************************/
+
+static int drxdap_fasi_read_block(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 datasize,
+ u8 *data, u32 flags)
+{
+ u8 buf[4];
+ u16 bufx;
+ int rc;
+ u16 overhead_size = 0;
+
+ /* Check parameters ******************************************************* */
+ if (dev_addr == NULL)
+ return -EINVAL;
+
+ overhead_size = (IS_I2C_10BIT(dev_addr->i2c_addr) ? 2 : 1) +
+ (DRXDAP_FASI_LONG_FORMAT(addr) ? 4 : 2);
+
+ if ((DRXDAP_FASI_OFFSET_TOO_LARGE(addr)) ||
+ ((!(DRXDAPFASI_LONG_ADDR_ALLOWED)) &&
+ DRXDAP_FASI_LONG_FORMAT(addr)) ||
+ (overhead_size > (DRXDAP_MAX_WCHUNKSIZE)) ||
+ ((datasize != 0) && (data == NULL)) || ((datasize & 1) == 1)) {
+ return -EINVAL;
+ }
+
+ /* ReadModifyWrite & mode flag bits are not allowed */
+ flags &= (~DRXDAP_FASI_RMW & ~DRXDAP_FASI_MODEFLAGS);
+#if DRXDAP_SINGLE_MASTER
+ flags |= DRXDAP_FASI_SINGLE_MASTER;
+#endif
+
+ /* Read block from I2C **************************************************** */
+ do {
+ u16 todo = (datasize < DRXDAP_MAX_RCHUNKSIZE ?
+ datasize : DRXDAP_MAX_RCHUNKSIZE);
+
+ bufx = 0;
+
+ addr &= ~DRXDAP_FASI_FLAGS;
+ addr |= flags;
+
+#if ((DRXDAPFASI_LONG_ADDR_ALLOWED == 1) && (DRXDAPFASI_SHORT_ADDR_ALLOWED == 1))
+ /* short format address preferred but long format otherwise */
+ if (DRXDAP_FASI_LONG_FORMAT(addr)) {
+#endif
+#if (DRXDAPFASI_LONG_ADDR_ALLOWED == 1)
+ buf[bufx++] = (u8) (((addr << 1) & 0xFF) | 0x01);
+ buf[bufx++] = (u8) ((addr >> 16) & 0xFF);
+ buf[bufx++] = (u8) ((addr >> 24) & 0xFF);
+ buf[bufx++] = (u8) ((addr >> 7) & 0xFF);
+#endif
+#if ((DRXDAPFASI_LONG_ADDR_ALLOWED == 1) && (DRXDAPFASI_SHORT_ADDR_ALLOWED == 1))
+ } else {
+#endif
+#if (DRXDAPFASI_SHORT_ADDR_ALLOWED == 1)
+ buf[bufx++] = (u8) ((addr << 1) & 0xFF);
+ buf[bufx++] =
+ (u8) (((addr >> 16) & 0x0F) |
+ ((addr >> 18) & 0xF0));
+#endif
+#if ((DRXDAPFASI_LONG_ADDR_ALLOWED == 1) && (DRXDAPFASI_SHORT_ADDR_ALLOWED == 1))
+ }
+#endif
+
+#if DRXDAP_SINGLE_MASTER
+ /*
+ * In single master mode, split the read and write actions.
+ * No special action is needed for write chunks here.
+ */
+ rc = drxbsp_i2c_write_read(dev_addr, bufx, buf,
+ NULL, 0, NULL);
+ if (rc == 0)
+ rc = drxbsp_i2c_write_read(NULL, 0, NULL, dev_addr, todo, data);
+#else
+ /* In multi master mode, do everything in one RW action */
+ rc = drxbsp_i2c_write_read(dev_addr, bufx, buf, dev_addr, todo,
+ data);
+#endif
+ data += todo;
+ addr += (todo >> 1);
+ datasize -= todo;
+ } while (datasize && rc == 0);
+
+ return rc;
+}
+
+
+/******************************
+*
+* int drxdap_fasi_read_reg16 (
+* struct i2c_device_addr *dev_addr, -- address of I2C device
+* u32 addr, -- address of chip register/memory
+* u16 *data, -- data to receive
+* u32 flags) -- special device flags
+*
+* Read one 16-bit register or memory location. The data received back is
+* converted back to the target platform's endianness.
+*
+* Output:
+* - 0 if reading was successful
+* in that case: read data is at *data
+* - -EIO if anything went wrong
+*
+******************************/
+
+static int drxdap_fasi_read_reg16(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 *data, u32 flags)
+{
+ u8 buf[sizeof(*data)];
+ int rc;
+
+ if (!data)
+ return -EINVAL;
+
+ rc = drxdap_fasi_read_block(dev_addr, addr, sizeof(*data), buf, flags);
+ *data = buf[0] + (((u16) buf[1]) << 8);
+ return rc;
+}
+
+/******************************
+*
+* int drxdap_fasi_read_reg32 (
+* struct i2c_device_addr *dev_addr, -- address of I2C device
+* u32 addr, -- address of chip register/memory
+* u32 *data, -- data to receive
+* u32 flags) -- special device flags
+*
+* Read one 32-bit register or memory location. The data received back is
+* converted back to the target platform's endianness.
+*
+* Output:
+* - 0 if reading was successful
+* in that case: read data is at *data
+* - -EIO if anything went wrong
+*
+******************************/
+
+static int drxdap_fasi_read_reg32(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u32 *data, u32 flags)
+{
+ u8 buf[sizeof(*data)];
+ int rc;
+
+ if (!data)
+ return -EINVAL;
+
+ rc = drxdap_fasi_read_block(dev_addr, addr, sizeof(*data), buf, flags);
+ *data = (((u32) buf[0]) << 0) +
+ (((u32) buf[1]) << 8) +
+ (((u32) buf[2]) << 16) + (((u32) buf[3]) << 24);
+ return rc;
+}
+
+/******************************
+*
+* int drxdap_fasi_write_block (
+* struct i2c_device_addr *dev_addr, -- address of I2C device
+* u32 addr, -- address of chip register/memory
+*      u16 datasize,     -- number of bytes to write
+*      u8 *data,         -- data to send
+* u32 flags) -- special device flags
+*
+* Write block data to chip address. Because the chip is word oriented,
+* the number of bytes to write must be even.
+*
+* Although this function expects an even number of bytes, it is still byte
+* oriented, and the data being written is NOT translated from the endianness of
+* the target platform.
+*
+* Output:
+* - 0 if writing was successful
+* - -EIO if anything went wrong
+*
+******************************/
+
+static int drxdap_fasi_write_block(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 datasize,
+ u8 *data, u32 flags)
+{
+ u8 buf[DRXDAP_MAX_WCHUNKSIZE];
+ int st = -EIO;
+ int first_err = 0;
+ u16 overhead_size = 0;
+ u16 block_size = 0;
+
+ /* Check parameters ******************************************************* */
+ if (dev_addr == NULL)
+ return -EINVAL;
+
+ overhead_size = (IS_I2C_10BIT(dev_addr->i2c_addr) ? 2 : 1) +
+ (DRXDAP_FASI_LONG_FORMAT(addr) ? 4 : 2);
+
+ if ((DRXDAP_FASI_OFFSET_TOO_LARGE(addr)) ||
+ ((!(DRXDAPFASI_LONG_ADDR_ALLOWED)) &&
+ DRXDAP_FASI_LONG_FORMAT(addr)) ||
+ (overhead_size > (DRXDAP_MAX_WCHUNKSIZE)) ||
+ ((datasize != 0) && (data == NULL)) || ((datasize & 1) == 1))
+ return -EINVAL;
+
+ flags &= DRXDAP_FASI_FLAGS;
+ flags &= ~DRXDAP_FASI_MODEFLAGS;
+#if DRXDAP_SINGLE_MASTER
+ flags |= DRXDAP_FASI_SINGLE_MASTER;
+#endif
+
+ /* Write block to I2C ***************************************************** */
+ block_size = ((DRXDAP_MAX_WCHUNKSIZE) - overhead_size) & ~1;
+ do {
+ u16 todo = 0;
+ u16 bufx = 0;
+
+ /* Buffer device address */
+ addr &= ~DRXDAP_FASI_FLAGS;
+ addr |= flags;
+#if (((DRXDAPFASI_LONG_ADDR_ALLOWED) == 1) && ((DRXDAPFASI_SHORT_ADDR_ALLOWED) == 1))
+ /* short format address preferred but long format otherwise */
+ if (DRXDAP_FASI_LONG_FORMAT(addr)) {
+#endif
+#if ((DRXDAPFASI_LONG_ADDR_ALLOWED) == 1)
+ buf[bufx++] = (u8) (((addr << 1) & 0xFF) | 0x01);
+ buf[bufx++] = (u8) ((addr >> 16) & 0xFF);
+ buf[bufx++] = (u8) ((addr >> 24) & 0xFF);
+ buf[bufx++] = (u8) ((addr >> 7) & 0xFF);
+#endif
+#if (((DRXDAPFASI_LONG_ADDR_ALLOWED) == 1) && ((DRXDAPFASI_SHORT_ADDR_ALLOWED) == 1))
+ } else {
+#endif
+#if ((DRXDAPFASI_SHORT_ADDR_ALLOWED) == 1)
+ buf[bufx++] = (u8) ((addr << 1) & 0xFF);
+ buf[bufx++] =
+ (u8) (((addr >> 16) & 0x0F) |
+ ((addr >> 18) & 0xF0));
+#endif
+#if (((DRXDAPFASI_LONG_ADDR_ALLOWED) == 1) && ((DRXDAPFASI_SHORT_ADDR_ALLOWED) == 1))
+ }
+#endif
+
+ /*
+		   In single master mode block_size can be 0. In such a case this I2C
+		   sequence will be visible: (1) write address {i2c addr,
+		   4 bytes chip address} (2) write data {i2c addr, 4 bytes data}
+		   (3) write address (4) write data etc...
+		   The address must be rewritten because the HI is reset after each data
+		   transport and expects an address.
+ */
+ todo = (block_size < datasize ? block_size : datasize);
+ if (todo == 0) {
+ u16 overhead_size_i2c_addr = 0;
+ u16 data_block_size = 0;
+
+ overhead_size_i2c_addr =
+ (IS_I2C_10BIT(dev_addr->i2c_addr) ? 2 : 1);
+ data_block_size =
+ (DRXDAP_MAX_WCHUNKSIZE - overhead_size_i2c_addr) & ~1;
+
+ /* write device address */
+ st = drxbsp_i2c_write_read(dev_addr,
+ (u16) (bufx),
+ buf,
+ (struct i2c_device_addr *)(NULL),
+ 0, (u8 *)(NULL));
+
+ if ((st != 0) && (first_err == 0)) {
+ /* at the end, return the first error encountered */
+ first_err = st;
+ }
+ bufx = 0;
+ todo =
+ (data_block_size <
+ datasize ? data_block_size : datasize);
+ }
+ memcpy(&buf[bufx], data, todo);
+		/* write the address (when it fits in this chunk) and the data */
+ st = drxbsp_i2c_write_read(dev_addr,
+ (u16) (bufx + todo),
+ buf,
+ (struct i2c_device_addr *)(NULL),
+ 0, (u8 *)(NULL));
+
+ if ((st != 0) && (first_err == 0)) {
+ /* at the end, return the first error encountered */
+ first_err = st;
+ }
+ datasize -= todo;
+ data += todo;
+ addr += (todo >> 1);
+ } while (datasize);
+
+ return first_err;
+}
+
+/******************************
+*
+* int drxdap_fasi_write_reg16 (
+* struct i2c_device_addr *dev_addr, -- address of I2C device
+* u32 addr, -- address of chip register/memory
+* u16 data, -- data to send
+* u32 flags) -- special device flags
+*
+* Write one 16-bit register or memory location. The data being written is
+* converted from the target platform's endianness to little endian.
+*
+* Output:
+* - 0 if writing was successful
+* - -EIO if anything went wrong
+*
+******************************/
+
+static int drxdap_fasi_write_reg16(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 data, u32 flags)
+{
+ u8 buf[sizeof(data)];
+
+ buf[0] = (u8) ((data >> 0) & 0xFF);
+ buf[1] = (u8) ((data >> 8) & 0xFF);
+
+ return drxdap_fasi_write_block(dev_addr, addr, sizeof(data), buf, flags);
+}
+
+/******************************
+*
+* int drxdap_fasi_read_modify_write_reg16 (
+* struct i2c_device_addr *dev_addr, -- address of I2C device
+* u32 waddr, -- address of chip register/memory
+* u32 raddr, -- chip address to read back from
+* u16 wdata, -- data to send
+* u16 *rdata) -- data to receive back
+*
+* Write 16-bit data, then read back the original contents of that location.
+* Requires long addressing format to be allowed.
+*
+* Before sending data, the data is converted to little endian. The
+* data received back is converted back to the target platform's endianness.
+*
+* WARNING: This function is only guaranteed to work if there is one
+* master on the I2C bus.
+*
+* Output:
+* - 0 if reading was successful
+* in that case: read back data is at *rdata
+* - -EIO if anything went wrong
+*
+******************************/
+
+static int drxdap_fasi_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
+ u32 waddr,
+ u32 raddr,
+ u16 wdata, u16 *rdata)
+{
+ int rc = -EIO;
+
+#if (DRXDAPFASI_LONG_ADDR_ALLOWED == 1)
+ if (rdata == NULL)
+ return -EINVAL;
+
+ rc = drxdap_fasi_write_reg16(dev_addr, waddr, wdata, DRXDAP_FASI_RMW);
+ if (rc == 0)
+ rc = drxdap_fasi_read_reg16(dev_addr, raddr, rdata, 0);
+#endif
+
+ return rc;
+}
+
+/******************************
+*
+* int drxdap_fasi_write_reg32 (
+* struct i2c_device_addr *dev_addr, -- address of I2C device
+* u32 addr, -- address of chip register/memory
+* u32 data, -- data to send
+* u32 flags) -- special device flags
+*
+* Write one 32-bit register or memory location. The data being written is
+* converted from the target platform's endianness to little endian.
+*
+* Output:
+* - 0 if writing was successful
+* - -EIO if anything went wrong
+*
+******************************/
+
+static int drxdap_fasi_write_reg32(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u32 data, u32 flags)
+{
+ u8 buf[sizeof(data)];
+
+ buf[0] = (u8) ((data >> 0) & 0xFF);
+ buf[1] = (u8) ((data >> 8) & 0xFF);
+ buf[2] = (u8) ((data >> 16) & 0xFF);
+ buf[3] = (u8) ((data >> 24) & 0xFF);
+
+ return drxdap_fasi_write_block(dev_addr, addr, sizeof(data), buf, flags);
+}
+
+/*============================================================================*/
+
+/**
+* \fn int drxj_dap_rm_write_reg16short
+* \brief Read modify write 16 bits audio register using short format only.
+* \param dev_addr
+* \param waddr Address to write to
+* \param raddr Address to read from (usually SIO_HI_RA_RAM_S0_RMWBUF__A)
+* \param wdata Data to write
+* \param rdata Buffer for data to read
+* \return int
+* \retval 0 Success
+* \retval -EIO Timeout, I2C error, illegal bank
+*
+* 16-bit register read-modify-write access using the short addressing format only.
+* Requires knowledge of the register map, thus device dependent.
+* Uses DAP FASI directly to avoid endless recursion of RMWs to audio registers.
+*
+*/
+
+/* TODO: the correct define should be #if ( DRXDAPFASI_SHORT_ADDR_ALLOWED==1 )
+   See the comments in drxj_dap_read_modify_write_reg16 */
+#if (DRXDAPFASI_LONG_ADDR_ALLOWED == 0)
+static int drxj_dap_rm_write_reg16short(struct i2c_device_addr *dev_addr,
+ u32 waddr,
+ u32 raddr,
+ u16 wdata, u16 *rdata)
+{
+ int rc;
+
+ if (rdata == NULL)
+ return -EINVAL;
+
+ /* Set RMW flag */
+ rc = drxdap_fasi_write_reg16(dev_addr,
+ SIO_HI_RA_RAM_S0_FLG_ACC__A,
+ SIO_HI_RA_RAM_S0_FLG_ACC_S0_RWM__M,
+ 0x0000);
+ if (rc == 0) {
+ /* Write new data: triggers RMW */
+ rc = drxdap_fasi_write_reg16(dev_addr, waddr, wdata,
+ 0x0000);
+ }
+ if (rc == 0) {
+ /* Read old data */
+ rc = drxdap_fasi_read_reg16(dev_addr, raddr, rdata,
+ 0x0000);
+ }
+ if (rc == 0) {
+ /* Reset RMW flag */
+ rc = drxdap_fasi_write_reg16(dev_addr,
+ SIO_HI_RA_RAM_S0_FLG_ACC__A,
+ 0, 0x0000);
+ }
+
+ return rc;
+}
+#endif
+
+/*============================================================================*/
+
+static int drxj_dap_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
+ u32 waddr,
+ u32 raddr,
+ u16 wdata, u16 *rdata)
+{
+	/* TODO: correct the short/long addressing format decision;
+	   for now the long format has a higher priority than the short one,
+	   because the short format also needs virtual banks (not implemented
+	   yet) for certain audio registers */
+#if (DRXDAPFASI_LONG_ADDR_ALLOWED == 1)
+ return drxdap_fasi_read_modify_write_reg16(dev_addr,
+ waddr,
+ raddr, wdata, rdata);
+#else
+ return drxj_dap_rm_write_reg16short(dev_addr, waddr, raddr, wdata, rdata);
+#endif
+}
+
+
+/*============================================================================*/
+
+/**
+* \fn int drxj_dap_read_aud_reg16
+* \brief Read 16 bits audio register
+* \param dev_addr
+* \param addr
+* \param data
+* \return int
+* \retval 0 Success
+* \retval -EIO Timeout, I2C error, illegal bank
+*
+* 16-bit register read access via the audio token ring interface.
+*
+*/
+static int drxj_dap_read_aud_reg16(struct i2c_device_addr *dev_addr,
+ u32 addr, u16 *data)
+{
+ u32 start_timer = 0;
+ u32 current_timer = 0;
+ u32 delta_timer = 0;
+ u16 tr_status = 0;
+ int stat = -EIO;
+
+ /* No read possible for bank 3, return with error */
+ if (DRXDAP_FASI_ADDR2BANK(addr) == 3) {
+ stat = -EINVAL;
+ } else {
+ const u32 write_bit = ((dr_xaddr_t) 1) << 16;
+
+ /* Force reset write bit */
+ addr &= (~write_bit);
+
+ /* Set up read */
+ start_timer = jiffies_to_msecs(jiffies);
+ do {
+ /* RMW to aud TR IF until request is granted or timeout */
+ stat = drxj_dap_read_modify_write_reg16(dev_addr,
+ addr,
+ SIO_HI_RA_RAM_S0_RMWBUF__A,
+ 0x0000, &tr_status);
+
+ if (stat != 0)
+ break;
+
+ current_timer = jiffies_to_msecs(jiffies);
+ delta_timer = current_timer - start_timer;
+ if (delta_timer > DRXJ_DAP_AUDTRIF_TIMEOUT) {
+ stat = -EIO;
+ break;
+ }
+
+ } while (((tr_status & AUD_TOP_TR_CTR_FIFO_LOCK__M) ==
+ AUD_TOP_TR_CTR_FIFO_LOCK_LOCKED) ||
+ ((tr_status & AUD_TOP_TR_CTR_FIFO_FULL__M) ==
+ AUD_TOP_TR_CTR_FIFO_FULL_FULL));
+ } /* if ( DRXDAP_FASI_ADDR2BANK(addr)!=3 ) */
+
+ /* Wait for read ready status or timeout */
+ if (stat == 0) {
+ start_timer = jiffies_to_msecs(jiffies);
+
+ while ((tr_status & AUD_TOP_TR_CTR_FIFO_RD_RDY__M) !=
+ AUD_TOP_TR_CTR_FIFO_RD_RDY_READY) {
+ stat = drxj_dap_read_reg16(dev_addr,
+ AUD_TOP_TR_CTR__A,
+ &tr_status, 0x0000);
+ if (stat != 0)
+ break;
+
+ current_timer = jiffies_to_msecs(jiffies);
+ delta_timer = current_timer - start_timer;
+ if (delta_timer > DRXJ_DAP_AUDTRIF_TIMEOUT) {
+ stat = -EIO;
+ break;
+ }
+ } /* while ( ... ) */
+ }
+
+ /* Read value */
+ if (stat == 0)
+ stat = drxj_dap_read_modify_write_reg16(dev_addr,
+ AUD_TOP_TR_RD_REG__A,
+ SIO_HI_RA_RAM_S0_RMWBUF__A,
+ 0x0000, data);
+ return stat;
+}
+
+/*============================================================================*/
+
+static int drxj_dap_read_reg16(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 *data, u32 flags)
+{
+ int stat = -EIO;
+
+ /* Check param */
+ if ((dev_addr == NULL) || (data == NULL))
+ return -EINVAL;
+
+ if (is_handled_by_aud_tr_if(addr))
+ stat = drxj_dap_read_aud_reg16(dev_addr, addr, data);
+ else
+ stat = drxdap_fasi_read_reg16(dev_addr, addr, data, flags);
+
+ return stat;
+}
+/*============================================================================*/
+
+/**
+* \fn int drxj_dap_write_aud_reg16
+* \brief Write 16 bits audio register
+* \param dev_addr
+* \param addr
+* \param data
+* \return int
+* \retval 0 Success
+* \retval -EIO Timeout, I2C error, illegal bank
+*
+* 16-bit register write access via the audio token ring interface.
+*
+*/
+static int drxj_dap_write_aud_reg16(struct i2c_device_addr *dev_addr,
+ u32 addr, u16 data)
+{
+ int stat = -EIO;
+
+ /* No write possible for bank 2, return with error */
+ if (DRXDAP_FASI_ADDR2BANK(addr) == 2) {
+ stat = -EINVAL;
+ } else {
+ u32 start_timer = 0;
+ u32 current_timer = 0;
+ u32 delta_timer = 0;
+ u16 tr_status = 0;
+ const u32 write_bit = ((dr_xaddr_t) 1) << 16;
+
+ /* Force write bit */
+ addr |= write_bit;
+ start_timer = jiffies_to_msecs(jiffies);
+ do {
+ /* RMW to aud TR IF until request is granted or timeout */
+ stat = drxj_dap_read_modify_write_reg16(dev_addr,
+ addr,
+ SIO_HI_RA_RAM_S0_RMWBUF__A,
+ data, &tr_status);
+ if (stat != 0)
+ break;
+
+ current_timer = jiffies_to_msecs(jiffies);
+ delta_timer = current_timer - start_timer;
+ if (delta_timer > DRXJ_DAP_AUDTRIF_TIMEOUT) {
+ stat = -EIO;
+ break;
+ }
+
+ } while (((tr_status & AUD_TOP_TR_CTR_FIFO_LOCK__M) ==
+ AUD_TOP_TR_CTR_FIFO_LOCK_LOCKED) ||
+ ((tr_status & AUD_TOP_TR_CTR_FIFO_FULL__M) ==
+ AUD_TOP_TR_CTR_FIFO_FULL_FULL));
+
+ } /* if ( DRXDAP_FASI_ADDR2BANK(addr)!=2 ) */
+
+ return stat;
+}
+
+/*============================================================================*/
+
+static int drxj_dap_write_reg16(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 data, u32 flags)
+{
+ int stat = -EIO;
+
+ /* Check param */
+ if (dev_addr == NULL)
+ return -EINVAL;
+
+ if (is_handled_by_aud_tr_if(addr))
+ stat = drxj_dap_write_aud_reg16(dev_addr, addr, data);
+ else
+ stat = drxdap_fasi_write_reg16(dev_addr,
+ addr, data, flags);
+
+ return stat;
+}
+
+/*============================================================================*/
+
+/* Free data ram in SIO HI */
+#define SIO_HI_RA_RAM_USR_BEGIN__A 0x420040
+#define SIO_HI_RA_RAM_USR_END__A 0x420060
+
+#define DRXJ_HI_ATOMIC_BUF_START (SIO_HI_RA_RAM_USR_BEGIN__A)
+#define DRXJ_HI_ATOMIC_BUF_END (SIO_HI_RA_RAM_USR_BEGIN__A + 7)
+#define DRXJ_HI_ATOMIC_READ SIO_HI_RA_RAM_PAR_3_ACP_RW_READ
+#define DRXJ_HI_ATOMIC_WRITE SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE
+
+/**
+* \fn int drxj_dap_atomic_read_write_block()
+* \brief Basic access routine for atomic read or write access
+* \param dev_addr pointer to i2c dev address
+* \param addr destination/source address
+* \param datasize size of data buffer in bytes
+* \param data pointer to data buffer
+* \param read_flag true for an atomic read, false for an atomic write
+* \return int
+* \retval 0 Success
+* \retval -EIO Timeout, I2C error, illegal bank
+*
+*/
+static
+int drxj_dap_atomic_read_write_block(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 datasize,
+ u8 *data, bool read_flag)
+{
+ struct drxj_hi_cmd hi_cmd;
+ int rc;
+ u16 word;
+ u16 dummy = 0;
+ u16 i = 0;
+
+ /* Parameter check */
+ if (!data || !dev_addr || ((datasize % 2)) || ((datasize / 2) > 8))
+ return -EINVAL;
+
+ /* Set up HI parameters to read or write n bytes */
+ hi_cmd.cmd = SIO_HI_RA_RAM_CMD_ATOMIC_COPY;
+ hi_cmd.param1 =
+ (u16) ((DRXDAP_FASI_ADDR2BLOCK(DRXJ_HI_ATOMIC_BUF_START) << 6) +
+ DRXDAP_FASI_ADDR2BANK(DRXJ_HI_ATOMIC_BUF_START));
+ hi_cmd.param2 =
+ (u16) DRXDAP_FASI_ADDR2OFFSET(DRXJ_HI_ATOMIC_BUF_START);
+ hi_cmd.param3 = (u16) ((datasize / 2) - 1);
+ if (!read_flag)
+ hi_cmd.param3 |= DRXJ_HI_ATOMIC_WRITE;
+ else
+ hi_cmd.param3 |= DRXJ_HI_ATOMIC_READ;
+ hi_cmd.param4 = (u16) ((DRXDAP_FASI_ADDR2BLOCK(addr) << 6) +
+ DRXDAP_FASI_ADDR2BANK(addr));
+ hi_cmd.param5 = (u16) DRXDAP_FASI_ADDR2OFFSET(addr);
+
+ if (!read_flag) {
+ /* write data to buffer */
+ for (i = 0; i < (datasize / 2); i++) {
+
+ word = ((u16) data[2 * i]);
+ word += (((u16) data[(2 * i) + 1]) << 8);
+ drxj_dap_write_reg16(dev_addr,
+ (DRXJ_HI_ATOMIC_BUF_START + i),
+ word, 0);
+ }
+ }
+
+ rc = hi_command(dev_addr, &hi_cmd, &dummy);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ if (read_flag) {
+ /* read data from buffer */
+ for (i = 0; i < (datasize / 2); i++) {
+ drxj_dap_read_reg16(dev_addr,
+ (DRXJ_HI_ATOMIC_BUF_START + i),
+ &word, 0);
+ data[2 * i] = (u8) (word & 0xFF);
+ data[(2 * i) + 1] = (u8) (word >> 8);
+ }
+ }
+
+ return 0;
+
+rw_error:
+ return -EIO;
+
+}
+
+/*============================================================================*/
+
+/**
+* \fn int drxj_dap_atomic_read_reg32()
+* \brief Atomic read of 32 bits words
+*/
+static
+int drxj_dap_atomic_read_reg32(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u32 *data, u32 flags)
+{
+ u8 buf[sizeof(*data)];
+ int rc = -EIO;
+ u32 word = 0;
+
+ if (!data)
+ return -EINVAL;
+
+ rc = drxj_dap_atomic_read_write_block(dev_addr, addr,
+ sizeof(*data), buf, true);
+
+ if (rc < 0)
+ return 0;
+
+ word = (u32) buf[3];
+ word <<= 8;
+ word |= (u32) buf[2];
+ word <<= 8;
+ word |= (u32) buf[1];
+ word <<= 8;
+ word |= (u32) buf[0];
+
+ *data = word;
+
+ return rc;
+}
+
+/*============================================================================*/
+
+/*============================================================================*/
+/*== END DRXJ DAP FUNCTIONS ==*/
+/*============================================================================*/
+
+/*============================================================================*/
+/*============================================================================*/
+/*== HOST INTERFACE FUNCTIONS ==*/
+/*============================================================================*/
+/*============================================================================*/
+
+/**
+* \fn int hi_cfg_command()
+* \brief Configure HI with settings stored in the demod structure.
+* \param demod Demodulator.
+* \return int.
+*
+* This routine was created because too many orthogonal settings have
+* been put into one HI API function (configure). In particular, enabling or
+* disabling the I2C bridge should not require re-configuration of the HI.
+*
+*/
+static int hi_cfg_command(const struct drx_demod_instance *demod)
+{
+ struct drxj_data *ext_attr = (struct drxj_data *) (NULL);
+ struct drxj_hi_cmd hi_cmd;
+ u16 result = 0;
+ int rc;
+
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ hi_cmd.cmd = SIO_HI_RA_RAM_CMD_CONFIG;
+ hi_cmd.param1 = SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY;
+ hi_cmd.param2 = ext_attr->hi_cfg_timing_div;
+ hi_cmd.param3 = ext_attr->hi_cfg_bridge_delay;
+ hi_cmd.param4 = ext_attr->hi_cfg_wake_up_key;
+ hi_cmd.param5 = ext_attr->hi_cfg_ctrl;
+ hi_cmd.param6 = ext_attr->hi_cfg_transmit;
+
+ rc = hi_command(demod->my_i2c_dev_addr, &hi_cmd, &result);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+	/* Reset the power-down flag (it is set for one call only) */
+ ext_attr->hi_cfg_ctrl &= (~(SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ));
+
+ return 0;
+
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int hi_command()
+* \brief Send a command to the HI.
+* \param dev_addr I2C address.
+* \param cmd HI command.
+* \param result HI command result.
+* \return int.
+*
+* Sends a command to the HI and waits for the result (unless it is a power-down command).
+*
+*/
+static int
+hi_command(struct i2c_device_addr *dev_addr, const struct drxj_hi_cmd *cmd, u16 *result)
+{
+ u16 wait_cmd = 0;
+ u16 nr_retries = 0;
+ bool powerdown_cmd = false;
+ int rc;
+
+ /* Write parameters */
+ switch (cmd->cmd) {
+
+ case SIO_HI_RA_RAM_CMD_CONFIG:
+ case SIO_HI_RA_RAM_CMD_ATOMIC_COPY:
+ rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_6__A, cmd->param6, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_5__A, cmd->param5, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_4__A, cmd->param4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_3__A, cmd->param3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* fallthrough */
+ case SIO_HI_RA_RAM_CMD_BRDCTRL:
+ rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_2__A, cmd->param2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_1__A, cmd->param1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* fallthrough */
+ case SIO_HI_RA_RAM_CMD_NULL:
+ /* No parameters */
+ break;
+
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ /* Write command */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_CMD__A, cmd->cmd, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ if ((cmd->cmd) == SIO_HI_RA_RAM_CMD_RESET)
+ msleep(1);
+
+	/* Detect power down to omit reading the result */
+ powerdown_cmd = (bool) ((cmd->cmd == SIO_HI_RA_RAM_CMD_CONFIG) &&
+ (((cmd->
+ param5) & SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M)
+ == SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ));
+ if (!powerdown_cmd) {
+ /* Wait until command rdy */
+ do {
+ nr_retries++;
+ if (nr_retries > DRXJ_MAX_RETRIES) {
+ pr_err("timeout\n");
+ goto rw_error;
+ }
+
+ rc = drxj_dap_read_reg16(dev_addr, SIO_HI_RA_RAM_CMD__A, &wait_cmd, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } while (wait_cmd != 0);
+
+ /* Read result */
+ rc = drxj_dap_read_reg16(dev_addr, SIO_HI_RA_RAM_RES__A, result, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ }
+	/* else: powerdown_cmd == true, no result to read */
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int init_hi( const struct drx_demod_instance *demod )
+* \brief Initialise and configure the HI.
+* \param demod pointer to demod data.
+* \return int Return status.
+* \retval 0 Success.
+* \retval -EIO Failure.
+*
+* Needs to know Psys (system clock period) and Posc (oscillator clock period).
+* The configuration needs to be stored in the driver because of the way I2C
+* bridging is controlled.
+*
+*/
+static int init_hi(const struct drx_demod_instance *demod)
+{
+ struct drxj_data *ext_attr = (struct drxj_data *) (NULL);
+ struct drx_common_attr *common_attr = (struct drx_common_attr *) (NULL);
+ struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);
+ int rc;
+
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ common_attr = (struct drx_common_attr *) demod->my_common_attr;
+ dev_addr = demod->my_i2c_dev_addr;
+
+ /* PATCH for bug 5003, HI ucode v3.1.0 */
+ rc = drxj_dap_write_reg16(dev_addr, 0x4301D7, 0x801, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+	/* Timing div: 250 ns / Psys */
+	/* Timing div = ( delay (nanoseconds) * sysclk (kHz) ) / 1000 */
+ ext_attr->hi_cfg_timing_div =
+ (u16) ((common_attr->sys_clock_freq / 1000) * HI_I2C_DELAY) / 1000;
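+	/*
+	 * Worked example (hedged; assumes HI_I2C_DELAY is the 250 ns mentioned
+	 * above and the default 151875 kHz system clock from this file):
+	 * (151875 / 1000) * 250 / 1000 = 151 * 250 / 1000 = 37 (integer math).
+	 */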
+ /* Clipping */
+ if ((ext_attr->hi_cfg_timing_div) > SIO_HI_RA_RAM_PAR_2_CFG_DIV__M)
+ ext_attr->hi_cfg_timing_div = SIO_HI_RA_RAM_PAR_2_CFG_DIV__M;
+	/* Bridge delay, uses the oscillator clock */
+	/* Delay = ( delay (nanoseconds) * oscclk (kHz) ) / 1000 */
+	/* SDA bridge delay */
+ ext_attr->hi_cfg_bridge_delay =
+ (u16) ((common_attr->osc_clock_freq / 1000) * HI_I2C_BRIDGE_DELAY) /
+ 1000;
+ /* Clipping */
+ if ((ext_attr->hi_cfg_bridge_delay) > SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M)
+ ext_attr->hi_cfg_bridge_delay = SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M;
+ /* SCL bridge delay, same as SDA for now */
+ ext_attr->hi_cfg_bridge_delay += ((ext_attr->hi_cfg_bridge_delay) <<
+ SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__B);
+	/* Wakeup key; setting the read flag (as suggested in the documentation) does
+	   not always result in a working solution (bare-bones setups worked, VI2C failed).
+	   Not setting the bit works in all cases. */
+ ext_attr->hi_cfg_wake_up_key = DRXJ_WAKE_UP_KEY;
+ /* port/bridge/power down ctrl */
+ ext_attr->hi_cfg_ctrl = (SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE);
+	/* transmit mode timeout delay and watchdog divider */
+ ext_attr->hi_cfg_transmit = SIO_HI_RA_RAM_PAR_6__PRE;
+
+ rc = hi_cfg_command(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+/*== END HOST INTERFACE FUNCTIONS ==*/
+/*============================================================================*/
+
+/*============================================================================*/
+/*============================================================================*/
+/*== AUXILIARY FUNCTIONS ==*/
+/*============================================================================*/
+/*============================================================================*/
+
+/**
+* \fn int get_device_capabilities()
+* \brief Get and store device capabilities.
+* \param demod Pointer to demodulator instance.
+* \return int.
+* \retval 0 Success
+* \retval -EIO Failure
+*
+* Depending on pulldowns on MDx pins the following internals are set:
+* * common_attr->osc_clock_freq
+* * ext_attr->has_lna
+* * ext_attr->has_ntsc
+* * ext_attr->has_btsc
+* * ext_attr->has_oob
+*
+*/
+static int get_device_capabilities(struct drx_demod_instance *demod)
+{
+ struct drx_common_attr *common_attr = (struct drx_common_attr *) (NULL);
+ struct drxj_data *ext_attr = (struct drxj_data *) NULL;
+ struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);
+ u16 sio_pdr_ohw_cfg = 0;
+ u32 sio_top_jtagid_lo = 0;
+ u16 bid = 0;
+ int rc;
+
+ common_attr = (struct drx_common_attr *) demod->my_common_attr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ dev_addr = demod->my_i2c_dev_addr;
+
+ rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_read_reg16(dev_addr, SIO_PDR_OHW_CFG__A, &sio_pdr_ohw_cfg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ switch ((sio_pdr_ohw_cfg & SIO_PDR_OHW_CFG_FREF_SEL__M)) {
+ case 0:
+ /* ignore (bypass ?) */
+ break;
+ case 1:
+ /* 27 MHz */
+ common_attr->osc_clock_freq = 27000;
+ break;
+ case 2:
+ /* 20.25 MHz */
+ common_attr->osc_clock_freq = 20250;
+ break;
+ case 3:
+ /* 4 MHz */
+ common_attr->osc_clock_freq = 4000;
+ break;
+ default:
+ return -EIO;
+ }
+
+ /*
+ Determine device capabilities
+ Based on pinning v47
+ */
+ rc = drxdap_fasi_read_reg32(dev_addr, SIO_TOP_JTAGID_LO__A, &sio_top_jtagid_lo, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ ext_attr->mfx = (u8) ((sio_top_jtagid_lo >> 29) & 0xF);
+
+ switch ((sio_top_jtagid_lo >> 12) & 0xFF) {
+ case 0x31:
+ rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_read_reg16(dev_addr, SIO_PDR_UIO_IN_HI__A, &bid, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ bid = (bid >> 10) & 0xf;
+ rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ ext_attr->has_lna = true;
+ ext_attr->has_ntsc = false;
+ ext_attr->has_btsc = false;
+ ext_attr->has_oob = false;
+ ext_attr->has_smatx = true;
+ ext_attr->has_smarx = false;
+ ext_attr->has_gpio = false;
+ ext_attr->has_irqn = false;
+ break;
+ case 0x33:
+ ext_attr->has_lna = false;
+ ext_attr->has_ntsc = false;
+ ext_attr->has_btsc = false;
+ ext_attr->has_oob = false;
+ ext_attr->has_smatx = true;
+ ext_attr->has_smarx = false;
+ ext_attr->has_gpio = false;
+ ext_attr->has_irqn = false;
+ break;
+ case 0x45:
+ ext_attr->has_lna = true;
+ ext_attr->has_ntsc = true;
+ ext_attr->has_btsc = false;
+ ext_attr->has_oob = false;
+ ext_attr->has_smatx = true;
+ ext_attr->has_smarx = true;
+ ext_attr->has_gpio = true;
+ ext_attr->has_irqn = false;
+ break;
+ case 0x46:
+ ext_attr->has_lna = false;
+ ext_attr->has_ntsc = true;
+ ext_attr->has_btsc = false;
+ ext_attr->has_oob = false;
+ ext_attr->has_smatx = true;
+ ext_attr->has_smarx = true;
+ ext_attr->has_gpio = true;
+ ext_attr->has_irqn = false;
+ break;
+ case 0x41:
+ ext_attr->has_lna = true;
+ ext_attr->has_ntsc = true;
+ ext_attr->has_btsc = true;
+ ext_attr->has_oob = false;
+ ext_attr->has_smatx = true;
+ ext_attr->has_smarx = true;
+ ext_attr->has_gpio = true;
+ ext_attr->has_irqn = false;
+ break;
+ case 0x43:
+ ext_attr->has_lna = false;
+ ext_attr->has_ntsc = true;
+ ext_attr->has_btsc = true;
+ ext_attr->has_oob = false;
+ ext_attr->has_smatx = true;
+ ext_attr->has_smarx = true;
+ ext_attr->has_gpio = true;
+ ext_attr->has_irqn = false;
+ break;
+ case 0x32:
+ ext_attr->has_lna = true;
+ ext_attr->has_ntsc = false;
+ ext_attr->has_btsc = false;
+ ext_attr->has_oob = true;
+ ext_attr->has_smatx = true;
+ ext_attr->has_smarx = true;
+ ext_attr->has_gpio = true;
+ ext_attr->has_irqn = true;
+ break;
+ case 0x34:
+ ext_attr->has_lna = false;
+ ext_attr->has_ntsc = true;
+ ext_attr->has_btsc = true;
+ ext_attr->has_oob = true;
+ ext_attr->has_smatx = true;
+ ext_attr->has_smarx = true;
+ ext_attr->has_gpio = true;
+ ext_attr->has_irqn = true;
+ break;
+ case 0x42:
+ ext_attr->has_lna = true;
+ ext_attr->has_ntsc = true;
+ ext_attr->has_btsc = true;
+ ext_attr->has_oob = true;
+ ext_attr->has_smatx = true;
+ ext_attr->has_smarx = true;
+ ext_attr->has_gpio = true;
+ ext_attr->has_irqn = true;
+ break;
+ case 0x44:
+ ext_attr->has_lna = false;
+ ext_attr->has_ntsc = true;
+ ext_attr->has_btsc = true;
+ ext_attr->has_oob = true;
+ ext_attr->has_smatx = true;
+ ext_attr->has_smarx = true;
+ ext_attr->has_gpio = true;
+ ext_attr->has_irqn = true;
+ break;
+ default:
+ /* Unknown device variant */
+ return -EIO;
+ break;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int power_up_device()
+* \brief Power up device.
+* \param demod Pointer to demodulator instance.
+* \return int.
+* \retval 0 Success
+* \retval -EIO Failure, I2C or max retries reached
+*
+*/
+
+#ifndef DRXJ_MAX_RETRIES_POWERUP
+#define DRXJ_MAX_RETRIES_POWERUP 10
+#endif
+
+static int power_up_device(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);
+ u8 data = 0;
+ u16 retry_count = 0;
+ struct i2c_device_addr wake_up_addr;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ wake_up_addr.i2c_addr = DRXJ_WAKE_UP_KEY;
+ wake_up_addr.i2c_dev_id = dev_addr->i2c_dev_id;
+ wake_up_addr.user_data = dev_addr->user_data;
+ /*
+	 * I2C access may fail in this case (no ACK). A dummy write must be used to
+	 * wake up the device, and a dummy read must be used to reset the HI state
+	 * machine (avoiding actual writes).
+ */
+ do {
+ data = 0;
+ drxbsp_i2c_write_read(&wake_up_addr, 1, &data,
+ (struct i2c_device_addr *)(NULL), 0,
+ (u8 *)(NULL));
+ msleep(10);
+ retry_count++;
+ } while ((drxbsp_i2c_write_read
+ ((struct i2c_device_addr *) (NULL), 0, (u8 *)(NULL), dev_addr, 1,
+ &data)
+ != 0) && (retry_count < DRXJ_MAX_RETRIES_POWERUP));
+
+ /* Need some recovery time .... */
+ msleep(10);
+
+ if (retry_count == DRXJ_MAX_RETRIES_POWERUP)
+ return -EIO;
+
+ return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+/* MPEG Output Configuration Functions - begin */
+/*----------------------------------------------------------------------------*/
+/**
+* \fn int ctrl_set_cfg_mpeg_output()
+* \brief Set MPEG output configuration of the device.
+* \param demod Pointer to demodulator instance.
+* \param cfg_data Pointer to MPEG output configuration.
+* \return int.
+*
+* Configure MPEG output parameters.
+*
+*/
+static int
+ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod, struct drx_cfg_mpeg_output *cfg_data)
+{
+ struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);
+ struct drxj_data *ext_attr = (struct drxj_data *) (NULL);
+ struct drx_common_attr *common_attr = (struct drx_common_attr *) (NULL);
+ int rc;
+ u16 fec_oc_reg_mode = 0;
+ u16 fec_oc_reg_ipr_mode = 0;
+ u16 fec_oc_reg_ipr_invert = 0;
+ u32 max_bit_rate = 0;
+ u32 rcn_rate = 0;
+ u32 nr_bits = 0;
+ u16 sio_pdr_md_cfg = 0;
+ /* data mask for the output data byte */
+ u16 invert_data_mask =
+ FEC_OC_IPR_INVERT_MD7__M | FEC_OC_IPR_INVERT_MD6__M |
+ FEC_OC_IPR_INVERT_MD5__M | FEC_OC_IPR_INVERT_MD4__M |
+ FEC_OC_IPR_INVERT_MD3__M | FEC_OC_IPR_INVERT_MD2__M |
+ FEC_OC_IPR_INVERT_MD1__M | FEC_OC_IPR_INVERT_MD0__M;
+
+ /* check arguments */
+ if ((demod == NULL) || (cfg_data == NULL))
+ return -EINVAL;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ common_attr = (struct drx_common_attr *) demod->my_common_attr;
+
+ if (cfg_data->enable_mpeg_output == true) {
+		/* quick and dirty patch to set MPEG output in case the current
+		   standard is not producing MPEG */
+ switch (ext_attr->standard) {
+ case DRX_STANDARD_8VSB:
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
+ case DRX_STANDARD_ITU_C:
+ break;
+ default:
+ return 0;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_OCR_INVERT__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ switch (ext_attr->standard) {
+ case DRX_STANDARD_8VSB:
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_USAGE__A, 7, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* 2048 bytes fifo ram */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_TMD_CTL_UPD_RATE__A, 10, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_TMD_INT_UPD_RATE__A, 10, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_AVR_PARM_A__A, 5, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_AVR_PARM_B__A, 7, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_RCN_GAIN__A, 10, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* Low Water Mark for synchronization */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_LWM__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* High Water Mark for synchronization */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_HWM__A, 5, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_C:
+ switch (ext_attr->constellation) {
+ case DRX_CONSTELLATION_QAM256:
+ nr_bits = 8;
+ break;
+ case DRX_CONSTELLATION_QAM128:
+ nr_bits = 7;
+ break;
+ case DRX_CONSTELLATION_QAM64:
+ nr_bits = 6;
+ break;
+ case DRX_CONSTELLATION_QAM32:
+ nr_bits = 5;
+ break;
+ case DRX_CONSTELLATION_QAM16:
+ nr_bits = 4;
+ break;
+ default:
+ return -EIO;
+ } /* ext_attr->constellation */
+ /* max_bit_rate = symbol_rate * nr_bits * coef */
+ /* coef = 188/204 */
+ max_bit_rate =
+ (ext_attr->curr_symbol_rate / 8) * nr_bits * 188;
+ /* fall through: Annex A/C need the following settings as well */
+ case DRX_STANDARD_ITU_B:
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_USAGE__A, FEC_OC_FCT_USAGE__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_TMD_CTL_UPD_RATE__A, FEC_OC_TMD_CTL_UPD_RATE__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_TMD_INT_UPD_RATE__A, 5, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_AVR_PARM_A__A, FEC_OC_AVR_PARM_A__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_AVR_PARM_B__A, FEC_OC_AVR_PARM_B__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (cfg_data->static_clk == true) {
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_RCN_GAIN__A, 0xD, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } else {
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_RCN_GAIN__A, FEC_OC_RCN_GAIN__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_LWM__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_HWM__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ default:
+ break;
+ } /* switch (standard) */
+
+ /* Check insertion of the Reed-Solomon parity bytes */
+ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_MODE__A, &fec_oc_reg_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_IPR_MODE__A, &fec_oc_reg_ipr_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (cfg_data->insert_rs_byte == true) {
+ /* enable parity symbol forward */
+ fec_oc_reg_mode |= FEC_OC_MODE_PARITY__M;
+ /* MVAL disable during parity bytes */
+ fec_oc_reg_ipr_mode |= FEC_OC_IPR_MODE_MVAL_DIS_PAR__M;
+ switch (ext_attr->standard) {
+ case DRX_STANDARD_8VSB:
+ rcn_rate = 0x004854D3;
+ break;
+ case DRX_STANDARD_ITU_B:
+ fec_oc_reg_mode |= FEC_OC_MODE_TRANSPARENT__M;
+ switch (ext_attr->constellation) {
+ case DRX_CONSTELLATION_QAM256:
+ rcn_rate = 0x008945E7;
+ break;
+ case DRX_CONSTELLATION_QAM64:
+ rcn_rate = 0x005F64D4;
+ break;
+ default:
+ return -EIO;
+ }
+ break;
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_C:
+ /* insert_rs_byte = true -> coef = 188/188 -> 1, RS bits are in MPEG output */
+ rcn_rate =
+ (frac28
+ (max_bit_rate,
+ (u32) (common_attr->sys_clock_freq / 8))) /
+ 188;
+ break;
+ default:
+ return -EIO;
+ } /* ext_attr->standard */
+ } else { /* insert_rs_byte == false */
+
+ /* disable parity symbol forward */
+ fec_oc_reg_mode &= (~FEC_OC_MODE_PARITY__M);
+ /* MVAL enable during parity bytes */
+ fec_oc_reg_ipr_mode &= (~FEC_OC_IPR_MODE_MVAL_DIS_PAR__M);
+ switch (ext_attr->standard) {
+ case DRX_STANDARD_8VSB:
+ rcn_rate = 0x0041605C;
+ break;
+ case DRX_STANDARD_ITU_B:
+ fec_oc_reg_mode &= (~FEC_OC_MODE_TRANSPARENT__M);
+ switch (ext_attr->constellation) {
+ case DRX_CONSTELLATION_QAM256:
+ rcn_rate = 0x0082D6A0;
+ break;
+ case DRX_CONSTELLATION_QAM64:
+ rcn_rate = 0x005AEC1A;
+ break;
+ default:
+ return -EIO;
+ }
+ break;
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_C:
+ /* insert_rs_byte = false -> coef = 188/204, RS bits not in MPEG output */
+ rcn_rate =
+ (frac28
+ (max_bit_rate,
+ (u32) (common_attr->sys_clock_freq / 8))) /
+ 204;
+ break;
+ default:
+ return -EIO;
+ } /* ext_attr->standard */
+ }
+
+ if (cfg_data->enable_parallel == true) { /* MPEG data output is parallel -> clear ipr_mode[0] */
+ fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M));
+ } else { /* MPEG data output is serial -> set ipr_mode[0] */
+ fec_oc_reg_ipr_mode |= FEC_OC_IPR_MODE_SERIAL__M;
+ }
+
+ /* Control selective inversion of output bits */
+ if (cfg_data->invert_data == true)
+ fec_oc_reg_ipr_invert |= invert_data_mask;
+ else
+ fec_oc_reg_ipr_invert &= (~(invert_data_mask));
+
+ if (cfg_data->invert_err == true)
+ fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MERR__M;
+ else
+ fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MERR__M));
+
+ if (cfg_data->invert_str == true)
+ fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MSTRT__M;
+ else
+ fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MSTRT__M));
+
+ if (cfg_data->invert_val == true)
+ fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MVAL__M;
+ else
+ fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MVAL__M));
+
+ if (cfg_data->invert_clk == true)
+ fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MCLK__M;
+ else
+ fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MCLK__M));
+
+
+ if (cfg_data->static_clk == true) { /* Static mode */
+ u32 dto_rate = 0;
+ u32 bit_rate = 0;
+ u16 fec_oc_dto_burst_len = 0;
+ u16 fec_oc_dto_period = 0;
+
+ fec_oc_dto_burst_len = FEC_OC_DTO_BURST_LEN__PRE;
+
+ switch (ext_attr->standard) {
+ case DRX_STANDARD_8VSB:
+ fec_oc_dto_period = 4;
+ if (cfg_data->insert_rs_byte == true)
+ fec_oc_dto_burst_len = 208;
+ break;
+ case DRX_STANDARD_ITU_A:
+ {
+ u32 symbol_rate_th = 6400000;
+ if (cfg_data->insert_rs_byte == true) {
+ fec_oc_dto_burst_len = 204;
+ symbol_rate_th = 5900000;
+ }
+ if (ext_attr->curr_symbol_rate >=
+ symbol_rate_th) {
+ fec_oc_dto_period = 0;
+ } else {
+ fec_oc_dto_period = 1;
+ }
+ }
+ break;
+ case DRX_STANDARD_ITU_B:
+ fec_oc_dto_period = 1;
+ if (cfg_data->insert_rs_byte == true)
+ fec_oc_dto_burst_len = 128;
+ break;
+ case DRX_STANDARD_ITU_C:
+ fec_oc_dto_period = 1;
+ if (cfg_data->insert_rs_byte == true)
+ fec_oc_dto_burst_len = 204;
+ break;
+ default:
+ return -EIO;
+ }
+ bit_rate =
+ common_attr->sys_clock_freq * 1000 / (fec_oc_dto_period +
+ 2);
+ dto_rate =
+ frac28(bit_rate, common_attr->sys_clock_freq * 1000);
+ dto_rate >>= 3;
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_RATE_HI__A, (u16)((dto_rate >> 16) & FEC_OC_DTO_RATE_HI__M), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_RATE_LO__A, (u16)(dto_rate & FEC_OC_DTO_RATE_LO_RATE_LO__M), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_MODE__A, FEC_OC_DTO_MODE_DYNAMIC__M | FEC_OC_DTO_MODE_OFFSET_ENABLE__M, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_MODE__A, FEC_OC_FCT_MODE_RAT_ENA__M | FEC_OC_FCT_MODE_VIRT_ENA__M, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_BURST_LEN__A, fec_oc_dto_burst_len, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (ext_attr->mpeg_output_clock_rate != DRXJ_MPEGOUTPUT_CLOCK_RATE_AUTO)
+ fec_oc_dto_period = ext_attr->mpeg_output_clock_rate - 1;
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_PERIOD__A, fec_oc_dto_period, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } else { /* Dynamic mode */
+
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_MODE__A, FEC_OC_DTO_MODE_DYNAMIC__M, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_MODE__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ rc = drxdap_fasi_write_reg32(dev_addr, FEC_OC_RCN_CTL_RATE_LO__A, rcn_rate, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Write appropriate registers with requested configuration */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_MODE__A, fec_oc_reg_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_IPR_MODE__A, fec_oc_reg_ipr_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_IPR_INVERT__A, fec_oc_reg_ipr_invert, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* enabling for both parallel and serial now */
+ /* Write magic word to enable pdr reg write */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0xFABA, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* Set MPEG TS pads to output mode */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MSTRT_CFG__A, 0x0013, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MERR_CFG__A, 0x0013, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MCLK_CFG__A, MPEG_OUTPUT_CLK_DRIVE_STRENGTH << SIO_PDR_MCLK_CFG_DRIVE__B | 0x03 << SIO_PDR_MCLK_CFG_MODE__B, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MVAL_CFG__A, 0x0013, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ sio_pdr_md_cfg =
+ MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH <<
+ SIO_PDR_MD0_CFG_DRIVE__B | 0x03 << SIO_PDR_MD0_CFG_MODE__B;
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD0_CFG__A, sio_pdr_md_cfg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (cfg_data->enable_parallel == true) { /* MPEG data output is parallel -> set MD1 to MD7 to output mode */
+ sio_pdr_md_cfg =
+ MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH <<
+ SIO_PDR_MD0_CFG_DRIVE__B | 0x03 <<
+ SIO_PDR_MD0_CFG_MODE__B;
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD0_CFG__A, sio_pdr_md_cfg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD1_CFG__A, sio_pdr_md_cfg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD2_CFG__A, sio_pdr_md_cfg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD3_CFG__A, sio_pdr_md_cfg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD4_CFG__A, sio_pdr_md_cfg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD5_CFG__A, sio_pdr_md_cfg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD6_CFG__A, sio_pdr_md_cfg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD7_CFG__A, sio_pdr_md_cfg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } else { /* MPEG data output is serial -> set MD1 to MD7 to tri-state */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD1_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD2_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD3_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD4_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD5_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD6_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD7_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ /* Enable Monitor Bus output over MPEG pads and ctl input */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MON_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* Write non-magic word to disable pdr reg write */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } else {
+ /* Write magic word to enable pdr reg write */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0xFABA, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* Set MPEG TS pads to input mode */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MSTRT_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MERR_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MCLK_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MVAL_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD0_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD1_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD2_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD3_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD4_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD5_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD6_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD7_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* Enable Monitor Bus output over MPEG pads and ctl input */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MON_CFG__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* Write non-magic word to disable pdr reg write */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ /* save values for restore after re-acquire */
+ common_attr->mpeg_cfg.enable_mpeg_output = cfg_data->enable_mpeg_output;
+
+ return 0;
+rw_error:
+ return -EIO;
+}
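+
+/*
+ * Illustrative sketch (not part of the driver): the Annex A/C RCN rate
+ * programmed above follows
+ *   max_bit_rate = (symbol_rate / 8) * bits_per_symbol * 188
+ *   rcn_rate     = frac28(max_bit_rate, sys_clock_freq / 8) / divisor
+ * with divisor 188 when the RS parity bytes stay in the stream and 204 when
+ * they are stripped. frac28() is assumed here to behave like (a << 28) / c;
+ * the real helper is defined elsewhere in this driver.
+ */
+static u32 example_rcn_rate(u32 symbol_rate, u32 bits_per_symbol,
+ u32 sys_clock_freq, bool insert_rs_byte)
+{
+ u32 max_bit_rate = (symbol_rate / 8) * bits_per_symbol * 188;
+ u64 frac28_approx = div_u64((u64)max_bit_rate << 28, sys_clock_freq / 8);
+
+ return (u32)div_u64(frac28_approx, insert_rs_byte ? 188 : 204);
+}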
+
+/*----------------------------------------------------------------------------*/
+
+
+/*----------------------------------------------------------------------------*/
+/* MPEG Output Configuration Functions - end */
+/*----------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------*/
+/* miscellaneous configurations - begin */
+/*----------------------------------------------------------------------------*/
+
+/**
+* \fn int set_mpegtei_handling()
+* \brief Activate MPEG TEI handling settings.
+* \param devmod Pointer to demodulator instance.
+* \return int.
+*
+* This routine should be called during a set-channel operation for QAM/VSB.
+*
+*/
+static int set_mpegtei_handling(struct drx_demod_instance *demod)
+{
+ struct drxj_data *ext_attr = (struct drxj_data *) (NULL);
+ struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);
+ int rc;
+ u16 fec_oc_dpr_mode = 0;
+ u16 fec_oc_snc_mode = 0;
+ u16 fec_oc_ems_mode = 0;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_DPR_MODE__A, &fec_oc_dpr_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_EMS_MODE__A, &fec_oc_ems_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* reset to default, allow TEI bit to be changed */
+ fec_oc_dpr_mode &= (~FEC_OC_DPR_MODE_ERR_DISABLE__M);
+ fec_oc_snc_mode &= (~(FEC_OC_SNC_MODE_ERROR_CTL__M |
+ FEC_OC_SNC_MODE_CORR_DISABLE__M));
+ fec_oc_ems_mode &= (~FEC_OC_EMS_MODE_MODE__M);
+
+ if (ext_attr->disable_te_ihandling) {
+ /* do not change TEI bit */
+ fec_oc_dpr_mode |= FEC_OC_DPR_MODE_ERR_DISABLE__M;
+ fec_oc_snc_mode |= FEC_OC_SNC_MODE_CORR_DISABLE__M |
+ ((0x2) << (FEC_OC_SNC_MODE_ERROR_CTL__B));
+ fec_oc_ems_mode |= ((0x01) << (FEC_OC_EMS_MODE_MODE__B));
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DPR_MODE__A, fec_oc_dpr_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_MODE__A, fec_oc_snc_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_EMS_MODE__A, fec_oc_ems_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+* \fn int bit_reverse_mpeg_output()
+* \brief Set MPEG output bit-endian settings.
+* \param devmod Pointer to demodulator instance.
+* \return int.
+*
+* This routine should be called during a set-channel operation for QAM/VSB.
+*
+*/
+static int bit_reverse_mpeg_output(struct drx_demod_instance *demod)
+{
+ struct drxj_data *ext_attr = (struct drxj_data *) (NULL);
+ struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);
+ int rc;
+ u16 fec_oc_ipr_mode = 0;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_IPR_MODE__A, &fec_oc_ipr_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* reset to default (normal bit order) */
+ fec_oc_ipr_mode &= (~FEC_OC_IPR_MODE_REVERSE_ORDER__M);
+
+ if (ext_attr->bit_reverse_mpeg_outout)
+ fec_oc_ipr_mode |= FEC_OC_IPR_MODE_REVERSE_ORDER__M;
+
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_IPR_MODE__A, fec_oc_ipr_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+* \fn int set_mpeg_start_width()
+* \brief Set MPEG start width.
+* \param devmod Pointer to demodulator instance.
+* \return int.
+*
+* This routine should be called during a set-channel operation for QAM/VSB.
+*
+*/
+static int set_mpeg_start_width(struct drx_demod_instance *demod)
+{
+ struct drxj_data *ext_attr = (struct drxj_data *) (NULL);
+ struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);
+ struct drx_common_attr *common_attr = (struct drx_common_attr *) NULL;
+ int rc;
+ u16 fec_oc_comm_mb = 0;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ common_attr = demod->my_common_attr;
+
+ if ((common_attr->mpeg_cfg.static_clk == true)
+ && (common_attr->mpeg_cfg.enable_parallel == false)) {
+ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_COMM_MB__A, &fec_oc_comm_mb, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ fec_oc_comm_mb &= ~FEC_OC_COMM_MB_CTL_ON;
+ if (ext_attr->mpeg_start_width == DRXJ_MPEG_START_WIDTH_8CLKCYC)
+ fec_oc_comm_mb |= FEC_OC_COMM_MB_CTL_ON;
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_COMM_MB__A, fec_oc_comm_mb, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*----------------------------------------------------------------------------*/
+/* miscellaneous configurations - end */
+/*----------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------*/
+/* UIO Configuration Functions - begin */
+/*----------------------------------------------------------------------------*/
+/**
+* \fn int ctrl_set_uio_cfg()
+* \brief Configure the modus operandi of a UIO.
+* \param demod Pointer to demodulator instance.
+* \param uio_cfg Pointer to a configuration setting for a certain UIO.
+* \return int.
+*/
+static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg *uio_cfg)
+{
+ struct drxj_data *ext_attr = (struct drxj_data *) (NULL);
+ int rc;
+
+ if ((uio_cfg == NULL) || (demod == NULL))
+ return -EINVAL;
+
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ /* Write magic word to enable pdr reg write */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ switch (uio_cfg->uio) {
+ /*====================================================================*/
+ case DRX_UIO1:
+ /* DRX_UIO1: SMA_TX UIO-1 */
+ if (!ext_attr->has_smatx)
+ return -EIO;
+ switch (uio_cfg->mode) {
+ case DRX_UIO_MODE_FIRMWARE_SMA: /* fallthrough */
+ case DRX_UIO_MODE_FIRMWARE_SAW: /* fallthrough */
+ case DRX_UIO_MODE_READWRITE:
+ ext_attr->uio_sma_tx_mode = uio_cfg->mode;
+ break;
+ case DRX_UIO_MODE_DISABLE:
+ ext_attr->uio_sma_tx_mode = uio_cfg->mode;
+ /* pad configuration register is set 0 - input mode */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_TX_CFG__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ default:
+ return -EINVAL;
+ } /* switch ( uio_cfg->mode ) */
+ break;
+ /*====================================================================*/
+ case DRX_UIO2:
+ /* DRX_UIO2: SMA_RX UIO-2 */
+ if (!ext_attr->has_smarx)
+ return -EIO;
+ switch (uio_cfg->mode) {
+ case DRX_UIO_MODE_FIRMWARE0: /* fallthrough */
+ case DRX_UIO_MODE_READWRITE:
+ ext_attr->uio_sma_rx_mode = uio_cfg->mode;
+ break;
+ case DRX_UIO_MODE_DISABLE:
+ ext_attr->uio_sma_rx_mode = uio_cfg->mode;
+ /* pad configuration register is set 0 - input mode */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_RX_CFG__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ default:
+ return -EINVAL;
+ } /* switch ( uio_cfg->mode ) */
+ break;
+ /*====================================================================*/
+ case DRX_UIO3:
+ /* DRX_UIO3: GPIO UIO-3 */
+ if (!ext_attr->has_gpio)
+ return -EIO;
+ switch (uio_cfg->mode) {
+ case DRX_UIO_MODE_FIRMWARE0: /* fallthrough */
+ case DRX_UIO_MODE_READWRITE:
+ ext_attr->uio_gpio_mode = uio_cfg->mode;
+ break;
+ case DRX_UIO_MODE_DISABLE:
+ ext_attr->uio_gpio_mode = uio_cfg->mode;
+ /* pad configuration register is set 0 - input mode */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_GPIO_CFG__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ default:
+ return -EINVAL;
+ } /* switch ( uio_cfg->mode ) */
+ break;
+ /*====================================================================*/
+ case DRX_UIO4:
+ /* DRX_UIO4: IRQN UIO-4 */
+ if (!ext_attr->has_irqn)
+ return -EIO;
+ switch (uio_cfg->mode) {
+ case DRX_UIO_MODE_READWRITE:
+ ext_attr->uio_irqn_mode = uio_cfg->mode;
+ break;
+ case DRX_UIO_MODE_DISABLE:
+ /* pad configuration register is set 0 - input mode */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_IRQN_CFG__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ ext_attr->uio_irqn_mode = uio_cfg->mode;
+ break;
+ case DRX_UIO_MODE_FIRMWARE0: /* fallthrough */
+ default:
+ return -EINVAL;
+ } /* switch ( uio_cfg->mode ) */
+ break;
+ /*====================================================================*/
+ default:
+ return -EINVAL;
+ } /* switch ( uio_cfg->uio ) */
+
+ /* Write magic word to disable pdr reg write */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
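+
+/*
+ * Usage sketch (hypothetical caller, not part of the driver): put UIO-2
+ * (SMA_RX) under direct host read/write control. Only initializers for the
+ * fields dereferenced by ctrl_set_uio_cfg() above are used here.
+ */
+static int example_claim_sma_rx(struct drx_demod_instance *demod)
+{
+ struct drxuio_cfg uio_cfg = {
+ .uio = DRX_UIO2,
+ .mode = DRX_UIO_MODE_READWRITE
+ };
+
+ return ctrl_set_uio_cfg(demod, &uio_cfg);
+}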
+
+/**
+* \fn int ctrl_uio_write()
+* \brief Write to a UIO.
+* \param demod Pointer to demodulator instance.
+* \param uio_data Pointer to data container for a certain UIO.
+* \return int.
+*/
+static int
+ctrl_uio_write(struct drx_demod_instance *demod, struct drxuio_data *uio_data)
+{
+ struct drxj_data *ext_attr = (struct drxj_data *) (NULL);
+ int rc;
+ u16 pin_cfg_value = 0;
+ u16 value = 0;
+
+ if ((uio_data == NULL) || (demod == NULL))
+ return -EINVAL;
+
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ /* Write magic word to enable pdr reg write */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ switch (uio_data->uio) {
+ /*====================================================================*/
+ case DRX_UIO1:
+ /* DRX_UIO1: SMA_TX UIO-1 */
+ if (!ext_attr->has_smatx)
+ return -EIO;
+ if ((ext_attr->uio_sma_tx_mode != DRX_UIO_MODE_READWRITE)
+ && (ext_attr->uio_sma_tx_mode != DRX_UIO_MODE_FIRMWARE_SAW)) {
+ return -EIO;
+ }
+ pin_cfg_value = 0;
+ /* io_pad_cfg register (8 bit reg.) MSB bit is 1 (default value) */
+ pin_cfg_value |= 0x0113;
+ /* io_pad_cfg_mode output mode is drive always */
+ /* io_pad_cfg_drive is set to power 2 (23 mA) */
+
+ /* write to io pad configuration register - output mode */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_TX_CFG__A, pin_cfg_value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* use corresponding bit in io data output register */
+ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, &value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (!uio_data->value)
+ value &= 0x7FFF; /* write zero to 15th bit - 1st UIO */
+ else
+ value |= 0x8000; /* write one to 15th bit - 1st UIO */
+
+ /* write back to io data output register */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ /*======================================================================*/
+ case DRX_UIO2:
+ /* DRX_UIO2: SMA_RX UIO-2 */
+ if (!ext_attr->has_smarx)
+ return -EIO;
+ if (ext_attr->uio_sma_rx_mode != DRX_UIO_MODE_READWRITE)
+ return -EIO;
+
+ pin_cfg_value = 0;
+ /* io_pad_cfg register (8 bit reg.) MSB bit is 1 (default value) */
+ pin_cfg_value |= 0x0113;
+ /* io_pad_cfg_mode output mode is drive always */
+ /* io_pad_cfg_drive is set to power 2 (23 mA) */
+
+ /* write to io pad configuration register - output mode */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_RX_CFG__A, pin_cfg_value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* use corresponding bit in io data output register */
+ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, &value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (!uio_data->value)
+ value &= 0xBFFF; /* write zero to 14th bit - 2nd UIO */
+ else
+ value |= 0x4000; /* write one to 14th bit - 2nd UIO */
+
+ /* write back to io data output register */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ /*====================================================================*/
+ case DRX_UIO3:
+ /* DRX_UIO3: ASEL UIO-3 */
+ if (!ext_attr->has_gpio)
+ return -EIO;
+ if (ext_attr->uio_gpio_mode != DRX_UIO_MODE_READWRITE)
+ return -EIO;
+
+ pin_cfg_value = 0;
+ /* io_pad_cfg register (8 bit reg.) MSB bit is 1 (default value) */
+ pin_cfg_value |= 0x0113;
+ /* io_pad_cfg_mode output mode is drive always */
+ /* io_pad_cfg_drive is set to power 2 (23 mA) */
+
+ /* write to io pad configuration register - output mode */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_GPIO_CFG__A, pin_cfg_value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* use corresponding bit in io data output register */
+ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_HI__A, &value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (!uio_data->value)
+ value &= 0xFFFB; /* write zero to 2nd bit - 3rd UIO */
+ else
+ value |= 0x0004; /* write one to 2nd bit - 3rd UIO */
+
+ /* write back to io data output register */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_HI__A, value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ /*=====================================================================*/
+ case DRX_UIO4:
+ /* DRX_UIO4: IRQN UIO-4 */
+ if (!ext_attr->has_irqn)
+ return -EIO;
+
+ if (ext_attr->uio_irqn_mode != DRX_UIO_MODE_READWRITE)
+ return -EIO;
+
+ pin_cfg_value = 0;
+ /* io_pad_cfg register (8 bit reg.) MSB bit is 1 (default value) */
+ pin_cfg_value |= 0x0113;
+ /* io_pad_cfg_mode output mode is drive always */
+ /* io_pad_cfg_drive is set to power 2 (23 mA) */
+
+ /* write to io pad configuration register - output mode */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_IRQN_CFG__A, pin_cfg_value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* use corresponding bit in io data output register */
+ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, &value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (uio_data->value == false)
+ value &= 0xEFFF; /* write zero to 12th bit - 4th UIO */
+ else
+ value |= 0x1000; /* write one to 12th bit - 4th UIO */
+
+ /* write back to io data output register */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, value, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ /*=====================================================================*/
+ default:
+ return -EINVAL;
+ } /* switch ( uio_data->uio ) */
+
+ /* Write magic word to disable pdr reg write */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
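+
+/*
+ * Illustrative sketch (not part of the driver): the output levels toggled in
+ * ctrl_uio_write() live in two pad-data registers, one bit per UIO:
+ *   UIO-1 (SMA_TX) -> SIO_PDR_UIO_OUT_LO, bit 15
+ *   UIO-2 (SMA_RX) -> SIO_PDR_UIO_OUT_LO, bit 14
+ *   UIO-3 (GPIO)   -> SIO_PDR_UIO_OUT_HI, bit 2
+ *   UIO-4 (IRQN)   -> SIO_PDR_UIO_OUT_LO, bit 12
+ * The per-pin update is a plain read-modify-write of that bit:
+ */
+static u16 example_set_uio_bit(u16 reg_value, unsigned int bit, bool level)
+{
+ if (level)
+ reg_value |= BIT(bit); /* drive the pad high */
+ else
+ reg_value &= ~BIT(bit); /* drive the pad low */
+
+ return reg_value;
+}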
+
+/*---------------------------------------------------------------------------*/
+/* UIO Configuration Functions - end */
+/*---------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------*/
+/* I2C Bridge Functions - begin */
+/*----------------------------------------------------------------------------*/
+/**
+* \fn int ctrl_i2c_bridge()
+* \brief Open or close the I2C switch to tuner.
+* \param demod Pointer to demodulator instance.
+* \param bridge_closed Pointer to bool indicating whether the bridge is closed or not.
+* \return int.
+*
+*/
+static int
+ctrl_i2c_bridge(struct drx_demod_instance *demod, bool *bridge_closed)
+{
+ struct drxj_hi_cmd hi_cmd;
+ u16 result = 0;
+
+ /* check arguments */
+ if (bridge_closed == NULL)
+ return -EINVAL;
+
+ hi_cmd.cmd = SIO_HI_RA_RAM_CMD_BRDCTRL;
+ hi_cmd.param1 = SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY;
+ if (*bridge_closed)
+ hi_cmd.param2 = SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED;
+ else
+ hi_cmd.param2 = SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN;
+
+ return hi_command(demod->my_i2c_dev_addr, &hi_cmd, &result);
+}
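+
+/*
+ * Usage sketch (hypothetical caller, not part of the driver): bracket tuner
+ * I2C traffic with the bridge control above. tuner_access is a placeholder
+ * callback supplied by the caller, not an API of this driver.
+ */
+static int example_tuner_transaction(struct drx_demod_instance *demod,
+ int (*tuner_access)(void *priv), void *priv)
+{
+ bool closed = true;
+ bool open = false;
+ int rc, rc2;
+
+ rc = ctrl_i2c_bridge(demod, &closed); /* connect the tuner I2C bus */
+ if (rc != 0)
+ return rc;
+
+ rc = tuner_access(priv); /* caller-supplied tuner I/O */
+
+ rc2 = ctrl_i2c_bridge(demod, &open); /* isolate the tuner bus again */
+
+ return rc ? rc : rc2;
+}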
+
+/*----------------------------------------------------------------------------*/
+/* I2C Bridge Functions - end */
+/*----------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------*/
+/* Smart antenna Functions - begin */
+/*----------------------------------------------------------------------------*/
+/**
+* \fn int smart_ant_init()
+* \brief Initialize Smart Antenna.
+* \param demod Pointer to demodulator instance.
+* \return int.
+*
+*/
+static int smart_ant_init(struct drx_demod_instance *demod)
+{
+ struct drxj_data *ext_attr = NULL;
+ struct i2c_device_addr *dev_addr = NULL;
+ struct drxuio_cfg uio_cfg = { DRX_UIO1, DRX_UIO_MODE_FIRMWARE_SMA };
+ int rc;
+ u16 data = 0;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ /* Write magic word to enable pdr reg write */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* init smart antenna */
+ rc = drxj_dap_read_reg16(dev_addr, SIO_SA_TX_COMMAND__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (ext_attr->smart_ant_inverted) {
+ rc = drxj_dap_write_reg16(dev_addr, SIO_SA_TX_COMMAND__A, (data | SIO_SA_TX_COMMAND_TX_INVERT__M) | SIO_SA_TX_COMMAND_TX_ENABLE__M, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } else {
+ rc = drxj_dap_write_reg16(dev_addr, SIO_SA_TX_COMMAND__A, (data & (~SIO_SA_TX_COMMAND_TX_INVERT__M)) | SIO_SA_TX_COMMAND_TX_ENABLE__M, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ /* config SMA_TX pin to smart antenna mode */
+ rc = ctrl_set_uio_cfg(demod, &uio_cfg);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_TX_CFG__A, 0x13, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_TX_GPIO_FNC__A, 0x03, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Write magic word to disable pdr reg write */
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+static int scu_command(struct i2c_device_addr *dev_addr, struct drxjscu_cmd *cmd)
+{
+ int rc;
+ u16 cur_cmd = 0;
+ unsigned long timeout;
+
+ /* Check param */
+ if (cmd == NULL)
+ return -EINVAL;
+
+ /* Wait until SCU command interface is ready to receive command */
+ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_COMMAND__A, &cur_cmd, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (cur_cmd != DRX_SCU_READY)
+ return -EIO;
+
+ switch (cmd->parameter_len) {
+ case 5:
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_4__A, *(cmd->parameter + 4), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* fallthrough */
+ case 4:
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_3__A, *(cmd->parameter + 3), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* fallthrough */
+ case 3:
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_2__A, *(cmd->parameter + 2), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* fallthrough */
+ case 2:
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_1__A, *(cmd->parameter + 1), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* fallthrough */
+ case 1:
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_0__A, *(cmd->parameter + 0), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* fallthrough */
+ case 0:
+ /* do nothing */
+ break;
+ default:
+ /* this number of parameters is not supported */
+ return -EIO;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_COMMAND__A, cmd->command, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Wait until SCU has processed command */
+ timeout = jiffies + msecs_to_jiffies(DRXJ_MAX_WAITTIME);
+ while (time_is_after_jiffies(timeout)) {
+ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_COMMAND__A, &cur_cmd, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (cur_cmd == DRX_SCU_READY)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ if (cur_cmd != DRX_SCU_READY)
+ return -EIO;
+
+ /* read results */
+ if ((cmd->result_len > 0) && (cmd->result != NULL)) {
+ s16 err;
+
+ switch (cmd->result_len) {
+ case 4:
+ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_3__A, cmd->result + 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* fallthrough */
+ case 3:
+ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_2__A, cmd->result + 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* fallthrough */
+ case 2:
+ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_1__A, cmd->result + 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* fallthrough */
+ case 1:
+ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_0__A, cmd->result + 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* fallthrough */
+ case 0:
+ /* do nothing */
+ break;
+ default:
+ /* this number of parameters is not supported */
+ return -EIO;
+ }
+
+ /* Check if an error was reported by SCU */
+ err = cmd->result[0];
+
+ /* check a few fixed error codes */
+ if ((err == (s16) SCU_RAM_PARAM_0_RESULT_UNKSTD)
+ || (err == (s16) SCU_RAM_PARAM_0_RESULT_UNKCMD)
+ || (err == (s16) SCU_RAM_PARAM_0_RESULT_INVPAR)
+ || (err == (s16) SCU_RAM_PARAM_0_RESULT_SIZE)
+ ) {
+ return -EINVAL;
+ }
+ /* here it is assumed that negative means error and non-negative means no error */
+ else if (err < 0)
+ return -EIO;
+ else
+ return 0;
+ }
+
+ return 0;
+
+rw_error:
+ return -EIO;
+}
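+
+/*
+ * Usage sketch (hypothetical, not part of the driver): issue an SCU command
+ * with one parameter word and read one result word back. The structure
+ * fields match the ones scu_command() dereferences above; the command and
+ * parameter values a caller would pass are placeholders, not real SCU
+ * opcodes.
+ */
+static int example_scu_call(struct i2c_device_addr *dev_addr, u16 command,
+ u16 param, u16 *result)
+{
+ u16 params[1] = { param };
+ u16 results[1] = { 0 };
+ struct drxjscu_cmd cmd = {
+ .command = command,
+ .parameter = params,
+ .parameter_len = 1,
+ .result = results,
+ .result_len = 1
+ };
+ int rc;
+
+ rc = scu_command(dev_addr, &cmd);
+ if (rc == 0)
+ *result = results[0];
+
+ return rc;
+}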
+
+/**
+* \fn int drxj_dap_scu_atomic_read_write_block()
+* \brief Basic access routine for SCU atomic read or write access
+* \param dev_addr pointer to i2c dev address
+* \param addr destination/source address
+* \param datasize size of data buffer in bytes
+* \param data pointer to data buffer
+* \return int
+* \retval 0 Success
+* \retval -EIO Timeout, I2C error, illegal bank
+*
+*/
+#define ADDR_AT_SCU_SPACE(x) (((x) - 0x82E000) * 2)
+static
+int drxj_dap_scu_atomic_read_write_block(struct i2c_device_addr *dev_addr, u32 addr, u16 datasize, /* max 30 bytes because of the SCU parameter limit */
+ u8 *data, bool read_flag)
+{
+ struct drxjscu_cmd scu_cmd;
+ int rc;
+ u16 set_param_parameters[15];
+ u16 cmd_result[15];
+
+ /* Parameter check */
+ if (!data || !dev_addr || (datasize % 2) || ((datasize / 2) > 16))
+ return -EINVAL;
+
+ set_param_parameters[1] = (u16) ADDR_AT_SCU_SPACE(addr);
+ if (read_flag) { /* read */
+ set_param_parameters[0] = ((~(0x0080)) & datasize);
+ scu_cmd.parameter_len = 2;
+ scu_cmd.result_len = datasize / 2 + 2;
+ } else {
+ int i = 0;
+
+ set_param_parameters[0] = 0x0080 | datasize;
+ for (i = 0; i < (datasize / 2); i++) {
+ set_param_parameters[i + 2] =
+ (data[2 * i] | (data[(2 * i) + 1] << 8));
+ }
+ scu_cmd.parameter_len = datasize / 2 + 2;
+ scu_cmd.result_len = 1;
+ }
+
+ scu_cmd.command =
+ SCU_RAM_COMMAND_STANDARD_TOP |
+ SCU_RAM_COMMAND_CMD_AUX_SCU_ATOMIC_ACCESS;
+ scu_cmd.result = cmd_result;
+ scu_cmd.parameter = set_param_parameters;
+ rc = scu_command(dev_addr, &scu_cmd);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ if (read_flag) {
+ int i = 0;
+ /* read data from buffer */
+ for (i = 0; i < (datasize / 2); i++) {
+ data[2 * i] = (u8) (scu_cmd.result[i + 2] & 0xFF);
+ data[(2 * i) + 1] = (u8) (scu_cmd.result[i + 2] >> 8);
+ }
+ }
+
+ return 0;
+
+rw_error:
+ return -EIO;
+
+}
+
+/*============================================================================*/
+
+/**
+* \fn int drxj_dap_scu_atomic_read_reg16()
+* \brief Atomic read of a 16-bit word
+*/
+static
+int drxj_dap_scu_atomic_read_reg16(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 *data, u32 flags)
+{
+ u8 buf[2];
+ int rc = -EIO;
+ u16 word = 0;
+
+ if (!data)
+ return -EINVAL;
+
+ rc = drxj_dap_scu_atomic_read_write_block(dev_addr, addr, 2, buf, true);
+ if (rc < 0)
+ return rc;
+
+ word = (u16) (buf[0] + (buf[1] << 8));
+
+ *data = word;
+
+ return rc;
+}
+
+/*============================================================================*/
+/**
+* \fn int drxj_dap_scu_atomic_write_reg16()
+* \brief Atomic write of a 16-bit word
+*/
+static
+int drxj_dap_scu_atomic_write_reg16(struct i2c_device_addr *dev_addr,
+ u32 addr,
+ u16 data, u32 flags)
+{
+ u8 buf[2];
+ int rc = -EIO;
+
+ buf[0] = (u8) (data & 0xff);
+ buf[1] = (u8) ((data >> 8) & 0xff);
+
+ rc = drxj_dap_scu_atomic_read_write_block(dev_addr, addr, 2, buf, false);
+
+ return rc;
+}
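+
+/*
+ * Usage sketch (hypothetical, not part of the driver): a read-modify-write
+ * of a word in SCU RAM built from the two atomic wrappers above. 'addr'
+ * must be an address inside the SCU space handled by
+ * drxj_dap_scu_atomic_read_write_block(); mask and value are caller-chosen.
+ */
+static int example_scu_update_field(struct i2c_device_addr *dev_addr,
+ u32 addr, u16 mask, u16 value)
+{
+ u16 data = 0;
+ int rc;
+
+ rc = drxj_dap_scu_atomic_read_reg16(dev_addr, addr, &data, 0);
+ if (rc < 0)
+ return rc;
+
+ data = (data & ~mask) | (value & mask);
+
+ return drxj_dap_scu_atomic_write_reg16(dev_addr, addr, data, 0);
+}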
+
+/* -------------------------------------------------------------------------- */
+/**
+* \brief Measure result of ADC synchronisation
+* \param demod demod instance
+* \param count (returned) count
+* \return int.
+* \retval 0 Success
+* \retval -EIO Failure: I2C error
+*
+*/
+static int adc_sync_measurement(struct drx_demod_instance *demod, u16 *count)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ int rc;
+ u16 data = 0;
+
+ dev_addr = demod->my_i2c_dev_addr;
+
+ /* Start measurement */
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_COMM_EXEC__A, IQM_AF_COMM_EXEC_ACTIVE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_START_LOCK__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Wait at least 3*128*(1/sysclk), which is well below 1 millisecond */
+ msleep(1);
+
+ *count = 0;
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_PHASE0__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (data == 127)
+ *count = *count + 1;
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_PHASE1__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (data == 127)
+ *count = *count + 1;
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_PHASE2__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (data == 127)
+ *count = *count + 1;
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \brief Synchronize analog and digital clock domains
+* \param demod demod instance
+* \return int.
+* \retval 0 Success
+* \retval -EIO Failure: I2C error or failure to synchronize
+*
+* An IQM reset will also reset the results of this synchronization.
+* After an IQM reset this routine needs to be called again.
+*
+*/
+
+static int adc_synchronization(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ int rc;
+ u16 count = 0;
+
+ dev_addr = demod->my_i2c_dev_addr;
+
+ rc = adc_sync_measurement(demod, &count);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ if (count == 1) {
+ /* Try sampling on a different edge */
+ u16 clk_neg = 0;
+
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_CLKNEG__A, &clk_neg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ clk_neg ^= IQM_AF_CLKNEG_CLKNEGDATA__M;
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLKNEG__A, clk_neg, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = adc_sync_measurement(demod, &count);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ /* TODO: implement fallback scenarios */
+ if (count < 2)
+ return -EIO;
+
+ return 0;
+rw_error:
+ return -EIO;
+}
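+
+/*
+ * Illustrative sketch (not part of the driver): how the phase-detector count
+ * from adc_sync_measurement() is interpreted above. Each of the three
+ * IQM_AF_PHASEx registers reading full scale (127) adds one to the count;
+ * two or three hits mean the current sampling edge is usable, exactly one
+ * hit triggers a retry on the inverted clock edge, and fewer than two hits
+ * after that is reported as -EIO.
+ */
+static bool example_adc_edge_ok(u16 phase_hit_count)
+{
+ return phase_hit_count >= 2; /* 2-out-of-3 vote used by adc_synchronization() */
+}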
+
+/*============================================================================*/
+/*== END AUXILIARY FUNCTIONS ==*/
+/*============================================================================*/
+
+/*============================================================================*/
+/*============================================================================*/
+/*== 8VSB & QAM COMMON DATAPATH FUNCTIONS ==*/
+/*============================================================================*/
+/*============================================================================*/
+/**
+* \fn int init_agc ()
+* \brief Initialize AGC for all standards.
+* \param demod instance of demodulator.
+* \param channel pointer to channel data.
+* \return int.
+*/
+static int init_agc(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ struct drx_common_attr *common_attr = NULL;
+ struct drxj_data *ext_attr = NULL;
+ struct drxj_cfg_agc *p_agc_rf_settings = NULL;
+ struct drxj_cfg_agc *p_agc_if_settings = NULL;
+ int rc;
+ u16 ingain_tgt_max = 0;
+ u16 clp_dir_to = 0;
+ u16 sns_sum_max = 0;
+ u16 clp_sum_max = 0;
+ u16 sns_dir_to = 0;
+ u16 ki_innergain_min = 0;
+ u16 agc_ki = 0;
+ u16 ki_max = 0;
+ u16 if_iaccu_hi_tgt_min = 0;
+ u16 data = 0;
+ u16 agc_ki_dgain = 0;
+ u16 ki_min = 0;
+ u16 clp_ctrl_mode = 0;
+ u16 agc_rf = 0;
+ u16 agc_if = 0;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ common_attr = (struct drx_common_attr *) demod->my_common_attr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ switch (ext_attr->standard) {
+ case DRX_STANDARD_8VSB:
+ clp_sum_max = 1023;
+ clp_dir_to = (u16) (-9);
+ sns_sum_max = 1023;
+ sns_dir_to = (u16) (-9);
+ ki_innergain_min = (u16) (-32768);
+ ki_max = 0x032C;
+ agc_ki_dgain = 0xC;
+ if_iaccu_hi_tgt_min = 2047;
+ ki_min = 0x0117;
+ ingain_tgt_max = 16383;
+ clp_ctrl_mode = 0;
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MINGAIN__A, 0x7fff, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MAXGAIN__A, 0x0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_SUM__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_CYCCNT__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_DIR_WD__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_DIR_STP__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_SUM__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_CYCCNT__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_DIR_WD__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_DIR_STP__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN__A, 1024, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_VSB_AGC_POW_TGT__A, 22600, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN_TGT__A, 13200, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ p_agc_if_settings = &(ext_attr->vsb_if_agc_cfg);
+ p_agc_rf_settings = &(ext_attr->vsb_rf_agc_cfg);
+ break;
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_C:
+ case DRX_STANDARD_ITU_B:
+ ingain_tgt_max = 5119;
+ clp_sum_max = 1023;
+ clp_dir_to = (u16) (-5);
+ sns_sum_max = 127;
+ sns_dir_to = (u16) (-3);
+ ki_innergain_min = 0;
+ ki_max = 0x0657;
+ if_iaccu_hi_tgt_min = 2047;
+ agc_ki_dgain = 0x7;
+ ki_min = 0x0117;
+ clp_ctrl_mode = 0;
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MINGAIN__A, 0x7fff, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MAXGAIN__A, 0x0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_SUM__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_CYCCNT__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_DIR_WD__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_DIR_STP__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_SUM__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_CYCCNT__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_DIR_WD__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_DIR_STP__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ p_agc_if_settings = &(ext_attr->qam_if_agc_cfg);
+ p_agc_rf_settings = &(ext_attr->qam_rf_agc_cfg);
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN_TGT__A, p_agc_if_settings->top, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_AGC_KI__A, &agc_ki, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ agc_ki &= 0xf000;
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI__A, agc_ki, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ /* for new AGC interface */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN_TGT_MIN__A, p_agc_if_settings->top, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN__A, p_agc_if_settings->top, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* Gain fed from inner to outer AGC */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN_TGT_MAX__A, ingain_tgt_max, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A, if_iaccu_hi_tgt_min, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_IF_IACCU_HI__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* set to p_agc_settings->top before */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_IF_IACCU_LO__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_RF_IACCU_HI__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_RF_IACCU_LO__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_RF_MAX__A, 32767, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_SUM_MAX__A, clp_sum_max, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_SUM_MAX__A, sns_sum_max, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_INNERGAIN_MIN__A, ki_innergain_min, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__A, 50, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_CYCLEN__A, 500, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_CYCLEN__A, 500, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MAXMINGAIN_TH__A, 20, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MIN__A, ki_min, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MAX__A, ki_max, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_RED__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_SUM_MIN__A, 8, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_CYCLEN__A, 500, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_DIR_TO__A, clp_dir_to, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_SUM_MIN__A, 8, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_DIR_TO__A, sns_dir_to, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, 50, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_CTRL_MODE__A, clp_ctrl_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ agc_rf = 0x800 + p_agc_rf_settings->cut_off_current;
+ if (common_attr->tuner_rf_agc_pol == true)
+ agc_rf = 0x87ff - agc_rf;
+
+ agc_if = 0x800;
+ if (common_attr->tuner_if_agc_pol == true)
+ agc_if = 0x87ff - agc_if;
+
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_AGC_RF__A, agc_rf, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_AGC_IF__A, agc_if, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Set/restore Ki DGAIN factor */
+ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data &= ~SCU_RAM_AGC_KI_DGAIN__M;
+ data |= (agc_ki_dgain << SCU_RAM_AGC_KI_DGAIN__B);
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
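+
+/*
+ * Illustrative sketch (not part of the driver): the IQM_AF_AGC_RF/IF values
+ * programmed at the end of init_agc() start from mid scale (0x800, plus the
+ * configured cut-off current for the RF path) and are reflected as
+ * (0x87ff - value) when the tuner expects an inverted AGC control voltage.
+ */
+static u16 example_agc_dac_value(u16 offset, bool inverted_polarity)
+{
+ u16 dac = 0x800 + offset; /* mid scale plus configured offset */
+
+ if (inverted_polarity)
+ dac = 0x87ff - dac; /* mirror for inverted AGC polarity */
+
+ return dac;
+}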
+
+/**
+* \fn int set_frequency ()
+* \brief Set frequency shift.
+* \param demod instance of demodulator.
+* \param channel pointer to channel data.
+* \param tuner_freq_offset residual frequency from tuner.
+* \return int.
+*/
+static int
+set_frequency(struct drx_demod_instance *demod,
+ struct drx_channel *channel, s32 tuner_freq_offset)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ struct drxj_data *ext_attr = demod->my_ext_attr;
+ int rc;
+ s32 sampling_frequency = 0;
+ s32 frequency_shift = 0;
+ s32 if_freq_actual = 0;
+ s32 rf_freq_residual = -1 * tuner_freq_offset;
+ s32 adc_freq = 0;
+ s32 intermediate_freq = 0;
+ u32 iqm_fs_rate_ofs = 0;
+ bool adc_flip = true;
+ bool select_pos_image = false;
+ bool rf_mirror;
+ bool tuner_mirror;
+ bool image_to_select = true;
+ s32 fm_frequency_shift = 0;
+
+ rf_mirror = (ext_attr->mirror == DRX_MIRROR_YES) ? true : false;
+ tuner_mirror = demod->my_common_attr->mirror_freq_spect ? false : true;
+ /*
+ Program frequency shifter
+ No need to account for mirroring on RF
+ */
+ switch (ext_attr->standard) {
+ case DRX_STANDARD_ITU_A: /* fallthrough */
+ case DRX_STANDARD_ITU_C: /* fallthrough */
+ case DRX_STANDARD_PAL_SECAM_LP: /* fallthrough */
+ case DRX_STANDARD_8VSB:
+ select_pos_image = true;
+ break;
+ case DRX_STANDARD_FM:
+ /* After IQM FS the sound carrier must appear at 4 MHz in the
+    spectrum. The sound carrier is already 3 MHz above the centre
+    frequency due to the tuner setting, so add an extra shift of
+    1 MHz... */
+ fm_frequency_shift = 1000;
+ /* fall through */
+ case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_NTSC: /* fallthrough */
+ case DRX_STANDARD_PAL_SECAM_BG: /* fallthrough */
+ case DRX_STANDARD_PAL_SECAM_DK: /* fallthrough */
+ case DRX_STANDARD_PAL_SECAM_I: /* fallthrough */
+ case DRX_STANDARD_PAL_SECAM_L:
+ select_pos_image = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+ intermediate_freq = demod->my_common_attr->intermediate_freq;
+ sampling_frequency = demod->my_common_attr->sys_clock_freq / 3;
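+ /* tuner_mirror set means the tuner does not mirror the spectrum:
+    the residual and FM shift then add to the IF, otherwise they subtract */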
+ if (tuner_mirror)
+ if_freq_actual = intermediate_freq + rf_freq_residual + fm_frequency_shift;
+ else
+ if_freq_actual = intermediate_freq - rf_freq_residual - fm_frequency_shift;
+ if (if_freq_actual > sampling_frequency / 2) {
+ /* adc mirrors */
+ adc_freq = sampling_frequency - if_freq_actual;
+ adc_flip = true;
+ } else {
+ /* adc doesn't mirror */
+ adc_freq = if_freq_actual;
+ adc_flip = false;
+ }
+
+ frequency_shift = adc_freq;
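+ /* image_to_select is the parity of all spectrum inversions (RF, tuner,
+    ADC) combined with the standard's preferred image; when set, the
+    negative image is selected below by negating the rate offset */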
+ image_to_select =
+ (bool) (rf_mirror ^ tuner_mirror ^ adc_flip ^ select_pos_image);
+ iqm_fs_rate_ofs = frac28(frequency_shift, sampling_frequency);
+
+ if (image_to_select)
+ iqm_fs_rate_ofs = ~iqm_fs_rate_ofs + 1;
+
+ /* Program frequency shifter with tuner offset compensation */
+ /* frequency_shift += tuner_freq_offset; TODO */
+ rc = drxdap_fasi_write_reg32(dev_addr, IQM_FS_RATE_OFS_LO__A, iqm_fs_rate_ofs, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ ext_attr->iqm_fs_rate_ofs = iqm_fs_rate_ofs;
+ ext_attr->pos_image = (bool) (rf_mirror ^ tuner_mirror ^ select_pos_image);
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int get_acc_pkt_err()
+* \brief Retrieve the accumulated packet error count.
+* \param demod Pointer to demod instance
+* \param packet_err Pointer to the accumulated packet error count
+* \return int.
+* \retval 0 packet_err contains valid data.
+* \retval -EIO Read error, packet_err contains invalid data.
+*/
+#ifdef DRXJ_SIGNAL_ACCUM_ERR
+static int get_acc_pkt_err(struct drx_demod_instance *demod, u16 *packet_err)
+{
+ int rc;
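+ /* accumulated count and last raw reading persist across calls */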
+ static u16 pkt_err;
+ static u16 last_pkt_err;
+ u16 data = 0;
+ struct drxj_data *ext_attr = NULL;
+ struct i2c_device_addr *dev_addr = NULL;
+
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ dev_addr = demod->my_i2c_dev_addr;
+
+ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (ext_attr->reset_pkt_err_acc) {
+ last_pkt_err = data;
+ pkt_err = 0;
+ ext_attr->reset_pkt_err_acc = false;
+ }
+
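+ /* the hardware counter is 16 bits wide; account for wrap-around
+    since the previous reading */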
+ if (data < last_pkt_err) {
+ pkt_err += 0xffff - last_pkt_err;
+ pkt_err += data;
+ } else {
+ pkt_err += (data - last_pkt_err);
+ }
+ *packet_err = pkt_err;
+ last_pkt_err = data;
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+#endif
+
+
+/*============================================================================*/
+
+/**
+* \fn int set_agc_rf ()
+* \brief Configure RF AGC
+* \param demod instance of demodulator.
+* \param agc_settings AGC configuration structure
+* \param atomic true to use atomic SCU register access
+* \return int.
+*/
+static int
+set_agc_rf(struct drx_demod_instance *demod, struct drxj_cfg_agc *agc_settings, bool atomic)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ struct drxj_data *ext_attr = NULL;
+ struct drxj_cfg_agc *p_agc_settings = NULL;
+ struct drx_common_attr *common_attr = NULL;
+ int rc;
+ drx_write_reg16func_t scu_wr16 = NULL;
+ drx_read_reg16func_t scu_rr16 = NULL;
+
+ common_attr = (struct drx_common_attr *) demod->my_common_attr;
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ if (atomic) {
+ scu_rr16 = drxj_dap_scu_atomic_read_reg16;
+ scu_wr16 = drxj_dap_scu_atomic_write_reg16;
+ } else {
+ scu_rr16 = drxj_dap_read_reg16;
+ scu_wr16 = drxj_dap_write_reg16;
+ }
+
+ /* Configure AGC only if standard is currently active */
+ if ((ext_attr->standard == agc_settings->standard) ||
+ (DRXJ_ISQAMSTD(ext_attr->standard) &&
+ DRXJ_ISQAMSTD(agc_settings->standard)) ||
+ (DRXJ_ISATVSTD(ext_attr->standard) &&
+ DRXJ_ISATVSTD(agc_settings->standard))) {
+ u16 data = 0;
+
+ switch (agc_settings->ctrl_mode) {
+ case DRX_AGC_CTRL_AUTO:
+
+ /* Enable RF AGC DAC */
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data |= IQM_AF_STDBY_STDBY_TAGC_RF_A2_ACTIVE;
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Enable SCU RF AGC loop */
+ rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data &= ~SCU_RAM_AGC_KI_RF__M;
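+ /* RF AGC loop gain (KI) depends on the active standard */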
+ if (ext_attr->standard == DRX_STANDARD_8VSB)
+ data |= (2 << SCU_RAM_AGC_KI_RF__B);
+ else if (DRXJ_ISQAMSTD(ext_attr->standard))
+ data |= (5 << SCU_RAM_AGC_KI_RF__B);
+ else
+ data |= (4 << SCU_RAM_AGC_KI_RF__B);
+
+ if (common_attr->tuner_rf_agc_pol)
+ data |= SCU_RAM_AGC_KI_INV_RF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_KI_INV_RF_POL__M;
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Set speed (using complementary reduction value) */
+ rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI_RED__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data &= ~SCU_RAM_AGC_KI_RED_RAGC_RED__M;
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI_RED__A, (~(agc_settings->speed << SCU_RAM_AGC_KI_RED_RAGC_RED__B) & SCU_RAM_AGC_KI_RED_RAGC_RED__M) | data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ if (agc_settings->standard == DRX_STANDARD_8VSB)
+ p_agc_settings = &(ext_attr->vsb_if_agc_cfg);
+ else if (DRXJ_ISQAMSTD(agc_settings->standard))
+ p_agc_settings = &(ext_attr->qam_if_agc_cfg);
+ else if (DRXJ_ISATVSTD(agc_settings->standard))
+ p_agc_settings = &(ext_attr->atv_if_agc_cfg);
+ else
+ return -EINVAL;
+
+ /* Set TOP, only if IF-AGC is in AUTO mode */
+ if (p_agc_settings->ctrl_mode == DRX_AGC_CTRL_AUTO) {
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, agc_settings->top, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT__A, agc_settings->top, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ /* Cut-Off current */
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_RF_IACCU_HI_CO__A, agc_settings->cut_off_current, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_AGC_CTRL_USER:
+
+ /* Enable RF AGC DAC */
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data |= IQM_AF_STDBY_STDBY_TAGC_RF_A2_ACTIVE;
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Disable SCU RF AGC loop */
+ rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data &= ~SCU_RAM_AGC_KI_RF__M;
+ if (common_attr->tuner_rf_agc_pol)
+ data |= SCU_RAM_AGC_KI_INV_RF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_KI_INV_RF_POL__M;
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Write value to output pin */
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_RF_IACCU_HI__A, agc_settings->output_level, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_AGC_CTRL_OFF:
+
+ /* Disable RF AGC DAC */
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data &= (~IQM_AF_STDBY_STDBY_TAGC_RF_A2_ACTIVE);
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Disable SCU RF AGC loop */
+ rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data &= ~SCU_RAM_AGC_KI_RF__M;
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ default:
+ return -EINVAL;
+ } /* switch (agc_settings->ctrl_mode) */
+ }
+
+ /* Store rf agc settings */
+ switch (agc_settings->standard) {
+ case DRX_STANDARD_8VSB:
+ ext_attr->vsb_rf_agc_cfg = *agc_settings;
+ break;
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
+ case DRX_STANDARD_ITU_C:
+ ext_attr->qam_rf_agc_cfg = *agc_settings;
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int set_agc_if ()
+* \brief Configure IF AGC
+* \param demod instance of demodulator.
+* \param agc_settings AGC configuration structure
+* \param atomic true to use atomic SCU register access
+* \return int.
+*/
+static int
+set_agc_if(struct drx_demod_instance *demod, struct drxj_cfg_agc *agc_settings, bool atomic)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ struct drxj_data *ext_attr = NULL;
+ struct drxj_cfg_agc *p_agc_settings = NULL;
+ struct drx_common_attr *common_attr = NULL;
+ drx_write_reg16func_t scu_wr16 = NULL;
+ drx_read_reg16func_t scu_rr16 = NULL;
+ int rc;
+
+ common_attr = (struct drx_common_attr *) demod->my_common_attr;
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ if (atomic) {
+ scu_rr16 = drxj_dap_scu_atomic_read_reg16;
+ scu_wr16 = drxj_dap_scu_atomic_write_reg16;
+ } else {
+ scu_rr16 = drxj_dap_read_reg16;
+ scu_wr16 = drxj_dap_write_reg16;
+ }
+
+ /* Configure AGC only if standard is currently active */
+ if ((ext_attr->standard == agc_settings->standard) ||
+ (DRXJ_ISQAMSTD(ext_attr->standard) &&
+ DRXJ_ISQAMSTD(agc_settings->standard)) ||
+ (DRXJ_ISATVSTD(ext_attr->standard) &&
+ DRXJ_ISATVSTD(agc_settings->standard))) {
+ u16 data = 0;
+
+ switch (agc_settings->ctrl_mode) {
+ case DRX_AGC_CTRL_AUTO:
+ /* Enable IF AGC DAC */
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data |= IQM_AF_STDBY_STDBY_TAGC_IF_A2_ACTIVE;
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Enable SCU IF AGC loop */
+ rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data &= ~SCU_RAM_AGC_KI_IF_AGC_DISABLE__M;
+ data &= ~SCU_RAM_AGC_KI_IF__M;
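+ /* IF AGC loop gain (KI) depends on the active standard */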
+ if (ext_attr->standard == DRX_STANDARD_8VSB)
+ data |= (3 << SCU_RAM_AGC_KI_IF__B);
+ else if (DRXJ_ISQAMSTD(ext_attr->standard))
+ data |= (6 << SCU_RAM_AGC_KI_IF__B);
+ else
+ data |= (5 << SCU_RAM_AGC_KI_IF__B);
+
+ if (common_attr->tuner_if_agc_pol)
+ data |= SCU_RAM_AGC_KI_INV_IF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_KI_INV_IF_POL__M;
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Set speed (using complementary reduction value) */
+ rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI_RED__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data &= ~SCU_RAM_AGC_KI_RED_IAGC_RED__M;
+ rc = (*scu_wr16) (dev_addr, SCU_RAM_AGC_KI_RED__A, (~(agc_settings->speed << SCU_RAM_AGC_KI_RED_IAGC_RED__B) & SCU_RAM_AGC_KI_RED_IAGC_RED__M) | data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ if (agc_settings->standard == DRX_STANDARD_8VSB)
+ p_agc_settings = &(ext_attr->vsb_rf_agc_cfg);
+ else if (DRXJ_ISQAMSTD(agc_settings->standard))
+ p_agc_settings = &(ext_attr->qam_rf_agc_cfg);
+ else if (DRXJ_ISATVSTD(agc_settings->standard))
+ p_agc_settings = &(ext_attr->atv_rf_agc_cfg);
+ else
+ return -EINVAL;
+
+ /* Restore TOP */
+ if (p_agc_settings->ctrl_mode == DRX_AGC_CTRL_AUTO) {
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, p_agc_settings->top, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT__A, p_agc_settings->top, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } else {
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ break;
+
+ case DRX_AGC_CTRL_USER:
+
+ /* Enable IF AGC DAC */
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data |= IQM_AF_STDBY_STDBY_TAGC_IF_A2_ACTIVE;
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Disable SCU IF AGC loop */
+ rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data &= ~SCU_RAM_AGC_KI_IF_AGC_DISABLE__M;
+ data |= SCU_RAM_AGC_KI_IF_AGC_DISABLE__M;
+ if (common_attr->tuner_if_agc_pol)
+ data |= SCU_RAM_AGC_KI_INV_IF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_KI_INV_IF_POL__M;
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Write value to output pin */
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, agc_settings->output_level, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+
+ case DRX_AGC_CTRL_OFF:
+
+ /* Disable If AGC DAC */
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data &= (~IQM_AF_STDBY_STDBY_TAGC_IF_A2_ACTIVE);
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Disable SCU IF AGC loop */
+ rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data &= ~SCU_RAM_AGC_KI_IF_AGC_DISABLE__M;
+ data |= SCU_RAM_AGC_KI_IF_AGC_DISABLE__M;
+ rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ default:
+ return -EINVAL;
+ } /* switch (agc_settings->ctrl_mode) */
+
+ /* always set the top to support configurations without if-loop */
+ rc = (*scu_wr16) (dev_addr, SCU_RAM_AGC_INGAIN_TGT_MIN__A, agc_settings->top, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ /* Store if agc settings */
+ switch (agc_settings->standard) {
+ case DRX_STANDARD_8VSB:
+ ext_attr->vsb_if_agc_cfg = *agc_settings;
+ break;
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
+ case DRX_STANDARD_ITU_C:
+ ext_attr->qam_if_agc_cfg = *agc_settings;
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int set_iqm_af ()
+* \brief Configure IQM AF registers
+* \param demod instance of demodulator.
+* \param active true to activate the IQM analog front-end, false to put it in standby.
+* \return int.
+*/
+static int set_iqm_af(struct drx_demod_instance *demod, bool active)
+{
+ u16 data = 0;
+ struct i2c_device_addr *dev_addr = NULL;
+ int rc;
+
+ dev_addr = demod->my_i2c_dev_addr;
+
+ /* Configure IQM */
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (!active)
+ data &= ((~IQM_AF_STDBY_STDBY_ADC_A2_ACTIVE) & (~IQM_AF_STDBY_STDBY_AMP_A2_ACTIVE) & (~IQM_AF_STDBY_STDBY_PD_A2_ACTIVE) & (~IQM_AF_STDBY_STDBY_TAGC_IF_A2_ACTIVE) & (~IQM_AF_STDBY_STDBY_TAGC_RF_A2_ACTIVE));
+ else
+ data |= (IQM_AF_STDBY_STDBY_ADC_A2_ACTIVE | IQM_AF_STDBY_STDBY_AMP_A2_ACTIVE | IQM_AF_STDBY_STDBY_PD_A2_ACTIVE | IQM_AF_STDBY_STDBY_TAGC_IF_A2_ACTIVE | IQM_AF_STDBY_STDBY_TAGC_RF_A2_ACTIVE);
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+/*== END 8VSB & QAM COMMON DATAPATH FUNCTIONS ==*/
+/*============================================================================*/
+
+/*============================================================================*/
+/*============================================================================*/
+/*== 8VSB DATAPATH FUNCTIONS ==*/
+/*============================================================================*/
+/*============================================================================*/
+
+/**
+* \fn int power_down_vsb ()
+* \brief Power down VSB related blocks.
+* \param demod instance of demodulator.
+* \param primary true to also stop the IQM as a whole and put the analog front-end in standby.
+* \return int.
+*/
+static int power_down_vsb(struct drx_demod_instance *demod, bool primary)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ struct drxjscu_cmd cmd_scu = { /* command */ 0,
+ /* parameter_len */ 0,
+ /* result_len */ 0,
+ /* *parameter */ NULL,
+ /* *result */ NULL
+ };
+ struct drx_cfg_mpeg_output cfg_mpeg_output;
+ int rc;
+ u16 cmd_result = 0;
+
+ /*
+ STOP demodulator
+ reset of FEC and VSB HW
+ */
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB |
+ SCU_RAM_COMMAND_CMD_DEMOD_STOP;
+ cmd_scu.parameter_len = 0;
+ cmd_scu.result_len = 1;
+ cmd_scu.parameter = NULL;
+ cmd_scu.result = &cmd_result;
+ rc = scu_command(dev_addr, &cmd_scu);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* stop all comm_exec */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_COMM_EXEC__A, VSB_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
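+ /* a primary demod also controls the IQM: stop it as a whole and put
+    the analog front-end in standby; otherwise only stop the
+    individual IQM blocks */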
+ if (primary) {
+ rc = drxj_dap_write_reg16(dev_addr, IQM_COMM_EXEC__A, IQM_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_iqm_af(demod, false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } else {
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FS_COMM_EXEC__A, IQM_FS_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FD_COMM_EXEC__A, IQM_FD_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RC_COMM_EXEC__A, IQM_RC_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RT_COMM_EXEC__A, IQM_RT_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_COMM_EXEC__A, IQM_CF_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ cfg_mpeg_output.enable_mpeg_output = false;
+ rc = ctrl_set_cfg_mpeg_output(demod, &cfg_mpeg_output);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int set_vsb_leak_n_gain ()
+* \brief Set VSB FFE leak and gain factors.
+* \param demod instance of demodulator.
+* \return int.
+*/
+static int set_vsb_leak_n_gain(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ int rc;
+
+ const u8 vsb_ffe_leak_gain_ram0[] = {
+ DRXJ_16TO8(0x8), /* FFETRAINLKRATIO1 */
+ DRXJ_16TO8(0x8), /* FFETRAINLKRATIO2 */
+ DRXJ_16TO8(0x8), /* FFETRAINLKRATIO3 */
+ DRXJ_16TO8(0xf), /* FFETRAINLKRATIO4 */
+ DRXJ_16TO8(0xf), /* FFETRAINLKRATIO5 */
+ DRXJ_16TO8(0xf), /* FFETRAINLKRATIO6 */
+ DRXJ_16TO8(0xf), /* FFETRAINLKRATIO7 */
+ DRXJ_16TO8(0xf), /* FFETRAINLKRATIO8 */
+ DRXJ_16TO8(0xf), /* FFETRAINLKRATIO9 */
+ DRXJ_16TO8(0x8), /* FFETRAINLKRATIO10 */
+ DRXJ_16TO8(0x8), /* FFETRAINLKRATIO11 */
+ DRXJ_16TO8(0x8), /* FFETRAINLKRATIO12 */
+ DRXJ_16TO8(0x10), /* FFERCA1TRAINLKRATIO1 */
+ DRXJ_16TO8(0x10), /* FFERCA1TRAINLKRATIO2 */
+ DRXJ_16TO8(0x10), /* FFERCA1TRAINLKRATIO3 */
+ DRXJ_16TO8(0x20), /* FFERCA1TRAINLKRATIO4 */
+ DRXJ_16TO8(0x20), /* FFERCA1TRAINLKRATIO5 */
+ DRXJ_16TO8(0x20), /* FFERCA1TRAINLKRATIO6 */
+ DRXJ_16TO8(0x20), /* FFERCA1TRAINLKRATIO7 */
+ DRXJ_16TO8(0x20), /* FFERCA1TRAINLKRATIO8 */
+ DRXJ_16TO8(0x20), /* FFERCA1TRAINLKRATIO9 */
+ DRXJ_16TO8(0x10), /* FFERCA1TRAINLKRATIO10 */
+ DRXJ_16TO8(0x10), /* FFERCA1TRAINLKRATIO11 */
+ DRXJ_16TO8(0x10), /* FFERCA1TRAINLKRATIO12 */
+ DRXJ_16TO8(0x10), /* FFERCA1DATALKRATIO1 */
+ DRXJ_16TO8(0x10), /* FFERCA1DATALKRATIO2 */
+ DRXJ_16TO8(0x10), /* FFERCA1DATALKRATIO3 */
+ DRXJ_16TO8(0x20), /* FFERCA1DATALKRATIO4 */
+ DRXJ_16TO8(0x20), /* FFERCA1DATALKRATIO5 */
+ DRXJ_16TO8(0x20), /* FFERCA1DATALKRATIO6 */
+ DRXJ_16TO8(0x20), /* FFERCA1DATALKRATIO7 */
+ DRXJ_16TO8(0x20), /* FFERCA1DATALKRATIO8 */
+ DRXJ_16TO8(0x20), /* FFERCA1DATALKRATIO9 */
+ DRXJ_16TO8(0x10), /* FFERCA1DATALKRATIO10 */
+ DRXJ_16TO8(0x10), /* FFERCA1DATALKRATIO11 */
+ DRXJ_16TO8(0x10), /* FFERCA1DATALKRATIO12 */
+ DRXJ_16TO8(0x10), /* FFERCA2TRAINLKRATIO1 */
+ DRXJ_16TO8(0x10), /* FFERCA2TRAINLKRATIO2 */
+ DRXJ_16TO8(0x10), /* FFERCA2TRAINLKRATIO3 */
+ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO4 */
+ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO5 */
+ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO6 */
+ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO7 */
+ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO8 */
+ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO9 */
+ DRXJ_16TO8(0x10), /* FFERCA2TRAINLKRATIO10 */
+ DRXJ_16TO8(0x10), /* FFERCA2TRAINLKRATIO11 */
+ DRXJ_16TO8(0x10), /* FFERCA2TRAINLKRATIO12 */
+ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO1 */
+ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO2 */
+ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO3 */
+ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO4 */
+ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO5 */
+ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO6 */
+ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO7 */
+ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO8 */
+ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO9 */
+ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO10 */
+ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO11 */
+ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO12 */
+ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO1 */
+ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO2 */
+ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO3 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO4 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO5 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO6 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO7 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO8 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO9 */
+ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO10 */
+ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO11 */
+ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO12 */
+ DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO1 */
+ DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO2 */
+ DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO3 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO4 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO5 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO6 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO7 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO8 */
+ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO9 */
+ DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO10 */
+ DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO11 */
+ DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO12 */
+ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO1 */
+ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO2 */
+ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO3 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO4 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO5 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO6 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO7 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO8 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO9 */
+ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO10 */
+ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO11 */
+ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO12 */
+ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO1 */
+ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO2 */
+ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO3 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO4 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO5 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO6 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO7 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO8 */
+ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO9 */
+ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO10 */
+ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO11 */
+ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO12 */
+ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN1 */
+ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN2 */
+ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN3 */
+ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN4 */
+ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN5 */
+ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN6 */
+ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN7 */
+ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN8 */
+ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN9 */
+ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN10 */
+ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN11 */
+ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN12 */
+ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN1 */
+ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN2 */
+ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN3 */
+ DRXJ_16TO8(0x1010), /* FIRRCA1GAIN4 */
+ DRXJ_16TO8(0x1010), /* FIRRCA1GAIN5 */
+ DRXJ_16TO8(0x1010), /* FIRRCA1GAIN6 */
+ DRXJ_16TO8(0x1010), /* FIRRCA1GAIN7 */
+ DRXJ_16TO8(0x1010) /* FIRRCA1GAIN8 */
+ };
+
+ const u8 vsb_ffe_leak_gain_ram1[] = {
+ DRXJ_16TO8(0x1010), /* FIRRCA1GAIN9 */
+ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN10 */
+ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN11 */
+ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN12 */
+ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN1 */
+ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN2 */
+ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN3 */
+ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN4 */
+ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN5 */
+ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN6 */
+ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN7 */
+ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN8 */
+ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN9 */
+ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN10 */
+ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN11 */
+ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN12 */
+ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN1 */
+ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN2 */
+ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN3 */
+ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN4 */
+ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN5 */
+ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN6 */
+ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN7 */
+ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN8 */
+ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN9 */
+ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN10 */
+ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN11 */
+ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN12 */
+ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN1 */
+ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN2 */
+ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN3 */
+ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN4 */
+ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN5 */
+ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN6 */
+ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN7 */
+ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN8 */
+ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN9 */
+ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN10 */
+ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN11 */
+ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN12 */
+ DRXJ_16TO8(0x001f), /* DFETRAINLKRATIO */
+ DRXJ_16TO8(0x01ff), /* DFERCA1TRAINLKRATIO */
+ DRXJ_16TO8(0x01ff), /* DFERCA1DATALKRATIO */
+ DRXJ_16TO8(0x004f), /* DFERCA2TRAINLKRATIO */
+ DRXJ_16TO8(0x004f), /* DFERCA2DATALKRATIO */
+ DRXJ_16TO8(0x01ff), /* DFEDDM1TRAINLKRATIO */
+ DRXJ_16TO8(0x01ff), /* DFEDDM1DATALKRATIO */
+ DRXJ_16TO8(0x0352), /* DFEDDM2TRAINLKRATIO */
+ DRXJ_16TO8(0x0352), /* DFEDDM2DATALKRATIO */
+ DRXJ_16TO8(0x0000), /* DFETRAINGAIN */
+ DRXJ_16TO8(0x2020), /* DFERCA1GAIN */
+ DRXJ_16TO8(0x1010), /* DFERCA2GAIN */
+ DRXJ_16TO8(0x1818), /* DFEDDM1GAIN */
+ DRXJ_16TO8(0x1212) /* DFEDDM2GAIN */
+ };
+
+ dev_addr = demod->my_i2c_dev_addr;
+ rc = drxdap_fasi_write_block(dev_addr, VSB_SYSCTRL_RAM0_FFETRAINLKRATIO1__A, sizeof(vsb_ffe_leak_gain_ram0), ((u8 *)vsb_ffe_leak_gain_ram0), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_block(dev_addr, VSB_SYSCTRL_RAM1_FIRRCA1GAIN9__A, sizeof(vsb_ffe_leak_gain_ram1), ((u8 *)vsb_ffe_leak_gain_ram1), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int set_vsb()
+* \brief Set 8VSB demod.
+* \param demod instance of demodulator.
+* \return int.
+*
+*/
+static int set_vsb(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ int rc;
+ struct drx_common_attr *common_attr = NULL;
+ struct drxjscu_cmd cmd_scu;
+ struct drxj_data *ext_attr = NULL;
+ u16 cmd_result = 0;
+ u16 cmd_param = 0;
+ const u8 vsb_taps_re[] = {
+ DRXJ_16TO8(-2), /* re0 */
+ DRXJ_16TO8(4), /* re1 */
+ DRXJ_16TO8(1), /* re2 */
+ DRXJ_16TO8(-4), /* re3 */
+ DRXJ_16TO8(1), /* re4 */
+ DRXJ_16TO8(4), /* re5 */
+ DRXJ_16TO8(-3), /* re6 */
+ DRXJ_16TO8(-3), /* re7 */
+ DRXJ_16TO8(6), /* re8 */
+ DRXJ_16TO8(1), /* re9 */
+ DRXJ_16TO8(-9), /* re10 */
+ DRXJ_16TO8(3), /* re11 */
+ DRXJ_16TO8(12), /* re12 */
+ DRXJ_16TO8(-9), /* re13 */
+ DRXJ_16TO8(-15), /* re14 */
+ DRXJ_16TO8(17), /* re15 */
+ DRXJ_16TO8(19), /* re16 */
+ DRXJ_16TO8(-29), /* re17 */
+ DRXJ_16TO8(-22), /* re18 */
+ DRXJ_16TO8(45), /* re19 */
+ DRXJ_16TO8(25), /* re20 */
+ DRXJ_16TO8(-70), /* re21 */
+ DRXJ_16TO8(-28), /* re22 */
+ DRXJ_16TO8(111), /* re23 */
+ DRXJ_16TO8(30), /* re24 */
+ DRXJ_16TO8(-201), /* re25 */
+ DRXJ_16TO8(-31), /* re26 */
+ DRXJ_16TO8(629) /* re27 */
+ };
+
+ dev_addr = demod->my_i2c_dev_addr;
+ common_attr = (struct drx_common_attr *) demod->my_common_attr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ /* stop all comm_exec */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_COMM_EXEC__A, VSB_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FS_COMM_EXEC__A, IQM_FS_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FD_COMM_EXEC__A, IQM_FD_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RC_COMM_EXEC__A, IQM_RC_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RT_COMM_EXEC__A, IQM_RT_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_COMM_EXEC__A, IQM_CF_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* reset demodulator */
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB
+ | SCU_RAM_COMMAND_CMD_DEMOD_RESET;
+ cmd_scu.parameter_len = 0;
+ cmd_scu.result_len = 1;
+ cmd_scu.parameter = NULL;
+ cmd_scu.result = &cmd_result;
+ rc = scu_command(dev_addr, &cmd_scu);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_DCF_BYPASS__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FS_ADJ_SEL__A, IQM_FS_ADJ_SEL_B_VSB, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RC_ADJ_SEL__A, IQM_RC_ADJ_SEL_B_VSB, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ ext_attr->iqm_rc_rate_ofs = 0x00AD0D79;
+ rc = drxdap_fasi_write_reg32(dev_addr, IQM_RC_RATE_OFS_LO__A, ext_attr->iqm_rc_rate_ofs, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CFAGC_GAINSHIFT__A, 4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN1TRK__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RC_CROUT_ENA__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RC_STRETCH__A, 28, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RT_ACTIVE__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SYMMETRIC__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_MIDTAP__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_OUT_ENA__A, IQM_CF_OUT_ENA_VSB__M, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SCALE__A, 1393, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SCALE_SH__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_POW_MEAS_LEN__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_RE0__A, sizeof(vsb_taps_re), ((u8 *)vsb_taps_re), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_IM0__A, sizeof(vsb_taps_re), ((u8 *)vsb_taps_re), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_BNTHRESH__A, 330, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* set higher threshold */
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CLPLASTNUM__A, 90, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* burst detection on */
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_RCA1__A, 0x0042, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* drop thresholds by 1 dB */
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_RCA2__A, 0x0053, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* drop thresholds by 2 dB */
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_EQCTRL__A, 0x1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* cma on */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_GPIO__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* GPIO */
+
+ /* Initialize the FEC Subsystem */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_TOP_ANNEX__A, FEC_TOP_ANNEX_D, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ {
+ u16 fec_oc_snc_mode = 0;
+ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* output data even when not locked */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_MODE__A, fec_oc_snc_mode | FEC_OC_SNC_MODE_UNLOCK_ENABLE__M, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ /* set clip */
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLP_LEN__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLP_TH__A, 470, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_SNS_LEN__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_PT__A, 0xD4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* no transparent, no A&C framing; parity is set in mpegoutput */
+ {
+ u16 fec_oc_reg_mode = 0;
+ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_MODE__A, &fec_oc_reg_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_MODE__A, fec_oc_reg_mode & (~(FEC_OC_MODE_TRANSPARENT__M | FEC_OC_MODE_CLEAR__M | FEC_OC_MODE_RETAIN_FRAMING__M)), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, FEC_DI_TIMEOUT_LO__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* timeout counter for restarting */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_DI_TIMEOUT_HI__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_RS_MODE__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* bypass disabled */
+ /* initialize RS packet error measurement parameters */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_RS_MEASUREMENT_PERIOD__A, FEC_RS_MEASUREMENT_PERIOD, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_RS_MEASUREMENT_PRESCALE__A, FEC_RS_MEASUREMENT_PRESCALE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* init measurement period of MER/SER */
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_MEASUREMENT_PERIOD__A, VSB_TOP_MEASUREMENT_PERIOD, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_reg32(dev_addr, SCU_RAM_FEC_ACCUM_CW_CORRECTED_LO__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_FEC_MEAS_COUNT__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CKGN1TRK__A, 128, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* B-Input to ADC, PGA+filter in standby */
+ if (!ext_attr->has_lna) {
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_AMUX__A, 0x02, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ /* turn on IQM AF. It has to be done before the set_agc_*() calls */
+ rc = set_iqm_af(demod, true);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = adc_synchronization(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = init_agc(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_agc_if(demod, &(ext_attr->vsb_if_agc_cfg), false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_agc_rf(demod, &(ext_attr->vsb_rf_agc_cfg), false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ {
+ /* TODO fix this, store a struct drxj_cfg_afe_gain structure in struct drxj_data instead
+ of only the gain */
+ struct drxj_cfg_afe_gain vsb_pga_cfg = { DRX_STANDARD_8VSB, 0 };
+
+ vsb_pga_cfg.gain = ext_attr->vsb_pga_cfg;
+ rc = ctrl_set_cfg_afe_gain(demod, &vsb_pga_cfg);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ rc = ctrl_set_cfg_pre_saw(demod, &(ext_attr->vsb_pre_saw_cfg));
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Mpeg output has to be in front of FEC active */
+ rc = set_mpegtei_handling(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = bit_reverse_mpeg_output(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_mpeg_start_width(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ {
+ /* TODO: move to set_standard after hardware reset value problem is solved */
+ /* Configure initial MPEG output */
+ struct drx_cfg_mpeg_output cfg_mpeg_output;
+
+ memcpy(&cfg_mpeg_output, &common_attr->mpeg_cfg, sizeof(cfg_mpeg_output));
+ cfg_mpeg_output.enable_mpeg_output = true;
+
+ rc = ctrl_set_cfg_mpeg_output(demod, &cfg_mpeg_output);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ /* TBD: what parameters should be set */
+ cmd_param = 0x00; /* Default mode AGC on, etc */
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB
+ | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM;
+ cmd_scu.parameter_len = 1;
+ cmd_scu.result_len = 1;
+ cmd_scu.parameter = &cmd_param;
+ cmd_scu.result = &cmd_result;
+ rc = scu_command(dev_addr, &cmd_scu);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_BEAGC_GAINSHIFT__A, 0x0004, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_PT__A, 0x00D2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SYSSMTRNCTRL__A, VSB_TOP_SYSSMTRNCTRL__PRE | VSB_TOP_SYSSMTRNCTRL_NCOTIMEOUTCNTEN__M, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_BEDETCTRL__A, 0x142, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_LBAGCREFLVL__A, 640, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN1ACQ__A, 4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN1TRK__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN2TRK__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* start demodulator */
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB
+ | SCU_RAM_COMMAND_CMD_DEMOD_START;
+ cmd_scu.parameter_len = 0;
+ cmd_scu.result_len = 1;
+ cmd_scu.parameter = NULL;
+ cmd_scu.result = &cmd_result;
+ rc = scu_command(dev_addr, &cmd_scu);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, IQM_COMM_EXEC__A, IQM_COMM_EXEC_ACTIVE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, VSB_COMM_EXEC__A, VSB_COMM_EXEC_ACTIVE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn static int get_vsb_post_rs_pck_err(struct i2c_device_addr *dev_addr, u32 *pck_errs, u32 *pck_count)
+* \brief Get the post Reed-Solomon packet error count in 8VSB mode
+* \return Error code
+*/
+static int get_vsb_post_rs_pck_err(struct i2c_device_addr *dev_addr,
+ u32 *pck_errs, u32 *pck_count)
+{
+ int rc;
+ u16 data = 0;
+ u16 period = 0;
+ u16 prescale = 0;
+ u16 packet_errors_mant = 0;
+ u16 packet_errors_exp = 0;
+
+ rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_FAILURES__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ packet_errors_mant = data & FEC_RS_NR_FAILURES_FIXED_MANT__M;
+ packet_errors_exp = (data & FEC_RS_NR_FAILURES_EXP__M)
+ >> FEC_RS_NR_FAILURES_EXP__B;
+ period = FEC_RS_MEASUREMENT_PERIOD;
+ prescale = FEC_RS_MEASUREMENT_PRESCALE;
+ /* packet error rate = (number of packets in error) per second */
+ /* 77.3 us is the time per packet */
+ if (period * prescale == 0) {
+ pr_err("error: period and/or prescale is zero!\n");
+ return -EIO;
+ }
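+ /* the failure count is reported as mantissa * 2^exponent */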
+ *pck_errs = packet_errors_mant * (1 << packet_errors_exp);
+ *pck_count = period * prescale * 77;
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn static int get_vs_bpost_viterbi_ber(struct i2c_device_addr *dev_addr, u32 *ber, u32 *cnt)
+* \brief Get the post-Viterbi BER in VSB mode
+* \return Error code
+*/
+static int get_vs_bpost_viterbi_ber(struct i2c_device_addr *dev_addr,
+ u32 *ber, u32 *cnt)
+{
+ int rc;
+ u16 data = 0;
+ u16 period = 0;
+ u16 prescale = 0;
+ u16 bit_errors_mant = 0;
+ u16 bit_errors_exp = 0;
+
+ rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_BIT_ERRORS__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ period = FEC_RS_MEASUREMENT_PERIOD;
+ prescale = FEC_RS_MEASUREMENT_PRESCALE;
+
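+ /* the bit error count is reported as mantissa * 2^exponent */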
+ bit_errors_mant = data & FEC_RS_NR_BIT_ERRORS_FIXED_MANT__M;
+ bit_errors_exp = (data & FEC_RS_NR_BIT_ERRORS_EXP__M)
+ >> FEC_RS_NR_BIT_ERRORS_EXP__B;
+
+ *cnt = period * prescale * 207 * ((bit_errors_exp > 2) ? 1 : 8);
+
+ if (((bit_errors_mant << bit_errors_exp) >> 3) > 68700)
+ *ber = (*cnt) * 26570;
+ else {
+ if (period * prescale == 0) {
+ pr_err("error: period and/or prescale is zero!\n");
+ return -EIO;
+ }
+ *ber = bit_errors_mant << ((bit_errors_exp > 2) ?
+ (bit_errors_exp - 3) : bit_errors_exp);
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn static int get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr, u32 *ber, u32 *cnt)
+* \brief Get the pre-Viterbi BER in VSB mode
+* \return Error code
+*/
+static int get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr,
+ u32 *ber, u32 *cnt)
+{
+ u16 data = 0;
+ int rc;
+
+ rc = drxj_dap_read_reg16(dev_addr, VSB_TOP_NR_SYM_ERRS__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ return -EIO;
+ }
+ *ber = data;
+ *cnt = VSB_TOP_MEASUREMENT_PERIOD * SYMBOLS_PER_SEGMENT;
+
+ return 0;
+}
+
+/**
+* \fn static int get_vsbmer(struct i2c_device_addr *dev_addr, u16 *mer)
+* \brief Get the values of MER
+* \return Error code
+*/
+static int get_vsbmer(struct i2c_device_addr *dev_addr, u16 *mer)
+{
+ int rc;
+ u16 data_hi = 0;
+
+ rc = drxj_dap_read_reg16(dev_addr, VSB_TOP_ERR_ENERGY_H__A, &data_hi, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ *mer =
+ (u16) (log1_times100(21504) - log1_times100((data_hi << 6) / 52));
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+
+/*============================================================================*/
+/*== END 8VSB DATAPATH FUNCTIONS ==*/
+/*============================================================================*/
+
+/*============================================================================*/
+/*============================================================================*/
+/*== QAM DATAPATH FUNCTIONS ==*/
+/*============================================================================*/
+/*============================================================================*/
+
+/**
+* \fn int power_down_qam ()
+* \brief Power down QAM related blocks.
+* \param demod instance of demodulator.
+* \param primary true to also stop the IQM as a whole and put the analog front-end in standby.
+* \return int.
+*/
+static int power_down_qam(struct drx_demod_instance *demod, bool primary)
+{
+ struct drxjscu_cmd cmd_scu = { /* command */ 0,
+ /* parameter_len */ 0,
+ /* result_len */ 0,
+ /* *parameter */ NULL,
+ /* *result */ NULL
+ };
+ int rc;
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ struct drx_cfg_mpeg_output cfg_mpeg_output;
+ struct drx_common_attr *common_attr = demod->my_common_attr;
+ u16 cmd_result = 0;
+
+ /*
+ STOP demodulator
+ resets IQM, QAM and FEC HW blocks
+ */
+ /* stop all comm_exec */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_COMM_EXEC__A, QAM_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM |
+ SCU_RAM_COMMAND_CMD_DEMOD_STOP;
+ cmd_scu.parameter_len = 0;
+ cmd_scu.result_len = 1;
+ cmd_scu.parameter = NULL;
+ cmd_scu.result = &cmd_result;
+ rc = scu_command(dev_addr, &cmd_scu);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ if (primary) {
+ rc = drxj_dap_write_reg16(dev_addr, IQM_COMM_EXEC__A, IQM_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_iqm_af(demod, false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } else {
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FS_COMM_EXEC__A, IQM_FS_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FD_COMM_EXEC__A, IQM_FD_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RC_COMM_EXEC__A, IQM_RC_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RT_COMM_EXEC__A, IQM_RT_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_COMM_EXEC__A, IQM_CF_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ memcpy(&cfg_mpeg_output, &common_attr->mpeg_cfg, sizeof(cfg_mpeg_output));
+ cfg_mpeg_output.enable_mpeg_output = false;
+
+ rc = ctrl_set_cfg_mpeg_output(demod, &cfg_mpeg_output);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+* \fn int set_qam_measurement ()
+* \brief Setup of the QAM measurement intervals for signal quality
+* \param demod instance of demod.
+* \param constellation current constellation.
+* \param symbol_rate current symbol rate.
+* \return int.
+*
+* NOTE:
+* Take into account that for certain settings the error counters can overflow.
+* The implementation does not check this.
+*
+* TODO: ext_attr->fec_bits_desired is overridden by constellation-dependent
+* constants to get a measurement period of approx. 1 sec. Remove the
+* fec_bits_desired field?
+*
+*/
+#ifndef DRXJ_VSB_ONLY
+static int
+set_qam_measurement(struct drx_demod_instance *demod,
+ enum drx_modulation constellation, u32 symbol_rate)
+{
+ struct i2c_device_addr *dev_addr = NULL; /* device address for I2C writes */
+ struct drxj_data *ext_attr = NULL; /* Global data container for DRXJ specif data */
+ int rc;
+ u32 fec_bits_desired = 0; /* BER accounting period */
+ u16 fec_rs_plen = 0; /* defines RS BER measurement period */
+ u16 fec_rs_prescale = 0; /* ReedSolomon Measurement Prescale */
+ u32 fec_rs_period = 0; /* Value for corresponding I2C register */
+ u32 fec_rs_bit_cnt = 0; /* Actual precise amount of bits */
+ u32 fec_oc_snc_fail_period = 0; /* Value for corresponding I2C register */
+ u32 qam_vd_period = 0; /* Value for corresponding I2C register */
+ u32 qam_vd_bit_cnt = 0; /* Actual precise amount of bits */
+ u16 fec_vd_plen = 0; /* no of trellis symbols: VD SER measur period */
+ u16 qam_vd_prescale = 0; /* Viterbi Measurement Prescale */
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ fec_bits_desired = ext_attr->fec_bits_desired;
+ fec_rs_prescale = ext_attr->fec_rs_prescale;
+
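+ /* override: bits per symbol times symbol rate gives roughly one
+    second worth of bits */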
+ switch (constellation) {
+ case DRX_CONSTELLATION_QAM16:
+ fec_bits_desired = 4 * symbol_rate;
+ break;
+ case DRX_CONSTELLATION_QAM32:
+ fec_bits_desired = 5 * symbol_rate;
+ break;
+ case DRX_CONSTELLATION_QAM64:
+ fec_bits_desired = 6 * symbol_rate;
+ break;
+ case DRX_CONSTELLATION_QAM128:
+ fec_bits_desired = 7 * symbol_rate;
+ break;
+ case DRX_CONSTELLATION_QAM256:
+ fec_bits_desired = 8 * symbol_rate;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Parameters for Reed-Solomon Decoder */
+ /* fecrs_period = (int)ceil(FEC_BITS_DESIRED/(fecrs_prescale*plen)) */
+ /* rs_bit_cnt = fecrs_period*fecrs_prescale*plen */
+ /* result is within 32 bit arithmetic -> */
+ /* no need for mult or frac functions */
+
+ /* TODO: use constant instead of calculation and remove the fec_rs_plen in ext_attr */
+ switch (ext_attr->standard) {
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_C:
+ fec_rs_plen = 204 * 8;
+ break;
+ case DRX_STANDARD_ITU_B:
+ fec_rs_plen = 128 * 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ext_attr->fec_rs_plen = fec_rs_plen; /* for getSigQual */
+ fec_rs_bit_cnt = fec_rs_prescale * fec_rs_plen; /* temp storage */
+ if (fec_rs_bit_cnt == 0) {
+ pr_err("error: fec_rs_bit_cnt is zero!\n");
+ return -EIO;
+ }
+ fec_rs_period = fec_bits_desired / fec_rs_bit_cnt + 1; /* ceil */
+ if (ext_attr->standard != DRX_STANDARD_ITU_B)
+ fec_oc_snc_fail_period = fec_rs_period;
+
+ /* limit to max 16 bit value (I2C register width) if needed */
+ if (fec_rs_period > 0xFFFF)
+ fec_rs_period = 0xFFFF;
+
+ /* write corresponding registers */
+ switch (ext_attr->standard) {
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_C:
+ break;
+ case DRX_STANDARD_ITU_B:
+ switch (constellation) {
+ case DRX_CONSTELLATION_QAM64:
+ fec_rs_period = 31581;
+ fec_oc_snc_fail_period = 17932;
+ break;
+ case DRX_CONSTELLATION_QAM256:
+ fec_rs_period = 45446;
+ fec_oc_snc_fail_period = 25805;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_FAIL_PERIOD__A, (u16)fec_oc_snc_fail_period, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_RS_MEASUREMENT_PERIOD__A, (u16)fec_rs_period, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_RS_MEASUREMENT_PRESCALE__A, fec_rs_prescale, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ ext_attr->fec_rs_period = (u16) fec_rs_period;
+ ext_attr->fec_rs_prescale = fec_rs_prescale;
+ rc = drxdap_fasi_write_reg32(dev_addr, SCU_RAM_FEC_ACCUM_CW_CORRECTED_LO__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_FEC_MEAS_COUNT__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ if (ext_attr->standard == DRX_STANDARD_ITU_B) {
+ /* Parameters for Viterbi Decoder */
+ /* qamvd_period = (int)ceil(FEC_BITS_DESIRED/ */
+ /* (qamvd_prescale*plen*(qam_constellation+1))) */
+ /* vd_bit_cnt = qamvd_period*qamvd_prescale*plen */
+ /* result is within 32 bit arithmetic -> */
+ /* no need for mult or frac functions */
+
+ /* a(8 bit) * b(8 bit) = 16 bit result => mult32 not needed */
+ fec_vd_plen = ext_attr->fec_vd_plen;
+ qam_vd_prescale = ext_attr->qam_vd_prescale;
+ qam_vd_bit_cnt = qam_vd_prescale * fec_vd_plen; /* temp storage */
+
+ switch (constellation) {
+ case DRX_CONSTELLATION_QAM64:
+ /* a(16 bit) * b(4 bit) = 20 bit result => mult32 not needed */
+ qam_vd_period =
+ qam_vd_bit_cnt * (QAM_TOP_CONSTELLATION_QAM64 + 1)
+ * (QAM_TOP_CONSTELLATION_QAM64 + 1);
+ break;
+ case DRX_CONSTELLATION_QAM256:
+ /* a(16 bit) * b(5 bit) = 21 bit result => mult32 not needed */
+ qam_vd_period =
+ qam_vd_bit_cnt * (QAM_TOP_CONSTELLATION_QAM256 + 1)
+ * (QAM_TOP_CONSTELLATION_QAM256 + 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (qam_vd_period == 0) {
+ pr_err("error: qam_vd_period is zero!\n");
+ return -EIO;
+ }
+ qam_vd_period = fec_bits_desired / qam_vd_period;
+ /* limit to max 16 bit value (I2C register width) if needed */
+ if (qam_vd_period > 0xFFFF)
+ qam_vd_period = 0xFFFF;
+
+ /* a(16 bit) * b(16 bit) = 32 bit result => mult32 not needed */
+ qam_vd_bit_cnt *= qam_vd_period;
+
+ rc = drxj_dap_write_reg16(dev_addr, QAM_VD_MEASUREMENT_PERIOD__A, (u16)qam_vd_period, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_VD_MEASUREMENT_PRESCALE__A, qam_vd_prescale, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ ext_attr->qam_vd_period = (u16) qam_vd_period;
+ ext_attr->qam_vd_prescale = qam_vd_prescale;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+* \fn int set_qam16 ()
+* \brief QAM16 specific setup
+* \param demod instance of demod.
+* \return int.
+*/
+static int set_qam16(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ int rc;
+ const u8 qam_dq_qual_fun[] = {
+ DRXJ_16TO8(2), /* fun0 */
+ DRXJ_16TO8(2), /* fun1 */
+ DRXJ_16TO8(2), /* fun2 */
+ DRXJ_16TO8(2), /* fun3 */
+ DRXJ_16TO8(3), /* fun4 */
+ DRXJ_16TO8(3), /* fun5 */
+ };
+ const u8 qam_eq_cma_rad[] = {
+ DRXJ_16TO8(13517), /* RAD0 */
+ DRXJ_16TO8(13517), /* RAD1 */
+ DRXJ_16TO8(13517), /* RAD2 */
+ DRXJ_16TO8(13517), /* RAD3 */
+ DRXJ_16TO8(13517), /* RAD4 */
+ DRXJ_16TO8(13517), /* RAD5 */
+ };
+
+ rc = drxdap_fasi_write_block(dev_addr, QAM_DQ_QUAL_FUN0__A, sizeof(qam_dq_qual_fun), ((u8 *)qam_dq_qual_fun), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_block(dev_addr, SCU_RAM_QAM_EQ_CMA_RAD0__A, sizeof(qam_eq_cma_rad), ((u8 *)qam_eq_cma_rad), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RTH__A, 140, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FTH__A, 50, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_PTH__A, 120, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_QTH__A, 230, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_CTH__A, 95, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MTH__A, 105, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RATE_LIM__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FREQ_LIM__A, 56, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_COUNT_LIM__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, 220, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, 25, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, 6, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16)(-24), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16)(-65), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16)(-127), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_FINE__A, 15, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_COARSE__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_FINE__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_MEDIUM__A, 20, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_COARSE__A, 255, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_FINE__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_MEDIUM__A, 10, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_COARSE__A, 50, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_FINE__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_COARSE__A, 24, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_FINE__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_COARSE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_FINE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_MEDIUM__A, 32, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_COARSE__A, 240, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_FINE__A, 5, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 15, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_COARSE__A, 32, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_SL_SIG_POWER__A, 40960, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+* \fn int set_qam32 ()
+* \brief QAM32 specific setup
+* \param demod: instance of demod.
+* \return int.
+*/
+static int set_qam32(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ int rc;
+ const u8 qam_dq_qual_fun[] = {
+ DRXJ_16TO8(3), /* fun0 */
+ DRXJ_16TO8(3), /* fun1 */
+ DRXJ_16TO8(3), /* fun2 */
+ DRXJ_16TO8(3), /* fun3 */
+ DRXJ_16TO8(4), /* fun4 */
+ DRXJ_16TO8(4), /* fun5 */
+ };
+ const u8 qam_eq_cma_rad[] = {
+ DRXJ_16TO8(6707), /* RAD0 */
+ DRXJ_16TO8(6707), /* RAD1 */
+ DRXJ_16TO8(6707), /* RAD2 */
+ DRXJ_16TO8(6707), /* RAD3 */
+ DRXJ_16TO8(6707), /* RAD4 */
+ DRXJ_16TO8(6707), /* RAD5 */
+ };
+
+ rc = drxdap_fasi_write_block(dev_addr, QAM_DQ_QUAL_FUN0__A, sizeof(qam_dq_qual_fun), ((u8 *)qam_dq_qual_fun), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_block(dev_addr, SCU_RAM_QAM_EQ_CMA_RAD0__A, sizeof(qam_eq_cma_rad), ((u8 *)qam_eq_cma_rad), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RTH__A, 90, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FTH__A, 50, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_PTH__A, 100, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_QTH__A, 170, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_CTH__A, 80, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MTH__A, 100, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RATE_LIM__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FREQ_LIM__A, 56, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_COUNT_LIM__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, 140, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16)(-8), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16)(-16), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16)(-26), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16)(-56), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16)(-86), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_FINE__A, 15, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_COARSE__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_FINE__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_MEDIUM__A, 20, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_COARSE__A, 255, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_FINE__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_MEDIUM__A, 10, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_COARSE__A, 50, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_FINE__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_COARSE__A, 24, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_FINE__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_COARSE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_FINE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_MEDIUM__A, 32, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_COARSE__A, 176, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_FINE__A, 5, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 15, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_COARSE__A, 8, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_SL_SIG_POWER__A, 20480, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+* \fn int set_qam64 ()
+* \brief QAM64 specific setup
+* \param demod: instance of demod.
+* \return int.
+*/
+static int set_qam64(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ int rc;
+ const u8 qam_dq_qual_fun[] = { /* this is the hw reset value; not necessary to re-write */
+ DRXJ_16TO8(4), /* fun0 */
+ DRXJ_16TO8(4), /* fun1 */
+ DRXJ_16TO8(4), /* fun2 */
+ DRXJ_16TO8(4), /* fun3 */
+ DRXJ_16TO8(6), /* fun4 */
+ DRXJ_16TO8(6), /* fun5 */
+ };
+ const u8 qam_eq_cma_rad[] = {
+ DRXJ_16TO8(13336), /* RAD0 */
+ DRXJ_16TO8(12618), /* RAD1 */
+ DRXJ_16TO8(11988), /* RAD2 */
+ DRXJ_16TO8(13809), /* RAD3 */
+ DRXJ_16TO8(13809), /* RAD4 */
+ DRXJ_16TO8(15609), /* RAD5 */
+ };
+
+ rc = drxdap_fasi_write_block(dev_addr, QAM_DQ_QUAL_FUN0__A, sizeof(qam_dq_qual_fun), ((u8 *)qam_dq_qual_fun), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_block(dev_addr, SCU_RAM_QAM_EQ_CMA_RAD0__A, sizeof(qam_eq_cma_rad), ((u8 *)qam_eq_cma_rad), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RTH__A, 105, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FTH__A, 60, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_PTH__A, 100, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_QTH__A, 195, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_CTH__A, 80, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MTH__A, 84, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RATE_LIM__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FREQ_LIM__A, 32, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_COUNT_LIM__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, 141, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, 7, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16)(-15), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16)(-45), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16)(-80), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_FINE__A, 15, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_COARSE__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_FINE__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_MEDIUM__A, 30, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_COARSE__A, 255, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_FINE__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_MEDIUM__A, 15, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_COARSE__A, 80, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_FINE__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_COARSE__A, 24, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_FINE__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_COARSE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_FINE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_MEDIUM__A, 48, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_COARSE__A, 160, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_FINE__A, 5, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 15, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_COARSE__A, 32, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_SL_SIG_POWER__A, 43008, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+* \fn int set_qam128 ()
+* \brief QAM128 specific setup
+* \param demod: instance of demod.
+* \return int.
+*/
+static int set_qam128(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ int rc;
+ const u8 qam_dq_qual_fun[] = {
+ DRXJ_16TO8(6), /* fun0 */
+ DRXJ_16TO8(6), /* fun1 */
+ DRXJ_16TO8(6), /* fun2 */
+ DRXJ_16TO8(6), /* fun3 */
+ DRXJ_16TO8(9), /* fun4 */
+ DRXJ_16TO8(9), /* fun5 */
+ };
+ const u8 qam_eq_cma_rad[] = {
+ DRXJ_16TO8(6164), /* RAD0 */
+ DRXJ_16TO8(6598), /* RAD1 */
+ DRXJ_16TO8(6394), /* RAD2 */
+ DRXJ_16TO8(6409), /* RAD3 */
+ DRXJ_16TO8(6656), /* RAD4 */
+ DRXJ_16TO8(7238), /* RAD5 */
+ };
+
+ rc = drxdap_fasi_write_block(dev_addr, QAM_DQ_QUAL_FUN0__A, sizeof(qam_dq_qual_fun), ((u8 *)qam_dq_qual_fun), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_block(dev_addr, SCU_RAM_QAM_EQ_CMA_RAD0__A, sizeof(qam_eq_cma_rad), ((u8 *)qam_eq_cma_rad), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RTH__A, 50, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FTH__A, 60, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_PTH__A, 100, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_QTH__A, 140, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_CTH__A, 80, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MTH__A, 100, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RATE_LIM__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FREQ_LIM__A, 32, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_COUNT_LIM__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, 8, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, 65, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, 5, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16)(-1), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16)(-23), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_FINE__A, 15, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_COARSE__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_FINE__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_MEDIUM__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_COARSE__A, 255, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_FINE__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_MEDIUM__A, 20, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_COARSE__A, 80, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_FINE__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_COARSE__A, 24, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_FINE__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_COARSE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_FINE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_MEDIUM__A, 32, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_COARSE__A, 144, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_FINE__A, 5, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 15, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_COARSE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_SL_SIG_POWER__A, 20992, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+* \fn int set_qam256 ()
+* \brief QAM256 specific setup
+* \param demod: instance of demod.
+* \return int.
+*/
+static int set_qam256(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ int rc;
+ const u8 qam_dq_qual_fun[] = {
+ DRXJ_16TO8(8), /* fun0 */
+ DRXJ_16TO8(8), /* fun1 */
+ DRXJ_16TO8(8), /* fun2 */
+ DRXJ_16TO8(8), /* fun3 */
+ DRXJ_16TO8(12), /* fun4 */
+ DRXJ_16TO8(12), /* fun5 */
+ };
+ const u8 qam_eq_cma_rad[] = {
+ DRXJ_16TO8(12345), /* RAD0 */
+ DRXJ_16TO8(12345), /* RAD1 */
+ DRXJ_16TO8(13626), /* RAD2 */
+ DRXJ_16TO8(12931), /* RAD3 */
+ DRXJ_16TO8(14719), /* RAD4 */
+ DRXJ_16TO8(15356), /* RAD5 */
+ };
+
+ rc = drxdap_fasi_write_block(dev_addr, QAM_DQ_QUAL_FUN0__A, sizeof(qam_dq_qual_fun), ((u8 *)qam_dq_qual_fun), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_block(dev_addr, SCU_RAM_QAM_EQ_CMA_RAD0__A, sizeof(qam_eq_cma_rad), ((u8 *)qam_eq_cma_rad), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RTH__A, 50, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FTH__A, 60, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_PTH__A, 100, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_QTH__A, 150, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_CTH__A, 80, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MTH__A, 110, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RATE_LIM__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FREQ_LIM__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_COUNT_LIM__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, 8, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, 74, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, 18, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, 13, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, 7, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16)(-8), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_FINE__A, 15, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_COARSE__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_FINE__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_MEDIUM__A, 50, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_COARSE__A, 255, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_FINE__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_MEDIUM__A, 25, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_COARSE__A, 80, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_FINE__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_COARSE__A, 24, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_FINE__A, 12, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_COARSE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_FINE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_MEDIUM__A, 48, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_COARSE__A, 80, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_FINE__A, 5, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 15, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_COARSE__A, 16, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_SL_SIG_POWER__A, 43520, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
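+/*
+* Bitmask selecting which parts of the QAM configuration set_qam()
+* (re)programs: ALL does a full (re)initialization, CONSTELLATION only
+* reprograms the constellation-dependent registers, SPECTRUM only
+* retunes the frequency via set_frequency().
+*/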
+#define QAM_SET_OP_ALL 0x1
+#define QAM_SET_OP_CONSTELLATION 0x2
+#define QAM_SET_OP_SPECTRUM 0x4
+
+/**
+* \fn int set_qam ()
+* \brief Set QAM demod.
+* \param demod: instance of demod.
+* \param channel: pointer to channel data.
+* \param tuner_freq_offset: tuner frequency offset.
+* \param op: bitmask of QAM_SET_OP_* operations to perform.
+* \return int.
+*/
+static int
+set_qam(struct drx_demod_instance *demod,
+ struct drx_channel *channel, s32 tuner_freq_offset, u32 op)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ struct drxj_data *ext_attr = NULL;
+ struct drx_common_attr *common_attr = NULL;
+ int rc;
+ u32 adc_frequency = 0;
+ u32 iqm_rc_rate = 0;
+ u16 cmd_result = 0;
+ u16 lc_symbol_freq = 0;
+ u16 iqm_rc_stretch = 0;
+ u16 set_env_parameters = 0;
+ u16 set_param_parameters[2] = { 0 };
+ struct drxjscu_cmd cmd_scu = { /* command */ 0,
+ /* parameter_len */ 0,
+ /* result_len */ 0,
+ /* parameter */ NULL,
+ /* result */ NULL
+ };
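+ /*
+ Receive filter (IQM CF) tap tables, one per standard/constellation:
+ annex A, annex B QAM64, annex B QAM256 and annex C. Each holds 28
+ 16-bit coefficients and is written to both the real and the
+ imaginary tap registers further down.
+ */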
+ const u8 qam_a_taps[] = {
+ DRXJ_16TO8(-1), /* re0 */
+ DRXJ_16TO8(1), /* re1 */
+ DRXJ_16TO8(1), /* re2 */
+ DRXJ_16TO8(-1), /* re3 */
+ DRXJ_16TO8(-1), /* re4 */
+ DRXJ_16TO8(2), /* re5 */
+ DRXJ_16TO8(1), /* re6 */
+ DRXJ_16TO8(-2), /* re7 */
+ DRXJ_16TO8(0), /* re8 */
+ DRXJ_16TO8(3), /* re9 */
+ DRXJ_16TO8(-1), /* re10 */
+ DRXJ_16TO8(-3), /* re11 */
+ DRXJ_16TO8(4), /* re12 */
+ DRXJ_16TO8(1), /* re13 */
+ DRXJ_16TO8(-8), /* re14 */
+ DRXJ_16TO8(4), /* re15 */
+ DRXJ_16TO8(13), /* re16 */
+ DRXJ_16TO8(-13), /* re17 */
+ DRXJ_16TO8(-19), /* re18 */
+ DRXJ_16TO8(28), /* re19 */
+ DRXJ_16TO8(25), /* re20 */
+ DRXJ_16TO8(-53), /* re21 */
+ DRXJ_16TO8(-31), /* re22 */
+ DRXJ_16TO8(96), /* re23 */
+ DRXJ_16TO8(37), /* re24 */
+ DRXJ_16TO8(-190), /* re25 */
+ DRXJ_16TO8(-40), /* re26 */
+ DRXJ_16TO8(619) /* re27 */
+ };
+ const u8 qam_b64_taps[] = {
+ DRXJ_16TO8(0), /* re0 */
+ DRXJ_16TO8(-2), /* re1 */
+ DRXJ_16TO8(1), /* re2 */
+ DRXJ_16TO8(2), /* re3 */
+ DRXJ_16TO8(-2), /* re4 */
+ DRXJ_16TO8(0), /* re5 */
+ DRXJ_16TO8(4), /* re6 */
+ DRXJ_16TO8(-2), /* re7 */
+ DRXJ_16TO8(-4), /* re8 */
+ DRXJ_16TO8(4), /* re9 */
+ DRXJ_16TO8(3), /* re10 */
+ DRXJ_16TO8(-6), /* re11 */
+ DRXJ_16TO8(0), /* re12 */
+ DRXJ_16TO8(6), /* re13 */
+ DRXJ_16TO8(-5), /* re14 */
+ DRXJ_16TO8(-3), /* re15 */
+ DRXJ_16TO8(11), /* re16 */
+ DRXJ_16TO8(-4), /* re17 */
+ DRXJ_16TO8(-19), /* re18 */
+ DRXJ_16TO8(19), /* re19 */
+ DRXJ_16TO8(28), /* re20 */
+ DRXJ_16TO8(-45), /* re21 */
+ DRXJ_16TO8(-36), /* re22 */
+ DRXJ_16TO8(90), /* re23 */
+ DRXJ_16TO8(42), /* re24 */
+ DRXJ_16TO8(-185), /* re25 */
+ DRXJ_16TO8(-46), /* re26 */
+ DRXJ_16TO8(614) /* re27 */
+ };
+ const u8 qam_b256_taps[] = {
+ DRXJ_16TO8(-2), /* re0 */
+ DRXJ_16TO8(4), /* re1 */
+ DRXJ_16TO8(1), /* re2 */
+ DRXJ_16TO8(-4), /* re3 */
+ DRXJ_16TO8(0), /* re4 */
+ DRXJ_16TO8(4), /* re5 */
+ DRXJ_16TO8(-2), /* re6 */
+ DRXJ_16TO8(-4), /* re7 */
+ DRXJ_16TO8(5), /* re8 */
+ DRXJ_16TO8(2), /* re9 */
+ DRXJ_16TO8(-8), /* re10 */
+ DRXJ_16TO8(2), /* re11 */
+ DRXJ_16TO8(11), /* re12 */
+ DRXJ_16TO8(-8), /* re13 */
+ DRXJ_16TO8(-15), /* re14 */
+ DRXJ_16TO8(16), /* re15 */
+ DRXJ_16TO8(19), /* re16 */
+ DRXJ_16TO8(-27), /* re17 */
+ DRXJ_16TO8(-22), /* re18 */
+ DRXJ_16TO8(44), /* re19 */
+ DRXJ_16TO8(26), /* re20 */
+ DRXJ_16TO8(-69), /* re21 */
+ DRXJ_16TO8(-28), /* re22 */
+ DRXJ_16TO8(110), /* re23 */
+ DRXJ_16TO8(31), /* re24 */
+ DRXJ_16TO8(-201), /* re25 */
+ DRXJ_16TO8(-32), /* re26 */
+ DRXJ_16TO8(628) /* re27 */
+ };
+ const u8 qam_c_taps[] = {
+ DRXJ_16TO8(-3), /* re0 */
+ DRXJ_16TO8(3), /* re1 */
+ DRXJ_16TO8(2), /* re2 */
+ DRXJ_16TO8(-4), /* re3 */
+ DRXJ_16TO8(0), /* re4 */
+ DRXJ_16TO8(4), /* re5 */
+ DRXJ_16TO8(-1), /* re6 */
+ DRXJ_16TO8(-4), /* re7 */
+ DRXJ_16TO8(3), /* re8 */
+ DRXJ_16TO8(3), /* re9 */
+ DRXJ_16TO8(-5), /* re10 */
+ DRXJ_16TO8(0), /* re11 */
+ DRXJ_16TO8(9), /* re12 */
+ DRXJ_16TO8(-4), /* re13 */
+ DRXJ_16TO8(-12), /* re14 */
+ DRXJ_16TO8(10), /* re15 */
+ DRXJ_16TO8(16), /* re16 */
+ DRXJ_16TO8(-21), /* re17 */
+ DRXJ_16TO8(-20), /* re18 */
+ DRXJ_16TO8(37), /* re19 */
+ DRXJ_16TO8(25), /* re20 */
+ DRXJ_16TO8(-62), /* re21 */
+ DRXJ_16TO8(-28), /* re22 */
+ DRXJ_16TO8(105), /* re23 */
+ DRXJ_16TO8(31), /* re24 */
+ DRXJ_16TO8(-197), /* re25 */
+ DRXJ_16TO8(-33), /* re26 */
+ DRXJ_16TO8(626) /* re27 */
+ };
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ common_attr = (struct drx_common_attr *) demod->my_common_attr;
+
+ if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_CONSTELLATION)) {
+ if (ext_attr->standard == DRX_STANDARD_ITU_B) {
+ switch (channel->constellation) {
+ case DRX_CONSTELLATION_QAM256:
+ iqm_rc_rate = 0x00AE3562;
+ lc_symbol_freq =
+ QAM_LC_SYMBOL_FREQ_FREQ_QAM_B_256;
+ channel->symbolrate = 5360537;
+ iqm_rc_stretch = IQM_RC_STRETCH_QAM_B_256;
+ break;
+ case DRX_CONSTELLATION_QAM64:
+ iqm_rc_rate = 0x00C05A0E;
+ lc_symbol_freq = 409;
+ channel->symbolrate = 5056941;
+ iqm_rc_stretch = IQM_RC_STRETCH_QAM_B_64;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ adc_frequency = (common_attr->sys_clock_freq * 1000) / 3;
+ if (channel->symbolrate == 0) {
+ pr_err("error: channel symbolrate is zero!\n");
+ return -EIO;
+ }
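+ /*
+ Fixed-point conversion (assuming frac28(a, b) returns a/b scaled
+ by 2^28): iqm_rc_rate becomes (adc_frequency / symbolrate - 4)
+ with 21 fractional bits, and lc_symbol_freq the rounded ratio
+ symbolrate / adc_frequency in Q12, clipped to 511.
+ */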
+ iqm_rc_rate =
+ (adc_frequency / channel->symbolrate) * (1 << 21) +
+ (frac28
+ ((adc_frequency % channel->symbolrate),
+ channel->symbolrate) >> 7) - (1 << 23);
+ lc_symbol_freq =
+ (u16) (frac28
+ (channel->symbolrate +
+ (adc_frequency >> 13),
+ adc_frequency) >> 16);
+ if (lc_symbol_freq > 511)
+ lc_symbol_freq = 511;
+
+ iqm_rc_stretch = 21;
+ }
+
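+ /* Select the SCU annex environment and demod parameters: annex A and
+ annex C always use I12/J17 interleaving, annex B passes the requested
+ interleave mode through. */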
+ if (ext_attr->standard == DRX_STANDARD_ITU_A) {
+ set_env_parameters = QAM_TOP_ANNEX_A; /* annex */
+ set_param_parameters[0] = channel->constellation; /* constellation */
+ set_param_parameters[1] = DRX_INTERLEAVEMODE_I12_J17; /* interleave mode */
+ } else if (ext_attr->standard == DRX_STANDARD_ITU_B) {
+ set_env_parameters = QAM_TOP_ANNEX_B; /* annex */
+ set_param_parameters[0] = channel->constellation; /* constellation */
+ set_param_parameters[1] = channel->interleavemode; /* interleave mode */
+ } else if (ext_attr->standard == DRX_STANDARD_ITU_C) {
+ set_env_parameters = QAM_TOP_ANNEX_C; /* annex */
+ set_param_parameters[0] = channel->constellation; /* constellation */
+ set_param_parameters[1] = DRX_INTERLEAVEMODE_I12_J17; /* interleave mode */
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ if (op & QAM_SET_OP_ALL) {
+ /*
+ STEP 1: reset demodulator
+ resets IQM, QAM and FEC HW blocks
+ resets SCU variables
+ */
+ /* stop all comm_exec */
+ rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_COMM_EXEC__A, QAM_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FS_COMM_EXEC__A, IQM_FS_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FD_COMM_EXEC__A, IQM_FD_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RC_COMM_EXEC__A, IQM_RC_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RT_COMM_EXEC__A, IQM_RT_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_COMM_EXEC__A, IQM_CF_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM |
+ SCU_RAM_COMMAND_CMD_DEMOD_RESET;
+ cmd_scu.parameter_len = 0;
+ cmd_scu.result_len = 1;
+ cmd_scu.parameter = NULL;
+ cmd_scu.result = &cmd_result;
+ rc = scu_command(dev_addr, &cmd_scu);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_CONSTELLATION)) {
+ /*
+ STEP 2: configure demodulator
+ -set env
+ -set params (resets IQM, QAM, FEC HW; initializes some SCU variables)
+ */
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM |
+ SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV;
+ cmd_scu.parameter_len = 1;
+ cmd_scu.result_len = 1;
+ cmd_scu.parameter = &set_env_parameters;
+ cmd_scu.result = &cmd_result;
+ rc = scu_command(dev_addr, &cmd_scu);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM |
+ SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM;
+ cmd_scu.parameter_len = 2;
+ cmd_scu.result_len = 1;
+ cmd_scu.parameter = set_param_parameters;
+ cmd_scu.result = &cmd_result;
+ rc = scu_command(dev_addr, &cmd_scu);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* set symbol rate */
+ rc = drxdap_fasi_write_reg32(dev_addr, IQM_RC_RATE_OFS_LO__A, iqm_rc_rate, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ ext_attr->iqm_rc_rate_ofs = iqm_rc_rate;
+ rc = set_qam_measurement(demod, channel->constellation, channel->symbolrate);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ /* STEP 3: enable the system in a mode where the ADC provides a valid
+ signal; set up the constellation-independent registers */
+ /* from qam_cmd.py script (qam_driver_b) */
+ /* TODO: remove re-writes of HW reset values */
+ if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_SPECTRUM)) {
+ rc = set_frequency(demod, channel, tuner_freq_offset);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_CONSTELLATION)) {
+
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_SYMBOL_FREQ__A, lc_symbol_freq, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RC_STRETCH__A, iqm_rc_stretch, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ if (op & QAM_SET_OP_ALL) {
+ if (!ext_attr->has_lna) {
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_AMUX__A, 0x02, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SYMMETRIC__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_MIDTAP__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_OUT_ENA__A, IQM_CF_OUT_ENA_QAM__M, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_WR_RSV_0__A, 0x5f, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* SCU temporarily shuts down the AGC */
+
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_SYNC_SEL__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLP_LEN__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLP_TH__A, 448, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_SNS_LEN__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_PDREF__A, 4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, 0x10, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_PGA_GAIN__A, 11, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_POW_MEAS_LEN__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SCALE_SH__A, IQM_CF_SCALE_SH__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /*! reset default val ! */
+
+ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_TIMEOUT__A, QAM_SY_TIMEOUT__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /*! reset default val ! */
+ if (ext_attr->standard == DRX_STANDARD_ITU_B) {
+ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_LWM__A, QAM_SY_SYNC_LWM__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /*! reset default val ! */
+ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_AWM__A, QAM_SY_SYNC_AWM__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /*! reset default val ! */
+ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_HWM__A, QAM_SY_SYNC_HWM__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /*! reset default val ! */
+ } else {
+ switch (channel->constellation) {
+ case DRX_CONSTELLATION_QAM16:
+ case DRX_CONSTELLATION_QAM64:
+ case DRX_CONSTELLATION_QAM256:
+ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_LWM__A, 0x03, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_AWM__A, 0x04, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_HWM__A, QAM_SY_SYNC_HWM__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /*! reset default val ! */
+ break;
+ case DRX_CONSTELLATION_QAM32:
+ case DRX_CONSTELLATION_QAM128:
+ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_LWM__A, 0x03, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_AWM__A, 0x05, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_HWM__A, 0x06, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ default:
+ return -EIO;
+ } /* switch */
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_MODE__A, QAM_LC_MODE__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /*! reset default val ! */
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_RATE_LIMIT__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_LPF_FACTORP__A, 4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_LPF_FACTORI__A, 4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_MODE__A, 7, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB0__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB1__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB2__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB3__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB4__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB5__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB6__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB8__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB9__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB10__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB12__A, 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB15__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB16__A, 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB20__A, 4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB25__A, 4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FS_ADJ_SEL__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RC_ADJ_SEL__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_ADJ_SEL__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_POW_MEAS_LEN__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_GPIO__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* No more resets of the IQM; the current standard is correctly set,
+ so the AGCs can now be configured. */
+ /* Turn on the IQM AF; this must be done before the set_agc_*() calls. */
+ rc = set_iqm_af(demod, true);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = adc_synchronization(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = init_agc(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_agc_if(demod, &(ext_attr->qam_if_agc_cfg), false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_agc_rf(demod, &(ext_attr->qam_rf_agc_cfg), false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ {
+ /* TODO: fix this; store a struct drxj_cfg_afe_gain in struct drxj_data
+ instead of only the gain */
+ struct drxj_cfg_afe_gain qam_pga_cfg = { DRX_STANDARD_ITU_B, 0 };
+
+ qam_pga_cfg.gain = ext_attr->qam_pga_cfg;
+ rc = ctrl_set_cfg_afe_gain(demod, &qam_pga_cfg);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ rc = ctrl_set_cfg_pre_saw(demod, &(ext_attr->qam_pre_saw_cfg));
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_CONSTELLATION)) {
+ if (ext_attr->standard == DRX_STANDARD_ITU_A) {
+ rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_RE0__A, sizeof(qam_a_taps), ((u8 *)qam_a_taps), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_IM0__A, sizeof(qam_a_taps), ((u8 *)qam_a_taps), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } else if (ext_attr->standard == DRX_STANDARD_ITU_B) {
+ switch (channel->constellation) {
+ case DRX_CONSTELLATION_QAM64:
+ rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_RE0__A, sizeof(qam_b64_taps), ((u8 *)qam_b64_taps), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_IM0__A, sizeof(qam_b64_taps), ((u8 *)qam_b64_taps), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_CONSTELLATION_QAM256:
+ rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_RE0__A, sizeof(qam_b256_taps), ((u8 *)qam_b256_taps), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_IM0__A, sizeof(qam_b256_taps), ((u8 *)qam_b256_taps), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ default:
+ return -EIO;
+ }
+ } else if (ext_attr->standard == DRX_STANDARD_ITU_C) {
+ rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_RE0__A, sizeof(qam_c_taps), ((u8 *)qam_c_taps), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_IM0__A, sizeof(qam_c_taps), ((u8 *)qam_c_taps), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ /* STEP 4: constellation-specific setup */
+ switch (channel->constellation) {
+ case DRX_CONSTELLATION_QAM16:
+ rc = set_qam16(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_CONSTELLATION_QAM32:
+ rc = set_qam32(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_CONSTELLATION_QAM64:
+ rc = set_qam64(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_CONSTELLATION_QAM128:
+ rc = set_qam128(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_CONSTELLATION_QAM256:
+ rc = set_qam256(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ default:
+ return -EIO;
+ } /* switch */
+ }
+
+ if ((op & QAM_SET_OP_ALL)) {
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SCALE_SH__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* The MPEG output has to be configured before the FEC is activated */
+ rc = set_mpegtei_handling(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = bit_reverse_mpeg_output(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_mpeg_start_width(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ {
+ /* TODO: move to set_standard after hardware reset value problem is solved */
+ /* Configure initial MPEG output */
+ struct drx_cfg_mpeg_output cfg_mpeg_output;
+
+ memcpy(&cfg_mpeg_output, &common_attr->mpeg_cfg, sizeof(cfg_mpeg_output));
+ cfg_mpeg_output.enable_mpeg_output = true;
+
+ rc = ctrl_set_cfg_mpeg_output(demod, &cfg_mpeg_output);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ }
+
+ if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_CONSTELLATION)) {
+
+ /* STEP 5: start QAM demodulator (starts FEC, QAM and IQM HW) */
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM |
+ SCU_RAM_COMMAND_CMD_DEMOD_START;
+ cmd_scu.parameter_len = 0;
+ cmd_scu.result_len = 1;
+ cmd_scu.parameter = NULL;
+ cmd_scu.result = &cmd_result;
+ rc = scu_command(dev_addr, &cmd_scu);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, IQM_COMM_EXEC__A, IQM_COMM_EXEC_ACTIVE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_COMM_EXEC__A, QAM_COMM_EXEC_ACTIVE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+static int ctrl_get_qam_sig_quality(struct drx_demod_instance *demod);
+
+static int qam_flip_spec(struct drx_demod_instance *demod, struct drx_channel *channel)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ struct drxj_data *ext_attr = demod->my_ext_attr;
+ int rc;
+ u32 iqm_fs_rate_ofs = 0;
+ u32 iqm_fs_rate_lo = 0;
+ u16 qam_ctl_ena = 0;
+ u16 data = 0;
+ u16 equ_mode = 0;
+ u16 fsm_state = 0;
+ int i = 0;
+ int ofsofs = 0;
+
+ /* Silence the LC, EQU and acquisition state machine control loops */
+ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_QAM_CTL_ENA__A, &qam_ctl_ena, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_CTL_ENA__A, qam_ctl_ena & ~(SCU_RAM_QAM_CTL_ENA_ACQ__M | SCU_RAM_QAM_CTL_ENA_EQU__M | SCU_RAM_QAM_CTL_ENA_LC__M), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* freeze the frequency control loop */
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_CF__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_CF1__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_atomic_read_reg32(dev_addr, IQM_FS_RATE_OFS_LO__A, &iqm_fs_rate_ofs, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_atomic_read_reg32(dev_addr, IQM_FS_RATE_LO__A, &iqm_fs_rate_lo, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
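+ /*
+ The next three lines compute the mirrored offset: iqm_fs_rate_ofs
+ becomes (old offset - 2 * iqm_fs_rate_lo) in two's complement; it is
+ programmed below to flip the received spectrum.
+ */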
+ ofsofs = iqm_fs_rate_lo - iqm_fs_rate_ofs;
+ iqm_fs_rate_ofs = ~iqm_fs_rate_ofs + 1;
+ iqm_fs_rate_ofs -= 2 * ofsofs;
+
+ /* freeze dq/fq updating */
+ rc = drxj_dap_read_reg16(dev_addr, QAM_DQ_MODE__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ data = (data & 0xfff9);
+ rc = drxj_dap_write_reg16(dev_addr, QAM_DQ_MODE__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_FQ_MODE__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* reset QAM_LC_CI, QAM_LC_EP and QAM_FQ_LA_FACTOR */
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_CI__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_EP__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_FQ_LA_FACTOR__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* flip the spec */
+ rc = drxdap_fasi_write_reg32(dev_addr, IQM_FS_RATE_OFS_LO__A, iqm_fs_rate_ofs, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ ext_attr->iqm_fs_rate_ofs = iqm_fs_rate_ofs;
+ ext_attr->pos_image = !ext_attr->pos_image;
+
+ /* save the equalizer mode and freeze dq/fq updating again */
+ rc = drxj_dap_read_reg16(dev_addr, QAM_DQ_MODE__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ equ_mode = data;
+ data = (data & 0xfff9);
+ rc = drxj_dap_write_reg16(dev_addr, QAM_DQ_MODE__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_FQ_MODE__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
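+ /* Negate the imaginary parts of the 28 DQ and 24 FQ equalizer taps to
+ match the mirrored spectrum. */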
+ for (i = 0; i < 28; i++) {
+ rc = drxj_dap_read_reg16(dev_addr, QAM_DQ_TAP_IM_EL0__A + (2 * i), &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_DQ_TAP_IM_EL0__A + (2 * i), -data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ for (i = 0; i < 24; i++) {
+ rc = drxj_dap_read_reg16(dev_addr, QAM_FQ_TAP_IM_EL0__A + (2 * i), &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_FQ_TAP_IM_EL0__A + (2 * i), -data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ data = equ_mode;
+ rc = drxj_dap_write_reg16(dev_addr, QAM_DQ_MODE__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, QAM_FQ_MODE__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_STATE_TGT__A, 4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ i = 0;
+ while ((fsm_state != 4) && (i++ < 100)) {
+ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_QAM_FSM_STATE__A, &fsm_state, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_CTL_ENA__A, (qam_ctl_ena | 0x0016), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+
+}
+
+#define NO_LOCK 0x0
+#define DEMOD_LOCKED 0x1
+#define SYNC_FLIPPED 0x2
+#define SPEC_MIRRORED 0x4
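+/*
+ * Lock state progression used by qam64auto() and qam256auto() below
+ * (a summary of the code; qam256auto() skips the sync-pattern flip):
+ *   NO_LOCK       demod lock seen and MER good enough -> DEMOD_LOCKED
+ *   DEMOD_LOCKED  no FEC lock within the wait time    -> flip sync pattern
+ *   SYNC_FLIPPED  still only demod lock               -> mirror the spectrum
+ *   SPEC_MIRRORED still only demod lock, MER good     -> flip sync, stop waiting
+ */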
+/**
+* \fn int qam64auto ()
+* \brief Automatically perform sync pattern switching and spectrum mirroring.
+* \param demod: instance of demod.
+* \param channel: pointer to channel data.
+* \param tuner_freq_offset: tuner frequency offset.
+* \param lock_status: pointer to lock status.
+* \return int.
+*/
+static int
+qam64auto(struct drx_demod_instance *demod,
+ struct drx_channel *channel,
+ s32 tuner_freq_offset, enum drx_lock_status *lock_status)
+{
+ struct drxj_data *ext_attr = demod->my_ext_attr;
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ struct drx39xxj_state *state = dev_addr->user_data;
+ struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache;
+ int rc;
+ u32 lck_state = NO_LOCK;
+ u32 start_time = 0;
+ u32 d_locked_time = 0;
+ u32 timeout_ofs = 0;
+ u16 data = 0;
+
+ /* external attributes for storing acquired channel constellation */
+ *lock_status = DRX_NOT_LOCKED;
+ start_time = jiffies_to_msecs(jiffies);
+ lck_state = NO_LOCK;
+ do {
+ rc = ctrl_lock_status(demod, lock_status);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ switch (lck_state) {
+ case NO_LOCK:
+ if (*lock_status == DRXJ_DEMOD_LOCK) {
+ rc = ctrl_get_qam_sig_quality(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (p->cnr.stat[0].svalue > 20800) {
+ lck_state = DEMOD_LOCKED;
+ /* some delay to see if fec_lock possible TODO find the right value */
+ timeout_ofs += DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME; /* see something, waiting longer */
+ d_locked_time = jiffies_to_msecs(jiffies);
+ }
+ }
+ break;
+ case DEMOD_LOCKED:
+ if ((*lock_status == DRXJ_DEMOD_LOCK) && /* still demod_lock in 150ms */
+ ((jiffies_to_msecs(jiffies) - d_locked_time) >
+ DRXJ_QAM_FEC_LOCK_WAITTIME)) {
+ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, data | 0x1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ lck_state = SYNC_FLIPPED;
+ msleep(10);
+ }
+ break;
+ case SYNC_FLIPPED:
+ if (*lock_status == DRXJ_DEMOD_LOCK) {
+ if (channel->mirror == DRX_MIRROR_AUTO) {
+ /* flip sync pattern back */
+ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, data & 0xFFFE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* flip spectrum */
+ ext_attr->mirror = DRX_MIRROR_YES;
+ rc = qam_flip_spec(demod, channel);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ lck_state = SPEC_MIRRORED;
+ /* reset timer TODO: still need 500ms? */
+ start_time = d_locked_time =
+ jiffies_to_msecs(jiffies);
+ timeout_ofs = 0;
+ } else { /* no need to wait lock */
+
+ start_time =
+ jiffies_to_msecs(jiffies) -
+ DRXJ_QAM_MAX_WAITTIME - timeout_ofs;
+ }
+ }
+ break;
+ case SPEC_MIRRORED:
+ if ((*lock_status == DRXJ_DEMOD_LOCK) && /* still demod_lock in 150ms */
+ ((jiffies_to_msecs(jiffies) - d_locked_time) >
+ DRXJ_QAM_FEC_LOCK_WAITTIME)) {
+ rc = ctrl_get_qam_sig_quality(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (p->cnr.stat[0].svalue > 20800) {
+ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, data | 0x1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* no need to wait lock */
+ start_time =
+ jiffies_to_msecs(jiffies) -
+ DRXJ_QAM_MAX_WAITTIME - timeout_ofs;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ msleep(10);
+ } while
+ ((*lock_status != DRX_LOCKED) &&
+ (*lock_status != DRX_NEVER_LOCK) &&
+ ((jiffies_to_msecs(jiffies) - start_time) <
+ (DRXJ_QAM_MAX_WAITTIME + timeout_ofs))
+ );
+ /* Returning control to application ... */
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int qam256auto ()
+* \brief Automatically perform sync pattern switching and spectrum mirroring.
+* \param demod: instance of demod.
+* \param channel: pointer to channel data.
+* \param tuner_freq_offset: tuner frequency offset.
+* \param lock_status: pointer to lock status.
+* \return int.
+*/
+static int
+qam256auto(struct drx_demod_instance *demod,
+ struct drx_channel *channel,
+ s32 tuner_freq_offset, enum drx_lock_status *lock_status)
+{
+ struct drxj_data *ext_attr = demod->my_ext_attr;
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ struct drx39xxj_state *state = dev_addr->user_data;
+ struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache;
+ int rc;
+ u32 lck_state = NO_LOCK;
+ u32 start_time = 0;
+ u32 d_locked_time = 0;
+ u32 timeout_ofs = DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME;
+
+ /* external attributes for storing acquired channel constellation */
+ *lock_status = DRX_NOT_LOCKED;
+ start_time = jiffies_to_msecs(jiffies);
+ lck_state = NO_LOCK;
+ do {
+ rc = ctrl_lock_status(demod, lock_status);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ switch (lck_state) {
+ case NO_LOCK:
+ if (*lock_status == DRXJ_DEMOD_LOCK) {
+ rc = ctrl_get_qam_sig_quality(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (p->cnr.stat[0].svalue > 26800) {
+ lck_state = DEMOD_LOCKED;
+ timeout_ofs += DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME; /* see something, wait longer */
+ d_locked_time = jiffies_to_msecs(jiffies);
+ }
+ }
+ break;
+ case DEMOD_LOCKED:
+ if (*lock_status == DRXJ_DEMOD_LOCK) {
+ if ((channel->mirror == DRX_MIRROR_AUTO) &&
+ ((jiffies_to_msecs(jiffies) - d_locked_time) >
+ DRXJ_QAM_FEC_LOCK_WAITTIME)) {
+ ext_attr->mirror = DRX_MIRROR_YES;
+ rc = qam_flip_spec(demod, channel);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ lck_state = SPEC_MIRRORED;
+ /* reset timer TODO: still need 300ms? */
+ start_time = jiffies_to_msecs(jiffies);
+ timeout_ofs = -DRXJ_QAM_MAX_WAITTIME / 2;
+ }
+ }
+ break;
+ case SPEC_MIRRORED:
+ break;
+ default:
+ break;
+ }
+ msleep(10);
+ } while
+ ((*lock_status < DRX_LOCKED) &&
+ (*lock_status != DRX_NEVER_LOCK) &&
+ ((jiffies_to_msecs(jiffies) - start_time) <
+ (DRXJ_QAM_MAX_WAITTIME + timeout_ofs)));
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int set_qam_channel ()
+* \brief Set QAM channel according to the requested constellation.
+* \param demod: instance of demod.
+* \param channel: pointer to channel data.
+* \return int.
+*/
+static int
+set_qam_channel(struct drx_demod_instance *demod,
+ struct drx_channel *channel, s32 tuner_freq_offset)
+{
+ struct drxj_data *ext_attr = NULL;
+ int rc;
+ enum drx_lock_status lock_status = DRX_NOT_LOCKED;
+ bool auto_flag = false;
+
+ /* external attributes for storing acquired channel constellation */
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ /* set QAM channel constellation */
+ switch (channel->constellation) {
+ case DRX_CONSTELLATION_QAM16:
+ case DRX_CONSTELLATION_QAM32:
+ case DRX_CONSTELLATION_QAM128:
+ return -EINVAL;
+ case DRX_CONSTELLATION_QAM64:
+ case DRX_CONSTELLATION_QAM256:
+ if (ext_attr->standard != DRX_STANDARD_ITU_B)
+ return -EINVAL;
+
+ ext_attr->constellation = channel->constellation;
+ if (channel->mirror == DRX_MIRROR_AUTO)
+ ext_attr->mirror = DRX_MIRROR_NO;
+ else
+ ext_attr->mirror = channel->mirror;
+
+ rc = set_qam(demod, channel, tuner_freq_offset, QAM_SET_OP_ALL);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ if (channel->constellation == DRX_CONSTELLATION_QAM64)
+ rc = qam64auto(demod, channel, tuner_freq_offset,
+ &lock_status);
+ else
+ rc = qam256auto(demod, channel, tuner_freq_offset,
+ &lock_status);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_CONSTELLATION_AUTO: /* for channel scan */
+ if (ext_attr->standard == DRX_STANDARD_ITU_B) {
+ u16 qam_ctl_ena = 0;
+
+ auto_flag = true;
+
+ /* try to lock default QAM constellation: QAM256 */
+ channel->constellation = DRX_CONSTELLATION_QAM256;
+ ext_attr->constellation = DRX_CONSTELLATION_QAM256;
+ if (channel->mirror == DRX_MIRROR_AUTO)
+ ext_attr->mirror = DRX_MIRROR_NO;
+ else
+ ext_attr->mirror = channel->mirror;
+ rc = set_qam(demod, channel, tuner_freq_offset,
+ QAM_SET_OP_ALL);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = qam256auto(demod, channel, tuner_freq_offset,
+ &lock_status);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ if (lock_status >= DRX_LOCKED) {
+ channel->constellation = DRX_CONSTELLATION_AUTO;
+ break;
+ }
+
+ /* QAM256 not locked. Try QAM64 constellation */
+ channel->constellation = DRX_CONSTELLATION_QAM64;
+ ext_attr->constellation = DRX_CONSTELLATION_QAM64;
+ if (channel->mirror == DRX_MIRROR_AUTO)
+ ext_attr->mirror = DRX_MIRROR_NO;
+ else
+ ext_attr->mirror = channel->mirror;
+
+ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr,
+ SCU_RAM_QAM_CTL_ENA__A,
+ &qam_ctl_ena, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
+ SCU_RAM_QAM_CTL_ENA__A,
+ qam_ctl_ena & ~SCU_RAM_QAM_CTL_ENA_ACQ__M, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
+ SCU_RAM_QAM_FSM_STATE_TGT__A,
+ 0x2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* force to rate hunting */
+
+ rc = set_qam(demod, channel, tuner_freq_offset,
+ QAM_SET_OP_CONSTELLATION);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
+ SCU_RAM_QAM_CTL_ENA__A,
+ qam_ctl_ena, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = qam64auto(demod, channel, tuner_freq_offset,
+ &lock_status);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ channel->constellation = DRX_CONSTELLATION_AUTO;
+ } else if (ext_attr->standard == DRX_STANDARD_ITU_C) {
+ u16 qam_ctl_ena = 0;
+
+ channel->constellation = DRX_CONSTELLATION_QAM64;
+ ext_attr->constellation = DRX_CONSTELLATION_QAM64;
+ auto_flag = true;
+
+ if (channel->mirror == DRX_MIRROR_AUTO)
+ ext_attr->mirror = DRX_MIRROR_NO;
+ else
+ ext_attr->mirror = channel->mirror;
+ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr,
+ SCU_RAM_QAM_CTL_ENA__A,
+ &qam_ctl_ena, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
+ SCU_RAM_QAM_CTL_ENA__A,
+ qam_ctl_ena & ~SCU_RAM_QAM_CTL_ENA_ACQ__M, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
+ SCU_RAM_QAM_FSM_STATE_TGT__A,
+ 0x2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* force to rate hunting */
+
+ rc = set_qam(demod, channel, tuner_freq_offset,
+ QAM_SET_OP_CONSTELLATION);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
+ SCU_RAM_QAM_CTL_ENA__A,
+ qam_ctl_ena, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = qam64auto(demod, channel, tuner_freq_offset,
+ &lock_status);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ channel->constellation = DRX_CONSTELLATION_AUTO;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+rw_error:
+ /* restore starting value */
+ if (auto_flag)
+ channel->constellation = DRX_CONSTELLATION_AUTO;
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+* \fn static int get_qamrs_err_count(struct i2c_device_addr *dev_addr)
+* \brief Get RS error count in QAM mode (used for post RS BER calculation)
+* \return Error code
+*
+* precondition: measurement period & measurement prescale must be set
+*
+*/
+static int
+get_qamrs_err_count(struct i2c_device_addr *dev_addr,
+ struct drxjrs_errors *rs_errors)
+{
+ int rc;
+ u16 nr_bit_errors = 0,
+ nr_symbol_errors = 0,
+ nr_packet_errors = 0, nr_failures = 0, nr_snc_par_fail_count = 0;
+
+ /* check arguments */
+ if (dev_addr == NULL)
+ return -EINVAL;
+
+ /* all reported errors are received in the */
+ /* most recently finished measurement period */
+ /* no of pre RS bit errors */
+ rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_BIT_ERRORS__A, &nr_bit_errors, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* no of symbol errors */
+ rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_SYMBOL_ERRORS__A, &nr_symbol_errors, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* no of packet errors */
+ rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_PACKET_ERRORS__A, &nr_packet_errors, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* no of failures to decode */
+ rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_FAILURES__A, &nr_failures, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* no of post RS bit errors */
+ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_SNC_FAIL_COUNT__A, &nr_snc_par_fail_count, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* TODO: NOTE */
+ /* These register values are fetched in non-atomic fashion */
+ /* It is possible that the read values contain unrelated information */
+
+ rs_errors->nr_bit_errors = nr_bit_errors & FEC_RS_NR_BIT_ERRORS__M;
+ rs_errors->nr_symbol_errors = nr_symbol_errors & FEC_RS_NR_SYMBOL_ERRORS__M;
+ rs_errors->nr_packet_errors = nr_packet_errors & FEC_RS_NR_PACKET_ERRORS__M;
+ rs_errors->nr_failures = nr_failures & FEC_RS_NR_FAILURES__M;
+ rs_errors->nr_snc_par_fail_count =
+ nr_snc_par_fail_count & FEC_OC_SNC_FAIL_COUNT__M;
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+ * \fn int get_sig_strength()
+ * \brief Retrieve signal strength for VSB and QAM.
+ * \param demod Pointer to demod instance
+ * \param sig_strength Pointer to signal strength data; range 0..100.
+ * \return int.
+ * \retval 0 sig_strength contains valid data.
+ * \retval -EINVAL sig_strength is NULL.
+ * \retval -EIO Erroneous data, sig_strength contains invalid data.
+ */
+#define DRXJ_AGC_TOP 0x2800
+#define DRXJ_AGC_SNS 0x1600
+#define DRXJ_RFAGC_MAX 0x3fff
+#define DRXJ_RFAGC_MIN 0x800
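+/*
+ * The reported strength is a piecewise-linear mapping of the AGC readings
+ * (as implemented below):
+ *   if_gain above DRXJ_AGC_TOP: 75..100, scaled by rf_gain between
+ *                               DRXJ_RFAGC_MIN and DRXJ_RFAGC_MAX
+ *   if_gain above DRXJ_AGC_SNS: 20..75, scaled by if_gain between
+ *                               DRXJ_AGC_SNS and DRXJ_AGC_TOP
+ *   otherwise:                  0..20, scaled by if_gain up to DRXJ_AGC_SNS
+ * Results of 7 or less are reported as 0.
+ */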
+
+static int get_sig_strength(struct drx_demod_instance *demod, u16 *sig_strength)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ int rc;
+ u16 rf_gain = 0;
+ u16 if_gain = 0;
+ u16 if_agc_sns = 0;
+ u16 if_agc_top = 0;
+ u16 rf_agc_max = 0;
+ u16 rf_agc_min = 0;
+
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_AGC_IF__A, &if_gain, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if_gain &= IQM_AF_AGC_IF__M;
+ rc = drxj_dap_read_reg16(dev_addr, IQM_AF_AGC_RF__A, &rf_gain, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rf_gain &= IQM_AF_AGC_RF__M;
+
+ if_agc_sns = DRXJ_AGC_SNS;
+ if_agc_top = DRXJ_AGC_TOP;
+ rf_agc_max = DRXJ_RFAGC_MAX;
+ rf_agc_min = DRXJ_RFAGC_MIN;
+
+ if (if_gain > if_agc_top) {
+ if (rf_gain > rf_agc_max)
+ *sig_strength = 100;
+ else if (rf_gain > rf_agc_min) {
+ if (rf_agc_max == rf_agc_min) {
+ pr_err("error: rf_agc_max == rf_agc_min\n");
+ return -EIO;
+ }
+ *sig_strength =
+ 75 + 25 * (rf_gain - rf_agc_min) / (rf_agc_max -
+ rf_agc_min);
+ } else
+ *sig_strength = 75;
+ } else if (if_gain > if_agc_sns) {
+ if (if_agc_top == if_agc_sns) {
+ pr_err("error: if_agc_top == if_agc_sns\n");
+ return -EIO;
+ }
+ *sig_strength =
+ 20 + 55 * (if_gain - if_agc_sns) / (if_agc_top - if_agc_sns);
+ } else {
+ if (!if_agc_sns) {
+ pr_err("error: if_agc_sns is zero!\n");
+ return -EIO;
+ }
+ *sig_strength = (20 * if_gain / if_agc_sns);
+ }
+
+ if (*sig_strength <= 7)
+ *sig_strength = 0;
+
+ return 0;
+ rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int ctrl_get_qam_sig_quality()
+* \brief Retrieve QAM signal quality from device.
+* \param devmod Pointer to demodulator instance.
+* \param sig_quality Pointer to signal quality data.
+* \return int.
+* \retval 0 sig_quality contains valid data.
+* \retval -EINVAL sig_quality is NULL.
+* \retval -EIO Erroneous data, sig_quality contains invalid data.
+
+* Pre-condition: Device must be started and in lock.
+*/
+static int
+ctrl_get_qam_sig_quality(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ struct drxj_data *ext_attr = demod->my_ext_attr;
+ struct drx39xxj_state *state = dev_addr->user_data;
+ struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache;
+ struct drxjrs_errors measuredrs_errors = { 0, 0, 0, 0, 0 };
+ enum drx_modulation constellation = ext_attr->constellation;
+ int rc;
+
+ u32 pre_bit_err_rs = 0; /* pre Reed-Solomon Bit Error Rate */
+ u32 post_bit_err_rs = 0; /* post Reed-Solomon Bit Error Rate */
+ u32 pkt_errs = 0; /* no of packet errors in RS */
+ u16 qam_sl_err_power = 0; /* accumulated error between raw and sliced symbols */
+ u16 qsym_err_vd = 0; /* quadrature symbol errors in QAM_VD */
+ u16 fec_oc_period = 0; /* SNC sync failure measurement period */
+ u16 fec_rs_prescale = 0; /* Reed-Solomon Measurement Prescale */
+ u16 fec_rs_period = 0; /* Value for corresponding I2C register */
+ /* calculation constants */
+ u32 rs_bit_cnt = 0; /* Reed-Solomon Bit Count */
+ u32 qam_sl_sig_power = 0; /* used for MER, depends on QAM constellation */
+ /* intermediate results */
+ u32 e = 0; /* exponent value used for QAM BER/SER */
+ u32 m = 0; /* mantissa value used for QAM BER/SER */
+ u32 ber_cnt = 0; /* BER count */
+ /* signal quality info */
+ u32 qam_sl_mer = 0; /* QAM MER */
+ u32 qam_pre_rs_ber = 0; /* Pre Reed-Solomon BER */
+ u32 qam_post_rs_ber = 0; /* Post Reed-Solomon BER */
+ u32 qam_vd_ser = 0; /* ViterbiDecoder SER */
+ u16 qam_vd_prescale = 0; /* Viterbi Measurement Prescale */
+ u16 qam_vd_period = 0; /* Viterbi Measurement period */
+ u32 vd_bit_cnt = 0; /* ViterbiDecoder Bit Count */
+
+ p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ /* read the physical registers */
+ /* Get the RS error data */
+ rc = get_qamrs_err_count(dev_addr, &measuredrs_errors);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* get the register value needed for MER */
+ rc = drxj_dap_read_reg16(dev_addr, QAM_SL_ERR_POWER__A, &qam_sl_err_power, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* get the register value needed for post RS BER */
+ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_SNC_FAIL_PERIOD__A, &fec_oc_period, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* get constants needed for signal quality calculation */
+ fec_rs_period = ext_attr->fec_rs_period;
+ fec_rs_prescale = ext_attr->fec_rs_prescale;
+ rs_bit_cnt = fec_rs_period * fec_rs_prescale * ext_attr->fec_rs_plen;
+ qam_vd_period = ext_attr->qam_vd_period;
+ qam_vd_prescale = ext_attr->qam_vd_prescale;
+ vd_bit_cnt = qam_vd_period * qam_vd_prescale * ext_attr->fec_vd_plen;
+
+ /* DRXJ_QAM_SL_SIG_POWER_QAMxxx * 4 */
+ switch (constellation) {
+ case DRX_CONSTELLATION_QAM16:
+ qam_sl_sig_power = DRXJ_QAM_SL_SIG_POWER_QAM16 << 2;
+ break;
+ case DRX_CONSTELLATION_QAM32:
+ qam_sl_sig_power = DRXJ_QAM_SL_SIG_POWER_QAM32 << 2;
+ break;
+ case DRX_CONSTELLATION_QAM64:
+ qam_sl_sig_power = DRXJ_QAM_SL_SIG_POWER_QAM64 << 2;
+ break;
+ case DRX_CONSTELLATION_QAM128:
+ qam_sl_sig_power = DRXJ_QAM_SL_SIG_POWER_QAM128 << 2;
+ break;
+ case DRX_CONSTELLATION_QAM256:
+ qam_sl_sig_power = DRXJ_QAM_SL_SIG_POWER_QAM256 << 2;
+ break;
+ default:
+ return -EIO;
+ }
+
+ /* ------------------------------ */
+ /* MER Calculation */
+ /* ------------------------------ */
+ /* MER is good if it is above 27.5 for QAM256 or 21.5 for QAM64 */
+
+ /* 10.0*log10(qam_sl_sig_power * 4.0 / qam_sl_err_power); */
+ if (qam_sl_err_power == 0)
+ qam_sl_mer = 0;
+ else
+ qam_sl_mer = log1_times100(qam_sl_sig_power) - log1_times100((u32)qam_sl_err_power);
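+ /*
+  * Note: the factor 4 from the formula above is already folded into
+  * qam_sl_sig_power (the "<< 2" in the constellation switch), so the
+  * difference of the two log1_times100() terms represents the log of
+  * the ratio (4 * sig_power) / err_power.
+  */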
+
+ /* ----------------------------------------- */
+ /* Pre Viterbi Symbol Error Rate Calculation */
+ /* ----------------------------------------- */
+ /* pre viterbi SER is good if it is below 0.025 */
+
+ /* get the register value */
+ /* no of quadrature symbol errors */
+ rc = drxj_dap_read_reg16(dev_addr, QAM_VD_NR_QSYM_ERRORS__A, &qsym_err_vd, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* Extract the Exponent and the Mantissa */
+ /* of number of quadrature symbol errors */
+ e = (qsym_err_vd & QAM_VD_NR_QSYM_ERRORS_EXP__M) >>
+ QAM_VD_NR_QSYM_ERRORS_EXP__B;
+ m = (qsym_err_vd & QAM_VD_NR_SYMBOL_ERRORS_FIXED_MANT__M) >>
+ QAM_VD_NR_SYMBOL_ERRORS_FIXED_MANT__B;
+
+ if ((m << e) >> 3 > 549752)
+ qam_vd_ser = 500000 * vd_bit_cnt * ((e > 2) ? 1 : 8) / 8;
+ else
+ qam_vd_ser = m << ((e > 2) ? (e - 3) : e);
+
+ /* --------------------------------------- */
+ /* pre and post Reed-Solomon BER Calculation */
+ /* --------------------------------------- */
+ /* pre RS BER is good if it is below 3.5e-4 */
+
+ /* get the register values */
+ pre_bit_err_rs = (u32) measuredrs_errors.nr_bit_errors;
+ pkt_errs = post_bit_err_rs = (u32) measuredrs_errors.nr_snc_par_fail_count;
+
+ /* Extract the Exponent and the Mantissa of the */
+ /* pre Reed-Solomon bit error count */
+ e = (pre_bit_err_rs & FEC_RS_NR_BIT_ERRORS_EXP__M) >>
+ FEC_RS_NR_BIT_ERRORS_EXP__B;
+ m = (pre_bit_err_rs & FEC_RS_NR_BIT_ERRORS_FIXED_MANT__M) >>
+ FEC_RS_NR_BIT_ERRORS_FIXED_MANT__B;
+
+ ber_cnt = m << e;
+
+ /*qam_pre_rs_ber = frac_times1e6( ber_cnt, rs_bit_cnt ); */
+ if (m > (rs_bit_cnt >> (e + 1)) || (rs_bit_cnt >> e) == 0)
+ qam_pre_rs_ber = 500000 * rs_bit_cnt >> e;
+ else
+ qam_pre_rs_ber = ber_cnt;
+
+ /* post RS BER = 1000000* (11.17 * FEC_OC_SNC_FAIL_COUNT__A) / */
+ /* (1504.0 * FEC_OC_SNC_FAIL_PERIOD__A) */
+ /*
+ => c = (1000000*100*11.17)/1504 =
+ post RS BER = (( c* FEC_OC_SNC_FAIL_COUNT__A) /
+ (100 * FEC_OC_SNC_FAIL_PERIOD__A)
+ *100 and /100 is for more precision.
+ => (20 bits * 12 bits) /(16 bits * 7 bits) => safe in 32 bits computation
+
+ Precision errors still possible.
+ */
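+ /*
+  * c = (1000000 * 100 * 11.17) / 1504 = 1117000000 / 1504 ~= 742686,
+  * which is the constant used below.
+  */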
+ e = post_bit_err_rs * 742686;
+ m = fec_oc_period * 100;
+ if (fec_oc_period == 0)
+ qam_post_rs_ber = 0xFFFFFFFF;
+ else
+ qam_post_rs_ber = e / m;
+
+ /* fill signal quality data structure */
+ p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+
+ p->cnr.stat[0].svalue = ((u16) qam_sl_mer) * 100;
+ if (ext_attr->standard == DRX_STANDARD_ITU_B) {
+ p->pre_bit_error.stat[0].uvalue += qam_vd_ser;
+ p->pre_bit_count.stat[0].uvalue += vd_bit_cnt * ((e > 2) ? 1 : 8) / 8;
+ } else {
+ p->pre_bit_error.stat[0].uvalue += qam_pre_rs_ber;
+ p->pre_bit_count.stat[0].uvalue += rs_bit_cnt >> e;
+ }
+
+ p->post_bit_error.stat[0].uvalue += qam_post_rs_ber;
+ p->post_bit_count.stat[0].uvalue += rs_bit_cnt >> e;
+
+ p->block_error.stat[0].uvalue += pkt_errs;
+
+#ifdef DRXJ_SIGNAL_ACCUM_ERR
+ rc = get_acc_pkt_err(demod, &sig_quality->packet_error);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+#endif
+
+ return 0;
+rw_error:
+ p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ return -EIO;
+}
+
+#endif /* #ifndef DRXJ_VSB_ONLY */
+
+/*============================================================================*/
+/*== END QAM DATAPATH FUNCTIONS ==*/
+/*============================================================================*/
+
+/*============================================================================*/
+/*============================================================================*/
+/*== ATV DATAPATH FUNCTIONS ==*/
+/*============================================================================*/
+/*============================================================================*/
+
+/*
+ Implementation notes.
+
+ NTSC/FM AGCs
+
+ Four AGCs are used for NTSC:
+  (1) RF (used to attenuate the input signal in case of too much power)
+  (2) IF (used to attenuate the input signal in case of too much power)
+  (3) Video AGC (used to amplify the output signal in case the input is too low)
+  (4) SIF AGC (used to amplify the output signal in case the input is too low)
+
+ Video AGC is coupled to RF and IF. SIF AGC is not coupled. It is assumed
+ that the coupling between Video AGC and the RF and IF AGCs also works in
+ favor of the SIF AGC.
+
+ Three AGCs are used for FM:
+  (1) RF (used to attenuate the input signal in case of too much power)
+  (2) IF (used to attenuate the input signal in case of too much power)
+  (3) SIF AGC (used to amplify the output signal in case the input is too low)
+
+ The SIF AGC is now coupled to the RF/IF AGCs.
+  The SIF AGC is needed for both the SIF output and the internal SIF signal to
+ the AUD block.
+
+ RF and IF AGCs DACs are part of AFE, Video and SIF AGC DACs are part of
+ the ATV block. The AGC control algorithms are all implemented in
+ microcode.
+
+ ATV SETTINGS
+
+ (Shadow settings will not be used for now, they will be implemented
+ later on because of the schedule)
+
+ Several HW/SCU "settings" can be used for ATV. The standard selection
+  will reset most of these settings. To avoid that the end user application
+  has to perform these settings each time the ATV or FM standard is
+  selected, the driver will shadow these settings. This enables the end user
+  to perform the settings only once after a drx_open(). The driver must
+  write the shadow settings to HW/SCU in case:
+ ( setstandard FM/ATV) ||
+ ( settings have changed && FM/ATV standard is active)
+ The shadow settings will be stored in the device specific data container.
+ A set of flags will be defined to flag changes in shadow settings.
+ A routine will be implemented to write all changed shadow settings to
+ HW/SCU.
+
+ The "settings" will consist of: AGC settings, filter settings etc.
+
+  Disadvantage of using shadow settings:
+  Direct changes in HW/SCU registers will not be reflected in the
+  shadow settings and these changes will be overwritten during the next
+ update. This can happen during evaluation. This will not be a problem
+ for normal customer usage.
+*/
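+
+/*
+  Illustrative sketch only (not part of this driver): the shadow settings
+  scheme described above could, for example, keep a "changed" bitmask next
+  to the shadowed values in the device specific data container and flush
+  it as follows (identifiers are hypothetical and only mirror the two
+  conditions listed above):
+
+    if (set_standard_atv_or_fm ||
+        (shadow_settings_changed && atv_or_fm_standard_active))
+        write_changed_shadow_settings_to_hw_scu();
+*/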
+/* -------------------------------------------------------------------------- */
+
+/**
+* \fn int power_down_atv ()
+* \brief Power down ATV.
+* \param demod instance of demodulator
+* \param standard either NTSC or FM (sub standard for ATV)
+* \return int.
+*
+* Stops and thus resets ATV and IQM block
+* SIF and CVBS ADC are powered down
+* Calls audio power down
+*/
+static int
+power_down_atv(struct drx_demod_instance *demod, enum drx_standard standard, bool primary)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ struct drxjscu_cmd cmd_scu = { /* command */ 0,
+ /* parameter_len */ 0,
+ /* result_len */ 0,
+ /* *parameter */ NULL,
+ /* *result */ NULL
+ };
+ int rc;
+ u16 cmd_result = 0;
+
+ /* ATV NTSC */
+
+ /* Stop ATV SCU (will reset ATV and IQM hardware) */
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_ATV |
+ SCU_RAM_COMMAND_CMD_DEMOD_STOP;
+ cmd_scu.parameter_len = 0;
+ cmd_scu.result_len = 1;
+ cmd_scu.parameter = NULL;
+ cmd_scu.result = &cmd_result;
+ rc = scu_command(dev_addr, &cmd_scu);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* Disable ATV outputs (ATV reset enables CVBS, undo this) */
+ rc = drxj_dap_write_reg16(dev_addr, ATV_TOP_STDBY__A, (ATV_TOP_STDBY_SIF_STDBY_STANDBY & (~ATV_TOP_STDBY_CVBS_STDBY_A2_ACTIVE)), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, ATV_COMM_EXEC__A, ATV_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (primary) {
+ rc = drxj_dap_write_reg16(dev_addr, IQM_COMM_EXEC__A, IQM_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_iqm_af(demod, false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } else {
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FS_COMM_EXEC__A, IQM_FS_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_FD_COMM_EXEC__A, IQM_FD_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RC_COMM_EXEC__A, IQM_RC_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_RT_COMM_EXEC__A, IQM_RT_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, IQM_CF_COMM_EXEC__A, IQM_CF_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ rc = power_down_aud(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+* \brief Power down AUD.
+* \param demod instance of demodulator
+* \return int.
+*
+*/
+static int power_down_aud(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ struct drxj_data *ext_attr = NULL;
+ int rc;
+
+ dev_addr = (struct i2c_device_addr *)demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ rc = drxj_dap_write_reg16(dev_addr, AUD_COMM_EXEC__A, AUD_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ ext_attr->aud_data.audio_is_active = false;
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int set_orx_nsu_aox()
+* \brief Configure OrxNsuAox for OOB
+* \param demod instance of demodulator.
+* \param active
+* \return int.
+*/
+static int set_orx_nsu_aox(struct drx_demod_instance *demod, bool active)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ int rc;
+ u16 data = 0;
+
+ /* Configure NSU_AOX */
+ rc = drxj_dap_read_reg16(dev_addr, ORX_NSU_AOX_STDBY_W__A, &data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (!active)
+ data &= ((~ORX_NSU_AOX_STDBY_W_STDBYADC_A2_ON) & (~ORX_NSU_AOX_STDBY_W_STDBYAMP_A2_ON) & (~ORX_NSU_AOX_STDBY_W_STDBYBIAS_A2_ON) & (~ORX_NSU_AOX_STDBY_W_STDBYPLL_A2_ON) & (~ORX_NSU_AOX_STDBY_W_STDBYPD_A2_ON) & (~ORX_NSU_AOX_STDBY_W_STDBYTAGC_IF_A2_ON) & (~ORX_NSU_AOX_STDBY_W_STDBYTAGC_RF_A2_ON) & (~ORX_NSU_AOX_STDBY_W_STDBYFLT_A2_ON));
+ else
+ data |= (ORX_NSU_AOX_STDBY_W_STDBYADC_A2_ON | ORX_NSU_AOX_STDBY_W_STDBYAMP_A2_ON | ORX_NSU_AOX_STDBY_W_STDBYBIAS_A2_ON | ORX_NSU_AOX_STDBY_W_STDBYPLL_A2_ON | ORX_NSU_AOX_STDBY_W_STDBYPD_A2_ON | ORX_NSU_AOX_STDBY_W_STDBYTAGC_IF_A2_ON | ORX_NSU_AOX_STDBY_W_STDBYTAGC_RF_A2_ON | ORX_NSU_AOX_STDBY_W_STDBYFLT_A2_ON);
+ rc = drxj_dap_write_reg16(dev_addr, ORX_NSU_AOX_STDBY_W__A, data, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/**
+* \fn int ctrl_set_oob()
+* \brief Set OOB channel to be used.
+* \param demod instance of demodulator
+* \param oob_param OOB parameters for channel setting.
+* oob_param->frequency should be in kHz
+* \return int.
+*
+* Accepts only. Returns error otherwise.
+* Demapper value is written after scu_command START
+* because START command causes COMM_EXEC transition
+* from 0 to 1 which causes all registers to be
+* overwritten with initial value
+*
+*/
+
+/* Nyquist filter impulse response */
+#define IMPULSE_COSINE_ALPHA_0_3 {-3, -4, -1, 6, 10, 7, -5, -20, -25, -10, 29, 79, 123, 140} /*sqrt raised-cosine filter with alpha=0.3 */
+#define IMPULSE_COSINE_ALPHA_0_5 { 2, 0, -2, -2, 2, 5, 2, -10, -20, -14, 20, 74, 125, 145} /*sqrt raised-cosine filter with alpha=0.5 */
+#define IMPULSE_COSINE_ALPHA_RO_0_5 { 0, 0, 1, 2, 3, 0, -7, -15, -16, 0, 34, 77, 114, 128} /*full raised-cosine filter with alpha=0.5 (receiver only) */
+
+/* Coefficients for the Nyquist filter (total: 27 taps) */
+#define NYQFILTERLEN 27
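+/*
+ * Only (NYQFILTERLEN + 1) / 2 = 14 coefficients are stored and written per
+ * target mode (see the loop in ctrl_set_oob() below), presumably because
+ * the 27-tap filter is symmetric.
+ */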
+
+static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_param)
+{
+ int rc;
+ s32 freq = 0; /* KHz */
+ struct i2c_device_addr *dev_addr = NULL;
+ struct drxj_data *ext_attr = NULL;
+ u16 i = 0;
+ bool mirror_freq_spect_oob = false;
+ u16 trk_filter_value = 0;
+ struct drxjscu_cmd scu_cmd;
+ u16 set_param_parameters[3];
+ u16 cmd_result[2] = { 0, 0 };
+ s16 nyquist_coeffs[4][(NYQFILTERLEN + 1) / 2] = {
+ IMPULSE_COSINE_ALPHA_0_3, /* Target Mode 0 */
+ IMPULSE_COSINE_ALPHA_0_3, /* Target Mode 1 */
+ IMPULSE_COSINE_ALPHA_0_5, /* Target Mode 2 */
+ IMPULSE_COSINE_ALPHA_RO_0_5 /* Target Mode 3 */
+ };
+ u8 mode_val[4] = { 2, 2, 0, 1 };
+ u8 pfi_coeffs[4][6] = {
+ {DRXJ_16TO8(-92), DRXJ_16TO8(-108), DRXJ_16TO8(100)}, /* TARGET_MODE = 0: PFI_A = -23/32; PFI_B = -54/32; PFI_C = 25/32; fg = 0.5 MHz (Att=26dB) */
+ {DRXJ_16TO8(-64), DRXJ_16TO8(-80), DRXJ_16TO8(80)}, /* TARGET_MODE = 1: PFI_A = -16/32; PFI_B = -40/32; PFI_C = 20/32; fg = 1.0 MHz (Att=28dB) */
+ {DRXJ_16TO8(-80), DRXJ_16TO8(-98), DRXJ_16TO8(92)}, /* TARGET_MODE = 2, 3: PFI_A = -20/32; PFI_B = -49/32; PFI_C = 23/32; fg = 0.8 MHz (Att=25dB) */
+ {DRXJ_16TO8(-80), DRXJ_16TO8(-98), DRXJ_16TO8(92)} /* TARGET_MODE = 2, 3: PFI_A = -20/32; PFI_B = -49/32; PFI_C = 23/32; fg = 0.8 MHz (Att=25dB) */
+ };
+ u16 mode_index;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ mirror_freq_spect_oob = ext_attr->mirror_freq_spect_oob;
+
+ /* Check parameters */
+ if (oob_param == NULL) {
+ /* power off oob module */
+ scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
+ | SCU_RAM_COMMAND_CMD_DEMOD_STOP;
+ scu_cmd.parameter_len = 0;
+ scu_cmd.result_len = 1;
+ scu_cmd.result = cmd_result;
+ rc = scu_command(dev_addr, &scu_cmd);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_orx_nsu_aox(demod, false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, ORX_COMM_EXEC__A, ORX_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ ext_attr->oob_power_on = false;
+ return 0;
+ }
+
+ freq = oob_param->frequency;
+ if ((freq < 70000) || (freq > 130000))
+ return -EIO;
+ freq = (freq - 50000) / 50;
+
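+ /*
+  * Linearly interpolate the tracking filter value between the two
+  * nearest oob_trk_filter_cfg[] entries; the table holds one entry per
+  * 10 MHz step of the OOB frequency, starting at 70 MHz.
+  */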
+ {
+ u16 index = 0;
+ u16 remainder = 0;
+ u16 *trk_filtercfg = ext_attr->oob_trk_filter_cfg;
+
+ index = (u16) ((freq - 400) / 200);
+ remainder = (u16) ((freq - 400) % 200);
+ trk_filter_value =
+ trk_filtercfg[index] - (trk_filtercfg[index] -
+ trk_filtercfg[index +
+ 1]) / 10 * remainder /
+ 20;
+ }
+
+ /*********/
+ /* Stop */
+ /*********/
+ rc = drxj_dap_write_reg16(dev_addr, ORX_COMM_EXEC__A, ORX_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
+ | SCU_RAM_COMMAND_CMD_DEMOD_STOP;
+ scu_cmd.parameter_len = 0;
+ scu_cmd.result_len = 1;
+ scu_cmd.result = cmd_result;
+ rc = scu_command(dev_addr, &scu_cmd);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /*********/
+ /* Reset */
+ /*********/
+ scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
+ | SCU_RAM_COMMAND_CMD_DEMOD_RESET;
+ scu_cmd.parameter_len = 0;
+ scu_cmd.result_len = 1;
+ scu_cmd.result = cmd_result;
+ rc = scu_command(dev_addr, &scu_cmd);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /***********/
+ /* SET_ENV */
+ /***********/
+ /* set frequency, spectrum inversion and data rate */
+ scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
+ | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV;
+ scu_cmd.parameter_len = 3;
+ /* 1-data rate;2-frequency */
+ switch (oob_param->standard) {
+ case DRX_OOB_MODE_A:
+ if (
+ /* signal is transmitted inverted */
+ ((oob_param->spectrum_inverted == true) &&
+ /* and tuner is not mirroring the signal */
+ (!mirror_freq_spect_oob)) |
+ /* or */
+ /* signal is transmitted noninverted */
+ ((oob_param->spectrum_inverted == false) &&
+ /* and tuner is mirroring the signal */
+ (mirror_freq_spect_oob))
+ )
+ set_param_parameters[0] =
+ SCU_RAM_ORX_RF_RX_DATA_RATE_2048KBPS_INVSPEC;
+ else
+ set_param_parameters[0] =
+ SCU_RAM_ORX_RF_RX_DATA_RATE_2048KBPS_REGSPEC;
+ break;
+ case DRX_OOB_MODE_B_GRADE_A:
+ if (
+ /* signal is transmitted inverted */
+ ((oob_param->spectrum_inverted == true) &&
+ /* and tuner is not mirroring the signal */
+ (!mirror_freq_spect_oob)) |
+ /* or */
+ /* signal is transmitted noninverted */
+ ((oob_param->spectrum_inverted == false) &&
+ /* and tuner is mirroring the signal */
+ (mirror_freq_spect_oob))
+ )
+ set_param_parameters[0] =
+ SCU_RAM_ORX_RF_RX_DATA_RATE_1544KBPS_INVSPEC;
+ else
+ set_param_parameters[0] =
+ SCU_RAM_ORX_RF_RX_DATA_RATE_1544KBPS_REGSPEC;
+ break;
+ case DRX_OOB_MODE_B_GRADE_B:
+ default:
+ if (
+ /* signal is transmitted inverted */
+ ((oob_param->spectrum_inverted == true) &&
+ /* and tuner is not mirroring the signal */
+ (!mirror_freq_spect_oob)) |
+ /* or */
+ /* signal is transmitted noninverted */
+ ((oob_param->spectrum_inverted == false) &&
+ /* and tuner is mirroring the signal */
+ (mirror_freq_spect_oob))
+ )
+ set_param_parameters[0] =
+ SCU_RAM_ORX_RF_RX_DATA_RATE_3088KBPS_INVSPEC;
+ else
+ set_param_parameters[0] =
+ SCU_RAM_ORX_RF_RX_DATA_RATE_3088KBPS_REGSPEC;
+ break;
+ }
+ set_param_parameters[1] = (u16) (freq & 0xFFFF);
+ set_param_parameters[2] = trk_filter_value;
+ scu_cmd.parameter = set_param_parameters;
+ scu_cmd.result_len = 1;
+ scu_cmd.result = cmd_result;
+ mode_index = mode_val[(set_param_parameters[0] & 0xC0) >> 6];
+ rc = scu_command(dev_addr, &scu_cmd);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0xFABA, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* Write magic word to enable pdr reg write */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_OOB_CRX_CFG__A, OOB_CRX_DRIVE_STRENGTH << SIO_PDR_OOB_CRX_CFG_DRIVE__B | 0x03 << SIO_PDR_OOB_CRX_CFG_MODE__B, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_OOB_DRX_CFG__A, OOB_DRX_DRIVE_STRENGTH << SIO_PDR_OOB_DRX_CFG_DRIVE__B | 0x03 << SIO_PDR_OOB_DRX_CFG_MODE__B, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ } /* Write magic word to disable pdr reg write */
+
+ rc = drxj_dap_write_reg16(dev_addr, ORX_TOP_COMM_KEY__A, 0, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, ORX_FWP_AAG_LEN_W__A, 16000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, ORX_FWP_AAG_THR_W__A, 40, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* ddc */
+ rc = drxj_dap_write_reg16(dev_addr, ORX_DDC_OFO_SET_W__A, ORX_DDC_OFO_SET_W__PRE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* nsu */
+ rc = drxj_dap_write_reg16(dev_addr, ORX_NSU_AOX_LOPOW_W__A, ext_attr->oob_lo_pow, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* initialization for target mode */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TARGET_MODE__A, SCU_RAM_ORX_TARGET_MODE_2048KBPS_SQRT, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FREQ_GAIN_CORR__A, SCU_RAM_ORX_FREQ_GAIN_CORR_2048KBPS, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Reset bits for timing and freq. recovery */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_RST_CPH__A, 0x0001, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_RST_CTI__A, 0x0002, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_RST_KRN__A, 0x0004, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_RST_KRP__A, 0x0008, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* AGN_LOCK = {2048>>3, -2048, 8, -8, 0, 1}; */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_AGN_LOCK_TH__A, 2048 >> 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_AGN_LOCK_TOTH__A, (u16)(-2048), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_AGN_ONLOCK_TTH__A, 8, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_AGN_UNLOCK_TTH__A, (u16)(-8), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_AGN_LOCK_MASK__A, 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* DGN_LOCK = {10, -2048, 8, -8, 0, 1<<1}; */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_DGN_LOCK_TH__A, 10, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_DGN_LOCK_TOTH__A, (u16)(-2048), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_DGN_ONLOCK_TTH__A, 8, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_DGN_UNLOCK_TTH__A, (u16)(-8), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_DGN_LOCK_MASK__A, 1 << 1, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* FRQ_LOCK = {15,-2048, 8, -8, 0, 1<<2}; */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FRQ_LOCK_TH__A, 17, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FRQ_LOCK_TOTH__A, (u16)(-2048), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FRQ_ONLOCK_TTH__A, 8, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FRQ_UNLOCK_TTH__A, (u16)(-8), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FRQ_LOCK_MASK__A, 1 << 2, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* PHA_LOCK = {5000, -2048, 8, -8, 0, 1<<3}; */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_PHA_LOCK_TH__A, 3000, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_PHA_LOCK_TOTH__A, (u16)(-2048), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_PHA_ONLOCK_TTH__A, 8, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_PHA_UNLOCK_TTH__A, (u16)(-8), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_PHA_LOCK_MASK__A, 1 << 3, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* TIM_LOCK = {300, -2048, 8, -8, 0, 1<<4}; */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TIM_LOCK_TH__A, 400, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TIM_LOCK_TOTH__A, (u16)(-2048), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TIM_ONLOCK_TTH__A, 8, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TIM_UNLOCK_TTH__A, (u16)(-8), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TIM_LOCK_MASK__A, 1 << 4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* EQU_LOCK = {20, -2048, 8, -8, 0, 1<<5}; */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_EQU_LOCK_TH__A, 20, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_EQU_LOCK_TOTH__A, (u16)(-2048), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_EQU_ONLOCK_TTH__A, 4, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_EQU_UNLOCK_TTH__A, (u16)(-4), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_EQU_LOCK_MASK__A, 1 << 5, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* PRE-Filter coefficients (PFI) */
+ rc = drxdap_fasi_write_block(dev_addr, ORX_FWP_PFI_A_W__A, sizeof(pfi_coeffs[mode_index]), ((u8 *)pfi_coeffs[mode_index]), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, ORX_TOP_MDE_W__A, mode_index, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* NYQUIST-Filter coefficients (NYQ) */
+ for (i = 0; i < (NYQFILTERLEN + 1) / 2; i++) {
+ rc = drxj_dap_write_reg16(dev_addr, ORX_FWP_NYQ_ADR_W__A, i, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, ORX_FWP_NYQ_COF_RW__A, nyquist_coeffs[mode_index][i], 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ rc = drxj_dap_write_reg16(dev_addr, ORX_FWP_NYQ_ADR_W__A, 31, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, ORX_COMM_EXEC__A, ORX_COMM_EXEC_ACTIVE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /*********/
+ /* Start */
+ /*********/
+ scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
+ | SCU_RAM_COMMAND_CMD_DEMOD_START;
+ scu_cmd.parameter_len = 0;
+ scu_cmd.result_len = 1;
+ scu_cmd.result = cmd_result;
+ rc = scu_command(dev_addr, &scu_cmd);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = set_orx_nsu_aox(demod, true);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, ORX_NSU_AOX_STHR_W__A, ext_attr->oob_pre_saw, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ ext_attr->oob_power_on = true;
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+/*== END OOB DATAPATH FUNCTIONS ==*/
+/*============================================================================*/
+
+/*=============================================================================
+ ===== MC command related functions ==========================================
+ ===========================================================================*/
+
+/*=============================================================================
+ ===== ctrl_set_channel() ==========================================================
+ ===========================================================================*/
+/**
+* \fn int ctrl_set_channel()
+* \brief Select a new transmission channel.
+* \param demod instance of demod.
+* \param channel Pointer to channel data.
+* \return int.
+*
+* In case the tuner module is not used and in case of NTSC/FM the programmer
+* must tune the tuner to the centre frequency of the NTSC/FM channel.
+*
+*/
+static int
+ctrl_set_channel(struct drx_demod_instance *demod, struct drx_channel *channel)
+{
+ int rc;
+ s32 tuner_freq_offset = 0;
+ struct drxj_data *ext_attr = NULL;
+ struct i2c_device_addr *dev_addr = NULL;
+ enum drx_standard standard = DRX_STANDARD_UNKNOWN;
+#ifndef DRXJ_VSB_ONLY
+ u32 min_symbol_rate = 0;
+ u32 max_symbol_rate = 0;
+ int bandwidth_temp = 0;
+ int bandwidth = 0;
+#endif
+ /*== check arguments ======================================================*/
+ if ((demod == NULL) || (channel == NULL))
+ return -EINVAL;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ standard = ext_attr->standard;
+
+ /* check valid standards */
+ switch (standard) {
+ case DRX_STANDARD_8VSB:
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
+ case DRX_STANDARD_ITU_C:
+#endif /* DRXJ_VSB_ONLY */
+ break;
+ case DRX_STANDARD_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ /* check bandwidth QAM annex B, NTSC and 8VSB */
+ if ((standard == DRX_STANDARD_ITU_B) ||
+ (standard == DRX_STANDARD_8VSB) ||
+ (standard == DRX_STANDARD_NTSC)) {
+ switch (channel->bandwidth) {
+ case DRX_BANDWIDTH_6MHZ:
+ case DRX_BANDWIDTH_UNKNOWN: /* fall through */
+ channel->bandwidth = DRX_BANDWIDTH_6MHZ;
+ break;
+ case DRX_BANDWIDTH_8MHZ: /* fall through */
+ case DRX_BANDWIDTH_7MHZ: /* fall through */
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* For QAM annex A and annex C:
+ -check symbolrate and constellation
+ -derive bandwidth from symbolrate (input bandwidth is ignored)
+ */
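+ /*
+  * Worked example (annex A, roll-off factor 115/100): a 6875 ksym/s
+  * channel gives 6875000 * 115 / 100 = 7906250, which selects
+  * DRX_BANDWIDTH_8MHZ in the switch below.
+  */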
+#ifndef DRXJ_VSB_ONLY
+ if ((standard == DRX_STANDARD_ITU_A) ||
+ (standard == DRX_STANDARD_ITU_C)) {
+ struct drxuio_cfg uio_cfg = { DRX_UIO1, DRX_UIO_MODE_FIRMWARE_SAW };
+ int bw_rolloff_factor = 0;
+
+ bw_rolloff_factor = (standard == DRX_STANDARD_ITU_A) ? 115 : 113;
+ min_symbol_rate = DRXJ_QAM_SYMBOLRATE_MIN;
+ max_symbol_rate = DRXJ_QAM_SYMBOLRATE_MAX;
+ /* config SMA_TX pin to SAW switch mode */
+ rc = ctrl_set_uio_cfg(demod, &uio_cfg);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ if (channel->symbolrate < min_symbol_rate ||
+ channel->symbolrate > max_symbol_rate) {
+ return -EINVAL;
+ }
+
+ switch (channel->constellation) {
+ case DRX_CONSTELLATION_QAM16: /* fall through */
+ case DRX_CONSTELLATION_QAM32: /* fall through */
+ case DRX_CONSTELLATION_QAM64: /* fall through */
+ case DRX_CONSTELLATION_QAM128: /* fall through */
+ case DRX_CONSTELLATION_QAM256:
+ bandwidth_temp = channel->symbolrate * bw_rolloff_factor;
+ bandwidth = bandwidth_temp / 100;
+
+ if ((bandwidth_temp % 100) >= 50)
+ bandwidth++;
+
+ if (bandwidth <= 6100000) {
+ channel->bandwidth = DRX_BANDWIDTH_6MHZ;
+ } else if ((bandwidth > 6100000)
+ && (bandwidth <= 7100000)) {
+ channel->bandwidth = DRX_BANDWIDTH_7MHZ;
+ } else if (bandwidth > 7100000) {
+ channel->bandwidth = DRX_BANDWIDTH_8MHZ;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* For QAM annex B:
+ -check constellation
+ */
+ if (standard == DRX_STANDARD_ITU_B) {
+ switch (channel->constellation) {
+ case DRX_CONSTELLATION_AUTO:
+ case DRX_CONSTELLATION_QAM256:
+ case DRX_CONSTELLATION_QAM64:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (channel->interleavemode) {
+ case DRX_INTERLEAVEMODE_I128_J1:
+ case DRX_INTERLEAVEMODE_I128_J1_V2:
+ case DRX_INTERLEAVEMODE_I128_J2:
+ case DRX_INTERLEAVEMODE_I64_J2:
+ case DRX_INTERLEAVEMODE_I128_J3:
+ case DRX_INTERLEAVEMODE_I32_J4:
+ case DRX_INTERLEAVEMODE_I128_J4:
+ case DRX_INTERLEAVEMODE_I16_J8:
+ case DRX_INTERLEAVEMODE_I128_J5:
+ case DRX_INTERLEAVEMODE_I8_J16:
+ case DRX_INTERLEAVEMODE_I128_J6:
+ case DRX_INTERLEAVEMODE_I128_J7:
+ case DRX_INTERLEAVEMODE_I128_J8:
+ case DRX_INTERLEAVEMODE_I12_J17:
+ case DRX_INTERLEAVEMODE_I5_J4:
+ case DRX_INTERLEAVEMODE_B52_M240:
+ case DRX_INTERLEAVEMODE_B52_M720:
+ case DRX_INTERLEAVEMODE_UNKNOWN:
+ case DRX_INTERLEAVEMODE_AUTO:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if ((ext_attr->uio_sma_tx_mode) == DRX_UIO_MODE_FIRMWARE_SAW) {
+ /* SAW SW, user UIO is used for switchable SAW */
+ struct drxuio_data uio1 = { DRX_UIO1, false };
+
+ switch (channel->bandwidth) {
+ case DRX_BANDWIDTH_8MHZ:
+ uio1.value = true;
+ break;
+ case DRX_BANDWIDTH_7MHZ:
+ uio1.value = false;
+ break;
+ case DRX_BANDWIDTH_6MHZ:
+ uio1.value = false;
+ break;
+ case DRX_BANDWIDTH_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ rc = ctrl_uio_write(demod, &uio1);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+#endif /* DRXJ_VSB_ONLY */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ tuner_freq_offset = 0;
+
+ /*== Setup demod for specific standard ====================================*/
+ switch (standard) {
+ case DRX_STANDARD_8VSB:
+ if (channel->mirror == DRX_MIRROR_AUTO)
+ ext_attr->mirror = DRX_MIRROR_NO;
+ else
+ ext_attr->mirror = channel->mirror;
+ rc = set_vsb(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_frequency(demod, channel, tuner_freq_offset);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A: /* fallthrough */
+ case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_C:
+ rc = set_qam_channel(demod, channel, tuner_freq_offset);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+#endif
+ case DRX_STANDARD_UNKNOWN:
+ default:
+ return -EIO;
+ }
+
+ /* flag the packet error counter reset */
+ ext_attr->reset_pkt_err_acc = true;
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*=============================================================================
+ ===== SigQuality() ==========================================================
+ ===========================================================================*/
+
+/**
+* \fn int ctrl_sig_quality()
+* \brief Retrieve signal quality from the device.
+* \param devmod Pointer to demodulator instance.
+* \param sig_quality Pointer to signal quality data.
+* \return int.
+* \retval 0 sig_quality contains valid data.
+* \retval -EINVAL sig_quality is NULL.
+* \retval -EIO Erroneous data, sig_quality contains invalid data.
+
+*/
+static int
+ctrl_sig_quality(struct drx_demod_instance *demod,
+ enum drx_lock_status lock_status)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ struct drxj_data *ext_attr = demod->my_ext_attr;
+ struct drx39xxj_state *state = dev_addr->user_data;
+ struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache;
+ enum drx_standard standard = ext_attr->standard;
+ int rc;
+ u32 ber, cnt, err, pkt;
+ u16 mer, strength;
+
+ rc = get_sig_strength(demod, &strength);
+ if (rc < 0) {
+ pr_err("error getting signal strength %d\n", rc);
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ } else {
+ p->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ p->strength.stat[0].uvalue = 65535UL * strength/ 100;
+ }
+
+ switch (standard) {
+ case DRX_STANDARD_8VSB:
+#ifdef DRXJ_SIGNAL_ACCUM_ERR
+ rc = get_acc_pkt_err(demod, &pkt);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+#endif
+ if (lock_status != DRXJ_DEMOD_LOCK && lock_status != DRX_LOCKED) {
+ p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ } else {
+ rc = get_vsb_post_rs_pck_err(dev_addr, &err, &pkt);
+ if (rc != 0) {
+ pr_err("error %d getting UCB\n", rc);
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ } else {
+ p->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->block_error.stat[0].uvalue += err;
+ p->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ p->block_count.stat[0].uvalue += pkt;
+ }
+
+			/* Pre- and post-Viterbi BER are computed in steps of 10^(-6) */
+ rc = get_vs_bpre_viterbi_ber(dev_addr, &ber, &cnt);
+ if (rc != 0) {
+ pr_err("error %d getting pre-ber\n", rc);
+ p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ } else {
+ p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->pre_bit_error.stat[0].uvalue += ber;
+ p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ p->pre_bit_count.stat[0].uvalue += cnt;
+ }
+
+ rc = get_vs_bpost_viterbi_ber(dev_addr, &ber, &cnt);
+ if (rc != 0) {
+ pr_err("error %d getting post-ber\n", rc);
+ p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ } else {
+ p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->post_bit_error.stat[0].uvalue += ber;
+ p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ p->post_bit_count.stat[0].uvalue += cnt;
+ }
+ rc = get_vsbmer(dev_addr, &mer);
+ if (rc != 0) {
+ pr_err("error %d getting MER\n", rc);
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ } else {
+ p->cnr.stat[0].svalue = mer * 100;
+ p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
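+				/*
+				 * Assuming the VSB MER readout is in 0.1 dB
+				 * units, mer * 100 expresses it in the
+				 * 0.001 dB steps used by FE_SCALE_DECIBEL;
+				 * e.g. a readout of 234 becomes
+				 * svalue = 23400, i.e. 23.4 dB.
+				 */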
+ }
+ }
+ break;
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
+ case DRX_STANDARD_ITU_C:
+ rc = ctrl_get_qam_sig_quality(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+* \fn int ctrl_lock_status()
+* \brief Retrieve lock status.
+* \param demod Pointer to demodulator instance.
+* \param lock_stat Pointer to lock status structure.
+* \return int.
+*
+*/
+static int
+ctrl_lock_status(struct drx_demod_instance *demod, enum drx_lock_status *lock_stat)
+{
+ enum drx_standard standard = DRX_STANDARD_UNKNOWN;
+ struct drxj_data *ext_attr = NULL;
+ struct i2c_device_addr *dev_addr = NULL;
+ struct drxjscu_cmd cmd_scu = { /* command */ 0,
+ /* parameter_len */ 0,
+ /* result_len */ 0,
+ /* *parameter */ NULL,
+ /* *result */ NULL
+ };
+ int rc;
+ u16 cmd_result[2] = { 0, 0 };
+ u16 demod_lock = SCU_RAM_PARAM_1_RES_DEMOD_GET_LOCK_DEMOD_LOCKED;
+
+ /* check arguments */
+ if ((demod == NULL) || (lock_stat == NULL))
+ return -EINVAL;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ standard = ext_attr->standard;
+
+ *lock_stat = DRX_NOT_LOCKED;
+
+ /* define the SCU command code */
+ switch (standard) {
+ case DRX_STANDARD_8VSB:
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB |
+ SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK;
+ demod_lock |= 0x6;
+ break;
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
+ case DRX_STANDARD_ITU_C:
+ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM |
+ SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK;
+ break;
+#endif
+ case DRX_STANDARD_UNKNOWN: /* fallthrough */
+ default:
+ return -EIO;
+ }
+
+	/* define the SCU command parameters and execute the command */
+ cmd_scu.parameter_len = 0;
+ cmd_scu.result_len = 2;
+ cmd_scu.parameter = NULL;
+ cmd_scu.result = cmd_result;
+ rc = scu_command(dev_addr, &cmd_scu);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* set the lock status */
+ if (cmd_scu.result[1] < demod_lock) {
+ /* 0x0000 NOT LOCKED */
+ *lock_stat = DRX_NOT_LOCKED;
+ } else if (cmd_scu.result[1] < SCU_RAM_PARAM_1_RES_DEMOD_GET_LOCK_LOCKED) {
+ *lock_stat = DRXJ_DEMOD_LOCK;
+ } else if (cmd_scu.result[1] <
+ SCU_RAM_PARAM_1_RES_DEMOD_GET_LOCK_NEVER_LOCK) {
+ /* 0x8000 DEMOD + FEC LOCKED (system lock) */
+ *lock_stat = DRX_LOCKED;
+ } else {
+ /* 0xC000 NEVER LOCKED */
+ /* (system will never be able to lock to the signal) */
+ *lock_stat = DRX_NEVER_LOCK;
+ }
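+
+	/*
+	 * Summary of the mapping above, derived from the threshold
+	 * constants used in the comparisons:
+	 *   result[1] <  demod_lock                     -> DRX_NOT_LOCKED
+	 *   result[1] <  ..._GET_LOCK_LOCKED            -> DRXJ_DEMOD_LOCK
+	 *   result[1] <  ..._GET_LOCK_NEVER_LOCK        -> DRX_LOCKED
+	 *   otherwise (0xC000 and up)                   -> DRX_NEVER_LOCK
+	 */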
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+* \fn int ctrl_set_standard()
+* \brief Set modulation standard to be used.
+* \param standard Modulation standard.
+* \return int.
+*
+* Set up the desired demodulation standard.
+* Disable and power down the previously selected demodulation standard.
+*
+*/
+static int
+ctrl_set_standard(struct drx_demod_instance *demod, enum drx_standard *standard)
+{
+ struct drxj_data *ext_attr = NULL;
+ int rc;
+ enum drx_standard prev_standard;
+
+ /* check arguments */
+ if ((standard == NULL) || (demod == NULL))
+ return -EINVAL;
+
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ prev_standard = ext_attr->standard;
+
+ /*
+ Stop and power down previous standard
+ */
+ switch (prev_standard) {
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A: /* fallthrough */
+ case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_C:
+ rc = power_down_qam(demod, false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+#endif
+ case DRX_STANDARD_8VSB:
+ rc = power_down_vsb(demod, false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_STANDARD_UNKNOWN:
+ /* Do nothing */
+ break;
+ case DRX_STANDARD_AUTO: /* fallthrough */
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ Initialize channel independent registers
+ Power up new standard
+ */
+ ext_attr->standard = *standard;
+
+ switch (*standard) {
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A: /* fallthrough */
+ case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_C:
+ do {
+ u16 dummy;
+ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, SCU_RAM_VERSION_HI__A, &dummy, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ } while (0);
+ break;
+#endif
+ case DRX_STANDARD_8VSB:
+ rc = set_vsb_leak_n_gain(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ default:
+ ext_attr->standard = DRX_STANDARD_UNKNOWN;
+ return -EINVAL;
+ }
+
+ return 0;
+rw_error:
+ /* Don't know what the standard is now ... try again */
+ ext_attr->standard = DRX_STANDARD_UNKNOWN;
+ return -EIO;
+}
+
+/*============================================================================*/
+
+static void drxj_reset_mode(struct drxj_data *ext_attr)
+{
+	/* Initialize default AFE configuration for QAM */
+ if (ext_attr->has_lna) {
+ /* IF AGC off, PGA active */
+#ifndef DRXJ_VSB_ONLY
+ ext_attr->qam_if_agc_cfg.standard = DRX_STANDARD_ITU_B;
+ ext_attr->qam_if_agc_cfg.ctrl_mode = DRX_AGC_CTRL_OFF;
+ ext_attr->qam_pga_cfg = 140 + (11 * 13);
+#endif
+ ext_attr->vsb_if_agc_cfg.standard = DRX_STANDARD_8VSB;
+ ext_attr->vsb_if_agc_cfg.ctrl_mode = DRX_AGC_CTRL_OFF;
+ ext_attr->vsb_pga_cfg = 140 + (11 * 13);
+ } else {
+ /* IF AGC on, PGA not active */
+#ifndef DRXJ_VSB_ONLY
+ ext_attr->qam_if_agc_cfg.standard = DRX_STANDARD_ITU_B;
+ ext_attr->qam_if_agc_cfg.ctrl_mode = DRX_AGC_CTRL_AUTO;
+ ext_attr->qam_if_agc_cfg.min_output_level = 0;
+ ext_attr->qam_if_agc_cfg.max_output_level = 0x7FFF;
+ ext_attr->qam_if_agc_cfg.speed = 3;
+ ext_attr->qam_if_agc_cfg.top = 1297;
+ ext_attr->qam_pga_cfg = 140;
+#endif
+ ext_attr->vsb_if_agc_cfg.standard = DRX_STANDARD_8VSB;
+ ext_attr->vsb_if_agc_cfg.ctrl_mode = DRX_AGC_CTRL_AUTO;
+ ext_attr->vsb_if_agc_cfg.min_output_level = 0;
+ ext_attr->vsb_if_agc_cfg.max_output_level = 0x7FFF;
+ ext_attr->vsb_if_agc_cfg.speed = 3;
+ ext_attr->vsb_if_agc_cfg.top = 1024;
+ ext_attr->vsb_pga_cfg = 140;
+ }
+	/*
+	 * TODO: remove min_output_level and max_output_level for both QAM
+	 * and VSB once the microcode no longer uses them.
+	 */
+#ifndef DRXJ_VSB_ONLY
+ ext_attr->qam_rf_agc_cfg.standard = DRX_STANDARD_ITU_B;
+ ext_attr->qam_rf_agc_cfg.ctrl_mode = DRX_AGC_CTRL_AUTO;
+ ext_attr->qam_rf_agc_cfg.min_output_level = 0;
+ ext_attr->qam_rf_agc_cfg.max_output_level = 0x7FFF;
+ ext_attr->qam_rf_agc_cfg.speed = 3;
+ ext_attr->qam_rf_agc_cfg.top = 9500;
+ ext_attr->qam_rf_agc_cfg.cut_off_current = 4000;
+ ext_attr->qam_pre_saw_cfg.standard = DRX_STANDARD_ITU_B;
+ ext_attr->qam_pre_saw_cfg.reference = 0x07;
+ ext_attr->qam_pre_saw_cfg.use_pre_saw = true;
+#endif
+	/* Initialize default AFE configuration for VSB */
+ ext_attr->vsb_rf_agc_cfg.standard = DRX_STANDARD_8VSB;
+ ext_attr->vsb_rf_agc_cfg.ctrl_mode = DRX_AGC_CTRL_AUTO;
+ ext_attr->vsb_rf_agc_cfg.min_output_level = 0;
+ ext_attr->vsb_rf_agc_cfg.max_output_level = 0x7FFF;
+ ext_attr->vsb_rf_agc_cfg.speed = 3;
+ ext_attr->vsb_rf_agc_cfg.top = 9500;
+ ext_attr->vsb_rf_agc_cfg.cut_off_current = 4000;
+ ext_attr->vsb_pre_saw_cfg.standard = DRX_STANDARD_8VSB;
+ ext_attr->vsb_pre_saw_cfg.reference = 0x07;
+ ext_attr->vsb_pre_saw_cfg.use_pre_saw = true;
+}
+
+/**
+* \fn int ctrl_power_mode()
+* \brief Set the power mode of the device to the specified power mode
+* \param demod Pointer to demodulator instance.
+* \param mode Pointer to new power mode.
+* \return int.
+* \retval 0 Success
+* \retval -EIO I2C error or other failure
+* \retval -EINVAL Invalid mode argument.
+*
+*
+*/
+static int
+ctrl_power_mode(struct drx_demod_instance *demod, enum drx_power_mode *mode)
+{
+ struct drx_common_attr *common_attr = (struct drx_common_attr *) NULL;
+ struct drxj_data *ext_attr = (struct drxj_data *) NULL;
+ struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)NULL;
+ int rc;
+ u16 sio_cc_pwd_mode = 0;
+
+ common_attr = (struct drx_common_attr *) demod->my_common_attr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ dev_addr = demod->my_i2c_dev_addr;
+
+ /* Check arguments */
+ if (mode == NULL)
+ return -EINVAL;
+
+ /* If already in requested power mode, do nothing */
+ if (common_attr->current_power_mode == *mode)
+ return 0;
+
+ switch (*mode) {
+ case DRX_POWER_UP:
+ case DRXJ_POWER_DOWN_MAIN_PATH:
+ sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_NONE;
+ break;
+ case DRXJ_POWER_DOWN_CORE:
+ sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_CLOCK;
+ break;
+ case DRXJ_POWER_DOWN_PLL:
+ sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_PLL;
+ break;
+ case DRX_POWER_DOWN:
+ sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_OSC;
+ break;
+ default:
+		/* Unknown sleep mode */
+		return -EINVAL;
+ }
+
+ /* Check if device needs to be powered up */
+	if (common_attr->current_power_mode != DRX_POWER_UP) {
+ rc = power_up_device(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+	if (*mode == DRX_POWER_UP) {
+		/* Restore analog & pin configuration */
+
+		/* Initialize default AFE configuration for VSB */
+ drxj_reset_mode(ext_attr);
+ } else {
+ /* Power down to requested mode */
+ /* Backup some register settings */
+ /* Set pins with possible pull-ups connected to them in input mode */
+ /* Analog power down */
+ /* ADC power down */
+ /* Power down device */
+ /* stop all comm_exec */
+ /*
+ Stop and power down previous standard
+ */
+
+ switch (ext_attr->standard) {
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
+ case DRX_STANDARD_ITU_C:
+ rc = power_down_qam(demod, true);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_STANDARD_8VSB:
+ rc = power_down_vsb(demod, true);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_STANDARD_PAL_SECAM_BG: /* fallthrough */
+ case DRX_STANDARD_PAL_SECAM_DK: /* fallthrough */
+ case DRX_STANDARD_PAL_SECAM_I: /* fallthrough */
+ case DRX_STANDARD_PAL_SECAM_L: /* fallthrough */
+ case DRX_STANDARD_PAL_SECAM_LP: /* fallthrough */
+ case DRX_STANDARD_NTSC: /* fallthrough */
+ case DRX_STANDARD_FM:
+ rc = power_down_atv(demod, ext_attr->standard, true);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ break;
+ case DRX_STANDARD_UNKNOWN:
+ /* Do nothing */
+ break;
+ case DRX_STANDARD_AUTO: /* fallthrough */
+ default:
+ return -EIO;
+ }
+ ext_attr->standard = DRX_STANDARD_UNKNOWN;
+ }
+
+ if (*mode != DRXJ_POWER_DOWN_MAIN_PATH) {
+ rc = drxj_dap_write_reg16(dev_addr, SIO_CC_PWD_MODE__A, sio_cc_pwd_mode, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+		if (*mode != DRX_POWER_UP) {
+			/* Initialize HI; the wake-up key especially must be set before putting the IC to sleep */
+ rc = init_hi(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ ext_attr->hi_cfg_ctrl |= SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ;
+ rc = hi_cfg_command(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+ }
+
+ common_attr->current_power_mode = *mode;
+
+ return 0;
+rw_error:
+ return rc;
+}
+
+/*============================================================================*/
+/*== CTRL Set/Get Config related functions ===================================*/
+/*============================================================================*/
+
+/**
+* \fn int ctrl_set_cfg_pre_saw()
+* \brief Set Pre-saw reference.
+* \param demod Demod instance.
+* \param pre_saw Pointer to pre-SAW configuration.
+* \return int.
+*
+* Check arguments
+* Dispatch handling to standard specific function.
+*
+*/
+static int
+ctrl_set_cfg_pre_saw(struct drx_demod_instance *demod, struct drxj_cfg_pre_saw *pre_saw)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ struct drxj_data *ext_attr = NULL;
+ int rc;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ /* check arguments */
+	if (pre_saw == NULL || pre_saw->reference > IQM_AF_PDREF__M)
+		return -EINVAL;
+
+ /* Only if standard is currently active */
+ if ((ext_attr->standard == pre_saw->standard) ||
+ (DRXJ_ISQAMSTD(ext_attr->standard) &&
+ DRXJ_ISQAMSTD(pre_saw->standard)) ||
+ (DRXJ_ISATVSTD(ext_attr->standard) &&
+ DRXJ_ISATVSTD(pre_saw->standard))) {
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_PDREF__A, pre_saw->reference, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ /* Store pre-saw settings */
+ switch (pre_saw->standard) {
+ case DRX_STANDARD_8VSB:
+ ext_attr->vsb_pre_saw_cfg = *pre_saw;
+ break;
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A: /* fallthrough */
+ case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_C:
+ ext_attr->qam_pre_saw_cfg = *pre_saw;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+/**
+* \fn int ctrl_set_cfg_afe_gain()
+* \brief Set AFE Gain.
+* \param demod Demod instance.
+* \param afe_gain Pointer to AFE gain configuration.
+* \return int.
+*
+* Check arguments
+* Dispatch handling to standard specific function.
+*
+*/
+static int
+ctrl_set_cfg_afe_gain(struct drx_demod_instance *demod, struct drxj_cfg_afe_gain *afe_gain)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ struct drxj_data *ext_attr = NULL;
+ int rc;
+ u8 gain = 0;
+
+ /* check arguments */
+ if (afe_gain == NULL)
+ return -EINVAL;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+
+ switch (afe_gain->standard) {
+ case DRX_STANDARD_8VSB: /* fallthrough */
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A: /* fallthrough */
+ case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_C:
+#endif
+ /* Do nothing */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	/* TODO: PGA gain is also written by the microcode (at least for QAM
+	   and VSB), so I (PJ) think the interface requires a choice between
+	   auto and user mode */
+
+ if (afe_gain->gain >= 329)
+ gain = 15;
+ else if (afe_gain->gain <= 147)
+ gain = 0;
+ else
+ gain = (afe_gain->gain - 140 + 6) / 13;
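+
+	/*
+	 * The AFE gain is requested in 0.1 dB units (range 140..335, see the
+	 * drxj_cfg_afe_gain description) and programmed in 1.3 dB steps from
+	 * a 14.0 dB base: e.g. a requested gain of 200 gives index
+	 * (200 - 140 + 6) / 13 = 5, which is stored back below as
+	 * 5 * 13 + 140 = 205.
+	 */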
+
+ /* Only if standard is currently active */
+ if (ext_attr->standard == afe_gain->standard) {
+ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_PGA_GAIN__A, gain, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ }
+
+ /* Store AFE Gain settings */
+ switch (afe_gain->standard) {
+ case DRX_STANDARD_8VSB:
+ ext_attr->vsb_pga_cfg = gain * 13 + 140;
+ break;
+#ifndef DRXJ_VSB_ONLY
+ case DRX_STANDARD_ITU_A: /* fallthrough */
+ case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_C:
+ ext_attr->qam_pga_cfg = gain * 13 + 140;
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+
+ return 0;
+rw_error:
+ return -EIO;
+}
+
+/*============================================================================*/
+
+
+/*=============================================================================
+===== EXPORTED FUNCTIONS ====================================================*/
+
+static int drx_ctrl_u_code(struct drx_demod_instance *demod,
+ struct drxu_code_info *mc_info,
+ enum drxu_code_action action);
+
+/**
+* \fn drxj_open()
+* \brief Open the demod instance, configure device, configure drxdriver
+* \return Status_t Return status.
+*
+* drxj_open() can be called with a NULL ucode image => no ucode upload.
+* This means that drxj_open() must NOT contain SCU commands or, in general,
+* rely on SCU or AUD ucode to be present.
+*
+*/
+
+static int drxj_open(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = NULL;
+ struct drxj_data *ext_attr = NULL;
+ struct drx_common_attr *common_attr = NULL;
+ u32 driver_version = 0;
+ struct drxu_code_info ucode_info;
+ struct drx_cfg_mpeg_output cfg_mpeg_output;
+ int rc;
+ enum drx_power_mode power_mode = DRX_POWER_UP;
+
+ if ((demod == NULL) ||
+ (demod->my_common_attr == NULL) ||
+ (demod->my_ext_attr == NULL) ||
+ (demod->my_i2c_dev_addr == NULL) ||
+ (demod->my_common_attr->is_opened)) {
+ return -EINVAL;
+ }
+
+ /* Check arguments */
+ if (demod->my_ext_attr == NULL)
+ return -EINVAL;
+
+ dev_addr = demod->my_i2c_dev_addr;
+ ext_attr = (struct drxj_data *) demod->my_ext_attr;
+ common_attr = (struct drx_common_attr *) demod->my_common_attr;
+
+ rc = ctrl_power_mode(demod, &power_mode);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ if (power_mode != DRX_POWER_UP) {
+ rc = -EINVAL;
+ pr_err("failed to powerup device\n");
+ goto rw_error;
+ }
+
+ /* has to be in front of setIqmAf and setOrxNsuAox */
+ rc = get_device_capabilities(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /*
+ * Soft reset of sys- and osc-clockdomain
+ *
+	 * HACK: On Windows, a 0x07 is written here instead of just 0x03.
+	 * As we haven't loaded the firmware yet, we should do the same.
+	 * This is also coherent with DRX-K, where we send reset codes
+	 * for the modulation (OFDM, in DRX-K), SYS and OSC clock domains.
+ */
+ rc = drxj_dap_write_reg16(dev_addr, SIO_CC_SOFT_RST__A, (0x04 | SIO_CC_SOFT_RST_SYS__M | SIO_CC_SOFT_RST_OSC__M), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ msleep(1);
+
+ /* TODO first make sure that everything keeps working before enabling this */
+ /* PowerDownAnalogBlocks() */
+ rc = drxj_dap_write_reg16(dev_addr, ATV_TOP_STDBY__A, (~ATV_TOP_STDBY_CVBS_STDBY_A2_ACTIVE) | ATV_TOP_STDBY_SIF_STDBY_STANDBY, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = set_iqm_af(demod, false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = set_orx_nsu_aox(demod, false);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = init_hi(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* disable mpegoutput pins */
+ memcpy(&cfg_mpeg_output, &common_attr->mpeg_cfg, sizeof(cfg_mpeg_output));
+ cfg_mpeg_output.enable_mpeg_output = false;
+
+ rc = ctrl_set_cfg_mpeg_output(demod, &cfg_mpeg_output);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+	/* Stop AUD and inform SetAudio that it will need to do all settings */
+ rc = power_down_aud(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ /* Stop SCU */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_COMM_EXEC__A, SCU_COMM_EXEC_STOP, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Upload microcode */
+ if (common_attr->microcode_file != NULL) {
+ /* Dirty trick to use common ucode upload & verify,
+ pretend device is already open */
+ common_attr->is_opened = true;
+ ucode_info.mc_file = common_attr->microcode_file;
+
+ if (DRX_ISPOWERDOWNMODE(demod->my_common_attr->current_power_mode)) {
+			pr_err("Should power up before loading the firmware\n");
+ return -EINVAL;
+ }
+
+ rc = drx_ctrl_u_code(demod, &ucode_info, UCODE_UPLOAD);
+ if (rc != 0) {
+ pr_err("error %d while uploading the firmware\n", rc);
+ goto rw_error;
+ }
+ if (common_attr->verify_microcode == true) {
+ rc = drx_ctrl_u_code(demod, &ucode_info, UCODE_VERIFY);
+ if (rc != 0) {
+ pr_err("error %d while verifying the firmware\n",
+ rc);
+ goto rw_error;
+ }
+ }
+ common_attr->is_opened = false;
+ }
+
+ /* Run SCU for a little while to initialize microcode version numbers */
+ rc = drxj_dap_write_reg16(dev_addr, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* Initialize scan timeout */
+ common_attr->scan_demod_lock_timeout = DRXJ_SCAN_TIMEOUT;
+ common_attr->scan_desired_lock = DRX_LOCKED;
+
+ drxj_reset_mode(ext_attr);
+ ext_attr->standard = DRX_STANDARD_UNKNOWN;
+
+ rc = smart_ant_init(demod);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+	/*
+	 * Stamp the driver version number into SCU data RAM in BCD code.
+	 * Done to enable field application engineers to retrieve the
+	 * drxdriver version via I2C from SCU RAM.
+	 */
+ driver_version = (VERSION_MAJOR / 100) % 10;
+ driver_version <<= 4;
+ driver_version += (VERSION_MAJOR / 10) % 10;
+ driver_version <<= 4;
+ driver_version += (VERSION_MAJOR % 10);
+ driver_version <<= 4;
+ driver_version += (VERSION_MINOR % 10);
+ driver_version <<= 4;
+ driver_version += (VERSION_PATCH / 1000) % 10;
+ driver_version <<= 4;
+ driver_version += (VERSION_PATCH / 100) % 10;
+ driver_version <<= 4;
+ driver_version += (VERSION_PATCH / 10) % 10;
+ driver_version <<= 4;
+ driver_version += (VERSION_PATCH % 10);
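+	/*
+	 * BCD packing example (hypothetical version numbers): with
+	 * VERSION_MAJOR = 1, VERSION_MINOR = 0 and VERSION_PATCH = 64 the
+	 * nibble sequence is 0-0-1-0-0-0-6-4, so driver_version ends up as
+	 * 0x00100064; 0x0010 goes to the VER_HI register and 0x0064 to
+	 * VER_LO.
+	 */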
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_DRIVER_VER_HI__A, (u16)(driver_version >> 16), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_DRIVER_VER_LO__A, (u16)(driver_version & 0xFFFF), 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = ctrl_set_oob(demod, NULL);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ /* refresh the audio data structure with default */
+ ext_attr->aud_data = drxj_default_aud_data_g;
+
+ demod->my_common_attr->is_opened = true;
+ return 0;
+rw_error:
+ common_attr->is_opened = false;
+ return -EIO;
+}
+
+/*============================================================================*/
+/**
+* \fn drxj_close()
+* \brief Close the demod instance, power down the device
+* \return Status_t Return status.
+*
+*/
+static int drxj_close(struct drx_demod_instance *demod)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ int rc;
+ enum drx_power_mode power_mode = DRX_POWER_UP;
+
+ if ((demod->my_common_attr == NULL) ||
+ (demod->my_ext_attr == NULL) ||
+ (demod->my_i2c_dev_addr == NULL) ||
+ (!demod->my_common_attr->is_opened)) {
+ return -EINVAL;
+ }
+
+ /* power up */
+ rc = ctrl_power_mode(demod, &power_mode);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ rc = drxj_dap_write_reg16(dev_addr, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE, 0);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+ power_mode = DRX_POWER_DOWN;
+ rc = ctrl_power_mode(demod, &power_mode);
+ if (rc != 0) {
+ pr_err("error %d\n", rc);
+ goto rw_error;
+ }
+
+ DRX_ATTR_ISOPENED(demod) = false;
+
+ return 0;
+rw_error:
+ DRX_ATTR_ISOPENED(demod) = false;
+
+ return -EIO;
+}
+
+/*
+ * Microcode related functions
+ */
+
+/**
+ * drx_u_code_compute_crc - Compute CRC of block of microcode data.
+ * @block_data: Pointer to microcode data.
+ * @nr_words: Size of microcode block (number of 16 bits words).
+ *
+ * Return: The computed CRC residue.
+ */
+static u16 drx_u_code_compute_crc(u8 *block_data, u16 nr_words)
+{
+ u16 i = 0;
+ u16 j = 0;
+ u32 crc_word = 0;
+ u32 carry = 0;
+
+ while (i < nr_words) {
+		crc_word |= (u32)be16_to_cpu(*(__be16 *)(block_data));
+ for (j = 0; j < 16; j++) {
+ crc_word <<= 1;
+ if (carry != 0)
+ crc_word ^= 0x80050000UL;
+ carry = crc_word & 0x80000000UL;
+ }
+ i++;
+ block_data += (sizeof(u16));
+ }
+ return (u16)(crc_word >> 16);
+}
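+
+/*
+ * The loop above is effectively a bit-wise CRC-16 with polynomial 0x8005,
+ * processed MSB first over big-endian 16-bit words and kept in the upper
+ * half of a 32-bit accumulator (hence the 0x80050000 constant); the residue
+ * in the top 16 bits is what the microcode block headers carry as their CRC.
+ */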
+
+/**
+ * drx_check_firmware - checks if the loaded firmware is valid
+ *
+ * @demod: demod structure
+ * @mc_data: pointer to the start of the firmware
+ * @size: firmware size
+ */
+static int drx_check_firmware(struct drx_demod_instance *demod, u8 *mc_data,
+ unsigned size)
+{
+ struct drxu_code_block_hdr block_hdr;
+ int i;
+ unsigned count = 2 * sizeof(u16);
+ u32 mc_dev_type, mc_version, mc_base_version;
+	u16 mc_nr_of_blks = be16_to_cpu(*(__be16 *)(mc_data + sizeof(u16)));
+
+ /*
+ * Scan microcode blocks first for version info
+ * and firmware check
+ */
+
+ /* Clear version block */
+ DRX_ATTR_MCRECORD(demod).aux_type = 0;
+ DRX_ATTR_MCRECORD(demod).mc_dev_type = 0;
+ DRX_ATTR_MCRECORD(demod).mc_version = 0;
+ DRX_ATTR_MCRECORD(demod).mc_base_version = 0;
+
+ for (i = 0; i < mc_nr_of_blks; i++) {
+ if (count + 3 * sizeof(u16) + sizeof(u32) > size)
+ goto eof;
+
+ /* Process block header */
+ block_hdr.addr = be32_to_cpu(*(u32 *)(mc_data + count));
+ count += sizeof(u32);
+		block_hdr.size = be16_to_cpu(*(__be16 *)(mc_data + count));
+		count += sizeof(u16);
+		block_hdr.flags = be16_to_cpu(*(__be16 *)(mc_data + count));
+		count += sizeof(u16);
+		block_hdr.CRC = be16_to_cpu(*(__be16 *)(mc_data + count));
+ count += sizeof(u16);
+
+ pr_debug("%u: addr %u, size %u, flags 0x%04x, CRC 0x%04x\n",
+ count, block_hdr.addr, block_hdr.size, block_hdr.flags,
+ block_hdr.CRC);
+
+ if (block_hdr.flags & 0x8) {
+ u8 *auxblk = ((void *)mc_data) + block_hdr.addr;
+ u16 auxtype;
+
+ if (block_hdr.addr + sizeof(u16) > size)
+ goto eof;
+
+			auxtype = be16_to_cpu(*(__be16 *)(auxblk));
+
+ /* Aux block. Check type */
+ if (DRX_ISMCVERTYPE(auxtype)) {
+				if (block_hdr.addr + 2 * sizeof(u16) + 2 * sizeof(u32) > size)
+ goto eof;
+
+ auxblk += sizeof(u16);
+ mc_dev_type = be32_to_cpu(*(u32 *)(auxblk));
+ auxblk += sizeof(u32);
+ mc_version = be32_to_cpu(*(u32 *)(auxblk));
+ auxblk += sizeof(u32);
+ mc_base_version = be32_to_cpu(*(u32 *)(auxblk));
+
+ DRX_ATTR_MCRECORD(demod).aux_type = auxtype;
+ DRX_ATTR_MCRECORD(demod).mc_dev_type = mc_dev_type;
+ DRX_ATTR_MCRECORD(demod).mc_version = mc_version;
+ DRX_ATTR_MCRECORD(demod).mc_base_version = mc_base_version;
+
+ pr_info("Firmware dev %x, ver %x, base ver %x\n",
+ mc_dev_type, mc_version, mc_base_version);
+
+ }
+ } else if (count + block_hdr.size * sizeof(u16) > size)
+ goto eof;
+
+ count += block_hdr.size * sizeof(u16);
+ }
+ return 0;
+eof:
+ pr_err("Firmware is truncated at pos %u/%u\n", count, size);
+ return -EINVAL;
+}
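+
+/*
+ * Firmware image layout, as parsed above and by drx_ctrl_u_code() below
+ * (all fields big endian); this is a sketch derived from the parsing code:
+ *
+ *   u16 magic word (DRX_UCODE_MAGIC_WORD)
+ *   u16 number of blocks
+ *   then, per block:
+ *     u32 address (device address; for aux blocks a file offset)
+ *     u16 size, in 16-bit words
+ *     u16 flags (bit 3 marks an aux/version block; DRX_UCODE_CRC_FLAG
+ *         means the CRC field is valid)
+ *     u16 CRC over the block payload
+ *     payload, size * 2 bytes
+ */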
+
+/**
+ * drx_ctrl_u_code - Handle microcode upload or verify.
+ * @demod: Pointer to the demodulator instance.
+ * @mc_info: Pointer to information about microcode data.
+ * @action: Either UCODE_UPLOAD or UCODE_VERIFY
+ *
+ * This function returns:
+ * 0:
+ * - In case of UCODE_UPLOAD: code is successfully uploaded.
+ * - In case of UCODE_VERIFY: image on device is equal to
+ * image provided to this control function.
+ * -EIO:
+ * - In case of UCODE_UPLOAD: I2C error.
+ * - In case of UCODE_VERIFY: I2C error or image on device
+ * is not equal to image provided to this control function.
+ * -EINVAL:
+ * - Invalid arguments.
+ * - Provided image is corrupt
+ */
+static int drx_ctrl_u_code(struct drx_demod_instance *demod,
+ struct drxu_code_info *mc_info,
+ enum drxu_code_action action)
+{
+ struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
+ int rc;
+ u16 i = 0;
+ u16 mc_nr_of_blks = 0;
+ u16 mc_magic_word = 0;
+ const u8 *mc_data_init = NULL;
+ u8 *mc_data = NULL;
+ unsigned size;
+ char *mc_file;
+
+ /* Check arguments */
+ if (!mc_info || !mc_info->mc_file)
+ return -EINVAL;
+
+ mc_file = mc_info->mc_file;
+
+ if (!demod->firmware) {
+ const struct firmware *fw = NULL;
+
+ rc = request_firmware(&fw, mc_file, demod->i2c->dev.parent);
+ if (rc < 0) {
+ pr_err("Couldn't read firmware %s\n", mc_file);
+ return rc;
+ }
+ demod->firmware = fw;
+
+ if (demod->firmware->size < 2 * sizeof(u16)) {
+ rc = -EINVAL;
+ pr_err("Firmware is too short!\n");
+ goto release;
+ }
+
+ pr_info("Firmware %s, size %zu\n",
+ mc_file, demod->firmware->size);
+ }
+
+ mc_data_init = demod->firmware->data;
+ size = demod->firmware->size;
+
+ mc_data = (void *)mc_data_init;
+ /* Check data */
+	mc_magic_word = be16_to_cpu(*(__be16 *)(mc_data));
+	mc_data += sizeof(u16);
+	mc_nr_of_blks = be16_to_cpu(*(__be16 *)(mc_data));
+ mc_data += sizeof(u16);
+
+ if ((mc_magic_word != DRX_UCODE_MAGIC_WORD) || (mc_nr_of_blks == 0)) {
+ rc = -EINVAL;
+ pr_err("Firmware magic word doesn't match\n");
+ goto release;
+ }
+
+ if (action == UCODE_UPLOAD) {
+ rc = drx_check_firmware(demod, (u8 *)mc_data_init, size);
+ if (rc)
+ goto release;
+ pr_info("Uploading firmware %s\n", mc_file);
+ } else {
+ pr_info("Verifying if firmware upload was ok.\n");
+ }
+
+ /* Process microcode blocks */
+ for (i = 0; i < mc_nr_of_blks; i++) {
+ struct drxu_code_block_hdr block_hdr;
+ u16 mc_block_nr_bytes = 0;
+
+ /* Process block header */
+ block_hdr.addr = be32_to_cpu(*(u32 *)(mc_data));
+ mc_data += sizeof(u32);
+		block_hdr.size = be16_to_cpu(*(__be16 *)(mc_data));
+		mc_data += sizeof(u16);
+		block_hdr.flags = be16_to_cpu(*(__be16 *)(mc_data));
+		mc_data += sizeof(u16);
+		block_hdr.CRC = be16_to_cpu(*(__be16 *)(mc_data));
+ mc_data += sizeof(u16);
+
+ pr_debug("%u: addr %u, size %u, flags 0x%04x, CRC 0x%04x\n",
+ (unsigned)(mc_data - mc_data_init), block_hdr.addr,
+ block_hdr.size, block_hdr.flags, block_hdr.CRC);
+
+		/*
+		 * Check the block header for:
+		 * - data larger than 64Kb
+		 * - a CRC mismatch, if CRC checking is enabled
+		 */
+ if ((block_hdr.size > 0x7FFF) ||
+ (((block_hdr.flags & DRX_UCODE_CRC_FLAG) != 0) &&
+ (block_hdr.CRC != drx_u_code_compute_crc(mc_data, block_hdr.size)))
+ ) {
+			/* Wrong data! */
+ rc = -EINVAL;
+ pr_err("firmware CRC is wrong\n");
+ goto release;
+ }
+
+ if (!block_hdr.size)
+ continue;
+
+ mc_block_nr_bytes = block_hdr.size * ((u16) sizeof(u16));
+
+ /* Perform the desired action */
+ switch (action) {
+ case UCODE_UPLOAD: /* Upload microcode */
+ if (drxdap_fasi_write_block(dev_addr,
+ block_hdr.addr,
+ mc_block_nr_bytes,
+ mc_data, 0x0000)) {
+ rc = -EIO;
+ pr_err("error writing firmware at pos %u\n",
+ (unsigned)(mc_data - mc_data_init));
+ goto release;
+ }
+ break;
+ case UCODE_VERIFY: { /* Verify uploaded microcode */
+ int result = 0;
+ u8 mc_data_buffer[DRX_UCODE_MAX_BUF_SIZE];
+ u32 bytes_to_comp = 0;
+ u32 bytes_left = mc_block_nr_bytes;
+ u32 curr_addr = block_hdr.addr;
+ u8 *curr_ptr = mc_data;
+
+ while (bytes_left != 0) {
+ if (bytes_left > DRX_UCODE_MAX_BUF_SIZE)
+ bytes_to_comp = DRX_UCODE_MAX_BUF_SIZE;
+ else
+ bytes_to_comp = bytes_left;
+
+ if (drxdap_fasi_read_block(dev_addr,
+ curr_addr,
+ (u16)bytes_to_comp,
+ (u8 *)mc_data_buffer,
+ 0x0000)) {
+ pr_err("error reading firmware at pos %u\n",
+ (unsigned)(mc_data - mc_data_init));
+ return -EIO;
+ }
+
+ result = memcmp(curr_ptr, mc_data_buffer,
+ bytes_to_comp);
+
+ if (result) {
+ pr_err("error verifying firmware at pos %u\n",
+ (unsigned)(mc_data - mc_data_init));
+ return -EIO;
+ }
+
+				curr_addr += (dr_xaddr_t)(bytes_to_comp / 2);
+				curr_ptr = &curr_ptr[bytes_to_comp];
+				bytes_left -= bytes_to_comp;
+ }
+ break;
+ }
+ default:
+ return -EINVAL;
+
+ }
+ mc_data += mc_block_nr_bytes;
+ }
+
+ return 0;
+
+release:
+ release_firmware(demod->firmware);
+ demod->firmware = NULL;
+
+ return rc;
+}
+
+/*
+ * The Linux DVB Driver for Micronas DRX39xx family (drx3933j)
+ *
+ * Written by Devin Heitmueller <devin.heitmueller@kernellabs.com>
+ */
+
+static int drx39xxj_set_powerstate(struct dvb_frontend *fe, int enable)
+{
+ struct drx39xxj_state *state = fe->demodulator_priv;
+ struct drx_demod_instance *demod = state->demod;
+ int result;
+ enum drx_power_mode power_mode;
+
+ if (enable)
+ power_mode = DRX_POWER_UP;
+ else
+ power_mode = DRX_POWER_DOWN;
+
+ result = ctrl_power_mode(demod, &power_mode);
+ if (result != 0) {
+ pr_err("Power state change failed\n");
+ return 0;
+ }
+
+ return 0;
+}
+
+static int drx39xxj_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct drx39xxj_state *state = fe->demodulator_priv;
+ struct drx_demod_instance *demod = state->demod;
+ int result;
+	enum drx_lock_status lock_status = DRX_NOT_LOCKED;
+
+ *status = 0;
+
+ result = ctrl_lock_status(demod, &lock_status);
+ if (result != 0) {
+ pr_err("drx39xxj: could not get lock status!\n");
+ *status = 0;
+ }
+
+ switch (lock_status) {
+ case DRX_NEVER_LOCK:
+ *status = 0;
+ pr_err("drx says NEVER_LOCK\n");
+ break;
+ case DRX_NOT_LOCKED:
+ *status = 0;
+ break;
+ case DRX_LOCK_STATE_1:
+ case DRX_LOCK_STATE_2:
+ case DRX_LOCK_STATE_3:
+ case DRX_LOCK_STATE_4:
+ case DRX_LOCK_STATE_5:
+ case DRX_LOCK_STATE_6:
+ case DRX_LOCK_STATE_7:
+ case DRX_LOCK_STATE_8:
+ case DRX_LOCK_STATE_9:
+ *status = FE_HAS_SIGNAL
+ | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC;
+ break;
+ case DRX_LOCKED:
+ *status = FE_HAS_SIGNAL
+ | FE_HAS_CARRIER
+ | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ break;
+ default:
+ pr_err("Lock state unknown %d\n", lock_status);
+ }
+ ctrl_sig_quality(demod, lock_status);
+
+ return 0;
+}
+
+static int drx39xxj_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ if (p->pre_bit_error.stat[0].scale == FE_SCALE_NOT_AVAILABLE) {
+ *ber = 0;
+ return 0;
+ }
+
+ if (!p->pre_bit_count.stat[0].uvalue) {
+ if (!p->pre_bit_error.stat[0].uvalue)
+ *ber = 0;
+ else
+ *ber = 1000000;
+ } else {
+ *ber = frac_times1e6(p->pre_bit_error.stat[0].uvalue,
+ p->pre_bit_count.stat[0].uvalue);
+ }
+ return 0;
+}
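+
+/*
+ * Example: assuming frac_times1e6() returns the fraction scaled by 10^6,
+ * pre_bit_error = 25 over pre_bit_count = 1000000 yields a legacy BER
+ * value of 25, i.e. 2.5e-5.
+ */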
+
+static int drx39xxj_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ if (p->strength.stat[0].scale == FE_SCALE_NOT_AVAILABLE) {
+ *strength = 0;
+ return 0;
+ }
+
+ *strength = p->strength.stat[0].uvalue;
+ return 0;
+}
+
+static int drx39xxj_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u64 tmp64;
+
+ if (p->cnr.stat[0].scale == FE_SCALE_NOT_AVAILABLE) {
+ *snr = 0;
+ return 0;
+ }
+
+ tmp64 = p->cnr.stat[0].svalue;
+ do_div(tmp64, 10);
+ *snr = tmp64;
+ return 0;
+}
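+
+/*
+ * The CNR statistic is stored in 0.001 dB steps (FE_SCALE_DECIBEL), so the
+ * division by 10 above reports the legacy SNR in 0.01 dB units; e.g.
+ * svalue = 23400 (23.4 dB) is returned as 2340.
+ */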
+
+static int drx39xxj_read_ucblocks(struct dvb_frontend *fe, u32 *ucb)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ if (p->block_error.stat[0].scale == FE_SCALE_NOT_AVAILABLE) {
+ *ucb = 0;
+ return 0;
+ }
+
+ *ucb = p->block_error.stat[0].uvalue;
+ return 0;
+}
+
+static int drx39xxj_set_frontend(struct dvb_frontend *fe)
+{
+#ifdef DJH_DEBUG
+ int i;
+#endif
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct drx39xxj_state *state = fe->demodulator_priv;
+ struct drx_demod_instance *demod = state->demod;
+ enum drx_standard standard = DRX_STANDARD_8VSB;
+ struct drx_channel channel;
+ int result;
+ struct drxuio_data uio_data;
+ static const struct drx_channel def_channel = {
+ /* frequency */ 0,
+ /* bandwidth */ DRX_BANDWIDTH_6MHZ,
+ /* mirror */ DRX_MIRROR_NO,
+ /* constellation */ DRX_CONSTELLATION_AUTO,
+ /* hierarchy */ DRX_HIERARCHY_UNKNOWN,
+ /* priority */ DRX_PRIORITY_UNKNOWN,
+ /* coderate */ DRX_CODERATE_UNKNOWN,
+ /* guard */ DRX_GUARD_UNKNOWN,
+ /* fftmode */ DRX_FFTMODE_UNKNOWN,
+ /* classification */ DRX_CLASSIFICATION_AUTO,
+ /* symbolrate */ 5057000,
+ /* interleavemode */ DRX_INTERLEAVEMODE_UNKNOWN,
+ /* ldpc */ DRX_LDPC_UNKNOWN,
+ /* carrier */ DRX_CARRIER_UNKNOWN,
+ /* frame mode */ DRX_FRAMEMODE_UNKNOWN
+ };
+ u32 constellation = DRX_CONSTELLATION_AUTO;
+
+ /* Bring the demod out of sleep */
+ drx39xxj_set_powerstate(fe, 1);
+
+ if (fe->ops.tuner_ops.set_params) {
+ u32 int_freq;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ /* Set tuner to desired frequency and standard */
+ fe->ops.tuner_ops.set_params(fe);
+
+ /* Use the tuner's IF */
+ if (fe->ops.tuner_ops.get_if_frequency) {
+ fe->ops.tuner_ops.get_if_frequency(fe, &int_freq);
+ demod->my_common_attr->intermediate_freq = int_freq / 1000;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+
+ switch (p->delivery_system) {
+ case SYS_ATSC:
+ standard = DRX_STANDARD_8VSB;
+ break;
+ case SYS_DVBC_ANNEX_B:
+ standard = DRX_STANDARD_ITU_B;
+
+ switch (p->modulation) {
+ case QAM_64:
+ constellation = DRX_CONSTELLATION_QAM64;
+ break;
+ case QAM_256:
+ constellation = DRX_CONSTELLATION_QAM256;
+ break;
+ default:
+ constellation = DRX_CONSTELLATION_AUTO;
+ break;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+	/* Set the standard (will be powered up if necessary) */
+ result = ctrl_set_standard(demod, &standard);
+ if (result != 0) {
+ pr_err("Failed to set standard! result=%02x\n",
+ result);
+ return -EINVAL;
+ }
+
+ /* set channel parameters */
+ channel = def_channel;
+ channel.frequency = p->frequency / 1000;
+ channel.bandwidth = DRX_BANDWIDTH_6MHZ;
+ channel.constellation = constellation;
+
+ /* program channel */
+ result = ctrl_set_channel(demod, &channel);
+ if (result != 0) {
+ pr_err("Failed to set channel!\n");
+ return -EINVAL;
+ }
+ /* Just for giggles, let's shut off the LNA again.... */
+ uio_data.uio = DRX_UIO1;
+ uio_data.value = false;
+ result = ctrl_uio_write(demod, &uio_data);
+ if (result != 0) {
+ pr_err("Failed to disable LNA!\n");
+ return 0;
+ }
+
+ /* After set_frontend, except for strength, stats aren't available */
+ p->strength.stat[0].scale = FE_SCALE_RELATIVE;
+
+ return 0;
+}
+
+static int drx39xxj_sleep(struct dvb_frontend *fe)
+{
+ /* power-down the demodulator */
+ return drx39xxj_set_powerstate(fe, 0);
+}
+
+static int drx39xxj_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct drx39xxj_state *state = fe->demodulator_priv;
+ struct drx_demod_instance *demod = state->demod;
+ bool i2c_gate_state;
+ int result;
+
+#ifdef DJH_DEBUG
+ pr_debug("i2c gate call: enable=%d state=%d\n", enable,
+ state->i2c_gate_open);
+#endif
+
+ if (enable)
+ i2c_gate_state = true;
+ else
+ i2c_gate_state = false;
+
+ if (state->i2c_gate_open == enable) {
+ /* We're already in the desired state */
+ return 0;
+ }
+
+ result = ctrl_i2c_bridge(demod, &i2c_gate_state);
+ if (result != 0) {
+ pr_err("drx39xxj: could not open i2c gate [%d]\n",
+ result);
+ dump_stack();
+ } else {
+ state->i2c_gate_open = enable;
+ }
+ return 0;
+}
+
+static int drx39xxj_init(struct dvb_frontend *fe)
+{
+ /* Bring the demod out of sleep */
+ drx39xxj_set_powerstate(fe, 1);
+
+ return 0;
+}
+
+static int drx39xxj_set_lna(struct dvb_frontend *fe)
+{
+ int result;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct drx39xxj_state *state = fe->demodulator_priv;
+ struct drx_demod_instance *demod = state->demod;
+ struct drxj_data *ext_attr = demod->my_ext_attr;
+ struct drxuio_cfg uio_cfg;
+ struct drxuio_data uio_data;
+
+ if (c->lna) {
+ if (!ext_attr->has_lna) {
+ pr_err("LNA is not supported on this device!\n");
+ return -EINVAL;
+
+ }
+ }
+
+ /* Turn off the LNA */
+ uio_cfg.uio = DRX_UIO1;
+ uio_cfg.mode = DRX_UIO_MODE_READWRITE;
+ /* Configure user-I/O #3: enable read/write */
+ result = ctrl_set_uio_cfg(demod, &uio_cfg);
+ if (result) {
+ pr_err("Failed to setup LNA GPIO!\n");
+ return result;
+ }
+
+ uio_data.uio = DRX_UIO1;
+ uio_data.value = c->lna;
+ result = ctrl_uio_write(demod, &uio_data);
+ if (result != 0) {
+ pr_err("Failed to %sable LNA!\n",
+ c->lna ? "en" : "dis");
+ return result;
+ }
+
+ return 0;
+}
+
+static int drx39xxj_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *tune)
+{
+ tune->min_delay_ms = 1000;
+ return 0;
+}
+
+static void drx39xxj_release(struct dvb_frontend *fe)
+{
+ struct drx39xxj_state *state = fe->demodulator_priv;
+ struct drx_demod_instance *demod = state->demod;
+
+ drxj_close(demod);
+
+ kfree(demod->my_ext_attr);
+ kfree(demod->my_common_attr);
+ kfree(demod->my_i2c_dev_addr);
+ if (demod->firmware)
+ release_firmware(demod->firmware);
+ kfree(demod);
+ kfree(state);
+}
+
+static struct dvb_frontend_ops drx39xxj_ops;
+
+struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c)
+{
+ struct drx39xxj_state *state = NULL;
+ struct i2c_device_addr *demod_addr = NULL;
+ struct drx_common_attr *demod_comm_attr = NULL;
+ struct drxj_data *demod_ext_attr = NULL;
+ struct drx_demod_instance *demod = NULL;
+ struct dtv_frontend_properties *p;
+ struct drxuio_cfg uio_cfg;
+ struct drxuio_data uio_data;
+ int result;
+
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(struct drx39xxj_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+
+ demod = kmalloc(sizeof(struct drx_demod_instance), GFP_KERNEL);
+ if (demod == NULL)
+ goto error;
+
+ demod_addr = kmalloc(sizeof(struct i2c_device_addr), GFP_KERNEL);
+ if (demod_addr == NULL)
+ goto error;
+ memcpy(demod_addr, &drxj_default_addr_g,
+ sizeof(struct i2c_device_addr));
+
+ demod_comm_attr = kmalloc(sizeof(struct drx_common_attr), GFP_KERNEL);
+ if (demod_comm_attr == NULL)
+ goto error;
+ memcpy(demod_comm_attr, &drxj_default_comm_attr_g,
+ sizeof(struct drx_common_attr));
+
+ demod_ext_attr = kmalloc(sizeof(struct drxj_data), GFP_KERNEL);
+ if (demod_ext_attr == NULL)
+ goto error;
+ memcpy(demod_ext_attr, &drxj_data_g, sizeof(struct drxj_data));
+
+ /* setup the state */
+ state->i2c = i2c;
+ state->demod = demod;
+
+ /* setup the demod data */
+ memcpy(demod, &drxj_default_demod_g, sizeof(struct drx_demod_instance));
+
+ demod->my_i2c_dev_addr = demod_addr;
+ demod->my_common_attr = demod_comm_attr;
+ demod->my_i2c_dev_addr->user_data = state;
+ demod->my_common_attr->microcode_file = DRX39XX_MAIN_FIRMWARE;
+ demod->my_common_attr->verify_microcode = true;
+ demod->my_common_attr->intermediate_freq = 5000;
+ demod->my_common_attr->current_power_mode = DRX_POWER_DOWN;
+ demod->my_ext_attr = demod_ext_attr;
+ ((struct drxj_data *)demod_ext_attr)->uio_sma_tx_mode = DRX_UIO_MODE_READWRITE;
+ demod->i2c = i2c;
+
+ result = drxj_open(demod);
+ if (result != 0) {
+ pr_err("DRX open failed! Aborting\n");
+ goto error;
+ }
+
+ /* Turn off the LNA */
+ uio_cfg.uio = DRX_UIO1;
+ uio_cfg.mode = DRX_UIO_MODE_READWRITE;
+ /* Configure user-I/O #3: enable read/write */
+ result = ctrl_set_uio_cfg(demod, &uio_cfg);
+ if (result) {
+ pr_err("Failed to setup LNA GPIO!\n");
+ goto error;
+ }
+
+ uio_data.uio = DRX_UIO1;
+ uio_data.value = false;
+ result = ctrl_uio_write(demod, &uio_data);
+ if (result != 0) {
+ pr_err("Failed to disable LNA!\n");
+ goto error;
+ }
+
+ /* create dvb_frontend */
+ memcpy(&state->frontend.ops, &drx39xxj_ops,
+ sizeof(struct dvb_frontend_ops));
+
+ state->frontend.demodulator_priv = state;
+
+ /* Initialize stats - needed for DVBv5 stats to work */
+ p = &state->frontend.dtv_property_cache;
+ p->strength.len = 1;
+ p->pre_bit_count.len = 1;
+ p->pre_bit_error.len = 1;
+ p->post_bit_count.len = 1;
+ p->post_bit_error.len = 1;
+ p->block_count.len = 1;
+ p->block_error.len = 1;
+ p->cnr.len = 1;
+
+ p->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ return &state->frontend;
+
+error:
+ kfree(demod_ext_attr);
+ kfree(demod_comm_attr);
+ kfree(demod_addr);
+ kfree(demod);
+ kfree(state);
+
+ return NULL;
+}
+EXPORT_SYMBOL(drx39xxj_attach);
+
+static struct dvb_frontend_ops drx39xxj_ops = {
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+ .info = {
+ .name = "Micronas DRX39xxj family Frontend",
+ .frequency_stepsize = 62500,
+ .frequency_min = 51000000,
+ .frequency_max = 858000000,
+ .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
+ },
+
+ .init = drx39xxj_init,
+ .i2c_gate_ctrl = drx39xxj_i2c_gate_ctrl,
+ .sleep = drx39xxj_sleep,
+ .set_frontend = drx39xxj_set_frontend,
+ .get_tune_settings = drx39xxj_get_tune_settings,
+ .read_status = drx39xxj_read_status,
+ .read_ber = drx39xxj_read_ber,
+ .read_signal_strength = drx39xxj_read_signal_strength,
+ .read_snr = drx39xxj_read_snr,
+ .read_ucblocks = drx39xxj_read_ucblocks,
+ .release = drx39xxj_release,
+ .set_lna = drx39xxj_set_lna,
+};
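+
+/*
+ * Attach sketch (illustrative only, not used by this module): how a bridge
+ * driver would typically bind this frontend.  "adap" and "i2c" are assumed
+ * to come from the bridge driver; error handling is kept minimal.
+ */
+#if 0
+static int example_bind_drx39xxj(struct dvb_adapter *adap,
+				 struct i2c_adapter *i2c)
+{
+	struct dvb_frontend *fe = dvb_attach(drx39xxj_attach, i2c);
+
+	if (!fe)
+		return -ENODEV;
+
+	return dvb_register_frontend(adap, fe);
+}
+#endif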
+
+MODULE_DESCRIPTION("Micronas DRX39xxj Frontend");
+MODULE_AUTHOR("Devin Heitmueller");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(DRX39XX_MAIN_FIRMWARE);
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.h b/drivers/media/dvb-frontends/drx39xyj/drxj.h
new file mode 100644
index 000000000000..55ad535197d2
--- /dev/null
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.h
@@ -0,0 +1,650 @@
+
+/*
+ Copyright (c), 2004-2005,2007-2010 Trident Microsystems, Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Trident Microsystems nor Hauppauge Computer Works
+ nor the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ DRXJ specific header file
+
+ Authors: Dragan Savic, Milos Nikolic, Mihajlo Katona, Tao Ding, Paul Janssen
+*/
+
+#ifndef __DRXJ_H__
+#define __DRXJ_H__
+/*-------------------------------------------------------------------------
+INCLUDES
+-------------------------------------------------------------------------*/
+
+#include "drx_driver.h"
+#include "drx_dap_fasi.h"
+
+/* Check DRX-J specific dap condition */
+/* Multi master mode and short addr format only will not work.
+ RMW, CRC reset, broadcast and switching back to single master mode
+ cannot be done with short addr only in multi master mode. */
+#if ((DRXDAP_SINGLE_MASTER == 0) && (DRXDAPFASI_LONG_ADDR_ALLOWED == 0))
+#error "Multi master mode and short addressing only is an illegal combination"
+ *; /* Generate a fatal compiler error to make sure it stops here,
+ this is necessary because not all compilers stop after a #error. */
+#endif
+
+/*-------------------------------------------------------------------------
+TYPEDEFS
+-------------------------------------------------------------------------*/
+/*============================================================================*/
+/*============================================================================*/
+/*== code support ============================================================*/
+/*============================================================================*/
+/*============================================================================*/
+
+/*============================================================================*/
+/*============================================================================*/
+/*== SCU cmd if =============================================================*/
+/*============================================================================*/
+/*============================================================================*/
+
+struct drxjscu_cmd {
+	u16 command;		/**< Command number */
+	u16 parameter_len;	/**< Data length in bytes */
+	u16 result_len;		/**< Result length in bytes */
+	u16 *parameter;		/**< General purpose parameter */
+	u16 *result;		/**< General purpose parameter */
+};
+
+/*============================================================================*/
+/*============================================================================*/
+/*== CTRL CFG related data structures ========================================*/
+/*============================================================================*/
+/*============================================================================*/
+
+/* extra intermediate lock state for VSB,QAM,NTSC */
+#define DRXJ_DEMOD_LOCK (DRX_LOCK_STATE_1)
+
+/* OOB lock states */
+#define DRXJ_OOB_AGC_LOCK (DRX_LOCK_STATE_1) /* analog gain control lock */
+#define DRXJ_OOB_SYNC_LOCK (DRX_LOCK_STATE_2) /* digital gain control lock */
+
+/* Intermediate powermodes for DRXJ */
+#define DRXJ_POWER_DOWN_MAIN_PATH DRX_POWER_MODE_8
+#define DRXJ_POWER_DOWN_CORE DRX_POWER_MODE_9
+#define DRXJ_POWER_DOWN_PLL DRX_POWER_MODE_10
+
+/* substitution for GPIO FNC mux */
+#define APP_O (0x0000)
+
+/*#define DRX_CTRL_BASE (0x0000)*/
+
+#define DRXJ_CTRL_CFG_BASE (0x1000)
+ enum drxj_cfg_type {
+ DRXJ_CFG_AGC_RF = DRXJ_CTRL_CFG_BASE,
+ DRXJ_CFG_AGC_IF,
+ DRXJ_CFG_AGC_INTERNAL,
+ DRXJ_CFG_PRE_SAW,
+ DRXJ_CFG_AFE_GAIN,
+ DRXJ_CFG_SYMBOL_CLK_OFFSET,
+ DRXJ_CFG_ACCUM_CR_RS_CW_ERR,
+ DRXJ_CFG_FEC_MERS_SEQ_COUNT,
+ DRXJ_CFG_OOB_MISC,
+ DRXJ_CFG_SMART_ANT,
+ DRXJ_CFG_OOB_PRE_SAW,
+ DRXJ_CFG_VSB_MISC,
+ DRXJ_CFG_RESET_PACKET_ERR,
+
+ /* ATV (FM) */
+ DRXJ_CFG_ATV_OUTPUT, /* also for FM (SIF control) but not likely */
+ DRXJ_CFG_ATV_MISC,
+ DRXJ_CFG_ATV_EQU_COEF,
+ DRXJ_CFG_ATV_AGC_STATUS, /* also for FM ( IF,RF, audioAGC ) */
+
+ DRXJ_CFG_MPEG_OUTPUT_MISC,
+ DRXJ_CFG_HW_CFG,
+ DRXJ_CFG_OOB_LO_POW,
+
+		DRXJ_CFG_MAX	/* dummy, never to be used */
+	};
+
+/**
+* \enum drxj_cfg_smart_ant_io
+* Smart antenna I/O.
+*/
+enum drxj_cfg_smart_ant_io {
+ DRXJ_SMT_ANT_OUTPUT = 0,
+ DRXJ_SMT_ANT_INPUT
+};
+
+/**
+* \struct drxj_cfg_smart_ant
+* Set smart antenna.
+*/
+ struct drxj_cfg_smart_ant {
+ enum drxj_cfg_smart_ant_io io;
+ u16 ctrl_data;
+ };
+
+/**
+* /struct DRXJAGCSTATUS_t
+* AGC status information from the DRXJ-IQM-AF.
+*/
+struct drxj_agc_status {
+ u16 IFAGC;
+ u16 RFAGC;
+ u16 digital_agc;
+};
+
+/* DRXJ_CFG_AGC_RF, DRXJ_CFG_AGC_IF */
+
+/**
+* \enum drxj_agc_ctrl_mode
+* Available AGC modes in the DRXJ.
+*/
+ enum drxj_agc_ctrl_mode {
+ DRX_AGC_CTRL_AUTO = 0,
+ DRX_AGC_CTRL_USER,
+ DRX_AGC_CTRL_OFF};
+
+/**
+* \struct drxj_cfg_agc
+* Generic interface for all AGCs present on the DRXJ.
+*/
+ struct drxj_cfg_agc {
+ enum drx_standard standard; /* standard for which these settings apply */
+ enum drxj_agc_ctrl_mode ctrl_mode; /* off, user, auto */
+ u16 output_level; /* range dependent on AGC */
+ u16 min_output_level; /* range dependent on AGC */
+ u16 max_output_level; /* range dependent on AGC */
+ u16 speed; /* range dependent on AGC */
+ u16 top; /* rf-agc take over point */
+ u16 cut_off_current; /* rf-agc is accelerated if output current
+ is below cut-off current */};
+
+/* DRXJ_CFG_PRE_SAW */
+
+/**
+* \struct drxj_cfg_pre_saw
+* Interface to configure pre-SAW sense.
+*/
+ struct drxj_cfg_pre_saw {
+ enum drx_standard standard; /* standard to which these settings apply */
+ u16 reference; /* pre SAW reference value, range 0 .. 31 */
+ bool use_pre_saw; /* true algorithms must use pre SAW sense */};
+
+/* DRXJ_CFG_AFE_GAIN */
+
+/**
+* \struct drxj_cfg_afe_gain
+* Interface to configure gain of AFE (LNA + PGA).
+*/
+ struct drxj_cfg_afe_gain {
+ enum drx_standard standard; /* standard to which these settings apply */
+ u16 gain; /* gain in 0.1 dB steps, DRXJ range 140 .. 335 */};
+
+/**
+* /struct drxjrs_errors
+* Available failure information in DRXJ_FEC_RS.
+*
+* Container for errors that are received in the most recently finished measurement period
+*
+*/
+ struct drxjrs_errors {
+ u16 nr_bit_errors;
+ /**< no of pre RS bit errors */
+ u16 nr_symbol_errors;
+ /**< no of pre RS symbol errors */
+ u16 nr_packet_errors;
+ /**< no of pre RS packet errors */
+ u16 nr_failures;
+ /**< no of post RS failures to decode */
+ u16 nr_snc_par_fail_count;
+	/**< no of post RS bit errors */
+ };
+
+/**
+* \struct drxj_cfg_vsb_misc
+* Symbol error rate.
+*/
+ struct drxj_cfg_vsb_misc {
+ u32 symb_error;
+ /**< symbol error rate sps */};
+
+/**
+* \enum drxj_mpeg_start_width
+* MPEG output start width.
+*
+*/
+ enum drxj_mpeg_start_width {
+ DRXJ_MPEG_START_WIDTH_1CLKCYC,
+ DRXJ_MPEG_START_WIDTH_8CLKCYC};
+
+/**
+* \enum drxj_mpeg_output_clock_rate
+* MPEG output clock rate.
+*
+*/
+ enum drxj_mpeg_output_clock_rate {
+ DRXJ_MPEGOUTPUT_CLOCK_RATE_AUTO,
+ DRXJ_MPEGOUTPUT_CLOCK_RATE_75973K,
+ DRXJ_MPEGOUTPUT_CLOCK_RATE_50625K,
+ DRXJ_MPEGOUTPUT_CLOCK_RATE_37968K,
+ DRXJ_MPEGOUTPUT_CLOCK_RATE_30375K,
+ DRXJ_MPEGOUTPUT_CLOCK_RATE_25313K,
+ DRXJ_MPEGOUTPUT_CLOCK_RATE_21696K};
+
+/**
+* /struct DRXJCfgMisc_t
+* Change TEI bit of MPEG output
+* reverse MPEG output bit order
+* set MPEG output clock rate
+*/
+ struct drxj_cfg_mpeg_output_misc {
+ bool disable_tei_handling; /**< if true pass (not change) TEI bit */
+ bool bit_reverse_mpeg_outout; /**< if true, parallel: msb on MD0; serial: lsb out first */
+ enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate;
+	/**< set MPEG output clock rate that overwrites the one derived from the symbol rate */
+ enum drxj_mpeg_start_width mpeg_start_width; /**< set MPEG output start width */};
+
+/**
+* \enum drxj_xtal_freq
+* Supported external crystal reference frequencies.
+*/
+ enum drxj_xtal_freq {
+ DRXJ_XTAL_FREQ_RSVD,
+ DRXJ_XTAL_FREQ_27MHZ,
+ DRXJ_XTAL_FREQ_20P25MHZ,
+ DRXJ_XTAL_FREQ_4MHZ};
+
+/**
+* \enum drxji2c_speed
+* Supported I2C bus speeds.
+*/
+ enum drxji2c_speed {
+ DRXJ_I2C_SPEED_400KBPS,
+ DRXJ_I2C_SPEED_100KBPS};
+
+/**
+* \struct drxj_cfg_hw_cfg
+* Get HW configuration, such as crystal reference frequency, I2C speed, etc.
+*/
+ struct drxj_cfg_hw_cfg {
+ enum drxj_xtal_freq xtal_freq;
+ /**< crystal reference frequency */
+ enum drxji2c_speed i2c_speed;
+ /**< 100 or 400 kbps */};
+
+/*
+ * DRXJ_CFG_ATV_MISC
+ */
+ struct drxj_cfg_atv_misc {
+ s16 peak_filter; /* -8 .. 15 */
+ u16 noise_filter; /* 0 .. 15 */};
+
+/*
+ * struct drxj_cfg_oob_misc */
+#define DRXJ_OOB_STATE_RESET 0x0
+#define DRXJ_OOB_STATE_AGN_HUNT 0x1
+#define DRXJ_OOB_STATE_DGN_HUNT 0x2
+#define DRXJ_OOB_STATE_AGC_HUNT 0x3
+#define DRXJ_OOB_STATE_FRQ_HUNT 0x4
+#define DRXJ_OOB_STATE_PHA_HUNT 0x8
+#define DRXJ_OOB_STATE_TIM_HUNT 0x10
+#define DRXJ_OOB_STATE_EQU_HUNT 0x20
+#define DRXJ_OOB_STATE_EQT_HUNT 0x30
+#define DRXJ_OOB_STATE_SYNC 0x40
+
+struct drxj_cfg_oob_misc {
+ struct drxj_agc_status agc;
+ bool eq_lock;
+ bool sym_timing_lock;
+ bool phase_lock;
+ bool freq_lock;
+ bool dig_gain_lock;
+ bool ana_gain_lock;
+ u8 state;
+};
+
+/*
+ * OOB LO power levels
+ */
+ enum drxj_cfg_oob_lo_power {
+ DRXJ_OOB_LO_POW_MINUS0DB = 0,
+ DRXJ_OOB_LO_POW_MINUS5DB,
+ DRXJ_OOB_LO_POW_MINUS10DB,
+ DRXJ_OOB_LO_POW_MINUS15DB,
+ DRXJ_OOB_LO_POW_MAX};
+
+/*
+ * DRXJ_CFG_ATV_EQU_COEF
+ */
+ struct drxj_cfg_atv_equ_coef {
+ s16 coef0; /* -256 .. 255 */
+ s16 coef1; /* -256 .. 255 */
+ s16 coef2; /* -256 .. 255 */
+ s16 coef3; /* -256 .. 255 */};
+
+/*
+ * Index of in array of coef
+ */
+ enum drxj_coef_array_index {
+ DRXJ_COEF_IDX_MN = 0,
+ DRXJ_COEF_IDX_FM,
+ DRXJ_COEF_IDX_L,
+ DRXJ_COEF_IDX_LP,
+ DRXJ_COEF_IDX_BG,
+ DRXJ_COEF_IDX_DK,
+ DRXJ_COEF_IDX_I,
+ DRXJ_COEF_IDX_MAX};
+
+/*
+ * DRXJ_CFG_ATV_OUTPUT
+ */
+
+/**
+* /enum DRXJAttenuation_t
+* Attenuation setting for SIF AGC.
+*
+*/
+ enum drxjsif_attenuation {
+ DRXJ_SIF_ATTENUATION_0DB,
+ DRXJ_SIF_ATTENUATION_3DB,
+ DRXJ_SIF_ATTENUATION_6DB,
+ DRXJ_SIF_ATTENUATION_9DB};
+
+/**
+* \struct drxj_cfg_atv_output
+* SIF attenuation setting.
+*
+*/
+struct drxj_cfg_atv_output {
+ bool enable_cvbs_output; /* true= enabled */
+ bool enable_sif_output; /* true= enabled */
+ enum drxjsif_attenuation sif_attenuation;
+};
+
+/*
+ DRXJ_CFG_ATV_AGC_STATUS (get only)
+*/
+/* TODO : AFE interface not yet finished, subject to change */
+ struct drxj_cfg_atv_agc_status {
+ u16 rf_agc_gain; /* 0 .. 877 uA */
+ u16 if_agc_gain; /* 0 .. 877 uA */
+ s16 video_agc_gain; /* -75 .. 1972 in 0.1 dB steps */
+ s16 audio_agc_gain; /* -4 .. 1020 in 0.1 dB steps */
+ u16 rf_agc_loop_gain; /* 0 .. 7 */
+ u16 if_agc_loop_gain; /* 0 .. 7 */
+ u16 video_agc_loop_gain; /* 0 .. 7 */};
+
+/*============================================================================*/
+/*============================================================================*/
+/*== CTRL related data structures ============================================*/
+/*============================================================================*/
+/*============================================================================*/
+
+/* NONE */
+
+/*============================================================================*/
+/*============================================================================*/
+
+/*========================================*/
+/**
+* /struct struct drxj_data * DRXJ specific attributes.
+*
+* Global data container for DRXJ specific data.
+*
+*/
+ struct drxj_data {
+ /* device capabilities (determined during drx_open()) */
+ bool has_lna; /**< true if LNA (aka PGA) present */
+ bool has_oob; /**< true if OOB supported */
+ bool has_ntsc; /**< true if NTSC supported */
+ bool has_btsc; /**< true if BTSC supported */
+ bool has_smatx; /**< true if the SmaTx pin is available */
+ bool has_smarx; /**< true if the SmaRx pin is available */
+ bool has_gpio; /**< true if GPIO is available */
+ bool has_irqn; /**< true if IRQN is available */
+ /* A1/A2/A... */
+ u8 mfx; /**< metal fix */
+
+ /* tuner settings */
+ bool mirror_freq_spect_oob;/**< tuner inversion (true = tuner mirrors the signal) */
+
+ /* standard/channel settings */
+ enum drx_standard standard; /**< current standard information */
+ enum drx_modulation constellation;
+ /**< current constellation */
+ s32 frequency; /**< center signal frequency in kHz */
+ enum drx_bandwidth curr_bandwidth;
+ /**< current channel bandwidth */
+ enum drx_mirror mirror; /**< current channel mirror */
+
+ /* signal quality information */
+ u32 fec_bits_desired; /**< BER accounting period */
+ u16 fec_vd_plen; /**< no of trellis symbols: VD SER measurement period */
+ u16 qam_vd_prescale; /**< Viterbi Measurement Prescale */
+ u16 qam_vd_period; /**< Viterbi Measurement period */
+ u16 fec_rs_plen; /**< defines RS BER measurement period */
+ u16 fec_rs_prescale; /**< ReedSolomon Measurement Prescale */
+ u16 fec_rs_period; /**< ReedSolomon Measurement period */
+ bool reset_pkt_err_acc; /**< Set a flag to reset accumulated packet error */
+ u16 pkt_err_acc_start; /**< Set a flag to reset accumulated packet error */
+
+ /* HI configuration */
+ u16 hi_cfg_timing_div; /**< HI Configure() parameter 2 */
+ u16 hi_cfg_bridge_delay; /**< HI Configure() parameter 3 */
+ u16 hi_cfg_wake_up_key; /**< HI Configure() parameter 4 */
+ u16 hi_cfg_ctrl; /**< HI Configure() parameter 5 */
+ u16 hi_cfg_transmit; /**< HI Configure() parameter 6 */
+
+ /* UIO configuration */
+ enum drxuio_mode uio_sma_rx_mode;/**< current mode of SmaRx pin */
+ enum drxuio_mode uio_sma_tx_mode;/**< current mode of SmaTx pin */
+ enum drxuio_mode uio_gpio_mode; /**< current mode of ASEL pin */
+ enum drxuio_mode uio_irqn_mode; /**< current mode of IRQN pin */
+
+ /* IQM fs frequency shift and inversion */
+ u32 iqm_fs_rate_ofs; /**< frequency shifter setting after setchannel */
+ bool pos_image; /**< True: positive image */
+ /* IQM RC frequency shift */
+ u32 iqm_rc_rate_ofs; /**< frequency shifter setting after setchannel */
+
+ /* ATV configuration */
+ u32 atv_cfg_changed_flags; /**< flag: flags cfg changes */
+ s16 atv_top_equ0[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU0__A */
+ s16 atv_top_equ1[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU1__A */
+ s16 atv_top_equ2[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU2__A */
+ s16 atv_top_equ3[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU3__A */
+ bool phase_correction_bypass;/**< flag: true=bypass */
+ s16 atv_top_vid_peak; /**< shadow of ATV_TOP_VID_PEAK__A */
+ u16 atv_top_noise_th; /**< shadow of ATV_TOP_NOISE_TH__A */
+ bool enable_cvbs_output; /**< flag CVBS output enable */
+ bool enable_sif_output; /**< flag SIF output enable */
+ enum drxjsif_attenuation sif_attenuation;
+ /**< current SIF att setting */
+ /* Agc configuration for QAM and VSB */
+ struct drxj_cfg_agc qam_rf_agc_cfg; /**< qam RF AGC config */
+ struct drxj_cfg_agc qam_if_agc_cfg; /**< qam IF AGC config */
+ struct drxj_cfg_agc vsb_rf_agc_cfg; /**< vsb RF AGC config */
+ struct drxj_cfg_agc vsb_if_agc_cfg; /**< vsb IF AGC config */
+
+ /* PGA gain configuration for QAM and VSB */
+ u16 qam_pga_cfg; /**< qam PGA config */
+ u16 vsb_pga_cfg; /**< vsb PGA config */
+
+ /* Pre SAW configuration for QAM and VSB */
+ struct drxj_cfg_pre_saw qam_pre_saw_cfg;
+ /**< qam pre SAW config */
+ struct drxj_cfg_pre_saw vsb_pre_saw_cfg;
+ /**< vsb pre SAW config */
+
+ /* Version information */
+ char v_text[2][12]; /**< allocated text versions */
+ struct drx_version v_version[2]; /**< allocated versions structs */
+ struct drx_version_list v_list_elements[2];
+ /**< allocated version list */
+
+ /* smart antenna configuration */
+ bool smart_ant_inverted;
+
+ /* Tracking filter setting for OOB */
+ u16 oob_trk_filter_cfg[8];
+ bool oob_power_on;
+
+ /* MPEG static bitrate setting */
+ u32 mpeg_ts_static_bitrate; /**< bitrate static MPEG output */
+ bool disable_te_ihandling; /**< MPEG TS TEI handling */
+ bool bit_reverse_mpeg_outout;/**< MPEG output bit order */
+ enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate;
+ /**< MPEG output clock rate */
+ enum drxj_mpeg_start_width mpeg_start_width;
+ /**< MPEG Start width */
+
+ /* Pre SAW & Agc configuration for ATV */
+ struct drxj_cfg_pre_saw atv_pre_saw_cfg;
+ /**< atv pre SAW config */
+ struct drxj_cfg_agc atv_rf_agc_cfg; /**< atv RF AGC config */
+ struct drxj_cfg_agc atv_if_agc_cfg; /**< atv IF AGC config */
+ u16 atv_pga_cfg; /**< atv pga config */
+
+ u32 curr_symbol_rate;
+
+ /* pin-safe mode */
+ bool pdr_safe_mode; /**< PDR safe mode activated */
+ u16 pdr_safe_restore_val_gpio;
+ u16 pdr_safe_restore_val_v_sync;
+ u16 pdr_safe_restore_val_sma_rx;
+ u16 pdr_safe_restore_val_sma_tx;
+
+ /* OOB pre-saw value */
+ u16 oob_pre_saw;
+ enum drxj_cfg_oob_lo_power oob_lo_pow;
+
+ struct drx_aud_data aud_data;
+ /**< audio storage */};
+
+/*-------------------------------------------------------------------------
+Access MACROS
+-------------------------------------------------------------------------*/
+/**
+* \brief Compilable references to attributes
+* \param d pointer to demod instance
+*
+* Used as main reference to an attribute field.
+* Can be used by both macro implementation and function implementation.
+* These macros are defined to avoid duplication of code in macro and function
+* definitions that handle access of demod common or extended attributes.
+*
+*/
+
+#define DRXJ_ATTR_BTSC_DETECT(d) \
+ (((struct drxj_data *)(d)->my_ext_attr)->aud_data.btsc_detect)
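+
+/*
+ * Usage sketch (variable names hypothetical): given a demod instance
+ * pointer "demod" whose my_ext_attr points at a struct drxj_data, as the
+ * driver sets up elsewhere, the macro reads the stored BTSC detection
+ * result:
+ *
+ *	bool btsc_detected = DRXJ_ATTR_BTSC_DETECT(demod);
+ */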
+
+/*-------------------------------------------------------------------------
+DEFINES
+-------------------------------------------------------------------------*/
+
+/**
+* \def DRXJ_NTSC_CARRIER_FREQ_OFFSET
+* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
+*
+* For NTSC standard.
+* NTSC channels are listed by their picture carrier frequency (Fpc).
+* The function DRX_CTRL_SET_CHANNEL requires the centre frequency as input.
+* In case the tuner module is not used the DRX-J requires that the tuner is
+* tuned to the centre frequency of the channel:
+*
+* Fcentre = Fpc + DRXJ_NTSC_CARRIER_FREQ_OFFSET
+*
+*/
+#define DRXJ_NTSC_CARRIER_FREQ_OFFSET ((s32)(1750))
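+
+/*
+ * Illustrative helper (a sketch, not part of the driver API): convert an
+ * NTSC picture carrier frequency in kHz to the centre frequency expected
+ * by DRX_CTRL_SET_CHANNEL, per the formula above. For example, a picture
+ * carrier at 67250 kHz maps to a centre frequency of 69000 kHz.
+ */
+static inline s32 drxj_ntsc_fpc_to_fcentre(s32 fpc_khz)
+{
+	return fpc_khz + DRXJ_NTSC_CARRIER_FREQ_OFFSET;
+}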
+
+/**
+* \def DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET
+* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
+*
+* For PAL/SECAM - BG standard. This define is needed in case the tuner module
+* is NOT used. PAL/SECAM channels are listed by their picture carrier frequency (Fpc).
+* The DRX-J requires that the tuner is tuned to:
+* Fpc + DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET
+*
+* In case the tuner module is used the drxdriver takes care of this.
+* In case the tuner module is NOT used the application programmer must take
+* care of this.
+*
+*/
+#define DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET ((s32)(2375))
+
+/**
+* \def DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET
+* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
+*
+* For PAL/SECAM - DK, I, L standards. This define is needed in case the tuner module
+* is NOT used. PAL/SECAM channels are listed by their picture carrier frequency (Fpc).
+* The DRX-J requires that the tuner is tuned to:
+* Fpc + DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET
+*
+* In case the tuner module is used the drxdriver takes care of this.
+* In case the tuner module is NOT used the application programmer must take
+* care of this.
+*
+*/
+#define DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET ((s32)(2775))
+
+/**
+* \def DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET
+* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
+*
+* For PAL/SECAM - LP standard. This define is needed in case the tuner module
+* is NOT used. PAL/SECAM channels are listed by their picture carrier frequency (Fpc).
+* The DRX-J requires that the tuner is tuned to:
+* Fpc + DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET
+*
+* In case the tuner module is used the drxdriver takes care of this.
+* In case the tuner module is NOT used the application programmer must take
+* care of this.
+*/
+#define DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET ((s32)(-3255))
+
+/**
+* \def DRXJ_FM_CARRIER_FREQ_OFFSET
+* \brief Offset from sound carrier to centre frequency in kHz, in RF domain
+*
+* For FM standard.
+* FM channels are listed by their sound carrier frequency (Fsc).
+* The function DRX_CTRL_SET_CHANNEL requires the Ffm frequency (see below) as
+* input.
+* In case the tuner module is not used the DRX-J requires that the tuner is
+* tuned to the Ffm frequency of the channel.
+*
+* Ffm = Fsc + DRXJ_FM_CARRIER_FREQ_OFFSET
+*
+*/
+#define DRXJ_FM_CARRIER_FREQ_OFFSET ((s32)(-3000))
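+
+/*
+ * Illustrative helper (a sketch, not part of the driver API): convert an
+ * FM sound carrier frequency in kHz to the Ffm frequency expected by
+ * DRX_CTRL_SET_CHANNEL, per the formula above. For example, a sound
+ * carrier at 100000 kHz maps to an Ffm of 97000 kHz.
+ */
+static inline s32 drxj_fm_fsc_to_ffm(s32 fsc_khz)
+{
+	return fsc_khz + DRXJ_FM_CARRIER_FREQ_OFFSET;
+}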
+
+/* Revision types -------------------------------------------------------*/
+
+#define DRXJ_TYPE_ID (0x3946000DUL)
+
+/* Macros ---------------------------------------------------------------*/
+
+/* Convert OOB lock status to string */
+#define DRXJ_STR_OOB_LOCKSTATUS(x) ( \
+ (x == DRX_NEVER_LOCK) ? "Never" : \
+ (x == DRX_NOT_LOCKED) ? "No" : \
+ (x == DRX_LOCKED) ? "Locked" : \
+ (x == DRX_LOCK_STATE_1) ? "AGC lock" : \
+ (x == DRX_LOCK_STATE_2) ? "sync lock" : \
+ "(Invalid)")
+
+#endif /* __DRXJ_H__ */
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj_map.h b/drivers/media/dvb-frontends/drx39xyj/drxj_map.h
new file mode 100644
index 000000000000..0bbd4ae1f524
--- /dev/null
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj_map.h
@@ -0,0 +1,15055 @@
+/*
+ Copyright (c), 2004-2005,2007-2010 Trident Microsystems, Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Trident Microsystems nor Hauppauge Computer Works
+ nor the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*
+ ***********************************************************************************************************************
+ * WARNING - THIS FILE HAS BEEN GENERATED - DO NOT CHANGE
+ *
+ * Filename: drxj_map.h
+ * Generated on: Mon Jan 18 12:09:24 2010
+ * Generated by: IDF:x 1.3.0
+ * Generated from: reg_map
+ * Output start: [entry point]
+ *
+ * filename last modified re-use
+ * -----------------------------------------------------
+ * reg_map.1.tmp Mon Jan 18 12:09:24 2010 -
+ *
+ */
+
+#ifndef __DRXJ_MAP__H__
+#define __DRXJ_MAP__H__ INCLUDED
+
+#ifdef _REGISTERTABLE_
+#include <registertable.h>
+ extern register_table_t drxj_map[];
+ extern register_table_info_t drxj_map_info[];
+#endif
+
+#define ATV_COMM_EXEC__A 0xC00000
+#define ATV_COMM_EXEC__W 2
+#define ATV_COMM_EXEC__M 0x3
+#define ATV_COMM_EXEC__PRE 0x0
+#define ATV_COMM_EXEC_STOP 0x0
+#define ATV_COMM_EXEC_ACTIVE 0x1
+#define ATV_COMM_EXEC_HOLD 0x2
+
+#define ATV_COMM_STATE__A 0xC00001
+#define ATV_COMM_STATE__W 16
+#define ATV_COMM_STATE__M 0xFFFF
+#define ATV_COMM_STATE__PRE 0x0
+#define ATV_COMM_MB__A 0xC00002
+#define ATV_COMM_MB__W 16
+#define ATV_COMM_MB__M 0xFFFF
+#define ATV_COMM_MB__PRE 0x0
+#define ATV_COMM_INT_REQ__A 0xC00003
+#define ATV_COMM_INT_REQ__W 16
+#define ATV_COMM_INT_REQ__M 0xFFFF
+#define ATV_COMM_INT_REQ__PRE 0x0
+#define ATV_COMM_INT_REQ_COMM_INT_REQ__B 0
+#define ATV_COMM_INT_REQ_COMM_INT_REQ__W 1
+#define ATV_COMM_INT_REQ_COMM_INT_REQ__M 0x1
+#define ATV_COMM_INT_REQ_COMM_INT_REQ__PRE 0x0
+
+#define ATV_COMM_INT_STA__A 0xC00005
+#define ATV_COMM_INT_STA__W 16
+#define ATV_COMM_INT_STA__M 0xFFFF
+#define ATV_COMM_INT_STA__PRE 0x0
+#define ATV_COMM_INT_MSK__A 0xC00006
+#define ATV_COMM_INT_MSK__W 16
+#define ATV_COMM_INT_MSK__M 0xFFFF
+#define ATV_COMM_INT_MSK__PRE 0x0
+#define ATV_COMM_INT_STM__A 0xC00007
+#define ATV_COMM_INT_STM__W 16
+#define ATV_COMM_INT_STM__M 0xFFFF
+#define ATV_COMM_INT_STM__PRE 0x0
+
+#define ATV_COMM_KEY__A 0xC0000F
+#define ATV_COMM_KEY__W 16
+#define ATV_COMM_KEY__M 0xFFFF
+#define ATV_COMM_KEY__PRE 0x0
+#define ATV_COMM_KEY_KEY 0xFABA
+#define ATV_COMM_KEY_MIN 0x0
+#define ATV_COMM_KEY_MAX 0xFFFF
+
+#define ATV_TOP_COMM_EXEC__A 0xC10000
+#define ATV_TOP_COMM_EXEC__W 2
+#define ATV_TOP_COMM_EXEC__M 0x3
+#define ATV_TOP_COMM_EXEC__PRE 0x0
+#define ATV_TOP_COMM_EXEC_STOP 0x0
+#define ATV_TOP_COMM_EXEC_ACTIVE 0x1
+#define ATV_TOP_COMM_EXEC_HOLD 0x2
+
+#define ATV_TOP_COMM_STATE__A 0xC10001
+#define ATV_TOP_COMM_STATE__W 16
+#define ATV_TOP_COMM_STATE__M 0xFFFF
+#define ATV_TOP_COMM_STATE__PRE 0x0
+#define ATV_TOP_COMM_STATE_STATE__B 0
+#define ATV_TOP_COMM_STATE_STATE__W 16
+#define ATV_TOP_COMM_STATE_STATE__M 0xFFFF
+#define ATV_TOP_COMM_STATE_STATE__PRE 0x0
+
+#define ATV_TOP_COMM_MB__A 0xC10002
+#define ATV_TOP_COMM_MB__W 16
+#define ATV_TOP_COMM_MB__M 0xFFFF
+#define ATV_TOP_COMM_MB__PRE 0x0
+#define ATV_TOP_COMM_MB_CTL__B 0
+#define ATV_TOP_COMM_MB_CTL__W 1
+#define ATV_TOP_COMM_MB_CTL__M 0x1
+#define ATV_TOP_COMM_MB_CTL__PRE 0x0
+#define ATV_TOP_COMM_MB_OBS__B 1
+#define ATV_TOP_COMM_MB_OBS__W 1
+#define ATV_TOP_COMM_MB_OBS__M 0x2
+#define ATV_TOP_COMM_MB_OBS__PRE 0x0
+
+#define ATV_TOP_COMM_MB_MUX_CTRL__B 2
+#define ATV_TOP_COMM_MB_MUX_CTRL__W 4
+#define ATV_TOP_COMM_MB_MUX_CTRL__M 0x3C
+#define ATV_TOP_COMM_MB_MUX_CTRL__PRE 0x0
+#define ATV_TOP_COMM_MB_MUX_CTRL_PEAK_S 0x0
+#define ATV_TOP_COMM_MB_MUX_CTRL_VID_GAIN 0x4
+#define ATV_TOP_COMM_MB_MUX_CTRL_CORR_O 0x8
+#define ATV_TOP_COMM_MB_MUX_CTRL_CR_ROT_O 0xC
+#define ATV_TOP_COMM_MB_MUX_CTRL_CR_IIR_IQ 0x10
+#define ATV_TOP_COMM_MB_MUX_CTRL_VIDEO_O 0x14
+#define ATV_TOP_COMM_MB_MUX_CTRL_SIF_O 0x18
+#define ATV_TOP_COMM_MB_MUX_CTRL_SIF2025_O 0x1C
+#define ATV_TOP_COMM_MB_MUX_CTRL_POST_S 0x20
+
+#define ATV_TOP_COMM_MB_MUX_OBS__B 6
+#define ATV_TOP_COMM_MB_MUX_OBS__W 4
+#define ATV_TOP_COMM_MB_MUX_OBS__M 0x3C0
+#define ATV_TOP_COMM_MB_MUX_OBS__PRE 0x0
+#define ATV_TOP_COMM_MB_MUX_OBS_PEAK_S 0x0
+#define ATV_TOP_COMM_MB_MUX_OBS_VID_GAIN 0x40
+#define ATV_TOP_COMM_MB_MUX_OBS_CORR_O 0x80
+#define ATV_TOP_COMM_MB_MUX_OBS_CR_ROT_O 0xC0
+#define ATV_TOP_COMM_MB_MUX_OBS_CR_IIR_IQ 0x100
+#define ATV_TOP_COMM_MB_MUX_OBS_VIDEO_O 0x140
+#define ATV_TOP_COMM_MB_MUX_OBS_SIF_O 0x180
+#define ATV_TOP_COMM_MB_MUX_OBS_SIF2025_O 0x1C0
+#define ATV_TOP_COMM_MB_MUX_OBS_POST_S 0x200
+
+#define ATV_TOP_COMM_INT_REQ__A 0xC10003
+#define ATV_TOP_COMM_INT_REQ__W 16
+#define ATV_TOP_COMM_INT_REQ__M 0xFFFF
+#define ATV_TOP_COMM_INT_REQ__PRE 0x0
+#define ATV_TOP_COMM_INT_STA__A 0xC10005
+#define ATV_TOP_COMM_INT_STA__W 16
+#define ATV_TOP_COMM_INT_STA__M 0xFFFF
+#define ATV_TOP_COMM_INT_STA__PRE 0x0
+
+#define ATV_TOP_COMM_INT_STA_FAGC_STA__B 0
+#define ATV_TOP_COMM_INT_STA_FAGC_STA__W 1
+#define ATV_TOP_COMM_INT_STA_FAGC_STA__M 0x1
+#define ATV_TOP_COMM_INT_STA_FAGC_STA__PRE 0x0
+
+#define ATV_TOP_COMM_INT_STA_OVM_STA__B 1
+#define ATV_TOP_COMM_INT_STA_OVM_STA__W 1
+#define ATV_TOP_COMM_INT_STA_OVM_STA__M 0x2
+#define ATV_TOP_COMM_INT_STA_OVM_STA__PRE 0x0
+
+#define ATV_TOP_COMM_INT_STA_AMPTH_STA__B 2
+#define ATV_TOP_COMM_INT_STA_AMPTH_STA__W 1
+#define ATV_TOP_COMM_INT_STA_AMPTH_STA__M 0x4
+#define ATV_TOP_COMM_INT_STA_AMPTH_STA__PRE 0x0
+
+#define ATV_TOP_COMM_INT_MSK__A 0xC10006
+#define ATV_TOP_COMM_INT_MSK__W 16
+#define ATV_TOP_COMM_INT_MSK__M 0xFFFF
+#define ATV_TOP_COMM_INT_MSK__PRE 0x0
+
+#define ATV_TOP_COMM_INT_MSK_FAGC_MSK__B 0
+#define ATV_TOP_COMM_INT_MSK_FAGC_MSK__W 1
+#define ATV_TOP_COMM_INT_MSK_FAGC_MSK__M 0x1
+#define ATV_TOP_COMM_INT_MSK_FAGC_MSK__PRE 0x0
+
+#define ATV_TOP_COMM_INT_MSK_OVM_MSK__B 1
+#define ATV_TOP_COMM_INT_MSK_OVM_MSK__W 1
+#define ATV_TOP_COMM_INT_MSK_OVM_MSK__M 0x2
+#define ATV_TOP_COMM_INT_MSK_OVM_MSK__PRE 0x0
+
+#define ATV_TOP_COMM_INT_MSK_AMPTH_MSK__B 2
+#define ATV_TOP_COMM_INT_MSK_AMPTH_MSK__W 1
+#define ATV_TOP_COMM_INT_MSK_AMPTH_MSK__M 0x4
+#define ATV_TOP_COMM_INT_MSK_AMPTH_MSK__PRE 0x0
+
+#define ATV_TOP_COMM_INT_STM__A 0xC10007
+#define ATV_TOP_COMM_INT_STM__W 16
+#define ATV_TOP_COMM_INT_STM__M 0xFFFF
+#define ATV_TOP_COMM_INT_STM__PRE 0x0
+
+#define ATV_TOP_COMM_INT_STM_FAGC_STM__B 0
+#define ATV_TOP_COMM_INT_STM_FAGC_STM__W 1
+#define ATV_TOP_COMM_INT_STM_FAGC_STM__M 0x1
+#define ATV_TOP_COMM_INT_STM_FAGC_STM__PRE 0x0
+
+#define ATV_TOP_COMM_INT_STM_OVM_STM__B 1
+#define ATV_TOP_COMM_INT_STM_OVM_STM__W 1
+#define ATV_TOP_COMM_INT_STM_OVM_STM__M 0x2
+#define ATV_TOP_COMM_INT_STM_OVM_STM__PRE 0x0
+
+#define ATV_TOP_COMM_INT_STM_AMPTH_STM__B 2
+#define ATV_TOP_COMM_INT_STM_AMPTH_STM__W 1
+#define ATV_TOP_COMM_INT_STM_AMPTH_STM__M 0x4
+#define ATV_TOP_COMM_INT_STM_AMPTH_STM__PRE 0x0
+
+#define ATV_TOP_COMM_KEY__A 0xC1000F
+#define ATV_TOP_COMM_KEY__W 16
+#define ATV_TOP_COMM_KEY__M 0xFFFF
+#define ATV_TOP_COMM_KEY__PRE 0x0
+
+#define ATV_TOP_COMM_KEY_KEY__B 0
+#define ATV_TOP_COMM_KEY_KEY__W 16
+#define ATV_TOP_COMM_KEY_KEY__M 0xFFFF
+#define ATV_TOP_COMM_KEY_KEY__PRE 0x0
+#define ATV_TOP_COMM_KEY_KEY_KEY 0xFABA
+#define ATV_TOP_COMM_KEY_KEY_MIN 0x0
+#define ATV_TOP_COMM_KEY_KEY_MAX 0xFFFF
+
+#define ATV_TOP_CR_AMP_TH__A 0xC10010
+#define ATV_TOP_CR_AMP_TH__W 8
+#define ATV_TOP_CR_AMP_TH__M 0xFF
+#define ATV_TOP_CR_AMP_TH__PRE 0x8
+#define ATV_TOP_CR_AMP_TH_MN 0x8
+
+#define ATV_TOP_CR_CONT__A 0xC10011
+#define ATV_TOP_CR_CONT__W 9
+#define ATV_TOP_CR_CONT__M 0x1FF
+#define ATV_TOP_CR_CONT__PRE 0x9C
+
+#define ATV_TOP_CR_CONT_CR_P__B 0
+#define ATV_TOP_CR_CONT_CR_P__W 3
+#define ATV_TOP_CR_CONT_CR_P__M 0x7
+#define ATV_TOP_CR_CONT_CR_P__PRE 0x4
+#define ATV_TOP_CR_CONT_CR_P_MN 0x4
+#define ATV_TOP_CR_CONT_CR_P_FM 0x0
+
+#define ATV_TOP_CR_CONT_CR_D__B 3
+#define ATV_TOP_CR_CONT_CR_D__W 3
+#define ATV_TOP_CR_CONT_CR_D__M 0x38
+#define ATV_TOP_CR_CONT_CR_D__PRE 0x18
+#define ATV_TOP_CR_CONT_CR_D_MN 0x18
+#define ATV_TOP_CR_CONT_CR_D_FM 0x0
+
+#define ATV_TOP_CR_CONT_CR_I__B 6
+#define ATV_TOP_CR_CONT_CR_I__W 3
+#define ATV_TOP_CR_CONT_CR_I__M 0x1C0
+#define ATV_TOP_CR_CONT_CR_I__PRE 0x80
+#define ATV_TOP_CR_CONT_CR_I_MN 0x80
+#define ATV_TOP_CR_CONT_CR_I_FM 0x0
+
+#define ATV_TOP_CR_OVM_TH__A 0xC10012
+#define ATV_TOP_CR_OVM_TH__W 8
+#define ATV_TOP_CR_OVM_TH__M 0xFF
+#define ATV_TOP_CR_OVM_TH__PRE 0xA0
+#define ATV_TOP_CR_OVM_TH_MN 0xA0
+#define ATV_TOP_CR_OVM_TH_FM 0x0
+
+#define ATV_TOP_NOISE_TH__A 0xC10013
+#define ATV_TOP_NOISE_TH__W 4
+#define ATV_TOP_NOISE_TH__M 0xF
+#define ATV_TOP_NOISE_TH__PRE 0x8
+#define ATV_TOP_NOISE_TH_MN 0x8
+
+#define ATV_TOP_EQU0__A 0xC10014
+#define ATV_TOP_EQU0__W 9
+#define ATV_TOP_EQU0__M 0x1FF
+#define ATV_TOP_EQU0__PRE 0x1FB
+
+#define ATV_TOP_EQU0_EQU_C0__B 0
+#define ATV_TOP_EQU0_EQU_C0__W 9
+#define ATV_TOP_EQU0_EQU_C0__M 0x1FF
+#define ATV_TOP_EQU0_EQU_C0__PRE 0x1FB
+#define ATV_TOP_EQU0_EQU_C0_MN 0xFB
+
+#define ATV_TOP_EQU1__A 0xC10015
+#define ATV_TOP_EQU1__W 9
+#define ATV_TOP_EQU1__M 0x1FF
+#define ATV_TOP_EQU1__PRE 0x1CE
+
+#define ATV_TOP_EQU1_EQU_C1__B 0
+#define ATV_TOP_EQU1_EQU_C1__W 9
+#define ATV_TOP_EQU1_EQU_C1__M 0x1FF
+#define ATV_TOP_EQU1_EQU_C1__PRE 0x1CE
+#define ATV_TOP_EQU1_EQU_C1_MN 0xCE
+
+#define ATV_TOP_EQU2__A 0xC10016
+#define ATV_TOP_EQU2__W 9
+#define ATV_TOP_EQU2__M 0x1FF
+#define ATV_TOP_EQU2__PRE 0xD2
+
+#define ATV_TOP_EQU2_EQU_C2__B 0
+#define ATV_TOP_EQU2_EQU_C2__W 9
+#define ATV_TOP_EQU2_EQU_C2__M 0x1FF
+#define ATV_TOP_EQU2_EQU_C2__PRE 0xD2
+#define ATV_TOP_EQU2_EQU_C2_MN 0xD2
+
+#define ATV_TOP_EQU3__A 0xC10017
+#define ATV_TOP_EQU3__W 9
+#define ATV_TOP_EQU3__M 0x1FF
+#define ATV_TOP_EQU3__PRE 0x160
+
+#define ATV_TOP_EQU3_EQU_C3__B 0
+#define ATV_TOP_EQU3_EQU_C3__W 9
+#define ATV_TOP_EQU3_EQU_C3__M 0x1FF
+#define ATV_TOP_EQU3_EQU_C3__PRE 0x160
+#define ATV_TOP_EQU3_EQU_C3_MN 0x60
+
+#define ATV_TOP_ROT_MODE__A 0xC10018
+#define ATV_TOP_ROT_MODE__W 1
+#define ATV_TOP_ROT_MODE__M 0x1
+#define ATV_TOP_ROT_MODE__PRE 0x0
+#define ATV_TOP_ROT_MODE_AMPTH_DEPEND 0x0
+#define ATV_TOP_ROT_MODE_ALWAYS 0x1
+
+#define ATV_TOP_MOD_CONTROL__A 0xC10019
+#define ATV_TOP_MOD_CONTROL__W 12
+#define ATV_TOP_MOD_CONTROL__M 0xFFF
+#define ATV_TOP_MOD_CONTROL__PRE 0x5B1
+
+#define ATV_TOP_MOD_CONTROL_MOD_IR__B 0
+#define ATV_TOP_MOD_CONTROL_MOD_IR__W 3
+#define ATV_TOP_MOD_CONTROL_MOD_IR__M 0x7
+#define ATV_TOP_MOD_CONTROL_MOD_IR__PRE 0x1
+#define ATV_TOP_MOD_CONTROL_MOD_IR_MN 0x1
+#define ATV_TOP_MOD_CONTROL_MOD_IR_FM 0x0
+
+#define ATV_TOP_MOD_CONTROL_MOD_IF__B 3
+#define ATV_TOP_MOD_CONTROL_MOD_IF__W 4
+#define ATV_TOP_MOD_CONTROL_MOD_IF__M 0x78
+#define ATV_TOP_MOD_CONTROL_MOD_IF__PRE 0x30
+#define ATV_TOP_MOD_CONTROL_MOD_IF_MN 0x30
+#define ATV_TOP_MOD_CONTROL_MOD_IF_FM 0x0
+
+#define ATV_TOP_MOD_CONTROL_MOD_MODE__B 7
+#define ATV_TOP_MOD_CONTROL_MOD_MODE__W 1
+#define ATV_TOP_MOD_CONTROL_MOD_MODE__M 0x80
+#define ATV_TOP_MOD_CONTROL_MOD_MODE__PRE 0x80
+#define ATV_TOP_MOD_CONTROL_MOD_MODE_RISE 0x0
+#define ATV_TOP_MOD_CONTROL_MOD_MODE_RISE_FALL 0x80
+
+#define ATV_TOP_MOD_CONTROL_MOD_TH__B 8
+#define ATV_TOP_MOD_CONTROL_MOD_TH__W 4
+#define ATV_TOP_MOD_CONTROL_MOD_TH__M 0xF00
+#define ATV_TOP_MOD_CONTROL_MOD_TH__PRE 0x500
+#define ATV_TOP_MOD_CONTROL_MOD_TH_MN 0x500
+#define ATV_TOP_MOD_CONTROL_MOD_TH_FM 0x0
+
+#define ATV_TOP_STD__A 0xC1001A
+#define ATV_TOP_STD__W 2
+#define ATV_TOP_STD__M 0x3
+#define ATV_TOP_STD__PRE 0x0
+
+#define ATV_TOP_STD_MODE__B 0
+#define ATV_TOP_STD_MODE__W 1
+#define ATV_TOP_STD_MODE__M 0x1
+#define ATV_TOP_STD_MODE__PRE 0x0
+#define ATV_TOP_STD_MODE_MN 0x0
+#define ATV_TOP_STD_MODE_FM 0x1
+
+#define ATV_TOP_STD_VID_POL__B 1
+#define ATV_TOP_STD_VID_POL__W 1
+#define ATV_TOP_STD_VID_POL__M 0x2
+#define ATV_TOP_STD_VID_POL__PRE 0x0
+#define ATV_TOP_STD_VID_POL_NEG 0x0
+#define ATV_TOP_STD_VID_POL_POS 0x2
+
+#define ATV_TOP_VID_AMP__A 0xC1001B
+#define ATV_TOP_VID_AMP__W 12
+#define ATV_TOP_VID_AMP__M 0xFFF
+#define ATV_TOP_VID_AMP__PRE 0x380
+#define ATV_TOP_VID_AMP_MN 0x380
+#define ATV_TOP_VID_AMP_FM 0x0
+
+#define ATV_TOP_VID_PEAK__A 0xC1001C
+#define ATV_TOP_VID_PEAK__W 5
+#define ATV_TOP_VID_PEAK__M 0x1F
+#define ATV_TOP_VID_PEAK__PRE 0x1
+
+#define ATV_TOP_FAGC_TH__A 0xC1001D
+#define ATV_TOP_FAGC_TH__W 11
+#define ATV_TOP_FAGC_TH__M 0x7FF
+#define ATV_TOP_FAGC_TH__PRE 0x2B2
+#define ATV_TOP_FAGC_TH_MN 0x2B2
+
+#define ATV_TOP_SYNC_SLICE__A 0xC1001E
+#define ATV_TOP_SYNC_SLICE__W 11
+#define ATV_TOP_SYNC_SLICE__M 0x7FF
+#define ATV_TOP_SYNC_SLICE__PRE 0x243
+#define ATV_TOP_SYNC_SLICE_MN 0x243
+
+#define ATV_TOP_SIF_GAIN__A 0xC1001F
+#define ATV_TOP_SIF_GAIN__W 11
+#define ATV_TOP_SIF_GAIN__M 0x7FF
+#define ATV_TOP_SIF_GAIN__PRE 0x0
+
+#define ATV_TOP_SIF_TP__A 0xC10020
+#define ATV_TOP_SIF_TP__W 6
+#define ATV_TOP_SIF_TP__M 0x3F
+#define ATV_TOP_SIF_TP__PRE 0x0
+
+#define ATV_TOP_MOD_ACCU__A 0xC10021
+#define ATV_TOP_MOD_ACCU__W 10
+#define ATV_TOP_MOD_ACCU__M 0x3FF
+#define ATV_TOP_MOD_ACCU__PRE 0x0
+
+#define ATV_TOP_CR_FREQ__A 0xC10022
+#define ATV_TOP_CR_FREQ__W 8
+#define ATV_TOP_CR_FREQ__M 0xFF
+#define ATV_TOP_CR_FREQ__PRE 0x0
+
+#define ATV_TOP_CR_PHAD__A 0xC10023
+#define ATV_TOP_CR_PHAD__W 12
+#define ATV_TOP_CR_PHAD__M 0xFFF
+#define ATV_TOP_CR_PHAD__PRE 0x0
+
+#define ATV_TOP_AF_SIF_ATT__A 0xC10024
+#define ATV_TOP_AF_SIF_ATT__W 2
+#define ATV_TOP_AF_SIF_ATT__M 0x3
+#define ATV_TOP_AF_SIF_ATT__PRE 0x0
+#define ATV_TOP_AF_SIF_ATT_0DB 0x0
+#define ATV_TOP_AF_SIF_ATT_M3DB 0x1
+#define ATV_TOP_AF_SIF_ATT_M6DB 0x2
+#define ATV_TOP_AF_SIF_ATT_M9DB 0x3
+
+#define ATV_TOP_STDBY__A 0xC10025
+#define ATV_TOP_STDBY__W 2
+#define ATV_TOP_STDBY__M 0x3
+#define ATV_TOP_STDBY__PRE 0x1
+
+#define ATV_TOP_STDBY_SIF_STDBY__B 0
+#define ATV_TOP_STDBY_SIF_STDBY__W 1
+#define ATV_TOP_STDBY_SIF_STDBY__M 0x1
+#define ATV_TOP_STDBY_SIF_STDBY__PRE 0x1
+#define ATV_TOP_STDBY_SIF_STDBY_ACTIVE 0x0
+#define ATV_TOP_STDBY_SIF_STDBY_STANDBY 0x1
+
+#define ATV_TOP_STDBY_CVBS_STDBY__B 1
+#define ATV_TOP_STDBY_CVBS_STDBY__W 1
+#define ATV_TOP_STDBY_CVBS_STDBY__M 0x2
+#define ATV_TOP_STDBY_CVBS_STDBY__PRE 0x0
+#define ATV_TOP_STDBY_CVBS_STDBY_A1_ACTIVE 0x0
+#define ATV_TOP_STDBY_CVBS_STDBY_A1_STANDBY 0x2
+#define ATV_TOP_STDBY_CVBS_STDBY_A2_ACTIVE 0x2
+#define ATV_TOP_STDBY_CVBS_STDBY_A2_STANDBY 0x0
+
+#define ATV_TOP_OVERRIDE_SFR__A 0xC10026
+#define ATV_TOP_OVERRIDE_SFR__W 1
+#define ATV_TOP_OVERRIDE_SFR__M 0x1
+#define ATV_TOP_OVERRIDE_SFR__PRE 0x0
+#define ATV_TOP_OVERRIDE_SFR_ACTIVE 0x0
+#define ATV_TOP_OVERRIDE_SFR_OVERRIDE 0x1
+
+#define ATV_TOP_SFR_VID_GAIN__A 0xC10027
+#define ATV_TOP_SFR_VID_GAIN__W 16
+#define ATV_TOP_SFR_VID_GAIN__M 0xFFFF
+#define ATV_TOP_SFR_VID_GAIN__PRE 0x0
+
+#define ATV_TOP_SFR_AGC_RES__A 0xC10028
+#define ATV_TOP_SFR_AGC_RES__W 5
+#define ATV_TOP_SFR_AGC_RES__M 0x1F
+#define ATV_TOP_SFR_AGC_RES__PRE 0x0
+
+#define ATV_TOP_OVM_COMP__A 0xC10029
+#define ATV_TOP_OVM_COMP__W 12
+#define ATV_TOP_OVM_COMP__M 0xFFF
+#define ATV_TOP_OVM_COMP__PRE 0x0
+#define ATV_TOP_OUT_CONF__A 0xC1002A
+#define ATV_TOP_OUT_CONF__W 5
+#define ATV_TOP_OUT_CONF__M 0x1F
+#define ATV_TOP_OUT_CONF__PRE 0x0
+
+#define ATV_TOP_OUT_CONF_CVBS_DAC_SIGN__B 0
+#define ATV_TOP_OUT_CONF_CVBS_DAC_SIGN__W 1
+#define ATV_TOP_OUT_CONF_CVBS_DAC_SIGN__M 0x1
+#define ATV_TOP_OUT_CONF_CVBS_DAC_SIGN__PRE 0x0
+#define ATV_TOP_OUT_CONF_CVBS_DAC_SIGN_UNSIGNED 0x0
+#define ATV_TOP_OUT_CONF_CVBS_DAC_SIGN_SIGNED 0x1
+
+#define ATV_TOP_OUT_CONF_SIF_DAC_SIGN__B 1
+#define ATV_TOP_OUT_CONF_SIF_DAC_SIGN__W 1
+#define ATV_TOP_OUT_CONF_SIF_DAC_SIGN__M 0x2
+#define ATV_TOP_OUT_CONF_SIF_DAC_SIGN__PRE 0x0
+#define ATV_TOP_OUT_CONF_SIF_DAC_SIGN_UNSIGNED 0x0
+#define ATV_TOP_OUT_CONF_SIF_DAC_SIGN_SIGNED 0x2
+
+#define ATV_TOP_OUT_CONF_SIF20_SIGN__B 2
+#define ATV_TOP_OUT_CONF_SIF20_SIGN__W 1
+#define ATV_TOP_OUT_CONF_SIF20_SIGN__M 0x4
+#define ATV_TOP_OUT_CONF_SIF20_SIGN__PRE 0x0
+#define ATV_TOP_OUT_CONF_SIF20_SIGN_UNSIGNED 0x0
+#define ATV_TOP_OUT_CONF_SIF20_SIGN_SIGNED 0x4
+
+#define ATV_TOP_OUT_CONF_CVBS_DAC_BR__B 3
+#define ATV_TOP_OUT_CONF_CVBS_DAC_BR__W 1
+#define ATV_TOP_OUT_CONF_CVBS_DAC_BR__M 0x8
+#define ATV_TOP_OUT_CONF_CVBS_DAC_BR__PRE 0x0
+#define ATV_TOP_OUT_CONF_CVBS_DAC_BR_NORMAL 0x0
+#define ATV_TOP_OUT_CONF_CVBS_DAC_BR_BITREVERSED 0x8
+
+#define ATV_TOP_OUT_CONF_SIF_DAC_BR__B 4
+#define ATV_TOP_OUT_CONF_SIF_DAC_BR__W 1
+#define ATV_TOP_OUT_CONF_SIF_DAC_BR__M 0x10
+#define ATV_TOP_OUT_CONF_SIF_DAC_BR__PRE 0x0
+#define ATV_TOP_OUT_CONF_SIF_DAC_BR_NORMAL 0x0
+#define ATV_TOP_OUT_CONF_SIF_DAC_BR_BITREVERSED 0x10
+
+#define ATV_AFT_COMM_EXEC__A 0xFF0000
+#define ATV_AFT_COMM_EXEC__W 2
+#define ATV_AFT_COMM_EXEC__M 0x3
+#define ATV_AFT_COMM_EXEC__PRE 0x0
+#define ATV_AFT_COMM_EXEC_STOP 0x0
+#define ATV_AFT_COMM_EXEC_ACTIVE 0x1
+#define ATV_AFT_COMM_EXEC_HOLD 0x2
+
+#define ATV_AFT_TST__A 0xFF0010
+#define ATV_AFT_TST__W 4
+#define ATV_AFT_TST__M 0xF
+#define ATV_AFT_TST__PRE 0x0
+
+#define AUD_COMM_EXEC__A 0x1000000
+#define AUD_COMM_EXEC__W 2
+#define AUD_COMM_EXEC__M 0x3
+#define AUD_COMM_EXEC__PRE 0x0
+#define AUD_COMM_EXEC_STOP 0x0
+#define AUD_COMM_EXEC_ACTIVE 0x1
+
+#define AUD_COMM_MB__A 0x1000002
+#define AUD_COMM_MB__W 16
+#define AUD_COMM_MB__M 0xFFFF
+#define AUD_COMM_MB__PRE 0x0
+
+#define AUD_TOP_COMM_EXEC__A 0x1010000
+#define AUD_TOP_COMM_EXEC__W 2
+#define AUD_TOP_COMM_EXEC__M 0x3
+#define AUD_TOP_COMM_EXEC__PRE 0x0
+#define AUD_TOP_COMM_EXEC_STOP 0x0
+#define AUD_TOP_COMM_EXEC_ACTIVE 0x1
+
+#define AUD_TOP_COMM_MB__A 0x1010002
+#define AUD_TOP_COMM_MB__W 16
+#define AUD_TOP_COMM_MB__M 0xFFFF
+#define AUD_TOP_COMM_MB__PRE 0x0
+
+#define AUD_TOP_COMM_MB_CTL__B 0
+#define AUD_TOP_COMM_MB_CTL__W 1
+#define AUD_TOP_COMM_MB_CTL__M 0x1
+#define AUD_TOP_COMM_MB_CTL__PRE 0x0
+#define AUD_TOP_COMM_MB_CTL_CTR_OFF 0x0
+#define AUD_TOP_COMM_MB_CTL_CTR_ON 0x1
+
+#define AUD_TOP_COMM_MB_OBS__B 1
+#define AUD_TOP_COMM_MB_OBS__W 1
+#define AUD_TOP_COMM_MB_OBS__M 0x2
+#define AUD_TOP_COMM_MB_OBS__PRE 0x0
+#define AUD_TOP_COMM_MB_OBS_OBS_OFF 0x0
+#define AUD_TOP_COMM_MB_OBS_OBS_ON 0x2
+
+#define AUD_TOP_COMM_MB_MUX_CTRL__B 2
+#define AUD_TOP_COMM_MB_MUX_CTRL__W 4
+#define AUD_TOP_COMM_MB_MUX_CTRL__M 0x3C
+#define AUD_TOP_COMM_MB_MUX_CTRL__PRE 0x0
+#define AUD_TOP_COMM_MB_MUX_CTRL_DEMOD_TBO 0x0
+#define AUD_TOP_COMM_MB_MUX_CTRL_XDFP_IRQS 0x4
+#define AUD_TOP_COMM_MB_MUX_CTRL_OBSERVEPC 0x8
+#define AUD_TOP_COMM_MB_MUX_CTRL_SAOUT 0xC
+#define AUD_TOP_COMM_MB_MUX_CTRL_XDFP_SCHEQ 0x10
+
+#define AUD_TOP_COMM_MB_MUX_OBS__B 6
+#define AUD_TOP_COMM_MB_MUX_OBS__W 4
+#define AUD_TOP_COMM_MB_MUX_OBS__M 0x3C0
+#define AUD_TOP_COMM_MB_MUX_OBS__PRE 0x0
+#define AUD_TOP_COMM_MB_MUX_OBS_DEMOD_TBO 0x0
+#define AUD_TOP_COMM_MB_MUX_OBS_XDFP_IRQS 0x40
+#define AUD_TOP_COMM_MB_MUX_OBS_OBSERVEPC 0x80
+#define AUD_TOP_COMM_MB_MUX_OBS_SAOUT 0xC0
+#define AUD_TOP_COMM_MB_MUX_OBS_XDFP_SCHEQ 0x100
+
+#define AUD_TOP_TR_MDE__A 0x1010010
+#define AUD_TOP_TR_MDE__W 5
+#define AUD_TOP_TR_MDE__M 0x1F
+#define AUD_TOP_TR_MDE__PRE 0x18
+
+#define AUD_TOP_TR_MDE_FIFO_SIZE__B 0
+#define AUD_TOP_TR_MDE_FIFO_SIZE__W 4
+#define AUD_TOP_TR_MDE_FIFO_SIZE__M 0xF
+#define AUD_TOP_TR_MDE_FIFO_SIZE__PRE 0x8
+
+#define AUD_TOP_TR_MDE_RD_LOCK__B 4
+#define AUD_TOP_TR_MDE_RD_LOCK__W 1
+#define AUD_TOP_TR_MDE_RD_LOCK__M 0x10
+#define AUD_TOP_TR_MDE_RD_LOCK__PRE 0x10
+#define AUD_TOP_TR_MDE_RD_LOCK_NORMAL 0x0
+#define AUD_TOP_TR_MDE_RD_LOCK_LOCK 0x10
+
+#define AUD_TOP_TR_CTR__A 0x1010011
+#define AUD_TOP_TR_CTR__W 4
+#define AUD_TOP_TR_CTR__M 0xF
+#define AUD_TOP_TR_CTR__PRE 0x0
+
+#define AUD_TOP_TR_CTR_FIFO_RD_RDY__B 0
+#define AUD_TOP_TR_CTR_FIFO_RD_RDY__W 1
+#define AUD_TOP_TR_CTR_FIFO_RD_RDY__M 0x1
+#define AUD_TOP_TR_CTR_FIFO_RD_RDY__PRE 0x0
+#define AUD_TOP_TR_CTR_FIFO_RD_RDY_NOT_READY 0x0
+#define AUD_TOP_TR_CTR_FIFO_RD_RDY_READY 0x1
+
+#define AUD_TOP_TR_CTR_FIFO_EMPTY__B 1
+#define AUD_TOP_TR_CTR_FIFO_EMPTY__W 1
+#define AUD_TOP_TR_CTR_FIFO_EMPTY__M 0x2
+#define AUD_TOP_TR_CTR_FIFO_EMPTY__PRE 0x0
+#define AUD_TOP_TR_CTR_FIFO_EMPTY_NOT_EMPTY 0x0
+#define AUD_TOP_TR_CTR_FIFO_EMPTY_EMPTY 0x2
+
+#define AUD_TOP_TR_CTR_FIFO_LOCK__B 2
+#define AUD_TOP_TR_CTR_FIFO_LOCK__W 1
+#define AUD_TOP_TR_CTR_FIFO_LOCK__M 0x4
+#define AUD_TOP_TR_CTR_FIFO_LOCK__PRE 0x0
+#define AUD_TOP_TR_CTR_FIFO_LOCK_UNLOCKED 0x0
+#define AUD_TOP_TR_CTR_FIFO_LOCK_LOCKED 0x4
+
+#define AUD_TOP_TR_CTR_FIFO_FULL__B 3
+#define AUD_TOP_TR_CTR_FIFO_FULL__W 1
+#define AUD_TOP_TR_CTR_FIFO_FULL__M 0x8
+#define AUD_TOP_TR_CTR_FIFO_FULL__PRE 0x0
+#define AUD_TOP_TR_CTR_FIFO_FULL_EMPTY 0x0
+#define AUD_TOP_TR_CTR_FIFO_FULL_FULL 0x8
+
+#define AUD_TOP_TR_RD_REG__A 0x1010012
+#define AUD_TOP_TR_RD_REG__W 16
+#define AUD_TOP_TR_RD_REG__M 0xFFFF
+#define AUD_TOP_TR_RD_REG__PRE 0x0
+
+#define AUD_TOP_TR_RD_REG_RESULT__B 0
+#define AUD_TOP_TR_RD_REG_RESULT__W 16
+#define AUD_TOP_TR_RD_REG_RESULT__M 0xFFFF
+#define AUD_TOP_TR_RD_REG_RESULT__PRE 0x0
+
+#define AUD_TOP_TR_TIMER__A 0x1010013
+#define AUD_TOP_TR_TIMER__W 16
+#define AUD_TOP_TR_TIMER__M 0xFFFF
+#define AUD_TOP_TR_TIMER__PRE 0x0
+
+#define AUD_TOP_TR_TIMER_CYCLES__B 0
+#define AUD_TOP_TR_TIMER_CYCLES__W 16
+#define AUD_TOP_TR_TIMER_CYCLES__M 0xFFFF
+#define AUD_TOP_TR_TIMER_CYCLES__PRE 0x0
+
+#define AUD_TOP_DEMOD_TBO_SEL__A 0x1010014
+#define AUD_TOP_DEMOD_TBO_SEL__W 5
+#define AUD_TOP_DEMOD_TBO_SEL__M 0x1F
+#define AUD_TOP_DEMOD_TBO_SEL__PRE 0x0
+
+#define AUD_DEM_WR_MODUS__A 0x1030030
+#define AUD_DEM_WR_MODUS__W 16
+#define AUD_DEM_WR_MODUS__M 0xFFFF
+#define AUD_DEM_WR_MODUS__PRE 0x0
+
+#define AUD_DEM_WR_MODUS_MOD_ASS__B 0
+#define AUD_DEM_WR_MODUS_MOD_ASS__W 1
+#define AUD_DEM_WR_MODUS_MOD_ASS__M 0x1
+#define AUD_DEM_WR_MODUS_MOD_ASS__PRE 0x0
+#define AUD_DEM_WR_MODUS_MOD_ASS_OFF 0x0
+#define AUD_DEM_WR_MODUS_MOD_ASS_ON 0x1
+
+#define AUD_DEM_WR_MODUS_MOD_STATINTERR__B 1
+#define AUD_DEM_WR_MODUS_MOD_STATINTERR__W 1
+#define AUD_DEM_WR_MODUS_MOD_STATINTERR__M 0x2
+#define AUD_DEM_WR_MODUS_MOD_STATINTERR__PRE 0x0
+#define AUD_DEM_WR_MODUS_MOD_STATINTERR_DISABLE 0x0
+#define AUD_DEM_WR_MODUS_MOD_STATINTERR_ENABLE 0x2
+
+#define AUD_DEM_WR_MODUS_MOD_DIS_STD_CHG__B 2
+#define AUD_DEM_WR_MODUS_MOD_DIS_STD_CHG__W 1
+#define AUD_DEM_WR_MODUS_MOD_DIS_STD_CHG__M 0x4
+#define AUD_DEM_WR_MODUS_MOD_DIS_STD_CHG__PRE 0x0
+#define AUD_DEM_WR_MODUS_MOD_DIS_STD_CHG_ENABLED 0x0
+#define AUD_DEM_WR_MODUS_MOD_DIS_STD_CHG_DISABLED 0x4
+
+#define AUD_DEM_WR_MODUS_MOD_HDEV_A__B 8
+#define AUD_DEM_WR_MODUS_MOD_HDEV_A__W 1
+#define AUD_DEM_WR_MODUS_MOD_HDEV_A__M 0x100
+#define AUD_DEM_WR_MODUS_MOD_HDEV_A__PRE 0x0
+#define AUD_DEM_WR_MODUS_MOD_HDEV_A_NORMAL 0x0
+#define AUD_DEM_WR_MODUS_MOD_HDEV_A_HIGH_DEVIATION 0x100
+
+#define AUD_DEM_WR_MODUS_MOD_CM_A__B 9
+#define AUD_DEM_WR_MODUS_MOD_CM_A__W 1
+#define AUD_DEM_WR_MODUS_MOD_CM_A__M 0x200
+#define AUD_DEM_WR_MODUS_MOD_CM_A__PRE 0x0
+#define AUD_DEM_WR_MODUS_MOD_CM_A_MUTE 0x0
+#define AUD_DEM_WR_MODUS_MOD_CM_A_NOISE 0x200
+
+#define AUD_DEM_WR_MODUS_MOD_CM_B__B 10
+#define AUD_DEM_WR_MODUS_MOD_CM_B__W 1
+#define AUD_DEM_WR_MODUS_MOD_CM_B__M 0x400
+#define AUD_DEM_WR_MODUS_MOD_CM_B__PRE 0x0
+#define AUD_DEM_WR_MODUS_MOD_CM_B_MUTE 0x0
+#define AUD_DEM_WR_MODUS_MOD_CM_B_NOISE 0x400
+
+#define AUD_DEM_WR_MODUS_MOD_FMRADIO__B 11
+#define AUD_DEM_WR_MODUS_MOD_FMRADIO__W 1
+#define AUD_DEM_WR_MODUS_MOD_FMRADIO__M 0x800
+#define AUD_DEM_WR_MODUS_MOD_FMRADIO__PRE 0x0
+#define AUD_DEM_WR_MODUS_MOD_FMRADIO_US_75U 0x0
+#define AUD_DEM_WR_MODUS_MOD_FMRADIO_EU_50U 0x800
+
+#define AUD_DEM_WR_MODUS_MOD_6_5MHZ__B 12
+#define AUD_DEM_WR_MODUS_MOD_6_5MHZ__W 1
+#define AUD_DEM_WR_MODUS_MOD_6_5MHZ__M 0x1000
+#define AUD_DEM_WR_MODUS_MOD_6_5MHZ__PRE 0x0
+#define AUD_DEM_WR_MODUS_MOD_6_5MHZ_SECAM 0x0
+#define AUD_DEM_WR_MODUS_MOD_6_5MHZ_D_K 0x1000
+
+#define AUD_DEM_WR_MODUS_MOD_4_5MHZ__B 13
+#define AUD_DEM_WR_MODUS_MOD_4_5MHZ__W 2
+#define AUD_DEM_WR_MODUS_MOD_4_5MHZ__M 0x6000
+#define AUD_DEM_WR_MODUS_MOD_4_5MHZ__PRE 0x0
+#define AUD_DEM_WR_MODUS_MOD_4_5MHZ_M_KOREA 0x0
+#define AUD_DEM_WR_MODUS_MOD_4_5MHZ_M_BTSC 0x2000
+#define AUD_DEM_WR_MODUS_MOD_4_5MHZ_M_EIAJ 0x4000
+#define AUD_DEM_WR_MODUS_MOD_4_5MHZ_CHROMA 0x6000
+
+#define AUD_DEM_WR_MODUS_MOD_BTSC__B 15
+#define AUD_DEM_WR_MODUS_MOD_BTSC__W 1
+#define AUD_DEM_WR_MODUS_MOD_BTSC__M 0x8000
+#define AUD_DEM_WR_MODUS_MOD_BTSC__PRE 0x0
+#define AUD_DEM_WR_MODUS_MOD_BTSC_BTSC_STEREO 0x0
+#define AUD_DEM_WR_MODUS_MOD_BTSC_BTSC_SAP 0x8000
+
+#define AUD_DEM_WR_STANDARD_SEL__A 0x1030020
+#define AUD_DEM_WR_STANDARD_SEL__W 16
+#define AUD_DEM_WR_STANDARD_SEL__M 0xFFFF
+#define AUD_DEM_WR_STANDARD_SEL__PRE 0x0
+
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL__B 0
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL__W 12
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL__M 0xFFF
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL__PRE 0x0
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_AUTO 0x1
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_M_KOREA 0x2
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_BG_FM 0x3
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_D_K1 0x4
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_D_K2 0x5
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_D_K3 0x7
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_BG_NICAM_FM 0x8
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_L_NICAM_AM 0x9
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_I_NICAM_FM 0xA
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_D_K_NICAM_FM 0xB
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_BTSC_STEREO 0x20
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_BTSC_SAP 0x21
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_EIA_J 0x30
+#define AUD_DEM_WR_STANDARD_SEL_STD_SEL_FM_RADIO 0x40
+
+#define AUD_DEM_RD_STANDARD_RES__A 0x102007E
+#define AUD_DEM_RD_STANDARD_RES__W 16
+#define AUD_DEM_RD_STANDARD_RES__M 0xFFFF
+#define AUD_DEM_RD_STANDARD_RES__PRE 0x0
+
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT__B 0
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT__W 16
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT__M 0xFFFF
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT__PRE 0x0
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_NO_SOUND_STANDARD 0x0
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_NTSC_M_DUAL_CARRIER_FM 0x2
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_B_G_DUAL_CARRIER_FM 0x3
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_D_K1_DUAL_CARRIER_FM 0x4
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_D_K2_DUAL_CARRIER_FM 0x5
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_D_K3_DUAL_CARRIER_FM 0x7
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_B_G_NICAM_FM 0x8
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_L_NICAM_AM 0x9
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_I_NICAM_FM 0xA
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_D_K_NICAM_FM 0xB
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_BTSC_STEREO 0x20
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_BTSC_MONO_SAP 0x21
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_NTSC_EIA_J 0x30
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_FM_RADIO 0x40
+#define AUD_DEM_RD_STANDARD_RES_STD_RESULT_DETECTION_STILL_ACTIVE 0x7FF
+
+#define AUD_DEM_RD_STATUS__A 0x1020200
+#define AUD_DEM_RD_STATUS__W 16
+#define AUD_DEM_RD_STATUS__M 0xFFFF
+#define AUD_DEM_RD_STATUS__PRE 0x0
+
+#define AUD_DEM_RD_STATUS_STAT_NEW_RDS__B 0
+#define AUD_DEM_RD_STATUS_STAT_NEW_RDS__W 1
+#define AUD_DEM_RD_STATUS_STAT_NEW_RDS__M 0x1
+#define AUD_DEM_RD_STATUS_STAT_NEW_RDS__PRE 0x0
+#define AUD_DEM_RD_STATUS_STAT_NEW_RDS_NO_RDS_DATA 0x0
+#define AUD_DEM_RD_STATUS_STAT_NEW_RDS_NEW_RDS_DATA 0x1
+
+#define AUD_DEM_RD_STATUS_STAT_CARR_A__B 1
+#define AUD_DEM_RD_STATUS_STAT_CARR_A__W 1
+#define AUD_DEM_RD_STATUS_STAT_CARR_A__M 0x2
+#define AUD_DEM_RD_STATUS_STAT_CARR_A__PRE 0x0
+#define AUD_DEM_RD_STATUS_STAT_CARR_A_DETECTED 0x0
+#define AUD_DEM_RD_STATUS_STAT_CARR_A_NOT_DETECTED 0x2
+
+#define AUD_DEM_RD_STATUS_STAT_CARR_B__B 2
+#define AUD_DEM_RD_STATUS_STAT_CARR_B__W 1
+#define AUD_DEM_RD_STATUS_STAT_CARR_B__M 0x4
+#define AUD_DEM_RD_STATUS_STAT_CARR_B__PRE 0x0
+#define AUD_DEM_RD_STATUS_STAT_CARR_B_DETECTED 0x0
+#define AUD_DEM_RD_STATUS_STAT_CARR_B_NOT_DETECTED 0x4
+
+#define AUD_DEM_RD_STATUS_STAT_NICAM__B 5
+#define AUD_DEM_RD_STATUS_STAT_NICAM__W 1
+#define AUD_DEM_RD_STATUS_STAT_NICAM__M 0x20
+#define AUD_DEM_RD_STATUS_STAT_NICAM__PRE 0x0
+#define AUD_DEM_RD_STATUS_STAT_NICAM_NO_NICAM 0x0
+#define AUD_DEM_RD_STATUS_STAT_NICAM_NICAM_DETECTED 0x20
+
+#define AUD_DEM_RD_STATUS_STAT_STEREO__B 6
+#define AUD_DEM_RD_STATUS_STAT_STEREO__W 1
+#define AUD_DEM_RD_STATUS_STAT_STEREO__M 0x40
+#define AUD_DEM_RD_STATUS_STAT_STEREO__PRE 0x0
+#define AUD_DEM_RD_STATUS_STAT_STEREO_NO_STEREO 0x0
+#define AUD_DEM_RD_STATUS_STAT_STEREO_STEREO 0x40
+
+#define AUD_DEM_RD_STATUS_STAT_INDEP_MONO__B 7
+#define AUD_DEM_RD_STATUS_STAT_INDEP_MONO__W 1
+#define AUD_DEM_RD_STATUS_STAT_INDEP_MONO__M 0x80
+#define AUD_DEM_RD_STATUS_STAT_INDEP_MONO__PRE 0x0
+#define AUD_DEM_RD_STATUS_STAT_INDEP_MONO_DEPENDENT_FM_MONO_PROGRAM 0x0
+#define AUD_DEM_RD_STATUS_STAT_INDEP_MONO_INDEPENDENT_FM_MONO_PROGRAM 0x80
+
+#define AUD_DEM_RD_STATUS_STAT_BIL_OR_SAP__B 8
+#define AUD_DEM_RD_STATUS_STAT_BIL_OR_SAP__W 1
+#define AUD_DEM_RD_STATUS_STAT_BIL_OR_SAP__M 0x100
+#define AUD_DEM_RD_STATUS_STAT_BIL_OR_SAP__PRE 0x0
+#define AUD_DEM_RD_STATUS_STAT_BIL_OR_SAP_NO_SAP 0x0
+#define AUD_DEM_RD_STATUS_STAT_BIL_OR_SAP_SAP 0x100
+
+#define AUD_DEM_RD_STATUS_BAD_NICAM__B 9
+#define AUD_DEM_RD_STATUS_BAD_NICAM__W 1
+#define AUD_DEM_RD_STATUS_BAD_NICAM__M 0x200
+#define AUD_DEM_RD_STATUS_BAD_NICAM__PRE 0x0
+#define AUD_DEM_RD_STATUS_BAD_NICAM_OK 0x0
+#define AUD_DEM_RD_STATUS_BAD_NICAM_BAD 0x200
+
+#define AUD_DEM_RD_RDS_ARRAY_CNT__A 0x102020F
+#define AUD_DEM_RD_RDS_ARRAY_CNT__W 12
+#define AUD_DEM_RD_RDS_ARRAY_CNT__M 0xFFF
+#define AUD_DEM_RD_RDS_ARRAY_CNT__PRE 0x0
+
+#define AUD_DEM_RD_RDS_ARRAY_CNT_RDS_ARRAY_CT__B 0
+#define AUD_DEM_RD_RDS_ARRAY_CNT_RDS_ARRAY_CT__W 12
+#define AUD_DEM_RD_RDS_ARRAY_CNT_RDS_ARRAY_CT__M 0xFFF
+#define AUD_DEM_RD_RDS_ARRAY_CNT_RDS_ARRAY_CT__PRE 0x0
+#define AUD_DEM_RD_RDS_ARRAY_CNT_RDS_ARRAY_CT_RDS_DATA_NOT_VALID 0xFFF
+
+#define AUD_DEM_RD_RDS_DATA__A 0x1020210
+#define AUD_DEM_RD_RDS_DATA__W 12
+#define AUD_DEM_RD_RDS_DATA__M 0xFFF
+#define AUD_DEM_RD_RDS_DATA__PRE 0x0
+
+#define AUD_DSP_WR_FM_PRESC__A 0x105000E
+#define AUD_DSP_WR_FM_PRESC__W 16
+#define AUD_DSP_WR_FM_PRESC__M 0xFFFF
+#define AUD_DSP_WR_FM_PRESC__PRE 0x0
+
+#define AUD_DSP_WR_FM_PRESC_FM_AM_PRESC__B 8
+#define AUD_DSP_WR_FM_PRESC_FM_AM_PRESC__W 8
+#define AUD_DSP_WR_FM_PRESC_FM_AM_PRESC__M 0xFF00
+#define AUD_DSP_WR_FM_PRESC_FM_AM_PRESC__PRE 0x0
+#define AUD_DSP_WR_FM_PRESC_FM_AM_PRESC_28_KHZ_FM_DEVIATION 0x7F00
+#define AUD_DSP_WR_FM_PRESC_FM_AM_PRESC_50_KHZ_FM_DEVIATION 0x4800
+#define AUD_DSP_WR_FM_PRESC_FM_AM_PRESC_75_KHZ_FM_DEVIATION 0x3000
+#define AUD_DSP_WR_FM_PRESC_FM_AM_PRESC_100_KHZ_FM_DEVIATION 0x2400
+#define AUD_DSP_WR_FM_PRESC_FM_AM_PRESC_150_KHZ_FM_DEVIATION 0x1800
+#define AUD_DSP_WR_FM_PRESC_FM_AM_PRESC_180_KHZ_FM_DEVIATION 0x1300
+#define AUD_DSP_WR_FM_PRESC_FM_AM_PRESC_380_KHZ_FM_DEVIATION 0x900
+
+#define AUD_DSP_WR_NICAM_PRESC__A 0x1050010
+#define AUD_DSP_WR_NICAM_PRESC__W 16
+#define AUD_DSP_WR_NICAM_PRESC__M 0xFFFF
+#define AUD_DSP_WR_NICAM_PRESC__PRE 0x0
+#define AUD_DSP_WR_VOLUME__A 0x1050000
+#define AUD_DSP_WR_VOLUME__W 16
+#define AUD_DSP_WR_VOLUME__M 0xFFFF
+#define AUD_DSP_WR_VOLUME__PRE 0x0
+
+#define AUD_DSP_WR_VOLUME_VOL_MAIN__B 8
+#define AUD_DSP_WR_VOLUME_VOL_MAIN__W 8
+#define AUD_DSP_WR_VOLUME_VOL_MAIN__M 0xFF00
+#define AUD_DSP_WR_VOLUME_VOL_MAIN__PRE 0x0
+
+#define AUD_DSP_WR_SRC_I2S_MATR__A 0x1050038
+#define AUD_DSP_WR_SRC_I2S_MATR__W 16
+#define AUD_DSP_WR_SRC_I2S_MATR__M 0xFFFF
+#define AUD_DSP_WR_SRC_I2S_MATR__PRE 0x0
+
+#define AUD_DSP_WR_SRC_I2S_MATR_SRC_I2S__B 8
+#define AUD_DSP_WR_SRC_I2S_MATR_SRC_I2S__W 8
+#define AUD_DSP_WR_SRC_I2S_MATR_SRC_I2S__M 0xFF00
+#define AUD_DSP_WR_SRC_I2S_MATR_SRC_I2S__PRE 0x0
+#define AUD_DSP_WR_SRC_I2S_MATR_SRC_I2S_MONO 0x0
+#define AUD_DSP_WR_SRC_I2S_MATR_SRC_I2S_STEREO_AB 0x100
+#define AUD_DSP_WR_SRC_I2S_MATR_SRC_I2S_STEREO_A 0x300
+#define AUD_DSP_WR_SRC_I2S_MATR_SRC_I2S_STEREO_B 0x400
+
+#define AUD_DSP_WR_SRC_I2S_MATR_MAT_I2S__B 0
+#define AUD_DSP_WR_SRC_I2S_MATR_MAT_I2S__W 8
+#define AUD_DSP_WR_SRC_I2S_MATR_MAT_I2S__M 0xFF
+#define AUD_DSP_WR_SRC_I2S_MATR_MAT_I2S__PRE 0x0
+#define AUD_DSP_WR_SRC_I2S_MATR_MAT_I2S_SOUND_A 0x0
+#define AUD_DSP_WR_SRC_I2S_MATR_MAT_I2S_SOUND_B 0x10
+#define AUD_DSP_WR_SRC_I2S_MATR_MAT_I2S_STEREO 0x20
+#define AUD_DSP_WR_SRC_I2S_MATR_MAT_I2S_MONO 0x30
+
+#define AUD_DSP_WR_AVC__A 0x1050029
+#define AUD_DSP_WR_AVC__W 16
+#define AUD_DSP_WR_AVC__M 0xFFFF
+#define AUD_DSP_WR_AVC__PRE 0x0
+
+#define AUD_DSP_WR_AVC_AVC_ON__B 14
+#define AUD_DSP_WR_AVC_AVC_ON__W 2
+#define AUD_DSP_WR_AVC_AVC_ON__M 0xC000
+#define AUD_DSP_WR_AVC_AVC_ON__PRE 0x0
+#define AUD_DSP_WR_AVC_AVC_ON_OFF 0x0
+#define AUD_DSP_WR_AVC_AVC_ON_ON 0xC000
+
+#define AUD_DSP_WR_AVC_AVC_DECAY__B 8
+#define AUD_DSP_WR_AVC_AVC_DECAY__W 4
+#define AUD_DSP_WR_AVC_AVC_DECAY__M 0xF00
+#define AUD_DSP_WR_AVC_AVC_DECAY__PRE 0x0
+#define AUD_DSP_WR_AVC_AVC_DECAY_8_SEC 0x800
+#define AUD_DSP_WR_AVC_AVC_DECAY_4_SEC 0x400
+#define AUD_DSP_WR_AVC_AVC_DECAY_2_SEC 0x200
+#define AUD_DSP_WR_AVC_AVC_DECAY_20_MSEC 0x100
+
+#define AUD_DSP_WR_AVC_AVC_REF_LEV__B 4
+#define AUD_DSP_WR_AVC_AVC_REF_LEV__W 4
+#define AUD_DSP_WR_AVC_AVC_REF_LEV__M 0xF0
+#define AUD_DSP_WR_AVC_AVC_REF_LEV__PRE 0x0
+
+#define AUD_DSP_WR_AVC_AVC_MAX_ATT__B 2
+#define AUD_DSP_WR_AVC_AVC_MAX_ATT__W 2
+#define AUD_DSP_WR_AVC_AVC_MAX_ATT__M 0xC
+#define AUD_DSP_WR_AVC_AVC_MAX_ATT__PRE 0x0
+#define AUD_DSP_WR_AVC_AVC_MAX_ATT_24DB 0x0
+#define AUD_DSP_WR_AVC_AVC_MAX_ATT_18DB 0x4
+#define AUD_DSP_WR_AVC_AVC_MAX_ATT_12DB 0x8
+
+#define AUD_DSP_WR_AVC_AVC_MAX_GAIN__B 0
+#define AUD_DSP_WR_AVC_AVC_MAX_GAIN__W 2
+#define AUD_DSP_WR_AVC_AVC_MAX_GAIN__M 0x3
+#define AUD_DSP_WR_AVC_AVC_MAX_GAIN__PRE 0x0
+#define AUD_DSP_WR_AVC_AVC_MAX_GAIN_6DB 0x0
+#define AUD_DSP_WR_AVC_AVC_MAX_GAIN_12DB 0x1
+#define AUD_DSP_WR_AVC_AVC_MAX_GAIN_0DB 0x3
+
+#define AUD_DSP_WR_QPEAK__A 0x105000C
+#define AUD_DSP_WR_QPEAK__W 16
+#define AUD_DSP_WR_QPEAK__M 0xFFFF
+#define AUD_DSP_WR_QPEAK__PRE 0x0
+
+#define AUD_DSP_WR_QPEAK_SRC_QP__B 8
+#define AUD_DSP_WR_QPEAK_SRC_QP__W 8
+#define AUD_DSP_WR_QPEAK_SRC_QP__M 0xFF00
+#define AUD_DSP_WR_QPEAK_SRC_QP__PRE 0x0
+#define AUD_DSP_WR_QPEAK_SRC_QP_MONO 0x0
+#define AUD_DSP_WR_QPEAK_SRC_QP_STEREO_AB 0x100
+#define AUD_DSP_WR_QPEAK_SRC_QP_STEREO_A 0x300
+#define AUD_DSP_WR_QPEAK_SRC_QP_STEREO_B 0x400
+
+#define AUD_DSP_WR_QPEAK_MAT_QP__B 0
+#define AUD_DSP_WR_QPEAK_MAT_QP__W 8
+#define AUD_DSP_WR_QPEAK_MAT_QP__M 0xFF
+#define AUD_DSP_WR_QPEAK_MAT_QP__PRE 0x0
+#define AUD_DSP_WR_QPEAK_MAT_QP_SOUND_A 0x0
+#define AUD_DSP_WR_QPEAK_MAT_QP_SOUND_B 0x10
+#define AUD_DSP_WR_QPEAK_MAT_QP_STEREO 0x20
+#define AUD_DSP_WR_QPEAK_MAT_QP_MONO 0x30
+
+#define AUD_DSP_RD_QPEAK_L__A 0x1040019
+#define AUD_DSP_RD_QPEAK_L__W 16
+#define AUD_DSP_RD_QPEAK_L__M 0xFFFF
+#define AUD_DSP_RD_QPEAK_L__PRE 0x0
+
+#define AUD_DSP_RD_QPEAK_R__A 0x104001A
+#define AUD_DSP_RD_QPEAK_R__W 16
+#define AUD_DSP_RD_QPEAK_R__M 0xFFFF
+#define AUD_DSP_RD_QPEAK_R__PRE 0x0
+
+#define AUD_DSP_WR_BEEPER__A 0x1050014
+#define AUD_DSP_WR_BEEPER__W 16
+#define AUD_DSP_WR_BEEPER__M 0xFFFF
+#define AUD_DSP_WR_BEEPER__PRE 0x0
+
+#define AUD_DSP_WR_BEEPER_BEEP_VOLUME__B 8
+#define AUD_DSP_WR_BEEPER_BEEP_VOLUME__W 7
+#define AUD_DSP_WR_BEEPER_BEEP_VOLUME__M 0x7F00
+#define AUD_DSP_WR_BEEPER_BEEP_VOLUME__PRE 0x0
+
+#define AUD_DSP_WR_BEEPER_BEEP_FREQUENCY__B 0
+#define AUD_DSP_WR_BEEPER_BEEP_FREQUENCY__W 7
+#define AUD_DSP_WR_BEEPER_BEEP_FREQUENCY__M 0x7F
+#define AUD_DSP_WR_BEEPER_BEEP_FREQUENCY__PRE 0x0
+
+#define AUD_DEM_WR_I2S_CONFIG2__A 0x1030050
+#define AUD_DEM_WR_I2S_CONFIG2__W 16
+#define AUD_DEM_WR_I2S_CONFIG2__M 0xFFFF
+#define AUD_DEM_WR_I2S_CONFIG2__PRE 0x0
+
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_CL_POL__B 6
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_CL_POL__W 1
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_CL_POL__M 0x40
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_CL_POL__PRE 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_CL_POL_NORMAL 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_CL_POL_INVERTED 0x40
+
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_ENABLE__B 4
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_ENABLE__W 1
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_ENABLE__M 0x10
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_ENABLE__PRE 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_ENABLE_DISABLE 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_ENABLE_ENABLE 0x10
+
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_SLV_MST__B 3
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_SLV_MST__W 1
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_SLV_MST__M 0x8
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_SLV_MST__PRE 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_SLV_MST_MASTER 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_SLV_MST_SLAVE 0x8
+
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_POL__B 2
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_POL__W 1
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_POL__M 0x4
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_POL__PRE 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_POL_LEFT_LOW 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_POL_LEFT_HIGH 0x4
+
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_MODE__B 1
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_MODE__W 1
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_MODE__M 0x2
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_MODE__PRE 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_MODE_NO_DELAY 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WS_MODE_DELAY 0x2
+
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WORD_LEN__B 0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WORD_LEN__W 1
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WORD_LEN__M 0x1
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WORD_LEN__PRE 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WORD_LEN_BIT_32 0x0
+#define AUD_DEM_WR_I2S_CONFIG2_I2S_WORD_LEN_BIT_16 0x1
+
+#define AUD_DSP_WR_I2S_OUT_FS__A 0x105002A
+#define AUD_DSP_WR_I2S_OUT_FS__W 16
+#define AUD_DSP_WR_I2S_OUT_FS__M 0xFFFF
+#define AUD_DSP_WR_I2S_OUT_FS__PRE 0x0
+
+#define AUD_DSP_WR_I2S_OUT_FS_FS_OUT__B 0
+#define AUD_DSP_WR_I2S_OUT_FS_FS_OUT__W 16
+#define AUD_DSP_WR_I2S_OUT_FS_FS_OUT__M 0xFFFF
+#define AUD_DSP_WR_I2S_OUT_FS_FS_OUT__PRE 0x0
+
+#define AUD_DSP_WR_AV_SYNC__A 0x105002B
+#define AUD_DSP_WR_AV_SYNC__W 16
+#define AUD_DSP_WR_AV_SYNC__M 0xFFFF
+#define AUD_DSP_WR_AV_SYNC__PRE 0x0
+
+#define AUD_DSP_WR_AV_SYNC_AV_ON__B 15
+#define AUD_DSP_WR_AV_SYNC_AV_ON__W 1
+#define AUD_DSP_WR_AV_SYNC_AV_ON__M 0x8000
+#define AUD_DSP_WR_AV_SYNC_AV_ON__PRE 0x0
+#define AUD_DSP_WR_AV_SYNC_AV_ON_DISABLE 0x0
+#define AUD_DSP_WR_AV_SYNC_AV_ON_ENABLE 0x8000
+
+#define AUD_DSP_WR_AV_SYNC_AV_AUTO_FREQ__B 14
+#define AUD_DSP_WR_AV_SYNC_AV_AUTO_FREQ__W 1
+#define AUD_DSP_WR_AV_SYNC_AV_AUTO_FREQ__M 0x4000
+#define AUD_DSP_WR_AV_SYNC_AV_AUTO_FREQ__PRE 0x0
+#define AUD_DSP_WR_AV_SYNC_AV_AUTO_FREQ_MONOCHROME 0x0
+#define AUD_DSP_WR_AV_SYNC_AV_AUTO_FREQ_NTSC 0x4000
+
+#define AUD_DSP_WR_AV_SYNC_AV_STD_SEL__B 0
+#define AUD_DSP_WR_AV_SYNC_AV_STD_SEL__W 2
+#define AUD_DSP_WR_AV_SYNC_AV_STD_SEL__M 0x3
+#define AUD_DSP_WR_AV_SYNC_AV_STD_SEL__PRE 0x0
+#define AUD_DSP_WR_AV_SYNC_AV_STD_SEL_AUTO 0x0
+#define AUD_DSP_WR_AV_SYNC_AV_STD_SEL_PAL_SECAM 0x1
+#define AUD_DSP_WR_AV_SYNC_AV_STD_SEL_NTSC 0x2
+#define AUD_DSP_WR_AV_SYNC_AV_STD_SEL_MONOCHROME 0x3
+
+#define AUD_DSP_RD_STATUS2__A 0x104007B
+#define AUD_DSP_RD_STATUS2__W 16
+#define AUD_DSP_RD_STATUS2__M 0xFFFF
+#define AUD_DSP_RD_STATUS2__PRE 0x0
+
+#define AUD_DSP_RD_STATUS2_AV_ACTIVE__B 15
+#define AUD_DSP_RD_STATUS2_AV_ACTIVE__W 1
+#define AUD_DSP_RD_STATUS2_AV_ACTIVE__M 0x8000
+#define AUD_DSP_RD_STATUS2_AV_ACTIVE__PRE 0x0
+#define AUD_DSP_RD_STATUS2_AV_ACTIVE_NO_SYNC 0x0
+#define AUD_DSP_RD_STATUS2_AV_ACTIVE_SYNC_ACTIVE 0x8000
+
+#define AUD_DSP_RD_XDFP_FW__A 0x104001D
+#define AUD_DSP_RD_XDFP_FW__W 16
+#define AUD_DSP_RD_XDFP_FW__M 0xFFFF
+#define AUD_DSP_RD_XDFP_FW__PRE 0x344
+
+#define AUD_DSP_RD_XDFP_FW_DSP_FW_REV__B 0
+#define AUD_DSP_RD_XDFP_FW_DSP_FW_REV__W 16
+#define AUD_DSP_RD_XDFP_FW_DSP_FW_REV__M 0xFFFF
+#define AUD_DSP_RD_XDFP_FW_DSP_FW_REV__PRE 0x344
+
+#define AUD_DSP_RD_XFP_FW__A 0x10404B8
+#define AUD_DSP_RD_XFP_FW__W 16
+#define AUD_DSP_RD_XFP_FW__M 0xFFFF
+#define AUD_DSP_RD_XFP_FW__PRE 0x42
+
+#define AUD_DSP_RD_XFP_FW_FP_FW_REV__B 0
+#define AUD_DSP_RD_XFP_FW_FP_FW_REV__W 16
+#define AUD_DSP_RD_XFP_FW_FP_FW_REV__M 0xFFFF
+#define AUD_DSP_RD_XFP_FW_FP_FW_REV__PRE 0x42
+
+#define AUD_DEM_WR_DCO_B_HI__A 0x103009B
+#define AUD_DEM_WR_DCO_B_HI__W 16
+#define AUD_DEM_WR_DCO_B_HI__M 0xFFFF
+#define AUD_DEM_WR_DCO_B_HI__PRE 0x0
+
+#define AUD_DEM_WR_DCO_B_LO__A 0x1030093
+#define AUD_DEM_WR_DCO_B_LO__W 16
+#define AUD_DEM_WR_DCO_B_LO__M 0xFFFF
+#define AUD_DEM_WR_DCO_B_LO__PRE 0x0
+
+#define AUD_DEM_WR_DCO_A_HI__A 0x10300AB
+#define AUD_DEM_WR_DCO_A_HI__W 16
+#define AUD_DEM_WR_DCO_A_HI__M 0xFFFF
+#define AUD_DEM_WR_DCO_A_HI__PRE 0x0
+
+#define AUD_DEM_WR_DCO_A_LO__A 0x10300A3
+#define AUD_DEM_WR_DCO_A_LO__W 16
+#define AUD_DEM_WR_DCO_A_LO__M 0xFFFF
+#define AUD_DEM_WR_DCO_A_LO__PRE 0x0
+#define AUD_DEM_WR_NICAM_THRSHLD__A 0x1030021
+#define AUD_DEM_WR_NICAM_THRSHLD__W 16
+#define AUD_DEM_WR_NICAM_THRSHLD__M 0xFFFF
+#define AUD_DEM_WR_NICAM_THRSHLD__PRE 0x2BC
+
+#define AUD_DEM_WR_NICAM_THRSHLD_NICAM_THLD__B 0
+#define AUD_DEM_WR_NICAM_THRSHLD_NICAM_THLD__W 12
+#define AUD_DEM_WR_NICAM_THRSHLD_NICAM_THLD__M 0xFFF
+#define AUD_DEM_WR_NICAM_THRSHLD_NICAM_THLD__PRE 0x2BC
+
+#define AUD_DEM_WR_A2_THRSHLD__A 0x1030022
+#define AUD_DEM_WR_A2_THRSHLD__W 16
+#define AUD_DEM_WR_A2_THRSHLD__M 0xFFFF
+#define AUD_DEM_WR_A2_THRSHLD__PRE 0x190
+
+#define AUD_DEM_WR_A2_THRSHLD_A2_THLD__B 0
+#define AUD_DEM_WR_A2_THRSHLD_A2_THLD__W 12
+#define AUD_DEM_WR_A2_THRSHLD_A2_THLD__M 0xFFF
+#define AUD_DEM_WR_A2_THRSHLD_A2_THLD__PRE 0x190
+
+#define AUD_DEM_WR_BTSC_THRSHLD__A 0x1030023
+#define AUD_DEM_WR_BTSC_THRSHLD__W 16
+#define AUD_DEM_WR_BTSC_THRSHLD__M 0xFFFF
+#define AUD_DEM_WR_BTSC_THRSHLD__PRE 0xC
+
+#define AUD_DEM_WR_BTSC_THRSHLD_BTSC_THLD__B 0
+#define AUD_DEM_WR_BTSC_THRSHLD_BTSC_THLD__W 12
+#define AUD_DEM_WR_BTSC_THRSHLD_BTSC_THLD__M 0xFFF
+#define AUD_DEM_WR_BTSC_THRSHLD_BTSC_THLD__PRE 0xC
+
+#define AUD_DEM_WR_CM_A_THRSHLD__A 0x1030024
+#define AUD_DEM_WR_CM_A_THRSHLD__W 16
+#define AUD_DEM_WR_CM_A_THRSHLD__M 0xFFFF
+#define AUD_DEM_WR_CM_A_THRSHLD__PRE 0x2A
+
+#define AUD_DEM_WR_CM_A_THRSHLD_CM_A_THLD__B 0
+#define AUD_DEM_WR_CM_A_THRSHLD_CM_A_THLD__W 12
+#define AUD_DEM_WR_CM_A_THRSHLD_CM_A_THLD__M 0xFFF
+#define AUD_DEM_WR_CM_A_THRSHLD_CM_A_THLD__PRE 0x2A
+
+#define AUD_DEM_WR_CM_B_THRSHLD__A 0x1030025
+#define AUD_DEM_WR_CM_B_THRSHLD__W 16
+#define AUD_DEM_WR_CM_B_THRSHLD__M 0xFFFF
+#define AUD_DEM_WR_CM_B_THRSHLD__PRE 0x2A
+
+#define AUD_DEM_WR_CM_B_THRSHLD_CM_B_THLD__B 0
+#define AUD_DEM_WR_CM_B_THRSHLD_CM_B_THLD__W 12
+#define AUD_DEM_WR_CM_B_THRSHLD_CM_B_THLD__M 0xFFF
+#define AUD_DEM_WR_CM_B_THRSHLD_CM_B_THLD__PRE 0x2A
+
+#define AUD_DEM_RD_NIC_C_AD_BITS__A 0x1020023
+#define AUD_DEM_RD_NIC_C_AD_BITS__W 16
+#define AUD_DEM_RD_NIC_C_AD_BITS__M 0xFFFF
+#define AUD_DEM_RD_NIC_C_AD_BITS__PRE 0x0
+
+#define AUD_DEM_RD_NIC_C_AD_BITS_NICAM_SYNC__B 0
+#define AUD_DEM_RD_NIC_C_AD_BITS_NICAM_SYNC__W 1
+#define AUD_DEM_RD_NIC_C_AD_BITS_NICAM_SYNC__M 0x1
+#define AUD_DEM_RD_NIC_C_AD_BITS_NICAM_SYNC__PRE 0x0
+#define AUD_DEM_RD_NIC_C_AD_BITS_NICAM_SYNC_NOT_SYNCED 0x0
+#define AUD_DEM_RD_NIC_C_AD_BITS_NICAM_SYNC_SYNCED 0x1
+
+#define AUD_DEM_RD_NIC_C_AD_BITS_C__B 1
+#define AUD_DEM_RD_NIC_C_AD_BITS_C__W 4
+#define AUD_DEM_RD_NIC_C_AD_BITS_C__M 0x1E
+#define AUD_DEM_RD_NIC_C_AD_BITS_C__PRE 0x0
+
+#define AUD_DEM_RD_NIC_C_AD_BITS_ADD_BIT_LO__B 5
+#define AUD_DEM_RD_NIC_C_AD_BITS_ADD_BIT_LO__W 3
+#define AUD_DEM_RD_NIC_C_AD_BITS_ADD_BIT_LO__M 0xE0
+#define AUD_DEM_RD_NIC_C_AD_BITS_ADD_BIT_LO__PRE 0x0
+
+#define AUD_DEM_RD_NIC_ADD_BITS_HI__A 0x1020038
+#define AUD_DEM_RD_NIC_ADD_BITS_HI__W 16
+#define AUD_DEM_RD_NIC_ADD_BITS_HI__M 0xFFFF
+#define AUD_DEM_RD_NIC_ADD_BITS_HI__PRE 0x0
+
+#define AUD_DEM_RD_NIC_ADD_BITS_HI_ADD_BIT_HI__B 0
+#define AUD_DEM_RD_NIC_ADD_BITS_HI_ADD_BIT_HI__W 8
+#define AUD_DEM_RD_NIC_ADD_BITS_HI_ADD_BIT_HI__M 0xFF
+#define AUD_DEM_RD_NIC_ADD_BITS_HI_ADD_BIT_HI__PRE 0x0
+
+#define AUD_DEM_RD_NIC_CIB__A 0x1020038
+#define AUD_DEM_RD_NIC_CIB__W 16
+#define AUD_DEM_RD_NIC_CIB__M 0xFFFF
+#define AUD_DEM_RD_NIC_CIB__PRE 0x0
+
+#define AUD_DEM_RD_NIC_CIB_CIB2__B 0
+#define AUD_DEM_RD_NIC_CIB_CIB2__W 1
+#define AUD_DEM_RD_NIC_CIB_CIB2__M 0x1
+#define AUD_DEM_RD_NIC_CIB_CIB2__PRE 0x0
+
+#define AUD_DEM_RD_NIC_CIB_CIB1__B 1
+#define AUD_DEM_RD_NIC_CIB_CIB1__W 1
+#define AUD_DEM_RD_NIC_CIB_CIB1__M 0x2
+#define AUD_DEM_RD_NIC_CIB_CIB1__PRE 0x0
+
+#define AUD_DEM_RD_NIC_ERROR_RATE__A 0x1020057
+#define AUD_DEM_RD_NIC_ERROR_RATE__W 16
+#define AUD_DEM_RD_NIC_ERROR_RATE__M 0xFFFF
+#define AUD_DEM_RD_NIC_ERROR_RATE__PRE 0x0
+
+#define AUD_DEM_RD_NIC_ERROR_RATE_ERROR_RATE__B 0
+#define AUD_DEM_RD_NIC_ERROR_RATE_ERROR_RATE__W 12
+#define AUD_DEM_RD_NIC_ERROR_RATE_ERROR_RATE__M 0xFFF
+#define AUD_DEM_RD_NIC_ERROR_RATE_ERROR_RATE__PRE 0x0
+
+#define AUD_DEM_WR_FM_DEEMPH__A 0x103000F
+#define AUD_DEM_WR_FM_DEEMPH__W 16
+#define AUD_DEM_WR_FM_DEEMPH__M 0xFFFF
+#define AUD_DEM_WR_FM_DEEMPH__PRE 0x0
+#define AUD_DEM_WR_FM_DEEMPH_50US 0x0
+#define AUD_DEM_WR_FM_DEEMPH_75US 0x1
+#define AUD_DEM_WR_FM_DEEMPH_OFF 0x3F
+
+#define AUD_DEM_WR_FM_MATRIX__A 0x103006F
+#define AUD_DEM_WR_FM_MATRIX__W 16
+#define AUD_DEM_WR_FM_MATRIX__M 0xFFFF
+#define AUD_DEM_WR_FM_MATRIX__PRE 0x0
+#define AUD_DEM_WR_FM_MATRIX_NO_MATRIX 0x0
+#define AUD_DEM_WR_FM_MATRIX_GERMAN_MATRIX 0x1
+#define AUD_DEM_WR_FM_MATRIX_KOREAN_MATRIX 0x2
+#define AUD_DEM_WR_FM_MATRIX_SOUND_A 0x3
+#define AUD_DEM_WR_FM_MATRIX_SOUND_B 0x4
+
+#define AUD_DSP_RD_FM_IDENT_VALUE__A 0x1040018
+#define AUD_DSP_RD_FM_IDENT_VALUE__W 16
+#define AUD_DSP_RD_FM_IDENT_VALUE__M 0xFFFF
+#define AUD_DSP_RD_FM_IDENT_VALUE__PRE 0x0
+
+#define AUD_DSP_RD_FM_IDENT_VALUE_FM_IDENT__B 8
+#define AUD_DSP_RD_FM_IDENT_VALUE_FM_IDENT__W 8
+#define AUD_DSP_RD_FM_IDENT_VALUE_FM_IDENT__M 0xFF00
+#define AUD_DSP_RD_FM_IDENT_VALUE_FM_IDENT__PRE 0x0
+
+#define AUD_DSP_RD_FM_DC_LEVEL_A__A 0x104001B
+#define AUD_DSP_RD_FM_DC_LEVEL_A__W 16
+#define AUD_DSP_RD_FM_DC_LEVEL_A__M 0xFFFF
+#define AUD_DSP_RD_FM_DC_LEVEL_A__PRE 0x0
+
+#define AUD_DSP_RD_FM_DC_LEVEL_A_FM_DC_LEV_A__B 0
+#define AUD_DSP_RD_FM_DC_LEVEL_A_FM_DC_LEV_A__W 16
+#define AUD_DSP_RD_FM_DC_LEVEL_A_FM_DC_LEV_A__M 0xFFFF
+#define AUD_DSP_RD_FM_DC_LEVEL_A_FM_DC_LEV_A__PRE 0x0
+
+#define AUD_DSP_RD_FM_DC_LEVEL_B__A 0x104001C
+#define AUD_DSP_RD_FM_DC_LEVEL_B__W 16
+#define AUD_DSP_RD_FM_DC_LEVEL_B__M 0xFFFF
+#define AUD_DSP_RD_FM_DC_LEVEL_B__PRE 0x0
+
+#define AUD_DSP_RD_FM_DC_LEVEL_B_FM_DC_LEV_B__B 0
+#define AUD_DSP_RD_FM_DC_LEVEL_B_FM_DC_LEV_B__W 16
+#define AUD_DSP_RD_FM_DC_LEVEL_B_FM_DC_LEV_B__M 0xFFFF
+#define AUD_DSP_RD_FM_DC_LEVEL_B_FM_DC_LEV_B__PRE 0x0
+
+#define AUD_DEM_WR_FM_DC_NOTCH_SW__A 0x1030017
+#define AUD_DEM_WR_FM_DC_NOTCH_SW__W 16
+#define AUD_DEM_WR_FM_DC_NOTCH_SW__M 0xFFFF
+#define AUD_DEM_WR_FM_DC_NOTCH_SW__PRE 0x0
+
+#define AUD_DEM_WR_FM_DC_NOTCH_SW_FM_DC_NO_SW__B 0
+#define AUD_DEM_WR_FM_DC_NOTCH_SW_FM_DC_NO_SW__W 16
+#define AUD_DEM_WR_FM_DC_NOTCH_SW_FM_DC_NO_SW__M 0xFFFF
+#define AUD_DEM_WR_FM_DC_NOTCH_SW_FM_DC_NO_SW__PRE 0x0
+#define AUD_DEM_WR_FM_DC_NOTCH_SW_FM_DC_NO_SW_ON 0x0
+#define AUD_DEM_WR_FM_DC_NOTCH_SW_FM_DC_NO_SW_OFF 0x3F
+
+#define AUD_DSP_WR_SYNC_OUT__A 0x1050026
+#define AUD_DSP_WR_SYNC_OUT__W 16
+#define AUD_DSP_WR_SYNC_OUT__M 0xFFFF
+#define AUD_DSP_WR_SYNC_OUT__PRE 0x0
+#define AUD_DSP_WR_SYNC_OUT_OFF 0x0
+#define AUD_DSP_WR_SYNC_OUT_SYNCHRONOUS 0x1
+
+#define AUD_XFP_DRAM_1K__A 0x1060000
+#define AUD_XFP_DRAM_1K__W 16
+#define AUD_XFP_DRAM_1K__M 0xFFFF
+#define AUD_XFP_DRAM_1K__PRE 0x0
+#define AUD_XFP_DRAM_1K_D__B 0
+#define AUD_XFP_DRAM_1K_D__W 16
+#define AUD_XFP_DRAM_1K_D__M 0xFFFF
+#define AUD_XFP_DRAM_1K_D__PRE 0x0
+
+#define AUD_XFP_PRAM_4K__A 0x1070000
+#define AUD_XFP_PRAM_4K__W 16
+#define AUD_XFP_PRAM_4K__M 0xFFFF
+#define AUD_XFP_PRAM_4K__PRE 0x0
+#define AUD_XFP_PRAM_4K_D__B 0
+#define AUD_XFP_PRAM_4K_D__W 16
+#define AUD_XFP_PRAM_4K_D__M 0xFFFF
+#define AUD_XFP_PRAM_4K_D__PRE 0x0
+
+#define AUD_XDFP_DRAM_1K__A 0x1080000
+#define AUD_XDFP_DRAM_1K__W 16
+#define AUD_XDFP_DRAM_1K__M 0xFFFF
+#define AUD_XDFP_DRAM_1K__PRE 0x0
+#define AUD_XDFP_DRAM_1K_D__B 0
+#define AUD_XDFP_DRAM_1K_D__W 16
+#define AUD_XDFP_DRAM_1K_D__M 0xFFFF
+#define AUD_XDFP_DRAM_1K_D__PRE 0x0
+
+#define AUD_XDFP_PRAM_4K__A 0x1090000
+#define AUD_XDFP_PRAM_4K__W 16
+#define AUD_XDFP_PRAM_4K__M 0xFFFF
+#define AUD_XDFP_PRAM_4K__PRE 0x0
+#define AUD_XDFP_PRAM_4K_D__B 0
+#define AUD_XDFP_PRAM_4K_D__W 16
+#define AUD_XDFP_PRAM_4K_D__M 0xFFFF
+#define AUD_XDFP_PRAM_4K_D__PRE 0x0
+
+#define FEC_COMM_EXEC__A 0x2400000
+#define FEC_COMM_EXEC__W 2
+#define FEC_COMM_EXEC__M 0x3
+#define FEC_COMM_EXEC__PRE 0x0
+#define FEC_COMM_EXEC_STOP 0x0
+#define FEC_COMM_EXEC_ACTIVE 0x1
+#define FEC_COMM_EXEC_HOLD 0x2
+
+#define FEC_COMM_MB__A 0x2400002
+#define FEC_COMM_MB__W 16
+#define FEC_COMM_MB__M 0xFFFF
+#define FEC_COMM_MB__PRE 0x0
+#define FEC_COMM_INT_REQ__A 0x2400003
+#define FEC_COMM_INT_REQ__W 16
+#define FEC_COMM_INT_REQ__M 0xFFFF
+#define FEC_COMM_INT_REQ__PRE 0x0
+#define FEC_COMM_INT_REQ_OC_REQ__B 0
+#define FEC_COMM_INT_REQ_OC_REQ__W 1
+#define FEC_COMM_INT_REQ_OC_REQ__M 0x1
+#define FEC_COMM_INT_REQ_OC_REQ__PRE 0x0
+#define FEC_COMM_INT_REQ_RS_REQ__B 1
+#define FEC_COMM_INT_REQ_RS_REQ__W 1
+#define FEC_COMM_INT_REQ_RS_REQ__M 0x2
+#define FEC_COMM_INT_REQ_RS_REQ__PRE 0x0
+#define FEC_COMM_INT_REQ_DI_REQ__B 2
+#define FEC_COMM_INT_REQ_DI_REQ__W 1
+#define FEC_COMM_INT_REQ_DI_REQ__M 0x4
+#define FEC_COMM_INT_REQ_DI_REQ__PRE 0x0
+
+#define FEC_COMM_INT_STA__A 0x2400005
+#define FEC_COMM_INT_STA__W 16
+#define FEC_COMM_INT_STA__M 0xFFFF
+#define FEC_COMM_INT_STA__PRE 0x0
+#define FEC_COMM_INT_MSK__A 0x2400006
+#define FEC_COMM_INT_MSK__W 16
+#define FEC_COMM_INT_MSK__M 0xFFFF
+#define FEC_COMM_INT_MSK__PRE 0x0
+#define FEC_COMM_INT_STM__A 0x2400007
+#define FEC_COMM_INT_STM__W 16
+#define FEC_COMM_INT_STM__M 0xFFFF
+#define FEC_COMM_INT_STM__PRE 0x0
+
+#define FEC_TOP_COMM_EXEC__A 0x2410000
+#define FEC_TOP_COMM_EXEC__W 2
+#define FEC_TOP_COMM_EXEC__M 0x3
+#define FEC_TOP_COMM_EXEC__PRE 0x0
+#define FEC_TOP_COMM_EXEC_STOP 0x0
+#define FEC_TOP_COMM_EXEC_ACTIVE 0x1
+#define FEC_TOP_COMM_EXEC_HOLD 0x2
+
+#define FEC_TOP_ANNEX__A 0x2410010
+#define FEC_TOP_ANNEX__W 2
+#define FEC_TOP_ANNEX__M 0x3
+#define FEC_TOP_ANNEX__PRE 0x0
+#define FEC_TOP_ANNEX_A 0x0
+#define FEC_TOP_ANNEX_B 0x1
+#define FEC_TOP_ANNEX_C 0x2
+#define FEC_TOP_ANNEX_D 0x3
+
+#define FEC_DI_COMM_EXEC__A 0x2420000
+#define FEC_DI_COMM_EXEC__W 2
+#define FEC_DI_COMM_EXEC__M 0x3
+#define FEC_DI_COMM_EXEC__PRE 0x0
+#define FEC_DI_COMM_EXEC_STOP 0x0
+#define FEC_DI_COMM_EXEC_ACTIVE 0x1
+#define FEC_DI_COMM_EXEC_HOLD 0x2
+
+#define FEC_DI_COMM_MB__A 0x2420002
+#define FEC_DI_COMM_MB__W 2
+#define FEC_DI_COMM_MB__M 0x3
+#define FEC_DI_COMM_MB__PRE 0x0
+#define FEC_DI_COMM_MB_CTL__B 0
+#define FEC_DI_COMM_MB_CTL__W 1
+#define FEC_DI_COMM_MB_CTL__M 0x1
+#define FEC_DI_COMM_MB_CTL__PRE 0x0
+#define FEC_DI_COMM_MB_CTL_OFF 0x0
+#define FEC_DI_COMM_MB_CTL_ON 0x1
+#define FEC_DI_COMM_MB_OBS__B 1
+#define FEC_DI_COMM_MB_OBS__W 1
+#define FEC_DI_COMM_MB_OBS__M 0x2
+#define FEC_DI_COMM_MB_OBS__PRE 0x0
+#define FEC_DI_COMM_MB_OBS_OFF 0x0
+#define FEC_DI_COMM_MB_OBS_ON 0x2
+
+#define FEC_DI_COMM_INT_REQ__A 0x2420003
+#define FEC_DI_COMM_INT_REQ__W 1
+#define FEC_DI_COMM_INT_REQ__M 0x1
+#define FEC_DI_COMM_INT_REQ__PRE 0x0
+#define FEC_DI_COMM_INT_STA__A 0x2420005
+#define FEC_DI_COMM_INT_STA__W 2
+#define FEC_DI_COMM_INT_STA__M 0x3
+#define FEC_DI_COMM_INT_STA__PRE 0x0
+
+#define FEC_DI_COMM_INT_STA_STAT_INT__B 0
+#define FEC_DI_COMM_INT_STA_STAT_INT__W 1
+#define FEC_DI_COMM_INT_STA_STAT_INT__M 0x1
+#define FEC_DI_COMM_INT_STA_STAT_INT__PRE 0x0
+
+#define FEC_DI_COMM_INT_STA_TIMEOUT_INT__B 1
+#define FEC_DI_COMM_INT_STA_TIMEOUT_INT__W 1
+#define FEC_DI_COMM_INT_STA_TIMEOUT_INT__M 0x2
+#define FEC_DI_COMM_INT_STA_TIMEOUT_INT__PRE 0x0
+
+#define FEC_DI_COMM_INT_MSK__A 0x2420006
+#define FEC_DI_COMM_INT_MSK__W 2
+#define FEC_DI_COMM_INT_MSK__M 0x3
+#define FEC_DI_COMM_INT_MSK__PRE 0x0
+#define FEC_DI_COMM_INT_MSK_STAT_INT__B 0
+#define FEC_DI_COMM_INT_MSK_STAT_INT__W 1
+#define FEC_DI_COMM_INT_MSK_STAT_INT__M 0x1
+#define FEC_DI_COMM_INT_MSK_STAT_INT__PRE 0x0
+#define FEC_DI_COMM_INT_MSK_TIMEOUT_INT__B 1
+#define FEC_DI_COMM_INT_MSK_TIMEOUT_INT__W 1
+#define FEC_DI_COMM_INT_MSK_TIMEOUT_INT__M 0x2
+#define FEC_DI_COMM_INT_MSK_TIMEOUT_INT__PRE 0x0
+
+#define FEC_DI_COMM_INT_STM__A 0x2420007
+#define FEC_DI_COMM_INT_STM__W 2
+#define FEC_DI_COMM_INT_STM__M 0x3
+#define FEC_DI_COMM_INT_STM__PRE 0x0
+#define FEC_DI_COMM_INT_STM_STAT_INT__B 0
+#define FEC_DI_COMM_INT_STM_STAT_INT__W 1
+#define FEC_DI_COMM_INT_STM_STAT_INT__M 0x1
+#define FEC_DI_COMM_INT_STM_STAT_INT__PRE 0x0
+#define FEC_DI_COMM_INT_STM_TIMEOUT_INT__B 1
+#define FEC_DI_COMM_INT_STM_TIMEOUT_INT__W 1
+#define FEC_DI_COMM_INT_STM_TIMEOUT_INT__M 0x2
+#define FEC_DI_COMM_INT_STM_TIMEOUT_INT__PRE 0x0
+
+#define FEC_DI_STATUS__A 0x2420010
+#define FEC_DI_STATUS__W 1
+#define FEC_DI_STATUS__M 0x1
+#define FEC_DI_STATUS__PRE 0x0
+#define FEC_DI_MODE__A 0x2420011
+#define FEC_DI_MODE__W 3
+#define FEC_DI_MODE__M 0x7
+#define FEC_DI_MODE__PRE 0x0
+
+#define FEC_DI_MODE_NO_SYNC__B 0
+#define FEC_DI_MODE_NO_SYNC__W 1
+#define FEC_DI_MODE_NO_SYNC__M 0x1
+#define FEC_DI_MODE_NO_SYNC__PRE 0x0
+
+#define FEC_DI_MODE_IGNORE_LOST_SYNC__B 1
+#define FEC_DI_MODE_IGNORE_LOST_SYNC__W 1
+#define FEC_DI_MODE_IGNORE_LOST_SYNC__M 0x2
+#define FEC_DI_MODE_IGNORE_LOST_SYNC__PRE 0x0
+
+#define FEC_DI_MODE_IGNORE_TIMEOUT__B 2
+#define FEC_DI_MODE_IGNORE_TIMEOUT__W 1
+#define FEC_DI_MODE_IGNORE_TIMEOUT__M 0x4
+#define FEC_DI_MODE_IGNORE_TIMEOUT__PRE 0x0
+
+#define FEC_DI_CONTROL_WORD__A 0x2420012
+#define FEC_DI_CONTROL_WORD__W 4
+#define FEC_DI_CONTROL_WORD__M 0xF
+#define FEC_DI_CONTROL_WORD__PRE 0x0
+
+#define FEC_DI_RESTART__A 0x2420013
+#define FEC_DI_RESTART__W 1
+#define FEC_DI_RESTART__M 0x1
+#define FEC_DI_RESTART__PRE 0x0
+
+#define FEC_DI_TIMEOUT_LO__A 0x2420014
+#define FEC_DI_TIMEOUT_LO__W 16
+#define FEC_DI_TIMEOUT_LO__M 0xFFFF
+#define FEC_DI_TIMEOUT_LO__PRE 0x0
+
+#define FEC_DI_TIMEOUT_HI__A 0x2420015
+#define FEC_DI_TIMEOUT_HI__W 8
+#define FEC_DI_TIMEOUT_HI__M 0xFF
+#define FEC_DI_TIMEOUT_HI__PRE 0xA
+
+#define FEC_RS_COMM_EXEC__A 0x2430000
+#define FEC_RS_COMM_EXEC__W 2
+#define FEC_RS_COMM_EXEC__M 0x3
+#define FEC_RS_COMM_EXEC__PRE 0x0
+#define FEC_RS_COMM_EXEC_STOP 0x0
+#define FEC_RS_COMM_EXEC_ACTIVE 0x1
+#define FEC_RS_COMM_EXEC_HOLD 0x2
+
+#define FEC_RS_COMM_MB__A 0x2430002
+#define FEC_RS_COMM_MB__W 2
+#define FEC_RS_COMM_MB__M 0x3
+#define FEC_RS_COMM_MB__PRE 0x0
+#define FEC_RS_COMM_MB_CTL__B 0
+#define FEC_RS_COMM_MB_CTL__W 1
+#define FEC_RS_COMM_MB_CTL__M 0x1
+#define FEC_RS_COMM_MB_CTL__PRE 0x0
+#define FEC_RS_COMM_MB_CTL_OFF 0x0
+#define FEC_RS_COMM_MB_CTL_ON 0x1
+#define FEC_RS_COMM_MB_OBS__B 1
+#define FEC_RS_COMM_MB_OBS__W 1
+#define FEC_RS_COMM_MB_OBS__M 0x2
+#define FEC_RS_COMM_MB_OBS__PRE 0x0
+#define FEC_RS_COMM_MB_OBS_OFF 0x0
+#define FEC_RS_COMM_MB_OBS_ON 0x2
+
+#define FEC_RS_COMM_INT_REQ__A 0x2430003
+#define FEC_RS_COMM_INT_REQ__W 1
+#define FEC_RS_COMM_INT_REQ__M 0x1
+#define FEC_RS_COMM_INT_REQ__PRE 0x0
+#define FEC_RS_COMM_INT_STA__A 0x2430005
+#define FEC_RS_COMM_INT_STA__W 2
+#define FEC_RS_COMM_INT_STA__M 0x3
+#define FEC_RS_COMM_INT_STA__PRE 0x0
+
+#define FEC_RS_COMM_INT_STA_FAILURE_INT__B 0
+#define FEC_RS_COMM_INT_STA_FAILURE_INT__W 1
+#define FEC_RS_COMM_INT_STA_FAILURE_INT__M 0x1
+#define FEC_RS_COMM_INT_STA_FAILURE_INT__PRE 0x0
+
+#define FEC_RS_COMM_INT_STA_MEASUREMENT_INT__B 1
+#define FEC_RS_COMM_INT_STA_MEASUREMENT_INT__W 1
+#define FEC_RS_COMM_INT_STA_MEASUREMENT_INT__M 0x2
+#define FEC_RS_COMM_INT_STA_MEASUREMENT_INT__PRE 0x0
+
+#define FEC_RS_COMM_INT_MSK__A 0x2430006
+#define FEC_RS_COMM_INT_MSK__W 2
+#define FEC_RS_COMM_INT_MSK__M 0x3
+#define FEC_RS_COMM_INT_MSK__PRE 0x0
+#define FEC_RS_COMM_INT_MSK_FAILURE_MSK__B 0
+#define FEC_RS_COMM_INT_MSK_FAILURE_MSK__W 1
+#define FEC_RS_COMM_INT_MSK_FAILURE_MSK__M 0x1
+#define FEC_RS_COMM_INT_MSK_FAILURE_MSK__PRE 0x0
+#define FEC_RS_COMM_INT_MSK_MEASUREMENT_MSK__B 1
+#define FEC_RS_COMM_INT_MSK_MEASUREMENT_MSK__W 1
+#define FEC_RS_COMM_INT_MSK_MEASUREMENT_MSK__M 0x2
+#define FEC_RS_COMM_INT_MSK_MEASUREMENT_MSK__PRE 0x0
+
+#define FEC_RS_COMM_INT_STM__A 0x2430007
+#define FEC_RS_COMM_INT_STM__W 2
+#define FEC_RS_COMM_INT_STM__M 0x3
+#define FEC_RS_COMM_INT_STM__PRE 0x0
+#define FEC_RS_COMM_INT_STM_FAILURE_MSK__B 0
+#define FEC_RS_COMM_INT_STM_FAILURE_MSK__W 1
+#define FEC_RS_COMM_INT_STM_FAILURE_MSK__M 0x1
+#define FEC_RS_COMM_INT_STM_FAILURE_MSK__PRE 0x0
+#define FEC_RS_COMM_INT_STM_MEASUREMENT_MSK__B 1
+#define FEC_RS_COMM_INT_STM_MEASUREMENT_MSK__W 1
+#define FEC_RS_COMM_INT_STM_MEASUREMENT_MSK__M 0x2
+#define FEC_RS_COMM_INT_STM_MEASUREMENT_MSK__PRE 0x0
+
+#define FEC_RS_STATUS__A 0x2430010
+#define FEC_RS_STATUS__W 1
+#define FEC_RS_STATUS__M 0x1
+#define FEC_RS_STATUS__PRE 0x0
+#define FEC_RS_MODE__A 0x2430011
+#define FEC_RS_MODE__W 1
+#define FEC_RS_MODE__M 0x1
+#define FEC_RS_MODE__PRE 0x0
+
+#define FEC_RS_MODE_BYPASS__B 0
+#define FEC_RS_MODE_BYPASS__W 1
+#define FEC_RS_MODE_BYPASS__M 0x1
+#define FEC_RS_MODE_BYPASS__PRE 0x0
+
+#define FEC_RS_MEASUREMENT_PERIOD__A 0x2430012
+#define FEC_RS_MEASUREMENT_PERIOD__W 16
+#define FEC_RS_MEASUREMENT_PERIOD__M 0xFFFF
+#define FEC_RS_MEASUREMENT_PERIOD__PRE 0x1171
+
+#define FEC_RS_MEASUREMENT_PERIOD_PERIOD__B 0
+#define FEC_RS_MEASUREMENT_PERIOD_PERIOD__W 16
+#define FEC_RS_MEASUREMENT_PERIOD_PERIOD__M 0xFFFF
+#define FEC_RS_MEASUREMENT_PERIOD_PERIOD__PRE 0x1171
+
+#define FEC_RS_MEASUREMENT_PRESCALE__A 0x2430013
+#define FEC_RS_MEASUREMENT_PRESCALE__W 16
+#define FEC_RS_MEASUREMENT_PRESCALE__M 0xFFFF
+#define FEC_RS_MEASUREMENT_PRESCALE__PRE 0x1
+
+#define FEC_RS_MEASUREMENT_PRESCALE_PRESCALE__B 0
+#define FEC_RS_MEASUREMENT_PRESCALE_PRESCALE__W 16
+#define FEC_RS_MEASUREMENT_PRESCALE_PRESCALE__M 0xFFFF
+#define FEC_RS_MEASUREMENT_PRESCALE_PRESCALE__PRE 0x1
+
+#define FEC_RS_NR_BIT_ERRORS__A 0x2430014
+#define FEC_RS_NR_BIT_ERRORS__W 16
+#define FEC_RS_NR_BIT_ERRORS__M 0xFFFF
+#define FEC_RS_NR_BIT_ERRORS__PRE 0xFFFF
+
+#define FEC_RS_NR_BIT_ERRORS_FIXED_MANT__B 0
+#define FEC_RS_NR_BIT_ERRORS_FIXED_MANT__W 12
+#define FEC_RS_NR_BIT_ERRORS_FIXED_MANT__M 0xFFF
+#define FEC_RS_NR_BIT_ERRORS_FIXED_MANT__PRE 0xFFF
+
+#define FEC_RS_NR_BIT_ERRORS_EXP__B 12
+#define FEC_RS_NR_BIT_ERRORS_EXP__W 4
+#define FEC_RS_NR_BIT_ERRORS_EXP__M 0xF000
+#define FEC_RS_NR_BIT_ERRORS_EXP__PRE 0xF000
+
+#define FEC_RS_NR_SYMBOL_ERRORS__A 0x2430015
+#define FEC_RS_NR_SYMBOL_ERRORS__W 16
+#define FEC_RS_NR_SYMBOL_ERRORS__M 0xFFFF
+#define FEC_RS_NR_SYMBOL_ERRORS__PRE 0xFFFF
+
+#define FEC_RS_NR_SYMBOL_ERRORS_FIXED_MANT__B 0
+#define FEC_RS_NR_SYMBOL_ERRORS_FIXED_MANT__W 12
+#define FEC_RS_NR_SYMBOL_ERRORS_FIXED_MANT__M 0xFFF
+#define FEC_RS_NR_SYMBOL_ERRORS_FIXED_MANT__PRE 0xFFF
+
+#define FEC_RS_NR_SYMBOL_ERRORS_EXP__B 12
+#define FEC_RS_NR_SYMBOL_ERRORS_EXP__W 4
+#define FEC_RS_NR_SYMBOL_ERRORS_EXP__M 0xF000
+#define FEC_RS_NR_SYMBOL_ERRORS_EXP__PRE 0xF000
+
+#define FEC_RS_NR_PACKET_ERRORS__A 0x2430016
+#define FEC_RS_NR_PACKET_ERRORS__W 16
+#define FEC_RS_NR_PACKET_ERRORS__M 0xFFFF
+#define FEC_RS_NR_PACKET_ERRORS__PRE 0xFFFF
+
+#define FEC_RS_NR_PACKET_ERRORS_FIXED_MANT__B 0
+#define FEC_RS_NR_PACKET_ERRORS_FIXED_MANT__W 12
+#define FEC_RS_NR_PACKET_ERRORS_FIXED_MANT__M 0xFFF
+#define FEC_RS_NR_PACKET_ERRORS_FIXED_MANT__PRE 0xFFF
+
+#define FEC_RS_NR_PACKET_ERRORS_EXP__B 12
+#define FEC_RS_NR_PACKET_ERRORS_EXP__W 4
+#define FEC_RS_NR_PACKET_ERRORS_EXP__M 0xF000
+#define FEC_RS_NR_PACKET_ERRORS_EXP__PRE 0xF000
+
+#define FEC_RS_NR_FAILURES__A 0x2430017
+#define FEC_RS_NR_FAILURES__W 16
+#define FEC_RS_NR_FAILURES__M 0xFFFF
+#define FEC_RS_NR_FAILURES__PRE 0x0
+
+#define FEC_RS_NR_FAILURES_FIXED_MANT__B 0
+#define FEC_RS_NR_FAILURES_FIXED_MANT__W 12
+#define FEC_RS_NR_FAILURES_FIXED_MANT__M 0xFFF
+#define FEC_RS_NR_FAILURES_FIXED_MANT__PRE 0x0
+
+#define FEC_RS_NR_FAILURES_EXP__B 12
+#define FEC_RS_NR_FAILURES_EXP__W 4
+#define FEC_RS_NR_FAILURES_EXP__M 0xF000
+#define FEC_RS_NR_FAILURES_EXP__PRE 0x0
+
+#define FEC_OC_COMM_EXEC__A 0x2440000
+#define FEC_OC_COMM_EXEC__W 2
+#define FEC_OC_COMM_EXEC__M 0x3
+#define FEC_OC_COMM_EXEC__PRE 0x0
+#define FEC_OC_COMM_EXEC_STOP 0x0
+#define FEC_OC_COMM_EXEC_ACTIVE 0x1
+#define FEC_OC_COMM_EXEC_HOLD 0x2
+
+#define FEC_OC_COMM_MB__A 0x2440002
+#define FEC_OC_COMM_MB__W 2
+#define FEC_OC_COMM_MB__M 0x3
+#define FEC_OC_COMM_MB__PRE 0x0
+#define FEC_OC_COMM_MB_CTL__B 0
+#define FEC_OC_COMM_MB_CTL__W 1
+#define FEC_OC_COMM_MB_CTL__M 0x1
+#define FEC_OC_COMM_MB_CTL__PRE 0x0
+#define FEC_OC_COMM_MB_CTL_OFF 0x0
+#define FEC_OC_COMM_MB_CTL_ON 0x1
+#define FEC_OC_COMM_MB_OBS__B 1
+#define FEC_OC_COMM_MB_OBS__W 1
+#define FEC_OC_COMM_MB_OBS__M 0x2
+#define FEC_OC_COMM_MB_OBS__PRE 0x0
+#define FEC_OC_COMM_MB_OBS_OFF 0x0
+#define FEC_OC_COMM_MB_OBS_ON 0x2
+
+#define FEC_OC_COMM_INT_REQ__A 0x2440003
+#define FEC_OC_COMM_INT_REQ__W 1
+#define FEC_OC_COMM_INT_REQ__M 0x1
+#define FEC_OC_COMM_INT_REQ__PRE 0x0
+#define FEC_OC_COMM_INT_STA__A 0x2440005
+#define FEC_OC_COMM_INT_STA__W 8
+#define FEC_OC_COMM_INT_STA__M 0xFF
+#define FEC_OC_COMM_INT_STA__PRE 0x0
+
+#define FEC_OC_COMM_INT_STA_DPR_LOCK_INT__B 0
+#define FEC_OC_COMM_INT_STA_DPR_LOCK_INT__W 1
+#define FEC_OC_COMM_INT_STA_DPR_LOCK_INT__M 0x1
+#define FEC_OC_COMM_INT_STA_DPR_LOCK_INT__PRE 0x0
+
+#define FEC_OC_COMM_INT_STA_SNC_LOCK_INT__B 1
+#define FEC_OC_COMM_INT_STA_SNC_LOCK_INT__W 1
+#define FEC_OC_COMM_INT_STA_SNC_LOCK_INT__M 0x2
+#define FEC_OC_COMM_INT_STA_SNC_LOCK_INT__PRE 0x0
+
+#define FEC_OC_COMM_INT_STA_SNC_LOST_INT__B 2
+#define FEC_OC_COMM_INT_STA_SNC_LOST_INT__W 1
+#define FEC_OC_COMM_INT_STA_SNC_LOST_INT__M 0x4
+#define FEC_OC_COMM_INT_STA_SNC_LOST_INT__PRE 0x0
+
+#define FEC_OC_COMM_INT_STA_SNC_PAR_INT__B 3
+#define FEC_OC_COMM_INT_STA_SNC_PAR_INT__W 1
+#define FEC_OC_COMM_INT_STA_SNC_PAR_INT__M 0x8
+#define FEC_OC_COMM_INT_STA_SNC_PAR_INT__PRE 0x0
+
+#define FEC_OC_COMM_INT_STA_FIFO_FULL_INT__B 4
+#define FEC_OC_COMM_INT_STA_FIFO_FULL_INT__W 1
+#define FEC_OC_COMM_INT_STA_FIFO_FULL_INT__M 0x10
+#define FEC_OC_COMM_INT_STA_FIFO_FULL_INT__PRE 0x0
+
+#define FEC_OC_COMM_INT_STA_FIFO_EMPTY_INT__B 5
+#define FEC_OC_COMM_INT_STA_FIFO_EMPTY_INT__W 1
+#define FEC_OC_COMM_INT_STA_FIFO_EMPTY_INT__M 0x20
+#define FEC_OC_COMM_INT_STA_FIFO_EMPTY_INT__PRE 0x0
+
+#define FEC_OC_COMM_INT_STA_OCR_ACQ_INT__B 6
+#define FEC_OC_COMM_INT_STA_OCR_ACQ_INT__W 1
+#define FEC_OC_COMM_INT_STA_OCR_ACQ_INT__M 0x40
+#define FEC_OC_COMM_INT_STA_OCR_ACQ_INT__PRE 0x0
+
+#define FEC_OC_COMM_INT_STA_STAT_CHG_INT__B 7
+#define FEC_OC_COMM_INT_STA_STAT_CHG_INT__W 1
+#define FEC_OC_COMM_INT_STA_STAT_CHG_INT__M 0x80
+#define FEC_OC_COMM_INT_STA_STAT_CHG_INT__PRE 0x0
+
+#define FEC_OC_COMM_INT_MSK__A 0x2440006
+#define FEC_OC_COMM_INT_MSK__W 8
+#define FEC_OC_COMM_INT_MSK__M 0xFF
+#define FEC_OC_COMM_INT_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_MSK_DPR_LOCK_MSK__B 0
+#define FEC_OC_COMM_INT_MSK_DPR_LOCK_MSK__W 1
+#define FEC_OC_COMM_INT_MSK_DPR_LOCK_MSK__M 0x1
+#define FEC_OC_COMM_INT_MSK_DPR_LOCK_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_MSK_SNC_LOCK_MSK__B 1
+#define FEC_OC_COMM_INT_MSK_SNC_LOCK_MSK__W 1
+#define FEC_OC_COMM_INT_MSK_SNC_LOCK_MSK__M 0x2
+#define FEC_OC_COMM_INT_MSK_SNC_LOCK_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_MSK_SNC_LOST_MSK__B 2
+#define FEC_OC_COMM_INT_MSK_SNC_LOST_MSK__W 1
+#define FEC_OC_COMM_INT_MSK_SNC_LOST_MSK__M 0x4
+#define FEC_OC_COMM_INT_MSK_SNC_LOST_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_MSK_SNC_PAR_MSK__B 3
+#define FEC_OC_COMM_INT_MSK_SNC_PAR_MSK__W 1
+#define FEC_OC_COMM_INT_MSK_SNC_PAR_MSK__M 0x8
+#define FEC_OC_COMM_INT_MSK_SNC_PAR_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_MSK_FIFO_FULL_MSK__B 4
+#define FEC_OC_COMM_INT_MSK_FIFO_FULL_MSK__W 1
+#define FEC_OC_COMM_INT_MSK_FIFO_FULL_MSK__M 0x10
+#define FEC_OC_COMM_INT_MSK_FIFO_FULL_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_MSK_FIFO_EMPTY_MSK__B 5
+#define FEC_OC_COMM_INT_MSK_FIFO_EMPTY_MSK__W 1
+#define FEC_OC_COMM_INT_MSK_FIFO_EMPTY_MSK__M 0x20
+#define FEC_OC_COMM_INT_MSK_FIFO_EMPTY_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_MSK_OCR_ACQ_MSK__B 6
+#define FEC_OC_COMM_INT_MSK_OCR_ACQ_MSK__W 1
+#define FEC_OC_COMM_INT_MSK_OCR_ACQ_MSK__M 0x40
+#define FEC_OC_COMM_INT_MSK_OCR_ACQ_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_MSK_STAT_CHG_MSK__B 7
+#define FEC_OC_COMM_INT_MSK_STAT_CHG_MSK__W 1
+#define FEC_OC_COMM_INT_MSK_STAT_CHG_MSK__M 0x80
+#define FEC_OC_COMM_INT_MSK_STAT_CHG_MSK__PRE 0x0
+
+#define FEC_OC_COMM_INT_STM__A 0x2440007
+#define FEC_OC_COMM_INT_STM__W 8
+#define FEC_OC_COMM_INT_STM__M 0xFF
+#define FEC_OC_COMM_INT_STM__PRE 0x0
+#define FEC_OC_COMM_INT_STM_DPR_LOCK_MSK__B 0
+#define FEC_OC_COMM_INT_STM_DPR_LOCK_MSK__W 1
+#define FEC_OC_COMM_INT_STM_DPR_LOCK_MSK__M 0x1
+#define FEC_OC_COMM_INT_STM_DPR_LOCK_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_STM_SNC_LOCK_MSK__B 1
+#define FEC_OC_COMM_INT_STM_SNC_LOCK_MSK__W 1
+#define FEC_OC_COMM_INT_STM_SNC_LOCK_MSK__M 0x2
+#define FEC_OC_COMM_INT_STM_SNC_LOCK_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_STM_SNC_LOST_MSK__B 2
+#define FEC_OC_COMM_INT_STM_SNC_LOST_MSK__W 1
+#define FEC_OC_COMM_INT_STM_SNC_LOST_MSK__M 0x4
+#define FEC_OC_COMM_INT_STM_SNC_LOST_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_STM_SNC_PAR_MSK__B 3
+#define FEC_OC_COMM_INT_STM_SNC_PAR_MSK__W 1
+#define FEC_OC_COMM_INT_STM_SNC_PAR_MSK__M 0x8
+#define FEC_OC_COMM_INT_STM_SNC_PAR_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_STM_FIFO_FULL_MSK__B 4
+#define FEC_OC_COMM_INT_STM_FIFO_FULL_MSK__W 1
+#define FEC_OC_COMM_INT_STM_FIFO_FULL_MSK__M 0x10
+#define FEC_OC_COMM_INT_STM_FIFO_FULL_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_STM_FIFO_EMPTY_MSK__B 5
+#define FEC_OC_COMM_INT_STM_FIFO_EMPTY_MSK__W 1
+#define FEC_OC_COMM_INT_STM_FIFO_EMPTY_MSK__M 0x20
+#define FEC_OC_COMM_INT_STM_FIFO_EMPTY_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_STM_OCR_ACQ_MSK__B 6
+#define FEC_OC_COMM_INT_STM_OCR_ACQ_MSK__W 1
+#define FEC_OC_COMM_INT_STM_OCR_ACQ_MSK__M 0x40
+#define FEC_OC_COMM_INT_STM_OCR_ACQ_MSK__PRE 0x0
+#define FEC_OC_COMM_INT_STM_STAT_CHG_MSK__B 7
+#define FEC_OC_COMM_INT_STM_STAT_CHG_MSK__W 1
+#define FEC_OC_COMM_INT_STM_STAT_CHG_MSK__M 0x80
+#define FEC_OC_COMM_INT_STM_STAT_CHG_MSK__PRE 0x0
+
+#define FEC_OC_STATUS__A 0x2440010
+#define FEC_OC_STATUS__W 5
+#define FEC_OC_STATUS__M 0x1F
+#define FEC_OC_STATUS__PRE 0x0
+
+#define FEC_OC_STATUS_DPR_STATUS__B 0
+#define FEC_OC_STATUS_DPR_STATUS__W 1
+#define FEC_OC_STATUS_DPR_STATUS__M 0x1
+#define FEC_OC_STATUS_DPR_STATUS__PRE 0x0
+
+#define FEC_OC_STATUS_SNC_STATUS__B 1
+#define FEC_OC_STATUS_SNC_STATUS__W 2
+#define FEC_OC_STATUS_SNC_STATUS__M 0x6
+#define FEC_OC_STATUS_SNC_STATUS__PRE 0x0
+
+#define FEC_OC_STATUS_FIFO_FULL__B 3
+#define FEC_OC_STATUS_FIFO_FULL__W 1
+#define FEC_OC_STATUS_FIFO_FULL__M 0x8
+#define FEC_OC_STATUS_FIFO_FULL__PRE 0x0
+
+#define FEC_OC_STATUS_FIFO_EMPTY__B 4
+#define FEC_OC_STATUS_FIFO_EMPTY__W 1
+#define FEC_OC_STATUS_FIFO_EMPTY__M 0x10
+#define FEC_OC_STATUS_FIFO_EMPTY__PRE 0x0
+
+#define FEC_OC_MODE__A 0x2440011
+#define FEC_OC_MODE__W 4
+#define FEC_OC_MODE__M 0xF
+#define FEC_OC_MODE__PRE 0x0
+
+#define FEC_OC_MODE_PARITY__B 0
+#define FEC_OC_MODE_PARITY__W 1
+#define FEC_OC_MODE_PARITY__M 0x1
+#define FEC_OC_MODE_PARITY__PRE 0x0
+
+#define FEC_OC_MODE_TRANSPARENT__B 1
+#define FEC_OC_MODE_TRANSPARENT__W 1
+#define FEC_OC_MODE_TRANSPARENT__M 0x2
+#define FEC_OC_MODE_TRANSPARENT__PRE 0x0
+
+#define FEC_OC_MODE_CLEAR__B 2
+#define FEC_OC_MODE_CLEAR__W 1
+#define FEC_OC_MODE_CLEAR__M 0x4
+#define FEC_OC_MODE_CLEAR__PRE 0x0
+
+#define FEC_OC_MODE_RETAIN_FRAMING__B 3
+#define FEC_OC_MODE_RETAIN_FRAMING__W 1
+#define FEC_OC_MODE_RETAIN_FRAMING__M 0x8
+#define FEC_OC_MODE_RETAIN_FRAMING__PRE 0x0
+
+#define FEC_OC_DPR_MODE__A 0x2440012
+#define FEC_OC_DPR_MODE__W 2
+#define FEC_OC_DPR_MODE__M 0x3
+#define FEC_OC_DPR_MODE__PRE 0x0
+
+#define FEC_OC_DPR_MODE_ERR_DISABLE__B 0
+#define FEC_OC_DPR_MODE_ERR_DISABLE__W 1
+#define FEC_OC_DPR_MODE_ERR_DISABLE__M 0x1
+#define FEC_OC_DPR_MODE_ERR_DISABLE__PRE 0x0
+
+#define FEC_OC_DPR_MODE_NOSYNC_ENABLE__B 1
+#define FEC_OC_DPR_MODE_NOSYNC_ENABLE__W 1
+#define FEC_OC_DPR_MODE_NOSYNC_ENABLE__M 0x2
+#define FEC_OC_DPR_MODE_NOSYNC_ENABLE__PRE 0x0
+
+#define FEC_OC_DPR_UNLOCK__A 0x2440013
+#define FEC_OC_DPR_UNLOCK__W 1
+#define FEC_OC_DPR_UNLOCK__M 0x1
+#define FEC_OC_DPR_UNLOCK__PRE 0x0
+#define FEC_OC_DTO_MODE__A 0x2440014
+#define FEC_OC_DTO_MODE__W 3
+#define FEC_OC_DTO_MODE__M 0x7
+#define FEC_OC_DTO_MODE__PRE 0x0
+
+#define FEC_OC_DTO_MODE_DYNAMIC__B 0
+#define FEC_OC_DTO_MODE_DYNAMIC__W 1
+#define FEC_OC_DTO_MODE_DYNAMIC__M 0x1
+#define FEC_OC_DTO_MODE_DYNAMIC__PRE 0x0
+
+#define FEC_OC_DTO_MODE_DUTY_CYCLE__B 1
+#define FEC_OC_DTO_MODE_DUTY_CYCLE__W 1
+#define FEC_OC_DTO_MODE_DUTY_CYCLE__M 0x2
+#define FEC_OC_DTO_MODE_DUTY_CYCLE__PRE 0x0
+
+#define FEC_OC_DTO_MODE_OFFSET_ENABLE__B 2
+#define FEC_OC_DTO_MODE_OFFSET_ENABLE__W 1
+#define FEC_OC_DTO_MODE_OFFSET_ENABLE__M 0x4
+#define FEC_OC_DTO_MODE_OFFSET_ENABLE__PRE 0x0
+
+#define FEC_OC_DTO_PERIOD__A 0x2440015
+#define FEC_OC_DTO_PERIOD__W 8
+#define FEC_OC_DTO_PERIOD__M 0xFF
+#define FEC_OC_DTO_PERIOD__PRE 0x0
+#define FEC_OC_DTO_RATE_LO__A 0x2440016
+#define FEC_OC_DTO_RATE_LO__W 16
+#define FEC_OC_DTO_RATE_LO__M 0xFFFF
+#define FEC_OC_DTO_RATE_LO__PRE 0x0
+
+#define FEC_OC_DTO_RATE_LO_RATE_LO__B 0
+#define FEC_OC_DTO_RATE_LO_RATE_LO__W 16
+#define FEC_OC_DTO_RATE_LO_RATE_LO__M 0xFFFF
+#define FEC_OC_DTO_RATE_LO_RATE_LO__PRE 0x0
+
+#define FEC_OC_DTO_RATE_HI__A 0x2440017
+#define FEC_OC_DTO_RATE_HI__W 10
+#define FEC_OC_DTO_RATE_HI__M 0x3FF
+#define FEC_OC_DTO_RATE_HI__PRE 0xC0
+
+#define FEC_OC_DTO_RATE_HI_RATE_HI__B 0
+#define FEC_OC_DTO_RATE_HI_RATE_HI__W 10
+#define FEC_OC_DTO_RATE_HI_RATE_HI__M 0x3FF
+#define FEC_OC_DTO_RATE_HI_RATE_HI__PRE 0xC0
+
+#define FEC_OC_DTO_BURST_LEN__A 0x2440018
+#define FEC_OC_DTO_BURST_LEN__W 8
+#define FEC_OC_DTO_BURST_LEN__M 0xFF
+#define FEC_OC_DTO_BURST_LEN__PRE 0xBC
+
+#define FEC_OC_DTO_BURST_LEN_BURST_LEN__B 0
+#define FEC_OC_DTO_BURST_LEN_BURST_LEN__W 8
+#define FEC_OC_DTO_BURST_LEN_BURST_LEN__M 0xFF
+#define FEC_OC_DTO_BURST_LEN_BURST_LEN__PRE 0xBC
+
+#define FEC_OC_FCT_MODE__A 0x244001A
+#define FEC_OC_FCT_MODE__W 2
+#define FEC_OC_FCT_MODE__M 0x3
+#define FEC_OC_FCT_MODE__PRE 0x0
+
+#define FEC_OC_FCT_MODE_RAT_ENA__B 0
+#define FEC_OC_FCT_MODE_RAT_ENA__W 1
+#define FEC_OC_FCT_MODE_RAT_ENA__M 0x1
+#define FEC_OC_FCT_MODE_RAT_ENA__PRE 0x0
+
+#define FEC_OC_FCT_MODE_VIRT_ENA__B 1
+#define FEC_OC_FCT_MODE_VIRT_ENA__W 1
+#define FEC_OC_FCT_MODE_VIRT_ENA__M 0x2
+#define FEC_OC_FCT_MODE_VIRT_ENA__PRE 0x0
+
+#define FEC_OC_FCT_USAGE__A 0x244001B
+#define FEC_OC_FCT_USAGE__W 3
+#define FEC_OC_FCT_USAGE__M 0x7
+#define FEC_OC_FCT_USAGE__PRE 0x2
+
+#define FEC_OC_FCT_USAGE_USAGE__B 0
+#define FEC_OC_FCT_USAGE_USAGE__W 3
+#define FEC_OC_FCT_USAGE_USAGE__M 0x7
+#define FEC_OC_FCT_USAGE_USAGE__PRE 0x2
+
+#define FEC_OC_FCT_OCCUPATION__A 0x244001C
+#define FEC_OC_FCT_OCCUPATION__W 12
+#define FEC_OC_FCT_OCCUPATION__M 0xFFF
+#define FEC_OC_FCT_OCCUPATION__PRE 0x0
+
+#define FEC_OC_FCT_OCCUPATION_OCCUPATION__B 0
+#define FEC_OC_FCT_OCCUPATION_OCCUPATION__W 12
+#define FEC_OC_FCT_OCCUPATION_OCCUPATION__M 0xFFF
+#define FEC_OC_FCT_OCCUPATION_OCCUPATION__PRE 0x0
+
+#define FEC_OC_TMD_MODE__A 0x244001E
+#define FEC_OC_TMD_MODE__W 3
+#define FEC_OC_TMD_MODE__M 0x7
+#define FEC_OC_TMD_MODE__PRE 0x4
+
+#define FEC_OC_TMD_MODE_MODE__B 0
+#define FEC_OC_TMD_MODE_MODE__W 3
+#define FEC_OC_TMD_MODE_MODE__M 0x7
+#define FEC_OC_TMD_MODE_MODE__PRE 0x4
+
+#define FEC_OC_TMD_COUNT__A 0x244001F
+#define FEC_OC_TMD_COUNT__W 10
+#define FEC_OC_TMD_COUNT__M 0x3FF
+#define FEC_OC_TMD_COUNT__PRE 0x1F4
+
+#define FEC_OC_TMD_COUNT_COUNT__B 0
+#define FEC_OC_TMD_COUNT_COUNT__W 10
+#define FEC_OC_TMD_COUNT_COUNT__M 0x3FF
+#define FEC_OC_TMD_COUNT_COUNT__PRE 0x1F4
+
+#define FEC_OC_TMD_HI_MARGIN__A 0x2440020
+#define FEC_OC_TMD_HI_MARGIN__W 11
+#define FEC_OC_TMD_HI_MARGIN__M 0x7FF
+#define FEC_OC_TMD_HI_MARGIN__PRE 0x200
+
+#define FEC_OC_TMD_HI_MARGIN_HI_MARGIN__B 0
+#define FEC_OC_TMD_HI_MARGIN_HI_MARGIN__W 11
+#define FEC_OC_TMD_HI_MARGIN_HI_MARGIN__M 0x7FF
+#define FEC_OC_TMD_HI_MARGIN_HI_MARGIN__PRE 0x200
+
+#define FEC_OC_TMD_LO_MARGIN__A 0x2440021
+#define FEC_OC_TMD_LO_MARGIN__W 11
+#define FEC_OC_TMD_LO_MARGIN__M 0x7FF
+#define FEC_OC_TMD_LO_MARGIN__PRE 0x100
+
+#define FEC_OC_TMD_LO_MARGIN_LO_MARGIN__B 0
+#define FEC_OC_TMD_LO_MARGIN_LO_MARGIN__W 11
+#define FEC_OC_TMD_LO_MARGIN_LO_MARGIN__M 0x7FF
+#define FEC_OC_TMD_LO_MARGIN_LO_MARGIN__PRE 0x100
+
+#define FEC_OC_TMD_CTL_UPD_RATE__A 0x2440022
+#define FEC_OC_TMD_CTL_UPD_RATE__W 4
+#define FEC_OC_TMD_CTL_UPD_RATE__M 0xF
+#define FEC_OC_TMD_CTL_UPD_RATE__PRE 0x1
+
+#define FEC_OC_TMD_CTL_UPD_RATE_RATE__B 0
+#define FEC_OC_TMD_CTL_UPD_RATE_RATE__W 4
+#define FEC_OC_TMD_CTL_UPD_RATE_RATE__M 0xF
+#define FEC_OC_TMD_CTL_UPD_RATE_RATE__PRE 0x1
+
+#define FEC_OC_TMD_INT_UPD_RATE__A 0x2440023
+#define FEC_OC_TMD_INT_UPD_RATE__W 4
+#define FEC_OC_TMD_INT_UPD_RATE__M 0xF
+#define FEC_OC_TMD_INT_UPD_RATE__PRE 0x4
+
+#define FEC_OC_TMD_INT_UPD_RATE_RATE__B 0
+#define FEC_OC_TMD_INT_UPD_RATE_RATE__W 4
+#define FEC_OC_TMD_INT_UPD_RATE_RATE__M 0xF
+#define FEC_OC_TMD_INT_UPD_RATE_RATE__PRE 0x4
+
+#define FEC_OC_AVR_PARM_A__A 0x2440026
+#define FEC_OC_AVR_PARM_A__W 4
+#define FEC_OC_AVR_PARM_A__M 0xF
+#define FEC_OC_AVR_PARM_A__PRE 0x6
+
+#define FEC_OC_AVR_PARM_A_PARM__B 0
+#define FEC_OC_AVR_PARM_A_PARM__W 4
+#define FEC_OC_AVR_PARM_A_PARM__M 0xF
+#define FEC_OC_AVR_PARM_A_PARM__PRE 0x6
+
+#define FEC_OC_AVR_PARM_B__A 0x2440027
+#define FEC_OC_AVR_PARM_B__W 4
+#define FEC_OC_AVR_PARM_B__M 0xF
+#define FEC_OC_AVR_PARM_B__PRE 0x4
+
+#define FEC_OC_AVR_PARM_B_PARM__B 0
+#define FEC_OC_AVR_PARM_B_PARM__W 4
+#define FEC_OC_AVR_PARM_B_PARM__M 0xF
+#define FEC_OC_AVR_PARM_B_PARM__PRE 0x4
+
+#define FEC_OC_AVR_AVG_LO__A 0x2440028
+#define FEC_OC_AVR_AVG_LO__W 16
+#define FEC_OC_AVR_AVG_LO__M 0xFFFF
+#define FEC_OC_AVR_AVG_LO__PRE 0x0
+
+#define FEC_OC_AVR_AVG_LO_AVG_LO__B 0
+#define FEC_OC_AVR_AVG_LO_AVG_LO__W 16
+#define FEC_OC_AVR_AVG_LO_AVG_LO__M 0xFFFF
+#define FEC_OC_AVR_AVG_LO_AVG_LO__PRE 0x0
+
+#define FEC_OC_AVR_AVG_HI__A 0x2440029
+#define FEC_OC_AVR_AVG_HI__W 6
+#define FEC_OC_AVR_AVG_HI__M 0x3F
+#define FEC_OC_AVR_AVG_HI__PRE 0x0
+
+#define FEC_OC_AVR_AVG_HI_AVG_HI__B 0
+#define FEC_OC_AVR_AVG_HI_AVG_HI__W 6
+#define FEC_OC_AVR_AVG_HI_AVG_HI__M 0x3F
+#define FEC_OC_AVR_AVG_HI_AVG_HI__PRE 0x0
+
+#define FEC_OC_RCN_MODE__A 0x244002C
+#define FEC_OC_RCN_MODE__W 5
+#define FEC_OC_RCN_MODE__M 0x1F
+#define FEC_OC_RCN_MODE__PRE 0x1F
+
+#define FEC_OC_RCN_MODE_MODE__B 0
+#define FEC_OC_RCN_MODE_MODE__W 5
+#define FEC_OC_RCN_MODE_MODE__M 0x1F
+#define FEC_OC_RCN_MODE_MODE__PRE 0x1F
+
+#define FEC_OC_RCN_OCC_SETTLE__A 0x244002D
+#define FEC_OC_RCN_OCC_SETTLE__W 11
+#define FEC_OC_RCN_OCC_SETTLE__M 0x7FF
+#define FEC_OC_RCN_OCC_SETTLE__PRE 0x180
+
+#define FEC_OC_RCN_OCC_SETTLE_LEVEL__B 0
+#define FEC_OC_RCN_OCC_SETTLE_LEVEL__W 11
+#define FEC_OC_RCN_OCC_SETTLE_LEVEL__M 0x7FF
+#define FEC_OC_RCN_OCC_SETTLE_LEVEL__PRE 0x180
+
+#define FEC_OC_RCN_GAIN__A 0x244002E
+#define FEC_OC_RCN_GAIN__W 4
+#define FEC_OC_RCN_GAIN__M 0xF
+#define FEC_OC_RCN_GAIN__PRE 0xC
+
+#define FEC_OC_RCN_GAIN_GAIN__B 0
+#define FEC_OC_RCN_GAIN_GAIN__W 4
+#define FEC_OC_RCN_GAIN_GAIN__M 0xF
+#define FEC_OC_RCN_GAIN_GAIN__PRE 0xC
+
+#define FEC_OC_RCN_CTL_RATE_LO__A 0x2440030
+#define FEC_OC_RCN_CTL_RATE_LO__W 16
+#define FEC_OC_RCN_CTL_RATE_LO__M 0xFFFF
+#define FEC_OC_RCN_CTL_RATE_LO__PRE 0x0
+
+#define FEC_OC_RCN_CTL_RATE_LO_CTL_LO__B 0
+#define FEC_OC_RCN_CTL_RATE_LO_CTL_LO__W 16
+#define FEC_OC_RCN_CTL_RATE_LO_CTL_LO__M 0xFFFF
+#define FEC_OC_RCN_CTL_RATE_LO_CTL_LO__PRE 0x0
+
+#define FEC_OC_RCN_CTL_RATE_HI__A 0x2440031
+#define FEC_OC_RCN_CTL_RATE_HI__W 8
+#define FEC_OC_RCN_CTL_RATE_HI__M 0xFF
+#define FEC_OC_RCN_CTL_RATE_HI__PRE 0xC0
+
+#define FEC_OC_RCN_CTL_RATE_HI_CTL_HI__B 0
+#define FEC_OC_RCN_CTL_RATE_HI_CTL_HI__W 8
+#define FEC_OC_RCN_CTL_RATE_HI_CTL_HI__M 0xFF
+#define FEC_OC_RCN_CTL_RATE_HI_CTL_HI__PRE 0xC0
+
+#define FEC_OC_RCN_CTL_STEP_LO__A 0x2440032
+#define FEC_OC_RCN_CTL_STEP_LO__W 16
+#define FEC_OC_RCN_CTL_STEP_LO__M 0xFFFF
+#define FEC_OC_RCN_CTL_STEP_LO__PRE 0x0
+
+#define FEC_OC_RCN_CTL_STEP_LO_CTL_LO__B 0
+#define FEC_OC_RCN_CTL_STEP_LO_CTL_LO__W 16
+#define FEC_OC_RCN_CTL_STEP_LO_CTL_LO__M 0xFFFF
+#define FEC_OC_RCN_CTL_STEP_LO_CTL_LO__PRE 0x0
+
+#define FEC_OC_RCN_CTL_STEP_HI__A 0x2440033
+#define FEC_OC_RCN_CTL_STEP_HI__W 8
+#define FEC_OC_RCN_CTL_STEP_HI__M 0xFF
+#define FEC_OC_RCN_CTL_STEP_HI__PRE 0x8
+
+#define FEC_OC_RCN_CTL_STEP_HI_CTL_HI__B 0
+#define FEC_OC_RCN_CTL_STEP_HI_CTL_HI__W 8
+#define FEC_OC_RCN_CTL_STEP_HI_CTL_HI__M 0xFF
+#define FEC_OC_RCN_CTL_STEP_HI_CTL_HI__PRE 0x8
+
+#define FEC_OC_RCN_DTO_OFS_LO__A 0x2440034
+#define FEC_OC_RCN_DTO_OFS_LO__W 16
+#define FEC_OC_RCN_DTO_OFS_LO__M 0xFFFF
+#define FEC_OC_RCN_DTO_OFS_LO__PRE 0x0
+
+#define FEC_OC_RCN_DTO_OFS_LO_OFS_LO__B 0
+#define FEC_OC_RCN_DTO_OFS_LO_OFS_LO__W 16
+#define FEC_OC_RCN_DTO_OFS_LO_OFS_LO__M 0xFFFF
+#define FEC_OC_RCN_DTO_OFS_LO_OFS_LO__PRE 0x0
+
+#define FEC_OC_RCN_DTO_OFS_HI__A 0x2440035
+#define FEC_OC_RCN_DTO_OFS_HI__W 8
+#define FEC_OC_RCN_DTO_OFS_HI__M 0xFF
+#define FEC_OC_RCN_DTO_OFS_HI__PRE 0x0
+
+#define FEC_OC_RCN_DTO_OFS_HI_OFS_HI__B 0
+#define FEC_OC_RCN_DTO_OFS_HI_OFS_HI__W 8
+#define FEC_OC_RCN_DTO_OFS_HI_OFS_HI__M 0xFF
+#define FEC_OC_RCN_DTO_OFS_HI_OFS_HI__PRE 0x0
+
+#define FEC_OC_RCN_DTO_RATE_LO__A 0x2440036
+#define FEC_OC_RCN_DTO_RATE_LO__W 16
+#define FEC_OC_RCN_DTO_RATE_LO__M 0xFFFF
+#define FEC_OC_RCN_DTO_RATE_LO__PRE 0x0
+
+#define FEC_OC_RCN_DTO_RATE_LO_OFS_LO__B 0
+#define FEC_OC_RCN_DTO_RATE_LO_OFS_LO__W 16
+#define FEC_OC_RCN_DTO_RATE_LO_OFS_LO__M 0xFFFF
+#define FEC_OC_RCN_DTO_RATE_LO_OFS_LO__PRE 0x0
+
+#define FEC_OC_RCN_DTO_RATE_HI__A 0x2440037
+#define FEC_OC_RCN_DTO_RATE_HI__W 8
+#define FEC_OC_RCN_DTO_RATE_HI__M 0xFF
+#define FEC_OC_RCN_DTO_RATE_HI__PRE 0x0
+
+#define FEC_OC_RCN_DTO_RATE_HI_OFS_HI__B 0
+#define FEC_OC_RCN_DTO_RATE_HI_OFS_HI__W 8
+#define FEC_OC_RCN_DTO_RATE_HI_OFS_HI__M 0xFF
+#define FEC_OC_RCN_DTO_RATE_HI_OFS_HI__PRE 0x0
+
+#define FEC_OC_RCN_RATE_CLIP_LO__A 0x2440038
+#define FEC_OC_RCN_RATE_CLIP_LO__W 16
+#define FEC_OC_RCN_RATE_CLIP_LO__M 0xFFFF
+#define FEC_OC_RCN_RATE_CLIP_LO__PRE 0x0
+
+#define FEC_OC_RCN_RATE_CLIP_LO_CLIP_LO__B 0
+#define FEC_OC_RCN_RATE_CLIP_LO_CLIP_LO__W 16
+#define FEC_OC_RCN_RATE_CLIP_LO_CLIP_LO__M 0xFFFF
+#define FEC_OC_RCN_RATE_CLIP_LO_CLIP_LO__PRE 0x0
+
+#define FEC_OC_RCN_RATE_CLIP_HI__A 0x2440039
+#define FEC_OC_RCN_RATE_CLIP_HI__W 8
+#define FEC_OC_RCN_RATE_CLIP_HI__M 0xFF
+#define FEC_OC_RCN_RATE_CLIP_HI__PRE 0xF0
+
+#define FEC_OC_RCN_RATE_CLIP_HI_CLIP_HI__B 0
+#define FEC_OC_RCN_RATE_CLIP_HI_CLIP_HI__W 8
+#define FEC_OC_RCN_RATE_CLIP_HI_CLIP_HI__M 0xFF
+#define FEC_OC_RCN_RATE_CLIP_HI_CLIP_HI__PRE 0xF0
+
+#define FEC_OC_RCN_DYN_RATE_LO__A 0x244003A
+#define FEC_OC_RCN_DYN_RATE_LO__W 16
+#define FEC_OC_RCN_DYN_RATE_LO__M 0xFFFF
+#define FEC_OC_RCN_DYN_RATE_LO__PRE 0x0
+
+#define FEC_OC_RCN_DYN_RATE_LO_RATE_LO__B 0
+#define FEC_OC_RCN_DYN_RATE_LO_RATE_LO__W 16
+#define FEC_OC_RCN_DYN_RATE_LO_RATE_LO__M 0xFFFF
+#define FEC_OC_RCN_DYN_RATE_LO_RATE_LO__PRE 0x0
+
+#define FEC_OC_RCN_DYN_RATE_HI__A 0x244003B
+#define FEC_OC_RCN_DYN_RATE_HI__W 8
+#define FEC_OC_RCN_DYN_RATE_HI__M 0xFF
+#define FEC_OC_RCN_DYN_RATE_HI__PRE 0x0
+
+#define FEC_OC_RCN_DYN_RATE_HI_RATE_HI__B 0
+#define FEC_OC_RCN_DYN_RATE_HI_RATE_HI__W 8
+#define FEC_OC_RCN_DYN_RATE_HI_RATE_HI__M 0xFF
+#define FEC_OC_RCN_DYN_RATE_HI_RATE_HI__PRE 0x0
+
+#define FEC_OC_SNC_MODE__A 0x2440040
+#define FEC_OC_SNC_MODE__W 4
+#define FEC_OC_SNC_MODE__M 0xF
+#define FEC_OC_SNC_MODE__PRE 0x0
+
+#define FEC_OC_SNC_MODE_UNLOCK_ENABLE__B 0
+#define FEC_OC_SNC_MODE_UNLOCK_ENABLE__W 1
+#define FEC_OC_SNC_MODE_UNLOCK_ENABLE__M 0x1
+#define FEC_OC_SNC_MODE_UNLOCK_ENABLE__PRE 0x0
+
+#define FEC_OC_SNC_MODE_ERROR_CTL__B 1
+#define FEC_OC_SNC_MODE_ERROR_CTL__W 2
+#define FEC_OC_SNC_MODE_ERROR_CTL__M 0x6
+#define FEC_OC_SNC_MODE_ERROR_CTL__PRE 0x0
+
+#define FEC_OC_SNC_MODE_CORR_DISABLE__B 3
+#define FEC_OC_SNC_MODE_CORR_DISABLE__W 1
+#define FEC_OC_SNC_MODE_CORR_DISABLE__M 0x8
+#define FEC_OC_SNC_MODE_CORR_DISABLE__PRE 0x0
+
+#define FEC_OC_SNC_LWM__A 0x2440041
+#define FEC_OC_SNC_LWM__W 4
+#define FEC_OC_SNC_LWM__M 0xF
+#define FEC_OC_SNC_LWM__PRE 0x3
+
+#define FEC_OC_SNC_LWM_MARK__B 0
+#define FEC_OC_SNC_LWM_MARK__W 4
+#define FEC_OC_SNC_LWM_MARK__M 0xF
+#define FEC_OC_SNC_LWM_MARK__PRE 0x3
+
+#define FEC_OC_SNC_HWM__A 0x2440042
+#define FEC_OC_SNC_HWM__W 4
+#define FEC_OC_SNC_HWM__M 0xF
+#define FEC_OC_SNC_HWM__PRE 0x5
+
+#define FEC_OC_SNC_HWM_MARK__B 0
+#define FEC_OC_SNC_HWM_MARK__W 4
+#define FEC_OC_SNC_HWM_MARK__M 0xF
+#define FEC_OC_SNC_HWM_MARK__PRE 0x5
+
+#define FEC_OC_SNC_UNLOCK__A 0x2440043
+#define FEC_OC_SNC_UNLOCK__W 1
+#define FEC_OC_SNC_UNLOCK__M 0x1
+#define FEC_OC_SNC_UNLOCK__PRE 0x0
+
+#define FEC_OC_SNC_UNLOCK_RESTART__B 0
+#define FEC_OC_SNC_UNLOCK_RESTART__W 1
+#define FEC_OC_SNC_UNLOCK_RESTART__M 0x1
+#define FEC_OC_SNC_UNLOCK_RESTART__PRE 0x0
+
+#define FEC_OC_SNC_LOCK_COUNT__A 0x2440044
+#define FEC_OC_SNC_LOCK_COUNT__W 12
+#define FEC_OC_SNC_LOCK_COUNT__M 0xFFF
+#define FEC_OC_SNC_LOCK_COUNT__PRE 0x0
+
+#define FEC_OC_SNC_LOCK_COUNT_COUNT__B 0
+#define FEC_OC_SNC_LOCK_COUNT_COUNT__W 12
+#define FEC_OC_SNC_LOCK_COUNT_COUNT__M 0xFFF
+#define FEC_OC_SNC_LOCK_COUNT_COUNT__PRE 0x0
+
+#define FEC_OC_SNC_FAIL_COUNT__A 0x2440045
+#define FEC_OC_SNC_FAIL_COUNT__W 12
+#define FEC_OC_SNC_FAIL_COUNT__M 0xFFF
+#define FEC_OC_SNC_FAIL_COUNT__PRE 0x0
+
+#define FEC_OC_SNC_FAIL_COUNT_COUNT__B 0
+#define FEC_OC_SNC_FAIL_COUNT_COUNT__W 12
+#define FEC_OC_SNC_FAIL_COUNT_COUNT__M 0xFFF
+#define FEC_OC_SNC_FAIL_COUNT_COUNT__PRE 0x0
+
+#define FEC_OC_SNC_FAIL_PERIOD__A 0x2440046
+#define FEC_OC_SNC_FAIL_PERIOD__W 16
+#define FEC_OC_SNC_FAIL_PERIOD__M 0xFFFF
+#define FEC_OC_SNC_FAIL_PERIOD__PRE 0x1171
+
+#define FEC_OC_SNC_FAIL_PERIOD_PERIOD__B 0
+#define FEC_OC_SNC_FAIL_PERIOD_PERIOD__W 16
+#define FEC_OC_SNC_FAIL_PERIOD_PERIOD__M 0xFFFF
+#define FEC_OC_SNC_FAIL_PERIOD_PERIOD__PRE 0x1171
+
+#define FEC_OC_EMS_MODE__A 0x2440047
+#define FEC_OC_EMS_MODE__W 2
+#define FEC_OC_EMS_MODE__M 0x3
+#define FEC_OC_EMS_MODE__PRE 0x0
+
+#define FEC_OC_EMS_MODE_MODE__B 0
+#define FEC_OC_EMS_MODE_MODE__W 2
+#define FEC_OC_EMS_MODE_MODE__M 0x3
+#define FEC_OC_EMS_MODE_MODE__PRE 0x0
+
+#define FEC_OC_IPR_MODE__A 0x2440048
+#define FEC_OC_IPR_MODE__W 12
+#define FEC_OC_IPR_MODE__M 0xFFF
+#define FEC_OC_IPR_MODE__PRE 0x0
+
+#define FEC_OC_IPR_MODE_SERIAL__B 0
+#define FEC_OC_IPR_MODE_SERIAL__W 1
+#define FEC_OC_IPR_MODE_SERIAL__M 0x1
+#define FEC_OC_IPR_MODE_SERIAL__PRE 0x0
+
+#define FEC_OC_IPR_MODE_REVERSE_ORDER__B 1
+#define FEC_OC_IPR_MODE_REVERSE_ORDER__W 1
+#define FEC_OC_IPR_MODE_REVERSE_ORDER__M 0x2
+#define FEC_OC_IPR_MODE_REVERSE_ORDER__PRE 0x0
+
+#define FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__B 2
+#define FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__W 1
+#define FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__M 0x4
+#define FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__PRE 0x0
+
+#define FEC_OC_IPR_MODE_MCLK_DIS_PAR__B 3
+#define FEC_OC_IPR_MODE_MCLK_DIS_PAR__W 1
+#define FEC_OC_IPR_MODE_MCLK_DIS_PAR__M 0x8
+#define FEC_OC_IPR_MODE_MCLK_DIS_PAR__PRE 0x0
+
+#define FEC_OC_IPR_MODE_MVAL_DIS_PAR__B 4
+#define FEC_OC_IPR_MODE_MVAL_DIS_PAR__W 1
+#define FEC_OC_IPR_MODE_MVAL_DIS_PAR__M 0x10
+#define FEC_OC_IPR_MODE_MVAL_DIS_PAR__PRE 0x0
+
+#define FEC_OC_IPR_MODE_MERR_DIS_PAR__B 5
+#define FEC_OC_IPR_MODE_MERR_DIS_PAR__W 1
+#define FEC_OC_IPR_MODE_MERR_DIS_PAR__M 0x20
+#define FEC_OC_IPR_MODE_MERR_DIS_PAR__PRE 0x0
+
+#define FEC_OC_IPR_MODE_MD_DIS_PAR__B 6
+#define FEC_OC_IPR_MODE_MD_DIS_PAR__W 1
+#define FEC_OC_IPR_MODE_MD_DIS_PAR__M 0x40
+#define FEC_OC_IPR_MODE_MD_DIS_PAR__PRE 0x0
+
+#define FEC_OC_IPR_MODE_MCLK_DIS_ERR__B 7
+#define FEC_OC_IPR_MODE_MCLK_DIS_ERR__W 1
+#define FEC_OC_IPR_MODE_MCLK_DIS_ERR__M 0x80
+#define FEC_OC_IPR_MODE_MCLK_DIS_ERR__PRE 0x0
+
+#define FEC_OC_IPR_MODE_MVAL_DIS_ERR__B 8
+#define FEC_OC_IPR_MODE_MVAL_DIS_ERR__W 1
+#define FEC_OC_IPR_MODE_MVAL_DIS_ERR__M 0x100
+#define FEC_OC_IPR_MODE_MVAL_DIS_ERR__PRE 0x0
+
+#define FEC_OC_IPR_MODE_MERR_DIS_ERR__B 9
+#define FEC_OC_IPR_MODE_MERR_DIS_ERR__W 1
+#define FEC_OC_IPR_MODE_MERR_DIS_ERR__M 0x200
+#define FEC_OC_IPR_MODE_MERR_DIS_ERR__PRE 0x0
+
+#define FEC_OC_IPR_MODE_MD_DIS_ERR__B 10
+#define FEC_OC_IPR_MODE_MD_DIS_ERR__W 1
+#define FEC_OC_IPR_MODE_MD_DIS_ERR__M 0x400
+#define FEC_OC_IPR_MODE_MD_DIS_ERR__PRE 0x0
+
+#define FEC_OC_IPR_MODE_MSTRT_DIS_ERR__B 11
+#define FEC_OC_IPR_MODE_MSTRT_DIS_ERR__W 1
+#define FEC_OC_IPR_MODE_MSTRT_DIS_ERR__M 0x800
+#define FEC_OC_IPR_MODE_MSTRT_DIS_ERR__PRE 0x0
+
+#define FEC_OC_IPR_INVERT__A 0x2440049
+#define FEC_OC_IPR_INVERT__W 12
+#define FEC_OC_IPR_INVERT__M 0xFFF
+#define FEC_OC_IPR_INVERT__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MD0__B 0
+#define FEC_OC_IPR_INVERT_MD0__W 1
+#define FEC_OC_IPR_INVERT_MD0__M 0x1
+#define FEC_OC_IPR_INVERT_MD0__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MD1__B 1
+#define FEC_OC_IPR_INVERT_MD1__W 1
+#define FEC_OC_IPR_INVERT_MD1__M 0x2
+#define FEC_OC_IPR_INVERT_MD1__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MD2__B 2
+#define FEC_OC_IPR_INVERT_MD2__W 1
+#define FEC_OC_IPR_INVERT_MD2__M 0x4
+#define FEC_OC_IPR_INVERT_MD2__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MD3__B 3
+#define FEC_OC_IPR_INVERT_MD3__W 1
+#define FEC_OC_IPR_INVERT_MD3__M 0x8
+#define FEC_OC_IPR_INVERT_MD3__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MD4__B 4
+#define FEC_OC_IPR_INVERT_MD4__W 1
+#define FEC_OC_IPR_INVERT_MD4__M 0x10
+#define FEC_OC_IPR_INVERT_MD4__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MD5__B 5
+#define FEC_OC_IPR_INVERT_MD5__W 1
+#define FEC_OC_IPR_INVERT_MD5__M 0x20
+#define FEC_OC_IPR_INVERT_MD5__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MD6__B 6
+#define FEC_OC_IPR_INVERT_MD6__W 1
+#define FEC_OC_IPR_INVERT_MD6__M 0x40
+#define FEC_OC_IPR_INVERT_MD6__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MD7__B 7
+#define FEC_OC_IPR_INVERT_MD7__W 1
+#define FEC_OC_IPR_INVERT_MD7__M 0x80
+#define FEC_OC_IPR_INVERT_MD7__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MERR__B 8
+#define FEC_OC_IPR_INVERT_MERR__W 1
+#define FEC_OC_IPR_INVERT_MERR__M 0x100
+#define FEC_OC_IPR_INVERT_MERR__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MSTRT__B 9
+#define FEC_OC_IPR_INVERT_MSTRT__W 1
+#define FEC_OC_IPR_INVERT_MSTRT__M 0x200
+#define FEC_OC_IPR_INVERT_MSTRT__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MVAL__B 10
+#define FEC_OC_IPR_INVERT_MVAL__W 1
+#define FEC_OC_IPR_INVERT_MVAL__M 0x400
+#define FEC_OC_IPR_INVERT_MVAL__PRE 0x0
+
+#define FEC_OC_IPR_INVERT_MCLK__B 11
+#define FEC_OC_IPR_INVERT_MCLK__W 1
+#define FEC_OC_IPR_INVERT_MCLK__M 0x800
+#define FEC_OC_IPR_INVERT_MCLK__PRE 0x0
+
+#define FEC_OC_OCR_MODE__A 0x2440050
+#define FEC_OC_OCR_MODE__W 4
+#define FEC_OC_OCR_MODE__M 0xF
+#define FEC_OC_OCR_MODE__PRE 0x0
+
+#define FEC_OC_OCR_MODE_MB_SELECT__B 0
+#define FEC_OC_OCR_MODE_MB_SELECT__W 1
+#define FEC_OC_OCR_MODE_MB_SELECT__M 0x1
+#define FEC_OC_OCR_MODE_MB_SELECT__PRE 0x0
+
+#define FEC_OC_OCR_MODE_GRAB_ENABLE__B 1
+#define FEC_OC_OCR_MODE_GRAB_ENABLE__W 1
+#define FEC_OC_OCR_MODE_GRAB_ENABLE__M 0x2
+#define FEC_OC_OCR_MODE_GRAB_ENABLE__PRE 0x0
+
+#define FEC_OC_OCR_MODE_GRAB_SELECT__B 2
+#define FEC_OC_OCR_MODE_GRAB_SELECT__W 1
+#define FEC_OC_OCR_MODE_GRAB_SELECT__M 0x4
+#define FEC_OC_OCR_MODE_GRAB_SELECT__PRE 0x0
+
+#define FEC_OC_OCR_MODE_GRAB_COUNTED__B 3
+#define FEC_OC_OCR_MODE_GRAB_COUNTED__W 1
+#define FEC_OC_OCR_MODE_GRAB_COUNTED__M 0x8
+#define FEC_OC_OCR_MODE_GRAB_COUNTED__PRE 0x0
+
+#define FEC_OC_OCR_RATE__A 0x2440051
+#define FEC_OC_OCR_RATE__W 4
+#define FEC_OC_OCR_RATE__M 0xF
+#define FEC_OC_OCR_RATE__PRE 0x0
+
+#define FEC_OC_OCR_RATE_RATE__B 0
+#define FEC_OC_OCR_RATE_RATE__W 4
+#define FEC_OC_OCR_RATE_RATE__M 0xF
+#define FEC_OC_OCR_RATE_RATE__PRE 0x0
+
+#define FEC_OC_OCR_INVERT__A 0x2440052
+#define FEC_OC_OCR_INVERT__W 12
+#define FEC_OC_OCR_INVERT__M 0xFFF
+#define FEC_OC_OCR_INVERT__PRE 0x800
+
+#define FEC_OC_OCR_INVERT_INVERT__B 0
+#define FEC_OC_OCR_INVERT_INVERT__W 12
+#define FEC_OC_OCR_INVERT_INVERT__M 0xFFF
+#define FEC_OC_OCR_INVERT_INVERT__PRE 0x800
+
+#define FEC_OC_OCR_GRAB_COUNT__A 0x2440053
+#define FEC_OC_OCR_GRAB_COUNT__W 16
+#define FEC_OC_OCR_GRAB_COUNT__M 0xFFFF
+#define FEC_OC_OCR_GRAB_COUNT__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_COUNT_COUNT__B 0
+#define FEC_OC_OCR_GRAB_COUNT_COUNT__W 16
+#define FEC_OC_OCR_GRAB_COUNT_COUNT__M 0xFFFF
+#define FEC_OC_OCR_GRAB_COUNT_COUNT__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_SYNC__A 0x2440054
+#define FEC_OC_OCR_GRAB_SYNC__W 8
+#define FEC_OC_OCR_GRAB_SYNC__M 0xFF
+#define FEC_OC_OCR_GRAB_SYNC__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_SYNC_BYTE_SEL__B 0
+#define FEC_OC_OCR_GRAB_SYNC_BYTE_SEL__W 3
+#define FEC_OC_OCR_GRAB_SYNC_BYTE_SEL__M 0x7
+#define FEC_OC_OCR_GRAB_SYNC_BYTE_SEL__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_SYNC_BIT_SEL__B 3
+#define FEC_OC_OCR_GRAB_SYNC_BIT_SEL__W 4
+#define FEC_OC_OCR_GRAB_SYNC_BIT_SEL__M 0x78
+#define FEC_OC_OCR_GRAB_SYNC_BIT_SEL__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_SYNC_VALUE_SEL__B 7
+#define FEC_OC_OCR_GRAB_SYNC_VALUE_SEL__W 1
+#define FEC_OC_OCR_GRAB_SYNC_VALUE_SEL__M 0x80
+#define FEC_OC_OCR_GRAB_SYNC_VALUE_SEL__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD0__A 0x2440055
+#define FEC_OC_OCR_GRAB_RD0__W 10
+#define FEC_OC_OCR_GRAB_RD0__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD0__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD0_DATA__B 0
+#define FEC_OC_OCR_GRAB_RD0_DATA__W 10
+#define FEC_OC_OCR_GRAB_RD0_DATA__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD0_DATA__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD1__A 0x2440056
+#define FEC_OC_OCR_GRAB_RD1__W 10
+#define FEC_OC_OCR_GRAB_RD1__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD1__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD1_DATA__B 0
+#define FEC_OC_OCR_GRAB_RD1_DATA__W 10
+#define FEC_OC_OCR_GRAB_RD1_DATA__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD1_DATA__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD2__A 0x2440057
+#define FEC_OC_OCR_GRAB_RD2__W 10
+#define FEC_OC_OCR_GRAB_RD2__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD2__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD2_DATA__B 0
+#define FEC_OC_OCR_GRAB_RD2_DATA__W 10
+#define FEC_OC_OCR_GRAB_RD2_DATA__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD2_DATA__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD3__A 0x2440058
+#define FEC_OC_OCR_GRAB_RD3__W 10
+#define FEC_OC_OCR_GRAB_RD3__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD3__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD3_DATA__B 0
+#define FEC_OC_OCR_GRAB_RD3_DATA__W 10
+#define FEC_OC_OCR_GRAB_RD3_DATA__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD3_DATA__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD4__A 0x2440059
+#define FEC_OC_OCR_GRAB_RD4__W 10
+#define FEC_OC_OCR_GRAB_RD4__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD4__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD4_DATA__B 0
+#define FEC_OC_OCR_GRAB_RD4_DATA__W 10
+#define FEC_OC_OCR_GRAB_RD4_DATA__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD4_DATA__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD5__A 0x244005A
+#define FEC_OC_OCR_GRAB_RD5__W 10
+#define FEC_OC_OCR_GRAB_RD5__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD5__PRE 0x0
+
+#define FEC_OC_OCR_GRAB_RD5_DATA__B 0
+#define FEC_OC_OCR_GRAB_RD5_DATA__W 10
+#define FEC_OC_OCR_GRAB_RD5_DATA__M 0x3FF
+#define FEC_OC_OCR_GRAB_RD5_DATA__PRE 0x0
+
+#define FEC_DI_RAM__A 0x2450000
+
+#define FEC_RS_RAM__A 0x2460000
+
+#define FEC_OC_RAM__A 0x2470000
+
+#define IQM_COMM_EXEC__A 0x1800000
+#define IQM_COMM_EXEC__W 2
+#define IQM_COMM_EXEC__M 0x3
+#define IQM_COMM_EXEC__PRE 0x0
+#define IQM_COMM_EXEC_STOP 0x0
+#define IQM_COMM_EXEC_ACTIVE 0x1
+#define IQM_COMM_EXEC_HOLD 0x2
+
+#define IQM_COMM_MB__A 0x1800002
+#define IQM_COMM_MB__W 16
+#define IQM_COMM_MB__M 0xFFFF
+#define IQM_COMM_MB__PRE 0x0
+#define IQM_COMM_INT_REQ__A 0x1800003
+#define IQM_COMM_INT_REQ__W 2
+#define IQM_COMM_INT_REQ__M 0x3
+#define IQM_COMM_INT_REQ__PRE 0x0
+
+#define IQM_COMM_INT_REQ_AF_REQ__B 0
+#define IQM_COMM_INT_REQ_AF_REQ__W 1
+#define IQM_COMM_INT_REQ_AF_REQ__M 0x1
+#define IQM_COMM_INT_REQ_AF_REQ__PRE 0x0
+
+#define IQM_COMM_INT_REQ_CF_REQ__B 1
+#define IQM_COMM_INT_REQ_CF_REQ__W 1
+#define IQM_COMM_INT_REQ_CF_REQ__M 0x2
+#define IQM_COMM_INT_REQ_CF_REQ__PRE 0x0
+
+#define IQM_COMM_INT_STA__A 0x1800005
+#define IQM_COMM_INT_STA__W 16
+#define IQM_COMM_INT_STA__M 0xFFFF
+#define IQM_COMM_INT_STA__PRE 0x0
+#define IQM_COMM_INT_MSK__A 0x1800006
+#define IQM_COMM_INT_MSK__W 16
+#define IQM_COMM_INT_MSK__M 0xFFFF
+#define IQM_COMM_INT_MSK__PRE 0x0
+#define IQM_COMM_INT_STM__A 0x1800007
+#define IQM_COMM_INT_STM__W 16
+#define IQM_COMM_INT_STM__M 0xFFFF
+#define IQM_COMM_INT_STM__PRE 0x0
+
+#define IQM_FS_COMM_EXEC__A 0x1820000
+#define IQM_FS_COMM_EXEC__W 2
+#define IQM_FS_COMM_EXEC__M 0x3
+#define IQM_FS_COMM_EXEC__PRE 0x0
+#define IQM_FS_COMM_EXEC_STOP 0x0
+#define IQM_FS_COMM_EXEC_ACTIVE 0x1
+#define IQM_FS_COMM_EXEC_HOLD 0x2
+
+#define IQM_FS_COMM_MB__A 0x1820002
+#define IQM_FS_COMM_MB__W 2
+#define IQM_FS_COMM_MB__M 0x3
+#define IQM_FS_COMM_MB__PRE 0x0
+#define IQM_FS_COMM_MB_CTL__B 0
+#define IQM_FS_COMM_MB_CTL__W 1
+#define IQM_FS_COMM_MB_CTL__M 0x1
+#define IQM_FS_COMM_MB_CTL__PRE 0x0
+#define IQM_FS_COMM_MB_CTL_CTL_OFF 0x0
+#define IQM_FS_COMM_MB_CTL_CTL_ON 0x1
+#define IQM_FS_COMM_MB_OBS__B 1
+#define IQM_FS_COMM_MB_OBS__W 1
+#define IQM_FS_COMM_MB_OBS__M 0x2
+#define IQM_FS_COMM_MB_OBS__PRE 0x0
+#define IQM_FS_COMM_MB_OBS_OBS_OFF 0x0
+#define IQM_FS_COMM_MB_OBS_OBS_ON 0x2
+
+#define IQM_FS_RATE_OFS_LO__A 0x1820010
+#define IQM_FS_RATE_OFS_LO__W 16
+#define IQM_FS_RATE_OFS_LO__M 0xFFFF
+#define IQM_FS_RATE_OFS_LO__PRE 0x0
+#define IQM_FS_RATE_OFS_HI__A 0x1820011
+#define IQM_FS_RATE_OFS_HI__W 12
+#define IQM_FS_RATE_OFS_HI__M 0xFFF
+#define IQM_FS_RATE_OFS_HI__PRE 0x0
+#define IQM_FS_RATE_LO__A 0x1820012
+#define IQM_FS_RATE_LO__W 16
+#define IQM_FS_RATE_LO__M 0xFFFF
+#define IQM_FS_RATE_LO__PRE 0x0
+#define IQM_FS_RATE_HI__A 0x1820013
+#define IQM_FS_RATE_HI__W 12
+#define IQM_FS_RATE_HI__M 0xFFF
+#define IQM_FS_RATE_HI__PRE 0x0
+
+#define IQM_FS_ADJ_SEL__A 0x1820014
+#define IQM_FS_ADJ_SEL__W 2
+#define IQM_FS_ADJ_SEL__M 0x3
+#define IQM_FS_ADJ_SEL__PRE 0x0
+#define IQM_FS_ADJ_SEL_OFF 0x0
+#define IQM_FS_ADJ_SEL_QAM 0x1
+#define IQM_FS_ADJ_SEL_VSB 0x2
+
+#define IQM_FD_COMM_EXEC__A 0x1830000
+#define IQM_FD_COMM_EXEC__W 2
+#define IQM_FD_COMM_EXEC__M 0x3
+#define IQM_FD_COMM_EXEC__PRE 0x0
+#define IQM_FD_COMM_EXEC_STOP 0x0
+#define IQM_FD_COMM_EXEC_ACTIVE 0x1
+#define IQM_FD_COMM_EXEC_HOLD 0x2
+
+#define IQM_FD_COMM_MB__A 0x1830002
+#define IQM_FD_COMM_MB__W 2
+#define IQM_FD_COMM_MB__M 0x3
+#define IQM_FD_COMM_MB__PRE 0x0
+#define IQM_FD_COMM_MB_CTL__B 0
+#define IQM_FD_COMM_MB_CTL__W 1
+#define IQM_FD_COMM_MB_CTL__M 0x1
+#define IQM_FD_COMM_MB_CTL__PRE 0x0
+#define IQM_FD_COMM_MB_CTL_CTL_OFF 0x0
+#define IQM_FD_COMM_MB_CTL_CTL_ON 0x1
+#define IQM_FD_COMM_MB_OBS__B 1
+#define IQM_FD_COMM_MB_OBS__W 1
+#define IQM_FD_COMM_MB_OBS__M 0x2
+#define IQM_FD_COMM_MB_OBS__PRE 0x0
+#define IQM_FD_COMM_MB_OBS_OBS_OFF 0x0
+#define IQM_FD_COMM_MB_OBS_OBS_ON 0x2
+
+#define IQM_RC_COMM_EXEC__A 0x1840000
+#define IQM_RC_COMM_EXEC__W 2
+#define IQM_RC_COMM_EXEC__M 0x3
+#define IQM_RC_COMM_EXEC__PRE 0x0
+#define IQM_RC_COMM_EXEC_STOP 0x0
+#define IQM_RC_COMM_EXEC_ACTIVE 0x1
+#define IQM_RC_COMM_EXEC_HOLD 0x2
+
+#define IQM_RC_COMM_MB__A 0x1840002
+#define IQM_RC_COMM_MB__W 2
+#define IQM_RC_COMM_MB__M 0x3
+#define IQM_RC_COMM_MB__PRE 0x0
+#define IQM_RC_COMM_MB_CTL__B 0
+#define IQM_RC_COMM_MB_CTL__W 1
+#define IQM_RC_COMM_MB_CTL__M 0x1
+#define IQM_RC_COMM_MB_CTL__PRE 0x0
+#define IQM_RC_COMM_MB_CTL_CTL_OFF 0x0
+#define IQM_RC_COMM_MB_CTL_CTL_ON 0x1
+#define IQM_RC_COMM_MB_OBS__B 1
+#define IQM_RC_COMM_MB_OBS__W 1
+#define IQM_RC_COMM_MB_OBS__M 0x2
+#define IQM_RC_COMM_MB_OBS__PRE 0x0
+#define IQM_RC_COMM_MB_OBS_OBS_OFF 0x0
+#define IQM_RC_COMM_MB_OBS_OBS_ON 0x2
+
+#define IQM_RC_RATE_OFS_LO__A 0x1840010
+#define IQM_RC_RATE_OFS_LO__W 16
+#define IQM_RC_RATE_OFS_LO__M 0xFFFF
+#define IQM_RC_RATE_OFS_LO__PRE 0x0
+#define IQM_RC_RATE_OFS_HI__A 0x1840011
+#define IQM_RC_RATE_OFS_HI__W 8
+#define IQM_RC_RATE_OFS_HI__M 0xFF
+#define IQM_RC_RATE_OFS_HI__PRE 0x0
+#define IQM_RC_RATE_LO__A 0x1840012
+#define IQM_RC_RATE_LO__W 16
+#define IQM_RC_RATE_LO__M 0xFFFF
+#define IQM_RC_RATE_LO__PRE 0x0
+#define IQM_RC_RATE_HI__A 0x1840013
+#define IQM_RC_RATE_HI__W 8
+#define IQM_RC_RATE_HI__M 0xFF
+#define IQM_RC_RATE_HI__PRE 0x0
+
+#define IQM_RC_ADJ_SEL__A 0x1840014
+#define IQM_RC_ADJ_SEL__W 2
+#define IQM_RC_ADJ_SEL__M 0x3
+#define IQM_RC_ADJ_SEL__PRE 0x0
+#define IQM_RC_ADJ_SEL_OFF 0x0
+#define IQM_RC_ADJ_SEL_QAM 0x1
+#define IQM_RC_ADJ_SEL_VSB 0x2
+
+#define IQM_RC_CROUT_ENA__A 0x1840015
+#define IQM_RC_CROUT_ENA__W 1
+#define IQM_RC_CROUT_ENA__M 0x1
+#define IQM_RC_CROUT_ENA__PRE 0x0
+
+#define IQM_RC_CROUT_ENA_ENA__B 0
+#define IQM_RC_CROUT_ENA_ENA__W 1
+#define IQM_RC_CROUT_ENA_ENA__M 0x1
+#define IQM_RC_CROUT_ENA_ENA__PRE 0x0
+
+#define IQM_RC_STRETCH__A 0x1840016
+#define IQM_RC_STRETCH__W 5
+#define IQM_RC_STRETCH__M 0x1F
+#define IQM_RC_STRETCH__PRE 0x0
+#define IQM_RC_STRETCH_QAM_B_64 0x1E
+#define IQM_RC_STRETCH_QAM_B_256 0x1C
+#define IQM_RC_STRETCH_ATV 0xF
+
+#define IQM_RT_COMM_EXEC__A 0x1850000
+#define IQM_RT_COMM_EXEC__W 2
+#define IQM_RT_COMM_EXEC__M 0x3
+#define IQM_RT_COMM_EXEC__PRE 0x0
+#define IQM_RT_COMM_EXEC_STOP 0x0
+#define IQM_RT_COMM_EXEC_ACTIVE 0x1
+#define IQM_RT_COMM_EXEC_HOLD 0x2
+
+#define IQM_RT_COMM_MB__A 0x1850002
+#define IQM_RT_COMM_MB__W 2
+#define IQM_RT_COMM_MB__M 0x3
+#define IQM_RT_COMM_MB__PRE 0x0
+#define IQM_RT_COMM_MB_CTL__B 0
+#define IQM_RT_COMM_MB_CTL__W 1
+#define IQM_RT_COMM_MB_CTL__M 0x1
+#define IQM_RT_COMM_MB_CTL__PRE 0x0
+#define IQM_RT_COMM_MB_CTL_CTL_OFF 0x0
+#define IQM_RT_COMM_MB_CTL_CTL_ON 0x1
+#define IQM_RT_COMM_MB_OBS__B 1
+#define IQM_RT_COMM_MB_OBS__W 1
+#define IQM_RT_COMM_MB_OBS__M 0x2
+#define IQM_RT_COMM_MB_OBS__PRE 0x0
+#define IQM_RT_COMM_MB_OBS_OBS_OFF 0x0
+#define IQM_RT_COMM_MB_OBS_OBS_ON 0x2
+
+#define IQM_RT_ACTIVE__A 0x1850010
+#define IQM_RT_ACTIVE__W 2
+#define IQM_RT_ACTIVE__M 0x3
+#define IQM_RT_ACTIVE__PRE 0x0
+
+#define IQM_RT_ACTIVE_ACTIVE_RT__B 0
+#define IQM_RT_ACTIVE_ACTIVE_RT__W 1
+#define IQM_RT_ACTIVE_ACTIVE_RT__M 0x1
+#define IQM_RT_ACTIVE_ACTIVE_RT__PRE 0x0
+#define IQM_RT_ACTIVE_ACTIVE_RT_ATV_FCR_OFF 0x0
+#define IQM_RT_ACTIVE_ACTIVE_RT_ATV_FCR_ON 0x1
+
+#define IQM_RT_ACTIVE_ACTIVE_CR__B 1
+#define IQM_RT_ACTIVE_ACTIVE_CR__W 1
+#define IQM_RT_ACTIVE_ACTIVE_CR__M 0x2
+#define IQM_RT_ACTIVE_ACTIVE_CR__PRE 0x0
+#define IQM_RT_ACTIVE_ACTIVE_CR_ATV_CR_OFF 0x0
+#define IQM_RT_ACTIVE_ACTIVE_CR_ATV_CR_ON 0x2
+
+#define IQM_RT_LO_INCR__A 0x1850011
+#define IQM_RT_LO_INCR__W 12
+#define IQM_RT_LO_INCR__M 0xFFF
+#define IQM_RT_LO_INCR__PRE 0x588
+#define IQM_RT_LO_INCR_FM 0x0
+#define IQM_RT_LO_INCR_MN 0x588
+
+#define IQM_RT_ROT_BP__A 0x1850012
+#define IQM_RT_ROT_BP__W 2
+#define IQM_RT_ROT_BP__M 0x3
+#define IQM_RT_ROT_BP__PRE 0x0
+
+#define IQM_RT_ROT_BP_ROT_OFF__B 0
+#define IQM_RT_ROT_BP_ROT_OFF__W 1
+#define IQM_RT_ROT_BP_ROT_OFF__M 0x1
+#define IQM_RT_ROT_BP_ROT_OFF__PRE 0x0
+#define IQM_RT_ROT_BP_ROT_OFF_ACTIVE 0x0
+#define IQM_RT_ROT_BP_ROT_OFF_OFF 0x1
+
+#define IQM_RT_ROT_BP_ROT_BPF__B 1
+#define IQM_RT_ROT_BP_ROT_BPF__W 1
+#define IQM_RT_ROT_BP_ROT_BPF__M 0x2
+#define IQM_RT_ROT_BP_ROT_BPF__PRE 0x0
+
+#define IQM_RT_LP_BP__A 0x1850013
+#define IQM_RT_LP_BP__W 1
+#define IQM_RT_LP_BP__M 0x1
+#define IQM_RT_LP_BP__PRE 0x0
+
+#define IQM_RT_DELAY__A 0x1850014
+#define IQM_RT_DELAY__W 7
+#define IQM_RT_DELAY__M 0x7F
+#define IQM_RT_DELAY__PRE 0x45
+
+#define IQM_CF_COMM_EXEC__A 0x1860000
+#define IQM_CF_COMM_EXEC__W 2
+#define IQM_CF_COMM_EXEC__M 0x3
+#define IQM_CF_COMM_EXEC__PRE 0x0
+#define IQM_CF_COMM_EXEC_STOP 0x0
+#define IQM_CF_COMM_EXEC_ACTIVE 0x1
+#define IQM_CF_COMM_EXEC_HOLD 0x2
+
+#define IQM_CF_COMM_MB__A 0x1860002
+#define IQM_CF_COMM_MB__W 2
+#define IQM_CF_COMM_MB__M 0x3
+#define IQM_CF_COMM_MB__PRE 0x0
+#define IQM_CF_COMM_MB_CTL__B 0
+#define IQM_CF_COMM_MB_CTL__W 1
+#define IQM_CF_COMM_MB_CTL__M 0x1
+#define IQM_CF_COMM_MB_CTL__PRE 0x0
+#define IQM_CF_COMM_MB_CTL_CTL_OFF 0x0
+#define IQM_CF_COMM_MB_CTL_CTL_ON 0x1
+#define IQM_CF_COMM_MB_OBS__B 1
+#define IQM_CF_COMM_MB_OBS__W 1
+#define IQM_CF_COMM_MB_OBS__M 0x2
+#define IQM_CF_COMM_MB_OBS__PRE 0x0
+#define IQM_CF_COMM_MB_OBS_OBS_OFF 0x0
+#define IQM_CF_COMM_MB_OBS_OBS_ON 0x2
+
+#define IQM_CF_COMM_INT_REQ__A 0x1860003
+#define IQM_CF_COMM_INT_REQ__W 1
+#define IQM_CF_COMM_INT_REQ__M 0x1
+#define IQM_CF_COMM_INT_REQ__PRE 0x0
+#define IQM_CF_COMM_INT_STA__A 0x1860005
+#define IQM_CF_COMM_INT_STA__W 1
+#define IQM_CF_COMM_INT_STA__M 0x1
+#define IQM_CF_COMM_INT_STA__PRE 0x0
+#define IQM_CF_COMM_INT_STA_PM__B 0
+#define IQM_CF_COMM_INT_STA_PM__W 1
+#define IQM_CF_COMM_INT_STA_PM__M 0x1
+#define IQM_CF_COMM_INT_STA_PM__PRE 0x0
+
+#define IQM_CF_COMM_INT_MSK__A 0x1860006
+#define IQM_CF_COMM_INT_MSK__W 1
+#define IQM_CF_COMM_INT_MSK__M 0x1
+#define IQM_CF_COMM_INT_MSK__PRE 0x0
+#define IQM_CF_COMM_INT_MSK_PM__B 0
+#define IQM_CF_COMM_INT_MSK_PM__W 1
+#define IQM_CF_COMM_INT_MSK_PM__M 0x1
+#define IQM_CF_COMM_INT_MSK_PM__PRE 0x0
+
+#define IQM_CF_COMM_INT_STM__A 0x1860007
+#define IQM_CF_COMM_INT_STM__W 1
+#define IQM_CF_COMM_INT_STM__M 0x1
+#define IQM_CF_COMM_INT_STM__PRE 0x0
+#define IQM_CF_COMM_INT_STM_PM__B 0
+#define IQM_CF_COMM_INT_STM_PM__W 1
+#define IQM_CF_COMM_INT_STM_PM__M 0x1
+#define IQM_CF_COMM_INT_STM_PM__PRE 0x0
+
+#define IQM_CF_SYMMETRIC__A 0x1860010
+#define IQM_CF_SYMMETRIC__W 2
+#define IQM_CF_SYMMETRIC__M 0x3
+#define IQM_CF_SYMMETRIC__PRE 0x0
+
+#define IQM_CF_SYMMETRIC_RE__B 0
+#define IQM_CF_SYMMETRIC_RE__W 1
+#define IQM_CF_SYMMETRIC_RE__M 0x1
+#define IQM_CF_SYMMETRIC_RE__PRE 0x0
+
+#define IQM_CF_SYMMETRIC_IM__B 1
+#define IQM_CF_SYMMETRIC_IM__W 1
+#define IQM_CF_SYMMETRIC_IM__M 0x2
+#define IQM_CF_SYMMETRIC_IM__PRE 0x0
+
+#define IQM_CF_MIDTAP__A 0x1860011
+#define IQM_CF_MIDTAP__W 2
+#define IQM_CF_MIDTAP__M 0x3
+#define IQM_CF_MIDTAP__PRE 0x3
+
+#define IQM_CF_MIDTAP_RE__B 0
+#define IQM_CF_MIDTAP_RE__W 1
+#define IQM_CF_MIDTAP_RE__M 0x1
+#define IQM_CF_MIDTAP_RE__PRE 0x1
+
+#define IQM_CF_MIDTAP_IM__B 1
+#define IQM_CF_MIDTAP_IM__W 1
+#define IQM_CF_MIDTAP_IM__M 0x2
+#define IQM_CF_MIDTAP_IM__PRE 0x2
+
+#define IQM_CF_OUT_ENA__A 0x1860012
+#define IQM_CF_OUT_ENA__W 3
+#define IQM_CF_OUT_ENA__M 0x7
+#define IQM_CF_OUT_ENA__PRE 0x0
+
+#define IQM_CF_OUT_ENA_ATV__B 0
+#define IQM_CF_OUT_ENA_ATV__W 1
+#define IQM_CF_OUT_ENA_ATV__M 0x1
+#define IQM_CF_OUT_ENA_ATV__PRE 0x0
+
+#define IQM_CF_OUT_ENA_QAM__B 1
+#define IQM_CF_OUT_ENA_QAM__W 1
+#define IQM_CF_OUT_ENA_QAM__M 0x2
+#define IQM_CF_OUT_ENA_QAM__PRE 0x0
+
+#define IQM_CF_OUT_ENA_VSB__B 2
+#define IQM_CF_OUT_ENA_VSB__W 1
+#define IQM_CF_OUT_ENA_VSB__M 0x4
+#define IQM_CF_OUT_ENA_VSB__PRE 0x0
+
+#define IQM_CF_ADJ_SEL__A 0x1860013
+#define IQM_CF_ADJ_SEL__W 2
+#define IQM_CF_ADJ_SEL__M 0x3
+#define IQM_CF_ADJ_SEL__PRE 0x0
+#define IQM_CF_SCALE__A 0x1860014
+#define IQM_CF_SCALE__W 14
+#define IQM_CF_SCALE__M 0x3FFF
+#define IQM_CF_SCALE__PRE 0x400
+
+#define IQM_CF_SCALE_SH__A 0x1860015
+#define IQM_CF_SCALE_SH__W 2
+#define IQM_CF_SCALE_SH__M 0x3
+#define IQM_CF_SCALE_SH__PRE 0x0
+
+#define IQM_CF_AMP__A 0x1860016
+#define IQM_CF_AMP__W 14
+#define IQM_CF_AMP__M 0x3FFF
+#define IQM_CF_AMP__PRE 0x0
+
+#define IQM_CF_POW_MEAS_LEN__A 0x1860017
+#define IQM_CF_POW_MEAS_LEN__W 3
+#define IQM_CF_POW_MEAS_LEN__M 0x7
+#define IQM_CF_POW_MEAS_LEN__PRE 0x2
+#define IQM_CF_POW_MEAS_LEN_QAM_B_64 0x1
+#define IQM_CF_POW_MEAS_LEN_QAM_B_256 0x1
+
+#define IQM_CF_POW__A 0x1860018
+#define IQM_CF_POW__W 16
+#define IQM_CF_POW__M 0xFFFF
+#define IQM_CF_POW__PRE 0x2
+#define IQM_CF_TAP_RE0__A 0x1860020
+#define IQM_CF_TAP_RE0__W 7
+#define IQM_CF_TAP_RE0__M 0x7F
+#define IQM_CF_TAP_RE0__PRE 0x2
+#define IQM_CF_TAP_RE1__A 0x1860021
+#define IQM_CF_TAP_RE1__W 7
+#define IQM_CF_TAP_RE1__M 0x7F
+#define IQM_CF_TAP_RE1__PRE 0x2
+#define IQM_CF_TAP_RE2__A 0x1860022
+#define IQM_CF_TAP_RE2__W 7
+#define IQM_CF_TAP_RE2__M 0x7F
+#define IQM_CF_TAP_RE2__PRE 0x2
+#define IQM_CF_TAP_RE3__A 0x1860023
+#define IQM_CF_TAP_RE3__W 7
+#define IQM_CF_TAP_RE3__M 0x7F
+#define IQM_CF_TAP_RE3__PRE 0x2
+#define IQM_CF_TAP_RE4__A 0x1860024
+#define IQM_CF_TAP_RE4__W 7
+#define IQM_CF_TAP_RE4__M 0x7F
+#define IQM_CF_TAP_RE4__PRE 0x2
+#define IQM_CF_TAP_RE5__A 0x1860025
+#define IQM_CF_TAP_RE5__W 7
+#define IQM_CF_TAP_RE5__M 0x7F
+#define IQM_CF_TAP_RE5__PRE 0x2
+#define IQM_CF_TAP_RE6__A 0x1860026
+#define IQM_CF_TAP_RE6__W 7
+#define IQM_CF_TAP_RE6__M 0x7F
+#define IQM_CF_TAP_RE6__PRE 0x2
+#define IQM_CF_TAP_RE7__A 0x1860027
+#define IQM_CF_TAP_RE7__W 9
+#define IQM_CF_TAP_RE7__M 0x1FF
+#define IQM_CF_TAP_RE7__PRE 0x2
+#define IQM_CF_TAP_RE8__A 0x1860028
+#define IQM_CF_TAP_RE8__W 9
+#define IQM_CF_TAP_RE8__M 0x1FF
+#define IQM_CF_TAP_RE8__PRE 0x2
+#define IQM_CF_TAP_RE9__A 0x1860029
+#define IQM_CF_TAP_RE9__W 9
+#define IQM_CF_TAP_RE9__M 0x1FF
+#define IQM_CF_TAP_RE9__PRE 0x2
+#define IQM_CF_TAP_RE10__A 0x186002A
+#define IQM_CF_TAP_RE10__W 9
+#define IQM_CF_TAP_RE10__M 0x1FF
+#define IQM_CF_TAP_RE10__PRE 0x2
+#define IQM_CF_TAP_RE11__A 0x186002B
+#define IQM_CF_TAP_RE11__W 9
+#define IQM_CF_TAP_RE11__M 0x1FF
+#define IQM_CF_TAP_RE11__PRE 0x2
+#define IQM_CF_TAP_RE12__A 0x186002C
+#define IQM_CF_TAP_RE12__W 9
+#define IQM_CF_TAP_RE12__M 0x1FF
+#define IQM_CF_TAP_RE12__PRE 0x2
+#define IQM_CF_TAP_RE13__A 0x186002D
+#define IQM_CF_TAP_RE13__W 9
+#define IQM_CF_TAP_RE13__M 0x1FF
+#define IQM_CF_TAP_RE13__PRE 0x2
+#define IQM_CF_TAP_RE14__A 0x186002E
+#define IQM_CF_TAP_RE14__W 9
+#define IQM_CF_TAP_RE14__M 0x1FF
+#define IQM_CF_TAP_RE14__PRE 0x2
+#define IQM_CF_TAP_RE15__A 0x186002F
+#define IQM_CF_TAP_RE15__W 9
+#define IQM_CF_TAP_RE15__M 0x1FF
+#define IQM_CF_TAP_RE15__PRE 0x2
+#define IQM_CF_TAP_RE16__A 0x1860030
+#define IQM_CF_TAP_RE16__W 9
+#define IQM_CF_TAP_RE16__M 0x1FF
+#define IQM_CF_TAP_RE16__PRE 0x2
+#define IQM_CF_TAP_RE17__A 0x1860031
+#define IQM_CF_TAP_RE17__W 9
+#define IQM_CF_TAP_RE17__M 0x1FF
+#define IQM_CF_TAP_RE17__PRE 0x2
+#define IQM_CF_TAP_RE18__A 0x1860032
+#define IQM_CF_TAP_RE18__W 9
+#define IQM_CF_TAP_RE18__M 0x1FF
+#define IQM_CF_TAP_RE18__PRE 0x2
+#define IQM_CF_TAP_RE19__A 0x1860033
+#define IQM_CF_TAP_RE19__W 9
+#define IQM_CF_TAP_RE19__M 0x1FF
+#define IQM_CF_TAP_RE19__PRE 0x2
+#define IQM_CF_TAP_RE20__A 0x1860034
+#define IQM_CF_TAP_RE20__W 9
+#define IQM_CF_TAP_RE20__M 0x1FF
+#define IQM_CF_TAP_RE20__PRE 0x2
+#define IQM_CF_TAP_RE21__A 0x1860035
+#define IQM_CF_TAP_RE21__W 11
+#define IQM_CF_TAP_RE21__M 0x7FF
+#define IQM_CF_TAP_RE21__PRE 0x2
+#define IQM_CF_TAP_RE22__A 0x1860036
+#define IQM_CF_TAP_RE22__W 11
+#define IQM_CF_TAP_RE22__M 0x7FF
+#define IQM_CF_TAP_RE22__PRE 0x2
+#define IQM_CF_TAP_RE23__A 0x1860037
+#define IQM_CF_TAP_RE23__W 11
+#define IQM_CF_TAP_RE23__M 0x7FF
+#define IQM_CF_TAP_RE23__PRE 0x2
+#define IQM_CF_TAP_RE24__A 0x1860038
+#define IQM_CF_TAP_RE24__W 11
+#define IQM_CF_TAP_RE24__M 0x7FF
+#define IQM_CF_TAP_RE24__PRE 0x2
+#define IQM_CF_TAP_RE25__A 0x1860039
+#define IQM_CF_TAP_RE25__W 11
+#define IQM_CF_TAP_RE25__M 0x7FF
+#define IQM_CF_TAP_RE25__PRE 0x2
+#define IQM_CF_TAP_RE26__A 0x186003A
+#define IQM_CF_TAP_RE26__W 11
+#define IQM_CF_TAP_RE26__M 0x7FF
+#define IQM_CF_TAP_RE26__PRE 0x2
+#define IQM_CF_TAP_RE27__A 0x186003B
+#define IQM_CF_TAP_RE27__W 11
+#define IQM_CF_TAP_RE27__M 0x7FF
+#define IQM_CF_TAP_RE27__PRE 0x2
+#define IQM_CF_TAP_IM0__A 0x1860040
+#define IQM_CF_TAP_IM0__W 7
+#define IQM_CF_TAP_IM0__M 0x7F
+#define IQM_CF_TAP_IM0__PRE 0x2
+#define IQM_CF_TAP_IM1__A 0x1860041
+#define IQM_CF_TAP_IM1__W 7
+#define IQM_CF_TAP_IM1__M 0x7F
+#define IQM_CF_TAP_IM1__PRE 0x2
+#define IQM_CF_TAP_IM2__A 0x1860042
+#define IQM_CF_TAP_IM2__W 7
+#define IQM_CF_TAP_IM2__M 0x7F
+#define IQM_CF_TAP_IM2__PRE 0x2
+#define IQM_CF_TAP_IM3__A 0x1860043
+#define IQM_CF_TAP_IM3__W 7
+#define IQM_CF_TAP_IM3__M 0x7F
+#define IQM_CF_TAP_IM3__PRE 0x2
+#define IQM_CF_TAP_IM4__A 0x1860044
+#define IQM_CF_TAP_IM4__W 7
+#define IQM_CF_TAP_IM4__M 0x7F
+#define IQM_CF_TAP_IM4__PRE 0x2
+#define IQM_CF_TAP_IM5__A 0x1860045
+#define IQM_CF_TAP_IM5__W 7
+#define IQM_CF_TAP_IM5__M 0x7F
+#define IQM_CF_TAP_IM5__PRE 0x2
+#define IQM_CF_TAP_IM6__A 0x1860046
+#define IQM_CF_TAP_IM6__W 7
+#define IQM_CF_TAP_IM6__M 0x7F
+#define IQM_CF_TAP_IM6__PRE 0x2
+#define IQM_CF_TAP_IM7__A 0x1860047
+#define IQM_CF_TAP_IM7__W 9
+#define IQM_CF_TAP_IM7__M 0x1FF
+#define IQM_CF_TAP_IM7__PRE 0x2
+#define IQM_CF_TAP_IM8__A 0x1860048
+#define IQM_CF_TAP_IM8__W 9
+#define IQM_CF_TAP_IM8__M 0x1FF
+#define IQM_CF_TAP_IM8__PRE 0x2
+#define IQM_CF_TAP_IM9__A 0x1860049
+#define IQM_CF_TAP_IM9__W 9
+#define IQM_CF_TAP_IM9__M 0x1FF
+#define IQM_CF_TAP_IM9__PRE 0x2
+#define IQM_CF_TAP_IM10__A 0x186004A
+#define IQM_CF_TAP_IM10__W 9
+#define IQM_CF_TAP_IM10__M 0x1FF
+#define IQM_CF_TAP_IM10__PRE 0x2
+#define IQM_CF_TAP_IM11__A 0x186004B
+#define IQM_CF_TAP_IM11__W 9
+#define IQM_CF_TAP_IM11__M 0x1FF
+#define IQM_CF_TAP_IM11__PRE 0x2
+#define IQM_CF_TAP_IM12__A 0x186004C
+#define IQM_CF_TAP_IM12__W 9
+#define IQM_CF_TAP_IM12__M 0x1FF
+#define IQM_CF_TAP_IM12__PRE 0x2
+#define IQM_CF_TAP_IM13__A 0x186004D
+#define IQM_CF_TAP_IM13__W 9
+#define IQM_CF_TAP_IM13__M 0x1FF
+#define IQM_CF_TAP_IM13__PRE 0x2
+#define IQM_CF_TAP_IM14__A 0x186004E
+#define IQM_CF_TAP_IM14__W 9
+#define IQM_CF_TAP_IM14__M 0x1FF
+#define IQM_CF_TAP_IM14__PRE 0x2
+#define IQM_CF_TAP_IM15__A 0x186004F
+#define IQM_CF_TAP_IM15__W 9
+#define IQM_CF_TAP_IM15__M 0x1FF
+#define IQM_CF_TAP_IM15__PRE 0x2
+#define IQM_CF_TAP_IM16__A 0x1860050
+#define IQM_CF_TAP_IM16__W 9
+#define IQM_CF_TAP_IM16__M 0x1FF
+#define IQM_CF_TAP_IM16__PRE 0x2
+#define IQM_CF_TAP_IM17__A 0x1860051
+#define IQM_CF_TAP_IM17__W 9
+#define IQM_CF_TAP_IM17__M 0x1FF
+#define IQM_CF_TAP_IM17__PRE 0x2
+#define IQM_CF_TAP_IM18__A 0x1860052
+#define IQM_CF_TAP_IM18__W 9
+#define IQM_CF_TAP_IM18__M 0x1FF
+#define IQM_CF_TAP_IM18__PRE 0x2
+#define IQM_CF_TAP_IM19__A 0x1860053
+#define IQM_CF_TAP_IM19__W 9
+#define IQM_CF_TAP_IM19__M 0x1FF
+#define IQM_CF_TAP_IM19__PRE 0x2
+#define IQM_CF_TAP_IM20__A 0x1860054
+#define IQM_CF_TAP_IM20__W 9
+#define IQM_CF_TAP_IM20__M 0x1FF
+#define IQM_CF_TAP_IM20__PRE 0x2
+#define IQM_CF_TAP_IM21__A 0x1860055
+#define IQM_CF_TAP_IM21__W 11
+#define IQM_CF_TAP_IM21__M 0x7FF
+#define IQM_CF_TAP_IM21__PRE 0x2
+#define IQM_CF_TAP_IM22__A 0x1860056
+#define IQM_CF_TAP_IM22__W 11
+#define IQM_CF_TAP_IM22__M 0x7FF
+#define IQM_CF_TAP_IM22__PRE 0x2
+#define IQM_CF_TAP_IM23__A 0x1860057
+#define IQM_CF_TAP_IM23__W 11
+#define IQM_CF_TAP_IM23__M 0x7FF
+#define IQM_CF_TAP_IM23__PRE 0x2
+#define IQM_CF_TAP_IM24__A 0x1860058
+#define IQM_CF_TAP_IM24__W 11
+#define IQM_CF_TAP_IM24__M 0x7FF
+#define IQM_CF_TAP_IM24__PRE 0x2
+#define IQM_CF_TAP_IM25__A 0x1860059
+#define IQM_CF_TAP_IM25__W 11
+#define IQM_CF_TAP_IM25__M 0x7FF
+#define IQM_CF_TAP_IM25__PRE 0x2
+#define IQM_CF_TAP_IM26__A 0x186005A
+#define IQM_CF_TAP_IM26__W 11
+#define IQM_CF_TAP_IM26__M 0x7FF
+#define IQM_CF_TAP_IM26__PRE 0x2
+#define IQM_CF_TAP_IM27__A 0x186005B
+#define IQM_CF_TAP_IM27__W 11
+#define IQM_CF_TAP_IM27__M 0x7FF
+#define IQM_CF_TAP_IM27__PRE 0x2
+
+#define IQM_AF_COMM_EXEC__A 0x1870000
+#define IQM_AF_COMM_EXEC__W 2
+#define IQM_AF_COMM_EXEC__M 0x3
+#define IQM_AF_COMM_EXEC__PRE 0x0
+#define IQM_AF_COMM_EXEC_STOP 0x0
+#define IQM_AF_COMM_EXEC_ACTIVE 0x1
+#define IQM_AF_COMM_EXEC_HOLD 0x2
+
+#define IQM_AF_COMM_MB__A 0x1870002
+#define IQM_AF_COMM_MB__W 8
+#define IQM_AF_COMM_MB__M 0xFF
+#define IQM_AF_COMM_MB__PRE 0x0
+#define IQM_AF_COMM_MB_CTL__B 0
+#define IQM_AF_COMM_MB_CTL__W 1
+#define IQM_AF_COMM_MB_CTL__M 0x1
+#define IQM_AF_COMM_MB_CTL__PRE 0x0
+#define IQM_AF_COMM_MB_CTL_CTL_OFF 0x0
+#define IQM_AF_COMM_MB_CTL_CTL_ON 0x1
+#define IQM_AF_COMM_MB_OBS__B 1
+#define IQM_AF_COMM_MB_OBS__W 1
+#define IQM_AF_COMM_MB_OBS__M 0x2
+#define IQM_AF_COMM_MB_OBS__PRE 0x0
+#define IQM_AF_COMM_MB_OBS_OBS_OFF 0x0
+#define IQM_AF_COMM_MB_OBS_OBS_ON 0x2
+#define IQM_AF_COMM_MB_MUX_CTRL__B 2
+#define IQM_AF_COMM_MB_MUX_CTRL__W 3
+#define IQM_AF_COMM_MB_MUX_CTRL__M 0x1C
+#define IQM_AF_COMM_MB_MUX_CTRL__PRE 0x0
+#define IQM_AF_COMM_MB_MUX_CTRL_AF_DATA_INPUT 0x0
+#define IQM_AF_COMM_MB_MUX_CTRL_SENSE_INPUT 0x4
+#define IQM_AF_COMM_MB_MUX_CTRL_AF_DATA_OUTPUT 0x8
+#define IQM_AF_COMM_MB_MUX_CTRL_IF_AGC_OUTPUT 0xC
+#define IQM_AF_COMM_MB_MUX_CTRL_RF_AGC_OUTPUT 0x10
+#define IQM_AF_COMM_MB_MUX_OBS__B 5
+#define IQM_AF_COMM_MB_MUX_OBS__W 3
+#define IQM_AF_COMM_MB_MUX_OBS__M 0xE0
+#define IQM_AF_COMM_MB_MUX_OBS__PRE 0x0
+#define IQM_AF_COMM_MB_MUX_OBS_AF_DATA_INPUT 0x0
+#define IQM_AF_COMM_MB_MUX_OBS_SENSE_INPUT 0x20
+#define IQM_AF_COMM_MB_MUX_OBS_AF_DATA_OUTPUT 0x40
+#define IQM_AF_COMM_MB_MUX_OBS_IF_AGC_OUTPUT 0x60
+#define IQM_AF_COMM_MB_MUX_OBS_RF_AGC_OUTPUT 0x80
+
+#define IQM_AF_COMM_INT_REQ__A 0x1870003
+#define IQM_AF_COMM_INT_REQ__W 1
+#define IQM_AF_COMM_INT_REQ__M 0x1
+#define IQM_AF_COMM_INT_REQ__PRE 0x0
+#define IQM_AF_COMM_INT_STA__A 0x1870005
+#define IQM_AF_COMM_INT_STA__W 2
+#define IQM_AF_COMM_INT_STA__M 0x3
+#define IQM_AF_COMM_INT_STA__PRE 0x0
+#define IQM_AF_COMM_INT_STA_CLP_INT_STA__B 0
+#define IQM_AF_COMM_INT_STA_CLP_INT_STA__W 1
+#define IQM_AF_COMM_INT_STA_CLP_INT_STA__M 0x1
+#define IQM_AF_COMM_INT_STA_CLP_INT_STA__PRE 0x0
+#define IQM_AF_COMM_INT_STA_SNS_INT_STA__B 1
+#define IQM_AF_COMM_INT_STA_SNS_INT_STA__W 1
+#define IQM_AF_COMM_INT_STA_SNS_INT_STA__M 0x2
+#define IQM_AF_COMM_INT_STA_SNS_INT_STA__PRE 0x0
+
+#define IQM_AF_COMM_INT_MSK__A 0x1870006
+#define IQM_AF_COMM_INT_MSK__W 2
+#define IQM_AF_COMM_INT_MSK__M 0x3
+#define IQM_AF_COMM_INT_MSK__PRE 0x0
+#define IQM_AF_COMM_INT_MSK_CLP_INT_MSK__B 0
+#define IQM_AF_COMM_INT_MSK_CLP_INT_MSK__W 1
+#define IQM_AF_COMM_INT_MSK_CLP_INT_MSK__M 0x1
+#define IQM_AF_COMM_INT_MSK_CLP_INT_MSK__PRE 0x0
+#define IQM_AF_COMM_INT_MSK_SNS_INT_MSK__B 1
+#define IQM_AF_COMM_INT_MSK_SNS_INT_MSK__W 1
+#define IQM_AF_COMM_INT_MSK_SNS_INT_MSK__M 0x2
+#define IQM_AF_COMM_INT_MSK_SNS_INT_MSK__PRE 0x0
+
+#define IQM_AF_COMM_INT_STM__A 0x1870007
+#define IQM_AF_COMM_INT_STM__W 2
+#define IQM_AF_COMM_INT_STM__M 0x3
+#define IQM_AF_COMM_INT_STM__PRE 0x0
+#define IQM_AF_COMM_INT_STM_CLP_INT_STA__B 0
+#define IQM_AF_COMM_INT_STM_CLP_INT_STA__W 1
+#define IQM_AF_COMM_INT_STM_CLP_INT_STA__M 0x1
+#define IQM_AF_COMM_INT_STM_CLP_INT_STA__PRE 0x0
+#define IQM_AF_COMM_INT_STM_SNS_INT_STA__B 1
+#define IQM_AF_COMM_INT_STM_SNS_INT_STA__W 1
+#define IQM_AF_COMM_INT_STM_SNS_INT_STA__M 0x2
+#define IQM_AF_COMM_INT_STM_SNS_INT_STA__PRE 0x0
+
+#define IQM_AF_FDB_SEL__A 0x1870010
+#define IQM_AF_FDB_SEL__W 1
+#define IQM_AF_FDB_SEL__M 0x1
+#define IQM_AF_FDB_SEL__PRE 0x0
+
+#define IQM_AF_INVEXT__A 0x1870011
+#define IQM_AF_INVEXT__W 1
+#define IQM_AF_INVEXT__M 0x1
+#define IQM_AF_INVEXT__PRE 0x0
+#define IQM_AF_CLKNEG__A 0x1870012
+#define IQM_AF_CLKNEG__W 2
+#define IQM_AF_CLKNEG__M 0x3
+#define IQM_AF_CLKNEG__PRE 0x0
+
+#define IQM_AF_CLKNEG_CLKNEGPEAK__B 0
+#define IQM_AF_CLKNEG_CLKNEGPEAK__W 1
+#define IQM_AF_CLKNEG_CLKNEGPEAK__M 0x1
+#define IQM_AF_CLKNEG_CLKNEGPEAK__PRE 0x0
+#define IQM_AF_CLKNEG_CLKNEGPEAK_CLK_ADC_PEAK_POS 0x0
+#define IQM_AF_CLKNEG_CLKNEGPEAK_CLK_ADC_PEAK_NEG 0x1
+
+#define IQM_AF_CLKNEG_CLKNEGDATA__B 1
+#define IQM_AF_CLKNEG_CLKNEGDATA__W 1
+#define IQM_AF_CLKNEG_CLKNEGDATA__M 0x2
+#define IQM_AF_CLKNEG_CLKNEGDATA__PRE 0x0
+#define IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS 0x0
+#define IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_NEG 0x2
+
+#define IQM_AF_MON_IN_MUX__A 0x1870013
+#define IQM_AF_MON_IN_MUX__W 2
+#define IQM_AF_MON_IN_MUX__M 0x3
+#define IQM_AF_MON_IN_MUX__PRE 0x0
+
+#define IQM_AF_MON_IN5__A 0x1870014
+#define IQM_AF_MON_IN5__W 10
+#define IQM_AF_MON_IN5__M 0x3FF
+#define IQM_AF_MON_IN5__PRE 0x0
+
+#define IQM_AF_MON_IN4__A 0x1870015
+#define IQM_AF_MON_IN4__W 10
+#define IQM_AF_MON_IN4__M 0x3FF
+#define IQM_AF_MON_IN4__PRE 0x0
+
+#define IQM_AF_MON_IN3__A 0x1870016
+#define IQM_AF_MON_IN3__W 10
+#define IQM_AF_MON_IN3__M 0x3FF
+#define IQM_AF_MON_IN3__PRE 0x0
+
+#define IQM_AF_MON_IN2__A 0x1870017
+#define IQM_AF_MON_IN2__W 10
+#define IQM_AF_MON_IN2__M 0x3FF
+#define IQM_AF_MON_IN2__PRE 0x0
+
+#define IQM_AF_MON_IN1__A 0x1870018
+#define IQM_AF_MON_IN1__W 10
+#define IQM_AF_MON_IN1__M 0x3FF
+#define IQM_AF_MON_IN1__PRE 0x0
+
+#define IQM_AF_MON_IN0__A 0x1870019
+#define IQM_AF_MON_IN0__W 10
+#define IQM_AF_MON_IN0__M 0x3FF
+#define IQM_AF_MON_IN0__PRE 0x0
+
+#define IQM_AF_MON_IN_VAL__A 0x187001A
+#define IQM_AF_MON_IN_VAL__W 1
+#define IQM_AF_MON_IN_VAL__M 0x1
+#define IQM_AF_MON_IN_VAL__PRE 0x0
+
+#define IQM_AF_START_LOCK__A 0x187001B
+#define IQM_AF_START_LOCK__W 1
+#define IQM_AF_START_LOCK__M 0x1
+#define IQM_AF_START_LOCK__PRE 0x0
+
+#define IQM_AF_PHASE0__A 0x187001C
+#define IQM_AF_PHASE0__W 7
+#define IQM_AF_PHASE0__M 0x7F
+#define IQM_AF_PHASE0__PRE 0x0
+
+#define IQM_AF_PHASE1__A 0x187001D
+#define IQM_AF_PHASE1__W 7
+#define IQM_AF_PHASE1__M 0x7F
+#define IQM_AF_PHASE1__PRE 0x0
+
+#define IQM_AF_PHASE2__A 0x187001E
+#define IQM_AF_PHASE2__W 7
+#define IQM_AF_PHASE2__M 0x7F
+#define IQM_AF_PHASE2__PRE 0x0
+
+#define IQM_AF_SCU_PHASE__A 0x187001F
+#define IQM_AF_SCU_PHASE__W 2
+#define IQM_AF_SCU_PHASE__M 0x3
+#define IQM_AF_SCU_PHASE__PRE 0x0
+
+#define IQM_AF_SYNC_SEL__A 0x1870020
+#define IQM_AF_SYNC_SEL__W 2
+#define IQM_AF_SYNC_SEL__M 0x3
+#define IQM_AF_SYNC_SEL__PRE 0x0
+#define IQM_AF_ADC_CONF__A 0x1870021
+#define IQM_AF_ADC_CONF__W 4
+#define IQM_AF_ADC_CONF__M 0xF
+#define IQM_AF_ADC_CONF__PRE 0x0
+
+#define IQM_AF_ADC_CONF_ADC_SIGN__B 0
+#define IQM_AF_ADC_CONF_ADC_SIGN__W 1
+#define IQM_AF_ADC_CONF_ADC_SIGN__M 0x1
+#define IQM_AF_ADC_CONF_ADC_SIGN__PRE 0x0
+#define IQM_AF_ADC_CONF_ADC_SIGN_ADC_SIGNED 0x0
+#define IQM_AF_ADC_CONF_ADC_SIGN_ADC_UNSIGNED 0x1
+
+#define IQM_AF_ADC_CONF_BITREVERSE_ADC__B 1
+#define IQM_AF_ADC_CONF_BITREVERSE_ADC__W 1
+#define IQM_AF_ADC_CONF_BITREVERSE_ADC__M 0x2
+#define IQM_AF_ADC_CONF_BITREVERSE_ADC__PRE 0x0
+#define IQM_AF_ADC_CONF_BITREVERSE_ADC_ADC_NORMAL 0x0
+#define IQM_AF_ADC_CONF_BITREVERSE_ADC_ADC_BITREVERSED 0x2
+
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSI__B 2
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSI__W 1
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSI__M 0x4
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSI__PRE 0x0
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSI_IFAGC_DAC_NORMAL 0x0
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSI_IFAGC_DAC_BITREVERSED 0x4
+
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSR__B 3
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSR__W 1
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSR__M 0x8
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSR__PRE 0x0
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSR_RFAGC_DAC_NORMAL 0x0
+#define IQM_AF_ADC_CONF_BITREVERSE_NSSR_RFAGC_DAC_BITREVERSED 0x8
+
+#define IQM_AF_CLP_CLIP__A 0x1870022
+#define IQM_AF_CLP_CLIP__W 16
+#define IQM_AF_CLP_CLIP__M 0xFFFF
+#define IQM_AF_CLP_CLIP__PRE 0x0
+
+#define IQM_AF_CLP_LEN__A 0x1870023
+#define IQM_AF_CLP_LEN__W 16
+#define IQM_AF_CLP_LEN__M 0xFFFF
+#define IQM_AF_CLP_LEN__PRE 0x0
+#define IQM_AF_CLP_LEN_QAM_B_64 0x400
+#define IQM_AF_CLP_LEN_QAM_B_256 0x400
+#define IQM_AF_CLP_LEN_ATV 0x0
+
+#define IQM_AF_CLP_TH__A 0x1870024
+#define IQM_AF_CLP_TH__W 9
+#define IQM_AF_CLP_TH__M 0x1FF
+#define IQM_AF_CLP_TH__PRE 0x0
+#define IQM_AF_CLP_TH_QAM_B_64 0x80
+#define IQM_AF_CLP_TH_QAM_B_256 0x80
+#define IQM_AF_CLP_TH_ATV 0x1C0
+
+#define IQM_AF_DCF_BYPASS__A 0x1870025
+#define IQM_AF_DCF_BYPASS__W 1
+#define IQM_AF_DCF_BYPASS__M 0x1
+#define IQM_AF_DCF_BYPASS__PRE 0x0
+#define IQM_AF_DCF_BYPASS_ACTIVE 0x0
+#define IQM_AF_DCF_BYPASS_BYPASS 0x1
+
+#define IQM_AF_SNS_LEN__A 0x1870026
+#define IQM_AF_SNS_LEN__W 16
+#define IQM_AF_SNS_LEN__M 0xFFFF
+#define IQM_AF_SNS_LEN__PRE 0x0
+#define IQM_AF_SNS_LEN_QAM_B_64 0x400
+#define IQM_AF_SNS_LEN_QAM_B_256 0x400
+#define IQM_AF_SNS_LEN_ATV 0x0
+
+#define IQM_AF_SNS_SENSE__A 0x1870027
+#define IQM_AF_SNS_SENSE__W 16
+#define IQM_AF_SNS_SENSE__M 0xFFFF
+#define IQM_AF_SNS_SENSE__PRE 0x0
+
+#define IQM_AF_AGC_IF__A 0x1870028
+#define IQM_AF_AGC_IF__W 15
+#define IQM_AF_AGC_IF__M 0x7FFF
+#define IQM_AF_AGC_IF__PRE 0x0
+
+#define IQM_AF_AGC_RF__A 0x1870029
+#define IQM_AF_AGC_RF__W 15
+#define IQM_AF_AGC_RF__M 0x7FFF
+#define IQM_AF_AGC_RF__PRE 0x0
+
+#define IQM_AF_PGA_GAIN__A 0x187002A
+#define IQM_AF_PGA_GAIN__W 4
+#define IQM_AF_PGA_GAIN__M 0xF
+#define IQM_AF_PGA_GAIN__PRE 0x0
+
+#define IQM_AF_PDREF__A 0x187002B
+#define IQM_AF_PDREF__W 5
+#define IQM_AF_PDREF__M 0x1F
+#define IQM_AF_PDREF__PRE 0x0
+#define IQM_AF_PDREF_QAM_B_64 0xF
+#define IQM_AF_PDREF_QAM_B_256 0xF
+#define IQM_AF_PDREF_ATV 0xF
+
+#define IQM_AF_STDBY__A 0x187002C
+#define IQM_AF_STDBY__W 6
+#define IQM_AF_STDBY__M 0x3F
+#define IQM_AF_STDBY__PRE 0x0
+
+#define IQM_AF_STDBY_STDBY_BIAS__B 0
+#define IQM_AF_STDBY_STDBY_BIAS__W 1
+#define IQM_AF_STDBY_STDBY_BIAS__M 0x1
+#define IQM_AF_STDBY_STDBY_BIAS__PRE 0x0
+#define IQM_AF_STDBY_STDBY_BIAS_ACTIVE 0x0
+#define IQM_AF_STDBY_STDBY_BIAS_STANDBY 0x1
+
+#define IQM_AF_STDBY_STDBY_ADC__B 1
+#define IQM_AF_STDBY_STDBY_ADC__W 1
+#define IQM_AF_STDBY_STDBY_ADC__M 0x2
+#define IQM_AF_STDBY_STDBY_ADC__PRE 0x0
+#define IQM_AF_STDBY_STDBY_ADC_A1_ACTIVE 0x0
+#define IQM_AF_STDBY_STDBY_ADC_A1_STANDBY 0x2
+#define IQM_AF_STDBY_STDBY_ADC_A2_ACTIVE 0x2
+#define IQM_AF_STDBY_STDBY_ADC_A2_STANDBY 0x0
+
+#define IQM_AF_STDBY_STDBY_AMP__B 2
+#define IQM_AF_STDBY_STDBY_AMP__W 1
+#define IQM_AF_STDBY_STDBY_AMP__M 0x4
+#define IQM_AF_STDBY_STDBY_AMP__PRE 0x0
+#define IQM_AF_STDBY_STDBY_AMP_A1_ACTIVE 0x0
+#define IQM_AF_STDBY_STDBY_AMP_A1_STANDBY 0x4
+#define IQM_AF_STDBY_STDBY_AMP_A2_ACTIVE 0x4
+#define IQM_AF_STDBY_STDBY_AMP_A2_STANDBY 0x0
+
+#define IQM_AF_STDBY_STDBY_PD__B 3
+#define IQM_AF_STDBY_STDBY_PD__W 1
+#define IQM_AF_STDBY_STDBY_PD__M 0x8
+#define IQM_AF_STDBY_STDBY_PD__PRE 0x0
+#define IQM_AF_STDBY_STDBY_PD_A1_ACTIVE 0x0
+#define IQM_AF_STDBY_STDBY_PD_A1_STANDBY 0x8
+#define IQM_AF_STDBY_STDBY_PD_A2_ACTIVE 0x8
+#define IQM_AF_STDBY_STDBY_PD_A2_STANDBY 0x0
+
+#define IQM_AF_STDBY_STDBY_TAGC_IF__B 4
+#define IQM_AF_STDBY_STDBY_TAGC_IF__W 1
+#define IQM_AF_STDBY_STDBY_TAGC_IF__M 0x10
+#define IQM_AF_STDBY_STDBY_TAGC_IF__PRE 0x0
+#define IQM_AF_STDBY_STDBY_TAGC_IF_A1_ACTIVE 0x0
+#define IQM_AF_STDBY_STDBY_TAGC_IF_A1_STANDBY 0x10
+#define IQM_AF_STDBY_STDBY_TAGC_IF_A2_ACTIVE 0x10
+#define IQM_AF_STDBY_STDBY_TAGC_IF_A2_STANDBY 0x0
+
+#define IQM_AF_STDBY_STDBY_TAGC_RF__B 5
+#define IQM_AF_STDBY_STDBY_TAGC_RF__W 1
+#define IQM_AF_STDBY_STDBY_TAGC_RF__M 0x20
+#define IQM_AF_STDBY_STDBY_TAGC_RF__PRE 0x0
+#define IQM_AF_STDBY_STDBY_TAGC_RF_A1_ACTIVE 0x0
+#define IQM_AF_STDBY_STDBY_TAGC_RF_A1_STANDBY 0x20
+#define IQM_AF_STDBY_STDBY_TAGC_RF_A2_ACTIVE 0x20
+#define IQM_AF_STDBY_STDBY_TAGC_RF_A2_STANDBY 0x0
+
+#define IQM_AF_AMUX__A 0x187002D
+#define IQM_AF_AMUX__W 2
+#define IQM_AF_AMUX__M 0x3
+#define IQM_AF_AMUX__PRE 0x0
+
+#define IQM_AF_TST_AFEMAIN__A 0x187002E
+#define IQM_AF_TST_AFEMAIN__W 8
+#define IQM_AF_TST_AFEMAIN__M 0xFF
+#define IQM_AF_TST_AFEMAIN__PRE 0x0
+
+#define IQM_RT_RAM__A 0x1880000
+
+#define IQM_RT_RAM_DLY__B 0
+#define IQM_RT_RAM_DLY__W 13
+#define IQM_RT_RAM_DLY__M 0x1FFF
+#define IQM_RT_RAM_DLY__PRE 0x0
+
+#define ORX_COMM_EXEC__A 0x2000000
+#define ORX_COMM_EXEC__W 2
+#define ORX_COMM_EXEC__M 0x3
+#define ORX_COMM_EXEC__PRE 0x0
+#define ORX_COMM_EXEC_STOP 0x0
+#define ORX_COMM_EXEC_ACTIVE 0x1
+#define ORX_COMM_EXEC_HOLD 0x2
+
+#define ORX_COMM_STATE__A 0x2000001
+#define ORX_COMM_STATE__W 16
+#define ORX_COMM_STATE__M 0xFFFF
+#define ORX_COMM_STATE__PRE 0x0
+#define ORX_COMM_MB__A 0x2000002
+#define ORX_COMM_MB__W 16
+#define ORX_COMM_MB__M 0xFFFF
+#define ORX_COMM_MB__PRE 0x0
+#define ORX_COMM_INT_REQ__A 0x2000003
+#define ORX_COMM_INT_REQ__W 16
+#define ORX_COMM_INT_REQ__M 0xFFFF
+#define ORX_COMM_INT_REQ__PRE 0x0
+#define ORX_COMM_INT_REQ_EQU_REQ__B 0
+#define ORX_COMM_INT_REQ_EQU_REQ__W 1
+#define ORX_COMM_INT_REQ_EQU_REQ__M 0x1
+#define ORX_COMM_INT_REQ_EQU_REQ__PRE 0x0
+#define ORX_COMM_INT_REQ_DDC_REQ__B 1
+#define ORX_COMM_INT_REQ_DDC_REQ__W 1
+#define ORX_COMM_INT_REQ_DDC_REQ__M 0x2
+#define ORX_COMM_INT_REQ_DDC_REQ__PRE 0x0
+#define ORX_COMM_INT_REQ_FWP_REQ__B 2
+#define ORX_COMM_INT_REQ_FWP_REQ__W 1
+#define ORX_COMM_INT_REQ_FWP_REQ__M 0x4
+#define ORX_COMM_INT_REQ_FWP_REQ__PRE 0x0
+#define ORX_COMM_INT_REQ_CON_REQ__B 3
+#define ORX_COMM_INT_REQ_CON_REQ__W 1
+#define ORX_COMM_INT_REQ_CON_REQ__M 0x8
+#define ORX_COMM_INT_REQ_CON_REQ__PRE 0x0
+#define ORX_COMM_INT_REQ_NSU_REQ__B 4
+#define ORX_COMM_INT_REQ_NSU_REQ__W 1
+#define ORX_COMM_INT_REQ_NSU_REQ__M 0x10
+#define ORX_COMM_INT_REQ_NSU_REQ__PRE 0x0
+
+#define ORX_COMM_INT_STA__A 0x2000005
+#define ORX_COMM_INT_STA__W 16
+#define ORX_COMM_INT_STA__M 0xFFFF
+#define ORX_COMM_INT_STA__PRE 0x0
+#define ORX_COMM_INT_MSK__A 0x2000006
+#define ORX_COMM_INT_MSK__W 16
+#define ORX_COMM_INT_MSK__M 0xFFFF
+#define ORX_COMM_INT_MSK__PRE 0x0
+#define ORX_COMM_INT_STM__A 0x2000007
+#define ORX_COMM_INT_STM__W 16
+#define ORX_COMM_INT_STM__M 0xFFFF
+#define ORX_COMM_INT_STM__PRE 0x0
+
+#define ORX_TOP_COMM_EXEC__A 0x2010000
+#define ORX_TOP_COMM_EXEC__W 2
+#define ORX_TOP_COMM_EXEC__M 0x3
+#define ORX_TOP_COMM_EXEC__PRE 0x0
+#define ORX_TOP_COMM_EXEC_STOP 0x0
+#define ORX_TOP_COMM_EXEC_ACTIVE 0x1
+#define ORX_TOP_COMM_EXEC_HOLD 0x2
+
+#define ORX_TOP_COMM_KEY__A 0x201000F
+#define ORX_TOP_COMM_KEY__W 16
+#define ORX_TOP_COMM_KEY__M 0xFFFF
+#define ORX_TOP_COMM_KEY__PRE 0x0
+#define ORX_TOP_COMM_KEY_KEY 0xFABA
+
+#define ORX_TOP_MDE_W__A 0x2010010
+#define ORX_TOP_MDE_W__W 2
+#define ORX_TOP_MDE_W__M 0x3
+#define ORX_TOP_MDE_W__PRE 0x2
+#define ORX_TOP_MDE_W_RATE_1544KBPS 0x0
+#define ORX_TOP_MDE_W_RATE_3088KBPS 0x1
+#define ORX_TOP_MDE_W_RATE_2048KBPS_SQRT 0x2
+#define ORX_TOP_MDE_W_RATE_2048KBPS_RO 0x3
+
+#define ORX_TOP_AIF_CTRL_W__A 0x2010011
+#define ORX_TOP_AIF_CTRL_W__W 3
+#define ORX_TOP_AIF_CTRL_W__M 0x7
+#define ORX_TOP_AIF_CTRL_W__PRE 0x0
+#define ORX_TOP_AIF_CTRL_W_NEG_CLK_EDGE__B 0
+#define ORX_TOP_AIF_CTRL_W_NEG_CLK_EDGE__W 1
+#define ORX_TOP_AIF_CTRL_W_NEG_CLK_EDGE__M 0x1
+#define ORX_TOP_AIF_CTRL_W_NEG_CLK_EDGE__PRE 0x0
+#define ORX_TOP_AIF_CTRL_W_NEG_CLK_EDGE_ADC_SAMPL_ON_POS_CLK_EDGE 0x0
+#define ORX_TOP_AIF_CTRL_W_NEG_CLK_EDGE_ADC_SAMPL_ON_NEG_CLK_EDGE 0x1
+#define ORX_TOP_AIF_CTRL_W_BIT_REVERSE__B 1
+#define ORX_TOP_AIF_CTRL_W_BIT_REVERSE__W 1
+#define ORX_TOP_AIF_CTRL_W_BIT_REVERSE__M 0x2
+#define ORX_TOP_AIF_CTRL_W_BIT_REVERSE__PRE 0x0
+#define ORX_TOP_AIF_CTRL_W_BIT_REVERSE_REGULAR_BIT_ORDER_ADC 0x0
+#define ORX_TOP_AIF_CTRL_W_BIT_REVERSE_REVERSAL_BIT_ORDER_ADC 0x2
+#define ORX_TOP_AIF_CTRL_W_INV_MSB__B 2
+#define ORX_TOP_AIF_CTRL_W_INV_MSB__W 1
+#define ORX_TOP_AIF_CTRL_W_INV_MSB__M 0x4
+#define ORX_TOP_AIF_CTRL_W_INV_MSB__PRE 0x0
+#define ORX_TOP_AIF_CTRL_W_INV_MSB_NO_MSB_INVERSION_ADC 0x0
+#define ORX_TOP_AIF_CTRL_W_INV_MSB_MSB_INVERSION_ADC 0x4
+
+#define ORX_FWP_COMM_EXEC__A 0x2020000
+#define ORX_FWP_COMM_EXEC__W 2
+#define ORX_FWP_COMM_EXEC__M 0x3
+#define ORX_FWP_COMM_EXEC__PRE 0x0
+#define ORX_FWP_COMM_EXEC_STOP 0x0
+#define ORX_FWP_COMM_EXEC_ACTIVE 0x1
+#define ORX_FWP_COMM_EXEC_HOLD 0x2
+
+#define ORX_FWP_COMM_MB__A 0x2020002
+#define ORX_FWP_COMM_MB__W 8
+#define ORX_FWP_COMM_MB__M 0xFF
+#define ORX_FWP_COMM_MB__PRE 0x0
+#define ORX_FWP_COMM_MB_CTL__B 0
+#define ORX_FWP_COMM_MB_CTL__W 1
+#define ORX_FWP_COMM_MB_CTL__M 0x1
+#define ORX_FWP_COMM_MB_CTL__PRE 0x0
+#define ORX_FWP_COMM_MB_CTL_OFF 0x0
+#define ORX_FWP_COMM_MB_CTL_ON 0x1
+#define ORX_FWP_COMM_MB_OBS__B 1
+#define ORX_FWP_COMM_MB_OBS__W 1
+#define ORX_FWP_COMM_MB_OBS__M 0x2
+#define ORX_FWP_COMM_MB_OBS__PRE 0x0
+#define ORX_FWP_COMM_MB_OBS_OFF 0x0
+#define ORX_FWP_COMM_MB_OBS_ON 0x2
+
+#define ORX_FWP_COMM_MB_CTL_MUX__B 2
+#define ORX_FWP_COMM_MB_CTL_MUX__W 3
+#define ORX_FWP_COMM_MB_CTL_MUX__M 0x1C
+#define ORX_FWP_COMM_MB_CTL_MUX__PRE 0x0
+
+#define ORX_FWP_COMM_MB_OBS_MUX__B 5
+#define ORX_FWP_COMM_MB_OBS_MUX__W 3
+#define ORX_FWP_COMM_MB_OBS_MUX__M 0xE0
+#define ORX_FWP_COMM_MB_OBS_MUX__PRE 0x0
+
+#define ORX_FWP_AAG_LEN_W__A 0x2020010
+#define ORX_FWP_AAG_LEN_W__W 16
+#define ORX_FWP_AAG_LEN_W__M 0xFFFF
+#define ORX_FWP_AAG_LEN_W__PRE 0x800
+
+#define ORX_FWP_AAG_THR_W__A 0x2020011
+#define ORX_FWP_AAG_THR_W__W 8
+#define ORX_FWP_AAG_THR_W__M 0xFF
+#define ORX_FWP_AAG_THR_W__PRE 0x50
+
+#define ORX_FWP_AAG_THR_CNT_R__A 0x2020012
+#define ORX_FWP_AAG_THR_CNT_R__W 16
+#define ORX_FWP_AAG_THR_CNT_R__M 0xFFFF
+#define ORX_FWP_AAG_THR_CNT_R__PRE 0x0
+
+#define ORX_FWP_AAG_SNS_CNT_R__A 0x2020013
+#define ORX_FWP_AAG_SNS_CNT_R__W 16
+#define ORX_FWP_AAG_SNS_CNT_R__M 0xFFFF
+#define ORX_FWP_AAG_SNS_CNT_R__PRE 0x0
+
+#define ORX_FWP_PFI_A_W__A 0x2020014
+#define ORX_FWP_PFI_A_W__W 8
+#define ORX_FWP_PFI_A_W__M 0xFF
+#define ORX_FWP_PFI_A_W__PRE 0xB0
+#define ORX_FWP_PFI_A_W_RATE_2048KBPS 0xB0
+#define ORX_FWP_PFI_A_W_RATE_1544KBPS 0xA4
+#define ORX_FWP_PFI_A_W_RATE_3088KBPS 0xC0
+
+#define ORX_FWP_PFI_B_W__A 0x2020015
+#define ORX_FWP_PFI_B_W__W 8
+#define ORX_FWP_PFI_B_W__M 0xFF
+#define ORX_FWP_PFI_B_W__PRE 0x9E
+#define ORX_FWP_PFI_B_W_RATE_2048KBPS 0x9E
+#define ORX_FWP_PFI_B_W_RATE_1544KBPS 0x94
+#define ORX_FWP_PFI_B_W_RATE_3088KBPS 0xB0
+
+#define ORX_FWP_PFI_C_W__A 0x2020016
+#define ORX_FWP_PFI_C_W__W 8
+#define ORX_FWP_PFI_C_W__M 0xFF
+#define ORX_FWP_PFI_C_W__PRE 0x5C
+#define ORX_FWP_PFI_C_W_RATE_2048KBPS 0x5C
+#define ORX_FWP_PFI_C_W_RATE_1544KBPS 0x64
+#define ORX_FWP_PFI_C_W_RATE_3088KBPS 0x50
+
+#define ORX_FWP_KR1_AMP_R__A 0x2020017
+#define ORX_FWP_KR1_AMP_R__W 9
+#define ORX_FWP_KR1_AMP_R__M 0x1FF
+#define ORX_FWP_KR1_AMP_R__PRE 0x0
+
+#define ORX_FWP_KR1_LDT_W__A 0x2020018
+#define ORX_FWP_KR1_LDT_W__W 3
+#define ORX_FWP_KR1_LDT_W__M 0x7
+#define ORX_FWP_KR1_LDT_W__PRE 0x2
+#define ORX_FWP_SRC_DGN_W__A 0x2020019
+#define ORX_FWP_SRC_DGN_W__W 16
+#define ORX_FWP_SRC_DGN_W__M 0xFFFF
+#define ORX_FWP_SRC_DGN_W__PRE 0x1FF
+
+#define ORX_FWP_SRC_DGN_W_MANT__B 0
+#define ORX_FWP_SRC_DGN_W_MANT__W 9
+#define ORX_FWP_SRC_DGN_W_MANT__M 0x1FF
+#define ORX_FWP_SRC_DGN_W_MANT__PRE 0x1FF
+
+#define ORX_FWP_SRC_DGN_W_EXP__B 12
+#define ORX_FWP_SRC_DGN_W_EXP__W 4
+#define ORX_FWP_SRC_DGN_W_EXP__M 0xF000
+#define ORX_FWP_SRC_DGN_W_EXP__PRE 0x0
+
+#define ORX_FWP_NYQ_ADR_W__A 0x202001A
+#define ORX_FWP_NYQ_ADR_W__W 5
+#define ORX_FWP_NYQ_ADR_W__M 0x1F
+#define ORX_FWP_NYQ_ADR_W__PRE 0x1F
+
+#define ORX_FWP_NYQ_COF_RW__A 0x202001B
+#define ORX_FWP_NYQ_COF_RW__W 10
+#define ORX_FWP_NYQ_COF_RW__M 0x3FF
+#define ORX_FWP_NYQ_COF_RW__PRE 0x0
+
+#define ORX_FWP_IQM_FRQ_W__A 0x202001C
+#define ORX_FWP_IQM_FRQ_W__W 16
+#define ORX_FWP_IQM_FRQ_W__M 0xFFFF
+#define ORX_FWP_IQM_FRQ_W__PRE 0x4301
+
+#define ORX_EQU_COMM_EXEC__A 0x2030000
+#define ORX_EQU_COMM_EXEC__W 2
+#define ORX_EQU_COMM_EXEC__M 0x3
+#define ORX_EQU_COMM_EXEC__PRE 0x0
+#define ORX_EQU_COMM_EXEC_STOP 0x0
+#define ORX_EQU_COMM_EXEC_ACTIVE 0x1
+#define ORX_EQU_COMM_EXEC_HOLD 0x2
+
+#define ORX_EQU_COMM_MB__A 0x2030002
+#define ORX_EQU_COMM_MB__W 8
+#define ORX_EQU_COMM_MB__M 0xFF
+#define ORX_EQU_COMM_MB__PRE 0x0
+#define ORX_EQU_COMM_MB_CTL__B 0
+#define ORX_EQU_COMM_MB_CTL__W 1
+#define ORX_EQU_COMM_MB_CTL__M 0x1
+#define ORX_EQU_COMM_MB_CTL__PRE 0x0
+#define ORX_EQU_COMM_MB_CTL_OFF 0x0
+#define ORX_EQU_COMM_MB_CTL_ON 0x1
+#define ORX_EQU_COMM_MB_OBS__B 1
+#define ORX_EQU_COMM_MB_OBS__W 1
+#define ORX_EQU_COMM_MB_OBS__M 0x2
+#define ORX_EQU_COMM_MB_OBS__PRE 0x0
+#define ORX_EQU_COMM_MB_OBS_OFF 0x0
+#define ORX_EQU_COMM_MB_OBS_ON 0x2
+
+#define ORX_EQU_COMM_MB_CTL_MUX__B 2
+#define ORX_EQU_COMM_MB_CTL_MUX__W 3
+#define ORX_EQU_COMM_MB_CTL_MUX__M 0x1C
+#define ORX_EQU_COMM_MB_CTL_MUX__PRE 0x0
+
+#define ORX_EQU_COMM_MB_OBS_MUX__B 5
+#define ORX_EQU_COMM_MB_OBS_MUX__W 3
+#define ORX_EQU_COMM_MB_OBS_MUX__M 0xE0
+#define ORX_EQU_COMM_MB_OBS_MUX__PRE 0x0
+
+#define ORX_EQU_COMM_INT_REQ__A 0x2030003
+#define ORX_EQU_COMM_INT_REQ__W 1
+#define ORX_EQU_COMM_INT_REQ__M 0x1
+#define ORX_EQU_COMM_INT_REQ__PRE 0x0
+#define ORX_EQU_COMM_INT_STA__A 0x2030005
+#define ORX_EQU_COMM_INT_STA__W 2
+#define ORX_EQU_COMM_INT_STA__M 0x3
+#define ORX_EQU_COMM_INT_STA__PRE 0x0
+
+#define ORX_EQU_COMM_INT_STA_FFF_READ__B 0
+#define ORX_EQU_COMM_INT_STA_FFF_READ__W 1
+#define ORX_EQU_COMM_INT_STA_FFF_READ__M 0x1
+#define ORX_EQU_COMM_INT_STA_FFF_READ__PRE 0x0
+
+#define ORX_EQU_COMM_INT_STA_FBF_READ__B 1
+#define ORX_EQU_COMM_INT_STA_FBF_READ__W 1
+#define ORX_EQU_COMM_INT_STA_FBF_READ__M 0x2
+#define ORX_EQU_COMM_INT_STA_FBF_READ__PRE 0x0
+
+#define ORX_EQU_COMM_INT_MSK__A 0x2030006
+#define ORX_EQU_COMM_INT_MSK__W 2
+#define ORX_EQU_COMM_INT_MSK__M 0x3
+#define ORX_EQU_COMM_INT_MSK__PRE 0x0
+#define ORX_EQU_COMM_INT_MSK_FFF_READ__B 0
+#define ORX_EQU_COMM_INT_MSK_FFF_READ__W 1
+#define ORX_EQU_COMM_INT_MSK_FFF_READ__M 0x1
+#define ORX_EQU_COMM_INT_MSK_FFF_READ__PRE 0x0
+#define ORX_EQU_COMM_INT_MSK_FBF_READ__B 1
+#define ORX_EQU_COMM_INT_MSK_FBF_READ__W 1
+#define ORX_EQU_COMM_INT_MSK_FBF_READ__M 0x2
+#define ORX_EQU_COMM_INT_MSK_FBF_READ__PRE 0x0
+
+#define ORX_EQU_COMM_INT_STM__A 0x2030007
+#define ORX_EQU_COMM_INT_STM__W 2
+#define ORX_EQU_COMM_INT_STM__M 0x3
+#define ORX_EQU_COMM_INT_STM__PRE 0x0
+#define ORX_EQU_COMM_INT_STM_FFF_READ__B 0
+#define ORX_EQU_COMM_INT_STM_FFF_READ__W 1
+#define ORX_EQU_COMM_INT_STM_FFF_READ__M 0x1
+#define ORX_EQU_COMM_INT_STM_FFF_READ__PRE 0x0
+#define ORX_EQU_COMM_INT_STM_FBF_READ__B 1
+#define ORX_EQU_COMM_INT_STM_FBF_READ__W 1
+#define ORX_EQU_COMM_INT_STM_FBF_READ__M 0x2
+#define ORX_EQU_COMM_INT_STM_FBF_READ__PRE 0x0
+
+#define ORX_EQU_FFF_SCL_W__A 0x2030010
+#define ORX_EQU_FFF_SCL_W__W 1
+#define ORX_EQU_FFF_SCL_W__M 0x1
+#define ORX_EQU_FFF_SCL_W__PRE 0x0
+#define ORX_EQU_FFF_SCL_W_SCALE_GAIN_1 0x0
+#define ORX_EQU_FFF_SCL_W_SCALE_GAIN_2 0x1
+
+#define ORX_EQU_FFF_UPD_W__A 0x2030011
+#define ORX_EQU_FFF_UPD_W__W 1
+#define ORX_EQU_FFF_UPD_W__M 0x1
+#define ORX_EQU_FFF_UPD_W__PRE 0x0
+#define ORX_EQU_FFF_UPD_W_NO_UPDATE 0x0
+#define ORX_EQU_FFF_UPD_W_LMS_UPDATE 0x1
+
+#define ORX_EQU_FFF_STP_W__A 0x2030012
+#define ORX_EQU_FFF_STP_W__W 3
+#define ORX_EQU_FFF_STP_W__M 0x7
+#define ORX_EQU_FFF_STP_W__PRE 0x2
+
+#define ORX_EQU_FFF_LEA_W__A 0x2030013
+#define ORX_EQU_FFF_LEA_W__W 4
+#define ORX_EQU_FFF_LEA_W__M 0xF
+#define ORX_EQU_FFF_LEA_W__PRE 0x4
+
+#define ORX_EQU_FFF_RWT_W__A 0x2030014
+#define ORX_EQU_FFF_RWT_W__W 2
+#define ORX_EQU_FFF_RWT_W__M 0x3
+#define ORX_EQU_FFF_RWT_W__PRE 0x0
+
+#define ORX_EQU_FFF_C0RE_RW__A 0x2030015
+#define ORX_EQU_FFF_C0RE_RW__W 12
+#define ORX_EQU_FFF_C0RE_RW__M 0xFFF
+#define ORX_EQU_FFF_C0RE_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C0IM_RW__A 0x2030016
+#define ORX_EQU_FFF_C0IM_RW__W 12
+#define ORX_EQU_FFF_C0IM_RW__M 0xFFF
+#define ORX_EQU_FFF_C0IM_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C1RE_RW__A 0x2030017
+#define ORX_EQU_FFF_C1RE_RW__W 12
+#define ORX_EQU_FFF_C1RE_RW__M 0xFFF
+#define ORX_EQU_FFF_C1RE_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C1IM_RW__A 0x2030018
+#define ORX_EQU_FFF_C1IM_RW__W 12
+#define ORX_EQU_FFF_C1IM_RW__M 0xFFF
+#define ORX_EQU_FFF_C1IM_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C2RE_RW__A 0x2030019
+#define ORX_EQU_FFF_C2RE_RW__W 12
+#define ORX_EQU_FFF_C2RE_RW__M 0xFFF
+#define ORX_EQU_FFF_C2RE_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C2IM_RW__A 0x203001A
+#define ORX_EQU_FFF_C2IM_RW__W 12
+#define ORX_EQU_FFF_C2IM_RW__M 0xFFF
+#define ORX_EQU_FFF_C2IM_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C3RE_RW__A 0x203001B
+#define ORX_EQU_FFF_C3RE_RW__W 12
+#define ORX_EQU_FFF_C3RE_RW__M 0xFFF
+#define ORX_EQU_FFF_C3RE_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C3IM_RW__A 0x203001C
+#define ORX_EQU_FFF_C3IM_RW__W 12
+#define ORX_EQU_FFF_C3IM_RW__M 0xFFF
+#define ORX_EQU_FFF_C3IM_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C4RE_RW__A 0x203001D
+#define ORX_EQU_FFF_C4RE_RW__W 12
+#define ORX_EQU_FFF_C4RE_RW__M 0xFFF
+#define ORX_EQU_FFF_C4RE_RW__PRE 0x400
+
+#define ORX_EQU_FFF_C4IM_RW__A 0x203001E
+#define ORX_EQU_FFF_C4IM_RW__W 12
+#define ORX_EQU_FFF_C4IM_RW__M 0xFFF
+#define ORX_EQU_FFF_C4IM_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C5RE_RW__A 0x203001F
+#define ORX_EQU_FFF_C5RE_RW__W 12
+#define ORX_EQU_FFF_C5RE_RW__M 0xFFF
+#define ORX_EQU_FFF_C5RE_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C5IM_RW__A 0x2030020
+#define ORX_EQU_FFF_C5IM_RW__W 12
+#define ORX_EQU_FFF_C5IM_RW__M 0xFFF
+#define ORX_EQU_FFF_C5IM_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C6RE_RW__A 0x2030021
+#define ORX_EQU_FFF_C6RE_RW__W 12
+#define ORX_EQU_FFF_C6RE_RW__M 0xFFF
+#define ORX_EQU_FFF_C6RE_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C6IM_RW__A 0x2030022
+#define ORX_EQU_FFF_C6IM_RW__W 12
+#define ORX_EQU_FFF_C6IM_RW__M 0xFFF
+#define ORX_EQU_FFF_C6IM_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C7RE_RW__A 0x2030023
+#define ORX_EQU_FFF_C7RE_RW__W 12
+#define ORX_EQU_FFF_C7RE_RW__M 0xFFF
+#define ORX_EQU_FFF_C7RE_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C7IM_RW__A 0x2030024
+#define ORX_EQU_FFF_C7IM_RW__W 12
+#define ORX_EQU_FFF_C7IM_RW__M 0xFFF
+#define ORX_EQU_FFF_C7IM_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C8RE_RW__A 0x2030025
+#define ORX_EQU_FFF_C8RE_RW__W 12
+#define ORX_EQU_FFF_C8RE_RW__M 0xFFF
+#define ORX_EQU_FFF_C8RE_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C8IM_RW__A 0x2030026
+#define ORX_EQU_FFF_C8IM_RW__W 12
+#define ORX_EQU_FFF_C8IM_RW__M 0xFFF
+#define ORX_EQU_FFF_C8IM_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C9RE_RW__A 0x2030027
+#define ORX_EQU_FFF_C9RE_RW__W 12
+#define ORX_EQU_FFF_C9RE_RW__M 0xFFF
+#define ORX_EQU_FFF_C9RE_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C9IM_RW__A 0x2030028
+#define ORX_EQU_FFF_C9IM_RW__W 12
+#define ORX_EQU_FFF_C9IM_RW__M 0xFFF
+#define ORX_EQU_FFF_C9IM_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C10RE_RW__A 0x2030029
+#define ORX_EQU_FFF_C10RE_RW__W 12
+#define ORX_EQU_FFF_C10RE_RW__M 0xFFF
+#define ORX_EQU_FFF_C10RE_RW__PRE 0x0
+
+#define ORX_EQU_FFF_C10IM_RW__A 0x203002A
+#define ORX_EQU_FFF_C10IM_RW__W 12
+#define ORX_EQU_FFF_C10IM_RW__M 0xFFF
+#define ORX_EQU_FFF_C10IM_RW__PRE 0x0
+
+#define ORX_EQU_MXB_SEL_W__A 0x203002B
+#define ORX_EQU_MXB_SEL_W__W 1
+#define ORX_EQU_MXB_SEL_W__M 0x1
+#define ORX_EQU_MXB_SEL_W__PRE 0x0
+#define ORX_EQU_MXB_SEL_W_UNDECIDED_SYMBOLS 0x0
+#define ORX_EQU_MXB_SEL_W_DECIDED_SYMBOLS 0x1
+
+#define ORX_EQU_FBF_UPD_W__A 0x203002C
+#define ORX_EQU_FBF_UPD_W__W 1
+#define ORX_EQU_FBF_UPD_W__M 0x1
+#define ORX_EQU_FBF_UPD_W__PRE 0x0
+#define ORX_EQU_FBF_UPD_W_NO_UPDATE 0x0
+#define ORX_EQU_FBF_UPD_W_LMS_UPDATE 0x1
+
+#define ORX_EQU_FBF_STP_W__A 0x203002D
+#define ORX_EQU_FBF_STP_W__W 3
+#define ORX_EQU_FBF_STP_W__M 0x7
+#define ORX_EQU_FBF_STP_W__PRE 0x2
+
+#define ORX_EQU_FBF_LEA_W__A 0x203002E
+#define ORX_EQU_FBF_LEA_W__W 4
+#define ORX_EQU_FBF_LEA_W__M 0xF
+#define ORX_EQU_FBF_LEA_W__PRE 0x4
+
+#define ORX_EQU_FBF_RWT_W__A 0x203002F
+#define ORX_EQU_FBF_RWT_W__W 2
+#define ORX_EQU_FBF_RWT_W__M 0x3
+#define ORX_EQU_FBF_RWT_W__PRE 0x0
+
+#define ORX_EQU_FBF_C0RE_RW__A 0x2030030
+#define ORX_EQU_FBF_C0RE_RW__W 12
+#define ORX_EQU_FBF_C0RE_RW__M 0xFFF
+#define ORX_EQU_FBF_C0RE_RW__PRE 0x0
+
+#define ORX_EQU_FBF_C0IM_RW__A 0x2030031
+#define ORX_EQU_FBF_C0IM_RW__W 12
+#define ORX_EQU_FBF_C0IM_RW__M 0xFFF
+#define ORX_EQU_FBF_C0IM_RW__PRE 0x0
+
+#define ORX_EQU_FBF_C1RE_RW__A 0x2030032
+#define ORX_EQU_FBF_C1RE_RW__W 12
+#define ORX_EQU_FBF_C1RE_RW__M 0xFFF
+#define ORX_EQU_FBF_C1RE_RW__PRE 0x0
+
+#define ORX_EQU_FBF_C1IM_RW__A 0x2030033
+#define ORX_EQU_FBF_C1IM_RW__W 12
+#define ORX_EQU_FBF_C1IM_RW__M 0xFFF
+#define ORX_EQU_FBF_C1IM_RW__PRE 0x0
+
+#define ORX_EQU_FBF_C2RE_RW__A 0x2030034
+#define ORX_EQU_FBF_C2RE_RW__W 12
+#define ORX_EQU_FBF_C2RE_RW__M 0xFFF
+#define ORX_EQU_FBF_C2RE_RW__PRE 0x0
+
+#define ORX_EQU_FBF_C2IM_RW__A 0x2030035
+#define ORX_EQU_FBF_C2IM_RW__W 12
+#define ORX_EQU_FBF_C2IM_RW__M 0xFFF
+#define ORX_EQU_FBF_C2IM_RW__PRE 0x0
+
+#define ORX_EQU_FBF_C3RE_RW__A 0x2030036
+#define ORX_EQU_FBF_C3RE_RW__W 12
+#define ORX_EQU_FBF_C3RE_RW__M 0xFFF
+#define ORX_EQU_FBF_C3RE_RW__PRE 0x0
+
+#define ORX_EQU_FBF_C3IM_RW__A 0x2030037
+#define ORX_EQU_FBF_C3IM_RW__W 12
+#define ORX_EQU_FBF_C3IM_RW__M 0xFFF
+#define ORX_EQU_FBF_C3IM_RW__PRE 0x0
+
+#define ORX_EQU_FBF_C4RE_RW__A 0x2030038
+#define ORX_EQU_FBF_C4RE_RW__W 12
+#define ORX_EQU_FBF_C4RE_RW__M 0xFFF
+#define ORX_EQU_FBF_C4RE_RW__PRE 0x0
+
+#define ORX_EQU_FBF_C4IM_RW__A 0x2030039
+#define ORX_EQU_FBF_C4IM_RW__W 12
+#define ORX_EQU_FBF_C4IM_RW__M 0xFFF
+#define ORX_EQU_FBF_C4IM_RW__PRE 0x0
+
+#define ORX_EQU_FBF_C5RE_RW__A 0x203003A
+#define ORX_EQU_FBF_C5RE_RW__W 12
+#define ORX_EQU_FBF_C5RE_RW__M 0xFFF
+#define ORX_EQU_FBF_C5RE_RW__PRE 0x0
+
+#define ORX_EQU_FBF_C5IM_RW__A 0x203003B
+#define ORX_EQU_FBF_C5IM_RW__W 12
+#define ORX_EQU_FBF_C5IM_RW__M 0xFFF
+#define ORX_EQU_FBF_C5IM_RW__PRE 0x0
+
+#define ORX_EQU_ERR_SEL_W__A 0x203003C
+#define ORX_EQU_ERR_SEL_W__W 1
+#define ORX_EQU_ERR_SEL_W__M 0x1
+#define ORX_EQU_ERR_SEL_W__PRE 0x0
+#define ORX_EQU_ERR_SEL_W_CMA_ERROR 0x0
+#define ORX_EQU_ERR_SEL_W_DDA_ERROR 0x1
+
+#define ORX_EQU_ERR_TIS_W__A 0x203003D
+#define ORX_EQU_ERR_TIS_W__W 1
+#define ORX_EQU_ERR_TIS_W__M 0x1
+#define ORX_EQU_ERR_TIS_W__PRE 0x0
+#define ORX_EQU_ERR_TIS_W_CMA_SIGNALS 0x0
+#define ORX_EQU_ERR_TIS_W_DDA_SIGNALS 0x1
+
+#define ORX_EQU_ERR_EDI_R__A 0x203003E
+#define ORX_EQU_ERR_EDI_R__W 5
+#define ORX_EQU_ERR_EDI_R__M 0x1F
+#define ORX_EQU_ERR_EDI_R__PRE 0xF
+
+#define ORX_EQU_ERR_EDQ_R__A 0x203003F
+#define ORX_EQU_ERR_EDQ_R__W 5
+#define ORX_EQU_ERR_EDQ_R__M 0x1F
+#define ORX_EQU_ERR_EDQ_R__PRE 0xF
+
+#define ORX_EQU_ERR_ECI_R__A 0x2030040
+#define ORX_EQU_ERR_ECI_R__W 5
+#define ORX_EQU_ERR_ECI_R__M 0x1F
+#define ORX_EQU_ERR_ECI_R__PRE 0xF
+
+#define ORX_EQU_ERR_ECQ_R__A 0x2030041
+#define ORX_EQU_ERR_ECQ_R__W 5
+#define ORX_EQU_ERR_ECQ_R__M 0x1F
+#define ORX_EQU_ERR_ECQ_R__PRE 0xF
+
+#define ORX_EQU_MER_MER_R__A 0x2030042
+#define ORX_EQU_MER_MER_R__W 6
+#define ORX_EQU_MER_MER_R__M 0x3F
+#define ORX_EQU_MER_MER_R__PRE 0x3F
+
+#define ORX_EQU_MER_LDT_W__A 0x2030043
+#define ORX_EQU_MER_LDT_W__W 3
+#define ORX_EQU_MER_LDT_W__M 0x7
+#define ORX_EQU_MER_LDT_W__PRE 0x4
+
+#define ORX_EQU_SYN_LEN_W__A 0x2030044
+#define ORX_EQU_SYN_LEN_W__W 16
+#define ORX_EQU_SYN_LEN_W__M 0xFFFF
+#define ORX_EQU_SYN_LEN_W__PRE 0x0
+
+#define ORX_DDC_COMM_EXEC__A 0x2040000
+#define ORX_DDC_COMM_EXEC__W 2
+#define ORX_DDC_COMM_EXEC__M 0x3
+#define ORX_DDC_COMM_EXEC__PRE 0x0
+#define ORX_DDC_COMM_EXEC_STOP 0x0
+#define ORX_DDC_COMM_EXEC_ACTIVE 0x1
+#define ORX_DDC_COMM_EXEC_HOLD 0x2
+
+#define ORX_DDC_COMM_MB__A 0x2040002
+#define ORX_DDC_COMM_MB__W 6
+#define ORX_DDC_COMM_MB__M 0x3F
+#define ORX_DDC_COMM_MB__PRE 0x0
+#define ORX_DDC_COMM_MB_CTL__B 0
+#define ORX_DDC_COMM_MB_CTL__W 1
+#define ORX_DDC_COMM_MB_CTL__M 0x1
+#define ORX_DDC_COMM_MB_CTL__PRE 0x0
+#define ORX_DDC_COMM_MB_CTL_OFF 0x0
+#define ORX_DDC_COMM_MB_CTL_ON 0x1
+#define ORX_DDC_COMM_MB_OBS__B 1
+#define ORX_DDC_COMM_MB_OBS__W 1
+#define ORX_DDC_COMM_MB_OBS__M 0x2
+#define ORX_DDC_COMM_MB_OBS__PRE 0x0
+#define ORX_DDC_COMM_MB_OBS_OFF 0x0
+#define ORX_DDC_COMM_MB_OBS_ON 0x2
+
+#define ORX_DDC_COMM_MB_CTL_MUX__B 2
+#define ORX_DDC_COMM_MB_CTL_MUX__W 2
+#define ORX_DDC_COMM_MB_CTL_MUX__M 0xC
+#define ORX_DDC_COMM_MB_CTL_MUX__PRE 0x0
+
+#define ORX_DDC_COMM_MB_OBS_MUX__B 4
+#define ORX_DDC_COMM_MB_OBS_MUX__W 2
+#define ORX_DDC_COMM_MB_OBS_MUX__M 0x30
+#define ORX_DDC_COMM_MB_OBS_MUX__PRE 0x0
+
+#define ORX_DDC_COMM_INT_REQ__A 0x2040003
+#define ORX_DDC_COMM_INT_REQ__W 1
+#define ORX_DDC_COMM_INT_REQ__M 0x1
+#define ORX_DDC_COMM_INT_REQ__PRE 0x0
+#define ORX_DDC_COMM_INT_STA__A 0x2040005
+#define ORX_DDC_COMM_INT_STA__W 1
+#define ORX_DDC_COMM_INT_STA__M 0x1
+#define ORX_DDC_COMM_INT_STA__PRE 0x0
+#define ORX_DDC_COMM_INT_MSK__A 0x2040006
+#define ORX_DDC_COMM_INT_MSK__W 1
+#define ORX_DDC_COMM_INT_MSK__M 0x1
+#define ORX_DDC_COMM_INT_MSK__PRE 0x0
+#define ORX_DDC_COMM_INT_STM__A 0x2040007
+#define ORX_DDC_COMM_INT_STM__W 1
+#define ORX_DDC_COMM_INT_STM__M 0x1
+#define ORX_DDC_COMM_INT_STM__PRE 0x0
+#define ORX_DDC_DEC_MAP_W__A 0x2040010
+#define ORX_DDC_DEC_MAP_W__W 9
+#define ORX_DDC_DEC_MAP_W__M 0x1FF
+#define ORX_DDC_DEC_MAP_W__PRE 0x178
+
+#define ORX_DDC_DEC_MAP_W_QUADR0__B 0
+#define ORX_DDC_DEC_MAP_W_QUADR0__W 2
+#define ORX_DDC_DEC_MAP_W_QUADR0__M 0x3
+#define ORX_DDC_DEC_MAP_W_QUADR0__PRE 0x0
+#define ORX_DDC_DEC_MAP_W_QUADR0_ROTATE_DEFAULT 0x0
+#define ORX_DDC_DEC_MAP_W_QUADR0_ROTATE_ALTERNATE 0x0
+
+#define ORX_DDC_DEC_MAP_W_QUADR1__B 2
+#define ORX_DDC_DEC_MAP_W_QUADR1__W 2
+#define ORX_DDC_DEC_MAP_W_QUADR1__M 0xC
+#define ORX_DDC_DEC_MAP_W_QUADR1__PRE 0x8
+#define ORX_DDC_DEC_MAP_W_QUADR1_ROTATE_DEFAULT 0x8
+#define ORX_DDC_DEC_MAP_W_QUADR1_ROTATE_ALTERNATE 0x4
+
+#define ORX_DDC_DEC_MAP_W_QUADR2__B 4
+#define ORX_DDC_DEC_MAP_W_QUADR2__W 2
+#define ORX_DDC_DEC_MAP_W_QUADR2__M 0x30
+#define ORX_DDC_DEC_MAP_W_QUADR2__PRE 0x30
+#define ORX_DDC_DEC_MAP_W_QUADR2_ROTATE_DEFAULT 0x30
+#define ORX_DDC_DEC_MAP_W_QUADR2_ROTATE_ALTERNATE 0x30
+
+#define ORX_DDC_DEC_MAP_W_QUADR3__B 6
+#define ORX_DDC_DEC_MAP_W_QUADR3__W 2
+#define ORX_DDC_DEC_MAP_W_QUADR3__M 0xC0
+#define ORX_DDC_DEC_MAP_W_QUADR3__PRE 0x40
+#define ORX_DDC_DEC_MAP_W_QUADR3_ROTATE_DEFAULT 0x40
+#define ORX_DDC_DEC_MAP_W_QUADR3_ROTATE_ALTERNATE 0x80
+#define ORX_DDC_DEC_MAP_W_DIFF_DECOD__B 8
+#define ORX_DDC_DEC_MAP_W_DIFF_DECOD__W 1
+#define ORX_DDC_DEC_MAP_W_DIFF_DECOD__M 0x100
+#define ORX_DDC_DEC_MAP_W_DIFF_DECOD__PRE 0x100
+#define ORX_DDC_DEC_MAP_W_DIFF_DECOD_COHERENT_DECODING 0x0
+#define ORX_DDC_DEC_MAP_W_DIFF_DECOD_DIFF_DECODING 0x100
+
+#define ORX_DDC_OFO_SET_W__A 0x2040011
+#define ORX_DDC_OFO_SET_W__W 16
+#define ORX_DDC_OFO_SET_W__M 0xFFFF
+#define ORX_DDC_OFO_SET_W__PRE 0x1402
+
+#define ORX_DDC_OFO_SET_W_PHASE__B 0
+#define ORX_DDC_OFO_SET_W_PHASE__W 7
+#define ORX_DDC_OFO_SET_W_PHASE__M 0x7F
+#define ORX_DDC_OFO_SET_W_PHASE__PRE 0x2
+
+#define ORX_DDC_OFO_SET_W_CRXHITIME__B 7
+#define ORX_DDC_OFO_SET_W_CRXHITIME__W 7
+#define ORX_DDC_OFO_SET_W_CRXHITIME__M 0x3F80
+#define ORX_DDC_OFO_SET_W_CRXHITIME__PRE 0x1400
+
+#define ORX_DDC_OFO_SET_W_CRXINV__B 14
+#define ORX_DDC_OFO_SET_W_CRXINV__W 1
+#define ORX_DDC_OFO_SET_W_CRXINV__M 0x4000
+#define ORX_DDC_OFO_SET_W_CRXINV__PRE 0x0
+
+#define ORX_DDC_OFO_SET_W_DISABLE__B 15
+#define ORX_DDC_OFO_SET_W_DISABLE__W 1
+#define ORX_DDC_OFO_SET_W_DISABLE__M 0x8000
+#define ORX_DDC_OFO_SET_W_DISABLE__PRE 0x0
+
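The __B/__W/__M triplets above are redundant by construction: each mask should equal ((1 << width) - 1) shifted left by the bit offset (for example 0x7F << 7 == 0x3F80 for CRXHITIME). Where that invariant matters it can be checked at compile time; the sketch below does so for the ORX_DDC_OFO_SET_W sub-fields. DRX_FIELD_MASK() is a hypothetical helper introduced only for this check, and in-kernel code would more likely express it with BUILD_BUG_ON(); none of this is part of the patch.

/* Illustrative compile-time consistency checks, not part of this patch. */
#define DRX_FIELD_MASK(b, w)	(((1u << (w)) - 1u) << (b))

_Static_assert(DRX_FIELD_MASK(ORX_DDC_OFO_SET_W_PHASE__B,
			      ORX_DDC_OFO_SET_W_PHASE__W)
	       == ORX_DDC_OFO_SET_W_PHASE__M, "PHASE mask mismatch");
_Static_assert(DRX_FIELD_MASK(ORX_DDC_OFO_SET_W_CRXHITIME__B,
			      ORX_DDC_OFO_SET_W_CRXHITIME__W)
	       == ORX_DDC_OFO_SET_W_CRXHITIME__M, "CRXHITIME mask mismatch");
_Static_assert(DRX_FIELD_MASK(ORX_DDC_OFO_SET_W_CRXINV__B,
			      ORX_DDC_OFO_SET_W_CRXINV__W)
	       == ORX_DDC_OFO_SET_W_CRXINV__M, "CRXINV mask mismatch");
_Static_assert(DRX_FIELD_MASK(ORX_DDC_OFO_SET_W_DISABLE__B,
			      ORX_DDC_OFO_SET_W_DISABLE__W)
	       == ORX_DDC_OFO_SET_W_DISABLE__M, "DISABLE mask mismatch");
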
+#define ORX_CON_COMM_EXEC__A 0x2050000
+#define ORX_CON_COMM_EXEC__W 2
+#define ORX_CON_COMM_EXEC__M 0x3
+#define ORX_CON_COMM_EXEC__PRE 0x0
+#define ORX_CON_COMM_EXEC_STOP 0x0
+#define ORX_CON_COMM_EXEC_ACTIVE 0x1
+#define ORX_CON_COMM_EXEC_HOLD 0x2
+
+#define ORX_CON_LDT_W__A 0x2050010
+#define ORX_CON_LDT_W__W 3
+#define ORX_CON_LDT_W__M 0x7
+#define ORX_CON_LDT_W__PRE 0x3
+
+#define ORX_CON_LDT_W_CON_LDT_W__B 0
+#define ORX_CON_LDT_W_CON_LDT_W__W 3
+#define ORX_CON_LDT_W_CON_LDT_W__M 0x7
+#define ORX_CON_LDT_W_CON_LDT_W__PRE 0x3
+
+#define ORX_CON_RST_W__A 0x2050011
+#define ORX_CON_RST_W__W 4
+#define ORX_CON_RST_W__M 0xF
+#define ORX_CON_RST_W__PRE 0x0
+
+#define ORX_CON_RST_W_CPH__B 0
+#define ORX_CON_RST_W_CPH__W 1
+#define ORX_CON_RST_W_CPH__M 0x1
+#define ORX_CON_RST_W_CPH__PRE 0x0
+
+#define ORX_CON_RST_W_CTI__B 1
+#define ORX_CON_RST_W_CTI__W 1
+#define ORX_CON_RST_W_CTI__M 0x2
+#define ORX_CON_RST_W_CTI__PRE 0x0
+
+#define ORX_CON_RST_W_KRN__B 2
+#define ORX_CON_RST_W_KRN__W 1
+#define ORX_CON_RST_W_KRN__M 0x4
+#define ORX_CON_RST_W_KRN__PRE 0x0
+
+#define ORX_CON_RST_W_KRP__B 3
+#define ORX_CON_RST_W_KRP__W 1
+#define ORX_CON_RST_W_KRP__M 0x8
+#define ORX_CON_RST_W_KRP__PRE 0x0
+
+#define ORX_CON_CPH_PHI_R__A 0x2050012
+#define ORX_CON_CPH_PHI_R__W 16
+#define ORX_CON_CPH_PHI_R__M 0xFFFF
+#define ORX_CON_CPH_PHI_R__PRE 0x0
+
+#define ORX_CON_CPH_FRQ_R__A 0x2050013
+#define ORX_CON_CPH_FRQ_R__W 16
+#define ORX_CON_CPH_FRQ_R__M 0xFFFF
+#define ORX_CON_CPH_FRQ_R__PRE 0x0
+
+#define ORX_CON_CPH_AMP_R__A 0x2050014
+#define ORX_CON_CPH_AMP_R__W 16
+#define ORX_CON_CPH_AMP_R__M 0xFFFF
+#define ORX_CON_CPH_AMP_R__PRE 0x0
+
+#define ORX_CON_CPH_KDF_W__A 0x2050015
+#define ORX_CON_CPH_KDF_W__W 4
+#define ORX_CON_CPH_KDF_W__M 0xF
+#define ORX_CON_CPH_KDF_W__PRE 0x0
+
+#define ORX_CON_CPH_KPF_W__A 0x2050016
+#define ORX_CON_CPH_KPF_W__W 4
+#define ORX_CON_CPH_KPF_W__M 0xF
+#define ORX_CON_CPH_KPF_W__PRE 0x0
+
+#define ORX_CON_CPH_KIF_W__A 0x2050017
+#define ORX_CON_CPH_KIF_W__W 4
+#define ORX_CON_CPH_KIF_W__M 0xF
+#define ORX_CON_CPH_KIF_W__PRE 0x0
+#define ORX_CON_CPH_APT_W__A 0x2050018
+#define ORX_CON_CPH_APT_W__W 16
+#define ORX_CON_CPH_APT_W__M 0xFFFF
+#define ORX_CON_CPH_APT_W__PRE 0x804
+
+#define ORX_CON_CPH_APT_W_PTH__B 0
+#define ORX_CON_CPH_APT_W_PTH__W 8
+#define ORX_CON_CPH_APT_W_PTH__M 0xFF
+#define ORX_CON_CPH_APT_W_PTH__PRE 0x4
+
+#define ORX_CON_CPH_APT_W_ATH__B 8
+#define ORX_CON_CPH_APT_W_ATH__W 8
+#define ORX_CON_CPH_APT_W_ATH__M 0xFF00
+#define ORX_CON_CPH_APT_W_ATH__PRE 0x800
+
+#define ORX_CON_CPH_WLC_W__A 0x2050019
+#define ORX_CON_CPH_WLC_W__W 8
+#define ORX_CON_CPH_WLC_W__M 0xFF
+#define ORX_CON_CPH_WLC_W__PRE 0x81
+
+#define ORX_CON_CPH_WLC_W_LATC__B 0
+#define ORX_CON_CPH_WLC_W_LATC__W 4
+#define ORX_CON_CPH_WLC_W_LATC__M 0xF
+#define ORX_CON_CPH_WLC_W_LATC__PRE 0x1
+
+#define ORX_CON_CPH_WLC_W_WLIM__B 4
+#define ORX_CON_CPH_WLC_W_WLIM__W 4
+#define ORX_CON_CPH_WLC_W_WLIM__M 0xF0
+#define ORX_CON_CPH_WLC_W_WLIM__PRE 0x80
+
+#define ORX_CON_CPH_DLY_W__A 0x205001A
+#define ORX_CON_CPH_DLY_W__W 3
+#define ORX_CON_CPH_DLY_W__M 0x7
+#define ORX_CON_CPH_DLY_W__PRE 0x4
+
+#define ORX_CON_CPH_TCL_W__A 0x205001B
+#define ORX_CON_CPH_TCL_W__W 3
+#define ORX_CON_CPH_TCL_W__M 0x7
+#define ORX_CON_CPH_TCL_W__PRE 0x3
+
+#define ORX_CON_KRP_AMP_R__A 0x205001C
+#define ORX_CON_KRP_AMP_R__W 9
+#define ORX_CON_KRP_AMP_R__M 0x1FF
+#define ORX_CON_KRP_AMP_R__PRE 0x0
+
+#define ORX_CON_KRN_AMP_R__A 0x205001D
+#define ORX_CON_KRN_AMP_R__W 9
+#define ORX_CON_KRN_AMP_R__M 0x1FF
+#define ORX_CON_KRN_AMP_R__PRE 0x0
+
+#define ORX_CON_CTI_DTI_R__A 0x205001E
+#define ORX_CON_CTI_DTI_R__W 16
+#define ORX_CON_CTI_DTI_R__M 0xFFFF
+#define ORX_CON_CTI_DTI_R__PRE 0x0
+
+#define ORX_CON_CTI_KDT_W__A 0x205001F
+#define ORX_CON_CTI_KDT_W__W 4
+#define ORX_CON_CTI_KDT_W__M 0xF
+#define ORX_CON_CTI_KDT_W__PRE 0x4
+
+#define ORX_CON_CTI_KPT_W__A 0x2050020
+#define ORX_CON_CTI_KPT_W__W 4
+#define ORX_CON_CTI_KPT_W__M 0xF
+#define ORX_CON_CTI_KPT_W__PRE 0x3
+
+#define ORX_CON_CTI_KIT_W__A 0x2050021
+#define ORX_CON_CTI_KIT_W__W 4
+#define ORX_CON_CTI_KIT_W__M 0xF
+#define ORX_CON_CTI_KIT_W__PRE 0xB
+
+#define ORX_CON_CTI_TAT_W__A 0x2050022
+#define ORX_CON_CTI_TAT_W__W 4
+#define ORX_CON_CTI_TAT_W__M 0xF
+#define ORX_CON_CTI_TAT_W__PRE 0x3
+
+#define ORX_NSU_COMM_EXEC__A 0x2060000
+#define ORX_NSU_COMM_EXEC__W 2
+#define ORX_NSU_COMM_EXEC__M 0x3
+#define ORX_NSU_COMM_EXEC__PRE 0x0
+#define ORX_NSU_COMM_EXEC_STOP 0x0
+#define ORX_NSU_COMM_EXEC_ACTIVE 0x1
+#define ORX_NSU_COMM_EXEC_HOLD 0x2
+
+#define ORX_NSU_AOX_STDBY_W__A 0x2060010
+#define ORX_NSU_AOX_STDBY_W__W 8
+#define ORX_NSU_AOX_STDBY_W__M 0xFF
+#define ORX_NSU_AOX_STDBY_W__PRE 0x0
+
+#define ORX_NSU_AOX_STDBY_W_STDBYADC__B 0
+#define ORX_NSU_AOX_STDBY_W_STDBYADC__W 1
+#define ORX_NSU_AOX_STDBY_W_STDBYADC__M 0x1
+#define ORX_NSU_AOX_STDBY_W_STDBYADC__PRE 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYADC_A1_ON 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYADC_A1_OFF 0x1
+#define ORX_NSU_AOX_STDBY_W_STDBYADC_A2_OFF 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYADC_A2_ON 0x1
+
+#define ORX_NSU_AOX_STDBY_W_STDBYAMP__B 1
+#define ORX_NSU_AOX_STDBY_W_STDBYAMP__W 1
+#define ORX_NSU_AOX_STDBY_W_STDBYAMP__M 0x2
+#define ORX_NSU_AOX_STDBY_W_STDBYAMP__PRE 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYAMP_A1_ON 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYAMP_A1_OFF 0x2
+#define ORX_NSU_AOX_STDBY_W_STDBYAMP_A2_OFF 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYAMP_A2_ON 0x2
+
+#define ORX_NSU_AOX_STDBY_W_STDBYBIAS__B 2
+#define ORX_NSU_AOX_STDBY_W_STDBYBIAS__W 1
+#define ORX_NSU_AOX_STDBY_W_STDBYBIAS__M 0x4
+#define ORX_NSU_AOX_STDBY_W_STDBYBIAS__PRE 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYBIAS_A1_ON 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYBIAS_A1_OFF 0x4
+#define ORX_NSU_AOX_STDBY_W_STDBYBIAS_A2_OFF 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYBIAS_A2_ON 0x4
+
+#define ORX_NSU_AOX_STDBY_W_STDBYPLL__B 3
+#define ORX_NSU_AOX_STDBY_W_STDBYPLL__W 1
+#define ORX_NSU_AOX_STDBY_W_STDBYPLL__M 0x8
+#define ORX_NSU_AOX_STDBY_W_STDBYPLL__PRE 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYPLL_A1_ON 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYPLL_A1_OFF 0x8
+#define ORX_NSU_AOX_STDBY_W_STDBYPLL_A2_OFF 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYPLL_A2_ON 0x8
+
+#define ORX_NSU_AOX_STDBY_W_STDBYPD__B 4
+#define ORX_NSU_AOX_STDBY_W_STDBYPD__W 1
+#define ORX_NSU_AOX_STDBY_W_STDBYPD__M 0x10
+#define ORX_NSU_AOX_STDBY_W_STDBYPD__PRE 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYPD_A1_ON 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYPD_A1_OFF 0x10
+#define ORX_NSU_AOX_STDBY_W_STDBYPD_A2_OFF 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYPD_A2_ON 0x10
+
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_IF__B 5
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_IF__W 1
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_IF__M 0x20
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_IF__PRE 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_IF_A1_ON 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_IF_A1_OFF 0x20
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_IF_A2_OFF 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_IF_A2_ON 0x20
+
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_RF__B 6
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_RF__W 1
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_RF__M 0x40
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_RF__PRE 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_RF_A1_ON 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_RF_A1_OFF 0x40
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_RF_A2_OFF 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYTAGC_RF_A2_ON 0x40
+
+#define ORX_NSU_AOX_STDBY_W_STDBYFLT__B 7
+#define ORX_NSU_AOX_STDBY_W_STDBYFLT__W 1
+#define ORX_NSU_AOX_STDBY_W_STDBYFLT__M 0x80
+#define ORX_NSU_AOX_STDBY_W_STDBYFLT__PRE 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYFLT_A1_ON 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYFLT_A1_OFF 0x80
+#define ORX_NSU_AOX_STDBY_W_STDBYFLT_A2_OFF 0x0
+#define ORX_NSU_AOX_STDBY_W_STDBYFLT_A2_ON 0x80
+
+#define ORX_NSU_AOX_LOFRQ_W__A 0x2060011
+#define ORX_NSU_AOX_LOFRQ_W__W 16
+#define ORX_NSU_AOX_LOFRQ_W__M 0xFFFF
+#define ORX_NSU_AOX_LOFRQ_W__PRE 0x0
+#define ORX_NSU_AOX_LOMDE_W__A 0x2060012
+#define ORX_NSU_AOX_LOMDE_W__W 16
+#define ORX_NSU_AOX_LOMDE_W__M 0xFFFF
+#define ORX_NSU_AOX_LOMDE_W__PRE 0x0
+
+#define ORX_NSU_AOX_LOMDE_W_AOX_LOFRQ_EXT__B 0
+#define ORX_NSU_AOX_LOMDE_W_AOX_LOFRQ_EXT__W 8
+#define ORX_NSU_AOX_LOMDE_W_AOX_LOFRQ_EXT__M 0xFF
+#define ORX_NSU_AOX_LOMDE_W_AOX_LOFRQ_EXT__PRE 0x0
+
+#define ORX_NSU_AOX_LOMDE_W_RESET_VCO__B 13
+#define ORX_NSU_AOX_LOMDE_W_RESET_VCO__W 1
+#define ORX_NSU_AOX_LOMDE_W_RESET_VCO__M 0x2000
+#define ORX_NSU_AOX_LOMDE_W_RESET_VCO__PRE 0x0
+
+#define ORX_NSU_AOX_LOMDE_W_PLL_DIV__B 14
+#define ORX_NSU_AOX_LOMDE_W_PLL_DIV__W 2
+#define ORX_NSU_AOX_LOMDE_W_PLL_DIV__M 0xC000
+#define ORX_NSU_AOX_LOMDE_W_PLL_DIV__PRE 0x0
+
+#define ORX_NSU_AOX_LOPOW_W__A 0x2060013
+#define ORX_NSU_AOX_LOPOW_W__W 2
+#define ORX_NSU_AOX_LOPOW_W__M 0x3
+#define ORX_NSU_AOX_LOPOW_W__PRE 0x0
+#define ORX_NSU_AOX_LOPOW_W_POWER_MINUS0DB 0x0
+#define ORX_NSU_AOX_LOPOW_W_POWER_MINUS5DB 0x1
+#define ORX_NSU_AOX_LOPOW_W_POWER_MINUS10DB 0x2
+#define ORX_NSU_AOX_LOPOW_W_POWER_MINUS15DB 0x3
+
+#define ORX_NSU_AOX_STHR_W__A 0x2060014
+#define ORX_NSU_AOX_STHR_W__W 5
+#define ORX_NSU_AOX_STHR_W__M 0x1F
+#define ORX_NSU_AOX_STHR_W__PRE 0x0
+
+#define ORX_NSU_TUN_RFGAIN_W__A 0x2060015
+#define ORX_NSU_TUN_RFGAIN_W__W 15
+#define ORX_NSU_TUN_RFGAIN_W__M 0x7FFF
+#define ORX_NSU_TUN_RFGAIN_W__PRE 0x0
+
+#define ORX_NSU_TUN_IFGAIN_W__A 0x2060016
+#define ORX_NSU_TUN_IFGAIN_W__W 15
+#define ORX_NSU_TUN_IFGAIN_W__M 0x7FFF
+#define ORX_NSU_TUN_IFGAIN_W__PRE 0x0
+
+#define ORX_NSU_TUN_BPF_W__A 0x2060017
+#define ORX_NSU_TUN_BPF_W__W 15
+#define ORX_NSU_TUN_BPF_W__M 0x7FFF
+#define ORX_NSU_TUN_BPF_W__PRE 0x1F9
+#define ORX_NSU_NSS_BITSWAP_W__A 0x2060018
+#define ORX_NSU_NSS_BITSWAP_W__W 3
+#define ORX_NSU_NSS_BITSWAP_W__M 0x7
+#define ORX_NSU_NSS_BITSWAP_W__PRE 0x0
+
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS0_RF__B 0
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS0_RF__W 1
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS0_RF__M 0x1
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS0_RF__PRE 0x0
+
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS1_IF__B 1
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS1_IF__W 1
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS1_IF__M 0x2
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS1_IF__PRE 0x0
+
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS2_BP__B 2
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS2_BP__W 1
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS2_BP__M 0x4
+#define ORX_NSU_NSS_BITSWAP_W_BITSWAP_NS2_BP__PRE 0x0
+
+#define ORX_TST_COMM_EXEC__A 0x23F0000
+#define ORX_TST_COMM_EXEC__W 2
+#define ORX_TST_COMM_EXEC__M 0x3
+#define ORX_TST_COMM_EXEC__PRE 0x0
+#define ORX_TST_COMM_EXEC_STOP 0x0
+#define ORX_TST_COMM_EXEC_ACTIVE 0x1
+#define ORX_TST_COMM_EXEC_HOLD 0x2
+
+#define ORX_TST_AOX_TST_W__A 0x23F0010
+#define ORX_TST_AOX_TST_W__W 8
+#define ORX_TST_AOX_TST_W__M 0xFF
+#define ORX_TST_AOX_TST_W__PRE 0x0
+
+#define QAM_COMM_EXEC__A 0x1400000
+#define QAM_COMM_EXEC__W 2
+#define QAM_COMM_EXEC__M 0x3
+#define QAM_COMM_EXEC__PRE 0x0
+#define QAM_COMM_EXEC_STOP 0x0
+#define QAM_COMM_EXEC_ACTIVE 0x1
+#define QAM_COMM_EXEC_HOLD 0x2
+
+#define QAM_COMM_MB__A 0x1400002
+#define QAM_COMM_MB__W 16
+#define QAM_COMM_MB__M 0xFFFF
+#define QAM_COMM_MB__PRE 0x0
+#define QAM_COMM_INT_REQ__A 0x1400003
+#define QAM_COMM_INT_REQ__W 16
+#define QAM_COMM_INT_REQ__M 0xFFFF
+#define QAM_COMM_INT_REQ__PRE 0x0
+
+#define QAM_COMM_INT_REQ_SL_REQ__B 0
+#define QAM_COMM_INT_REQ_SL_REQ__W 1
+#define QAM_COMM_INT_REQ_SL_REQ__M 0x1
+#define QAM_COMM_INT_REQ_SL_REQ__PRE 0x0
+
+#define QAM_COMM_INT_REQ_LC_REQ__B 1
+#define QAM_COMM_INT_REQ_LC_REQ__W 1
+#define QAM_COMM_INT_REQ_LC_REQ__M 0x2
+#define QAM_COMM_INT_REQ_LC_REQ__PRE 0x0
+
+#define QAM_COMM_INT_REQ_VD_REQ__B 2
+#define QAM_COMM_INT_REQ_VD_REQ__W 1
+#define QAM_COMM_INT_REQ_VD_REQ__M 0x4
+#define QAM_COMM_INT_REQ_VD_REQ__PRE 0x0
+
+#define QAM_COMM_INT_REQ_SY_REQ__B 3
+#define QAM_COMM_INT_REQ_SY_REQ__W 1
+#define QAM_COMM_INT_REQ_SY_REQ__M 0x8
+#define QAM_COMM_INT_REQ_SY_REQ__PRE 0x0
+
+#define QAM_COMM_INT_STA__A 0x1400005
+#define QAM_COMM_INT_STA__W 16
+#define QAM_COMM_INT_STA__M 0xFFFF
+#define QAM_COMM_INT_STA__PRE 0x0
+
+#define QAM_COMM_INT_MSK__A 0x1400006
+#define QAM_COMM_INT_MSK__W 16
+#define QAM_COMM_INT_MSK__M 0xFFFF
+#define QAM_COMM_INT_MSK__PRE 0x0
+
+#define QAM_COMM_INT_STM__A 0x1400007
+#define QAM_COMM_INT_STM__W 16
+#define QAM_COMM_INT_STM__M 0xFFFF
+#define QAM_COMM_INT_STM__PRE 0x0
+
+#define QAM_TOP_COMM_EXEC__A 0x1410000
+#define QAM_TOP_COMM_EXEC__W 2
+#define QAM_TOP_COMM_EXEC__M 0x3
+#define QAM_TOP_COMM_EXEC__PRE 0x0
+#define QAM_TOP_COMM_EXEC_STOP 0x0
+#define QAM_TOP_COMM_EXEC_ACTIVE 0x1
+#define QAM_TOP_COMM_EXEC_HOLD 0x2
+
+#define QAM_TOP_ANNEX__A 0x1410010
+#define QAM_TOP_ANNEX__W 2
+#define QAM_TOP_ANNEX__M 0x3
+#define QAM_TOP_ANNEX__PRE 0x1
+#define QAM_TOP_ANNEX_A 0x0
+#define QAM_TOP_ANNEX_B 0x1
+#define QAM_TOP_ANNEX_C 0x2
+#define QAM_TOP_ANNEX_D 0x3
+
+#define QAM_TOP_CONSTELLATION__A 0x1410011
+#define QAM_TOP_CONSTELLATION__W 3
+#define QAM_TOP_CONSTELLATION__M 0x7
+#define QAM_TOP_CONSTELLATION__PRE 0x5
+#define QAM_TOP_CONSTELLATION_NONE 0x0
+#define QAM_TOP_CONSTELLATION_QPSK 0x1
+#define QAM_TOP_CONSTELLATION_QAM8 0x2
+#define QAM_TOP_CONSTELLATION_QAM16 0x3
+#define QAM_TOP_CONSTELLATION_QAM32 0x4
+#define QAM_TOP_CONSTELLATION_QAM64 0x5
+#define QAM_TOP_CONSTELLATION_QAM128 0x6
+#define QAM_TOP_CONSTELLATION_QAM256 0x7
+
+#define QAM_FQ_COMM_EXEC__A 0x1420000
+#define QAM_FQ_COMM_EXEC__W 2
+#define QAM_FQ_COMM_EXEC__M 0x3
+#define QAM_FQ_COMM_EXEC__PRE 0x0
+#define QAM_FQ_COMM_EXEC_STOP 0x0
+#define QAM_FQ_COMM_EXEC_ACTIVE 0x1
+#define QAM_FQ_COMM_EXEC_HOLD 0x2
+
+#define QAM_FQ_MODE__A 0x1420010
+#define QAM_FQ_MODE__W 3
+#define QAM_FQ_MODE__M 0x7
+#define QAM_FQ_MODE__PRE 0x0
+
+#define QAM_FQ_MODE_TAPRESET__B 0
+#define QAM_FQ_MODE_TAPRESET__W 1
+#define QAM_FQ_MODE_TAPRESET__M 0x1
+#define QAM_FQ_MODE_TAPRESET__PRE 0x0
+#define QAM_FQ_MODE_TAPRESET_RST 0x1
+
+#define QAM_FQ_MODE_TAPLMS__B 1
+#define QAM_FQ_MODE_TAPLMS__W 1
+#define QAM_FQ_MODE_TAPLMS__M 0x2
+#define QAM_FQ_MODE_TAPLMS__PRE 0x0
+#define QAM_FQ_MODE_TAPLMS_UPD 0x2
+
+#define QAM_FQ_MODE_TAPDRAIN__B 2
+#define QAM_FQ_MODE_TAPDRAIN__W 1
+#define QAM_FQ_MODE_TAPDRAIN__M 0x4
+#define QAM_FQ_MODE_TAPDRAIN__PRE 0x0
+#define QAM_FQ_MODE_TAPDRAIN_DRAIN 0x4
+
+#define QAM_FQ_MU_FACTOR__A 0x1420011
+#define QAM_FQ_MU_FACTOR__W 3
+#define QAM_FQ_MU_FACTOR__M 0x7
+#define QAM_FQ_MU_FACTOR__PRE 0x0
+
+#define QAM_FQ_LA_FACTOR__A 0x1420012
+#define QAM_FQ_LA_FACTOR__W 4
+#define QAM_FQ_LA_FACTOR__M 0xF
+#define QAM_FQ_LA_FACTOR__PRE 0xC
+
+#define QAM_FQ_CENTTAP_IDX__A 0x1420016
+#define QAM_FQ_CENTTAP_IDX__W 5
+#define QAM_FQ_CENTTAP_IDX__M 0x1F
+#define QAM_FQ_CENTTAP_IDX__PRE 0x13
+
+#define QAM_FQ_CENTTAP_IDX_IDX__B 0
+#define QAM_FQ_CENTTAP_IDX_IDX__W 5
+#define QAM_FQ_CENTTAP_IDX_IDX__M 0x1F
+#define QAM_FQ_CENTTAP_IDX_IDX__PRE 0x13
+
+#define QAM_FQ_CENTTAP_VALUE__A 0x1420017
+#define QAM_FQ_CENTTAP_VALUE__W 12
+#define QAM_FQ_CENTTAP_VALUE__M 0xFFF
+#define QAM_FQ_CENTTAP_VALUE__PRE 0x600
+
+#define QAM_FQ_CENTTAP_VALUE_TAP__B 0
+#define QAM_FQ_CENTTAP_VALUE_TAP__W 12
+#define QAM_FQ_CENTTAP_VALUE_TAP__M 0xFFF
+#define QAM_FQ_CENTTAP_VALUE_TAP__PRE 0x600
+
+#define QAM_FQ_TAP_RE_EL0__A 0x1420020
+#define QAM_FQ_TAP_RE_EL0__W 12
+#define QAM_FQ_TAP_RE_EL0__M 0xFFF
+#define QAM_FQ_TAP_RE_EL0__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL0_TAP__B 0
+#define QAM_FQ_TAP_RE_EL0_TAP__W 12
+#define QAM_FQ_TAP_RE_EL0_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL0_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL0__A 0x1420021
+#define QAM_FQ_TAP_IM_EL0__W 12
+#define QAM_FQ_TAP_IM_EL0__M 0xFFF
+#define QAM_FQ_TAP_IM_EL0__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL0_TAP__B 0
+#define QAM_FQ_TAP_IM_EL0_TAP__W 12
+#define QAM_FQ_TAP_IM_EL0_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL0_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL1__A 0x1420022
+#define QAM_FQ_TAP_RE_EL1__W 12
+#define QAM_FQ_TAP_RE_EL1__M 0xFFF
+#define QAM_FQ_TAP_RE_EL1__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL1_TAP__B 0
+#define QAM_FQ_TAP_RE_EL1_TAP__W 12
+#define QAM_FQ_TAP_RE_EL1_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL1_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL1__A 0x1420023
+#define QAM_FQ_TAP_IM_EL1__W 12
+#define QAM_FQ_TAP_IM_EL1__M 0xFFF
+#define QAM_FQ_TAP_IM_EL1__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL1_TAP__B 0
+#define QAM_FQ_TAP_IM_EL1_TAP__W 12
+#define QAM_FQ_TAP_IM_EL1_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL1_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL2__A 0x1420024
+#define QAM_FQ_TAP_RE_EL2__W 12
+#define QAM_FQ_TAP_RE_EL2__M 0xFFF
+#define QAM_FQ_TAP_RE_EL2__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL2_TAP__B 0
+#define QAM_FQ_TAP_RE_EL2_TAP__W 12
+#define QAM_FQ_TAP_RE_EL2_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL2_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL2__A 0x1420025
+#define QAM_FQ_TAP_IM_EL2__W 12
+#define QAM_FQ_TAP_IM_EL2__M 0xFFF
+#define QAM_FQ_TAP_IM_EL2__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL2_TAP__B 0
+#define QAM_FQ_TAP_IM_EL2_TAP__W 12
+#define QAM_FQ_TAP_IM_EL2_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL2_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL3__A 0x1420026
+#define QAM_FQ_TAP_RE_EL3__W 12
+#define QAM_FQ_TAP_RE_EL3__M 0xFFF
+#define QAM_FQ_TAP_RE_EL3__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL3_TAP__B 0
+#define QAM_FQ_TAP_RE_EL3_TAP__W 12
+#define QAM_FQ_TAP_RE_EL3_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL3_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL3__A 0x1420027
+#define QAM_FQ_TAP_IM_EL3__W 12
+#define QAM_FQ_TAP_IM_EL3__M 0xFFF
+#define QAM_FQ_TAP_IM_EL3__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL3_TAP__B 0
+#define QAM_FQ_TAP_IM_EL3_TAP__W 12
+#define QAM_FQ_TAP_IM_EL3_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL3_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL4__A 0x1420028
+#define QAM_FQ_TAP_RE_EL4__W 12
+#define QAM_FQ_TAP_RE_EL4__M 0xFFF
+#define QAM_FQ_TAP_RE_EL4__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL4_TAP__B 0
+#define QAM_FQ_TAP_RE_EL4_TAP__W 12
+#define QAM_FQ_TAP_RE_EL4_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL4_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL4__A 0x1420029
+#define QAM_FQ_TAP_IM_EL4__W 12
+#define QAM_FQ_TAP_IM_EL4__M 0xFFF
+#define QAM_FQ_TAP_IM_EL4__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL4_TAP__B 0
+#define QAM_FQ_TAP_IM_EL4_TAP__W 12
+#define QAM_FQ_TAP_IM_EL4_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL4_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL5__A 0x142002A
+#define QAM_FQ_TAP_RE_EL5__W 12
+#define QAM_FQ_TAP_RE_EL5__M 0xFFF
+#define QAM_FQ_TAP_RE_EL5__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL5_TAP__B 0
+#define QAM_FQ_TAP_RE_EL5_TAP__W 12
+#define QAM_FQ_TAP_RE_EL5_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL5_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL5__A 0x142002B
+#define QAM_FQ_TAP_IM_EL5__W 12
+#define QAM_FQ_TAP_IM_EL5__M 0xFFF
+#define QAM_FQ_TAP_IM_EL5__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL5_TAP__B 0
+#define QAM_FQ_TAP_IM_EL5_TAP__W 12
+#define QAM_FQ_TAP_IM_EL5_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL5_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL6__A 0x142002C
+#define QAM_FQ_TAP_RE_EL6__W 12
+#define QAM_FQ_TAP_RE_EL6__M 0xFFF
+#define QAM_FQ_TAP_RE_EL6__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL6_TAP__B 0
+#define QAM_FQ_TAP_RE_EL6_TAP__W 12
+#define QAM_FQ_TAP_RE_EL6_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL6_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL6__A 0x142002D
+#define QAM_FQ_TAP_IM_EL6__W 12
+#define QAM_FQ_TAP_IM_EL6__M 0xFFF
+#define QAM_FQ_TAP_IM_EL6__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL6_TAP__B 0
+#define QAM_FQ_TAP_IM_EL6_TAP__W 12
+#define QAM_FQ_TAP_IM_EL6_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL6_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL7__A 0x142002E
+#define QAM_FQ_TAP_RE_EL7__W 12
+#define QAM_FQ_TAP_RE_EL7__M 0xFFF
+#define QAM_FQ_TAP_RE_EL7__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL7_TAP__B 0
+#define QAM_FQ_TAP_RE_EL7_TAP__W 12
+#define QAM_FQ_TAP_RE_EL7_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL7_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL7__A 0x142002F
+#define QAM_FQ_TAP_IM_EL7__W 12
+#define QAM_FQ_TAP_IM_EL7__M 0xFFF
+#define QAM_FQ_TAP_IM_EL7__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL7_TAP__B 0
+#define QAM_FQ_TAP_IM_EL7_TAP__W 12
+#define QAM_FQ_TAP_IM_EL7_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL7_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL8__A 0x1420030
+#define QAM_FQ_TAP_RE_EL8__W 12
+#define QAM_FQ_TAP_RE_EL8__M 0xFFF
+#define QAM_FQ_TAP_RE_EL8__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL8_TAP__B 0
+#define QAM_FQ_TAP_RE_EL8_TAP__W 12
+#define QAM_FQ_TAP_RE_EL8_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL8_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL8__A 0x1420031
+#define QAM_FQ_TAP_IM_EL8__W 12
+#define QAM_FQ_TAP_IM_EL8__M 0xFFF
+#define QAM_FQ_TAP_IM_EL8__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL8_TAP__B 0
+#define QAM_FQ_TAP_IM_EL8_TAP__W 12
+#define QAM_FQ_TAP_IM_EL8_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL8_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL9__A 0x1420032
+#define QAM_FQ_TAP_RE_EL9__W 12
+#define QAM_FQ_TAP_RE_EL9__M 0xFFF
+#define QAM_FQ_TAP_RE_EL9__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL9_TAP__B 0
+#define QAM_FQ_TAP_RE_EL9_TAP__W 12
+#define QAM_FQ_TAP_RE_EL9_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL9_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL9__A 0x1420033
+#define QAM_FQ_TAP_IM_EL9__W 12
+#define QAM_FQ_TAP_IM_EL9__M 0xFFF
+#define QAM_FQ_TAP_IM_EL9__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL9_TAP__B 0
+#define QAM_FQ_TAP_IM_EL9_TAP__W 12
+#define QAM_FQ_TAP_IM_EL9_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL9_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL10__A 0x1420034
+#define QAM_FQ_TAP_RE_EL10__W 12
+#define QAM_FQ_TAP_RE_EL10__M 0xFFF
+#define QAM_FQ_TAP_RE_EL10__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL10_TAP__B 0
+#define QAM_FQ_TAP_RE_EL10_TAP__W 12
+#define QAM_FQ_TAP_RE_EL10_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL10_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL10__A 0x1420035
+#define QAM_FQ_TAP_IM_EL10__W 12
+#define QAM_FQ_TAP_IM_EL10__M 0xFFF
+#define QAM_FQ_TAP_IM_EL10__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL10_TAP__B 0
+#define QAM_FQ_TAP_IM_EL10_TAP__W 12
+#define QAM_FQ_TAP_IM_EL10_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL10_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL11__A 0x1420036
+#define QAM_FQ_TAP_RE_EL11__W 12
+#define QAM_FQ_TAP_RE_EL11__M 0xFFF
+#define QAM_FQ_TAP_RE_EL11__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL11_TAP__B 0
+#define QAM_FQ_TAP_RE_EL11_TAP__W 12
+#define QAM_FQ_TAP_RE_EL11_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL11_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL11__A 0x1420037
+#define QAM_FQ_TAP_IM_EL11__W 12
+#define QAM_FQ_TAP_IM_EL11__M 0xFFF
+#define QAM_FQ_TAP_IM_EL11__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL11_TAP__B 0
+#define QAM_FQ_TAP_IM_EL11_TAP__W 12
+#define QAM_FQ_TAP_IM_EL11_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL11_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL12__A 0x1420038
+#define QAM_FQ_TAP_RE_EL12__W 12
+#define QAM_FQ_TAP_RE_EL12__M 0xFFF
+#define QAM_FQ_TAP_RE_EL12__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL12_TAP__B 0
+#define QAM_FQ_TAP_RE_EL12_TAP__W 12
+#define QAM_FQ_TAP_RE_EL12_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL12_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL12__A 0x1420039
+#define QAM_FQ_TAP_IM_EL12__W 12
+#define QAM_FQ_TAP_IM_EL12__M 0xFFF
+#define QAM_FQ_TAP_IM_EL12__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL12_TAP__B 0
+#define QAM_FQ_TAP_IM_EL12_TAP__W 12
+#define QAM_FQ_TAP_IM_EL12_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL12_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL13__A 0x142003A
+#define QAM_FQ_TAP_RE_EL13__W 12
+#define QAM_FQ_TAP_RE_EL13__M 0xFFF
+#define QAM_FQ_TAP_RE_EL13__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL13_TAP__B 0
+#define QAM_FQ_TAP_RE_EL13_TAP__W 12
+#define QAM_FQ_TAP_RE_EL13_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL13_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL13__A 0x142003B
+#define QAM_FQ_TAP_IM_EL13__W 12
+#define QAM_FQ_TAP_IM_EL13__M 0xFFF
+#define QAM_FQ_TAP_IM_EL13__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL13_TAP__B 0
+#define QAM_FQ_TAP_IM_EL13_TAP__W 12
+#define QAM_FQ_TAP_IM_EL13_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL13_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL14__A 0x142003C
+#define QAM_FQ_TAP_RE_EL14__W 12
+#define QAM_FQ_TAP_RE_EL14__M 0xFFF
+#define QAM_FQ_TAP_RE_EL14__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL14_TAP__B 0
+#define QAM_FQ_TAP_RE_EL14_TAP__W 12
+#define QAM_FQ_TAP_RE_EL14_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL14_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL14__A 0x142003D
+#define QAM_FQ_TAP_IM_EL14__W 12
+#define QAM_FQ_TAP_IM_EL14__M 0xFFF
+#define QAM_FQ_TAP_IM_EL14__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL14_TAP__B 0
+#define QAM_FQ_TAP_IM_EL14_TAP__W 12
+#define QAM_FQ_TAP_IM_EL14_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL14_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL15__A 0x142003E
+#define QAM_FQ_TAP_RE_EL15__W 12
+#define QAM_FQ_TAP_RE_EL15__M 0xFFF
+#define QAM_FQ_TAP_RE_EL15__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL15_TAP__B 0
+#define QAM_FQ_TAP_RE_EL15_TAP__W 12
+#define QAM_FQ_TAP_RE_EL15_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL15_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL15__A 0x142003F
+#define QAM_FQ_TAP_IM_EL15__W 12
+#define QAM_FQ_TAP_IM_EL15__M 0xFFF
+#define QAM_FQ_TAP_IM_EL15__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL15_TAP__B 0
+#define QAM_FQ_TAP_IM_EL15_TAP__W 12
+#define QAM_FQ_TAP_IM_EL15_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL15_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL16__A 0x1420040
+#define QAM_FQ_TAP_RE_EL16__W 12
+#define QAM_FQ_TAP_RE_EL16__M 0xFFF
+#define QAM_FQ_TAP_RE_EL16__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL16_TAP__B 0
+#define QAM_FQ_TAP_RE_EL16_TAP__W 12
+#define QAM_FQ_TAP_RE_EL16_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL16_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL16__A 0x1420041
+#define QAM_FQ_TAP_IM_EL16__W 12
+#define QAM_FQ_TAP_IM_EL16__M 0xFFF
+#define QAM_FQ_TAP_IM_EL16__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL16_TAP__B 0
+#define QAM_FQ_TAP_IM_EL16_TAP__W 12
+#define QAM_FQ_TAP_IM_EL16_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL16_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL17__A 0x1420042
+#define QAM_FQ_TAP_RE_EL17__W 12
+#define QAM_FQ_TAP_RE_EL17__M 0xFFF
+#define QAM_FQ_TAP_RE_EL17__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL17_TAP__B 0
+#define QAM_FQ_TAP_RE_EL17_TAP__W 12
+#define QAM_FQ_TAP_RE_EL17_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL17_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL17__A 0x1420043
+#define QAM_FQ_TAP_IM_EL17__W 12
+#define QAM_FQ_TAP_IM_EL17__M 0xFFF
+#define QAM_FQ_TAP_IM_EL17__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL17_TAP__B 0
+#define QAM_FQ_TAP_IM_EL17_TAP__W 12
+#define QAM_FQ_TAP_IM_EL17_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL17_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL18__A 0x1420044
+#define QAM_FQ_TAP_RE_EL18__W 12
+#define QAM_FQ_TAP_RE_EL18__M 0xFFF
+#define QAM_FQ_TAP_RE_EL18__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL18_TAP__B 0
+#define QAM_FQ_TAP_RE_EL18_TAP__W 12
+#define QAM_FQ_TAP_RE_EL18_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL18_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL18__A 0x1420045
+#define QAM_FQ_TAP_IM_EL18__W 12
+#define QAM_FQ_TAP_IM_EL18__M 0xFFF
+#define QAM_FQ_TAP_IM_EL18__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL18_TAP__B 0
+#define QAM_FQ_TAP_IM_EL18_TAP__W 12
+#define QAM_FQ_TAP_IM_EL18_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL18_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL19__A 0x1420046
+#define QAM_FQ_TAP_RE_EL19__W 12
+#define QAM_FQ_TAP_RE_EL19__M 0xFFF
+#define QAM_FQ_TAP_RE_EL19__PRE 0x600
+
+#define QAM_FQ_TAP_RE_EL19_TAP__B 0
+#define QAM_FQ_TAP_RE_EL19_TAP__W 12
+#define QAM_FQ_TAP_RE_EL19_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL19_TAP__PRE 0x600
+
+#define QAM_FQ_TAP_IM_EL19__A 0x1420047
+#define QAM_FQ_TAP_IM_EL19__W 12
+#define QAM_FQ_TAP_IM_EL19__M 0xFFF
+#define QAM_FQ_TAP_IM_EL19__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL19_TAP__B 0
+#define QAM_FQ_TAP_IM_EL19_TAP__W 12
+#define QAM_FQ_TAP_IM_EL19_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL19_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL20__A 0x1420048
+#define QAM_FQ_TAP_RE_EL20__W 12
+#define QAM_FQ_TAP_RE_EL20__M 0xFFF
+#define QAM_FQ_TAP_RE_EL20__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL20_TAP__B 0
+#define QAM_FQ_TAP_RE_EL20_TAP__W 12
+#define QAM_FQ_TAP_RE_EL20_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL20_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL20__A 0x1420049
+#define QAM_FQ_TAP_IM_EL20__W 12
+#define QAM_FQ_TAP_IM_EL20__M 0xFFF
+#define QAM_FQ_TAP_IM_EL20__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL20_TAP__B 0
+#define QAM_FQ_TAP_IM_EL20_TAP__W 12
+#define QAM_FQ_TAP_IM_EL20_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL20_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL21__A 0x142004A
+#define QAM_FQ_TAP_RE_EL21__W 12
+#define QAM_FQ_TAP_RE_EL21__M 0xFFF
+#define QAM_FQ_TAP_RE_EL21__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL21_TAP__B 0
+#define QAM_FQ_TAP_RE_EL21_TAP__W 12
+#define QAM_FQ_TAP_RE_EL21_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL21_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL21__A 0x142004B
+#define QAM_FQ_TAP_IM_EL21__W 12
+#define QAM_FQ_TAP_IM_EL21__M 0xFFF
+#define QAM_FQ_TAP_IM_EL21__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL21_TAP__B 0
+#define QAM_FQ_TAP_IM_EL21_TAP__W 12
+#define QAM_FQ_TAP_IM_EL21_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL21_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL22__A 0x142004C
+#define QAM_FQ_TAP_RE_EL22__W 12
+#define QAM_FQ_TAP_RE_EL22__M 0xFFF
+#define QAM_FQ_TAP_RE_EL22__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL22_TAP__B 0
+#define QAM_FQ_TAP_RE_EL22_TAP__W 12
+#define QAM_FQ_TAP_RE_EL22_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL22_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL22__A 0x142004D
+#define QAM_FQ_TAP_IM_EL22__W 12
+#define QAM_FQ_TAP_IM_EL22__M 0xFFF
+#define QAM_FQ_TAP_IM_EL22__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL22_TAP__B 0
+#define QAM_FQ_TAP_IM_EL22_TAP__W 12
+#define QAM_FQ_TAP_IM_EL22_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL22_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL23__A 0x142004E
+#define QAM_FQ_TAP_RE_EL23__W 12
+#define QAM_FQ_TAP_RE_EL23__M 0xFFF
+#define QAM_FQ_TAP_RE_EL23__PRE 0x2
+
+#define QAM_FQ_TAP_RE_EL23_TAP__B 0
+#define QAM_FQ_TAP_RE_EL23_TAP__W 12
+#define QAM_FQ_TAP_RE_EL23_TAP__M 0xFFF
+#define QAM_FQ_TAP_RE_EL23_TAP__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL23__A 0x142004F
+#define QAM_FQ_TAP_IM_EL23__W 12
+#define QAM_FQ_TAP_IM_EL23__M 0xFFF
+#define QAM_FQ_TAP_IM_EL23__PRE 0x2
+
+#define QAM_FQ_TAP_IM_EL23_TAP__B 0
+#define QAM_FQ_TAP_IM_EL23_TAP__W 12
+#define QAM_FQ_TAP_IM_EL23_TAP__M 0xFFF
+#define QAM_FQ_TAP_IM_EL23_TAP__PRE 0x2
+
+#define QAM_SL_COMM_EXEC__A 0x1430000
+#define QAM_SL_COMM_EXEC__W 2
+#define QAM_SL_COMM_EXEC__M 0x3
+#define QAM_SL_COMM_EXEC__PRE 0x0
+#define QAM_SL_COMM_EXEC_STOP 0x0
+#define QAM_SL_COMM_EXEC_ACTIVE 0x1
+#define QAM_SL_COMM_EXEC_HOLD 0x2
+
+#define QAM_SL_COMM_MB__A 0x1430002
+#define QAM_SL_COMM_MB__W 4
+#define QAM_SL_COMM_MB__M 0xF
+#define QAM_SL_COMM_MB__PRE 0x0
+#define QAM_SL_COMM_MB_CTL__B 0
+#define QAM_SL_COMM_MB_CTL__W 1
+#define QAM_SL_COMM_MB_CTL__M 0x1
+#define QAM_SL_COMM_MB_CTL__PRE 0x0
+#define QAM_SL_COMM_MB_CTL_OFF 0x0
+#define QAM_SL_COMM_MB_CTL_ON 0x1
+#define QAM_SL_COMM_MB_OBS__B 1
+#define QAM_SL_COMM_MB_OBS__W 1
+#define QAM_SL_COMM_MB_OBS__M 0x2
+#define QAM_SL_COMM_MB_OBS__PRE 0x0
+#define QAM_SL_COMM_MB_OBS_OFF 0x0
+#define QAM_SL_COMM_MB_OBS_ON 0x2
+#define QAM_SL_COMM_MB_MUX_OBS__B 2
+#define QAM_SL_COMM_MB_MUX_OBS__W 2
+#define QAM_SL_COMM_MB_MUX_OBS__M 0xC
+#define QAM_SL_COMM_MB_MUX_OBS__PRE 0x0
+#define QAM_SL_COMM_MB_MUX_OBS_CONST_CORR 0x0
+#define QAM_SL_COMM_MB_MUX_OBS_CONST2LC_O 0x4
+#define QAM_SL_COMM_MB_MUX_OBS_CONST2DQ_O 0x8
+#define QAM_SL_COMM_MB_MUX_OBS_VDEC_O 0xC
+
+#define QAM_SL_COMM_INT_REQ__A 0x1430003
+#define QAM_SL_COMM_INT_REQ__W 1
+#define QAM_SL_COMM_INT_REQ__M 0x1
+#define QAM_SL_COMM_INT_REQ__PRE 0x0
+#define QAM_SL_COMM_INT_STA__A 0x1430005
+#define QAM_SL_COMM_INT_STA__W 2
+#define QAM_SL_COMM_INT_STA__M 0x3
+#define QAM_SL_COMM_INT_STA__PRE 0x0
+
+#define QAM_SL_COMM_INT_STA_MED_ERR_INT__B 0
+#define QAM_SL_COMM_INT_STA_MED_ERR_INT__W 1
+#define QAM_SL_COMM_INT_STA_MED_ERR_INT__M 0x1
+#define QAM_SL_COMM_INT_STA_MED_ERR_INT__PRE 0x0
+
+#define QAM_SL_COMM_INT_STA_MER_INT__B 1
+#define QAM_SL_COMM_INT_STA_MER_INT__W 1
+#define QAM_SL_COMM_INT_STA_MER_INT__M 0x2
+#define QAM_SL_COMM_INT_STA_MER_INT__PRE 0x0
+
+#define QAM_SL_COMM_INT_MSK__A 0x1430006
+#define QAM_SL_COMM_INT_MSK__W 2
+#define QAM_SL_COMM_INT_MSK__M 0x3
+#define QAM_SL_COMM_INT_MSK__PRE 0x0
+#define QAM_SL_COMM_INT_MSK_MED_ERR_MSK__B 0
+#define QAM_SL_COMM_INT_MSK_MED_ERR_MSK__W 1
+#define QAM_SL_COMM_INT_MSK_MED_ERR_MSK__M 0x1
+#define QAM_SL_COMM_INT_MSK_MED_ERR_MSK__PRE 0x0
+#define QAM_SL_COMM_INT_MSK_MER_MSK__B 1
+#define QAM_SL_COMM_INT_MSK_MER_MSK__W 1
+#define QAM_SL_COMM_INT_MSK_MER_MSK__M 0x2
+#define QAM_SL_COMM_INT_MSK_MER_MSK__PRE 0x0
+
+#define QAM_SL_COMM_INT_STM__A 0x1430007
+#define QAM_SL_COMM_INT_STM__W 2
+#define QAM_SL_COMM_INT_STM__M 0x3
+#define QAM_SL_COMM_INT_STM__PRE 0x0
+#define QAM_SL_COMM_INT_STM_MED_ERR_STM__B 0
+#define QAM_SL_COMM_INT_STM_MED_ERR_STM__W 1
+#define QAM_SL_COMM_INT_STM_MED_ERR_STM__M 0x1
+#define QAM_SL_COMM_INT_STM_MED_ERR_STM__PRE 0x0
+#define QAM_SL_COMM_INT_STM_MER_STM__B 1
+#define QAM_SL_COMM_INT_STM_MER_STM__W 1
+#define QAM_SL_COMM_INT_STM_MER_STM__M 0x2
+#define QAM_SL_COMM_INT_STM_MER_STM__PRE 0x0
+
+#define QAM_SL_MODE__A 0x1430010
+#define QAM_SL_MODE__W 11
+#define QAM_SL_MODE__M 0x7FF
+#define QAM_SL_MODE__PRE 0x0
+
+#define QAM_SL_MODE_SLICER4LC__B 0
+#define QAM_SL_MODE_SLICER4LC__W 2
+#define QAM_SL_MODE_SLICER4LC__M 0x3
+#define QAM_SL_MODE_SLICER4LC__PRE 0x0
+#define QAM_SL_MODE_SLICER4LC_RECT 0x0
+#define QAM_SL_MODE_SLICER4LC_ONET 0x1
+#define QAM_SL_MODE_SLICER4LC_RAD 0x2
+
+#define QAM_SL_MODE_SLICER4DQ__B 2
+#define QAM_SL_MODE_SLICER4DQ__W 2
+#define QAM_SL_MODE_SLICER4DQ__M 0xC
+#define QAM_SL_MODE_SLICER4DQ__PRE 0x0
+#define QAM_SL_MODE_SLICER4DQ_RECT 0x0
+#define QAM_SL_MODE_SLICER4DQ_ONET 0x4
+#define QAM_SL_MODE_SLICER4DQ_RAD 0x8
+
+#define QAM_SL_MODE_SLICER4VD__B 4
+#define QAM_SL_MODE_SLICER4VD__W 2
+#define QAM_SL_MODE_SLICER4VD__M 0x30
+#define QAM_SL_MODE_SLICER4VD__PRE 0x0
+#define QAM_SL_MODE_SLICER4VD_RECT 0x0
+#define QAM_SL_MODE_SLICER4VD_ONET 0x10
+#define QAM_SL_MODE_SLICER4VD_RAD 0x20
+
+#define QAM_SL_MODE_ROT_DIS__B 6
+#define QAM_SL_MODE_ROT_DIS__W 1
+#define QAM_SL_MODE_ROT_DIS__M 0x40
+#define QAM_SL_MODE_ROT_DIS__PRE 0x0
+
+#define QAM_SL_MODE_DQROT_DIS__B 7
+#define QAM_SL_MODE_DQROT_DIS__W 1
+#define QAM_SL_MODE_DQROT_DIS__M 0x80
+#define QAM_SL_MODE_DQROT_DIS__PRE 0x0
+
+#define QAM_SL_MODE_DFE_DIS__B 8
+#define QAM_SL_MODE_DFE_DIS__W 1
+#define QAM_SL_MODE_DFE_DIS__M 0x100
+#define QAM_SL_MODE_DFE_DIS__PRE 0x0
+
+#define QAM_SL_MODE_RADIUS_MIX__B 9
+#define QAM_SL_MODE_RADIUS_MIX__W 1
+#define QAM_SL_MODE_RADIUS_MIX__M 0x200
+#define QAM_SL_MODE_RADIUS_MIX__PRE 0x0
+
+#define QAM_SL_MODE_TILT_COMP__B 10
+#define QAM_SL_MODE_TILT_COMP__W 1
+#define QAM_SL_MODE_TILT_COMP__M 0x400
+#define QAM_SL_MODE_TILT_COMP__PRE 0x0
+
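+/*
+ * Illustrative sketch, not part of the register map: the named field
+ * values above are already shifted into their register position (e.g.
+ * QAM_SL_MODE_SLICER4DQ_RAD is 0x8 for a 2-bit field at bit offset 2),
+ * so a field of a previously read value v is updated with a plain
+ * mask-and-or:
+ *
+ *   v = (v & ~QAM_SL_MODE_SLICER4DQ__M) | QAM_SL_MODE_SLICER4DQ_RAD;
+ */
+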
+#define QAM_SL_K_FACTOR__A 0x1430011
+#define QAM_SL_K_FACTOR__W 4
+#define QAM_SL_K_FACTOR__M 0xF
+#define QAM_SL_K_FACTOR__PRE 0x0
+
+#define QAM_SL_MEDIAN__A 0x1430012
+#define QAM_SL_MEDIAN__W 14
+#define QAM_SL_MEDIAN__M 0x3FFF
+#define QAM_SL_MEDIAN__PRE 0x0
+
+#define QAM_SL_MEDIAN_LENGTH__B 0
+#define QAM_SL_MEDIAN_LENGTH__W 2
+#define QAM_SL_MEDIAN_LENGTH__M 0x3
+#define QAM_SL_MEDIAN_LENGTH__PRE 0x0
+
+#define QAM_SL_MEDIAN_CORRECT__B 2
+#define QAM_SL_MEDIAN_CORRECT__W 4
+#define QAM_SL_MEDIAN_CORRECT__M 0x3C
+#define QAM_SL_MEDIAN_CORRECT__PRE 0x0
+
+#define QAM_SL_MEDIAN_TOLERANCE__B 6
+#define QAM_SL_MEDIAN_TOLERANCE__W 7
+#define QAM_SL_MEDIAN_TOLERANCE__M 0x1FC0
+#define QAM_SL_MEDIAN_TOLERANCE__PRE 0x0
+
+#define QAM_SL_MEDIAN_FAST__B 13
+#define QAM_SL_MEDIAN_FAST__W 1
+#define QAM_SL_MEDIAN_FAST__M 0x2000
+#define QAM_SL_MEDIAN_FAST__PRE 0x0
+
+#define QAM_SL_ALPHA__A 0x1430013
+#define QAM_SL_ALPHA__W 3
+#define QAM_SL_ALPHA__M 0x7
+#define QAM_SL_ALPHA__PRE 0x0
+
+#define QAM_SL_PHASELIMIT__A 0x1430014
+#define QAM_SL_PHASELIMIT__W 9
+#define QAM_SL_PHASELIMIT__M 0x1FF
+#define QAM_SL_PHASELIMIT__PRE 0x0
+
+#define QAM_SL_MTA_LENGTH__A 0x1430015
+#define QAM_SL_MTA_LENGTH__W 2
+#define QAM_SL_MTA_LENGTH__M 0x3
+#define QAM_SL_MTA_LENGTH__PRE 0x1
+
+#define QAM_SL_MTA_LENGTH_LENGTH__B 0
+#define QAM_SL_MTA_LENGTH_LENGTH__W 2
+#define QAM_SL_MTA_LENGTH_LENGTH__M 0x3
+#define QAM_SL_MTA_LENGTH_LENGTH__PRE 0x1
+
+#define QAM_SL_MEDIAN_ERROR__A 0x1430016
+#define QAM_SL_MEDIAN_ERROR__W 10
+#define QAM_SL_MEDIAN_ERROR__M 0x3FF
+#define QAM_SL_MEDIAN_ERROR__PRE 0x0
+
+#define QAM_SL_MEDIAN_ERROR_MEDIAN_ERR__B 0
+#define QAM_SL_MEDIAN_ERROR_MEDIAN_ERR__W 10
+#define QAM_SL_MEDIAN_ERROR_MEDIAN_ERR__M 0x3FF
+#define QAM_SL_MEDIAN_ERROR_MEDIAN_ERR__PRE 0x0
+
+#define QAM_SL_ERR_POWER__A 0x1430017
+#define QAM_SL_ERR_POWER__W 16
+#define QAM_SL_ERR_POWER__M 0xFFFF
+#define QAM_SL_ERR_POWER__PRE 0x0
+
+#define QAM_DQ_COMM_EXEC__A 0x1440000
+#define QAM_DQ_COMM_EXEC__W 2
+#define QAM_DQ_COMM_EXEC__M 0x3
+#define QAM_DQ_COMM_EXEC__PRE 0x0
+#define QAM_DQ_COMM_EXEC_STOP 0x0
+#define QAM_DQ_COMM_EXEC_ACTIVE 0x1
+#define QAM_DQ_COMM_EXEC_HOLD 0x2
+
+#define QAM_DQ_MODE__A 0x1440010
+#define QAM_DQ_MODE__W 5
+#define QAM_DQ_MODE__M 0x1F
+#define QAM_DQ_MODE__PRE 0x0
+
+#define QAM_DQ_MODE_TAPRESET__B 0
+#define QAM_DQ_MODE_TAPRESET__W 1
+#define QAM_DQ_MODE_TAPRESET__M 0x1
+#define QAM_DQ_MODE_TAPRESET__PRE 0x0
+#define QAM_DQ_MODE_TAPRESET_RST 0x1
+
+#define QAM_DQ_MODE_TAPLMS__B 1
+#define QAM_DQ_MODE_TAPLMS__W 1
+#define QAM_DQ_MODE_TAPLMS__M 0x2
+#define QAM_DQ_MODE_TAPLMS__PRE 0x0
+#define QAM_DQ_MODE_TAPLMS_UPD 0x2
+
+#define QAM_DQ_MODE_TAPDRAIN__B 2
+#define QAM_DQ_MODE_TAPDRAIN__W 1
+#define QAM_DQ_MODE_TAPDRAIN__M 0x4
+#define QAM_DQ_MODE_TAPDRAIN__PRE 0x0
+#define QAM_DQ_MODE_TAPDRAIN_DRAIN 0x4
+
+#define QAM_DQ_MODE_FB__B 3
+#define QAM_DQ_MODE_FB__W 2
+#define QAM_DQ_MODE_FB__M 0x18
+#define QAM_DQ_MODE_FB__PRE 0x0
+#define QAM_DQ_MODE_FB_CMA 0x0
+#define QAM_DQ_MODE_FB_RADIUS 0x8
+#define QAM_DQ_MODE_FB_DFB 0x10
+#define QAM_DQ_MODE_FB_TRELLIS 0x18
+
+#define QAM_DQ_MU_FACTOR__A 0x1440011
+#define QAM_DQ_MU_FACTOR__W 3
+#define QAM_DQ_MU_FACTOR__M 0x7
+#define QAM_DQ_MU_FACTOR__PRE 0x0
+
+#define QAM_DQ_LA_FACTOR__A 0x1440012
+#define QAM_DQ_LA_FACTOR__W 4
+#define QAM_DQ_LA_FACTOR__M 0xF
+#define QAM_DQ_LA_FACTOR__PRE 0xC
+
+#define QAM_DQ_CMA_RATIO__A 0x1440013
+#define QAM_DQ_CMA_RATIO__W 14
+#define QAM_DQ_CMA_RATIO__M 0x3FFF
+#define QAM_DQ_CMA_RATIO__PRE 0x3CF9
+#define QAM_DQ_CMA_RATIO_QPSK 0x2000
+#define QAM_DQ_CMA_RATIO_QAM16 0x34CD
+#define QAM_DQ_CMA_RATIO_QAM64 0x3A00
+#define QAM_DQ_CMA_RATIO_QAM256 0x3B4D
+#define QAM_DQ_CMA_RATIO_QAM1024 0x3BA0
+
+#define QAM_DQ_QUAL_RADSEL__A 0x1440014
+#define QAM_DQ_QUAL_RADSEL__W 3
+#define QAM_DQ_QUAL_RADSEL__M 0x7
+#define QAM_DQ_QUAL_RADSEL__PRE 0x0
+
+#define QAM_DQ_QUAL_RADSEL_BIT__B 0
+#define QAM_DQ_QUAL_RADSEL_BIT__W 3
+#define QAM_DQ_QUAL_RADSEL_BIT__M 0x7
+#define QAM_DQ_QUAL_RADSEL_BIT__PRE 0x0
+#define QAM_DQ_QUAL_RADSEL_BIT_PURE_RADIUS 0x0
+#define QAM_DQ_QUAL_RADSEL_BIT_PURE_CMA 0x6
+
+#define QAM_DQ_QUAL_ENA__A 0x1440015
+#define QAM_DQ_QUAL_ENA__W 1
+#define QAM_DQ_QUAL_ENA__M 0x1
+#define QAM_DQ_QUAL_ENA__PRE 0x0
+
+#define QAM_DQ_QUAL_ENA_ENA__B 0
+#define QAM_DQ_QUAL_ENA_ENA__W 1
+#define QAM_DQ_QUAL_ENA_ENA__M 0x1
+#define QAM_DQ_QUAL_ENA_ENA__PRE 0x0
+#define QAM_DQ_QUAL_ENA_ENA_QUAL_WEIGHTING 0x1
+
+#define QAM_DQ_QUAL_FUN0__A 0x1440018
+#define QAM_DQ_QUAL_FUN0__W 6
+#define QAM_DQ_QUAL_FUN0__M 0x3F
+#define QAM_DQ_QUAL_FUN0__PRE 0x4
+
+#define QAM_DQ_QUAL_FUN0_BIT__B 0
+#define QAM_DQ_QUAL_FUN0_BIT__W 6
+#define QAM_DQ_QUAL_FUN0_BIT__M 0x3F
+#define QAM_DQ_QUAL_FUN0_BIT__PRE 0x4
+
+#define QAM_DQ_QUAL_FUN1__A 0x1440019
+#define QAM_DQ_QUAL_FUN1__W 6
+#define QAM_DQ_QUAL_FUN1__M 0x3F
+#define QAM_DQ_QUAL_FUN1__PRE 0x4
+
+#define QAM_DQ_QUAL_FUN1_BIT__B 0
+#define QAM_DQ_QUAL_FUN1_BIT__W 6
+#define QAM_DQ_QUAL_FUN1_BIT__M 0x3F
+#define QAM_DQ_QUAL_FUN1_BIT__PRE 0x4
+
+#define QAM_DQ_QUAL_FUN2__A 0x144001A
+#define QAM_DQ_QUAL_FUN2__W 6
+#define QAM_DQ_QUAL_FUN2__M 0x3F
+#define QAM_DQ_QUAL_FUN2__PRE 0x4
+
+#define QAM_DQ_QUAL_FUN2_BIT__B 0
+#define QAM_DQ_QUAL_FUN2_BIT__W 6
+#define QAM_DQ_QUAL_FUN2_BIT__M 0x3F
+#define QAM_DQ_QUAL_FUN2_BIT__PRE 0x4
+
+#define QAM_DQ_QUAL_FUN3__A 0x144001B
+#define QAM_DQ_QUAL_FUN3__W 6
+#define QAM_DQ_QUAL_FUN3__M 0x3F
+#define QAM_DQ_QUAL_FUN3__PRE 0x4
+
+#define QAM_DQ_QUAL_FUN3_BIT__B 0
+#define QAM_DQ_QUAL_FUN3_BIT__W 6
+#define QAM_DQ_QUAL_FUN3_BIT__M 0x3F
+#define QAM_DQ_QUAL_FUN3_BIT__PRE 0x4
+
+#define QAM_DQ_QUAL_FUN4__A 0x144001C
+#define QAM_DQ_QUAL_FUN4__W 6
+#define QAM_DQ_QUAL_FUN4__M 0x3F
+#define QAM_DQ_QUAL_FUN4__PRE 0x6
+
+#define QAM_DQ_QUAL_FUN4_BIT__B 0
+#define QAM_DQ_QUAL_FUN4_BIT__W 6
+#define QAM_DQ_QUAL_FUN4_BIT__M 0x3F
+#define QAM_DQ_QUAL_FUN4_BIT__PRE 0x6
+
+#define QAM_DQ_QUAL_FUN5__A 0x144001D
+#define QAM_DQ_QUAL_FUN5__W 6
+#define QAM_DQ_QUAL_FUN5__M 0x3F
+#define QAM_DQ_QUAL_FUN5__PRE 0x6
+
+#define QAM_DQ_QUAL_FUN5_BIT__B 0
+#define QAM_DQ_QUAL_FUN5_BIT__W 6
+#define QAM_DQ_QUAL_FUN5_BIT__M 0x3F
+#define QAM_DQ_QUAL_FUN5_BIT__PRE 0x6
+
+#define QAM_DQ_RAW_LIM__A 0x144001E
+#define QAM_DQ_RAW_LIM__W 5
+#define QAM_DQ_RAW_LIM__M 0x1F
+#define QAM_DQ_RAW_LIM__PRE 0x1F
+
+#define QAM_DQ_RAW_LIM_BIT__B 0
+#define QAM_DQ_RAW_LIM_BIT__W 5
+#define QAM_DQ_RAW_LIM_BIT__M 0x1F
+#define QAM_DQ_RAW_LIM_BIT__PRE 0x1F
+
+#define QAM_DQ_TAP_RE_EL0__A 0x1440020
+#define QAM_DQ_TAP_RE_EL0__W 12
+#define QAM_DQ_TAP_RE_EL0__M 0xFFF
+#define QAM_DQ_TAP_RE_EL0__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL0_TAP__B 0
+#define QAM_DQ_TAP_RE_EL0_TAP__W 12
+#define QAM_DQ_TAP_RE_EL0_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL0_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL0__A 0x1440021
+#define QAM_DQ_TAP_IM_EL0__W 12
+#define QAM_DQ_TAP_IM_EL0__M 0xFFF
+#define QAM_DQ_TAP_IM_EL0__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL0_TAP__B 0
+#define QAM_DQ_TAP_IM_EL0_TAP__W 12
+#define QAM_DQ_TAP_IM_EL0_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL0_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL1__A 0x1440022
+#define QAM_DQ_TAP_RE_EL1__W 12
+#define QAM_DQ_TAP_RE_EL1__M 0xFFF
+#define QAM_DQ_TAP_RE_EL1__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL1_TAP__B 0
+#define QAM_DQ_TAP_RE_EL1_TAP__W 12
+#define QAM_DQ_TAP_RE_EL1_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL1_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL1__A 0x1440023
+#define QAM_DQ_TAP_IM_EL1__W 12
+#define QAM_DQ_TAP_IM_EL1__M 0xFFF
+#define QAM_DQ_TAP_IM_EL1__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL1_TAP__B 0
+#define QAM_DQ_TAP_IM_EL1_TAP__W 12
+#define QAM_DQ_TAP_IM_EL1_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL1_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL2__A 0x1440024
+#define QAM_DQ_TAP_RE_EL2__W 12
+#define QAM_DQ_TAP_RE_EL2__M 0xFFF
+#define QAM_DQ_TAP_RE_EL2__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL2_TAP__B 0
+#define QAM_DQ_TAP_RE_EL2_TAP__W 12
+#define QAM_DQ_TAP_RE_EL2_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL2_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL2__A 0x1440025
+#define QAM_DQ_TAP_IM_EL2__W 12
+#define QAM_DQ_TAP_IM_EL2__M 0xFFF
+#define QAM_DQ_TAP_IM_EL2__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL2_TAP__B 0
+#define QAM_DQ_TAP_IM_EL2_TAP__W 12
+#define QAM_DQ_TAP_IM_EL2_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL2_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL3__A 0x1440026
+#define QAM_DQ_TAP_RE_EL3__W 12
+#define QAM_DQ_TAP_RE_EL3__M 0xFFF
+#define QAM_DQ_TAP_RE_EL3__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL3_TAP__B 0
+#define QAM_DQ_TAP_RE_EL3_TAP__W 12
+#define QAM_DQ_TAP_RE_EL3_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL3_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL3__A 0x1440027
+#define QAM_DQ_TAP_IM_EL3__W 12
+#define QAM_DQ_TAP_IM_EL3__M 0xFFF
+#define QAM_DQ_TAP_IM_EL3__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL3_TAP__B 0
+#define QAM_DQ_TAP_IM_EL3_TAP__W 12
+#define QAM_DQ_TAP_IM_EL3_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL3_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL4__A 0x1440028
+#define QAM_DQ_TAP_RE_EL4__W 12
+#define QAM_DQ_TAP_RE_EL4__M 0xFFF
+#define QAM_DQ_TAP_RE_EL4__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL4_TAP__B 0
+#define QAM_DQ_TAP_RE_EL4_TAP__W 12
+#define QAM_DQ_TAP_RE_EL4_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL4_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL4__A 0x1440029
+#define QAM_DQ_TAP_IM_EL4__W 12
+#define QAM_DQ_TAP_IM_EL4__M 0xFFF
+#define QAM_DQ_TAP_IM_EL4__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL4_TAP__B 0
+#define QAM_DQ_TAP_IM_EL4_TAP__W 12
+#define QAM_DQ_TAP_IM_EL4_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL4_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL5__A 0x144002A
+#define QAM_DQ_TAP_RE_EL5__W 12
+#define QAM_DQ_TAP_RE_EL5__M 0xFFF
+#define QAM_DQ_TAP_RE_EL5__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL5_TAP__B 0
+#define QAM_DQ_TAP_RE_EL5_TAP__W 12
+#define QAM_DQ_TAP_RE_EL5_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL5_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL5__A 0x144002B
+#define QAM_DQ_TAP_IM_EL5__W 12
+#define QAM_DQ_TAP_IM_EL5__M 0xFFF
+#define QAM_DQ_TAP_IM_EL5__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL5_TAP__B 0
+#define QAM_DQ_TAP_IM_EL5_TAP__W 12
+#define QAM_DQ_TAP_IM_EL5_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL5_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL6__A 0x144002C
+#define QAM_DQ_TAP_RE_EL6__W 12
+#define QAM_DQ_TAP_RE_EL6__M 0xFFF
+#define QAM_DQ_TAP_RE_EL6__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL6_TAP__B 0
+#define QAM_DQ_TAP_RE_EL6_TAP__W 12
+#define QAM_DQ_TAP_RE_EL6_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL6_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL6__A 0x144002D
+#define QAM_DQ_TAP_IM_EL6__W 12
+#define QAM_DQ_TAP_IM_EL6__M 0xFFF
+#define QAM_DQ_TAP_IM_EL6__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL6_TAP__B 0
+#define QAM_DQ_TAP_IM_EL6_TAP__W 12
+#define QAM_DQ_TAP_IM_EL6_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL6_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL7__A 0x144002E
+#define QAM_DQ_TAP_RE_EL7__W 12
+#define QAM_DQ_TAP_RE_EL7__M 0xFFF
+#define QAM_DQ_TAP_RE_EL7__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL7_TAP__B 0
+#define QAM_DQ_TAP_RE_EL7_TAP__W 12
+#define QAM_DQ_TAP_RE_EL7_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL7_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL7__A 0x144002F
+#define QAM_DQ_TAP_IM_EL7__W 12
+#define QAM_DQ_TAP_IM_EL7__M 0xFFF
+#define QAM_DQ_TAP_IM_EL7__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL7_TAP__B 0
+#define QAM_DQ_TAP_IM_EL7_TAP__W 12
+#define QAM_DQ_TAP_IM_EL7_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL7_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL8__A 0x1440030
+#define QAM_DQ_TAP_RE_EL8__W 12
+#define QAM_DQ_TAP_RE_EL8__M 0xFFF
+#define QAM_DQ_TAP_RE_EL8__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL8_TAP__B 0
+#define QAM_DQ_TAP_RE_EL8_TAP__W 12
+#define QAM_DQ_TAP_RE_EL8_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL8_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL8__A 0x1440031
+#define QAM_DQ_TAP_IM_EL8__W 12
+#define QAM_DQ_TAP_IM_EL8__M 0xFFF
+#define QAM_DQ_TAP_IM_EL8__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL8_TAP__B 0
+#define QAM_DQ_TAP_IM_EL8_TAP__W 12
+#define QAM_DQ_TAP_IM_EL8_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL8_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL9__A 0x1440032
+#define QAM_DQ_TAP_RE_EL9__W 12
+#define QAM_DQ_TAP_RE_EL9__M 0xFFF
+#define QAM_DQ_TAP_RE_EL9__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL9_TAP__B 0
+#define QAM_DQ_TAP_RE_EL9_TAP__W 12
+#define QAM_DQ_TAP_RE_EL9_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL9_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL9__A 0x1440033
+#define QAM_DQ_TAP_IM_EL9__W 12
+#define QAM_DQ_TAP_IM_EL9__M 0xFFF
+#define QAM_DQ_TAP_IM_EL9__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL9_TAP__B 0
+#define QAM_DQ_TAP_IM_EL9_TAP__W 12
+#define QAM_DQ_TAP_IM_EL9_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL9_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL10__A 0x1440034
+#define QAM_DQ_TAP_RE_EL10__W 12
+#define QAM_DQ_TAP_RE_EL10__M 0xFFF
+#define QAM_DQ_TAP_RE_EL10__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL10_TAP__B 0
+#define QAM_DQ_TAP_RE_EL10_TAP__W 12
+#define QAM_DQ_TAP_RE_EL10_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL10_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL10__A 0x1440035
+#define QAM_DQ_TAP_IM_EL10__W 12
+#define QAM_DQ_TAP_IM_EL10__M 0xFFF
+#define QAM_DQ_TAP_IM_EL10__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL10_TAP__B 0
+#define QAM_DQ_TAP_IM_EL10_TAP__W 12
+#define QAM_DQ_TAP_IM_EL10_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL10_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL11__A 0x1440036
+#define QAM_DQ_TAP_RE_EL11__W 12
+#define QAM_DQ_TAP_RE_EL11__M 0xFFF
+#define QAM_DQ_TAP_RE_EL11__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL11_TAP__B 0
+#define QAM_DQ_TAP_RE_EL11_TAP__W 12
+#define QAM_DQ_TAP_RE_EL11_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL11_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL11__A 0x1440037
+#define QAM_DQ_TAP_IM_EL11__W 12
+#define QAM_DQ_TAP_IM_EL11__M 0xFFF
+#define QAM_DQ_TAP_IM_EL11__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL11_TAP__B 0
+#define QAM_DQ_TAP_IM_EL11_TAP__W 12
+#define QAM_DQ_TAP_IM_EL11_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL11_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL12__A 0x1440038
+#define QAM_DQ_TAP_RE_EL12__W 12
+#define QAM_DQ_TAP_RE_EL12__M 0xFFF
+#define QAM_DQ_TAP_RE_EL12__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL12_TAP__B 0
+#define QAM_DQ_TAP_RE_EL12_TAP__W 12
+#define QAM_DQ_TAP_RE_EL12_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL12_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL12__A 0x1440039
+#define QAM_DQ_TAP_IM_EL12__W 12
+#define QAM_DQ_TAP_IM_EL12__M 0xFFF
+#define QAM_DQ_TAP_IM_EL12__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL12_TAP__B 0
+#define QAM_DQ_TAP_IM_EL12_TAP__W 12
+#define QAM_DQ_TAP_IM_EL12_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL12_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL13__A 0x144003A
+#define QAM_DQ_TAP_RE_EL13__W 12
+#define QAM_DQ_TAP_RE_EL13__M 0xFFF
+#define QAM_DQ_TAP_RE_EL13__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL13_TAP__B 0
+#define QAM_DQ_TAP_RE_EL13_TAP__W 12
+#define QAM_DQ_TAP_RE_EL13_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL13_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL13__A 0x144003B
+#define QAM_DQ_TAP_IM_EL13__W 12
+#define QAM_DQ_TAP_IM_EL13__M 0xFFF
+#define QAM_DQ_TAP_IM_EL13__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL13_TAP__B 0
+#define QAM_DQ_TAP_IM_EL13_TAP__W 12
+#define QAM_DQ_TAP_IM_EL13_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL13_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL14__A 0x144003C
+#define QAM_DQ_TAP_RE_EL14__W 12
+#define QAM_DQ_TAP_RE_EL14__M 0xFFF
+#define QAM_DQ_TAP_RE_EL14__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL14_TAP__B 0
+#define QAM_DQ_TAP_RE_EL14_TAP__W 12
+#define QAM_DQ_TAP_RE_EL14_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL14_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL14__A 0x144003D
+#define QAM_DQ_TAP_IM_EL14__W 12
+#define QAM_DQ_TAP_IM_EL14__M 0xFFF
+#define QAM_DQ_TAP_IM_EL14__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL14_TAP__B 0
+#define QAM_DQ_TAP_IM_EL14_TAP__W 12
+#define QAM_DQ_TAP_IM_EL14_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL14_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL15__A 0x144003E
+#define QAM_DQ_TAP_RE_EL15__W 12
+#define QAM_DQ_TAP_RE_EL15__M 0xFFF
+#define QAM_DQ_TAP_RE_EL15__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL15_TAP__B 0
+#define QAM_DQ_TAP_RE_EL15_TAP__W 12
+#define QAM_DQ_TAP_RE_EL15_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL15_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL15__A 0x144003F
+#define QAM_DQ_TAP_IM_EL15__W 12
+#define QAM_DQ_TAP_IM_EL15__M 0xFFF
+#define QAM_DQ_TAP_IM_EL15__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL15_TAP__B 0
+#define QAM_DQ_TAP_IM_EL15_TAP__W 12
+#define QAM_DQ_TAP_IM_EL15_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL15_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL16__A 0x1440040
+#define QAM_DQ_TAP_RE_EL16__W 12
+#define QAM_DQ_TAP_RE_EL16__M 0xFFF
+#define QAM_DQ_TAP_RE_EL16__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL16_TAP__B 0
+#define QAM_DQ_TAP_RE_EL16_TAP__W 12
+#define QAM_DQ_TAP_RE_EL16_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL16_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL16__A 0x1440041
+#define QAM_DQ_TAP_IM_EL16__W 12
+#define QAM_DQ_TAP_IM_EL16__M 0xFFF
+#define QAM_DQ_TAP_IM_EL16__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL16_TAP__B 0
+#define QAM_DQ_TAP_IM_EL16_TAP__W 12
+#define QAM_DQ_TAP_IM_EL16_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL16_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL17__A 0x1440042
+#define QAM_DQ_TAP_RE_EL17__W 12
+#define QAM_DQ_TAP_RE_EL17__M 0xFFF
+#define QAM_DQ_TAP_RE_EL17__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL17_TAP__B 0
+#define QAM_DQ_TAP_RE_EL17_TAP__W 12
+#define QAM_DQ_TAP_RE_EL17_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL17_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL17__A 0x1440043
+#define QAM_DQ_TAP_IM_EL17__W 12
+#define QAM_DQ_TAP_IM_EL17__M 0xFFF
+#define QAM_DQ_TAP_IM_EL17__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL17_TAP__B 0
+#define QAM_DQ_TAP_IM_EL17_TAP__W 12
+#define QAM_DQ_TAP_IM_EL17_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL17_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL18__A 0x1440044
+#define QAM_DQ_TAP_RE_EL18__W 12
+#define QAM_DQ_TAP_RE_EL18__M 0xFFF
+#define QAM_DQ_TAP_RE_EL18__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL18_TAP__B 0
+#define QAM_DQ_TAP_RE_EL18_TAP__W 12
+#define QAM_DQ_TAP_RE_EL18_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL18_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL18__A 0x1440045
+#define QAM_DQ_TAP_IM_EL18__W 12
+#define QAM_DQ_TAP_IM_EL18__M 0xFFF
+#define QAM_DQ_TAP_IM_EL18__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL18_TAP__B 0
+#define QAM_DQ_TAP_IM_EL18_TAP__W 12
+#define QAM_DQ_TAP_IM_EL18_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL18_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL19__A 0x1440046
+#define QAM_DQ_TAP_RE_EL19__W 12
+#define QAM_DQ_TAP_RE_EL19__M 0xFFF
+#define QAM_DQ_TAP_RE_EL19__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL19_TAP__B 0
+#define QAM_DQ_TAP_RE_EL19_TAP__W 12
+#define QAM_DQ_TAP_RE_EL19_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL19_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL19__A 0x1440047
+#define QAM_DQ_TAP_IM_EL19__W 12
+#define QAM_DQ_TAP_IM_EL19__M 0xFFF
+#define QAM_DQ_TAP_IM_EL19__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL19_TAP__B 0
+#define QAM_DQ_TAP_IM_EL19_TAP__W 12
+#define QAM_DQ_TAP_IM_EL19_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL19_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL20__A 0x1440048
+#define QAM_DQ_TAP_RE_EL20__W 12
+#define QAM_DQ_TAP_RE_EL20__M 0xFFF
+#define QAM_DQ_TAP_RE_EL20__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL20_TAP__B 0
+#define QAM_DQ_TAP_RE_EL20_TAP__W 12
+#define QAM_DQ_TAP_RE_EL20_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL20_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL20__A 0x1440049
+#define QAM_DQ_TAP_IM_EL20__W 12
+#define QAM_DQ_TAP_IM_EL20__M 0xFFF
+#define QAM_DQ_TAP_IM_EL20__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL20_TAP__B 0
+#define QAM_DQ_TAP_IM_EL20_TAP__W 12
+#define QAM_DQ_TAP_IM_EL20_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL20_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL21__A 0x144004A
+#define QAM_DQ_TAP_RE_EL21__W 12
+#define QAM_DQ_TAP_RE_EL21__M 0xFFF
+#define QAM_DQ_TAP_RE_EL21__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL21_TAP__B 0
+#define QAM_DQ_TAP_RE_EL21_TAP__W 12
+#define QAM_DQ_TAP_RE_EL21_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL21_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL21__A 0x144004B
+#define QAM_DQ_TAP_IM_EL21__W 12
+#define QAM_DQ_TAP_IM_EL21__M 0xFFF
+#define QAM_DQ_TAP_IM_EL21__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL21_TAP__B 0
+#define QAM_DQ_TAP_IM_EL21_TAP__W 12
+#define QAM_DQ_TAP_IM_EL21_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL21_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL22__A 0x144004C
+#define QAM_DQ_TAP_RE_EL22__W 12
+#define QAM_DQ_TAP_RE_EL22__M 0xFFF
+#define QAM_DQ_TAP_RE_EL22__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL22_TAP__B 0
+#define QAM_DQ_TAP_RE_EL22_TAP__W 12
+#define QAM_DQ_TAP_RE_EL22_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL22_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL22__A 0x144004D
+#define QAM_DQ_TAP_IM_EL22__W 12
+#define QAM_DQ_TAP_IM_EL22__M 0xFFF
+#define QAM_DQ_TAP_IM_EL22__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL22_TAP__B 0
+#define QAM_DQ_TAP_IM_EL22_TAP__W 12
+#define QAM_DQ_TAP_IM_EL22_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL22_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL23__A 0x144004E
+#define QAM_DQ_TAP_RE_EL23__W 12
+#define QAM_DQ_TAP_RE_EL23__M 0xFFF
+#define QAM_DQ_TAP_RE_EL23__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL23_TAP__B 0
+#define QAM_DQ_TAP_RE_EL23_TAP__W 12
+#define QAM_DQ_TAP_RE_EL23_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL23_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL23__A 0x144004F
+#define QAM_DQ_TAP_IM_EL23__W 12
+#define QAM_DQ_TAP_IM_EL23__M 0xFFF
+#define QAM_DQ_TAP_IM_EL23__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL23_TAP__B 0
+#define QAM_DQ_TAP_IM_EL23_TAP__W 12
+#define QAM_DQ_TAP_IM_EL23_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL23_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL24__A 0x1440050
+#define QAM_DQ_TAP_RE_EL24__W 12
+#define QAM_DQ_TAP_RE_EL24__M 0xFFF
+#define QAM_DQ_TAP_RE_EL24__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL24_TAP__B 0
+#define QAM_DQ_TAP_RE_EL24_TAP__W 12
+#define QAM_DQ_TAP_RE_EL24_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL24_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL24__A 0x1440051
+#define QAM_DQ_TAP_IM_EL24__W 12
+#define QAM_DQ_TAP_IM_EL24__M 0xFFF
+#define QAM_DQ_TAP_IM_EL24__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL24_TAP__B 0
+#define QAM_DQ_TAP_IM_EL24_TAP__W 12
+#define QAM_DQ_TAP_IM_EL24_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL24_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL25__A 0x1440052
+#define QAM_DQ_TAP_RE_EL25__W 12
+#define QAM_DQ_TAP_RE_EL25__M 0xFFF
+#define QAM_DQ_TAP_RE_EL25__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL25_TAP__B 0
+#define QAM_DQ_TAP_RE_EL25_TAP__W 12
+#define QAM_DQ_TAP_RE_EL25_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL25_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL25__A 0x1440053
+#define QAM_DQ_TAP_IM_EL25__W 12
+#define QAM_DQ_TAP_IM_EL25__M 0xFFF
+#define QAM_DQ_TAP_IM_EL25__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL25_TAP__B 0
+#define QAM_DQ_TAP_IM_EL25_TAP__W 12
+#define QAM_DQ_TAP_IM_EL25_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL25_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL26__A 0x1440054
+#define QAM_DQ_TAP_RE_EL26__W 12
+#define QAM_DQ_TAP_RE_EL26__M 0xFFF
+#define QAM_DQ_TAP_RE_EL26__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL26_TAP__B 0
+#define QAM_DQ_TAP_RE_EL26_TAP__W 12
+#define QAM_DQ_TAP_RE_EL26_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL26_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL26__A 0x1440055
+#define QAM_DQ_TAP_IM_EL26__W 12
+#define QAM_DQ_TAP_IM_EL26__M 0xFFF
+#define QAM_DQ_TAP_IM_EL26__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL26_TAP__B 0
+#define QAM_DQ_TAP_IM_EL26_TAP__W 12
+#define QAM_DQ_TAP_IM_EL26_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL26_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL27__A 0x1440056
+#define QAM_DQ_TAP_RE_EL27__W 12
+#define QAM_DQ_TAP_RE_EL27__M 0xFFF
+#define QAM_DQ_TAP_RE_EL27__PRE 0x2
+
+#define QAM_DQ_TAP_RE_EL27_TAP__B 0
+#define QAM_DQ_TAP_RE_EL27_TAP__W 12
+#define QAM_DQ_TAP_RE_EL27_TAP__M 0xFFF
+#define QAM_DQ_TAP_RE_EL27_TAP__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL27__A 0x1440057
+#define QAM_DQ_TAP_IM_EL27__W 12
+#define QAM_DQ_TAP_IM_EL27__M 0xFFF
+#define QAM_DQ_TAP_IM_EL27__PRE 0x2
+
+#define QAM_DQ_TAP_IM_EL27_TAP__B 0
+#define QAM_DQ_TAP_IM_EL27_TAP__W 12
+#define QAM_DQ_TAP_IM_EL27_TAP__M 0xFFF
+#define QAM_DQ_TAP_IM_EL27_TAP__PRE 0x2
+
+#define QAM_LC_COMM_EXEC__A 0x1450000
+#define QAM_LC_COMM_EXEC__W 2
+#define QAM_LC_COMM_EXEC__M 0x3
+#define QAM_LC_COMM_EXEC__PRE 0x0
+#define QAM_LC_COMM_EXEC_STOP 0x0
+#define QAM_LC_COMM_EXEC_ACTIVE 0x1
+#define QAM_LC_COMM_EXEC_HOLD 0x2
+
+#define QAM_LC_COMM_MB__A 0x1450002
+#define QAM_LC_COMM_MB__W 2
+#define QAM_LC_COMM_MB__M 0x3
+#define QAM_LC_COMM_MB__PRE 0x0
+#define QAM_LC_COMM_MB_CTL__B 0
+#define QAM_LC_COMM_MB_CTL__W 1
+#define QAM_LC_COMM_MB_CTL__M 0x1
+#define QAM_LC_COMM_MB_CTL__PRE 0x0
+#define QAM_LC_COMM_MB_CTL_OFF 0x0
+#define QAM_LC_COMM_MB_CTL_ON 0x1
+#define QAM_LC_COMM_MB_OBS__B 1
+#define QAM_LC_COMM_MB_OBS__W 1
+#define QAM_LC_COMM_MB_OBS__M 0x2
+#define QAM_LC_COMM_MB_OBS__PRE 0x0
+#define QAM_LC_COMM_MB_OBS_OFF 0x0
+#define QAM_LC_COMM_MB_OBS_ON 0x2
+
+#define QAM_LC_COMM_INT_REQ__A 0x1450003
+#define QAM_LC_COMM_INT_REQ__W 1
+#define QAM_LC_COMM_INT_REQ__M 0x1
+#define QAM_LC_COMM_INT_REQ__PRE 0x0
+#define QAM_LC_COMM_INT_STA__A 0x1450005
+#define QAM_LC_COMM_INT_STA__W 3
+#define QAM_LC_COMM_INT_STA__M 0x7
+#define QAM_LC_COMM_INT_STA__PRE 0x0
+
+#define QAM_LC_COMM_INT_STA_READY__B 0
+#define QAM_LC_COMM_INT_STA_READY__W 1
+#define QAM_LC_COMM_INT_STA_READY__M 0x1
+#define QAM_LC_COMM_INT_STA_READY__PRE 0x0
+
+#define QAM_LC_COMM_INT_STA_OVERFLOW__B 1
+#define QAM_LC_COMM_INT_STA_OVERFLOW__W 1
+#define QAM_LC_COMM_INT_STA_OVERFLOW__M 0x2
+#define QAM_LC_COMM_INT_STA_OVERFLOW__PRE 0x0
+
+#define QAM_LC_COMM_INT_STA_FREQ_WRAP__B 2
+#define QAM_LC_COMM_INT_STA_FREQ_WRAP__W 1
+#define QAM_LC_COMM_INT_STA_FREQ_WRAP__M 0x4
+#define QAM_LC_COMM_INT_STA_FREQ_WRAP__PRE 0x0
+
+#define QAM_LC_COMM_INT_MSK__A 0x1450006
+#define QAM_LC_COMM_INT_MSK__W 3
+#define QAM_LC_COMM_INT_MSK__M 0x7
+#define QAM_LC_COMM_INT_MSK__PRE 0x0
+#define QAM_LC_COMM_INT_MSK_READY__B 0
+#define QAM_LC_COMM_INT_MSK_READY__W 1
+#define QAM_LC_COMM_INT_MSK_READY__M 0x1
+#define QAM_LC_COMM_INT_MSK_READY__PRE 0x0
+#define QAM_LC_COMM_INT_MSK_OVERFLOW__B 1
+#define QAM_LC_COMM_INT_MSK_OVERFLOW__W 1
+#define QAM_LC_COMM_INT_MSK_OVERFLOW__M 0x2
+#define QAM_LC_COMM_INT_MSK_OVERFLOW__PRE 0x0
+#define QAM_LC_COMM_INT_MSK_FREQ_WRAP__B 2
+#define QAM_LC_COMM_INT_MSK_FREQ_WRAP__W 1
+#define QAM_LC_COMM_INT_MSK_FREQ_WRAP__M 0x4
+#define QAM_LC_COMM_INT_MSK_FREQ_WRAP__PRE 0x0
+
+#define QAM_LC_COMM_INT_STM__A 0x1450007
+#define QAM_LC_COMM_INT_STM__W 3
+#define QAM_LC_COMM_INT_STM__M 0x7
+#define QAM_LC_COMM_INT_STM__PRE 0x0
+#define QAM_LC_COMM_INT_STM_READY__B 0
+#define QAM_LC_COMM_INT_STM_READY__W 1
+#define QAM_LC_COMM_INT_STM_READY__M 0x1
+#define QAM_LC_COMM_INT_STM_READY__PRE 0x0
+#define QAM_LC_COMM_INT_STM_OVERFLOW__B 1
+#define QAM_LC_COMM_INT_STM_OVERFLOW__W 1
+#define QAM_LC_COMM_INT_STM_OVERFLOW__M 0x2
+#define QAM_LC_COMM_INT_STM_OVERFLOW__PRE 0x0
+#define QAM_LC_COMM_INT_STM_FREQ_WRAP__B 2
+#define QAM_LC_COMM_INT_STM_FREQ_WRAP__W 1
+#define QAM_LC_COMM_INT_STM_FREQ_WRAP__M 0x4
+#define QAM_LC_COMM_INT_STM_FREQ_WRAP__PRE 0x0
+
+#define QAM_LC_MODE__A 0x1450010
+#define QAM_LC_MODE__W 3
+#define QAM_LC_MODE__M 0x7
+#define QAM_LC_MODE__PRE 0x7
+
+#define QAM_LC_MODE_ENABLE_A__B 0
+#define QAM_LC_MODE_ENABLE_A__W 1
+#define QAM_LC_MODE_ENABLE_A__M 0x1
+#define QAM_LC_MODE_ENABLE_A__PRE 0x1
+
+#define QAM_LC_MODE_ENABLE_F__B 1
+#define QAM_LC_MODE_ENABLE_F__W 1
+#define QAM_LC_MODE_ENABLE_F__M 0x2
+#define QAM_LC_MODE_ENABLE_F__PRE 0x2
+
+#define QAM_LC_MODE_ENABLE_R__B 2
+#define QAM_LC_MODE_ENABLE_R__W 1
+#define QAM_LC_MODE_ENABLE_R__M 0x4
+#define QAM_LC_MODE_ENABLE_R__PRE 0x4
+
+#define QAM_LC_CA__A 0x1450011
+#define QAM_LC_CA__W 6
+#define QAM_LC_CA__M 0x3F
+#define QAM_LC_CA__PRE 0x28
+
+#define QAM_LC_CA_COEF__B 0
+#define QAM_LC_CA_COEF__W 6
+#define QAM_LC_CA_COEF__M 0x3F
+#define QAM_LC_CA_COEF__PRE 0x28
+
+#define QAM_LC_CF__A 0x1450012
+#define QAM_LC_CF__W 8
+#define QAM_LC_CF__M 0xFF
+#define QAM_LC_CF__PRE 0x8C
+
+#define QAM_LC_CF_COEF__B 0
+#define QAM_LC_CF_COEF__W 8
+#define QAM_LC_CF_COEF__M 0xFF
+#define QAM_LC_CF_COEF__PRE 0x8C
+
+#define QAM_LC_CF1__A 0x1450013
+#define QAM_LC_CF1__W 8
+#define QAM_LC_CF1__M 0xFF
+#define QAM_LC_CF1__PRE 0x1E
+
+#define QAM_LC_CF1_COEF__B 0
+#define QAM_LC_CF1_COEF__W 8
+#define QAM_LC_CF1_COEF__M 0xFF
+#define QAM_LC_CF1_COEF__PRE 0x1E
+
+#define QAM_LC_CP__A 0x1450014
+#define QAM_LC_CP__W 8
+#define QAM_LC_CP__M 0xFF
+#define QAM_LC_CP__PRE 0x78
+
+#define QAM_LC_CP_COEF__B 0
+#define QAM_LC_CP_COEF__W 8
+#define QAM_LC_CP_COEF__M 0xFF
+#define QAM_LC_CP_COEF__PRE 0x78
+
+#define QAM_LC_CI__A 0x1450015
+#define QAM_LC_CI__W 8
+#define QAM_LC_CI__M 0xFF
+#define QAM_LC_CI__PRE 0x46
+
+#define QAM_LC_CI_COEF__B 0
+#define QAM_LC_CI_COEF__W 8
+#define QAM_LC_CI_COEF__M 0xFF
+#define QAM_LC_CI_COEF__PRE 0x46
+
+#define QAM_LC_EP__A 0x1450016
+#define QAM_LC_EP__W 6
+#define QAM_LC_EP__M 0x3F
+#define QAM_LC_EP__PRE 0x0
+
+#define QAM_LC_EP_COEF__B 0
+#define QAM_LC_EP_COEF__W 6
+#define QAM_LC_EP_COEF__M 0x3F
+#define QAM_LC_EP_COEF__PRE 0x0
+
+#define QAM_LC_EI__A 0x1450017
+#define QAM_LC_EI__W 6
+#define QAM_LC_EI__M 0x3F
+#define QAM_LC_EI__PRE 0x0
+
+#define QAM_LC_EI_COEF__B 0
+#define QAM_LC_EI_COEF__W 6
+#define QAM_LC_EI_COEF__M 0x3F
+#define QAM_LC_EI_COEF__PRE 0x0
+
+#define QAM_LC_QUAL_TAB0__A 0x1450018
+#define QAM_LC_QUAL_TAB0__W 5
+#define QAM_LC_QUAL_TAB0__M 0x1F
+#define QAM_LC_QUAL_TAB0__PRE 0x1
+
+#define QAM_LC_QUAL_TAB0_VALUE__B 0
+#define QAM_LC_QUAL_TAB0_VALUE__W 5
+#define QAM_LC_QUAL_TAB0_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB0_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB1__A 0x1450019
+#define QAM_LC_QUAL_TAB1__W 5
+#define QAM_LC_QUAL_TAB1__M 0x1F
+#define QAM_LC_QUAL_TAB1__PRE 0x1
+
+#define QAM_LC_QUAL_TAB1_VALUE__B 0
+#define QAM_LC_QUAL_TAB1_VALUE__W 5
+#define QAM_LC_QUAL_TAB1_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB1_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB2__A 0x145001A
+#define QAM_LC_QUAL_TAB2__W 5
+#define QAM_LC_QUAL_TAB2__M 0x1F
+#define QAM_LC_QUAL_TAB2__PRE 0x1
+
+#define QAM_LC_QUAL_TAB2_VALUE__B 0
+#define QAM_LC_QUAL_TAB2_VALUE__W 5
+#define QAM_LC_QUAL_TAB2_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB2_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB3__A 0x145001B
+#define QAM_LC_QUAL_TAB3__W 5
+#define QAM_LC_QUAL_TAB3__M 0x1F
+#define QAM_LC_QUAL_TAB3__PRE 0x1
+
+#define QAM_LC_QUAL_TAB3_VALUE__B 0
+#define QAM_LC_QUAL_TAB3_VALUE__W 5
+#define QAM_LC_QUAL_TAB3_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB3_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB4__A 0x145001C
+#define QAM_LC_QUAL_TAB4__W 5
+#define QAM_LC_QUAL_TAB4__M 0x1F
+#define QAM_LC_QUAL_TAB4__PRE 0x1
+
+#define QAM_LC_QUAL_TAB4_VALUE__B 0
+#define QAM_LC_QUAL_TAB4_VALUE__W 5
+#define QAM_LC_QUAL_TAB4_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB4_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB5__A 0x145001D
+#define QAM_LC_QUAL_TAB5__W 5
+#define QAM_LC_QUAL_TAB5__M 0x1F
+#define QAM_LC_QUAL_TAB5__PRE 0x1
+
+#define QAM_LC_QUAL_TAB5_VALUE__B 0
+#define QAM_LC_QUAL_TAB5_VALUE__W 5
+#define QAM_LC_QUAL_TAB5_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB5_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB6__A 0x145001E
+#define QAM_LC_QUAL_TAB6__W 5
+#define QAM_LC_QUAL_TAB6__M 0x1F
+#define QAM_LC_QUAL_TAB6__PRE 0x1
+
+#define QAM_LC_QUAL_TAB6_VALUE__B 0
+#define QAM_LC_QUAL_TAB6_VALUE__W 5
+#define QAM_LC_QUAL_TAB6_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB6_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB8__A 0x145001F
+#define QAM_LC_QUAL_TAB8__W 5
+#define QAM_LC_QUAL_TAB8__M 0x1F
+#define QAM_LC_QUAL_TAB8__PRE 0x1
+
+#define QAM_LC_QUAL_TAB8_VALUE__B 0
+#define QAM_LC_QUAL_TAB8_VALUE__W 5
+#define QAM_LC_QUAL_TAB8_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB8_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB9__A 0x1450020
+#define QAM_LC_QUAL_TAB9__W 5
+#define QAM_LC_QUAL_TAB9__M 0x1F
+#define QAM_LC_QUAL_TAB9__PRE 0x1
+
+#define QAM_LC_QUAL_TAB9_VALUE__B 0
+#define QAM_LC_QUAL_TAB9_VALUE__W 5
+#define QAM_LC_QUAL_TAB9_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB9_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB10__A 0x1450021
+#define QAM_LC_QUAL_TAB10__W 5
+#define QAM_LC_QUAL_TAB10__M 0x1F
+#define QAM_LC_QUAL_TAB10__PRE 0x1
+
+#define QAM_LC_QUAL_TAB10_VALUE__B 0
+#define QAM_LC_QUAL_TAB10_VALUE__W 5
+#define QAM_LC_QUAL_TAB10_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB10_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB12__A 0x1450022
+#define QAM_LC_QUAL_TAB12__W 5
+#define QAM_LC_QUAL_TAB12__M 0x1F
+#define QAM_LC_QUAL_TAB12__PRE 0x1
+
+#define QAM_LC_QUAL_TAB12_VALUE__B 0
+#define QAM_LC_QUAL_TAB12_VALUE__W 5
+#define QAM_LC_QUAL_TAB12_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB12_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB15__A 0x1450023
+#define QAM_LC_QUAL_TAB15__W 5
+#define QAM_LC_QUAL_TAB15__M 0x1F
+#define QAM_LC_QUAL_TAB15__PRE 0x1
+
+#define QAM_LC_QUAL_TAB15_VALUE__B 0
+#define QAM_LC_QUAL_TAB15_VALUE__W 5
+#define QAM_LC_QUAL_TAB15_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB15_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB16__A 0x1450024
+#define QAM_LC_QUAL_TAB16__W 5
+#define QAM_LC_QUAL_TAB16__M 0x1F
+#define QAM_LC_QUAL_TAB16__PRE 0x1
+
+#define QAM_LC_QUAL_TAB16_VALUE__B 0
+#define QAM_LC_QUAL_TAB16_VALUE__W 5
+#define QAM_LC_QUAL_TAB16_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB16_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB20__A 0x1450025
+#define QAM_LC_QUAL_TAB20__W 5
+#define QAM_LC_QUAL_TAB20__M 0x1F
+#define QAM_LC_QUAL_TAB20__PRE 0x1
+
+#define QAM_LC_QUAL_TAB20_VALUE__B 0
+#define QAM_LC_QUAL_TAB20_VALUE__W 5
+#define QAM_LC_QUAL_TAB20_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB20_VALUE__PRE 0x1
+
+#define QAM_LC_QUAL_TAB25__A 0x1450026
+#define QAM_LC_QUAL_TAB25__W 5
+#define QAM_LC_QUAL_TAB25__M 0x1F
+#define QAM_LC_QUAL_TAB25__PRE 0x1
+
+#define QAM_LC_QUAL_TAB25_VALUE__B 0
+#define QAM_LC_QUAL_TAB25_VALUE__W 5
+#define QAM_LC_QUAL_TAB25_VALUE__M 0x1F
+#define QAM_LC_QUAL_TAB25_VALUE__PRE 0x1
+
+#define QAM_LC_EQ_TIMING__A 0x1450027
+#define QAM_LC_EQ_TIMING__W 10
+#define QAM_LC_EQ_TIMING__M 0x3FF
+#define QAM_LC_EQ_TIMING__PRE 0x0
+
+#define QAM_LC_EQ_TIMING_OFFS__B 0
+#define QAM_LC_EQ_TIMING_OFFS__W 10
+#define QAM_LC_EQ_TIMING_OFFS__M 0x3FF
+#define QAM_LC_EQ_TIMING_OFFS__PRE 0x0
+
+#define QAM_LC_LPF_FACTORP__A 0x1450028
+#define QAM_LC_LPF_FACTORP__W 3
+#define QAM_LC_LPF_FACTORP__M 0x7
+#define QAM_LC_LPF_FACTORP__PRE 0x3
+
+#define QAM_LC_LPF_FACTORP_FACTOR__B 0
+#define QAM_LC_LPF_FACTORP_FACTOR__W 3
+#define QAM_LC_LPF_FACTORP_FACTOR__M 0x7
+#define QAM_LC_LPF_FACTORP_FACTOR__PRE 0x3
+
+#define QAM_LC_LPF_FACTORI__A 0x1450029
+#define QAM_LC_LPF_FACTORI__W 3
+#define QAM_LC_LPF_FACTORI__M 0x7
+#define QAM_LC_LPF_FACTORI__PRE 0x3
+
+#define QAM_LC_LPF_FACTORI_FACTOR__B 0
+#define QAM_LC_LPF_FACTORI_FACTOR__W 3
+#define QAM_LC_LPF_FACTORI_FACTOR__M 0x7
+#define QAM_LC_LPF_FACTORI_FACTOR__PRE 0x3
+
+#define QAM_LC_RATE_LIMIT__A 0x145002A
+#define QAM_LC_RATE_LIMIT__W 2
+#define QAM_LC_RATE_LIMIT__M 0x3
+#define QAM_LC_RATE_LIMIT__PRE 0x3
+
+#define QAM_LC_RATE_LIMIT_LIMIT__B 0
+#define QAM_LC_RATE_LIMIT_LIMIT__W 2
+#define QAM_LC_RATE_LIMIT_LIMIT__M 0x3
+#define QAM_LC_RATE_LIMIT_LIMIT__PRE 0x3
+
+#define QAM_LC_SYMBOL_FREQ__A 0x145002B
+#define QAM_LC_SYMBOL_FREQ__W 10
+#define QAM_LC_SYMBOL_FREQ__M 0x3FF
+#define QAM_LC_SYMBOL_FREQ__PRE 0x199
+
+#define QAM_LC_SYMBOL_FREQ_FREQ__B 0
+#define QAM_LC_SYMBOL_FREQ_FREQ__W 10
+#define QAM_LC_SYMBOL_FREQ_FREQ__M 0x3FF
+#define QAM_LC_SYMBOL_FREQ_FREQ__PRE 0x199
+#define QAM_LC_SYMBOL_FREQ_FREQ_QAM_B_64 0x197
+#define QAM_LC_SYMBOL_FREQ_FREQ_QAM_B_256 0x1B2
+
+#define QAM_LC_MTA_LENGTH__A 0x145002C
+#define QAM_LC_MTA_LENGTH__W 2
+#define QAM_LC_MTA_LENGTH__M 0x3
+#define QAM_LC_MTA_LENGTH__PRE 0x2
+
+#define QAM_LC_MTA_LENGTH_LENGTH__B 0
+#define QAM_LC_MTA_LENGTH_LENGTH__W 2
+#define QAM_LC_MTA_LENGTH_LENGTH__M 0x3
+#define QAM_LC_MTA_LENGTH_LENGTH__PRE 0x2
+
+#define QAM_LC_AMP_ACCU__A 0x145002D
+#define QAM_LC_AMP_ACCU__W 14
+#define QAM_LC_AMP_ACCU__M 0x3FFF
+#define QAM_LC_AMP_ACCU__PRE 0x600
+
+#define QAM_LC_AMP_ACCU_ACCU__B 0
+#define QAM_LC_AMP_ACCU_ACCU__W 14
+#define QAM_LC_AMP_ACCU_ACCU__M 0x3FFF
+#define QAM_LC_AMP_ACCU_ACCU__PRE 0x600
+
+#define QAM_LC_FREQ_ACCU__A 0x145002E
+#define QAM_LC_FREQ_ACCU__W 10
+#define QAM_LC_FREQ_ACCU__M 0x3FF
+#define QAM_LC_FREQ_ACCU__PRE 0x0
+
+#define QAM_LC_FREQ_ACCU_ACCU__B 0
+#define QAM_LC_FREQ_ACCU_ACCU__W 10
+#define QAM_LC_FREQ_ACCU_ACCU__M 0x3FF
+#define QAM_LC_FREQ_ACCU_ACCU__PRE 0x0
+
+#define QAM_LC_RATE_ACCU__A 0x145002F
+#define QAM_LC_RATE_ACCU__W 10
+#define QAM_LC_RATE_ACCU__M 0x3FF
+#define QAM_LC_RATE_ACCU__PRE 0x0
+
+#define QAM_LC_RATE_ACCU_ACCU__B 0
+#define QAM_LC_RATE_ACCU_ACCU__W 10
+#define QAM_LC_RATE_ACCU_ACCU__M 0x3FF
+#define QAM_LC_RATE_ACCU_ACCU__PRE 0x0
+
+#define QAM_LC_AMPLITUDE__A 0x1450030
+#define QAM_LC_AMPLITUDE__W 10
+#define QAM_LC_AMPLITUDE__M 0x3FF
+#define QAM_LC_AMPLITUDE__PRE 0x0
+
+#define QAM_LC_AMPLITUDE_SIZE__B 0
+#define QAM_LC_AMPLITUDE_SIZE__W 10
+#define QAM_LC_AMPLITUDE_SIZE__M 0x3FF
+#define QAM_LC_AMPLITUDE_SIZE__PRE 0x0
+
+#define QAM_LC_RAD_ERROR__A 0x1450031
+#define QAM_LC_RAD_ERROR__W 10
+#define QAM_LC_RAD_ERROR__M 0x3FF
+#define QAM_LC_RAD_ERROR__PRE 0x0
+
+#define QAM_LC_RAD_ERROR_SIZE__B 0
+#define QAM_LC_RAD_ERROR_SIZE__W 10
+#define QAM_LC_RAD_ERROR_SIZE__M 0x3FF
+#define QAM_LC_RAD_ERROR_SIZE__PRE 0x0
+
+#define QAM_LC_FREQ_OFFS__A 0x1450032
+#define QAM_LC_FREQ_OFFS__W 10
+#define QAM_LC_FREQ_OFFS__M 0x3FF
+#define QAM_LC_FREQ_OFFS__PRE 0x0
+
+#define QAM_LC_FREQ_OFFS_OFFS__B 0
+#define QAM_LC_FREQ_OFFS_OFFS__W 10
+#define QAM_LC_FREQ_OFFS_OFFS__M 0x3FF
+#define QAM_LC_FREQ_OFFS_OFFS__PRE 0x0
+
+#define QAM_LC_PHASE_ERROR__A 0x1450033
+#define QAM_LC_PHASE_ERROR__W 10
+#define QAM_LC_PHASE_ERROR__M 0x3FF
+#define QAM_LC_PHASE_ERROR__PRE 0x0
+
+#define QAM_LC_PHASE_ERROR_SIZE__B 0
+#define QAM_LC_PHASE_ERROR_SIZE__W 10
+#define QAM_LC_PHASE_ERROR_SIZE__M 0x3FF
+#define QAM_LC_PHASE_ERROR_SIZE__PRE 0x0
+
+#define QAM_VD_COMM_EXEC__A 0x1460000
+#define QAM_VD_COMM_EXEC__W 2
+#define QAM_VD_COMM_EXEC__M 0x3
+#define QAM_VD_COMM_EXEC__PRE 0x0
+#define QAM_VD_COMM_EXEC_STOP 0x0
+#define QAM_VD_COMM_EXEC_ACTIVE 0x1
+#define QAM_VD_COMM_EXEC_HOLD 0x2
+
+#define QAM_VD_COMM_MB__A 0x1460002
+#define QAM_VD_COMM_MB__W 2
+#define QAM_VD_COMM_MB__M 0x3
+#define QAM_VD_COMM_MB__PRE 0x0
+#define QAM_VD_COMM_MB_CTL__B 0
+#define QAM_VD_COMM_MB_CTL__W 1
+#define QAM_VD_COMM_MB_CTL__M 0x1
+#define QAM_VD_COMM_MB_CTL__PRE 0x0
+#define QAM_VD_COMM_MB_CTL_OFF 0x0
+#define QAM_VD_COMM_MB_CTL_ON 0x1
+#define QAM_VD_COMM_MB_OBS__B 1
+#define QAM_VD_COMM_MB_OBS__W 1
+#define QAM_VD_COMM_MB_OBS__M 0x2
+#define QAM_VD_COMM_MB_OBS__PRE 0x0
+#define QAM_VD_COMM_MB_OBS_OFF 0x0
+#define QAM_VD_COMM_MB_OBS_ON 0x2
+
+#define QAM_VD_COMM_INT_REQ__A 0x1460003
+#define QAM_VD_COMM_INT_REQ__W 1
+#define QAM_VD_COMM_INT_REQ__M 0x1
+#define QAM_VD_COMM_INT_REQ__PRE 0x0
+#define QAM_VD_COMM_INT_STA__A 0x1460005
+#define QAM_VD_COMM_INT_STA__W 2
+#define QAM_VD_COMM_INT_STA__M 0x3
+#define QAM_VD_COMM_INT_STA__PRE 0x0
+
+#define QAM_VD_COMM_INT_STA_LOCK_INT__B 0
+#define QAM_VD_COMM_INT_STA_LOCK_INT__W 1
+#define QAM_VD_COMM_INT_STA_LOCK_INT__M 0x1
+#define QAM_VD_COMM_INT_STA_LOCK_INT__PRE 0x0
+
+#define QAM_VD_COMM_INT_STA_PERIOD_INT__B 1
+#define QAM_VD_COMM_INT_STA_PERIOD_INT__W 1
+#define QAM_VD_COMM_INT_STA_PERIOD_INT__M 0x2
+#define QAM_VD_COMM_INT_STA_PERIOD_INT__PRE 0x0
+
+#define QAM_VD_COMM_INT_MSK__A 0x1460006
+#define QAM_VD_COMM_INT_MSK__W 2
+#define QAM_VD_COMM_INT_MSK__M 0x3
+#define QAM_VD_COMM_INT_MSK__PRE 0x0
+#define QAM_VD_COMM_INT_MSK_LOCK_INT__B 0
+#define QAM_VD_COMM_INT_MSK_LOCK_INT__W 1
+#define QAM_VD_COMM_INT_MSK_LOCK_INT__M 0x1
+#define QAM_VD_COMM_INT_MSK_LOCK_INT__PRE 0x0
+#define QAM_VD_COMM_INT_MSK_PERIOD_INT__B 1
+#define QAM_VD_COMM_INT_MSK_PERIOD_INT__W 1
+#define QAM_VD_COMM_INT_MSK_PERIOD_INT__M 0x2
+#define QAM_VD_COMM_INT_MSK_PERIOD_INT__PRE 0x0
+
+#define QAM_VD_COMM_INT_STM__A 0x1460007
+#define QAM_VD_COMM_INT_STM__W 2
+#define QAM_VD_COMM_INT_STM__M 0x3
+#define QAM_VD_COMM_INT_STM__PRE 0x0
+#define QAM_VD_COMM_INT_STM_LOCK_INT__B 0
+#define QAM_VD_COMM_INT_STM_LOCK_INT__W 1
+#define QAM_VD_COMM_INT_STM_LOCK_INT__M 0x1
+#define QAM_VD_COMM_INT_STM_LOCK_INT__PRE 0x0
+#define QAM_VD_COMM_INT_STM_PERIOD_INT__B 1
+#define QAM_VD_COMM_INT_STM_PERIOD_INT__W 1
+#define QAM_VD_COMM_INT_STM_PERIOD_INT__M 0x2
+#define QAM_VD_COMM_INT_STM_PERIOD_INT__PRE 0x0
+
+#define QAM_VD_STATUS__A 0x1460010
+#define QAM_VD_STATUS__W 1
+#define QAM_VD_STATUS__M 0x1
+#define QAM_VD_STATUS__PRE 0x0
+
+#define QAM_VD_STATUS_LOCK__B 0
+#define QAM_VD_STATUS_LOCK__W 1
+#define QAM_VD_STATUS_LOCK__M 0x1
+#define QAM_VD_STATUS_LOCK__PRE 0x0
+
+#define QAM_VD_UNLOCK_CONTROL__A 0x1460011
+#define QAM_VD_UNLOCK_CONTROL__W 1
+#define QAM_VD_UNLOCK_CONTROL__M 0x1
+#define QAM_VD_UNLOCK_CONTROL__PRE 0x0
+
+#define QAM_VD_UNLOCK_CONTROL_UNLOCK_CTRL__B 0
+#define QAM_VD_UNLOCK_CONTROL_UNLOCK_CTRL__W 1
+#define QAM_VD_UNLOCK_CONTROL_UNLOCK_CTRL__M 0x1
+#define QAM_VD_UNLOCK_CONTROL_UNLOCK_CTRL__PRE 0x0
+
+#define QAM_VD_MIN_VOTING_ROUNDS__A 0x1460012
+#define QAM_VD_MIN_VOTING_ROUNDS__W 6
+#define QAM_VD_MIN_VOTING_ROUNDS__M 0x3F
+#define QAM_VD_MIN_VOTING_ROUNDS__PRE 0x10
+
+#define QAM_VD_MIN_VOTING_ROUNDS_ROUNDS__B 0
+#define QAM_VD_MIN_VOTING_ROUNDS_ROUNDS__W 6
+#define QAM_VD_MIN_VOTING_ROUNDS_ROUNDS__M 0x3F
+#define QAM_VD_MIN_VOTING_ROUNDS_ROUNDS__PRE 0x10
+
+#define QAM_VD_MAX_VOTING_ROUNDS__A 0x1460013
+#define QAM_VD_MAX_VOTING_ROUNDS__W 6
+#define QAM_VD_MAX_VOTING_ROUNDS__M 0x3F
+#define QAM_VD_MAX_VOTING_ROUNDS__PRE 0x10
+
+#define QAM_VD_MAX_VOTING_ROUNDS_ROUNDS__B 0
+#define QAM_VD_MAX_VOTING_ROUNDS_ROUNDS__W 6
+#define QAM_VD_MAX_VOTING_ROUNDS_ROUNDS__M 0x3F
+#define QAM_VD_MAX_VOTING_ROUNDS_ROUNDS__PRE 0x10
+
+#define QAM_VD_TRACEBACK_DEPTH__A 0x1460014
+#define QAM_VD_TRACEBACK_DEPTH__W 5
+#define QAM_VD_TRACEBACK_DEPTH__M 0x1F
+#define QAM_VD_TRACEBACK_DEPTH__PRE 0x10
+
+#define QAM_VD_TRACEBACK_DEPTH_LENGTH__B 0
+#define QAM_VD_TRACEBACK_DEPTH_LENGTH__W 5
+#define QAM_VD_TRACEBACK_DEPTH_LENGTH__M 0x1F
+#define QAM_VD_TRACEBACK_DEPTH_LENGTH__PRE 0x10
+
+#define QAM_VD_UNLOCK__A 0x1460015
+#define QAM_VD_UNLOCK__W 1
+#define QAM_VD_UNLOCK__M 0x1
+#define QAM_VD_UNLOCK__PRE 0x0
+#define QAM_VD_MEASUREMENT_PERIOD__A 0x1460016
+#define QAM_VD_MEASUREMENT_PERIOD__W 16
+#define QAM_VD_MEASUREMENT_PERIOD__M 0xFFFF
+#define QAM_VD_MEASUREMENT_PERIOD__PRE 0x8236
+
+#define QAM_VD_MEASUREMENT_PERIOD_PERIOD__B 0
+#define QAM_VD_MEASUREMENT_PERIOD_PERIOD__W 16
+#define QAM_VD_MEASUREMENT_PERIOD_PERIOD__M 0xFFFF
+#define QAM_VD_MEASUREMENT_PERIOD_PERIOD__PRE 0x8236
+
+#define QAM_VD_MEASUREMENT_PRESCALE__A 0x1460017
+#define QAM_VD_MEASUREMENT_PRESCALE__W 16
+#define QAM_VD_MEASUREMENT_PRESCALE__M 0xFFFF
+#define QAM_VD_MEASUREMENT_PRESCALE__PRE 0x4
+
+#define QAM_VD_MEASUREMENT_PRESCALE_PRESCALE__B 0
+#define QAM_VD_MEASUREMENT_PRESCALE_PRESCALE__W 16
+#define QAM_VD_MEASUREMENT_PRESCALE_PRESCALE__M 0xFFFF
+#define QAM_VD_MEASUREMENT_PRESCALE_PRESCALE__PRE 0x4
+
+#define QAM_VD_DELTA_PATH_METRIC__A 0x1460018
+#define QAM_VD_DELTA_PATH_METRIC__W 16
+#define QAM_VD_DELTA_PATH_METRIC__M 0xFFFF
+#define QAM_VD_DELTA_PATH_METRIC__PRE 0xFFFF
+
+#define QAM_VD_DELTA_PATH_METRIC_FIXED_MANT__B 0
+#define QAM_VD_DELTA_PATH_METRIC_FIXED_MANT__W 12
+#define QAM_VD_DELTA_PATH_METRIC_FIXED_MANT__M 0xFFF
+#define QAM_VD_DELTA_PATH_METRIC_FIXED_MANT__PRE 0xFFF
+
+#define QAM_VD_DELTA_PATH_METRIC_EXP__B 12
+#define QAM_VD_DELTA_PATH_METRIC_EXP__W 4
+#define QAM_VD_DELTA_PATH_METRIC_EXP__M 0xF000
+#define QAM_VD_DELTA_PATH_METRIC_EXP__PRE 0xF000
+
+#define QAM_VD_NR_QSYM_ERRORS__A 0x1460019
+#define QAM_VD_NR_QSYM_ERRORS__W 16
+#define QAM_VD_NR_QSYM_ERRORS__M 0xFFFF
+#define QAM_VD_NR_QSYM_ERRORS__PRE 0xFFFF
+
+#define QAM_VD_NR_QSYM_ERRORS_FIXED_MANT__B 0
+#define QAM_VD_NR_QSYM_ERRORS_FIXED_MANT__W 12
+#define QAM_VD_NR_QSYM_ERRORS_FIXED_MANT__M 0xFFF
+#define QAM_VD_NR_QSYM_ERRORS_FIXED_MANT__PRE 0xFFF
+
+#define QAM_VD_NR_QSYM_ERRORS_EXP__B 12
+#define QAM_VD_NR_QSYM_ERRORS_EXP__W 4
+#define QAM_VD_NR_QSYM_ERRORS_EXP__M 0xF000
+#define QAM_VD_NR_QSYM_ERRORS_EXP__PRE 0xF000
+
+#define QAM_VD_NR_SYMBOL_ERRORS__A 0x146001A
+#define QAM_VD_NR_SYMBOL_ERRORS__W 16
+#define QAM_VD_NR_SYMBOL_ERRORS__M 0xFFFF
+#define QAM_VD_NR_SYMBOL_ERRORS__PRE 0xFFFF
+
+#define QAM_VD_NR_SYMBOL_ERRORS_FIXED_MANT__B 0
+#define QAM_VD_NR_SYMBOL_ERRORS_FIXED_MANT__W 12
+#define QAM_VD_NR_SYMBOL_ERRORS_FIXED_MANT__M 0xFFF
+#define QAM_VD_NR_SYMBOL_ERRORS_FIXED_MANT__PRE 0xFFF
+
+#define QAM_VD_NR_SYMBOL_ERRORS_EXP__B 12
+#define QAM_VD_NR_SYMBOL_ERRORS_EXP__W 4
+#define QAM_VD_NR_SYMBOL_ERRORS_EXP__M 0xF000
+#define QAM_VD_NR_SYMBOL_ERRORS_EXP__PRE 0xF000
+
+#define QAM_VD_RELOCK_COUNT__A 0x146001B
+#define QAM_VD_RELOCK_COUNT__W 16
+#define QAM_VD_RELOCK_COUNT__M 0xFFFF
+#define QAM_VD_RELOCK_COUNT__PRE 0x0
+
+#define QAM_VD_RELOCK_COUNT_COUNT__B 0
+#define QAM_VD_RELOCK_COUNT_COUNT__W 8
+#define QAM_VD_RELOCK_COUNT_COUNT__M 0xFF
+#define QAM_VD_RELOCK_COUNT_COUNT__PRE 0x0
+
+#define QAM_SY_COMM_EXEC__A 0x1470000
+#define QAM_SY_COMM_EXEC__W 2
+#define QAM_SY_COMM_EXEC__M 0x3
+#define QAM_SY_COMM_EXEC__PRE 0x0
+#define QAM_SY_COMM_EXEC_STOP 0x0
+#define QAM_SY_COMM_EXEC_ACTIVE 0x1
+#define QAM_SY_COMM_EXEC_HOLD 0x2
+
+#define QAM_SY_COMM_MB__A 0x1470002
+#define QAM_SY_COMM_MB__W 2
+#define QAM_SY_COMM_MB__M 0x3
+#define QAM_SY_COMM_MB__PRE 0x0
+#define QAM_SY_COMM_MB_CTL__B 0
+#define QAM_SY_COMM_MB_CTL__W 1
+#define QAM_SY_COMM_MB_CTL__M 0x1
+#define QAM_SY_COMM_MB_CTL__PRE 0x0
+#define QAM_SY_COMM_MB_CTL_OFF 0x0
+#define QAM_SY_COMM_MB_CTL_ON 0x1
+#define QAM_SY_COMM_MB_OBS__B 1
+#define QAM_SY_COMM_MB_OBS__W 1
+#define QAM_SY_COMM_MB_OBS__M 0x2
+#define QAM_SY_COMM_MB_OBS__PRE 0x0
+#define QAM_SY_COMM_MB_OBS_OFF 0x0
+#define QAM_SY_COMM_MB_OBS_ON 0x2
+
+#define QAM_SY_COMM_INT_REQ__A 0x1470003
+#define QAM_SY_COMM_INT_REQ__W 1
+#define QAM_SY_COMM_INT_REQ__M 0x1
+#define QAM_SY_COMM_INT_REQ__PRE 0x0
+#define QAM_SY_COMM_INT_STA__A 0x1470005
+#define QAM_SY_COMM_INT_STA__W 4
+#define QAM_SY_COMM_INT_STA__M 0xF
+#define QAM_SY_COMM_INT_STA__PRE 0x0
+
+#define QAM_SY_COMM_INT_STA_LOCK_INT__B 0
+#define QAM_SY_COMM_INT_STA_LOCK_INT__W 1
+#define QAM_SY_COMM_INT_STA_LOCK_INT__M 0x1
+#define QAM_SY_COMM_INT_STA_LOCK_INT__PRE 0x0
+
+#define QAM_SY_COMM_INT_STA_UNLOCK_INT__B 1
+#define QAM_SY_COMM_INT_STA_UNLOCK_INT__W 1
+#define QAM_SY_COMM_INT_STA_UNLOCK_INT__M 0x2
+#define QAM_SY_COMM_INT_STA_UNLOCK_INT__PRE 0x0
+
+#define QAM_SY_COMM_INT_STA_TIMEOUT_INT__B 2
+#define QAM_SY_COMM_INT_STA_TIMEOUT_INT__W 1
+#define QAM_SY_COMM_INT_STA_TIMEOUT_INT__M 0x4
+#define QAM_SY_COMM_INT_STA_TIMEOUT_INT__PRE 0x0
+
+#define QAM_SY_COMM_INT_STA_CTL_WORD_INT__B 3
+#define QAM_SY_COMM_INT_STA_CTL_WORD_INT__W 1
+#define QAM_SY_COMM_INT_STA_CTL_WORD_INT__M 0x8
+#define QAM_SY_COMM_INT_STA_CTL_WORD_INT__PRE 0x0
+
+#define QAM_SY_COMM_INT_MSK__A 0x1470006
+#define QAM_SY_COMM_INT_MSK__W 4
+#define QAM_SY_COMM_INT_MSK__M 0xF
+#define QAM_SY_COMM_INT_MSK__PRE 0x0
+#define QAM_SY_COMM_INT_MSK_LOCK_MSK__B 0
+#define QAM_SY_COMM_INT_MSK_LOCK_MSK__W 1
+#define QAM_SY_COMM_INT_MSK_LOCK_MSK__M 0x1
+#define QAM_SY_COMM_INT_MSK_LOCK_MSK__PRE 0x0
+#define QAM_SY_COMM_INT_MSK_UNLOCK_MSK__B 1
+#define QAM_SY_COMM_INT_MSK_UNLOCK_MSK__W 1
+#define QAM_SY_COMM_INT_MSK_UNLOCK_MSK__M 0x2
+#define QAM_SY_COMM_INT_MSK_UNLOCK_MSK__PRE 0x0
+#define QAM_SY_COMM_INT_MSK_TIMEOUT_MSK__B 2
+#define QAM_SY_COMM_INT_MSK_TIMEOUT_MSK__W 1
+#define QAM_SY_COMM_INT_MSK_TIMEOUT_MSK__M 0x4
+#define QAM_SY_COMM_INT_MSK_TIMEOUT_MSK__PRE 0x0
+#define QAM_SY_COMM_INT_MSK_CTL_WORD_MSK__B 3
+#define QAM_SY_COMM_INT_MSK_CTL_WORD_MSK__W 1
+#define QAM_SY_COMM_INT_MSK_CTL_WORD_MSK__M 0x8
+#define QAM_SY_COMM_INT_MSK_CTL_WORD_MSK__PRE 0x0
+
+#define QAM_SY_COMM_INT_STM__A 0x1470007
+#define QAM_SY_COMM_INT_STM__W 4
+#define QAM_SY_COMM_INT_STM__M 0xF
+#define QAM_SY_COMM_INT_STM__PRE 0x0
+#define QAM_SY_COMM_INT_STM_LOCK_MSK__B 0
+#define QAM_SY_COMM_INT_STM_LOCK_MSK__W 1
+#define QAM_SY_COMM_INT_STM_LOCK_MSK__M 0x1
+#define QAM_SY_COMM_INT_STM_LOCK_MSK__PRE 0x0
+#define QAM_SY_COMM_INT_STM_UNLOCK_MSK__B 1
+#define QAM_SY_COMM_INT_STM_UNLOCK_MSK__W 1
+#define QAM_SY_COMM_INT_STM_UNLOCK_MSK__M 0x2
+#define QAM_SY_COMM_INT_STM_UNLOCK_MSK__PRE 0x0
+#define QAM_SY_COMM_INT_STM_TIMEOUT_MSK__B 2
+#define QAM_SY_COMM_INT_STM_TIMEOUT_MSK__W 1
+#define QAM_SY_COMM_INT_STM_TIMEOUT_MSK__M 0x4
+#define QAM_SY_COMM_INT_STM_TIMEOUT_MSK__PRE 0x0
+#define QAM_SY_COMM_INT_STM_CTL_WORD_MSK__B 3
+#define QAM_SY_COMM_INT_STM_CTL_WORD_MSK__W 1
+#define QAM_SY_COMM_INT_STM_CTL_WORD_MSK__M 0x8
+#define QAM_SY_COMM_INT_STM_CTL_WORD_MSK__PRE 0x0
+
+#define QAM_SY_STATUS__A 0x1470010
+#define QAM_SY_STATUS__W 2
+#define QAM_SY_STATUS__M 0x3
+#define QAM_SY_STATUS__PRE 0x0
+
+#define QAM_SY_STATUS_SYNC_STATE__B 0
+#define QAM_SY_STATUS_SYNC_STATE__W 2
+#define QAM_SY_STATUS_SYNC_STATE__M 0x3
+#define QAM_SY_STATUS_SYNC_STATE__PRE 0x0
+
+#define QAM_SY_TIMEOUT__A 0x1470011
+#define QAM_SY_TIMEOUT__W 16
+#define QAM_SY_TIMEOUT__M 0xFFFF
+#define QAM_SY_TIMEOUT__PRE 0x3A98
+
+#define QAM_SY_SYNC_LWM__A 0x1470012
+#define QAM_SY_SYNC_LWM__W 4
+#define QAM_SY_SYNC_LWM__M 0xF
+#define QAM_SY_SYNC_LWM__PRE 0x2
+
+#define QAM_SY_SYNC_AWM__A 0x1470013
+#define QAM_SY_SYNC_AWM__W 4
+#define QAM_SY_SYNC_AWM__M 0xF
+#define QAM_SY_SYNC_AWM__PRE 0x3
+
+#define QAM_SY_SYNC_HWM__A 0x1470014
+#define QAM_SY_SYNC_HWM__W 4
+#define QAM_SY_SYNC_HWM__M 0xF
+#define QAM_SY_SYNC_HWM__PRE 0x5
+
+#define QAM_SY_UNLOCK__A 0x1470015
+#define QAM_SY_UNLOCK__W 1
+#define QAM_SY_UNLOCK__M 0x1
+#define QAM_SY_UNLOCK__PRE 0x0
+#define QAM_SY_CONTROL_WORD__A 0x1470016
+#define QAM_SY_CONTROL_WORD__W 4
+#define QAM_SY_CONTROL_WORD__M 0xF
+#define QAM_SY_CONTROL_WORD__PRE 0x0
+
+#define QAM_SY_CONTROL_WORD_CTRL_WORD__B 0
+#define QAM_SY_CONTROL_WORD_CTRL_WORD__W 4
+#define QAM_SY_CONTROL_WORD_CTRL_WORD__M 0xF
+#define QAM_SY_CONTROL_WORD_CTRL_WORD__PRE 0x0
+
+#define QAM_VD_ISS_RAM__A 0x1480000
+
+#define QAM_VD_QSS_RAM__A 0x1490000
+
+#define QAM_VD_SYM_RAM__A 0x14A0000
+
+#define SCU_COMM_EXEC__A 0x800000
+#define SCU_COMM_EXEC__W 2
+#define SCU_COMM_EXEC__M 0x3
+#define SCU_COMM_EXEC__PRE 0x0
+#define SCU_COMM_EXEC_STOP 0x0
+#define SCU_COMM_EXEC_ACTIVE 0x1
+#define SCU_COMM_EXEC_HOLD 0x2
+
+#define SCU_COMM_STATE__A 0x800001
+#define SCU_COMM_STATE__W 16
+#define SCU_COMM_STATE__M 0xFFFF
+#define SCU_COMM_STATE__PRE 0x0
+
+#define SCU_COMM_STATE_COMM_STATE__B 0
+#define SCU_COMM_STATE_COMM_STATE__W 16
+#define SCU_COMM_STATE_COMM_STATE__M 0xFFFF
+#define SCU_COMM_STATE_COMM_STATE__PRE 0x0
+
+#define SCU_TOP_COMM_EXEC__A 0x810000
+#define SCU_TOP_COMM_EXEC__W 2
+#define SCU_TOP_COMM_EXEC__M 0x3
+#define SCU_TOP_COMM_EXEC__PRE 0x0
+#define SCU_TOP_COMM_EXEC_STOP 0x0
+#define SCU_TOP_COMM_EXEC_ACTIVE 0x1
+#define SCU_TOP_COMM_EXEC_HOLD 0x2
+
+#define SCU_TOP_COMM_STATE__A 0x810001
+#define SCU_TOP_COMM_STATE__W 16
+#define SCU_TOP_COMM_STATE__M 0xFFFF
+#define SCU_TOP_COMM_STATE__PRE 0x0
+#define SCU_TOP_MWAIT_CTR__A 0x810010
+#define SCU_TOP_MWAIT_CTR__W 2
+#define SCU_TOP_MWAIT_CTR__M 0x3
+#define SCU_TOP_MWAIT_CTR__PRE 0x0
+
+#define SCU_TOP_MWAIT_CTR_MWAIT_SEL__B 0
+#define SCU_TOP_MWAIT_CTR_MWAIT_SEL__W 1
+#define SCU_TOP_MWAIT_CTR_MWAIT_SEL__M 0x1
+#define SCU_TOP_MWAIT_CTR_MWAIT_SEL__PRE 0x0
+#define SCU_TOP_MWAIT_CTR_MWAIT_SEL_TR_MW_OFF 0x0
+#define SCU_TOP_MWAIT_CTR_MWAIT_SEL_TR_MW_ON 0x1
+
+#define SCU_TOP_MWAIT_CTR_READY_DIS__B 1
+#define SCU_TOP_MWAIT_CTR_READY_DIS__W 1
+#define SCU_TOP_MWAIT_CTR_READY_DIS__M 0x2
+#define SCU_TOP_MWAIT_CTR_READY_DIS__PRE 0x0
+#define SCU_TOP_MWAIT_CTR_READY_DIS_NMI_ON 0x0
+#define SCU_TOP_MWAIT_CTR_READY_DIS_NMI_OFF 0x2
+
+#define SCU_LOW_RAM__A 0x820000
+
+#define SCU_LOW_RAM_LOW__B 0
+#define SCU_LOW_RAM_LOW__W 16
+#define SCU_LOW_RAM_LOW__M 0xFFFF
+#define SCU_LOW_RAM_LOW__PRE 0x0
+
+#define SCU_HIGH_RAM__A 0x830000
+
+#define SCU_HIGH_RAM_HIGH__B 0
+#define SCU_HIGH_RAM_HIGH__W 16
+#define SCU_HIGH_RAM_HIGH__M 0xFFFF
+#define SCU_HIGH_RAM_HIGH__PRE 0x0
+
+#define SCU_RAM_AGC_RF_MAX__A 0x831E96
+#define SCU_RAM_AGC_RF_MAX__W 15
+#define SCU_RAM_AGC_RF_MAX__M 0x7FFF
+#define SCU_RAM_AGC_RF_MAX__PRE 0x0
+
+#define SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__A 0x831E97
+#define SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__W 16
+#define SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__M 0xFFFF
+#define SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__PRE 0x0
+
+#define SCU_RAM_AGC_KI_CYCCNT__A 0x831E98
+#define SCU_RAM_AGC_KI_CYCCNT__W 16
+#define SCU_RAM_AGC_KI_CYCCNT__M 0xFFFF
+#define SCU_RAM_AGC_KI_CYCCNT__PRE 0x0
+
+#define SCU_RAM_AGC_KI_CYCLEN__A 0x831E99
+#define SCU_RAM_AGC_KI_CYCLEN__W 16
+#define SCU_RAM_AGC_KI_CYCLEN__M 0xFFFF
+#define SCU_RAM_AGC_KI_CYCLEN__PRE 0x0
+
+#define SCU_RAM_AGC_SNS_CYCLEN__A 0x831E9A
+#define SCU_RAM_AGC_SNS_CYCLEN__W 16
+#define SCU_RAM_AGC_SNS_CYCLEN__M 0xFFFF
+#define SCU_RAM_AGC_SNS_CYCLEN__PRE 0x0
+
+#define SCU_RAM_AGC_RF_SNS_DEV_MAX__A 0x831E9B
+#define SCU_RAM_AGC_RF_SNS_DEV_MAX__W 16
+#define SCU_RAM_AGC_RF_SNS_DEV_MAX__M 0xFFFF
+#define SCU_RAM_AGC_RF_SNS_DEV_MAX__PRE 0x0
+
+#define SCU_RAM_AGC_RF_SNS_DEV_MIN__A 0x831E9C
+#define SCU_RAM_AGC_RF_SNS_DEV_MIN__W 16
+#define SCU_RAM_AGC_RF_SNS_DEV_MIN__M 0xFFFF
+#define SCU_RAM_AGC_RF_SNS_DEV_MIN__PRE 0x0
+#define SCU_RAM_AGC_KI__A 0x831E9D
+#define SCU_RAM_AGC_KI__W 15
+#define SCU_RAM_AGC_KI__M 0x7FFF
+#define SCU_RAM_AGC_KI__PRE 0x0
+
+#define SCU_RAM_AGC_KI_DGAIN__B 0
+#define SCU_RAM_AGC_KI_DGAIN__W 4
+#define SCU_RAM_AGC_KI_DGAIN__M 0xF
+#define SCU_RAM_AGC_KI_DGAIN__PRE 0x0
+
+#define SCU_RAM_AGC_KI_RF__B 4
+#define SCU_RAM_AGC_KI_RF__W 4
+#define SCU_RAM_AGC_KI_RF__M 0xF0
+#define SCU_RAM_AGC_KI_RF__PRE 0x0
+
+#define SCU_RAM_AGC_KI_IF__B 8
+#define SCU_RAM_AGC_KI_IF__W 4
+#define SCU_RAM_AGC_KI_IF__M 0xF00
+#define SCU_RAM_AGC_KI_IF__PRE 0x0
+
+#define SCU_RAM_AGC_KI_IF_AGC_DISABLE__B 12
+#define SCU_RAM_AGC_KI_IF_AGC_DISABLE__W 1
+#define SCU_RAM_AGC_KI_IF_AGC_DISABLE__M 0x1000
+#define SCU_RAM_AGC_KI_IF_AGC_DISABLE__PRE 0x0
+
+#define SCU_RAM_AGC_KI_INV_IF_POL__B 13
+#define SCU_RAM_AGC_KI_INV_IF_POL__W 1
+#define SCU_RAM_AGC_KI_INV_IF_POL__M 0x2000
+#define SCU_RAM_AGC_KI_INV_IF_POL__PRE 0x0
+
+#define SCU_RAM_AGC_KI_INV_RF_POL__B 14
+#define SCU_RAM_AGC_KI_INV_RF_POL__W 1
+#define SCU_RAM_AGC_KI_INV_RF_POL__M 0x4000
+#define SCU_RAM_AGC_KI_INV_RF_POL__PRE 0x0
+
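/*
 * Minimal usage sketch, assuming only the naming convention visible in
 * the definitions above: __A is the register address, __W the field
 * width in bits, __B the bit offset, __M the width-wide all-ones mask
 * shifted to that offset, and __PRE the power-on preset value.  The
 * helpers below are hypothetical (not part of this header or driver);
 * they show how a field such as SCU_RAM_AGC_KI_RF (__B 4, __W 4,
 * __M 0xF0) would be shift-and-masked out of a 16-bit register value.
 */
#include <stdint.h>

static inline uint16_t get_field(uint16_t reg, uint16_t mask, unsigned int bit)
{
	/* e.g. get_field(val, SCU_RAM_AGC_KI_RF__M, SCU_RAM_AGC_KI_RF__B) */
	return (uint16_t)((reg & mask) >> bit);
}

static inline uint16_t set_field(uint16_t reg, uint16_t mask, unsigned int bit, uint16_t val)
{
	/* clear the field, then insert the new value under the same mask */
	return (uint16_t)((reg & ~mask) | ((uint16_t)(val << bit) & mask));
}
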
+#define SCU_RAM_AGC_KI_RED__A 0x831E9E
+#define SCU_RAM_AGC_KI_RED__W 6
+#define SCU_RAM_AGC_KI_RED__M 0x3F
+#define SCU_RAM_AGC_KI_RED__PRE 0x0
+
+#define SCU_RAM_AGC_KI_RED_INNER_RED__B 0
+#define SCU_RAM_AGC_KI_RED_INNER_RED__W 2
+#define SCU_RAM_AGC_KI_RED_INNER_RED__M 0x3
+#define SCU_RAM_AGC_KI_RED_INNER_RED__PRE 0x0
+
+#define SCU_RAM_AGC_KI_RED_RAGC_RED__B 2
+#define SCU_RAM_AGC_KI_RED_RAGC_RED__W 2
+#define SCU_RAM_AGC_KI_RED_RAGC_RED__M 0xC
+#define SCU_RAM_AGC_KI_RED_RAGC_RED__PRE 0x0
+
+#define SCU_RAM_AGC_KI_RED_IAGC_RED__B 4
+#define SCU_RAM_AGC_KI_RED_IAGC_RED__W 2
+#define SCU_RAM_AGC_KI_RED_IAGC_RED__M 0x30
+#define SCU_RAM_AGC_KI_RED_IAGC_RED__PRE 0x0
+
+#define SCU_RAM_AGC_KI_INNERGAIN_MIN__A 0x831E9F
+#define SCU_RAM_AGC_KI_INNERGAIN_MIN__W 16
+#define SCU_RAM_AGC_KI_INNERGAIN_MIN__M 0xFFFF
+#define SCU_RAM_AGC_KI_INNERGAIN_MIN__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MINGAIN__A 0x831EA0
+#define SCU_RAM_AGC_KI_MINGAIN__W 16
+#define SCU_RAM_AGC_KI_MINGAIN__M 0xFFFF
+#define SCU_RAM_AGC_KI_MINGAIN__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MAXGAIN__A 0x831EA1
+#define SCU_RAM_AGC_KI_MAXGAIN__W 16
+#define SCU_RAM_AGC_KI_MAXGAIN__M 0xFFFF
+#define SCU_RAM_AGC_KI_MAXGAIN__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MAXMINGAIN_TH__A 0x831EA2
+#define SCU_RAM_AGC_KI_MAXMINGAIN_TH__W 16
+#define SCU_RAM_AGC_KI_MAXMINGAIN_TH__M 0xFFFF
+#define SCU_RAM_AGC_KI_MAXMINGAIN_TH__PRE 0x0
+#define SCU_RAM_AGC_KI_MIN__A 0x831EA3
+#define SCU_RAM_AGC_KI_MIN__W 12
+#define SCU_RAM_AGC_KI_MIN__M 0xFFF
+#define SCU_RAM_AGC_KI_MIN__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MIN_DGAIN__B 0
+#define SCU_RAM_AGC_KI_MIN_DGAIN__W 4
+#define SCU_RAM_AGC_KI_MIN_DGAIN__M 0xF
+#define SCU_RAM_AGC_KI_MIN_DGAIN__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MIN_RF__B 4
+#define SCU_RAM_AGC_KI_MIN_RF__W 4
+#define SCU_RAM_AGC_KI_MIN_RF__M 0xF0
+#define SCU_RAM_AGC_KI_MIN_RF__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MIN_IF__B 8
+#define SCU_RAM_AGC_KI_MIN_IF__W 4
+#define SCU_RAM_AGC_KI_MIN_IF__M 0xF00
+#define SCU_RAM_AGC_KI_MIN_IF__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MAX__A 0x831EA4
+#define SCU_RAM_AGC_KI_MAX__W 12
+#define SCU_RAM_AGC_KI_MAX__M 0xFFF
+#define SCU_RAM_AGC_KI_MAX__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MAX_DGAIN__B 0
+#define SCU_RAM_AGC_KI_MAX_DGAIN__W 4
+#define SCU_RAM_AGC_KI_MAX_DGAIN__M 0xF
+#define SCU_RAM_AGC_KI_MAX_DGAIN__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MAX_RF__B 4
+#define SCU_RAM_AGC_KI_MAX_RF__W 4
+#define SCU_RAM_AGC_KI_MAX_RF__M 0xF0
+#define SCU_RAM_AGC_KI_MAX_RF__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MAX_IF__B 8
+#define SCU_RAM_AGC_KI_MAX_IF__W 4
+#define SCU_RAM_AGC_KI_MAX_IF__M 0xF00
+#define SCU_RAM_AGC_KI_MAX_IF__PRE 0x0
+
+#define SCU_RAM_AGC_CLP_SUM__A 0x831EA5
+#define SCU_RAM_AGC_CLP_SUM__W 16
+#define SCU_RAM_AGC_CLP_SUM__M 0xFFFF
+#define SCU_RAM_AGC_CLP_SUM__PRE 0x0
+
+#define SCU_RAM_AGC_CLP_SUM_MIN__A 0x831EA6
+#define SCU_RAM_AGC_CLP_SUM_MIN__W 16
+#define SCU_RAM_AGC_CLP_SUM_MIN__M 0xFFFF
+#define SCU_RAM_AGC_CLP_SUM_MIN__PRE 0x0
+
+#define SCU_RAM_AGC_CLP_SUM_MAX__A 0x831EA7
+#define SCU_RAM_AGC_CLP_SUM_MAX__W 16
+#define SCU_RAM_AGC_CLP_SUM_MAX__M 0xFFFF
+#define SCU_RAM_AGC_CLP_SUM_MAX__PRE 0x0
+
+#define SCU_RAM_AGC_CLP_CYCLEN__A 0x831EA8
+#define SCU_RAM_AGC_CLP_CYCLEN__W 16
+#define SCU_RAM_AGC_CLP_CYCLEN__M 0xFFFF
+#define SCU_RAM_AGC_CLP_CYCLEN__PRE 0x0
+
+#define SCU_RAM_AGC_CLP_CYCCNT__A 0x831EA9
+#define SCU_RAM_AGC_CLP_CYCCNT__W 16
+#define SCU_RAM_AGC_CLP_CYCCNT__M 0xFFFF
+#define SCU_RAM_AGC_CLP_CYCCNT__PRE 0x0
+
+#define SCU_RAM_AGC_CLP_DIR_TO__A 0x831EAA
+#define SCU_RAM_AGC_CLP_DIR_TO__W 8
+#define SCU_RAM_AGC_CLP_DIR_TO__M 0xFF
+#define SCU_RAM_AGC_CLP_DIR_TO__PRE 0x0
+
+#define SCU_RAM_AGC_CLP_DIR_WD__A 0x831EAB
+#define SCU_RAM_AGC_CLP_DIR_WD__W 8
+#define SCU_RAM_AGC_CLP_DIR_WD__M 0xFF
+#define SCU_RAM_AGC_CLP_DIR_WD__PRE 0x0
+
+#define SCU_RAM_AGC_CLP_DIR_STP__A 0x831EAC
+#define SCU_RAM_AGC_CLP_DIR_STP__W 16
+#define SCU_RAM_AGC_CLP_DIR_STP__M 0xFFFF
+#define SCU_RAM_AGC_CLP_DIR_STP__PRE 0x0
+
+#define SCU_RAM_AGC_SNS_SUM__A 0x831EAD
+#define SCU_RAM_AGC_SNS_SUM__W 16
+#define SCU_RAM_AGC_SNS_SUM__M 0xFFFF
+#define SCU_RAM_AGC_SNS_SUM__PRE 0x0
+
+#define SCU_RAM_AGC_SNS_SUM_MIN__A 0x831EAE
+#define SCU_RAM_AGC_SNS_SUM_MIN__W 16
+#define SCU_RAM_AGC_SNS_SUM_MIN__M 0xFFFF
+#define SCU_RAM_AGC_SNS_SUM_MIN__PRE 0x0
+
+#define SCU_RAM_AGC_SNS_SUM_MAX__A 0x831EAF
+#define SCU_RAM_AGC_SNS_SUM_MAX__W 16
+#define SCU_RAM_AGC_SNS_SUM_MAX__M 0xFFFF
+#define SCU_RAM_AGC_SNS_SUM_MAX__PRE 0x0
+
+#define SCU_RAM_AGC_SNS_CYCCNT__A 0x831EB0
+#define SCU_RAM_AGC_SNS_CYCCNT__W 16
+#define SCU_RAM_AGC_SNS_CYCCNT__M 0xFFFF
+#define SCU_RAM_AGC_SNS_CYCCNT__PRE 0x0
+
+#define SCU_RAM_AGC_SNS_DIR_TO__A 0x831EB1
+#define SCU_RAM_AGC_SNS_DIR_TO__W 8
+#define SCU_RAM_AGC_SNS_DIR_TO__M 0xFF
+#define SCU_RAM_AGC_SNS_DIR_TO__PRE 0x0
+
+#define SCU_RAM_AGC_SNS_DIR_WD__A 0x831EB2
+#define SCU_RAM_AGC_SNS_DIR_WD__W 8
+#define SCU_RAM_AGC_SNS_DIR_WD__M 0xFF
+#define SCU_RAM_AGC_SNS_DIR_WD__PRE 0x0
+
+#define SCU_RAM_AGC_SNS_DIR_STP__A 0x831EB3
+#define SCU_RAM_AGC_SNS_DIR_STP__W 16
+#define SCU_RAM_AGC_SNS_DIR_STP__M 0xFFFF
+#define SCU_RAM_AGC_SNS_DIR_STP__PRE 0x0
+
+#define SCU_RAM_AGC_INGAIN__A 0x831EB4
+#define SCU_RAM_AGC_INGAIN__W 16
+#define SCU_RAM_AGC_INGAIN__M 0xFFFF
+#define SCU_RAM_AGC_INGAIN__PRE 0x0
+
+#define SCU_RAM_AGC_INGAIN_TGT__A 0x831EB5
+#define SCU_RAM_AGC_INGAIN_TGT__W 15
+#define SCU_RAM_AGC_INGAIN_TGT__M 0x7FFF
+#define SCU_RAM_AGC_INGAIN_TGT__PRE 0x0
+
+#define SCU_RAM_AGC_INGAIN_TGT_MIN__A 0x831EB6
+#define SCU_RAM_AGC_INGAIN_TGT_MIN__W 15
+#define SCU_RAM_AGC_INGAIN_TGT_MIN__M 0x7FFF
+#define SCU_RAM_AGC_INGAIN_TGT_MIN__PRE 0x0
+
+#define SCU_RAM_AGC_INGAIN_TGT_MAX__A 0x831EB7
+#define SCU_RAM_AGC_INGAIN_TGT_MAX__W 15
+#define SCU_RAM_AGC_INGAIN_TGT_MAX__M 0x7FFF
+#define SCU_RAM_AGC_INGAIN_TGT_MAX__PRE 0x0
+
+#define SCU_RAM_AGC_IF_IACCU_HI__A 0x831EB8
+#define SCU_RAM_AGC_IF_IACCU_HI__W 16
+#define SCU_RAM_AGC_IF_IACCU_HI__M 0xFFFF
+#define SCU_RAM_AGC_IF_IACCU_HI__PRE 0x0
+
+#define SCU_RAM_AGC_IF_IACCU_LO__A 0x831EB9
+#define SCU_RAM_AGC_IF_IACCU_LO__W 8
+#define SCU_RAM_AGC_IF_IACCU_LO__M 0xFF
+#define SCU_RAM_AGC_IF_IACCU_LO__PRE 0x0
+
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT__A 0x831EBA
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT__W 15
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT__M 0x7FFF
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT__PRE 0x0
+
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A 0x831EBB
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__W 15
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__M 0x7FFF
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__PRE 0x0
+
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A 0x831EBC
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__W 15
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__M 0x7FFF
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__PRE 0x0
+
+#define SCU_RAM_AGC_RF_IACCU_HI__A 0x831EBD
+#define SCU_RAM_AGC_RF_IACCU_HI__W 16
+#define SCU_RAM_AGC_RF_IACCU_HI__M 0xFFFF
+#define SCU_RAM_AGC_RF_IACCU_HI__PRE 0x0
+
+#define SCU_RAM_AGC_RF_IACCU_LO__A 0x831EBE
+#define SCU_RAM_AGC_RF_IACCU_LO__W 8
+#define SCU_RAM_AGC_RF_IACCU_LO__M 0xFF
+#define SCU_RAM_AGC_RF_IACCU_LO__PRE 0x0
+
+#define SCU_RAM_AGC_RF_IACCU_HI_CO__A 0x831EBF
+#define SCU_RAM_AGC_RF_IACCU_HI_CO__W 16
+#define SCU_RAM_AGC_RF_IACCU_HI_CO__M 0xFFFF
+#define SCU_RAM_AGC_RF_IACCU_HI_CO__PRE 0x0
+
+#define SCU_RAM_SP__A 0x831EC0
+#define SCU_RAM_SP__W 16
+#define SCU_RAM_SP__M 0xFFFF
+#define SCU_RAM_SP__PRE 0x0
+
+#define SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A 0x831EC1
+#define SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__W 16
+#define SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__M 0xFFFF
+#define SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MIN_IFGAIN__A 0x831EC2
+#define SCU_RAM_AGC_KI_MIN_IFGAIN__W 16
+#define SCU_RAM_AGC_KI_MIN_IFGAIN__M 0xFFFF
+#define SCU_RAM_AGC_KI_MIN_IFGAIN__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MAX_IFGAIN__A 0x831EC3
+#define SCU_RAM_AGC_KI_MAX_IFGAIN__W 16
+#define SCU_RAM_AGC_KI_MAX_IFGAIN__M 0xFFFF
+#define SCU_RAM_AGC_KI_MAX_IFGAIN__PRE 0x0
+
+#define SCU_RAM_FEC_MEAS_COUNT__A 0x831EC4
+#define SCU_RAM_FEC_MEAS_COUNT__W 16
+#define SCU_RAM_FEC_MEAS_COUNT__M 0xFFFF
+#define SCU_RAM_FEC_MEAS_COUNT__PRE 0x0
+
+#define SCU_RAM_FEC_ACCUM_CW_CORRECTED_LO__A 0x831EC5
+#define SCU_RAM_FEC_ACCUM_CW_CORRECTED_LO__W 16
+#define SCU_RAM_FEC_ACCUM_CW_CORRECTED_LO__M 0xFFFF
+#define SCU_RAM_FEC_ACCUM_CW_CORRECTED_LO__PRE 0x0
+
+#define SCU_RAM_FEC_ACCUM_CW_CORRECTED_HI__A 0x831EC6
+#define SCU_RAM_FEC_ACCUM_CW_CORRECTED_HI__W 16
+#define SCU_RAM_FEC_ACCUM_CW_CORRECTED_HI__M 0xFFFF
+#define SCU_RAM_FEC_ACCUM_CW_CORRECTED_HI__PRE 0x0
+#define SCU_RAM_GPIO__A 0x831EC7
+#define SCU_RAM_GPIO__W 1
+#define SCU_RAM_GPIO__M 0x1
+#define SCU_RAM_GPIO__PRE 0x0
+
+#define SCU_RAM_GPIO_HW_LOCK_IND__B 0
+#define SCU_RAM_GPIO_HW_LOCK_IND__W 1
+#define SCU_RAM_GPIO_HW_LOCK_IND__M 0x1
+#define SCU_RAM_GPIO_HW_LOCK_IND__PRE 0x0
+#define SCU_RAM_GPIO_HW_LOCK_IND_DISABLE 0x0
+#define SCU_RAM_GPIO_HW_LOCK_IND_ENABLE 0x1
+
+#define SCU_RAM_AGC_CLP_CTRL_MODE__A 0x831EC8
+#define SCU_RAM_AGC_CLP_CTRL_MODE__W 8
+#define SCU_RAM_AGC_CLP_CTRL_MODE__M 0xFF
+#define SCU_RAM_AGC_CLP_CTRL_MODE__PRE 0x0
+
+#define SCU_RAM_AGC_CLP_CTRL_MODE_NARROW_POW__B 0
+#define SCU_RAM_AGC_CLP_CTRL_MODE_NARROW_POW__W 1
+#define SCU_RAM_AGC_CLP_CTRL_MODE_NARROW_POW__M 0x1
+#define SCU_RAM_AGC_CLP_CTRL_MODE_NARROW_POW__PRE 0x0
+#define SCU_RAM_AGC_CLP_CTRL_MODE_NARROW_POW_false 0x0
+#define SCU_RAM_AGC_CLP_CTRL_MODE_NARROW_POW_true 0x1
+
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_BP__B 1
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_BP__W 1
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_BP__M 0x2
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_BP__PRE 0x0
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_BP_FCC_ENABLE 0x0
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_BP_FCC_DISABLE 0x2
+
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_DEC__B 2
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_DEC__W 1
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_DEC__M 0x4
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_DEC__PRE 0x0
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_DEC_DEC_DISABLE 0x0
+#define SCU_RAM_AGC_CLP_CTRL_MODE_FAST_CLP_DEC_DEC_ENABLE 0x4
+
+#define SCU_RAM_AGC_KI_MIN_RFGAIN__A 0x831EC9
+#define SCU_RAM_AGC_KI_MIN_RFGAIN__W 16
+#define SCU_RAM_AGC_KI_MIN_RFGAIN__M 0xFFFF
+#define SCU_RAM_AGC_KI_MIN_RFGAIN__PRE 0x0
+
+#define SCU_RAM_AGC_KI_MAX_RFGAIN__A 0x831ECA
+#define SCU_RAM_AGC_KI_MAX_RFGAIN__W 16
+#define SCU_RAM_AGC_KI_MAX_RFGAIN__M 0xFFFF
+#define SCU_RAM_AGC_KI_MAX_RFGAIN__PRE 0x0
+
+#define SCU_RAM_FEC_ACCUM_PKT_FAILURES__A 0x831ECB
+#define SCU_RAM_FEC_ACCUM_PKT_FAILURES__W 16
+#define SCU_RAM_FEC_ACCUM_PKT_FAILURES__M 0xFFFF
+#define SCU_RAM_FEC_ACCUM_PKT_FAILURES__PRE 0x0
+
+#define SCU_RAM_INHIBIT_1__A 0x831ECC
+#define SCU_RAM_INHIBIT_1__W 16
+#define SCU_RAM_INHIBIT_1__M 0xFFFF
+#define SCU_RAM_INHIBIT_1__PRE 0x0
+
+#define SCU_RAM_HTOL_BUF_0__A 0x831ECD
+#define SCU_RAM_HTOL_BUF_0__W 16
+#define SCU_RAM_HTOL_BUF_0__M 0xFFFF
+#define SCU_RAM_HTOL_BUF_0__PRE 0x0
+
+#define SCU_RAM_HTOL_BUF_1__A 0x831ECE
+#define SCU_RAM_HTOL_BUF_1__W 16
+#define SCU_RAM_HTOL_BUF_1__M 0xFFFF
+#define SCU_RAM_HTOL_BUF_1__PRE 0x0
+
+#define SCU_RAM_INHIBIT_2__A 0x831ECF
+#define SCU_RAM_INHIBIT_2__W 16
+#define SCU_RAM_INHIBIT_2__M 0xFFFF
+#define SCU_RAM_INHIBIT_2__PRE 0x0
+
+#define SCU_RAM_TR_SHORT_BUF_0__A 0x831ED0
+#define SCU_RAM_TR_SHORT_BUF_0__W 16
+#define SCU_RAM_TR_SHORT_BUF_0__M 0xFFFF
+#define SCU_RAM_TR_SHORT_BUF_0__PRE 0x0
+
+#define SCU_RAM_TR_SHORT_BUF_1__A 0x831ED1
+#define SCU_RAM_TR_SHORT_BUF_1__W 16
+#define SCU_RAM_TR_SHORT_BUF_1__M 0xFFFF
+#define SCU_RAM_TR_SHORT_BUF_1__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_0__A 0x831ED2
+#define SCU_RAM_TR_LONG_BUF_0__W 16
+#define SCU_RAM_TR_LONG_BUF_0__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_0__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_1__A 0x831ED3
+#define SCU_RAM_TR_LONG_BUF_1__W 16
+#define SCU_RAM_TR_LONG_BUF_1__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_1__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_2__A 0x831ED4
+#define SCU_RAM_TR_LONG_BUF_2__W 16
+#define SCU_RAM_TR_LONG_BUF_2__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_2__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_3__A 0x831ED5
+#define SCU_RAM_TR_LONG_BUF_3__W 16
+#define SCU_RAM_TR_LONG_BUF_3__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_3__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_4__A 0x831ED6
+#define SCU_RAM_TR_LONG_BUF_4__W 16
+#define SCU_RAM_TR_LONG_BUF_4__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_4__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_5__A 0x831ED7
+#define SCU_RAM_TR_LONG_BUF_5__W 16
+#define SCU_RAM_TR_LONG_BUF_5__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_5__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_6__A 0x831ED8
+#define SCU_RAM_TR_LONG_BUF_6__W 16
+#define SCU_RAM_TR_LONG_BUF_6__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_6__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_7__A 0x831ED9
+#define SCU_RAM_TR_LONG_BUF_7__W 16
+#define SCU_RAM_TR_LONG_BUF_7__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_7__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_8__A 0x831EDA
+#define SCU_RAM_TR_LONG_BUF_8__W 16
+#define SCU_RAM_TR_LONG_BUF_8__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_8__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_9__A 0x831EDB
+#define SCU_RAM_TR_LONG_BUF_9__W 16
+#define SCU_RAM_TR_LONG_BUF_9__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_9__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_10__A 0x831EDC
+#define SCU_RAM_TR_LONG_BUF_10__W 16
+#define SCU_RAM_TR_LONG_BUF_10__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_10__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_11__A 0x831EDD
+#define SCU_RAM_TR_LONG_BUF_11__W 16
+#define SCU_RAM_TR_LONG_BUF_11__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_11__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_12__A 0x831EDE
+#define SCU_RAM_TR_LONG_BUF_12__W 16
+#define SCU_RAM_TR_LONG_BUF_12__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_12__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_13__A 0x831EDF
+#define SCU_RAM_TR_LONG_BUF_13__W 16
+#define SCU_RAM_TR_LONG_BUF_13__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_13__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_14__A 0x831EE0
+#define SCU_RAM_TR_LONG_BUF_14__W 16
+#define SCU_RAM_TR_LONG_BUF_14__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_14__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_15__A 0x831EE1
+#define SCU_RAM_TR_LONG_BUF_15__W 16
+#define SCU_RAM_TR_LONG_BUF_15__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_15__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_16__A 0x831EE2
+#define SCU_RAM_TR_LONG_BUF_16__W 16
+#define SCU_RAM_TR_LONG_BUF_16__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_16__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_17__A 0x831EE3
+#define SCU_RAM_TR_LONG_BUF_17__W 16
+#define SCU_RAM_TR_LONG_BUF_17__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_17__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_18__A 0x831EE4
+#define SCU_RAM_TR_LONG_BUF_18__W 16
+#define SCU_RAM_TR_LONG_BUF_18__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_18__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_19__A 0x831EE5
+#define SCU_RAM_TR_LONG_BUF_19__W 16
+#define SCU_RAM_TR_LONG_BUF_19__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_19__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_20__A 0x831EE6
+#define SCU_RAM_TR_LONG_BUF_20__W 16
+#define SCU_RAM_TR_LONG_BUF_20__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_20__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_21__A 0x831EE7
+#define SCU_RAM_TR_LONG_BUF_21__W 16
+#define SCU_RAM_TR_LONG_BUF_21__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_21__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_22__A 0x831EE8
+#define SCU_RAM_TR_LONG_BUF_22__W 16
+#define SCU_RAM_TR_LONG_BUF_22__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_22__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_23__A 0x831EE9
+#define SCU_RAM_TR_LONG_BUF_23__W 16
+#define SCU_RAM_TR_LONG_BUF_23__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_23__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_24__A 0x831EEA
+#define SCU_RAM_TR_LONG_BUF_24__W 16
+#define SCU_RAM_TR_LONG_BUF_24__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_24__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_25__A 0x831EEB
+#define SCU_RAM_TR_LONG_BUF_25__W 16
+#define SCU_RAM_TR_LONG_BUF_25__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_25__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_26__A 0x831EEC
+#define SCU_RAM_TR_LONG_BUF_26__W 16
+#define SCU_RAM_TR_LONG_BUF_26__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_26__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_27__A 0x831EED
+#define SCU_RAM_TR_LONG_BUF_27__W 16
+#define SCU_RAM_TR_LONG_BUF_27__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_27__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_28__A 0x831EEE
+#define SCU_RAM_TR_LONG_BUF_28__W 16
+#define SCU_RAM_TR_LONG_BUF_28__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_28__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_29__A 0x831EEF
+#define SCU_RAM_TR_LONG_BUF_29__W 16
+#define SCU_RAM_TR_LONG_BUF_29__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_29__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_30__A 0x831EF0
+#define SCU_RAM_TR_LONG_BUF_30__W 16
+#define SCU_RAM_TR_LONG_BUF_30__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_30__PRE 0x0
+
+#define SCU_RAM_TR_LONG_BUF_31__A 0x831EF1
+#define SCU_RAM_TR_LONG_BUF_31__W 16
+#define SCU_RAM_TR_LONG_BUF_31__M 0xFFFF
+#define SCU_RAM_TR_LONG_BUF_31__PRE 0x0
+#define SCU_RAM_ATV_AMS_MAX__A 0x831EF2
+#define SCU_RAM_ATV_AMS_MAX__W 11
+#define SCU_RAM_ATV_AMS_MAX__M 0x7FF
+#define SCU_RAM_ATV_AMS_MAX__PRE 0x0
+
+#define SCU_RAM_ATV_AMS_MAX_AMS_MAX__B 0
+#define SCU_RAM_ATV_AMS_MAX_AMS_MAX__W 11
+#define SCU_RAM_ATV_AMS_MAX_AMS_MAX__M 0x7FF
+#define SCU_RAM_ATV_AMS_MAX_AMS_MAX__PRE 0x0
+
+#define SCU_RAM_ATV_AMS_MIN__A 0x831EF3
+#define SCU_RAM_ATV_AMS_MIN__W 11
+#define SCU_RAM_ATV_AMS_MIN__M 0x7FF
+#define SCU_RAM_ATV_AMS_MIN__PRE 0x0
+
+#define SCU_RAM_ATV_AMS_MIN_AMS_MIN__B 0
+#define SCU_RAM_ATV_AMS_MIN_AMS_MIN__W 11
+#define SCU_RAM_ATV_AMS_MIN_AMS_MIN__M 0x7FF
+#define SCU_RAM_ATV_AMS_MIN_AMS_MIN__PRE 0x0
+
+#define SCU_RAM_ATV_FIELD_CNT__A 0x831EF4
+#define SCU_RAM_ATV_FIELD_CNT__W 9
+#define SCU_RAM_ATV_FIELD_CNT__M 0x1FF
+#define SCU_RAM_ATV_FIELD_CNT__PRE 0x0
+
+#define SCU_RAM_ATV_FIELD_CNT_FIELD_CNT__B 0
+#define SCU_RAM_ATV_FIELD_CNT_FIELD_CNT__W 9
+#define SCU_RAM_ATV_FIELD_CNT_FIELD_CNT__M 0x1FF
+#define SCU_RAM_ATV_FIELD_CNT_FIELD_CNT__PRE 0x0
+
+#define SCU_RAM_ATV_AAGC_FAST__A 0x831EF5
+#define SCU_RAM_ATV_AAGC_FAST__W 1
+#define SCU_RAM_ATV_AAGC_FAST__M 0x1
+#define SCU_RAM_ATV_AAGC_FAST__PRE 0x0
+
+#define SCU_RAM_ATV_AAGC_FAST_AAGC_FAST__B 0
+#define SCU_RAM_ATV_AAGC_FAST_AAGC_FAST__W 1
+#define SCU_RAM_ATV_AAGC_FAST_AAGC_FAST__M 0x1
+#define SCU_RAM_ATV_AAGC_FAST_AAGC_FAST__PRE 0x0
+#define SCU_RAM_ATV_AAGC_FAST_AAGC_FAST_OFF 0x0
+#define SCU_RAM_ATV_AAGC_FAST_AAGC_FAST_ON 0x1
+
+#define SCU_RAM_ATV_AAGC_LP2__A 0x831EF6
+#define SCU_RAM_ATV_AAGC_LP2__W 16
+#define SCU_RAM_ATV_AAGC_LP2__M 0xFFFF
+#define SCU_RAM_ATV_AAGC_LP2__PRE 0x0
+
+#define SCU_RAM_ATV_AAGC_LP2_AAGC_LP2__B 0
+#define SCU_RAM_ATV_AAGC_LP2_AAGC_LP2__W 16
+#define SCU_RAM_ATV_AAGC_LP2_AAGC_LP2__M 0xFFFF
+#define SCU_RAM_ATV_AAGC_LP2_AAGC_LP2__PRE 0x0
+
+#define SCU_RAM_ATV_BP_LVL__A 0x831EF7
+#define SCU_RAM_ATV_BP_LVL__W 11
+#define SCU_RAM_ATV_BP_LVL__M 0x7FF
+#define SCU_RAM_ATV_BP_LVL__PRE 0x0
+
+#define SCU_RAM_ATV_BP_LVL_BP_LVL__B 0
+#define SCU_RAM_ATV_BP_LVL_BP_LVL__W 11
+#define SCU_RAM_ATV_BP_LVL_BP_LVL__M 0x7FF
+#define SCU_RAM_ATV_BP_LVL_BP_LVL__PRE 0x0
+
+#define SCU_RAM_ATV_BP_RELY__A 0x831EF8
+#define SCU_RAM_ATV_BP_RELY__W 8
+#define SCU_RAM_ATV_BP_RELY__M 0xFF
+#define SCU_RAM_ATV_BP_RELY__PRE 0x0
+
+#define SCU_RAM_ATV_BP_RELY_BP_RELY__B 0
+#define SCU_RAM_ATV_BP_RELY_BP_RELY__W 8
+#define SCU_RAM_ATV_BP_RELY_BP_RELY__M 0xFF
+#define SCU_RAM_ATV_BP_RELY_BP_RELY__PRE 0x0
+
+#define SCU_RAM_ATV_BP_MTA__A 0x831EF9
+#define SCU_RAM_ATV_BP_MTA__W 14
+#define SCU_RAM_ATV_BP_MTA__M 0x3FFF
+#define SCU_RAM_ATV_BP_MTA__PRE 0x0
+
+#define SCU_RAM_ATV_BP_MTA_BP_MTA__B 0
+#define SCU_RAM_ATV_BP_MTA_BP_MTA__W 14
+#define SCU_RAM_ATV_BP_MTA_BP_MTA__M 0x3FFF
+#define SCU_RAM_ATV_BP_MTA_BP_MTA__PRE 0x0
+
+#define SCU_RAM_ATV_BP_REF__A 0x831EFA
+#define SCU_RAM_ATV_BP_REF__W 11
+#define SCU_RAM_ATV_BP_REF__M 0x7FF
+#define SCU_RAM_ATV_BP_REF__PRE 0x0
+
+#define SCU_RAM_ATV_BP_REF_BP_REF__B 0
+#define SCU_RAM_ATV_BP_REF_BP_REF__W 11
+#define SCU_RAM_ATV_BP_REF_BP_REF__M 0x7FF
+#define SCU_RAM_ATV_BP_REF_BP_REF__PRE 0x0
+
+#define SCU_RAM_ATV_BP_REF_MIN__A 0x831EFB
+#define SCU_RAM_ATV_BP_REF_MIN__W 11
+#define SCU_RAM_ATV_BP_REF_MIN__M 0x7FF
+#define SCU_RAM_ATV_BP_REF_MIN__PRE 0x0
+
+#define SCU_RAM_ATV_BP_REF_MIN_BP_REF_MIN__B 0
+#define SCU_RAM_ATV_BP_REF_MIN_BP_REF_MIN__W 11
+#define SCU_RAM_ATV_BP_REF_MIN_BP_REF_MIN__M 0x7FF
+#define SCU_RAM_ATV_BP_REF_MIN_BP_REF_MIN__PRE 0x0
+
+#define SCU_RAM_ATV_BP_REF_MAX__A 0x831EFC
+#define SCU_RAM_ATV_BP_REF_MAX__W 11
+#define SCU_RAM_ATV_BP_REF_MAX__M 0x7FF
+#define SCU_RAM_ATV_BP_REF_MAX__PRE 0x0
+
+#define SCU_RAM_ATV_BP_REF_MAX_BP_REF_MAX__B 0
+#define SCU_RAM_ATV_BP_REF_MAX_BP_REF_MAX__W 11
+#define SCU_RAM_ATV_BP_REF_MAX_BP_REF_MAX__M 0x7FF
+#define SCU_RAM_ATV_BP_REF_MAX_BP_REF_MAX__PRE 0x0
+
+#define SCU_RAM_ATV_BP_CNT__A 0x831EFD
+#define SCU_RAM_ATV_BP_CNT__W 8
+#define SCU_RAM_ATV_BP_CNT__M 0xFF
+#define SCU_RAM_ATV_BP_CNT__PRE 0x0
+
+#define SCU_RAM_ATV_BP_CNT_BP_CNT__B 0
+#define SCU_RAM_ATV_BP_CNT_BP_CNT__W 8
+#define SCU_RAM_ATV_BP_CNT_BP_CNT__M 0xFF
+#define SCU_RAM_ATV_BP_CNT_BP_CNT__PRE 0x0
+
+#define SCU_RAM_ATV_BP_XD_CNT__A 0x831EFE
+#define SCU_RAM_ATV_BP_XD_CNT__W 12
+#define SCU_RAM_ATV_BP_XD_CNT__M 0xFFF
+#define SCU_RAM_ATV_BP_XD_CNT__PRE 0x0
+
+#define SCU_RAM_ATV_BP_XD_CNT_BP_XD_CNT__B 0
+#define SCU_RAM_ATV_BP_XD_CNT_BP_XD_CNT__W 12
+#define SCU_RAM_ATV_BP_XD_CNT_BP_XD_CNT__M 0xFFF
+#define SCU_RAM_ATV_BP_XD_CNT_BP_XD_CNT__PRE 0x0
+
+#define SCU_RAM_ATV_PAGC_KI_MIN__A 0x831EFF
+#define SCU_RAM_ATV_PAGC_KI_MIN__W 12
+#define SCU_RAM_ATV_PAGC_KI_MIN__M 0xFFF
+#define SCU_RAM_ATV_PAGC_KI_MIN__PRE 0x0
+
+#define SCU_RAM_ATV_PAGC_KI_MIN_PAGC_KI_MIN__B 0
+#define SCU_RAM_ATV_PAGC_KI_MIN_PAGC_KI_MIN__W 12
+#define SCU_RAM_ATV_PAGC_KI_MIN_PAGC_KI_MIN__M 0xFFF
+#define SCU_RAM_ATV_PAGC_KI_MIN_PAGC_KI_MIN__PRE 0x0
+
+#define SCU_RAM_ATV_BPC_KI_MIN__A 0x831F00
+#define SCU_RAM_ATV_BPC_KI_MIN__W 12
+#define SCU_RAM_ATV_BPC_KI_MIN__M 0xFFF
+#define SCU_RAM_ATV_BPC_KI_MIN__PRE 0x0
+
+#define SCU_RAM_ATV_BPC_KI_MIN_BPC_KI_MIN__B 0
+#define SCU_RAM_ATV_BPC_KI_MIN_BPC_KI_MIN__W 12
+#define SCU_RAM_ATV_BPC_KI_MIN_BPC_KI_MIN__M 0xFFF
+#define SCU_RAM_ATV_BPC_KI_MIN_BPC_KI_MIN__PRE 0x0
+
+#define SCU_RAM_ORX_RF_RX_FREQUENCY_VALUE__A 0x831F01
+#define SCU_RAM_ORX_RF_RX_FREQUENCY_VALUE__W 16
+#define SCU_RAM_ORX_RF_RX_FREQUENCY_VALUE__M 0xFFFF
+#define SCU_RAM_ORX_RF_RX_FREQUENCY_VALUE__PRE 0x0
+
+#define SCU_RAM_ORX_RF_RX_DATA_RATE__A 0x831F02
+#define SCU_RAM_ORX_RF_RX_DATA_RATE__W 8
+#define SCU_RAM_ORX_RF_RX_DATA_RATE__M 0xFF
+#define SCU_RAM_ORX_RF_RX_DATA_RATE__PRE 0x0
+#define SCU_RAM_ORX_RF_RX_DATA_RATE_2048KBPS_REGSPEC 0x0
+#define SCU_RAM_ORX_RF_RX_DATA_RATE_2048KBPS_INVSPEC 0x1
+#define SCU_RAM_ORX_RF_RX_DATA_RATE_2048KBPS_REGSPEC_ALT 0x40
+#define SCU_RAM_ORX_RF_RX_DATA_RATE_2048KBPS_INVSPEC_ALT 0x41
+#define SCU_RAM_ORX_RF_RX_DATA_RATE_1544KBPS_REGSPEC 0x80
+#define SCU_RAM_ORX_RF_RX_DATA_RATE_1544KBPS_INVSPEC 0x81
+#define SCU_RAM_ORX_RF_RX_DATA_RATE_3088KBPS_REGSPEC 0xC0
+#define SCU_RAM_ORX_RF_RX_DATA_RATE_3088KBPS_INVSPEC 0xC1
+
+#define SCU_RAM_ORX_SCU_STATE__A 0x831F03
+#define SCU_RAM_ORX_SCU_STATE__W 8
+#define SCU_RAM_ORX_SCU_STATE__M 0xFF
+#define SCU_RAM_ORX_SCU_STATE__PRE 0x0
+#define SCU_RAM_ORX_SCU_STATE_RESET 0x0
+#define SCU_RAM_ORX_SCU_STATE_AGN_HUNT 0x1
+#define SCU_RAM_ORX_SCU_STATE_DGN_HUNT 0x2
+#define SCU_RAM_ORX_SCU_STATE_AGC_HUNT 0x3
+#define SCU_RAM_ORX_SCU_STATE_FRQ_HUNT 0x4
+#define SCU_RAM_ORX_SCU_STATE_PHA_HUNT 0x8
+#define SCU_RAM_ORX_SCU_STATE_TIM_HUNT 0x10
+#define SCU_RAM_ORX_SCU_STATE_EQU_HUNT 0x20
+#define SCU_RAM_ORX_SCU_STATE_EQT_HUNT 0x30
+#define SCU_RAM_ORX_SCU_STATE_SYNC 0x40
+
+#define SCU_RAM_ORX_SCU_LOCK__A 0x831F04
+#define SCU_RAM_ORX_SCU_LOCK__W 16
+#define SCU_RAM_ORX_SCU_LOCK__M 0xFFFF
+#define SCU_RAM_ORX_SCU_LOCK__PRE 0x0
+
+#define SCU_RAM_ORX_TARGET_MODE__A 0x831F05
+#define SCU_RAM_ORX_TARGET_MODE__W 2
+#define SCU_RAM_ORX_TARGET_MODE__M 0x3
+#define SCU_RAM_ORX_TARGET_MODE__PRE 0x0
+#define SCU_RAM_ORX_TARGET_MODE_1544KBPS 0x0
+#define SCU_RAM_ORX_TARGET_MODE_3088KBPS 0x1
+#define SCU_RAM_ORX_TARGET_MODE_2048KBPS_SQRT 0x2
+#define SCU_RAM_ORX_TARGET_MODE_2048KBPS_RO 0x3
+
+#define SCU_RAM_ORX_MER_MIN_DB__A 0x831F06
+#define SCU_RAM_ORX_MER_MIN_DB__W 8
+#define SCU_RAM_ORX_MER_MIN_DB__M 0xFF
+#define SCU_RAM_ORX_MER_MIN_DB__PRE 0x0
+
+#define SCU_RAM_ORX_RF_GAIN__A 0x831F07
+#define SCU_RAM_ORX_RF_GAIN__W 16
+#define SCU_RAM_ORX_RF_GAIN__M 0xFFFF
+#define SCU_RAM_ORX_RF_GAIN__PRE 0x0
+
+#define SCU_RAM_ORX_RF_GAIN_MIN__A 0x831F08
+#define SCU_RAM_ORX_RF_GAIN_MIN__W 16
+#define SCU_RAM_ORX_RF_GAIN_MIN__M 0xFFFF
+#define SCU_RAM_ORX_RF_GAIN_MIN__PRE 0x0
+
+#define SCU_RAM_ORX_RF_GAIN_MAX__A 0x831F09
+#define SCU_RAM_ORX_RF_GAIN_MAX__W 16
+#define SCU_RAM_ORX_RF_GAIN_MAX__M 0xFFFF
+#define SCU_RAM_ORX_RF_GAIN_MAX__PRE 0x0
+
+#define SCU_RAM_ORX_IF_GAIN__A 0x831F0A
+#define SCU_RAM_ORX_IF_GAIN__W 16
+#define SCU_RAM_ORX_IF_GAIN__M 0xFFFF
+#define SCU_RAM_ORX_IF_GAIN__PRE 0x0
+
+#define SCU_RAM_ORX_IF_GAIN_MIN__A 0x831F0B
+#define SCU_RAM_ORX_IF_GAIN_MIN__W 16
+#define SCU_RAM_ORX_IF_GAIN_MIN__M 0xFFFF
+#define SCU_RAM_ORX_IF_GAIN_MIN__PRE 0x0
+
+#define SCU_RAM_ORX_IF_GAIN_MAX__A 0x831F0C
+#define SCU_RAM_ORX_IF_GAIN_MAX__W 16
+#define SCU_RAM_ORX_IF_GAIN_MAX__M 0xFFFF
+#define SCU_RAM_ORX_IF_GAIN_MAX__PRE 0x0
+
+#define SCU_RAM_ORX_AGN_HEADR__A 0x831F0D
+#define SCU_RAM_ORX_AGN_HEADR__W 16
+#define SCU_RAM_ORX_AGN_HEADR__M 0xFFFF
+#define SCU_RAM_ORX_AGN_HEADR__PRE 0x0
+
+#define SCU_RAM_ORX_AGN_HEADR_STP__A 0x831F0E
+#define SCU_RAM_ORX_AGN_HEADR_STP__W 8
+#define SCU_RAM_ORX_AGN_HEADR_STP__M 0xFF
+#define SCU_RAM_ORX_AGN_HEADR_STP__PRE 0x0
+
+#define SCU_RAM_ORX_AGN_KI__A 0x831F0F
+#define SCU_RAM_ORX_AGN_KI__W 8
+#define SCU_RAM_ORX_AGN_KI__M 0xFF
+#define SCU_RAM_ORX_AGN_KI__PRE 0x0
+
+#define SCU_RAM_ORX_AGN_LOCK_TH__A 0x831F10
+#define SCU_RAM_ORX_AGN_LOCK_TH__W 16
+#define SCU_RAM_ORX_AGN_LOCK_TH__M 0xFFFF
+#define SCU_RAM_ORX_AGN_LOCK_TH__PRE 0x0
+
+#define SCU_RAM_ORX_AGN_LOCK_WD__A 0x831F11
+#define SCU_RAM_ORX_AGN_LOCK_WD__W 16
+#define SCU_RAM_ORX_AGN_LOCK_WD__M 0xFFFF
+#define SCU_RAM_ORX_AGN_LOCK_WD__PRE 0x0
+
+#define SCU_RAM_ORX_AGN_ONLOCK_TTH__A 0x831F12
+#define SCU_RAM_ORX_AGN_ONLOCK_TTH__W 16
+#define SCU_RAM_ORX_AGN_ONLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_AGN_ONLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_AGN_UNLOCK_TTH__A 0x831F13
+#define SCU_RAM_ORX_AGN_UNLOCK_TTH__W 16
+#define SCU_RAM_ORX_AGN_UNLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_AGN_UNLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_AGN_LOCK_TOTH__A 0x831F14
+#define SCU_RAM_ORX_AGN_LOCK_TOTH__W 16
+#define SCU_RAM_ORX_AGN_LOCK_TOTH__M 0xFFFF
+#define SCU_RAM_ORX_AGN_LOCK_TOTH__PRE 0x0
+
+#define SCU_RAM_ORX_AGN_LOCK_MASK__A 0x831F15
+#define SCU_RAM_ORX_AGN_LOCK_MASK__W 8
+#define SCU_RAM_ORX_AGN_LOCK_MASK__M 0xFF
+#define SCU_RAM_ORX_AGN_LOCK_MASK__PRE 0x0
+
+#define SCU_RAM_ORX_DGN__A 0x831F16
+#define SCU_RAM_ORX_DGN__W 16
+#define SCU_RAM_ORX_DGN__M 0xFFFF
+#define SCU_RAM_ORX_DGN__PRE 0x0
+
+#define SCU_RAM_ORX_DGN_MIN__A 0x831F17
+#define SCU_RAM_ORX_DGN_MIN__W 16
+#define SCU_RAM_ORX_DGN_MIN__M 0xFFFF
+#define SCU_RAM_ORX_DGN_MIN__PRE 0x0
+
+#define SCU_RAM_ORX_DGN_MAX__A 0x831F18
+#define SCU_RAM_ORX_DGN_MAX__W 16
+#define SCU_RAM_ORX_DGN_MAX__M 0xFFFF
+#define SCU_RAM_ORX_DGN_MAX__PRE 0x0
+
+#define SCU_RAM_ORX_DGN_AMP__A 0x831F19
+#define SCU_RAM_ORX_DGN_AMP__W 16
+#define SCU_RAM_ORX_DGN_AMP__M 0xFFFF
+#define SCU_RAM_ORX_DGN_AMP__PRE 0x0
+
+#define SCU_RAM_ORX_DGN_AMPTARGET__A 0x831F1A
+#define SCU_RAM_ORX_DGN_AMPTARGET__W 16
+#define SCU_RAM_ORX_DGN_AMPTARGET__M 0xFFFF
+#define SCU_RAM_ORX_DGN_AMPTARGET__PRE 0x0
+
+#define SCU_RAM_ORX_DGN_KI__A 0x831F1B
+#define SCU_RAM_ORX_DGN_KI__W 8
+#define SCU_RAM_ORX_DGN_KI__M 0xFF
+#define SCU_RAM_ORX_DGN_KI__PRE 0x0
+
+#define SCU_RAM_ORX_DGN_LOCK_TH__A 0x831F1C
+#define SCU_RAM_ORX_DGN_LOCK_TH__W 16
+#define SCU_RAM_ORX_DGN_LOCK_TH__M 0xFFFF
+#define SCU_RAM_ORX_DGN_LOCK_TH__PRE 0x0
+
+#define SCU_RAM_ORX_DGN_LOCK_WD__A 0x831F1D
+#define SCU_RAM_ORX_DGN_LOCK_WD__W 16
+#define SCU_RAM_ORX_DGN_LOCK_WD__M 0xFFFF
+#define SCU_RAM_ORX_DGN_LOCK_WD__PRE 0x0
+
+#define SCU_RAM_ORX_DGN_ONLOCK_TTH__A 0x831F1E
+#define SCU_RAM_ORX_DGN_ONLOCK_TTH__W 16
+#define SCU_RAM_ORX_DGN_ONLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_DGN_ONLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_DGN_UNLOCK_TTH__A 0x831F1F
+#define SCU_RAM_ORX_DGN_UNLOCK_TTH__W 16
+#define SCU_RAM_ORX_DGN_UNLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_DGN_UNLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_DGN_LOCK_TOTH__A 0x831F20
+#define SCU_RAM_ORX_DGN_LOCK_TOTH__W 16
+#define SCU_RAM_ORX_DGN_LOCK_TOTH__M 0xFFFF
+#define SCU_RAM_ORX_DGN_LOCK_TOTH__PRE 0x0
+
+#define SCU_RAM_ORX_DGN_LOCK_MASK__A 0x831F21
+#define SCU_RAM_ORX_DGN_LOCK_MASK__W 8
+#define SCU_RAM_ORX_DGN_LOCK_MASK__M 0xFF
+#define SCU_RAM_ORX_DGN_LOCK_MASK__PRE 0x0
+
+#define SCU_RAM_ORX_FREQ_GAIN_CORR__A 0x831F22
+#define SCU_RAM_ORX_FREQ_GAIN_CORR__W 8
+#define SCU_RAM_ORX_FREQ_GAIN_CORR__M 0xFF
+#define SCU_RAM_ORX_FREQ_GAIN_CORR__PRE 0x0
+#define SCU_RAM_ORX_FREQ_GAIN_CORR_1544KBPS 0x60
+#define SCU_RAM_ORX_FREQ_GAIN_CORR_2048KBPS 0x80
+#define SCU_RAM_ORX_FREQ_GAIN_CORR_3088KBPS 0xC0
+
+#define SCU_RAM_ORX_FRQ_OFFSET__A 0x831F23
+#define SCU_RAM_ORX_FRQ_OFFSET__W 16
+#define SCU_RAM_ORX_FRQ_OFFSET__M 0xFFFF
+#define SCU_RAM_ORX_FRQ_OFFSET__PRE 0x0
+
+#define SCU_RAM_ORX_FRQ_OFFSET_MAX__A 0x831F24
+#define SCU_RAM_ORX_FRQ_OFFSET_MAX__W 15
+#define SCU_RAM_ORX_FRQ_OFFSET_MAX__M 0x7FFF
+#define SCU_RAM_ORX_FRQ_OFFSET_MAX__PRE 0x0
+
+#define SCU_RAM_ORX_FRQ_KI__A 0x831F25
+#define SCU_RAM_ORX_FRQ_KI__W 8
+#define SCU_RAM_ORX_FRQ_KI__M 0xFF
+#define SCU_RAM_ORX_FRQ_KI__PRE 0x0
+
+#define SCU_RAM_ORX_FRQ_DIFF__A 0x831F26
+#define SCU_RAM_ORX_FRQ_DIFF__W 16
+#define SCU_RAM_ORX_FRQ_DIFF__M 0xFFFF
+#define SCU_RAM_ORX_FRQ_DIFF__PRE 0x0
+
+#define SCU_RAM_ORX_FRQ_LOCK_TH__A 0x831F27
+#define SCU_RAM_ORX_FRQ_LOCK_TH__W 16
+#define SCU_RAM_ORX_FRQ_LOCK_TH__M 0xFFFF
+#define SCU_RAM_ORX_FRQ_LOCK_TH__PRE 0x0
+
+#define SCU_RAM_ORX_FRQ_LOCK_WD__A 0x831F28
+#define SCU_RAM_ORX_FRQ_LOCK_WD__W 16
+#define SCU_RAM_ORX_FRQ_LOCK_WD__M 0xFFFF
+#define SCU_RAM_ORX_FRQ_LOCK_WD__PRE 0x0
+
+#define SCU_RAM_ORX_FRQ_ONLOCK_TTH__A 0x831F29
+#define SCU_RAM_ORX_FRQ_ONLOCK_TTH__W 16
+#define SCU_RAM_ORX_FRQ_ONLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_FRQ_ONLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_FRQ_UNLOCK_TTH__A 0x831F2A
+#define SCU_RAM_ORX_FRQ_UNLOCK_TTH__W 16
+#define SCU_RAM_ORX_FRQ_UNLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_FRQ_UNLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_FRQ_LOCK_TOTH__A 0x831F2B
+#define SCU_RAM_ORX_FRQ_LOCK_TOTH__W 16
+#define SCU_RAM_ORX_FRQ_LOCK_TOTH__M 0xFFFF
+#define SCU_RAM_ORX_FRQ_LOCK_TOTH__PRE 0x0
+
+#define SCU_RAM_ORX_FRQ_LOCK_MASK__A 0x831F2C
+#define SCU_RAM_ORX_FRQ_LOCK_MASK__W 8
+#define SCU_RAM_ORX_FRQ_LOCK_MASK__M 0xFF
+#define SCU_RAM_ORX_FRQ_LOCK_MASK__PRE 0x0
+
+#define SCU_RAM_ORX_PHA_DIFF__A 0x831F2D
+#define SCU_RAM_ORX_PHA_DIFF__W 16
+#define SCU_RAM_ORX_PHA_DIFF__M 0xFFFF
+#define SCU_RAM_ORX_PHA_DIFF__PRE 0x0
+
+#define SCU_RAM_ORX_PHA_LOCK_TH__A 0x831F2E
+#define SCU_RAM_ORX_PHA_LOCK_TH__W 16
+#define SCU_RAM_ORX_PHA_LOCK_TH__M 0xFFFF
+#define SCU_RAM_ORX_PHA_LOCK_TH__PRE 0x0
+
+#define SCU_RAM_ORX_PHA_LOCK_WD__A 0x831F2F
+#define SCU_RAM_ORX_PHA_LOCK_WD__W 16
+#define SCU_RAM_ORX_PHA_LOCK_WD__M 0xFFFF
+#define SCU_RAM_ORX_PHA_LOCK_WD__PRE 0x0
+
+#define SCU_RAM_ORX_PHA_ONLOCK_TTH__A 0x831F30
+#define SCU_RAM_ORX_PHA_ONLOCK_TTH__W 16
+#define SCU_RAM_ORX_PHA_ONLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_PHA_ONLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_PHA_UNLOCK_TTH__A 0x831F31
+#define SCU_RAM_ORX_PHA_UNLOCK_TTH__W 16
+#define SCU_RAM_ORX_PHA_UNLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_PHA_UNLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_PHA_LOCK_TOTH__A 0x831F32
+#define SCU_RAM_ORX_PHA_LOCK_TOTH__W 16
+#define SCU_RAM_ORX_PHA_LOCK_TOTH__M 0xFFFF
+#define SCU_RAM_ORX_PHA_LOCK_TOTH__PRE 0x0
+
+#define SCU_RAM_ORX_PHA_LOCK_MASK__A 0x831F33
+#define SCU_RAM_ORX_PHA_LOCK_MASK__W 8
+#define SCU_RAM_ORX_PHA_LOCK_MASK__M 0xFF
+#define SCU_RAM_ORX_PHA_LOCK_MASK__PRE 0x0
+
+#define SCU_RAM_ORX_TIM_OFFSET__A 0x831F34
+#define SCU_RAM_ORX_TIM_OFFSET__W 16
+#define SCU_RAM_ORX_TIM_OFFSET__M 0xFFFF
+#define SCU_RAM_ORX_TIM_OFFSET__PRE 0x0
+
+#define SCU_RAM_ORX_TIM_DIFF__A 0x831F35
+#define SCU_RAM_ORX_TIM_DIFF__W 16
+#define SCU_RAM_ORX_TIM_DIFF__M 0xFFFF
+#define SCU_RAM_ORX_TIM_DIFF__PRE 0x0
+
+#define SCU_RAM_ORX_TIM_LOCK_TH__A 0x831F36
+#define SCU_RAM_ORX_TIM_LOCK_TH__W 16
+#define SCU_RAM_ORX_TIM_LOCK_TH__M 0xFFFF
+#define SCU_RAM_ORX_TIM_LOCK_TH__PRE 0x0
+
+#define SCU_RAM_ORX_TIM_LOCK_WD__A 0x831F37
+#define SCU_RAM_ORX_TIM_LOCK_WD__W 16
+#define SCU_RAM_ORX_TIM_LOCK_WD__M 0xFFFF
+#define SCU_RAM_ORX_TIM_LOCK_WD__PRE 0x0
+
+#define SCU_RAM_ORX_TIM_ONLOCK_TTH__A 0x831F38
+#define SCU_RAM_ORX_TIM_ONLOCK_TTH__W 16
+#define SCU_RAM_ORX_TIM_ONLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_TIM_ONLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_TIM_UNLOCK_TTH__A 0x831F39
+#define SCU_RAM_ORX_TIM_UNLOCK_TTH__W 16
+#define SCU_RAM_ORX_TIM_UNLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_TIM_UNLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_TIM_LOCK_TOTH__A 0x831F3A
+#define SCU_RAM_ORX_TIM_LOCK_TOTH__W 16
+#define SCU_RAM_ORX_TIM_LOCK_TOTH__M 0xFFFF
+#define SCU_RAM_ORX_TIM_LOCK_TOTH__PRE 0x0
+
+#define SCU_RAM_ORX_TIM_LOCK_MASK__A 0x831F3B
+#define SCU_RAM_ORX_TIM_LOCK_MASK__W 8
+#define SCU_RAM_ORX_TIM_LOCK_MASK__M 0xFF
+#define SCU_RAM_ORX_TIM_LOCK_MASK__PRE 0x0
+
+#define SCU_RAM_ORX_EQU_DIFF__A 0x831F3C
+#define SCU_RAM_ORX_EQU_DIFF__W 16
+#define SCU_RAM_ORX_EQU_DIFF__M 0xFFFF
+#define SCU_RAM_ORX_EQU_DIFF__PRE 0x0
+
+#define SCU_RAM_ORX_EQU_LOCK_TH__A 0x831F3D
+#define SCU_RAM_ORX_EQU_LOCK_TH__W 16
+#define SCU_RAM_ORX_EQU_LOCK_TH__M 0xFFFF
+#define SCU_RAM_ORX_EQU_LOCK_TH__PRE 0x0
+
+#define SCU_RAM_ORX_EQU_LOCK_WD__A 0x831F3E
+#define SCU_RAM_ORX_EQU_LOCK_WD__W 16
+#define SCU_RAM_ORX_EQU_LOCK_WD__M 0xFFFF
+#define SCU_RAM_ORX_EQU_LOCK_WD__PRE 0x0
+
+#define SCU_RAM_ORX_EQU_ONLOCK_TTH__A 0x831F3F
+#define SCU_RAM_ORX_EQU_ONLOCK_TTH__W 16
+#define SCU_RAM_ORX_EQU_ONLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_EQU_ONLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_EQU_UNLOCK_TTH__A 0x831F40
+#define SCU_RAM_ORX_EQU_UNLOCK_TTH__W 16
+#define SCU_RAM_ORX_EQU_UNLOCK_TTH__M 0xFFFF
+#define SCU_RAM_ORX_EQU_UNLOCK_TTH__PRE 0x0
+
+#define SCU_RAM_ORX_EQU_LOCK_TOTH__A 0x831F41
+#define SCU_RAM_ORX_EQU_LOCK_TOTH__W 16
+#define SCU_RAM_ORX_EQU_LOCK_TOTH__M 0xFFFF
+#define SCU_RAM_ORX_EQU_LOCK_TOTH__PRE 0x0
+
+#define SCU_RAM_ORX_EQU_LOCK_MASK__A 0x831F42
+#define SCU_RAM_ORX_EQU_LOCK_MASK__W 8
+#define SCU_RAM_ORX_EQU_LOCK_MASK__M 0xFF
+#define SCU_RAM_ORX_EQU_LOCK_MASK__PRE 0x0
+
+#define SCU_RAM_ORX_FLT_FRQ__A 0x831F43
+#define SCU_RAM_ORX_FLT_FRQ__W 16
+#define SCU_RAM_ORX_FLT_FRQ__M 0xFFFF
+#define SCU_RAM_ORX_FLT_FRQ__PRE 0x0
+#define SCU_RAM_ORX_RST_CPH__A 0x831F44
+#define SCU_RAM_ORX_RST_CPH__W 4
+#define SCU_RAM_ORX_RST_CPH__M 0xF
+#define SCU_RAM_ORX_RST_CPH__PRE 0x0
+
+#define SCU_RAM_ORX_RST_CPH_RST_CPH__B 0
+#define SCU_RAM_ORX_RST_CPH_RST_CPH__W 4
+#define SCU_RAM_ORX_RST_CPH_RST_CPH__M 0xF
+#define SCU_RAM_ORX_RST_CPH_RST_CPH__PRE 0x0
+
+#define SCU_RAM_ORX_RST_CTI__A 0x831F45
+#define SCU_RAM_ORX_RST_CTI__W 4
+#define SCU_RAM_ORX_RST_CTI__M 0xF
+#define SCU_RAM_ORX_RST_CTI__PRE 0x0
+
+#define SCU_RAM_ORX_RST_CTI_RST_CTI__B 0
+#define SCU_RAM_ORX_RST_CTI_RST_CTI__W 4
+#define SCU_RAM_ORX_RST_CTI_RST_CTI__M 0xF
+#define SCU_RAM_ORX_RST_CTI_RST_CTI__PRE 0x0
+
+#define SCU_RAM_ORX_RST_KRN__A 0x831F46
+#define SCU_RAM_ORX_RST_KRN__W 4
+#define SCU_RAM_ORX_RST_KRN__M 0xF
+#define SCU_RAM_ORX_RST_KRN__PRE 0x0
+
+#define SCU_RAM_ORX_RST_KRN_RST_KRN__B 0
+#define SCU_RAM_ORX_RST_KRN_RST_KRN__W 4
+#define SCU_RAM_ORX_RST_KRN_RST_KRN__M 0xF
+#define SCU_RAM_ORX_RST_KRN_RST_KRN__PRE 0x0
+
+#define SCU_RAM_ORX_RST_KRP__A 0x831F47
+#define SCU_RAM_ORX_RST_KRP__W 4
+#define SCU_RAM_ORX_RST_KRP__M 0xF
+#define SCU_RAM_ORX_RST_KRP__PRE 0x0
+
+#define SCU_RAM_ORX_RST_KRP_RST_KRP__B 0
+#define SCU_RAM_ORX_RST_KRP_RST_KRP__W 4
+#define SCU_RAM_ORX_RST_KRP_RST_KRP__M 0xF
+#define SCU_RAM_ORX_RST_KRP_RST_KRP__PRE 0x0
+
+#define SCU_RAM_ATV_STANDARD__A 0x831F48
+#define SCU_RAM_ATV_STANDARD__W 12
+#define SCU_RAM_ATV_STANDARD__M 0xFFF
+#define SCU_RAM_ATV_STANDARD__PRE 0x0
+
+#define SCU_RAM_ATV_STANDARD_STANDARD__B 0
+#define SCU_RAM_ATV_STANDARD_STANDARD__W 12
+#define SCU_RAM_ATV_STANDARD_STANDARD__M 0xFFF
+#define SCU_RAM_ATV_STANDARD_STANDARD__PRE 0x0
+#define SCU_RAM_ATV_STANDARD_STANDARD_MN 0x2
+#define SCU_RAM_ATV_STANDARD_STANDARD_B 0x103
+#define SCU_RAM_ATV_STANDARD_STANDARD_G 0x3
+#define SCU_RAM_ATV_STANDARD_STANDARD_DK 0x4
+#define SCU_RAM_ATV_STANDARD_STANDARD_L 0x9
+#define SCU_RAM_ATV_STANDARD_STANDARD_LP 0x109
+#define SCU_RAM_ATV_STANDARD_STANDARD_I 0xA
+#define SCU_RAM_ATV_STANDARD_STANDARD_FM 0x40
+
+#define SCU_RAM_ATV_DETECT__A 0x831F49
+#define SCU_RAM_ATV_DETECT__W 1
+#define SCU_RAM_ATV_DETECT__M 0x1
+#define SCU_RAM_ATV_DETECT__PRE 0x0
+
+#define SCU_RAM_ATV_DETECT_DETECT__B 0
+#define SCU_RAM_ATV_DETECT_DETECT__W 1
+#define SCU_RAM_ATV_DETECT_DETECT__M 0x1
+#define SCU_RAM_ATV_DETECT_DETECT__PRE 0x0
+#define SCU_RAM_ATV_DETECT_DETECT_false 0x0
+#define SCU_RAM_ATV_DETECT_DETECT_true 0x1
+
+#define SCU_RAM_ATV_DETECT_TH__A 0x831F4A
+#define SCU_RAM_ATV_DETECT_TH__W 8
+#define SCU_RAM_ATV_DETECT_TH__M 0xFF
+#define SCU_RAM_ATV_DETECT_TH__PRE 0x0
+
+#define SCU_RAM_ATV_DETECT_TH_DETECT_TH__B 0
+#define SCU_RAM_ATV_DETECT_TH_DETECT_TH__W 8
+#define SCU_RAM_ATV_DETECT_TH_DETECT_TH__M 0xFF
+#define SCU_RAM_ATV_DETECT_TH_DETECT_TH__PRE 0x0
+
+#define SCU_RAM_ATV_LOCK__A 0x831F4B
+#define SCU_RAM_ATV_LOCK__W 2
+#define SCU_RAM_ATV_LOCK__M 0x3
+#define SCU_RAM_ATV_LOCK__PRE 0x0
+
+#define SCU_RAM_ATV_LOCK_CR_LOCK_BIT__B 0
+#define SCU_RAM_ATV_LOCK_CR_LOCK_BIT__W 1
+#define SCU_RAM_ATV_LOCK_CR_LOCK_BIT__M 0x1
+#define SCU_RAM_ATV_LOCK_CR_LOCK_BIT__PRE 0x0
+#define SCU_RAM_ATV_LOCK_CR_LOCK_BIT_NO_LOCK 0x0
+#define SCU_RAM_ATV_LOCK_CR_LOCK_BIT_LOCK 0x1
+
+#define SCU_RAM_ATV_LOCK_SYNC_FLAG__B 1
+#define SCU_RAM_ATV_LOCK_SYNC_FLAG__W 1
+#define SCU_RAM_ATV_LOCK_SYNC_FLAG__M 0x2
+#define SCU_RAM_ATV_LOCK_SYNC_FLAG__PRE 0x0
+#define SCU_RAM_ATV_LOCK_SYNC_FLAG_NO_SYNC 0x0
+#define SCU_RAM_ATV_LOCK_SYNC_FLAG_SYNC 0x2
+
+#define SCU_RAM_ATV_CR_LOCK__A 0x831F4C
+#define SCU_RAM_ATV_CR_LOCK__W 11
+#define SCU_RAM_ATV_CR_LOCK__M 0x7FF
+#define SCU_RAM_ATV_CR_LOCK__PRE 0x0
+
+#define SCU_RAM_ATV_CR_LOCK_CR_LOCK__B 0
+#define SCU_RAM_ATV_CR_LOCK_CR_LOCK__W 11
+#define SCU_RAM_ATV_CR_LOCK_CR_LOCK__M 0x7FF
+#define SCU_RAM_ATV_CR_LOCK_CR_LOCK__PRE 0x0
+
+#define SCU_RAM_ATV_AGC_MODE__A 0x831F4D
+#define SCU_RAM_ATV_AGC_MODE__W 8
+#define SCU_RAM_ATV_AGC_MODE__M 0xFF
+#define SCU_RAM_ATV_AGC_MODE__PRE 0x0
+
+#define SCU_RAM_ATV_AGC_MODE_VAGC_VEL__B 2
+#define SCU_RAM_ATV_AGC_MODE_VAGC_VEL__W 1
+#define SCU_RAM_ATV_AGC_MODE_VAGC_VEL__M 0x4
+#define SCU_RAM_ATV_AGC_MODE_VAGC_VEL__PRE 0x0
+#define SCU_RAM_ATV_AGC_MODE_VAGC_VEL_AGC_FAST 0x0
+#define SCU_RAM_ATV_AGC_MODE_VAGC_VEL_AGC_SLOW 0x4
+
+#define SCU_RAM_ATV_AGC_MODE_BP_EN__B 3
+#define SCU_RAM_ATV_AGC_MODE_BP_EN__W 1
+#define SCU_RAM_ATV_AGC_MODE_BP_EN__M 0x8
+#define SCU_RAM_ATV_AGC_MODE_BP_EN__PRE 0x0
+#define SCU_RAM_ATV_AGC_MODE_BP_EN_BPC_DISABLE 0x0
+#define SCU_RAM_ATV_AGC_MODE_BP_EN_BPC_ENABLE 0x8
+
+#define SCU_RAM_ATV_AGC_MODE_SIF_STD__B 4
+#define SCU_RAM_ATV_AGC_MODE_SIF_STD__W 2
+#define SCU_RAM_ATV_AGC_MODE_SIF_STD__M 0x30
+#define SCU_RAM_ATV_AGC_MODE_SIF_STD__PRE 0x0
+#define SCU_RAM_ATV_AGC_MODE_SIF_STD_SIF_AGC_OFF 0x0
+#define SCU_RAM_ATV_AGC_MODE_SIF_STD_SIF_AGC_FM 0x10
+#define SCU_RAM_ATV_AGC_MODE_SIF_STD_SIF_AGC_AM 0x20
+
+#define SCU_RAM_ATV_AGC_MODE_FAST_VAGC_EN__B 6
+#define SCU_RAM_ATV_AGC_MODE_FAST_VAGC_EN__W 1
+#define SCU_RAM_ATV_AGC_MODE_FAST_VAGC_EN__M 0x40
+#define SCU_RAM_ATV_AGC_MODE_FAST_VAGC_EN__PRE 0x0
+#define SCU_RAM_ATV_AGC_MODE_FAST_VAGC_EN_FAGC_DISABLE 0x0
+#define SCU_RAM_ATV_AGC_MODE_FAST_VAGC_EN_FAGC_ENABLE 0x40
+
+#define SCU_RAM_ATV_AGC_MODE_MOD_WA_BP__B 7
+#define SCU_RAM_ATV_AGC_MODE_MOD_WA_BP__W 1
+#define SCU_RAM_ATV_AGC_MODE_MOD_WA_BP__M 0x80
+#define SCU_RAM_ATV_AGC_MODE_MOD_WA_BP__PRE 0x0
+#define SCU_RAM_ATV_AGC_MODE_MOD_WA_BP_MWA_ENABLE 0x0
+#define SCU_RAM_ATV_AGC_MODE_MOD_WA_BP_MWA_DISABLE 0x80
+
+#define SCU_RAM_ATV_RSV_01__A 0x831F4E
+#define SCU_RAM_ATV_RSV_01__W 16
+#define SCU_RAM_ATV_RSV_01__M 0xFFFF
+#define SCU_RAM_ATV_RSV_01__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_02__A 0x831F4F
+#define SCU_RAM_ATV_RSV_02__W 16
+#define SCU_RAM_ATV_RSV_02__M 0xFFFF
+#define SCU_RAM_ATV_RSV_02__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_03__A 0x831F50
+#define SCU_RAM_ATV_RSV_03__W 16
+#define SCU_RAM_ATV_RSV_03__M 0xFFFF
+#define SCU_RAM_ATV_RSV_03__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_04__A 0x831F51
+#define SCU_RAM_ATV_RSV_04__W 16
+#define SCU_RAM_ATV_RSV_04__M 0xFFFF
+#define SCU_RAM_ATV_RSV_04__PRE 0x0
+
+#define SCU_RAM_ATV_FAGC_TH_RED__A 0x831F52
+#define SCU_RAM_ATV_FAGC_TH_RED__W 8
+#define SCU_RAM_ATV_FAGC_TH_RED__M 0xFF
+#define SCU_RAM_ATV_FAGC_TH_RED__PRE 0x0
+
+#define SCU_RAM_ATV_FAGC_TH_RED_FAGC_TH_RED__B 0
+#define SCU_RAM_ATV_FAGC_TH_RED_FAGC_TH_RED__W 8
+#define SCU_RAM_ATV_FAGC_TH_RED_FAGC_TH_RED__M 0xFF
+#define SCU_RAM_ATV_FAGC_TH_RED_FAGC_TH_RED__PRE 0x0
+
+#define SCU_RAM_ATV_AMS_MAX_REF__A 0x831F53
+#define SCU_RAM_ATV_AMS_MAX_REF__W 11
+#define SCU_RAM_ATV_AMS_MAX_REF__M 0x7FF
+#define SCU_RAM_ATV_AMS_MAX_REF__PRE 0x0
+
+#define SCU_RAM_ATV_AMS_MAX_REF_AMS_MAX_REF__B 0
+#define SCU_RAM_ATV_AMS_MAX_REF_AMS_MAX_REF__W 11
+#define SCU_RAM_ATV_AMS_MAX_REF_AMS_MAX_REF__M 0x7FF
+#define SCU_RAM_ATV_AMS_MAX_REF_AMS_MAX_REF__PRE 0x0
+#define SCU_RAM_ATV_AMS_MAX_REF_AMS_MAX_REF_BG_MN 0x2BC
+#define SCU_RAM_ATV_AMS_MAX_REF_AMS_MAX_REF_DK 0x2D0
+#define SCU_RAM_ATV_AMS_MAX_REF_AMS_MAX_REF_I 0x314
+#define SCU_RAM_ATV_AMS_MAX_REF_AMS_MAX_REF_LLP 0x28A
+
+#define SCU_RAM_ATV_ACT_AMX__A 0x831F54
+#define SCU_RAM_ATV_ACT_AMX__W 11
+#define SCU_RAM_ATV_ACT_AMX__M 0x7FF
+#define SCU_RAM_ATV_ACT_AMX__PRE 0x0
+
+#define SCU_RAM_ATV_ACT_AMX_ACT_AMX__B 0
+#define SCU_RAM_ATV_ACT_AMX_ACT_AMX__W 11
+#define SCU_RAM_ATV_ACT_AMX_ACT_AMX__M 0x7FF
+#define SCU_RAM_ATV_ACT_AMX_ACT_AMX__PRE 0x0
+
+#define SCU_RAM_ATV_ACT_AMI__A 0x831F55
+#define SCU_RAM_ATV_ACT_AMI__W 11
+#define SCU_RAM_ATV_ACT_AMI__M 0x7FF
+#define SCU_RAM_ATV_ACT_AMI__PRE 0x0
+
+#define SCU_RAM_ATV_ACT_AMI_ACT_AMI__B 0
+#define SCU_RAM_ATV_ACT_AMI_ACT_AMI__W 11
+#define SCU_RAM_ATV_ACT_AMI_ACT_AMI__M 0x7FF
+#define SCU_RAM_ATV_ACT_AMI_ACT_AMI__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_05__A 0x831F56
+#define SCU_RAM_ATV_RSV_05__W 16
+#define SCU_RAM_ATV_RSV_05__M 0xFFFF
+#define SCU_RAM_ATV_RSV_05__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_06__A 0x831F57
+#define SCU_RAM_ATV_RSV_06__W 16
+#define SCU_RAM_ATV_RSV_06__M 0xFFFF
+#define SCU_RAM_ATV_RSV_06__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_07__A 0x831F58
+#define SCU_RAM_ATV_RSV_07__W 16
+#define SCU_RAM_ATV_RSV_07__M 0xFFFF
+#define SCU_RAM_ATV_RSV_07__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_08__A 0x831F59
+#define SCU_RAM_ATV_RSV_08__W 16
+#define SCU_RAM_ATV_RSV_08__M 0xFFFF
+#define SCU_RAM_ATV_RSV_08__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_09__A 0x831F5A
+#define SCU_RAM_ATV_RSV_09__W 16
+#define SCU_RAM_ATV_RSV_09__M 0xFFFF
+#define SCU_RAM_ATV_RSV_09__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_10__A 0x831F5B
+#define SCU_RAM_ATV_RSV_10__W 16
+#define SCU_RAM_ATV_RSV_10__M 0xFFFF
+#define SCU_RAM_ATV_RSV_10__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_11__A 0x831F5C
+#define SCU_RAM_ATV_RSV_11__W 16
+#define SCU_RAM_ATV_RSV_11__M 0xFFFF
+#define SCU_RAM_ATV_RSV_11__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_12__A 0x831F5D
+#define SCU_RAM_ATV_RSV_12__W 16
+#define SCU_RAM_ATV_RSV_12__M 0xFFFF
+#define SCU_RAM_ATV_RSV_12__PRE 0x0
+
+#define SCU_RAM_ATV_VID_GAIN_HI__A 0x831F5E
+#define SCU_RAM_ATV_VID_GAIN_HI__W 16
+#define SCU_RAM_ATV_VID_GAIN_HI__M 0xFFFF
+#define SCU_RAM_ATV_VID_GAIN_HI__PRE 0x0
+
+#define SCU_RAM_ATV_VID_GAIN_HI_VID_GAIN_HI__B 0
+#define SCU_RAM_ATV_VID_GAIN_HI_VID_GAIN_HI__W 16
+#define SCU_RAM_ATV_VID_GAIN_HI_VID_GAIN_HI__M 0xFFFF
+#define SCU_RAM_ATV_VID_GAIN_HI_VID_GAIN_HI__PRE 0x0
+
+#define SCU_RAM_ATV_VID_GAIN_LO__A 0x831F5F
+#define SCU_RAM_ATV_VID_GAIN_LO__W 8
+#define SCU_RAM_ATV_VID_GAIN_LO__M 0xFF
+#define SCU_RAM_ATV_VID_GAIN_LO__PRE 0x0
+
+#define SCU_RAM_ATV_VID_GAIN_LO_VID_GAIN_LO__B 0
+#define SCU_RAM_ATV_VID_GAIN_LO_VID_GAIN_LO__W 8
+#define SCU_RAM_ATV_VID_GAIN_LO_VID_GAIN_LO__M 0xFF
+#define SCU_RAM_ATV_VID_GAIN_LO_VID_GAIN_LO__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_13__A 0x831F60
+#define SCU_RAM_ATV_RSV_13__W 16
+#define SCU_RAM_ATV_RSV_13__M 0xFFFF
+#define SCU_RAM_ATV_RSV_13__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_14__A 0x831F61
+#define SCU_RAM_ATV_RSV_14__W 16
+#define SCU_RAM_ATV_RSV_14__M 0xFFFF
+#define SCU_RAM_ATV_RSV_14__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_15__A 0x831F62
+#define SCU_RAM_ATV_RSV_15__W 16
+#define SCU_RAM_ATV_RSV_15__M 0xFFFF
+#define SCU_RAM_ATV_RSV_15__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_16__A 0x831F63
+#define SCU_RAM_ATV_RSV_16__W 16
+#define SCU_RAM_ATV_RSV_16__M 0xFFFF
+#define SCU_RAM_ATV_RSV_16__PRE 0x0
+
+#define SCU_RAM_ATV_AAGC_CNT__A 0x831F64
+#define SCU_RAM_ATV_AAGC_CNT__W 8
+#define SCU_RAM_ATV_AAGC_CNT__M 0xFF
+#define SCU_RAM_ATV_AAGC_CNT__PRE 0x0
+
+#define SCU_RAM_ATV_AAGC_CNT_AAGC_CNT__B 0
+#define SCU_RAM_ATV_AAGC_CNT_AAGC_CNT__W 8
+#define SCU_RAM_ATV_AAGC_CNT_AAGC_CNT__M 0xFF
+#define SCU_RAM_ATV_AAGC_CNT_AAGC_CNT__PRE 0x0
+
+#define SCU_RAM_ATV_SIF_GAIN__A 0x831F65
+#define SCU_RAM_ATV_SIF_GAIN__W 11
+#define SCU_RAM_ATV_SIF_GAIN__M 0x7FF
+#define SCU_RAM_ATV_SIF_GAIN__PRE 0x0
+
+#define SCU_RAM_ATV_SIF_GAIN_SIF_GAIN__B 0
+#define SCU_RAM_ATV_SIF_GAIN_SIF_GAIN__W 11
+#define SCU_RAM_ATV_SIF_GAIN_SIF_GAIN__M 0x7FF
+#define SCU_RAM_ATV_SIF_GAIN_SIF_GAIN__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_17__A 0x831F66
+#define SCU_RAM_ATV_RSV_17__W 16
+#define SCU_RAM_ATV_RSV_17__M 0xFFFF
+#define SCU_RAM_ATV_RSV_17__PRE 0x0
+
+#define SCU_RAM_ATV_RSV_18__A 0x831F67
+#define SCU_RAM_ATV_RSV_18__W 16
+#define SCU_RAM_ATV_RSV_18__M 0xFFFF
+#define SCU_RAM_ATV_RSV_18__PRE 0x0
+
+#define SCU_RAM_ATV_RATE_OFS__A 0x831F68
+#define SCU_RAM_ATV_RATE_OFS__W 12
+#define SCU_RAM_ATV_RATE_OFS__M 0xFFF
+#define SCU_RAM_ATV_RATE_OFS__PRE 0x0
+
+#define SCU_RAM_ATV_LO_INCR__A 0x831F69
+#define SCU_RAM_ATV_LO_INCR__W 12
+#define SCU_RAM_ATV_LO_INCR__M 0xFFF
+#define SCU_RAM_ATV_LO_INCR__PRE 0x0
+
+#define SCU_RAM_ATV_IIR_CRIT__A 0x831F6A
+#define SCU_RAM_ATV_IIR_CRIT__W 12
+#define SCU_RAM_ATV_IIR_CRIT__M 0xFFF
+#define SCU_RAM_ATV_IIR_CRIT__PRE 0x0
+
+#define SCU_RAM_ATV_DEF_RATE_OFS__A 0x831F6B
+#define SCU_RAM_ATV_DEF_RATE_OFS__W 12
+#define SCU_RAM_ATV_DEF_RATE_OFS__M 0xFFF
+#define SCU_RAM_ATV_DEF_RATE_OFS__PRE 0x0
+
+#define SCU_RAM_ATV_DEF_LO_INCR__A 0x831F6C
+#define SCU_RAM_ATV_DEF_LO_INCR__W 12
+#define SCU_RAM_ATV_DEF_LO_INCR__M 0xFFF
+#define SCU_RAM_ATV_DEF_LO_INCR__PRE 0x0
+
+#define SCU_RAM_ATV_ENABLE_IIR_WA__A 0x831F6D
+#define SCU_RAM_ATV_ENABLE_IIR_WA__W 1
+#define SCU_RAM_ATV_ENABLE_IIR_WA__M 0x1
+#define SCU_RAM_ATV_ENABLE_IIR_WA__PRE 0x0
+
+#define SCU_RAM_ATV_MOD_CONTROL__A 0x831F6E
+#define SCU_RAM_ATV_MOD_CONTROL__W 12
+#define SCU_RAM_ATV_MOD_CONTROL__M 0xFFF
+#define SCU_RAM_ATV_MOD_CONTROL__PRE 0x0
+
+#define SCU_RAM_ATV_PAGC_KI_MAX__A 0x831F6F
+#define SCU_RAM_ATV_PAGC_KI_MAX__W 12
+#define SCU_RAM_ATV_PAGC_KI_MAX__M 0xFFF
+#define SCU_RAM_ATV_PAGC_KI_MAX__PRE 0x0
+
+#define SCU_RAM_ATV_BPC_KI_MAX__A 0x831F70
+#define SCU_RAM_ATV_BPC_KI_MAX__W 12
+#define SCU_RAM_ATV_BPC_KI_MAX__M 0xFFF
+#define SCU_RAM_ATV_BPC_KI_MAX__PRE 0x0
+
+#define SCU_RAM_ATV_NAGC_KI_MAX__A 0x831F71
+#define SCU_RAM_ATV_NAGC_KI_MAX__W 12
+#define SCU_RAM_ATV_NAGC_KI_MAX__M 0xFFF
+#define SCU_RAM_ATV_NAGC_KI_MAX__PRE 0x0
+
+#define SCU_RAM_ATV_NAGC_KI_MIN__A 0x831F72
+#define SCU_RAM_ATV_NAGC_KI_MIN__W 12
+#define SCU_RAM_ATV_NAGC_KI_MIN__M 0xFFF
+#define SCU_RAM_ATV_NAGC_KI_MIN__PRE 0x0
+
+#define SCU_RAM_ATV_NAGC_KI_MIN_NAGC_KI_MIN__B 0
+#define SCU_RAM_ATV_NAGC_KI_MIN_NAGC_KI_MIN__W 12
+#define SCU_RAM_ATV_NAGC_KI_MIN_NAGC_KI_MIN__M 0xFFF
+#define SCU_RAM_ATV_NAGC_KI_MIN_NAGC_KI_MIN__PRE 0x0
+
+#define SCU_RAM_ATV_KI_CHANGE_TH__A 0x831F73
+#define SCU_RAM_ATV_KI_CHANGE_TH__W 8
+#define SCU_RAM_ATV_KI_CHANGE_TH__M 0xFF
+#define SCU_RAM_ATV_KI_CHANGE_TH__PRE 0x0
+
+#define SCU_RAM_ATV_KI_CHANGE_TH_KI_CHANGE_TH__B 0
+#define SCU_RAM_ATV_KI_CHANGE_TH_KI_CHANGE_TH__W 8
+#define SCU_RAM_ATV_KI_CHANGE_TH_KI_CHANGE_TH__M 0xFF
+#define SCU_RAM_ATV_KI_CHANGE_TH_KI_CHANGE_TH__PRE 0x0
+#define SCU_RAM_ATV_KI_CHANGE_TH_KI_CHANGE_TH_NEG_MOD 0x14
+#define SCU_RAM_ATV_KI_CHANGE_TH_KI_CHANGE_TH_POS_MOD 0x28
+
+#define SCU_RAM_QAM_PARAM_ANNEX__A 0x831F74
+#define SCU_RAM_QAM_PARAM_ANNEX__W 2
+#define SCU_RAM_QAM_PARAM_ANNEX__M 0x3
+#define SCU_RAM_QAM_PARAM_ANNEX__PRE 0x0
+
+#define SCU_RAM_QAM_PARAM_ANNEX_BIT__B 0
+#define SCU_RAM_QAM_PARAM_ANNEX_BIT__W 2
+#define SCU_RAM_QAM_PARAM_ANNEX_BIT__M 0x3
+#define SCU_RAM_QAM_PARAM_ANNEX_BIT__PRE 0x0
+#define SCU_RAM_QAM_PARAM_ANNEX_BIT_ANNEX_A 0x0
+#define SCU_RAM_QAM_PARAM_ANNEX_BIT_ANNEX_B 0x1
+#define SCU_RAM_QAM_PARAM_ANNEX_BIT_ANNEX_C 0x2
+#define SCU_RAM_QAM_PARAM_ANNEX_BIT_ANNEX_D 0x3
+
+#define SCU_RAM_QAM_PARAM_CONSTELLATION__A 0x831F75
+#define SCU_RAM_QAM_PARAM_CONSTELLATION__W 3
+#define SCU_RAM_QAM_PARAM_CONSTELLATION__M 0x7
+#define SCU_RAM_QAM_PARAM_CONSTELLATION__PRE 0x0
+
+#define SCU_RAM_QAM_PARAM_CONSTELLATION_BIT__B 0
+#define SCU_RAM_QAM_PARAM_CONSTELLATION_BIT__W 3
+#define SCU_RAM_QAM_PARAM_CONSTELLATION_BIT__M 0x7
+#define SCU_RAM_QAM_PARAM_CONSTELLATION_BIT__PRE 0x0
+#define SCU_RAM_QAM_PARAM_CONSTELLATION_BIT_UNKNOWN 0x0
+#define SCU_RAM_QAM_PARAM_CONSTELLATION_BIT_QAM_16 0x3
+#define SCU_RAM_QAM_PARAM_CONSTELLATION_BIT_QAM_32 0x4
+#define SCU_RAM_QAM_PARAM_CONSTELLATION_BIT_QAM_64 0x5
+#define SCU_RAM_QAM_PARAM_CONSTELLATION_BIT_QAM_128 0x6
+#define SCU_RAM_QAM_PARAM_CONSTELLATION_BIT_QAM_256 0x7
+
+#define SCU_RAM_QAM_PARAM_INTERLEAVE__A 0x831F76
+#define SCU_RAM_QAM_PARAM_INTERLEAVE__W 8
+#define SCU_RAM_QAM_PARAM_INTERLEAVE__M 0xFF
+#define SCU_RAM_QAM_PARAM_INTERLEAVE__PRE 0x0
+
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT__B 0
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT__W 8
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT__M 0xFF
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT__PRE 0x0
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I128_J1 0x0
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I128_J1_V2 0x1
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I128_J2 0x2
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I64_J2 0x3
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I128_J3 0x4
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I32_J4 0x5
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I128_J4 0x6
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I16_J8 0x7
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I128_J5 0x8
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I8_J16 0x9
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I128_J6 0xA
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I128_J7 0xC
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I128_J8 0xE
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I12_J17 0x10
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_I5_J4 0x11
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_UNKNOWN 0xFE
+#define SCU_RAM_QAM_PARAM_INTERLEAVE_BIT_AUTO 0xFF
+
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_HI__A 0x831F77
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_HI__W 16
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_HI__M 0xFFFF
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_HI__PRE 0x0
+
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_HI_BIT__B 0
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_HI_BIT__W 16
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_HI_BIT__M 0xFFFF
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_HI_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_LO__A 0x831F78
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_LO__W 16
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_LO__M 0xFFFF
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_LO__PRE 0x0
+
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_LO_BIT__B 0
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_LO_BIT__W 16
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_LO_BIT__M 0xFFFF
+#define SCU_RAM_QAM_PARAM_SYM_RCRATE_LO_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_CENTERTAP__A 0x831F79
+#define SCU_RAM_QAM_EQ_CENTERTAP__W 16
+#define SCU_RAM_QAM_EQ_CENTERTAP__M 0xFFFF
+#define SCU_RAM_QAM_EQ_CENTERTAP__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_CENTERTAP_BIT__B 0
+#define SCU_RAM_QAM_EQ_CENTERTAP_BIT__W 8
+#define SCU_RAM_QAM_EQ_CENTERTAP_BIT__M 0xFF
+#define SCU_RAM_QAM_EQ_CENTERTAP_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_0__A 0x831F7A
+#define SCU_RAM_QAM_WR_RSV_0__W 16
+#define SCU_RAM_QAM_WR_RSV_0__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_0__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_0_BIT__B 0
+#define SCU_RAM_QAM_WR_RSV_0_BIT__W 16
+#define SCU_RAM_QAM_WR_RSV_0_BIT__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_0_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_HI__A 0x831F7B
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_HI__W 16
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_HI__M 0xFFFF
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_HI__PRE 0x0
+
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_HI_BIT__B 0
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_HI_BIT__W 16
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_HI_BIT__M 0xFFFF
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_HI_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_LO__A 0x831F7C
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_LO__W 16
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_LO__M 0xFFFF
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_LO__PRE 0x0
+
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_LO_BIT__B 0
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_LO_BIT__W 16
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_LO_BIT__M 0xFFFF
+#define SCU_RAM_QAM_PARAM_ALT_RCRATE_LO_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_5__A 0x831F7D
+#define SCU_RAM_QAM_WR_RSV_5__W 16
+#define SCU_RAM_QAM_WR_RSV_5__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_5__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_5_BIT__B 0
+#define SCU_RAM_QAM_WR_RSV_5_BIT__W 16
+#define SCU_RAM_QAM_WR_RSV_5_BIT__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_5_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_6__A 0x831F7E
+#define SCU_RAM_QAM_WR_RSV_6__W 16
+#define SCU_RAM_QAM_WR_RSV_6__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_6__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_6_BIT__B 0
+#define SCU_RAM_QAM_WR_RSV_6_BIT__W 16
+#define SCU_RAM_QAM_WR_RSV_6_BIT__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_6_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_7__A 0x831F7F
+#define SCU_RAM_QAM_WR_RSV_7__W 16
+#define SCU_RAM_QAM_WR_RSV_7__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_7__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_7_BIT__B 0
+#define SCU_RAM_QAM_WR_RSV_7_BIT__W 16
+#define SCU_RAM_QAM_WR_RSV_7_BIT__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_7_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_8__A 0x831F80
+#define SCU_RAM_QAM_WR_RSV_8__W 16
+#define SCU_RAM_QAM_WR_RSV_8__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_8__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_8_BIT__B 0
+#define SCU_RAM_QAM_WR_RSV_8_BIT__W 16
+#define SCU_RAM_QAM_WR_RSV_8_BIT__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_8_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_9__A 0x831F81
+#define SCU_RAM_QAM_WR_RSV_9__W 16
+#define SCU_RAM_QAM_WR_RSV_9__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_9__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_9_BIT__B 0
+#define SCU_RAM_QAM_WR_RSV_9_BIT__W 16
+#define SCU_RAM_QAM_WR_RSV_9_BIT__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_9_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_10__A 0x831F82
+#define SCU_RAM_QAM_WR_RSV_10__W 16
+#define SCU_RAM_QAM_WR_RSV_10__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_10__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_10_BIT__B 0
+#define SCU_RAM_QAM_WR_RSV_10_BIT__W 16
+#define SCU_RAM_QAM_WR_RSV_10_BIT__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_10_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_FMHUM_TO__A 0x831F83
+#define SCU_RAM_QAM_FSM_FMHUM_TO__W 16
+#define SCU_RAM_QAM_FSM_FMHUM_TO__M 0xFFFF
+#define SCU_RAM_QAM_FSM_FMHUM_TO__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_FMHUM_TO_BIT__B 0
+#define SCU_RAM_QAM_FSM_FMHUM_TO_BIT__W 16
+#define SCU_RAM_QAM_FSM_FMHUM_TO_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_FMHUM_TO_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_FMHUM_TO_BIT_NO_FMHUM_TO 0x0
+
+#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A 0x831F84
+#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__W 16
+#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT_BIT__B 0
+#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT_BIT__W 16
+#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A 0x831F85
+#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__W 16
+#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT_BIT__B 0
+#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT_BIT__W 16
+#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A 0x831F86
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1__W 16
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1__M 0xFFFF
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1_BIT__B 0
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1_BIT__W 16
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A 0x831F87
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2__W 16
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2__M 0xFFFF
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2_BIT__B 0
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2_BIT__W 16
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A 0x831F88
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3__W 16
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3__M 0xFFFF
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3_BIT__B 0
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3_BIT__W 16
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A 0x831F89
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4__W 16
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4__M 0xFFFF
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4_BIT__B 0
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4_BIT__W 16
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A 0x831F8A
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5__W 16
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5__M 0xFFFF
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5_BIT__B 0
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5_BIT__W 16
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_STATE_TGT__A 0x831F8B
+#define SCU_RAM_QAM_FSM_STATE_TGT__W 4
+#define SCU_RAM_QAM_FSM_STATE_TGT__M 0xF
+#define SCU_RAM_QAM_FSM_STATE_TGT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT__B 0
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT__W 4
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT__M 0xF
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT_HUNTING_AMP 0x0
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT_HUNTING_RATE 0x1
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT_HUNTING_FREQ 0x2
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT_HUNTING_UPRIGHT 0x3
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT_HUNTING_PHASE 0x4
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT_TRACKING_PHNOISE 0x5
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT_TRACKING 0x6
+#define SCU_RAM_QAM_FSM_STATE_TGT_BIT_TRACKING_BURST 0x7
+
+#define SCU_RAM_QAM_FSM_LOCK_OVERRIDE__A 0x831F8C
+#define SCU_RAM_QAM_FSM_LOCK_OVERRIDE__W 9
+#define SCU_RAM_QAM_FSM_LOCK_OVERRIDE__M 0x1FF
+#define SCU_RAM_QAM_FSM_LOCK_OVERRIDE__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LOCK_OVERRIDE_LCK_AMP__B 0
+#define SCU_RAM_QAM_FSM_LOCK_OVERRIDE_LCK_AMP__W 1
+#define SCU_RAM_QAM_FSM_LOCK_OVERRIDE_LCK_AMP__M 0x1
+#define SCU_RAM_QAM_FSM_LOCK_OVERRIDE_LCK_AMP__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_ATH__A 0x831F8D
+#define SCU_RAM_QAM_FSM_ATH__W 16
+#define SCU_RAM_QAM_FSM_ATH__M 0xFFFF
+#define SCU_RAM_QAM_FSM_ATH__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_ATH_BIT__B 0
+#define SCU_RAM_QAM_FSM_ATH_BIT__W 16
+#define SCU_RAM_QAM_FSM_ATH_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_ATH_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_RTH__A 0x831F8E
+#define SCU_RAM_QAM_FSM_RTH__W 16
+#define SCU_RAM_QAM_FSM_RTH__M 0xFFFF
+#define SCU_RAM_QAM_FSM_RTH__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_RTH_BIT__B 0
+#define SCU_RAM_QAM_FSM_RTH_BIT__W 16
+#define SCU_RAM_QAM_FSM_RTH_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_RTH_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_RTH_BIT_QAM_16 0x8C
+#define SCU_RAM_QAM_FSM_RTH_BIT_QAM_32 0x50
+#define SCU_RAM_QAM_FSM_RTH_BIT_QAM_64 0x4E
+#define SCU_RAM_QAM_FSM_RTH_BIT_QAM_128 0x32
+#define SCU_RAM_QAM_FSM_RTH_BIT_QAM_256 0x2D
+
+#define SCU_RAM_QAM_FSM_FTH__A 0x831F8F
+#define SCU_RAM_QAM_FSM_FTH__W 16
+#define SCU_RAM_QAM_FSM_FTH__M 0xFFFF
+#define SCU_RAM_QAM_FSM_FTH__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_FTH_BIT__B 0
+#define SCU_RAM_QAM_FSM_FTH_BIT__W 16
+#define SCU_RAM_QAM_FSM_FTH_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_FTH_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_FTH_BIT_QAM_16 0x32
+#define SCU_RAM_QAM_FSM_FTH_BIT_QAM_32 0x1E
+#define SCU_RAM_QAM_FSM_FTH_BIT_QAM_64 0x1E
+#define SCU_RAM_QAM_FSM_FTH_BIT_QAM_128 0x14
+#define SCU_RAM_QAM_FSM_FTH_BIT_QAM_256 0x14
+
+#define SCU_RAM_QAM_FSM_PTH__A 0x831F90
+#define SCU_RAM_QAM_FSM_PTH__W 16
+#define SCU_RAM_QAM_FSM_PTH__M 0xFFFF
+#define SCU_RAM_QAM_FSM_PTH__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_PTH_BIT__B 0
+#define SCU_RAM_QAM_FSM_PTH_BIT__W 16
+#define SCU_RAM_QAM_FSM_PTH_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_PTH_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_PTH_BIT_QAM_16 0xC8
+#define SCU_RAM_QAM_FSM_PTH_BIT_QAM_32 0x96
+#define SCU_RAM_QAM_FSM_PTH_BIT_QAM_64 0x8C
+#define SCU_RAM_QAM_FSM_PTH_BIT_QAM_128 0x64
+#define SCU_RAM_QAM_FSM_PTH_BIT_QAM_256 0x64
+
+#define SCU_RAM_QAM_FSM_MTH__A 0x831F91
+#define SCU_RAM_QAM_FSM_MTH__W 16
+#define SCU_RAM_QAM_FSM_MTH__M 0xFFFF
+#define SCU_RAM_QAM_FSM_MTH__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_MTH_BIT__B 0
+#define SCU_RAM_QAM_FSM_MTH_BIT__W 16
+#define SCU_RAM_QAM_FSM_MTH_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_MTH_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_MTH_BIT_QAM_16 0x5A
+#define SCU_RAM_QAM_FSM_MTH_BIT_QAM_32 0x50
+#define SCU_RAM_QAM_FSM_MTH_BIT_QAM_64 0x46
+#define SCU_RAM_QAM_FSM_MTH_BIT_QAM_128 0x3C
+#define SCU_RAM_QAM_FSM_MTH_BIT_QAM_256 0x50
+
+#define SCU_RAM_QAM_FSM_CTH__A 0x831F92
+#define SCU_RAM_QAM_FSM_CTH__W 16
+#define SCU_RAM_QAM_FSM_CTH__M 0xFFFF
+#define SCU_RAM_QAM_FSM_CTH__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_CTH_BIT__B 0
+#define SCU_RAM_QAM_FSM_CTH_BIT__W 16
+#define SCU_RAM_QAM_FSM_CTH_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_CTH_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_CTH_BIT_QAM_16 0xA0
+#define SCU_RAM_QAM_FSM_CTH_BIT_QAM_32 0x8C
+#define SCU_RAM_QAM_FSM_CTH_BIT_QAM_64 0x8C
+#define SCU_RAM_QAM_FSM_CTH_BIT_QAM_128 0x8C
+#define SCU_RAM_QAM_FSM_CTH_BIT_QAM_256 0x8C
+
+#define SCU_RAM_QAM_FSM_QTH__A 0x831F93
+#define SCU_RAM_QAM_FSM_QTH__W 16
+#define SCU_RAM_QAM_FSM_QTH__M 0xFFFF
+#define SCU_RAM_QAM_FSM_QTH__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_QTH_BIT__B 0
+#define SCU_RAM_QAM_FSM_QTH_BIT__W 16
+#define SCU_RAM_QAM_FSM_QTH_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_QTH_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_QTH_BIT_QAM_16 0xE6
+#define SCU_RAM_QAM_FSM_QTH_BIT_QAM_32 0xAA
+#define SCU_RAM_QAM_FSM_QTH_BIT_QAM_64 0xC3
+#define SCU_RAM_QAM_FSM_QTH_BIT_QAM_128 0x8C
+#define SCU_RAM_QAM_FSM_QTH_BIT_QAM_256 0x96
+
+#define SCU_RAM_QAM_FSM_RATE_LIM__A 0x831F94
+#define SCU_RAM_QAM_FSM_RATE_LIM__W 16
+#define SCU_RAM_QAM_FSM_RATE_LIM__M 0xFFFF
+#define SCU_RAM_QAM_FSM_RATE_LIM__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_RATE_LIM_BIT__B 0
+#define SCU_RAM_QAM_FSM_RATE_LIM_BIT__W 16
+#define SCU_RAM_QAM_FSM_RATE_LIM_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_RATE_LIM_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_RATE_LIM_BIT_QAM_16 0x46
+#define SCU_RAM_QAM_FSM_RATE_LIM_BIT_QAM_32 0x46
+#define SCU_RAM_QAM_FSM_RATE_LIM_BIT_QAM_64 0x46
+#define SCU_RAM_QAM_FSM_RATE_LIM_BIT_QAM_128 0x46
+#define SCU_RAM_QAM_FSM_RATE_LIM_BIT_QAM_256 0x46
+
+#define SCU_RAM_QAM_FSM_FREQ_LIM__A 0x831F95
+#define SCU_RAM_QAM_FSM_FREQ_LIM__W 16
+#define SCU_RAM_QAM_FSM_FREQ_LIM__M 0xFFFF
+#define SCU_RAM_QAM_FSM_FREQ_LIM__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_FREQ_LIM_BIT__B 0
+#define SCU_RAM_QAM_FSM_FREQ_LIM_BIT__W 16
+#define SCU_RAM_QAM_FSM_FREQ_LIM_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_FREQ_LIM_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_FREQ_LIM_BIT_QAM_16 0x1E
+#define SCU_RAM_QAM_FSM_FREQ_LIM_BIT_QAM_32 0x14
+#define SCU_RAM_QAM_FSM_FREQ_LIM_BIT_QAM_64 0x28
+#define SCU_RAM_QAM_FSM_FREQ_LIM_BIT_QAM_128 0x8
+#define SCU_RAM_QAM_FSM_FREQ_LIM_BIT_QAM_256 0x28
+
+#define SCU_RAM_QAM_FSM_COUNT_LIM__A 0x831F96
+#define SCU_RAM_QAM_FSM_COUNT_LIM__W 16
+#define SCU_RAM_QAM_FSM_COUNT_LIM__M 0xFFFF
+#define SCU_RAM_QAM_FSM_COUNT_LIM__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_COUNT_LIM_BIT__B 0
+#define SCU_RAM_QAM_FSM_COUNT_LIM_BIT__W 16
+#define SCU_RAM_QAM_FSM_COUNT_LIM_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_COUNT_LIM_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_COUNT_LIM_BIT_QAM_16 0x4
+#define SCU_RAM_QAM_FSM_COUNT_LIM_BIT_QAM_32 0x6
+#define SCU_RAM_QAM_FSM_COUNT_LIM_BIT_QAM_64 0x6
+#define SCU_RAM_QAM_FSM_COUNT_LIM_BIT_QAM_128 0x7
+#define SCU_RAM_QAM_FSM_COUNT_LIM_BIT_QAM_256 0x6
+
+#define SCU_RAM_QAM_LC_CA_COARSE__A 0x831F97
+#define SCU_RAM_QAM_LC_CA_COARSE__W 16
+#define SCU_RAM_QAM_LC_CA_COARSE__M 0xFFFF
+#define SCU_RAM_QAM_LC_CA_COARSE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CA_COARSE_BIT__B 0
+#define SCU_RAM_QAM_LC_CA_COARSE_BIT__W 8
+#define SCU_RAM_QAM_LC_CA_COARSE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CA_COARSE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CA_MEDIUM__A 0x831F98
+#define SCU_RAM_QAM_LC_CA_MEDIUM__W 16
+#define SCU_RAM_QAM_LC_CA_MEDIUM__M 0xFFFF
+#define SCU_RAM_QAM_LC_CA_MEDIUM__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CA_MEDIUM_BIT__B 0
+#define SCU_RAM_QAM_LC_CA_MEDIUM_BIT__W 8
+#define SCU_RAM_QAM_LC_CA_MEDIUM_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CA_MEDIUM_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CA_FINE__A 0x831F99
+#define SCU_RAM_QAM_LC_CA_FINE__W 16
+#define SCU_RAM_QAM_LC_CA_FINE__M 0xFFFF
+#define SCU_RAM_QAM_LC_CA_FINE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CA_FINE_BIT__B 0
+#define SCU_RAM_QAM_LC_CA_FINE_BIT__W 8
+#define SCU_RAM_QAM_LC_CA_FINE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CA_FINE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CP_COARSE__A 0x831F9A
+#define SCU_RAM_QAM_LC_CP_COARSE__W 16
+#define SCU_RAM_QAM_LC_CP_COARSE__M 0xFFFF
+#define SCU_RAM_QAM_LC_CP_COARSE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CP_COARSE_BIT__B 0
+#define SCU_RAM_QAM_LC_CP_COARSE_BIT__W 8
+#define SCU_RAM_QAM_LC_CP_COARSE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CP_COARSE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CP_MEDIUM__A 0x831F9B
+#define SCU_RAM_QAM_LC_CP_MEDIUM__W 16
+#define SCU_RAM_QAM_LC_CP_MEDIUM__M 0xFFFF
+#define SCU_RAM_QAM_LC_CP_MEDIUM__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CP_MEDIUM_BIT__B 0
+#define SCU_RAM_QAM_LC_CP_MEDIUM_BIT__W 8
+#define SCU_RAM_QAM_LC_CP_MEDIUM_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CP_MEDIUM_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CP_FINE__A 0x831F9C
+#define SCU_RAM_QAM_LC_CP_FINE__W 16
+#define SCU_RAM_QAM_LC_CP_FINE__M 0xFFFF
+#define SCU_RAM_QAM_LC_CP_FINE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CP_FINE_BIT__B 0
+#define SCU_RAM_QAM_LC_CP_FINE_BIT__W 8
+#define SCU_RAM_QAM_LC_CP_FINE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CP_FINE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CI_COARSE__A 0x831F9D
+#define SCU_RAM_QAM_LC_CI_COARSE__W 16
+#define SCU_RAM_QAM_LC_CI_COARSE__M 0xFFFF
+#define SCU_RAM_QAM_LC_CI_COARSE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CI_COARSE_BIT__B 0
+#define SCU_RAM_QAM_LC_CI_COARSE_BIT__W 8
+#define SCU_RAM_QAM_LC_CI_COARSE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CI_COARSE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CI_MEDIUM__A 0x831F9E
+#define SCU_RAM_QAM_LC_CI_MEDIUM__W 16
+#define SCU_RAM_QAM_LC_CI_MEDIUM__M 0xFFFF
+#define SCU_RAM_QAM_LC_CI_MEDIUM__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CI_MEDIUM_BIT__B 0
+#define SCU_RAM_QAM_LC_CI_MEDIUM_BIT__W 8
+#define SCU_RAM_QAM_LC_CI_MEDIUM_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CI_MEDIUM_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CI_FINE__A 0x831F9F
+#define SCU_RAM_QAM_LC_CI_FINE__W 16
+#define SCU_RAM_QAM_LC_CI_FINE__M 0xFFFF
+#define SCU_RAM_QAM_LC_CI_FINE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CI_FINE_BIT__B 0
+#define SCU_RAM_QAM_LC_CI_FINE_BIT__W 8
+#define SCU_RAM_QAM_LC_CI_FINE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CI_FINE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EP_COARSE__A 0x831FA0
+#define SCU_RAM_QAM_LC_EP_COARSE__W 16
+#define SCU_RAM_QAM_LC_EP_COARSE__M 0xFFFF
+#define SCU_RAM_QAM_LC_EP_COARSE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EP_COARSE_BIT__B 0
+#define SCU_RAM_QAM_LC_EP_COARSE_BIT__W 8
+#define SCU_RAM_QAM_LC_EP_COARSE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_EP_COARSE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EP_MEDIUM__A 0x831FA1
+#define SCU_RAM_QAM_LC_EP_MEDIUM__W 16
+#define SCU_RAM_QAM_LC_EP_MEDIUM__M 0xFFFF
+#define SCU_RAM_QAM_LC_EP_MEDIUM__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EP_MEDIUM_BIT__B 0
+#define SCU_RAM_QAM_LC_EP_MEDIUM_BIT__W 8
+#define SCU_RAM_QAM_LC_EP_MEDIUM_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_EP_MEDIUM_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EP_FINE__A 0x831FA2
+#define SCU_RAM_QAM_LC_EP_FINE__W 16
+#define SCU_RAM_QAM_LC_EP_FINE__M 0xFFFF
+#define SCU_RAM_QAM_LC_EP_FINE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EP_FINE_BIT__B 0
+#define SCU_RAM_QAM_LC_EP_FINE_BIT__W 8
+#define SCU_RAM_QAM_LC_EP_FINE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_EP_FINE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EI_COARSE__A 0x831FA3
+#define SCU_RAM_QAM_LC_EI_COARSE__W 16
+#define SCU_RAM_QAM_LC_EI_COARSE__M 0xFFFF
+#define SCU_RAM_QAM_LC_EI_COARSE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EI_COARSE_BIT__B 0
+#define SCU_RAM_QAM_LC_EI_COARSE_BIT__W 8
+#define SCU_RAM_QAM_LC_EI_COARSE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_EI_COARSE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EI_MEDIUM__A 0x831FA4
+#define SCU_RAM_QAM_LC_EI_MEDIUM__W 16
+#define SCU_RAM_QAM_LC_EI_MEDIUM__M 0xFFFF
+#define SCU_RAM_QAM_LC_EI_MEDIUM__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EI_MEDIUM_BIT__B 0
+#define SCU_RAM_QAM_LC_EI_MEDIUM_BIT__W 8
+#define SCU_RAM_QAM_LC_EI_MEDIUM_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_EI_MEDIUM_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EI_FINE__A 0x831FA5
+#define SCU_RAM_QAM_LC_EI_FINE__W 16
+#define SCU_RAM_QAM_LC_EI_FINE__M 0xFFFF
+#define SCU_RAM_QAM_LC_EI_FINE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_EI_FINE_BIT__B 0
+#define SCU_RAM_QAM_LC_EI_FINE_BIT__W 8
+#define SCU_RAM_QAM_LC_EI_FINE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_EI_FINE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF_COARSE__A 0x831FA6
+#define SCU_RAM_QAM_LC_CF_COARSE__W 16
+#define SCU_RAM_QAM_LC_CF_COARSE__M 0xFFFF
+#define SCU_RAM_QAM_LC_CF_COARSE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF_COARSE_BIT__B 0
+#define SCU_RAM_QAM_LC_CF_COARSE_BIT__W 8
+#define SCU_RAM_QAM_LC_CF_COARSE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CF_COARSE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF_MEDIUM__A 0x831FA7
+#define SCU_RAM_QAM_LC_CF_MEDIUM__W 16
+#define SCU_RAM_QAM_LC_CF_MEDIUM__M 0xFFFF
+#define SCU_RAM_QAM_LC_CF_MEDIUM__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF_MEDIUM_BIT__B 0
+#define SCU_RAM_QAM_LC_CF_MEDIUM_BIT__W 8
+#define SCU_RAM_QAM_LC_CF_MEDIUM_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CF_MEDIUM_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF_FINE__A 0x831FA8
+#define SCU_RAM_QAM_LC_CF_FINE__W 16
+#define SCU_RAM_QAM_LC_CF_FINE__M 0xFFFF
+#define SCU_RAM_QAM_LC_CF_FINE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF_FINE_BIT__B 0
+#define SCU_RAM_QAM_LC_CF_FINE_BIT__W 8
+#define SCU_RAM_QAM_LC_CF_FINE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CF_FINE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF1_COARSE__A 0x831FA9
+#define SCU_RAM_QAM_LC_CF1_COARSE__W 16
+#define SCU_RAM_QAM_LC_CF1_COARSE__M 0xFFFF
+#define SCU_RAM_QAM_LC_CF1_COARSE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF1_COARSE_BIT__B 0
+#define SCU_RAM_QAM_LC_CF1_COARSE_BIT__W 8
+#define SCU_RAM_QAM_LC_CF1_COARSE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CF1_COARSE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF1_MEDIUM__A 0x831FAA
+#define SCU_RAM_QAM_LC_CF1_MEDIUM__W 16
+#define SCU_RAM_QAM_LC_CF1_MEDIUM__M 0xFFFF
+#define SCU_RAM_QAM_LC_CF1_MEDIUM__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF1_MEDIUM_BIT__B 0
+#define SCU_RAM_QAM_LC_CF1_MEDIUM_BIT__W 8
+#define SCU_RAM_QAM_LC_CF1_MEDIUM_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CF1_MEDIUM_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF1_FINE__A 0x831FAB
+#define SCU_RAM_QAM_LC_CF1_FINE__W 16
+#define SCU_RAM_QAM_LC_CF1_FINE__M 0xFFFF
+#define SCU_RAM_QAM_LC_CF1_FINE__PRE 0x0
+
+#define SCU_RAM_QAM_LC_CF1_FINE_BIT__B 0
+#define SCU_RAM_QAM_LC_CF1_FINE_BIT__W 8
+#define SCU_RAM_QAM_LC_CF1_FINE_BIT__M 0xFF
+#define SCU_RAM_QAM_LC_CF1_FINE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_SL_SIG_POWER__A 0x831FAC
+#define SCU_RAM_QAM_SL_SIG_POWER__W 16
+#define SCU_RAM_QAM_SL_SIG_POWER__M 0xFFFF
+#define SCU_RAM_QAM_SL_SIG_POWER__PRE 0x0
+
+#define SCU_RAM_QAM_SL_SIG_POWER_BIT__B 0
+#define SCU_RAM_QAM_SL_SIG_POWER_BIT__W 16
+#define SCU_RAM_QAM_SL_SIG_POWER_BIT__M 0xFFFF
+#define SCU_RAM_QAM_SL_SIG_POWER_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_CMA_RAD0__A 0x831FAD
+#define SCU_RAM_QAM_EQ_CMA_RAD0__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD0__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD0__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_CMA_RAD0_BIT__B 0
+#define SCU_RAM_QAM_EQ_CMA_RAD0_BIT__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD0_BIT__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD0_BIT__PRE 0x0
+#define SCU_RAM_QAM_EQ_CMA_RAD0_BIT_QAM_16 0x34CD
+#define SCU_RAM_QAM_EQ_CMA_RAD0_BIT_QAM_32 0x1A33
+#define SCU_RAM_QAM_EQ_CMA_RAD0_BIT_QAM_64 0x3418
+#define SCU_RAM_QAM_EQ_CMA_RAD0_BIT_QAM_128 0x1814
+#define SCU_RAM_QAM_EQ_CMA_RAD0_BIT_QAM_256 0x2CEE
+
+#define SCU_RAM_QAM_EQ_CMA_RAD1__A 0x831FAE
+#define SCU_RAM_QAM_EQ_CMA_RAD1__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD1__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD1__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_CMA_RAD1_BIT__B 0
+#define SCU_RAM_QAM_EQ_CMA_RAD1_BIT__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD1_BIT__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD1_BIT__PRE 0x0
+#define SCU_RAM_QAM_EQ_CMA_RAD1_BIT_QAM_16 0x34CD
+#define SCU_RAM_QAM_EQ_CMA_RAD1_BIT_QAM_32 0x1A33
+#define SCU_RAM_QAM_EQ_CMA_RAD1_BIT_QAM_64 0x314A
+#define SCU_RAM_QAM_EQ_CMA_RAD1_BIT_QAM_128 0x19C6
+#define SCU_RAM_QAM_EQ_CMA_RAD1_BIT_QAM_256 0x2F34
+
+#define SCU_RAM_QAM_EQ_CMA_RAD2__A 0x831FAF
+#define SCU_RAM_QAM_EQ_CMA_RAD2__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD2__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD2__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_CMA_RAD2_BIT__B 0
+#define SCU_RAM_QAM_EQ_CMA_RAD2_BIT__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD2_BIT__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD2_BIT__PRE 0x0
+#define SCU_RAM_QAM_EQ_CMA_RAD2_BIT_QAM_16 0x34CD
+#define SCU_RAM_QAM_EQ_CMA_RAD2_BIT_QAM_32 0x1A33
+#define SCU_RAM_QAM_EQ_CMA_RAD2_BIT_QAM_64 0x2ED4
+#define SCU_RAM_QAM_EQ_CMA_RAD2_BIT_QAM_128 0x18FA
+#define SCU_RAM_QAM_EQ_CMA_RAD2_BIT_QAM_256 0x30FF
+
+#define SCU_RAM_QAM_EQ_CMA_RAD3__A 0x831FB0
+#define SCU_RAM_QAM_EQ_CMA_RAD3__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD3__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD3__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_CMA_RAD3_BIT__B 0
+#define SCU_RAM_QAM_EQ_CMA_RAD3_BIT__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD3_BIT__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD3_BIT__PRE 0x0
+#define SCU_RAM_QAM_EQ_CMA_RAD3_BIT_QAM_16 0x34CD
+#define SCU_RAM_QAM_EQ_CMA_RAD3_BIT_QAM_32 0x1A33
+#define SCU_RAM_QAM_EQ_CMA_RAD3_BIT_QAM_64 0x35F1
+#define SCU_RAM_QAM_EQ_CMA_RAD3_BIT_QAM_128 0x1909
+#define SCU_RAM_QAM_EQ_CMA_RAD3_BIT_QAM_256 0x3283
+
+#define SCU_RAM_QAM_EQ_CMA_RAD4__A 0x831FB1
+#define SCU_RAM_QAM_EQ_CMA_RAD4__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD4__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD4__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_CMA_RAD4_BIT__B 0
+#define SCU_RAM_QAM_EQ_CMA_RAD4_BIT__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD4_BIT__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD4_BIT__PRE 0x0
+#define SCU_RAM_QAM_EQ_CMA_RAD4_BIT_QAM_16 0x34CD
+#define SCU_RAM_QAM_EQ_CMA_RAD4_BIT_QAM_32 0x1A33
+#define SCU_RAM_QAM_EQ_CMA_RAD4_BIT_QAM_64 0x35F1
+#define SCU_RAM_QAM_EQ_CMA_RAD4_BIT_QAM_128 0x1A00
+#define SCU_RAM_QAM_EQ_CMA_RAD4_BIT_QAM_256 0x353D
+
+#define SCU_RAM_QAM_EQ_CMA_RAD5__A 0x831FB2
+#define SCU_RAM_QAM_EQ_CMA_RAD5__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD5__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD5__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_CMA_RAD5_BIT__B 0
+#define SCU_RAM_QAM_EQ_CMA_RAD5_BIT__W 14
+#define SCU_RAM_QAM_EQ_CMA_RAD5_BIT__M 0x3FFF
+#define SCU_RAM_QAM_EQ_CMA_RAD5_BIT__PRE 0x0
+#define SCU_RAM_QAM_EQ_CMA_RAD5_BIT_QAM_16 0x34CD
+#define SCU_RAM_QAM_EQ_CMA_RAD5_BIT_QAM_32 0x1A33
+#define SCU_RAM_QAM_EQ_CMA_RAD5_BIT_QAM_64 0x3CF9
+#define SCU_RAM_QAM_EQ_CMA_RAD5_BIT_QAM_128 0x1C46
+#define SCU_RAM_QAM_EQ_CMA_RAD5_BIT_QAM_256 0x3C19
+
+#define SCU_RAM_QAM_CTL_ENA__A 0x831FB3
+#define SCU_RAM_QAM_CTL_ENA__W 16
+#define SCU_RAM_QAM_CTL_ENA__M 0xFFFF
+#define SCU_RAM_QAM_CTL_ENA__PRE 0x0
+
+#define SCU_RAM_QAM_CTL_ENA_AMP__B 0
+#define SCU_RAM_QAM_CTL_ENA_AMP__W 1
+#define SCU_RAM_QAM_CTL_ENA_AMP__M 0x1
+#define SCU_RAM_QAM_CTL_ENA_AMP__PRE 0x0
+
+#define SCU_RAM_QAM_CTL_ENA_ACQ__B 1
+#define SCU_RAM_QAM_CTL_ENA_ACQ__W 1
+#define SCU_RAM_QAM_CTL_ENA_ACQ__M 0x2
+#define SCU_RAM_QAM_CTL_ENA_ACQ__PRE 0x0
+
+#define SCU_RAM_QAM_CTL_ENA_EQU__B 2
+#define SCU_RAM_QAM_CTL_ENA_EQU__W 1
+#define SCU_RAM_QAM_CTL_ENA_EQU__M 0x4
+#define SCU_RAM_QAM_CTL_ENA_EQU__PRE 0x0
+
+#define SCU_RAM_QAM_CTL_ENA_SLC__B 3
+#define SCU_RAM_QAM_CTL_ENA_SLC__W 1
+#define SCU_RAM_QAM_CTL_ENA_SLC__M 0x8
+#define SCU_RAM_QAM_CTL_ENA_SLC__PRE 0x0
+
+#define SCU_RAM_QAM_CTL_ENA_LC__B 4
+#define SCU_RAM_QAM_CTL_ENA_LC__W 1
+#define SCU_RAM_QAM_CTL_ENA_LC__M 0x10
+#define SCU_RAM_QAM_CTL_ENA_LC__PRE 0x0
+
+#define SCU_RAM_QAM_CTL_ENA_AGC__B 5
+#define SCU_RAM_QAM_CTL_ENA_AGC__W 1
+#define SCU_RAM_QAM_CTL_ENA_AGC__M 0x20
+#define SCU_RAM_QAM_CTL_ENA_AGC__PRE 0x0
+
+#define SCU_RAM_QAM_CTL_ENA_FEC__B 6
+#define SCU_RAM_QAM_CTL_ENA_FEC__W 1
+#define SCU_RAM_QAM_CTL_ENA_FEC__M 0x40
+#define SCU_RAM_QAM_CTL_ENA_FEC__PRE 0x0
+
+#define SCU_RAM_QAM_CTL_ENA_AXIS__B 7
+#define SCU_RAM_QAM_CTL_ENA_AXIS__W 1
+#define SCU_RAM_QAM_CTL_ENA_AXIS__M 0x80
+#define SCU_RAM_QAM_CTL_ENA_AXIS__PRE 0x0
+
+#define SCU_RAM_QAM_CTL_ENA_FMHUM__B 8
+#define SCU_RAM_QAM_CTL_ENA_FMHUM__W 1
+#define SCU_RAM_QAM_CTL_ENA_FMHUM__M 0x100
+#define SCU_RAM_QAM_CTL_ENA_FMHUM__PRE 0x0
+
+#define SCU_RAM_QAM_CTL_ENA_EQTIME__B 9
+#define SCU_RAM_QAM_CTL_ENA_EQTIME__W 1
+#define SCU_RAM_QAM_CTL_ENA_EQTIME__M 0x200
+#define SCU_RAM_QAM_CTL_ENA_EQTIME__PRE 0x0
+
+#define SCU_RAM_QAM_CTL_ENA_EXTLCK__B 10
+#define SCU_RAM_QAM_CTL_ENA_EXTLCK__W 1
+#define SCU_RAM_QAM_CTL_ENA_EXTLCK__M 0x400
+#define SCU_RAM_QAM_CTL_ENA_EXTLCK__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_1__A 0x831FB4
+#define SCU_RAM_QAM_WR_RSV_1__W 16
+#define SCU_RAM_QAM_WR_RSV_1__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_1__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_1_BIT__B 0
+#define SCU_RAM_QAM_WR_RSV_1_BIT__W 16
+#define SCU_RAM_QAM_WR_RSV_1_BIT__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_1_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_2__A 0x831FB5
+#define SCU_RAM_QAM_WR_RSV_2__W 16
+#define SCU_RAM_QAM_WR_RSV_2__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_2__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_2_BIT__B 0
+#define SCU_RAM_QAM_WR_RSV_2_BIT__W 16
+#define SCU_RAM_QAM_WR_RSV_2_BIT__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_2_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_3__A 0x831FB6
+#define SCU_RAM_QAM_WR_RSV_3__W 16
+#define SCU_RAM_QAM_WR_RSV_3__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_3__PRE 0x0
+
+#define SCU_RAM_QAM_WR_RSV_3_BIT__B 0
+#define SCU_RAM_QAM_WR_RSV_3_BIT__W 16
+#define SCU_RAM_QAM_WR_RSV_3_BIT__M 0xFFFF
+#define SCU_RAM_QAM_WR_RSV_3_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION__A 0x831FB7
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION__W 3
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION__M 0x7
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION__PRE 0x0
+
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION_BIT__B 0
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION_BIT__W 3
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION_BIT__M 0x7
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION_BIT__PRE 0x0
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION_BIT_UNKNOWN 0x0
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION_BIT_QAM_16 0x3
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION_BIT_QAM_32 0x4
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION_BIT_QAM_64 0x5
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION_BIT_QAM_128 0x6
+#define SCU_RAM_QAM_ACTIVE_CONSTELLATION_BIT_QAM_256 0x7
+
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE__A 0x831FB8
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE__W 8
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE__M 0xFF
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE__PRE 0x0
+
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT__B 0
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT__W 8
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT__M 0xFF
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT__PRE 0x0
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I128_J1 0x0
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I128_J1_V2 0x1
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I128_J2 0x2
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I64_J2 0x3
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I128_J3 0x4
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I32_J4 0x5
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I128_J4 0x6
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I16_J8 0x7
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I128_J5 0x8
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I8_J16 0x9
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I128_J6 0xA
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I128_J7 0xC
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I128_J8 0xE
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I12_J17 0x10
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_I5_J4 0x11
+#define SCU_RAM_QAM_ACTIVE_INTERLEAVE_BIT_UNKNOWN 0xFE
+
+#define SCU_RAM_QAM_RD_RSV_4__A 0x831FB9
+#define SCU_RAM_QAM_RD_RSV_4__W 16
+#define SCU_RAM_QAM_RD_RSV_4__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_4__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_4_BIT__B 0
+#define SCU_RAM_QAM_RD_RSV_4_BIT__W 16
+#define SCU_RAM_QAM_RD_RSV_4_BIT__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_4_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_LOCKED__A 0x831FBA
+#define SCU_RAM_QAM_LOCKED__W 16
+#define SCU_RAM_QAM_LOCKED__M 0xFFFF
+#define SCU_RAM_QAM_LOCKED__PRE 0x0
+
+#define SCU_RAM_QAM_LOCKED_INTLEVEL__B 0
+#define SCU_RAM_QAM_LOCKED_INTLEVEL__W 8
+#define SCU_RAM_QAM_LOCKED_INTLEVEL__M 0xFF
+#define SCU_RAM_QAM_LOCKED_INTLEVEL__PRE 0x0
+#define SCU_RAM_QAM_LOCKED_INTLEVEL_NOT_LOCKED 0x0
+#define SCU_RAM_QAM_LOCKED_INTLEVEL_AMP_OK 0x1
+#define SCU_RAM_QAM_LOCKED_INTLEVEL_RATE_OK 0x2
+#define SCU_RAM_QAM_LOCKED_INTLEVEL_FREQ_OK 0x3
+#define SCU_RAM_QAM_LOCKED_INTLEVEL_UPRIGHT_OK 0x4
+#define SCU_RAM_QAM_LOCKED_INTLEVEL_PHNOISE_OK 0x5
+#define SCU_RAM_QAM_LOCKED_INTLEVEL_TRACK_OK 0x6
+#define SCU_RAM_QAM_LOCKED_INTLEVEL_IMPNOISE_OK 0x7
+
+#define SCU_RAM_QAM_LOCKED_LOCKED__B 8
+#define SCU_RAM_QAM_LOCKED_LOCKED__W 8
+#define SCU_RAM_QAM_LOCKED_LOCKED__M 0xFF00
+#define SCU_RAM_QAM_LOCKED_LOCKED__PRE 0x0
+#define SCU_RAM_QAM_LOCKED_LOCKED_NOT_LOCKED 0x0
+#define SCU_RAM_QAM_LOCKED_LOCKED_DEMOD_LOCKED 0x4000
+#define SCU_RAM_QAM_LOCKED_LOCKED_LOCKED 0x8000
+#define SCU_RAM_QAM_LOCKED_LOCKED_NEVER_LOCK 0xC000
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI__A 0x831FBB
+#define SCU_RAM_QAM_EVENTS_OCC_HI__W 16
+#define SCU_RAM_QAM_EVENTS_OCC_HI__M 0xFFFF
+#define SCU_RAM_QAM_EVENTS_OCC_HI__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PREBER__B 0
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PREBER__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PREBER__M 0x1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PREBER__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PACKET_FAIL__B 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PACKET_FAIL__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PACKET_FAIL__M 0x2
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PACKET_FAIL__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PRBS__B 2
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PRBS__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PRBS__M 0x4
+#define SCU_RAM_QAM_EVENTS_OCC_HI_PRBS__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_LOCK_IN__B 3
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_LOCK_IN__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_LOCK_IN__M 0x8
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_LOCK_IN__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_LOCK_OUT__B 4
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_LOCK_OUT__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_LOCK_OUT__M 0x10
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_LOCK_OUT__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_POSTBER__B 5
+#define SCU_RAM_QAM_EVENTS_OCC_HI_POSTBER__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_POSTBER__M 0x20
+#define SCU_RAM_QAM_EVENTS_OCC_HI_POSTBER__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FIFO_FULL__B 6
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FIFO_FULL__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FIFO_FULL__M 0x40
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FIFO_FULL__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FIFO_EMPTY__B 7
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FIFO_EMPTY__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FIFO_EMPTY__M 0x80
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FIFO_EMPTY__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_GRAB__B 8
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_GRAB__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_GRAB__M 0x100
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_GRAB__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_CHANGE__B 9
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_CHANGE__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_CHANGE__M 0x200
+#define SCU_RAM_QAM_EVENTS_OCC_HI_OC_CHANGE__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_LCK_CHG__B 10
+#define SCU_RAM_QAM_EVENTS_OCC_HI_LCK_CHG__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_LCK_CHG__M 0x400
+#define SCU_RAM_QAM_EVENTS_OCC_HI_LCK_CHG__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FSM_CHG__B 11
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FSM_CHG__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FSM_CHG__M 0x800
+#define SCU_RAM_QAM_EVENTS_OCC_HI_FSM_CHG__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_HI_RSV__B 12
+#define SCU_RAM_QAM_EVENTS_OCC_HI_RSV__W 4
+#define SCU_RAM_QAM_EVENTS_OCC_HI_RSV__M 0xF000
+#define SCU_RAM_QAM_EVENTS_OCC_HI_RSV__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO__A 0x831FBC
+#define SCU_RAM_QAM_EVENTS_OCC_LO__W 16
+#define SCU_RAM_QAM_EVENTS_OCC_LO__M 0xFFFF
+#define SCU_RAM_QAM_EVENTS_OCC_LO__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_TIMER__B 0
+#define SCU_RAM_QAM_EVENTS_OCC_LO_TIMER__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_TIMER__M 0x1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_TIMER__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_CLIP__B 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_CLIP__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_CLIP__M 0x2
+#define SCU_RAM_QAM_EVENTS_OCC_LO_CLIP__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SENSE__B 2
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SENSE__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SENSE__M 0x4
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SENSE__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_POWER__B 3
+#define SCU_RAM_QAM_EVENTS_OCC_LO_POWER__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_POWER__M 0x8
+#define SCU_RAM_QAM_EVENTS_OCC_LO_POWER__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_MEDIAN__B 4
+#define SCU_RAM_QAM_EVENTS_OCC_LO_MEDIAN__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_MEDIAN__M 0x10
+#define SCU_RAM_QAM_EVENTS_OCC_LO_MEDIAN__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_MER__B 5
+#define SCU_RAM_QAM_EVENTS_OCC_LO_MER__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_MER__M 0x20
+#define SCU_RAM_QAM_EVENTS_OCC_LO_MER__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_LOOP__B 6
+#define SCU_RAM_QAM_EVENTS_OCC_LO_LOOP__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_LOOP__M 0x40
+#define SCU_RAM_QAM_EVENTS_OCC_LO_LOOP__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_FREQWRAP__B 7
+#define SCU_RAM_QAM_EVENTS_OCC_LO_FREQWRAP__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_FREQWRAP__M 0x80
+#define SCU_RAM_QAM_EVENTS_OCC_LO_FREQWRAP__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SER__B 8
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SER__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SER__M 0x100
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SER__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_VD_LOCK_IN__B 9
+#define SCU_RAM_QAM_EVENTS_OCC_LO_VD_LOCK_IN__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_VD_LOCK_IN__M 0x200
+#define SCU_RAM_QAM_EVENTS_OCC_LO_VD_LOCK_IN__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_LOCK_IN__B 10
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_LOCK_IN__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_LOCK_IN__M 0x400
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_LOCK_IN__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_LOCK_OUT__B 11
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_LOCK_OUT__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_LOCK_OUT__M 0x800
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_LOCK_OUT__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_TIME_OUT__B 12
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_TIME_OUT__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_TIME_OUT__M 0x1000
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SY_TIME_OUT__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SYNCWORD__B 13
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SYNCWORD__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SYNCWORD__M 0x2000
+#define SCU_RAM_QAM_EVENTS_OCC_LO_SYNCWORD__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_DI_LOCK_IN__B 14
+#define SCU_RAM_QAM_EVENTS_OCC_LO_DI_LOCK_IN__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_DI_LOCK_IN__M 0x4000
+#define SCU_RAM_QAM_EVENTS_OCC_LO_DI_LOCK_IN__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_OCC_LO_DI_LOCK_OUT__B 15
+#define SCU_RAM_QAM_EVENTS_OCC_LO_DI_LOCK_OUT__W 1
+#define SCU_RAM_QAM_EVENTS_OCC_LO_DI_LOCK_OUT__M 0x8000
+#define SCU_RAM_QAM_EVENTS_OCC_LO_DI_LOCK_OUT__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_SCHED_HI__A 0x831FBD
+#define SCU_RAM_QAM_EVENTS_SCHED_HI__W 16
+#define SCU_RAM_QAM_EVENTS_SCHED_HI__M 0xFFFF
+#define SCU_RAM_QAM_EVENTS_SCHED_HI__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_SCHED_HI_BIT__B 0
+#define SCU_RAM_QAM_EVENTS_SCHED_HI_BIT__W 16
+#define SCU_RAM_QAM_EVENTS_SCHED_HI_BIT__M 0xFFFF
+#define SCU_RAM_QAM_EVENTS_SCHED_HI_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_SCHED_LO__A 0x831FBE
+#define SCU_RAM_QAM_EVENTS_SCHED_LO__W 16
+#define SCU_RAM_QAM_EVENTS_SCHED_LO__M 0xFFFF
+#define SCU_RAM_QAM_EVENTS_SCHED_LO__PRE 0x0
+
+#define SCU_RAM_QAM_EVENTS_SCHED_LO_BIT__B 0
+#define SCU_RAM_QAM_EVENTS_SCHED_LO_BIT__W 16
+#define SCU_RAM_QAM_EVENTS_SCHED_LO_BIT__M 0xFFFF
+#define SCU_RAM_QAM_EVENTS_SCHED_LO_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_TASKLETS_SCHED__A 0x831FBF
+#define SCU_RAM_QAM_TASKLETS_SCHED__W 16
+#define SCU_RAM_QAM_TASKLETS_SCHED__M 0xFFFF
+#define SCU_RAM_QAM_TASKLETS_SCHED__PRE 0x0
+
+#define SCU_RAM_QAM_TASKLETS_SCHED_BIT__B 0
+#define SCU_RAM_QAM_TASKLETS_SCHED_BIT__W 16
+#define SCU_RAM_QAM_TASKLETS_SCHED_BIT__M 0xFFFF
+#define SCU_RAM_QAM_TASKLETS_SCHED_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_TASKLETS_RUN__A 0x831FC0
+#define SCU_RAM_QAM_TASKLETS_RUN__W 16
+#define SCU_RAM_QAM_TASKLETS_RUN__M 0xFFFF
+#define SCU_RAM_QAM_TASKLETS_RUN__PRE 0x0
+
+#define SCU_RAM_QAM_TASKLETS_RUN_BIT__B 0
+#define SCU_RAM_QAM_TASKLETS_RUN_BIT__W 16
+#define SCU_RAM_QAM_TASKLETS_RUN_BIT__M 0xFFFF
+#define SCU_RAM_QAM_TASKLETS_RUN_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_HI__A 0x831FC1
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_HI__W 16
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_HI__M 0xFFFF
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_HI__PRE 0x0
+
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_HI_BIT__B 0
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_HI_BIT__W 16
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_HI_BIT__M 0xFFFF
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_HI_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_LO__A 0x831FC2
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_LO__W 16
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_LO__M 0xFFFF
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_LO__PRE 0x0
+
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_LO_BIT__B 0
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_LO_BIT__W 16
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_LO_BIT__M 0xFFFF
+#define SCU_RAM_QAM_ACTIVE_SYM_RCRATE_LO_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_5__A 0x831FC3
+#define SCU_RAM_QAM_RD_RSV_5__W 16
+#define SCU_RAM_QAM_RD_RSV_5__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_5__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_5_BIT__B 0
+#define SCU_RAM_QAM_RD_RSV_5_BIT__W 16
+#define SCU_RAM_QAM_RD_RSV_5_BIT__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_5_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_6__A 0x831FC4
+#define SCU_RAM_QAM_RD_RSV_6__W 16
+#define SCU_RAM_QAM_RD_RSV_6__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_6__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_6_BIT__B 0
+#define SCU_RAM_QAM_RD_RSV_6_BIT__W 16
+#define SCU_RAM_QAM_RD_RSV_6_BIT__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_6_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_7__A 0x831FC5
+#define SCU_RAM_QAM_RD_RSV_7__W 16
+#define SCU_RAM_QAM_RD_RSV_7__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_7__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_7_BIT__B 0
+#define SCU_RAM_QAM_RD_RSV_7_BIT__W 16
+#define SCU_RAM_QAM_RD_RSV_7_BIT__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_7_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_8__A 0x831FC6
+#define SCU_RAM_QAM_RD_RSV_8__W 16
+#define SCU_RAM_QAM_RD_RSV_8__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_8__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_8_BIT__B 0
+#define SCU_RAM_QAM_RD_RSV_8_BIT__W 16
+#define SCU_RAM_QAM_RD_RSV_8_BIT__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_8_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_9__A 0x831FC7
+#define SCU_RAM_QAM_RD_RSV_9__W 16
+#define SCU_RAM_QAM_RD_RSV_9__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_9__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_9_BIT__B 0
+#define SCU_RAM_QAM_RD_RSV_9_BIT__W 16
+#define SCU_RAM_QAM_RD_RSV_9_BIT__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_9_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_10__A 0x831FC8
+#define SCU_RAM_QAM_RD_RSV_10__W 16
+#define SCU_RAM_QAM_RD_RSV_10__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_10__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_10_BIT__B 0
+#define SCU_RAM_QAM_RD_RSV_10_BIT__W 16
+#define SCU_RAM_QAM_RD_RSV_10_BIT__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_10_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_AGC_TPOW_OFFS__A 0x831FC9
+#define SCU_RAM_QAM_AGC_TPOW_OFFS__W 16
+#define SCU_RAM_QAM_AGC_TPOW_OFFS__M 0xFFFF
+#define SCU_RAM_QAM_AGC_TPOW_OFFS__PRE 0x0
+
+#define SCU_RAM_QAM_AGC_TPOW_OFFS_BIT__B 0
+#define SCU_RAM_QAM_AGC_TPOW_OFFS_BIT__W 16
+#define SCU_RAM_QAM_AGC_TPOW_OFFS_BIT__M 0xFFFF
+#define SCU_RAM_QAM_AGC_TPOW_OFFS_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_STATE__A 0x831FCA
+#define SCU_RAM_QAM_FSM_STATE__W 4
+#define SCU_RAM_QAM_FSM_STATE__M 0xF
+#define SCU_RAM_QAM_FSM_STATE__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_STATE_BIT__B 0
+#define SCU_RAM_QAM_FSM_STATE_BIT__W 4
+#define SCU_RAM_QAM_FSM_STATE_BIT__M 0xF
+#define SCU_RAM_QAM_FSM_STATE_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_STATE_BIT_HUNTING_AMP 0x0
+#define SCU_RAM_QAM_FSM_STATE_BIT_HUNTING_RATE 0x1
+#define SCU_RAM_QAM_FSM_STATE_BIT_HUNTING_FREQ 0x2
+#define SCU_RAM_QAM_FSM_STATE_BIT_HUNTING_UPRIGHT 0x3
+#define SCU_RAM_QAM_FSM_STATE_BIT_HUNTING_PHASE 0x4
+#define SCU_RAM_QAM_FSM_STATE_BIT_TRACKING_PHNOISE 0x5
+#define SCU_RAM_QAM_FSM_STATE_BIT_TRACKING 0x6
+#define SCU_RAM_QAM_FSM_STATE_BIT_TRACKING_BURST 0x7
+
+#define SCU_RAM_QAM_FSM_STATE_NEW__A 0x831FCB
+#define SCU_RAM_QAM_FSM_STATE_NEW__W 4
+#define SCU_RAM_QAM_FSM_STATE_NEW__M 0xF
+#define SCU_RAM_QAM_FSM_STATE_NEW__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT__B 0
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT__W 4
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT__M 0xF
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT__PRE 0x0
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT_HUNTING_AMP 0x0
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT_HUNTING_RATE 0x1
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT_HUNTING_FREQ 0x2
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT_HUNTING_UPRIGHT 0x3
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT_HUNTING_PHASE 0x4
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT_TRACKING_PHNOISE 0x5
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT_TRACKING 0x6
+#define SCU_RAM_QAM_FSM_STATE_NEW_BIT_TRACKING_BURST 0x7
+
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS__A 0x831FCC
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS__W 9
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS__M 0x1FF
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_AMP__B 0
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_AMP__W 1
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_AMP__M 0x1
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_AMP__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_RATEVAR__B 1
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_RATEVAR__W 1
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_RATEVAR__M 0x2
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_RATEVAR__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_RADIUS__B 2
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_RADIUS__W 1
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_RADIUS__M 0x4
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_RADIUS__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_FREQ__B 3
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_FREQ__W 1
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_FREQ__M 0x8
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_FREQ__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_FREQVAR__B 4
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_FREQVAR__W 1
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_FREQVAR__M 0x10
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_FREQVAR__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_CPHASE__B 5
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_CPHASE__W 1
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_CPHASE__M 0x20
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_CPHASE__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_UPRIGHT__B 6
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_UPRIGHT__W 1
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_UPRIGHT__M 0x40
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_UPRIGHT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_PHASE__B 7
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_PHASE__W 1
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_PHASE__M 0x80
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_PHASE__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_MEDIAN__B 8
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_MEDIAN__W 1
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_MEDIAN__M 0x100
+#define SCU_RAM_QAM_FSM_LOCK_FLAGS_LCK_MEDIAN__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_RATE_VARIATION__A 0x831FCD
+#define SCU_RAM_QAM_FSM_RATE_VARIATION__W 16
+#define SCU_RAM_QAM_FSM_RATE_VARIATION__M 0xFFFF
+#define SCU_RAM_QAM_FSM_RATE_VARIATION__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_RATE_VARIATION_BIT__B 0
+#define SCU_RAM_QAM_FSM_RATE_VARIATION_BIT__W 16
+#define SCU_RAM_QAM_FSM_RATE_VARIATION_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_RATE_VARIATION_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_FREQ_VARIATION__A 0x831FCE
+#define SCU_RAM_QAM_FSM_FREQ_VARIATION__W 16
+#define SCU_RAM_QAM_FSM_FREQ_VARIATION__M 0xFFFF
+#define SCU_RAM_QAM_FSM_FREQ_VARIATION__PRE 0x0
+
+#define SCU_RAM_QAM_FSM_FREQ_VARIATION_BIT__B 0
+#define SCU_RAM_QAM_FSM_FREQ_VARIATION_BIT__W 16
+#define SCU_RAM_QAM_FSM_FREQ_VARIATION_BIT__M 0xFFFF
+#define SCU_RAM_QAM_FSM_FREQ_VARIATION_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_ERR_STATE__A 0x831FCF
+#define SCU_RAM_QAM_ERR_STATE__W 4
+#define SCU_RAM_QAM_ERR_STATE__M 0xF
+#define SCU_RAM_QAM_ERR_STATE__PRE 0x0
+
+#define SCU_RAM_QAM_ERR_STATE_BIT__B 0
+#define SCU_RAM_QAM_ERR_STATE_BIT__W 4
+#define SCU_RAM_QAM_ERR_STATE_BIT__M 0xF
+#define SCU_RAM_QAM_ERR_STATE_BIT__PRE 0x0
+#define SCU_RAM_QAM_ERR_STATE_BIT_HUNTING_AMP 0x0
+#define SCU_RAM_QAM_ERR_STATE_BIT_HUNTING_RATE 0x1
+#define SCU_RAM_QAM_ERR_STATE_BIT_HUNTING_FREQ 0x2
+#define SCU_RAM_QAM_ERR_STATE_BIT_HUNTING_UPRIGHT 0x3
+#define SCU_RAM_QAM_ERR_STATE_BIT_HUNTING_PHASE 0x4
+#define SCU_RAM_QAM_ERR_STATE_BIT_TRACKING_PHNOISE 0x5
+#define SCU_RAM_QAM_ERR_STATE_BIT_TRACKING 0x6
+#define SCU_RAM_QAM_ERR_STATE_BIT_TRACKING_BURST 0x7
+
+#define SCU_RAM_QAM_ERR_LOCK_FLAGS__A 0x831FD0
+#define SCU_RAM_QAM_ERR_LOCK_FLAGS__W 9
+#define SCU_RAM_QAM_ERR_LOCK_FLAGS__M 0x1FF
+#define SCU_RAM_QAM_ERR_LOCK_FLAGS__PRE 0x0
+
+#define SCU_RAM_QAM_ERR_LOCK_FLAGS_LCK_AMP__B 0
+#define SCU_RAM_QAM_ERR_LOCK_FLAGS_LCK_AMP__W 1
+#define SCU_RAM_QAM_ERR_LOCK_FLAGS_LCK_AMP__M 0x1
+#define SCU_RAM_QAM_ERR_LOCK_FLAGS_LCK_AMP__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_LOCK__A 0x831FD1
+#define SCU_RAM_QAM_EQ_LOCK__W 1
+#define SCU_RAM_QAM_EQ_LOCK__M 0x1
+#define SCU_RAM_QAM_EQ_LOCK__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_LOCK_BIT__B 0
+#define SCU_RAM_QAM_EQ_LOCK_BIT__W 1
+#define SCU_RAM_QAM_EQ_LOCK_BIT__M 0x1
+#define SCU_RAM_QAM_EQ_LOCK_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_STATE__A 0x831FD2
+#define SCU_RAM_QAM_EQ_STATE__W 16
+#define SCU_RAM_QAM_EQ_STATE__M 0xFFFF
+#define SCU_RAM_QAM_EQ_STATE__PRE 0x0
+
+#define SCU_RAM_QAM_EQ_STATE_BIT__B 0
+#define SCU_RAM_QAM_EQ_STATE_BIT__W 16
+#define SCU_RAM_QAM_EQ_STATE_BIT__M 0xFFFF
+#define SCU_RAM_QAM_EQ_STATE_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_0__A 0x831FD3
+#define SCU_RAM_QAM_RD_RSV_0__W 16
+#define SCU_RAM_QAM_RD_RSV_0__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_0__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_0_BIT__B 0
+#define SCU_RAM_QAM_RD_RSV_0_BIT__W 16
+#define SCU_RAM_QAM_RD_RSV_0_BIT__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_0_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_1__A 0x831FD4
+#define SCU_RAM_QAM_RD_RSV_1__W 16
+#define SCU_RAM_QAM_RD_RSV_1__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_1__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_1_BIT__B 0
+#define SCU_RAM_QAM_RD_RSV_1_BIT__W 16
+#define SCU_RAM_QAM_RD_RSV_1_BIT__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_1_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_2__A 0x831FD5
+#define SCU_RAM_QAM_RD_RSV_2__W 16
+#define SCU_RAM_QAM_RD_RSV_2__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_2__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_2_BIT__B 0
+#define SCU_RAM_QAM_RD_RSV_2_BIT__W 16
+#define SCU_RAM_QAM_RD_RSV_2_BIT__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_2_BIT__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_3__A 0x831FD6
+#define SCU_RAM_QAM_RD_RSV_3__W 16
+#define SCU_RAM_QAM_RD_RSV_3__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_3__PRE 0x0
+
+#define SCU_RAM_QAM_RD_RSV_3_BIT__B 0
+#define SCU_RAM_QAM_RD_RSV_3_BIT__W 16
+#define SCU_RAM_QAM_RD_RSV_3_BIT__M 0xFFFF
+#define SCU_RAM_QAM_RD_RSV_3_BIT__PRE 0x0
+
+#define SCU_RAM_VSB_CTL_MODE__A 0x831FD7
+#define SCU_RAM_VSB_CTL_MODE__W 2
+#define SCU_RAM_VSB_CTL_MODE__M 0x3
+#define SCU_RAM_VSB_CTL_MODE__PRE 0x0
+
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_AGC__B 0
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_AGC__W 1
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_AGC__M 0x1
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_AGC__PRE 0x0
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_AGC_OFF 0x0
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_AGC_ON 0x1
+
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_MON__B 1
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_MON__W 1
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_MON__M 0x2
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_MON__PRE 0x0
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_MON_OFF 0x0
+#define SCU_RAM_VSB_CTL_MODE_VSB_CTL_MODE_MON_ON 0x2
+
+#define SCU_RAM_VSB_NOTCH_THRESHOLD__A 0x831FD8
+#define SCU_RAM_VSB_NOTCH_THRESHOLD__W 16
+#define SCU_RAM_VSB_NOTCH_THRESHOLD__M 0xFFFF
+#define SCU_RAM_VSB_NOTCH_THRESHOLD__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_0__A 0x831FD9
+#define SCU_RAM_VSB_RSV_0__W 16
+#define SCU_RAM_VSB_RSV_0__M 0xFFFF
+#define SCU_RAM_VSB_RSV_0__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_1__A 0x831FDA
+#define SCU_RAM_VSB_RSV_1__W 16
+#define SCU_RAM_VSB_RSV_1__M 0xFFFF
+#define SCU_RAM_VSB_RSV_1__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_2__A 0x831FDB
+#define SCU_RAM_VSB_RSV_2__W 16
+#define SCU_RAM_VSB_RSV_2__M 0xFFFF
+#define SCU_RAM_VSB_RSV_2__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_3__A 0x831FDC
+#define SCU_RAM_VSB_RSV_3__W 16
+#define SCU_RAM_VSB_RSV_3__M 0xFFFF
+#define SCU_RAM_VSB_RSV_3__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_4__A 0x831FDD
+#define SCU_RAM_VSB_RSV_4__W 16
+#define SCU_RAM_VSB_RSV_4__M 0xFFFF
+#define SCU_RAM_VSB_RSV_4__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_5__A 0x831FDE
+#define SCU_RAM_VSB_RSV_5__W 16
+#define SCU_RAM_VSB_RSV_5__M 0xFFFF
+#define SCU_RAM_VSB_RSV_5__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_6__A 0x831FDF
+#define SCU_RAM_VSB_RSV_6__W 16
+#define SCU_RAM_VSB_RSV_6__M 0xFFFF
+#define SCU_RAM_VSB_RSV_6__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_7__A 0x831FE0
+#define SCU_RAM_VSB_RSV_7__W 16
+#define SCU_RAM_VSB_RSV_7__M 0xFFFF
+#define SCU_RAM_VSB_RSV_7__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_8__A 0x831FE1
+#define SCU_RAM_VSB_RSV_8__W 16
+#define SCU_RAM_VSB_RSV_8__M 0xFFFF
+#define SCU_RAM_VSB_RSV_8__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_9__A 0x831FE2
+#define SCU_RAM_VSB_RSV_9__W 16
+#define SCU_RAM_VSB_RSV_9__M 0xFFFF
+#define SCU_RAM_VSB_RSV_9__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_10__A 0x831FE3
+#define SCU_RAM_VSB_RSV_10__W 16
+#define SCU_RAM_VSB_RSV_10__M 0xFFFF
+#define SCU_RAM_VSB_RSV_10__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_11__A 0x831FE4
+#define SCU_RAM_VSB_RSV_11__W 16
+#define SCU_RAM_VSB_RSV_11__M 0xFFFF
+#define SCU_RAM_VSB_RSV_11__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_12__A 0x831FE5
+#define SCU_RAM_VSB_RSV_12__W 16
+#define SCU_RAM_VSB_RSV_12__M 0xFFFF
+#define SCU_RAM_VSB_RSV_12__PRE 0x0
+
+#define SCU_RAM_VSB_RSV_13__A 0x831FE6
+#define SCU_RAM_VSB_RSV_13__W 16
+#define SCU_RAM_VSB_RSV_13__M 0xFFFF
+#define SCU_RAM_VSB_RSV_13__PRE 0x0
+
+#define SCU_RAM_VSB_AGC_POW_TGT__A 0x831FE7
+#define SCU_RAM_VSB_AGC_POW_TGT__W 15
+#define SCU_RAM_VSB_AGC_POW_TGT__M 0x7FFF
+#define SCU_RAM_VSB_AGC_POW_TGT__PRE 0x0
+
+#define SCU_RAM_VSB_OUTER_LOOP_CYCLE__A 0x831FE8
+#define SCU_RAM_VSB_OUTER_LOOP_CYCLE__W 8
+#define SCU_RAM_VSB_OUTER_LOOP_CYCLE__M 0xFF
+#define SCU_RAM_VSB_OUTER_LOOP_CYCLE__PRE 0x0
+
+#define SCU_RAM_VSB_FIELD_NUMBER__A 0x831FE9
+#define SCU_RAM_VSB_FIELD_NUMBER__W 9
+#define SCU_RAM_VSB_FIELD_NUMBER__M 0x1FF
+#define SCU_RAM_VSB_FIELD_NUMBER__PRE 0x0
+
+#define SCU_RAM_VSB_SEGMENT_NUMBER__A 0x831FEA
+#define SCU_RAM_VSB_SEGMENT_NUMBER__W 10
+#define SCU_RAM_VSB_SEGMENT_NUMBER__M 0x3FF
+#define SCU_RAM_VSB_SEGMENT_NUMBER__PRE 0x0
+
+#define SCU_RAM_DRIVER_VER_HI__A 0x831FEB
+#define SCU_RAM_DRIVER_VER_HI__W 16
+#define SCU_RAM_DRIVER_VER_HI__M 0xFFFF
+#define SCU_RAM_DRIVER_VER_HI__PRE 0x0
+
+#define SCU_RAM_DRIVER_VER_LO__A 0x831FEC
+#define SCU_RAM_DRIVER_VER_LO__W 16
+#define SCU_RAM_DRIVER_VER_LO__M 0xFFFF
+#define SCU_RAM_DRIVER_VER_LO__PRE 0x0
+
+#define SCU_RAM_PARAM_15__A 0x831FED
+#define SCU_RAM_PARAM_15__W 16
+#define SCU_RAM_PARAM_15__M 0xFFFF
+#define SCU_RAM_PARAM_15__PRE 0x0
+
+#define SCU_RAM_PARAM_14__A 0x831FEE
+#define SCU_RAM_PARAM_14__W 16
+#define SCU_RAM_PARAM_14__M 0xFFFF
+#define SCU_RAM_PARAM_14__PRE 0x0
+
+#define SCU_RAM_PARAM_13__A 0x831FEF
+#define SCU_RAM_PARAM_13__W 16
+#define SCU_RAM_PARAM_13__M 0xFFFF
+#define SCU_RAM_PARAM_13__PRE 0x0
+
+#define SCU_RAM_PARAM_12__A 0x831FF0
+#define SCU_RAM_PARAM_12__W 16
+#define SCU_RAM_PARAM_12__M 0xFFFF
+#define SCU_RAM_PARAM_12__PRE 0x0
+
+#define SCU_RAM_PARAM_11__A 0x831FF1
+#define SCU_RAM_PARAM_11__W 16
+#define SCU_RAM_PARAM_11__M 0xFFFF
+#define SCU_RAM_PARAM_11__PRE 0x0
+
+#define SCU_RAM_PARAM_10__A 0x831FF2
+#define SCU_RAM_PARAM_10__W 16
+#define SCU_RAM_PARAM_10__M 0xFFFF
+#define SCU_RAM_PARAM_10__PRE 0x0
+
+#define SCU_RAM_PARAM_9__A 0x831FF3
+#define SCU_RAM_PARAM_9__W 16
+#define SCU_RAM_PARAM_9__M 0xFFFF
+#define SCU_RAM_PARAM_9__PRE 0x0
+
+#define SCU_RAM_PARAM_8__A 0x831FF4
+#define SCU_RAM_PARAM_8__W 16
+#define SCU_RAM_PARAM_8__M 0xFFFF
+#define SCU_RAM_PARAM_8__PRE 0x0
+
+#define SCU_RAM_PARAM_7__A 0x831FF5
+#define SCU_RAM_PARAM_7__W 16
+#define SCU_RAM_PARAM_7__M 0xFFFF
+#define SCU_RAM_PARAM_7__PRE 0x0
+
+#define SCU_RAM_PARAM_6__A 0x831FF6
+#define SCU_RAM_PARAM_6__W 16
+#define SCU_RAM_PARAM_6__M 0xFFFF
+#define SCU_RAM_PARAM_6__PRE 0x0
+
+#define SCU_RAM_PARAM_5__A 0x831FF7
+#define SCU_RAM_PARAM_5__W 16
+#define SCU_RAM_PARAM_5__M 0xFFFF
+#define SCU_RAM_PARAM_5__PRE 0x0
+
+#define SCU_RAM_PARAM_4__A 0x831FF8
+#define SCU_RAM_PARAM_4__W 16
+#define SCU_RAM_PARAM_4__M 0xFFFF
+#define SCU_RAM_PARAM_4__PRE 0x0
+
+#define SCU_RAM_PARAM_3__A 0x831FF9
+#define SCU_RAM_PARAM_3__W 16
+#define SCU_RAM_PARAM_3__M 0xFFFF
+#define SCU_RAM_PARAM_3__PRE 0x0
+
+#define SCU_RAM_PARAM_2__A 0x831FFA
+#define SCU_RAM_PARAM_2__W 16
+#define SCU_RAM_PARAM_2__M 0xFFFF
+#define SCU_RAM_PARAM_2__PRE 0x0
+
+#define SCU_RAM_PARAM_1__A 0x831FFB
+#define SCU_RAM_PARAM_1__W 16
+#define SCU_RAM_PARAM_1__M 0xFFFF
+#define SCU_RAM_PARAM_1__PRE 0x0
+#define SCU_RAM_PARAM_1_RES_DEMOD_GET_LOCK_NOT_LOCKED 0x0
+#define SCU_RAM_PARAM_1_RES_DEMOD_GET_LOCK_DEMOD_LOCKED 0x4000
+#define SCU_RAM_PARAM_1_RES_DEMOD_GET_LOCK_LOCKED 0x8000
+#define SCU_RAM_PARAM_1_RES_DEMOD_GET_LOCK_NEVER_LOCK 0xC000
+
+#define SCU_RAM_PARAM_0__A 0x831FFC
+#define SCU_RAM_PARAM_0__W 16
+#define SCU_RAM_PARAM_0__M 0xFFFF
+#define SCU_RAM_PARAM_0__PRE 0x0
+#define SCU_RAM_PARAM_0_ATV_DEMOD_SETENV_MN_STANDARD 0x2
+#define SCU_RAM_PARAM_0_ATV_DEMOD_SETENV_B_STANDARD 0x103
+#define SCU_RAM_PARAM_0_ATV_DEMOD_SETENV_G_STANDARD 0x3
+#define SCU_RAM_PARAM_0_ATV_DEMOD_SETENV_DK_STANDARD 0x4
+#define SCU_RAM_PARAM_0_ATV_DEMOD_SETENV_L_STANDARD 0x9
+#define SCU_RAM_PARAM_0_ATV_DEMOD_SETENV_LP_STANDARD 0x109
+#define SCU_RAM_PARAM_0_ATV_DEMOD_SETENV_I_STANDARD 0xA
+#define SCU_RAM_PARAM_0_ATV_DEMOD_SETENV_FM_STANDARD 0x40
+#define SCU_RAM_PARAM_0_QAM_DEMOD_SETENV_ANNEX_A 0x0
+#define SCU_RAM_PARAM_0_QAM_DEMOD_SETENV_ANNEX_B 0x1
+#define SCU_RAM_PARAM_0_QAM_DEMOD_SETENV_ANNEX_C 0x2
+#define SCU_RAM_PARAM_0_QAM_DEMOD_SETENV_ANNEX_D 0x3
+#define SCU_RAM_PARAM_0_RESULT_OK 0x0
+#define SCU_RAM_PARAM_0_RESULT_UNKCMD 0xFFFF
+#define SCU_RAM_PARAM_0_RESULT_UNKSTD 0xFFFE
+#define SCU_RAM_PARAM_0_RESULT_INVPAR 0xFFFD
+#define SCU_RAM_PARAM_0_RESULT_SIZE 0xFFFC
+
+#define SCU_RAM_COMMAND__A 0x831FFD
+#define SCU_RAM_COMMAND__W 16
+#define SCU_RAM_COMMAND__M 0xFFFF
+#define SCU_RAM_COMMAND__PRE 0x0
+#define SCU_RAM_COMMAND_CMD_DEMOD_RESET 0x1
+#define SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV 0x2
+#define SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM 0x3
+#define SCU_RAM_COMMAND_CMD_DEMOD_START 0x4
+#define SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK 0x5
+#define SCU_RAM_COMMAND_CMD_DEMOD_GET_PARAM 0x6
+#define SCU_RAM_COMMAND_CMD_DEMOD_HOLD 0x7
+#define SCU_RAM_COMMAND_CMD_DEMOD_RESUME 0x8
+#define SCU_RAM_COMMAND_CMD_DEMOD_STOP 0x9
+#define SCU_RAM_COMMAND_CMD_STD_QAM_IRQ_ACTIVATE 0x80
+#define SCU_RAM_COMMAND_CMD_STD_QAM_IRQ_INACTIVATE 0x81
+#define SCU_RAM_COMMAND_CMD_STD_QAM_IRQ_SIGNAL 0x82
+#define SCU_RAM_COMMAND_CMD_STD_QAM_IRQ_MONITOR 0x83
+#define SCU_RAM_COMMAND_CMD_STD_QAM_TSK_ENABLE 0x84
+#define SCU_RAM_COMMAND_CMD_STD_QAM_FSM_SET_STATE 0x85
+#define SCU_RAM_COMMAND_CMD_DEBUG_GET_IRQ_REGS 0x80
+#define SCU_RAM_COMMAND_CMD_DEBUG_HTOL 0x81
+#define SCU_RAM_COMMAND_CMD_DEBUG_GET_STACK_POINTER 0x82
+#define SCU_RAM_COMMAND_CMD_DEBUG_START_STACK_CHECK 0x83
+#define SCU_RAM_COMMAND_CMD_DEBUG_STOP_STACK_CHECK 0x84
+#define SCU_RAM_COMMAND_CMD_ADMIN_NOP 0xFF
+#define SCU_RAM_COMMAND_CMD_ADMIN_GET_VERSION 0xFE
+#define SCU_RAM_COMMAND_CMD_ADMIN_GET_JTAG_VERSION 0xFD
+#define SCU_RAM_COMMAND_CMD_AUX_SCU_ATOMIC_ACCESS 0xC0
+
+#define SCU_RAM_COMMAND_STANDARD__B 8
+#define SCU_RAM_COMMAND_STANDARD__W 8
+#define SCU_RAM_COMMAND_STANDARD__M 0xFF00
+#define SCU_RAM_COMMAND_STANDARD__PRE 0x0
+#define SCU_RAM_COMMAND_STANDARD_ATV 0x100
+#define SCU_RAM_COMMAND_STANDARD_QAM 0x200
+#define SCU_RAM_COMMAND_STANDARD_VSB 0x300
+#define SCU_RAM_COMMAND_STANDARD_OFDM 0x400
+#define SCU_RAM_COMMAND_STANDARD_OOB 0x8000
+#define SCU_RAM_COMMAND_STANDARD_TOP 0xFF00
+
+#define SCU_RAM_VERSION_HI__A 0x831FFE
+#define SCU_RAM_VERSION_HI__W 16
+#define SCU_RAM_VERSION_HI__M 0xFFFF
+#define SCU_RAM_VERSION_HI__PRE 0x0
+
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N3__B 12
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N3__W 4
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N3__M 0xF000
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N3__PRE 0x0
+
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N2__B 8
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N2__W 4
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N2__M 0xF00
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N2__PRE 0x0
+
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N1__B 4
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N1__W 4
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N1__M 0xF0
+#define SCU_RAM_VERSION_HI_VER_MAJOR_N1__PRE 0x0
+
+#define SCU_RAM_VERSION_HI_VER_MINOR_N1__B 0
+#define SCU_RAM_VERSION_HI_VER_MINOR_N1__W 4
+#define SCU_RAM_VERSION_HI_VER_MINOR_N1__M 0xF
+#define SCU_RAM_VERSION_HI_VER_MINOR_N1__PRE 0x0
+
+#define SCU_RAM_VERSION_LO__A 0x831FFF
+#define SCU_RAM_VERSION_LO__W 16
+#define SCU_RAM_VERSION_LO__M 0xFFFF
+#define SCU_RAM_VERSION_LO__PRE 0x0
+
+#define SCU_RAM_VERSION_LO_VER_PATCH_N4__B 12
+#define SCU_RAM_VERSION_LO_VER_PATCH_N4__W 4
+#define SCU_RAM_VERSION_LO_VER_PATCH_N4__M 0xF000
+#define SCU_RAM_VERSION_LO_VER_PATCH_N4__PRE 0x0
+
+#define SCU_RAM_VERSION_LO_VER_PATCH_N3__B 8
+#define SCU_RAM_VERSION_LO_VER_PATCH_N3__W 4
+#define SCU_RAM_VERSION_LO_VER_PATCH_N3__M 0xF00
+#define SCU_RAM_VERSION_LO_VER_PATCH_N3__PRE 0x0
+
+#define SCU_RAM_VERSION_LO_VER_PATCH_N2__B 4
+#define SCU_RAM_VERSION_LO_VER_PATCH_N2__W 4
+#define SCU_RAM_VERSION_LO_VER_PATCH_N2__M 0xF0
+#define SCU_RAM_VERSION_LO_VER_PATCH_N2__PRE 0x0
+
+#define SCU_RAM_VERSION_LO_VER_PATCH_N1__B 0
+#define SCU_RAM_VERSION_LO_VER_PATCH_N1__W 4
+#define SCU_RAM_VERSION_LO_VER_PATCH_N1__M 0xF
+#define SCU_RAM_VERSION_LO_VER_PATCH_N1__PRE 0x0
+
+#define SIO_COMM_EXEC__A 0x400000
+#define SIO_COMM_EXEC__W 2
+#define SIO_COMM_EXEC__M 0x3
+#define SIO_COMM_EXEC__PRE 0x0
+#define SIO_COMM_EXEC_STOP 0x0
+#define SIO_COMM_EXEC_ACTIVE 0x1
+#define SIO_COMM_EXEC_HOLD 0x2
+
+#define SIO_COMM_STATE__A 0x400001
+#define SIO_COMM_STATE__W 16
+#define SIO_COMM_STATE__M 0xFFFF
+#define SIO_COMM_STATE__PRE 0x0
+#define SIO_COMM_MB__A 0x400002
+#define SIO_COMM_MB__W 16
+#define SIO_COMM_MB__M 0xFFFF
+#define SIO_COMM_MB__PRE 0x0
+#define SIO_COMM_INT_REQ__A 0x400003
+#define SIO_COMM_INT_REQ__W 16
+#define SIO_COMM_INT_REQ__M 0xFFFF
+#define SIO_COMM_INT_REQ__PRE 0x0
+
+#define SIO_COMM_INT_REQ_HI_REQ__B 0
+#define SIO_COMM_INT_REQ_HI_REQ__W 1
+#define SIO_COMM_INT_REQ_HI_REQ__M 0x1
+#define SIO_COMM_INT_REQ_HI_REQ__PRE 0x0
+
+#define SIO_COMM_INT_REQ_SA_REQ__B 1
+#define SIO_COMM_INT_REQ_SA_REQ__W 1
+#define SIO_COMM_INT_REQ_SA_REQ__M 0x2
+#define SIO_COMM_INT_REQ_SA_REQ__PRE 0x0
+
+#define SIO_COMM_INT_STA__A 0x400005
+#define SIO_COMM_INT_STA__W 16
+#define SIO_COMM_INT_STA__M 0xFFFF
+#define SIO_COMM_INT_STA__PRE 0x0
+#define SIO_COMM_INT_MSK__A 0x400006
+#define SIO_COMM_INT_MSK__W 16
+#define SIO_COMM_INT_MSK__M 0xFFFF
+#define SIO_COMM_INT_MSK__PRE 0x0
+#define SIO_COMM_INT_STM__A 0x400007
+#define SIO_COMM_INT_STM__W 16
+#define SIO_COMM_INT_STM__M 0xFFFF
+#define SIO_COMM_INT_STM__PRE 0x0
+
+#define SIO_TOP_COMM_EXEC__A 0x410000
+#define SIO_TOP_COMM_EXEC__W 2
+#define SIO_TOP_COMM_EXEC__M 0x3
+#define SIO_TOP_COMM_EXEC__PRE 0x0
+#define SIO_TOP_COMM_EXEC_STOP 0x0
+#define SIO_TOP_COMM_EXEC_ACTIVE 0x1
+#define SIO_TOP_COMM_EXEC_HOLD 0x2
+
+#define SIO_TOP_COMM_KEY__A 0x41000F
+#define SIO_TOP_COMM_KEY__W 16
+#define SIO_TOP_COMM_KEY__M 0xFFFF
+#define SIO_TOP_COMM_KEY__PRE 0x0
+#define SIO_TOP_COMM_KEY_KEY 0xFABA
+
+#define SIO_TOP_JTAGID_LO__A 0x410012
+#define SIO_TOP_JTAGID_LO__W 16
+#define SIO_TOP_JTAGID_LO__M 0xFFFF
+#define SIO_TOP_JTAGID_LO__PRE 0x0
+
+#define SIO_TOP_JTAGID_HI__A 0x410013
+#define SIO_TOP_JTAGID_HI__W 16
+#define SIO_TOP_JTAGID_HI__M 0xFFFF
+#define SIO_TOP_JTAGID_HI__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_FLG_SMM__A 0x420010
+#define SIO_HI_RA_RAM_S0_FLG_SMM__W 1
+#define SIO_HI_RA_RAM_S0_FLG_SMM__M 0x1
+#define SIO_HI_RA_RAM_S0_FLG_SMM__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_DEV_ID__A 0x420011
+#define SIO_HI_RA_RAM_S0_DEV_ID__W 7
+#define SIO_HI_RA_RAM_S0_DEV_ID__M 0x7F
+#define SIO_HI_RA_RAM_S0_DEV_ID__PRE 0x52
+
+#define SIO_HI_RA_RAM_S0_FLG_CRC__A 0x420012
+#define SIO_HI_RA_RAM_S0_FLG_CRC__W 1
+#define SIO_HI_RA_RAM_S0_FLG_CRC__M 0x1
+#define SIO_HI_RA_RAM_S0_FLG_CRC__PRE 0x0
+#define SIO_HI_RA_RAM_S0_FLG_ACC__A 0x420013
+#define SIO_HI_RA_RAM_S0_FLG_ACC__W 4
+#define SIO_HI_RA_RAM_S0_FLG_ACC__M 0xF
+#define SIO_HI_RA_RAM_S0_FLG_ACC__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_RWM__B 0
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_RWM__W 2
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_RWM__M 0x3
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_RWM__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_SLV_BRC__B 2
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_SLV_BRC__W 1
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_SLV_BRC__M 0x4
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_SLV_BRC__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_SLV_SWP__B 3
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_SLV_SWP__W 1
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_SLV_SWP__M 0x8
+#define SIO_HI_RA_RAM_S0_FLG_ACC_S0_SLV_SWP__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_STATE__A 0x420014
+#define SIO_HI_RA_RAM_S0_STATE__W 1
+#define SIO_HI_RA_RAM_S0_STATE__M 0x1
+#define SIO_HI_RA_RAM_S0_STATE__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_STATE_S0_SLV_STA__B 0
+#define SIO_HI_RA_RAM_S0_STATE_S0_SLV_STA__W 1
+#define SIO_HI_RA_RAM_S0_STATE_S0_SLV_STA__M 0x1
+#define SIO_HI_RA_RAM_S0_STATE_S0_SLV_STA__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_BLK_BNK__A 0x420015
+#define SIO_HI_RA_RAM_S0_BLK_BNK__W 12
+#define SIO_HI_RA_RAM_S0_BLK_BNK__M 0xFFF
+#define SIO_HI_RA_RAM_S0_BLK_BNK__PRE 0x82
+
+#define SIO_HI_RA_RAM_S0_BLK_BNK_S0_SLV_BNK__B 0
+#define SIO_HI_RA_RAM_S0_BLK_BNK_S0_SLV_BNK__W 6
+#define SIO_HI_RA_RAM_S0_BLK_BNK_S0_SLV_BNK__M 0x3F
+#define SIO_HI_RA_RAM_S0_BLK_BNK_S0_SLV_BNK__PRE 0x2
+
+#define SIO_HI_RA_RAM_S0_BLK_BNK_S0_SLV_BLK__B 6
+#define SIO_HI_RA_RAM_S0_BLK_BNK_S0_SLV_BLK__W 6
+#define SIO_HI_RA_RAM_S0_BLK_BNK_S0_SLV_BLK__M 0xFC0
+#define SIO_HI_RA_RAM_S0_BLK_BNK_S0_SLV_BLK__PRE 0x80
+
+#define SIO_HI_RA_RAM_S0_ADDR__A 0x420016
+#define SIO_HI_RA_RAM_S0_ADDR__W 16
+#define SIO_HI_RA_RAM_S0_ADDR__M 0xFFFF
+#define SIO_HI_RA_RAM_S0_ADDR__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_ADDR_S0_SLV_ADDR__B 0
+#define SIO_HI_RA_RAM_S0_ADDR_S0_SLV_ADDR__W 16
+#define SIO_HI_RA_RAM_S0_ADDR_S0_SLV_ADDR__M 0xFFFF
+#define SIO_HI_RA_RAM_S0_ADDR_S0_SLV_ADDR__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_CRC__A 0x420017
+#define SIO_HI_RA_RAM_S0_CRC__W 16
+#define SIO_HI_RA_RAM_S0_CRC__M 0xFFFF
+#define SIO_HI_RA_RAM_S0_CRC__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_BUFFER__A 0x420018
+#define SIO_HI_RA_RAM_S0_BUFFER__W 16
+#define SIO_HI_RA_RAM_S0_BUFFER__M 0xFFFF
+#define SIO_HI_RA_RAM_S0_BUFFER__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_RMWBUF__A 0x420019
+#define SIO_HI_RA_RAM_S0_RMWBUF__W 16
+#define SIO_HI_RA_RAM_S0_RMWBUF__M 0xFFFF
+#define SIO_HI_RA_RAM_S0_RMWBUF__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_FLG_VB__A 0x42001A
+#define SIO_HI_RA_RAM_S0_FLG_VB__W 1
+#define SIO_HI_RA_RAM_S0_FLG_VB__M 0x1
+#define SIO_HI_RA_RAM_S0_FLG_VB__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_TEMP0__A 0x42001B
+#define SIO_HI_RA_RAM_S0_TEMP0__W 16
+#define SIO_HI_RA_RAM_S0_TEMP0__M 0xFFFF
+#define SIO_HI_RA_RAM_S0_TEMP0__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_TEMP1__A 0x42001C
+#define SIO_HI_RA_RAM_S0_TEMP1__W 16
+#define SIO_HI_RA_RAM_S0_TEMP1__M 0xFFFF
+#define SIO_HI_RA_RAM_S0_TEMP1__PRE 0x0
+
+#define SIO_HI_RA_RAM_S0_OFFSET__A 0x42001D
+#define SIO_HI_RA_RAM_S0_OFFSET__W 16
+#define SIO_HI_RA_RAM_S0_OFFSET__M 0xFFFF
+#define SIO_HI_RA_RAM_S0_OFFSET__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_FLG_SMM__A 0x420020
+#define SIO_HI_RA_RAM_S1_FLG_SMM__W 1
+#define SIO_HI_RA_RAM_S1_FLG_SMM__M 0x1
+#define SIO_HI_RA_RAM_S1_FLG_SMM__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_DEV_ID__A 0x420021
+#define SIO_HI_RA_RAM_S1_DEV_ID__W 7
+#define SIO_HI_RA_RAM_S1_DEV_ID__M 0x7F
+#define SIO_HI_RA_RAM_S1_DEV_ID__PRE 0x52
+
+#define SIO_HI_RA_RAM_S1_FLG_CRC__A 0x420022
+#define SIO_HI_RA_RAM_S1_FLG_CRC__W 1
+#define SIO_HI_RA_RAM_S1_FLG_CRC__M 0x1
+#define SIO_HI_RA_RAM_S1_FLG_CRC__PRE 0x0
+#define SIO_HI_RA_RAM_S1_FLG_ACC__A 0x420023
+#define SIO_HI_RA_RAM_S1_FLG_ACC__W 4
+#define SIO_HI_RA_RAM_S1_FLG_ACC__M 0xF
+#define SIO_HI_RA_RAM_S1_FLG_ACC__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_RWM__B 0
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_RWM__W 2
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_RWM__M 0x3
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_RWM__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_SLV_BRC__B 2
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_SLV_BRC__W 1
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_SLV_BRC__M 0x4
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_SLV_BRC__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_SLV_SWP__B 3
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_SLV_SWP__W 1
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_SLV_SWP__M 0x8
+#define SIO_HI_RA_RAM_S1_FLG_ACC_S1_SLV_SWP__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_STATE__A 0x420024
+#define SIO_HI_RA_RAM_S1_STATE__W 1
+#define SIO_HI_RA_RAM_S1_STATE__M 0x1
+#define SIO_HI_RA_RAM_S1_STATE__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_STATE_S1_SLV_STA__B 0
+#define SIO_HI_RA_RAM_S1_STATE_S1_SLV_STA__W 1
+#define SIO_HI_RA_RAM_S1_STATE_S1_SLV_STA__M 0x1
+#define SIO_HI_RA_RAM_S1_STATE_S1_SLV_STA__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_BLK_BNK__A 0x420025
+#define SIO_HI_RA_RAM_S1_BLK_BNK__W 12
+#define SIO_HI_RA_RAM_S1_BLK_BNK__M 0xFFF
+#define SIO_HI_RA_RAM_S1_BLK_BNK__PRE 0x82
+
+#define SIO_HI_RA_RAM_S1_BLK_BNK_S1_SLV_BNK__B 0
+#define SIO_HI_RA_RAM_S1_BLK_BNK_S1_SLV_BNK__W 6
+#define SIO_HI_RA_RAM_S1_BLK_BNK_S1_SLV_BNK__M 0x3F
+#define SIO_HI_RA_RAM_S1_BLK_BNK_S1_SLV_BNK__PRE 0x2
+
+#define SIO_HI_RA_RAM_S1_BLK_BNK_S1_SLV_BLK__B 6
+#define SIO_HI_RA_RAM_S1_BLK_BNK_S1_SLV_BLK__W 6
+#define SIO_HI_RA_RAM_S1_BLK_BNK_S1_SLV_BLK__M 0xFC0
+#define SIO_HI_RA_RAM_S1_BLK_BNK_S1_SLV_BLK__PRE 0x80
+
+#define SIO_HI_RA_RAM_S1_ADDR__A 0x420026
+#define SIO_HI_RA_RAM_S1_ADDR__W 16
+#define SIO_HI_RA_RAM_S1_ADDR__M 0xFFFF
+#define SIO_HI_RA_RAM_S1_ADDR__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_ADDR_S1_SLV_ADDR__B 0
+#define SIO_HI_RA_RAM_S1_ADDR_S1_SLV_ADDR__W 16
+#define SIO_HI_RA_RAM_S1_ADDR_S1_SLV_ADDR__M 0xFFFF
+#define SIO_HI_RA_RAM_S1_ADDR_S1_SLV_ADDR__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_CRC__A 0x420027
+#define SIO_HI_RA_RAM_S1_CRC__W 16
+#define SIO_HI_RA_RAM_S1_CRC__M 0xFFFF
+#define SIO_HI_RA_RAM_S1_CRC__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_BUFFER__A 0x420028
+#define SIO_HI_RA_RAM_S1_BUFFER__W 16
+#define SIO_HI_RA_RAM_S1_BUFFER__M 0xFFFF
+#define SIO_HI_RA_RAM_S1_BUFFER__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_RMWBUF__A 0x420029
+#define SIO_HI_RA_RAM_S1_RMWBUF__W 16
+#define SIO_HI_RA_RAM_S1_RMWBUF__M 0xFFFF
+#define SIO_HI_RA_RAM_S1_RMWBUF__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_FLG_VB__A 0x42002A
+#define SIO_HI_RA_RAM_S1_FLG_VB__W 1
+#define SIO_HI_RA_RAM_S1_FLG_VB__M 0x1
+#define SIO_HI_RA_RAM_S1_FLG_VB__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_TEMP0__A 0x42002B
+#define SIO_HI_RA_RAM_S1_TEMP0__W 16
+#define SIO_HI_RA_RAM_S1_TEMP0__M 0xFFFF
+#define SIO_HI_RA_RAM_S1_TEMP0__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_TEMP1__A 0x42002C
+#define SIO_HI_RA_RAM_S1_TEMP1__W 16
+#define SIO_HI_RA_RAM_S1_TEMP1__M 0xFFFF
+#define SIO_HI_RA_RAM_S1_TEMP1__PRE 0x0
+
+#define SIO_HI_RA_RAM_S1_OFFSET__A 0x42002D
+#define SIO_HI_RA_RAM_S1_OFFSET__W 16
+#define SIO_HI_RA_RAM_S1_OFFSET__M 0xFFFF
+#define SIO_HI_RA_RAM_S1_OFFSET__PRE 0x0
+#define SIO_HI_RA_RAM_SEMA__A 0x420030
+#define SIO_HI_RA_RAM_SEMA__W 1
+#define SIO_HI_RA_RAM_SEMA__M 0x1
+#define SIO_HI_RA_RAM_SEMA__PRE 0x0
+#define SIO_HI_RA_RAM_SEMA_FREE 0x0
+#define SIO_HI_RA_RAM_SEMA_BUSY 0x1
+
+#define SIO_HI_RA_RAM_RES__A 0x420031
+#define SIO_HI_RA_RAM_RES__W 3
+#define SIO_HI_RA_RAM_RES__M 0x7
+#define SIO_HI_RA_RAM_RES__PRE 0x0
+#define SIO_HI_RA_RAM_RES_OK 0x0
+#define SIO_HI_RA_RAM_RES_ERROR 0x1
+#define SIO_HI_RA_RAM_RES_I2C_START_FOUND 0x1
+#define SIO_HI_RA_RAM_RES_I2C_STOP_FOUND 0x2
+#define SIO_HI_RA_RAM_RES_I2C_ARB_LOST 0x3
+#define SIO_HI_RA_RAM_RES_I2C_ERROR 0x4
+
+#define SIO_HI_RA_RAM_CMD__A 0x420032
+#define SIO_HI_RA_RAM_CMD__W 4
+#define SIO_HI_RA_RAM_CMD__M 0xF
+#define SIO_HI_RA_RAM_CMD__PRE 0x0
+#define SIO_HI_RA_RAM_CMD_NULL 0x0
+#define SIO_HI_RA_RAM_CMD_UIO 0x1
+#define SIO_HI_RA_RAM_CMD_RESET 0x2
+#define SIO_HI_RA_RAM_CMD_CONFIG 0x3
+#define SIO_HI_RA_RAM_CMD_INTERNAL_TRANSFER 0x4
+#define SIO_HI_RA_RAM_CMD_I2C_TRANSMIT 0x5
+#define SIO_HI_RA_RAM_CMD_EXEC 0x6
+#define SIO_HI_RA_RAM_CMD_BRDCTRL 0x7
+#define SIO_HI_RA_RAM_CMD_ATOMIC_COPY 0x8
+
+#define SIO_HI_RA_RAM_PAR_1__A 0x420033
+#define SIO_HI_RA_RAM_PAR_1__W 16
+#define SIO_HI_RA_RAM_PAR_1__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_1__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_1_PAR1__B 0
+#define SIO_HI_RA_RAM_PAR_1_PAR1__W 16
+#define SIO_HI_RA_RAM_PAR_1_PAR1__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_1_PAR1__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY 0x3945
+
+#define SIO_HI_RA_RAM_PAR_1_ITX_SRC_BNK__B 0
+#define SIO_HI_RA_RAM_PAR_1_ITX_SRC_BNK__W 6
+#define SIO_HI_RA_RAM_PAR_1_ITX_SRC_BNK__M 0x3F
+#define SIO_HI_RA_RAM_PAR_1_ITX_SRC_BNK__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_1_ITX_SRC_BLK__B 6
+#define SIO_HI_RA_RAM_PAR_1_ITX_SRC_BLK__W 6
+#define SIO_HI_RA_RAM_PAR_1_ITX_SRC_BLK__M 0xFC0
+#define SIO_HI_RA_RAM_PAR_1_ITX_SRC_BLK__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_1_I2CTX_PORT__B 0
+#define SIO_HI_RA_RAM_PAR_1_I2CTX_PORT__W 1
+#define SIO_HI_RA_RAM_PAR_1_I2CTX_PORT__M 0x1
+#define SIO_HI_RA_RAM_PAR_1_I2CTX_PORT__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_1_I2CTX_TOE__B 1
+#define SIO_HI_RA_RAM_PAR_1_I2CTX_TOE__W 1
+#define SIO_HI_RA_RAM_PAR_1_I2CTX_TOE__M 0x2
+#define SIO_HI_RA_RAM_PAR_1_I2CTX_TOE__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_1_I2CTX_TOE_DISABLE 0x0
+#define SIO_HI_RA_RAM_PAR_1_I2CTX_TOE_ENABLE 0x2
+
+#define SIO_HI_RA_RAM_PAR_1_EXEC_FUNC__B 0
+#define SIO_HI_RA_RAM_PAR_1_EXEC_FUNC__W 10
+#define SIO_HI_RA_RAM_PAR_1_EXEC_FUNC__M 0x3FF
+#define SIO_HI_RA_RAM_PAR_1_EXEC_FUNC__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_1_ACP_INT_BNK__B 0
+#define SIO_HI_RA_RAM_PAR_1_ACP_INT_BNK__W 6
+#define SIO_HI_RA_RAM_PAR_1_ACP_INT_BNK__M 0x3F
+#define SIO_HI_RA_RAM_PAR_1_ACP_INT_BNK__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_1_ACP_INT_BLK__B 6
+#define SIO_HI_RA_RAM_PAR_1_ACP_INT_BLK__W 6
+#define SIO_HI_RA_RAM_PAR_1_ACP_INT_BLK__M 0xFC0
+#define SIO_HI_RA_RAM_PAR_1_ACP_INT_BLK__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_2__A 0x420034
+#define SIO_HI_RA_RAM_PAR_2__W 16
+#define SIO_HI_RA_RAM_PAR_2__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_2__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_2_PAR2__B 0
+#define SIO_HI_RA_RAM_PAR_2_PAR2__W 16
+#define SIO_HI_RA_RAM_PAR_2_PAR2__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_2_PAR2__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_2_CFG_DIV__B 0
+#define SIO_HI_RA_RAM_PAR_2_CFG_DIV__W 7
+#define SIO_HI_RA_RAM_PAR_2_CFG_DIV__M 0x7F
+#define SIO_HI_RA_RAM_PAR_2_CFG_DIV__PRE 0x25
+
+#define SIO_HI_RA_RAM_PAR_2_ITX_SRC_OFF__B 0
+#define SIO_HI_RA_RAM_PAR_2_ITX_SRC_OFF__W 16
+#define SIO_HI_RA_RAM_PAR_2_ITX_SRC_OFF__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_2_ITX_SRC_OFF__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_2_I2CTX_BUF__B 0
+#define SIO_HI_RA_RAM_PAR_2_I2CTX_BUF__W 16
+#define SIO_HI_RA_RAM_PAR_2_I2CTX_BUF__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_2_I2CTX_BUF__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_2_BRD_CFG__B 2
+#define SIO_HI_RA_RAM_PAR_2_BRD_CFG__W 1
+#define SIO_HI_RA_RAM_PAR_2_BRD_CFG__M 0x4
+#define SIO_HI_RA_RAM_PAR_2_BRD_CFG__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN 0x0
+#define SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED 0x4
+
+#define SIO_HI_RA_RAM_PAR_2_ACP_INT_OFF__B 0
+#define SIO_HI_RA_RAM_PAR_2_ACP_INT_OFF__W 16
+#define SIO_HI_RA_RAM_PAR_2_ACP_INT_OFF__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_2_ACP_INT_OFF__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_3__A 0x420035
+#define SIO_HI_RA_RAM_PAR_3__W 16
+#define SIO_HI_RA_RAM_PAR_3__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_3__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_3_PAR3__B 0
+#define SIO_HI_RA_RAM_PAR_3_PAR3__W 16
+#define SIO_HI_RA_RAM_PAR_3_PAR3__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_3_PAR3__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__B 0
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__W 7
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M 0x7F
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__PRE 0x3F
+
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__B 7
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__W 7
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__M 0x3F80
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__PRE 0x1F80
+
+#define SIO_HI_RA_RAM_PAR_3_ITX_LEN__B 0
+#define SIO_HI_RA_RAM_PAR_3_ITX_LEN__W 16
+#define SIO_HI_RA_RAM_PAR_3_ITX_LEN__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_3_ITX_LEN__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_3_ACP_LEN__B 0
+#define SIO_HI_RA_RAM_PAR_3_ACP_LEN__W 3
+#define SIO_HI_RA_RAM_PAR_3_ACP_LEN__M 0x7
+#define SIO_HI_RA_RAM_PAR_3_ACP_LEN__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_3_ACP_RW__B 3
+#define SIO_HI_RA_RAM_PAR_3_ACP_RW__W 1
+#define SIO_HI_RA_RAM_PAR_3_ACP_RW__M 0x8
+#define SIO_HI_RA_RAM_PAR_3_ACP_RW__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_3_ACP_RW_READ 0x0
+#define SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE 0x8
+
+#define SIO_HI_RA_RAM_PAR_4__A 0x420036
+#define SIO_HI_RA_RAM_PAR_4__W 16
+#define SIO_HI_RA_RAM_PAR_4__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_4__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_4_PAR4__B 0
+#define SIO_HI_RA_RAM_PAR_4_PAR4__W 16
+#define SIO_HI_RA_RAM_PAR_4_PAR4__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_4_PAR4__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_4_CFG_WUP__B 0
+#define SIO_HI_RA_RAM_PAR_4_CFG_WUP__W 8
+#define SIO_HI_RA_RAM_PAR_4_CFG_WUP__M 0xFF
+#define SIO_HI_RA_RAM_PAR_4_CFG_WUP__PRE 0xC1
+
+#define SIO_HI_RA_RAM_PAR_4_ITX_DST_BNK__B 0
+#define SIO_HI_RA_RAM_PAR_4_ITX_DST_BNK__W 6
+#define SIO_HI_RA_RAM_PAR_4_ITX_DST_BNK__M 0x3F
+#define SIO_HI_RA_RAM_PAR_4_ITX_DST_BNK__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_4_ITX_DST_BLK__B 6
+#define SIO_HI_RA_RAM_PAR_4_ITX_DST_BLK__W 6
+#define SIO_HI_RA_RAM_PAR_4_ITX_DST_BLK__M 0xFC0
+#define SIO_HI_RA_RAM_PAR_4_ITX_DST_BLK__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_4_ACP_EXT_BNK__B 0
+#define SIO_HI_RA_RAM_PAR_4_ACP_EXT_BNK__W 6
+#define SIO_HI_RA_RAM_PAR_4_ACP_EXT_BNK__M 0x3F
+#define SIO_HI_RA_RAM_PAR_4_ACP_EXT_BNK__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_4_ACP_EXT_BLK__B 6
+#define SIO_HI_RA_RAM_PAR_4_ACP_EXT_BLK__W 6
+#define SIO_HI_RA_RAM_PAR_4_ACP_EXT_BLK__M 0xFC0
+#define SIO_HI_RA_RAM_PAR_4_ACP_EXT_BLK__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_5__A 0x420037
+#define SIO_HI_RA_RAM_PAR_5__W 16
+#define SIO_HI_RA_RAM_PAR_5__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_5__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_5_PAR5__B 0
+#define SIO_HI_RA_RAM_PAR_5_PAR5__W 16
+#define SIO_HI_RA_RAM_PAR_5_PAR5__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_5_PAR5__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV0__B 0
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV0__W 1
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV0__M 0x1
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV0__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV0_NO_SLAVE 0x0
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE 0x1
+
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV1__B 1
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV1__W 1
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV1__M 0x2
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV1__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV1_NO_SLAVE 0x0
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV1_SLAVE 0x2
+
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__B 3
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__W 1
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M 0x8
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_AWAKE 0x0
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ 0x8
+
+#define SIO_HI_RA_RAM_PAR_5_CFG_BDGST__B 5
+#define SIO_HI_RA_RAM_PAR_5_CFG_BDGST__W 1
+#define SIO_HI_RA_RAM_PAR_5_CFG_BDGST__M 0x20
+#define SIO_HI_RA_RAM_PAR_5_CFG_BDGST__PRE 0x0
+#define SIO_HI_RA_RAM_PAR_5_CFG_BDGST_DISABLE 0x0
+#define SIO_HI_RA_RAM_PAR_5_CFG_BDGST_ENABLE 0x20
+
+#define SIO_HI_RA_RAM_PAR_5_ITX_DST_OFF__B 0
+#define SIO_HI_RA_RAM_PAR_5_ITX_DST_OFF__W 16
+#define SIO_HI_RA_RAM_PAR_5_ITX_DST_OFF__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_5_ITX_DST_OFF__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_5_ACP_EXT_OFF__B 0
+#define SIO_HI_RA_RAM_PAR_5_ACP_EXT_OFF__W 16
+#define SIO_HI_RA_RAM_PAR_5_ACP_EXT_OFF__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_5_ACP_EXT_OFF__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_6__A 0x420038
+#define SIO_HI_RA_RAM_PAR_6__W 16
+#define SIO_HI_RA_RAM_PAR_6__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_6__PRE 0x95FF
+#define SIO_HI_RA_RAM_PAR_6_PAR6__B 0
+#define SIO_HI_RA_RAM_PAR_6_PAR6__W 16
+#define SIO_HI_RA_RAM_PAR_6_PAR6__M 0xFFFF
+#define SIO_HI_RA_RAM_PAR_6_PAR6__PRE 0x0
+
+#define SIO_HI_RA_RAM_PAR_6_CFG_TOD__B 0
+#define SIO_HI_RA_RAM_PAR_6_CFG_TOD__W 8
+#define SIO_HI_RA_RAM_PAR_6_CFG_TOD__M 0xFF
+#define SIO_HI_RA_RAM_PAR_6_CFG_TOD__PRE 0xFF
+
+#define SIO_HI_RA_RAM_PAR_6_CFG_WDD__B 8
+#define SIO_HI_RA_RAM_PAR_6_CFG_WDD__W 8
+#define SIO_HI_RA_RAM_PAR_6_CFG_WDD__M 0xFF00
+#define SIO_HI_RA_RAM_PAR_6_CFG_WDD__PRE 0x9500
+
+#define SIO_HI_RA_RAM_AB_TEMP__A 0x42006E
+#define SIO_HI_RA_RAM_AB_TEMP__W 16
+#define SIO_HI_RA_RAM_AB_TEMP__M 0xFFFF
+#define SIO_HI_RA_RAM_AB_TEMP__PRE 0x0
+
+#define SIO_HI_RA_RAM_I2C_CTL__A 0x42006F
+#define SIO_HI_RA_RAM_I2C_CTL__W 16
+#define SIO_HI_RA_RAM_I2C_CTL__M 0xFFFF
+#define SIO_HI_RA_RAM_I2C_CTL__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY0__A 0x420070
+#define SIO_HI_RA_RAM_VB_ENTRY0__W 16
+#define SIO_HI_RA_RAM_VB_ENTRY0__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_ENTRY0__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_MAP_BNK__B 0
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_MAP_BNK__W 4
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_MAP_BNK__M 0xF
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_MAP_BNK__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_MAP_BLK__B 4
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_MAP_BLK__W 4
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_MAP_BLK__M 0xF0
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_MAP_BLK__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_VIRT_BNK__B 8
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_VIRT_BNK__W 4
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_VIRT_BNK__M 0xF00
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_VIRT_BNK__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_VIRT_BLK__B 12
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_VIRT_BLK__W 4
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_VIRT_BLK__M 0xF000
+#define SIO_HI_RA_RAM_VB_ENTRY0_HI_VIRT_BLK__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_OFFSET0__A 0x420071
+#define SIO_HI_RA_RAM_VB_OFFSET0__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET0__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET0__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_OFFSET0_HI_MAP_OFF0__B 0
+#define SIO_HI_RA_RAM_VB_OFFSET0_HI_MAP_OFF0__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET0_HI_MAP_OFF0__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET0_HI_MAP_OFF0__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY1__A 0x420072
+#define SIO_HI_RA_RAM_VB_ENTRY1__W 16
+#define SIO_HI_RA_RAM_VB_ENTRY1__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_ENTRY1__PRE 0x0
+#define SIO_HI_RA_RAM_VB_OFFSET1__A 0x420073
+#define SIO_HI_RA_RAM_VB_OFFSET1__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET1__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET1__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_OFFSET1_HI_MAP_OFF__B 0
+#define SIO_HI_RA_RAM_VB_OFFSET1_HI_MAP_OFF__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET1_HI_MAP_OFF__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET1_HI_MAP_OFF__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY2__A 0x420074
+#define SIO_HI_RA_RAM_VB_ENTRY2__W 16
+#define SIO_HI_RA_RAM_VB_ENTRY2__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_ENTRY2__PRE 0x0
+#define SIO_HI_RA_RAM_VB_OFFSET2__A 0x420075
+#define SIO_HI_RA_RAM_VB_OFFSET2__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET2__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET2__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_OFFSET2_HI_MAP_OFF__B 0
+#define SIO_HI_RA_RAM_VB_OFFSET2_HI_MAP_OFF__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET2_HI_MAP_OFF__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET2_HI_MAP_OFF__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY3__A 0x420076
+#define SIO_HI_RA_RAM_VB_ENTRY3__W 16
+#define SIO_HI_RA_RAM_VB_ENTRY3__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_ENTRY3__PRE 0x0
+#define SIO_HI_RA_RAM_VB_OFFSET3__A 0x420077
+#define SIO_HI_RA_RAM_VB_OFFSET3__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET3__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET3__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_OFFSET3_HI_MAP_OFF__B 0
+#define SIO_HI_RA_RAM_VB_OFFSET3_HI_MAP_OFF__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET3_HI_MAP_OFF__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET3_HI_MAP_OFF__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY4__A 0x420078
+#define SIO_HI_RA_RAM_VB_ENTRY4__W 16
+#define SIO_HI_RA_RAM_VB_ENTRY4__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_ENTRY4__PRE 0x0
+#define SIO_HI_RA_RAM_VB_OFFSET4__A 0x420079
+#define SIO_HI_RA_RAM_VB_OFFSET4__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET4__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET4__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_OFFSET4_HI_MAP_OFF__B 0
+#define SIO_HI_RA_RAM_VB_OFFSET4_HI_MAP_OFF__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET4_HI_MAP_OFF__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET4_HI_MAP_OFF__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY5__A 0x42007A
+#define SIO_HI_RA_RAM_VB_ENTRY5__W 16
+#define SIO_HI_RA_RAM_VB_ENTRY5__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_ENTRY5__PRE 0x0
+#define SIO_HI_RA_RAM_VB_OFFSET5__A 0x42007B
+#define SIO_HI_RA_RAM_VB_OFFSET5__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET5__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET5__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_OFFSET5_HI_MAP_OFF__B 0
+#define SIO_HI_RA_RAM_VB_OFFSET5_HI_MAP_OFF__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET5_HI_MAP_OFF__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET5_HI_MAP_OFF__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY6__A 0x42007C
+#define SIO_HI_RA_RAM_VB_ENTRY6__W 16
+#define SIO_HI_RA_RAM_VB_ENTRY6__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_ENTRY6__PRE 0x0
+#define SIO_HI_RA_RAM_VB_OFFSET6__A 0x42007D
+#define SIO_HI_RA_RAM_VB_OFFSET6__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET6__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET6__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_OFFSET6_HI_MAP_OFF__B 0
+#define SIO_HI_RA_RAM_VB_OFFSET6_HI_MAP_OFF__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET6_HI_MAP_OFF__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET6_HI_MAP_OFF__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_ENTRY7__A 0x42007E
+#define SIO_HI_RA_RAM_VB_ENTRY7__W 16
+#define SIO_HI_RA_RAM_VB_ENTRY7__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_ENTRY7__PRE 0x0
+#define SIO_HI_RA_RAM_VB_OFFSET7__A 0x42007F
+#define SIO_HI_RA_RAM_VB_OFFSET7__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET7__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET7__PRE 0x0
+
+#define SIO_HI_RA_RAM_VB_OFFSET7_HI_MAP_OFF__B 0
+#define SIO_HI_RA_RAM_VB_OFFSET7_HI_MAP_OFF__W 16
+#define SIO_HI_RA_RAM_VB_OFFSET7_HI_MAP_OFF__M 0xFFFF
+#define SIO_HI_RA_RAM_VB_OFFSET7_HI_MAP_OFF__PRE 0x0
+
+#define SIO_HI_IF_RAM_TRP_BPT_0__A 0x430000
+#define SIO_HI_IF_RAM_TRP_BPT_0__W 12
+#define SIO_HI_IF_RAM_TRP_BPT_0__M 0xFFF
+#define SIO_HI_IF_RAM_TRP_BPT_0__PRE 0x0
+#define SIO_HI_IF_RAM_TRP_BPT_1__A 0x430001
+#define SIO_HI_IF_RAM_TRP_BPT_1__W 12
+#define SIO_HI_IF_RAM_TRP_BPT_1__M 0xFFF
+#define SIO_HI_IF_RAM_TRP_BPT_1__PRE 0x0
+#define SIO_HI_IF_RAM_TRP_STK_0__A 0x430002
+#define SIO_HI_IF_RAM_TRP_STK_0__W 12
+#define SIO_HI_IF_RAM_TRP_STK_0__M 0xFFF
+#define SIO_HI_IF_RAM_TRP_STK_0__PRE 0x0
+#define SIO_HI_IF_RAM_TRP_STK_1__A 0x430003
+#define SIO_HI_IF_RAM_TRP_STK_1__W 12
+#define SIO_HI_IF_RAM_TRP_STK_1__M 0xFFF
+#define SIO_HI_IF_RAM_TRP_STK_1__PRE 0x0
+#define SIO_HI_IF_RAM_FUN_BASE__A 0x430300
+#define SIO_HI_IF_RAM_FUN_BASE__W 12
+#define SIO_HI_IF_RAM_FUN_BASE__M 0xFFF
+#define SIO_HI_IF_RAM_FUN_BASE__PRE 0x0
+
+#define SIO_HI_IF_COMM_EXEC__A 0x440000
+#define SIO_HI_IF_COMM_EXEC__W 2
+#define SIO_HI_IF_COMM_EXEC__M 0x3
+#define SIO_HI_IF_COMM_EXEC__PRE 0x0
+#define SIO_HI_IF_COMM_EXEC_STOP 0x0
+#define SIO_HI_IF_COMM_EXEC_ACTIVE 0x1
+#define SIO_HI_IF_COMM_EXEC_HOLD 0x2
+#define SIO_HI_IF_COMM_EXEC_STEP 0x3
+
+#define SIO_HI_IF_COMM_STATE__A 0x440001
+#define SIO_HI_IF_COMM_STATE__W 10
+#define SIO_HI_IF_COMM_STATE__M 0x3FF
+#define SIO_HI_IF_COMM_STATE__PRE 0x0
+#define SIO_HI_IF_COMM_INT_REQ__A 0x440003
+#define SIO_HI_IF_COMM_INT_REQ__W 1
+#define SIO_HI_IF_COMM_INT_REQ__M 0x1
+#define SIO_HI_IF_COMM_INT_REQ__PRE 0x0
+#define SIO_HI_IF_COMM_INT_STA__A 0x440005
+#define SIO_HI_IF_COMM_INT_STA__W 1
+#define SIO_HI_IF_COMM_INT_STA__M 0x1
+#define SIO_HI_IF_COMM_INT_STA__PRE 0x0
+#define SIO_HI_IF_COMM_INT_STA_STAT__B 0
+#define SIO_HI_IF_COMM_INT_STA_STAT__W 1
+#define SIO_HI_IF_COMM_INT_STA_STAT__M 0x1
+#define SIO_HI_IF_COMM_INT_STA_STAT__PRE 0x0
+
+#define SIO_HI_IF_COMM_INT_MSK__A 0x440006
+#define SIO_HI_IF_COMM_INT_MSK__W 1
+#define SIO_HI_IF_COMM_INT_MSK__M 0x1
+#define SIO_HI_IF_COMM_INT_MSK__PRE 0x0
+#define SIO_HI_IF_COMM_INT_MSK_STAT__B 0
+#define SIO_HI_IF_COMM_INT_MSK_STAT__W 1
+#define SIO_HI_IF_COMM_INT_MSK_STAT__M 0x1
+#define SIO_HI_IF_COMM_INT_MSK_STAT__PRE 0x0
+
+#define SIO_HI_IF_COMM_INT_STM__A 0x440007
+#define SIO_HI_IF_COMM_INT_STM__W 1
+#define SIO_HI_IF_COMM_INT_STM__M 0x1
+#define SIO_HI_IF_COMM_INT_STM__PRE 0x0
+#define SIO_HI_IF_COMM_INT_STM_STAT__B 0
+#define SIO_HI_IF_COMM_INT_STM_STAT__W 1
+#define SIO_HI_IF_COMM_INT_STM_STAT__M 0x1
+#define SIO_HI_IF_COMM_INT_STM_STAT__PRE 0x0
+
+#define SIO_HI_IF_STK_0__A 0x440010
+#define SIO_HI_IF_STK_0__W 10
+#define SIO_HI_IF_STK_0__M 0x3FF
+#define SIO_HI_IF_STK_0__PRE 0x2
+
+#define SIO_HI_IF_STK_0_ADDR__B 0
+#define SIO_HI_IF_STK_0_ADDR__W 10
+#define SIO_HI_IF_STK_0_ADDR__M 0x3FF
+#define SIO_HI_IF_STK_0_ADDR__PRE 0x2
+
+#define SIO_HI_IF_STK_1__A 0x440011
+#define SIO_HI_IF_STK_1__W 10
+#define SIO_HI_IF_STK_1__M 0x3FF
+#define SIO_HI_IF_STK_1__PRE 0x2
+#define SIO_HI_IF_STK_1_ADDR__B 0
+#define SIO_HI_IF_STK_1_ADDR__W 10
+#define SIO_HI_IF_STK_1_ADDR__M 0x3FF
+#define SIO_HI_IF_STK_1_ADDR__PRE 0x2
+
+#define SIO_HI_IF_STK_2__A 0x440012
+#define SIO_HI_IF_STK_2__W 10
+#define SIO_HI_IF_STK_2__M 0x3FF
+#define SIO_HI_IF_STK_2__PRE 0x2
+#define SIO_HI_IF_STK_2_ADDR__B 0
+#define SIO_HI_IF_STK_2_ADDR__W 10
+#define SIO_HI_IF_STK_2_ADDR__M 0x3FF
+#define SIO_HI_IF_STK_2_ADDR__PRE 0x2
+
+#define SIO_HI_IF_STK_3__A 0x440013
+#define SIO_HI_IF_STK_3__W 10
+#define SIO_HI_IF_STK_3__M 0x3FF
+#define SIO_HI_IF_STK_3__PRE 0x2
+
+#define SIO_HI_IF_STK_3_ADDR__B 0
+#define SIO_HI_IF_STK_3_ADDR__W 10
+#define SIO_HI_IF_STK_3_ADDR__M 0x3FF
+#define SIO_HI_IF_STK_3_ADDR__PRE 0x2
+
+#define SIO_HI_IF_BPT_IDX__A 0x44001F
+#define SIO_HI_IF_BPT_IDX__W 1
+#define SIO_HI_IF_BPT_IDX__M 0x1
+#define SIO_HI_IF_BPT_IDX__PRE 0x0
+
+#define SIO_HI_IF_BPT_IDX_ADDR__B 0
+#define SIO_HI_IF_BPT_IDX_ADDR__W 1
+#define SIO_HI_IF_BPT_IDX_ADDR__M 0x1
+#define SIO_HI_IF_BPT_IDX_ADDR__PRE 0x0
+
+#define SIO_HI_IF_BPT__A 0x440020
+#define SIO_HI_IF_BPT__W 10
+#define SIO_HI_IF_BPT__M 0x3FF
+#define SIO_HI_IF_BPT__PRE 0x2
+
+#define SIO_HI_IF_BPT_ADDR__B 0
+#define SIO_HI_IF_BPT_ADDR__W 10
+#define SIO_HI_IF_BPT_ADDR__M 0x3FF
+#define SIO_HI_IF_BPT_ADDR__PRE 0x2
+
+#define SIO_CC_COMM_EXEC__A 0x450000
+#define SIO_CC_COMM_EXEC__W 2
+#define SIO_CC_COMM_EXEC__M 0x3
+#define SIO_CC_COMM_EXEC__PRE 0x0
+#define SIO_CC_COMM_EXEC_STOP 0x0
+#define SIO_CC_COMM_EXEC_ACTIVE 0x1
+#define SIO_CC_COMM_EXEC_HOLD 0x2
+
+#define SIO_CC_PLL_MODE__A 0x450010
+#define SIO_CC_PLL_MODE__W 6
+#define SIO_CC_PLL_MODE__M 0x3F
+#define SIO_CC_PLL_MODE__PRE 0x0
+
+#define SIO_CC_PLL_MODE_FREF_SEL__B 0
+#define SIO_CC_PLL_MODE_FREF_SEL__W 2
+#define SIO_CC_PLL_MODE_FREF_SEL__M 0x3
+#define SIO_CC_PLL_MODE_FREF_SEL__PRE 0x0
+#define SIO_CC_PLL_MODE_FREF_SEL_OHW 0x0
+#define SIO_CC_PLL_MODE_FREF_SEL_27_00 0x1
+#define SIO_CC_PLL_MODE_FREF_SEL_20_25 0x2
+#define SIO_CC_PLL_MODE_FREF_SEL_4_00 0x3
+
+#define SIO_CC_PLL_MODE_LOCKSEL__B 2
+#define SIO_CC_PLL_MODE_LOCKSEL__W 2
+#define SIO_CC_PLL_MODE_LOCKSEL__M 0xC
+#define SIO_CC_PLL_MODE_LOCKSEL__PRE 0x0
+
+#define SIO_CC_PLL_MODE_BYPASS__B 4
+#define SIO_CC_PLL_MODE_BYPASS__W 2
+#define SIO_CC_PLL_MODE_BYPASS__M 0x30
+#define SIO_CC_PLL_MODE_BYPASS__PRE 0x0
+#define SIO_CC_PLL_MODE_BYPASS_OHW 0x0
+#define SIO_CC_PLL_MODE_BYPASS_OFF 0x10
+#define SIO_CC_PLL_MODE_BYPASS_ON 0x20
+
+#define SIO_CC_PLL_TEST__A 0x450011
+#define SIO_CC_PLL_TEST__W 8
+#define SIO_CC_PLL_TEST__M 0xFF
+#define SIO_CC_PLL_TEST__PRE 0x0
+
+#define SIO_CC_PLL_LOCK__A 0x450012
+#define SIO_CC_PLL_LOCK__W 1
+#define SIO_CC_PLL_LOCK__M 0x1
+#define SIO_CC_PLL_LOCK__PRE 0x0
+#define SIO_CC_CLK_MODE__A 0x450014
+#define SIO_CC_CLK_MODE__W 5
+#define SIO_CC_CLK_MODE__M 0x1F
+#define SIO_CC_CLK_MODE__PRE 0x0
+
+#define SIO_CC_CLK_MODE_DELAY__B 0
+#define SIO_CC_CLK_MODE_DELAY__W 4
+#define SIO_CC_CLK_MODE_DELAY__M 0xF
+#define SIO_CC_CLK_MODE_DELAY__PRE 0x0
+
+#define SIO_CC_CLK_MODE_INVERT__B 4
+#define SIO_CC_CLK_MODE_INVERT__W 1
+#define SIO_CC_CLK_MODE_INVERT__M 0x10
+#define SIO_CC_CLK_MODE_INVERT__PRE 0x0
+
+#define SIO_CC_PWD_MODE__A 0x450015
+#define SIO_CC_PWD_MODE__W 3
+#define SIO_CC_PWD_MODE__M 0x7
+#define SIO_CC_PWD_MODE__PRE 0x0
+
+#define SIO_CC_PWD_MODE_LEVEL__B 0
+#define SIO_CC_PWD_MODE_LEVEL__W 2
+#define SIO_CC_PWD_MODE_LEVEL__M 0x3
+#define SIO_CC_PWD_MODE_LEVEL__PRE 0x0
+#define SIO_CC_PWD_MODE_LEVEL_NONE 0x0
+#define SIO_CC_PWD_MODE_LEVEL_CLOCK 0x1
+#define SIO_CC_PWD_MODE_LEVEL_PLL 0x2
+#define SIO_CC_PWD_MODE_LEVEL_OSC 0x3
+
+#define SIO_CC_PWD_MODE_USE_LOCK__B 2
+#define SIO_CC_PWD_MODE_USE_LOCK__W 1
+#define SIO_CC_PWD_MODE_USE_LOCK__M 0x4
+#define SIO_CC_PWD_MODE_USE_LOCK__PRE 0x0
+
+#define SIO_CC_SOFT_RST__A 0x450016
+#define SIO_CC_SOFT_RST__W 2
+#define SIO_CC_SOFT_RST__M 0x3
+#define SIO_CC_SOFT_RST__PRE 0x0
+
+#define SIO_CC_SOFT_RST_SYS__B 0
+#define SIO_CC_SOFT_RST_SYS__W 1
+#define SIO_CC_SOFT_RST_SYS__M 0x1
+#define SIO_CC_SOFT_RST_SYS__PRE 0x0
+
+#define SIO_CC_SOFT_RST_OSC__B 1
+#define SIO_CC_SOFT_RST_OSC__W 1
+#define SIO_CC_SOFT_RST_OSC__M 0x2
+#define SIO_CC_SOFT_RST_OSC__PRE 0x0
+
+#define SIO_CC_UPDATE__A 0x450017
+#define SIO_CC_UPDATE__W 16
+#define SIO_CC_UPDATE__M 0xFFFF
+#define SIO_CC_UPDATE__PRE 0x0
+#define SIO_CC_UPDATE_KEY 0xFABA
+
+#define SIO_SA_COMM_EXEC__A 0x460000
+#define SIO_SA_COMM_EXEC__W 2
+#define SIO_SA_COMM_EXEC__M 0x3
+#define SIO_SA_COMM_EXEC__PRE 0x0
+#define SIO_SA_COMM_EXEC_STOP 0x0
+#define SIO_SA_COMM_EXEC_ACTIVE 0x1
+#define SIO_SA_COMM_EXEC_HOLD 0x2
+
+#define SIO_SA_COMM_INT_REQ__A 0x460003
+#define SIO_SA_COMM_INT_REQ__W 1
+#define SIO_SA_COMM_INT_REQ__M 0x1
+#define SIO_SA_COMM_INT_REQ__PRE 0x0
+#define SIO_SA_COMM_INT_STA__A 0x460005
+#define SIO_SA_COMM_INT_STA__W 4
+#define SIO_SA_COMM_INT_STA__M 0xF
+#define SIO_SA_COMM_INT_STA__PRE 0x0
+
+#define SIO_SA_COMM_INT_STA_TR_END_INT_STA__B 0
+#define SIO_SA_COMM_INT_STA_TR_END_INT_STA__W 1
+#define SIO_SA_COMM_INT_STA_TR_END_INT_STA__M 0x1
+#define SIO_SA_COMM_INT_STA_TR_END_INT_STA__PRE 0x0
+
+#define SIO_SA_COMM_INT_STA_TR_BUFF_EMPTY_INT__B 1
+#define SIO_SA_COMM_INT_STA_TR_BUFF_EMPTY_INT__W 1
+#define SIO_SA_COMM_INT_STA_TR_BUFF_EMPTY_INT__M 0x2
+#define SIO_SA_COMM_INT_STA_TR_BUFF_EMPTY_INT__PRE 0x0
+
+#define SIO_SA_COMM_INT_STA_RX_END_INT_STA__B 2
+#define SIO_SA_COMM_INT_STA_RX_END_INT_STA__W 1
+#define SIO_SA_COMM_INT_STA_RX_END_INT_STA__M 0x4
+#define SIO_SA_COMM_INT_STA_RX_END_INT_STA__PRE 0x0
+
+#define SIO_SA_COMM_INT_STA_RX_BUFF_FULL_INT__B 3
+#define SIO_SA_COMM_INT_STA_RX_BUFF_FULL_INT__W 1
+#define SIO_SA_COMM_INT_STA_RX_BUFF_FULL_INT__M 0x8
+#define SIO_SA_COMM_INT_STA_RX_BUFF_FULL_INT__PRE 0x0
+
+#define SIO_SA_COMM_INT_MSK__A 0x460006
+#define SIO_SA_COMM_INT_MSK__W 4
+#define SIO_SA_COMM_INT_MSK__M 0xF
+#define SIO_SA_COMM_INT_MSK__PRE 0x0
+
+#define SIO_SA_COMM_INT_MSK_TR_END_INT_MASK__B 0
+#define SIO_SA_COMM_INT_MSK_TR_END_INT_MASK__W 1
+#define SIO_SA_COMM_INT_MSK_TR_END_INT_MASK__M 0x1
+#define SIO_SA_COMM_INT_MSK_TR_END_INT_MASK__PRE 0x0
+
+#define SIO_SA_COMM_INT_MSK_TR_BUFF_EMPTY_MASK__B 1
+#define SIO_SA_COMM_INT_MSK_TR_BUFF_EMPTY_MASK__W 1
+#define SIO_SA_COMM_INT_MSK_TR_BUFF_EMPTY_MASK__M 0x2
+#define SIO_SA_COMM_INT_MSK_TR_BUFF_EMPTY_MASK__PRE 0x0
+
+#define SIO_SA_COMM_INT_MSK_RX_END_INT_MASK__B 2
+#define SIO_SA_COMM_INT_MSK_RX_END_INT_MASK__W 1
+#define SIO_SA_COMM_INT_MSK_RX_END_INT_MASK__M 0x4
+#define SIO_SA_COMM_INT_MSK_RX_END_INT_MASK__PRE 0x0
+
+#define SIO_SA_COMM_INT_MSK_RX_BUFF_FULL_MASK__B 3
+#define SIO_SA_COMM_INT_MSK_RX_BUFF_FULL_MASK__W 1
+#define SIO_SA_COMM_INT_MSK_RX_BUFF_FULL_MASK__M 0x8
+#define SIO_SA_COMM_INT_MSK_RX_BUFF_FULL_MASK__PRE 0x0
+
+#define SIO_SA_COMM_INT_STM__A 0x460007
+#define SIO_SA_COMM_INT_STM__W 4
+#define SIO_SA_COMM_INT_STM__M 0xF
+#define SIO_SA_COMM_INT_STM__PRE 0x0
+
+#define SIO_SA_COMM_INT_STM_TR_END_INT_MASK__B 0
+#define SIO_SA_COMM_INT_STM_TR_END_INT_MASK__W 1
+#define SIO_SA_COMM_INT_STM_TR_END_INT_MASK__M 0x1
+#define SIO_SA_COMM_INT_STM_TR_END_INT_MASK__PRE 0x0
+
+#define SIO_SA_COMM_INT_STM_TR_BUFF_EMPTY_MASK__B 1
+#define SIO_SA_COMM_INT_STM_TR_BUFF_EMPTY_MASK__W 1
+#define SIO_SA_COMM_INT_STM_TR_BUFF_EMPTY_MASK__M 0x2
+#define SIO_SA_COMM_INT_STM_TR_BUFF_EMPTY_MASK__PRE 0x0
+
+#define SIO_SA_COMM_INT_STM_RX_END_INT_MASK__B 2
+#define SIO_SA_COMM_INT_STM_RX_END_INT_MASK__W 1
+#define SIO_SA_COMM_INT_STM_RX_END_INT_MASK__M 0x4
+#define SIO_SA_COMM_INT_STM_RX_END_INT_MASK__PRE 0x0
+
+#define SIO_SA_COMM_INT_STM_RX_BUFF_FULL_MASK__B 3
+#define SIO_SA_COMM_INT_STM_RX_BUFF_FULL_MASK__W 1
+#define SIO_SA_COMM_INT_STM_RX_BUFF_FULL_MASK__M 0x8
+#define SIO_SA_COMM_INT_STM_RX_BUFF_FULL_MASK__PRE 0x0
+
+#define SIO_SA_PRESCALER__A 0x460010
+#define SIO_SA_PRESCALER__W 13
+#define SIO_SA_PRESCALER__M 0x1FFF
+#define SIO_SA_PRESCALER__PRE 0x18B7
+#define SIO_SA_TX_DATA0__A 0x460011
+#define SIO_SA_TX_DATA0__W 16
+#define SIO_SA_TX_DATA0__M 0xFFFF
+#define SIO_SA_TX_DATA0__PRE 0x0
+#define SIO_SA_TX_DATA1__A 0x460012
+#define SIO_SA_TX_DATA1__W 16
+#define SIO_SA_TX_DATA1__M 0xFFFF
+#define SIO_SA_TX_DATA1__PRE 0x0
+#define SIO_SA_TX_DATA2__A 0x460013
+#define SIO_SA_TX_DATA2__W 16
+#define SIO_SA_TX_DATA2__M 0xFFFF
+#define SIO_SA_TX_DATA2__PRE 0x0
+#define SIO_SA_TX_DATA3__A 0x460014
+#define SIO_SA_TX_DATA3__W 16
+#define SIO_SA_TX_DATA3__M 0xFFFF
+#define SIO_SA_TX_DATA3__PRE 0x0
+#define SIO_SA_TX_LENGTH__A 0x460015
+#define SIO_SA_TX_LENGTH__W 6
+#define SIO_SA_TX_LENGTH__M 0x3F
+#define SIO_SA_TX_LENGTH__PRE 0x0
+#define SIO_SA_TX_COMMAND__A 0x460016
+#define SIO_SA_TX_COMMAND__W 2
+#define SIO_SA_TX_COMMAND__M 0x3
+#define SIO_SA_TX_COMMAND__PRE 0x3
+
+#define SIO_SA_TX_COMMAND_TX_INVERT__B 0
+#define SIO_SA_TX_COMMAND_TX_INVERT__W 1
+#define SIO_SA_TX_COMMAND_TX_INVERT__M 0x1
+#define SIO_SA_TX_COMMAND_TX_INVERT__PRE 0x1
+
+#define SIO_SA_TX_COMMAND_TX_ENABLE__B 1
+#define SIO_SA_TX_COMMAND_TX_ENABLE__W 1
+#define SIO_SA_TX_COMMAND_TX_ENABLE__M 0x2
+#define SIO_SA_TX_COMMAND_TX_ENABLE__PRE 0x2
+
+#define SIO_SA_TX_STATUS__A 0x460017
+#define SIO_SA_TX_STATUS__W 2
+#define SIO_SA_TX_STATUS__M 0x3
+#define SIO_SA_TX_STATUS__PRE 0x0
+
+#define SIO_SA_TX_STATUS_BUSY__B 0
+#define SIO_SA_TX_STATUS_BUSY__W 1
+#define SIO_SA_TX_STATUS_BUSY__M 0x1
+#define SIO_SA_TX_STATUS_BUSY__PRE 0x0
+
+#define SIO_SA_TX_STATUS_BUFF_FULL__B 1
+#define SIO_SA_TX_STATUS_BUFF_FULL__W 1
+#define SIO_SA_TX_STATUS_BUFF_FULL__M 0x2
+#define SIO_SA_TX_STATUS_BUFF_FULL__PRE 0x0
+
+#define SIO_SA_RX_DATA0__A 0x460018
+#define SIO_SA_RX_DATA0__W 16
+#define SIO_SA_RX_DATA0__M 0xFFFF
+#define SIO_SA_RX_DATA0__PRE 0x0
+#define SIO_SA_RX_DATA1__A 0x460019
+#define SIO_SA_RX_DATA1__W 16
+#define SIO_SA_RX_DATA1__M 0xFFFF
+#define SIO_SA_RX_DATA1__PRE 0x0
+#define SIO_SA_RX_LENGTH__A 0x46001A
+#define SIO_SA_RX_LENGTH__W 6
+#define SIO_SA_RX_LENGTH__M 0x3F
+#define SIO_SA_RX_LENGTH__PRE 0x0
+#define SIO_SA_RX_COMMAND__A 0x46001B
+#define SIO_SA_RX_COMMAND__W 1
+#define SIO_SA_RX_COMMAND__M 0x1
+#define SIO_SA_RX_COMMAND__PRE 0x1
+
+#define SIO_SA_RX_COMMAND_RX_INVERT__B 0
+#define SIO_SA_RX_COMMAND_RX_INVERT__W 1
+#define SIO_SA_RX_COMMAND_RX_INVERT__M 0x1
+#define SIO_SA_RX_COMMAND_RX_INVERT__PRE 0x1
+
+#define SIO_SA_RX_STATUS__A 0x46001C
+#define SIO_SA_RX_STATUS__W 2
+#define SIO_SA_RX_STATUS__M 0x3
+#define SIO_SA_RX_STATUS__PRE 0x0
+
+#define SIO_SA_RX_STATUS_BUSY__B 0
+#define SIO_SA_RX_STATUS_BUSY__W 1
+#define SIO_SA_RX_STATUS_BUSY__M 0x1
+#define SIO_SA_RX_STATUS_BUSY__PRE 0x0
+
+#define SIO_SA_RX_STATUS_BUFF_FULL__B 1
+#define SIO_SA_RX_STATUS_BUFF_FULL__W 1
+#define SIO_SA_RX_STATUS_BUFF_FULL__M 0x2
+#define SIO_SA_RX_STATUS_BUFF_FULL__PRE 0x0
+
+#define SIO_PDR_COMM_EXEC__A 0x7F0000
+#define SIO_PDR_COMM_EXEC__W 2
+#define SIO_PDR_COMM_EXEC__M 0x3
+#define SIO_PDR_COMM_EXEC__PRE 0x0
+#define SIO_PDR_COMM_EXEC_STOP 0x0
+#define SIO_PDR_COMM_EXEC_ACTIVE 0x1
+#define SIO_PDR_COMM_EXEC_HOLD 0x2
+
+#define SIO_PDR_MON_CFG__A 0x7F0010
+#define SIO_PDR_MON_CFG__W 2
+#define SIO_PDR_MON_CFG__M 0x3
+#define SIO_PDR_MON_CFG__PRE 0x0
+
+#define SIO_PDR_MON_CFG_OSEL__B 0
+#define SIO_PDR_MON_CFG_OSEL__W 1
+#define SIO_PDR_MON_CFG_OSEL__M 0x1
+#define SIO_PDR_MON_CFG_OSEL__PRE 0x0
+
+#define SIO_PDR_MON_CFG_IACT__B 1
+#define SIO_PDR_MON_CFG_IACT__W 1
+#define SIO_PDR_MON_CFG_IACT__M 0x2
+#define SIO_PDR_MON_CFG_IACT__PRE 0x0
+
+#define SIO_PDR_FDB_CFG__A 0x7F0011
+#define SIO_PDR_FDB_CFG__W 2
+#define SIO_PDR_FDB_CFG__M 0x3
+#define SIO_PDR_FDB_CFG__PRE 0x0
+#define SIO_PDR_FDB_CFG_SEL__B 0
+#define SIO_PDR_FDB_CFG_SEL__W 2
+#define SIO_PDR_FDB_CFG_SEL__M 0x3
+#define SIO_PDR_FDB_CFG_SEL__PRE 0x0
+
+#define SIO_PDR_SMA_RX_SEL__A 0x7F0012
+#define SIO_PDR_SMA_RX_SEL__W 4
+#define SIO_PDR_SMA_RX_SEL__M 0xF
+#define SIO_PDR_SMA_RX_SEL__PRE 0x0
+#define SIO_PDR_SMA_RX_SEL_SEL__B 0
+#define SIO_PDR_SMA_RX_SEL_SEL__W 4
+#define SIO_PDR_SMA_RX_SEL_SEL__M 0xF
+#define SIO_PDR_SMA_RX_SEL_SEL__PRE 0x0
+
+#define SIO_PDR_SMA_TX_SILENT__A 0x7F0013
+#define SIO_PDR_SMA_TX_SILENT__W 1
+#define SIO_PDR_SMA_TX_SILENT__M 0x1
+#define SIO_PDR_SMA_TX_SILENT__PRE 0x0
+#define SIO_PDR_UIO_IN_LO__A 0x7F0014
+#define SIO_PDR_UIO_IN_LO__W 16
+#define SIO_PDR_UIO_IN_LO__M 0xFFFF
+#define SIO_PDR_UIO_IN_LO__PRE 0x0
+#define SIO_PDR_UIO_IN_LO_DATA__B 0
+#define SIO_PDR_UIO_IN_LO_DATA__W 16
+#define SIO_PDR_UIO_IN_LO_DATA__M 0xFFFF
+#define SIO_PDR_UIO_IN_LO_DATA__PRE 0x0
+
+#define SIO_PDR_UIO_IN_HI__A 0x7F0015
+#define SIO_PDR_UIO_IN_HI__W 14
+#define SIO_PDR_UIO_IN_HI__M 0x3FFF
+#define SIO_PDR_UIO_IN_HI__PRE 0x0
+#define SIO_PDR_UIO_IN_HI_DATA__B 0
+#define SIO_PDR_UIO_IN_HI_DATA__W 14
+#define SIO_PDR_UIO_IN_HI_DATA__M 0x3FFF
+#define SIO_PDR_UIO_IN_HI_DATA__PRE 0x0
+
+#define SIO_PDR_UIO_OUT_LO__A 0x7F0016
+#define SIO_PDR_UIO_OUT_LO__W 16
+#define SIO_PDR_UIO_OUT_LO__M 0xFFFF
+#define SIO_PDR_UIO_OUT_LO__PRE 0x0
+#define SIO_PDR_UIO_OUT_LO_DATA__B 0
+#define SIO_PDR_UIO_OUT_LO_DATA__W 16
+#define SIO_PDR_UIO_OUT_LO_DATA__M 0xFFFF
+#define SIO_PDR_UIO_OUT_LO_DATA__PRE 0x0
+
+#define SIO_PDR_UIO_OUT_HI__A 0x7F0017
+#define SIO_PDR_UIO_OUT_HI__W 14
+#define SIO_PDR_UIO_OUT_HI__M 0x3FFF
+#define SIO_PDR_UIO_OUT_HI__PRE 0x0
+#define SIO_PDR_UIO_OUT_HI_DATA__B 0
+#define SIO_PDR_UIO_OUT_HI_DATA__W 14
+#define SIO_PDR_UIO_OUT_HI_DATA__M 0x3FFF
+#define SIO_PDR_UIO_OUT_HI_DATA__PRE 0x0
+
+#define SIO_PDR_PWM1_MODE__A 0x7F0018
+#define SIO_PDR_PWM1_MODE__W 2
+#define SIO_PDR_PWM1_MODE__M 0x3
+#define SIO_PDR_PWM1_MODE__PRE 0x0
+#define SIO_PDR_PWM1_PRESCALE__A 0x7F0019
+#define SIO_PDR_PWM1_PRESCALE__W 6
+#define SIO_PDR_PWM1_PRESCALE__M 0x3F
+#define SIO_PDR_PWM1_PRESCALE__PRE 0x0
+#define SIO_PDR_PWM1_VALUE__A 0x7F001A
+#define SIO_PDR_PWM1_VALUE__W 11
+#define SIO_PDR_PWM1_VALUE__M 0x7FF
+#define SIO_PDR_PWM1_VALUE__PRE 0x0
+#define SIO_PDR_PWM2_MODE__A 0x7F001C
+#define SIO_PDR_PWM2_MODE__W 2
+#define SIO_PDR_PWM2_MODE__M 0x3
+#define SIO_PDR_PWM2_MODE__PRE 0x0
+#define SIO_PDR_PWM2_PRESCALE__A 0x7F001D
+#define SIO_PDR_PWM2_PRESCALE__W 6
+#define SIO_PDR_PWM2_PRESCALE__M 0x3F
+#define SIO_PDR_PWM2_PRESCALE__PRE 0x0
+#define SIO_PDR_PWM2_VALUE__A 0x7F001E
+#define SIO_PDR_PWM2_VALUE__W 11
+#define SIO_PDR_PWM2_VALUE__M 0x7FF
+#define SIO_PDR_PWM2_VALUE__PRE 0x0
+#define SIO_PDR_OHW_CFG__A 0x7F001F
+#define SIO_PDR_OHW_CFG__W 7
+#define SIO_PDR_OHW_CFG__M 0x7F
+#define SIO_PDR_OHW_CFG__PRE 0x0
+
+#define SIO_PDR_OHW_CFG_FREF_SEL__B 0
+#define SIO_PDR_OHW_CFG_FREF_SEL__W 2
+#define SIO_PDR_OHW_CFG_FREF_SEL__M 0x3
+#define SIO_PDR_OHW_CFG_FREF_SEL__PRE 0x0
+
+#define SIO_PDR_OHW_CFG_BYPASS__B 2
+#define SIO_PDR_OHW_CFG_BYPASS__W 1
+#define SIO_PDR_OHW_CFG_BYPASS__M 0x4
+#define SIO_PDR_OHW_CFG_BYPASS__PRE 0x0
+
+#define SIO_PDR_OHW_CFG_ASEL__B 3
+#define SIO_PDR_OHW_CFG_ASEL__W 3
+#define SIO_PDR_OHW_CFG_ASEL__M 0x38
+#define SIO_PDR_OHW_CFG_ASEL__PRE 0x0
+
+#define SIO_PDR_OHW_CFG_SPEED__B 6
+#define SIO_PDR_OHW_CFG_SPEED__W 1
+#define SIO_PDR_OHW_CFG_SPEED__M 0x40
+#define SIO_PDR_OHW_CFG_SPEED__PRE 0x0
+
+#define SIO_PDR_I2S_WS_CFG__A 0x7F0020
+#define SIO_PDR_I2S_WS_CFG__W 9
+#define SIO_PDR_I2S_WS_CFG__M 0x1FF
+#define SIO_PDR_I2S_WS_CFG__PRE 0x10
+#define SIO_PDR_I2S_WS_CFG_MODE__B 0
+#define SIO_PDR_I2S_WS_CFG_MODE__W 3
+#define SIO_PDR_I2S_WS_CFG_MODE__M 0x7
+#define SIO_PDR_I2S_WS_CFG_MODE__PRE 0x0
+#define SIO_PDR_I2S_WS_CFG_DRIVE__B 3
+#define SIO_PDR_I2S_WS_CFG_DRIVE__W 3
+#define SIO_PDR_I2S_WS_CFG_DRIVE__M 0x38
+#define SIO_PDR_I2S_WS_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_I2S_WS_CFG_KEEP__B 6
+#define SIO_PDR_I2S_WS_CFG_KEEP__W 2
+#define SIO_PDR_I2S_WS_CFG_KEEP__M 0xC0
+#define SIO_PDR_I2S_WS_CFG_KEEP__PRE 0x0
+#define SIO_PDR_I2S_WS_CFG_UIO__B 8
+#define SIO_PDR_I2S_WS_CFG_UIO__W 1
+#define SIO_PDR_I2S_WS_CFG_UIO__M 0x100
+#define SIO_PDR_I2S_WS_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_GPIO_CFG__A 0x7F0021
+#define SIO_PDR_GPIO_CFG__W 9
+#define SIO_PDR_GPIO_CFG__M 0x1FF
+#define SIO_PDR_GPIO_CFG__PRE 0x10
+#define SIO_PDR_GPIO_CFG_MODE__B 0
+#define SIO_PDR_GPIO_CFG_MODE__W 3
+#define SIO_PDR_GPIO_CFG_MODE__M 0x7
+#define SIO_PDR_GPIO_CFG_MODE__PRE 0x0
+#define SIO_PDR_GPIO_CFG_DRIVE__B 3
+#define SIO_PDR_GPIO_CFG_DRIVE__W 3
+#define SIO_PDR_GPIO_CFG_DRIVE__M 0x38
+#define SIO_PDR_GPIO_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_GPIO_CFG_KEEP__B 6
+#define SIO_PDR_GPIO_CFG_KEEP__W 2
+#define SIO_PDR_GPIO_CFG_KEEP__M 0xC0
+#define SIO_PDR_GPIO_CFG_KEEP__PRE 0x0
+#define SIO_PDR_GPIO_CFG_UIO__B 8
+#define SIO_PDR_GPIO_CFG_UIO__W 1
+#define SIO_PDR_GPIO_CFG_UIO__M 0x100
+#define SIO_PDR_GPIO_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_IRQN_CFG__A 0x7F0022
+#define SIO_PDR_IRQN_CFG__W 9
+#define SIO_PDR_IRQN_CFG__M 0x1FF
+#define SIO_PDR_IRQN_CFG__PRE 0x10
+#define SIO_PDR_IRQN_CFG_MODE__B 0
+#define SIO_PDR_IRQN_CFG_MODE__W 3
+#define SIO_PDR_IRQN_CFG_MODE__M 0x7
+#define SIO_PDR_IRQN_CFG_MODE__PRE 0x0
+#define SIO_PDR_IRQN_CFG_DRIVE__B 3
+#define SIO_PDR_IRQN_CFG_DRIVE__W 3
+#define SIO_PDR_IRQN_CFG_DRIVE__M 0x38
+#define SIO_PDR_IRQN_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_IRQN_CFG_KEEP__B 6
+#define SIO_PDR_IRQN_CFG_KEEP__W 2
+#define SIO_PDR_IRQN_CFG_KEEP__M 0xC0
+#define SIO_PDR_IRQN_CFG_KEEP__PRE 0x0
+#define SIO_PDR_IRQN_CFG_UIO__B 8
+#define SIO_PDR_IRQN_CFG_UIO__W 1
+#define SIO_PDR_IRQN_CFG_UIO__M 0x100
+#define SIO_PDR_IRQN_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_OOB_CRX_CFG__A 0x7F0023
+#define SIO_PDR_OOB_CRX_CFG__W 9
+#define SIO_PDR_OOB_CRX_CFG__M 0x1FF
+#define SIO_PDR_OOB_CRX_CFG__PRE 0x10
+#define SIO_PDR_OOB_CRX_CFG_MODE__B 0
+#define SIO_PDR_OOB_CRX_CFG_MODE__W 3
+#define SIO_PDR_OOB_CRX_CFG_MODE__M 0x7
+#define SIO_PDR_OOB_CRX_CFG_MODE__PRE 0x0
+#define SIO_PDR_OOB_CRX_CFG_DRIVE__B 3
+#define SIO_PDR_OOB_CRX_CFG_DRIVE__W 3
+#define SIO_PDR_OOB_CRX_CFG_DRIVE__M 0x38
+#define SIO_PDR_OOB_CRX_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_OOB_CRX_CFG_KEEP__B 6
+#define SIO_PDR_OOB_CRX_CFG_KEEP__W 2
+#define SIO_PDR_OOB_CRX_CFG_KEEP__M 0xC0
+#define SIO_PDR_OOB_CRX_CFG_KEEP__PRE 0x0
+#define SIO_PDR_OOB_CRX_CFG_UIO__B 8
+#define SIO_PDR_OOB_CRX_CFG_UIO__W 1
+#define SIO_PDR_OOB_CRX_CFG_UIO__M 0x100
+#define SIO_PDR_OOB_CRX_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_OOB_DRX_CFG__A 0x7F0024
+#define SIO_PDR_OOB_DRX_CFG__W 9
+#define SIO_PDR_OOB_DRX_CFG__M 0x1FF
+#define SIO_PDR_OOB_DRX_CFG__PRE 0x10
+#define SIO_PDR_OOB_DRX_CFG_MODE__B 0
+#define SIO_PDR_OOB_DRX_CFG_MODE__W 3
+#define SIO_PDR_OOB_DRX_CFG_MODE__M 0x7
+#define SIO_PDR_OOB_DRX_CFG_MODE__PRE 0x0
+#define SIO_PDR_OOB_DRX_CFG_DRIVE__B 3
+#define SIO_PDR_OOB_DRX_CFG_DRIVE__W 3
+#define SIO_PDR_OOB_DRX_CFG_DRIVE__M 0x38
+#define SIO_PDR_OOB_DRX_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_OOB_DRX_CFG_KEEP__B 6
+#define SIO_PDR_OOB_DRX_CFG_KEEP__W 2
+#define SIO_PDR_OOB_DRX_CFG_KEEP__M 0xC0
+#define SIO_PDR_OOB_DRX_CFG_KEEP__PRE 0x0
+#define SIO_PDR_OOB_DRX_CFG_UIO__B 8
+#define SIO_PDR_OOB_DRX_CFG_UIO__W 1
+#define SIO_PDR_OOB_DRX_CFG_UIO__M 0x100
+#define SIO_PDR_OOB_DRX_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MSTRT_CFG__A 0x7F0025
+#define SIO_PDR_MSTRT_CFG__W 9
+#define SIO_PDR_MSTRT_CFG__M 0x1FF
+#define SIO_PDR_MSTRT_CFG__PRE 0x50
+#define SIO_PDR_MSTRT_CFG_MODE__B 0
+#define SIO_PDR_MSTRT_CFG_MODE__W 3
+#define SIO_PDR_MSTRT_CFG_MODE__M 0x7
+#define SIO_PDR_MSTRT_CFG_MODE__PRE 0x0
+#define SIO_PDR_MSTRT_CFG_DRIVE__B 3
+#define SIO_PDR_MSTRT_CFG_DRIVE__W 3
+#define SIO_PDR_MSTRT_CFG_DRIVE__M 0x38
+#define SIO_PDR_MSTRT_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MSTRT_CFG_KEEP__B 6
+#define SIO_PDR_MSTRT_CFG_KEEP__W 2
+#define SIO_PDR_MSTRT_CFG_KEEP__M 0xC0
+#define SIO_PDR_MSTRT_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MSTRT_CFG_UIO__B 8
+#define SIO_PDR_MSTRT_CFG_UIO__W 1
+#define SIO_PDR_MSTRT_CFG_UIO__M 0x100
+#define SIO_PDR_MSTRT_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MERR_CFG__A 0x7F0026
+#define SIO_PDR_MERR_CFG__W 9
+#define SIO_PDR_MERR_CFG__M 0x1FF
+#define SIO_PDR_MERR_CFG__PRE 0x50
+#define SIO_PDR_MERR_CFG_MODE__B 0
+#define SIO_PDR_MERR_CFG_MODE__W 3
+#define SIO_PDR_MERR_CFG_MODE__M 0x7
+#define SIO_PDR_MERR_CFG_MODE__PRE 0x0
+#define SIO_PDR_MERR_CFG_DRIVE__B 3
+#define SIO_PDR_MERR_CFG_DRIVE__W 3
+#define SIO_PDR_MERR_CFG_DRIVE__M 0x38
+#define SIO_PDR_MERR_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MERR_CFG_KEEP__B 6
+#define SIO_PDR_MERR_CFG_KEEP__W 2
+#define SIO_PDR_MERR_CFG_KEEP__M 0xC0
+#define SIO_PDR_MERR_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MERR_CFG_UIO__B 8
+#define SIO_PDR_MERR_CFG_UIO__W 1
+#define SIO_PDR_MERR_CFG_UIO__M 0x100
+#define SIO_PDR_MERR_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MCLK_CFG__A 0x7F0028
+#define SIO_PDR_MCLK_CFG__W 9
+#define SIO_PDR_MCLK_CFG__M 0x1FF
+#define SIO_PDR_MCLK_CFG__PRE 0x50
+#define SIO_PDR_MCLK_CFG_MODE__B 0
+#define SIO_PDR_MCLK_CFG_MODE__W 3
+#define SIO_PDR_MCLK_CFG_MODE__M 0x7
+#define SIO_PDR_MCLK_CFG_MODE__PRE 0x0
+#define SIO_PDR_MCLK_CFG_DRIVE__B 3
+#define SIO_PDR_MCLK_CFG_DRIVE__W 3
+#define SIO_PDR_MCLK_CFG_DRIVE__M 0x38
+#define SIO_PDR_MCLK_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MCLK_CFG_KEEP__B 6
+#define SIO_PDR_MCLK_CFG_KEEP__W 2
+#define SIO_PDR_MCLK_CFG_KEEP__M 0xC0
+#define SIO_PDR_MCLK_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MCLK_CFG_UIO__B 8
+#define SIO_PDR_MCLK_CFG_UIO__W 1
+#define SIO_PDR_MCLK_CFG_UIO__M 0x100
+#define SIO_PDR_MCLK_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MVAL_CFG__A 0x7F0029
+#define SIO_PDR_MVAL_CFG__W 9
+#define SIO_PDR_MVAL_CFG__M 0x1FF
+#define SIO_PDR_MVAL_CFG__PRE 0x50
+#define SIO_PDR_MVAL_CFG_MODE__B 0
+#define SIO_PDR_MVAL_CFG_MODE__W 3
+#define SIO_PDR_MVAL_CFG_MODE__M 0x7
+#define SIO_PDR_MVAL_CFG_MODE__PRE 0x0
+#define SIO_PDR_MVAL_CFG_DRIVE__B 3
+#define SIO_PDR_MVAL_CFG_DRIVE__W 3
+#define SIO_PDR_MVAL_CFG_DRIVE__M 0x38
+#define SIO_PDR_MVAL_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MVAL_CFG_KEEP__B 6
+#define SIO_PDR_MVAL_CFG_KEEP__W 2
+#define SIO_PDR_MVAL_CFG_KEEP__M 0xC0
+#define SIO_PDR_MVAL_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MVAL_CFG_UIO__B 8
+#define SIO_PDR_MVAL_CFG_UIO__W 1
+#define SIO_PDR_MVAL_CFG_UIO__M 0x100
+#define SIO_PDR_MVAL_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MD0_CFG__A 0x7F002A
+#define SIO_PDR_MD0_CFG__W 9
+#define SIO_PDR_MD0_CFG__M 0x1FF
+#define SIO_PDR_MD0_CFG__PRE 0x50
+#define SIO_PDR_MD0_CFG_MODE__B 0
+#define SIO_PDR_MD0_CFG_MODE__W 3
+#define SIO_PDR_MD0_CFG_MODE__M 0x7
+#define SIO_PDR_MD0_CFG_MODE__PRE 0x0
+#define SIO_PDR_MD0_CFG_DRIVE__B 3
+#define SIO_PDR_MD0_CFG_DRIVE__W 3
+#define SIO_PDR_MD0_CFG_DRIVE__M 0x38
+#define SIO_PDR_MD0_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MD0_CFG_KEEP__B 6
+#define SIO_PDR_MD0_CFG_KEEP__W 2
+#define SIO_PDR_MD0_CFG_KEEP__M 0xC0
+#define SIO_PDR_MD0_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MD0_CFG_UIO__B 8
+#define SIO_PDR_MD0_CFG_UIO__W 1
+#define SIO_PDR_MD0_CFG_UIO__M 0x100
+#define SIO_PDR_MD0_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MD1_CFG__A 0x7F002B
+#define SIO_PDR_MD1_CFG__W 9
+#define SIO_PDR_MD1_CFG__M 0x1FF
+#define SIO_PDR_MD1_CFG__PRE 0x50
+#define SIO_PDR_MD1_CFG_MODE__B 0
+#define SIO_PDR_MD1_CFG_MODE__W 3
+#define SIO_PDR_MD1_CFG_MODE__M 0x7
+#define SIO_PDR_MD1_CFG_MODE__PRE 0x0
+#define SIO_PDR_MD1_CFG_DRIVE__B 3
+#define SIO_PDR_MD1_CFG_DRIVE__W 3
+#define SIO_PDR_MD1_CFG_DRIVE__M 0x38
+#define SIO_PDR_MD1_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MD1_CFG_KEEP__B 6
+#define SIO_PDR_MD1_CFG_KEEP__W 2
+#define SIO_PDR_MD1_CFG_KEEP__M 0xC0
+#define SIO_PDR_MD1_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MD1_CFG_UIO__B 8
+#define SIO_PDR_MD1_CFG_UIO__W 1
+#define SIO_PDR_MD1_CFG_UIO__M 0x100
+#define SIO_PDR_MD1_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MD2_CFG__A 0x7F002C
+#define SIO_PDR_MD2_CFG__W 9
+#define SIO_PDR_MD2_CFG__M 0x1FF
+#define SIO_PDR_MD2_CFG__PRE 0x50
+#define SIO_PDR_MD2_CFG_MODE__B 0
+#define SIO_PDR_MD2_CFG_MODE__W 3
+#define SIO_PDR_MD2_CFG_MODE__M 0x7
+#define SIO_PDR_MD2_CFG_MODE__PRE 0x0
+#define SIO_PDR_MD2_CFG_DRIVE__B 3
+#define SIO_PDR_MD2_CFG_DRIVE__W 3
+#define SIO_PDR_MD2_CFG_DRIVE__M 0x38
+#define SIO_PDR_MD2_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MD2_CFG_KEEP__B 6
+#define SIO_PDR_MD2_CFG_KEEP__W 2
+#define SIO_PDR_MD2_CFG_KEEP__M 0xC0
+#define SIO_PDR_MD2_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MD2_CFG_UIO__B 8
+#define SIO_PDR_MD2_CFG_UIO__W 1
+#define SIO_PDR_MD2_CFG_UIO__M 0x100
+#define SIO_PDR_MD2_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MD3_CFG__A 0x7F002D
+#define SIO_PDR_MD3_CFG__W 9
+#define SIO_PDR_MD3_CFG__M 0x1FF
+#define SIO_PDR_MD3_CFG__PRE 0x50
+#define SIO_PDR_MD3_CFG_MODE__B 0
+#define SIO_PDR_MD3_CFG_MODE__W 3
+#define SIO_PDR_MD3_CFG_MODE__M 0x7
+#define SIO_PDR_MD3_CFG_MODE__PRE 0x0
+#define SIO_PDR_MD3_CFG_DRIVE__B 3
+#define SIO_PDR_MD3_CFG_DRIVE__W 3
+#define SIO_PDR_MD3_CFG_DRIVE__M 0x38
+#define SIO_PDR_MD3_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MD3_CFG_KEEP__B 6
+#define SIO_PDR_MD3_CFG_KEEP__W 2
+#define SIO_PDR_MD3_CFG_KEEP__M 0xC0
+#define SIO_PDR_MD3_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MD3_CFG_UIO__B 8
+#define SIO_PDR_MD3_CFG_UIO__W 1
+#define SIO_PDR_MD3_CFG_UIO__M 0x100
+#define SIO_PDR_MD3_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MD4_CFG__A 0x7F002F
+#define SIO_PDR_MD4_CFG__W 9
+#define SIO_PDR_MD4_CFG__M 0x1FF
+#define SIO_PDR_MD4_CFG__PRE 0x50
+#define SIO_PDR_MD4_CFG_MODE__B 0
+#define SIO_PDR_MD4_CFG_MODE__W 3
+#define SIO_PDR_MD4_CFG_MODE__M 0x7
+#define SIO_PDR_MD4_CFG_MODE__PRE 0x0
+#define SIO_PDR_MD4_CFG_DRIVE__B 3
+#define SIO_PDR_MD4_CFG_DRIVE__W 3
+#define SIO_PDR_MD4_CFG_DRIVE__M 0x38
+#define SIO_PDR_MD4_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MD4_CFG_KEEP__B 6
+#define SIO_PDR_MD4_CFG_KEEP__W 2
+#define SIO_PDR_MD4_CFG_KEEP__M 0xC0
+#define SIO_PDR_MD4_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MD4_CFG_UIO__B 8
+#define SIO_PDR_MD4_CFG_UIO__W 1
+#define SIO_PDR_MD4_CFG_UIO__M 0x100
+#define SIO_PDR_MD4_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MD5_CFG__A 0x7F0030
+#define SIO_PDR_MD5_CFG__W 9
+#define SIO_PDR_MD5_CFG__M 0x1FF
+#define SIO_PDR_MD5_CFG__PRE 0x50
+#define SIO_PDR_MD5_CFG_MODE__B 0
+#define SIO_PDR_MD5_CFG_MODE__W 3
+#define SIO_PDR_MD5_CFG_MODE__M 0x7
+#define SIO_PDR_MD5_CFG_MODE__PRE 0x0
+#define SIO_PDR_MD5_CFG_DRIVE__B 3
+#define SIO_PDR_MD5_CFG_DRIVE__W 3
+#define SIO_PDR_MD5_CFG_DRIVE__M 0x38
+#define SIO_PDR_MD5_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MD5_CFG_KEEP__B 6
+#define SIO_PDR_MD5_CFG_KEEP__W 2
+#define SIO_PDR_MD5_CFG_KEEP__M 0xC0
+#define SIO_PDR_MD5_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MD5_CFG_UIO__B 8
+#define SIO_PDR_MD5_CFG_UIO__W 1
+#define SIO_PDR_MD5_CFG_UIO__M 0x100
+#define SIO_PDR_MD5_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MD6_CFG__A 0x7F0031
+#define SIO_PDR_MD6_CFG__W 9
+#define SIO_PDR_MD6_CFG__M 0x1FF
+#define SIO_PDR_MD6_CFG__PRE 0x50
+#define SIO_PDR_MD6_CFG_MODE__B 0
+#define SIO_PDR_MD6_CFG_MODE__W 3
+#define SIO_PDR_MD6_CFG_MODE__M 0x7
+#define SIO_PDR_MD6_CFG_MODE__PRE 0x0
+#define SIO_PDR_MD6_CFG_DRIVE__B 3
+#define SIO_PDR_MD6_CFG_DRIVE__W 3
+#define SIO_PDR_MD6_CFG_DRIVE__M 0x38
+#define SIO_PDR_MD6_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MD6_CFG_KEEP__B 6
+#define SIO_PDR_MD6_CFG_KEEP__W 2
+#define SIO_PDR_MD6_CFG_KEEP__M 0xC0
+#define SIO_PDR_MD6_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MD6_CFG_UIO__B 8
+#define SIO_PDR_MD6_CFG_UIO__W 1
+#define SIO_PDR_MD6_CFG_UIO__M 0x100
+#define SIO_PDR_MD6_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_MD7_CFG__A 0x7F0032
+#define SIO_PDR_MD7_CFG__W 9
+#define SIO_PDR_MD7_CFG__M 0x1FF
+#define SIO_PDR_MD7_CFG__PRE 0x50
+#define SIO_PDR_MD7_CFG_MODE__B 0
+#define SIO_PDR_MD7_CFG_MODE__W 3
+#define SIO_PDR_MD7_CFG_MODE__M 0x7
+#define SIO_PDR_MD7_CFG_MODE__PRE 0x0
+#define SIO_PDR_MD7_CFG_DRIVE__B 3
+#define SIO_PDR_MD7_CFG_DRIVE__W 3
+#define SIO_PDR_MD7_CFG_DRIVE__M 0x38
+#define SIO_PDR_MD7_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_MD7_CFG_KEEP__B 6
+#define SIO_PDR_MD7_CFG_KEEP__W 2
+#define SIO_PDR_MD7_CFG_KEEP__M 0xC0
+#define SIO_PDR_MD7_CFG_KEEP__PRE 0x40
+#define SIO_PDR_MD7_CFG_UIO__B 8
+#define SIO_PDR_MD7_CFG_UIO__W 1
+#define SIO_PDR_MD7_CFG_UIO__M 0x100
+#define SIO_PDR_MD7_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_I2C_SCL1_CFG__A 0x7F0033
+#define SIO_PDR_I2C_SCL1_CFG__W 9
+#define SIO_PDR_I2C_SCL1_CFG__M 0x1FF
+#define SIO_PDR_I2C_SCL1_CFG__PRE 0x11
+#define SIO_PDR_I2C_SCL1_CFG_MODE__B 0
+#define SIO_PDR_I2C_SCL1_CFG_MODE__W 3
+#define SIO_PDR_I2C_SCL1_CFG_MODE__M 0x7
+#define SIO_PDR_I2C_SCL1_CFG_MODE__PRE 0x1
+#define SIO_PDR_I2C_SCL1_CFG_DRIVE__B 3
+#define SIO_PDR_I2C_SCL1_CFG_DRIVE__W 3
+#define SIO_PDR_I2C_SCL1_CFG_DRIVE__M 0x38
+#define SIO_PDR_I2C_SCL1_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_I2C_SCL1_CFG_KEEP__B 6
+#define SIO_PDR_I2C_SCL1_CFG_KEEP__W 2
+#define SIO_PDR_I2C_SCL1_CFG_KEEP__M 0xC0
+#define SIO_PDR_I2C_SCL1_CFG_KEEP__PRE 0x0
+#define SIO_PDR_I2C_SCL1_CFG_UIO__B 8
+#define SIO_PDR_I2C_SCL1_CFG_UIO__W 1
+#define SIO_PDR_I2C_SCL1_CFG_UIO__M 0x100
+#define SIO_PDR_I2C_SCL1_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_I2C_SDA1_CFG__A 0x7F0034
+#define SIO_PDR_I2C_SDA1_CFG__W 9
+#define SIO_PDR_I2C_SDA1_CFG__M 0x1FF
+#define SIO_PDR_I2C_SDA1_CFG__PRE 0x11
+#define SIO_PDR_I2C_SDA1_CFG_MODE__B 0
+#define SIO_PDR_I2C_SDA1_CFG_MODE__W 3
+#define SIO_PDR_I2C_SDA1_CFG_MODE__M 0x7
+#define SIO_PDR_I2C_SDA1_CFG_MODE__PRE 0x1
+#define SIO_PDR_I2C_SDA1_CFG_DRIVE__B 3
+#define SIO_PDR_I2C_SDA1_CFG_DRIVE__W 3
+#define SIO_PDR_I2C_SDA1_CFG_DRIVE__M 0x38
+#define SIO_PDR_I2C_SDA1_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_I2C_SDA1_CFG_KEEP__B 6
+#define SIO_PDR_I2C_SDA1_CFG_KEEP__W 2
+#define SIO_PDR_I2C_SDA1_CFG_KEEP__M 0xC0
+#define SIO_PDR_I2C_SDA1_CFG_KEEP__PRE 0x0
+#define SIO_PDR_I2C_SDA1_CFG_UIO__B 8
+#define SIO_PDR_I2C_SDA1_CFG_UIO__W 1
+#define SIO_PDR_I2C_SDA1_CFG_UIO__M 0x100
+#define SIO_PDR_I2C_SDA1_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_VSYNC_CFG__A 0x7F0036
+#define SIO_PDR_VSYNC_CFG__W 9
+#define SIO_PDR_VSYNC_CFG__M 0x1FF
+#define SIO_PDR_VSYNC_CFG__PRE 0x10
+#define SIO_PDR_VSYNC_CFG_MODE__B 0
+#define SIO_PDR_VSYNC_CFG_MODE__W 3
+#define SIO_PDR_VSYNC_CFG_MODE__M 0x7
+#define SIO_PDR_VSYNC_CFG_MODE__PRE 0x0
+#define SIO_PDR_VSYNC_CFG_DRIVE__B 3
+#define SIO_PDR_VSYNC_CFG_DRIVE__W 3
+#define SIO_PDR_VSYNC_CFG_DRIVE__M 0x38
+#define SIO_PDR_VSYNC_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_VSYNC_CFG_KEEP__B 6
+#define SIO_PDR_VSYNC_CFG_KEEP__W 2
+#define SIO_PDR_VSYNC_CFG_KEEP__M 0xC0
+#define SIO_PDR_VSYNC_CFG_KEEP__PRE 0x0
+#define SIO_PDR_VSYNC_CFG_UIO__B 8
+#define SIO_PDR_VSYNC_CFG_UIO__W 1
+#define SIO_PDR_VSYNC_CFG_UIO__M 0x100
+#define SIO_PDR_VSYNC_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_SMA_RX_CFG__A 0x7F0037
+#define SIO_PDR_SMA_RX_CFG__W 9
+#define SIO_PDR_SMA_RX_CFG__M 0x1FF
+#define SIO_PDR_SMA_RX_CFG__PRE 0x10
+#define SIO_PDR_SMA_RX_CFG_MODE__B 0
+#define SIO_PDR_SMA_RX_CFG_MODE__W 3
+#define SIO_PDR_SMA_RX_CFG_MODE__M 0x7
+#define SIO_PDR_SMA_RX_CFG_MODE__PRE 0x0
+#define SIO_PDR_SMA_RX_CFG_DRIVE__B 3
+#define SIO_PDR_SMA_RX_CFG_DRIVE__W 3
+#define SIO_PDR_SMA_RX_CFG_DRIVE__M 0x38
+#define SIO_PDR_SMA_RX_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_SMA_RX_CFG_KEEP__B 6
+#define SIO_PDR_SMA_RX_CFG_KEEP__W 2
+#define SIO_PDR_SMA_RX_CFG_KEEP__M 0xC0
+#define SIO_PDR_SMA_RX_CFG_KEEP__PRE 0x0
+#define SIO_PDR_SMA_RX_CFG_UIO__B 8
+#define SIO_PDR_SMA_RX_CFG_UIO__W 1
+#define SIO_PDR_SMA_RX_CFG_UIO__M 0x100
+#define SIO_PDR_SMA_RX_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_SMA_TX_CFG__A 0x7F0038
+#define SIO_PDR_SMA_TX_CFG__W 9
+#define SIO_PDR_SMA_TX_CFG__M 0x1FF
+#define SIO_PDR_SMA_TX_CFG__PRE 0x90
+#define SIO_PDR_SMA_TX_CFG_MODE__B 0
+#define SIO_PDR_SMA_TX_CFG_MODE__W 3
+#define SIO_PDR_SMA_TX_CFG_MODE__M 0x7
+#define SIO_PDR_SMA_TX_CFG_MODE__PRE 0x0
+#define SIO_PDR_SMA_TX_CFG_DRIVE__B 3
+#define SIO_PDR_SMA_TX_CFG_DRIVE__W 3
+#define SIO_PDR_SMA_TX_CFG_DRIVE__M 0x38
+#define SIO_PDR_SMA_TX_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_SMA_TX_CFG_KEEP__B 6
+#define SIO_PDR_SMA_TX_CFG_KEEP__W 2
+#define SIO_PDR_SMA_TX_CFG_KEEP__M 0xC0
+#define SIO_PDR_SMA_TX_CFG_KEEP__PRE 0x80
+#define SIO_PDR_SMA_TX_CFG_UIO__B 8
+#define SIO_PDR_SMA_TX_CFG_UIO__W 1
+#define SIO_PDR_SMA_TX_CFG_UIO__M 0x100
+#define SIO_PDR_SMA_TX_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_I2C_SDA2_CFG__A 0x7F003F
+#define SIO_PDR_I2C_SDA2_CFG__W 9
+#define SIO_PDR_I2C_SDA2_CFG__M 0x1FF
+#define SIO_PDR_I2C_SDA2_CFG__PRE 0x11
+#define SIO_PDR_I2C_SDA2_CFG_MODE__B 0
+#define SIO_PDR_I2C_SDA2_CFG_MODE__W 3
+#define SIO_PDR_I2C_SDA2_CFG_MODE__M 0x7
+#define SIO_PDR_I2C_SDA2_CFG_MODE__PRE 0x1
+#define SIO_PDR_I2C_SDA2_CFG_DRIVE__B 3
+#define SIO_PDR_I2C_SDA2_CFG_DRIVE__W 3
+#define SIO_PDR_I2C_SDA2_CFG_DRIVE__M 0x38
+#define SIO_PDR_I2C_SDA2_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_I2C_SDA2_CFG_KEEP__B 6
+#define SIO_PDR_I2C_SDA2_CFG_KEEP__W 2
+#define SIO_PDR_I2C_SDA2_CFG_KEEP__M 0xC0
+#define SIO_PDR_I2C_SDA2_CFG_KEEP__PRE 0x0
+#define SIO_PDR_I2C_SDA2_CFG_UIO__B 8
+#define SIO_PDR_I2C_SDA2_CFG_UIO__W 1
+#define SIO_PDR_I2C_SDA2_CFG_UIO__M 0x100
+#define SIO_PDR_I2C_SDA2_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_I2C_SCL2_CFG__A 0x7F0040
+#define SIO_PDR_I2C_SCL2_CFG__W 9
+#define SIO_PDR_I2C_SCL2_CFG__M 0x1FF
+#define SIO_PDR_I2C_SCL2_CFG__PRE 0x11
+#define SIO_PDR_I2C_SCL2_CFG_MODE__B 0
+#define SIO_PDR_I2C_SCL2_CFG_MODE__W 3
+#define SIO_PDR_I2C_SCL2_CFG_MODE__M 0x7
+#define SIO_PDR_I2C_SCL2_CFG_MODE__PRE 0x1
+#define SIO_PDR_I2C_SCL2_CFG_DRIVE__B 3
+#define SIO_PDR_I2C_SCL2_CFG_DRIVE__W 3
+#define SIO_PDR_I2C_SCL2_CFG_DRIVE__M 0x38
+#define SIO_PDR_I2C_SCL2_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_I2C_SCL2_CFG_KEEP__B 6
+#define SIO_PDR_I2C_SCL2_CFG_KEEP__W 2
+#define SIO_PDR_I2C_SCL2_CFG_KEEP__M 0xC0
+#define SIO_PDR_I2C_SCL2_CFG_KEEP__PRE 0x0
+#define SIO_PDR_I2C_SCL2_CFG_UIO__B 8
+#define SIO_PDR_I2C_SCL2_CFG_UIO__W 1
+#define SIO_PDR_I2C_SCL2_CFG_UIO__M 0x100
+#define SIO_PDR_I2C_SCL2_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_I2S_CL_CFG__A 0x7F0041
+#define SIO_PDR_I2S_CL_CFG__W 9
+#define SIO_PDR_I2S_CL_CFG__M 0x1FF
+#define SIO_PDR_I2S_CL_CFG__PRE 0x10
+#define SIO_PDR_I2S_CL_CFG_MODE__B 0
+#define SIO_PDR_I2S_CL_CFG_MODE__W 3
+#define SIO_PDR_I2S_CL_CFG_MODE__M 0x7
+#define SIO_PDR_I2S_CL_CFG_MODE__PRE 0x0
+#define SIO_PDR_I2S_CL_CFG_DRIVE__B 3
+#define SIO_PDR_I2S_CL_CFG_DRIVE__W 3
+#define SIO_PDR_I2S_CL_CFG_DRIVE__M 0x38
+#define SIO_PDR_I2S_CL_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_I2S_CL_CFG_KEEP__B 6
+#define SIO_PDR_I2S_CL_CFG_KEEP__W 2
+#define SIO_PDR_I2S_CL_CFG_KEEP__M 0xC0
+#define SIO_PDR_I2S_CL_CFG_KEEP__PRE 0x0
+#define SIO_PDR_I2S_CL_CFG_UIO__B 8
+#define SIO_PDR_I2S_CL_CFG_UIO__W 1
+#define SIO_PDR_I2S_CL_CFG_UIO__M 0x100
+#define SIO_PDR_I2S_CL_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_I2S_DA_CFG__A 0x7F0042
+#define SIO_PDR_I2S_DA_CFG__W 9
+#define SIO_PDR_I2S_DA_CFG__M 0x1FF
+#define SIO_PDR_I2S_DA_CFG__PRE 0x10
+#define SIO_PDR_I2S_DA_CFG_MODE__B 0
+#define SIO_PDR_I2S_DA_CFG_MODE__W 3
+#define SIO_PDR_I2S_DA_CFG_MODE__M 0x7
+#define SIO_PDR_I2S_DA_CFG_MODE__PRE 0x0
+#define SIO_PDR_I2S_DA_CFG_DRIVE__B 3
+#define SIO_PDR_I2S_DA_CFG_DRIVE__W 3
+#define SIO_PDR_I2S_DA_CFG_DRIVE__M 0x38
+#define SIO_PDR_I2S_DA_CFG_DRIVE__PRE 0x10
+#define SIO_PDR_I2S_DA_CFG_KEEP__B 6
+#define SIO_PDR_I2S_DA_CFG_KEEP__W 2
+#define SIO_PDR_I2S_DA_CFG_KEEP__M 0xC0
+#define SIO_PDR_I2S_DA_CFG_KEEP__PRE 0x0
+#define SIO_PDR_I2S_DA_CFG_UIO__B 8
+#define SIO_PDR_I2S_DA_CFG_UIO__W 1
+#define SIO_PDR_I2S_DA_CFG_UIO__M 0x100
+#define SIO_PDR_I2S_DA_CFG_UIO__PRE 0x0
+
+#define SIO_PDR_GPIO_GPIO_FNC__A 0x7F0050
+#define SIO_PDR_GPIO_GPIO_FNC__W 2
+#define SIO_PDR_GPIO_GPIO_FNC__M 0x3
+#define SIO_PDR_GPIO_GPIO_FNC__PRE 0x0
+#define SIO_PDR_GPIO_GPIO_FNC_SEL__B 0
+#define SIO_PDR_GPIO_GPIO_FNC_SEL__W 2
+#define SIO_PDR_GPIO_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_GPIO_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_IRQN_GPIO_FNC__A 0x7F0051
+#define SIO_PDR_IRQN_GPIO_FNC__W 2
+#define SIO_PDR_IRQN_GPIO_FNC__M 0x3
+#define SIO_PDR_IRQN_GPIO_FNC__PRE 0x0
+#define SIO_PDR_IRQN_GPIO_FNC_SEL__B 0
+#define SIO_PDR_IRQN_GPIO_FNC_SEL__W 2
+#define SIO_PDR_IRQN_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_IRQN_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MSTRT_GPIO_FNC__A 0x7F0052
+#define SIO_PDR_MSTRT_GPIO_FNC__W 2
+#define SIO_PDR_MSTRT_GPIO_FNC__M 0x3
+#define SIO_PDR_MSTRT_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MSTRT_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MSTRT_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MSTRT_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MSTRT_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MERR_GPIO_FNC__A 0x7F0053
+#define SIO_PDR_MERR_GPIO_FNC__W 2
+#define SIO_PDR_MERR_GPIO_FNC__M 0x3
+#define SIO_PDR_MERR_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MERR_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MERR_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MERR_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MERR_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MCLK_GPIO_FNC__A 0x7F0054
+#define SIO_PDR_MCLK_GPIO_FNC__W 2
+#define SIO_PDR_MCLK_GPIO_FNC__M 0x3
+#define SIO_PDR_MCLK_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MCLK_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MCLK_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MCLK_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MCLK_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MVAL_GPIO_FNC__A 0x7F0055
+#define SIO_PDR_MVAL_GPIO_FNC__W 2
+#define SIO_PDR_MVAL_GPIO_FNC__M 0x3
+#define SIO_PDR_MVAL_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MVAL_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MVAL_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MVAL_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MVAL_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MD0_GPIO_FNC__A 0x7F0056
+#define SIO_PDR_MD0_GPIO_FNC__W 2
+#define SIO_PDR_MD0_GPIO_FNC__M 0x3
+#define SIO_PDR_MD0_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MD0_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MD0_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MD0_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MD0_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MD1_GPIO_FNC__A 0x7F0057
+#define SIO_PDR_MD1_GPIO_FNC__W 2
+#define SIO_PDR_MD1_GPIO_FNC__M 0x3
+#define SIO_PDR_MD1_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MD1_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MD1_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MD1_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MD1_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MD2_GPIO_FNC__A 0x7F0058
+#define SIO_PDR_MD2_GPIO_FNC__W 2
+#define SIO_PDR_MD2_GPIO_FNC__M 0x3
+#define SIO_PDR_MD2_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MD2_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MD2_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MD2_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MD2_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MD3_GPIO_FNC__A 0x7F0059
+#define SIO_PDR_MD3_GPIO_FNC__W 2
+#define SIO_PDR_MD3_GPIO_FNC__M 0x3
+#define SIO_PDR_MD3_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MD3_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MD3_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MD3_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MD3_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MD4_GPIO_FNC__A 0x7F005A
+#define SIO_PDR_MD4_GPIO_FNC__W 2
+#define SIO_PDR_MD4_GPIO_FNC__M 0x3
+#define SIO_PDR_MD4_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MD4_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MD4_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MD4_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MD4_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MD5_GPIO_FNC__A 0x7F005B
+#define SIO_PDR_MD5_GPIO_FNC__W 2
+#define SIO_PDR_MD5_GPIO_FNC__M 0x3
+#define SIO_PDR_MD5_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MD5_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MD5_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MD5_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MD5_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MD6_GPIO_FNC__A 0x7F005C
+#define SIO_PDR_MD6_GPIO_FNC__W 2
+#define SIO_PDR_MD6_GPIO_FNC__M 0x3
+#define SIO_PDR_MD6_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MD6_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MD6_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MD6_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MD6_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_MD7_GPIO_FNC__A 0x7F005D
+#define SIO_PDR_MD7_GPIO_FNC__W 2
+#define SIO_PDR_MD7_GPIO_FNC__M 0x3
+#define SIO_PDR_MD7_GPIO_FNC__PRE 0x0
+#define SIO_PDR_MD7_GPIO_FNC_SEL__B 0
+#define SIO_PDR_MD7_GPIO_FNC_SEL__W 2
+#define SIO_PDR_MD7_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_MD7_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_SMA_RX_GPIO_FNC__A 0x7F005E
+#define SIO_PDR_SMA_RX_GPIO_FNC__W 2
+#define SIO_PDR_SMA_RX_GPIO_FNC__M 0x3
+#define SIO_PDR_SMA_RX_GPIO_FNC__PRE 0x0
+#define SIO_PDR_SMA_RX_GPIO_FNC_SEL__B 0
+#define SIO_PDR_SMA_RX_GPIO_FNC_SEL__W 2
+#define SIO_PDR_SMA_RX_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_SMA_RX_GPIO_FNC_SEL__PRE 0x0
+
+#define SIO_PDR_SMA_TX_GPIO_FNC__A 0x7F005F
+#define SIO_PDR_SMA_TX_GPIO_FNC__W 2
+#define SIO_PDR_SMA_TX_GPIO_FNC__M 0x3
+#define SIO_PDR_SMA_TX_GPIO_FNC__PRE 0x0
+#define SIO_PDR_SMA_TX_GPIO_FNC_SEL__B 0
+#define SIO_PDR_SMA_TX_GPIO_FNC_SEL__W 2
+#define SIO_PDR_SMA_TX_GPIO_FNC_SEL__M 0x3
+#define SIO_PDR_SMA_TX_GPIO_FNC_SEL__PRE 0x0
+
+#define VSB_COMM_EXEC__A 0x1C00000
+#define VSB_COMM_EXEC__W 2
+#define VSB_COMM_EXEC__M 0x3
+#define VSB_COMM_EXEC__PRE 0x0
+#define VSB_COMM_EXEC_STOP 0x0
+#define VSB_COMM_EXEC_ACTIVE 0x1
+#define VSB_COMM_EXEC_HOLD 0x2
+
+#define VSB_COMM_MB__A 0x1C00002
+#define VSB_COMM_MB__W 16
+#define VSB_COMM_MB__M 0xFFFF
+#define VSB_COMM_MB__PRE 0x0
+#define VSB_COMM_INT_REQ__A 0x1C00003
+#define VSB_COMM_INT_REQ__W 1
+#define VSB_COMM_INT_REQ__M 0x1
+#define VSB_COMM_INT_REQ__PRE 0x0
+
+#define VSB_COMM_INT_REQ_TOP_INT_REQ__B 0
+#define VSB_COMM_INT_REQ_TOP_INT_REQ__W 1
+#define VSB_COMM_INT_REQ_TOP_INT_REQ__M 0x1
+#define VSB_COMM_INT_REQ_TOP_INT_REQ__PRE 0x0
+
+#define VSB_COMM_INT_STA__A 0x1C00005
+#define VSB_COMM_INT_STA__W 16
+#define VSB_COMM_INT_STA__M 0xFFFF
+#define VSB_COMM_INT_STA__PRE 0x0
+
+#define VSB_COMM_INT_MSK__A 0x1C00006
+#define VSB_COMM_INT_MSK__W 16
+#define VSB_COMM_INT_MSK__M 0xFFFF
+#define VSB_COMM_INT_MSK__PRE 0x0
+
+#define VSB_COMM_INT_STM__A 0x1C00007
+#define VSB_COMM_INT_STM__W 16
+#define VSB_COMM_INT_STM__M 0xFFFF
+#define VSB_COMM_INT_STM__PRE 0x0
+
+#define VSB_TOP_COMM_EXEC__A 0x1C10000
+#define VSB_TOP_COMM_EXEC__W 2
+#define VSB_TOP_COMM_EXEC__M 0x3
+#define VSB_TOP_COMM_EXEC__PRE 0x0
+#define VSB_TOP_COMM_EXEC_STOP 0x0
+#define VSB_TOP_COMM_EXEC_ACTIVE 0x1
+#define VSB_TOP_COMM_EXEC_HOLD 0x2
+
+#define VSB_TOP_COMM_MB__A 0x1C10002
+#define VSB_TOP_COMM_MB__W 10
+#define VSB_TOP_COMM_MB__M 0x3FF
+#define VSB_TOP_COMM_MB__PRE 0x0
+
+#define VSB_TOP_COMM_MB_CTL__B 0
+#define VSB_TOP_COMM_MB_CTL__W 1
+#define VSB_TOP_COMM_MB_CTL__M 0x1
+#define VSB_TOP_COMM_MB_CTL__PRE 0x0
+#define VSB_TOP_COMM_MB_CTL_CTL_OFF 0x0
+#define VSB_TOP_COMM_MB_CTL_CTL_ON 0x1
+
+#define VSB_TOP_COMM_MB_OBS__B 1
+#define VSB_TOP_COMM_MB_OBS__W 1
+#define VSB_TOP_COMM_MB_OBS__M 0x2
+#define VSB_TOP_COMM_MB_OBS__PRE 0x0
+#define VSB_TOP_COMM_MB_OBS_OBS_OFF 0x0
+#define VSB_TOP_COMM_MB_OBS_OBS_ON 0x2
+
+#define VSB_TOP_COMM_MB_MUX_CTL__B 2
+#define VSB_TOP_COMM_MB_MUX_CTL__W 4
+#define VSB_TOP_COMM_MB_MUX_CTL__M 0x3C
+#define VSB_TOP_COMM_MB_MUX_CTL__PRE 0x0
+
+#define VSB_TOP_COMM_MB_MUX_OBS__B 6
+#define VSB_TOP_COMM_MB_MUX_OBS__W 4
+#define VSB_TOP_COMM_MB_MUX_OBS__M 0x3C0
+#define VSB_TOP_COMM_MB_MUX_OBS__PRE 0x0
+#define VSB_TOP_COMM_MB_MUX_OBS_VSB_FEC 0x0
+#define VSB_TOP_COMM_MB_MUX_OBS_VSB_IQM 0x40
+#define VSB_TOP_COMM_MB_MUX_OBS_VSB_IQM_AMPLITUDE 0x80
+#define VSB_TOP_COMM_MB_MUX_OBS_VSB_TCMEQ_1 0xC0
+#define VSB_TOP_COMM_MB_MUX_OBS_VSB_TCMEQ_2 0x100
+#define VSB_TOP_COMM_MB_MUX_OBS_VSB_FFE_1 0x140
+#define VSB_TOP_COMM_MB_MUX_OBS_VSB_FFE_2 0x180
+#define VSB_TOP_COMM_MB_MUX_OBS_VSB_DFE_1 0x1C0
+#define VSB_TOP_COMM_MB_MUX_OBS_VSB_DFE_2 0x200
+
+#define VSB_TOP_COMM_INT_REQ__A 0x1C10003
+#define VSB_TOP_COMM_INT_REQ__W 1
+#define VSB_TOP_COMM_INT_REQ__M 0x1
+#define VSB_TOP_COMM_INT_REQ__PRE 0x0
+#define VSB_TOP_COMM_INT_STA__A 0x1C10005
+#define VSB_TOP_COMM_INT_STA__W 6
+#define VSB_TOP_COMM_INT_STA__M 0x3F
+#define VSB_TOP_COMM_INT_STA__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STA_FIELD_INT_STA__B 0
+#define VSB_TOP_COMM_INT_STA_FIELD_INT_STA__W 1
+#define VSB_TOP_COMM_INT_STA_FIELD_INT_STA__M 0x1
+#define VSB_TOP_COMM_INT_STA_FIELD_INT_STA__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STA_LOCK_STA__B 1
+#define VSB_TOP_COMM_INT_STA_LOCK_STA__W 1
+#define VSB_TOP_COMM_INT_STA_LOCK_STA__M 0x2
+#define VSB_TOP_COMM_INT_STA_LOCK_STA__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STA_UNLOCK_STA__B 2
+#define VSB_TOP_COMM_INT_STA_UNLOCK_STA__W 1
+#define VSB_TOP_COMM_INT_STA_UNLOCK_STA__M 0x4
+#define VSB_TOP_COMM_INT_STA_UNLOCK_STA__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STA_TAPREADER_STA__B 3
+#define VSB_TOP_COMM_INT_STA_TAPREADER_STA__W 1
+#define VSB_TOP_COMM_INT_STA_TAPREADER_STA__M 0x8
+#define VSB_TOP_COMM_INT_STA_TAPREADER_STA__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STA_SEGSYNCINTR_STA__B 4
+#define VSB_TOP_COMM_INT_STA_SEGSYNCINTR_STA__W 1
+#define VSB_TOP_COMM_INT_STA_SEGSYNCINTR_STA__M 0x10
+#define VSB_TOP_COMM_INT_STA_SEGSYNCINTR_STA__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STA_MERSER_STA__B 5
+#define VSB_TOP_COMM_INT_STA_MERSER_STA__W 1
+#define VSB_TOP_COMM_INT_STA_MERSER_STA__M 0x20
+#define VSB_TOP_COMM_INT_STA_MERSER_STA__PRE 0x0
+
+#define VSB_TOP_COMM_INT_MSK__A 0x1C10006
+#define VSB_TOP_COMM_INT_MSK__W 6
+#define VSB_TOP_COMM_INT_MSK__M 0x3F
+#define VSB_TOP_COMM_INT_MSK__PRE 0x0
+
+#define VSB_TOP_COMM_INT_MSK_FIELD_INT_MSK__B 0
+#define VSB_TOP_COMM_INT_MSK_FIELD_INT_MSK__W 1
+#define VSB_TOP_COMM_INT_MSK_FIELD_INT_MSK__M 0x1
+#define VSB_TOP_COMM_INT_MSK_FIELD_INT_MSK__PRE 0x0
+
+#define VSB_TOP_COMM_INT_MSK_LOCK_MSK__B 1
+#define VSB_TOP_COMM_INT_MSK_LOCK_MSK__W 1
+#define VSB_TOP_COMM_INT_MSK_LOCK_MSK__M 0x2
+#define VSB_TOP_COMM_INT_MSK_LOCK_MSK__PRE 0x0
+
+#define VSB_TOP_COMM_INT_MSK_UNLOCK_MSK__B 2
+#define VSB_TOP_COMM_INT_MSK_UNLOCK_MSK__W 1
+#define VSB_TOP_COMM_INT_MSK_UNLOCK_MSK__M 0x4
+#define VSB_TOP_COMM_INT_MSK_UNLOCK_MSK__PRE 0x0
+
+#define VSB_TOP_COMM_INT_MSK_TAPREADER_MSK__B 3
+#define VSB_TOP_COMM_INT_MSK_TAPREADER_MSK__W 1
+#define VSB_TOP_COMM_INT_MSK_TAPREADER_MSK__M 0x8
+#define VSB_TOP_COMM_INT_MSK_TAPREADER_MSK__PRE 0x0
+
+#define VSB_TOP_COMM_INT_MSK_SEGSYNCINTR_MSK__B 4
+#define VSB_TOP_COMM_INT_MSK_SEGSYNCINTR_MSK__W 1
+#define VSB_TOP_COMM_INT_MSK_SEGSYNCINTR_MSK__M 0x10
+#define VSB_TOP_COMM_INT_MSK_SEGSYNCINTR_MSK__PRE 0x0
+
+#define VSB_TOP_COMM_INT_MSK_MERSER_MSK__B 5
+#define VSB_TOP_COMM_INT_MSK_MERSER_MSK__W 1
+#define VSB_TOP_COMM_INT_MSK_MERSER_MSK__M 0x20
+#define VSB_TOP_COMM_INT_MSK_MERSER_MSK__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STM__A 0x1C10007
+#define VSB_TOP_COMM_INT_STM__W 6
+#define VSB_TOP_COMM_INT_STM__M 0x3F
+#define VSB_TOP_COMM_INT_STM__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STM_FIELD_INT_STM__B 0
+#define VSB_TOP_COMM_INT_STM_FIELD_INT_STM__W 1
+#define VSB_TOP_COMM_INT_STM_FIELD_INT_STM__M 0x1
+#define VSB_TOP_COMM_INT_STM_FIELD_INT_STM__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STM_LOCK_STM__B 1
+#define VSB_TOP_COMM_INT_STM_LOCK_STM__W 1
+#define VSB_TOP_COMM_INT_STM_LOCK_STM__M 0x2
+#define VSB_TOP_COMM_INT_STM_LOCK_STM__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STM_UNLOCK_STM__B 2
+#define VSB_TOP_COMM_INT_STM_UNLOCK_STM__W 1
+#define VSB_TOP_COMM_INT_STM_UNLOCK_STM__M 0x4
+#define VSB_TOP_COMM_INT_STM_UNLOCK_STM__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STM_TAPREADER_STM__B 3
+#define VSB_TOP_COMM_INT_STM_TAPREADER_STM__W 1
+#define VSB_TOP_COMM_INT_STM_TAPREADER_STM__M 0x8
+#define VSB_TOP_COMM_INT_STM_TAPREADER_STM__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STM_SEGSYNCINTR_STM__B 4
+#define VSB_TOP_COMM_INT_STM_SEGSYNCINTR_STM__W 1
+#define VSB_TOP_COMM_INT_STM_SEGSYNCINTR_STM__M 0x10
+#define VSB_TOP_COMM_INT_STM_SEGSYNCINTR_STM__PRE 0x0
+
+#define VSB_TOP_COMM_INT_STM_MERSER_STM__B 5
+#define VSB_TOP_COMM_INT_STM_MERSER_STM__W 1
+#define VSB_TOP_COMM_INT_STM_MERSER_STM__M 0x20
+#define VSB_TOP_COMM_INT_STM_MERSER_STM__PRE 0x0
+
+#define VSB_TOP_CKGN1ACQ__A 0x1C10010
+#define VSB_TOP_CKGN1ACQ__W 8
+#define VSB_TOP_CKGN1ACQ__M 0xFF
+#define VSB_TOP_CKGN1ACQ__PRE 0x4
+
+#define VSB_TOP_CKGN1TRK__A 0x1C10011
+#define VSB_TOP_CKGN1TRK__W 8
+#define VSB_TOP_CKGN1TRK__M 0xFF
+#define VSB_TOP_CKGN1TRK__PRE 0x0
+
+#define VSB_TOP_CKGN2ACQ__A 0x1C10012
+#define VSB_TOP_CKGN2ACQ__W 8
+#define VSB_TOP_CKGN2ACQ__M 0xFF
+#define VSB_TOP_CKGN2ACQ__PRE 0x2
+
+#define VSB_TOP_CKGN2TRK__A 0x1C10013
+#define VSB_TOP_CKGN2TRK__W 8
+#define VSB_TOP_CKGN2TRK__M 0xFF
+#define VSB_TOP_CKGN2TRK__PRE 0x1
+
+#define VSB_TOP_CKGN3__A 0x1C10014
+#define VSB_TOP_CKGN3__W 8
+#define VSB_TOP_CKGN3__M 0xFF
+#define VSB_TOP_CKGN3__PRE 0x5
+
+#define VSB_TOP_CYGN1ACQ__A 0x1C10015
+#define VSB_TOP_CYGN1ACQ__W 8
+#define VSB_TOP_CYGN1ACQ__M 0xFF
+#define VSB_TOP_CYGN1ACQ__PRE 0x3
+
+#define VSB_TOP_CYGN1TRK__A 0x1C10016
+#define VSB_TOP_CYGN1TRK__W 8
+#define VSB_TOP_CYGN1TRK__M 0xFF
+#define VSB_TOP_CYGN1TRK__PRE 0x0
+
+#define VSB_TOP_CYGN2ACQ__A 0x1C10017
+#define VSB_TOP_CYGN2ACQ__W 8
+#define VSB_TOP_CYGN2ACQ__M 0xFF
+#define VSB_TOP_CYGN2ACQ__PRE 0x3
+
+#define VSB_TOP_CYGN2TRK__A 0x1C10018
+#define VSB_TOP_CYGN2TRK__W 8
+#define VSB_TOP_CYGN2TRK__M 0xFF
+#define VSB_TOP_CYGN2TRK__PRE 0x2
+
+#define VSB_TOP_CYGN3__A 0x1C10019
+#define VSB_TOP_CYGN3__W 8
+#define VSB_TOP_CYGN3__M 0xFF
+#define VSB_TOP_CYGN3__PRE 0x6
+#define VSB_TOP_SYNCCTRLWORD__A 0x1C1001A
+#define VSB_TOP_SYNCCTRLWORD__W 5
+#define VSB_TOP_SYNCCTRLWORD__M 0x1F
+#define VSB_TOP_SYNCCTRLWORD__PRE 0x0
+
+#define VSB_TOP_SYNCCTRLWORD_PRST__B 0
+#define VSB_TOP_SYNCCTRLWORD_PRST__W 1
+#define VSB_TOP_SYNCCTRLWORD_PRST__M 0x1
+#define VSB_TOP_SYNCCTRLWORD_PRST__PRE 0x0
+
+#define VSB_TOP_SYNCCTRLWORD_DCFREEZ__B 1
+#define VSB_TOP_SYNCCTRLWORD_DCFREEZ__W 1
+#define VSB_TOP_SYNCCTRLWORD_DCFREEZ__M 0x2
+#define VSB_TOP_SYNCCTRLWORD_DCFREEZ__PRE 0x0
+
+#define VSB_TOP_SYNCCTRLWORD_INVCNST__B 2
+#define VSB_TOP_SYNCCTRLWORD_INVCNST__W 1
+#define VSB_TOP_SYNCCTRLWORD_INVCNST__M 0x4
+#define VSB_TOP_SYNCCTRLWORD_INVCNST__PRE 0x0
+
+#define VSB_TOP_SYNCCTRLWORD_CPUAGCRST__B 3
+#define VSB_TOP_SYNCCTRLWORD_CPUAGCRST__W 1
+#define VSB_TOP_SYNCCTRLWORD_CPUAGCRST__M 0x8
+#define VSB_TOP_SYNCCTRLWORD_CPUAGCRST__PRE 0x0
+
+#define VSB_TOP_SYNCCTRLWORD_AGCIGNOREFS__B 4
+#define VSB_TOP_SYNCCTRLWORD_AGCIGNOREFS__W 1
+#define VSB_TOP_SYNCCTRLWORD_AGCIGNOREFS__M 0x10
+#define VSB_TOP_SYNCCTRLWORD_AGCIGNOREFS__PRE 0x0
+
+#define VSB_TOP_MAINSMUP__A 0x1C1001B
+#define VSB_TOP_MAINSMUP__W 8
+#define VSB_TOP_MAINSMUP__M 0xFF
+#define VSB_TOP_MAINSMUP__PRE 0xFF
+
+#define VSB_TOP_EQSMUP__A 0x1C1001C
+#define VSB_TOP_EQSMUP__W 8
+#define VSB_TOP_EQSMUP__M 0xFF
+#define VSB_TOP_EQSMUP__PRE 0xFF
+#define VSB_TOP_SYSMUXCTRL__A 0x1C1001D
+#define VSB_TOP_SYSMUXCTRL__W 13
+#define VSB_TOP_SYSMUXCTRL__M 0x1FFF
+#define VSB_TOP_SYSMUXCTRL__PRE 0x0
+
+#define VSB_TOP_SYSMUXCTRL_CYLK_STATIC__B 0
+#define VSB_TOP_SYSMUXCTRL_CYLK_STATIC__W 1
+#define VSB_TOP_SYSMUXCTRL_CYLK_STATIC__M 0x1
+#define VSB_TOP_SYSMUXCTRL_CYLK_STATIC__PRE 0x0
+
+#define VSB_TOP_SYSMUXCTRL_CYLK_SEL_STATIC__B 1
+#define VSB_TOP_SYSMUXCTRL_CYLK_SEL_STATIC__W 1
+#define VSB_TOP_SYSMUXCTRL_CYLK_SEL_STATIC__M 0x2
+#define VSB_TOP_SYSMUXCTRL_CYLK_SEL_STATIC__PRE 0x0
+
+#define VSB_TOP_SYSMUXCTRL_CTCALDONE_STATIC__B 2
+#define VSB_TOP_SYSMUXCTRL_CTCALDONE_STATIC__W 1
+#define VSB_TOP_SYSMUXCTRL_CTCALDONE_STATIC__M 0x4
+#define VSB_TOP_SYSMUXCTRL_CTCALDONE_STATIC__PRE 0x0
+
+#define VSB_TOP_SYSMUXCTRL_CTCALDONE_SEL_STATIC__B 3
+#define VSB_TOP_SYSMUXCTRL_CTCALDONE_SEL_STATIC__W 1
+#define VSB_TOP_SYSMUXCTRL_CTCALDONE_SEL_STATIC__M 0x8
+#define VSB_TOP_SYSMUXCTRL_CTCALDONE_SEL_STATIC__PRE 0x0
+
+#define VSB_TOP_SYSMUXCTRL_FRAMELOCK_STATIC__B 4
+#define VSB_TOP_SYSMUXCTRL_FRAMELOCK_STATIC__W 1
+#define VSB_TOP_SYSMUXCTRL_FRAMELOCK_STATIC__M 0x10
+#define VSB_TOP_SYSMUXCTRL_FRAMELOCK_STATIC__PRE 0x0
+
+#define VSB_TOP_SYSMUXCTRL_FRAMELOCK_SEL_STATIC__B 5
+#define VSB_TOP_SYSMUXCTRL_FRAMELOCK_SEL_STATIC__W 1
+#define VSB_TOP_SYSMUXCTRL_FRAMELOCK_SEL_STATIC__M 0x20
+#define VSB_TOP_SYSMUXCTRL_FRAMELOCK_SEL_STATIC__PRE 0x0
+
+#define VSB_TOP_SYSMUXCTRL_FRAMESYNC_STATIC__B 6
+#define VSB_TOP_SYSMUXCTRL_FRAMESYNC_STATIC__W 1
+#define VSB_TOP_SYSMUXCTRL_FRAMESYNC_STATIC__M 0x40
+#define VSB_TOP_SYSMUXCTRL_FRAMESYNC_STATIC__PRE 0x0
+
+#define VSB_TOP_SYSMUXCTRL_FRAMESYNC_SEL_STATIC__B 7
+#define VSB_TOP_SYSMUXCTRL_FRAMESYNC_SEL_STATIC__W 1
+#define VSB_TOP_SYSMUXCTRL_FRAMESYNC_SEL_STATIC__M 0x80
+#define VSB_TOP_SYSMUXCTRL_FRAMESYNC_SEL_STATIC__PRE 0x0
+
+#define VSB_TOP_SYSMUXCTRL_SNROVTH_STATIC__B 8
+#define VSB_TOP_SYSMUXCTRL_SNROVTH_STATIC__W 4
+#define VSB_TOP_SYSMUXCTRL_SNROVTH_STATIC__M 0xF00
+#define VSB_TOP_SYSMUXCTRL_SNROVTH_STATIC__PRE 0x0
+
+#define VSB_TOP_SYSMUXCTRL_SNROVTH_SEL_STATIC__B 12
+#define VSB_TOP_SYSMUXCTRL_SNROVTH_SEL_STATIC__W 1
+#define VSB_TOP_SYSMUXCTRL_SNROVTH_SEL_STATIC__M 0x1000
+#define VSB_TOP_SYSMUXCTRL_SNROVTH_SEL_STATIC__PRE 0x0
+
+#define VSB_TOP_SNRTH_RCA1__A 0x1C1001E
+#define VSB_TOP_SNRTH_RCA1__W 8
+#define VSB_TOP_SNRTH_RCA1__M 0xFF
+#define VSB_TOP_SNRTH_RCA1__PRE 0x53
+
+#define VSB_TOP_SNRTH_RCA1_DN__B 0
+#define VSB_TOP_SNRTH_RCA1_DN__W 4
+#define VSB_TOP_SNRTH_RCA1_DN__M 0xF
+#define VSB_TOP_SNRTH_RCA1_DN__PRE 0x3
+
+#define VSB_TOP_SNRTH_RCA1_UP__B 4
+#define VSB_TOP_SNRTH_RCA1_UP__W 4
+#define VSB_TOP_SNRTH_RCA1_UP__M 0xF0
+#define VSB_TOP_SNRTH_RCA1_UP__PRE 0x50
+
+#define VSB_TOP_SNRTH_RCA2__A 0x1C1001F
+#define VSB_TOP_SNRTH_RCA2__W 8
+#define VSB_TOP_SNRTH_RCA2__M 0xFF
+#define VSB_TOP_SNRTH_RCA2__PRE 0x75
+
+#define VSB_TOP_SNRTH_RCA2_DN__B 0
+#define VSB_TOP_SNRTH_RCA2_DN__W 4
+#define VSB_TOP_SNRTH_RCA2_DN__M 0xF
+#define VSB_TOP_SNRTH_RCA2_DN__PRE 0x5
+
+#define VSB_TOP_SNRTH_RCA2_UP__B 4
+#define VSB_TOP_SNRTH_RCA2_UP__W 4
+#define VSB_TOP_SNRTH_RCA2_UP__M 0xF0
+#define VSB_TOP_SNRTH_RCA2_UP__PRE 0x70
+
+#define VSB_TOP_SNRTH_DDM1__A 0x1C10020
+#define VSB_TOP_SNRTH_DDM1__W 8
+#define VSB_TOP_SNRTH_DDM1__M 0xFF
+#define VSB_TOP_SNRTH_DDM1__PRE 0xCA
+
+#define VSB_TOP_SNRTH_DDM1_DN__B 0
+#define VSB_TOP_SNRTH_DDM1_DN__W 4
+#define VSB_TOP_SNRTH_DDM1_DN__M 0xF
+#define VSB_TOP_SNRTH_DDM1_DN__PRE 0xA
+
+#define VSB_TOP_SNRTH_DDM1_UP__B 4
+#define VSB_TOP_SNRTH_DDM1_UP__W 4
+#define VSB_TOP_SNRTH_DDM1_UP__M 0xF0
+#define VSB_TOP_SNRTH_DDM1_UP__PRE 0xC0
+
+#define VSB_TOP_SNRTH_DDM2__A 0x1C10021
+#define VSB_TOP_SNRTH_DDM2__W 8
+#define VSB_TOP_SNRTH_DDM2__M 0xFF
+#define VSB_TOP_SNRTH_DDM2__PRE 0xCA
+
+#define VSB_TOP_SNRTH_DDM2_DN__B 0
+#define VSB_TOP_SNRTH_DDM2_DN__W 4
+#define VSB_TOP_SNRTH_DDM2_DN__M 0xF
+#define VSB_TOP_SNRTH_DDM2_DN__PRE 0xA
+
+#define VSB_TOP_SNRTH_DDM2_UP__B 4
+#define VSB_TOP_SNRTH_DDM2_UP__W 4
+#define VSB_TOP_SNRTH_DDM2_UP__M 0xF0
+#define VSB_TOP_SNRTH_DDM2_UP__PRE 0xC0
+
+#define VSB_TOP_SNRTH_PT__A 0x1C10022
+#define VSB_TOP_SNRTH_PT__W 8
+#define VSB_TOP_SNRTH_PT__M 0xFF
+#define VSB_TOP_SNRTH_PT__PRE 0xD8
+
+#define VSB_TOP_SNRTH_PT_DN__B 0
+#define VSB_TOP_SNRTH_PT_DN__W 4
+#define VSB_TOP_SNRTH_PT_DN__M 0xF
+#define VSB_TOP_SNRTH_PT_DN__PRE 0x8
+
+#define VSB_TOP_SNRTH_PT_UP__B 4
+#define VSB_TOP_SNRTH_PT_UP__W 4
+#define VSB_TOP_SNRTH_PT_UP__M 0xF0
+#define VSB_TOP_SNRTH_PT_UP__PRE 0xD0
+
+#define VSB_TOP_CYSMSTATES__A 0x1C10023
+#define VSB_TOP_CYSMSTATES__W 8
+#define VSB_TOP_CYSMSTATES__M 0xFF
+#define VSB_TOP_CYSMSTATES__PRE 0x0
+
+#define VSB_TOP_CYSMSTATES_SYSST__B 0
+#define VSB_TOP_CYSMSTATES_SYSST__W 4
+#define VSB_TOP_CYSMSTATES_SYSST__M 0xF
+#define VSB_TOP_CYSMSTATES_SYSST__PRE 0x0
+
+#define VSB_TOP_CYSMSTATES_EQST__B 4
+#define VSB_TOP_CYSMSTATES_EQST__W 4
+#define VSB_TOP_CYSMSTATES_EQST__M 0xF0
+#define VSB_TOP_CYSMSTATES_EQST__PRE 0x0
+
+#define VSB_TOP_SMALL_NOTCH_CONTROL__A 0x1C10024
+#define VSB_TOP_SMALL_NOTCH_CONTROL__W 8
+#define VSB_TOP_SMALL_NOTCH_CONTROL__M 0xFF
+#define VSB_TOP_SMALL_NOTCH_CONTROL__PRE 0x0
+
+#define VSB_TOP_SMALL_NOTCH_CONTROL_GO__B 0
+#define VSB_TOP_SMALL_NOTCH_CONTROL_GO__W 1
+#define VSB_TOP_SMALL_NOTCH_CONTROL_GO__M 0x1
+#define VSB_TOP_SMALL_NOTCH_CONTROL_GO__PRE 0x0
+
+#define VSB_TOP_SMALL_NOTCH_CONTROL_BYPASS1__B 1
+#define VSB_TOP_SMALL_NOTCH_CONTROL_BYPASS1__W 1
+#define VSB_TOP_SMALL_NOTCH_CONTROL_BYPASS1__M 0x2
+#define VSB_TOP_SMALL_NOTCH_CONTROL_BYPASS1__PRE 0x0
+
+#define VSB_TOP_SMALL_NOTCH_CONTROL_BYPASS2__B 2
+#define VSB_TOP_SMALL_NOTCH_CONTROL_BYPASS2__W 1
+#define VSB_TOP_SMALL_NOTCH_CONTROL_BYPASS2__M 0x4
+#define VSB_TOP_SMALL_NOTCH_CONTROL_BYPASS2__PRE 0x0
+
+#define VSB_TOP_SMALL_NOTCH_CONTROL_SPARE__B 3
+#define VSB_TOP_SMALL_NOTCH_CONTROL_SPARE__W 4
+#define VSB_TOP_SMALL_NOTCH_CONTROL_SPARE__M 0x78
+#define VSB_TOP_SMALL_NOTCH_CONTROL_SPARE__PRE 0x0
+
+#define VSB_TOP_SMALL_NOTCH_CONTROL_SOFT_RESET__B 7
+#define VSB_TOP_SMALL_NOTCH_CONTROL_SOFT_RESET__W 1
+#define VSB_TOP_SMALL_NOTCH_CONTROL_SOFT_RESET__M 0x80
+#define VSB_TOP_SMALL_NOTCH_CONTROL_SOFT_RESET__PRE 0x0
+
+#define VSB_TOP_TAPREADCYC__A 0x1C10025
+#define VSB_TOP_TAPREADCYC__W 9
+#define VSB_TOP_TAPREADCYC__M 0x1FF
+#define VSB_TOP_TAPREADCYC__PRE 0x1
+
+#define VSB_TOP_VALIDPKLVL__A 0x1C10026
+#define VSB_TOP_VALIDPKLVL__W 13
+#define VSB_TOP_VALIDPKLVL__M 0x1FFF
+#define VSB_TOP_VALIDPKLVL__PRE 0x100
+
+#define VSB_TOP_CENTROID_FINE_DELAY__A 0x1C10027
+#define VSB_TOP_CENTROID_FINE_DELAY__W 10
+#define VSB_TOP_CENTROID_FINE_DELAY__M 0x3FF
+#define VSB_TOP_CENTROID_FINE_DELAY__PRE 0xFF
+
+#define VSB_TOP_CENTROID_SMACH_DELAY__A 0x1C10028
+#define VSB_TOP_CENTROID_SMACH_DELAY__W 10
+#define VSB_TOP_CENTROID_SMACH_DELAY__M 0x3FF
+#define VSB_TOP_CENTROID_SMACH_DELAY__PRE 0x1FF
+
+#define VSB_TOP_SNR__A 0x1C10029
+#define VSB_TOP_SNR__W 14
+#define VSB_TOP_SNR__M 0x3FFF
+#define VSB_TOP_SNR__PRE 0x0
+#define VSB_TOP_LOCKSTATUS__A 0x1C1002A
+#define VSB_TOP_LOCKSTATUS__W 7
+#define VSB_TOP_LOCKSTATUS__M 0x7F
+#define VSB_TOP_LOCKSTATUS__PRE 0x0
+
+#define VSB_TOP_LOCKSTATUS_VSBMODE__B 0
+#define VSB_TOP_LOCKSTATUS_VSBMODE__W 4
+#define VSB_TOP_LOCKSTATUS_VSBMODE__M 0xF
+#define VSB_TOP_LOCKSTATUS_VSBMODE__PRE 0x0
+
+#define VSB_TOP_LOCKSTATUS_FRMLOCK__B 4
+#define VSB_TOP_LOCKSTATUS_FRMLOCK__W 1
+#define VSB_TOP_LOCKSTATUS_FRMLOCK__M 0x10
+#define VSB_TOP_LOCKSTATUS_FRMLOCK__PRE 0x0
+
+#define VSB_TOP_LOCKSTATUS_CYLOCK__B 5
+#define VSB_TOP_LOCKSTATUS_CYLOCK__W 1
+#define VSB_TOP_LOCKSTATUS_CYLOCK__M 0x20
+#define VSB_TOP_LOCKSTATUS_CYLOCK__PRE 0x0
+
+#define VSB_TOP_LOCKSTATUS_DDMON__B 6
+#define VSB_TOP_LOCKSTATUS_DDMON__W 1
+#define VSB_TOP_LOCKSTATUS_DDMON__M 0x40
+#define VSB_TOP_LOCKSTATUS_DDMON__PRE 0x0
+
+#define VSB_TOP_CTST__A 0x1C1002B
+#define VSB_TOP_CTST__W 4
+#define VSB_TOP_CTST__M 0xF
+#define VSB_TOP_CTST__PRE 0x0
+#define VSB_TOP_EQSMRSTCTRL__A 0x1C1002C
+#define VSB_TOP_EQSMRSTCTRL__W 7
+#define VSB_TOP_EQSMRSTCTRL__M 0x7F
+#define VSB_TOP_EQSMRSTCTRL__PRE 0x0
+
+#define VSB_TOP_EQSMRSTCTRL_RCAON__B 0
+#define VSB_TOP_EQSMRSTCTRL_RCAON__W 1
+#define VSB_TOP_EQSMRSTCTRL_RCAON__M 0x1
+#define VSB_TOP_EQSMRSTCTRL_RCAON__PRE 0x0
+
+#define VSB_TOP_EQSMRSTCTRL_DFEON__B 1
+#define VSB_TOP_EQSMRSTCTRL_DFEON__W 1
+#define VSB_TOP_EQSMRSTCTRL_DFEON__M 0x2
+#define VSB_TOP_EQSMRSTCTRL_DFEON__PRE 0x0
+
+#define VSB_TOP_EQSMRSTCTRL_DDMEN1__B 2
+#define VSB_TOP_EQSMRSTCTRL_DDMEN1__W 1
+#define VSB_TOP_EQSMRSTCTRL_DDMEN1__M 0x4
+#define VSB_TOP_EQSMRSTCTRL_DDMEN1__PRE 0x0
+
+#define VSB_TOP_EQSMRSTCTRL_DDMEN2__B 3
+#define VSB_TOP_EQSMRSTCTRL_DDMEN2__W 1
+#define VSB_TOP_EQSMRSTCTRL_DDMEN2__M 0x8
+#define VSB_TOP_EQSMRSTCTRL_DDMEN2__PRE 0x0
+
+#define VSB_TOP_EQSMRSTCTRL_DIGIAGCON__B 4
+#define VSB_TOP_EQSMRSTCTRL_DIGIAGCON__W 1
+#define VSB_TOP_EQSMRSTCTRL_DIGIAGCON__M 0x10
+#define VSB_TOP_EQSMRSTCTRL_DIGIAGCON__PRE 0x0
+
+#define VSB_TOP_EQSMRSTCTRL_PARAINITEN__B 5
+#define VSB_TOP_EQSMRSTCTRL_PARAINITEN__W 1
+#define VSB_TOP_EQSMRSTCTRL_PARAINITEN__M 0x20
+#define VSB_TOP_EQSMRSTCTRL_PARAINITEN__PRE 0x0
+
+#define VSB_TOP_EQSMRSTCTRL_TIMEOUTFRMCNTEN__B 6
+#define VSB_TOP_EQSMRSTCTRL_TIMEOUTFRMCNTEN__W 1
+#define VSB_TOP_EQSMRSTCTRL_TIMEOUTFRMCNTEN__M 0x40
+#define VSB_TOP_EQSMRSTCTRL_TIMEOUTFRMCNTEN__PRE 0x0
+
+#define VSB_TOP_EQSMTRNCTRL__A 0x1C1002D
+#define VSB_TOP_EQSMTRNCTRL__W 7
+#define VSB_TOP_EQSMTRNCTRL__M 0x7F
+#define VSB_TOP_EQSMTRNCTRL__PRE 0x40
+
+#define VSB_TOP_EQSMTRNCTRL_RCAON__B 0
+#define VSB_TOP_EQSMTRNCTRL_RCAON__W 1
+#define VSB_TOP_EQSMTRNCTRL_RCAON__M 0x1
+#define VSB_TOP_EQSMTRNCTRL_RCAON__PRE 0x0
+
+#define VSB_TOP_EQSMTRNCTRL_DFEON__B 1
+#define VSB_TOP_EQSMTRNCTRL_DFEON__W 1
+#define VSB_TOP_EQSMTRNCTRL_DFEON__M 0x2
+#define VSB_TOP_EQSMTRNCTRL_DFEON__PRE 0x0
+
+#define VSB_TOP_EQSMTRNCTRL_DDMEN1__B 2
+#define VSB_TOP_EQSMTRNCTRL_DDMEN1__W 1
+#define VSB_TOP_EQSMTRNCTRL_DDMEN1__M 0x4
+#define VSB_TOP_EQSMTRNCTRL_DDMEN1__PRE 0x0
+
+#define VSB_TOP_EQSMTRNCTRL_DDMEN2__B 3
+#define VSB_TOP_EQSMTRNCTRL_DDMEN2__W 1
+#define VSB_TOP_EQSMTRNCTRL_DDMEN2__M 0x8
+#define VSB_TOP_EQSMTRNCTRL_DDMEN2__PRE 0x0
+
+#define VSB_TOP_EQSMTRNCTRL_DIGIAGCON__B 4
+#define VSB_TOP_EQSMTRNCTRL_DIGIAGCON__W 1
+#define VSB_TOP_EQSMTRNCTRL_DIGIAGCON__M 0x10
+#define VSB_TOP_EQSMTRNCTRL_DIGIAGCON__PRE 0x0
+
+#define VSB_TOP_EQSMTRNCTRL_PARAINITEN__B 5
+#define VSB_TOP_EQSMTRNCTRL_PARAINITEN__W 1
+#define VSB_TOP_EQSMTRNCTRL_PARAINITEN__M 0x20
+#define VSB_TOP_EQSMTRNCTRL_PARAINITEN__PRE 0x0
+
+#define VSB_TOP_EQSMTRNCTRL_TIMEOUTFRMCNTEN__B 6
+#define VSB_TOP_EQSMTRNCTRL_TIMEOUTFRMCNTEN__W 1
+#define VSB_TOP_EQSMTRNCTRL_TIMEOUTFRMCNTEN__M 0x40
+#define VSB_TOP_EQSMTRNCTRL_TIMEOUTFRMCNTEN__PRE 0x40
+
+#define VSB_TOP_EQSMRCA1CTRL__A 0x1C1002E
+#define VSB_TOP_EQSMRCA1CTRL__W 7
+#define VSB_TOP_EQSMRCA1CTRL__M 0x7F
+#define VSB_TOP_EQSMRCA1CTRL__PRE 0x1
+
+#define VSB_TOP_EQSMRCA1CTRL_RCAON__B 0
+#define VSB_TOP_EQSMRCA1CTRL_RCAON__W 1
+#define VSB_TOP_EQSMRCA1CTRL_RCAON__M 0x1
+#define VSB_TOP_EQSMRCA1CTRL_RCAON__PRE 0x1
+
+#define VSB_TOP_EQSMRCA1CTRL_DFEON__B 1
+#define VSB_TOP_EQSMRCA1CTRL_DFEON__W 1
+#define VSB_TOP_EQSMRCA1CTRL_DFEON__M 0x2
+#define VSB_TOP_EQSMRCA1CTRL_DFEON__PRE 0x0
+
+#define VSB_TOP_EQSMRCA1CTRL_DDMEN1__B 2
+#define VSB_TOP_EQSMRCA1CTRL_DDMEN1__W 1
+#define VSB_TOP_EQSMRCA1CTRL_DDMEN1__M 0x4
+#define VSB_TOP_EQSMRCA1CTRL_DDMEN1__PRE 0x0
+
+#define VSB_TOP_EQSMRCA1CTRL_DDMEN2__B 3
+#define VSB_TOP_EQSMRCA1CTRL_DDMEN2__W 1
+#define VSB_TOP_EQSMRCA1CTRL_DDMEN2__M 0x8
+#define VSB_TOP_EQSMRCA1CTRL_DDMEN2__PRE 0x0
+
+#define VSB_TOP_EQSMRCA1CTRL_DIGIAGCON__B 4
+#define VSB_TOP_EQSMRCA1CTRL_DIGIAGCON__W 1
+#define VSB_TOP_EQSMRCA1CTRL_DIGIAGCON__M 0x10
+#define VSB_TOP_EQSMRCA1CTRL_DIGIAGCON__PRE 0x0
+
+#define VSB_TOP_EQSMRCA1CTRL_PARAINITEN__B 5
+#define VSB_TOP_EQSMRCA1CTRL_PARAINITEN__W 1
+#define VSB_TOP_EQSMRCA1CTRL_PARAINITEN__M 0x20
+#define VSB_TOP_EQSMRCA1CTRL_PARAINITEN__PRE 0x0
+
+#define VSB_TOP_EQSMRCA1CTRL_TIMEOUTFRMCNTEN__B 6
+#define VSB_TOP_EQSMRCA1CTRL_TIMEOUTFRMCNTEN__W 1
+#define VSB_TOP_EQSMRCA1CTRL_TIMEOUTFRMCNTEN__M 0x40
+#define VSB_TOP_EQSMRCA1CTRL_TIMEOUTFRMCNTEN__PRE 0x0
+
+#define VSB_TOP_EQSMRCA2CTRL__A 0x1C1002F
+#define VSB_TOP_EQSMRCA2CTRL__W 7
+#define VSB_TOP_EQSMRCA2CTRL__M 0x7F
+#define VSB_TOP_EQSMRCA2CTRL__PRE 0x3
+
+#define VSB_TOP_EQSMRCA2CTRL_RCAON__B 0
+#define VSB_TOP_EQSMRCA2CTRL_RCAON__W 1
+#define VSB_TOP_EQSMRCA2CTRL_RCAON__M 0x1
+#define VSB_TOP_EQSMRCA2CTRL_RCAON__PRE 0x1
+
+#define VSB_TOP_EQSMRCA2CTRL_DFEON__B 1
+#define VSB_TOP_EQSMRCA2CTRL_DFEON__W 1
+#define VSB_TOP_EQSMRCA2CTRL_DFEON__M 0x2
+#define VSB_TOP_EQSMRCA2CTRL_DFEON__PRE 0x2
+
+#define VSB_TOP_EQSMRCA2CTRL_DDMEN1__B 2
+#define VSB_TOP_EQSMRCA2CTRL_DDMEN1__W 1
+#define VSB_TOP_EQSMRCA2CTRL_DDMEN1__M 0x4
+#define VSB_TOP_EQSMRCA2CTRL_DDMEN1__PRE 0x0
+
+#define VSB_TOP_EQSMRCA2CTRL_DDMEN2__B 3
+#define VSB_TOP_EQSMRCA2CTRL_DDMEN2__W 1
+#define VSB_TOP_EQSMRCA2CTRL_DDMEN2__M 0x8
+#define VSB_TOP_EQSMRCA2CTRL_DDMEN2__PRE 0x0
+
+#define VSB_TOP_EQSMRCA2CTRL_DIGIAGCON__B 4
+#define VSB_TOP_EQSMRCA2CTRL_DIGIAGCON__W 1
+#define VSB_TOP_EQSMRCA2CTRL_DIGIAGCON__M 0x10
+#define VSB_TOP_EQSMRCA2CTRL_DIGIAGCON__PRE 0x0
+
+#define VSB_TOP_EQSMRCA2CTRL_PARAINITEN__B 5
+#define VSB_TOP_EQSMRCA2CTRL_PARAINITEN__W 1
+#define VSB_TOP_EQSMRCA2CTRL_PARAINITEN__M 0x20
+#define VSB_TOP_EQSMRCA2CTRL_PARAINITEN__PRE 0x0
+
+#define VSB_TOP_EQSMRCA2CTRL_TIMEOUTFRMCNTEN__B 6
+#define VSB_TOP_EQSMRCA2CTRL_TIMEOUTFRMCNTEN__W 1
+#define VSB_TOP_EQSMRCA2CTRL_TIMEOUTFRMCNTEN__M 0x40
+#define VSB_TOP_EQSMRCA2CTRL_TIMEOUTFRMCNTEN__PRE 0x0
+
+#define VSB_TOP_EQSMDDM1CTRL__A 0x1C10030
+#define VSB_TOP_EQSMDDM1CTRL__W 7
+#define VSB_TOP_EQSMDDM1CTRL__M 0x7F
+#define VSB_TOP_EQSMDDM1CTRL__PRE 0x6
+
+#define VSB_TOP_EQSMDDM1CTRL_RCAON__B 0
+#define VSB_TOP_EQSMDDM1CTRL_RCAON__W 1
+#define VSB_TOP_EQSMDDM1CTRL_RCAON__M 0x1
+#define VSB_TOP_EQSMDDM1CTRL_RCAON__PRE 0x0
+
+#define VSB_TOP_EQSMDDM1CTRL_DFEON__B 1
+#define VSB_TOP_EQSMDDM1CTRL_DFEON__W 1
+#define VSB_TOP_EQSMDDM1CTRL_DFEON__M 0x2
+#define VSB_TOP_EQSMDDM1CTRL_DFEON__PRE 0x2
+
+#define VSB_TOP_EQSMDDM1CTRL_DDMEN1__B 2
+#define VSB_TOP_EQSMDDM1CTRL_DDMEN1__W 1
+#define VSB_TOP_EQSMDDM1CTRL_DDMEN1__M 0x4
+#define VSB_TOP_EQSMDDM1CTRL_DDMEN1__PRE 0x4
+
+#define VSB_TOP_EQSMDDM1CTRL_DDMEN2__B 3
+#define VSB_TOP_EQSMDDM1CTRL_DDMEN2__W 1
+#define VSB_TOP_EQSMDDM1CTRL_DDMEN2__M 0x8
+#define VSB_TOP_EQSMDDM1CTRL_DDMEN2__PRE 0x0
+
+#define VSB_TOP_EQSMDDM1CTRL_DIGIAGCON__B 4
+#define VSB_TOP_EQSMDDM1CTRL_DIGIAGCON__W 1
+#define VSB_TOP_EQSMDDM1CTRL_DIGIAGCON__M 0x10
+#define VSB_TOP_EQSMDDM1CTRL_DIGIAGCON__PRE 0x0
+
+#define VSB_TOP_EQSMDDM1CTRL_PARAINITEN__B 5
+#define VSB_TOP_EQSMDDM1CTRL_PARAINITEN__W 1
+#define VSB_TOP_EQSMDDM1CTRL_PARAINITEN__M 0x20
+#define VSB_TOP_EQSMDDM1CTRL_PARAINITEN__PRE 0x0
+
+#define VSB_TOP_EQSMDDM1CTRL_TIMEOUTFRMCNTEN__B 6
+#define VSB_TOP_EQSMDDM1CTRL_TIMEOUTFRMCNTEN__W 1
+#define VSB_TOP_EQSMDDM1CTRL_TIMEOUTFRMCNTEN__M 0x40
+#define VSB_TOP_EQSMDDM1CTRL_TIMEOUTFRMCNTEN__PRE 0x0
+
+#define VSB_TOP_EQSMDDM2CTRL__A 0x1C10031
+#define VSB_TOP_EQSMDDM2CTRL__W 7
+#define VSB_TOP_EQSMDDM2CTRL__M 0x7F
+#define VSB_TOP_EQSMDDM2CTRL__PRE 0x1E
+
+#define VSB_TOP_EQSMDDM2CTRL_RCAON__B 0
+#define VSB_TOP_EQSMDDM2CTRL_RCAON__W 1
+#define VSB_TOP_EQSMDDM2CTRL_RCAON__M 0x1
+#define VSB_TOP_EQSMDDM2CTRL_RCAON__PRE 0x0
+
+#define VSB_TOP_EQSMDDM2CTRL_DFEON__B 1
+#define VSB_TOP_EQSMDDM2CTRL_DFEON__W 1
+#define VSB_TOP_EQSMDDM2CTRL_DFEON__M 0x2
+#define VSB_TOP_EQSMDDM2CTRL_DFEON__PRE 0x2
+
+#define VSB_TOP_EQSMDDM2CTRL_DDMEN1__B 2
+#define VSB_TOP_EQSMDDM2CTRL_DDMEN1__W 1
+#define VSB_TOP_EQSMDDM2CTRL_DDMEN1__M 0x4
+#define VSB_TOP_EQSMDDM2CTRL_DDMEN1__PRE 0x4
+
+#define VSB_TOP_EQSMDDM2CTRL_DDMEN2__B 3
+#define VSB_TOP_EQSMDDM2CTRL_DDMEN2__W 1
+#define VSB_TOP_EQSMDDM2CTRL_DDMEN2__M 0x8
+#define VSB_TOP_EQSMDDM2CTRL_DDMEN2__PRE 0x8
+
+#define VSB_TOP_EQSMDDM2CTRL_DIGIAGCON__B 4
+#define VSB_TOP_EQSMDDM2CTRL_DIGIAGCON__W 1
+#define VSB_TOP_EQSMDDM2CTRL_DIGIAGCON__M 0x10
+#define VSB_TOP_EQSMDDM2CTRL_DIGIAGCON__PRE 0x10
+
+#define VSB_TOP_EQSMDDM2CTRL_PARAINITEN__B 5
+#define VSB_TOP_EQSMDDM2CTRL_PARAINITEN__W 1
+#define VSB_TOP_EQSMDDM2CTRL_PARAINITEN__M 0x20
+#define VSB_TOP_EQSMDDM2CTRL_PARAINITEN__PRE 0x0
+
+#define VSB_TOP_EQSMDDM2CTRL_TIMEOUTFRMCNTEN__B 6
+#define VSB_TOP_EQSMDDM2CTRL_TIMEOUTFRMCNTEN__W 1
+#define VSB_TOP_EQSMDDM2CTRL_TIMEOUTFRMCNTEN__M 0x40
+#define VSB_TOP_EQSMDDM2CTRL_TIMEOUTFRMCNTEN__PRE 0x0
+
+#define VSB_TOP_SYSSMRSTCTRL__A 0x1C10032
+#define VSB_TOP_SYSSMRSTCTRL__W 11
+#define VSB_TOP_SYSSMRSTCTRL__M 0x7FF
+#define VSB_TOP_SYSSMRSTCTRL__PRE 0x7F9
+
+#define VSB_TOP_SYSSMRSTCTRL_RSTCTCAL__B 0
+#define VSB_TOP_SYSSMRSTCTRL_RSTCTCAL__W 1
+#define VSB_TOP_SYSSMRSTCTRL_RSTCTCAL__M 0x1
+#define VSB_TOP_SYSSMRSTCTRL_RSTCTCAL__PRE 0x1
+
+#define VSB_TOP_SYSSMRSTCTRL_CTCALEN__B 1
+#define VSB_TOP_SYSSMRSTCTRL_CTCALEN__W 1
+#define VSB_TOP_SYSSMRSTCTRL_CTCALEN__M 0x2
+#define VSB_TOP_SYSSMRSTCTRL_CTCALEN__PRE 0x0
+
+#define VSB_TOP_SYSSMRSTCTRL_STARTTRN__B 2
+#define VSB_TOP_SYSSMRSTCTRL_STARTTRN__W 1
+#define VSB_TOP_SYSSMRSTCTRL_STARTTRN__M 0x4
+#define VSB_TOP_SYSSMRSTCTRL_STARTTRN__PRE 0x0
+
+#define VSB_TOP_SYSSMRSTCTRL_RSTFRMSYNCDET__B 3
+#define VSB_TOP_SYSSMRSTCTRL_RSTFRMSYNCDET__W 1
+#define VSB_TOP_SYSSMRSTCTRL_RSTFRMSYNCDET__M 0x8
+#define VSB_TOP_SYSSMRSTCTRL_RSTFRMSYNCDET__PRE 0x8
+
+#define VSB_TOP_SYSSMRSTCTRL_RSTCYDET__B 4
+#define VSB_TOP_SYSSMRSTCTRL_RSTCYDET__W 1
+#define VSB_TOP_SYSSMRSTCTRL_RSTCYDET__M 0x10
+#define VSB_TOP_SYSSMRSTCTRL_RSTCYDET__PRE 0x10
+
+#define VSB_TOP_SYSSMRSTCTRL_RSTDCRMV__B 5
+#define VSB_TOP_SYSSMRSTCTRL_RSTDCRMV__W 1
+#define VSB_TOP_SYSSMRSTCTRL_RSTDCRMV__M 0x20
+#define VSB_TOP_SYSSMRSTCTRL_RSTDCRMV__PRE 0x20
+
+#define VSB_TOP_SYSSMRSTCTRL_RSTEQSIG__B 6
+#define VSB_TOP_SYSSMRSTCTRL_RSTEQSIG__W 1
+#define VSB_TOP_SYSSMRSTCTRL_RSTEQSIG__M 0x40
+#define VSB_TOP_SYSSMRSTCTRL_RSTEQSIG__PRE 0x40
+
+#define VSB_TOP_SYSSMRSTCTRL_CKFRZ__B 7
+#define VSB_TOP_SYSSMRSTCTRL_CKFRZ__W 1
+#define VSB_TOP_SYSSMRSTCTRL_CKFRZ__M 0x80
+#define VSB_TOP_SYSSMRSTCTRL_CKFRZ__PRE 0x80
+
+#define VSB_TOP_SYSSMRSTCTRL_CKBWSW__B 8
+#define VSB_TOP_SYSSMRSTCTRL_CKBWSW__W 1
+#define VSB_TOP_SYSSMRSTCTRL_CKBWSW__M 0x100
+#define VSB_TOP_SYSSMRSTCTRL_CKBWSW__PRE 0x100
+
+#define VSB_TOP_SYSSMRSTCTRL_NCOBWSW__B 9
+#define VSB_TOP_SYSSMRSTCTRL_NCOBWSW__W 1
+#define VSB_TOP_SYSSMRSTCTRL_NCOBWSW__M 0x200
+#define VSB_TOP_SYSSMRSTCTRL_NCOBWSW__PRE 0x200
+
+#define VSB_TOP_SYSSMRSTCTRL_NCOTIMEOUTCNTEN__B 10
+#define VSB_TOP_SYSSMRSTCTRL_NCOTIMEOUTCNTEN__W 1
+#define VSB_TOP_SYSSMRSTCTRL_NCOTIMEOUTCNTEN__M 0x400
+#define VSB_TOP_SYSSMRSTCTRL_NCOTIMEOUTCNTEN__PRE 0x400
+
+#define VSB_TOP_SYSSMCYCTRL__A 0x1C10033
+#define VSB_TOP_SYSSMCYCTRL__W 11
+#define VSB_TOP_SYSSMCYCTRL__M 0x7FF
+#define VSB_TOP_SYSSMCYCTRL__PRE 0x4E9
+
+#define VSB_TOP_SYSSMCYCTRL_RSTCTCAL__B 0
+#define VSB_TOP_SYSSMCYCTRL_RSTCTCAL__W 1
+#define VSB_TOP_SYSSMCYCTRL_RSTCTCAL__M 0x1
+#define VSB_TOP_SYSSMCYCTRL_RSTCTCAL__PRE 0x1
+
+#define VSB_TOP_SYSSMCYCTRL_CTCALEN__B 1
+#define VSB_TOP_SYSSMCYCTRL_CTCALEN__W 1
+#define VSB_TOP_SYSSMCYCTRL_CTCALEN__M 0x2
+#define VSB_TOP_SYSSMCYCTRL_CTCALEN__PRE 0x0
+
+#define VSB_TOP_SYSSMCYCTRL_STARTTRN__B 2
+#define VSB_TOP_SYSSMCYCTRL_STARTTRN__W 1
+#define VSB_TOP_SYSSMCYCTRL_STARTTRN__M 0x4
+#define VSB_TOP_SYSSMCYCTRL_STARTTRN__PRE 0x0
+
+#define VSB_TOP_SYSSMCYCTRL_RSTFRMSYNCDET__B 3
+#define VSB_TOP_SYSSMCYCTRL_RSTFRMSYNCDET__W 1
+#define VSB_TOP_SYSSMCYCTRL_RSTFRMSYNCDET__M 0x8
+#define VSB_TOP_SYSSMCYCTRL_RSTFRMSYNCDET__PRE 0x8
+
+#define VSB_TOP_SYSSMCYCTRL_RSTCYDET__B 4
+#define VSB_TOP_SYSSMCYCTRL_RSTCYDET__W 1
+#define VSB_TOP_SYSSMCYCTRL_RSTCYDET__M 0x10
+#define VSB_TOP_SYSSMCYCTRL_RSTCYDET__PRE 0x0
+
+#define VSB_TOP_SYSSMCYCTRL_RSTDCRMV__B 5
+#define VSB_TOP_SYSSMCYCTRL_RSTDCRMV__W 1
+#define VSB_TOP_SYSSMCYCTRL_RSTDCRMV__M 0x20
+#define VSB_TOP_SYSSMCYCTRL_RSTDCRMV__PRE 0x20
+
+#define VSB_TOP_SYSSMCYCTRL_RSTEQSIG__B 6
+#define VSB_TOP_SYSSMCYCTRL_RSTEQSIG__W 1
+#define VSB_TOP_SYSSMCYCTRL_RSTEQSIG__M 0x40
+#define VSB_TOP_SYSSMCYCTRL_RSTEQSIG__PRE 0x40
+
+#define VSB_TOP_SYSSMCYCTRL_CKFRZ__B 7
+#define VSB_TOP_SYSSMCYCTRL_CKFRZ__W 1
+#define VSB_TOP_SYSSMCYCTRL_CKFRZ__M 0x80
+#define VSB_TOP_SYSSMCYCTRL_CKFRZ__PRE 0x80
+
+#define VSB_TOP_SYSSMCYCTRL_CKBWSW__B 8
+#define VSB_TOP_SYSSMCYCTRL_CKBWSW__W 1
+#define VSB_TOP_SYSSMCYCTRL_CKBWSW__M 0x100
+#define VSB_TOP_SYSSMCYCTRL_CKBWSW__PRE 0x0
+
+#define VSB_TOP_SYSSMCYCTRL_NCOBWSW__B 9
+#define VSB_TOP_SYSSMCYCTRL_NCOBWSW__W 1
+#define VSB_TOP_SYSSMCYCTRL_NCOBWSW__M 0x200
+#define VSB_TOP_SYSSMCYCTRL_NCOBWSW__PRE 0x0
+
+#define VSB_TOP_SYSSMCYCTRL_NCOTIMEOUTCNTEN__B 10
+#define VSB_TOP_SYSSMCYCTRL_NCOTIMEOUTCNTEN__W 1
+#define VSB_TOP_SYSSMCYCTRL_NCOTIMEOUTCNTEN__M 0x400
+#define VSB_TOP_SYSSMCYCTRL_NCOTIMEOUTCNTEN__PRE 0x400
+
+#define VSB_TOP_SYSSMTRNCTRL__A 0x1C10034
+#define VSB_TOP_SYSSMTRNCTRL__W 11
+#define VSB_TOP_SYSSMTRNCTRL__M 0x7FF
+#define VSB_TOP_SYSSMTRNCTRL__PRE 0x204
+
+#define VSB_TOP_SYSSMTRNCTRL_RSTCTCAL__B 0
+#define VSB_TOP_SYSSMTRNCTRL_RSTCTCAL__W 1
+#define VSB_TOP_SYSSMTRNCTRL_RSTCTCAL__M 0x1
+#define VSB_TOP_SYSSMTRNCTRL_RSTCTCAL__PRE 0x0
+
+#define VSB_TOP_SYSSMTRNCTRL_CTCALEN__B 1
+#define VSB_TOP_SYSSMTRNCTRL_CTCALEN__W 1
+#define VSB_TOP_SYSSMTRNCTRL_CTCALEN__M 0x2
+#define VSB_TOP_SYSSMTRNCTRL_CTCALEN__PRE 0x0
+
+#define VSB_TOP_SYSSMTRNCTRL_STARTTRN__B 2
+#define VSB_TOP_SYSSMTRNCTRL_STARTTRN__W 1
+#define VSB_TOP_SYSSMTRNCTRL_STARTTRN__M 0x4
+#define VSB_TOP_SYSSMTRNCTRL_STARTTRN__PRE 0x4
+
+#define VSB_TOP_SYSSMTRNCTRL_RSTFRMSYNCDET__B 3
+#define VSB_TOP_SYSSMTRNCTRL_RSTFRMSYNCDET__W 1
+#define VSB_TOP_SYSSMTRNCTRL_RSTFRMSYNCDET__M 0x8
+#define VSB_TOP_SYSSMTRNCTRL_RSTFRMSYNCDET__PRE 0x0
+
+#define VSB_TOP_SYSSMTRNCTRL_RSTCYDET__B 4
+#define VSB_TOP_SYSSMTRNCTRL_RSTCYDET__W 1
+#define VSB_TOP_SYSSMTRNCTRL_RSTCYDET__M 0x10
+#define VSB_TOP_SYSSMTRNCTRL_RSTCYDET__PRE 0x0
+
+#define VSB_TOP_SYSSMTRNCTRL_RSTDCRMV__B 5
+#define VSB_TOP_SYSSMTRNCTRL_RSTDCRMV__W 1
+#define VSB_TOP_SYSSMTRNCTRL_RSTDCRMV__M 0x20
+#define VSB_TOP_SYSSMTRNCTRL_RSTDCRMV__PRE 0x0
+
+#define VSB_TOP_SYSSMTRNCTRL_RSTEQSIG__B 6
+#define VSB_TOP_SYSSMTRNCTRL_RSTEQSIG__W 1
+#define VSB_TOP_SYSSMTRNCTRL_RSTEQSIG__M 0x40
+#define VSB_TOP_SYSSMTRNCTRL_RSTEQSIG__PRE 0x0
+
+#define VSB_TOP_SYSSMTRNCTRL_CKFRZ__B 7
+#define VSB_TOP_SYSSMTRNCTRL_CKFRZ__W 1
+#define VSB_TOP_SYSSMTRNCTRL_CKFRZ__M 0x80
+#define VSB_TOP_SYSSMTRNCTRL_CKFRZ__PRE 0x0
+
+#define VSB_TOP_SYSSMTRNCTRL_CKBWSW__B 8
+#define VSB_TOP_SYSSMTRNCTRL_CKBWSW__W 1
+#define VSB_TOP_SYSSMTRNCTRL_CKBWSW__M 0x100
+#define VSB_TOP_SYSSMTRNCTRL_CKBWSW__PRE 0x0
+
+#define VSB_TOP_SYSSMTRNCTRL_NCOBWSW__B 9
+#define VSB_TOP_SYSSMTRNCTRL_NCOBWSW__W 1
+#define VSB_TOP_SYSSMTRNCTRL_NCOBWSW__M 0x200
+#define VSB_TOP_SYSSMTRNCTRL_NCOBWSW__PRE 0x200
+
+#define VSB_TOP_SYSSMTRNCTRL_NCOTIMEOUTCNTEN__B 10
+#define VSB_TOP_SYSSMTRNCTRL_NCOTIMEOUTCNTEN__W 1
+#define VSB_TOP_SYSSMTRNCTRL_NCOTIMEOUTCNTEN__M 0x400
+#define VSB_TOP_SYSSMTRNCTRL_NCOTIMEOUTCNTEN__PRE 0x0
+
+#define VSB_TOP_SYSSMEQCTRL__A 0x1C10035
+#define VSB_TOP_SYSSMEQCTRL__W 11
+#define VSB_TOP_SYSSMEQCTRL__M 0x7FF
+#define VSB_TOP_SYSSMEQCTRL__PRE 0x304
+
+#define VSB_TOP_SYSSMEQCTRL_RSTCTCAL__B 0
+#define VSB_TOP_SYSSMEQCTRL_RSTCTCAL__W 1
+#define VSB_TOP_SYSSMEQCTRL_RSTCTCAL__M 0x1
+#define VSB_TOP_SYSSMEQCTRL_RSTCTCAL__PRE 0x0
+
+#define VSB_TOP_SYSSMEQCTRL_CTCALEN__B 1
+#define VSB_TOP_SYSSMEQCTRL_CTCALEN__W 1
+#define VSB_TOP_SYSSMEQCTRL_CTCALEN__M 0x2
+#define VSB_TOP_SYSSMEQCTRL_CTCALEN__PRE 0x0
+
+#define VSB_TOP_SYSSMEQCTRL_STARTTRN__B 2
+#define VSB_TOP_SYSSMEQCTRL_STARTTRN__W 1
+#define VSB_TOP_SYSSMEQCTRL_STARTTRN__M 0x4
+#define VSB_TOP_SYSSMEQCTRL_STARTTRN__PRE 0x4
+
+#define VSB_TOP_SYSSMEQCTRL_RSTFRMSYNCDET__B 3
+#define VSB_TOP_SYSSMEQCTRL_RSTFRMSYNCDET__W 1
+#define VSB_TOP_SYSSMEQCTRL_RSTFRMSYNCDET__M 0x8
+#define VSB_TOP_SYSSMEQCTRL_RSTFRMSYNCDET__PRE 0x0
+
+#define VSB_TOP_SYSSMEQCTRL_RSTCYDET__B 4
+#define VSB_TOP_SYSSMEQCTRL_RSTCYDET__W 1
+#define VSB_TOP_SYSSMEQCTRL_RSTCYDET__M 0x10
+#define VSB_TOP_SYSSMEQCTRL_RSTCYDET__PRE 0x0
+
+#define VSB_TOP_SYSSMEQCTRL_RSTDCRMV__B 5
+#define VSB_TOP_SYSSMEQCTRL_RSTDCRMV__W 1
+#define VSB_TOP_SYSSMEQCTRL_RSTDCRMV__M 0x20
+#define VSB_TOP_SYSSMEQCTRL_RSTDCRMV__PRE 0x0
+
+#define VSB_TOP_SYSSMEQCTRL_RSTEQSIG__B 6
+#define VSB_TOP_SYSSMEQCTRL_RSTEQSIG__W 1
+#define VSB_TOP_SYSSMEQCTRL_RSTEQSIG__M 0x40
+#define VSB_TOP_SYSSMEQCTRL_RSTEQSIG__PRE 0x0
+
+#define VSB_TOP_SYSSMEQCTRL_CKFRZ__B 7
+#define VSB_TOP_SYSSMEQCTRL_CKFRZ__W 1
+#define VSB_TOP_SYSSMEQCTRL_CKFRZ__M 0x80
+#define VSB_TOP_SYSSMEQCTRL_CKFRZ__PRE 0x0
+
+#define VSB_TOP_SYSSMEQCTRL_CKBWSW__B 8
+#define VSB_TOP_SYSSMEQCTRL_CKBWSW__W 1
+#define VSB_TOP_SYSSMEQCTRL_CKBWSW__M 0x100
+#define VSB_TOP_SYSSMEQCTRL_CKBWSW__PRE 0x100
+
+#define VSB_TOP_SYSSMEQCTRL_NCOBWSW__B 9
+#define VSB_TOP_SYSSMEQCTRL_NCOBWSW__W 1
+#define VSB_TOP_SYSSMEQCTRL_NCOBWSW__M 0x200
+#define VSB_TOP_SYSSMEQCTRL_NCOBWSW__PRE 0x200
+
+#define VSB_TOP_SYSSMEQCTRL_NCOTIMEOUTCNTEN__B 10
+#define VSB_TOP_SYSSMEQCTRL_NCOTIMEOUTCNTEN__W 1
+#define VSB_TOP_SYSSMEQCTRL_NCOTIMEOUTCNTEN__M 0x400
+#define VSB_TOP_SYSSMEQCTRL_NCOTIMEOUTCNTEN__PRE 0x0
+
+#define VSB_TOP_SYSSMAGCCTRL__A 0x1C10036
+#define VSB_TOP_SYSSMAGCCTRL__W 11
+#define VSB_TOP_SYSSMAGCCTRL__M 0x7FF
+#define VSB_TOP_SYSSMAGCCTRL__PRE 0xF9
+
+#define VSB_TOP_SYSSMAGCCTRL_RSTCTCAL__B 0
+#define VSB_TOP_SYSSMAGCCTRL_RSTCTCAL__W 1
+#define VSB_TOP_SYSSMAGCCTRL_RSTCTCAL__M 0x1
+#define VSB_TOP_SYSSMAGCCTRL_RSTCTCAL__PRE 0x1
+
+#define VSB_TOP_SYSSMAGCCTRL_CTCALEN__B 1
+#define VSB_TOP_SYSSMAGCCTRL_CTCALEN__W 1
+#define VSB_TOP_SYSSMAGCCTRL_CTCALEN__M 0x2
+#define VSB_TOP_SYSSMAGCCTRL_CTCALEN__PRE 0x0
+
+#define VSB_TOP_SYSSMAGCCTRL_STARTTRN__B 2
+#define VSB_TOP_SYSSMAGCCTRL_STARTTRN__W 1
+#define VSB_TOP_SYSSMAGCCTRL_STARTTRN__M 0x4
+#define VSB_TOP_SYSSMAGCCTRL_STARTTRN__PRE 0x0
+
+#define VSB_TOP_SYSSMAGCCTRL_RSTFRMSYNCDET__B 3
+#define VSB_TOP_SYSSMAGCCTRL_RSTFRMSYNCDET__W 1
+#define VSB_TOP_SYSSMAGCCTRL_RSTFRMSYNCDET__M 0x8
+#define VSB_TOP_SYSSMAGCCTRL_RSTFRMSYNCDET__PRE 0x8
+
+#define VSB_TOP_SYSSMAGCCTRL_RSTCYDET__B 4
+#define VSB_TOP_SYSSMAGCCTRL_RSTCYDET__W 1
+#define VSB_TOP_SYSSMAGCCTRL_RSTCYDET__M 0x10
+#define VSB_TOP_SYSSMAGCCTRL_RSTCYDET__PRE 0x10
+
+#define VSB_TOP_SYSSMAGCCTRL_RSTDCRMV__B 5
+#define VSB_TOP_SYSSMAGCCTRL_RSTDCRMV__W 1
+#define VSB_TOP_SYSSMAGCCTRL_RSTDCRMV__M 0x20
+#define VSB_TOP_SYSSMAGCCTRL_RSTDCRMV__PRE 0x20
+
+#define VSB_TOP_SYSSMAGCCTRL_RSTEQSIG__B 6
+#define VSB_TOP_SYSSMAGCCTRL_RSTEQSIG__W 1
+#define VSB_TOP_SYSSMAGCCTRL_RSTEQSIG__M 0x40
+#define VSB_TOP_SYSSMAGCCTRL_RSTEQSIG__PRE 0x40
+
+#define VSB_TOP_SYSSMAGCCTRL_CKFRZ__B 7
+#define VSB_TOP_SYSSMAGCCTRL_CKFRZ__W 1
+#define VSB_TOP_SYSSMAGCCTRL_CKFRZ__M 0x80
+#define VSB_TOP_SYSSMAGCCTRL_CKFRZ__PRE 0x80
+
+#define VSB_TOP_SYSSMAGCCTRL_CKBWSW__B 8
+#define VSB_TOP_SYSSMAGCCTRL_CKBWSW__W 1
+#define VSB_TOP_SYSSMAGCCTRL_CKBWSW__M 0x100
+#define VSB_TOP_SYSSMAGCCTRL_CKBWSW__PRE 0x0
+
+#define VSB_TOP_SYSSMAGCCTRL_NCOBWSW__B 9
+#define VSB_TOP_SYSSMAGCCTRL_NCOBWSW__W 1
+#define VSB_TOP_SYSSMAGCCTRL_NCOBWSW__M 0x200
+#define VSB_TOP_SYSSMAGCCTRL_NCOBWSW__PRE 0x0
+
+#define VSB_TOP_SYSSMAGCCTRL_NCOTIMEOUTCNTEN__B 10
+#define VSB_TOP_SYSSMAGCCTRL_NCOTIMEOUTCNTEN__W 1
+#define VSB_TOP_SYSSMAGCCTRL_NCOTIMEOUTCNTEN__M 0x400
+#define VSB_TOP_SYSSMAGCCTRL_NCOTIMEOUTCNTEN__PRE 0x0
+
+#define VSB_TOP_SYSSMCTCTRL__A 0x1C10037
+#define VSB_TOP_SYSSMCTCTRL__W 11
+#define VSB_TOP_SYSSMCTCTRL__M 0x7FF
+#define VSB_TOP_SYSSMCTCTRL__PRE 0x4A
+
+#define VSB_TOP_SYSSMCTCTRL_RSTCTCAL__B 0
+#define VSB_TOP_SYSSMCTCTRL_RSTCTCAL__W 1
+#define VSB_TOP_SYSSMCTCTRL_RSTCTCAL__M 0x1
+#define VSB_TOP_SYSSMCTCTRL_RSTCTCAL__PRE 0x0
+
+#define VSB_TOP_SYSSMCTCTRL_CTCALEN__B 1
+#define VSB_TOP_SYSSMCTCTRL_CTCALEN__W 1
+#define VSB_TOP_SYSSMCTCTRL_CTCALEN__M 0x2
+#define VSB_TOP_SYSSMCTCTRL_CTCALEN__PRE 0x2
+
+#define VSB_TOP_SYSSMCTCTRL_STARTTRN__B 2
+#define VSB_TOP_SYSSMCTCTRL_STARTTRN__W 1
+#define VSB_TOP_SYSSMCTCTRL_STARTTRN__M 0x4
+#define VSB_TOP_SYSSMCTCTRL_STARTTRN__PRE 0x0
+
+#define VSB_TOP_SYSSMCTCTRL_RSTFRMSYNCDET__B 3
+#define VSB_TOP_SYSSMCTCTRL_RSTFRMSYNCDET__W 1
+#define VSB_TOP_SYSSMCTCTRL_RSTFRMSYNCDET__M 0x8
+#define VSB_TOP_SYSSMCTCTRL_RSTFRMSYNCDET__PRE 0x8
+
+#define VSB_TOP_SYSSMCTCTRL_RSTCYDET__B 4
+#define VSB_TOP_SYSSMCTCTRL_RSTCYDET__W 1
+#define VSB_TOP_SYSSMCTCTRL_RSTCYDET__M 0x10
+#define VSB_TOP_SYSSMCTCTRL_RSTCYDET__PRE 0x0
+
+#define VSB_TOP_SYSSMCTCTRL_RSTDCRMV__B 5
+#define VSB_TOP_SYSSMCTCTRL_RSTDCRMV__W 1
+#define VSB_TOP_SYSSMCTCTRL_RSTDCRMV__M 0x20
+#define VSB_TOP_SYSSMCTCTRL_RSTDCRMV__PRE 0x0
+
+#define VSB_TOP_SYSSMCTCTRL_RSTEQSIG__B 6
+#define VSB_TOP_SYSSMCTCTRL_RSTEQSIG__W 1
+#define VSB_TOP_SYSSMCTCTRL_RSTEQSIG__M 0x40
+#define VSB_TOP_SYSSMCTCTRL_RSTEQSIG__PRE 0x40
+
+#define VSB_TOP_SYSSMCTCTRL_CKFRZ__B 7
+#define VSB_TOP_SYSSMCTCTRL_CKFRZ__W 1
+#define VSB_TOP_SYSSMCTCTRL_CKFRZ__M 0x80
+#define VSB_TOP_SYSSMCTCTRL_CKFRZ__PRE 0x0
+
+#define VSB_TOP_SYSSMCTCTRL_CKBWSW__B 8
+#define VSB_TOP_SYSSMCTCTRL_CKBWSW__W 1
+#define VSB_TOP_SYSSMCTCTRL_CKBWSW__M 0x100
+#define VSB_TOP_SYSSMCTCTRL_CKBWSW__PRE 0x0
+
+#define VSB_TOP_SYSSMCTCTRL_NCOBWSW__B 9
+#define VSB_TOP_SYSSMCTCTRL_NCOBWSW__W 1
+#define VSB_TOP_SYSSMCTCTRL_NCOBWSW__M 0x200
+#define VSB_TOP_SYSSMCTCTRL_NCOBWSW__PRE 0x0
+
+#define VSB_TOP_SYSSMCTCTRL_NCOTIMEOUTCNTEN__B 10
+#define VSB_TOP_SYSSMCTCTRL_NCOTIMEOUTCNTEN__W 1
+#define VSB_TOP_SYSSMCTCTRL_NCOTIMEOUTCNTEN__M 0x400
+#define VSB_TOP_SYSSMCTCTRL_NCOTIMEOUTCNTEN__PRE 0x0
+
+#define VSB_TOP_EQCTRL__A 0x1C10038
+#define VSB_TOP_EQCTRL__W 10
+#define VSB_TOP_EQCTRL__M 0x3FF
+#define VSB_TOP_EQCTRL__PRE 0x6
+
+#define VSB_TOP_EQCTRL_STASSIGNEN__B 0
+#define VSB_TOP_EQCTRL_STASSIGNEN__W 1
+#define VSB_TOP_EQCTRL_STASSIGNEN__M 0x1
+#define VSB_TOP_EQCTRL_STASSIGNEN__PRE 0x0
+
+#define VSB_TOP_EQCTRL_ORCANCMAEN__B 1
+#define VSB_TOP_EQCTRL_ORCANCMAEN__W 1
+#define VSB_TOP_EQCTRL_ORCANCMAEN__M 0x2
+#define VSB_TOP_EQCTRL_ORCANCMAEN__PRE 0x2
+
+#define VSB_TOP_EQCTRL_ODAGCGO__B 2
+#define VSB_TOP_EQCTRL_ODAGCGO__W 1
+#define VSB_TOP_EQCTRL_ODAGCGO__M 0x4
+#define VSB_TOP_EQCTRL_ODAGCGO__PRE 0x4
+
+#define VSB_TOP_EQCTRL_OPTGAIN__B 3
+#define VSB_TOP_EQCTRL_OPTGAIN__W 3
+#define VSB_TOP_EQCTRL_OPTGAIN__M 0x38
+#define VSB_TOP_EQCTRL_OPTGAIN__PRE 0x0
+
+#define VSB_TOP_EQCTRL_TAPRAMWRTEN__B 6
+#define VSB_TOP_EQCTRL_TAPRAMWRTEN__W 1
+#define VSB_TOP_EQCTRL_TAPRAMWRTEN__M 0x40
+#define VSB_TOP_EQCTRL_TAPRAMWRTEN__PRE 0x0
+
+#define VSB_TOP_EQCTRL_CMAGAIN__B 7
+#define VSB_TOP_EQCTRL_CMAGAIN__W 3
+#define VSB_TOP_EQCTRL_CMAGAIN__M 0x380
+#define VSB_TOP_EQCTRL_CMAGAIN__PRE 0x0
+
+#define VSB_TOP_PREEQAGCCTRL__A 0x1C10039
+#define VSB_TOP_PREEQAGCCTRL__W 5
+#define VSB_TOP_PREEQAGCCTRL__M 0x1F
+#define VSB_TOP_PREEQAGCCTRL__PRE 0x10
+
+#define VSB_TOP_PREEQAGCCTRL_PREEQAGCBWSEL__B 0
+#define VSB_TOP_PREEQAGCCTRL_PREEQAGCBWSEL__W 4
+#define VSB_TOP_PREEQAGCCTRL_PREEQAGCBWSEL__M 0xF
+#define VSB_TOP_PREEQAGCCTRL_PREEQAGCBWSEL__PRE 0x0
+
+#define VSB_TOP_PREEQAGCCTRL_PREEQAGCFRZ__B 4
+#define VSB_TOP_PREEQAGCCTRL_PREEQAGCFRZ__W 1
+#define VSB_TOP_PREEQAGCCTRL_PREEQAGCFRZ__M 0x10
+#define VSB_TOP_PREEQAGCCTRL_PREEQAGCFRZ__PRE 0x10
+
+#define VSB_TOP_PREEQAGCPWRREFLVLHI__A 0x1C1003A
+#define VSB_TOP_PREEQAGCPWRREFLVLHI__W 8
+#define VSB_TOP_PREEQAGCPWRREFLVLHI__M 0xFF
+#define VSB_TOP_PREEQAGCPWRREFLVLHI__PRE 0x0
+
+#define VSB_TOP_PREEQAGCPWRREFLVLLO__A 0x1C1003B
+#define VSB_TOP_PREEQAGCPWRREFLVLLO__W 16
+#define VSB_TOP_PREEQAGCPWRREFLVLLO__M 0xFFFF
+#define VSB_TOP_PREEQAGCPWRREFLVLLO__PRE 0x1D66
+
+#define VSB_TOP_CORINGSEL__A 0x1C1003C
+#define VSB_TOP_CORINGSEL__W 8
+#define VSB_TOP_CORINGSEL__M 0xFF
+#define VSB_TOP_CORINGSEL__PRE 0x3
+
+#define VSB_TOP_BEDETCTRL__A 0x1C1003D
+#define VSB_TOP_BEDETCTRL__W 9
+#define VSB_TOP_BEDETCTRL__M 0x1FF
+#define VSB_TOP_BEDETCTRL__PRE 0x145
+
+#define VSB_TOP_BEDETCTRL_MIXRATIO__B 0
+#define VSB_TOP_BEDETCTRL_MIXRATIO__W 3
+#define VSB_TOP_BEDETCTRL_MIXRATIO__M 0x7
+#define VSB_TOP_BEDETCTRL_MIXRATIO__PRE 0x5
+
+#define VSB_TOP_BEDETCTRL_CYOFFSEL__B 3
+#define VSB_TOP_BEDETCTRL_CYOFFSEL__W 1
+#define VSB_TOP_BEDETCTRL_CYOFFSEL__M 0x8
+#define VSB_TOP_BEDETCTRL_CYOFFSEL__PRE 0x0
+
+#define VSB_TOP_BEDETCTRL_DATAOFFSEL__B 4
+#define VSB_TOP_BEDETCTRL_DATAOFFSEL__W 1
+#define VSB_TOP_BEDETCTRL_DATAOFFSEL__M 0x10
+#define VSB_TOP_BEDETCTRL_DATAOFFSEL__PRE 0x0
+
+#define VSB_TOP_BEDETCTRL_BYPASS_DSQ__B 5
+#define VSB_TOP_BEDETCTRL_BYPASS_DSQ__W 1
+#define VSB_TOP_BEDETCTRL_BYPASS_DSQ__M 0x20
+#define VSB_TOP_BEDETCTRL_BYPASS_DSQ__PRE 0x0
+
+#define VSB_TOP_BEDETCTRL_BYPASS_PSQ__B 6
+#define VSB_TOP_BEDETCTRL_BYPASS_PSQ__W 1
+#define VSB_TOP_BEDETCTRL_BYPASS_PSQ__M 0x40
+#define VSB_TOP_BEDETCTRL_BYPASS_PSQ__PRE 0x40
+
+#define VSB_TOP_BEDETCTRL_BYPASS_CSQ__B 7
+#define VSB_TOP_BEDETCTRL_BYPASS_CSQ__W 1
+#define VSB_TOP_BEDETCTRL_BYPASS_CSQ__M 0x80
+#define VSB_TOP_BEDETCTRL_BYPASS_CSQ__PRE 0x0
+
+#define VSB_TOP_BEDETCTRL_BYPASS_DMP__B 8
+#define VSB_TOP_BEDETCTRL_BYPASS_DMP__W 1
+#define VSB_TOP_BEDETCTRL_BYPASS_DMP__M 0x100
+#define VSB_TOP_BEDETCTRL_BYPASS_DMP__PRE 0x100
+
+#define VSB_TOP_LBAGCREFLVL__A 0x1C1003E
+#define VSB_TOP_LBAGCREFLVL__W 12
+#define VSB_TOP_LBAGCREFLVL__M 0xFFF
+#define VSB_TOP_LBAGCREFLVL__PRE 0x200
+
+#define VSB_TOP_UBAGCREFLVL__A 0x1C1003F
+#define VSB_TOP_UBAGCREFLVL__W 12
+#define VSB_TOP_UBAGCREFLVL__M 0xFFF
+#define VSB_TOP_UBAGCREFLVL__PRE 0x400
+
+#define VSB_TOP_NOTCH1_BIN_NUM__A 0x1C10040
+#define VSB_TOP_NOTCH1_BIN_NUM__W 11
+#define VSB_TOP_NOTCH1_BIN_NUM__M 0x7FF
+#define VSB_TOP_NOTCH1_BIN_NUM__PRE 0xB2
+
+#define VSB_TOP_NOTCH2_BIN_NUM__A 0x1C10041
+#define VSB_TOP_NOTCH2_BIN_NUM__W 11
+#define VSB_TOP_NOTCH2_BIN_NUM__M 0x7FF
+#define VSB_TOP_NOTCH2_BIN_NUM__PRE 0x40B
+
+#define VSB_TOP_NOTCH_START_BIN_NUM__A 0x1C10042
+#define VSB_TOP_NOTCH_START_BIN_NUM__W 11
+#define VSB_TOP_NOTCH_START_BIN_NUM__M 0x7FF
+#define VSB_TOP_NOTCH_START_BIN_NUM__PRE 0x7C0
+
+#define VSB_TOP_NOTCH_STOP_BIN_NUM__A 0x1C10043
+#define VSB_TOP_NOTCH_STOP_BIN_NUM__W 11
+#define VSB_TOP_NOTCH_STOP_BIN_NUM__M 0x7FF
+#define VSB_TOP_NOTCH_STOP_BIN_NUM__PRE 0x43F
+
+#define VSB_TOP_NOTCH_TEST_DURATION__A 0x1C10044
+#define VSB_TOP_NOTCH_TEST_DURATION__W 11
+#define VSB_TOP_NOTCH_TEST_DURATION__M 0x7FF
+#define VSB_TOP_NOTCH_TEST_DURATION__PRE 0x7FF
+
+#define VSB_TOP_RESULT_LARGE_PEAK_BIN__A 0x1C10045
+#define VSB_TOP_RESULT_LARGE_PEAK_BIN__W 11
+#define VSB_TOP_RESULT_LARGE_PEAK_BIN__M 0x7FF
+#define VSB_TOP_RESULT_LARGE_PEAK_BIN__PRE 0x0
+
+#define VSB_TOP_RESULT_LARGE_PEAK_VALUE__A 0x1C10046
+#define VSB_TOP_RESULT_LARGE_PEAK_VALUE__W 16
+#define VSB_TOP_RESULT_LARGE_PEAK_VALUE__M 0xFFFF
+#define VSB_TOP_RESULT_LARGE_PEAK_VALUE__PRE 0x0
+
+#define VSB_TOP_RESULT_SMALL_PEAK_BIN__A 0x1C10047
+#define VSB_TOP_RESULT_SMALL_PEAK_BIN__W 11
+#define VSB_TOP_RESULT_SMALL_PEAK_BIN__M 0x7FF
+#define VSB_TOP_RESULT_SMALL_PEAK_BIN__PRE 0x0
+
+#define VSB_TOP_RESULT_SMALL_PEAK_VALUE__A 0x1C10048
+#define VSB_TOP_RESULT_SMALL_PEAK_VALUE__W 16
+#define VSB_TOP_RESULT_SMALL_PEAK_VALUE__M 0xFFFF
+#define VSB_TOP_RESULT_SMALL_PEAK_VALUE__PRE 0x0
+
+#define VSB_TOP_NOTCH_SWEEP_RUNNING__A 0x1C10049
+#define VSB_TOP_NOTCH_SWEEP_RUNNING__W 1
+#define VSB_TOP_NOTCH_SWEEP_RUNNING__M 0x1
+#define VSB_TOP_NOTCH_SWEEP_RUNNING__PRE 0x0
+
+#define VSB_TOP_PREEQDAGCRATIO__A 0x1C1004A
+#define VSB_TOP_PREEQDAGCRATIO__W 13
+#define VSB_TOP_PREEQDAGCRATIO__M 0x1FFF
+#define VSB_TOP_PREEQDAGCRATIO__PRE 0x0
+
+#define VSB_TOP_AGC_TRUNCCTRL__A 0x1C1004B
+#define VSB_TOP_AGC_TRUNCCTRL__W 4
+#define VSB_TOP_AGC_TRUNCCTRL__M 0xF
+#define VSB_TOP_AGC_TRUNCCTRL__PRE 0xF
+
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_LSB__B 0
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_LSB__W 2
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_LSB__M 0x3
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_LSB__PRE 0x3
+
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_12N__B 2
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_12N__W 1
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_12N__M 0x4
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_12N__PRE 0x4
+
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_EN__B 3
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_EN__W 1
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_EN__M 0x8
+#define VSB_TOP_AGC_TRUNCCTRL_TRUNC_EN__PRE 0x8
+
+#define VSB_TOP_BEAGC_DEADZONEINIT__A 0x1C1004C
+#define VSB_TOP_BEAGC_DEADZONEINIT__W 8
+#define VSB_TOP_BEAGC_DEADZONEINIT__M 0xFF
+#define VSB_TOP_BEAGC_DEADZONEINIT__PRE 0x50
+
+#define VSB_TOP_BEAGC_REFLEVEL__A 0x1C1004D
+#define VSB_TOP_BEAGC_REFLEVEL__W 9
+#define VSB_TOP_BEAGC_REFLEVEL__M 0x1FF
+#define VSB_TOP_BEAGC_REFLEVEL__PRE 0xAE
+
+#define VSB_TOP_BEAGC_GAINSHIFT__A 0x1C1004E
+#define VSB_TOP_BEAGC_GAINSHIFT__W 3
+#define VSB_TOP_BEAGC_GAINSHIFT__M 0x7
+#define VSB_TOP_BEAGC_GAINSHIFT__PRE 0x3
+
+#define VSB_TOP_BEAGC_REGINIT__A 0x1C1004F
+#define VSB_TOP_BEAGC_REGINIT__W 15
+#define VSB_TOP_BEAGC_REGINIT__M 0x7FFF
+#define VSB_TOP_BEAGC_REGINIT__PRE 0x40
+
+#define VSB_TOP_BEAGC_REGINIT_BEAGC_RST__B 14
+#define VSB_TOP_BEAGC_REGINIT_BEAGC_RST__W 1
+#define VSB_TOP_BEAGC_REGINIT_BEAGC_RST__M 0x4000
+#define VSB_TOP_BEAGC_REGINIT_BEAGC_RST__PRE 0x0
+
+#define VSB_TOP_BEAGC_SCALE__A 0x1C10050
+#define VSB_TOP_BEAGC_SCALE__W 14
+#define VSB_TOP_BEAGC_SCALE__M 0x3FFF
+#define VSB_TOP_BEAGC_SCALE__PRE 0x0
+
+#define VSB_TOP_CFAGC_DEADZONEINIT__A 0x1C10051
+#define VSB_TOP_CFAGC_DEADZONEINIT__W 8
+#define VSB_TOP_CFAGC_DEADZONEINIT__M 0xFF
+#define VSB_TOP_CFAGC_DEADZONEINIT__PRE 0x50
+
+#define VSB_TOP_CFAGC_REFLEVEL__A 0x1C10052
+#define VSB_TOP_CFAGC_REFLEVEL__W 9
+#define VSB_TOP_CFAGC_REFLEVEL__M 0x1FF
+#define VSB_TOP_CFAGC_REFLEVEL__PRE 0xAE
+
+#define VSB_TOP_CFAGC_GAINSHIFT__A 0x1C10053
+#define VSB_TOP_CFAGC_GAINSHIFT__W 3
+#define VSB_TOP_CFAGC_GAINSHIFT__M 0x7
+#define VSB_TOP_CFAGC_GAINSHIFT__PRE 0x3
+
+#define VSB_TOP_CFAGC_REGINIT__A 0x1C10054
+#define VSB_TOP_CFAGC_REGINIT__W 15
+#define VSB_TOP_CFAGC_REGINIT__M 0x7FFF
+#define VSB_TOP_CFAGC_REGINIT__PRE 0x80
+
+#define VSB_TOP_CFAGC_REGINIT_CFAGC_RST__B 14
+#define VSB_TOP_CFAGC_REGINIT_CFAGC_RST__W 1
+#define VSB_TOP_CFAGC_REGINIT_CFAGC_RST__M 0x4000
+#define VSB_TOP_CFAGC_REGINIT_CFAGC_RST__PRE 0x0
+
+#define VSB_TOP_CFAGC_SCALE__A 0x1C10055
+#define VSB_TOP_CFAGC_SCALE__W 14
+#define VSB_TOP_CFAGC_SCALE__M 0x3FFF
+#define VSB_TOP_CFAGC_SCALE__PRE 0x0
+
+#define VSB_TOP_CKTRKONCTL__A 0x1C10056
+#define VSB_TOP_CKTRKONCTL__W 2
+#define VSB_TOP_CKTRKONCTL__M 0x3
+#define VSB_TOP_CKTRKONCTL__PRE 0x0
+
+#define VSB_TOP_CYTRKONCTL__A 0x1C10057
+#define VSB_TOP_CYTRKONCTL__W 2
+#define VSB_TOP_CYTRKONCTL__M 0x3
+#define VSB_TOP_CYTRKONCTL__PRE 0x0
+
+#define VSB_TOP_PTONCTL__A 0x1C10058
+#define VSB_TOP_PTONCTL__W 2
+#define VSB_TOP_PTONCTL__M 0x3
+#define VSB_TOP_PTONCTL__PRE 0x0
+
+#define VSB_TOP_NOTCH_SCALE_1__A 0x1C10059
+#define VSB_TOP_NOTCH_SCALE_1__W 8
+#define VSB_TOP_NOTCH_SCALE_1__M 0xFF
+#define VSB_TOP_NOTCH_SCALE_1__PRE 0xA
+
+#define VSB_TOP_NOTCH_SCALE_2__A 0x1C1005A
+#define VSB_TOP_NOTCH_SCALE_2__W 8
+#define VSB_TOP_NOTCH_SCALE_2__M 0xFF
+#define VSB_TOP_NOTCH_SCALE_2__PRE 0xA
+
+#define VSB_TOP_FIRSTLARGFFETAP__A 0x1C1005B
+#define VSB_TOP_FIRSTLARGFFETAP__W 12
+#define VSB_TOP_FIRSTLARGFFETAP__M 0xFFF
+#define VSB_TOP_FIRSTLARGFFETAP__PRE 0x0
+
+#define VSB_TOP_FIRSTLARGFFETAPADDR__A 0x1C1005C
+#define VSB_TOP_FIRSTLARGFFETAPADDR__W 11
+#define VSB_TOP_FIRSTLARGFFETAPADDR__M 0x7FF
+#define VSB_TOP_FIRSTLARGFFETAPADDR__PRE 0x0
+
+#define VSB_TOP_SECONDLARGFFETAP__A 0x1C1005D
+#define VSB_TOP_SECONDLARGFFETAP__W 12
+#define VSB_TOP_SECONDLARGFFETAP__M 0xFFF
+#define VSB_TOP_SECONDLARGFFETAP__PRE 0x0
+
+#define VSB_TOP_SECONDLARGFFETAPADDR__A 0x1C1005E
+#define VSB_TOP_SECONDLARGFFETAPADDR__W 11
+#define VSB_TOP_SECONDLARGFFETAPADDR__M 0x7FF
+#define VSB_TOP_SECONDLARGFFETAPADDR__PRE 0x0
+
+#define VSB_TOP_FIRSTLARGDFETAP__A 0x1C1005F
+#define VSB_TOP_FIRSTLARGDFETAP__W 12
+#define VSB_TOP_FIRSTLARGDFETAP__M 0xFFF
+#define VSB_TOP_FIRSTLARGDFETAP__PRE 0x0
+
+#define VSB_TOP_FIRSTLARGDFETAPADDR__A 0x1C10060
+#define VSB_TOP_FIRSTLARGDFETAPADDR__W 11
+#define VSB_TOP_FIRSTLARGDFETAPADDR__M 0x7FF
+#define VSB_TOP_FIRSTLARGDFETAPADDR__PRE 0x0
+
+#define VSB_TOP_SECONDLARGDFETAP__A 0x1C10061
+#define VSB_TOP_SECONDLARGDFETAP__W 12
+#define VSB_TOP_SECONDLARGDFETAP__M 0xFFF
+#define VSB_TOP_SECONDLARGDFETAP__PRE 0x0
+
+#define VSB_TOP_SECONDLARGDFETAPADDR__A 0x1C10062
+#define VSB_TOP_SECONDLARGDFETAPADDR__W 11
+#define VSB_TOP_SECONDLARGDFETAPADDR__M 0x7FF
+#define VSB_TOP_SECONDLARGDFETAPADDR__PRE 0x0
+
+#define VSB_TOP_PARAOWDBUS__A 0x1C10063
+#define VSB_TOP_PARAOWDBUS__W 12
+#define VSB_TOP_PARAOWDBUS__M 0xFFF
+#define VSB_TOP_PARAOWDBUS__PRE 0x0
+
+#define VSB_TOP_PARAOWCTRL__A 0x1C10064
+#define VSB_TOP_PARAOWCTRL__W 7
+#define VSB_TOP_PARAOWCTRL__M 0x7F
+#define VSB_TOP_PARAOWCTRL__PRE 0x0
+
+#define VSB_TOP_PARAOWCTRL_PARAOWABUS__B 0
+#define VSB_TOP_PARAOWCTRL_PARAOWABUS__W 6
+#define VSB_TOP_PARAOWCTRL_PARAOWABUS__M 0x3F
+#define VSB_TOP_PARAOWCTRL_PARAOWABUS__PRE 0x0
+
+#define VSB_TOP_PARAOWCTRL_PARAOWEN__B 6
+#define VSB_TOP_PARAOWCTRL_PARAOWEN__W 1
+#define VSB_TOP_PARAOWCTRL_PARAOWEN__M 0x40
+#define VSB_TOP_PARAOWCTRL_PARAOWEN__PRE 0x0
+
+#define VSB_TOP_CURRENTSEGLOCAT__A 0x1C10065
+#define VSB_TOP_CURRENTSEGLOCAT__W 10
+#define VSB_TOP_CURRENTSEGLOCAT__M 0x3FF
+#define VSB_TOP_CURRENTSEGLOCAT__PRE 0x0
+
+#define VSB_TOP_MEASUREMENT_PERIOD__A 0x1C10066
+#define VSB_TOP_MEASUREMENT_PERIOD__W 16
+#define VSB_TOP_MEASUREMENT_PERIOD__M 0xFFFF
+#define VSB_TOP_MEASUREMENT_PERIOD__PRE 0x0
+
+#define VSB_TOP_NR_SYM_ERRS__A 0x1C10067
+#define VSB_TOP_NR_SYM_ERRS__W 16
+#define VSB_TOP_NR_SYM_ERRS__M 0xFFFF
+#define VSB_TOP_NR_SYM_ERRS__PRE 0xFFFF
+
+#define VSB_TOP_ERR_ENERGY_L__A 0x1C10068
+#define VSB_TOP_ERR_ENERGY_L__W 16
+#define VSB_TOP_ERR_ENERGY_L__M 0xFFFF
+#define VSB_TOP_ERR_ENERGY_L__PRE 0xFFFF
+
+#define VSB_TOP_ERR_ENERGY_H__A 0x1C10069
+#define VSB_TOP_ERR_ENERGY_H__W 16
+#define VSB_TOP_ERR_ENERGY_H__M 0xFFFF
+#define VSB_TOP_ERR_ENERGY_H__PRE 0xFFFF
+
+#define VSB_TOP_SLICER_SEL_8LEV__A 0x1C1006A
+#define VSB_TOP_SLICER_SEL_8LEV__W 1
+#define VSB_TOP_SLICER_SEL_8LEV__M 0x1
+#define VSB_TOP_SLICER_SEL_8LEV__PRE 0x1
+
+#define VSB_TOP_BNFIELD__A 0x1C1006B
+#define VSB_TOP_BNFIELD__W 3
+#define VSB_TOP_BNFIELD__M 0x7
+#define VSB_TOP_BNFIELD__PRE 0x3
+
+#define VSB_TOP_CLPLASTNUM__A 0x1C1006C
+#define VSB_TOP_CLPLASTNUM__W 8
+#define VSB_TOP_CLPLASTNUM__M 0xFF
+#define VSB_TOP_CLPLASTNUM__PRE 0x0
+
+#define VSB_TOP_BNSQERR__A 0x1C1006D
+#define VSB_TOP_BNSQERR__W 16
+#define VSB_TOP_BNSQERR__M 0xFFFF
+#define VSB_TOP_BNSQERR__PRE 0x1AD
+
+#define VSB_TOP_BNTHRESH__A 0x1C1006E
+#define VSB_TOP_BNTHRESH__W 9
+#define VSB_TOP_BNTHRESH__M 0x1FF
+#define VSB_TOP_BNTHRESH__PRE 0x120
+
+#define VSB_TOP_BNCLPNUM__A 0x1C1006F
+#define VSB_TOP_BNCLPNUM__W 16
+#define VSB_TOP_BNCLPNUM__M 0xFFFF
+#define VSB_TOP_BNCLPNUM__PRE 0x0
+
+#define VSB_TOP_PHASELOCKCTRL__A 0x1C10070
+#define VSB_TOP_PHASELOCKCTRL__W 7
+#define VSB_TOP_PHASELOCKCTRL__M 0x7F
+#define VSB_TOP_PHASELOCKCTRL__PRE 0x0
+
+#define VSB_TOP_PHASELOCKCTRL_DFORCEPOLARITY__B 0
+#define VSB_TOP_PHASELOCKCTRL_DFORCEPOLARITY__W 1
+#define VSB_TOP_PHASELOCKCTRL_DFORCEPOLARITY__M 0x1
+#define VSB_TOP_PHASELOCKCTRL_DFORCEPOLARITY__PRE 0x0
+
+#define VSB_TOP_PHASELOCKCTRL_DFORCEPLL__B 1
+#define VSB_TOP_PHASELOCKCTRL_DFORCEPLL__W 1
+#define VSB_TOP_PHASELOCKCTRL_DFORCEPLL__M 0x2
+#define VSB_TOP_PHASELOCKCTRL_DFORCEPLL__PRE 0x0
+
+#define VSB_TOP_PHASELOCKCTRL_PFORCEPOLARITY__B 2
+#define VSB_TOP_PHASELOCKCTRL_PFORCEPOLARITY__W 1
+#define VSB_TOP_PHASELOCKCTRL_PFORCEPOLARITY__M 0x4
+#define VSB_TOP_PHASELOCKCTRL_PFORCEPOLARITY__PRE 0x0
+
+#define VSB_TOP_PHASELOCKCTRL_PFORCEPLL__B 3
+#define VSB_TOP_PHASELOCKCTRL_PFORCEPLL__W 1
+#define VSB_TOP_PHASELOCKCTRL_PFORCEPLL__M 0x8
+#define VSB_TOP_PHASELOCKCTRL_PFORCEPLL__PRE 0x0
+
+#define VSB_TOP_PHASELOCKCTRL_CFORCEPOLARITY__B 4
+#define VSB_TOP_PHASELOCKCTRL_CFORCEPOLARITY__W 1
+#define VSB_TOP_PHASELOCKCTRL_CFORCEPOLARITY__M 0x10
+#define VSB_TOP_PHASELOCKCTRL_CFORCEPOLARITY__PRE 0x0
+
+#define VSB_TOP_PHASELOCKCTRL_CFORCEPLL__B 5
+#define VSB_TOP_PHASELOCKCTRL_CFORCEPLL__W 1
+#define VSB_TOP_PHASELOCKCTRL_CFORCEPLL__M 0x20
+#define VSB_TOP_PHASELOCKCTRL_CFORCEPLL__PRE 0x0
+
+#define VSB_TOP_PHASELOCKCTRL_IQSWITCH__B 6
+#define VSB_TOP_PHASELOCKCTRL_IQSWITCH__W 1
+#define VSB_TOP_PHASELOCKCTRL_IQSWITCH__M 0x40
+#define VSB_TOP_PHASELOCKCTRL_IQSWITCH__PRE 0x0
+
+#define VSB_TOP_DLOCKACCUM__A 0x1C10071
+#define VSB_TOP_DLOCKACCUM__W 16
+#define VSB_TOP_DLOCKACCUM__M 0xFFFF
+#define VSB_TOP_DLOCKACCUM__PRE 0x0
+
+#define VSB_TOP_PLOCKACCUM__A 0x1C10072
+#define VSB_TOP_PLOCKACCUM__W 16
+#define VSB_TOP_PLOCKACCUM__M 0xFFFF
+#define VSB_TOP_PLOCKACCUM__PRE 0x0
+
+#define VSB_TOP_CLOCKACCUM__A 0x1C10073
+#define VSB_TOP_CLOCKACCUM__W 16
+#define VSB_TOP_CLOCKACCUM__M 0xFFFF
+#define VSB_TOP_CLOCKACCUM__PRE 0x0
+
+#define VSB_TOP_DCRMVACUMI__A 0x1C10074
+#define VSB_TOP_DCRMVACUMI__W 10
+#define VSB_TOP_DCRMVACUMI__M 0x3FF
+#define VSB_TOP_DCRMVACUMI__PRE 0x0
+
+#define VSB_TOP_DCRMVACUMQ__A 0x1C10075
+#define VSB_TOP_DCRMVACUMQ__W 10
+#define VSB_TOP_DCRMVACUMQ__M 0x3FF
+#define VSB_TOP_DCRMVACUMQ__PRE 0x0
+
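The VSB_TOP block above follows the register-map naming convention used throughout this hunk: for each register, __A is the address, __W the width in bits, __M the mask and __PRE the preset value, while named sub-fields add a __B bit offset and their own mask. As a minimal standalone sketch (not part of the patch, and the FIELD_PUT/FIELD_GET helpers are illustrative rather than driver API), the following C program shows how those pieces compose: a sub-field value is inserted with ((val << FIELD__B) & FIELD__M), and a register preset is simply the OR of its sub-field presets. The #define values are copied verbatim from the VSB_TOP_EQCTRL group above.

#include <stdio.h>
#include <assert.h>

/* Copied verbatim from the VSB_TOP_EQCTRL group in the hunk above. */
#define VSB_TOP_EQCTRL__PRE                 0x6
#define VSB_TOP_EQCTRL_ORCANCMAEN__B        1
#define VSB_TOP_EQCTRL_ORCANCMAEN__M        0x2
#define VSB_TOP_EQCTRL_ODAGCGO__B           2
#define VSB_TOP_EQCTRL_ODAGCGO__M           0x4
#define VSB_TOP_EQCTRL_CMAGAIN__B           7
#define VSB_TOP_EQCTRL_CMAGAIN__M           0x380

/* Illustrative helpers (not from the driver): insert/extract one sub-field. */
#define FIELD_PUT(reg, field, val) \
	(((reg) & ~(field##__M)) | (((val) << (field##__B)) & (field##__M)))
#define FIELD_GET(reg, field) \
	(((reg) & (field##__M)) >> (field##__B))

int main(void)
{
	unsigned int eqctrl = 0;

	/* Rebuild the documented preset: ORCANCMAEN = 1, ODAGCGO = 1, rest 0. */
	eqctrl = FIELD_PUT(eqctrl, VSB_TOP_EQCTRL_ORCANCMAEN, 1);
	eqctrl = FIELD_PUT(eqctrl, VSB_TOP_EQCTRL_ODAGCGO, 1);
	assert(eqctrl == VSB_TOP_EQCTRL__PRE);

	/* Program a 3-bit CMA gain of 5 into bits 9..7 and read it back. */
	eqctrl = FIELD_PUT(eqctrl, VSB_TOP_EQCTRL_CMAGAIN, 5);
	assert(FIELD_GET(eqctrl, VSB_TOP_EQCTRL_CMAGAIN) == 5);

	printf("VSB_TOP_EQCTRL = 0x%03X\n", eqctrl);
	return 0;
}

The same pattern applies to every register in this file; only the addresses, widths and presets change, which is why the remainder of the hunk is a uniform table of __A/__W/__M/__PRE quadruples.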
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO1__A 0x1C20000
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO1__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO1__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO2__A 0x1C20001
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO2__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO2__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO3__A 0x1C20002
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO3__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO3__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO4__A 0x1C20003
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO4__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO4__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO5__A 0x1C20004
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO5__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO5__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO6__A 0x1C20005
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO6__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO6__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO7__A 0x1C20006
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO7__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO7__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO8__A 0x1C20007
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO8__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO8__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO9__A 0x1C20008
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO9__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO9__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO10__A 0x1C20009
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO10__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO10__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO11__A 0x1C2000A
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO11__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO11__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO12__A 0x1C2000B
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO12__W 12
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO12__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFETRAINLKRATIO12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO1__A 0x1C2000C
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO1__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO1__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO2__A 0x1C2000D
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO2__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO2__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO3__A 0x1C2000E
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO3__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO3__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO4__A 0x1C2000F
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO4__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO4__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO5__A 0x1C20010
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO5__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO5__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO6__A 0x1C20011
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO6__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO6__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO7__A 0x1C20012
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO7__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO7__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO8__A 0x1C20013
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO8__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO8__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO9__A 0x1C20014
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO9__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO9__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO10__A 0x1C20015
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO10__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO10__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO11__A 0x1C20016
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO11__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO11__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO12__A 0x1C20017
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO12__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO12__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1TRAINLKRATIO12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO1__A 0x1C20018
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO1__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO1__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO2__A 0x1C20019
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO2__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO2__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO3__A 0x1C2001A
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO3__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO3__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO4__A 0x1C2001B
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO4__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO4__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO5__A 0x1C2001C
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO5__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO5__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO6__A 0x1C2001D
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO6__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO6__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO7__A 0x1C2001E
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO7__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO7__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO8__A 0x1C2001F
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO8__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO8__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO9__A 0x1C20020
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO9__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO9__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO10__A 0x1C20021
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO10__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO10__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO11__A 0x1C20022
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO11__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO11__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO12__A 0x1C20023
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO12__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO12__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA1DATALKRATIO12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO1__A 0x1C20024
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO1__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO1__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO2__A 0x1C20025
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO2__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO2__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO3__A 0x1C20026
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO3__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO3__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO4__A 0x1C20027
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO4__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO4__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO5__A 0x1C20028
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO5__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO5__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO6__A 0x1C20029
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO6__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO6__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO7__A 0x1C2002A
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO7__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO7__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO8__A 0x1C2002B
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO8__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO8__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO9__A 0x1C2002C
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO9__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO9__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO10__A 0x1C2002D
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO10__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO10__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO11__A 0x1C2002E
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO11__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO11__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO12__A 0x1C2002F
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO12__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO12__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2TRAINLKRATIO12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO1__A 0x1C20030
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO1__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO1__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO2__A 0x1C20031
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO2__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO2__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO3__A 0x1C20032
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO3__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO3__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO4__A 0x1C20033
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO4__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO4__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO5__A 0x1C20034
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO5__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO5__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO6__A 0x1C20035
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO6__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO6__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO7__A 0x1C20036
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO7__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO7__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO8__A 0x1C20037
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO8__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO8__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO9__A 0x1C20038
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO9__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO9__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO10__A 0x1C20039
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO10__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO10__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO11__A 0x1C2003A
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO11__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO11__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO12__A 0x1C2003B
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO12__W 12
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO12__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFERCA2DATALKRATIO12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO1__A 0x1C2003C
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO1__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO1__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO2__A 0x1C2003D
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO2__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO2__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO3__A 0x1C2003E
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO3__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO3__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO4__A 0x1C2003F
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO4__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO4__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO5__A 0x1C20040
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO5__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO5__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO6__A 0x1C20041
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO6__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO6__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO7__A 0x1C20042
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO7__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO7__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO8__A 0x1C20043
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO8__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO8__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO9__A 0x1C20044
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO9__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO9__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO10__A 0x1C20045
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO10__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO10__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO11__A 0x1C20046
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO11__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO11__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO12__A 0x1C20047
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO12__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO12__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1TRAINLKRATIO12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO1__A 0x1C20048
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO1__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO1__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO2__A 0x1C20049
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO2__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO2__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO3__A 0x1C2004A
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO3__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO3__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO4__A 0x1C2004B
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO4__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO4__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO5__A 0x1C2004C
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO5__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO5__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO6__A 0x1C2004D
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO6__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO6__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO7__A 0x1C2004E
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO7__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO7__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO8__A 0x1C2004F
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO8__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO8__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO9__A 0x1C20050
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO9__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO9__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO10__A 0x1C20051
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO10__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO10__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO11__A 0x1C20052
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO11__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO11__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO12__A 0x1C20053
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO12__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO12__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM1DATALKRATIO12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO1__A 0x1C20054
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO1__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO1__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO2__A 0x1C20055
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO2__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO2__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO3__A 0x1C20056
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO3__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO3__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO4__A 0x1C20057
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO4__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO4__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO5__A 0x1C20058
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO5__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO5__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO6__A 0x1C20059
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO6__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO6__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO7__A 0x1C2005A
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO7__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO7__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO8__A 0x1C2005B
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO8__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO8__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO9__A 0x1C2005C
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO9__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO9__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO10__A 0x1C2005D
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO10__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO10__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO11__A 0x1C2005E
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO11__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO11__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO12__A 0x1C2005F
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO12__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO12__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2TRAINLKRATIO12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO1__A 0x1C20060
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO1__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO1__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO2__A 0x1C20061
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO2__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO2__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO3__A 0x1C20062
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO3__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO3__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO4__A 0x1C20063
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO4__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO4__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO5__A 0x1C20064
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO5__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO5__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO6__A 0x1C20065
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO6__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO6__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO7__A 0x1C20066
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO7__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO7__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO8__A 0x1C20067
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO8__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO8__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO9__A 0x1C20068
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO9__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO9__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO10__A 0x1C20069
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO10__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO10__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO11__A 0x1C2006A
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO11__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO11__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO12__A 0x1C2006B
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO12__W 12
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO12__M 0xFFF
+#define VSB_SYSCTRL_RAM0_FFEDDM2DATALKRATIO12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN1__A 0x1C2006C
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN1__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN1__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN2__A 0x1C2006D
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN2__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN2__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN3__A 0x1C2006E
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN3__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN3__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN4__A 0x1C2006F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN4__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN4__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN5__A 0x1C20070
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN5__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN5__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN6__A 0x1C20071
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN6__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN6__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN7__A 0x1C20072
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN7__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN7__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN8__A 0x1C20073
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN8__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN8__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN9__A 0x1C20074
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN9__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN9__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN10__A 0x1C20075
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN10__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN10__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN11__A 0x1C20076
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN11__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN11__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN12__A 0x1C20077
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN12__W 7
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN12__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRTRAINGAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1__A 0x1C20078
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1__W 15
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1__M 0x7FFF
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1TRAINGAIN1__B 0
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1TRAINGAIN1__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1TRAINGAIN1__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1TRAINGAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1DATAGAIN1__B 8
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1DATAGAIN1__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1DATAGAIN1__M 0x7F00
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1DATAGAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2__A 0x1C20079
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2__W 15
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2__M 0x7FFF
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2_FIRRCA1TRAINGAIN2__B 0
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2_FIRRCA1TRAINGAIN2__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2_FIRRCA1TRAINGAIN2__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2_FIRRCA1TRAINGAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2_FIRRCA1DATAGAIN2__B 8
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2_FIRRCA1DATAGAIN2__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2_FIRRCA1DATAGAIN2__M 0x7F00
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN2_FIRRCA1DATAGAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3__A 0x1C2007A
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3__W 15
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3__M 0x7FFF
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3_FIRRCA1TRAINGAIN3__B 0
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3_FIRRCA1TRAINGAIN3__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3_FIRRCA1TRAINGAIN3__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3_FIRRCA1TRAINGAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3_FIRRCA1DATAGAIN3__B 8
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3_FIRRCA1DATAGAIN3__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3_FIRRCA1DATAGAIN3__M 0x7F00
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN3_FIRRCA1DATAGAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4__A 0x1C2007B
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4__W 15
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4__M 0x7FFF
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4_FIRRCA1TRAINGAIN4__B 0
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4_FIRRCA1TRAINGAIN4__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4_FIRRCA1TRAINGAIN4__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4_FIRRCA1TRAINGAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4_FIRRCA1DATAGAIN4__B 8
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4_FIRRCA1DATAGAIN4__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4_FIRRCA1DATAGAIN4__M 0x7F00
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN4_FIRRCA1DATAGAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5__A 0x1C2007C
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5__W 15
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5__M 0x7FFF
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5_FIRRCA1TRAINGAIN5__B 0
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5_FIRRCA1TRAINGAIN5__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5_FIRRCA1TRAINGAIN5__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5_FIRRCA1TRAINGAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5_FIRRCA1DATAGAIN5__B 8
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5_FIRRCA1DATAGAIN5__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5_FIRRCA1DATAGAIN5__M 0x7F00
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN5_FIRRCA1DATAGAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6__A 0x1C2007D
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6__W 15
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6__M 0x7FFF
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6_FIRRCA1TRAINGAIN6__B 0
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6_FIRRCA1TRAINGAIN6__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6_FIRRCA1TRAINGAIN6__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6_FIRRCA1TRAINGAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6_FIRRCA1DATAGAIN6__B 8
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6_FIRRCA1DATAGAIN6__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6_FIRRCA1DATAGAIN6__M 0x7F00
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN6_FIRRCA1DATAGAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7__A 0x1C2007E
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7__W 15
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7__M 0x7FFF
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7_FIRRCA1TRAINGAIN7__B 0
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7_FIRRCA1TRAINGAIN7__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7_FIRRCA1TRAINGAIN7__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7_FIRRCA1TRAINGAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7_FIRRCA1DATAGAIN7__B 8
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7_FIRRCA1DATAGAIN7__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7_FIRRCA1DATAGAIN7__M 0x7F00
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN7_FIRRCA1DATAGAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8__A 0x1C2007F
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8__W 15
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8__M 0x7FFF
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8_FIRRCA1TRAINGAIN8__B 0
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8_FIRRCA1TRAINGAIN8__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8_FIRRCA1TRAINGAIN8__M 0x7F
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8_FIRRCA1TRAINGAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8_FIRRCA1DATAGAIN8__B 8
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8_FIRRCA1DATAGAIN8__W 7
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8_FIRRCA1DATAGAIN8__M 0x7F00
+#define VSB_SYSCTRL_RAM0_FIRRCA1GAIN8_FIRRCA1DATAGAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9__A 0x1C30000
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9_FIRRCA1TRAINGAIN9__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9_FIRRCA1TRAINGAIN9__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9_FIRRCA1TRAINGAIN9__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9_FIRRCA1TRAINGAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9_FIRRCA1DATAGAIN9__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9_FIRRCA1DATAGAIN9__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9_FIRRCA1DATAGAIN9__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN9_FIRRCA1DATAGAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10__A 0x1C30001
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10_FIRRCA1TRAINGAIN10__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10_FIRRCA1TRAINGAIN10__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10_FIRRCA1TRAINGAIN10__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10_FIRRCA1TRAINGAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10_FIRRCA1DATAGAIN10__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10_FIRRCA1DATAGAIN10__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10_FIRRCA1DATAGAIN10__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN10_FIRRCA1DATAGAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11__A 0x1C30002
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11_FIRRCA1TRAINGAIN11__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11_FIRRCA1TRAINGAIN11__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11_FIRRCA1TRAINGAIN11__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11_FIRRCA1TRAINGAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11_FIRRCA1DATAGAIN11__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11_FIRRCA1DATAGAIN11__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11_FIRRCA1DATAGAIN11__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN11_FIRRCA1DATAGAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12__A 0x1C30003
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12_FIRRCA1TRAINGAIN12__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12_FIRRCA1TRAINGAIN12__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12_FIRRCA1TRAINGAIN12__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12_FIRRCA1TRAINGAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12_FIRRCA1DATAGAIN12__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12_FIRRCA1DATAGAIN12__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12_FIRRCA1DATAGAIN12__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA1GAIN12_FIRRCA1DATAGAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1__A 0x1C30004
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1_FIRRCA2TRAINGAIN1__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1_FIRRCA2TRAINGAIN1__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1_FIRRCA2TRAINGAIN1__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1_FIRRCA2TRAINGAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1_FIRRCA2DATAGAIN1__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1_FIRRCA2DATAGAIN1__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1_FIRRCA2DATAGAIN1__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN1_FIRRCA2DATAGAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2__A 0x1C30005
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2_FIRRCA2TRAINGAIN2__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2_FIRRCA2TRAINGAIN2__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2_FIRRCA2TRAINGAIN2__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2_FIRRCA2TRAINGAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2_FIRRCA2DATAGAIN2__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2_FIRRCA2DATAGAIN2__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2_FIRRCA2DATAGAIN2__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN2_FIRRCA2DATAGAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3__A 0x1C30006
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3_FIRRCA2TRAINGAIN3__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3_FIRRCA2TRAINGAIN3__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3_FIRRCA2TRAINGAIN3__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3_FIRRCA2TRAINGAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3_FIRRCA2DATAGAIN3__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3_FIRRCA2DATAGAIN3__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3_FIRRCA2DATAGAIN3__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN3_FIRRCA2DATAGAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4__A 0x1C30007
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4_FIRRCA2TRAINGAIN4__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4_FIRRCA2TRAINGAIN4__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4_FIRRCA2TRAINGAIN4__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4_FIRRCA2TRAINGAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4_FIRRCA2DATAGAIN4__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4_FIRRCA2DATAGAIN4__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4_FIRRCA2DATAGAIN4__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN4_FIRRCA2DATAGAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5__A 0x1C30008
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5_FIRRCA2TRAINGAIN5__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5_FIRRCA2TRAINGAIN5__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5_FIRRCA2TRAINGAIN5__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5_FIRRCA2TRAINGAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5_FIRRCA2DATAGAIN5__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5_FIRRCA2DATAGAIN5__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5_FIRRCA2DATAGAIN5__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN5_FIRRCA2DATAGAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6__A 0x1C30009
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6_FIRRCA2TRAINGAIN6__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6_FIRRCA2TRAINGAIN6__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6_FIRRCA2TRAINGAIN6__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6_FIRRCA2TRAINGAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6_FIRRCA2DATAGAIN6__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6_FIRRCA2DATAGAIN6__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6_FIRRCA2DATAGAIN6__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN6_FIRRCA2DATAGAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7__A 0x1C3000A
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7_FIRRCA2TRAINGAIN7__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7_FIRRCA2TRAINGAIN7__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7_FIRRCA2TRAINGAIN7__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7_FIRRCA2TRAINGAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7_FIRRCA2DATAGAIN7__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7_FIRRCA2DATAGAIN7__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7_FIRRCA2DATAGAIN7__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN7_FIRRCA2DATAGAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8__A 0x1C3000B
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8_FIRRCA2TRAINGAIN8__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8_FIRRCA2TRAINGAIN8__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8_FIRRCA2TRAINGAIN8__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8_FIRRCA2TRAINGAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8_FIRRCA2DATAGAIN8__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8_FIRRCA2DATAGAIN8__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8_FIRRCA2DATAGAIN8__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN8_FIRRCA2DATAGAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9__A 0x1C3000C
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9_FIRRCA2TRAINGAIN9__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9_FIRRCA2TRAINGAIN9__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9_FIRRCA2TRAINGAIN9__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9_FIRRCA2TRAINGAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9_FIRRCA2DATAGAIN9__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9_FIRRCA2DATAGAIN9__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9_FIRRCA2DATAGAIN9__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN9_FIRRCA2DATAGAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10__A 0x1C3000D
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10_FIRRCA2TRAINGAIN10__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10_FIRRCA2TRAINGAIN10__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10_FIRRCA2TRAINGAIN10__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10_FIRRCA2TRAINGAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10_FIRRCA2DATAGAIN10__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10_FIRRCA2DATAGAIN10__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10_FIRRCA2DATAGAIN10__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN10_FIRRCA2DATAGAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11__A 0x1C3000E
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11_FIRRCA2TRAINGAIN11__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11_FIRRCA2TRAINGAIN11__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11_FIRRCA2TRAINGAIN11__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11_FIRRCA2TRAINGAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11_FIRRCA2DATAGAIN11__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11_FIRRCA2DATAGAIN11__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11_FIRRCA2DATAGAIN11__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN11_FIRRCA2DATAGAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12__A 0x1C3000F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12__W 15
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12_FIRRCA2TRAINGAIN12__B 0
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12_FIRRCA2TRAINGAIN12__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12_FIRRCA2TRAINGAIN12__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12_FIRRCA2TRAINGAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12_FIRRCA2DATAGAIN12__B 8
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12_FIRRCA2DATAGAIN12__W 7
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12_FIRRCA2DATAGAIN12__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRRCA2GAIN12_FIRRCA2DATAGAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1__A 0x1C30010
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1_FIRDDM1TRAINGAIN1__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1_FIRDDM1TRAINGAIN1__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1_FIRDDM1TRAINGAIN1__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1_FIRDDM1TRAINGAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1_FIRDDM1DATAGAIN1__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1_FIRDDM1DATAGAIN1__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1_FIRDDM1DATAGAIN1__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN1_FIRDDM1DATAGAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2__A 0x1C30011
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2_FIRDDM1TRAINGAIN2__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2_FIRDDM1TRAINGAIN2__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2_FIRDDM1TRAINGAIN2__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2_FIRDDM1TRAINGAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2_FIRDDM1DATAGAIN2__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2_FIRDDM1DATAGAIN2__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2_FIRDDM1DATAGAIN2__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN2_FIRDDM1DATAGAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3__A 0x1C30012
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3_FIRDDM1TRAINGAIN3__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3_FIRDDM1TRAINGAIN3__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3_FIRDDM1TRAINGAIN3__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3_FIRDDM1TRAINGAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3_FIRDDM1DATAGAIN3__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3_FIRDDM1DATAGAIN3__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3_FIRDDM1DATAGAIN3__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN3_FIRDDM1DATAGAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4__A 0x1C30013
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4_FIRDDM1TRAINGAIN4__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4_FIRDDM1TRAINGAIN4__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4_FIRDDM1TRAINGAIN4__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4_FIRDDM1TRAINGAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4_FIRDDM1DATAGAIN4__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4_FIRDDM1DATAGAIN4__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4_FIRDDM1DATAGAIN4__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN4_FIRDDM1DATAGAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5__A 0x1C30014
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5_FIRDDM1TRAINGAIN5__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5_FIRDDM1TRAINGAIN5__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5_FIRDDM1TRAINGAIN5__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5_FIRDDM1TRAINGAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5_FIRDDM1DATAGAIN5__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5_FIRDDM1DATAGAIN5__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5_FIRDDM1DATAGAIN5__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN5_FIRDDM1DATAGAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6__A 0x1C30015
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6_FIRDDM1TRAINGAIN6__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6_FIRDDM1TRAINGAIN6__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6_FIRDDM1TRAINGAIN6__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6_FIRDDM1TRAINGAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6_FIRDDM1DATAGAIN6__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6_FIRDDM1DATAGAIN6__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6_FIRDDM1DATAGAIN6__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN6_FIRDDM1DATAGAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7__A 0x1C30016
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7_FIRDDM1TRAINGAIN7__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7_FIRDDM1TRAINGAIN7__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7_FIRDDM1TRAINGAIN7__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7_FIRDDM1TRAINGAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7_FIRDDM1DATAGAIN7__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7_FIRDDM1DATAGAIN7__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7_FIRDDM1DATAGAIN7__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN7_FIRDDM1DATAGAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8__A 0x1C30017
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8_FIRDDM1TRAINGAIN8__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8_FIRDDM1TRAINGAIN8__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8_FIRDDM1TRAINGAIN8__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8_FIRDDM1TRAINGAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8_FIRDDM1DATAGAIN8__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8_FIRDDM1DATAGAIN8__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8_FIRDDM1DATAGAIN8__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN8_FIRDDM1DATAGAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9__A 0x1C30018
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9_FIRDDM1TRAINGAIN9__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9_FIRDDM1TRAINGAIN9__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9_FIRDDM1TRAINGAIN9__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9_FIRDDM1TRAINGAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9_FIRDDM1DATAGAIN9__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9_FIRDDM1DATAGAIN9__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9_FIRDDM1DATAGAIN9__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN9_FIRDDM1DATAGAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10__A 0x1C30019
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10_FIRDDM1TRAINGAIN10__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10_FIRDDM1TRAINGAIN10__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10_FIRDDM1TRAINGAIN10__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10_FIRDDM1TRAINGAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10_FIRDDM1DATAGAIN10__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10_FIRDDM1DATAGAIN10__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10_FIRDDM1DATAGAIN10__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN10_FIRDDM1DATAGAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11__A 0x1C3001A
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11_FIRDDM1TRAINGAIN11__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11_FIRDDM1TRAINGAIN11__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11_FIRDDM1TRAINGAIN11__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11_FIRDDM1TRAINGAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11_FIRDDM1DATAGAIN11__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11_FIRDDM1DATAGAIN11__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11_FIRDDM1DATAGAIN11__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN11_FIRDDM1DATAGAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12__A 0x1C3001B
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12_FIRDDM1TRAINGAIN12__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12_FIRDDM1TRAINGAIN12__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12_FIRDDM1TRAINGAIN12__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12_FIRDDM1TRAINGAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12_FIRDDM1DATAGAIN12__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12_FIRDDM1DATAGAIN12__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12_FIRDDM1DATAGAIN12__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM1GAIN12_FIRDDM1DATAGAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1__A 0x1C3001C
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1_FIRDDM2TRAINGAIN1__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1_FIRDDM2TRAINGAIN1__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1_FIRDDM2TRAINGAIN1__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1_FIRDDM2TRAINGAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1_FIRDDM2DATAGAIN1__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1_FIRDDM2DATAGAIN1__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1_FIRDDM2DATAGAIN1__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN1_FIRDDM2DATAGAIN1__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2__A 0x1C3001D
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2_FIRDDM2TRAINGAIN2__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2_FIRDDM2TRAINGAIN2__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2_FIRDDM2TRAINGAIN2__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2_FIRDDM2TRAINGAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2_FIRDDM2DATAGAIN2__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2_FIRDDM2DATAGAIN2__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2_FIRDDM2DATAGAIN2__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN2_FIRDDM2DATAGAIN2__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3__A 0x1C3001E
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3_FIRDDM2TRAINGAIN3__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3_FIRDDM2TRAINGAIN3__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3_FIRDDM2TRAINGAIN3__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3_FIRDDM2TRAINGAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3_FIRDDM2DATAGAIN3__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3_FIRDDM2DATAGAIN3__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3_FIRDDM2DATAGAIN3__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN3_FIRDDM2DATAGAIN3__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4__A 0x1C3001F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4_FIRDDM2TRAINGAIN4__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4_FIRDDM2TRAINGAIN4__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4_FIRDDM2TRAINGAIN4__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4_FIRDDM2TRAINGAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4_FIRDDM2DATAGAIN4__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4_FIRDDM2DATAGAIN4__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4_FIRDDM2DATAGAIN4__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN4_FIRDDM2DATAGAIN4__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5__A 0x1C30020
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5_FIRDDM2TRAINGAIN5__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5_FIRDDM2TRAINGAIN5__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5_FIRDDM2TRAINGAIN5__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5_FIRDDM2TRAINGAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5_FIRDDM2DATAGAIN5__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5_FIRDDM2DATAGAIN5__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5_FIRDDM2DATAGAIN5__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN5_FIRDDM2DATAGAIN5__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6__A 0x1C30021
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6_FIRDDM2TRAINGAIN6__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6_FIRDDM2TRAINGAIN6__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6_FIRDDM2TRAINGAIN6__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6_FIRDDM2TRAINGAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6_FIRDDM2DATAGAIN6__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6_FIRDDM2DATAGAIN6__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6_FIRDDM2DATAGAIN6__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN6_FIRDDM2DATAGAIN6__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7__A 0x1C30022
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7_FIRDDM2TRAINGAIN7__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7_FIRDDM2TRAINGAIN7__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7_FIRDDM2TRAINGAIN7__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7_FIRDDM2TRAINGAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7_FIRDDM2DATAGAIN7__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7_FIRDDM2DATAGAIN7__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7_FIRDDM2DATAGAIN7__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN7_FIRDDM2DATAGAIN7__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8__A 0x1C30023
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8_FIRDDM2TRAINGAIN8__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8_FIRDDM2TRAINGAIN8__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8_FIRDDM2TRAINGAIN8__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8_FIRDDM2TRAINGAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8_FIRDDM2DATAGAIN8__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8_FIRDDM2DATAGAIN8__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8_FIRDDM2DATAGAIN8__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN8_FIRDDM2DATAGAIN8__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9__A 0x1C30024
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9_FIRDDM2TRAINGAIN9__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9_FIRDDM2TRAINGAIN9__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9_FIRDDM2TRAINGAIN9__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9_FIRDDM2TRAINGAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9_FIRDDM2DATAGAIN9__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9_FIRDDM2DATAGAIN9__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9_FIRDDM2DATAGAIN9__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN9_FIRDDM2DATAGAIN9__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10__A 0x1C30025
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10_FIRDDM2TRAINGAIN10__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10_FIRDDM2TRAINGAIN10__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10_FIRDDM2TRAINGAIN10__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10_FIRDDM2TRAINGAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10_FIRDDM2DATAGAIN10__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10_FIRDDM2DATAGAIN10__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10_FIRDDM2DATAGAIN10__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN10_FIRDDM2DATAGAIN10__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11__A 0x1C30026
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11_FIRDDM2TRAINGAIN11__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11_FIRDDM2TRAINGAIN11__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11_FIRDDM2TRAINGAIN11__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11_FIRDDM2TRAINGAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11_FIRDDM2DATAGAIN11__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11_FIRDDM2DATAGAIN11__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11_FIRDDM2DATAGAIN11__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN11_FIRDDM2DATAGAIN11__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12__A 0x1C30027
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12__W 15
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12_FIRDDM2TRAINGAIN12__B 0
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12_FIRDDM2TRAINGAIN12__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12_FIRDDM2TRAINGAIN12__M 0x7F
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12_FIRDDM2TRAINGAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12_FIRDDM2DATAGAIN12__B 8
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12_FIRDDM2DATAGAIN12__W 7
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12_FIRDDM2DATAGAIN12__M 0x7F00
+#define VSB_SYSCTRL_RAM1_FIRDDM2GAIN12_FIRDDM2DATAGAIN12__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFETRAINLKRATIO__A 0x1C30028
+#define VSB_SYSCTRL_RAM1_DFETRAINLKRATIO__W 12
+#define VSB_SYSCTRL_RAM1_DFETRAINLKRATIO__M 0xFFF
+#define VSB_SYSCTRL_RAM1_DFETRAINLKRATIO__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFERCA1TRAINLKRATIO__A 0x1C30029
+#define VSB_SYSCTRL_RAM1_DFERCA1TRAINLKRATIO__W 12
+#define VSB_SYSCTRL_RAM1_DFERCA1TRAINLKRATIO__M 0xFFF
+#define VSB_SYSCTRL_RAM1_DFERCA1TRAINLKRATIO__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFERCA1DATALKRATIO__A 0x1C3002A
+#define VSB_SYSCTRL_RAM1_DFERCA1DATALKRATIO__W 12
+#define VSB_SYSCTRL_RAM1_DFERCA1DATALKRATIO__M 0xFFF
+#define VSB_SYSCTRL_RAM1_DFERCA1DATALKRATIO__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFERCA2TRAINLKRATIO__A 0x1C3002B
+#define VSB_SYSCTRL_RAM1_DFERCA2TRAINLKRATIO__W 12
+#define VSB_SYSCTRL_RAM1_DFERCA2TRAINLKRATIO__M 0xFFF
+#define VSB_SYSCTRL_RAM1_DFERCA2TRAINLKRATIO__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFERCA2DATALKRATIO__A 0x1C3002C
+#define VSB_SYSCTRL_RAM1_DFERCA2DATALKRATIO__W 12
+#define VSB_SYSCTRL_RAM1_DFERCA2DATALKRATIO__M 0xFFF
+#define VSB_SYSCTRL_RAM1_DFERCA2DATALKRATIO__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFEDDM1TRAINLKRATIO__A 0x1C3002D
+#define VSB_SYSCTRL_RAM1_DFEDDM1TRAINLKRATIO__W 12
+#define VSB_SYSCTRL_RAM1_DFEDDM1TRAINLKRATIO__M 0xFFF
+#define VSB_SYSCTRL_RAM1_DFEDDM1TRAINLKRATIO__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFEDDM1DATALKRATIO__A 0x1C3002E
+#define VSB_SYSCTRL_RAM1_DFEDDM1DATALKRATIO__W 12
+#define VSB_SYSCTRL_RAM1_DFEDDM1DATALKRATIO__M 0xFFF
+#define VSB_SYSCTRL_RAM1_DFEDDM1DATALKRATIO__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFEDDM2TRAINLKRATIO__A 0x1C3002F
+#define VSB_SYSCTRL_RAM1_DFEDDM2TRAINLKRATIO__W 12
+#define VSB_SYSCTRL_RAM1_DFEDDM2TRAINLKRATIO__M 0xFFF
+#define VSB_SYSCTRL_RAM1_DFEDDM2TRAINLKRATIO__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFEDDM2DATALKRATIO__A 0x1C30030
+#define VSB_SYSCTRL_RAM1_DFEDDM2DATALKRATIO__W 12
+#define VSB_SYSCTRL_RAM1_DFEDDM2DATALKRATIO__M 0xFFF
+#define VSB_SYSCTRL_RAM1_DFEDDM2DATALKRATIO__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFETRAINGAIN__A 0x1C30031
+#define VSB_SYSCTRL_RAM1_DFETRAINGAIN__W 7
+#define VSB_SYSCTRL_RAM1_DFETRAINGAIN__M 0x7F
+#define VSB_SYSCTRL_RAM1_DFETRAINGAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN__A 0x1C30032
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN__W 15
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN_DFERCA1TRAINGAIN__B 0
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN_DFERCA1TRAINGAIN__W 7
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN_DFERCA1TRAINGAIN__M 0x7F
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN_DFERCA1TRAINGAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN_DFERCA1DATAGAIN__B 8
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN_DFERCA1DATAGAIN__W 7
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN_DFERCA1DATAGAIN__M 0x7F00
+#define VSB_SYSCTRL_RAM1_DFERCA1GAIN_DFERCA1DATAGAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN__A 0x1C30033
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN__W 15
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN_DFERCA2TRAINGAIN__B 0
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN_DFERCA2TRAINGAIN__W 7
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN_DFERCA2TRAINGAIN__M 0x7F
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN_DFERCA2TRAINGAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN_DFERCA2DATAGAIN__B 8
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN_DFERCA2DATAGAIN__W 7
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN_DFERCA2DATAGAIN__M 0x7F00
+#define VSB_SYSCTRL_RAM1_DFERCA2GAIN_DFERCA2DATAGAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN__A 0x1C30034
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN__W 15
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN_DFEDDM1TRAINGAIN__B 0
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN_DFEDDM1TRAINGAIN__W 7
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN_DFEDDM1TRAINGAIN__M 0x7F
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN_DFEDDM1TRAINGAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN_DFEDDM1DATAGAIN__B 8
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN_DFEDDM1DATAGAIN__W 7
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN_DFEDDM1DATAGAIN__M 0x7F00
+#define VSB_SYSCTRL_RAM1_DFEDDM1GAIN_DFEDDM1DATAGAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN__A 0x1C30035
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN__W 15
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN__M 0x7FFF
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN_DFEDDM2TRAINGAIN__B 0
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN_DFEDDM2TRAINGAIN__W 7
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN_DFEDDM2TRAINGAIN__M 0x7F
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN_DFEDDM2TRAINGAIN__PRE 0x0
+
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN_DFEDDM2DATAGAIN__B 8
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN_DFEDDM2DATAGAIN__W 7
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN_DFEDDM2DATAGAIN__M 0x7F00
+#define VSB_SYSCTRL_RAM1_DFEDDM2GAIN_DFEDDM2DATAGAIN__PRE 0x0
+
+#define VSB_TCMEQ_RAM__A 0x1C40000
+
+#define VSB_TCMEQ_RAM_TCMEQ_RAM__B 0
+#define VSB_TCMEQ_RAM_TCMEQ_RAM__W 16
+#define VSB_TCMEQ_RAM_TCMEQ_RAM__M 0xFFFF
+#define VSB_TCMEQ_RAM_TCMEQ_RAM__PRE 0x0
+
+#define VSB_FCPRE_RAM__A 0x1C50000
+
+#define VSB_FCPRE_RAM_FCPRE_RAM__B 0
+#define VSB_FCPRE_RAM_FCPRE_RAM__W 16
+#define VSB_FCPRE_RAM_FCPRE_RAM__M 0xFFFF
+#define VSB_FCPRE_RAM_FCPRE_RAM__PRE 0x0
+
+#define VSB_EQTAP_RAM__A 0x1C60000
+
+#define VSB_EQTAP_RAM_EQTAP_RAM__B 0
+#define VSB_EQTAP_RAM_EQTAP_RAM__W 12
+#define VSB_EQTAP_RAM_EQTAP_RAM__M 0xFFF
+#define VSB_EQTAP_RAM_EQTAP_RAM__PRE 0x0
+
+#endif
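The register header added above follows the usual DRX naming convention: for every register, __A is the word address, __W the width in bits, __M the mask, and __PRE what appears to be the preset/default value; packed sub-fields additionally carry a __B bit offset. A minimal sketch of how such a packed register is typically updated is shown below — the demod_read16()/demod_write16() helpers and struct demod_dev are hypothetical placeholders, not part of this patch.

/*
 * Sketch only: combining the __A/__B/__M macros above to update the two
 * 7-bit sub-fields packed into FIRRCA1GAIN1.  Access helpers and the
 * device structure are hypothetical.
 */
static int set_firrca1_gains(struct demod_dev *dev, u16 train, u16 data)
{
	u16 val;
	int rc;

	rc = demod_read16(dev, VSB_SYSCTRL_RAM0_FIRRCA1GAIN1__A, &val);
	if (rc < 0)
		return rc;

	/* clear both sub-fields, then insert the new values */
	val &= ~(VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1TRAINGAIN1__M |
		 VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1DATAGAIN1__M);
	val |= (train << VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1TRAINGAIN1__B) &
		VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1TRAINGAIN1__M;
	val |= (data << VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1DATAGAIN1__B) &
		VSB_SYSCTRL_RAM0_FIRRCA1GAIN1_FIRRCA1DATAGAIN1__M;

	return demod_write16(dev, VSB_SYSCTRL_RAM0_FIRRCA1GAIN1__A, val);
}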
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 959ae36403b8..5b87ece69414 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2688,11 +2688,11 @@ static int DRXD_init(struct drxd_state *state, const u8 *fw, u32 fw_size)
status = EnableAndResetMB(state);
if (status < 0)
break;
- if (state->type_A)
+ if (state->type_A) {
status = ResetCEFR(state);
if (status < 0)
break;
-
+ }
if (fw) {
status = DownloadMicrocode(state, fw, fw_size);
if (status < 0)
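The DRXD_init() hunk above is a missing-braces fix: previously only the ResetCEFR() call was guarded by the type_A test, so the error check that follows it executed even when the type_A path was skipped. Reduced to its shape in toy code (illustrative only, not the driver's code):

static int reset_cefr_stub(void) { return 0; }	/* stands in for ResetCEFR() */

static int shape_before(int type_A, int status)
{
	if (type_A)
		status = reset_cefr_stub();
		if (status < 0)		/* NOT part of the if above       */
			return status;	/* ran even when type_A was false */
	return 0;
}

static int shape_after(int type_A, int status)
{
	if (type_A) {
		status = reset_cefr_stub();
		if (status < 0)		/* now only checked on the type_A path */
			return status;
	}
	return 0;
}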
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 1e344b033277..335daeff91b9 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -616,7 +616,7 @@ static int ds3000_read_snr(struct dvb_frontend *fe, u16 *snr)
snr_reading = dvbs2_noise_reading / tmp;
if (snr_reading > 80)
snr_reading = 80;
- *snr = -(dvbs2_snr_tab[snr_reading] / 1000);
+ *snr = -(dvbs2_snr_tab[snr_reading - 1] / 1000);
}
dprintk("%s: raw / cooked = 0x%02x / 0x%04x\n", __func__,
snr_reading, *snr);
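The ds3000_read_snr() change is an off-by-one fix: snr_reading is clamped to at most 80 and then used as a table index, so — assuming dvbs2_snr_tab[] holds 80 entries, which the clamp suggests — indexing with snr_reading itself could read one element past the end of the table. Subtracting 1 maps readings 1..80 onto indices 0..79, as in this reduced sketch (table contents and the lower-bound guard are placeholders added for the sketch):

static const u32 dvbs2_snr_tab[80] = { 0 /* , ... */ };

static u32 snr_lookup(u32 snr_reading)
{
	if (snr_reading < 1)
		snr_reading = 1;	/* guard added for the sketch only */
	if (snr_reading > 80)
		snr_reading = 80;	/* clamp as the driver does        */
	return dvbs2_snr_tab[snr_reading - 1];	/* 1..80 -> indices 0..79  */
}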
diff --git a/drivers/media/dvb-frontends/it913x-fe-priv.h b/drivers/media/dvb-frontends/it913x-fe-priv.h
deleted file mode 100644
index eb6fd8aebdb3..000000000000
--- a/drivers/media/dvb-frontends/it913x-fe-priv.h
+++ /dev/null
@@ -1,1051 +0,0 @@
-
-struct it913xset { u32 pro;
- u32 address;
- u8 reg[15];
- u8 count;
-};
-
-struct adctable { u32 adcFrequency;
- u32 bandwidth;
- u32 coeff_1_2048;
- u32 coeff_1_4096;
- u32 coeff_1_8191;
- u32 coeff_1_8192;
- u32 coeff_1_8193;
- u32 coeff_2_2k;
- u32 coeff_2_4k;
- u32 coeff_2_8k;
- u16 bfsfcw_fftinx_ratio;
- u16 fftinx_bfsfcw_ratio;
-};
-
-/* clock and coeff tables only table 3 is used with IT9137*/
-/* TODO other tables relate AF9035 may be removed */
-static struct adctable tab1[] = {
- { 20156250, 6000000,
- 0x02b8ba6e, 0x015c5d37, 0x00ae340d, 0x00ae2e9b, 0x00ae292a,
- 0x015c5d37, 0x00ae2e9b, 0x0057174e, 0x02f1, 0x015c },
- { 20156250, 7000000,
- 0x032cd980, 0x01966cc0, 0x00cb3cba, 0x00cb3660, 0x00cb3007,
- 0x01966cc0, 0x00cb3660, 0x00659b30, 0x0285, 0x0196 },
- { 20156250, 8000000,
- 0x03a0f893, 0x01d07c49, 0x00e84567, 0x00e83e25, 0x00e836e3,
- 0x01d07c49, 0x00e83e25, 0x00741f12, 0x0234, 0x01d0 },
- { 20156250, 5000000,
- 0x02449b5c, 0x01224dae, 0x00912b60, 0x009126d7, 0x0091224e,
- 0x01224dae, 0x009126d7, 0x0048936b, 0x0387, 0x0122 }
-};
-
-static struct adctable tab2[] = {
- { 20187500, 6000000,
- 0x02b7a654, 0x015bd32a, 0x00adef04, 0x00ade995, 0x00ade426,
- 0x015bd32a, 0x00ade995, 0x0056f4ca, 0x02f2, 0x015c },
- { 20187500, 7000000,
- 0x032b9761, 0x0195cbb1, 0x00caec30, 0x00cae5d8, 0x00cadf81,
- 0x0195cbb1, 0x00cae5d8, 0x006572ec, 0x0286, 0x0196 },
- { 20187500, 8000000,
- 0x039f886f, 0x01cfc438, 0x00e7e95b, 0x00e7e21c, 0x00e7dadd,
- 0x01cfc438, 0x00e7e21c, 0x0073f10e, 0x0235, 0x01d0 },
- { 20187500, 5000000,
- 0x0243b546, 0x0121daa3, 0x0090f1d9, 0x0090ed51, 0x0090e8ca,
- 0x0121daa3, 0x0090ed51, 0x004876a9, 0x0388, 0x0122 }
-
-};
-
-static struct adctable tab3[] = {
- { 20250000, 6000000,
- 0x02b580ad, 0x015ac057, 0x00ad6597, 0x00ad602b, 0x00ad5ac1,
- 0x015ac057, 0x00ad602b, 0x0056b016, 0x02f4, 0x015b },
- { 20250000, 7000000,
- 0x03291620, 0x01948b10, 0x00ca4bda, 0x00ca4588, 0x00ca3f36,
- 0x01948b10, 0x00ca4588, 0x006522c4, 0x0288, 0x0195 },
- { 20250000, 8000000,
- 0x039cab92, 0x01ce55c9, 0x00e7321e, 0x00e72ae4, 0x00e723ab,
- 0x01ce55c9, 0x00e72ae4, 0x00739572, 0x0237, 0x01ce },
- { 20250000, 5000000,
- 0x0241eb3b, 0x0120f59e, 0x00907f53, 0x00907acf, 0x0090764b,
- 0x0120f59e, 0x00907acf, 0x00483d67, 0x038b, 0x0121 }
-
-};
-
-static struct adctable tab4[] = {
- { 20583333, 6000000,
- 0x02aa4598, 0x015522cc, 0x00aa96bb, 0x00aa9166, 0x00aa8c12,
- 0x015522cc, 0x00aa9166, 0x005548b3, 0x0300, 0x0155 },
- { 20583333, 7000000,
- 0x031bfbdc, 0x018dfdee, 0x00c7052f, 0x00c6fef7, 0x00c6f8bf,
- 0x018dfdee, 0x00c6fef7, 0x00637f7b, 0x0293, 0x018e },
- { 20583333, 8000000,
- 0x038db21f, 0x01c6d910, 0x00e373a3, 0x00e36c88, 0x00e3656d,
- 0x01c6d910, 0x00e36c88, 0x0071b644, 0x0240, 0x01c7 },
- { 20583333, 5000000,
- 0x02388f54, 0x011c47aa, 0x008e2846, 0x008e23d5, 0x008e1f64,
- 0x011c47aa, 0x008e23d5, 0x004711ea, 0x039a, 0x011c }
-
-};
-
-static struct adctable tab5[] = {
- { 20416667, 6000000,
- 0x02afd765, 0x0157ebb3, 0x00abfb39, 0x00abf5d9, 0x00abf07a,
- 0x0157ebb3, 0x00abf5d9, 0x0055faed, 0x02fa, 0x0158 },
- { 20416667, 7000000,
- 0x03227b4b, 0x01913da6, 0x00c8a518, 0x00c89ed3, 0x00c8988e,
- 0x01913da6, 0x00c89ed3, 0x00644f69, 0x028d, 0x0191 },
- { 20416667, 8000000,
- 0x03951f32, 0x01ca8f99, 0x00e54ef7, 0x00e547cc, 0x00e540a2,
- 0x01ca8f99, 0x00e547cc, 0x0072a3e6, 0x023c, 0x01cb },
- { 20416667, 5000000,
- 0x023d337f, 0x011e99c0, 0x008f515a, 0x008f4ce0, 0x008f4865,
- 0x011e99c0, 0x008f4ce0, 0x0047a670, 0x0393, 0x011f }
-
-};
-
-static struct adctable tab6[] = {
- { 20480000, 6000000,
- 0x02adb6db, 0x0156db6e, 0x00ab7312, 0x00ab6db7, 0x00ab685c,
- 0x0156db6e, 0x00ab6db7, 0x0055b6db, 0x02fd, 0x0157 },
- { 20480000, 7000000,
- 0x03200000, 0x01900000, 0x00c80640, 0x00c80000, 0x00c7f9c0,
- 0x01900000, 0x00c80000, 0x00640000, 0x028f, 0x0190 },
- { 20480000, 8000000,
- 0x03924925, 0x01c92492, 0x00e4996e, 0x00e49249, 0x00e48b25,
- 0x01c92492, 0x00e49249, 0x00724925, 0x023d, 0x01c9 },
- { 20480000, 5000000,
- 0x023b6db7, 0x011db6db, 0x008edfe5, 0x008edb6e, 0x008ed6f7,
- 0x011db6db, 0x008edb6e, 0x00476db7, 0x0396, 0x011e }
-};
-
-static struct adctable tab7[] = {
- { 20500000, 6000000,
- 0x02ad0b99, 0x015685cc, 0x00ab4840, 0x00ab42e6, 0x00ab3d8c,
- 0x015685cc, 0x00ab42e6, 0x0055a173, 0x02fd, 0x0157 },
- { 20500000, 7000000,
- 0x031f3832, 0x018f9c19, 0x00c7d44b, 0x00c7ce0c, 0x00c7c7ce,
- 0x018f9c19, 0x00c7ce0c, 0x0063e706, 0x0290, 0x0190 },
- { 20500000, 8000000,
- 0x039164cb, 0x01c8b266, 0x00e46056, 0x00e45933, 0x00e45210,
- 0x01c8b266, 0x00e45933, 0x00722c99, 0x023e, 0x01c9 },
- { 20500000, 5000000,
- 0x023adeff, 0x011d6f80, 0x008ebc36, 0x008eb7c0, 0x008eb34a,
- 0x011d6f80, 0x008eb7c0, 0x00475be0, 0x0396, 0x011d }
-
-};
-
-static struct adctable tab8[] = {
- { 20625000, 6000000,
- 0x02a8e4bd, 0x0154725e, 0x00aa3e81, 0x00aa392f, 0x00aa33de,
- 0x0154725e, 0x00aa392f, 0x00551c98, 0x0302, 0x0154 },
- { 20625000, 7000000,
- 0x031a6032, 0x018d3019, 0x00c69e41, 0x00c6980c, 0x00c691d8,
- 0x018d3019, 0x00c6980c, 0x00634c06, 0x0294, 0x018d },
- { 20625000, 8000000,
- 0x038bdba6, 0x01c5edd3, 0x00e2fe02, 0x00e2f6ea, 0x00e2efd2,
- 0x01c5edd3, 0x00e2f6ea, 0x00717b75, 0x0242, 0x01c6 },
- { 20625000, 5000000,
- 0x02376948, 0x011bb4a4, 0x008ddec1, 0x008dda52, 0x008dd5e3,
- 0x011bb4a4, 0x008dda52, 0x0046ed29, 0x039c, 0x011c }
-
-};
-
-struct table {
- u32 xtal;
- struct adctable *table;
-};
-
-static struct table fe_clockTable[] = {
- {12000000, tab3}, /* 12.00MHz */
- {20480000, tab6}, /* 20.48MHz */
- {36000000, tab3}, /* 36.00MHz */
- {30000000, tab1}, /* 30.00MHz */
- {26000000, tab4}, /* 26.00MHz */
- {28000000, tab5}, /* 28.00MHz */
- {32000000, tab7}, /* 32.00MHz */
- {34000000, tab2}, /* 34.00MHz */
- {24000000, tab1}, /* 24.00MHz */
- {22000000, tab8}, /* 22.00MHz */
-};
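fe_clockTable above associates each supported crystal frequency with one of the adctable coefficient sets (the comment higher up notes that only tab3 is actually used with the IT9137). A minimal sketch of how a driver would pick the coefficient row for a given crystal and channel bandwidth — the helper name is hypothetical and error handling is reduced to a NULL return:

static struct adctable *fe_find_coeffs(u32 xtal, u32 bandwidth)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(fe_clockTable); i++) {
		if (fe_clockTable[i].xtal != xtal)
			continue;
		/* each adctable set holds four bandwidth rows (5..8 MHz) */
		for (j = 0; j < 4; j++)
			if (fe_clockTable[i].table[j].bandwidth == bandwidth)
				return &fe_clockTable[i].table[j];
	}
	return NULL;
}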
-
-/* fe get */
-fe_code_rate_t fe_code[] = {
- FEC_1_2,
- FEC_2_3,
- FEC_3_4,
- FEC_5_6,
- FEC_7_8,
- FEC_NONE,
-};
-
-fe_guard_interval_t fe_gi[] = {
- GUARD_INTERVAL_1_32,
- GUARD_INTERVAL_1_16,
- GUARD_INTERVAL_1_8,
- GUARD_INTERVAL_1_4,
-};
-
-fe_hierarchy_t fe_hi[] = {
- HIERARCHY_NONE,
- HIERARCHY_1,
- HIERARCHY_2,
- HIERARCHY_4,
-};
-
-fe_transmit_mode_t fe_mode[] = {
- TRANSMISSION_MODE_2K,
- TRANSMISSION_MODE_8K,
- TRANSMISSION_MODE_4K,
-};
-
-fe_modulation_t fe_con[] = {
- QPSK,
- QAM_16,
- QAM_64,
-};
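The fe_code/fe_gi/fe_hi/fe_mode/fe_con arrays translate raw demodulator TPS field values into DVB API enums by plain indexing. A hedged sketch of that use follows; the raw-field parameter names are hypothetical, and the bounds fallbacks are added here for safety rather than taken from the driver:

static void fe_fill_tps(struct dtv_frontend_properties *c,
			u8 tps_code, u8 tps_guard)
{
	/* raw field value -> DVB API enum, e.g. 0 -> FEC_1_2 */
	c->code_rate_HP   = tps_code < ARRAY_SIZE(fe_code) ?
				fe_code[tps_code] : FEC_NONE;
	c->guard_interval = tps_guard < ARRAY_SIZE(fe_gi) ?
				fe_gi[tps_guard] : GUARD_INTERVAL_AUTO;
}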
-
-enum {
- PRIORITY_HIGH = 0, /* High-priority stream */
- PRIORITY_LOW, /* Low-priority stream */
-};
-
-/* Standard demodulator functions */
-static struct it913xset set_solo_fe[] = {
- {PRO_LINK, GPIOH5_EN, {0x01}, 0x01},
- {PRO_LINK, GPIOH5_ON, {0x01}, 0x01},
- {PRO_LINK, GPIOH5_O, {0x00}, 0x01},
- {PRO_LINK, GPIOH5_O, {0x01}, 0x01},
- {PRO_LINK, DVBT_INTEN, {0x04}, 0x01},
- {PRO_LINK, DVBT_ENABLE, {0x05}, 0x01},
- {PRO_DMOD, MP2IF_MPEG_PAR_MODE, {0x00}, 0x01},
- {PRO_LINK, HOSTB_MPEG_SER_MODE, {0x00}, 0x01},
- {PRO_LINK, HOSTB_MPEG_PAR_MODE, {0x00}, 0x01},
- {PRO_DMOD, DCA_UPPER_CHIP, {0x00}, 0x01},
- {PRO_LINK, HOSTB_DCA_UPPER, {0x00}, 0x01},
- {PRO_DMOD, DCA_LOWER_CHIP, {0x00}, 0x01},
- {PRO_LINK, HOSTB_DCA_LOWER, {0x00}, 0x01},
- {PRO_DMOD, DCA_PLATCH, {0x00}, 0x01},
- {PRO_DMOD, DCA_FPGA_LATCH, {0x00}, 0x01},
- {PRO_DMOD, DCA_STAND_ALONE, {0x01}, 0x01},
- {PRO_DMOD, DCA_ENABLE, {0x00}, 0x01},
- {PRO_DMOD, MP2IF_MPEG_PAR_MODE, {0x00}, 0x01},
- {PRO_DMOD, BFS_FCW, {0x00, 0x00, 0x00}, 0x03},
- {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
-};
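Each it913xset table above is a scripted sequence of register writes: pro selects the processor/address space, address the start register, reg[] the payload and count its length, with a sentinel entry whose pro is 0xff marking the end. A sketch of how such a table is typically replayed — it913x_wr_regs() and struct it913x_fe_state are hypothetical stand-ins for the driver's real write routine and state structure:

static int it913x_fe_script(struct it913x_fe_state *state,
			    struct it913xset *set)
{
	int ret;

	for (; set->pro != 0xff; set++) {	/* 0xff ends the script */
		ret = it913x_wr_regs(state, set->pro, set->address,
				     set->reg, set->count);
		if (ret < 0)
			return ret;
	}
	return 0;
}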
-
-
-static struct it913xset init_1[] = {
- {PRO_LINK, LOCK3_OUT, {0x01}, 0x01},
- {PRO_LINK, PADMISCDRSR, {0x01}, 0x01},
- {PRO_LINK, PADMISCDR2, {0x00}, 0x01},
- {PRO_DMOD, 0xec57, {0x00, 0x00}, 0x02},
- {PRO_LINK, PADMISCDR4, {0x00}, 0x01}, /* Power up */
- {PRO_LINK, PADMISCDR8, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
-};
-
-
-/* Version 1 types */
-static struct it913xset it9135_v1[] = {
- {PRO_DMOD, 0x0051, {0x01}, 0x01},
- {PRO_DMOD, 0x0070, {0x0a}, 0x01},
- {PRO_DMOD, 0x007e, {0x04}, 0x01},
- {PRO_DMOD, 0x0081, {0x0a}, 0x01},
- {PRO_DMOD, 0x008a, {0x01}, 0x01},
- {PRO_DMOD, 0x008e, {0x01}, 0x01},
- {PRO_DMOD, 0x0092, {0x06}, 0x01},
- {PRO_DMOD, 0x0099, {0x01}, 0x01},
- {PRO_DMOD, 0x009f, {0xe1}, 0x01},
- {PRO_DMOD, 0x00a0, {0xcf}, 0x01},
- {PRO_DMOD, 0x00a3, {0x01}, 0x01},
- {PRO_DMOD, 0x00a5, {0x01}, 0x01},
- {PRO_DMOD, 0x00a6, {0x01}, 0x01},
- {PRO_DMOD, 0x00a9, {0x00}, 0x01},
- {PRO_DMOD, 0x00aa, {0x01}, 0x01},
- {PRO_DMOD, 0x00b0, {0x01}, 0x01},
- {PRO_DMOD, 0x00c2, {0x05}, 0x01},
- {PRO_DMOD, 0x00c6, {0x19}, 0x01},
- {PRO_DMOD, 0xf000, {0x0f}, 0x01},
- {PRO_DMOD, 0xf016, {0x10}, 0x01},
- {PRO_DMOD, 0xf017, {0x04}, 0x01},
- {PRO_DMOD, 0xf018, {0x05}, 0x01},
- {PRO_DMOD, 0xf019, {0x04}, 0x01},
- {PRO_DMOD, 0xf01a, {0x05}, 0x01},
- {PRO_DMOD, 0xf021, {0x03}, 0x01},
- {PRO_DMOD, 0xf022, {0x0a}, 0x01},
- {PRO_DMOD, 0xf023, {0x0a}, 0x01},
- {PRO_DMOD, 0xf02b, {0x00}, 0x01},
- {PRO_DMOD, 0xf02c, {0x01}, 0x01},
- {PRO_DMOD, 0xf064, {0x03}, 0x01},
- {PRO_DMOD, 0xf065, {0xf9}, 0x01},
- {PRO_DMOD, 0xf066, {0x03}, 0x01},
- {PRO_DMOD, 0xf067, {0x01}, 0x01},
- {PRO_DMOD, 0xf06f, {0xe0}, 0x01},
- {PRO_DMOD, 0xf070, {0x03}, 0x01},
- {PRO_DMOD, 0xf072, {0x0f}, 0x01},
- {PRO_DMOD, 0xf073, {0x03}, 0x01},
- {PRO_DMOD, 0xf078, {0x00}, 0x01},
- {PRO_DMOD, 0xf087, {0x00}, 0x01},
- {PRO_DMOD, 0xf09b, {0x3f}, 0x01},
- {PRO_DMOD, 0xf09c, {0x00}, 0x01},
- {PRO_DMOD, 0xf09d, {0x20}, 0x01},
- {PRO_DMOD, 0xf09e, {0x00}, 0x01},
- {PRO_DMOD, 0xf09f, {0x0c}, 0x01},
- {PRO_DMOD, 0xf0a0, {0x00}, 0x01},
- {PRO_DMOD, 0xf130, {0x04}, 0x01},
- {PRO_DMOD, 0xf132, {0x04}, 0x01},
- {PRO_DMOD, 0xf144, {0x1a}, 0x01},
- {PRO_DMOD, 0xf146, {0x00}, 0x01},
- {PRO_DMOD, 0xf14a, {0x01}, 0x01},
- {PRO_DMOD, 0xf14c, {0x00}, 0x01},
- {PRO_DMOD, 0xf14d, {0x00}, 0x01},
- {PRO_DMOD, 0xf14f, {0x04}, 0x01},
- {PRO_DMOD, 0xf158, {0x7f}, 0x01},
- {PRO_DMOD, 0xf15a, {0x00}, 0x01},
- {PRO_DMOD, 0xf15b, {0x08}, 0x01},
- {PRO_DMOD, 0xf15d, {0x03}, 0x01},
- {PRO_DMOD, 0xf15e, {0x05}, 0x01},
- {PRO_DMOD, 0xf163, {0x05}, 0x01},
- {PRO_DMOD, 0xf166, {0x01}, 0x01},
- {PRO_DMOD, 0xf167, {0x40}, 0x01},
- {PRO_DMOD, 0xf168, {0x0f}, 0x01},
- {PRO_DMOD, 0xf17a, {0x00}, 0x01},
- {PRO_DMOD, 0xf17b, {0x00}, 0x01},
- {PRO_DMOD, 0xf183, {0x01}, 0x01},
- {PRO_DMOD, 0xf19d, {0x40}, 0x01},
- {PRO_DMOD, 0xf1bc, {0x36}, 0x01},
- {PRO_DMOD, 0xf1bd, {0x00}, 0x01},
- {PRO_DMOD, 0xf1cb, {0xa0}, 0x01},
- {PRO_DMOD, 0xf1cc, {0x01}, 0x01},
- {PRO_DMOD, 0xf204, {0x10}, 0x01},
- {PRO_DMOD, 0xf214, {0x00}, 0x01},
- {PRO_DMOD, 0xf40e, {0x0a}, 0x01},
- {PRO_DMOD, 0xf40f, {0x40}, 0x01},
- {PRO_DMOD, 0xf410, {0x08}, 0x01},
- {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
- {PRO_DMOD, 0xf561, {0x15}, 0x01},
- {PRO_DMOD, 0xf562, {0x20}, 0x01},
- {PRO_DMOD, 0xf5df, {0xfb}, 0x01},
- {PRO_DMOD, 0xf5e0, {0x00}, 0x01},
- {PRO_DMOD, 0xf5e3, {0x09}, 0x01},
- {PRO_DMOD, 0xf5e4, {0x01}, 0x01},
- {PRO_DMOD, 0xf5e5, {0x01}, 0x01},
- {PRO_DMOD, 0xf5f8, {0x01}, 0x01},
- {PRO_DMOD, 0xf5fd, {0x01}, 0x01},
- {PRO_DMOD, 0xf600, {0x05}, 0x01},
- {PRO_DMOD, 0xf601, {0x08}, 0x01},
- {PRO_DMOD, 0xf602, {0x0b}, 0x01},
- {PRO_DMOD, 0xf603, {0x0e}, 0x01},
- {PRO_DMOD, 0xf604, {0x11}, 0x01},
- {PRO_DMOD, 0xf605, {0x14}, 0x01},
- {PRO_DMOD, 0xf606, {0x17}, 0x01},
- {PRO_DMOD, 0xf607, {0x1f}, 0x01},
- {PRO_DMOD, 0xf60e, {0x00}, 0x01},
- {PRO_DMOD, 0xf60f, {0x04}, 0x01},
- {PRO_DMOD, 0xf610, {0x32}, 0x01},
- {PRO_DMOD, 0xf611, {0x10}, 0x01},
- {PRO_DMOD, 0xf707, {0xfc}, 0x01},
- {PRO_DMOD, 0xf708, {0x00}, 0x01},
- {PRO_DMOD, 0xf709, {0x37}, 0x01},
- {PRO_DMOD, 0xf70a, {0x00}, 0x01},
- {PRO_DMOD, 0xf78b, {0x01}, 0x01},
- {PRO_DMOD, 0xf80f, {0x40}, 0x01},
- {PRO_DMOD, 0xf810, {0x54}, 0x01},
- {PRO_DMOD, 0xf811, {0x5a}, 0x01},
- {PRO_DMOD, 0xf905, {0x01}, 0x01},
- {PRO_DMOD, 0xfb06, {0x03}, 0x01},
- {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
-};
-
-static struct it913xset it9135_38[] = {
- {PRO_DMOD, 0x0043, {0x00}, 0x01},
- {PRO_DMOD, 0x0046, {0x38}, 0x01},
- {PRO_DMOD, 0x0051, {0x01}, 0x01},
- {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0x0068, {0x0a}, 0x01},
- {PRO_DMOD, 0x0070, {0x0a, 0x05, 0x02}, 0x03},
- {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0xc8, 0x01}, 0x05},
- {PRO_DMOD, 0x007e, {0x04, 0x00}, 0x02},
- {PRO_DMOD, 0x0081, { 0x0a, 0x12, 0x02, 0x0a, 0x03, 0xc8, 0xb8,
- 0xd0, 0xc3, 0x01}, 0x0a},
- {PRO_DMOD, 0x008e, {0x01}, 0x01},
- {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
- {PRO_DMOD, 0x0099, {0x01}, 0x01},
- {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
- {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
- {PRO_DMOD, 0x00a3, {0x01, 0x5a, 0x01, 0x01}, 0x04},
- {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
- {PRO_DMOD, 0x00b0, {0x01}, 0x01},
- {PRO_DMOD, 0x00b3, {0x02, 0x32}, 0x02},
- {PRO_DMOD, 0x00b6, {0x14}, 0x01},
- {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05}, 0x03},
- {PRO_DMOD, 0x00c4, {0x00}, 0x01},
- {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
- {PRO_DMOD, 0x00cc, {0x2e, 0x51, 0x33}, 0x03},
- {PRO_DMOD, 0x00f3, {0x05, 0x8c, 0x8c}, 0x03},
- {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
- {PRO_DMOD, 0x00fc, { 0x02, 0x02, 0x02, 0x09, 0x50, 0x7b, 0x77,
- 0x00, 0x02, 0xc8, 0x05, 0x7b}, 0x0c},
- {PRO_DMOD, 0x0109, {0x02}, 0x01},
- {PRO_DMOD, 0x0115, {0x0a, 0x03, 0x02, 0x80}, 0x04},
- {PRO_DMOD, 0x011a, {0xc8, 0x7b, 0x8a, 0xa0}, 0x04},
- {PRO_DMOD, 0x0122, {0x02, 0x18, 0xc3}, 0x03},
- {PRO_DMOD, 0x0127, {0x00, 0x07}, 0x02},
- {PRO_DMOD, 0x012a, {0x53, 0x51, 0x4e, 0x43}, 0x04},
- {PRO_DMOD, 0x0137, {0x01, 0x00, 0x07, 0x00, 0x06}, 0x05},
- {PRO_DMOD, 0x013d, {0x00, 0x01, 0x5b, 0xc8, 0x59}, 0x05},
- {PRO_DMOD, 0xf000, {0x0f}, 0x01},
- {PRO_DMOD, 0xf016, {0x10, 0x04, 0x05, 0x04, 0x05}, 0x05},
- {PRO_DMOD, 0xf01f, {0x8c, 0x00, 0x03, 0x0a, 0x0a}, 0x05},
- {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00, 0x01}, 0x04},
- {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
- {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
- {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
- {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
- {PRO_DMOD, 0xf085, {0x00, 0x02, 0x00}, 0x03},
- {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
- {PRO_DMOD, 0xf130, {0x04}, 0x01},
- {PRO_DMOD, 0xf132, {0x04}, 0x01},
- {PRO_DMOD, 0xf144, {0x1a}, 0x01},
- {PRO_DMOD, 0xf146, {0x00}, 0x01},
- {PRO_DMOD, 0xf14a, {0x01}, 0x01},
- {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf14f, {0x04}, 0x01},
- {PRO_DMOD, 0xf158, {0x7f}, 0x01},
- {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
- {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
- {PRO_DMOD, 0xf163, {0x05}, 0x01},
- {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
- {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf183, {0x01}, 0x01},
- {PRO_DMOD, 0xf19d, {0x40}, 0x01},
- {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
- {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
- {PRO_DMOD, 0xf204, {0x10}, 0x01},
- {PRO_DMOD, 0xf214, {0x00}, 0x01},
- {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
- {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
- {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
- {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
- {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
- {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
- {PRO_DMOD, 0xf5df, {0xfb, 0x00}, 0x02},
- {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
- {PRO_DMOD, 0xf5f8, {0x01}, 0x01},
- {PRO_DMOD, 0xf5fd, {0x01}, 0x01},
- {PRO_DMOD, 0xf600, { 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
- 0x1f}, 0x08},
- {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
- {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
- {PRO_DMOD, 0xf78b, {0x01}, 0x01},
- {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
- {PRO_DMOD, 0xf905, {0x01}, 0x01},
- {PRO_DMOD, 0xfb06, {0x03}, 0x01},
- {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
-};
-
-static struct it913xset it9135_51[] = {
- {PRO_DMOD, 0x0043, {0x00}, 0x01},
- {PRO_DMOD, 0x0046, {0x51}, 0x01},
- {PRO_DMOD, 0x0051, {0x01}, 0x01},
- {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0x0068, {0x0a}, 0x01},
- {PRO_DMOD, 0x0070, {0x0a, 0x06, 0x02}, 0x03},
- {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0xc8, 0x01}, 0x05},
- {PRO_DMOD, 0x007e, {0x04, 0x00}, 0x02},
- {PRO_DMOD, 0x0081, { 0x0a, 0x12, 0x02, 0x0a, 0x03, 0xc0, 0x96,
- 0xcf, 0xc3, 0x01}, 0x0a},
- {PRO_DMOD, 0x008e, {0x01}, 0x01},
- {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
- {PRO_DMOD, 0x0099, {0x01}, 0x01},
- {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
- {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
- {PRO_DMOD, 0x00a3, {0x01, 0x5a, 0x01, 0x01}, 0x04},
- {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
- {PRO_DMOD, 0x00b0, {0x01}, 0x01},
- {PRO_DMOD, 0x00b3, {0x02, 0x3c}, 0x02},
- {PRO_DMOD, 0x00b6, {0x14}, 0x01},
- {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05}, 0x03},
- {PRO_DMOD, 0x00c4, {0x00}, 0x01},
- {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
- {PRO_DMOD, 0x00cc, {0x2e, 0x51, 0x33}, 0x03},
- {PRO_DMOD, 0x00f3, {0x05, 0x8c, 0x8c}, 0x03},
- {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
- {PRO_DMOD, 0x00fc, { 0x03, 0x02, 0x02, 0x09, 0x50, 0x7a, 0x77,
- 0x01, 0x02, 0xb0, 0x02, 0x7a}, 0x0c},
- {PRO_DMOD, 0x0109, {0x02}, 0x01},
- {PRO_DMOD, 0x0115, {0x0a, 0x03, 0x02, 0x80}, 0x04},
- {PRO_DMOD, 0x011a, {0xc0, 0x7a, 0xac, 0x8c}, 0x04},
- {PRO_DMOD, 0x0122, {0x02, 0x70, 0xa4}, 0x03},
- {PRO_DMOD, 0x0127, {0x00, 0x07}, 0x02},
- {PRO_DMOD, 0x012a, {0x53, 0x51, 0x4e, 0x43}, 0x04},
- {PRO_DMOD, 0x0137, {0x01, 0x00, 0x07, 0x00, 0x06}, 0x05},
- {PRO_DMOD, 0x013d, {0x00, 0x01, 0x5b, 0xc0, 0x59}, 0x05},
- {PRO_DMOD, 0xf000, {0x0f}, 0x01},
- {PRO_DMOD, 0xf016, {0x10, 0x04, 0x05, 0x04, 0x05}, 0x05},
- {PRO_DMOD, 0xf01f, {0x8c, 0x00, 0x03, 0x0a, 0x0a}, 0x05},
- {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00, 0x01}, 0x04},
- {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
- {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
- {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
- {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
- {PRO_DMOD, 0xf085, {0xc0, 0x01, 0x00}, 0x03},
- {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
- {PRO_DMOD, 0xf130, {0x04}, 0x01},
- {PRO_DMOD, 0xf132, {0x04}, 0x01},
- {PRO_DMOD, 0xf144, {0x1a}, 0x01},
- {PRO_DMOD, 0xf146, {0x00}, 0x01},
- {PRO_DMOD, 0xf14a, {0x01}, 0x01},
- {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf14f, {0x04}, 0x01},
- {PRO_DMOD, 0xf158, {0x7f}, 0x01},
- {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
- {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
- {PRO_DMOD, 0xf163, {0x05}, 0x01},
- {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
- {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf183, {0x01}, 0x01},
- {PRO_DMOD, 0xf19d, {0x40}, 0x01},
- {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
- {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
- {PRO_DMOD, 0xf204, {0x10}, 0x01},
- {PRO_DMOD, 0xf214, {0x00}, 0x01},
- {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
- {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
- {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
- {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
- {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
- {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
- {PRO_DMOD, 0xf5df, {0xfb, 0x00}, 0x02},
- {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
- {PRO_DMOD, 0xf5f8, {0x01}, 0x01},
- {PRO_DMOD, 0xf5fd, {0x01}, 0x01},
- {PRO_DMOD, 0xf600, { 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
- 0x1f}, 0x08},
- {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
- {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
- {PRO_DMOD, 0xf78b, {0x01}, 0x01},
- {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
- {PRO_DMOD, 0xf905, {0x01}, 0x01},
- {PRO_DMOD, 0xfb06, {0x03}, 0x01},
- {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
-};
-
-static struct it913xset it9135_52[] = {
- {PRO_DMOD, 0x0043, {0x00}, 0x01},
- {PRO_DMOD, 0x0046, {0x52}, 0x01},
- {PRO_DMOD, 0x0051, {0x01}, 0x01},
- {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0x0068, {0x10}, 0x01},
- {PRO_DMOD, 0x0070, {0x0a, 0x05, 0x02}, 0x03},
- {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0xa0, 0x01}, 0x05},
- {PRO_DMOD, 0x007e, {0x04, 0x00}, 0x02},
- {PRO_DMOD, 0x0081, { 0x0a, 0x12, 0x03, 0x0a, 0x03, 0xb3, 0x97,
- 0xc0, 0x9e, 0x01}, 0x0a},
- {PRO_DMOD, 0x008e, {0x01}, 0x01},
- {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
- {PRO_DMOD, 0x0099, {0x01}, 0x01},
- {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
- {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
- {PRO_DMOD, 0x00a3, {0x01, 0x5c, 0x01, 0x01}, 0x04},
- {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
- {PRO_DMOD, 0x00b0, {0x01}, 0x01},
- {PRO_DMOD, 0x00b3, {0x02, 0x3c}, 0x02},
- {PRO_DMOD, 0x00b6, {0x14}, 0x01},
- {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05}, 0x03},
- {PRO_DMOD, 0x00c4, {0x00}, 0x01},
- {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
- {PRO_DMOD, 0x00cc, {0x2e, 0x51, 0x33}, 0x03},
- {PRO_DMOD, 0x00f3, {0x05, 0x91, 0x8c}, 0x03},
- {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
- {PRO_DMOD, 0x00fc, { 0x03, 0x02, 0x02, 0x09, 0x50, 0x74, 0x77,
- 0x02, 0x02, 0xae, 0x02, 0x6e}, 0x0c},
- {PRO_DMOD, 0x0109, {0x02}, 0x01},
- {PRO_DMOD, 0x0115, {0x0a, 0x03, 0x02, 0x80}, 0x04},
- {PRO_DMOD, 0x011a, {0xcd, 0x62, 0xa4, 0x8c}, 0x04},
- {PRO_DMOD, 0x0122, {0x03, 0x18, 0x9e}, 0x03},
- {PRO_DMOD, 0x0127, {0x00, 0x07}, 0x02},
- {PRO_DMOD, 0x012a, {0x53, 0x51, 0x4e, 0x43}, 0x04},
- {PRO_DMOD, 0x0137, {0x00, 0x00, 0x07, 0x00, 0x06}, 0x05},
- {PRO_DMOD, 0x013d, {0x00, 0x01, 0x5b, 0xb6, 0x59}, 0x05},
- {PRO_DMOD, 0xf000, {0x0f}, 0x01},
- {PRO_DMOD, 0xf016, {0x10, 0x04, 0x05, 0x04, 0x05}, 0x05},
- {PRO_DMOD, 0xf01f, {0x8c, 0x00, 0x03, 0x0a, 0x0a}, 0x05},
- {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00, 0x01}, 0x04},
- {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
- {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
- {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
- {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
- {PRO_DMOD, 0xf085, {0xc0, 0x01, 0x00}, 0x03},
- {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
- {PRO_DMOD, 0xf130, {0x04}, 0x01},
- {PRO_DMOD, 0xf132, {0x04}, 0x01},
- {PRO_DMOD, 0xf144, {0x1a}, 0x01},
- {PRO_DMOD, 0xf146, {0x00}, 0x01},
- {PRO_DMOD, 0xf14a, {0x01}, 0x01},
- {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf14f, {0x04}, 0x01},
- {PRO_DMOD, 0xf158, {0x7f}, 0x01},
- {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
- {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
- {PRO_DMOD, 0xf163, {0x05}, 0x01},
- {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
- {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf183, {0x01}, 0x01},
- {PRO_DMOD, 0xf19d, {0x40}, 0x01},
- {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
- {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
- {PRO_DMOD, 0xf204, {0x10}, 0x01},
- {PRO_DMOD, 0xf214, {0x00}, 0x01},
- {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
- {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
- {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
- {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
- {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
- {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
- {PRO_DMOD, 0xf5df, {0xfb, 0x00}, 0x02},
- {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
- {PRO_DMOD, 0xf5f8, {0x01}, 0x01},
- {PRO_DMOD, 0xf5fd, {0x01}, 0x01},
- {PRO_DMOD, 0xf600, {0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
- 0x1f}, 0x08},
- {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
- {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
- {PRO_DMOD, 0xf78b, {0x01}, 0x01},
- {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
- {PRO_DMOD, 0xf905, {0x01}, 0x01},
- {PRO_DMOD, 0xfb06, {0x03}, 0x01},
- {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
-};
-
-/* Version 2 types */
-static struct it913xset it9135_v2[] = {
- {PRO_DMOD, 0x0051, {0x01}, 0x01},
- {PRO_DMOD, 0x0070, {0x0a}, 0x01},
- {PRO_DMOD, 0x007e, {0x04}, 0x01},
- {PRO_DMOD, 0x0081, {0x0a}, 0x01},
- {PRO_DMOD, 0x008a, {0x01}, 0x01},
- {PRO_DMOD, 0x008e, {0x01}, 0x01},
- {PRO_DMOD, 0x0092, {0x06}, 0x01},
- {PRO_DMOD, 0x0099, {0x01}, 0x01},
- {PRO_DMOD, 0x009f, {0xe1}, 0x01},
- {PRO_DMOD, 0x00a0, {0xcf}, 0x01},
- {PRO_DMOD, 0x00a3, {0x01}, 0x01},
- {PRO_DMOD, 0x00a5, {0x01}, 0x01},
- {PRO_DMOD, 0x00a6, {0x01}, 0x01},
- {PRO_DMOD, 0x00a9, {0x00}, 0x01},
- {PRO_DMOD, 0x00aa, {0x01}, 0x01},
- {PRO_DMOD, 0x00b0, {0x01}, 0x01},
- {PRO_DMOD, 0x00c2, {0x05}, 0x01},
- {PRO_DMOD, 0x00c6, {0x19}, 0x01},
- {PRO_DMOD, 0xf000, {0x0f}, 0x01},
- {PRO_DMOD, 0xf02b, {0x00}, 0x01},
- {PRO_DMOD, 0xf064, {0x03}, 0x01},
- {PRO_DMOD, 0xf065, {0xf9}, 0x01},
- {PRO_DMOD, 0xf066, {0x03}, 0x01},
- {PRO_DMOD, 0xf067, {0x01}, 0x01},
- {PRO_DMOD, 0xf06f, {0xe0}, 0x01},
- {PRO_DMOD, 0xf070, {0x03}, 0x01},
- {PRO_DMOD, 0xf072, {0x0f}, 0x01},
- {PRO_DMOD, 0xf073, {0x03}, 0x01},
- {PRO_DMOD, 0xf078, {0x00}, 0x01},
- {PRO_DMOD, 0xf087, {0x00}, 0x01},
- {PRO_DMOD, 0xf09b, {0x3f}, 0x01},
- {PRO_DMOD, 0xf09c, {0x00}, 0x01},
- {PRO_DMOD, 0xf09d, {0x20}, 0x01},
- {PRO_DMOD, 0xf09e, {0x00}, 0x01},
- {PRO_DMOD, 0xf09f, {0x0c}, 0x01},
- {PRO_DMOD, 0xf0a0, {0x00}, 0x01},
- {PRO_DMOD, 0xf130, {0x04}, 0x01},
- {PRO_DMOD, 0xf132, {0x04}, 0x01},
- {PRO_DMOD, 0xf144, {0x1a}, 0x01},
- {PRO_DMOD, 0xf146, {0x00}, 0x01},
- {PRO_DMOD, 0xf14a, {0x01}, 0x01},
- {PRO_DMOD, 0xf14c, {0x00}, 0x01},
- {PRO_DMOD, 0xf14d, {0x00}, 0x01},
- {PRO_DMOD, 0xf14f, {0x04}, 0x01},
- {PRO_DMOD, 0xf158, {0x7f}, 0x01},
- {PRO_DMOD, 0xf15a, {0x00}, 0x01},
- {PRO_DMOD, 0xf15b, {0x08}, 0x01},
- {PRO_DMOD, 0xf15d, {0x03}, 0x01},
- {PRO_DMOD, 0xf15e, {0x05}, 0x01},
- {PRO_DMOD, 0xf163, {0x05}, 0x01},
- {PRO_DMOD, 0xf166, {0x01}, 0x01},
- {PRO_DMOD, 0xf167, {0x40}, 0x01},
- {PRO_DMOD, 0xf168, {0x0f}, 0x01},
- {PRO_DMOD, 0xf17a, {0x00}, 0x01},
- {PRO_DMOD, 0xf17b, {0x00}, 0x01},
- {PRO_DMOD, 0xf183, {0x01}, 0x01},
- {PRO_DMOD, 0xf19d, {0x40}, 0x01},
- {PRO_DMOD, 0xf1bc, {0x36}, 0x01},
- {PRO_DMOD, 0xf1bd, {0x00}, 0x01},
- {PRO_DMOD, 0xf1cb, {0xa0}, 0x01},
- {PRO_DMOD, 0xf1cc, {0x01}, 0x01},
- {PRO_DMOD, 0xf204, {0x10}, 0x01},
- {PRO_DMOD, 0xf214, {0x00}, 0x01},
- {PRO_DMOD, 0xf40e, {0x0a}, 0x01},
- {PRO_DMOD, 0xf40f, {0x40}, 0x01},
- {PRO_DMOD, 0xf410, {0x08}, 0x01},
- {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
- {PRO_DMOD, 0xf561, {0x15}, 0x01},
- {PRO_DMOD, 0xf562, {0x20}, 0x01},
- {PRO_DMOD, 0xf5e3, {0x09}, 0x01},
- {PRO_DMOD, 0xf5e4, {0x01}, 0x01},
- {PRO_DMOD, 0xf5e5, {0x01}, 0x01},
- {PRO_DMOD, 0xf600, {0x05}, 0x01},
- {PRO_DMOD, 0xf601, {0x08}, 0x01},
- {PRO_DMOD, 0xf602, {0x0b}, 0x01},
- {PRO_DMOD, 0xf603, {0x0e}, 0x01},
- {PRO_DMOD, 0xf604, {0x11}, 0x01},
- {PRO_DMOD, 0xf605, {0x14}, 0x01},
- {PRO_DMOD, 0xf606, {0x17}, 0x01},
- {PRO_DMOD, 0xf607, {0x1f}, 0x01},
- {PRO_DMOD, 0xf60e, {0x00}, 0x01},
- {PRO_DMOD, 0xf60f, {0x04}, 0x01},
- {PRO_DMOD, 0xf610, {0x32}, 0x01},
- {PRO_DMOD, 0xf611, {0x10}, 0x01},
- {PRO_DMOD, 0xf707, {0xfc}, 0x01},
- {PRO_DMOD, 0xf708, {0x00}, 0x01},
- {PRO_DMOD, 0xf709, {0x37}, 0x01},
- {PRO_DMOD, 0xf70a, {0x00}, 0x01},
- {PRO_DMOD, 0xf78b, {0x01}, 0x01},
- {PRO_DMOD, 0xf80f, {0x40}, 0x01},
- {PRO_DMOD, 0xf810, {0x54}, 0x01},
- {PRO_DMOD, 0xf811, {0x5a}, 0x01},
- {PRO_DMOD, 0xf905, {0x01}, 0x01},
- {PRO_DMOD, 0xfb06, {0x03}, 0x01},
- {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
-};
-
-static struct it913xset it9135_60[] = {
- {PRO_DMOD, 0x0043, {0x00}, 0x01},
- {PRO_DMOD, 0x0046, {0x60}, 0x01},
- {PRO_DMOD, 0x0051, {0x01}, 0x01},
- {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0x0068, {0x0a}, 0x01},
- {PRO_DMOD, 0x006a, {0x03}, 0x01},
- {PRO_DMOD, 0x0070, {0x0a, 0x05, 0x02}, 0x03},
- {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0x8c, 0x01}, 0x05},
- {PRO_DMOD, 0x007e, {0x04}, 0x01},
- {PRO_DMOD, 0x0081, {0x0a, 0x12}, 0x02},
- {PRO_DMOD, 0x0084, {0x0a, 0x33, 0xbe, 0xa0, 0xc6, 0xb6, 0x01}, 0x07},
- {PRO_DMOD, 0x008e, {0x01}, 0x01},
- {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
- {PRO_DMOD, 0x0099, {0x01}, 0x01},
- {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
- {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
- {PRO_DMOD, 0x00a3, {0x01, 0x5a, 0x01, 0x01}, 0x04},
- {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
- {PRO_DMOD, 0x00b0, {0x01}, 0x01},
- {PRO_DMOD, 0x00b3, {0x02, 0x3a}, 0x02},
- {PRO_DMOD, 0x00b6, {0x14}, 0x01},
- {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05, 0x01, 0x00}, 0x05},
- {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
- {PRO_DMOD, 0x00cb, {0x32, 0x2c, 0x4f, 0x30}, 0x04},
- {PRO_DMOD, 0x00f3, {0x05, 0xa0, 0x8c}, 0x03},
- {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
- {PRO_DMOD, 0x00fc, { 0x03, 0x03, 0x02, 0x0a, 0x50, 0x7b, 0x8c,
- 0x00, 0x02, 0xbe, 0x00}, 0x0b},
- {PRO_DMOD, 0x0109, {0x02}, 0x01},
- {PRO_DMOD, 0x0115, {0x0a, 0x03}, 0x02},
- {PRO_DMOD, 0x011a, {0xbe}, 0x01},
- {PRO_DMOD, 0x0124, {0xae}, 0x01},
- {PRO_DMOD, 0x0127, {0x00}, 0x01},
- {PRO_DMOD, 0x012a, {0x56, 0x50, 0x47, 0x42}, 0x04},
- {PRO_DMOD, 0x0137, {0x00}, 0x01},
- {PRO_DMOD, 0x013b, {0x08}, 0x01},
- {PRO_DMOD, 0x013f, {0x5b}, 0x01},
- {PRO_DMOD, 0x0141, { 0x59, 0xf9, 0x19, 0x19, 0x8c, 0x8c, 0x8c,
- 0x6e, 0x8c, 0x50, 0x8c, 0x8c, 0xac, 0xc6,
- 0x33}, 0x0f},
- {PRO_DMOD, 0x0151, {0x28}, 0x01},
- {PRO_DMOD, 0x0153, {0xbc}, 0x01},
- {PRO_DMOD, 0x0178, {0x09}, 0x01},
- {PRO_DMOD, 0x0181, {0x94, 0x6e}, 0x02},
- {PRO_DMOD, 0x0185, {0x24}, 0x01},
- {PRO_DMOD, 0x0187, {0x00, 0x00, 0xbe, 0x02, 0x80}, 0x05},
- {PRO_DMOD, 0xed02, {0xff}, 0x01},
- {PRO_DMOD, 0xee42, {0xff}, 0x01},
- {PRO_DMOD, 0xee82, {0xff}, 0x01},
- {PRO_DMOD, 0xf000, {0x0f}, 0x01},
- {PRO_DMOD, 0xf01f, {0x8c, 0x00}, 0x02},
- {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00}, 0x03},
- {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
- {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
- {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
- {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
- {PRO_DMOD, 0xf087, {0x00}, 0x01},
- {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
- {PRO_DMOD, 0xf130, {0x04}, 0x01},
- {PRO_DMOD, 0xf132, {0x04}, 0x01},
- {PRO_DMOD, 0xf144, {0x1a}, 0x01},
- {PRO_DMOD, 0xf146, {0x00}, 0x01},
- {PRO_DMOD, 0xf14a, {0x01}, 0x01},
- {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf14f, {0x04}, 0x01},
- {PRO_DMOD, 0xf158, {0x7f}, 0x01},
- {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
- {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
- {PRO_DMOD, 0xf163, {0x05}, 0x01},
- {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
- {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf183, {0x01}, 0x01},
- {PRO_DMOD, 0xf19d, {0x40}, 0x01},
- {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
- {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
- {PRO_DMOD, 0xf204, {0x10}, 0x01},
- {PRO_DMOD, 0xf214, {0x00}, 0x01},
- {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
- {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
- {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
- {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
- {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
- {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
- {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
- {PRO_DMOD, 0xf600, {0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17
- , 0x1f}, 0x08},
- {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
- {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
- {PRO_DMOD, 0xf78b, {0x01}, 0x01},
- {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
- {PRO_DMOD, 0xf905, {0x01}, 0x01},
- {PRO_DMOD, 0xfb06, {0x03}, 0x01},
- {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
-};
-
-static struct it913xset it9135_61[] = {
- {PRO_DMOD, 0x0043, {0x00}, 0x01},
- {PRO_DMOD, 0x0046, {0x61}, 0x01},
- {PRO_DMOD, 0x0051, {0x01}, 0x01},
- {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0x0068, {0x06}, 0x01},
- {PRO_DMOD, 0x006a, {0x03}, 0x01},
- {PRO_DMOD, 0x0070, {0x0a, 0x05, 0x02}, 0x03},
- {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0x90, 0x01}, 0x05},
- {PRO_DMOD, 0x007e, {0x04}, 0x01},
- {PRO_DMOD, 0x0081, {0x0a, 0x12}, 0x02},
- {PRO_DMOD, 0x0084, {0x0a, 0x33, 0xbc, 0x9c, 0xcc, 0xa8, 0x01}, 0x07},
- {PRO_DMOD, 0x008e, {0x01}, 0x01},
- {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
- {PRO_DMOD, 0x0099, {0x01}, 0x01},
- {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
- {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
- {PRO_DMOD, 0x00a3, {0x01, 0x5c, 0x01, 0x01}, 0x04},
- {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
- {PRO_DMOD, 0x00b0, {0x01}, 0x01},
- {PRO_DMOD, 0x00b3, {0x02, 0x3a}, 0x02},
- {PRO_DMOD, 0x00b6, {0x14}, 0x01},
- {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05, 0x01, 0x00}, 0x05},
- {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
- {PRO_DMOD, 0x00cb, {0x32, 0x2c, 0x4f, 0x30}, 0x04},
- {PRO_DMOD, 0x00f3, {0x05, 0xa0, 0x8c}, 0x03},
- {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
- {PRO_DMOD, 0x00fc, { 0x03, 0x03, 0x02, 0x08, 0x50, 0x7b, 0x8c,
- 0x01, 0x02, 0xc8, 0x00}, 0x0b},
- {PRO_DMOD, 0x0109, {0x02}, 0x01},
- {PRO_DMOD, 0x0115, {0x0a, 0x03}, 0x02},
- {PRO_DMOD, 0x011a, {0xc6}, 0x01},
- {PRO_DMOD, 0x0124, {0xa8}, 0x01},
- {PRO_DMOD, 0x0127, {0x00}, 0x01},
- {PRO_DMOD, 0x012a, {0x59, 0x50, 0x47, 0x42}, 0x04},
- {PRO_DMOD, 0x0137, {0x00}, 0x01},
- {PRO_DMOD, 0x013b, {0x05}, 0x01},
- {PRO_DMOD, 0x013f, {0x5b}, 0x01},
- {PRO_DMOD, 0x0141, { 0x59, 0xf9, 0x59, 0x59, 0x8c, 0x8c, 0x8c,
- 0x7b, 0x8c, 0x50, 0x8c, 0x8c, 0xa8, 0xc6,
- 0x33}, 0x0f},
- {PRO_DMOD, 0x0151, {0x28}, 0x01},
- {PRO_DMOD, 0x0153, {0xcc}, 0x01},
- {PRO_DMOD, 0x0178, {0x09}, 0x01},
- {PRO_DMOD, 0x0181, {0x9c, 0x76}, 0x02},
- {PRO_DMOD, 0x0185, {0x28}, 0x01},
- {PRO_DMOD, 0x0187, {0x01, 0x00, 0xaa, 0x02, 0x80}, 0x05},
- {PRO_DMOD, 0xed02, {0xff}, 0x01},
- {PRO_DMOD, 0xee42, {0xff}, 0x01},
- {PRO_DMOD, 0xee82, {0xff}, 0x01},
- {PRO_DMOD, 0xf000, {0x0f}, 0x01},
- {PRO_DMOD, 0xf01f, {0x8c, 0x00}, 0x02},
- {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00}, 0x03},
- {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
- {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
- {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
- {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
- {PRO_DMOD, 0xf087, {0x00}, 0x01},
- {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
- {PRO_DMOD, 0xf130, {0x04}, 0x01},
- {PRO_DMOD, 0xf132, {0x04}, 0x01},
- {PRO_DMOD, 0xf144, {0x1a}, 0x01},
- {PRO_DMOD, 0xf146, {0x00}, 0x01},
- {PRO_DMOD, 0xf14a, {0x01}, 0x01},
- {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf14f, {0x04}, 0x01},
- {PRO_DMOD, 0xf158, {0x7f}, 0x01},
- {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
- {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
- {PRO_DMOD, 0xf163, {0x05}, 0x01},
- {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
- {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf183, {0x01}, 0x01},
- {PRO_DMOD, 0xf19d, {0x40}, 0x01},
- {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
- {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
- {PRO_DMOD, 0xf204, {0x10}, 0x01},
- {PRO_DMOD, 0xf214, {0x00}, 0x01},
- {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
- {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
- {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
- {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
- {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
- {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
- {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
- {PRO_DMOD, 0xf600, { 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
- 0x1f}, 0x08},
- {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
- {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
- {PRO_DMOD, 0xf78b, {0x01}, 0x01},
- {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
- {PRO_DMOD, 0xf905, {0x01}, 0x01},
- {PRO_DMOD, 0xfb06, {0x03}, 0x01},
- {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
-};
-
-static struct it913xset it9135_62[] = {
- {PRO_DMOD, 0x0043, {0x00}, 0x01},
- {PRO_DMOD, 0x0046, {0x62}, 0x01},
- {PRO_DMOD, 0x0051, {0x01}, 0x01},
- {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0x0068, {0x0a}, 0x01},
- {PRO_DMOD, 0x006a, {0x03}, 0x01},
- {PRO_DMOD, 0x0070, {0x0a, 0x05, 0x02}, 0x03},
- {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0x8c, 0x01}, 0x05},
- {PRO_DMOD, 0x007e, {0x04}, 0x01},
- {PRO_DMOD, 0x0081, {0x0a, 0x12}, 0x02},
- {PRO_DMOD, 0x0084, { 0x0a, 0x33, 0xb8, 0x9c, 0xb2, 0xa6, 0x01},
- 0x07},
- {PRO_DMOD, 0x008e, {0x01}, 0x01},
- {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
- {PRO_DMOD, 0x0099, {0x01}, 0x01},
- {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
- {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
- {PRO_DMOD, 0x00a3, {0x01, 0x5a, 0x01, 0x01}, 0x04},
- {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
- {PRO_DMOD, 0x00b0, {0x01}, 0x01},
- {PRO_DMOD, 0x00b3, {0x02, 0x3a}, 0x02},
- {PRO_DMOD, 0x00b6, {0x14}, 0x01},
- {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05, 0x01, 0x00}, 0x05},
- {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
- {PRO_DMOD, 0x00cb, {0x32, 0x2c, 0x4f, 0x30}, 0x04},
- {PRO_DMOD, 0x00f3, {0x05, 0x8c, 0x8c}, 0x03},
- {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
- {PRO_DMOD, 0x00fc, { 0x02, 0x03, 0x02, 0x09, 0x50, 0x6e, 0x8c,
- 0x02, 0x02, 0xc2, 0x00}, 0x0b},
- {PRO_DMOD, 0x0109, {0x02}, 0x01},
- {PRO_DMOD, 0x0115, {0x0a, 0x03}, 0x02},
- {PRO_DMOD, 0x011a, {0xb8}, 0x01},
- {PRO_DMOD, 0x0124, {0xa8}, 0x01},
- {PRO_DMOD, 0x0127, {0x00}, 0x01},
- {PRO_DMOD, 0x012a, {0x53, 0x51, 0x4e, 0x43}, 0x04},
- {PRO_DMOD, 0x0137, {0x00}, 0x01},
- {PRO_DMOD, 0x013b, {0x05}, 0x01},
- {PRO_DMOD, 0x013f, {0x5b}, 0x01},
- {PRO_DMOD, 0x0141, { 0x59, 0xf9, 0x59, 0x19, 0x8c, 0x8c, 0x8c,
- 0x7b, 0x8c, 0x50, 0x70, 0x8c, 0x96, 0xd0,
- 0x33}, 0x0f},
- {PRO_DMOD, 0x0151, {0x28}, 0x01},
- {PRO_DMOD, 0x0153, {0xb2}, 0x01},
- {PRO_DMOD, 0x0178, {0x09}, 0x01},
- {PRO_DMOD, 0x0181, {0x9c, 0x6e}, 0x02},
- {PRO_DMOD, 0x0185, {0x24}, 0x01},
- {PRO_DMOD, 0x0187, {0x00, 0x00, 0xb8, 0x02, 0x80}, 0x05},
- {PRO_DMOD, 0xed02, {0xff}, 0x01},
- {PRO_DMOD, 0xee42, {0xff}, 0x01},
- {PRO_DMOD, 0xee82, {0xff}, 0x01},
- {PRO_DMOD, 0xf000, {0x0f}, 0x01},
- {PRO_DMOD, 0xf01f, {0x8c, 0x00}, 0x02},
- {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00}, 0x03},
- {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
- {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
- {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
- {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
- {PRO_DMOD, 0xf087, {0x00}, 0x01},
- {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
- {PRO_DMOD, 0xf130, {0x04}, 0x01},
- {PRO_DMOD, 0xf132, {0x04}, 0x01},
- {PRO_DMOD, 0xf144, {0x1a}, 0x01},
- {PRO_DMOD, 0xf146, {0x00}, 0x01},
- {PRO_DMOD, 0xf14a, {0x01}, 0x01},
- {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf14f, {0x04}, 0x01},
- {PRO_DMOD, 0xf158, {0x7f}, 0x01},
- {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
- {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
- {PRO_DMOD, 0xf163, {0x05}, 0x01},
- {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
- {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
- {PRO_DMOD, 0xf183, {0x01}, 0x01},
- {PRO_DMOD, 0xf19d, {0x40}, 0x01},
- {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
- {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
- {PRO_DMOD, 0xf204, {0x10}, 0x01},
- {PRO_DMOD, 0xf214, {0x00}, 0x01},
- {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
- {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
- {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
- {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
- {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
- {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
- {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
- {PRO_DMOD, 0xf600, { 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
- 0x1f}, 0x08},
- {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
- {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
- {PRO_DMOD, 0xf78b, {0x01}, 0x01},
- {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
- {PRO_DMOD, 0xf905, {0x01}, 0x01},
- {PRO_DMOD, 0xfb06, {0x03}, 0x01},
- {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
-};
-
-/* Tuner setting scripts (still keeping it9137) */
-static struct it913xset it9137_tuner_off[] = {
- {PRO_DMOD, 0xfba8, {0x01}, 0x01}, /* Tuner Clock Off */
- {PRO_DMOD, 0xec40, {0x00}, 0x01}, /* Power Down Tuner */
- {PRO_DMOD, 0xec02, {0x3f, 0x1f, 0x3f, 0x3f}, 0x04},
- {PRO_DMOD, 0xec06, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, 0x0c},
- {PRO_DMOD, 0xec12, {0x00, 0x00, 0x00, 0x00}, 0x04},
- {PRO_DMOD, 0xec17, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00}, 0x09},
- {PRO_DMOD, 0xec22, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00}, 0x0a},
- {PRO_DMOD, 0xec20, {0x00}, 0x01},
- {PRO_DMOD, 0xec3f, {0x01}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
-};
-
-static struct it913xset set_it9135_template[] = {
- {PRO_DMOD, 0xee06, {0x00}, 0x01},
- {PRO_DMOD, 0xec56, {0x00}, 0x01},
- {PRO_DMOD, 0xec4c, {0x00}, 0x01},
- {PRO_DMOD, 0xec4d, {0x00}, 0x01},
- {PRO_DMOD, 0xec4e, {0x00}, 0x01},
- {PRO_DMOD, 0x011e, {0x00}, 0x01}, /* Older Devices */
- {PRO_DMOD, 0x011f, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
-};
-
-static struct it913xset set_it9137_template[] = {
- {PRO_DMOD, 0xee06, {0x00}, 0x01},
- {PRO_DMOD, 0xec56, {0x00}, 0x01},
- {PRO_DMOD, 0xec4c, {0x00}, 0x01},
- {PRO_DMOD, 0xec4d, {0x00}, 0x01},
- {PRO_DMOD, 0xec4e, {0x00}, 0x01},
- {PRO_DMOD, 0xec4f, {0x00}, 0x01},
- {PRO_DMOD, 0xec50, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
-};
diff --git a/drivers/media/dvb-frontends/it913x-fe.c b/drivers/media/dvb-frontends/it913x-fe.c
deleted file mode 100644
index 6e1c6eb340b7..000000000000
--- a/drivers/media/dvb-frontends/it913x-fe.c
+++ /dev/null
@@ -1,1045 +0,0 @@
-/*
- * Driver for it913x-fe Frontend
- *
- * with support for on chip it9137 integral tuner
- *
- * Copyright (C) 2011 Malcolm Priestley (tvboxspy@gmail.com)
- * IT9137 Copyright (C) ITE Tech Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-#include "dvb_frontend.h"
-#include "it913x-fe.h"
-#include "it913x-fe-priv.h"
-
-static int it913x_debug;
-
-module_param_named(debug, it913x_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
-
-#define dprintk(level, args...) do { \
- if (level & it913x_debug) \
- printk(KERN_DEBUG "it913x-fe: " args); \
-} while (0)
-
-#define deb_info(args...) dprintk(0x01, args)
-#define debug_data_snipet(level, name, p) \
- dprintk(level, name" (%02x%02x%02x%02x%02x%02x%02x%02x)", \
- *p, *(p+1), *(p+2), *(p+3), *(p+4), \
- *(p+5), *(p+6), *(p+7));
-#define info(format, arg...) \
- printk(KERN_INFO "it913x-fe: " format "\n" , ## arg)
-
-struct it913x_fe_state {
- struct dvb_frontend frontend;
- struct i2c_adapter *i2c_adap;
- struct ite_config *config;
- u8 i2c_addr;
- u32 frequency;
- fe_modulation_t constellation;
- fe_transmit_mode_t transmission_mode;
- u8 priority;
- u32 crystalFrequency;
- u32 adcFrequency;
- u8 tuner_type;
- struct adctable *table;
- fe_status_t it913x_status;
- u16 tun_xtal;
- u8 tun_fdiv;
- u8 tun_clk_mode;
- u32 tun_fn_min;
- u32 ucblocks;
-};
-
-static int it913x_read_reg(struct it913x_fe_state *state,
- u32 reg, u8 *data, u8 count)
-{
- int ret;
- u8 pro = PRO_DMOD; /* All reads from demodulator */
- u8 b[4];
- struct i2c_msg msg[2] = {
- { .addr = state->i2c_addr + (pro << 1), .flags = 0,
- .buf = b, .len = sizeof(b) },
- { .addr = state->i2c_addr + (pro << 1), .flags = I2C_M_RD,
- .buf = data, .len = count }
- };
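-	/* the 32-bit register address is sent as four bytes, high byte first */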
- b[0] = (u8) reg >> 24;
- b[1] = (u8)(reg >> 16) & 0xff;
- b[2] = (u8)(reg >> 8) & 0xff;
- b[3] = (u8) reg & 0xff;
-
- ret = i2c_transfer(state->i2c_adap, msg, 2);
-
- return ret;
-}
-
-static int it913x_read_reg_u8(struct it913x_fe_state *state, u32 reg)
-{
- int ret;
- u8 b[1];
- ret = it913x_read_reg(state, reg, &b[0], sizeof(b));
- return (ret < 0) ? -ENODEV : b[0];
-}
-
-static int it913x_write(struct it913x_fe_state *state,
- u8 pro, u32 reg, u8 buf[], u8 count)
-{
- u8 b[256];
- struct i2c_msg msg[1] = {
- { .addr = state->i2c_addr + (pro << 1), .flags = 0,
- .buf = b, .len = count + 4 }
- };
- int ret;
-
- b[0] = (u8) reg >> 24;
- b[1] = (u8)(reg >> 16) & 0xff;
- b[2] = (u8)(reg >> 8) & 0xff;
- b[3] = (u8) reg & 0xff;
- memcpy(&b[4], buf, count);
-
- ret = i2c_transfer(state->i2c_adap, msg, 1);
-
- if (ret < 0)
- return -EIO;
-
- return 0;
-}
-
-static int it913x_write_reg(struct it913x_fe_state *state,
- u8 pro, u32 reg, u32 data)
-{
- int ret;
- u8 b[4];
- u8 s;
-
- b[0] = data >> 24;
- b[1] = (data >> 16) & 0xff;
- b[2] = (data >> 8) & 0xff;
- b[3] = data & 0xff;
- /* expand write as needed */
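-	/* write only the low-order bytes needed to hold the value of data */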
- if (data < 0x100)
- s = 3;
- else if (data < 0x1000)
- s = 2;
- else if (data < 0x100000)
- s = 1;
- else
- s = 0;
-
- ret = it913x_write(state, pro, reg, &b[s], sizeof(b) - s);
-
- return ret;
-}
-
-static int it913x_fe_script_loader(struct it913x_fe_state *state,
- struct it913xset *loadscript)
-{
- int ret, i;
- if (loadscript == NULL)
- return -EINVAL;
-
- for (i = 0; i < 1000; ++i) {
- if (loadscript[i].pro == 0xff)
- break;
- ret = it913x_write(state, loadscript[i].pro,
- loadscript[i].address,
- loadscript[i].reg, loadscript[i].count);
- if (ret < 0)
- return -ENODEV;
- }
- return 0;
-}
-
-static int it913x_init_tuner(struct it913x_fe_state *state)
-{
- int ret, i, reg;
- u8 val, nv_val;
- u8 nv[] = {48, 32, 24, 16, 12, 8, 6, 4, 2};
- u8 b[2];
-
- reg = it913x_read_reg_u8(state, 0xec86);
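-	/* clock mode (0xec86) selects the tuner reference, divider and initial calibration value */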
- switch (reg) {
- case 0:
- state->tun_clk_mode = reg;
- state->tun_xtal = 2000;
- state->tun_fdiv = 3;
- val = 16;
- break;
- case -ENODEV:
- return -ENODEV;
- case 1:
- default:
- state->tun_clk_mode = reg;
- state->tun_xtal = 640;
- state->tun_fdiv = 1;
- val = 6;
- break;
- }
-
- reg = it913x_read_reg_u8(state, 0xed03);
-
- if (reg < 0)
- return -ENODEV;
- else if (reg < ARRAY_SIZE(nv))
- nv_val = nv[reg];
- else
- nv_val = 2;
-
- for (i = 0; i < 50; i++) {
- ret = it913x_read_reg(state, 0xed23, &b[0], sizeof(b));
- reg = (b[1] << 8) + b[0];
- if (reg > 0)
- break;
- if (ret < 0)
- return -ENODEV;
- udelay(2000);
- }
- state->tun_fn_min = state->tun_xtal * reg;
- state->tun_fn_min /= (state->tun_fdiv * nv_val);
- deb_info("Tuner fn_min %d", state->tun_fn_min);
-
- if (state->config->chip_ver > 1)
- msleep(50);
- else {
- for (i = 0; i < 50; i++) {
- reg = it913x_read_reg_u8(state, 0xec82);
- if (reg > 0)
- break;
- if (reg < 0)
- return -ENODEV;
- udelay(2000);
- }
- }
-
- return it913x_write_reg(state, PRO_DMOD, 0xed81, val);
-}
-
-static int it9137_set_tuner(struct it913x_fe_state *state,
- u32 bandwidth, u32 frequency_m)
-{
- struct it913xset *set_tuner = set_it9137_template;
- int ret, reg;
- u32 frequency = frequency_m / 1000;
- u32 freq, temp_f, tmp;
- u16 iqik_m_cal;
- u16 n_div;
- u8 n;
- u8 l_band;
- u8 lna_band;
- u8 bw;
-
- if (state->config->firmware_ver == 1)
- set_tuner = set_it9135_template;
- else
- set_tuner = set_it9137_template;
-
- deb_info("Tuner Frequency %d Bandwidth %d", frequency, bandwidth);
-
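-	/* pick the LNA band and L-band select from the tuning frequency (kHz) */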
- if (frequency >= 51000 && frequency <= 440000) {
- l_band = 0;
- lna_band = 0;
- } else if (frequency > 440000 && frequency <= 484000) {
- l_band = 1;
- lna_band = 1;
- } else if (frequency > 484000 && frequency <= 533000) {
- l_band = 1;
- lna_band = 2;
- } else if (frequency > 533000 && frequency <= 587000) {
- l_band = 1;
- lna_band = 3;
- } else if (frequency > 587000 && frequency <= 645000) {
- l_band = 1;
- lna_band = 4;
- } else if (frequency > 645000 && frequency <= 710000) {
- l_band = 1;
- lna_band = 5;
- } else if (frequency > 710000 && frequency <= 782000) {
- l_band = 1;
- lna_band = 6;
- } else if (frequency > 782000 && frequency <= 860000) {
- l_band = 1;
- lna_band = 7;
- } else if (frequency > 1450000 && frequency <= 1492000) {
- l_band = 1;
- lna_band = 0;
- } else if (frequency > 1660000 && frequency <= 1685000) {
- l_band = 1;
- lna_band = 1;
- } else
- return -EINVAL;
- set_tuner[0].reg[0] = lna_band;
-
- switch (bandwidth) {
- case 5000000:
- bw = 0;
- break;
- case 6000000:
- bw = 2;
- break;
- case 7000000:
- bw = 4;
- break;
- default:
- case 8000000:
- bw = 6;
- break;
- }
-
- set_tuner[1].reg[0] = bw;
- set_tuner[2].reg[0] = 0xa0 | (l_band << 3);
-
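-	/* choose the PLL divider (n_div) and band code (n) from the frequency range */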
- if (frequency > 53000 && frequency <= 74000) {
- n_div = 48;
- n = 0;
- } else if (frequency > 74000 && frequency <= 111000) {
- n_div = 32;
- n = 1;
- } else if (frequency > 111000 && frequency <= 148000) {
- n_div = 24;
- n = 2;
- } else if (frequency > 148000 && frequency <= 222000) {
- n_div = 16;
- n = 3;
- } else if (frequency > 222000 && frequency <= 296000) {
- n_div = 12;
- n = 4;
- } else if (frequency > 296000 && frequency <= 445000) {
- n_div = 8;
- n = 5;
- } else if (frequency > 445000 && frequency <= state->tun_fn_min) {
- n_div = 6;
- n = 6;
- } else if (frequency > state->tun_fn_min && frequency <= 950000) {
- n_div = 4;
- n = 7;
- } else if (frequency > 1450000 && frequency <= 1680000) {
- n_div = 2;
- n = 0;
- } else
- return -EINVAL;
-
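-	/* scale the calibration value from 0xed81 by the divider; the correction depends on the tuner clock mode */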
- reg = it913x_read_reg_u8(state, 0xed81);
- iqik_m_cal = (u16)reg * n_div;
-
- if (reg < 0x20) {
- if (state->tun_clk_mode == 0)
- iqik_m_cal = (iqik_m_cal * 9) >> 5;
- else
- iqik_m_cal >>= 1;
- } else {
- iqik_m_cal = 0x40 - iqik_m_cal;
- if (state->tun_clk_mode == 0)
- iqik_m_cal = ~((iqik_m_cal * 9) >> 5);
- else
- iqik_m_cal = ~(iqik_m_cal >> 1);
- }
-
- temp_f = frequency * (u32)n_div * (u32)state->tun_fdiv;
- freq = temp_f / state->tun_xtal;
- tmp = freq * state->tun_xtal;
-
- if ((temp_f - tmp) >= (state->tun_xtal >> 1))
- freq++;
-
- freq += (u32) n << 13;
- /* Frequency OMEGA_IQIK_M_CAL_MID*/
- temp_f = freq + (u32)iqik_m_cal;
-
- set_tuner[3].reg[0] = temp_f & 0xff;
- set_tuner[4].reg[0] = (temp_f >> 8) & 0xff;
-
- deb_info("High Frequency = %04x", temp_f);
-
- /* Lower frequency */
- set_tuner[5].reg[0] = freq & 0xff;
- set_tuner[6].reg[0] = (freq >> 8) & 0xff;
-
- deb_info("low Frequency = %04x", freq);
-
- ret = it913x_fe_script_loader(state, set_tuner);
-
- return (ret < 0) ? -ENODEV : 0;
-}
-
-static int it913x_fe_select_bw(struct it913x_fe_state *state,
- u32 bandwidth, u32 adcFrequency)
-{
- int ret, i;
- u8 buffer[256];
- u32 coeff[8];
- u16 bfsfcw_fftinx_ratio;
- u16 fftinx_bfsfcw_ratio;
- u8 count;
- u8 bw;
- u8 adcmultiplier;
-
- deb_info("Bandwidth %d Adc %d", bandwidth, adcFrequency);
-
- switch (bandwidth) {
- case 5000000:
- bw = 3;
- break;
- case 6000000:
- bw = 0;
- break;
- case 7000000:
- bw = 1;
- break;
- default:
- case 8000000:
- bw = 2;
- break;
- }
- ret = it913x_write_reg(state, PRO_DMOD, REG_BW, bw);
-
- if (state->table == NULL)
- return -EINVAL;
-
- /* In write order */
- coeff[0] = state->table[bw].coeff_1_2048;
- coeff[1] = state->table[bw].coeff_2_2k;
- coeff[2] = state->table[bw].coeff_1_8191;
- coeff[3] = state->table[bw].coeff_1_8192;
- coeff[4] = state->table[bw].coeff_1_8193;
- coeff[5] = state->table[bw].coeff_2_8k;
- coeff[6] = state->table[bw].coeff_1_4096;
- coeff[7] = state->table[bw].coeff_2_4k;
- bfsfcw_fftinx_ratio = state->table[bw].bfsfcw_fftinx_ratio;
- fftinx_bfsfcw_ratio = state->table[bw].fftinx_bfsfcw_ratio;
-
- /* ADC multiplier */
- ret = it913x_read_reg_u8(state, ADC_X_2);
- if (ret < 0)
- return -EINVAL;
-
- adcmultiplier = ret;
-
- count = 0;
-
- /* Build Buffer for COEFF Registers */
- for (i = 0; i < 8; i++) {
- if (adcmultiplier == 1)
- coeff[i] /= 2;
- buffer[count++] = (coeff[i] >> 24) & 0x3;
- buffer[count++] = (coeff[i] >> 16) & 0xff;
- buffer[count++] = (coeff[i] >> 8) & 0xff;
- buffer[count++] = coeff[i] & 0xff;
- }
-
- /* bfsfcw_fftinx_ratio register 0x21-0x22 */
- buffer[count++] = bfsfcw_fftinx_ratio & 0xff;
- buffer[count++] = (bfsfcw_fftinx_ratio >> 8) & 0xff;
- /* fftinx_bfsfcw_ratio register 0x23-0x24 */
- buffer[count++] = fftinx_bfsfcw_ratio & 0xff;
- buffer[count++] = (fftinx_bfsfcw_ratio >> 8) & 0xff;
- /* start at COEFF_1_2048 and write through to fftinx_bfsfcw_ratio*/
- ret = it913x_write(state, PRO_DMOD, COEFF_1_2048, buffer, count);
-
- for (i = 0; i < 42; i += 8)
- debug_data_snipet(0x1, "Buffer", &buffer[i]);
-
- return ret;
-}
-
-
-
-static int it913x_fe_read_status(struct dvb_frontend *fe, fe_status_t *status)
-{
- struct it913x_fe_state *state = fe->demodulator_priv;
- int ret, i;
- fe_status_t old_status = state->it913x_status;
- *status = 0;
-
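-	/* report FE_HAS_SIGNAL first, then poll MP2IF sync (up to ~1 s) before adding carrier/viterbi/sync */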
- if (state->it913x_status == 0) {
- ret = it913x_read_reg_u8(state, EMPTY_CHANNEL_STATUS);
- if (ret == 0x1) {
- *status |= FE_HAS_SIGNAL;
- for (i = 0; i < 40; i++) {
- ret = it913x_read_reg_u8(state, MP2IF_SYNC_LK);
- if (ret == 0x1)
- break;
- msleep(25);
- }
- if (ret == 0x1)
- *status |= FE_HAS_CARRIER
- | FE_HAS_VITERBI
- | FE_HAS_SYNC;
- state->it913x_status = *status;
- }
- }
-
- if (state->it913x_status & FE_HAS_SYNC) {
- ret = it913x_read_reg_u8(state, TPSD_LOCK);
- if (ret == 0x1)
- *status |= FE_HAS_LOCK
- | state->it913x_status;
- else
- state->it913x_status = 0;
- if (old_status != state->it913x_status)
- ret = it913x_write_reg(state, PRO_LINK, GPIOH3_O, ret);
- }
-
- return 0;
-}
-
-/* FEC offset values indexed by fe_code_rate_t; unsupported rates are 0 */
-int it913x_qpsk_pval[] = {0, -93, -91, -90, 0, -89, -88};
-int it913x_16qam_pval[] = {0, -87, -85, -84, 0, -83, -82};
-int it913x_64qam_pval[] = {0, -82, -80, -78, 0, -77, -76};
-
-static int it913x_get_signal_strength(struct dvb_frontend *fe)
-{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- struct it913x_fe_state *state = fe->demodulator_priv;
- u8 code_rate;
- int ret, temp;
- u8 lna_gain_os;
-
- ret = it913x_read_reg_u8(state, VAR_P_INBAND);
- if (ret < 0)
- return ret;
-
- /* VHF/UHF gain offset */
- if (state->frequency < 300000000)
- lna_gain_os = 7;
- else
- lna_gain_os = 14;
-
- temp = (ret - 100) - lna_gain_os;
-
- if (state->priority == PRIORITY_HIGH)
- code_rate = p->code_rate_HP;
- else
- code_rate = p->code_rate_LP;
-
- if (code_rate >= ARRAY_SIZE(it913x_qpsk_pval))
- return -EINVAL;
-
- deb_info("Reg VAR_P_INBAND:%d Calc Offset Value:%d", ret, temp);
-
- /* Apply FEC offset values*/
- switch (p->modulation) {
- case QPSK:
- temp -= it913x_qpsk_pval[code_rate];
- break;
- case QAM_16:
- temp -= it913x_16qam_pval[code_rate];
- break;
- case QAM_64:
- temp -= it913x_64qam_pval[code_rate];
- break;
- default:
- return -EINVAL;
- }
-
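-	/* map the offset-corrected level to a 0-100 signal strength value */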
- if (temp < -15)
- ret = 0;
- else if ((-15 <= temp) && (temp < 0))
- ret = (2 * (temp + 15)) / 3;
- else if ((0 <= temp) && (temp < 20))
- ret = 4 * temp + 10;
- else if ((20 <= temp) && (temp < 35))
- ret = (2 * (temp - 20)) / 3 + 90;
- else if (temp >= 35)
- ret = 100;
-
- deb_info("Signal Strength :%d", ret);
-
- return ret;
-}
-
-static int it913x_fe_read_signal_strength(struct dvb_frontend *fe,
- u16 *strength)
-{
- struct it913x_fe_state *state = fe->demodulator_priv;
- int ret = 0;
- if (state->config->read_slevel) {
- if (state->it913x_status & FE_HAS_SIGNAL)
- ret = it913x_read_reg_u8(state, SIGNAL_LEVEL);
- } else
- ret = it913x_get_signal_strength(fe);
-
- if (ret >= 0)
- *strength = (u16)((u32)ret * 0xffff / 0x64);
-
- return (ret < 0) ? -ENODEV : 0;
-}
-
-static int it913x_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
-{
- struct it913x_fe_state *state = fe->demodulator_priv;
- int ret;
- u8 reg[3];
- u32 snr_val, snr_min, snr_max;
- u32 temp;
-
- ret = it913x_read_reg(state, 0x2c, reg, sizeof(reg));
-
- snr_val = (u32)(reg[2] << 16) | (reg[1] << 8) | reg[0];
-
- ret |= it913x_read_reg(state, 0xf78b, reg, 1);
- if (reg[0])
- snr_val /= reg[0];
-
- if (state->transmission_mode == TRANSMISSION_MODE_2K)
- snr_val *= 4;
- else if (state->transmission_mode == TRANSMISSION_MODE_4K)
- snr_val *= 2;
-
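-	/* constellation-dependent limits used to scale snr_val into the 0-0xffff range */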
- if (state->constellation == QPSK) {
- snr_min = 0xb4711;
- snr_max = 0x191451;
- } else if (state->constellation == QAM_16) {
- snr_min = 0x4f0d5;
- snr_max = 0xc7925;
- } else if (state->constellation == QAM_64) {
- snr_min = 0x256d0;
- snr_max = 0x626be;
- } else
- return -EINVAL;
-
- if (snr_val < snr_min)
- *snr = 0;
- else if (snr_val < snr_max) {
- temp = (snr_val - snr_min) >> 5;
- temp *= 0xffff;
- temp /= (snr_max - snr_min) >> 5;
- *snr = (u16)temp;
- } else
- *snr = 0xffff;
-
- return (ret < 0) ? -ENODEV : 0;
-}
-
-static int it913x_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
-{
- struct it913x_fe_state *state = fe->demodulator_priv;
- u8 reg[5];
- /* Read Aborted Packets and Pre-Viterbi error rate 5 bytes */
- it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg));
- state->ucblocks += (u32)(reg[1] << 8) | reg[0];
- *ber = (u32)(reg[4] << 16) | (reg[3] << 8) | reg[2];
- return 0;
-}
-
-static int it913x_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
-{
- struct it913x_fe_state *state = fe->demodulator_priv;
- int ret;
- u8 reg[2];
- /* Aborted Packets */
- ret = it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg));
- state->ucblocks += (u32)(reg[1] << 8) | reg[0];
- *ucblocks = state->ucblocks;
- return ret;
-}
-
-static int it913x_fe_get_frontend(struct dvb_frontend *fe)
-{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- struct it913x_fe_state *state = fe->demodulator_priv;
- u8 reg[8];
-
- it913x_read_reg(state, REG_TPSD_TX_MODE, reg, sizeof(reg));
-
- if (reg[3] < 3)
- p->modulation = fe_con[reg[3]];
-
- if (reg[0] < 3)
- p->transmission_mode = fe_mode[reg[0]];
-
- if (reg[1] < 4)
- p->guard_interval = fe_gi[reg[1]];
-
- if (reg[2] < 4)
- p->hierarchy = fe_hi[reg[2]];
-
- state->priority = reg[5];
-
- p->code_rate_HP = (reg[6] < 6) ? fe_code[reg[6]] : FEC_NONE;
- p->code_rate_LP = (reg[7] < 6) ? fe_code[reg[7]] : FEC_NONE;
-
- /* Update internal state to reflect the autodetected props */
- state->constellation = p->modulation;
- state->transmission_mode = p->transmission_mode;
-
- return 0;
-}
-
-static int it913x_fe_set_frontend(struct dvb_frontend *fe)
-{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- struct it913x_fe_state *state = fe->demodulator_priv;
- int i;
- u8 empty_ch, last_ch;
-
- state->it913x_status = 0;
-
- /* Set bw*/
- it913x_fe_select_bw(state, p->bandwidth_hz,
- state->adcFrequency);
-
- /* Training Mode Off */
- it913x_write_reg(state, PRO_LINK, TRAINING_MODE, 0x0);
-
- /* Clear Empty Channel */
- it913x_write_reg(state, PRO_DMOD, EMPTY_CHANNEL_STATUS, 0x0);
-
- /* Clear bits */
- it913x_write_reg(state, PRO_DMOD, MP2IF_SYNC_LK, 0x0);
- /* LED on */
- it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1);
- /* Select Band*/
- if ((p->frequency >= 51000000) && (p->frequency <= 230000000))
- i = 0;
- else if ((p->frequency >= 350000000) && (p->frequency <= 900000000))
- i = 1;
- else if ((p->frequency >= 1450000000) && (p->frequency <= 1680000000))
- i = 2;
- else
- return -EOPNOTSUPP;
-
- it913x_write_reg(state, PRO_DMOD, FREE_BAND, i);
-
- deb_info("Frontend Set Tuner Type %02x", state->tuner_type);
- switch (state->tuner_type) {
- case IT9135_38:
- case IT9135_51:
- case IT9135_52:
- case IT9135_60:
- case IT9135_61:
- case IT9135_62:
- it9137_set_tuner(state,
- p->bandwidth_hz, p->frequency);
- break;
- default:
- if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
- }
- break;
- }
- /* LED off */
- it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0);
- /* Trigger ofsm */
- it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0);
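-	/* poll the empty-channel status, then TPS lock, for up to one second each */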
- last_ch = 2;
- for (i = 0; i < 40; ++i) {
- empty_ch = it913x_read_reg_u8(state, EMPTY_CHANNEL_STATUS);
- if (last_ch == 1 && empty_ch == 1)
- break;
- if (last_ch == 2 && empty_ch == 2)
- return 0;
- last_ch = empty_ch;
- msleep(25);
- }
- for (i = 0; i < 40; ++i) {
- if (it913x_read_reg_u8(state, D_TPSD_LOCK) == 1)
- break;
- msleep(25);
- }
-
- state->frequency = p->frequency;
- return 0;
-}
-
-static int it913x_fe_suspend(struct it913x_fe_state *state)
-{
- int ret, i;
- u8 b;
-
- ret = it913x_write_reg(state, PRO_DMOD, SUSPEND_FLAG, 0x1);
-
- ret |= it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0);
-
- for (i = 0; i < 128; i++) {
- ret = it913x_read_reg(state, SUSPEND_FLAG, &b, 1);
- if (ret < 0)
- return -ENODEV;
- if (b == 0)
- break;
-
- }
-
- ret |= it913x_write_reg(state, PRO_DMOD, AFE_MEM0, 0x8);
- /* Turn LED off */
- ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0);
-
- ret |= it913x_fe_script_loader(state, it9137_tuner_off);
-
- return (ret < 0) ? -ENODEV : 0;
-}
-
-/* Power sequence */
-/* Power Up Tuner on -> Frontend suspend off -> Tuner clk on */
-/* Power Down Frontend suspend on -> Tuner clk off -> Tuner off */
-
-static int it913x_fe_sleep(struct dvb_frontend *fe)
-{
- struct it913x_fe_state *state = fe->demodulator_priv;
- return it913x_fe_suspend(state);
-}
-
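-/* Fixed-point long division: returns approximately (a / b) scaled by 2^x */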
-static u32 compute_div(u32 a, u32 b, u32 x)
-{
- u32 res = 0;
- u32 c = 0;
- u32 i = 0;
-
- if (a > b) {
- c = a / b;
- a = a - c * b;
- }
-
- for (i = 0; i < x; i++) {
- if (a >= b) {
- res += 1;
- a -= b;
- }
- a <<= 1;
- res <<= 1;
- }
-
- res = (c << x) + res;
-
- return res;
-}
-
-static int it913x_fe_start(struct it913x_fe_state *state)
-{
- struct it913xset *set_lna;
- struct it913xset *set_mode;
- int ret;
- u8 adf = (state->config->adf & 0xf);
- u32 adc, xtal;
- u8 b[4];
-
- if (state->config->chip_ver == 1)
- ret = it913x_init_tuner(state);
-
- info("ADF table value :%02x", adf);
-
- if (adf < 10) {
- state->crystalFrequency = fe_clockTable[adf].xtal ;
- state->table = fe_clockTable[adf].table;
- state->adcFrequency = state->table->adcFrequency;
-
- adc = compute_div(state->adcFrequency, 1000000ul, 19ul);
- xtal = compute_div(state->crystalFrequency, 1000000ul, 19ul);
-
- } else
- return -EINVAL;
-
- /* Set LED indicator on GPIOH3 */
- ret = it913x_write_reg(state, PRO_LINK, GPIOH3_EN, 0x1);
- ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_ON, 0x1);
- ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1);
-
- ret |= it913x_write_reg(state, PRO_LINK, 0xf641, state->tuner_type);
- ret |= it913x_write_reg(state, PRO_DMOD, 0xf5ca, 0x01);
- ret |= it913x_write_reg(state, PRO_DMOD, 0xf715, 0x01);
-
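-	/* crystal (4 bytes) and ADC (3 bytes) clock values, least significant byte first */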
- b[0] = xtal & 0xff;
- b[1] = (xtal >> 8) & 0xff;
- b[2] = (xtal >> 16) & 0xff;
- b[3] = (xtal >> 24);
- ret |= it913x_write(state, PRO_DMOD, XTAL_CLK, b , 4);
-
- b[0] = adc & 0xff;
- b[1] = (adc >> 8) & 0xff;
- b[2] = (adc >> 16) & 0xff;
- ret |= it913x_write(state, PRO_DMOD, ADC_FREQ, b, 3);
-
- if (state->config->adc_x2)
- ret |= it913x_write_reg(state, PRO_DMOD, ADC_X_2, 0x01);
- b[0] = 0;
- b[1] = 0;
- b[2] = 0;
- ret |= it913x_write(state, PRO_DMOD, 0x0029, b, 3);
-
- info("Crystal Frequency :%d Adc Frequency :%d ADC X2: %02x",
- state->crystalFrequency, state->adcFrequency,
- state->config->adc_x2);
- deb_info("Xtal value :%04x Adc value :%04x", xtal, adc);
-
- if (ret < 0)
- return -ENODEV;
-
- /* v1 or v2 tuner script */
- if (state->config->chip_ver > 1)
- ret = it913x_fe_script_loader(state, it9135_v2);
- else
- ret = it913x_fe_script_loader(state, it9135_v1);
- if (ret < 0)
- return ret;
-
- /* LNA Scripts */
- switch (state->tuner_type) {
- case IT9135_51:
- set_lna = it9135_51;
- break;
- case IT9135_52:
- set_lna = it9135_52;
- break;
- case IT9135_60:
- set_lna = it9135_60;
- break;
- case IT9135_61:
- set_lna = it9135_61;
- break;
- case IT9135_62:
- set_lna = it9135_62;
- break;
- case IT9135_38:
- default:
- set_lna = it9135_38;
- }
- info("Tuner LNA type :%02x", state->tuner_type);
-
- ret = it913x_fe_script_loader(state, set_lna);
- if (ret < 0)
- return ret;
-
- if (state->config->chip_ver == 2) {
- ret = it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x1);
- ret |= it913x_write_reg(state, PRO_LINK, PADODPU, 0x0);
- ret |= it913x_write_reg(state, PRO_LINK, AGC_O_D, 0x0);
- ret |= it913x_init_tuner(state);
- }
- if (ret < 0)
- return -ENODEV;
-
- /* Always solo frontend */
- set_mode = set_solo_fe;
- ret |= it913x_fe_script_loader(state, set_mode);
-
- ret |= it913x_fe_suspend(state);
- return (ret < 0) ? -ENODEV : 0;
-}
-
-static int it913x_fe_init(struct dvb_frontend *fe)
-{
- struct it913x_fe_state *state = fe->demodulator_priv;
- int ret = 0;
- /* Power Up Tuner - common all versions */
- ret = it913x_write_reg(state, PRO_DMOD, 0xec40, 0x1);
-
- ret |= it913x_fe_script_loader(state, init_1);
-
- ret |= it913x_write_reg(state, PRO_DMOD, AFE_MEM0, 0x0);
-
- ret |= it913x_write_reg(state, PRO_DMOD, 0xfba8, 0x0);
-
- return (ret < 0) ? -ENODEV : 0;
-}
-
-static void it913x_fe_release(struct dvb_frontend *fe)
-{
- struct it913x_fe_state *state = fe->demodulator_priv;
- kfree(state);
-}
-
-static struct dvb_frontend_ops it913x_fe_ofdm_ops;
-
-struct dvb_frontend *it913x_fe_attach(struct i2c_adapter *i2c_adap,
- u8 i2c_addr, struct ite_config *config)
-{
- struct it913x_fe_state *state = NULL;
- int ret;
-
- /* allocate memory for the internal state */
- state = kzalloc(sizeof(struct it913x_fe_state), GFP_KERNEL);
- if (state == NULL)
- return NULL;
- if (config == NULL)
- goto error;
-
- state->i2c_adap = i2c_adap;
- state->i2c_addr = i2c_addr;
- state->config = config;
-
- switch (state->config->tuner_id_0) {
- case IT9135_51:
- case IT9135_52:
- case IT9135_60:
- case IT9135_61:
- case IT9135_62:
- state->tuner_type = state->config->tuner_id_0;
- break;
- default:
- case IT9135_38:
- state->tuner_type = IT9135_38;
- }
-
- ret = it913x_fe_start(state);
- if (ret < 0)
- goto error;
-
-
- /* create dvb_frontend */
- memcpy(&state->frontend.ops, &it913x_fe_ofdm_ops,
- sizeof(struct dvb_frontend_ops));
- state->frontend.demodulator_priv = state;
-
- return &state->frontend;
-error:
- kfree(state);
- return NULL;
-}
-EXPORT_SYMBOL(it913x_fe_attach);
-
-static struct dvb_frontend_ops it913x_fe_ofdm_ops = {
- .delsys = { SYS_DVBT },
- .info = {
- .name = "it913x-fe DVB-T",
- .frequency_min = 51000000,
- .frequency_max = 1680000000,
- .frequency_stepsize = 62500,
- .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
- FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
- FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO |
- FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO,
- },
-
- .release = it913x_fe_release,
-
- .init = it913x_fe_init,
- .sleep = it913x_fe_sleep,
-
- .set_frontend = it913x_fe_set_frontend,
- .get_frontend = it913x_fe_get_frontend,
-
- .read_status = it913x_fe_read_status,
- .read_signal_strength = it913x_fe_read_signal_strength,
- .read_snr = it913x_fe_read_snr,
- .read_ber = it913x_fe_read_ber,
- .read_ucblocks = it913x_fe_read_ucblocks,
-};
-
-MODULE_DESCRIPTION("it913x Frontend and it9137 tuner");
-MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
-MODULE_VERSION("1.15");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/it913x-fe.h b/drivers/media/dvb-frontends/it913x-fe.h
deleted file mode 100644
index df0ad4207343..000000000000
--- a/drivers/media/dvb-frontends/it913x-fe.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Driver for it913x Frontend
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef IT913X_FE_H
-#define IT913X_FE_H
-
-#include <linux/kconfig.h>
-#include <linux/dvb/frontend.h>
-#include "dvb_frontend.h"
-
-struct ite_config {
- u8 chip_ver;
- u16 chip_type;
- u32 firmware;
- u8 firmware_ver;
- u8 adc_x2;
- u8 tuner_id_0;
- u8 tuner_id_1;
- u8 dual_mode;
- u8 adf;
- /* option to read SIGNAL_LEVEL */
- u8 read_slevel;
-};
-
-#if IS_ENABLED(CONFIG_DVB_IT913X_FE)
-extern struct dvb_frontend *it913x_fe_attach(struct i2c_adapter *i2c_adap,
- u8 i2c_addr, struct ite_config *config);
-#else
-static inline struct dvb_frontend *it913x_fe_attach(
- struct i2c_adapter *i2c_adap,
- u8 i2c_addr, struct ite_config *config)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif /* CONFIG_DVB_IT913X_FE */
-#define I2C_BASE_ADDR 0x10
-#define DEV_0 0x0
-#define DEV_1 0x10
-#define PRO_LINK 0x0
-#define PRO_DMOD 0x1
-#define DEV_0_DMOD (PRO_DMOD << 0x7)
-#define DEV_1_DMOD (DEV_0_DMOD | DEV_1)
-#define CHIP2_I2C_ADDR 0x3a
-
-#define AFE_MEM0 0xfb24
-
-#define MP2_SW_RST 0xf99d
-#define MP2IF2_SW_RST 0xf9a4
-
-#define PADODPU 0xd827
-#define THIRDODPU 0xd828
-#define AGC_O_D 0xd829
-
-#define EP0_TX_EN 0xdd11
-#define EP0_TX_NAK 0xdd13
-#define EP4_TX_LEN_LSB 0xdd88
-#define EP4_TX_LEN_MSB 0xdd89
-#define EP4_MAX_PKT 0xdd0c
-#define EP5_TX_LEN_LSB 0xdd8a
-#define EP5_TX_LEN_MSB 0xdd8b
-#define EP5_MAX_PKT 0xdd0d
-
-#define IO_MUX_POWER_CLK 0xd800
-#define CLK_O_EN 0xd81a
-#define I2C_CLK 0xf103
-#define I2C_CLK_100 0x7
-#define I2C_CLK_400 0x1a
-
-#define D_TPSD_LOCK 0xf5a9
-#define MP2IF2_EN 0xf9a3
-#define MP2IF_SERIAL 0xf985
-#define TSIS_ENABLE 0xf9cd
-#define MP2IF2_HALF_PSB 0xf9a5
-#define MP2IF_STOP_EN 0xf9b5
-#define MPEG_FULL_SPEED 0xf990
-#define TOP_HOSTB_SER_MODE 0xd91c
-
-#define PID_RST 0xf992
-#define PID_EN 0xf993
-#define PID_INX_EN 0xf994
-#define PID_INX 0xf995
-#define PID_LSB 0xf996
-#define PID_MSB 0xf997
-
-#define MP2IF_MPEG_PAR_MODE 0xf986
-#define DCA_UPPER_CHIP 0xf731
-#define DCA_LOWER_CHIP 0xf732
-#define DCA_PLATCH 0xf730
-#define DCA_FPGA_LATCH 0xf778
-#define DCA_STAND_ALONE 0xf73c
-#define DCA_ENABLE 0xf776
-
-#define DVBT_INTEN 0xf41f
-#define DVBT_ENABLE 0xf41a
-#define HOSTB_DCA_LOWER 0xd91f
-#define HOSTB_MPEG_PAR_MODE 0xd91b
-#define HOSTB_MPEG_SER_MODE 0xd91c
-#define HOSTB_MPEG_SER_DO7 0xd91d
-#define HOSTB_DCA_UPPER 0xd91e
-#define PADMISCDR2 0xd830
-#define PADMISCDR4 0xd831
-#define PADMISCDR8 0xd832
-#define PADMISCDRSR 0xd833
-#define LOCK3_OUT 0xd8fd
-
-#define GPIOH1_O 0xd8af
-#define GPIOH1_EN 0xd8b0
-#define GPIOH1_ON 0xd8b1
-#define GPIOH3_O 0xd8b3
-#define GPIOH3_EN 0xd8b4
-#define GPIOH3_ON 0xd8b5
-#define GPIOH5_O 0xd8bb
-#define GPIOH5_EN 0xd8bc
-#define GPIOH5_ON 0xd8bd
-
-#define AFE_MEM0 0xfb24
-
-#define REG_TPSD_TX_MODE 0xf900
-#define REG_TPSD_GI 0xf901
-#define REG_TPSD_HIER 0xf902
-#define REG_TPSD_CONST 0xf903
-#define REG_BW 0xf904
-#define REG_PRIV 0xf905
-#define REG_TPSD_HP_CODE 0xf906
-#define REG_TPSD_LP_CODE 0xf907
-
-#define MP2IF_SYNC_LK 0xf999
-#define ADC_FREQ 0xf1cd
-
-#define TRIGGER_OFSM 0x0000
-/* COEFF Registers start at 0x0001 to 0x0020 */
-#define COEFF_1_2048 0x0001
-#define XTAL_CLK 0x0025
-#define BFS_FCW 0x0029
-
-/* Error Regs */
-#define RSD_ABORT_PKT_LSB 0x0032
-#define RSD_ABORT_PKT_MSB 0x0033
-#define RSD_BIT_ERR_0_7 0x0034
-#define RSD_BIT_ERR_8_15 0x0035
-#define RSD_BIT_ERR_23_16 0x0036
-#define RSD_BIT_COUNT_LSB 0x0037
-#define RSD_BIT_COUNT_MSB 0x0038
-
-#define TPSD_LOCK 0x003c
-#define TRAINING_MODE 0x0040
-#define ADC_X_2 0x0045
-#define TUNER_ID 0x0046
-#define EMPTY_CHANNEL_STATUS 0x0047
-#define SIGNAL_LEVEL 0x0048
-#define SIGNAL_QUALITY 0x0049
-#define EST_SIGNAL_LEVEL 0x004a
-#define FREE_BAND 0x004b
-#define SUSPEND_FLAG 0x004c
-#define VAR_P_INBAND 0x00f7
-
-/* Build in tuner types */
-#define IT9137 0x38
-#define IT9135_38 0x38
-#define IT9135_51 0x51
-#define IT9135_52 0x52
-#define IT9135_60 0x60
-#define IT9135_61 0x61
-#define IT9135_62 0x62
-
-enum {
- CMD_DEMOD_READ = 0,
- CMD_DEMOD_WRITE,
- CMD_TUNER_READ,
- CMD_TUNER_WRITE,
- CMD_REG_EEPROM_READ,
- CMD_REG_EEPROM_WRITE,
- CMD_DATA_READ,
- CMD_VAR_READ = 8,
- CMD_VAR_WRITE,
- CMD_PLATFORM_GET,
- CMD_PLATFORM_SET,
- CMD_IP_CACHE,
- CMD_IP_ADD,
- CMD_IP_REMOVE,
- CMD_PID_ADD,
- CMD_PID_REMOVE,
- CMD_SIPSI_GET,
- CMD_SIPSI_MPE_RESET,
- CMD_H_PID_ADD = 0x15,
- CMD_H_PID_REMOVE,
- CMD_ABORT,
- CMD_IR_GET,
- CMD_IR_SET,
- CMD_FW_DOWNLOAD = 0x21,
- CMD_QUERYINFO,
- CMD_BOOT,
- CMD_FW_DOWNLOAD_BEGIN,
- CMD_FW_DOWNLOAD_END,
- CMD_RUN_CODE,
- CMD_SCATTER_READ = 0x28,
- CMD_SCATTER_WRITE,
- CMD_GENERIC_READ,
- CMD_GENERIC_WRITE
-};
-
-enum {
- READ_LONG,
- WRITE_LONG,
- READ_SHORT,
- WRITE_SHORT,
- READ_DATA,
- WRITE_DATA,
- WRITE_CMD,
-};
-
-enum {
- IT9135_AUTO = 0,
- IT9137_FW,
- IT9135_V1_FW,
- IT9135_V2_FW,
-};
-
-#endif /* IT913X_FE_H */
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index b8a7897e7bd8..2ef8ce13fb60 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -271,6 +271,13 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
ret = fe->ops.tuner_ops.get_frequency(fe, &tuner_frequency);
if (ret)
goto err;
+ } else {
+ /*
+ * Use nominal target frequency as tuner driver does not provide
+ * actual frequency used. Carrier offset calculation is not
+ * valid.
+ */
+ tuner_frequency = c->frequency;
}
/* reset */
@@ -428,18 +435,10 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
goto err;
switch (target_mclk) {
- case 72000:
- u8tmp1 = 0x00; /* 0b00 */
- u8tmp2 = 0x03; /* 0b11 */
- break;
case 96000:
u8tmp1 = 0x02; /* 0b10 */
u8tmp2 = 0x01; /* 0b01 */
break;
- case 115200:
- u8tmp1 = 0x01; /* 0b01 */
- u8tmp2 = 0x01; /* 0b01 */
- break;
case 144000:
u8tmp1 = 0x00; /* 0b00 */
u8tmp2 = 0x01; /* 0b01 */
@@ -448,10 +447,6 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
u8tmp1 = 0x03; /* 0b11 */
u8tmp2 = 0x00; /* 0b00 */
break;
- default:
- dev_dbg(&priv->i2c->dev, "%s: invalid target_mclk\n", __func__);
- ret = -EINVAL;
- goto err;
}
ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0);
@@ -711,9 +706,6 @@ static int m88ds3103_get_frontend(struct dvb_frontend *fe)
case 1:
c->inversion = INVERSION_ON;
break;
- default:
- dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n",
- __func__);
}
switch ((buf[1] >> 5) & 0x07) {
@@ -793,9 +785,6 @@ static int m88ds3103_get_frontend(struct dvb_frontend *fe)
case 1:
c->pilot = PILOT_ON;
break;
- default:
- dev_dbg(&priv->i2c->dev, "%s: invalid pilot\n",
- __func__);
}
switch ((buf[0] >> 6) & 0x07) {
@@ -823,9 +812,6 @@ static int m88ds3103_get_frontend(struct dvb_frontend *fe)
case 1:
c->inversion = INVERSION_ON;
break;
- default:
- dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n",
- __func__);
}
switch ((buf[2] >> 0) & 0x03) {
@@ -958,7 +944,7 @@ static int m88ds3103_set_tone(struct dvb_frontend *fe,
switch (fe_sec_tone_mode) {
case SEC_TONE_ON:
tone = 0;
- reg_a1_mask = 0x87;
+ reg_a1_mask = 0x47;
break;
case SEC_TONE_OFF:
tone = 1;
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index b2351466b0da..32cffca14d0b 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -715,6 +715,22 @@ static int m88rs2000_get_frontend(struct dvb_frontend *fe)
return 0;
}
+static int m88rs2000_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *tune)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ if (c->symbol_rate > 3000000)
+ tune->min_delay_ms = 2000;
+ else
+ tune->min_delay_ms = 3000;
+
+ tune->step_size = c->symbol_rate / 16000;
+ tune->max_drift = c->symbol_rate / 2000;
+
+ return 0;
+}
+
static int m88rs2000_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct m88rs2000_state *state = fe->demodulator_priv;
@@ -746,7 +762,7 @@ static struct dvb_frontend_ops m88rs2000_ops = {
.symbol_rate_tolerance = 500, /* ppm */
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
- FE_CAN_QPSK |
+ FE_CAN_QPSK | FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_AUTO
},
@@ -766,6 +782,7 @@ static struct dvb_frontend_ops m88rs2000_ops = {
.set_frontend = m88rs2000_set_frontend,
.get_frontend = m88rs2000_get_frontend,
+ .get_tune_settings = m88rs2000_get_tune_settings,
};
struct dvb_frontend *m88rs2000_attach(const struct m88rs2000_config *config,
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 2c7217fb1415..2f458bb188c7 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -1,7 +1,7 @@
/*
 * Fujitsu mb86a20s ISDB-T/ISDB-Tsb Module driver
*
- * Copyright (C) 2010-2013 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010-2013 Mauro Carvalho Chehab
* Copyright (C) 2009-2010 Douglas Landgraf <dougsland@redhat.com>
*
* This program is free software; you can redistribute it and/or
@@ -2156,5 +2156,5 @@ static struct dvb_frontend_ops mb86a20s_ops = {
};
MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/mb86a20s.h b/drivers/media/dvb-frontends/mb86a20s.h
index 6627a3976087..cbeb941fba7c 100644
--- a/drivers/media/dvb-frontends/mb86a20s.h
+++ b/drivers/media/dvb-frontends/mb86a20s.h
@@ -1,7 +1,7 @@
/*
* Fujitsu mb86a20s driver
*
- * Copyright (C) 2010 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010 Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index ff73da9365e3..fdbed35c87fa 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -24,11 +24,6 @@
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
-
-int rtl2832_debug;
-module_param_named(debug, rtl2832_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
-
#define REG_MASK(b) (BIT(b + 1) - 1)
static const struct rtl2832_reg_entry registers[] = {
@@ -185,12 +180,13 @@ static int rtl2832_wr(struct rtl2832_priv *priv, u8 reg, u8 *val, int len)
buf[0] = reg;
memcpy(&buf[1], val, len);
- ret = i2c_transfer(priv->i2c, msg, 1);
+ ret = i2c_transfer(priv->i2c_adapter, msg, 1);
if (ret == 1) {
ret = 0;
} else {
- dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
@@ -214,12 +210,13 @@ static int rtl2832_rd(struct rtl2832_priv *priv, u8 reg, u8 *val, int len)
}
};
- ret = i2c_transfer(priv->i2c, msg, 2);
+ ret = i2c_transfer(priv->i2c_adapter, msg, 2);
if (ret == 2) {
ret = 0;
} else {
- dev_warn(&priv->i2c->dev, "%s: i2c rd failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rd failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
@@ -417,7 +414,7 @@ static int rtl2832_set_if(struct dvb_frontend *fe, u32 if_freq)
ret = rtl2832_wr_demod_reg(priv, DVBT_PSET_IFFREQ, pset_iffreq);
- return (ret);
+ return ret;
}
static int rtl2832_init(struct dvb_frontend *fe)
@@ -514,15 +511,10 @@ static int rtl2832_init(struct dvb_frontend *fe)
goto err;
}
- if (!fe->ops.tuner_ops.get_if_frequency) {
- ret = rtl2832_set_if(fe, priv->cfg.if_dvbt);
- if (ret)
- goto err;
- }
-
/*
* r820t NIM code does a software reset here at the demod -
- * may not be needed, as there's already a software reset at set_params()
+ * may not be needed, as there's already a software reset at
+ * set_params()
*/
#if 1
/* soft reset */
@@ -599,9 +591,9 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
};
- dev_dbg(&priv->i2c->dev, "%s: frequency=%d bandwidth_hz=%d " \
- "inversion=%d\n", __func__, c->frequency,
- c->bandwidth_hz, c->inversion);
+ dev_dbg(&priv->i2c->dev,
+ "%s: frequency=%d bandwidth_hz=%d inversion=%d\n",
+ __func__, c->frequency, c->bandwidth_hz, c->inversion);
/* program tuner */
if (fe->ops.tuner_ops.set_params)
@@ -899,9 +891,149 @@ static void rtl2832_release(struct dvb_frontend *fe)
struct rtl2832_priv *priv = fe->demodulator_priv;
dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+ cancel_delayed_work_sync(&priv->i2c_gate_work);
+ i2c_del_mux_adapter(priv->i2c_adapter_tuner);
+ i2c_del_mux_adapter(priv->i2c_adapter);
kfree(priv);
}
+/*
+ * Delay mechanism to avoid unneeded I2C gate open / close. Gate close is
+ * delayed here a little bit in order to see if there is a sequence of I2C
+ * messages sent to the same I2C bus.
+ * We must use unlocked version of __i2c_transfer() in order to avoid deadlock
+ * as lock is already taken by calling muxed i2c_transfer().
+ */
+static void rtl2832_i2c_gate_work(struct work_struct *work)
+{
+ struct rtl2832_priv *priv = container_of(work,
+ struct rtl2832_priv, i2c_gate_work.work);
+ struct i2c_adapter *adap = priv->i2c;
+ int ret;
+ u8 buf[2];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg.i2c_addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ /* select reg bank 1 */
+ buf[0] = 0x00;
+ buf[1] = 0x01;
+ ret = __i2c_transfer(adap, msg, 1);
+ if (ret != 1)
+ goto err;
+
+ priv->page = 1;
+
+ /* close I2C repeater gate */
+ buf[0] = 0x01;
+ buf[1] = 0x10;
+ ret = __i2c_transfer(adap, msg, 1);
+ if (ret != 1)
+ goto err;
+
+ priv->i2c_gate_state = 0;
+
+ return;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+
+ return;
+}
+
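+/*
+ * I2C mux select callback: channel 0 is the demod itself, channel 1 is the
+ * tuner behind the I2C repeater gate. Any pending delayed gate close is
+ * cancelled first so that back-to-back tuner transfers keep the gate open.
+ */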
+static int rtl2832_select(struct i2c_adapter *adap, void *mux_priv, u32 chan_id)
+{
+ struct rtl2832_priv *priv = mux_priv;
+ int ret;
+ u8 buf[2], val;
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg.i2c_addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+ struct i2c_msg msg_rd[2] = {
+ {
+ .addr = priv->cfg.i2c_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = "\x01",
+ }, {
+ .addr = priv->cfg.i2c_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = &val,
+ }
+ };
+
+ /* terminate possible gate closing */
+ cancel_delayed_work_sync(&priv->i2c_gate_work);
+
+ if (priv->i2c_gate_state == chan_id)
+ return 0;
+
+ /* select reg bank 1 */
+ buf[0] = 0x00;
+ buf[1] = 0x01;
+ ret = __i2c_transfer(adap, msg, 1);
+ if (ret != 1)
+ goto err;
+
+ priv->page = 1;
+
+ /* we must read that register, otherwise there will be errors */
+ ret = __i2c_transfer(adap, msg_rd, 2);
+ if (ret != 2)
+ goto err;
+
+ /* open or close I2C repeater gate */
+ buf[0] = 0x01;
+ if (chan_id == 1)
+ buf[1] = 0x18; /* open */
+ else
+ buf[1] = 0x10; /* close */
+
+ ret = __i2c_transfer(adap, msg, 1);
+ if (ret != 1)
+ goto err;
+
+ priv->i2c_gate_state = chan_id;
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+
+ return -EREMOTEIO;
+}
+
+static int rtl2832_deselect(struct i2c_adapter *adap, void *mux_priv,
+ u32 chan_id)
+{
+ struct rtl2832_priv *priv = mux_priv;
+ schedule_delayed_work(&priv->i2c_gate_work, usecs_to_jiffies(100));
+ return 0;
+}
+
+struct i2c_adapter *rtl2832_get_i2c_adapter(struct dvb_frontend *fe)
+{
+ struct rtl2832_priv *priv = fe->demodulator_priv;
+ return priv->i2c_adapter_tuner;
+}
+EXPORT_SYMBOL(rtl2832_get_i2c_adapter);
+
+struct i2c_adapter *rtl2832_get_private_i2c_adapter(struct dvb_frontend *fe)
+{
+ struct rtl2832_priv *priv = fe->demodulator_priv;
+ return priv->i2c_adapter;
+}
+EXPORT_SYMBOL(rtl2832_get_private_i2c_adapter);
+
struct dvb_frontend *rtl2832_attach(const struct rtl2832_config *cfg,
struct i2c_adapter *i2c)
{
@@ -920,12 +1052,25 @@ struct dvb_frontend *rtl2832_attach(const struct rtl2832_config *cfg,
priv->i2c = i2c;
priv->tuner = cfg->tuner;
memcpy(&priv->cfg, cfg, sizeof(struct rtl2832_config));
+ INIT_DELAYED_WORK(&priv->i2c_gate_work, rtl2832_i2c_gate_work);
+
+ /* create muxed i2c adapter for demod itself */
+ priv->i2c_adapter = i2c_add_mux_adapter(i2c, &i2c->dev, priv, 0, 0, 0,
+ rtl2832_select, NULL);
+ if (priv->i2c_adapter == NULL)
+ goto err;
/* check if the demod is there */
ret = rtl2832_rd_reg(priv, 0x00, 0x0, &tmp);
if (ret)
goto err;
+ /* create muxed i2c adapter for demod tuner bus */
+ priv->i2c_adapter_tuner = i2c_add_mux_adapter(i2c, &i2c->dev, priv,
+ 0, 1, 0, rtl2832_select, rtl2832_deselect);
+ if (priv->i2c_adapter_tuner == NULL)
+ goto err;
+
/* create dvb_frontend */
memcpy(&priv->fe.ops, &rtl2832_ops, sizeof(struct dvb_frontend_ops));
priv->fe.demodulator_priv = priv;
@@ -936,6 +1081,8 @@ struct dvb_frontend *rtl2832_attach(const struct rtl2832_config *cfg,
return &priv->fe;
err:
dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
+ if (priv && priv->i2c_adapter)
+ i2c_del_mux_adapter(priv->i2c_adapter);
kfree(priv);
return NULL;
}
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index 2cfbb6a97061..cb3b6b0775b8 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -38,13 +38,6 @@ struct rtl2832_config {
u32 xtal;
/*
- * IFs for all used modes.
- * Hz
- * 4570000, 4571429, 36000000, 36125000, 36166667, 44000000
- */
- u32 if_dvbt;
-
- /*
* tuner
 * XXX: This must be kept in sync with the dvb_usb_rtl28xxu demod driver.
*/
@@ -58,11 +51,21 @@ struct rtl2832_config {
};
#if IS_ENABLED(CONFIG_DVB_RTL2832)
-extern struct dvb_frontend *rtl2832_attach(
+struct dvb_frontend *rtl2832_attach(
const struct rtl2832_config *cfg,
struct i2c_adapter *i2c
);
+
+extern struct i2c_adapter *rtl2832_get_i2c_adapter(
+ struct dvb_frontend *fe
+);
+
+extern struct i2c_adapter *rtl2832_get_private_i2c_adapter(
+ struct dvb_frontend *fe
+);
+
#else
+
static inline struct dvb_frontend *rtl2832_attach(
const struct rtl2832_config *config,
struct i2c_adapter *i2c
@@ -71,6 +74,21 @@ static inline struct dvb_frontend *rtl2832_attach(
pr_warn("%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
+
+static inline struct i2c_adapter *rtl2832_get_i2c_adapter(
+ struct dvb_frontend *fe
+)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *rtl2832_get_private_i2c_adapter(
+ struct dvb_frontend *fe
+)
+{
+ return NULL;
+}
+
#endif
diff --git a/drivers/media/dvb-frontends/rtl2832_priv.h b/drivers/media/dvb-frontends/rtl2832_priv.h
index b5f2b80092ee..ae469f032fe6 100644
--- a/drivers/media/dvb-frontends/rtl2832_priv.h
+++ b/drivers/media/dvb-frontends/rtl2832_priv.h
@@ -23,9 +23,12 @@
#include "dvb_frontend.h"
#include "rtl2832.h"
+#include <linux/i2c-mux.h>
struct rtl2832_priv {
struct i2c_adapter *i2c;
+ struct i2c_adapter *i2c_adapter;
+ struct i2c_adapter *i2c_adapter_tuner;
struct dvb_frontend fe;
struct rtl2832_config cfg;
@@ -34,6 +37,7 @@ struct rtl2832_priv {
u8 tuner;
u8 page; /* active register page */
+ struct delayed_work i2c_gate_work;
};
struct rtl2832_reg_entry {
@@ -267,7 +271,7 @@ static const struct rtl2832_reg_value rtl2832_tuner_init_tua9001[] = {
{DVBT_OPT_ADC_IQ, 0x1},
{DVBT_AD_AVI, 0x0},
{DVBT_AD_AVQ, 0x0},
- {DVBT_SPEC_INV, 0x0},
+ {DVBT_SPEC_INV, 0x0},
};
static const struct rtl2832_reg_value rtl2832_tuner_init_fc0012[] = {
@@ -301,7 +305,7 @@ static const struct rtl2832_reg_value rtl2832_tuner_init_fc0012[] = {
{DVBT_GI_PGA_STATE, 0x0},
{DVBT_EN_AGC_PGA, 0x1},
{DVBT_IF_AGC_MAN, 0x0},
- {DVBT_SPEC_INV, 0x0},
+ {DVBT_SPEC_INV, 0x0},
};
static const struct rtl2832_reg_value rtl2832_tuner_init_e4000[] = {
@@ -339,32 +343,32 @@ static const struct rtl2832_reg_value rtl2832_tuner_init_e4000[] = {
{DVBT_REG_MONSEL, 0x1},
{DVBT_REG_MON, 0x1},
{DVBT_REG_4MSEL, 0x0},
- {DVBT_SPEC_INV, 0x0},
+ {DVBT_SPEC_INV, 0x0},
};
static const struct rtl2832_reg_value rtl2832_tuner_init_r820t[] = {
- {DVBT_DAGC_TRG_VAL, 0x39},
- {DVBT_AGC_TARG_VAL_0, 0x0},
- {DVBT_AGC_TARG_VAL_8_1, 0x40},
- {DVBT_AAGC_LOOP_GAIN, 0x16},
- {DVBT_LOOP_GAIN2_3_0, 0x8},
- {DVBT_LOOP_GAIN2_4, 0x1},
- {DVBT_LOOP_GAIN3, 0x18},
- {DVBT_VTOP1, 0x35},
- {DVBT_VTOP2, 0x21},
- {DVBT_VTOP3, 0x21},
- {DVBT_KRF1, 0x0},
- {DVBT_KRF2, 0x40},
- {DVBT_KRF3, 0x10},
- {DVBT_KRF4, 0x10},
- {DVBT_IF_AGC_MIN, 0x80},
- {DVBT_IF_AGC_MAX, 0x7f},
- {DVBT_RF_AGC_MIN, 0x80},
- {DVBT_RF_AGC_MAX, 0x7f},
- {DVBT_POLAR_RF_AGC, 0x0},
- {DVBT_POLAR_IF_AGC, 0x0},
- {DVBT_AD7_SETTING, 0xe9f4},
- {DVBT_SPEC_INV, 0x1},
+ {DVBT_DAGC_TRG_VAL, 0x39},
+ {DVBT_AGC_TARG_VAL_0, 0x0},
+ {DVBT_AGC_TARG_VAL_8_1, 0x40},
+ {DVBT_AAGC_LOOP_GAIN, 0x16},
+ {DVBT_LOOP_GAIN2_3_0, 0x8},
+ {DVBT_LOOP_GAIN2_4, 0x1},
+ {DVBT_LOOP_GAIN3, 0x18},
+ {DVBT_VTOP1, 0x35},
+ {DVBT_VTOP2, 0x21},
+ {DVBT_VTOP3, 0x21},
+ {DVBT_KRF1, 0x0},
+ {DVBT_KRF2, 0x40},
+ {DVBT_KRF3, 0x10},
+ {DVBT_KRF4, 0x10},
+ {DVBT_IF_AGC_MIN, 0x80},
+ {DVBT_IF_AGC_MAX, 0x7f},
+ {DVBT_RF_AGC_MIN, 0x80},
+ {DVBT_RF_AGC_MAX, 0x7f},
+ {DVBT_POLAR_RF_AGC, 0x0},
+ {DVBT_POLAR_IF_AGC, 0x0},
+ {DVBT_AD7_SETTING, 0xe9f4},
+ {DVBT_SPEC_INV, 0x1},
};
#endif /* RTL2832_PRIV_H */
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index a271ac3eaec0..69862e1fd9e9 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -2,7 +2,7 @@
* Sharp VA3A5JZ921 One Seg Broadcast Module driver
* This device is labeled as just S. 921 at the top of the frontend can
*
- * Copyright (C) 2009-2010 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2009-2010 Mauro Carvalho Chehab
* Copyright (C) 2009-2010 Douglas Landgraf <dougsland@redhat.com>
*
* Developed for Leadership SBTVD 1seg device sold in Brazil
@@ -539,6 +539,6 @@ static struct dvb_frontend_ops s921_ops = {
};
MODULE_DESCRIPTION("DVB Frontend module for Sharp S921 hardware");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Douglas Landgraf <dougsland@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/s921.h b/drivers/media/dvb-frontends/s921.h
index 8d5e2a6e187c..9b20c9e0eb88 100644
--- a/drivers/media/dvb-frontends/s921.h
+++ b/drivers/media/dvb-frontends/s921.h
@@ -1,7 +1,7 @@
/*
* Sharp s921 driver
*
- * Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2009 Mauro Carvalho Chehab
* Copyright (C) 2009 Douglas Landgraf <dougsland@redhat.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index cea175d19890..4ef8a5c7003e 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -193,7 +193,7 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st
.len = len + 1
};
- if (1 + len > sizeof(buf)) {
+ if (1 + len > sizeof(cmdbuf)) {
printk(KERN_WARNING
"%s: i2c wr: len=%d is too big!\n",
KBUILD_MODNAME, len);
diff --git a/drivers/media/dvb-frontends/stv0900_sw.c b/drivers/media/dvb-frontends/stv0900_sw.c
index 0a40edfad739..4ce1d260b3eb 100644
--- a/drivers/media/dvb-frontends/stv0900_sw.c
+++ b/drivers/media/dvb-frontends/stv0900_sw.c
@@ -1081,7 +1081,7 @@ static int stv0900_wait_for_lock(struct stv0900_internal *intp,
lock = stv0900_get_demod_lock(intp, demod, dmd_timeout);
if (lock)
- lock = lock && stv0900_get_fec_lock(intp, demod, fec_timeout);
+ lock = stv0900_get_fec_lock(intp, demod, fec_timeout);
if (lock) {
lock = 0;
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 8ad3a57cf640..522fe00f5eee 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -42,8 +42,8 @@ static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
if (1 + len > sizeof(buf)) {
dev_warn(&priv->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
return -EINVAL;
}
@@ -54,8 +54,9 @@ static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
if (ret == 1) {
ret = 0;
} else {
- dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
@@ -83,8 +84,8 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
if (len > sizeof(buf)) {
dev_warn(&priv->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
return -EINVAL;
}
@@ -93,8 +94,9 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
memcpy(val, buf, len);
ret = 0;
} else {
- dev_warn(&priv->i2c->dev, "%s: i2c rd failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rd failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
@@ -491,10 +493,9 @@ static int tda10071_read_status(struct dvb_frontend *fe, fe_status_t *status)
if (ret)
goto error;
- if (tmp & 0x01) /* tuner PLL */
- *status |= FE_HAS_SIGNAL;
+ /* 0x39[0] tuner PLL */
if (tmp & 0x02) /* demod PLL */
- *status |= FE_HAS_CARRIER;
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
if (tmp & 0x04) /* viterbi or LDPC*/
*status |= FE_HAS_VITERBI;
if (tmp & 0x08) /* RS or BCH */
@@ -668,11 +669,11 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
int ret, i;
u8 mode, rolloff, pilot, inversion, div;
- dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d modulation=%d " \
- "frequency=%d symbol_rate=%d inversion=%d pilot=%d " \
- "rolloff=%d\n", __func__, c->delivery_system, c->modulation,
- c->frequency, c->symbol_rate, c->inversion, c->pilot,
- c->rolloff);
+ dev_dbg(&priv->i2c->dev,
+ "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
+ __func__, c->delivery_system, c->modulation,
+ c->frequency, c->symbol_rate, c->inversion, c->pilot,
+ c->rolloff);
priv->delivery_system = SYS_UNDEFINED;
@@ -952,10 +953,8 @@ static int tda10071_init(struct dvb_frontend *fe)
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
if (ret) {
- dev_err(&priv->i2c->dev, "%s: did not find the " \
- "firmware file. (%s) Please see " \
- "linux/Documentation/dvb/ for more " \
- "details on firmware-problems. (%d)\n",
+ dev_err(&priv->i2c->dev,
+ "%s: did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)\n",
KBUILD_MODNAME, fw_file, ret);
goto error;
}
@@ -985,11 +984,12 @@ static int tda10071_init(struct dvb_frontend *fe)
if (ret)
goto error_release_firmware;
- dev_info(&priv->i2c->dev, "%s: found a '%s' in cold state, " \
- "will try to load a firmware\n", KBUILD_MODNAME,
- tda10071_ops.info.name);
- dev_info(&priv->i2c->dev, "%s: downloading firmware from " \
- "file '%s'\n", KBUILD_MODNAME, fw_file);
+ dev_info(&priv->i2c->dev,
+ "%s: found a '%s' in cold state, will try to load a firmware\n",
+ KBUILD_MODNAME, tda10071_ops.info.name);
+ dev_info(&priv->i2c->dev,
+ "%s: downloading firmware from file '%s'\n",
+ KBUILD_MODNAME, fw_file);
/* do not download last byte */
fw_size = fw->size - 1;
@@ -1003,11 +1003,10 @@ static int tda10071_init(struct dvb_frontend *fe)
ret = tda10071_wr_regs(priv, 0xfa,
(u8 *) &fw->data[fw_size - remaining], len);
if (ret) {
- dev_err(&priv->i2c->dev, "%s: firmware " \
- "download failed=%d\n",
+ dev_err(&priv->i2c->dev,
+ "%s: firmware download failed=%d\n",
KBUILD_MODNAME, ret);
- if (ret)
- goto error_release_firmware;
+ goto error_release_firmware;
}
}
release_firmware(fw);
@@ -1069,12 +1068,17 @@ static int tda10071_init(struct dvb_frontend *fe)
if (ret)
goto error;
+ if (priv->cfg.tuner_i2c_addr)
+ tmp = priv->cfg.tuner_i2c_addr;
+ else
+ tmp = 0x14;
+
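+	/* the tuner I2C address is passed as args[5] of the tuner init command */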
cmd.args[0] = CMD_TUNER_INIT;
cmd.args[1] = 0x00;
cmd.args[2] = 0x00;
cmd.args[3] = 0x00;
cmd.args[4] = 0x00;
- cmd.args[5] = (priv->cfg.tuner_i2c_addr) ? priv->cfg.tuner_i2c_addr : 0x14;
+ cmd.args[5] = tmp;
cmd.args[6] = 0x00;
cmd.args[7] = 0x03;
cmd.args[8] = 0x02;
@@ -1214,14 +1218,14 @@ struct dvb_frontend *tda10071_attach(const struct tda10071_config *config,
/* make sure demod i2c address is specified */
if (!config->demod_i2c_addr) {
- dev_dbg(&i2c->dev, "%s: invalid demod i2c address!\n", __func__);
+ dev_dbg(&i2c->dev, "%s: invalid demod i2c address\n", __func__);
ret = -EINVAL;
goto error;
}
/* make sure tuner i2c address is specified */
if (!config->tuner_i2c_addr) {
- dev_dbg(&i2c->dev, "%s: invalid tuner i2c address!\n", __func__);
+ dev_dbg(&i2c->dev, "%s: invalid tuner i2c address\n", __func__);
ret = -EINVAL;
goto error;
}
diff --git a/drivers/media/dvb-frontends/tda10071.h b/drivers/media/dvb-frontends/tda10071.h
index f9542f68fe78..331b5a819383 100644
--- a/drivers/media/dvb-frontends/tda10071.h
+++ b/drivers/media/dvb-frontends/tda10071.h
@@ -79,7 +79,7 @@ extern struct dvb_frontend *tda10071_attach(
static inline struct dvb_frontend *tda10071_attach(
const struct tda10071_config *config, struct i2c_adapter *i2c)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
#endif
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 4aa9c5311cc5..441053be7f55 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -196,7 +196,7 @@ config VIDEO_ADV7183
config VIDEO_ADV7604
tristate "Analog Devices ADV7604 decoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
---help---
Support for the Analog Devices ADV7604 video decoder.
@@ -208,7 +208,7 @@ config VIDEO_ADV7604
config VIDEO_ADV7842
tristate "Analog Devices ADV7842 decoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
---help---
Support for the Analog Devices ADV7842 video decoder.
@@ -431,7 +431,7 @@ config VIDEO_ADV7393
config VIDEO_ADV7511
tristate "Analog Devices ADV7511 encoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
---help---
Support for the Analog Devices ADV7511 video encoder.
@@ -579,6 +579,14 @@ config VIDEO_S5K6AA
This is a V4L2 sensor-level driver for Samsung S5K6AA(FX) 1.3M
camera sensor with an embedded SoC image signal processor.
+config VIDEO_S5K6A3
+ tristate "Samsung S5K6A3 sensor support"
+ depends on MEDIA_CAMERA_SUPPORT
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a V4L2 sensor-level driver for Samsung S5K6A3 raw
+ camera sensor.
+
config VIDEO_S5K4ECGX
tristate "Samsung S5K4ECGX sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
@@ -629,6 +637,15 @@ config VIDEO_LM3560
This is a driver for the lm3560 dual flash controllers. It controls
flash, torch LEDs.
+config VIDEO_LM3646
+ tristate "LM3646 dual flash driver support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on MEDIA_CAMERA_SUPPORT
+ select REGMAP_I2C
+ ---help---
+ This is a driver for the lm3646 dual flash controllers. It controls
+ flash and torch LEDs.
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -659,6 +676,7 @@ comment "Audio/Video compression chips"
config VIDEO_SAA6752HS
tristate "Philips SAA6752HS MPEG-2 Audio/Video Encoder"
depends on VIDEO_V4L2 && I2C
+ select CRC32
---help---
Support for the Philips SAA6752HS MPEG-2 video and MPEG-audio/AC-3
audio encoder with multiplexer.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 48888ae876fb..01ae9328e582 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -66,12 +66,14 @@ obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
+obj-$(CONFIG_VIDEO_S5K6A3) += s5k6a3.o
obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o
obj-$(CONFIG_VIDEO_S5K5BAF) += s5k5baf.o
obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/
obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o
obj-$(CONFIG_VIDEO_LM3560) += lm3560.o
+obj-$(CONFIG_VIDEO_LM3646) += lm3646.o
obj-$(CONFIG_VIDEO_SMIAPP_PLL) += smiapp-pll.o
obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 83225d6a0dd9..1b7ecfd88673 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -573,7 +573,7 @@ static const struct v4l2_subdev_core_ops ad9389b_core_ops = {
/* ------------------------------ PAD OPS ------------------------------ */
-static int ad9389b_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+static int ad9389b_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
{
struct ad9389b_state *state = get_ad9389b_state(sd);
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index d7d99f1c69e4..5e638b159452 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -123,11 +123,11 @@
struct adv7180_state {
struct v4l2_ctrl_handler ctrl_hdl;
struct v4l2_subdev sd;
- struct work_struct work;
struct mutex mutex; /* mutual excl. when accessing chip */
int irq;
v4l2_std_id curr_norm;
bool autodetect;
+ bool powered;
u8 input;
};
#define to_adv7180_sd(_ctrl) (&container_of(_ctrl->handler, \
@@ -312,6 +312,37 @@ out:
return ret;
}
+static int adv7180_set_power(struct adv7180_state *state,
+ struct i2c_client *client, bool on)
+{
+ u8 val;
+
+ if (on)
+ val = ADV7180_PWR_MAN_ON;
+ else
+ val = ADV7180_PWR_MAN_OFF;
+
+ return i2c_smbus_write_byte_data(client, ADV7180_PWR_MAN_REG, val);
+}
+
+static int adv7180_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct adv7180_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ ret = mutex_lock_interruptible(&state->mutex);
+ if (ret)
+ return ret;
+
+ ret = adv7180_set_power(state, client, on);
+ if (ret == 0)
+ state->powered = on;
+
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
static int adv7180_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_adv7180_sd(ctrl);
@@ -442,6 +473,7 @@ static const struct v4l2_subdev_video_ops adv7180_video_ops = {
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
.s_std = adv7180_s_std,
+ .s_power = adv7180_s_power,
};
static const struct v4l2_subdev_ops adv7180_ops = {
@@ -449,10 +481,9 @@ static const struct v4l2_subdev_ops adv7180_ops = {
.video = &adv7180_video_ops,
};
-static void adv7180_work(struct work_struct *work)
+static irqreturn_t adv7180_irq(int irq, void *devid)
{
- struct adv7180_state *state = container_of(work, struct adv7180_state,
- work);
+ struct adv7180_state *state = devid;
struct i2c_client *client = v4l2_get_subdevdata(&state->sd);
u8 isr3;
@@ -468,17 +499,6 @@ static void adv7180_work(struct work_struct *work)
__adv7180_status(client, NULL, &state->curr_norm);
mutex_unlock(&state->mutex);
- enable_irq(state->irq);
-}
-
-static irqreturn_t adv7180_irq(int irq, void *devid)
-{
- struct adv7180_state *state = devid;
-
- schedule_work(&state->work);
-
- disable_irq_nosync(state->irq);
-
return IRQ_HANDLED;
}
@@ -533,48 +553,52 @@ static int init_device(struct i2c_client *client, struct adv7180_state *state)
/* register for interrupts */
if (state->irq > 0) {
- ret = request_irq(state->irq, adv7180_irq, 0, KBUILD_MODNAME,
- state);
+ ret = request_threaded_irq(state->irq, NULL, adv7180_irq,
+ IRQF_ONESHOT, KBUILD_MODNAME, state);
if (ret)
return ret;
ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
ADV7180_ADI_CTRL_IRQ_SPACE);
if (ret < 0)
- return ret;
+ goto err;
/* config the Interrupt pin to be active low */
ret = i2c_smbus_write_byte_data(client, ADV7180_ICONF1_ADI,
ADV7180_ICONF1_ACTIVE_LOW |
ADV7180_ICONF1_PSYNC_ONLY);
if (ret < 0)
- return ret;
+ goto err;
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR1_ADI, 0);
if (ret < 0)
- return ret;
+ goto err;
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR2_ADI, 0);
if (ret < 0)
- return ret;
+ goto err;
 /* enable AD change interrupts */
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR3_ADI,
ADV7180_IRQ3_AD_CHANGE);
if (ret < 0)
- return ret;
+ goto err;
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR4_ADI, 0);
if (ret < 0)
- return ret;
+ goto err;
ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
0);
if (ret < 0)
- return ret;
+ goto err;
}
return 0;
+
+err:
+ free_irq(state->irq, state);
+ return ret;
}
static int adv7180_probe(struct i2c_client *client,
@@ -598,9 +622,9 @@ static int adv7180_probe(struct i2c_client *client,
}
state->irq = client->irq;
- INIT_WORK(&state->work, adv7180_work);
mutex_init(&state->mutex);
state->autodetect = true;
+ state->powered = true;
state->input = 0;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
@@ -611,15 +635,21 @@ static int adv7180_probe(struct i2c_client *client,
ret = init_device(client, state);
if (ret)
goto err_free_ctrl;
+
+ ret = v4l2_async_register_subdev(sd);
+ if (ret)
+ goto err_free_irq;
+
return 0;
+err_free_irq:
+ if (state->irq > 0)
+ free_irq(client->irq, state);
err_free_ctrl:
adv7180_exit_controls(state);
err_unreg_subdev:
mutex_destroy(&state->mutex);
- v4l2_device_unregister_subdev(sd);
err:
- printk(KERN_ERR KBUILD_MODNAME ": Failed to probe: %d\n", ret);
return ret;
}
@@ -628,20 +658,14 @@ static int adv7180_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7180_state *state = to_state(sd);
- if (state->irq > 0) {
+ v4l2_async_unregister_subdev(sd);
+
+ if (state->irq > 0)
free_irq(client->irq, state);
- if (cancel_work_sync(&state->work)) {
- /*
- * Work was pending, therefore we need to enable
- * IRQ here to balance the disable_irq() done in the
- * interrupt handler.
- */
- enable_irq(state->irq);
- }
- }
- mutex_destroy(&state->mutex);
v4l2_device_unregister_subdev(sd);
+ adv7180_exit_controls(state);
+ mutex_destroy(&state->mutex);
return 0;
}
@@ -654,13 +678,10 @@ static const struct i2c_device_id adv7180_id[] = {
static int adv7180_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- int ret;
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct adv7180_state *state = to_state(sd);
- ret = i2c_smbus_write_byte_data(client, ADV7180_PWR_MAN_REG,
- ADV7180_PWR_MAN_OFF);
- if (ret < 0)
- return ret;
- return 0;
+ return adv7180_set_power(state, client, false);
}
static int adv7180_resume(struct device *dev)
@@ -670,10 +691,11 @@ static int adv7180_resume(struct device *dev)
struct adv7180_state *state = to_state(sd);
int ret;
- ret = i2c_smbus_write_byte_data(client, ADV7180_PWR_MAN_REG,
- ADV7180_PWR_MAN_ON);
- if (ret < 0)
- return ret;
+ if (state->powered) {
+ ret = adv7180_set_power(state, client, true);
+ if (ret)
+ return ret;
+ }
ret = init_device(client, state);
if (ret < 0)
return ret;
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index d4e15a617c3b..9d38f7b36cd1 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -26,12 +26,12 @@
#include <linux/videodev2.h>
#include <linux/uaccess.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <media/adv7343.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-of.h>
#include "adv7343_regs.h"
@@ -410,7 +410,7 @@ adv7343_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- np = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
+ np = of_graph_get_next_endpoint(client->dev.of_node, NULL);
if (!np)
return NULL;
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index ee618942cb8e..942ca4b99297 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -597,7 +597,7 @@ static int adv7511_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
return 0;
}
-static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
{
struct adv7511_state *state = get_adv7511_state(sd);
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 71c8570bd9ea..98cc5407f1b1 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1658,7 +1658,7 @@ static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
return 0;
}
-static int adv7604_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+static int adv7604_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
{
struct adv7604_state *state = to_state(sd);
u8 *data = NULL;
@@ -1728,7 +1728,7 @@ static int get_edid_spa_location(const u8 *edid)
return -1;
}
-static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
{
struct adv7604_state *state = to_state(sd);
int spa_loc;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 9bbd6656fb8f..636ac08925f6 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -546,6 +546,14 @@ static void main_reset(struct v4l2_subdev *sd)
/* ----------------------------------------------------------------------- */
+static inline bool is_analog_input(struct v4l2_subdev *sd)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return ((state->mode == ADV7842_MODE_RGB) ||
+ (state->mode == ADV7842_MODE_COMP));
+}
+
static inline bool is_digital_input(struct v4l2_subdev *sd)
{
struct adv7842_state *state = to_state(sd);
@@ -1027,12 +1035,72 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
cp_write(sd, 0xac, (height & 0x0f) << 4);
}
+static void adv7842_set_offset(struct v4l2_subdev *sd, bool auto_offset, u16 offset_a, u16 offset_b, u16 offset_c)
+{
+ struct adv7842_state *state = to_state(sd);
+ u8 offset_buf[4];
+
+ if (auto_offset) {
+ offset_a = 0x3ff;
+ offset_b = 0x3ff;
+ offset_c = 0x3ff;
+ }
+
+ v4l2_dbg(2, debug, sd, "%s: %s offset: a = 0x%x, b = 0x%x, c = 0x%x\n",
+ __func__, auto_offset ? "Auto" : "Manual",
+ offset_a, offset_b, offset_c);
+
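+	/* pack the three 10-bit offsets into CP registers 0x77-0x7a */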
+ offset_buf[0] = (cp_read(sd, 0x77) & 0xc0) | ((offset_a & 0x3f0) >> 4);
+ offset_buf[1] = ((offset_a & 0x00f) << 4) | ((offset_b & 0x3c0) >> 6);
+ offset_buf[2] = ((offset_b & 0x03f) << 2) | ((offset_c & 0x300) >> 8);
+ offset_buf[3] = offset_c & 0x0ff;
+
+ /* Registers must be written in this order with no i2c access in between */
+ if (adv_smbus_write_i2c_block_data(state->i2c_cp, 0x77, 4, offset_buf))
+ v4l2_err(sd, "%s: i2c error writing to CP reg 0x77, 0x78, 0x79, 0x7a\n", __func__);
+}
+
+static void adv7842_set_gain(struct v4l2_subdev *sd, bool auto_gain, u16 gain_a, u16 gain_b, u16 gain_c)
+{
+ struct adv7842_state *state = to_state(sd);
+ u8 gain_buf[4];
+ u8 gain_man = 1;
+ u8 agc_mode_man = 1;
+
+ if (auto_gain) {
+ gain_man = 0;
+ agc_mode_man = 0;
+ gain_a = 0x100;
+ gain_b = 0x100;
+ gain_c = 0x100;
+ }
+
+ v4l2_dbg(2, debug, sd, "%s: %s gain: a = 0x%x, b = 0x%x, c = 0x%x\n",
+ __func__, auto_gain ? "Auto" : "Manual",
+ gain_a, gain_b, gain_c);
+
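+	/* pack the manual/auto flags and three 10-bit gains into CP registers 0x73-0x76 */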
+ gain_buf[0] = ((gain_man << 7) | (agc_mode_man << 6) | ((gain_a & 0x3f0) >> 4));
+ gain_buf[1] = (((gain_a & 0x00f) << 4) | ((gain_b & 0x3c0) >> 6));
+ gain_buf[2] = (((gain_b & 0x03f) << 2) | ((gain_c & 0x300) >> 8));
+ gain_buf[3] = ((gain_c & 0x0ff));
+
+ /* Registers must be written in this order with no i2c access in between */
+ if (adv_smbus_write_i2c_block_data(state->i2c_cp, 0x73, 4, gain_buf))
+ v4l2_err(sd, "%s: i2c error writing to CP reg 0x73, 0x74, 0x75, 0x76\n", __func__);
+}
+
static void set_rgb_quantization_range(struct v4l2_subdev *sd)
{
struct adv7842_state *state = to_state(sd);
+ bool rgb_output = io_read(sd, 0x02) & 0x02;
+ bool hdmi_signal = hdmi_read(sd, 0x05) & 0x80;
+
+ v4l2_dbg(2, debug, sd, "%s: RGB quantization range: %d, RGB out: %d, HDMI: %d\n",
+ __func__, state->rgb_quantization_range,
+ rgb_output, hdmi_signal);
- v4l2_dbg(2, debug, sd, "%s: rgb_quantization_range = %d\n",
- __func__, state->rgb_quantization_range);
+ adv7842_set_gain(sd, true, 0x0, 0x0, 0x0);
+ adv7842_set_offset(sd, true, 0x0, 0x0, 0x0);
switch (state->rgb_quantization_range) {
case V4L2_DV_RGB_RANGE_AUTO:
@@ -1050,7 +1118,7 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
break;
}
- if (hdmi_read(sd, 0x05) & 0x80) {
+ if (hdmi_signal) {
/* Receiving HDMI signal
* Set automode */
io_write_and_or(sd, 0x02, 0x0f, 0xf0);
@@ -1066,24 +1134,45 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
} else {
/* RGB full range (0-255) */
io_write_and_or(sd, 0x02, 0x0f, 0x10);
+
+ if (is_digital_input(sd) && rgb_output) {
+ adv7842_set_offset(sd, false, 0x40, 0x40, 0x40);
+ } else {
+ adv7842_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
+ adv7842_set_offset(sd, false, 0x70, 0x70, 0x70);
+ }
}
break;
case V4L2_DV_RGB_RANGE_LIMITED:
if (state->mode == ADV7842_MODE_COMP) {
/* YCrCb limited range (16-235) */
io_write_and_or(sd, 0x02, 0x0f, 0x20);
- } else {
- /* RGB limited range (16-235) */
- io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ break;
}
+
+ /* RGB limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x00);
+
break;
case V4L2_DV_RGB_RANGE_FULL:
if (state->mode == ADV7842_MODE_COMP) {
/* YCrCb full range (0-255) */
io_write_and_or(sd, 0x02, 0x0f, 0x60);
+ break;
+ }
+
+ /* RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+
+ if (is_analog_input(sd) || hdmi_signal)
+ break;
+
+ /* Adjust gain/offset for DVI-D signals only */
+ if (rgb_output) {
+ adv7842_set_offset(sd, false, 0x40, 0x40, 0x40);
} else {
- /* RGB full range (0-255) */
- io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ adv7842_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
+ adv7842_set_offset(sd, false, 0x70, 0x70, 0x70);
}
break;
}
@@ -1360,12 +1449,11 @@ static int adv7842_query_dv_timings(struct v4l2_subdev *sd,
bt->width = (hdmi_read(sd, 0x07) & 0x0f) * 256 + hdmi_read(sd, 0x08);
bt->height = (hdmi_read(sd, 0x09) & 0x0f) * 256 + hdmi_read(sd, 0x0a);
- freq = (hdmi_read(sd, 0x06) * 1000000) +
- ((hdmi_read(sd, 0x3b) & 0x30) >> 4) * 250000;
-
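+	/* registers 0x51/0x52 hold the TMDS frequency in 1/128 MHz units */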
+ freq = ((hdmi_read(sd, 0x51) << 1) + (hdmi_read(sd, 0x52) >> 7)) * 1000000;
+ freq += ((hdmi_read(sd, 0x52) & 0x7f) * 7813);
if (is_hdmi(sd)) {
/* adjust for deep color mode */
- freq = freq * 8 / (((hdmi_read(sd, 0x0b) & 0xc0) >> 5) + 8);
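+		/* i.e. scale by 8/8, 8/10 or 8/12 for 8-, 10- and 12-bit colour depth */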
+ freq = freq * 8 / (((hdmi_read(sd, 0x0b) & 0xc0) >> 6) * 2 + 8);
}
bt->pixelclock = freq;
bt->hfrontporch = (hdmi_read(sd, 0x20) & 0x03) * 256 +
@@ -1717,8 +1805,8 @@ static void select_input(struct v4l2_subdev *sd,
* (rev. 2.5, June 2010)" p. 17. */
afe_write(sd, 0x12, 0xfb); /* ADC noise shaping filter controls */
afe_write(sd, 0x0c, 0x0d); /* CP core gain controls */
- cp_write(sd, 0x3e, 0x80); /* CP core pre-gain control,
- enable color control */
+ cp_write(sd, 0x3e, 0x00); /* CP core pre-gain control */
+
/* CP coast control */
cp_write(sd, 0xc3, 0x33); /* Component mode */
@@ -1926,7 +2014,7 @@ static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
return 0;
}
-static int adv7842_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+static int adv7842_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
{
struct adv7842_state *state = to_state(sd);
u8 *data = NULL;
@@ -1966,7 +2054,7 @@ static int adv7842_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edi
return 0;
}
-static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *e)
+static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *e)
{
struct adv7842_state *state = to_state(sd);
int err = 0;
@@ -2103,7 +2191,8 @@ static void print_avi_infoframe(struct v4l2_subdev *sd)
{
int i;
uint8_t buf[14];
- uint8_t avi_inf_len;
+ u8 avi_len;
+ u8 avi_ver;
struct avi_info_frame avi;
if (!(hdmi_read(sd, 0x05) & 0x80)) {
@@ -2116,18 +2205,20 @@ static void print_avi_infoframe(struct v4l2_subdev *sd)
}
if (io_read(sd, 0x88) & 0x10) {
- /* Note: the ADV7842 calculated incorrect checksums for InfoFrames
- with a length of 14 or 15. See the ADV7842 Register Settings
- Recommendations document for more details. */
- v4l2_info(sd, "AVI infoframe checksum error\n");
- return;
+ v4l2_info(sd, "AVI infoframe checksum error has occurred earlier\n");
+ io_write(sd, 0x8a, 0x10); /* clear AVI_INF_CKS_ERR_RAW */
+ if (io_read(sd, 0x88) & 0x10) {
+ v4l2_info(sd, "AVI infoframe checksum error still present\n");
+ io_write(sd, 0x8a, 0x10); /* clear AVI_INF_CKS_ERR_RAW */
+ }
}
- avi_inf_len = infoframe_read(sd, 0xe2);
+ avi_len = infoframe_read(sd, 0xe2);
+ avi_ver = infoframe_read(sd, 0xe1);
v4l2_info(sd, "AVI infoframe version %d (%d byte)\n",
- infoframe_read(sd, 0xe1), avi_inf_len);
+ avi_ver, avi_len);
- if (infoframe_read(sd, 0xe1) != 0x02)
+ if (avi_ver != 0x02)
return;
for (i = 0; i < 14; i++)
@@ -2602,9 +2693,15 @@ static int adv7842_core_init(struct v4l2_subdev *sd)
/* disable I2C access to internal EDID ram from HDMI DDC ports */
rep_write_and_or(sd, 0x77, 0xf3, 0x00);
- hdmi_write(sd, 0x69, 0xa3); /* HPA manual */
- /* HPA disable on port A and B */
- io_write_and_or(sd, 0x20, 0xcf, 0x00);
+ if (pdata->hpa_auto) {
+ /* HPA auto, HPA 0.5s after Edid set and Cable detect */
+ hdmi_write(sd, 0x69, 0x5c);
+ } else {
+ /* HPA manual */
+ hdmi_write(sd, 0x69, 0xa3);
+ /* HPA disable on port A and B */
+ io_write_and_or(sd, 0x20, 0xcf, 0x00);
+ }
/* LLC */
io_write(sd, 0x19, 0x80 | pdata->llc_dll_phase);
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 99ee456700f4..c8fe1358ec9e 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -431,8 +431,8 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
* Initialize the other fields of rc_dev
*/
rc->map_name = ir->ir_codes;
- rc->allowed_protos = rc_type;
- rc->enabled_protocols = rc_type;
+ rc_set_allowed_protocols(rc, rc_type);
+ rc_set_enabled_protocols(rc, rc_type);
if (!rc->driver_name)
rc->driver_name = MODULE_NAME;
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index d98ca3aebe23..c23de593c17d 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -15,12 +15,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#include <linux/delay.h>
@@ -42,7 +36,7 @@
#define REG_FLAG 0xd0
#define REG_CONFIG1 0xe0
-/* Fault Mask */
+/* fault mask */
#define FAULT_TIMEOUT (1<<0)
#define FAULT_OVERTEMP (1<<1)
#define FAULT_SHORT_CIRCUIT (1<<2)
@@ -53,7 +47,8 @@ enum led_enable {
MODE_FLASH = 0x3,
};
-/* struct lm3560_flash
+/**
+ * struct lm3560_flash
*
* @pdata: platform data
* @regmap: reg. map for i2c
@@ -98,7 +93,7 @@ static int lm3560_mode_ctrl(struct lm3560_flash *flash)
return rval;
}
-/* led1/2 enable/disable */
+/* led1/2 enable/disable */
static int lm3560_enable_ctrl(struct lm3560_flash *flash,
enum lm3560_led_id led_no, bool on)
{
@@ -168,7 +163,7 @@ static int lm3560_flash_brt_ctrl(struct lm3560_flash *flash,
return rval;
}
-/* V4L2 controls */
+/* v4l2 controls */
static int lm3560_get_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
{
struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no);
@@ -297,6 +292,7 @@ static int lm3560_init_controls(struct lm3560_flash *flash,
const struct v4l2_ctrl_ops *ops = &lm3560_led_ctrl_ops[led_no];
v4l2_ctrl_handler_init(hdl, 8);
+
/* flash mode */
v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_FLASH_LED_MODE,
V4L2_FLASH_LED_MODE_TORCH, ~0x7,
@@ -309,6 +305,7 @@ static int lm3560_init_controls(struct lm3560_flash *flash,
/* flash strobe */
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);
+
/* flash strobe stop */
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);
@@ -395,7 +392,7 @@ static int lm3560_init_device(struct lm3560_flash *flash)
rval = lm3560_mode_ctrl(flash);
if (rval < 0)
return rval;
- /* Reset faults */
+ /* reset faults */
rval = regmap_read(flash->regmap, REG_FLAG, &reg_val);
return rval;
}
@@ -419,8 +416,7 @@ static int lm3560_probe(struct i2c_client *client,
/* if there is no platform data, use chip default value */
if (pdata == NULL) {
- pdata =
- kzalloc(sizeof(struct lm3560_platform_data), GFP_KERNEL);
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (pdata == NULL)
return -ENODEV;
pdata->peak = LM3560_PEAK_3600mA;
diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c
new file mode 100644
index 000000000000..626fb4679c02
--- /dev/null
+++ b/drivers/media/i2c/lm3646.c
@@ -0,0 +1,414 @@
+/*
+ * drivers/media/i2c/lm3646.c
+ * General device driver for TI lm3646, Dual FLASH LED Driver
+ *
+ * Copyright (C) 2014 Texas Instruments
+ *
+ * Contact: Daniel Jeong <gshark.jeong@gmail.com>
+ * Ldd-Mlp <ldd-mlp@list.ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+#include <media/lm3646.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+/* register definitions */
+#define REG_ENABLE 0x01
+#define REG_TORCH_BR 0x05
+#define REG_FLASH_BR 0x05
+#define REG_FLASH_TOUT 0x04
+#define REG_FLAG 0x08
+#define REG_STROBE_SRC 0x06
+#define REG_LED1_FLASH_BR 0x06
+#define REG_LED1_TORCH_BR 0x07
+
+#define MASK_ENABLE 0x03
+#define MASK_TORCH_BR 0x70
+#define MASK_FLASH_BR 0x0F
+#define MASK_FLASH_TOUT 0x07
+#define MASK_FLAG 0xFF
+#define MASK_STROBE_SRC 0x80
+
+/* Fault Mask */
+#define FAULT_TIMEOUT (1<<0)
+#define FAULT_SHORT_CIRCUIT (1<<1)
+#define FAULT_UVLO (1<<2)
+#define FAULT_IVFM (1<<3)
+#define FAULT_OCP (1<<4)
+#define FAULT_OVERTEMP (1<<5)
+#define FAULT_NTC_TRIP (1<<6)
+#define FAULT_OVP (1<<7)
+
+enum led_mode {
+ MODE_SHDN = 0x0,
+ MODE_TORCH = 0x2,
+ MODE_FLASH = 0x3,
+};
+
+/*
+ * struct lm3646_flash
+ *
+ * @pdata: platform data
+ * @regmap: reg. map for i2c
+ * @lock: mutex for serial access.
+ * @led_mode: V4L2 LED mode
+ * @ctrls_led: V4L2 controls
+ * @subdev_led: V4L2 subdev
+ * @mode_reg: mode register value
+ */
+struct lm3646_flash {
+ struct device *dev;
+ struct lm3646_platform_data *pdata;
+ struct regmap *regmap;
+
+ struct v4l2_ctrl_handler ctrls_led;
+ struct v4l2_subdev subdev_led;
+
+ u8 mode_reg;
+};
+
+#define to_lm3646_flash(_ctrl) \
+ container_of(_ctrl->handler, struct lm3646_flash, ctrls_led)
+
+/* enable mode control */
+static int lm3646_mode_ctrl(struct lm3646_flash *flash,
+ enum v4l2_flash_led_mode led_mode)
+{
+ switch (led_mode) {
+ case V4L2_FLASH_LED_MODE_NONE:
+ return regmap_write(flash->regmap,
+ REG_ENABLE, flash->mode_reg | MODE_SHDN);
+ case V4L2_FLASH_LED_MODE_TORCH:
+ return regmap_write(flash->regmap,
+ REG_ENABLE, flash->mode_reg | MODE_TORCH);
+ case V4L2_FLASH_LED_MODE_FLASH:
+ return regmap_write(flash->regmap,
+ REG_ENABLE, flash->mode_reg | MODE_FLASH);
+ }
+ return -EINVAL;
+}
+
+/* V4L2 controls */
+static int lm3646_get_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct lm3646_flash *flash = to_lm3646_flash(ctrl);
+ unsigned int reg_val;
+ int rval;
+
+ if (ctrl->id != V4L2_CID_FLASH_FAULT)
+ return -EINVAL;
+
+ rval = regmap_read(flash->regmap, REG_FLAG, &reg_val);
+ if (rval < 0)
+ return rval;
+
+ ctrl->val = 0;
+ if (reg_val & FAULT_TIMEOUT)
+ ctrl->val |= V4L2_FLASH_FAULT_TIMEOUT;
+ if (reg_val & FAULT_SHORT_CIRCUIT)
+ ctrl->val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
+ if (reg_val & FAULT_UVLO)
+ ctrl->val |= V4L2_FLASH_FAULT_UNDER_VOLTAGE;
+ if (reg_val & FAULT_IVFM)
+ ctrl->val |= V4L2_FLASH_FAULT_INPUT_VOLTAGE;
+ if (reg_val & FAULT_OCP)
+ ctrl->val |= V4L2_FLASH_FAULT_OVER_CURRENT;
+ if (reg_val & FAULT_OVERTEMP)
+ ctrl->val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
+ if (reg_val & FAULT_NTC_TRIP)
+ ctrl->val |= V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE;
+ if (reg_val & FAULT_OVP)
+ ctrl->val |= V4L2_FLASH_FAULT_OVER_VOLTAGE;
+
+ return 0;
+}
+
+static int lm3646_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct lm3646_flash *flash = to_lm3646_flash(ctrl);
+ unsigned int reg_val;
+ int rval = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FLASH_LED_MODE:
+
+ if (ctrl->val != V4L2_FLASH_LED_MODE_FLASH)
+ return lm3646_mode_ctrl(flash, ctrl->val);
+ /* switch to SHDN mode before flash strobe on */
+ return lm3646_mode_ctrl(flash, V4L2_FLASH_LED_MODE_NONE);
+
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ return regmap_update_bits(flash->regmap,
+ REG_STROBE_SRC, MASK_STROBE_SRC,
+ (ctrl->val) << 7);
+
+ case V4L2_CID_FLASH_STROBE:
+
+ /* read and check current mode of chip to start flash */
+ rval = regmap_read(flash->regmap, REG_ENABLE, &reg_val);
+ if (rval < 0 || ((reg_val & MASK_ENABLE) != MODE_SHDN))
+ return rval;
+ /* flash on */
+ return lm3646_mode_ctrl(flash, V4L2_FLASH_LED_MODE_FLASH);
+
+ case V4L2_CID_FLASH_STROBE_STOP:
+
+ /*
+ * The chip returns automatically from FLASH mode to SHDN mode after
+ * the flash duration timeout; read back the current mode to decide
+ * whether the flash still needs to be stopped.
+ */
+ rval = regmap_read(flash->regmap, REG_ENABLE, &reg_val);
+ if (rval < 0)
+ return rval;
+ if ((reg_val & MASK_ENABLE) == MODE_FLASH)
+ return lm3646_mode_ctrl(flash,
+ V4L2_FLASH_LED_MODE_NONE);
+ return rval;
+
+ case V4L2_CID_FLASH_TIMEOUT:
+ return regmap_update_bits(flash->regmap,
+ REG_FLASH_TOUT, MASK_FLASH_TOUT,
+ LM3646_FLASH_TOUT_ms_TO_REG
+ (ctrl->val));
+
+ case V4L2_CID_FLASH_INTENSITY:
+ return regmap_update_bits(flash->regmap,
+ REG_FLASH_BR, MASK_FLASH_BR,
+ LM3646_TOTAL_FLASH_BRT_uA_TO_REG
+ (ctrl->val));
+
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ return regmap_update_bits(flash->regmap,
+ REG_TORCH_BR, MASK_TORCH_BR,
+ LM3646_TOTAL_TORCH_BRT_uA_TO_REG
+ (ctrl->val) << 4);
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops lm3646_led_ctrl_ops = {
+ .g_volatile_ctrl = lm3646_get_ctrl,
+ .s_ctrl = lm3646_set_ctrl,
+};
+
+static int lm3646_init_controls(struct lm3646_flash *flash)
+{
+ struct v4l2_ctrl *fault;
+ struct v4l2_ctrl_handler *hdl = &flash->ctrls_led;
+ const struct v4l2_ctrl_ops *ops = &lm3646_led_ctrl_ops;
+
+ v4l2_ctrl_handler_init(hdl, 8);
+ /* flash mode */
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_FLASH_LED_MODE,
+ V4L2_FLASH_LED_MODE_TORCH, ~0x7,
+ V4L2_FLASH_LED_MODE_NONE);
+
+ /* flash source */
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_FLASH_STROBE_SOURCE,
+ 0x1, ~0x3, V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
+
+ /* flash strobe */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);
+ /* flash strobe stop */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);
+
+ /* flash strobe timeout */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TIMEOUT,
+ LM3646_FLASH_TOUT_MIN,
+ LM3646_FLASH_TOUT_MAX,
+ LM3646_FLASH_TOUT_STEP, flash->pdata->flash_timeout);
+
+ /* max flash current */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_INTENSITY,
+ LM3646_TOTAL_FLASH_BRT_MIN,
+ LM3646_TOTAL_FLASH_BRT_MAX,
+ LM3646_TOTAL_FLASH_BRT_STEP,
+ LM3646_TOTAL_FLASH_BRT_MAX);
+
+ /* max torch current */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TORCH_INTENSITY,
+ LM3646_TOTAL_TORCH_BRT_MIN,
+ LM3646_TOTAL_TORCH_BRT_MAX,
+ LM3646_TOTAL_TORCH_BRT_STEP,
+ LM3646_TOTAL_TORCH_BRT_MAX);
+
+ /* fault */
+ fault = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_FAULT, 0,
+ V4L2_FLASH_FAULT_OVER_VOLTAGE
+ | V4L2_FLASH_FAULT_OVER_TEMPERATURE
+ | V4L2_FLASH_FAULT_SHORT_CIRCUIT
+ | V4L2_FLASH_FAULT_TIMEOUT, 0, 0);
+ if (fault != NULL)
+ fault->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ if (hdl->error)
+ return hdl->error;
+
+ flash->subdev_led.ctrl_handler = hdl;
+ return 0;
+}
+
+/* initialize device */
+static const struct v4l2_subdev_ops lm3646_ops = {
+ .core = NULL,
+};
+
+static const struct regmap_config lm3646_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xFF,
+};
+
+static int lm3646_subdev_init(struct lm3646_flash *flash)
+{
+ struct i2c_client *client = to_i2c_client(flash->dev);
+ int rval;
+
+ v4l2_i2c_subdev_init(&flash->subdev_led, client, &lm3646_ops);
+ flash->subdev_led.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strcpy(flash->subdev_led.name, LM3646_NAME);
+ rval = lm3646_init_controls(flash);
+ if (rval)
+ goto err_out;
+ rval = media_entity_init(&flash->subdev_led.entity, 0, NULL, 0);
+ if (rval < 0)
+ goto err_out;
+ flash->subdev_led.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;
+ return rval;
+
+err_out:
+ v4l2_ctrl_handler_free(&flash->ctrls_led);
+ return rval;
+}
+
+static int lm3646_init_device(struct lm3646_flash *flash)
+{
+ unsigned int reg_val;
+ int rval;
+
+ /* read the value of mode register to reduce redundant i2c accesses */
+ rval = regmap_read(flash->regmap, REG_ENABLE, &reg_val);
+ if (rval < 0)
+ return rval;
+ flash->mode_reg = reg_val & 0xfc;
+
+ /* output disable */
+ rval = lm3646_mode_ctrl(flash, V4L2_FLASH_LED_MODE_NONE);
+ if (rval < 0)
+ return rval;
+
+ /*
+ * LED1 flash current setting
+ * LED2 flash current = Total(Max) flash current - LED1 flash current
+ */
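+	/*
+	 * For example (hypothetical values): with a 1000000 uA total flash
+	 * limit and led1_flash_brt = 600000 uA, the chip drives LED2 with the
+	 * remaining 400000 uA; only the LED1 register is written here.
+	 */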
+ rval = regmap_update_bits(flash->regmap,
+ REG_LED1_FLASH_BR, 0x7F,
+ LM3646_LED1_FLASH_BRT_uA_TO_REG
+ (flash->pdata->led1_flash_brt));
+
+ if (rval < 0)
+ return rval;
+
+ /*
+ * LED1 torch current setting
+ * LED2 torch current = Total(Max) torch current - LED1 torch current
+ */
+ rval = regmap_update_bits(flash->regmap,
+ REG_LED1_TORCH_BR, 0x7F,
+ LM3646_LED1_TORCH_BRT_uA_TO_REG
+ (flash->pdata->led1_torch_brt));
+ if (rval < 0)
+ return rval;
+
+ /* Reset flag register */
+ return regmap_read(flash->regmap, REG_FLAG, &reg_val);
+}
+
+static int lm3646_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct lm3646_flash *flash;
+ struct lm3646_platform_data *pdata = dev_get_platdata(&client->dev);
+ int rval;
+
+ flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL);
+ if (flash == NULL)
+ return -ENOMEM;
+
+ flash->regmap = devm_regmap_init_i2c(client, &lm3646_regmap);
+ if (IS_ERR(flash->regmap))
+ return PTR_ERR(flash->regmap);
+
+ /* if there is no platform data, use chip default values */
+ if (pdata == NULL) {
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct lm3646_platform_data),
+ GFP_KERNEL);
+ if (pdata == NULL)
+ return -ENOMEM;
+ /* use default data in case of no platform data */
+ pdata->flash_timeout = LM3646_FLASH_TOUT_MAX;
+ pdata->led1_torch_brt = LM3646_LED1_TORCH_BRT_MAX;
+ pdata->led1_flash_brt = LM3646_LED1_FLASH_BRT_MAX;
+ }
+ flash->pdata = pdata;
+ flash->dev = &client->dev;
+
+ rval = lm3646_subdev_init(flash);
+ if (rval < 0)
+ return rval;
+
+ rval = lm3646_init_device(flash);
+ if (rval < 0)
+ return rval;
+
+ i2c_set_clientdata(client, flash);
+
+ return 0;
+}
+
+static int lm3646_remove(struct i2c_client *client)
+{
+ struct lm3646_flash *flash = i2c_get_clientdata(client);
+
+ v4l2_device_unregister_subdev(&flash->subdev_led);
+ v4l2_ctrl_handler_free(&flash->ctrls_led);
+ media_entity_cleanup(&flash->subdev_led.entity);
+
+ return 0;
+}
+
+static const struct i2c_device_id lm3646_id_table[] = {
+ {LM3646_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lm3646_id_table);
+
+static struct i2c_driver lm3646_i2c_driver = {
+ .driver = {
+ .name = LM3646_NAME,
+ },
+ .probe = lm3646_probe,
+ .remove = lm3646_remove,
+ .id_table = lm3646_id_table,
+};
+
+module_i2c_driver(lm3646_i2c_driver);
+
+MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>");
+MODULE_AUTHOR("Ldd Mlp <ldd-mlp@list.ti.com>");
+MODULE_DESCRIPTION("Texas Instruments LM3646 Dual Flash LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index e5ddf47030fd..33daace81297 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -29,7 +30,6 @@
#include <media/mt9p031.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-of.h>
#include <media/v4l2-subdev.h>
#include "aptina-pll.h"
@@ -78,6 +78,9 @@
#define MT9P031_PLL_CONFIG_1 0x11
#define MT9P031_PLL_CONFIG_2 0x12
#define MT9P031_PIXEL_CLOCK_CONTROL 0x0a
+#define MT9P031_PIXEL_CLOCK_INVERT (1 << 15)
+#define MT9P031_PIXEL_CLOCK_SHIFT(n) ((n) << 8)
+#define MT9P031_PIXEL_CLOCK_DIVIDE(n) ((n) << 0)
#define MT9P031_FRAME_RESTART 0x0b
#define MT9P031_SHUTTER_DELAY 0x0c
#define MT9P031_RST 0x0d
@@ -130,6 +133,8 @@ struct mt9p031 {
enum mt9p031_model model;
struct aptina_pll pll;
+ unsigned int clk_div;
+ bool use_pll;
int reset;
struct v4l2_ctrl_handler ctrls;
@@ -198,6 +203,11 @@ static int mt9p031_reset(struct mt9p031 *mt9p031)
if (ret < 0)
return ret;
+ ret = mt9p031_write(client, MT9P031_PIXEL_CLOCK_CONTROL,
+ MT9P031_PIXEL_CLOCK_DIVIDE(mt9p031->clk_div));
+ if (ret < 0)
+ return ret;
+
return mt9p031_set_output_control(mt9p031, MT9P031_OUTPUT_CONTROL_CEN,
0);
}
@@ -222,15 +232,34 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
struct mt9p031_platform_data *pdata = mt9p031->pdata;
+ int ret;
mt9p031->clk = devm_clk_get(&client->dev, NULL);
if (IS_ERR(mt9p031->clk))
return PTR_ERR(mt9p031->clk);
- clk_set_rate(mt9p031->clk, pdata->ext_freq);
+ ret = clk_set_rate(mt9p031->clk, pdata->ext_freq);
+ if (ret < 0)
+ return ret;
+
+ /* If the external clock frequency is out of bounds for the PLL use the
+ * pixel clock divider only and disable the PLL.
+ */
+ if (pdata->ext_freq > limits.ext_clock_max) {
+ unsigned int div;
+
+ div = DIV_ROUND_UP(pdata->ext_freq, pdata->target_freq);
+ div = roundup_pow_of_two(div) / 2;
+
+ mt9p031->clk_div = max_t(unsigned int, div, 64);
+ mt9p031->use_pll = false;
+
+ return 0;
+ }
mt9p031->pll.ext_clock = pdata->ext_freq;
mt9p031->pll.pix_clock = pdata->target_freq;
+ mt9p031->use_pll = true;
return aptina_pll_calculate(&client->dev, &limits, &mt9p031->pll);
}
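A quick worked example of the PLL-bypass path above, assuming a hypothetical 54 MHz external clock and a 9 MHz target pixel clock: DIV_ROUND_UP(54 MHz, 9 MHz) = 6 and roundup_pow_of_two(6) / 2 = 4, so the sensor would be driven straight from the pixel clock divider with a divide ratio of 4 while the PLL stays disabled.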
@@ -240,6 +269,9 @@ static int mt9p031_pll_enable(struct mt9p031 *mt9p031)
struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
int ret;
+ if (!mt9p031->use_pll)
+ return 0;
+
ret = mt9p031_write(client, MT9P031_PLL_CONTROL,
MT9P031_PLL_CONTROL_PWRON);
if (ret < 0)
@@ -265,6 +297,9 @@ static inline int mt9p031_pll_disable(struct mt9p031 *mt9p031)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
+ if (!mt9p031->use_pll)
+ return 0;
+
return mt9p031_write(client, MT9P031_PLL_CONTROL,
MT9P031_PLL_CONTROL_PWROFF);
}
@@ -285,9 +320,15 @@ static int mt9p031_power_on(struct mt9p031 *mt9p031)
if (ret < 0)
return ret;
- /* Emable clock */
- if (mt9p031->clk)
- clk_prepare_enable(mt9p031->clk);
+ /* Enable clock */
+ if (mt9p031->clk) {
+ ret = clk_prepare_enable(mt9p031->clk);
+ if (ret) {
+ regulator_bulk_disable(ARRAY_SIZE(mt9p031->regulators),
+ mt9p031->regulators);
+ return ret;
+ }
+ }
/* Now RESET_BAR must be high */
if (gpio_is_valid(mt9p031->reset)) {
@@ -943,7 +984,7 @@ mt9p031_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- np = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
+ np = of_graph_get_next_endpoint(client->dev.of_node, NULL);
if (!np)
return NULL;
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index d41c70eaf838..422e068f5f1b 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -12,9 +12,11 @@
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/v4l2-mediabus.h>
@@ -55,6 +57,7 @@
#define MT9T001_OUTPUT_CONTROL_SYNC (1 << 0)
#define MT9T001_OUTPUT_CONTROL_CHIP_ENABLE (1 << 1)
#define MT9T001_OUTPUT_CONTROL_TEST_DATA (1 << 6)
+#define MT9T001_OUTPUT_CONTROL_DEF 0x0002
#define MT9T001_SHUTTER_WIDTH_HIGH 0x08
#define MT9T001_SHUTTER_WIDTH_LOW 0x09
#define MT9T001_SHUTTER_WIDTH_MIN 1
@@ -116,6 +119,12 @@ struct mt9t001 {
struct v4l2_subdev subdev;
struct media_pad pad;
+ struct clk *clk;
+ struct regulator_bulk_data regulators[2];
+
+ struct mutex power_lock; /* lock to protect power_count */
+ int power_count;
+
struct v4l2_mbus_framefmt format;
struct v4l2_rect crop;
@@ -159,6 +168,77 @@ static int mt9t001_set_output_control(struct mt9t001 *mt9t001, u16 clear,
return 0;
}
+static int mt9t001_reset(struct mt9t001 *mt9t001)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9t001->subdev);
+ int ret;
+
+ /* Reset the chip and stop data read out */
+ ret = mt9t001_write(client, MT9T001_RESET, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9t001_write(client, MT9T001_RESET, 0);
+ if (ret < 0)
+ return ret;
+
+ mt9t001->output_control = MT9T001_OUTPUT_CONTROL_DEF;
+
+ return mt9t001_set_output_control(mt9t001,
+ MT9T001_OUTPUT_CONTROL_CHIP_ENABLE,
+ 0);
+}
+
+static int mt9t001_power_on(struct mt9t001 *mt9t001)
+{
+ int ret;
+
+ /* Bring up the supplies */
+ ret = regulator_bulk_enable(ARRAY_SIZE(mt9t001->regulators),
+ mt9t001->regulators);
+ if (ret < 0)
+ return ret;
+
+ /* Enable clock */
+ ret = clk_prepare_enable(mt9t001->clk);
+ if (ret < 0)
+ regulator_bulk_disable(ARRAY_SIZE(mt9t001->regulators),
+ mt9t001->regulators);
+
+ return ret;
+}
+
+static void mt9t001_power_off(struct mt9t001 *mt9t001)
+{
+ regulator_bulk_disable(ARRAY_SIZE(mt9t001->regulators),
+ mt9t001->regulators);
+
+ clk_disable_unprepare(mt9t001->clk);
+}
+
+static int __mt9t001_set_power(struct mt9t001 *mt9t001, bool on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9t001->subdev);
+ int ret;
+
+ if (!on) {
+ mt9t001_power_off(mt9t001);
+ return 0;
+ }
+
+ ret = mt9t001_power_on(mt9t001);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9t001_reset(mt9t001);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to reset the camera\n");
+ return ret;
+ }
+
+ return v4l2_ctrl_handler_setup(&mt9t001->ctrls);
+}
+
/* -----------------------------------------------------------------------------
* V4L2 subdev video operations
*/
@@ -195,6 +275,7 @@ static int mt9t001_s_stream(struct v4l2_subdev *subdev, int enable)
{
const u16 mode = MT9T001_OUTPUT_CONTROL_CHIP_ENABLE;
struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct mt9t001_platform_data *pdata = client->dev.platform_data;
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
struct v4l2_mbus_framefmt *format = &mt9t001->format;
struct v4l2_rect *crop = &mt9t001->crop;
@@ -205,6 +286,14 @@ static int mt9t001_s_stream(struct v4l2_subdev *subdev, int enable)
if (!enable)
return mt9t001_set_output_control(mt9t001, mode, 0);
+ /* Configure the pixel clock polarity */
+ if (pdata->clk_pol) {
+ ret = mt9t001_write(client, MT9T001_PIXEL_CLOCK,
+ MT9T001_PIXEL_CLOCK_INVERT);
+ if (ret < 0)
+ return ret;
+ }
+
/* Configure the window size and row/column bin */
hratio = DIV_ROUND_CLOSEST(crop->width, format->width);
vratio = DIV_ROUND_CLOSEST(crop->height, format->height);
@@ -630,9 +719,67 @@ static const struct v4l2_ctrl_config mt9t001_gains[] = {
};
/* -----------------------------------------------------------------------------
+ * V4L2 subdev core operations
+ */
+
+static int mt9t001_set_power(struct v4l2_subdev *subdev, int on)
+{
+ struct mt9t001 *mt9t001 = to_mt9t001(subdev);
+ int ret = 0;
+
+ mutex_lock(&mt9t001->power_lock);
+
+ /* If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (mt9t001->power_count == !on) {
+ ret = __mt9t001_set_power(mt9t001, !!on);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* Update the power count. */
+ mt9t001->power_count += on ? 1 : -1;
+ WARN_ON(mt9t001->power_count < 0);
+
+out:
+ mutex_unlock(&mt9t001->power_lock);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
* V4L2 subdev internal operations
*/
+static int mt9t001_registered(struct v4l2_subdev *subdev)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct mt9t001 *mt9t001 = to_mt9t001(subdev);
+ s32 data;
+ int ret;
+
+ ret = mt9t001_power_on(mt9t001);
+ if (ret < 0) {
+ dev_err(&client->dev, "MT9T001 power up failed\n");
+ return ret;
+ }
+
+ /* Read out the chip version register */
+ data = mt9t001_read(client, MT9T001_CHIP_VERSION);
+ mt9t001_power_off(mt9t001);
+
+ if (data != MT9T001_CHIP_ID) {
+ dev_err(&client->dev,
+ "MT9T001 not detected, wrong version 0x%04x\n", data);
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev, "MT9T001 detected at address 0x%02x\n",
+ client->addr);
+
+ return 0;
+}
+
static int mt9t001_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *format;
@@ -651,9 +798,18 @@ static int mt9t001_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
format->field = V4L2_FIELD_NONE;
format->colorspace = V4L2_COLORSPACE_SRGB;
- return 0;
+ return mt9t001_set_power(subdev, 1);
+}
+
+static int mt9t001_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return mt9t001_set_power(subdev, 0);
}
+static struct v4l2_subdev_core_ops mt9t001_subdev_core_ops = {
+ .s_power = mt9t001_set_power,
+};
+
static struct v4l2_subdev_video_ops mt9t001_subdev_video_ops = {
.s_stream = mt9t001_s_stream,
};
@@ -668,58 +824,17 @@ static struct v4l2_subdev_pad_ops mt9t001_subdev_pad_ops = {
};
static struct v4l2_subdev_ops mt9t001_subdev_ops = {
+ .core = &mt9t001_subdev_core_ops,
.video = &mt9t001_subdev_video_ops,
.pad = &mt9t001_subdev_pad_ops,
};
static struct v4l2_subdev_internal_ops mt9t001_subdev_internal_ops = {
+ .registered = mt9t001_registered,
.open = mt9t001_open,
+ .close = mt9t001_close,
};
-static int mt9t001_video_probe(struct i2c_client *client)
-{
- struct mt9t001_platform_data *pdata = client->dev.platform_data;
- s32 data;
- int ret;
-
- dev_info(&client->dev, "Probing MT9T001 at address 0x%02x\n",
- client->addr);
-
- /* Reset the chip and stop data read out */
- ret = mt9t001_write(client, MT9T001_RESET, 1);
- if (ret < 0)
- return ret;
-
- ret = mt9t001_write(client, MT9T001_RESET, 0);
- if (ret < 0)
- return ret;
-
- ret = mt9t001_write(client, MT9T001_OUTPUT_CONTROL, 0);
- if (ret < 0)
- return ret;
-
- /* Configure the pixel clock polarity */
- if (pdata->clk_pol) {
- ret = mt9t001_write(client, MT9T001_PIXEL_CLOCK,
- MT9T001_PIXEL_CLOCK_INVERT);
- if (ret < 0)
- return ret;
- }
-
- /* Read and check the sensor version */
- data = mt9t001_read(client, MT9T001_CHIP_VERSION);
- if (data != MT9T001_CHIP_ID) {
- dev_err(&client->dev, "MT9T001 not detected, wrong version "
- "0x%04x\n", data);
- return -ENODEV;
- }
-
- dev_info(&client->dev, "MT9T001 detected at address 0x%02x\n",
- client->addr);
-
- return ret;
-}
-
static int mt9t001_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
@@ -740,14 +855,28 @@ static int mt9t001_probe(struct i2c_client *client,
return -EIO;
}
- ret = mt9t001_video_probe(client);
- if (ret < 0)
- return ret;
-
mt9t001 = devm_kzalloc(&client->dev, sizeof(*mt9t001), GFP_KERNEL);
if (!mt9t001)
return -ENOMEM;
+ mutex_init(&mt9t001->power_lock);
+ mt9t001->output_control = MT9T001_OUTPUT_CONTROL_DEF;
+
+ mt9t001->regulators[0].supply = "vdd";
+ mt9t001->regulators[1].supply = "vaa";
+
+ ret = devm_regulator_bulk_get(&client->dev, 2, mt9t001->regulators);
+ if (ret < 0) {
+ dev_err(&client->dev, "Unable to get regulators\n");
+ return ret;
+ }
+
+ mt9t001->clk = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(mt9t001->clk)) {
+ dev_err(&client->dev, "Unable to get clock\n");
+ return PTR_ERR(mt9t001->clk);
+ }
+
v4l2_ctrl_handler_init(&mt9t001->ctrls, ARRAY_SIZE(mt9t001_ctrls) +
ARRAY_SIZE(mt9t001_gains) + 4);
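The s_power handler added above is reference counted, so each caller only has to keep its own power-up and power-down calls balanced. A minimal sketch of how a host or bridge driver would drive it, assuming sd points at this registered subdev:

	int ret;

	/* Power the sensor up before configuring or streaming. */
	ret = v4l2_subdev_call(sd, core, s_power, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		return ret;

	/* ... use the sensor ... */

	/* Drop the reference once the pipeline is torn down. */
	v4l2_subdev_call(sd, core, s_power, 0);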
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index f74698cf14c9..47e475319a24 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -1,7 +1,7 @@
/*
* mt9v011 -Micron 1/4-Inch VGA Digital Image Sensor
*
- * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
+ * Copyright (c) 2009 Mauro Carvalho Chehab
* This code is placed under the terms of the GNU General Public License v2
*/
@@ -16,7 +16,7 @@
#include <media/mt9v011.h>
MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
static int debug;
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 36c504b78f2c..40172b8d8ea2 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -317,8 +317,14 @@ static int mt9v032_power_on(struct mt9v032 *mt9v032)
struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
int ret;
- clk_set_rate(mt9v032->clk, mt9v032->sysclk);
- clk_prepare_enable(mt9v032->clk);
+ ret = clk_set_rate(mt9v032->clk, mt9v032->sysclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(mt9v032->clk);
+ if (ret)
+ return ret;
+
udelay(1);
/* Reset the chip and stop data read out */
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index e7f555cc827a..a4459301b5f8 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -15,7 +15,7 @@
* GNU General Public License for more details.
*/
-#include <linux/sizes.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/gpio.h>
@@ -23,7 +23,9 @@
#include <linux/init.h>
#include <linux/media.h>
#include <linux/module.h>
+#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/videodev2.h>
@@ -33,6 +35,7 @@
#include <media/v4l2-subdev.h>
#include <media/v4l2-mediabus.h>
#include <media/s5c73m3.h>
+#include <media/v4l2-of.h>
#include "s5c73m3.h"
@@ -46,6 +49,8 @@ static int update_fw;
module_param(update_fw, int, 0644);
#define S5C73M3_EMBEDDED_DATA_MAXLEN SZ_4K
+#define S5C73M3_MIPI_DATA_LANES 4
+#define S5C73M3_CLK_NAME "cis_extclk"
static const char * const s5c73m3_supply_names[S5C73M3_MAX_SUPPLIES] = {
"vdd-int", /* Digital Core supply (1.2V), CAM_ISP_CORE_1.2V */
@@ -1355,9 +1360,20 @@ static int __s5c73m3_power_on(struct s5c73m3 *state)
for (i = 0; i < S5C73M3_MAX_SUPPLIES; i++) {
ret = regulator_enable(state->supplies[i].consumer);
if (ret)
- goto err;
+ goto err_reg_dis;
}
+ ret = clk_set_rate(state->clock, state->mclk_frequency);
+ if (ret < 0)
+ goto err_reg_dis;
+
+ ret = clk_prepare_enable(state->clock);
+ if (ret < 0)
+ goto err_reg_dis;
+
+ v4l2_dbg(1, s5c73m3_dbg, &state->oif_sd, "clock frequency: %ld\n",
+ clk_get_rate(state->clock));
+
s5c73m3_gpio_deassert(state, STBY);
usleep_range(100, 200);
@@ -1365,7 +1381,8 @@ static int __s5c73m3_power_on(struct s5c73m3 *state)
usleep_range(50, 100);
return 0;
-err:
+
+err_reg_dis:
for (--i; i >= 0; i--)
regulator_disable(state->supplies[i].consumer);
return ret;
@@ -1380,6 +1397,9 @@ static int __s5c73m3_power_off(struct s5c73m3 *state)
if (s5c73m3_gpio_assert(state, STBY))
usleep_range(100, 200);
+
+ clk_disable_unprepare(state->clock);
+
state->streaming = 0;
state->isp_ready = 0;
@@ -1388,6 +1408,7 @@ static int __s5c73m3_power_off(struct s5c73m3 *state)
if (ret)
goto err;
}
+
return 0;
err:
for (++i; i < S5C73M3_MAX_SUPPLIES; i++) {
@@ -1396,6 +1417,8 @@ err:
v4l2_err(&state->oif_sd, "Failed to reenable %s: %d\n",
state->supplies[i].supply, r);
}
+
+ clk_prepare_enable(state->clock);
return ret;
}
@@ -1451,17 +1474,6 @@ static int s5c73m3_oif_registered(struct v4l2_subdev *sd)
S5C73M3_JPEG_PAD, &state->oif_sd.entity, OIF_JPEG_PAD,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
- mutex_lock(&state->lock);
- ret = __s5c73m3_power_on(state);
- if (ret == 0)
- s5c73m3_get_fw_version(state);
-
- __s5c73m3_power_off(state);
- mutex_unlock(&state->lock);
-
- v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n",
- __func__, ret ? "failed" : "succeeded", ret);
-
return ret;
}
@@ -1519,41 +1531,112 @@ static const struct v4l2_subdev_ops oif_subdev_ops = {
.video = &s5c73m3_oif_video_ops,
};
-static int s5c73m3_configure_gpios(struct s5c73m3 *state,
- const struct s5c73m3_platform_data *pdata)
+static int s5c73m3_configure_gpios(struct s5c73m3 *state)
+{
+ static const char * const gpio_names[] = {
+ "S5C73M3_STBY", "S5C73M3_RST"
+ };
+ struct i2c_client *c = state->i2c_client;
+ struct s5c73m3_gpio *g = state->gpio;
+ int ret, i;
+
+ for (i = 0; i < GPIO_NUM; ++i) {
+ unsigned int flags = GPIOF_DIR_OUT;
+ if (g[i].level)
+ flags |= GPIOF_INIT_HIGH;
+ ret = devm_gpio_request_one(&c->dev, g[i].gpio, flags,
+ gpio_names[i]);
+ if (ret) {
+ v4l2_err(c, "failed to request gpio %s\n",
+ gpio_names[i]);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int s5c73m3_parse_gpios(struct s5c73m3 *state)
+{
+ static const char * const prop_names[] = {
+ "standby-gpios", "xshutdown-gpios",
+ };
+ struct device *dev = &state->i2c_client->dev;
+ struct device_node *node = dev->of_node;
+ int ret, i;
+
+ for (i = 0; i < GPIO_NUM; ++i) {
+ enum of_gpio_flags of_flags;
+
+ ret = of_get_named_gpio_flags(node, prop_names[i],
+ 0, &of_flags);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse %s DT property\n",
+ prop_names[i]);
+ return -EINVAL;
+ }
+ state->gpio[i].gpio = ret;
+ state->gpio[i].level = !(of_flags & OF_GPIO_ACTIVE_LOW);
+ }
+ return 0;
+}
+
+static int s5c73m3_get_platform_data(struct s5c73m3 *state)
{
struct device *dev = &state->i2c_client->dev;
- const struct s5c73m3_gpio *gpio;
- unsigned long flags;
+ const struct s5c73m3_platform_data *pdata = dev->platform_data;
+ struct device_node *node = dev->of_node;
+ struct device_node *node_ep;
+ struct v4l2_of_endpoint ep;
int ret;
- state->gpio[STBY].gpio = -EINVAL;
- state->gpio[RST].gpio = -EINVAL;
+ if (!node) {
+ if (!pdata) {
+ dev_err(dev, "Platform data not specified\n");
+ return -EINVAL;
+ }
- gpio = &pdata->gpio_stby;
- if (gpio_is_valid(gpio->gpio)) {
- flags = (gpio->level ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW)
- | GPIOF_EXPORT;
- ret = devm_gpio_request_one(dev, gpio->gpio, flags,
- "S5C73M3_STBY");
- if (ret < 0)
- return ret;
+ state->mclk_frequency = pdata->mclk_frequency;
+ state->gpio[STBY] = pdata->gpio_stby;
+ state->gpio[RST] = pdata->gpio_reset;
+ return 0;
+ }
+
+ state->clock = devm_clk_get(dev, S5C73M3_CLK_NAME);
+ if (IS_ERR(state->clock))
+ return PTR_ERR(state->clock);
- state->gpio[STBY] = *gpio;
+ if (of_property_read_u32(node, "clock-frequency",
+ &state->mclk_frequency)) {
+ state->mclk_frequency = S5C73M3_DEFAULT_MCLK_FREQ;
+ dev_info(dev, "using default %u Hz clock frequency\n",
+ state->mclk_frequency);
}
- gpio = &pdata->gpio_reset;
- if (gpio_is_valid(gpio->gpio)) {
- flags = (gpio->level ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW)
- | GPIOF_EXPORT;
- ret = devm_gpio_request_one(dev, gpio->gpio, flags,
- "S5C73M3_RST");
- if (ret < 0)
- return ret;
+ ret = s5c73m3_parse_gpios(state);
+ if (ret < 0)
+ return -EINVAL;
- state->gpio[RST] = *gpio;
+ node_ep = v4l2_of_get_next_endpoint(node, NULL);
+ if (!node_ep) {
+ dev_warn(dev, "no endpoint defined for node: %s\n",
+ node->full_name);
+ return 0;
}
+ v4l2_of_parse_endpoint(node_ep, &ep);
+ of_node_put(node_ep);
+
+ if (ep.bus_type != V4L2_MBUS_CSI2) {
+ dev_err(dev, "unsupported bus type\n");
+ return -EINVAL;
+ }
+ /*
+ * The number of MIPI CSI-2 data lanes is currently not configurable;
+ * a default value of 4 lanes is always used.
+ */
+ if (ep.bus.mipi_csi2.num_data_lanes != S5C73M3_MIPI_DATA_LANES)
+ dev_info(dev, "falling back to 4 MIPI CSI-2 data lanes\n");
+
return 0;
}
@@ -1561,21 +1644,20 @@ static int s5c73m3_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- const struct s5c73m3_platform_data *pdata = client->dev.platform_data;
struct v4l2_subdev *sd;
struct v4l2_subdev *oif_sd;
struct s5c73m3 *state;
int ret, i;
- if (pdata == NULL) {
- dev_err(&client->dev, "Platform data not specified\n");
- return -EINVAL;
- }
-
state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
+ state->i2c_client = client;
+ ret = s5c73m3_get_platform_data(state);
+ if (ret < 0)
+ return ret;
+
mutex_init(&state->lock);
sd = &state->sensor_sd;
oif_sd = &state->oif_sd;
@@ -1613,11 +1695,7 @@ static int s5c73m3_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- state->mclk_frequency = pdata->mclk_frequency;
- state->bus_type = pdata->bus_type;
- state->i2c_client = client;
-
- ret = s5c73m3_configure_gpios(state, pdata);
+ ret = s5c73m3_configure_gpios(state);
if (ret)
goto out_err;
@@ -1651,9 +1729,29 @@ static int s5c73m3_probe(struct i2c_client *client,
if (ret < 0)
goto out_err;
+ oif_sd->dev = dev;
+
+ ret = __s5c73m3_power_on(state);
+ if (ret < 0)
+ goto out_err1;
+
+ ret = s5c73m3_get_fw_version(state);
+ __s5c73m3_power_off(state);
+
+ if (ret < 0) {
+ dev_err(dev, "Device detection failed: %d\n", ret);
+ goto out_err1;
+ }
+
+ ret = v4l2_async_register_subdev(oif_sd);
+ if (ret < 0)
+ goto out_err1;
+
v4l2_info(sd, "%s: completed successfully\n", __func__);
return 0;
+out_err1:
+ s5c73m3_unregister_spi_driver(state);
out_err:
media_entity_cleanup(&sd->entity);
return ret;
@@ -1665,7 +1763,7 @@ static int s5c73m3_remove(struct i2c_client *client)
struct s5c73m3 *state = oif_sd_to_s5c73m3(oif_sd);
struct v4l2_subdev *sensor_sd = &state->sensor_sd;
- v4l2_device_unregister_subdev(oif_sd);
+ v4l2_async_unregister_subdev(oif_sd);
v4l2_ctrl_handler_free(oif_sd->ctrl_handler);
media_entity_cleanup(&oif_sd->entity);
@@ -1684,8 +1782,17 @@ static const struct i2c_device_id s5c73m3_id[] = {
};
MODULE_DEVICE_TABLE(i2c, s5c73m3_id);
+#ifdef CONFIG_OF
+static const struct of_device_id s5c73m3_of_match[] = {
+ { .compatible = "samsung,s5c73m3" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s5c73m3_of_match);
+#endif
+
static struct i2c_driver s5c73m3_i2c_driver = {
.driver = {
+ .of_match_table = of_match_ptr(s5c73m3_of_match),
.name = DRIVER_NAME,
},
.probe = s5c73m3_probe,
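The clock-frequency handling above follows a common device tree pattern: the property is optional and a driver default takes over when it is absent. A minimal sketch, assuming dev points at the probed device and 24 MHz is the fallback:

	u32 freq;

	if (of_property_read_u32(dev->of_node, "clock-frequency", &freq))
		freq = 24000000U;	/* property missing, use the default */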
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
index 8079e26eb5e2..f60b265b4da1 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
@@ -27,6 +27,11 @@
#define S5C73M3_SPI_DRV_NAME "S5C73M3-SPI"
+static const struct of_device_id s5c73m3_spi_ids[] = {
+ { .compatible = "samsung,s5c73m3" },
+ { }
+};
+
enum spi_direction {
SPI_DIR_RX,
SPI_DIR_TX
@@ -146,6 +151,7 @@ int s5c73m3_register_spi_driver(struct s5c73m3 *state)
spidrv->driver.name = S5C73M3_SPI_DRV_NAME;
spidrv->driver.bus = &spi_bus_type;
spidrv->driver.owner = THIS_MODULE;
+ spidrv->driver.of_match_table = s5c73m3_spi_ids;
return spi_register_driver(spidrv);
}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
index 9dfa516f6944..9656b6723dc6 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -17,6 +17,7 @@
#ifndef S5C73M3_H_
#define S5C73M3_H_
+#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>
#include <media/v4l2-common.h>
@@ -321,6 +322,7 @@ enum s5c73m3_oif_pads {
#define S5C73M3_MAX_SUPPLIES 6
+#define S5C73M3_DEFAULT_MCLK_FREQ 24000000U
struct s5c73m3_ctrls {
struct v4l2_ctrl_handler handler;
@@ -391,6 +393,8 @@ struct s5c73m3 {
struct regulator_bulk_data supplies[S5C73M3_MAX_SUPPLIES];
struct s5c73m3_gpio gpio[GPIO_NUM];
+ struct clk *clock;
+
/* External master clock frequency */
u32 mclk_frequency;
/* Video bus type - MIPI-CSI2/parallel */
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 77e10e0fd8d6..2d768ef67cc5 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -21,6 +21,7 @@
#include <linux/media.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -1855,7 +1856,7 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
if (ret < 0)
return ret;
- node_ep = v4l2_of_get_next_endpoint(node, NULL);
+ node_ep = of_graph_get_next_endpoint(node, NULL);
if (!node_ep) {
dev_err(dev, "no endpoint defined at node %s\n",
node->full_name);
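The v4l2_of_get_next_endpoint() to of_graph_get_next_endpoint() conversions in this series are mechanical; the helper moved to the generic OF graph API with the same semantics, including the reference taken on the returned node. A minimal sketch of the lookup, assuming client->dev.of_node may or may not describe an endpoint:

	#include <linux/of_graph.h>

	struct device_node *ep;

	ep = of_graph_get_next_endpoint(client->dev.of_node, NULL);
	if (!ep)
		return NULL;		/* no endpoint in the device tree */

	/* ... read the endpoint properties ... */
	of_node_put(ep);		/* drop the reference taken above */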
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
new file mode 100644
index 000000000000..7bc2271ca009
--- /dev/null
+++ b/drivers/media/i2c/s5k6a3.c
@@ -0,0 +1,389 @@
+/*
+ * Samsung S5K6A3 image sensor driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#define S5K6A3_SENSOR_MAX_WIDTH 1412
+#define S5K6A3_SENSOR_MAX_HEIGHT 1412
+#define S5K6A3_SENSOR_MIN_WIDTH 32
+#define S5K6A3_SENSOR_MIN_HEIGHT 32
+
+#define S5K6A3_DEFAULT_WIDTH 1296
+#define S5K6A3_DEFAULT_HEIGHT 732
+
+#define S5K6A3_DRV_NAME "S5K6A3"
+#define S5K6A3_CLK_NAME "extclk"
+#define S5K6A3_DEFAULT_CLK_FREQ 24000000U
+
+enum {
+ S5K6A3_SUPP_VDDA,
+ S5K6A3_SUPP_VDDIO,
+ S5K6A3_SUPP_AFVDD,
+ S5K6A3_NUM_SUPPLIES,
+};
+
+/**
+ * struct s5k6a3 - fimc-is sensor data structure
+ * @dev: pointer to this I2C client device structure
+ * @subdev: the image sensor's v4l2 subdev
+ * @pad: subdev media source pad
+ * @supplies: image sensor's voltage regulator supplies
+ * @gpio_reset: GPIO connected to the sensor's reset pin
+ * @lock: mutex protecting the structure's members below
+ * @format: media bus format at the sensor's source pad
+ * @clock: the sensor's master clock
+ * @clock_frequency: frequency of the master clock, in Hz
+ * @power_count: stream/power use count balancing the s_power() calls
+ */
+struct s5k6a3 {
+ struct device *dev;
+ struct v4l2_subdev subdev;
+ struct media_pad pad;
+ struct regulator_bulk_data supplies[S5K6A3_NUM_SUPPLIES];
+ int gpio_reset;
+ struct mutex lock;
+ struct v4l2_mbus_framefmt format;
+ struct clk *clock;
+ u32 clock_frequency;
+ int power_count;
+};
+
+static const char * const s5k6a3_supply_names[] = {
+ [S5K6A3_SUPP_VDDA] = "svdda",
+ [S5K6A3_SUPP_VDDIO] = "svddio",
+ [S5K6A3_SUPP_AFVDD] = "afvdd",
+};
+
+static inline struct s5k6a3 *sd_to_s5k6a3(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct s5k6a3, subdev);
+}
+
+static const struct v4l2_mbus_framefmt s5k6a3_formats[] = {
+ {
+ .code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ }
+};
+
+static const struct v4l2_mbus_framefmt *find_sensor_format(
+ struct v4l2_mbus_framefmt *mf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s5k6a3_formats); i++)
+ if (mf->code == s5k6a3_formats[i].code)
+ return &s5k6a3_formats[i];
+
+ return &s5k6a3_formats[0];
+}
+
+static int s5k6a3_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(s5k6a3_formats))
+ return -EINVAL;
+
+ code->code = s5k6a3_formats[code->index].code;
+ return 0;
+}
+
+static void s5k6a3_try_format(struct v4l2_mbus_framefmt *mf)
+{
+ const struct v4l2_mbus_framefmt *fmt;
+
+ fmt = find_sensor_format(mf);
+ mf->code = fmt->code;
+ v4l_bound_align_image(&mf->width, S5K6A3_SENSOR_MIN_WIDTH,
+ S5K6A3_SENSOR_MAX_WIDTH, 0,
+ &mf->height, S5K6A3_SENSOR_MIN_HEIGHT,
+ S5K6A3_SENSOR_MAX_HEIGHT, 0, 0);
+}
+
+static struct v4l2_mbus_framefmt *__s5k6a3_get_format(
+ struct s5k6a3 *sensor, struct v4l2_subdev_fh *fh,
+ u32 pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return fh ? v4l2_subdev_get_try_format(fh, pad) : NULL;
+
+ return &sensor->format;
+}
+
+static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct s5k6a3 *sensor = sd_to_s5k6a3(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ s5k6a3_try_format(&fmt->format);
+
+ mf = __s5k6a3_get_format(sensor, fh, fmt->pad, fmt->which);
+ if (mf) {
+ mutex_lock(&sensor->lock);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ *mf = fmt->format;
+ mutex_unlock(&sensor->lock);
+ }
+ return 0;
+}
+
+static int s5k6a3_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct s5k6a3 *sensor = sd_to_s5k6a3(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = __s5k6a3_get_format(sensor, fh, fmt->pad, fmt->which);
+
+ mutex_lock(&sensor->lock);
+ fmt->format = *mf;
+ mutex_unlock(&sensor->lock);
+ return 0;
+}
+
+static struct v4l2_subdev_pad_ops s5k6a3_pad_ops = {
+ .enum_mbus_code = s5k6a3_enum_mbus_code,
+ .get_fmt = s5k6a3_get_fmt,
+ .set_fmt = s5k6a3_set_fmt,
+};
+
+static int s5k6a3_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
+
+ *format = s5k6a3_formats[0];
+ format->width = S5K6A3_DEFAULT_WIDTH;
+ format->height = S5K6A3_DEFAULT_HEIGHT;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops s5k6a3_sd_internal_ops = {
+ .open = s5k6a3_open,
+};
+
+static int __s5k6a3_power_on(struct s5k6a3 *sensor)
+{
+ int i = S5K6A3_SUPP_VDDA;
+ int ret;
+
+ ret = clk_set_rate(sensor->clock, sensor->clock_frequency);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_get(sensor->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_enable(sensor->supplies[i].consumer);
+ if (ret < 0)
+ goto error_rpm_put;
+
+ ret = clk_prepare_enable(sensor->clock);
+ if (ret < 0)
+ goto error_reg_dis;
+
+ for (i++; i < S5K6A3_NUM_SUPPLIES; i++) {
+ ret = regulator_enable(sensor->supplies[i].consumer);
+ if (ret < 0)
+ goto error_reg_dis;
+ }
+
+ gpio_set_value(sensor->gpio_reset, 1);
+ usleep_range(600, 800);
+ gpio_set_value(sensor->gpio_reset, 0);
+ usleep_range(600, 800);
+ gpio_set_value(sensor->gpio_reset, 1);
+
+ /* Delay needed for the sensor initialization */
+ msleep(20);
+ return 0;
+
+error_reg_dis:
+ for (--i; i >= 0; --i)
+ regulator_disable(sensor->supplies[i].consumer);
+error_rpm_put:
+ pm_runtime_put(sensor->dev);
+ return ret;
+}
+
+static int __s5k6a3_power_off(struct s5k6a3 *sensor)
+{
+ int i;
+
+ gpio_set_value(sensor->gpio_reset, 0);
+
+ for (i = S5K6A3_NUM_SUPPLIES - 1; i >= 0; i--)
+ regulator_disable(sensor->supplies[i].consumer);
+
+ clk_disable_unprepare(sensor->clock);
+ pm_runtime_put(sensor->dev);
+ return 0;
+}
+
+static int s5k6a3_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct s5k6a3 *sensor = sd_to_s5k6a3(sd);
+ int ret = 0;
+
+ mutex_lock(&sensor->lock);
+
+ if (sensor->power_count == !on) {
+ if (on)
+ ret = __s5k6a3_power_on(sensor);
+ else
+ ret = __s5k6a3_power_off(sensor);
+
+ if (ret == 0)
+ sensor->power_count += on ? 1 : -1;
+ }
+
+ mutex_unlock(&sensor->lock);
+ return ret;
+}
+
+static struct v4l2_subdev_core_ops s5k6a3_core_ops = {
+ .s_power = s5k6a3_s_power,
+};
+
+static struct v4l2_subdev_ops s5k6a3_subdev_ops = {
+ .core = &s5k6a3_core_ops,
+ .pad = &s5k6a3_pad_ops,
+};
+
+static int s5k6a3_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct s5k6a3 *sensor;
+ struct v4l2_subdev *sd;
+ int gpio, i, ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ mutex_init(&sensor->lock);
+ sensor->gpio_reset = -EINVAL;
+ sensor->clock = ERR_PTR(-EINVAL);
+ sensor->dev = dev;
+
+ sensor->clock = devm_clk_get(sensor->dev, S5K6A3_CLK_NAME);
+ if (IS_ERR(sensor->clock))
+ return PTR_ERR(sensor->clock);
+
+ gpio = of_get_gpio_flags(dev->of_node, 0, NULL);
+ if (!gpio_is_valid(gpio))
+ return gpio;
+
+ ret = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_LOW,
+ S5K6A3_DRV_NAME);
+ if (ret < 0)
+ return ret;
+
+ sensor->gpio_reset = gpio;
+
+ if (of_property_read_u32(dev->of_node, "clock-frequency",
+ &sensor->clock_frequency)) {
+ sensor->clock_frequency = S5K6A3_DEFAULT_CLK_FREQ;
+ dev_info(dev, "using default %u Hz clock frequency\n",
+ sensor->clock_frequency);
+ }
+
+ for (i = 0; i < S5K6A3_NUM_SUPPLIES; i++)
+ sensor->supplies[i].supply = s5k6a3_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&client->dev, S5K6A3_NUM_SUPPLIES,
+ sensor->supplies);
+ if (ret < 0)
+ return ret;
+
+ sd = &sensor->subdev;
+ v4l2_i2c_subdev_init(sd, client, &s5k6a3_subdev_ops);
+ sensor->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &s5k6a3_sd_internal_ops;
+
+ sensor->format.code = s5k6a3_formats[0].code;
+ sensor->format.width = S5K6A3_DEFAULT_WIDTH;
+ sensor->format.height = S5K6A3_DEFAULT_HEIGHT;
+
+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, 1, &sensor->pad, 0);
+ if (ret < 0)
+ return ret;
+
+ pm_runtime_no_callbacks(dev);
+ pm_runtime_enable(dev);
+
+ ret = v4l2_async_register_subdev(sd);
+
+ if (ret < 0) {
+ pm_runtime_disable(&client->dev);
+ media_entity_cleanup(&sd->entity);
+ }
+
+ return ret;
+}
+
+static int s5k6a3_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ pm_runtime_disable(&client->dev);
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ return 0;
+}
+
+static const struct i2c_device_id s5k6a3_ids[] = {
+ { }
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id s5k6a3_of_match[] = {
+ { .compatible = "samsung,s5k6a3" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s5k6a3_of_match);
+#endif
+
+static struct i2c_driver s5k6a3_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(s5k6a3_of_match),
+ .name = S5K6A3_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = s5k6a3_probe,
+ .remove = s5k6a3_remove,
+ .id_table = s5k6a3_ids,
+};
+
+module_i2c_driver(s5k6a3_driver);
+
+MODULE_DESCRIPTION("S5K6A3 image sensor subdev driver");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/sr030pc30.c b/drivers/media/i2c/sr030pc30.c
index ae9432637fcb..118f8ee88465 100644
--- a/drivers/media/i2c/sr030pc30.c
+++ b/drivers/media/i2c/sr030pc30.c
@@ -8,7 +8,7 @@
* and HeungJun Kim <riverful.kim@samsung.com>.
*
* Based on mt9v011 Micron Digital Image Sensor driver
- * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
+ * Copyright (c) 2009 Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index 04139eec8c4e..f72561e79739 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -217,8 +217,8 @@ static void ths8200_core_init(struct v4l2_subdev *sd)
/* Disable embedded syncs on the output by setting
* the amplitude to zero for all channels.
*/
- ths8200_write(sd, THS8200_DTG1_Y_SYNC_MSB, 0x2a);
- ths8200_write(sd, THS8200_DTG1_CBCR_SYNC_MSB, 0x2a);
+ ths8200_write(sd, THS8200_DTG1_Y_SYNC_MSB, 0x00);
+ ths8200_write(sd, THS8200_DTG1_CBCR_SYNC_MSB, 0x00);
}
static void ths8200_setup(struct v4l2_subdev *sd, struct v4l2_bt_timings *bt)
@@ -318,15 +318,15 @@ static void ths8200_setup(struct v4l2_subdev *sd, struct v4l2_bt_timings *bt)
(htotal(bt) >> 8) & 0x1f);
ths8200_write(sd, THS8200_DTG2_HLENGTH_HDLY_LSB, htotal(bt));
- /* v sync width transmitted */
- ths8200_write(sd, THS8200_DTG2_VLENGTH1_LSB, (bt->vsync) & 0xff);
+ /* v sync width transmitted (must add 1 to get correct output) */
+ ths8200_write(sd, THS8200_DTG2_VLENGTH1_LSB, (bt->vsync + 1) & 0xff);
ths8200_write_and_or(sd, THS8200_DTG2_VLENGTH1_MSB_VDLY1_MSB, 0x3f,
- ((bt->vsync) >> 2) & 0xc0);
+ ((bt->vsync + 1) >> 2) & 0xc0);
- /* The pixel value v sync is asserted on */
+ /* The pixel value v sync is asserted on (must add 1 to get correct output) */
ths8200_write_and_or(sd, THS8200_DTG2_VLENGTH1_MSB_VDLY1_MSB, 0xf8,
- (vtotal(bt)>>8) & 0x7);
- ths8200_write(sd, THS8200_DTG2_VDLY1_LSB, vtotal(bt));
+ ((vtotal(bt) + 1) >> 8) & 0x7);
+ ths8200_write(sd, THS8200_DTG2_VDLY1_LSB, vtotal(bt) + 1);
/* For progressive video vlength2 must be set to all 0 and vdly2 must
* be set to all 1.
@@ -336,11 +336,11 @@ static void ths8200_setup(struct v4l2_subdev *sd, struct v4l2_bt_timings *bt)
ths8200_write(sd, THS8200_DTG2_VDLY2_LSB, 0xff);
/* Internal delay factors to synchronize the sync pulses and the data */
- /* Experimental values delays (hor 4, ver 1) */
- ths8200_write(sd, THS8200_DTG2_HS_IN_DLY_MSB, (htotal(bt)>>8) & 0x1f);
- ths8200_write(sd, THS8200_DTG2_HS_IN_DLY_LSB, (htotal(bt) - 4) & 0xff);
+ /* Experimental values delays (hor 0, ver 0) */
+ ths8200_write(sd, THS8200_DTG2_HS_IN_DLY_MSB, 0);
+ ths8200_write(sd, THS8200_DTG2_HS_IN_DLY_LSB, 0);
ths8200_write(sd, THS8200_DTG2_VS_IN_DLY_MSB, 0);
- ths8200_write(sd, THS8200_DTG2_VS_IN_DLY_LSB, 1);
+ ths8200_write(sd, THS8200_DTG2_VS_IN_DLY_LSB, 0);
/* Polarity of received and transmitted sync signals */
if (bt->polarities & V4L2_DV_HSYNC_POS_POL) {
@@ -356,7 +356,7 @@ static void ths8200_setup(struct v4l2_subdev *sd, struct v4l2_bt_timings *bt)
/* Timing of video input bus is derived from HS, VS, and FID dedicated
* inputs
*/
- ths8200_write(sd, THS8200_DTG2_CNTL, 0x47 | polarity);
+ ths8200_write(sd, THS8200_DTG2_CNTL, 0x44 | polarity);
/* leave reset */
ths8200_s_stream(sd, true);
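As a quick check of the adjusted vsync programming above: with bt->vsync = 5 the value written is 6, so 0x06 lands in THS8200_DTG2_VLENGTH1_LSB while ((6) >> 2) & 0xc0 = 0 leaves register bits 7:6 of the shared MSB register (which carry value bits 9:8) cleared.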
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 83d85df4853a..ca001178c5bf 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -36,6 +36,7 @@
#include <linux/module.h>
#include <linux/v4l2-mediabus.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
@@ -1068,7 +1069,7 @@ tvp514x_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- endpoint = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
+ endpoint = of_graph_get_next_endpoint(client->dev.of_node, NULL);
if (!endpoint)
return NULL;
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 542d2528b3f9..4fd3688e1164 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -16,9 +16,9 @@
#include "tvp5150_reg.h"
-#define TVP5150_H_MAX 720
-#define TVP5150_V_MAX_525_60 480
-#define TVP5150_V_MAX_OTHERS 576
+#define TVP5150_H_MAX 720U
+#define TVP5150_V_MAX_525_60 480U
+#define TVP5150_V_MAX_OTHERS 576U
#define TVP5150_MAX_CROP_LEFT 511
#define TVP5150_MAX_CROP_TOP 127
#define TVP5150_CROP_SHIFT 2
@@ -29,7 +29,7 @@ MODULE_LICENSE("GPL");
static int debug;
-module_param(debug, int, 0);
+module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
struct tvp5150 {
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 912e1cccdd1c..c4e1e2cb3094 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -30,6 +30,7 @@
#include <linux/videodev2.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/v4l2-dv-timings.h>
#include <media/tvp7002.h>
#include <media/v4l2-async.h>
@@ -957,7 +958,7 @@ tvp7002_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- endpoint = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
+ endpoint = of_graph_get_next_endpoint(client->dev.of_node, NULL);
if (!endpoint)
return NULL;
diff --git a/drivers/media/parport/bw-qcam.c b/drivers/media/parport/bw-qcam.c
index d12bd33f39cb..8a0e84c7d495 100644
--- a/drivers/media/parport/bw-qcam.c
+++ b/drivers/media/parport/bw-qcam.c
@@ -667,13 +667,16 @@ static void buffer_queue(struct vb2_buffer *vb)
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}
-static int buffer_finish(struct vb2_buffer *vb)
+static void buffer_finish(struct vb2_buffer *vb)
{
struct qcam *qcam = vb2_get_drv_priv(vb->vb2_queue);
void *vbuf = vb2_plane_vaddr(vb, 0);
int size = vb->vb2_queue->plane_sizes[0];
int len;
+ if (!vb2_is_streaming(vb->vb2_queue))
+ return;
+
mutex_lock(&qcam->lock);
parport_claim_or_block(qcam->pdev);
@@ -691,7 +694,6 @@ static int buffer_finish(struct vb2_buffer *vb)
if (len != size)
vb->state = VB2_BUF_STATE_ERROR;
vb2_set_plane_payload(vb, 0, len);
- return 0;
}
static struct vb2_ops qcam_video_qops = {
@@ -965,7 +967,7 @@ static struct qcam *qcam_init(struct parport *port)
q->drv_priv = qcam;
q->ops = &qcam_video_qops;
q->mem_ops = &vb2_vmalloc_memops;
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
err = vb2_queue_init(q);
if (err < 0) {
v4l2_err(v4l2_dev, "couldn't init vb2_queue for %s.\n", port->name);
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index 6662b495b22c..d06963b3dcf3 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -2855,7 +2855,22 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
-
+ [BTTV_BOARD_KWORLD_VSTREAM_XPERT] = {
+ /* Pojar George <geoubuntu@gmail.com> */
+ .name = "Kworld V-Stream Xpert TV PVR878",
+ .video_inputs = 3,
+ /* .audio_inputs= 1, */
+ .svhs = 2,
+ .gpiomask = 0x001c0007,
+ .muxsel = MUXSEL(2, 3, 1, 1),
+ .gpiomux = { 0, 1, 2, 2 },
+ .gpiomute = 3,
+ .pll = PLL_28,
+ .tuner_type = TUNER_TENA_9533_DI,
+ .tuner_addr = ADDR_UNSET,
+ .has_remote = 1,
+ .has_radio = 1,
+ },
};
static const unsigned int bttv_num_tvcards = ARRAY_SIZE(bttv_tvcards);
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index f36821367d8d..5930bce16658 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -483,6 +483,7 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_ASKEY_CPH03X:
case BTTV_BOARD_CONCEPTRONIC_CTVFMI2:
case BTTV_BOARD_CONTVFMI:
+ case BTTV_BOARD_KWORLD_VSTREAM_XPERT:
ir_codes = RC_MAP_PIXELVIEW;
ir->mask_keycode = 0x001F00;
ir->mask_keyup = 0x006000;
diff --git a/drivers/media/pci/bt8xx/bttv.h b/drivers/media/pci/bt8xx/bttv.h
index df578efe03c9..bb5da349a46e 100644
--- a/drivers/media/pci/bt8xx/bttv.h
+++ b/drivers/media/pci/bt8xx/bttv.h
@@ -188,6 +188,7 @@
#define BTTV_BOARD_ADLINK_MPG24 0xa2
#define BTTV_BOARD_BT848_CAP_14 0xa3
#define BTTV_BOARD_CYBERVISION_CV06 0xa4
+#define BTTV_BOARD_KWORLD_VSTREAM_XPERT 0xa5
/* more card-specific defines */
#define PT2254_L_CHANNEL 0x10
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index b2c8c3439fea..ea272bcb38df 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -145,11 +145,12 @@ static int snd_cx18_init(struct v4l2_device *v4l2_dev)
/* This is a no-op for us. We'll use the cx->instance */
/* (2) Create a card instance */
- ret = snd_card_create(SNDRV_DEFAULT_IDX1, /* use first available id */
- SNDRV_DEFAULT_STR1, /* xid from end of shortname*/
- THIS_MODULE, 0, &sc);
+ ret = snd_card_new(&cx->pci_dev->dev,
+ SNDRV_DEFAULT_IDX1, /* use first available id */
+ SNDRV_DEFAULT_STR1, /* xid from end of shortname*/
+ THIS_MODULE, 0, &sc);
if (ret) {
- CX18_ALSA_ERR("%s: snd_card_create() failed with err %d\n",
+ CX18_ALSA_ERR("%s: snd_card_new() failed with err %d\n",
__func__, ret);
goto err_exit;
}
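The snd_card_create() to snd_card_new() conversions repeated in the ALSA-capable drivers here only add a parent device argument, which makes the later snd_card_set_dev() call unnecessary. A minimal sketch of the new-style allocation, assuming pdev is the driver's struct pci_dev:

	struct snd_card *card;
	int err;

	err = snd_card_new(&pdev->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
			   THIS_MODULE, 0, &card);
	if (err < 0)
		return err;
	/* card->dev is already set, so no snd_card_set_dev() is needed */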
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index c6c9bd58f8be..554798dcedd0 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -489,7 +489,8 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
return NULL;
}
- err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ err = snd_card_new(&dev->pci->dev,
+ SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
THIS_MODULE, sizeof(struct cx23885_audio_dev), &card);
if (err < 0)
goto error;
@@ -500,8 +501,6 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
chip->card = card;
spin_lock_init(&chip->lock);
- snd_card_set_dev(card, &dev->pci->dev);
-
err = snd_cx23885_pcm(chip, 0, "CX23885 Digital");
if (err < 0)
goto error;
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 05492053b473..4be01b3bd4f5 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -473,6 +473,7 @@ static struct ds3000_config tevii_ds3000_config = {
static struct ts2020_config tevii_ts2020_config = {
.tuner_address = 0x60,
.clk_out_div = 1,
+ .frequency_div = 1146000,
};
static struct cx24116_config dvbworld_cx24116_config = {
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 8a49e7c9eddd..097d0a0b5f57 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -346,7 +346,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
}
rc->dev.parent = &dev->pci->dev;
rc->driver_type = driver_type;
- rc->allowed_protos = allowed_protos;
+ rc_set_allowed_protocols(rc, allowed_protos);
rc->priv = kernel_ir;
rc->open = cx23885_input_ir_open;
rc->close = cx23885_input_ir_close;
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index b1e08c3e55cd..2dd5bcaa7e53 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -645,8 +645,9 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
return -ENOENT;
}
- err = snd_card_create(index[devno], id[devno], THIS_MODULE,
- sizeof(struct cx25821_audio_dev), &card);
+ err = snd_card_new(&dev->pci->dev, index[devno], id[devno],
+ THIS_MODULE,
+ sizeof(struct cx25821_audio_dev), &card);
if (err < 0) {
pr_info("DEBUG ERROR: cannot create snd_card_new in %s\n",
__func__);
@@ -682,8 +683,6 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
goto error;
}
- snd_card_set_dev(card, &chip->pci->dev);
-
strcpy(card->shortname, "cx25821");
sprintf(card->longname, "%s at 0x%lx irq %d", chip->dev->name,
chip->iobase, chip->irq);
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index d014206e7176..a72579a9f67f 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -852,8 +852,6 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
chip->irq = pci->irq;
synchronize_irq(chip->irq);
- snd_card_set_dev(card, &pci->dev);
-
*rchip = chip;
*core_ptr = core;
@@ -876,8 +874,8 @@ static int cx88_audio_initdev(struct pci_dev *pci,
return (-ENOENT);
}
- err = snd_card_create(index[devno], id[devno], THIS_MODULE,
- sizeof(snd_cx88_card_t), &card);
+ err = snd_card_new(&pci->dev, index[devno], id[devno], THIS_MODULE,
+ sizeof(snd_cx88_card_t), &card);
if (err < 0)
return err;
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index f29e18c72f44..f991696a6c59 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -469,7 +469,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
dev->timeout = 10 * 1000 * 1000; /* 10 ms */
} else {
dev->driver_type = RC_DRIVER_SCANCODE;
- dev->allowed_protos = rc_type;
+ rc_set_allowed_protocols(dev, rc_type);
}
ir->core = core;
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 9375f30d9a81..fb52bda8d45f 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -876,10 +876,8 @@ static int dvb_input_attach(struct ddb_input *input)
return -ENODEV;
if (tuner_attach_tda18271(input) < 0)
return -ENODEV;
- if (input->fe) {
- if (dvb_register_frontend(adap, input->fe) < 0)
- return -ENODEV;
- }
+ if (dvb_register_frontend(adap, input->fe) < 0)
+ return -ENODEV;
if (input->fe2) {
if (dvb_register_frontend(adap, input->fe2) < 0)
return -ENODEV;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index e970cface70e..39b52929755a 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -145,11 +145,12 @@ static int snd_ivtv_init(struct v4l2_device *v4l2_dev)
/* This is a no-op for us. We'll use the itv->instance */
/* (2) Create a card instance */
- ret = snd_card_create(SNDRV_DEFAULT_IDX1, /* use first available id */
- SNDRV_DEFAULT_STR1, /* xid from end of shortname*/
- THIS_MODULE, 0, &sc);
+ ret = snd_card_new(&itv->pdev->dev,
+ SNDRV_DEFAULT_IDX1, /* use first available id */
+ SNDRV_DEFAULT_STR1, /* xid from end of shortname*/
+ THIS_MODULE, 0, &sc);
if (ret) {
- IVTV_ALSA_ERR("%s: snd_card_create() failed with err %d\n",
+ IVTV_ALSA_ERR("%s: snd_card_new() failed with err %d\n",
__func__, ret);
goto err_exit;
}
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index dd67c8a400cc..e04a4d5d6672 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -1072,8 +1072,8 @@ static int alsa_card_saa7134_create(struct saa7134_dev *dev, int devnum)
if (!enable[devnum])
return -ENODEV;
- err = snd_card_create(index[devnum], id[devnum], THIS_MODULE,
- sizeof(snd_card_saa7134_t), &card);
+ err = snd_card_new(&dev->pci->dev, index[devnum], id[devnum],
+ THIS_MODULE, sizeof(snd_card_saa7134_t), &card);
if (err < 0)
return err;
@@ -1115,8 +1115,6 @@ static int alsa_card_saa7134_create(struct saa7134_dev *dev, int devnum)
if ((err = snd_card_saa7134_pcm(chip, 0)) < 0)
goto __nodev;
- snd_card_set_dev(card, &chip->pci->dev);
-
/* End of "creation" */
strcpy(card->shortname, "SAA7134");
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index c9b2350e92c8..6e4bdb90aa92 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -8045,8 +8045,8 @@ int saa7134_board_init2(struct saa7134_dev *dev)
break;
} /* switch() */
- /* initialize tuner */
- if (TUNER_ABSENT != dev->tuner_type) {
+ /* initialize tuner (don't do this when resuming) */
+ if (!dev->insuspend && TUNER_ABSENT != dev->tuner_type) {
int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
/* Note: radio tuner address is always filled in,
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index e5cfb6cfa18d..bb11443ed63e 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -327,7 +327,7 @@ static void buffer_queue(struct vb2_buffer *vb)
}
spin_unlock(&vip->lock);
}
-static int buffer_finish(struct vb2_buffer *vb)
+static void buffer_finish(struct vb2_buffer *vb)
{
struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
struct vip_buffer *vip_buf = to_vip_buffer(vb);
@@ -337,9 +337,8 @@ static int buffer_finish(struct vb2_buffer *vb)
list_del_init(&vip_buf->list);
spin_unlock(&vip->lock);
- vip_active_buf_next(vip);
-
- return 0;
+ if (vb2_is_streaming(vb->vb2_queue))
+ vip_active_buf_next(vip);
}
static int start_streaming(struct vb2_queue *vq, unsigned int count)
diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
index 6299d5dadb82..300bd3c94738 100644
--- a/drivers/media/pci/ttpci/av7110_hw.c
+++ b/drivers/media/pci/ttpci/av7110_hw.c
@@ -501,7 +501,7 @@ int av7110_fw_cmd(struct av7110 *av7110, int type, int com, int num, ...)
// dprintk(4, "%p\n", av7110);
- if (2 + num > sizeof(buf)) {
+ if (2 + num > ARRAY_SIZE(buf)) {
printk(KERN_WARNING
"%s: %s len=%d is too big!\n",
KBUILD_MODNAME, __func__, num);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index b2a4403940c5..c137abfa0c54 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -36,7 +36,7 @@ source "drivers/media/platform/blackfin/Kconfig"
config VIDEO_SH_VOU
tristate "SuperH VOU video output driver"
depends on MEDIA_CAMERA_SUPPORT
- depends on VIDEO_DEV && I2C
+ depends on VIDEO_DEV && I2C && HAS_DMA
depends on ARCH_SHMOBILE || COMPILE_TEST
select VIDEOBUF_DMA_CONTIG
help
diff --git a/drivers/media/platform/arv.c b/drivers/media/platform/arv.c
index e346d32d08ce..e9410e41ae0c 100644
--- a/drivers/media/platform/arv.c
+++ b/drivers/media/platform/arv.c
@@ -109,7 +109,7 @@ extern struct cpuinfo_m32r boot_cpu_data;
struct ar {
struct v4l2_device v4l2_dev;
struct video_device vdev;
- unsigned int start_capture; /* duaring capture in INT. mode. */
+ int start_capture; /* duaring capture in INT. mode. */
#if USE_INT
unsigned char *line_buff; /* DMA line buffer */
#endif
@@ -307,11 +307,11 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
/*
* Okay, kick AR LSI to invoke an interrupt
*/
- ar->start_capture = 0;
+ ar->start_capture = -1;
ar_outl(arvcr1 | ARVCR1_HIEN, ARVCR1);
local_irq_restore(flags);
/* .... AR interrupts .... */
- interruptible_sleep_on(&ar->wait);
+ wait_event_interruptible(ar->wait, ar->start_capture == 0);
if (signal_pending(current)) {
printk(KERN_ERR "arv: interrupted while get frame data.\n");
ret = -EINTR;
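interruptible_sleep_on() is inherently racy, since a wake-up that arrives between kicking the hardware and going to sleep is lost, which is why the code above now waits on an explicit condition. A minimal sketch of the two halves, assuming a hypothetical done flag shared with the interrupt handler:

	static DECLARE_WAIT_QUEUE_HEAD(wq);
	static int done;

	/* requester: clear the flag, start the transfer, then wait */
	done = 0;
	/* start_capture_hw();  (hypothetical hardware kick) */
	wait_event_interruptible(wq, done);

	/* interrupt handler: set the condition, then wake the waiter */
	done = 1;
	wake_up_interruptible(&wq);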
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 281916591437..200bec91182e 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -997,7 +997,7 @@ static int bcap_probe(struct platform_device *pdev)
q->buf_struct_size = sizeof(struct bcap_buffer);
q->ops = &bcap_video_qops;
q->mem_ops = &vb2_dma_contig_memops;
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(q);
if (ret)
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 61f3dbcc259f..3e5199ee5d25 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -2428,7 +2428,7 @@ static int coda_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &coda_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
- src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -2440,7 +2440,7 @@ static int coda_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &coda_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
- dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
return vb2_queue_init(dst_vq);
}
@@ -2829,6 +2829,9 @@ static void coda_finish_encode(struct coda_ctx *ctx)
}
dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->v4l2_buf.flags |=
+ src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index b02aba488826..b4f12d00be05 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -341,14 +341,8 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vpbe_fh *fh = vb2_get_drv_priv(vq);
struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
int ret;
- /* If buffer queue is empty, return error */
- if (list_empty(&layer->dma_queue)) {
- v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
- return -ENOBUFS;
- }
/* Get the next frame from the buffer queue */
layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
struct vpbe_disp_buffer, list);
@@ -1415,7 +1409,8 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
q->ops = &video_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
ret = vb2_queue_init(q);
if (ret) {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 735ec47601a9..756da78bac23 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -272,13 +272,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
unsigned long flags;
int ret;
- /* If buffer queue is empty, return error */
spin_lock_irqsave(&common->irqlock, flags);
- if (list_empty(&common->dma_queue)) {
- spin_unlock_irqrestore(&common->irqlock, flags);
- vpif_dbg(1, debug, "buffer queue is empty\n");
- return -ENOBUFS;
- }
/* Get the next frame from the buffer queue */
common->cur_frm = common->next_frm = list_entry(common->dma_queue.next,
@@ -1023,7 +1017,8 @@ static int vpif_reqbufs(struct file *file, void *priv,
q->ops = &video_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct vpif_cap_buffer);
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
ret = vb2_queue_init(q);
if (ret) {
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 9d115cdc6bdb..0ac841e35aa4 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -234,13 +234,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
unsigned long flags;
int ret;
- /* If buffer queue is empty, return error */
spin_lock_irqsave(&common->irqlock, flags);
- if (list_empty(&common->dma_queue)) {
- spin_unlock_irqrestore(&common->irqlock, flags);
- vpif_err("buffer queue is empty\n");
- return -ENOBUFS;
- }
/* Get the next frame from the buffer queue */
common->next_frm = common->cur_frm =
@@ -983,7 +977,8 @@ static int vpif_reqbufs(struct file *file, void *priv,
q->ops = &video_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct vpif_disp_buffer);
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
ret = vb2_queue_init(q);
if (ret) {
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 810c3e13970c..d0ea94f58d6f 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -88,8 +88,12 @@ void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
if (src_vb && dst_vb) {
- src_vb->v4l2_buf.timestamp = dst_vb->v4l2_buf.timestamp;
- src_vb->v4l2_buf.timecode = dst_vb->v4l2_buf.timecode;
+ dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+ dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
+ dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.flags |=
+ src_vb->v4l2_buf.flags
+ & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
v4l2_m2m_buf_done(src_vb, vb_state);
v4l2_m2m_buf_done(dst_vb, vb_state);
@@ -590,7 +594,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &gsc_m2m_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -603,7 +607,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &gsc_m2m_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
return vb2_queue_init(dst_vq);
}
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index 01ed1ecdff7e..e1b2ceba00c1 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -64,4 +64,13 @@ config VIDEO_EXYNOS4_FIMC_IS
To compile this driver as a module, choose M here: the
module will be called exynos4-fimc-is.
+config VIDEO_EXYNOS4_ISP_DMA_CAPTURE
+ bool "EXYNOS4x12 FIMC-IS ISP Direct DMA capture support"
+ depends on VIDEO_EXYNOS4_FIMC_IS
+ select VIDEO_EXYNOS4_IS_COMMON
+ default y
+ help
+ This option enables an additional video device node exposing a V4L2
+ video capture interface for the FIMC-IS ISP raw (Bayer) capture DMA.
+
endif # VIDEO_SAMSUNG_EXYNOS4_IS
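The help text above refers to an additional V4L2 video capture node. A minimal user-space sketch of probing such a node with the standard VIDIOC_QUERYCAP ioctl (the /dev/video3 path is hypothetical; the actual node number depends on probe order):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	const char *dev = "/dev/video3";	/* hypothetical node */
	struct v4l2_capability cap;
	int fd = open(dev, O_RDWR);

	if (fd < 0) {
		perror(dev);
		return 1;
	}
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
		printf("driver: %s, card: %s, caps: %#x\n",
		       (const char *)cap.driver, (const char *)cap.card,
		       cap.capabilities);
	close(fd);
	return 0;
}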
diff --git a/drivers/media/platform/exynos4-is/Makefile b/drivers/media/platform/exynos4-is/Makefile
index c2ff29ba6856..eed1b185d813 100644
--- a/drivers/media/platform/exynos4-is/Makefile
+++ b/drivers/media/platform/exynos4-is/Makefile
@@ -6,6 +6,10 @@ exynos4-is-common-objs := common.o
exynos-fimc-is-objs := fimc-is.o fimc-isp.o fimc-is-sensor.o fimc-is-regs.o
exynos-fimc-is-objs += fimc-is-param.o fimc-is-errno.o fimc-is-i2c.o
+ifeq ($(CONFIG_VIDEO_EXYNOS4_ISP_DMA_CAPTURE),y)
+exynos-fimc-is-objs += fimc-isp-video.o
+endif
+
obj-$(CONFIG_VIDEO_S5P_MIPI_CSIS) += s5p-csis.o
obj-$(CONFIG_VIDEO_EXYNOS_FIMC_LITE) += exynos-fimc-lite.o
obj-$(CONFIG_VIDEO_EXYNOS4_FIMC_IS) += exynos-fimc-is.o
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 8a712ca91d11..92ae812abce2 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -1782,7 +1782,7 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
q->ops = &fimc_capture_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct fimc_vid_buffer);
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &fimc->lock;
ret = vb2_queue_init(q);
diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.c b/drivers/media/platform/exynos4-is/fimc-is-param.c
index 9bf3ddd9e028..bf1465d1bf6d 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-param.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-param.c
@@ -56,7 +56,7 @@ static void __fimc_is_hw_update_param_sensor_framerate(struct fimc_is *is)
__hw_param_copy(dst, src);
}
-static int __fimc_is_hw_update_param(struct fimc_is *is, u32 offset)
+int __fimc_is_hw_update_param(struct fimc_is *is, u32 offset)
{
struct is_param_region *par = &is->is_p_region->parameter;
struct chain_config *cfg = &is->config[is->config_index];
diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.h b/drivers/media/platform/exynos4-is/fimc-is-param.h
index f9358c27ae2d..8e31f7642776 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-param.h
+++ b/drivers/media/platform/exynos4-is/fimc-is-param.h
@@ -911,6 +911,10 @@ struct is_region {
u32 shared[MAX_SHARED_COUNT];
} __packed;
+/* Offset to the ISP DMA2 output buffer address array. */
+#define DMA2_OUTPUT_ADDR_ARRAY_OFFS \
+ (offsetof(struct is_region, shared) + 32 * sizeof(u32))
+
struct is_debug_frame_descriptor {
u32 sensor_frame_time;
u32 sensor_exposure_time;
@@ -988,6 +992,7 @@ struct sensor_open_extended {
struct fimc_is;
int fimc_is_hw_get_sensor_max_framerate(struct fimc_is *is);
+int __fimc_is_hw_update_param(struct fimc_is *is, u32 offset);
void fimc_is_set_initial_params(struct fimc_is *is);
unsigned int __get_pending_param_count(struct fimc_is *is);
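For reference, the DMA2_OUTPUT_ADDR_ARRAY_OFFS macro added above skips the first 32 entries of the shared[] array, so it lines up with the shared[32 + n] slots where the new ISP capture code stores its DMA addresses. A worked illustration with a simplified stand-in structure (the real struct is_region is far larger, so the printed number is illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct is_region; not the real layout. */
struct is_region_example {
	uint32_t parameters[256];
	uint32_t shared[64];
};

/* Same construction as DMA2_OUTPUT_ADDR_ARRAY_OFFS: start of shared[]
 * plus 32 words, i.e. the byte offset of shared[32].
 */
#define EXAMPLE_DMA2_OFFS \
	(offsetof(struct is_region_example, shared) + 32 * sizeof(uint32_t))

int main(void)
{
	/* 256 * 4 + 32 * 4 = 1152 bytes for this stand-in layout. */
	printf("offset: %zu\n", (size_t)EXAMPLE_DMA2_OFFS);
	return 0;
}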
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/exynos4-is/fimc-is-regs.c
index 2628733c4e10..cfe4406a83ff 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.c
@@ -105,6 +105,20 @@ int fimc_is_hw_get_params(struct fimc_is *is, unsigned int num_args)
return 0;
}
+void fimc_is_hw_set_isp_buf_mask(struct fimc_is *is, unsigned int mask)
+{
+ if (hweight32(mask) == 1) {
+ dev_err(&is->pdev->dev, "%s(): not enough buffers (mask %#x)\n",
+ __func__, mask);
+ return;
+ }
+
+ if (mcuctl_read(is, MCUCTL_REG_ISSR(23)) != 0)
+ dev_dbg(&is->pdev->dev, "non-zero DMA buffer mask\n");
+
+ mcuctl_write(mask, is, MCUCTL_REG_ISSR(23));
+}
+
void fimc_is_hw_set_sensor_num(struct fimc_is *is)
{
pr_debug("setting sensor index to: %d\n", is->sensor_index);
@@ -112,7 +126,7 @@ void fimc_is_hw_set_sensor_num(struct fimc_is *is)
mcuctl_write(IH_REPLY_DONE, is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
mcuctl_write(IHC_GET_SENSOR_NUM, is, MCUCTL_REG_ISSR(2));
- mcuctl_write(FIMC_IS_SENSOR_NUM, is, MCUCTL_REG_ISSR(3));
+ mcuctl_write(FIMC_IS_SENSORS_NUM, is, MCUCTL_REG_ISSR(3));
}
void fimc_is_hw_close_sensor(struct fimc_is *is, unsigned int index)
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.h b/drivers/media/platform/exynos4-is/fimc-is-regs.h
index 1d9d4ffc6ad5..141e5ddadbeb 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.h
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.h
@@ -147,6 +147,7 @@ int fimc_is_hw_get_params(struct fimc_is *is, unsigned int num);
void fimc_is_hw_set_intgr0_gd0(struct fimc_is *is);
int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is);
void fimc_is_hw_set_sensor_num(struct fimc_is *is);
+void fimc_is_hw_set_isp_buf_mask(struct fimc_is *is, unsigned int mask);
void fimc_is_hw_stream_on(struct fimc_is *is);
void fimc_is_hw_stream_off(struct fimc_is *is);
int fimc_is_hw_set_param(struct fimc_is *is);
diff --git a/drivers/media/platform/exynos4-is/fimc-is-sensor.c b/drivers/media/platform/exynos4-is/fimc-is-sensor.c
index 6647421e5d3a..10e82e21b5d1 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-sensor.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-sensor.c
@@ -2,276 +2,21 @@
* Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
- *
* Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_gpio.h>
-#include <linux/pm_runtime.h>
-#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-#include <media/v4l2-subdev.h>
-#include "fimc-is.h"
#include "fimc-is-sensor.h"
-#define DRIVER_NAME "FIMC-IS-SENSOR"
-
-static const char * const sensor_supply_names[] = {
- "svdda",
- "svddio",
-};
-
-static const struct v4l2_mbus_framefmt fimc_is_sensor_formats[] = {
- {
- .code = V4L2_MBUS_FMT_SGRBG10_1X10,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .field = V4L2_FIELD_NONE,
- }
-};
-
-static const struct v4l2_mbus_framefmt *find_sensor_format(
- struct v4l2_mbus_framefmt *mf)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(fimc_is_sensor_formats); i++)
- if (mf->code == fimc_is_sensor_formats[i].code)
- return &fimc_is_sensor_formats[i];
-
- return &fimc_is_sensor_formats[0];
-}
-
-static int fimc_is_sensor_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->index >= ARRAY_SIZE(fimc_is_sensor_formats))
- return -EINVAL;
-
- code->code = fimc_is_sensor_formats[code->index].code;
- return 0;
-}
-
-static void fimc_is_sensor_try_format(struct fimc_is_sensor *sensor,
- struct v4l2_mbus_framefmt *mf)
-{
- const struct sensor_drv_data *dd = sensor->drvdata;
- const struct v4l2_mbus_framefmt *fmt;
-
- fmt = find_sensor_format(mf);
- mf->code = fmt->code;
- v4l_bound_align_image(&mf->width, 16 + 8, dd->width, 0,
- &mf->height, 12 + 8, dd->height, 0, 0);
-}
-
-static struct v4l2_mbus_framefmt *__fimc_is_sensor_get_format(
- struct fimc_is_sensor *sensor, struct v4l2_subdev_fh *fh,
- u32 pad, enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return fh ? v4l2_subdev_get_try_format(fh, pad) : NULL;
-
- return &sensor->format;
-}
-
-static int fimc_is_sensor_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_format *fmt)
-{
- struct fimc_is_sensor *sensor = sd_to_fimc_is_sensor(sd);
- struct v4l2_mbus_framefmt *mf;
-
- fimc_is_sensor_try_format(sensor, &fmt->format);
-
- mf = __fimc_is_sensor_get_format(sensor, fh, fmt->pad, fmt->which);
- if (mf) {
- mutex_lock(&sensor->lock);
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- *mf = fmt->format;
- mutex_unlock(&sensor->lock);
- }
- return 0;
-}
-
-static int fimc_is_sensor_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_format *fmt)
-{
- struct fimc_is_sensor *sensor = sd_to_fimc_is_sensor(sd);
- struct v4l2_mbus_framefmt *mf;
-
- mf = __fimc_is_sensor_get_format(sensor, fh, fmt->pad, fmt->which);
-
- mutex_lock(&sensor->lock);
- fmt->format = *mf;
- mutex_unlock(&sensor->lock);
- return 0;
-}
-
-static struct v4l2_subdev_pad_ops fimc_is_sensor_pad_ops = {
- .enum_mbus_code = fimc_is_sensor_enum_mbus_code,
- .get_fmt = fimc_is_sensor_get_fmt,
- .set_fmt = fimc_is_sensor_set_fmt,
-};
-
-static int fimc_is_sensor_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
-
- *format = fimc_is_sensor_formats[0];
- format->width = FIMC_IS_SENSOR_DEF_PIX_WIDTH;
- format->height = FIMC_IS_SENSOR_DEF_PIX_HEIGHT;
-
- return 0;
-}
-
-static const struct v4l2_subdev_internal_ops fimc_is_sensor_sd_internal_ops = {
- .open = fimc_is_sensor_open,
-};
-
-static int fimc_is_sensor_s_power(struct v4l2_subdev *sd, int on)
-{
- struct fimc_is_sensor *sensor = sd_to_fimc_is_sensor(sd);
- int gpio = sensor->gpio_reset;
- int ret;
-
- if (on) {
- ret = pm_runtime_get(sensor->dev);
- if (ret < 0)
- return ret;
-
- ret = regulator_bulk_enable(SENSOR_NUM_SUPPLIES,
- sensor->supplies);
- if (ret < 0) {
- pm_runtime_put(sensor->dev);
- return ret;
- }
- if (gpio_is_valid(gpio)) {
- gpio_set_value(gpio, 1);
- usleep_range(600, 800);
- gpio_set_value(gpio, 0);
- usleep_range(10000, 11000);
- gpio_set_value(gpio, 1);
- }
-
- /* A delay needed for the sensor initialization. */
- msleep(20);
- } else {
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, 0);
-
- ret = regulator_bulk_disable(SENSOR_NUM_SUPPLIES,
- sensor->supplies);
- if (!ret)
- pm_runtime_put(sensor->dev);
- }
-
- pr_info("%s:%d: on: %d, ret: %d\n", __func__, __LINE__, on, ret);
-
- return ret;
-}
-
-static struct v4l2_subdev_core_ops fimc_is_sensor_core_ops = {
- .s_power = fimc_is_sensor_s_power,
-};
-
-static struct v4l2_subdev_ops fimc_is_sensor_subdev_ops = {
- .core = &fimc_is_sensor_core_ops,
- .pad = &fimc_is_sensor_pad_ops,
-};
-
-static const struct of_device_id fimc_is_sensor_of_match[];
-
-static int fimc_is_sensor_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct device *dev = &client->dev;
- struct fimc_is_sensor *sensor;
- const struct of_device_id *of_id;
- struct v4l2_subdev *sd;
- int gpio, i, ret;
-
- sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
- if (!sensor)
- return -ENOMEM;
-
- mutex_init(&sensor->lock);
- sensor->gpio_reset = -EINVAL;
-
- gpio = of_get_gpio_flags(dev->of_node, 0, NULL);
- if (gpio_is_valid(gpio)) {
- ret = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_LOW,
- DRIVER_NAME);
- if (ret < 0)
- return ret;
- }
- sensor->gpio_reset = gpio;
-
- for (i = 0; i < SENSOR_NUM_SUPPLIES; i++)
- sensor->supplies[i].supply = sensor_supply_names[i];
-
- ret = devm_regulator_bulk_get(&client->dev, SENSOR_NUM_SUPPLIES,
- sensor->supplies);
- if (ret < 0)
- return ret;
-
- of_id = of_match_node(fimc_is_sensor_of_match, dev->of_node);
- if (!of_id)
- return -ENODEV;
-
- sensor->drvdata = of_id->data;
- sensor->dev = dev;
-
- sd = &sensor->subdev;
- v4l2_i2c_subdev_init(sd, client, &fimc_is_sensor_subdev_ops);
- snprintf(sd->name, sizeof(sd->name), sensor->drvdata->subdev_name);
- sensor->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- sensor->format.code = fimc_is_sensor_formats[0].code;
- sensor->format.width = FIMC_IS_SENSOR_DEF_PIX_WIDTH;
- sensor->format.height = FIMC_IS_SENSOR_DEF_PIX_HEIGHT;
-
- sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
- ret = media_entity_init(&sd->entity, 1, &sensor->pad, 0);
- if (ret < 0)
- return ret;
-
- pm_runtime_no_callbacks(dev);
- pm_runtime_enable(dev);
-
- return ret;
-}
-
-static int fimc_is_sensor_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- media_entity_cleanup(&sd->entity);
- return 0;
-}
-
-static const struct i2c_device_id fimc_is_sensor_ids[] = {
- { }
-};
-
static const struct sensor_drv_data s5k6a3_drvdata = {
.id = FIMC_IS_SENSOR_ID_S5K6A3,
- .subdev_name = "S5K6A3",
- .width = S5K6A3_SENSOR_WIDTH,
- .height = S5K6A3_SENSOR_HEIGHT,
+ .open_timeout = S5K6A3_OPEN_TIMEOUT,
};
-static const struct of_device_id fimc_is_sensor_of_match[] = {
+static const struct of_device_id fimc_is_sensor_of_ids[] = {
{
.compatible = "samsung,s5k6a3",
.data = &s5k6a3_drvdata,
@@ -279,27 +24,11 @@ static const struct of_device_id fimc_is_sensor_of_match[] = {
{ }
};
-static struct i2c_driver fimc_is_sensor_driver = {
- .driver = {
- .of_match_table = fimc_is_sensor_of_match,
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- },
- .probe = fimc_is_sensor_probe,
- .remove = fimc_is_sensor_remove,
- .id_table = fimc_is_sensor_ids,
-};
-
-int fimc_is_register_sensor_driver(void)
+const struct sensor_drv_data *fimc_is_sensor_get_drvdata(
+ struct device_node *node)
{
- return i2c_add_driver(&fimc_is_sensor_driver);
-}
+ const struct of_device_id *of_id;
-void fimc_is_unregister_sensor_driver(void)
-{
- i2c_del_driver(&fimc_is_sensor_driver);
+ of_id = of_match_node(fimc_is_sensor_of_ids, node);
+ return of_id ? of_id->data : NULL;
}
-
-MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
-MODULE_DESCRIPTION("Exynos4x12 FIMC-IS image sensor subdev driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/exynos4-is/fimc-is-sensor.h b/drivers/media/platform/exynos4-is/fimc-is-sensor.h
index 6036d49a6c68..173ccffa4bcd 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-sensor.h
+++ b/drivers/media/platform/exynos4-is/fimc-is-sensor.h
@@ -13,24 +13,13 @@
#ifndef FIMC_IS_SENSOR_H_
#define FIMC_IS_SENSOR_H_
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-subdev.h>
-
-#define FIMC_IS_SENSOR_OPEN_TIMEOUT 2000 /* ms */
-
-#define FIMC_IS_SENSOR_DEF_PIX_WIDTH 1296
-#define FIMC_IS_SENSOR_DEF_PIX_HEIGHT 732
+#include <linux/of.h>
+#include <linux/types.h>
+#define S5K6A3_OPEN_TIMEOUT 2000 /* ms */
#define S5K6A3_SENSOR_WIDTH 1392
#define S5K6A3_SENSOR_HEIGHT 1392
-#define SENSOR_NUM_SUPPLIES 2
-
enum fimc_is_sensor_id {
FIMC_IS_SENSOR_ID_S5K3H2 = 1,
FIMC_IS_SENSOR_ID_S5K6A3,
@@ -45,45 +34,23 @@ enum fimc_is_sensor_id {
struct sensor_drv_data {
enum fimc_is_sensor_id id;
- const char * const subdev_name;
- unsigned int width;
- unsigned int height;
+ /* sensor open timeout in ms */
+ unsigned short open_timeout;
};
/**
* struct fimc_is_sensor - fimc-is sensor data structure
- * @dev: pointer to this I2C client device structure
- * @subdev: the image sensor's v4l2 subdev
- * @pad: subdev media source pad
- * @supplies: image sensor's voltage regulator supplies
- * @gpio_reset: GPIO connected to the sensor's reset pin
* @drvdata: a pointer to the sensor's parameters data structure
* @i2c_bus: ISP I2C bus index (0...1)
* @test_pattern: true to enable video test pattern
- * @lock: mutex protecting the structure's members below
- * @format: media bus format at the sensor's source pad
*/
struct fimc_is_sensor {
- struct device *dev;
- struct v4l2_subdev subdev;
- struct media_pad pad;
- struct regulator_bulk_data supplies[SENSOR_NUM_SUPPLIES];
- int gpio_reset;
const struct sensor_drv_data *drvdata;
unsigned int i2c_bus;
- bool test_pattern;
-
- struct mutex lock;
- struct v4l2_mbus_framefmt format;
+ u8 test_pattern;
};
-static inline
-struct fimc_is_sensor *sd_to_fimc_is_sensor(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct fimc_is_sensor, subdev);
-}
-
-int fimc_is_register_sensor_driver(void);
-void fimc_is_unregister_sensor_driver(void);
+const struct sensor_drv_data *fimc_is_sensor_get_drvdata(
+ struct device_node *node);
#endif /* FIMC_IS_SENSOR_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 13a4228952e3..128b73b6cce2 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -24,13 +24,13 @@
#include <linux/i2c.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/videodev2.h>
-#include <media/v4l2-of.h>
#include <media/videobuf2-dma-contig.h>
#include "media-dev.h"
@@ -161,78 +161,69 @@ static void fimc_is_disable_clocks(struct fimc_is *is)
}
}
-static int fimc_is_parse_sensor_config(struct fimc_is_sensor *sensor,
- struct device_node *np)
+static int fimc_is_parse_sensor_config(struct fimc_is *is, unsigned int index,
+ struct device_node *node)
{
+ struct fimc_is_sensor *sensor = &is->sensor[index];
u32 tmp = 0;
int ret;
- np = v4l2_of_get_next_endpoint(np, NULL);
- if (!np)
+ sensor->drvdata = fimc_is_sensor_get_drvdata(node);
+ if (!sensor->drvdata) {
+ dev_err(&is->pdev->dev, "no driver data found for: %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ node = of_graph_get_next_endpoint(node, NULL);
+ if (!node)
return -ENXIO;
- np = v4l2_of_get_remote_port(np);
- if (!np)
+
+ node = of_graph_get_remote_port(node);
+ if (!node)
return -ENXIO;
/* Use MIPI-CSIS channel id to determine the ISP I2C bus index. */
- ret = of_property_read_u32(np, "reg", &tmp);
- sensor->i2c_bus = tmp - FIMC_INPUT_MIPI_CSI2_0;
+ ret = of_property_read_u32(node, "reg", &tmp);
+ if (ret < 0) {
+ dev_err(&is->pdev->dev, "reg property not found at: %s\n",
+ node->full_name);
+ return ret;
+ }
- return ret;
+ sensor->i2c_bus = tmp - FIMC_INPUT_MIPI_CSI2_0;
+ return 0;
}
static int fimc_is_register_subdevs(struct fimc_is *is)
{
- struct device_node *adapter, *child;
- int ret;
+ struct device_node *i2c_bus, *child;
+ int ret, index = 0;
ret = fimc_isp_subdev_create(&is->isp);
if (ret < 0)
return ret;
- for_each_compatible_node(adapter, NULL, FIMC_IS_I2C_COMPATIBLE) {
- if (!of_find_device_by_node(adapter)) {
- of_node_put(adapter);
- return -EPROBE_DEFER;
- }
-
- for_each_available_child_of_node(adapter, child) {
- struct i2c_client *client;
- struct v4l2_subdev *sd;
-
- client = of_find_i2c_device_by_node(child);
- if (!client)
- goto e_retry;
-
- sd = i2c_get_clientdata(client);
- if (!sd)
- goto e_retry;
+ /* Initialize memory allocator context for the ISP DMA. */
+ is->isp.alloc_ctx = is->alloc_ctx;
- /* FIXME: Add support for multiple sensors. */
- if (WARN_ON(is->sensor))
- continue;
+ for_each_compatible_node(i2c_bus, NULL, FIMC_IS_I2C_COMPATIBLE) {
+ for_each_available_child_of_node(i2c_bus, child) {
+ ret = fimc_is_parse_sensor_config(is, index, child);
- is->sensor = sd_to_fimc_is_sensor(sd);
-
- if (fimc_is_parse_sensor_config(is->sensor, child)) {
- dev_warn(&is->pdev->dev, "DT parse error: %s\n",
- child->full_name);
+ if (ret < 0 || index >= FIMC_IS_SENSORS_NUM) {
+ of_node_put(child);
+ return ret;
}
- pr_debug("%s(): registered subdev: %p\n",
- __func__, sd->name);
+ index++;
}
}
return 0;
-
-e_retry:
- of_node_put(child);
- return -EPROBE_DEFER;
}
static int fimc_is_unregister_subdevs(struct fimc_is *is)
{
fimc_isp_subdev_destroy(&is->isp);
- is->sensor = NULL;
return 0;
}
@@ -647,7 +638,7 @@ static int fimc_is_hw_open_sensor(struct fimc_is *is,
fimc_is_hw_set_intgr0_gd0(is);
return fimc_is_wait_event(is, IS_ST_OPEN_SENSOR, 1,
- FIMC_IS_SENSOR_OPEN_TIMEOUT);
+ sensor->drvdata->open_timeout);
}
@@ -661,8 +652,8 @@ int fimc_is_hw_initialize(struct fimc_is *is)
u32 prev_id;
int i, ret;
- /* Sensor initialization. */
- ret = fimc_is_hw_open_sensor(is, is->sensor);
+ /* Sensor initialization. Only one sensor is currently supported. */
+ ret = fimc_is_hw_open_sensor(is, &is->sensor[0]);
if (ret < 0)
return ret;
@@ -977,27 +968,20 @@ static int fimc_is_module_init(void)
{
int ret;
- ret = fimc_is_register_sensor_driver();
- if (ret < 0)
- return ret;
-
ret = fimc_is_register_i2c_driver();
if (ret < 0)
- goto err_sens;
+ return ret;
ret = platform_driver_register(&fimc_is_driver);
- if (!ret)
- return ret;
- fimc_is_unregister_i2c_driver();
-err_sens:
- fimc_is_unregister_sensor_driver();
+ if (ret < 0)
+ fimc_is_unregister_i2c_driver();
+
return ret;
}
static void fimc_is_module_exit(void)
{
- fimc_is_unregister_sensor_driver();
fimc_is_unregister_i2c_driver();
platform_driver_unregister(&fimc_is_driver);
}
diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h
index 61bb0127e19d..e0be691af2d3 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.h
+++ b/drivers/media/platform/exynos4-is/fimc-is.h
@@ -39,7 +39,7 @@
#define FIMC_IS_FW_LOAD_TIMEOUT 1000 /* ms */
#define FIMC_IS_POWER_ON_TIMEOUT 1000 /* us */
-#define FIMC_IS_SENSOR_NUM 2
+#define FIMC_IS_SENSORS_NUM 2
/* Memory definitions */
#define FIMC_IS_CPU_MEM_SIZE (0xa00000)
@@ -253,7 +253,7 @@ struct fimc_is {
struct firmware *f_w;
struct fimc_isp isp;
- struct fimc_is_sensor *sensor;
+ struct fimc_is_sensor sensor[FIMC_IS_SENSORS_NUM];
struct fimc_is_setfile setfile;
struct vb2_alloc_ctx *alloc_ctx;
@@ -292,6 +292,11 @@ static inline struct fimc_is *fimc_isp_to_is(struct fimc_isp *isp)
return container_of(isp, struct fimc_is, isp);
}
+static inline struct chain_config *__get_curr_is_config(struct fimc_is *is)
+{
+ return &is->config[is->config_index];
+}
+
static inline void fimc_is_mem_barrier(void)
{
mb();
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
new file mode 100644
index 000000000000..e92b4e115adb
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -0,0 +1,660 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * FIMC-IS ISP video input and video output DMA interface driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * The hardware handling code was derived from a driver written by
+ * Younghwan Joo <yhwan.joo@samsung.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/s5p_fimc.h>
+
+#include "common.h"
+#include "media-dev.h"
+#include "fimc-is.h"
+#include "fimc-isp-video.h"
+#include "fimc-is-param.h"
+
+static int isp_video_capture_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *pfmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *vid_fmt = &isp->video_capture.pixfmt;
+ const struct v4l2_pix_format_mplane *pixm = NULL;
+ const struct fimc_fmt *fmt;
+ unsigned int wh, i;
+
+ if (pfmt) {
+ pixm = &pfmt->fmt.pix_mp;
+ fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, -1);
+ wh = pixm->width * pixm->height;
+ } else {
+ fmt = isp->video_capture.format;
+ wh = vid_fmt->width * vid_fmt->height;
+ }
+
+ if (fmt == NULL)
+ return -EINVAL;
+
+ *num_buffers = clamp_t(u32, *num_buffers, FIMC_ISP_REQ_BUFS_MIN,
+ FIMC_ISP_REQ_BUFS_MAX);
+ *num_planes = fmt->memplanes;
+
+ for (i = 0; i < fmt->memplanes; i++) {
+ unsigned int size = (wh * fmt->depth[i]) / 8;
+ if (pixm)
+ sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
+ else
+ sizes[i] = size;
+ allocators[i] = isp->alloc_ctx;
+ }
+
+ return 0;
+}
+
+static inline struct param_dma_output *__get_isp_dma2(struct fimc_is *is)
+{
+ return &__get_curr_is_config(is)->isp.dma2_output;
+}
+
+static int isp_video_capture_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(q);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct param_dma_output *dma = __get_isp_dma2(is);
+ struct fimc_is_video *video = &isp->video_capture;
+ int ret;
+
+ if (!test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state) ||
+ test_bit(ST_ISP_VID_CAP_STREAMING, &isp->state))
+ return 0;
+
+ dma->cmd = DMA_OUTPUT_COMMAND_ENABLE;
+ dma->notify_dma_done = DMA_OUTPUT_NOTIFY_DMA_DONE_ENABLE;
+ dma->buffer_address = is->is_dma_p_region +
+ DMA2_OUTPUT_ADDR_ARRAY_OFFS;
+ dma->buffer_number = video->reqbufs_count;
+ dma->dma_out_mask = video->buf_mask;
+
+ isp_dbg(2, &video->ve.vdev,
+ "buf_count: %d, planes: %d, dma addr table: %#x\n",
+ video->buf_count, video->format->memplanes,
+ dma->buffer_address);
+
+ fimc_is_mem_barrier();
+
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT);
+ __fimc_is_hw_update_param(is, PARAM_ISP_DMA2_OUTPUT);
+
+ ret = fimc_is_itf_s_param(is, false);
+ if (ret < 0)
+ return ret;
+
+ ret = fimc_pipeline_call(&video->ve, set_stream, 1);
+ if (ret < 0)
+ return ret;
+
+ set_bit(ST_ISP_VID_CAP_STREAMING, &isp->state);
+ return ret;
+}
+
+static int isp_video_capture_stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(q);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct param_dma_output *dma = __get_isp_dma2(is);
+ int ret;
+
+ ret = fimc_pipeline_call(&isp->video_capture.ve, set_stream, 0);
+ if (ret < 0)
+ return ret;
+
+ dma->cmd = DMA_OUTPUT_COMMAND_DISABLE;
+ dma->notify_dma_done = DMA_OUTPUT_NOTIFY_DMA_DONE_DISABLE;
+ dma->buffer_number = 0;
+ dma->buffer_address = 0;
+ dma->dma_out_mask = 0;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT);
+ __fimc_is_hw_update_param(is, PARAM_ISP_DMA2_OUTPUT);
+
+ ret = fimc_is_itf_s_param(is, false);
+ if (ret < 0)
+ dev_warn(&is->pdev->dev, "%s: DMA stop failed\n", __func__);
+
+ fimc_is_hw_set_isp_buf_mask(is, 0);
+
+ clear_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state);
+ clear_bit(ST_ISP_VID_CAP_STREAMING, &isp->state);
+
+ isp->video_capture.buf_count = 0;
+ return 0;
+}
+
+static int isp_video_capture_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_is_video *video = &isp->video_capture;
+ int i;
+
+ if (video->format == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < video->format->memplanes; i++) {
+ unsigned long size = video->pixfmt.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ v4l2_err(&video->ve.vdev,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ /* Check if we get one of the already known buffers. */
+ if (test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state)) {
+ dma_addr_t dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ int i;
+
+ for (i = 0; i < video->buf_count; i++)
+ if (video->buffers[i]->dma_addr[0] == dma_addr)
+ return 0;
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_is_video *video = &isp->video_capture;
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct isp_video_buf *ivb = to_isp_video_buf(vb);
+ unsigned long flags;
+ unsigned int i;
+
+ if (test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state)) {
+ spin_lock_irqsave(&is->slock, flags);
+ video->buf_mask |= BIT(ivb->index);
+ spin_unlock_irqrestore(&is->slock, flags);
+ } else {
+ unsigned int num_planes = video->format->memplanes;
+
+ ivb->index = video->buf_count;
+ video->buffers[ivb->index] = ivb;
+
+ for (i = 0; i < num_planes; i++) {
+ int buf_index = ivb->index * num_planes + i;
+
+ ivb->dma_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+ is->is_p_region->shared[32 + buf_index] =
+ ivb->dma_addr[i];
+
+ isp_dbg(2, &video->ve.vdev,
+ "dma_buf %d (%d/%d/%d) addr: %#x\n",
+ buf_index, ivb->index, i, vb->v4l2_buf.index,
+ ivb->dma_addr[i]);
+ }
+
+ if (++video->buf_count < video->reqbufs_count)
+ return;
+
+ video->buf_mask = (1UL << video->buf_count) - 1;
+ set_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state);
+ }
+
+ if (!test_bit(ST_ISP_VID_CAP_STREAMING, &isp->state))
+ isp_video_capture_start_streaming(vb->vb2_queue, 0);
+}
+
+/*
+ * FIMC-IS ISP input and output DMA interface interrupt handler.
+ * Locking: called with is->slock spinlock held.
+ */
+void fimc_isp_video_irq_handler(struct fimc_is *is)
+{
+ struct fimc_is_video *video = &is->isp.video_capture;
+ struct vb2_buffer *vb;
+ int buf_index;
+
+ /* TODO: Ensure the DMA is really stopped in stop_streaming callback */
+ if (!test_bit(ST_ISP_VID_CAP_STREAMING, &is->isp.state))
+ return;
+
+ buf_index = (is->i2h_cmd.args[1] - 1) % video->buf_count;
+ vb = &video->buffers[buf_index]->vb;
+
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+
+ video->buf_mask &= ~BIT(buf_index);
+ fimc_is_hw_set_isp_buf_mask(is, video->buf_mask);
+}
+
+static const struct vb2_ops isp_video_capture_qops = {
+ .queue_setup = isp_video_capture_queue_setup,
+ .buf_prepare = isp_video_capture_buffer_prepare,
+ .buf_queue = isp_video_capture_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = isp_video_capture_start_streaming,
+ .stop_streaming = isp_video_capture_stop_streaming,
+};
+
+static int isp_video_open(struct file *file)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct exynos_video_entity *ve = &isp->video_capture.ve;
+ struct media_entity *me = &ve->vdev.entity;
+ int ret;
+
+ if (mutex_lock_interruptible(&isp->video_lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ ret = pm_runtime_get_sync(&isp->pdev->dev);
+ if (ret < 0)
+ goto rel_fh;
+
+ if (v4l2_fh_is_singular_file(file)) {
+ mutex_lock(&me->parent->graph_mutex);
+
+ ret = fimc_pipeline_call(ve, open, me, true);
+
+ /* Mark the video pipeline as in use. */
+ if (ret == 0)
+ me->use_count++;
+
+ mutex_unlock(&me->parent->graph_mutex);
+ }
+ if (!ret)
+ goto unlock;
+rel_fh:
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&isp->video_lock);
+ return ret;
+}
+
+static int isp_video_release(struct file *file)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct fimc_is_video *ivc = &isp->video_capture;
+ struct media_entity *entity = &ivc->ve.vdev.entity;
+ struct media_device *mdev = entity->parent;
+ int ret = 0;
+
+ mutex_lock(&isp->video_lock);
+
+ if (v4l2_fh_is_singular_file(file) && ivc->streaming) {
+ media_entity_pipeline_stop(entity);
+ ivc->streaming = 0;
+ }
+
+ vb2_fop_release(file);
+
+ if (v4l2_fh_is_singular_file(file)) {
+ fimc_pipeline_call(&ivc->ve, close);
+
+ mutex_lock(&mdev->graph_mutex);
+ entity->use_count--;
+ mutex_unlock(&mdev->graph_mutex);
+ }
+
+ pm_runtime_put(&isp->pdev->dev);
+ mutex_unlock(&isp->video_lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations isp_video_fops = {
+ .owner = THIS_MODULE,
+ .open = isp_video_open,
+ .release = isp_video_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/*
+ * Video node ioctl operations
+ */
+static int isp_video_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+
+ __fimc_vidioc_querycap(&isp->pdev->dev, cap, V4L2_CAP_STREAMING);
+ return 0;
+}
+
+static int isp_video_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct fimc_fmt *fmt;
+
+ if (f->index >= FIMC_ISP_NUM_FORMATS)
+ return -EINVAL;
+
+ fmt = fimc_isp_find_format(NULL, NULL, f->index);
+ if (WARN_ON(fmt == NULL))
+ return -EINVAL;
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int isp_video_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+
+ f->fmt.pix_mp = isp->video_capture.pixfmt;
+ return 0;
+}
+
+static void __isp_video_try_fmt(struct fimc_isp *isp,
+ struct v4l2_pix_format_mplane *pixm,
+ const struct fimc_fmt **fmt)
+{
+ *fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+
+ pixm->colorspace = V4L2_COLORSPACE_SRGB;
+ pixm->field = V4L2_FIELD_NONE;
+ pixm->num_planes = (*fmt)->memplanes;
+ pixm->pixelformat = (*fmt)->fourcc;
+ /*
+ * TODO: double-check with the documentation that these width/height
+ * constraints are correct.
+ */
+ v4l_bound_align_image(&pixm->width, FIMC_ISP_SOURCE_WIDTH_MIN,
+ FIMC_ISP_SOURCE_WIDTH_MAX, 3,
+ &pixm->height, FIMC_ISP_SOURCE_HEIGHT_MIN,
+ FIMC_ISP_SOURCE_HEIGHT_MAX, 0, 0);
+}
+
+static int isp_video_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+
+ __isp_video_try_fmt(isp, &f->fmt.pix_mp, NULL);
+ return 0;
+}
+
+static int isp_video_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ const struct fimc_fmt *ifmt = NULL;
+ struct param_dma_output *dma = __get_isp_dma2(is);
+
+ __isp_video_try_fmt(isp, pixm, &ifmt);
+
+ if (WARN_ON(ifmt == NULL))
+ return -EINVAL;
+
+ dma->format = DMA_OUTPUT_FORMAT_BAYER;
+ dma->order = DMA_OUTPUT_ORDER_GB_BG;
+ dma->plane = ifmt->memplanes;
+ dma->bitwidth = ifmt->depth[0];
+ dma->width = pixm->width;
+ dma->height = pixm->height;
+
+ fimc_is_mem_barrier();
+
+ isp->video_capture.format = ifmt;
+ isp->video_capture.pixfmt = *pixm;
+
+ return 0;
+}
+
+/*
+ * Check for source/sink format differences at each link.
+ * Return 0 if the formats match or -EPIPE otherwise.
+ */
+static int isp_video_pipeline_validate(struct fimc_isp *isp)
+{
+ struct v4l2_subdev *sd = &isp->subdev;
+ struct v4l2_subdev_format sink_fmt, src_fmt;
+ struct media_pad *pad;
+ int ret;
+
+ while (1) {
+ /* Retrieve format at the sink pad */
+ pad = &sd->entity.pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+ sink_fmt.pad = pad->index;
+ sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ /* Retrieve format at the source pad */
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != sink_fmt.format.width ||
+ src_fmt.format.height != sink_fmt.format.height ||
+ src_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+static int isp_video_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct exynos_video_entity *ve = &isp->video_capture.ve;
+ struct media_entity *me = &ve->vdev.entity;
+ int ret;
+
+ ret = media_entity_pipeline_start(me, &ve->pipe->mp);
+ if (ret < 0)
+ return ret;
+
+ ret = isp_video_pipeline_validate(isp);
+ if (ret < 0)
+ goto p_stop;
+
+ ret = vb2_ioctl_streamon(file, priv, type);
+ if (ret < 0)
+ goto p_stop;
+
+ isp->video_capture.streaming = 1;
+ return 0;
+p_stop:
+ media_entity_pipeline_stop(me);
+ return ret;
+}
+
+static int isp_video_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct fimc_is_video *video = &isp->video_capture;
+ int ret;
+
+ ret = vb2_ioctl_streamoff(file, priv, type);
+ if (ret < 0)
+ return ret;
+
+ media_entity_pipeline_stop(&video->ve.vdev.entity);
+ video->streaming = 0;
+ return 0;
+}
+
+static int isp_video_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ int ret;
+
+ ret = vb2_ioctl_reqbufs(file, priv, rb);
+ if (ret < 0)
+ return ret;
+
+ if (rb->count && rb->count < FIMC_ISP_REQ_BUFS_MIN) {
+ rb->count = 0;
+ vb2_ioctl_reqbufs(file, priv, rb);
+ ret = -ENOMEM;
+ }
+
+ isp->video_capture.reqbufs_count = rb->count;
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
+ .vidioc_querycap = isp_video_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = isp_video_enum_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = isp_video_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = isp_video_s_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = isp_video_g_fmt_mplane,
+ .vidioc_reqbufs = isp_video_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = isp_video_streamon,
+ .vidioc_streamoff = isp_video_streamoff,
+};
+
+int fimc_isp_video_device_register(struct fimc_isp *isp,
+ struct v4l2_device *v4l2_dev,
+ enum v4l2_buf_type type)
+{
+ struct vb2_queue *q = &isp->video_capture.vb_queue;
+ struct fimc_is_video *iv;
+ struct video_device *vdev;
+ int ret;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ iv = &isp->video_capture;
+ else
+ return -ENOSYS;
+
+ mutex_init(&isp->video_lock);
+ INIT_LIST_HEAD(&iv->pending_buf_q);
+ INIT_LIST_HEAD(&iv->active_buf_q);
+ iv->format = fimc_isp_find_format(NULL, NULL, 0);
+ iv->pixfmt.width = IS_DEFAULT_WIDTH;
+ iv->pixfmt.height = IS_DEFAULT_HEIGHT;
+ iv->pixfmt.pixelformat = iv->format->fourcc;
+ iv->pixfmt.colorspace = V4L2_COLORSPACE_SRGB;
+ iv->reqbufs_count = 0;
+
+ memset(q, 0, sizeof(*q));
+ q->type = type;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = &isp_video_capture_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct isp_video_buf);
+ q->drv_priv = isp;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &isp->video_lock;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0)
+ return ret;
+
+ vdev = &iv->ve.vdev;
+ memset(vdev, 0, sizeof(*vdev));
+ snprintf(vdev->name, sizeof(vdev->name), "fimc-is-isp.%s",
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ?
+ "capture" : "output");
+ vdev->queue = q;
+ vdev->fops = &isp_video_fops;
+ vdev->ioctl_ops = &isp_video_ioctl_ops;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->minor = -1;
+ vdev->release = video_device_release_empty;
+ vdev->lock = &isp->video_lock;
+
+ iv->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vdev->entity, 1, &iv->pad, 0);
+ if (ret < 0)
+ return ret;
+
+ video_set_drvdata(vdev, isp);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ media_entity_cleanup(&vdev->entity);
+ return ret;
+ }
+
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
+
+ return 0;
+}
+
+void fimc_isp_video_device_unregister(struct fimc_isp *isp,
+ enum v4l2_buf_type type)
+{
+ struct exynos_video_entity *ve;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ ve = &isp->video_capture.ve;
+ else
+ return;
+
+ mutex_lock(&isp->video_lock);
+
+ if (video_is_registered(&ve->vdev)) {
+ video_unregister_device(&ve->vdev);
+ media_entity_cleanup(&ve->vdev.entity);
+ ve->pipe = NULL;
+ }
+
+ mutex_unlock(&isp->video_lock);
+}
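The capture code above tracks queued buffers with a bit mask: buffer_queue() records BIT(index) for each buffer, the interrupt handler clears the completed buffer's bit and reprograms the hardware mask via fimc_is_hw_set_isp_buf_mask(), which in turn rejects masks with only a single bit set. A plain user-space sketch of that bookkeeping (names are illustrative, not the driver's):

#include <stdio.h>

/* Count of set bits, standing in for the kernel's hweight32(). */
static unsigned int popcount32(unsigned int x)
{
	unsigned int n = 0;

	for (; x; x &= x - 1)
		n++;
	return n;
}

int main(void)
{
	unsigned int buf_count = 4;
	/* All buffers queued: 0b1111, as set once reqbufs_count is reached. */
	unsigned int buf_mask = (1U << buf_count) - 1;

	/* Frame done on buffer 2: clear its bit before updating the HW. */
	buf_mask &= ~(1U << 2);
	printf("after completion: %#x, %u buffers still queued\n",
	       buf_mask, popcount32(buf_mask));

	/* Userspace requeues buffer 2: the bit is set again. */
	buf_mask |= 1U << 2;
	printf("after requeue: %#x\n", buf_mask);
	return 0;
}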
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.h b/drivers/media/platform/exynos4-is/fimc-isp-video.h
new file mode 100644
index 000000000000..98c662654bb6
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.h
@@ -0,0 +1,44 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_ISP_VIDEO__
+#define FIMC_ISP_VIDEO__
+
+#include <media/videobuf2-core.h>
+#include "fimc-isp.h"
+
+#ifdef CONFIG_VIDEO_EXYNOS4_ISP_DMA_CAPTURE
+int fimc_isp_video_device_register(struct fimc_isp *isp,
+ struct v4l2_device *v4l2_dev,
+ enum v4l2_buf_type type);
+
+void fimc_isp_video_device_unregister(struct fimc_isp *isp,
+ enum v4l2_buf_type type);
+
+void fimc_isp_video_irq_handler(struct fimc_is *is);
+#else
+static inline void fimc_isp_video_irq_handler(struct fimc_is *is)
+{
+}
+
+static inline int fimc_isp_video_device_register(struct fimc_isp *isp,
+ struct v4l2_device *v4l2_dev,
+ enum v4l2_buf_type type)
+{
+ return 0;
+}
+
+static inline void fimc_isp_video_device_unregister(struct fimc_isp *isp,
+ enum v4l2_buf_type type)
+{
+}
+#endif /* !CONFIG_VIDEO_EXYNOS4_ISP_DMA_CAPTURE */
+
+#endif /* FIMC_ISP_VIDEO__ */
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index f3c6136aa5b4..be62d6b9ac48 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -25,6 +25,7 @@
#include <media/v4l2-device.h>
#include "media-dev.h"
+#include "fimc-isp-video.h"
#include "fimc-is-command.h"
#include "fimc-is-param.h"
#include "fimc-is-regs.h"
@@ -93,8 +94,8 @@ void fimc_isp_irq_handler(struct fimc_is *is)
is->i2h_cmd.args[1] = mcuctl_read(is, MCUCTL_REG_ISSR(21));
fimc_is_fw_clear_irq1(is, FIMC_IS_INT_FRAME_DONE_ISP);
+ fimc_isp_video_irq_handler(is);
- /* TODO: Complete ISP DMA interrupt handler */
wake_up(&is->irq_queue);
}
@@ -388,7 +389,33 @@ static int fimc_isp_subdev_open(struct v4l2_subdev *sd,
return 0;
}
+static int fimc_isp_subdev_registered(struct v4l2_subdev *sd)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+ int ret;
+
+ /* Use pipeline object allocated by the media device. */
+ isp->video_capture.ve.pipe = v4l2_get_subdev_hostdata(sd);
+
+ ret = fimc_isp_video_device_register(isp, sd->v4l2_dev,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret < 0)
+ isp->video_capture.ve.pipe = NULL;
+
+ return ret;
+}
+
+static void fimc_isp_subdev_unregistered(struct v4l2_subdev *sd)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+
+ fimc_isp_video_device_unregister(isp,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+}
+
static const struct v4l2_subdev_internal_ops fimc_is_subdev_internal_ops = {
+ .registered = fimc_isp_subdev_registered,
+ .unregistered = fimc_isp_subdev_unregistered,
.open = fimc_isp_subdev_open,
};
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.h b/drivers/media/platform/exynos4-is/fimc-isp.h
index 03bf95ab017b..4dc55a18d978 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.h
+++ b/drivers/media/platform/exynos4-is/fimc-isp.h
@@ -35,17 +35,18 @@ extern int fimc_isp_debug;
#define FIMC_ISP_SINK_WIDTH_MIN (16 + 8)
#define FIMC_ISP_SINK_HEIGHT_MIN (12 + 8)
#define FIMC_ISP_SOURCE_WIDTH_MIN 8
-#define FIMC_ISP_SOURC_HEIGHT_MIN 8
+#define FIMC_ISP_SOURCE_HEIGHT_MIN 8
#define FIMC_ISP_CAC_MARGIN_WIDTH 16
#define FIMC_ISP_CAC_MARGIN_HEIGHT 12
#define FIMC_ISP_SINK_WIDTH_MAX (4000 - 16)
#define FIMC_ISP_SINK_HEIGHT_MAX (4000 + 12)
#define FIMC_ISP_SOURCE_WIDTH_MAX 4000
-#define FIMC_ISP_SOURC_HEIGHT_MAX 4000
+#define FIMC_ISP_SOURCE_HEIGHT_MAX 4000
#define FIMC_ISP_NUM_FORMATS 3
#define FIMC_ISP_REQ_BUFS_MIN 2
+#define FIMC_ISP_REQ_BUFS_MAX 32
#define FIMC_ISP_SD_PAD_SINK 0
#define FIMC_ISP_SD_PAD_SRC_FIFO 1
@@ -100,6 +101,16 @@ struct fimc_isp_ctrls {
struct v4l2_ctrl *colorfx;
};
+struct isp_video_buf {
+ struct vb2_buffer vb;
+ dma_addr_t dma_addr[FIMC_ISP_MAX_PLANES];
+ unsigned int index;
+};
+
+#define to_isp_video_buf(_b) container_of(_b, struct isp_video_buf, vb)
+
+#define FIMC_ISP_MAX_BUFS 4
+
/**
* struct fimc_is_video - fimc-is video device structure
* @vdev: video_device structure
@@ -114,18 +125,26 @@ struct fimc_isp_ctrls {
* @format: current pixel format
*/
struct fimc_is_video {
- struct video_device vdev;
+ struct exynos_video_entity ve;
enum v4l2_buf_type type;
struct media_pad pad;
struct list_head pending_buf_q;
struct list_head active_buf_q;
struct vb2_queue vb_queue;
- unsigned int frame_count;
unsigned int reqbufs_count;
+ unsigned int buf_count;
+ unsigned int buf_mask;
+ unsigned int frame_count;
int streaming;
+ struct isp_video_buf *buffers[FIMC_ISP_MAX_BUFS];
const struct fimc_fmt *format;
+ struct v4l2_pix_format_mplane pixfmt;
};
+/* struct fimc_isp:state bit definitions */
+#define ST_ISP_VID_CAP_BUF_PREP 0
+#define ST_ISP_VID_CAP_STREAMING 1
+
/**
* struct fimc_isp - FIMC-IS ISP data structure
* @pdev: pointer to FIMC-IS platform device
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 779ec3cd259d..3ad660b55b6b 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -1313,7 +1313,7 @@ static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct flite_buffer);
q->drv_priv = fimc;
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &fimc->lock;
ret = vb2_queue_init(q);
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index 9da95bd14820..36971d915b53 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -134,6 +134,9 @@ static void fimc_device_run(void *priv)
goto dma_unlock;
dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+ dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.flags |=
+ src_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
/* Reconfigure hardware if the context has changed. */
if (fimc->m2m.ctx != ctx) {
@@ -557,7 +560,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &fimc_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->fimc_dev->lock;
ret = vb2_queue_init(src_vq);
@@ -570,7 +573,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &fimc_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->fimc_dev->lock;
return vb2_queue_init(dst_vq);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index c1bce170df6f..e62211a80f0e 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -11,6 +11,8 @@
*/
#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i2c.h>
@@ -20,10 +22,12 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-of.h>
#include <media/media-device.h>
@@ -218,6 +222,7 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
if (ret < 0)
return ret;
}
+
ret = fimc_md_set_camclk(sd, true);
if (ret < 0)
goto err_wbclk;
@@ -378,77 +383,18 @@ static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct i2c_adapter *adapter;
- if (!client)
+ if (!client || client->dev.of_node)
return;
v4l2_device_unregister_subdev(sd);
- if (!client->dev.of_node) {
- adapter = client->adapter;
- i2c_unregister_device(client);
- if (adapter)
- i2c_put_adapter(adapter);
- }
+ adapter = client->adapter;
+ i2c_unregister_device(client);
+ if (adapter)
+ i2c_put_adapter(adapter);
}
#ifdef CONFIG_OF
-/* Register I2C client subdev associated with @node. */
-static int fimc_md_of_add_sensor(struct fimc_md *fmd,
- struct device_node *node, int index)
-{
- struct fimc_sensor_info *si;
- struct i2c_client *client;
- struct v4l2_subdev *sd;
- int ret;
-
- if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor)))
- return -EINVAL;
- si = &fmd->sensor[index];
-
- client = of_find_i2c_device_by_node(node);
- if (!client)
- return -EPROBE_DEFER;
-
- device_lock(&client->dev);
-
- if (!client->dev.driver ||
- !try_module_get(client->dev.driver->owner)) {
- ret = -EPROBE_DEFER;
- v4l2_info(&fmd->v4l2_dev, "No driver found for %s\n",
- node->full_name);
- goto dev_put;
- }
-
- /* Enable sensor's master clock */
- ret = __fimc_md_set_camclk(fmd, &si->pdata, true);
- if (ret < 0)
- goto mod_put;
- sd = i2c_get_clientdata(client);
-
- ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
- __fimc_md_set_camclk(fmd, &si->pdata, false);
- if (ret < 0)
- goto mod_put;
-
- v4l2_set_subdev_hostdata(sd, &si->pdata);
- if (si->pdata.fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK)
- sd->grp_id = GRP_ID_FIMC_IS_SENSOR;
- else
- sd->grp_id = GRP_ID_SENSOR;
-
- si->subdev = sd;
- v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice: %s (%d)\n",
- sd->name, fmd->num_sensors);
- fmd->num_sensors++;
-
-mod_put:
- module_put(client->dev.driver->owner);
-dev_put:
- device_unlock(&client->dev);
- put_device(&client->dev);
- return ret;
-}
-
/* Parse port node and register as a sub-device any sensor specified there. */
static int fimc_md_parse_port_node(struct fimc_md *fmd,
struct device_node *port,
@@ -457,7 +403,6 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
struct device_node *rem, *ep, *np;
struct fimc_source_info *pd;
struct v4l2_of_endpoint endpoint;
- int ret;
u32 val;
pd = &fmd->sensor[index].pdata;
@@ -468,12 +413,12 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
return 0;
v4l2_of_parse_endpoint(ep, &endpoint);
- if (WARN_ON(endpoint.port == 0) || index >= FIMC_MAX_SENSORS)
+ if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS)
return -EINVAL;
- pd->mux_id = (endpoint.port - 1) & 0x1;
+ pd->mux_id = (endpoint.base.port - 1) & 0x1;
- rem = v4l2_of_get_remote_port_parent(ep);
+ rem = of_graph_get_remote_port_parent(ep);
of_node_put(ep);
if (rem == NULL) {
v4l2_info(&fmd->v4l2_dev, "Remote device at %s not found\n",
@@ -485,6 +430,8 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
if (!of_property_read_u32(rem, "clock-frequency", &val))
pd->clk_frequency = val;
+ else
+ pd->clk_frequency = DEFAULT_SENSOR_CLK_FREQ;
if (pd->clk_frequency == 0) {
v4l2_err(&fmd->v4l2_dev, "Wrong clock frequency at node %s\n",
@@ -493,13 +440,13 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
return -EINVAL;
}
- if (fimc_input_is_parallel(endpoint.port)) {
+ if (fimc_input_is_parallel(endpoint.base.port)) {
if (endpoint.bus_type == V4L2_MBUS_PARALLEL)
pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_601;
else
pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_656;
pd->flags = endpoint.bus.parallel.flags;
- } else if (fimc_input_is_mipi_csi(endpoint.port)) {
+ } else if (fimc_input_is_mipi_csi(endpoint.base.port)) {
/*
* MIPI CSI-2: only the input mux selection and
* the sensor's clock frequency are needed.
@@ -507,7 +454,7 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
pd->sensor_bus_type = FIMC_BUS_TYPE_MIPI_CSI2;
} else {
v4l2_err(&fmd->v4l2_dev, "Wrong port id (%u) at node %s\n",
- endpoint.port, rem->full_name);
+ endpoint.base.port, rem->full_name);
}
/*
* For FIMC-IS handled sensors, that are placed under i2c-isp device
@@ -524,10 +471,17 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
else
pd->fimc_bus_type = pd->sensor_bus_type;
- ret = fimc_md_of_add_sensor(fmd, rem, index);
- of_node_put(rem);
+ if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor)))
+ return -EINVAL;
- return ret;
+ fmd->sensor[index].asd.match_type = V4L2_ASYNC_MATCH_OF;
+ fmd->sensor[index].asd.match.of.node = rem;
+ fmd->async_subdevs[index] = &fmd->sensor[index].asd;
+
+ fmd->num_sensors++;
+
+ of_node_put(rem);
+ return 0;
}
/* Register all SoC external sub-devices */
@@ -731,8 +685,16 @@ static int register_csis_entity(struct fimc_md *fmd,
static int register_fimc_is_entity(struct fimc_md *fmd, struct fimc_is *is)
{
struct v4l2_subdev *sd = &is->isp.subdev;
+ struct exynos_media_pipeline *ep;
int ret;
+ /* Allocate pipeline object for the ISP capture video node. */
+ ep = fimc_md_pipeline_create(fmd);
+ if (!ep)
+ return -ENOMEM;
+
+ v4l2_set_subdev_hostdata(sd, ep);
+
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
if (ret) {
v4l2_err(&fmd->v4l2_dev,
@@ -883,11 +845,13 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
v4l2_device_unregister_subdev(fmd->csis[i].sd);
fmd->csis[i].sd = NULL;
}
- for (i = 0; i < fmd->num_sensors; i++) {
- if (fmd->sensor[i].subdev == NULL)
- continue;
- fimc_md_unregister_sensor(fmd->sensor[i].subdev);
- fmd->sensor[i].subdev = NULL;
+ if (fmd->pdev->dev.of_node == NULL) {
+ for (i = 0; i < fmd->num_sensors; i++) {
+ if (fmd->sensor[i].subdev == NULL)
+ continue;
+ fimc_md_unregister_sensor(fmd->sensor[i].subdev);
+ fmd->sensor[i].subdev = NULL;
+ }
}
if (fmd->fimc_is)
@@ -1004,16 +968,17 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
/* Create FIMC-IS links */
static int __fimc_md_create_fimc_is_links(struct fimc_md *fmd)
{
+ struct fimc_isp *isp = &fmd->fimc_is->isp;
struct media_entity *source, *sink;
int i, ret;
- source = &fmd->fimc_is->isp.subdev.entity;
+ source = &isp->subdev.entity;
for (i = 0; i < FIMC_MAX_DEVS; i++) {
if (fmd->fimc[i] == NULL)
continue;
- /* Link from IS-ISP subdev to FIMC */
+ /* Link from FIMC-IS-ISP subdev to FIMC */
sink = &fmd->fimc[i]->vid_cap.subdev.entity;
ret = media_entity_create_link(source, FIMC_ISP_SD_PAD_SRC_FIFO,
sink, FIMC_SD_PAD_SINK_FIFO, 0);
@@ -1021,7 +986,15 @@ static int __fimc_md_create_fimc_is_links(struct fimc_md *fmd)
return ret;
}
- return ret;
+ /* Link from FIMC-IS-ISP subdev to fimc-is-isp.capture video node */
+ sink = &isp->video_capture.ve.vdev.entity;
+
+ /* Skip this link if the fimc-is-isp video node driver isn't built-in */
+ if (sink->num_pads == 0)
+ return 0;
+
+ return media_entity_create_link(source, FIMC_ISP_SD_PAD_SRC_DMA,
+ sink, 0, 0);
}
/**
@@ -1222,6 +1195,14 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
struct fimc_camclk_info *camclk;
int ret = 0;
+ /*
+ * When device tree is used the sensor drivers are supposed to
+ * control the clock themselves. This whole function will be
+ * removed once S5PV210 platform is converted to the device tree.
+ */
+ if (fmd->pdev->dev.of_node)
+ return 0;
+
if (WARN_ON(si->clk_id >= FIMC_MAX_CAMCLKS) || !fmd || !fmd->pmf)
return -EINVAL;
@@ -1276,6 +1257,14 @@ int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
struct fimc_source_info *si = v4l2_get_subdev_hostdata(sd);
struct fimc_md *fmd = entity_to_fimc_mdev(&sd->entity);
+ /*
+ * If there is a clock provider registered the sensors will
+ * handle their clock themselves, no need to control it on
+ * the host interface side.
+ */
+ if (fmd->clk_provider.num_clocks > 0)
+ return 0;
+
return __fimc_md_set_camclk(fmd, si, on);
}
@@ -1437,6 +1426,153 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd)
return 0;
}
+#ifdef CONFIG_OF
+static int cam_clk_prepare(struct clk_hw *hw)
+{
+ struct cam_clk *camclk = to_cam_clk(hw);
+ int ret;
+
+ if (camclk->fmd->pmf == NULL)
+ return -ENODEV;
+
+ ret = pm_runtime_get_sync(camclk->fmd->pmf);
+ return ret < 0 ? ret : 0;
+}
+
+static void cam_clk_unprepare(struct clk_hw *hw)
+{
+ struct cam_clk *camclk = to_cam_clk(hw);
+
+ if (camclk->fmd->pmf == NULL)
+ return;
+
+ pm_runtime_put_sync(camclk->fmd->pmf);
+}
+
+static const struct clk_ops cam_clk_ops = {
+ .prepare = cam_clk_prepare,
+ .unprepare = cam_clk_unprepare,
+};
+
+static void fimc_md_unregister_clk_provider(struct fimc_md *fmd)
+{
+ struct cam_clk_provider *cp = &fmd->clk_provider;
+ unsigned int i;
+
+ if (cp->of_node)
+ of_clk_del_provider(cp->of_node);
+
+ for (i = 0; i < cp->num_clocks; i++)
+ clk_unregister(cp->clks[i]);
+}
+
+static int fimc_md_register_clk_provider(struct fimc_md *fmd)
+{
+ struct cam_clk_provider *cp = &fmd->clk_provider;
+ struct device *dev = &fmd->pdev->dev;
+ int i, ret;
+
+ for (i = 0; i < FIMC_MAX_CAMCLKS; i++) {
+ struct cam_clk *camclk = &cp->camclk[i];
+ struct clk_init_data init;
+ const char *p_name;
+
+ ret = of_property_read_string_index(dev->of_node,
+ "clock-output-names", i, &init.name);
+ if (ret < 0)
+ break;
+
+ p_name = __clk_get_name(fmd->camclk[i].clock);
+
+ /* It's safe since clk_register() will duplicate the string. */
+ init.parent_names = &p_name;
+ init.num_parents = 1;
+ init.ops = &cam_clk_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ camclk->hw.init = &init;
+ camclk->fmd = fmd;
+
+ cp->clks[i] = clk_register(NULL, &camclk->hw);
+ if (IS_ERR(cp->clks[i])) {
+ dev_err(dev, "failed to register clock: %s (%ld)\n",
+ init.name, PTR_ERR(cp->clks[i]));
+ ret = PTR_ERR(cp->clks[i]);
+ goto err;
+ }
+ cp->num_clocks++;
+ }
+
+ if (cp->num_clocks == 0) {
+ dev_warn(dev, "clk provider not registered\n");
+ return 0;
+ }
+
+ cp->clk_data.clks = cp->clks;
+ cp->clk_data.clk_num = cp->num_clocks;
+ cp->of_node = dev->of_node;
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get,
+ &cp->clk_data);
+ if (ret == 0)
+ return 0;
+err:
+ fimc_md_unregister_clk_provider(fmd);
+ return ret;
+}
+#else
+#define fimc_md_register_clk_provider(fmd) (0)
+#define fimc_md_unregister_clk_provider(fmd) (0)
+#endif
+
+static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct fimc_md *fmd = notifier_to_fimc_md(notifier);
+ struct fimc_sensor_info *si = NULL;
+ int i;
+
+ /* Find platform data for this sensor subdev */
+ for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++)
+ if (fmd->sensor[i].asd.match.of.node == subdev->dev->of_node)
+ si = &fmd->sensor[i];
+
+ if (si == NULL)
+ return -EINVAL;
+
+ v4l2_set_subdev_hostdata(subdev, &si->pdata);
+
+ if (si->pdata.fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK)
+ subdev->grp_id = GRP_ID_FIMC_IS_SENSOR;
+ else
+ subdev->grp_id = GRP_ID_SENSOR;
+
+ si->subdev = subdev;
+
+ v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice: %s (%d)\n",
+ subdev->name, fmd->num_sensors);
+
+ fmd->num_sensors++;
+
+ return 0;
+}
+
+static int subdev_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct fimc_md *fmd = notifier_to_fimc_md(notifier);
+ int ret;
+
+ mutex_lock(&fmd->media_dev.graph_mutex);
+
+ ret = fimc_md_create_links(fmd);
+ if (ret < 0)
+ goto unlock;
+
+ ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev);
+unlock:
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+ return ret;
+}
+
static int fimc_md_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1469,63 +1605,91 @@ static int fimc_md_probe(struct platform_device *pdev)
v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
return ret;
}
+
ret = media_device_register(&fmd->media_dev);
if (ret < 0) {
v4l2_err(v4l2_dev, "Failed to register media device: %d\n", ret);
- goto err_md;
+ goto err_v4l2_dev;
}
+
ret = fimc_md_get_clocks(fmd);
if (ret)
- goto err_clk;
+ goto err_md;
fmd->user_subdev_api = (dev->of_node != NULL);
- /* Protect the media graph while we're registering entities */
- mutex_lock(&fmd->media_dev.graph_mutex);
-
ret = fimc_md_get_pinctrl(fmd);
if (ret < 0) {
if (ret != EPROBE_DEFER)
dev_err(dev, "Failed to get pinctrl: %d\n", ret);
- goto err_unlock;
+ goto err_clk;
}
+ platform_set_drvdata(pdev, fmd);
+
+ /* Protect the media graph while we're registering entities */
+ mutex_lock(&fmd->media_dev.graph_mutex);
+
if (dev->of_node)
ret = fimc_md_register_of_platform_entities(fmd, dev->of_node);
else
ret = bus_for_each_dev(&platform_bus_type, NULL, fmd,
fimc_md_pdev_match);
- if (ret)
- goto err_unlock;
+ if (ret) {
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+ goto err_clk;
+ }
if (dev->platform_data || dev->of_node) {
ret = fimc_md_register_sensor_entities(fmd);
- if (ret)
- goto err_unlock;
+ if (ret) {
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+ goto err_m_ent;
+ }
}
- ret = fimc_md_create_links(fmd);
- if (ret)
- goto err_unlock;
- ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev);
- if (ret)
- goto err_unlock;
+ mutex_unlock(&fmd->media_dev.graph_mutex);
ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
if (ret)
- goto err_unlock;
+ goto err_m_ent;
+ /*
+ * FIMC platform devices need to be registered before the sclk_cam
+ * clocks provider, as one of these devices needs to be activated
+ * to enable the clock.
+ */
+ ret = fimc_md_register_clk_provider(fmd);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "clock provider registration failed\n");
+ goto err_attr;
+ }
+
+ if (fmd->num_sensors > 0) {
+ fmd->subdev_notifier.subdevs = fmd->async_subdevs;
+ fmd->subdev_notifier.num_subdevs = fmd->num_sensors;
+ fmd->subdev_notifier.bound = subdev_notifier_bound;
+ fmd->subdev_notifier.complete = subdev_notifier_complete;
+ fmd->num_sensors = 0;
+
+ ret = v4l2_async_notifier_register(&fmd->v4l2_dev,
+ &fmd->subdev_notifier);
+ if (ret)
+ goto err_clk_p;
+ }
- platform_set_drvdata(pdev, fmd);
- mutex_unlock(&fmd->media_dev.graph_mutex);
return 0;
-err_unlock:
- mutex_unlock(&fmd->media_dev.graph_mutex);
+err_clk_p:
+ fimc_md_unregister_clk_provider(fmd);
+err_attr:
+ device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
err_clk:
fimc_md_put_clocks(fmd);
+err_m_ent:
fimc_md_unregister_entities(fmd);
- media_device_unregister(&fmd->media_dev);
err_md:
+ media_device_unregister(&fmd->media_dev);
+err_v4l2_dev:
v4l2_device_unregister(&fmd->v4l2_dev);
return ret;
}
@@ -1537,12 +1701,16 @@ static int fimc_md_remove(struct platform_device *pdev)
if (!fmd)
return 0;
+ fimc_md_unregister_clk_provider(fmd);
+ v4l2_async_notifier_unregister(&fmd->subdev_notifier);
+
v4l2_device_unregister(&fmd->v4l2_dev);
device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
fimc_md_unregister_entities(fmd);
fimc_md_pipelines_free(fmd);
media_device_unregister(&fmd->media_dev);
fimc_md_put_clocks(fmd);
+
return 0;
}
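For reference, a DT-based sensor driver consuming one of the sclk_cam clocks exported by the provider registered above controls the clock itself, which is why __fimc_md_set_camclk() becomes a no-op on DT platforms. A minimal consumer-side sketch, assuming a hypothetical "mclk" con_id and device pointer (neither is taken from this patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical sensor-side clock handling; clk_prepare_enable() ends up
 * calling cam_clk_prepare() above, which runtime-resumes the FIMC device
 * that provides the clock.
 */
static int sensor_clk_setup(struct device *dev)
{
	struct clk *mclk;
	int ret;

	mclk = devm_clk_get(dev, "mclk");	/* "mclk" is an assumed con_id */
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	ret = clk_set_rate(mclk, 24000000);	/* cf. DEFAULT_SENSOR_CLK_FREQ */
	if (ret < 0)
		return ret;

	return clk_prepare_enable(mclk);
}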
diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h
index 62599fd7756f..ee1e2519f728 100644
--- a/drivers/media/platform/exynos4-is/media-dev.h
+++ b/drivers/media/platform/exynos4-is/media-dev.h
@@ -10,6 +10,7 @@
#define FIMC_MDEVICE_H_
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -31,8 +32,9 @@
#define PINCTRL_STATE_IDLE "idle"
-#define FIMC_MAX_SENSORS 8
+#define FIMC_MAX_SENSORS 4
#define FIMC_MAX_CAMCLKS 2
+#define DEFAULT_SENSOR_CLK_FREQ 24000000U
/* LCD/ISP Writeback clocks (PIXELASYNCMx) */
enum {
@@ -78,6 +80,7 @@ struct fimc_camclk_info {
/**
* struct fimc_sensor_info - image data source subdev information
 * @pdata: sensor's attributes passed as media device's platform data
+ * @asd: asynchronous subdev registration data structure
* @subdev: image sensor v4l2 subdev
* @host: fimc device the sensor is currently linked to
*
@@ -85,10 +88,17 @@ struct fimc_camclk_info {
*/
struct fimc_sensor_info {
struct fimc_source_info pdata;
+ struct v4l2_async_subdev asd;
struct v4l2_subdev *subdev;
struct fimc_dev *host;
};
+struct cam_clk {
+ struct clk_hw hw;
+ struct fimc_md *fmd;
+};
+#define to_cam_clk(_hw) container_of(_hw, struct cam_clk, hw)
+
/**
* struct fimc_md - fimc media device information
* @csis: MIPI CSIS subdevs data
@@ -105,6 +115,7 @@ struct fimc_sensor_info {
* @pinctrl: camera port pinctrl handle
* @state_default: pinctrl default state handle
* @state_idle: pinctrl idle state handle
+ * @cam_clk_provider: CAMCLK clock provider structure
* @user_subdev_api: true if subdevs are not configured by the host driver
* @slock: spinlock protecting @sensor array
*/
@@ -122,13 +133,25 @@ struct fimc_md {
struct media_device media_dev;
struct v4l2_device v4l2_dev;
struct platform_device *pdev;
+
struct fimc_pinctrl {
struct pinctrl *pinctrl;
struct pinctrl_state *state_default;
struct pinctrl_state *state_idle;
} pinctl;
- bool user_subdev_api;
+ struct cam_clk_provider {
+ struct clk *clks[FIMC_MAX_CAMCLKS];
+ struct clk_onecell_data clk_data;
+ struct device_node *of_node;
+ struct cam_clk camclk[FIMC_MAX_CAMCLKS];
+ int num_clocks;
+ } clk_provider;
+
+ struct v4l2_async_notifier subdev_notifier;
+ struct v4l2_async_subdev *async_subdevs[FIMC_MAX_SENSORS];
+
+ bool user_subdev_api;
spinlock_t slock;
struct list_head pipelines;
};
@@ -145,6 +168,11 @@ static inline struct fimc_md *entity_to_fimc_mdev(struct media_entity *me)
container_of(me->parent, struct fimc_md, media_dev);
}
+static inline struct fimc_md *notifier_to_fimc_md(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct fimc_md, subdev_notifier);
+}
+
static inline void fimc_md_graph_lock(struct exynos_video_entity *ve)
{
mutex_lock(&ve->vdev.entity.parent->graph_mutex);
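The notifier wired up in media-dev.c pairs with v4l2-async registration on the sensor side; a minimal sketch of what the tail of a matching I2C sensor driver's probe might do is shown below. The ops argument and the surrounding subdev initialisation are assumptions, not part of this patch.

#include <linux/i2c.h>
#include <media/v4l2-async.h>
#include <media/v4l2-common.h>
#include <media/v4l2-subdev.h>

/* Hypothetical: once the subdev is initialised, async registration lets the
 * fimc-md notifier's .bound() callback match it against the OF node parsed
 * in fimc_md_parse_port_node().
 */
static int sensor_register_subdev(struct i2c_client *client,
				  struct v4l2_subdev *sd,
				  const struct v4l2_subdev_ops *ops)
{
	v4l2_i2c_subdev_init(sd, client, ops);
	return v4l2_async_register_subdev(sd);
}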
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index f3c3591fdc5d..3678ba59725c 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -20,6 +20,7 @@
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_data/mipi-csis.h>
#include <linux/platform_device.h>
@@ -762,7 +763,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
&state->max_num_lanes))
return -EINVAL;
- node = v4l2_of_get_next_endpoint(node, NULL);
+ node = of_graph_get_next_endpoint(node, NULL);
if (!node) {
dev_err(&pdev->dev, "No port node at %s\n",
pdev->dev.of_node->full_name);
@@ -771,7 +772,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
/* Get port node and validate MIPI-CSI channel id. */
v4l2_of_parse_endpoint(node, &endpoint);
- state->index = endpoint.port - FIMC_INPUT_MIPI_CSI2_0;
+ state->index = endpoint.base.port - FIMC_INPUT_MIPI_CSI2_0;
if (state->index < 0 || state->index >= CSIS_MAX_ENTITIES)
return -ENXIO;
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 6bb86b581a34..c21d14fd61db 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -207,8 +207,11 @@ static void dma_callback(void *data)
src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
- src_vb->v4l2_buf.timestamp = dst_vb->v4l2_buf.timestamp;
- src_vb->v4l2_buf.timecode = dst_vb->v4l2_buf.timecode;
+ dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+ dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.flags |=
+ src_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
@@ -868,7 +871,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &deinterlace_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
- src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
q_data[V4L2_M2M_SRC].fmt = &formats[0];
q_data[V4L2_M2M_SRC].width = 640;
q_data[V4L2_M2M_SRC].height = 480;
@@ -885,7 +888,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &deinterlace_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
- dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
q_data[V4L2_M2M_DST].fmt = &formats[0];
q_data[V4L2_M2M_DST].width = 640;
q_data[V4L2_M2M_DST].height = 480;
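The timestamp, timestamp-source-flag and timecode copying added above recurs almost verbatim in several m2m drivers later in this diff (emmaprp, g2d, jpeg, mfc, vpe). A small helper capturing the idiom might look like the sketch below; the helper itself is illustrative and is not something these drivers define.

#include <linux/videodev2.h>
#include <media/videobuf2-core.h>

/* Copy the metadata a V4L2_BUF_FLAG_TIMESTAMP_COPY queue expects to be
 * propagated from the source (OUTPUT) buffer to the destination (CAPTURE)
 * buffer when a transaction completes.
 */
static void m2m_copy_timestamp(struct vb2_buffer *dst, struct vb2_buffer *src)
{
	dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp;
	dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->v4l2_buf.flags |= src->v4l2_buf.flags &
			       V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->v4l2_buf.timecode = src->v4l2_buf.timecode;
}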
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 32fab30a9105..8b34c485be79 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1238,7 +1238,7 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
return 0;
}
-static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
+static void mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
{
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
@@ -1246,7 +1246,6 @@ static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
if (sg_table)
dma_unmap_sg(cam->dev, sg_table->sgl,
sg_table->nents, DMA_FROM_DEVICE);
- return 0;
}
static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
index 08e24379b794..4f3096b17066 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -60,9 +60,7 @@ MODULE_PARM_DESC(debug, "activates debug info");
#define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024)
/* Default transaction time in msec */
-#define MEM2MEM_DEF_TRANSTIME 1000
-/* Default number of buffers per transaction */
-#define MEM2MEM_DEF_TRANSLEN 1
+#define MEM2MEM_DEF_TRANSTIME 40
#define MEM2MEM_COLOR_STEP (0xff >> 4)
#define MEM2MEM_NUM_TILES 8
@@ -114,6 +112,7 @@ struct m2mtest_q_data {
unsigned int width;
unsigned int height;
unsigned int sizeimage;
+ unsigned int sequence;
struct m2mtest_fmt *fmt;
};
@@ -236,9 +235,21 @@ static int device_process(struct m2mtest_ctx *ctx,
bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
w = 0;
+ out_vb->v4l2_buf.sequence = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++;
+ in_vb->v4l2_buf.sequence = q_data->sequence++;
memcpy(&out_vb->v4l2_buf.timestamp,
&in_vb->v4l2_buf.timestamp,
sizeof(struct timeval));
+ if (in_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TIMECODE)
+ memcpy(&out_vb->v4l2_buf.timecode, &in_vb->v4l2_buf.timecode,
+ sizeof(struct v4l2_timecode));
+ out_vb->v4l2_buf.field = in_vb->v4l2_buf.field;
+ out_vb->v4l2_buf.flags = in_vb->v4l2_buf.flags &
+ (V4L2_BUF_FLAG_TIMECODE |
+ V4L2_BUF_FLAG_KEYFRAME |
+ V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
switch (ctx->mode) {
case MEM2MEM_HFLIP | MEM2MEM_VFLIP:
@@ -505,19 +516,8 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
{
- enum v4l2_field field;
-
- field = f->fmt.pix.field;
-
- if (field == V4L2_FIELD_ANY)
- field = V4L2_FIELD_NONE;
- else if (V4L2_FIELD_NONE != field)
- return -EINVAL;
-
/* V4L2 specification suggests the driver corrects the format struct
* if any of the dimensions is unsupported */
- f->fmt.pix.field = field;
-
if (f->fmt.pix.height < MIN_H)
f->fmt.pix.height = MIN_H;
else if (f->fmt.pix.height > MAX_H)
@@ -531,6 +531,8 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
f->fmt.pix.width &= ~DIM_ALIGN_MASK;
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.priv = 0;
return 0;
}
@@ -542,7 +544,11 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct m2mtest_ctx *ctx = file2ctx(file);
fmt = find_format(f);
- if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+ if (!fmt) {
+ f->fmt.pix.pixelformat = formats[0].fourcc;
+ fmt = find_format(f);
+ }
+ if (!(fmt->types & MEM2MEM_CAPTURE)) {
v4l2_err(&ctx->dev->v4l2_dev,
"Fourcc format (0x%08x) invalid.\n",
f->fmt.pix.pixelformat);
@@ -560,7 +566,11 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
struct m2mtest_ctx *ctx = file2ctx(file);
fmt = find_format(f);
- if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+ if (!fmt) {
+ f->fmt.pix.pixelformat = formats[0].fourcc;
+ fmt = find_format(f);
+ }
+ if (!(fmt->types & MEM2MEM_OUTPUT)) {
v4l2_err(&ctx->dev->v4l2_dev,
"Fourcc format (0x%08x) invalid.\n",
f->fmt.pix.pixelformat);
@@ -740,6 +750,15 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
q_data = get_q_data(ctx, vb->vb2_queue->type);
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vb->v4l2_buf.field == V4L2_FIELD_ANY)
+ vb->v4l2_buf.field = V4L2_FIELD_NONE;
+ if (vb->v4l2_buf.field != V4L2_FIELD_NONE) {
+ dprintk(ctx->dev, "%s field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
@@ -755,13 +774,45 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
static void m2mtest_buf_queue(struct vb2_buffer *vb)
{
struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
+static int m2mtest_start_streaming(struct vb2_queue *q, unsigned count)
+{
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
+ struct m2mtest_q_data *q_data = get_q_data(ctx, q->type);
+
+ q_data->sequence = 0;
+ return 0;
+}
+
+static int m2mtest_stop_streaming(struct vb2_queue *q)
+{
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_buffer *vb;
+ unsigned long flags;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vb == NULL)
+ return 0;
+ spin_lock_irqsave(&ctx->dev->irqlock, flags);
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+ }
+ return 0;
+}
+
static struct vb2_ops m2mtest_qops = {
.queue_setup = m2mtest_queue_setup,
.buf_prepare = m2mtest_buf_prepare,
.buf_queue = m2mtest_buf_queue,
+ .start_streaming = m2mtest_start_streaming,
+ .stop_streaming = m2mtest_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
@@ -772,12 +823,12 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &m2mtest_qops;
src_vq->mem_ops = &vb2_vmalloc_memops;
- src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->dev_mutex;
ret = vb2_queue_init(src_vq);
@@ -785,12 +836,12 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &m2mtest_qops;
dst_vq->mem_ops = &vb2_vmalloc_memops;
- dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->dev_mutex;
return vb2_queue_init(dst_vq);
@@ -801,10 +852,10 @@ static const struct v4l2_ctrl_config m2mtest_ctrl_trans_time_msec = {
.id = V4L2_CID_TRANS_TIME_MSEC,
.name = "Transaction Time (msec)",
.type = V4L2_CTRL_TYPE_INTEGER,
- .def = 1001,
+ .def = MEM2MEM_DEF_TRANSTIME,
.min = 1,
.max = 10001,
- .step = 100,
+ .step = 1,
};
static const struct v4l2_ctrl_config m2mtest_ctrl_trans_num_bufs = {
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index c690435853bd..0b7480e82142 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -377,8 +377,13 @@ static irqreturn_t emmaprp_irq(int irq_emma, void *data)
src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
- src_vb->v4l2_buf.timestamp = dst_vb->v4l2_buf.timestamp;
- src_vb->v4l2_buf.timecode = dst_vb->v4l2_buf.timecode;
+ dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+ dst_vb->v4l2_buf.flags &=
+ ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.flags |=
+ src_vb->v4l2_buf.flags
+ & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
spin_lock_irqsave(&pcdev->irqlock, flags);
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
@@ -766,7 +771,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &emmaprp_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
- src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -778,7 +783,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &emmaprp_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
- dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
return vb2_queue_init(dst_vq);
}
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index dfd0a21a0658..9a726eacb29b 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -601,6 +601,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
switch (cur_display->type) {
case OMAP_DISPLAY_TYPE_DSI:
case OMAP_DISPLAY_TYPE_DPI:
+ case OMAP_DISPLAY_TYPE_DVI:
if (mgr_id == OMAP_DSS_CHANNEL_LCD)
irq = DISPC_IRQ_VSYNC;
else if (mgr_id == OMAP_DSS_CHANNEL_LCD2)
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index cf1c437a8687..62e7e5783ce8 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -270,7 +270,8 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
omap_start_dma(tx->dma_ch);
- interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
+ wait_event_interruptible_timeout(tx->wait, tx->tx_status == 1,
+ VRFB_TX_TIMEOUT);
if (tx->tx_status == 0) {
omap_stop_dma(tx->dma_ch);
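interruptible_sleep_on_timeout() is inherently racy (the wake-up can happen before the caller actually sleeps), which is why it is replaced above with the standard wait_event pattern. A stand-alone sketch of both sides of that pattern follows, with hypothetical names for the context structure and callback:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct dma_tx_ctx {			/* hypothetical context structure */
	wait_queue_head_t wait;
	int tx_status;
};

/* Wake-up side: the DMA completion callback records the status and wakes
 * any waiter checking the condition.
 */
static void dma_done_callback(struct dma_tx_ctx *tx)
{
	tx->tx_status = 1;
	wake_up_interruptible(&tx->wait);
}

/* Wait side: sleeps until tx_status is set, a signal arrives or the
 * timeout (in jiffies) expires.
 */
static int wait_for_dma(struct dma_tx_ctx *tx, unsigned long timeout)
{
	long ret;

	ret = wait_event_interruptible_timeout(tx->wait, tx->tx_status == 1,
					       timeout);
	if (ret == 0)
		return -ETIMEDOUT;	/* timed out, condition still false */
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	return 0;
}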
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 5807185262fe..06a0df434249 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -391,7 +391,7 @@ static void isp_disable_interrupts(struct isp_device *isp)
* @isp: OMAP3 ISP device
* @idle: Consider idle state.
*
- * Set the power settings for the ISP and SBL bus and cConfigure the HS/VS
+ * Set the power settings for the ISP and SBL bus and configure the HS/VS
* interrupt source.
*
* We need to configure the HS/VS interrupt source before interrupts get
@@ -588,9 +588,6 @@ static void isp_isr_sbl(struct isp_device *isp)
* @_isp: Pointer to the OMAP3 ISP device
*
* Handles the corresponding callback if plugged in.
- *
- * Returns IRQ_HANDLED when IRQ was correctly handled, or IRQ_NONE when the
- * IRQ wasn't handled.
*/
static irqreturn_t isp_isr(int irq, void *_isp)
{
@@ -1420,7 +1417,7 @@ int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
}
/*
- * omap3isp_module_sync_is_stopped - Helper to verify if module was stopping
+ * omap3isp_module_sync_is_stopping - Helper to verify if module was stopping
* @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
* @stopping: flag which tells module wants to stop
*
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 081f5ec5a663..6d5e69711907 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -265,7 +265,7 @@ void omap3isp_unregister_entities(struct platform_device *pdev);
/*
* isp_reg_readl - Read value of an OMAP3 ISP register
- * @dev: Device pointer specific to the OMAP3 ISP.
+ * @isp: Device pointer specific to the OMAP3 ISP.
* @isp_mmio_range: Range to which the register offset refers to.
* @reg_offset: Register offset to read from.
*
@@ -280,7 +280,7 @@ u32 isp_reg_readl(struct isp_device *isp, enum isp_mem_resources isp_mmio_range,
/*
* isp_reg_writel - Write value to an OMAP3 ISP register
- * @dev: Device pointer specific to the OMAP3 ISP.
+ * @isp: Device pointer specific to the OMAP3 ISP.
* @reg_value: 32 bit value to write to the register.
* @isp_mmio_range: Range to which the register offset refers to.
* @reg_offset: Register offset to write into.
@@ -293,8 +293,8 @@ void isp_reg_writel(struct isp_device *isp, u32 reg_value,
}
/*
- * isp_reg_and - Clear individual bits in an OMAP3 ISP register
- * @dev: Device pointer specific to the OMAP3 ISP.
+ * isp_reg_clr - Clear individual bits in an OMAP3 ISP register
+ * @isp: Device pointer specific to the OMAP3 ISP.
* @mmio_range: Range to which the register offset refers to.
* @reg: Register offset to work on.
* @clr_bits: 32 bit value which would be cleared in the register.
@@ -310,7 +310,7 @@ void isp_reg_clr(struct isp_device *isp, enum isp_mem_resources mmio_range,
/*
* isp_reg_set - Set individual bits in an OMAP3 ISP register
- * @dev: Device pointer specific to the OMAP3 ISP.
+ * @isp: Device pointer specific to the OMAP3 ISP.
* @mmio_range: Range to which the register offset refers to.
* @reg: Register offset to work on.
* @set_bits: 32 bit value which would be set in the register.
@@ -326,7 +326,7 @@ void isp_reg_set(struct isp_device *isp, enum isp_mem_resources mmio_range,
/*
* isp_reg_clr_set - Clear and set invidial bits in an OMAP3 ISP register
- * @dev: Device pointer specific to the OMAP3 ISP.
+ * @isp: Device pointer specific to the OMAP3 ISP.
* @mmio_range: Range to which the register offset refers to.
* @reg: Register offset to work on.
* @clr_bits: 32 bit value which would be cleared in the register.
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 5db2c88b9ad8..4d920c800ff5 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -293,7 +293,7 @@ static int __ccdc_lsc_enable(struct isp_ccdc_device *ccdc, int enable)
isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC,
ISPCCDC_LSC_CONFIG, ISPCCDC_LSC_ENABLE);
ccdc->lsc.state = LSC_STATE_STOPPED;
- dev_warn(to_device(ccdc), "LSC prefecth timeout\n");
+ dev_warn(to_device(ccdc), "LSC prefetch timeout\n");
return -ETIMEDOUT;
}
ccdc->lsc.state = LSC_STATE_RUNNING;
@@ -674,7 +674,7 @@ static void ccdc_config_imgattr(struct isp_ccdc_device *ccdc, u32 colptn)
/*
* ccdc_config - Set CCDC configuration from userspace
* @ccdc: Pointer to ISP CCDC device.
- * @userspace_add: Structure containing CCDC configuration sent from userspace.
+ * @ccdc_struct: Structure containing CCDC configuration sent from userspace.
*
* Returns 0 if successful, -EINVAL if the pointer to the configuration
* structure is null, or the copy_from_user function fails to copy user space
@@ -793,7 +793,7 @@ static void ccdc_apply_controls(struct isp_ccdc_device *ccdc)
/*
* omap3isp_ccdc_restore_context - Restore values of the CCDC module registers
- * @dev: Pointer to ISP device
+ * @isp: Pointer to ISP device
*/
void omap3isp_ccdc_restore_context(struct isp_device *isp)
{
@@ -2525,7 +2525,7 @@ error_video:
/*
* omap3isp_ccdc_init - CCDC module initialization.
- * @dev: Device pointer specific to the OMAP3 ISP.
+ * @isp: Device pointer specific to the OMAP3 ISP.
*
* TODO: Get the initialisation values from platform data.
*
@@ -2564,7 +2564,7 @@ int omap3isp_ccdc_init(struct isp_device *isp)
/*
* omap3isp_ccdc_cleanup - CCDC module cleanup.
- * @dev: Device pointer specific to the OMAP3 ISP.
+ * @isp: Device pointer specific to the OMAP3 ISP.
*/
void omap3isp_ccdc_cleanup(struct isp_device *isp)
{
diff --git a/drivers/media/platform/omap3isp/ispccdc.h b/drivers/media/platform/omap3isp/ispccdc.h
index a5da9e19edbf..9d24e4107864 100644
--- a/drivers/media/platform/omap3isp/ispccdc.h
+++ b/drivers/media/platform/omap3isp/ispccdc.h
@@ -63,12 +63,6 @@ struct ispccdc_lsc_config_req {
/*
* ispccdc_lsc - CCDC LSC parameters
- * @update_config: Set when user changes config
- * @request_enable: Whether LSC is requested to be enabled
- * @config: LSC config set by user
- * @update_table: Set when user provides a new LSC table to table_new
- * @table_new: LSC table set by user, ISP address
- * @table_inuse: LSC table currently in use, ISP address
*/
struct ispccdc_lsc {
enum ispccdc_lsc_state state;
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index e84fe0543e47..b30b67d22a58 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -211,7 +211,7 @@ static void ccp2_mem_enable(struct isp_ccp2_device *ccp2, u8 enable)
/*
* ccp2_phyif_config - Initialize CCP2 phy interface config
* @ccp2: Pointer to ISP CCP2 device
- * @config: CCP2 platform data
+ * @pdata: CCP2 platform data
*
* Configure the CCP2 physical interface module from platform data.
*
@@ -518,7 +518,7 @@ static void ccp2_mem_configure(struct isp_ccp2_device *ccp2,
ISPCCP2_LCM_IRQSTATUS_EOF_IRQ,
OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQSTATUS);
- /* Enable LCM interupts */
+ /* Enable LCM interrupts */
isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQENABLE,
ISPCCP2_LCM_IRQSTATUS_EOF_IRQ |
ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ);
@@ -1096,7 +1096,7 @@ static int ccp2_init_entities(struct isp_ccp2_device *ccp2)
* implementation we use a fixed 32 bytes alignment regardless of the
* input format and width. If strict 128 bits alignment support is
* required ispvideo will need to be made aware of this special dual
- * alignement requirements.
+ * alignment requirements.
*/
ccp2->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
ccp2->video_in.bpl_alignment = 32;
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index e070c24048ef..06a5f8164eaa 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -299,7 +299,7 @@ static u32 hist_get_buf_size(struct omap3isp_hist_config *conf)
/*
* hist_validate_params - Helper function to check user given params.
- * @user_cfg: Pointer to user configuration structure.
+ * @new_conf: Pointer to user configuration structure.
*
 * Returns 0 on successful configuration.
*/
@@ -351,7 +351,7 @@ static int hist_validate_params(struct ispstat *hist, void *new_conf)
buf_size = hist_get_buf_size(user_cfg);
if (buf_size > user_cfg->buf_size)
- /* User's buf_size request wasn't enoght */
+ /* User's buf_size request wasn't enough */
user_cfg->buf_size = buf_size;
else if (user_cfg->buf_size > OMAP3ISP_HIST_MAX_BUF_SIZE)
user_cfg->buf_size = OMAP3ISP_HIST_MAX_BUF_SIZE;
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 1c776c1186f1..395b2b068c75 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -122,7 +122,7 @@ static struct omap3isp_prev_csc flr_prev_csc = {
#define PREV_MAX_OUT_WIDTH_REV_15 4096
/*
- * Coeficient Tables for the submodules in Preview.
+ * Coefficient Tables for the submodules in Preview.
 * Array is initialised with the values from the tables text file.
*/
@@ -971,7 +971,8 @@ static void preview_setup_hw(struct isp_prev_device *prev, u32 update,
/*
* preview_config_ycpos - Configure byte layout of YUV image.
- * @mode: Indicates the required byte layout.
+ * @prev: pointer to previewer private structure
+ * @pixelcode: pixel code
*/
static void
preview_config_ycpos(struct isp_prev_device *prev,
@@ -1079,6 +1080,7 @@ static void preview_config_input_format(struct isp_prev_device *prev,
*/
static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
{
+ const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
struct isp_device *isp = to_isp_device(prev);
unsigned int sph = prev->crop.left;
unsigned int eph = prev->crop.left + prev->crop.width - 1;
@@ -1086,6 +1088,14 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
unsigned int elv = prev->crop.top + prev->crop.height - 1;
u32 features;
+ if (format->code != V4L2_MBUS_FMT_Y8_1X8 &&
+ format->code != V4L2_MBUS_FMT_Y10_1X10) {
+ sph -= 2;
+ eph += 2;
+ slv -= 2;
+ elv += 2;
+ }
+
features = (prev->params.params[0].features & active)
| (prev->params.params[1].features & ~active);
@@ -1363,8 +1373,8 @@ static void preview_init_params(struct isp_prev_device *prev)
}
/*
- * preview_max_out_width - Handle previewer hardware ouput limitations
- * @isp_revision : ISP revision
+ * preview_max_out_width - Handle previewer hardware output limitations
+ * @prev: pointer to previewer private structure
* returns maximum width output for current isp revision
*/
static unsigned int preview_max_out_width(struct isp_prev_device *prev)
@@ -1610,7 +1620,7 @@ static const struct v4l2_ctrl_ops preview_ctrl_ops = {
/*
* preview_ioctl - Handle preview module private ioctl's
- * @prev: pointer to preview context structure
+ * @sd: pointer to v4l2 subdev structure
* @cmd: configuration command
* @arg: configuration argument
* return -EINVAL or zero on success
@@ -2341,7 +2351,7 @@ error_video_in:
/*
* omap3isp_preview_init - Previewer initialization.
- * @dev : Pointer to ISP device
+ * @isp : Pointer to ISP device
* return -ENOMEM or zero on success
*/
int omap3isp_preview_init(struct isp_device *isp)
diff --git a/drivers/media/platform/omap3isp/ispqueue.c b/drivers/media/platform/omap3isp/ispqueue.c
index 5f0f8fab1d17..a5e65858e799 100644
--- a/drivers/media/platform/omap3isp/ispqueue.c
+++ b/drivers/media/platform/omap3isp/ispqueue.c
@@ -597,7 +597,7 @@ static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
* isp_video_queue_free - Free video buffers memory
*
* Buffers can only be freed if the queue isn't streaming and if no buffer is
- * mapped to userspace. Return -EBUSY if those conditions aren't statisfied.
+ * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
*
* This function must be called with the queue lock held.
*/
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 0d36b8bc9f98..86369df81d74 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -206,7 +206,7 @@ static void resizer_set_bilinear(struct isp_res_device *res,
/*
* resizer_set_ycpos - Luminance and chrominance order
* @res: Device context.
- * @order: order type.
+ * @pixelcode: pixel code.
*/
static void resizer_set_ycpos(struct isp_res_device *res,
enum v4l2_mbus_pixelcode pixelcode)
@@ -918,8 +918,8 @@ static void resizer_calc_ratios(struct isp_res_device *res,
/*
* resizer_set_crop_params - Setup hardware with cropping parameters
* @res : resizer private structure
- * @crop_rect : current crop rectangle
- * @ratio : resizer ratios
+ * @input : format on sink pad
+ * @output : format on source pad
* return none
*/
static void resizer_set_crop_params(struct isp_res_device *res,
diff --git a/drivers/media/platform/omap3isp/ispresizer.h b/drivers/media/platform/omap3isp/ispresizer.h
index 70c1c0e1bbdf..9b01e9047c15 100644
--- a/drivers/media/platform/omap3isp/ispresizer.h
+++ b/drivers/media/platform/omap3isp/ispresizer.h
@@ -30,12 +30,12 @@
#include <linux/types.h>
/*
- * Constants for filter coefficents count
+ * Constants for filter coefficients count
*/
#define COEFF_CNT 32
/*
- * struct isprsz_coef - Structure for resizer filter coeffcients.
+ * struct isprsz_coef - Structure for resizer filter coefficients.
* @h_filter_coef_4tap: Horizontal filter coefficients for 8-phase/4-tap
* mode (.5x-4x)
* @v_filter_coef_4tap: Vertical filter coefficients for 8-phase/4-tap
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index a75407c3a726..5707f85c4cc4 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -144,7 +144,7 @@ static int isp_stat_buf_check_magic(struct ispstat *stat,
for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
w < end; w++) {
if (unlikely(*w != MAGIC_NUM)) {
- dev_dbg(stat->isp->dev, "%s: endding magic check does "
+ dev_dbg(stat->isp->dev, "%s: ending magic check does "
"not match.\n", stat->subdev.name);
return -EINVAL;
}
@@ -841,7 +841,7 @@ int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable)
if (enable) {
/*
* Only set enable PCR bit if the module was previously
- * enabled through ioct.
+ * enabled through ioctl.
*/
isp_stat_try_enable(stat);
} else {
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 856fdf554035..85b4036ba5e4 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -333,7 +333,7 @@ isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
/*
* ispmmu_vmap - Wrapper for Virtual memory mapping of a scatter gather list
- * @dev: Device pointer specific to the OMAP3 ISP.
+ * @isp: Device pointer specific to the OMAP3 ISP.
* @sglist: Pointer to source Scatter gather list to allocate.
* @sglen: Number of elements of the scatter-gatter list.
*
@@ -363,7 +363,7 @@ ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
/*
* ispmmu_vunmap - Unmap a device address from the ISP MMU
- * @dev: Device pointer specific to the OMAP3 ISP.
+ * @isp: Device pointer specific to the OMAP3 ISP.
* @da: Device address generated from a ispmmu_vmap call.
*/
static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
@@ -886,7 +886,11 @@ static int isp_video_check_external_subdevs(struct isp_video *video,
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
unsigned int i;
- int ret = 0;
+ int ret;
+
+ /* Memory-to-memory pipelines have no external subdev. */
+ if (pipe->input != NULL)
+ return 0;
for (i = 0; i < ARRAY_SIZE(ents); i++) {
/* Is the entity part of the pipeline? */
@@ -905,7 +909,7 @@ static int isp_video_check_external_subdevs(struct isp_video *video,
if (!source) {
dev_warn(isp->dev, "can't find source, failing now\n");
- return ret;
+ return -EINVAL;
}
if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 40b298ab87f1..4e4d1631e042 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -1160,7 +1160,7 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct camif_buffer);
q->drv_priv = vp;
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(q);
if (ret)
@@ -1592,26 +1592,27 @@ int s3c_camif_create_subdev(struct camif_dev *camif)
ARRAY_SIZE(s3c_camif_test_pattern_menu) - 1, 0, 0,
s3c_camif_test_pattern_menu);
- camif->ctrl_colorfx = v4l2_ctrl_new_std_menu(handler,
+ if (camif->variant->has_img_effect) {
+ camif->ctrl_colorfx = v4l2_ctrl_new_std_menu(handler,
&s3c_camif_subdev_ctrl_ops,
V4L2_CID_COLORFX, V4L2_COLORFX_SET_CBCR,
~0x981f, V4L2_COLORFX_NONE);
- camif->ctrl_colorfx_cbcr = v4l2_ctrl_new_std(handler,
+ camif->ctrl_colorfx_cbcr = v4l2_ctrl_new_std(handler,
&s3c_camif_subdev_ctrl_ops,
V4L2_CID_COLORFX_CBCR, 0, 0xffff, 1, 0);
+ }
+
if (handler->error) {
v4l2_ctrl_handler_free(handler);
media_entity_cleanup(&sd->entity);
return handler->error;
}
- v4l2_ctrl_auto_cluster(2, &camif->ctrl_colorfx,
+ if (camif->variant->has_img_effect)
+ v4l2_ctrl_auto_cluster(2, &camif->ctrl_colorfx,
V4L2_COLORFX_SET_CBCR, false);
- if (!camif->variant->has_img_effect) {
- camif->ctrl_colorfx->flags |= V4L2_CTRL_FLAG_DISABLED;
- camif->ctrl_colorfx_cbcr->flags |= V4L2_CTRL_FLAG_DISABLED;
- }
+
sd->ctrl_handler = handler;
v4l2_set_subdevdata(sd, camif);
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 0fcf7d75e841..357af1ebaeda 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -157,7 +157,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &g2d_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->mutex;
ret = vb2_queue_init(src_vq);
@@ -170,7 +170,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &g2d_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->mutex;
return vb2_queue_init(dst_vq);
@@ -560,6 +560,9 @@ static irqreturn_t g2d_isr(int irq, void *prv)
dst->v4l2_buf.timecode = src->v4l2_buf.timecode;
dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp;
+ dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->v4l2_buf.flags |=
+ src->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 7d68d0b9966a..8a18972012f7 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1701,7 +1701,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &s5p_jpeg_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
- src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->jpeg->lock;
ret = vb2_queue_init(src_vq);
@@ -1714,7 +1714,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &s5p_jpeg_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
- dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->jpeg->lock;
return vb2_queue_init(dst_vq);
@@ -1766,6 +1766,9 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->v4l2_buf.flags |=
+ src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
v4l2_m2m_buf_done(src_buf, state);
if (curr_ctx->mode == S5P_JPEG_ENCODE)
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
index 33f2c7374cfd..57fb05bb8c77 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
@@ -210,19 +210,19 @@
/* JPEG CNTL Register bit */
#define EXYNOS4_ENC_DEC_MODE_MASK (0xfffffffc << 0)
-#define EXYNOS4_DEC_MODE (1 << 0)
-#define EXYNOS4_ENC_MODE (1 << 1)
+#define EXYNOS4_DEC_MODE (1 << 0)
+#define EXYNOS4_ENC_MODE (1 << 1)
#define EXYNOS4_AUTO_RST_MARKER (1 << 2)
#define EXYNOS4_RST_INTERVAL_SHIFT 3
#define EXYNOS4_RST_INTERVAL(x) (((x) & 0xffff) \
<< EXYNOS4_RST_INTERVAL_SHIFT)
#define EXYNOS4_HUF_TBL_EN (1 << 19)
#define EXYNOS4_HOR_SCALING_SHIFT 20
-#define EXYNOS4_HOR_SCALING_MASK (3 << EXYNOS4_HOR_SCALING_SHIFT)
+#define EXYNOS4_HOR_SCALING_MASK (3 << EXYNOS4_HOR_SCALING_SHIFT)
#define EXYNOS4_HOR_SCALING(x) (((x) & 0x3) \
<< EXYNOS4_HOR_SCALING_SHIFT)
#define EXYNOS4_VER_SCALING_SHIFT 22
-#define EXYNOS4_VER_SCALING_MASK (3 << EXYNOS4_VER_SCALING_SHIFT)
+#define EXYNOS4_VER_SCALING_MASK (3 << EXYNOS4_VER_SCALING_SHIFT)
#define EXYNOS4_VER_SCALING(x) (((x) & 0x3) \
<< EXYNOS4_VER_SCALING_SHIFT)
#define EXYNOS4_PADDING (1 << 27)
@@ -238,8 +238,8 @@
#define EXYNOS4_FRAME_ERR_EN (1 << 4)
#define EXYNOS4_INT_EN_ALL (0x1f << 0)
-#define EXYNOS4_MOD_REG_PROC_ENC (0 << 3)
-#define EXYNOS4_MOD_REG_PROC_DEC (1 << 3)
+#define EXYNOS4_MOD_REG_PROC_ENC (0 << 3)
+#define EXYNOS4_MOD_REG_PROC_DEC (1 << 3)
#define EXYNOS4_MOD_REG_SUBSAMPLE_444 (0 << 0)
#define EXYNOS4_MOD_REG_SUBSAMPLE_422 (1 << 0)
@@ -270,7 +270,7 @@
#define EXYNOS4_DEC_YUV_420_IMG (4 << 0)
#define EXYNOS4_GRAY_IMG_IP_SHIFT 3
-#define EXYNOS4_GRAY_IMG_IP_MASK (7 << EXYNOS4_GRAY_IMG_IP_SHIFT)
+#define EXYNOS4_GRAY_IMG_IP_MASK (7 << EXYNOS4_GRAY_IMG_IP_SHIFT)
#define EXYNOS4_GRAY_IMG_IP (4 << EXYNOS4_GRAY_IMG_IP_SHIFT)
#define EXYNOS4_RGB_IP_SHIFT 6
@@ -278,18 +278,18 @@
#define EXYNOS4_RGB_IP_RGB_16BIT_IMG (4 << EXYNOS4_RGB_IP_SHIFT)
#define EXYNOS4_RGB_IP_RGB_32BIT_IMG (5 << EXYNOS4_RGB_IP_SHIFT)
-#define EXYNOS4_YUV_444_IP_SHIFT 9
+#define EXYNOS4_YUV_444_IP_SHIFT 9
#define EXYNOS4_YUV_444_IP_MASK (7 << EXYNOS4_YUV_444_IP_SHIFT)
#define EXYNOS4_YUV_444_IP_YUV_444_2P_IMG (4 << EXYNOS4_YUV_444_IP_SHIFT)
#define EXYNOS4_YUV_444_IP_YUV_444_3P_IMG (5 << EXYNOS4_YUV_444_IP_SHIFT)
-#define EXYNOS4_YUV_422_IP_SHIFT 12
+#define EXYNOS4_YUV_422_IP_SHIFT 12
#define EXYNOS4_YUV_422_IP_MASK (7 << EXYNOS4_YUV_422_IP_SHIFT)
#define EXYNOS4_YUV_422_IP_YUV_422_1P_IMG (4 << EXYNOS4_YUV_422_IP_SHIFT)
#define EXYNOS4_YUV_422_IP_YUV_422_2P_IMG (5 << EXYNOS4_YUV_422_IP_SHIFT)
#define EXYNOS4_YUV_422_IP_YUV_422_3P_IMG (6 << EXYNOS4_YUV_422_IP_SHIFT)
-#define EXYNOS4_YUV_420_IP_SHIFT 15
+#define EXYNOS4_YUV_420_IP_SHIFT 15
#define EXYNOS4_YUV_420_IP_MASK (7 << EXYNOS4_YUV_420_IP_SHIFT)
#define EXYNOS4_YUV_420_IP_YUV_420_2P_IMG (4 << EXYNOS4_YUV_420_IP_SHIFT)
#define EXYNOS4_YUV_420_IP_YUV_420_3P_IMG (5 << EXYNOS4_YUV_420_IP_SHIFT)
@@ -303,8 +303,8 @@
#define EXYNOS4_JPEG_DECODED_IMG_FMT_MASK 0x03
-#define EXYNOS4_SWAP_CHROMA_CRCB (1 << 26)
-#define EXYNOS4_SWAP_CHROMA_CBCR (0 << 26)
+#define EXYNOS4_SWAP_CHROMA_CRCB (1 << 26)
+#define EXYNOS4_SWAP_CHROMA_CBCR (0 << 26)
/* JPEG HUFF count Register bit */
#define EXYNOS4_HUFF_COUNT_MASK 0xffff
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
index 2398cdf61341..8d0b686d9adb 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
@@ -229,6 +229,7 @@
#define S5P_FIMV_E_PADDING_CTRL_V6 0xf7a4
#define S5P_FIMV_E_MV_HOR_RANGE_V6 0xf7ac
#define S5P_FIMV_E_MV_VER_RANGE_V6 0xf7b0
+#define S5P_FIMV_E_MV_RANGE_V6_MASK 0x3fff
#define S5P_FIMV_E_VBV_BUFFER_SIZE_V6 0xf84c
#define S5P_FIMV_E_VBV_INIT_DELAY_V6 0xf850
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index e2aac592d29f..89356ae90238 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -232,6 +232,11 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
src_buf->b->v4l2_buf.timecode;
dst_buf->b->v4l2_buf.timestamp =
src_buf->b->v4l2_buf.timestamp;
+ dst_buf->b->v4l2_buf.flags &=
+ ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->b->v4l2_buf.flags |=
+ src_buf->b->v4l2_buf.flags
+ & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
switch (frame_type) {
case S5P_FIMV_DECODE_FRAME_I_FRAME:
dst_buf->b->v4l2_buf.flags |=
@@ -794,7 +799,7 @@ static int s5p_mfc_open(struct file *file)
goto err_queue_init;
}
q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(q);
if (ret) {
mfc_err("Failed to initialize videobuf2 queue(capture)\n");
@@ -816,7 +821,7 @@ static int s5p_mfc_open(struct file *file)
goto err_queue_init;
}
q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(q);
if (ret) {
mfc_err("Failed to initialize videobuf2 queue(output)\n");
@@ -1147,9 +1152,9 @@ static int s5p_mfc_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_dec_alloc;
}
- vfd->fops = &s5p_mfc_fops,
+ vfd->fops = &s5p_mfc_fops;
vfd->ioctl_ops = get_dec_v4l2_ioctl_ops();
- vfd->release = video_device_release,
+ vfd->release = video_device_release;
vfd->lock = &dev->mfc_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->vfl_dir = VFL_DIR_M2M;
@@ -1172,9 +1177,9 @@ static int s5p_mfc_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_enc_alloc;
}
- vfd->fops = &s5p_mfc_fops,
+ vfd->fops = &s5p_mfc_fops;
vfd->ioctl_ops = get_enc_v4l2_ioctl_ops();
- vfd->release = video_device_release,
+ vfd->release = video_device_release;
vfd->lock = &dev->mfc_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->vfl_dir = VFL_DIR_M2M;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index f723f1f2f578..5c28cc3e699b 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -426,6 +426,8 @@ struct s5p_mfc_vp8_enc_params {
struct s5p_mfc_enc_params {
u16 width;
u16 height;
+ u32 mv_h_range;
+ u32 mv_v_range;
u16 gop_size;
enum v4l2_mpeg_video_multi_slice_mode slice_mode;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 2475a3c9a0a6..ee05f2dd439b 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -44,8 +44,6 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
return -ENOMEM;
}
- dev->bank1 = dev->bank1;
-
if (HAS_PORTNUM(dev) && IS_TWOPORT(dev)) {
bank2_virt = dma_alloc_coherent(dev->mem_dev_r, 1 << MFC_BASE_ALIGN_ORDER,
&bank2_dma_addr, GFP_KERNEL);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 91b6e020ddf3..df83cd157bab 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -208,6 +208,24 @@ static struct mfc_control controls[] = {
.default_value = 0,
},
{
+ .id = V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Horizontal MV Search Range",
+ .minimum = 16,
+ .maximum = 128,
+ .step = 16,
+ .default_value = 32,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Vertical MV Search Range",
+ .minimum = 16,
+ .maximum = 128,
+ .step = 16,
+ .default_value = 32,
+ },
+ {
.id = V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = 0,
@@ -1417,6 +1435,12 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
p->vbv_size = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
+ p->mv_h_range = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
+ p->mv_v_range = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
p->codec.h264.cpb_size = ctrl->val;
break;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index f6ff2dbf3a1d..f64621ae9b5a 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -727,14 +727,10 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);
/* setting for MV range [16, 256] */
- reg = 0;
- reg &= ~(0x3FFF);
- reg = 256;
+ reg = (p->mv_h_range & S5P_FIMV_E_MV_RANGE_V6_MASK);
WRITEL(reg, S5P_FIMV_E_MV_HOR_RANGE_V6);
- reg = 0;
- reg &= ~(0x3FFF);
- reg = 256;
+ reg = (p->mv_v_range & S5P_FIMV_E_MV_RANGE_V6_MASK);
WRITEL(reg, S5P_FIMV_E_MV_VER_RANGE_V6);
WRITEL(0x0, S5P_FIMV_E_FRAME_INSERTION_V6);
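From user space, the motion-vector search range controls registered in s5p_mfc_enc.c above are set through the extended-control interface before streaming starts. A hedged example follows; the file descriptor and the chosen values (which must be multiples of 16 within 16..128, per the control definitions) are arbitrary.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Illustrative only: request a 64x32 motion-vector search window on an
 * already-open encoder video node.
 */
static int set_mv_search_range(int fd)
{
	struct v4l2_ext_control ctrl[2];
	struct v4l2_ext_controls ctrls;

	memset(ctrl, 0, sizeof(ctrl));
	memset(&ctrls, 0, sizeof(ctrls));

	ctrl[0].id = V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE;
	ctrl[0].value = 64;
	ctrl[1].id = V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE;
	ctrl[1].value = 32;

	ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
	ctrls.count = 2;
	ctrls.controls = ctrl;

	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}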
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index c5059ba0d733..a1ce55fd30f3 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -946,11 +946,6 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
mxr_dbg(mdev, "%s\n", __func__);
- if (count == 0) {
- mxr_dbg(mdev, "no output buffers queued\n");
- return -ENOBUFS;
- }
-
/* block any changes in output configuration */
mxr_output_get(mdev);
@@ -1124,6 +1119,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
.drv_priv = layer,
.buf_struct_size = sizeof(struct mxr_buffer),
.ops = &mxr_video_qops,
+ .min_buffers_needed = 1,
.mem_ops = &vb2_dma_contig_memops,
};
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 4835173d7f80..f0b6c900034d 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -472,7 +472,7 @@ static int isi_camera_init_videobuf(struct vb2_queue *q,
q->buf_struct_size = sizeof(struct frame_buffer);
q->ops = &isi_video_qops;
q->mem_ops = &vb2_dma_contig_memops;
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
return vb2_queue_init(q);
}
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index d73abca9c6ee..3e844803bdca 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -794,7 +794,7 @@ static int mx2_camera_init_videobuf(struct vb2_queue *q,
q->ops = &mx2_videobuf_ops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct mx2_buffer);
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
return vb2_queue_init(q);
}
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index f975b7008692..9ed81ac6881c 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -453,7 +453,7 @@ static int mx3_camera_init_videobuf(struct vb2_queue *q,
q->ops = &mx3_videobuf_ops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct mx3_camera_buffer);
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
return vb2_queue_init(q);
}
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 3b1c05a72d00..704eee766487 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -68,6 +68,8 @@
#define VNMC_YCAL (1 << 19)
#define VNMC_INF_YUV8_BT656 (0 << 16)
#define VNMC_INF_YUV8_BT601 (1 << 16)
+#define VNMC_INF_YUV10_BT656 (2 << 16)
+#define VNMC_INF_YUV10_BT601 (3 << 16)
#define VNMC_INF_YUV16 (5 << 16)
#define VNMC_VUP (1 << 10)
#define VNMC_IM_ODD (0 << 3)
@@ -275,6 +277,12 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
vnmc |= priv->pdata->flags & RCAR_VIN_BT656 ?
VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
+ break;
+ case V4L2_MBUS_FMT_YUYV10_2X10:
+ /* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
+ vnmc |= priv->pdata->flags & RCAR_VIN_BT656 ?
+ VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
+ break;
default:
break;
}
@@ -1003,6 +1011,7 @@ static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
switch (code) {
case V4L2_MBUS_FMT_YUYV8_1X16:
case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_YUYV10_2X10:
if (cam->extra_fmt)
break;
@@ -1360,7 +1369,7 @@ static int rcar_vin_init_videobuf2(struct vb2_queue *vq,
vq->ops = &rcar_vin_vb2_ops;
vq->mem_ops = &vb2_dma_contig_memops;
vq->buf_struct_size = sizeof(struct rcar_vin_buffer);
- vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
return vb2_queue_init(vq);
}
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 150bd4df413c..3e75a469cd49 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1665,7 +1665,7 @@ static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
q->ops = &sh_mobile_ceu_videobuf_ops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer);
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
return vb2_queue_init(q);
}
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 1296c5386231..7a77a5b7a075 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -1278,6 +1278,8 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
d_buf = &d_vb->v4l2_buf;
d_buf->timestamp = s_buf->timestamp;
+ d_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ d_buf->flags |= s_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) {
d_buf->flags |= V4L2_BUF_FLAG_TIMECODE;
d_buf->timecode = s_buf->timecode;
@@ -1770,7 +1772,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &vpe_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
- src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -1783,7 +1785,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &vpe_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
- dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
return vb2_queue_init(dst_vq);
}
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
index 2d4e73b45c5e..3890f4f42a78 100644
--- a/drivers/media/platform/vivi.c
+++ b/drivers/media/platform/vivi.c
@@ -70,10 +70,6 @@ static unsigned debug;
module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "activates debug info");
-static unsigned int vid_limit = 16;
-module_param(vid_limit, uint, 0644);
-MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
-
/* Global font descriptor */
static const u8 *font8x16;
@@ -191,7 +187,6 @@ struct vivi_buffer {
/* common v4l buffer stuff -- must be first */
struct vb2_buffer vb;
struct list_head list;
- const struct vivi_fmt *fmt;
};
struct vivi_dmaqueue {
@@ -254,7 +249,7 @@ struct vivi_dev {
struct v4l2_fract timeperframe;
unsigned int width, height;
struct vb2_queue vb_vidq;
- unsigned int field_count;
+ unsigned int seq_count;
u8 bars[9][3];
u8 line[MAX_WIDTH * 8] __attribute__((__aligned__(4)));
@@ -675,8 +670,7 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
dev->mv_count += 2;
buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
- dev->field_count++;
- buf->vb.v4l2_buf.sequence = dev->field_count >> 1;
+ buf->vb.v4l2_buf.sequence = dev->seq_count++;
v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
}
@@ -818,19 +812,15 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
struct vivi_dev *dev = vb2_get_drv_priv(vq);
unsigned long size;
- if (fmt)
+ size = dev->width * dev->height * dev->pixelsize;
+ if (fmt) {
+ if (fmt->fmt.pix.sizeimage < size)
+ return -EINVAL;
size = fmt->fmt.pix.sizeimage;
- else
- size = dev->width * dev->height * dev->pixelsize;
-
- if (size == 0)
- return -EINVAL;
-
- if (0 == *nbuffers)
- *nbuffers = 32;
-
- while (size * *nbuffers > vid_limit * 1024 * 1024)
- (*nbuffers)--;
+ /* reject insanely large buffers (beyond 8K resolution) */
+ if (size > 7680 * 4320 * dev->pixelsize)
+ return -EINVAL;
+ }
*nplanes = 1;
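The queue_setup() rework above drops the vid_limit module parameter and instead validates a caller-supplied sizeimage: it must cover at least one frame at the current format and must not exceed an 8K-sized frame. A minimal userspace sketch of that check, using hypothetical width/height/pixelsize values in place of the vivi_dev state:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical frame geometry standing in for dev->width/height/pixelsize. */
#define WIDTH       1280
#define HEIGHT      720
#define PIXELSIZE   2                           /* bytes per pixel, e.g. YUYV */
#define MAX_8K_BYTES (7680UL * 4320 * PIXELSIZE)

/* Mirror of the hunk's logic: 0 means "no requested size, driver picks". */
static bool sizeimage_ok(unsigned long sizeimage)
{
    unsigned long size = (unsigned long)WIDTH * HEIGHT * PIXELSIZE;

    if (!sizeimage)
        return true;            /* no user-requested size, use driver's own */
    if (sizeimage < size)
        return false;           /* too small to hold one frame */
    if (sizeimage > MAX_8K_BYTES)
        return false;           /* larger than an 8K frame: reject */
    return true;
}

int main(void)
{
    printf("%d %d %d\n", sizeimage_ok(0),
           sizeimage_ok(WIDTH * HEIGHT * PIXELSIZE),
           sizeimage_ok(1));    /* prints: 1 1 0 */
    return 0;
}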
@@ -876,8 +866,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
vb2_set_plane_payload(&buf->vb, 0, size);
- buf->fmt = dev->fmt;
-
precalculate_bars(dev);
precalculate_line(dev);
@@ -901,8 +889,20 @@ static void buffer_queue(struct vb2_buffer *vb)
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vivi_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
dprintk(dev, 1, "%s\n", __func__);
- return vivi_start_generating(dev);
+ dev->seq_count = 0;
+ err = vivi_start_generating(dev);
+ if (err) {
+ struct vivi_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->vidq.active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
}
/* abort streaming and wait for last buffer */
@@ -1121,7 +1121,11 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
if (!fmt)
return -EINVAL;
- /* regarding width & height - we support any */
+ /* check for valid width/height */
+ if (fival->width < 48 || fival->width > MAX_WIDTH || (fival->width & 3))
+ return -EINVAL;
+ if (fival->height < 32 || fival->height > MAX_HEIGHT)
+ return -EINVAL;
fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
@@ -1439,7 +1443,7 @@ static int __init vivi_create_instance(int inst)
q->buf_struct_size = sizeof(struct vivi_buffer);
q->ops = &vivi_video_qops;
q->mem_ops = &vb2_vmalloc_memops;
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(q);
if (ret)
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 94d1b02680c5..0313210c6e9e 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -1,7 +1,7 @@
/*
* vsp1.h -- R-Car VSP1 Driver
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 0df0a994e575..2f74f0e0ddf5 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -1,7 +1,7 @@
/*
* vsp1_drv.c -- R-Car VSP1 Driver
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index 0226e47df6d9..3fc9e4266caf 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -1,7 +1,7 @@
/*
* vsp1_entity.c -- R-Car VSP1 Base Entity
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index e152798d7f38..f6fd6988aeb0 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -1,7 +1,7 @@
/*
* vsp1_entity.h -- R-Car VSP1 Base Entity
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 74a32e69ef10..135a78957014 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -1,7 +1,7 @@
/*
* vsp1_lif.c -- R-Car VSP1 LCD Controller Interface
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_lif.h b/drivers/media/platform/vsp1/vsp1_lif.h
index 89b93af56fdc..7b35879028de 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.h
+++ b/drivers/media/platform/vsp1/vsp1_lif.h
@@ -1,7 +1,7 @@
/*
* vsp1_lif.h -- R-Car VSP1 LCD Controller Interface
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index bce2be5466b9..2b04d0f95c62 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -1,7 +1,7 @@
/*
* vsp1_rpf.c -- R-Car VSP1 Read Pixel Formatter
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 782f770daee5..ec3dab6a9b9b 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -1,7 +1,7 @@
/*
* vsp1_rwpf.c -- R-Car VSP1 Read and Write Pixel Formatters
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 6cbdb547470b..5c5ee81bbeae 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -1,7 +1,7 @@
/*
* vsp1_rwpf.h -- R-Car VSP1 Read and Write Pixel Formatters
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index 0e50b37f060d..622342ac7770 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -1,7 +1,7 @@
/*
* vsp1_uds.c -- R-Car VSP1 Up and Down Scaler
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_uds.h b/drivers/media/platform/vsp1/vsp1_uds.h
index 972a285abdb9..479d12df1180 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.h
+++ b/drivers/media/platform/vsp1/vsp1_uds.h
@@ -1,7 +1,7 @@
/*
* vsp1_uds.h -- R-Car VSP1 Up and Down Scaler
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index b4687a834f85..b48f135ffc01 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -1,7 +1,7 @@
/*
* vsp1_video.c -- R-Car VSP1 Video Node
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -1051,7 +1051,7 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf)
video->queue.buf_struct_size = sizeof(struct vsp1_video_buffer);
video->queue.ops = &vsp1_video_queue_qops;
video->queue.mem_ops = &vb2_dma_contig_memops;
- video->queue.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(&video->queue);
if (ret < 0) {
dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
index d8612a378345..53e4b3745940 100644
--- a/drivers/media/platform/vsp1/vsp1_video.h
+++ b/drivers/media/platform/vsp1/vsp1_video.h
@@ -1,7 +1,7 @@
/*
* vsp1_video.h -- R-Car VSP1 Video Node
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 7baed81ff005..11a61c601da0 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -1,7 +1,7 @@
/*
* vsp1_wpf.c -- R-Car VSP1 Write Pixel Formatter
*
- * Copyright (C) 2013 Renesas Corporation
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 545c04cf7226..d719e59e2179 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -270,6 +270,16 @@ reset_rds:
outb(inb(dev->io + 1) & 0x7f, dev->io + 1);
}
+static bool cadet_has_rds_data(struct cadet *dev)
+{
+ bool result;
+
+ mutex_lock(&dev->lock);
+ result = dev->rdsin != dev->rdsout;
+ mutex_unlock(&dev->lock);
+ return result;
+}
+
static void cadet_handler(unsigned long data)
{
@@ -279,13 +289,12 @@ static void cadet_handler(unsigned long data)
if (mutex_trylock(&dev->lock)) {
outb(0x3, dev->io); /* Select RDS Decoder Control */
if ((inb(dev->io + 1) & 0x20) != 0)
- printk(KERN_CRIT "cadet: RDS fifo overflow\n");
+ pr_err("cadet: RDS fifo overflow\n");
outb(0x80, dev->io); /* Select RDS fifo */
+
while ((inb(dev->io) & 0x80) != 0) {
dev->rdsbuf[dev->rdsin] = inb(dev->io + 1);
- if (dev->rdsin + 1 == dev->rdsout)
- printk(KERN_WARNING "cadet: RDS buffer overflow\n");
- else
+ if (dev->rdsin + 1 != dev->rdsout)
dev->rdsin++;
}
mutex_unlock(&dev->lock);
@@ -294,7 +303,7 @@ static void cadet_handler(unsigned long data)
/*
* Service pending read
*/
- if (dev->rdsin != dev->rdsout)
+ if (cadet_has_rds_data(dev))
wake_up_interruptible(&dev->read_queue);
/*
@@ -327,22 +336,21 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
mutex_lock(&dev->lock);
if (dev->rdsstat == 0)
cadet_start_rds(dev);
- if (dev->rdsin == dev->rdsout) {
- if (file->f_flags & O_NONBLOCK) {
- i = -EWOULDBLOCK;
- goto unlock;
- }
- mutex_unlock(&dev->lock);
- interruptible_sleep_on(&dev->read_queue);
- mutex_lock(&dev->lock);
- }
+ mutex_unlock(&dev->lock);
+
+ if (!cadet_has_rds_data(dev) && (file->f_flags & O_NONBLOCK))
+ return -EWOULDBLOCK;
+ i = wait_event_interruptible(dev->read_queue, cadet_has_rds_data(dev));
+ if (i)
+ return i;
+
+ mutex_lock(&dev->lock);
while (i < count && dev->rdsin != dev->rdsout)
readbuf[i++] = dev->rdsbuf[dev->rdsout++];
+ mutex_unlock(&dev->lock);
if (i && copy_to_user(data, readbuf, i))
- i = -EFAULT;
-unlock:
- mutex_unlock(&dev->lock);
+ return -EFAULT;
return i;
}
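The cadet_read() rework above replaces the racy interruptible_sleep_on() with wait_event_interruptible() plus cadet_has_rds_data(), a helper that samples the ring-buffer indices under the lock, and it no longer holds dev->lock across the sleep or the copy_to_user(). A rough userspace analogue of the same shape, using a pthread condition variable in place of the kernel wait queue (the rdsin/rdsout names mirror the driver; everything else is made up for illustration; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical ring-buffer indices, analogous to dev->rdsin/rdsout. */
static unsigned char rdsin, rdsout;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t data_ready = PTHREAD_COND_INITIALIZER;

/* Analogue of cadet_has_rds_data(): sample the indices under the lock. */
static int has_data(void)
{
    int ret;

    pthread_mutex_lock(&lock);
    ret = rdsin != rdsout;
    pthread_mutex_unlock(&lock);
    return ret;
}

static void *producer(void *arg)
{
    (void)arg;
    sleep(1);
    pthread_mutex_lock(&lock);
    rdsin++;                            /* "a new RDS byte arrived" */
    pthread_mutex_unlock(&lock);
    pthread_cond_signal(&data_ready);   /* like wake_up_interruptible() */
    return NULL;
}

int main(void)
{
    pthread_t tid;

    pthread_create(&tid, NULL, producer, NULL);

    /* Analogue of wait_event_interruptible(read_queue, has_data()):
     * the predicate is re-checked with the lock held around every sleep. */
    pthread_mutex_lock(&lock);
    while (rdsin == rdsout)
        pthread_cond_wait(&data_ready, &lock);
    pthread_mutex_unlock(&lock);

    printf("data available: %d\n", has_data());
    pthread_join(tid, NULL);
    return 0;
}

The point carried over from the hunk is that the data-available predicate is always evaluated with the lock held, so a wakeup arriving between the check and the sleep cannot be lost.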
@@ -352,7 +360,7 @@ static int vidioc_querycap(struct file *file, void *priv,
{
strlcpy(v->driver, "ADS Cadet", sizeof(v->driver));
strlcpy(v->card, "ADS Cadet", sizeof(v->card));
- strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
+ strlcpy(v->bus_info, "ISA:radio-cadet", sizeof(v->bus_info));
v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE;
v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
@@ -491,7 +499,7 @@ static unsigned int cadet_poll(struct file *file, struct poll_table_struct *wait
cadet_start_rds(dev);
mutex_unlock(&dev->lock);
}
- if (dev->rdsin != dev->rdsout)
+ if (cadet_has_rds_data(dev))
res |= POLLIN | POLLRDNORM;
return res;
}
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index fa3964022b96..3d127825eceb 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -416,22 +416,5 @@ static struct usb_driver usb_keene_driver = {
.reset_resume = usb_keene_resume,
};
-static int __init keene_init(void)
-{
- int retval = usb_register(&usb_keene_driver);
-
- if (retval)
- pr_err(KBUILD_MODNAME
- ": usb_register failed. Error number %d\n", retval);
-
- return retval;
-}
-
-static void __exit keene_exit(void)
-{
- usb_deregister(&usb_keene_driver);
-}
-
-module_init(keene_init);
-module_exit(keene_exit);
+module_usb_driver(usb_keene_driver);
diff --git a/drivers/media/radio/si4713/Kconfig b/drivers/media/radio/si4713/Kconfig
index a7c3ba85d12b..9c8b887cff75 100644
--- a/drivers/media/radio/si4713/Kconfig
+++ b/drivers/media/radio/si4713/Kconfig
@@ -1,7 +1,7 @@
config USB_SI4713
tristate "Silicon Labs Si4713 FM Radio Transmitter support with USB"
- depends on USB && RADIO_SI4713
- select SI4713
+ depends on USB && I2C && RADIO_SI4713
+ select I2C_SI4713
---help---
This is a driver for USB devices with the Silicon Labs SI4713
chip. Currently these devices are known to work.
@@ -16,7 +16,7 @@ config USB_SI4713
config PLATFORM_SI4713
tristate "Silicon Labs Si4713 FM Radio Transmitter support with I2C"
depends on I2C && RADIO_SI4713
- select SI4713
+ select I2C_SI4713
---help---
This is a driver for I2C devices with the Silicon Labs SI4713
chip.
diff --git a/drivers/media/radio/si4713/radio-usb-si4713.c b/drivers/media/radio/si4713/radio-usb-si4713.c
index 779855b74bcd..86502b2786d0 100644
--- a/drivers/media/radio/si4713/radio-usb-si4713.c
+++ b/drivers/media/radio/si4713/radio-usb-si4713.c
@@ -223,7 +223,7 @@ struct si4713_start_seq_table {
* (0x03): Get serial number of the board (Response : CB000-00-00)
* (0x06, 0x03, 0x03, 0x08, 0x01, 0x0f) : Get Component revision
*/
-static struct si4713_start_seq_table start_seq[] = {
+static const struct si4713_start_seq_table start_seq[] = {
{ 1, { 0x03 } },
{ 2, { 0x32, 0x7f } },
@@ -261,7 +261,7 @@ static int si4713_start_seq(struct si4713_usb_device *radio)
for (i = 0; i < ARRAY_SIZE(start_seq); i++) {
int len = start_seq[i].len;
- u8 *payload = start_seq[i].payload;
+ const u8 *payload = start_seq[i].payload;
memcpy(radio->buffer + 1, payload, len);
memset(radio->buffer + len + 1, 0, BUFFER_LENGTH - 1 - len);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 904f11367c29..8fbd377e6311 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -106,6 +106,15 @@ config IR_SANYO_DECODER
uses the Sanyo protocol (Sanyo, Aiwa, Chinon remotes),
and you need software decoding support.
+config IR_SHARP_DECODER
+ tristate "Enable IR raw decoder for the Sharp protocol"
+ depends on RC_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the Sharp protocol, and you need software decoding support.
+
config IR_MCE_KBD_DECODER
tristate "Enable IR raw decoder for the MCE keyboard/mouse protocol"
depends on RC_CORE
@@ -300,6 +309,8 @@ config IR_RX51
The driver uses omap DM timers for generating the carrier
wave and pulses.
+source "drivers/media/rc/img-ir/Kconfig"
+
config RC_LOOPBACK
tristate "Remote Control Loopback Driver"
depends on RC_CORE
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index f4eb32c0a455..f8b54ff46601 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
obj-$(CONFIG_IR_RC5_SZ_DECODER) += ir-rc5-sz-decoder.o
obj-$(CONFIG_IR_SANYO_DECODER) += ir-sanyo-decoder.o
+obj-$(CONFIG_IR_SHARP_DECODER) += ir-sharp-decoder.o
obj-$(CONFIG_IR_MCE_KBD_DECODER) += ir-mce_kbd-decoder.o
obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
@@ -31,3 +32,4 @@ obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o
obj-$(CONFIG_IR_IGUANA) += iguanair.o
obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o
obj-$(CONFIG_RC_ST) += st_rc.o
+obj-$(CONFIG_IR_IMG) += img-ir/
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 4d6a63fe6c5e..2df7c5516013 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -784,7 +784,7 @@ static void ati_remote_rc_init(struct ati_remote *ati_remote)
rdev->priv = ati_remote;
rdev->driver_type = RC_DRIVER_SCANCODE;
- rdev->allowed_protos = RC_BIT_OTHER;
+ rc_set_allowed_protocols(rdev, RC_BIT_OTHER);
rdev->driver_name = "ati_remote";
rdev->open = ati_remote_rc_open;
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index c1444f84717d..fc9d23f2ed3f 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -1059,7 +1059,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
learning_mode_force = false;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rdev, RC_BIT_ALL);
rdev->priv = dev;
rdev->open = ene_open;
rdev->close = ene_close;
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index d6fa441655d2..46b66e59438f 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -541,7 +541,7 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
/* Set up the rc device */
rdev->priv = fintek;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rdev, RC_BIT_ALL);
rdev->open = fintek_open;
rdev->close = fintek_close;
rdev->input_name = FINTEK_DESCRIPTION;
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 80c611c2e8c2..29b5f89813b4 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -145,9 +145,9 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
rcdev->dev.parent = &pdev->dev;
rcdev->driver_name = GPIO_IR_DRIVER_NAME;
if (pdata->allowed_protos)
- rcdev->allowed_protos = pdata->allowed_protos;
+ rc_set_allowed_protocols(rcdev, pdata->allowed_protos);
else
- rcdev->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rcdev, RC_BIT_ALL);
rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index fdae05c4f377..627ddfd61980 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -286,10 +286,10 @@ static int iguanair_receiver(struct iguanair *ir, bool enable)
}
/*
- * The iguana ir creates the carrier by busy spinning after each pulse or
- * space. This is counted in CPU cycles, with the CPU running at 24MHz. It is
+ * The iguanair creates the carrier by busy spinning after each half period.
+ * This is counted in CPU cycles, with the CPU running at 24MHz. It is
* broken down into 7-cycle and 4-cycle delays, with a preference for
- * 4-cycle delays.
+ * 4-cycle delays, minus the overhead of the loop itself (cycle_overhead).
*/
static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier)
{
@@ -316,7 +316,14 @@ static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier)
sevens = (4 - cycles) & 3;
fours = (cycles - sevens * 7) / 4;
- /* magic happens here */
+ /*
+ * The firmware interprets these values as a relative offset
+ * for a branch. Immediately following the branches, there are
+ * 4 instructions of 7 cycles (2 bytes each) and 110
+ * instructions of 4 cycles (1 byte each). A relative branch
+ * of 0 will execute all of them; branching further skips some
+ * of them and burns fewer cycles.
+ */
ir->packet->busy7 = (4 - sevens) * 2;
ir->packet->busy4 = 110 - fours;
}
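The comment added above explains what busy7 and busy4 mean to the firmware: they are relative branch offsets that select how many of the 7-cycle (2-byte) and 4-cycle (1-byte) delay instructions run per half period. A standalone sketch of the split, taking an already-computed half-period cycle count as input (deriving that count from the 24 MHz clock and the loop overhead happens earlier in the function, outside this hunk):

#include <stdio.h>

/* Split "cycles" into 7-cycle and 4-cycle delays, preferring 4-cycle ones,
 * exactly as the hunk does: sevens is chosen so the remainder divides by 4. */
static void split(unsigned int cycles)
{
    unsigned int sevens = (4 - cycles) & 3;
    unsigned int fours = (cycles - sevens * 7) / 4;

    /* busy7/busy4 are the relative branch offsets written to the packet:
     * busy7 skips (4 - sevens) of the 7-cycle instructions (2 bytes each)
     * and busy4 skips (110 - fours) of the 4-cycle ones (1 byte each),
     * so exactly sevens and fours delay instructions execute. */
    unsigned int busy7 = (4 - sevens) * 2;
    unsigned int busy4 = 110 - fours;

    printf("cycles=%u -> sevens=%u fours=%u (total %u), busy7=%u busy4=%u\n",
           cycles, sevens, fours, sevens * 7 + fours * 4, busy7, busy4);
}

int main(void)
{
    split(301);     /* 3*7 + 70*4 = 301 */
    split(300);     /* 0*7 + 75*4 = 300 */
    return 0;
}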
@@ -357,20 +364,14 @@ static int iguanair_tx(struct rc_dev *dev, unsigned *txbuf, unsigned count)
rc = -EINVAL;
goto out;
}
- while (periods > 127) {
- ir->packet->payload[size++] = 127 | space;
- periods -= 127;
+ while (periods) {
+ unsigned p = min(periods, 127u);
+ ir->packet->payload[size++] = p | space;
+ periods -= p;
}
-
- ir->packet->payload[size++] = periods | space;
space ^= 0x80;
}
- if (count == 0) {
- rc = -EINVAL;
- goto out;
- }
-
ir->packet->header.start = 0;
ir->packet->header.direction = DIR_OUT;
ir->packet->header.cmd = CMD_SEND;
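The rewritten transmit loop above encodes each pulse or space duration as one or more payload bytes carrying up to 127 carrier periods each, with the top bit toggled between pulse and space, and it no longer emits a byte when the remaining period count is zero. A standalone sketch of the chunking (the durations are made up for illustration):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    /* Made-up durations (in carrier periods) for pulse, space, pulse. */
    unsigned int periods_list[] = { 300, 40, 130 };
    unsigned char payload[16];
    unsigned int size = 0, space = 0;   /* 0x00 = pulse, 0x80 = space */
    unsigned int i;

    for (i = 0; i < sizeof(periods_list) / sizeof(periods_list[0]); i++) {
        unsigned int periods = periods_list[i];

        /* Same chunking as the hunk: at most 127 periods per byte,
         * top bit distinguishes pulse from space. */
        while (periods) {
            unsigned int p = MIN(periods, 127u);

            payload[size++] = p | space;
            periods -= p;
        }
        space ^= 0x80;
    }

    for (i = 0; i < size; i++)
        printf("payload[%u] = 0x%02x\n", i, payload[i]);
    /* 300 -> 0x7f 0x7f 0x2e (pulse), 40 -> 0xa8 (space), 130 -> 0x7f 0x03 */
    return 0;
}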
@@ -494,7 +495,7 @@ static int iguanair_probe(struct usb_interface *intf,
usb_to_input_id(ir->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rc, RC_BIT_ALL);
rc->priv = ir;
rc->open = iguanair_open;
rc->close = iguanair_close;
diff --git a/drivers/media/rc/img-ir/Kconfig b/drivers/media/rc/img-ir/Kconfig
new file mode 100644
index 000000000000..03ba9fc170fb
--- /dev/null
+++ b/drivers/media/rc/img-ir/Kconfig
@@ -0,0 +1,61 @@
+config IR_IMG
+ tristate "ImgTec IR Decoder"
+ depends on RC_CORE
+ select IR_IMG_HW if !IR_IMG_RAW
+ help
+ Say Y or M here if you want to use the ImgTec infrared decoder
+ functionality found in SoCs such as TZ1090.
+
+config IR_IMG_RAW
+ bool "Raw decoder"
+ depends on IR_IMG
+ help
+ Say Y here to enable the raw mode driver which passes raw IR signal
+ changes to the IR raw decoders for software decoding. This is much
+ less reliable (due to lack of timestamps) and consumes more
+ processing power than using hardware decode, but can be useful for
+ testing, debugging, and making more protocols available.
+
+config IR_IMG_HW
+ bool "Hardware decoder"
+ depends on IR_IMG
+ help
+ Say Y here to enable the hardware decode driver which decodes the IR
+ signals in hardware. This is more reliable, consumes less processing
+ power since only a single interrupt is received for each scancode,
+ and allows an IR scancode to be used as a wake event.
+
+config IR_IMG_NEC
+ bool "NEC protocol support"
+ depends on IR_IMG_HW
+ help
+ Say Y here to enable support for the NEC, extended NEC, and 32-bit
+ NEC protocols in the ImgTec infrared decoder block.
+
+config IR_IMG_JVC
+ bool "JVC protocol support"
+ depends on IR_IMG_HW
+ help
+ Say Y here to enable support for the JVC protocol in the ImgTec
+ infrared decoder block.
+
+config IR_IMG_SONY
+ bool "Sony protocol support"
+ depends on IR_IMG_HW
+ help
+ Say Y here to enable support for the Sony protocol in the ImgTec
+ infrared decoder block.
+
+config IR_IMG_SHARP
+ bool "Sharp protocol support"
+ depends on IR_IMG_HW
+ help
+ Say Y here to enable support for the Sharp protocol in the ImgTec
+ infrared decoder block.
+
+config IR_IMG_SANYO
+ bool "Sanyo protocol support"
+ depends on IR_IMG_HW
+ help
+ Say Y here to enable support for the Sanyo protocol (used by Sanyo,
+ Aiwa, Chinon remotes) in the ImgTec infrared decoder block.
diff --git a/drivers/media/rc/img-ir/Makefile b/drivers/media/rc/img-ir/Makefile
new file mode 100644
index 000000000000..92a459d99509
--- /dev/null
+++ b/drivers/media/rc/img-ir/Makefile
@@ -0,0 +1,11 @@
+img-ir-y := img-ir-core.o
+img-ir-$(CONFIG_IR_IMG_RAW) += img-ir-raw.o
+img-ir-$(CONFIG_IR_IMG_HW) += img-ir-hw.o
+img-ir-$(CONFIG_IR_IMG_NEC) += img-ir-nec.o
+img-ir-$(CONFIG_IR_IMG_JVC) += img-ir-jvc.o
+img-ir-$(CONFIG_IR_IMG_SONY) += img-ir-sony.o
+img-ir-$(CONFIG_IR_IMG_SHARP) += img-ir-sharp.o
+img-ir-$(CONFIG_IR_IMG_SANYO) += img-ir-sanyo.o
+img-ir-objs := $(img-ir-y)
+
+obj-$(CONFIG_IR_IMG) += img-ir.o
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
new file mode 100644
index 000000000000..6b7834834fb8
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -0,0 +1,176 @@
+/*
+ * ImgTec IR Decoder found in PowerDown Controller.
+ *
+ * Copyright 2010-2014 Imagination Technologies Ltd.
+ *
+ * This contains core img-ir code for setting up the driver. The two interfaces
+ * (raw and hardware decode) are handled separately.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include "img-ir.h"
+
+static irqreturn_t img_ir_isr(int irq, void *dev_id)
+{
+ struct img_ir_priv *priv = dev_id;
+ u32 irq_status;
+
+ spin_lock(&priv->lock);
+ /* we have to clear irqs before reading */
+ irq_status = img_ir_read(priv, IMG_IR_IRQ_STATUS);
+ img_ir_write(priv, IMG_IR_IRQ_CLEAR, irq_status);
+
+ /* don't handle valid data irqs if we're only interested in matches */
+ irq_status &= img_ir_read(priv, IMG_IR_IRQ_ENABLE);
+
+ /* hand off edge interrupts to raw decode handler */
+ if (irq_status & IMG_IR_IRQ_EDGE && img_ir_raw_enabled(&priv->raw))
+ img_ir_isr_raw(priv, irq_status);
+
+ /* hand off hardware match interrupts to hardware decode handler */
+ if (irq_status & (IMG_IR_IRQ_DATA_MATCH |
+ IMG_IR_IRQ_DATA_VALID |
+ IMG_IR_IRQ_DATA2_VALID) &&
+ img_ir_hw_enabled(&priv->hw))
+ img_ir_isr_hw(priv, irq_status);
+
+ spin_unlock(&priv->lock);
+ return IRQ_HANDLED;
+}
+
+static void img_ir_setup(struct img_ir_priv *priv)
+{
+ /* start off with interrupts disabled */
+ img_ir_write(priv, IMG_IR_IRQ_ENABLE, 0);
+
+ img_ir_setup_raw(priv);
+ img_ir_setup_hw(priv);
+
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+}
+
+static void img_ir_ident(struct img_ir_priv *priv)
+{
+ u32 core_rev = img_ir_read(priv, IMG_IR_CORE_REV);
+
+ dev_info(priv->dev,
+ "IMG IR Decoder (%d.%d.%d.%d) probed successfully\n",
+ (core_rev & IMG_IR_DESIGNER) >> IMG_IR_DESIGNER_SHIFT,
+ (core_rev & IMG_IR_MAJOR_REV) >> IMG_IR_MAJOR_REV_SHIFT,
+ (core_rev & IMG_IR_MINOR_REV) >> IMG_IR_MINOR_REV_SHIFT,
+ (core_rev & IMG_IR_MAINT_REV) >> IMG_IR_MAINT_REV_SHIFT);
+ dev_info(priv->dev, "Modes:%s%s\n",
+ img_ir_hw_enabled(&priv->hw) ? " hardware" : "",
+ img_ir_raw_enabled(&priv->raw) ? " raw" : "");
+}
+
+static int img_ir_probe(struct platform_device *pdev)
+{
+ struct img_ir_priv *priv;
+ struct resource *res_regs;
+ int irq, error, error2;
+
+ /* Get resources from platform device */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "cannot find IRQ resource\n");
+ return irq;
+ }
+
+ /* Private driver data */
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "cannot allocate device data\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, priv);
+ priv->dev = &pdev->dev;
+ spin_lock_init(&priv->lock);
+
+ /* Ioremap the registers */
+ res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->reg_base = devm_ioremap_resource(&pdev->dev, res_regs);
+ if (IS_ERR(priv->reg_base))
+ return PTR_ERR(priv->reg_base);
+
+ /* Get core clock */
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk))
+ dev_warn(&pdev->dev, "cannot get core clock resource\n");
+ /*
+ * The driver doesn't need to know about the system ("sys") or power
+ * modulation ("mod") clocks yet
+ */
+
+ /* Set up raw & hw decoder */
+ error = img_ir_probe_raw(priv);
+ error2 = img_ir_probe_hw(priv);
+ if (error && error2)
+ return (error == -ENODEV) ? error2 : error;
+
+ /* Get the IRQ */
+ priv->irq = irq;
+ error = request_irq(priv->irq, img_ir_isr, 0, "img-ir", priv);
+ if (error) {
+ dev_err(&pdev->dev, "cannot register IRQ %u\n",
+ priv->irq);
+ error = -EIO;
+ goto err_irq;
+ }
+
+ img_ir_ident(priv);
+ img_ir_setup(priv);
+
+ return 0;
+
+err_irq:
+ img_ir_remove_hw(priv);
+ img_ir_remove_raw(priv);
+ return error;
+}
+
+static int img_ir_remove(struct platform_device *pdev)
+{
+ struct img_ir_priv *priv = platform_get_drvdata(pdev);
+
+ free_irq(priv->irq, priv);
+ img_ir_remove_hw(priv);
+ img_ir_remove_raw(priv);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(img_ir_pmops, img_ir_suspend, img_ir_resume);
+
+static const struct of_device_id img_ir_match[] = {
+ { .compatible = "img,ir-rev1" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, img_ir_match);
+
+static struct platform_driver img_ir_driver = {
+ .driver = {
+ .name = "img-ir",
+ .owner = THIS_MODULE,
+ .of_match_table = img_ir_match,
+ .pm = &img_ir_pmops,
+ },
+ .probe = img_ir_probe,
+ .remove = img_ir_remove,
+};
+
+module_platform_driver(img_ir_driver);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("ImgTec IR");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
new file mode 100644
index 000000000000..579a52b3edce
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -0,0 +1,1053 @@
+/*
+ * ImgTec IR Hardware Decoder found in PowerDown Controller.
+ *
+ * Copyright 2010-2014 Imagination Technologies Ltd.
+ *
+ * This ties into the input subsystem using the RC-core. Protocol support is
+ * provided in separate modules which provide the parameters and scancode
+ * translation functions to set up the hardware decoder and interpret the
+ * resulting input.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <media/rc-core.h>
+#include "img-ir.h"
+
+/* Decoder list lock (decoders are only modified while being preprocessed) */
+static DEFINE_SPINLOCK(img_ir_decoders_lock);
+
+extern struct img_ir_decoder img_ir_nec;
+extern struct img_ir_decoder img_ir_jvc;
+extern struct img_ir_decoder img_ir_sony;
+extern struct img_ir_decoder img_ir_sharp;
+extern struct img_ir_decoder img_ir_sanyo;
+
+static bool img_ir_decoders_preprocessed;
+static struct img_ir_decoder *img_ir_decoders[] = {
+#ifdef CONFIG_IR_IMG_NEC
+ &img_ir_nec,
+#endif
+#ifdef CONFIG_IR_IMG_JVC
+ &img_ir_jvc,
+#endif
+#ifdef CONFIG_IR_IMG_SONY
+ &img_ir_sony,
+#endif
+#ifdef CONFIG_IR_IMG_SHARP
+ &img_ir_sharp,
+#endif
+#ifdef CONFIG_IR_IMG_SANYO
+ &img_ir_sanyo,
+#endif
+ NULL
+};
+
+#define IMG_IR_F_FILTER BIT(RC_FILTER_NORMAL) /* enable filtering */
+#define IMG_IR_F_WAKE BIT(RC_FILTER_WAKEUP) /* enable waking */
+
+/* code type quirks */
+
+#define IMG_IR_QUIRK_CODE_BROKEN 0x1 /* Decode is broken */
+#define IMG_IR_QUIRK_CODE_LEN_INCR 0x2 /* Bit length needs increment */
+
+/* functions for preprocessing timings, ensuring max is set */
+
+static void img_ir_timing_preprocess(struct img_ir_timing_range *range,
+ unsigned int unit)
+{
+ if (range->max < range->min)
+ range->max = range->min;
+ if (unit) {
+ /* multiply by unit and convert to microseconds */
+ range->min = (range->min*unit)/1000;
+ range->max = (range->max*unit + 999)/1000; /* round up */
+ }
+}
+
+static void img_ir_symbol_timing_preprocess(struct img_ir_symbol_timing *timing,
+ unsigned int unit)
+{
+ img_ir_timing_preprocess(&timing->pulse, unit);
+ img_ir_timing_preprocess(&timing->space, unit);
+}
+
+static void img_ir_timings_preprocess(struct img_ir_timings *timings,
+ unsigned int unit)
+{
+ img_ir_symbol_timing_preprocess(&timings->ldr, unit);
+ img_ir_symbol_timing_preprocess(&timings->s00, unit);
+ img_ir_symbol_timing_preprocess(&timings->s01, unit);
+ img_ir_symbol_timing_preprocess(&timings->s10, unit);
+ img_ir_symbol_timing_preprocess(&timings->s11, unit);
+ /* the free time minimum also needs converting when a unit is given */
+ if (unit)
+ /* multiply by unit and convert to microseconds (round up) */
+ timings->ft.ft_min = (timings->ft.ft_min*unit + 999)/1000;
+}
+
+/* functions for filling empty fields with defaults */
+
+static void img_ir_timing_defaults(struct img_ir_timing_range *range,
+ struct img_ir_timing_range *defaults)
+{
+ if (!range->min)
+ range->min = defaults->min;
+ if (!range->max)
+ range->max = defaults->max;
+}
+
+static void img_ir_symbol_timing_defaults(struct img_ir_symbol_timing *timing,
+ struct img_ir_symbol_timing *defaults)
+{
+ img_ir_timing_defaults(&timing->pulse, &defaults->pulse);
+ img_ir_timing_defaults(&timing->space, &defaults->space);
+}
+
+static void img_ir_timings_defaults(struct img_ir_timings *timings,
+ struct img_ir_timings *defaults)
+{
+ img_ir_symbol_timing_defaults(&timings->ldr, &defaults->ldr);
+ img_ir_symbol_timing_defaults(&timings->s00, &defaults->s00);
+ img_ir_symbol_timing_defaults(&timings->s01, &defaults->s01);
+ img_ir_symbol_timing_defaults(&timings->s10, &defaults->s10);
+ img_ir_symbol_timing_defaults(&timings->s11, &defaults->s11);
+ if (!timings->ft.ft_min)
+ timings->ft.ft_min = defaults->ft.ft_min;
+}
+
+/* functions for converting timings to register values */
+
+/**
+ * img_ir_control() - Convert control struct to control register value.
+ * @control: Control data
+ *
+ * Returns: The control register value equivalent of @control.
+ */
+static u32 img_ir_control(const struct img_ir_control *control)
+{
+ u32 ctrl = control->code_type << IMG_IR_CODETYPE_SHIFT;
+ if (control->decoden)
+ ctrl |= IMG_IR_DECODEN;
+ if (control->hdrtog)
+ ctrl |= IMG_IR_HDRTOG;
+ if (control->ldrdec)
+ ctrl |= IMG_IR_LDRDEC;
+ if (control->decodinpol)
+ ctrl |= IMG_IR_DECODINPOL;
+ if (control->bitorien)
+ ctrl |= IMG_IR_BITORIEN;
+ if (control->d1validsel)
+ ctrl |= IMG_IR_D1VALIDSEL;
+ if (control->bitinv)
+ ctrl |= IMG_IR_BITINV;
+ if (control->decodend2)
+ ctrl |= IMG_IR_DECODEND2;
+ if (control->bitoriend2)
+ ctrl |= IMG_IR_BITORIEND2;
+ if (control->bitinvd2)
+ ctrl |= IMG_IR_BITINVD2;
+ return ctrl;
+}
+
+/**
+ * img_ir_timing_range_convert() - Convert microsecond range.
+ * @out: Output timing range in clock cycles with a shift.
+ * @in: Input timing range in microseconds.
+ * @tolerance: Tolerance as a fraction of 128 (roughly percent).
+ * @clock_hz: IR clock rate in Hz.
+ * @shift: Shift of output units.
+ *
+ * Converts min and max from microseconds to IR clock cycles, applies a
+ * tolerance, and shifts for the register, rounding in the right direction.
+ * Note that in and out can safely be the same object.
+ */
+static void img_ir_timing_range_convert(struct img_ir_timing_range *out,
+ const struct img_ir_timing_range *in,
+ unsigned int tolerance,
+ unsigned long clock_hz,
+ unsigned int shift)
+{
+ unsigned int min = in->min;
+ unsigned int max = in->max;
+ /* add a tolerance */
+ min = min - (min*tolerance >> 7);
+ max = max + (max*tolerance >> 7);
+ /* convert from microseconds into clock cycles */
+ min = min*clock_hz / 1000000;
+ max = (max*clock_hz + 999999) / 1000000; /* round up */
+ /* apply shift and copy to output */
+ out->min = min >> shift;
+ out->max = (max + ((1 << shift) - 1)) >> shift; /* round up */
+}
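img_ir_timing_range_convert() above widens the range by tolerance/128, converts microseconds to IR clock cycles, and applies the register shift, rounding the minimum down and the maximum up at every step. A worked standalone version of the same arithmetic; the 9 ms leader, the 32768 Hz clock and the shift of 4 are example values only (a shift of 4 is what img_ir_timings_convert() below uses for the leader symbol, and 12/128 is the value img_ir_decoder_preprocess() produces from the default 10% tolerance):

#include <stdio.h>

struct range { unsigned int min, max; };

/* Same arithmetic as img_ir_timing_range_convert() in the hunk above:
 * widen by tolerance/128, convert microseconds -> clock cycles, then shift,
 * rounding min down and max up at each step. */
static struct range convert(struct range in, unsigned int tolerance,
                            unsigned long clock_hz, unsigned int shift)
{
    unsigned int min = in.min, max = in.max;
    struct range out;

    min = min - (min * tolerance >> 7);
    max = max + (max * tolerance >> 7);
    min = min * clock_hz / 1000000;
    max = (max * clock_hz + 999999) / 1000000;
    out.min = min >> shift;
    out.max = (max + ((1 << shift) - 1)) >> shift;
    return out;
}

int main(void)
{
    /* Hypothetical 9 ms leader pulse, 10% tolerance (12/128),
     * 32768 Hz clock, period register divided by 16 (shift of 4). */
    struct range ldr = { 9000, 9000 };
    struct range r = convert(ldr, 12, 32768, 4);

    printf("min=%u max=%u\n", r.min, r.max);    /* prints: min=16 max=21 */
    return 0;
}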
+
+/**
+ * img_ir_symbol_timing() - Convert symbol timing struct to register value.
+ * @timing: Symbol timing data
+ * @tolerance: Timing tolerance where 0-128 represents 0-100%
+ * @clock_hz: Frequency of source clock in Hz
+ * @pd_shift: Shift to apply to symbol period
+ * @w_shift: Shift to apply to symbol width
+ *
+ * Returns: Symbol timing register value based on arguments.
+ */
+static u32 img_ir_symbol_timing(const struct img_ir_symbol_timing *timing,
+ unsigned int tolerance,
+ unsigned long clock_hz,
+ unsigned int pd_shift,
+ unsigned int w_shift)
+{
+ struct img_ir_timing_range hw_pulse, hw_period;
+ /* we calculate period in hw_period, then convert in place */
+ hw_period.min = timing->pulse.min + timing->space.min;
+ hw_period.max = timing->pulse.max + timing->space.max;
+ img_ir_timing_range_convert(&hw_period, &hw_period,
+ tolerance, clock_hz, pd_shift);
+ img_ir_timing_range_convert(&hw_pulse, &timing->pulse,
+ tolerance, clock_hz, w_shift);
+ /* construct register value */
+ return (hw_period.max << IMG_IR_PD_MAX_SHIFT) |
+ (hw_period.min << IMG_IR_PD_MIN_SHIFT) |
+ (hw_pulse.max << IMG_IR_W_MAX_SHIFT) |
+ (hw_pulse.min << IMG_IR_W_MIN_SHIFT);
+}
+
+/**
+ * img_ir_free_timing() - Convert free time timing struct to register value.
+ * @timing: Free symbol timing data
+ * @clock_hz: Source clock frequency in Hz
+ *
+ * Returns: Free symbol timing register value.
+ */
+static u32 img_ir_free_timing(const struct img_ir_free_timing *timing,
+ unsigned long clock_hz)
+{
+ unsigned int minlen, maxlen, ft_min;
+ /* minlen is only 5 bits, and round minlen to multiple of 2 */
+ if (timing->minlen < 30)
+ minlen = timing->minlen & -2;
+ else
+ minlen = 30;
+ /* maxlen has maximum value of 48, and round maxlen to multiple of 2 */
+ if (timing->maxlen < 48)
+ maxlen = (timing->maxlen + 1) & -2;
+ else
+ maxlen = 48;
+ /* convert and shift ft_min, rounding upwards */
+ ft_min = (timing->ft_min*clock_hz + 999999) / 1000000;
+ ft_min = (ft_min + 7) >> 3;
+ /* construct register value */
+ return (maxlen << IMG_IR_MAXLEN_SHIFT) |
+ (minlen << IMG_IR_MINLEN_SHIFT) |
+ (ft_min << IMG_IR_FT_MIN_SHIFT);
+}
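img_ir_free_timing() above derives three register fields: minlen clamped to 30 and rounded down to even, maxlen capped at 48 and rounded up to even, and ft_min converted from microseconds to clock cycles and then divided by 8, rounding up both times. The arithmetic in isolation, with hypothetical input values (the IMG_IR_*_SHIFT packing constants live in img-ir.h and are not shown in this hunk, so the sketch stops short of assembling the register word):

#include <stdio.h>

int main(void)
{
    /* Hypothetical decoder free-time parameters: 32-bit minimum message
     * length, 33-bit maximum, 10 ms minimum free time, 32768 Hz clock. */
    unsigned int in_minlen = 32, in_maxlen = 33, ft_min_us = 10000;
    unsigned long clock_hz = 32768;
    unsigned int minlen, maxlen, ft_min;

    /* minlen is only 5 bits: clamp to 30 and round down to a multiple of 2 */
    minlen = in_minlen < 30 ? (in_minlen & -2) : 30;
    /* maxlen is capped at 48 and rounded up to a multiple of 2 */
    maxlen = in_maxlen < 48 ? ((in_maxlen + 1) & -2) : 48;
    /* convert free time to clock cycles (round up), then divide by 8,
     * rounding up again, exactly as the hunk does before shifting */
    ft_min = (ft_min_us * clock_hz + 999999) / 1000000;
    ft_min = (ft_min + 7) >> 3;

    printf("minlen=%u maxlen=%u ft_min=%u\n", minlen, maxlen, ft_min);
    /* prints: minlen=30 maxlen=34 ft_min=41 */
    return 0;
}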
+
+/**
+ * img_ir_free_timing_dynamic() - Update free time register value.
+ * @st_ft: Static free time register value from img_ir_free_timing.
+ * @filter: Current filter which may additionally restrict min/max len.
+ *
+ * Returns: Updated free time register value based on the current filter.
+ */
+static u32 img_ir_free_timing_dynamic(u32 st_ft, struct img_ir_filter *filter)
+{
+ unsigned int minlen, maxlen, newminlen, newmaxlen;
+
+ /* round minlen, maxlen to multiple of 2 */
+ newminlen = filter->minlen & -2;
+ newmaxlen = (filter->maxlen + 1) & -2;
+ /* extract min/max len from register */
+ minlen = (st_ft & IMG_IR_MINLEN) >> IMG_IR_MINLEN_SHIFT;
+ maxlen = (st_ft & IMG_IR_MAXLEN) >> IMG_IR_MAXLEN_SHIFT;
+ /* if the new values are more restrictive, update the register value */
+ if (newminlen > minlen) {
+ st_ft &= ~IMG_IR_MINLEN;
+ st_ft |= newminlen << IMG_IR_MINLEN_SHIFT;
+ }
+ if (newmaxlen < maxlen) {
+ st_ft &= ~IMG_IR_MAXLEN;
+ st_ft |= newmaxlen << IMG_IR_MAXLEN_SHIFT;
+ }
+ return st_ft;
+}
+
+/**
+ * img_ir_timings_convert() - Convert timings to register values
+ * @regs: Output timing register values
+ * @timings: Input timing data
+ * @tolerance: Timing tolerance where 0-128 represents 0-100%
+ * @clock_hz: Source clock frequency in Hz
+ */
+static void img_ir_timings_convert(struct img_ir_timing_regvals *regs,
+ const struct img_ir_timings *timings,
+ unsigned int tolerance,
+ unsigned int clock_hz)
+{
+ /* leader symbol timings are divided by 16 */
+ regs->ldr = img_ir_symbol_timing(&timings->ldr, tolerance, clock_hz,
+ 4, 4);
+ /* other symbol timings, pd fields only are divided by 2 */
+ regs->s00 = img_ir_symbol_timing(&timings->s00, tolerance, clock_hz,
+ 1, 0);
+ regs->s01 = img_ir_symbol_timing(&timings->s01, tolerance, clock_hz,
+ 1, 0);
+ regs->s10 = img_ir_symbol_timing(&timings->s10, tolerance, clock_hz,
+ 1, 0);
+ regs->s11 = img_ir_symbol_timing(&timings->s11, tolerance, clock_hz,
+ 1, 0);
+ regs->ft = img_ir_free_timing(&timings->ft, clock_hz);
+}
+
+/**
+ * img_ir_decoder_preprocess() - Preprocess timings in decoder.
+ * @decoder: Decoder to be preprocessed.
+ *
+ * Ensures that the symbol timing ranges are valid with respect to ordering, and
+ * does some fixed conversion on them.
+ */
+static void img_ir_decoder_preprocess(struct img_ir_decoder *decoder)
+{
+ /* default tolerance */
+ if (!decoder->tolerance)
+ decoder->tolerance = 10; /* percent */
+ /* and convert tolerance to fraction out of 128 */
+ decoder->tolerance = decoder->tolerance * 128 / 100;
+
+ /* fill in implicit fields */
+ img_ir_timings_preprocess(&decoder->timings, decoder->unit);
+
+ /* do the same for repeat timings if applicable */
+ if (decoder->repeat) {
+ img_ir_timings_preprocess(&decoder->rtimings, decoder->unit);
+ img_ir_timings_defaults(&decoder->rtimings, &decoder->timings);
+ }
+}
+
+/**
+ * img_ir_decoder_convert() - Generate internal timings in decoder.
+ * @decoder: Decoder to be converted to internal timings.
+ * @reg_timings: Timing register values.
+ * @clock_hz: IR clock rate in Hz.
+ *
+ * Fills out the repeat timings and timing register values for a specific clock
+ * rate.
+ */
+static void img_ir_decoder_convert(const struct img_ir_decoder *decoder,
+ struct img_ir_reg_timings *reg_timings,
+ unsigned int clock_hz)
+{
+ /* calculate control value */
+ reg_timings->ctrl = img_ir_control(&decoder->control);
+
+ /* fill in implicit fields and calculate register values */
+ img_ir_timings_convert(&reg_timings->timings, &decoder->timings,
+ decoder->tolerance, clock_hz);
+
+ /* do the same for repeat timings if applicable */
+ if (decoder->repeat)
+ img_ir_timings_convert(&reg_timings->rtimings,
+ &decoder->rtimings, decoder->tolerance,
+ clock_hz);
+}
+
+/**
+ * img_ir_write_timings() - Write timings to the hardware now
+ * @priv: IR private data
+ * @regs: Timing register values to write
+ * @type: RC filter type (RC_FILTER_*)
+ *
+ * Write timing register values @regs to the hardware, taking into account the
+ * current filter which may impose restrictions on the length of the expected
+ * data.
+ */
+static void img_ir_write_timings(struct img_ir_priv *priv,
+ struct img_ir_timing_regvals *regs,
+ enum rc_filter_type type)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+
+ /* filter may be more restrictive to minlen, maxlen */
+ u32 ft = regs->ft;
+ if (hw->flags & BIT(type))
+ ft = img_ir_free_timing_dynamic(regs->ft, &hw->filters[type]);
+ /* write to registers */
+ img_ir_write(priv, IMG_IR_LEAD_SYMB_TIMING, regs->ldr);
+ img_ir_write(priv, IMG_IR_S00_SYMB_TIMING, regs->s00);
+ img_ir_write(priv, IMG_IR_S01_SYMB_TIMING, regs->s01);
+ img_ir_write(priv, IMG_IR_S10_SYMB_TIMING, regs->s10);
+ img_ir_write(priv, IMG_IR_S11_SYMB_TIMING, regs->s11);
+ img_ir_write(priv, IMG_IR_FREE_SYMB_TIMING, ft);
+ dev_dbg(priv->dev, "timings: ldr=%#x, s=[%#x, %#x, %#x, %#x], ft=%#x\n",
+ regs->ldr, regs->s00, regs->s01, regs->s10, regs->s11, ft);
+}
+
+static void img_ir_write_filter(struct img_ir_priv *priv,
+ struct img_ir_filter *filter)
+{
+ if (filter) {
+ dev_dbg(priv->dev, "IR filter=%016llx & %016llx\n",
+ (unsigned long long)filter->data,
+ (unsigned long long)filter->mask);
+ img_ir_write(priv, IMG_IR_IRQ_MSG_DATA_LW, (u32)filter->data);
+ img_ir_write(priv, IMG_IR_IRQ_MSG_DATA_UP, (u32)(filter->data
+ >> 32));
+ img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_LW, (u32)filter->mask);
+ img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_UP, (u32)(filter->mask
+ >> 32));
+ } else {
+ dev_dbg(priv->dev, "IR clearing filter\n");
+ img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_LW, 0);
+ img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_UP, 0);
+ }
+}
+
+/* caller must have lock */
+static void _img_ir_set_filter(struct img_ir_priv *priv,
+ struct img_ir_filter *filter)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ u32 irq_en, irq_on;
+
+ irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
+ if (filter) {
+ /* Only use the match interrupt */
+ hw->filters[RC_FILTER_NORMAL] = *filter;
+ hw->flags |= IMG_IR_F_FILTER;
+ irq_on = IMG_IR_IRQ_DATA_MATCH;
+ irq_en &= ~(IMG_IR_IRQ_DATA_VALID | IMG_IR_IRQ_DATA2_VALID);
+ } else {
+ /* Only use the valid interrupt */
+ hw->flags &= ~IMG_IR_F_FILTER;
+ irq_en &= ~IMG_IR_IRQ_DATA_MATCH;
+ irq_on = IMG_IR_IRQ_DATA_VALID | IMG_IR_IRQ_DATA2_VALID;
+ }
+ irq_en |= irq_on;
+
+ img_ir_write_filter(priv, filter);
+ /* clear any interrupts we're enabling so we don't handle old ones */
+ img_ir_write(priv, IMG_IR_IRQ_CLEAR, irq_on);
+ img_ir_write(priv, IMG_IR_IRQ_ENABLE, irq_en);
+}
+
+/* caller must have lock */
+static void _img_ir_set_wake_filter(struct img_ir_priv *priv,
+ struct img_ir_filter *filter)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ if (filter) {
+ /* Enable wake, and copy filter for later */
+ hw->filters[RC_FILTER_WAKEUP] = *filter;
+ hw->flags |= IMG_IR_F_WAKE;
+ } else {
+ /* Disable wake */
+ hw->flags &= ~IMG_IR_F_WAKE;
+ }
+}
+
+/* Callback for setting scancode filter */
+static int img_ir_set_filter(struct rc_dev *dev, enum rc_filter_type type,
+ struct rc_scancode_filter *sc_filter)
+{
+ struct img_ir_priv *priv = dev->priv;
+ struct img_ir_priv_hw *hw = &priv->hw;
+ struct img_ir_filter filter, *filter_ptr = &filter;
+ int ret = 0;
+
+ dev_dbg(priv->dev, "IR scancode %sfilter=%08x & %08x\n",
+ type == RC_FILTER_WAKEUP ? "wake " : "",
+ sc_filter->data,
+ sc_filter->mask);
+
+ spin_lock_irq(&priv->lock);
+
+ /* filtering can always be disabled */
+ if (!sc_filter->mask) {
+ filter_ptr = NULL;
+ goto set_unlock;
+ }
+
+ /* current decoder must support scancode filtering */
+ if (!hw->decoder || !hw->decoder->filter) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* convert scancode filter to raw filter */
+ filter.minlen = 0;
+ filter.maxlen = ~0;
+ ret = hw->decoder->filter(sc_filter, &filter, hw->enabled_protocols);
+ if (ret)
+ goto unlock;
+ dev_dbg(priv->dev, "IR raw %sfilter=%016llx & %016llx\n",
+ type == RC_FILTER_WAKEUP ? "wake " : "",
+ (unsigned long long)filter.data,
+ (unsigned long long)filter.mask);
+
+set_unlock:
+ /* apply raw filters */
+ switch (type) {
+ case RC_FILTER_NORMAL:
+ _img_ir_set_filter(priv, filter_ptr);
+ break;
+ case RC_FILTER_WAKEUP:
+ _img_ir_set_wake_filter(priv, filter_ptr);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+unlock:
+ spin_unlock_irq(&priv->lock);
+ return ret;
+}
+
+/**
+ * img_ir_set_decoder() - Set the current decoder.
+ * @priv: IR private data.
+ * @decoder: Decoder to use with immediate effect.
+ * @proto: Protocol bitmap (or 0 to use decoder->type).
+ */
+static void img_ir_set_decoder(struct img_ir_priv *priv,
+ const struct img_ir_decoder *decoder,
+ u64 proto)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ struct rc_dev *rdev = hw->rdev;
+ u32 ir_status, irq_en;
+ spin_lock_irq(&priv->lock);
+
+ /* switch off and disable interrupts */
+ img_ir_write(priv, IMG_IR_CONTROL, 0);
+ irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
+ img_ir_write(priv, IMG_IR_IRQ_ENABLE, irq_en & IMG_IR_IRQ_EDGE);
+ img_ir_write(priv, IMG_IR_IRQ_CLEAR, IMG_IR_IRQ_ALL & ~IMG_IR_IRQ_EDGE);
+
+ /* ack any data already detected */
+ ir_status = img_ir_read(priv, IMG_IR_STATUS);
+ if (ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2)) {
+ ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2);
+ img_ir_write(priv, IMG_IR_STATUS, ir_status);
+ img_ir_read(priv, IMG_IR_DATA_LW);
+ img_ir_read(priv, IMG_IR_DATA_UP);
+ }
+
+ /* stop the end timer and switch back to normal mode */
+ del_timer_sync(&hw->end_timer);
+ hw->mode = IMG_IR_M_NORMAL;
+
+ /* clear the wakeup scancode filter */
+ rdev->scancode_filters[RC_FILTER_WAKEUP].data = 0;
+ rdev->scancode_filters[RC_FILTER_WAKEUP].mask = 0;
+
+ /* clear raw filters */
+ _img_ir_set_filter(priv, NULL);
+ _img_ir_set_wake_filter(priv, NULL);
+
+ /* clear the enabled protocols */
+ hw->enabled_protocols = 0;
+
+ /* switch decoder */
+ hw->decoder = decoder;
+ if (!decoder)
+ goto unlock;
+
+ /* set the enabled protocols */
+ if (!proto)
+ proto = decoder->type;
+ hw->enabled_protocols = proto;
+
+ /* write the new timings */
+ img_ir_decoder_convert(decoder, &hw->reg_timings, hw->clk_hz);
+ img_ir_write_timings(priv, &hw->reg_timings.timings, RC_FILTER_NORMAL);
+
+ /* set up and enable */
+ img_ir_write(priv, IMG_IR_CONTROL, hw->reg_timings.ctrl);
+
+unlock:
+ spin_unlock_irq(&priv->lock);
+}
+
+/**
+ * img_ir_decoder_compatible() - Find whether a decoder will work with a device.
+ * @priv: IR private data.
+ * @dec: Decoder to check.
+ *
+ * Returns: true if @dec is compatible with the device @priv refers to.
+ */
+static bool img_ir_decoder_compatible(struct img_ir_priv *priv,
+ const struct img_ir_decoder *dec)
+{
+ unsigned int ct;
+
+ /* don't accept decoders using code types which aren't supported */
+ ct = dec->control.code_type;
+ if (priv->hw.ct_quirks[ct] & IMG_IR_QUIRK_CODE_BROKEN)
+ return false;
+
+ return true;
+}
+
+/**
+ * img_ir_allowed_protos() - Get allowed protocols from global decoder list.
+ * @priv: IR private data.
+ *
+ * Returns: Mask of protocols supported by the device @priv refers to.
+ */
+static u64 img_ir_allowed_protos(struct img_ir_priv *priv)
+{
+ u64 protos = 0;
+ struct img_ir_decoder **decp;
+
+ for (decp = img_ir_decoders; *decp; ++decp) {
+ const struct img_ir_decoder *dec = *decp;
+ if (img_ir_decoder_compatible(priv, dec))
+ protos |= dec->type;
+ }
+ return protos;
+}
+
+/* Callback for changing protocol using sysfs */
+static int img_ir_change_protocol(struct rc_dev *dev, u64 *ir_type)
+{
+ struct img_ir_priv *priv = dev->priv;
+ struct img_ir_priv_hw *hw = &priv->hw;
+ struct rc_dev *rdev = hw->rdev;
+ struct img_ir_decoder **decp;
+ u64 wakeup_protocols;
+
+ if (!*ir_type) {
+ /* disable all protocols */
+ img_ir_set_decoder(priv, NULL, 0);
+ goto success;
+ }
+ for (decp = img_ir_decoders; *decp; ++decp) {
+ const struct img_ir_decoder *dec = *decp;
+ if (!img_ir_decoder_compatible(priv, dec))
+ continue;
+ if (*ir_type & dec->type) {
+ *ir_type &= dec->type;
+ img_ir_set_decoder(priv, dec, *ir_type);
+ goto success;
+ }
+ }
+ return -EINVAL;
+
+success:
+ /*
+ * Only allow matching wakeup protocols for now, and only if filtering
+ * is supported.
+ */
+ wakeup_protocols = *ir_type;
+ if (!hw->decoder || !hw->decoder->filter)
+ wakeup_protocols = 0;
+ rc_set_allowed_wakeup_protocols(rdev, wakeup_protocols);
+ rc_set_enabled_wakeup_protocols(rdev, wakeup_protocols);
+ return 0;
+}
+
+/* Changes ir-core protocol device attribute */
+static void img_ir_set_protocol(struct img_ir_priv *priv, u64 proto)
+{
+ struct rc_dev *rdev = priv->hw.rdev;
+
+ spin_lock_irq(&rdev->rc_map.lock);
+ rdev->rc_map.rc_type = __ffs64(proto);
+ spin_unlock_irq(&rdev->rc_map.lock);
+
+ mutex_lock(&rdev->lock);
+ rc_set_enabled_protocols(rdev, proto);
+ rc_set_allowed_wakeup_protocols(rdev, proto);
+ rc_set_enabled_wakeup_protocols(rdev, proto);
+ mutex_unlock(&rdev->lock);
+}
+
+/* Set up IR decoders */
+static void img_ir_init_decoders(void)
+{
+ struct img_ir_decoder **decp;
+
+ spin_lock(&img_ir_decoders_lock);
+ if (!img_ir_decoders_preprocessed) {
+ for (decp = img_ir_decoders; *decp; ++decp)
+ img_ir_decoder_preprocess(*decp);
+ img_ir_decoders_preprocessed = true;
+ }
+ spin_unlock(&img_ir_decoders_lock);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * img_ir_enable_wake() - Switch to wake mode.
+ * @priv: IR private data.
+ *
+ * Returns: non-zero if the IR can wake the system.
+ */
+static int img_ir_enable_wake(struct img_ir_priv *priv)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ int ret = 0;
+
+ spin_lock_irq(&priv->lock);
+ if (hw->flags & IMG_IR_F_WAKE) {
+ /* interrupt only on a match */
+ hw->suspend_irqen = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
+ img_ir_write(priv, IMG_IR_IRQ_ENABLE, IMG_IR_IRQ_DATA_MATCH);
+ img_ir_write_filter(priv, &hw->filters[RC_FILTER_WAKEUP]);
+ img_ir_write_timings(priv, &hw->reg_timings.timings,
+ RC_FILTER_WAKEUP);
+ hw->mode = IMG_IR_M_WAKE;
+ ret = 1;
+ }
+ spin_unlock_irq(&priv->lock);
+ return ret;
+}
+
+/**
+ * img_ir_disable_wake() - Switch out of wake mode.
+ * @priv: IR private data
+ *
+ * Returns: 1 if the hardware should be allowed to wake from a sleep state.
+ * 0 otherwise.
+ */
+static int img_ir_disable_wake(struct img_ir_priv *priv)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ int ret = 0;
+
+ spin_lock_irq(&priv->lock);
+ if (hw->flags & IMG_IR_F_WAKE) {
+ /* restore normal filtering */
+ if (hw->flags & IMG_IR_F_FILTER) {
+ img_ir_write(priv, IMG_IR_IRQ_ENABLE,
+ (hw->suspend_irqen & IMG_IR_IRQ_EDGE) |
+ IMG_IR_IRQ_DATA_MATCH);
+ img_ir_write_filter(priv,
+ &hw->filters[RC_FILTER_NORMAL]);
+ } else {
+ img_ir_write(priv, IMG_IR_IRQ_ENABLE,
+ (hw->suspend_irqen & IMG_IR_IRQ_EDGE) |
+ IMG_IR_IRQ_DATA_VALID |
+ IMG_IR_IRQ_DATA2_VALID);
+ img_ir_write_filter(priv, NULL);
+ }
+ img_ir_write_timings(priv, &hw->reg_timings.timings,
+ RC_FILTER_NORMAL);
+ hw->mode = IMG_IR_M_NORMAL;
+ ret = 1;
+ }
+ spin_unlock_irq(&priv->lock);
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+/* lock must be held */
+static void img_ir_begin_repeat(struct img_ir_priv *priv)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ if (hw->mode == IMG_IR_M_NORMAL) {
+ /* switch to repeat timings */
+ img_ir_write(priv, IMG_IR_CONTROL, 0);
+ hw->mode = IMG_IR_M_REPEATING;
+ img_ir_write_timings(priv, &hw->reg_timings.rtimings,
+ RC_FILTER_NORMAL);
+ img_ir_write(priv, IMG_IR_CONTROL, hw->reg_timings.ctrl);
+ }
+}
+
+/* lock must be held */
+static void img_ir_end_repeat(struct img_ir_priv *priv)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ if (hw->mode == IMG_IR_M_REPEATING) {
+ /* switch to normal timings */
+ img_ir_write(priv, IMG_IR_CONTROL, 0);
+ hw->mode = IMG_IR_M_NORMAL;
+ img_ir_write_timings(priv, &hw->reg_timings.timings,
+ RC_FILTER_NORMAL);
+ img_ir_write(priv, IMG_IR_CONTROL, hw->reg_timings.ctrl);
+ }
+}
+
+/* lock must be held */
+static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ const struct img_ir_decoder *dec = hw->decoder;
+ int ret = IMG_IR_SCANCODE;
+ int scancode;
+ if (dec->scancode)
+ ret = dec->scancode(len, raw, &scancode, hw->enabled_protocols);
+ else if (len >= 32)
+ scancode = (u32)raw;
+ else
+ scancode = (u32)raw & ((1 << len) - 1);
+ dev_dbg(priv->dev, "data (%u bits) = %#llx\n",
+ len, (unsigned long long)raw);
+ if (ret == IMG_IR_SCANCODE) {
+ dev_dbg(priv->dev, "decoded scan code %#x\n", scancode);
+ rc_keydown(hw->rdev, scancode, 0);
+ img_ir_end_repeat(priv);
+ } else if (ret == IMG_IR_REPEATCODE) {
+ if (hw->mode == IMG_IR_M_REPEATING) {
+ dev_dbg(priv->dev, "decoded repeat code\n");
+ rc_repeat(hw->rdev);
+ } else {
+ dev_dbg(priv->dev, "decoded unexpected repeat code, ignoring\n");
+ }
+ } else {
+ dev_dbg(priv->dev, "decode failed (%d)\n", ret);
+ return;
+ }
+
+ if (dec->repeat) {
+ unsigned long interval;
+
+ img_ir_begin_repeat(priv);
+
+ /* update the end timer, allowing for 1/8 tolerance */
+ interval = dec->repeat + (dec->repeat >> 3);
+ mod_timer(&hw->end_timer,
+ jiffies + msecs_to_jiffies(interval));
+ }
+}
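As a worked example of the interval arithmetic above (illustrative only, not part of this patch): a decoder with dec->repeat == 108 ms arms the end timer for 108 + 108/8 = 121 ms (integer division), so a repeat code arriving slightly late still falls inside the window before img_ir_end_timer() drops back to normal timings.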
+
+/* Timer function to end the wait for repeat codes. */
+static void img_ir_end_timer(unsigned long arg)
+{
+ struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+
+ spin_lock_irq(&priv->lock);
+ img_ir_end_repeat(priv);
+ spin_unlock_irq(&priv->lock);
+}
+
+#ifdef CONFIG_COMMON_CLK
+static void img_ir_change_frequency(struct img_ir_priv *priv,
+ struct clk_notifier_data *change)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+
+ dev_dbg(priv->dev, "clk changed %lu HZ -> %lu HZ\n",
+ change->old_rate, change->new_rate);
+
+ spin_lock_irq(&priv->lock);
+ if (hw->clk_hz == change->new_rate)
+ goto unlock;
+ hw->clk_hz = change->new_rate;
+ /* refresh current timings */
+ if (hw->decoder) {
+ img_ir_decoder_convert(hw->decoder, &hw->reg_timings,
+ hw->clk_hz);
+ switch (hw->mode) {
+ case IMG_IR_M_NORMAL:
+ img_ir_write_timings(priv, &hw->reg_timings.timings,
+ RC_FILTER_NORMAL);
+ break;
+ case IMG_IR_M_REPEATING:
+ img_ir_write_timings(priv, &hw->reg_timings.rtimings,
+ RC_FILTER_NORMAL);
+ break;
+#ifdef CONFIG_PM_SLEEP
+ case IMG_IR_M_WAKE:
+ img_ir_write_timings(priv, &hw->reg_timings.timings,
+ RC_FILTER_WAKEUP);
+ break;
+#endif
+ }
+ }
+unlock:
+ spin_unlock_irq(&priv->lock);
+}
+
+static int img_ir_clk_notify(struct notifier_block *self, unsigned long action,
+ void *data)
+{
+ struct img_ir_priv *priv = container_of(self, struct img_ir_priv,
+ hw.clk_nb);
+ switch (action) {
+ case POST_RATE_CHANGE:
+ img_ir_change_frequency(priv, data);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+#endif /* CONFIG_COMMON_CLK */
+
+/* called with priv->lock held */
+void img_ir_isr_hw(struct img_ir_priv *priv, u32 irq_status)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ u32 ir_status, len, lw, up;
+ unsigned int ct;
+
+ /* use the current decoder */
+ if (!hw->decoder)
+ return;
+
+ ir_status = img_ir_read(priv, IMG_IR_STATUS);
+ if (!(ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2)))
+ return;
+ ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2);
+ img_ir_write(priv, IMG_IR_STATUS, ir_status);
+
+ len = (ir_status & IMG_IR_RXDLEN) >> IMG_IR_RXDLEN_SHIFT;
+ /* some versions report wrong length for certain code types */
+ ct = hw->decoder->control.code_type;
+ if (hw->ct_quirks[ct] & IMG_IR_QUIRK_CODE_LEN_INCR)
+ ++len;
+
+ lw = img_ir_read(priv, IMG_IR_DATA_LW);
+ up = img_ir_read(priv, IMG_IR_DATA_UP);
+ img_ir_handle_data(priv, len, (u64)up << 32 | lw);
+}
+
+void img_ir_setup_hw(struct img_ir_priv *priv)
+{
+ struct img_ir_decoder **decp;
+
+ if (!priv->hw.rdev)
+ return;
+
+ /* Use the first compatible decoder, or disable decoding if none match */
+ for (decp = img_ir_decoders; *decp; ++decp) {
+ const struct img_ir_decoder *dec = *decp;
+ if (img_ir_decoder_compatible(priv, dec)) {
+ img_ir_set_protocol(priv, dec->type);
+ img_ir_set_decoder(priv, dec, 0);
+ return;
+ }
+ }
+ img_ir_set_decoder(priv, NULL, 0);
+}
+
+/**
+ * img_ir_probe_hw_caps() - Probe capabilities of the hardware.
+ * @priv: IR private data.
+ */
+static void img_ir_probe_hw_caps(struct img_ir_priv *priv)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ /*
+ * When a version of the block becomes available without these quirks,
+ * they'll have to depend on the core revision.
+ */
+ hw->ct_quirks[IMG_IR_CODETYPE_PULSELEN]
+ |= IMG_IR_QUIRK_CODE_LEN_INCR;
+ hw->ct_quirks[IMG_IR_CODETYPE_BIPHASE]
+ |= IMG_IR_QUIRK_CODE_BROKEN;
+ hw->ct_quirks[IMG_IR_CODETYPE_2BITPULSEPOS]
+ |= IMG_IR_QUIRK_CODE_BROKEN;
+}
+
+int img_ir_probe_hw(struct img_ir_priv *priv)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ struct rc_dev *rdev;
+ int error;
+
+ /* Ensure hardware decoders have been preprocessed */
+ img_ir_init_decoders();
+
+ /* Probe hardware capabilities */
+ img_ir_probe_hw_caps(priv);
+
+ /* Set up the end timer */
+ setup_timer(&hw->end_timer, img_ir_end_timer, (unsigned long)priv);
+
+ /* Register a clock notifier */
+ if (!IS_ERR(priv->clk)) {
+ hw->clk_hz = clk_get_rate(priv->clk);
+#ifdef CONFIG_COMMON_CLK
+ hw->clk_nb.notifier_call = img_ir_clk_notify;
+ error = clk_notifier_register(priv->clk, &hw->clk_nb);
+ if (error)
+ dev_warn(priv->dev,
+ "failed to register clock notifier\n");
+#endif
+ } else {
+ hw->clk_hz = 32768;
+ }
+
+ /* Allocate hardware decoder */
+ hw->rdev = rdev = rc_allocate_device();
+ if (!rdev) {
+ dev_err(priv->dev, "cannot allocate input device\n");
+ error = -ENOMEM;
+ goto err_alloc_rc;
+ }
+ rdev->priv = priv;
+ rdev->map_name = RC_MAP_EMPTY;
+ rc_set_allowed_protocols(rdev, img_ir_allowed_protos(priv));
+ rdev->input_name = "IMG Infrared Decoder";
+ rdev->s_filter = img_ir_set_filter;
+
+ /* Register hardware decoder */
+ error = rc_register_device(rdev);
+ if (error) {
+ dev_err(priv->dev, "failed to register IR input device\n");
+ goto err_register_rc;
+ }
+
+ /*
+ * Set this after rc_register_device() as no protocols have been
+ * enabled yet.
+ */
+ rdev->change_protocol = img_ir_change_protocol;
+
+ device_init_wakeup(priv->dev, 1);
+
+ return 0;
+
+err_register_rc:
+ img_ir_set_decoder(priv, NULL, 0);
+ hw->rdev = NULL;
+ rc_free_device(rdev);
+err_alloc_rc:
+#ifdef CONFIG_COMMON_CLK
+ if (!IS_ERR(priv->clk))
+ clk_notifier_unregister(priv->clk, &hw->clk_nb);
+#endif
+ return error;
+}
+
+void img_ir_remove_hw(struct img_ir_priv *priv)
+{
+ struct img_ir_priv_hw *hw = &priv->hw;
+ struct rc_dev *rdev = hw->rdev;
+ if (!rdev)
+ return;
+ img_ir_set_decoder(priv, NULL, 0);
+ hw->rdev = NULL;
+ rc_unregister_device(rdev);
+#ifdef CONFIG_COMMON_CLK
+ if (!IS_ERR(priv->clk))
+ clk_notifier_unregister(priv->clk, &hw->clk_nb);
+#endif
+}
+
+#ifdef CONFIG_PM_SLEEP
+int img_ir_suspend(struct device *dev)
+{
+ struct img_ir_priv *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev) && img_ir_enable_wake(priv))
+ enable_irq_wake(priv->irq);
+ return 0;
+}
+
+int img_ir_resume(struct device *dev)
+{
+ struct img_ir_priv *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev) && img_ir_disable_wake(priv))
+ disable_irq_wake(priv->irq);
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/media/rc/img-ir/img-ir-hw.h b/drivers/media/rc/img-ir/img-ir-hw.h
new file mode 100644
index 000000000000..6c9a94a81190
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-hw.h
@@ -0,0 +1,269 @@
+/*
+ * ImgTec IR Hardware Decoder found in PowerDown Controller.
+ *
+ * Copyright 2010-2014 Imagination Technologies Ltd.
+ */
+
+#ifndef _IMG_IR_HW_H_
+#define _IMG_IR_HW_H_
+
+#include <linux/kernel.h>
+#include <media/rc-core.h>
+
+/* constants */
+
+#define IMG_IR_CODETYPE_PULSELEN 0x0 /* Sony */
+#define IMG_IR_CODETYPE_PULSEDIST 0x1 /* NEC, Toshiba, Micom, Sharp */
+#define IMG_IR_CODETYPE_BIPHASE 0x2 /* RC-5/6 */
+#define IMG_IR_CODETYPE_2BITPULSEPOS 0x3 /* RC-MM */
+
+
+/* Timing information */
+
+/**
+ * struct img_ir_control - Decoder control settings
+ * @decoden: Primary decoder enable
+ * @code_type: Decode type (see IMG_IR_CODETYPE_*)
+ * @hdrtog: Detect header toggle symbol after leader symbol
+ * @ldrdec: Don't discard leader if maximum width reached
+ * @decodinpol: Decoder input polarity (1=active high)
+ * @bitorien: Bit orientation (1=MSB first)
+ * @d1validsel: Decoder 2 takes over if it detects valid data
+ * @bitinv: Bit inversion switch (1=don't invert)
+ * @decodend2: Secondary decoder enable (no leader symbol)
+ * @bitoriend2: Secondary decoder bit orientation (1=MSB first)
+ * @bitinvd2: Secondary decoder bit inversion switch (1=don't invert)
+ */
+struct img_ir_control {
+ unsigned decoden:1;
+ unsigned code_type:2;
+ unsigned hdrtog:1;
+ unsigned ldrdec:1;
+ unsigned decodinpol:1;
+ unsigned bitorien:1;
+ unsigned d1validsel:1;
+ unsigned bitinv:1;
+ unsigned decodend2:1;
+ unsigned bitoriend2:1;
+ unsigned bitinvd2:1;
+};
+
+/**
+ * struct img_ir_timing_range - range of timing values
+ * @min: Minimum timing value
+ * @max: Maximum timing value (if < @min it is set to @min during the
+ * preprocessing step, so it is normally left uninitialised and
+ * derived from @min and the tolerance)
+ */
+struct img_ir_timing_range {
+ u16 min;
+ u16 max;
+};
+
+/**
+ * struct img_ir_symbol_timing - timing data for a symbol
+ * @pulse: Timing range for the length of the pulse in this symbol
+ * @space: Timing range for the length of the space in this symbol
+ */
+struct img_ir_symbol_timing {
+ struct img_ir_timing_range pulse;
+ struct img_ir_timing_range space;
+};
+
+/**
+ * struct img_ir_free_timing - timing data for free time symbol
+ * @minlen: Minimum number of bits of data
+ * @maxlen: Maximum number of bits of data
+ * @ft_min: Minimum free time after message
+ */
+struct img_ir_free_timing {
+ /* measured in bits */
+ u8 minlen;
+ u8 maxlen;
+ u16 ft_min;
+};
+
+/**
+ * struct img_ir_timings - Timing values.
+ * @ldr: Leader symbol timing data
+ * @s00: Zero symbol timing data for primary decoder
+ * @s01: One symbol timing data for primary decoder
+ * @s10: Zero symbol timing data for secondary (no leader symbol) decoder
+ * @s11: One symbol timing data for secondary (no leader symbol) decoder
+ * @ft: Free time symbol timing data
+ */
+struct img_ir_timings {
+ struct img_ir_symbol_timing ldr, s00, s01, s10, s11;
+ struct img_ir_free_timing ft;
+};
+
+/**
+ * struct img_ir_filter - Filter IR events.
+ * @data: Data to match.
+ * @mask: Mask of bits to compare.
+ * @minlen: Additional minimum number of bits.
+ * @maxlen: Additional maximum number of bits.
+ */
+struct img_ir_filter {
+ u64 data;
+ u64 mask;
+ u8 minlen;
+ u8 maxlen;
+};
+
+/**
+ * struct img_ir_timing_regvals - Calculated timing register values.
+ * @ldr: Leader symbol timing register value
+ * @s00: Zero symbol timing register value for primary decoder
+ * @s01: One symbol timing register value for primary decoder
+ * @s10: Zero symbol timing register value for secondary decoder
+ * @s11: One symbol timing register value for secondary decoder
+ * @ft: Free time symbol timing register value
+ */
+struct img_ir_timing_regvals {
+ u32 ldr, s00, s01, s10, s11, ft;
+};
+
+#define IMG_IR_SCANCODE 0 /* new scancode */
+#define IMG_IR_REPEATCODE 1 /* repeat the previous code */
+
+/**
+ * struct img_ir_decoder - Decoder settings for an IR protocol.
+ * @type: Protocol types bitmap.
+ * @tolerance: Timing tolerance as a percentage (default 10%).
+ * @unit: Unit of timings in nanoseconds (default 1 us).
+ * @timings: Primary timings
+ * @rtimings: Additional override timings while waiting for repeats.
+ * @repeat: Maximum repeat interval (always in milliseconds).
+ * @control: Control flags.
+ *
+ * @scancode: Pointer to function to convert the IR data into a scancode (it
+ * must be safe to execute in interrupt context).
+ * Returns IMG_IR_SCANCODE to emit new scancode.
+ * Returns IMG_IR_REPEATCODE to repeat previous code.
+ * Returns -errno (e.g. -EINVAL) on error.
+ * @filter: Pointer to function to convert scancode filter to raw hardware
+ * filter. The minlen and maxlen fields will have been initialised
+ * to the maximum range.
+ */
+struct img_ir_decoder {
+ /* core description */
+ u64 type;
+ unsigned int tolerance;
+ unsigned int unit;
+ struct img_ir_timings timings;
+ struct img_ir_timings rtimings;
+ unsigned int repeat;
+ struct img_ir_control control;
+
+ /* scancode logic */
+ int (*scancode)(int len, u64 raw, int *scancode, u64 protocols);
+ int (*filter)(const struct rc_scancode_filter *in,
+ struct img_ir_filter *out, u64 protocols);
+};
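As a hedged illustration of the scancode callback conventions documented above (not part of this patch), a minimal implementation for a hypothetical fixed-length protocol might look like the sketch below; the 16-bit length and the pass-through mapping are assumptions, not taken from the driver.

/* Minimal sketch only: hypothetical 16-bit protocol, payload passed through. */
static int img_ir_example_scancode(int len, u64 raw, int *scancode,
				   u64 protocols)
{
	if (!len)			/* zero-length message: treat as a repeat */
		return IMG_IR_REPEATCODE;
	if (len != 16)			/* assumed fixed message length */
		return -EINVAL;
	*scancode = (u32)raw & 0xffff;	/* emit the payload as the scancode */
	return IMG_IR_SCANCODE;
}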
+
+/**
+ * struct img_ir_reg_timings - Reg values for decoder timings at clock rate.
+ * @ctrl: Processed control register value.
+ * @timings: Processed primary timings.
+ * @rtimings: Processed repeat timings.
+ */
+struct img_ir_reg_timings {
+ u32 ctrl;
+ struct img_ir_timing_regvals timings;
+ struct img_ir_timing_regvals rtimings;
+};
+
+int img_ir_register_decoder(struct img_ir_decoder *dec);
+void img_ir_unregister_decoder(struct img_ir_decoder *dec);
+
+struct img_ir_priv;
+
+#ifdef CONFIG_IR_IMG_HW
+
+enum img_ir_mode {
+ IMG_IR_M_NORMAL,
+ IMG_IR_M_REPEATING,
+#ifdef CONFIG_PM_SLEEP
+ IMG_IR_M_WAKE,
+#endif
+};
+
+/**
+ * struct img_ir_priv_hw - Private driver data for hardware decoder.
+ * @ct_quirks: Quirk bits for each code type.
+ * @rdev: Remote control device
+ * @clk_nb: Notifier block for clock notify events.
+ * @end_timer: Timer until repeat timeout.
+ * @decoder: Current decoder settings.
+ * @enabled_protocols: Currently enabled protocols.
+ * @clk_hz: Current core clock rate in Hz.
+ * @reg_timings: Timing reg values for decoder at clock rate.
+ * @flags: IMG_IR_F_*.
+ * @filters: HW filters (derived from scancode filters).
+ * @mode: Current decode mode.
+ * @suspend_irqen: Saved IRQ enable mask over suspend.
+ */
+struct img_ir_priv_hw {
+ unsigned int ct_quirks[4];
+ struct rc_dev *rdev;
+ struct notifier_block clk_nb;
+ struct timer_list end_timer;
+ const struct img_ir_decoder *decoder;
+ u64 enabled_protocols;
+ unsigned long clk_hz;
+ struct img_ir_reg_timings reg_timings;
+ unsigned int flags;
+ struct img_ir_filter filters[RC_FILTER_MAX];
+
+ enum img_ir_mode mode;
+ u32 suspend_irqen;
+};
+
+static inline bool img_ir_hw_enabled(struct img_ir_priv_hw *hw)
+{
+ return hw->rdev;
+}
+
+void img_ir_isr_hw(struct img_ir_priv *priv, u32 irq_status);
+void img_ir_setup_hw(struct img_ir_priv *priv);
+int img_ir_probe_hw(struct img_ir_priv *priv);
+void img_ir_remove_hw(struct img_ir_priv *priv);
+
+#ifdef CONFIG_PM_SLEEP
+int img_ir_suspend(struct device *dev);
+int img_ir_resume(struct device *dev);
+#else
+#define img_ir_suspend NULL
+#define img_ir_resume NULL
+#endif
+
+#else
+
+struct img_ir_priv_hw {
+};
+
+static inline bool img_ir_hw_enabled(struct img_ir_priv_hw *hw)
+{
+ return false;
+}
+static inline void img_ir_isr_hw(struct img_ir_priv *priv, u32 irq_status)
+{
+}
+static inline void img_ir_setup_hw(struct img_ir_priv *priv)
+{
+}
+static inline int img_ir_probe_hw(struct img_ir_priv *priv)
+{
+ return -ENODEV;
+}
+static inline void img_ir_remove_hw(struct img_ir_priv *priv)
+{
+}
+
+#define img_ir_suspend NULL
+#define img_ir_resume NULL
+
+#endif /* CONFIG_IR_IMG_HW */
+
+#endif /* _IMG_IR_HW_H_ */
diff --git a/drivers/media/rc/img-ir/img-ir-jvc.c b/drivers/media/rc/img-ir/img-ir-jvc.c
new file mode 100644
index 000000000000..10209d200efb
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-jvc.c
@@ -0,0 +1,81 @@
+/*
+ * ImgTec IR Decoder setup for JVC protocol.
+ *
+ * Copyright 2012-2014 Imagination Technologies Ltd.
+ */
+
+#include "img-ir-hw.h"
+
+/* Convert JVC data to a scancode */
+static int img_ir_jvc_scancode(int len, u64 raw, int *scancode, u64 protocols)
+{
+ unsigned int cust, data;
+
+ if (len != 16)
+ return -EINVAL;
+
+ cust = (raw >> 0) & 0xff;
+ data = (raw >> 8) & 0xff;
+
+ *scancode = cust << 8 | data;
+ return IMG_IR_SCANCODE;
+}
+
+/* Convert JVC scancode to JVC data filter */
+static int img_ir_jvc_filter(const struct rc_scancode_filter *in,
+ struct img_ir_filter *out, u64 protocols)
+{
+ unsigned int cust, data;
+ unsigned int cust_m, data_m;
+
+ cust = (in->data >> 8) & 0xff;
+ cust_m = (in->mask >> 8) & 0xff;
+ data = (in->data >> 0) & 0xff;
+ data_m = (in->mask >> 0) & 0xff;
+
+ out->data = cust | data << 8;
+ out->mask = cust_m | data_m << 8;
+
+ return 0;
+}
+
+/*
+ * JVC decoder
+ * See also http://www.sbprojects.com/knowledge/ir/jvc.php
+ * http://support.jvc.com/consumer/support/documents/RemoteCodes.pdf
+ */
+struct img_ir_decoder img_ir_jvc = {
+ .type = RC_BIT_JVC,
+ .control = {
+ .decoden = 1,
+ .code_type = IMG_IR_CODETYPE_PULSEDIST,
+ },
+ /* main timings */
+ .unit = 527500, /* 527.5 us */
+ .timings = {
+ /* leader symbol */
+ .ldr = {
+ .pulse = { 16 /* 8.44 ms */ },
+ .space = { 8 /* 4.22 ms */ },
+ },
+ /* 0 symbol */
+ .s00 = {
+ .pulse = { 1 /* 527.5 us +-60 us */ },
+ .space = { 1 /* 527.5 us */ },
+ },
+ /* 1 symbol */
+ .s01 = {
+ .pulse = { 1 /* 527.5 us +-60 us */ },
+ .space = { 3 /* 1.5825 ms +-40 us */ },
+ },
+ /* free time */
+ .ft = {
+ .minlen = 16,
+ .maxlen = 16,
+ .ft_min = 10, /* 5.275 ms */
+ },
+ },
+ /* scancode logic */
+ .scancode = img_ir_jvc_scancode,
+ .filter = img_ir_jvc_filter,
+};
diff --git a/drivers/media/rc/img-ir/img-ir-nec.c b/drivers/media/rc/img-ir/img-ir-nec.c
new file mode 100644
index 000000000000..e7a731bc3a9b
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-nec.c
@@ -0,0 +1,148 @@
+/*
+ * ImgTec IR Decoder setup for NEC protocol.
+ *
+ * Copyright 2010-2014 Imagination Technologies Ltd.
+ */
+
+#include "img-ir-hw.h"
+
+/* Convert NEC data to a scancode */
+static int img_ir_nec_scancode(int len, u64 raw, int *scancode, u64 protocols)
+{
+ unsigned int addr, addr_inv, data, data_inv;
+ /* a repeat code has no data */
+ if (!len)
+ return IMG_IR_REPEATCODE;
+ if (len != 32)
+ return -EINVAL;
+ /* raw encoding: ddDDaaAA */
+ addr = (raw >> 0) & 0xff;
+ addr_inv = (raw >> 8) & 0xff;
+ data = (raw >> 16) & 0xff;
+ data_inv = (raw >> 24) & 0xff;
+ if ((data_inv ^ data) != 0xff) {
+ /* 32-bit NEC (used by Apple and TiVo remotes) */
+ /* scan encoding: aaAAddDD */
+ *scancode = addr_inv << 24 |
+ addr << 16 |
+ data_inv << 8 |
+ data;
+ } else if ((addr_inv ^ addr) != 0xff) {
+ /* Extended NEC */
+ /* scan encoding: AAaaDD */
+ *scancode = addr << 16 |
+ addr_inv << 8 |
+ data;
+ } else {
+ /* Normal NEC */
+ /* scan encoding: AADD */
+ *scancode = addr << 8 |
+ data;
+ }
+ return IMG_IR_SCANCODE;
+}
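As a hedged illustration (not part of this patch), the inverse of the unpacking above can be sketched for plain 8-bit NEC; the helper name is hypothetical and the u8/u32 types come from the kernel headers already pulled in by img-ir-hw.h.

/* Sketch: build the raw ddDDaaAA word that img_ir_nec_scancode() expects. */
static u32 img_ir_nec_pack_example(u8 address, u8 command)
{
	return (u32)(u8)~command << 24 |	/* dd: inverted command */
	       (u32)command << 16 |		/* DD: command */
	       (u32)(u8)~address << 8 |		/* aa: inverted address */
	       address;				/* AA: address */
}

/* e.g. img_ir_nec_pack_example(0x04, 0x08) == 0xf708fb04, which the decoder
 * above maps back to scancode 0x0408 (normal NEC). */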
+
+/* Convert NEC scancode to NEC data filter */
+static int img_ir_nec_filter(const struct rc_scancode_filter *in,
+ struct img_ir_filter *out, u64 protocols)
+{
+ unsigned int addr, addr_inv, data, data_inv;
+ unsigned int addr_m, addr_inv_m, data_m, data_inv_m;
+
+ data = in->data & 0xff;
+ data_m = in->mask & 0xff;
+
+ if ((in->data | in->mask) & 0xff000000) {
+ /* 32-bit NEC (used by Apple and TiVo remotes) */
+ /* scan encoding: aaAAddDD */
+ addr_inv = (in->data >> 24) & 0xff;
+ addr_inv_m = (in->mask >> 24) & 0xff;
+ addr = (in->data >> 16) & 0xff;
+ addr_m = (in->mask >> 16) & 0xff;
+ data_inv = (in->data >> 8) & 0xff;
+ data_inv_m = (in->mask >> 8) & 0xff;
+ } else if ((in->data | in->mask) & 0x00ff0000) {
+ /* Extended NEC */
+ /* scan encoding AAaaDD */
+ addr = (in->data >> 16) & 0xff;
+ addr_m = (in->mask >> 16) & 0xff;
+ addr_inv = (in->data >> 8) & 0xff;
+ addr_inv_m = (in->mask >> 8) & 0xff;
+ data_inv = data ^ 0xff;
+ data_inv_m = data_m;
+ } else {
+ /* Normal NEC */
+ /* scan encoding: AADD */
+ addr = (in->data >> 8) & 0xff;
+ addr_m = (in->mask >> 8) & 0xff;
+ addr_inv = addr ^ 0xff;
+ addr_inv_m = addr_m;
+ data_inv = data ^ 0xff;
+ data_inv_m = data_m;
+ }
+
+ /* raw encoding: ddDDaaAA */
+ out->data = data_inv << 24 |
+ data << 16 |
+ addr_inv << 8 |
+ addr;
+ out->mask = data_inv_m << 24 |
+ data_m << 16 |
+ addr_inv_m << 8 |
+ addr_m;
+ return 0;
+}
+
+/*
+ * NEC decoder
+ * See also http://www.sbprojects.com/knowledge/ir/nec.php
+ * http://wiki.altium.com/display/ADOH/NEC+Infrared+Transmission+Protocol
+ */
+struct img_ir_decoder img_ir_nec = {
+ .type = RC_BIT_NEC,
+ .control = {
+ .decoden = 1,
+ .code_type = IMG_IR_CODETYPE_PULSEDIST,
+ },
+ /* main timings */
+ .unit = 562500, /* 562.5 us */
+ .timings = {
+ /* leader symbol */
+ .ldr = {
+ .pulse = { 16 /* 9ms */ },
+ .space = { 8 /* 4.5ms */ },
+ },
+ /* 0 symbol */
+ .s00 = {
+ .pulse = { 1 /* 562.5 us */ },
+ .space = { 1 /* 562.5 us */ },
+ },
+ /* 1 symbol */
+ .s01 = {
+ .pulse = { 1 /* 562.5 us */ },
+ .space = { 3 /* 1687.5 us */ },
+ },
+ /* free time */
+ .ft = {
+ .minlen = 32,
+ .maxlen = 32,
+ .ft_min = 10, /* 5.625 ms */
+ },
+ },
+ /* repeat codes */
+ .repeat = 108, /* 108 ms */
+ .rtimings = {
+ /* leader symbol */
+ .ldr = {
+ .space = { 4 /* 2.25 ms */ },
+ },
+ /* free time */
+ .ft = {
+ .minlen = 0, /* repeat code has no data */
+ .maxlen = 0,
+ },
+ },
+ /* scancode logic */
+ .scancode = img_ir_nec_scancode,
+ .filter = img_ir_nec_filter,
+};
diff --git a/drivers/media/rc/img-ir/img-ir-raw.c b/drivers/media/rc/img-ir/img-ir-raw.c
new file mode 100644
index 000000000000..cfb01d9e571a
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-raw.c
@@ -0,0 +1,151 @@
+/*
+ * ImgTec IR Raw Decoder found in PowerDown Controller.
+ *
+ * Copyright 2010-2014 Imagination Technologies Ltd.
+ *
+ * This ties into the input subsystem using the RC-core in raw mode. Raw IR
+ * signal edges are reported and decoded by generic software decoders.
+ */
+
+#include <linux/spinlock.h>
+#include <media/rc-core.h>
+#include "img-ir.h"
+
+#define ECHO_TIMEOUT_MS 150 /* ms between echoes */
+
+/* must be called with priv->lock held */
+static void img_ir_refresh_raw(struct img_ir_priv *priv, u32 irq_status)
+{
+ struct img_ir_priv_raw *raw = &priv->raw;
+ struct rc_dev *rc_dev = priv->raw.rdev;
+ int multiple;
+ u32 ir_status;
+
+ /* find whether both rise and fall was detected */
+ multiple = ((irq_status & IMG_IR_IRQ_EDGE) == IMG_IR_IRQ_EDGE);
+ /*
+ * If so, we need to see if the level has actually changed.
+ * If it's just noise that we didn't have time to process,
+ * there's no point reporting it.
+ */
+ ir_status = img_ir_read(priv, IMG_IR_STATUS) & IMG_IR_IRRXD;
+ if (multiple && ir_status == raw->last_status)
+ return;
+ raw->last_status = ir_status;
+
+ /* report the edge to the IR raw decoders */
+ if (ir_status) /* low */
+ ir_raw_event_store_edge(rc_dev, IR_SPACE);
+ else /* high */
+ ir_raw_event_store_edge(rc_dev, IR_PULSE);
+ ir_raw_event_handle(rc_dev);
+}
+
+/* called with priv->lock held */
+void img_ir_isr_raw(struct img_ir_priv *priv, u32 irq_status)
+{
+ struct img_ir_priv_raw *raw = &priv->raw;
+
+ /* check not removing */
+ if (!raw->rdev)
+ return;
+
+ img_ir_refresh_raw(priv, irq_status);
+
+ /* start / push back the echo timer */
+ mod_timer(&raw->timer, jiffies + msecs_to_jiffies(ECHO_TIMEOUT_MS));
+}
+
+/*
+ * Echo timer callback function.
+ * The raw decoders expect to get a final sample even if there are no edges, in
+ * order to be assured of the final space. If there are no edges for a certain
+ * time we use this timer to emit a final sample to satisfy them.
+ */
+static void img_ir_echo_timer(unsigned long arg)
+{
+ struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+
+ spin_lock_irq(&priv->lock);
+
+ /* check not removing */
+ if (priv->raw.rdev)
+ /*
+ * It's safe to pass irq_status=0 since it's only used to check
+ * for double edges.
+ */
+ img_ir_refresh_raw(priv, 0);
+
+ spin_unlock_irq(&priv->lock);
+}
+
+void img_ir_setup_raw(struct img_ir_priv *priv)
+{
+ u32 irq_en;
+
+ if (!priv->raw.rdev)
+ return;
+
+ /* clear and enable edge interrupts */
+ spin_lock_irq(&priv->lock);
+ irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
+ irq_en |= IMG_IR_IRQ_EDGE;
+ img_ir_write(priv, IMG_IR_IRQ_CLEAR, IMG_IR_IRQ_EDGE);
+ img_ir_write(priv, IMG_IR_IRQ_ENABLE, irq_en);
+ spin_unlock_irq(&priv->lock);
+}
+
+int img_ir_probe_raw(struct img_ir_priv *priv)
+{
+ struct img_ir_priv_raw *raw = &priv->raw;
+ struct rc_dev *rdev;
+ int error;
+
+ /* Set up the echo timer */
+ setup_timer(&raw->timer, img_ir_echo_timer, (unsigned long)priv);
+
+ /* Allocate raw decoder */
+ raw->rdev = rdev = rc_allocate_device();
+ if (!rdev) {
+ dev_err(priv->dev, "cannot allocate raw input device\n");
+ return -ENOMEM;
+ }
+ rdev->priv = priv;
+ rdev->map_name = RC_MAP_EMPTY;
+ rdev->input_name = "IMG Infrared Decoder Raw";
+ rdev->driver_type = RC_DRIVER_IR_RAW;
+
+ /* Register raw decoder */
+ error = rc_register_device(rdev);
+ if (error) {
+ dev_err(priv->dev, "failed to register raw IR input device\n");
+ rc_free_device(rdev);
+ raw->rdev = NULL;
+ return error;
+ }
+
+ return 0;
+}
+
+void img_ir_remove_raw(struct img_ir_priv *priv)
+{
+ struct img_ir_priv_raw *raw = &priv->raw;
+ struct rc_dev *rdev = raw->rdev;
+ u32 irq_en;
+
+ if (!rdev)
+ return;
+
+ /* switch off and disable raw (edge) interrupts */
+ spin_lock_irq(&priv->lock);
+ raw->rdev = NULL;
+ irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
+ irq_en &= ~IMG_IR_IRQ_EDGE;
+ img_ir_write(priv, IMG_IR_IRQ_ENABLE, irq_en);
+ img_ir_write(priv, IMG_IR_IRQ_CLEAR, IMG_IR_IRQ_EDGE);
+ spin_unlock_irq(&priv->lock);
+
+ rc_unregister_device(rdev);
+
+ del_timer_sync(&raw->timer);
+}
diff --git a/drivers/media/rc/img-ir/img-ir-raw.h b/drivers/media/rc/img-ir/img-ir-raw.h
new file mode 100644
index 000000000000..9802ffd51b9a
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-raw.h
@@ -0,0 +1,60 @@
+/*
+ * ImgTec IR Raw Decoder found in PowerDown Controller.
+ *
+ * Copyright 2010-2014 Imagination Technologies Ltd.
+ */
+
+#ifndef _IMG_IR_RAW_H_
+#define _IMG_IR_RAW_H_
+
+struct img_ir_priv;
+
+#ifdef CONFIG_IR_IMG_RAW
+
+/**
+ * struct img_ir_priv_raw - Private driver data for raw decoder.
+ * @rdev: Raw remote control device
+ * @timer: Timer to echo samples to keep soft decoders happy.
+ * @last_status: Last raw status bits.
+ */
+struct img_ir_priv_raw {
+ struct rc_dev *rdev;
+ struct timer_list timer;
+ u32 last_status;
+};
+
+static inline bool img_ir_raw_enabled(struct img_ir_priv_raw *raw)
+{
+ return raw->rdev;
+}
+
+void img_ir_isr_raw(struct img_ir_priv *priv, u32 irq_status);
+void img_ir_setup_raw(struct img_ir_priv *priv);
+int img_ir_probe_raw(struct img_ir_priv *priv);
+void img_ir_remove_raw(struct img_ir_priv *priv);
+
+#else
+
+struct img_ir_priv_raw {
+};
+static inline bool img_ir_raw_enabled(struct img_ir_priv_raw *raw)
+{
+ return false;
+}
+static inline void img_ir_isr_raw(struct img_ir_priv *priv, u32 irq_status)
+{
+}
+static inline void img_ir_setup_raw(struct img_ir_priv *priv)
+{
+}
+static inline int img_ir_probe_raw(struct img_ir_priv *priv)
+{
+ return -ENODEV;
+}
+static inline void img_ir_remove_raw(struct img_ir_priv *priv)
+{
+}
+
+#endif /* CONFIG_IR_IMG_RAW */
+
+#endif /* _IMG_IR_RAW_H_ */
diff --git a/drivers/media/rc/img-ir/img-ir-sanyo.c b/drivers/media/rc/img-ir/img-ir-sanyo.c
new file mode 100644
index 000000000000..c2c763e08a41
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-sanyo.c
@@ -0,0 +1,122 @@
+/*
+ * ImgTec IR Decoder setup for Sanyo protocol.
+ *
+ * Copyright 2012-2014 Imagination Technologies Ltd.
+ *
+ * From ir-sanyo-decoder.c:
+ *
+ * This protocol uses the NEC protocol timings. However, data is formatted as:
+ * 13 bits Custom Code
+ * 13 bits NOT(Custom Code)
+ * 8 bits Key data
+ * 8 bits NOT(Key data)
+ *
+ * According to LIRC, this protocol is used by Sanyo, Aiwa and Chinon remotes.
+ * Information on this protocol is available in the Sanyo LC7461 datasheet.
+ */
+
+#include "img-ir-hw.h"
+
+/* Convert Sanyo data to a scancode */
+static int img_ir_sanyo_scancode(int len, u64 raw, int *scancode, u64 protocols)
+{
+ unsigned int addr, addr_inv, data, data_inv;
+ /* a repeat code has no data */
+ if (!len)
+ return IMG_IR_REPEATCODE;
+ if (len != 42)
+ return -EINVAL;
+ addr = (raw >> 0) & 0x1fff;
+ addr_inv = (raw >> 13) & 0x1fff;
+ data = (raw >> 26) & 0xff;
+ data_inv = (raw >> 34) & 0xff;
+ /* Validate data */
+ if ((data_inv ^ data) != 0xff)
+ return -EINVAL;
+ /* Validate address */
+ if ((addr_inv ^ addr) != 0x1fff)
+ return -EINVAL;
+
+ /* Normal Sanyo */
+ *scancode = addr << 8 | data;
+ return IMG_IR_SCANCODE;
+}
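A hedged sketch of the inverse packing (not part of this patch), matching the 42-bit layout described in the header comment and unpacked by the function above; the helper name is hypothetical.

/* Sketch: pack custom code (13 bits) and key data (8 bits) into a raw frame. */
static u64 img_ir_sanyo_pack_example(u16 addr, u8 key)
{
	return (u64)(u8)~key << 34 |		/* NOT(key data) */
	       (u64)key << 26 |			/* key data */
	       (u64)(~addr & 0x1fff) << 13 |	/* NOT(custom code) */
	       (addr & 0x1fff);			/* custom code */
}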
+
+/* Convert Sanyo scancode to Sanyo data filter */
+static int img_ir_sanyo_filter(const struct rc_scancode_filter *in,
+ struct img_ir_filter *out, u64 protocols)
+{
+ unsigned int addr, addr_inv, data, data_inv;
+ unsigned int addr_m, data_m;
+
+ data = in->data & 0xff;
+ data_m = in->mask & 0xff;
+ data_inv = data ^ 0xff;
+
+ if (in->data & 0xffe00000)
+ return -EINVAL;
+
+ addr = (in->data >> 8) & 0x1fff;
+ addr_m = (in->mask >> 8) & 0x1fff;
+ addr_inv = addr ^ 0x1fff;
+
+ out->data = (u64)data_inv << 34 |
+ (u64)data << 26 |
+ addr_inv << 13 |
+ addr;
+ out->mask = (u64)data_m << 34 |
+ (u64)data_m << 26 |
+ addr_m << 13 |
+ addr_m;
+ return 0;
+}
+
+/* Sanyo decoder */
+struct img_ir_decoder img_ir_sanyo = {
+ .type = RC_BIT_SANYO,
+ .control = {
+ .decoden = 1,
+ .code_type = IMG_IR_CODETYPE_PULSEDIST,
+ },
+ /* main timings */
+ .unit = 562500, /* 562.5 us */
+ .timings = {
+ /* leader symbol */
+ .ldr = {
+ .pulse = { 16 /* 9ms */ },
+ .space = { 8 /* 4.5ms */ },
+ },
+ /* 0 symbol */
+ .s00 = {
+ .pulse = { 1 /* 562.5 us */ },
+ .space = { 1 /* 562.5 us */ },
+ },
+ /* 1 symbol */
+ .s01 = {
+ .pulse = { 1 /* 562.5 us */ },
+ .space = { 3 /* 1687.5 us */ },
+ },
+ /* free time */
+ .ft = {
+ .minlen = 42,
+ .maxlen = 42,
+ .ft_min = 10, /* 5.625 ms */
+ },
+ },
+ /* repeat codes */
+ .repeat = 108, /* 108 ms */
+ .rtimings = {
+ /* leader symbol */
+ .ldr = {
+ .space = { 4 /* 2.25 ms */ },
+ },
+ /* free time */
+ .ft = {
+ .minlen = 0, /* repeat code has no data */
+ .maxlen = 0,
+ },
+ },
+ /* scancode logic */
+ .scancode = img_ir_sanyo_scancode,
+ .filter = img_ir_sanyo_filter,
+};
diff --git a/drivers/media/rc/img-ir/img-ir-sharp.c b/drivers/media/rc/img-ir/img-ir-sharp.c
new file mode 100644
index 000000000000..3397cc5a6794
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-sharp.c
@@ -0,0 +1,99 @@
+/*
+ * ImgTec IR Decoder setup for Sharp protocol.
+ *
+ * Copyright 2012-2014 Imagination Technologies Ltd.
+ */
+
+#include "img-ir-hw.h"
+
+/* Convert Sharp data to a scancode */
+static int img_ir_sharp_scancode(int len, u64 raw, int *scancode, u64 protocols)
+{
+ unsigned int addr, cmd, exp, chk;
+
+ if (len != 15)
+ return -EINVAL;
+
+ addr = (raw >> 0) & 0x1f;
+ cmd = (raw >> 5) & 0xff;
+ exp = (raw >> 13) & 0x1;
+ chk = (raw >> 14) & 0x1;
+
+ /* validate data */
+ if (!exp)
+ return -EINVAL;
+ if (chk)
+ /* probably the second half of the message */
+ return -EINVAL;
+
+ *scancode = addr << 8 | cmd;
+ return IMG_IR_SCANCODE;
+}
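For illustration only (not part of this patch), the 15-bit word that the function above unpacks can be built as follows; the helper is hypothetical and covers only the first half-message (exp=1, chk=0).

/* Sketch: pack a Sharp address (5 bits) and command (8 bits). */
static u32 img_ir_sharp_pack_example(u8 addr, u8 cmd)
{
	return (addr & 0x1f) |		/* bits 0-4: address */
	       (u32)cmd << 5 |		/* bits 5-12: command */
	       1 << 13;			/* bit 13: exp=1; bit 14: chk=0 */
}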
+
+/* Convert Sharp scancode to Sharp data filter */
+static int img_ir_sharp_filter(const struct rc_scancode_filter *in,
+ struct img_ir_filter *out, u64 protocols)
+{
+ unsigned int addr, cmd, exp = 0, chk = 0;
+ unsigned int addr_m, cmd_m, exp_m = 0, chk_m = 0;
+
+ addr = (in->data >> 8) & 0x1f;
+ addr_m = (in->mask >> 8) & 0x1f;
+ cmd = (in->data >> 0) & 0xff;
+ cmd_m = (in->mask >> 0) & 0xff;
+ if (cmd_m) {
+ /* if filtering commands, we can only match the first part */
+ exp = 1;
+ exp_m = 1;
+ chk = 0;
+ chk_m = 1;
+ }
+
+ out->data = addr |
+ cmd << 5 |
+ exp << 13 |
+ chk << 14;
+ out->mask = addr_m |
+ cmd_m << 5 |
+ exp_m << 13 |
+ chk_m << 14;
+
+ return 0;
+}
+
+/*
+ * Sharp decoder
+ * See also http://www.sbprojects.com/knowledge/ir/sharp.php
+ */
+struct img_ir_decoder img_ir_sharp = {
+ .type = RC_BIT_SHARP,
+ .control = {
+ .decoden = 0,
+ .decodend2 = 1,
+ .code_type = IMG_IR_CODETYPE_PULSEDIST,
+ .d1validsel = 1,
+ },
+ /* main timings */
+ .tolerance = 20, /* 20% */
+ .timings = {
+ /* 0 symbol */
+ .s10 = {
+ .pulse = { 320 /* 320 us */ },
+ .space = { 680 /* 1 ms period */ },
+ },
+ /* 1 symbol */
+ .s11 = {
+ .pulse = { 320 /* 320 us */ },
+ .space = { 1680 /* 2 ms period */ },
+ },
+ /* free time */
+ .ft = {
+ .minlen = 15,
+ .maxlen = 15,
+ .ft_min = 5000, /* 5 ms */
+ },
+ },
+ /* scancode logic */
+ .scancode = img_ir_sharp_scancode,
+ .filter = img_ir_sharp_filter,
+};
diff --git a/drivers/media/rc/img-ir/img-ir-sony.c b/drivers/media/rc/img-ir/img-ir-sony.c
new file mode 100644
index 000000000000..993409a51a71
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-sony.c
@@ -0,0 +1,145 @@
+/*
+ * ImgTec IR Decoder setup for Sony (SIRC) protocol.
+ *
+ * Copyright 2012-2014 Imagination Technologies Ltd.
+ */
+
+#include "img-ir-hw.h"
+
+/* Convert Sony data to a scancode */
+static int img_ir_sony_scancode(int len, u64 raw, int *scancode, u64 protocols)
+{
+ unsigned int dev, subdev, func;
+
+ switch (len) {
+ case 12:
+ if (!(protocols & RC_BIT_SONY12))
+ return -EINVAL;
+ func = raw & 0x7f; /* first 7 bits */
+ raw >>= 7;
+ dev = raw & 0x1f; /* next 5 bits */
+ subdev = 0;
+ break;
+ case 15:
+ if (!(protocols & RC_BIT_SONY15))
+ return -EINVAL;
+ func = raw & 0x7f; /* first 7 bits */
+ raw >>= 7;
+ dev = raw & 0xff; /* next 8 bits */
+ subdev = 0;
+ break;
+ case 20:
+ if (!(protocols & RC_BIT_SONY20))
+ return -EINVAL;
+ func = raw & 0x7f; /* first 7 bits */
+ raw >>= 7;
+ dev = raw & 0x1f; /* next 5 bits */
+ raw >>= 5;
+ subdev = raw & 0xff; /* next 8 bits */
+ break;
+ default:
+ return -EINVAL;
+ }
+ *scancode = dev << 16 | subdev << 8 | func;
+ return IMG_IR_SCANCODE;
+}
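As a hedged example (not part of this patch), a 12-bit SIRC word in the LSB-first order unpacked above can be assembled like this; the helper name is hypothetical.

/* Sketch: pack a 12-bit SIRC word (7 function bits, then 5 device bits). */
static u32 img_ir_sirc12_pack_example(u8 dev, u8 func)
{
	return (u32)(dev & 0x1f) << 7 | (func & 0x7f);
}

/* e.g. img_ir_sirc12_pack_example(1, 0x15) == 0x95, which the decoder above
 * maps to scancode 0x00010015. */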
+
+/* Convert Sony scancode to Sony data filter */
+static int img_ir_sony_filter(const struct rc_scancode_filter *in,
+ struct img_ir_filter *out, u64 protocols)
+{
+ unsigned int dev, subdev, func;
+ unsigned int dev_m, subdev_m, func_m;
+ unsigned int len = 0;
+
+ dev = (in->data >> 16) & 0xff;
+ dev_m = (in->mask >> 16) & 0xff;
+ subdev = (in->data >> 8) & 0xff;
+ subdev_m = (in->mask >> 8) & 0xff;
+ func = (in->data >> 0) & 0x7f;
+ func_m = (in->mask >> 0) & 0x7f;
+
+ if (subdev & subdev_m) {
+ /* can't encode subdev and higher device bits */
+ if (dev & dev_m & 0xe0)
+ return -EINVAL;
+ /* subdevice (extended) bits only in 20 bit encoding */
+ if (!(protocols & RC_BIT_SONY20))
+ return -EINVAL;
+ len = 20;
+ dev_m &= 0x1f;
+ } else if (dev & dev_m & 0xe0) {
+ /* upper device bits only in 15 bit encoding */
+ if (!(protocols & RC_BIT_SONY15))
+ return -EINVAL;
+ len = 15;
+ subdev_m = 0;
+ } else {
+ /*
+ * The hardware mask cannot distinguish high device bits and low
+ * extended bits, so logically AND those bits of the masks
+ * together.
+ */
+ subdev_m &= (dev_m >> 5) | 0xf8;
+ dev_m &= 0x1f;
+ }
+
+ /* ensure there aren't any bits straying between fields */
+ dev &= dev_m;
+ subdev &= subdev_m;
+
+ /* write the hardware filter */
+ out->data = func |
+ dev << 7 |
+ subdev << 15;
+ out->mask = func_m |
+ dev_m << 7 |
+ subdev_m << 15;
+
+ if (len) {
+ out->minlen = len;
+ out->maxlen = len;
+ }
+ return 0;
+}
+
+/*
+ * Sony SIRC decoder
+ * See also http://www.sbprojects.com/knowledge/ir/sirc.php
+ * http://picprojects.org.uk/projects/sirc/sonysirc.pdf
+ */
+struct img_ir_decoder img_ir_sony = {
+ .type = RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20,
+ .control = {
+ .decoden = 1,
+ .code_type = IMG_IR_CODETYPE_PULSELEN,
+ },
+ /* main timings */
+ .unit = 600000, /* 600 us */
+ .timings = {
+ /* leader symbol */
+ .ldr = {
+ .pulse = { 4 /* 2.4 ms */ },
+ .space = { 1 /* 600 us */ },
+ },
+ /* 0 symbol */
+ .s00 = {
+ .pulse = { 1 /* 600 us */ },
+ .space = { 1 /* 600 us */ },
+ },
+ /* 1 symbol */
+ .s01 = {
+ .pulse = { 2 /* 1.2 ms */ },
+ .space = { 1 /* 600 us */ },
+ },
+ /* free time */
+ .ft = {
+ .minlen = 12,
+ .maxlen = 20,
+ .ft_min = 10, /* 6 ms */
+ },
+ },
+ /* scancode logic */
+ .scancode = img_ir_sony_scancode,
+ .filter = img_ir_sony_filter,
+};
diff --git a/drivers/media/rc/img-ir/img-ir.h b/drivers/media/rc/img-ir/img-ir.h
new file mode 100644
index 000000000000..afb189394af9
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir.h
@@ -0,0 +1,166 @@
+/*
+ * ImgTec IR Decoder found in PowerDown Controller.
+ *
+ * Copyright 2010-2014 Imagination Technologies Ltd.
+ */
+
+#ifndef _IMG_IR_H_
+#define _IMG_IR_H_
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include "img-ir-raw.h"
+#include "img-ir-hw.h"
+
+/* registers */
+
+/* relative to the start of the IR block of registers */
+#define IMG_IR_CONTROL 0x00
+#define IMG_IR_STATUS 0x04
+#define IMG_IR_DATA_LW 0x08
+#define IMG_IR_DATA_UP 0x0c
+#define IMG_IR_LEAD_SYMB_TIMING 0x10
+#define IMG_IR_S00_SYMB_TIMING 0x14
+#define IMG_IR_S01_SYMB_TIMING 0x18
+#define IMG_IR_S10_SYMB_TIMING 0x1c
+#define IMG_IR_S11_SYMB_TIMING 0x20
+#define IMG_IR_FREE_SYMB_TIMING 0x24
+#define IMG_IR_POW_MOD_PARAMS 0x28
+#define IMG_IR_POW_MOD_ENABLE 0x2c
+#define IMG_IR_IRQ_MSG_DATA_LW 0x30
+#define IMG_IR_IRQ_MSG_DATA_UP 0x34
+#define IMG_IR_IRQ_MSG_MASK_LW 0x38
+#define IMG_IR_IRQ_MSG_MASK_UP 0x3c
+#define IMG_IR_IRQ_ENABLE 0x40
+#define IMG_IR_IRQ_STATUS 0x44
+#define IMG_IR_IRQ_CLEAR 0x48
+#define IMG_IR_IRCORE_ID 0xf0
+#define IMG_IR_CORE_REV 0xf4
+#define IMG_IR_CORE_DES1 0xf8
+#define IMG_IR_CORE_DES2 0xfc
+
+
+/* field masks */
+
+/* IMG_IR_CONTROL */
+#define IMG_IR_DECODEN 0x40000000
+#define IMG_IR_CODETYPE 0x30000000
+#define IMG_IR_CODETYPE_SHIFT 28
+#define IMG_IR_HDRTOG 0x08000000
+#define IMG_IR_LDRDEC 0x04000000
+#define IMG_IR_DECODINPOL 0x02000000 /* active high */
+#define IMG_IR_BITORIEN 0x01000000 /* MSB first */
+#define IMG_IR_D1VALIDSEL 0x00008000
+#define IMG_IR_BITINV 0x00000040 /* don't invert */
+#define IMG_IR_DECODEND2 0x00000010
+#define IMG_IR_BITORIEND2 0x00000002 /* MSB first */
+#define IMG_IR_BITINVD2 0x00000001 /* don't invert */
+
+/* IMG_IR_STATUS */
+#define IMG_IR_RXDVALD2 0x00001000
+#define IMG_IR_IRRXD 0x00000400
+#define IMG_IR_TOGSTATE 0x00000200
+#define IMG_IR_RXDVAL 0x00000040
+#define IMG_IR_RXDLEN 0x0000003f
+#define IMG_IR_RXDLEN_SHIFT 0
+
+/* IMG_IR_LEAD_SYMB_TIMING, IMG_IR_Sxx_SYMB_TIMING */
+#define IMG_IR_PD_MAX 0xff000000
+#define IMG_IR_PD_MAX_SHIFT 24
+#define IMG_IR_PD_MIN 0x00ff0000
+#define IMG_IR_PD_MIN_SHIFT 16
+#define IMG_IR_W_MAX 0x0000ff00
+#define IMG_IR_W_MAX_SHIFT 8
+#define IMG_IR_W_MIN 0x000000ff
+#define IMG_IR_W_MIN_SHIFT 0
+
+/* IMG_IR_FREE_SYMB_TIMING */
+#define IMG_IR_MAXLEN 0x0007e000
+#define IMG_IR_MAXLEN_SHIFT 13
+#define IMG_IR_MINLEN 0x00001f00
+#define IMG_IR_MINLEN_SHIFT 8
+#define IMG_IR_FT_MIN 0x000000ff
+#define IMG_IR_FT_MIN_SHIFT 0
+
+/* IMG_IR_POW_MOD_PARAMS */
+#define IMG_IR_PERIOD_LEN 0x3f000000
+#define IMG_IR_PERIOD_LEN_SHIFT 24
+#define IMG_IR_PERIOD_DUTY 0x003f0000
+#define IMG_IR_PERIOD_DUTY_SHIFT 16
+#define IMG_IR_STABLE_STOP 0x00003f00
+#define IMG_IR_STABLE_STOP_SHIFT 8
+#define IMG_IR_STABLE_START 0x0000003f
+#define IMG_IR_STABLE_START_SHIFT 0
+
+/* IMG_IR_POW_MOD_ENABLE */
+#define IMG_IR_POWER_OUT_EN 0x00000002
+#define IMG_IR_POWER_MOD_EN 0x00000001
+
+/* IMG_IR_IRQ_ENABLE, IMG_IR_IRQ_STATUS, IMG_IR_IRQ_CLEAR */
+#define IMG_IR_IRQ_DEC2_ERR 0x00000080
+#define IMG_IR_IRQ_DEC_ERR 0x00000040
+#define IMG_IR_IRQ_ACT_LEVEL 0x00000020
+#define IMG_IR_IRQ_FALL_EDGE 0x00000010
+#define IMG_IR_IRQ_RISE_EDGE 0x00000008
+#define IMG_IR_IRQ_DATA_MATCH 0x00000004
+#define IMG_IR_IRQ_DATA2_VALID 0x00000002
+#define IMG_IR_IRQ_DATA_VALID 0x00000001
+#define IMG_IR_IRQ_ALL 0x000000ff
+#define IMG_IR_IRQ_EDGE (IMG_IR_IRQ_FALL_EDGE | IMG_IR_IRQ_RISE_EDGE)
+
+/* IMG_IR_IRCORE_ID */
+#define IMG_IR_CORE_ID 0x00ff0000
+#define IMG_IR_CORE_ID_SHIFT 16
+#define IMG_IR_CORE_CONFIG 0x0000ffff
+#define IMG_IR_CORE_CONFIG_SHIFT 0
+
+/* IMG_IR_CORE_REV */
+#define IMG_IR_DESIGNER 0xff000000
+#define IMG_IR_DESIGNER_SHIFT 24
+#define IMG_IR_MAJOR_REV 0x00ff0000
+#define IMG_IR_MAJOR_REV_SHIFT 16
+#define IMG_IR_MINOR_REV 0x0000ff00
+#define IMG_IR_MINOR_REV_SHIFT 8
+#define IMG_IR_MAINT_REV 0x000000ff
+#define IMG_IR_MAINT_REV_SHIFT 0
+
+struct device;
+struct clk;
+
+/**
+ * struct img_ir_priv - Private driver data.
+ * @dev: Platform device.
+ * @irq: IRQ number.
+ * @clk: Input clock.
+ * @reg_base: Iomem base address of IR register block.
+ * @lock: Protects IR registers and variables in this struct.
+ * @raw: Driver data for raw decoder.
+ * @hw: Driver data for hardware decoder.
+ */
+struct img_ir_priv {
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ void __iomem *reg_base;
+ spinlock_t lock;
+
+ struct img_ir_priv_raw raw;
+ struct img_ir_priv_hw hw;
+};
+
+/* Hardware access */
+
+static inline void img_ir_write(struct img_ir_priv *priv,
+ unsigned int reg_offs, unsigned int data)
+{
+ iowrite32(data, priv->reg_base + reg_offs);
+}
+
+static inline unsigned int img_ir_read(struct img_ir_priv *priv,
+ unsigned int reg_offs)
+{
+ return ioread32(priv->reg_base + reg_offs);
+}
+
+#endif /* _IMG_IR_H_ */
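A hedged usage sketch (not part of this patch) of the accessors and field masks defined in this header; the function is hypothetical, assumes a probed struct img_ir_priv, and takes priv->lock around the register read as the kerneldoc above requires.

/* Sketch: read the core revision register and extract the major revision. */
static unsigned int img_ir_example_major_rev(struct img_ir_priv *priv)
{
	u32 rev;

	spin_lock_irq(&priv->lock);	/* IR registers are protected by priv->lock */
	rev = img_ir_read(priv, IMG_IR_CORE_REV);
	spin_unlock_irq(&priv->lock);

	return (rev & IMG_IR_MAJOR_REV) >> IMG_IR_MAJOR_REV_SHIFT;
}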
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 822b9f47ca72..6f24e77b1488 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1017,7 +1017,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
unsigned char ir_proto_packet[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
- if (*rc_type && !(*rc_type & rc->allowed_protos))
+ if (*rc_type && !rc_protocols_allowed(rc, *rc_type))
dev_warn(dev, "Looks like you're trying to use an IR protocol "
"this device does not support\n");
@@ -1867,7 +1867,8 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
rdev->priv = ictx;
rdev->driver_type = RC_DRIVER_SCANCODE;
- rdev->allowed_protos = RC_BIT_OTHER | RC_BIT_RC6_MCE; /* iMON PAD or MCE */
+ /* iMON PAD or MCE */
+ rc_set_allowed_protocols(rdev, RC_BIT_OTHER | RC_BIT_RC6_MCE);
rdev->change_protocol = imon_ir_change_protocol;
rdev->driver_name = MOD_NAME;
@@ -1880,7 +1881,7 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
if (ictx->product == 0xffdc) {
imon_get_ffdc_type(ictx);
- rdev->allowed_protos = ictx->rc_type;
+ rc_set_allowed_protocols(rdev, ictx->rc_type);
}
imon_set_display_type(ictx);
diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c
index 3948138ca870..4ea62a1dcfda 100644
--- a/drivers/media/rc/ir-jvc-decoder.c
+++ b/drivers/media/rc/ir-jvc-decoder.c
@@ -47,7 +47,7 @@ static int ir_jvc_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct jvc_dec *data = &dev->raw->jvc;
- if (!(dev->enabled_protocols & RC_BIT_JVC))
+ if (!rc_protocols_enabled(dev, RC_BIT_JVC))
return 0;
if (!is_timing_event(ev)) {
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index ed2c8a1ed8ca..d731da6c414d 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -35,7 +35,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct lirc_codec *lirc = &dev->raw->lirc;
int sample;
- if (!(dev->enabled_protocols & RC_BIT_LIRC))
+ if (!rc_protocols_enabled(dev, RC_BIT_LIRC))
return 0;
if (!dev->raw->lirc.drv || !dev->raw->lirc.drv->rbuf)
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index 9f3c9b59f30c..0c55f794c8cf 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -216,7 +216,7 @@ static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
unsigned long delay;
- if (!(dev->enabled_protocols & RC_BIT_MCE_KBD))
+ if (!rc_protocols_enabled(dev, RC_BIT_MCE_KBD))
return 0;
if (!is_timing_event(ev)) {
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 9a9009411439..9de1791d2494 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -1,6 +1,6 @@
/* ir-nec-decoder.c - handle NEC IR Pulse/Space protocol
*
- * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -52,7 +52,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 address, not_address, command, not_command;
bool send_32bits = false;
- if (!(dev->enabled_protocols & RC_BIT_NEC))
+ if (!rc_protocols_enabled(dev, RC_BIT_NEC))
return 0;
if (!is_timing_event(ev)) {
@@ -172,7 +172,10 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (send_32bits) {
/* NEC transport, but modified protocol, used by at
* least Apple and TiVo remotes */
- scancode = data->bits;
+ scancode = not_address << 24 |
+ address << 16 |
+ not_command << 8 |
+ command;
IR_dprintk(1, "NEC (modified) scancode 0x%08x\n", scancode);
} else if ((address ^ not_address) != 0xff) {
/* Extended NEC */
@@ -222,6 +225,6 @@ module_init(ir_nec_decode_init);
module_exit(ir_nec_decode_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("NEC IR protocol decoder");
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 5c42750c7b71..763c9d131d0f 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -1,6 +1,6 @@
/* ir-raw.c - handle IR pulse/space events
*
- * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -256,7 +256,7 @@ int ir_raw_event_register(struct rc_dev *dev)
return -ENOMEM;
dev->raw->dev = dev;
- dev->enabled_protocols = ~0;
+ rc_set_enabled_protocols(dev, ~0);
rc = kfifo_alloc(&dev->raw->kfifo,
sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
GFP_KERNEL);
@@ -352,6 +352,7 @@ void ir_raw_init(void)
load_jvc_decode();
load_sony_decode();
load_sanyo_decode();
+ load_sharp_decode();
load_mce_kbd_decode();
load_lirc_codec();
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 4e53a319c5d8..4295d9b250c8 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -1,6 +1,6 @@
/* ir-rc5-decoder.c - handle RC5(x) IR Pulse/Space protocol
*
- * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -52,7 +52,7 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 toggle;
u32 scancode;
- if (!(dev->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X)))
+ if (!rc_protocols_enabled(dev, RC_BIT_RC5 | RC_BIT_RC5X))
return 0;
if (!is_timing_event(ev)) {
@@ -128,7 +128,7 @@ again:
if (data->wanted_bits == RC5X_NBITS) {
/* RC5X */
u8 xdata, command, system;
- if (!(dev->enabled_protocols & RC_BIT_RC5X)) {
+ if (!rc_protocols_enabled(dev, RC_BIT_RC5X)) {
data->state = STATE_INACTIVE;
return 0;
}
@@ -145,7 +145,7 @@ again:
} else {
/* RC5 */
u8 command, system;
- if (!(dev->enabled_protocols & RC_BIT_RC5)) {
+ if (!rc_protocols_enabled(dev, RC_BIT_RC5)) {
data->state = STATE_INACTIVE;
return 0;
}
@@ -193,6 +193,6 @@ module_init(ir_rc5_decode_init);
module_exit(ir_rc5_decode_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("RC5(x) IR protocol decoder");
diff --git a/drivers/media/rc/ir-rc5-sz-decoder.c b/drivers/media/rc/ir-rc5-sz-decoder.c
index 865fe84fd854..dc18b7434db8 100644
--- a/drivers/media/rc/ir-rc5-sz-decoder.c
+++ b/drivers/media/rc/ir-rc5-sz-decoder.c
@@ -1,6 +1,6 @@
/* ir-rc5-sz-decoder.c - handle RC5 Streamzap IR Pulse/Space protocol
*
- * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010 by Mauro Carvalho Chehab
* Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -48,7 +48,7 @@ static int ir_rc5_sz_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 toggle, command, system;
u32 scancode;
- if (!(dev->enabled_protocols & RC_BIT_RC5_SZ))
+ if (!rc_protocols_enabled(dev, RC_BIT_RC5_SZ))
return 0;
if (!is_timing_event(ev)) {
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index 7cba7d33a3fa..cfbd64e3999c 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -89,9 +89,9 @@ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 toggle;
- if (!(dev->enabled_protocols &
- (RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
- RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE)))
+ if (!rc_protocols_enabled(dev, RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
+ RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
+ RC_BIT_RC6_MCE))
return 0;
if (!is_timing_event(ev)) {
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index 0a06205b5677..eb715f04dc27 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -1,6 +1,6 @@
/* ir-sanyo-decoder.c - handle SANYO IR Pulse/Space protocol
*
- * Copyright (C) 2011 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2011 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -58,7 +58,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 address, command, not_command;
- if (!(dev->enabled_protocols & RC_BIT_SANYO))
+ if (!rc_protocols_enabled(dev, RC_BIT_SANYO))
return 0;
if (!is_timing_event(ev)) {
@@ -200,6 +200,6 @@ module_init(ir_sanyo_decode_init);
module_exit(ir_sanyo_decode_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("SANYO IR protocol decoder");
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
new file mode 100644
index 000000000000..66d20394ceaa
--- /dev/null
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -0,0 +1,200 @@
+/* ir-sharp-decoder.c - handle Sharp IR Pulse/Space protocol
+ *
+ * Copyright (C) 2013-2014 Imagination Technologies Ltd.
+ *
+ * Based on NEC decoder:
+ * Copyright (C) 2010 by Mauro Carvalho Chehab
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/module.h>
+#include "rc-core-priv.h"
+
+#define SHARP_NBITS 15
+#define SHARP_UNIT 40000 /* ns */
+#define SHARP_BIT_PULSE (8 * SHARP_UNIT) /* 320us */
+#define SHARP_BIT_0_PERIOD (25 * SHARP_UNIT) /* 1ms (680us space) */
+#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680us space) */
+#define SHARP_ECHO_SPACE (1000 * SHARP_UNIT) /* 40 ms */
+#define SHARP_TRAILER_SPACE (125 * SHARP_UNIT) /* 5 ms (even longer) */
+
+enum sharp_state {
+ STATE_INACTIVE,
+ STATE_BIT_PULSE,
+ STATE_BIT_SPACE,
+ STATE_TRAILER_PULSE,
+ STATE_ECHO_SPACE,
+ STATE_TRAILER_SPACE,
+};
+
+/**
+ * ir_sharp_decode() - Decode one Sharp pulse or space
+ * @dev: the struct rc_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_sharp_decode(struct rc_dev *dev, struct ir_raw_event ev)
+{
+ struct sharp_dec *data = &dev->raw->sharp;
+ u32 msg, echo, address, command, scancode;
+
+ if (!rc_protocols_enabled(dev, RC_BIT_SHARP))
+ return 0;
+
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ IR_dprintk(2, "Sharp decode started at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SHARP_BIT_PULSE,
+ SHARP_BIT_PULSE / 2))
+ break;
+
+ data->count = 0;
+ data->pulse_len = ev.duration;
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SHARP_BIT_PULSE,
+ SHARP_BIT_PULSE / 2))
+ break;
+
+ data->pulse_len = ev.duration;
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_SPACE:
+ if (ev.pulse)
+ break;
+
+ data->bits <<= 1;
+ if (eq_margin(data->pulse_len + ev.duration, SHARP_BIT_1_PERIOD,
+ SHARP_BIT_PULSE * 2))
+ data->bits |= 1;
+ else if (!eq_margin(data->pulse_len + ev.duration,
+ SHARP_BIT_0_PERIOD, SHARP_BIT_PULSE * 2))
+ break;
+ data->count++;
+
+ if (data->count == SHARP_NBITS ||
+ data->count == SHARP_NBITS * 2)
+ data->state = STATE_TRAILER_PULSE;
+ else
+ data->state = STATE_BIT_PULSE;
+
+ return 0;
+
+ case STATE_TRAILER_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SHARP_BIT_PULSE,
+ SHARP_BIT_PULSE / 2))
+ break;
+
+ if (data->count == SHARP_NBITS) {
+ /* exp,chk bits should be 1,0 */
+ if ((data->bits & 0x3) != 0x2)
+ break;
+ data->state = STATE_ECHO_SPACE;
+ } else {
+ data->state = STATE_TRAILER_SPACE;
+ }
+ return 0;
+
+ case STATE_ECHO_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SHARP_ECHO_SPACE,
+ SHARP_ECHO_SPACE / 4))
+ break;
+
+ data->state = STATE_BIT_PULSE;
+
+ return 0;
+
+ case STATE_TRAILER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, SHARP_TRAILER_SPACE,
+ SHARP_BIT_PULSE / 2))
+ break;
+
+ /* Validate - command, ext, chk should be inverted in 2nd */
+ msg = (data->bits >> 15) & 0x7fff;
+ echo = data->bits & 0x7fff;
+ if ((msg ^ echo) != 0x3ff) {
+ IR_dprintk(1,
+ "Sharp checksum error: received 0x%04x, 0x%04x\n",
+ msg, echo);
+ break;
+ }
+
+ address = bitrev8((msg >> 7) & 0xf8);
+ command = bitrev8((msg >> 2) & 0xff);
+
+ scancode = address << 8 | command;
+ IR_dprintk(1, "Sharp scancode 0x%04x\n", scancode);
+
+ rc_keydown(dev, scancode, 0);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ IR_dprintk(1, "Sharp decode failed at count %d state %d (%uus %s)\n",
+ data->count, data->state, TO_US(ev.duration),
+ TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static struct ir_raw_handler sharp_handler = {
+ .protocols = RC_BIT_SHARP,
+ .decode = ir_sharp_decode,
+};
+
+static int __init ir_sharp_decode_init(void)
+{
+ ir_raw_handler_register(&sharp_handler);
+
+ pr_info("IR Sharp protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_sharp_decode_exit(void)
+{
+ ir_raw_handler_unregister(&sharp_handler);
+}
+
+module_init(ir_sharp_decode_init);
+module_exit(ir_sharp_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>");
+MODULE_DESCRIPTION("Sharp IR protocol decoder");
diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c
index 29ab9c2db060..599c19a73360 100644
--- a/drivers/media/rc/ir-sony-decoder.c
+++ b/drivers/media/rc/ir-sony-decoder.c
@@ -45,8 +45,8 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 device, subdevice, function;
- if (!(dev->enabled_protocols &
- (RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20)))
+ if (!rc_protocols_enabled(dev, RC_BIT_SONY12 | RC_BIT_SONY15 |
+ RC_BIT_SONY20))
return 0;
if (!is_timing_event(ev)) {
@@ -124,7 +124,7 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
switch (data->count) {
case 12:
- if (!(dev->enabled_protocols & RC_BIT_SONY12)) {
+ if (!rc_protocols_enabled(dev, RC_BIT_SONY12)) {
data->state = STATE_INACTIVE;
return 0;
}
@@ -133,7 +133,7 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
function = bitrev8((data->bits >> 4) & 0xFE);
break;
case 15:
- if (!(dev->enabled_protocols & RC_BIT_SONY15)) {
+ if (!rc_protocols_enabled(dev, RC_BIT_SONY15)) {
data->state = STATE_INACTIVE;
return 0;
}
@@ -142,7 +142,7 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
function = bitrev8((data->bits >> 7) & 0xFE);
break;
case 20:
- if (!(dev->enabled_protocols & RC_BIT_SONY20)) {
+ if (!rc_protocols_enabled(dev, RC_BIT_SONY20)) {
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 63b42252166a..ab24cc6d3655 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1563,7 +1563,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
/* set up ir-core props */
rdev->priv = itdev;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rdev, RC_BIT_ALL);
rdev->open = ite_open;
rdev->close = ite_close;
rdev->s_idle = ite_s_idle;
diff --git a/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c b/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
index b0e42df7ff82..01d901fbfc8b 100644
--- a/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
+++ b/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -87,4 +87,4 @@ module_init(init_rc_map_adstech_dvb_t_pci)
module_exit(exit_rc_map_adstech_dvb_t_pci)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-apac-viewcomp.c b/drivers/media/rc/keymaps/rc-apac-viewcomp.c
index 8c92ff95f94d..bf9efa007e1c 100644
--- a/drivers/media/rc/keymaps/rc-apac-viewcomp.c
+++ b/drivers/media/rc/keymaps/rc-apac-viewcomp.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -78,4 +78,4 @@ module_init(init_rc_map_apac_viewcomp)
module_exit(exit_rc_map_apac_viewcomp)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-asus-pc39.c b/drivers/media/rc/keymaps/rc-asus-pc39.c
index 2caf2117759b..9e674ba5dd4f 100644
--- a/drivers/media/rc/keymaps/rc-asus-pc39.c
+++ b/drivers/media/rc/keymaps/rc-asus-pc39.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -89,4 +89,4 @@ module_init(init_rc_map_asus_pc39)
module_exit(exit_rc_map_asus_pc39)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-asus-ps3-100.c b/drivers/media/rc/keymaps/rc-asus-ps3-100.c
index ba76609c5936..e45de35f528f 100644
--- a/drivers/media/rc/keymaps/rc-asus-ps3-100.c
+++ b/drivers/media/rc/keymaps/rc-asus-ps3-100.c
@@ -1,6 +1,6 @@
/* asus-ps3-100.h - Keytable for asus_ps3_100 Remote Controller
*
- * Copyright (c) 2012 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2012 by Mauro Carvalho Chehab
*
* Based on a previous patch from Remi Schwartz <remi.schwartz@gmail.com>
*
@@ -88,4 +88,4 @@ module_init(init_rc_map_asus_ps3_100)
module_exit(exit_rc_map_asus_ps3_100)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c b/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c
index 2031224a2027..91392d4cfd6d 100644
--- a/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c
+++ b/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -67,4 +67,4 @@ module_init(init_rc_map_ati_tv_wonder_hd_600)
module_exit(exit_rc_map_ati_tv_wonder_hd_600)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-a16d.c b/drivers/media/rc/keymaps/rc-avermedia-a16d.c
index 894939ac17f2..ff30a71d623e 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-a16d.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-a16d.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -73,4 +73,4 @@ module_init(init_rc_map_avermedia_a16d)
module_exit(exit_rc_map_avermedia_a16d)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-cardbus.c b/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
index d2aaf5b9e39f..d7471a6de9b4 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -95,4 +95,4 @@ module_init(init_rc_map_avermedia_cardbus)
module_exit(exit_rc_map_avermedia_cardbus)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-dvbt.c b/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
index dc2baf062398..e2417d6331fe 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -76,4 +76,4 @@ module_init(init_rc_map_avermedia_dvbt)
module_exit(exit_rc_map_avermedia_dvbt)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m135a.c b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
index 04269d31fa19..843598a5f1b5 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m135a.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
@@ -1,6 +1,6 @@
/* avermedia-m135a.c - Keytable for Avermedia M135A Remote Controllers
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
* Copyright (c) 2010 by Herton Ronaldo Krzesinski <herton@mandriva.com.br>
*
* This program is free software; you can redistribute it and/or modify
@@ -145,4 +145,4 @@ module_init(init_rc_map_avermedia_m135a)
module_exit(exit_rc_map_avermedia_m135a)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c b/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
index e83b1a1939bf..b24e7481ac21 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
@@ -93,4 +93,4 @@ module_init(init_rc_map_avermedia_m733a_rm_k6)
module_exit(exit_rc_map_avermedia_m733a_rm_k6)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-avermedia.c b/drivers/media/rc/keymaps/rc-avermedia.c
index c6063dfcd507..3f68fbecc188 100644
--- a/drivers/media/rc/keymaps/rc-avermedia.c
+++ b/drivers/media/rc/keymaps/rc-avermedia.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -84,4 +84,4 @@ module_init(init_rc_map_avermedia)
module_exit(exit_rc_map_avermedia)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-avertv-303.c b/drivers/media/rc/keymaps/rc-avertv-303.c
index 14f78451e64e..c35bc5b835c4 100644
--- a/drivers/media/rc/keymaps/rc-avertv-303.c
+++ b/drivers/media/rc/keymaps/rc-avertv-303.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -83,4 +83,4 @@ module_init(init_rc_map_avertv_303)
module_exit(exit_rc_map_avertv_303)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-behold-columbus.c b/drivers/media/rc/keymaps/rc-behold-columbus.c
index 086b4b1f19e1..1fc344e9daa7 100644
--- a/drivers/media/rc/keymaps/rc-behold-columbus.c
+++ b/drivers/media/rc/keymaps/rc-behold-columbus.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -106,4 +106,4 @@ module_init(init_rc_map_behold_columbus)
module_exit(exit_rc_map_behold_columbus)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-behold.c b/drivers/media/rc/keymaps/rc-behold.c
index 0877e3480941..d6519f8ac95a 100644
--- a/drivers/media/rc/keymaps/rc-behold.c
+++ b/drivers/media/rc/keymaps/rc-behold.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -139,4 +139,4 @@ module_init(init_rc_map_behold)
module_exit(exit_rc_map_behold)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-budget-ci-old.c b/drivers/media/rc/keymaps/rc-budget-ci-old.c
index 8311e092c098..b196a5f436a3 100644
--- a/drivers/media/rc/keymaps/rc-budget-ci-old.c
+++ b/drivers/media/rc/keymaps/rc-budget-ci-old.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -91,4 +91,4 @@ module_init(init_rc_map_budget_ci_old)
module_exit(exit_rc_map_budget_ci_old)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-cinergy-1400.c b/drivers/media/rc/keymaps/rc-cinergy-1400.c
index 0c87fbaf99ab..a099c080bf8c 100644
--- a/drivers/media/rc/keymaps/rc-cinergy-1400.c
+++ b/drivers/media/rc/keymaps/rc-cinergy-1400.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -82,4 +82,4 @@ module_init(init_rc_map_cinergy_1400)
module_exit(exit_rc_map_cinergy_1400)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-cinergy.c b/drivers/media/rc/keymaps/rc-cinergy.c
index 309e9e3fb6f3..b0f4328bdd6f 100644
--- a/drivers/media/rc/keymaps/rc-cinergy.c
+++ b/drivers/media/rc/keymaps/rc-cinergy.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -76,4 +76,4 @@ module_init(init_rc_map_cinergy)
module_exit(exit_rc_map_cinergy)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index 492a05ade7e1..a0fa543c9f9e 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -1,6 +1,6 @@
/* rc-dvb0700-big.c - Keytable for devices in dvb0700
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* TODO: This table is a real mess, as it merges RC codes from several
* devices into a big table. It also has both RC-5 and NEC codes inside.
@@ -122,4 +122,4 @@ module_init(init_rc_map)
module_exit(exit_rc_map)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-dib0700-rc5.c b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
index 454ea596a7ee..907941145eb7 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-rc5.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
@@ -1,6 +1,6 @@
/* rc-dvb0700-big.c - Keytable for devices in dvb0700
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* TODO: This table is a real mess, as it merges RC codes from several
* devices into a big table. It also has both RC-5 and NEC codes inside.
@@ -233,4 +233,4 @@ module_init(init_rc_map)
module_exit(exit_rc_map)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-dm1105-nec.c b/drivers/media/rc/keymaps/rc-dm1105-nec.c
index 67fc9fb0c007..46e7ae414cc8 100644
--- a/drivers/media/rc/keymaps/rc-dm1105-nec.c
+++ b/drivers/media/rc/keymaps/rc-dm1105-nec.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -74,4 +74,4 @@ module_init(init_rc_map_dm1105_nec)
module_exit(exit_rc_map_dm1105_nec)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c b/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
index 91ea91de9179..d2826b46fea2 100644
--- a/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
+++ b/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -76,4 +76,4 @@ module_init(init_rc_map_dntv_live_dvb_t)
module_exit(exit_rc_map_dntv_live_dvb_t)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c b/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
index fd680d4d3eb6..0d74769467b5 100644
--- a/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
+++ b/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -95,4 +95,4 @@ module_init(init_rc_map_dntv_live_dvbt_pro)
module_exit(exit_rc_map_dntv_live_dvbt_pro)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-em-terratec.c b/drivers/media/rc/keymaps/rc-em-terratec.c
index d1fcd64c0f90..7f1e06be175b 100644
--- a/drivers/media/rc/keymaps/rc-em-terratec.c
+++ b/drivers/media/rc/keymaps/rc-em-terratec.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -67,4 +67,4 @@ module_init(init_rc_map_em_terratec)
module_exit(exit_rc_map_em_terratec)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
index 2fe45e41fe49..4fc3904daf06 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -79,4 +79,4 @@ module_init(init_rc_map_encore_enltv_fm53)
module_exit(exit_rc_map_encore_enltv_fm53)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv.c b/drivers/media/rc/keymaps/rc-encore-enltv.c
index 223de75a6d1c..f1914e23d203 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -110,4 +110,4 @@ module_init(init_rc_map_encore_enltv)
module_exit(exit_rc_map_encore_enltv)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv2.c b/drivers/media/rc/keymaps/rc-encore-enltv2.c
index 669cbff22b7e..9c6c55240d18 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv2.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv2.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -88,4 +88,4 @@ module_init(init_rc_map_encore_enltv2)
module_exit(exit_rc_map_encore_enltv2)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-evga-indtube.c b/drivers/media/rc/keymaps/rc-evga-indtube.c
index 2c647fc25916..2370d2a3deb6 100644
--- a/drivers/media/rc/keymaps/rc-evga-indtube.c
+++ b/drivers/media/rc/keymaps/rc-evga-indtube.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -59,4 +59,4 @@ module_init(init_rc_map_evga_indtube)
module_exit(exit_rc_map_evga_indtube)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-eztv.c b/drivers/media/rc/keymaps/rc-eztv.c
index 76921445c1d9..b5c96ed84376 100644
--- a/drivers/media/rc/keymaps/rc-eztv.c
+++ b/drivers/media/rc/keymaps/rc-eztv.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -94,4 +94,4 @@ module_init(init_rc_map_eztv)
module_exit(exit_rc_map_eztv)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-flydvb.c b/drivers/media/rc/keymaps/rc-flydvb.c
index 3a6bba311b08..25cb89fac03c 100644
--- a/drivers/media/rc/keymaps/rc-flydvb.c
+++ b/drivers/media/rc/keymaps/rc-flydvb.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -75,4 +75,4 @@ module_init(init_rc_map_flydvb)
module_exit(exit_rc_map_flydvb)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-flyvideo.c b/drivers/media/rc/keymaps/rc-flyvideo.c
index bf9da584643b..e71377dd0534 100644
--- a/drivers/media/rc/keymaps/rc-flyvideo.c
+++ b/drivers/media/rc/keymaps/rc-flyvideo.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,4 +68,4 @@ module_init(init_rc_map_flyvideo)
module_exit(exit_rc_map_flyvideo)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c b/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
index 2f0970fe7832..cf0608dc83d5 100644
--- a/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
+++ b/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -96,4 +96,4 @@ module_init(init_rc_map_fusionhdtv_mce)
module_exit(exit_rc_map_fusionhdtv_mce)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-gadmei-rm008z.c b/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
index 0e98ec467c34..03575bdb2eca 100644
--- a/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
+++ b/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -79,4 +79,4 @@ module_init(init_rc_map_gadmei_rm008z)
module_exit(exit_rc_map_gadmei_rm008z)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c b/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
index a2e2faa1d1b3..b2ab13b0dcb1 100644
--- a/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
+++ b/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -82,4 +82,4 @@ module_init(init_rc_map_genius_tvgo_a11mce)
module_exit(exit_rc_map_genius_tvgo_a11mce)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-gotview7135.c b/drivers/media/rc/keymaps/rc-gotview7135.c
index 864614e19314..229a36ac7f0a 100644
--- a/drivers/media/rc/keymaps/rc-gotview7135.c
+++ b/drivers/media/rc/keymaps/rc-gotview7135.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -77,4 +77,4 @@ module_init(init_rc_map_gotview7135)
module_exit(exit_rc_map_gotview7135)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-hauppauge.c b/drivers/media/rc/keymaps/rc-hauppauge.c
index 929bbbc16393..36d57f7c532b 100644
--- a/drivers/media/rc/keymaps/rc-hauppauge.c
+++ b/drivers/media/rc/keymaps/rc-hauppauge.c
@@ -8,7 +8,7 @@
* - Hauppauge Black;
 * - DSR-0112 remote bundled with Hauppauge MiniStick.
*
- * Copyright (c) 2010-2011 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010-2011 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -290,4 +290,4 @@ module_init(init_rc_map_rc5_hauppauge_new)
module_exit(exit_rc_map_rc5_hauppauge_new)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-iodata-bctv7e.c b/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
index 34540dfc3df5..9ee154cb0c6b 100644
--- a/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
+++ b/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -86,4 +86,4 @@ module_init(init_rc_map_iodata_bctv7e)
module_exit(exit_rc_map_iodata_bctv7e)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-kaiomy.c b/drivers/media/rc/keymaps/rc-kaiomy.c
index 4264a787c150..60803a732c08 100644
--- a/drivers/media/rc/keymaps/rc-kaiomy.c
+++ b/drivers/media/rc/keymaps/rc-kaiomy.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -85,4 +85,4 @@ module_init(init_rc_map_kaiomy)
module_exit(exit_rc_map_kaiomy)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-kworld-315u.c b/drivers/media/rc/keymaps/rc-kworld-315u.c
index e48cd267dda6..ba087eed1ed9 100644
--- a/drivers/media/rc/keymaps/rc-kworld-315u.c
+++ b/drivers/media/rc/keymaps/rc-kworld-315u.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -81,4 +81,4 @@ module_init(init_rc_map_kworld_315u)
module_exit(exit_rc_map_kworld_315u)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-kworld-pc150u.c b/drivers/media/rc/keymaps/rc-kworld-pc150u.c
index 233bb5ee087f..b92e571f4def 100644
--- a/drivers/media/rc/keymaps/rc-kworld-pc150u.c
+++ b/drivers/media/rc/keymaps/rc-kworld-pc150u.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2010 by Kyle Strickland
* (based on kworld-plus-tv-analog.c by
- * Mauro Carvalho Chehab <mchehab@redhat.com>)
+ * Mauro Carvalho Chehab)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
index 32998d6b787d..edc868564f99 100644
--- a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
+++ b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -97,4 +97,4 @@ module_init(init_rc_map_kworld_plus_tv_analog)
module_exit(exit_rc_map_kworld_plus_tv_analog)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-manli.c b/drivers/media/rc/keymaps/rc-manli.c
index e7038bb71bf6..92424ef2aaa6 100644
--- a/drivers/media/rc/keymaps/rc-manli.c
+++ b/drivers/media/rc/keymaps/rc-manli.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -132,4 +132,4 @@ module_init(init_rc_map_manli)
module_exit(exit_rc_map_manli)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c b/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
index c393d8a50bca..fd7a55c56167 100644
--- a/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
+++ b/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -121,4 +121,4 @@ module_init(init_rc_map_msi_tvanywhere_plus)
module_exit(exit_rc_map_msi_tvanywhere_plus)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-msi-tvanywhere.c b/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
index a7003d3a3c8a..4233a8d4d63e 100644
--- a/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
+++ b/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -67,4 +67,4 @@ module_init(init_rc_map_msi_tvanywhere)
module_exit(exit_rc_map_msi_tvanywhere)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-nebula.c b/drivers/media/rc/keymaps/rc-nebula.c
index 3f0ddd7afd30..8ec881adb7cf 100644
--- a/drivers/media/rc/keymaps/rc-nebula.c
+++ b/drivers/media/rc/keymaps/rc-nebula.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -94,4 +94,4 @@ module_init(init_rc_map_nebula)
module_exit(exit_rc_map_nebula)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c b/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
index 8d4dae2e2ece..292bbad35d21 100644
--- a/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
+++ b/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,7 +14,7 @@
#include <linux/module.h>
/* Terratec Cinergy Hybrid T USB XS FM
- Mauro Carvalho Chehab <mchehab@redhat.com>
+ Mauro Carvalho Chehab
*/
static struct rc_map_table nec_terratec_cinergy_xs[] = {
@@ -155,4 +155,4 @@ module_init(init_rc_map_nec_terratec_cinergy_xs)
module_exit(exit_rc_map_nec_terratec_cinergy_xs)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-norwood.c b/drivers/media/rc/keymaps/rc-norwood.c
index 9e65f07157ab..ca1b82a2c54f 100644
--- a/drivers/media/rc/keymaps/rc-norwood.c
+++ b/drivers/media/rc/keymaps/rc-norwood.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -83,4 +83,4 @@ module_init(init_rc_map_norwood)
module_exit(exit_rc_map_norwood)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-npgtech.c b/drivers/media/rc/keymaps/rc-npgtech.c
index 65d0cfc3c33b..1fb946024512 100644
--- a/drivers/media/rc/keymaps/rc-npgtech.c
+++ b/drivers/media/rc/keymaps/rc-npgtech.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -78,4 +78,4 @@ module_init(init_rc_map_npgtech)
module_exit(exit_rc_map_npgtech)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-pctv-sedna.c b/drivers/media/rc/keymaps/rc-pctv-sedna.c
index bf2cbdfe2e32..5ef01ab3fd50 100644
--- a/drivers/media/rc/keymaps/rc-pctv-sedna.c
+++ b/drivers/media/rc/keymaps/rc-pctv-sedna.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -78,4 +78,4 @@ module_init(init_rc_map_pctv_sedna)
module_exit(exit_rc_map_pctv_sedna)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-color.c b/drivers/media/rc/keymaps/rc-pinnacle-color.c
index b46cd8fe6438..a218b471a4ca 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-color.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-color.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -92,4 +92,4 @@ module_init(init_rc_map_pinnacle_color)
module_exit(exit_rc_map_pinnacle_color)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-grey.c b/drivers/media/rc/keymaps/rc-pinnacle-grey.c
index d525df9ad868..4a3f467a47a2 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-grey.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-grey.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -87,4 +87,4 @@ module_init(init_rc_map_pinnacle_grey)
module_exit(exit_rc_map_pinnacle_grey)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
index a4603d035374..e89cc10b68bf 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,4 +68,4 @@ module_init(init_rc_map_pinnacle_pctv_hd)
module_exit(exit_rc_map_pinnacle_pctv_hd)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-pixelview-002t.c b/drivers/media/rc/keymaps/rc-pixelview-002t.c
index 33eb64333c6f..d967c3816fdc 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-002t.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-002t.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -75,4 +75,4 @@ module_init(init_rc_map_pixelview)
module_exit(exit_rc_map_pixelview)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-pixelview-mk12.c b/drivers/media/rc/keymaps/rc-pixelview-mk12.c
index 21f4dd25c2ec..224d0efaa6e5 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-mk12.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-mk12.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -81,4 +81,4 @@ module_init(init_rc_map_pixelview)
module_exit(exit_rc_map_pixelview)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-pixelview-new.c b/drivers/media/rc/keymaps/rc-pixelview-new.c
index f944ad2cac2b..781d788d6b6d 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-new.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-new.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -81,4 +81,4 @@ module_init(init_rc_map_pixelview_new)
module_exit(exit_rc_map_pixelview_new)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-pixelview.c b/drivers/media/rc/keymaps/rc-pixelview.c
index a6020eea7b95..39e6feaa35a3 100644
--- a/drivers/media/rc/keymaps/rc-pixelview.c
+++ b/drivers/media/rc/keymaps/rc-pixelview.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -80,4 +80,4 @@ module_init(init_rc_map_pixelview)
module_exit(exit_rc_map_pixelview)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
index e74c571a5e44..e96fa3ab9f4b 100644
--- a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
+++ b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -79,4 +79,4 @@ module_init(init_rc_map_powercolor_real_angel)
module_exit(exit_rc_map_powercolor_real_angel)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-proteus-2309.c b/drivers/media/rc/keymaps/rc-proteus-2309.c
index adee8035ce96..eef626ee02df 100644
--- a/drivers/media/rc/keymaps/rc-proteus-2309.c
+++ b/drivers/media/rc/keymaps/rc-proteus-2309.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -67,4 +67,4 @@ module_init(init_rc_map_proteus_2309)
module_exit(exit_rc_map_proteus_2309)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-purpletv.c b/drivers/media/rc/keymaps/rc-purpletv.c
index 722597a20e4a..cec6fe466829 100644
--- a/drivers/media/rc/keymaps/rc-purpletv.c
+++ b/drivers/media/rc/keymaps/rc-purpletv.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -79,4 +79,4 @@ module_init(init_rc_map_purpletv)
module_exit(exit_rc_map_purpletv)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-pv951.c b/drivers/media/rc/keymaps/rc-pv951.c
index 0105d63c07a9..5ac89ce8c053 100644
--- a/drivers/media/rc/keymaps/rc-pv951.c
+++ b/drivers/media/rc/keymaps/rc-pv951.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -76,4 +76,4 @@ module_init(init_rc_map_pv951)
module_exit(exit_rc_map_pv951)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c b/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
index 073694d50f49..9f778bd091db 100644
--- a/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
+++ b/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -76,4 +76,4 @@ module_init(init_rc_map_real_audio_220_32_keys)
module_exit(exit_rc_map_real_audio_220_32_keys)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-tbs-nec.c b/drivers/media/rc/keymaps/rc-tbs-nec.c
index 5039be782bc5..24ce2a252502 100644
--- a/drivers/media/rc/keymaps/rc-tbs-nec.c
+++ b/drivers/media/rc/keymaps/rc-tbs-nec.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -73,4 +73,4 @@ module_init(init_rc_map_tbs_nec)
module_exit(exit_rc_map_tbs_nec)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c b/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
index 53629fb0151f..97eb83ab5a35 100644
--- a/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
+++ b/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -90,4 +90,4 @@ module_init(init_rc_map_terratec_cinergy_xs)
module_exit(exit_rc_map_terratec_cinergy_xs)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-tevii-nec.c b/drivers/media/rc/keymaps/rc-tevii-nec.c
index f2c3b75d8580..38e0c0875596 100644
--- a/drivers/media/rc/keymaps/rc-tevii-nec.c
+++ b/drivers/media/rc/keymaps/rc-tevii-nec.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -86,4 +86,4 @@ module_init(init_rc_map_tevii_nec)
module_exit(exit_rc_map_tevii_nec)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-tivo.c b/drivers/media/rc/keymaps/rc-tivo.c
index 454e06295692..5cc1b456e329 100644
--- a/drivers/media/rc/keymaps/rc-tivo.c
+++ b/drivers/media/rc/keymaps/rc-tivo.c
@@ -15,62 +15,62 @@
* Initial mapping is for the TiVo remote included in the Nero LiquidTV bundle,
* which also ships with a TiVo-branded IR transceiver, supported by the mceusb
* driver. Note that the remote uses an NEC-ish protocol, but instead of having
- * a command/not_command pair, it has a vendor ID of 0xa10c, but some keys, the
+ * a command/not_command pair, it has a vendor ID of 0x3085. For some keys, the
* NEC extended checksums do pass, so the table presently has the intended
* values and the checksum-passed versions for those keys.
*/
static struct rc_map_table tivo[] = {
- { 0xa10c900f, KEY_MEDIA }, /* TiVo Button */
- { 0xa10c0807, KEY_POWER2 }, /* TV Power */
- { 0xa10c8807, KEY_TV }, /* Live TV/Swap */
- { 0xa10c2c03, KEY_VIDEO_NEXT }, /* TV Input */
- { 0xa10cc807, KEY_INFO },
- { 0xa10cfa05, KEY_CYCLEWINDOWS }, /* Window */
+ { 0x3085f009, KEY_MEDIA }, /* TiVo Button */
+ { 0x3085e010, KEY_POWER2 }, /* TV Power */
+ { 0x3085e011, KEY_TV }, /* Live TV/Swap */
+ { 0x3085c034, KEY_VIDEO_NEXT }, /* TV Input */
+ { 0x3085e013, KEY_INFO },
+ { 0x3085a05f, KEY_CYCLEWINDOWS }, /* Window */
{ 0x0085305f, KEY_CYCLEWINDOWS },
- { 0xa10c6c03, KEY_EPG }, /* Guide */
+ { 0x3085c036, KEY_EPG }, /* Guide */
- { 0xa10c2807, KEY_UP },
- { 0xa10c6807, KEY_DOWN },
- { 0xa10ce807, KEY_LEFT },
- { 0xa10ca807, KEY_RIGHT },
+ { 0x3085e014, KEY_UP },
+ { 0x3085e016, KEY_DOWN },
+ { 0x3085e017, KEY_LEFT },
+ { 0x3085e015, KEY_RIGHT },
- { 0xa10c1807, KEY_SCROLLDOWN }, /* Red Thumbs Down */
- { 0xa10c9807, KEY_SELECT },
- { 0xa10c5807, KEY_SCROLLUP }, /* Green Thumbs Up */
+ { 0x3085e018, KEY_SCROLLDOWN }, /* Red Thumbs Down */
+ { 0x3085e019, KEY_SELECT },
+ { 0x3085e01a, KEY_SCROLLUP }, /* Green Thumbs Up */
- { 0xa10c3807, KEY_VOLUMEUP },
- { 0xa10cb807, KEY_VOLUMEDOWN },
- { 0xa10cd807, KEY_MUTE },
- { 0xa10c040b, KEY_RECORD },
- { 0xa10c7807, KEY_CHANNELUP },
- { 0xa10cf807, KEY_CHANNELDOWN },
+ { 0x3085e01c, KEY_VOLUMEUP },
+ { 0x3085e01d, KEY_VOLUMEDOWN },
+ { 0x3085e01b, KEY_MUTE },
+ { 0x3085d020, KEY_RECORD },
+ { 0x3085e01e, KEY_CHANNELUP },
+ { 0x3085e01f, KEY_CHANNELDOWN },
{ 0x0085301f, KEY_CHANNELDOWN },
- { 0xa10c840b, KEY_PLAY },
- { 0xa10cc40b, KEY_PAUSE },
- { 0xa10ca40b, KEY_SLOW },
- { 0xa10c440b, KEY_REWIND },
- { 0xa10c240b, KEY_FASTFORWARD },
- { 0xa10c640b, KEY_PREVIOUS },
- { 0xa10ce40b, KEY_NEXT }, /* ->| */
+ { 0x3085d021, KEY_PLAY },
+ { 0x3085d023, KEY_PAUSE },
+ { 0x3085d025, KEY_SLOW },
+ { 0x3085d022, KEY_REWIND },
+ { 0x3085d024, KEY_FASTFORWARD },
+ { 0x3085d026, KEY_PREVIOUS },
+ { 0x3085d027, KEY_NEXT }, /* ->| */
- { 0xa10c220d, KEY_ZOOM }, /* Aspect */
- { 0xa10c120d, KEY_STOP },
- { 0xa10c520d, KEY_DVD }, /* DVD Menu */
+ { 0x3085b044, KEY_ZOOM }, /* Aspect */
+ { 0x3085b048, KEY_STOP },
+ { 0x3085b04a, KEY_DVD }, /* DVD Menu */
- { 0xa10c140b, KEY_NUMERIC_1 },
- { 0xa10c940b, KEY_NUMERIC_2 },
- { 0xa10c540b, KEY_NUMERIC_3 },
- { 0xa10cd40b, KEY_NUMERIC_4 },
- { 0xa10c340b, KEY_NUMERIC_5 },
- { 0xa10cb40b, KEY_NUMERIC_6 },
- { 0xa10c740b, KEY_NUMERIC_7 },
- { 0xa10cf40b, KEY_NUMERIC_8 },
+ { 0x3085d028, KEY_NUMERIC_1 },
+ { 0x3085d029, KEY_NUMERIC_2 },
+ { 0x3085d02a, KEY_NUMERIC_3 },
+ { 0x3085d02b, KEY_NUMERIC_4 },
+ { 0x3085d02c, KEY_NUMERIC_5 },
+ { 0x3085d02d, KEY_NUMERIC_6 },
+ { 0x3085d02e, KEY_NUMERIC_7 },
+ { 0x3085d02f, KEY_NUMERIC_8 },
{ 0x0085302f, KEY_NUMERIC_8 },
- { 0xa10c0c03, KEY_NUMERIC_9 },
- { 0xa10c8c03, KEY_NUMERIC_0 },
- { 0xa10ccc03, KEY_ENTER },
- { 0xa10c4c03, KEY_CLEAR },
+ { 0x3085c030, KEY_NUMERIC_9 },
+ { 0x3085c031, KEY_NUMERIC_0 },
+ { 0x3085c033, KEY_ENTER },
+ { 0x3085c032, KEY_CLEAR },
};
static struct rc_map_list tivo_map = {
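The duplicated 0x0085xxxx entries follow from the checksum remark in the header comment: for a handful of keys the last two scancode bytes happen to be complements, so the NEC decoder can also report a checksum-passed form. A quick illustrative check, assuming the NEC32 scancode keeps the four protocol bytes in transmission order (consistent with the 0x3085 vendor ID noted above):

#include <stdint.h>
#include <stdio.h>

static void check(uint32_t scancode)
{
	uint8_t cmd     = (scancode >> 8) & 0xff;	/* third byte */
	uint8_t cmd_inv = scancode & 0xff;		/* fourth byte */

	printf("0x%08x: 0x%02x ^ 0x%02x = 0x%02x -> checksum %s\n",
	       scancode, cmd, cmd_inv, cmd ^ cmd_inv,
	       (cmd ^ cmd_inv) == 0xff ? "passes" : "fails");
}

int main(void)
{
	check(0x3085e017);	/* KEY_LEFT: fails, NEC32 entry only */
	check(0x3085a05f);	/* Window: passes, hence 0x0085305f as well */
	check(0x3085e01f);	/* Channel down: passes, hence 0x0085301f too */
	return 0;
}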
diff --git a/drivers/media/rc/keymaps/rc-tt-1500.c b/drivers/media/rc/keymaps/rc-tt-1500.c
index 80217ffc91db..c766d3b2b6b0 100644
--- a/drivers/media/rc/keymaps/rc-tt-1500.c
+++ b/drivers/media/rc/keymaps/rc-tt-1500.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -80,4 +80,4 @@ module_init(init_rc_map_tt_1500)
module_exit(exit_rc_map_tt_1500)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-videomate-s350.c b/drivers/media/rc/keymaps/rc-videomate-s350.c
index 8bfc3e8d909d..8a354775a2d8 100644
--- a/drivers/media/rc/keymaps/rc-videomate-s350.c
+++ b/drivers/media/rc/keymaps/rc-videomate-s350.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -83,4 +83,4 @@ module_init(init_rc_map_videomate_s350)
module_exit(exit_rc_map_videomate_s350)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c b/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
index 390ce9431b35..eb0cda7766c4 100644
--- a/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
+++ b/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -85,4 +85,4 @@ module_init(init_rc_map_videomate_tv_pvr)
module_exit(exit_rc_map_videomate_tv_pvr)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c b/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
index 2852bf705064..c1dd598e828e 100644
--- a/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
+++ b/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -80,4 +80,4 @@ module_init(init_rc_map_winfast_usbii_deluxe)
module_exit(exit_rc_map_winfast_usbii_deluxe)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-winfast.c b/drivers/media/rc/keymaps/rc-winfast.c
index 2df1cba23600..8a779da1e973 100644
--- a/drivers/media/rc/keymaps/rc-winfast.c
+++ b/drivers/media/rc/keymaps/rc-winfast.c
@@ -2,7 +2,7 @@
*
* keymap imported from ir-keymaps.c
*
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -100,4 +100,4 @@ module_init(init_rc_map_winfast)
module_exit(exit_rc_map_winfast)
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index a25bb1581e46..5d8f3d40d820 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -84,7 +84,7 @@
#define MCE_PORT_IR 0x4 /* (0x4 << 5) | MCE_CMD = 0x9f */
#define MCE_PORT_SYS 0x7 /* (0x7 << 5) | MCE_CMD = 0xff */
#define MCE_PORT_SER 0x6 /* 0xc0 thru 0xdf flush & 0x1f bytes */
-#define MCE_PORT_MASK 0xe0 /* Mask out command bits */
+#define MCE_PORT_MASK 0xe0 /* Mask out command bits */
/* Command port headers */
#define MCE_CMD_PORT_IR 0x9f /* IR-related cmd/rsp */
@@ -153,19 +153,6 @@
#define MCE_COMMAND_IRDATA 0x80
#define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */
-/* module parameters */
-#ifdef CONFIG_USB_DEBUG
-static bool debug = 1;
-#else
-static bool debug;
-#endif
-
-#define mce_dbg(dev, fmt, ...) \
- do { \
- if (debug) \
- dev_info(dev, fmt, ## __VA_ARGS__); \
- } while (0)
-
/* general constants */
#define SEND_FLAG_IN_PROGRESS 1
#define SEND_FLAG_COMPLETE 2
@@ -541,16 +528,13 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd)
static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
int offset, int len, bool out)
{
- char codes[USB_BUFLEN * 3 + 1];
- char inout[9];
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+ char *inout;
u8 cmd, subcmd, data1, data2, data3, data4;
struct device *dev = ir->dev;
- int i, start, skip = 0;
+ int start, skip = 0;
u32 carrier, period;
- if (!debug)
- return;
-
/* skip meaningless 0xb1 0x60 header bytes on orig receiver */
if (ir->flags.microsoft_gen1 && !out && !offset)
skip = 2;
@@ -558,16 +542,10 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
if (len <= skip)
return;
- for (i = 0; i < len && i < USB_BUFLEN; i++)
- snprintf(codes + i * 3, 4, "%02x ", buf[i + offset] & 0xff);
-
- dev_info(dev, "%sx data: %s(length=%d)\n",
- (out ? "t" : "r"), codes, len);
+ dev_dbg(dev, "%cx data: %*ph (length=%d)",
+ (out ? 't' : 'r'), min(len, USB_BUFLEN), buf, len);
- if (out)
- strcpy(inout, "Request\0");
- else
- strcpy(inout, "Got\0");
+ inout = out ? "Request" : "Got";
start = offset + skip;
cmd = buf[start] & 0xff;
@@ -583,50 +561,50 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
break;
if ((subcmd == MCE_CMD_PORT_SYS) &&
(data1 == MCE_CMD_RESUME))
- dev_info(dev, "Device resume requested\n");
+ dev_dbg(dev, "Device resume requested");
else
- dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
+ dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
cmd, subcmd);
break;
case MCE_CMD_PORT_SYS:
switch (subcmd) {
case MCE_RSP_EQEMVER:
if (!out)
- dev_info(dev, "Emulator interface version %x\n",
+ dev_dbg(dev, "Emulator interface version %x",
data1);
break;
case MCE_CMD_G_REVISION:
if (len == 2)
- dev_info(dev, "Get hw/sw rev?\n");
+ dev_dbg(dev, "Get hw/sw rev?");
else
- dev_info(dev, "hw/sw rev 0x%02x 0x%02x "
- "0x%02x 0x%02x\n", data1, data2,
+ dev_dbg(dev, "hw/sw rev 0x%02x 0x%02x 0x%02x 0x%02x",
+ data1, data2,
buf[start + 4], buf[start + 5]);
break;
case MCE_CMD_RESUME:
- dev_info(dev, "Device resume requested\n");
+ dev_dbg(dev, "Device resume requested");
break;
case MCE_RSP_CMD_ILLEGAL:
- dev_info(dev, "Illegal PORT_SYS command\n");
+ dev_dbg(dev, "Illegal PORT_SYS command");
break;
case MCE_RSP_EQWAKEVERSION:
if (!out)
- dev_info(dev, "Wake version, proto: 0x%02x, "
+ dev_dbg(dev, "Wake version, proto: 0x%02x, "
"payload: 0x%02x, address: 0x%02x, "
- "version: 0x%02x\n",
+ "version: 0x%02x",
data1, data2, data3, data4);
break;
case MCE_RSP_GETPORTSTATUS:
if (!out)
/* We use data1 + 1 here, to match hw labels */
- dev_info(dev, "TX port %d: blaster is%s connected\n",
+ dev_dbg(dev, "TX port %d: blaster is%s connected",
data1 + 1, data4 ? " not" : "");
break;
case MCE_CMD_FLASHLED:
- dev_info(dev, "Attempting to flash LED\n");
+ dev_dbg(dev, "Attempting to flash LED");
break;
default:
- dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
+ dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
cmd, subcmd);
break;
}
@@ -634,13 +612,13 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
case MCE_CMD_PORT_IR:
switch (subcmd) {
case MCE_CMD_SIG_END:
- dev_info(dev, "End of signal\n");
+ dev_dbg(dev, "End of signal");
break;
case MCE_CMD_PING:
- dev_info(dev, "Ping\n");
+ dev_dbg(dev, "Ping");
break;
case MCE_CMD_UNKNOWN:
- dev_info(dev, "Resp to 9f 05 of 0x%02x 0x%02x\n",
+ dev_dbg(dev, "Resp to 9f 05 of 0x%02x 0x%02x",
data1, data2);
break;
case MCE_RSP_EQIRCFS:
@@ -649,51 +627,51 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
if (!period)
break;
carrier = (1000 * 1000) / period;
- dev_info(dev, "%s carrier of %u Hz (period %uus)\n",
+ dev_dbg(dev, "%s carrier of %u Hz (period %uus)",
inout, carrier, period);
break;
case MCE_CMD_GETIRCFS:
- dev_info(dev, "Get carrier mode and freq\n");
+ dev_dbg(dev, "Get carrier mode and freq");
break;
case MCE_RSP_EQIRTXPORTS:
- dev_info(dev, "%s transmit blaster mask of 0x%02x\n",
+ dev_dbg(dev, "%s transmit blaster mask of 0x%02x",
inout, data1);
break;
case MCE_RSP_EQIRTIMEOUT:
/* value is in units of 50us, so x*50/1000 ms */
period = ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000;
- dev_info(dev, "%s receive timeout of %d ms\n",
+ dev_dbg(dev, "%s receive timeout of %d ms",
inout, period);
break;
case MCE_CMD_GETIRTIMEOUT:
- dev_info(dev, "Get receive timeout\n");
+ dev_dbg(dev, "Get receive timeout");
break;
case MCE_CMD_GETIRTXPORTS:
- dev_info(dev, "Get transmit blaster mask\n");
+ dev_dbg(dev, "Get transmit blaster mask");
break;
case MCE_RSP_EQIRRXPORTEN:
- dev_info(dev, "%s %s-range receive sensor in use\n",
+ dev_dbg(dev, "%s %s-range receive sensor in use",
inout, data1 == 0x02 ? "short" : "long");
break;
case MCE_CMD_GETIRRXPORTEN:
/* aka MCE_RSP_EQIRRXCFCNT */
if (out)
- dev_info(dev, "Get receive sensor\n");
+ dev_dbg(dev, "Get receive sensor");
else if (ir->learning_enabled)
- dev_info(dev, "RX pulse count: %d\n",
+ dev_dbg(dev, "RX pulse count: %d",
((data1 << 8) | data2));
break;
case MCE_RSP_EQIRNUMPORTS:
if (out)
break;
- dev_info(dev, "Num TX ports: %x, num RX ports: %x\n",
+ dev_dbg(dev, "Num TX ports: %x, num RX ports: %x",
data1, data2);
break;
case MCE_RSP_CMD_ILLEGAL:
- dev_info(dev, "Illegal PORT_IR command\n");
+ dev_dbg(dev, "Illegal PORT_IR command");
break;
default:
- dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
+ dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
cmd, subcmd);
break;
}
@@ -703,10 +681,11 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
}
if (cmd == MCE_IRDATA_TRAILER)
- dev_info(dev, "End of raw IR data\n");
+ dev_dbg(dev, "End of raw IR data");
else if ((cmd != MCE_CMD_PORT_IR) &&
((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
- dev_info(dev, "Raw IR data, %d pulse/space samples\n", ir->rem);
+ dev_dbg(dev, "Raw IR data, %d pulse/space samples", ir->rem);
+#endif
}
static void mce_async_callback(struct urb *urb)
@@ -718,10 +697,25 @@ static void mce_async_callback(struct urb *urb)
return;
ir = urb->context;
- if (ir) {
+
+ switch (urb->status) {
+ /* success */
+ case 0:
len = urb->actual_length;
mceusb_dev_printdata(ir, urb->transfer_buffer, 0, len, true);
+ break;
+
+ case -ECONNRESET:
+ case -ENOENT:
+ case -EILSEQ:
+ case -ESHUTDOWN:
+ break;
+
+ case -EPIPE:
+ default:
+ dev_err(ir->dev, "Error: request urb status = %d", urb->status);
+ break;
}
/* the transfer buffer and urb were allocated in mce_request_packet */
@@ -770,17 +764,17 @@ static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
return;
}
- mce_dbg(dev, "receive request called (size=%#x)\n", size);
+ dev_dbg(dev, "receive request called (size=%#x)", size);
async_urb->transfer_buffer_length = size;
async_urb->dev = ir->usbdev;
res = usb_submit_urb(async_urb, GFP_ATOMIC);
if (res) {
- mce_dbg(dev, "receive request FAILED! (res=%d)\n", res);
+ dev_err(dev, "receive request FAILED! (res=%d)", res);
return;
}
- mce_dbg(dev, "receive request complete (res=%d)\n", res);
+ dev_dbg(dev, "receive request complete (res=%d)", res);
}
static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
@@ -895,8 +889,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
ir->carrier = carrier;
cmdbuf[2] = MCE_CMD_SIG_END;
cmdbuf[3] = MCE_IRDATA_TRAILER;
- mce_dbg(ir->dev, "%s: disabling carrier "
- "modulation\n", __func__);
+ dev_dbg(ir->dev, "disabling carrier modulation");
mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
return carrier;
}
@@ -907,8 +900,8 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
ir->carrier = carrier;
cmdbuf[2] = prescaler;
cmdbuf[3] = divisor;
- mce_dbg(ir->dev, "%s: requesting %u HZ "
- "carrier\n", __func__, carrier);
+ dev_dbg(ir->dev, "requesting %u HZ carrier",
+ carrier);
/* Transmit new carrier to mce device */
mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
@@ -998,7 +991,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
* US_TO_NS(MCE_TIME_UNIT);
- mce_dbg(ir->dev, "Storing %s with duration %d\n",
+ dev_dbg(ir->dev, "Storing %s with duration %d",
rawir.pulse ? "pulse" : "space",
rawir.duration);
@@ -1032,7 +1025,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->parser_state = CMD_HEADER;
}
if (event) {
- mce_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
+ dev_dbg(ir->dev, "processed IR data");
ir_raw_event_handle(ir->rc);
}
}
@@ -1055,7 +1048,7 @@ static void mceusb_dev_recv(struct urb *urb)
if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
ir->send_flags = SEND_FLAG_COMPLETE;
- mce_dbg(ir->dev, "setup answer received %d bytes\n",
+ dev_dbg(ir->dev, "setup answer received %d bytes",
buf_len);
}
@@ -1067,13 +1060,14 @@ static void mceusb_dev_recv(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
+ case -EILSEQ:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
case -EPIPE:
default:
- mce_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
+ dev_err(ir->dev, "Error: urb status = %d", urb->status);
break;
}
@@ -1095,7 +1089,7 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
data = kzalloc(USB_CTRL_MSG_SZ, GFP_KERNEL);
if (!data) {
- dev_err(dev, "%s: memory allocation failed!\n", __func__);
+ dev_err(dev, "%s: memory allocation failed!", __func__);
return;
}
@@ -1106,28 +1100,28 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
data, USB_CTRL_MSG_SZ, HZ * 3);
- mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
- mce_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
- __func__, data[0], data[1]);
+ dev_dbg(dev, "set address - ret = %d", ret);
+ dev_dbg(dev, "set address - data[0] = %d, data[1] = %d",
+ data[0], data[1]);
/* set feature: bit rate 38400 bps */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
0xc04e, 0x0000, NULL, 0, HZ * 3);
- mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
+ dev_dbg(dev, "set feature - ret = %d", ret);
/* bRequest 4: set char length to 8 bits */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
4, USB_TYPE_VENDOR,
0x0808, 0x0000, NULL, 0, HZ * 3);
- mce_dbg(dev, "%s - retB = %d\n", __func__, ret);
+ dev_dbg(dev, "set char length - retB = %d", ret);
/* bRequest 2: set handshaking to use DTR/DSR */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
2, USB_TYPE_VENDOR,
0x0000, 0x0100, NULL, 0, HZ * 3);
- mce_dbg(dev, "%s - retC = %d\n", __func__, ret);
+ dev_dbg(dev, "set handshake - retC = %d", ret);
/* device resume */
mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
@@ -1198,7 +1192,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc = rc_allocate_device();
if (!rc) {
- dev_err(dev, "remote dev allocation failed\n");
+ dev_err(dev, "remote dev allocation failed");
goto out;
}
@@ -1217,7 +1211,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc->dev.parent = dev;
rc->priv = ir;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rc, RC_BIT_ALL);
rc->timeout = MS_TO_NS(100);
if (!ir->flags.no_tx) {
rc->s_tx_mask = mceusb_set_tx_mask;
@@ -1230,7 +1224,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
ret = rc_register_device(rc);
if (ret < 0) {
- dev_err(dev, "remote dev registration failed\n");
+ dev_err(dev, "remote dev registration failed");
goto out;
}
@@ -1258,7 +1252,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
bool tx_mask_normal;
int ir_intfnum;
- mce_dbg(&intf->dev, "%s called\n", __func__);
+ dev_dbg(&intf->dev, "%s called", __func__);
idesc = intf->cur_altsetting;
@@ -1286,8 +1280,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
ep_in = ep;
ep_in->bmAttributes = USB_ENDPOINT_XFER_INT;
ep_in->bInterval = 1;
- mce_dbg(&intf->dev, "acceptable inbound endpoint "
- "found\n");
+ dev_dbg(&intf->dev, "acceptable inbound endpoint found");
}
if ((ep_out == NULL)
@@ -1301,12 +1294,11 @@ static int mceusb_dev_probe(struct usb_interface *intf,
ep_out = ep;
ep_out->bmAttributes = USB_ENDPOINT_XFER_INT;
ep_out->bInterval = 1;
- mce_dbg(&intf->dev, "acceptable outbound endpoint "
- "found\n");
+ dev_dbg(&intf->dev, "acceptable outbound endpoint found");
}
}
if (ep_in == NULL) {
- mce_dbg(&intf->dev, "inbound and/or endpoint not found\n");
+ dev_dbg(&intf->dev, "inbound and/or endpoint not found");
return -ENODEV;
}
@@ -1357,7 +1349,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* flush buffers on the device */
- mce_dbg(&intf->dev, "Flushing receive buffers\n");
+ dev_dbg(&intf->dev, "Flushing receive buffers\n");
mce_flush_rx_buffer(ir, maxp);
/* figure out which firmware/emulator version this hardware has */
@@ -1382,10 +1374,9 @@ static int mceusb_dev_probe(struct usb_interface *intf,
device_set_wakeup_capable(ir->dev, true);
device_set_wakeup_enable(ir->dev, true);
- dev_info(&intf->dev, "Registered %s with mce emulator interface "
- "version %x\n", name, ir->emver);
- dev_info(&intf->dev, "%x tx ports (0x%x cabled) and "
- "%x rx sensors (0x%x active)\n",
+ dev_info(&intf->dev, "Registered %s with mce emulator interface version %x",
+ name, ir->emver);
+ dev_info(&intf->dev, "%x tx ports (0x%x cabled) and %x rx sensors (0x%x active)",
ir->num_txports, ir->txports_cabled,
ir->num_rxports, ir->rxports_active);
@@ -1399,7 +1390,7 @@ urb_in_alloc_fail:
buf_in_alloc_fail:
kfree(ir);
mem_alloc_fail:
- dev_err(&intf->dev, "%s: device setup failed!\n", __func__);
+ dev_err(&intf->dev, "%s: device setup failed!", __func__);
return -ENOMEM;
}
@@ -1427,7 +1418,7 @@ static void mceusb_dev_disconnect(struct usb_interface *intf)
static int mceusb_dev_suspend(struct usb_interface *intf, pm_message_t message)
{
struct mceusb_dev *ir = usb_get_intfdata(intf);
- dev_info(ir->dev, "suspend\n");
+ dev_info(ir->dev, "suspend");
usb_kill_urb(ir->urb_in);
return 0;
}
@@ -1435,7 +1426,7 @@ static int mceusb_dev_suspend(struct usb_interface *intf, pm_message_t message)
static int mceusb_dev_resume(struct usb_interface *intf)
{
struct mceusb_dev *ir = usb_get_intfdata(intf);
- dev_info(ir->dev, "resume\n");
+ dev_info(ir->dev, "resume");
if (usb_submit_urb(ir->urb_in, GFP_ATOMIC))
return -EIO;
return 0;
@@ -1457,6 +1448,3 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, mceusb_dev_table);
-
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
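/*
 * Illustrative sketch, not part of the patch above: the mceusb conversion
 * drops the driver-private debug parameter and mce_dbg() wrapper in favour
 * of dev_dbg(), which is compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG
 * is enabled, and uses the "%*ph" printk extension to hex-dump a short
 * buffer instead of hand-building the string. Names prefixed "example_"
 * are made up for this sketch.
 */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define EXAMPLE_BUFLEN 32	/* hypothetical, stands in for USB_BUFLEN */

static void example_dump_rx(struct device *dev, const u8 *buf, int len)
{
	/* one line, emitted only when debugging is enabled for this device */
	dev_dbg(dev, "rx data: %*ph (length=%d)",
		min(len, EXAMPLE_BUFLEN), buf, len);
}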
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 21ee0dc1b7ec..d244e1a83f43 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -330,9 +330,6 @@ static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
/* Enable CIR Wake via PSOUT# (Pin60) */
nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
- /* enable cir interrupt of mouse/keyboard IRQ event */
- nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);
-
/* enable pme interrupt of cir wakeup event */
nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
@@ -456,7 +453,6 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
- nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);
nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
@@ -989,6 +985,12 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
goto exit_free_dev_rdev;
ret = -ENODEV;
+ /* activate pnp device */
+ if (pnp_activate_dev(pdev) < 0) {
+ dev_err(&pdev->dev, "Could not activate PNP device!\n");
+ goto exit_free_dev_rdev;
+ }
+
/* validate pnp resources */
if (!pnp_port_valid(pdev, 0) ||
pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
@@ -1042,7 +1044,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* Set up the rc device */
rdev->priv = nvt;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rdev, RC_BIT_ALL);
rdev->open = nvt_open;
rdev->close = nvt_close;
rdev->tx_ir = nvt_tx_ir;
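/*
 * Minimal sketch, assuming only the pnp API calls visible above: the
 * nuvoton-cir probe now activates the PNP device before validating its I/O
 * port and IRQ resources, so a logical device left disabled by firmware
 * still gets resources assigned. The function name is hypothetical.
 */
#include <linux/pnp.h>

static int example_cir_probe(struct pnp_dev *pdev)
{
	/* assign and enable resources first ... */
	if (pnp_activate_dev(pdev) < 0) {
		dev_err(&pdev->dev, "Could not activate PNP device!\n");
		return -ENODEV;
	}

	/* ... only then is it meaningful to validate them */
	if (!pnp_port_valid(pdev, 0) || !pnp_irq_valid(pdev, 0))
		return -ENODEV;

	return 0;
}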
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 07e83108df0f..e1cf23c3875b 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -363,7 +363,6 @@ struct nvt_dev {
#define LOGICAL_DEV_ENABLE 0x01
#define CIR_WAKE_ENABLE_BIT 0x08
-#define CIR_INTR_MOUSE_IRQ_BIT 0x80
#define PME_INTR_CIR_PASS_BIT 0x08
/* w83677hg CIR pin config */
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 70a180bb0bd0..da536c93c978 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -1,7 +1,7 @@
/*
* Remote Controller core raw events header
*
- * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -88,6 +88,12 @@ struct ir_raw_event_ctrl {
unsigned count;
u64 bits;
} sanyo;
+ struct sharp_dec {
+ int state;
+ unsigned count;
+ u32 bits;
+ unsigned int pulse_len;
+ } sharp;
struct mce_kbd_dec {
struct input_dev *idev;
struct timer_list rx_timeout;
@@ -204,6 +210,13 @@ static inline void load_sony_decode(void) { }
static inline void load_sanyo_decode(void) { }
#endif
+/* from ir-sharp-decoder.c */
+#ifdef CONFIG_IR_SHARP_DECODER_MODULE
+#define load_sharp_decode() request_module_nowait("ir-sharp-decoder")
+#else
+static inline void load_sharp_decode(void) { }
+#endif
+
/* from ir-mce_kbd-decoder.c */
#ifdef CONFIG_IR_MCE_KBD_DECODER_MODULE
#define load_mce_kbd_decode() request_module_nowait("ir-mce_kbd-decoder")
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index 53d02827a472..0a88e0cf964f 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -195,7 +195,7 @@ static int __init loop_init(void)
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rc, RC_BIT_ALL);
rc->timeout = 100 * 1000 * 1000; /* 100 ms */
rc->min_timeout = 1;
rc->max_timeout = UINT_MAX;
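/*
 * Sketch of the accessor used by the driver conversions in this diff
 * (mceusb, nuvoton-cir, rc-loopback, redrat3, st_rc, streamzap, ttusbir,
 * winbond-cir). The real rc_set_allowed_protocols() lives in the rc-core
 * header, which is not part of these hunks, so treat the body below as an
 * assumption: the point is that allowed protocols are now kept per filter
 * type instead of in a single allowed_protos field.
 */
static inline void example_set_allowed_protocols(struct rc_dev *rdev,
						 u64 protos)
{
	/* RC_FILTER_NORMAL selects the ordinary, non-wakeup protocol set */
	rdev->allowed_protocols[RC_FILTER_NORMAL] = protos;
}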
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 02e2f38c9c85..99697aae92ff 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1,6 +1,6 @@
/* rc-main.c - Remote Controller core module
*
- * Copyright (C) 2009-2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2009-2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,7 +24,7 @@
/* Bitmap to store allocated device numbers from 0 to IRRCV_NUM_DEVICES - 1 */
#define IRRCV_NUM_DEVICES 256
-DECLARE_BITMAP(ir_core_dev_number, IRRCV_NUM_DEVICES);
+static DECLARE_BITMAP(ir_core_dev_number, IRRCV_NUM_DEVICES);
/* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */
#define IR_TAB_MIN_SIZE 256
@@ -62,7 +62,7 @@ struct rc_map *rc_map_get(const char *name)
map = seek_rc_map(name);
#ifdef MODULE
if (!map) {
- int rc = request_module(name);
+ int rc = request_module("%s", name);
if (rc < 0) {
printk(KERN_ERR "Couldn't load IR keymap %s\n", name);
return NULL;
@@ -633,6 +633,7 @@ EXPORT_SYMBOL_GPL(rc_repeat);
static void ir_do_keydown(struct rc_dev *dev, int scancode,
u32 keycode, u8 toggle)
{
+ struct rc_scancode_filter *filter;
bool new_event = !dev->keypressed ||
dev->last_scancode != scancode ||
dev->last_toggle != toggle;
@@ -640,6 +641,11 @@ static void ir_do_keydown(struct rc_dev *dev, int scancode,
if (new_event && dev->keypressed)
ir_do_keyup(dev, false);
+ /* Generic scancode filtering */
+ filter = &dev->scancode_filters[RC_FILTER_NORMAL];
+ if (filter->mask && ((scancode ^ filter->data) & filter->mask))
+ return;
+
input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
if (new_event && keycode != KEY_RESERVED) {
@@ -653,9 +659,10 @@ static void ir_do_keydown(struct rc_dev *dev, int scancode,
"key 0x%04x, scancode 0x%04x\n",
dev->input_name, keycode, scancode);
input_report_key(dev->input_dev, keycode, 1);
+
+ led_trigger_event(led_feedback, LED_FULL);
}
- led_trigger_event(led_feedback, LED_FULL);
input_sync(dev->input_dev);
}
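/*
 * Worked example, illustrative only, of the generic scancode filter test
 * added to ir_do_keydown() above: bits set in filter->mask select which
 * scancode bits must equal filter->data; a mismatch in any masked bit
 * drops the event. With mask 0xff00 and data 0x1e00, scancode 0x1e3d
 * passes ((0x1e3d ^ 0x1e00) & 0xff00 == 0) while 0x1f3d is rejected.
 */
static bool example_scancode_matches(const struct rc_scancode_filter *filter,
				     u32 scancode)
{
	/* an empty mask means no filtering: every scancode matches */
	if (!filter->mask)
		return true;

	return ((scancode ^ filter->data) & filter->mask) == 0;
}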
@@ -790,18 +797,44 @@ static struct {
RC_BIT_SONY20, "sony" },
{ RC_BIT_RC5_SZ, "rc-5-sz" },
{ RC_BIT_SANYO, "sanyo" },
+ { RC_BIT_SHARP, "sharp" },
{ RC_BIT_MCE_KBD, "mce_kbd" },
{ RC_BIT_LIRC, "lirc" },
};
/**
- * show_protocols() - shows the current IR protocol(s)
+ * struct rc_filter_attribute - Device attribute relating to a filter type.
+ * @attr: Device attribute.
+ * @type: Filter type.
+ * @mask: false for filter value, true for filter mask.
+ */
+struct rc_filter_attribute {
+ struct device_attribute attr;
+ enum rc_filter_type type;
+ bool mask;
+};
+#define to_rc_filter_attr(a) container_of(a, struct rc_filter_attribute, attr)
+
+#define RC_PROTO_ATTR(_name, _mode, _show, _store, _type) \
+ struct rc_filter_attribute dev_attr_##_name = { \
+ .attr = __ATTR(_name, _mode, _show, _store), \
+ .type = (_type), \
+ }
+#define RC_FILTER_ATTR(_name, _mode, _show, _store, _type, _mask) \
+ struct rc_filter_attribute dev_attr_##_name = { \
+ .attr = __ATTR(_name, _mode, _show, _store), \
+ .type = (_type), \
+ .mask = (_mask), \
+ }
+
+/**
+ * show_protocols() - shows the current/wakeup IR protocol(s)
* @device: the device descriptor
* @mattr: the device attribute struct (unused)
* @buf: a pointer to the output buffer
*
* This routine is a callback routine for input read the IR protocol type(s).
- * it is trigged by reading /sys/class/rc/rc?/protocols.
+ * it is triggered by reading /sys/class/rc/rc?/[wakeup_]protocols.
* It returns the protocol names of supported protocols.
* Enabled protocols are printed in brackets.
*
@@ -812,6 +845,7 @@ static ssize_t show_protocols(struct device *device,
struct device_attribute *mattr, char *buf)
{
struct rc_dev *dev = to_rc_dev(device);
+ struct rc_filter_attribute *fattr = to_rc_filter_attr(mattr);
u64 allowed, enabled;
char *tmp = buf;
int i;
@@ -822,9 +856,10 @@ static ssize_t show_protocols(struct device *device,
mutex_lock(&dev->lock);
- enabled = dev->enabled_protocols;
- if (dev->driver_type == RC_DRIVER_SCANCODE)
- allowed = dev->allowed_protos;
+ enabled = dev->enabled_protocols[fattr->type];
+ if (dev->driver_type == RC_DRIVER_SCANCODE ||
+ fattr->type == RC_FILTER_WAKEUP)
+ allowed = dev->allowed_protocols[fattr->type];
else if (dev->raw)
allowed = ir_raw_get_allowed_protocols();
else {
@@ -856,14 +891,14 @@ static ssize_t show_protocols(struct device *device,
}
/**
- * store_protocols() - changes the current IR protocol(s)
+ * store_protocols() - changes the current/wakeup IR protocol(s)
* @device: the device descriptor
* @mattr: the device attribute struct (unused)
* @buf: a pointer to the input buffer
* @len: length of the input buffer
*
* This routine is for changing the IR protocol type.
- * It is trigged by writing to /sys/class/rc/rc?/protocols.
+ * It is triggered by writing to /sys/class/rc/rc?/[wakeup_]protocols.
* Writing "+proto" will add a protocol to the list of enabled protocols.
* Writing "-proto" will remove a protocol from the list of enabled protocols.
* Writing "proto" will enable only "proto".
@@ -880,12 +915,15 @@ static ssize_t store_protocols(struct device *device,
size_t len)
{
struct rc_dev *dev = to_rc_dev(device);
+ struct rc_filter_attribute *fattr = to_rc_filter_attr(mattr);
bool enable, disable;
const char *tmp;
- u64 type;
+ u64 old_type, type;
u64 mask;
int rc, i, count = 0;
ssize_t ret;
+ int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
+ struct rc_scancode_filter local_filter, *filter;
/* Device is being removed */
if (!dev)
@@ -898,7 +936,8 @@ static ssize_t store_protocols(struct device *device,
ret = -EINVAL;
goto out;
}
- type = dev->enabled_protocols;
+ old_type = dev->enabled_protocols[fattr->type];
+ type = old_type;
while ((tmp = strsep((char **) &data, " \n")) != NULL) {
if (!*tmp)
@@ -946,8 +985,10 @@ static ssize_t store_protocols(struct device *device,
goto out;
}
- if (dev->change_protocol) {
- rc = dev->change_protocol(dev, &type);
+ change_protocol = (fattr->type == RC_FILTER_NORMAL)
+ ? dev->change_protocol : dev->change_wakeup_protocol;
+ if (change_protocol) {
+ rc = change_protocol(dev, &type);
if (rc < 0) {
IR_dprintk(1, "Error setting protocols to 0x%llx\n",
(long long)type);
@@ -956,10 +997,40 @@ static ssize_t store_protocols(struct device *device,
}
}
- dev->enabled_protocols = type;
+ dev->enabled_protocols[fattr->type] = type;
IR_dprintk(1, "Current protocol(s): 0x%llx\n",
(long long)type);
+ /*
+ * If the protocol is changed the filter needs updating.
+ * Try setting the same filter with the new protocol (if any).
+ * Fall back to clearing the filter.
+ */
+ filter = &dev->scancode_filters[fattr->type];
+ if (old_type != type && filter->mask) {
+ local_filter = *filter;
+ if (!type) {
+ /* no protocol => clear filter */
+ ret = -1;
+ } else if (!dev->s_filter) {
+ /* generic filtering => accept any filter */
+ ret = 0;
+ } else {
+ /* hardware filtering => try setting, otherwise clear */
+ ret = dev->s_filter(dev, fattr->type, &local_filter);
+ }
+ if (ret < 0) {
+ /* clear the filter */
+ local_filter.data = 0;
+ local_filter.mask = 0;
+ if (dev->s_filter)
+ dev->s_filter(dev, fattr->type, &local_filter);
+ }
+
+ /* commit the new filter */
+ *filter = local_filter;
+ }
+
ret = len;
out:
@@ -967,6 +1038,115 @@ out:
return ret;
}
+/**
+ * show_filter() - shows the current scancode filter value or mask
+ * @device: the device descriptor
+ * @attr: the device attribute struct
+ * @buf: a pointer to the output buffer
+ *
+ * This routine is a callback routine to read a scancode filter value or mask.
+ * It is triggered by reading /sys/class/rc/rc?/[wakeup_]filter[_mask].
+ * It prints the current scancode filter value or mask of the appropriate filter
+ * type in hexadecimal into @buf and returns the size of the buffer.
+ *
+ * Bits of the filter value corresponding to set bits in the filter mask are
+ * compared against input scancodes and non-matching scancodes are discarded.
+ *
+ * dev->lock is taken to guard against races between device registration,
+ * store_filter and show_filter.
+ */
+static ssize_t show_filter(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rc_dev *dev = to_rc_dev(device);
+ struct rc_filter_attribute *fattr = to_rc_filter_attr(attr);
+ u32 val;
+
+ /* Device is being removed */
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&dev->lock);
+ if (fattr->mask)
+ val = dev->scancode_filters[fattr->type].mask;
+ else
+ val = dev->scancode_filters[fattr->type].data;
+ mutex_unlock(&dev->lock);
+
+ return sprintf(buf, "%#x\n", val);
+}
+
+/**
+ * store_filter() - changes the scancode filter value
+ * @device: the device descriptor
+ * @attr: the device attribute struct
+ * @buf: a pointer to the input buffer
+ * @len: length of the input buffer
+ *
+ * This routine is for changing a scancode filter value or mask.
+ * It is triggered by writing to /sys/class/rc/rc?/[wakeup_]filter[_mask].
+ * Returns -EINVAL if an invalid filter value for the current protocol was
+ * specified or if scancode filtering is not supported by the driver, otherwise
+ * returns @len.
+ *
+ * Bits of the filter value corresponding to set bits in the filter mask are
+ * compared against input scancodes and non-matching scancodes are discarded.
+ *
+ * dev->lock is taken to guard against races between device registration,
+ * store_filter and show_filter.
+ */
+static ssize_t store_filter(struct device *device,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct rc_dev *dev = to_rc_dev(device);
+ struct rc_filter_attribute *fattr = to_rc_filter_attr(attr);
+ struct rc_scancode_filter local_filter, *filter;
+ int ret;
+ unsigned long val;
+
+ /* Device is being removed */
+ if (!dev)
+ return -EINVAL;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /* Scancode filter not supported (but still accept 0) */
+ if (!dev->s_filter && fattr->type != RC_FILTER_NORMAL)
+ return val ? -EINVAL : count;
+
+ mutex_lock(&dev->lock);
+
+ /* Tell the driver about the new filter */
+ filter = &dev->scancode_filters[fattr->type];
+ local_filter = *filter;
+ if (fattr->mask)
+ local_filter.mask = val;
+ else
+ local_filter.data = val;
+ if (!dev->enabled_protocols[fattr->type] && local_filter.mask) {
+ /* refuse to set a filter unless a protocol is enabled */
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (dev->s_filter) {
+ ret = dev->s_filter(dev, fattr->type, &local_filter);
+ if (ret < 0)
+ goto unlock;
+ }
+
+ /* Success, commit the new filter */
+ *filter = local_filter;
+
+unlock:
+ mutex_unlock(&dev->lock);
+ return (ret < 0) ? ret : count;
+}
+
static void rc_dev_release(struct device *device)
{
}
@@ -996,11 +1176,26 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
/*
* Static device attribute struct with the sysfs attributes for IR's
*/
-static DEVICE_ATTR(protocols, S_IRUGO | S_IWUSR,
- show_protocols, store_protocols);
+static RC_PROTO_ATTR(protocols, S_IRUGO | S_IWUSR,
+ show_protocols, store_protocols, RC_FILTER_NORMAL);
+static RC_PROTO_ATTR(wakeup_protocols, S_IRUGO | S_IWUSR,
+ show_protocols, store_protocols, RC_FILTER_WAKEUP);
+static RC_FILTER_ATTR(filter, S_IRUGO|S_IWUSR,
+ show_filter, store_filter, RC_FILTER_NORMAL, false);
+static RC_FILTER_ATTR(filter_mask, S_IRUGO|S_IWUSR,
+ show_filter, store_filter, RC_FILTER_NORMAL, true);
+static RC_FILTER_ATTR(wakeup_filter, S_IRUGO|S_IWUSR,
+ show_filter, store_filter, RC_FILTER_WAKEUP, false);
+static RC_FILTER_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR,
+ show_filter, store_filter, RC_FILTER_WAKEUP, true);
static struct attribute *rc_dev_attrs[] = {
- &dev_attr_protocols.attr,
+ &dev_attr_protocols.attr.attr,
+ &dev_attr_wakeup_protocols.attr.attr,
+ &dev_attr_filter.attr.attr,
+ &dev_attr_filter_mask.attr.attr,
+ &dev_attr_wakeup_filter.attr.attr,
+ &dev_attr_wakeup_filter_mask.attr.attr,
NULL,
};
@@ -1091,14 +1286,6 @@ int rc_register_device(struct rc_dev *dev)
if (dev->close)
dev->input_dev->close = ir_close;
- /*
- * Take the lock here, as the device sysfs node will appear
- * when device_add() is called, which may trigger an ir-keytable udev
- * rule, which will in turn call show_protocols and access
- * dev->enabled_protocols before it has been initialized.
- */
- mutex_lock(&dev->lock);
-
do {
devno = find_first_zero_bit(ir_core_dev_number,
IRRCV_NUM_DEVICES);
@@ -1107,6 +1294,14 @@ int rc_register_device(struct rc_dev *dev)
return -ENOMEM;
} while (test_and_set_bit(devno, ir_core_dev_number));
+ /*
+ * Take the lock here, as the device sysfs node will appear
+ * when device_add() is called, which may trigger an ir-keytable udev
+ * rule, which will in turn call show_protocols and access
+ * dev->enabled_protocols before it has been initialized.
+ */
+ mutex_lock(&dev->lock);
+
dev->devno = devno;
dev_set_name(&dev->dev, "rc%ld", dev->devno);
dev_set_drvdata(&dev->dev, dev);
@@ -1172,7 +1367,7 @@ int rc_register_device(struct rc_dev *dev)
rc = dev->change_protocol(dev, &rc_type);
if (rc < 0)
goto out_raw;
- dev->enabled_protocols = rc_type;
+ dev->enabled_protocols[RC_FILTER_NORMAL] = rc_type;
}
mutex_unlock(&dev->lock);
@@ -1260,5 +1455,5 @@ int rc_core_debug; /* ir_debug level (0,1,2) */
EXPORT_SYMBOL_GPL(rc_core_debug);
module_param_named(debug, rc_core_debug, int, 0644);
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index a5d4f883d053..47cd373e2295 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -922,7 +922,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->dev.parent = dev;
rc->priv = rr3;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rc, RC_BIT_ALL);
rc->timeout = US_TO_NS(2750);
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index 8f0cddb9e8f2..22e4c1f28ab4 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -287,7 +287,7 @@ static int st_rc_probe(struct platform_device *pdev)
st_rc_hardware_init(rc_dev);
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rdev, RC_BIT_ALL);
/* rx sampling rate is 10Mhz */
rdev->rx_resolution = 100;
rdev->timeout = US_TO_NS(MAX_SYMB_TIME);
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index d7b11e6a9982..f4e0bc3d382c 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -322,7 +322,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
rdev->dev.parent = dev;
rdev->priv = sz;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rdev, RC_BIT_ALL);
rdev->driver_name = DRIVER_NAME;
rdev->map_name = RC_MAP_STREAMZAP;
diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c
index d8de2056a4f6..c5be38e2a2fe 100644
--- a/drivers/media/rc/ttusbir.c
+++ b/drivers/media/rc/ttusbir.c
@@ -318,7 +318,7 @@ static int ttusbir_probe(struct usb_interface *intf,
usb_to_input_id(tt->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(rc, RC_BIT_ALL);
rc->priv = tt;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_TT_1500;
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 904baf4eec28..a8b981f5ce2e 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -1082,7 +1082,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->dev.parent = &device->dev;
data->dev->timeout = MS_TO_NS(100);
data->dev->rx_resolution = US_TO_NS(2);
- data->dev->allowed_protos = RC_BIT_ALL;
+ rc_set_allowed_protocols(data->dev, RC_BIT_ALL);
err = rc_register_device(data->dev);
if (err)
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index ba2e365296cf..a1284889cd15 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -204,6 +204,7 @@ config MEDIA_TUNER_TDA18212
config MEDIA_TUNER_E4000
tristate "Elonics E4000 silicon tuner"
depends on MEDIA_SUPPORT && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Elonics E4000 silicon tuner driver.
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 40c1da707d15..90d93348f20c 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -21,220 +21,113 @@
#include "e4000_priv.h"
#include <linux/math64.h>
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
-
-/* write multiple registers */
-static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
-{
- int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[1] = {
- {
- .addr = priv->cfg->i2c_addr,
- .flags = 0,
- .len = 1 + len,
- .buf = buf,
- }
- };
-
- if (1 + len > sizeof(buf)) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
- buf[0] = reg;
- memcpy(&buf[1], val, len);
-
- ret = i2c_transfer(priv->i2c, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
- return ret;
-}
-
-/* read multiple registers */
-static int e4000_rd_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
-{
- int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[2] = {
- {
- .addr = priv->cfg->i2c_addr,
- .flags = 0,
- .len = 1,
- .buf = &reg,
- }, {
- .addr = priv->cfg->i2c_addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (len > sizeof(buf)) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c rd reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
- ret = i2c_transfer(priv->i2c, msg, 2);
- if (ret == 2) {
- memcpy(val, buf, len);
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev,
- "%s: i2c rd failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
-
- return ret;
-}
-
-/* write single register */
-static int e4000_wr_reg(struct e4000_priv *priv, u8 reg, u8 val)
-{
- return e4000_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register */
-static int e4000_rd_reg(struct e4000_priv *priv, u8 reg, u8 *val)
-{
- return e4000_rd_regs(priv, reg, val, 1);
-}
-
static int e4000_init(struct dvb_frontend *fe)
{
- struct e4000_priv *priv = fe->tuner_priv;
+ struct e4000 *s = fe->tuner_priv;
int ret;
- dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
+ dev_dbg(&s->client->dev, "%s:\n", __func__);
/* dummy I2C to ensure I2C wakes up */
- ret = e4000_wr_reg(priv, 0x02, 0x40);
+ ret = regmap_write(s->regmap, 0x02, 0x40);
/* reset */
- ret = e4000_wr_reg(priv, 0x00, 0x01);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x00, 0x01);
+ if (ret)
goto err;
/* disable output clock */
- ret = e4000_wr_reg(priv, 0x06, 0x00);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x06, 0x00);
+ if (ret)
goto err;
- ret = e4000_wr_reg(priv, 0x7a, 0x96);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x7a, 0x96);
+ if (ret)
goto err;
/* configure gains */
- ret = e4000_wr_regs(priv, 0x7e, "\x01\xfe", 2);
- if (ret < 0)
+ ret = regmap_bulk_write(s->regmap, 0x7e, "\x01\xfe", 2);
+ if (ret)
goto err;
- ret = e4000_wr_reg(priv, 0x82, 0x00);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x82, 0x00);
+ if (ret)
goto err;
- ret = e4000_wr_reg(priv, 0x24, 0x05);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x24, 0x05);
+ if (ret)
goto err;
- ret = e4000_wr_regs(priv, 0x87, "\x20\x01", 2);
- if (ret < 0)
+ ret = regmap_bulk_write(s->regmap, 0x87, "\x20\x01", 2);
+ if (ret)
goto err;
- ret = e4000_wr_regs(priv, 0x9f, "\x7f\x07", 2);
- if (ret < 0)
+ ret = regmap_bulk_write(s->regmap, 0x9f, "\x7f\x07", 2);
+ if (ret)
goto err;
/* DC offset control */
- ret = e4000_wr_reg(priv, 0x2d, 0x1f);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x2d, 0x1f);
+ if (ret)
goto err;
- ret = e4000_wr_regs(priv, 0x70, "\x01\x01", 2);
- if (ret < 0)
+ ret = regmap_bulk_write(s->regmap, 0x70, "\x01\x01", 2);
+ if (ret)
goto err;
/* gain control */
- ret = e4000_wr_reg(priv, 0x1a, 0x17);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x1a, 0x17);
+ if (ret)
goto err;
- ret = e4000_wr_reg(priv, 0x1f, 0x1a);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x1f, 0x1a);
+ if (ret)
goto err;
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
-
- return 0;
+ s->active = true;
err:
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
+ if (ret)
+ dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int e4000_sleep(struct dvb_frontend *fe)
{
- struct e4000_priv *priv = fe->tuner_priv;
+ struct e4000 *s = fe->tuner_priv;
int ret;
- dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&s->client->dev, "%s:\n", __func__);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
+ s->active = false;
- ret = e4000_wr_reg(priv, 0x00, 0x00);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x00, 0x00);
+ if (ret)
goto err;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
-
- return 0;
err:
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
+ if (ret)
+ dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int e4000_set_params(struct dvb_frontend *fe)
{
- struct e4000_priv *priv = fe->tuner_priv;
+ struct e4000 *s = fe->tuner_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i, sigma_delta;
- unsigned int f_vco;
+ unsigned int pll_n, pll_f;
+ u64 f_vco;
u8 buf[5], i_data[4], q_data[4];
- dev_dbg(&priv->i2c->dev,
- "%s: delivery_system=%d frequency=%d bandwidth_hz=%d\n",
+ dev_dbg(&s->client->dev,
+ "%s: delivery_system=%d frequency=%u bandwidth_hz=%u\n",
__func__, c->delivery_system, c->frequency,
c->bandwidth_hz);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
-
/* gain control manual */
- ret = e4000_wr_reg(priv, 0x1a, 0x00);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x1a, 0x00);
+ if (ret)
goto err;
/* PLL */
@@ -248,23 +141,21 @@ static int e4000_set_params(struct dvb_frontend *fe)
goto err;
}
- /*
- * Note: Currently f_vco overflows when c->frequency is 1 073 741 824 Hz
- * or more.
- */
- f_vco = c->frequency * e4000_pll_lut[i].mul;
- sigma_delta = div_u64(0x10000ULL * (f_vco % priv->cfg->clock), priv->cfg->clock);
- buf[0] = f_vco / priv->cfg->clock;
+ f_vco = 1ull * c->frequency * e4000_pll_lut[i].mul;
+ pll_n = div_u64_rem(f_vco, s->clock, &pll_f);
+ sigma_delta = div_u64(0x10000ULL * pll_f, s->clock);
+ buf[0] = pll_n;
buf[1] = (sigma_delta >> 0) & 0xff;
buf[2] = (sigma_delta >> 8) & 0xff;
buf[3] = 0x00;
buf[4] = e4000_pll_lut[i].div;
- dev_dbg(&priv->i2c->dev, "%s: f_vco=%u pll div=%d sigma_delta=%04x\n",
+ dev_dbg(&s->client->dev,
+ "%s: f_vco=%llu pll div=%d sigma_delta=%04x\n",
__func__, f_vco, buf[0], sigma_delta);
- ret = e4000_wr_regs(priv, 0x09, buf, 5);
- if (ret < 0)
+ ret = regmap_bulk_write(s->regmap, 0x09, buf, 5);
+ if (ret)
goto err;
/* LNA filter (RF filter) */
@@ -278,8 +169,8 @@ static int e4000_set_params(struct dvb_frontend *fe)
goto err;
}
- ret = e4000_wr_reg(priv, 0x10, e400_lna_filter_lut[i].val);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x10, e400_lna_filter_lut[i].val);
+ if (ret)
goto err;
/* IF filters */
@@ -296,8 +187,8 @@ static int e4000_set_params(struct dvb_frontend *fe)
buf[0] = e4000_if_filter_lut[i].reg11_val;
buf[1] = e4000_if_filter_lut[i].reg12_val;
- ret = e4000_wr_regs(priv, 0x11, buf, 2);
- if (ret < 0)
+ ret = regmap_bulk_write(s->regmap, 0x11, buf, 2);
+ if (ret)
goto err;
/* frequency band */
@@ -311,34 +202,34 @@ static int e4000_set_params(struct dvb_frontend *fe)
goto err;
}
- ret = e4000_wr_reg(priv, 0x07, e4000_band_lut[i].reg07_val);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x07, e4000_band_lut[i].reg07_val);
+ if (ret)
goto err;
- ret = e4000_wr_reg(priv, 0x78, e4000_band_lut[i].reg78_val);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x78, e4000_band_lut[i].reg78_val);
+ if (ret)
goto err;
/* DC offset */
for (i = 0; i < 4; i++) {
if (i == 0)
- ret = e4000_wr_regs(priv, 0x15, "\x00\x7e\x24", 3);
+ ret = regmap_bulk_write(s->regmap, 0x15, "\x00\x7e\x24", 3);
else if (i == 1)
- ret = e4000_wr_regs(priv, 0x15, "\x00\x7f", 2);
+ ret = regmap_bulk_write(s->regmap, 0x15, "\x00\x7f", 2);
else if (i == 2)
- ret = e4000_wr_regs(priv, 0x15, "\x01", 1);
+ ret = regmap_bulk_write(s->regmap, 0x15, "\x01", 1);
else
- ret = e4000_wr_regs(priv, 0x16, "\x7e", 1);
+ ret = regmap_bulk_write(s->regmap, 0x16, "\x7e", 1);
- if (ret < 0)
+ if (ret)
goto err;
- ret = e4000_wr_reg(priv, 0x29, 0x01);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x29, 0x01);
+ if (ret)
goto err;
- ret = e4000_rd_regs(priv, 0x2a, buf, 3);
- if (ret < 0)
+ ret = regmap_bulk_read(s->regmap, 0x2a, buf, 3);
+ if (ret)
goto err;
i_data[i] = (((buf[2] >> 0) & 0x3) << 6) | (buf[0] & 0x3f);
@@ -348,53 +239,226 @@ static int e4000_set_params(struct dvb_frontend *fe)
swap(q_data[2], q_data[3]);
swap(i_data[2], i_data[3]);
- ret = e4000_wr_regs(priv, 0x50, q_data, 4);
- if (ret < 0)
+ ret = regmap_bulk_write(s->regmap, 0x50, q_data, 4);
+ if (ret)
goto err;
- ret = e4000_wr_regs(priv, 0x60, i_data, 4);
- if (ret < 0)
+ ret = regmap_bulk_write(s->regmap, 0x60, i_data, 4);
+ if (ret)
goto err;
/* gain control auto */
- ret = e4000_wr_reg(priv, 0x1a, 0x17);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x1a, 0x17);
+ if (ret)
goto err;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
-
- return 0;
err:
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
+ if (ret)
+ dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int e4000_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
- struct e4000_priv *priv = fe->tuner_priv;
+ struct e4000 *s = fe->tuner_priv;
- dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&s->client->dev, "%s:\n", __func__);
*frequency = 0; /* Zero-IF */
return 0;
}
-static int e4000_release(struct dvb_frontend *fe)
+#if IS_ENABLED(CONFIG_VIDEO_V4L2)
+static int e4000_set_lna_gain(struct dvb_frontend *fe)
{
- struct e4000_priv *priv = fe->tuner_priv;
+ struct e4000 *s = fe->tuner_priv;
+ int ret;
+ u8 u8tmp;
+
+ dev_dbg(&s->client->dev, "%s: lna auto=%d->%d val=%d->%d\n",
+ __func__, s->lna_gain_auto->cur.val,
+ s->lna_gain_auto->val, s->lna_gain->cur.val,
+ s->lna_gain->val);
+
+ if (s->lna_gain_auto->val && s->if_gain_auto->cur.val)
+ u8tmp = 0x17;
+ else if (s->lna_gain_auto->val)
+ u8tmp = 0x19;
+ else if (s->if_gain_auto->cur.val)
+ u8tmp = 0x16;
+ else
+ u8tmp = 0x10;
+
+ ret = regmap_write(s->regmap, 0x1a, u8tmp);
+ if (ret)
+ goto err;
+
+ if (s->lna_gain_auto->val == false) {
+ ret = regmap_write(s->regmap, 0x14, s->lna_gain->val);
+ if (ret)
+ goto err;
+ }
+err:
+ if (ret)
+ dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
- dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+ return ret;
+}
- kfree(fe->tuner_priv);
+static int e4000_set_mixer_gain(struct dvb_frontend *fe)
+{
+ struct e4000 *s = fe->tuner_priv;
+ int ret;
+ u8 u8tmp;
- return 0;
+ dev_dbg(&s->client->dev, "%s: mixer auto=%d->%d val=%d->%d\n",
+ __func__, s->mixer_gain_auto->cur.val,
+ s->mixer_gain_auto->val, s->mixer_gain->cur.val,
+ s->mixer_gain->val);
+
+ if (s->mixer_gain_auto->val)
+ u8tmp = 0x15;
+ else
+ u8tmp = 0x14;
+
+ ret = regmap_write(s->regmap, 0x20, u8tmp);
+ if (ret)
+ goto err;
+
+ if (s->mixer_gain_auto->val == false) {
+ ret = regmap_write(s->regmap, 0x15, s->mixer_gain->val);
+ if (ret)
+ goto err;
+ }
+err:
+ if (ret)
+ dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+
+ return ret;
}
+static int e4000_set_if_gain(struct dvb_frontend *fe)
+{
+ struct e4000 *s = fe->tuner_priv;
+ int ret;
+ u8 buf[2];
+ u8 u8tmp;
+
+ dev_dbg(&s->client->dev, "%s: if auto=%d->%d val=%d->%d\n",
+ __func__, s->if_gain_auto->cur.val,
+ s->if_gain_auto->val, s->if_gain->cur.val,
+ s->if_gain->val);
+
+ if (s->if_gain_auto->val && s->lna_gain_auto->cur.val)
+ u8tmp = 0x17;
+ else if (s->lna_gain_auto->cur.val)
+ u8tmp = 0x19;
+ else if (s->if_gain_auto->val)
+ u8tmp = 0x16;
+ else
+ u8tmp = 0x10;
+
+ ret = regmap_write(s->regmap, 0x1a, u8tmp);
+ if (ret)
+ goto err;
+
+ if (s->if_gain_auto->val == false) {
+ buf[0] = e4000_if_gain_lut[s->if_gain->val].reg16_val;
+ buf[1] = e4000_if_gain_lut[s->if_gain->val].reg17_val;
+ ret = regmap_bulk_write(s->regmap, 0x16, buf, 2);
+ if (ret)
+ goto err;
+ }
+err:
+ if (ret)
+ dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int e4000_pll_lock(struct dvb_frontend *fe)
+{
+ struct e4000 *s = fe->tuner_priv;
+ int ret;
+ unsigned int utmp;
+
+ ret = regmap_read(s->regmap, 0x07, &utmp);
+ if (ret)
+ goto err;
+
+ s->pll_lock->val = (utmp & 0x01);
+err:
+ if (ret)
+ dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int e4000_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct e4000 *s = container_of(ctrl->handler, struct e4000, hdl);
+ int ret;
+
+ if (s->active == false)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ ret = e4000_pll_lock(s->fe);
+ break;
+ default:
+ dev_dbg(&s->client->dev, "%s: unknown ctrl: id=%d name=%s\n",
+ __func__, ctrl->id, ctrl->name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int e4000_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct e4000 *s = container_of(ctrl->handler, struct e4000, hdl);
+ struct dvb_frontend *fe = s->fe;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+
+ if (s->active == false)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
+ case V4L2_CID_RF_TUNER_BANDWIDTH:
+ c->bandwidth_hz = s->bandwidth->val;
+ ret = e4000_set_params(s->fe);
+ break;
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_LNA_GAIN:
+ ret = e4000_set_lna_gain(s->fe);
+ break;
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN:
+ ret = e4000_set_mixer_gain(s->fe);
+ break;
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_IF_GAIN:
+ ret = e4000_set_if_gain(s->fe);
+ break;
+ default:
+ dev_dbg(&s->client->dev, "%s: unknown ctrl: id=%d name=%s\n",
+ __func__, ctrl->id, ctrl->name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops e4000_ctrl_ops = {
+ .g_volatile_ctrl = e4000_g_volatile_ctrl,
+ .s_ctrl = e4000_s_ctrl,
+};
+#endif
+
static const struct dvb_tuner_ops e4000_tuner_ops = {
.info = {
.name = "Elonics E4000",
@@ -402,8 +466,6 @@ static const struct dvb_tuner_ops e4000_tuner_ops = {
.frequency_max = 862000000,
},
- .release = e4000_release,
-
.init = e4000_init,
.sleep = e4000_sleep,
.set_params = e4000_set_params,
@@ -411,62 +473,148 @@ static const struct dvb_tuner_ops e4000_tuner_ops = {
.get_if_frequency = e4000_get_if_frequency,
};
-struct dvb_frontend *e4000_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, const struct e4000_config *cfg)
+/*
+ * Use V4L2 subdev to carry V4L2 control handler, even though we don't implement
+ * subdev itself, just to avoid reinventing the wheel.
+ */
+static int e4000_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
- struct e4000_priv *priv;
+ struct e4000_config *cfg = client->dev.platform_data;
+ struct dvb_frontend *fe = cfg->fe;
+ struct e4000 *s;
int ret;
- u8 chip_id;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
+ unsigned int utmp;
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ };
- priv = kzalloc(sizeof(struct e4000_priv), GFP_KERNEL);
- if (!priv) {
+ s = kzalloc(sizeof(struct e4000), GFP_KERNEL);
+ if (!s) {
ret = -ENOMEM;
- dev_err(&i2c->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ dev_err(&client->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
goto err;
}
- priv->cfg = cfg;
- priv->i2c = i2c;
+ s->clock = cfg->clock;
+ s->client = client;
+ s->fe = cfg->fe;
+ s->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(s->regmap)) {
+ ret = PTR_ERR(s->regmap);
+ goto err;
+ }
/* check if the tuner is there */
- ret = e4000_rd_reg(priv, 0x02, &chip_id);
- if (ret < 0)
+ ret = regmap_read(s->regmap, 0x02, &utmp);
+ if (ret)
goto err;
- dev_dbg(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+ dev_dbg(&s->client->dev, "%s: chip id=%02x\n", __func__, utmp);
- if (chip_id != 0x40)
+ if (utmp != 0x40) {
+ ret = -ENODEV;
goto err;
+ }
/* put sleep as chip seems to be in normal mode by default */
- ret = e4000_wr_reg(priv, 0x00, 0x00);
- if (ret < 0)
+ ret = regmap_write(s->regmap, 0x00, 0x00);
+ if (ret)
goto err;
- dev_info(&priv->i2c->dev,
+#if IS_ENABLED(CONFIG_VIDEO_V4L2)
+ /* Register controls */
+ v4l2_ctrl_handler_init(&s->hdl, 9);
+ s->bandwidth_auto = v4l2_ctrl_new_std(&s->hdl, &e4000_ctrl_ops,
+ V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 1, 1);
+ s->bandwidth = v4l2_ctrl_new_std(&s->hdl, &e4000_ctrl_ops,
+ V4L2_CID_RF_TUNER_BANDWIDTH, 4300000, 11000000, 100000, 4300000);
+ v4l2_ctrl_auto_cluster(2, &s->bandwidth_auto, 0, false);
+ s->lna_gain_auto = v4l2_ctrl_new_std(&s->hdl, &e4000_ctrl_ops,
+ V4L2_CID_RF_TUNER_LNA_GAIN_AUTO, 0, 1, 1, 1);
+ s->lna_gain = v4l2_ctrl_new_std(&s->hdl, &e4000_ctrl_ops,
+ V4L2_CID_RF_TUNER_LNA_GAIN, 0, 15, 1, 10);
+ v4l2_ctrl_auto_cluster(2, &s->lna_gain_auto, 0, false);
+ s->mixer_gain_auto = v4l2_ctrl_new_std(&s->hdl, &e4000_ctrl_ops,
+ V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO, 0, 1, 1, 1);
+ s->mixer_gain = v4l2_ctrl_new_std(&s->hdl, &e4000_ctrl_ops,
+ V4L2_CID_RF_TUNER_MIXER_GAIN, 0, 1, 1, 1);
+ v4l2_ctrl_auto_cluster(2, &s->mixer_gain_auto, 0, false);
+ s->if_gain_auto = v4l2_ctrl_new_std(&s->hdl, &e4000_ctrl_ops,
+ V4L2_CID_RF_TUNER_IF_GAIN_AUTO, 0, 1, 1, 1);
+ s->if_gain = v4l2_ctrl_new_std(&s->hdl, &e4000_ctrl_ops,
+ V4L2_CID_RF_TUNER_IF_GAIN, 0, 54, 1, 0);
+ v4l2_ctrl_auto_cluster(2, &s->if_gain_auto, 0, false);
+ s->pll_lock = v4l2_ctrl_new_std(&s->hdl, &e4000_ctrl_ops,
+ V4L2_CID_RF_TUNER_PLL_LOCK, 0, 1, 1, 0);
+ if (s->hdl.error) {
+ ret = s->hdl.error;
+ dev_err(&s->client->dev, "Could not initialize controls\n");
+ v4l2_ctrl_handler_free(&s->hdl);
+ goto err;
+ }
+
+ s->sd.ctrl_handler = &s->hdl;
+#endif
+
+ dev_info(&s->client->dev,
"%s: Elonics E4000 successfully identified\n",
KBUILD_MODNAME);
- fe->tuner_priv = priv;
+ fe->tuner_priv = s;
memcpy(&fe->ops.tuner_ops, &e4000_tuner_ops,
sizeof(struct dvb_tuner_ops));
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
+ v4l2_set_subdevdata(&s->sd, client);
+ i2c_set_clientdata(client, &s->sd);
- return fe;
+ return 0;
err:
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
+ if (ret) {
+ dev_dbg(&client->dev, "%s: failed=%d\n", __func__, ret);
+ kfree(s);
+ }
+
+ return ret;
+}
+
+static int e4000_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct e4000 *s = container_of(sd, struct e4000, sd);
+ struct dvb_frontend *fe = s->fe;
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
- dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
- kfree(priv);
- return NULL;
+#if IS_ENABLED(CONFIG_VIDEO_V4L2)
+ v4l2_ctrl_handler_free(&s->hdl);
+#endif
+ memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = NULL;
+ kfree(s);
+
+ return 0;
}
-EXPORT_SYMBOL(e4000_attach);
+
+static const struct i2c_device_id e4000_id[] = {
+ {"e4000", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, e4000_id);
+
+static struct i2c_driver e4000_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "e4000",
+ },
+ .probe = e4000_probe,
+ .remove = e4000_remove,
+ .id_table = e4000_id,
+};
+
+module_i2c_driver(e4000_driver);
MODULE_DESCRIPTION("Elonics E4000 silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/tuners/e4000.h b/drivers/media/tuners/e4000.h
index 25ee7c07abff..e74b8b2f2fc3 100644
--- a/drivers/media/tuners/e4000.h
+++ b/drivers/media/tuners/e4000.h
@@ -24,12 +24,15 @@
#include <linux/kconfig.h>
#include "dvb_frontend.h"
+/*
+ * I2C address
+ * 0x64, 0x65, 0x66, 0x67
+ */
struct e4000_config {
/*
- * I2C address
- * 0x64, 0x65, 0x66, 0x67
+ * frontend
*/
- u8 i2c_addr;
+ struct dvb_frontend *fe;
/*
* clock
@@ -37,16 +40,4 @@ struct e4000_config {
u32 clock;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_E4000)
-extern struct dvb_frontend *e4000_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, const struct e4000_config *cfg);
-#else
-static inline struct dvb_frontend *e4000_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, const struct e4000_config *cfg)
-{
- dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif
-
#endif
diff --git a/drivers/media/tuners/e4000_priv.h b/drivers/media/tuners/e4000_priv.h
index a3855053e78f..cb0070483e65 100644
--- a/drivers/media/tuners/e4000_priv.h
+++ b/drivers/media/tuners/e4000_priv.h
@@ -22,10 +22,29 @@
#define E4000_PRIV_H
#include "e4000.h"
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+#include <linux/regmap.h>
-struct e4000_priv {
- const struct e4000_config *cfg;
- struct i2c_adapter *i2c;
+struct e4000 {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ u32 clock;
+ struct dvb_frontend *fe;
+ struct v4l2_subdev sd;
+ bool active;
+
+ /* Controls */
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *bandwidth_auto;
+ struct v4l2_ctrl *bandwidth;
+ struct v4l2_ctrl *lna_gain_auto;
+ struct v4l2_ctrl *lna_gain;
+ struct v4l2_ctrl *mixer_gain_auto;
+ struct v4l2_ctrl *mixer_gain;
+ struct v4l2_ctrl *if_gain_auto;
+ struct v4l2_ctrl *if_gain;
+ struct v4l2_ctrl *pll_lock;
};
struct e4000_pll {
@@ -144,4 +163,67 @@ static const struct e4000_if_filter e4000_if_filter_lut[] = {
{ 0xffffffff, 0x00, 0x20 },
};
+struct e4000_if_gain {
+ u8 reg16_val;
+ u8 reg17_val;
+};
+
+static const struct e4000_if_gain e4000_if_gain_lut[] = {
+ {0x00, 0x00},
+ {0x20, 0x00},
+ {0x40, 0x00},
+ {0x02, 0x00},
+ {0x22, 0x00},
+ {0x42, 0x00},
+ {0x04, 0x00},
+ {0x24, 0x00},
+ {0x44, 0x00},
+ {0x01, 0x00},
+ {0x21, 0x00},
+ {0x41, 0x00},
+ {0x03, 0x00},
+ {0x23, 0x00},
+ {0x43, 0x00},
+ {0x05, 0x00},
+ {0x25, 0x00},
+ {0x45, 0x00},
+ {0x07, 0x00},
+ {0x27, 0x00},
+ {0x47, 0x00},
+ {0x0f, 0x00},
+ {0x2f, 0x00},
+ {0x4f, 0x00},
+ {0x17, 0x00},
+ {0x37, 0x00},
+ {0x57, 0x00},
+ {0x1f, 0x00},
+ {0x3f, 0x00},
+ {0x5f, 0x00},
+ {0x1f, 0x01},
+ {0x3f, 0x01},
+ {0x5f, 0x01},
+ {0x1f, 0x02},
+ {0x3f, 0x02},
+ {0x5f, 0x02},
+ {0x1f, 0x03},
+ {0x3f, 0x03},
+ {0x5f, 0x03},
+ {0x1f, 0x04},
+ {0x3f, 0x04},
+ {0x5f, 0x04},
+ {0x1f, 0x0c},
+ {0x3f, 0x0c},
+ {0x5f, 0x0c},
+ {0x1f, 0x14},
+ {0x3f, 0x14},
+ {0x5f, 0x14},
+ {0x1f, 0x1c},
+ {0x3f, 0x1c},
+ {0x5f, 0x1c},
+ {0x1f, 0x24},
+ {0x3f, 0x24},
+ {0x5f, 0x24},
+ {0x7f, 0x24},
+};
+
#endif
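
[Editor's note, a hypothetical sketch] The new e4000_if_gain_lut[] pairs one value for each of two chip registers per IF-gain step; the field names indicate registers 0x16 and 0x17. The code that consumes the table is not part of this hunk, so the helper below is only an illustration of the intended use, written against the regmap the driver now carries:

	/* hypothetical helper: 'step' is an index into e4000_if_gain_lut[] */
	static int e4000_set_if_gain_step(struct e4000 *s, unsigned int step)
	{
		int ret;

		if (step >= ARRAY_SIZE(e4000_if_gain_lut))
			return -EINVAL;

		ret = regmap_write(s->regmap, 0x16,
				   e4000_if_gain_lut[step].reg16_val);
		if (ret)
			return ret;

		return regmap_write(s->regmap, 0x17,
				    e4000_if_gain_lut[step].reg17_val);
	}
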
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 20cca405bf45..f640dcf4a81d 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -1,7 +1,7 @@
/*
* Driver for mt2063 Micronas tuner
*
- * Copyright (c) 2011 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2011 Mauro Carvalho Chehab
*
* This driver came from a driver originally written by:
* Henry Wang <Henry.wang@AzureWave.com>
@@ -2298,6 +2298,6 @@ static int tuner_MT2063_ClearPowerMaskBits(struct dvb_frontend *fe)
}
#endif
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_DESCRIPTION("MT2063 Silicon tuner");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index d9ee43fae62d..319adc4f0561 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -1,7 +1,7 @@
/*
* Rafael Micro R820T driver
*
- * Copyright (C) 2013 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2013 Mauro Carvalho Chehab
*
* This driver was written from scratch, based on an existing driver
* that it is part of rtl-sdr git tree, released under GPLv2:
@@ -2351,5 +2351,5 @@ err_no_gate:
EXPORT_SYMBOL_GPL(r820t_attach);
MODULE_DESCRIPTION("Rafael Micro r820t silicon tuner driver");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index abe256e1f843..05a4ac9edb6b 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -150,6 +150,8 @@ static int tda18212_set_params(struct dvb_frontend *fe)
#define DVBT2_8 5
#define DVBC_6 6
#define DVBC_8 7
+ #define ATSC_VSB 8
+ #define ATSC_QAM 9
static const u8 bw_params[][3] = {
/* reg: 0f 13 23 */
[DVBT_6] = { 0xb3, 0x20, 0x03 },
@@ -160,6 +162,8 @@ static int tda18212_set_params(struct dvb_frontend *fe)
[DVBT2_8] = { 0xbc, 0x22, 0x01 },
[DVBC_6] = { 0x92, 0x50, 0x03 },
[DVBC_8] = { 0x92, 0x53, 0x03 },
+ [ATSC_VSB] = { 0x7d, 0x20, 0x63 },
+ [ATSC_QAM] = { 0x7d, 0x20, 0x63 },
};
dev_dbg(&priv->i2c->dev,
@@ -171,6 +175,14 @@ static int tda18212_set_params(struct dvb_frontend *fe)
fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
switch (c->delivery_system) {
+ case SYS_ATSC:
+ if_khz = priv->cfg->if_atsc_vsb;
+ i = ATSC_VSB;
+ break;
+ case SYS_DVBC_ANNEX_B:
+ if_khz = priv->cfg->if_atsc_qam;
+ i = ATSC_QAM;
+ break;
case SYS_DVBT:
switch (c->bandwidth_hz) {
case 6000000:
diff --git a/drivers/media/tuners/tda18212.h b/drivers/media/tuners/tda18212.h
index 7e0d503baf05..c36b49e4b274 100644
--- a/drivers/media/tuners/tda18212.h
+++ b/drivers/media/tuners/tda18212.h
@@ -35,6 +35,8 @@ struct tda18212_config {
u16 if_dvbt2_7;
u16 if_dvbt2_8;
u16 if_dvbc;
+ u16 if_atsc_vsb;
+ u16 if_atsc_qam;
};
#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA18212)
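
[Editor's note, a sketch with placeholder values] To use the new ATSC paths a board profile only has to fill in the two added IF fields; tda18212_set_params() then picks if_atsc_vsb for SYS_ATSC and if_atsc_qam for SYS_DVBC_ANNEX_B and programs the shared 0x7d/0x20/0x63 register triplet added to bw_params[]. The IF values below are placeholders, not taken from any real board:

	static struct tda18212_config hypothetical_tda18212_config = {
		/* DVB-T/T2/C IF fields omitted for brevity */
		.if_atsc_vsb = 3600,	/* kHz, placeholder */
		.if_atsc_qam = 3600,	/* kHz, placeholder */
	};
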
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index cca508d4aafb..76a816511f2f 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -1107,6 +1107,9 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
offset += 200000;
}
#endif
+ default:
+ tuner_err("Unsupported tuner type %d.\n", new_type);
+ break;
}
div = (freq - offset + DIV / 2) / DIV;
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index dd32decb237d..7fdadf9bc90b 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -108,7 +108,7 @@ struct au0828_board au0828_boards[] = {
.name = "DViCO FusionHDTV USB",
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
- .i2c_clk_divider = AU0828_I2C_CLK_250KHZ,
+ .i2c_clk_divider = AU0828_I2C_CLK_20KHZ,
},
[AU0828_BOARD_HAUPPAUGE_WOODBURY] = {
.name = "Hauppauge Woodbury",
@@ -270,18 +270,25 @@ void au0828_gpio_setup(struct au0828_dev *dev)
* 9 - XC5000 Tuner
*/
- /* Into reset */
+ /* Set relevant GPIOs as outputs (leave the EEPROM W/P
+ as an input since we will never touch it and it has
+ a pullup) */
au0828_write(dev, REG_003, 0x02);
au0828_write(dev, REG_002, 0x80 | 0x20 | 0x10);
+
+ /* Into reset */
au0828_write(dev, REG_001, 0x0);
au0828_write(dev, REG_000, 0x0);
- msleep(100);
+ msleep(50);
- /* Out of reset (leave the cs5340 in reset until needed) */
- au0828_write(dev, REG_003, 0x02);
- au0828_write(dev, REG_001, 0x02);
- au0828_write(dev, REG_002, 0x80 | 0x20 | 0x10);
- au0828_write(dev, REG_000, 0x80 | 0x40 | 0x20);
+ /* Bring power supply out of reset */
+ au0828_write(dev, REG_000, 0x80);
+ msleep(50);
+
+ /* Bring xc5000 and au8522 out of reset (leave the
+ cs5340 in reset until needed) */
+ au0828_write(dev, REG_001, 0x02); /* xc5000 */
+ au0828_write(dev, REG_000, 0x80 | 0x20); /* PS + au8522 */
msleep(250);
break;
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 81a1d971d797..9b925874d392 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -665,8 +665,8 @@ static int cx231xx_audio_init(struct cx231xx *dev)
cx231xx_info("cx231xx-audio.c: probing for cx231xx "
"non standard usbaudio\n");
- err = snd_card_create(index[devnr], "Cx231xx Audio", THIS_MODULE,
- 0, &card);
+ err = snd_card_new(&dev->udev->dev, index[devnr], "Cx231xx Audio",
+ THIS_MODULE, 0, &card);
if (err < 0)
return err;
@@ -682,7 +682,6 @@ static int cx231xx_audio_init(struct cx231xx *dev)
pcm->info_flags = 0;
pcm->private_data = dev;
strcpy(pcm->name, "Conexant cx231xx Capture");
- snd_card_set_dev(card, &dev->udev->dev);
strcpy(card->driver, "Cx231xx-Audio");
strcpy(card->shortname, "Cx231xx Audio");
strcpy(card->longname, "Conexant cx231xx Audio");
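
[Editor's note, a generic sketch of the API change] This hunk (and the matching one in em28xx-audio.c further down) converts to snd_card_new(), which takes the parent struct device up front and so replaces the snd_card_create() plus snd_card_set_dev() pair. The general shape of the conversion, with parent standing for the USB interface's device:

	struct snd_card *card;
	int err;

	/* was: snd_card_create(index, id, THIS_MODULE, 0, &card);
	 *      ... snd_card_set_dev(card, parent); */
	err = snd_card_new(parent, index, id, THIS_MODULE, 0, &card);
	if (err < 0)
		return err;
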
diff --git a/drivers/media/usb/cx231xx/cx231xx-input.c b/drivers/media/usb/cx231xx/cx231xx-input.c
index 0f7b42446826..46d52fac8680 100644
--- a/drivers/media/usb/cx231xx/cx231xx-input.c
+++ b/drivers/media/usb/cx231xx/cx231xx-input.c
@@ -1,7 +1,7 @@
/*
* cx231xx IR glue driver
*
- * Copyright (C) 2010 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010 Mauro Carvalho Chehab
*
* Polaris (cx231xx) has its support for IR's with a design close to MCE.
* however, a few designs are using an external I2C chip for IR, instead
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 2059d0c86ad3..037e519bbaa2 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -100,13 +100,6 @@ config DVB_USB_GL861
Say Y here to support the MSI Megasky 580 (55801) DVB-T USB2.0
receiver with USB ID 0db0:5581.
-config DVB_USB_IT913X
- tristate "ITE IT913X DVB-T USB2.0 support"
- depends on DVB_USB_V2
- select DVB_IT913X_FE
- help
- Say Y here to support the ITE IT913X DVB-T USB2.0
-
config DVB_USB_LME2510
tristate "LME DM04/QQBOX DVB-S USB2.0 support"
depends on DVB_USB_V2
@@ -133,7 +126,7 @@ config DVB_USB_MXL111SF
config DVB_USB_RTL28XXU
tristate "Realtek RTL28xxU DVB USB support"
- depends on DVB_USB_V2
+ depends on DVB_USB_V2 && I2C_MUX
select DVB_RTL2830
select DVB_RTL2832
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/usb/dvb-usb-v2/Makefile b/drivers/media/usb/dvb-usb-v2/Makefile
index 2c06714b9ef0..7407b8338ccf 100644
--- a/drivers/media/usb/dvb-usb-v2/Makefile
+++ b/drivers/media/usb/dvb-usb-v2/Makefile
@@ -22,9 +22,6 @@ obj-$(CONFIG_DVB_USB_CE6230) += dvb-usb-ce6230.o
dvb-usb-ec168-objs := ec168.o
obj-$(CONFIG_DVB_USB_EC168) += dvb-usb-ec168.o
-dvb-usb-it913x-objs := it913x.o
-obj-$(CONFIG_DVB_USB_IT913X) += dvb-usb-it913x.o
-
dvb-usb-lmedm04-objs := lmedm04.o
obj-$(CONFIG_DVB_USB_LME2510) += dvb-usb-lmedm04.o
@@ -44,3 +41,4 @@ ccflags-y += -I$(srctree)/drivers/media/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
ccflags-y += -I$(srctree)/drivers/media/tuners
ccflags-y += -I$(srctree)/drivers/media/common
+ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 8ede8ea762e6..021e4d35e4d7 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -575,6 +575,10 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
if (ret < 0)
goto err;
+ /* use default I2C address if eeprom has no address set */
+ if (!tmp)
+ tmp = 0x3a;
+
if (state->chip_type == 0x9135) {
ret = af9035_wr_reg(d, 0x004bfb, tmp);
if (ret < 0)
@@ -637,6 +641,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
/* demod I2C "address" */
state->af9033_config[0].i2c_addr = 0x38;
+ state->af9033_config[1].i2c_addr = 0x3a;
state->af9033_config[0].adc_multiplier = AF9033_ADC_MULTIPLIER_2X;
state->af9033_config[1].adc_multiplier = AF9033_ADC_MULTIPLIER_2X;
state->af9033_config[0].ts_mode = AF9033_TS_MODE_USB;
@@ -684,7 +689,9 @@ static int af9035_read_config(struct dvb_usb_device *d)
if (ret < 0)
goto err;
- state->af9033_config[1].i2c_addr = tmp;
+ if (tmp)
+ state->af9033_config[1].i2c_addr = tmp;
+
dev_dbg(&d->udev->dev, "%s: 2nd demod I2C addr=%02x\n",
__func__, tmp);
}
@@ -938,12 +945,7 @@ static int af9035_frontend_callback(void *adapter_priv, int component,
static int af9035_get_adapter_count(struct dvb_usb_device *d)
{
struct state *state = d_to_priv(d);
-
- /* disable 2nd adapter as we don't have PID filters implemented */
- if (d->udev->speed == USB_SPEED_FULL)
- return 1;
- else
- return state->dual_mode + 1;
+ return state->dual_mode + 1;
}
static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
@@ -961,7 +963,7 @@ static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
/* attach demodulator */
adap->fe[0] = dvb_attach(af9033_attach, &state->af9033_config[adap->id],
- &d->i2c_adap);
+ &d->i2c_adap, &state->ops);
if (adap->fe[0] == NULL) {
ret = -ENODEV;
goto err;
@@ -1369,58 +1371,19 @@ static int af9035_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
return 0;
}
-/*
- * FIXME: PID filter is property of demodulator and should be moved to the
- * correct driver. Also we support only adapter #0 PID filter and will
- * disable adapter #1 if USB1.1 is used.
- */
static int af9035_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
- struct dvb_usb_device *d = adap_to_d(adap);
- int ret;
-
- dev_dbg(&d->udev->dev, "%s: onoff=%d\n", __func__, onoff);
-
- ret = af9035_wr_reg_mask(d, 0x80f993, onoff, 0x01);
- if (ret < 0)
- goto err;
-
- return 0;
-
-err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ struct state *state = adap_to_priv(adap);
- return ret;
+ return state->ops.pid_filter_ctrl(adap->fe[0], onoff);
}
static int af9035_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
int onoff)
{
- struct dvb_usb_device *d = adap_to_d(adap);
- int ret;
- u8 wbuf[2] = {(pid >> 0) & 0xff, (pid >> 8) & 0xff};
-
- dev_dbg(&d->udev->dev, "%s: index=%d pid=%04x onoff=%d\n",
- __func__, index, pid, onoff);
-
- ret = af9035_wr_regs(d, 0x80f996, wbuf, 2);
- if (ret < 0)
- goto err;
-
- ret = af9035_wr_reg(d, 0x80f994, onoff);
- if (ret < 0)
- goto err;
-
- ret = af9035_wr_reg(d, 0x80f995, index);
- if (ret < 0)
- goto err;
-
- return 0;
-
-err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ struct state *state = adap_to_priv(adap);
- return ret;
+ return state->ops.pid_filter(adap->fe[0], index, pid, onoff);
}
static int af9035_probe(struct usb_interface *intf,
@@ -1494,6 +1457,13 @@ static const struct dvb_usb_device_properties af9035_props = {
.stream = DVB_USB_STREAM_BULK(0x84, 6, 87 * 188),
}, {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+
+ .pid_filter_count = 32,
+ .pid_filter_ctrl = af9035_pid_filter_ctrl,
+ .pid_filter = af9035_pid_filter,
+
.stream = DVB_USB_STREAM_BULK(0x85, 6, 87 * 188),
},
},
@@ -1528,12 +1498,30 @@ static const struct usb_device_id af9035_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00aa,
&af9035_props, "TerraTec Cinergy T Stick (rev. 2)", NULL) },
/* IT9135 devices */
-#if 0
- { DVB_USB_DEVICE(0x048d, 0x9135,
- &af9035_props, "IT9135 reference design", NULL) },
- { DVB_USB_DEVICE(0x048d, 0x9006,
- &af9035_props, "IT9135 reference design", NULL) },
-#endif
+ { DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135,
+ &af9035_props, "ITE 9135 Generic", RC_MAP_IT913X_V1) },
+ { DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9005,
+ &af9035_props, "ITE 9135(9005) Generic", RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9006,
+ &af9035_props, "ITE 9135(9006) Generic", RC_MAP_IT913X_V1) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_1835,
+ &af9035_props, "Avermedia A835B(1835)", RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_2835,
+ &af9035_props, "Avermedia A835B(2835)", RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_3835,
+ &af9035_props, "Avermedia A835B(3835)", RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_4835,
+ &af9035_props, "Avermedia A835B(4835)", RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335,
+ &af9035_props, "Avermedia H335", RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB499_2T_T09,
+ &af9035_props, "Kworld UB499-2T T09", RC_MAP_IT913X_V1) },
+ { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22_IT9137,
+ &af9035_props, "Sveon STV22 Dual DVB-T HDTV",
+ RC_MAP_IT913X_V1) },
+ { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2,
+ &af9035_props, "Digital Dual TV Receiver CTVDIGDUAL_V2",
+ RC_MAP_IT913X_V1) },
/* XXX: that same ID [0ccd:0099] is used by af9015 driver too */
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099,
&af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.h b/drivers/media/usb/dvb-usb-v2/af9035.h
index a1c68d829b8c..c21902fdd4c4 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.h
+++ b/drivers/media/usb/dvb-usb-v2/af9035.h
@@ -62,6 +62,8 @@ struct state {
u8 dual_mode:1;
u16 eeprom_addr;
struct af9033_config af9033_config[2];
+
+ struct af9033_ops ops;
};
static const u32 clock_lut_af9035[] = {
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index c1051c347744..c3c4b98733bf 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -7,7 +7,7 @@
* http://linux.terratec.de/files/TERRATEC_H7/20110323_TERRATEC_H7_Linux.tar.gz
* The original driver's license is GPL, as declared with MODULE_LICENSE()
*
- * Copyright (c) 2010-2012 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010-2012 Mauro Carvalho Chehab
* Driver modified by in order to work with upstream drxk driver, and
* tons of bugs got fixed, and converted to use dvb-usb-v2.
*
@@ -975,7 +975,7 @@ static struct usb_driver az6007_usb_driver = {
module_usb_driver(az6007_usb_driver);
MODULE_AUTHOR("Henry Wang <Henry.wang@AzureWave.com>");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_DESCRIPTION("Driver for AzureWave 6007 DVB-C/T USB2.0 and clones");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 8a054d66e708..de02db802ace 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -164,7 +164,7 @@ static int dvb_usbv2_remote_init(struct dvb_usb_device *d)
dev->driver_name = (char *) d->props->driver_name;
dev->map_name = d->rc.map_name;
dev->driver_type = d->rc.driver_type;
- dev->allowed_protos = d->rc.allowed_protos;
+ rc_set_allowed_protocols(dev, d->rc.allowed_protos);
dev->change_protocol = d->rc.change_protocol;
dev->priv = d;
diff --git a/drivers/media/usb/dvb-usb-v2/it913x.c b/drivers/media/usb/dvb-usb-v2/it913x.c
deleted file mode 100644
index fe95a586dd5d..000000000000
--- a/drivers/media/usb/dvb-usb-v2/it913x.c
+++ /dev/null
@@ -1,828 +0,0 @@
-/*
- * DVB USB compliant linux driver for ITE IT9135 and IT9137
- *
- * Copyright (C) 2011 Malcolm Priestley (tvboxspy@gmail.com)
- * IT9135 (C) ITE Tech Inc.
- * IT9137 (C) ITE Tech Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License Version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * see Documentation/dvb/README.dvb-usb for more information
- * see Documentation/dvb/it9137.txt for firmware information
- *
- */
-#define DVB_USB_LOG_PREFIX "it913x"
-
-#include <linux/usb.h>
-#include <linux/usb/input.h>
-#include <media/rc-core.h>
-
-#include "dvb_usb.h"
-#include "it913x-fe.h"
-
-/* debug */
-static int dvb_usb_it913x_debug;
-#define it_debug(var, level, args...) \
- do { if ((var & level)) pr_debug(DVB_USB_LOG_PREFIX": " args); \
-} while (0)
-#define deb_info(level, args...) it_debug(dvb_usb_it913x_debug, level, args)
-#define info(args...) pr_info(DVB_USB_LOG_PREFIX": " args)
-
-module_param_named(debug, dvb_usb_it913x_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
-
-static int dvb_usb_it913x_firmware;
-module_param_named(firmware, dvb_usb_it913x_firmware, int, 0644);
-MODULE_PARM_DESC(firmware, "set firmware 0=auto "\
- "1=IT9137 2=IT9135 V1 3=IT9135 V2");
-#define FW_IT9137 "dvb-usb-it9137-01.fw"
-#define FW_IT9135_V1 "dvb-usb-it9135-01.fw"
-#define FW_IT9135_V2 "dvb-usb-it9135-02.fw"
-
-DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-
-struct it913x_state {
- struct ite_config it913x_config;
- u8 pid_filter_onoff;
- bool proprietary_ir;
- int cmd_counter;
-};
-
-static u16 check_sum(u8 *p, u8 len)
-{
- u16 sum = 0;
- u8 i = 1;
- while (i < len)
- sum += (i++ & 1) ? (*p++) << 8 : *p++;
- return ~sum;
-}
-
-static int it913x_io(struct dvb_usb_device *d, u8 mode, u8 pro,
- u8 cmd, u32 reg, u8 addr, u8 *data, u8 len)
-{
- struct it913x_state *st = d->priv;
- int ret = 0, i, buf_size = 1;
- u8 *buff;
- u8 rlen;
- u16 chk_sum;
-
- buff = kzalloc(256, GFP_KERNEL);
- if (!buff) {
- info("USB Buffer Failed");
- return -ENOMEM;
- }
-
- buff[buf_size++] = pro;
- buff[buf_size++] = cmd;
- buff[buf_size++] = st->cmd_counter;
-
- switch (mode) {
- case READ_LONG:
- case WRITE_LONG:
- buff[buf_size++] = len;
- buff[buf_size++] = 2;
- buff[buf_size++] = (reg >> 24);
- buff[buf_size++] = (reg >> 16) & 0xff;
- buff[buf_size++] = (reg >> 8) & 0xff;
- buff[buf_size++] = reg & 0xff;
- break;
- case READ_SHORT:
- buff[buf_size++] = addr;
- break;
- case WRITE_SHORT:
- buff[buf_size++] = len;
- buff[buf_size++] = addr;
- buff[buf_size++] = (reg >> 8) & 0xff;
- buff[buf_size++] = reg & 0xff;
- break;
- case READ_DATA:
- case WRITE_DATA:
- break;
- case WRITE_CMD:
- mode = 7;
- break;
- default:
- kfree(buff);
- return -EINVAL;
- }
-
- if (mode & 1) {
- for (i = 0; i < len ; i++)
- buff[buf_size++] = data[i];
- }
- chk_sum = check_sum(&buff[1], buf_size);
-
- buff[buf_size++] = chk_sum >> 8;
- buff[0] = buf_size;
- buff[buf_size++] = (chk_sum & 0xff);
-
- ret = dvb_usbv2_generic_rw(d, buff, buf_size, buff, (mode & 1) ?
- 5 : len + 5);
- if (ret < 0)
- goto error;
-
- rlen = (mode & 0x1) ? 0x1 : len;
-
- if (mode & 1)
- ret = buff[2];
- else
- memcpy(data, &buff[3], rlen);
-
- st->cmd_counter++;
-
-error: kfree(buff);
-
- return ret;
-}
-
-static int it913x_wr_reg(struct dvb_usb_device *d, u8 pro, u32 reg , u8 data)
-{
- int ret;
- u8 b[1];
- b[0] = data;
- ret = it913x_io(d, WRITE_LONG, pro,
- CMD_DEMOD_WRITE, reg, 0, b, sizeof(b));
-
- return ret;
-}
-
-static int it913x_read_reg(struct dvb_usb_device *d, u32 reg)
-{
- int ret;
- u8 data[1];
-
- ret = it913x_io(d, READ_LONG, DEV_0,
- CMD_DEMOD_READ, reg, 0, &data[0], sizeof(data));
-
- return (ret < 0) ? ret : data[0];
-}
-
-static int it913x_query(struct dvb_usb_device *d, u8 pro)
-{
- struct it913x_state *st = d->priv;
- int ret, i;
- u8 data[4];
- u8 ver;
-
- for (i = 0; i < 5; i++) {
- ret = it913x_io(d, READ_LONG, pro, CMD_DEMOD_READ,
- 0x1222, 0, &data[0], 3);
- ver = data[0];
- if (ver > 0 && ver < 3)
- break;
- msleep(100);
- }
-
- if (ver < 1 || ver > 2) {
- info("Failed to identify chip version applying 1");
- st->it913x_config.chip_ver = 0x1;
- st->it913x_config.chip_type = 0x9135;
- return 0;
- }
-
- st->it913x_config.chip_ver = ver;
- st->it913x_config.chip_type = (u16)(data[2] << 8) + data[1];
-
- info("Chip Version=%02x Chip Type=%04x", st->it913x_config.chip_ver,
- st->it913x_config.chip_type);
-
- ret = it913x_io(d, READ_SHORT, pro,
- CMD_QUERYINFO, 0, 0x1, &data[0], 4);
-
- st->it913x_config.firmware = (data[0] << 24) | (data[1] << 16) |
- (data[2] << 8) | data[3];
-
- return ret;
-}
-
-static int it913x_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
-{
- struct dvb_usb_device *d = adap_to_d(adap);
- struct it913x_state *st = adap_to_priv(adap);
- int ret;
- u8 pro = (adap->id == 0) ? DEV_0_DMOD : DEV_1_DMOD;
-
- mutex_lock(&d->i2c_mutex);
-
- deb_info(1, "PID_C (%02x)", onoff);
-
- st->pid_filter_onoff = adap->pid_filtering;
- ret = it913x_wr_reg(d, pro, PID_EN, st->pid_filter_onoff);
-
- mutex_unlock(&d->i2c_mutex);
- return ret;
-}
-
-static int it913x_pid_filter(struct dvb_usb_adapter *adap,
- int index, u16 pid, int onoff)
-{
- struct dvb_usb_device *d = adap_to_d(adap);
- struct it913x_state *st = adap_to_priv(adap);
- int ret;
- u8 pro = (adap->id == 0) ? DEV_0_DMOD : DEV_1_DMOD;
-
- mutex_lock(&d->i2c_mutex);
-
- deb_info(1, "PID_F (%02x)", onoff);
-
- ret = it913x_wr_reg(d, pro, PID_LSB, (u8)(pid & 0xff));
-
- ret |= it913x_wr_reg(d, pro, PID_MSB, (u8)(pid >> 8));
-
- ret |= it913x_wr_reg(d, pro, PID_INX_EN, (u8)onoff);
-
- ret |= it913x_wr_reg(d, pro, PID_INX, (u8)(index & 0x1f));
-
- if (d->udev->speed == USB_SPEED_HIGH && pid == 0x2000) {
- ret |= it913x_wr_reg(d , pro, PID_EN, !onoff);
- st->pid_filter_onoff = !onoff;
- } else
- st->pid_filter_onoff =
- adap->pid_filtering;
-
- mutex_unlock(&d->i2c_mutex);
- return 0;
-}
-
-
-static int it913x_return_status(struct dvb_usb_device *d)
-{
- struct it913x_state *st = d->priv;
- int ret = it913x_query(d, DEV_0);
- if (st->it913x_config.firmware > 0)
- info("Firmware Version %d", st->it913x_config.firmware);
-
- return ret;
-}
-
-static int it913x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
-{
- struct dvb_usb_device *d = i2c_get_adapdata(adap);
- static u8 data[256];
- int ret;
- u32 reg;
- u8 pro;
-
- mutex_lock(&d->i2c_mutex);
-
- deb_info(2, "num of messages %d address %02x", num, msg[0].addr);
-
- pro = (msg[0].addr & 0x2) ? DEV_0_DMOD : 0x0;
- pro |= (msg[0].addr & 0x20) ? DEV_1 : DEV_0;
- memcpy(data, msg[0].buf, msg[0].len);
- reg = (data[0] << 24) + (data[1] << 16) +
- (data[2] << 8) + data[3];
- if (num == 2) {
- ret = it913x_io(d, READ_LONG, pro,
- CMD_DEMOD_READ, reg, 0, data, msg[1].len);
- memcpy(msg[1].buf, data, msg[1].len);
- } else
- ret = it913x_io(d, WRITE_LONG, pro, CMD_DEMOD_WRITE,
- reg, 0, &data[4], msg[0].len - 4);
-
- mutex_unlock(&d->i2c_mutex);
-
- return ret;
-}
-
-static u32 it913x_i2c_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C;
-}
-
-static struct i2c_algorithm it913x_i2c_algo = {
- .master_xfer = it913x_i2c_xfer,
- .functionality = it913x_i2c_func,
-};
-
-/* Callbacks for DVB USB */
-#if IS_ENABLED(CONFIG_RC_CORE)
-static int it913x_rc_query(struct dvb_usb_device *d)
-{
- u8 ibuf[4];
- int ret;
- u32 key;
- /* Avoid conflict with frontends*/
- mutex_lock(&d->i2c_mutex);
-
- ret = it913x_io(d, READ_LONG, PRO_LINK, CMD_IR_GET,
- 0, 0, &ibuf[0], sizeof(ibuf));
-
- if ((ibuf[2] + ibuf[3]) == 0xff) {
- key = ibuf[2];
- key += ibuf[0] << 16;
- key += ibuf[1] << 8;
- deb_info(1, "NEC Extended Key =%08x", key);
- if (d->rc_dev != NULL)
- rc_keydown(d->rc_dev, key, 0);
- }
-
- mutex_unlock(&d->i2c_mutex);
-
- return ret;
-}
-
-static int it913x_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
-{
- struct it913x_state *st = d->priv;
-
- if (st->proprietary_ir == false) {
- rc->map_name = NULL;
- return 0;
- }
-
- rc->allowed_protos = RC_BIT_NEC;
- rc->query = it913x_rc_query;
- rc->interval = 250;
-
- return 0;
-}
-#else
- #define it913x_get_rc_config NULL
-#endif
-
-/* Firmware sets raw */
-static const char fw_it9135_v1[] = FW_IT9135_V1;
-static const char fw_it9135_v2[] = FW_IT9135_V2;
-static const char fw_it9137[] = FW_IT9137;
-
-static void ite_get_firmware_name(struct dvb_usb_device *d,
- const char **name)
-{
- struct it913x_state *st = d->priv;
- int sw;
- /* auto switch */
- if (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_KWORLD_2)
- sw = IT9137_FW;
- else if (st->it913x_config.chip_ver == 1)
- sw = IT9135_V1_FW;
- else
- sw = IT9135_V2_FW;
-
- /* force switch */
- if (dvb_usb_it913x_firmware != IT9135_AUTO)
- sw = dvb_usb_it913x_firmware;
-
- switch (sw) {
- case IT9135_V1_FW:
- st->it913x_config.firmware_ver = 1;
- st->it913x_config.adc_x2 = 1;
- st->it913x_config.read_slevel = false;
- *name = fw_it9135_v1;
- break;
- case IT9135_V2_FW:
- st->it913x_config.firmware_ver = 1;
- st->it913x_config.adc_x2 = 1;
- st->it913x_config.read_slevel = false;
- *name = fw_it9135_v2;
- switch (st->it913x_config.tuner_id_0) {
- case IT9135_61:
- case IT9135_62:
- break;
- default:
- info("Unknown tuner ID applying default 0x60");
- case IT9135_60:
- st->it913x_config.tuner_id_0 = IT9135_60;
- }
- break;
- case IT9137_FW:
- default:
- st->it913x_config.firmware_ver = 0;
- st->it913x_config.adc_x2 = 0;
- st->it913x_config.read_slevel = true;
- *name = fw_it9137;
- }
-
- return;
-}
-
-#define TS_MPEG_PKT_SIZE 188
-#define EP_LOW 21
-#define TS_BUFFER_SIZE_PID (EP_LOW*TS_MPEG_PKT_SIZE)
-#define EP_HIGH 348
-#define TS_BUFFER_SIZE_MAX (EP_HIGH*TS_MPEG_PKT_SIZE)
-
-static int it913x_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
- struct usb_data_stream_properties *stream)
-{
- struct dvb_usb_adapter *adap = fe_to_adap(fe);
- if (adap->pid_filtering)
- stream->u.bulk.buffersize = TS_BUFFER_SIZE_PID;
- else
- stream->u.bulk.buffersize = TS_BUFFER_SIZE_MAX;
-
- return 0;
-}
-
-static int it913x_select_config(struct dvb_usb_device *d)
-{
- struct it913x_state *st = d->priv;
- int ret, reg;
-
- ret = it913x_return_status(d);
- if (ret < 0)
- return ret;
-
- if (st->it913x_config.chip_ver == 0x02
- && st->it913x_config.chip_type == 0x9135)
- reg = it913x_read_reg(d, 0x461d);
- else
- reg = it913x_read_reg(d, 0x461b);
-
- if (reg < 0)
- return reg;
-
- if (reg == 0) {
- st->it913x_config.dual_mode = 0;
- st->it913x_config.tuner_id_0 = IT9135_38;
- st->proprietary_ir = true;
- } else {
- /* TS mode */
- reg = it913x_read_reg(d, 0x49c5);
- if (reg < 0)
- return reg;
- st->it913x_config.dual_mode = reg;
-
- /* IR mode type */
- reg = it913x_read_reg(d, 0x49ac);
- if (reg < 0)
- return reg;
- if (reg == 5) {
- info("Remote propriety (raw) mode");
- st->proprietary_ir = true;
- } else if (reg == 1) {
- info("Remote HID mode NOT SUPPORTED");
- st->proprietary_ir = false;
- }
-
- /* Tuner_id */
- reg = it913x_read_reg(d, 0x49d0);
- if (reg < 0)
- return reg;
- st->it913x_config.tuner_id_0 = reg;
- }
-
- info("Dual mode=%x Tuner Type=%x", st->it913x_config.dual_mode,
- st->it913x_config.tuner_id_0);
-
- return ret;
-}
-
-static int it913x_streaming_ctrl(struct dvb_frontend *fe, int onoff)
-{
- struct dvb_usb_adapter *adap = fe_to_adap(fe);
- struct dvb_usb_device *d = adap_to_d(adap);
- struct it913x_state *st = fe_to_priv(fe);
- int ret = 0;
- u8 pro = (adap->id == 0) ? DEV_0_DMOD : DEV_1_DMOD;
-
- deb_info(1, "STM (%02x)", onoff);
-
- if (!onoff) {
- mutex_lock(&d->i2c_mutex);
-
- ret = it913x_wr_reg(d, pro, PID_RST, 0x1);
-
- mutex_unlock(&d->i2c_mutex);
- st->pid_filter_onoff =
- adap->pid_filtering;
-
- }
-
- return ret;
-}
-
-static int it913x_identify_state(struct dvb_usb_device *d, const char **name)
-{
- struct it913x_state *st = d->priv;
- int ret;
- u8 reg;
-
- /* Read and select config */
- ret = it913x_select_config(d);
- if (ret < 0)
- return ret;
-
- ite_get_firmware_name(d, name);
-
- if (st->it913x_config.firmware > 0)
- return WARM;
-
- if (st->it913x_config.dual_mode) {
- st->it913x_config.tuner_id_1 = it913x_read_reg(d, 0x49e0);
- ret = it913x_wr_reg(d, DEV_0, GPIOH1_EN, 0x1);
- ret |= it913x_wr_reg(d, DEV_0, GPIOH1_ON, 0x1);
- ret |= it913x_wr_reg(d, DEV_0, GPIOH1_O, 0x1);
- msleep(50);
- ret |= it913x_wr_reg(d, DEV_0, GPIOH1_O, 0x0);
- msleep(50);
- reg = it913x_read_reg(d, GPIOH1_O);
- if (reg == 0) {
- ret |= it913x_wr_reg(d, DEV_0, GPIOH1_O, 0x1);
- ret |= it913x_return_status(d);
- if (ret != 0)
- ret = it913x_wr_reg(d, DEV_0,
- GPIOH1_O, 0x0);
- }
- }
-
- reg = it913x_read_reg(d, IO_MUX_POWER_CLK);
-
- if (st->it913x_config.dual_mode) {
- ret |= it913x_wr_reg(d, DEV_0, 0x4bfb, CHIP2_I2C_ADDR);
- if (st->it913x_config.firmware_ver == 1)
- ret |= it913x_wr_reg(d, DEV_0, 0xcfff, 0x1);
- else
- ret |= it913x_wr_reg(d, DEV_0, CLK_O_EN, 0x1);
- } else {
- ret |= it913x_wr_reg(d, DEV_0, 0x4bfb, 0x0);
- if (st->it913x_config.firmware_ver == 1)
- ret |= it913x_wr_reg(d, DEV_0, 0xcfff, 0x0);
- else
- ret |= it913x_wr_reg(d, DEV_0, CLK_O_EN, 0x0);
- }
-
- ret |= it913x_wr_reg(d, DEV_0, I2C_CLK, I2C_CLK_100);
-
- return (ret < 0) ? ret : COLD;
-}
-
-static int it913x_download_firmware(struct dvb_usb_device *d,
- const struct firmware *fw)
-{
- struct it913x_state *st = d->priv;
- int ret = 0, i = 0, pos = 0;
- u8 packet_size, min_pkt;
- u8 *fw_data;
-
- ret = it913x_wr_reg(d, DEV_0, I2C_CLK, I2C_CLK_100);
-
- info("FRM Starting Firmware Download");
-
- /* Multi firmware loader */
- /* This uses scatter write firmware headers */
- /* The firmware must start with 03 XX 00 */
- /* and be the extact firmware length */
-
- if (st->it913x_config.chip_ver == 2)
- min_pkt = 0x11;
- else
- min_pkt = 0x19;
-
- while (i <= fw->size) {
- if (((fw->data[i] == 0x3) && (fw->data[i + 2] == 0x0))
- || (i == fw->size)) {
- packet_size = i - pos;
- if ((packet_size > min_pkt) || (i == fw->size)) {
- fw_data = (u8 *)(fw->data + pos);
- pos += packet_size;
- if (packet_size > 0) {
- ret = it913x_io(d, WRITE_DATA,
- DEV_0, CMD_SCATTER_WRITE, 0,
- 0, fw_data, packet_size);
- if (ret < 0)
- break;
- }
- udelay(1000);
- }
- }
- i++;
- }
-
- if (ret < 0)
- info("FRM Firmware Download Failed (%d)" , ret);
- else
- info("FRM Firmware Download Completed - Resetting Device");
-
- msleep(30);
-
- ret = it913x_io(d, WRITE_CMD, DEV_0, CMD_BOOT, 0, 0, NULL, 0);
- if (ret < 0)
- info("FRM Device not responding to reboot");
-
- ret = it913x_return_status(d);
- if (st->it913x_config.firmware == 0) {
- info("FRM Failed to reboot device");
- return -ENODEV;
- }
-
- msleep(30);
-
- ret = it913x_wr_reg(d, DEV_0, I2C_CLK, I2C_CLK_400);
-
- msleep(30);
-
- /* Tuner function */
- if (st->it913x_config.dual_mode)
- ret |= it913x_wr_reg(d, DEV_0_DMOD , 0xec4c, 0xa0);
- else
- ret |= it913x_wr_reg(d, DEV_0_DMOD , 0xec4c, 0x68);
-
- if ((st->it913x_config.chip_ver == 1) &&
- (st->it913x_config.chip_type == 0x9135)) {
- ret |= it913x_wr_reg(d, DEV_0, PADODPU, 0x0);
- ret |= it913x_wr_reg(d, DEV_0, AGC_O_D, 0x0);
- if (st->it913x_config.dual_mode) {
- ret |= it913x_wr_reg(d, DEV_1, PADODPU, 0x0);
- ret |= it913x_wr_reg(d, DEV_1, AGC_O_D, 0x0);
- }
- }
-
- return (ret < 0) ? -ENODEV : 0;
-}
-
-static int it913x_name(struct dvb_usb_adapter *adap)
-{
- struct dvb_usb_device *d = adap_to_d(adap);
- const char *desc = d->name;
- char *fe_name[] = {"_1", "_2", "_3", "_4"};
- char *name = adap->fe[0]->ops.info.name;
-
- strlcpy(name, desc, 128);
- strlcat(name, fe_name[adap->id], 128);
-
- return 0;
-}
-
-static int it913x_frontend_attach(struct dvb_usb_adapter *adap)
-{
- struct dvb_usb_device *d = adap_to_d(adap);
- struct it913x_state *st = d->priv;
- int ret = 0;
- u8 adap_addr = I2C_BASE_ADDR + (adap->id << 5);
- u16 ep_size = (adap->pid_filtering) ? TS_BUFFER_SIZE_PID / 4 :
- TS_BUFFER_SIZE_MAX / 4;
- u8 pkt_size = 0x80;
-
- if (d->udev->speed != USB_SPEED_HIGH)
- pkt_size = 0x10;
-
- st->it913x_config.adf = it913x_read_reg(d, IO_MUX_POWER_CLK);
-
- adap->fe[0] = dvb_attach(it913x_fe_attach,
- &d->i2c_adap, adap_addr, &st->it913x_config);
-
- if (adap->id == 0 && adap->fe[0]) {
- it913x_wr_reg(d, DEV_0_DMOD, MP2_SW_RST, 0x1);
- it913x_wr_reg(d, DEV_0_DMOD, MP2IF2_SW_RST, 0x1);
- it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x0f);
- it913x_wr_reg(d, DEV_0, EP0_TX_NAK, 0x1b);
- if (st->proprietary_ir == false) /* Enable endpoint 3 */
- it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x3f);
- else
- it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x2f);
- it913x_wr_reg(d, DEV_0, EP4_TX_LEN_LSB,
- ep_size & 0xff);
- it913x_wr_reg(d, DEV_0, EP4_TX_LEN_MSB, ep_size >> 8);
- ret = it913x_wr_reg(d, DEV_0, EP4_MAX_PKT, pkt_size);
- } else if (adap->id == 1 && adap->fe[0]) {
- if (st->proprietary_ir == false)
- it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x7f);
- else
- it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x6f);
- it913x_wr_reg(d, DEV_0, EP5_TX_LEN_LSB,
- ep_size & 0xff);
- it913x_wr_reg(d, DEV_0, EP5_TX_LEN_MSB, ep_size >> 8);
- it913x_wr_reg(d, DEV_0, EP5_MAX_PKT, pkt_size);
- it913x_wr_reg(d, DEV_0_DMOD, MP2IF2_EN, 0x1);
- it913x_wr_reg(d, DEV_1_DMOD, MP2IF_SERIAL, 0x1);
- it913x_wr_reg(d, DEV_1, TOP_HOSTB_SER_MODE, 0x1);
- it913x_wr_reg(d, DEV_0_DMOD, TSIS_ENABLE, 0x1);
- it913x_wr_reg(d, DEV_0_DMOD, MP2_SW_RST, 0x0);
- it913x_wr_reg(d, DEV_0_DMOD, MP2IF2_SW_RST, 0x0);
- it913x_wr_reg(d, DEV_0_DMOD, MP2IF2_HALF_PSB, 0x0);
- it913x_wr_reg(d, DEV_0_DMOD, MP2IF_STOP_EN, 0x1);
- it913x_wr_reg(d, DEV_1_DMOD, MPEG_FULL_SPEED, 0x0);
- ret = it913x_wr_reg(d, DEV_1_DMOD, MP2IF_STOP_EN, 0x0);
- } else
- return -ENODEV;
-
- ret |= it913x_name(adap);
-
- return ret;
-}
-
-/* DVB USB Driver */
-static int it913x_get_adapter_count(struct dvb_usb_device *d)
-{
- struct it913x_state *st = d->priv;
- if (st->it913x_config.dual_mode)
- return 2;
- return 1;
-}
-
-static struct dvb_usb_device_properties it913x_properties = {
- .driver_name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
- .bInterfaceNumber = 0,
- .generic_bulk_ctrl_endpoint = 0x02,
- .generic_bulk_ctrl_endpoint_response = 0x81,
-
- .adapter_nr = adapter_nr,
- .size_of_priv = sizeof(struct it913x_state),
-
- .identify_state = it913x_identify_state,
- .i2c_algo = &it913x_i2c_algo,
-
- .download_firmware = it913x_download_firmware,
-
- .frontend_attach = it913x_frontend_attach,
- .get_rc_config = it913x_get_rc_config,
- .get_stream_config = it913x_get_stream_config,
- .get_adapter_count = it913x_get_adapter_count,
- .streaming_ctrl = it913x_streaming_ctrl,
-
-
- .adapter = {
- {
- .caps = DVB_USB_ADAP_HAS_PID_FILTER|
- DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
- .pid_filter_count = 32,
- .pid_filter = it913x_pid_filter,
- .pid_filter_ctrl = it913x_pid_filter_ctrl,
- .stream =
- DVB_USB_STREAM_BULK(0x84, 10, TS_BUFFER_SIZE_MAX),
- },
- {
- .caps = DVB_USB_ADAP_HAS_PID_FILTER|
- DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
- .pid_filter_count = 32,
- .pid_filter = it913x_pid_filter,
- .pid_filter_ctrl = it913x_pid_filter_ctrl,
- .stream =
- DVB_USB_STREAM_BULK(0x85, 10, TS_BUFFER_SIZE_MAX),
- }
- }
-};
-
-static const struct usb_device_id it913x_id_table[] = {
- { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB499_2T_T09,
- &it913x_properties, "Kworld UB499-2T T09(IT9137)",
- RC_MAP_IT913X_V1) },
- { DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135,
- &it913x_properties, "ITE 9135 Generic",
- RC_MAP_IT913X_V1) },
- { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22_IT9137,
- &it913x_properties, "Sveon STV22 Dual DVB-T HDTV(IT9137)",
- RC_MAP_IT913X_V1) },
- { DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9005,
- &it913x_properties, "ITE 9135(9005) Generic",
- RC_MAP_IT913X_V2) },
- { DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9006,
- &it913x_properties, "ITE 9135(9006) Generic",
- RC_MAP_IT913X_V1) },
- { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_1835,
- &it913x_properties, "Avermedia A835B(1835)",
- RC_MAP_IT913X_V2) },
- { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_2835,
- &it913x_properties, "Avermedia A835B(2835)",
- RC_MAP_IT913X_V2) },
- { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_3835,
- &it913x_properties, "Avermedia A835B(3835)",
- RC_MAP_IT913X_V2) },
- { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_4835,
- &it913x_properties, "Avermedia A835B(4835)",
- RC_MAP_IT913X_V2) },
- { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2,
- &it913x_properties, "Digital Dual TV Receiver CTVDIGDUAL_V2",
- RC_MAP_IT913X_V1) },
- { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335,
- &it913x_properties, "Avermedia H335",
- RC_MAP_IT913X_V2) },
- {} /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, it913x_id_table);
-
-static struct usb_driver it913x_driver = {
- .name = KBUILD_MODNAME,
- .probe = dvb_usbv2_probe,
- .disconnect = dvb_usbv2_disconnect,
- .suspend = dvb_usbv2_suspend,
- .resume = dvb_usbv2_resume,
- .id_table = it913x_id_table,
-};
-
-module_usb_driver(it913x_driver);
-
-MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
-MODULE_DESCRIPTION("it913x USB 2 Driver");
-MODULE_VERSION("1.33");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FW_IT9135_V1);
-MODULE_FIRMWARE(FW_IT9135_V2);
-MODULE_FIRMWARE(FW_IT9137);
-
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index fda5c64ba0e8..c83c16cece01 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -24,6 +24,7 @@
#include "rtl2830.h"
#include "rtl2832.h"
+#include "rtl2832_sdr.h"
#include "qt1010.h"
#include "mt2060.h"
@@ -35,6 +36,9 @@
#include "tua9001.h"
#include "r820t.h"
+static int rtl28xxu_disable_rc;
+module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644);
+MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
@@ -513,7 +517,7 @@ err:
return ret;
}
-static struct rtl2830_config rtl28xxu_rtl2830_mt2060_config = {
+static const struct rtl2830_config rtl28xxu_rtl2830_mt2060_config = {
.i2c_addr = 0x10, /* 0x20 */
.xtal = 28800000,
.ts_mode = 0,
@@ -524,7 +528,7 @@ static struct rtl2830_config rtl28xxu_rtl2830_mt2060_config = {
};
-static struct rtl2830_config rtl28xxu_rtl2830_qt1010_config = {
+static const struct rtl2830_config rtl28xxu_rtl2830_qt1010_config = {
.i2c_addr = 0x10, /* 0x20 */
.xtal = 28800000,
.ts_mode = 0,
@@ -534,7 +538,7 @@ static struct rtl2830_config rtl28xxu_rtl2830_qt1010_config = {
.agc_targ_val = 0x2d,
};
-static struct rtl2830_config rtl28xxu_rtl2830_mxl5005s_config = {
+static const struct rtl2830_config rtl28xxu_rtl2830_mxl5005s_config = {
.i2c_addr = 0x10, /* 0x20 */
.xtal = 28800000,
.ts_mode = 0,
@@ -548,7 +552,7 @@ static int rtl2831u_frontend_attach(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap_to_d(adap);
struct rtl28xxu_priv *priv = d_to_priv(d);
- struct rtl2830_config *rtl2830_config;
+ const struct rtl2830_config *rtl2830_config;
int ret;
dev_dbg(&d->udev->dev, "%s:\n", __func__);
@@ -583,33 +587,31 @@ err:
return ret;
}
-static struct rtl2832_config rtl28xxu_rtl2832_fc0012_config = {
+static const struct rtl2832_config rtl28xxu_rtl2832_fc0012_config = {
.i2c_addr = 0x10, /* 0x20 */
.xtal = 28800000,
- .if_dvbt = 0,
.tuner = TUNER_RTL2832_FC0012
};
-static struct rtl2832_config rtl28xxu_rtl2832_fc0013_config = {
+static const struct rtl2832_config rtl28xxu_rtl2832_fc0013_config = {
.i2c_addr = 0x10, /* 0x20 */
.xtal = 28800000,
- .if_dvbt = 0,
.tuner = TUNER_RTL2832_FC0013
};
-static struct rtl2832_config rtl28xxu_rtl2832_tua9001_config = {
+static const struct rtl2832_config rtl28xxu_rtl2832_tua9001_config = {
.i2c_addr = 0x10, /* 0x20 */
.xtal = 28800000,
.tuner = TUNER_RTL2832_TUA9001,
};
-static struct rtl2832_config rtl28xxu_rtl2832_e4000_config = {
+static const struct rtl2832_config rtl28xxu_rtl2832_e4000_config = {
.i2c_addr = 0x10, /* 0x20 */
.xtal = 28800000,
.tuner = TUNER_RTL2832_E4000,
};
-static struct rtl2832_config rtl28xxu_rtl2832_r820t_config = {
+static const struct rtl2832_config rtl28xxu_rtl2832_r820t_config = {
.i2c_addr = 0x10,
.xtal = 28800000,
.tuner = TUNER_RTL2832_R820T,
@@ -733,7 +735,7 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
int ret;
struct dvb_usb_device *d = adap_to_d(adap);
struct rtl28xxu_priv *priv = d_to_priv(d);
- struct rtl2832_config *rtl2832_config;
+ const struct rtl2832_config *rtl2832_config;
dev_dbg(&d->udev->dev, "%s:\n", __func__);
@@ -772,6 +774,9 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
goto err;
}
+ /* RTL2832 I2C repeater */
+ priv->demod_i2c_adapter = rtl2832_get_i2c_adapter(adap->fe[0]);
+
/* set fe callback */
adap->fe[0]->callback = rtl2832u_frontend_callback;
@@ -851,11 +856,6 @@ err:
return ret;
}
-static const struct e4000_config rtl2832u_e4000_config = {
- .i2c_addr = 0x64,
- .clock = 28800000,
-};
-
static const struct fc2580_config rtl2832u_fc2580_config = {
.i2c_addr = 0x56,
.clock = 16384000,
@@ -889,10 +889,14 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
int ret;
struct dvb_usb_device *d = adap_to_d(adap);
struct rtl28xxu_priv *priv = d_to_priv(d);
- struct dvb_frontend *fe;
+ struct dvb_frontend *fe = NULL;
+ struct i2c_board_info info;
+ struct i2c_client *client;
dev_dbg(&d->udev->dev, "%s:\n", __func__);
+ memset(&info, 0, sizeof(struct i2c_board_info));
+
switch (priv->tuner) {
case TUNER_RTL2832_FC0012:
fe = dvb_attach(fc0012_attach, adap->fe[0],
@@ -902,7 +906,10 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
* that to the tuner driver */
adap->fe[0]->ops.read_signal_strength =
adap->fe[0]->ops.tuner_ops.get_rf_strength;
- return 0;
+
+ /* attach SDR */
+ dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+ &rtl28xxu_rtl2832_fc0012_config, NULL);
break;
case TUNER_RTL2832_FC0013:
fe = dvb_attach(fc0013_attach, adap->fe[0],
@@ -911,10 +918,43 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
/* fc0013 also supports signal strength reading */
adap->fe[0]->ops.read_signal_strength =
adap->fe[0]->ops.tuner_ops.get_rf_strength;
- return 0;
- case TUNER_RTL2832_E4000:
- fe = dvb_attach(e4000_attach, adap->fe[0], &d->i2c_adap,
- &rtl2832u_e4000_config);
+
+ /* attach SDR */
+ dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+ &rtl28xxu_rtl2832_fc0013_config, NULL);
+ break;
+ case TUNER_RTL2832_E4000: {
+ struct v4l2_subdev *sd;
+ struct i2c_adapter *i2c_adap_internal =
+ rtl2832_get_private_i2c_adapter(adap->fe[0]);
+ struct e4000_config e4000_config = {
+ .fe = adap->fe[0],
+ .clock = 28800000,
+ };
+
+ strlcpy(info.type, "e4000", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &e4000_config;
+
+ request_module(info.type);
+ client = i2c_new_device(priv->demod_i2c_adapter, &info);
+ if (client == NULL || client->dev.driver == NULL)
+ break;
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ break;
+ }
+
+ priv->client = client;
+ sd = i2c_get_clientdata(client);
+ i2c_set_adapdata(i2c_adap_internal, d);
+
+ /* attach SDR */
+ dvb_attach(rtl2832_sdr_attach, adap->fe[0],
+ i2c_adap_internal,
+ &rtl28xxu_rtl2832_e4000_config, sd);
+ }
break;
case TUNER_RTL2832_FC2580:
fe = dvb_attach(fc2580_attach, adap->fe[0], &d->i2c_adap,
@@ -940,6 +980,10 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
/* Use tuner to get the signal strength */
adap->fe[0]->ops.read_signal_strength =
adap->fe[0]->ops.tuner_ops.get_rf_strength;
+
+ /* attach SDR */
+ dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+ &rtl28xxu_rtl2832_r820t_config, NULL);
break;
case TUNER_RTL2832_R828D:
/* power off mn88472 demod on GPIO0 */
@@ -963,12 +1007,11 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
adap->fe[0]->ops.tuner_ops.get_rf_strength;
break;
default:
- fe = NULL;
dev_err(&d->udev->dev, "%s: unknown tuner=%d\n", KBUILD_MODNAME,
priv->tuner);
}
- if (fe == NULL) {
+ if (fe == NULL && priv->client == NULL) {
ret = -ENODEV;
goto err;
}
@@ -1013,6 +1056,22 @@ err:
return ret;
}
+static void rtl28xxu_exit(struct dvb_usb_device *d)
+{
+ struct rtl28xxu_priv *priv = d->priv;
+ struct i2c_client *client = priv->client;
+
+ dev_dbg(&d->udev->dev, "%s:\n", __func__);
+
+ /* remove I2C tuner */
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
+ return;
+}
+
static int rtl2831u_power_ctrl(struct dvb_usb_device *d, int onoff)
{
int ret;
@@ -1322,6 +1381,10 @@ err:
static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
+ /* disable IR interrupts in order to avoid SDR sample loss */
+ if (rtl28xxu_disable_rc)
+ return rtl28xx_wr_reg(d, IR_RX_IE, 0x00);
+
/* load empty to enable rc */
if (!rc->map_name)
rc->map_name = RC_MAP_EMPTY;
@@ -1371,6 +1434,7 @@ static const struct dvb_usb_device_properties rtl2832u_props = {
.frontend_attach = rtl2832u_frontend_attach,
.tuner_attach = rtl2832u_tuner_attach,
.init = rtl28xxu_init,
+ .exit = rtl28xxu_exit,
.get_rc_config = rtl2832u_get_rc_config,
.num_adapters = 1,
@@ -1382,6 +1446,7 @@ static const struct dvb_usb_device_properties rtl2832u_props = {
};
static const struct usb_device_id rtl28xxu_id_table[] = {
+ /* RTL2831U devices: */
{ DVB_USB_DEVICE(USB_VID_REALTEK, USB_PID_REALTEK_RTL2831U,
&rtl2831u_props, "Realtek RTL2831U reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_FREECOM_DVBT,
@@ -1389,6 +1454,7 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_FREECOM_DVBT_2,
&rtl2831u_props, "Freecom USB2.0 DVB-T", NULL) },
+ /* RTL2832U devices: */
{ DVB_USB_DEVICE(USB_VID_REALTEK, 0x2832,
&rtl2832u_props, "Realtek RTL2832U reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_REALTEK, 0x2838,
@@ -1401,6 +1467,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl2832u_props, "TerraTec NOXON DAB Stick", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK_REV2,
&rtl2832u_props, "TerraTec NOXON DAB Stick (rev 2)", NULL) },
+ { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK_REV3,
+ &rtl2832u_props, "TerraTec NOXON DAB Stick (rev 3)", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_TREKSTOR_TERRES_2_0,
&rtl2832u_props, "Trekstor DVB-T Stick Terres 2.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1101,
@@ -1429,9 +1497,14 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl2832u_props, "Leadtek WinFast DTV Dongle mini", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_CPYTO_REDI_PC50A,
&rtl2832u_props, "Crypto ReDi PC 50 A", NULL) },
+ { DVB_USB_DEVICE(USB_VID_KYE, 0x707f,
+ &rtl2832u_props, "Genius TVGo DVB-T03", NULL) },
+ /* RTL2832P devices: */
{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
&rtl2832u_props, "Astrometa DVB-T2", NULL) },
+ { DVB_USB_DEVICE(USB_VID_KYE, 0x707f,
+ &rtl2832u_props, "Genius TVGo DVB-T03", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
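
[Editor's note, usage hint] Two practical notes on the rtl28xxu changes above: the e4000 client is pinned with try_module_get() at attach time, so the new .exit hook must release it exactly once (module_put() followed by i2c_unregister_device(), as rtl28xxu_exit() does), and the new disable_rc parameter exists because IR polling costs USB bandwidth that the SDR stream needs. Assuming the module keeps the usual dvb-usb-v2 name dvb_usb_rtl28xxu, the remote can be switched off at load time with a modprobe options line such as:

	options dvb_usb_rtl28xxu disable_rc=1
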
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
index 2142bcb41b41..a26cab10f382 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
@@ -55,7 +55,9 @@ struct rtl28xxu_priv {
u8 tuner;
char *tuner_name;
u8 page; /* integrated demod active register page */
+ struct i2c_adapter *demod_i2c_adapter;
bool rc_active;
+ struct i2c_client *client;
};
enum rtl28xxu_chip_id {
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-remote.c b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
index 41bacff24960..4058aea9272f 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
@@ -272,7 +272,7 @@ static int rc_core_dvb_usb_remote_init(struct dvb_usb_device *d)
dev->driver_name = d->props.rc.core.module_name;
dev->map_name = d->props.rc.core.rc_codes;
dev->change_protocol = d->props.rc.core.change_protocol;
- dev->allowed_protos = d->props.rc.core.allowed_protos;
+ rc_set_allowed_protocols(dev, d->props.rc.core.allowed_protos);
dev->driver_type = d->props.rc.core.driver_type;
usb_to_input_id(d->udev, &dev->input_id);
dev->input_name = "IR-receiver inside an USB DVB receiver";
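
[Editor's note, a one-line sketch] Both DVB USB cores now set the supported protocols through the rc-core accessor rather than writing the field directly; a driver that only emits NEC scancodes would declare that as:

	rc_set_allowed_protocols(rc_dev, RC_BIT_NEC);
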
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index a1fccf3096de..d23a912096f7 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -53,8 +53,10 @@ config VIDEO_EM28XX_DVB
select DVB_MB86A20S if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA18212 if MEDIA_SUBDRV_AUTOSELECT
select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_DRX39XYJ if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB cards based on the
Empiatech em28xx chips.
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 05e9bd11a3ff..342490f44ed2 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -252,7 +252,7 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
{
struct em28xx *dev = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
- int ret = 0;
+ int nonblock, ret = 0;
if (!dev) {
em28xx_err("BUG: em28xx can't find device struct."
@@ -265,45 +265,48 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
dprintk("opening device and trying to acquire exclusive lock\n");
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+ if (nonblock) {
+ if (!mutex_trylock(&dev->lock))
+ return -EAGAIN;
+ } else
+ mutex_lock(&dev->lock);
+
runtime->hw = snd_em28xx_hw_capture;
- if ((dev->alt == 0 || dev->is_audio_only) && dev->adev.users == 0) {
- int nonblock = !!(substream->f_flags & O_NONBLOCK);
-
- if (nonblock) {
- if (!mutex_trylock(&dev->lock))
- return -EAGAIN;
- } else
- mutex_lock(&dev->lock);
- if (dev->is_audio_only)
- /* vendor audio is on a separate interface */
- dev->alt = 1;
- else
- /* vendor audio is on the same interface as video */
- dev->alt = 7;
- /*
- * FIXME: The intention seems to be to select the alt
- * setting with the largest wMaxPacketSize for the video
- * endpoint.
- * At least dev->alt should be used instead, but we
- * should probably not touch it at all if it is
- * already >0, because wMaxPacketSize of the audio
- * endpoints seems to be the same for all.
- */
-
- dprintk("changing alternate number on interface %d to %d\n",
- dev->ifnum, dev->alt);
- usb_set_interface(dev->udev, dev->ifnum, dev->alt);
+
+ if (dev->adev.users == 0) {
+ if (dev->alt == 0 || dev->is_audio_only) {
+ if (dev->is_audio_only)
+ /* audio is on a separate interface */
+ dev->alt = 1;
+ else
+ /* audio is on the same interface as video */
+ dev->alt = 7;
+ /*
+ * FIXME: The intention seems to be to select
+ * the alt setting with the largest
+ * wMaxPacketSize for the video endpoint.
+ * At least dev->alt should be used instead, but
+ * we should probably not touch it at all if it
+ * is already >0, because wMaxPacketSize of the
+ * audio endpoints seems to be the same for all.
+ */
+ dprintk("changing alternate number on interface %d to %d\n",
+ dev->ifnum, dev->alt);
+ usb_set_interface(dev->udev, dev->ifnum, dev->alt);
+ }
/* Sets volume, mute, etc */
dev->mute = 0;
ret = em28xx_audio_analog_set(dev);
if (ret < 0)
goto err;
-
- dev->adev.users++;
- mutex_unlock(&dev->lock);
}
+ kref_get(&dev->ref);
+ dev->adev.users++;
+ mutex_unlock(&dev->lock);
+
/* Dynamically adjust the period size */
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
@@ -341,6 +344,7 @@ static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream)
substream->runtime->dma_area = NULL;
}
mutex_unlock(&dev->lock);
+ kref_put(&dev->ref, em28xx_free_device);
return 0;
}
@@ -895,13 +899,15 @@ static int em28xx_audio_init(struct em28xx *dev)
em28xx_info("Binding audio extension\n");
+ kref_get(&dev->ref);
+
printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus "
"Rechberger\n");
printk(KERN_INFO
"em28xx-audio.c: Copyright (C) 2007-2014 Mauro Carvalho Chehab\n");
- err = snd_card_create(index[devnr], "Em28xx Audio", THIS_MODULE, 0,
- &card);
+ err = snd_card_new(&dev->udev->dev, index[devnr], "Em28xx Audio",
+ THIS_MODULE, 0, &card);
if (err < 0)
return err;
@@ -918,7 +924,6 @@ static int em28xx_audio_init(struct em28xx *dev)
pcm->private_data = dev;
strcpy(pcm->name, "Empia 28xx Capture");
- snd_card_set_dev(card, &dev->udev->dev);
strcpy(card->driver, "Em28xx-Audio");
strcpy(card->shortname, "Em28xx Audio");
strcpy(card->longname, "Empia Em28xx Audio");
@@ -967,7 +972,7 @@ static int em28xx_audio_fini(struct em28xx *dev)
if (dev == NULL)
return 0;
- if (dev->has_alsa_audio != 1) {
+ if (!dev->has_alsa_audio) {
/* This device does not support the extension (in this case
the device is expecting the snd-usb-audio module or
doesn't have analog audio support at all) */
@@ -986,6 +991,35 @@ static int em28xx_audio_fini(struct em28xx *dev)
dev->adev.sndcard = NULL;
}
+ kref_put(&dev->ref, em28xx_free_device);
+ return 0;
+}
+
+static int em28xx_audio_suspend(struct em28xx *dev)
+{
+ if (dev == NULL)
+ return 0;
+
+ if (!dev->has_alsa_audio)
+ return 0;
+
+ em28xx_info("Suspending audio extension");
+ em28xx_deinit_isoc_audio(dev);
+ atomic_set(&dev->stream_started, 0);
+ return 0;
+}
+
+static int em28xx_audio_resume(struct em28xx *dev)
+{
+ if (dev == NULL)
+ return 0;
+
+ if (!dev->has_alsa_audio)
+ return 0;
+
+ em28xx_info("Resuming audio extension");
+ /* Nothing to do other than schedule_work() ?? */
+ schedule_work(&dev->wq_trigger);
return 0;
}
@@ -994,6 +1028,8 @@ static struct em28xx_ops audio_ops = {
.name = "Em28xx Audio Extension",
.init = em28xx_audio_init,
.fini = em28xx_audio_fini,
+ .suspend = em28xx_audio_suspend,
+ .resume = em28xx_audio_resume,
};
static int __init em28xx_alsa_register(void)
@@ -1008,7 +1044,7 @@ static void __exit em28xx_alsa_unregister(void)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_DESCRIPTION(DRIVER_DESC " - audio interface");
MODULE_VERSION(EM28XX_VERSION);
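
[Editor's note, a reduced sketch of the lifetime change] The audio extension now holds a reference on the em28xx device while it is bound and while a PCM substream is open, so the USB disconnect path can no longer free the device structure underneath ALSA. The pattern, reduced to its two sides (em28xx_free_device is the release callback already referenced by this hunk):

	/* bind / open: pin the device */
	kref_get(&dev->ref);

	/* unbind / close: drop it; the final put runs em28xx_free_device() */
	kref_put(&dev->ref, em28xx_free_device);
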
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index c29f5c4e7b40..505e0505be04 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -120,7 +120,7 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
reg = 0x00;
ret = i2c_master_send(&client, &reg, 1);
if (ret < 0) {
- if (ret != -ENODEV)
+ if (ret != -ENXIO)
em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
client.addr << 1, ret);
continue;
@@ -218,7 +218,7 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
reg = 0x1c;
ret = i2c_smbus_read_byte_data(&client, reg);
if (ret < 0) {
- if (ret != -ENODEV)
+ if (ret != -ENXIO)
em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
client.addr << 1, ret);
continue;
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 4d97a76cc3b0..50aa5a5317f2 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(usb_xfer_mode,
/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */
-DECLARE_BITMAP(em28xx_devused, EM28XX_MAXBOARDS);
+static DECLARE_BITMAP(em28xx_devused, EM28XX_MAXBOARDS);
struct em28xx_hash_table {
unsigned long hash;
@@ -189,6 +189,14 @@ static struct em28xx_reg_seq kworld_a340_digital[] = {
{ -1, -1, -1, -1},
};
+static struct em28xx_reg_seq kworld_ub435q_v3_digital[] = {
+ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100},
+ { -1, -1, -1, -1},
+};
+
/* Pinnacle Hybrid Pro eb1a:2881 */
static struct em28xx_reg_seq pinnacle_hybrid_pro_analog[] = {
{EM2820_R08_GPIO_CTRL, 0xfd, ~EM_GPIO_4, 10},
@@ -214,6 +222,17 @@ static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_digital[] = {
{ -1, -1, -1, -1},
};
+/* PCTV HD Mini (80e) GPIOs
+ 0-5: not used
+ 6: demod reset, active low
+ 7: LED on, active high */
+static struct em28xx_reg_seq em2874_pctv_80e_digital[] = {
+ {EM28XX_R06_I2C_CLK, 0x45, 0xff, 10}, /*400 KHz*/
+ {EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 100},/*Demod reset*/
+ {EM2874_R80_GPIO_P0_CTRL, 0x40, 0xff, 10},
+ { -1, -1, -1, -1},
+};
+
/* eb1a:2868 Reddo DVB-C USB TV Box
GPIO4 - CU1216L NIM
Other GPIOs seem to be don't care. */
@@ -497,6 +516,27 @@ static struct em28xx_led speedlink_vad_laplace_leds[] = {
{-1, 0, 0, 0},
};
+static struct em28xx_led kworld_ub435q_v3_leds[] = {
+ {
+ .role = EM28XX_LED_DIGITAL_CAPTURING,
+ .gpio_reg = EM2874_R80_GPIO_P0_CTRL,
+ .gpio_mask = 0x80,
+ .inverted = 1,
+ },
+ {-1, 0, 0, 0},
+};
+
+static struct em28xx_led pctv_80e_leds[] = {
+ {
+ .role = EM28XX_LED_DIGITAL_CAPTURING,
+ .gpio_reg = EM2874_R80_GPIO_P0_CTRL,
+ .gpio_mask = 0x80,
+ .inverted = 0,
+ },
+ {-1, 0, 0, 0},
+};
+
/*
* Board definitions
*/
@@ -2128,6 +2168,29 @@ struct em28xx_board em28xx_boards[] = {
.tuner_gpio = default_tuner_gpio,
.def_i2c_bus = 1,
},
+ /*
+ * 1b80:e34c KWorld USB ATSC TV Stick UB435-Q V3
+ * Empia EM2874B + LG DT3305 + NXP TDA18271HDC2
+ */
+ [EM2874_BOARD_KWORLD_UB435Q_V3] = {
+ .name = "KWorld USB ATSC TV Stick UB435-Q V3",
+ .tuner_type = TUNER_ABSENT,
+ .has_dvb = 1,
+ .tuner_gpio = kworld_ub435q_v3_digital,
+ .def_i2c_bus = 1,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_100_KHZ,
+ .leds = kworld_ub435q_v3_leds,
+ },
+ [EM2874_BOARD_PCTV_HD_MINI_80E] = {
+ .name = "Pinnacle PCTV HD Mini",
+ .tuner_type = TUNER_ABSENT,
+ .has_dvb = 1,
+ .dvb_gpio = em2874_pctv_80e_digital,
+ .decoder = EM28XX_NODECODER,
+ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
+ .leds = pctv_80e_leds,
+ },
/* 1ae7:9003/9004 SpeedLink Vicious And Devine Laplace webcam
* Empia EM2765 + OmniVision OV2640 */
[EM2765_BOARD_SPEEDLINK_VAD_LAPLACE] = {
@@ -2290,6 +2353,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2882_BOARD_PINNACLE_HYBRID_PRO_330E },
{ USB_DEVICE(0x2304, 0x0227),
.driver_info = EM2880_BOARD_PINNACLE_PCTV_HD_PRO },
+ { USB_DEVICE(0x2304, 0x023f),
+ .driver_info = EM2874_BOARD_PCTV_HD_MINI_80E },
{ USB_DEVICE(0x0413, 0x6023),
.driver_info = EM2800_BOARD_LEADTEK_WINFAST_USBII },
{ USB_DEVICE(0x093b, 0xa003),
@@ -2304,6 +2369,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2870_BOARD_KWORLD_A340 },
{ USB_DEVICE(0x1b80, 0xe346),
.driver_info = EM2874_BOARD_KWORLD_UB435Q_V2 },
+ { USB_DEVICE(0x1b80, 0xe34c),
+ .driver_info = EM2874_BOARD_KWORLD_UB435Q_V3 },
{ USB_DEVICE(0x2013, 0x024f),
.driver_info = EM28174_BOARD_PCTV_290E },
{ USB_DEVICE(0x2013, 0x024c),
@@ -2872,7 +2939,7 @@ static void flush_request_modules(struct em28xx *dev)
* unregisters the v4l2,i2c and usb devices
* called when the device gets disconnected or at module unload
*/
-void em28xx_release_resources(struct em28xx *dev)
+static void em28xx_release_resources(struct em28xx *dev)
{
/*FIXME: I2C IR should be disconnected */
@@ -2889,7 +2956,27 @@ void em28xx_release_resources(struct em28xx *dev)
mutex_unlock(&dev->lock);
};
-EXPORT_SYMBOL_GPL(em28xx_release_resources);
+
+/**
+ * em28xx_free_device() - Free em28xx device
+ *
+ * @ref: struct kref for em28xx device
+ *
+ * This is called once all extensions and the em28xx core have released their references to the device
+ */
+void em28xx_free_device(struct kref *ref)
+{
+ struct em28xx *dev = kref_to_dev(ref);
+
+ em28xx_info("Freeing device\n");
+
+ if (!dev->disconnected)
+ em28xx_release_resources(dev);
+
+ kfree(dev->alt_max_pkt_size_isoc);
+ kfree(dev);
+}
+EXPORT_SYMBOL_GPL(em28xx_free_device);
/*
* em28xx_init_dev()
@@ -3331,8 +3418,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (has_video) {
if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk))
dev->analog_xfer_bulk = 1;
- em28xx_info("analog set to %s mode.\n",
- dev->analog_xfer_bulk ? "bulk" : "isoc");
+ em28xx_info("analog set to %s mode.\n",
+ dev->analog_xfer_bulk ? "bulk" : "isoc");
}
if (has_dvb) {
if (!dev->dvb_ep_isoc || (try_bulk && dev->dvb_ep_bulk))
@@ -3342,6 +3429,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
dev->dvb_xfer_bulk ? "bulk" : "isoc");
}
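+ /* initial reference; released by em28xx_usb_disconnect() */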
+ kref_init(&dev->ref);
+
request_modules(dev);
/* Should be the last thing to do, to avoid newer udev's to
@@ -3386,17 +3475,39 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
em28xx_close_extension(dev);
em28xx_release_resources(dev);
+ kref_put(&dev->ref, em28xx_free_device);
+}
- if (!dev->users) {
- kfree(dev->alt_max_pkt_size_isoc);
- kfree(dev);
- }
+static int em28xx_usb_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct em28xx *dev;
+
+ dev = usb_get_intfdata(interface);
+ if (!dev)
+ return 0;
+ em28xx_suspend_extension(dev);
+ return 0;
+}
+
+static int em28xx_usb_resume(struct usb_interface *interface)
+{
+ struct em28xx *dev;
+
+ dev = usb_get_intfdata(interface);
+ if (!dev)
+ return 0;
+ em28xx_resume_extension(dev);
+ return 0;
}
static struct usb_driver em28xx_usb_driver = {
.name = "em28xx",
.probe = em28xx_usb_probe,
.disconnect = em28xx_usb_disconnect,
+ .suspend = em28xx_usb_suspend,
+ .resume = em28xx_usb_resume,
+ .reset_resume = em28xx_usb_resume,
.id_table = em28xx_id_table,
};
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 898fb9bd88a2..523d7e92bf47 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -619,6 +619,7 @@ EXPORT_SYMBOL_GPL(em28xx_find_led);
int em28xx_capture_start(struct em28xx *dev, int start)
{
int rc;
+ const struct em28xx_led *led = NULL;
if (dev->chip_id == CHIP_ID_EM2874 ||
dev->chip_id == CHIP_ID_EM2884 ||
@@ -643,6 +644,8 @@ int em28xx_capture_start(struct em28xx *dev, int start)
/* Enable video capture */
rc = em28xx_write_reg(dev, 0x48, 0x00);
+ if (rc < 0)
+ return rc;
if (dev->mode == EM28XX_ANALOG_MODE)
rc = em28xx_write_reg(dev,
@@ -650,6 +653,8 @@ int em28xx_capture_start(struct em28xx *dev, int start)
else
rc = em28xx_write_reg(dev,
EM28XX_R12_VINENABLE, 0x37);
+ if (rc < 0)
+ return rc;
msleep(6);
} else {
@@ -658,19 +663,16 @@ int em28xx_capture_start(struct em28xx *dev, int start)
}
}
- if (rc < 0)
- return rc;
-
- /* Switch (explicitly controlled) analog capturing LED on/off */
- if (dev->mode == EM28XX_ANALOG_MODE) {
- const struct em28xx_led *led;
+ if (dev->mode == EM28XX_ANALOG_MODE)
led = em28xx_find_led(dev, EM28XX_LED_ANALOG_CAPTURING);
- if (led)
- em28xx_write_reg_bits(dev, led->gpio_reg,
- (!start ^ led->inverted) ?
- ~led->gpio_mask : led->gpio_mask,
- led->gpio_mask);
- }
+ else
+ led = em28xx_find_led(dev, EM28XX_LED_DIGITAL_CAPTURING);
+
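+ /* Switch the (explicitly controlled) capturing LED on/off */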
+ if (led)
+ em28xx_write_reg_bits(dev, led->gpio_reg,
+ (!start ^ led->inverted) ?
+ ~led->gpio_mask : led->gpio_mask,
+ led->gpio_mask);
return rc;
}
@@ -1106,3 +1108,31 @@ void em28xx_close_extension(struct em28xx *dev)
list_del(&dev->devlist);
mutex_unlock(&em28xx_devlist_mutex);
}
+
+int em28xx_suspend_extension(struct em28xx *dev)
+{
+ const struct em28xx_ops *ops = NULL;
+
+ em28xx_info("Suspending extensions");
+ mutex_lock(&em28xx_devlist_mutex);
+ list_for_each_entry(ops, &em28xx_extension_devlist, next) {
+ if (ops->suspend)
+ ops->suspend(dev);
+ }
+ mutex_unlock(&em28xx_devlist_mutex);
+ return 0;
+}
+
+int em28xx_resume_extension(struct em28xx *dev)
+{
+ const struct em28xx_ops *ops = NULL;
+
+ em28xx_info("Resuming extensions");
+ mutex_lock(&em28xx_devlist_mutex);
+ list_for_each_entry(ops, &em28xx_extension_devlist, next) {
+ if (ops->resume)
+ ops->resume(dev);
+ }
+ mutex_unlock(&em28xx_devlist_mutex);
+ return 0;
+}
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index a0a669e81362..f599b18ef7ca 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -41,6 +41,7 @@
#include "mt352.h"
#include "mt352_priv.h" /* FIXME */
#include "tda1002x.h"
+#include "drx39xyj/drx39xxj.h"
#include "tda18271.h"
#include "s921.h"
#include "drxd.h"
@@ -48,6 +49,7 @@
#include "tda18271c2dd.h"
#include "drxk.h"
#include "tda10071.h"
+#include "tda18212.h"
#include "a8293.h"
#include "qt1010.h"
#include "mb86a20s.h"
@@ -161,6 +163,8 @@ static inline int em28xx_dvb_urb_data_copy(struct em28xx *dev, struct urb *urb)
if (urb->status != -EPROTO)
continue;
}
+ if (!urb->actual_length)
+ continue;
dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
urb->actual_length);
} else {
@@ -170,6 +174,8 @@ static inline int em28xx_dvb_urb_data_copy(struct em28xx *dev, struct urb *urb)
if (urb->iso_frame_desc[i].status != -EPROTO)
continue;
}
+ if (!urb->iso_frame_desc[i].actual_length)
+ continue;
dvb_dmx_swfilter(&dev->dvb->demux,
urb->transfer_buffer +
urb->iso_frame_desc[i].offset,
@@ -208,10 +214,10 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
if (rc < 0)
return rc;
- dprintk(1, "Using %d buffers each with %d x %d bytes\n",
+ dprintk(1, "Using %d buffers each with %d x %d bytes, alternate %d\n",
EM28XX_DVB_NUM_BUFS,
packet_multiplier,
- dvb_max_packet_size);
+ dvb_max_packet_size, dvb_alt);
return em28xx_init_usb_xfer(dev, EM28XX_DIGITAL_MODE,
dev->dvb_xfer_bulk,
@@ -315,6 +321,18 @@ static struct lgdt3305_config em2874_lgdt3305_dev = {
.qam_if_khz = 4000,
};
+static struct lgdt3305_config em2874_lgdt3305_nogate_dev = {
+ .i2c_addr = 0x0e,
+ .demod_chip = LGDT3305,
+ .spectral_inversion = 1,
+ .deny_i2c_rptr = 1,
+ .mpeg_mode = LGDT3305_MPEG_SERIAL,
+ .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE,
+ .tpvalid_polarity = LGDT3305_TP_VALID_HIGH,
+ .vsb_if_khz = 3600,
+ .qam_if_khz = 3600,
+};
+
static struct s921_config sharp_isdbt = {
.demod_address = 0x30 >> 1
};
@@ -351,6 +369,12 @@ static struct tda18271_config kworld_ub435q_v2_config = {
.gate = TDA18271_GATE_DIGITAL,
};
+static struct tda18212_config kworld_ub435q_v3_config = {
+ .i2c_address = 0x60,
+ .if_atsc_vsb = 3600,
+ .if_atsc_qam = 3600,
+};
+
static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
@@ -693,7 +717,8 @@ static void pctv_520e_init(struct em28xx *dev)
static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct em28xx *dev = fe->dvb->priv;
+ struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
+ struct em28xx *dev = i2c_bus->dev;
#ifdef CONFIG_GPIOLIB
struct em28xx_dvb *dvb = dev->dvb;
int ret;
@@ -817,6 +842,20 @@ static const struct m88ds3103_config pctv_461e_m88ds3103_config = {
.agc = 0x99,
};
+
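+/* tda18271 IF/AGC overrides used with the drx-j demod on the PCTV HD Mini 80e */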
+static struct tda18271_std_map drx_j_std_map = {
+ .atsc_6 = { .if_freq = 5000, .agc_mode = 3, .std = 0, .if_lvl = 1,
+ .rfagc_top = 0x37, },
+ .qam_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3, .if_lvl = 1,
+ .rfagc_top = 0x37, },
+};
+
+static struct tda18271_config pinnacle_80e_dvb_config = {
+ .std_map = &drx_j_std_map,
+ .gate = TDA18271_GATE_DIGITAL,
+ .role = TDA18271_MASTER,
+};
+
/* ------------------------------------------------------------------ */
static int em28xx_attach_xc3028(u8 addr, struct em28xx *dev)
@@ -1005,7 +1044,6 @@ static int em28xx_dvb_init(struct em28xx *dev)
em28xx_info("Binding DVB extension\n");
dvb = kzalloc(sizeof(struct em28xx_dvb), GFP_KERNEL);
-
if (dvb == NULL) {
em28xx_info("em28xx_dvb: memory allocation failed\n");
return -ENOMEM;
@@ -1370,10 +1408,40 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM2874_BOARD_KWORLD_UB435Q_V3:
+ dvb->fe[0] = dvb_attach(lgdt3305_attach,
+ &em2874_lgdt3305_nogate_dev,
+ &dev->i2c_adap[dev->def_i2c_bus]);
+ if (!dvb->fe[0]) {
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* Attach the tuner. */
+ if (!dvb_attach(tda18212_attach, dvb->fe[0],
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &kworld_ub435q_v3_config)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
+ case EM2874_BOARD_PCTV_HD_MINI_80E:
+ dvb->fe[0] = dvb_attach(drx39xxj_attach, &dev->i2c_adap[dev->def_i2c_bus]);
+ if (dvb->fe[0] != NULL) {
+ dvb->fe[0] = dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &pinnacle_80e_dvb_config);
+ if (!dvb->fe[0]) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ }
+ break;
case EM28178_BOARD_PCTV_461E:
{
/* demod I2C adapter */
struct i2c_adapter *i2c_adapter;
+ struct i2c_client *client;
struct i2c_board_info info;
struct m88ts2022_config m88ts2022_config = {
.clock = 27000000,
@@ -1396,7 +1464,19 @@ static int em28xx_dvb_init(struct em28xx *dev)
info.addr = 0x60;
info.platform_data = &m88ts2022_config;
request_module("m88ts2022");
- dvb->i2c_client_tuner = i2c_new_device(i2c_adapter, &info);
+ client = i2c_new_device(i2c_adapter, &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ dvb_frontend_detach(dvb->fe[0]);
+ result = -ENODEV;
+ goto out_free;
+ }
+
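+ /* pin the tuner driver module so it cannot be unloaded while the frontend uses it */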
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ dvb_frontend_detach(dvb->fe[0]);
+ result = -ENODEV;
+ goto out_free;
+ }
/* delegate signal strength measurement to tuner */
dvb->fe[0]->ops.read_signal_strength =
@@ -1406,10 +1486,14 @@ static int em28xx_dvb_init(struct em28xx *dev)
if (!dvb_attach(a8293_attach, dvb->fe[0],
&dev->i2c_adap[dev->def_i2c_bus],
&em28xx_a8293_config)) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
dvb_frontend_detach(dvb->fe[0]);
result = -ENODEV;
goto out_free;
}
+
+ dvb->i2c_client_tuner = client;
}
break;
default:
@@ -1437,6 +1521,9 @@ static int em28xx_dvb_init(struct em28xx *dev)
dvb->adapter.mfe_shared = mfe_shared;
em28xx_info("DVB extension successfully initialized\n");
+
+ kref_get(&dev->ref);
+
ret:
em28xx_set_mode(dev, EM28XX_SUSPEND);
mutex_unlock(&dev->lock);
@@ -1457,6 +1544,9 @@ static inline void prevent_sleep(struct dvb_frontend_ops *ops)
static int em28xx_dvb_fini(struct em28xx *dev)
{
+ struct em28xx_dvb *dvb;
+ struct i2c_client *client;
+
if (dev->is_audio_only) {
/* Shouldn't initialize IR for this interface */
return 0;
@@ -1467,23 +1557,96 @@ static int em28xx_dvb_fini(struct em28xx *dev)
return 0;
}
+ if (!dev->dvb)
+ return 0;
+
em28xx_info("Closing DVB extension");
+ dvb = dev->dvb;
+ client = dvb->i2c_client_tuner;
+
+ em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
+
+ if (dev->disconnected) {
+ /* We cannot tell the device to sleep
+ * once it has been unplugged. */
+ if (dvb->fe[0])
+ prevent_sleep(&dvb->fe[0]->ops);
+ if (dvb->fe[1])
+ prevent_sleep(&dvb->fe[1]->ops);
+ }
+
+ /* remove I2C tuner */
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
+ em28xx_unregister_dvb(dvb);
+ kfree(dvb);
+ dev->dvb = NULL;
+ kref_put(&dev->ref, em28xx_free_device);
+
+ return 0;
+}
+
+static int em28xx_dvb_suspend(struct em28xx *dev)
+{
+ int ret = 0;
+
+ if (dev->is_audio_only)
+ return 0;
+
+ if (!dev->board.has_dvb)
+ return 0;
+
+ em28xx_info("Suspending DVB extension");
if (dev->dvb) {
struct em28xx_dvb *dvb = dev->dvb;
- em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
+ if (dvb->fe[0]) {
+ ret = dvb_frontend_suspend(dvb->fe[0]);
+ em28xx_info("fe0 suspend %d", ret);
+ }
+ if (dvb->fe[1]) {
+ ret = dvb_frontend_suspend(dvb->fe[1]);
+ em28xx_info("fe1 suspend %d", ret);
+ }
+ }
+
+ return 0;
+}
+
+static int em28xx_dvb_resume(struct em28xx *dev)
+{
+ int ret = 0;
- if (dev->disconnected) {
- /* We cannot tell the device to sleep
- * once it has been unplugged. */
- if (dvb->fe[0])
- prevent_sleep(&dvb->fe[0]->ops);
- if (dvb->fe[1])
- prevent_sleep(&dvb->fe[1]->ops);
+ if (dev->is_audio_only)
+ return 0;
+
+ if (!dev->board.has_dvb)
+ return 0;
+
+ em28xx_info("Resuming DVB extension");
+ if (dev->dvb) {
+ struct em28xx_dvb *dvb = dev->dvb;
+ struct i2c_client *client = dvb->i2c_client_tuner;
+
+ if (dvb->fe[0]) {
+ ret = dvb_frontend_resume(dvb->fe[0]);
+ em28xx_info("fe0 resume %d", ret);
+ }
+
+ if (dvb->fe[1]) {
+ ret = dvb_frontend_resume(dvb->fe[1]);
+ em28xx_info("fe1 resume %d", ret);
+ }
+ /* remove I2C tuner */
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
}
- i2c_release_client(dvb->i2c_client_tuner);
em28xx_unregister_dvb(dvb);
kfree(dvb);
dev->dvb = NULL;
@@ -1497,6 +1660,8 @@ static struct em28xx_ops dvb_ops = {
.name = "Em28xx dvb Extension",
.init = em28xx_dvb_init,
.fini = em28xx_dvb_fini,
+ .suspend = em28xx_dvb_suspend,
+ .resume = em28xx_dvb_resume,
};
static int __init em28xx_dvb_register(void)
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 7e1724076ac4..ba6433c3a643 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -81,7 +81,7 @@ static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
return len;
if (ret == 0x94 + len - 1) {
if (i2c_debug == 1)
- em28xx_warn("R05 returned 0x%02x: I2C timeout",
+ em28xx_warn("R05 returned 0x%02x: I2C ACK error\n",
ret);
return -ENXIO;
}
@@ -128,7 +128,7 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
break;
if (ret == 0x94 + len - 1) {
if (i2c_debug == 1)
- em28xx_warn("R05 returned 0x%02x: I2C timeout",
+ em28xx_warn("R05 returned 0x%02x: I2C ACK error\n",
ret);
return -ENXIO;
}
@@ -210,7 +210,7 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
return len;
if (ret == 0x10) {
if (i2c_debug == 1)
- em28xx_warn("I2C transfer timeout on writing to addr 0x%02x",
+ em28xx_warn("I2C ACK error on writing to addr 0x%02x\n",
addr);
return -ENXIO;
}
@@ -226,10 +226,18 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
* (even with high payload) ...
*/
}
- if (i2c_debug)
- em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
- addr, ret);
- return -ETIMEDOUT;
+
+ if (ret == 0x02 || ret == 0x04) {
+ /* NOTE: these errors seem to be related to clock stretching */
+ if (i2c_debug)
+ em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
+ addr, ret);
+ return -ETIMEDOUT;
+ }
+
+ em28xx_warn("write to i2c device at 0x%x failed with unknown error (status=%i)\n",
+ addr, ret);
+ return -EIO;
}
/*
@@ -274,13 +282,22 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
}
if (ret == 0x10) {
if (i2c_debug == 1)
- em28xx_warn("I2C transfer timeout on writing to addr 0x%02x",
+ em28xx_warn("I2C ACK error on writing to addr 0x%02x\n",
addr);
return -ENXIO;
}
- em28xx_warn("unknown i2c error (status=%i)\n", ret);
- return -ETIMEDOUT;
+ if (ret == 0x02 || ret == 0x04) {
+ /* NOTE: these errors seem to be related to clock stretching */
+ if (i2c_debug)
+ em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
+ addr, ret);
+ return -ETIMEDOUT;
+ }
+
+ em28xx_warn("write to i2c device at 0x%x failed with unknown error (status=%i)\n",
+ addr, ret);
+ return -EIO;
}
/*
@@ -337,7 +354,7 @@ static int em25xx_bus_B_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
return len;
else if (ret > 0) {
if (i2c_debug == 1)
- em28xx_warn("Bus B R08 returned 0x%02x: I2C timeout",
+ em28xx_warn("Bus B R08 returned 0x%02x: I2C ACK error\n",
ret);
return -ENXIO;
}
@@ -392,7 +409,7 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
return len;
else if (ret > 0) {
if (i2c_debug == 1)
- em28xx_warn("Bus B R08 returned 0x%02x: I2C timeout",
+ em28xx_warn("Bus B R08 returned 0x%02x: I2C ACK error\n",
ret);
return -ENXIO;
}
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 18f65d89d4bc..56ef49df4f8d 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -676,6 +676,8 @@ static int em28xx_ir_init(struct em28xx *dev)
return 0;
}
+ kref_get(&dev->ref);
+
if (dev->board.buttons)
em28xx_init_buttons(dev);
@@ -725,7 +727,7 @@ static int em28xx_ir_init(struct em28xx *dev)
case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
rc->map_name = RC_MAP_HAUPPAUGE;
ir->get_key_i2c = em28xx_get_key_em_haup;
- rc->allowed_protos = RC_BIT_RC5;
+ rc_set_allowed_protocols(rc, RC_BIT_RC5);
break;
case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
rc->map_name = RC_MAP_WINFAST_USBII_DELUXE;
@@ -741,7 +743,7 @@ static int em28xx_ir_init(struct em28xx *dev)
switch (dev->chip_id) {
case CHIP_ID_EM2860:
case CHIP_ID_EM2883:
- rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
+ rc_set_allowed_protocols(rc, RC_BIT_RC5 | RC_BIT_NEC);
ir->get_key = default_polling_getkey;
break;
case CHIP_ID_EM2884:
@@ -749,8 +751,8 @@ static int em28xx_ir_init(struct em28xx *dev)
case CHIP_ID_EM28174:
case CHIP_ID_EM28178:
ir->get_key = em2874_polling_getkey;
- rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC |
- RC_BIT_RC6_0;
+ rc_set_allowed_protocols(rc, RC_BIT_RC5 | RC_BIT_NEC |
+ RC_BIT_RC6_0);
break;
default:
err = -ENODEV;
@@ -816,7 +818,7 @@ static int em28xx_ir_fini(struct em28xx *dev)
/* skip detach on non attached boards */
if (!ir)
- return 0;
+ goto ref_put;
if (ir->rc)
rc_unregister_device(ir->rc);
@@ -824,6 +826,45 @@ static int em28xx_ir_fini(struct em28xx *dev)
/* done */
kfree(ir);
dev->ir = NULL;
+
+ref_put:
+ kref_put(&dev->ref, em28xx_free_device);
+
+ return 0;
+}
+
+static int em28xx_ir_suspend(struct em28xx *dev)
+{
+ struct em28xx_IR *ir = dev->ir;
+
+ if (dev->is_audio_only)
+ return 0;
+
+ em28xx_info("Suspending input extension");
+ if (ir)
+ cancel_delayed_work_sync(&ir->work);
+ cancel_delayed_work_sync(&dev->buttons_query_work);
+ /* is canceling the delayed work sufficient, or does the rc event
+ kthread need stopping? The kthread is stopped in
+ ir_raw_event_unregister() */
+ return 0;
+}
+
+static int em28xx_ir_resume(struct em28xx *dev)
+{
+ struct em28xx_IR *ir = dev->ir;
+
+ if (dev->is_audio_only)
+ return 0;
+
+ em28xx_info("Resuming input extension");
+ /* if suspend calls ir_raw_event_unregister(), then resume should
+ call ir_raw_event_register() */
+ if (ir)
+ schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
+ if (dev->num_button_polling_addresses)
+ schedule_delayed_work(&dev->buttons_query_work,
+ msecs_to_jiffies(dev->button_polling_interval));
return 0;
}
@@ -832,6 +873,8 @@ static struct em28xx_ops rc_ops = {
.name = "Em28xx Input Extension",
.init = em28xx_ir_init,
.fini = em28xx_ir_fini,
+ .suspend = em28xx_ir_suspend,
+ .resume = em28xx_ir_resume,
};
static int __init em28xx_rc_register(void)
@@ -845,7 +888,7 @@ static void __exit em28xx_rc_unregister(void)
}
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_DESCRIPTION(DRIVER_DESC " - input interface");
MODULE_VERSION(EM28XX_VERSION);
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index c3c928937dcd..0856e5d367b6 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1029,7 +1029,7 @@ static int em28xx_vb2_setup(struct em28xx *dev)
q = &dev->vb_vidq;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct em28xx_buffer);
q->ops = &em28xx_video_qops;
@@ -1043,7 +1043,7 @@ static int em28xx_vb2_setup(struct em28xx *dev)
q = &dev->vb_vbiq;
q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR;
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct em28xx_buffer);
q->ops = &em28xx_vbi_qops;
@@ -1837,7 +1837,6 @@ static int em28xx_v4l2_open(struct file *filp)
video_device_node_name(vdev), v4l2_type_names[fh_type],
dev->users);
-
if (mutex_lock_interruptible(&dev->lock))
return -ERESTARTSYS;
fh = kzalloc(sizeof(struct em28xx_fh), GFP_KERNEL);
@@ -1869,6 +1868,7 @@ static int em28xx_v4l2_open(struct file *filp)
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_radio);
}
+ kref_get(&dev->ref);
dev->users++;
mutex_unlock(&dev->lock);
@@ -1918,18 +1918,43 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
video_unregister_device(dev->vdev);
}
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
if (dev->clk) {
v4l2_clk_unregister_fixed(dev->clk);
dev->clk = NULL;
}
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
- v4l2_device_unregister(&dev->v4l2_dev);
-
- if (dev->users)
- em28xx_warn("Device is open ! Memory deallocation is deferred on last close.\n");
mutex_unlock(&dev->lock);
+ kref_put(&dev->ref, em28xx_free_device);
+
+ return 0;
+}
+
+static int em28xx_v4l2_suspend(struct em28xx *dev)
+{
+ if (dev->is_audio_only)
+ return 0;
+ if (!dev->has_video)
+ return 0;
+
+ em28xx_info("Suspending video extension");
+ em28xx_stop_urbs(dev);
+ return 0;
+}
+
+static int em28xx_v4l2_resume(struct em28xx *dev)
+{
+ if (dev->is_audio_only)
+ return 0;
+
+ if (!dev->has_video)
+ return 0;
+
+ em28xx_info("Resuming video extension");
+ /* what do we do here */
return 0;
}
@@ -1950,11 +1975,9 @@ static int em28xx_v4l2_close(struct file *filp)
mutex_lock(&dev->lock);
if (dev->users == 1) {
- /* free the remaining resources if device is disconnected */
- if (dev->disconnected) {
- kfree(dev->alt_max_pkt_size_isoc);
+ /* No sense to try to write to the device */
+ if (dev->disconnected)
goto exit;
- }
/* Save some power by putting tuner to sleep */
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
@@ -1975,6 +1998,8 @@ static int em28xx_v4l2_close(struct file *filp)
exit:
dev->users--;
mutex_unlock(&dev->lock);
+ kref_put(&dev->ref, em28xx_free_device);
+
return 0;
}
@@ -2273,7 +2298,8 @@ static int em28xx_v4l2_init(struct em28xx *dev)
}
em28xx_tuner_setup(dev);
- em28xx_init_camera(dev);
+ if (dev->em28xx_sensor != EM28XX_NOSENSOR)
+ em28xx_init_camera(dev);
/* Configure audio */
ret = em28xx_audio_setup(dev);
@@ -2488,6 +2514,8 @@ static int em28xx_v4l2_init(struct em28xx *dev)
em28xx_info("V4L2 extension successfully initialized\n");
+ kref_get(&dev->ref);
+
mutex_unlock(&dev->lock);
return 0;
@@ -2504,6 +2532,8 @@ static struct em28xx_ops v4l2_ops = {
.name = "Em28xx v4l2 Extension",
.init = em28xx_v4l2_init,
.fini = em28xx_v4l2_fini,
+ .suspend = em28xx_v4l2_suspend,
+ .resume = em28xx_v4l2_resume,
};
static int __init em28xx_video_register(void)
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 32d8a4bb7961..2051fc9fb932 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -32,6 +32,7 @@
#include <linux/workqueue.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
+#include <linux/kref.h>
#include <linux/videodev2.h>
#include <media/videobuf2-vmalloc.h>
@@ -104,6 +105,7 @@
#define EM2882_BOARD_PINNACLE_HYBRID_PRO_330E 56
#define EM2883_BOARD_KWORLD_HYBRID_330U 57
#define EM2820_BOARD_COMPRO_VIDEOMATE_FORYOU 58
+#define EM2874_BOARD_PCTV_HD_MINI_80E 59
#define EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 60
#define EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2 61
#define EM2820_BOARD_GADMEI_TVR200 62
@@ -137,6 +139,7 @@
#define EM2874_BOARD_KWORLD_UB435Q_V2 90
#define EM2765_BOARD_SPEEDLINK_VAD_LAPLACE 91
#define EM28178_BOARD_PCTV_461E 92
+#define EM2874_BOARD_KWORLD_UB435Q_V3 93
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -399,6 +402,7 @@ enum em28xx_adecoder {
enum em28xx_led_role {
EM28XX_LED_ANALOG_CAPTURING = 0,
+ EM28XX_LED_DIGITAL_CAPTURING,
EM28XX_LED_ILLUMINATION,
EM28XX_NUM_LED_ROLES, /* must be the last */
};
@@ -533,9 +537,10 @@ struct em28xx_i2c_bus {
enum em28xx_i2c_algo_type algo_type;
};
-
/* main device struct */
struct em28xx {
+ struct kref ref;
+
/* generic device properties */
char name[30]; /* name (including minor) of the device */
int model; /* index in the device_data struct */
@@ -707,12 +712,16 @@ struct em28xx {
struct em28xx_dvb *dvb;
};
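+/* retrieve the em28xx device that embeds the given kref */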
+#define kref_to_dev(d) container_of(d, struct em28xx, ref)
+
struct em28xx_ops {
struct list_head next;
char *name;
int id;
int (*init)(struct em28xx *);
int (*fini)(struct em28xx *);
+ int (*suspend)(struct em28xx *);
+ int (*resume)(struct em28xx *);
};
/* Provided by em28xx-i2c.c */
@@ -758,13 +767,15 @@ int em28xx_register_extension(struct em28xx_ops *dev);
void em28xx_unregister_extension(struct em28xx_ops *dev);
void em28xx_init_extension(struct em28xx *dev);
void em28xx_close_extension(struct em28xx *dev);
+int em28xx_suspend_extension(struct em28xx *dev);
+int em28xx_resume_extension(struct em28xx *dev);
/* Provided by em28xx-cards.c */
extern struct em28xx_board em28xx_boards[];
extern struct usb_device_id em28xx_id_table[];
int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl);
-void em28xx_release_resources(struct em28xx *dev);
+void em28xx_free_device(struct kref *ref);
/* Provided by em28xx-camera.c */
int em28xx_detect_sensor(struct em28xx *dev);
diff --git a/drivers/media/usb/gspca/kinect.c b/drivers/media/usb/gspca/kinect.c
index 3773a8a745df..081f05162809 100644
--- a/drivers/media/usb/gspca/kinect.c
+++ b/drivers/media/usb/gspca/kinect.c
@@ -155,10 +155,11 @@ static int send_cmd(struct gspca_dev *gspca_dev, uint16_t cmd, void *cmdbuf,
do {
actual_len = kinect_read(udev, ibuf, 0x200);
} while (actual_len == 0);
- PDEBUG(D_USBO, "Control reply: %d", res);
+ PDEBUG(D_USBO, "Control reply: %d", actual_len);
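+ /* a reply shorter than the response header cannot be parsed */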
if (actual_len < sizeof(*rhdr)) {
- pr_err("send_cmd: Input control transfer failed (%d)\n", res);
- return res;
+ pr_err("send_cmd: Input control transfer failed (%d)\n",
+ actual_len);
+ return actual_len < 0 ? actual_len : -EREMOTEIO;
}
actual_len -= sizeof(*rhdr);
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index 2a38621cf718..41a9a892f79c 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -2359,6 +2359,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)},
{USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)},
+ {USB_DEVICE(0x0458, 0x7045), SN9C20X(MT9M112, 0x5d, LED_REVERSE)},
{USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)},
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
index bf3e5c317a26..e60cbb3aa609 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
@@ -178,7 +178,7 @@ static int vv6410_stop(struct sd *sd)
PDEBUG(D_STREAM, "Halting stream");
- return (err < 0) ? err : 0;
+ return 0;
}
static int vv6410_dump(struct sd *sd)
diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
index 640c2fe760b3..5fcd1eec2004 100644
--- a/drivers/media/usb/gspca/topro.c
+++ b/drivers/media/usb/gspca/topro.c
@@ -4631,8 +4631,16 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
}
data++;
len--;
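+ /* need at least 2 bytes to check for the JPEG SOI marker (0xff 0xd8) */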
+ if (len < 2) {
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
if (*data == 0xff && data[1] == 0xd8) {
/*fixme: there may be information in the 4 high bits*/
+ if (len < 7) {
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
if ((data[6] & 0x0f) != sd->quality)
set_dqt(gspca_dev, data[6] & 0x0f);
gspca_frame_add(gspca_dev, FIRST_PACKET,
@@ -4672,7 +4680,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_dev->last_packet_type = DISCARD_PACKET;
break;
case 0xcc:
- if (data[1] != 0xff || data[2] != 0xd8)
+ if (len >= 3 && (data[1] != 0xff || data[2] != 0xd8))
gspca_frame_add(gspca_dev, INTER_PACKET,
data + 1, len - 1);
else
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index abf365ab025d..84a6720b1d00 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -614,17 +614,20 @@ static int buffer_prepare(struct vb2_buffer *vb)
return 0;
}
-static int buffer_finish(struct vb2_buffer *vb)
+static void buffer_finish(struct vb2_buffer *vb)
{
struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
- /*
- * Application has called dqbuf and is getting back a buffer we've
- * filled, take the pwc data we've stored in buf->data and decompress
- * it into a usable format, storing the result in the vb2_buffer
- */
- return pwc_decompress(pdev, buf);
+ if (vb->state == VB2_BUF_STATE_DONE) {
+ /*
+ * Application has called dqbuf and is getting back a buffer
+ * we've filled, take the pwc data we've stored in buf->data
+ * and decompress it into a usable format, storing the result
+ * in the vb2_buffer.
+ */
+ pwc_decompress(pdev, buf);
+ }
}
static void buffer_cleanup(struct vb2_buffer *vb)
@@ -1001,7 +1004,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->vb_queue.buf_struct_size = sizeof(struct pwc_frame_buf);
pdev->vb_queue.ops = &pwc_vb_queue_ops;
pdev->vb_queue.mem_ops = &vb2_vmalloc_memops;
- pdev->vb_queue.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ pdev->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
rc = vb2_queue_init(&pdev->vb_queue);
if (rc < 0) {
PWC_ERROR("Oops, could not initialize vb2 queue.\n");
diff --git a/drivers/media/usb/s2255/Kconfig b/drivers/media/usb/s2255/Kconfig
index 7e8ee1f864ab..8c3fceef9a09 100644
--- a/drivers/media/usb/s2255/Kconfig
+++ b/drivers/media/usb/s2255/Kconfig
@@ -1,7 +1,7 @@
config USB_S2255
tristate "USB Sensoray 2255 video capture device"
depends on VIDEO_V4L2
- select VIDEOBUF_VMALLOC
+ select VIDEOBUF2_VMALLOC
default n
help
Say Y here if you want support for the Sensoray 2255 USB device.
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 6bc9b8e19e20..1d4ba2b80490 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1,7 +1,7 @@
/*
* s2255drv.c - a driver for the Sensoray 2255 USB video capture device
*
- * Copyright (C) 2007-2013 by Sensoray Company Inc.
+ * Copyright (C) 2007-2014 by Sensoray Company Inc.
* Dean Anderson
*
* Some video buffer code based on vivi driver:
@@ -45,14 +45,14 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/usb.h>
-#include <media/videobuf-vmalloc.h>
+#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
-#define S2255_VERSION "1.23.1"
+#define S2255_VERSION "1.25.1"
#define FIRMWARE_FILE_NAME "f2255usb.bin"
/* default JPEG quality */
@@ -69,7 +69,7 @@
#define S2255_DSP_BOOTTIME 800
/* maximum time to wait for firmware to load (ms) */
#define S2255_LOAD_TIMEOUT (5000 + S2255_DSP_BOOTTIME)
-#define S2255_DEF_BUFS 16
+#define S2255_MIN_BUFS 2
#define S2255_SETMODE_TIMEOUT 500
#define S2255_VIDSTATUS_TIMEOUT 350
#define S2255_MARKER_FRAME cpu_to_le32(0x2255DA4AL)
@@ -178,11 +178,6 @@ struct s2255_bufferi {
DEF_FDEC, DEF_BRIGHT, DEF_CONTRAST, DEF_SATURATION, \
DEF_HUE, 0, DEF_USB_BLOCK, 0}
-struct s2255_dmaqueue {
- struct list_head active;
- struct s2255_dev *dev;
-};
-
/* for firmware loading, fw_state */
#define S2255_FW_NOTLOADED 0
#define S2255_FW_LOADED_DSPWAIT 1
@@ -217,12 +212,14 @@ struct s2255_pipeinfo {
struct s2255_fmt; /*forward declaration */
struct s2255_dev;
-struct s2255_channel {
+/* 2255 video channel */
+struct s2255_vc {
+ struct s2255_dev *dev;
struct video_device vdev;
struct v4l2_ctrl_handler hdl;
struct v4l2_ctrl *jpegqual_ctrl;
int resources;
- struct s2255_dmaqueue vidq;
+ struct list_head buf_list;
struct s2255_bufferi buffer;
struct s2255_mode mode;
v4l2_std_id std;
@@ -232,8 +229,6 @@ struct s2255_channel {
struct v4l2_captureparm cap_parm;
int cur_frame;
int last_frame;
-
- int b_acquire;
/* allocated image size */
unsigned long req_image_size;
/* received packet size */
@@ -252,17 +247,22 @@ struct s2255_channel {
int vidstatus_ready;
unsigned int width;
unsigned int height;
+ enum v4l2_field field;
const struct s2255_fmt *fmt;
int idx; /* channel number on device, 0-3 */
+ struct vb2_queue vb_vidq;
+ struct mutex vb_lock; /* streaming lock */
+ spinlock_t qlock;
};
struct s2255_dev {
- struct s2255_channel channel[MAX_CHANNELS];
- struct v4l2_device v4l2_dev;
+ struct s2255_vc vc[MAX_CHANNELS];
+ struct v4l2_device v4l2_dev;
atomic_t num_channels;
int frames;
struct mutex lock; /* channels[].vdev.lock */
+ struct mutex cmdlock; /* protects cmdbuf */
struct usb_device *udev;
struct usb_interface *interface;
u8 read_endpoint;
@@ -272,10 +272,11 @@ struct s2255_dev {
u32 cc; /* current channel */
int frame_ready;
int chn_ready;
- spinlock_t slock;
/* dsp firmware version (f2255usb.bin) */
int dsp_fw_ver;
u16 pid; /* product id */
+#define S2255_CMDBUF_SIZE 512
+ __le32 *cmdbuf;
};
static inline struct s2255_dev *to_s2255_dev(struct v4l2_device *v4l2_dev)
@@ -292,19 +293,10 @@ struct s2255_fmt {
/* buffer for one video frame */
struct s2255_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
- const struct s2255_fmt *fmt;
+ struct vb2_buffer vb;
+ struct list_head list;
};
-struct s2255_fh {
- /* this must be the first field in this struct */
- struct v4l2_fh fh;
- struct s2255_dev *dev;
- struct videobuf_queue vb_vidq;
- enum v4l2_buf_type type;
- struct s2255_channel *channel;
- int resources;
-};
/* current cypress EEPROM firmware version */
#define S2255_CUR_USB_FWVER ((3 << 8) | 12)
@@ -352,15 +344,14 @@ struct s2255_fh {
static unsigned long G_chnmap[MAX_CHANNELS] = {3, 2, 1, 0};
static int debug;
-static int *s2255_debug = &debug;
static int s2255_start_readpipe(struct s2255_dev *dev);
static void s2255_stop_readpipe(struct s2255_dev *dev);
-static int s2255_start_acquire(struct s2255_channel *channel);
-static int s2255_stop_acquire(struct s2255_channel *channel);
-static void s2255_fillbuff(struct s2255_channel *chn, struct s2255_buffer *buf,
+static int s2255_start_acquire(struct s2255_vc *vc);
+static int s2255_stop_acquire(struct s2255_vc *vc);
+static void s2255_fillbuff(struct s2255_vc *vc, struct s2255_buffer *buf,
int jpgsize);
-static int s2255_set_mode(struct s2255_channel *chan, struct s2255_mode *mode);
+static int s2255_set_mode(struct s2255_vc *vc, struct s2255_mode *mode);
static int s2255_board_shutdown(struct s2255_dev *dev);
static void s2255_fwload_start(struct s2255_dev *dev, int reset);
static void s2255_destroy(struct s2255_dev *dev);
@@ -373,19 +364,11 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req,
#define s2255_dev_err(dev, fmt, arg...) \
dev_err(dev, S2255_DRIVER_NAME " - " fmt, ##arg)
-#define dprintk(level, fmt, arg...) \
- do { \
- if (*s2255_debug >= (level)) { \
- printk(KERN_DEBUG S2255_DRIVER_NAME \
- ": " fmt, ##arg); \
- } \
- } while (0)
+#define dprintk(dev, level, fmt, arg...) \
+ v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
static struct usb_driver s2255_driver;
-/* Declare static vars that will be used as parameters */
-static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
-
/* start video number */
static int video_nr = -1; /* /dev/videoN, -1 for autodetect */
@@ -394,8 +377,6 @@ static int jpeg_enable = 1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level(0-100) default 0");
-module_param(vid_limit, int, 0644);
-MODULE_PARM_DESC(vid_limit, "video memory limit(Mb)");
module_param(video_nr, int, 0644);
MODULE_PARM_DESC(video_nr, "start video minor(-1 default autodetect)");
module_param(jpeg_enable, int, 0644);
@@ -444,27 +425,27 @@ static const struct s2255_fmt formats[] = {
}
};
-static int norm_maxw(struct s2255_channel *channel)
+static int norm_maxw(struct s2255_vc *vc)
{
- return (channel->std & V4L2_STD_525_60) ?
+ return (vc->std & V4L2_STD_525_60) ?
LINE_SZ_4CIFS_NTSC : LINE_SZ_4CIFS_PAL;
}
-static int norm_maxh(struct s2255_channel *channel)
+static int norm_maxh(struct s2255_vc *vc)
{
- return (channel->std & V4L2_STD_525_60) ?
+ return (vc->std & V4L2_STD_525_60) ?
(NUM_LINES_1CIFS_NTSC * 2) : (NUM_LINES_1CIFS_PAL * 2);
}
-static int norm_minw(struct s2255_channel *channel)
+static int norm_minw(struct s2255_vc *vc)
{
- return (channel->std & V4L2_STD_525_60) ?
+ return (vc->std & V4L2_STD_525_60) ?
LINE_SZ_1CIFS_NTSC : LINE_SZ_1CIFS_PAL;
}
-static int norm_minh(struct s2255_channel *channel)
+static int norm_minh(struct s2255_vc *vc)
{
- return (channel->std & V4L2_STD_525_60) ?
+ return (vc->std & V4L2_STD_525_60) ?
(NUM_LINES_1CIFS_NTSC) : (NUM_LINES_1CIFS_PAL);
}
@@ -498,7 +479,7 @@ static void planar422p_to_yuv_packed(const unsigned char *in,
static void s2255_reset_dsppower(struct s2255_dev *dev)
{
s2255_vendor_req(dev, 0x40, 0x0000, 0x0001, NULL, 0, 1);
- msleep(10);
+ msleep(20);
s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
msleep(600);
s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1);
@@ -510,9 +491,8 @@ static void s2255_reset_dsppower(struct s2255_dev *dev)
static void s2255_timer(unsigned long user_data)
{
struct s2255_fw *data = (struct s2255_fw *)user_data;
- dprintk(100, "%s\n", __func__);
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
- printk(KERN_ERR "s2255: can't submit urb\n");
+ pr_err("s2255: can't submit urb\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
@@ -532,7 +512,6 @@ static void s2255_fwchunk_complete(struct urb *urb)
struct s2255_fw *data = urb->context;
struct usb_device *udev = urb->dev;
int len;
- dprintk(100, "%s: udev %p urb %p", __func__, udev, urb);
if (urb->status) {
dev_err(&udev->dev, "URB failed with status %d\n", urb->status);
atomic_set(&data->fw_state, S2255_FW_FAILED);
@@ -559,9 +538,6 @@ static void s2255_fwchunk_complete(struct urb *urb)
if (len < CHUNK_SIZE)
memset(data->pfw_data, 0, CHUNK_SIZE);
- dprintk(100, "completed len %d, loaded %d \n", len,
- data->fw_loaded);
-
memcpy(data->pfw_data,
(char *) data->fw->data + data->fw_loaded, len);
@@ -576,36 +552,32 @@ static void s2255_fwchunk_complete(struct urb *urb)
return;
}
data->fw_loaded += len;
- } else {
+ } else
atomic_set(&data->fw_state, S2255_FW_LOADED_DSPWAIT);
- dprintk(100, "%s: firmware upload complete\n", __func__);
- }
return;
}
-static int s2255_got_frame(struct s2255_channel *channel, int jpgsize)
+static int s2255_got_frame(struct s2255_vc *vc, int jpgsize)
{
- struct s2255_dmaqueue *dma_q = &channel->vidq;
struct s2255_buffer *buf;
- struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
+ struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
unsigned long flags = 0;
int rc = 0;
- spin_lock_irqsave(&dev->slock, flags);
- if (list_empty(&dma_q->active)) {
- dprintk(1, "No active queue to serve\n");
+ spin_lock_irqsave(&vc->qlock, flags);
+ if (list_empty(&vc->buf_list)) {
+ dprintk(dev, 1, "No active queue to serve\n");
rc = -1;
goto unlock;
}
- buf = list_entry(dma_q->active.next,
- struct s2255_buffer, vb.queue);
- list_del(&buf->vb.queue);
- v4l2_get_timestamp(&buf->vb.ts);
- s2255_fillbuff(channel, buf, jpgsize);
- wake_up(&buf->vb.done);
- dprintk(2, "%s: [buf/i] [%p/%d]\n", __func__, buf, buf->vb.i);
+ buf = list_entry(vc->buf_list.next,
+ struct s2255_buffer, list);
+ list_del(&buf->list);
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ s2255_fillbuff(vc, buf, jpgsize);
+ dprintk(dev, 2, "%s: [buf] [%p]\n", __func__, buf);
unlock:
- spin_unlock_irqrestore(&dev->slock, flags);
+ spin_unlock_irqrestore(&vc->qlock, flags);
return rc;
}
@@ -615,9 +587,9 @@ static const struct s2255_fmt *format_by_fourcc(int fourcc)
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (-1 == formats[i].fourcc)
continue;
- if (!jpeg_enable && ((formats[i].fourcc == V4L2_PIX_FMT_JPEG) ||
- (formats[i].fourcc == V4L2_PIX_FMT_MJPEG)))
- continue;
+ if (!jpeg_enable && ((formats[i].fourcc == V4L2_PIX_FMT_JPEG) ||
+ (formats[i].fourcc == V4L2_PIX_FMT_MJPEG)))
+ continue;
if (formats[i].fourcc == fourcc)
return formats + i;
}
@@ -632,56 +604,56 @@ static const struct s2255_fmt *format_by_fourcc(int fourcc)
* http://v4l.videotechnology.com/
*
*/
-static void s2255_fillbuff(struct s2255_channel *channel,
+static void s2255_fillbuff(struct s2255_vc *vc,
struct s2255_buffer *buf, int jpgsize)
{
int pos = 0;
const char *tmpbuf;
- char *vbuf = videobuf_to_vmalloc(&buf->vb);
+ char *vbuf = vb2_plane_vaddr(&buf->vb, 0);
unsigned long last_frame;
+ struct s2255_dev *dev = vc->dev;
if (!vbuf)
return;
- last_frame = channel->last_frame;
+ last_frame = vc->last_frame;
if (last_frame != -1) {
tmpbuf =
- (const char *)channel->buffer.frame[last_frame].lpvbits;
- switch (buf->fmt->fourcc) {
+ (const char *)vc->buffer.frame[last_frame].lpvbits;
+ switch (vc->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
planar422p_to_yuv_packed((const unsigned char *)tmpbuf,
- vbuf, buf->vb.width,
- buf->vb.height,
- buf->fmt->fourcc);
+ vbuf, vc->width,
+ vc->height,
+ vc->fmt->fourcc);
break;
case V4L2_PIX_FMT_GREY:
- memcpy(vbuf, tmpbuf, buf->vb.width * buf->vb.height);
+ memcpy(vbuf, tmpbuf, vc->width * vc->height);
break;
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_MJPEG:
- buf->vb.size = jpgsize;
- memcpy(vbuf, tmpbuf, buf->vb.size);
+ buf->vb.v4l2_buf.length = jpgsize;
+ memcpy(vbuf, tmpbuf, jpgsize);
break;
case V4L2_PIX_FMT_YUV422P:
memcpy(vbuf, tmpbuf,
- buf->vb.width * buf->vb.height * 2);
+ vc->width * vc->height * 2);
break;
default:
- printk(KERN_DEBUG "s2255: unknown format?\n");
+ pr_info("s2255: unknown format?\n");
}
- channel->last_frame = -1;
+ vc->last_frame = -1;
} else {
- printk(KERN_ERR "s2255: =======no frame\n");
+ pr_err("s2255: =======no frame\n");
return;
-
}
- dprintk(2, "s2255fill at : Buffer 0x%08lx size= %d\n",
+ dprintk(dev, 2, "s2255fill at : Buffer 0x%08lx size= %d\n",
(unsigned long)vbuf, pos);
/* tell v4l buffer was filled */
-
- buf->vb.field_count = channel->frame_count * 2;
- v4l2_get_timestamp(&buf->vb.ts);
- buf->vb.state = VIDEOBUF_DONE;
+ buf->vb.v4l2_buf.field = vc->field;
+ buf->vb.v4l2_buf.sequence = vc->frame_count;
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
@@ -689,144 +661,82 @@ static void s2255_fillbuff(struct s2255_channel *channel,
Videobuf operations
------------------------------------------------------------------*/
-static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct s2255_fh *fh = vq->priv_data;
- struct s2255_channel *channel = fh->channel;
- *size = channel->width * channel->height * (channel->fmt->depth >> 3);
-
- if (0 == *count)
- *count = S2255_DEF_BUFS;
-
- if (*size * *count > vid_limit * 1024 * 1024)
- *count = (vid_limit * 1024 * 1024) / *size;
-
+ struct s2255_vc *vc = vb2_get_drv_priv(vq);
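+
+ /* enforce a minimum buffer count; one packed plane sized for the current format */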
+ if (*nbuffers < S2255_MIN_BUFS)
+ *nbuffers = S2255_MIN_BUFS;
+ *nplanes = 1;
+ sizes[0] = vc->width * vc->height * (vc->fmt->depth >> 3);
return 0;
}
-static void free_buffer(struct videobuf_queue *vq, struct s2255_buffer *buf)
-{
- dprintk(4, "%s\n", __func__);
-
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
-}
-
-static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int buffer_prepare(struct vb2_buffer *vb)
{
- struct s2255_fh *fh = vq->priv_data;
- struct s2255_channel *channel = fh->channel;
+ struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue);
struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
- int rc;
- int w = channel->width;
- int h = channel->height;
- dprintk(4, "%s, field=%d\n", __func__, field);
- if (channel->fmt == NULL)
+ int w = vc->width;
+ int h = vc->height;
+ unsigned long size;
+
+ dprintk(vc->dev, 4, "%s\n", __func__);
+ if (vc->fmt == NULL)
return -EINVAL;
- if ((w < norm_minw(channel)) ||
- (w > norm_maxw(channel)) ||
- (h < norm_minh(channel)) ||
- (h > norm_maxh(channel))) {
- dprintk(4, "invalid buffer prepare\n");
+ if ((w < norm_minw(vc)) ||
+ (w > norm_maxw(vc)) ||
+ (h < norm_minh(vc)) ||
+ (h > norm_maxh(vc))) {
+ dprintk(vc->dev, 4, "invalid buffer prepare\n");
return -EINVAL;
}
- buf->vb.size = w * h * (channel->fmt->depth >> 3);
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) {
- dprintk(4, "invalid buffer prepare\n");
+ size = w * h * (vc->fmt->depth >> 3);
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(vc->dev, 4, "invalid buffer prepare\n");
return -EINVAL;
}
- buf->fmt = channel->fmt;
- buf->vb.width = w;
- buf->vb.height = h;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
-
- buf->vb.state = VIDEOBUF_PREPARED;
+ vb2_set_plane_payload(&buf->vb, 0, size);
return 0;
-fail:
- free_buffer(vq, buf);
- return rc;
}
-static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static void buffer_queue(struct vb2_buffer *vb)
{
struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
- struct s2255_fh *fh = vq->priv_data;
- struct s2255_channel *channel = fh->channel;
- struct s2255_dmaqueue *vidq = &channel->vidq;
- dprintk(1, "%s\n", __func__);
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags = 0;
+ dprintk(vc->dev, 1, "%s\n", __func__);
+ spin_lock_irqsave(&vc->qlock, flags);
+ list_add_tail(&buf->list, &vc->buf_list);
+ spin_unlock_irqrestore(&vc->qlock, flags);
}
-static void buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
-{
- struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
- struct s2255_fh *fh = vq->priv_data;
- dprintk(4, "%s %d\n", __func__, fh->channel->idx);
- free_buffer(vq, buf);
-}
+static int start_streaming(struct vb2_queue *vq, unsigned int count);
+static int stop_streaming(struct vb2_queue *vq);
-static struct videobuf_queue_ops s2255_video_qops = {
- .buf_setup = buffer_setup,
+static struct vb2_ops s2255_video_qops = {
+ .queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
-
-static int res_get(struct s2255_fh *fh)
-{
- struct s2255_channel *channel = fh->channel;
- /* is it free? */
- if (channel->resources)
- return 0; /* no, someone else uses it */
- /* it's free, grab it */
- channel->resources = 1;
- fh->resources = 1;
- dprintk(1, "s2255: res: get\n");
- return 1;
-}
-
-static int res_locked(struct s2255_fh *fh)
-{
- return fh->channel->resources;
-}
-
-static int res_check(struct s2255_fh *fh)
-{
- return fh->resources;
-}
-
-
-static void res_free(struct s2255_fh *fh)
-{
- struct s2255_channel *channel = fh->channel;
- channel->resources = 0;
- fh->resources = 0;
- dprintk(1, "res: put\n");
-}
-
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct s2255_fh *fh = file->private_data;
- struct s2255_dev *dev = fh->dev;
+ struct s2255_vc *vc = video_drvdata(file);
+ struct s2255_dev *dev = vc->dev;
strlcpy(cap->driver, "s2255", sizeof(cap->driver));
strlcpy(cap->card, "s2255", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -841,7 +751,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (!jpeg_enable && ((formats[index].fourcc == V4L2_PIX_FMT_JPEG) ||
(formats[index].fourcc == V4L2_PIX_FMT_MJPEG)))
return -EINVAL;
- dprintk(4, "name %s\n", formats[index].name);
strlcpy(f->description, formats[index].name, sizeof(f->description));
f->pixelformat = formats[index].fourcc;
return 0;
@@ -850,19 +759,18 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct s2255_fh *fh = priv;
- struct s2255_channel *channel = fh->channel;
- int is_ntsc = channel->std & V4L2_STD_525_60;
+ struct s2255_vc *vc = video_drvdata(file);
+ int is_ntsc = vc->std & V4L2_STD_525_60;
- f->fmt.pix.width = channel->width;
- f->fmt.pix.height = channel->height;
+ f->fmt.pix.width = vc->width;
+ f->fmt.pix.height = vc->height;
if (f->fmt.pix.height >=
(is_ntsc ? NUM_LINES_1CIFS_NTSC : NUM_LINES_1CIFS_PAL) * 2)
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
else
f->fmt.pix.field = V4L2_FIELD_TOP;
- f->fmt.pix.pixelformat = channel->fmt->fourcc;
- f->fmt.pix.bytesperline = f->fmt.pix.width * (channel->fmt->depth >> 3);
+ f->fmt.pix.pixelformat = vc->fmt->fourcc;
+ f->fmt.pix.bytesperline = f->fmt.pix.width * (vc->fmt->depth >> 3);
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
f->fmt.pix.priv = 0;
@@ -874,9 +782,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
{
const struct s2255_fmt *fmt;
enum v4l2_field field;
- struct s2255_fh *fh = priv;
- struct s2255_channel *channel = fh->channel;
- int is_ntsc = channel->std & V4L2_STD_525_60;
+ struct s2255_vc *vc = video_drvdata(file);
+ int is_ntsc = vc->std & V4L2_STD_525_60;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -885,7 +792,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
field = f->fmt.pix.field;
- dprintk(50, "%s NTSC: %d suggested width: %d, height: %d\n",
+ dprintk(vc->dev, 50, "%s NTSC: %d suggested width: %d, height: %d\n",
__func__, is_ntsc, f->fmt.pix.width, f->fmt.pix.height);
if (is_ntsc) {
/* NTSC */
@@ -927,7 +834,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
f->fmt.pix.priv = 0;
- dprintk(50, "%s: set width %d height %d field %d\n", __func__,
+ dprintk(vc->dev, 50, "%s: set width %d height %d field %d\n", __func__,
f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
return 0;
}
@@ -935,14 +842,13 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct s2255_fh *fh = priv;
- struct s2255_channel *channel = fh->channel;
+ struct s2255_vc *vc = video_drvdata(file);
const struct s2255_fmt *fmt;
- struct videobuf_queue *q = &fh->vb_vidq;
+ struct vb2_queue *q = &vc->vb_vidq;
struct s2255_mode mode;
int ret;
- ret = vidioc_try_fmt_vid_cap(file, fh, f);
+ ret = vidioc_try_fmt_vid_cap(file, vc, f);
if (ret < 0)
return ret;
@@ -952,28 +858,19 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
if (fmt == NULL)
return -EINVAL;
- mutex_lock(&q->vb_lock);
-
- if (videobuf_queue_is_busy(&fh->vb_vidq)) {
- dprintk(1, "queue busy\n");
- ret = -EBUSY;
- goto out_s_fmt;
+ if (vb2_is_busy(q)) {
+ dprintk(vc->dev, 1, "queue busy\n");
+ return -EBUSY;
}
- if (res_locked(fh)) {
- dprintk(1, "%s: channel busy\n", __func__);
- ret = -EBUSY;
- goto out_s_fmt;
- }
- mode = channel->mode;
- channel->fmt = fmt;
- channel->width = f->fmt.pix.width;
- channel->height = f->fmt.pix.height;
- fh->vb_vidq.field = f->fmt.pix.field;
- fh->type = f->type;
- if (channel->width > norm_minw(channel)) {
- if (channel->height > norm_minh(channel)) {
- if (channel->cap_parm.capturemode &
+ mode = vc->mode;
+ vc->fmt = fmt;
+ vc->width = f->fmt.pix.width;
+ vc->height = f->fmt.pix.height;
+ vc->field = f->fmt.pix.field;
+ if (vc->width > norm_minw(vc)) {
+ if (vc->height > norm_minh(vc)) {
+ if (vc->cap_parm.capturemode &
V4L2_MODE_HIGHQUALITY)
mode.scale = SCALE_4CIFSI;
else
@@ -985,7 +882,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
mode.scale = SCALE_1CIFS;
}
/* color mode */
- switch (channel->fmt->fourcc) {
+ switch (vc->fmt->fourcc) {
case V4L2_PIX_FMT_GREY:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_Y8;
@@ -994,7 +891,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
case V4L2_PIX_FMT_MJPEG:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_JPG;
- mode.color |= (channel->jpegqual << 8);
+ mode.color |= (vc->jpegqual << 8);
break;
case V4L2_PIX_FMT_YUV422P:
mode.color &= ~MASK_COLOR;
@@ -1007,52 +904,17 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
mode.color |= COLOR_YUVPK;
break;
}
- if ((mode.color & MASK_COLOR) != (channel->mode.color & MASK_COLOR))
+ if ((mode.color & MASK_COLOR) != (vc->mode.color & MASK_COLOR))
mode.restart = 1;
- else if (mode.scale != channel->mode.scale)
+ else if (mode.scale != vc->mode.scale)
mode.restart = 1;
- else if (mode.format != channel->mode.format)
+ else if (mode.format != vc->mode.format)
mode.restart = 1;
- channel->mode = mode;
- (void) s2255_set_mode(channel, &mode);
- ret = 0;
-out_s_fmt:
- mutex_unlock(&q->vb_lock);
- return ret;
-}
-
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- int rc;
- struct s2255_fh *fh = priv;
- rc = videobuf_reqbufs(&fh->vb_vidq, p);
- return rc;
-}
-
-static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- int rc;
- struct s2255_fh *fh = priv;
- rc = videobuf_querybuf(&fh->vb_vidq, p);
- return rc;
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- int rc;
- struct s2255_fh *fh = priv;
- rc = videobuf_qbuf(&fh->vb_vidq, p);
- return rc;
+ vc->mode = mode;
+ (void) s2255_set_mode(vc, &mode);
+ return 0;
}
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- int rc;
- struct s2255_fh *fh = priv;
- rc = videobuf_dqbuf(&fh->vb_vidq, p, file->f_flags & O_NONBLOCK);
- return rc;
-}
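
The converted vidioc_s_fmt_vid_cap() above replaces the old vb_lock/res_locked dance with a single vb2_is_busy() test: a format change is refused once buffers have been allocated through REQBUFS. A minimal sketch of that guard, with hypothetical names (my_dev, my_try_fmt) used purely for illustration:

	static int my_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
	{
		struct my_dev *dev = video_drvdata(file);
		int ret = my_try_fmt(file, priv, f);	/* clamp to a supported size first */

		if (ret)
			return ret;
		/* vb2_is_busy() is true once buffers exist for this queue */
		if (vb2_is_busy(&dev->queue))
			return -EBUSY;
		dev->format = f->fmt.pix;	/* safe: no buffers, nothing streaming */
		return 0;
	}
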
/* write to the configuration pipe, synchronously */
static int s2255_write_config(struct usb_device *udev, unsigned char *pbuf,
@@ -1150,201 +1012,166 @@ static void s2255_print_cfg(struct s2255_dev *sdev, struct s2255_mode *mode)
* When the restart parameter is set, we sleep for ONE frame to allow the
* DSP time to get the new frame
*/
-static int s2255_set_mode(struct s2255_channel *channel,
+static int s2255_set_mode(struct s2255_vc *vc,
struct s2255_mode *mode)
{
int res;
- __le32 *buffer;
unsigned long chn_rev;
- struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
+ struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
int i;
+ __le32 *buffer = dev->cmdbuf;
- chn_rev = G_chnmap[channel->idx];
- dprintk(3, "%s channel: %d\n", __func__, channel->idx);
+ mutex_lock(&dev->cmdlock);
+ chn_rev = G_chnmap[vc->idx];
+ dprintk(dev, 3, "%s channel: %d\n", __func__, vc->idx);
/* if JPEG, set the quality */
if ((mode->color & MASK_COLOR) == COLOR_JPG) {
mode->color &= ~MASK_COLOR;
mode->color |= COLOR_JPG;
mode->color &= ~MASK_JPG_QUALITY;
- mode->color |= (channel->jpegqual << 8);
+ mode->color |= (vc->jpegqual << 8);
}
/* save the mode */
- channel->mode = *mode;
- channel->req_image_size = get_transfer_size(mode);
- dprintk(1, "%s: reqsize %ld\n", __func__, channel->req_image_size);
- buffer = kzalloc(512, GFP_KERNEL);
- if (buffer == NULL) {
- dev_err(&dev->udev->dev, "out of mem\n");
- return -ENOMEM;
- }
+ vc->mode = *mode;
+ vc->req_image_size = get_transfer_size(mode);
+ dprintk(dev, 1, "%s: reqsize %ld\n", __func__, vc->req_image_size);
/* set the mode */
buffer[0] = IN_DATA_TOKEN;
buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_SET_MODE;
for (i = 0; i < sizeof(struct s2255_mode) / sizeof(u32); i++)
- buffer[3 + i] = cpu_to_le32(((u32 *)&channel->mode)[i]);
- channel->setmode_ready = 0;
+ buffer[3 + i] = cpu_to_le32(((u32 *)&vc->mode)[i]);
+ vc->setmode_ready = 0;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (debug)
s2255_print_cfg(dev, mode);
- kfree(buffer);
/* wait at least 3 frames before continuing */
if (mode->restart) {
- wait_event_timeout(channel->wait_setmode,
- (channel->setmode_ready != 0),
+ wait_event_timeout(vc->wait_setmode,
+ (vc->setmode_ready != 0),
msecs_to_jiffies(S2255_SETMODE_TIMEOUT));
- if (channel->setmode_ready != 1) {
- printk(KERN_DEBUG "s2255: no set mode response\n");
+ if (vc->setmode_ready != 1) {
+ dprintk(dev, 0, "s2255: no set mode response\n");
res = -EFAULT;
}
}
/* clear the restart flag */
- channel->mode.restart = 0;
- dprintk(1, "%s chn %d, result: %d\n", __func__, channel->idx, res);
+ vc->mode.restart = 0;
+ dprintk(dev, 1, "%s chn %d, result: %d\n", __func__, vc->idx, res);
+ mutex_unlock(&dev->cmdlock);
return res;
}
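
With dev->cmdbuf allocated once at probe time, s2255_set_mode() and the status/start/stop helpers below all share the same 512-byte buffer and serialize on dev->cmdlock. A condensed sketch of the common pattern, using only symbols that appear in this patch:

	mutex_lock(&dev->cmdlock);		/* one command in flight per device */
	buffer = dev->cmdbuf;			/* preallocated, 512 bytes */
	buffer[0] = IN_DATA_TOKEN;		/* header token */
	buffer[1] = cpu_to_le32(chn_rev);	/* channel, remapped through G_chnmap[] */
	buffer[2] = CMD_SET_MODE;		/* or CMD_STATUS / CMD_START / CMD_STOP */
	/* command-specific payload words follow from index 3 onward */
	res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
	/* commands that elicit a reply then block briefly, e.g.: */
	wait_event_timeout(vc->wait_setmode, vc->setmode_ready != 0,
			   msecs_to_jiffies(S2255_SETMODE_TIMEOUT));
	mutex_unlock(&dev->cmdlock);
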
-static int s2255_cmd_status(struct s2255_channel *channel, u32 *pstatus)
+static int s2255_cmd_status(struct s2255_vc *vc, u32 *pstatus)
{
int res;
- __le32 *buffer;
u32 chn_rev;
- struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
- chn_rev = G_chnmap[channel->idx];
- dprintk(4, "%s chan %d\n", __func__, channel->idx);
- buffer = kzalloc(512, GFP_KERNEL);
- if (buffer == NULL) {
- dev_err(&dev->udev->dev, "out of mem\n");
- return -ENOMEM;
- }
+ struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
+ __le32 *buffer = dev->cmdbuf;
+
+ mutex_lock(&dev->cmdlock);
+ chn_rev = G_chnmap[vc->idx];
+ dprintk(dev, 4, "%s chan %d\n", __func__, vc->idx);
/* form the get vid status command */
buffer[0] = IN_DATA_TOKEN;
buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_STATUS;
*pstatus = 0;
- channel->vidstatus_ready = 0;
+ vc->vidstatus_ready = 0;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
- kfree(buffer);
- wait_event_timeout(channel->wait_vidstatus,
- (channel->vidstatus_ready != 0),
+ wait_event_timeout(vc->wait_vidstatus,
+ (vc->vidstatus_ready != 0),
msecs_to_jiffies(S2255_VIDSTATUS_TIMEOUT));
- if (channel->vidstatus_ready != 1) {
- printk(KERN_DEBUG "s2255: no vidstatus response\n");
+ if (vc->vidstatus_ready != 1) {
+ dprintk(dev, 0, "s2255: no vidstatus response\n");
res = -EFAULT;
}
- *pstatus = channel->vidstatus;
- dprintk(4, "%s, vid status %d\n", __func__, *pstatus);
+ *pstatus = vc->vidstatus;
+ dprintk(dev, 4, "%s, vid status %d\n", __func__, *pstatus);
+ mutex_unlock(&dev->cmdlock);
return res;
}
-static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
- int res;
- struct s2255_fh *fh = priv;
- struct s2255_dev *dev = fh->dev;
- struct s2255_channel *channel = fh->channel;
+ struct s2255_vc *vc = vb2_get_drv_priv(vq);
int j;
- dprintk(4, "%s\n", __func__);
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- dev_err(&dev->udev->dev, "invalid fh type0\n");
- return -EINVAL;
- }
- if (i != fh->type) {
- dev_err(&dev->udev->dev, "invalid fh type1\n");
- return -EINVAL;
- }
- if (!res_get(fh)) {
- s2255_dev_err(&dev->udev->dev, "stream busy\n");
- return -EBUSY;
- }
- channel->last_frame = -1;
- channel->bad_payload = 0;
- channel->cur_frame = 0;
- channel->frame_count = 0;
+ vc->last_frame = -1;
+ vc->bad_payload = 0;
+ vc->cur_frame = 0;
+ vc->frame_count = 0;
for (j = 0; j < SYS_FRAMES; j++) {
- channel->buffer.frame[j].ulState = S2255_READ_IDLE;
- channel->buffer.frame[j].cur_size = 0;
+ vc->buffer.frame[j].ulState = S2255_READ_IDLE;
+ vc->buffer.frame[j].cur_size = 0;
}
- res = videobuf_streamon(&fh->vb_vidq);
- if (res == 0) {
- s2255_start_acquire(channel);
- channel->b_acquire = 1;
- } else
- res_free(fh);
-
- return res;
+ return s2255_start_acquire(vc);
}
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+/* abort streaming and wait for last buffer */
+static int stop_streaming(struct vb2_queue *vq)
{
- struct s2255_fh *fh = priv;
- dprintk(4, "%s\n, channel: %d", __func__, fh->channel->idx);
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- printk(KERN_ERR "invalid fh type0\n");
- return -EINVAL;
+ struct s2255_vc *vc = vb2_get_drv_priv(vq);
+ struct s2255_buffer *buf, *node;
+ unsigned long flags;
+ (void) s2255_stop_acquire(vc);
+ spin_lock_irqsave(&vc->qlock, flags);
+ list_for_each_entry_safe(buf, node, &vc->buf_list, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ dprintk(vc->dev, 2, "[%p/%d] done\n",
+ buf, buf->vb.v4l2_buf.index);
}
- if (i != fh->type) {
- printk(KERN_ERR "invalid type i\n");
- return -EINVAL;
- }
- s2255_stop_acquire(fh->channel);
- videobuf_streamoff(&fh->vb_vidq);
- res_free(fh);
+ spin_unlock_irqrestore(&vc->qlock, flags);
return 0;
}
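
stop_streaming() above drains vc->buf_list under vc->qlock and marks every pending buffer as errored. That list is filled by the queue's buf_queue callback, which is not part of this hunk; a hypothetical sketch of the counterpart, assuming the same qlock/buf_list pairing and the buf->list / buf->vb members used above:

	static void sketch_buf_queue(struct vb2_buffer *vb)
	{
		struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue);
		struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
		unsigned long flags;

		spin_lock_irqsave(&vc->qlock, flags);
		list_add_tail(&buf->list, &vc->buf_list);	/* handed back in stop_streaming */
		spin_unlock_irqrestore(&vc->qlock, flags);
	}
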
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id i)
{
- struct s2255_fh *fh = priv;
+ struct s2255_vc *vc = video_drvdata(file);
struct s2255_mode mode;
- struct videobuf_queue *q = &fh->vb_vidq;
- struct s2255_channel *channel = fh->channel;
- int ret = 0;
+ struct vb2_queue *q = &vc->vb_vidq;
- mutex_lock(&q->vb_lock);
- if (res_locked(fh)) {
- dprintk(1, "can't change standard after started\n");
- ret = -EBUSY;
- goto out_s_std;
- }
- mode = fh->channel->mode;
+ /*
+ * Changing the standard implies a format change, which is not allowed
+ * while buffers for use with streaming have already been allocated.
+ */
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ mode = vc->mode;
if (i & V4L2_STD_525_60) {
- dprintk(4, "%s 60 Hz\n", __func__);
+ dprintk(vc->dev, 4, "%s 60 Hz\n", __func__);
/* if changing format, reset frame decimation/intervals */
if (mode.format != FORMAT_NTSC) {
mode.restart = 1;
mode.format = FORMAT_NTSC;
mode.fdec = FDEC_1;
- channel->width = LINE_SZ_4CIFS_NTSC;
- channel->height = NUM_LINES_4CIFS_NTSC * 2;
+ vc->width = LINE_SZ_4CIFS_NTSC;
+ vc->height = NUM_LINES_4CIFS_NTSC * 2;
}
} else if (i & V4L2_STD_625_50) {
- dprintk(4, "%s 50 Hz\n", __func__);
+ dprintk(vc->dev, 4, "%s 50 Hz\n", __func__);
if (mode.format != FORMAT_PAL) {
mode.restart = 1;
mode.format = FORMAT_PAL;
mode.fdec = FDEC_1;
- channel->width = LINE_SZ_4CIFS_PAL;
- channel->height = NUM_LINES_4CIFS_PAL * 2;
+ vc->width = LINE_SZ_4CIFS_PAL;
+ vc->height = NUM_LINES_4CIFS_PAL * 2;
}
- } else {
- ret = -EINVAL;
- goto out_s_std;
- }
- fh->channel->std = i;
+ } else
+ return -EINVAL;
+ vc->std = i;
if (mode.restart)
- s2255_set_mode(fh->channel, &mode);
-out_s_std:
- mutex_unlock(&q->vb_lock);
- return ret;
+ s2255_set_mode(vc, &mode);
+ return 0;
}
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *i)
{
- struct s2255_fh *fh = priv;
+ struct s2255_vc *vc = video_drvdata(file);
- *i = fh->channel->std;
+ *i = vc->std;
return 0;
}
@@ -1358,10 +1185,10 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *i)
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
- struct s2255_fh *fh = priv;
- struct s2255_dev *dev = fh->dev;
- struct s2255_channel *channel = fh->channel;
+ struct s2255_vc *vc = video_drvdata(file);
+ struct s2255_dev *dev = vc->dev;
u32 status = 0;
+
if (inp->index != 0)
return -EINVAL;
inp->type = V4L2_INPUT_TYPE_CAMERA;
@@ -1369,8 +1196,9 @@ static int vidioc_enum_input(struct file *file, void *priv,
inp->status = 0;
if (dev->dsp_fw_ver >= S2255_MIN_DSP_STATUS) {
int rc;
- rc = s2255_cmd_status(fh->channel, &status);
- dprintk(4, "s2255_cmd_status rc: %d status %x\n", rc, status);
+ rc = s2255_cmd_status(vc, &status);
+ dprintk(dev, 4, "s2255_cmd_status rc: %d status %x\n",
+ rc, status);
if (rc == 0)
inp->status = (status & 0x01) ? 0
: V4L2_IN_ST_NO_SIGNAL;
@@ -1381,7 +1209,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
strlcpy(inp->name, "Composite", sizeof(inp->name));
break;
case 0x2257:
- strlcpy(inp->name, (channel->idx < 2) ? "Composite" : "S-Video",
+ strlcpy(inp->name, (vc->idx < 2) ? "Composite" : "S-Video",
sizeof(inp->name));
break;
}
@@ -1402,13 +1230,10 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int s2255_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct s2255_channel *channel =
- container_of(ctrl->handler, struct s2255_channel, hdl);
+ struct s2255_vc *vc =
+ container_of(ctrl->handler, struct s2255_vc, hdl);
struct s2255_mode mode;
-
- mode = channel->mode;
- dprintk(4, "%s\n", __func__);
-
+ mode = vc->mode;
/* update the mode to the corresponding value */
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -1428,7 +1253,7 @@ static int s2255_s_ctrl(struct v4l2_ctrl *ctrl)
mode.color |= !ctrl->val << 16;
break;
case V4L2_CID_JPEG_COMPRESSION_QUALITY:
- channel->jpegqual = ctrl->val;
+ vc->jpegqual = ctrl->val;
return 0;
default:
return -EINVAL;
@@ -1438,48 +1263,48 @@ static int s2255_s_ctrl(struct v4l2_ctrl *ctrl)
some V4L programs restart stream unnecessarily
after an s_ctrl.
*/
- s2255_set_mode(channel, &mode);
+ s2255_set_mode(vc, &mode);
return 0;
}
static int vidioc_g_jpegcomp(struct file *file, void *priv,
struct v4l2_jpegcompression *jc)
{
- struct s2255_fh *fh = priv;
- struct s2255_channel *channel = fh->channel;
+ struct s2255_vc *vc = video_drvdata(file);
memset(jc, 0, sizeof(*jc));
- jc->quality = channel->jpegqual;
- dprintk(2, "%s: quality %d\n", __func__, jc->quality);
+ jc->quality = vc->jpegqual;
+ dprintk(vc->dev, 2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
static int vidioc_s_jpegcomp(struct file *file, void *priv,
const struct v4l2_jpegcompression *jc)
{
- struct s2255_fh *fh = priv;
- struct s2255_channel *channel = fh->channel;
+ struct s2255_vc *vc = video_drvdata(file);
+
if (jc->quality < 0 || jc->quality > 100)
return -EINVAL;
- v4l2_ctrl_s_ctrl(channel->jpegqual_ctrl, jc->quality);
- dprintk(2, "%s: quality %d\n", __func__, jc->quality);
+ v4l2_ctrl_s_ctrl(vc->jpegqual_ctrl, jc->quality);
+ dprintk(vc->dev, 2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
static int vidioc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
- struct s2255_fh *fh = priv;
__u32 def_num, def_dem;
- struct s2255_channel *channel = fh->channel;
+ struct s2255_vc *vc = video_drvdata(file);
+
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- sp->parm.capture.capturemode = channel->cap_parm.capturemode;
- def_num = (channel->mode.format == FORMAT_NTSC) ? 1001 : 1000;
- def_dem = (channel->mode.format == FORMAT_NTSC) ? 30000 : 25000;
+ sp->parm.capture.capturemode = vc->cap_parm.capturemode;
+ sp->parm.capture.readbuffers = S2255_MIN_BUFS;
+ def_num = (vc->mode.format == FORMAT_NTSC) ? 1001 : 1000;
+ def_dem = (vc->mode.format == FORMAT_NTSC) ? 30000 : 25000;
sp->parm.capture.timeperframe.denominator = def_dem;
- switch (channel->mode.fdec) {
+ switch (vc->mode.fdec) {
default:
case FDEC_1:
sp->parm.capture.timeperframe.numerator = def_num;
@@ -1494,7 +1319,8 @@ static int vidioc_g_parm(struct file *file, void *priv,
sp->parm.capture.timeperframe.numerator = def_num * 5;
break;
}
- dprintk(4, "%s capture mode, %d timeperframe %d/%d\n", __func__,
+ dprintk(vc->dev, 4, "%s capture mode, %d timeperframe %d/%d\n",
+ __func__,
sp->parm.capture.capturemode,
sp->parm.capture.timeperframe.numerator,
sp->parm.capture.timeperframe.denominator);
@@ -1504,17 +1330,16 @@ static int vidioc_g_parm(struct file *file, void *priv,
static int vidioc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
- struct s2255_fh *fh = priv;
- struct s2255_channel *channel = fh->channel;
+ struct s2255_vc *vc = video_drvdata(file);
struct s2255_mode mode;
int fdec = FDEC_1;
__u32 def_num, def_dem;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- mode = channel->mode;
+ mode = vc->mode;
/* high quality capture mode requires a stream restart */
- if (channel->cap_parm.capturemode
- != sp->parm.capture.capturemode && res_locked(fh))
+ if ((vc->cap_parm.capturemode != sp->parm.capture.capturemode)
+ && vb2_is_streaming(&vc->vb_vidq))
return -EBUSY;
def_num = (mode.format == FORMAT_NTSC) ? 1001 : 1000;
def_dem = (mode.format == FORMAT_NTSC) ? 30000 : 25000;
@@ -1534,8 +1359,9 @@ static int vidioc_s_parm(struct file *file, void *priv,
}
mode.fdec = fdec;
sp->parm.capture.timeperframe.denominator = def_dem;
- s2255_set_mode(channel, &mode);
- dprintk(4, "%s capture mode, %d timeperframe %d/%d, fdec %d\n",
+ sp->parm.capture.readbuffers = S2255_MIN_BUFS;
+ s2255_set_mode(vc, &mode);
+ dprintk(vc->dev, 4, "%s capture mode, %d timeperframe %d/%d, fdec %d\n",
__func__,
sp->parm.capture.capturemode,
sp->parm.capture.timeperframe.numerator,
@@ -1558,9 +1384,8 @@ static const struct v4l2_frmsize_discrete pal_sizes[] = {
static int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fe)
{
- struct s2255_fh *fh = priv;
- struct s2255_channel *channel = fh->channel;
- int is_ntsc = channel->std & V4L2_STD_525_60;
+ struct s2255_vc *vc = video_drvdata(file);
+ int is_ntsc = vc->std & V4L2_STD_525_60;
const struct s2255_fmt *fmt;
if (fe->index >= NUM_SIZE_ENUMS)
@@ -1577,11 +1402,10 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
static int vidioc_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fe)
{
- struct s2255_fh *fh = priv;
- struct s2255_channel *channel = fh->channel;
+ struct s2255_vc *vc = video_drvdata(file);
const struct s2255_fmt *fmt;
const struct v4l2_frmsize_discrete *sizes;
- int is_ntsc = channel->std & V4L2_STD_525_60;
+ int is_ntsc = vc->std & V4L2_STD_525_60;
#define NUM_FRAME_ENUMS 4
int frm_dec[NUM_FRAME_ENUMS] = {1, 2, 3, 5};
int i;
@@ -1604,21 +1428,24 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
fe->type = V4L2_FRMIVAL_TYPE_DISCRETE;
fe->discrete.denominator = is_ntsc ? 30000 : 25000;
fe->discrete.numerator = (is_ntsc ? 1001 : 1000) * frm_dec[fe->index];
- dprintk(4, "%s discrete %d/%d\n", __func__, fe->discrete.numerator,
+ dprintk(vc->dev, 4, "%s discrete %d/%d\n", __func__,
+ fe->discrete.numerator,
fe->discrete.denominator);
return 0;
}
-static int __s2255_open(struct file *file)
+static int s2255_open(struct file *file)
{
- struct video_device *vdev = video_devdata(file);
- struct s2255_channel *channel = video_drvdata(file);
- struct s2255_dev *dev = to_s2255_dev(vdev->v4l2_dev);
- struct s2255_fh *fh;
- enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ struct s2255_vc *vc = video_drvdata(file);
+ struct s2255_dev *dev = vc->dev;
int state;
- dprintk(1, "s2255: open called (dev=%s)\n",
- video_device_node_name(vdev));
+ int rc = 0;
+
+ rc = v4l2_fh_open(file);
+ if (rc != 0)
+ return rc;
+
+ dprintk(dev, 1, "s2255: %s\n", __func__);
state = atomic_read(&dev->fw_data->fw_state);
switch (state) {
case S2255_FW_DISCONNECTING:
@@ -1640,7 +1467,7 @@ static int __s2255_open(struct file *file)
case S2255_FW_LOADED_DSPWAIT:
/* give S2255_LOAD_TIMEOUT time for firmware to load in case
driver loaded and then device immediately opened */
- printk(KERN_INFO "%s waiting for firmware load\n", __func__);
+ pr_info("%s waiting for firmware load\n", __func__);
wait_event_timeout(dev->fw_data->wait_fw,
((atomic_read(&dev->fw_data->fw_state)
== S2255_FW_SUCCESS) ||
@@ -1659,16 +1486,15 @@ static int __s2255_open(struct file *file)
case S2255_FW_SUCCESS:
break;
case S2255_FW_FAILED:
- printk(KERN_INFO "2255 firmware load failed.\n");
+ pr_info("2255 firmware load failed.\n");
return -ENODEV;
case S2255_FW_DISCONNECTING:
- printk(KERN_INFO "%s: disconnecting\n", __func__);
+ pr_info("%s: disconnecting\n", __func__);
return -ENODEV;
case S2255_FW_LOADED_DSPWAIT:
case S2255_FW_NOTLOADED:
- printk(KERN_INFO "%s: firmware not loaded yet"
- "please try again later\n",
- __func__);
+ pr_info("%s: firmware not loaded, please retry\n",
+ __func__);
/*
* Timeout on firmware load means device unusable.
* Set firmware failure state.
@@ -1678,71 +1504,21 @@ static int __s2255_open(struct file *file)
S2255_FW_FAILED);
return -EAGAIN;
default:
- printk(KERN_INFO "%s: unknown state\n", __func__);
+ pr_info("%s: unknown state\n", __func__);
return -EFAULT;
}
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh)
- return -ENOMEM;
- v4l2_fh_init(&fh->fh, vdev);
- v4l2_fh_add(&fh->fh);
- file->private_data = &fh->fh;
- fh->dev = dev;
- fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- fh->channel = channel;
- if (!channel->configured) {
+ if (!vc->configured) {
/* configure channel to default state */
- channel->fmt = &formats[0];
- s2255_set_mode(channel, &channel->mode);
- channel->configured = 1;
- }
- dprintk(1, "%s: dev=%s type=%s\n", __func__,
- video_device_node_name(vdev), v4l2_type_names[type]);
- dprintk(2, "%s: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n", __func__,
- (unsigned long)fh, (unsigned long)dev,
- (unsigned long)&channel->vidq);
- dprintk(4, "%s: list_empty active=%d\n", __func__,
- list_empty(&channel->vidq.active));
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &s2255_video_qops,
- NULL, &dev->slock,
- fh->type,
- V4L2_FIELD_INTERLACED,
- sizeof(struct s2255_buffer),
- fh, vdev->lock);
+ vc->fmt = &formats[0];
+ s2255_set_mode(vc, &vc->mode);
+ vc->configured = 1;
+ }
return 0;
}
-static int s2255_open(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
- int ret;
-
- if (mutex_lock_interruptible(vdev->lock))
- return -ERESTARTSYS;
- ret = __s2255_open(file);
- mutex_unlock(vdev->lock);
- return ret;
-}
-
-static unsigned int s2255_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct s2255_fh *fh = file->private_data;
- struct s2255_dev *dev = fh->dev;
- int rc = v4l2_ctrl_poll(file, wait);
-
- dprintk(100, "%s\n", __func__);
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
- return POLLERR;
- mutex_lock(&dev->lock);
- rc |= videobuf_poll_stream(file, &fh->vb_vidq, wait);
- mutex_unlock(&dev->lock);
- return rc;
-}
-
static void s2255_destroy(struct s2255_dev *dev)
{
+ dprintk(dev, 1, "%s", __func__);
/* board shutdown stops the read pipe if it is running */
s2255_board_shutdown(dev);
/* make sure firmware still not trying to load */
@@ -1760,62 +1536,18 @@ static void s2255_destroy(struct s2255_dev *dev)
mutex_destroy(&dev->lock);
usb_put_dev(dev->udev);
v4l2_device_unregister(&dev->v4l2_dev);
- dprintk(1, "%s", __func__);
+ kfree(dev->cmdbuf);
kfree(dev);
}
-static int s2255_release(struct file *file)
-{
- struct s2255_fh *fh = file->private_data;
- struct s2255_dev *dev = fh->dev;
- struct video_device *vdev = video_devdata(file);
- struct s2255_channel *channel = fh->channel;
- if (!dev)
- return -ENODEV;
- mutex_lock(&dev->lock);
- /* turn off stream */
- if (res_check(fh)) {
- if (channel->b_acquire)
- s2255_stop_acquire(fh->channel);
- videobuf_streamoff(&fh->vb_vidq);
- res_free(fh);
- }
- videobuf_mmap_free(&fh->vb_vidq);
- mutex_unlock(&dev->lock);
- dprintk(1, "%s (dev=%s)\n", __func__, video_device_node_name(vdev));
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
- return 0;
-}
-
-static int s2255_mmap_v4l(struct file *file, struct vm_area_struct *vma)
-{
- struct s2255_fh *fh = file->private_data;
- struct s2255_dev *dev;
- int ret;
-
- if (!fh)
- return -ENODEV;
- dev = fh->dev;
- dprintk(4, "%s, vma=0x%08lx\n", __func__, (unsigned long)vma);
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
- mutex_unlock(&dev->lock);
- dprintk(4, "%s vma start=0x%08lx, size=%ld, ret=%d\n", __func__,
- (unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end - (unsigned long)vma->vm_start, ret);
- return ret;
-}
-
static const struct v4l2_file_operations s2255_fops_v4l = {
.owner = THIS_MODULE,
.open = s2255_open,
- .release = s2255_release,
- .poll = s2255_poll,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
- .mmap = s2255_mmap_v4l,
+ .mmap = vb2_fop_mmap,
+ .read = vb2_fop_read,
};
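
The vb2_fop_* helpers above operate on the queue and lock attached to the video_device in s2255_probe_v4l() below (vc->vdev.queue, vdev->lock), so no per-filehandle buffer state survives the conversion. Roughly, each helper reduces to a shape like the following; this is an illustrative sketch, not the kernel's exact implementation, which also deals with locking and queue ownership:

	static unsigned int sketch_poll(struct file *file, struct poll_table_struct *wait)
	{
		struct video_device *vdev = video_devdata(file);

		return vb2_poll(vdev->queue, file, wait);
	}
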
static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
@@ -1824,17 +1556,17 @@ static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_s_jpegcomp = vidioc_s_jpegcomp,
.vidioc_g_jpegcomp = vidioc_g_jpegcomp,
.vidioc_s_parm = vidioc_s_parm,
@@ -1849,13 +1581,14 @@ static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
static void s2255_video_device_release(struct video_device *vdev)
{
struct s2255_dev *dev = to_s2255_dev(vdev->v4l2_dev);
- struct s2255_channel *channel =
- container_of(vdev, struct s2255_channel, vdev);
+ struct s2255_vc *vc =
+ container_of(vdev, struct s2255_vc, vdev);
- v4l2_ctrl_handler_free(&channel->hdl);
- dprintk(4, "%s, chnls: %d\n", __func__,
+ dprintk(dev, 4, "%s, chnls: %d\n", __func__,
atomic_read(&dev->num_channels));
+ v4l2_ctrl_handler_free(&vc->hdl);
+
if (atomic_dec_and_test(&dev->num_channels))
s2255_destroy(dev);
return;
@@ -1888,52 +1621,70 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
int ret;
int i;
int cur_nr = video_nr;
- struct s2255_channel *channel;
+ struct s2255_vc *vc;
+ struct vb2_queue *q;
+
ret = v4l2_device_register(&dev->interface->dev, &dev->v4l2_dev);
if (ret)
return ret;
/* initialize all video 4 linux */
/* register 4 video devices */
for (i = 0; i < MAX_CHANNELS; i++) {
- channel = &dev->channel[i];
- INIT_LIST_HEAD(&channel->vidq.active);
+ vc = &dev->vc[i];
+ INIT_LIST_HEAD(&vc->buf_list);
- v4l2_ctrl_handler_init(&channel->hdl, 6);
- v4l2_ctrl_new_std(&channel->hdl, &s2255_ctrl_ops,
+ v4l2_ctrl_handler_init(&vc->hdl, 6);
+ v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops,
V4L2_CID_BRIGHTNESS, -127, 127, 1, DEF_BRIGHT);
- v4l2_ctrl_new_std(&channel->hdl, &s2255_ctrl_ops,
+ v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops,
V4L2_CID_CONTRAST, 0, 255, 1, DEF_CONTRAST);
- v4l2_ctrl_new_std(&channel->hdl, &s2255_ctrl_ops,
+ v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops,
V4L2_CID_SATURATION, 0, 255, 1, DEF_SATURATION);
- v4l2_ctrl_new_std(&channel->hdl, &s2255_ctrl_ops,
+ v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops,
V4L2_CID_HUE, 0, 255, 1, DEF_HUE);
- channel->jpegqual_ctrl = v4l2_ctrl_new_std(&channel->hdl,
+ vc->jpegqual_ctrl = v4l2_ctrl_new_std(&vc->hdl,
&s2255_ctrl_ops,
V4L2_CID_JPEG_COMPRESSION_QUALITY,
0, 100, 1, S2255_DEF_JPEG_QUAL);
if (dev->dsp_fw_ver >= S2255_MIN_DSP_COLORFILTER &&
- (dev->pid != 0x2257 || channel->idx <= 1))
- v4l2_ctrl_new_custom(&channel->hdl, &color_filter_ctrl, NULL);
- if (channel->hdl.error) {
- ret = channel->hdl.error;
- v4l2_ctrl_handler_free(&channel->hdl);
+ (dev->pid != 0x2257 || vc->idx <= 1))
+ v4l2_ctrl_new_custom(&vc->hdl, &color_filter_ctrl,
+ NULL);
+ if (vc->hdl.error) {
+ ret = vc->hdl.error;
+ v4l2_ctrl_handler_free(&vc->hdl);
dev_err(&dev->udev->dev, "couldn't register control\n");
break;
}
- channel->vidq.dev = dev;
- /* register 4 video devices */
- channel->vdev = template;
- channel->vdev.ctrl_handler = &channel->hdl;
- channel->vdev.lock = &dev->lock;
- channel->vdev.v4l2_dev = &dev->v4l2_dev;
- set_bit(V4L2_FL_USE_FH_PRIO, &channel->vdev.flags);
- video_set_drvdata(&channel->vdev, channel);
+ q = &vc->vb_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_READ | VB2_USERPTR;
+ q->drv_priv = vc;
+ q->lock = &vc->vb_lock;
+ q->buf_struct_size = sizeof(struct s2255_buffer);
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->ops = &s2255_video_qops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ ret = vb2_queue_init(q);
+ if (ret != 0) {
+ dev_err(&dev->udev->dev,
+ "%s vb2_queue_init 0x%x\n", __func__, ret);
+ break;
+ }
+ /* register video devices */
+ vc->vdev = template;
+ vc->vdev.queue = q;
+ vc->vdev.ctrl_handler = &vc->hdl;
+ vc->vdev.lock = &dev->lock;
+ vc->vdev.v4l2_dev = &dev->v4l2_dev;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vc->vdev.flags);
+ video_set_drvdata(&vc->vdev, vc);
if (video_nr == -1)
- ret = video_register_device(&channel->vdev,
+ ret = video_register_device(&vc->vdev,
VFL_TYPE_GRABBER,
video_nr);
else
- ret = video_register_device(&channel->vdev,
+ ret = video_register_device(&vc->vdev,
VFL_TYPE_GRABBER,
cur_nr + i);
@@ -1944,18 +1695,18 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
}
atomic_inc(&dev->num_channels);
v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
- video_device_node_name(&channel->vdev));
+ video_device_node_name(&vc->vdev));
}
- printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %s\n",
- S2255_VERSION);
+ pr_info("Sensoray 2255 V4L driver Revision: %s\n",
+ S2255_VERSION);
/* if no channels registered, return error and probe will fail*/
if (atomic_read(&dev->num_channels) == 0) {
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
}
if (atomic_read(&dev->num_channels) != MAX_CHANNELS)
- printk(KERN_WARNING "s2255: Not all channels available.\n");
+ pr_warn("s2255: Not all channels available.\n");
return 0;
}
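
Likewise, the vb2_ioctl_* entries in s2255_ioctl_ops rely on the queue being attached to the video node here (vc->vdev.queue = q); they look it up from the file at ioctl time. An illustrative shape, not the kernel's exact helper:

	static int sketch_reqbufs(struct file *file, void *priv,
				  struct v4l2_requestbuffers *p)
	{
		struct video_device *vdev = video_devdata(file);

		return vb2_reqbufs(vdev->queue, p);	/* q from vb2_queue_init() above */
	}
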
@@ -1981,11 +1732,11 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
s32 idx = -1;
struct s2255_framei *frm;
unsigned char *pdata;
- struct s2255_channel *channel;
- dprintk(100, "buffer to user\n");
- channel = &dev->channel[dev->cc];
- idx = channel->cur_frame;
- frm = &channel->buffer.frame[idx];
+ struct s2255_vc *vc;
+ dprintk(dev, 100, "buffer to user\n");
+ vc = &dev->vc[dev->cc];
+ idx = vc->cur_frame;
+ frm = &vc->buffer.frame[idx];
if (frm->ulState == S2255_READ_IDLE) {
int jj;
unsigned int cc;
@@ -1997,28 +1748,27 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
for (jj = 0; jj < (pipe_info->cur_transfer_size - 12); jj++) {
switch (*pdword) {
case S2255_MARKER_FRAME:
- dprintk(4, "found frame marker at offset:"
- " %d [%x %x]\n", jj, pdata[0],
- pdata[1]);
+ dprintk(dev, 4, "marker @ offset: %d [%x %x]\n",
+ jj, pdata[0], pdata[1]);
offset = jj + PREFIX_SIZE;
bframe = 1;
cc = le32_to_cpu(pdword[1]);
if (cc >= MAX_CHANNELS) {
- printk(KERN_ERR
- "bad channel\n");
+ dprintk(dev, 0,
+ "bad channel\n");
return -EINVAL;
}
/* reverse it */
dev->cc = G_chnmap[cc];
- channel = &dev->channel[dev->cc];
+ vc = &dev->vc[dev->cc];
payload = le32_to_cpu(pdword[3]);
- if (payload > channel->req_image_size) {
- channel->bad_payload++;
+ if (payload > vc->req_image_size) {
+ vc->bad_payload++;
/* discard the bad frame */
return -EINVAL;
}
- channel->pkt_size = payload;
- channel->jpg_size = le32_to_cpu(pdword[4]);
+ vc->pkt_size = payload;
+ vc->jpg_size = le32_to_cpu(pdword[4]);
break;
case S2255_MARKER_RESPONSE:
@@ -2029,34 +1779,34 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
cc = G_chnmap[le32_to_cpu(pdword[1])];
if (cc >= MAX_CHANNELS)
break;
- channel = &dev->channel[cc];
+ vc = &dev->vc[cc];
switch (pdword[2]) {
case S2255_RESPONSE_SETMODE:
/* check if channel valid */
/* set mode ready */
- channel->setmode_ready = 1;
- wake_up(&channel->wait_setmode);
- dprintk(5, "setmode ready %d\n", cc);
+ vc->setmode_ready = 1;
+ wake_up(&vc->wait_setmode);
+ dprintk(dev, 5, "setmode rdy %d\n", cc);
break;
case S2255_RESPONSE_FW:
dev->chn_ready |= (1 << cc);
if ((dev->chn_ready & 0x0f) != 0x0f)
break;
/* all channels ready */
- printk(KERN_INFO "s2255: fw loaded\n");
+ pr_info("s2255: fw loaded\n");
atomic_set(&dev->fw_data->fw_state,
S2255_FW_SUCCESS);
wake_up(&dev->fw_data->wait_fw);
break;
case S2255_RESPONSE_STATUS:
- channel->vidstatus = le32_to_cpu(pdword[3]);
- channel->vidstatus_ready = 1;
- wake_up(&channel->wait_vidstatus);
- dprintk(5, "got vidstatus %x chan %d\n",
+ vc->vidstatus = le32_to_cpu(pdword[3]);
+ vc->vidstatus_ready = 1;
+ wake_up(&vc->wait_vidstatus);
+ dprintk(dev, 5, "vstat %x chan %d\n",
le32_to_cpu(pdword[3]), cc);
break;
default:
- printk(KERN_INFO "s2255 unknown resp\n");
+ pr_info("s2255 unknown resp\n");
}
default:
pdata++;
@@ -2068,11 +1818,11 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
if (!bframe)
return -EINVAL;
}
- channel = &dev->channel[dev->cc];
- idx = channel->cur_frame;
- frm = &channel->buffer.frame[idx];
+ vc = &dev->vc[dev->cc];
+ idx = vc->cur_frame;
+ frm = &vc->buffer.frame[idx];
/* search done. now find out if should be acquiring on this channel */
- if (!channel->b_acquire) {
+ if (!vb2_is_streaming(&vc->vb_vidq)) {
/* we found a frame, but this channel is turned off */
frm->ulState = S2255_READ_IDLE;
return -EINVAL;
@@ -2088,7 +1838,7 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
if (frm->lpvbits == NULL) {
- dprintk(1, "s2255 frame buffer == NULL.%p %p %d %d",
+ dprintk(dev, 1, "s2255 frame buffer == NULL.%p %p %d %d",
frm, dev, dev->cc, idx);
return -ENOMEM;
}
@@ -2097,28 +1847,28 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
copy_size = (pipe_info->cur_transfer_size - offset);
- size = channel->pkt_size - PREFIX_SIZE;
+ size = vc->pkt_size - PREFIX_SIZE;
/* sanity check on pdest */
- if ((copy_size + frm->cur_size) < channel->req_image_size)
+ if ((copy_size + frm->cur_size) < vc->req_image_size)
memcpy(pdest, psrc, copy_size);
frm->cur_size += copy_size;
- dprintk(4, "cur_size size %lu size %lu \n", frm->cur_size, size);
+ dprintk(dev, 4, "cur_size: %lu, size: %lu\n", frm->cur_size, size);
if (frm->cur_size >= size) {
- dprintk(2, "****************[%d]Buffer[%d]full*************\n",
+ dprintk(dev, 2, "******[%d]Buffer[%d]full*******\n",
dev->cc, idx);
- channel->last_frame = channel->cur_frame;
- channel->cur_frame++;
+ vc->last_frame = vc->cur_frame;
+ vc->cur_frame++;
/* end of system frame ring buffer, start at zero */
- if ((channel->cur_frame == SYS_FRAMES) ||
- (channel->cur_frame == channel->buffer.dwFrames))
- channel->cur_frame = 0;
+ if ((vc->cur_frame == SYS_FRAMES) ||
+ (vc->cur_frame == vc->buffer.dwFrames))
+ vc->cur_frame = 0;
/* frame ready */
- if (channel->b_acquire)
- s2255_got_frame(channel, channel->jpg_size);
- channel->frame_count++;
+ if (vb2_is_streaming(&vc->vb_vidq))
+ s2255_got_frame(vc, vc->jpg_size);
+ vc->frame_count++;
frm->ulState = S2255_READ_IDLE;
frm->cur_size = 0;
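
For reference, the two record layouts that save_frame() scans for in the bulk data, restated as a comment; field meanings are inferred from the code above, not taken from a hardware datasheet:

	/*
	 * frame marker:    pdword[0] = S2255_MARKER_FRAME, [1] = channel,
	 *                  [3] = payload size, [4] = JPEG size;
	 *                  pixel data starts PREFIX_SIZE bytes after the marker
	 * response marker: pdword[0] = S2255_MARKER_RESPONSE, [1] = channel,
	 *                  [2] = S2255_RESPONSE_{SETMODE,FW,STATUS},
	 *                  [3] = status value (for S2255_RESPONSE_STATUS)
	 */
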
@@ -2131,7 +1881,7 @@ static void s2255_read_video_callback(struct s2255_dev *dev,
struct s2255_pipeinfo *pipe_info)
{
int res;
- dprintk(50, "callback read video \n");
+ dprintk(dev, 50, "callback read video\n");
if (dev->cc >= MAX_CHANNELS) {
dev->cc = 0;
@@ -2141,9 +1891,9 @@ static void s2255_read_video_callback(struct s2255_dev *dev,
/* otherwise copy to the system buffers */
res = save_frame(dev, pipe_info);
if (res != 0)
- dprintk(4, "s2255: read callback failed\n");
+ dprintk(dev, 4, "s2255: read callback failed\n");
- dprintk(50, "callback read video done\n");
+ dprintk(dev, 50, "callback read video done\n");
return;
}
@@ -2181,9 +1931,9 @@ static int s2255_get_fx2fw(struct s2255_dev *dev)
ret = s2255_vendor_req(dev, S2255_VR_FW, 0, 0, transBuffer, 2,
S2255_VR_IN);
if (ret < 0)
- dprintk(2, "get fw error: %x\n", ret);
+ dprintk(dev, 2, "get fw error: %x\n", ret);
fw = transBuffer[0] + (transBuffer[1] << 8);
- dprintk(2, "Get FW %x %x\n", transBuffer[0], transBuffer[1]);
+ dprintk(dev, 2, "Get FW %x %x\n", transBuffer[0], transBuffer[1]);
return fw;
}
@@ -2191,12 +1941,11 @@ static int s2255_get_fx2fw(struct s2255_dev *dev)
* Create the system ring buffer to copy frames into from the
* usb read pipe.
*/
-static int s2255_create_sys_buffers(struct s2255_channel *channel)
+static int s2255_create_sys_buffers(struct s2255_vc *vc)
{
unsigned long i;
unsigned long reqsize;
- dprintk(1, "create sys buffers\n");
- channel->buffer.dwFrames = SYS_FRAMES;
+ vc->buffer.dwFrames = SYS_FRAMES;
/* always allocate maximum size(PAL) for system buffers */
reqsize = SYS_FRAMES_MAXSIZE;
@@ -2205,40 +1954,33 @@ static int s2255_create_sys_buffers(struct s2255_channel *channel)
for (i = 0; i < SYS_FRAMES; i++) {
/* allocate the frames */
- channel->buffer.frame[i].lpvbits = vmalloc(reqsize);
- dprintk(1, "valloc %p chan %d, idx %lu, pdata %p\n",
- &channel->buffer.frame[i], channel->idx, i,
- channel->buffer.frame[i].lpvbits);
- channel->buffer.frame[i].size = reqsize;
- if (channel->buffer.frame[i].lpvbits == NULL) {
- printk(KERN_INFO "out of memory. using less frames\n");
- channel->buffer.dwFrames = i;
+ vc->buffer.frame[i].lpvbits = vmalloc(reqsize);
+ vc->buffer.frame[i].size = reqsize;
+ if (vc->buffer.frame[i].lpvbits == NULL) {
+ pr_info("out of memory. using less frames\n");
+ vc->buffer.dwFrames = i;
break;
}
}
/* make sure internal states are set */
for (i = 0; i < SYS_FRAMES; i++) {
- channel->buffer.frame[i].ulState = 0;
- channel->buffer.frame[i].cur_size = 0;
+ vc->buffer.frame[i].ulState = 0;
+ vc->buffer.frame[i].cur_size = 0;
}
- channel->cur_frame = 0;
- channel->last_frame = -1;
+ vc->cur_frame = 0;
+ vc->last_frame = -1;
return 0;
}
-static int s2255_release_sys_buffers(struct s2255_channel *channel)
+static int s2255_release_sys_buffers(struct s2255_vc *vc)
{
unsigned long i;
- dprintk(1, "release sys buffers\n");
for (i = 0; i < SYS_FRAMES; i++) {
- if (channel->buffer.frame[i].lpvbits) {
- dprintk(1, "vfree %p\n",
- channel->buffer.frame[i].lpvbits);
- vfree(channel->buffer.frame[i].lpvbits);
- }
- channel->buffer.frame[i].lpvbits = NULL;
+ if (vc->buffer.frame[i].lpvbits)
+ vfree(vc->buffer.frame[i].lpvbits);
+ vc->buffer.frame[i].lpvbits = NULL;
}
return 0;
}
@@ -2249,7 +1991,7 @@ static int s2255_board_init(struct s2255_dev *dev)
int fw_ver;
int j;
struct s2255_pipeinfo *pipe = &dev->pipe;
- dprintk(4, "board init: %p", dev);
+ dprintk(dev, 4, "board init: %p", dev);
memset(pipe, 0, sizeof(*pipe));
pipe->dev = dev;
pipe->cur_transfer_size = S2255_USB_XFER_SIZE;
@@ -2258,54 +2000,53 @@ static int s2255_board_init(struct s2255_dev *dev)
pipe->transfer_buffer = kzalloc(pipe->max_transfer_size,
GFP_KERNEL);
if (pipe->transfer_buffer == NULL) {
- dprintk(1, "out of memory!\n");
+ dprintk(dev, 1, "out of memory!\n");
return -ENOMEM;
}
/* query the firmware */
fw_ver = s2255_get_fx2fw(dev);
- printk(KERN_INFO "s2255: usb firmware version %d.%d\n",
- (fw_ver >> 8) & 0xff,
- fw_ver & 0xff);
+ pr_info("s2255: usb firmware version %d.%d\n",
+ (fw_ver >> 8) & 0xff,
+ fw_ver & 0xff);
if (fw_ver < S2255_CUR_USB_FWVER)
- printk(KERN_INFO "s2255: newer USB firmware available\n");
+ pr_info("s2255: newer USB firmware available\n");
for (j = 0; j < MAX_CHANNELS; j++) {
- struct s2255_channel *channel = &dev->channel[j];
- channel->b_acquire = 0;
- channel->mode = mode_def;
+ struct s2255_vc *vc = &dev->vc[j];
+ vc->mode = mode_def;
if (dev->pid == 0x2257 && j > 1)
- channel->mode.color |= (1 << 16);
- channel->jpegqual = S2255_DEF_JPEG_QUAL;
- channel->width = LINE_SZ_4CIFS_NTSC;
- channel->height = NUM_LINES_4CIFS_NTSC * 2;
- channel->std = V4L2_STD_NTSC_M;
- channel->fmt = &formats[0];
- channel->mode.restart = 1;
- channel->req_image_size = get_transfer_size(&mode_def);
- channel->frame_count = 0;
+ vc->mode.color |= (1 << 16);
+ vc->jpegqual = S2255_DEF_JPEG_QUAL;
+ vc->width = LINE_SZ_4CIFS_NTSC;
+ vc->height = NUM_LINES_4CIFS_NTSC * 2;
+ vc->std = V4L2_STD_NTSC_M;
+ vc->fmt = &formats[0];
+ vc->mode.restart = 1;
+ vc->req_image_size = get_transfer_size(&mode_def);
+ vc->frame_count = 0;
/* create the system buffers */
- s2255_create_sys_buffers(channel);
+ s2255_create_sys_buffers(vc);
}
/* start read pipe */
s2255_start_readpipe(dev);
- dprintk(1, "%s: success\n", __func__);
+ dprintk(dev, 1, "%s: success\n", __func__);
return 0;
}
static int s2255_board_shutdown(struct s2255_dev *dev)
{
u32 i;
- dprintk(1, "%s: dev: %p", __func__, dev);
+ dprintk(dev, 1, "%s: dev: %p", __func__, dev);
for (i = 0; i < MAX_CHANNELS; i++) {
- if (dev->channel[i].b_acquire)
- s2255_stop_acquire(&dev->channel[i]);
+ if (vb2_is_streaming(&dev->vc[i].vb_vidq))
+ s2255_stop_acquire(&dev->vc[i]);
}
s2255_stop_readpipe(dev);
for (i = 0; i < MAX_CHANNELS; i++)
- s2255_release_sys_buffers(&dev->channel[i]);
+ s2255_release_sys_buffers(&dev->vc[i]);
/* release transfer buffer */
kfree(dev->pipe.transfer_buffer);
return 0;
@@ -2318,13 +2059,10 @@ static void read_pipe_completion(struct urb *purb)
int status;
int pipe;
pipe_info = purb->context;
- dprintk(100, "%s: urb:%p, status %d\n", __func__, purb,
- purb->status);
if (pipe_info == NULL) {
dev_err(&purb->dev->dev, "no context!\n");
return;
}
-
dev = pipe_info->dev;
if (dev == NULL) {
dev_err(&purb->dev->dev, "no context!\n");
@@ -2333,13 +2071,13 @@ static void read_pipe_completion(struct urb *purb)
status = purb->status;
/* if shutting down, do not resubmit, exit immediately */
if (status == -ESHUTDOWN) {
- dprintk(2, "%s: err shutdown\n", __func__);
+ dprintk(dev, 2, "%s: err shutdown\n", __func__);
pipe_info->err_count++;
return;
}
if (pipe_info->state == 0) {
- dprintk(2, "%s: exiting USB pipe", __func__);
+ dprintk(dev, 2, "%s: exiting USB pipe", __func__);
return;
}
@@ -2347,7 +2085,7 @@ static void read_pipe_completion(struct urb *purb)
s2255_read_video_callback(dev, pipe_info);
else {
pipe_info->err_count++;
- dprintk(1, "%s: failed URB %d\n", __func__, status);
+ dprintk(dev, 1, "%s: failed URB %d\n", __func__, status);
}
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
@@ -2359,11 +2097,10 @@ static void read_pipe_completion(struct urb *purb)
read_pipe_completion, pipe_info);
if (pipe_info->state != 0) {
- if (usb_submit_urb(pipe_info->stream_urb, GFP_ATOMIC)) {
+ if (usb_submit_urb(pipe_info->stream_urb, GFP_ATOMIC))
dev_err(&dev->udev->dev, "error submitting urb\n");
- }
} else {
- dprintk(2, "%s :complete state 0\n", __func__);
+ dprintk(dev, 2, "%s :complete state 0\n", __func__);
}
return;
}
@@ -2374,7 +2111,7 @@ static int s2255_start_readpipe(struct s2255_dev *dev)
int retval;
struct s2255_pipeinfo *pipe_info = &dev->pipe;
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
- dprintk(2, "%s: IN %d\n", __func__, dev->read_endpoint);
+ dprintk(dev, 2, "%s: IN %d\n", __func__, dev->read_endpoint);
pipe_info->state = 1;
pipe_info->err_count = 0;
pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -2391,70 +2128,64 @@ static int s2255_start_readpipe(struct s2255_dev *dev)
read_pipe_completion, pipe_info);
retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
if (retval) {
- printk(KERN_ERR "s2255: start read pipe failed\n");
+ pr_err("s2255: start read pipe failed\n");
return retval;
}
return 0;
}
/* starts acquisition process */
-static int s2255_start_acquire(struct s2255_channel *channel)
+static int s2255_start_acquire(struct s2255_vc *vc)
{
- unsigned char *buffer;
int res;
unsigned long chn_rev;
int j;
- struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
- chn_rev = G_chnmap[channel->idx];
- buffer = kzalloc(512, GFP_KERNEL);
- if (buffer == NULL) {
- dev_err(&dev->udev->dev, "out of mem\n");
- return -ENOMEM;
- }
-
- channel->last_frame = -1;
- channel->bad_payload = 0;
- channel->cur_frame = 0;
+ struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
+ __le32 *buffer = dev->cmdbuf;
+
+ mutex_lock(&dev->cmdlock);
+ chn_rev = G_chnmap[vc->idx];
+ vc->last_frame = -1;
+ vc->bad_payload = 0;
+ vc->cur_frame = 0;
for (j = 0; j < SYS_FRAMES; j++) {
- channel->buffer.frame[j].ulState = 0;
- channel->buffer.frame[j].cur_size = 0;
+ vc->buffer.frame[j].ulState = 0;
+ vc->buffer.frame[j].cur_size = 0;
}
/* send the start command */
- *(__le32 *) buffer = IN_DATA_TOKEN;
- *((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
- *((__le32 *) buffer + 2) = CMD_START;
+ buffer[0] = IN_DATA_TOKEN;
+ buffer[1] = (__le32) cpu_to_le32(chn_rev);
+ buffer[2] = CMD_START;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (res != 0)
dev_err(&dev->udev->dev, "CMD_START error\n");
- dprintk(2, "start acquire exit[%d] %d \n", channel->idx, res);
- kfree(buffer);
- return 0;
+ dprintk(dev, 2, "start acquire exit[%d] %d\n", vc->idx, res);
+ mutex_unlock(&dev->cmdlock);
+ return res;
}
-static int s2255_stop_acquire(struct s2255_channel *channel)
+static int s2255_stop_acquire(struct s2255_vc *vc)
{
- unsigned char *buffer;
int res;
unsigned long chn_rev;
- struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
- chn_rev = G_chnmap[channel->idx];
- buffer = kzalloc(512, GFP_KERNEL);
- if (buffer == NULL) {
- dev_err(&dev->udev->dev, "out of mem\n");
- return -ENOMEM;
- }
+ struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
+ __le32 *buffer = dev->cmdbuf;
+
+ mutex_lock(&dev->cmdlock);
+ chn_rev = G_chnmap[vc->idx];
/* send the stop command */
- *(__le32 *) buffer = IN_DATA_TOKEN;
- *((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
- *((__le32 *) buffer + 2) = CMD_STOP;
+ buffer[0] = IN_DATA_TOKEN;
+ buffer[1] = (__le32) cpu_to_le32(chn_rev);
+ buffer[2] = CMD_STOP;
+
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (res != 0)
dev_err(&dev->udev->dev, "CMD_STOP error\n");
- kfree(buffer);
- channel->b_acquire = 0;
- dprintk(4, "%s: chn %d, res %d\n", __func__, channel->idx, res);
+
+ dprintk(dev, 4, "%s: chn %d, res %d\n", __func__, vc->idx, res);
+ mutex_unlock(&dev->cmdlock);
return res;
}
@@ -2469,7 +2200,7 @@ static void s2255_stop_readpipe(struct s2255_dev *dev)
usb_free_urb(pipe->stream_urb);
pipe->stream_urb = NULL;
}
- dprintk(4, "%s", __func__);
+ dprintk(dev, 4, "%s", __func__);
return;
}
@@ -2501,19 +2232,27 @@ static int s2255_probe(struct usb_interface *interface,
int retval = -ENOMEM;
__le32 *pdata;
int fw_size;
- dprintk(2, "%s\n", __func__);
+
/* allocate memory for our device state and initialize it to zero */
dev = kzalloc(sizeof(struct s2255_dev), GFP_KERNEL);
if (dev == NULL) {
s2255_dev_err(&interface->dev, "out of memory\n");
return -ENOMEM;
}
+
+ dev->cmdbuf = kzalloc(S2255_CMDBUF_SIZE, GFP_KERNEL);
+ if (dev->cmdbuf == NULL) {
+ s2255_dev_err(&interface->dev, "out of memory\n");
+ goto errorFWDATA1;
+ }
+
atomic_set(&dev->num_channels, 0);
dev->pid = le16_to_cpu(id->idProduct);
dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
if (!dev->fw_data)
goto errorFWDATA1;
mutex_init(&dev->lock);
+ mutex_init(&dev->cmdlock);
/* grab usb_device and save it */
dev->udev = usb_get_dev(interface_to_usbdev(interface));
if (dev->udev == NULL) {
@@ -2521,12 +2260,13 @@ static int s2255_probe(struct usb_interface *interface,
retval = -ENODEV;
goto errorUDEV;
}
- dprintk(1, "dev: %p, udev %p interface %p\n", dev,
- dev->udev, interface);
+ dev_dbg(&interface->dev, "dev: %p, udev %p interface %p\n",
+ dev, dev->udev, interface);
dev->interface = interface;
/* set up the endpoint information */
iface_desc = interface->cur_altsetting;
- dprintk(1, "num endpoints %d\n", iface_desc->desc.bNumEndpoints);
+ dev_dbg(&interface->dev, "num EP: %d\n",
+ iface_desc->desc.bNumEndpoints);
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (!dev->read_endpoint && usb_endpoint_is_bulk_in(endpoint)) {
@@ -2544,10 +2284,13 @@ static int s2255_probe(struct usb_interface *interface,
dev->timer.data = (unsigned long)dev->fw_data;
init_waitqueue_head(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
- struct s2255_channel *channel = &dev->channel[i];
- dev->channel[i].idx = i;
- init_waitqueue_head(&channel->wait_setmode);
- init_waitqueue_head(&channel->wait_vidstatus);
+ struct s2255_vc *vc = &dev->vc[i];
+ vc->idx = i;
+ vc->dev = dev;
+ init_waitqueue_head(&vc->wait_setmode);
+ init_waitqueue_head(&vc->wait_vidstatus);
+ spin_lock_init(&vc->qlock);
+ mutex_init(&vc->vb_lock);
}
dev->fw_data->fw_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -2564,7 +2307,7 @@ static int s2255_probe(struct usb_interface *interface,
/* load the first chunk */
if (request_firmware(&dev->fw_data->fw,
FIRMWARE_FILE_NAME, &dev->udev->dev)) {
- printk(KERN_ERR "sensoray 2255 failed to get firmware\n");
+ dev_err(&interface->dev, "sensoray 2255 failed to get firmware\n");
goto errorREQFW;
}
/* check the firmware is valid */
@@ -2572,28 +2315,27 @@ static int s2255_probe(struct usb_interface *interface,
pdata = (__le32 *) &dev->fw_data->fw->data[fw_size - 8];
if (*pdata != S2255_FW_MARKER) {
- printk(KERN_INFO "Firmware invalid.\n");
+ dev_err(&interface->dev, "Firmware invalid.\n");
retval = -ENODEV;
goto errorFWMARKER;
} else {
/* make sure firmware is the latest */
__le32 *pRel;
pRel = (__le32 *) &dev->fw_data->fw->data[fw_size - 4];
- printk(KERN_INFO "s2255 dsp fw version %x\n", le32_to_cpu(*pRel));
+ pr_info("s2255 dsp fw version %x\n", le32_to_cpu(*pRel));
dev->dsp_fw_ver = le32_to_cpu(*pRel);
if (dev->dsp_fw_ver < S2255_CUR_DSP_FWVER)
- printk(KERN_INFO "s2255: f2255usb.bin out of date.\n");
+ pr_info("s2255: f2255usb.bin out of date.\n");
if (dev->pid == 0x2257 &&
dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
- printk(KERN_WARNING "s2255: 2257 requires firmware %d"
- " or above.\n", S2255_MIN_DSP_COLORFILTER);
+ pr_warn("2257 needs firmware %d or above.\n",
+ S2255_MIN_DSP_COLORFILTER);
}
usb_reset_device(dev->udev);
/* load 2255 board specific */
retval = s2255_board_init(dev);
if (retval)
goto errorBOARDINIT;
- spin_lock_init(&dev->slock);
s2255_fwload_start(dev, 0);
/* loads v4l specific */
retval = s2255_probe_v4l(dev);
@@ -2617,8 +2359,9 @@ errorUDEV:
kfree(dev->fw_data);
mutex_destroy(&dev->lock);
errorFWDATA1:
+ kfree(dev->cmdbuf);
kfree(dev);
- printk(KERN_WARNING "Sensoray 2255 driver load failed: 0x%x\n", retval);
+ pr_warn("Sensoray 2255 driver load failed: 0x%x\n", retval);
return retval;
}
@@ -2635,15 +2378,15 @@ static void s2255_disconnect(struct usb_interface *interface)
atomic_inc(&dev->num_channels);
/* unregister each video device. */
for (i = 0; i < channels; i++)
- video_unregister_device(&dev->channel[i].vdev);
+ video_unregister_device(&dev->vc[i].vdev);
/* wake up any of our timers */
atomic_set(&dev->fw_data->fw_state, S2255_FW_DISCONNECTING);
wake_up(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
- dev->channel[i].setmode_ready = 1;
- wake_up(&dev->channel[i].wait_setmode);
- dev->channel[i].vidstatus_ready = 1;
- wake_up(&dev->channel[i].wait_vidstatus);
+ dev->vc[i].setmode_ready = 1;
+ wake_up(&dev->vc[i].wait_setmode);
+ dev->vc[i].vidstatus_ready = 1;
+ wake_up(&dev->vc[i].wait_vidstatus);
}
if (atomic_dec_and_test(&dev->num_channels))
s2255_destroy(dev);
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 05bd91a60c09..1836a416d806 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -653,6 +653,8 @@ static const struct usb_device_id smsusb_id_table[] = {
.driver_info = SMS1XXX_BOARD_ZTE_DVB_DATA_CARD },
{ USB_DEVICE(0x19D2, 0x0078),
.driver_info = SMS1XXX_BOARD_ONDA_MDTV_DATA_CARD },
+ { USB_DEVICE(0x3275, 0x0080),
+ .driver_info = SMS1XXX_BOARD_SIANO_RIO },
{ } /* Terminating entry */
};
diff --git a/drivers/media/usb/stk1160/stk1160-ac97.c b/drivers/media/usb/stk1160/stk1160-ac97.c
index c8583c262c3d..c46c8be89602 100644
--- a/drivers/media/usb/stk1160/stk1160-ac97.c
+++ b/drivers/media/usb/stk1160/stk1160-ac97.c
@@ -98,13 +98,11 @@ int stk1160_ac97_register(struct stk1160 *dev)
* Just want a card to access ac96 controls,
* the actual capture interface will be handled by snd-usb-audio
*/
- rc = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
- THIS_MODULE, 0, &card);
+ rc = snd_card_new(dev->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, 0, &card);
if (rc < 0)
return rc;
- snd_card_set_dev(card, dev->dev);
-
/* TODO: I'm not sure where should I get these names :-( */
snprintf(card->shortname, sizeof(card->shortname),
"stk1160-mixer");
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index c45c9881bb5f..37bc00f418f1 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -641,7 +641,7 @@ int stk1160_vb2_setup(struct stk1160 *dev)
q->buf_struct_size = sizeof(struct stk1160_buffer);
q->ops = &stk1160_video_qops;
q->mem_ops = &vb2_vmalloc_memops;
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
rc = vb2_queue_init(q);
if (rc < 0)
diff --git a/drivers/media/usb/tlg2300/pd-alsa.c b/drivers/media/usb/tlg2300/pd-alsa.c
index 3f3e141f70fb..dd8fe100590f 100644
--- a/drivers/media/usb/tlg2300/pd-alsa.c
+++ b/drivers/media/usb/tlg2300/pd-alsa.c
@@ -300,7 +300,8 @@ int poseidon_audio_init(struct poseidon *p)
struct snd_pcm *pcm;
int ret;
- ret = snd_card_create(-1, "Telegent", THIS_MODULE, 0, &card);
+ ret = snd_card_new(&p->interface->dev, -1, "Telegent",
+ THIS_MODULE, 0, &card);
if (ret != 0)
return ret;
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index 813c1ec53608..74e5697d8678 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -1,7 +1,7 @@
/*
*
* Support for audio capture for tm5600/6000/6010
- * (c) 2007-2008 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * (c) 2007-2008 Mauro Carvalho Chehab
*
* Based on cx88-alsa.c
*
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s).");
****************************************************************************/
MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},"
"{{Trident,tm6000},"
@@ -431,7 +431,8 @@ static int tm6000_audio_init(struct tm6000_core *dev)
if (!enable[devnr])
return -ENOENT;
- rc = snd_card_create(index[devnr], "tm6000", THIS_MODULE, 0, &card);
+ rc = snd_card_new(&dev->udev->dev, index[devnr], "tm6000",
+ THIS_MODULE, 0, &card);
if (rc < 0) {
snd_printk(KERN_ERR "cannot create card instance %d\n", devnr);
return rc;
@@ -445,7 +446,6 @@ static int tm6000_audio_init(struct tm6000_core *dev)
le16_to_cpu(dev->udev->descriptor.idVendor),
le16_to_cpu(dev->udev->descriptor.idProduct));
snd_component_add(card, component);
- snd_card_set_dev(card, &dev->udev->dev);
chip = kzalloc(sizeof(struct snd_tm6000_card), GFP_KERNEL);
if (!chip) {
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 9fc1e940a82b..095f5db1a790 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -32,7 +32,7 @@
#include "xc5000.h"
MODULE_DESCRIPTION("DVB driver extension module for tm5600/6000/6010 based TV cards");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},"
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c
index 8a6bbf1d80e1..d1af5438c168 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/media/usb/tm6000/tm6000-input.c
@@ -422,7 +422,7 @@ int tm6000_ir_init(struct tm6000_core *dev)
ir->rc = rc;
/* input setup */
- rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
+ rc_set_allowed_protocols(rc, RC_BIT_RC5 | RC_BIT_NEC);
/* Needed, in order to support NEC remotes with 24 or 32 bits */
rc->scanmask = 0xffff;
rc->priv = ir;
diff --git a/drivers/media/usb/tm6000/tm6000-stds.c b/drivers/media/usb/tm6000/tm6000-stds.c
index 5e28d6a2412f..93a4b2434b6e 100644
--- a/drivers/media/usb/tm6000/tm6000-stds.c
+++ b/drivers/media/usb/tm6000/tm6000-stds.c
@@ -1,7 +1,7 @@
/*
* tm6000-stds.c - driver for TM5600/TM6000/TM6010 USB video capture devices
*
- * Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2007 Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/usbtv/Makefile b/drivers/media/usb/usbtv/Makefile
index 28b872fa94e1..775316a88ea6 100644
--- a/drivers/media/usb/usbtv/Makefile
+++ b/drivers/media/usb/usbtv/Makefile
@@ -1 +1,4 @@
+usbtv-y := usbtv-core.o \
+ usbtv-video.o
+
obj-$(CONFIG_VIDEO_USBTV) += usbtv.o
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
new file mode 100644
index 000000000000..2f87ddfa469f
--- /dev/null
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -0,0 +1,134 @@
+/*
+ * Fushicai USBTV007 Video Grabber Driver
+ *
+ * Product web site:
+ * http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
+ *
+ * Following LWN articles were very useful in construction of this driver:
+ * Video4Linux2 API series: http://lwn.net/Articles/203924/
+ * videobuf2 API explanation: http://lwn.net/Articles/447435/
+ * Thanks go to Jonathan Corbet for providing this quality documentation.
+ * He is awesome.
+ *
+ * Copyright (c) 2013 Lubomir Rintel
+ * All rights reserved.
+ * No physical hardware was harmed running Windows during the
+ * reverse-engineering activity
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ */
+
+#include "usbtv.h"
+
+int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
+{
+ int ret;
+ int pipe = usb_rcvctrlpipe(usbtv->udev, 0);
+ int i;
+
+ for (i = 0; i < size; i++) {
+ u16 index = regs[i][0];
+ u16 value = regs[i][1];
+
+ ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, NULL, 0, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int usbtv_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int ret;
+ int size;
+ struct device *dev = &intf->dev;
+ struct usbtv *usbtv;
+
+ /* Checks that the device is what we think it is. */
+ if (intf->num_altsetting != 2)
+ return -ENODEV;
+ if (intf->altsetting[1].desc.bNumEndpoints != 4)
+ return -ENODEV;
+
+ /* Packet size is split into 11 bits of base size and a count of
+ * extra multiples of it. */
+ size = usb_endpoint_maxp(&intf->altsetting[1].endpoint[0].desc);
+ size = (size & 0x07ff) * (((size & 0x1800) >> 11) + 1);
+
+ /* Device structure */
+ usbtv = kzalloc(sizeof(struct usbtv), GFP_KERNEL);
+ if (usbtv == NULL)
+ return -ENOMEM;
+ usbtv->dev = dev;
+ usbtv->udev = usb_get_dev(interface_to_usbdev(intf));
+
+ usbtv->iso_size = size;
+
+ usb_set_intfdata(intf, usbtv);
+
+ ret = usbtv_video_init(usbtv);
+ if (ret < 0)
+ goto usbtv_video_fail;
+
+ /* for simplicity we exploit the v4l2_device reference counting */
+ v4l2_device_get(&usbtv->v4l2_dev);
+
+ dev_info(dev, "Fushicai USBTV007 Video Grabber\n");
+ return 0;
+
+usbtv_video_fail:
+ kfree(usbtv);
+
+ return ret;
+}
+
+static void usbtv_disconnect(struct usb_interface *intf)
+{
+ struct usbtv *usbtv = usb_get_intfdata(intf);
+ usb_set_intfdata(intf, NULL);
+
+ if (!usbtv)
+ return;
+
+ usbtv_video_free(usbtv);
+
+ usb_put_dev(usbtv->udev);
+ usbtv->udev = NULL;
+
+ /* the usbtv structure will be deallocated when v4l2 is
+ done using it */
+ v4l2_device_put(&usbtv->v4l2_dev);
+}
+
+static struct usb_device_id usbtv_id_table[] = {
+ { USB_DEVICE(0x1b71, 0x3002) },
+ {}
+};
+MODULE_DEVICE_TABLE(usb, usbtv_id_table);
+
+MODULE_AUTHOR("Lubomir Rintel");
+MODULE_DESCRIPTION("Fushicai USBTV007 Video Grabber Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct usb_driver usbtv_usb_driver = {
+ .name = "usbtv",
+ .id_table = usbtv_id_table,
+ .probe = usbtv_probe,
+ .disconnect = usbtv_disconnect,
+};
+
+module_usb_driver(usbtv_usb_driver);
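The size computation in usbtv_probe() above follows the USB 2.0 encoding of wMaxPacketSize for high-bandwidth isochronous endpoints: bits 10:0 carry the base packet size and bits 12:11 the number of additional transactions per microframe. A small stand-alone sketch of the same arithmetic, with a made-up descriptor value for illustration:

#include <stdio.h>

/* Decode a USB 2.0 wMaxPacketSize value the same way usbtv_probe() does:
 * bits 10:0 are the base size, bits 12:11 count extra transactions. */
static unsigned int iso_payload_bytes(unsigned int maxp)
{
        return (maxp & 0x07ff) * (((maxp >> 11) & 0x3) + 1);
}

int main(void)
{
        unsigned int maxp = 0x1400; /* hypothetical: 1024-byte base, 2 extra transactions */

        printf("per-microframe payload: %u bytes\n", iso_payload_bytes(maxp)); /* 3072 */
        return 0;
}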
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv-video.c
index 6222a4ab1e00..20365bd69d05 100644
--- a/drivers/media/usb/usbtv/usbtv.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -28,45 +28,10 @@
* GNU General Public License ("GPL").
*/
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/usb.h>
-#include <linux/videodev2.h>
-
-#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
-#include <media/videobuf2-vmalloc.h>
-
-/* Hardware. */
-#define USBTV_VIDEO_ENDP 0x81
-#define USBTV_BASE 0xc000
-#define USBTV_REQUEST_REG 12
-
-/* Number of concurrent isochronous urbs submitted.
- * Higher numbers was seen to overly saturate the USB bus. */
-#define USBTV_ISOC_TRANSFERS 16
-#define USBTV_ISOC_PACKETS 8
-
-#define USBTV_CHUNK_SIZE 256
-#define USBTV_CHUNK 240
-
-/* Chunk header. */
-#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
- == 0x88000000)
-#define USBTV_FRAME_ID(chunk) ((be32_to_cpu(chunk[0]) & 0x00ff0000) >> 16)
-#define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15)
-#define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff)
-
-#define USBTV_TV_STD (V4L2_STD_525_60 | V4L2_STD_PAL)
-
-/* parameters for supported TV norms */
-struct usbtv_norm_params {
- v4l2_std_id norm;
- int cap_width, cap_height;
-};
+
+#include "usbtv.h"
static struct usbtv_norm_params norm_params[] = {
{
@@ -81,43 +46,6 @@ static struct usbtv_norm_params norm_params[] = {
}
};
-/* A single videobuf2 frame buffer. */
-struct usbtv_buf {
- struct vb2_buffer vb;
- struct list_head list;
-};
-
-/* Per-device structure. */
-struct usbtv {
- struct device *dev;
- struct usb_device *udev;
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct vb2_queue vb2q;
- struct mutex v4l2_lock;
- struct mutex vb2q_lock;
-
- /* List of videobuf2 buffers protected by a lock. */
- spinlock_t buflock;
- struct list_head bufs;
-
- /* Number of currently processed frame, useful find
- * out when a new one begins. */
- u32 frame_id;
- int chunks_done;
-
- enum {
- USBTV_COMPOSITE_INPUT,
- USBTV_SVIDEO_INPUT,
- } input;
- v4l2_std_id norm;
- int width, height;
- int n_chunks;
- int iso_size;
- unsigned int sequence;
- struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
-};
-
static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm)
{
int i, ret = 0;
@@ -142,26 +70,6 @@ static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm)
return ret;
}
-static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
-{
- int ret;
- int pipe = usb_rcvctrlpipe(usbtv->udev, 0);
- int i;
-
- for (i = 0; i < size; i++) {
- u16 index = regs[i][0];
- u16 value = regs[i][1];
-
- ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, index, NULL, 0, 0);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
static int usbtv_select_input(struct usbtv *usbtv, int input)
{
int ret;
@@ -560,12 +468,6 @@ start_fail:
return ret;
}
-struct usb_device_id usbtv_id_table[] = {
- { USB_DEVICE(0x1b71, 0x3002) },
- {}
-};
-MODULE_DEVICE_TABLE(usb, usbtv_id_table);
-
static int usbtv_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
@@ -660,7 +562,7 @@ static int usbtv_s_input(struct file *file, void *priv, unsigned int i)
return usbtv_select_input(usbtv, i);
}
-struct v4l2_ioctl_ops usbtv_ioctl_ops = {
+static struct v4l2_ioctl_ops usbtv_ioctl_ops = {
.vidioc_querycap = usbtv_querycap,
.vidioc_enum_input = usbtv_enum_input,
.vidioc_enum_fmt_vid_cap = usbtv_enum_fmt_vid_cap,
@@ -682,7 +584,7 @@ struct v4l2_ioctl_ops usbtv_ioctl_ops = {
.vidioc_streamoff = vb2_ioctl_streamoff,
};
-struct v4l2_file_operations usbtv_fops = {
+static struct v4l2_file_operations usbtv_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
.mmap = vb2_fop_mmap,
@@ -743,7 +645,7 @@ static int usbtv_stop_streaming(struct vb2_queue *vq)
return 0;
}
-struct vb2_ops usbtv_vb2_ops = {
+static struct vb2_ops usbtv_vb2_ops = {
.queue_setup = usbtv_queue_setup,
.buf_queue = usbtv_buf_queue,
.start_streaming = usbtv_start_streaming,
@@ -759,33 +661,9 @@ static void usbtv_release(struct v4l2_device *v4l2_dev)
kfree(usbtv);
}
-static int usbtv_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+int usbtv_video_init(struct usbtv *usbtv)
{
int ret;
- int size;
- struct device *dev = &intf->dev;
- struct usbtv *usbtv;
-
- /* Checks that the device is what we think it is. */
- if (intf->num_altsetting != 2)
- return -ENODEV;
- if (intf->altsetting[1].desc.bNumEndpoints != 4)
- return -ENODEV;
-
- /* Packet size is split into 11 bits of base size and count of
- * extra multiplies of it.*/
- size = usb_endpoint_maxp(&intf->altsetting[1].endpoint[0].desc);
- size = (size & 0x07ff) * (((size & 0x1800) >> 11) + 1);
-
- /* Device structure */
- usbtv = kzalloc(sizeof(struct usbtv), GFP_KERNEL);
- if (usbtv == NULL)
- return -ENOMEM;
- usbtv->dev = dev;
- usbtv->udev = usb_get_dev(interface_to_usbdev(intf));
-
- usbtv->iso_size = size;
(void)usbtv_configure_for_norm(usbtv, V4L2_STD_525_60);
@@ -801,24 +679,22 @@ static int usbtv_probe(struct usb_interface *intf,
usbtv->vb2q.buf_struct_size = sizeof(struct usbtv_buf);
usbtv->vb2q.ops = &usbtv_vb2_ops;
usbtv->vb2q.mem_ops = &vb2_vmalloc_memops;
- usbtv->vb2q.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ usbtv->vb2q.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
usbtv->vb2q.lock = &usbtv->vb2q_lock;
ret = vb2_queue_init(&usbtv->vb2q);
if (ret < 0) {
- dev_warn(dev, "Could not initialize videobuf2 queue\n");
- goto usbtv_fail;
+ dev_warn(usbtv->dev, "Could not initialize videobuf2 queue\n");
+ return ret;
}
/* v4l2 structure */
usbtv->v4l2_dev.release = usbtv_release;
- ret = v4l2_device_register(dev, &usbtv->v4l2_dev);
+ ret = v4l2_device_register(usbtv->dev, &usbtv->v4l2_dev);
if (ret < 0) {
- dev_warn(dev, "Could not register v4l2 device\n");
+ dev_warn(usbtv->dev, "Could not register v4l2 device\n");
goto v4l2_fail;
}
- usb_set_intfdata(intf, usbtv);
-
/* Video structure */
strlcpy(usbtv->vdev.name, "usbtv", sizeof(usbtv->vdev.name));
usbtv->vdev.v4l2_dev = &usbtv->v4l2_dev;
@@ -832,52 +708,31 @@ static int usbtv_probe(struct usb_interface *intf,
video_set_drvdata(&usbtv->vdev, usbtv);
ret = video_register_device(&usbtv->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
- dev_warn(dev, "Could not register video device\n");
+ dev_warn(usbtv->dev, "Could not register video device\n");
goto vdev_fail;
}
- dev_info(dev, "Fushicai USBTV007 Video Grabber\n");
return 0;
vdev_fail:
v4l2_device_unregister(&usbtv->v4l2_dev);
v4l2_fail:
vb2_queue_release(&usbtv->vb2q);
-usbtv_fail:
- kfree(usbtv);
return ret;
}
-static void usbtv_disconnect(struct usb_interface *intf)
+void usbtv_video_free(struct usbtv *usbtv)
{
- struct usbtv *usbtv = usb_get_intfdata(intf);
-
mutex_lock(&usbtv->vb2q_lock);
mutex_lock(&usbtv->v4l2_lock);
usbtv_stop(usbtv);
- usb_set_intfdata(intf, NULL);
video_unregister_device(&usbtv->vdev);
v4l2_device_disconnect(&usbtv->v4l2_dev);
- usb_put_dev(usbtv->udev);
- usbtv->udev = NULL;
mutex_unlock(&usbtv->v4l2_lock);
mutex_unlock(&usbtv->vb2q_lock);
v4l2_device_put(&usbtv->v4l2_dev);
}
-
-MODULE_AUTHOR("Lubomir Rintel");
-MODULE_DESCRIPTION("Fushicai USBTV007 Video Grabber Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-
-struct usb_driver usbtv_usb_driver = {
- .name = "usbtv",
- .id_table = usbtv_id_table,
- .probe = usbtv_probe,
- .disconnect = usbtv_disconnect,
-};
-
-module_usb_driver(usbtv_usb_driver);
diff --git a/drivers/media/usb/usbtv/usbtv.h b/drivers/media/usb/usbtv/usbtv.h
new file mode 100644
index 000000000000..cb1d388cc647
--- /dev/null
+++ b/drivers/media/usb/usbtv/usbtv.h
@@ -0,0 +1,99 @@
+/*
+ * Fushicai USBTV007 Video Grabber Driver
+ *
+ * Copyright (c) 2013 Lubomir Rintel
+ * All rights reserved.
+ * No physical hardware was harmed running Windows during the
+ * reverse-engineering activity
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <media/v4l2-device.h>
+#include <media/videobuf2-vmalloc.h>
+
+/* Hardware. */
+#define USBTV_VIDEO_ENDP 0x81
+#define USBTV_BASE 0xc000
+#define USBTV_REQUEST_REG 12
+
+/* Number of concurrent isochronous urbs submitted.
+ * Higher numbers were seen to overly saturate the USB bus. */
+#define USBTV_ISOC_TRANSFERS 16
+#define USBTV_ISOC_PACKETS 8
+
+#define USBTV_CHUNK_SIZE 256
+#define USBTV_CHUNK 240
+
+/* Chunk header. */
+#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
+ == 0x88000000)
+#define USBTV_FRAME_ID(chunk) ((be32_to_cpu(chunk[0]) & 0x00ff0000) >> 16)
+#define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15)
+#define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff)
+
+#define USBTV_TV_STD (V4L2_STD_525_60 | V4L2_STD_PAL)
+
+/* parameters for supported TV norms */
+struct usbtv_norm_params {
+ v4l2_std_id norm;
+ int cap_width, cap_height;
+};
+
+/* A single videobuf2 frame buffer. */
+struct usbtv_buf {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+/* Per-device structure. */
+struct usbtv {
+ struct device *dev;
+ struct usb_device *udev;
+
+ /* video */
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct vb2_queue vb2q;
+ struct mutex v4l2_lock;
+ struct mutex vb2q_lock;
+
+ /* List of videobuf2 buffers protected by a lock. */
+ spinlock_t buflock;
+ struct list_head bufs;
+
+ /* Number of the currently processed frame, useful to find
+ * out when a new one begins. */
+ u32 frame_id;
+ int chunks_done;
+
+ enum {
+ USBTV_COMPOSITE_INPUT,
+ USBTV_SVIDEO_INPUT,
+ } input;
+ v4l2_std_id norm;
+ int width, height;
+ int n_chunks;
+ int iso_size;
+ unsigned int sequence;
+ struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
+};
+
+int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size);
+
+int usbtv_video_init(struct usbtv *usbtv);
+void usbtv_video_free(struct usbtv *usbtv);
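The chunk-header macros above pack a magic byte, a frame id, the field parity and a chunk number into the first 32-bit word of every 256-word chunk. A minimal debugging sketch, assuming it is built as part of the driver; usbtv_dump_chunk_header() is not an existing function, only the macros and struct fields come from the header above:

#include "usbtv.h"

/* Print the fields encoded in the leading word of a USBTV chunk. */
static void usbtv_dump_chunk_header(struct usbtv *usbtv, __be32 *chunk)
{
        if (!USBTV_MAGIC_OK(chunk))
                return;         /* not a video chunk, ignore it */

        dev_dbg(usbtv->dev, "frame %u, %s field, chunk %u of %d\n",
                USBTV_FRAME_ID(chunk),
                USBTV_ODD(chunk) ? "odd" : "even",
                USBTV_CHUNK_NO(chunk), usbtv->n_chunks);
}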
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/media/usb/usbvision/usbvision.h
index 8a25876d72c6..a0c73cf1517c 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/media/usb/usbvision/usbvision.h
@@ -203,14 +203,6 @@ enum {
mr = LIMIT_RGB(mm_r); \
}
-/* Debugging aid */
-#define USBVISION_SAY_AND_WAIT(what) { \
- wait_queue_head_t wq; \
- init_waitqueue_head(&wq); \
- printk(KERN_INFO "Say: %s\n", what); \
- interruptible_sleep_on_timeout(&wq, HZ * 3); \
-}
-
/*
* This macro checks if usbvision is still operational. The 'usbvision'
* pointer must be valid, usbvision->dev must be valid, we are not
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index c3bb2502225b..ad47c5cb539a 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -108,11 +108,31 @@ static struct uvc_format_desc uvc_fmts[] = {
.fcc = V4L2_PIX_FMT_Y16,
},
{
- .name = "RGB Bayer",
+ .name = "BGGR Bayer (BY8 )",
.guid = UVC_GUID_FORMAT_BY8,
.fcc = V4L2_PIX_FMT_SBGGR8,
},
{
+ .name = "BGGR Bayer (BA81)",
+ .guid = UVC_GUID_FORMAT_BA81,
+ .fcc = V4L2_PIX_FMT_SBGGR8,
+ },
+ {
+ .name = "GBRG Bayer (GBRG)",
+ .guid = UVC_GUID_FORMAT_GBRG,
+ .fcc = V4L2_PIX_FMT_SGBRG8,
+ },
+ {
+ .name = "GRBG Bayer (GRBG)",
+ .guid = UVC_GUID_FORMAT_GRBG,
+ .fcc = V4L2_PIX_FMT_SGRBG8,
+ },
+ {
+ .name = "RGGB Bayer (RGGB)",
+ .guid = UVC_GUID_FORMAT_RGGB,
+ .fcc = V4L2_PIX_FMT_SRGGB8,
+ },
+ {
.name = "RGB565",
.guid = UVC_GUID_FORMAT_RGBP,
.fcc = V4L2_PIX_FMT_RGB565,
@@ -925,7 +945,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
case UVC_VC_HEADER:
n = buflen >= 12 ? buffer[11] : 0;
- if (buflen < 12 || buflen < 12 + n) {
+ if (buflen < 12 + n) {
uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
"interface %d HEADER error\n", udev->devnum,
alts->desc.bInterfaceNumber);
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index cd962be860ca..6e92d2080255 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -48,12 +48,14 @@ static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
struct uvc_streaming *stream =
container_of(queue, struct uvc_streaming, queue);
- if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
- *nbuffers = UVC_MAX_VIDEO_BUFFERS;
+ /* Make sure the image size is large enough. */
+ if (fmt && fmt->fmt.pix.sizeimage < stream->ctrl.dwMaxVideoFrameSize)
+ return -EINVAL;
*nplanes = 1;
- sizes[0] = stream->ctrl.dwMaxVideoFrameSize;
+ sizes[0] = fmt ? fmt->fmt.pix.sizeimage
+ : stream->ctrl.dwMaxVideoFrameSize;
return 0;
}
@@ -104,15 +106,15 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&queue->irqlock, flags);
}
-static int uvc_buffer_finish(struct vb2_buffer *vb)
+static void uvc_buffer_finish(struct vb2_buffer *vb)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
struct uvc_streaming *stream =
container_of(queue, struct uvc_streaming, queue);
struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
- uvc_video_clock_update(stream, &vb->v4l2_buf, buf);
- return 0;
+ if (vb->state == VB2_BUF_STATE_DONE)
+ uvc_video_clock_update(stream, &vb->v4l2_buf, buf);
}
static void uvc_wait_prepare(struct vb2_queue *vq)
@@ -149,7 +151,8 @@ int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
queue->queue.ops = &uvc_queue_qops;
queue->queue.mem_ops = &vb2_vmalloc_memops;
- queue->queue.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
+ | V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
ret = vb2_queue_init(&queue->queue);
if (ret)
return ret;
@@ -196,6 +199,18 @@ int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
return ret;
}
+int uvc_create_buffers(struct uvc_video_queue *queue,
+ struct v4l2_create_buffers *cb)
+{
+ int ret;
+
+ mutex_lock(&queue->mutex);
+ ret = vb2_create_bufs(&queue->queue, cb);
+ mutex_unlock(&queue->mutex);
+
+ return ret;
+}
+
int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
int ret;
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 3afff92804d3..378ae02e593b 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -1000,6 +1000,17 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return uvc_query_buffer(&stream->queue, buf);
}
+ case VIDIOC_CREATE_BUFS:
+ {
+ struct v4l2_create_buffers *cb = arg;
+
+ ret = uvc_acquire_privileges(handle);
+ if (ret < 0)
+ return ret;
+
+ return uvc_create_buffers(&stream->queue, cb);
+ }
+
case VIDIOC_QBUF:
if (!uvc_has_privileges(handle))
return -EBUSY;
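With the VIDIOC_CREATE_BUFS case wired up above (and the matching uvc_create_buffers() helper in uvc_queue.c), user space can add buffers to the uvcvideo queue on top of an earlier REQBUFS; the driver still requires streaming privileges, as the uvc_acquire_privileges() call shows. A rough user-space sketch, with an arbitrary buffer count:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Ask the driver for four extra MMAP buffers matching 'fmt'.
 * Returns the index of the first newly created buffer, or -1 on error. */
static int demo_create_bufs(int fd, const struct v4l2_format *fmt)
{
        struct v4l2_create_buffers cb;

        memset(&cb, 0, sizeof(cb));
        cb.count = 4;
        cb.memory = V4L2_MEMORY_MMAP;
        cb.format = *fmt;       /* sizeimage must cover dwMaxVideoFrameSize */

        if (ioctl(fd, VIDIOC_CREATE_BUFS, &cb) < 0)
                return -1;

        return cb.index;        /* index of the first buffer just created */
}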
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 898c208889cd..8d52baf5952b 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1453,6 +1453,9 @@ static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
case USB_SPEED_HIGH:
psize = usb_endpoint_maxp(&ep->desc);
return (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
+ case USB_SPEED_WIRELESS:
+ psize = usb_endpoint_maxp(&ep->desc);
+ return psize;
default:
psize = usb_endpoint_maxp(&ep->desc);
return psize & 0x07ff;
@@ -1847,7 +1850,25 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
if (!enable) {
uvc_uninit_video(stream, 1);
- usb_set_interface(stream->dev->udev, stream->intfnum, 0);
+ if (stream->intf->num_altsetting > 1) {
+ usb_set_interface(stream->dev->udev,
+ stream->intfnum, 0);
+ } else {
+ /* UVC doesn't specify how to inform a bulk-based device
+ * when the video stream is stopped. Windows sends a
+ * CLEAR_FEATURE(HALT) request to the video streaming
+ * bulk endpoint, mimic the same behaviour.
+ */
+ unsigned int epnum = stream->header.bEndpointAddress
+ & USB_ENDPOINT_NUMBER_MASK;
+ unsigned int dir = stream->header.bEndpointAddress
+ & USB_ENDPOINT_DIR_MASK;
+ unsigned int pipe;
+
+ pipe = usb_sndbulkpipe(stream->dev->udev, epnum) | dir;
+ usb_clear_halt(stream->dev->udev, pipe);
+ }
+
uvc_queue_enable(&stream->queue, 0);
uvc_video_clock_cleanup(stream);
return 0;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 9e35982d099a..b1f69a6d4068 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -94,6 +94,18 @@
#define UVC_GUID_FORMAT_BY8 \
{ 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BA81 \
+ { 'B', 'A', '8', '1', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GBRG \
+ { 'G', 'B', 'R', 'G', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GRBG \
+ { 'G', 'R', 'B', 'G', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RGGB \
+ { 'R', 'G', 'G', 'B', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_RGBP \
{ 'R', 'G', 'B', 'P', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
@@ -115,8 +127,6 @@
#define UVC_URBS 5
/* Maximum number of packets per URB. */
#define UVC_MAX_PACKETS 32
-/* Maximum number of video buffers. */
-#define UVC_MAX_VIDEO_BUFFERS 32
/* Maximum status buffer size in bytes of interrupt URB. */
#define UVC_MAX_STATUS_SIZE 16
@@ -616,6 +626,8 @@ extern int uvc_alloc_buffers(struct uvc_video_queue *queue,
extern void uvc_free_buffers(struct uvc_video_queue *queue);
extern int uvc_query_buffer(struct uvc_video_queue *queue,
struct v4l2_buffer *v4l2_buf);
+extern int uvc_create_buffers(struct uvc_video_queue *queue,
+ struct v4l2_create_buffers *v4l2_cb);
extern int uvc_queue_buffer(struct uvc_video_queue *queue,
struct v4l2_buffer *v4l2_buf);
extern int uvc_dequeue_buffer(struct uvc_video_queue *queue,
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 8f7a6a454a4c..04b2daf567be 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -733,14 +733,14 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
put_user(kp->pending, &up->pending) ||
put_user(kp->sequence, &up->sequence) ||
- put_compat_timespec(&kp->timestamp, &up->timestamp) ||
+ compat_put_timespec(&kp->timestamp, &up->timestamp) ||
put_user(kp->id, &up->id) ||
copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
return -EFAULT;
return 0;
}
-struct v4l2_subdev_edid32 {
+struct v4l2_edid32 {
__u32 pad;
__u32 start_block;
__u32 blocks;
@@ -748,11 +748,11 @@ struct v4l2_subdev_edid32 {
compat_caddr_t edid;
};
-static int get_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subdev_edid32 __user *up)
+static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
{
u32 tmp;
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_subdev_edid32)) ||
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) ||
get_user(kp->pad, &up->pad) ||
get_user(kp->start_block, &up->start_block) ||
get_user(kp->blocks, &up->blocks) ||
@@ -763,11 +763,11 @@ static int get_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
return 0;
}
-static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subdev_edid32 __user *up)
+static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
{
u32 tmp = (u32)((unsigned long)kp->edid);
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_subdev_edid32)) ||
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) ||
put_user(kp->pad, &up->pad) ||
put_user(kp->start_block, &up->start_block) ||
put_user(kp->blocks, &up->blocks) ||
@@ -787,8 +787,8 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32)
#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32)
#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
-#define VIDIOC_SUBDEV_G_EDID32 _IOWR('V', 63, struct v4l2_subdev_edid32)
-#define VIDIOC_SUBDEV_S_EDID32 _IOWR('V', 64, struct v4l2_subdev_edid32)
+#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32)
+#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32)
#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
@@ -816,7 +816,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
struct v4l2_ext_controls v2ecs;
struct v4l2_event v2ev;
struct v4l2_create_buffers v2crt;
- struct v4l2_subdev_edid v2edid;
+ struct v4l2_edid v2edid;
unsigned long vx;
int vi;
} karg;
@@ -849,8 +849,8 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;
case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break;
case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break;
- case VIDIOC_SUBDEV_G_EDID32: cmd = VIDIOC_SUBDEV_G_EDID; break;
- case VIDIOC_SUBDEV_S_EDID32: cmd = VIDIOC_SUBDEV_S_EDID; break;
+ case VIDIOC_G_EDID32: cmd = VIDIOC_G_EDID; break;
+ case VIDIOC_S_EDID32: cmd = VIDIOC_S_EDID; break;
}
switch (cmd) {
@@ -868,9 +868,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
compatible_arg = 0;
break;
- case VIDIOC_SUBDEV_G_EDID:
- case VIDIOC_SUBDEV_S_EDID:
- err = get_v4l2_subdev_edid32(&karg.v2edid, up);
+ case VIDIOC_G_EDID:
+ case VIDIOC_S_EDID:
+ err = get_v4l2_edid32(&karg.v2edid, up);
compatible_arg = 0;
break;
@@ -966,9 +966,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
err = put_v4l2_event32(&karg.v2ev, up);
break;
- case VIDIOC_SUBDEV_G_EDID:
- case VIDIOC_SUBDEV_S_EDID:
- err = put_v4l2_subdev_edid32(&karg.v2edid, up);
+ case VIDIOC_G_EDID:
+ case VIDIOC_S_EDID:
+ err = put_v4l2_edid32(&karg.v2edid, up);
break;
case VIDIOC_G_FMT:
@@ -1006,103 +1006,14 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
if (!file->f_op->unlocked_ioctl)
return ret;
- switch (cmd) {
- case VIDIOC_QUERYCAP:
- case VIDIOC_RESERVED:
- case VIDIOC_ENUM_FMT:
- case VIDIOC_G_FMT32:
- case VIDIOC_S_FMT32:
- case VIDIOC_REQBUFS:
- case VIDIOC_QUERYBUF32:
- case VIDIOC_G_FBUF32:
- case VIDIOC_S_FBUF32:
- case VIDIOC_OVERLAY32:
- case VIDIOC_QBUF32:
- case VIDIOC_EXPBUF:
- case VIDIOC_DQBUF32:
- case VIDIOC_STREAMON32:
- case VIDIOC_STREAMOFF32:
- case VIDIOC_G_PARM:
- case VIDIOC_S_PARM:
- case VIDIOC_G_STD:
- case VIDIOC_S_STD:
- case VIDIOC_ENUMSTD32:
- case VIDIOC_ENUMINPUT32:
- case VIDIOC_G_CTRL:
- case VIDIOC_S_CTRL:
- case VIDIOC_G_TUNER:
- case VIDIOC_S_TUNER:
- case VIDIOC_G_AUDIO:
- case VIDIOC_S_AUDIO:
- case VIDIOC_QUERYCTRL:
- case VIDIOC_QUERYMENU:
- case VIDIOC_G_INPUT32:
- case VIDIOC_S_INPUT32:
- case VIDIOC_G_OUTPUT32:
- case VIDIOC_S_OUTPUT32:
- case VIDIOC_ENUMOUTPUT:
- case VIDIOC_G_AUDOUT:
- case VIDIOC_S_AUDOUT:
- case VIDIOC_G_MODULATOR:
- case VIDIOC_S_MODULATOR:
- case VIDIOC_S_FREQUENCY:
- case VIDIOC_G_FREQUENCY:
- case VIDIOC_CROPCAP:
- case VIDIOC_G_CROP:
- case VIDIOC_S_CROP:
- case VIDIOC_G_SELECTION:
- case VIDIOC_S_SELECTION:
- case VIDIOC_G_JPEGCOMP:
- case VIDIOC_S_JPEGCOMP:
- case VIDIOC_QUERYSTD:
- case VIDIOC_TRY_FMT32:
- case VIDIOC_ENUMAUDIO:
- case VIDIOC_ENUMAUDOUT:
- case VIDIOC_G_PRIORITY:
- case VIDIOC_S_PRIORITY:
- case VIDIOC_G_SLICED_VBI_CAP:
- case VIDIOC_LOG_STATUS:
- case VIDIOC_G_EXT_CTRLS32:
- case VIDIOC_S_EXT_CTRLS32:
- case VIDIOC_TRY_EXT_CTRLS32:
- case VIDIOC_ENUM_FRAMESIZES:
- case VIDIOC_ENUM_FRAMEINTERVALS:
- case VIDIOC_G_ENC_INDEX:
- case VIDIOC_ENCODER_CMD:
- case VIDIOC_TRY_ENCODER_CMD:
- case VIDIOC_DECODER_CMD:
- case VIDIOC_TRY_DECODER_CMD:
- case VIDIOC_DBG_S_REGISTER:
- case VIDIOC_DBG_G_REGISTER:
- case VIDIOC_S_HW_FREQ_SEEK:
- case VIDIOC_S_DV_TIMINGS:
- case VIDIOC_G_DV_TIMINGS:
- case VIDIOC_DQEVENT:
- case VIDIOC_DQEVENT32:
- case VIDIOC_SUBSCRIBE_EVENT:
- case VIDIOC_UNSUBSCRIBE_EVENT:
- case VIDIOC_CREATE_BUFS32:
- case VIDIOC_PREPARE_BUF32:
- case VIDIOC_ENUM_DV_TIMINGS:
- case VIDIOC_QUERY_DV_TIMINGS:
- case VIDIOC_DV_TIMINGS_CAP:
- case VIDIOC_ENUM_FREQ_BANDS:
- case VIDIOC_SUBDEV_G_EDID32:
- case VIDIOC_SUBDEV_S_EDID32:
+ if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
ret = do_video_ioctl(file, cmd, arg);
- break;
+ else if (vdev->fops->compat_ioctl32)
+ ret = vdev->fops->compat_ioctl32(file, cmd, arg);
- default:
- if (vdev->fops->compat_ioctl32)
- ret = vdev->fops->compat_ioctl32(file, cmd, arg);
-
- if (ret == -ENOIOCTLCMD)
- printk(KERN_WARNING "compat_ioctl32: "
- "unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
- _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd),
- cmd);
- break;
- }
+ if (ret == -ENOIOCTLCMD)
+ pr_warn("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
+ _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32);
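The simplified dispatch above leans on the standard ioctl number encoding: all public V4L2 ioctls use type 'V' with a command number below BASE_VIDIOC_PRIVATE (192), which is why the long per-ioctl whitelist could be dropped. A quick user-space illustration of the same test:

#include <stdio.h>
#include <linux/ioctl.h>        /* _IOC_TYPE, _IOC_NR, _IOC_DIR */
#include <linux/videodev2.h>    /* VIDIOC_QUERYCAP, BASE_VIDIOC_PRIVATE */

int main(void)
{
        unsigned int cmd = VIDIOC_QUERYCAP;

        printf("type='%c' nr=%u dir=%u\n",
               _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_DIR(cmd));
        printf("routed to do_video_ioctl(): %d\n",
               _IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE);
        return 0;
}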
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 6ff002bd5909..55c683254102 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -735,6 +735,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS";
case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count";
case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control";
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range";
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
/* VPX controls */
@@ -857,6 +859,17 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls";
case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis";
case V4L2_CID_RDS_RECEPTION: return "RDS Reception";
+
+ case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls";
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto";
+ case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain";
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto";
+ case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain";
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto";
+ case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth";
+ case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock";
default:
return NULL;
}
@@ -906,10 +919,19 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_WIDE_DYNAMIC_RANGE:
case V4L2_CID_IMAGE_STABILIZATION:
case V4L2_CID_RDS_RECEPTION:
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
*type = V4L2_CTRL_TYPE_BOOLEAN;
*min = 0;
*max = *step = 1;
break;
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
case V4L2_CID_PAN_RESET:
case V4L2_CID_TILT_RESET:
case V4L2_CID_FLASH_STROBE:
@@ -991,6 +1013,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_IMAGE_PROC_CLASS:
case V4L2_CID_DV_CLASS:
case V4L2_CID_FM_RX_CLASS:
+ case V4L2_CID_RF_TUNER_CLASS:
*type = V4L2_CTRL_TYPE_CTRL_CLASS;
/* You can neither read nor write these */
*flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
@@ -1063,6 +1086,10 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_PILOT_TONE_FREQUENCY:
case V4L2_CID_TUNE_POWER_LEVEL:
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
+ case V4L2_CID_RF_TUNER_LNA_GAIN:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN:
+ case V4L2_CID_RF_TUNER_IF_GAIN:
+ case V4L2_CID_RF_TUNER_BANDWIDTH:
*flags |= V4L2_CTRL_FLAG_SLIDER;
break;
case V4L2_CID_PAN_RELATIVE:
@@ -1081,6 +1108,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_DV_RX_POWER_PRESENT:
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
break;
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ *flags |= V4L2_CTRL_FLAG_VOLATILE;
+ break;
}
}
EXPORT_SYMBOL(v4l2_ctrl_fill);
@@ -1921,7 +1951,8 @@ void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
int i;
/* The first control is the master control and it must not be NULL */
- BUG_ON(ncontrols == 0 || controls[0] == NULL);
+ if (WARN_ON(ncontrols == 0 || controls[0] == NULL))
+ return;
for (i = 0; i < ncontrols; i++) {
if (controls[i]) {
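With the new control class and the v4l2_ctrl_fill() entries above, an RF tuner driver can expose these controls through the standard framework; the ranges in the sketch below are placeholders chosen for illustration, not values required by the API:

#include <media/v4l2-ctrls.h>

/* Sketch: hook a few of the new RF tuner controls onto a driver's handler.
 * 'ops' is the driver's v4l2_ctrl_ops; min/max/step/def values are made up. */
static int demo_init_rf_ctrls(struct v4l2_ctrl_handler *hdl,
                              const struct v4l2_ctrl_ops *ops)
{
        v4l2_ctrl_handler_init(hdl, 3);

        /* boolean, per v4l2_ctrl_fill() */
        v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RF_TUNER_LNA_GAIN_AUTO, 0, 1, 1, 1);
        /* integer, gets V4L2_CTRL_FLAG_SLIDER from v4l2_ctrl_fill() */
        v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 15, 1, 8);
        /* boolean, marked volatile so it is re-read from the hardware */
        v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RF_TUNER_PLL_LOCK, 0, 1, 1, 0);

        return hdl->error;
}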
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 0a30dbf3d05c..634d863c05b4 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -554,6 +554,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER;
bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
+ bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
@@ -662,9 +663,20 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_try_fmt_sliced_vbi_out)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
+ } else if (is_sdr) {
+ /* SDR specific ioctls */
+ if (ops->vidioc_enum_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
+ if (ops->vidioc_g_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ if (ops->vidioc_s_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ if (ops->vidioc_try_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
}
- if (!is_radio) {
- /* ioctls valid for video or vbi */
+
+ if (is_vid || is_vbi || is_sdr) {
+ /* ioctls valid for video, vbi or sdr */
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
@@ -672,6 +684,10 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
+ }
+
+ if (is_vid || is_vbi) {
+ /* ioctls valid for video or vbi */
if (ops->vidioc_s_std)
set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
@@ -685,6 +701,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio);
SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid);
}
if (is_tx) {
SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
@@ -710,9 +727,10 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid);
}
- if (is_tx) {
- /* transmitter only ioctls */
+ if (is_tx && (is_radio || is_sdr)) {
+ /* radio transmitter only ioctls */
SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
}
@@ -758,6 +776,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
* %VFL_TYPE_RADIO - A radio card
*
* %VFL_TYPE_SUBDEV - A subdevice
+ *
+ * %VFL_TYPE_SDR - Software Defined Radio
*/
int __video_register_device(struct video_device *vdev, int type, int nr,
int warn_if_nr_in_use, struct module *owner)
@@ -797,6 +817,10 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
case VFL_TYPE_SUBDEV:
name_base = "v4l-subdev";
break;
+ case VFL_TYPE_SDR:
+ /* Use device name 'swradio' because 'sdr' was already taken. */
+ name_base = "swradio";
+ break;
default:
printk(KERN_ERR "%s called with unknown type: %d\n",
__func__, type);
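Given the new VFL_TYPE_SDR case, an SDR capture driver registers its node exactly like a video device and ends up with a /dev/swradioN name. A bare-bones sketch; the 'demo' names are placeholders and the fops/ioctl_ops are assumed to be defined elsewhere in the driver:

#include <linux/string.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>

/* Register an SDR receiver node with the V4L2 core. */
static int demo_register_sdr(struct v4l2_device *v4l2_dev,
                             struct video_device *vdev,
                             const struct v4l2_file_operations *fops,
                             const struct v4l2_ioctl_ops *ioctl_ops)
{
        strlcpy(vdev->name, "demo-sdr", sizeof(vdev->name));
        vdev->v4l2_dev = v4l2_dev;
        vdev->fops = fops;
        vdev->ioctl_ops = ioctl_ops;
        vdev->release = video_device_release_empty;
        vdev->vfl_dir = VFL_DIR_RX;

        /* VFL_TYPE_SDR maps to the "swradio" name base chosen above */
        return video_register_device(vdev, VFL_TYPE_SDR, -1);
}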
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index f7902fe8a526..48b20dfcc4d0 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -26,6 +26,10 @@
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-dv-timings.h>
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
+MODULE_LICENSE("GPL");
+
const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {
V4L2_DV_BT_CEA_640X480P59_94,
V4L2_DV_BT_CEA_720X480I59_94,
@@ -324,6 +328,10 @@ EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
* This function will attempt to detect if the given values correspond to a
* valid CVT format. If so, then it will return true, and fmt will be filled
* in with the found CVT timings.
+ *
+ * TODO: VESA defined a new version 2 of their reduced blanking
+ * formula. Support for that is currently missing in this CVT
+ * detection function.
*/
bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
u32 polarities, struct v4l2_dv_timings *fmt)
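v4l2_detect_cvt() is typically fed the measurements an HDMI/DVI receiver reports for an otherwise unknown mode. A hedged usage sketch; every input value below is an invented example, not a real capture:

#include <linux/videodev2.h>
#include <media/v4l2-dv-timings.h>

/* Ask the CVT detector whether measured timings look like a CVT mode. */
static bool demo_probe_cvt(struct v4l2_dv_timings *t)
{
        unsigned frame_height = 1125;   /* total lines including blanking */
        unsigned hfreq = 67500;         /* measured horizontal frequency, Hz */
        unsigned vsync = 5;             /* vsync width in lines */
        u32 pol = V4L2_DV_VSYNC_POS_POL;

        return v4l2_detect_cvt(frame_height, hfreq, vsync, pol, t);
}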
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 707aef705a47..d9113cc71c77 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -152,6 +152,7 @@ const char *v4l2_type_names[] = {
[V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay",
[V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane",
[V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane",
+ [V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap",
};
EXPORT_SYMBOL(v4l2_type_names);
@@ -245,6 +246,7 @@ static void v4l_print_format(const void *arg, bool write_only)
const struct v4l2_vbi_format *vbi;
const struct v4l2_sliced_vbi_format *sliced;
const struct v4l2_window *win;
+ const struct v4l2_sdr_format *sdr;
unsigned i;
pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -318,6 +320,14 @@ static void v4l_print_format(const void *arg, bool write_only)
sliced->service_lines[0][i],
sliced->service_lines[1][i]);
break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ sdr = &p->fmt.sdr;
+ pr_cont(", pixelformat=%c%c%c%c\n",
+ (sdr->pixelformat >> 0) & 0xff,
+ (sdr->pixelformat >> 8) & 0xff,
+ (sdr->pixelformat >> 16) & 0xff,
+ (sdr->pixelformat >> 24) & 0xff);
+ break;
}
}
@@ -834,6 +844,14 @@ static void v4l_print_freq_band(const void *arg, bool write_only)
p->rangehigh, p->modulation);
}
+static void v4l_print_edid(const void *arg, bool write_only)
+{
+ const struct v4l2_edid *p = arg;
+
+ pr_cont("pad=%u, start_block=%u, blocks=%u\n",
+ p->pad, p->start_block, p->blocks);
+}
+
static void v4l_print_u32(const void *arg, bool write_only)
{
pr_cont("value=%u\n", *(const u32 *)arg);
@@ -881,6 +899,7 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -930,6 +949,10 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
if (is_vbi && is_tx && ops->vidioc_g_fmt_sliced_vbi_out)
return 0;
break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap)
+ return 0;
+ break;
default:
break;
}
@@ -1049,6 +1072,10 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!is_tx || !ops->vidioc_enum_fmt_vid_out_mplane))
break;
return ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !ops->vidioc_enum_fmt_sdr_cap))
+ break;
+ return ops->vidioc_enum_fmt_sdr_cap(file, fh, arg);
}
return -EINVAL;
}
@@ -1059,6 +1086,7 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -1103,6 +1131,10 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_out))
break;
return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !is_sdr || !ops->vidioc_g_fmt_sdr_cap))
+ break;
+ return ops->vidioc_g_fmt_sdr_cap(file, fh, arg);
}
return -EINVAL;
}
@@ -1113,6 +1145,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -1167,6 +1200,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
break;
CLEAR_AFTER_FIELD(p, fmt.sliced);
return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !is_sdr || !ops->vidioc_s_fmt_sdr_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sdr);
+ return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
}
return -EINVAL;
}
@@ -1177,6 +1215,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -1231,6 +1270,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
break;
CLEAR_AFTER_FIELD(p, fmt.sliced);
return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !is_sdr || !ops->vidioc_try_fmt_sdr_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sdr);
+ return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
}
return -EINVAL;
}
@@ -1291,8 +1335,11 @@ static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
struct video_device *vfd = video_devdata(file);
struct v4l2_frequency *p = arg;
- p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (vfd->vfl_type == VFL_TYPE_SDR)
+ p->type = V4L2_TUNER_ADC;
+ else
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
return ops->vidioc_g_frequency(file, fh, p);
}
@@ -1303,10 +1350,15 @@ static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
const struct v4l2_frequency *p = arg;
enum v4l2_tuner_type type;
- type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- if (p->type != type)
- return -EINVAL;
+ if (vfd->vfl_type == VFL_TYPE_SDR) {
+ if (p->type != V4L2_TUNER_ADC && p->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ } else {
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (type != p->type)
+ return -EINVAL;
+ }
return ops->vidioc_s_frequency(file, fh, p);
}
@@ -1386,6 +1438,10 @@ static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
struct v4l2_hw_freq_seek *p = arg;
enum v4l2_tuner_type type;
+ /* s_hw_freq_seek is not supported for SDR for now */
+ if (vfd->vfl_type == VFL_TYPE_SDR)
+ return -EINVAL;
+
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
if (p->type != type)
@@ -1885,11 +1941,16 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
enum v4l2_tuner_type type;
int err;
- type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
-
- if (type != p->type)
- return -EINVAL;
+ if (vfd->vfl_type == VFL_TYPE_SDR) {
+ if (p->type != V4L2_TUNER_ADC && p->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ type = p->type;
+ } else {
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (type != p->type)
+ return -EINVAL;
+ }
if (ops->vidioc_enum_freq_bands)
return ops->vidioc_enum_freq_bands(file, fh, p);
if (is_valid_ioctl(vfd, VIDIOC_G_TUNER)) {
@@ -2009,6 +2070,8 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_FNC(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
IOCTL_INFO_STD(VIDIOC_G_INPUT, vidioc_g_input, v4l_print_u32, 0),
IOCTL_INFO_FNC(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_EDID, vidioc_g_edid, v4l_print_edid, INFO_FL_CLEAR(v4l2_edid, edid)),
+ IOCTL_INFO_STD(VIDIOC_S_EDID, vidioc_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_edid, edid)),
IOCTL_INFO_STD(VIDIOC_G_OUTPUT, vidioc_g_output, v4l_print_u32, 0),
IOCTL_INFO_FNC(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
@@ -2221,9 +2284,9 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
break;
}
- case VIDIOC_SUBDEV_G_EDID:
- case VIDIOC_SUBDEV_S_EDID: {
- struct v4l2_subdev_edid *edid = parg;
+ case VIDIOC_G_EDID:
+ case VIDIOC_S_EDID: {
+ struct v4l2_edid *edid = parg;
if (edid->blocks) {
if (edid->blocks > 256) {
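For completeness, the SDR plumbing added to v4l_g_fmt()/v4l_s_fmt() earlier in this file dispatches to per-driver vidioc_*_fmt_sdr_cap hooks. A minimal sketch of such a hook follows; the fourcc is only an example value, and the function would be referenced from the driver's v4l2_ioctl_ops as .vidioc_g_fmt_sdr_cap:

#include <linux/fs.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>

/* Report the stream format of a hypothetical SDR capture device. */
static int demo_g_fmt_sdr_cap(struct file *file, void *priv,
                              struct v4l2_format *f)
{
        /* example fourcc for complex unsigned 8-bit samples */
        f->fmt.sdr.pixelformat = v4l2_fourcc('C', 'U', '0', '8');
        return 0;
}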
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
index 42e3e8a5e361..b4ed9a955fbe 100644
--- a/drivers/media/v4l2-core/v4l2-of.c
+++ b/drivers/media/v4l2-core/v4l2-of.c
@@ -127,17 +127,9 @@ static void v4l2_of_parse_parallel_bus(const struct device_node *node,
int v4l2_of_parse_endpoint(const struct device_node *node,
struct v4l2_of_endpoint *endpoint)
{
- struct device_node *port_node = of_get_parent(node);
-
- memset(endpoint, 0, offsetof(struct v4l2_of_endpoint, head));
-
- endpoint->local_node = node;
- /*
- * It doesn't matter whether the two calls below succeed.
- * If they don't then the default value 0 is used.
- */
- of_property_read_u32(port_node, "reg", &endpoint->port);
- of_property_read_u32(node, "reg", &endpoint->id);
+ of_graph_parse_endpoint(node, &endpoint->base);
+ endpoint->bus_type = 0;
+ memset(&endpoint->bus, 0, sizeof(endpoint->bus));
v4l2_of_parse_csi_bus(node, endpoint);
/*
@@ -147,125 +139,6 @@ int v4l2_of_parse_endpoint(const struct device_node *node,
if (endpoint->bus.mipi_csi2.flags == 0)
v4l2_of_parse_parallel_bus(node, endpoint);
- of_node_put(port_node);
-
return 0;
}
EXPORT_SYMBOL(v4l2_of_parse_endpoint);
-
-/**
- * v4l2_of_get_next_endpoint() - get next endpoint node
- * @parent: pointer to the parent device node
- * @prev: previous endpoint node, or NULL to get first
- *
- * Return: An 'endpoint' node pointer with refcount incremented. Refcount
- * of the passed @prev node is not decremented, the caller have to use
- * of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent,
- struct device_node *prev)
-{
- struct device_node *endpoint;
- struct device_node *port = NULL;
-
- if (!parent)
- return NULL;
-
- if (!prev) {
- struct device_node *node;
- /*
- * It's the first call, we have to find a port subnode
- * within this node or within an optional 'ports' node.
- */
- node = of_get_child_by_name(parent, "ports");
- if (node)
- parent = node;
-
- port = of_get_child_by_name(parent, "port");
-
- if (port) {
- /* Found a port, get an endpoint. */
- endpoint = of_get_next_child(port, NULL);
- of_node_put(port);
- } else {
- endpoint = NULL;
- }
-
- if (!endpoint)
- pr_err("%s(): no endpoint nodes specified for %s\n",
- __func__, parent->full_name);
- of_node_put(node);
- } else {
- port = of_get_parent(prev);
- if (!port)
- /* Hm, has someone given us the root node ?... */
- return NULL;
-
- /* Avoid dropping prev node refcount to 0. */
- of_node_get(prev);
- endpoint = of_get_next_child(port, prev);
- if (endpoint) {
- of_node_put(port);
- return endpoint;
- }
-
- /* No more endpoints under this port, try the next one. */
- do {
- port = of_get_next_child(parent, port);
- if (!port)
- return NULL;
- } while (of_node_cmp(port->name, "port"));
-
- /* Pick up the first endpoint in this port. */
- endpoint = of_get_next_child(port, NULL);
- of_node_put(port);
- }
-
- return endpoint;
-}
-EXPORT_SYMBOL(v4l2_of_get_next_endpoint);
-
-/**
- * v4l2_of_get_remote_port_parent() - get remote port's parent node
- * @node: pointer to a local endpoint device_node
- *
- * Return: Remote device node associated with remote endpoint node linked
- * to @node. Use of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_remote_port_parent(
- const struct device_node *node)
-{
- struct device_node *np;
- unsigned int depth;
-
- /* Get remote endpoint node. */
- np = of_parse_phandle(node, "remote-endpoint", 0);
-
- /* Walk 3 levels up only if there is 'ports' node. */
- for (depth = 3; depth && np; depth--) {
- np = of_get_next_parent(np);
- if (depth == 2 && of_node_cmp(np->name, "ports"))
- break;
- }
- return np;
-}
-EXPORT_SYMBOL(v4l2_of_get_remote_port_parent);
-
-/**
- * v4l2_of_get_remote_port() - get remote port node
- * @node: pointer to a local endpoint device_node
- *
- * Return: Remote port node associated with remote endpoint node linked
- * to @node. Use of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_remote_port(const struct device_node *node)
-{
- struct device_node *np;
-
- /* Get remote endpoint node. */
- np = of_parse_phandle(node, "remote-endpoint", 0);
- if (!np)
- return NULL;
- return of_get_next_parent(np);
-}
-EXPORT_SYMBOL(v4l2_of_get_remote_port);
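The walkers removed above now live in the generic OF graph support (of_graph_get_next_endpoint() and of_graph_parse_endpoint() in <linux/of_graph.h>), with v4l2_of_parse_endpoint() reduced to a thin wrapper. A sketch of how a bridge driver enumerates its endpoints after this change, with reference handling and error paths trimmed:

#include <linux/of.h>
#include <linux/of_graph.h>
#include <media/v4l2-of.h>

/* Walk every endpoint below 'dev_node' and parse its bus configuration. */
static void demo_walk_endpoints(struct device_node *dev_node)
{
        struct device_node *ep = NULL;
        struct v4l2_of_endpoint cfg;

        while ((ep = of_graph_get_next_endpoint(dev_node, ep))) {
                if (v4l2_of_parse_endpoint(ep, &cfg))
                        continue;
                pr_info("port %u, endpoint %u\n", cfg.base.port, cfg.base.id);
        }
}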
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 996c248dea42..aea84ac5688a 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -349,10 +349,10 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
sd, pad, set_selection, subdev_fh, sel);
}
- case VIDIOC_SUBDEV_G_EDID:
+ case VIDIOC_G_EDID:
return v4l2_subdev_call(sd, pad, get_edid, arg);
- case VIDIOC_SUBDEV_S_EDID:
+ case VIDIOC_S_EDID:
return v4l2_subdev_call(sd, pad, set_edid, arg);
#endif
default:
@@ -368,6 +368,17 @@ static long subdev_ioctl(struct file *file, unsigned int cmd,
return video_usercopy(file, cmd, arg, subdev_do_ioctl);
}
+#ifdef CONFIG_COMPAT
+static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
+}
+#endif
+
static unsigned int subdev_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
@@ -389,6 +400,9 @@ const struct v4l2_file_operations v4l2_subdev_fops = {
.owner = THIS_MODULE,
.open = subdev_open,
.unlocked_ioctl = subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = subdev_compat_ioctl32,
+#endif
.release = subdev_close,
.poll = subdev_poll,
};
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index a127925c9d61..f9059bb73840 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -33,17 +33,74 @@ module_param(debug, int, 0644);
printk(KERN_DEBUG "vb2: " fmt, ## arg); \
} while (0)
-#define call_memop(q, op, args...) \
- (((q)->mem_ops->op) ? \
- ((q)->mem_ops->op(args)) : 0)
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+
+/*
+ * If advanced debugging is on, then count how often each op is called,
+ * which can either be per-buffer or per-queue.
+ *
+ * If the op failed then the 'fail_' variant is called to decrease the
+ * counter. That makes it easy to check that the 'init' and 'cleanup'
+ * (and variations thereof) stay balanced.
+ */
+
+#define call_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ dprintk(2, "call_memop(%p, %d, %s)%s\n", \
+ _q, (vb)->v4l2_buf.index, #op, \
+ _q->mem_ops->op ? "" : " (nop)"); \
+ (vb)->cnt_mem_ ## op++; \
+ _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
+})
+#define fail_memop(vb, op) ((vb)->cnt_mem_ ## op--)
+
+#define call_qop(q, op, args...) \
+({ \
+ dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
+ (q)->ops->op ? "" : " (nop)"); \
+ (q)->cnt_ ## op++; \
+ (q)->ops->op ? (q)->ops->op(args) : 0; \
+})
+#define fail_qop(q, op) ((q)->cnt_ ## op--)
+
+#define call_vb_qop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
+ _q, (vb)->v4l2_buf.index, #op, \
+ _q->ops->op ? "" : " (nop)"); \
+ (vb)->cnt_ ## op++; \
+ _q->ops->op ? _q->ops->op(args) : 0; \
+})
+#define fail_vb_qop(vb, op) ((vb)->cnt_ ## op--)
+
+#else
+
+#define call_memop(vb, op, args...) \
+ ((vb)->vb2_queue->mem_ops->op ? (vb)->vb2_queue->mem_ops->op(args) : 0)
+#define fail_memop(vb, op)
#define call_qop(q, op, args...) \
- (((q)->ops->op) ? ((q)->ops->op(args)) : 0)
+ ((q)->ops->op ? (q)->ops->op(args) : 0)
+#define fail_qop(q, op)
+#define call_vb_qop(vb, op, args...) \
+ ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
+#define fail_vb_qop(vb, op)
+
+#endif
+
+/* Flags that are set by the vb2 core */
#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
V4L2_BUF_FLAG_PREPARED | \
V4L2_BUF_FLAG_TIMESTAMP_MASK)
+/* Output buffer flags that should be passed on to the driver */
+#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
+ V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
+
+static void __vb2_queue_cancel(struct vb2_queue *q);
/**
* __vb2_buf_mem_alloc() - allocate video memory for the given buffer
@@ -61,7 +118,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
for (plane = 0; plane < vb->num_planes; ++plane) {
unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
- mem_priv = call_memop(q, alloc, q->alloc_ctx[plane],
+ mem_priv = call_memop(vb, alloc, q->alloc_ctx[plane],
size, q->gfp_flags);
if (IS_ERR_OR_NULL(mem_priv))
goto free;
@@ -73,9 +130,10 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
return 0;
free:
+ fail_memop(vb, alloc);
/* Free already allocated memory if one of the allocations failed */
for (; plane > 0; --plane) {
- call_memop(q, put, vb->planes[plane - 1].mem_priv);
+ call_memop(vb, put, vb->planes[plane - 1].mem_priv);
vb->planes[plane - 1].mem_priv = NULL;
}
@@ -87,11 +145,10 @@ free:
*/
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
- struct vb2_queue *q = vb->vb2_queue;
unsigned int plane;
for (plane = 0; plane < vb->num_planes; ++plane) {
- call_memop(q, put, vb->planes[plane].mem_priv);
+ call_memop(vb, put, vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
dprintk(3, "Freed plane %d of buffer %d\n", plane,
vb->v4l2_buf.index);
@@ -104,12 +161,11 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)
*/
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
- struct vb2_queue *q = vb->vb2_queue;
unsigned int plane;
for (plane = 0; plane < vb->num_planes; ++plane) {
if (vb->planes[plane].mem_priv)
- call_memop(q, put_userptr, vb->planes[plane].mem_priv);
+ call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
}
}
@@ -118,15 +174,15 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
* __vb2_plane_dmabuf_put() - release memory associated with
* a DMABUF shared plane
*/
-static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
+static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
if (!p->mem_priv)
return;
if (p->dbuf_mapped)
- call_memop(q, unmap_dmabuf, p->mem_priv);
+ call_memop(vb, unmap_dmabuf, p->mem_priv);
- call_memop(q, detach_dmabuf, p->mem_priv);
+ call_memop(vb, detach_dmabuf, p->mem_priv);
dma_buf_put(p->dbuf);
memset(p, 0, sizeof(*p));
}
@@ -137,11 +193,10 @@ static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
*/
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
- struct vb2_queue *q = vb->vb2_queue;
unsigned int plane;
for (plane = 0; plane < vb->num_planes; ++plane)
- __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+ __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}
/**
@@ -246,10 +301,11 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
* callback, if given. An error in initialization
* results in queue setup failure.
*/
- ret = call_qop(q, buf_init, vb);
+ ret = call_vb_qop(vb, buf_init, vb);
if (ret) {
dprintk(1, "Buffer %d %p initialization"
" failed\n", buffer, vb);
+ fail_vb_qop(vb, buf_init);
__vb2_buf_mem_free(vb);
kfree(vb);
break;
@@ -321,18 +377,79 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
}
/* Call driver-provided cleanup function for each buffer, if provided */
- if (q->ops->buf_cleanup) {
- for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
- ++buffer) {
- if (NULL == q->bufs[buffer])
- continue;
- q->ops->buf_cleanup(q->bufs[buffer]);
- }
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ struct vb2_buffer *vb = q->bufs[buffer];
+
+ if (vb && vb->planes[0].mem_priv)
+ call_vb_qop(vb, buf_cleanup, vb);
}
/* Release video buffer memory */
__vb2_free_mem(q, buffers);
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ /*
+ * Check that all the calls were balanced during the life-time of this
+ * queue. If not (or if the debug level is 1 or up), then dump the
+ * counters to the kernel log.
+ */
+ if (q->num_buffers) {
+ bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
+ q->cnt_wait_prepare != q->cnt_wait_finish;
+
+ if (unbalanced || debug) {
+ pr_info("vb2: counters for queue %p:%s\n", q,
+ unbalanced ? " UNBALANCED!" : "");
+ pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
+ q->cnt_queue_setup, q->cnt_start_streaming,
+ q->cnt_stop_streaming);
+ pr_info("vb2: wait_prepare: %u wait_finish: %u\n",
+ q->cnt_wait_prepare, q->cnt_wait_finish);
+ }
+ q->cnt_queue_setup = 0;
+ q->cnt_wait_prepare = 0;
+ q->cnt_wait_finish = 0;
+ q->cnt_start_streaming = 0;
+ q->cnt_stop_streaming = 0;
+ }
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ struct vb2_buffer *vb = q->bufs[buffer];
+ bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
+ vb->cnt_mem_prepare != vb->cnt_mem_finish ||
+ vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
+ vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
+ vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
+ vb->cnt_buf_queue != vb->cnt_buf_done ||
+ vb->cnt_buf_prepare != vb->cnt_buf_finish ||
+ vb->cnt_buf_init != vb->cnt_buf_cleanup;
+
+ if (unbalanced || debug) {
+ pr_info("vb2: counters for queue %p, buffer %d:%s\n",
+ q, buffer, unbalanced ? " UNBALANCED!" : "");
+ pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
+ vb->cnt_buf_init, vb->cnt_buf_cleanup,
+ vb->cnt_buf_prepare, vb->cnt_buf_finish);
+ pr_info("vb2: buf_queue: %u buf_done: %u\n",
+ vb->cnt_buf_queue, vb->cnt_buf_done);
+ pr_info("vb2: alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
+ vb->cnt_mem_alloc, vb->cnt_mem_put,
+ vb->cnt_mem_prepare, vb->cnt_mem_finish,
+ vb->cnt_mem_mmap);
+ pr_info("vb2: get_userptr: %u put_userptr: %u\n",
+ vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
+ pr_info("vb2: attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
+ vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
+ vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
+ pr_info("vb2: get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
+ vb->cnt_mem_get_dmabuf,
+ vb->cnt_mem_num_users,
+ vb->cnt_mem_vaddr,
+ vb->cnt_mem_cookie);
+ }
+ }
+#endif
+
/* Free videobuf buffers */
for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
++buffer) {
@@ -341,9 +458,10 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
}
q->num_buffers -= buffers;
- if (!q->num_buffers)
+ if (!q->num_buffers) {
q->memory = 0;
- INIT_LIST_HEAD(&q->queued_list);
+ INIT_LIST_HEAD(&q->queued_list);
+ }
return 0;
}
@@ -424,7 +542,7 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
* case anyway. If num_users() returns more than 1,
* we are not the only user of the plane's memory.
*/
- if (mem_priv && call_memop(q, num_users, mem_priv) > 1)
+ if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
return true;
}
return false;
@@ -484,7 +602,16 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
* Clear any buffer state related flags.
*/
b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
- b->flags |= q->timestamp_type;
+ b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
+ if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
+ V4L2_BUF_FLAG_TIMESTAMP_COPY) {
+ /*
+ * For non-COPY timestamps, drop timestamp source bits
+ * and obtain the timestamp source from the queue.
+ */
+ b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
switch (vb->state) {
case VB2_BUF_STATE_QUEUED:
@@ -677,6 +804,12 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return -EBUSY;
}
+ /*
+ * Call queue_cancel to clean up any buffers in the PREPARED or
+ * QUEUED state which is possible if buffers were prepared or
+ * queued without ever calling STREAMON.
+ */
+ __vb2_queue_cancel(q);
ret = __vb2_queue_free(q, q->num_buffers);
if (ret)
return ret;
@@ -693,6 +826,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
* Make sure the requested values and current defaults are sane.
*/
num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
+ num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
q->memory = req->memory;
@@ -703,26 +837,35 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
*/
ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
q->plane_sizes, q->alloc_ctx);
- if (ret)
+ if (ret) {
+ fail_qop(q, queue_setup);
return ret;
+ }
/* Finally, allocate buffers and video memory */
- ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
- if (ret == 0) {
+ allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
+ if (allocated_buffers == 0) {
dprintk(1, "Memory allocation failed\n");
return -ENOMEM;
}
- allocated_buffers = ret;
+ /*
+ * There is no point in continuing if we can't allocate the minimum
+ * number of buffers needed by this vb2_queue.
+ */
+ if (allocated_buffers < q->min_buffers_needed)
+ ret = -ENOMEM;
/*
* Check if driver can handle the allocated number of buffers.
*/
- if (allocated_buffers < num_buffers) {
+ if (!ret && allocated_buffers < num_buffers) {
num_buffers = allocated_buffers;
ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
&num_planes, q->plane_sizes, q->alloc_ctx);
+ if (ret)
+ fail_qop(q, queue_setup);
if (!ret && allocated_buffers < num_buffers)
ret = -ENOMEM;
@@ -736,6 +879,10 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
q->num_buffers = allocated_buffers;
if (ret < 0) {
+ /*
+ * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+ * from q->num_buffers.
+ */
__vb2_queue_free(q, allocated_buffers);
return ret;
}
@@ -803,24 +950,24 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
*/
ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
&num_planes, q->plane_sizes, q->alloc_ctx);
- if (ret)
+ if (ret) {
+ fail_qop(q, queue_setup);
return ret;
+ }
/* Finally, allocate buffers and video memory */
- ret = __vb2_queue_alloc(q, create->memory, num_buffers,
+ allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,
num_planes);
- if (ret == 0) {
+ if (allocated_buffers == 0) {
dprintk(1, "Memory allocation failed\n");
return -ENOMEM;
}
- allocated_buffers = ret;
-
/*
* Check if driver can handle the so far allocated number of buffers.
*/
- if (ret < num_buffers) {
- num_buffers = ret;
+ if (allocated_buffers < num_buffers) {
+ num_buffers = allocated_buffers;
/*
* q->num_buffers contains the total number of buffers, that the
@@ -828,6 +975,8 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
*/
ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
&num_planes, q->plane_sizes, q->alloc_ctx);
+ if (ret)
+ fail_qop(q, queue_setup);
if (!ret && allocated_buffers < num_buffers)
ret = -ENOMEM;
@@ -841,6 +990,10 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
q->num_buffers += allocated_buffers;
if (ret < 0) {
+ /*
+ * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+ * from q->num_buffers.
+ */
__vb2_queue_free(q, allocated_buffers);
return -ENOMEM;
}
@@ -882,12 +1035,10 @@ EXPORT_SYMBOL_GPL(vb2_create_bufs);
*/
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
- struct vb2_queue *q = vb->vb2_queue;
-
if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_memop(q, vaddr, vb->planes[plane_no].mem_priv);
+ return call_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
@@ -905,12 +1056,10 @@ EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
*/
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
- struct vb2_queue *q = vb->vb2_queue;
-
if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_memop(q, cookie, vb->planes[plane_no].mem_priv);
+ return call_memop(vb, cookie, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);
@@ -918,13 +1067,20 @@ EXPORT_SYMBOL_GPL(vb2_plane_cookie);
* vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
* @vb: vb2_buffer returned from the driver
* @state: either VB2_BUF_STATE_DONE if the operation finished successfully
- * or VB2_BUF_STATE_ERROR if the operation finished with an error
+ * or VB2_BUF_STATE_ERROR if the operation finished with an error.
+ * If start_streaming fails then it should return buffers with state
+ * VB2_BUF_STATE_QUEUED to put them back into the queue.
*
* This function should be called by the driver after a hardware operation on
* a buffer is finished and the buffer may be returned to userspace. The driver
* cannot use this buffer anymore until it is queued back to it by videobuf
* by the means of buf_queue callback. Only buffers previously queued to the
* driver by buf_queue can be passed to this function.
+ *
+ * While streaming, a buffer can only be returned in state DONE or ERROR.
+ * The start_streaming op can also return them in case the DMA engine cannot
+ * be started for some reason. In that case the buffers should be returned with
+ * state QUEUED.
*/
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
@@ -932,26 +1088,43 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
unsigned long flags;
unsigned int plane;
- if (vb->state != VB2_BUF_STATE_ACTIVE)
+ if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
return;
- if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
- return;
+ if (!q->start_streaming_called) {
+ if (WARN_ON(state != VB2_BUF_STATE_QUEUED))
+ state = VB2_BUF_STATE_QUEUED;
+ } else if (!WARN_ON(!q->start_streaming_called)) {
+ if (WARN_ON(state != VB2_BUF_STATE_DONE &&
+ state != VB2_BUF_STATE_ERROR))
+ state = VB2_BUF_STATE_ERROR;
+ }
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ /*
+ * Although this is not a callback, it still does have to balance
+ * with the buf_queue op. So update this counter manually.
+ */
+ vb->cnt_buf_done++;
+#endif
dprintk(4, "Done processing on buffer %d, state: %d\n",
vb->v4l2_buf.index, state);
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
- call_memop(q, finish, vb->planes[plane].mem_priv);
+ call_memop(vb, finish, vb->planes[plane].mem_priv);
/* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
vb->state = state;
- list_add_tail(&vb->done_entry, &q->done_list);
- atomic_dec(&q->queued_count);
+ if (state != VB2_BUF_STATE_QUEUED)
+ list_add_tail(&vb->done_entry, &q->done_list);
+ atomic_dec(&q->owned_by_drv_count);
spin_unlock_irqrestore(&q->done_lock, flags);
+ if (state == VB2_BUF_STATE_QUEUED)
+ return;
+
/* Inform any processes that may be waiting for buffers */
wake_up(&q->done_wq);
}
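A minimal sketch of how a capture driver's completion interrupt would hand a buffer back under these rules; everything named my_* (the device struct, its cur_buf pointer and slock) is a hypothetical driver detail, not part of this patch:

	static irqreturn_t my_dma_done_irq(int irq, void *priv)
	{
		struct my_dev *dev = priv;	/* hypothetical driver state */
		struct vb2_buffer *vb;
		unsigned long flags;

		spin_lock_irqsave(&dev->slock, flags);
		vb = dev->cur_buf;		/* buffer the DMA just filled */
		dev->cur_buf = NULL;
		spin_unlock_irqrestore(&dev->slock, flags);

		if (vb) {
			v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
			/* streaming has started, so only DONE or ERROR are valid here */
			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
		}
		return IRQ_HANDLED;
	}

If the same driver's start_streaming op bails out, it must instead return the buffers it owns in the QUEUED state, as the sketch after vb2_start_streaming() below shows.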
@@ -1025,9 +1198,31 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
}
- vb->v4l2_buf.field = b->field;
- vb->v4l2_buf.timestamp = b->timestamp;
+ /* Zero flags that the vb2 core handles */
vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
+ if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
+ V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Non-COPY timestamps and non-OUTPUT queues will get
+ * their timestamp and timestamp source flags from the
+ * queue.
+ */
+ vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * For output buffers mask out the timecode flag:
+ * this will be handled later in vb2_internal_qbuf().
+ * The 'field' is valid metadata for this output buffer
+ * and so that needs to be copied here.
+ */
+ vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
+ vb->v4l2_buf.field = b->field;
+ } else {
+ /* Zero any output buffer flags as this is a capture buffer */
+ vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
+ }
}
/**
@@ -1041,6 +1236,7 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
unsigned int plane;
int ret;
int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+ bool reacquired = vb->planes[0].mem_priv == NULL;
/* Copy relevant information provided by the userspace */
__fill_vb2_buffer(vb, b, planes);
@@ -1066,20 +1262,25 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
/* Release previously acquired memory if present */
- if (vb->planes[plane].mem_priv)
- call_memop(q, put_userptr, vb->planes[plane].mem_priv);
+ if (vb->planes[plane].mem_priv) {
+ if (!reacquired) {
+ reacquired = true;
+ call_vb_qop(vb, buf_cleanup, vb);
+ }
+ call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+ }
vb->planes[plane].mem_priv = NULL;
- vb->v4l2_planes[plane].m.userptr = 0;
- vb->v4l2_planes[plane].length = 0;
+ memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
/* Acquire each plane's memory */
- mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane],
+ mem_priv = call_memop(vb, get_userptr, q->alloc_ctx[plane],
planes[plane].m.userptr,
planes[plane].length, write);
if (IS_ERR_OR_NULL(mem_priv)) {
dprintk(1, "qbuf: failed acquiring userspace "
"memory for plane %d\n", plane);
+ fail_memop(vb, get_userptr);
ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
goto err;
}
@@ -1087,28 +1288,40 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
/*
- * Call driver-specific initialization on the newly acquired buffer,
- * if provided.
- */
- ret = call_qop(q, buf_init, vb);
- if (ret) {
- dprintk(1, "qbuf: buffer initialization failed\n");
- goto err;
- }
-
- /*
* Now that everything is in order, copy relevant information
* provided by userspace.
*/
for (plane = 0; plane < vb->num_planes; ++plane)
vb->v4l2_planes[plane] = planes[plane];
+ if (reacquired) {
+ /*
+ * One or more planes changed, so we must call buf_init to do
+ * the driver-specific initialization on the newly acquired
+ * buffer, if provided.
+ */
+ ret = call_vb_qop(vb, buf_init, vb);
+ if (ret) {
+ dprintk(1, "qbuf: buffer initialization failed\n");
+ fail_vb_qop(vb, buf_init);
+ goto err;
+ }
+ }
+
+ ret = call_vb_qop(vb, buf_prepare, vb);
+ if (ret) {
+ dprintk(1, "qbuf: buffer preparation failed\n");
+ fail_vb_qop(vb, buf_prepare);
+ call_vb_qop(vb, buf_cleanup, vb);
+ goto err;
+ }
+
return 0;
err:
/* In case of errors, release planes that were already acquired */
for (plane = 0; plane < vb->num_planes; ++plane) {
if (vb->planes[plane].mem_priv)
- call_memop(q, put_userptr, vb->planes[plane].mem_priv);
+ call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
vb->v4l2_planes[plane].m.userptr = 0;
vb->v4l2_planes[plane].length = 0;
@@ -1122,8 +1335,13 @@ err:
*/
static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
+ int ret;
+
__fill_vb2_buffer(vb, b, vb->v4l2_planes);
- return 0;
+ ret = call_vb_qop(vb, buf_prepare, vb);
+ if (ret)
+ fail_vb_qop(vb, buf_prepare);
+ return ret;
}
/**
@@ -1137,6 +1355,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
unsigned int plane;
int ret;
int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+ bool reacquired = vb->planes[0].mem_priv == NULL;
/* Copy relevant information provided by the userspace */
__fill_vb2_buffer(vb, b, planes);
@@ -1172,15 +1391,21 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
+ if (!reacquired) {
+ reacquired = true;
+ call_vb_qop(vb, buf_cleanup, vb);
+ }
+
/* Release previously acquired memory if present */
- __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+ __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
/* Acquire each plane's memory */
- mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
+ mem_priv = call_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
dbuf, planes[plane].length, write);
if (IS_ERR(mem_priv)) {
dprintk(1, "qbuf: failed to attach dmabuf\n");
+ fail_memop(vb, attach_dmabuf);
ret = PTR_ERR(mem_priv);
dma_buf_put(dbuf);
goto err;
@@ -1195,32 +1420,44 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
* the buffer(s)..
*/
for (plane = 0; plane < vb->num_planes; ++plane) {
- ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
+ ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
if (ret) {
dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
plane);
+ fail_memop(vb, map_dmabuf);
goto err;
}
vb->planes[plane].dbuf_mapped = 1;
}
/*
- * Call driver-specific initialization on the newly acquired buffer,
- * if provided.
- */
- ret = call_qop(q, buf_init, vb);
- if (ret) {
- dprintk(1, "qbuf: buffer initialization failed\n");
- goto err;
- }
-
- /*
* Now that everything is in order, copy relevant information
* provided by userspace.
*/
for (plane = 0; plane < vb->num_planes; ++plane)
vb->v4l2_planes[plane] = planes[plane];
+ if (reacquired) {
+ /*
+ * Call driver-specific initialization on the newly acquired buffer,
+ * if provided.
+ */
+ ret = call_vb_qop(vb, buf_init, vb);
+ if (ret) {
+ dprintk(1, "qbuf: buffer initialization failed\n");
+ fail_vb_qop(vb, buf_init);
+ goto err;
+ }
+ }
+
+ ret = call_vb_qop(vb, buf_prepare, vb);
+ if (ret) {
+ dprintk(1, "qbuf: buffer preparation failed\n");
+ fail_vb_qop(vb, buf_prepare);
+ call_vb_qop(vb, buf_cleanup, vb);
+ goto err;
+ }
+
return 0;
err:
/* In case of errors, release planes that were already acquired */
@@ -1238,13 +1475,13 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
unsigned int plane;
vb->state = VB2_BUF_STATE_ACTIVE;
- atomic_inc(&q->queued_count);
+ atomic_inc(&q->owned_by_drv_count);
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
- call_memop(q, prepare, vb->planes[plane].mem_priv);
+ call_memop(vb, prepare, vb->planes[plane].mem_priv);
- q->ops->buf_queue(vb);
+ call_vb_qop(vb, buf_queue, vb);
}
static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
@@ -1261,6 +1498,10 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
vb->state = VB2_BUF_STATE_PREPARING;
+ vb->v4l2_buf.timestamp.tv_sec = 0;
+ vb->v4l2_buf.timestamp.tv_usec = 0;
+ vb->v4l2_buf.sequence = 0;
+
switch (q->memory) {
case V4L2_MEMORY_MMAP:
ret = __qbuf_mmap(vb, b);
@@ -1295,8 +1536,6 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
ret = -EINVAL;
}
- if (!ret)
- ret = call_qop(q, buf_prepare, vb);
if (ret)
dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
@@ -1382,32 +1621,49 @@ EXPORT_SYMBOL_GPL(vb2_prepare_buf);
* vb2_start_streaming() - Attempt to start streaming.
* @q: videobuf2 queue
*
- * If there are not enough buffers, then retry_start_streaming is set to
- * 1 and 0 is returned. The next time a buffer is queued and
- * retry_start_streaming is 1, this function will be called again to
- * retry starting the DMA engine.
+ * Attempt to start streaming. When this function is called there must be
+ * at least q->min_buffers_needed buffers queued up (i.e. the minimum
+ * number of buffers required for the DMA engine to function). If the
+ * @start_streaming op fails it is supposed to return all the driver-owned
+ * buffers back to vb2 in state QUEUED. Check if that happened and if
+ * not warn and reclaim them forcefully.
*/
static int vb2_start_streaming(struct vb2_queue *q)
{
+ struct vb2_buffer *vb;
int ret;
- /* Tell the driver to start streaming */
- ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
-
/*
- * If there are not enough buffers queued to start streaming, then
- * the start_streaming operation will return -ENOBUFS and you have to
- * retry when the next buffer is queued.
+ * If any buffers were queued before streamon,
+ * we can now pass them to driver for processing.
*/
- if (ret == -ENOBUFS) {
- dprintk(1, "qbuf: not enough buffers, retry when more buffers are queued.\n");
- q->retry_start_streaming = 1;
+ list_for_each_entry(vb, &q->queued_list, queued_entry)
+ __enqueue_in_driver(vb);
+
+ /* Tell the driver to start streaming */
+ ret = call_qop(q, start_streaming, q,
+ atomic_read(&q->owned_by_drv_count));
+ q->start_streaming_called = ret == 0;
+ if (!ret)
return 0;
+
+ fail_qop(q, start_streaming);
+ dprintk(1, "qbuf: driver refused to start streaming\n");
+ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
+ unsigned i;
+
+ /*
+ * Forcefully reclaim buffers if the driver did not
+ * correctly return them to vb2.
+ */
+ for (i = 0; i < q->num_buffers; ++i) {
+ vb = q->bufs[i];
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
+ }
+ /* Must be zero now */
+ WARN_ON(atomic_read(&q->owned_by_drv_count));
}
- if (ret)
- dprintk(1, "qbuf: driver refused to start streaming\n");
- else
- q->retry_start_streaming = 0;
return ret;
}
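For completeness, a hedged sketch of the driver side of this contract: a start_streaming op that cannot start its DMA engine gives every buffer it has been handed back to vb2 in the QUEUED state. The my_* names and the driver-private buffer list are assumptions for illustration only:

	static int my_start_streaming(struct vb2_queue *vq, unsigned int count)
	{
		struct my_dev *dev = vb2_get_drv_priv(vq);	/* hypothetical */
		struct my_buffer *buf, *tmp;			/* hypothetical */
		int ret;

		ret = my_hw_start_dma(dev, count);		/* hypothetical */
		if (ret) {
			/* return all owned buffers in the QUEUED state */
			list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
				list_del(&buf->list);
				vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
			}
		}
		return ret;
	}

With that in place the WARN_ON() and the forced reclaim above should never trigger.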
@@ -1420,11 +1676,6 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
return ret;
vb = q->bufs[b->index];
- if (vb->state != VB2_BUF_STATE_DEQUEUED) {
- dprintk(1, "%s(): invalid buffer state %d\n", __func__,
- vb->state);
- return -EINVAL;
- }
switch (vb->state) {
case VB2_BUF_STATE_DEQUEUED:
@@ -1438,7 +1689,8 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
dprintk(1, "qbuf: buffer still being prepared\n");
return -EINVAL;
default:
- dprintk(1, "qbuf: buffer already in use\n");
+ dprintk(1, "%s(): invalid buffer state %d\n", __func__,
+ vb->state);
return -EINVAL;
}
@@ -1447,19 +1699,39 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
* dequeued in dqbuf.
*/
list_add_tail(&vb->queued_entry, &q->queued_list);
+ q->queued_count++;
vb->state = VB2_BUF_STATE_QUEUED;
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+ * For output buffers copy the timestamp if needed,
+ * and the timecode field and flag if needed.
+ */
+ if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_COPY)
+ vb->v4l2_buf.timestamp = b->timestamp;
+ vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
+ if (b->flags & V4L2_BUF_FLAG_TIMECODE)
+ vb->v4l2_buf.timecode = b->timecode;
+ }
/*
* If already streaming, give the buffer to driver for processing.
* If not, the buffer will be given to driver on next streamon.
*/
- if (q->streaming)
+ if (q->start_streaming_called)
__enqueue_in_driver(vb);
/* Fill buffer information for the userspace */
__fill_v4l2_buffer(vb, b);
- if (q->retry_start_streaming) {
+ /*
+ * If streamon has been called, and we haven't yet called
+ * start_streaming() since not enough buffers were queued, and
+ * we now have reached the minimum number of queued buffers,
+ * then we can finally call start_streaming().
+ */
+ if (q->streaming && !q->start_streaming_called &&
+ q->queued_count >= q->min_buffers_needed) {
ret = vb2_start_streaming(q);
if (ret)
return ret;
@@ -1614,8 +1886,8 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
return -EINVAL;
}
- if (!q->retry_start_streaming)
- wait_event(q->done_wq, !atomic_read(&q->queued_count));
+ if (q->start_streaming_called)
+ wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
return 0;
}
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
@@ -1639,7 +1911,7 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
for (i = 0; i < vb->num_planes; ++i) {
if (!vb->planes[i].dbuf_mapped)
continue;
- call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
+ call_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
vb->planes[i].dbuf_mapped = 0;
}
}
@@ -1657,12 +1929,6 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool n
if (ret < 0)
return ret;
- ret = call_qop(q, buf_finish, vb);
- if (ret) {
- dprintk(1, "dqbuf: buffer finish failed\n");
- return ret;
- }
-
switch (vb->state) {
case VB2_BUF_STATE_DONE:
dprintk(3, "dqbuf: Returning done buffer\n");
@@ -1675,10 +1941,13 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool n
return -EINVAL;
}
+ call_vb_qop(vb, buf_finish, vb);
+
/* Fill buffer information for the userspace */
__fill_v4l2_buffer(vb, b);
/* Remove from videobuf queue */
list_del(&vb->queued_entry);
+ q->queued_count--;
/* go back to dequeued state */
__vb2_dqbuf(vb);
@@ -1729,18 +1998,23 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
{
unsigned int i;
- if (q->retry_start_streaming) {
- q->retry_start_streaming = 0;
- q->streaming = 0;
- }
-
/*
* Tell driver to stop all transactions and release all queued
* buffers.
*/
- if (q->streaming)
+ if (q->start_streaming_called)
call_qop(q, stop_streaming, q);
q->streaming = 0;
+ q->start_streaming_called = 0;
+ q->queued_count = 0;
+
+ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
+ for (i = 0; i < q->num_buffers; ++i)
+ if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
+ vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
+ /* Must be zero now */
+ WARN_ON(atomic_read(&q->owned_by_drv_count));
+ }
/*
* Remove all buffers from videobuf's list...
@@ -1751,19 +2025,31 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* has not already dequeued before initiating cancel.
*/
INIT_LIST_HEAD(&q->done_list);
- atomic_set(&q->queued_count, 0);
+ atomic_set(&q->owned_by_drv_count, 0);
wake_up_all(&q->done_wq);
/*
* Reinitialize all buffers for next use.
+ * Make sure to call buf_finish for any queued buffers. Normally
+ * that's done in dqbuf, but that's not going to happen when we
+ * cancel the whole queue. Note: this code belongs here, not in
+ * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
+ * call to __fill_v4l2_buffer() after buf_finish(). That order can't
+ * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
*/
- for (i = 0; i < q->num_buffers; ++i)
- __vb2_dqbuf(q->bufs[i]);
+ for (i = 0; i < q->num_buffers; ++i) {
+ struct vb2_buffer *vb = q->bufs[i];
+
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ vb->state = VB2_BUF_STATE_PREPARED;
+ call_vb_qop(vb, buf_finish, vb);
+ }
+ __vb2_dqbuf(vb);
+ }
}
static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
- struct vb2_buffer *vb;
int ret;
if (type != q->type) {
@@ -1781,18 +2067,26 @@ static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
return -EINVAL;
}
+ if (!q->num_buffers) {
+ dprintk(1, "streamon: no buffers have been allocated\n");
+ return -EINVAL;
+ }
+ if (q->num_buffers < q->min_buffers_needed) {
+ dprintk(1, "streamon: need at least %u allocated buffers\n",
+ q->min_buffers_needed);
+ return -EINVAL;
+ }
+
/*
- * If any buffers were queued before streamon,
- * we can now pass them to driver for processing.
+ * Tell driver to start streaming provided sufficient buffers
+ * are available.
*/
- list_for_each_entry(vb, &q->queued_list, queued_entry)
- __enqueue_in_driver(vb);
-
- /* Tell driver to start streaming. */
- ret = vb2_start_streaming(q);
- if (ret) {
- __vb2_queue_cancel(q);
- return ret;
+ if (q->queued_count >= q->min_buffers_needed) {
+ ret = vb2_start_streaming(q);
+ if (ret) {
+ __vb2_queue_cancel(q);
+ return ret;
+ }
}
q->streaming = 1;
@@ -1831,14 +2125,14 @@ static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
return -EINVAL;
}
- if (!q->streaming) {
- dprintk(3, "streamoff successful: not streaming\n");
- return 0;
- }
-
/*
* Cancel will pause streaming and remove all buffers from the driver
* and videobuf, effectively returning control over them to userspace.
+ *
+ * Note that we do this even if q->streaming == 0: if you prepare or
+ * queue buffers, and then call streamoff without ever having called
+ * streamon, you would still expect those buffers to be returned to
+ * their normal dequeued state.
*/
__vb2_queue_cancel(q);
@@ -1950,10 +2244,11 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
vb_plane = &vb->planes[eb->plane];
- dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
+ dbuf = call_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
if (IS_ERR_OR_NULL(dbuf)) {
dprintk(1, "Failed to export buffer %d, plane %d\n",
eb->index, eb->plane);
+ fail_memop(vb, get_dmabuf);
return -EINVAL;
}
@@ -2045,9 +2340,11 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
return -EINVAL;
}
- ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma);
- if (ret)
+ ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+ if (ret) {
+ fail_memop(vb, mmap);
return ret;
+ }
dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
return 0;
@@ -2200,11 +2497,14 @@ int vb2_queue_init(struct vb2_queue *q)
WARN_ON(!q->io_modes) ||
WARN_ON(!q->ops->queue_setup) ||
WARN_ON(!q->ops->buf_queue) ||
- WARN_ON(q->timestamp_type & ~V4L2_BUF_FLAG_TIMESTAMP_MASK))
+ WARN_ON(q->timestamp_flags &
+ ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
return -EINVAL;
/* Warn that the driver should choose an appropriate timestamp type */
- WARN_ON(q->timestamp_type == V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
+ WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
INIT_LIST_HEAD(&q->queued_list);
INIT_LIST_HEAD(&q->done_list);
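Since timestamp_type is replaced by timestamp_flags here, a short sketch of what a driver's queue initialization could look like after this change; the ops/mem_ops values and my_* names are placeholders chosen for illustration:

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	q->drv_priv = dev;				/* hypothetical driver state */
	q->buf_struct_size = sizeof(struct my_buffer);	/* hypothetical */
	q->ops = &my_vb2_ops;				/* hypothetical ops table */
	q->mem_ops = &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC |
			     V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	q->min_buffers_needed = 2;	/* assume the DMA engine wants 2 buffers */
	ret = vb2_queue_init(q);

Any combination of the TIMESTAMP and TSTAMP_SRC masks passes the WARN_ON() check above, while stray bits outside those masks make vb2_queue_init() return -EINVAL.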
@@ -2251,6 +2551,22 @@ struct vb2_fileio_buf {
/**
* struct vb2_fileio_data - queue context used by file io emulator
*
+ * @cur_index: the index of the buffer currently being read from or
+ * written to. If equal to q->num_buffers then a new buffer
+ * must be dequeued.
+ * @initial_index: in the read() case all buffers are queued up immediately
+ * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
+ * buffers. However, in the write() case no buffers are initially
+ * queued; instead, whenever a buffer is full it is queued up by
+ * __vb2_perform_fileio(). Only once all available buffers have
+ * been queued up will __vb2_perform_fileio() start to dequeue
+ * buffers. This means that initially __vb2_perform_fileio()
+ * needs to know what buffer index to use when it is queuing up
+ * the buffers for the first time. That initial index is stored
+ * in this field. Once it is equal to q->num_buffers all
+ * available buffers have been queued and __vb2_perform_fileio()
+ * should start the normal dequeue/queue cycle.
+ *
* vb2 provides a compatibility layer and emulator of file io (read and
* write) calls on top of streaming API. For proper operation it required
* this structure to save the driver state between each call of the read
@@ -2260,7 +2576,8 @@ struct vb2_fileio_data {
struct v4l2_requestbuffers req;
struct v4l2_buffer b;
struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
- unsigned int index;
+ unsigned int cur_index;
+ unsigned int initial_index;
unsigned int q_count;
unsigned int dq_count;
unsigned int flags;
@@ -2280,9 +2597,9 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
/*
* Sanity check
*/
- if ((read && !(q->io_modes & VB2_READ)) ||
- (!read && !(q->io_modes & VB2_WRITE)))
- BUG();
+ if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
+ (!read && !(q->io_modes & VB2_WRITE))))
+ return -EINVAL;
/*
* Check if device supports mapping buffers to kernel virtual space.
@@ -2360,7 +2677,12 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
goto err_reqbufs;
fileio->bufs[i].queued = 1;
}
- fileio->index = q->num_buffers;
+ /*
+ * All buffers have been queued, so mark that by setting
+ * initial_index to q->num_buffers
+ */
+ fileio->initial_index = q->num_buffers;
+ fileio->cur_index = q->num_buffers;
}
/*
@@ -2439,7 +2761,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
/*
* Check if we need to dequeue the buffer.
*/
- index = fileio->index;
+ index = fileio->cur_index;
if (index >= q->num_buffers) {
/*
* Call vb2_dqbuf to get buffer back.
@@ -2453,7 +2775,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
return ret;
fileio->dq_count += 1;
- index = fileio->b.index;
+ fileio->cur_index = index = fileio->b.index;
buf = &fileio->bufs[index];
/*
@@ -2529,8 +2851,20 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
buf->queued = 1;
buf->size = vb2_plane_size(q->bufs[index], 0);
fileio->q_count += 1;
- if (fileio->index < q->num_buffers)
- fileio->index++;
+ /*
+ * If we are queuing up buffers for the first time, then
+ * increase initial_index by one.
+ */
+ if (fileio->initial_index < q->num_buffers)
+ fileio->initial_index++;
+ /*
+ * The next buffer to use is either a buffer that's going to be
+ * queued for the first time (initial_index < q->num_buffers)
+ * or it is equal to q->num_buffers, meaning that next time
+ * a buffer has to be dequeued since we've now queued up
+ * all the 'first time' buffers.
+ */
+ fileio->cur_index = fileio->initial_index;
}
/*
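To make the new cur_index/initial_index bookkeeping concrete, a short walk-through of the write() case, assuming three buffers and that each write() call fills exactly one buffer (illustration only, kept as a comment):

	/*
	 * write() #1: cur_index 0 -> fill+queue buf 0, initial_index 1, cur_index 1
	 * write() #2: cur_index 1 -> fill+queue buf 1, initial_index 2, cur_index 2
	 * write() #3: cur_index 2 -> fill+queue buf 2, initial_index 3, cur_index 3
	 * write() #4: cur_index 3 == num_buffers -> dqbuf, reuse that buffer,
	 *             and from now on cur_index keeps coming back to 3 so every
	 *             call starts with a dequeue.
	 */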
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 33d3871d1e13..880be0782dd9 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -719,7 +719,7 @@ static int vb2_dc_map_dmabuf(void *mem_priv)
/* get the associated scatterlist for this buffer */
sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
- if (IS_ERR_OR_NULL(sgt)) {
+ if (IS_ERR(sgt)) {
pr_err("Error getting dmabuf scatterlist\n");
return -EINVAL;
}
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 29a11db365bc..c59e9c96e86d 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -7,6 +7,17 @@ menuconfig MEMORY
if MEMORY
+config TI_AEMIF
+ tristate "Texas Instruments AEMIF driver"
+ depends on (ARCH_DAVINCI || ARCH_KEYSTONE) && OF
+ help
+ This driver is for the AEMIF module available in Texas Instruments
+ SoCs. AEMIF stands for Asynchronous External Memory Interface and
+ is intended to provide a glue-less interface to a variety of
+ asynchronous memory devices like ASRAM, NOR and NAND memory. A total
+ of 256M bytes of any of these memories can be accessed at a given
+ time via four chip selects with 64M byte access per chip select.
+
config TI_EMIF
tristate "Texas Instruments EMIF driver"
depends on ARCH_OMAP2PLUS
@@ -50,4 +61,8 @@ config TEGRA30_MC
analysis, especially for IOMMU/SMMU(System Memory Management
Unit) module.
+config FSL_IFC
+ bool
+ depends on FSL_SOC
+
endif
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 969d923dad93..71160a2b7313 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -5,7 +5,9 @@
ifeq ($(CONFIG_DDR),y)
obj-$(CONFIG_OF) += of_memory.o
endif
+obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
+obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
obj-$(CONFIG_TEGRA30_MC) += tegra30-mc.o
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
new file mode 100644
index 000000000000..3d5d792d5cb2
--- /dev/null
+++ b/drivers/memory/fsl_ifc.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc
+ *
+ * Freescale Integrated Flash Controller
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_ifc.h>
+#include <asm/prom.h>
+
+struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
+
+/*
+ * convert_ifc_address - convert the base address
+ * @addr_base: base address of the memory bank
+ */
+unsigned int convert_ifc_address(phys_addr_t addr_base)
+{
+ return addr_base & CSPR_BA;
+}
+EXPORT_SYMBOL(convert_ifc_address);
+
+/*
+ * fsl_ifc_find - find IFC bank
+ * @addr_base: base address of the memory bank
+ *
+ * This function walks the IFC banks comparing the "Base address" field of the
+ * CSPR registers with the supplied addr_base argument. When the bases match,
+ * this function returns the bank number (starting with 0), otherwise it
+ * returns an appropriate errno value.
+ */
+int fsl_ifc_find(phys_addr_t addr_base)
+{
+ int i = 0;
+
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) {
+ u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
+ if (cspr & CSPR_V && (cspr & CSPR_BA) ==
+ convert_ifc_address(addr_base))
+ return i;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(fsl_ifc_find);
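A hedged usage sketch of fsl_ifc_find(): a flash driver that knows the physical base of its chip select could map it to an IFC bank like this (the function and device names are hypothetical):

	static int my_flash_locate_bank(struct device *dev, phys_addr_t base)
	{
		int bank = fsl_ifc_find(base);

		if (bank < 0)
			return bank;	/* -ENODEV or -ENOENT, see above */
		dev_info(dev, "flash sits behind IFC bank %d\n", bank);
		return bank;
	}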
+
+static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
+{
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+ /*
+ * Clear all the common status and event registers
+ */
+ if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
+ out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+
+ /* enable all error and events */
+ out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN);
+
+ /* enable all error and event interrupts */
+ out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN);
+ out_be32(&ifc->cm_erattr0, 0x0);
+ out_be32(&ifc->cm_erattr1, 0x0);
+
+ return 0;
+}
+
+static int fsl_ifc_ctrl_remove(struct platform_device *dev)
+{
+ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev);
+
+ free_irq(ctrl->nand_irq, ctrl);
+ free_irq(ctrl->irq, ctrl);
+
+ irq_dispose_mapping(ctrl->nand_irq);
+ irq_dispose_mapping(ctrl->irq);
+
+ iounmap(ctrl->regs);
+
+ dev_set_drvdata(&dev->dev, NULL);
+ kfree(ctrl);
+
+ return 0;
+}
+
+/*
+ * NAND events are split between an operational interrupt which only
+ * receives OPC, and an error interrupt that receives everything else,
+ * including non-NAND errors. Whichever interrupt gets to it first
+ * records the status and wakes the wait queue.
+ */
+static DEFINE_SPINLOCK(nand_irq_lock);
+
+static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl)
+{
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ unsigned long flags;
+ u32 stat;
+
+ spin_lock_irqsave(&nand_irq_lock, flags);
+
+ stat = in_be32(&ifc->ifc_nand.nand_evter_stat);
+ if (stat) {
+ out_be32(&ifc->ifc_nand.nand_evter_stat, stat);
+ ctrl->nand_stat = stat;
+ wake_up(&ctrl->nand_wait);
+ }
+
+ spin_unlock_irqrestore(&nand_irq_lock, flags);
+
+ return stat;
+}
+
+static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data)
+{
+ struct fsl_ifc_ctrl *ctrl = data;
+
+ if (check_nand_stat(ctrl))
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+/*
+ * NOTE: This interrupt is used to report ifc events of various kinds,
+ * such as transaction errors on the chipselects.
+ */
+static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
+{
+ struct fsl_ifc_ctrl *ctrl = data;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ u32 err_axiid, err_srcid, status, cs_err, err_addr;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* read for chip select error */
+ cs_err = in_be32(&ifc->cm_evter_stat);
+ if (cs_err) {
+ dev_err(ctrl->dev, "transaction sent to IFC is not mapped to"
+ "any memory bank 0x%08X\n", cs_err);
+ /* clear the chip select error */
+ out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+
+ /* read the error attribute registers and print the error information */
+ status = in_be32(&ifc->cm_erattr0);
+ err_addr = in_be32(&ifc->cm_erattr1);
+
+ if (status & IFC_CM_ERATTR0_ERTYP_READ)
+ dev_err(ctrl->dev, "Read transaction error"
+ "CM_ERATTR0 0x%08X\n", status);
+ else
+ dev_err(ctrl->dev, "Write transaction error"
+ "CM_ERATTR0 0x%08X\n", status);
+
+ err_axiid = (status & IFC_CM_ERATTR0_ERAID) >>
+ IFC_CM_ERATTR0_ERAID_SHIFT;
+ dev_err(ctrl->dev, "AXI ID of the error"
+ "transaction 0x%08X\n", err_axiid);
+
+ err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >>
+ IFC_CM_ERATTR0_ESRCID_SHIFT;
+ dev_err(ctrl->dev, "SRC ID of the error"
+ "transaction 0x%08X\n", err_srcid);
+
+ dev_err(ctrl->dev, "Transaction Address corresponding to error"
+ "ERADDR 0x%08X\n", err_addr);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (check_nand_stat(ctrl))
+ ret = IRQ_HANDLED;
+
+ return ret;
+}
+
+/*
+ * fsl_ifc_ctrl_probe
+ *
+ * called by the device layer when it finds a device matching
+ * one our driver can handle. This code allocates all of
+ * the resources needed for the controller only. The
+ * resources for the NAND banks themselves are allocated
+ * in the chip probe function.
+ */
+static int fsl_ifc_ctrl_probe(struct platform_device *dev)
+{
+ int ret = 0;
+
+
+ dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
+
+ fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL);
+ if (!fsl_ifc_ctrl_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev);
+
+ /* IOMAP the entire IFC region */
+ fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
+ if (!fsl_ifc_ctrl_dev->regs) {
+ dev_err(&dev->dev, "failed to get memory region\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* get the Controller level irq */
+ fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+ if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
+ dev_err(&dev->dev, "failed to get irq resource "
+ "for IFC\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* get the nand machine irq */
+ fsl_ifc_ctrl_dev->nand_irq =
+ irq_of_parse_and_map(dev->dev.of_node, 1);
+
+ fsl_ifc_ctrl_dev->dev = &dev->dev;
+
+ ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev);
+ if (ret < 0)
+ goto err;
+
+ init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait);
+
+ ret = request_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_irq, IRQF_SHARED,
+ "fsl-ifc", fsl_ifc_ctrl_dev);
+ if (ret != 0) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_ifc_ctrl_dev->irq);
+ goto err_irq;
+ }
+
+ if (fsl_ifc_ctrl_dev->nand_irq) {
+ ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq,
+ 0, "fsl-ifc-nand", fsl_ifc_ctrl_dev);
+ if (ret != 0) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_ifc_ctrl_dev->nand_irq);
+ goto err_nandirq;
+ }
+ }
+
+ return 0;
+
+err_nandirq:
+ free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev);
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
+err_irq:
+ free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
+err:
+ return ret;
+}
+
+static const struct of_device_id fsl_ifc_match[] = {
+ {
+ .compatible = "fsl,ifc",
+ },
+ {},
+};
+
+static struct platform_driver fsl_ifc_ctrl_driver = {
+ .driver = {
+ .name = "fsl-ifc",
+ .of_match_table = fsl_ifc_match,
+ },
+ .probe = fsl_ifc_ctrl_probe,
+ .remove = fsl_ifc_ctrl_remove,
+};
+
+static int __init fsl_ifc_init(void)
+{
+ return platform_driver_register(&fsl_ifc_ctrl_driver);
+}
+subsys_initcall(fsl_ifc_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller driver");
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
new file mode 100644
index 000000000000..d3df7602f406
--- /dev/null
+++ b/drivers/memory/ti-aemif.c
@@ -0,0 +1,427 @@
+/*
+ * TI AEMIF driver
+ *
+ * Copyright (C) 2010 - 2013 Texas Instruments Incorporated. http://www.ti.com/
+ *
+ * Authors:
+ * Murali Karicheri <m-karicheri2@ti.com>
+ * Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#define TA_SHIFT 2
+#define RHOLD_SHIFT 4
+#define RSTROBE_SHIFT 7
+#define RSETUP_SHIFT 13
+#define WHOLD_SHIFT 17
+#define WSTROBE_SHIFT 20
+#define WSETUP_SHIFT 26
+#define EW_SHIFT 30
+#define SS_SHIFT 31
+
+#define TA(x) ((x) << TA_SHIFT)
+#define RHOLD(x) ((x) << RHOLD_SHIFT)
+#define RSTROBE(x) ((x) << RSTROBE_SHIFT)
+#define RSETUP(x) ((x) << RSETUP_SHIFT)
+#define WHOLD(x) ((x) << WHOLD_SHIFT)
+#define WSTROBE(x) ((x) << WSTROBE_SHIFT)
+#define WSETUP(x) ((x) << WSETUP_SHIFT)
+#define EW(x) ((x) << EW_SHIFT)
+#define SS(x) ((x) << SS_SHIFT)
+
+#define ASIZE_MAX 0x1
+#define TA_MAX 0x3
+#define RHOLD_MAX 0x7
+#define RSTROBE_MAX 0x3f
+#define RSETUP_MAX 0xf
+#define WHOLD_MAX 0x7
+#define WSTROBE_MAX 0x3f
+#define WSETUP_MAX 0xf
+#define EW_MAX 0x1
+#define SS_MAX 0x1
+#define NUM_CS 4
+
+#define TA_VAL(x) (((x) & TA(TA_MAX)) >> TA_SHIFT)
+#define RHOLD_VAL(x) (((x) & RHOLD(RHOLD_MAX)) >> RHOLD_SHIFT)
+#define RSTROBE_VAL(x) (((x) & RSTROBE(RSTROBE_MAX)) >> RSTROBE_SHIFT)
+#define RSETUP_VAL(x) (((x) & RSETUP(RSETUP_MAX)) >> RSETUP_SHIFT)
+#define WHOLD_VAL(x) (((x) & WHOLD(WHOLD_MAX)) >> WHOLD_SHIFT)
+#define WSTROBE_VAL(x) (((x) & WSTROBE(WSTROBE_MAX)) >> WSTROBE_SHIFT)
+#define WSETUP_VAL(x) (((x) & WSETUP(WSETUP_MAX)) >> WSETUP_SHIFT)
+#define EW_VAL(x) (((x) & EW(EW_MAX)) >> EW_SHIFT)
+#define SS_VAL(x) (((x) & SS(SS_MAX)) >> SS_SHIFT)
+
+#define NRCSR_OFFSET 0x00
+#define AWCCR_OFFSET 0x04
+#define A1CR_OFFSET 0x10
+
+#define ACR_ASIZE_MASK 0x3
+#define ACR_EW_MASK BIT(30)
+#define ACR_SS_MASK BIT(31)
+#define ASIZE_16BIT 1
+
+#define CONFIG_MASK (TA(TA_MAX) | \
+ RHOLD(RHOLD_MAX) | \
+ RSTROBE(RSTROBE_MAX) | \
+ RSETUP(RSETUP_MAX) | \
+ WHOLD(WHOLD_MAX) | \
+ WSTROBE(WSTROBE_MAX) | \
+ WSETUP(WSETUP_MAX) | \
+ EW(EW_MAX) | SS(SS_MAX) | \
+ ASIZE_MAX)
+
+/**
+ * struct aemif_cs_data: structure to hold cs parameters
+ * @cs: chip-select number
+ * @wstrobe: write strobe width, ns
+ * @rstrobe: read strobe width, ns
+ * @wsetup: write setup width, ns
+ * @whold: write hold width, ns
+ * @rsetup: read setup width, ns
+ * @rhold: read hold width, ns
+ * @ta: minimum turn around time, ns
+ * @enable_ss: enable/disable select strobe mode
+ * @enable_ew: enable/disable extended wait mode
+ * @asize: width of the asynchronous device's data bus
+ */
+struct aemif_cs_data {
+ u8 cs;
+ u16 wstrobe;
+ u16 rstrobe;
+ u8 wsetup;
+ u8 whold;
+ u8 rsetup;
+ u8 rhold;
+ u8 ta;
+ u8 enable_ss;
+ u8 enable_ew;
+ u8 asize;
+};
+
+/**
+ * struct aemif_device: structure to hold device data
+ * @base: base address of AEMIF registers
+ * @clk: source clock
+ * @clk_rate: clock's rate in kHz
+ * @num_cs: number of assigned chip-selects
+ * @cs_offset: start number of cs nodes
+ * @cs_data: array of chip-select settings
+ */
+struct aemif_device {
+ void __iomem *base;
+ struct clk *clk;
+ unsigned long clk_rate;
+ u8 num_cs;
+ int cs_offset;
+ struct aemif_cs_data cs_data[NUM_CS];
+};
+
+/**
+ * aemif_calc_rate - calculate timing data.
+ * @pdev: platform device to calculate for
+ * @wanted: The cycle time needed in nanoseconds.
+ * @clk: The input clock rate in kHz.
+ * @max: The maximum divider value that can be programmed.
+ *
+ * On success, returns the calculated timing value minus 1 for easy
+ * programming into AEMIF timing registers, else negative errno.
+ */
+static int aemif_calc_rate(struct platform_device *pdev, int wanted,
+ unsigned long clk, int max)
+{
+ int result;
+
+ result = DIV_ROUND_UP((wanted * clk), NSEC_PER_MSEC) - 1;
+
+ dev_dbg(&pdev->dev, "%s: result %d from %ld, %d\n", __func__, result,
+ clk, wanted);
+
+ /* It is generally OK to have a more relaxed timing than requested... */
+ if (result < 0)
+ result = 0;
+
+ /* ... But configuring tighter timings is not an option. */
+ else if (result > max)
+ result = -EINVAL;
+
+ return result;
+}
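A quick worked example of the conversion above, kept as a comment and for illustration only:

	/*
	 * With clk = 100000 kHz (a 10 ns cycle):
	 *   wanted = 25 ns  -> DIV_ROUND_UP(25 * 100000, 1000000) = 3,
	 *                      so the function returns 3 - 1 = 2 and the
	 *                      hardware is programmed for 3 cycles (30 ns).
	 *   wanted = 1000 ns against max = RHOLD_MAX (0x7)
	 *                   -> 100 - 1 = 99 > 7, so -EINVAL is returned.
	 */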
+
+/**
+ * aemif_config_abus - configure async bus parameters
+ * @pdev: platform device to configure for
+ * @csnum: aemif chip select number
+ *
+ * This function programs the given timing values (in real clock) into the
+ * AEMIF registers taking the AEMIF clock into account.
+ *
+ * This function does not use any locking while programming the AEMIF
+ * because it is expected that there is only one user of a given
+ * chip-select.
+ *
+ * Returns 0 on success, else negative errno.
+ */
+static int aemif_config_abus(struct platform_device *pdev, int csnum)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+ struct aemif_cs_data *data = &aemif->cs_data[csnum];
+ int ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup;
+ unsigned long clk_rate = aemif->clk_rate;
+ unsigned offset;
+ u32 set, val;
+
+ offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
+
+ ta = aemif_calc_rate(pdev, data->ta, clk_rate, TA_MAX);
+ rhold = aemif_calc_rate(pdev, data->rhold, clk_rate, RHOLD_MAX);
+ rstrobe = aemif_calc_rate(pdev, data->rstrobe, clk_rate, RSTROBE_MAX);
+ rsetup = aemif_calc_rate(pdev, data->rsetup, clk_rate, RSETUP_MAX);
+ whold = aemif_calc_rate(pdev, data->whold, clk_rate, WHOLD_MAX);
+ wstrobe = aemif_calc_rate(pdev, data->wstrobe, clk_rate, WSTROBE_MAX);
+ wsetup = aemif_calc_rate(pdev, data->wsetup, clk_rate, WSETUP_MAX);
+
+ if (ta < 0 || rhold < 0 || rstrobe < 0 || rsetup < 0 ||
+ whold < 0 || wstrobe < 0 || wsetup < 0) {
+ dev_err(&pdev->dev, "%s: cannot get suitable timings\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ set = TA(ta) | RHOLD(rhold) | RSTROBE(rstrobe) | RSETUP(rsetup) |
+ WHOLD(whold) | WSTROBE(wstrobe) | WSETUP(wsetup);
+
+ set |= (data->asize & ACR_ASIZE_MASK);
+ if (data->enable_ew)
+ set |= ACR_EW_MASK;
+ if (data->enable_ss)
+ set |= ACR_SS_MASK;
+
+ val = readl(aemif->base + offset);
+ val &= ~CONFIG_MASK;
+ val |= set;
+ writel(val, aemif->base + offset);
+
+ return 0;
+}
+
+static inline int aemif_cycles_to_nsec(int val, unsigned long clk_rate)
+{
+ return ((val + 1) * NSEC_PER_MSEC) / clk_rate;
+}
+
+/**
+ * aemif_get_hw_params - function to read hw register values
+ * @pdev: platform device to read for
+ * @csnum: aemif chip select number
+ *
+ * This function reads the defaults from the registers and updates
+ * the timing values. Required for get/set commands and also for
+ * the case when the driver needs to use the defaults in hardware.
+ */
+static void aemif_get_hw_params(struct platform_device *pdev, int csnum)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+ struct aemif_cs_data *data = &aemif->cs_data[csnum];
+ unsigned long clk_rate = aemif->clk_rate;
+ u32 val, offset;
+
+ offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
+ val = readl(aemif->base + offset);
+
+ data->ta = aemif_cycles_to_nsec(TA_VAL(val), clk_rate);
+ data->rhold = aemif_cycles_to_nsec(RHOLD_VAL(val), clk_rate);
+ data->rstrobe = aemif_cycles_to_nsec(RSTROBE_VAL(val), clk_rate);
+ data->rsetup = aemif_cycles_to_nsec(RSETUP_VAL(val), clk_rate);
+ data->whold = aemif_cycles_to_nsec(WHOLD_VAL(val), clk_rate);
+ data->wstrobe = aemif_cycles_to_nsec(WSTROBE_VAL(val), clk_rate);
+ data->wsetup = aemif_cycles_to_nsec(WSETUP_VAL(val), clk_rate);
+ data->enable_ew = EW_VAL(val);
+ data->enable_ss = SS_VAL(val);
+ data->asize = val & ASIZE_MAX;
+}
+
+/**
+ * of_aemif_parse_abus_config - parse CS configuration from DT
+ * @pdev: platform device to parse for
+ * @np: device node ptr
+ *
+ * This function updates the emif async bus configuration based on the values
+ * configured in a cs device binding node.
+ */
+static int of_aemif_parse_abus_config(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+ struct aemif_cs_data *data;
+ u32 cs;
+ u32 val;
+
+ if (of_property_read_u32(np, "ti,cs-chipselect", &cs)) {
+ dev_dbg(&pdev->dev, "cs property is required");
+ return -EINVAL;
+ }
+
+ if (cs - aemif->cs_offset >= NUM_CS || cs < aemif->cs_offset) {
+ dev_dbg(&pdev->dev, "cs number is incorrect %d", cs);
+ return -EINVAL;
+ }
+
+ if (aemif->num_cs >= NUM_CS) {
+ dev_dbg(&pdev->dev, "cs count is more than %d", NUM_CS);
+ return -EINVAL;
+ }
+
+ data = &aemif->cs_data[aemif->num_cs];
+ data->cs = cs;
+
+ /* read the current value in the hw register */
+ aemif_get_hw_params(pdev, aemif->num_cs++);
+
+ /* override the values from device node */
+ if (!of_property_read_u32(np, "ti,cs-min-turnaround-ns", &val))
+ data->ta = val;
+
+ if (!of_property_read_u32(np, "ti,cs-read-hold-ns", &val))
+ data->rhold = val;
+
+ if (!of_property_read_u32(np, "ti,cs-read-strobe-ns", &val))
+ data->rstrobe = val;
+
+ if (!of_property_read_u32(np, "ti,cs-read-setup-ns", &val))
+ data->rsetup = val;
+
+ if (!of_property_read_u32(np, "ti,cs-write-hold-ns", &val))
+ data->whold = val;
+
+ if (!of_property_read_u32(np, "ti,cs-write-strobe-ns", &val))
+ data->wstrobe = val;
+
+ if (!of_property_read_u32(np, "ti,cs-write-setup-ns", &val))
+ data->wsetup = val;
+
+ if (!of_property_read_u32(np, "ti,cs-bus-width", &val))
+ if (val == 16)
+ data->asize = 1;
+ data->enable_ew = of_property_read_bool(np, "ti,cs-extended-wait-mode");
+ data->enable_ss = of_property_read_bool(np, "ti,cs-select-strobe-mode");
+ return 0;
+}
+
+static const struct of_device_id aemif_of_match[] = {
+ { .compatible = "ti,davinci-aemif", },
+ { .compatible = "ti,da850-aemif", },
+ {},
+};
+
+static int aemif_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret = -ENODEV;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child_np;
+ struct aemif_device *aemif;
+
+ if (np == NULL)
+ return 0;
+
+ aemif = devm_kzalloc(dev, sizeof(*aemif), GFP_KERNEL);
+ if (!aemif)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, aemif);
+
+ aemif->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(aemif->clk)) {
+ dev_err(dev, "cannot get clock 'aemif'\n");
+ return PTR_ERR(aemif->clk);
+ }
+
+ clk_prepare_enable(aemif->clk);
+ aemif->clk_rate = clk_get_rate(aemif->clk) / MSEC_PER_SEC;
+
+ if (of_device_is_compatible(np, "ti,da850-aemif"))
+ aemif->cs_offset = 2;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ aemif->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(aemif->base)) {
+ ret = PTR_ERR(aemif->base);
+ goto error;
+ }
+
+ /*
+ * For every controller device node, there is a cs device node that
+ * describes the bus configuration parameters. This loop iterates
+ * over these nodes and updates the cs data array.
+ */
+ for_each_available_child_of_node(np, child_np) {
+ ret = of_aemif_parse_abus_config(pdev, child_np);
+ if (ret < 0)
+ goto error;
+ }
+
+ for (i = 0; i < aemif->num_cs; i++) {
+ ret = aemif_config_abus(pdev, i);
+ if (ret < 0) {
+ dev_err(dev, "Error configuring chip select %d\n",
+ aemif->cs_data[i].cs);
+ goto error;
+ }
+ }
+
+ /*
+ * Create a child devices explicitly from here to
+ * guarantee that the child will be probed after the AEMIF timing
+ * parameters are set.
+ */
+ for_each_available_child_of_node(np, child_np) {
+ ret = of_platform_populate(child_np, NULL, NULL, dev);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+error:
+ clk_disable_unprepare(aemif->clk);
+ return ret;
+}
+
+static int aemif_remove(struct platform_device *pdev)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(aemif->clk);
+ return 0;
+}
+
+static struct platform_driver aemif_driver = {
+ .probe = aemif_probe,
+ .remove = aemif_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(aemif_of_match),
+ },
+};
+
+module_platform_driver(aemif_driver);
+
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
+MODULE_AUTHOR("Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments AEMIF driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index a8c08f332da0..92752fb5b2d3 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -652,6 +652,44 @@ static int i2o_iop_activate(struct i2o_controller *c)
return i2o_hrt_get(c);
};
+static void i2o_res_alloc(struct i2o_controller *c, unsigned long flags)
+{
+ i2o_status_block *sb = c->status_block.virt;
+ struct resource *res = &c->mem_resource;
+ resource_size_t size, align;
+ int err;
+
+ res->name = c->pdev->bus->name;
+ res->flags = flags;
+ res->start = 0;
+ res->end = 0;
+ osm_info("%s: requires private memory resources.\n", c->name);
+
+ if (flags & IORESOURCE_MEM) {
+ size = sb->desired_mem_size;
+ align = 1 << 20; /* unspecified, so use 1 MB and play it safe */
+ } else {
+ size = sb->desired_io_size;
+ align = 1 << 12; /* unspecified, so use 4 KB and play it safe */
+ }
+
+ err = pci_bus_alloc_resource(c->pdev->bus, res, size, align, 0, 0,
+ NULL, NULL);
+ if (err < 0)
+ return;
+
+ if (flags & IORESOURCE_MEM) {
+ c->mem_alloc = 1;
+ sb->current_mem_size = resource_size(res);
+ sb->current_mem_base = res->start;
+ } else if (flags & IORESOURCE_IO) {
+ c->io_alloc = 1;
+ sb->current_io_size = resource_size(res);
+ sb->current_io_base = res->start;
+ }
+ osm_info("%s: allocated PCI space %pR\n", c->name, res);
+}
+
/**
* i2o_iop_systab_set - Set the I2O System Table of the specified IOP
* @c: I2O controller to which the system table should be send
@@ -665,52 +703,13 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
struct i2o_message *msg;
i2o_status_block *sb = c->status_block.virt;
struct device *dev = &c->pdev->dev;
- struct resource *root;
int rc;
- if (sb->current_mem_size < sb->desired_mem_size) {
- struct resource *res = &c->mem_resource;
- res->name = c->pdev->bus->name;
- res->flags = IORESOURCE_MEM;
- res->start = 0;
- res->end = 0;
- osm_info("%s: requires private memory resources.\n", c->name);
- root = pci_find_parent_resource(c->pdev, res);
- if (root == NULL)
- osm_warn("%s: Can't find parent resource!\n", c->name);
- if (root && allocate_resource(root, res, sb->desired_mem_size, sb->desired_mem_size, sb->desired_mem_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
- NULL, NULL) >= 0) {
- c->mem_alloc = 1;
- sb->current_mem_size = resource_size(res);
- sb->current_mem_base = res->start;
- osm_info("%s: allocated %llu bytes of PCI memory at "
- "0x%016llX.\n", c->name,
- (unsigned long long)resource_size(res),
- (unsigned long long)res->start);
- }
- }
+ if (sb->current_mem_size < sb->desired_mem_size)
+ i2o_res_alloc(c, IORESOURCE_MEM);
- if (sb->current_io_size < sb->desired_io_size) {
- struct resource *res = &c->io_resource;
- res->name = c->pdev->bus->name;
- res->flags = IORESOURCE_IO;
- res->start = 0;
- res->end = 0;
- osm_info("%s: requires private memory resources.\n", c->name);
- root = pci_find_parent_resource(c->pdev, res);
- if (root == NULL)
- osm_warn("%s: Can't find parent resource!\n", c->name);
- if (root && allocate_resource(root, res, sb->desired_io_size, sb->desired_io_size, sb->desired_io_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
- NULL, NULL) >= 0) {
- c->io_alloc = 1;
- sb->current_io_size = resource_size(res);
- sb->current_mem_base = res->start;
- osm_info("%s: allocated %llu bytes of PCI I/O at "
- "0x%016llX.\n", c->name,
- (unsigned long long)resource_size(res),
- (unsigned long long)res->start);
- }
- }
+ if (sb->current_io_size < sb->desired_io_size)
+ i2o_res_alloc(c, IORESOURCE_IO);
msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
if (IS_ERR(msg))
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 7dca1e640970..841717a2842c 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -571,7 +571,7 @@ static int pm800_probe(struct i2c_client *client,
ret = pm800_pages_init(chip);
if (ret) {
dev_err(&client->dev, "pm800_pages_init failed!\n");
- goto err_page_init;
+ goto err_device_init;
}
ret = device_800_init(chip, pdata);
@@ -587,7 +587,6 @@ static int pm800_probe(struct i2c_client *client,
err_device_init:
pm800_pages_exit(chip);
-err_page_init:
err_subchip_alloc:
pm80x_deinit();
out_init:
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index c9b1f6422941..bcfc9e85b4a0 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1179,12 +1179,18 @@ static int pm860x_probe(struct i2c_client *client,
chip->companion_addr = pdata->companion_addr;
chip->companion = i2c_new_dummy(chip->client->adapter,
chip->companion_addr);
+ if (!chip->companion) {
+ dev_err(&client->dev,
+ "Failed to allocate I2C companion device\n");
+ return -ENODEV;
+ }
chip->regmap_companion = regmap_init_i2c(chip->companion,
&pm860x_regmap_config);
if (IS_ERR(chip->regmap_companion)) {
ret = PTR_ERR(chip->regmap_companion);
dev_err(&chip->companion->dev,
"Failed to allocate register map: %d\n", ret);
+ i2c_unregister_device(chip->companion);
return ret;
}
i2c_set_clientdata(chip->companion, chip);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 49bb445d846a..33834120d057 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -59,6 +59,14 @@ config MFD_AAT2870_CORE
additional drivers must be enabled in order to use the
functionality of the device.
+config MFD_BCM590XX
+ tristate "Broadcom BCM590xx PMUs"
+ select MFD_CORE
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Support for the BCM590xx PMUs from Broadcom.
+
config MFD_CROS_EC
tristate "ChromeOS Embedded Controller"
select MFD_CORE
@@ -100,7 +108,7 @@ config PMIC_DA903X
bool "Dialog Semiconductor DA9030/DA9034 PMIC Support"
depends on I2C=y
help
- Say yes here to support for Dialog Semiconductor DA9030 (a.k.a
+ Say yes here to add support for Dialog Semiconductor DA9030 (a.k.a
ARAVA) and DA9034 (a.k.a MICCO), these are Power Management IC
usually found on PXA processors-based platforms. This includes
the I2C driver and the core APIs _only_, you have to select
@@ -270,13 +278,18 @@ config MFD_KEMPLD
device may provide functions like watchdog, GPIO, UART and I2C bus.
The following modules are supported:
+ * COMe-bHL6
* COMe-bIP#
* COMe-bPC2 (ETXexpress-PC)
* COMe-bSC# (ETXexpress-SC T#)
+ * COMe-cBT6
* COMe-cCT6
* COMe-cDC2 (microETXexpress-DC)
+ * COMe-cHL6
* COMe-cPC2 (microETXexpress-PC)
+ * COMe-mBT10
* COMe-mCT10
+ * COMe-mTT10 (nanoETXexpress-TT)
* ETX-OH
This driver can also be built as a module. If so, the module
@@ -322,9 +335,10 @@ config MFD_MAX14577
depends on I2C=y
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
select IRQ_DOMAIN
help
- Say yes here to support for Maxim Semiconductor MAX14577.
+ Say yes here to add support for Maxim Semiconductor MAX14577.
This is a Micro-USB IC with Charger controls on chip.
This driver provides common support for accessing the device;
additional drivers must be enabled in order to use the functionality
@@ -337,7 +351,7 @@ config MFD_MAX77686
select REGMAP_I2C
select IRQ_DOMAIN
help
- Say yes here to support for Maxim Semiconductor MAX77686.
+ Say yes here to add support for Maxim Semiconductor MAX77686.
This is a Power Management IC with RTC on chip.
This driver provides common support for accessing the device;
additional drivers must be enabled in order to use the functionality
@@ -349,7 +363,7 @@ config MFD_MAX77693
select MFD_CORE
select REGMAP_I2C
help
- Say yes here to support for Maxim Semiconductor MAX77693.
+ Say yes here to add support for Maxim Semiconductor MAX77693.
This is a companion Power Management IC with Flash, Haptic, Charger,
and MUIC(Micro USB Interface Controller) controls on chip.
This driver provides common support for accessing the device;
@@ -363,7 +377,7 @@ config MFD_MAX8907
select REGMAP_I2C
select REGMAP_IRQ
help
- Say yes here to support for Maxim Semiconductor MAX8907. This is
+ Say yes here to add support for Maxim Semiconductor MAX8907. This is
a Power Management IC. This driver provides common support for
accessing the device; additional drivers must be enabled in order
to use the functionality of the device.
@@ -373,7 +387,7 @@ config MFD_MAX8925
depends on I2C=y
select MFD_CORE
help
- Say yes here to support for Maxim Semiconductor MAX8925. This is
+ Say yes here to add support for Maxim Semiconductor MAX8925. This is
a Power Management IC. This driver provides common support for
accessing the device, additional drivers must be enabled in order
to use the functionality of the device.
@@ -384,7 +398,7 @@ config MFD_MAX8997
select MFD_CORE
select IRQ_DOMAIN
help
- Say yes here to support for Maxim Semiconductor MAX8997/8966.
+ Say yes here to add support for Maxim Semiconductor MAX8997/8966.
This is a Power Management IC with RTC, Flash, Fuel Gauge, Haptic,
MUIC controls on chip.
This driver provides common support for accessing the device;
@@ -397,7 +411,7 @@ config MFD_MAX8998
select MFD_CORE
select IRQ_DOMAIN
help
- Say yes here to support for Maxim Semiconductor MAX8998 and
+ Say yes here to add support for Maxim Semiconductor MAX8998 and
National Semiconductor LP3974. This is a Power Management IC.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the functionality
@@ -473,10 +487,11 @@ config MFD_PM8XXX
config MFD_PM8921_CORE
tristate "Qualcomm PM8921 PMIC chip"
- depends on (ARCH_MSM || HEXAGON)
- depends on BROKEN
+ depends on (ARM || HEXAGON)
+ select IRQ_DOMAIN
select MFD_CORE
select MFD_PM8XXX
+ select REGMAP
help
If you say yes to this option, support will be included for the
built-in PM8921 PMIC chip.
@@ -487,16 +502,6 @@ config MFD_PM8921_CORE
Say M here if you want to include support for PM8921 chip as a module.
This will build a module called "pm8921-core".
-config MFD_PM8XXX_IRQ
- bool "Qualcomm PM8xxx IRQ features"
- depends on MFD_PM8XXX
- default y if MFD_PM8XXX
- help
- This is the IRQ driver for Qualcomm PM 8xxx PMIC chips.
-
- This is required to use certain other PM 8xxx features, such as GPIO
- and MPP.
-
config MFD_RDC321X
tristate "RDC R-321x southbridge"
select MFD_CORE
@@ -516,6 +521,16 @@ config MFD_RTSX_PCI
types of memory cards, such as Memory Stick, Memory Stick Pro,
Secure Digital and MultiMediaCard.
+config MFD_RTSX_USB
+ tristate "Realtek USB card reader"
+ depends on USB
+ select MFD_CORE
+ help
+ Select this option to get support for Realtek USB 2.0 card readers
+ including RTS5129, RTS5139, RTS5179 and RTS5170.
+ The Realtek card reader supports access to many types of memory cards,
+ such as Memory Stick Pro, Secure Digital and MultiMediaCard.
+
config MFD_RC5T583
bool "Ricoh RC5T583 Power Management system device"
depends on I2C=y
@@ -774,17 +789,6 @@ config MFD_PALMAS
If you say yes here you get support for the Palmas
series of PMIC chips from Texas Instruments.
-config MFD_TI_SSP
- tristate "TI Sequencer Serial Port support"
- depends on ARCH_DAVINCI_TNETV107X
- select MFD_CORE
- ---help---
- Say Y here if you want support for the Sequencer Serial Port
- in a Texas Instruments TNETV107X SoC.
-
- To compile this driver as a module, choose M here: the
- module will be called ti-ssp.
-
config TPS6105X
tristate "TI TPS61050/61052 Boost Converters"
depends on I2C
@@ -853,6 +857,22 @@ config MFD_TPS65217
This driver can also be built as a module. If so, the module
will be called tps65217.
+config MFD_TPS65218
+ tristate "TI TPS65218 Power Management chips"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ If you say yes here you get support for the TPS65218 series of
+ Power Management chips.
+ These include voltage regulators, GPIOs and other features
+ that are often used in portable devices. Only the regulator
+ component is currently supported.
+
+ This driver can also be built as a module. If so, the module
+ will be called tps65218.
+
config MFD_TPS6586X
bool "TI TPS6586x Power Management chips"
depends on I2C=y
@@ -935,16 +955,6 @@ config TWL4030_CORE
high speed USB OTG transceiver, an audio codec (on most
versions) and many other features.
-config TWL4030_MADC
- tristate "TI TWL4030 MADC"
- depends on TWL4030_CORE
- help
- This driver provides support for triton TWL4030-MADC. The
- driver supports both RT and SW conversion methods.
-
- This driver can be built as a module. If so it will be
- named twl4030-madc
-
config TWL4030_POWER
bool "TI TWL4030 power resources"
depends on TWL4030_CORE && ARM
@@ -1193,9 +1203,6 @@ config MFD_STW481X
in various ST Microelectronics and ST-Ericsson embedded
Nomadik series.
-endmenu
-endif
-
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
@@ -1226,3 +1233,6 @@ config VEXPRESS_CONFIG
help
Platform configuration infrastructure for the ARM Ltd.
Versatile Express.
+
+endmenu
+endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 5aea5ef0a62f..2851275e2656 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -8,12 +8,14 @@ obj-$(CONFIG_MFD_88PM800) += 88pm800.o 88pm80x.o
obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
+obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
obj-$(CONFIG_MFD_CROS_EC) += cros_ec.o
obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o
rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
+obj-$(CONFIG_MFD_RTSX_USB) += rtsx_usb.o
obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
@@ -21,7 +23,6 @@ obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
-obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
obj-$(CONFIG_MFD_TI_AM335X_TSCADC) += ti_am335x_tscadc.o
obj-$(CONFIG_MFD_STA2X11) += sta2x11-mfd.o
@@ -62,6 +63,7 @@ obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
obj-$(CONFIG_MFD_TPS65217) += tps65217.o
+obj-$(CONFIG_MFD_TPS65218) += tps65218.o
obj-$(CONFIG_MFD_TPS65910) += tps65910.o
tps65912-objs := tps65912-core.o tps65912-irq.o
obj-$(CONFIG_MFD_TPS65912) += tps65912.o
@@ -71,7 +73,6 @@ obj-$(CONFIG_MFD_TPS80031) += tps80031.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
-obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_MFD_TWL4030_AUDIO) += twl4030-audio.o
obj-$(CONFIG_TWL6040_CORE) += twl6040.o
@@ -150,7 +151,6 @@ obj-$(CONFIG_MFD_SI476X_CORE) += si476x-core.o
obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o omap-usb-tll.o
obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o ssbi.o
-obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
obj-$(CONFIG_MFD_TPS65090) += tps65090.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index aaff683cd37d..a8ee4a36a1d8 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -592,7 +592,7 @@ static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
/* If ->irq_base is zero this will give a linear mapping */
ab8500->domain = irq_domain_add_simple(NULL,
- num_irqs, ab8500->irq_base,
+ num_irqs, 0,
&ab8500_irq_ops, ab8500);
if (!ab8500->domain) {
@@ -1583,14 +1583,13 @@ static int ab8500_probe(struct platform_device *pdev)
if (!ab8500)
return -ENOMEM;
- if (plat)
- ab8500->irq_base = plat->irq_base;
-
ab8500->dev = &pdev->dev;
resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!resource)
+ if (!resource) {
+ dev_err(&pdev->dev, "no IRQ resource\n");
return -ENODEV;
+ }
ab8500->irq = resource->start;
@@ -1612,8 +1611,10 @@ static int ab8500_probe(struct platform_device *pdev)
else {
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_IC_NAME_REG, &value);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not probe HW\n");
return ret;
+ }
ab8500->version = value;
}
@@ -1759,30 +1760,30 @@ static int ab8500_probe(struct platform_device *pdev)
if (is_ab9540(ab8500))
ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
ARRAY_SIZE(ab9540_devs), NULL,
- ab8500->irq_base, ab8500->domain);
+ 0, ab8500->domain);
else if (is_ab8540(ab8500)) {
ret = mfd_add_devices(ab8500->dev, 0, ab8540_devs,
ARRAY_SIZE(ab8540_devs), NULL,
- ab8500->irq_base, NULL);
+ 0, ab8500->domain);
if (ret)
return ret;
if (is_ab8540_1p2_or_earlier(ab8500))
ret = mfd_add_devices(ab8500->dev, 0, ab8540_cut1_devs,
ARRAY_SIZE(ab8540_cut1_devs), NULL,
- ab8500->irq_base, NULL);
+ 0, ab8500->domain);
else /* ab8540 >= cut2 */
ret = mfd_add_devices(ab8500->dev, 0, ab8540_cut2_devs,
ARRAY_SIZE(ab8540_cut2_devs), NULL,
- ab8500->irq_base, NULL);
+ 0, ab8500->domain);
} else if (is_ab8505(ab8500))
ret = mfd_add_devices(ab8500->dev, 0, ab8505_devs,
ARRAY_SIZE(ab8505_devs), NULL,
- ab8500->irq_base, ab8500->domain);
+ 0, ab8500->domain);
else
ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
ARRAY_SIZE(ab8500_devs), NULL,
- ab8500->irq_base, ab8500->domain);
+ 0, ab8500->domain);
if (ret)
return ret;
@@ -1790,7 +1791,7 @@ static int ab8500_probe(struct platform_device *pdev)
/* Add battery management devices */
ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
ARRAY_SIZE(ab8500_bm_devs), NULL,
- ab8500->irq_base, ab8500->domain);
+ 0, ab8500->domain);
if (ret)
dev_err(ab8500->dev, "error adding bm devices\n");
}
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index 62501553d63c..f495b8b57dd7 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index a45aab9f6bb1..1c3ae57082ed 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -251,8 +251,6 @@ static int arizona_apply_hardware_patch(struct arizona* arizona)
unsigned int fll, sysclk;
int ret, err;
- regcache_cache_bypass(arizona->regmap, true);
-
/* Cache existing FLL and SYSCLK settings */
ret = regmap_read(arizona->regmap, ARIZONA_FLL1_CONTROL_1, &fll);
if (ret != 0) {
@@ -322,8 +320,6 @@ err_fll:
err);
}
- regcache_cache_bypass(arizona->regmap, false);
-
if (ret != 0)
return ret;
else
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index c71ff0af1547..39fa554f13bb 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -277,6 +277,7 @@ static const struct regmap_range as3722_readable_ranges[] = {
regmap_reg_range(AS3722_ADC0_CONTROL_REG, AS3722_ADC_CONFIGURATION_REG),
regmap_reg_range(AS3722_ASIC_ID1_REG, AS3722_ASIC_ID2_REG),
regmap_reg_range(AS3722_LOCK_REG, AS3722_LOCK_REG),
+ regmap_reg_range(AS3722_FUSE7_REG, AS3722_FUSE7_REG),
};
static const struct regmap_access_table as3722_readable_table = {
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
new file mode 100644
index 000000000000..e9a33c79431b
--- /dev/null
+++ b/drivers/mfd/bcm590xx.c
@@ -0,0 +1,93 @@
+/*
+ * Broadcom BCM590xx PMU
+ *
+ * Copyright 2014 Linaro Limited
+ * Author: Matt Porter <mporter@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/mfd/bcm590xx.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+static const struct mfd_cell bcm590xx_devs[] = {
+ {
+ .name = "bcm590xx-vregs",
+ },
+};
+
+static const struct regmap_config bcm590xx_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = BCM590XX_MAX_REGISTER,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int bcm590xx_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct bcm590xx *bcm590xx;
+ int ret;
+
+ bcm590xx = devm_kzalloc(&i2c->dev, sizeof(*bcm590xx), GFP_KERNEL);
+ if (!bcm590xx)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, bcm590xx);
+ bcm590xx->dev = &i2c->dev;
+ bcm590xx->i2c_client = i2c;
+
+ bcm590xx->regmap = devm_regmap_init_i2c(i2c, &bcm590xx_regmap_config);
+ if (IS_ERR(bcm590xx->regmap)) {
+ ret = PTR_ERR(bcm590xx->regmap);
+ dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mfd_add_devices(&i2c->dev, -1, bcm590xx_devs,
+ ARRAY_SIZE(bcm590xx_devs), NULL, 0, NULL);
+ if (ret < 0)
+ dev_err(&i2c->dev, "failed to add sub-devices: %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id bcm590xx_of_match[] = {
+ { .compatible = "brcm,bcm59056" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bcm590xx_of_match);
+
+static const struct i2c_device_id bcm590xx_i2c_id[] = {
+ { "bcm59056" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bcm590xx_i2c_id);
+
+static struct i2c_driver bcm590xx_i2c_driver = {
+ .driver = {
+ .name = "bcm590xx",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(bcm590xx_of_match),
+ },
+ .probe = bcm590xx_i2c_probe,
+ .id_table = bcm590xx_i2c_id,
+};
+module_i2c_driver(bcm590xx_i2c_driver);
+
+MODULE_AUTHOR("Matt Porter <mporter@linaro.org>");
+MODULE_DESCRIPTION("BCM590xx multi-function driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bcm590xx");
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 17c13012686a..be91cb5d6e78 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -23,7 +23,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 25838f10b35b..e8af816d73a9 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -279,6 +279,9 @@ static bool da9052_reg_volatile(struct device *dev, unsigned int reg)
case DA9052_EVENT_B_REG:
case DA9052_EVENT_C_REG:
case DA9052_EVENT_D_REG:
+ case DA9052_CONTROL_B_REG:
+ case DA9052_CONTROL_D_REG:
+ case DA9052_SUPPLY_REG:
case DA9052_FAULTLOG_REG:
case DA9052_CHG_TIME_REG:
case DA9052_ADC_RES_L_REG:
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index c319c4ef5d49..6da8ec8ff800 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -75,6 +75,7 @@ static int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
DA9052_PARK_REGISTER,
&val);
break;
+ case DA9053_BC:
default:
/*
* For other chips parking of I2C register
@@ -114,6 +115,7 @@ static const struct i2c_device_id da9052_i2c_id[] = {
{"da9053-aa", DA9053_AA},
{"da9053-ba", DA9053_BA},
{"da9053-bb", DA9053_BB},
+ {"da9053-bc", DA9053_BC},
{}
};
@@ -121,8 +123,9 @@ static const struct i2c_device_id da9052_i2c_id[] = {
static const struct of_device_id dialog_dt_ids[] = {
{ .compatible = "dlg,da9052", .data = &da9052_i2c_id[0] },
{ .compatible = "dlg,da9053-aa", .data = &da9052_i2c_id[1] },
- { .compatible = "dlg,da9053-ab", .data = &da9052_i2c_id[2] },
+ { .compatible = "dlg,da9053-ba", .data = &da9052_i2c_id[2] },
{ .compatible = "dlg,da9053-bb", .data = &da9052_i2c_id[3] },
+ { .compatible = "dlg,da9053-bc", .data = &da9052_i2c_id[4] },
{ /* sentinel */ }
};
#endif
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index 0680bcbc53de..17666b40b70c 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -71,6 +71,7 @@ static struct spi_device_id da9052_spi_id[] = {
{"da9053-aa", DA9053_AA},
{"da9053-ba", DA9053_BA},
{"da9053-bb", DA9053_BB},
+ {"da9053-bc", DA9053_BC},
{}
};
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index 8103e4362132..d4d4c165eb95 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/mfd/da9055/core.h>
@@ -66,6 +68,11 @@ static struct i2c_device_id da9055_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
+static const struct of_device_id da9055_of_match[] = {
+ { .compatible = "dlg,da9055-pmic", },
+ { }
+};
+
static struct i2c_driver da9055_i2c_driver = {
.probe = da9055_i2c_probe,
.remove = da9055_i2c_remove,
@@ -73,6 +80,7 @@ static struct i2c_driver da9055_i2c_driver = {
.driver = {
.name = "da9055-pmic",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(da9055_of_match),
},
};
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index 26937cd01071..e70ae315abc7 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -110,7 +110,7 @@ static const struct mfd_cell da9063_devs[] = {
int da9063_device_init(struct da9063 *da9063, unsigned int irq)
{
struct da9063_pdata *pdata = da9063->dev->platform_data;
- int model, revision;
+ int model, variant_id, variant_code;
int ret;
if (pdata) {
@@ -141,23 +141,26 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq)
return -ENODEV;
}
- ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_VARIANT, &revision);
+ ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_VARIANT, &variant_id);
if (ret < 0) {
- dev_err(da9063->dev, "Cannot read chip revision id.\n");
+ dev_err(da9063->dev, "Cannot read chip variant id.\n");
return -EIO;
}
- revision >>= DA9063_CHIP_VARIANT_SHIFT;
- if (revision != 3) {
- dev_err(da9063->dev, "Unknown chip revision: %d\n", revision);
+
+ variant_code = variant_id >> DA9063_CHIP_VARIANT_SHIFT;
+
+ dev_info(da9063->dev,
+ "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n",
+ model, variant_id);
+
+ if (variant_code != PMIC_DA9063_BB) {
+ dev_err(da9063->dev, "Unknown chip variant code: 0x%02X\n",
+ variant_code);
return -ENODEV;
}
da9063->model = model;
- da9063->revision = revision;
-
- dev_info(da9063->dev,
- "Device detected (model-ID: 0x%02X rev-ID: 0x%02X)\n",
- model, revision);
+ da9063->variant_code = variant_code;
ret = da9063_irq_init(da9063);
if (ret) {
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e43e6e821117..7694e0700d34 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -25,6 +25,7 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/mfd/core.h>
@@ -2678,16 +2679,12 @@ static struct irq_domain_ops db8500_irq_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static int db8500_irq_init(struct device_node *np, int irq_base)
+static int db8500_irq_init(struct device_node *np)
{
int i;
- /* In the device tree case, just take some IRQs */
- if (np)
- irq_base = 0;
-
db8500_irq_domain = irq_domain_add_simple(
- np, NUM_PRCMU_WAKEUPS, irq_base,
+ np, NUM_PRCMU_WAKEUPS, 0,
&db8500_irq_ops, NULL);
if (!db8500_irq_domain) {
@@ -3114,10 +3111,10 @@ static void db8500_prcmu_update_cpufreq(void)
}
static int db8500_prcmu_register_ab8500(struct device *parent,
- struct ab8500_platform_data *pdata,
- int irq)
+ struct ab8500_platform_data *pdata)
{
- struct resource ab8500_resource = DEFINE_RES_IRQ(irq);
+ struct device_node *np;
+ struct resource ab8500_resource;
struct mfd_cell ab8500_cell = {
.name = "ab8500-core",
.of_compatible = "stericsson,ab8500",
@@ -3128,6 +3125,20 @@ static int db8500_prcmu_register_ab8500(struct device *parent,
.num_resources = 1,
};
+ if (!parent->of_node)
+ return -ENODEV;
+
+ /* Look up the device node, sneak the IRQ out of it */
+ for_each_child_of_node(parent->of_node, np) {
+ if (of_device_is_compatible(np, ab8500_cell.of_compatible))
+ break;
+ }
+ if (!np) {
+ dev_info(parent, "could not find AB8500 node in the device tree\n");
+ return -ENODEV;
+ }
+ of_irq_to_resource_table(np, &ab8500_resource, 1);
+
return mfd_add_devices(parent, 0, &ab8500_cell, 1, NULL, 0, NULL);
}
@@ -3180,7 +3191,7 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
goto no_irq_return;
}
- db8500_irq_init(np, pdata->irq_base);
+ db8500_irq_init(np);
prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
@@ -3205,8 +3216,7 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
}
}
- err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata,
- pdata->ab_irq);
+ err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata);
if (err) {
mfd_remove_devices(&pdev->dev);
pr_err("prcmu: Failed to add ab8500 subdevice\n");
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index 81b7d88af313..433f823037dd 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index d3e23278d299..07692604e119 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -322,9 +322,12 @@ static int kempld_detect_device(struct kempld_device_data *pld)
return -ENODEV;
}
- /* Release hardware mutex if aquired */
- if (!(index_reg & KEMPLD_MUTEX_KEY))
+ /* Release hardware mutex if acquired */
+ if (!(index_reg & KEMPLD_MUTEX_KEY)) {
iowrite8(KEMPLD_MUTEX_KEY, pld->io_index);
+ /* PXT and COMe-cPC2 boards may require a second release */
+ iowrite8(KEMPLD_MUTEX_KEY, pld->io_index);
+ }
mutex_unlock(&pld->lock);
@@ -438,6 +441,14 @@ static struct dmi_system_id __initdata kempld_dmi_table[] = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "CHL6",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-cHL6"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "CHR2",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -510,6 +521,14 @@ static struct dmi_system_id __initdata kempld_dmi_table[] = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "CVV6",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-cBT"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "FRI2",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -533,6 +552,14 @@ static struct dmi_system_id __initdata kempld_dmi_table[] = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "MVV1",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-mBT"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "NTC1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index be93fa261ded..3f10ea3f45d1 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -58,7 +58,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -72,9 +71,11 @@
#define ACPIBASE_GPE_END 0x2f
#define ACPIBASE_SMI_OFF 0x30
#define ACPIBASE_SMI_END 0x33
+#define ACPIBASE_PMC_OFF 0x08
+#define ACPIBASE_PMC_END 0x0c
#define ACPIBASE_TCO_OFF 0x60
#define ACPIBASE_TCO_END 0x7f
-#define ACPICTRL 0x44
+#define ACPICTRL_PMCBASE 0x44
#define ACPIBASE_GCS_OFF 0x3410
#define ACPIBASE_GCS_END 0x3414
@@ -90,16 +91,17 @@
#define wdt_mem_res(i) wdt_res(ICH_RES_MEM_OFF, i)
#define wdt_res(b, i) (&wdt_ich_res[(b) + (i)])
-struct lpc_ich_cfg {
- int base;
- int ctrl;
- int save;
-};
-
struct lpc_ich_priv {
int chipset;
- struct lpc_ich_cfg acpi;
- struct lpc_ich_cfg gpio;
+
+ int abase; /* ACPI base */
+ int actrl_pbase; /* ACPI control or PMC base */
+ int gbase; /* GPIO base */
+ int gctrl; /* GPIO control */
+
+ int abase_save; /* Cached ACPI base value */
+ int actrl_pbase_save; /* Cached ACPI control or PMC base value */
+ int gctrl_save; /* Cached GPIO control value */
};
static struct resource wdt_ich_res[] = {
@@ -111,7 +113,7 @@ static struct resource wdt_ich_res[] = {
{
.flags = IORESOURCE_IO,
},
- /* GCS */
+ /* GCS or PMC */
{
.flags = IORESOURCE_MEM,
},
@@ -211,6 +213,7 @@ enum lpc_chipsets {
LPC_LPT_LP, /* Lynx Point-LP */
LPC_WBG, /* Wellsburg */
LPC_AVN, /* Avoton SoC */
+ LPC_BAYTRAIL, /* Bay Trail SoC */
LPC_COLETO, /* Coleto Creek */
LPC_WPT_LP, /* Wildcat Point-LP */
};
@@ -303,6 +306,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
[LPC_NM10] = {
.name = "NM10",
.iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
},
[LPC_ICH8] = {
.name = "ICH8 or ICH8R",
@@ -499,7 +503,12 @@ static struct lpc_ich_info lpc_chipset_info[] = {
},
[LPC_AVN] = {
.name = "Avoton SoC",
- .iTCO_version = 1,
+ .iTCO_version = 3,
+ .gpio_version = AVOTON_GPIO,
+ },
+ [LPC_BAYTRAIL] = {
+ .name = "Bay Trail SoC",
+ .iTCO_version = 3,
},
[LPC_COLETO] = {
.name = "Coleto Creek",
@@ -726,6 +735,7 @@ static const struct pci_device_id lpc_ich_ids[] = {
{ PCI_VDEVICE(INTEL, 0x1f39), LPC_AVN},
{ PCI_VDEVICE(INTEL, 0x1f3a), LPC_AVN},
{ PCI_VDEVICE(INTEL, 0x1f3b), LPC_AVN},
+ { PCI_VDEVICE(INTEL, 0x0f1c), LPC_BAYTRAIL},
{ PCI_VDEVICE(INTEL, 0x2390), LPC_COLETO},
{ PCI_VDEVICE(INTEL, 0x9cc1), LPC_WPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc2), LPC_WPT_LP},
@@ -742,14 +752,20 @@ static void lpc_ich_restore_config_space(struct pci_dev *dev)
{
struct lpc_ich_priv *priv = pci_get_drvdata(dev);
- if (priv->acpi.save >= 0) {
- pci_write_config_byte(dev, priv->acpi.ctrl, priv->acpi.save);
- priv->acpi.save = -1;
+ if (priv->abase_save >= 0) {
+ pci_write_config_byte(dev, priv->abase, priv->abase_save);
+ priv->abase_save = -1;
+ }
+
+ if (priv->actrl_pbase_save >= 0) {
+ pci_write_config_byte(dev, priv->actrl_pbase,
+ priv->actrl_pbase_save);
+ priv->actrl_pbase_save = -1;
}
- if (priv->gpio.save >= 0) {
- pci_write_config_byte(dev, priv->gpio.ctrl, priv->gpio.save);
- priv->gpio.save = -1;
+ if (priv->gctrl_save >= 0) {
+ pci_write_config_byte(dev, priv->gctrl, priv->gctrl_save);
+ priv->gctrl_save = -1;
}
}
@@ -758,9 +774,26 @@ static void lpc_ich_enable_acpi_space(struct pci_dev *dev)
struct lpc_ich_priv *priv = pci_get_drvdata(dev);
u8 reg_save;
- pci_read_config_byte(dev, priv->acpi.ctrl, &reg_save);
- pci_write_config_byte(dev, priv->acpi.ctrl, reg_save | 0x10);
- priv->acpi.save = reg_save;
+ switch (lpc_chipset_info[priv->chipset].iTCO_version) {
+ case 3:
+ /*
+ * Some chipsets (e.g. Avoton) enable the ACPI space in the
+ * ACPI BASE register.
+ */
+ pci_read_config_byte(dev, priv->abase, &reg_save);
+ pci_write_config_byte(dev, priv->abase, reg_save | 0x2);
+ priv->abase_save = reg_save;
+ break;
+ default:
+ /*
+ * Most chipsets enable the ACPI space in the ACPI control
+ * register.
+ */
+ pci_read_config_byte(dev, priv->actrl_pbase, &reg_save);
+ pci_write_config_byte(dev, priv->actrl_pbase, reg_save | 0x80);
+ priv->actrl_pbase_save = reg_save;
+ break;
+ }
}
static void lpc_ich_enable_gpio_space(struct pci_dev *dev)
@@ -768,9 +801,20 @@ static void lpc_ich_enable_gpio_space(struct pci_dev *dev)
struct lpc_ich_priv *priv = pci_get_drvdata(dev);
u8 reg_save;
- pci_read_config_byte(dev, priv->gpio.ctrl, &reg_save);
- pci_write_config_byte(dev, priv->gpio.ctrl, reg_save | 0x10);
- priv->gpio.save = reg_save;
+ pci_read_config_byte(dev, priv->gctrl, &reg_save);
+ pci_write_config_byte(dev, priv->gctrl, reg_save | 0x10);
+ priv->gctrl_save = reg_save;
+}
+
+static void lpc_ich_enable_pmc_space(struct pci_dev *dev)
+{
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+ u8 reg_save;
+
+ pci_read_config_byte(dev, priv->actrl_pbase, &reg_save);
+ pci_write_config_byte(dev, priv->actrl_pbase, reg_save | 0x2);
+
+ priv->actrl_pbase_save = reg_save;
}
static void lpc_ich_finalize_cell(struct pci_dev *dev, struct mfd_cell *cell)
@@ -815,7 +859,7 @@ static int lpc_ich_init_gpio(struct pci_dev *dev)
struct resource *res;
/* Setup power management base register */
- pci_read_config_dword(dev, priv->acpi.base, &base_addr_cfg);
+ pci_read_config_dword(dev, priv->abase, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
@@ -841,7 +885,7 @@ static int lpc_ich_init_gpio(struct pci_dev *dev)
gpe0_done:
/* Setup GPIO base register */
- pci_read_config_dword(dev, priv->gpio.base, &base_addr_cfg);
+ pci_read_config_dword(dev, priv->gbase, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
dev_notice(&dev->dev, "I/O space for GPIO uninitialized\n");
@@ -891,7 +935,7 @@ static int lpc_ich_init_wdt(struct pci_dev *dev)
struct resource *res;
/* Setup power management base register */
- pci_read_config_dword(dev, priv->acpi.base, &base_addr_cfg);
+ pci_read_config_dword(dev, priv->abase, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
@@ -910,14 +954,20 @@ static int lpc_ich_init_wdt(struct pci_dev *dev)
lpc_ich_enable_acpi_space(dev);
/*
+ * iTCO v2:
* Get the Memory-Mapped GCS register. To get access to it
* we have to read RCBA from PCI Config space 0xf0 and use
* it as base. GCS = RCBA + ICH6_GCS(0x3410).
+ *
+ * iTCO v3:
+ * Get the Power Management Configuration register. To get access
+ * to it we have to read the PMC BASE from config space and address
+ * the register at offset 0x8.
*/
if (lpc_chipset_info[priv->chipset].iTCO_version == 1) {
/* Don't register iomem for TCO ver 1 */
lpc_ich_cells[LPC_WDT].num_resources--;
- } else {
+ } else if (lpc_chipset_info[priv->chipset].iTCO_version == 2) {
pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0xffffc000;
if (!(base_addr_cfg & 1)) {
@@ -926,9 +976,17 @@ static int lpc_ich_init_wdt(struct pci_dev *dev)
ret = -ENODEV;
goto wdt_done;
}
- res = wdt_mem_res(ICH_RES_MEM_GCS);
+ res = wdt_mem_res(ICH_RES_MEM_GCS_PMC);
res->start = base_addr + ACPIBASE_GCS_OFF;
res->end = base_addr + ACPIBASE_GCS_END;
+ } else if (lpc_chipset_info[priv->chipset].iTCO_version == 3) {
+ lpc_ich_enable_pmc_space(dev);
+ pci_read_config_dword(dev, ACPICTRL_PMCBASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0xfffffe00;
+
+ res = wdt_mem_res(ICH_RES_MEM_GCS_PMC);
+ res->start = base_addr + ACPIBASE_PMC_OFF;
+ res->end = base_addr + ACPIBASE_PMC_END;
}
lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
@@ -952,28 +1010,35 @@ static int lpc_ich_probe(struct pci_dev *dev,
return -ENOMEM;
priv->chipset = id->driver_data;
- priv->acpi.save = -1;
- priv->acpi.base = ACPIBASE;
- priv->acpi.ctrl = ACPICTRL;
- priv->gpio.save = -1;
+ priv->actrl_pbase_save = -1;
+ priv->abase_save = -1;
+
+ priv->abase = ACPIBASE;
+ priv->actrl_pbase = ACPICTRL_PMCBASE;
+
+ priv->gctrl_save = -1;
if (priv->chipset <= LPC_ICH5) {
- priv->gpio.base = GPIOBASE_ICH0;
- priv->gpio.ctrl = GPIOCTRL_ICH0;
+ priv->gbase = GPIOBASE_ICH0;
+ priv->gctrl = GPIOCTRL_ICH0;
} else {
- priv->gpio.base = GPIOBASE_ICH6;
- priv->gpio.ctrl = GPIOCTRL_ICH6;
+ priv->gbase = GPIOBASE_ICH6;
+ priv->gctrl = GPIOCTRL_ICH6;
}
pci_set_drvdata(dev, priv);
- ret = lpc_ich_init_wdt(dev);
- if (!ret)
- cell_added = true;
+ if (lpc_chipset_info[priv->chipset].iTCO_version) {
+ ret = lpc_ich_init_wdt(dev);
+ if (!ret)
+ cell_added = true;
+ }
- ret = lpc_ich_init_gpio(dev);
- if (!ret)
- cell_added = true;
+ if (lpc_chipset_info[priv->chipset].gpio_version) {
+ ret = lpc_ich_init_gpio(dev);
+ if (!ret)
+ cell_added = true;
+ }
/*
* We only care if at least one or none of the cells registered
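For the iTCO v3 path added above, the watchdog memory resource comes straight from config-space arithmetic: the PMCBASE dword (offset 0x44) masked to a 512-byte boundary plus the PMC register offset. A minimal sketch of that address math, with a hypothetical helper name:

/*
 * Hypothetical sketch, not part of this patch: the iTCO v3 PMC register
 * address is the ACPICTRL_PMCBASE config dword masked to a 512-byte
 * boundary, plus ACPIBASE_PMC_OFF (0x08) as defined above.
 */
static resource_size_t lpc_ich_pmc_reg_addr(u32 pmcbase_cfg)
{
	resource_size_t base = pmcbase_cfg & 0xfffffe00;

	return base + ACPIBASE_PMC_OFF;
}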
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index 3bb05c03c68d..4ee755034f3b 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -23,7 +23,6 @@
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 71aa14a6bfbb..5f13cefe8def 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -18,6 +18,7 @@
* This driver is based on max8997.c
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
@@ -25,7 +26,10 @@
#include <linux/mfd/max14577-private.h>
static struct mfd_cell max14577_devs[] = {
- { .name = "max14577-muic", },
+ {
+ .name = "max14577-muic",
+ .of_compatible = "maxim,max14577-muic",
+ },
{
.name = "max14577-regulator",
.of_compatible = "maxim,max14577-regulator",
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index f53d5823a3f7..e5fce765accb 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -121,6 +121,10 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
dev_info(max77686->dev, "device found\n");
max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
+ if (!max77686->rtc) {
+ dev_err(max77686->dev, "Failed to allocate I2C device for RTC\n");
+ return -ENODEV;
+ }
i2c_set_clientdata(max77686->rtc, max77686);
max77686_irq_init(max77686);
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index e0859987ab6b..c5535f018466 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -148,9 +148,18 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
+ if (!max77693->muic) {
+ dev_err(max77693->dev, "Failed to allocate I2C device for MUIC\n");
+ return -ENODEV;
+ }
i2c_set_clientdata(max77693->muic, max77693);
max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
+ if (!max77693->haptic) {
+ dev_err(max77693->dev, "Failed to allocate I2C device for Haptic\n");
+ ret = -ENODEV;
+ goto err_i2c_haptic;
+ }
i2c_set_clientdata(max77693->haptic, max77693);
/*
@@ -184,8 +193,9 @@ err_mfd:
max77693_irq_exit(max77693);
err_irq:
err_regmap_muic:
- i2c_unregister_device(max77693->muic);
i2c_unregister_device(max77693->haptic);
+err_i2c_haptic:
+ i2c_unregister_device(max77693->muic);
return ret;
}
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 176aa26fc787..a83eed5c15ca 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -181,9 +181,18 @@ static int max8925_probe(struct i2c_client *client,
mutex_init(&chip->io_lock);
chip->rtc = i2c_new_dummy(chip->i2c->adapter, RTC_I2C_ADDR);
+ if (!chip->rtc) {
+ dev_err(chip->dev, "Failed to allocate I2C device for RTC\n");
+ return -ENODEV;
+ }
i2c_set_clientdata(chip->rtc, chip);
chip->adc = i2c_new_dummy(chip->i2c->adapter, ADC_I2C_ADDR);
+ if (!chip->adc) {
+ dev_err(chip->dev, "Failed to allocate I2C device for ADC\n");
+ i2c_unregister_device(chip->rtc);
+ return -ENODEV;
+ }
i2c_set_clientdata(chip->adc, chip);
device_init_wakeup(&client->dev, 1);
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 5adede0fb04c..8cf7a015cfe5 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -208,10 +208,26 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
mutex_init(&max8997->iolock);
max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
+ if (!max8997->rtc) {
+ dev_err(max8997->dev, "Failed to allocate I2C device for RTC\n");
+ return -ENODEV;
+ }
i2c_set_clientdata(max8997->rtc, max8997);
+
max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
+ if (!max8997->haptic) {
+ dev_err(max8997->dev, "Failed to allocate I2C device for Haptic\n");
+ ret = -ENODEV;
+ goto err_i2c_haptic;
+ }
i2c_set_clientdata(max8997->haptic, max8997);
+
max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
+ if (!max8997->muic) {
+ dev_err(max8997->dev, "Failed to allocate I2C device for MUIC\n");
+ ret = -ENODEV;
+ goto err_i2c_muic;
+ }
i2c_set_clientdata(max8997->muic, max8997);
pm_runtime_set_active(max8997->dev);
@@ -239,7 +255,9 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
err_mfd:
mfd_remove_devices(max8997->dev);
i2c_unregister_device(max8997->muic);
+err_i2c_muic:
i2c_unregister_device(max8997->haptic);
+err_i2c_haptic:
i2c_unregister_device(max8997->rtc);
return ret;
}
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 5d5e186b5d8b..592db06098e6 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -215,6 +215,10 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
mutex_init(&max8998->iolock);
max8998->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
+ if (!max8998->rtc) {
+ dev_err(&i2c->dev, "Failed to allocate I2C device for RTC\n");
+ return -ENODEV;
+ }
i2c_set_clientdata(max8998->rtc, max8998);
max8998_irq_init(max8998);
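The same companion-client fix recurs across 88pm860x, max77686, max77693, max8925, max8997 and max8998 above: i2c_new_dummy() returns NULL on failure, so the result must be checked and the dummy unregistered on later errors. A minimal sketch of the pattern as a generic helper (hypothetical, not part of this series):

/*
 * Hypothetical helper, not part of this series: create a dummy companion
 * client at @addr and attach @drvdata, failing cleanly if the address
 * cannot be claimed. i2c_new_dummy() returns NULL on failure.
 */
static struct i2c_client *pmic_new_companion(struct i2c_client *client,
					     unsigned short addr,
					     void *drvdata)
{
	struct i2c_client *companion = i2c_new_dummy(client->adapter, addr);

	if (!companion) {
		dev_err(&client->dev,
			"Failed to allocate companion device at 0x%02x\n", addr);
		return NULL;
	}

	i2c_set_clientdata(companion, drvdata);
	return companion;
}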
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 38ab67829791..702925e242c9 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -140,6 +140,11 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
mc13xxx->irq = spi->irq;
+ spi->max_speed_hz = spi->max_speed_hz ? : 26000000;
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
mc13xxx->regmap = devm_regmap_init(&spi->dev, &regmap_mc13xxx_bus,
&spi->dev,
&mc13xxx_regmap_spi_config);
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index 41c31b3ac940..29d76986b40b 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -12,7 +12,6 @@
* MCP read/write timeouts from Jordi Colomer, rehacked by rmk.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/kernel.h>
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 90b630ccc8bc..651e249287dc 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -665,55 +665,78 @@ static int usbhs_omap_probe(struct platform_device *pdev)
goto err_mem;
}
- need_logic_fck = false;
+ /* Set all clocks as invalid to begin with */
+ omap->ehci_logic_fck = ERR_PTR(-ENODEV);
+ omap->init_60m_fclk = ERR_PTR(-ENODEV);
+ omap->utmi_p1_gfclk = ERR_PTR(-ENODEV);
+ omap->utmi_p2_gfclk = ERR_PTR(-ENODEV);
+ omap->xclk60mhsp1_ck = ERR_PTR(-ENODEV);
+ omap->xclk60mhsp2_ck = ERR_PTR(-ENODEV);
+
for (i = 0; i < omap->nports; i++) {
- if (is_ehci_phy_mode(i) || is_ehci_tll_mode(i) ||
- is_ehci_hsic_mode(i))
- need_logic_fck |= true;
+ omap->utmi_clk[i] = ERR_PTR(-ENODEV);
+ omap->hsic480m_clk[i] = ERR_PTR(-ENODEV);
+ omap->hsic60m_clk[i] = ERR_PTR(-ENODEV);
}
- omap->ehci_logic_fck = ERR_PTR(-EINVAL);
- if (need_logic_fck) {
- omap->ehci_logic_fck = clk_get(dev, "ehci_logic_fck");
- if (IS_ERR(omap->ehci_logic_fck)) {
- ret = PTR_ERR(omap->ehci_logic_fck);
- dev_dbg(dev, "ehci_logic_fck failed:%d\n", ret);
+ /* for OMAP3 i.e. USBHS REV1 */
+ if (omap->usbhs_rev == OMAP_USBHS_REV1) {
+ need_logic_fck = false;
+ for (i = 0; i < omap->nports; i++) {
+ if (is_ehci_phy_mode(pdata->port_mode[i]) ||
+ is_ehci_tll_mode(pdata->port_mode[i]) ||
+ is_ehci_hsic_mode(pdata->port_mode[i]))
+ need_logic_fck |= true;
}
+
+ if (need_logic_fck) {
+ omap->ehci_logic_fck = devm_clk_get(dev,
+ "usbhost_120m_fck");
+ if (IS_ERR(omap->ehci_logic_fck)) {
+ ret = PTR_ERR(omap->ehci_logic_fck);
+ dev_err(dev, "usbhost_120m_fck failed:%d\n",
+ ret);
+ goto err_mem;
+ }
+ }
+ goto initialize;
}
- omap->utmi_p1_gfclk = clk_get(dev, "utmi_p1_gfclk");
+ /* for OMAP4+ i.e. USBHS REV2+ */
+ omap->utmi_p1_gfclk = devm_clk_get(dev, "utmi_p1_gfclk");
if (IS_ERR(omap->utmi_p1_gfclk)) {
ret = PTR_ERR(omap->utmi_p1_gfclk);
dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
- goto err_p1_gfclk;
+ goto err_mem;
}
- omap->utmi_p2_gfclk = clk_get(dev, "utmi_p2_gfclk");
+ omap->utmi_p2_gfclk = devm_clk_get(dev, "utmi_p2_gfclk");
if (IS_ERR(omap->utmi_p2_gfclk)) {
ret = PTR_ERR(omap->utmi_p2_gfclk);
dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
- goto err_p2_gfclk;
+ goto err_mem;
}
- omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+ omap->xclk60mhsp1_ck = devm_clk_get(dev, "refclk_60m_ext_p1");
if (IS_ERR(omap->xclk60mhsp1_ck)) {
ret = PTR_ERR(omap->xclk60mhsp1_ck);
- dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
- goto err_xclk60mhsp1;
+ dev_err(dev, "refclk_60m_ext_p1 failed error:%d\n", ret);
+ goto err_mem;
}
- omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+ omap->xclk60mhsp2_ck = devm_clk_get(dev, "refclk_60m_ext_p2");
if (IS_ERR(omap->xclk60mhsp2_ck)) {
ret = PTR_ERR(omap->xclk60mhsp2_ck);
- dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
- goto err_xclk60mhsp2;
+ dev_err(dev, "refclk_60m_ext_p2 failed error:%d\n", ret);
+ goto err_mem;
}
- omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
+ omap->init_60m_fclk = devm_clk_get(dev, "refclk_60m_int");
if (IS_ERR(omap->init_60m_fclk)) {
ret = PTR_ERR(omap->init_60m_fclk);
- dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
- goto err_init60m;
+ dev_err(dev, "refclk_60m_int failed error:%d\n", ret);
+ goto err_mem;
}
for (i = 0; i < omap->nports; i++) {
@@ -727,55 +750,72 @@ static int usbhs_omap_probe(struct platform_device *pdev)
* platforms have all clocks and we can function without
* them
*/
- omap->utmi_clk[i] = clk_get(dev, clkname);
- if (IS_ERR(omap->utmi_clk[i]))
- dev_dbg(dev, "Failed to get clock : %s : %ld\n",
- clkname, PTR_ERR(omap->utmi_clk[i]));
+ omap->utmi_clk[i] = devm_clk_get(dev, clkname);
+ if (IS_ERR(omap->utmi_clk[i])) {
+ ret = PTR_ERR(omap->utmi_clk[i]);
+ dev_err(dev, "Failed to get clock : %s : %d\n",
+ clkname, ret);
+ goto err_mem;
+ }
snprintf(clkname, sizeof(clkname),
"usb_host_hs_hsic480m_p%d_clk", i + 1);
- omap->hsic480m_clk[i] = clk_get(dev, clkname);
- if (IS_ERR(omap->hsic480m_clk[i]))
- dev_dbg(dev, "Failed to get clock : %s : %ld\n",
- clkname, PTR_ERR(omap->hsic480m_clk[i]));
+ omap->hsic480m_clk[i] = devm_clk_get(dev, clkname);
+ if (IS_ERR(omap->hsic480m_clk[i])) {
+ ret = PTR_ERR(omap->hsic480m_clk[i]);
+ dev_err(dev, "Failed to get clock : %s : %d\n",
+ clkname, ret);
+ goto err_mem;
+ }
snprintf(clkname, sizeof(clkname),
"usb_host_hs_hsic60m_p%d_clk", i + 1);
- omap->hsic60m_clk[i] = clk_get(dev, clkname);
- if (IS_ERR(omap->hsic60m_clk[i]))
- dev_dbg(dev, "Failed to get clock : %s : %ld\n",
- clkname, PTR_ERR(omap->hsic60m_clk[i]));
+ omap->hsic60m_clk[i] = devm_clk_get(dev, clkname);
+ if (IS_ERR(omap->hsic60m_clk[i])) {
+ ret = PTR_ERR(omap->hsic60m_clk[i]);
+ dev_err(dev, "Failed to get clock : %s : %d\n",
+ clkname, ret);
+ goto err_mem;
+ }
}
if (is_ehci_phy_mode(pdata->port_mode[0])) {
- /* for OMAP3, clk_set_parent fails */
ret = clk_set_parent(omap->utmi_p1_gfclk,
omap->xclk60mhsp1_ck);
- if (ret != 0)
- dev_dbg(dev, "xclk60mhsp1_ck set parent failed: %d\n",
- ret);
+ if (ret != 0) {
+ dev_err(dev, "xclk60mhsp1_ck set parent failed: %d\n",
+ ret);
+ goto err_mem;
+ }
} else if (is_ehci_tll_mode(pdata->port_mode[0])) {
ret = clk_set_parent(omap->utmi_p1_gfclk,
omap->init_60m_fclk);
- if (ret != 0)
- dev_dbg(dev, "P0 init_60m_fclk set parent failed: %d\n",
- ret);
+ if (ret != 0) {
+ dev_err(dev, "P0 init_60m_fclk set parent failed: %d\n",
+ ret);
+ goto err_mem;
+ }
}
if (is_ehci_phy_mode(pdata->port_mode[1])) {
ret = clk_set_parent(omap->utmi_p2_gfclk,
omap->xclk60mhsp2_ck);
- if (ret != 0)
- dev_dbg(dev, "xclk60mhsp2_ck set parent failed: %d\n",
- ret);
+ if (ret != 0) {
+ dev_err(dev, "xclk60mhsp2_ck set parent failed: %d\n",
+ ret);
+ goto err_mem;
+ }
} else if (is_ehci_tll_mode(pdata->port_mode[1])) {
ret = clk_set_parent(omap->utmi_p2_gfclk,
omap->init_60m_fclk);
- if (ret != 0)
- dev_dbg(dev, "P1 init_60m_fclk set parent failed: %d\n",
- ret);
+ if (ret != 0) {
+ dev_err(dev, "P1 init_60m_fclk set parent failed: %d\n",
+ ret);
+ goto err_mem;
+ }
}
+initialize:
omap_usbhs_init(dev);
if (dev->of_node) {
@@ -784,7 +824,7 @@ static int usbhs_omap_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "Failed to create DT children: %d\n", ret);
- goto err_alloc;
+ goto err_mem;
}
} else {
@@ -792,40 +832,12 @@ static int usbhs_omap_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "omap_usbhs_alloc_children failed: %d\n",
ret);
- goto err_alloc;
+ goto err_mem;
}
}
return 0;
-err_alloc:
- for (i = 0; i < omap->nports; i++) {
- if (!IS_ERR(omap->utmi_clk[i]))
- clk_put(omap->utmi_clk[i]);
- if (!IS_ERR(omap->hsic60m_clk[i]))
- clk_put(omap->hsic60m_clk[i]);
- if (!IS_ERR(omap->hsic480m_clk[i]))
- clk_put(omap->hsic480m_clk[i]);
- }
-
- clk_put(omap->init_60m_fclk);
-
-err_init60m:
- clk_put(omap->xclk60mhsp2_ck);
-
-err_xclk60mhsp2:
- clk_put(omap->xclk60mhsp1_ck);
-
-err_xclk60mhsp1:
- clk_put(omap->utmi_p2_gfclk);
-
-err_p2_gfclk:
- clk_put(omap->utmi_p1_gfclk);
-
-err_p1_gfclk:
- if (!IS_ERR(omap->ehci_logic_fck))
- clk_put(omap->ehci_logic_fck);
-
err_mem:
pm_runtime_disable(dev);
@@ -847,27 +859,6 @@ static int usbhs_omap_remove_child(struct device *dev, void *data)
*/
static int usbhs_omap_remove(struct platform_device *pdev)
{
- struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < omap->nports; i++) {
- if (!IS_ERR(omap->utmi_clk[i]))
- clk_put(omap->utmi_clk[i]);
- if (!IS_ERR(omap->hsic60m_clk[i]))
- clk_put(omap->hsic60m_clk[i]);
- if (!IS_ERR(omap->hsic480m_clk[i]))
- clk_put(omap->hsic480m_clk[i]);
- }
-
- clk_put(omap->init_60m_fclk);
- clk_put(omap->utmi_p1_gfclk);
- clk_put(omap->utmi_p2_gfclk);
- clk_put(omap->xclk60mhsp2_ck);
- clk_put(omap->xclk60mhsp1_ck);
-
- if (!IS_ERR(omap->ehci_logic_fck))
- clk_put(omap->ehci_logic_fck);
-
pm_runtime_disable(&pdev->dev);
/* remove children */
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 5ee50f779ef6..532eacab6b46 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -252,7 +252,7 @@ static int usbtll_omap_probe(struct platform_device *pdev)
break;
}
- tll->ch_clk = devm_kzalloc(dev, sizeof(struct clk * [tll->nch]),
+ tll->ch_clk = devm_kzalloc(dev, sizeof(struct clk *) * tll->nch,
GFP_KERNEL);
if (!tll->ch_clk) {
ret = -ENOMEM;
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index b8941a556d71..c1984b0d1b65 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/completion.h>
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index 484fe66e6c88..b97a97187ae9 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -14,23 +14,316 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ssbi.h>
+#include <linux/regmap.h>
+#include <linux/of_platform.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/pm8xxx/pm8921.h>
#include <linux/mfd/pm8xxx/core.h>
+#define SSBI_REG_ADDR_IRQ_BASE 0x1BB
+
+#define SSBI_REG_ADDR_IRQ_ROOT (SSBI_REG_ADDR_IRQ_BASE + 0)
+#define SSBI_REG_ADDR_IRQ_M_STATUS1 (SSBI_REG_ADDR_IRQ_BASE + 1)
+#define SSBI_REG_ADDR_IRQ_M_STATUS2 (SSBI_REG_ADDR_IRQ_BASE + 2)
+#define SSBI_REG_ADDR_IRQ_M_STATUS3 (SSBI_REG_ADDR_IRQ_BASE + 3)
+#define SSBI_REG_ADDR_IRQ_M_STATUS4 (SSBI_REG_ADDR_IRQ_BASE + 4)
+#define SSBI_REG_ADDR_IRQ_BLK_SEL (SSBI_REG_ADDR_IRQ_BASE + 5)
+#define SSBI_REG_ADDR_IRQ_IT_STATUS (SSBI_REG_ADDR_IRQ_BASE + 6)
+#define SSBI_REG_ADDR_IRQ_CONFIG (SSBI_REG_ADDR_IRQ_BASE + 7)
+#define SSBI_REG_ADDR_IRQ_RT_STATUS (SSBI_REG_ADDR_IRQ_BASE + 8)
+
+#define PM_IRQF_LVL_SEL 0x01 /* level select */
+#define PM_IRQF_MASK_FE 0x02 /* mask falling edge */
+#define PM_IRQF_MASK_RE 0x04 /* mask rising edge */
+#define PM_IRQF_CLR 0x08 /* clear interrupt */
+#define PM_IRQF_BITS_MASK 0x70
+#define PM_IRQF_BITS_SHIFT 4
+#define PM_IRQF_WRITE 0x80
+
+#define PM_IRQF_MASK_ALL (PM_IRQF_MASK_FE | \
+ PM_IRQF_MASK_RE)
+
#define REG_HWREV 0x002 /* PMIC4 revision */
#define REG_HWREV_2 0x0E8 /* PMIC4 revision 2 */
+#define PM8921_NR_IRQS 256
+
+struct pm_irq_chip {
+ struct device *dev;
+ struct regmap *regmap;
+ spinlock_t pm_irq_lock;
+ struct irq_domain *irqdomain;
+ unsigned int num_irqs;
+ unsigned int num_blocks;
+ unsigned int num_masters;
+ u8 config[0];
+};
+
struct pm8921 {
struct device *dev;
struct pm_irq_chip *irq_chip;
};
+static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, unsigned int bp,
+ unsigned int *ip)
+{
+ int rc;
+
+ spin_lock(&chip->pm_irq_lock);
+ rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, bp);
+ if (rc) {
+ pr_err("Failed Selecting Block %d rc=%d\n", bp, rc);
+ goto bail;
+ }
+
+ rc = regmap_read(chip->regmap, SSBI_REG_ADDR_IRQ_IT_STATUS, ip);
+ if (rc)
+ pr_err("Failed Reading Status rc=%d\n", rc);
+bail:
+ spin_unlock(&chip->pm_irq_lock);
+ return rc;
+}
+
+static int
+pm8xxx_config_irq(struct pm_irq_chip *chip, unsigned int bp, unsigned int cp)
+{
+ int rc;
+
+ spin_lock(&chip->pm_irq_lock);
+ rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, bp);
+ if (rc) {
+ pr_err("Failed Selecting Block %d rc=%d\n", bp, rc);
+ goto bail;
+ }
+
+ cp |= PM_IRQF_WRITE;
+ rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_CONFIG, cp);
+ if (rc)
+ pr_err("Failed Configuring IRQ rc=%d\n", rc);
+bail:
+ spin_unlock(&chip->pm_irq_lock);
+ return rc;
+}
+
+static int pm8xxx_irq_block_handler(struct pm_irq_chip *chip, int block)
+{
+ int pmirq, irq, i, ret = 0;
+ unsigned int bits;
+
+ ret = pm8xxx_read_block_irq(chip, block, &bits);
+ if (ret) {
+ pr_err("Failed reading %d block ret=%d", block, ret);
+ return ret;
+ }
+ if (!bits) {
+ pr_err("block bit set in master but no irqs: %d", block);
+ return 0;
+ }
+
+ /* Check IRQ bits */
+ for (i = 0; i < 8; i++) {
+ if (bits & (1 << i)) {
+ pmirq = block * 8 + i;
+ irq = irq_find_mapping(chip->irqdomain, pmirq);
+ generic_handle_irq(irq);
+ }
+ }
+ return 0;
+}
+
+static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master)
+{
+ unsigned int blockbits;
+ int block_number, i, ret = 0;
+
+ ret = regmap_read(chip->regmap, SSBI_REG_ADDR_IRQ_M_STATUS1 + master,
+ &blockbits);
+ if (ret) {
+ pr_err("Failed to read master %d ret=%d\n", master, ret);
+ return ret;
+ }
+ if (!blockbits) {
+ pr_err("master bit set in root but no blocks: %d", master);
+ return 0;
+ }
+
+ for (i = 0; i < 8; i++)
+ if (blockbits & (1 << i)) {
+ block_number = master * 8 + i; /* block # */
+ ret |= pm8xxx_irq_block_handler(chip, block_number);
+ }
+ return ret;
+}
+
+static void pm8xxx_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
+ struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+ unsigned int root;
+ int i, ret, masters = 0;
+
+ chained_irq_enter(irq_chip, desc);
+
+ ret = regmap_read(chip->regmap, SSBI_REG_ADDR_IRQ_ROOT, &root);
+ if (ret) {
+ pr_err("Can't read root status ret=%d\n", ret);
+ return;
+ }
+
+ /* on pm8xxx series masters start from bit 1 of the root */
+ masters = root >> 1;
+
+ /* Read allowed masters for blocks. */
+ for (i = 0; i < chip->num_masters; i++)
+ if (masters & (1 << i))
+ pm8xxx_irq_master_handler(chip, i);
+
+ chained_irq_exit(irq_chip, desc);
+}
+
+static void pm8xxx_irq_mask_ack(struct irq_data *d)
+{
+ struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int pmirq = irqd_to_hwirq(d);
+ int irq_bit;
+ u8 block, config;
+
+ block = pmirq / 8;
+ irq_bit = pmirq % 8;
+
+ config = chip->config[pmirq] | PM_IRQF_MASK_ALL | PM_IRQF_CLR;
+ pm8xxx_config_irq(chip, block, config);
+}
+
+static void pm8xxx_irq_unmask(struct irq_data *d)
+{
+ struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int pmirq = irqd_to_hwirq(d);
+ int irq_bit;
+ u8 block, config;
+
+ block = pmirq / 8;
+ irq_bit = pmirq % 8;
+
+ config = chip->config[pmirq];
+ pm8xxx_config_irq(chip, block, config);
+}
+
+static int pm8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int pmirq = irqd_to_hwirq(d);
+ int irq_bit;
+ u8 block, config;
+
+ block = pmirq / 8;
+ irq_bit = pmirq % 8;
+
+ chip->config[pmirq] = (irq_bit << PM_IRQF_BITS_SHIFT)
+ | PM_IRQF_MASK_ALL;
+ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ if (flow_type & IRQF_TRIGGER_RISING)
+ chip->config[pmirq] &= ~PM_IRQF_MASK_RE;
+ if (flow_type & IRQF_TRIGGER_FALLING)
+ chip->config[pmirq] &= ~PM_IRQF_MASK_FE;
+ } else {
+ chip->config[pmirq] |= PM_IRQF_LVL_SEL;
+
+ if (flow_type & IRQF_TRIGGER_HIGH)
+ chip->config[pmirq] &= ~PM_IRQF_MASK_RE;
+ else
+ chip->config[pmirq] &= ~PM_IRQF_MASK_FE;
+ }
+
+ config = chip->config[pmirq] | PM_IRQF_CLR;
+ return pm8xxx_config_irq(chip, block, config);
+}
+
+static struct irq_chip pm8xxx_irq_chip = {
+ .name = "pm8xxx",
+ .irq_mask_ack = pm8xxx_irq_mask_ack,
+ .irq_unmask = pm8xxx_irq_unmask,
+ .irq_set_type = pm8xxx_irq_set_type,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
+};
+
+/**
+ * pm8xxx_get_irq_stat - get the status of the irq line
+ * @chip: pointer to identify a pmic irq controller
+ * @irq: the irq number
+ *
+ * The pm8xxx gpio and mpp rely on the interrupt block to read
+ * the values on their pins. This function is to facilitate reading
+ * the status of a gpio or an mpp line. The caller has to convert the
+ * gpio number to irq number.
+ *
+ * RETURNS:
+ * an int indicating the value read on that line
+ */
+static int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq)
+{
+ int pmirq, rc;
+ unsigned int block, bits, bit;
+ unsigned long flags;
+ struct irq_data *irq_data = irq_get_irq_data(irq);
+
+ pmirq = irq_data->hwirq;
+
+ block = pmirq / 8;
+ bit = pmirq % 8;
+
+ spin_lock_irqsave(&chip->pm_irq_lock, flags);
+
+ rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, block);
+ if (rc) {
+ pr_err("Failed Selecting block irq=%d pmirq=%d blk=%d rc=%d\n",
+ irq, pmirq, block, rc);
+ goto bail_out;
+ }
+
+ rc = regmap_read(chip->regmap, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits);
+ if (rc) {
+ pr_err("Failed Configuring irq=%d pmirq=%d blk=%d rc=%d\n",
+ irq, pmirq, block, rc);
+ goto bail_out;
+ }
+
+ rc = (bits & (1 << bit)) ? 1 : 0;
+
+bail_out:
+ spin_unlock_irqrestore(&chip->pm_irq_lock, flags);
+
+ return rc;
+}
+
+static int pm8xxx_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct pm_irq_chip *chip = d->host_data;
+
+ irq_set_chip_and_handler(irq, &pm8xxx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, chip);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+ return 0;
+}
+
+static const struct irq_domain_ops pm8xxx_irq_domain_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .map = pm8xxx_irq_domain_map,
+};
+
static int pm8921_readb(const struct device *dev, u16 addr, u8 *val)
{
const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
@@ -81,42 +374,35 @@ static struct pm8xxx_drvdata pm8921_drvdata = {
.pmic_read_irq_stat = pm8921_read_irq_stat,
};
-static int pm8921_add_subdevices(const struct pm8921_platform_data
- *pdata,
- struct pm8921 *pmic,
- u32 rev)
-{
- int ret = 0, irq_base = 0;
- struct pm_irq_chip *irq_chip;
-
- if (pdata->irq_pdata) {
- pdata->irq_pdata->irq_cdata.nirqs = PM8921_NR_IRQS;
- pdata->irq_pdata->irq_cdata.rev = rev;
- irq_base = pdata->irq_pdata->irq_base;
- irq_chip = pm8xxx_irq_init(pmic->dev, pdata->irq_pdata);
+static const struct regmap_config ssbi_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0x3ff,
+ .fast_io = true,
+ .reg_read = ssbi_reg_read,
+ .reg_write = ssbi_reg_write
+};
- if (IS_ERR(irq_chip)) {
- pr_err("Failed to init interrupts ret=%ld\n",
- PTR_ERR(irq_chip));
- return PTR_ERR(irq_chip);
- }
- pmic->irq_chip = irq_chip;
- }
- return ret;
-}
+static const struct of_device_id pm8921_id_table[] = {
+ { .compatible = "qcom,pm8058", },
+ { .compatible = "qcom,pm8921", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pm8921_id_table);
static int pm8921_probe(struct platform_device *pdev)
{
- const struct pm8921_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct pm8921 *pmic;
- int rc;
- u8 val;
+ struct regmap *regmap;
+ int irq, rc;
+ unsigned int val;
u32 rev;
+ struct pm_irq_chip *chip;
+ unsigned int nirqs = PM8921_NR_IRQS;
- if (!pdata) {
- pr_err("missing platform data\n");
- return -EINVAL;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
pmic = devm_kzalloc(&pdev->dev, sizeof(struct pm8921), GFP_KERNEL);
if (!pmic) {
@@ -124,8 +410,13 @@ static int pm8921_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ regmap = devm_regmap_init(&pdev->dev, NULL, pdev->dev.parent,
+ &ssbi_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
/* Read PMIC chip revision */
- rc = ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val));
+ rc = regmap_read(regmap, REG_HWREV, &val);
if (rc) {
pr_err("Failed to read hw rev reg %d:rc=%d\n", REG_HWREV, rc);
return rc;
@@ -134,7 +425,7 @@ static int pm8921_probe(struct platform_device *pdev)
rev = val;
/* Read PMIC chip revision 2 */
- rc = ssbi_read(pdev->dev.parent, REG_HWREV_2, &val, sizeof(val));
+ rc = regmap_read(regmap, REG_HWREV_2, &val);
if (rc) {
pr_err("Failed to read hw rev 2 reg %d:rc=%d\n",
REG_HWREV_2, rc);
@@ -147,37 +438,56 @@ static int pm8921_probe(struct platform_device *pdev)
pm8921_drvdata.pm_chip_data = pmic;
platform_set_drvdata(pdev, &pm8921_drvdata);
- rc = pm8921_add_subdevices(pdata, pmic, rev);
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip) +
+ sizeof(chip->config[0]) * nirqs,
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ pmic->irq_chip = chip;
+ chip->dev = &pdev->dev;
+ chip->regmap = regmap;
+ chip->num_irqs = nirqs;
+ chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8);
+ chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8);
+ spin_lock_init(&chip->pm_irq_lock);
+
+ chip->irqdomain = irq_domain_add_linear(pdev->dev.of_node, nirqs,
+ &pm8xxx_irq_domain_ops,
+ chip);
+ if (!chip->irqdomain)
+ return -ENODEV;
+
+ irq_set_handler_data(irq, chip);
+ irq_set_chained_handler(irq, pm8xxx_irq_handler);
+ irq_set_irq_wake(irq, 1);
+
+ rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
if (rc) {
- pr_err("Cannot add subdevices rc=%d\n", rc);
- goto err;
+ irq_set_chained_handler(irq, NULL);
+ irq_set_handler_data(irq, NULL);
+ irq_domain_remove(chip->irqdomain);
}
- /* gpio might not work if no irq device is found */
- WARN_ON(pmic->irq_chip == NULL);
+ return rc;
+}
+static int pm8921_remove_child(struct device *dev, void *unused)
+{
+ platform_device_unregister(to_platform_device(dev));
return 0;
-
-err:
- mfd_remove_devices(pmic->dev);
- return rc;
}
static int pm8921_remove(struct platform_device *pdev)
{
- struct pm8xxx_drvdata *drvdata;
- struct pm8921 *pmic = NULL;
-
- drvdata = platform_get_drvdata(pdev);
- if (drvdata)
- pmic = drvdata->pm_chip_data;
- if (pmic) {
- mfd_remove_devices(pmic->dev);
- if (pmic->irq_chip) {
- pm8xxx_irq_exit(pmic->irq_chip);
- pmic->irq_chip = NULL;
- }
- }
+ int irq = platform_get_irq(pdev, 0);
+ struct pm8921 *pmic = pm8921_drvdata.pm_chip_data;
+ struct pm_irq_chip *chip = pmic->irq_chip;
+
+ device_for_each_child(&pdev->dev, NULL, pm8921_remove_child);
+ irq_set_chained_handler(irq, NULL);
+ irq_set_handler_data(irq, NULL);
+ irq_domain_remove(chip->irqdomain);
return 0;
}
@@ -188,6 +498,7 @@ static struct platform_driver pm8921_driver = {
.driver = {
.name = "pm8921-core",
.owner = THIS_MODULE,
+ .of_match_table = pm8921_id_table,
},
};
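
For context, a hedged sketch (not from this patch) of how a child platform device created by of_platform_populate() above might request one of these PMIC interrupts once the linear irqdomain has translated the hwirq to a virq; the function names below are hypothetical:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t pm8xxx_child_irq(int irq, void *data)
{
	/* A real sub-device driver would do its work here. */
	return IRQ_HANDLED;
}

static int pm8xxx_child_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);	/* virq from the PMIC irqdomain */

	if (irq < 0)
		return irq;

	/* Level-triggered by default; see pm8xxx_irq_set_type() above. */
	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 pm8xxx_child_irq, IRQF_ONESHOT,
					 dev_name(&pdev->dev), pdev);
}
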
diff --git a/drivers/mfd/pm8xxx-irq.c b/drivers/mfd/pm8xxx-irq.c
deleted file mode 100644
index 1360e20adf11..000000000000
--- a/drivers/mfd/pm8xxx-irq.c
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/mfd/pm8xxx/core.h>
-#include <linux/mfd/pm8xxx/irq.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-/* PMIC8xxx IRQ */
-
-#define SSBI_REG_ADDR_IRQ_BASE 0x1BB
-
-#define SSBI_REG_ADDR_IRQ_ROOT (SSBI_REG_ADDR_IRQ_BASE + 0)
-#define SSBI_REG_ADDR_IRQ_M_STATUS1 (SSBI_REG_ADDR_IRQ_BASE + 1)
-#define SSBI_REG_ADDR_IRQ_M_STATUS2 (SSBI_REG_ADDR_IRQ_BASE + 2)
-#define SSBI_REG_ADDR_IRQ_M_STATUS3 (SSBI_REG_ADDR_IRQ_BASE + 3)
-#define SSBI_REG_ADDR_IRQ_M_STATUS4 (SSBI_REG_ADDR_IRQ_BASE + 4)
-#define SSBI_REG_ADDR_IRQ_BLK_SEL (SSBI_REG_ADDR_IRQ_BASE + 5)
-#define SSBI_REG_ADDR_IRQ_IT_STATUS (SSBI_REG_ADDR_IRQ_BASE + 6)
-#define SSBI_REG_ADDR_IRQ_CONFIG (SSBI_REG_ADDR_IRQ_BASE + 7)
-#define SSBI_REG_ADDR_IRQ_RT_STATUS (SSBI_REG_ADDR_IRQ_BASE + 8)
-
-#define PM_IRQF_LVL_SEL 0x01 /* level select */
-#define PM_IRQF_MASK_FE 0x02 /* mask falling edge */
-#define PM_IRQF_MASK_RE 0x04 /* mask rising edge */
-#define PM_IRQF_CLR 0x08 /* clear interrupt */
-#define PM_IRQF_BITS_MASK 0x70
-#define PM_IRQF_BITS_SHIFT 4
-#define PM_IRQF_WRITE 0x80
-
-#define PM_IRQF_MASK_ALL (PM_IRQF_MASK_FE | \
- PM_IRQF_MASK_RE)
-
-struct pm_irq_chip {
- struct device *dev;
- spinlock_t pm_irq_lock;
- unsigned int devirq;
- unsigned int irq_base;
- unsigned int num_irqs;
- unsigned int num_blocks;
- unsigned int num_masters;
- u8 config[0];
-};
-
-static int pm8xxx_read_root_irq(const struct pm_irq_chip *chip, u8 *rp)
-{
- return pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_ROOT, rp);
-}
-
-static int pm8xxx_read_master_irq(const struct pm_irq_chip *chip, u8 m, u8 *bp)
-{
- return pm8xxx_readb(chip->dev,
- SSBI_REG_ADDR_IRQ_M_STATUS1 + m, bp);
-}
-
-static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, u8 bp, u8 *ip)
-{
- int rc;
-
- spin_lock(&chip->pm_irq_lock);
- rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp);
- if (rc) {
- pr_err("Failed Selecting Block %d rc=%d\n", bp, rc);
- goto bail;
- }
-
- rc = pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_IT_STATUS, ip);
- if (rc)
- pr_err("Failed Reading Status rc=%d\n", rc);
-bail:
- spin_unlock(&chip->pm_irq_lock);
- return rc;
-}
-
-static int pm8xxx_config_irq(struct pm_irq_chip *chip, u8 bp, u8 cp)
-{
- int rc;
-
- spin_lock(&chip->pm_irq_lock);
- rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp);
- if (rc) {
- pr_err("Failed Selecting Block %d rc=%d\n", bp, rc);
- goto bail;
- }
-
- cp |= PM_IRQF_WRITE;
- rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_CONFIG, cp);
- if (rc)
- pr_err("Failed Configuring IRQ rc=%d\n", rc);
-bail:
- spin_unlock(&chip->pm_irq_lock);
- return rc;
-}
-
-static int pm8xxx_irq_block_handler(struct pm_irq_chip *chip, int block)
-{
- int pmirq, irq, i, ret = 0;
- u8 bits;
-
- ret = pm8xxx_read_block_irq(chip, block, &bits);
- if (ret) {
- pr_err("Failed reading %d block ret=%d", block, ret);
- return ret;
- }
- if (!bits) {
- pr_err("block bit set in master but no irqs: %d", block);
- return 0;
- }
-
- /* Check IRQ bits */
- for (i = 0; i < 8; i++) {
- if (bits & (1 << i)) {
- pmirq = block * 8 + i;
- irq = pmirq + chip->irq_base;
- generic_handle_irq(irq);
- }
- }
- return 0;
-}
-
-static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master)
-{
- u8 blockbits;
- int block_number, i, ret = 0;
-
- ret = pm8xxx_read_master_irq(chip, master, &blockbits);
- if (ret) {
- pr_err("Failed to read master %d ret=%d\n", master, ret);
- return ret;
- }
- if (!blockbits) {
- pr_err("master bit set in root but no blocks: %d", master);
- return 0;
- }
-
- for (i = 0; i < 8; i++)
- if (blockbits & (1 << i)) {
- block_number = master * 8 + i; /* block # */
- ret |= pm8xxx_irq_block_handler(chip, block_number);
- }
- return ret;
-}
-
-static void pm8xxx_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
- struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- u8 root;
- int i, ret, masters = 0;
-
- ret = pm8xxx_read_root_irq(chip, &root);
- if (ret) {
- pr_err("Can't read root status ret=%d\n", ret);
- return;
- }
-
- /* on pm8xxx series masters start from bit 1 of the root */
- masters = root >> 1;
-
- /* Read allowed masters for blocks. */
- for (i = 0; i < chip->num_masters; i++)
- if (masters & (1 << i))
- pm8xxx_irq_master_handler(chip, i);
-
- irq_chip->irq_ack(&desc->irq_data);
-}
-
-static void pm8xxx_irq_mask_ack(struct irq_data *d)
-{
- struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
- unsigned int pmirq = d->irq - chip->irq_base;
- int master, irq_bit;
- u8 block, config;
-
- block = pmirq / 8;
- master = block / 8;
- irq_bit = pmirq % 8;
-
- config = chip->config[pmirq] | PM_IRQF_MASK_ALL | PM_IRQF_CLR;
- pm8xxx_config_irq(chip, block, config);
-}
-
-static void pm8xxx_irq_unmask(struct irq_data *d)
-{
- struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
- unsigned int pmirq = d->irq - chip->irq_base;
- int master, irq_bit;
- u8 block, config;
-
- block = pmirq / 8;
- master = block / 8;
- irq_bit = pmirq % 8;
-
- config = chip->config[pmirq];
- pm8xxx_config_irq(chip, block, config);
-}
-
-static int pm8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
-{
- struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
- unsigned int pmirq = d->irq - chip->irq_base;
- int master, irq_bit;
- u8 block, config;
-
- block = pmirq / 8;
- master = block / 8;
- irq_bit = pmirq % 8;
-
- chip->config[pmirq] = (irq_bit << PM_IRQF_BITS_SHIFT)
- | PM_IRQF_MASK_ALL;
- if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- if (flow_type & IRQF_TRIGGER_RISING)
- chip->config[pmirq] &= ~PM_IRQF_MASK_RE;
- if (flow_type & IRQF_TRIGGER_FALLING)
- chip->config[pmirq] &= ~PM_IRQF_MASK_FE;
- } else {
- chip->config[pmirq] |= PM_IRQF_LVL_SEL;
-
- if (flow_type & IRQF_TRIGGER_HIGH)
- chip->config[pmirq] &= ~PM_IRQF_MASK_RE;
- else
- chip->config[pmirq] &= ~PM_IRQF_MASK_FE;
- }
-
- config = chip->config[pmirq] | PM_IRQF_CLR;
- return pm8xxx_config_irq(chip, block, config);
-}
-
-static int pm8xxx_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- return 0;
-}
-
-static struct irq_chip pm8xxx_irq_chip = {
- .name = "pm8xxx",
- .irq_mask_ack = pm8xxx_irq_mask_ack,
- .irq_unmask = pm8xxx_irq_unmask,
- .irq_set_type = pm8xxx_irq_set_type,
- .irq_set_wake = pm8xxx_irq_set_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
-};
-
-/**
- * pm8xxx_get_irq_stat - get the status of the irq line
- * @chip: pointer to identify a pmic irq controller
- * @irq: the irq number
- *
- * The pm8xxx gpio and mpp rely on the interrupt block to read
- * the values on their pins. This function is to facilitate reading
- * the status of a gpio or an mpp line. The caller has to convert the
- * gpio number to irq number.
- *
- * RETURNS:
- * an int indicating the value read on that line
- */
-int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq)
-{
- int pmirq, rc;
- u8 block, bits, bit;
- unsigned long flags;
-
- if (chip == NULL || irq < chip->irq_base ||
- irq >= chip->irq_base + chip->num_irqs)
- return -EINVAL;
-
- pmirq = irq - chip->irq_base;
-
- block = pmirq / 8;
- bit = pmirq % 8;
-
- spin_lock_irqsave(&chip->pm_irq_lock, flags);
-
- rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, block);
- if (rc) {
- pr_err("Failed Selecting block irq=%d pmirq=%d blk=%d rc=%d\n",
- irq, pmirq, block, rc);
- goto bail_out;
- }
-
- rc = pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits);
- if (rc) {
- pr_err("Failed Configuring irq=%d pmirq=%d blk=%d rc=%d\n",
- irq, pmirq, block, rc);
- goto bail_out;
- }
-
- rc = (bits & (1 << bit)) ? 1 : 0;
-
-bail_out:
- spin_unlock_irqrestore(&chip->pm_irq_lock, flags);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(pm8xxx_get_irq_stat);
-
-struct pm_irq_chip * pm8xxx_irq_init(struct device *dev,
- const struct pm8xxx_irq_platform_data *pdata)
-{
- struct pm_irq_chip *chip;
- int devirq, rc;
- unsigned int pmirq;
-
- if (!pdata) {
- pr_err("No platform data\n");
- return ERR_PTR(-EINVAL);
- }
-
- devirq = pdata->devirq;
- if (devirq < 0) {
- pr_err("missing devirq\n");
- rc = devirq;
- return ERR_PTR(-EINVAL);
- }
-
- chip = kzalloc(sizeof(struct pm_irq_chip)
- + sizeof(u8) * pdata->irq_cdata.nirqs, GFP_KERNEL);
- if (!chip) {
- pr_err("Cannot alloc pm_irq_chip struct\n");
- return ERR_PTR(-EINVAL);
- }
-
- chip->dev = dev;
- chip->devirq = devirq;
- chip->irq_base = pdata->irq_base;
- chip->num_irqs = pdata->irq_cdata.nirqs;
- chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8);
- chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8);
- spin_lock_init(&chip->pm_irq_lock);
-
- for (pmirq = 0; pmirq < chip->num_irqs; pmirq++) {
- irq_set_chip_and_handler(chip->irq_base + pmirq,
- &pm8xxx_irq_chip,
- handle_level_irq);
- irq_set_chip_data(chip->irq_base + pmirq, chip);
-#ifdef CONFIG_ARM
- set_irq_flags(chip->irq_base + pmirq, IRQF_VALID);
-#else
- irq_set_noprobe(chip->irq_base + pmirq);
-#endif
- }
-
- irq_set_irq_type(devirq, pdata->irq_trigger_flag);
- irq_set_handler_data(devirq, chip);
- irq_set_chained_handler(devirq, pm8xxx_irq_handler);
- set_irq_wake(devirq, 1);
-
- return chip;
-}
-
-int pm8xxx_irq_exit(struct pm_irq_chip *chip)
-{
- irq_set_chained_handler(chip->devirq, NULL);
- kfree(chip);
- return 0;
-}
diff --git a/drivers/mfd/rc5t583-irq.c b/drivers/mfd/rc5t583-irq.c
index b41db5968706..bb8502020274 100644
--- a/drivers/mfd/rc5t583-irq.c
+++ b/drivers/mfd/rc5t583-irq.c
@@ -22,7 +22,6 @@
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/mfd/rc5t583.h>
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index d346146249a2..c79569750be9 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -19,7 +19,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index c8f345f7e9a2..663f8a37aa6b 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -19,7 +19,6 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/irq.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/module.h>
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 1d15735f9ef9..c9de3d598ea5 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -338,58 +338,28 @@ int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
int num_sg, bool read, int timeout)
{
struct completion trans_done;
- u8 dir;
- int err = 0, i, count;
+ int err = 0, count;
long timeleft;
unsigned long flags;
- struct scatterlist *sg;
- enum dma_data_direction dma_dir;
- u32 val;
- dma_addr_t addr;
- unsigned int len;
-
- dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg);
-
- /* don't transfer data during abort processing */
- if (pcr->remove_pci)
- return -EINVAL;
-
- if ((sglist == NULL) || (num_sg <= 0))
- return -EINVAL;
- if (read) {
- dir = DEVICE_TO_HOST;
- dma_dir = DMA_FROM_DEVICE;
- } else {
- dir = HOST_TO_DEVICE;
- dma_dir = DMA_TO_DEVICE;
- }
-
- count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
+ count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
if (count < 1) {
dev_err(&(pcr->pci->dev), "scatterlist map failed\n");
return -EINVAL;
}
dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count);
- val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
- pcr->sgi = 0;
- for_each_sg(sglist, sg, count, i) {
- addr = sg_dma_address(sg);
- len = sg_dma_len(sg);
- rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
- }
spin_lock_irqsave(&pcr->lock, flags);
pcr->done = &trans_done;
pcr->trans_result = TRANS_NOT_READY;
init_completion(&trans_done);
- rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
- rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
spin_unlock_irqrestore(&pcr->lock, flags);
+ rtsx_pci_dma_transfer(pcr, sglist, count, read);
+
timeleft = wait_for_completion_interruptible_timeout(
&trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
@@ -413,7 +383,7 @@ out:
pcr->done = NULL;
spin_unlock_irqrestore(&pcr->lock, flags);
- dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
+ rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
if ((err < 0) && (err != -ENODEV))
rtsx_pci_stop_cmd(pcr);
@@ -425,6 +395,73 @@ out:
}
EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
+int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+ int num_sg, bool read)
+{
+ enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (pcr->remove_pci)
+ return -EINVAL;
+
+ if ((sglist == NULL) || num_sg < 1)
+ return -EINVAL;
+
+ return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
+
+int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+ int num_sg, bool read)
+{
+ enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (pcr->remove_pci)
+ return -EINVAL;
+
+ if (sglist == NULL || num_sg < 1)
+ return -EINVAL;
+
+ dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
+ return num_sg;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
+
+int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+ int sg_count, bool read)
+{
+ struct scatterlist *sg;
+ dma_addr_t addr;
+ unsigned int len;
+ int i;
+ u32 val;
+ u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
+ unsigned long flags;
+
+ if (pcr->remove_pci)
+ return -EINVAL;
+
+ if ((sglist == NULL) || (sg_count < 1))
+ return -EINVAL;
+
+ val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
+ pcr->sgi = 0;
+ for_each_sg(sglist, sg, sg_count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ rtsx_pci_add_sg_tbl(pcr, addr, len, i == sg_count - 1);
+ }
+
+ spin_lock_irqsave(&pcr->lock, flags);
+
+ rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
+
+ spin_unlock_irqrestore(&pcr->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
+
int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
int err;
@@ -836,6 +873,8 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
/* Clear interrupt flag */
rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
+ dev_dbg(&pcr->pci->dev, "=========== BIPR 0x%8x ==========\n", int_reg);
+
if ((int_reg & pcr->bier) == 0) {
spin_unlock(&pcr->lock);
return IRQ_NONE;
@@ -866,17 +905,28 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
}
if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
- if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
+ if (int_reg & (TRANS_FAIL_INT | DELINK_INT))
pcr->trans_result = TRANS_RESULT_FAIL;
- if (pcr->done)
- complete(pcr->done);
- } else if (int_reg & TRANS_OK_INT) {
+ else if (int_reg & TRANS_OK_INT)
pcr->trans_result = TRANS_RESULT_OK;
- if (pcr->done)
- complete(pcr->done);
+
+ if (pcr->done)
+ complete(pcr->done);
+
+ if (int_reg & SD_EXIST) {
+ struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD];
+ if (slot && slot->done_transfer)
+ slot->done_transfer(slot->p_dev);
+ }
+
+ if (int_reg & MS_EXIST) {
+ struct rtsx_slot *slot = &pcr->slots[RTSX_MS_CARD];
+ if (slot && slot->done_transfer)
+ slot->done_transfer(slot->p_dev);
}
}
+
if (pcr->card_inserted || pcr->card_removed)
schedule_delayed_work(&pcr->carddet_work,
msecs_to_jiffies(200));
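
A hedged sketch (not part of the patch) of how a card driver could use the three helpers exported above to start a transfer asynchronously instead of calling rtsx_pci_transfer_data(); completion would arrive via the slot's done_transfer() callback handled in the ISR, and the function names here are illustrative only:

static int example_start_write(struct rtsx_pcr *pcr,
			       struct scatterlist *sg, int num_sg)
{
	int count;

	/* Map the scatterlist for a host-to-device transfer. */
	count = rtsx_pci_dma_map_sg(pcr, sg, num_sg, false);
	if (count < 1)
		return -EINVAL;

	/* Build the ADMA descriptor table and kick the DMA engine. */
	return rtsx_pci_dma_transfer(pcr, sg, count, false);
}

/* Later, e.g. from the slot's done_transfer() path, release the mapping: */
static void example_finish_write(struct rtsx_pcr *pcr,
				 struct scatterlist *sg, int num_sg)
{
	rtsx_pci_dma_unmap_sg(pcr, sg, num_sg, false);
}
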
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
new file mode 100644
index 000000000000..b53b9d46cc45
--- /dev/null
+++ b/drivers/mfd/rtsx_usb.c
@@ -0,0 +1,760 @@
+/* Driver for Realtek USB card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Roger Tseng <rogerable@realtek.com>
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rtsx_usb.h>
+
+static int polling_pipe = 1;
+module_param(polling_pipe, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(polling_pipe, "polling pipe (0: ctl, 1: bulk)");
+
+static struct mfd_cell rtsx_usb_cells[] = {
+ [RTSX_USB_SD_CARD] = {
+ .name = "rtsx_usb_sdmmc",
+ .pdata_size = 0,
+ },
+ [RTSX_USB_MS_CARD] = {
+ .name = "rtsx_usb_ms",
+ .pdata_size = 0,
+ },
+};
+
+static void rtsx_usb_sg_timed_out(unsigned long data)
+{
+ struct rtsx_ucr *ucr = (struct rtsx_ucr *)data;
+
+ dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
+ usb_sg_cancel(&ucr->current_sg);
+
+ /* we know the cancellation is caused by time-out */
+ ucr->current_sg.status = -ETIMEDOUT;
+}
+
+static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr,
+ unsigned int pipe, struct scatterlist *sg, int num_sg,
+ unsigned int length, unsigned int *act_len, int timeout)
+{
+ int ret;
+
+ dev_dbg(&ucr->pusb_intf->dev, "%s: xfer %u bytes, %d entries\n",
+ __func__, length, num_sg);
+ ret = usb_sg_init(&ucr->current_sg, ucr->pusb_dev, pipe, 0,
+ sg, num_sg, length, GFP_NOIO);
+ if (ret)
+ return ret;
+
+ ucr->sg_timer.expires = jiffies + msecs_to_jiffies(timeout);
+ add_timer(&ucr->sg_timer);
+ usb_sg_wait(&ucr->current_sg);
+ del_timer(&ucr->sg_timer);
+
+ if (act_len)
+ *act_len = ucr->current_sg.bytes;
+
+ return ucr->current_sg.status;
+}
+
+int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe,
+ void *buf, unsigned int len, int num_sg,
+ unsigned int *act_len, int timeout)
+{
+ if (timeout < 600)
+ timeout = 600;
+
+ if (num_sg)
+ return rtsx_usb_bulk_transfer_sglist(ucr, pipe,
+ (struct scatterlist *)buf, num_sg, len, act_len,
+ timeout);
+ else
+ return usb_bulk_msg(ucr->pusb_dev, pipe, buf, len, act_len,
+ timeout);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_transfer_data);
+
+static inline void rtsx_usb_seq_cmd_hdr(struct rtsx_ucr *ucr,
+ u16 addr, u16 len, u8 seq_type)
+{
+ rtsx_usb_cmd_hdr_tag(ucr);
+
+ ucr->cmd_buf[PACKET_TYPE] = seq_type;
+ ucr->cmd_buf[5] = (u8)(len >> 8);
+ ucr->cmd_buf[6] = (u8)len;
+ ucr->cmd_buf[8] = (u8)(addr >> 8);
+ ucr->cmd_buf[9] = (u8)addr;
+
+ if (seq_type == SEQ_WRITE)
+ ucr->cmd_buf[STAGE_FLAG] = 0;
+ else
+ ucr->cmd_buf[STAGE_FLAG] = STAGE_R;
+}
+
+static int rtsx_usb_seq_write_register(struct rtsx_ucr *ucr,
+ u16 addr, u16 len, u8 *data)
+{
+ u16 cmd_len = ALIGN(SEQ_WRITE_DATA_OFFSET + len, 4);
+
+ if (!data)
+ return -EINVAL;
+
+ if (cmd_len > IOBUF_SIZE)
+ return -EINVAL;
+
+ rtsx_usb_seq_cmd_hdr(ucr, addr, len, SEQ_WRITE);
+ memcpy(ucr->cmd_buf + SEQ_WRITE_DATA_OFFSET, data, len);
+
+ return rtsx_usb_transfer_data(ucr,
+ usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
+ ucr->cmd_buf, cmd_len, 0, NULL, 100);
+}
+
+static int rtsx_usb_seq_read_register(struct rtsx_ucr *ucr,
+ u16 addr, u16 len, u8 *data)
+{
+ int i, ret;
+ u16 rsp_len = round_down(len, 4);
+ u16 res_len = len - rsp_len;
+
+ if (!data)
+ return -EINVAL;
+
+ /* 4-byte aligned part */
+ if (rsp_len) {
+ rtsx_usb_seq_cmd_hdr(ucr, addr, len, SEQ_READ);
+ ret = rtsx_usb_transfer_data(ucr,
+ usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
+ ucr->cmd_buf, 12, 0, NULL, 100);
+ if (ret)
+ return ret;
+
+ ret = rtsx_usb_transfer_data(ucr,
+ usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN),
+ data, rsp_len, 0, NULL, 100);
+ if (ret)
+ return ret;
+ }
+
+ /* unaligned part */
+ for (i = 0; i < res_len; i++) {
+ ret = rtsx_usb_read_register(ucr, addr + rsp_len + i,
+ data + rsp_len + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len)
+{
+ return rtsx_usb_seq_read_register(ucr, PPBUF_BASE2, (u16)buf_len, buf);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_read_ppbuf);
+
+int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len)
+{
+ return rtsx_usb_seq_write_register(ucr, PPBUF_BASE2, (u16)buf_len, buf);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_write_ppbuf);
+
+int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr,
+ u8 mask, u8 data)
+{
+ u16 value, index;
+
+ addr |= EP0_WRITE_REG_CMD << EP0_OP_SHIFT;
+ value = swab16(addr);
+ index = mask | data << 8;
+
+ return usb_control_msg(ucr->pusb_dev,
+ usb_sndctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, NULL, 0, 100);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
+
+int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
+{
+ u16 value;
+
+ if (!data)
+ return -EINVAL;
+ *data = 0;
+
+ addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
+ value = swab16(addr);
+
+ return usb_control_msg(ucr->pusb_dev,
+ usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, 0, data, 1, 100);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
+
+void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type, u16 reg_addr,
+ u8 mask, u8 data)
+{
+ int i;
+
+ if (ucr->cmd_idx < (IOBUF_SIZE - CMD_OFFSET) / 4) {
+ i = CMD_OFFSET + ucr->cmd_idx * 4;
+
+ ucr->cmd_buf[i++] = ((cmd_type & 0x03) << 6) |
+ (u8)((reg_addr >> 8) & 0x3F);
+ ucr->cmd_buf[i++] = (u8)reg_addr;
+ ucr->cmd_buf[i++] = mask;
+ ucr->cmd_buf[i++] = data;
+
+ ucr->cmd_idx++;
+ }
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_add_cmd);
+
+int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout)
+{
+ int ret;
+
+ ucr->cmd_buf[CNT_H] = (u8)(ucr->cmd_idx >> 8);
+ ucr->cmd_buf[CNT_L] = (u8)(ucr->cmd_idx);
+ ucr->cmd_buf[STAGE_FLAG] = flag;
+
+ ret = rtsx_usb_transfer_data(ucr,
+ usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
+ ucr->cmd_buf, ucr->cmd_idx * 4 + CMD_OFFSET,
+ 0, NULL, timeout);
+ if (ret) {
+ rtsx_usb_clear_fsm_err(ucr);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_send_cmd);
+
+int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout)
+{
+ if (rsp_len <= 0)
+ return -EINVAL;
+
+ rsp_len = ALIGN(rsp_len, 4);
+
+ return rtsx_usb_transfer_data(ucr,
+ usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN),
+ ucr->rsp_buf, rsp_len, 0, NULL, timeout);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_get_rsp);
+
+static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
+{
+ int ret;
+
+ rtsx_usb_init_cmd(ucr);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_EXIST, 0x00, 0x00);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, OCPSTAT, 0x00, 0x00);
+ ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
+ if (ret)
+ return ret;
+
+ ret = rtsx_usb_get_rsp(ucr, 2, 100);
+ if (ret)
+ return ret;
+
+ *status = ((ucr->rsp_buf[0] >> 2) & 0x0f) |
+ ((ucr->rsp_buf[1] & 0x03) << 4);
+
+ return 0;
+}
+
+int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
+{
+ int ret;
+
+ if (!status)
+ return -EINVAL;
+
+ if (polling_pipe == 0)
+ ret = usb_control_msg(ucr->pusb_dev,
+ usb_rcvctrlpipe(ucr->pusb_dev, 0),
+ RTSX_USB_REQ_POLL,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, 0, status, 2, 100);
+ else
+ ret = rtsx_usb_get_status_with_bulk(ucr, status);
+
+ /* usb_control_msg may return positive when success */
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_get_card_status);
+
+static int rtsx_usb_write_phy_register(struct rtsx_ucr *ucr, u8 addr, u8 val)
+{
+ dev_dbg(&ucr->pusb_intf->dev, "Write 0x%x to phy register 0x%x\n",
+ val, addr);
+
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VSTAIN, 0xFF, val);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VCONTROL, 0xFF, addr & 0x0F);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VCONTROL,
+ 0xFF, (addr >> 4) & 0x0F);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
+
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data)
+{
+ rtsx_usb_init_cmd(ucr);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, addr, mask, data);
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_write_register);
+
+int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
+{
+ int ret;
+
+ if (data != NULL)
+ *data = 0;
+
+ rtsx_usb_init_cmd(ucr);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, addr, 0, 0);
+ ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
+ if (ret)
+ return ret;
+
+ ret = rtsx_usb_get_rsp(ucr, 1, 100);
+ if (ret)
+ return ret;
+
+ if (data != NULL)
+ *data = ucr->rsp_buf[0];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_read_register);
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+ return (depth > 1) ? (depth - 1) : depth;
+}
+
+static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
+{
+ if (div > CLK_DIV_1) {
+ if (ssc_depth > div - 1)
+ ssc_depth -= (div - 1);
+ else
+ ssc_depth = SSC_DEPTH_2M;
+ }
+
+ return ssc_depth;
+}
+
+int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+ int ret;
+ u8 n, clk_divider, mcu_cnt, div;
+
+ if (!card_clock) {
+ ucr->cur_clk = 0;
+ return 0;
+ }
+
+ if (initial_mode) {
+ /* We use 250k(around) here, in initial stage */
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_0;
+ }
+
+ ret = rtsx_usb_write_register(ucr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, clk_divider);
+ if (ret < 0)
+ return ret;
+
+ card_clock /= 1000000;
+ dev_dbg(&ucr->pusb_intf->dev,
+ "Switch card clock to %dMHz\n", card_clock);
+
+ if (!initial_mode && double_clk)
+ card_clock *= 2;
+ dev_dbg(&ucr->pusb_intf->dev,
+ "Internal SSC clock: %dMHz (cur_clk = %d)\n",
+ card_clock, ucr->cur_clk);
+
+ if (card_clock == ucr->cur_clk)
+ return 0;
+
+ /* Converting clock value into internal settings: n and div */
+ n = card_clock - 2;
+ if ((card_clock <= 2) || (n > MAX_DIV_N))
+ return -EINVAL;
+
+ mcu_cnt = 60/card_clock + 3;
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+
+ /* Make sure that the SSC clock div_n is not less than MIN_DIV_N */
+
+ div = CLK_DIV_1;
+ while (n < MIN_DIV_N && div < CLK_DIV_4) {
+ n = (n + 2) * 2 - 2;
+ div++;
+ }
+ dev_dbg(&ucr->pusb_intf->dev, "n = %d, div = %d\n", n, div);
+
+ if (double_clk)
+ ssc_depth = double_ssc_depth(ssc_depth);
+
+ ssc_depth = revise_ssc_depth(ssc_depth, div);
+ dev_dbg(&ucr->pusb_intf->dev, "ssc_depth = %d\n", ssc_depth);
+
+ rtsx_usb_init_cmd(ucr);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, CLK_CHANGE);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV,
+ 0x3F, (div << 4) | mcu_cnt);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL2,
+ SSC_DEPTH_MASK, ssc_depth);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ if (vpclk) {
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ ret = rtsx_usb_send_cmd(ucr, MODE_C, 2000);
+ if (ret < 0)
+ return ret;
+
+ ret = rtsx_usb_write_register(ucr, SSC_CTL1, 0xff,
+ SSC_RSTB | SSC_8X_EN | SSC_SEL_4M);
+ if (ret < 0)
+ return ret;
+
+ /* Wait SSC clock stable */
+ usleep_range(100, 1000);
+
+ ret = rtsx_usb_write_register(ucr, CLK_DIV, CLK_CHANGE, 0);
+ if (ret < 0)
+ return ret;
+
+ ucr->cur_clk = card_clock;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_switch_clock);
+
+int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card)
+{
+ int ret;
+ u16 val;
+ u16 cd_mask[] = {
+ [RTSX_USB_SD_CARD] = (CD_MASK & ~SD_CD),
+ [RTSX_USB_MS_CARD] = (CD_MASK & ~MS_CD)
+ };
+
+ ret = rtsx_usb_get_card_status(ucr, &val);
+ /*
+ * If get status fails, return 0 (ok) for the exclusive check
+ * and let the flow fail at somewhere else.
+ */
+ if (ret)
+ return 0;
+
+ if (val & cd_mask[card])
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_card_exclusive_check);
+
+static int rtsx_usb_reset_chip(struct rtsx_ucr *ucr)
+{
+ int ret;
+ u8 val;
+
+ rtsx_usb_init_cmd(ucr);
+
+ if (CHECK_PKG(ucr, LQFP48)) {
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
+ LDO3318_PWR_MASK, LDO_SUSPEND);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
+ FORCE_LDO_POWERB, FORCE_LDO_POWERB);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1,
+ 0x30, 0x10);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5,
+ 0x03, 0x01);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6,
+ 0x0C, 0x04);
+ }
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SYS_DUMMY0, NYET_MSAK, NYET_EN);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CD_DEGLITCH_WIDTH, 0xFF, 0x08);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ CD_DEGLITCH_EN, XD_CD_DEGLITCH_EN, 0x0);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+ SD30_DRIVE_MASK, DRIVER_TYPE_D);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ CARD_DRIVE_SEL, SD20_DRIVE_MASK, 0x0);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG, 0xE0, 0x0);
+
+ if (ucr->is_rts5179)
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ CARD_PULL_CTL5, 0x03, 0x01);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DMA1_CTL,
+ EXTEND_DMA1_ASYNC_SIGNAL, EXTEND_DMA1_ASYNC_SIGNAL);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_INT_PEND,
+ XD_INT | MS_INT | SD_INT,
+ XD_INT | MS_INT | SD_INT);
+
+ ret = rtsx_usb_send_cmd(ucr, MODE_C, 100);
+ if (ret)
+ return ret;
+
+ /* config non-crystal mode */
+ rtsx_usb_read_register(ucr, CFG_MODE, &val);
+ if ((val & XTAL_FREE) || ((val & CLK_MODE_MASK) == CLK_MODE_NON_XTAL)) {
+ ret = rtsx_usb_write_phy_register(ucr, 0xC2, 0x7C);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtsx_usb_init_chip(struct rtsx_ucr *ucr)
+{
+ int ret;
+ u8 val;
+
+ rtsx_usb_clear_fsm_err(ucr);
+
+ /* power on SSC */
+ ret = rtsx_usb_write_register(ucr,
+ FPDCTL, SSC_POWER_MASK, SSC_POWER_ON);
+ if (ret)
+ return ret;
+
+ usleep_range(100, 1000);
+ ret = rtsx_usb_write_register(ucr, CLK_DIV, CLK_CHANGE, 0x00);
+ if (ret)
+ return ret;
+
+ /* determine IC version */
+ ret = rtsx_usb_read_register(ucr, HW_VERSION, &val);
+ if (ret)
+ return ret;
+
+ ucr->ic_version = val & HW_VER_MASK;
+
+ /* determine package */
+ ret = rtsx_usb_read_register(ucr, CARD_SHARE_MODE, &val);
+ if (ret)
+ return ret;
+
+ if (val & CARD_SHARE_LQFP_SEL) {
+ ucr->package = LQFP48;
+ dev_dbg(&ucr->pusb_intf->dev, "Package: LQFP48\n");
+ } else {
+ ucr->package = QFN24;
+ dev_dbg(&ucr->pusb_intf->dev, "Package: QFN24\n");
+ }
+
+ /* determine IC variations */
+ rtsx_usb_read_register(ucr, CFG_MODE_1, &val);
+ if (val & RTS5179) {
+ ucr->is_rts5179 = true;
+ dev_dbg(&ucr->pusb_intf->dev, "Device is rts5179\n");
+ } else {
+ ucr->is_rts5179 = false;
+ }
+
+ return rtsx_usb_reset_chip(ucr);
+}
+
+static int rtsx_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct rtsx_ucr *ucr;
+ int ret;
+
+ dev_dbg(&intf->dev,
+ ": Realtek USB Card Reader found at bus %03d address %03d\n",
+ usb_dev->bus->busnum, usb_dev->devnum);
+
+ ucr = devm_kzalloc(&intf->dev, sizeof(*ucr), GFP_KERNEL);
+ if (!ucr)
+ return -ENOMEM;
+
+ ucr->pusb_dev = usb_dev;
+
+ ucr->iobuf = usb_alloc_coherent(ucr->pusb_dev, IOBUF_SIZE,
+ GFP_KERNEL, &ucr->iobuf_dma);
+ if (!ucr->iobuf)
+ return -ENOMEM;
+
+ usb_set_intfdata(intf, ucr);
+
+ ucr->vendor_id = id->idVendor;
+ ucr->product_id = id->idProduct;
+ ucr->cmd_buf = ucr->rsp_buf = ucr->iobuf;
+
+ mutex_init(&ucr->dev_mutex);
+
+ ucr->pusb_intf = intf;
+
+ /* initialize */
+ ret = rtsx_usb_init_chip(ucr);
+ if (ret)
+ goto out_init_fail;
+
+ ret = mfd_add_devices(&intf->dev, usb_dev->devnum, rtsx_usb_cells,
+ ARRAY_SIZE(rtsx_usb_cells), NULL, 0, NULL);
+ if (ret)
+ goto out_init_fail;
+
+ /* initialize USB SG transfer timer */
+ init_timer(&ucr->sg_timer);
+ setup_timer(&ucr->sg_timer, rtsx_usb_sg_timed_out, (unsigned long) ucr);
+#ifdef CONFIG_PM
+ intf->needs_remote_wakeup = 1;
+ usb_enable_autosuspend(usb_dev);
+#endif
+
+ return 0;
+
+out_init_fail:
+ usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
+ ucr->iobuf_dma);
+ return ret;
+}
+
+static void rtsx_usb_disconnect(struct usb_interface *intf)
+{
+ struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
+
+ dev_dbg(&intf->dev, "%s called\n", __func__);
+
+ mfd_remove_devices(&intf->dev);
+
+ usb_set_intfdata(ucr->pusb_intf, NULL);
+ usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
+ ucr->iobuf_dma);
+}
+
+#ifdef CONFIG_PM
+static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct rtsx_ucr *ucr =
+ (struct rtsx_ucr *)usb_get_intfdata(intf);
+
+ dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
+ __func__, message.event);
+
+ mutex_lock(&ucr->dev_mutex);
+ rtsx_usb_turn_off_led(ucr);
+ mutex_unlock(&ucr->dev_mutex);
+ return 0;
+}
+
+static int rtsx_usb_resume(struct usb_interface *intf)
+{
+ return 0;
+}
+
+static int rtsx_usb_reset_resume(struct usb_interface *intf)
+{
+ struct rtsx_ucr *ucr =
+ (struct rtsx_ucr *)usb_get_intfdata(intf);
+
+ rtsx_usb_reset_chip(ucr);
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define rtsx_usb_suspend NULL
+#define rtsx_usb_resume NULL
+#define rtsx_usb_reset_resume NULL
+
+#endif /* CONFIG_PM */
+
+
+static int rtsx_usb_pre_reset(struct usb_interface *intf)
+{
+ struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
+
+ mutex_lock(&ucr->dev_mutex);
+ return 0;
+}
+
+static int rtsx_usb_post_reset(struct usb_interface *intf)
+{
+ struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
+
+ mutex_unlock(&ucr->dev_mutex);
+ return 0;
+}
+
+static struct usb_device_id rtsx_usb_usb_ids[] = {
+ { USB_DEVICE(0x0BDA, 0x0129) },
+ { USB_DEVICE(0x0BDA, 0x0139) },
+ { USB_DEVICE(0x0BDA, 0x0140) },
+ { }
+};
+
+static struct usb_driver rtsx_usb_driver = {
+ .name = "rtsx_usb",
+ .probe = rtsx_usb_probe,
+ .disconnect = rtsx_usb_disconnect,
+ .suspend = rtsx_usb_suspend,
+ .resume = rtsx_usb_resume,
+ .reset_resume = rtsx_usb_reset_resume,
+ .pre_reset = rtsx_usb_pre_reset,
+ .post_reset = rtsx_usb_post_reset,
+ .id_table = rtsx_usb_usb_ids,
+ .supports_autosuspend = 1,
+ .soft_unbind = 1,
+};
+
+module_usb_driver(rtsx_usb_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Roger Tseng <rogerable@realtek.com>");
+MODULE_DESCRIPTION("Realtek USB Card Reader Driver");
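
A hedged sketch (not from the new file) of the batched register-access pattern the exported rtsx_usb_* helpers above are built around, mirroring rtsx_usb_get_status_with_bulk(); the helper name is hypothetical:

static int example_read_card_exist(struct rtsx_ucr *ucr, u8 *exist)
{
	int ret;

	/* Queue one read command, send the batch, then fetch the response. */
	rtsx_usb_init_cmd(ucr);
	rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_EXIST, 0x00, 0x00);

	ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
	if (ret)
		return ret;

	ret = rtsx_usb_get_rsp(ucr, 1, 100);
	if (ret)
		return ret;

	*exist = ucr->rsp_buf[0];
	return 0;
}
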
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 714e2135210e..1cf27521fff4 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -26,7 +26,9 @@
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/irq.h>
#include <linux/mfd/samsung/rtc.h>
+#include <linux/mfd/samsung/s2mpa01.h>
#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s5m8763.h>
#include <linux/mfd/samsung/s5m8767.h>
#include <linux/regmap.h>
@@ -58,6 +60,7 @@ static const struct mfd_cell s5m8767_devs[] = {
.name = "s5m-rtc",
}, {
.name = "s5m8767-clk",
+ .of_compatible = "samsung,s5m8767-clk",
}
};
@@ -66,21 +69,58 @@ static const struct mfd_cell s2mps11_devs[] = {
.name = "s2mps11-pmic",
}, {
.name = "s2mps11-clk",
+ .of_compatible = "samsung,s2mps11-clk",
}
};
+static const struct mfd_cell s2mps14_devs[] = {
+ {
+ .name = "s2mps14-pmic",
+ }, {
+ .name = "s2mps14-rtc",
+ }, {
+ .name = "s2mps14-clk",
+ .of_compatible = "samsung,s2mps14-clk",
+ }
+};
+
+static const struct mfd_cell s2mpa01_devs[] = {
+ {
+ .name = "s2mpa01-pmic",
+ },
+};
+
#ifdef CONFIG_OF
static struct of_device_id sec_dt_match[] = {
{ .compatible = "samsung,s5m8767-pmic",
.data = (void *)S5M8767X,
- },
- { .compatible = "samsung,s2mps11-pmic",
+ }, {
+ .compatible = "samsung,s2mps11-pmic",
.data = (void *)S2MPS11X,
+ }, {
+ .compatible = "samsung,s2mps14-pmic",
+ .data = (void *)S2MPS14X,
+ }, {
+ .compatible = "samsung,s2mpa01-pmic",
+ .data = (void *)S2MPA01,
+ }, {
+ /* Sentinel */
},
- {},
};
#endif
+static bool s2mpa01_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPA01_REG_INT1M:
+ case S2MPA01_REG_INT2M:
+ case S2MPA01_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
static bool s2mps11_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -111,6 +151,15 @@ static const struct regmap_config sec_regmap_config = {
.val_bits = 8,
};
+static const struct regmap_config s2mpa01_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPA01_REG_LDO_OVCB4,
+ .volatile_reg = s2mpa01_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
static const struct regmap_config s2mps11_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -120,6 +169,15 @@ static const struct regmap_config s2mps11_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
+static const struct regmap_config s2mps14_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS14_REG_LDODSCH3,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
static const struct regmap_config s5m8763_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -138,9 +196,18 @@ static const struct regmap_config s5m8767_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
-static const struct regmap_config sec_rtc_regmap_config = {
+static const struct regmap_config s5m_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = SEC_RTC_REG_MAX,
+};
+
+static const struct regmap_config s2mps14_rtc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+
+ .max_register = S2MPS_RTC_REG_MAX,
};
#ifdef CONFIG_OF
@@ -180,24 +247,24 @@ static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
}
#endif
-static inline int sec_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long sec_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
#ifdef CONFIG_OF
if (i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(sec_dt_match, i2c->dev.of_node);
- return (int)match->data;
+ return (unsigned long)match->data;
}
#endif
- return (int)id->driver_data;
+ return id->driver_data;
}
static int sec_pmic_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct sec_platform_data *pdata = dev_get_platdata(&i2c->dev);
- const struct regmap_config *regmap;
+ const struct regmap_config *regmap, *regmap_rtc;
struct sec_pmic_dev *sec_pmic;
int ret;
@@ -229,17 +296,41 @@ static int sec_pmic_probe(struct i2c_client *i2c,
}
switch (sec_pmic->device_type) {
+ case S2MPA01:
+ regmap = &s2mpa01_regmap_config;
+ /*
+ * The rtc-s5m driver does not support S2MPA01 and there
+ * is no mfd_cell for S2MPA01 RTC device.
+ * However we must pass something to devm_regmap_init_i2c()
+ * so use S5M-like regmap config even though it wouldn't work.
+ */
+ regmap_rtc = &s5m_rtc_regmap_config;
+ break;
case S2MPS11X:
regmap = &s2mps11_regmap_config;
+ /*
+ * The rtc-s5m driver does not support S2MPS11 and there
+ * is no mfd_cell for S2MPS11 RTC device.
+ * However we must pass something to devm_regmap_init_i2c()
+ * so use S5M-like regmap config even though it wouldn't work.
+ */
+ regmap_rtc = &s5m_rtc_regmap_config;
+ break;
+ case S2MPS14X:
+ regmap = &s2mps14_regmap_config;
+ regmap_rtc = &s2mps14_rtc_regmap_config;
break;
case S5M8763X:
regmap = &s5m8763_regmap_config;
+ regmap_rtc = &s5m_rtc_regmap_config;
break;
case S5M8767X:
regmap = &s5m8767_regmap_config;
+ regmap_rtc = &s5m_rtc_regmap_config;
break;
default:
regmap = &sec_regmap_config;
+ regmap_rtc = &s5m_rtc_regmap_config;
break;
}
@@ -252,15 +343,18 @@ static int sec_pmic_probe(struct i2c_client *i2c,
}
sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
+ if (!sec_pmic->rtc) {
+ dev_err(&i2c->dev, "Failed to allocate I2C for RTC\n");
+ return -ENODEV;
+ }
i2c_set_clientdata(sec_pmic->rtc, sec_pmic);
- sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc,
- &sec_rtc_regmap_config);
+ sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc, regmap_rtc);
if (IS_ERR(sec_pmic->regmap_rtc)) {
ret = PTR_ERR(sec_pmic->regmap_rtc);
dev_err(&i2c->dev, "Failed to allocate RTC register map: %d\n",
ret);
- return ret;
+ goto err_regmap_rtc;
}
if (pdata && pdata->cfg_pmic_irq)
@@ -283,24 +377,33 @@ static int sec_pmic_probe(struct i2c_client *i2c,
ret = mfd_add_devices(sec_pmic->dev, -1, s5m8767_devs,
ARRAY_SIZE(s5m8767_devs), NULL, 0, NULL);
break;
+ case S2MPA01:
+ ret = mfd_add_devices(sec_pmic->dev, -1, s2mpa01_devs,
+ ARRAY_SIZE(s2mpa01_devs), NULL, 0, NULL);
+ break;
case S2MPS11X:
ret = mfd_add_devices(sec_pmic->dev, -1, s2mps11_devs,
ARRAY_SIZE(s2mps11_devs), NULL, 0, NULL);
break;
+ case S2MPS14X:
+ ret = mfd_add_devices(sec_pmic->dev, -1, s2mps14_devs,
+ ARRAY_SIZE(s2mps14_devs), NULL, 0, NULL);
+ break;
default:
/* If this happens the probe function is problem */
BUG();
}
if (ret)
- goto err;
+ goto err_mfd;
device_init_wakeup(sec_pmic->dev, sec_pmic->wakeup);
return ret;
-err:
+err_mfd:
sec_irq_exit(sec_pmic);
+err_regmap_rtc:
i2c_unregister_device(sec_pmic->rtc);
return ret;
}
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 4de494f51d40..64e7913aadc6 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -1,7 +1,7 @@
/*
* sec-irq.c
*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify it
@@ -19,6 +19,7 @@
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/irq.h>
#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s5m8763.h>
#include <linux/mfd/samsung/s5m8767.h>
@@ -59,13 +60,13 @@ static const struct regmap_irq s2mps11_irqs[] = {
.reg_offset = 1,
.mask = S2MPS11_IRQ_RTC60S_MASK,
},
- [S2MPS11_IRQ_RTCA1] = {
+ [S2MPS11_IRQ_RTCA0] = {
.reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA1_MASK,
+ .mask = S2MPS11_IRQ_RTCA0_MASK,
},
- [S2MPS11_IRQ_RTCA2] = {
+ [S2MPS11_IRQ_RTCA1] = {
.reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA2_MASK,
+ .mask = S2MPS11_IRQ_RTCA1_MASK,
},
[S2MPS11_IRQ_SMPL] = {
.reg_offset = 1,
@@ -89,6 +90,76 @@ static const struct regmap_irq s2mps11_irqs[] = {
},
};
+static const struct regmap_irq s2mps14_irqs[] = {
+ [S2MPS14_IRQ_PWRONF] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_PWRONF_MASK,
+ },
+ [S2MPS14_IRQ_PWRONR] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_PWRONR_MASK,
+ },
+ [S2MPS14_IRQ_JIGONBF] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_JIGONBF_MASK,
+ },
+ [S2MPS14_IRQ_JIGONBR] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_JIGONBR_MASK,
+ },
+ [S2MPS14_IRQ_ACOKBF] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_ACOKBF_MASK,
+ },
+ [S2MPS14_IRQ_ACOKBR] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_ACOKBR_MASK,
+ },
+ [S2MPS14_IRQ_PWRON1S] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_PWRON1S_MASK,
+ },
+ [S2MPS14_IRQ_MRB] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_MRB_MASK,
+ },
+ [S2MPS14_IRQ_RTC60S] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTC60S_MASK,
+ },
+ [S2MPS14_IRQ_RTCA1] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTCA1_MASK,
+ },
+ [S2MPS14_IRQ_RTCA0] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTCA0_MASK,
+ },
+ [S2MPS14_IRQ_SMPL] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_SMPL_MASK,
+ },
+ [S2MPS14_IRQ_RTC1S] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTC1S_MASK,
+ },
+ [S2MPS14_IRQ_WTSR] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_WTSR_MASK,
+ },
+ [S2MPS14_IRQ_INT120C] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_INT120C_MASK,
+ },
+ [S2MPS14_IRQ_INT140C] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_INT140C_MASK,
+ },
+ [S2MPS14_IRQ_TSD] = {
+ .reg_offset = 2,
+ .mask = S2MPS14_IRQ_TSD_MASK,
+ },
+};
static const struct regmap_irq s5m8767_irqs[] = {
[S5M8767_IRQ_PWRR] = {
@@ -246,6 +317,16 @@ static const struct regmap_irq_chip s2mps11_irq_chip = {
.ack_base = S2MPS11_REG_INT1,
};
+static const struct regmap_irq_chip s2mps14_irq_chip = {
+ .name = "s2mps14",
+ .irqs = s2mps14_irqs,
+ .num_irqs = ARRAY_SIZE(s2mps14_irqs),
+ .num_regs = 3,
+ .status_base = S2MPS14_REG_INT1,
+ .mask_base = S2MPS14_REG_INT1M,
+ .ack_base = S2MPS14_REG_INT1,
+};
+
static const struct regmap_irq_chip s5m8767_irq_chip = {
.name = "s5m8767",
.irqs = s5m8767_irqs,
@@ -297,6 +378,12 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
sec_pmic->irq_base, &s2mps11_irq_chip,
&sec_pmic->irq_data);
break;
+ case S2MPS14X:
+ ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ sec_pmic->irq_base, &s2mps14_irq_chip,
+ &sec_pmic->irq_data);
+ break;
default:
dev_err(sec_pmic->dev, "Unknown device type %d\n",
sec_pmic->device_type);
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index 24ae3d8421c5..90112d4cc905 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 42ccd0544513..4a91f6771fb8 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -706,7 +706,7 @@ static int stmpe1801_reset(struct stmpe *stmpe)
if (!(ret & STMPE1801_MSK_SYS_CTRL_RESET))
return 0;
usleep_range(100, 200);
- };
+ }
return -EIO;
}
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
index 1243d5c6a448..7ceb3df09e25 100644
--- a/drivers/mfd/stw481x.c
+++ b/drivers/mfd/stw481x.c
@@ -167,7 +167,7 @@ static struct mfd_cell stw481x_cells[] = {
},
};
-const struct regmap_config stw481x_regmap_config = {
+static const struct regmap_config stw481x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
@@ -186,6 +186,12 @@ static int stw481x_probe(struct i2c_client *client,
i2c_set_clientdata(client, stw481x);
stw481x->client = client;
stw481x->map = devm_regmap_init_i2c(client, &stw481x_regmap_config);
+ if (IS_ERR(stw481x->map)) {
+ ret = PTR_ERR(stw481x->map);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
ret = stw481x_startup(stw481x);
if (ret) {
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 71841f9181bd..dbea55de4397 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -69,13 +69,6 @@ EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_compatible);
static int syscon_match_pdevname(struct device *dev, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
- const struct platform_device_id *id = platform_get_device_id(pdev);
-
- if (id)
- if (!strcmp(id->name, (const char *)data))
- return 1;
-
return !strcmp(dev_name(dev), (const char *)data);
}
@@ -152,7 +145,7 @@ static int syscon_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, syscon);
- dev_info(dev, "regmap %pR registered\n", res);
+ dev_dbg(dev, "regmap %pR registered\n", res);
return 0;
}
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 2cf636c267d9..bd83accc0f6d 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -13,8 +13,10 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tc3589x.h>
+#include <linux/err.h>
/**
* enum tc3589x_version - indicates the TC3589x version
@@ -160,7 +162,7 @@ static const struct mfd_cell tc3589x_dev_gpio[] = {
.name = "tc3589x-gpio",
.num_resources = ARRAY_SIZE(gpio_resources),
.resources = &gpio_resources[0],
- .of_compatible = "tc3589x-gpio",
+ .of_compatible = "toshiba,tc3589x-gpio",
},
};
@@ -169,7 +171,7 @@ static const struct mfd_cell tc3589x_dev_keypad[] = {
.name = "tc3589x-keypad",
.num_resources = ARRAY_SIZE(keypad_resources),
.resources = &keypad_resources[0],
- .of_compatible = "tc3589x-keypad",
+ .of_compatible = "toshiba,tc3589x-keypad",
},
};
@@ -318,45 +320,74 @@ static int tc3589x_device_init(struct tc3589x *tc3589x)
return ret;
}
-static int tc3589x_of_probe(struct device_node *np,
- struct tc3589x_platform_data *pdata)
+#ifdef CONFIG_OF
+static const struct of_device_id tc3589x_match[] = {
+ /* Legacy compatible string */
+ { .compatible = "tc3589x", .data = (void *) TC3589X_UNKNOWN },
+ { .compatible = "toshiba,tc35890", .data = (void *) TC3589X_TC35890 },
+ { .compatible = "toshiba,tc35892", .data = (void *) TC3589X_TC35892 },
+ { .compatible = "toshiba,tc35893", .data = (void *) TC3589X_TC35893 },
+ { .compatible = "toshiba,tc35894", .data = (void *) TC3589X_TC35894 },
+ { .compatible = "toshiba,tc35895", .data = (void *) TC3589X_TC35895 },
+ { .compatible = "toshiba,tc35896", .data = (void *) TC3589X_TC35896 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, tc3589x_match);
+
+static struct tc3589x_platform_data *
+tc3589x_of_probe(struct device *dev, enum tc3589x_version *version)
{
+ struct device_node *np = dev->of_node;
+ struct tc3589x_platform_data *pdata;
struct device_node *child;
+ const struct of_device_id *of_id;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ of_id = of_match_device(tc3589x_match, dev);
+ if (!of_id)
+ return ERR_PTR(-ENODEV);
+ *version = (enum tc3589x_version) of_id->data;
for_each_child_of_node(np, child) {
- if (!strcmp(child->name, "tc3589x_gpio")) {
+ if (of_device_is_compatible(child, "toshiba,tc3589x-gpio"))
pdata->block |= TC3589x_BLOCK_GPIO;
- }
- if (!strcmp(child->name, "tc3589x_keypad")) {
+ if (of_device_is_compatible(child, "toshiba,tc3589x-keypad"))
pdata->block |= TC3589x_BLOCK_KEYPAD;
- }
}
- return 0;
+ return pdata;
}
+#else
+static inline struct tc3589x_platform_data *
+tc3589x_of_probe(struct device *dev, enum tc3589x_version *version)
+{
+ dev_err(dev, "no device tree support\n");
+ return ERR_PTR(-ENODEV);
+}
+#endif
static int tc3589x_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct tc3589x_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct device_node *np = i2c->dev.of_node;
+ struct tc3589x_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct tc3589x *tc3589x;
+ enum tc3589x_version version;
int ret;
if (!pdata) {
- if (np) {
- pdata = devm_kzalloc(&i2c->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- ret = tc3589x_of_probe(np, pdata);
- if (ret)
- return ret;
- }
- else {
+ pdata = tc3589x_of_probe(&i2c->dev, &version);
+ if (IS_ERR(pdata)) {
dev_err(&i2c->dev, "No platform data or DT found\n");
- return -EINVAL;
+ return PTR_ERR(pdata);
}
+ } else {
+ /* When not probing from device tree we have this ID */
+ version = id->driver_data;
}
if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA
@@ -375,7 +406,7 @@ static int tc3589x_probe(struct i2c_client *i2c,
tc3589x->pdata = pdata;
tc3589x->irq_base = pdata->irq_base;
- switch (id->driver_data) {
+ switch (version) {
case TC3589X_TC35893:
case TC3589X_TC35895:
case TC3589X_TC35896:
@@ -471,9 +502,12 @@ static const struct i2c_device_id tc3589x_id[] = {
MODULE_DEVICE_TABLE(i2c, tc3589x_id);
static struct i2c_driver tc3589x_driver = {
- .driver.name = "tc3589x",
- .driver.owner = THIS_MODULE,
- .driver.pm = &tc3589x_dev_pm_ops,
+ .driver = {
+ .name = "tc3589x",
+ .owner = THIS_MODULE,
+ .pm = &tc3589x_dev_pm_ops,
+ .of_match_table = of_match_ptr(tc3589x_match),
+ },
.probe = tc3589x_probe,
.remove = tc3589x_remove,
.id_table = tc3589x_id,
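
A minimal sketch of the of_device_id .data pattern the tc3589x conversion relies on: the match table stores a per-variant enum in .data and probe recovers it via of_match_device(). The stand-alone names below (example_*, "vendor,example-*") are hypothetical placeholders; only the pattern itself mirrors the diff.

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_device.h>

enum example_version { EXAMPLE_V1, EXAMPLE_V2 };

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-v1", .data = (void *)EXAMPLE_V1 },
	{ .compatible = "vendor,example-v2", .data = (void *)EXAMPLE_V2 },
	{ }
};

static int example_get_version(struct device *dev, enum example_version *ver)
{
	const struct of_device_id *of_id = of_match_device(example_match, dev);

	if (!of_id)
		return -ENODEV;	/* not probed from a matching DT node */

	/* .data carries the variant; cast back through unsigned long. */
	*ver = (enum example_version)(unsigned long)of_id->data;
	return 0;
}
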
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
deleted file mode 100644
index a5424579679c..000000000000
--- a/drivers/mfd/ti-ssp.c
+++ /dev/null
@@ -1,465 +0,0 @@
-/*
- * Sequencer Serial Port (SSP) driver for Texas Instruments' SoCs
- *
- * Copyright (C) 2010 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/sched.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/ti_ssp.h>
-
-/* Register Offsets */
-#define REG_REV 0x00
-#define REG_IOSEL_1 0x04
-#define REG_IOSEL_2 0x08
-#define REG_PREDIV 0x0c
-#define REG_INTR_ST 0x10
-#define REG_INTR_EN 0x14
-#define REG_TEST_CTRL 0x18
-
-/* Per port registers */
-#define PORT_CFG_2 0x00
-#define PORT_ADDR 0x04
-#define PORT_DATA 0x08
-#define PORT_CFG_1 0x0c
-#define PORT_STATE 0x10
-
-#define SSP_PORT_CONFIG_MASK (SSP_EARLY_DIN | SSP_DELAY_DOUT)
-#define SSP_PORT_CLKRATE_MASK 0x0f
-
-#define SSP_SEQRAM_WR_EN BIT(4)
-#define SSP_SEQRAM_RD_EN BIT(5)
-#define SSP_START BIT(15)
-#define SSP_BUSY BIT(10)
-#define SSP_PORT_ASL BIT(7)
-#define SSP_PORT_CFO1 BIT(6)
-
-#define SSP_PORT_SEQRAM_SIZE 32
-
-static const int ssp_port_base[] = {0x040, 0x080};
-static const int ssp_port_seqram[] = {0x100, 0x180};
-
-struct ti_ssp {
- struct resource *res;
- struct device *dev;
- void __iomem *regs;
- spinlock_t lock;
- struct clk *clk;
- int irq;
- wait_queue_head_t wqh;
-
- /*
- * Some of the iosel2 register bits always read-back as 0, we need to
- * remember these values so that we don't clobber previously set
- * values.
- */
- u32 iosel2;
-};
-
-static inline struct ti_ssp *dev_to_ssp(struct device *dev)
-{
- return dev_get_drvdata(dev->parent);
-}
-
-static inline int dev_to_port(struct device *dev)
-{
- return to_platform_device(dev)->id;
-}
-
-/* Register Access Helpers, rmw() functions need to run locked */
-static inline u32 ssp_read(struct ti_ssp *ssp, int reg)
-{
- return __raw_readl(ssp->regs + reg);
-}
-
-static inline void ssp_write(struct ti_ssp *ssp, int reg, u32 val)
-{
- __raw_writel(val, ssp->regs + reg);
-}
-
-static inline void ssp_rmw(struct ti_ssp *ssp, int reg, u32 mask, u32 bits)
-{
- ssp_write(ssp, reg, (ssp_read(ssp, reg) & ~mask) | bits);
-}
-
-static inline u32 ssp_port_read(struct ti_ssp *ssp, int port, int reg)
-{
- return ssp_read(ssp, ssp_port_base[port] + reg);
-}
-
-static inline void ssp_port_write(struct ti_ssp *ssp, int port, int reg,
- u32 val)
-{
- ssp_write(ssp, ssp_port_base[port] + reg, val);
-}
-
-static inline void ssp_port_rmw(struct ti_ssp *ssp, int port, int reg,
- u32 mask, u32 bits)
-{
- ssp_rmw(ssp, ssp_port_base[port] + reg, mask, bits);
-}
-
-static inline void ssp_port_clr_bits(struct ti_ssp *ssp, int port, int reg,
- u32 bits)
-{
- ssp_port_rmw(ssp, port, reg, bits, 0);
-}
-
-static inline void ssp_port_set_bits(struct ti_ssp *ssp, int port, int reg,
- u32 bits)
-{
- ssp_port_rmw(ssp, port, reg, 0, bits);
-}
-
-/* Called to setup port clock mode, caller must hold ssp->lock */
-static int __set_mode(struct ti_ssp *ssp, int port, int mode)
-{
- mode &= SSP_PORT_CONFIG_MASK;
- ssp_port_rmw(ssp, port, PORT_CFG_1, SSP_PORT_CONFIG_MASK, mode);
-
- return 0;
-}
-
-int ti_ssp_set_mode(struct device *dev, int mode)
-{
- struct ti_ssp *ssp = dev_to_ssp(dev);
- int port = dev_to_port(dev);
- int ret;
-
- spin_lock(&ssp->lock);
- ret = __set_mode(ssp, port, mode);
- spin_unlock(&ssp->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(ti_ssp_set_mode);
-
-/* Called to setup iosel2, caller must hold ssp->lock */
-static void __set_iosel2(struct ti_ssp *ssp, u32 mask, u32 val)
-{
- ssp->iosel2 = (ssp->iosel2 & ~mask) | val;
- ssp_write(ssp, REG_IOSEL_2, ssp->iosel2);
-}
-
-/* Called to setup port iosel, caller must hold ssp->lock */
-static void __set_iosel(struct ti_ssp *ssp, int port, u32 iosel)
-{
- unsigned val, shift = port ? 16 : 0;
-
- /* IOSEL1 gets the least significant 16 bits */
- val = ssp_read(ssp, REG_IOSEL_1);
- val &= 0xffff << (port ? 0 : 16);
- val |= (iosel & 0xffff) << (port ? 16 : 0);
- ssp_write(ssp, REG_IOSEL_1, val);
-
- /* IOSEL2 gets the most significant 16 bits */
- val = (iosel >> 16) & 0x7;
- __set_iosel2(ssp, 0x7 << shift, val << shift);
-}
-
-int ti_ssp_set_iosel(struct device *dev, u32 iosel)
-{
- struct ti_ssp *ssp = dev_to_ssp(dev);
- int port = dev_to_port(dev);
-
- spin_lock(&ssp->lock);
- __set_iosel(ssp, port, iosel);
- spin_unlock(&ssp->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(ti_ssp_set_iosel);
-
-int ti_ssp_load(struct device *dev, int offs, u32* prog, int len)
-{
- struct ti_ssp *ssp = dev_to_ssp(dev);
- int port = dev_to_port(dev);
- int i;
-
- if (len > SSP_PORT_SEQRAM_SIZE)
- return -ENOSPC;
-
- spin_lock(&ssp->lock);
-
- /* Enable SeqRAM access */
- ssp_port_set_bits(ssp, port, PORT_CFG_2, SSP_SEQRAM_WR_EN);
-
- /* Copy code */
- for (i = 0; i < len; i++) {
- __raw_writel(prog[i], ssp->regs + offs + 4*i +
- ssp_port_seqram[port]);
- }
-
- /* Disable SeqRAM access */
- ssp_port_clr_bits(ssp, port, PORT_CFG_2, SSP_SEQRAM_WR_EN);
-
- spin_unlock(&ssp->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(ti_ssp_load);
-
-int ti_ssp_raw_read(struct device *dev)
-{
- struct ti_ssp *ssp = dev_to_ssp(dev);
- int port = dev_to_port(dev);
- int shift = port ? 27 : 11;
-
- return (ssp_read(ssp, REG_IOSEL_2) >> shift) & 0xf;
-}
-EXPORT_SYMBOL(ti_ssp_raw_read);
-
-int ti_ssp_raw_write(struct device *dev, u32 val)
-{
- struct ti_ssp *ssp = dev_to_ssp(dev);
- int port = dev_to_port(dev), shift;
-
- spin_lock(&ssp->lock);
-
- shift = port ? 22 : 6;
- val &= 0xf;
- __set_iosel2(ssp, 0xf << shift, val << shift);
-
- spin_unlock(&ssp->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(ti_ssp_raw_write);
-
-static inline int __xfer_done(struct ti_ssp *ssp, int port)
-{
- return !(ssp_port_read(ssp, port, PORT_CFG_1) & SSP_BUSY);
-}
-
-int ti_ssp_run(struct device *dev, u32 pc, u32 input, u32 *output)
-{
- struct ti_ssp *ssp = dev_to_ssp(dev);
- int port = dev_to_port(dev);
- int ret;
-
- if (pc & ~(0x3f))
- return -EINVAL;
-
- /* Grab ssp->lock to serialize rmw on ssp registers */
- spin_lock(&ssp->lock);
-
- ssp_port_write(ssp, port, PORT_ADDR, input >> 16);
- ssp_port_write(ssp, port, PORT_DATA, input & 0xffff);
- ssp_port_rmw(ssp, port, PORT_CFG_1, 0x3f, pc);
-
- /* grab wait queue head lock to avoid race with the isr */
- spin_lock_irq(&ssp->wqh.lock);
-
- /* kick off sequence execution in hardware */
- ssp_port_set_bits(ssp, port, PORT_CFG_1, SSP_START);
-
- /* drop ssp lock; no register writes beyond this */
- spin_unlock(&ssp->lock);
-
- ret = wait_event_interruptible_locked_irq(ssp->wqh,
- __xfer_done(ssp, port));
- spin_unlock_irq(&ssp->wqh.lock);
-
- if (ret < 0)
- return ret;
-
- if (output) {
- *output = (ssp_port_read(ssp, port, PORT_ADDR) << 16) |
- (ssp_port_read(ssp, port, PORT_DATA) & 0xffff);
- }
-
- ret = ssp_port_read(ssp, port, PORT_STATE) & 0x3f; /* stop address */
-
- return ret;
-}
-EXPORT_SYMBOL(ti_ssp_run);
-
-static irqreturn_t ti_ssp_interrupt(int irq, void *dev_data)
-{
- struct ti_ssp *ssp = dev_data;
-
- spin_lock(&ssp->wqh.lock);
-
- ssp_write(ssp, REG_INTR_ST, 0x3);
- wake_up_locked(&ssp->wqh);
-
- spin_unlock(&ssp->wqh.lock);
-
- return IRQ_HANDLED;
-}
-
-static int ti_ssp_probe(struct platform_device *pdev)
-{
- static struct ti_ssp *ssp;
- const struct ti_ssp_data *pdata = dev_get_platdata(&pdev->dev);
- int error = 0, prediv = 0xff, id;
- unsigned long sysclk;
- struct device *dev = &pdev->dev;
- struct mfd_cell cells[2];
-
- ssp = kzalloc(sizeof(*ssp), GFP_KERNEL);
- if (!ssp) {
- dev_err(dev, "cannot allocate device info\n");
- return -ENOMEM;
- }
-
- ssp->dev = dev;
- dev_set_drvdata(dev, ssp);
-
- ssp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!ssp->res) {
- error = -ENODEV;
- dev_err(dev, "cannot determine register area\n");
- goto error_res;
- }
-
- if (!request_mem_region(ssp->res->start, resource_size(ssp->res),
- pdev->name)) {
- error = -ENOMEM;
- dev_err(dev, "cannot claim register memory\n");
- goto error_res;
- }
-
- ssp->regs = ioremap(ssp->res->start, resource_size(ssp->res));
- if (!ssp->regs) {
- error = -ENOMEM;
- dev_err(dev, "cannot map register memory\n");
- goto error_map;
- }
-
- ssp->clk = clk_get(dev, NULL);
- if (IS_ERR(ssp->clk)) {
- error = PTR_ERR(ssp->clk);
- dev_err(dev, "cannot claim device clock\n");
- goto error_clk;
- }
-
- ssp->irq = platform_get_irq(pdev, 0);
- if (ssp->irq < 0) {
- error = -ENODEV;
- dev_err(dev, "unknown irq\n");
- goto error_irq;
- }
-
- error = request_threaded_irq(ssp->irq, NULL, ti_ssp_interrupt, 0,
- dev_name(dev), ssp);
- if (error < 0) {
- dev_err(dev, "cannot acquire irq\n");
- goto error_irq;
- }
-
- spin_lock_init(&ssp->lock);
- init_waitqueue_head(&ssp->wqh);
-
- /* Power on and initialize SSP */
- error = clk_enable(ssp->clk);
- if (error) {
- dev_err(dev, "cannot enable device clock\n");
- goto error_enable;
- }
-
- /* Reset registers to a sensible known state */
- ssp_write(ssp, REG_IOSEL_1, 0);
- ssp_write(ssp, REG_IOSEL_2, 0);
- ssp_write(ssp, REG_INTR_EN, 0x3);
- ssp_write(ssp, REG_INTR_ST, 0x3);
- ssp_write(ssp, REG_TEST_CTRL, 0);
- ssp_port_write(ssp, 0, PORT_CFG_1, SSP_PORT_ASL);
- ssp_port_write(ssp, 1, PORT_CFG_1, SSP_PORT_ASL);
- ssp_port_write(ssp, 0, PORT_CFG_2, SSP_PORT_CFO1);
- ssp_port_write(ssp, 1, PORT_CFG_2, SSP_PORT_CFO1);
-
- sysclk = clk_get_rate(ssp->clk);
- if (pdata && pdata->out_clock)
- prediv = (sysclk / pdata->out_clock) - 1;
- prediv = clamp(prediv, 0, 0xff);
- ssp_rmw(ssp, REG_PREDIV, 0xff, prediv);
-
- memset(cells, 0, sizeof(cells));
- for (id = 0; id < 2; id++) {
- const struct ti_ssp_dev_data *data = &pdata->dev_data[id];
-
- cells[id].id = id;
- cells[id].name = data->dev_name;
- cells[id].platform_data = data->pdata;
- }
-
- error = mfd_add_devices(dev, 0, cells, 2, NULL, 0, NULL);
- if (error < 0) {
- dev_err(dev, "cannot add mfd cells\n");
- goto error_enable;
- }
-
- return 0;
-
-error_enable:
- free_irq(ssp->irq, ssp);
-error_irq:
- clk_put(ssp->clk);
-error_clk:
- iounmap(ssp->regs);
-error_map:
- release_mem_region(ssp->res->start, resource_size(ssp->res));
-error_res:
- kfree(ssp);
- return error;
-}
-
-static int ti_ssp_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ti_ssp *ssp = dev_get_drvdata(dev);
-
- mfd_remove_devices(dev);
- clk_disable(ssp->clk);
- free_irq(ssp->irq, ssp);
- clk_put(ssp->clk);
- iounmap(ssp->regs);
- release_mem_region(ssp->res->start, resource_size(ssp->res));
- kfree(ssp);
- return 0;
-}
-
-static struct platform_driver ti_ssp_driver = {
- .probe = ti_ssp_probe,
- .remove = ti_ssp_remove,
- .driver = {
- .name = "ti-ssp",
- .owner = THIS_MODULE,
- }
-};
-
-module_platform_driver(ti_ssp_driver);
-
-MODULE_DESCRIPTION("Sequencer Serial Port (SSP) Driver");
-MODULE_AUTHOR("Cyril Chemparathy");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ti-ssp");
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index d4e860413bb5..dd4bf5816221 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -184,12 +183,6 @@ static int ti_tscadc_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no memory resource defined.\n");
- return -EINVAL;
- }
-
/* Allocate memory for device */
tscadc = devm_kzalloc(&pdev->dev,
sizeof(struct ti_tscadc_dev), GFP_KERNEL);
@@ -206,19 +199,10 @@ static int ti_tscadc_probe(struct platform_device *pdev)
} else
tscadc->irq = err;
- res = devm_request_mem_region(&pdev->dev,
- res->start, resource_size(res), pdev->name);
- if (!res) {
- dev_err(&pdev->dev, "failed to reserve registers.\n");
- return -EBUSY;
- }
-
- tscadc->tscadc_base = devm_ioremap(&pdev->dev,
- res->start, resource_size(res));
- if (!tscadc->tscadc_base) {
- dev_err(&pdev->dev, "failed to map registers.\n");
- return -ENOMEM;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tscadc->tscadc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tscadc->tscadc_base))
+ return PTR_ERR(tscadc->tscadc_base);
tscadc->regmap_tscadc = devm_regmap_init_mmio(&pdev->dev,
tscadc->tscadc_base, &tscadc_regmap_config);
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 2bc5cfb85204..6ce36d6970a4 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -715,7 +715,7 @@ static int timb_probe(struct pci_dev *dev,
for (i = 0; i < TIMBERDALE_NR_IRQS; i++)
msix_entries[i].entry = i;
- err = pci_enable_msix(dev, msix_entries, TIMBERDALE_NR_IRQS);
+ err = pci_enable_msix_exact(dev, msix_entries, TIMBERDALE_NR_IRQS);
if (err) {
dev_err(&dev->dev,
"MSI-X init failed: %d, expected entries: %d\n",
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
new file mode 100644
index 000000000000..a74bfb59f18f
--- /dev/null
+++ b/drivers/mfd/tps65218.c
@@ -0,0 +1,282 @@
+/*
+ * Driver for TPS65218 Integrated power management chipsets
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65218.h>
+
+#define TPS65218_PASSWORD_REGS_UNLOCK 0x7D
+
+/**
+ * tps65218_reg_read: Read a single tps65218 register.
+ *
+ * @tps: Device to read from.
+ * @reg: Register to read.
+ * @val: Contains the value read from the register.
+ */
+int tps65218_reg_read(struct tps65218 *tps, unsigned int reg,
+ unsigned int *val)
+{
+ return regmap_read(tps->regmap, reg, val);
+}
+EXPORT_SYMBOL_GPL(tps65218_reg_read);
+
+/**
+ * tps65218_reg_write: Write a single tps65218 register.
+ *
+ * @tps: Device to write to.
+ * @reg: Register to write to.
+ * @val: Value to write.
+ * @level: Password protected level
+ */
+int tps65218_reg_write(struct tps65218 *tps, unsigned int reg,
+ unsigned int val, unsigned int level)
+{
+ int ret;
+ unsigned int xor_reg_val;
+
+ switch (level) {
+ case TPS65218_PROTECT_NONE:
+ return regmap_write(tps->regmap, reg, val);
+ case TPS65218_PROTECT_L1:
+ xor_reg_val = reg ^ TPS65218_PASSWORD_REGS_UNLOCK;
+ ret = regmap_write(tps->regmap, TPS65218_REG_PASSWORD,
+ xor_reg_val);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(tps->regmap, reg, val);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(tps65218_reg_write);
+
+/**
+ * tps65218_update_bits: Modify bits w.r.t mask, val and level.
+ *
+ * @tps: Device to write to.
+ * @reg: Register to read-write to.
+ * @mask: Mask.
+ * @val: Value to write.
+ * @level: Password protected level
+ */
+static int tps65218_update_bits(struct tps65218 *tps, unsigned int reg,
+ unsigned int mask, unsigned int val, unsigned int level)
+{
+ int ret;
+ unsigned int data;
+
+ ret = tps65218_reg_read(tps, reg, &data);
+ if (ret) {
+ dev_err(tps->dev, "Read from reg 0x%x failed\n", reg);
+ return ret;
+ }
+
+ data &= ~mask;
+ data |= val & mask;
+
+ mutex_lock(&tps->tps_lock);
+ ret = tps65218_reg_write(tps, reg, data, level);
+ if (ret)
+ dev_err(tps->dev, "Write for reg 0x%x failed\n", reg);
+ mutex_unlock(&tps->tps_lock);
+
+ return ret;
+}
+
+int tps65218_set_bits(struct tps65218 *tps, unsigned int reg,
+ unsigned int mask, unsigned int val, unsigned int level)
+{
+ return tps65218_update_bits(tps, reg, mask, val, level);
+}
+EXPORT_SYMBOL_GPL(tps65218_set_bits);
+
+int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg,
+ unsigned int mask, unsigned int level)
+{
+ return tps65218_update_bits(tps, reg, mask, 0, level);
+}
+EXPORT_SYMBOL_GPL(tps65218_clear_bits);
+
+static struct regmap_config tps65218_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regmap_irq tps65218_irqs[] = {
+ /* INT1 IRQs */
+ [TPS65218_PRGC_IRQ] = {
+ .mask = TPS65218_INT1_PRGC,
+ },
+ [TPS65218_CC_AQC_IRQ] = {
+ .mask = TPS65218_INT1_CC_AQC,
+ },
+ [TPS65218_HOT_IRQ] = {
+ .mask = TPS65218_INT1_HOT,
+ },
+ [TPS65218_PB_IRQ] = {
+ .mask = TPS65218_INT1_PB,
+ },
+ [TPS65218_AC_IRQ] = {
+ .mask = TPS65218_INT1_AC,
+ },
+ [TPS65218_VPRG_IRQ] = {
+ .mask = TPS65218_INT1_VPRG,
+ },
+ [TPS65218_INVALID1_IRQ] = {
+ },
+ [TPS65218_INVALID2_IRQ] = {
+ },
+ /* INT2 IRQs */
+ [TPS65218_LS1_I_IRQ] = {
+ .mask = TPS65218_INT2_LS1_I,
+ .reg_offset = 1,
+ },
+ [TPS65218_LS2_I_IRQ] = {
+ .mask = TPS65218_INT2_LS2_I,
+ .reg_offset = 1,
+ },
+ [TPS65218_LS3_I_IRQ] = {
+ .mask = TPS65218_INT2_LS3_I,
+ .reg_offset = 1,
+ },
+ [TPS65218_LS1_F_IRQ] = {
+ .mask = TPS65218_INT2_LS1_F,
+ .reg_offset = 1,
+ },
+ [TPS65218_LS2_F_IRQ] = {
+ .mask = TPS65218_INT2_LS2_F,
+ .reg_offset = 1,
+ },
+ [TPS65218_LS3_F_IRQ] = {
+ .mask = TPS65218_INT2_LS3_F,
+ .reg_offset = 1,
+ },
+ [TPS65218_INVALID3_IRQ] = {
+ },
+ [TPS65218_INVALID4_IRQ] = {
+ },
+};
+
+static struct regmap_irq_chip tps65218_irq_chip = {
+ .name = "tps65218",
+ .irqs = tps65218_irqs,
+ .num_irqs = ARRAY_SIZE(tps65218_irqs),
+
+ .num_regs = 2,
+ .mask_base = TPS65218_REG_INT_MASK1,
+};
+
+static const struct of_device_id of_tps65218_match_table[] = {
+ { .compatible = "ti,tps65218", },
+};
+
+static int tps65218_probe(struct i2c_client *client,
+ const struct i2c_device_id *ids)
+{
+ struct tps65218 *tps;
+ const struct of_device_id *match;
+ int ret;
+
+ match = of_match_device(of_tps65218_match_table, &client->dev);
+ if (!match) {
+ dev_err(&client->dev,
+ "Failed to find matching dt id\n");
+ return -EINVAL;
+ }
+
+ tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
+ if (!tps)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, tps);
+ tps->dev = &client->dev;
+ tps->irq = client->irq;
+ tps->regmap = devm_regmap_init_i2c(client, &tps65218_regmap_config);
+ if (IS_ERR(tps->regmap)) {
+ ret = PTR_ERR(tps->regmap);
+ dev_err(tps->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ mutex_init(&tps->tps_lock);
+
+ ret = regmap_add_irq_chip(tps->regmap, tps->irq,
+ IRQF_ONESHOT, 0, &tps65218_irq_chip,
+ &tps->irq_data);
+ if (ret < 0)
+ return ret;
+
+ ret = of_platform_populate(client->dev.of_node, NULL, NULL,
+ &client->dev);
+ if (ret < 0)
+ goto err_irq;
+
+ return 0;
+
+err_irq:
+ regmap_del_irq_chip(tps->irq, tps->irq_data);
+
+ return ret;
+}
+
+static int tps65218_remove(struct i2c_client *client)
+{
+ struct tps65218 *tps = i2c_get_clientdata(client);
+
+ regmap_del_irq_chip(tps->irq, tps->irq_data);
+
+ return 0;
+}
+
+static const struct i2c_device_id tps65218_id_table[] = {
+ { "tps65218", TPS65218 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, tps65218_id_table);
+
+static struct i2c_driver tps65218_driver = {
+ .driver = {
+ .name = "tps65218",
+ .owner = THIS_MODULE,
+ .of_match_table = of_tps65218_match_table,
+ },
+ .probe = tps65218_probe,
+ .remove = tps65218_remove,
+ .id_table = tps65218_id_table,
+};
+
+module_i2c_driver(tps65218_driver);
+
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("TPS65218 chip family multi-function driver");
+MODULE_LICENSE("GPL v2");
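
A hedged usage sketch for the exported TPS65218 helpers added above: EXAMPLE_REG and EXAMPLE_EN_MASK are hypothetical placeholders, while the helper names and TPS65218_PROTECT_L1 come from the driver and its header as referenced in this diff.

#include <linux/bitops.h>
#include <linux/mfd/tps65218.h>

#define EXAMPLE_REG	0x10	/* hypothetical level-1 protected register */
#define EXAMPLE_EN_MASK	BIT(0)	/* hypothetical enable bit */

static int example_enable(struct tps65218 *tps)
{
	/*
	 * For L1-protected registers the core first writes
	 * reg ^ TPS65218_PASSWORD_REGS_UNLOCK to the PASSWORD register and
	 * then the value itself; callers only pass the protection level.
	 */
	return tps65218_set_bits(tps, EXAMPLE_REG, EXAMPLE_EN_MASK,
				 EXAMPLE_EN_MASK, TPS65218_PROTECT_L1);
}
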
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 1f142d76cbbc..460a014ca629 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -255,8 +255,10 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
ret = regmap_add_irq_chip(tps65910->regmap, tps65910->chip_irq,
IRQF_ONESHOT, pdata->irq_base,
tps6591x_irqs_chip, &tps65910->irq_data);
- if (ret < 0)
+ if (ret < 0) {
dev_warn(tps65910->dev, "Failed to add irq_chip %d\n", ret);
+ tps65910->chip_irq = 0;
+ }
return ret;
}
@@ -509,6 +511,7 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
regmap_irq_get_domain(tps65910->irq_data));
if (ret < 0) {
dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
+ tps65910_irq_exit(tps65910);
return ret;
}
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index 27a518e0eec6..1f82d60b1d0f 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/mfd/core.h>
diff --git a/drivers/mfd/tps65912-irq.c b/drivers/mfd/tps65912-irq.c
index d360a83a2738..fbecec7f1e3d 100644
--- a/drivers/mfd/tps65912-irq.c
+++ b/drivers/mfd/tps65912-irq.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/interrupt.h>
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index ed718328eff1..e87140bef667 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -282,11 +282,11 @@ static struct reg_default twl4030_49_defaults[] = {
static bool twl4030_49_nop_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case 0:
- case 3:
- case 40:
- case 41:
- case 42:
+ case 0x00:
+ case 0x03:
+ case 0x40:
+ case 0x41:
+ case 0x42:
return false;
default:
return true;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 9aa6d1efa241..596b1f657e21 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -27,7 +27,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/init.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 18a607e2ca06..a6bb17d908b8 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -31,7 +31,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/init.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 75316fb33448..6e88f25832fb 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -661,6 +661,11 @@ static int twl6040_probe(struct i2c_client *client,
init_completion(&twl6040->ready);
twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
+ if (twl6040->rev < 0) {
+ dev_err(&client->dev, "Failed to read revision register: %d\n",
+ twl6040->rev);
+ goto gpio_err;
+ }
/* ERRATA: Automatic power-up is not possible in ES1.0 */
if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0)
@@ -703,7 +708,6 @@ static int twl6040_probe(struct i2c_client *client,
}
/* dual-access registers controlled by I2C only */
- twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL);
regmap_register_patch(twl6040->regmap, twl6040_patch,
ARRAY_SIZE(twl6040_patch));
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 0313f839e8fa..153d595afaac 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -742,9 +742,7 @@ static int ucb1x00_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops ucb1x00_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ucb1x00_suspend, ucb1x00_resume)
-};
+static SIMPLE_DEV_PM_OPS(ucb1x00_pm_ops, ucb1x00_suspend, ucb1x00_resume);
static struct mcp_driver ucb1x00_driver = {
.drv = {
diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c
index 84ce6b9daa3d..d0db89d13e01 100644
--- a/drivers/mfd/vexpress-config.c
+++ b/drivers/mfd/vexpress-config.c
@@ -16,7 +16,6 @@
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/export.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -27,7 +26,7 @@
#define VEXPRESS_CONFIG_MAX_BRIDGES 2
-struct vexpress_config_bridge {
+static struct vexpress_config_bridge {
struct device_node *node;
struct vexpress_config_bridge_info *info;
struct list_head transactions;
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index 981bef4b7ebc..35281e804e7e 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -168,7 +168,7 @@ static void *vexpress_sysreg_config_func_get(struct device *dev,
struct device_node *node)
{
struct vexpress_sysreg_config_func *config_func;
- u32 site;
+ u32 site = 0;
u32 position = 0;
u32 dcc = 0;
u32 func_device[2];
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 1e9a4b2102f9..070f8cfbbd7a 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -73,6 +73,7 @@ static const struct reg_default wm5102_revb_patch[] = {
{ 0x171, 0x0000 },
{ 0x35E, 0x000C },
{ 0x2D4, 0x0000 },
+ { 0x4DC, 0x0900 },
{ 0x80, 0x0000 },
};
@@ -80,8 +81,7 @@ static const struct reg_default wm5102_revb_patch[] = {
int wm5102_patch(struct arizona *arizona)
{
const struct reg_default *wm5102_patch;
- int ret = 0;
- int i, patch_size;
+ int patch_size;
switch (arizona->rev) {
case 0:
@@ -92,21 +92,9 @@ int wm5102_patch(struct arizona *arizona)
patch_size = ARRAY_SIZE(wm5102_revb_patch);
}
- regcache_cache_bypass(arizona->regmap, true);
-
- for (i = 0; i < patch_size; i++) {
- ret = regmap_write(arizona->regmap, wm5102_patch[i].reg,
- wm5102_patch[i].def);
- if (ret != 0) {
- dev_err(arizona->dev, "Failed to write %x = %x: %d\n",
- wm5102_patch[i].reg, wm5102_patch[i].def, ret);
- goto out;
- }
- }
-
-out:
- regcache_cache_bypass(arizona->regmap, false);
- return ret;
+ return regmap_multi_reg_write_bypassed(arizona->regmap,
+ wm5102_patch,
+ patch_size);
}
static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
@@ -1852,6 +1840,23 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_WDMA_BUFFER_1:
+ case ARIZONA_DSP1_WDMA_BUFFER_2:
+ case ARIZONA_DSP1_WDMA_BUFFER_3:
+ case ARIZONA_DSP1_WDMA_BUFFER_4:
+ case ARIZONA_DSP1_WDMA_BUFFER_5:
+ case ARIZONA_DSP1_WDMA_BUFFER_6:
+ case ARIZONA_DSP1_WDMA_BUFFER_7:
+ case ARIZONA_DSP1_WDMA_BUFFER_8:
+ case ARIZONA_DSP1_RDMA_BUFFER_1:
+ case ARIZONA_DSP1_RDMA_BUFFER_2:
+ case ARIZONA_DSP1_RDMA_BUFFER_3:
+ case ARIZONA_DSP1_RDMA_BUFFER_4:
+ case ARIZONA_DSP1_RDMA_BUFFER_5:
+ case ARIZONA_DSP1_RDMA_BUFFER_6:
+ case ARIZONA_DSP1_WDMA_CONFIG_1:
+ case ARIZONA_DSP1_WDMA_CONFIG_2:
+ case ARIZONA_DSP1_RDMA_CONFIG_1:
case ARIZONA_DSP1_SCRATCH_0:
case ARIZONA_DSP1_SCRATCH_1:
case ARIZONA_DSP1_SCRATCH_2:
@@ -1907,9 +1912,27 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_AOD_IRQ1:
case ARIZONA_AOD_IRQ2:
case ARIZONA_AOD_IRQ_RAW_STATUS:
+ case ARIZONA_DSP1_CLOCKING_1:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_WDMA_BUFFER_1:
+ case ARIZONA_DSP1_WDMA_BUFFER_2:
+ case ARIZONA_DSP1_WDMA_BUFFER_3:
+ case ARIZONA_DSP1_WDMA_BUFFER_4:
+ case ARIZONA_DSP1_WDMA_BUFFER_5:
+ case ARIZONA_DSP1_WDMA_BUFFER_6:
+ case ARIZONA_DSP1_WDMA_BUFFER_7:
+ case ARIZONA_DSP1_WDMA_BUFFER_8:
+ case ARIZONA_DSP1_RDMA_BUFFER_1:
+ case ARIZONA_DSP1_RDMA_BUFFER_2:
+ case ARIZONA_DSP1_RDMA_BUFFER_3:
+ case ARIZONA_DSP1_RDMA_BUFFER_4:
+ case ARIZONA_DSP1_RDMA_BUFFER_5:
+ case ARIZONA_DSP1_RDMA_BUFFER_6:
+ case ARIZONA_DSP1_WDMA_CONFIG_1:
+ case ARIZONA_DSP1_WDMA_CONFIG_2:
+ case ARIZONA_DSP1_RDMA_CONFIG_1:
case ARIZONA_DSP1_SCRATCH_0:
case ARIZONA_DSP1_SCRATCH_1:
case ARIZONA_DSP1_SCRATCH_2:
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 11632f135e8c..1942b6f231da 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -538,7 +538,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
{ 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
{ 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
- { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
+ { 0x0000029B, 0x0028 }, /* R667 - Headphone Detect 1 */
{ 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */
{ 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
@@ -2461,6 +2461,27 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_STATUS_4:
+ case ARIZONA_DSP1_WDMA_BUFFER_1:
+ case ARIZONA_DSP1_WDMA_BUFFER_2:
+ case ARIZONA_DSP1_WDMA_BUFFER_3:
+ case ARIZONA_DSP1_WDMA_BUFFER_4:
+ case ARIZONA_DSP1_WDMA_BUFFER_5:
+ case ARIZONA_DSP1_WDMA_BUFFER_6:
+ case ARIZONA_DSP1_WDMA_BUFFER_7:
+ case ARIZONA_DSP1_WDMA_BUFFER_8:
+ case ARIZONA_DSP1_RDMA_BUFFER_1:
+ case ARIZONA_DSP1_RDMA_BUFFER_2:
+ case ARIZONA_DSP1_RDMA_BUFFER_3:
+ case ARIZONA_DSP1_RDMA_BUFFER_4:
+ case ARIZONA_DSP1_RDMA_BUFFER_5:
+ case ARIZONA_DSP1_RDMA_BUFFER_6:
+ case ARIZONA_DSP1_WDMA_CONFIG_1:
+ case ARIZONA_DSP1_WDMA_CONFIG_2:
+ case ARIZONA_DSP1_WDMA_OFFSET_1:
+ case ARIZONA_DSP1_RDMA_CONFIG_1:
+ case ARIZONA_DSP1_RDMA_OFFSET_1:
+ case ARIZONA_DSP1_EXTERNAL_START_SELECT_1:
case ARIZONA_DSP1_SCRATCH_0:
case ARIZONA_DSP1_SCRATCH_1:
case ARIZONA_DSP1_SCRATCH_2:
@@ -2470,6 +2491,27 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP2_STATUS_1:
case ARIZONA_DSP2_STATUS_2:
case ARIZONA_DSP2_STATUS_3:
+ case ARIZONA_DSP2_STATUS_4:
+ case ARIZONA_DSP2_WDMA_BUFFER_1:
+ case ARIZONA_DSP2_WDMA_BUFFER_2:
+ case ARIZONA_DSP2_WDMA_BUFFER_3:
+ case ARIZONA_DSP2_WDMA_BUFFER_4:
+ case ARIZONA_DSP2_WDMA_BUFFER_5:
+ case ARIZONA_DSP2_WDMA_BUFFER_6:
+ case ARIZONA_DSP2_WDMA_BUFFER_7:
+ case ARIZONA_DSP2_WDMA_BUFFER_8:
+ case ARIZONA_DSP2_RDMA_BUFFER_1:
+ case ARIZONA_DSP2_RDMA_BUFFER_2:
+ case ARIZONA_DSP2_RDMA_BUFFER_3:
+ case ARIZONA_DSP2_RDMA_BUFFER_4:
+ case ARIZONA_DSP2_RDMA_BUFFER_5:
+ case ARIZONA_DSP2_RDMA_BUFFER_6:
+ case ARIZONA_DSP2_WDMA_CONFIG_1:
+ case ARIZONA_DSP2_WDMA_CONFIG_2:
+ case ARIZONA_DSP2_WDMA_OFFSET_1:
+ case ARIZONA_DSP2_RDMA_CONFIG_1:
+ case ARIZONA_DSP2_RDMA_OFFSET_1:
+ case ARIZONA_DSP2_EXTERNAL_START_SELECT_1:
case ARIZONA_DSP2_SCRATCH_0:
case ARIZONA_DSP2_SCRATCH_1:
case ARIZONA_DSP2_SCRATCH_2:
@@ -2479,6 +2521,27 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP3_STATUS_1:
case ARIZONA_DSP3_STATUS_2:
case ARIZONA_DSP3_STATUS_3:
+ case ARIZONA_DSP3_STATUS_4:
+ case ARIZONA_DSP3_WDMA_BUFFER_1:
+ case ARIZONA_DSP3_WDMA_BUFFER_2:
+ case ARIZONA_DSP3_WDMA_BUFFER_3:
+ case ARIZONA_DSP3_WDMA_BUFFER_4:
+ case ARIZONA_DSP3_WDMA_BUFFER_5:
+ case ARIZONA_DSP3_WDMA_BUFFER_6:
+ case ARIZONA_DSP3_WDMA_BUFFER_7:
+ case ARIZONA_DSP3_WDMA_BUFFER_8:
+ case ARIZONA_DSP3_RDMA_BUFFER_1:
+ case ARIZONA_DSP3_RDMA_BUFFER_2:
+ case ARIZONA_DSP3_RDMA_BUFFER_3:
+ case ARIZONA_DSP3_RDMA_BUFFER_4:
+ case ARIZONA_DSP3_RDMA_BUFFER_5:
+ case ARIZONA_DSP3_RDMA_BUFFER_6:
+ case ARIZONA_DSP3_WDMA_CONFIG_1:
+ case ARIZONA_DSP3_WDMA_CONFIG_2:
+ case ARIZONA_DSP3_WDMA_OFFSET_1:
+ case ARIZONA_DSP3_RDMA_CONFIG_1:
+ case ARIZONA_DSP3_RDMA_OFFSET_1:
+ case ARIZONA_DSP3_EXTERNAL_START_SELECT_1:
case ARIZONA_DSP3_SCRATCH_0:
case ARIZONA_DSP3_SCRATCH_1:
case ARIZONA_DSP3_SCRATCH_2:
@@ -2488,6 +2551,27 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP4_STATUS_1:
case ARIZONA_DSP4_STATUS_2:
case ARIZONA_DSP4_STATUS_3:
+ case ARIZONA_DSP4_STATUS_4:
+ case ARIZONA_DSP4_WDMA_BUFFER_1:
+ case ARIZONA_DSP4_WDMA_BUFFER_2:
+ case ARIZONA_DSP4_WDMA_BUFFER_3:
+ case ARIZONA_DSP4_WDMA_BUFFER_4:
+ case ARIZONA_DSP4_WDMA_BUFFER_5:
+ case ARIZONA_DSP4_WDMA_BUFFER_6:
+ case ARIZONA_DSP4_WDMA_BUFFER_7:
+ case ARIZONA_DSP4_WDMA_BUFFER_8:
+ case ARIZONA_DSP4_RDMA_BUFFER_1:
+ case ARIZONA_DSP4_RDMA_BUFFER_2:
+ case ARIZONA_DSP4_RDMA_BUFFER_3:
+ case ARIZONA_DSP4_RDMA_BUFFER_4:
+ case ARIZONA_DSP4_RDMA_BUFFER_5:
+ case ARIZONA_DSP4_RDMA_BUFFER_6:
+ case ARIZONA_DSP4_WDMA_CONFIG_1:
+ case ARIZONA_DSP4_WDMA_CONFIG_2:
+ case ARIZONA_DSP4_WDMA_OFFSET_1:
+ case ARIZONA_DSP4_RDMA_CONFIG_1:
+ case ARIZONA_DSP4_RDMA_OFFSET_1:
+ case ARIZONA_DSP4_EXTERNAL_START_SELECT_1:
case ARIZONA_DSP4_SCRATCH_0:
case ARIZONA_DSP4_SCRATCH_1:
case ARIZONA_DSP4_SCRATCH_2:
@@ -2543,31 +2627,119 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_STATUS_4:
+ case ARIZONA_DSP1_WDMA_BUFFER_1:
+ case ARIZONA_DSP1_WDMA_BUFFER_2:
+ case ARIZONA_DSP1_WDMA_BUFFER_3:
+ case ARIZONA_DSP1_WDMA_BUFFER_4:
+ case ARIZONA_DSP1_WDMA_BUFFER_5:
+ case ARIZONA_DSP1_WDMA_BUFFER_6:
+ case ARIZONA_DSP1_WDMA_BUFFER_7:
+ case ARIZONA_DSP1_WDMA_BUFFER_8:
+ case ARIZONA_DSP1_RDMA_BUFFER_1:
+ case ARIZONA_DSP1_RDMA_BUFFER_2:
+ case ARIZONA_DSP1_RDMA_BUFFER_3:
+ case ARIZONA_DSP1_RDMA_BUFFER_4:
+ case ARIZONA_DSP1_RDMA_BUFFER_5:
+ case ARIZONA_DSP1_RDMA_BUFFER_6:
+ case ARIZONA_DSP1_WDMA_CONFIG_1:
+ case ARIZONA_DSP1_WDMA_CONFIG_2:
+ case ARIZONA_DSP1_WDMA_OFFSET_1:
+ case ARIZONA_DSP1_RDMA_CONFIG_1:
+ case ARIZONA_DSP1_RDMA_OFFSET_1:
+ case ARIZONA_DSP1_EXTERNAL_START_SELECT_1:
case ARIZONA_DSP1_SCRATCH_0:
case ARIZONA_DSP1_SCRATCH_1:
case ARIZONA_DSP1_SCRATCH_2:
case ARIZONA_DSP1_SCRATCH_3:
+ case ARIZONA_DSP1_CLOCKING_1:
case ARIZONA_DSP2_STATUS_1:
case ARIZONA_DSP2_STATUS_2:
case ARIZONA_DSP2_STATUS_3:
+ case ARIZONA_DSP2_STATUS_4:
+ case ARIZONA_DSP2_WDMA_BUFFER_1:
+ case ARIZONA_DSP2_WDMA_BUFFER_2:
+ case ARIZONA_DSP2_WDMA_BUFFER_3:
+ case ARIZONA_DSP2_WDMA_BUFFER_4:
+ case ARIZONA_DSP2_WDMA_BUFFER_5:
+ case ARIZONA_DSP2_WDMA_BUFFER_6:
+ case ARIZONA_DSP2_WDMA_BUFFER_7:
+ case ARIZONA_DSP2_WDMA_BUFFER_8:
+ case ARIZONA_DSP2_RDMA_BUFFER_1:
+ case ARIZONA_DSP2_RDMA_BUFFER_2:
+ case ARIZONA_DSP2_RDMA_BUFFER_3:
+ case ARIZONA_DSP2_RDMA_BUFFER_4:
+ case ARIZONA_DSP2_RDMA_BUFFER_5:
+ case ARIZONA_DSP2_RDMA_BUFFER_6:
+ case ARIZONA_DSP2_WDMA_CONFIG_1:
+ case ARIZONA_DSP2_WDMA_CONFIG_2:
+ case ARIZONA_DSP2_WDMA_OFFSET_1:
+ case ARIZONA_DSP2_RDMA_CONFIG_1:
+ case ARIZONA_DSP2_RDMA_OFFSET_1:
+ case ARIZONA_DSP2_EXTERNAL_START_SELECT_1:
case ARIZONA_DSP2_SCRATCH_0:
case ARIZONA_DSP2_SCRATCH_1:
case ARIZONA_DSP2_SCRATCH_2:
case ARIZONA_DSP2_SCRATCH_3:
+ case ARIZONA_DSP2_CLOCKING_1:
case ARIZONA_DSP3_STATUS_1:
case ARIZONA_DSP3_STATUS_2:
case ARIZONA_DSP3_STATUS_3:
+ case ARIZONA_DSP3_STATUS_4:
+ case ARIZONA_DSP3_WDMA_BUFFER_1:
+ case ARIZONA_DSP3_WDMA_BUFFER_2:
+ case ARIZONA_DSP3_WDMA_BUFFER_3:
+ case ARIZONA_DSP3_WDMA_BUFFER_4:
+ case ARIZONA_DSP3_WDMA_BUFFER_5:
+ case ARIZONA_DSP3_WDMA_BUFFER_6:
+ case ARIZONA_DSP3_WDMA_BUFFER_7:
+ case ARIZONA_DSP3_WDMA_BUFFER_8:
+ case ARIZONA_DSP3_RDMA_BUFFER_1:
+ case ARIZONA_DSP3_RDMA_BUFFER_2:
+ case ARIZONA_DSP3_RDMA_BUFFER_3:
+ case ARIZONA_DSP3_RDMA_BUFFER_4:
+ case ARIZONA_DSP3_RDMA_BUFFER_5:
+ case ARIZONA_DSP3_RDMA_BUFFER_6:
+ case ARIZONA_DSP3_WDMA_CONFIG_1:
+ case ARIZONA_DSP3_WDMA_CONFIG_2:
+ case ARIZONA_DSP3_WDMA_OFFSET_1:
+ case ARIZONA_DSP3_RDMA_CONFIG_1:
+ case ARIZONA_DSP3_RDMA_OFFSET_1:
+ case ARIZONA_DSP3_EXTERNAL_START_SELECT_1:
case ARIZONA_DSP3_SCRATCH_0:
case ARIZONA_DSP3_SCRATCH_1:
case ARIZONA_DSP3_SCRATCH_2:
case ARIZONA_DSP3_SCRATCH_3:
+ case ARIZONA_DSP3_CLOCKING_1:
case ARIZONA_DSP4_STATUS_1:
case ARIZONA_DSP4_STATUS_2:
case ARIZONA_DSP4_STATUS_3:
+ case ARIZONA_DSP4_STATUS_4:
+ case ARIZONA_DSP4_WDMA_BUFFER_1:
+ case ARIZONA_DSP4_WDMA_BUFFER_2:
+ case ARIZONA_DSP4_WDMA_BUFFER_3:
+ case ARIZONA_DSP4_WDMA_BUFFER_4:
+ case ARIZONA_DSP4_WDMA_BUFFER_5:
+ case ARIZONA_DSP4_WDMA_BUFFER_6:
+ case ARIZONA_DSP4_WDMA_BUFFER_7:
+ case ARIZONA_DSP4_WDMA_BUFFER_8:
+ case ARIZONA_DSP4_RDMA_BUFFER_1:
+ case ARIZONA_DSP4_RDMA_BUFFER_2:
+ case ARIZONA_DSP4_RDMA_BUFFER_3:
+ case ARIZONA_DSP4_RDMA_BUFFER_4:
+ case ARIZONA_DSP4_RDMA_BUFFER_5:
+ case ARIZONA_DSP4_RDMA_BUFFER_6:
+ case ARIZONA_DSP4_WDMA_CONFIG_1:
+ case ARIZONA_DSP4_WDMA_CONFIG_2:
+ case ARIZONA_DSP4_WDMA_OFFSET_1:
+ case ARIZONA_DSP4_RDMA_CONFIG_1:
+ case ARIZONA_DSP4_RDMA_OFFSET_1:
+ case ARIZONA_DSP4_EXTERNAL_START_SELECT_1:
case ARIZONA_DSP4_SCRATCH_0:
case ARIZONA_DSP4_SCRATCH_1:
case ARIZONA_DSP4_SCRATCH_2:
case ARIZONA_DSP4_SCRATCH_3:
+ case ARIZONA_DSP4_CLOCKING_1:
return true;
default:
return wm5110_is_adsp_memory(dev, reg);
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 7c1ae24605d9..4ab527f5c53b 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/device.h>
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index 624ff90501cd..cd01f7962dfd 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/interrupt.h>
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index d66d256551fb..e5eae751aa1b 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -161,31 +161,19 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8400 *wm8400;
- int ret;
wm8400 = devm_kzalloc(&i2c->dev, sizeof(struct wm8400), GFP_KERNEL);
- if (wm8400 == NULL) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!wm8400)
+ return -ENOMEM;
wm8400->regmap = devm_regmap_init_i2c(i2c, &wm8400_regmap_config);
- if (IS_ERR(wm8400->regmap)) {
- ret = PTR_ERR(wm8400->regmap);
- goto err;
- }
+ if (IS_ERR(wm8400->regmap))
+ return PTR_ERR(wm8400->regmap);
wm8400->dev = &i2c->dev;
i2c_set_clientdata(i2c, wm8400);
- ret = wm8400_init(wm8400, dev_get_platdata(&i2c->dev));
- if (ret != 0)
- goto err;
-
- return 0;
-
-err:
- return ret;
+ return wm8400_init(wm8400, dev_get_platdata(&i2c->dev));
}
static int wm8400_i2c_remove(struct i2c_client *i2c)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6cb388e8fb7d..1cb74085e410 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -235,7 +235,7 @@ config SGI_XP
config CS5535_MFGPT
tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
- depends on PCI && X86 && MFD_CS5535
+ depends on MFD_CS5535
default n
help
This driver provides access to MFGPT functionality for other
@@ -526,4 +526,5 @@ source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
+source "drivers/misc/echo/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 99b9424ce31d..7eb4b69580c0 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -54,3 +54,4 @@ obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
+obj-$(CONFIG_ECHO) += echo/
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index d3eee113baeb..a43053daad0e 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -72,7 +72,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index 0c6e037153d2..c6cc3dc8ae1f 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 5be808406edc..22de13727641 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -150,6 +150,12 @@ static int ssc_probe(struct platform_device *pdev)
return -ENODEV;
ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat;
+ if (pdev->dev.of_node) {
+ struct device_node *np = pdev->dev.of_node;
+ ssc->clk_from_rk_pin =
+ of_property_read_bool(np, "atmel,clk-from-rk-pin");
+ }
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ssc->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(ssc->regs))
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 820e53d0048f..9b313f7810f5 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -47,7 +47,6 @@
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/of.h>
#include "bmp085.h"
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 9e2b985293fc..14d90eae605b 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -101,7 +101,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/io.h>
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 154b02e5094f..6a672f9ef522 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -32,7 +32,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/string.h>
#include <linux/list.h>
diff --git a/drivers/staging/echo/Kconfig b/drivers/misc/echo/Kconfig
index f1d41ea9cd48..f1d41ea9cd48 100644
--- a/drivers/staging/echo/Kconfig
+++ b/drivers/misc/echo/Kconfig
diff --git a/drivers/staging/echo/Makefile b/drivers/misc/echo/Makefile
index 7d4caac12a8d..7d4caac12a8d 100644
--- a/drivers/staging/echo/Makefile
+++ b/drivers/misc/echo/Makefile
diff --git a/drivers/staging/echo/echo.c b/drivers/misc/echo/echo.c
index 9597e9523cac..9597e9523cac 100644
--- a/drivers/staging/echo/echo.c
+++ b/drivers/misc/echo/echo.c
diff --git a/drivers/staging/echo/echo.h b/drivers/misc/echo/echo.h
index 9b08c63e6369..9b08c63e6369 100644
--- a/drivers/staging/echo/echo.h
+++ b/drivers/misc/echo/echo.h
diff --git a/drivers/staging/echo/fir.h b/drivers/misc/echo/fir.h
index 7b9fabf1fea5..7b9fabf1fea5 100644
--- a/drivers/staging/echo/fir.h
+++ b/drivers/misc/echo/fir.h
diff --git a/drivers/staging/echo/oslec.h b/drivers/misc/echo/oslec.h
index f4175360ce27..f4175360ce27 100644
--- a/drivers/staging/echo/oslec.h
+++ b/drivers/misc/echo/oslec.h
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 4f3bca1003a1..634f72929e12 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index f0fa4e8ca124..33f8673d23a6 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -17,7 +17,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 78e55b501c94..9ebeacdb8ec4 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index e36157d5d3ab..580ff9df5529 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -27,7 +27,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
index 9c34e5704304..3f2b625b2032 100644
--- a/drivers/misc/eeprom/sunxi_sid.c
+++ b/drivers/misc/eeprom/sunxi_sid.c
@@ -21,7 +21,6 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
@@ -96,7 +95,7 @@ static int sunxi_sid_remove(struct platform_device *pdev)
}
static const struct of_device_id sunxi_sid_of_match[] = {
- { .compatible = "allwinner,sun4i-sid", .data = (void *)16},
+ { .compatible = "allwinner,sun4i-a10-sid", .data = (void *)16},
{ .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512},
{/* sentinel */},
};
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index 3bfdc07a7248..50d2096ea1c7 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 170bd3daf336..90520d76633f 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index e3183f26216b..12c30b486b27 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -26,7 +26,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index b7f84dacf822..4a9c50a43afb 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -23,7 +23,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 61fbe6acabef..0a1565e63c71 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 036effe9a795..3ef4627f9cb1 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -23,7 +23,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 7c97550240f1..d324f8a97b88 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index 9aa2bd2a71ae..bd06d0cfac45 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/input.h>
#include <linux/interrupt.h>
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 49c7a23f02fc..d66a2f24f6b3 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -30,6 +30,7 @@
*
* See Documentation/fault-injection/provoke-crashes.txt for instructions
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/fs.h>
@@ -45,6 +46,7 @@
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
+#include <asm/cacheflush.h>
#ifdef CONFIG_IDE
#include <linux/ide.h>
@@ -101,6 +103,7 @@ enum ctype {
CT_EXEC_USERSPACE,
CT_ACCESS_USERSPACE,
CT_WRITE_RO,
+ CT_WRITE_KERN,
};
static char* cp_name[] = {
@@ -137,6 +140,7 @@ static char* cp_type[] = {
"EXEC_USERSPACE",
"ACCESS_USERSPACE",
"WRITE_RO",
+ "WRITE_KERN",
};
static struct jprobe lkdtm;
@@ -316,6 +320,13 @@ static void do_nothing(void)
return;
}
+/* Must immediately follow do_nothing for size calculations to work out. */
+static void do_overwritten(void)
+{
+ pr_info("do_overwritten wasn't overwritten!\n");
+ return;
+}
+
static noinline void corrupt_stack(void)
{
/* Use default char array length that triggers stack protection. */
@@ -328,7 +339,12 @@ static void execute_location(void *dst)
{
void (*func)(void) = dst;
+ pr_info("attempting ok execution at %p\n", do_nothing);
+ do_nothing();
+
memcpy(dst, do_nothing, EXEC_SIZE);
+ flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
+ pr_info("attempting bad execution at %p\n", func);
func();
}
@@ -337,8 +353,13 @@ static void execute_user_location(void *dst)
/* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
+ pr_info("attempting ok execution at %p\n", do_nothing);
+ do_nothing();
+
if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
return;
+ flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
+ pr_info("attempting bad execution at %p\n", func);
func();
}
@@ -463,8 +484,12 @@ static void lkdtm_do_action(enum ctype which)
}
ptr = (unsigned long *)user_addr;
+
+ pr_info("attempting bad read at %p\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
+
+ pr_info("attempting bad write at %p\n", ptr);
*ptr = tmp;
vm_munmap(user_addr, PAGE_SIZE);
@@ -475,10 +500,28 @@ static void lkdtm_do_action(enum ctype which)
unsigned long *ptr;
ptr = (unsigned long *)&rodata;
+
+ pr_info("attempting bad write at %p\n", ptr);
*ptr ^= 0xabcd1234;
break;
}
+ case CT_WRITE_KERN: {
+ size_t size;
+ unsigned char *ptr;
+
+ size = (unsigned long)do_overwritten -
+ (unsigned long)do_nothing;
+ ptr = (unsigned char *)do_overwritten;
+
+ pr_info("attempting bad %zu byte write at %p\n", size, ptr);
+ memcpy(ptr, (unsigned char *)do_nothing, size);
+ flush_icache_range((unsigned long)ptr,
+ (unsigned long)(ptr + size));
+
+ do_overwritten();
+ break;
+ }
case CT_NONE:
default:
break;
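
The CT_WRITE_KERN case above relies on do_overwritten() being emitted immediately after do_nothing(), so the distance between the two entry points gives the length of the first function. The fragment below is a minimal user-space sketch of that size-by-adjacency trick, under the assumption that the compiler keeps the two functions adjacent and in declaration order (no LTO or function reordering); it only copies the code bytes out instead of overwriting read-only kernel text, and every name in it is illustrative rather than taken from the patch.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Two adjacent functions; the second exists only to mark where the first ends. */
static void __attribute__((noinline, used)) probe_target(void)
{
        asm volatile("");       /* keep a real body so the function is not folded away */
}

static void __attribute__((noinline, used)) probe_end_marker(void)
{
        asm volatile("");
}

int main(void)
{
        /* Estimate the size of probe_target() as the distance to the next function. */
        size_t size = (uintptr_t)probe_end_marker - (uintptr_t)probe_target;
        unsigned char copy[4096];

        if (size > sizeof(copy))
                size = sizeof(copy);

        /* Reading the text bytes is harmless; lkdtm instead memcpy()s *over*
         * them, which is exactly what should fault once kernel text is RO. */
        memcpy(copy, (const void *)(uintptr_t)probe_target, size);
        printf("estimated size of probe_target: %zu bytes\n", size);
        return 0;
}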
@@ -493,8 +536,8 @@ static void lkdtm_handler(void)
spin_lock_irqsave(&count_lock, flags);
count--;
- printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
- cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
+ pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
+ cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
if (count == 0) {
do_it = true;
@@ -551,18 +594,18 @@ static int lkdtm_register_cpoint(enum cname which)
lkdtm.kp.symbol_name = "generic_ide_ioctl";
lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
#else
- printk(KERN_INFO "lkdtm: Crash point not available\n");
+ pr_info("Crash point not available\n");
return -EINVAL;
#endif
break;
default:
- printk(KERN_INFO "lkdtm: Invalid Crash Point\n");
+ pr_info("Invalid Crash Point\n");
return -EINVAL;
}
cpoint = which;
if ((ret = register_jprobe(&lkdtm)) < 0) {
- printk(KERN_INFO "lkdtm: Couldn't register jprobe\n");
+ pr_info("Couldn't register jprobe\n");
cpoint = CN_INVALID;
}
@@ -709,8 +752,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
if (type == CT_NONE)
return -EINVAL;
- printk(KERN_INFO "lkdtm: Performing direct entry %s\n",
- cp_type_to_str(type));
+ pr_info("Performing direct entry %s\n", cp_type_to_str(type));
lkdtm_do_action(type);
*off += count;
@@ -772,7 +814,7 @@ static int __init lkdtm_module_init(void)
/* Register debugfs interface */
lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
if (!lkdtm_debugfs_root) {
- printk(KERN_ERR "lkdtm: creating root dir failed\n");
+ pr_err("creating root dir failed\n");
return -ENODEV;
}
@@ -787,28 +829,26 @@ static int __init lkdtm_module_init(void)
de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
NULL, &cur->fops);
if (de == NULL) {
- printk(KERN_ERR "lkdtm: could not create %s\n",
- cur->name);
+ pr_err("could not create %s\n", cur->name);
goto out_err;
}
}
if (lkdtm_parse_commandline() == -EINVAL) {
- printk(KERN_INFO "lkdtm: Invalid command\n");
+ pr_info("Invalid command\n");
goto out_err;
}
if (cpoint != CN_INVALID && cptype != CT_NONE) {
ret = lkdtm_register_cpoint(cpoint);
if (ret < 0) {
- printk(KERN_INFO "lkdtm: Invalid crash point %d\n",
- cpoint);
+ pr_info("Invalid crash point %d\n", cpoint);
goto out_err;
}
- printk(KERN_INFO "lkdtm: Crash point %s of type %s registered\n",
- cpoint_name, cpoint_type);
+ pr_info("Crash point %s of type %s registered\n",
+ cpoint_name, cpoint_type);
} else {
- printk(KERN_INFO "lkdtm: No crash points registered, enable through debugfs\n");
+ pr_info("No crash points registered, enable through debugfs\n");
}
return 0;
@@ -823,7 +863,7 @@ static void __exit lkdtm_module_exit(void)
debugfs_remove_recursive(lkdtm_debugfs_root);
unregister_jprobe(&lkdtm);
- printk(KERN_INFO "lkdtm: Crash point unregistered\n");
+ pr_info("Crash point unregistered\n");
}
module_init(lkdtm_module_init);
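
The log-message cleanups in this file depend on the pr_fmt() define added at the top of lkdtm.c: pr_info()/pr_err() paste that prefix onto every format string at compile time, which is why the literal "lkdtm: " prefixes could be dropped from the messages. A small self-contained illustration of the mechanism, using user-space stand-ins rather than the kernel's printk machinery:

#include <stdio.h>

/* User-space illustration of the kernel's pr_fmt() convention: the macro is
 * pasted in front of every format string at compile time, so callers no
 * longer spell out the "lkdtm: " prefix by hand. */
#define KBUILD_MODNAME "lkdtm"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
                "INT_HARDWARE_ENTRY", "WARNING", 3);
        return 0;
}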
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index c76fa31e9bf6..d23384dde73b 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -34,3 +34,12 @@ config INTEL_MEI_ME
82Q33 Express
82X38/X48 Express
+config INTEL_MEI_TXE
+ tristate "Intel Trusted Execution Environment with ME Interface"
+ select INTEL_MEI
+ depends on X86 && PCI && WATCHDOG_CORE
+ help
+ MEI Support for Trusted Execution Environment device on Intel SoCs
+
+ Supported SoCs:
+ Intel Bay Trail
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 08698a466268..8ebc6cda1373 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -1,6 +1,6 @@
#
# Makefile - Intel Management Engine Interface (Intel MEI) Linux driver
-# Copyright (c) 2010-2011, Intel Corporation.
+# Copyright (c) 2010-2014, Intel Corporation.
#
obj-$(CONFIG_INTEL_MEI) += mei.o
mei-objs := init.o
@@ -17,3 +17,7 @@ mei-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o
mei-me-objs := pci-me.o
mei-me-objs += hw-me.o
+
+obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
+mei-txe-objs := pci-txe.o
+mei-txe-objs += hw-txe.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 2fad84432829..b8deb3455480 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -21,7 +21,6 @@
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/list.h>
@@ -35,7 +34,6 @@
#include "mei_dev.h"
#include "hbm.h"
-#include "hw-me.h"
#include "client.h"
const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
@@ -79,10 +77,9 @@ int mei_amthif_host_init(struct mei_device *dev)
i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
if (i < 0) {
- ret = i;
dev_info(&dev->pdev->dev,
- "amthif: failed to find the client %d\n", ret);
- return ret;
+ "amthif: failed to find the client %d\n", i);
+ return -ENOTTY;
}
cl->me_client_id = dev->me_clients[i].client_id;
@@ -116,14 +113,11 @@ int mei_amthif_host_init(struct mei_device *dev)
cl->state = MEI_FILE_CONNECTING;
- if (mei_hbm_cl_connect_req(dev, cl)) {
- dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n");
- cl->state = MEI_FILE_DISCONNECTED;
- cl->host_client_id = 0;
- } else {
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- }
- return 0;
+ ret = mei_cl_connect(cl, NULL);
+
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+
+ return ret;
}
/**
@@ -137,14 +131,12 @@ int mei_amthif_host_init(struct mei_device *dev)
struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
struct file *file)
{
- struct mei_cl_cb *pos = NULL;
- struct mei_cl_cb *next = NULL;
+ struct mei_cl_cb *cb;
- list_for_each_entry_safe(pos, next,
- &dev->amthif_rd_complete_list.list, list) {
- if (pos->cl && pos->cl == &dev->iamthif_cl &&
- pos->file_object == file)
- return pos;
+ list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list) {
+ if (cb->cl && cb->cl == &dev->iamthif_cl &&
+ cb->file_object == file)
+ return cb;
}
return NULL;
}
@@ -180,14 +172,13 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
/* Only possible if we are in timeout */
if (!cl || cl != &dev->iamthif_cl) {
dev_dbg(&dev->pdev->dev, "bad file ext.\n");
- return -ETIMEDOUT;
+ return -ETIME;
}
i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
-
if (i < 0) {
dev_dbg(&dev->pdev->dev, "amthif client not found.\n");
- return -ENODEV;
+ return -ENOTTY;
}
dev_dbg(&dev->pdev->dev, "checking amthif data\n");
cb = mei_amthif_find_read_list_entry(dev, file);
@@ -228,7 +219,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
dev_dbg(&dev->pdev->dev, "amthif Time out\n");
/* 15 sec for the message has expired */
list_del(&cb->list);
- rets = -ETIMEDOUT;
+ rets = -ETIME;
goto free;
}
}
@@ -253,9 +244,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
* the buf_idx may point beyond */
length = min_t(size_t, length, (cb->buf_idx - *offset));
- if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length))
+ if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
+ dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
- else {
+ } else {
rets = length;
if ((*offset + length) < cb->buf_idx) {
*offset += length;
@@ -302,9 +294,8 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
if (ret < 0)
return ret;
- if (ret && dev->hbuf_is_ready) {
+ if (ret && mei_hbuf_acquire(dev)) {
ret = 0;
- dev->hbuf_is_ready = false;
if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {
mei_hdr.length = mei_hbuf_max_len(dev);
mei_hdr.msg_complete = 0;
@@ -336,10 +327,6 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
list_add_tail(&cb->list, &dev->write_list.list);
}
} else {
- if (!dev->hbuf_is_ready)
- dev_dbg(&dev->pdev->dev, "host buffer is not empty");
-
- dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");
list_add_tail(&cb->list, &dev->write_list.list);
}
return 0;
@@ -365,7 +352,7 @@ int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb)
if (ret)
return ret;
- cb->fop_type = MEI_FOP_IOCTL;
+ cb->fop_type = MEI_FOP_WRITE;
if (!list_empty(&dev->amthif_cmd_list.list) ||
dev->iamthif_state != MEI_IAMTHIF_IDLE) {
@@ -447,23 +434,23 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
/**
- * mei_amthif_irq_write_completed - processes completed iamthif operation.
+ * mei_amthif_irq_write - write iamthif command in irq thread context.
*
* @dev: the device structure.
- * @slots: free slots.
* @cb_pos: callback block.
* @cl: private data of the file object.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
-int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list)
+int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev = cl->dev;
struct mei_msg_hdr mei_hdr;
size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
u32 msg_slots = mei_data2slots(len);
+ int slots;
int rets;
rets = mei_cl_flow_ctrl_creds(cl);
@@ -480,13 +467,15 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
mei_hdr.reserved = 0;
mei_hdr.internal = 0;
- if (*slots >= msg_slots) {
+ slots = mei_hbuf_empty_slots(dev);
+
+ if (slots >= msg_slots) {
mei_hdr.length = len;
mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
- } else if (*slots == dev->hbuf_depth) {
- msg_slots = *slots;
- len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
+ } else if (slots == dev->hbuf_depth) {
+ msg_slots = slots;
+ len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
mei_hdr.length = len;
mei_hdr.msg_complete = 0;
} else {
@@ -496,7 +485,6 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
- *slots -= msg_slots;
rets = mei_write_message(dev, &mei_hdr,
dev->iamthif_msg_buf + dev->iamthif_msg_buf_index);
if (rets) {
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 4bc7d620d695..ddc5ac92a200 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -26,7 +26,6 @@
#include <linux/mei_cl_bus.h>
#include "mei_dev.h"
-#include "hw-me.h"
#include "client.h"
#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
@@ -145,9 +144,9 @@ static struct device_type mei_cl_device_type = {
static struct mei_cl *mei_bus_find_mei_cl_by_uuid(struct mei_device *dev,
uuid_le uuid)
{
- struct mei_cl *cl, *next;
+ struct mei_cl *cl;
- list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
+ list_for_each_entry(cl, &dev->device_list, device_link) {
if (!uuid_le_cmp(uuid, cl->device_uuid))
return cl;
}
@@ -524,6 +523,22 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)
schedule_work(&device->event_work);
}
+void mei_cl_bus_remove_devices(struct mei_device *dev)
+{
+ struct mei_cl *cl, *next;
+
+ mutex_lock(&dev->device_lock);
+ list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
+ if (cl->device)
+ mei_cl_remove_device(cl->device);
+
+ list_del(&cl->device_link);
+ mei_cl_unlink(cl);
+ kfree(cl);
+ }
+ mutex_unlock(&dev->device_lock);
+}
+
int __init mei_cl_bus_init(void)
{
return bus_register(&mei_cl_bus_type);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 89a557972d1b..8c078b808cd3 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -29,20 +29,21 @@
* mei_me_cl_by_uuid - locate index of me client
*
* @dev: mei device
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
* returns me client index or -ENOENT if not found
*/
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
{
- int i, res = -ENOENT;
+ int i;
for (i = 0; i < dev->me_clients_num; ++i)
if (uuid_le_cmp(*uuid,
- dev->me_clients[i].props.protocol_name) == 0) {
- res = i;
- break;
- }
+ dev->me_clients[i].props.protocol_name) == 0)
+ return i;
- return res;
+ return -ENOENT;
}
@@ -60,37 +61,79 @@ int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
int i;
+
for (i = 0; i < dev->me_clients_num; i++)
if (dev->me_clients[i].client_id == client_id)
- break;
- if (WARN_ON(dev->me_clients[i].client_id != client_id))
- return -ENOENT;
+ return i;
- if (i == dev->me_clients_num)
- return -ENOENT;
-
- return i;
+ return -ENOENT;
}
/**
- * mei_io_list_flush - removes list entry belonging to cl.
+ * mei_cl_cmp_id - tells if the clients are the same
*
- * @list: An instance of our list structure
- * @cl: host client
+ * @cl1: host client 1
+ * @cl2: host client 2
+ *
+ * returns true - if the clients has same host and me ids
+ * false - otherwise
+ */
+static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
+ const struct mei_cl *cl2)
+{
+ return cl1 && cl2 &&
+ (cl1->host_client_id == cl2->host_client_id) &&
+ (cl1->me_client_id == cl2->me_client_id);
+}
+
+/**
+ * mei_io_list_flush - removes cbs belonging to cl.
+ *
+ * @list: an instance of our list structure
+ * @cl: host client, can be NULL for flushing the whole list
+ * @free: whether to free the cbs
*/
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+static void __mei_io_list_flush(struct mei_cl_cb *list,
+ struct mei_cl *cl, bool free)
{
struct mei_cl_cb *cb;
struct mei_cl_cb *next;
+ /* enable removing everything if no cl is specified */
list_for_each_entry_safe(cb, next, &list->list, list) {
- if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
+ if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
list_del(&cb->list);
+ if (free)
+ mei_io_cb_free(cb);
+ }
}
}
/**
+ * mei_io_list_flush - removes list entry belonging to cl.
+ *
+ * @list: An instance of our list structure
+ * @cl: host client
+ */
+static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+{
+ __mei_io_list_flush(list, cl, false);
+}
+
+
+/**
+ * mei_io_list_free - removes cb belonging to cl and free them
+ *
+ * @list: An instance of our list structure
+ * @cl: host client
+ */
+static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
+{
+ __mei_io_list_flush(list, cl, true);
+}
+
+/**
* mei_io_cb_free - free mei_cb_private related memory
*
* @cb: mei callback struct
@@ -196,8 +239,8 @@ int mei_cl_flush_queues(struct mei_cl *cl)
cl_dbg(dev, cl, "remove list entry belonging to cl\n");
mei_io_list_flush(&cl->dev->read_list, cl);
- mei_io_list_flush(&cl->dev->write_list, cl);
- mei_io_list_flush(&cl->dev->write_waiting_list, cl);
+ mei_io_list_free(&cl->dev->write_list, cl);
+ mei_io_list_free(&cl->dev->write_waiting_list, cl);
mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
@@ -254,10 +297,9 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
- struct mei_cl_cb *cb = NULL;
- struct mei_cl_cb *next = NULL;
+ struct mei_cl_cb *cb;
- list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
+ list_for_each_entry(cb, &dev->read_list.list, list)
if (mei_cl_cmp_id(cl, cb->cl))
return cb;
return NULL;
@@ -375,6 +417,23 @@ void mei_host_client_init(struct work_struct *work)
mutex_unlock(&dev->device_lock);
}
+/**
+ * mei_hbuf_acquire: try to acquire host buffer
+ *
+ * @dev: the device structure
+ * returns true if host buffer was acquired
+ */
+bool mei_hbuf_acquire(struct mei_device *dev)
+{
+ if (!dev->hbuf_is_ready) {
+ dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
+ return false;
+ }
+
+ dev->hbuf_is_ready = false;
+
+ return true;
+}
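
mei_hbuf_acquire() replaces the open-coded "test and clear dev->hbuf_is_ready" sequence at every call site. The fragment below sketches the resulting call-site shape; it is illustrative only, and send_request() is a hypothetical stand-in for the various HBM send helpers used in the real call sites.

/* Sketch of the call-site pattern built around mei_hbuf_acquire():
 * try to take ownership of the host buffer and either send now or
 * queue the control block for the irq write handler to pick up.
 */
static int send_or_queue(struct mei_device *dev, struct mei_cl_cb *cb)
{
        if (mei_hbuf_acquire(dev))
                return send_request(dev, cb);   /* hypothetical send helper */

        /* Buffer busy: leave the cb on the control write list for later. */
        list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        return 0;
}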
/**
* mei_cl_disconnect - disconnect host client from the me one
@@ -406,8 +465,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
return -ENOMEM;
cb->fop_type = MEI_FOP_CLOSE;
- if (dev->hbuf_is_ready) {
- dev->hbuf_is_ready = false;
+ if (mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_disconnect_req(dev, cl)) {
rets = -ENODEV;
cl_err(dev, cl, "failed to disconnect.\n");
@@ -461,17 +519,17 @@ free:
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
struct mei_device *dev;
- struct mei_cl *pos;
- struct mei_cl *next;
+ struct mei_cl *ocl; /* the other client */
if (WARN_ON(!cl || !cl->dev))
return false;
dev = cl->dev;
- list_for_each_entry_safe(pos, next, &dev->file_list, link) {
- if ((pos->state == MEI_FILE_CONNECTING) &&
- (pos != cl) && cl->me_client_id == pos->me_client_id)
+ list_for_each_entry(ocl, &dev->file_list, link) {
+ if (ocl->state == MEI_FILE_CONNECTING &&
+ ocl != cl &&
+ cl->me_client_id == ocl->me_client_id)
return true;
}
@@ -505,11 +563,10 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
goto out;
}
- cb->fop_type = MEI_FOP_IOCTL;
-
- if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
- dev->hbuf_is_ready = false;
+ cb->fop_type = MEI_FOP_CONNECT;
+ /* run hbuf acquire last so we don't have to undo */
+ if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_connect_req(dev, cl)) {
rets = -ENODEV;
goto out;
@@ -521,18 +578,19 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
}
mutex_unlock(&dev->device_lock);
- rets = wait_event_timeout(dev->wait_recvd_msg,
- (cl->state == MEI_FILE_CONNECTED ||
- cl->state == MEI_FILE_DISCONNECTED),
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ wait_event_timeout(dev->wait_recvd_msg,
+ (cl->state == MEI_FILE_CONNECTED ||
+ cl->state == MEI_FILE_DISCONNECTED),
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (cl->state != MEI_FILE_CONNECTED) {
- rets = -EFAULT;
+ /* something went really wrong */
+ if (!cl->status)
+ cl->status = -EFAULT;
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
- goto out;
}
rets = cl->status;
@@ -554,7 +612,8 @@ out:
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
struct mei_device *dev;
- int i;
+ struct mei_me_client *me_cl;
+ int id;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
@@ -567,19 +626,19 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
if (cl->mei_flow_ctrl_creds > 0)
return 1;
- for (i = 0; i < dev->me_clients_num; i++) {
- struct mei_me_client *me_cl = &dev->me_clients[i];
- if (me_cl->client_id == cl->me_client_id) {
- if (me_cl->mei_flow_ctrl_creds) {
- if (WARN_ON(me_cl->props.single_recv_buf == 0))
- return -EINVAL;
- return 1;
- } else {
- return 0;
- }
- }
+ id = mei_me_cl_by_id(dev, cl->me_client_id);
+ if (id < 0) {
+ cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
+ return id;
}
- return -ENOENT;
+
+ me_cl = &dev->me_clients[id];
+ if (me_cl->mei_flow_ctrl_creds) {
+ if (WARN_ON(me_cl->props.single_recv_buf == 0))
+ return -EINVAL;
+ return 1;
+ }
+ return 0;
}
/**
@@ -595,32 +654,31 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
struct mei_device *dev;
- int i;
+ struct mei_me_client *me_cl;
+ int id;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
- if (!dev->me_clients_num)
- return -ENOENT;
+ id = mei_me_cl_by_id(dev, cl->me_client_id);
+ if (id < 0) {
+ cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
+ return id;
+ }
- for (i = 0; i < dev->me_clients_num; i++) {
- struct mei_me_client *me_cl = &dev->me_clients[i];
- if (me_cl->client_id == cl->me_client_id) {
- if (me_cl->props.single_recv_buf != 0) {
- if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
- dev->me_clients[i].mei_flow_ctrl_creds--;
- } else {
- if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
- cl->mei_flow_ctrl_creds--;
- }
- return 0;
- }
+ me_cl = &dev->me_clients[id];
+ if (me_cl->props.single_recv_buf != 0) {
+ if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ me_cl->mei_flow_ctrl_creds--;
+ } else {
+ if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ cl->mei_flow_ctrl_creds--;
}
- return -ENOENT;
+ return 0;
}
/**
@@ -652,7 +710,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
i = mei_me_cl_by_id(dev, cl->me_client_id);
if (i < 0) {
cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
- return -ENODEV;
+ return -ENOTTY;
}
cb = mei_io_cb_init(cl, NULL);
@@ -666,8 +724,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
goto err;
cb->fop_type = MEI_FOP_READ;
- if (dev->hbuf_is_ready) {
- dev->hbuf_is_ready = false;
+ if (mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_flow_control_req(dev, cl)) {
cl_err(dev, cl, "flow control send failed\n");
rets = -ENODEV;
@@ -687,27 +744,26 @@ err:
}
/**
- * mei_cl_irq_write_complete - write a message to device
+ * mei_cl_irq_write - write a message to device
* from the interrupt thread context
*
* @cl: client
* @cb: callback block.
- * @slots: free slots.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise error.
*/
-int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list)
+int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr mei_hdr;
size_t len;
u32 msg_slots;
+ int slots;
int rets;
-
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -724,6 +780,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
+ slots = mei_hbuf_empty_slots(dev);
len = buf->size - cb->buf_idx;
msg_slots = mei_data2slots(len);
@@ -732,13 +789,13 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
mei_hdr.reserved = 0;
mei_hdr.internal = cb->internal;
- if (*slots >= msg_slots) {
+ if (slots >= msg_slots) {
mei_hdr.length = len;
mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
- } else if (*slots == dev->hbuf_depth) {
- msg_slots = *slots;
- len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
+ } else if (slots == dev->hbuf_depth) {
+ msg_slots = slots;
+ len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
mei_hdr.length = len;
mei_hdr.msg_complete = 0;
} else {
@@ -749,7 +806,6 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
cb->request_buffer.size, cb->buf_idx);
- *slots -= msg_slots;
rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
if (rets) {
cl->status = rets;
@@ -802,21 +858,29 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
cb->fop_type = MEI_FOP_WRITE;
+ cb->buf_idx = 0;
+ cl->writing_state = MEI_IDLE;
+
+ mei_hdr.host_addr = cl->host_client_id;
+ mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.reserved = 0;
+ mei_hdr.msg_complete = 0;
+ mei_hdr.internal = cb->internal;
rets = mei_cl_flow_ctrl_creds(cl);
if (rets < 0)
goto err;
- /* Host buffer is not ready, we queue the request */
- if (rets == 0 || !dev->hbuf_is_ready) {
- cb->buf_idx = 0;
- /* unseting complete will enqueue the cb for write */
- mei_hdr.msg_complete = 0;
+ if (rets == 0) {
+ cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
+ rets = buf->size;
+ goto out;
+ }
+ if (!mei_hbuf_acquire(dev)) {
+ cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
rets = buf->size;
goto out;
}
-
- dev->hbuf_is_ready = false;
/* Check for a maximum length */
if (buf->size > mei_hbuf_max_len(dev)) {
@@ -827,12 +891,6 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
mei_hdr.msg_complete = 1;
}
- mei_hdr.host_addr = cl->host_client_id;
- mei_hdr.me_addr = cl->me_client_id;
- mei_hdr.reserved = 0;
- mei_hdr.internal = cb->internal;
-
-
rets = mei_write_message(dev, &mei_hdr, buf->data);
if (rets)
goto err;
@@ -840,13 +898,12 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
cl->writing_state = MEI_WRITING;
cb->buf_idx = mei_hdr.length;
- rets = buf->size;
out:
if (mei_hdr.msg_complete) {
- if (mei_cl_flow_ctrl_reduce(cl)) {
- rets = -ENODEV;
+ rets = mei_cl_flow_ctrl_reduce(cl);
+ if (rets < 0)
goto err;
- }
+
list_add_tail(&cb->list, &dev->write_waiting_list.list);
} else {
list_add_tail(&cb->list, &dev->write_list.list);
@@ -856,15 +913,18 @@ out:
if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
mutex_unlock(&dev->device_lock);
- if (wait_event_interruptible(cl->tx_wait,
- cl->writing_state == MEI_WRITE_COMPLETE)) {
- if (signal_pending(current))
- rets = -EINTR;
- else
- rets = -ERESTARTSYS;
- }
+ rets = wait_event_interruptible(cl->tx_wait,
+ cl->writing_state == MEI_WRITE_COMPLETE);
mutex_lock(&dev->device_lock);
+ /* wait_event_interruptible returns -ERESTARTSYS */
+ if (rets) {
+ if (signal_pending(current))
+ rets = -EINTR;
+ goto err;
+ }
}
+
+ rets = buf->size;
err:
return rets;
}
@@ -905,9 +965,9 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
void mei_cl_all_disconnect(struct mei_device *dev)
{
- struct mei_cl *cl, *next;
+ struct mei_cl *cl;
- list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ list_for_each_entry(cl, &dev->file_list, link) {
cl->state = MEI_FILE_DISCONNECTED;
cl->mei_flow_ctrl_creds = 0;
cl->timer_count = 0;
@@ -922,8 +982,8 @@ void mei_cl_all_disconnect(struct mei_device *dev)
*/
void mei_cl_all_wakeup(struct mei_device *dev)
{
- struct mei_cl *cl, *next;
- list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ struct mei_cl *cl;
+ list_for_each_entry(cl, &dev->file_list, link) {
if (waitqueue_active(&cl->rx_wait)) {
cl_dbg(dev, cl, "Waking up reading client!\n");
wake_up_interruptible(&cl->rx_wait);
@@ -942,20 +1002,8 @@ void mei_cl_all_wakeup(struct mei_device *dev)
*/
void mei_cl_all_write_clear(struct mei_device *dev)
{
- struct mei_cl_cb *cb, *next;
- struct list_head *list;
-
- list = &dev->write_list.list;
- list_for_each_entry_safe(cb, next, list, list) {
- list_del(&cb->list);
- mei_io_cb_free(cb);
- }
-
- list = &dev->write_waiting_list.list;
- list_for_each_entry_safe(cb, next, list, list) {
- list_del(&cb->list);
- mei_io_cb_free(cb);
- }
+ mei_io_list_free(&dev->write_list, NULL);
+ mei_io_list_free(&dev->write_waiting_list, NULL);
}
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index c8396e582f1c..96d5de0389f9 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -45,8 +45,6 @@ static inline void mei_io_list_init(struct mei_cl_cb *list)
{
INIT_LIST_HEAD(&list->list);
}
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
-
/*
* MEI Host Client Functions
*/
@@ -61,22 +59,6 @@ int mei_cl_unlink(struct mei_cl *cl);
int mei_cl_flush_queues(struct mei_cl *cl);
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
-/**
- * mei_cl_cmp_id - tells if file private data have same id
- *
- * @fe1: private data of 1. file object
- * @fe2: private data of 2. file object
- *
- * returns true - if ids are the same and not NULL
- */
-static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
- const struct mei_cl *cl2)
-{
- return cl1 && cl2 &&
- (cl1->host_client_id == cl2->host_client_id) &&
- (cl1->me_client_id == cl2->me_client_id);
-}
-
int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
@@ -86,15 +68,15 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
*/
static inline bool mei_cl_is_connected(struct mei_cl *cl)
{
- return (cl->dev &&
+ return cl->dev &&
cl->dev->dev_state == MEI_DEV_ENABLED &&
- cl->state == MEI_FILE_CONNECTED);
+ cl->state == MEI_FILE_CONNECTED;
}
static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
{
- return (MEI_FILE_INITIALIZING == cl->state ||
+ return MEI_FILE_INITIALIZING == cl->state ||
MEI_FILE_DISCONNECTED == cl->state ||
- MEI_FILE_DISCONNECTING == cl->state);
+ MEI_FILE_DISCONNECTING == cl->state;
}
bool mei_cl_is_other_connecting(struct mei_cl *cl);
@@ -102,8 +84,8 @@ int mei_cl_disconnect(struct mei_cl *cl);
int mei_cl_connect(struct mei_cl *cl, struct file *file);
int mei_cl_read_start(struct mei_cl *cl, size_t length);
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
-int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list);
+int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list);
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index a3ae154444b2..ced5b777c70f 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -75,6 +75,54 @@ static const struct file_operations mei_dbgfs_fops_meclients = {
.llseek = generic_file_llseek,
};
+static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mei_device *dev = fp->private_data;
+ struct mei_cl *cl;
+ const size_t bufsz = 1024;
+ char *buf;
+ int i = 0;
+ int pos = 0;
+ int ret;
+
+ if (!dev)
+ return -ENODEV;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " |me|host|state|rd|wr|\n");
+
+ mutex_lock(&dev->device_lock);
+
+ /* if the driver is not enabled the list won't be consistent */
+ if (dev->dev_state != MEI_DEV_ENABLED)
+ goto out;
+
+ list_for_each_entry(cl, &dev->file_list, link) {
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%2d|%2d|%4d|%5d|%2d|%2d|\n",
+ i, cl->me_client_id, cl->host_client_id, cl->state,
+ cl->reading_state, cl->writing_state);
+ i++;
+ }
+out:
+ mutex_unlock(&dev->device_lock);
+ ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations mei_dbgfs_fops_active = {
+ .open = simple_open,
+ .read = mei_dbgfs_read_active,
+ .llseek = generic_file_llseek,
+};
+
static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -128,6 +176,12 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
dev_err(&dev->pdev->dev, "meclients: registration failed\n");
goto err;
}
+ f = debugfs_create_file("active", S_IRUSR, dir,
+ dev, &mei_dbgfs_fops_active);
+ if (!f) {
+ dev_err(&dev->pdev->dev, "meclients: registration failed\n");
+ goto err;
+ }
f = debugfs_create_file("devstate", S_IRUSR, dir,
dev, &mei_dbgfs_fops_devstate);
if (!f) {
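
Once the module is loaded and debugfs is mounted, the new "active" entry can be read like any plain text file. A minimal user-space reader is sketched below; the mount point and the per-device directory name used in the path are assumptions and may differ on a given system.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* Path is an assumption: the debugfs mount point and device
         * directory name ("mei0" here) are only examples. */
        const char *path = "/sys/kernel/debug/mei0/active";
        char line[128];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return EXIT_FAILURE;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* prints the |me|host|state|rd|wr| table */
        fclose(f);
        return 0;
}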
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 28cd74c073b9..4960288e543a 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -21,7 +21,41 @@
#include "mei_dev.h"
#include "hbm.h"
-#include "hw-me.h"
+#include "client.h"
+
+static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status)
+{
+#define MEI_CL_CS(status) case MEI_CL_CONN_##status: return #status
+ switch (status) {
+ MEI_CL_CS(SUCCESS);
+ MEI_CL_CS(NOT_FOUND);
+ MEI_CL_CS(ALREADY_STARTED);
+ MEI_CL_CS(OUT_OF_RESOURCES);
+ MEI_CL_CS(MESSAGE_SMALL);
+ default: return "unknown";
+ }
+#undef MEI_CL_CS
+}
+
+/**
+ * mei_cl_conn_status_to_errno - convert client connect response
+ * status to error code
+ *
+ * @status: client connect response status
+ *
+ * returns corresponding error code
+ */
+static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
+{
+ switch (status) {
+ case MEI_CL_CONN_SUCCESS: return 0;
+ case MEI_CL_CONN_NOT_FOUND: return -ENOTTY;
+ case MEI_CL_CONN_ALREADY_STARTED: return -EBUSY;
+ case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY;
+ case MEI_CL_CONN_MESSAGE_SMALL: return -EINVAL;
+ default: return -EINVAL;
+ }
+}
/**
* mei_hbm_me_cl_allocate - allocates storage for me clients
@@ -100,33 +134,6 @@ bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
/**
- * is_treat_specially_client - checks if the message belongs
- * to the file private data.
- *
- * @cl: private data of the file object
- * @rs: connect response bus message
- *
- */
-static bool is_treat_specially_client(struct mei_cl *cl,
- struct hbm_client_connect_response *rs)
-{
- if (mei_hbm_cl_addr_equal(cl, rs)) {
- if (!rs->status) {
- cl->state = MEI_FILE_CONNECTED;
- cl->status = 0;
-
- } else {
- cl->state = MEI_FILE_DISCONNECTED;
- cl->status = -ENODEV;
- }
- cl->timer_count = 0;
-
- return true;
- }
- return false;
-}
-
-/**
* mei_hbm_idle - set hbm to idle state
*
* @dev: the device structure
@@ -147,13 +154,13 @@ int mei_hbm_start_wait(struct mei_device *dev)
ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
dev->hbm_state == MEI_HBM_IDLE ||
dev->hbm_state >= MEI_HBM_STARTED,
- mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
+ mei_secs_to_jiffies(MEI_HBM_TIMEOUT));
mutex_lock(&dev->device_lock);
if (ret <= 0 && (dev->hbm_state <= MEI_HBM_START)) {
dev->hbm_state = MEI_HBM_IDLE;
dev_err(&dev->pdev->dev, "waiting for mei start failed\n");
- return -ETIMEDOUT;
+ return -ETIME;
}
return 0;
}
@@ -283,17 +290,18 @@ static int mei_hbm_prop_req(struct mei_device *dev)
}
/**
- * mei_hbm_stop_req_prepare - prepare stop request message
+ * mei_hbm_stop_req - send stop request message
*
* @dev - mei device
- * @mei_hdr - mei message header
- * @data - hbm message body buffer
+ *
+ * This function returns -EIO on write failure
*/
-static void mei_hbm_stop_req_prepare(struct mei_device *dev,
- struct mei_msg_hdr *mei_hdr, unsigned char *data)
+static int mei_hbm_stop_req(struct mei_device *dev)
{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_host_stop_request *req =
- (struct hbm_host_stop_request *)data;
+ (struct hbm_host_stop_request *)dev->wr_msg.data;
const size_t len = sizeof(struct hbm_host_stop_request);
mei_hbm_hdr(mei_hdr, len);
@@ -301,6 +309,8 @@ static void mei_hbm_stop_req_prepare(struct mei_device *dev,
memset(req, 0, len);
req->hbm_cmd = HOST_STOP_REQ_CMD;
req->reason = DRIVER_STOP_REQUEST;
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
}
/**
@@ -319,8 +329,7 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
mei_hbm_hdr(mei_hdr, len);
mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len);
- dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
- cl->host_client_id, cl->me_client_id);
+ cl_dbg(dev, cl, "sending flow control\n");
return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
}
@@ -330,27 +339,34 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
*
* @dev: the device structure
* @flow: flow control.
+ *
+ * return 0 on success, < 0 otherwise
*/
-static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
+static int mei_hbm_add_single_flow_creds(struct mei_device *dev,
struct hbm_flow_control *flow)
{
- struct mei_me_client *client;
- int i;
-
- for (i = 0; i < dev->me_clients_num; i++) {
- client = &dev->me_clients[i];
- if (client && flow->me_addr == client->client_id) {
- if (client->props.single_recv_buf) {
- client->mei_flow_ctrl_creds++;
- dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
- flow->me_addr);
- dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
- client->mei_flow_ctrl_creds);
- } else {
- BUG(); /* error in flow control */
- }
- }
+ struct mei_me_client *me_cl;
+ int id;
+
+ id = mei_me_cl_by_id(dev, flow->me_addr);
+ if (id < 0) {
+ dev_err(&dev->pdev->dev, "no such me client %d\n",
+ flow->me_addr);
+ return id;
}
+
+ me_cl = &dev->me_clients[id];
+ if (me_cl->props.single_recv_buf) {
+ me_cl->mei_flow_ctrl_creds++;
+ dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
+ flow->me_addr);
+ dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
+ me_cl->mei_flow_ctrl_creds);
+ } else {
+ BUG(); /* error in flow control */
+ }
+
+ return 0;
}
/**
@@ -362,8 +378,7 @@ static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
struct hbm_flow_control *flow_control)
{
- struct mei_cl *cl = NULL;
- struct mei_cl *next = NULL;
+ struct mei_cl *cl;
if (!flow_control->host_addr) {
/* single receive buffer */
@@ -372,7 +387,7 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
}
/* normal connection */
- list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ list_for_each_entry(cl, &dev->file_list, link) {
if (mei_hbm_cl_addr_equal(cl, flow_control)) {
cl->mei_flow_ctrl_creds++;
dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
@@ -405,6 +420,25 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
}
/**
+ * mei_hbm_cl_disconnect_rsp - sends disconnect response to the FW
+ *
+ * @dev: the device structure
+ * @cl: a client to disconnect from
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ const size_t len = sizeof(struct hbm_client_connect_response);
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, dev->wr_msg.data, len);
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
* mei_hbm_cl_disconnect_res - disconnect response from ME
*
* @dev: the device structure
@@ -414,29 +448,23 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev,
struct hbm_client_connect_response *rs)
{
struct mei_cl *cl;
- struct mei_cl_cb *pos = NULL, *next = NULL;
-
- dev_dbg(&dev->pdev->dev,
- "disconnect_response:\n"
- "ME Client = %d\n"
- "Host Client = %d\n"
- "Status = %d\n",
- rs->me_addr,
- rs->host_addr,
- rs->status);
-
- list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
- cl = pos->cl;
-
- if (!cl) {
- list_del(&pos->list);
+ struct mei_cl_cb *cb, *next;
+
+ dev_dbg(&dev->pdev->dev, "hbm: disconnect response cl:host=%02d me=%02d status=%d\n",
+ rs->me_addr, rs->host_addr, rs->status);
+
+ list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) {
+ cl = cb->cl;
+
+ /* this should not happen */
+ if (WARN_ON(!cl)) {
+ list_del(&cb->list);
return;
}
- dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
if (mei_hbm_cl_addr_equal(cl, rs)) {
- list_del(&pos->list);
- if (!rs->status)
+ list_del(&cb->list);
+ if (rs->status == MEI_CL_DISCONN_SUCCESS)
cl->state = MEI_FILE_DISCONNECTED;
cl->status = 0;
@@ -476,46 +504,41 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev,
{
struct mei_cl *cl;
- struct mei_cl_cb *pos = NULL, *next = NULL;
+ struct mei_cl_cb *cb, *next;
- dev_dbg(&dev->pdev->dev,
- "connect_response:\n"
- "ME Client = %d\n"
- "Host Client = %d\n"
- "Status = %d\n",
- rs->me_addr,
- rs->host_addr,
- rs->status);
+ dev_dbg(&dev->pdev->dev, "hbm: connect response cl:host=%02d me=%02d status=%s\n",
+ rs->me_addr, rs->host_addr,
+ mei_cl_conn_status_str(rs->status));
- /* if WD or iamthif client treat specially */
+ cl = NULL;
- if (is_treat_specially_client(&dev->wd_cl, rs)) {
- dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
- mei_watchdog_register(dev);
+ list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) {
- return;
- }
+ cl = cb->cl;
+ /* this should not happen */
+ if (WARN_ON(!cl)) {
+ list_del_init(&cb->list);
+ continue;
+ }
- if (is_treat_specially_client(&dev->iamthif_cl, rs)) {
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- return;
- }
- list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+ if (cb->fop_type != MEI_FOP_CONNECT)
+ continue;
- cl = pos->cl;
- if (!cl) {
- list_del(&pos->list);
- return;
- }
- if (pos->fop_type == MEI_FOP_IOCTL) {
- if (is_treat_specially_client(cl, rs)) {
- list_del(&pos->list);
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
+ if (mei_hbm_cl_addr_equal(cl, rs)) {
+ list_del(&cb->list);
+ break;
}
}
+
+ if (!cl)
+ return;
+
+ cl->timer_count = 0;
+ if (rs->status == MEI_CL_CONN_SUCCESS)
+ cl->state = MEI_FILE_CONNECTED;
+ else
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->status = mei_cl_conn_status_to_errno(rs->status);
}
@@ -525,32 +548,34 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev,
*
* @dev: the device structure.
* @disconnect_req: disconnect request bus message from the me
+ *
+ * returns -ENOMEM on allocation failure
*/
-static void mei_hbm_fw_disconnect_req(struct mei_device *dev,
+static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
struct hbm_client_connect_request *disconnect_req)
{
- struct mei_cl *cl, *next;
- const size_t len = sizeof(struct hbm_client_connect_response);
+ struct mei_cl *cl;
+ struct mei_cl_cb *cb;
- list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ list_for_each_entry(cl, &dev->file_list, link) {
if (mei_hbm_cl_addr_equal(cl, disconnect_req)) {
dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
disconnect_req->host_addr,
disconnect_req->me_addr);
cl->state = MEI_FILE_DISCONNECTED;
cl->timer_count = 0;
- if (cl == &dev->wd_cl)
- dev->wd_pending = false;
- else if (cl == &dev->iamthif_cl)
- dev->iamthif_timer = 0;
-
- /* prepare disconnect response */
- mei_hbm_hdr(&dev->wr_ext_msg.hdr, len);
- mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD,
- dev->wr_ext_msg.data, len);
+
+ cb = mei_io_cb_init(cl, NULL);
+ if (!cb)
+ return -ENOMEM;
+ cb->fop_type = MEI_FOP_DISCONNECT_RSP;
+ cl_dbg(dev, cl, "add disconnect response as first\n");
+ list_add(&cb->list, &dev->ctrl_wr_list.list);
+
break;
}
}
+ return 0;
}
@@ -629,10 +654,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n");
dev->hbm_state = MEI_HBM_STOPPED;
- mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
- dev->wr_msg.data);
- if (mei_write_message(dev, &dev->wr_msg.hdr,
- dev->wr_msg.data)) {
+ if (mei_hbm_stop_req(dev)) {
dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n");
return -EIO;
}
@@ -778,10 +800,11 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
case ME_STOP_REQ_CMD:
dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n");
-
dev->hbm_state = MEI_HBM_STOPPED;
- mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
- dev->wr_ext_msg.data);
+ if (mei_hbm_stop_req(dev)) {
+ dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n");
+ return -EIO;
+ }
break;
default:
BUG();
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 5f92188a5cd7..20e8782711c0 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -54,6 +54,7 @@ int mei_hbm_start_req(struct mei_device *dev);
int mei_hbm_start_wait(struct mei_device *dev);
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl);
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
bool mei_hbm_version_is_supported(struct mei_device *dev);
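
With the change above, the disconnect response is no longer written inline from the HBM dispatcher; a control block with the MEI_FOP_DISCONNECT_RSP type is queued on ctrl_wr_list and the interrupt write path sends it via mei_hbm_cl_disconnect_rsp(). The real consumer lives in interrupt.c, which is not part of this diff; the fragment below is only a rough sketch of what that consumer does.

/* Rough sketch of how the irq write path could consume the queued
 * disconnect-response control block (illustrative, not the driver code).
 */
static int sketch_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb)
{
        struct mei_device *dev = cl->dev;
        int ret;

        ret = mei_hbm_cl_disconnect_rsp(dev, cl);
        cl->state = MEI_FILE_DISCONNECTED;
        cl->status = 0;

        /* The response is a one-shot: drop and free the control block. */
        list_del(&cb->list);
        mei_io_cb_free(cb);

        return ret;
}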
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 6f656c053b14..8dbdaaef1af5 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -20,10 +20,10 @@
#include <linux/interrupt.h>
#include "mei_dev.h"
-#include "hw-me.h"
-
#include "hbm.h"
+#include "hw-me.h"
+#include "hw-me-regs.h"
/**
* mei_me_reg_read - Reads 32bit data from the mei device
@@ -240,11 +240,11 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
err = wait_event_interruptible_timeout(dev->wait_hw_ready,
dev->recvd_hw_ready,
- mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
+ mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
mutex_lock(&dev->device_lock);
if (!err && !dev->recvd_hw_ready) {
if (!err)
- err = -ETIMEDOUT;
+ err = -ETIME;
dev_err(&dev->pdev->dev,
"wait hw ready failed. status = %d\n", err);
return err;
@@ -303,7 +303,7 @@ static bool mei_me_hbuf_is_empty(struct mei_device *dev)
*
* @dev: the device structure
*
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count
+ * returns -EOVERFLOW if overflow, otherwise empty slots count
*/
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
@@ -326,7 +326,7 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
/**
- * mei_write_message - writes a message to mei device.
+ * mei_me_write_message - writes a message to mei device.
*
* @dev: the device structure
* @header: mei HECI header of message
@@ -354,7 +354,7 @@ static int mei_me_write_message(struct mei_device *dev,
dw_cnt = mei_data2slots(length);
if (empty_slots < 0 || dw_cnt > empty_slots)
- return -EIO;
+ return -EMSGSIZE;
mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
@@ -381,7 +381,7 @@ static int mei_me_write_message(struct mei_device *dev,
*
* @dev: the device structure
*
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count
+ * returns -EOVERFLOW if overflow, otherwise filled slots count
*/
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
@@ -505,17 +505,25 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check slots available for reading */
slots = mei_count_full_read_slots(dev);
while (slots > 0) {
- /* we have urgent data to send so break the read */
- if (dev->wr_ext_msg.hdr.length)
- break;
dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
rets = mei_irq_read_handler(dev, &complete_list, &slots);
+ /* There is a race between ME write and interrupt delivery:
+ * Not all data is always available immediately after the
+ * interrupt, so try to read again on the next interrupt.
+ */
+ if (rets == -ENODATA)
+ break;
+
if (rets && dev->dev_state != MEI_DEV_RESETTING) {
+ dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n",
+ rets);
schedule_work(&dev->reset_work);
goto end;
}
}
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+
rets = mei_irq_write_handler(dev, &complete_list);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 80bd829fbd9a..893d5119fa9b 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -20,6 +20,7 @@
#define _MEI_INTERFACE_H_
#include <linux/mei.h>
+#include <linux/irqreturn.h>
#include "mei_dev.h"
#include "client.h"
diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h
new file mode 100644
index 000000000000..7283c24c1af1
--- /dev/null
+++ b/drivers/misc/mei/hw-txe-regs.h
@@ -0,0 +1,294 @@
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _MEI_HW_TXE_REGS_H_
+#define _MEI_HW_TXE_REGS_H_
+
+#include "hw.h"
+
+#define SEC_ALIVENESS_TIMER_TIMEOUT (5 * MSEC_PER_SEC)
+#define SEC_ALIVENESS_WAIT_TIMEOUT (1 * MSEC_PER_SEC)
+#define SEC_RESET_WAIT_TIMEOUT (1 * MSEC_PER_SEC)
+#define SEC_READY_WAIT_TIMEOUT (5 * MSEC_PER_SEC)
+#define START_MESSAGE_RESPONSE_WAIT_TIMEOUT (5 * MSEC_PER_SEC)
+#define RESET_CANCEL_WAIT_TIMEOUT (1 * MSEC_PER_SEC)
+
+enum {
+ SEC_BAR,
+ BRIDGE_BAR,
+
+ NUM_OF_MEM_BARS
+};
+
+/* SeC FW Status Register
+ *
+ * FW uses this register in order to report its status to host.
+ * This register resides in PCI-E config space.
+ */
+#define PCI_CFG_TXE_FW_STS0 0x40
+# define PCI_CFG_TXE_FW_STS0_WRK_ST_MSK 0x0000000F
+# define PCI_CFG_TXE_FW_STS0_OP_ST_MSK 0x000001C0
+# define PCI_CFG_TXE_FW_STS0_FW_INIT_CMPLT 0x00000200
+# define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK 0x0000F000
+# define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK 0x000F0000
+# define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK 0x00F00000
+
+
+#define IPC_BASE_ADDR 0x80400 /* SeC IPC Base Address */
+
+/* IPC Input Doorbell Register */
+#define SEC_IPC_INPUT_DOORBELL_REG (0x0000 + IPC_BASE_ADDR)
+
+/* IPC Input Status Register
+ * This register indicates whether or not processing of
+ * the most recent command has been completed by the SeC.
+ * New commands and payloads should not be written by the Host
+ * until this indicates that the previous command has been processed.
+ */
+#define SEC_IPC_INPUT_STATUS_REG (0x0008 + IPC_BASE_ADDR)
+# define SEC_IPC_INPUT_STATUS_RDY BIT(0)
+
+/* IPC Host Interrupt Status Register */
+#define SEC_IPC_HOST_INT_STATUS_REG (0x0010 + IPC_BASE_ADDR)
+#define SEC_IPC_HOST_INT_STATUS_OUT_DB BIT(0)
+#define SEC_IPC_HOST_INT_STATUS_IN_RDY BIT(1)
+#define SEC_IPC_HOST_INT_STATUS_HDCP_M0_RCVD BIT(5)
+#define SEC_IPC_HOST_INT_STATUS_ILL_MEM_ACCESS BIT(17)
+#define SEC_IPC_HOST_INT_STATUS_AES_HKEY_ERR BIT(18)
+#define SEC_IPC_HOST_INT_STATUS_DES_HKEY_ERR BIT(19)
+#define SEC_IPC_HOST_INT_STATUS_TMRMTB_OVERFLOW BIT(21)
+
+/* Convenient mask for pending interrupts */
+#define SEC_IPC_HOST_INT_STATUS_PENDING \
+ (SEC_IPC_HOST_INT_STATUS_OUT_DB | \
+ SEC_IPC_HOST_INT_STATUS_IN_RDY)
+
+/* IPC Host Interrupt Mask Register */
+#define SEC_IPC_HOST_INT_MASK_REG (0x0014 + IPC_BASE_ADDR)
+
+# define SEC_IPC_HOST_INT_MASK_OUT_DB BIT(0) /* Output Doorbell Int Mask */
+# define SEC_IPC_HOST_INT_MASK_IN_RDY BIT(1) /* Input Ready Int Mask */
+
+/* IPC Input Payload RAM */
+#define SEC_IPC_INPUT_PAYLOAD_REG (0x0100 + IPC_BASE_ADDR)
+/* IPC Shared Payload RAM */
+#define IPC_SHARED_PAYLOAD_REG (0x0200 + IPC_BASE_ADDR)
+
+/* SeC Address Translation Table Entry 2 - Ctrl
+ *
+ * This register resides also in SeC's PCI-E Memory space.
+ */
+#define SATT2_CTRL_REG 0x1040
+# define SATT2_CTRL_VALID_MSK BIT(0)
+# define SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT 8
+# define SATT2_CTRL_BRIDGE_HOST_EN_MSK BIT(12)
+
+/* SATT Table Entry 2 SAP Base Address Register */
+#define SATT2_SAP_BA_REG 0x1044
+/* SATT Table Entry 2 SAP Size Register. */
+#define SATT2_SAP_SIZE_REG 0x1048
+ /* SATT Table Entry 2 SAP Bridge Address - LSB Register */
+#define SATT2_BRG_BA_LSB_REG 0x104C
+
+/* Host High-level Interrupt Status Register */
+#define HHISR_REG 0x2020
+/* Host High-level Interrupt Enable Register
+ *
+ * Resides in PCI memory space. This is the top hierarchy for
+ * interrupts from SeC to host, aggregating both interrupts that
+ * arrive through HICR registers as well as interrupts
+ * that arrive via IPC.
+ */
+#define HHIER_REG 0x2024
+#define IPC_HHIER_SEC BIT(0)
+#define IPC_HHIER_BRIDGE BIT(1)
+#define IPC_HHIER_MSK (IPC_HHIER_SEC | IPC_HHIER_BRIDGE)
+
+/* Host High-level Interrupt Mask Register.
+ *
+ * Resides in PCI memory space.
+ * This is the top hierarchy for masking interrupts from SeC to host.
+ */
+#define HHIMR_REG 0x2028
+#define IPC_HHIMR_SEC BIT(0)
+#define IPC_HHIMR_BRIDGE BIT(1)
+
+/* Host High-level IRQ Status Register */
+#define HHIRQSR_REG 0x202C
+
+/* Host Interrupt Cause Register 0 - SeC IPC Readiness
+ *
+ * This register is both an ICR to Host from PCI Memory Space
+ * and it is also exposed in the SeC memory space.
+ * This register is used by SeC's IPC driver in order
+ * to synchronize with host about IPC interface state.
+ */
+#define HICR_SEC_IPC_READINESS_REG 0x2040
+#define HICR_SEC_IPC_READINESS_HOST_RDY BIT(0)
+#define HICR_SEC_IPC_READINESS_SEC_RDY BIT(1)
+#define HICR_SEC_IPC_READINESS_SYS_RDY \
+ (HICR_SEC_IPC_READINESS_HOST_RDY | \
+ HICR_SEC_IPC_READINESS_SEC_RDY)
+#define HICR_SEC_IPC_READINESS_RDY_CLR BIT(2)
+
+/* Host Interrupt Cause Register 1 - Aliveness Response */
+/* This register is both an ICR to Host from PCI Memory Space
+ * and it is also exposed in the SeC memory space.
+ * The register may be used by SeC to ACK a host request for aliveness.
+ */
+#define HICR_HOST_ALIVENESS_RESP_REG 0x2044
+#define HICR_HOST_ALIVENESS_RESP_ACK BIT(0)
+
+/* Host Interrupt Cause Register 2 - SeC IPC Output Doorbell */
+#define HICR_SEC_IPC_OUTPUT_DOORBELL_REG 0x2048
+
+/* Host Interrupt Status Register.
+ *
+ * Resides in PCI memory space.
+ * This is the main register involved in generating interrupts
+ * from SeC to host via HICRs.
+ * The interrupt generation rules are as follows:
+ * An interrupt will be generated whenever for any i,
+ * there is a transition from a state where at least one of
+ * the following conditions did not hold, to a state where
+ * ALL the following conditions hold:
+ * A) HISR.INT[i]_STS == 1.
+ * B) HIER.INT[i]_EN == 1.
+ */
+#define HISR_REG 0x2060
+#define HISR_INT_0_STS BIT(0)
+#define HISR_INT_1_STS BIT(1)
+#define HISR_INT_2_STS BIT(2)
+#define HISR_INT_3_STS BIT(3)
+#define HISR_INT_4_STS BIT(4)
+#define HISR_INT_5_STS BIT(5)
+#define HISR_INT_6_STS BIT(6)
+#define HISR_INT_7_STS BIT(7)
+#define HISR_INT_STS_MSK \
+ (HISR_INT_0_STS | HISR_INT_1_STS | HISR_INT_2_STS)
+
+/* Host Interrupt Enable Register. Resides in PCI memory space. */
+#define HIER_REG 0x2064
+#define HIER_INT_0_EN BIT(0)
+#define HIER_INT_1_EN BIT(1)
+#define HIER_INT_2_EN BIT(2)
+#define HIER_INT_3_EN BIT(3)
+#define HIER_INT_4_EN BIT(4)
+#define HIER_INT_5_EN BIT(5)
+#define HIER_INT_6_EN BIT(6)
+#define HIER_INT_7_EN BIT(7)
+
+#define HIER_INT_EN_MSK \
+ (HIER_INT_0_EN | HIER_INT_1_EN | HIER_INT_2_EN)
+
+
+/* SEC Memory Space IPC output payload.
+ *
+ * This register is part of the output payload which SEC provides to host.
+ */
+#define BRIDGE_IPC_OUTPUT_PAYLOAD_REG 0x20C0
+
+/* SeC Interrupt Cause Register - Host Aliveness Request
+ * This register is both an ICR to SeC and it is also exposed
+ * in the host-visible PCI memory space.
+ * The register is used by host to request SeC aliveness.
+ */
+#define SICR_HOST_ALIVENESS_REQ_REG 0x214C
+#define SICR_HOST_ALIVENESS_REQ_REQUESTED BIT(0)
+
+
+/* SeC Interrupt Cause Register - Host IPC Readiness
+ *
+ * This register is both an ICR to SeC and it is also exposed
+ * in the host-visible PCI memory space.
+ * This register is used by the host's SeC driver in order
+ * to synchronize with SeC about IPC interface state.
+ */
+#define SICR_HOST_IPC_READINESS_REQ_REG 0x2150
+
+
+#define SICR_HOST_IPC_READINESS_HOST_RDY BIT(0)
+#define SICR_HOST_IPC_READINESS_SEC_RDY BIT(1)
+#define SICR_HOST_IPC_READINESS_SYS_RDY \
+ (SICR_HOST_IPC_READINESS_HOST_RDY | \
+ SICR_HOST_IPC_READINESS_SEC_RDY)
+#define SICR_HOST_IPC_READINESS_RDY_CLR BIT(2)
+
+/* SeC Interrupt Cause Register - SeC IPC Output Status
+ *
+ * This register indicates whether or not processing of the most recent
+ * command has been completed by the Host.
+ * New commands and payloads should not be written by SeC until this
+ * register indicates that the previous command has been processed.
+ */
+#define SICR_SEC_IPC_OUTPUT_STATUS_REG 0x2154
+# define SEC_IPC_OUTPUT_STATUS_RDY BIT(0)
+
+
+
+/* MEI IPC Message payload size 64 bytes */
+#define PAYLOAD_SIZE 64
+
+/* MAX size for SATT range 32MB */
+#define SATT_RANGE_MAX (32 << 20)
+
+
+#endif /* _MEI_HW_TXE_REGS_H_ */
+
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
new file mode 100644
index 000000000000..f60182a52f96
--- /dev/null
+++ b/drivers/misc/mei/hw-txe.c
@@ -0,0 +1,1107 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/irqreturn.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hw-txe.h"
+#include "client.h"
+#include "hbm.h"
+
+/**
+ * mei_txe_reg_read - Reads 32bit data from the device
+ *
+ * @base_addr: registers base address
+ * @offset: register offset
+ *
+ * returns the 32bit register value
+ */
+static inline u32 mei_txe_reg_read(void __iomem *base_addr,
+ unsigned long offset)
+{
+ return ioread32(base_addr + offset);
+}
+
+/**
+ * mei_txe_reg_write - Writes 32bit data to the device
+ *
+ * @base_addr: registers base address
+ * @offset: register offset
+ * @value: the value to write
+ */
+static inline void mei_txe_reg_write(void __iomem *base_addr,
+ unsigned long offset, u32 value)
+{
+ iowrite32(value, base_addr + offset);
+}
+
+/**
+ * mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR
+ *
+ * @hw: the txe hardware structure
+ * @offset: register offset
+ *
+ * Reads 32bit data from the SeC BAR without checking for aliveness
+ */
+static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw,
+ unsigned long offset)
+{
+ return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset);
+}
+
+/**
+ * mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR
+ *
+ * @hw: the txe hardware structure
+ * @offset: register offset
+ *
+ * Reads 32bit data from the SeC BAR and warns loudly if aliveness is not set
+ */
+static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw,
+ unsigned long offset)
+{
+ WARN(!hw->aliveness, "sec read: aliveness not asserted\n");
+ return mei_txe_sec_reg_read_silent(hw, offset);
+}
+/**
+ * mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR
+ *   without checking for aliveness
+ *
+ * @hw: the txe hardware structure
+ * @offset: register offset
+ * @value: value to write
+ *
+ * Writes 32bit data to the SeC BAR without checking for aliveness
+ */
+static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw,
+ unsigned long offset, u32 value)
+{
+ mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value);
+}
+
+/**
+ * mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR
+ *
+ * @hw: the txe hardware structure
+ * @offset: register offset
+ * @value: value to write
+ *
+ * Writes 32bit data to the SeC BAR and warns loudly if aliveness is not set
+ */
+static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw,
+ unsigned long offset, u32 value)
+{
+ WARN(!hw->aliveness, "sec write: aliveness not asserted\n");
+ mei_txe_sec_reg_write_silent(hw, offset, value);
+}
+/**
+ * mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR
+ *
+ * @hw: the device structure
+ * @offset: offset from which to read the data
+ *
+ */
+static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw,
+ unsigned long offset)
+{
+ return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset);
+}
+
+/**
+ * mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR
+ *
+ * @hw: the device structure
+ * @offset: offset to which to write the data
+ * @value: the value to write
+ */
+static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw,
+ unsigned long offset, u32 value)
+{
+ mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value);
+}
+
+/**
+ * mei_txe_aliveness_set - request for aliveness change
+ *
+ * @dev: the device structure
+ * @req: requested aliveness value
+ *
+ * Request for aliveness change and returns true if the change is
+ * really needed and false if aliveness is already
+ * in the requested state
+ * Requires device lock to be held
+ */
+static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
+{
+
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ bool do_req = hw->aliveness != req;
+
+ dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n",
+ hw->aliveness, req);
+ if (do_req) {
+ hw->recvd_aliveness = false;
+ mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
+ }
+ return do_req;
+}
+
+
+/**
+ * mei_txe_aliveness_req_get - get aliveness requested register value
+ *
+ * @dev: the device structure
+ *
+ * Extract the SICR_HOST_ALIVENESS_REQ_REQUESTED bit
+ * from the SICR_HOST_ALIVENESS_REQ register value
+ */
+static u32 mei_txe_aliveness_req_get(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 reg;
+ reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG);
+ return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED;
+}
+
+/**
+ * mei_txe_aliveness_get - get aliveness response register value
+ * @dev: the device structure
+ *
+ * Extract HICR_HOST_ALIVENESS_RESP_ACK bit
+ * from HICR_HOST_ALIVENESS_RESP register value
+ */
+static u32 mei_txe_aliveness_get(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 reg;
+ reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG);
+ return reg & HICR_HOST_ALIVENESS_RESP_ACK;
+}
+
+/**
+ * mei_txe_aliveness_poll - waits for aliveness to settle
+ *
+ * @dev: the device structure
+ * @expected: expected aliveness value
+ *
+ * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
+ * returns the elapsed time in msecs (>= 0) if the expected value was received, -ETIME otherwise
+ */
+static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ int t = 0;
+
+ do {
+ hw->aliveness = mei_txe_aliveness_get(dev);
+ if (hw->aliveness == expected) {
+ dev_dbg(&dev->pdev->dev,
+ "aliveness settled after %d msecs\n", t);
+ return t;
+ }
+ mutex_unlock(&dev->device_lock);
+ msleep(MSEC_PER_SEC / 5);
+ mutex_lock(&dev->device_lock);
+ t += MSEC_PER_SEC / 5;
+ } while (t < SEC_ALIVENESS_WAIT_TIMEOUT);
+
+ dev_err(&dev->pdev->dev, "aliveness timed out\n");
+ return -ETIME;
+}
+
+/**
+ * mei_txe_aliveness_wait - waits for aliveness to settle
+ *
+ * @dev: the device structure
+ * @expected: expected aliveness value
+ *
+ * Waits for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
+ * returns 0 on success and < 0 otherwise
+ */
+static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ const unsigned long timeout =
+ msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
+ long err;
+ int ret;
+
+ hw->aliveness = mei_txe_aliveness_get(dev);
+ if (hw->aliveness == expected)
+ return 0;
+
+ mutex_unlock(&dev->device_lock);
+ err = wait_event_timeout(hw->wait_aliveness,
+ hw->recvd_aliveness, timeout);
+ mutex_lock(&dev->device_lock);
+
+ hw->aliveness = mei_txe_aliveness_get(dev);
+ ret = hw->aliveness == expected ? 0 : -ETIME;
+
+ if (ret)
+ dev_err(&dev->pdev->dev, "aliveness timed out");
+ else
+ dev_dbg(&dev->pdev->dev, "aliveness settled after %d msecs\n",
+ jiffies_to_msecs(timeout - err));
+ hw->recvd_aliveness = false;
+ return ret;
+}
+
+/**
+ * mei_txe_aliveness_set_sync - sets and waits for aliveness to complete
+ *
+ * @dev: the device structure
+ * @req: requested aliveness value
+ *
+ * returns 0 on success and < 0 otherwise
+ */
+int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
+{
+ if (mei_txe_aliveness_set(dev, req))
+ return mei_txe_aliveness_wait(dev, req);
+ return 0;
+}
+
+/**
+ * mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 hintmsk;
+ /* Enable the SEC_IPC_HOST_INT_MASK_IN_RDY interrupt */
+ hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
+ hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
+ mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
+}
+
+/**
+ * mei_txe_input_doorbell_set
+ * - Sets bit 0 in SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL.
+ * @hw: the txe hardware structure
+ */
+static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw)
+{
+ /* Clear the interrupt cause */
+ clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause);
+ mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1);
+}
+
+/**
+ * mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1
+ *
+ * @hw: the txe hardware structure
+ */
+static void mei_txe_output_ready_set(struct mei_txe_hw *hw)
+{
+ mei_txe_br_reg_write(hw,
+ SICR_SEC_IPC_OUTPUT_STATUS_REG,
+ SEC_IPC_OUTPUT_STATUS_RDY);
+}
+
+/**
+ * mei_txe_is_input_ready - check if TXE is ready for receiving data
+ *
+ * @dev: the device structure
+ */
+static bool mei_txe_is_input_ready(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 status;
+ status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);
+ return !!(SEC_IPC_INPUT_STATUS_RDY & status);
+}
+
+/**
+ * mei_txe_intr_clear - clear all interrupts
+ *
+ * @dev: the device structure
+ */
+static inline void mei_txe_intr_clear(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG,
+ SEC_IPC_HOST_INT_STATUS_PENDING);
+ mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK);
+ mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK);
+}
+
+/**
+ * mei_txe_intr_disable - disable all interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_intr_disable(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_br_reg_write(hw, HHIER_REG, 0);
+ mei_txe_br_reg_write(hw, HIER_REG, 0);
+}
+/**
+ * mei_txe_intr_enable - enable all interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_intr_enable(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK);
+ mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK);
+}
+
+/**
+ * mei_txe_pending_interrupts - check if there are pending interrupts
+ *
+ * @dev: the device structure
+ *
+ * Checks if there are pending interrupts;
+ * only Aliveness, Readiness, Input Ready, and Output Doorbell are relevant
+ */
+static bool mei_txe_pending_interrupts(struct mei_device *dev)
+{
+
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ bool ret = (hw->intr_cause & (TXE_INTR_READINESS |
+ TXE_INTR_ALIVENESS |
+ TXE_INTR_IN_READY |
+ TXE_INTR_OUT_DB));
+
+ if (ret) {
+ dev_dbg(&dev->pdev->dev,
+ "Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
+ !!(hw->intr_cause & TXE_INTR_IN_READY),
+ !!(hw->intr_cause & TXE_INTR_READINESS),
+ !!(hw->intr_cause & TXE_INTR_ALIVENESS),
+ !!(hw->intr_cause & TXE_INTR_OUT_DB));
+ }
+ return ret;
+}
+
+/**
+ * mei_txe_input_payload_write - write a dword to the host buffer
+ * at offset idx
+ *
+ * @dev: the device structure
+ * @idx: index in the host buffer
+ * @value: value to write
+ */
+static void mei_txe_input_payload_write(struct mei_device *dev,
+ unsigned long idx, u32 value)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG +
+ (idx * sizeof(u32)), value);
+}
+
+/**
+ * mei_txe_out_data_read - read dword from the device buffer
+ * at offset idx
+ *
+ * @dev: the device structure
+ * @idx: index in the device buffer
+ *
+ * returns register value at index
+ */
+static u32 mei_txe_out_data_read(const struct mei_device *dev,
+ unsigned long idx)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ return mei_txe_br_reg_read(hw,
+ BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32)));
+}
+
+/* Readiness */
+
+/**
+ * mei_txe_readiness_set_host_rdy - set the host readiness bit (HOST_RDY)
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_readiness_set_host_rdy(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_br_reg_write(hw,
+ SICR_HOST_IPC_READINESS_REQ_REG,
+ SICR_HOST_IPC_READINESS_HOST_RDY);
+}
+
+/**
+ * mei_txe_readiness_clear - clear host readiness (set the RDY_CLR bit)
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_readiness_clear(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG,
+ SICR_HOST_IPC_READINESS_RDY_CLR);
+}
+/**
+ * mei_txe_readiness_get - Reads and returns
+ * the HICR_SEC_IPC_READINESS register value
+ *
+ * @dev: the device structure
+ */
+static u32 mei_txe_readiness_get(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
+}
+
+
+/**
+ * mei_txe_readiness_is_sec_rdy - check readiness
+ * for HICR_SEC_IPC_READINESS_SEC_RDY
+ *
+ * @readiness: cached readiness state
+ */
+static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness)
+{
+ return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY);
+}
+
+/**
+ * mei_txe_hw_is_ready - check if the hw is ready
+ *
+ * @dev: the device structure
+ */
+static bool mei_txe_hw_is_ready(struct mei_device *dev)
+{
+ u32 readiness = mei_txe_readiness_get(dev);
+ return mei_txe_readiness_is_sec_rdy(readiness);
+}
+
+/**
+ * mei_txe_host_is_ready - check if the host is ready
+ *
+ * @dev: the device structure
+ */
+static inline bool mei_txe_host_is_ready(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
+ return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY);
+}
+
+/**
+ * mei_txe_readiness_wait - wait till readiness settles
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success and -ETIME on timeout
+ */
+static int mei_txe_readiness_wait(struct mei_device *dev)
+{
+ if (mei_txe_hw_is_ready(dev))
+ return 0;
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready,
+ msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
+ mutex_lock(&dev->device_lock);
+ if (!dev->recvd_hw_ready) {
+ dev_err(&dev->pdev->dev, "wait for readiness failed\n");
+ return -ETIME;
+ }
+
+ dev->recvd_hw_ready = false;
+ return 0;
+}
+
+/**
+ * mei_txe_hw_config - configure hardware at the start of the device
+ *
+ * @dev: the device structure
+ *
+ * Configuring hardware at the start of the device should be done only
+ * once, at device probe time
+ */
+static void mei_txe_hw_config(struct mei_device *dev)
+{
+
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ /* Doesn't change in runtime */
+ dev->hbuf_depth = PAYLOAD_SIZE / 4;
+
+ hw->aliveness = mei_txe_aliveness_get(dev);
+ hw->readiness = mei_txe_readiness_get(dev);
+
+ dev_dbg(&dev->pdev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
+ hw->aliveness, hw->readiness);
+}
+
+
+/**
+ * mei_txe_write - writes a message to device.
+ *
+ * @dev: the device structure
+ * @header: header of message
+ * @buf: message buffer to be written
+ *
+ * returns 0 on success and < 0 in case of error.
+ */
+
+static int mei_txe_write(struct mei_device *dev,
+ struct mei_msg_hdr *header, unsigned char *buf)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ unsigned long rem;
+ unsigned long length;
+ int slots = dev->hbuf_depth;
+ u32 *reg_buf = (u32 *)buf;
+ u32 dw_cnt;
+ int i;
+
+ if (WARN_ON(!header || !buf))
+ return -EINVAL;
+
+ length = header->length;
+
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+
+ dw_cnt = mei_data2slots(length);
+ if (dw_cnt > slots)
+ return -EMSGSIZE;
+
+ if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n"))
+ return -EAGAIN;
+
+ /* Enable Input Ready Interrupt. */
+ mei_txe_input_ready_interrupt_enable(dev);
+
+ if (!mei_txe_is_input_ready(dev)) {
+ dev_err(&dev->pdev->dev, "Input is not ready");
+ return -EAGAIN;
+ }
+
+ mei_txe_input_payload_write(dev, 0, *((u32 *)header));
+
+ for (i = 0; i < length / 4; i++)
+ mei_txe_input_payload_write(dev, i + 1, reg_buf[i]);
+
+ rem = length & 0x3;
+ if (rem > 0) {
+ u32 reg = 0;
+ memcpy(&reg, &buf[length - rem], rem);
+ mei_txe_input_payload_write(dev, i + 1, reg);
+ }
+
+ /* after each write the whole buffer is consumed */
+ hw->slots = 0;
+
+ /* Set Input-Doorbell */
+ mei_txe_input_doorbell_set(hw);
+
+ return 0;
+}
+
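As a side note on the write path just above: the header occupies input payload slot 0, full dwords of the body go to slots 1..n, and a trailing remainder of 1-3 bytes is zero-padded into one final dword. A small stand-alone C sketch of that packing (illustrative only, not part of the patch; the slot layout and names are simplified) could look like this:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Pack a header dword plus a byte buffer into 32bit payload slots,
 * mirroring the slot layout used by mei_txe_write():
 * slot 0 = header, slots 1..n = body, last partial dword zero-padded.
 * Returns the number of slots written.
 */
static unsigned int pack_payload(uint32_t hdr, const unsigned char *buf,
				 size_t len, uint32_t *slots)
{
	unsigned int i;
	size_t rem = len & 0x3;

	slots[0] = hdr;
	for (i = 0; i < len / 4; i++)
		memcpy(&slots[i + 1], &buf[i * 4], 4);
	if (rem) {
		uint32_t last = 0;

		memcpy(&last, &buf[len - rem], rem);
		slots[i + 1] = last;
		i++;
	}
	return i + 1;
}

int main(void)
{
	unsigned char msg[6] = { 1, 2, 3, 4, 5, 6 };
	uint32_t slots[4] = { 0 };

	/* 6 bytes -> header + one full dword + one padded dword = 3 slots */
	assert(pack_payload(0xdeadbeef, msg, sizeof(msg), slots) == 3);
	assert(slots[0] == 0xdeadbeef);
	return 0;
}
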
+/**
+ * mei_txe_hbuf_max_len - mimics the me hbuf circular buffer
+ *
+ * @dev: the device structure
+ *
+ * returns PAYLOAD_SIZE minus the header size (PAYLOAD_SIZE - 4)
+ */
+static size_t mei_txe_hbuf_max_len(const struct mei_device *dev)
+{
+ return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr);
+}
+
+/**
+ * mei_txe_hbuf_empty_slots - mimics the me hbuf circular buffer
+ *
+ * @dev: the device structure
+ *
+ * returns the cached number of empty slots: hbuf_depth when the buffer is free, 0 after a write
+ */
+static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ return hw->slots;
+}
+
+/**
+ * mei_txe_count_full_read_slots - mimics the me device circular buffer
+ *
+ * @dev: the device structure
+ *
+ * always returns the buffer size in dwords
+ */
+static int mei_txe_count_full_read_slots(struct mei_device *dev)
+{
+ /* the read buffer has a static size */
+ return PAYLOAD_SIZE / 4;
+}
+
+/**
+ * mei_txe_read_hdr - read message header which is always in 4 first bytes
+ *
+ * @dev: the device structure
+ *
+ * returns mei message header
+ */
+
+static u32 mei_txe_read_hdr(const struct mei_device *dev)
+{
+ return mei_txe_out_data_read(dev, 0);
+}
+/**
+ * mei_txe_read - reads a message from the txe device.
+ *
+ * @dev: the device structure
+ * @buf: message buffer to be filled
+ * @len: length of the message to read
+ *
+ * returns 0 on success and -EINVAL on a wrong argument
+ */
+static int mei_txe_read(struct mei_device *dev,
+ unsigned char *buf, unsigned long len)
+{
+
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 i;
+ u32 *reg_buf = (u32 *)buf;
+ u32 rem = len & 0x3;
+
+ if (WARN_ON(!buf || !len))
+ return -EINVAL;
+
+ dev_dbg(&dev->pdev->dev,
+ "buffer-length = %lu buf[0]0x%08X\n",
+ len, mei_txe_out_data_read(dev, 0));
+
+ for (i = 0; i < len / 4; i++) {
+ /* skip header: index starts from 1 */
+ u32 reg = mei_txe_out_data_read(dev, i + 1);
+ dev_dbg(&dev->pdev->dev, "buf[%d] = 0x%08X\n", i, reg);
+ *reg_buf++ = reg;
+ }
+
+ if (rem) {
+ u32 reg = mei_txe_out_data_read(dev, i + 1);
+ memcpy(reg_buf, &reg, rem);
+ }
+
+ mei_txe_output_ready_set(hw);
+ return 0;
+}
+
+/**
+ * mei_txe_hw_reset - resets host and fw.
+ *
+ * @dev: the device structure
+ * @intr_enable: if interrupt should be enabled after reset.
+ *
+ * returns 0 on success and < 0 in case of error
+ */
+static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+
+ u32 aliveness_req;
+ /*
+ * read input doorbell to ensure consistency between Bridge and SeC
+ * the return value might be garbage
+ */
+ (void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG);
+
+ aliveness_req = mei_txe_aliveness_req_get(dev);
+ hw->aliveness = mei_txe_aliveness_get(dev);
+
+ /* Disable interrupts in this stage we will poll */
+ mei_txe_intr_disable(dev);
+
+ /*
+ * If Aliveness Request and Aliveness Response are not equal then
+ * wait for them to be equal
+ * Since we might have interrupts disabled - poll for it
+ */
+ if (aliveness_req != hw->aliveness)
+ if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
+ dev_err(&dev->pdev->dev,
+ "wait for aliveness settle failed ... bailing out\n");
+ return -EIO;
+ }
+
+ /*
+ * If Aliveness Request and Aliveness Response are set then clear them
+ */
+ if (aliveness_req) {
+ mei_txe_aliveness_set(dev, 0);
+ if (mei_txe_aliveness_poll(dev, 0) < 0) {
+ dev_err(&dev->pdev->dev,
+ "wait for aliveness failed ... bailing out\n");
+ return -EIO;
+ }
+ }
+
+ /*
+ * Set the readiness RDY_CLR bit
+ */
+ mei_txe_readiness_clear(dev);
+
+ return 0;
+}
+
+/**
+ * mei_txe_hw_start - start the hardware after reset
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success and < 0 in case of error
+ */
+static int mei_txe_hw_start(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ int ret;
+
+ u32 hisr;
+
+ /* bring back interrupts */
+ mei_txe_intr_enable(dev);
+
+ ret = mei_txe_readiness_wait(dev);
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev, "wating for readiness failed\n");
+ return ret;
+ }
+
+ /*
+ * If HISR.INT2_STS interrupt status bit is set then clear it.
+ */
+ hisr = mei_txe_br_reg_read(hw, HISR_REG);
+ if (hisr & HISR_INT_2_STS)
+ mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);
+
+ /* Clear the interrupt cause of OutputDoorbell */
+ clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);
+
+ ret = mei_txe_aliveness_set_sync(dev, 1);
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev, "wait for aliveness failed ... bailing out\n");
+ return ret;
+ }
+
+ /* enable input ready interrupts:
+ * SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
+ */
+ mei_txe_input_ready_interrupt_enable(dev);
+
+
+ /* Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */
+ mei_txe_output_ready_set(hw);
+
+ /* Set the SICR_HOST_IPC_READINESS.HOST_RDY bit */
+ mei_txe_readiness_set_host_rdy(dev);
+
+ return 0;
+}
+
+/**
+ * mei_txe_check_and_ack_intrs - translate the interrupt sources from the
+ * multiple BARs into a single bit mask and acknowledge the interrupts
+ *
+ * @dev: the device structure
+ * @do_ack: acknowledge interrupts
+ */
+static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 hisr;
+ u32 hhisr;
+ u32 ipc_isr;
+ u32 aliveness;
+ bool generated;
+
+ /* read interrupt registers */
+ hhisr = mei_txe_br_reg_read(hw, HHISR_REG);
+ generated = (hhisr & IPC_HHIER_MSK);
+ if (!generated)
+ goto out;
+
+ hisr = mei_txe_br_reg_read(hw, HISR_REG);
+
+ aliveness = mei_txe_aliveness_get(dev);
+ if (hhisr & IPC_HHIER_SEC && aliveness)
+ ipc_isr = mei_txe_sec_reg_read_silent(hw,
+ SEC_IPC_HOST_INT_STATUS_REG);
+ else
+ ipc_isr = 0;
+
+ generated = generated ||
+ (hisr & HISR_INT_STS_MSK) ||
+ (ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING);
+
+ if (generated && do_ack) {
+ /* Save the interrupt causes */
+ hw->intr_cause |= hisr & HISR_INT_STS_MSK;
+ if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
+ hw->intr_cause |= TXE_INTR_IN_READY;
+
+
+ mei_txe_intr_disable(dev);
+ /* Clear the interrupts in hierarchy:
+ * IPC and Bridge, then the High Level */
+ mei_txe_sec_reg_write_silent(hw,
+ SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
+ mei_txe_br_reg_write(hw, HISR_REG, hisr);
+ mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
+ }
+
+out:
+ return generated;
+}
+
+/**
+ * mei_txe_irq_quick_handler - The ISR of the MEI device
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ */
+irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = dev_id;
+
+ if (mei_txe_check_and_ack_intrs(dev, true))
+ return IRQ_WAKE_THREAD;
+ return IRQ_NONE;
+}
+
+
+/**
+ * mei_txe_irq_thread_handler - txe interrupt thread
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ *
+ */
+irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *) dev_id;
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ struct mei_cl_cb complete_list;
+ s32 slots;
+ int rets = 0;
+
+ dev_dbg(&dev->pdev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
+ mei_txe_br_reg_read(hw, HHISR_REG),
+ mei_txe_br_reg_read(hw, HISR_REG),
+ mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));
+
+
+ /* initialize our complete list */
+ mutex_lock(&dev->device_lock);
+ mei_io_list_init(&complete_list);
+
+ if (pci_dev_msi_enabled(dev->pdev))
+ mei_txe_check_and_ack_intrs(dev, true);
+
+ /* show irq events */
+ mei_txe_pending_interrupts(dev);
+
+ hw->aliveness = mei_txe_aliveness_get(dev);
+ hw->readiness = mei_txe_readiness_get(dev);
+
+ /* Readiness:
+ * Detection of TXE driver going through reset
+ * or TXE driver resetting the HECI interface.
+ */
+ if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
+ dev_dbg(&dev->pdev->dev, "Readiness Interrupt was received...\n");
+
+ /* Check if SeC is going through reset */
+ if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
+ dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
+ dev->recvd_hw_ready = true;
+ } else {
+ dev->recvd_hw_ready = false;
+ if (dev->dev_state != MEI_DEV_RESETTING) {
+
+ dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
+ schedule_work(&dev->reset_work);
+ goto end;
+
+ }
+ }
+ wake_up(&dev->wait_hw_ready);
+ }
+
+ /************************************************************/
+ /* Check interrupt cause:
+ * Aliveness: Detection of SeC acknowledging the host request that
+ * it remain alive, or of the host cancelling that request.
+ */
+
+ if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
+ /* Clear the interrupt cause */
+ dev_dbg(&dev->pdev->dev,
+ "Aliveness Interrupt: Status: %d\n", hw->aliveness);
+ hw->recvd_aliveness = true;
+ if (waitqueue_active(&hw->wait_aliveness))
+ wake_up(&hw->wait_aliveness);
+ }
+
+
+ /* Output Doorbell:
+ * Detection of SeC having sent output to host
+ */
+ slots = mei_count_full_read_slots(dev);
+ if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
+ /* Read from TXE */
+ rets = mei_irq_read_handler(dev, &complete_list, &slots);
+ if (rets && dev->dev_state != MEI_DEV_RESETTING) {
+ dev_err(&dev->pdev->dev,
+ "mei_irq_read_handler ret = %d.\n", rets);
+
+ schedule_work(&dev->reset_work);
+ goto end;
+ }
+ }
+ /* Input Ready: Detection if host can write to SeC */
+ if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
+ dev->hbuf_is_ready = true;
+ hw->slots = dev->hbuf_depth;
+ }
+
+ if (hw->aliveness && dev->hbuf_is_ready) {
+ /* get the real register value */
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ rets = mei_irq_write_handler(dev, &complete_list);
+ if (rets && rets != -EMSGSIZE)
+ dev_err(&dev->pdev->dev, "mei_irq_write_handler ret = %d.\n",
+ rets);
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ }
+
+ mei_irq_compl_handler(dev, &complete_list);
+
+end:
+ dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
+
+ mutex_unlock(&dev->device_lock);
+
+ mei_enable_interrupts(dev);
+ return IRQ_HANDLED;
+}
+
+static const struct mei_hw_ops mei_txe_hw_ops = {
+
+ .host_is_ready = mei_txe_host_is_ready,
+
+ .hw_is_ready = mei_txe_hw_is_ready,
+ .hw_reset = mei_txe_hw_reset,
+ .hw_config = mei_txe_hw_config,
+ .hw_start = mei_txe_hw_start,
+
+ .intr_clear = mei_txe_intr_clear,
+ .intr_enable = mei_txe_intr_enable,
+ .intr_disable = mei_txe_intr_disable,
+
+ .hbuf_free_slots = mei_txe_hbuf_empty_slots,
+ .hbuf_is_ready = mei_txe_is_input_ready,
+ .hbuf_max_len = mei_txe_hbuf_max_len,
+
+ .write = mei_txe_write,
+
+ .rdbuf_full_slots = mei_txe_count_full_read_slots,
+ .read_hdr = mei_txe_read_hdr,
+
+ .read = mei_txe_read,
+
+};
+
+/**
+ * mei_txe_dev_init - allocates and initializes txe hardware specific structure
+ *
+ * @pdev: pci device
+ *
+ * returns struct mei_device * on success or NULL on failure
+ *
+ */
+struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+ struct mei_txe_hw *hw;
+
+ dev = kzalloc(sizeof(struct mei_device) +
+ sizeof(struct mei_txe_hw), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ mei_device_init(dev);
+
+ hw = to_txe_hw(dev);
+
+ init_waitqueue_head(&hw->wait_aliveness);
+
+ dev->ops = &mei_txe_hw_ops;
+
+ dev->pdev = pdev;
+ return dev;
+}
+
+/**
+ * mei_txe_setup_satt2 - SATT2 configuration for DMA support.
+ *
+ * @dev: the device structure
+ * @addr: physical address start of the range
+ * @range: physical range size
+ *
+ * returns 0 on success and -EINVAL on an invalid address or range
+ */
+int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+
+ u32 lo32 = lower_32_bits(addr);
+ u32 hi32 = upper_32_bits(addr);
+ u32 ctrl;
+
+ /* SATT is limited to 36 Bits */
+ if (hi32 & ~0xF)
+ return -EINVAL;
+
+ /* SATT has to be 16Byte aligned */
+ if (lo32 & 0xF)
+ return -EINVAL;
+
+ /* SATT range has to be 4Bytes aligned */
+ if (range & 0x4)
+ return -EINVAL;
+
+ /* SATT is limited to 32 MB range*/
+ if (range > SATT_RANGE_MAX)
+ return -EINVAL;
+
+ ctrl = SATT2_CTRL_VALID_MSK;
+ ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;
+
+ mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
+ mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
+ mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
+ dev_dbg(&dev->pdev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
+ range, lo32, ctrl);
+
+ return 0;
+}
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
new file mode 100644
index 000000000000..0812d98633a4
--- /dev/null
+++ b/drivers/misc/mei/hw-txe.h
@@ -0,0 +1,74 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_HW_TXE_H_
+#define _MEI_HW_TXE_H_
+
+#include <linux/irqreturn.h>
+
+#include "hw.h"
+#include "hw-txe-regs.h"
+
+/* Flattened hierarchy interrupt cause */
+#define TXE_INTR_READINESS_BIT 0 /* HISR_INT_0_STS */
+#define TXE_INTR_READINESS HISR_INT_0_STS
+#define TXE_INTR_ALIVENESS_BIT 1 /* HISR_INT_1_STS */
+#define TXE_INTR_ALIVENESS HISR_INT_1_STS
+#define TXE_INTR_OUT_DB_BIT 2 /* HISR_INT_2_STS */
+#define TXE_INTR_OUT_DB HISR_INT_2_STS
+#define TXE_INTR_IN_READY_BIT 8 /* beyond HISR */
+#define TXE_INTR_IN_READY BIT(8)
+
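The flattened cause bits above are a direct copy of the low HISR status bits, with the extra Input Ready bit parked beyond them (bit 8) because it originates from the SeC IPC status register rather than from HISR. A tiny stand-alone C sketch of that folding (illustration only, not part of the patch; the constants are reproduced from the definitions above):

#include <assert.h>
#include <stdint.h>

#define HISR_INT_STS_MSK	0x7	/* HISR_INT_0..2_STS */
#define TXE_INTR_IN_READY	(1u << 8)

/* fold the bridge HISR status bits and the SeC "input ready" status
 * into one flattened cause word, as mei_txe_check_and_ack_intrs() does */
static uint32_t fold_intr_cause(uint32_t hisr, int sec_input_ready)
{
	uint32_t cause = hisr & HISR_INT_STS_MSK;

	if (sec_input_ready)
		cause |= TXE_INTR_IN_READY;
	return cause;
}

int main(void)
{
	/* readiness (bit 0) pending plus SeC input ready */
	assert(fold_intr_cause(0x1, 1) == (0x1 | TXE_INTR_IN_READY));
	/* status bits above INT_2 are ignored by the mask */
	assert(fold_intr_cause(0xF0, 0) == 0);
	return 0;
}
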
+/**
+ * struct mei_txe_hw - txe hardware specifics
+ *
+ * @mem_addr: SeC and BRIDGE bars
+ * @aliveness: aliveness (power gating) state of the hardware
+ * @readiness: readiness state of the hardware
+ * @slots: number of empty slots in the host (write) buffer
+ * @wait_aliveness: aliveness wait queue
+ * @recvd_aliveness: aliveness interrupt was received
+ * @intr_cause: translated interrupt cause
+ */
+struct mei_txe_hw {
+ void __iomem *mem_addr[NUM_OF_MEM_BARS];
+ u32 aliveness;
+ u32 readiness;
+ u32 slots;
+
+ wait_queue_head_t wait_aliveness;
+ bool recvd_aliveness;
+
+ unsigned long intr_cause;
+};
+
+#define to_txe_hw(dev) (struct mei_txe_hw *)((dev)->hw)
+
+static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw)
+{
+ return container_of((void *)hw, struct mei_device, hw);
+}
+
+struct mei_device *mei_txe_dev_init(struct pci_dev *pdev);
+
+irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id);
+irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id);
+
+int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req);
+
+int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range);
+
+
+#endif /* _MEI_HW_TXE_H_ */
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index dd44e33ad2b6..6b476ab49b2e 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -22,7 +22,7 @@
/*
* Timeouts in Seconds
*/
-#define MEI_INTEROP_TIMEOUT 7 /* Timeout on ready message */
+#define MEI_HW_READY_TIMEOUT 2 /* Timeout on ready message */
#define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */
#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
@@ -31,13 +31,13 @@
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
+#define MEI_HBM_TIMEOUT 1 /* 1 second */
/*
* MEI Version
*/
#define HBM_MINOR_VERSION 0
#define HBM_MAJOR_VERSION 1
-#define HBM_TIMEOUT 1 /* 1 second */
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
@@ -89,19 +89,19 @@ enum mei_stop_reason_types {
* Client Connect Status
* used by hbm_client_connect_response.status
*/
-enum client_connect_status_types {
- CCS_SUCCESS = 0x00,
- CCS_NOT_FOUND = 0x01,
- CCS_ALREADY_STARTED = 0x02,
- CCS_OUT_OF_RESOURCES = 0x03,
- CCS_MESSAGE_SMALL = 0x04
+enum mei_cl_connect_status {
+ MEI_CL_CONN_SUCCESS = 0x00,
+ MEI_CL_CONN_NOT_FOUND = 0x01,
+ MEI_CL_CONN_ALREADY_STARTED = 0x02,
+ MEI_CL_CONN_OUT_OF_RESOURCES = 0x03,
+ MEI_CL_CONN_MESSAGE_SMALL = 0x04
};
/*
* Client Disconnect Status
*/
-enum client_disconnect_status_types {
- CDS_SUCCESS = 0x00
+enum mei_cl_disconnect_status {
+ MEI_CL_DISCONN_SUCCESS = 0x00
};
/*
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index cdd31c2a2a2b..4460975c0eef 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -116,7 +116,6 @@ int mei_reset(struct mei_device *dev)
mei_cl_unlink(&dev->wd_cl);
mei_cl_unlink(&dev->iamthif_cl);
mei_amthif_reset_params(dev);
- memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
}
@@ -126,7 +125,6 @@ int mei_reset(struct mei_device *dev)
if (ret) {
dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret);
- dev->dev_state = MEI_DEV_DISABLED;
return ret;
}
@@ -139,7 +137,6 @@ int mei_reset(struct mei_device *dev)
ret = mei_hw_start(dev);
if (ret) {
dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret);
- dev->dev_state = MEI_DEV_DISABLED;
return ret;
}
@@ -149,7 +146,7 @@ int mei_reset(struct mei_device *dev)
ret = mei_hbm_start_req(dev);
if (ret) {
dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret);
- dev->dev_state = MEI_DEV_DISABLED;
+ dev->dev_state = MEI_DEV_RESETTING;
return ret;
}
@@ -166,6 +163,7 @@ EXPORT_SYMBOL_GPL(mei_reset);
*/
int mei_start(struct mei_device *dev)
{
+ int ret;
mutex_lock(&dev->device_lock);
/* acknowledge interrupt and stop interrupts */
@@ -175,10 +173,18 @@ int mei_start(struct mei_device *dev)
dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
- dev->dev_state = MEI_DEV_INITIALIZING;
dev->reset_count = 0;
- mei_reset(dev);
+ do {
+ dev->dev_state = MEI_DEV_INITIALIZING;
+ ret = mei_reset(dev);
+
+ if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
+ dev_err(&dev->pdev->dev, "reset failed ret = %d", ret);
+ goto err;
+ }
+ } while (ret);
+ /* we cannot start the device w/o hbm start message completed */
if (dev->dev_state == MEI_DEV_DISABLED) {
dev_err(&dev->pdev->dev, "reset failed");
goto err;
@@ -238,27 +244,40 @@ int mei_restart(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
- if (err || dev->dev_state == MEI_DEV_DISABLED)
+ if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
+ dev_err(&dev->pdev->dev, "device disabled = %d\n", err);
return -ENODEV;
+ }
+
+ /* try to start again */
+ if (err)
+ schedule_work(&dev->reset_work);
+
return 0;
}
EXPORT_SYMBOL_GPL(mei_restart);
-
static void mei_reset_work(struct work_struct *work)
{
struct mei_device *dev =
container_of(work, struct mei_device, reset_work);
+ int ret;
mutex_lock(&dev->device_lock);
- mei_reset(dev);
+ ret = mei_reset(dev);
mutex_unlock(&dev->device_lock);
- if (dev->dev_state == MEI_DEV_DISABLED)
- dev_err(&dev->pdev->dev, "reset failed");
+ if (dev->dev_state == MEI_DEV_DISABLED) {
+ dev_err(&dev->pdev->dev, "device disabled = %d\n", ret);
+ return;
+ }
+
+ /* retry reset in case of failure */
+ if (ret)
+ schedule_work(&dev->reset_work);
}
void mei_stop(struct mei_device *dev)
@@ -269,6 +288,8 @@ void mei_stop(struct mei_device *dev)
mei_nfc_host_exit(dev);
+ mei_cl_bus_remove_devices(dev);
+
mutex_lock(&dev->device_lock);
mei_wd_stop(dev);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index f0fbb5179f80..29b5af8efb71 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -26,7 +26,6 @@
#include "mei_dev.h"
#include "hbm.h"
-#include "hw-me.h"
#include "client.h"
@@ -161,29 +160,63 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,
}
/**
+ * mei_cl_irq_disconnect_rsp - send disconnection response message
+ *
+ * @cl: client
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * returns 0, OK; otherwise, error.
+ */
+static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
+{
+ struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
+ int ret;
+
+ slots = mei_hbuf_empty_slots(dev);
+ msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));
+
+ if (slots < msg_slots)
+ return -EMSGSIZE;
+
+ ret = mei_hbm_cl_disconnect_rsp(dev, cl);
+
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->status = 0;
+ list_del(&cb->list);
+ mei_io_cb_free(cb);
+
+ return ret;
+}
+
+
+
+/**
* mei_cl_irq_close - processes close related operation from
* interrupt thread context - send disconnect request
*
* @cl: client
* @cb: callback block.
- * @slots: free slots.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list)
+ struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
- u32 msg_slots =
- mei_data2slots(sizeof(struct hbm_client_connect_request));
+ msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ slots = mei_hbuf_empty_slots(dev);
- if (*slots < msg_slots)
+ if (slots < msg_slots)
return -EMSGSIZE;
- *slots -= msg_slots;
-
if (mei_hbm_cl_disconnect_req(dev, cl)) {
cl->status = 0;
cb->buf_idx = 0;
@@ -207,27 +240,23 @@ static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb,
*
* @cl: client
* @cb: callback block.
- * @slots: free slots.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list)
+ struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev = cl->dev;
- u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
-
+ u32 msg_slots;
+ int slots;
int ret;
+ msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
+ slots = mei_hbuf_empty_slots(dev);
- if (*slots < msg_slots) {
- /* return the cancel routine */
- list_del(&cb->list);
+ if (slots < msg_slots)
return -EMSGSIZE;
- }
-
- *slots -= msg_slots;
ret = mei_hbm_cl_flow_control_req(dev, cl);
if (ret) {
@@ -244,32 +273,30 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
/**
- * mei_cl_irq_ioctl - processes client ioctl related operation from the
- * interrupt thread context - send connection request
+ * mei_cl_irq_connect - send connect request in irq_thread context
*
* @cl: client
* @cb: callback block.
- * @slots: free slots.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
-static int mei_cl_irq_ioctl(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list)
+static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
int ret;
- u32 msg_slots =
- mei_data2slots(sizeof(struct hbm_client_connect_request));
+ msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ slots = mei_hbuf_empty_slots(dev);
- if (*slots < msg_slots) {
- /* return the cancel routine */
- list_del(&cb->list);
- return -EMSGSIZE;
- }
+ if (mei_cl_is_other_connecting(cl))
+ return 0;
- *slots -= msg_slots;
+ if (slots < msg_slots)
+ return -EMSGSIZE;
cl->state = MEI_FILE_CONNECTING;
@@ -323,7 +350,7 @@ int mei_irq_read_handler(struct mei_device *dev,
dev_err(&dev->pdev->dev, "less data available than length=%08x.\n",
*slots);
/* we can't read the message */
- ret = -ERANGE;
+ ret = -ENODATA;
goto end;
}
@@ -409,10 +436,10 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
s32 slots;
int ret;
- if (!mei_hbuf_is_ready(dev)) {
- dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
+
+ if (!mei_hbuf_acquire(dev))
return 0;
- }
+
slots = mei_hbuf_empty_slots(dev);
if (slots <= 0)
return -EMSGSIZE;
@@ -447,29 +474,16 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
if (dev->wd_state == MEI_WD_STOPPING) {
dev->wd_state = MEI_WD_IDLE;
- wake_up_interruptible(&dev->wait_stop_wd);
+ wake_up(&dev->wait_stop_wd);
}
- if (dev->wr_ext_msg.hdr.length) {
- mei_write_message(dev, &dev->wr_ext_msg.hdr,
- dev->wr_ext_msg.data);
- slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
- dev->wr_ext_msg.hdr.length = 0;
- }
- if (dev->dev_state == MEI_DEV_ENABLED) {
+ if (mei_cl_is_connected(&dev->wd_cl)) {
if (dev->wd_pending &&
mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
- if (mei_wd_send(dev))
- dev_dbg(&dev->pdev->dev, "wd send failed.\n");
- else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
- return -ENODEV;
-
+ ret = mei_wd_send(dev);
+ if (ret)
+ return ret;
dev->wd_pending = false;
-
- if (dev->wd_state == MEI_WD_RUNNING)
- slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
- else
- slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
}
}
@@ -484,28 +498,31 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
switch (cb->fop_type) {
case MEI_FOP_CLOSE:
/* send disconnect message */
- ret = mei_cl_irq_close(cl, cb, &slots, cmpl_list);
+ ret = mei_cl_irq_close(cl, cb, cmpl_list);
if (ret)
return ret;
break;
case MEI_FOP_READ:
/* send flow control message */
- ret = mei_cl_irq_read(cl, cb, &slots, cmpl_list);
+ ret = mei_cl_irq_read(cl, cb, cmpl_list);
if (ret)
return ret;
break;
- case MEI_FOP_IOCTL:
+ case MEI_FOP_CONNECT:
/* connect message */
- if (mei_cl_is_other_connecting(cl))
- continue;
- ret = mei_cl_irq_ioctl(cl, cb, &slots, cmpl_list);
+ ret = mei_cl_irq_connect(cl, cb, cmpl_list);
if (ret)
return ret;
break;
-
+ case MEI_FOP_DISCONNECT_RSP:
+ /* send disconnect resp */
+ ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
+ if (ret)
+ return ret;
+ break;
default:
BUG();
}
@@ -518,11 +535,9 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
if (cl == NULL)
continue;
if (cl == &dev->iamthif_cl)
- ret = mei_amthif_irq_write_complete(cl, cb,
- &slots, cmpl_list);
+ ret = mei_amthif_irq_write(cl, cb, cmpl_list);
else
- ret = mei_cl_irq_write_complete(cl, cb,
- &slots, cmpl_list);
+ ret = mei_cl_irq_write(cl, cb, cmpl_list);
if (ret)
return ret;
}
@@ -541,8 +556,7 @@ EXPORT_SYMBOL_GPL(mei_irq_write_handler);
void mei_timer(struct work_struct *work)
{
unsigned long timeout;
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
+ struct mei_cl *cl;
struct mei_cl_cb *cb_pos = NULL;
struct mei_cl_cb *cb_next = NULL;
@@ -570,9 +584,9 @@ void mei_timer(struct work_struct *work)
goto out;
/*** connect/disconnect timeouts ***/
- list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
- if (cl_pos->timer_count) {
- if (--cl_pos->timer_count == 0) {
+ list_for_each_entry(cl, &dev->file_list, link) {
+ if (cl->timer_count) {
+ if (--cl->timer_count == 0) {
dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n");
mei_reset(dev);
goto out;
@@ -580,6 +594,9 @@ void mei_timer(struct work_struct *work)
}
}
+ if (!mei_cl_is_connected(&dev->iamthif_cl))
+ goto out;
+
if (dev->iamthif_stall_timer) {
if (--dev->iamthif_stall_timer == 0) {
dev_err(&dev->pdev->dev, "timer: amthif hanged.\n");
@@ -619,10 +636,10 @@ void mei_timer(struct work_struct *work)
list_for_each_entry_safe(cb_pos, cb_next,
&dev->amthif_rd_complete_list.list, list) {
- cl_pos = cb_pos->file_object->private_data;
+ cl = cb_pos->file_object->private_data;
/* Finding the AMTHI entry. */
- if (cl_pos == &dev->iamthif_cl)
+ if (cl == &dev->iamthif_cl)
list_del(&cb_pos->list);
}
mei_io_cb_free(dev->iamthif_current_cb);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 5424f8ff3f7f..b35594dbf52f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -13,9 +13,6 @@
* more details.
*
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -40,7 +37,6 @@
#include <linux/mei.h>
#include "mei_dev.h"
-#include "hw-me.h"
#include "client.h"
/**
@@ -129,17 +125,11 @@ static int mei_release(struct inode *inode, struct file *file)
}
if (cl->state == MEI_FILE_CONNECTED) {
cl->state = MEI_FILE_DISCONNECTING;
- dev_dbg(&dev->pdev->dev,
- "disconnecting client host client = %d, "
- "ME client = %d\n",
- cl->host_client_id,
- cl->me_client_id);
+ cl_dbg(dev, cl, "disconnecting\n");
rets = mei_cl_disconnect(cl);
}
mei_cl_flush_queues(cl);
- dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
- cl->host_client_id,
- cl->me_client_id);
+ cl_dbg(dev, cl, "removing\n");
mei_cl_unlink(cl);
@@ -284,6 +274,7 @@ copy_buffer:
length = min_t(size_t, length, cb->buf_idx - *offset);
if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
+ dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
goto free;
}
@@ -340,7 +331,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
id = mei_me_cl_by_id(dev, cl->me_client_id);
if (id < 0) {
- rets = -ENODEV;
+ rets = -ENOTTY;
goto out;
}
@@ -404,7 +395,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
if (rets) {
- dev_err(&dev->pdev->dev, "failed to copy data from userland\n");
+ dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
}
@@ -471,7 +462,7 @@ static int mei_ioctl_connect_client(struct file *file,
if (i < 0 || dev->me_clients[i].props.fixed_address) {
dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n",
&data->in_client_uuid);
- rets = -ENODEV;
+ rets = -ENOTTY;
goto end;
}
@@ -569,7 +560,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
if (copy_from_user(connect_data, (char __user *)data,
sizeof(struct mei_connect_client_data))) {
- dev_err(&dev->pdev->dev, "failed to copy data from userland\n");
+ dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
}
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index f7de95b4cdd9..94a516716d22 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -24,7 +24,6 @@
#include <linux/mei_cl_bus.h>
#include "hw.h"
-#include "hw-me-regs.h"
#include "hbm.h"
/*
@@ -130,16 +129,18 @@ enum mei_wd_states {
/**
* enum mei_cb_file_ops - file operation associated with the callback
- * @MEI_FOP_READ - read
- * @MEI_FOP_WRITE - write
- * @MEI_FOP_IOCTL - ioctl
- * @MEI_FOP_OPEN - open
- * @MEI_FOP_CLOSE - close
+ * @MEI_FOP_READ - read
+ * @MEI_FOP_WRITE - write
+ * @MEI_FOP_CONNECT - connect
+ * @MEI_FOP_DISCONNECT_RSP - disconnect response
+ * @MEI_FOP_OPEN - open
+ * @MEI_FOP_CLOSE - close
*/
enum mei_cb_file_ops {
MEI_FOP_READ = 0,
MEI_FOP_WRITE,
- MEI_FOP_IOCTL,
+ MEI_FOP_CONNECT,
+ MEI_FOP_DISCONNECT_RSP,
MEI_FOP_OPEN,
MEI_FOP_CLOSE
};
@@ -236,20 +237,20 @@ struct mei_cl {
*/
struct mei_hw_ops {
- bool (*host_is_ready) (struct mei_device *dev);
+ bool (*host_is_ready)(struct mei_device *dev);
- bool (*hw_is_ready) (struct mei_device *dev);
- int (*hw_reset) (struct mei_device *dev, bool enable);
- int (*hw_start) (struct mei_device *dev);
- void (*hw_config) (struct mei_device *dev);
+ bool (*hw_is_ready)(struct mei_device *dev);
+ int (*hw_reset)(struct mei_device *dev, bool enable);
+ int (*hw_start)(struct mei_device *dev);
+ void (*hw_config)(struct mei_device *dev);
- void (*intr_clear) (struct mei_device *dev);
- void (*intr_enable) (struct mei_device *dev);
- void (*intr_disable) (struct mei_device *dev);
+ void (*intr_clear)(struct mei_device *dev);
+ void (*intr_enable)(struct mei_device *dev);
+ void (*intr_disable)(struct mei_device *dev);
- int (*hbuf_free_slots) (struct mei_device *dev);
- bool (*hbuf_is_ready) (struct mei_device *dev);
- size_t (*hbuf_max_len) (const struct mei_device *dev);
+ int (*hbuf_free_slots)(struct mei_device *dev);
+ bool (*hbuf_is_ready)(struct mei_device *dev);
+ size_t (*hbuf_max_len)(const struct mei_device *dev);
int (*write)(struct mei_device *dev,
struct mei_msg_hdr *hdr,
@@ -258,7 +259,7 @@ struct mei_hw_ops {
int (*rdbuf_full_slots)(struct mei_device *dev);
u32 (*read_hdr)(const struct mei_device *dev);
- int (*read) (struct mei_device *dev,
+ int (*read)(struct mei_device *dev,
unsigned char *buf, unsigned long len);
};
@@ -294,6 +295,7 @@ int __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length);
int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length);
int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
void mei_cl_bus_rx_event(struct mei_cl *cl);
+void mei_cl_bus_remove_devices(struct mei_device *dev);
int mei_cl_bus_init(void);
void mei_cl_bus_exit(void);
@@ -339,7 +341,6 @@ struct mei_cl_device {
* @hbuf_depth - depth of hardware host/write buffer is slots
* @hbuf_is_ready - query if the host host/write buffer is ready
* @wr_msg - the buffer for hbm control messages
- * @wr_ext_msg - the buffer for hbm control responses (set in read cycle)
*/
struct mei_device {
struct pci_dev *pdev; /* pointer to pci device struct */
@@ -394,11 +395,6 @@ struct mei_device {
unsigned char data[128];
} wr_msg;
- struct {
- struct mei_msg_hdr hdr;
- unsigned char data[4]; /* All HBM messages are 4 bytes */
- } wr_ext_msg; /* for control responses */
-
struct hbm_version version;
struct mei_me_client *me_clients; /* Note: memory has to be allocated */
@@ -518,8 +514,8 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
void mei_amthif_run_next_cmd(struct mei_device *dev);
-int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list);
+int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list);
void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);
int mei_amthif_irq_read_msg(struct mei_device *dev,
@@ -546,7 +542,7 @@ int mei_wd_host_init(struct mei_device *dev);
* once we got connection to the WD Client
* @dev - mei device
*/
-void mei_watchdog_register(struct mei_device *dev);
+int mei_watchdog_register(struct mei_device *dev);
/*
* mei_watchdog_unregister - Unregistering watchdog interface
* @dev - mei device
@@ -633,6 +629,8 @@ static inline int mei_count_full_read_slots(struct mei_device *dev)
return dev->ops->rdbuf_full_slots(dev);
}
+bool mei_hbuf_acquire(struct mei_device *dev);
+
#if IS_ENABLED(CONFIG_DEBUG_FS)
int mei_dbgfs_register(struct mei_device *dev, const char *name);
void mei_dbgfs_deregister(struct mei_device *dev);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index a58320c0c049..3095fc514a65 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -364,7 +364,7 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
if (!wait_event_interruptible_timeout(ndev->send_wq,
ndev->recv_req_id == ndev->req_id, HZ)) {
dev_err(&dev->pdev->dev, "NFC MEI command timeout\n");
- err = -ETIMEDOUT;
+ err = -ETIME;
} else {
ndev->req_id++;
}
@@ -502,7 +502,7 @@ int mei_nfc_host_init(struct mei_device *dev)
i = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
if (i < 0) {
dev_info(&dev->pdev->dev, "nfc: failed to find the client\n");
- ret = -ENOENT;
+ ret = -ENOTTY;
goto err;
}
@@ -520,7 +520,7 @@ int mei_nfc_host_init(struct mei_device *dev)
i = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
if (i < 0) {
dev_info(&dev->pdev->dev, "nfc: failed to find the client\n");
- ret = -ENOENT;
+ ret = -ENOTTY;
goto err;
}
@@ -552,13 +552,7 @@ err:
void mei_nfc_host_exit(struct mei_device *dev)
{
struct mei_nfc_dev *ndev = &nfc_dev;
-
cancel_work_sync(&ndev->init_work);
+}
- mutex_lock(&dev->device_lock);
- if (ndev->cl && ndev->cl->device)
- mei_cl_remove_device(ndev->cl->device);
- mei_nfc_free(ndev);
- mutex_unlock(&dev->device_lock);
-}
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index ddadd08956f4..1c8fd3a3e135 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -13,9 +13,6 @@
* more details.
*
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -27,7 +24,6 @@
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
@@ -40,11 +36,12 @@
#include <linux/mei.h>
#include "mei_dev.h"
-#include "hw-me.h"
#include "client.h"
+#include "hw-me-regs.h"
+#include "hw-me.h"
/* mei_pci_tbl - PCI Device ID Table */
-static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = {
+static const struct pci_device_id mei_me_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
@@ -270,7 +267,7 @@ static void mei_me_remove(struct pci_dev *pdev)
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
@@ -330,11 +327,12 @@ static int mei_me_pci_resume(struct device *device)
return 0;
}
+
static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume);
#define MEI_ME_PM_OPS (&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
/*
* PCI driver structure
*/
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
new file mode 100644
index 000000000000..ad3adb009a1e
--- /dev/null
+++ b/drivers/misc/mei/pci-txe.c
@@ -0,0 +1,293 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/uuid.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+#include <linux/mei.h>
+
+
+#include "mei_dev.h"
+#include "hw-txe.h"
+
+static const struct pci_device_id mei_txe_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F18)}, /* Baytrail */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);
+
+
+static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
+{
+ int i;
+ for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
+ if (hw->mem_addr[i]) {
+ pci_iounmap(pdev, hw->mem_addr[i]);
+ hw->mem_addr[i] = NULL;
+ }
+ }
+}
+/**
+ * mei_txe_probe - Device Initialization Routine
+ *
+ * @pdev: PCI device structure
+ * @ent: entry in mei_txe_pci_tbl
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct mei_device *dev;
+ struct mei_txe_hw *hw;
+ int err;
+ int i;
+
+ /* enable pci dev */
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pci device.\n");
+ goto end;
+ }
+ /* set PCI host mastering */
+ pci_set_master(pdev);
+ /* pci request regions for mei driver */
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get pci regions.\n");
+ goto disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No suitable DMA available.\n");
+ goto release_regions;
+ }
+ }
+
+ /* allocates and initializes the mei dev structure */
+ dev = mei_txe_dev_init(pdev);
+ if (!dev) {
+ err = -ENOMEM;
+ goto release_regions;
+ }
+ hw = to_txe_hw(dev);
+
+ /* mapping IO device memory */
+ for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
+ hw->mem_addr[i] = pci_iomap(pdev, i, 0);
+ if (!hw->mem_addr[i]) {
+ dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+ }
+
+
+ pci_enable_msi(pdev);
+
+ /* clear spurious interrupts */
+ mei_clear_interrupts(dev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_txe_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_txe_irq_quick_handler,
+ mei_txe_irq_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (err) {
+ dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
+ pdev->irq);
+ goto free_device;
+ }
+
+ if (mei_start(dev)) {
+ dev_err(&pdev->dev, "init hw failure.\n");
+ err = -ENODEV;
+ goto release_irq;
+ }
+
+ err = mei_register(dev);
+ if (err)
+ goto release_irq;
+
+ pci_set_drvdata(pdev, dev);
+
+ return 0;
+
+release_irq:
+
+ mei_cancel_work(dev);
+
+ /* disable interrupts */
+ mei_disable_interrupts(dev);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+
+free_device:
+ mei_txe_pci_iounmap(pdev, hw);
+
+ kfree(dev);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+end:
+ dev_err(&pdev->dev, "initialization failed.\n");
+ return err;
+}
+
+/**
+ * mei_txe_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_txe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void mei_txe_remove(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+ struct mei_txe_hw *hw;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev) {
+ dev_err(&pdev->dev, "mei: dev = NULL\n");
+ return;
+ }
+
+ hw = to_txe_hw(dev);
+
+ mei_stop(dev);
+
+ /* disable interrupts */
+ mei_disable_interrupts(dev);
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ mei_txe_pci_iounmap(pdev, hw);
+
+ mei_deregister(dev);
+
+ kfree(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+
+#ifdef CONFIG_PM_SLEEP
+static int mei_txe_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev = pci_get_drvdata(pdev);
+
+ if (!dev)
+ return -ENODEV;
+
+ dev_dbg(&pdev->dev, "suspend\n");
+
+ mei_stop(dev);
+
+ mei_disable_interrupts(dev);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+
+ return 0;
+}
+
+static int mei_txe_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int err;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ pci_enable_msi(pdev);
+
+ mei_clear_interrupts(dev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_txe_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_txe_irq_quick_handler,
+ mei_txe_irq_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (err) {
+ dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
+ pdev->irq);
+ return err;
+ }
+
+ err = mei_restart(dev);
+
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(mei_txe_pm_ops,
+ mei_txe_pci_suspend,
+ mei_txe_pci_resume);
+
+#define MEI_TXE_PM_OPS (&mei_txe_pm_ops)
+#else
+#define MEI_TXE_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+/*
+ * PCI driver structure
+ */
+static struct pci_driver mei_txe_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mei_txe_pci_tbl,
+ .probe = mei_txe_probe,
+ .remove = mei_txe_remove,
+ .shutdown = mei_txe_remove,
+ .driver.pm = MEI_TXE_PM_OPS,
+};
+
+module_pci_driver(mei_txe_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
+MODULE_LICENSE("GPL v2");
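
The probe and resume paths of the new TXE driver pick the interrupt flags based on whether MSI could be enabled: with MSI the vector is exclusive, so only the threaded handler is installed with IRQF_ONESHOT, while the legacy INTx case keeps a quick handler to filter the possibly shared line. The decision, distilled from the hunks above with explanatory comments added:

	pci_enable_msi(pdev);

	if (pci_dev_msi_enabled(pdev))
		/* MSI: nothing to filter in hard-irq context */
		err = request_threaded_irq(pdev->irq, NULL,
					   mei_txe_irq_thread_handler,
					   IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		/* legacy INTx: quick handler checks ownership, line may be shared */
		err = request_threaded_irq(pdev->irq,
					   mei_txe_irq_quick_handler,
					   mei_txe_irq_thread_handler,
					   IRQF_SHARED, KBUILD_MODNAME, dev);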
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index f70945ed96f6..ebf1cbc198fd 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -25,7 +25,6 @@
#include "mei_dev.h"
#include "hbm.h"
-#include "hw-me.h"
#include "client.h"
static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
@@ -53,7 +52,7 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
*
* @dev: the device structure
*
- * returns -ENENT if wd client cannot be found
+ * returns -ENOTTY if wd client cannot be found
* -EIO if write has failed
* 0 on success
*/
@@ -73,7 +72,7 @@ int mei_wd_host_init(struct mei_device *dev)
id = mei_me_cl_by_uuid(dev, &mei_wd_guid);
if (id < 0) {
dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
- return id;
+ return -ENOTTY;
}
cl->me_client_id = dev->me_clients[id].client_id;
@@ -87,15 +86,20 @@ int mei_wd_host_init(struct mei_device *dev)
cl->state = MEI_FILE_CONNECTING;
- if (mei_hbm_cl_connect_req(dev, cl)) {
- dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n");
- cl->state = MEI_FILE_DISCONNECTED;
- cl->host_client_id = 0;
- return -EIO;
+ ret = mei_cl_connect(cl, NULL);
+
+ if (ret) {
+ dev_err(&dev->pdev->dev, "wd: failed to connect = %d\n", ret);
+ mei_cl_unlink(cl);
+ return ret;
}
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- return 0;
+ ret = mei_watchdog_register(dev);
+ if (ret) {
+ mei_cl_disconnect(cl);
+ mei_cl_unlink(cl);
+ }
+ return ret;
}
/**
@@ -106,13 +110,16 @@ int mei_wd_host_init(struct mei_device *dev)
* returns 0 if success,
* -EIO when message send fails
* -EINVAL when invalid message is to be sent
+ * -ENODEV on flow control failure
*/
int mei_wd_send(struct mei_device *dev)
{
+ struct mei_cl *cl = &dev->wd_cl;
struct mei_msg_hdr hdr;
+ int ret;
- hdr.host_addr = dev->wd_cl.host_client_id;
- hdr.me_addr = dev->wd_cl.me_client_id;
+ hdr.host_addr = cl->host_client_id;
+ hdr.me_addr = cl->me_client_id;
hdr.msg_complete = 1;
hdr.reserved = 0;
hdr.internal = 0;
@@ -121,10 +128,24 @@ int mei_wd_send(struct mei_device *dev)
hdr.length = MEI_WD_START_MSG_SIZE;
else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
hdr.length = MEI_WD_STOP_MSG_SIZE;
- else
+ else {
+ dev_err(&dev->pdev->dev, "wd: invalid message is to be sent, aborting\n");
return -EINVAL;
+ }
- return mei_write_message(dev, &hdr, dev->wd_data);
+ ret = mei_write_message(dev, &hdr, dev->wd_data);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "wd: write message failed\n");
+ return ret;
+ }
+
+ ret = mei_cl_flow_ctrl_reduce(cl);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "wd: flow_ctrl_reduce failed.\n");
+ return ret;
+ }
+
+ return 0;
}
/**
@@ -133,9 +154,11 @@ int mei_wd_send(struct mei_device *dev)
* @dev: the device structure
* @preserve: indicate if to keep the timeout value
*
- * returns 0 if success,
- * -EIO when message send fails
+ * returns 0 if success
+ * on error:
+ * -EIO when message send fails
* -EINVAL when invalid message is to be sent
+ * -ETIME on message timeout
*/
int mei_wd_stop(struct mei_device *dev)
{
@@ -151,20 +174,12 @@ int mei_wd_stop(struct mei_device *dev)
ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
if (ret < 0)
- goto out;
-
- if (ret && dev->hbuf_is_ready) {
- ret = 0;
- dev->hbuf_is_ready = false;
-
- if (!mei_wd_send(dev)) {
- ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl);
- if (ret)
- goto out;
- } else {
- dev_err(&dev->pdev->dev, "wd: send stop failed\n");
- }
+ goto err;
+ if (ret && mei_hbuf_acquire(dev)) {
+ ret = mei_wd_send(dev);
+ if (ret)
+ goto err;
dev->wd_pending = false;
} else {
dev->wd_pending = true;
@@ -172,21 +187,21 @@ int mei_wd_stop(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
- ret = wait_event_interruptible_timeout(dev->wait_stop_wd,
- dev->wd_state == MEI_WD_IDLE,
- msecs_to_jiffies(MEI_WD_STOP_TIMEOUT));
+ ret = wait_event_timeout(dev->wait_stop_wd,
+ dev->wd_state == MEI_WD_IDLE,
+ msecs_to_jiffies(MEI_WD_STOP_TIMEOUT));
mutex_lock(&dev->device_lock);
- if (dev->wd_state == MEI_WD_IDLE) {
- dev_dbg(&dev->pdev->dev, "wd: stop completed ret=%d.\n", ret);
- ret = 0;
- } else {
- if (!ret)
- ret = -ETIMEDOUT;
+ if (dev->wd_state != MEI_WD_IDLE) {
+ /* timeout */
+ ret = -ETIME;
dev_warn(&dev->pdev->dev,
"wd: stop failed to complete ret=%d.\n", ret);
+ goto err;
}
-
-out:
+ dev_dbg(&dev->pdev->dev, "wd: stop completed after %u msec\n",
+ MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret));
+ return 0;
+err:
return ret;
}
@@ -260,8 +275,8 @@ static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
*/
static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
{
- int ret = 0;
struct mei_device *dev;
+ int ret;
dev = watchdog_get_drvdata(wd_dev);
if (!dev)
@@ -277,25 +292,18 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
dev->wd_state = MEI_WD_RUNNING;
+ ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
+ if (ret < 0)
+ goto end;
/* Check if we can send the ping to HW*/
- if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
+ if (ret && mei_hbuf_acquire(dev)) {
- dev->hbuf_is_ready = false;
dev_dbg(&dev->pdev->dev, "wd: sending ping\n");
- if (mei_wd_send(dev)) {
- dev_err(&dev->pdev->dev, "wd: send failed.\n");
- ret = -EIO;
+ ret = mei_wd_send(dev);
+ if (ret)
goto end;
- }
-
- if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) {
- dev_err(&dev->pdev->dev,
- "wd: mei_cl_flow_ctrl_reduce() failed.\n");
- ret = -EIO;
- goto end;
- }
-
+ dev->wd_pending = false;
} else {
dev->wd_pending = true;
}
@@ -363,17 +371,25 @@ static struct watchdog_device amt_wd_dev = {
};
-void mei_watchdog_register(struct mei_device *dev)
+int mei_watchdog_register(struct mei_device *dev)
{
- if (watchdog_register_device(&amt_wd_dev)) {
- dev_err(&dev->pdev->dev,
- "wd: unable to register watchdog device.\n");
- return;
+
+ int ret;
+
+ /* unlock to preserve correct locking order */
+ mutex_unlock(&dev->device_lock);
+ ret = watchdog_register_device(&amt_wd_dev);
+ mutex_lock(&dev->device_lock);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "wd: unable to register watchdog device = %d.\n",
+ ret);
+ return ret;
}
dev_dbg(&dev->pdev->dev,
"wd: successfully register watchdog interface.\n");
watchdog_set_drvdata(&amt_wd_dev, dev);
+ return 0;
}
void mei_watchdog_unregister(struct mei_device *dev)
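
mei_watchdog_register() now returns an error code and, more importantly, drops device_lock around watchdog_register_device(): the watchdog core takes its own locks and may call back into the driver, so keeping device_lock held across registration risks an inverted lock order. The shape of the pattern, distilled from the hunk above:

	/* release our lock while handing control to the watchdog core */
	mutex_unlock(&dev->device_lock);
	ret = watchdog_register_device(&amt_wd_dev);
	mutex_lock(&dev->device_lock);
	if (ret)
		return ret;

	watchdog_set_drvdata(&amt_wd_dev, dev);
	return 0;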
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index e42b331edbc6..462a5b1d8651 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -4,7 +4,6 @@ config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
depends on 64BIT && PCI && X86
select VHOST_RING
- default N
help
This enables Host Driver support for the Intel Many Integrated
Core (MIC) family of PCIe form factor coprocessor devices that
@@ -25,7 +24,6 @@ config INTEL_MIC_CARD
tristate "Intel MIC Card Driver"
depends on 64BIT && X86
select VIRTIO
- default N
help
This enables card driver support for the Intel Many Integrated
Core (MIC) device family. The card driver communicates shutdown/
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
index 347b9b3b7916..306f502be95e 100644
--- a/drivers/misc/mic/card/mic_device.h
+++ b/drivers/misc/mic/card/mic_device.h
@@ -29,6 +29,7 @@
#include <linux/workqueue.h>
#include <linux/io.h>
+#include <linux/irqreturn.h>
/**
* struct mic_intr_info - Contains h/w specific interrupt sources info
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 1a6edce2ecde..0398c696d257 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -24,6 +24,7 @@
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/notifier.h>
+#include <linux/irqreturn.h>
#include "mic_intr.h"
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
index f9c29bc918bc..dbc5afde1392 100644
--- a/drivers/misc/mic/host/mic_intr.c
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -194,7 +194,7 @@ static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev)
for (i = 0; i < MIC_MIN_MSIX; i++)
mdev->irq_info.msix_entries[i].entry = i;
- rc = pci_enable_msix(pdev, mdev->irq_info.msix_entries,
+ rc = pci_enable_msix_exact(pdev, mdev->irq_info.msix_entries,
MIC_MIN_MSIX);
if (rc) {
dev_dbg(&pdev->dev, "Error enabling MSIx. rc = %d\n", rc);
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index a5925f7f17f6..956597321d2a 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -636,6 +636,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
u8 mac[ETH_ALEN];
ssize_t rom_size;
struct pch_phub_reg *chip = dev_get_drvdata(dev);
+ int ret;
if (!mac_pton(buf, mac))
return -EINVAL;
@@ -644,8 +645,10 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
if (!chip->pch_phub_extrom_base_address)
return -ENOMEM;
- pch_phub_write_gbe_mac_addr(chip, mac);
+ ret = pch_phub_write_gbe_mac_addr(chip, mac);
pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 2bef3f76032a..a3700a56b8ff 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -178,10 +178,10 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
hdr.cbrcnt = cbrcnt;
hdr.dsrcnt = dsrcnt;
hdr.cch_locked = cch_locked;
- if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr)))
- ret = -EFAULT;
+ if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
+ return -EFAULT;
- return ret ? ret : bytes;
+ return bytes;
}
int gru_dump_chiplet_request(unsigned long arg)
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index afe66571ce0b..21181fa243df 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -24,6 +24,9 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -36,14 +39,35 @@ struct sram_dev {
struct clk *clk;
};
+struct sram_reserve {
+ struct list_head list;
+ u32 start;
+ u32 size;
+};
+
+static int sram_reserve_cmp(void *priv, struct list_head *a,
+ struct list_head *b)
+{
+ struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
+ struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);
+
+ return ra->start - rb->start;
+}
+
static int sram_probe(struct platform_device *pdev)
{
void __iomem *virt_base;
struct sram_dev *sram;
struct resource *res;
- unsigned long size;
+ struct device_node *np = pdev->dev.of_node, *child;
+ unsigned long size, cur_start, cur_size;
+ struct sram_reserve *rblocks, *block;
+ struct list_head reserve_list;
+ unsigned int nblocks;
int ret;
+ INIT_LIST_HEAD(&reserve_list);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
virt_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(virt_base))
@@ -65,19 +89,106 @@ static int sram_probe(struct platform_device *pdev)
if (!sram->pool)
return -ENOMEM;
- ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base,
- res->start, size, -1);
- if (ret < 0) {
- if (sram->clk)
- clk_disable_unprepare(sram->clk);
- return ret;
+ /*
+ * We need an additional block to mark the end of the memory region
+ * after the reserved blocks from the dt are processed.
+ */
+ nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
+ rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
+ if (!rblocks) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ block = &rblocks[0];
+ for_each_available_child_of_node(np, child) {
+ struct resource child_res;
+
+ ret = of_address_to_resource(child, 0, &child_res);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "could not get address for node %s\n",
+ child->full_name);
+ goto err_chunks;
+ }
+
+ if (child_res.start < res->start || child_res.end > res->end) {
+ dev_err(&pdev->dev,
+ "reserved block %s outside the sram area\n",
+ child->full_name);
+ ret = -EINVAL;
+ goto err_chunks;
+ }
+
+ block->start = child_res.start - res->start;
+ block->size = resource_size(&child_res);
+ list_add_tail(&block->list, &reserve_list);
+
+ dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n",
+ block->start,
+ block->start + block->size);
+
+ block++;
+ }
+
+ /* the last chunk marks the end of the region */
+ rblocks[nblocks - 1].start = size;
+ rblocks[nblocks - 1].size = 0;
+ list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);
+
+ list_sort(NULL, &reserve_list, sram_reserve_cmp);
+
+ cur_start = 0;
+
+ list_for_each_entry(block, &reserve_list, list) {
+ /* can only happen if sections overlap */
+ if (block->start < cur_start) {
+ dev_err(&pdev->dev,
+ "block at 0x%x starts after current offset 0x%lx\n",
+ block->start, cur_start);
+ ret = -EINVAL;
+ goto err_chunks;
+ }
+
+ /* current start is in a reserved block, so continue after it */
+ if (block->start == cur_start) {
+ cur_start = block->start + block->size;
+ continue;
+ }
+
+ /*
+ * allocate the space between the current starting
+ * address and the following reserved block, or the
+ * end of the region.
+ */
+ cur_size = block->start - cur_start;
+
+ dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n",
+ cur_start, cur_start + cur_size);
+ ret = gen_pool_add_virt(sram->pool,
+ (unsigned long)virt_base + cur_start,
+ res->start + cur_start, cur_size, -1);
+ if (ret < 0)
+ goto err_chunks;
+
+ /* next allocation after this reserved block */
+ cur_start = block->start + block->size;
}
+ kfree(rblocks);
+
platform_set_drvdata(pdev, sram);
dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base);
return 0;
+
+err_chunks:
+ kfree(rblocks);
+err_alloc:
+ if (sram->clk)
+ clk_disable_unprepare(sram->clk);
+ return ret;
}
static int sram_remove(struct platform_device *pdev)
@@ -87,8 +198,6 @@ static int sram_remove(struct platform_device *pdev)
if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
dev_dbg(&pdev->dev, "removed while SRAM allocated\n");
- gen_pool_destroy(sram->pool);
-
if (sram->clk)
clk_disable_unprepare(sram->clk);
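
The reworked probe sorts the reserved sub-regions declared in the device tree by start offset and exports only the gaps between them to the gen_pool; a zero-sized sentinel at the end of the region closes the last gap. As a worked example with assumed numbers, not taken from the patch: an SRAM of size 0x10000 with one reserved block at offset 0x4000, size 0x1000, yields the pool chunks 0x0-0x4000 and 0x5000-0x10000. A standalone sketch of just that gap arithmetic:

#include <stdio.h>

struct reserve { unsigned int start, size; };

int main(void)
{
	/* hypothetical layout: one reserved block plus the {size, 0} sentinel */
	struct reserve blocks[] = { { 0x4000, 0x1000 }, { 0x10000, 0 } };
	unsigned int cur = 0;

	for (unsigned int i = 0; i < 2; i++) {
		if (blocks[i].start > cur)	/* export the gap before this block */
			printf("chunk 0x%x-0x%x\n", cur, blocks[i].start);
		cur = blocks[i].start + blocks[i].size;	/* skip the reserved hole */
	}
	return 0;
}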
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 3aed525e55b4..1972d57aadb3 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -22,7 +22,6 @@
#define pr_fmt(fmt) "(stc): " fmt
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/seq_file.h>
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
index 83da711ce9f1..cb0289b44a17 100644
--- a/drivers/misc/ti_dac7512.c
+++ b/drivers/misc/ti_dac7512.c
@@ -20,7 +20,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 5bc10fa193de..b00335652e52 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -20,7 +20,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index d35cda06b5e8..e0d5017785e5 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -383,11 +383,12 @@ static int vmci_enable_msix(struct pci_dev *pdev,
vmci_dev->msix_entries[i].vector = i;
}
- result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
+ result = pci_enable_msix_exact(pdev,
+ vmci_dev->msix_entries, VMCI_MAX_INTRS);
if (result == 0)
vmci_dev->exclusive_vectors = true;
- else if (result > 0)
- result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);
+ else if (result == -ENOSPC)
+ result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1);
return result;
}
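
Both MSI-X hunks in this series (mic_intr.c above and vmci_guest.c here) replace pci_enable_msix(), whose positive return value meant "only this many vectors are available, retry", with pci_enable_msix_exact(), which either enables exactly the requested count or returns a negative errno, -ENOSPC when fewer vectors are available. The vmci fallback, distilled with comments:

	/* try the full set first, then settle for a single vector */
	result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
	if (result == 0)
		vmci_dev->exclusive_vectors = true;
	else if (result == -ENOSPC)
		result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1);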
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 7b5424f398ac..452782bffebc 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -415,8 +415,7 @@ static int ioctl_do_sanitize(struct mmc_card *card)
{
int err;
- if (!(mmc_can_sanitize(card) &&
- (card->host->caps2 & MMC_CAP2_SANITIZE))) {
+ if (!mmc_can_sanitize(card)) {
pr_warn("%s: %s - SANITIZE is not supported\n",
mmc_hostname(card->host), __func__);
err = -EOPNOTSUPP;
@@ -722,19 +721,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
return result;
}
-static int send_stop(struct mmc_card *card, u32 *status)
-{
- struct mmc_command cmd = {0};
- int err;
-
- cmd.opcode = MMC_STOP_TRANSMISSION;
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 5);
- if (err == 0)
- *status = cmd.resp[0];
- return err;
-}
-
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
struct mmc_command cmd = {0};
@@ -750,6 +736,99 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries)
return err;
}
+static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+ bool hw_busy_detect, struct request *req, int *gen_err)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ int err = 0;
+ u32 status;
+
+ do {
+ err = get_card_status(card, &status, 5);
+ if (err) {
+ pr_err("%s: error %d requesting status\n",
+ req->rq_disk->disk_name, err);
+ return err;
+ }
+
+ if (status & R1_ERROR) {
+ pr_err("%s: %s: error sending status cmd, status %#x\n",
+ req->rq_disk->disk_name, __func__, status);
+ *gen_err = 1;
+ }
+
+ /* We may rely on the host hw to handle busy detection. */
+ if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
+ hw_busy_detect)
+ break;
+
+ /*
+ * Timeout if the device never becomes ready for data and never
+ * leaves the program state.
+ */
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: Card stuck in programming state! %s %s\n",
+ mmc_hostname(card->host),
+ req->rq_disk->disk_name, __func__);
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!(status & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+
+ return err;
+}
+
+static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
+ struct request *req, int *gen_err, u32 *stop_status)
+{
+ struct mmc_host *host = card->host;
+ struct mmc_command cmd = {0};
+ int err;
+ bool use_r1b_resp = rq_data_dir(req) == WRITE;
+
+ /*
+ * Normally we use R1B responses for WRITE, but in cases where the host
+ * has specified a max_busy_timeout we need to validate it. A failure
+ * means we need to prevent the host from doing hw busy detection, which
+ * is done by converting to a R1 response instead.
+ */
+ if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
+ use_r1b_resp = false;
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ if (use_r1b_resp) {
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.busy_timeout = timeout_ms;
+ } else {
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ }
+
+ err = mmc_wait_for_cmd(host, &cmd, 5);
+ if (err)
+ return err;
+
+ *stop_status = cmd.resp[0];
+
+ /* No need to check card status in case of READ. */
+ if (rq_data_dir(req) == READ)
+ return 0;
+
+ if (!mmc_host_is_spi(host) &&
+ (*stop_status & R1_ERROR)) {
+ pr_err("%s: %s: general error sending stop command, resp %#x\n",
+ req->rq_disk->disk_name, __func__, *stop_status);
+ *gen_err = 1;
+ }
+
+ return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
+}
+
#define ERR_NOMEDIUM 3
#define ERR_RETRY 2
#define ERR_ABORT 1
@@ -866,26 +945,21 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
*/
if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
R1_CURRENT_STATE(status) == R1_STATE_RCV) {
- err = send_stop(card, &stop_status);
- if (err)
+ err = send_stop(card,
+ DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
+ req, gen_err, &stop_status);
+ if (err) {
pr_err("%s: error %d sending stop command\n",
req->rq_disk->disk_name, err);
-
- /*
- * If the stop cmd also timed out, the card is probably
- * not present, so abort. Other errors are bad news too.
- */
- if (err)
+ /*
+ * If the stop cmd also timed out, the card is probably
+ * not present, so abort. Other errors are bad news too.
+ */
return ERR_ABORT;
+ }
+
if (stop_status & R1_CARD_ECC_FAILED)
*ecc_err = 1;
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
- if (stop_status & R1_ERROR) {
- pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
- req->rq_disk->disk_name, __func__,
- stop_status);
- *gen_err = 1;
- }
}
/* Check for set block count errors */
@@ -1157,8 +1231,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
* program mode, which we have to wait for it to complete.
*/
if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- u32 status;
- unsigned long timeout;
+ int err;
/* Check stop command response */
if (brq->stop.resp[0] & R1_ERROR) {
@@ -1168,39 +1241,10 @@ static int mmc_blk_err_check(struct mmc_card *card,
gen_err = 1;
}
- timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
- do {
- int err = get_card_status(card, &status, 5);
- if (err) {
- pr_err("%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- return MMC_BLK_CMD_ERR;
- }
-
- if (status & R1_ERROR) {
- pr_err("%s: %s: general error sending status command, card status %#x\n",
- req->rq_disk->disk_name, __func__,
- status);
- gen_err = 1;
- }
-
- /* Timeout if the device never becomes ready for data
- * and never leaves the program state.
- */
- if (time_after(jiffies, timeout)) {
- pr_err("%s: Card stuck in programming state!"\
- " %s %s\n", mmc_hostname(card->host),
- req->rq_disk->disk_name, __func__);
-
- return MMC_BLK_CMD_ERR;
- }
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!(status & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
+ &gen_err);
+ if (err)
+ return MMC_BLK_CMD_ERR;
}
/* if general error occurs, retry the write operation. */
@@ -1335,7 +1379,6 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->data.blksz = 512;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
- brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
brq->data.blocks = blk_rq_sectors(req);
/*
@@ -1378,9 +1421,15 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
if (rq_data_dir(req) == READ) {
brq->cmd.opcode = readcmd;
brq->data.flags |= MMC_DATA_READ;
+ if (brq->mrq.stop)
+ brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
+ MMC_CMD_AC;
} else {
brq->cmd.opcode = writecmd;
brq->data.flags |= MMC_DATA_WRITE;
+ if (brq->mrq.stop)
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
+ MMC_CMD_AC;
}
if (do_rel_wr)
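
The new send_stop() helper only issues an R1B stop command, and hence uses hardware busy detection, when the host controller can actually wait that long; if the computed data timeout exceeds host->max_busy_timeout it falls back to a plain R1 response and lets card_busy_detect() poll CMD13 until the card reports R1_READY_FOR_DATA and has left the programming state. The response-type decision, distilled from the hunk with comments:

	bool use_r1b_resp = rq_data_dir(req) == WRITE;

	/* host cannot do hw busy detection for this long: poll via CMD13 instead */
	if (host->max_busy_timeout && timeout_ms > host->max_busy_timeout)
		use_r1b_resp = false;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;	/* controller waits for busy release */
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}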
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 269d072ef55e..9ebee72d9c3f 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -2,21 +2,6 @@
# MMC core configuration
#
-config MMC_UNSAFE_RESUME
- bool "Assume MMC/SD cards are non-removable (DANGEROUS)"
- help
- If you say Y here, the MMC layer will assume that all cards
- stayed in their respective slots during the suspend. The
- normal behaviour is to remove them at suspend and
- redetecting them at resume. Breaking this assumption will
- in most cases result in data corruption.
-
- This option is usually just for embedded systems which use
- a MMC/SD card for rootfs. Most people should say N here.
-
- This option sets a default which can be overridden by the
- module parameter "removable=0" or "removable=1".
-
config MMC_CLKGATE
bool "MMC host clock gating"
help
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 64145a32b917..824644875d41 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -185,24 +185,16 @@ static int mmc_runtime_suspend(struct device *dev)
{
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
- int ret = 0;
- if (host->bus_ops->runtime_suspend)
- ret = host->bus_ops->runtime_suspend(host);
-
- return ret;
+ return host->bus_ops->runtime_suspend(host);
}
static int mmc_runtime_resume(struct device *dev)
{
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
- int ret = 0;
- if (host->bus_ops->runtime_resume)
- ret = host->bus_ops->runtime_resume(host);
-
- return ret;
+ return host->bus_ops->runtime_resume(host);
}
static int mmc_runtime_idle(struct device *dev)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 098374b1ab2b..acbc3f2aaaf9 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -34,6 +34,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
+#include <linux/mmc/slot-gpio.h>
#include "core.h"
#include "bus.h"
@@ -65,23 +66,6 @@ bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
- * We normally treat cards as removed during suspend if they are not
- * known to be on a non-removable bus, to avoid the risk of writing
- * back data to a different card after resume. Allow this to be
- * overridden if necessary.
- */
-#ifdef CONFIG_MMC_UNSAFE_RESUME
-bool mmc_assume_removable;
-#else
-bool mmc_assume_removable = 1;
-#endif
-EXPORT_SYMBOL(mmc_assume_removable);
-module_param_named(removable, mmc_assume_removable, bool, 0644);
-MODULE_PARM_DESC(
- removable,
- "MMC/SD cards are removable and may be removed during suspend");
-
-/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
static int mmc_schedule_delayed_work(struct delayed_work *work,
@@ -302,7 +286,8 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
}
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal, true);
+ EXT_CSD_BKOPS_START, 1, timeout,
+ use_busy_signal, true, false);
if (err) {
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
@@ -1950,7 +1935,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
cmd.opcode = MMC_ERASE;
cmd.arg = arg;
cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
+ cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_err("mmc_erase: erase error %d, status %#x\n",
@@ -2137,7 +2122,7 @@ static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
y = 0;
for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
timeout = mmc_erase_timeout(card, arg, qty + x);
- if (timeout > host->max_discard_to)
+ if (timeout > host->max_busy_timeout)
break;
if (timeout < last_timeout)
break;
@@ -2169,7 +2154,7 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
struct mmc_host *host = card->host;
unsigned int max_discard, max_trim;
- if (!host->max_discard_to)
+ if (!host->max_busy_timeout)
return UINT_MAX;
/*
@@ -2189,7 +2174,7 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
max_discard = 0;
}
pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
- mmc_hostname(host), max_discard, host->max_discard_to);
+ mmc_hostname(host), max_discard, host->max_busy_timeout);
return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
@@ -2248,9 +2233,6 @@ static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
struct mmc_card *card = host->card;
- if (!host->bus_ops->power_restore)
- return -EOPNOTSUPP;
-
if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
return -EOPNOTSUPP;
@@ -2352,7 +2334,7 @@ int _mmc_detect_card_removed(struct mmc_host *host)
{
int ret;
- if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
+ if (host->caps & MMC_CAP_NONREMOVABLE)
return 0;
if (!host->card || mmc_card_removed(host->card))
@@ -2435,7 +2417,7 @@ void mmc_rescan(struct work_struct *work)
* if there is a _removable_ card registered, check whether it is
* still present
*/
- if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
+ if (host->bus_ops && !host->bus_dead
&& !(host->caps & MMC_CAP_NONREMOVABLE))
host->bus_ops->detect(host);
@@ -2490,6 +2472,7 @@ void mmc_start_host(struct mmc_host *host)
mmc_power_off(host);
else
mmc_power_up(host, host->ocr_avail);
+ mmc_gpiod_request_cd_irq(host);
_mmc_detect_change(host, 0, false);
}
@@ -2501,6 +2484,8 @@ void mmc_stop_host(struct mmc_host *host)
host->removed = 1;
spin_unlock_irqrestore(&host->lock, flags);
#endif
+ if (host->slot.cd_irq >= 0)
+ disable_irq(host->slot.cd_irq);
host->rescan_disable = 1;
cancel_delayed_work_sync(&host->detect);
@@ -2537,7 +2522,7 @@ int mmc_power_save_host(struct mmc_host *host)
mmc_bus_get(host);
- if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
+ if (!host->bus_ops || host->bus_dead) {
mmc_bus_put(host);
return -EINVAL;
}
@@ -2563,7 +2548,7 @@ int mmc_power_restore_host(struct mmc_host *host)
mmc_bus_get(host);
- if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
+ if (!host->bus_ops || host->bus_dead) {
mmc_bus_put(host);
return -EINVAL;
}
@@ -2582,12 +2567,8 @@ EXPORT_SYMBOL(mmc_power_restore_host);
*/
int mmc_flush_cache(struct mmc_card *card)
{
- struct mmc_host *host = card->host;
int err = 0;
- if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
- return err;
-
if (mmc_card_mmc(card) &&
(card->ext_csd.cache_size > 0) &&
(card->ext_csd.cache_ctrl & 1)) {
@@ -2602,44 +2583,6 @@ int mmc_flush_cache(struct mmc_card *card)
}
EXPORT_SYMBOL(mmc_flush_cache);
-/*
- * Turn the cache ON/OFF.
- * Turning the cache OFF shall trigger flushing of the data
- * to the non-volatile storage.
- * This function should be called with host claimed
- */
-int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
-{
- struct mmc_card *card = host->card;
- unsigned int timeout;
- int err = 0;
-
- if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
- mmc_card_is_removable(host))
- return err;
-
- if (card && mmc_card_mmc(card) &&
- (card->ext_csd.cache_size > 0)) {
- enable = !!enable;
-
- if (card->ext_csd.cache_ctrl ^ enable) {
- timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_CACHE_CTRL, enable, timeout);
- if (err)
- pr_err("%s: cache %s error %d\n",
- mmc_hostname(card->host),
- enable ? "on" : "off",
- err);
- else
- card->ext_csd.cache_ctrl = enable;
- }
- }
-
- return err;
-}
-EXPORT_SYMBOL(mmc_cache_ctrl);
-
#ifdef CONFIG_PM
/* Do the card removal on suspend if card is assumed removable
@@ -2668,7 +2611,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
/* Validate prerequisites for suspend */
if (host->bus_ops->pre_suspend)
err = host->bus_ops->pre_suspend(host);
- if (!err && host->bus_ops->suspend)
+ if (!err)
break;
/* Calling bus_ops->remove() with a claimed host can deadlock */
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 49bc403e31f0..fdea825dbb24 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -337,7 +337,7 @@ int mmc_of_parse(struct mmc_host *host)
break;
default:
dev_err(host->parent,
- "Invalid \"bus-width\" value %ud!\n", bus_width);
+ "Invalid \"bus-width\" value %u!\n", bus_width);
return -EINVAL;
}
@@ -419,6 +419,16 @@ int mmc_of_parse(struct mmc_host *host)
host->caps |= MMC_CAP_SD_HIGHSPEED;
if (of_find_property(np, "cap-mmc-highspeed", &len))
host->caps |= MMC_CAP_MMC_HIGHSPEED;
+ if (of_find_property(np, "sd-uhs-sdr12", &len))
+ host->caps |= MMC_CAP_UHS_SDR12;
+ if (of_find_property(np, "sd-uhs-sdr25", &len))
+ host->caps |= MMC_CAP_UHS_SDR25;
+ if (of_find_property(np, "sd-uhs-sdr50", &len))
+ host->caps |= MMC_CAP_UHS_SDR50;
+ if (of_find_property(np, "sd-uhs-sdr104", &len))
+ host->caps |= MMC_CAP_UHS_SDR104;
+ if (of_find_property(np, "sd-uhs-ddr50", &len))
+ host->caps |= MMC_CAP_UHS_DDR50;
if (of_find_property(np, "cap-power-off-card", &len))
host->caps |= MMC_CAP_POWER_OFF_CARD;
if (of_find_property(np, "cap-sdio-irq", &len))
@@ -429,6 +439,14 @@ int mmc_of_parse(struct mmc_host *host)
host->pm_caps |= MMC_PM_KEEP_POWER;
if (of_find_property(np, "enable-sdio-wakeup", &len))
host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+ if (of_find_property(np, "mmc-ddr-1_8v", &len))
+ host->caps |= MMC_CAP_1_8V_DDR;
+ if (of_find_property(np, "mmc-ddr-1_2v", &len))
+ host->caps |= MMC_CAP_1_2V_DDR;
+ if (of_find_property(np, "mmc-hs200-1_8v", &len))
+ host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+ if (of_find_property(np, "mmc-hs200-1_2v", &len))
+ host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
return 0;
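
mmc_of_parse() gains one of_find_property() call per new capability, which keeps the parsing explicit but repetitive. A hedged alternative sketch, not what the patch does, expressing the same UHS mapping as a table; the property strings and cap flags are exactly the ones added above, everything else is illustrative:

	static const struct { const char *prop; u32 cap; } uhs_caps[] = {
		{ "sd-uhs-sdr12",  MMC_CAP_UHS_SDR12 },
		{ "sd-uhs-sdr25",  MMC_CAP_UHS_SDR25 },
		{ "sd-uhs-sdr50",  MMC_CAP_UHS_SDR50 },
		{ "sd-uhs-sdr104", MMC_CAP_UHS_SDR104 },
		{ "sd-uhs-ddr50",  MMC_CAP_UHS_DDR50 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(uhs_caps); i++)
		if (of_find_property(np, uhs_caps[i].prop, &len))
			host->caps |= uhs_caps[i].cap;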
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 98e9eb0f6643..1ab5f3a0af5b 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -856,8 +856,10 @@ static int mmc_select_hs200(struct mmc_card *card)
/* switch to HS200 mode if bus width set successfully */
if (!err)
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, 2, 0);
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, 2,
+ card->ext_csd.generic_cmd6_time,
+ true, true, true);
err:
return err;
}
@@ -1074,9 +1076,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
host->caps2 & MMC_CAP2_HS200)
err = mmc_select_hs200(card);
else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, 1,
- card->ext_csd.generic_cmd6_time);
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, 1,
+ card->ext_csd.generic_cmd6_time,
+ true, true, true);
if (err && err != -EBADMSG)
goto free_card;
@@ -1287,8 +1290,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* If cache size is higher than 0, this indicates
* the existence of cache and it can be turned on.
*/
- if ((host->caps2 & MMC_CAP2_CACHE_CTRL) &&
- card->ext_csd.cache_size > 0) {
+ if (card->ext_csd.cache_size > 0) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_CACHE_CTRL, 1,
card->ext_csd.generic_cmd6_time);
@@ -1356,11 +1358,9 @@ static int mmc_sleep(struct mmc_host *host)
{
struct mmc_command cmd = {0};
struct mmc_card *card = host->card;
+ unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
int err;
- if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
- return 0;
-
err = mmc_deselect_cards(host);
if (err)
return err;
@@ -1369,7 +1369,19 @@ static int mmc_sleep(struct mmc_host *host)
cmd.arg = card->rca << 16;
cmd.arg |= 1 << 15;
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ /*
+ * If the max_busy_timeout of the host is specified, validate it against
+ * the sleep cmd timeout. A failure means we need to prevent the host
+ * from doing hw busy detection, which is done by converting to a R1
+ * response instead of a R1B.
+ */
+ if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.busy_timeout = timeout_ms;
+ }
+
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
return err;
@@ -1380,8 +1392,8 @@ static int mmc_sleep(struct mmc_host *host)
* SEND_STATUS command to poll the status because that command (and most
* others) is invalid while the card sleeps.
*/
- if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
- mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));
+ if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
+ mmc_delay(timeout_ms);
return err;
}
@@ -1404,7 +1416,7 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout, true, false);
+ notify_type, timeout, true, false, false);
if (err)
pr_err("%s: Power Off Notification timed out, %u\n",
mmc_hostname(card->host), timeout);
@@ -1484,7 +1496,7 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
goto out;
}
- err = mmc_cache_ctrl(host, 0);
+ err = mmc_flush_cache(host->card);
if (err)
goto out;
@@ -1636,16 +1648,6 @@ static int mmc_power_restore(struct mmc_host *host)
static const struct mmc_bus_ops mmc_ops = {
.remove = mmc_remove,
.detect = mmc_detect,
- .suspend = NULL,
- .resume = NULL,
- .power_restore = mmc_power_restore,
- .alive = mmc_alive,
- .shutdown = mmc_shutdown,
-};
-
-static const struct mmc_bus_ops mmc_ops_unsafe = {
- .remove = mmc_remove,
- .detect = mmc_detect,
.suspend = mmc_suspend,
.resume = mmc_resume,
.runtime_suspend = mmc_runtime_suspend,
@@ -1655,17 +1657,6 @@ static const struct mmc_bus_ops mmc_ops_unsafe = {
.shutdown = mmc_shutdown,
};
-static void mmc_attach_bus_ops(struct mmc_host *host)
-{
- const struct mmc_bus_ops *bus_ops;
-
- if (!mmc_card_is_removable(host))
- bus_ops = &mmc_ops_unsafe;
- else
- bus_ops = &mmc_ops;
- mmc_attach_bus(host, bus_ops);
-}
-
/*
* Starting point for MMC card init.
*/
@@ -1685,7 +1676,7 @@ int mmc_attach_mmc(struct mmc_host *host)
if (err)
return err;
- mmc_attach_bus_ops(host);
+ mmc_attach_bus(host, &mmc_ops);
if (host->ocr_avail_mmc)
host->ocr_avail = host->ocr_avail_mmc;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index e5b5eeb548d1..f51b5ba3bbea 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -405,20 +405,30 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
* timeout of zero implies maximum possible timeout
* @use_busy_signal: use the busy signal as response type
* @send_status: send status cmd to poll for busy
+ * @ignore_crc: ignore CRC errors when sending status cmd to poll for busy
*
* Modifies the EXT_CSD register for selected card.
*/
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
- unsigned int timeout_ms, bool use_busy_signal, bool send_status)
+ unsigned int timeout_ms, bool use_busy_signal, bool send_status,
+ bool ignore_crc)
{
+ struct mmc_host *host = card->host;
int err;
struct mmc_command cmd = {0};
unsigned long timeout;
u32 status = 0;
- bool ignore_crc = false;
+ bool use_r1b_resp = use_busy_signal;
- BUG_ON(!card);
- BUG_ON(!card->host);
+ /*
+ * If the cmd timeout and the max_busy_timeout of the host are both
+ * specified, let's validate them. A failure means we need to prevent
+ * the host from doing hw busy detection, which is done by converting
+ * to a R1 response instead of a R1B.
+ */
+ if (timeout_ms && host->max_busy_timeout &&
+ (timeout_ms > host->max_busy_timeout))
+ use_r1b_resp = false;
cmd.opcode = MMC_SWITCH;
cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
@@ -426,17 +436,21 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
(value << 8) |
set;
cmd.flags = MMC_CMD_AC;
- if (use_busy_signal)
+ if (use_r1b_resp) {
cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
- else
+ /*
+ * A busy_timeout of zero means the host can decide to use
+ * whatever value it finds suitable.
+ */
+ cmd.busy_timeout = timeout_ms;
+ } else {
cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
+ }
-
- cmd.cmd_timeout_ms = timeout_ms;
if (index == EXT_CSD_SANITIZE_START)
cmd.sanitize_busy = true;
- err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
return err;
@@ -445,24 +459,27 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
return 0;
/*
- * Must check status to be sure of no errors
- * If CMD13 is to check the busy completion of the timing change,
- * disable the check of CRC error.
+ * CRC errors shall only be ignored in cases where CMD13 is used to poll
+ * to detect busy completion.
*/
- if (index == EXT_CSD_HS_TIMING &&
- !(card->host->caps & MMC_CAP_WAIT_WHILE_BUSY))
- ignore_crc = true;
+ if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
+ ignore_crc = false;
+
+ /* We have an unspecified cmd timeout, use the fallback value. */
+ if (!timeout_ms)
+ timeout_ms = MMC_OPS_TIMEOUT_MS;
- timeout = jiffies + msecs_to_jiffies(MMC_OPS_TIMEOUT_MS);
+ /* Must check status to be sure of no errors. */
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
if (send_status) {
err = __mmc_send_status(card, &status, ignore_crc);
if (err)
return err;
}
- if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
break;
- if (mmc_host_is_spi(card->host))
+ if (mmc_host_is_spi(host))
break;
/*
@@ -478,18 +495,18 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
/* Timeout if the device never leaves the program state. */
if (time_after(jiffies, timeout)) {
pr_err("%s: Card stuck in programming state! %s\n",
- mmc_hostname(card->host), __func__);
+ mmc_hostname(host), __func__);
return -ETIMEDOUT;
}
} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
- if (mmc_host_is_spi(card->host)) {
+ if (mmc_host_is_spi(host)) {
if (status & R1_SPI_ILLEGAL_COMMAND)
return -EBADMSG;
} else {
if (status & 0xFDFFA000)
- pr_warning("%s: unexpected status %#x after "
- "switch", mmc_hostname(card->host), status);
+ pr_warn("%s: unexpected status %#x after switch\n",
+ mmc_hostname(host), status);
if (status & R1_SWITCH_ERROR)
return -EBADMSG;
}
@@ -501,7 +518,8 @@ EXPORT_SYMBOL_GPL(__mmc_switch);
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms)
{
- return __mmc_switch(card, set, index, value, timeout_ms, true, true);
+ return __mmc_switch(card, set, index, value, timeout_ms, true, true,
+ false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 692fdb177294..2dd359d2242f 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1209,16 +1209,6 @@ static int mmc_sd_power_restore(struct mmc_host *host)
static const struct mmc_bus_ops mmc_sd_ops = {
.remove = mmc_sd_remove,
.detect = mmc_sd_detect,
- .suspend = NULL,
- .resume = NULL,
- .power_restore = mmc_sd_power_restore,
- .alive = mmc_sd_alive,
- .shutdown = mmc_sd_suspend,
-};
-
-static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
- .remove = mmc_sd_remove,
- .detect = mmc_sd_detect,
.runtime_suspend = mmc_sd_runtime_suspend,
.runtime_resume = mmc_sd_runtime_resume,
.suspend = mmc_sd_suspend,
@@ -1228,17 +1218,6 @@ static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
.shutdown = mmc_sd_suspend,
};
-static void mmc_sd_attach_bus_ops(struct mmc_host *host)
-{
- const struct mmc_bus_ops *bus_ops;
-
- if (!mmc_card_is_removable(host))
- bus_ops = &mmc_sd_ops_unsafe;
- else
- bus_ops = &mmc_sd_ops;
- mmc_attach_bus(host, bus_ops);
-}
-
/*
* Starting point for SD card init.
*/
@@ -1254,7 +1233,7 @@ int mmc_attach_sd(struct mmc_host *host)
if (err)
return err;
- mmc_sd_attach_bus_ops(host);
+ mmc_attach_bus(host, &mmc_sd_ops);
if (host->ocr_avail_sd)
host->ocr_avail = host->ocr_avail_sd;
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 46596b71a32f..f7650b899e3d 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mmc/host.h>
@@ -18,8 +19,10 @@
#include <linux/slab.h>
struct mmc_gpio {
- int ro_gpio;
- int cd_gpio;
+ struct gpio_desc *ro_gpio;
+ struct gpio_desc *cd_gpio;
+ bool override_ro_active_level;
+ bool override_cd_active_level;
char *ro_label;
char cd_label[0];
};
@@ -57,8 +60,6 @@ static int mmc_gpio_alloc(struct mmc_host *host)
ctx->ro_label = ctx->cd_label + len;
snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent));
snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent));
- ctx->cd_gpio = -EINVAL;
- ctx->ro_gpio = -EINVAL;
host->slot.handler_priv = ctx;
}
}
@@ -72,11 +73,14 @@ int mmc_gpio_get_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
- if (!ctx || !gpio_is_valid(ctx->ro_gpio))
+ if (!ctx || !ctx->ro_gpio)
return -ENOSYS;
- return !gpio_get_value_cansleep(ctx->ro_gpio) ^
- !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+ if (ctx->override_ro_active_level)
+ return !gpiod_get_raw_value_cansleep(ctx->ro_gpio) ^
+ !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+
+ return gpiod_get_value_cansleep(ctx->ro_gpio);
}
EXPORT_SYMBOL(mmc_gpio_get_ro);
@@ -84,11 +88,14 @@ int mmc_gpio_get_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
- if (!ctx || !gpio_is_valid(ctx->cd_gpio))
+ if (!ctx || !ctx->cd_gpio)
return -ENOSYS;
- return !gpio_get_value_cansleep(ctx->cd_gpio) ^
- !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
+ if (ctx->override_cd_active_level)
+ return !gpiod_get_raw_value_cansleep(ctx->cd_gpio) ^
+ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
+
+ return gpiod_get_value_cansleep(ctx->cd_gpio);
}
EXPORT_SYMBOL(mmc_gpio_get_cd);
@@ -125,12 +132,47 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
if (ret < 0)
return ret;
- ctx->ro_gpio = gpio;
+ ctx->override_ro_active_level = true;
+ ctx->ro_gpio = gpio_to_desc(gpio);
return 0;
}
EXPORT_SYMBOL(mmc_gpio_request_ro);
+void mmc_gpiod_request_cd_irq(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ int ret, irq;
+
+ if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio)
+ return;
+
+ irq = gpiod_to_irq(ctx->cd_gpio);
+
+ /*
+ * Even if gpiod_to_irq() returns a valid IRQ number, the platform might
+ * still prefer to poll, e.g., because that IRQ number is already used
+ * by another unit and cannot be shared.
+ */
+ if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL)
+ irq = -EINVAL;
+
+ if (irq >= 0) {
+ ret = devm_request_threaded_irq(&host->class_dev, irq,
+ NULL, mmc_gpio_cd_irqt,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ ctx->cd_label, host);
+ if (ret < 0)
+ irq = ret;
+ }
+
+ host->slot.cd_irq = irq;
+
+ if (irq < 0)
+ host->caps |= MMC_CAP_NEEDS_POLL;
+}
+EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);
+
/**
* mmc_gpio_request_cd - request a gpio for card-detection
* @host: mmc host
@@ -154,7 +196,6 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
unsigned int debounce)
{
struct mmc_gpio *ctx;
- int irq = gpio_to_irq(gpio);
int ret;
ret = mmc_gpio_alloc(host);
@@ -179,29 +220,10 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
return ret;
}
- /*
- * Even if gpio_to_irq() returns a valid IRQ number, the platform might
- * still prefer to poll, e.g., because that IRQ number is already used
- * by another unit and cannot be shared.
- */
- if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL)
- irq = -EINVAL;
-
- if (irq >= 0) {
- ret = devm_request_threaded_irq(&host->class_dev, irq,
- NULL, mmc_gpio_cd_irqt,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- ctx->cd_label, host);
- if (ret < 0)
- irq = ret;
- }
-
- host->slot.cd_irq = irq;
-
- if (irq < 0)
- host->caps |= MMC_CAP_NEEDS_POLL;
+ ctx->override_cd_active_level = true;
+ ctx->cd_gpio = gpio_to_desc(gpio);
- ctx->cd_gpio = gpio;
+ mmc_gpiod_request_cd_irq(host);
return 0;
}
@@ -219,11 +241,11 @@ void mmc_gpio_free_ro(struct mmc_host *host)
struct mmc_gpio *ctx = host->slot.handler_priv;
int gpio;
- if (!ctx || !gpio_is_valid(ctx->ro_gpio))
+ if (!ctx || !ctx->ro_gpio)
return;
- gpio = ctx->ro_gpio;
- ctx->ro_gpio = -EINVAL;
+ gpio = desc_to_gpio(ctx->ro_gpio);
+ ctx->ro_gpio = NULL;
devm_gpio_free(&host->class_dev, gpio);
}
@@ -241,7 +263,7 @@ void mmc_gpio_free_cd(struct mmc_host *host)
struct mmc_gpio *ctx = host->slot.handler_priv;
int gpio;
- if (!ctx || !gpio_is_valid(ctx->cd_gpio))
+ if (!ctx || !ctx->cd_gpio)
return;
if (host->slot.cd_irq >= 0) {
@@ -249,9 +271,87 @@ void mmc_gpio_free_cd(struct mmc_host *host)
host->slot.cd_irq = -EINVAL;
}
- gpio = ctx->cd_gpio;
- ctx->cd_gpio = -EINVAL;
+ gpio = desc_to_gpio(ctx->cd_gpio);
+ ctx->cd_gpio = NULL;
devm_gpio_free(&host->class_dev, gpio);
}
EXPORT_SYMBOL(mmc_gpio_free_cd);
+
+/**
+ * mmc_gpiod_request_cd - request a gpio descriptor for card-detection
+ * @host: mmc host
+ * @con_id: function within the GPIO consumer
+ * @idx: index of the GPIO to obtain in the consumer
+ * @override_active_level: ignore %GPIO_ACTIVE_LOW flag
+ * @debounce: debounce time in microseconds
+ *
+ * Use this function in place of mmc_gpio_request_cd() to use the GPIO
+ * descriptor API. Note that it is paired with mmc_gpiod_free_cd(), not
+ * mmc_gpio_free_cd(). Note also that it must be called prior to mmc_add_host();
+ * otherwise the caller must also call mmc_gpiod_request_cd_irq().
+ *
+ * Returns zero on success, else an error.
+ */
+int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
+ unsigned int idx, bool override_active_level,
+ unsigned int debounce)
+{
+ struct mmc_gpio *ctx;
+ struct gpio_desc *desc;
+ int ret;
+
+ ret = mmc_gpio_alloc(host);
+ if (ret < 0)
+ return ret;
+
+ ctx = host->slot.handler_priv;
+
+ if (!con_id)
+ con_id = ctx->cd_label;
+
+ desc = devm_gpiod_get_index(host->parent, con_id, idx);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ ret = gpiod_direction_input(desc);
+ if (ret < 0)
+ return ret;
+
+ if (debounce) {
+ ret = gpiod_set_debounce(desc, debounce);
+ if (ret < 0)
+ return ret;
+ }
+
+ ctx->override_cd_active_level = override_active_level;
+ ctx->cd_gpio = desc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_gpiod_request_cd);
+
+/**
+ * mmc_gpiod_free_cd - free the card-detection gpio descriptor
+ * @host: mmc host
+ *
+ * It's provided only for cases where client drivers need to manually free
+ * up the card-detection gpio requested by mmc_gpiod_request_cd().
+ */
+void mmc_gpiod_free_cd(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ if (!ctx || !ctx->cd_gpio)
+ return;
+
+ if (host->slot.cd_irq >= 0) {
+ devm_free_irq(&host->class_dev, host->slot.cd_irq, host);
+ host->slot.cd_irq = -EINVAL;
+ }
+
+ devm_gpiod_put(&host->class_dev, ctx->cd_gpio);
+
+ ctx->cd_gpio = NULL;
+}
+EXPORT_SYMBOL(mmc_gpiod_free_cd);
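
mmc_gpiod_request_cd() is intended to run from a host driver's probe, before mmc_add_host(), so that mmc_start_host() can later pick up the card-detect IRQ via the new mmc_gpiod_request_cd_irq() call added to core.c. A hedged usage sketch; the con_id "cd", index 0 and zero debounce are assumptions, not values from the patch:

	/* in a host driver's probe(), before mmc_add_host(host) */
	ret = mmc_gpiod_request_cd(host, "cd", 0, false, 0);
	if (ret)
		dev_dbg(mmc_dev(host), "no card-detect gpio (%d), using polling or native CD\n",
			ret);

	ret = mmc_add_host(host);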
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 1384f67abe21..8aaf8c1f3f63 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -263,7 +263,7 @@ config MMC_SDHCI_S3C_DMA
config MMC_SDHCI_BCM_KONA
tristate "SDHCI support on Broadcom KONA platform"
- depends on ARCH_BCM
+ depends on ARCH_BCM_MOBILE
select MMC_SDHCI_PLTFM
help
This selects the Broadcom Kona Secure Digital Host Controller
@@ -334,6 +334,19 @@ config MMC_ATMELMCI
If unsure, say N.
+config MMC_SDHCI_MSM
+ tristate "Qualcomm SDHCI Controller Support"
+ depends on ARCH_QCOM
+ depends on MMC_SDHCI_PLTFM
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ support present in Qualcomm SOCs. The controller supports
+ SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_MSM
tristate "Qualcomm SDCC Controller Support"
depends on MMC && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
@@ -580,14 +593,6 @@ config MMC_DW_EXYNOS
Synopsys DesignWare Memory Card Interface driver. Select this option
for platforms based on Exynos4 and Exynos5 SoC's.
-config MMC_DW_SOCFPGA
- tristate "SOCFPGA specific extensions for Synopsys DW Memory Card Interface"
- depends on MMC_DW && MFD_SYSCON
- select MMC_DW_PLTFM
- help
- This selects support for Altera SoCFPGA specific extensions to the
- Synopsys DesignWare Memory Card Interface driver.
-
config MMC_DW_K3
tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
depends on MMC_DW
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3483b6b6b880..0c8aa5e1e304 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
obj-$(CONFIG_MMC_DW) += dw_mmc.o
obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
-obj-$(CONFIG_MMC_DW_SOCFPGA) += dw_mmc-socfpga.o
obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o
obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
@@ -64,6 +63,7 @@ obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o
+obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index d6153740b77f..5d4c5e0fba2f 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1192,7 +1192,7 @@ static struct davinci_mmc_config
struct device_node *np;
struct davinci_mmc_config *pdata = pdev->dev.platform_data;
const struct of_device_id *match =
- of_match_device(of_match_ptr(davinci_mmc_dt_ids), &pdev->dev);
+ of_match_device(davinci_mmc_dt_ids, &pdev->dev);
u32 data;
np = pdev->dev.of_node;
@@ -1468,7 +1468,7 @@ static struct platform_driver davinci_mmcsd_driver = {
.name = "davinci_mmc",
.owner = THIS_MODULE,
.pm = davinci_mmcsd_pm_ops,
- .of_match_table = of_match_ptr(davinci_mmc_dt_ids),
+ .of_match_table = davinci_mmc_dt_ids,
},
.remove = __exit_p(davinci_mmcsd_remove),
.id_table = davinci_mmc_devtype,
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index f567c219cff4..650f9cc3f7a6 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -50,6 +50,7 @@ static int dw_mci_k3_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, drv_data);
}
+#ifdef CONFIG_PM_SLEEP
static int dw_mci_k3_suspend(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
@@ -75,6 +76,7 @@ static int dw_mci_k3_resume(struct device *dev)
return dw_mci_resume(host);
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(dw_mci_k3_pmops, dw_mci_k3_suspend, dw_mci_k3_resume);
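The CONFIG_PM_SLEEP guard added above matters because SIMPLE_DEV_PM_OPS() only references its suspend/resume arguments when CONFIG_PM_SLEEP is enabled; without the guard the two callbacks become "defined but not used" warnings in !PM_SLEEP builds. A minimal sketch of the pattern, with illustrative names rather than this driver's:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the controller and save context here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore context and re-enable the controller here */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

/* Expands to an empty dev_pm_ops when CONFIG_PM_SLEEP=n, so the
 * callbacks above would otherwise be unreferenced. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);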
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 5c4965655297..d4a47a9f5584 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -25,13 +25,17 @@
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
-static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
+static void dw_mci_pltfm_prepare_command(struct dw_mci *host, u32 *cmdr)
{
*cmdr |= SDMMC_CMD_USE_HOLD_REG;
}
static const struct dw_mci_drv_data rockchip_drv_data = {
- .prepare_command = dw_mci_rockchip_prepare_command,
+ .prepare_command = dw_mci_pltfm_prepare_command,
+};
+
+static const struct dw_mci_drv_data socfpga_drv_data = {
+ .prepare_command = dw_mci_pltfm_prepare_command,
};
int dw_mci_pltfm_register(struct platform_device *pdev,
@@ -92,6 +96,8 @@ static const struct of_device_id dw_mci_pltfm_match[] = {
{ .compatible = "snps,dw-mshc", },
{ .compatible = "rockchip,rk2928-dw-mshc",
.data = &rockchip_drv_data },
+ { .compatible = "altr,socfpga-dw-mshc",
+ .data = &socfpga_drv_data },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
@@ -123,7 +129,7 @@ static struct platform_driver dw_mci_pltfm_driver = {
.remove = dw_mci_pltfm_remove,
.driver = {
.name = "dw_mmc",
- .of_match_table = of_match_ptr(dw_mci_pltfm_match),
+ .of_match_table = dw_mci_pltfm_match,
.pm = &dw_mci_pltfm_pmops,
},
};
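With the rockchip callback renamed to dw_mci_pltfm_prepare_command and an "altr,socfpga-dw-mshc" entry added here, the generic platform driver takes over the SoCFPGA compatible; only the SDMMC_CMD_USE_HOLD_REG quirk is kept, while the syscon-based clock-phase setup from the deleted dw_mmc-socfpga.c (next diff) goes away. For reference, a hand-written sketch of how a compatible's .data pointer is typically picked up at probe time (not the exact body of dw_mci_pltfm_probe):

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"

static int example_probe(struct platform_device *pdev)
{
	const struct dw_mci_drv_data *drv_data = NULL;
	const struct of_device_id *match;

	/* Find the matching table entry and use its driver data, if any. */
	match = of_match_node(dw_mci_pltfm_match, pdev->dev.of_node);
	if (match)
		drv_data = match->data;

	return dw_mci_pltfm_register(pdev, drv_data);
}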
diff --git a/drivers/mmc/host/dw_mmc-socfpga.c b/drivers/mmc/host/dw_mmc-socfpga.c
deleted file mode 100644
index 3e8e53ae3302..000000000000
--- a/drivers/mmc/host/dw_mmc-socfpga.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Altera SoCFPGA Specific Extensions for Synopsys DW Multimedia Card Interface
- * driver
- *
- * Copyright (C) 2012, Samsung Electronics Co., Ltd.
- * Copyright (C) 2013 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Taken from dw_mmc-exynos.c
- */
-#include <linux/clk.h>
-#include <linux/mfd/syscon.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/dw_mmc.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-
-#include "dw_mmc.h"
-#include "dw_mmc-pltfm.h"
-
-#define SYSMGR_SDMMCGRP_CTRL_OFFSET 0x108
-#define DRV_CLK_PHASE_SHIFT_SEL_MASK 0x7
-#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \
- ((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0))
-
-/* SOCFPGA implementation specific driver private data */
-struct dw_mci_socfpga_priv_data {
- u8 ciu_div; /* card interface unit divisor */
- u32 hs_timing; /* bitmask for CIU clock phase shift */
- struct regmap *sysreg; /* regmap for system manager register */
-};
-
-static int dw_mci_socfpga_priv_init(struct dw_mci *host)
-{
- return 0;
-}
-
-static int dw_mci_socfpga_setup_clock(struct dw_mci *host)
-{
- struct dw_mci_socfpga_priv_data *priv = host->priv;
-
- clk_disable_unprepare(host->ciu_clk);
- regmap_write(priv->sysreg, SYSMGR_SDMMCGRP_CTRL_OFFSET,
- priv->hs_timing);
- clk_prepare_enable(host->ciu_clk);
-
- host->bus_hz /= (priv->ciu_div + 1);
- return 0;
-}
-
-static void dw_mci_socfpga_prepare_command(struct dw_mci *host, u32 *cmdr)
-{
- struct dw_mci_socfpga_priv_data *priv = host->priv;
-
- if (priv->hs_timing & DRV_CLK_PHASE_SHIFT_SEL_MASK)
- *cmdr |= SDMMC_CMD_USE_HOLD_REG;
-}
-
-static int dw_mci_socfpga_parse_dt(struct dw_mci *host)
-{
- struct dw_mci_socfpga_priv_data *priv;
- struct device_node *np = host->dev->of_node;
- u32 timing[2];
- u32 div = 0;
- int ret;
-
- priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(host->dev, "mem alloc failed for private data\n");
- return -ENOMEM;
- }
-
- priv->sysreg = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
- if (IS_ERR(priv->sysreg)) {
- dev_err(host->dev, "regmap for altr,sys-mgr lookup failed.\n");
- return PTR_ERR(priv->sysreg);
- }
-
- ret = of_property_read_u32(np, "altr,dw-mshc-ciu-div", &div);
- if (ret)
- dev_info(host->dev, "No dw-mshc-ciu-div specified, assuming 1");
- priv->ciu_div = div;
-
- ret = of_property_read_u32_array(np,
- "altr,dw-mshc-sdr-timing", timing, 2);
- if (ret)
- return ret;
-
- priv->hs_timing = SYSMGR_SDMMC_CTRL_SET(timing[0], timing[1]);
- host->priv = priv;
- return 0;
-}
-
-static const struct dw_mci_drv_data socfpga_drv_data = {
- .init = dw_mci_socfpga_priv_init,
- .setup_clock = dw_mci_socfpga_setup_clock,
- .prepare_command = dw_mci_socfpga_prepare_command,
- .parse_dt = dw_mci_socfpga_parse_dt,
-};
-
-static const struct of_device_id dw_mci_socfpga_match[] = {
- { .compatible = "altr,socfpga-dw-mshc",
- .data = &socfpga_drv_data, },
- {},
-};
-MODULE_DEVICE_TABLE(of, dw_mci_socfpga_match);
-
-static int dw_mci_socfpga_probe(struct platform_device *pdev)
-{
- const struct dw_mci_drv_data *drv_data;
- const struct of_device_id *match;
-
- match = of_match_node(dw_mci_socfpga_match, pdev->dev.of_node);
- drv_data = match->data;
- return dw_mci_pltfm_register(pdev, drv_data);
-}
-
-static struct platform_driver dw_mci_socfpga_pltfm_driver = {
- .probe = dw_mci_socfpga_probe,
- .remove = __exit_p(dw_mci_pltfm_remove),
- .driver = {
- .name = "dwmmc_socfpga",
- .of_match_table = dw_mci_socfpga_match,
- .pm = &dw_mci_pltfm_pmops,
- },
-};
-
-module_platform_driver(dw_mci_socfpga_pltfm_driver);
-
-MODULE_DESCRIPTION("Altera SOCFPGA Specific DW-MSHC Driver Extension");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:dwmmc-socfpga");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 55cd110a49c4..cced599d5aeb 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1345,7 +1345,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
if (!err) {
if (!data->stop || mrq->sbc) {
- if (mrq->sbc)
+ if (mrq->sbc && data->stop)
data->stop->error = 0;
dw_mci_request_end(host, mrq);
goto unlock;
@@ -2607,7 +2607,7 @@ int dw_mci_probe(struct dw_mci *host)
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
host->card_workqueue = alloc_workqueue("dw-mci-card",
- WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
+ WQ_MEM_RECLAIM, 1);
if (!host->card_workqueue) {
ret = -ENOMEM;
goto err_dmaunmap;
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 6bf24ab917e6..68349779c396 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -185,7 +185,7 @@
extern int dw_mci_probe(struct dw_mci *host);
extern void dw_mci_remove(struct dw_mci *host);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
extern int dw_mci_suspend(struct dw_mci *host);
extern int dw_mci_resume(struct dw_mci *host);
#endif
@@ -244,6 +244,7 @@ struct dw_mci_tuning_data {
* @prepare_command: handle CMD register extensions.
* @set_ios: handle bus specific extensions.
* @parse_dt: parse implementation specific device tree properties.
+ * @execute_tuning: implementation specific tuning procedure.
*
* Provide controller implementation specific extensions. The usage of this
* data structure is fully optional and usage of each member in this structure
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index b93122636531..771c60ab4a32 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -921,6 +921,29 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
{
void __iomem *base = host->base;
bool sbc = (cmd == host->mrq->sbc);
+ bool busy_resp = host->variant->busy_detect &&
+ (cmd->flags & MMC_RSP_BUSY);
+
+ /* Check if we need to wait for busy completion. */
+ if (host->busy_status && (status & MCI_ST_CARDBUSY))
+ return;
+
+ /* Enable busy completion if needed and supported. */
+ if (!host->busy_status && busy_resp &&
+ !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
+ (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
+ writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
+ base + MMCIMASK0);
+ host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
+ return;
+ }
+
+ /* At busy completion, mask the IRQ and complete the request. */
+ if (host->busy_status) {
+ writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
+ base + MMCIMASK0);
+ host->busy_status = 0;
+ }
host->cmd = NULL;
@@ -1139,20 +1162,30 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
status &= ~MCI_IRQ1MASK;
}
+ /*
+ * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
+ * enabled) since the HW seems to be triggering the IRQ on both
+ * edges while monitoring DAT0 for busy completion.
+ */
status &= readl(host->base + MMCIMASK0);
writel(status, host->base + MMCICLEAR);
dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
+ cmd = host->cmd;
+ if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
+ MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
+ mmci_cmd_irq(host, cmd, status);
+
data = host->data;
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
MCI_DATABLOCKEND) && data)
mmci_data_irq(host, data, status);
- cmd = host->cmd;
- if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
- mmci_cmd_irq(host, cmd, status);
+ /* Don't poll for busy completion in irq context. */
+ if (host->busy_status)
+ status &= ~MCI_ST_CARDBUSY;
ret = 1;
} while (status);
@@ -1503,12 +1536,6 @@ static int mmci_probe(struct amba_device *dev,
goto clk_disable;
}
- if (variant->busy_detect) {
- mmci_ops.card_busy = mmci_card_busy;
- mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
- }
-
- mmc->ops = &mmci_ops;
/*
* The ARM and ST versions of the block have slightly different
* clock divider equations which means that the minimum divider
@@ -1542,6 +1569,15 @@ static int mmci_probe(struct amba_device *dev,
mmc->caps = plat->capabilities;
mmc->caps2 = plat->capabilities2;
+ if (variant->busy_detect) {
+ mmci_ops.card_busy = mmci_card_busy;
+ mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
+ mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+ mmc->max_busy_timeout = 0;
+ }
+
+ mmc->ops = &mmci_ops;
+
/* We support these PM capabilities. */
mmc->pm_caps = MMC_PM_KEEP_POWER;
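The busy-detect changes above boil down to a three-step handshake on DAT0: when a busy-signalling command completes cleanly and the card still reports busy, the command-done status bits are parked in host->busy_status and MCI_ST_BUSYEND is unmasked instead of finishing the request; further MCI_ST_CARDBUSY edges are ignored while waiting; and on the busy-end interrupt the mask is cleared, busy_status is reset and the command completes. A host-agnostic sketch of that state machine (invented names, plain C):

#include <stdbool.h>
#include <stdint.h>

struct busy_tracker {
	uint32_t saved_status;	/* stands in for host->busy_status */
};

/* Returns true once the command may be completed. */
static bool busy_track(struct busy_tracker *t, uint32_t cmd_done_bits,
		       bool card_busy, bool busy_end_irq)
{
	if (!t->saved_status) {
		if (cmd_done_bits && card_busy) {
			/* unmask the busy-end interrupt here */
			t->saved_status = cmd_done_bits;
			return false;		/* defer completion */
		}
		return true;			/* no busy phase, finish now */
	}

	if (!busy_end_irq)
		return false;			/* still busy, keep waiting */

	/* mask the busy-end interrupt again here */
	t->saved_status = 0;
	return true;
}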
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 168bc72f7a94..58b1b8896bf2 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -38,10 +38,11 @@
#define MCI_CPSM_INTERRUPT (1 << 8)
#define MCI_CPSM_PENDING (1 << 9)
#define MCI_CPSM_ENABLE (1 << 10)
-#define MCI_SDIO_SUSP (1 << 11)
-#define MCI_ENCMD_COMPL (1 << 12)
-#define MCI_NIEN (1 << 13)
-#define MCI_CE_ATACMD (1 << 14)
+/* Argument flag extensions in the ST Micro versions */
+#define MCI_ST_SDIO_SUSP (1 << 11)
+#define MCI_ST_ENCMD_COMPL (1 << 12)
+#define MCI_ST_NIEN (1 << 13)
+#define MCI_ST_CE_ATACMD (1 << 14)
#define MMCIRESPCMD 0x010
#define MMCIRESPONSE0 0x014
@@ -139,6 +140,7 @@
/* Extended status bits for the ST Micro variants */
#define MCI_ST_SDIOITMASK (1 << 22)
#define MCI_ST_CEATAENDMASK (1 << 23)
+#define MCI_ST_BUSYEND (1 << 24)
#define MMCIMASK1 0x040
#define MMCIFIFOCNT 0x048
@@ -186,6 +188,7 @@ struct mmci_host {
u32 pwr_reg;
u32 clk_reg;
u32 datactrl_reg;
+ u32 busy_status;
bool vqmmc_enabled;
struct mmci_platform_data *plat;
struct variant_data *variant;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 98b6b6ef7e5c..5c2e58b29305 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -26,6 +26,7 @@
#include <linux/omap-dma.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
@@ -130,7 +131,6 @@ struct mmc_omap_host {
u32 dma_rx_burst;
struct dma_chan *dma_tx;
u32 dma_tx_burst;
- struct resource *mem_res;
void __iomem *virt_base;
unsigned int phys_base;
int irq;
@@ -153,7 +153,6 @@ struct mmc_omap_host {
u32 total_bytes_left;
unsigned features;
- unsigned use_dma:1;
unsigned brs_received:1, dma_done:1;
unsigned dma_in_use:1;
spinlock_t dma_lock;
@@ -338,6 +337,7 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
u32 cmdreg;
u32 resptype;
u32 cmdtype;
+ u16 irq_mask;
host->cmd = cmd;
@@ -390,12 +390,14 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
OMAP_MMC_WRITE(host, CTO, 200);
OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
- OMAP_MMC_WRITE(host, IE,
- OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
- OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
- OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
- OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
- OMAP_MMC_STAT_END_OF_DATA);
+ irq_mask = OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
+ OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
+ OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
+ OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
+ OMAP_MMC_STAT_END_OF_DATA;
+ if (cmd->opcode == MMC_ERASE)
+ irq_mask &= ~OMAP_MMC_STAT_DATA_TOUT;
+ OMAP_MMC_WRITE(host, IE, irq_mask);
OMAP_MMC_WRITE(host, CMD, cmdreg);
}
@@ -945,7 +947,7 @@ static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
struct mmc_data *data = req->data;
- int i, use_dma, block_size;
+ int i, use_dma = 1, block_size;
unsigned sg_len;
host->data = data;
@@ -970,13 +972,10 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
sg_len = (data->blocks == 1) ? 1 : data->sg_len;
/* Only do DMA for entire blocks */
- use_dma = host->use_dma;
- if (use_dma) {
- for (i = 0; i < sg_len; i++) {
- if ((data->sg[i].length % block_size) != 0) {
- use_dma = 0;
- break;
- }
+ for (i = 0; i < sg_len; i++) {
+ if ((data->sg[i].length % block_size) != 0) {
+ use_dma = 0;
+ break;
}
}
@@ -1239,7 +1238,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
mmc->caps = 0;
if (host->pdata->slots[id].wires >= 4)
- mmc->caps |= MMC_CAP_4_BIT_DATA;
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_ERASE;
mmc->ops = &mmc_omap_ops;
mmc->f_min = 400000;
@@ -1262,6 +1261,13 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
+ if (slot->pdata->get_cover_state != NULL) {
+ setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
+ (unsigned long)slot);
+ tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
+ (unsigned long)slot);
+ }
+
r = mmc_add_host(mmc);
if (r < 0)
goto err_remove_host;
@@ -1278,11 +1284,6 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
&dev_attr_cover_switch);
if (r < 0)
goto err_remove_slot_name;
-
- setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
- (unsigned long)slot);
- tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
- (unsigned long)slot);
tasklet_schedule(&slot->cover_tasklet);
}
@@ -1333,21 +1334,19 @@ static int mmc_omap_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host = devm_kzalloc(&pdev->dev, sizeof(struct mmc_omap_host),
+ GFP_KERNEL);
+ if (host == NULL)
+ return -ENOMEM;
+
irq = platform_get_irq(pdev, 0);
- if (res == NULL || irq < 0)
+ if (irq < 0)
return -ENXIO;
- res = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (res == NULL)
- return -EBUSY;
-
- host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
- if (host == NULL) {
- ret = -ENOMEM;
- goto err_free_mem_region;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->virt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->virt_base))
+ return PTR_ERR(host->virt_base);
INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
@@ -1369,20 +1368,11 @@ static int mmc_omap_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
host->id = pdev->id;
- host->mem_res = res;
- host->irq = irq;
- host->use_dma = 1;
host->irq = irq;
- host->phys_base = host->mem_res->start;
- host->virt_base = ioremap(res->start, resource_size(res));
- if (!host->virt_base)
- goto err_ioremap;
-
+ host->phys_base = res->start;
host->iclk = clk_get(&pdev->dev, "ick");
- if (IS_ERR(host->iclk)) {
- ret = PTR_ERR(host->iclk);
- goto err_free_mmc_host;
- }
+ if (IS_ERR(host->iclk))
+ return PTR_ERR(host->iclk);
clk_enable(host->iclk);
host->fclk = clk_get(&pdev->dev, "fck");
@@ -1460,12 +1450,6 @@ err_free_dma:
err_free_iclk:
clk_disable(host->iclk);
clk_put(host->iclk);
-err_free_mmc_host:
- iounmap(host->virt_base);
-err_ioremap:
- kfree(host);
-err_free_mem_region:
- release_mem_region(res->start, resource_size(res));
return ret;
}
@@ -1493,13 +1477,8 @@ static int mmc_omap_remove(struct platform_device *pdev)
if (host->dma_rx)
dma_release_channel(host->dma_rx);
- iounmap(host->virt_base);
- release_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
destroy_workqueue(host->mmc_omap_wq);
- kfree(host);
-
return 0;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index dbd32ad3b749..e91ee21549d0 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -45,6 +45,7 @@
/* OMAP HSMMC Host Controller Registers */
#define OMAP_HSMMC_SYSSTATUS 0x0014
#define OMAP_HSMMC_CON 0x002C
+#define OMAP_HSMMC_SDMASA 0x0100
#define OMAP_HSMMC_BLK 0x0104
#define OMAP_HSMMC_ARG 0x0108
#define OMAP_HSMMC_CMD 0x010C
@@ -58,6 +59,7 @@
#define OMAP_HSMMC_STAT 0x0130
#define OMAP_HSMMC_IE 0x0134
#define OMAP_HSMMC_ISE 0x0138
+#define OMAP_HSMMC_AC12 0x013C
#define OMAP_HSMMC_CAPA 0x0140
#define VS18 (1 << 26)
@@ -81,6 +83,7 @@
#define DTO_MASK 0x000F0000
#define DTO_SHIFT 16
#define INIT_STREAM (1 << 1)
+#define ACEN_ACMD23 (2 << 2)
#define DP_SELECT (1 << 21)
#define DDIR (1 << 4)
#define DMAE 0x1
@@ -97,7 +100,6 @@
#define SRC (1 << 25)
#define SRD (1 << 26)
#define SOFTRESET (1 << 1)
-#define RESETDONE (1 << 0)
/* Interrupt masks for IE and ISE register */
#define CC_EN (1 << 0)
@@ -112,13 +114,21 @@
#define DTO_EN (1 << 20)
#define DCRC_EN (1 << 21)
#define DEB_EN (1 << 22)
+#define ACE_EN (1 << 24)
#define CERR_EN (1 << 28)
#define BADA_EN (1 << 29)
-#define INT_EN_MASK (BADA_EN | CERR_EN | DEB_EN | DCRC_EN |\
+#define INT_EN_MASK (BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\
DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \
BRR_EN | BWR_EN | TC_EN | CC_EN)
+#define CNI (1 << 7)
+#define ACIE (1 << 4)
+#define ACEB (1 << 3)
+#define ACCE (1 << 2)
+#define ACTO (1 << 1)
+#define ACNE (1 << 0)
+
#define MMC_AUTOSUSPEND_DELAY 100
#define MMC_TIMEOUT_MS 20 /* 20 mSec */
#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */
@@ -126,6 +136,11 @@
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
+#define VDD_1V8 1800000 /* 1800000 uV */
+#define VDD_3V0 3000000 /* 3000000 uV */
+#define VDD_165_195 (ffs(MMC_VDD_165_195) - 1)
+
+#define AUTO_CMD23 (1 << 1) /* Auto CMD23 support */
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -164,7 +179,8 @@ struct omap_hsmmc_host {
*/
struct regulator *vcc;
struct regulator *vcc_aux;
- int pbias_disable;
+ struct regulator *pbias;
+ bool pbias_enabled;
void __iomem *base;
resource_size_t mapbase;
spinlock_t irq_lock; /* Prevent races with irq handler */
@@ -188,10 +204,19 @@ struct omap_hsmmc_host {
int reqs_blocked;
int use_reg;
int req_in_progress;
+ unsigned long clk_rate;
+ unsigned int flags;
struct omap_hsmmc_next next_data;
struct omap_mmc_platform_data *pdata;
};
+struct omap_mmc_of_data {
+ u32 reg_offset;
+ u8 controller_flags;
+};
+
+static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);
+
static int omap_hsmmc_card_detect(struct device *dev, int slot)
{
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
@@ -261,17 +286,19 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
*/
if (!host->vcc)
return 0;
- /*
- * With DT, never turn OFF the regulator for MMC1. This is because
- * the pbias cell programming support is still missing when
- * booting with Device tree
- */
- if (host->pbias_disable && !vdd)
- return 0;
if (mmc_slot(host).before_set_reg)
mmc_slot(host).before_set_reg(dev, slot, power_on, vdd);
+ if (host->pbias) {
+ if (host->pbias_enabled == 1) {
+ ret = regulator_disable(host->pbias);
+ if (!ret)
+ host->pbias_enabled = 0;
+ }
+ regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0);
+ }
+
/*
* Assume Vcc regulator is used only to power the card ... OMAP
* VDDS is used to power the pins, optionally with a transceiver to
@@ -286,11 +313,12 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
* chips/cards need an interface voltage rail too.
*/
if (power_on) {
- ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+ if (host->vcc)
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
/* Enable interface voltage rail, if needed */
if (ret == 0 && host->vcc_aux) {
ret = regulator_enable(host->vcc_aux);
- if (ret < 0)
+ if (ret < 0 && host->vcc)
ret = mmc_regulator_set_ocr(host->mmc,
host->vcc, 0);
}
@@ -298,16 +326,34 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
/* Shut down the rail */
if (host->vcc_aux)
ret = regulator_disable(host->vcc_aux);
- if (!ret) {
+ if (host->vcc) {
/* Then proceed to shut down the local regulator */
ret = mmc_regulator_set_ocr(host->mmc,
host->vcc, 0);
}
}
+ if (host->pbias) {
+ if (vdd <= VDD_165_195)
+ ret = regulator_set_voltage(host->pbias, VDD_1V8,
+ VDD_1V8);
+ else
+ ret = regulator_set_voltage(host->pbias, VDD_3V0,
+ VDD_3V0);
+ if (ret < 0)
+ goto error_set_power;
+
+ if (host->pbias_enabled == 0) {
+ ret = regulator_enable(host->pbias);
+ if (!ret)
+ host->pbias_enabled = 1;
+ }
+ }
+
if (mmc_slot(host).after_set_reg)
mmc_slot(host).after_set_reg(dev, slot, power_on, vdd);
+error_set_power:
return ret;
}
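The PBIAS handling added above follows the sequence this hunk implements: switch PBIAS off before the card supply changes, reprogram it to match the new level (1.8 V for low-voltage cards, 3.0 V otherwise), then switch it back on. A stripped-down sketch of that sequence with the regulator API (hypothetical helper; error paths and the vmmc/vmmc_aux handling in between omitted):

#include <linux/regulator/consumer.h>

static int example_reprogram_pbias(struct regulator *pbias, bool want_1v8)
{
	int uv = want_1v8 ? 1800000 : 3000000;
	int ret;

	ret = regulator_disable(pbias);	/* PBIAS off while the supply moves */
	if (ret)
		return ret;

	/* ... reconfigure the card supply (vmmc) here ... */

	ret = regulator_set_voltage(pbias, uv, uv);
	if (ret)
		return ret;

	return regulator_enable(pbias);
}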
@@ -316,12 +362,12 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
struct regulator *reg;
int ocr_value = 0;
- reg = regulator_get(host->dev, "vmmc");
+ reg = devm_regulator_get(host->dev, "vmmc");
if (IS_ERR(reg)) {
- dev_err(host->dev, "vmmc regulator missing\n");
+ dev_err(host->dev, "unable to get vmmc regulator %ld\n",
+ PTR_ERR(reg));
return PTR_ERR(reg);
} else {
- mmc_slot(host).set_power = omap_hsmmc_set_power;
host->vcc = reg;
ocr_value = mmc_regulator_get_ocrmask(reg);
if (!mmc_slot(host).ocr_mask) {
@@ -334,31 +380,29 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
return -EINVAL;
}
}
+ }
+ mmc_slot(host).set_power = omap_hsmmc_set_power;
- /* Allow an aux regulator */
- reg = regulator_get(host->dev, "vmmc_aux");
- host->vcc_aux = IS_ERR(reg) ? NULL : reg;
+ /* Allow an aux regulator */
+ reg = devm_regulator_get_optional(host->dev, "vmmc_aux");
+ host->vcc_aux = IS_ERR(reg) ? NULL : reg;
- /* For eMMC do not power off when not in sleep state */
- if (mmc_slot(host).no_regulator_off_init)
- return 0;
- /*
- * UGLY HACK: workaround regulator framework bugs.
- * When the bootloader leaves a supply active, it's
- * initialized with zero usecount ... and we can't
- * disable it without first enabling it. Until the
- * framework is fixed, we need a workaround like this
- * (which is safe for MMC, but not in general).
- */
- if (regulator_is_enabled(host->vcc) > 0 ||
- (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
- int vdd = ffs(mmc_slot(host).ocr_mask) - 1;
+ reg = devm_regulator_get_optional(host->dev, "pbias");
+ host->pbias = IS_ERR(reg) ? NULL : reg;
- mmc_slot(host).set_power(host->dev, host->slot_id,
- 1, vdd);
- mmc_slot(host).set_power(host->dev, host->slot_id,
- 0, 0);
- }
+ /* For eMMC do not power off when not in sleep state */
+ if (mmc_slot(host).no_regulator_off_init)
+ return 0;
+ /*
+ * To disable boot_on regulator, enable regulator
+ * to increase usecount and then disable it.
+ */
+ if ((host->vcc && regulator_is_enabled(host->vcc) > 0) ||
+ (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
+ int vdd = ffs(mmc_slot(host).ocr_mask) - 1;
+
+ mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd);
+ mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
}
return 0;
@@ -366,8 +410,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
{
- regulator_put(host->vcc);
- regulator_put(host->vcc_aux);
mmc_slot(host).set_power = NULL;
}
@@ -605,9 +647,6 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
u32 hctl, capa;
unsigned long timeout;
- if (!OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE)
- return 1;
-
if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
@@ -787,6 +826,11 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
+ if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) &&
+ host->mrq->sbc) {
+ cmdreg |= ACEN_ACMD23;
+ OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg);
+ }
if (data) {
cmdreg |= DP_SELECT | MSBS | BCE;
if (data->flags & MMC_DATA_READ)
@@ -864,11 +908,10 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
else
data->bytes_xfered = 0;
- if (!data->stop) {
+ if (data->stop && (data->error || !host->mrq->sbc))
+ omap_hsmmc_start_command(host, data->stop, NULL);
+ else
omap_hsmmc_request_done(host, data->mrq);
- return;
- }
- omap_hsmmc_start_command(host, data->stop, NULL);
}
/*
@@ -879,6 +922,14 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
host->cmd = NULL;
+ if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
+ !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
+ omap_hsmmc_start_dma_transfer(host);
+ omap_hsmmc_start_command(host, host->mrq->cmd,
+ host->mrq->data);
+ return;
+ }
+
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
/* response type 2 */
@@ -892,7 +943,7 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
}
}
if ((host->data == NULL && !host->response_busy) || cmd->error)
- omap_hsmmc_request_done(host, cmd->mrq);
+ omap_hsmmc_request_done(host, host->mrq);
}
/*
@@ -1015,6 +1066,7 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
{
struct mmc_data *data;
int end_cmd = 0, end_trans = 0;
+ int error = 0;
data = host->data;
dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
@@ -1029,6 +1081,20 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
else if (status & (CCRC_EN | DCRC_EN))
hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
+ if (status & ACE_EN) {
+ u32 ac12;
+ ac12 = OMAP_HSMMC_READ(host->base, AC12);
+ if (!(ac12 & ACNE) && host->mrq->sbc) {
+ end_cmd = 1;
+ if (ac12 & ACTO)
+ error = -ETIMEDOUT;
+ else if (ac12 & (ACCE | ACEB | ACIE))
+ error = -EILSEQ;
+ host->mrq->sbc->error = error;
+ hsmmc_command_incomplete(host, error, end_cmd);
+ }
+ dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
+ }
if (host->data || host->response_busy) {
end_trans = !end_cmd;
host->response_busy = 0;
@@ -1236,8 +1302,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
}
/* Check if next job is already prepared */
- if (next ||
- (!next && data->host_cookie != host->next_data.cookie)) {
+ if (next || data->host_cookie != host->next_data.cookie) {
dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
omap_hsmmc_get_dma_dir(host, data));
@@ -1262,7 +1327,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
/*
* Routine to configure and start DMA for the MMC card
*/
-static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
+static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_request *req)
{
struct dma_slave_config cfg;
@@ -1321,8 +1386,6 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
host->dma_ch = 1;
- dma_async_issue_pending(chan);
-
return 0;
}
@@ -1338,7 +1401,7 @@ static void set_data_timeout(struct omap_hsmmc_host *host,
if (clkd == 0)
clkd = 1;
- cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd);
+ cycle_ns = 1000000000 / (host->clk_rate / clkd);
timeout = timeout_ns / cycle_ns;
timeout += timeout_clks;
if (timeout) {
@@ -1363,6 +1426,21 @@ static void set_data_timeout(struct omap_hsmmc_host *host,
OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
}
+static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
+{
+ struct mmc_request *req = host->mrq;
+ struct dma_chan *chan;
+
+ if (!req->data)
+ return;
+ OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
+ | (req->data->blocks << 16));
+ set_data_timeout(host, req->data->timeout_ns,
+ req->data->timeout_clks);
+ chan = omap_hsmmc_get_dma_chan(host, req->data);
+ dma_async_issue_pending(chan);
+}
+
/*
* Configure block length for MMC/SD cards and initiate the transfer.
*/
@@ -1383,12 +1461,8 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
return 0;
}
- OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
- | (req->data->blocks << 16));
- set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks);
-
if (host->use_dma) {
- ret = omap_hsmmc_start_dma_transfer(host, req);
+ ret = omap_hsmmc_setup_dma_transfer(host, req);
if (ret != 0) {
dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");
return ret;
@@ -1462,6 +1536,7 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
host->reqs_blocked = 0;
WARN_ON(host->mrq != NULL);
host->mrq = req;
+ host->clk_rate = clk_get_rate(host->fclk);
err = omap_hsmmc_prepare_data(host, req);
if (err) {
req->cmd->error = err;
@@ -1471,7 +1546,12 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
mmc_request_done(mmc, req);
return;
}
+ if (req->sbc && !(host->flags & AUTO_CMD23)) {
+ omap_hsmmc_start_command(host, req->sbc, NULL);
+ return;
+ }
+ omap_hsmmc_start_dma_transfer(host);
omap_hsmmc_start_command(host, req->cmd, req->data);
}
@@ -1509,13 +1589,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
* of external transceiver; but they all handle 1.8V.
*/
if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
- (ios->vdd == DUAL_VOLT_OCR_BIT) &&
- /*
- * With pbias cell programming missing, this
- * can't be allowed on MMC1 when booting with device
- * tree.
- */
- !host->pbias_disable) {
+ (ios->vdd == DUAL_VOLT_OCR_BIT)) {
/*
* The mmc_select_voltage fn of the core does
* not seem to set the power_mode to
@@ -1678,18 +1752,29 @@ static void omap_hsmmc_debugfs(struct mmc_host *mmc)
#endif
#ifdef CONFIG_OF
-static u16 omap4_reg_offset = 0x100;
+static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = {
+ /* See 35xx errata 2.1.1.128 in SPRZ278F */
+ .controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
+};
+
+static const struct omap_mmc_of_data omap4_mmc_of_data = {
+ .reg_offset = 0x100,
+};
static const struct of_device_id omap_mmc_of_match[] = {
{
.compatible = "ti,omap2-hsmmc",
},
{
+ .compatible = "ti,omap3-pre-es3-hsmmc",
+ .data = &omap3_pre_es3_mmc_of_data,
+ },
+ {
.compatible = "ti,omap3-hsmmc",
},
{
.compatible = "ti,omap4-hsmmc",
- .data = &omap4_reg_offset,
+ .data = &omap4_mmc_of_data,
},
{},
};
@@ -1709,7 +1794,7 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
- return NULL; /* out of memory */
+ return ERR_PTR(-ENOMEM); /* out of memory */
if (of_find_property(np, "ti,dual-volt", NULL))
pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;
@@ -1738,13 +1823,19 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
pdata->slots[0].features |= HSMMC_HAS_HSPE_SUPPORT;
+ if (of_find_property(np, "keep-power-in-suspend", NULL))
+ pdata->slots[0].pm_caps |= MMC_PM_KEEP_POWER;
+
+ if (of_find_property(np, "enable-sdio-wakeup", NULL))
+ pdata->slots[0].pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+
return pdata;
}
#else
static inline struct omap_mmc_platform_data
*of_get_hsmmc_pdata(struct device *dev)
{
- return NULL;
+ return ERR_PTR(-EINVAL);
}
#endif
@@ -1759,6 +1850,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
dma_cap_mask_t mask;
unsigned tx_req, rx_req;
struct pinctrl *pinctrl;
+ const struct omap_mmc_of_data *data;
match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
if (match) {
@@ -1768,8 +1860,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
return PTR_ERR(pdata);
if (match->data) {
- const u16 *offsetp = match->data;
- pdata->reg_offset = *offsetp;
+ data = match->data;
+ pdata->reg_offset = data->reg_offset;
+ pdata->controller_flags |= data->controller_flags;
}
}
@@ -1814,6 +1907,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
host->base = ioremap(host->mapbase, SZ_4K);
host->power_mode = MMC_POWER_OFF;
host->next_data.cookie = 1;
+ host->pbias_enabled = 0;
platform_set_drvdata(pdev, host);
@@ -1847,10 +1941,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_context_save(host);
- /* This can be removed once we support PBIAS with DT */
- if (host->dev->of_node && res->start == 0x4809c000)
- host->pbias_disable = 1;
-
host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
/*
* MMC can still work without debounce clock.
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index c46feda07d56..5fb994f9a653 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -31,14 +31,9 @@
#include <linux/mfd/rtsx_pci.h>
#include <asm/unaligned.h>
-/* SD Tuning Data Structure
- * Record continuous timing phase path
- */
-struct timing_phase_path {
- int start;
- int end;
- int mid;
- int len;
+struct realtek_next {
+ unsigned int sg_count;
+ s32 cookie;
};
struct realtek_pci_sdmmc {
@@ -46,9 +41,18 @@ struct realtek_pci_sdmmc {
struct rtsx_pcr *pcr;
struct mmc_host *mmc;
struct mmc_request *mrq;
-
- struct mutex host_mutex;
-
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ spinlock_t lock;
+ struct timer_list timer;
+ struct tasklet_struct cmd_tasklet;
+ struct tasklet_struct data_tasklet;
+ struct tasklet_struct finish_tasklet;
+
+ u8 rsp_type;
+ u8 rsp_len;
+ int sg_count;
u8 ssc_depth;
unsigned int clock;
bool vpclk;
@@ -58,8 +62,13 @@ struct realtek_pci_sdmmc {
int power_state;
#define SDMMC_POWER_ON 1
#define SDMMC_POWER_OFF 0
+
+ struct realtek_next next_data;
};
+static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
+ struct mmc_request *mrq);
+
static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host)
{
return &(host->pdev->dev);
@@ -96,6 +105,95 @@ static void sd_print_debug_regs(struct realtek_pci_sdmmc *host)
#define sd_print_debug_regs(host)
#endif /* DEBUG */
+static void sd_isr_done_transfer(struct platform_device *pdev)
+{
+ struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
+
+ spin_lock(&host->lock);
+ if (host->cmd)
+ tasklet_schedule(&host->cmd_tasklet);
+ if (host->data)
+ tasklet_schedule(&host->data_tasklet);
+ spin_unlock(&host->lock);
+}
+
+static void sd_request_timeout(unsigned long host_addr)
+{
+ struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->mrq) {
+ dev_err(sdmmc_dev(host), "error: no request exist\n");
+ goto out;
+ }
+
+ if (host->cmd)
+ host->cmd->error = -ETIMEDOUT;
+ if (host->data)
+ host->data->error = -ETIMEDOUT;
+
+ dev_dbg(sdmmc_dev(host), "timeout for request\n");
+
+out:
+ tasklet_schedule(&host->finish_tasklet);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void sd_finish_request(unsigned long host_addr)
+{
+ struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
+ struct rtsx_pcr *pcr = host->pcr;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ unsigned long flags;
+ bool any_error;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ del_timer(&host->timer);
+ mrq = host->mrq;
+ if (!mrq) {
+ dev_err(sdmmc_dev(host), "error: no request need finish\n");
+ goto out;
+ }
+
+ cmd = mrq->cmd;
+ data = mrq->data;
+
+ any_error = (mrq->sbc && mrq->sbc->error) ||
+ (mrq->stop && mrq->stop->error) ||
+ (cmd && cmd->error) || (data && data->error);
+
+ if (any_error) {
+ rtsx_pci_stop_cmd(pcr);
+ sd_clear_error(host);
+ }
+
+ if (data) {
+ if (any_error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blocks * data->blksz;
+
+ if (!data->host_cookie)
+ rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len,
+ data->flags & MMC_DATA_READ);
+
+ }
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+ mutex_unlock(&pcr->pcr_mutex);
+ mmc_request_done(host->mmc, mrq);
+}
+
static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
u8 *buf, int buf_len, int timeout)
{
@@ -213,8 +311,7 @@ static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
return 0;
}
-static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
- struct mmc_command *cmd)
+static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
{
struct rtsx_pcr *pcr = host->pcr;
u8 cmd_idx = (u8)cmd->opcode;
@@ -222,11 +319,14 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
int err = 0;
int timeout = 100;
int i;
- u8 *ptr;
- int stat_idx = 0;
u8 rsp_type;
int rsp_len = 5;
- bool clock_toggled = false;
+ unsigned long flags;
+
+ if (host->cmd)
+ dev_err(sdmmc_dev(host), "error: cmd already exist\n");
+
+ host->cmd = cmd;
dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
__func__, cmd_idx, arg);
@@ -261,6 +361,8 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
err = -EINVAL;
goto out;
}
+ host->rsp_type = rsp_type;
+ host->rsp_len = rsp_len;
if (rsp_type == SD_RSP_TYPE_R1b)
timeout = 3000;
@@ -270,8 +372,6 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
0xFF, SD_CLK_TOGGLE_EN);
if (err < 0)
goto out;
-
- clock_toggled = true;
}
rtsx_pci_init_cmd(pcr);
@@ -295,25 +395,60 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
/* Read data from ping-pong buffer */
for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++)
rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
- stat_idx = 16;
} else if (rsp_type != SD_RSP_TYPE_R0) {
/* Read data from SD_CMDx registers */
for (i = SD_CMD0; i <= SD_CMD4; i++)
rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
- stat_idx = 5;
}
rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0);
- err = rtsx_pci_send_cmd(pcr, timeout);
- if (err < 0) {
- sd_print_debug_regs(host);
- sd_clear_error(host);
- dev_dbg(sdmmc_dev(host),
- "rtsx_pci_send_cmd error (err = %d)\n", err);
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout));
+
+ spin_lock_irqsave(&pcr->lock, flags);
+ pcr->trans_result = TRANS_NOT_READY;
+ rtsx_pci_send_cmd_no_wait(pcr);
+ spin_unlock_irqrestore(&pcr->lock, flags);
+
+ return;
+
+out:
+ cmd->error = err;
+ tasklet_schedule(&host->finish_tasklet);
+}
+
+static void sd_get_rsp(unsigned long host_addr)
+{
+ struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
+ struct rtsx_pcr *pcr = host->pcr;
+ struct mmc_command *cmd;
+ int i, err = 0, stat_idx;
+ u8 *ptr, rsp_type;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ cmd = host->cmd;
+ host->cmd = NULL;
+
+ if (!cmd) {
+ dev_err(sdmmc_dev(host), "error: cmd not exist\n");
goto out;
}
+ spin_lock(&pcr->lock);
+ if (pcr->trans_result == TRANS_NO_DEVICE)
+ err = -ENODEV;
+ else if (pcr->trans_result != TRANS_RESULT_OK)
+ err = -EINVAL;
+ spin_unlock(&pcr->lock);
+
+ if (err < 0)
+ goto out;
+
+ rsp_type = host->rsp_type;
+ stat_idx = host->rsp_len;
+
if (rsp_type == SD_RSP_TYPE_R0) {
err = 0;
goto out;
@@ -350,26 +485,106 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
cmd->resp[0]);
}
+ if (cmd == host->mrq->sbc) {
+ sd_send_cmd(host, host->mrq->cmd);
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ if (cmd == host->mrq->stop)
+ goto out;
+
+ if (cmd->data) {
+ sd_start_multi_rw(host, host->mrq);
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
out:
cmd->error = err;
- if (err && clock_toggled)
- rtsx_pci_write_register(pcr, SD_BUS_STAT,
- SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+ tasklet_schedule(&host->finish_tasklet);
+ spin_unlock_irqrestore(&host->lock, flags);
}
-static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
+static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host,
+ struct mmc_data *data, struct realtek_next *next)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int read = data->flags & MMC_DATA_READ;
+ int sg_count = 0;
+
+ if (!next && data->host_cookie &&
+ data->host_cookie != host->next_data.cookie) {
+ dev_err(sdmmc_dev(host),
+ "error: invalid cookie data[%d] host[%d]\n",
+ data->host_cookie, host->next_data.cookie);
+ data->host_cookie = 0;
+ }
+
+ if (next || (!next && data->host_cookie != host->next_data.cookie))
+ sg_count = rtsx_pci_dma_map_sg(pcr,
+ data->sg, data->sg_len, read);
+ else
+ sg_count = host->next_data.sg_count;
+
+ if (next) {
+ next->sg_count = sg_count;
+ if (++next->cookie < 0)
+ next->cookie = 1;
+ data->host_cookie = next->cookie;
+ }
+
+ return sg_count;
+}
+
+static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (data->host_cookie) {
+ dev_err(sdmmc_dev(host),
+ "error: descard already cookie data[%d]\n",
+ data->host_cookie);
+ data->host_cookie = 0;
+ }
+
+ dev_dbg(sdmmc_dev(host), "dma sg prepared: %d\n",
+ sd_pre_dma_transfer(host, data, &host->next_data));
+}
+
+static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ struct mmc_data *data = mrq->data;
+ int read = data->flags & MMC_DATA_READ;
+
+ rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read);
+ data->host_cookie = 0;
+}
+
+static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
+ struct mmc_request *mrq)
{
struct rtsx_pcr *pcr = host->pcr;
struct mmc_host *mmc = host->mmc;
struct mmc_card *card = mmc->card;
struct mmc_data *data = mrq->data;
int uhs = mmc_card_uhs(card);
- int read = (data->flags & MMC_DATA_READ) ? 1 : 0;
+ int read = data->flags & MMC_DATA_READ;
u8 cfg2, trans_mode;
int err;
size_t data_len = data->blksz * data->blocks;
+ if (host->data)
+ dev_err(sdmmc_dev(host), "error: data already exist\n");
+
+ host->data = data;
+
if (read) {
cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0;
@@ -420,17 +635,56 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
SD_TRANSFER_END, SD_TRANSFER_END);
+ mod_timer(&host->timer, jiffies + 10 * HZ);
rtsx_pci_send_cmd_no_wait(pcr);
- err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000);
+ err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read);
if (err < 0) {
- sd_clear_error(host);
- return err;
+ data->error = err;
+ tasklet_schedule(&host->finish_tasklet);
}
-
return 0;
}
+static void sd_finish_multi_rw(unsigned long host_addr)
+{
+ struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
+ struct rtsx_pcr *pcr = host->pcr;
+ struct mmc_data *data;
+ int err = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->data) {
+ dev_err(sdmmc_dev(host), "error: no data exist\n");
+ goto out;
+ }
+
+ data = host->data;
+ host->data = NULL;
+
+ if (pcr->trans_result == TRANS_NO_DEVICE)
+ err = -ENODEV;
+ else if (pcr->trans_result != TRANS_RESULT_OK)
+ err = -EINVAL;
+
+ if (err < 0) {
+ data->error = err;
+ goto out;
+ }
+
+ if (!host->mrq->sbc && data->stop) {
+ sd_send_cmd(host, data->stop);
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+out:
+ tasklet_schedule(&host->finish_tasklet);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
{
rtsx_pci_write_register(host->pcr, SD_CFG1,
@@ -511,85 +765,47 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host,
return 0;
}
-static u8 sd_search_final_phase(struct realtek_pci_sdmmc *host, u32 phase_map)
+static inline u32 test_phase_bit(u32 phase_map, unsigned int bit)
{
- struct timing_phase_path path[MAX_PHASE + 1];
- int i, j, cont_path_cnt;
- int new_block, max_len, final_path_idx;
- u8 final_phase = 0xFF;
+ bit %= RTSX_PHASE_MAX;
+ return phase_map & (1 << bit);
+}
- /* Parse phase_map, take it as a bit-ring */
- cont_path_cnt = 0;
- new_block = 1;
- j = 0;
- for (i = 0; i < MAX_PHASE + 1; i++) {
- if (phase_map & (1 << i)) {
- if (new_block) {
- new_block = 0;
- j = cont_path_cnt++;
- path[j].start = i;
- path[j].end = i;
- } else {
- path[j].end = i;
- }
- } else {
- new_block = 1;
- if (cont_path_cnt) {
- /* Calculate path length and middle point */
- int idx = cont_path_cnt - 1;
- path[idx].len =
- path[idx].end - path[idx].start + 1;
- path[idx].mid =
- path[idx].start + path[idx].len / 2;
- }
- }
- }
+static int sd_get_phase_len(u32 phase_map, unsigned int start_bit)
+{
+ int i;
- if (cont_path_cnt == 0) {
- dev_dbg(sdmmc_dev(host), "No continuous phase path\n");
- goto finish;
- } else {
- /* Calculate last continuous path length and middle point */
- int idx = cont_path_cnt - 1;
- path[idx].len = path[idx].end - path[idx].start + 1;
- path[idx].mid = path[idx].start + path[idx].len / 2;
+ for (i = 0; i < RTSX_PHASE_MAX; i++) {
+ if (test_phase_bit(phase_map, start_bit + i) == 0)
+ return i;
}
+ return RTSX_PHASE_MAX;
+}
+
+static u8 sd_search_final_phase(struct realtek_pci_sdmmc *host, u32 phase_map)
+{
+ int start = 0, len = 0;
+ int start_final = 0, len_final = 0;
+ u8 final_phase = 0xFF;
- /* Connect the first and last continuous paths if they are adjacent */
- if (!path[0].start && (path[cont_path_cnt - 1].end == MAX_PHASE)) {
- /* Using negative index */
- path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1;
- path[0].len += path[cont_path_cnt - 1].len;
- path[0].mid = path[0].start + path[0].len / 2;
- /* Convert negative middle point index to positive one */
- if (path[0].mid < 0)
- path[0].mid += MAX_PHASE + 1;
- cont_path_cnt--;
+ if (phase_map == 0) {
+ dev_err(sdmmc_dev(host), "phase error: [map:%x]\n", phase_map);
+ return final_phase;
}
- /* Choose the longest continuous phase path */
- max_len = 0;
- final_phase = 0;
- final_path_idx = 0;
- for (i = 0; i < cont_path_cnt; i++) {
- if (path[i].len > max_len) {
- max_len = path[i].len;
- final_phase = (u8)path[i].mid;
- final_path_idx = i;
+ while (start < RTSX_PHASE_MAX) {
+ len = sd_get_phase_len(phase_map, start);
+ if (len_final < len) {
+ start_final = start;
+ len_final = len;
}
-
- dev_dbg(sdmmc_dev(host), "path[%d].start = %d\n",
- i, path[i].start);
- dev_dbg(sdmmc_dev(host), "path[%d].end = %d\n",
- i, path[i].end);
- dev_dbg(sdmmc_dev(host), "path[%d].len = %d\n",
- i, path[i].len);
- dev_dbg(sdmmc_dev(host), "path[%d].mid = %d\n",
- i, path[i].mid);
+ start += len ? len : 1;
}
-finish:
- dev_dbg(sdmmc_dev(host), "Final chosen phase: %d\n", final_phase);
+ final_phase = (start_final + len_final / 2) % RTSX_PHASE_MAX;
+ dev_dbg(sdmmc_dev(host), "phase: [map:%x] [maxlen:%d] [final:%d]\n",
+ phase_map, len_final, final_phase);
+
return final_phase;
}
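The rewritten tuning search treats phase_map as a circular bitmap: walk it, find the longest run of usable phases (allowing the run to wrap past the top bit), and pick the midpoint of that run as the final phase. A small standalone illustration of the same windowing logic, in user-space C with RTSX_PHASE_MAX assumed to be 32 as in the rtsx headers:

#include <stdio.h>
#include <stdint.h>

#define RTSX_PHASE_MAX	32	/* assumed to match the rtsx driver */

static uint32_t test_phase_bit(uint32_t map, unsigned int bit)
{
	return map & (1u << (bit % RTSX_PHASE_MAX));
}

static int phase_len(uint32_t map, unsigned int start)
{
	int i;

	for (i = 0; i < RTSX_PHASE_MAX; i++)
		if (!test_phase_bit(map, start + i))
			return i;
	return RTSX_PHASE_MAX;
}

int main(void)
{
	uint32_t map = 0xF000001F;	/* good phases wrap from bit 31 to bit 0 */
	int start = 0, len, best_start = 0, best_len = 0;

	while (start < RTSX_PHASE_MAX) {
		len = phase_len(map, start);
		if (len > best_len) {
			best_start = start;
			best_len = len;
		}
		start += len ? len : 1;
	}

	/* Prints: window start=28 len=9 final=0 */
	printf("window start=%d len=%d final=%d\n", best_start, best_len,
	       (best_start + best_len / 2) % RTSX_PHASE_MAX);
	return 0;
}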
@@ -635,7 +851,7 @@ static int sd_tuning_phase(struct realtek_pci_sdmmc *host,
int err, i;
u32 raw_phase_map = 0;
- for (i = MAX_PHASE; i >= 0; i--) {
+ for (i = 0; i < RTSX_PHASE_MAX; i++) {
err = sd_tuning_rx_cmd(host, opcode, (u8)i);
if (err == 0)
raw_phase_map |= 1 << i;
@@ -685,6 +901,13 @@ static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode)
return 0;
}
+static inline bool sd_use_muti_rw(struct mmc_command *cmd)
+{
+ return mmc_op_multi(cmd->opcode) ||
+ (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
+ (cmd->opcode == MMC_WRITE_BLOCK);
+}
+
static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
@@ -693,6 +916,14 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct mmc_data *data = mrq->data;
unsigned int data_size = 0;
int err;
+ unsigned long flags;
+
+ mutex_lock(&pcr->pcr_mutex);
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->mrq)
+ dev_err(sdmmc_dev(host), "error: request already exist\n");
+ host->mrq = mrq;
if (host->eject) {
cmd->error = -ENOMEDIUM;
@@ -705,8 +936,6 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
goto finish;
}
- mutex_lock(&pcr->pcr_mutex);
-
rtsx_pci_start_run(pcr);
rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth,
@@ -715,46 +944,28 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
rtsx_pci_write_register(pcr, CARD_SHARE_MODE,
CARD_SHARE_MASK, CARD_SHARE_48_SD);
- mutex_lock(&host->host_mutex);
- host->mrq = mrq;
- mutex_unlock(&host->host_mutex);
-
if (mrq->data)
data_size = data->blocks * data->blksz;
- if (!data_size || mmc_op_multi(cmd->opcode) ||
- (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
- (cmd->opcode == MMC_WRITE_BLOCK)) {
- sd_send_cmd_get_rsp(host, cmd);
-
- if (!cmd->error && data_size) {
- sd_rw_multi(host, mrq);
+ if (sd_use_muti_rw(cmd))
+ host->sg_count = sd_pre_dma_transfer(host, data, NULL);
- if (mmc_op_multi(cmd->opcode) && mrq->stop)
- sd_send_cmd_get_rsp(host, mrq->stop);
- }
+ if (!data_size || sd_use_muti_rw(cmd)) {
+ if (mrq->sbc)
+ sd_send_cmd(host, mrq->sbc);
+ else
+ sd_send_cmd(host, cmd);
+ spin_unlock_irqrestore(&host->lock, flags);
} else {
+ spin_unlock_irqrestore(&host->lock, flags);
sd_normal_rw(host, mrq);
+ tasklet_schedule(&host->finish_tasklet);
}
-
- if (mrq->data) {
- if (cmd->error || data->error)
- data->bytes_xfered = 0;
- else
- data->bytes_xfered = data->blocks * data->blksz;
- }
-
- mutex_unlock(&pcr->pcr_mutex);
+ return;
finish:
- if (cmd->error)
- dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error);
-
- mutex_lock(&host->host_mutex);
- host->mrq = NULL;
- mutex_unlock(&host->host_mutex);
-
- mmc_request_done(mmc, mrq);
+ tasklet_schedule(&host->finish_tasklet);
+ spin_unlock_irqrestore(&host->lock, flags);
}
static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
@@ -1189,6 +1400,8 @@ out:
}
static const struct mmc_host_ops realtek_pci_sdmmc_ops = {
+ .pre_req = sdmmc_pre_req,
+ .post_req = sdmmc_post_req,
.request = sdmmc_request,
.set_ios = sdmmc_set_ios,
.get_ro = sdmmc_get_ro,
@@ -1252,6 +1465,7 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
struct realtek_pci_sdmmc *host;
struct rtsx_pcr *pcr;
struct pcr_handle *handle = pdev->dev.platform_data;
+ unsigned long host_addr;
if (!handle)
return -ENXIO;
@@ -1275,8 +1489,15 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
pcr->slots[RTSX_SD_CARD].p_dev = pdev;
pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event;
- mutex_init(&host->host_mutex);
+ host_addr = (unsigned long)host;
+ host->next_data.cookie = 1;
+ setup_timer(&host->timer, sd_request_timeout, host_addr);
+ tasklet_init(&host->cmd_tasklet, sd_get_rsp, host_addr);
+ tasklet_init(&host->data_tasklet, sd_finish_multi_rw, host_addr);
+ tasklet_init(&host->finish_tasklet, sd_finish_request, host_addr);
+ spin_lock_init(&host->lock);
+ pcr->slots[RTSX_SD_CARD].done_transfer = sd_isr_done_transfer;
realtek_init_host(host);
mmc_add_host(mmc);
@@ -1289,6 +1510,8 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
struct rtsx_pcr *pcr;
struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ unsigned long flags;
if (!host)
return 0;
@@ -1296,25 +1519,37 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
pcr = host->pcr;
pcr->slots[RTSX_SD_CARD].p_dev = NULL;
pcr->slots[RTSX_SD_CARD].card_event = NULL;
+ pcr->slots[RTSX_SD_CARD].done_transfer = NULL;
mmc = host->mmc;
- host->eject = true;
+ mrq = host->mrq;
- mutex_lock(&host->host_mutex);
+ spin_lock_irqsave(&host->lock, flags);
if (host->mrq) {
dev_dbg(&(pdev->dev),
"%s: Controller removed during transfer\n",
mmc_hostname(mmc));
- rtsx_pci_complete_unfinished_transfer(pcr);
+ if (mrq->sbc)
+ mrq->sbc->error = -ENOMEDIUM;
+ if (mrq->cmd)
+ mrq->cmd->error = -ENOMEDIUM;
+ if (mrq->stop)
+ mrq->stop->error = -ENOMEDIUM;
+ if (mrq->data)
+ mrq->data->error = -ENOMEDIUM;
- host->mrq->cmd->error = -ENOMEDIUM;
- if (host->mrq->stop)
- host->mrq->stop->error = -ENOMEDIUM;
- mmc_request_done(mmc, host->mrq);
+ tasklet_schedule(&host->finish_tasklet);
}
- mutex_unlock(&host->host_mutex);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ del_timer_sync(&host->timer);
+ tasklet_kill(&host->cmd_tasklet);
+ tasklet_kill(&host->data_tasklet);
+ tasklet_kill(&host->finish_tasklet);
mmc_remove_host(mmc);
+ host->eject = true;
+
mmc_free_host(mmc);
dev_dbg(&(pdev->dev),
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 9ce17f6e4014..ebb3f392b589 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -31,7 +31,6 @@
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/err.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/acpi.h>
#include <linux/pm.h>
@@ -40,13 +39,15 @@
#include <linux/mmc/host.h>
#include <linux/mmc/pm.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/sdhci.h>
#include "sdhci.h"
enum {
- SDHCI_ACPI_SD_CD = BIT(0),
- SDHCI_ACPI_RUNTIME_PM = BIT(1),
+ SDHCI_ACPI_SD_CD = BIT(0),
+ SDHCI_ACPI_RUNTIME_PM = BIT(1),
+ SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL = BIT(2),
};
struct sdhci_acpi_chip {
@@ -121,6 +122,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
.caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD,
.flags = SDHCI_ACPI_RUNTIME_PM,
@@ -128,7 +130,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
- .flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_RUNTIME_PM,
+ .flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL |
+ SDHCI_ACPI_RUNTIME_PM,
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON,
};
@@ -141,6 +144,7 @@ struct sdhci_acpi_uid_slot {
static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
+ { "80860F16" , NULL, &sdhci_acpi_slot_int_sd },
{ "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio },
{ "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
{ "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
@@ -150,6 +154,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "80860F14" },
+ { "80860F16" },
{ "INT33BB" },
{ "INT33C6" },
{ "INT3436" },
@@ -192,59 +197,6 @@ static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(acpi_handle handle,
return slot;
}
-#ifdef CONFIG_PM_RUNTIME
-
-static irqreturn_t sdhci_acpi_sd_cd(int irq, void *dev_id)
-{
- mmc_detect_change(dev_id, msecs_to_jiffies(200));
- return IRQ_HANDLED;
-}
-
-static int sdhci_acpi_add_own_cd(struct device *dev, struct mmc_host *mmc)
-{
- struct gpio_desc *desc;
- unsigned long flags;
- int err, irq;
-
- desc = devm_gpiod_get_index(dev, "sd_cd", 0);
- if (IS_ERR(desc)) {
- err = PTR_ERR(desc);
- goto out;
- }
-
- err = gpiod_direction_input(desc);
- if (err)
- goto out_free;
-
- irq = gpiod_to_irq(desc);
- if (irq < 0) {
- err = irq;
- goto out_free;
- }
-
- flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
- err = devm_request_irq(dev, irq, sdhci_acpi_sd_cd, flags, "sd_cd", mmc);
- if (err)
- goto out_free;
-
- return 0;
-
-out_free:
- devm_gpiod_put(dev, desc);
-out:
- dev_warn(dev, "failed to setup card detect wake up\n");
- return err;
-}
-
-#else
-
-static int sdhci_acpi_add_own_cd(struct device *dev, struct mmc_host *mmc)
-{
- return 0;
-}
-
-#endif
-
static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -332,15 +284,19 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
- err = sdhci_add_host(host);
- if (err)
- goto err_free;
-
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
- if (sdhci_acpi_add_own_cd(dev, host->mmc))
+ bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
+
+ if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0)) {
+ dev_warn(dev, "failed to setup card detect gpio\n");
c->use_runtime_pm = false;
+ }
}
+ err = sdhci_add_host(host);
+ if (err)
+ goto err_free;
+
if (c->use_runtime_pm) {
pm_runtime_set_active(dev);
pm_suspend_ignore_children(dev, 1);
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index 7a190fe4dff1..6f166e63b817 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -54,6 +54,7 @@
struct sdhci_bcm_kona_dev {
struct mutex write_lock; /* protect back to back writes */
+ struct clk *external_clk;
};
@@ -257,6 +258,24 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
goto err_pltfm_free;
}
+ /* Get and enable the external clock */
+ kona_dev->external_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(kona_dev->external_clk)) {
+ dev_err(dev, "Failed to get external clock\n");
+ ret = PTR_ERR(kona_dev->external_clk);
+ goto err_pltfm_free;
+ }
+
+ if (clk_set_rate(kona_dev->external_clk, host->mmc->f_max) != 0) {
+ dev_err(dev, "Failed to set rate external clock\n");
+ goto err_pltfm_free;
+ }
+
+ if (clk_prepare_enable(kona_dev->external_clk) != 0) {
+ dev_err(dev, "Failed to enable external clock\n");
+ goto err_pltfm_free;
+ }
+
dev_dbg(dev, "non-removable=%c\n",
(host->mmc->caps & MMC_CAP_NONREMOVABLE) ? 'Y' : 'N');
dev_dbg(dev, "cd_gpio %c, wp_gpio %c\n",
@@ -271,7 +290,7 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
ret = sdhci_bcm_kona_sd_reset(host);
if (ret)
- goto err_pltfm_free;
+ goto err_clk_disable;
sdhci_bcm_kona_sd_init(host);
@@ -307,6 +326,9 @@ err_remove_host:
err_reset:
sdhci_bcm_kona_sd_reset(host);
+err_clk_disable:
+ clk_disable_unprepare(kona_dev->external_clk);
+
err_pltfm_free:
sdhci_pltfm_free(pdev);
@@ -314,9 +336,20 @@ err_pltfm_free:
return ret;
}
-static int __exit sdhci_bcm_kona_remove(struct platform_device *pdev)
+static int sdhci_bcm_kona_remove(struct platform_device *pdev)
{
- return sdhci_pltfm_unregister(pdev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_priv = sdhci_priv(host);
+ struct sdhci_bcm_kona_dev *kona_dev = sdhci_pltfm_priv(pltfm_priv);
+ int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+ sdhci_remove_host(host, dead);
+
+ clk_disable_unprepare(kona_dev->external_clk);
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
}
static struct platform_driver sdhci_bcm_kona_driver = {
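The new sdhci_bcm_kona_remove() above (and sdhci_msm_remove() further down) use the same idiom to decide whether the controller is still reachable before tearing it down: if SDHCI_INT_STATUS reads back as all ones, the register space has almost certainly dropped off the bus. A minimal standalone sketch of that test, illustrative only and not part of the patch, with made-up sample values:

#include <stdio.h>
#include <stdint.h>

/* A register that reads back as all ones usually means the device is gone. */
static int controller_is_dead(uint32_t int_status)
{
        return int_status == 0xffffffffu;
}

int main(void)
{
        printf("0x00008000 -> dead=%d\n", controller_is_dead(0x00008000u));
        printf("0xffffffff -> dead=%d\n", controller_is_dead(0xffffffffu));
        return 0;
}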
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 8424839660f8..736d7a2eb7ec 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -208,7 +208,7 @@ static struct platform_driver sdhci_dove_driver = {
.name = "sdhci-dove",
.owner = THIS_MODULE,
.pm = SDHCI_PLTFM_PMOPS,
- .of_match_table = of_match_ptr(sdhci_dove_of_match_table),
+ .of_match_table = sdhci_dove_of_match_table,
},
.probe = sdhci_dove_probe,
.remove = sdhci_dove_remove,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
new file mode 100644
index 000000000000..acb0e9eb55f1
--- /dev/null
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -0,0 +1,618 @@
+/*
+ * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/mmc/mmc.h>
+#include <linux/slab.h>
+
+#include "sdhci-pltfm.h"
+
+#define CORE_HC_MODE 0x78
+#define HC_MODE_EN 0x1
+#define CORE_POWER 0x0
+#define CORE_SW_RST BIT(7)
+
+#define MAX_PHASES 16
+#define CORE_DLL_LOCK BIT(7)
+#define CORE_DLL_EN BIT(16)
+#define CORE_CDR_EN BIT(17)
+#define CORE_CK_OUT_EN BIT(18)
+#define CORE_CDR_EXT_EN BIT(19)
+#define CORE_DLL_PDN BIT(29)
+#define CORE_DLL_RST BIT(30)
+#define CORE_DLL_CONFIG 0x100
+#define CORE_DLL_STATUS 0x108
+
+#define CORE_VENDOR_SPEC 0x10c
+#define CORE_CLK_PWRSAVE BIT(1)
+
+#define CDR_SELEXT_SHIFT 20
+#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
+#define CMUX_SHIFT_PHASE_SHIFT 24
+#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
+
+static const u32 tuning_block_64[] = {
+ 0x00ff0fff, 0xccc3ccff, 0xffcc3cc3, 0xeffefffe,
+ 0xddffdfff, 0xfbfffbff, 0xff7fffbf, 0xefbdf777,
+ 0xf0fff0ff, 0x3cccfc0f, 0xcfcc33cc, 0xeeffefff,
+ 0xfdfffdff, 0xffbfffdf, 0xfff7ffbb, 0xde7b7ff7
+};
+
+static const u32 tuning_block_128[] = {
+ 0xff00ffff, 0x0000ffff, 0xccccffff, 0xcccc33cc,
+ 0xcc3333cc, 0xffffcccc, 0xffffeeff, 0xffeeeeff,
+ 0xffddffff, 0xddddffff, 0xbbffffff, 0xbbffffff,
+ 0xffffffbb, 0xffffff77, 0x77ff7777, 0xffeeddbb,
+ 0x00ffffff, 0x00ffffff, 0xccffff00, 0xcc33cccc,
+ 0x3333cccc, 0xffcccccc, 0xffeeffff, 0xeeeeffff,
+ 0xddffffff, 0xddffffff, 0xffffffdd, 0xffffffbb,
+ 0xffffbbbb, 0xffff77ff, 0xff7777ff, 0xeeddbb77
+};
+
+struct sdhci_msm_host {
+ struct platform_device *pdev;
+ void __iomem *core_mem; /* MSM SDCC mapped address */
+ struct clk *clk; /* main SD/MMC bus clock */
+ struct clk *pclk; /* SDHC peripheral bus clock */
+ struct clk *bus_clk; /* SDHC bus voter clock */
+ struct mmc_host *mmc;
+ struct sdhci_pltfm_data sdhci_msm_pdata;
+};
+
+/* Platform specific tuning */
+static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
+{
+ u32 wait_cnt = 50;
+ u8 ck_out_en;
+ struct mmc_host *mmc = host->mmc;
+
+ /* Poll for CK_OUT_EN bit. max. poll time = 50us */
+ ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
+ CORE_CK_OUT_EN);
+
+ while (ck_out_en != poll) {
+ if (--wait_cnt == 0) {
+ dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
+ mmc_hostname(mmc), poll);
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+
+ ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
+ CORE_CK_OUT_EN);
+ }
+
+ return 0;
+}
+
+static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
+{
+ int rc;
+ static const u8 grey_coded_phase_table[] = {
+ 0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
+ 0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
+ };
+ unsigned long flags;
+ u32 config;
+ struct mmc_host *mmc = host->mmc;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
+ config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
+ rc = msm_dll_poll_ck_out_en(host, 0);
+ if (rc)
+ goto err_out;
+
+ /*
+ * Write the selected DLL clock output phase (0 ... 15)
+ * to CDR_SELEXT bit field of DLL_CONFIG register.
+ */
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CDR_SELEXT_MASK;
+ config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
+ rc = msm_dll_poll_ck_out_en(host, 1);
+ if (rc)
+ goto err_out;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CDR_EN;
+ config &= ~CORE_CDR_EXT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ goto out;
+
+err_out:
+ dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
+ mmc_hostname(mmc), phase);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+ return rc;
+}
+
+/*
+ * Find out the greatest range of consecutive selected
+ * DLL clock output phases that can be used as sampling
+ * setting for SD3.0 UHS-I card read operation (in SDR104
+ * timing mode) or for eMMC4.5 card read operation (in HS200
+ * timing mode).
+ * Select the phase 3/4 of the way into that range and configure
+ * the DLL with that clock output phase.
+ */
+
+static int msm_find_most_appropriate_phase(struct sdhci_host *host,
+ u8 *phase_table, u8 total_phases)
+{
+ int ret;
+ u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
+ u8 phases_per_row[MAX_PHASES] = { 0 };
+ int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
+ int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
+ bool phase_0_found = false, phase_15_found = false;
+ struct mmc_host *mmc = host->mmc;
+
+ if (!total_phases || (total_phases > MAX_PHASES)) {
+ dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
+ mmc_hostname(mmc), total_phases);
+ return -EINVAL;
+ }
+
+ for (cnt = 0; cnt < total_phases; cnt++) {
+ ranges[row_index][col_index] = phase_table[cnt];
+ phases_per_row[row_index] += 1;
+ col_index++;
+
+ if ((cnt + 1) == total_phases) {
+ continue;
+ /* check if next phase in phase_table is consecutive or not */
+ } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
+ row_index++;
+ col_index = 0;
+ }
+ }
+
+ if (row_index >= MAX_PHASES)
+ return -EINVAL;
+
+ /* Check if phase-0 is present in the first valid window */
+ if (!ranges[0][0]) {
+ phase_0_found = true;
+ phase_0_raw_index = 0;
+ /* Check if a cycle exists between 2 valid windows */
+ for (cnt = 1; cnt <= row_index; cnt++) {
+ if (phases_per_row[cnt]) {
+ for (i = 0; i < phases_per_row[cnt]; i++) {
+ if (ranges[cnt][i] == 15) {
+ phase_15_found = true;
+ phase_15_raw_index = cnt;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ /* If 2 valid windows form a cycle then merge them into a single window */
+ if (phase_0_found && phase_15_found) {
+ /* number of phases in the row where phase 0 is present */
+ u8 phases_0 = phases_per_row[phase_0_raw_index];
+ /* number of phases in the row where phase 15 is present */
+ u8 phases_15 = phases_per_row[phase_15_raw_index];
+
+ if (phases_0 + phases_15 >= MAX_PHASES)
+ /*
+ * If there is more than one phase window, the total
+ * number of phases in both windows must be less than
+ * MAX_PHASES.
+ */
+ return -EINVAL;
+
+ /* Merge 2 cyclic windows */
+ i = phases_15;
+ for (cnt = 0; cnt < phases_0; cnt++) {
+ ranges[phase_15_raw_index][i] =
+ ranges[phase_0_raw_index][cnt];
+ if (++i >= MAX_PHASES)
+ break;
+ }
+
+ phases_per_row[phase_0_raw_index] = 0;
+ phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
+ }
+
+ for (cnt = 0; cnt <= row_index; cnt++) {
+ if (phases_per_row[cnt] > curr_max) {
+ curr_max = phases_per_row[cnt];
+ selected_row_index = cnt;
+ }
+ }
+
+ i = (curr_max * 3) / 4;
+ if (i)
+ i--;
+
+ ret = ranges[selected_row_index][i];
+
+ if (ret >= MAX_PHASES) {
+ ret = -EINVAL;
+ dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
+ mmc_hostname(mmc), ret);
+ }
+
+ return ret;
+}
+
+static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
+{
+ u32 mclk_freq = 0, config;
+
+ /* Program the MCLK value to MCLK_FREQ bit field */
+ if (host->clock <= 112000000)
+ mclk_freq = 0;
+ else if (host->clock <= 125000000)
+ mclk_freq = 1;
+ else if (host->clock <= 137000000)
+ mclk_freq = 2;
+ else if (host->clock <= 150000000)
+ mclk_freq = 3;
+ else if (host->clock <= 162000000)
+ mclk_freq = 4;
+ else if (host->clock <= 175000000)
+ mclk_freq = 5;
+ else if (host->clock <= 187000000)
+ mclk_freq = 6;
+ else if (host->clock <= 200000000)
+ mclk_freq = 7;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CMUX_SHIFT_PHASE_MASK;
+ config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+}
+
+/* Initialize the DLL (Programmable Delay Line) */
+static int msm_init_cm_dll(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ int wait_cnt = 50;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * Make sure that clock is always enabled when DLL
+ * tuning is in progress. Keeping PWRSAVE ON may
+ * turn off the clock.
+ */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+ & ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC);
+
+ /* Write 1 to DLL_RST bit of DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ msm_cm_dll_set_freq(host);
+
+ /* Write 0 to DLL_RST bit of DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Set DLL_EN bit to 1. */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Set CK_OUT_EN bit to 1. */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
+ while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
+ CORE_DLL_LOCK)) {
+ /* max. wait of 50 us for the LOCK bit to be set */
+ if (--wait_cnt == 0) {
+ dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
+ mmc_hostname(mmc));
+ spin_unlock_irqrestore(&host->lock, flags);
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ return 0;
+}
+
+static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ int tuning_seq_cnt = 3;
+ u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0;
+ const u32 *tuning_block_pattern = tuning_block_64;
+ int size = sizeof(tuning_block_64); /* Pattern size in bytes */
+ int rc;
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_ios ios = host->mmc->ios;
+
+ /*
+ * Tuning is required for SDR104, HS200 and HS400 cards, and only
+ * when the clock frequency is greater than 100 MHz in these modes.
+ */
+ if (host->clock <= 100 * 1000 * 1000 ||
+ !((ios.timing == MMC_TIMING_MMC_HS200) ||
+ (ios.timing == MMC_TIMING_UHS_SDR104)))
+ return 0;
+
+ if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
+ (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
+ tuning_block_pattern = tuning_block_128;
+ size = sizeof(tuning_block_128);
+ }
+
+ data_buf = kmalloc(size, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+retry:
+ /* First of all reset the tuning block */
+ rc = msm_init_cm_dll(host);
+ if (rc)
+ goto out;
+
+ phase = 0;
+ do {
+ struct mmc_command cmd = { 0 };
+ struct mmc_data data = { 0 };
+ struct mmc_request mrq = {
+ .cmd = &cmd,
+ .data = &data
+ };
+ struct scatterlist sg;
+
+ /* Set the phase in delay line hw block */
+ rc = msm_config_cm_dll_phase(host, phase);
+ if (rc)
+ goto out;
+
+ cmd.opcode = opcode;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = size;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.timeout_ns = NSEC_PER_SEC; /* 1 second */
+
+ data.sg = &sg;
+ data.sg_len = 1;
+ sg_init_one(&sg, data_buf, size);
+ memset(data_buf, 0, size);
+ mmc_wait_for_req(mmc, &mrq);
+
+ if (!cmd.error && !data.error &&
+ !memcmp(data_buf, tuning_block_pattern, size)) {
+ /* Tuning is successful at this tuning point */
+ tuned_phases[tuned_phase_cnt++] = phase;
+ dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
+ mmc_hostname(mmc), phase);
+ }
+ } while (++phase < ARRAY_SIZE(tuned_phases));
+
+ if (tuned_phase_cnt) {
+ rc = msm_find_most_appropriate_phase(host, tuned_phases,
+ tuned_phase_cnt);
+ if (rc < 0)
+ goto out;
+ else
+ phase = rc;
+
+ /*
+ * Finally set the selected phase in delay
+ * line hw block.
+ */
+ rc = msm_config_cm_dll_phase(host, phase);
+ if (rc)
+ goto out;
+ dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
+ mmc_hostname(mmc), phase);
+ } else {
+ if (--tuning_seq_cnt)
+ goto retry;
+ /* Tuning failed */
+ dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
+ mmc_hostname(mmc));
+ rc = -EIO;
+ }
+
+out:
+ kfree(data_buf);
+ return rc;
+}
+
+static const struct of_device_id sdhci_msm_dt_match[] = {
+ { .compatible = "qcom,sdhci-msm-v4" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+
+static struct sdhci_ops sdhci_msm_ops = {
+ .platform_execute_tuning = sdhci_msm_execute_tuning,
+};
+
+static int sdhci_msm_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_msm_host *msm_host;
+ struct resource *core_memres;
+ int ret;
+ u16 host_version;
+
+ msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
+ if (!msm_host)
+ return -ENOMEM;
+
+ msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
+ host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = msm_host;
+ msm_host->mmc = host->mmc;
+ msm_host->pdev = pdev;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto pltfm_free;
+
+ sdhci_get_of_property(pdev);
+
+ /* Setup SDCC bus voter clock. */
+ msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (!IS_ERR(msm_host->bus_clk)) {
+ /* Vote for max. clk rate for max. performance */
+ ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
+ if (ret)
+ goto pltfm_free;
+ ret = clk_prepare_enable(msm_host->bus_clk);
+ if (ret)
+ goto pltfm_free;
+ }
+
+ /* Setup main peripheral bus clock */
+ msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(msm_host->pclk)) {
+ ret = PTR_ERR(msm_host->pclk);
+ dev_err(&pdev->dev, "Perpheral clk setup failed (%d)\n", ret);
+ goto bus_clk_disable;
+ }
+
+ ret = clk_prepare_enable(msm_host->pclk);
+ if (ret)
+ goto bus_clk_disable;
+
+ /* Setup SDC MMC clock */
+ msm_host->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(msm_host->clk)) {
+ ret = PTR_ERR(msm_host->clk);
+ dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
+ goto pclk_disable;
+ }
+
+ ret = clk_prepare_enable(msm_host->clk);
+ if (ret)
+ goto pclk_disable;
+
+ core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
+
+ if (IS_ERR(msm_host->core_mem)) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ ret = PTR_ERR(msm_host->core_mem);
+ goto clk_disable;
+ }
+
+ /* Reset the core and enable SDHC mode */
+ writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
+ CORE_SW_RST, msm_host->core_mem + CORE_POWER);
+
+ /* SW reset can take up to 10 HCLK + 15 MCLK cycles (min. 40 us) */
+ usleep_range(1000, 5000);
+ if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) {
+ dev_err(&pdev->dev, "Stuck in reset\n");
+ ret = -ETIMEDOUT;
+ goto clk_disable;
+ }
+
+ /* Set HC_MODE_EN bit in HC_MODE register */
+ writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
+
+ host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
+ dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
+ host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
+ SDHCI_VENDOR_VER_SHIFT));
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto clk_disable;
+
+ return 0;
+
+clk_disable:
+ clk_disable_unprepare(msm_host->clk);
+pclk_disable:
+ clk_disable_unprepare(msm_host->pclk);
+bus_clk_disable:
+ if (!IS_ERR(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+pltfm_free:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_msm_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
+ 0xffffffff);
+
+ sdhci_remove_host(host, dead);
+ sdhci_pltfm_free(pdev);
+ clk_disable_unprepare(msm_host->clk);
+ clk_disable_unprepare(msm_host->pclk);
+ if (!IS_ERR(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+ return 0;
+}
+
+static struct platform_driver sdhci_msm_driver = {
+ .probe = sdhci_msm_probe,
+ .remove = sdhci_msm_remove,
+ .driver = {
+ .name = "sdhci_msm",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_msm_dt_match,
+ },
+};
+
+module_platform_driver(sdhci_msm_driver);
+
+MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 0955777b6c7e..fdc612120362 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -610,6 +610,18 @@ static const struct sdhci_pci_fixes sdhci_via = {
.probe = via_probe,
};
+static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps2 |= MMC_CAP2_HS200;
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_rtsx = {
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_BROKEN_DDR50,
+ .probe_slot = rtsx_probe_slot,
+};
+
static const struct pci_device_id pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_RICOH,
@@ -732,6 +744,14 @@ static const struct pci_device_id pci_ids[] = {
},
{
+ .vendor = PCI_VENDOR_ID_REALTEK,
+ .device = 0x5250,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_rtsx,
+ },
+
+ {
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_MRST_SD0,
.subvendor = PCI_ANY_ID,
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 793dacd3b841..2fd73b38c303 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -34,6 +34,7 @@
#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/mbus.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
@@ -57,6 +58,60 @@
#define SDCE_MISC_INT (1<<2)
#define SDCE_MISC_INT_EN (1<<1)
+/*
+ * These registers are relative to the second register region, for the
+ * MBus bridge.
+ */
+#define SDHCI_WINDOW_CTRL(i) (0x80 + ((i) << 3))
+#define SDHCI_WINDOW_BASE(i) (0x84 + ((i) << 3))
+#define SDHCI_MAX_WIN_NUM 8
+
+static int mv_conf_mbus_windows(struct platform_device *pdev,
+ const struct mbus_dram_target_info *dram)
+{
+ int i;
+ void __iomem *regs;
+ struct resource *res;
+
+ if (!dram) {
+ dev_err(&pdev->dev, "no mbus dram info\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot get mbus registers\n");
+ return -EINVAL;
+ }
+
+ regs = ioremap(res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "cannot map mbus registers\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SDHCI_MAX_WIN_NUM; i++) {
+ writel(0, regs + SDHCI_WINDOW_CTRL(i));
+ writel(0, regs + SDHCI_WINDOW_BASE(i));
+ }
+
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ /* Write size, attributes and target id to control register */
+ writel(((cs->size - 1) & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ regs + SDHCI_WINDOW_CTRL(i));
+ /* Write base address to base register */
+ writel(cs->base, regs + SDHCI_WINDOW_BASE(i));
+ }
+
+ iounmap(regs);
+
+ return 0;
+}
+
static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask)
{
struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
@@ -187,6 +242,9 @@ static const struct of_device_id sdhci_pxav3_of_match[] = {
{
.compatible = "mrvl,pxav3-mmc",
},
+ {
+ .compatible = "marvell,armada-380-sdhci",
+ },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_pxav3_of_match);
@@ -219,6 +277,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
struct sdhci_host *host = NULL;
struct sdhci_pxa *pxa = NULL;
const struct of_device_id *match;
@@ -235,6 +294,14 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
kfree(pxa);
return PTR_ERR(host);
}
+
+ if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+ ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+ if (ret < 0)
+ goto err_mbus_win;
+ }
+
+
pltfm_host = sdhci_priv(host);
pltfm_host->priv = pxa;
@@ -321,6 +388,7 @@ err_add_host:
clk_disable_unprepare(clk);
clk_put(clk);
err_clk_get:
+err_mbus_win:
sdhci_pltfm_free(pdev);
kfree(pxa);
return ret;
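mv_conf_mbus_windows() above packs each DRAM chip-select into a single window control word: size minus one truncated to the top 16 bits, the window attribute in bits 15:8, the target id in bits 7:4, and bit 0 as the enable. A standalone sketch of that packing with made-up size/attribute/target values, illustrative only and not part of the patch:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Pack one window control word the way mv_conf_mbus_windows() does:
 * bits 31:16 = (size - 1) truncated to 64 KiB granularity,
 * bits 15:8  = window attribute,
 * bits  7:4  = target id,
 * bit     0  = window enable. */
static uint32_t mbus_win_ctrl(uint32_t size, uint8_t attr, uint8_t target)
{
        return ((size - 1) & 0xffff0000u) | ((uint32_t)attr << 8) |
               ((uint32_t)target << 4) | 1u;
}

int main(void)
{
        /* Made-up example: a 256 MiB chip-select, attribute 0x0e, target 0. */
        printf("ctrl = 0x%08" PRIx32 "\n", mbus_win_ctrl(256u << 20, 0x0e, 0));
        return 0;       /* prints ctrl = 0x0fff0e01 */
}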
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 6debda952155..d61eb5a70833 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -51,12 +51,13 @@ struct sdhci_s3c {
struct platform_device *pdev;
struct resource *ioarea;
struct s3c_sdhci_platdata *pdata;
- unsigned int cur_clk;
+ int cur_clk;
int ext_cd_irq;
int ext_cd_gpio;
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
+ unsigned long clk_rates[MAX_BUS_CLK];
};
/**
@@ -77,32 +78,6 @@ static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
}
/**
- * get_curclk - convert ctrl2 register to clock source number
- * @ctrl2: Control2 register value.
- */
-static u32 get_curclk(u32 ctrl2)
-{
- ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
- ctrl2 >>= S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
-
- return ctrl2;
-}
-
-static void sdhci_s3c_check_sclk(struct sdhci_host *host)
-{
- struct sdhci_s3c *ourhost = to_s3c(host);
- u32 tmp = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
-
- if (get_curclk(tmp) != ourhost->cur_clk) {
- dev_dbg(&ourhost->pdev->dev, "restored ctrl2 clock setting\n");
-
- tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
- tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
- writel(tmp, host->ioaddr + S3C_SDHCI_CONTROL2);
- }
-}
-
-/**
* sdhci_s3c_get_max_clk - callback to get maximum clock frequency.
* @host: The SDHCI host instance.
*
@@ -111,20 +86,11 @@ static void sdhci_s3c_check_sclk(struct sdhci_host *host)
static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
- struct clk *busclk;
- unsigned int rate, max;
- int clk;
-
- /* note, a reset will reset the clock source */
-
- sdhci_s3c_check_sclk(host);
-
- for (max = 0, clk = 0; clk < MAX_BUS_CLK; clk++) {
- busclk = ourhost->clk_bus[clk];
- if (!busclk)
- continue;
+ unsigned long rate, max = 0;
+ int src;
- rate = clk_get_rate(busclk);
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ rate = ourhost->clk_rates[src];
if (rate > max)
max = rate;
}
@@ -144,9 +110,9 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
{
unsigned long rate;
struct clk *clksrc = ourhost->clk_bus[src];
- int div;
+ int shift;
- if (!clksrc)
+ if (IS_ERR(clksrc))
return UINT_MAX;
/*
@@ -158,17 +124,24 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
return wanted - rate;
}
- rate = clk_get_rate(clksrc);
+ rate = ourhost->clk_rates[src];
- for (div = 1; div < 256; div *= 2) {
- if ((rate / div) <= wanted)
+ for (shift = 0; shift <= 8; ++shift) {
+ if ((rate >> shift) <= wanted)
break;
}
+ if (shift > 8) {
+ dev_dbg(&ourhost->pdev->dev,
+ "clk %d: rate %ld, min rate %lu > wanted %u\n",
+ src, rate, rate / 256, wanted);
+ return UINT_MAX;
+ }
+
dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
- src, rate, wanted, rate / div);
+ src, rate, wanted, rate >> shift);
- return wanted - (rate / div);
+ return wanted - (rate >> shift);
}
/**
@@ -209,20 +182,22 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
struct clk *clk = ourhost->clk_bus[best_src];
clk_prepare_enable(clk);
- clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
-
- /* turn clock off to card before changing clock source */
- writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
+ if (ourhost->cur_clk >= 0)
+ clk_disable_unprepare(
+ ourhost->clk_bus[ourhost->cur_clk]);
ourhost->cur_clk = best_src;
- host->max_clk = clk_get_rate(clk);
-
- ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
- ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
- ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
- writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
+ host->max_clk = ourhost->clk_rates[best_src];
}
+ /* turn clock off to card before changing clock source */
+ writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
+
+ ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
+ ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+ ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
+ writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
+
/* reprogram default hardware configuration */
writel(S3C64XX_SDHCI_CONTROL4_DRIVE_9mA,
host->ioaddr + S3C64XX_SDHCI_CONTROL4);
@@ -254,17 +229,17 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
- unsigned int delta, min = UINT_MAX;
+ unsigned long rate, min = ULONG_MAX;
int src;
for (src = 0; src < MAX_BUS_CLK; src++) {
- delta = sdhci_s3c_consider_clock(ourhost, src, 0);
- if (delta == UINT_MAX)
+ rate = ourhost->clk_rates[src] / 256;
+ if (!rate)
continue;
- /* delta is a negative value in this case */
- if (-delta < min)
- min = -delta;
+ if (rate < min)
+ min = rate;
}
+
return min;
}
@@ -272,20 +247,44 @@ static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
+ unsigned long rate, max = 0;
+ int src;
- return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ struct clk *clk;
+
+ clk = ourhost->clk_bus[src];
+ if (IS_ERR(clk))
+ continue;
+
+ rate = clk_round_rate(clk, ULONG_MAX);
+ if (rate > max)
+ max = rate;
+ }
+
+ return max;
}
/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
+ unsigned long rate, min = ULONG_MAX;
+ int src;
- /*
- * initial clock can be in the frequency range of
- * 100KHz-400KHz, so we set it as max value.
- */
- return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000);
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ struct clk *clk;
+
+ clk = ourhost->clk_bus[src];
+ if (IS_ERR(clk))
+ continue;
+
+ rate = clk_round_rate(clk, 0);
+ if (rate < min)
+ min = rate;
+ }
+
+ return min;
}
/* sdhci_cmu_set_clock - callback on clock change.*/
@@ -552,6 +551,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
sc->host = host;
sc->pdev = pdev;
sc->pdata = pdata;
+ sc->cur_clk = -1;
platform_set_drvdata(pdev, host);
@@ -566,25 +566,18 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
clk_prepare_enable(sc->clk_io);
for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
- struct clk *clk;
char name[14];
snprintf(name, 14, "mmc_busclk.%d", ptr);
- clk = devm_clk_get(dev, name);
- if (IS_ERR(clk))
+ sc->clk_bus[ptr] = devm_clk_get(dev, name);
+ if (IS_ERR(sc->clk_bus[ptr]))
continue;
clks++;
- sc->clk_bus[ptr] = clk;
-
- /*
- * save current clock index to know which clock bus
- * is used later in overriding functions.
- */
- sc->cur_clk = ptr;
+ sc->clk_rates[ptr] = clk_get_rate(sc->clk_bus[ptr]);
dev_info(dev, "clock source %d: %s (%ld Hz)\n",
- ptr, name, clk_get_rate(clk));
+ ptr, name, sc->clk_rates[ptr]);
}
if (clks == 0) {
@@ -593,10 +586,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
goto err_no_busclks;
}
-#ifndef CONFIG_PM_RUNTIME
- clk_prepare_enable(sc->clk_bus[sc->cur_clk]);
-#endif
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->ioaddr)) {
@@ -709,10 +698,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
return 0;
err_req_regs:
-#ifndef CONFIG_PM_RUNTIME
- clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
-#endif
-
err_no_busclks:
clk_disable_unprepare(sc->clk_io);
@@ -743,9 +728,6 @@ static int sdhci_s3c_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-#ifndef CONFIG_PM_RUNTIME
- clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
-#endif
clk_disable_unprepare(sc->clk_io);
sdhci_free_host(host);
@@ -779,7 +761,8 @@ static int sdhci_s3c_runtime_suspend(struct device *dev)
ret = sdhci_runtime_suspend_host(host);
- clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
+ if (ourhost->cur_clk >= 0)
+ clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
clk_disable_unprepare(busclk);
return ret;
}
@@ -792,7 +775,8 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
int ret;
clk_prepare_enable(busclk);
- clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
+ if (ourhost->cur_clk >= 0)
+ clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
ret = sdhci_runtime_resume_host(host);
return ret;
}
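The reworked sdhci_s3c_consider_clock() above stops calling clk_get_rate() at runtime and instead walks power-of-two dividers (1 through 256) over the cached clk_rates[], returning how far below the wanted frequency each source can land, or UINT_MAX when even /256 is still too fast. A standalone sketch of that search with made-up rates, illustrative only and not part of the patch:

#include <stdio.h>
#include <limits.h>

/* Smallest achievable gap below 'wanted' using dividers 1,2,4,...,256,
 * or UINT_MAX if this source cannot get down to 'wanted' at all. */
static unsigned int consider_clock(unsigned long rate, unsigned int wanted)
{
        int shift;

        for (shift = 0; shift <= 8; ++shift) {
                if ((rate >> shift) <= wanted)
                        return wanted - (unsigned int)(rate >> shift);
        }
        return UINT_MAX;
}

int main(void)
{
        /* Made-up example: a 133 MHz source asked for 25 MHz ends up
         * at 133 MHz / 8 = 16.625 MHz, i.e. 8.375 MHz short. */
        printf("delta = %u Hz\n", consider_clock(133000000UL, 25000000u));
        return 0;
}

The caller then simply picks the source with the smallest delta.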
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 2dba9f8d1760..0316dec3f006 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdhci-spear.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include "sdhci.h"
@@ -40,36 +41,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = {
/* Nothing to do for now. */
};
-/* gpio card detection interrupt handler */
-static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
-{
- struct platform_device *pdev = dev_id;
- struct sdhci_host *host = platform_get_drvdata(pdev);
- struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
- unsigned long gpio_irq_type;
- int val;
-
- val = gpio_get_value(sdhci->data->card_int_gpio);
-
- /* val == 1 -> card removed, val == 0 -> card inserted */
- /* if card removed - set irq for low level, else vice versa */
- gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
- irq_set_irq_type(irq, gpio_irq_type);
-
- if (sdhci->data->card_power_gpio >= 0) {
- if (!sdhci->data->power_always_enb) {
- /* if card inserted, give power, otherwise remove it */
- val = sdhci->data->power_active_high ? !val : val ;
- gpio_set_value(sdhci->data->card_power_gpio, val);
- }
- }
-
- /* inform sdhci driver about card insertion/removal */
- tasklet_schedule(&host->card_tasklet);
-
- return IRQ_HANDLED;
-}
-
#ifdef CONFIG_OF
static struct sdhci_plat_data *sdhci_probe_config_dt(struct platform_device *pdev)
{
@@ -84,14 +55,12 @@ static struct sdhci_plat_data *sdhci_probe_config_dt(struct platform_device *pde
/* If pdata is required */
if (cd_gpio != -1) {
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
+ if (!pdata)
dev_err(&pdev->dev, "DT: kzalloc failed\n");
- return ERR_PTR(-ENOMEM);
- }
+ else
+ pdata->card_int_gpio = cd_gpio;
}
- pdata->card_int_gpio = cd_gpio;
-
return pdata;
}
#else
@@ -107,41 +76,44 @@ static int sdhci_probe(struct platform_device *pdev)
struct sdhci_host *host;
struct resource *iomem;
struct spear_sdhci *sdhci;
+ struct device *dev;
int ret;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem) {
- ret = -ENOMEM;
- dev_dbg(&pdev->dev, "memory resource not defined\n");
+ dev = pdev->dev.parent ? pdev->dev.parent : &pdev->dev;
+ host = sdhci_alloc_host(dev, sizeof(*sdhci));
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
goto err;
}
- if (!devm_request_mem_region(&pdev->dev, iomem->start,
- resource_size(iomem), "spear-sdhci")) {
- ret = -EBUSY;
- dev_dbg(&pdev->dev, "cannot request region\n");
- goto err;
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(host->ioaddr)) {
+ ret = PTR_ERR(host->ioaddr);
+ dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret);
+ goto err_host;
}
- sdhci = devm_kzalloc(&pdev->dev, sizeof(*sdhci), GFP_KERNEL);
- if (!sdhci) {
- ret = -ENOMEM;
- dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
- goto err;
- }
+ host->hw_name = "sdhci";
+ host->ops = &sdhci_pltfm_ops;
+ host->irq = platform_get_irq(pdev, 0);
+ host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
+
+ sdhci = sdhci_priv(host);
/* clk enable */
- sdhci->clk = clk_get(&pdev->dev, NULL);
+ sdhci->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sdhci->clk)) {
ret = PTR_ERR(sdhci->clk);
dev_dbg(&pdev->dev, "Error getting clock\n");
- goto err;
+ goto err_host;
}
ret = clk_prepare_enable(sdhci->clk);
if (ret) {
dev_dbg(&pdev->dev, "Error enabling clock\n");
- goto put_clk;
+ goto err_host;
}
ret = clk_set_rate(sdhci->clk, 50000000);
@@ -153,118 +125,42 @@ static int sdhci_probe(struct platform_device *pdev)
sdhci->data = sdhci_probe_config_dt(pdev);
if (IS_ERR(sdhci->data)) {
dev_err(&pdev->dev, "DT: Failed to get pdata\n");
- return -ENODEV;
+ goto disable_clk;
}
} else {
sdhci->data = dev_get_platdata(&pdev->dev);
}
- pdev->dev.platform_data = sdhci;
-
- if (pdev->dev.parent)
- host = sdhci_alloc_host(pdev->dev.parent, 0);
- else
- host = sdhci_alloc_host(&pdev->dev, 0);
-
- if (IS_ERR(host)) {
- ret = PTR_ERR(host);
- dev_dbg(&pdev->dev, "error allocating host\n");
- goto disable_clk;
- }
-
- host->hw_name = "sdhci";
- host->ops = &sdhci_pltfm_ops;
- host->irq = platform_get_irq(pdev, 0);
- host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
-
- host->ioaddr = devm_ioremap(&pdev->dev, iomem->start,
- resource_size(iomem));
- if (!host->ioaddr) {
- ret = -ENOMEM;
- dev_dbg(&pdev->dev, "failed to remap registers\n");
- goto free_host;
+ /*
+ * It is optional to use GPIOs for sdhci card detection. If
+ * sdhci->data is NULL, the native sdhci card-detect line is used;
+ * otherwise the GPIO line is used via the slot-gpio helpers.
+ */
+ if (sdhci->data && sdhci->data->card_int_gpio >= 0) {
+ ret = mmc_gpio_request_cd(host->mmc,
+ sdhci->data->card_int_gpio, 0);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev,
+ "failed to request card-detect gpio%d\n",
+ sdhci->data->card_int_gpio);
+ goto disable_clk;
+ }
}
ret = sdhci_add_host(host);
if (ret) {
dev_dbg(&pdev->dev, "error adding host\n");
- goto free_host;
+ goto disable_clk;
}
platform_set_drvdata(pdev, host);
- /*
- * It is optional to use GPIOs for sdhci Power control & sdhci card
- * interrupt detection. If sdhci->data is NULL, then use original sdhci
- * lines otherwise GPIO lines.
- * If GPIO is selected for power control, then power should be disabled
- * after card removal and should be enabled when card insertion
- * interrupt occurs
- */
- if (!sdhci->data)
- return 0;
-
- if (sdhci->data->card_power_gpio >= 0) {
- int val = 0;
-
- ret = devm_gpio_request(&pdev->dev,
- sdhci->data->card_power_gpio, "sdhci");
- if (ret < 0) {
- dev_dbg(&pdev->dev, "gpio request fail: %d\n",
- sdhci->data->card_power_gpio);
- goto set_drvdata;
- }
-
- if (sdhci->data->power_always_enb)
- val = sdhci->data->power_active_high;
- else
- val = !sdhci->data->power_active_high;
-
- ret = gpio_direction_output(sdhci->data->card_power_gpio, val);
- if (ret) {
- dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
- sdhci->data->card_power_gpio);
- goto set_drvdata;
- }
- }
-
- if (sdhci->data->card_int_gpio >= 0) {
- ret = devm_gpio_request(&pdev->dev, sdhci->data->card_int_gpio,
- "sdhci");
- if (ret < 0) {
- dev_dbg(&pdev->dev, "gpio request fail: %d\n",
- sdhci->data->card_int_gpio);
- goto set_drvdata;
- }
-
- ret = gpio_direction_input(sdhci->data->card_int_gpio);
- if (ret) {
- dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
- sdhci->data->card_int_gpio);
- goto set_drvdata;
- }
- ret = devm_request_irq(&pdev->dev,
- gpio_to_irq(sdhci->data->card_int_gpio),
- sdhci_gpio_irq, IRQF_TRIGGER_LOW,
- mmc_hostname(host->mmc), pdev);
- if (ret) {
- dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
- sdhci->data->card_int_gpio);
- goto set_drvdata;
- }
-
- }
-
return 0;
-set_drvdata:
- sdhci_remove_host(host, 1);
-free_host:
- sdhci_free_host(host);
disable_clk:
clk_disable_unprepare(sdhci->clk);
-put_clk:
- clk_put(sdhci->clk);
+err_host:
+ sdhci_free_host(host);
err:
dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
return ret;
@@ -273,7 +169,7 @@ err:
static int sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
- struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
+ struct spear_sdhci *sdhci = sdhci_priv(host);
int dead = 0;
u32 scratch;
@@ -282,9 +178,8 @@ static int sdhci_remove(struct platform_device *pdev)
dead = 1;
sdhci_remove_host(host, dead);
- sdhci_free_host(host);
clk_disable_unprepare(sdhci->clk);
- clk_put(sdhci->clk);
+ sdhci_free_host(host);
return 0;
}
@@ -293,7 +188,7 @@ static int sdhci_remove(struct platform_device *pdev)
static int sdhci_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
- struct spear_sdhci *sdhci = dev_get_platdata(dev);
+ struct spear_sdhci *sdhci = sdhci_priv(host);
int ret;
ret = sdhci_suspend_host(host);
@@ -306,7 +201,7 @@ static int sdhci_suspend(struct device *dev)
static int sdhci_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
- struct spear_sdhci *sdhci = dev_get_platdata(dev);
+ struct spear_sdhci *sdhci = sdhci_priv(host);
int ret;
ret = clk_enable(sdhci->clk);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9ddef4763541..9a79fc4b60ca 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -675,12 +675,12 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
return 0xE;
/* Unspecified timeout, assume max */
- if (!data && !cmd->cmd_timeout_ms)
+ if (!data && !cmd->busy_timeout)
return 0xE;
/* timeout in us */
if (!data)
- target_timeout = cmd->cmd_timeout_ms * 1000;
+ target_timeout = cmd->busy_timeout * 1000;
else {
target_timeout = data->timeout_ns / 1000;
if (host->clock)
@@ -1019,8 +1019,8 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
}
timeout = jiffies;
- if (!cmd->data && cmd->cmd_timeout_ms > 9000)
- timeout += DIV_ROUND_UP(cmd->cmd_timeout_ms, 1000) * HZ + HZ;
+ if (!cmd->data && cmd->busy_timeout > 9000)
+ timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
else
timeout += 10 * HZ;
mod_timer(&host->timer, timeout);
@@ -2026,12 +2026,11 @@ out:
host->tuning_count * HZ);
/* Tuning mode 1 limits the maximum data length to 4MB */
mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
- } else {
+ } else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
host->flags &= ~SDHCI_NEEDS_RETUNING;
/* Reload the new initial value for timer */
- if (host->tuning_mode == SDHCI_TUNING_MODE_1)
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
+ mod_timer(&host->tuning_timer, jiffies +
+ host->tuning_count * HZ);
}
/*
@@ -2434,9 +2433,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
if (host->runtime_suspended) {
spin_unlock(&host->lock);
- pr_warning("%s: got irq while runtime suspended\n",
- mmc_hostname(host->mmc));
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
intmask = sdhci_readl(host, SDHCI_INT_STATUS);
@@ -2941,7 +2938,7 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
host->timeout_clk = mmc->f_max / 1000;
- mmc->max_discard_to = (1 << 27) / host->timeout_clk;
+ mmc->max_busy_timeout = (1 << 27) / host->timeout_clk;
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
@@ -3020,7 +3017,8 @@ int sdhci_add_host(struct sdhci_host *host)
} else if (caps[1] & SDHCI_SUPPORT_SDR50)
mmc->caps |= MMC_CAP_UHS_SDR50;
- if (caps[1] & SDHCI_SUPPORT_DDR50)
+ if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
+ !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
mmc->caps |= MMC_CAP_UHS_DDR50;
/* Does the host need tuning for SDR50? */
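The max_discard_to field renamed to max_busy_timeout above keeps the same arithmetic: the SDHCI data timeout counter tops out at 2^27 timeout-clock cycles, and since timeout_clk is kept in kHz the quotient comes out in milliseconds. A one-line sketch of that bound, illustrative only and not part of the patch (the 48 MHz value is just an example):

#include <stdio.h>

int main(void)
{
        /* Example timeout clock of 48 MHz, expressed in kHz as in sdhci.c. */
        unsigned int timeout_clk_khz = 48000;
        unsigned int max_busy_timeout_ms = (1u << 27) / timeout_clk_khz;

        printf("max busy timeout: %u ms\n", max_busy_timeout_ms);  /* 2796 */
        return 0;
}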
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 2d6ce257a273..91058dabd11a 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -37,6 +37,8 @@
struct sh_mobile_sdhi_of_data {
unsigned long tmio_flags;
+ unsigned long capabilities;
+ unsigned long capabilities2;
};
static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
@@ -45,6 +47,31 @@ static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
},
};
+static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+};
+
+static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+ .capabilities2 = MMC_CAP2_NO_MULTI_READ,
+};
+
+static const struct of_device_id sh_mobile_sdhi_of_match[] = {
+ { .compatible = "renesas,sdhi-shmobile" },
+ { .compatible = "renesas,sdhi-sh7372" },
+ { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
+ { .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
+ { .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
+
struct sh_mobile_sdhi {
struct clk *clk;
struct tmio_mmc_data mmc_data;
@@ -114,19 +141,6 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
.cd_wakeup = sh_mobile_sdhi_cd_wakeup,
};
-static const struct of_device_id sh_mobile_sdhi_of_match[] = {
- { .compatible = "renesas,sdhi-shmobile" },
- { .compatible = "renesas,sdhi-sh7372" },
- { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
- { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
- { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
- { .compatible = "renesas,sdhi-r8a7778", .data = &sh_mobile_sdhi_of_cfg[0], },
- { .compatible = "renesas,sdhi-r8a7779", .data = &sh_mobile_sdhi_of_cfg[0], },
- { .compatible = "renesas,sdhi-r8a7790", .data = &sh_mobile_sdhi_of_cfg[0], },
- {},
-};
-MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
-
static int sh_mobile_sdhi_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -212,6 +226,8 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
if (of_id && of_id->data) {
const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
mmc_data->flags |= of_data->tmio_flags;
+ mmc_data->capabilities |= of_data->capabilities;
+ mmc_data->capabilities2 |= of_data->capabilities2;
}
/* SD control register space size is 0x100, 0x200 for bus_shift=1 */
@@ -316,10 +332,10 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
- .suspend = tmio_mmc_host_suspend,
- .resume = tmio_mmc_host_resume,
- .runtime_suspend = tmio_mmc_host_runtime_suspend,
- .runtime_resume = tmio_mmc_host_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_host_suspend, tmio_mmc_host_resume)
+ SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
+ tmio_mmc_host_runtime_resume,
+ NULL)
};
static struct platform_driver sh_mobile_sdhi_driver = {
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 1900abb04236..cfad844730d8 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -23,38 +23,37 @@
#include "tmio_mmc.h"
-#ifdef CONFIG_PM
-static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tmio_mmc_suspend(struct device *dev)
{
- const struct mfd_cell *cell = mfd_get_cell(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
int ret;
- ret = tmio_mmc_host_suspend(&dev->dev);
+ ret = tmio_mmc_host_suspend(dev);
/* Tell MFD core it can disable us now.*/
if (!ret && cell->disable)
- cell->disable(dev);
+ cell->disable(pdev);
return ret;
}
-static int tmio_mmc_resume(struct platform_device *dev)
+static int tmio_mmc_resume(struct device *dev)
{
- const struct mfd_cell *cell = mfd_get_cell(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
int ret = 0;
/* Tell the MFD core we are ready to be enabled */
if (cell->resume)
- ret = cell->resume(dev);
+ ret = cell->resume(pdev);
if (!ret)
- ret = tmio_mmc_host_resume(&dev->dev);
+ ret = tmio_mmc_host_resume(dev);
return ret;
}
-#else
-#define tmio_mmc_suspend NULL
-#define tmio_mmc_resume NULL
#endif
static int tmio_mmc_probe(struct platform_device *pdev)
@@ -134,15 +133,18 @@ static int tmio_mmc_remove(struct platform_device *pdev)
/* ------------------- device registration ----------------------- */
+static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume)
+};
+
static struct platform_driver tmio_mmc_driver = {
.driver = {
.name = "tmio-mmc",
.owner = THIS_MODULE,
+ .pm = &tmio_mmc_dev_pm_ops,
},
.probe = tmio_mmc_probe,
.remove = tmio_mmc_remove,
- .suspend = tmio_mmc_suspend,
- .resume = tmio_mmc_resume,
};
module_platform_driver(tmio_mmc_driver);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index aaa9c7e9e730..100ffe0b2faf 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -162,16 +162,15 @@ static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
}
#endif
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
int tmio_mmc_host_suspend(struct device *dev);
int tmio_mmc_host_resume(struct device *dev);
-#else
-#define tmio_mmc_host_suspend NULL
-#define tmio_mmc_host_resume NULL
#endif
+#ifdef CONFIG_PM_RUNTIME
int tmio_mmc_host_runtime_suspend(struct device *dev);
int tmio_mmc_host_runtime_resume(struct device *dev);
+#endif
static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 8d8abf23a611..faf0924e71cb 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1142,7 +1142,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
int tmio_mmc_host_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -1165,9 +1165,9 @@ int tmio_mmc_host_resume(struct device *dev)
return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_resume);
+#endif
-#endif /* CONFIG_PM */
-
+#ifdef CONFIG_PM_RUNTIME
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
return 0;
@@ -1184,5 +1184,6 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
+#endif
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index c0105a2e269a..d2c386f09d69 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -504,7 +504,7 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
ret = -ENOMEM;
goto err;
}
- ushc->csw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL);
+ ushc->csw = kzalloc(sizeof(struct ushc_csw), GFP_KERNEL);
if (ushc->csw == NULL) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index e902ed7846b0..498d1f799085 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -757,7 +757,7 @@ static int wmt_mci_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
of_match_device(wmt_mci_dt_ids, &pdev->dev);
- const struct wmt_mci_caps *wmt_caps = of_id->data;
+ const struct wmt_mci_caps *wmt_caps;
int ret;
int regular_irq, dma_irq;
@@ -766,6 +766,8 @@ static int wmt_mci_probe(struct platform_device *pdev)
return -EFAULT;
}
+ wmt_caps = of_id->data;
+
if (!np) {
dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
return -EFAULT;
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5ebcda39f554..5d49a2129618 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -150,7 +150,7 @@ config MTD_BCM63XX_PARTS
config MTD_BCM47XX_PARTS
tristate "BCM47XX partitioning support"
- depends on BCM47XX
+ depends on BCM47XX || ARCH_BCM_5301X
help
This provides partitions parser for devices based on BCM47xx
boards.
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index de1eb92e42f5..adfa74c1bc45 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <bcm47xx_nvram.h>
/* 10 parts were found on sflash on Netgear WNDR4500 */
#define BCM47XXPART_MAX_PARTS 12
@@ -30,6 +29,7 @@
#define BOARD_DATA_MAGIC2 0xBD0D0BBD
#define CFE_MAGIC 0x43464531 /* 1EFC */
#define FACTORY_MAGIC 0x59544346 /* FCTY */
+#define NVRAM_HEADER 0x48534C46 /* FLSH */
#define POT_MAGIC1 0x54544f50 /* POTT */
#define POT_MAGIC2 0x504f /* OP */
#define ML_MAGIC1 0x39685a42
@@ -91,7 +91,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
if (offset >= 0x2000000)
break;
- if (curr_part > BCM47XXPART_MAX_PARTS) {
+ if (curr_part >= BCM47XXPART_MAX_PARTS) {
pr_warn("Reached maximum number of partitions, scanning stopped!\n");
break;
}
@@ -147,6 +147,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
/* TRX */
if (buf[0x000 / 4] == TRX_MAGIC) {
+ if (BCM47XXPART_MAX_PARTS - curr_part < 4) {
+ pr_warn("Not enough partitions left to register trx, scanning stopped!\n");
+ break;
+ }
+
trx = (struct trx_header *)buf;
trx_part = curr_part;
@@ -212,7 +217,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
/* Look for NVRAM at the end of the last block. */
for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) {
- if (curr_part > BCM47XXPART_MAX_PARTS) {
+ if (curr_part >= BCM47XXPART_MAX_PARTS) {
pr_warn("Reached maximum number of partitions, scanning stopped!\n");
break;
}
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 77514430f1fe..e4ec355704a6 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -21,7 +21,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -69,10 +68,10 @@ static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, s
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
-static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
- struct otp_info *, size_t);
-static int cfi_intelext_get_user_prot_info (struct mtd_info *,
- struct otp_info *, size_t);
+static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
+static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
@@ -435,10 +434,8 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
int i;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_ERR "Failed to allocate memory for MTD device\n");
+ if (!mtd)
return NULL;
- }
mtd->priv = map;
mtd->type = MTD_NORFLASH;
@@ -564,10 +561,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
* mtd->numeraseregions, GFP_KERNEL);
- if (!mtd->eraseregions) {
- printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
+ if (!mtd->eraseregions)
goto setup_err;
- }
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
unsigned long ernum, ersize;
@@ -2399,24 +2394,19 @@ static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
NULL, do_otp_lock, 1);
}
-static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
-{
- size_t retlen;
- int ret;
+static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
- ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
- return ret ? : retlen;
+{
+ return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 0);
}
-static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
{
- size_t retlen;
- int ret;
-
- ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
- return ret ? : retlen;
+ return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 1);
}
#endif
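The prototype changes above (and the matching mtd_dataflash change further down in this diff) move the OTP info callbacks from "return the byte count, or a negative errno" to the more conventional "return 0 or -errno and report the byte count through *retlen". A userspace-style sketch of the new calling convention; get_prot_info and demo_otp_info are made-up stand-ins, not the MTD API:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct demo_otp_info {
	unsigned int start, length, locked;
};

/* hypothetical provider following the new convention */
static int get_prot_info(size_t len, size_t *retlen, struct demo_otp_info *buf)
{
	if (len < sizeof(*buf))
		return -ENOSPC;
	buf->start = 0;
	buf->length = 64;
	buf->locked = 1;
	*retlen = sizeof(*buf);
	return 0;
}

int main(void)
{
	struct demo_otp_info info;
	size_t retlen = 0;
	int ret = get_prot_info(sizeof(info), &retlen, &info);

	if (ret)
		printf("error: %s\n", strerror(-ret));
	else
		printf("got %zu bytes, OTP region length %u\n", retlen, info.length);
	return 0;
}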
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 89b9d6891532..e21fde9d4d7e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -24,7 +24,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -507,10 +506,8 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
int i;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
+ if (!mtd)
return NULL;
- }
mtd->priv = map;
mtd->type = MTD_NORFLASH;
@@ -661,10 +658,8 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
* mtd->numeraseregions, GFP_KERNEL);
- if (!mtd->eraseregions) {
- printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
+ if (!mtd->eraseregions)
goto setup_err;
- }
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
unsigned long ernum, ersize;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 096993f9711e..6293855fb5ee 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -22,7 +22,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -176,7 +175,6 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
if (!mtd) {
- printk(KERN_ERR "Failed to allocate memory for MTD device\n");
kfree(cfi->cmdset_priv);
return NULL;
}
@@ -189,7 +187,6 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
* mtd->numeraseregions, GFP_KERNEL);
if (!mtd->eraseregions) {
- printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
kfree(cfi->cmdset_priv);
kfree(mtd);
return NULL;
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index d25535279404..e8d0164498b0 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -168,10 +168,8 @@ static int __xipram cfi_chip_setup(struct map_info *map,
return 0;
cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
- if (!cfi->cfiq) {
- printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
+ if (!cfi->cfiq)
return 0;
- }
memset(cfi->cfiq,0,sizeof(struct cfi_ident));
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index f992418f40a8..08049f6eea60 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -116,10 +116,8 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);
extp = kmalloc(size, GFP_KERNEL);
- if (!extp) {
- printk(KERN_ERR "Failed to allocate memory\n");
+ if (!extp)
goto out;
- }
#ifdef CONFIG_MTD_XIP
local_irq_disable();
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index ffb36ba8a6e0..b57ceea21513 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -114,7 +114,6 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG);
chip_map = kzalloc(mapsize, GFP_KERNEL);
if (!chip_map) {
- printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
kfree(cfi.cfiq);
return NULL;
}
@@ -139,7 +138,6 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL);
if (!retcfi) {
- printk(KERN_WARNING "%s: kmalloc failed for CFI private structure\n", map->name);
kfree(cfi.cfiq);
kfree(chip_map);
return NULL;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 01281382180b..1210bc2923b7 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -210,6 +210,14 @@ config MTD_DOCG3
M-Systems and now Sandisk. The support is very experimental,
and doesn't give access to any write operations.
+config MTD_ST_SPI_FSM
+ tristate "ST Microelectronics SPI FSM Serial Flash Controller"
+ depends on ARM || SH
+ help
+ This provides an MTD device driver for the ST Microelectronics
+ SPI Fast Sequence Mode (FSM) Serial Flash Controller and support
+ for a subset of connected Serial Flash devices.
+
if MTD_DOCG3
config BCH_CONST_M
default 14
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index d83bd73096f6..c68868f60588 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MTD_NAND_OMAP_BCH) += elm.o
obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
+obj-$(CONFIG_MTD_ST_SPI_FSM) += st_spi_fsm.o
CFLAGS_docg3.o += -I$(src)
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index d9fd87a4c8dc..66f0405f7e53 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -209,7 +209,6 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
}
-/* FIXME: ensure that mtd->size % erase_size == 0 */
static struct block2mtd_dev *add_device(char *devname, int erase_size)
{
const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
@@ -240,13 +239,18 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
if (IS_ERR(bdev)) {
pr_err("error: cannot open device %s\n", devname);
- goto devinit_err;
+ goto err_free_block2mtd;
}
dev->blkdev = bdev;
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
pr_err("attempting to use an MTD device as a block device\n");
- goto devinit_err;
+ goto err_free_block2mtd;
+ }
+
+ if ((long)dev->blkdev->bd_inode->i_size % erase_size) {
+ pr_err("erasesize must be a divisor of device size\n");
+ goto err_free_block2mtd;
}
mutex_init(&dev->write_mutex);
@@ -255,7 +259,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* make the name contain the block device in */
name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
if (!name)
- goto devinit_err;
+ goto err_destroy_mutex;
dev->mtd.name = name;
@@ -274,7 +278,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
if (mtd_device_register(&dev->mtd, NULL, 0)) {
/* Device didn't get added, so free the entry */
- goto devinit_err;
+ goto err_destroy_mutex;
}
list_add(&dev->list, &blkmtd_device_list);
pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
@@ -283,7 +287,9 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.erasesize >> 10, dev->mtd.erasesize);
return dev;
-devinit_err:
+err_destroy_mutex:
+ mutex_destroy(&dev->write_mutex);
+err_free_block2mtd:
block2mtd_free_device(dev);
return NULL;
}
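The renamed error labels above follow the usual unwind idiom: each goto target releases exactly what was set up before the failure point, so the mutex is only destroyed once it has actually been initialised. A self-contained sketch of the shape, with made-up setup steps:

#include <stdlib.h>

static int setup_a(void **a) { *a = malloc(16); return *a ? 0 : -1; }
static int setup_b(void **b) { *b = malloc(16); return *b ? 0 : -1; }

static int add_demo_device(void)
{
	void *a, *b;

	if (setup_a(&a))
		goto err_out;		/* nothing to undo yet */
	if (setup_b(&b))
		goto err_free_a;	/* undo only step A */

	/* success path: use and release both resources */
	free(b);
	free(a);
	return 0;

err_free_a:
	free(a);
err_out:
	return -1;
}

int main(void)
{
	return add_demo_device() ? EXIT_FAILURE : EXIT_SUCCESS;
}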
@@ -448,6 +454,7 @@ static void block2mtd_exit(void)
struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
block2mtd_sync(&dev->mtd);
mtd_device_unregister(&dev->mtd);
+ mutex_destroy(&dev->write_mutex);
pr_info("mtd%d: [%s] removed\n",
dev->mtd.index,
dev->mtd.name + strlen("block2mtd: "));
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
index d1dd6a33a050..1fd4a0f77967 100644
--- a/drivers/mtd/devices/elm.c
+++ b/drivers/mtd/devices/elm.c
@@ -15,6 +15,8 @@
*
*/
+#define DRIVER_NAME "omap-elm"
+
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -84,6 +86,8 @@ struct elm_info {
struct list_head list;
enum bch_ecc bch_type;
struct elm_registers elm_regs;
+ int ecc_steps;
+ int ecc_syndrome_size;
};
static LIST_HEAD(elm_devices);
@@ -103,7 +107,8 @@ static u32 elm_read_reg(struct elm_info *info, int offset)
* @dev: ELM device
* @bch_type: Type of BCH ecc
*/
-int elm_config(struct device *dev, enum bch_ecc bch_type)
+int elm_config(struct device *dev, enum bch_ecc bch_type,
+ int ecc_steps, int ecc_step_size, int ecc_syndrome_size)
{
u32 reg_val;
struct elm_info *info = dev_get_drvdata(dev);
@@ -112,10 +117,22 @@ int elm_config(struct device *dev, enum bch_ecc bch_type)
dev_err(dev, "Unable to configure elm - device not probed?\n");
return -ENODEV;
}
+ /* ELM cannot detect ECC errors for chunks > 1KB */
+ if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) {
+ dev_err(dev, "unsupported config ecc-size=%d\n", ecc_step_size);
+ return -EINVAL;
+ }
+	/* ELM supports 8 error syndrome processes */
+ if (ecc_steps > ERROR_VECTOR_MAX) {
+ dev_err(dev, "unsupported config ecc-step=%d\n", ecc_steps);
+ return -EINVAL;
+ }
reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
- info->bch_type = bch_type;
+ info->bch_type = bch_type;
+ info->ecc_steps = ecc_steps;
+ info->ecc_syndrome_size = ecc_syndrome_size;
return 0;
}
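elm_config() now receives the ECC geometry and rejects layouts the engine cannot process. A small sketch of the same two checks with assumed values for the constants (the real ELM_ECC_SIZE and ERROR_VECTOR_MAX are defined in the driver's headers):

#include <stdio.h>

#define DEMO_ELM_ECC_SIZE	0x7ff	/* assumed: yields the 1KB chunk limit */
#define DEMO_ERROR_VECTOR_MAX	8	/* assumed: syndrome processes in ELM */

static int elm_check_geometry(int ecc_steps, int ecc_step_size)
{
	/* ELM cannot detect ECC errors for chunks > 1KB */
	if (ecc_step_size > ((DEMO_ELM_ECC_SIZE + 1) / 2)) {
		printf("unsupported ecc-size=%d\n", ecc_step_size);
		return -1;
	}
	/* no more syndrome processes than the hardware provides */
	if (ecc_steps > DEMO_ERROR_VECTOR_MAX) {
		printf("unsupported ecc-steps=%d\n", ecc_steps);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* e.g. a 2KiB page split into four 512-byte steps */
	printf("4 x 512:  %s\n", elm_check_geometry(4, 512) ? "rejected" : "ok");
	printf("2 x 2048: %s\n", elm_check_geometry(2, 2048) ? "rejected" : "ok");
	return 0;
}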
@@ -157,17 +174,15 @@ static void elm_load_syndrome(struct elm_info *info,
int i, offset;
u32 val;
- for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ for (i = 0; i < info->ecc_steps; i++) {
/* Check error reported */
if (err_vec[i].error_reported) {
elm_configure_page_mode(info, i, true);
offset = ELM_SYNDROME_FRAGMENT_0 +
SYNDROME_FRAGMENT_REG_SIZE * i;
-
- /* BCH8 */
- if (info->bch_type) {
-
+ switch (info->bch_type) {
+ case BCH8_ECC:
/* syndrome fragment 0 = ecc[9-12B] */
val = cpu_to_be32(*(u32 *) &ecc[9]);
elm_write_reg(info, offset, val);
@@ -186,7 +201,8 @@ static void elm_load_syndrome(struct elm_info *info,
offset += 4;
val = ecc[0];
elm_write_reg(info, offset, val);
- } else {
+ break;
+ case BCH4_ECC:
/* syndrome fragment 0 = ecc[20-52b] bits */
val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
((ecc[2] & 0xf) << 28);
@@ -196,11 +212,14 @@ static void elm_load_syndrome(struct elm_info *info,
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
elm_write_reg(info, offset, val);
+ break;
+ default:
+ pr_err("invalid config bch_type\n");
}
}
/* Update ecc pointer with ecc byte size */
- ecc += info->bch_type ? BCH8_SIZE : BCH4_SIZE;
+ ecc += info->ecc_syndrome_size;
}
}
@@ -223,7 +242,7 @@ static void elm_start_processing(struct elm_info *info,
* Set syndrome vector valid, so that ELM module
* will process it for vectors error is reported
*/
- for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ for (i = 0; i < info->ecc_steps; i++) {
if (err_vec[i].error_reported) {
offset = ELM_SYNDROME_FRAGMENT_6 +
SYNDROME_FRAGMENT_REG_SIZE * i;
@@ -252,7 +271,7 @@ static void elm_error_correction(struct elm_info *info,
int offset;
u32 reg_val;
- for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ for (i = 0; i < info->ecc_steps; i++) {
/* Check error reported */
if (err_vec[i].error_reported) {
@@ -354,10 +373,8 @@ static int elm_probe(struct platform_device *pdev)
struct elm_info *info;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
- if (!info) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!info)
return -ENOMEM;
- }
info->dev = &pdev->dev;
@@ -380,7 +397,7 @@ static int elm_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- if (pm_runtime_get_sync(&pdev->dev)) {
+ if (pm_runtime_get_sync(&pdev->dev) < 0) {
ret = -EINVAL;
pm_runtime_disable(&pdev->dev);
dev_err(&pdev->dev, "can't enable clock\n");
@@ -505,7 +522,7 @@ MODULE_DEVICE_TABLE(of, elm_of_match);
static struct platform_driver elm_driver = {
.driver = {
- .name = "elm",
+ .name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(elm_of_match),
.pm = &elm_pm_ops,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index ad1913909702..524dab3ac938 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -15,7 +15,6 @@
*
*/
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -41,7 +40,8 @@
#define OPCODE_WRSR 0x01 /* Write status register 1 byte */
#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
-#define OPCODE_QUAD_READ 0x6b /* Read data bytes */
+#define OPCODE_DUAL_READ 0x3b /* Read data bytes (Dual SPI) */
+#define OPCODE_QUAD_READ 0x6b /* Read data bytes (Quad SPI) */
#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
#define OPCODE_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
@@ -54,7 +54,8 @@
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define OPCODE_NORM_READ_4B 0x13 /* Read data bytes (low frequency) */
#define OPCODE_FAST_READ_4B 0x0c /* Read data bytes (high frequency) */
-#define OPCODE_QUAD_READ_4B 0x6c /* Read data bytes */
+#define OPCODE_DUAL_READ_4B 0x3c /* Read data bytes (Dual SPI) */
+#define OPCODE_QUAD_READ_4B 0x6c /* Read data bytes (Quad SPI) */
#define OPCODE_PP_4B 0x12 /* Page program (up to 256 bytes) */
#define OPCODE_SE_4B 0xdc /* Sector erase (usually 64KiB) */
@@ -95,6 +96,7 @@
enum read_type {
M25P80_NORMAL = 0,
M25P80_FAST,
+ M25P80_DUAL,
M25P80_QUAD,
};
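With the new M25P80_DUAL mode, the read path must know both the receive bus width and the dummy cycles for each mode; dual and quad reads differ only in how many data lines carry the reply. A userspace sketch of the mapping, mirroring the two helpers changed in the hunks below (the enum is a stand-in, not the driver's):

#include <stdio.h>

enum demo_read_type { RD_NORMAL = 0, RD_FAST, RD_DUAL, RD_QUAD };

static unsigned int rx_nbits(enum demo_read_type t)
{
	switch (t) {
	case RD_DUAL: return 2;	/* two data lines */
	case RD_QUAD: return 4;	/* four data lines */
	default:      return 1;	/* classic single-line SPI */
	}
}

static unsigned int dummy_bytes(enum demo_read_type t)
{
	/* fast, dual and quad reads all need one dummy byte after the address */
	return t == RD_NORMAL ? 0 : 1;
}

int main(void)
{
	enum demo_read_type t = RD_DUAL;

	printf("rx lines = %u, dummy bytes = %u\n", rx_nbits(t), dummy_bytes(t));
	return 0;
}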
@@ -479,6 +481,7 @@ static inline int m25p80_dummy_cycles_read(struct m25p *flash)
{
switch (flash->flash_read) {
case M25P80_FAST:
+ case M25P80_DUAL:
case M25P80_QUAD:
return 1;
case M25P80_NORMAL:
@@ -492,6 +495,8 @@ static inline int m25p80_dummy_cycles_read(struct m25p *flash)
static inline unsigned int m25p80_rx_nbits(const struct m25p *flash)
{
switch (flash->flash_read) {
+ case M25P80_DUAL:
+ return 2;
case M25P80_QUAD:
return 4;
default:
@@ -855,7 +860,8 @@ struct flash_info {
#define SST_WRITE 0x04 /* use SST byte programming */
#define M25P_NO_FR 0x08 /* Can't do fastread */
#define SECT_4K_PMC 0x10 /* OPCODE_BE_4K_PMC works uniformly */
-#define M25P80_QUAD_READ 0x20 /* Flash supports Quad Read */
+#define M25P80_DUAL_READ 0x20 /* Flash supports Dual Read */
+#define M25P80_QUAD_READ 0x40 /* Flash supports Quad Read */
};
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
@@ -934,6 +940,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, M25P80_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, M25P80_QUAD_READ) },
/* Micron */
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
@@ -953,8 +960,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) },
{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
- { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, M25P80_QUAD_READ) },
- { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, M25P80_QUAD_READ) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, M25P80_DUAL_READ | M25P80_QUAD_READ) },
+ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, M25P80_DUAL_READ | M25P80_QUAD_READ) },
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
@@ -965,6 +972,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
@@ -1072,9 +1080,8 @@ static const struct spi_device_id *jedec_probe(struct spi_device *spi)
for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) {
info = (void *)m25p_ids[tmp].driver_data;
if (info->jedec_id == jedec) {
- if (info->ext_id != 0 && info->ext_id != ext_jedec)
- continue;
- return &m25p_ids[tmp];
+ if (info->ext_id == 0 || info->ext_id == ext_jedec)
+ return &m25p_ids[tmp];
}
}
dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
@@ -1226,7 +1233,7 @@ static int m25p_probe(struct spi_device *spi)
if (info->flags & M25P_NO_FR)
flash->flash_read = M25P80_NORMAL;
- /* Quad-read mode takes precedence over fast/normal */
+ /* Quad/Dual-read mode takes precedence over fast/normal */
if (spi->mode & SPI_RX_QUAD && info->flags & M25P80_QUAD_READ) {
ret = set_quad_mode(flash, info->jedec_id);
if (ret) {
@@ -1234,6 +1241,8 @@ static int m25p_probe(struct spi_device *spi)
return ret;
}
flash->flash_read = M25P80_QUAD;
+ } else if (spi->mode & SPI_RX_DUAL && info->flags & M25P80_DUAL_READ) {
+ flash->flash_read = M25P80_DUAL;
}
/* Default commands */
@@ -1241,6 +1250,9 @@ static int m25p_probe(struct spi_device *spi)
case M25P80_QUAD:
flash->read_opcode = OPCODE_QUAD_READ;
break;
+ case M25P80_DUAL:
+ flash->read_opcode = OPCODE_DUAL_READ;
+ break;
case M25P80_FAST:
flash->read_opcode = OPCODE_FAST_READ;
break;
@@ -1265,6 +1277,9 @@ static int m25p_probe(struct spi_device *spi)
case M25P80_QUAD:
flash->read_opcode = OPCODE_QUAD_READ_4B;
break;
+ case M25P80_DUAL:
+ flash->read_opcode = OPCODE_DUAL_READ_4B;
+ break;
case M25P80_FAST:
flash->read_opcode = OPCODE_FAST_READ_4B;
break;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 624069de4f28..dd22ce2cc9ad 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -10,7 +10,6 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -440,8 +439,8 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
#ifdef CONFIG_MTD_DATAFLASH_OTP
-static int dataflash_get_otp_info(struct mtd_info *mtd,
- struct otp_info *info, size_t len)
+static int dataflash_get_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *info)
{
/* Report both blocks as identical: bytes 0..64, locked.
* Unless the user block changed from all-ones, we can't
@@ -450,7 +449,8 @@ static int dataflash_get_otp_info(struct mtd_info *mtd,
info->start = 0;
info->length = 64;
info->locked = 1;
- return sizeof(*info);
+ *retlen = sizeof(*info);
+ return 0;
}
static ssize_t otp_read(struct spi_device *spi, unsigned base,
@@ -542,14 +542,18 @@ static int dataflash_write_user_otp(struct mtd_info *mtd,
struct dataflash *priv = mtd->priv;
int status;
- if (len > 64)
- return -EINVAL;
+ if (from >= 64) {
+ /*
+		 * The write starts beyond the end of the OTP memory,
+		 * so no data can be written.
+ */
+ *retlen = 0;
+ return 0;
+ }
- /* Strictly speaking, we *could* truncate the write ... but
- * let's not do that for the only write that's ever possible.
- */
+ /* Truncate the write to fit into OTP memory. */
if ((from + len) > 64)
- return -EINVAL;
+ len = 64 - from;
/* OUT: OP_WRITE_SECURITY, 3 zeroes, 64 data-or-zero bytes
* IN: ignore all
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index e1f2aebaa489..2cceebfb251e 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -205,6 +205,8 @@ static inline void kill_final_newline(char *str)
return 1; \
} while (0)
+#ifndef MODULE
+static int phram_init_called;
/*
* This shall contain the module parameter if any. It is of the form:
* - phram=<device>,<address>,<size> for module case
@@ -213,9 +215,10 @@ static inline void kill_final_newline(char *str)
* size.
* Example: phram.phram=rootfs,0xa0000000,512Mi
*/
-static __initdata char phram_paramline[64 + 20 + 20];
+static char phram_paramline[64 + 20 + 20];
+#endif
-static int __init phram_setup(const char *val)
+static int phram_setup(const char *val)
{
char buf[64 + 20 + 20], *str = buf;
char *token[3];
@@ -264,17 +267,36 @@ static int __init phram_setup(const char *val)
return ret;
}
-static int __init phram_param_call(const char *val, struct kernel_param *kp)
+static int phram_param_call(const char *val, struct kernel_param *kp)
{
+#ifdef MODULE
+ return phram_setup(val);
+#else
/*
- * This function is always called before 'init_phram()', whether
- * built-in or module.
+ * If more parameters are later passed in via
+ * /sys/module/phram/parameters/phram
+ * and init_phram() has already been called,
+ * we can parse the argument now.
*/
+
+ if (phram_init_called)
+ return phram_setup(val);
+
+ /*
+ * During early boot stage, we only save the parameters
+	 * here. We must parse them later: if the param is passed
+	 * on the kernel boot command line, phram_param_call() is
+ * called so early that it is not possible to resolve
+ * the device (even kmalloc() fails). Defer that work to
+ * phram_setup().
+ */
+
if (strlen(val) >= sizeof(phram_paramline))
return -ENOSPC;
strcpy(phram_paramline, val);
return 0;
+#endif
}
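The #ifndef MODULE path above implements deferred parsing for the built-in case: a parameter arriving from the kernel command line is only stashed, since at that point it is far too early to resolve the device (even kmalloc() fails), and it is parsed once init_phram() runs; parameters written later through sysfs are parsed immediately. A self-contained sketch of that shape (all names here are illustrative, not the module's):

#include <stdio.h>
#include <string.h>

static char saved[64];
static int init_done;

static int parse_now(const char *val)
{
	printf("parsing \"%s\"\n", val);
	return 0;
}

static int param_call(const char *val)
{
	if (init_done)
		return parse_now(val);	/* runtime via sysfs: parse directly */
	if (strlen(val) >= sizeof(saved))
		return -1;		/* would not fit (like -ENOSPC) */
	strcpy(saved, val);		/* early boot: just remember it */
	return 0;
}

static int late_init(void)
{
	int ret = 0;

	if (saved[0])
		ret = parse_now(saved);
	init_done = 1;
	return ret;
}

int main(void)
{
	param_call("rootfs,0xa0000000,512Mi");	/* early (boot command line) */
	late_init();				/* parses the stashed value */
	param_call("data,0xb0000000,64Mi");	/* later (sysfs): parsed now */
	return 0;
}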
module_param_call(phram, phram_param_call, NULL, NULL, 000);
@@ -283,10 +305,15 @@ MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"
static int __init init_phram(void)
{
+ int ret = 0;
+
+#ifndef MODULE
if (phram_paramline[0])
- return phram_setup(phram_paramline);
+ ret = phram_setup(phram_paramline);
+ phram_init_called = 1;
+#endif
- return 0;
+ return ret;
}
static void __exit cleanup_phram(void)
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 0c51b988e1f8..f02603e1bfeb 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -725,16 +725,11 @@ static int __init init_pmc551(void)
}
mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_NOTICE "pmc551: Cannot allocate new MTD "
- "device.\n");
+ if (!mtd)
break;
- }
priv = kzalloc(sizeof(struct mypriv), GFP_KERNEL);
if (!priv) {
- printk(KERN_NOTICE "pmc551: Cannot allocate new MTD "
- "device.\n");
kfree(mtd);
break;
}
diff --git a/drivers/mtd/devices/serial_flash_cmds.h b/drivers/mtd/devices/serial_flash_cmds.h
new file mode 100644
index 000000000000..4f0c2c7c898e
--- /dev/null
+++ b/drivers/mtd/devices/serial_flash_cmds.h
@@ -0,0 +1,81 @@
+/*
+ * Generic/SFDP Flash Commands and Device Capabilities
+ *
+ * Copyright (C) 2013 Lee Jones <lee.jones@linaro.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _MTD_SERIAL_FLASH_CMDS_H
+#define _MTD_SERIAL_FLASH_CMDS_H
+
+/* Generic Flash Commands/OPCODEs */
+#define FLASH_CMD_WREN 0x06
+#define FLASH_CMD_WRDI 0x04
+#define FLASH_CMD_RDID 0x9f
+#define FLASH_CMD_RDSR 0x05
+#define FLASH_CMD_RDSR2 0x35
+#define FLASH_CMD_WRSR 0x01
+#define FLASH_CMD_SE_4K 0x20
+#define FLASH_CMD_SE_32K 0x52
+#define FLASH_CMD_SE 0xd8
+#define FLASH_CMD_CHIPERASE 0xc7
+#define FLASH_CMD_WRVCR 0x81
+#define FLASH_CMD_RDVCR 0x85
+
+/* JEDEC Standard - Serial Flash Discoverable Parameters (SFDP) Commands */
+#define FLASH_CMD_READ 0x03 /* READ */
+#define FLASH_CMD_READ_FAST 0x0b /* FAST READ */
+#define FLASH_CMD_READ_1_1_2 0x3b /* DUAL OUTPUT READ */
+#define FLASH_CMD_READ_1_2_2 0xbb /* DUAL I/O READ */
+#define FLASH_CMD_READ_1_1_4 0x6b /* QUAD OUTPUT READ */
+#define FLASH_CMD_READ_1_4_4 0xeb /* QUAD I/O READ */
+
+#define FLASH_CMD_WRITE 0x02 /* PAGE PROGRAM */
+#define FLASH_CMD_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */
+#define FLASH_CMD_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */
+#define FLASH_CMD_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */
+#define FLASH_CMD_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */
+
+#define FLASH_CMD_EN4B_ADDR 0xb7 /* Enter 4-byte address mode */
+#define FLASH_CMD_EX4B_ADDR 0xe9 /* Exit 4-byte address mode */
+
+/* READ commands with 32-bit addressing */
+#define FLASH_CMD_READ4 0x13
+#define FLASH_CMD_READ4_FAST 0x0c
+#define FLASH_CMD_READ4_1_1_2 0x3c
+#define FLASH_CMD_READ4_1_2_2 0xbc
+#define FLASH_CMD_READ4_1_1_4 0x6c
+#define FLASH_CMD_READ4_1_4_4 0xec
+
+/* Configuration flags */
+#define FLASH_FLAG_SINGLE 0x000000ff
+#define FLASH_FLAG_READ_WRITE 0x00000001
+#define FLASH_FLAG_READ_FAST 0x00000002
+#define FLASH_FLAG_SE_4K 0x00000004
+#define FLASH_FLAG_SE_32K 0x00000008
+#define FLASH_FLAG_CE 0x00000010
+#define FLASH_FLAG_32BIT_ADDR 0x00000020
+#define FLASH_FLAG_RESET 0x00000040
+#define FLASH_FLAG_DYB_LOCKING 0x00000080
+
+#define FLASH_FLAG_DUAL 0x0000ff00
+#define FLASH_FLAG_READ_1_1_2 0x00000100
+#define FLASH_FLAG_READ_1_2_2 0x00000200
+#define FLASH_FLAG_READ_2_2_2 0x00000400
+#define FLASH_FLAG_WRITE_1_1_2 0x00001000
+#define FLASH_FLAG_WRITE_1_2_2 0x00002000
+#define FLASH_FLAG_WRITE_2_2_2 0x00004000
+
+#define FLASH_FLAG_QUAD 0x00ff0000
+#define FLASH_FLAG_READ_1_1_4 0x00010000
+#define FLASH_FLAG_READ_1_4_4 0x00020000
+#define FLASH_FLAG_READ_4_4_4 0x00040000
+#define FLASH_FLAG_WRITE_1_1_4 0x00100000
+#define FLASH_FLAG_WRITE_1_4_4 0x00200000
+#define FLASH_FLAG_WRITE_4_4_4 0x00400000
+
+#endif /* _MTD_SERIAL_FLASH_CMDS_H */
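The _x_y_z suffixes above encode how many lines each phase of a transfer uses (command_address_data), and the capability flags are grouped so a whole class can be masked at once via FLASH_FLAG_SINGLE/DUAL/QUAD. A short sketch of how a flag word might be tested; the flag values are copied from this header, while the device's flag word is invented:

#include <stdio.h>
#include <stdint.h>

#define DEMO_FLAG_READ_1_1_4	0x00010000	/* quad-output read */
#define DEMO_FLAG_READ_1_4_4	0x00020000	/* quad-I/O read */
#define DEMO_FLAG_QUAD		0x00ff0000	/* any quad capability */

int main(void)
{
	/* hypothetical device: quad-output reads only */
	uint32_t dev_flags = DEMO_FLAG_READ_1_1_4;

	if (dev_flags & DEMO_FLAG_QUAD)
		printf("some quad-read capability present\n");
	if (!(dev_flags & DEMO_FLAG_READ_1_4_4))
		printf("quad-I/O (1-4-4) read unsupported, fall back\n");
	return 0;
}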
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 423821412062..363da96e6891 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -913,7 +913,6 @@ static int spear_smi_probe(struct platform_device *pdev)
if (np) {
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
- pr_err("%s: ERROR: no memory", __func__);
ret = -ENOMEM;
goto err;
}
@@ -943,7 +942,6 @@ static int spear_smi_probe(struct platform_device *pdev)
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_ATOMIC);
if (!dev) {
ret = -ENOMEM;
- dev_err(&pdev->dev, "mem alloc fail\n");
goto err;
}
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 687bf27ec850..c63ecbcad0b7 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -15,7 +15,6 @@
*
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
new file mode 100644
index 000000000000..1957d7c8e185
--- /dev/null
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -0,0 +1,2108 @@
+/*
+ * st_spi_fsm.c - ST Fast Sequence Mode (FSM) Serial Flash Controller
+ *
+ * Author: Angus Clark <angus.clark@st.com>
+ *
+ * Copyright (C) 2010-2014 STMicroelectronics Limited
+ *
+ * JEDEC probe based on drivers/mtd/devices/m25p80.c
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include "serial_flash_cmds.h"
+
+/*
+ * FSM SPI Controller Registers
+ */
+#define SPI_CLOCKDIV 0x0010
+#define SPI_MODESELECT 0x0018
+#define SPI_CONFIGDATA 0x0020
+#define SPI_STA_MODE_CHANGE 0x0028
+#define SPI_FAST_SEQ_TRANSFER_SIZE 0x0100
+#define SPI_FAST_SEQ_ADD1 0x0104
+#define SPI_FAST_SEQ_ADD2 0x0108
+#define SPI_FAST_SEQ_ADD_CFG 0x010c
+#define SPI_FAST_SEQ_OPC1 0x0110
+#define SPI_FAST_SEQ_OPC2 0x0114
+#define SPI_FAST_SEQ_OPC3 0x0118
+#define SPI_FAST_SEQ_OPC4 0x011c
+#define SPI_FAST_SEQ_OPC5 0x0120
+#define SPI_MODE_BITS 0x0124
+#define SPI_DUMMY_BITS 0x0128
+#define SPI_FAST_SEQ_FLASH_STA_DATA 0x012c
+#define SPI_FAST_SEQ_1 0x0130
+#define SPI_FAST_SEQ_2 0x0134
+#define SPI_FAST_SEQ_3 0x0138
+#define SPI_FAST_SEQ_4 0x013c
+#define SPI_FAST_SEQ_CFG 0x0140
+#define SPI_FAST_SEQ_STA 0x0144
+#define SPI_QUAD_BOOT_SEQ_INIT_1 0x0148
+#define SPI_QUAD_BOOT_SEQ_INIT_2 0x014c
+#define SPI_QUAD_BOOT_READ_SEQ_1 0x0150
+#define SPI_QUAD_BOOT_READ_SEQ_2 0x0154
+#define SPI_PROGRAM_ERASE_TIME 0x0158
+#define SPI_MULT_PAGE_REPEAT_SEQ_1 0x015c
+#define SPI_MULT_PAGE_REPEAT_SEQ_2 0x0160
+#define SPI_STATUS_WR_TIME_REG 0x0164
+#define SPI_FAST_SEQ_DATA_REG 0x0300
+
+/*
+ * Register: SPI_MODESELECT
+ */
+#define SPI_MODESELECT_CONTIG 0x01
+#define SPI_MODESELECT_FASTREAD 0x02
+#define SPI_MODESELECT_DUALIO 0x04
+#define SPI_MODESELECT_FSM 0x08
+#define SPI_MODESELECT_QUADBOOT 0x10
+
+/*
+ * Register: SPI_CONFIGDATA
+ */
+#define SPI_CFG_DEVICE_ST 0x1
+#define SPI_CFG_DEVICE_ATMEL 0x4
+#define SPI_CFG_MIN_CS_HIGH(x) (((x) & 0xfff) << 4)
+#define SPI_CFG_CS_SETUPHOLD(x) (((x) & 0xff) << 16)
+#define SPI_CFG_DATA_HOLD(x) (((x) & 0xff) << 24)
+
+#define SPI_CFG_DEFAULT_MIN_CS_HIGH SPI_CFG_MIN_CS_HIGH(0x0AA)
+#define SPI_CFG_DEFAULT_CS_SETUPHOLD SPI_CFG_CS_SETUPHOLD(0xA0)
+#define SPI_CFG_DEFAULT_DATA_HOLD SPI_CFG_DATA_HOLD(0x00)
+
+/*
+ * Register: SPI_FAST_SEQ_TRANSFER_SIZE
+ */
+#define TRANSFER_SIZE(x) ((x) * 8)
+
+/*
+ * Register: SPI_FAST_SEQ_ADD_CFG
+ */
+#define ADR_CFG_CYCLES_ADD1(x) ((x) << 0)
+#define ADR_CFG_PADS_1_ADD1 (0x0 << 6)
+#define ADR_CFG_PADS_2_ADD1 (0x1 << 6)
+#define ADR_CFG_PADS_4_ADD1 (0x3 << 6)
+#define ADR_CFG_CSDEASSERT_ADD1 (1 << 8)
+#define ADR_CFG_CYCLES_ADD2(x) ((x) << (0+16))
+#define ADR_CFG_PADS_1_ADD2 (0x0 << (6+16))
+#define ADR_CFG_PADS_2_ADD2 (0x1 << (6+16))
+#define ADR_CFG_PADS_4_ADD2 (0x3 << (6+16))
+#define ADR_CFG_CSDEASSERT_ADD2 (1 << (8+16))
+
+/*
+ * Register: SPI_FAST_SEQ_n
+ */
+#define SEQ_OPC_OPCODE(x) ((x) << 0)
+#define SEQ_OPC_CYCLES(x) ((x) << 8)
+#define SEQ_OPC_PADS_1 (0x0 << 14)
+#define SEQ_OPC_PADS_2 (0x1 << 14)
+#define SEQ_OPC_PADS_4 (0x3 << 14)
+#define SEQ_OPC_CSDEASSERT (1 << 16)
+
+/*
+ * Register: SPI_FAST_SEQ_CFG
+ */
+#define SEQ_CFG_STARTSEQ (1 << 0)
+#define SEQ_CFG_SWRESET (1 << 5)
+#define SEQ_CFG_CSDEASSERT (1 << 6)
+#define SEQ_CFG_READNOTWRITE (1 << 7)
+#define SEQ_CFG_ERASE (1 << 8)
+#define SEQ_CFG_PADS_1 (0x0 << 16)
+#define SEQ_CFG_PADS_2 (0x1 << 16)
+#define SEQ_CFG_PADS_4 (0x3 << 16)
+
+/*
+ * Register: SPI_MODE_BITS
+ */
+#define MODE_DATA(x) (x & 0xff)
+#define MODE_CYCLES(x) ((x & 0x3f) << 16)
+#define MODE_PADS_1 (0x0 << 22)
+#define MODE_PADS_2 (0x1 << 22)
+#define MODE_PADS_4 (0x3 << 22)
+#define DUMMY_CSDEASSERT (1 << 24)
+
+/*
+ * Register: SPI_DUMMY_BITS
+ */
+#define DUMMY_CYCLES(x) ((x & 0x3f) << 16)
+#define DUMMY_PADS_1 (0x0 << 22)
+#define DUMMY_PADS_2 (0x1 << 22)
+#define DUMMY_PADS_4 (0x3 << 22)
+#define DUMMY_CSDEASSERT (1 << 24)
+
+/*
+ * Register: SPI_FAST_SEQ_FLASH_STA_DATA
+ */
+#define STA_DATA_BYTE1(x) ((x & 0xff) << 0)
+#define STA_DATA_BYTE2(x) ((x & 0xff) << 8)
+#define STA_PADS_1 (0x0 << 16)
+#define STA_PADS_2 (0x1 << 16)
+#define STA_PADS_4 (0x3 << 16)
+#define STA_CSDEASSERT (0x1 << 20)
+#define STA_RDNOTWR (0x1 << 21)
+
+/*
+ * FSM SPI Instruction Opcodes
+ */
+#define STFSM_OPC_CMD 0x1
+#define STFSM_OPC_ADD 0x2
+#define STFSM_OPC_STA 0x3
+#define STFSM_OPC_MODE 0x4
+#define STFSM_OPC_DUMMY 0x5
+#define STFSM_OPC_DATA 0x6
+#define STFSM_OPC_WAIT 0x7
+#define STFSM_OPC_JUMP 0x8
+#define STFSM_OPC_GOTO 0x9
+#define STFSM_OPC_STOP 0xF
+
+/*
+ * FSM SPI Instructions (== opcode + operand).
+ */
+#define STFSM_INSTR(cmd, op) ((cmd) | ((op) << 4))
+
+#define STFSM_INST_CMD1 STFSM_INSTR(STFSM_OPC_CMD, 1)
+#define STFSM_INST_CMD2 STFSM_INSTR(STFSM_OPC_CMD, 2)
+#define STFSM_INST_CMD3 STFSM_INSTR(STFSM_OPC_CMD, 3)
+#define STFSM_INST_CMD4 STFSM_INSTR(STFSM_OPC_CMD, 4)
+#define STFSM_INST_CMD5 STFSM_INSTR(STFSM_OPC_CMD, 5)
+#define STFSM_INST_ADD1 STFSM_INSTR(STFSM_OPC_ADD, 1)
+#define STFSM_INST_ADD2 STFSM_INSTR(STFSM_OPC_ADD, 2)
+
+#define STFSM_INST_DATA_WRITE STFSM_INSTR(STFSM_OPC_DATA, 1)
+#define STFSM_INST_DATA_READ STFSM_INSTR(STFSM_OPC_DATA, 2)
+
+#define STFSM_INST_STA_RD1 STFSM_INSTR(STFSM_OPC_STA, 0x1)
+#define STFSM_INST_STA_WR1 STFSM_INSTR(STFSM_OPC_STA, 0x1)
+#define STFSM_INST_STA_RD2 STFSM_INSTR(STFSM_OPC_STA, 0x2)
+#define STFSM_INST_STA_WR1_2 STFSM_INSTR(STFSM_OPC_STA, 0x3)
+
+#define STFSM_INST_MODE STFSM_INSTR(STFSM_OPC_MODE, 0)
+#define STFSM_INST_DUMMY STFSM_INSTR(STFSM_OPC_DUMMY, 0)
+#define STFSM_INST_WAIT STFSM_INSTR(STFSM_OPC_WAIT, 0)
+#define STFSM_INST_STOP STFSM_INSTR(STFSM_OPC_STOP, 0)
+
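Each byte of the 16-entry 'seq' array used below is one FSM instruction: a 4-bit opcode in the low nibble and an operand (command slot, transfer direction, and so on) in the high nibble, exactly as STFSM_INSTR() packs it. A tiny sketch of the encoding, using the command/read/stop shape of the RDID sequence:

#include <stdio.h>
#include <stdint.h>

#define OPC_CMD			0x1
#define OPC_DATA		0x6
#define OPC_STOP		0xF
#define INSTR(cmd, op)		((cmd) | ((op) << 4))

int main(void)
{
	/* "issue command slot 1, read data, stop" */
	uint8_t seq[16] = { INSTR(OPC_CMD, 1), INSTR(OPC_DATA, 2),
			    INSTR(OPC_STOP, 0) };
	int i;

	for (i = 0; i < 3; i++)
		printf("seq[%d] = 0x%02x (opcode %x, operand %x)\n",
		       i, seq[i], seq[i] & 0xf, seq[i] >> 4);
	return 0;
}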
+#define STFSM_DEFAULT_EMI_FREQ 100000000UL /* 100 MHz */
+#define STFSM_DEFAULT_WR_TIME	((STFSM_DEFAULT_EMI_FREQ * 15) / 1000)	/* 15ms */
+
+#define STFSM_FLASH_SAFE_FREQ 10000000UL /* 10 MHz */
+
+#define STFSM_MAX_WAIT_SEQ_MS 1000 /* FSM execution time */
+
+/* Flash Commands */
+#define FLASH_CMD_WREN 0x06
+#define FLASH_CMD_WRDI 0x04
+#define FLASH_CMD_RDID 0x9f
+#define FLASH_CMD_RDSR 0x05
+#define FLASH_CMD_RDSR2 0x35
+#define FLASH_CMD_WRSR 0x01
+#define FLASH_CMD_SE_4K 0x20
+#define FLASH_CMD_SE_32K 0x52
+#define FLASH_CMD_SE 0xd8
+#define FLASH_CMD_CHIPERASE 0xc7
+#define FLASH_CMD_WRVCR 0x81
+#define FLASH_CMD_RDVCR 0x85
+
+#define FLASH_CMD_READ 0x03 /* READ */
+#define FLASH_CMD_READ_FAST 0x0b /* FAST READ */
+#define FLASH_CMD_READ_1_1_2 0x3b /* DUAL OUTPUT READ */
+#define FLASH_CMD_READ_1_2_2 0xbb /* DUAL I/O READ */
+#define FLASH_CMD_READ_1_1_4 0x6b /* QUAD OUTPUT READ */
+#define FLASH_CMD_READ_1_4_4 0xeb /* QUAD I/O READ */
+
+#define FLASH_CMD_WRITE 0x02 /* PAGE PROGRAM */
+#define FLASH_CMD_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */
+#define FLASH_CMD_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */
+#define FLASH_CMD_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */
+#define FLASH_CMD_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */
+
+#define FLASH_CMD_EN4B_ADDR 0xb7 /* Enter 4-byte address mode */
+#define FLASH_CMD_EX4B_ADDR 0xe9 /* Exit 4-byte address mode */
+
+/* READ commands with 32-bit addressing (N25Q256 and S25FLxxxS) */
+#define FLASH_CMD_READ4 0x13
+#define FLASH_CMD_READ4_FAST 0x0c
+#define FLASH_CMD_READ4_1_1_2 0x3c
+#define FLASH_CMD_READ4_1_2_2 0xbc
+#define FLASH_CMD_READ4_1_1_4 0x6c
+#define FLASH_CMD_READ4_1_4_4 0xec
+
+/* S25FLxxxS commands */
+#define S25FL_CMD_WRITE4_1_1_4 0x34
+#define S25FL_CMD_SE4 0xdc
+#define S25FL_CMD_CLSR 0x30
+#define S25FL_CMD_DYBWR 0xe1
+#define S25FL_CMD_DYBRD 0xe0
+#define S25FL_CMD_WRITE4 0x12 /* Note, opcode clashes with
+ * 'FLASH_CMD_WRITE_1_4_4'
+ * as found on N25Qxxx devices! */
+
+/* Status register */
+#define FLASH_STATUS_BUSY 0x01
+#define FLASH_STATUS_WEL 0x02
+#define FLASH_STATUS_BP0 0x04
+#define FLASH_STATUS_BP1 0x08
+#define FLASH_STATUS_BP2 0x10
+#define FLASH_STATUS_SRWP0 0x80
+#define FLASH_STATUS_TIMEOUT 0xff
+/* S25FL Error Flags */
+#define S25FL_STATUS_E_ERR 0x20
+#define S25FL_STATUS_P_ERR 0x40
+
+#define FLASH_PAGESIZE 256 /* In Bytes */
+#define FLASH_PAGESIZE_32 (FLASH_PAGESIZE / 4) /* In uint32_t */
+#define FLASH_MAX_BUSY_WAIT (300 * HZ) /* Maximum 'CHIPERASE' time */
+
+/*
+ * Flags to tweak operation of default read/write/erase routines
+ */
+#define CFG_READ_TOGGLE_32BIT_ADDR 0x00000001
+#define CFG_WRITE_TOGGLE_32BIT_ADDR 0x00000002
+#define CFG_WRITE_EX_32BIT_ADDR_DELAY 0x00000004
+#define CFG_ERASESEC_TOGGLE_32BIT_ADDR 0x00000008
+#define CFG_S25FL_CHECK_ERROR_FLAGS 0x00000010
+
+struct stfsm_seq {
+ uint32_t data_size;
+ uint32_t addr1;
+ uint32_t addr2;
+ uint32_t addr_cfg;
+ uint32_t seq_opc[5];
+ uint32_t mode;
+ uint32_t dummy;
+ uint32_t status;
+ uint8_t seq[16];
+ uint32_t seq_cfg;
+} __packed __aligned(4);
+
+struct stfsm {
+ struct device *dev;
+ void __iomem *base;
+ struct resource *region;
+ struct mtd_info mtd;
+ struct mutex lock;
+ struct flash_info *info;
+
+ uint32_t configuration;
+ uint32_t fifo_dir_delay;
+ bool booted_from_spi;
+ bool reset_signal;
+ bool reset_por;
+
+ struct stfsm_seq stfsm_seq_read;
+ struct stfsm_seq stfsm_seq_write;
+ struct stfsm_seq stfsm_seq_en_32bit_addr;
+};
+
+/* Parameters to configure a READ or WRITE FSM sequence */
+struct seq_rw_config {
+ uint32_t flags; /* flags to support config */
+ uint8_t cmd; /* FLASH command */
+ int write; /* Write Sequence */
+ uint8_t addr_pads; /* No. of addr pads (MODE & DUMMY) */
+ uint8_t data_pads; /* No. of data pads */
+ uint8_t mode_data; /* MODE data */
+ uint8_t mode_cycles; /* No. of MODE cycles */
+ uint8_t dummy_cycles; /* No. of DUMMY cycles */
+};
+
+/* SPI Flash Device Table */
+struct flash_info {
+ char *name;
+ /*
+ * JEDEC id zero means "no ID" (most older chips); otherwise it has
+ * a high byte of zero plus three data bytes: the manufacturer id,
+ * then a two byte device id.
+ */
+ u32 jedec_id;
+ u16 ext_id;
+ /*
+ * The size listed here is what works with FLASH_CMD_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+ u32 flags;
+ /*
+	 * Note, where FAST_READ is supported, max_freq specifies the
+ * FAST_READ frequency, not the READ frequency.
+ */
+ u32 max_freq;
+ int (*config)(struct stfsm *);
+};
+
+static int stfsm_n25q_config(struct stfsm *fsm);
+static int stfsm_mx25_config(struct stfsm *fsm);
+static int stfsm_s25fl_config(struct stfsm *fsm);
+static int stfsm_w25q_config(struct stfsm *fsm);
+
+static struct flash_info flash_types[] = {
+ /*
+ * ST Microelectronics/Numonyx --
+	 * (newer production versions may have feature updates,
+	 * e.g. faster operating frequency)
+ */
+#define M25P_FLAG (FLASH_FLAG_READ_WRITE | FLASH_FLAG_READ_FAST)
+ { "m25p40", 0x202013, 0, 64 * 1024, 8, M25P_FLAG, 25, NULL },
+ { "m25p80", 0x202014, 0, 64 * 1024, 16, M25P_FLAG, 25, NULL },
+ { "m25p16", 0x202015, 0, 64 * 1024, 32, M25P_FLAG, 25, NULL },
+ { "m25p32", 0x202016, 0, 64 * 1024, 64, M25P_FLAG, 50, NULL },
+ { "m25p64", 0x202017, 0, 64 * 1024, 128, M25P_FLAG, 50, NULL },
+ { "m25p128", 0x202018, 0, 256 * 1024, 64, M25P_FLAG, 50, NULL },
+
+#define M25PX_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_WRITE_1_1_2)
+ { "m25px32", 0x207116, 0, 64 * 1024, 64, M25PX_FLAG, 75, NULL },
+ { "m25px64", 0x207117, 0, 64 * 1024, 128, M25PX_FLAG, 75, NULL },
+
+#define MX25_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_SE_4K | \
+ FLASH_FLAG_SE_32K)
+ { "mx25l25635e", 0xc22019, 0, 64*1024, 512,
+ (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
+ stfsm_mx25_config },
+
+#define N25Q_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_2 | \
+ FLASH_FLAG_WRITE_1_2_2 | \
+ FLASH_FLAG_WRITE_1_1_4 | \
+ FLASH_FLAG_WRITE_1_4_4)
+ { "n25q128", 0x20ba18, 0, 64 * 1024, 256, N25Q_FLAG, 108,
+ stfsm_n25q_config },
+ { "n25q256", 0x20ba19, 0, 64 * 1024, 512,
+ N25Q_FLAG | FLASH_FLAG_32BIT_ADDR, 108, stfsm_n25q_config },
+
+ /*
+ * Spansion S25FLxxxP
+ * - 256KiB and 64KiB sector variants (identified by ext. JEDEC)
+ */
+#define S25FLXXXP_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_4 | \
+ FLASH_FLAG_READ_FAST)
+ { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, S25FLXXXP_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, S25FLXXXP_FLAG, 80,
+ stfsm_s25fl_config },
+
+ /*
+ * Spansion S25FLxxxS
+ * - 256KiB and 64KiB sector variants (identified by ext. JEDEC)
+ * - RESET# signal supported by die but not bristled out on all
+ * package types. The package type is a function of board design,
+ * so this information is captured in the board's flags.
+ * - Supports 'DYB' sector protection. Depending on variant, sectors
+ * may default to locked state on power-on.
+ */
+#define S25FLXXXS_FLAG (S25FLXXXP_FLAG | \
+ FLASH_FLAG_RESET | \
+ FLASH_FLAG_DYB_LOCKING)
+ { "s25fl128s0", 0x012018, 0x0300, 256 * 1024, 64, S25FLXXXS_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl128s1", 0x012018, 0x0301, 64 * 1024, 256, S25FLXXXS_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl256s0", 0x010219, 0x4d00, 256 * 1024, 128,
+ S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config },
+ { "s25fl256s1", 0x010219, 0x4d01, 64 * 1024, 512,
+ S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config },
+
+ /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+#define W25X_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_WRITE_1_1_2)
+ { "w25x40", 0xef3013, 0, 64 * 1024, 8, W25X_FLAG, 75, NULL },
+ { "w25x80", 0xef3014, 0, 64 * 1024, 16, W25X_FLAG, 75, NULL },
+ { "w25x16", 0xef3015, 0, 64 * 1024, 32, W25X_FLAG, 75, NULL },
+ { "w25x32", 0xef3016, 0, 64 * 1024, 64, W25X_FLAG, 75, NULL },
+ { "w25x64", 0xef3017, 0, 64 * 1024, 128, W25X_FLAG, 75, NULL },
+
+ /* Winbond -- w25q "blocks" are 64K, "sectors" are 4KiB */
+#define W25Q_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_4)
+ { "w25q80", 0xef4014, 0, 64 * 1024, 16, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q16", 0xef4015, 0, 64 * 1024, 32, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q32", 0xef4016, 0, 64 * 1024, 64, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q64", 0xef4017, 0, 64 * 1024, 128, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+
+ /* Sentinel */
+ { NULL, 0x000000, 0, 0, 0, 0, 0, NULL },
+};
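The table above is terminated by a sentinel entry with a NULL name, and parts that need the extended JEDEC bytes to be distinguished carry a non-zero ext_id. A sketch of how such a table is typically walked (simplified struct; the example IDs are taken from the entries above):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_info {
	const char *name;
	uint32_t jedec_id;
	uint16_t ext_id;
};

static const struct demo_info table[] = {
	{ "s25fl129p0", 0x012018, 0x4d00 },
	{ "s25fl129p1", 0x012018, 0x4d01 },
	{ "n25q128",    0x20ba18, 0x0000 },
	{ NULL,         0x000000, 0x0000 },	/* sentinel */
};

static const struct demo_info *find(uint32_t jedec, uint16_t ext)
{
	const struct demo_info *p;

	for (p = table; p->name; p++)
		if (p->jedec_id == jedec &&
		    (p->ext_id == 0 || p->ext_id == ext))
			return p;
	return NULL;
}

int main(void)
{
	const struct demo_info *info = find(0x012018, 0x4d01);

	printf("%s\n", info ? info->name : "unknown");
	return 0;
}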
+
+/*
+ * FSM message sequence configurations:
+ *
+ * All configs are presented in order of preference
+ */
+
+/* Default READ configurations, in order of preference */
+static struct seq_rw_config default_read_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ_1_4_4, 0, 4, 4, 0x00, 2, 4},
+ {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ_1_1_4, 0, 1, 4, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ_1_2_2, 0, 2, 2, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, FLASH_CMD_READ_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
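For readability, the first (most preferred) entry above is repeated here with designated initializers; the positional fields follow struct seq_rw_config: flags, cmd, write, addr_pads, data_pads, mode_data, mode_cycles, dummy_cycles. The struct below is a local copy for the demo, not the driver's:

#include <stdio.h>
#include <stdint.h>

struct demo_rw_config {
	uint32_t flags;
	uint8_t  cmd;
	int      write;
	uint8_t  addr_pads, data_pads;
	uint8_t  mode_data, mode_cycles, dummy_cycles;
};

/* quad-I/O read: opcode 0xeb, address and data on 4 pads, 2 mode + 4 dummy cycles */
static const struct demo_rw_config quad_io_read = {
	.flags		= 0x00020000,	/* FLASH_FLAG_READ_1_4_4 */
	.cmd		= 0xeb,		/* FLASH_CMD_READ_1_4_4 */
	.write		= 0,
	.addr_pads	= 4,
	.data_pads	= 4,
	.mode_data	= 0x00,
	.mode_cycles	= 2,
	.dummy_cycles	= 4,
};

int main(void)
{
	printf("cmd=0x%02x addr_pads=%d mode_cycles=%d dummy_cycles=%d\n",
	       quad_io_read.cmd, quad_io_read.addr_pads,
	       quad_io_read.mode_cycles, quad_io_read.dummy_cycles);
	return 0;
}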
+
+/* Default WRITE configurations */
+static struct seq_rw_config default_write_configs[] = {
+ {FLASH_FLAG_WRITE_1_4_4, FLASH_CMD_WRITE_1_4_4, 1, 4, 4, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_1_4, FLASH_CMD_WRITE_1_1_4, 1, 1, 4, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_2_2, FLASH_CMD_WRITE_1_2_2, 1, 2, 2, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_1_2, FLASH_CMD_WRITE_1_1_2, 1, 1, 2, 0x00, 0, 0},
+ {FLASH_FLAG_READ_WRITE, FLASH_CMD_WRITE, 1, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [N25Qxxx] Configuration
+ */
+#define N25Q_VCR_DUMMY_CYCLES(x) (((x) & 0xf) << 4)
+#define N25Q_VCR_XIP_DISABLED ((uint8_t)0x1 << 3)
+#define N25Q_VCR_WRAP_CONT 0x3
+
+/* N25Q 3-byte Address READ configurations
+ * - 'FAST' variants configured for 8 dummy cycles.
+ *
+ * Note, the number of dummy cycles used for 'FAST' READ operations is
+ * configurable and would normally be tuned according to the READ command and
+ * operating frequency. However, this applies universally to all 'FAST' READ
+ * commands, including those used by the SPIBoot controller, and remains in
+ * force until the device is power-cycled. Since the SPIBoot controller is
+ * hard-wired to use 8 dummy cycles, we must configure the device to also use 8
+ * cycles.
+ */
+static struct seq_rw_config n25q_read3_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ_1_4_4, 0, 4, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ_1_2_2, 0, 2, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, FLASH_CMD_READ_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/* N25Q 4-byte Address READ configurations
+ * - use special 4-byte address READ commands (reduces overheads, and
+ * reduces risk of hitting watchdog reset issues).
+ * - 'FAST' variants configured for 8 dummy cycles (see note above.)
+ */
+static struct seq_rw_config n25q_read4_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ4_1_4_4, 0, 4, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ4_1_2_2, 0, 2, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, FLASH_CMD_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ4, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [MX25xxx] Configuration
+ */
+#define MX25_STATUS_QE (0x1 << 6)
+
+static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq)
+{
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_EN4B_ADDR) |
+ SEQ_OPC_CSDEASSERT);
+
+ seq->seq[0] = STFSM_INST_CMD1;
+ seq->seq[1] = STFSM_INST_WAIT;
+ seq->seq[2] = STFSM_INST_STOP;
+
+ seq->seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ);
+
+ return 0;
+}
+
+/*
+ * [S25FLxxx] Configuration
+ */
+#define STFSM_S25FL_CONFIG_QE (0x1 << 1)
+
+/*
+ * S25FLxxxS devices provide three ways of supporting 32-bit addressing: Bank
+ * Register, Extended Address Modes, and a 32-bit address command set. The
+ * 32-bit address command set is used here, since it avoids any problems with
+ * entering a state that is incompatible with the SPIBoot Controller.
+ */
+static struct seq_rw_config stfsm_s25fl_read4_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ4_1_4_4, 0, 4, 4, 0x00, 2, 4},
+ {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ4_1_2_2, 0, 2, 2, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, FLASH_CMD_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ4, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+static struct seq_rw_config stfsm_s25fl_write4_configs[] = {
+ {FLASH_FLAG_WRITE_1_1_4, S25FL_CMD_WRITE4_1_1_4, 1, 1, 4, 0x00, 0, 0},
+ {FLASH_FLAG_READ_WRITE, S25FL_CMD_WRITE4, 1, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [W25Qxxx] Configuration
+ */
+#define W25Q_STATUS_QE (0x1 << 9)
+
+static struct stfsm_seq stfsm_seq_read_jedec = {
+ .data_size = TRANSFER_SIZE(8),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_RDID)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_read_status_fifo = {
+ .data_size = TRANSFER_SIZE(4),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_RDSR)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_erase_sector = {
+ /* 'addr_cfg' configured during initialisation */
+ .seq_opc = {
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT),
+
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_SE)),
+ },
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_erase_chip = {
+ .seq_opc = {
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT),
+
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_CHIPERASE) | SEQ_OPC_CSDEASSERT),
+ },
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_WAIT,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_write_status = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_WRSR)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_STA_WR1,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_wrvcr = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_WRVCR)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_STA_WR1,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq)
+{
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_EN4B_ADDR));
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_WREN) |
+ SEQ_OPC_CSDEASSERT);
+
+ seq->seq[0] = STFSM_INST_CMD2;
+ seq->seq[1] = STFSM_INST_CMD1;
+ seq->seq[2] = STFSM_INST_WAIT;
+ seq->seq[3] = STFSM_INST_STOP;
+
+ seq->seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ);
+
+ return 0;
+}
+
+static inline int stfsm_is_idle(struct stfsm *fsm)
+{
+ return readl(fsm->base + SPI_FAST_SEQ_STA) & 0x10;
+}
+
+static inline uint32_t stfsm_fifo_available(struct stfsm *fsm)
+{
+ return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f;
+}
+
+static void stfsm_clear_fifo(struct stfsm *fsm)
+{
+ uint32_t avail;
+
+ for (;;) {
+ avail = stfsm_fifo_available(fsm);
+ if (!avail)
+ break;
+
+ while (avail) {
+ readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
+ avail--;
+ }
+ }
+}
+
+static inline void stfsm_load_seq(struct stfsm *fsm,
+ const struct stfsm_seq *seq)
+{
+ void __iomem *dst = fsm->base + SPI_FAST_SEQ_TRANSFER_SIZE;
+ const uint32_t *src = (const uint32_t *)seq;
+ int words = sizeof(*seq) / sizeof(*src);
+
+ BUG_ON(!stfsm_is_idle(fsm));
+
+ while (words--) {
+ writel(*src, dst);
+ src++;
+ dst += 4;
+ }
+}
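stfsm_load_seq() can stream the whole structure into the controller because struct stfsm_seq is packed and laid out in the same order as the SPI_FAST_SEQ_* register block starting at SPI_FAST_SEQ_TRANSFER_SIZE, so word N of the struct lands at base + 0x100 + 4*N. A small offset check on a demo copy of the first few fields (not the driver's struct):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_seq {
	uint32_t data_size;	/* -> SPI_FAST_SEQ_TRANSFER_SIZE (+0x00) */
	uint32_t addr1;		/* -> SPI_FAST_SEQ_ADD1          (+0x04) */
	uint32_t addr2;		/* -> SPI_FAST_SEQ_ADD2          (+0x08) */
	uint32_t addr_cfg;	/* -> SPI_FAST_SEQ_ADD_CFG       (+0x0c) */
} __attribute__((packed, aligned(4)));

int main(void)
{
	printf("addr_cfg sits at struct offset 0x%zx\n",
	       offsetof(struct demo_seq, addr_cfg));
	return 0;
}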
+
+static void stfsm_wait_seq(struct stfsm *fsm)
+{
+ unsigned long deadline;
+ int timeout = 0;
+
+ deadline = jiffies + msecs_to_jiffies(STFSM_MAX_WAIT_SEQ_MS);
+
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ if (stfsm_is_idle(fsm))
+ return;
+
+ cond_resched();
+ }
+
+ dev_err(fsm->dev, "timeout on sequence completion\n");
+}
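The wait helper above uses the common bounded-polling shape: compute an absolute deadline once, then poll the status until the condition holds or the deadline passes, yielding the CPU between checks. A userspace sketch of the same shape (the hardware_idle() stub stands in for reading the FSM status register):

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static bool hardware_idle(void)
{
	return false;	/* stub: always busy, so the wait below times out */
}

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static int wait_idle(int timeout_ms)
{
	long long deadline = now_ms() + timeout_ms;

	do {
		if (hardware_idle())
			return 0;
	} while (now_ms() < deadline);

	fprintf(stderr, "timeout waiting for idle\n");
	return -1;
}

int main(void)
{
	return wait_idle(10) ? 1 : 0;
}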
+
+static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size)
+{
+ uint32_t remaining = size >> 2;
+ uint32_t avail;
+ uint32_t words;
+
+ dev_dbg(fsm->dev, "Reading %d bytes from FIFO\n", size);
+
+ BUG_ON((((uint32_t)buf) & 0x3) || (size & 0x3));
+
+ while (remaining) {
+ for (;;) {
+ avail = stfsm_fifo_available(fsm);
+ if (avail)
+ break;
+ udelay(1);
+ }
+ words = min(avail, remaining);
+ remaining -= words;
+
+ readsl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
+ buf += words;
+ }
+}
+
+static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
+ uint32_t size)
+{
+ uint32_t words = size >> 2;
+
+ dev_dbg(fsm->dev, "writing %d bytes to FIFO\n", size);
+
+ BUG_ON((((uint32_t)buf) & 0x3) || (size & 0x3));
+
+ writesl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
+
+ return size;
+}
+
+static int stfsm_enter_32bit_addr(struct stfsm *fsm, int enter)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_en_32bit_addr;
+ uint32_t cmd = enter ? FLASH_CMD_EN4B_ADDR : FLASH_CMD_EX4B_ADDR;
+
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cmd) |
+ SEQ_OPC_CSDEASSERT);
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+static uint8_t stfsm_wait_busy(struct stfsm *fsm)
+{
+ struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
+ unsigned long deadline;
+ uint32_t status;
+ int timeout = 0;
+
+ /* Use RDRS1 */
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_RDSR));
+
+ /* Load read_status sequence */
+ stfsm_load_seq(fsm, seq);
+
+ /*
+ * Repeat until busy bit is deasserted, or timeout, or error (S25FLxxxS)
+ */
+ deadline = jiffies + FLASH_MAX_BUSY_WAIT;
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ stfsm_wait_seq(fsm);
+
+ stfsm_read_fifo(fsm, &status, 4);
+
+ if ((status & FLASH_STATUS_BUSY) == 0)
+ return 0;
+
+ if ((fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS) &&
+ ((status & S25FL_STATUS_P_ERR) ||
+ (status & S25FL_STATUS_E_ERR)))
+ return (uint8_t)(status & 0xff);
+
+ if (!timeout)
+ /* Restart */
+ writel(seq->seq_cfg, fsm->base + SPI_FAST_SEQ_CFG);
+
+ cond_resched();
+ }
+
+ dev_err(fsm->dev, "timeout on wait_busy\n");
+
+ return FLASH_STATUS_TIMEOUT;
+}
+
+static int stfsm_read_status(struct stfsm *fsm, uint8_t cmd,
+ uint8_t *status)
+{
+ struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
+ uint32_t tmp;
+
+ dev_dbg(fsm->dev, "reading STA[%s]\n",
+ (cmd == FLASH_CMD_RDSR) ? "1" : "2");
+
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cmd)),
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_read_fifo(fsm, &tmp, 4);
+
+ *status = (uint8_t)(tmp >> 24);
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+static int stfsm_write_status(struct stfsm *fsm, uint16_t status,
+ int sta_bytes)
+{
+ struct stfsm_seq *seq = &stfsm_seq_write_status;
+
+ dev_dbg(fsm->dev, "writing STA[%s] 0x%04x\n",
+ (sta_bytes == 1) ? "1" : "1+2", status);
+
+ seq->status = (uint32_t)status | STA_PADS_1 | STA_CSDEASSERT;
+ seq->seq[2] = (sta_bytes == 1) ?
+ STFSM_INST_STA_WR1 : STFSM_INST_STA_WR1_2;
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+};
+
+static int stfsm_wrvcr(struct stfsm *fsm, uint8_t data)
+{
+ struct stfsm_seq *seq = &stfsm_seq_wrvcr;
+
+ dev_dbg(fsm->dev, "writing VCR 0x%02x\n", data);
+
+ seq->status = (STA_DATA_BYTE1(data) | STA_PADS_1 | STA_CSDEASSERT);
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+/*
+ * SoC reset on 'boot-from-spi' systems
+ *
+ * Certain modes of operation cause the Flash device to enter a particular state
+ * for a period of time (e.g. 'Erase Sector', 'Quad Enable', and 'Enter 32-bit
+ * Addr' commands). On boot-from-spi systems, it is important to consider what
+ * happens if a warm reset occurs during this period. The SPIBoot controller
+ * assumes that Flash device is in its default reset state, 24-bit address mode,
+ * and ready to accept commands. This can be achieved using some form of
+ * on-board logic/controller to force a device POR in response to a SoC-level
+ * reset or by making use of the device reset signal if available (limited
+ * number of devices only).
+ *
+ * Failure to take such precautions can cause problems following a warm reset.
+ * For some operations (e.g. ERASE), there is little that can be done. For
+ * other modes of operation (e.g. 32-bit addressing), options are often
+ * available that can help minimise the window in which a reset could cause a
+ * problem.
+ *
+ */
+static bool stfsm_can_handle_soc_reset(struct stfsm *fsm)
+{
+ /* Reset signal is available on the board and supported by the device */
+ if (fsm->reset_signal && fsm->info->flags & FLASH_FLAG_RESET)
+ return true;
+
+ /* Board-level logic forces a power-on-reset */
+ if (fsm->reset_por)
+ return true;
+
+ /* Reset is not properly handled and may result in failure to reboot */
+ return false;
+}
+
+/* Configure 'addr_cfg' according to addressing mode */
+static void stfsm_prepare_erasesec_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq)
+{
+ int addr1_cycles = fsm->info->flags & FLASH_FLAG_32BIT_ADDR ? 16 : 8;
+
+ seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(addr1_cycles) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2 |
+ ADR_CFG_CSDEASSERT_ADD2);
+}
+
+/* Search for preferred configuration based on available flags */
+static struct seq_rw_config *
+stfsm_search_seq_rw_configs(struct stfsm *fsm,
+ struct seq_rw_config cfgs[])
+{
+ struct seq_rw_config *config;
+ int flags = fsm->info->flags;
+
+ for (config = cfgs; config->cmd != 0; config++)
+ if ((config->flags & flags) == config->flags)
+ return config;
+
+ return NULL;
+}
+
+/* Prepare a READ/WRITE sequence according to configuration parameters */
+static void stfsm_prepare_rw_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq,
+ struct seq_rw_config *cfg)
+{
+ int addr1_cycles, addr2_cycles;
+ int i = 0;
+
+ memset(seq, 0, sizeof(*seq));
+
+ /* Add READ/WRITE OPC */
+ seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cfg->cmd));
+
+ /* Add WREN OPC for a WRITE sequence */
+ if (cfg->write)
+ seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_WREN) |
+ SEQ_OPC_CSDEASSERT);
+
+ /* Address configuration (24 or 32-bit addresses) */
+ addr1_cycles = (fsm->info->flags & FLASH_FLAG_32BIT_ADDR) ? 16 : 8;
+ addr1_cycles /= cfg->addr_pads;
+ addr2_cycles = 16 / cfg->addr_pads;
+ seq->addr_cfg = ((addr1_cycles & 0x3f) << 0 | /* ADD1 cycles */
+ (cfg->addr_pads - 1) << 6 | /* ADD1 pads */
+ (addr2_cycles & 0x3f) << 16 | /* ADD2 cycles */
+ ((cfg->addr_pads - 1) << 22)); /* ADD2 pads */
+
+ /* Data/Sequence configuration */
+ seq->seq_cfg = ((cfg->data_pads - 1) << 16 |
+ SEQ_CFG_STARTSEQ |
+ SEQ_CFG_CSDEASSERT);
+ if (!cfg->write)
+ seq->seq_cfg |= SEQ_CFG_READNOTWRITE;
+
+ /* Mode configuration (no. of pads taken from addr cfg) */
+ seq->mode = ((cfg->mode_data & 0xff) << 0 | /* data */
+ (cfg->mode_cycles & 0x3f) << 16 | /* cycles */
+ (cfg->addr_pads - 1) << 22); /* pads */
+
+ /* Dummy configuration (no. of pads taken from addr cfg) */
+ seq->dummy = ((cfg->dummy_cycles & 0x3f) << 16 | /* cycles */
+ (cfg->addr_pads - 1) << 22); /* pads */
+
+ /* Instruction sequence */
+ i = 0;
+ if (cfg->write)
+ seq->seq[i++] = STFSM_INST_CMD2;
+
+ seq->seq[i++] = STFSM_INST_CMD1;
+
+ seq->seq[i++] = STFSM_INST_ADD1;
+ seq->seq[i++] = STFSM_INST_ADD2;
+
+ if (cfg->mode_cycles)
+ seq->seq[i++] = STFSM_INST_MODE;
+
+ if (cfg->dummy_cycles)
+ seq->seq[i++] = STFSM_INST_DUMMY;
+
+ seq->seq[i++] =
+ cfg->write ? STFSM_INST_DATA_WRITE : STFSM_INST_DATA_READ;
+ seq->seq[i++] = STFSM_INST_STOP;
+}
+
+static int stfsm_search_prepare_rw_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq,
+ struct seq_rw_config *cfgs)
+{
+ struct seq_rw_config *config;
+
+ config = stfsm_search_seq_rw_configs(fsm, cfgs);
+ if (!config) {
+ dev_err(fsm->dev, "failed to find suitable config\n");
+ return -EINVAL;
+ }
+
+ stfsm_prepare_rw_seq(fsm, seq, config);
+
+ return 0;
+}
+
+/* Prepare the READ/WRITE/ERASE 'default' sequences */
+static int stfsm_prepare_rwe_seqs_default(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ int ret;
+
+ /* Configure 'READ' sequence */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ default_read_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prep READ sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'WRITE' sequence */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ default_write_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prep WRITE sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'ERASE_SECTOR' sequence */
+ stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector);
+
+ return 0;
+}
+
+static int stfsm_mx25_config(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ uint32_t data_pads;
+ uint8_t sta;
+ int ret;
+ bool soc_reset;
+
+ /*
+ * Use default READ/WRITE sequences
+ */
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure 32-bit Address Support
+ */
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ /* Configure 'enter_32bitaddr' FSM sequence */
+ stfsm_mx25_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
+
+ soc_reset = stfsm_can_handle_soc_reset(fsm);
+ if (soc_reset || !fsm->booted_from_spi) {
+ /*
+  * If we can handle SoC resets, we enable 32-bit address
+  * mode pervasively
+  */
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ } else {
+ /*
+  * Else, enable/disable 32-bit addressing before/after
+  * each operation
+  */
+ fsm->configuration = (CFG_READ_TOGGLE_32BIT_ADDR |
+ CFG_WRITE_TOGGLE_32BIT_ADDR |
+ CFG_ERASESEC_TOGGLE_32BIT_ADDR);
+ /*
+  * It seems a small delay is required after exiting
+  * 32-bit mode following a write operation. The issue
+  * is under investigation.
+  */
+ fsm->configuration |= CFG_WRITE_EX_32BIT_ADDR_DELAY;
+ }
+ }
+
+ /* For QUAD mode, set 'QE' STATUS bit */
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ if (data_pads == 4) {
+ stfsm_read_status(fsm, FLASH_CMD_RDSR, &sta);
+ sta |= MX25_STATUS_QE;
+ stfsm_write_status(fsm, sta, 1);
+ }
+
+ return 0;
+}
+
+static int stfsm_n25q_config(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ uint8_t vcr;
+ int ret = 0;
+ bool soc_reset;
+
+ /* Configure 'READ' sequence */
+ if (flags & FLASH_FLAG_32BIT_ADDR)
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ n25q_read4_configs);
+ else
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ n25q_read3_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prepare READ sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'WRITE' sequence (default configs) */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ default_write_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "preparing WRITE sequence using flags [0x%08x] failed\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'ERASE_SECTOR' sequence */
+ stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector);
+
+ /* Configure 32-bit address support */
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ stfsm_n25q_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
+
+ soc_reset = stfsm_can_handle_soc_reset(fsm);
+ if (soc_reset || !fsm->booted_from_spi) {
+ /*
+ * If we can handle SoC resets, we enable 32-bit
+ * address mode pervasively
+ */
+ stfsm_enter_32bit_addr(fsm, 1);
+ } else {
+ /*
+ * If not, enable/disable for WRITE and ERASE
+ * operations (READ uses special commands)
+ */
+ fsm->configuration = (CFG_WRITE_TOGGLE_32BIT_ADDR |
+ CFG_ERASESEC_TOGGLE_32BIT_ADDR);
+ }
+ }
+
+ /*
+ * Configure device to use 8 dummy cycles
+ */
+ vcr = (N25Q_VCR_DUMMY_CYCLES(8) | N25Q_VCR_XIP_DISABLED |
+ N25Q_VCR_WRAP_CONT);
+ stfsm_wrvcr(fsm, vcr);
+
+ return 0;
+}
+
+static void stfsm_s25fl_prepare_erasesec_seq_32(struct stfsm_seq *seq)
+{
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_SE4));
+
+ seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2 |
+ ADR_CFG_CSDEASSERT_ADD2);
+}
+
+static void stfsm_s25fl_read_dyb(struct stfsm *fsm, uint32_t offs, uint8_t *dby)
+{
+ uint32_t tmp;
+ struct stfsm_seq seq = {
+ .data_size = TRANSFER_SIZE(4),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_DYBRD)),
+ .addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2),
+ .addr1 = (offs >> 16) & 0xffff,
+ .addr2 = offs & 0xffff,
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+
+ stfsm_read_fifo(fsm, &tmp, 4);
+
+ *dby = (uint8_t)(tmp >> 24);
+
+ stfsm_wait_seq(fsm);
+}
+
+static void stfsm_s25fl_write_dyb(struct stfsm *fsm, uint32_t offs, uint8_t dby)
+{
+ struct stfsm_seq seq = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_WREN) |
+ SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_DYBWR)),
+ .addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2),
+ .status = (uint32_t)dby | STA_PADS_1 | STA_CSDEASSERT,
+ .addr1 = (offs >> 16) & 0xffff,
+ .addr2 = offs & 0xffff,
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_STA_WR1,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+ stfsm_wait_seq(fsm);
+
+ stfsm_wait_busy(fsm);
+}
+
+static int stfsm_s25fl_clear_status_reg(struct stfsm *fsm)
+{
+ struct stfsm_seq seq = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_CLSR) |
+ SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(FLASH_CMD_WRDI) |
+ SEQ_OPC_CSDEASSERT),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_WAIT,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+static int stfsm_s25fl_config(struct stfsm *fsm)
+{
+ struct flash_info *info = fsm->info;
+ uint32_t flags = info->flags;
+ uint32_t data_pads;
+ uint32_t offs;
+ uint16_t sta_wr;
+ uint8_t sr1, cr1, dyb;
+ int ret;
+
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ /*
+ * Prepare Read/Write/Erase sequences according to S25FLxxx
+ * 32-bit address command set
+ */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ stfsm_s25fl_read4_configs);
+ if (ret)
+ return ret;
+
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ stfsm_s25fl_write4_configs);
+ if (ret)
+ return ret;
+
+ stfsm_s25fl_prepare_erasesec_seq_32(&stfsm_seq_erase_sector);
+
+ } else {
+ /* Use default configurations for 24-bit addressing */
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * For devices that support 'DYB' sector locking, check lock status and
+ * unlock sectors if necessary (some variants power-on with sectors
+ * locked by default)
+ */
+ if (flags & FLASH_FLAG_DYB_LOCKING) {
+ for (offs = 0; offs < info->sector_size * info->n_sectors;) {
+ stfsm_s25fl_read_dyb(fsm, offs, &dyb);
+ if (dyb == 0x00)
+ stfsm_s25fl_write_dyb(fsm, offs, 0xff);
+
+ /* Handle bottom/top 4KiB parameter sectors */
+ if ((offs < info->sector_size * 2) ||
+ (offs >= (info->sector_size - info->n_sectors * 4)))
+ offs += 0x1000;
+ else
+ offs += 0x10000;
+ }
+ }
+
+ /* Check status of 'QE' bit */
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ stfsm_read_status(fsm, FLASH_CMD_RDSR2, &cr1);
+ if (data_pads == 4) {
+ if (!(cr1 & STFSM_S25FL_CONFIG_QE)) {
+ /* Set 'QE' */
+ cr1 |= STFSM_S25FL_CONFIG_QE;
+
+ stfsm_read_status(fsm, FLASH_CMD_RDSR, &sr1);
+ sta_wr = ((uint16_t)cr1 << 8) | sr1;
+
+ stfsm_write_status(fsm, sta_wr, 2);
+
+ stfsm_wait_busy(fsm);
+ }
+ } else {
+ if ((cr1 & STFSM_S25FL_CONFIG_QE)) {
+ /* Clear 'QE' */
+ cr1 &= ~STFSM_S25FL_CONFIG_QE;
+
+ stfsm_read_status(fsm, FLASH_CMD_RDSR, &sr1);
+ sta_wr = ((uint16_t)cr1 << 8) | sr1;
+
+ stfsm_write_status(fsm, sta_wr, 2);
+
+ stfsm_wait_busy(fsm);
+ }
+ }
+
+ /*
+ * S25FLxxx devices support Program and Erase error flags.
+ * Configure driver to check flags and clear if necessary.
+ */
+ fsm->configuration |= CFG_S25FL_CHECK_ERROR_FLAGS;
+
+ return 0;
+}
+
+static int stfsm_w25q_config(struct stfsm *fsm)
+{
+ uint32_t data_pads;
+ uint16_t sta_wr;
+ uint8_t sta1, sta2;
+ int ret;
+
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+
+ /* If using QUAD mode, set QE STATUS bit */
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ if (data_pads == 4) {
+ stfsm_read_status(fsm, FLASH_CMD_RDSR, &sta1);
+ stfsm_read_status(fsm, FLASH_CMD_RDSR2, &sta2);
+
+ sta_wr = ((uint16_t)sta2 << 8) | sta1;
+
+ sta_wr |= W25Q_STATUS_QE;
+
+ stfsm_write_status(fsm, sta_wr, 2);
+
+ stfsm_wait_busy(fsm);
+ }
+
+ return 0;
+}
+
+static int stfsm_read(struct stfsm *fsm, uint8_t *buf, uint32_t size,
+ uint32_t offset)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_read;
+ uint32_t data_pads;
+ uint32_t read_mask;
+ uint32_t size_ub;
+ uint32_t size_lb;
+ uint32_t size_mop;
+ uint32_t tmp[4];
+ uint32_t page_buf[FLASH_PAGESIZE_32];
+ uint8_t *p;
+
+ dev_dbg(fsm->dev, "reading %d bytes from 0x%08x\n", size, offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ /* Must read in multiples of 32 cycles (or 32*pads/8 Bytes) */
+ data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1;
+ read_mask = (data_pads << 2) - 1;
+
+ /* Handle non-aligned buf */
+ p = ((uint32_t)buf & 0x3) ? (uint8_t *)page_buf : buf;
+
+ /* Handle non-aligned size */
+ size_ub = (size + read_mask) & ~read_mask;
+ size_lb = size & ~read_mask;
+ size_mop = size & read_mask;
+
+ seq->data_size = TRANSFER_SIZE(size_ub);
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ stfsm_load_seq(fsm, seq);
+
+ if (size_lb)
+ stfsm_read_fifo(fsm, (uint32_t *)p, size_lb);
+
+ if (size_mop) {
+ stfsm_read_fifo(fsm, tmp, read_mask + 1);
+ memcpy(p + size_lb, &tmp, size_mop);
+ }
+
+ /* Handle non-aligned buf */
+ if ((uint32_t)buf & 0x3)
+ memcpy(buf, page_buf, size);
+
+ /* Wait for sequence to finish */
+ stfsm_wait_seq(fsm);
+
+ stfsm_clear_fifo(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 0);
+
+ return 0;
+}
+
+static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
+ uint32_t size, uint32_t offset)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_write;
+ uint32_t data_pads;
+ uint32_t write_mask;
+ uint32_t size_ub;
+ uint32_t size_lb;
+ uint32_t size_mop;
+ uint32_t tmp[4];
+ uint32_t page_buf[FLASH_PAGESIZE_32];
+ uint8_t *t = (uint8_t *)&tmp;
+ const uint8_t *p;
+ int ret;
+ int i;
+
+ dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ /* Must write in multiples of 32 cycles (or 32*pads/8 bytes) */
+ data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1;
+ write_mask = (data_pads << 2) - 1;
+
+ /* Handle non-aligned buf */
+ if ((uint32_t)buf & 0x3) {
+ memcpy(page_buf, buf, size);
+ p = (uint8_t *)page_buf;
+ } else {
+ p = buf;
+ }
+
+ /* Handle non-aligned size */
+ size_ub = (size + write_mask) & ~write_mask;
+ size_lb = size & ~write_mask;
+ size_mop = size & write_mask;
+
+ seq->data_size = TRANSFER_SIZE(size_ub);
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ /* Need to set FIFO to write mode, before writing data to FIFO (see
+ * GNBvb79594)
+ */
+ writel(0x00040000, fsm->base + SPI_FAST_SEQ_CFG);
+
+ /*
+ * Before writing data to the FIFO, apply a small delay to allow a
+ * potential change of FIFO direction to complete.
+ */
+ if (fsm->fifo_dir_delay == 0)
+ readl(fsm->base + SPI_FAST_SEQ_CFG);
+ else
+ udelay(fsm->fifo_dir_delay);
+
+ /* Write data to FIFO, before starting sequence (see GNBvd79593) */
+ if (size_lb) {
+ stfsm_write_fifo(fsm, (uint32_t *)p, size_lb);
+ p += size_lb;
+ }
+
+ /* Handle non-aligned size */
+ if (size_mop) {
+ memset(t, 0xff, write_mask + 1); /* fill with 0xff's */
+ for (i = 0; i < size_mop; i++)
+ t[i] = *p++;
+
+ stfsm_write_fifo(fsm, tmp, write_mask + 1);
+ }
+
+ /* Start sequence */
+ stfsm_load_seq(fsm, seq);
+
+ /* Wait for sequence to finish */
+ stfsm_wait_seq(fsm);
+
+ /* Wait for completion */
+ ret = stfsm_wait_busy(fsm);
+ if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS)
+ stfsm_s25fl_clear_status_reg(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR) {
+ stfsm_enter_32bit_addr(fsm, 0);
+ if (fsm->configuration & CFG_WRITE_EX_32BIT_ADDR_DELAY)
+ udelay(1);
+ }
+
+ return 0;
+}
+
+/*
+ * Read an address range from the flash chip. The address range
+ * may be any size provided it is within the physical boundaries.
+ */
+static int stfsm_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+ uint32_t bytes;
+
+ dev_dbg(fsm->dev, "%s from 0x%08x, len %zd\n",
+ __func__, (u32)from, len);
+
+ mutex_lock(&fsm->lock);
+
+ while (len > 0) {
+ bytes = min_t(size_t, len, FLASH_PAGESIZE);
+
+ stfsm_read(fsm, buf, bytes, from);
+
+ buf += bytes;
+ from += bytes;
+ len -= bytes;
+
+ *retlen += bytes;
+ }
+
+ mutex_unlock(&fsm->lock);
+
+ return 0;
+}
+
+static int stfsm_erase_sector(struct stfsm *fsm, uint32_t offset)
+{
+ struct stfsm_seq *seq = &stfsm_seq_erase_sector;
+ int ret;
+
+ dev_dbg(fsm->dev, "erasing sector at 0x%08x\n", offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ /* Wait for completion */
+ ret = stfsm_wait_busy(fsm);
+ if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS)
+ stfsm_s25fl_clear_status_reg(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 0);
+
+ return ret;
+}
+
+static int stfsm_erase_chip(struct stfsm *fsm)
+{
+ const struct stfsm_seq *seq = &stfsm_seq_erase_chip;
+
+ dev_dbg(fsm->dev, "erasing chip\n");
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ return stfsm_wait_busy(fsm);
+}
+
+/*
+ * Write an address range to the flash chip. Data must be written in
+ * FLASH_PAGESIZE chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int stfsm_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+
+ u32 page_offs;
+ u32 bytes;
+ uint8_t *b = (uint8_t *)buf;
+ int ret = 0;
+
+ dev_dbg(fsm->dev, "%s to 0x%08x, len %zd\n", __func__, (u32)to, len);
+
+ /* Offset within page */
+ page_offs = to % FLASH_PAGESIZE;
+
+ mutex_lock(&fsm->lock);
+
+ while (len) {
+ /* Write up to page boundary */
+ bytes = min(FLASH_PAGESIZE - page_offs, len);
+
+ ret = stfsm_write(fsm, b, bytes, to);
+ if (ret)
+ goto out1;
+
+ b += bytes;
+ len -= bytes;
+ to += bytes;
+
+ /* We are now page-aligned */
+ page_offs = 0;
+
+ *retlen += bytes;
+ }
+
+out1:
+ mutex_unlock(&fsm->lock);
+
+ return ret;
+}
+
+/*
+ * Erase an address range on the flash chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int stfsm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+ u32 addr, len;
+ int ret;
+
+ dev_dbg(fsm->dev, "%s at 0x%llx, len %lld\n", __func__,
+ (long long)instr->addr, (long long)instr->len);
+
+ addr = instr->addr;
+ len = instr->len;
+
+ mutex_lock(&fsm->lock);
+
+ /* Whole-chip erase? */
+ if (len == mtd->size) {
+ ret = stfsm_erase_chip(fsm);
+ if (ret)
+ goto out1;
+ } else {
+ while (len) {
+ ret = stfsm_erase_sector(fsm, addr);
+ if (ret)
+ goto out1;
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+ }
+
+ mutex_unlock(&fsm->lock);
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+
+out1:
+ instr->state = MTD_ERASE_FAILED;
+ mutex_unlock(&fsm->lock);
+
+ return ret;
+}
+
+static void stfsm_read_jedec(struct stfsm *fsm, uint8_t *jedec)
+{
+ const struct stfsm_seq *seq = &stfsm_seq_read_jedec;
+ uint32_t tmp[2];
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_read_fifo(fsm, tmp, 8);
+
+ memcpy(jedec, tmp, 5);
+
+ stfsm_wait_seq(fsm);
+}
+
+static struct flash_info *stfsm_jedec_probe(struct stfsm *fsm)
+{
+ struct flash_info *info;
+ u16 ext_jedec;
+ u32 jedec;
+ u8 id[5];
+
+ stfsm_read_jedec(fsm, id);
+
+ jedec = id[0] << 16 | id[1] << 8 | id[2];
+ /*
+ * JEDEC also defines an optional "extended device information"
+ * string that follows the three bytes we use here. Supporting
+ * some chips might require using it.
+ */
+ ext_jedec = id[3] << 8 | id[4];
+
+ dev_dbg(fsm->dev, "JEDEC = 0x%08x [%02x %02x %02x %02x %02x]\n",
+ jedec, id[0], id[1], id[2], id[3], id[4]);
+
+ for (info = flash_types; info->name; info++) {
+ if (info->jedec_id == jedec) {
+ if (info->ext_id && info->ext_id != ext_jedec)
+ continue;
+ return info;
+ }
+ }
+ dev_err(fsm->dev, "Unrecognized JEDEC id %06x\n", jedec);
+
+ return NULL;
+}
+
+static int stfsm_set_mode(struct stfsm *fsm, uint32_t mode)
+{
+ int ret, timeout = 10;
+
+ /* Wait for controller to accept mode change */
+ while (--timeout) {
+ ret = readl(fsm->base + SPI_STA_MODE_CHANGE);
+ if (ret & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (!timeout)
+ return -EBUSY;
+
+ writel(mode, fsm->base + SPI_MODESELECT);
+
+ return 0;
+}
+
+static void stfsm_set_freq(struct stfsm *fsm, uint32_t spi_freq)
+{
+ uint32_t emi_freq;
+ uint32_t clk_div;
+
+ /* TODO: Make this dynamic */
+ emi_freq = STFSM_DEFAULT_EMI_FREQ;
+
+ /*
+ * Calculate clk_div - values between 2 and 128
+ * Multiple of 2, rounded up
+ */
+ clk_div = 2 * DIV_ROUND_UP(emi_freq, 2 * spi_freq);
+ if (clk_div < 2)
+ clk_div = 2;
+ else if (clk_div > 128)
+ clk_div = 128;
+
+ /*
+ * Determine a suitable delay for the IP to complete a change of
+ * direction of the FIFO. The required delay is related to the clock
+ * divider used. The following heuristics are based on empirical tests,
+ * using a 100MHz EMI clock.
+ */
+ if (clk_div <= 4)
+ fsm->fifo_dir_delay = 0;
+ else if (clk_div <= 10)
+ fsm->fifo_dir_delay = 1;
+ else
+ fsm->fifo_dir_delay = DIV_ROUND_UP(clk_div, 10);
+
+ dev_dbg(fsm->dev, "emi_clk = %uHZ, spi_freq = %uHZ, clk_div = %u\n",
+ emi_freq, spi_freq, clk_div);
+
+ writel(clk_div, fsm->base + SPI_CLOCKDIV);
+}
+
+static int stfsm_init(struct stfsm *fsm)
+{
+ int ret;
+
+ /* Perform a soft reset of the FSM controller */
+ writel(SEQ_CFG_SWRESET, fsm->base + SPI_FAST_SEQ_CFG);
+ udelay(1);
+ writel(0, fsm->base + SPI_FAST_SEQ_CFG);
+
+ /* Set clock to 'safe' frequency initially */
+ stfsm_set_freq(fsm, STFSM_FLASH_SAFE_FREQ);
+
+ /* Switch to FSM */
+ ret = stfsm_set_mode(fsm, SPI_MODESELECT_FSM);
+ if (ret)
+ return ret;
+
+ /* Set timing parameters */
+ writel(SPI_CFG_DEVICE_ST |
+ SPI_CFG_DEFAULT_MIN_CS_HIGH |
+ SPI_CFG_DEFAULT_CS_SETUPHOLD |
+ SPI_CFG_DEFAULT_DATA_HOLD,
+ fsm->base + SPI_CONFIGDATA);
+ writel(STFSM_DEFAULT_WR_TIME, fsm->base + SPI_STATUS_WR_TIME_REG);
+
+ /* Clear FIFO, just in case */
+ stfsm_clear_fifo(fsm);
+
+ return 0;
+}
+
+static void stfsm_fetch_platform_configs(struct platform_device *pdev)
+{
+ struct stfsm *fsm = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct regmap *regmap;
+ uint32_t boot_device_reg;
+ uint32_t boot_device_spi;
+ uint32_t boot_device; /* Value we read from *boot_device_reg */
+ int ret;
+
+ /* Booting from SPI NOR Flash is the default */
+ fsm->booted_from_spi = true;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(regmap))
+ goto boot_device_fail;
+
+ fsm->reset_signal = of_property_read_bool(np, "st,reset-signal");
+
+ fsm->reset_por = of_property_read_bool(np, "st,reset-por");
+
+ /* Where in the syscon the boot device information lives */
+ ret = of_property_read_u32(np, "st,boot-device-reg", &boot_device_reg);
+ if (ret)
+ goto boot_device_fail;
+
+ /* Boot device value when booted from SPI NOR */
+ ret = of_property_read_u32(np, "st,boot-device-spi", &boot_device_spi);
+ if (ret)
+ goto boot_device_fail;
+
+ ret = regmap_read(regmap, boot_device_reg, &boot_device);
+ if (ret)
+ goto boot_device_fail;
+
+ if (boot_device != boot_device_spi)
+ fsm->booted_from_spi = false;
+
+ return;
+
+boot_device_fail:
+ dev_warn(&pdev->dev,
+ "failed to fetch boot device, assuming boot from SPI\n");
+}
+
+static int stfsm_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+ struct flash_info *info;
+ struct resource *res;
+ struct stfsm *fsm;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "No DT found\n");
+ return -EINVAL;
+ }
+ ppdata.of_node = np;
+
+ fsm = devm_kzalloc(&pdev->dev, sizeof(*fsm), GFP_KERNEL);
+ if (!fsm)
+ return -ENOMEM;
+
+ fsm->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, fsm);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Resource not found\n");
+ return -ENODEV;
+ }
+
+ fsm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsm->base)) {
+ dev_err(&pdev->dev,
+ "Failed to reserve memory region %pR\n", res);
+ return PTR_ERR(fsm->base);
+ }
+
+ mutex_init(&fsm->lock);
+
+ ret = stfsm_init(fsm);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialise FSM Controller\n");
+ return ret;
+ }
+
+ stfsm_fetch_platform_configs(pdev);
+
+ /* Detect SPI FLASH device */
+ info = stfsm_jedec_probe(fsm);
+ if (!info)
+ return -ENODEV;
+ fsm->info = info;
+
+ /* Use device size to determine address width */
+ if (info->sector_size * info->n_sectors > 0x1000000)
+ info->flags |= FLASH_FLAG_32BIT_ADDR;
+
+ /*
+ * Configure READ/WRITE/ERASE sequences according to platform and
+ * device flags.
+ */
+ if (info->config) {
+ ret = info->config(fsm);
+ if (ret)
+ return ret;
+ } else {
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+ }
+
+ fsm->mtd.name = info->name;
+ fsm->mtd.dev.parent = &pdev->dev;
+ fsm->mtd.type = MTD_NORFLASH;
+ fsm->mtd.writesize = 4;
+ fsm->mtd.writebufsize = fsm->mtd.writesize;
+ fsm->mtd.flags = MTD_CAP_NORFLASH;
+ fsm->mtd.size = info->sector_size * info->n_sectors;
+ fsm->mtd.erasesize = info->sector_size;
+
+ fsm->mtd._read = stfsm_mtd_read;
+ fsm->mtd._write = stfsm_mtd_write;
+ fsm->mtd._erase = stfsm_mtd_erase;
+
+ dev_info(&pdev->dev,
+ "Found serial flash device: %s\n"
+ " size = %llx (%lldMiB) erasesize = 0x%08x (%uKiB)\n",
+ info->name,
+ (long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20),
+ fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10));
+
+ return mtd_device_parse_register(&fsm->mtd, NULL, &ppdata, NULL, 0);
+}
+
+static int stfsm_remove(struct platform_device *pdev)
+{
+ struct stfsm *fsm = platform_get_drvdata(pdev);
+
+ return mtd_device_unregister(&fsm->mtd);
+}
+
+static struct of_device_id stfsm_match[] = {
+ { .compatible = "st,spi-fsm", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stfsm_match);
+
+static struct platform_driver stfsm_driver = {
+ .probe = stfsm_probe,
+ .remove = stfsm_remove,
+ .driver = {
+ .name = "st-spi-fsm",
+ .owner = THIS_MODULE,
+ .of_match_table = stfsm_match,
+ },
+};
+module_platform_driver(stfsm_driver);
+
+MODULE_AUTHOR("Angus Clark <angus.clark@st.com>");
+MODULE_DESCRIPTION("ST SPI FSM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 4adc0374fb6b..487e64f411a5 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -30,7 +30,6 @@
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nftl.h>
#include <linux/mtd/inftl.h>
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index d38b6460d505..018c75faadb3 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -55,10 +55,8 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
int i, j;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_ERR "Failed to allocate memory for MTD device\n");
+ if (!mtd)
return NULL;
- }
mtd->priv = map;
mtd->type = MTD_NORFLASH;
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index 45abed67f1ef..69f2112340b1 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -135,11 +135,8 @@ static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
{
lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
- if (!lpddr->qinfo) {
- printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n",
- map->name);
+ if (!lpddr->qinfo)
return 0;
- }
/* Get the ManuID */
lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 310dc7c93425..fce23fe043f7 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -66,11 +66,11 @@ config MTD_PHYSMAP_BANKWIDTH
used internally by the CFI drivers.
config MTD_PHYSMAP_OF
- tristate "Flash device in physical memory map based on OF description"
- depends on OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
+ tristate "Memory device in physical memory map based on OF description"
+ depends on OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_RAM)
help
- This provides a 'mapping' driver which allows the NOR Flash and
- ROM driver code to communicate with chips which are mapped
+ This provides a 'mapping' driver which allows the NOR Flash, ROM
+ and RAM driver code to communicate with chips which are mapped
physically into the CPU's memory. The mapping description here is
taken from OF device tree.
@@ -124,7 +124,7 @@ config MTD_NETSC520
config MTD_TS5500
tristate "JEDEC Flash device mapped on Technologic Systems TS-5500"
- depends on X86
+ depends on TS5500 || COMPILE_TEST
select MTD_JEDECPROBE
select MTD_CFI_AMDSTD
help
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 5434d8ded015..6ea51e549045 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -14,7 +14,6 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 1adba86474a5..a4c477b9fdd6 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -14,7 +14,6 @@
*/
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 46d195fca942..5ab71f0e1bcd 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index d6b2451eab1d..6a589f1e2880 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -16,7 +16,6 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 93c507a6f862..7aa682cd4d7e 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 98bb5d5375d7..cadfbe051873 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -10,7 +10,6 @@
* kind, whether express or implied.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 36da518915b5..eb0242e0b2d9 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index d11109762ac5..217c25d7381b 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 10196f5a897d..d597e89f2692 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
@@ -138,7 +137,6 @@ static int platram_probe(struct platform_device *pdev)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
- dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
goto exit_error;
}
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 9aad854fe912..cb4d92eea9fe 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -13,7 +13,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 93525121d69d..146b6047ed2b 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 3051c4c36240..b7a22a612a46 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -47,7 +47,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 39cc4181f025..b6f1aac3510c 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 5073cbc796d8..0b2ccb68c0d0 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -30,7 +30,6 @@
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
-#include <linux/init.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 2147e733533b..7d4e7b9da3a1 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -324,6 +324,15 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c
default:
ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
}
+
+ /*
+ * Return -ENOSPC only if no data could be written at all.
+ * Otherwise just return the number of bytes that actually
+ * have been written.
+ */
+ if ((ret == -ENOSPC) && (total_retlen))
+ break;
+
if (!ret) {
*ppos += retlen;
total_retlen += retlen;
@@ -889,25 +898,26 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
case OTPGETREGIONINFO:
{
struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
+ size_t retlen;
if (!buf)
return -ENOMEM;
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
- ret = mtd_get_fact_prot_info(mtd, buf, 4096);
+ ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
break;
case MTD_FILE_MODE_OTP_USER:
- ret = mtd_get_user_prot_info(mtd, buf, 4096);
+ ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
break;
default:
ret = -EINVAL;
break;
}
- if (ret >= 0) {
+ if (!ret) {
if (cmd == OTPGETREGIONCOUNT) {
- int nbr = ret / sizeof(struct otp_info);
+ int nbr = retlen / sizeof(struct otp_info);
ret = copy_to_user(argp, &nbr, sizeof(int));
} else
- ret = copy_to_user(argp, buf, ret);
+ ret = copy_to_user(argp, buf, retlen);
if (ret)
ret = -EFAULT;
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 34c0b16aed5c..d201feeb3ca6 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -883,14 +883,14 @@ EXPORT_SYMBOL_GPL(mtd_read_oob);
* devices. The user data is one time programmable but the factory data is read
* only.
*/
-int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
- size_t len)
+int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf)
{
if (!mtd->_get_fact_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_get_fact_prot_info(mtd, buf, len);
+ return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
@@ -906,14 +906,14 @@ int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
-int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
- size_t len)
+int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf)
{
if (!mtd->_get_user_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_get_user_prot_info(mtd, buf, len);
+ return mtd->_get_user_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
@@ -932,12 +932,22 @@ EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, u_char *buf)
{
+ int ret;
+
*retlen = 0;
if (!mtd->_write_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+ ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+ if (ret)
+ return ret;
+
+ /*
+ * If no data could be written at all, we are out of space and
+ * must return -ENOSPC.
+ */
+ return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 3c7d6d7623c1..1ca9aec141ff 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -150,11 +150,12 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
retlen, buf);
}
-static int part_get_user_prot_info(struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->_get_user_prot_info(part->master, buf, len);
+ return part->master->_get_user_prot_info(part->master, len, retlen,
+ buf);
}
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
@@ -165,11 +166,12 @@ static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
retlen, buf);
}
-static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
- size_t len)
+static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->_get_fact_prot_info(part->master, buf, len);
+ return part->master->_get_fact_prot_info(part->master, len, retlen,
+ buf);
}
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 90ff447bf043..f1cf503517fd 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -428,6 +428,7 @@ config MTD_NAND_FSL_IFC
tristate "NAND support for Freescale IFC controller"
depends on MTD_NAND && FSL_SOC
select FSL_IFC
+ select MEMORY
help
Various Freescale chips e.g P1010, include a NAND Flash machine
with built-in hardware ECC capabilities.
@@ -459,6 +460,8 @@ config MTD_NAND_MXC
config MTD_NAND_SH_FLCTL
tristate "Support for NAND on Renesas SuperH FLCTL"
depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on HAS_DMA
help
Several Renesas SuperH CPU has FLCTL. This option enables support
for NAND Flash using FLCTL.
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 8611eb4b45fc..4936e9e0002f 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -17,7 +17,6 @@
*/
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index c36e9b84487c..4ce181a35bcd 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -430,7 +430,7 @@ err_dma:
dma_unmap_single(dma_dev->dev, phys_addr, len, dir);
err_buf:
if (err != 0)
- dev_warn(host->dev, "Fall back to CPU I/O\n");
+ dev_dbg(host->dev, "Fall back to CPU I/O\n");
return err;
}
@@ -1220,6 +1220,7 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
goto err;
}
+ nand_chip->options |= NAND_NO_SUBPAGE_WRITE;
nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;
@@ -1659,8 +1660,8 @@ static void nfc_select_chip(struct mtd_info *mtd, int chip)
nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_ENABLE);
}
-static int nfc_make_addr(struct mtd_info *mtd, int column, int page_addr,
- unsigned int *addr1234, unsigned int *cycle0)
+static int nfc_make_addr(struct mtd_info *mtd, int command, int column,
+ int page_addr, unsigned int *addr1234, unsigned int *cycle0)
{
struct nand_chip *chip = mtd->priv;
@@ -1674,7 +1675,8 @@ static int nfc_make_addr(struct mtd_info *mtd, int column, int page_addr,
*addr1234 = 0;
if (column != -1) {
- if (chip->options & NAND_BUSWIDTH_16)
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
column >>= 1;
addr_bytes[acycle++] = column & 0xff;
if (mtd->writesize > 512)
@@ -1787,8 +1789,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
}
if (do_addr)
- acycle = nfc_make_addr(mtd, column, page_addr, &addr1234,
- &cycle0);
+ acycle = nfc_make_addr(mtd, command, column, page_addr,
+ &addr1234, &cycle0);
nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr;
nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 2880d888cfc5..bc5c518828d2 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -11,7 +11,6 @@
#include <linux/slab.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
@@ -308,7 +307,8 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
- if (this->options & NAND_BUSWIDTH_16)
+ if (this->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
column >>= 1;
ctx->write_byte(mtd, column);
}
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 94f55dbde995..b7a24946ca26 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -37,7 +37,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index f2f64addb5e8..4e66726da9aa 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -627,6 +627,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
struct cafe_priv *cafe;
uint32_t ctrl;
int err = 0;
+ int old_dma;
+ struct nand_buffers *nbuf;
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -655,13 +657,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
err = -ENOMEM;
goto out_free_mtd;
}
- cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112 + sizeof(struct nand_buffers),
- &cafe->dmaaddr, GFP_KERNEL);
- if (!cafe->dmabuf) {
- err = -ENOMEM;
- goto out_ior;
- }
- cafe->nand.buffers = (void *)cafe->dmabuf + 2112;
cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
if (!cafe->rs) {
@@ -721,7 +716,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
"CAFE NAND", mtd);
if (err) {
dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
- goto out_free_dma;
+ goto out_ior;
}
/* Disable master reset, enable NAND clock */
@@ -735,6 +730,32 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+ /* Enable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+ cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
+ cafe_readl(cafe, GLOBAL_CTRL),
+ cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+ /* Do not use the DMA for the nand_scan_ident() */
+ old_dma = usedma;
+ usedma = 0;
+
+ /* Scan to find existence of the device */
+ if (nand_scan_ident(mtd, 2, NULL)) {
+ err = -ENXIO;
+ goto out_irq;
+ }
+
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
+ 2112 + sizeof(struct nand_buffers) +
+ mtd->writesize + mtd->oobsize,
+ &cafe->dmaaddr, GFP_KERNEL);
+ if (!cafe->dmabuf) {
+ err = -ENOMEM;
+ goto out_irq;
+ }
+ cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112;
+
/* Set up DMA address */
cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
if (sizeof(cafe->dmaaddr) > 4)
@@ -746,16 +767,13 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
- /* Enable NAND IRQ in global IRQ mask register */
- cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
- cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
- cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
+ /* this driver does not need the @ecccalc and @ecccode */
+ nbuf->ecccalc = NULL;
+ nbuf->ecccode = NULL;
+ nbuf->databuf = (uint8_t *)(nbuf + 1);
- /* Scan to find existence of the device */
- if (nand_scan_ident(mtd, 2, NULL)) {
- err = -ENXIO;
- goto out_irq;
- }
+ /* Restore the DMA flag */
+ usedma = old_dma;
cafe->ctl2 = 1<<27; /* Reed-Solomon ECC */
if (mtd->writesize == 2048)
@@ -773,7 +791,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
} else {
printk(KERN_WARNING "Unexpected NAND flash writesize %d. Aborting\n",
mtd->writesize);
- goto out_irq;
+ goto out_free_dma;
}
cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
cafe->nand.ecc.size = mtd->writesize;
@@ -790,7 +808,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
err = nand_scan_tail(mtd);
if (err)
- goto out_irq;
+ goto out_free_dma;
pci_set_drvdata(pdev, mtd);
@@ -799,12 +817,15 @@ static int cafe_nand_probe(struct pci_dev *pdev,
goto out;
+ out_free_dma:
+ dma_free_coherent(&cafe->pdev->dev,
+ 2112 + sizeof(struct nand_buffers) +
+ mtd->writesize + mtd->oobsize,
+ cafe->dmabuf, cafe->dmaaddr);
out_irq:
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
- out_free_dma:
- dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
out_ior:
pci_iounmap(pdev, cafe->mmio);
out_free_mtd:
@@ -824,7 +845,10 @@ static void cafe_nand_remove(struct pci_dev *pdev)
nand_release(mtd);
free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
- dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev,
+ 2112 + sizeof(struct nand_buffers) +
+ mtd->writesize + mtd->oobsize,
+ cafe->dmabuf, cafe->dmaaddr);
kfree(mtd);
}
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index a4989ec6292e..4615d79fc93f 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -24,7 +24,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -746,28 +745,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
goto err_clk_enable;
}
- /*
- * Setup Async configuration register in case we did not boot from
- * NAND and so bootloader did not bother to set it up.
- */
- val = davinci_nand_readl(info, A1CR_OFFSET + info->core_chipsel * 4);
-
- /* Extended Wait is not valid and Select Strobe mode is not used */
- val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK);
- if (info->chip.options & NAND_BUSWIDTH_16)
- val |= 0x1;
-
- davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
-
- ret = 0;
- if (info->timing)
- ret = davinci_aemif_setup_timing(info->timing, info->base,
- info->core_chipsel);
- if (ret < 0) {
- dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
- goto err;
- }
-
spin_lock_irq(&davinci_nand_lock);
/* put CSxNAND into NAND mode */
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index babb02c4b220..35cb17f57800 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -30,24 +30,6 @@ struct denali_dt {
struct clk *clk;
};
-static void __iomem *request_and_map(struct device *dev,
- const struct resource *res)
-{
- void __iomem *ptr;
-
- if (!devm_request_mem_region(dev, res->start, resource_size(res),
- "denali-dt")) {
- dev_err(dev, "unable to request %s\n", res->name);
- return NULL;
- }
-
- ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!ptr)
- dev_err(dev, "ioremap_nocache of %s failed!", res->name);
-
- return ptr;
-}
-
static const struct of_device_id denali_nand_dt_ids[] = {
{ .compatible = "denali,denali-nand-dt" },
{ /* sentinel */ }
@@ -78,13 +60,6 @@ static int denali_dt_probe(struct platform_device *ofdev)
return -ENOMEM;
denali = &dt->denali;
- denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
- nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
- if (!denali_reg || !nand_data) {
- dev_err(&ofdev->dev, "resources not completely defined\n");
- return -EINVAL;
- }
-
denali->platform = DT;
denali->dev = &ofdev->dev;
denali->irq = platform_get_irq(ofdev, 0);
@@ -93,13 +68,15 @@ static int denali_dt_probe(struct platform_device *ofdev)
return denali->irq;
}
- denali->flash_reg = request_and_map(&ofdev->dev, denali_reg);
- if (!denali->flash_reg)
- return -ENOMEM;
+ denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
+ denali->flash_reg = devm_ioremap_resource(&ofdev->dev, denali_reg);
+ if (IS_ERR(denali->flash_reg))
+ return PTR_ERR(denali->flash_reg);
- denali->flash_mem = request_and_map(&ofdev->dev, nand_data);
- if (!denali->flash_mem)
- return -ENOMEM;
+ nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
+ denali->flash_mem = devm_ioremap_resource(&ofdev->dev, nand_data);
+ if (IS_ERR(denali->flash_mem))
+ return PTR_ERR(denali->flash_mem);
if (!of_property_read_u32(ofdev->dev.of_node,
"dma-mask", (u32 *)&denali_dma_mask)) {
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index fec31d71b84e..f68a7bccecdc 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -698,7 +698,8 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
- if (this->options & NAND_BUSWIDTH_16)
+ if (this->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
column >>= 1;
WriteDOC(column, docptr, Mplus_FlashAddress);
}
@@ -1438,7 +1439,7 @@ static int __init doc_probe(unsigned long physadr)
int reg, len, numchips;
int ret = 0;
- if (!request_mem_region(physadr, DOC_IOREMAP_LEN, NULL))
+ if (!request_mem_region(physadr, DOC_IOREMAP_LEN, "DiskOnChip"))
return -EBUSY;
virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
if (!virtadr) {
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index bcf60800c3ce..ec549cd9849f 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 90ca7e75d6f0..cb45d2f8e208 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/slab.h>
@@ -30,7 +29,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand_ecc.h>
-#include <asm/fsl_ifc.h>
+#include <linux/fsl_ifc.h>
#define FSL_IFC_V1_1_0 0x01010000
#define ERR_BYTE 0xFF /* Value returned for read
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 8e6148aa4539..117ce333fdd4 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index ca6369fe91ff..bb77f750e75a 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -27,6 +27,7 @@
#include <linux/of_device.h>
#include <linux/of_mtd.h>
#include "gpmi-nand.h"
+#include "bch-regs.h"
/* Resource names for the GPMI NAND driver. */
#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
@@ -985,7 +986,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
int ret;
dev_dbg(this->dev, "page number is : %d\n", page);
- ret = read_page_prepare(this, buf, mtd->writesize,
+ ret = read_page_prepare(this, buf, nfc_geo->payload_size,
this->payload_virt, this->payload_phys,
nfc_geo->payload_size,
&payload_virt, &payload_phys);
@@ -999,7 +1000,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
/* go! */
ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
- read_page_end(this, buf, mtd->writesize,
+ read_page_end(this, buf, nfc_geo->payload_size,
this->payload_virt, this->payload_phys,
nfc_geo->payload_size,
payload_virt, payload_phys);
@@ -1041,7 +1042,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
}
- read_page_swap_end(this, buf, mtd->writesize,
+ read_page_swap_end(this, buf, nfc_geo->payload_size,
this->payload_virt, this->payload_phys,
nfc_geo->payload_size,
payload_virt, payload_phys);
@@ -1049,6 +1050,90 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return max_bitflips;
}
+/* Fake a virtual small page for the subpage read */
+static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offs, uint32_t len, uint8_t *buf, int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ void __iomem *bch_regs = this->resources.bch_regs;
+ struct bch_geometry old_geo = this->bch_geometry;
+ struct bch_geometry *geo = &this->bch_geometry;
+ int size = chip->ecc.size; /* ECC chunk size */
+ int meta, n, page_size;
+ u32 r1_old, r2_old, r1_new, r2_new;
+ unsigned int max_bitflips;
+ int first, last, marker_pos;
+ int ecc_parity_size;
+ int col = 0;
+
+ /* The size of ECC parity */
+ ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
+
+ /* Align it with the chunk size */
+ first = offs / size;
+ last = (offs + len - 1) / size;
+
+ /*
+ * Find the chunk which contains the Block Marker. If this chunk is
+ * in the range of [first, last], we have to read out the whole page.
+ * Why? since we had swapped the data at the position of Block Marker
+ * to the metadata which is bound with the chunk 0.
+ */
+ marker_pos = geo->block_mark_byte_offset / size;
+ if (last >= marker_pos && first <= marker_pos) {
+ dev_dbg(this->dev, "page:%d, first:%d, last:%d, marker at:%d\n",
+ page, first, last, marker_pos);
+ return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+ }
+
+ meta = geo->metadata_size;
+ if (first) {
+ col = meta + (size + ecc_parity_size) * first;
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
+
+ meta = 0;
+ buf = buf + first * size;
+ }
+
+ /* Save the old environment */
+ r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
+ r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+ /* change the BCH registers and bch_geometry{} */
+ n = last - first + 1;
+ page_size = meta + (size + ecc_parity_size) * n;
+
+ r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS |
+ BM_BCH_FLASH0LAYOUT0_META_SIZE);
+ r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1)
+ | BF_BCH_FLASH0LAYOUT0_META_SIZE(meta);
+ writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0);
+
+ r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE;
+ r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size);
+ writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+ geo->ecc_chunk_count = n;
+ geo->payload_size = n * size;
+ geo->page_size = page_size;
+ geo->auxiliary_status_offset = ALIGN(meta, 4);
+
+ dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
+ page, offs, len, col, first, n, page_size);
+
+ /* Read the subpage now */
+ this->swap_block_mark = false;
+ max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+
+ /* Restore */
+ writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
+ writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
+ this->bch_geometry = old_geo;
+ this->swap_block_mark = true;
+
+ return max_bitflips;
+}
+
static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
@@ -1566,6 +1651,17 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
ecc->layout = &gpmi_hw_ecclayout;
/*
+ * We only enable the subpage read when:
+ * (1) the chip is imx6, and
+ * (2) the size of the ECC parity is byte aligned.
+ */
+ if (GPMI_IS_MX6Q(this) &&
+ ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
+ ecc->read_subpage = gpmi_ecc_read_subpage;
+ chip->options |= NAND_SUBPAGE_READ;
+ }
+
+ /*
* Can we enable the extra features? such as EDO or Sync mode.
*
* We do not check the return value now. That's means if we fail in
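A minimal userspace sketch of the chunk arithmetic used by gpmi_ecc_read_subpage() above, with illustrative geometry values (512-byte chunks, BCH over GF(2^13) with strength 8, 10 bytes of metadata) that are assumptions rather than values read from a specific chip. It shows how [first, last] and the reprogrammed BCH page size are derived; the real driver additionally falls back to a full-page read when the chunk holding the block mark lies inside that range.

/* Illustrative only: mirrors the subpage chunk math above. */
#include <stdio.h>

int main(void)
{
	int size = 512;				/* ECC chunk size */
	int gf_len = 13, ecc_strength = 8;	/* parity bits per chunk */
	int ecc_parity_size = gf_len * ecc_strength / 8;	/* 13 bytes, byte aligned */
	int meta = 10;				/* metadata bytes (example) */

	unsigned int offs = 1500, len = 600;	/* requested subpage window */
	int first = offs / size;		/* 2 */
	int last = (offs + len - 1) / size;	/* 4 */
	int n = last - first + 1;		/* chunks to read: 3 */

	/* Column to seek to when the window does not start at chunk 0 */
	int col = meta + (size + ecc_parity_size) * first;	/* 1060 */

	/* Page size the BCH engine is temporarily reprogrammed for
	 * (metadata is dropped because first != 0) */
	int page_size = (size + ecc_parity_size) * n;		/* 1575 */

	printf("first=%d last=%d n=%d col=%d page_size=%d\n",
	       first, last, n, col, page_size);
	return 0;
}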
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 31ee7cfbc12b..e78841a2dcc3 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -30,7 +30,6 @@
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index e9a4835c4dd9..dba262bf766f 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1501,6 +1501,8 @@ static int mxcnd_probe(struct platform_device *pdev)
init_completion(&host->op_completion);
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
+ return host->irq;
/*
* Use host->devtype_data->irq_control() here instead of irq_control()
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 9715a7ba164a..9d01c4df838c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -589,7 +589,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
- if (chip->options & NAND_BUSWIDTH_16)
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
column >>= 1;
chip->cmd_ctrl(mtd, column, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
@@ -680,7 +681,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
- if (chip->options & NAND_BUSWIDTH_16)
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
column >>= 1;
chip->cmd_ctrl(mtd, column, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
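The !nand_opcode_8bits(command) checks added above skip the 16-bit column adjustment for opcodes whose payload is always transferred 8 bits wide, the ONFI/JEDEC parameter page read being the motivating case. A hedged sketch of the idea follows; the helper body and the NAND_CMD_PARAM value are assumptions here, and the authoritative definition lives in include/linux/mtd/nand.h.

#include <stdio.h>

#define NAND_CMD_PARAM	0xec	/* assumed opcode value */

/* Sketch: these opcodes take byte-based column addresses even on x16 parts */
static int nand_opcode_8bits_sketch(unsigned int command)
{
	return command == NAND_CMD_PARAM;
}

int main(void)
{
	unsigned int column = 3;	/* byte offset into the parameter page */

	/* The old code on a 16-bit part would have halved this to 1 */
	if (!nand_opcode_8bits_sketch(NAND_CMD_PARAM))
		column >>= 1;

	printf("column sent to the chip: %u\n", column);	/* 3 */
	return 0;
}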
@@ -1160,9 +1162,11 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
* @data_offs: offset of requested data within the page
* @readlen: data length
* @bufpoi: buffer to store read data
+ * @page: page number to read
*/
static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
+ uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
+ int page)
{
int start_step, end_step, num_steps;
uint32_t *eccpos = chip->ecc.layout->eccpos;
@@ -1170,13 +1174,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
int data_col_addr, i, gaps = 0;
int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
- int index = 0;
+ int index;
unsigned int max_bitflips = 0;
/* Column address within the page aligned to ECC size (256bytes) */
start_step = data_offs / chip->ecc.size;
end_step = (data_offs + readlen - 1) / chip->ecc.size;
num_steps = end_step - start_step + 1;
+ index = start_step * chip->ecc.bytes;
/* Data size aligned to ECC ecc.size */
datafrag_len = num_steps * chip->ecc.size;
@@ -1213,8 +1218,6 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
* Send the command to read the particular ECC bytes take care
* about buswidth alignment in read_buf.
*/
- index = start_step * chip->ecc.bytes;
-
aligned_pos = eccpos[index] & ~(busw - 1);
aligned_len = eccfrag_len;
if (eccpos[index] & (busw - 1))
@@ -1538,7 +1541,8 @@ read_retry:
else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
!oob)
ret = chip->ecc.read_subpage(mtd, chip,
- col, bytes, bufpoi);
+ col, bytes, bufpoi,
+ page);
else
ret = chip->ecc.read_page(mtd, chip, bufpoi,
oob_required, page);
@@ -2000,7 +2004,7 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
oob += chip->ecc.prepad;
}
- chip->read_buf(mtd, oob, eccbytes);
+ chip->write_buf(mtd, oob, eccbytes);
oob += eccbytes;
if (chip->ecc.postpad) {
@@ -3063,7 +3067,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
int *busw)
{
struct nand_onfi_params *p = &chip->onfi_params;
- int i;
+ int i, j;
int val;
/* Try ONFI for unknown chip or LP */
@@ -3072,18 +3076,10 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
return 0;
- /*
- * ONFI must be probed in 8-bit mode or with NAND_BUSWIDTH_AUTO, not
- * with NAND_BUSWIDTH_16
- */
- if (chip->options & NAND_BUSWIDTH_16) {
- pr_err("ONFI cannot be probed in 16-bit mode; aborting\n");
- return 0;
- }
-
chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
for (i = 0; i < 3; i++) {
- chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
+ for (j = 0; j < sizeof(*p); j++)
+ ((uint8_t *)p)[j] = chip->read_byte(mtd);
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
le16_to_cpu(p->crc)) {
break;
@@ -3169,6 +3165,87 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
}
/*
+ * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
+ */
+static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
+ int *busw)
+{
+ struct nand_jedec_params *p = &chip->jedec_params;
+ struct jedec_ecc_info *ecc;
+ int val;
+ int i, j;
+
+ /* Try JEDEC for unknown chip or LP */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
+ if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
+ chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
+ chip->read_byte(mtd) != 'C')
+ return 0;
+
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < sizeof(*p); j++)
+ ((uint8_t *)p)[j] = chip->read_byte(mtd);
+
+ if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
+ le16_to_cpu(p->crc))
+ break;
+ }
+
+ if (i == 3) {
+ pr_err("Could not find valid JEDEC parameter page; aborting\n");
+ return 0;
+ }
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & (1 << 2))
+ chip->jedec_version = 10;
+ else if (val & (1 << 1))
+ chip->jedec_version = 1; /* vendor specific version */
+
+ if (!chip->jedec_version) {
+ pr_info("unsupported JEDEC version: %d\n", val);
+ return 0;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ if (!mtd->name)
+ mtd->name = p->model;
+
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+
+ /* Please refer to the comment in nand_flash_detect_onfi(). */
+ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize *= mtd->writesize;
+
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+
+ /* Please refer to the comment in nand_flash_detect_onfi(). */
+ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ chip->bits_per_cell = p->bits_per_cell;
+
+ if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
+ *busw = NAND_BUSWIDTH_16;
+ else
+ *busw = 0;
+
+ /* ECC info */
+ ecc = &p->ecc_info[0];
+
+ if (ecc->codeword_size >= 9) {
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
+ } else {
+ pr_warn("Invalid codeword size\n");
+ }
+
+ return 1;
+}
+
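The two fls()-based expressions above (and their ONFI counterparts) round a possibly non-power-of-2 pages-per-block or blocks-per-LUN count down to the nearest power of two. A small worked illustration, with made-up values and a userspace stand-in for fls():

#include <stdio.h>

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit */
static int fls_(unsigned int x)
{
	int i = 0;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned int pages_per_block = 96;	/* not a power of two */
	unsigned int writesize = 8192;

	/* 1 << (fls(96) - 1) = 1 << 6 = 64: round down to a power of two */
	unsigned int rounded = 1u << (fls_(pages_per_block) - 1);

	printf("erasesize = %u\n", rounded * writesize);	/* 524288 */
	return 0;
}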
+/*
* nand_id_has_period - Check if an ID string has a given wraparound period
* @id_data: the ID string
* @arrlen: the length of the @id_data array
@@ -3474,10 +3551,10 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
*/
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
struct nand_chip *chip,
- int busw,
int *maf_id, int *dev_id,
struct nand_flash_dev *type)
{
+ int busw;
int i, maf_idx;
u8 id_data[8];
@@ -3533,6 +3610,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/* Check is chip is ONFI compliant */
if (nand_flash_detect_onfi(mtd, chip, &busw))
goto ident_done;
+
+ /* Check if the chip is JEDEC compliant */
+ if (nand_flash_detect_jedec(mtd, chip, &busw))
+ goto ident_done;
}
if (!type->name)
@@ -3612,8 +3693,17 @@ ident_done:
pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
*maf_id, *dev_id);
- pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
- chip->onfi_version ? chip->onfi_params.model : type->name);
+
+ if (chip->onfi_version)
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ chip->onfi_params.model);
+ else if (chip->jedec_version)
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ chip->jedec_params.model);
+ else
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ type->name);
+
pr_info("%dMiB, %s, page size: %d, OOB size: %d\n",
(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
mtd->writesize, mtd->oobsize);
@@ -3634,18 +3724,16 @@ ident_done:
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
{
- int i, busw, nand_maf_id, nand_dev_id;
+ int i, nand_maf_id, nand_dev_id;
struct nand_chip *chip = mtd->priv;
struct nand_flash_dev *type;
- /* Get buswidth to select the correct functions */
- busw = chip->options & NAND_BUSWIDTH_16;
/* Set the default functions */
- nand_set_defaults(chip, busw);
+ nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
/* Read the flash type */
- type = nand_get_flash_type(mtd, chip, busw,
- &nand_maf_id, &nand_dev_id, table);
+ type = nand_get_flash_type(mtd, chip, &nand_maf_id,
+ &nand_dev_id, table);
if (IS_ERR(type)) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
@@ -3696,15 +3784,26 @@ int nand_scan_tail(struct mtd_info *mtd)
int i;
struct nand_chip *chip = mtd->priv;
struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_buffers *nbuf;
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
!(chip->bbt_options & NAND_BBT_USE_FLASH));
- if (!(chip->options & NAND_OWN_BUFFERS))
- chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
- if (!chip->buffers)
- return -ENOMEM;
+ if (!(chip->options & NAND_OWN_BUFFERS)) {
+ nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
+ + mtd->oobsize * 3, GFP_KERNEL);
+ if (!nbuf)
+ return -ENOMEM;
+ nbuf->ecccalc = (uint8_t *)(nbuf + 1);
+ nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
+ nbuf->databuf = nbuf->ecccode + mtd->oobsize;
+
+ chip->buffers = nbuf;
+ } else {
+ if (!chip->buffers)
+ return -ENOMEM;
+ }
/* Set the internal oob buffer location, just after the page data */
chip->oob_poi = chip->buffers->databuf + mtd->writesize;
@@ -3825,7 +3924,7 @@ int nand_scan_tail(struct mtd_info *mtd)
case NAND_ECC_SOFT_BCH:
if (!mtd_nand_has_bch()) {
- pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
+ pr_warn("CONFIG_MTD_NAND_ECC_BCH not enabled\n");
BUG();
}
ecc->calculate = nand_bch_calculate_ecc;
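The nand_scan_tail() change above replaces the bare kmalloc of struct nand_buffers with one zeroed allocation that is carved up in place: the struct header is followed by ecccalc (oobsize bytes), ecccode (oobsize bytes) and databuf (writesize + oobsize bytes), with oob_poi later pointed just past the page data. A minimal userspace sketch of the same carving pattern, using a cut-down stand-in structure and example sizes:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Cut-down stand-in for struct nand_buffers (illustrative fields only) */
struct nand_buffers_sketch {
	uint8_t *ecccalc;
	uint8_t *ecccode;
	uint8_t *databuf;
};

int main(void)
{
	size_t writesize = 4096, oobsize = 224;		/* example geometry */
	struct nand_buffers_sketch *nbuf;
	uint8_t *oob_poi;

	/* One allocation: header + two ECC scratch areas + page data + OOB */
	nbuf = calloc(1, sizeof(*nbuf) + writesize + oobsize * 3);
	if (!nbuf)
		return 1;

	nbuf->ecccalc = (uint8_t *)(nbuf + 1);
	nbuf->ecccode = nbuf->ecccalc + oobsize;
	nbuf->databuf = nbuf->ecccode + oobsize;	/* writesize + oobsize left */

	/* The OOB buffer then sits just after the page data, as in the driver */
	oob_poi = nbuf->databuf + writesize;

	printf("oob_poi offset within the buffer area: %zu\n",
	       (size_t)(oob_poi - (uint8_t *)(nbuf + 1)));	/* 4544 */
	free(nbuf);
	return 0;
}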
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index daa2faacd7d0..3d7c89fc1031 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -43,6 +43,9 @@ struct nand_flash_dev nand_flash_ids[] = {
{"TC58NVG6D2 64G 3.3V 8-bit",
{ .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"SDTNRGAMA 64G 3.3V 8-bit",
+ { .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x50} },
+ SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 9ee09a8177c6..e8a5fffd6ab2 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -10,7 +10,6 @@
*/
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -152,7 +151,8 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
if (column != -1 || page_addr != -1) {
if (column != -1) {
- if (chip->options & NAND_BUSWIDTH_16)
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
column >>= 1;
write_addr_reg(nand, column);
write_addr_reg(nand, column >> 8 | ENDADDR);
@@ -225,7 +225,7 @@ static void nuc900_nand_enable(struct nuc900_nand *nand)
val = __raw_readl(nand->reg + REG_FMICSR);
if (!(val & NAND_EN))
- __raw_writel(val | NAND_EN, REG_FMICSR);
+ __raw_writel(val | NAND_EN, nand->reg + REG_FMICSR);
val = __raw_readl(nand->reg + REG_SMCSR);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index bf642ceef681..1ff49b80bdaf 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -118,14 +118,9 @@
#define OMAP24XX_DMA_GPMC 4
-#define BCH8_MAX_ERROR 8 /* upto 8 bit correctable */
-#define BCH4_MAX_ERROR 4 /* upto 4 bit correctable */
-
#define SECTOR_BYTES 512
/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
#define BCH4_BIT_PAD 4
-#define BCH8_ECC_MAX ((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8)
-#define BCH4_ECC_MAX ((SECTOR_BYTES + BCH4_ECC_OOB_BYTES) * 8)
/* GPMC ecc engine settings for read */
#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
@@ -159,7 +154,7 @@ struct omap_nand_info {
int gpmc_cs;
unsigned long phys_base;
- unsigned long mem_size;
+ enum omap_ecc ecc_opt;
struct completion comp;
struct dma_chan *dma;
int gpmc_irq_fifo;
@@ -172,7 +167,6 @@ struct omap_nand_info {
int buf_len;
struct gpmc_nand_regs reg;
/* fields specific for BCHx_HW ECC scheme */
- bool is_elm_used;
struct device *elm_dev;
struct device_node *of_node;
};
@@ -1043,9 +1037,8 @@ static int omap_dev_ready(struct mtd_info *mtd)
}
}
-#if defined(CONFIG_MTD_NAND_ECC_BCH) || defined(CONFIG_MTD_NAND_OMAP_BCH)
/**
- * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
+ * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
* @mtd: MTD device structure
* @mode: Read/Write mode
*
@@ -1056,50 +1049,73 @@ static int omap_dev_ready(struct mtd_info *mtd)
* eccsize0 = 0 (no additional protected byte in spare area)
* eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
*/
-static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
+static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
{
- int nerrors;
+ unsigned int bch_type;
unsigned int dev_width, nsectors;
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
+ enum omap_ecc ecc_opt = info->ecc_opt;
struct nand_chip *chip = mtd->priv;
u32 val, wr_mode;
unsigned int ecc_size1, ecc_size0;
- /* Using wrapping mode 6 for writing */
- wr_mode = BCH_WRAPMODE_6;
-
- /*
- * ECC engine enabled for valid ecc_size0 nibbles
- * and disabled for ecc_size1 nibbles.
- */
- ecc_size0 = BCH_ECC_SIZE0;
- ecc_size1 = BCH_ECC_SIZE1;
-
- /* Perform ecc calculation on 512-byte sector */
- nsectors = 1;
-
- /* Update number of error correction */
- nerrors = info->nand.ecc.strength;
-
- /* Multi sector reading/writing for NAND flash with page size < 4096 */
- if (info->is_elm_used && (mtd->writesize <= 4096)) {
+ /* GPMC configurations for calculating ECC */
+ switch (ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ bch_type = 0;
+ nsectors = 1;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_type = 0;
+ nsectors = chip->ecc.steps;
if (mode == NAND_ECC_READ) {
- /* Using wrapping mode 1 for reading */
- wr_mode = BCH_WRAPMODE_1;
-
- /*
- * ECC engine enabled for ecc_size0 nibbles
- * and disabled for ecc_size1 nibbles.
- */
- ecc_size0 = (nerrors == 8) ?
- BCH8R_ECC_SIZE0 : BCH4R_ECC_SIZE0;
- ecc_size1 = (nerrors == 8) ?
- BCH8R_ECC_SIZE1 : BCH4R_ECC_SIZE1;
+ wr_mode = BCH_WRAPMODE_1;
+ ecc_size0 = BCH4R_ECC_SIZE0;
+ ecc_size1 = BCH4R_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
}
-
- /* Perform ecc calculation for one page (< 4096) */
- nsectors = info->nand.ecc.steps;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ bch_type = 1;
+ nsectors = 1;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_type = 1;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_1;
+ ecc_size0 = BCH8R_ECC_SIZE0;
+ ecc_size1 = BCH8R_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ default:
+ return;
}
writel(ECC1, info->reg.gpmc_ecc_control);
@@ -1112,7 +1128,7 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
/* BCH configuration */
val = ((1 << 16) | /* enable BCH */
- (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
+ (bch_type << 12) | /* BCH4/BCH8/BCH16 */
(wr_mode << 8) | /* wrap mode */
(dev_width << 7) | /* bus width */
(((nsectors-1) & 0x7) << 4) | /* number of sectors */
@@ -1124,132 +1140,40 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
/* Clear ecc and enable bits */
writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
}
-#endif
-
-#ifdef CONFIG_MTD_NAND_ECC_BCH
-/**
- * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
- * @mtd: MTD device structure
- * @dat: The pointer to data on which ecc is computed
- * @ecc_code: The ecc_code buffer
- */
-static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code)
-{
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
- unsigned long nsectors, val1, val2;
- int i;
-
- nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
-
- for (i = 0; i < nsectors; i++) {
- /* Read hw-computed remainder */
- val1 = readl(info->reg.gpmc_bch_result0[i]);
- val2 = readl(info->reg.gpmc_bch_result1[i]);
-
- /*
- * Add constant polynomial to remainder, in order to get an ecc
- * sequence of 0xFFs for a buffer filled with 0xFFs; and
- * left-justify the resulting polynomial.
- */
- *ecc_code++ = 0x28 ^ ((val2 >> 12) & 0xFF);
- *ecc_code++ = 0x13 ^ ((val2 >> 4) & 0xFF);
- *ecc_code++ = 0xcc ^ (((val2 & 0xF) << 4)|((val1 >> 28) & 0xF));
- *ecc_code++ = 0x39 ^ ((val1 >> 20) & 0xFF);
- *ecc_code++ = 0x96 ^ ((val1 >> 12) & 0xFF);
- *ecc_code++ = 0xac ^ ((val1 >> 4) & 0xFF);
- *ecc_code++ = 0x7f ^ ((val1 & 0xF) << 4);
- }
-
- return 0;
-}
+static u8 bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
+static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
+ 0x97, 0x79, 0xe5, 0x24, 0xb5};
/**
- * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
- * @mtd: MTD device structure
- * @dat: The pointer to data on which ecc is computed
- * @ecc_code: The ecc_code buffer
- */
-static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code)
-{
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
- unsigned long nsectors, val1, val2, val3, val4;
- int i;
-
- nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
-
- for (i = 0; i < nsectors; i++) {
-
- /* Read hw-computed remainder */
- val1 = readl(info->reg.gpmc_bch_result0[i]);
- val2 = readl(info->reg.gpmc_bch_result1[i]);
- val3 = readl(info->reg.gpmc_bch_result2[i]);
- val4 = readl(info->reg.gpmc_bch_result3[i]);
-
- /*
- * Add constant polynomial to remainder, in order to get an ecc
- * sequence of 0xFFs for a buffer filled with 0xFFs.
- */
- *ecc_code++ = 0xef ^ (val4 & 0xFF);
- *ecc_code++ = 0x51 ^ ((val3 >> 24) & 0xFF);
- *ecc_code++ = 0x2e ^ ((val3 >> 16) & 0xFF);
- *ecc_code++ = 0x09 ^ ((val3 >> 8) & 0xFF);
- *ecc_code++ = 0xed ^ (val3 & 0xFF);
- *ecc_code++ = 0x93 ^ ((val2 >> 24) & 0xFF);
- *ecc_code++ = 0x9a ^ ((val2 >> 16) & 0xFF);
- *ecc_code++ = 0xc2 ^ ((val2 >> 8) & 0xFF);
- *ecc_code++ = 0x97 ^ (val2 & 0xFF);
- *ecc_code++ = 0x79 ^ ((val1 >> 24) & 0xFF);
- *ecc_code++ = 0xe5 ^ ((val1 >> 16) & 0xFF);
- *ecc_code++ = 0x24 ^ ((val1 >> 8) & 0xFF);
- *ecc_code++ = 0xb5 ^ (val1 & 0xFF);
- }
-
- return 0;
-}
-#endif /* CONFIG_MTD_NAND_ECC_BCH */
-
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
-/**
- * omap3_calculate_ecc_bch - Generate bytes of ECC bytes
+ * omap_calculate_ecc_bch - Generate bytes of ECC bytes
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
*
* Support calculating of BCH4/8 ecc vectors for the page
*/
-static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code)
+static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
{
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ struct gpmc_nand_regs *gpmc_regs = &info->reg;
+ u8 *ecc_code;
unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
- int i, eccbchtsel;
+ int i;
nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
- /*
- * find BCH scheme used
- * 0 -> BCH4
- * 1 -> BCH8
- */
- eccbchtsel = ((readl(info->reg.gpmc_ecc_config) >> 12) & 0x3);
-
for (i = 0; i < nsectors; i++) {
-
- /* Read hw-computed remainder */
- bch_val1 = readl(info->reg.gpmc_bch_result0[i]);
- bch_val2 = readl(info->reg.gpmc_bch_result1[i]);
- if (eccbchtsel) {
- bch_val3 = readl(info->reg.gpmc_bch_result2[i]);
- bch_val4 = readl(info->reg.gpmc_bch_result3[i]);
- }
-
- if (eccbchtsel) {
- /* BCH8 ecc scheme */
+ ecc_code = ecc_calc;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
*ecc_code++ = (bch_val4 & 0xFF);
*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
@@ -1263,14 +1187,11 @@ static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,
*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
*ecc_code++ = (bch_val1 & 0xFF);
- /*
- * Setting 14th byte to zero to handle
- * erased page & maintain compatibility
- * with RBL
- */
- *ecc_code++ = 0x0;
- } else {
- /* BCH4 ecc scheme */
+ break;
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
*ecc_code++ = ((bch_val2 & 0xF) << 4) |
@@ -1279,12 +1200,38 @@ static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,
*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
*ecc_code++ = ((bch_val1 & 0xF) << 4);
- /*
- * Setting 8th byte to zero to handle
- * erased page
- */
- *ecc_code++ = 0x0;
+ break;
+ default:
+ return -EINVAL;
}
+
+ /* ECC scheme specific syndrome customizations */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back */
+ for (i = 0; i < eccbytes; i++)
+ ecc_calc[i] ^= bch4_polynomial[i];
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Set 8th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back */
+ for (i = 0; i < eccbytes; i++)
+ ecc_calc[i] ^= bch8_polynomial[i];
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* Set 14th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ecc_calc += eccbytes;
}
return 0;
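The bch4_polynomial/bch8_polynomial XOR applied above for the *_DETECTION_SW schemes implements a common trick: fold a fixed mask into the hardware remainder so that a fully erased page (data and OOB all 0xFF) still passes the software ECC check. The sketch below demonstrates the idea with a toy XOR-fold checksum standing in for BCH; the checksum, sizes and mask derivation are illustrative assumptions, not the driver's actual math.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Toy 4-byte "ECC": an XOR fold of the data, standing in for the BCH remainder */
static void toy_ecc(const uint8_t *data, size_t len, uint8_t ecc[4])
{
	size_t i;

	memset(ecc, 0, 4);
	for (i = 0; i < len; i++)
		ecc[i & 3] ^= data[i];
}

int main(void)
{
	uint8_t erased[512], ecc[4], mask[4];
	int i;

	/* Precompute mask = ECC(all 0xFF) ^ 0xFF, once */
	memset(erased, 0xFF, sizeof(erased));
	toy_ecc(erased, sizeof(erased), mask);
	for (i = 0; i < 4; i++)
		mask[i] ^= 0xFF;

	/*
	 * At calculation time, XOR the computed ECC with the mask. For an
	 * erased page the result is all 0xFF, identical to the erased OOB
	 * bytes, so the software correction step sees no mismatch.
	 */
	toy_ecc(erased, sizeof(erased), ecc);
	for (i = 0; i < 4; i++)
		ecc[i] ^= mask[i];

	printf("ecc of an erased page: %02x %02x %02x %02x\n",
	       ecc[0], ecc[1], ecc[2], ecc[3]);	/* ff ff ff ff */
	return 0;
}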
@@ -1329,6 +1276,7 @@ static int erased_sector_bitflips(u_char *data, u_char *oob,
return flip_bits;
}
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
/**
* omap_elm_correct_data - corrects page data area in case error reported
* @mtd: MTD device structure
@@ -1337,55 +1285,46 @@ static int erased_sector_bitflips(u_char *data, u_char *oob,
* @calc_ecc: ecc read from HW ECC registers
*
* Calculated ecc vector reported as zero in case of non-error pages.
- * In case of error/erased pages non-zero error vector is reported.
- * In case of non-zero ecc vector, check read_ecc at fixed offset
- * (x = 13/7 in case of BCH8/4 == 0) to find page programmed or not.
- * To handle bit flips in this data, count the number of 0's in
- * read_ecc[x] and check if it greater than 4. If it is less, it is
- * programmed page, else erased page.
- *
- * 1. If page is erased, check with standard ecc vector (ecc vector
- * for erased page to find any bit flip). If check fails, bit flip
- * is present in erased page. Count the bit flips in erased page and
- * if it falls under correctable level, report page with 0xFF and
- * update the correctable bit information.
- * 2. If error is reported on programmed page, update elm error
- * vector and correct the page with ELM error correction routine.
- *
+ * In case of non-zero ecc vector, first filter out erased-pages, and
+ * then process data via ELM to detect bit-flips.
*/
static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
u_char *read_ecc, u_char *calc_ecc)
{
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
+ struct nand_ecc_ctrl *ecc = &info->nand.ecc;
int eccsteps = info->nand.ecc.steps;
int i , j, stat = 0;
- int eccsize, eccflag, ecc_vector_size;
+ int eccflag, actual_eccbytes;
struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
u_char *ecc_vec = calc_ecc;
u_char *spare_ecc = read_ecc;
u_char *erased_ecc_vec;
- enum bch_ecc type;
+ u_char *buf;
+ int bitflip_count;
bool is_error_reported = false;
+ u32 bit_pos, byte_pos, error_max, pos;
+ int err;
- /* Initialize elm error vector to zero */
- memset(err_vec, 0, sizeof(err_vec));
-
- if (info->nand.ecc.strength == BCH8_MAX_ERROR) {
- type = BCH8_ECC;
- erased_ecc_vec = bch8_vector;
- } else {
- type = BCH4_ECC;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* omit 7th ECC byte reserved for ROM code compatibility */
+ actual_eccbytes = ecc->bytes - 1;
erased_ecc_vec = bch4_vector;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* omit 14th ECC byte reserved for ROM code compatibility */
+ actual_eccbytes = ecc->bytes - 1;
+ erased_ecc_vec = bch8_vector;
+ break;
+ default:
+ pr_err("invalid driver configuration\n");
+ return -EINVAL;
}
- ecc_vector_size = info->nand.ecc.bytes;
-
- /*
- * Remove extra byte padding for BCH8 RBL
- * compatibility and erased page handling
- */
- eccsize = ecc_vector_size - 1;
+ /* Initialize elm error vector to zero */
+ memset(err_vec, 0, sizeof(err_vec));
for (i = 0; i < eccsteps ; i++) {
eccflag = 0; /* initialize eccflag */
@@ -1394,8 +1333,7 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
* Check any error reported,
* In case of error, non zero ecc reported.
*/
-
- for (j = 0; (j < eccsize); j++) {
+ for (j = 0; j < actual_eccbytes; j++) {
if (calc_ecc[j] != 0) {
eccflag = 1; /* non zero ecc, error present */
break;
@@ -1403,50 +1341,43 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
}
if (eccflag == 1) {
- /*
- * Set threshold to minimum of 4, half of ecc.strength/2
- * to allow max bit flip in byte to 4
- */
- unsigned int threshold = min_t(unsigned int, 4,
- info->nand.ecc.strength / 2);
-
- /*
- * Check data area is programmed by counting
- * number of 0's at fixed offset in spare area.
- * Checking count of 0's against threshold.
- * In case programmed page expects at least threshold
- * zeros in byte.
- * If zeros are less than threshold for programmed page/
- * zeros are more than threshold erased page, either
- * case page reported as uncorrectable.
- */
- if (hweight8(~read_ecc[eccsize]) >= threshold) {
+ if (memcmp(calc_ecc, erased_ecc_vec,
+ actual_eccbytes) == 0) {
/*
- * Update elm error vector as
- * data area is programmed
+ * calc_ecc[] matches the ECC pattern of all-0xFF data,
+ * so this is definitely an erased page
*/
- err_vec[i].error_reported = true;
- is_error_reported = true;
} else {
- /* Error reported in erased page */
- int bitflip_count;
- u_char *buf = &data[info->nand.ecc.size * i];
-
- if (memcmp(calc_ecc, erased_ecc_vec, eccsize)) {
- bitflip_count = erased_sector_bitflips(
- buf, read_ecc, info);
-
- if (bitflip_count)
- stat += bitflip_count;
- else
- return -EINVAL;
+ buf = &data[info->nand.ecc.size * i];
+ /*
+ * count number of 0-bits in read_buf.
+ * This check can be removed once a similar
+ * check is introduced in generic NAND driver
+ */
+ bitflip_count = erased_sector_bitflips(
+ buf, read_ecc, info);
+ if (bitflip_count) {
+ /*
+ * number of 0-bits within ECC limits
+ * So this may be an erased-page
+ */
+ stat += bitflip_count;
+ } else {
+ /*
+ * Too many 0-bits. It may be a
+ * - programmed-page, OR
+ * - erased-page with many bit-flips
+ * So this page requires check by ELM
+ */
+ err_vec[i].error_reported = true;
+ is_error_reported = true;
}
}
}
/* Update the ecc vector */
- calc_ecc += ecc_vector_size;
- read_ecc += ecc_vector_size;
+ calc_ecc += ecc->bytes;
+ read_ecc += ecc->bytes;
}
/* Check if any error reported */
@@ -1456,23 +1387,26 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
/* Decode BCH error using ELM module */
elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+ err = 0;
for (i = 0; i < eccsteps; i++) {
- if (err_vec[i].error_reported) {
+ if (err_vec[i].error_uncorrectable) {
+ pr_err("nand: uncorrectable bit-flips found\n");
+ err = -EBADMSG;
+ } else if (err_vec[i].error_reported) {
for (j = 0; j < err_vec[i].error_count; j++) {
- u32 bit_pos, byte_pos, error_max, pos;
-
- if (type == BCH8_ECC)
- error_max = BCH8_ECC_MAX;
- else
- error_max = BCH4_ECC_MAX;
-
- if (info->nand.ecc.strength == BCH8_MAX_ERROR)
- pos = err_vec[i].error_loc[j];
- else
- /* Add 4 to take care 4 bit padding */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Add 4 bits to take care of padding */
pos = err_vec[i].error_loc[j] +
BCH4_BIT_PAD;
-
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ pos = err_vec[i].error_loc[j];
+ break;
+ default:
+ return -EINVAL;
+ }
+ error_max = (ecc->size + actual_eccbytes) * 8;
/* Calculate bit position of error */
bit_pos = pos % 8;
@@ -1480,13 +1414,22 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
byte_pos = (error_max - pos - 1) / 8;
if (pos < error_max) {
- if (byte_pos < 512)
+ if (byte_pos < 512) {
+ pr_debug("bitflip@dat[%d]=%x\n",
+ byte_pos, data[byte_pos]);
data[byte_pos] ^= 1 << bit_pos;
- else
+ } else {
+ pr_debug("bitflip@oob[%d]=%x\n",
+ (byte_pos - 512),
+ spare_ecc[byte_pos - 512]);
spare_ecc[byte_pos - 512] ^=
1 << bit_pos;
+ }
+ } else {
+ pr_err("invalid bit-flip @ %d:%d\n",
+ byte_pos, bit_pos);
+ err = -EBADMSG;
}
- /* else, not interested to correct ecc */
}
}
@@ -1494,16 +1437,11 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
stat += err_vec[i].error_count;
/* Update page data with sector size */
- data += info->nand.ecc.size;
- spare_ecc += ecc_vector_size;
+ data += ecc->size;
+ spare_ecc += ecc->bytes;
}
- for (i = 0; i < eccsteps; i++)
- /* Return error if uncorrectable error present */
- if (err_vec[i].error_uncorrectable)
- return -EINVAL;
-
- return stat;
+ return (err) ? err : stat;
}
/**
@@ -1601,7 +1539,8 @@ static int is_elm_present(struct omap_nand_info *info,
struct device_node *elm_node, enum bch_ecc bch_type)
{
struct platform_device *pdev;
- info->is_elm_used = false;
+ struct nand_ecc_ctrl *ecc = &info->nand.ecc;
+ int err;
/* check whether elm-id is passed via DT */
if (!elm_node) {
pr_err("nand: error: ELM DT node not found\n");
@@ -1615,10 +1554,10 @@ static int is_elm_present(struct omap_nand_info *info,
}
/* ELM module available, now configure it */
info->elm_dev = &pdev->dev;
- if (elm_config(info->elm_dev, bch_type))
- return -ENODEV;
- info->is_elm_used = true;
- return 0;
+ err = elm_config(info->elm_dev, bch_type,
+ (info->mtd.writesize / ecc->size), ecc->size, ecc->bytes);
+
+ return err;
}
#endif /* CONFIG_MTD_NAND_ECC_BCH */
@@ -1657,6 +1596,7 @@ static int omap_nand_probe(struct platform_device *pdev)
info->gpmc_cs = pdata->cs;
info->reg = pdata->reg;
info->of_node = pdata->of_node;
+ info->ecc_opt = pdata->ecc_opt;
mtd = &info->mtd;
mtd->priv = &info->nand;
mtd->name = dev_name(&pdev->dev);
@@ -1666,27 +1606,11 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->options |= NAND_SKIP_BBTSCAN;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- err = -EINVAL;
- dev_err(&pdev->dev, "error getting memory resource\n");
- goto return_error;
- }
+ nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nand_chip->IO_ADDR_R))
+ return PTR_ERR(nand_chip->IO_ADDR_R);
info->phys_base = res->start;
- info->mem_size = resource_size(res);
-
- if (!devm_request_mem_region(&pdev->dev, info->phys_base,
- info->mem_size, pdev->dev.driver->name)) {
- err = -EBUSY;
- goto return_error;
- }
-
- nand_chip->IO_ADDR_R = devm_ioremap(&pdev->dev, info->phys_base,
- info->mem_size);
- if (!nand_chip->IO_ADDR_R) {
- err = -ENOMEM;
- goto return_error;
- }
nand_chip->controller = &info->controller;
@@ -1812,7 +1736,7 @@ static int omap_nand_probe(struct platform_device *pdev)
/* populate MTD interface based on ECC scheme */
nand_chip->ecc.layout = &omap_oobinfo;
ecclayout = &omap_oobinfo;
- switch (pdata->ecc_opt) {
+ switch (info->ecc_opt) {
case OMAP_ECC_HAM1_CODE_HW:
pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
nand_chip->ecc.mode = NAND_ECC_HW;
@@ -1844,9 +1768,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.size = 512;
nand_chip->ecc.bytes = 7;
nand_chip->ecc.strength = 4;
- nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap3_calculate_ecc_bch4;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
/* define ECC layout */
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
@@ -1884,9 +1808,9 @@ static int omap_nand_probe(struct platform_device *pdev)
/* 14th bit is kept reserved for ROM-code compatibility */
nand_chip->ecc.bytes = 7 + 1;
nand_chip->ecc.strength = 4;
- nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap3_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
/* define ECC layout */
@@ -1919,9 +1843,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.size = 512;
nand_chip->ecc.bytes = 13;
nand_chip->ecc.strength = 8;
- nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap3_calculate_ecc_bch8;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
/* define ECC layout */
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
@@ -1960,9 +1884,9 @@ static int omap_nand_probe(struct platform_device *pdev)
/* 14th bit is kept reserved for ROM-code compatibility */
nand_chip->ecc.bytes = 13 + 1;
nand_chip->ecc.strength = 8;
- nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap3_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
/* This ECC scheme requires ELM H/W block */
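The error-location handling in omap_elm_correct_data() above converts the ELM-reported bit index, counted over the whole (sector + ECC) codeword, into a byte and bit position, flipping either the data or the spare area (for BCH4 the index is first padded by BCH4_BIT_PAD). A small worked example of that arithmetic with BCH8 numbers and a made-up error location:

#include <stdio.h>

int main(void)
{
	int ecc_size = 512;		/* sector bytes covered by one codeword */
	int actual_eccbytes = 13;	/* BCH8, ROM-compat pad byte excluded */
	int error_max = (ecc_size + actual_eccbytes) * 8;	/* 4200 bits */

	int pos = 4195;			/* made-up ELM error location */
	int bit_pos = pos % 8;				/* 3 */
	int byte_pos = (error_max - pos - 1) / 8;	/* 0 */

	if (pos < error_max) {
		if (byte_pos < 512)
			printf("flip data[%d], bit %d\n", byte_pos, bit_pos);
		else
			printf("flip spare[%d], bit %d\n",
			       byte_pos - 512, bit_pos);
	}
	return 0;
}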
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 90f871acb0ef..2c98f9da7471 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -23,7 +23,6 @@
#undef DEBUG
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 2a7a0b27ac38..7588fe2c127f 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -38,7 +38,6 @@
#include <linux/platform_data/mtd-nand-pxa3xx.h>
-#define NAND_DEV_READY_TIMEOUT 50
#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
#define NAND_STOP_DELAY (2 * HZ/50)
#define PAGE_CHUNK_SIZE (2048)
@@ -1531,7 +1530,7 @@ KEEP_CONFIG:
if (!ret) {
dev_err(&info->pdev->dev,
"ECC strength %d at page size %d is not supported\n",
- chip->ecc_strength_ds, mtd->writesize);
+ ecc_strength, mtd->writesize);
return -ENODEV;
}
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index f0918e7411d9..79acbb8691b5 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/io.h>
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index d72783dd7b96..c0670237e7a2 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -897,7 +897,7 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
if (!flctl->qos_request) {
ret = dev_pm_qos_add_request(&flctl->pdev->dev,
&flctl->pm_qos,
- DEV_PM_QOS_LATENCY,
+ DEV_PM_QOS_RESUME_LATENCY,
100);
if (ret < 0)
dev_err(&flctl->pdev->dev,
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 8e1919b6f074..093c29ac1a13 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -13,7 +13,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 6547c84afc3a..d945473c3882 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -25,7 +25,6 @@
#include <linux/device.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 1de33b5d3903..635ee0027691 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -3238,20 +3237,17 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
/**
* onenand_get_fact_prot_info - [MTD Interface] Read factory OTP info
* @param mtd MTD device structure
- * @param buf the databuffer to put/get data
* @param len number of bytes to read
+ * @param retlen pointer to variable to store the number of read bytes
+ * @param buf the databuffer to put/get data
*
* Read factory OTP info.
*/
-static int onenand_get_fact_prot_info(struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int onenand_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
{
- size_t retlen;
- int ret;
-
- ret = onenand_otp_walk(mtd, 0, len, &retlen, (u_char *) buf, NULL, MTD_OTP_FACTORY);
-
- return ret ? : retlen;
+ return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL,
+ MTD_OTP_FACTORY);
}
/**
@@ -3273,20 +3269,17 @@ static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
/**
* onenand_get_user_prot_info - [MTD Interface] Read user OTP info
* @param mtd MTD device structure
- * @param buf the databuffer to put/get data
+ * @param retlen pointer to variable to store the number of read bytes
* @param len number of bytes to read
+ * @param buf the databuffer to put/get data
*
* Read user OTP info.
*/
-static int onenand_get_user_prot_info(struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int onenand_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
{
- size_t retlen;
- int ret;
-
- ret = onenand_otp_walk(mtd, 0, len, &retlen, (u_char *) buf, NULL, MTD_OTP_USER);
-
- return ret ? : retlen;
+ return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL,
+ MTD_OTP_USER);
}
/**
@@ -3995,11 +3988,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
/* Allocate buffers, if necessary */
if (!this->page_buf) {
this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
- if (!this->page_buf) {
- printk(KERN_ERR "%s: Can't allocate page_buf\n",
- __func__);
+ if (!this->page_buf)
return -ENOMEM;
- }
#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL);
if (!this->verify_buf) {
@@ -4012,8 +4002,6 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
if (!this->oob_buf) {
this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
if (!this->oob_buf) {
- printk(KERN_ERR "%s: Can't allocate oob_buf\n",
- __func__);
if (this->options & ONENAND_PAGEBUF_ALLOC) {
this->options &= ~ONENAND_PAGEBUF_ALLOC;
kfree(this->page_buf);
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index df7400dd4df8..b1a792fd1c23 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -872,10 +872,8 @@ static int s3c_onenand_probe(struct platform_device *pdev)
size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
mtd = kzalloc(size, GFP_KERNEL);
- if (!mtd) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!mtd)
return -ENOMEM;
- }
onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
if (!onenand) {
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index 233b946e5d66..d1cbf26db2c0 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -602,8 +602,7 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
if (rc) {
printk(KERN_ERR PREFIX "error writing '%s' at "
"0x%lx\n", part->mbd.mtd->name, addr);
- if (rc)
- goto err;
+ goto err;
}
if (block == part->current_block)
part->header_cache[offset + HEADER_MAP_OFFSET] = del;
@@ -675,8 +674,7 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
if (rc) {
printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
part->mbd.mtd->name, addr);
- if (rc)
- goto err;
+ goto err;
}
part->sector_map[sector] = addr;
@@ -695,8 +693,7 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
if (rc) {
printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
part->mbd.mtd->name, addr);
- if (rc)
- goto err;
+ goto err;
}
block->used_sectors++;
block->free_sectors--;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 4b8e89583f2a..cf49c22673b9 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -59,15 +59,12 @@ static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
struct attribute_group *attr_group;
struct attribute **attributes;
struct sm_sysfs_attribute *vendor_attribute;
+ char *vendor;
- int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
- SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
-
- char *vendor = kmalloc(vendor_len, GFP_KERNEL);
+ vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
+ SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
if (!vendor)
goto error1;
- memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
- vendor[vendor_len] = 0;
/* Initialize sysfs attributes */
vendor_attribute =
@@ -78,7 +75,7 @@ static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
sysfs_attr_init(&vendor_attribute->dev_attr.attr);
vendor_attribute->data = vendor;
- vendor_attribute->len = vendor_len;
+ vendor_attribute->len = strlen(vendor);
vendor_attribute->dev_attr.attr.name = "vendor";
vendor_attribute->dev_attr.attr.mode = S_IRUGO;
vendor_attribute->dev_attr.show = sm_attr_show;
diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c
index c818a63532e7..111ee46a7428 100644
--- a/drivers/mtd/tests/mtd_test.c
+++ b/drivers/mtd/tests/mtd_test.c
@@ -1,6 +1,5 @@
#define pr_fmt(fmt) "mtd_test: " fmt
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/printk.h>
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 36663af56d89..f0855ce08ed9 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -87,4 +87,20 @@ config MTD_UBI_GLUEBI
work on top of UBI. Do not enable this unless you use legacy
software.
+config MTD_UBI_BLOCK
+ bool "Read-only block devices on top of UBI volumes"
+ default n
+ depends on BLOCK
+ help
+ This option enables read-only UBI block device support. UBI block
+ devices will be layered on top of UBI volumes, which means that the
+ UBI driver will transparently handle things like bad eraseblocks and
+ bit-flips. You can put any block-oriented file system on top of UBI
+ volumes in read-only mode (e.g., ext4), but it is probably most
+ practical for read-only file systems, like squashfs.
+
+ When selected, this feature will be built into the UBI driver.
+
+ If in doubt, say "N".
+
endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index b46b0c978581..4e3c3d70d8c3 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -3,5 +3,6 @@ obj-$(CONFIG_MTD_UBI) += ubi.o
ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
ubi-y += misc.o debug.o
ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
+ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
new file mode 100644
index 000000000000..7ff473c871a9
--- /dev/null
+++ b/drivers/mtd/ubi/block.c
@@ -0,0 +1,647 @@
+/*
+ * Copyright (c) 2014 Ezequiel Garcia
+ * Copyright (c) 2011 Free Electrons
+ *
+ * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2007
+ * Authors: Artem Bityutskiy, Frank Haverkamp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ */
+
+/*
+ * Read-only block devices on top of UBI volumes
+ *
+ * A simple implementation to allow a block device to be layered on top of a
+ * UBI volume. The implementation is provided by creating a static 1-to-1
+ * mapping between the block device and the UBI volume.
+ *
+ * The addressed byte is obtained from the addressed block sector, which is
+ * mapped linearly into the corresponding LEB:
+ *
+ * LEB number = addressed byte / LEB size
+ *
+ * This feature is compiled in the UBI core, and adds a 'block' parameter
+ * to allow early creation of block devices on top of UBI volumes. Runtime
+ * block creation/removal for UBI volumes is provided through two UBI ioctls:
+ * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/ubi.h>
+#include <linux/workqueue.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <asm/div64.h>
+
+#include "ubi-media.h"
+#include "ubi.h"
+
+/* Maximum number of supported devices */
+#define UBIBLOCK_MAX_DEVICES 32
+
+/* Maximum length of the 'block=' parameter */
+#define UBIBLOCK_PARAM_LEN 63
+
+/* Maximum number of comma-separated items in the 'block=' parameter */
+#define UBIBLOCK_PARAM_COUNT 2
+
+struct ubiblock_param {
+ int ubi_num;
+ int vol_id;
+ char name[UBIBLOCK_PARAM_LEN+1];
+};
+
+/* Number of elements set in the @ubiblock_param array */
+static int ubiblock_devs __initdata;
+
+/* MTD devices specification parameters */
+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
+
+struct ubiblock {
+ struct ubi_volume_desc *desc;
+ int ubi_num;
+ int vol_id;
+ int refcnt;
+ int leb_size;
+
+ struct gendisk *gd;
+ struct request_queue *rq;
+
+ struct workqueue_struct *wq;
+ struct work_struct work;
+
+ struct mutex dev_mutex;
+ spinlock_t queue_lock;
+ struct list_head list;
+};
+
+/* Linked list of all ubiblock instances */
+static LIST_HEAD(ubiblock_devices);
+static DEFINE_MUTEX(devices_mutex);
+static int ubiblock_major;
+
+static int __init ubiblock_set_param(const char *val,
+ const struct kernel_param *kp)
+{
+ int i, ret;
+ size_t len;
+ struct ubiblock_param *param;
+ char buf[UBIBLOCK_PARAM_LEN];
+ char *pbuf = &buf[0];
+ char *tokens[UBIBLOCK_PARAM_COUNT];
+
+ if (!val)
+ return -EINVAL;
+
+ len = strnlen(val, UBIBLOCK_PARAM_LEN);
+ if (len == 0) {
+ ubi_warn("block: empty 'block=' parameter - ignored\n");
+ return 0;
+ }
+
+ if (len == UBIBLOCK_PARAM_LEN) {
+ ubi_err("block: parameter \"%s\" is too long, max. is %d\n",
+ val, UBIBLOCK_PARAM_LEN);
+ return -EINVAL;
+ }
+
+ strcpy(buf, val);
+
+ /* Get rid of the final newline */
+ if (buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
+ tokens[i] = strsep(&pbuf, ",");
+
+ param = &ubiblock_param[ubiblock_devs];
+ if (tokens[1]) {
+ /* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
+ ret = kstrtoint(tokens[0], 10, &param->ubi_num);
+ if (ret < 0)
+ return -EINVAL;
+
+ /* Second param can be a number or a name */
+ ret = kstrtoint(tokens[1], 10, &param->vol_id);
+ if (ret < 0) {
+ param->vol_id = -1;
+ strcpy(param->name, tokens[1]);
+ }
+
+ } else {
+ /* One parameter: must be device path */
+ strcpy(param->name, tokens[0]);
+ param->ubi_num = -1;
+ param->vol_id = -1;
+ }
+
+ ubiblock_devs++;
+
+ return 0;
+}
+
+static struct kernel_param_ops ubiblock_param_ops = {
+ .set = ubiblock_set_param,
+};
+module_param_cb(block, &ubiblock_param_ops, NULL, 0);
+MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
+ "Multiple \"block\" parameters may be specified.\n"
+ "UBI volumes may be specified by their number, name, or path to the device node.\n"
+ "Examples\n"
+ "Using the UBI volume path:\n"
+ "ubi.block=/dev/ubi0_0\n"
+ "Using the UBI device, and the volume name:\n"
+ "ubi.block=0,rootfs\n"
+ "Using both UBI device number and UBI volume number:\n"
+ "ubi.block=0,0\n");
+
+static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
+{
+ struct ubiblock *dev;
+
+ list_for_each_entry(dev, &ubiblock_devices, list)
+ if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
+ return dev;
+ return NULL;
+}
+
+static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
+ int leb, int offset, int len)
+{
+ int ret;
+
+ ret = ubi_read(dev->desc, leb, buffer, offset, len);
+ if (ret) {
+ ubi_err("%s ubi_read error %d",
+ dev->gd->disk_name, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int ubiblock_read(struct ubiblock *dev, char *buffer,
+ sector_t sec, int len)
+{
+ int ret, leb, offset;
+ int bytes_left = len;
+ int to_read = len;
+ u64 pos = sec << 9;
+
+ /* Get LEB:offset address to read from */
+ offset = do_div(pos, dev->leb_size);
+ leb = pos;
+
+ while (bytes_left) {
+ /*
+ * We can only read one LEB at a time. Therefore if the read
+ * length is larger than one LEB size, we split the operation.
+ */
+ if (offset + to_read > dev->leb_size)
+ to_read = dev->leb_size - offset;
+
+ ret = ubiblock_read_to_buf(dev, buffer, leb, offset, to_read);
+ if (ret)
+ return ret;
+
+ buffer += to_read;
+ bytes_left -= to_read;
+ to_read = bytes_left;
+ leb += 1;
+ offset = 0;
+ }
+ return 0;
+}
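A quick userspace illustration of the sector-to-LEB arithmetic implemented by ubiblock_read() above: the 512-byte sector index is turned into an LEB number plus byte offset, and a request crossing an LEB boundary is split. The LEB size and request below are example values only.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t leb_size = 126976;	/* e.g. 128KiB PEB minus two 2KiB header pages */
	uint64_t sec = 1000;		/* 512-byte sector index */
	int len = 8192;			/* request length in bytes */

	uint64_t pos = sec << 9;		/* byte address: 512000 */
	int leb = (int)(pos / leb_size);	/* 4 */
	int offset = (int)(pos % leb_size);	/* 4096 */
	int to_read = len;

	/* Split the request at LEB boundaries, like the while loop above */
	while (len) {
		if (offset + to_read > (int)leb_size)
			to_read = (int)leb_size - offset;
		printf("read LEB %d, offset %d, %d bytes\n",
		       leb, offset, to_read);
		len -= to_read;
		to_read = len;
		leb++;
		offset = 0;
	}
	return 0;
}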
+
+static int do_ubiblock_request(struct ubiblock *dev, struct request *req)
+{
+ int len, ret;
+ sector_t sec;
+
+ if (req->cmd_type != REQ_TYPE_FS)
+ return -EIO;
+
+ if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
+ get_capacity(req->rq_disk))
+ return -EIO;
+
+ if (rq_data_dir(req) != READ)
+ return -ENOSYS; /* Write not implemented */
+
+ sec = blk_rq_pos(req);
+ len = blk_rq_cur_bytes(req);
+
+ /*
+ * Let's prevent the device from being removed while we're doing I/O
+ * work. Notice that this means we serialize all the I/O operations,
+ * but it's probably of no impact given the NAND core serializes
+ * flash access anyway.
+ */
+ mutex_lock(&dev->dev_mutex);
+ ret = ubiblock_read(dev, req->buffer, sec, len);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static void ubiblock_do_work(struct work_struct *work)
+{
+ struct ubiblock *dev =
+ container_of(work, struct ubiblock, work);
+ struct request_queue *rq = dev->rq;
+ struct request *req;
+ int res;
+
+ spin_lock_irq(rq->queue_lock);
+
+ req = blk_fetch_request(rq);
+ while (req) {
+
+ spin_unlock_irq(rq->queue_lock);
+ res = do_ubiblock_request(dev, req);
+ spin_lock_irq(rq->queue_lock);
+
+ /*
+ * If we're done with this request,
+ * we need to fetch a new one
+ */
+ if (!__blk_end_request_cur(req, res))
+ req = blk_fetch_request(rq);
+ }
+
+ spin_unlock_irq(rq->queue_lock);
+}
+
+static void ubiblock_request(struct request_queue *rq)
+{
+ struct ubiblock *dev;
+ struct request *req;
+
+ dev = rq->queuedata;
+
+ if (!dev)
+ while ((req = blk_fetch_request(rq)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
+ else
+ queue_work(dev->wq, &dev->work);
+}
+
+static int ubiblock_open(struct block_device *bdev, fmode_t mode)
+{
+ struct ubiblock *dev = bdev->bd_disk->private_data;
+ int ret;
+
+ mutex_lock(&dev->dev_mutex);
+ if (dev->refcnt > 0) {
+ /*
+ * The volume is already open, just increase the reference
+ * counter.
+ */
+ goto out_done;
+ }
+
+ /*
+ * We want users to be aware they should only mount us as read-only.
+ * It's just a paranoid check, as write requests will get rejected
+ * in any case.
+ */
+ if (mode & FMODE_WRITE) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
+ if (IS_ERR(dev->desc)) {
+ ubi_err("%s failed to open ubi volume %d_%d",
+ dev->gd->disk_name, dev->ubi_num, dev->vol_id);
+ ret = PTR_ERR(dev->desc);
+ dev->desc = NULL;
+ goto out_unlock;
+ }
+
+out_done:
+ dev->refcnt++;
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+
+out_unlock:
+ mutex_unlock(&dev->dev_mutex);
+ return ret;
+}
+
+static void ubiblock_release(struct gendisk *gd, fmode_t mode)
+{
+ struct ubiblock *dev = gd->private_data;
+
+ mutex_lock(&dev->dev_mutex);
+ dev->refcnt--;
+ if (dev->refcnt == 0) {
+ ubi_close_volume(dev->desc);
+ dev->desc = NULL;
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ /* Some tools might require this information */
+ geo->heads = 1;
+ geo->cylinders = 1;
+ geo->sectors = get_capacity(bdev->bd_disk);
+ geo->start = 0;
+ return 0;
+}
+
+static const struct block_device_operations ubiblock_ops = {
+ .owner = THIS_MODULE,
+ .open = ubiblock_open,
+ .release = ubiblock_release,
+ .getgeo = ubiblock_getgeo,
+};
+
+int ubiblock_create(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+ struct gendisk *gd;
+ int disk_capacity;
+ int ret;
+
+ /* Check that the volume isn't already handled */
+ mutex_lock(&devices_mutex);
+ if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
+ mutex_unlock(&devices_mutex);
+ return -EEXIST;
+ }
+ mutex_unlock(&devices_mutex);
+
+ dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mutex_init(&dev->dev_mutex);
+
+ dev->ubi_num = vi->ubi_num;
+ dev->vol_id = vi->vol_id;
+ dev->leb_size = vi->usable_leb_size;
+
+ /* Initialize the gendisk of this ubiblock device */
+ gd = alloc_disk(1);
+ if (!gd) {
+ ubi_err("block: alloc_disk failed");
+ ret = -ENODEV;
+ goto out_free_dev;
+ }
+
+ gd->fops = &ubiblock_ops;
+ gd->major = ubiblock_major;
+ gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id;
+ gd->private_data = dev;
+ sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
+ disk_capacity = (vi->size * vi->usable_leb_size) >> 9;
+ set_capacity(gd, disk_capacity);
+ dev->gd = gd;
+
+ spin_lock_init(&dev->queue_lock);
+ dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock);
+ if (!dev->rq) {
+ ubi_err("block: blk_init_queue failed");
+ ret = -ENODEV;
+ goto out_put_disk;
+ }
+
+ dev->rq->queuedata = dev;
+ dev->gd->queue = dev->rq;
+
+ /*
+ * Create one workqueue per volume (per registered block device).
+	 * Remember workqueues are cheap, they're not threads.
+ */
+	dev->wq = alloc_workqueue(gd->disk_name, 0, 0);
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto out_free_queue;
+	}
+ INIT_WORK(&dev->work, ubiblock_do_work);
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&dev->list, &ubiblock_devices);
+ mutex_unlock(&devices_mutex);
+
+ /* Must be the last step: anyone can call file ops from now on */
+ add_disk(dev->gd);
+ ubi_msg("%s created from ubi%d:%d(%s)",
+ dev->gd->disk_name, dev->ubi_num, dev->vol_id, vi->name);
+ return 0;
+
+out_free_queue:
+ blk_cleanup_queue(dev->rq);
+out_put_disk:
+ put_disk(dev->gd);
+out_free_dev:
+ kfree(dev);
+
+ return ret;
+}
+
+static void ubiblock_cleanup(struct ubiblock *dev)
+{
+ del_gendisk(dev->gd);
+ blk_cleanup_queue(dev->rq);
+ ubi_msg("%s released", dev->gd->disk_name);
+ put_disk(dev->gd);
+}
+
+int ubiblock_remove(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+
+ mutex_lock(&devices_mutex);
+ dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
+ if (!dev) {
+ mutex_unlock(&devices_mutex);
+ return -ENODEV;
+ }
+
+ /* Found a device, let's lock it so we can check if it's busy */
+ mutex_lock(&dev->dev_mutex);
+ if (dev->refcnt > 0) {
+ mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+ return -EBUSY;
+ }
+
+ /* Remove from device list */
+ list_del(&dev->list);
+ mutex_unlock(&devices_mutex);
+
+ /* Flush pending work and stop this workqueue */
+ destroy_workqueue(dev->wq);
+
+ ubiblock_cleanup(dev);
+ mutex_unlock(&dev->dev_mutex);
+ kfree(dev);
+ return 0;
+}
+
+static void ubiblock_resize(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+ int disk_capacity;
+
+ /*
+ * Need to lock the device list until we stop using the device,
+ * otherwise the device struct might get released in
+ * 'ubiblock_remove()'.
+ */
+ mutex_lock(&devices_mutex);
+ dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
+ if (!dev) {
+ mutex_unlock(&devices_mutex);
+ return;
+ }
+
+ mutex_lock(&dev->dev_mutex);
+ disk_capacity = (vi->size * vi->usable_leb_size) >> 9;
+ set_capacity(dev->gd, disk_capacity);
+ ubi_msg("%s resized to %d LEBs", dev->gd->disk_name, vi->size);
+ mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+}
+
+static int ubiblock_notify(struct notifier_block *nb,
+ unsigned long notification_type, void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (notification_type) {
+ case UBI_VOLUME_ADDED:
+ /*
+ * We want to enforce explicit block device creation for
+ * volumes, so when a volume is added we do nothing.
+ */
+ break;
+ case UBI_VOLUME_REMOVED:
+ ubiblock_remove(&nt->vi);
+ break;
+ case UBI_VOLUME_RESIZED:
+ ubiblock_resize(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ubiblock_notifier = {
+ .notifier_call = ubiblock_notify,
+};
+
+static struct ubi_volume_desc * __init
+open_volume_desc(const char *name, int ubi_num, int vol_id)
+{
+ if (ubi_num == -1)
+ /* No ubi num, name must be a vol device path */
+ return ubi_open_volume_path(name, UBI_READONLY);
+ else if (vol_id == -1)
+ /* No vol_id, must be vol_name */
+ return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
+ else
+ return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
+}
+
+static int __init ubiblock_create_from_param(void)
+{
+	int i, ret = 0;
+ struct ubiblock_param *p;
+ struct ubi_volume_desc *desc;
+ struct ubi_volume_info vi;
+
+ for (i = 0; i < ubiblock_devs; i++) {
+ p = &ubiblock_param[i];
+
+ desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
+ if (IS_ERR(desc)) {
+ ubi_err("block: can't open volume, err=%ld\n",
+ PTR_ERR(desc));
+ ret = PTR_ERR(desc);
+ break;
+ }
+
+ ubi_get_volume_info(desc, &vi);
+ ubi_close_volume(desc);
+
+ ret = ubiblock_create(&vi);
+ if (ret) {
+ ubi_err("block: can't add '%s' volume, err=%d\n",
+ vi.name, ret);
+ break;
+ }
+ }
+ return ret;
+}
+
+static void ubiblock_remove_all(void)
+{
+ struct ubiblock *next;
+ struct ubiblock *dev;
+
+ list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
+ /* Flush pending work and stop workqueue */
+ destroy_workqueue(dev->wq);
+ /* The module is being forcefully removed */
+ WARN_ON(dev->desc);
+ /* Remove from device list */
+ list_del(&dev->list);
+ ubiblock_cleanup(dev);
+ kfree(dev);
+ }
+}
+
+int __init ubiblock_init(void)
+{
+ int ret;
+
+ ubiblock_major = register_blkdev(0, "ubiblock");
+ if (ubiblock_major < 0)
+ return ubiblock_major;
+
+ /* Attach block devices from 'block=' module param */
+ ret = ubiblock_create_from_param();
+ if (ret)
+ goto err_remove;
+
+ /*
+ * Block devices are only created upon user requests, so we ignore
+ * existing volumes.
+ */
+ ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
+ if (ret)
+ goto err_unreg;
+ return 0;
+
+err_unreg:
+ unregister_blkdev(ubiblock_major, "ubiblock");
+err_remove:
+ ubiblock_remove_all();
+ return ret;
+}
+
+void __exit ubiblock_exit(void)
+{
+ ubi_unregister_volume_notifier(&ubiblock_notifier);
+ ubiblock_remove_all();
+ unregister_blkdev(ubiblock_major, "ubiblock");
+}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 57deae961429..6e30a3c280d0 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1298,6 +1298,15 @@ static int __init ubi_init(void)
}
}
+ err = ubiblock_init();
+ if (err) {
+ ubi_err("block: cannot initialize, error %d", err);
+
+		/* See comment above re: ubi_is_module(). */
+ if (ubi_is_module())
+ goto out_detach;
+ }
+
return 0;
out_detach:
@@ -1326,6 +1335,8 @@ static void __exit ubi_exit(void)
{
int i;
+ ubiblock_exit();
+
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {
mutex_lock(&ubi_devices_mutex);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 8ca49f2043e4..f54562a5998e 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -561,6 +561,26 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
+ /* Create a R/O block device on top of the UBI volume */
+ case UBI_IOCVOLCRBLK:
+ {
+ struct ubi_volume_info vi;
+
+ ubi_get_volume_info(desc, &vi);
+ err = ubiblock_create(&vi);
+ break;
+ }
+
+ /* Remove the R/O block device */
+ case UBI_IOCVOLRMBLK:
+ {
+ struct ubi_volume_info vi;
+
+ ubi_get_volume_info(desc, &vi);
+ err = ubiblock_remove(&vi);
+ break;
+ }
+
default:
err = -ENOTTY;
break;
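
The two ioctl cases added above, together with the 'block=' module parameter, are the only ways a block device gets created on a volume. A minimal, hypothetical user-space sketch — assuming UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK are argument-less _IO commands as defined in the matching ubi-user.h update (not shown in this hunk) and that the volume character device is /dev/ubi0_0 — could look like:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <mtd/ubi-user.h>

	int main(void)
	{
		int fd = open("/dev/ubi0_0", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Ask UBI to create the read-only /dev/ubiblock0_0 device */
		if (ioctl(fd, UBI_IOCVOLCRBLK) < 0)
			perror("UBI_IOCVOLCRBLK");
		/* ... mount or read /dev/ubiblock0_0 here ... */
		/* Tear the block device down again */
		if (ioctl(fd, UBI_IOCVOLRMBLK) < 0)
			perror("UBI_IOCVOLRMBLK");
		close(fd);
		return 0;
	}
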
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 8ea6297a208f..7bf416329c19 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -22,7 +22,6 @@
#ifndef __UBI_UBI_H__
#define __UBI_UBI_H__
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
@@ -864,6 +863,26 @@ int ubi_update_fastmap(struct ubi_device *ubi);
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
int fm_anchor);
+/* block.c */
+#ifdef CONFIG_MTD_UBI_BLOCK
+int ubiblock_init(void);
+void ubiblock_exit(void);
+int ubiblock_create(struct ubi_volume_info *vi);
+int ubiblock_remove(struct ubi_volume_info *vi);
+#else
+static inline int ubiblock_init(void) { return 0; }
+static inline void ubiblock_exit(void) {}
+static inline int ubiblock_create(struct ubi_volume_info *vi)
+{
+ return -ENOSYS;
+}
+static inline int ubiblock_remove(struct ubi_volume_info *vi)
+{
+ return -ENOSYS;
+}
+#endif
+
+
/*
* ubi_rb_for_each_entry - walk an RB-tree.
* @rb: a pointer to type 'struct rb_node' to use as a loop counter
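
A hedged sketch of why callers such as ubi_init() and vol_cdev_ioctl() need no #ifdef of their own: when CONFIG_MTD_UBI_BLOCK is disabled, the static inline stubs declared above keep the generic code compiling and behaving sanely (hypothetical fragment, reusing only the declarations from this hunk):

	/* Sketch only, assuming CONFIG_MTD_UBI_BLOCK=n so the stubs apply */
	static int example_caller(struct ubi_volume_info *vi)
	{
		int err;

		err = ubiblock_init();		/* stub: no-op, returns 0 */
		if (err)
			return err;

		err = ubiblock_create(vi);	/* stub: returns -ENOSYS */
		ubiblock_exit();		/* stub: no-op */
		return err;
	}
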
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 494b888a6568..89402c3b64f8 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -177,11 +177,6 @@ config NETCONSOLE_DYNAMIC
config NETPOLL
def_bool NETCONSOLE
-config NETPOLL_TRAP
- bool "Netpoll traffic trapping"
- default n
- depends on NETPOLL
-
config NET_POLL_CONTROLLER
def_bool NETPOLL
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index dcde56057fe1..b667a51ed215 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -768,11 +768,11 @@ static int ad_lacpdu_send(struct port *port)
lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
- memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
+ ether_addr_copy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
* because we use it to identify loopback lacpdus in receive.
*/
- memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(lacpdu_header->hdr.h_source, slave->perm_hwaddr);
lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
lacpdu_header->lacpdu = port->lacpdu;
@@ -810,11 +810,11 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
marker_header = (struct bond_marker_header *)skb_put(skb, length);
- memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
+ ether_addr_copy(marker_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
* because we use it to identify loopback MARKERs in receive.
*/
- memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(marker_header->hdr.h_source, slave->perm_hwaddr);
marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
marker_header->marker = *marker;
@@ -1079,7 +1079,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/* detect loopback situation */
if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
&(port->actor_system))) {
- pr_err("%s: An illegal loopback occurred on adapter (%s).\nCheck the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
+ pr_err("%s: An illegal loopback occurred on adapter (%s)\n"
+ "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
port->slave->bond->dev->name,
port->slave->dev->name);
return;
@@ -1283,11 +1284,11 @@ static void ad_port_selection_logic(struct port *port)
/* meaning: the port was related to an aggregator
* but was not on the aggregator port list
*/
- pr_warn("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
- port->slave->bond->dev->name,
- port->actor_port_number,
- port->slave->dev->name,
- port->aggregator->aggregator_identifier);
+ pr_warn_ratelimited("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
+ port->slave->bond->dev->name,
+ port->actor_port_number,
+ port->slave->dev->name,
+ port->aggregator->aggregator_identifier);
}
}
/* search on all aggregators for a suitable aggregator for this port */
@@ -1444,9 +1445,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
break;
default:
- pr_warn("%s: Impossible agg select mode %d\n",
- curr->slave->bond->dev->name,
- __get_agg_selection_mode(curr->lag_ports));
+ pr_warn_ratelimited("%s: Impossible agg select mode %d\n",
+ curr->slave->bond->dev->name,
+ __get_agg_selection_mode(curr->lag_ports));
break;
}
@@ -1559,9 +1560,9 @@ static void ad_agg_selection_logic(struct aggregator *agg)
/* check if any partner replys */
if (best->is_individual) {
- pr_warn("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave ?
- best->slave->bond->dev->name : "NULL");
+ pr_warn_ratelimited("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
+ best->slave ?
+ best->slave->bond->dev->name : "NULL");
}
best->is_active = 1;
@@ -1948,7 +1949,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
* new aggregator
*/
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
- pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
+ pr_debug("Some port(s) related to LAG %d - replacing with LAG %d\n",
aggregator->aggregator_identifier,
new_aggregator->aggregator_identifier);
@@ -2080,8 +2081,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
/* select the active aggregator for the bond */
if (port) {
if (!port->slave) {
- pr_warn("%s: Warning: bond's first port is uninitialized\n",
- bond->dev->name);
+ pr_warn_ratelimited("%s: Warning: bond's first port is uninitialized\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2095,8 +2096,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
bond_for_each_slave_rcu(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
- pr_warn("%s: Warning: Found an uninitialized port\n",
- bond->dev->name);
+ pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2157,8 +2158,8 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
- pr_warn("%s: Warning: port of slave %s is uninitialized\n",
- slave->dev->name, slave->bond->dev->name);
+ pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
+ slave->dev->name, slave->bond->dev->name);
return ret;
}
@@ -2310,9 +2311,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->actor_oper_port_key = (port->actor_admin_port_key &=
~AD_SPEED_KEY_BITS);
}
- pr_debug("Port %d changed link status to %s",
- port->actor_port_number,
- (link == BOND_LINK_UP) ? "UP" : "DOWN");
+ pr_debug("Port %d changed link status to %s\n",
+ port->actor_port_number,
+ link == BOND_LINK_UP ? "UP" : "DOWN");
/* there is no need to reselect a new aggregator, just signal the
* state machines to reinitialize
*/
@@ -2390,17 +2391,16 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
}
}
- if (aggregator) {
- ad_info->aggregator_id = aggregator->aggregator_identifier;
- ad_info->ports = aggregator->num_of_ports;
- ad_info->actor_key = aggregator->actor_oper_aggregator_key;
- ad_info->partner_key = aggregator->partner_oper_aggregator_key;
- memcpy(ad_info->partner_system,
- aggregator->partner_system.mac_addr_value, ETH_ALEN);
- return 0;
- }
+ if (!aggregator)
+ return -1;
- return -1;
+ ad_info->aggregator_id = aggregator->aggregator_identifier;
+ ad_info->ports = aggregator->num_of_ports;
+ ad_info->actor_key = aggregator->actor_oper_aggregator_key;
+ ad_info->partner_key = aggregator->partner_oper_aggregator_key;
+ ether_addr_copy(ad_info->partner_system,
+ aggregator->partner_system.mac_addr_value);
+ return 0;
}
/* Wrapper used to hold bond->lock so no slave manipulation can occur */
@@ -2479,7 +2479,7 @@ out:
return NETDEV_TX_OK;
err_free:
/* no suitable interface, frame not sent */
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
goto out;
}
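
The memcpy() to ether_addr_copy() conversions in this file are intended to be behaviour-preserving: ether_addr_copy(), from <linux/etherdevice.h>, copies exactly ETH_ALEN (6) bytes and documents that both buffers must be u16-aligned, which the Ethernet headers and per-slave address arrays touched here are expected to satisfy. A minimal illustrative fragment (the function name is hypothetical; the structures are the existing bonding/networking ones):

	#include <linux/etherdevice.h>

	static void copy_source_addr(struct ethhdr *eh, const struct slave *slave)
	{
		/* Equivalent to memcpy(eh->h_source, slave->perm_hwaddr, ETH_ALEN),
		 * but makes the fixed 6-byte length and the alignment contract
		 * explicit.
		 */
		ether_addr_copy(eh->h_source, slave->perm_hwaddr);
	}
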
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index f4dd9592ac62..bb03b1df2f3e 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -28,7 +28,7 @@
#include <linux/netdevice.h>
#include <linux/if_ether.h>
-// General definitions
+/* General definitions */
#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
#define AD_TIMER_INTERVAL 100 /*msec*/
@@ -47,54 +47,54 @@ enum {
BOND_AD_COUNT = 2,
};
-// rx machine states(43.4.11 in the 802.3ad standard)
+/* rx machine states(43.4.11 in the 802.3ad standard) */
typedef enum {
AD_RX_DUMMY,
- AD_RX_INITIALIZE, // rx Machine
- AD_RX_PORT_DISABLED, // rx Machine
- AD_RX_LACP_DISABLED, // rx Machine
- AD_RX_EXPIRED, // rx Machine
- AD_RX_DEFAULTED, // rx Machine
- AD_RX_CURRENT // rx Machine
+ AD_RX_INITIALIZE, /* rx Machine */
+ AD_RX_PORT_DISABLED, /* rx Machine */
+ AD_RX_LACP_DISABLED, /* rx Machine */
+ AD_RX_EXPIRED, /* rx Machine */
+ AD_RX_DEFAULTED, /* rx Machine */
+ AD_RX_CURRENT /* rx Machine */
} rx_states_t;
-// periodic machine states(43.4.12 in the 802.3ad standard)
+/* periodic machine states(43.4.12 in the 802.3ad standard) */
typedef enum {
AD_PERIODIC_DUMMY,
- AD_NO_PERIODIC, // periodic machine
- AD_FAST_PERIODIC, // periodic machine
- AD_SLOW_PERIODIC, // periodic machine
- AD_PERIODIC_TX // periodic machine
+ AD_NO_PERIODIC, /* periodic machine */
+ AD_FAST_PERIODIC, /* periodic machine */
+ AD_SLOW_PERIODIC, /* periodic machine */
+ AD_PERIODIC_TX /* periodic machine */
} periodic_states_t;
-// mux machine states(43.4.13 in the 802.3ad standard)
+/* mux machine states(43.4.13 in the 802.3ad standard) */
typedef enum {
AD_MUX_DUMMY,
- AD_MUX_DETACHED, // mux machine
- AD_MUX_WAITING, // mux machine
- AD_MUX_ATTACHED, // mux machine
- AD_MUX_COLLECTING_DISTRIBUTING // mux machine
+ AD_MUX_DETACHED, /* mux machine */
+ AD_MUX_WAITING, /* mux machine */
+ AD_MUX_ATTACHED, /* mux machine */
+ AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
} mux_states_t;
-// tx machine states(43.4.15 in the 802.3ad standard)
+/* tx machine states(43.4.15 in the 802.3ad standard) */
typedef enum {
AD_TX_DUMMY,
- AD_TRANSMIT // tx Machine
+ AD_TRANSMIT /* tx Machine */
} tx_states_t;
-// rx indication types
+/* rx indication types */
typedef enum {
- AD_TYPE_LACPDU = 1, // type lacpdu
- AD_TYPE_MARKER // type marker
+ AD_TYPE_LACPDU = 1, /* type lacpdu */
+ AD_TYPE_MARKER /* type marker */
} pdu_type_t;
-// rx marker indication types
+/* rx marker indication types */
typedef enum {
- AD_MARKER_INFORMATION_SUBTYPE = 1, // marker imformation subtype
- AD_MARKER_RESPONSE_SUBTYPE // marker response subtype
+	AD_MARKER_INFORMATION_SUBTYPE = 1, /* marker information subtype */
+ AD_MARKER_RESPONSE_SUBTYPE /* marker response subtype */
} bond_marker_subtype_t;
-// timers types(43.4.9 in the 802.3ad standard)
+/* timers types(43.4.9 in the 802.3ad standard) */
typedef enum {
AD_CURRENT_WHILE_TIMER,
AD_ACTOR_CHURN_TIMER,
@@ -105,35 +105,35 @@ typedef enum {
#pragma pack(1)
-// Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard)
+/* Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) */
typedef struct lacpdu {
- u8 subtype; // = LACP(= 0x01)
+ u8 subtype; /* = LACP(= 0x01) */
u8 version_number;
- u8 tlv_type_actor_info; // = actor information(type/length/value)
- u8 actor_information_length; // = 20
+ u8 tlv_type_actor_info; /* = actor information(type/length/value) */
+ u8 actor_information_length; /* = 20 */
__be16 actor_system_priority;
struct mac_addr actor_system;
__be16 actor_key;
__be16 actor_port_priority;
__be16 actor_port;
u8 actor_state;
- u8 reserved_3_1[3]; // = 0
- u8 tlv_type_partner_info; // = partner information
- u8 partner_information_length; // = 20
+ u8 reserved_3_1[3]; /* = 0 */
+ u8 tlv_type_partner_info; /* = partner information */
+ u8 partner_information_length; /* = 20 */
__be16 partner_system_priority;
struct mac_addr partner_system;
__be16 partner_key;
__be16 partner_port_priority;
__be16 partner_port;
u8 partner_state;
- u8 reserved_3_2[3]; // = 0
- u8 tlv_type_collector_info; // = collector information
- u8 collector_information_length; // = 16
+ u8 reserved_3_2[3]; /* = 0 */
+ u8 tlv_type_collector_info; /* = collector information */
+ u8 collector_information_length;/* = 16 */
__be16 collector_max_delay;
u8 reserved_12[12];
- u8 tlv_type_terminator; // = terminator
- u8 terminator_length; // = 0
- u8 reserved_50[50]; // = 0
+ u8 tlv_type_terminator; /* = terminator */
+ u8 terminator_length; /* = 0 */
+ u8 reserved_50[50]; /* = 0 */
} __packed lacpdu_t;
typedef struct lacpdu_header {
@@ -141,20 +141,20 @@ typedef struct lacpdu_header {
struct lacpdu lacpdu;
} __packed lacpdu_header_t;
-// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
+/* Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) */
typedef struct bond_marker {
- u8 subtype; // = 0x02 (marker PDU)
- u8 version_number; // = 0x01
- u8 tlv_type; // = 0x01 (marker information)
- // = 0x02 (marker response information)
- u8 marker_length; // = 0x16
- u16 requester_port; // The number assigned to the port by the requester
- struct mac_addr requester_system; // The requester's system id
- u32 requester_transaction_id; // The transaction id allocated by the requester,
- u16 pad; // = 0
- u8 tlv_type_terminator; // = 0x00
- u8 terminator_length; // = 0x00
- u8 reserved_90[90]; // = 0
+ u8 subtype; /* = 0x02 (marker PDU) */
+ u8 version_number; /* = 0x01 */
+ u8 tlv_type; /* = 0x01 (marker information) */
+ /* = 0x02 (marker response information) */
+ u8 marker_length; /* = 0x16 */
+ u16 requester_port; /* The number assigned to the port by the requester */
+ struct mac_addr requester_system; /* The requester's system id */
+	u32 requester_transaction_id;	/* The transaction id allocated by the requester */
+ u16 pad; /* = 0 */
+ u8 tlv_type_terminator; /* = 0x00 */
+ u8 terminator_length; /* = 0x00 */
+ u8 reserved_90[90]; /* = 0 */
} __packed bond_marker_t;
typedef struct bond_marker_header {
@@ -173,7 +173,7 @@ struct port;
#pragma pack(8)
#endif
-// aggregator structure(43.4.5 in the 802.3ad standard)
+/* aggregator structure(43.4.5 in the 802.3ad standard) */
typedef struct aggregator {
struct mac_addr aggregator_mac_address;
u16 aggregator_identifier;
@@ -183,12 +183,12 @@ typedef struct aggregator {
struct mac_addr partner_system;
u16 partner_system_priority;
u16 partner_oper_aggregator_key;
- u16 receive_state; // BOOLEAN
- u16 transmit_state; // BOOLEAN
+ u16 receive_state; /* BOOLEAN */
+ u16 transmit_state; /* BOOLEAN */
struct port *lag_ports;
- // ****** PRIVATE PARAMETERS ******
- struct slave *slave; // pointer to the bond slave that this aggregator belongs to
- u16 is_active; // BOOLEAN. Indicates if this aggregator is active
+ /* ****** PRIVATE PARAMETERS ****** */
+ struct slave *slave; /* pointer to the bond slave that this aggregator belongs to */
+ u16 is_active; /* BOOLEAN. Indicates if this aggregator is active */
u16 num_of_ports;
} aggregator_t;
@@ -201,12 +201,12 @@ struct port_params {
u16 port_state;
};
-// port structure(43.4.6 in the 802.3ad standard)
+/* port structure(43.4.6 in the 802.3ad standard) */
typedef struct port {
u16 actor_port_number;
u16 actor_port_priority;
- struct mac_addr actor_system; // This parameter is added here although it is not specified in the standard, just for simplification
- u16 actor_system_priority; // This parameter is added here although it is not specified in the standard, just for simplification
+ struct mac_addr actor_system; /* This parameter is added here although it is not specified in the standard, just for simplification */
+ u16 actor_system_priority; /* This parameter is added here although it is not specified in the standard, just for simplification */
u16 actor_port_aggregator_identifier;
bool ntt;
u16 actor_admin_port_key;
@@ -219,24 +219,24 @@ typedef struct port {
bool is_enabled;
- // ****** PRIVATE PARAMETERS ******
- u16 sm_vars; // all state machines variables for this port
- rx_states_t sm_rx_state; // state machine rx state
- u16 sm_rx_timer_counter; // state machine rx timer counter
- periodic_states_t sm_periodic_state;// state machine periodic state
- u16 sm_periodic_timer_counter; // state machine periodic timer counter
- mux_states_t sm_mux_state; // state machine mux state
- u16 sm_mux_timer_counter; // state machine mux timer counter
- tx_states_t sm_tx_state; // state machine tx state
- u16 sm_tx_timer_counter; // state machine tx timer counter(allways on - enter to transmit state 3 time per second)
- struct slave *slave; // pointer to the bond slave that this port belongs to
- struct aggregator *aggregator; // pointer to an aggregator that this port related to
- struct port *next_port_in_aggregator; // Next port on the linked list of the parent aggregator
- u32 transaction_id; // continuous number for identification of Marker PDU's;
- struct lacpdu lacpdu; // the lacpdu that will be sent for this port
+ /* ****** PRIVATE PARAMETERS ****** */
+ u16 sm_vars; /* all state machines variables for this port */
+ rx_states_t sm_rx_state; /* state machine rx state */
+ u16 sm_rx_timer_counter; /* state machine rx timer counter */
+ periodic_states_t sm_periodic_state; /* state machine periodic state */
+ u16 sm_periodic_timer_counter; /* state machine periodic timer counter */
+ mux_states_t sm_mux_state; /* state machine mux state */
+ u16 sm_mux_timer_counter; /* state machine mux timer counter */
+ tx_states_t sm_tx_state; /* state machine tx state */
+	u16 sm_tx_timer_counter;	/* state machine tx timer counter (always on - enters transmit state 3 times per second) */
+ struct slave *slave; /* pointer to the bond slave that this port belongs to */
+ struct aggregator *aggregator; /* pointer to an aggregator that this port related to */
+ struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */
+ u32 transaction_id; /* continuous number for identification of Marker PDU's; */
+ struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */
} port_t;
-// system structure
+/* system structure */
struct ad_system {
u16 sys_priority;
struct mac_addr sys_mac_addr;
@@ -246,27 +246,26 @@ struct ad_system {
#pragma pack()
#endif
-// ================= AD Exported structures to the main bonding code ==================
+/* ========== AD Exported structures to the main bonding code ========== */
#define BOND_AD_INFO(bond) ((bond)->ad_info)
#define SLAVE_AD_INFO(slave) ((slave)->ad_info)
struct ad_bond_info {
- struct ad_system system; /* 802.3ad system structure */
- u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
+ struct ad_system system; /* 802.3ad system structure */
+ u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
u16 aggregator_identifier;
};
struct ad_slave_info {
- struct aggregator aggregator; // 802.3ad aggregator structure
- struct port port; // 802.3ad port structure
- spinlock_t state_machine_lock; /* mutex state machines vs.
- incoming LACPDU */
+ struct aggregator aggregator; /* 802.3ad aggregator structure */
+ struct port port; /* 802.3ad port structure */
+ spinlock_t state_machine_lock; /* mutex state machines vs. incoming LACPDU */
u16 id;
};
-// ================= AD Exported functions to the main bonding code ==================
+/* ========== AD Exported functions to the main bonding code ========== */
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
-void bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct work_struct *);
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
@@ -281,5 +280,5 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
-#endif //__BOND_3AD_H__
+#endif /* __BOND_3AD_H__ */
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index e8f133e926aa..9f69e818b000 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -93,9 +93,8 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
int i;
u8 hash = 0;
- for (i = 0; i < hash_size; i++) {
+ for (i = 0; i < hash_size; i++)
hash ^= hash_start[i];
- }
return hash;
}
@@ -190,9 +189,8 @@ static int tlb_initialize(struct bonding *bond)
bond_info->tx_hashtbl = new_hashtbl;
- for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
+ for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
- }
_unlock_tx_hashtbl_bh(bond);
@@ -264,9 +262,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
hash_table[hash_index].next = next_index;
hash_table[hash_index].prev = TLB_NULL_INDEX;
- if (next_index != TLB_NULL_INDEX) {
+ if (next_index != TLB_NULL_INDEX)
hash_table[next_index].prev = hash_index;
- }
slave_info->head = hash_index;
slave_info->load +=
@@ -274,9 +271,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
}
}
- if (assigned_slave) {
+ if (assigned_slave)
hash_table[hash_index].tx_bytes += skb_len;
- }
return assigned_slave;
}
@@ -329,7 +325,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
_lock_rx_hashtbl_bh(bond);
- hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+ hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
client_info = &(bond_info->rx_hashtbl[hash_index]);
if ((client_info->assigned) &&
@@ -337,7 +333,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
(client_info->ip_dst == arp->ip_src) &&
(!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
/* update the clients MAC address */
- memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_src);
client_info->ntt = 1;
bond_info->rx_ntt = 1;
}
@@ -451,9 +447,8 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
*/
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
{
- if (!bond->curr_active_slave) {
+ if (!bond->curr_active_slave)
return;
- }
if (!bond->alb_info.primary_is_promisc) {
if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1))
@@ -513,9 +508,8 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
write_lock_bh(&bond->curr_slave_lock);
- if (slave != bond->curr_active_slave) {
+ if (slave != bond->curr_active_slave)
rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
- }
write_unlock_bh(&bond->curr_slave_lock);
}
@@ -524,9 +518,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
{
int i;
- if (!client_info->slave) {
+ if (!client_info->slave)
return;
- }
for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
struct sk_buff *skb;
@@ -574,9 +567,8 @@ static void rlb_update_rx_clients(struct bonding *bond)
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->ntt) {
rlb_update_client(client_info);
- if (bond_info->rlb_update_retry_counter == 0) {
+ if (bond_info->rlb_update_retry_counter == 0)
client_info->ntt = 0;
- }
}
}
@@ -610,10 +602,10 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
}
}
- // update the team's flag only after the whole iteration
+ /* update the team's flag only after the whole iteration */
if (ntt) {
bond_info->rx_ntt = 1;
- //fasten the change
+ /* fasten the change */
bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
}
@@ -677,9 +669,9 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
/* the entry is already assigned to this client */
if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
/* update mac address from arp */
- memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_dst);
}
- memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_src, arp->mac_src);
assigned_slave = client_info->slave;
if (assigned_slave) {
@@ -719,8 +711,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
* will be updated with clients actual unicast mac address
* upon receiving an arp reply.
*/
- memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
- memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_dst);
+ ether_addr_copy(client_info->mac_src, arp->mac_src);
client_info->slave = assigned_slave;
if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
@@ -770,9 +762,8 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
* rx channel
*/
tx_slave = rlb_choose_channel(skb, bond);
- if (tx_slave) {
- memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN);
- }
+ if (tx_slave)
+ ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
pr_debug("Server sent ARP Reply packet\n");
} else if (arp->op_code == htons(ARPOP_REQUEST)) {
/* Create an entry in the rx_hashtbl for this client as a
@@ -824,9 +815,8 @@ static void rlb_rebalance(struct bonding *bond)
}
/* update the team's flag only after the whole iteration */
- if (ntt) {
+ if (ntt)
bond_info->rx_ntt = 1;
- }
_unlock_rx_hashtbl_bh(bond);
}
@@ -923,7 +913,7 @@ static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- u32 ip_src_hash = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+ u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
u32 index;
_lock_rx_hashtbl_bh(bond);
@@ -957,9 +947,8 @@ static int rlb_initialize(struct bonding *bond)
bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
- for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
+ for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
rlb_init_table_entry(bond_info->rx_hashtbl + i);
- }
_unlock_rx_hashtbl_bh(bond);
@@ -1014,9 +1003,9 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
char *data;
memset(&pkt, 0, size);
- memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
- memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
- pkt.type = cpu_to_be16(ETH_P_LOOP);
+ ether_addr_copy(pkt.mac_dst, mac_addr);
+ ether_addr_copy(pkt.mac_src, mac_addr);
+ pkt.type = cpu_to_be16(ETH_P_LOOPBACK);
skb = dev_alloc_skb(size);
if (!skb)
@@ -1097,7 +1086,7 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
{
u8 tmp_mac_addr[ETH_ALEN];
- memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_mac_addr, slave1->dev->dev_addr);
alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
alb_set_slave_mac_addr(slave2, tmp_mac_addr);
@@ -1254,9 +1243,9 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
if (free_mac_slave) {
alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
- pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
- bond->dev->name, slave->dev->name,
- free_mac_slave->dev->name);
+ pr_warn("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
+ bond->dev->name, slave->dev->name,
+ free_mac_slave->dev->name);
} else if (has_bond_addr) {
pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
@@ -1294,12 +1283,12 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
bond_for_each_slave(bond, slave, iter) {
/* save net_device's current hw address */
- memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, slave->dev->dev_addr);
res = dev_set_mac_address(slave->dev, addr);
/* restore net_device's hw address */
- memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(slave->dev->dev_addr, tmp_addr);
if (res)
goto unwind;
@@ -1315,9 +1304,9 @@ unwind:
bond_for_each_slave(bond, rollback_slave, iter) {
if (rollback_slave == slave)
break;
- memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, rollback_slave->dev->dev_addr);
dev_set_mac_address(rollback_slave->dev, &sa);
- memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(rollback_slave->dev->dev_addr, tmp_addr);
}
return res;
@@ -1330,9 +1319,8 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
int res;
res = tlb_initialize(bond);
- if (res) {
+ if (res)
return res;
- }
if (rlb_enabled) {
bond->alb_info.rlb_enabled = 1;
@@ -1355,9 +1343,8 @@ void bond_alb_deinitialize(struct bonding *bond)
tlb_deinitialize(bond);
- if (bond_info->rlb_enabled) {
+ if (bond_info->rlb_enabled)
rlb_deinitialize(bond);
- }
}
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
@@ -1436,14 +1423,13 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
- hash_start = (char*)eth_data->h_dest;
+ hash_start = (char *)eth_data->h_dest;
hash_size = ETH_ALEN;
break;
case ETH_P_ARP:
do_tx_balance = 0;
- if (bond_info->rlb_enabled) {
+ if (bond_info->rlb_enabled)
tx_slave = rlb_arp_xmit(skb, bond);
- }
break;
default:
do_tx_balance = 0;
@@ -1463,23 +1449,22 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
if (tx_slave && SLAVE_IS_OK(tx_slave)) {
if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
- memcpy(eth_data->h_source,
- tx_slave->dev->dev_addr,
- ETH_ALEN);
+ ether_addr_copy(eth_data->h_source,
+ tx_slave->dev->dev_addr);
}
bond_dev_queue_xmit(bond, skb, tx_slave->dev);
goto out;
- } else {
- if (tx_slave) {
- _lock_tx_hashtbl(bond);
- __tlb_clear_slave(bond, tx_slave, 0);
- _unlock_tx_hashtbl(bond);
- }
+ }
+
+ if (tx_slave) {
+ _lock_tx_hashtbl(bond);
+ __tlb_clear_slave(bond, tx_slave, 0);
+ _unlock_tx_hashtbl(bond);
}
/* no suitable interface, frame not sent */
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
out:
return NETDEV_TX_OK;
}
@@ -1577,11 +1562,10 @@ void bond_alb_monitor(struct work_struct *work)
--bond_info->rlb_update_delay_counter;
} else {
rlb_update_rx_clients(bond);
- if (bond_info->rlb_update_retry_counter) {
+ if (bond_info->rlb_update_retry_counter)
--bond_info->rlb_update_retry_counter;
- } else {
+ else
bond_info->rx_ntt = 0;
- }
}
}
}
@@ -1598,23 +1582,20 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
int res;
res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
- if (res) {
+ if (res)
return res;
- }
res = alb_handle_addr_collision_on_attach(bond, slave);
- if (res) {
+ if (res)
return res;
- }
tlb_init_slave(slave);
/* order a rebalance ASAP */
bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
bond->alb_info.rlb_rebalance = 1;
- }
return 0;
}
@@ -1645,9 +1626,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
if (link == BOND_LINK_DOWN) {
tlb_clear_slave(bond, slave, 0);
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
rlb_clear_slave(bond, slave);
- }
} else if (link == BOND_LINK_UP) {
/* order a rebalance ASAP */
bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
@@ -1723,14 +1703,14 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
struct sockaddr sa;
u8 tmp_addr[ETH_ALEN];
- memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, new_slave->dev->dev_addr);
memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
sa.sa_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
dev_set_mac_address(new_slave->dev, &sa);
- memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(new_slave->dev->dev_addr, tmp_addr);
}
/* curr_active_slave must be set before calling alb_swap_mac_addr */
@@ -1759,14 +1739,12 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
struct slave *swap_slave;
int res;
- if (!is_valid_ether_addr(sa->sa_data)) {
+ if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- }
res = alb_set_mac_address(bond, addr);
- if (res) {
+ if (res)
return res;
- }
memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
@@ -1774,9 +1752,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
* Otherwise we'll need to pass the new address to it and handle
* duplications.
*/
- if (!bond->curr_active_slave) {
+ if (!bond->curr_active_slave)
return 0;
- }
swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
@@ -1800,8 +1777,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
rlb_clear_vlan(bond, vlan_id);
- }
}
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 5fc4c2351478..2d3f7fa541ff 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -69,7 +69,7 @@ void bond_debug_register(struct bonding *bond)
debugfs_create_dir(bond->dev->name, bonding_debug_root);
if (!bond->debug_dir) {
- pr_warning("%s: Warning: failed to register to debugfs\n",
+ pr_warn("%s: Warning: failed to register to debugfs\n",
bond->dev->name);
return;
}
@@ -98,9 +98,8 @@ void bond_debug_reregister(struct bonding *bond)
if (d) {
bond->debug_dir = d;
} else {
- pr_warning("%s: Warning: failed to reregister, "
- "so just unregister old one\n",
- bond->dev->name);
+ pr_warn("%s: Warning: failed to reregister, so just unregister old one\n",
+ bond->dev->name);
bond_debug_unregister(bond);
}
}
@@ -110,8 +109,7 @@ void bond_create_debugfs(void)
bonding_debug_root = debugfs_create_dir("bonding", NULL);
if (!bonding_debug_root) {
- pr_warning("Warning: Cannot create bonding directory"
- " in debugfs\n");
+ pr_warn("Warning: Cannot create bonding directory in debugfs\n");
}
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e5628fc725c3..d9f85464b362 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -673,12 +673,12 @@ static void bond_do_fail_over_mac(struct bonding *bond,
write_unlock_bh(&bond->curr_slave_lock);
if (old_active) {
- memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
- memcpy(saddr.sa_data, old_active->dev->dev_addr,
- ETH_ALEN);
+ ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
+ ether_addr_copy(saddr.sa_data,
+ old_active->dev->dev_addr);
saddr.sa_family = new_active->dev->type;
} else {
- memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(saddr.sa_data, bond->dev->dev_addr);
saddr.sa_family = bond->dev->type;
}
@@ -692,7 +692,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
if (!old_active)
goto out;
- memcpy(saddr.sa_data, tmp_mac, ETH_ALEN);
+ ether_addr_copy(saddr.sa_data, tmp_mac);
saddr.sa_family = old_active->dev->type;
rv = dev_set_mac_address(old_active->dev, &saddr);
@@ -798,11 +798,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
return;
if (new_active) {
- new_active->jiffies = jiffies;
+ new_active->last_link_up = jiffies;
if (new_active->link == BOND_LINK_BACK) {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: making interface %s the new active one %d ms earlier.\n",
+ pr_info("%s: making interface %s the new active one %d ms earlier\n",
bond->dev->name, new_active->dev->name,
(bond->params.updelay - new_active->delay) * bond->params.miimon);
}
@@ -817,7 +817,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: making interface %s the new active one.\n",
+ pr_info("%s: making interface %s the new active one\n",
bond->dev->name, new_active->dev->name);
}
}
@@ -910,7 +910,7 @@ void bond_select_active_slave(struct bonding *bond)
pr_info("%s: first active interface up!\n",
bond->dev->name);
} else {
- pr_info("%s: now running without any active interface !\n",
+ pr_info("%s: now running without any active interface!\n",
bond->dev->name);
}
}
@@ -922,12 +922,12 @@ static inline int slave_enable_netpoll(struct slave *slave)
struct netpoll *np;
int err = 0;
- np = kzalloc(sizeof(*np), GFP_ATOMIC);
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
err = -ENOMEM;
if (!np)
goto out;
- err = __netpoll_setup(np, slave->dev, GFP_ATOMIC);
+ err = __netpoll_setup(np, slave->dev);
if (err) {
kfree(np);
goto out;
@@ -946,14 +946,6 @@ static inline void slave_disable_netpoll(struct slave *slave)
slave->np = NULL;
__netpoll_free_async(np);
}
-static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
-{
- if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
- return false;
- if (!slave_dev->netdev_ops->ndo_poll_controller)
- return false;
- return true;
-}
static void bond_poll_controller(struct net_device *bond_dev)
{
@@ -970,7 +962,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
slave_disable_netpoll(slave);
}
-static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
struct bonding *bond = netdev_priv(dev);
struct list_head *iter;
@@ -1119,9 +1111,6 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
slave = bond_slave_get_rcu(skb->dev);
bond = slave->bond;
- if (bond->params.arp_interval)
- slave->dev->last_rx = jiffies;
-
recv_probe = ACCESS_ONCE(bond->recv_probe);
if (recv_probe) {
ret = recv_probe(skb, bond, slave);
@@ -1146,7 +1135,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
- memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr);
}
return ret;
@@ -1187,13 +1176,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond->params.use_carrier &&
slave_dev->ethtool_ops->get_link == NULL &&
slave_ops->ndo_do_ioctl == NULL) {
- pr_warning("%s: Warning: no link monitoring support for %s\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: no link monitoring support for %s\n",
+ bond_dev->name, slave_dev->name);
}
/* already enslaved */
if (slave_dev->flags & IFF_SLAVE) {
- pr_debug("Error, Device was already enslaved\n");
+ pr_debug("Error: Device was already enslaved\n");
return -EBUSY;
}
@@ -1211,9 +1200,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev->name, slave_dev->name, bond_dev->name);
return -EPERM;
} else {
- pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
- bond_dev->name, slave_dev->name,
- slave_dev->name, bond_dev->name);
+ pr_warn("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
+ bond_dev->name, slave_dev->name,
+ slave_dev->name, bond_dev->name);
}
} else {
pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
@@ -1226,7 +1215,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* enslaving it; the old ifenslave will not.
*/
if ((slave_dev->flags & IFF_UP)) {
- pr_err("%s is up. This may be due to an out of date ifenslave.\n",
+ pr_err("%s is up - this may be due to an out of date ifenslave\n",
slave_dev->name);
res = -EPERM;
goto err_undo_flags;
@@ -1270,24 +1259,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev);
}
} else if (bond_dev->type != slave_dev->type) {
- pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
- slave_dev->name,
- slave_dev->type, bond_dev->type);
+ pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
+ slave_dev->name, slave_dev->type, bond_dev->type);
res = -EINVAL;
goto err_undo_flags;
}
if (slave_ops->ndo_set_mac_address == NULL) {
if (!bond_has_slaves(bond)) {
- pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
+ pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
bond_dev->name);
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
- pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
+ pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
bond_dev->name);
}
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
- pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
+ pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n",
bond_dev->name);
res = -EOPNOTSUPP;
goto err_undo_flags;
@@ -1326,7 +1314,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* that need it, and for restoring it upon release, and then
* set it to the master's address
*/
- memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
if (!bond->params.fail_over_mac ||
bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
@@ -1410,10 +1398,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_update_speed_duplex(new_slave);
- new_slave->last_arp_rx = jiffies -
+ new_slave->last_rx = jiffies -
(msecs_to_jiffies(bond->params.arp_interval) + 1);
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
- new_slave->target_last_arp_rx[i] = new_slave->last_arp_rx;
+ new_slave->target_last_arp_rx[i] = new_slave->last_rx;
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1428,12 +1416,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* supported); thus, we don't need to change
* the messages for netif_carrier.
*/
- pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
+ bond_dev->name, slave_dev->name);
} else if (link_reporting == -1) {
/* unable get link status using mii/ethtool */
- pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
+ bond_dev->name, slave_dev->name);
}
}
@@ -1457,10 +1445,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
if (new_slave->link != BOND_LINK_DOWN)
- new_slave->jiffies = jiffies;
+ new_slave->last_link_up = jiffies;
pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
- new_slave->link == BOND_LINK_DOWN ? "DOWN" :
- (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+ new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+ (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
@@ -1520,9 +1508,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
- pr_info("Error, %s: master_dev is using netpoll, "
- "but new slave device does not support netpoll.\n",
- bond_dev->name);
+ pr_info("Error, %s: master_dev is using netpoll, but new slave device does not support netpoll\n",
+ bond_dev->name);
res = -EBUSY;
goto err_detach;
}
@@ -1560,10 +1547,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
unblock_netpoll_tx();
}
- pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
+ pr_info("%s: Enslaving %s as %s interface with %s link\n",
bond_dev->name, slave_dev->name,
- bond_is_active_slave(new_slave) ? "n active" : " backup",
- new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
+ bond_is_active_slave(new_slave) ? "an active" : "a backup",
+ new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
/* enslave is successful */
return 0;
@@ -1603,7 +1590,7 @@ err_restore_mac:
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
*/
- memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(addr.sa_data, new_slave->perm_hwaddr);
addr.sa_family = slave_dev->type;
dev_set_mac_address(slave_dev, &addr);
}
@@ -1648,7 +1635,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
- pr_err("%s: Error: cannot release %s.\n",
+ pr_err("%s: Error: cannot release %s\n",
bond_dev->name, slave_dev->name);
return -EINVAL;
}
@@ -1679,7 +1666,7 @@ static int __bond_release_one(struct net_device *bond_dev,
write_unlock_bh(&bond->lock);
- pr_info("%s: releasing %s interface %s\n",
+ pr_info("%s: Releasing %s interface %s\n",
bond_dev->name,
bond_is_active_slave(slave) ? "active" : "backup",
slave_dev->name);
@@ -1692,10 +1679,10 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
- pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
- bond_dev->name, slave_dev->name,
- slave->perm_hwaddr,
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
+ bond_dev->name, slave_dev->name,
+ slave->perm_hwaddr,
+ bond_dev->name, slave_dev->name);
}
if (bond->primary_slave == slave)
@@ -1736,10 +1723,10 @@ static int __bond_release_one(struct net_device *bond_dev,
eth_hw_addr_random(bond_dev);
if (vlan_uses_dev(bond_dev)) {
- pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
- bond_dev->name, bond_dev->name);
- pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
- bond_dev->name);
+ pr_warn("%s: Warning: clearing HW address of %s while it still has VLANs\n",
+ bond_dev->name, bond_dev->name);
+ pr_warn("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs\n",
+ bond_dev->name);
}
}
@@ -1755,7 +1742,7 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_compute_features(bond);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
(old_features & NETIF_F_VLAN_CHALLENGED))
- pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+ pr_info("%s: last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
bond_dev->name, slave_dev->name, bond_dev->name);
/* must do this from outside any spinlocks */
@@ -1790,7 +1777,7 @@ static int __bond_release_one(struct net_device *bond_dev,
if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
- memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
addr.sa_family = slave_dev->type;
dev_set_mac_address(slave_dev, &addr);
}
@@ -1823,7 +1810,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
ret = bond_release(bond_dev, slave_dev);
if (ret == 0 && !bond_has_slaves(bond)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
- pr_info("%s: destroying bond %s.\n",
+ pr_info("%s: Destroying bond %s\n",
bond_dev->name, bond_dev->name);
unregister_netdevice(bond_dev);
}
@@ -1837,9 +1824,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
info->bond_mode = bond->params.mode;
info->miimon = bond->params.miimon;
- read_lock(&bond->lock);
info->num_slaves = bond->slave_cnt;
- read_unlock(&bond->lock);
return 0;
}
@@ -1851,7 +1836,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
int i = 0, res = -ENODEV;
struct slave *slave;
- read_lock(&bond->lock);
bond_for_each_slave(bond, slave, iter) {
if (i++ == (int)info->slave_id) {
res = 0;
@@ -1862,7 +1846,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
break;
}
}
- read_unlock(&bond->lock);
return res;
}
@@ -1892,7 +1875,7 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->link = BOND_LINK_FAIL;
slave->delay = bond->params.downdelay;
if (slave->delay) {
- pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n",
+ pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
bond->dev->name,
(bond->params.mode ==
BOND_MODE_ACTIVEBACKUP) ?
@@ -1908,8 +1891,8 @@ static int bond_miimon_inspect(struct bonding *bond)
* recovered before downdelay expired
*/
slave->link = BOND_LINK_UP;
- slave->jiffies = jiffies;
- pr_info("%s: link status up again after %d ms for interface %s.\n",
+ slave->last_link_up = jiffies;
+ pr_info("%s: link status up again after %d ms for interface %s\n",
bond->dev->name,
(bond->params.downdelay - slave->delay) *
bond->params.miimon,
@@ -1934,7 +1917,7 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->delay = bond->params.updelay;
if (slave->delay) {
- pr_info("%s: link status up for interface %s, enabling it in %d ms.\n",
+ pr_info("%s: link status up for interface %s, enabling it in %d ms\n",
bond->dev->name, slave->dev->name,
ignore_updelay ? 0 :
bond->params.updelay *
@@ -1944,7 +1927,7 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_BACK:
if (!link_state) {
slave->link = BOND_LINK_DOWN;
- pr_info("%s: link status down again after %d ms for interface %s.\n",
+ pr_info("%s: link status down again after %d ms for interface %s\n",
bond->dev->name,
(bond->params.updelay - slave->delay) *
bond->params.miimon,
@@ -1983,7 +1966,7 @@ static void bond_miimon_commit(struct bonding *bond)
case BOND_LINK_UP:
slave->link = BOND_LINK_UP;
- slave->jiffies = jiffies;
+ slave->last_link_up = jiffies;
if (bond->params.mode == BOND_MODE_8023AD) {
/* prevent it from being the active one */
@@ -1996,7 +1979,7 @@ static void bond_miimon_commit(struct bonding *bond)
bond_set_backup_slave(slave);
}
- pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
+ pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex\n",
bond->dev->name, slave->dev->name,
slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
slave->duplex ? "full" : "half");
@@ -2141,24 +2124,40 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
* switches in VLAN mode (especially if ports are configured as
* "native" to a VLAN) might not pass non-tagged frames.
*/
-static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, unsigned short vlan_id)
+static void bond_arp_send(struct net_device *slave_dev, int arp_op,
+ __be32 dest_ip, __be32 src_ip,
+ struct bond_vlan_tag *inner,
+ struct bond_vlan_tag *outer)
{
struct sk_buff *skb;
- pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op,
- slave_dev->name, &dest_ip, &src_ip, vlan_id);
+ pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
+ arp_op, slave_dev->name, &dest_ip, &src_ip);
skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
NULL, slave_dev->dev_addr, NULL);
if (!skb) {
- pr_err("ARP packet allocation failed\n");
+ net_err_ratelimited("ARP packet allocation failed\n");
return;
}
- if (vlan_id) {
- skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
+ if (outer->vlan_id) {
+ if (inner->vlan_id) {
+ pr_debug("inner tag: proto %X vid %X\n",
+ ntohs(inner->vlan_proto), inner->vlan_id);
+ skb = __vlan_put_tag(skb, inner->vlan_proto,
+ inner->vlan_id);
+ if (!skb) {
+ net_err_ratelimited("failed to insert inner VLAN tag\n");
+ return;
+ }
+ }
+
+ pr_debug("outer reg: proto %X vid %X\n",
+ ntohs(outer->vlan_proto), outer->vlan_id);
+ skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
if (!skb) {
- pr_err("failed to insert VLAN tag\n");
+ net_err_ratelimited("failed to insert outer VLAN tag\n");
return;
}
}
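When both tags are present, the hunk above writes the inner 802.1Q tag into the frame first and the outer tag last, so the tag for the VLAN closest to the bond ends up outermost on the wire. A minimal userspace sketch of that ordering, making no assumptions about the kernel helpers (the struct and function names below are invented for illustration):

/* Each push prepends a 4-byte TPID/TCI pair, so the tag pushed last is the
 * outermost one a switch sees. Illustrative only, not kernel code.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vlan_tag { uint16_t proto; uint16_t vid; };

static size_t push_tag(uint8_t *frame, size_t len, const struct vlan_tag *t)
{
	uint16_t tpid = htons(t->proto), tci = htons(t->vid & 0x0fff);

	memmove(frame + 4, frame, len);		/* make room at the front */
	memcpy(frame, &tpid, 2);
	memcpy(frame + 2, &tci, 2);
	return len + 4;
}

int main(void)
{
	uint8_t frame[64] = { 0xde, 0xad };	/* stand-in for the ARP payload */
	struct vlan_tag inner = { 0x8100, 10 }, outer = { 0x88a8, 100 };
	size_t len = 2;
	uint16_t lead;

	len = push_tag(frame, len, &inner);	/* inner tag first ... */
	len = push_tag(frame, len, &outer);	/* ... outer tag ends up in front */
	memcpy(&lead, frame, 2);
	printf("frame is %zu bytes, leading TPID 0x%04x\n", len, ntohs(lead));
	return 0;
}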
@@ -2171,23 +2170,32 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
struct net_device *upper, *vlan_upper;
struct list_head *iter, *vlan_iter;
struct rtable *rt;
+ struct bond_vlan_tag inner, outer;
__be32 *targets = bond->params.arp_targets, addr;
- int i, vlan_id;
+ int i;
for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
pr_debug("basa: target %pI4\n", &targets[i]);
+ inner.vlan_proto = 0;
+ inner.vlan_id = 0;
+ outer.vlan_proto = 0;
+ outer.vlan_id = 0;
/* Find out through which dev should the packet go */
rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
RTO_ONLINK, 0);
if (IS_ERR(rt)) {
- pr_debug("%s: no route to arp_ip_target %pI4\n",
- bond->dev->name, &targets[i]);
+ /* there's no route to target - try to send arp
+ * probe to generate any traffic (arp_validate=0)
+ */
+ if (bond->params.arp_validate)
+ net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
+ bond->dev->name,
+ &targets[i]);
+ bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
continue;
}
- vlan_id = 0;
-
/* bond device itself */
if (rt->dst.dev == bond->dev)
goto found;
@@ -2197,17 +2205,30 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
* found we verify its upper dev list, searching for the
* rt->dst.dev. If found we save the tag of the vlan and
* proceed to send the packet.
- *
- * TODO: QinQ?
*/
netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
vlan_iter) {
if (!is_vlan_dev(vlan_upper))
continue;
+
+ if (vlan_upper == rt->dst.dev) {
+ outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
+ outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
+ rcu_read_unlock();
+ goto found;
+ }
netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
iter) {
if (upper == rt->dst.dev) {
- vlan_id = vlan_dev_vlan_id(vlan_upper);
+ /* If the upper dev is a vlan dev too,
+ * set the vlan tag to inner tag.
+ */
+ if (is_vlan_dev(upper)) {
+ inner.vlan_proto = vlan_dev_vlan_proto(upper);
+ inner.vlan_id = vlan_dev_vlan_id(upper);
+ }
+ outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
+ outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
rcu_read_unlock();
goto found;
}
@@ -2220,10 +2241,6 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
*/
netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (upper == rt->dst.dev) {
- /* if it's a vlan - get its VID */
- if (is_vlan_dev(upper))
- vlan_id = vlan_dev_vlan_id(upper);
-
rcu_read_unlock();
goto found;
}
@@ -2242,7 +2259,7 @@ found:
addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
ip_rt_put(rt);
bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, vlan_id);
+ addr, &inner, &outer);
}
}
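For each ARP target the monitor now resolves at most two tags from the stacked-device topology: a VLAN sitting directly on the bond supplies the outer tag, and a VLAN stacked on top of that one supplies the inner tag. A toy model of that decision with invented types and a fixed two-level check in place of the kernel's upper-device iteration:

/* Given where the route points, decide which tags bond_arp_send() would
 * need. Purely illustrative; no kernel structures are modelled here.
 */
#include <stdio.h>

struct toy_dev {
	const char *name;
	int vlan_id;			/* 0 = not a VLAN device */
	struct toy_dev *lower;		/* device this VLAN sits on, if any */
};

static void resolve_tags(const struct toy_dev *bond,
			 const struct toy_dev *route_dev,
			 int *inner, int *outer)
{
	*inner = *outer = 0;
	if (route_dev == bond)
		return;					/* untagged probe */
	if (route_dev->vlan_id && route_dev->lower == bond) {
		*outer = route_dev->vlan_id;		/* single tag */
	} else if (route_dev->vlan_id && route_dev->lower &&
		   route_dev->lower->vlan_id &&
		   route_dev->lower->lower == bond) {
		*inner = route_dev->vlan_id;		/* QinQ: vlan on vlan */
		*outer = route_dev->lower->vlan_id;
	}
}

int main(void)
{
	struct toy_dev bond = { "bond0", 0, NULL };
	struct toy_dev v100 = { "bond0.100", 100, &bond };
	struct toy_dev v10  = { "bond0.100.10", 10, &v100 };
	int inner, outer;

	resolve_tags(&bond, &v10, &inner, &outer);
	printf("inner %d outer %d\n", inner, outer);	/* inner 10 outer 100 */
	return 0;
}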
@@ -2260,7 +2277,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
pr_debug("bva: sip %pI4 not found in targets\n", &sip);
return;
}
- slave->last_arp_rx = jiffies;
+ slave->last_rx = jiffies;
slave->target_last_arp_rx[i] = jiffies;
}
@@ -2268,17 +2285,19 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
+ struct slave *curr_active_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
- int alen;
+ int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
- if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
+ if (!slave_do_arp_validate(bond, slave)) {
+ if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
+ !slave_do_arp_validate_only(bond, slave))
+ slave->last_rx = jiffies;
return RX_HANDLER_ANOTHER;
-
- read_lock(&bond->lock);
-
- if (!slave_do_arp_validate(bond, slave))
- goto out_unlock;
+ } else if (!is_arp) {
+ return RX_HANDLER_ANOTHER;
+ }
alen = arp_hdr_len(bond->dev);
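The rewritten entry check implements the new filter variants of arp_validate: a slave that is not validating still refreshes last_rx on any frame, unless a filter mode narrows that to ARP frames only. A stand-alone sketch of just that decision; the flag bits here are invented, not the driver's:

/* Returns true when a received frame should refresh the slave's last_rx
 * timestamp. Illustrative flags; validation itself is not modelled.
 */
#include <stdbool.h>
#include <stdio.h>

enum { DO_VALIDATE = 1 << 0, DO_FILTER = 1 << 1 };	/* invented bits */

static bool refresh_last_rx(int mode, bool is_arp)
{
	if (mode & DO_VALIDATE)
		return false;		/* the ARP validation path handles it */
	if (mode & DO_FILTER)
		return is_arp;		/* filter*: only ARP counts as traffic */
	return true;			/* classic behaviour: any frame counts */
}

int main(void)
{
	printf("%d %d %d\n",
	       refresh_last_rx(0, false),		/* 1: any traffic */
	       refresh_last_rx(DO_FILTER, false),	/* 0: non-ARP ignored */
	       refresh_last_rx(DO_FILTER, true));	/* 1: ARP counts */
	return 0;
}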
@@ -2312,6 +2331,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
bond->params.arp_validate, slave_do_arp_validate(bond, slave),
&sip, &tip);
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+
/*
* Backup slaves won't see the ARP reply, but do come through
* here for each ARP probe (so we swap the sip/tip to validate
@@ -2325,15 +2346,15 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
* is done to avoid endless looping when we can't reach the
* arp_ip_target and fool ourselves with our own arp requests.
*/
+
if (bond_is_active_slave(slave))
bond_validate_arp(bond, slave, sip, tip);
- else if (bond->curr_active_slave &&
- time_after(slave_last_rx(bond, bond->curr_active_slave),
- bond->curr_active_slave->jiffies))
+ else if (curr_active_slave &&
+ time_after(slave_last_rx(bond, curr_active_slave),
+ curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
out_unlock:
- read_unlock(&bond->lock);
if (arp != (struct arphdr *)skb->data)
kfree(arp);
return RX_HANDLER_ANOTHER;
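The backup-slave path above compares jiffies-style timestamps, so it depends on wrap-safe ordering. A small stand-alone illustration of the signed-subtraction idiom behind time_after(), written as plain C rather than the kernel macro:

#include <stdio.h>

typedef unsigned long ticks_t;

/* True when a is later than b, even if the counter wrapped in between. */
static int ticks_after(ticks_t a, ticks_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	ticks_t near_wrap = (ticks_t)-5;	/* just before the counter wraps */
	ticks_t past_wrap = 10;			/* shortly after the wrap */

	printf("%d\n", ticks_after(past_wrap, near_wrap));	/* 1 */
	return 0;
}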
@@ -2376,9 +2397,9 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
/* see if any of the previous devices are up now (i.e. they have
* xmt and rcv traffic). the curr_active_slave does not come into
- * the picture unless it is null. also, slave->jiffies is not needed
- * here because we send an arp on each slave and give a slave as
- * long as it needs to get the tx/rx within the delta.
+ * the picture unless it is null. also, slave->last_link_up is not
+ * needed here because we send an arp on each slave and give a slave
+ * as long as it needs to get the tx/rx within the delta.
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
@@ -2387,7 +2408,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, trans_start, 1) &&
- bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
+ bond_time_in_interval(bond, slave->last_rx, 1)) {
slave->link = BOND_LINK_UP;
slave_state_changed = 1;
@@ -2398,7 +2419,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
* is closed.
*/
if (!oldcurrent) {
- pr_info("%s: link status definitely up for interface %s, ",
+ pr_info("%s: link status definitely up for interface %s\n",
bond->dev->name,
slave->dev->name);
do_failover = 1;
@@ -2416,7 +2437,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
* if we don't know our ip yet
*/
if (!bond_time_in_interval(bond, trans_start, 2) ||
- !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
+ !bond_time_in_interval(bond, slave->last_rx, 2)) {
slave->link = BOND_LINK_DOWN;
slave_state_changed = 1;
@@ -2424,9 +2445,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- pr_info("%s: interface %s is now down.\n",
- bond->dev->name,
- slave->dev->name);
+ pr_info("%s: interface %s is now down\n",
+ bond->dev->name, slave->dev->name);
if (slave == oldcurrent)
do_failover = 1;
@@ -2505,7 +2525,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
- if (bond_time_in_interval(bond, slave->jiffies, 2))
+ if (bond_time_in_interval(bond, slave->last_link_up, 2))
continue;
/*
@@ -2576,7 +2596,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
bond->current_arp_slave = NULL;
}
- pr_info("%s: link status definitely up for interface %s.\n",
+ pr_info("%s: link status definitely up for interface %s\n",
bond->dev->name, slave->dev->name);
if (!bond->curr_active_slave ||
@@ -2682,7 +2702,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_LATER);
- pr_info("%s: backup interface %s is now down.\n",
+ pr_info("%s: backup interface %s is now down\n",
bond->dev->name, slave->dev->name);
}
if (slave == curr_arp_slave)
@@ -2698,7 +2718,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
new_slave->link = BOND_LINK_BACK;
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
bond_arp_send_all(bond, new_slave);
- new_slave->jiffies = jiffies;
+ new_slave->last_link_up = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
check_state:
@@ -2879,9 +2899,9 @@ static int bond_slave_netdev_event(unsigned long event,
break;
}
- pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
- bond->dev->name, bond->primary_slave ? slave_dev->name :
- "none");
+ pr_info("%s: Primary slave changed to %s, reselecting active slave\n",
+ bond->dev->name,
+ bond->primary_slave ? slave_dev->name : "none");
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
@@ -2917,8 +2937,7 @@ static int bond_netdev_event(struct notifier_block *this,
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
pr_debug("event_dev: %s, event: %lx\n",
- event_dev ? event_dev->name : "None",
- event);
+ event_dev ? event_dev->name : "None", event);
if (!(event_dev->priv_flags & IFF_BONDING))
return NOTIFY_DONE;
@@ -2967,7 +2986,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
fk->ports = 0;
noff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
- if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+ if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
return false;
iph = ip_hdr(skb);
fk->src = iph->saddr;
@@ -2976,7 +2995,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
if (!ip_is_fragment(iph))
proto = iph->protocol;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+ if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
return false;
iph6 = ipv6_hdr(skb);
fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
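The unlikely() annotations mark the short-header case as the cold path; they are branch-prediction hints, not functional changes. Outside the kernel the same hint can be written with __builtin_expect (GCC/Clang). A sketch with hypothetical macro names to avoid clashing with the kernel's:

#include <stdio.h>

#define my_likely(x)	__builtin_expect(!!(x), 1)
#define my_unlikely(x)	__builtin_expect(!!(x), 0)

/* Reject truncated headers on the (hinted) rare path. */
static int parse(const char *buf, unsigned int len)
{
	if (my_unlikely(len < 4))
		return -1;
	return buf[0];
}

int main(void)
{
	char pkt[8] = { 42 };

	printf("%d\n", parse(pkt, sizeof(pkt)));	/* 42 */
	return 0;
}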
@@ -3058,7 +3077,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond_has_slaves(bond)) {
read_lock(&bond->curr_slave_lock);
bond_for_each_slave(bond, slave, iter) {
- if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+ if (USES_PRIMARY(bond->params.mode)
&& (slave != bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
@@ -3087,8 +3106,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->arp_work, 0);
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_arp_rcv;
}
if (bond->params.mode == BOND_MODE_8023AD) {
@@ -3375,8 +3393,8 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
struct list_head *iter;
int res = 0;
- pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
- (bond_dev ? bond_dev->name : "None"), new_mtu);
+ pr_debug("bond=%p, name=%s, new_mtu=%d\n",
+ bond, bond_dev ? bond_dev->name : "None", new_mtu);
/* Can't hold bond->lock with bh disabled here since
* some base drivers panic. On the other hand we can't
@@ -3395,8 +3413,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
bond_for_each_slave(bond, slave, iter) {
pr_debug("s %p c_m %p\n",
- slave,
- slave->dev->netdev_ops->ndo_change_mtu);
+ slave, slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -3484,15 +3501,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
*/
bond_for_each_slave(bond, slave, iter) {
- const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
pr_debug("slave %p %s\n", slave, slave->dev->name);
-
- if (slave_ops->ndo_set_mac_address == NULL) {
- res = -EOPNOTSUPP;
- pr_debug("EOPNOTSUPP %s\n", slave->dev->name);
- goto unwind;
- }
-
res = dev_set_mac_address(slave->dev, addr);
if (res) {
/* TODO: consider downing the slave
@@ -3568,7 +3577,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
}
}
/* no slave that can tx has been found */
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
}
/**
@@ -3644,7 +3653,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
if (slave)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -3676,8 +3685,8 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
- pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
- bond_dev->name);
+ net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
+ bond_dev->name, __func__);
continue;
}
/* bond_dev_queue_xmit always returns 0 */
@@ -3687,7 +3696,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -3774,7 +3783,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
pr_err("%s: Error: Unknown bonding mode %d\n",
dev->name, bond->params.mode);
WARN_ON_ONCE(1);
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
}
@@ -3788,14 +3797,14 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
* If we risk deadlock from transmitting this in the
* netpoll path, tell netpoll to queue the frame for later tx
*/
- if (is_netpoll_tx_blocked(dev))
+ if (unlikely(is_netpoll_tx_blocked(dev)))
return NETDEV_TX_BUSY;
rcu_read_lock();
if (bond_has_slaves(bond))
ret = __bond_start_xmit(skb, dev);
else
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
rcu_read_unlock();
return ret;
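These kfree_skb() to dev_kfree_skb_any() conversions matter because the transmit paths can be entered from contexts where an immediate free is not appropriate (netpoll, interrupts disabled), and the *_any variant defers the free when needed. The pattern reduced to a userspace sketch with an explicit context flag; nothing below is kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct buf { struct buf *next; };

static struct buf *deferred;	/* flushed later from a safe context */

static void free_any(struct buf *b, bool in_atomic_ctx)
{
	if (in_atomic_ctx) {		/* cannot free right now: park it */
		b->next = deferred;
		deferred = b;
	} else {
		free(b);
	}
}

int main(void)
{
	free_any(calloc(1, sizeof(struct buf)), true);
	free_any(calloc(1, sizeof(struct buf)), false);
	printf("deferred list %s\n", deferred ? "non-empty" : "empty");
	return 0;
}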
@@ -3958,7 +3967,7 @@ static void bond_uninit(struct net_device *bond_dev)
/* Release the bonded slaves */
bond_for_each_slave(bond, slave, iter)
__bond_release_one(bond_dev, slave->dev, true);
- pr_info("%s: released all slaves\n", bond_dev->name);
+ pr_info("%s: Released all slaves\n", bond_dev->name);
list_del(&bond->bond_list);
@@ -3967,56 +3976,11 @@ static void bond_uninit(struct net_device *bond_dev)
/*------------------------- Module initialization ---------------------------*/
-int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl)
-{
- int i;
-
- for (i = 0; tbl[i].modename; i++)
- if (mode == tbl[i].mode)
- return tbl[i].mode;
-
- return -1;
-}
-
-static int bond_parm_tbl_lookup_name(const char *modename,
- const struct bond_parm_tbl *tbl)
-{
- int i;
-
- for (i = 0; tbl[i].modename; i++)
- if (strcmp(modename, tbl[i].modename) == 0)
- return tbl[i].mode;
-
- return -1;
-}
-
-/*
- * Convert string input module parms. Accept either the
- * number of the mode or its string name. A bit complicated because
- * some mode names are substrings of other names, and calls from sysfs
- * may have whitespace in the name (trailing newlines, for example).
- */
-int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
-{
- int modeint;
- char *p, modestr[BOND_MAX_MODENAME_LEN + 1];
-
- for (p = (char *)buf; *p; p++)
- if (!(isdigit(*p) || isspace(*p)))
- break;
-
- if (*p && sscanf(buf, "%20s", modestr) != 0)
- return bond_parm_tbl_lookup_name(modestr, tbl);
- else if (sscanf(buf, "%d", &modeint) != 0)
- return bond_parm_tbl_lookup(modeint, tbl);
-
- return -1;
-}
-
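The removed helpers are superseded by the generic bond_opt_* parsing used below, which still accepts either an option's name or its numeric value. A self-contained sketch of that name-or-number lookup, with an invented table and a simplified leading-digit check:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct opt_val { const char *name; int val; };

/* Accept "balance-rr\n" style names or plain numbers like "1". */
static int lookup(const char *buf, const struct opt_val *tbl)
{
	char name[21] = { 0 };
	int i;

	if (sscanf(buf, "%20s", name) == 1 &&
	    !isdigit((unsigned char)name[0])) {
		for (i = 0; tbl[i].name; i++)
			if (!strcmp(name, tbl[i].name))
				return tbl[i].val;
		return -1;
	}
	for (i = 0; tbl[i].name; i++)		/* numeric form */
		if (atoi(buf) == tbl[i].val)
			return tbl[i].val;
	return -1;
}

int main(void)
{
	static const struct opt_val modes[] = {
		{ "balance-rr", 0 }, { "active-backup", 1 }, { NULL, 0 }
	};

	printf("%d %d\n", lookup("active-backup\n", modes),
	       lookup("1", modes));		/* 1 1 */
	return 0;
}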
static int bond_check_params(struct bond_params *params)
{
int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
- struct bond_opt_value newval, *valptr;
+ struct bond_opt_value newval;
+ const struct bond_opt_value *valptr;
int arp_all_targets_value;
/*
@@ -4036,7 +4000,7 @@ static int bond_check_params(struct bond_params *params)
if ((bond_mode != BOND_MODE_XOR) &&
(bond_mode != BOND_MODE_8023AD)) {
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
- bond_mode_name(bond_mode));
+ bond_mode_name(bond_mode));
} else {
bond_opt_initstr(&newval, xmit_hash_policy);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
@@ -4077,74 +4041,71 @@ static int bond_check_params(struct bond_params *params)
}
params->ad_select = valptr->value;
if (bond_mode != BOND_MODE_8023AD)
- pr_warning("ad_select param only affects 802.3ad mode\n");
+ pr_warn("ad_select param only affects 802.3ad mode\n");
} else {
params->ad_select = BOND_AD_STABLE;
}
if (max_bonds < 0) {
- pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
- max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
+ pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
+ max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
max_bonds = BOND_DEFAULT_MAX_BONDS;
}
if (miimon < 0) {
- pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- miimon, INT_MAX);
+ pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ miimon, INT_MAX);
miimon = 0;
}
if (updelay < 0) {
- pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- updelay, INT_MAX);
+ pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ updelay, INT_MAX);
updelay = 0;
}
if (downdelay < 0) {
- pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- downdelay, INT_MAX);
+ pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ downdelay, INT_MAX);
downdelay = 0;
}
if ((use_carrier != 0) && (use_carrier != 1)) {
- pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
- use_carrier);
+ pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
+ use_carrier);
use_carrier = 1;
}
if (num_peer_notif < 0 || num_peer_notif > 255) {
- pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
- num_peer_notif);
+ pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
+ num_peer_notif);
num_peer_notif = 1;
}
/* reset values for 802.3ad/TLB/ALB */
if (BOND_NO_USES_ARP(bond_mode)) {
if (!miimon) {
- pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
- pr_warning("Forcing miimon to 100msec\n");
+ pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
+ pr_warn("Forcing miimon to 100msec\n");
miimon = BOND_DEFAULT_MIIMON;
}
}
if (tx_queues < 1 || tx_queues > 255) {
- pr_warning("Warning: tx_queues (%d) should be between "
- "1 and 255, resetting to %d\n",
- tx_queues, BOND_DEFAULT_TX_QUEUES);
+ pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
+ tx_queues, BOND_DEFAULT_TX_QUEUES);
tx_queues = BOND_DEFAULT_TX_QUEUES;
}
if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
- pr_warning("Warning: all_slaves_active module parameter (%d), "
- "not of valid value (0/1), so it was set to "
- "0\n", all_slaves_active);
+ pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
+ all_slaves_active);
all_slaves_active = 0;
}
if (resend_igmp < 0 || resend_igmp > 255) {
- pr_warning("Warning: resend_igmp (%d) should be between "
- "0 and 255, resetting to %d\n",
- resend_igmp, BOND_DEFAULT_RESEND_IGMP);
+ pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
+ resend_igmp, BOND_DEFAULT_RESEND_IGMP);
resend_igmp = BOND_DEFAULT_RESEND_IGMP;
}
@@ -4165,37 +4126,36 @@ static int bond_check_params(struct bond_params *params)
/* just warn the user the up/down delay will have
* no effect since miimon is zero...
*/
- pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
- updelay, downdelay);
+ pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
+ updelay, downdelay);
}
} else {
/* don't allow arp monitoring */
if (arp_interval) {
- pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
- miimon, arp_interval);
+ pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
+ miimon, arp_interval);
arp_interval = 0;
}
if ((updelay % miimon) != 0) {
- pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
- updelay, miimon,
- (updelay / miimon) * miimon);
+ pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+ updelay, miimon, (updelay / miimon) * miimon);
}
updelay /= miimon;
if ((downdelay % miimon) != 0) {
- pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
- downdelay, miimon,
- (downdelay / miimon) * miimon);
+ pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
+ downdelay, miimon,
+ (downdelay / miimon) * miimon);
}
downdelay /= miimon;
}
if (arp_interval < 0) {
- pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to 0\n",
- arp_interval, INT_MAX);
+ pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ arp_interval, INT_MAX);
arp_interval = 0;
}
@@ -4206,30 +4166,26 @@ static int bond_check_params(struct bond_params *params)
__be32 ip;
if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
- pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
- arp_ip_target[i]);
+ pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
+ arp_ip_target[i]);
arp_interval = 0;
} else {
if (bond_get_targets_ip(arp_target, ip) == -1)
arp_target[arp_ip_count++] = ip;
else
- pr_warning("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
- &ip);
+ pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
+ &ip);
}
}
if (arp_interval && !arp_ip_count) {
/* don't allow arping if no arp_ip_target given... */
- pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
- arp_interval);
+ pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
+ arp_interval);
arp_interval = 0;
}
if (arp_validate) {
- if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
- pr_err("arp_validate only supported in active-backup mode\n");
- return -EINVAL;
- }
if (!arp_interval) {
pr_err("arp_validate requires arp_interval\n");
return -EINVAL;
@@ -4271,23 +4227,23 @@ static int bond_check_params(struct bond_params *params)
arp_interval, valptr->string, arp_ip_count);
for (i = 0; i < arp_ip_count; i++)
- pr_info(" %s", arp_ip_target[i]);
+ pr_cont(" %s", arp_ip_target[i]);
- pr_info("\n");
+ pr_cont("\n");
} else if (max_bonds) {
/* miimon and arp_interval not set, we need one so things
* work as expected, see bonding.txt for details
*/
- pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
+ pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
}
if (primary && !USES_PRIMARY(bond_mode)) {
/* currently, using a primary only makes sense
* in active backup, TLB or ALB modes
*/
- pr_warning("Warning: %s primary device specified but has no effect in %s mode\n",
- primary, bond_mode_name(bond_mode));
+ pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
+ primary, bond_mode_name(bond_mode));
primary = NULL;
}
@@ -4316,14 +4272,14 @@ static int bond_check_params(struct bond_params *params)
}
fail_over_mac_value = valptr->value;
if (bond_mode != BOND_MODE_ACTIVEBACKUP)
- pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
+ pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
} else {
fail_over_mac_value = BOND_FOM_NONE;
}
if (lp_interval == 0) {
- pr_warning("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
- INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
+ pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
+ INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
}
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 70651f8e8e3b..f847e165d252 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -181,7 +181,7 @@ static int bond_changelink(struct net_device *bond_dev,
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
if (arp_interval && miimon) {
- pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
+ pr_err("%s: ARP monitoring cannot be used with MII monitoring\n",
bond->dev->name);
return -EINVAL;
}
@@ -199,7 +199,7 @@ static int bond_changelink(struct net_device *bond_dev,
nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
__be32 target = nla_get_be32(attr);
- bond_opt_initval(&newval, target);
+ bond_opt_initval(&newval, (__force u64)target);
err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
&newval);
if (err)
@@ -207,7 +207,7 @@ static int bond_changelink(struct net_device *bond_dev,
i++;
}
if (i == 0 && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
+ pr_warn("%s: Removing last arp target with arp_interval on\n",
bond->dev->name);
if (err)
return err;
@@ -216,7 +216,7 @@ static int bond_changelink(struct net_device *bond_dev,
int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
if (arp_validate && miimon) {
- pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
+ pr_err("%s: ARP validating cannot be used with MII monitoring\n",
bond->dev->name);
return -EINVAL;
}
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 298c26509095..724e30fa20b9 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -20,7 +20,59 @@
#include <linux/inet.h>
#include "bonding.h"
-static struct bond_opt_value bond_mode_tbl[] = {
+static int bond_option_active_slave_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_miimon_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_updelay_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_downdelay_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_use_carrier_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_arp_interval_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
+static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
+static int bond_option_arp_ip_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_arp_validate_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_arp_all_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_primary_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_primary_reselect_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_fail_over_mac_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_resend_igmp_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_num_peer_notif_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_all_slaves_active_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_min_links_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_lp_interval_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_pps_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_lacp_rate_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_ad_select_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_queue_id_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_mode_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_slaves_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+
+
+static const struct bond_opt_value bond_mode_tbl[] = {
{ "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
{ "active-backup", BOND_MODE_ACTIVEBACKUP, 0},
{ "balance-xor", BOND_MODE_XOR, 0},
@@ -31,13 +83,13 @@ static struct bond_opt_value bond_mode_tbl[] = {
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_pps_tbl[] = {
+static const struct bond_opt_value bond_pps_tbl[] = {
{ "default", 1, BOND_VALFLAG_DEFAULT},
{ "maxval", USHRT_MAX, BOND_VALFLAG_MAX},
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
+static const struct bond_opt_value bond_xmit_hashtype_tbl[] = {
{ "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
{ "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
{ "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
@@ -46,85 +98,88 @@ static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_arp_validate_tbl[] = {
- { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
- { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
- { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
- { "all", BOND_ARP_VALIDATE_ALL, 0},
- { NULL, -1, 0},
+static const struct bond_opt_value bond_arp_validate_tbl[] = {
+ { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
+ { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
+ { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
+ { "all", BOND_ARP_VALIDATE_ALL, 0},
+ { "filter", BOND_ARP_FILTER, 0},
+ { "filter_active", BOND_ARP_FILTER_ACTIVE, 0},
+ { "filter_backup", BOND_ARP_FILTER_BACKUP, 0},
+ { NULL, -1, 0},
};
-static struct bond_opt_value bond_arp_all_targets_tbl[] = {
+static const struct bond_opt_value bond_arp_all_targets_tbl[] = {
{ "any", BOND_ARP_TARGETS_ANY, BOND_VALFLAG_DEFAULT},
{ "all", BOND_ARP_TARGETS_ALL, 0},
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_fail_over_mac_tbl[] = {
+static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
{ "none", BOND_FOM_NONE, BOND_VALFLAG_DEFAULT},
{ "active", BOND_FOM_ACTIVE, 0},
{ "follow", BOND_FOM_FOLLOW, 0},
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_intmax_tbl[] = {
+static const struct bond_opt_value bond_intmax_tbl[] = {
{ "off", 0, BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
};
-static struct bond_opt_value bond_lacp_rate_tbl[] = {
+static const struct bond_opt_value bond_lacp_rate_tbl[] = {
{ "slow", AD_LACP_SLOW, 0},
{ "fast", AD_LACP_FAST, 0},
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_ad_select_tbl[] = {
+static const struct bond_opt_value bond_ad_select_tbl[] = {
{ "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
{ "bandwidth", BOND_AD_BANDWIDTH, 0},
{ "count", BOND_AD_COUNT, 0},
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_num_peer_notif_tbl[] = {
+static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
{ "off", 0, 0},
{ "maxval", 255, BOND_VALFLAG_MAX},
{ "default", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
-static struct bond_opt_value bond_primary_reselect_tbl[] = {
+static const struct bond_opt_value bond_primary_reselect_tbl[] = {
{ "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
{ "better", BOND_PRI_RESELECT_BETTER, 0},
{ "failure", BOND_PRI_RESELECT_FAILURE, 0},
{ NULL, -1},
};
-static struct bond_opt_value bond_use_carrier_tbl[] = {
+static const struct bond_opt_value bond_use_carrier_tbl[] = {
{ "off", 0, 0},
{ "on", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
-static struct bond_opt_value bond_all_slaves_active_tbl[] = {
+static const struct bond_opt_value bond_all_slaves_active_tbl[] = {
{ "off", 0, BOND_VALFLAG_DEFAULT},
{ "on", 1, 0},
{ NULL, -1, 0}
};
-static struct bond_opt_value bond_resend_igmp_tbl[] = {
+static const struct bond_opt_value bond_resend_igmp_tbl[] = {
{ "off", 0, 0},
{ "maxval", 255, BOND_VALFLAG_MAX},
{ "default", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
-static struct bond_opt_value bond_lp_interval_tbl[] = {
+static const struct bond_opt_value bond_lp_interval_tbl[] = {
{ "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
{ NULL, -1, 0},
};
-static struct bond_option bond_opts[] = {
+static const struct bond_option bond_opts[] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
.name = "mode",
@@ -152,7 +207,8 @@ static struct bond_option bond_opts[] = {
.id = BOND_OPT_ARP_VALIDATE,
.name = "arp_validate",
.desc = "validate src/dst of ARP probes",
- .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP)),
+ .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB),
.values = bond_arp_validate_tbl,
.set = bond_option_arp_validate_set
},
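With the unsuppmodes mask above, arp_validate is refused only in 802.3ad, TLB and ALB modes instead of everywhere except active-backup. A tiny illustration of the bitmask test such a table implies; the mode numbering follows the usual bonding order but is only illustrative here:

#include <stdio.h>

enum { MODE_RR, MODE_AB, MODE_XOR, MODE_BCAST, MODE_8023AD, MODE_TLB, MODE_ALB };
#define MODE_BIT(n)	(1u << (n))

static int mode_supported(unsigned int unsuppmodes, int mode)
{
	return !(unsuppmodes & MODE_BIT(mode));
}

int main(void)
{
	unsigned int unsupp = MODE_BIT(MODE_8023AD) | MODE_BIT(MODE_TLB) |
			      MODE_BIT(MODE_ALB);

	printf("%d %d\n",
	       mode_supported(unsupp, MODE_RR),		/* 1: now allowed */
	       mode_supported(unsupp, MODE_8023AD));	/* 0: still rejected */
	return 0;
}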
@@ -312,9 +368,9 @@ static struct bond_option bond_opts[] = {
};
/* Searches for a value in opt's values[] table */
-struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
+const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
{
- struct bond_option *opt;
+ const struct bond_option *opt;
int i;
opt = bond_opt_get(option);
@@ -328,7 +384,7 @@ struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
}
/* Searches for a value in opt's values[] table which matches the flagmask */
-static struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
+static const struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
u32 flagmask)
{
int i;
@@ -345,7 +401,7 @@ static struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
*/
static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
{
- struct bond_opt_value *minval, *maxval;
+ const struct bond_opt_value *minval, *maxval;
minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
@@ -365,11 +421,12 @@ static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
* or the struct_opt_value that matched. It also strips the new line from
* @val->string if it's present.
*/
-struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
- struct bond_opt_value *val)
+const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+ struct bond_opt_value *val)
{
char *p, valstr[BOND_OPT_MAX_NAMELEN + 1] = { 0, };
- struct bond_opt_value *tbl, *ret = NULL;
+ const struct bond_opt_value *tbl;
+ const struct bond_opt_value *ret = NULL;
bool checkval;
int i, rv;
@@ -448,7 +505,7 @@ static int bond_opt_check_deps(struct bonding *bond,
static void bond_opt_dep_print(struct bonding *bond,
const struct bond_option *opt)
{
- struct bond_opt_value *modeval;
+ const struct bond_opt_value *modeval;
struct bond_params *params;
params = &bond->params;
@@ -461,9 +518,9 @@ static void bond_opt_dep_print(struct bonding *bond,
static void bond_opt_error_interpret(struct bonding *bond,
const struct bond_option *opt,
- int error, struct bond_opt_value *val)
+ int error, const struct bond_opt_value *val)
{
- struct bond_opt_value *minval, *maxval;
+ const struct bond_opt_value *minval, *maxval;
char *p;
switch (error) {
@@ -474,10 +531,10 @@ static void bond_opt_error_interpret(struct bonding *bond,
p = strchr(val->string, '\n');
if (p)
*p = '\0';
- pr_err("%s: option %s: invalid value (%s).\n",
+ pr_err("%s: option %s: invalid value (%s)\n",
bond->dev->name, opt->name, val->string);
} else {
- pr_err("%s: option %s: invalid value (%llu).\n",
+ pr_err("%s: option %s: invalid value (%llu)\n",
bond->dev->name, opt->name, val->value);
}
}
@@ -485,7 +542,7 @@ static void bond_opt_error_interpret(struct bonding *bond,
maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
if (!maxval)
break;
- pr_err("%s: option %s: allowed values %llu - %llu.\n",
+ pr_err("%s: option %s: allowed values %llu - %llu\n",
bond->dev->name, opt->name, minval ? minval->value : 0,
maxval->value);
break;
@@ -493,11 +550,11 @@ static void bond_opt_error_interpret(struct bonding *bond,
bond_opt_dep_print(bond, opt);
break;
case -ENOTEMPTY:
- pr_err("%s: option %s: unable to set because the bond device has slaves.\n",
+ pr_err("%s: option %s: unable to set because the bond device has slaves\n",
bond->dev->name, opt->name);
break;
case -EBUSY:
- pr_err("%s: option %s: unable to set because the bond device is up.\n",
+ pr_err("%s: option %s: unable to set because the bond device is up\n",
bond->dev->name, opt->name);
break;
default:
@@ -518,7 +575,7 @@ static void bond_opt_error_interpret(struct bonding *bond,
int __bond_opt_set(struct bonding *bond,
unsigned int option, struct bond_opt_value *val)
{
- struct bond_opt_value *retval = NULL;
+ const struct bond_opt_value *retval = NULL;
const struct bond_option *opt;
int ret = -ENOENT;
@@ -573,7 +630,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf)
* This function checks if option is valid and if so returns a pointer
* to its entry in the bond_opts[] option array.
*/
-struct bond_option *bond_opt_get(unsigned int option)
+const struct bond_option *bond_opt_get(unsigned int option)
{
if (!BOND_OPT_VALID(option))
return NULL;
@@ -581,7 +638,7 @@ struct bond_option *bond_opt_get(unsigned int option)
return &bond_opts[option];
}
-int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
+int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
{
if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
@@ -590,7 +647,7 @@ int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
bond->params.arp_interval = 0;
/* set miimon to default value */
bond->params.miimon = BOND_DEFAULT_MIIMON;
- pr_info("%s: Setting MII monitoring interval to %d.\n",
+ pr_info("%s: Setting MII monitoring interval to %d\n",
bond->dev->name, bond->params.miimon);
}
@@ -619,8 +676,8 @@ struct net_device *bond_option_active_slave_get(struct bonding *bond)
return __bond_option_active_slave_get(bond, bond->curr_active_slave);
}
-int bond_option_active_slave_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_active_slave_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
char ifname[IFNAMSIZ] = { 0, };
struct net_device *slave_dev;
@@ -637,13 +694,13 @@ int bond_option_active_slave_set(struct bonding *bond,
if (slave_dev) {
if (!netif_is_bond_slave(slave_dev)) {
- pr_err("Device %s is not bonding slave.\n",
+ pr_err("Device %s is not bonding slave\n",
slave_dev->name);
return -EINVAL;
}
if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
- pr_err("%s: Device %s is not our slave.\n",
+ pr_err("%s: Device %s is not our slave\n",
bond->dev->name, slave_dev->name);
return -EINVAL;
}
@@ -654,9 +711,8 @@ int bond_option_active_slave_set(struct bonding *bond,
/* check to see if we are clearing active */
if (!slave_dev) {
- pr_info("%s: Clearing current active slave.\n",
- bond->dev->name);
- rcu_assign_pointer(bond->curr_active_slave, NULL);
+ pr_info("%s: Clearing current active slave\n", bond->dev->name);
+ RCU_INIT_POINTER(bond->curr_active_slave, NULL);
bond_select_active_slave(bond);
} else {
struct slave *old_active = bond->curr_active_slave;
@@ -666,16 +722,16 @@ int bond_option_active_slave_set(struct bonding *bond,
if (new_active == old_active) {
/* do nothing */
- pr_info("%s: %s is already the current active slave.\n",
+ pr_info("%s: %s is already the current active slave\n",
bond->dev->name, new_active->dev->name);
} else {
if (old_active && (new_active->link == BOND_LINK_UP) &&
IS_UP(new_active->dev)) {
- pr_info("%s: Setting %s as active slave.\n",
+ pr_info("%s: Setting %s as active slave\n",
bond->dev->name, new_active->dev->name);
bond_change_active_slave(bond, new_active);
} else {
- pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+ pr_err("%s: Could not set %s as active slave; either %s is down or the link is down\n",
bond->dev->name, new_active->dev->name,
new_active->dev->name);
ret = -EINVAL;
@@ -689,21 +745,22 @@ int bond_option_active_slave_set(struct bonding *bond,
return ret;
}
-int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_miimon_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting MII monitoring interval to %llu.\n",
+ pr_info("%s: Setting MII monitoring interval to %llu\n",
bond->dev->name, newval->value);
bond->params.miimon = newval->value;
if (bond->params.updelay)
- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
bond->dev->name,
bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
bond->dev->name,
bond->params.downdelay * bond->params.miimon);
if (newval->value && bond->params.arp_interval) {
- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n",
bond->dev->name);
bond->params.arp_interval = 0;
if (bond->params.arp_validate)
@@ -726,7 +783,8 @@ int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
return 0;
}
-int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_updelay_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
int value = newval->value;
@@ -743,15 +801,14 @@ int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
bond->params.miimon);
}
bond->params.updelay = value / bond->params.miimon;
- pr_info("%s: Setting up delay to %d.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
+ pr_info("%s: Setting up delay to %d\n",
+ bond->dev->name, bond->params.updelay * bond->params.miimon);
return 0;
}
-int bond_option_downdelay_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_downdelay_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
int value = newval->value;
@@ -768,37 +825,36 @@ int bond_option_downdelay_set(struct bonding *bond,
bond->params.miimon);
}
bond->params.downdelay = value / bond->params.miimon;
- pr_info("%s: Setting down delay to %d.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ pr_info("%s: Setting down delay to %d\n",
+ bond->dev->name, bond->params.downdelay * bond->params.miimon);
return 0;
}
-int bond_option_use_carrier_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_use_carrier_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting use_carrier to %llu.\n",
+ pr_info("%s: Setting use_carrier to %llu\n",
bond->dev->name, newval->value);
bond->params.use_carrier = newval->value;
return 0;
}
-int bond_option_arp_interval_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_arp_interval_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting ARP monitoring interval to %llu.\n",
+ pr_info("%s: Setting ARP monitoring interval to %llu\n",
bond->dev->name, newval->value);
bond->params.arp_interval = newval->value;
if (newval->value) {
if (bond->params.miimon) {
- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring\n",
bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
}
if (!bond->params.arp_targets[0])
- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified\n",
bond->dev->name);
}
if (bond->dev->flags & IFF_UP) {
@@ -813,8 +869,7 @@ int bond_option_arp_interval_set(struct bonding *bond,
cancel_delayed_work_sync(&bond->arp_work);
} else {
/* arp_validate can be set only in active-backup mode */
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_arp_rcv;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
@@ -857,19 +912,18 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
ind = bond_get_targets_ip(targets, 0); /* first free slot */
if (ind == -1) {
- pr_err("%s: ARP target table is full!\n",
- bond->dev->name);
+ pr_err("%s: ARP target table is full!\n", bond->dev->name);
return -EINVAL;
}
- pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &target);
+ pr_info("%s: Adding ARP target %pI4\n", bond->dev->name, &target);
_bond_options_arp_ip_target_set(bond, ind, target, jiffies);
return 0;
}
-int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
{
int ret;
@@ -881,7 +935,7 @@ int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
return ret;
}
-int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
+static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
{
__be32 *targets = bond->params.arp_targets;
struct list_head *iter;
@@ -897,17 +951,16 @@ int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
ind = bond_get_targets_ip(targets, target);
if (ind == -1) {
- pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
+ pr_err("%s: unable to remove nonexistent ARP target %pI4\n",
bond->dev->name, &target);
return -EINVAL;
}
if (ind == 0 && !targets[1] && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
+ pr_warn("%s: Removing last arp target with arp_interval on\n",
bond->dev->name);
- pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
- &target);
+ pr_info("%s: Removing ARP target %pI4\n", bond->dev->name, &target);
/* not to race with bond_arp_rcv */
write_lock_bh(&bond->lock);
@@ -938,8 +991,8 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond)
write_unlock_bh(&bond->lock);
}
-int bond_option_arp_ip_targets_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_arp_ip_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
int ret = -EPERM;
__be32 target;
@@ -955,7 +1008,7 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
else if (newval->string[0] == '-')
ret = bond_option_arp_ip_target_rem(bond, target);
else
- pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
+ pr_err("no command found in arp_ip_targets file for bond %s - use +<addr> or -<addr>\n",
bond->dev->name);
} else {
target = newval->value;
@@ -965,10 +1018,10 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
return ret;
}
-int bond_option_arp_validate_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_arp_validate_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: setting arp_validate to %s (%llu).\n",
+ pr_info("%s: Setting arp_validate to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
if (bond->dev->flags & IFF_UP) {
@@ -982,17 +1035,18 @@ int bond_option_arp_validate_set(struct bonding *bond,
return 0;
}
-int bond_option_arp_all_targets_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_arp_all_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: setting arp_all_targets to %s (%llu).\n",
+ pr_info("%s: Setting arp_all_targets to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.arp_all_targets = newval->value;
return 0;
}
-int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_primary_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
char *p, *primary = newval->string;
struct list_head *iter;
@@ -1007,8 +1061,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
*p = '\0';
/* check to see if we are clearing primary */
if (!strlen(primary)) {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
+ pr_info("%s: Setting primary slave to None\n", bond->dev->name);
bond->primary_slave = NULL;
memset(bond->params.primary, 0, sizeof(bond->params.primary));
bond_select_active_slave(bond);
@@ -1017,7 +1070,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
bond_for_each_slave(bond, slave, iter) {
if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
- pr_info("%s: Setting %s as primary slave.\n",
+ pr_info("%s: Setting %s as primary slave\n",
bond->dev->name, slave->dev->name);
bond->primary_slave = slave;
strcpy(bond->params.primary, slave->dev->name);
@@ -1027,15 +1080,14 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
}
if (bond->primary_slave) {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
+ pr_info("%s: Setting primary slave to None\n", bond->dev->name);
bond->primary_slave = NULL;
bond_select_active_slave(bond);
}
strncpy(bond->params.primary, primary, IFNAMSIZ);
bond->params.primary[IFNAMSIZ - 1] = 0;
- pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet.\n",
+ pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet\n",
bond->dev->name, primary, bond->dev->name);
out:
@@ -1046,10 +1098,10 @@ out:
return 0;
}
-int bond_option_primary_reselect_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_primary_reselect_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: setting primary_reselect to %s (%llu).\n",
+ pr_info("%s: Setting primary_reselect to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.primary_reselect = newval->value;
@@ -1062,46 +1114,46 @@ int bond_option_primary_reselect_set(struct bonding *bond,
return 0;
}
-int bond_option_fail_over_mac_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_fail_over_mac_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting fail_over_mac to %s (%llu).\n",
+ pr_info("%s: Setting fail_over_mac to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.fail_over_mac = newval->value;
return 0;
}
-int bond_option_xmit_hash_policy_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: setting xmit hash policy to %s (%llu).\n",
+ pr_info("%s: Setting xmit hash policy to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.xmit_policy = newval->value;
return 0;
}
-int bond_option_resend_igmp_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_resend_igmp_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting resend_igmp to %llu.\n",
+ pr_info("%s: Setting resend_igmp to %llu\n",
bond->dev->name, newval->value);
bond->params.resend_igmp = newval->value;
return 0;
}
-int bond_option_num_peer_notif_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_num_peer_notif_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
bond->params.num_peer_notif = newval->value;
return 0;
}
-int bond_option_all_slaves_active_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_all_slaves_active_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
struct list_head *iter;
struct slave *slave;
@@ -1121,8 +1173,8 @@ int bond_option_all_slaves_active_set(struct bonding *bond,
return 0;
}
-int bond_option_min_links_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_min_links_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
pr_info("%s: Setting min links value to %llu\n",
bond->dev->name, newval->value);
@@ -1131,15 +1183,16 @@ int bond_option_min_links_set(struct bonding *bond,
return 0;
}
-int bond_option_lp_interval_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_lp_interval_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
bond->params.lp_interval = newval->value;
return 0;
}
-int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_pps_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
bond->params.packets_per_slave = newval->value;
if (newval->value > 0) {
@@ -1156,10 +1209,10 @@ int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
return 0;
}
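packets_per_slave, stored above, lets balance-rr send a burst of packets to one slave before rotating to the next. A userspace sketch of that selection policy using a plain counter; the real driver keeps its own per-bond state and its arithmetic is not reproduced here:

#include <stdio.h>

static int pick_slave(unsigned long *counter, unsigned int pps, int nslaves)
{
	unsigned long burst = (*counter)++ / (pps ? pps : 1);

	return (int)(burst % nslaves);
}

int main(void)
{
	unsigned long counter = 0;
	int i;

	for (i = 0; i < 6; i++)			/* pps = 2, three slaves */
		printf("%d ", pick_slave(&counter, 2, 3));
	printf("\n");				/* 0 0 1 1 2 2 */
	return 0;
}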
-int bond_option_lacp_rate_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_lacp_rate_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting LACP rate to %s (%llu).\n",
+ pr_info("%s: Setting LACP rate to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.lacp_fast = newval->value;
bond_3ad_update_lacp_rate(bond);
@@ -1167,18 +1220,18 @@ int bond_option_lacp_rate_set(struct bonding *bond,
return 0;
}
-int bond_option_ad_select_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_ad_select_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting ad_select to %s (%llu).\n",
+ pr_info("%s: Setting ad_select to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.ad_select = newval->value;
return 0;
}
-int bond_option_queue_id_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_queue_id_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
struct slave *slave, *update_slave;
struct net_device *sdev;
@@ -1200,8 +1253,7 @@ int bond_option_queue_id_set(struct bonding *bond,
goto err_no_cmd;
/* Check buffer length, valid ifname and queue id */
- if (strlen(newval->string) > IFNAMSIZ ||
- !dev_valid_name(newval->string) ||
+ if (!dev_valid_name(newval->string) ||
qid > bond->dev->real_num_tx_queues)
goto err_no_cmd;
@@ -1233,14 +1285,14 @@ out:
return ret;
err_no_cmd:
- pr_info("invalid input for queue_id set for %s.\n",
- bond->dev->name);
+ pr_info("invalid input for queue_id set for %s\n", bond->dev->name);
ret = -EPERM;
goto out;
}
-int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_slaves_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
char command[IFNAMSIZ + 1] = { 0, };
struct net_device *dev;
@@ -1255,7 +1307,7 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
dev = __dev_get_by_name(dev_net(bond->dev), ifname);
if (!dev) {
- pr_info("%s: Interface %s does not exist!\n",
+ pr_info("%s: interface %s does not exist!\n",
bond->dev->name, ifname);
ret = -ENODEV;
goto out;
@@ -1263,12 +1315,12 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
switch (command[0]) {
case '+':
- pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
+ pr_info("%s: Adding slave %s\n", bond->dev->name, dev->name);
ret = bond_enslave(bond->dev, dev);
break;
case '-':
- pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
+ pr_info("%s: Removing slave %s\n", bond->dev->name, dev->name);
ret = bond_release(bond->dev, dev);
break;
@@ -1280,7 +1332,7 @@ out:
return ret;
err_no_cmd:
- pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
+ pr_err("no command found in slaves file for bond %s - use +ifname or -ifname\n",
bond->dev->name);
ret = -EPERM;
goto out;
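
[Editor's note on the queue_id hunk above: the dropped strlen() test was redundant, since dev_valid_name() already rejects empty names and names that reach IFNAMSIZ characters. A minimal illustration; the helper name is made up:]

	/* Illustration only: dev_valid_name() (net/core/dev.c) is the single
	 * length/character check the queue_id parser now relies on.
	 */
	static bool example_queue_id_name_ok(const char *ifname)
	{
		return dev_valid_name(ifname);	/* false for "" or over-long names */
	}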
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
index 433d37f6940b..12be9e1bfb0c 100644
--- a/drivers/net/bonding/bond_options.h
+++ b/drivers/net/bonding/bond_options.h
@@ -81,8 +81,8 @@ struct bonding;
struct bond_option {
int id;
- char *name;
- char *desc;
+ const char *name;
+ const char *desc;
u32 flags;
/* unsuppmodes is used to denote modes in which the option isn't
@@ -92,18 +92,19 @@ struct bond_option {
/* supported values which this option can have, can be a subset of
* BOND_OPTVAL_RANGE's value range
*/
- struct bond_opt_value *values;
+ const struct bond_opt_value *values;
- int (*set)(struct bonding *bond, struct bond_opt_value *val);
+ int (*set)(struct bonding *bond, const struct bond_opt_value *val);
};
int __bond_opt_set(struct bonding *bond, unsigned int option,
struct bond_opt_value *val);
int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
-struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
- struct bond_opt_value *val);
-struct bond_option *bond_opt_get(unsigned int option);
-struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
+
+const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+ struct bond_opt_value *val);
+const struct bond_option *bond_opt_get(unsigned int option);
+const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
/* This helper is used to initialize a bond_opt_value structure for parameter
* passing. There should be either a valid string or value, but not both.
@@ -122,49 +123,6 @@ static inline void __bond_opt_init(struct bond_opt_value *optval,
#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX)
-int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval);
-int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval);
-int bond_option_xmit_hash_policy_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_arp_validate_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_arp_all_targets_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_fail_over_mac_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_arp_interval_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_arp_ip_targets_set(struct bonding *bond,
- struct bond_opt_value *newval);
void bond_option_arp_ip_targets_clear(struct bonding *bond);
-int bond_option_downdelay_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_updelay_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_lacp_rate_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_min_links_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_ad_select_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_num_peer_notif_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval);
-int bond_option_primary_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_primary_reselect_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_use_carrier_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_active_slave_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_queue_id_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_all_slaves_active_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_resend_igmp_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_lp_interval_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval);
+
#endif /* _BOND_OPTIONS_H */
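
[Editor's note: with the per-option setter prototypes removed and the set() callbacks const-ified, callers reach the now-static setters only through the generic option API declared above. A minimal sketch, assuming rtnl_lock() is already held as __bond_opt_set() expects; BOND_OPT_LACP_RATE is one of the existing option ids referenced elsewhere in this series:]

	/* Sketch: drive a setter via the option table instead of calling
	 * bond_option_lacp_rate_set() directly (that symbol is static now).
	 */
	static int example_set_lacp_rate(struct bonding *bond, char *buf)
	{
		struct bond_opt_value newval;

		bond_opt_initstr(&newval, buf);		/* e.g. "fast" or "slow" */
		return __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
	}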
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 3ac20e78eafc..013fdd0f45e9 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -65,13 +65,11 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
static void bond_info_show_master(struct seq_file *seq)
{
struct bonding *bond = seq->private;
- struct bond_opt_value *optval;
+ const struct bond_opt_value *optval;
struct slave *curr;
int i;
- read_lock(&bond->curr_slave_lock);
- curr = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
+ curr = rcu_dereference(bond->curr_active_slave);
seq_printf(seq, "Bonding Mode: %s",
bond_mode_name(bond->params.mode));
@@ -254,8 +252,8 @@ void bond_create_proc_entry(struct bonding *bond)
S_IRUGO, bn->proc_dir,
&bond_info_fops, bond);
if (bond->proc_entry == NULL)
- pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
- DRV_NAME, bond_dev->name);
+ pr_warn("Warning: Cannot create /proc/net/%s/%s\n",
+ DRV_NAME, bond_dev->name);
else
memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
}
@@ -281,8 +279,8 @@ void __net_init bond_create_proc_dir(struct bond_net *bn)
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
if (!bn->proc_dir)
- pr_warning("Warning: cannot create /proc/net/%s\n",
- DRV_NAME);
+ pr_warn("Warning: Cannot create /proc/net/%s\n",
+ DRV_NAME);
}
}
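
[Editor's note: the procfs hunk above replaces the curr_slave_lock read-lock pair with rcu_dereference(). A minimal sketch of the pattern, assuming (as the surrounding procfs code does) that the seq_file walk already runs inside rcu_read_lock()/rcu_read_unlock():]

	/* Sketch: lockless read of the active slave from within an existing
	 * RCU read-side critical section.
	 */
	static struct slave *example_get_active_slave(struct bonding *bond)
	{
		return rcu_dereference(bond->curr_active_slave);
	}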
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 643fcc110299..0e8b268da0a0 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -117,9 +117,9 @@ static ssize_t bonding_store_bonds(struct class *cls,
rv = bond_create(bn->net, ifname);
if (rv) {
if (rv == -EEXIST)
- pr_info("%s already exists.\n", ifname);
+ pr_info("%s already exists\n", ifname);
else
- pr_info("%s creation failed.\n", ifname);
+ pr_info("%s creation failed\n", ifname);
res = rv;
}
} else if (command[0] == '-') {
@@ -144,7 +144,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
return res;
err_no_cmd:
- pr_err("no command found in bonding_masters. Use +ifname or -ifname.\n");
+ pr_err("no command found in bonding_masters - use +ifname or -ifname\n");
return -EPERM;
}
@@ -220,7 +220,7 @@ static ssize_t bonding_show_mode(struct device *d,
struct device_attribute *attr, char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);
@@ -251,7 +251,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
@@ -282,7 +282,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
bond->params.arp_validate);
@@ -314,7 +314,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
bond->params.arp_all_targets);
@@ -348,7 +348,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
@@ -505,7 +505,7 @@ static ssize_t bonding_show_lacp(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
@@ -558,7 +558,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
@@ -686,7 +686,7 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
bond->params.primary_reselect);
@@ -1135,7 +1135,7 @@ int bond_create_sysfs(struct bond_net *bn)
/* Is someone being kinky and naming a device bonding_master? */
if (__dev_get_by_name(bn->net,
class_attr_bonding_masters.attr.name))
- pr_err("network device named %s already exists in sysfs",
+ pr_err("network device named %s already exists in sysfs\n",
class_attr_bonding_masters.attr.name);
ret = 0;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 2b0fdec695f7..b8bdd0acc8f3 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -188,8 +188,9 @@ struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
int delay;
- unsigned long jiffies;
- unsigned long last_arp_rx;
+ /* all three in jiffies */
+ unsigned long last_link_up;
+ unsigned long last_rx;
unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
s8 link; /* one of BOND_LINK_XXXX */
s8 new_link;
@@ -265,6 +266,11 @@ struct bonding {
#define bond_slave_get_rtnl(dev) \
((struct slave *) rtnl_dereference(dev->rx_handler_data))
+struct bond_vlan_tag {
+ __be16 vlan_proto;
+ unsigned short vlan_id;
+};
+
/**
* Returns NULL if the net_device does not belong to any of the bond's slaves
*
@@ -292,7 +298,7 @@ static inline void bond_set_active_slave(struct slave *slave)
{
if (slave->backup) {
slave->backup = 0;
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
}
}
@@ -300,7 +306,7 @@ static inline void bond_set_backup_slave(struct slave *slave)
{
if (!slave->backup) {
slave->backup = 1;
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
}
}
@@ -312,7 +318,7 @@ static inline void bond_set_slave_state(struct slave *slave,
slave->backup = slave_state;
if (notify) {
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
slave->should_notify = 0;
} else {
if (slave->should_notify)
@@ -342,7 +348,7 @@ static inline void bond_slave_state_notify(struct bonding *bond)
bond_for_each_slave(bond, tmp, iter) {
if (tmp->should_notify) {
- rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC);
tmp->should_notify = 0;
}
}
@@ -374,6 +380,11 @@ static inline bool bond_is_active_slave(struct slave *slave)
#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
BOND_ARP_VALIDATE_BACKUP)
+#define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1)
+#define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \
+ BOND_ARP_FILTER)
+#define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \
+ BOND_ARP_FILTER)
#define BOND_SLAVE_NOTIFY_NOW true
#define BOND_SLAVE_NOTIFY_LATER false
@@ -384,6 +395,12 @@ static inline int slave_do_arp_validate(struct bonding *bond,
return bond->params.arp_validate & (1 << bond_slave_state(slave));
}
+static inline int slave_do_arp_validate_only(struct bonding *bond,
+ struct slave *slave)
+{
+ return bond->params.arp_validate & BOND_ARP_FILTER;
+}
+
/* Get the oldest arp which we've received on this slave for bond's
* arp_targets.
*/
@@ -403,14 +420,10 @@ static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
static inline unsigned long slave_last_rx(struct bonding *bond,
struct slave *slave)
{
- if (slave_do_arp_validate(bond, slave)) {
- if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
- return slave_oldest_target_arp_rx(bond, slave);
- else
- return slave->last_arp_rx;
- }
+ if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
+ return slave_oldest_target_arp_rx(bond, slave);
- return slave->dev->last_rx;
+ return slave->last_rx;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -487,8 +500,6 @@ void bond_sysfs_slave_del(struct slave *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
-int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
-int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
@@ -501,8 +512,6 @@ void bond_setup(struct net_device *bond_dev);
unsigned int bond_get_num_tx_queues(void);
int bond_netlink_init(void);
void bond_netlink_fini(void);
-int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
-int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
struct net_device *bond_option_active_slave_get(struct bonding *bond);
const char *bond_slave_link_status(s8 link);
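
[Editor's note: two points in the bonding.h hunks are worth spelling out. The per-slave timestamps become last_link_up/last_rx (all in jiffies), and the rtmsg_ifinfo() notifications switch to GFP_ATOMIC so the slave-state helpers may be called from atomic context. A hypothetical monitor-style helper, only to show how the renamed fields are meant to be consumed; the real users live in bond_main.c, which is not part of this section:]

	/* Illustrative only: treat a slave as alive if something was received
	 * within one arp_interval, using the renamed jiffies fields.
	 */
	static bool example_slave_recently_seen(struct bonding *bond,
						struct slave *slave)
	{
		unsigned long delta = msecs_to_jiffies(bond->params.arp_interval);
		unsigned long last = slave_last_rx(bond, slave);

		return time_in_range(jiffies, last, last + delta);
	}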
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 88a6a5810ec6..fc73865bb83a 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -204,7 +204,6 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
- skb->dev = ser->dev;
debugfs_rx(ser, data, count);
/* Push received packet up the stack. */
ret = netif_rx_ni(skb);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 155db68e13ba..ff54c0eb2052 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -554,7 +554,6 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
- skb->dev = cfspi->ndev;
/*
* Push received packet up the stack.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 6efe27458116..f07fa89b5fd5 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -420,7 +420,11 @@ static void at91_chip_start(struct net_device *dev)
at91_transceiver_switch(priv, 1);
/* enable chip */
- at91_write(priv, AT91_MR, AT91_MR_CANEN);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ reg_mr = AT91_MR_CANEN | AT91_MR_ABM;
+ else
+ reg_mr = AT91_MR_CANEN;
+ at91_write(priv, AT91_MR, reg_mr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -1190,6 +1194,7 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_open = at91_open,
.ndo_stop = at91_close,
.ndo_start_xmit = at91_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
@@ -1341,7 +1346,8 @@ static int at91_can_probe(struct platform_device *pdev)
priv->can.bittiming_const = &at91_bittiming_const;
priv->can.do_set_mode = at91_set_mode;
priv->can.do_get_berr_counter = at91_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY;
priv->dev = dev;
priv->reg_base = addr;
priv->devtype_data = *devtype_data;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 8d2b89a12e09..543ecceb33e9 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -528,6 +528,7 @@ static const struct net_device_ops bfin_can_netdev_ops = {
.ndo_open = bfin_can_open,
.ndo_stop = bfin_can_close,
.ndo_start_xmit = bfin_can_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static int bfin_can_probe(struct platform_device *pdev)
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 951bfede8f3d..a5c8dcfa8357 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -114,6 +114,14 @@
IF_COMM_CONTROL | IF_COMM_TXRQST | \
IF_COMM_DATAA | IF_COMM_DATAB)
+/* For the low buffers we clear the interrupt bit, but keep newdat */
+#define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \
+ IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
+ IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* For the high buffers we clear the interrupt bit and newdat */
+#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_TXRQST)
+
/* IFx arbitration */
#define IF_ARB_MSGVAL BIT(15)
#define IF_ARB_MSGXTD BIT(14)
@@ -122,7 +130,6 @@
/* IFx message control */
#define IF_MCONT_NEWDAT BIT(15)
#define IF_MCONT_MSGLST BIT(14)
-#define IF_MCONT_CLR_MSGLST (0 << 14)
#define IF_MCONT_INTPND BIT(13)
#define IF_MCONT_UMASK BIT(12)
#define IF_MCONT_TXIE BIT(11)
@@ -133,31 +140,10 @@
#define IF_MCONT_DLC_MASK 0xf
/*
- * IFx register masks:
- * allow easy operation on 16-bit registers when the
- * argument is 32-bit instead
+ * Use IF1 for RX and IF2 for TX
*/
-#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
-#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
-
-/* message object split */
-#define C_CAN_NO_OF_OBJECTS 32
-#define C_CAN_MSG_OBJ_RX_NUM 16
-#define C_CAN_MSG_OBJ_TX_NUM 16
-
-#define C_CAN_MSG_OBJ_RX_FIRST 1
-#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
- C_CAN_MSG_OBJ_RX_NUM - 1)
-
-#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
-#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
- C_CAN_MSG_OBJ_TX_NUM - 1)
-
-#define C_CAN_MSG_OBJ_RX_SPLIT 9
-#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
-
-#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
-#define RECEIVE_OBJECT_BITS 0x0000ffff
+#define IF_RX 0
+#define IF_TX 1
/* status interrupt */
#define STATUS_INTERRUPT 0x8000
@@ -246,10 +232,9 @@ static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
C_CAN_MSG_OBJ_TX_FIRST;
}
-static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
+static inline int get_tx_echo_msg_obj(int txecho)
{
- return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
- C_CAN_MSG_OBJ_TX_FIRST;
+ return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
}
static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
@@ -366,18 +351,6 @@ static void c_can_write_msg_object(struct net_device *dev,
c_can_object_put(dev, iface, objno, IF_COMM_ALL);
}
-static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
- int iface, int ctrl_mask,
- int obj)
-{
- struct c_can_priv *priv = netdev_priv(dev);
-
- priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
- ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
- c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
-
-}
-
static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
int iface,
int ctrl_mask)
@@ -387,45 +360,27 @@ static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
- ctrl_mask & ~(IF_MCONT_MSGLST |
- IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+ ctrl_mask & ~IF_MCONT_NEWDAT);
c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
}
}
-static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
- int iface, int ctrl_mask,
- int obj)
-{
- struct c_can_priv *priv = netdev_priv(dev);
-
- priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
- ctrl_mask & ~(IF_MCONT_MSGLST |
- IF_MCONT_INTPND | IF_MCONT_NEWDAT));
- c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
-}
-
-static void c_can_handle_lost_msg_obj(struct net_device *dev,
- int iface, int objno)
+static int c_can_handle_lost_msg_obj(struct net_device *dev,
+ int iface, int objno, u32 ctrl)
{
- struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
- struct sk_buff *skb;
+ struct c_can_priv *priv = netdev_priv(dev);
struct can_frame *frame;
+ struct sk_buff *skb;
- netdev_err(dev, "msg lost in buffer %d\n", objno);
-
- c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
-
- priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
- IF_MCONT_CLR_MSGLST);
-
- c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
+ ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
+ c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
/* create an error msg */
skb = alloc_can_err_skb(dev, &frame);
if (unlikely(!skb))
- return;
+ return 0;
frame->can_id |= CAN_ERR_CRTL;
frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
@@ -433,6 +388,7 @@ static void c_can_handle_lost_msg_obj(struct net_device *dev,
stats->rx_over_errors++;
netif_receive_skb(skb);
+ return 1;
}
static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
@@ -477,9 +433,6 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
-
- can_led_event(dev, CAN_LED_EVENT_RX);
-
return 0;
}
@@ -548,10 +501,12 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
+ spin_lock_bh(&priv->xmit_lock);
msg_obj_no = get_tx_next_msg_obj(priv);
/* prepare message object for transmission */
- c_can_write_msg_object(dev, 0, frame, msg_obj_no);
+ c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
+ priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
/*
@@ -562,10 +517,26 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
(priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
netif_stop_queue(dev);
+ spin_unlock_bh(&priv->xmit_lock);
return NETDEV_TX_OK;
}
+static int c_can_wait_for_ctrl_init(struct net_device *dev,
+ struct c_can_priv *priv, u32 init)
+{
+ int retry = 0;
+
+ while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
+ udelay(10);
+ if (retry++ > 1000) {
+ netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
static int c_can_set_bittiming(struct net_device *dev)
{
unsigned int reg_btr, reg_brpe, ctrl_save;
@@ -573,6 +544,7 @@ static int c_can_set_bittiming(struct net_device *dev)
u32 ten_bit_brp;
struct c_can_priv *priv = netdev_priv(dev);
const struct can_bittiming *bt = &priv->can.bittiming;
+ int res;
/* c_can provides a 6-bit brp and 4-bit brpe fields */
ten_bit_brp = bt->brp - 1;
@@ -590,13 +562,17 @@ static int c_can_set_bittiming(struct net_device *dev)
"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
- priv->write_reg(priv, C_CAN_CTRL_REG,
- ctrl_save | CONTROL_CCE | CONTROL_INIT);
+ ctrl_save &= ~CONTROL_INIT;
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
+ res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
+ if (res)
+ return res;
+
priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);
- return 0;
+ return c_can_wait_for_ctrl_init(dev, priv, 0);
}
/*
@@ -614,14 +590,14 @@ static void c_can_configure_msg_objects(struct net_device *dev)
/* first invalidate all message objects */
for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
- c_can_inval_msg_object(dev, 0, i);
+ c_can_inval_msg_object(dev, IF_RX, i);
/* setup receive message objects */
for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
- c_can_setup_receive_object(dev, 0, i, 0, 0,
+ c_can_setup_receive_object(dev, IF_RX, i, 0, 0,
(IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
- c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
+ c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
}
@@ -631,7 +607,7 @@ static void c_can_configure_msg_objects(struct net_device *dev)
* - set operating mode
* - configure message objects
*/
-static void c_can_chip_config(struct net_device *dev)
+static int c_can_chip_config(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
@@ -668,15 +644,18 @@ static void c_can_chip_config(struct net_device *dev)
priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
/* set bittiming params */
- c_can_set_bittiming(dev);
+ return c_can_set_bittiming(dev);
}
-static void c_can_start(struct net_device *dev)
+static int c_can_start(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
+ int err;
/* basic c_can configuration */
- c_can_chip_config(dev);
+ err = c_can_chip_config(dev);
+ if (err)
+ return err;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -685,6 +664,8 @@ static void c_can_start(struct net_device *dev)
/* enable status change, error and module interrupts */
c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+
+ return 0;
}
static void c_can_stop(struct net_device *dev)
@@ -700,9 +681,13 @@ static void c_can_stop(struct net_device *dev)
static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
{
+ int err;
+
switch (mode) {
case CAN_MODE_START:
- c_can_start(dev);
+ err = c_can_start(dev);
+ if (err)
+ return err;
netif_wake_queue(dev);
break;
default:
@@ -740,8 +725,6 @@ static int c_can_get_berr_counter(const struct net_device *dev,
}
/*
- * theory of operation:
- *
* priv->tx_echo holds the number of the oldest can_frame put for
* transmission into the hardware, but not yet ACKed by the CAN tx
* complete IRQ.
@@ -752,33 +735,113 @@ static int c_can_get_berr_counter(const struct net_device *dev,
*/
static void c_can_do_tx(struct net_device *dev)
{
- u32 val;
- u32 msg_obj_no;
struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
+ u32 val, obj, pkts = 0, bytes = 0;
- for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
- msg_obj_no = get_tx_echo_msg_obj(priv);
+ spin_lock_bh(&priv->xmit_lock);
+
+ for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+ obj = get_tx_echo_msg_obj(priv->tx_echo);
val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
- if (!(val & (1 << (msg_obj_no - 1)))) {
- can_get_echo_skb(dev,
- msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
- c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL);
- stats->tx_bytes += priv->read_reg(priv,
- C_CAN_IFACE(MSGCTRL_REG, 0))
- & IF_MCONT_DLC_MASK;
- stats->tx_packets++;
- can_led_event(dev, CAN_LED_EVENT_TX);
- c_can_inval_msg_object(dev, 0, msg_obj_no);
- } else {
+
+ if (val & (1 << (obj - 1)))
break;
- }
+
+ can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
+ bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
+ pkts++;
+ c_can_inval_msg_object(dev, IF_TX, obj);
}
/* restart queue if wrap-up or if queue stalled on last pkt */
if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
netif_wake_queue(dev);
+
+ spin_unlock_bh(&priv->xmit_lock);
+
+ if (pkts) {
+ stats->tx_bytes += bytes;
+ stats->tx_packets += pkts;
+ can_led_event(dev, CAN_LED_EVENT_TX);
+ }
+}
+
+/*
+ * If we have a gap in the pending bits, that means we either
+ * raced with the hardware or failed to readout all upper
+ * objects in the last run due to quota limit.
+ */
+static u32 c_can_adjust_pending(u32 pend)
+{
+ u32 weight, lasts;
+
+ if (pend == RECEIVE_OBJECT_BITS)
+ return pend;
+
+ /*
+ * If the last set bit is larger than the number of pending
+ * bits we have a gap.
+ */
+ weight = hweight32(pend);
+ lasts = fls(pend);
+
+ /* If the bits are linear, nothing to do */
+ if (lasts == weight)
+ return pend;
+
+ /*
+ * Find the first set bit after the gap. We walk backwards
+ * from the last set bit.
+ */
+ for (lasts--; pend & (1 << (lasts - 1)); lasts--);
+
+ return pend & ~((1 << lasts) - 1);
+}
+
+static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
+ u32 pend, int quota)
+{
+ u32 pkts = 0, ctrl, obj, mcmd;
+
+ while ((obj = ffs(pend)) && quota > 0) {
+ pend &= ~BIT(obj - 1);
+
+ mcmd = obj < C_CAN_MSG_RX_LOW_LAST ?
+ IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
+
+ c_can_object_get(dev, IF_RX, obj, mcmd);
+ ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
+
+ if (ctrl & IF_MCONT_MSGLST) {
+ int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);
+
+ pkts += n;
+ quota -= n;
+ continue;
+ }
+
+ /*
+ * This really should not happen, but this covers some
+ * odd HW behaviour. Do not remove that unless you
+ * want to brick your machine.
+ */
+ if (!(ctrl & IF_MCONT_NEWDAT))
+ continue;
+
+ /* read the data from the message object */
+ c_can_read_msg_object(dev, IF_RX, ctrl);
+
+ if (obj == C_CAN_MSG_RX_LOW_LAST)
+ /* activate all lower message objects */
+ c_can_activate_all_lower_rx_msg_obj(dev, IF_RX, ctrl);
+
+ pkts++;
+ quota--;
+ }
+
+ return pkts;
}
/*
@@ -805,10 +868,8 @@ static void c_can_do_tx(struct net_device *dev)
*/
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
- u32 num_rx_pkts = 0;
- unsigned int msg_obj, msg_ctrl_save;
struct c_can_priv *priv = netdev_priv(dev);
- u16 val;
+ u32 pkts = 0, pend = 0, toread, n;
/*
* It is faster to read only one 16bit register. This is only possible
@@ -817,49 +878,31 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
"Implementation does not support more message objects than 16");
- while (quota > 0 && (val = priv->read_reg(priv, C_CAN_INTPND1_REG))) {
- while ((msg_obj = ffs(val)) && quota > 0) {
- val &= ~BIT(msg_obj - 1);
-
- c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
- ~IF_COMM_TXRQST);
- msg_ctrl_save = priv->read_reg(priv,
- C_CAN_IFACE(MSGCTRL_REG, 0));
-
- if (msg_ctrl_save & IF_MCONT_MSGLST) {
- c_can_handle_lost_msg_obj(dev, 0, msg_obj);
- num_rx_pkts++;
- quota--;
- continue;
- }
-
- if (msg_ctrl_save & IF_MCONT_EOB)
- return num_rx_pkts;
-
- if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
- continue;
-
- /* read the data from the message object */
- c_can_read_msg_object(dev, 0, msg_ctrl_save);
-
- if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
- c_can_mark_rx_msg_obj(dev, 0,
- msg_ctrl_save, msg_obj);
- else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
- /* activate this msg obj */
- c_can_activate_rx_msg_obj(dev, 0,
- msg_ctrl_save, msg_obj);
- else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
- /* activate all lower message objects */
- c_can_activate_all_lower_rx_msg_obj(dev,
- 0, msg_ctrl_save);
-
- num_rx_pkts++;
- quota--;
+ while (quota > 0) {
+ if (!pend) {
+ pend = priv->read_reg(priv, C_CAN_INTPND1_REG);
+ if (!pend)
+ break;
+ /*
+ * If the pending field has a gap, handle the
+ * bits above the gap first.
+ */
+ toread = c_can_adjust_pending(pend);
+ } else {
+ toread = pend;
}
+ /* Remove the bits from pend */
+ pend &= ~toread;
+ /* Read the objects */
+ n = c_can_read_objects(dev, priv, toread, quota);
+ pkts += n;
+ quota -= n;
}
- return num_rx_pkts;
+ if (pkts)
+ can_led_event(dev, CAN_LED_EVENT_RX);
+
+ return pkts;
}
static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
@@ -1133,17 +1176,20 @@ static int c_can_open(struct net_device *dev)
goto exit_irq_fail;
}
- napi_enable(&priv->napi);
+ /* start the c_can controller */
+ err = c_can_start(dev);
+ if (err)
+ goto exit_start_fail;
can_led_event(dev, CAN_LED_EVENT_OPEN);
- /* start the c_can controller */
- c_can_start(dev);
-
+ napi_enable(&priv->napi);
netif_start_queue(dev);
return 0;
+exit_start_fail:
+ free_irq(dev->irq, dev);
exit_irq_fail:
close_candev(dev);
exit_open_fail:
@@ -1180,6 +1226,7 @@ struct net_device *alloc_c_can_dev(void)
return NULL;
priv = netdev_priv(dev);
+ spin_lock_init(&priv->xmit_lock);
netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
priv->dev = dev;
@@ -1260,15 +1307,16 @@ int c_can_power_up(struct net_device *dev)
if (time_after(jiffies, time_out))
return -ETIMEDOUT;
- c_can_start(dev);
-
- return 0;
+ return c_can_start(dev);
}
EXPORT_SYMBOL_GPL(c_can_power_up);
#endif
void free_c_can_dev(struct net_device *dev)
{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ netif_napi_del(&priv->napi);
free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);
@@ -1277,6 +1325,7 @@ static const struct net_device_ops c_can_netdev_ops = {
.ndo_open = c_can_open,
.ndo_stop = c_can_close,
.ndo_start_xmit = c_can_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
int register_c_can_dev(struct net_device *dev)
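
[Editor's note: the trickiest piece of the c_can RX rework above is c_can_adjust_pending(). When INTPND has a gap, the objects above the gap are leftovers from an earlier pass (quota limit or a race with the hardware), so they are serviced first. A worked example, written as a throwaway self-check on the assumption that it sits in the same file as the static helper:]

	/* Illustration: objects 1, 2, 7 and 8 pending -> pend = 0x00c3.
	 * hweight32() is 4 but fls() is 8, so there is a gap; the helper
	 * returns 0x00c0 and objects 7/8 are read first.  Objects 1/2 are
	 * picked up on the next loop pass from a fresh INTPND read.
	 */
	static void example_adjust_pending_check(void)
	{
		WARN_ON(c_can_adjust_pending(0x00c3) != 0x00c0);
	}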
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index d2e1c21b143f..faa8404162b3 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,6 +22,33 @@
#ifndef C_CAN_H
#define C_CAN_H
+/*
+ * IFx register masks:
+ * allow easy operation on 16-bit registers when the
+ * argument is 32-bit instead
+ */
+#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
+#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
+
+/* message object split */
+#define C_CAN_NO_OF_OBJECTS 32
+#define C_CAN_MSG_OBJ_RX_NUM 16
+#define C_CAN_MSG_OBJ_TX_NUM 16
+
+#define C_CAN_MSG_OBJ_RX_FIRST 1
+#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
+ C_CAN_MSG_OBJ_RX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
+#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
+ C_CAN_MSG_OBJ_TX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_RX_SPLIT 9
+#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
+
+#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
+#define RECEIVE_OBJECT_BITS 0x0000ffff
+
enum reg {
C_CAN_CTRL_REG = 0,
C_CAN_CTRL_EX_REG,
@@ -156,6 +183,7 @@ struct c_can_priv {
struct napi_struct napi;
struct net_device *dev;
struct device *device;
+ spinlock_t xmit_lock;
int tx_object;
int current_status;
int last_status;
@@ -172,6 +200,7 @@ struct c_can_priv {
u32 __iomem *raminit_ctrlreg;
unsigned int instance;
void (*raminit) (const struct c_can_priv *priv, bool enable);
+ u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
};
struct net_device *alloc_c_can_dev(void);
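
[Editor's note: the new c_can_priv fields back the TX rework in c_can.c. xmit_lock serializes tx_next/tx_echo between start_xmit and the TX-complete path, and dlc[] caches each frame's DLC at enqueue time so completion accounting needs no extra interface-register read. A small sketch of that accounting; it only restates what the diff already does with these fields:]

	/* Sketch: sum up TX stats from the cached DLC instead of reading
	 * back C_CAN_IFACE(MSGCTRL_REG, ...).
	 */
	static void example_account_tx(struct c_can_priv *priv,
				       struct net_device_stats *stats, u32 obj)
	{
		stats->tx_bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
		stats->tx_packets++;
	}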
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index d66ac265269c..806d92753427 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -37,8 +37,10 @@
#include "c_can.h"
-#define CAN_RAMINIT_START_MASK(i) (1 << (i))
-
+#define CAN_RAMINIT_START_MASK(i) (0x001 << (i))
+#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i))
+#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i))
+static DEFINE_SPINLOCK(raminit_lock);
/*
* 16-bit c_can registers can be arranged differently in the memory
* architecture of different implementations. For example: 16-bit
@@ -69,16 +71,41 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
writew(val, priv->base + 2 * priv->regs[index]);
}
+static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
+ u32 val)
+{
+ /* We look only at the bits of our instance. */
+ val &= mask;
+ while ((readl(priv->raminit_ctrlreg) & mask) != val)
+ udelay(1);
+}
+
static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
{
- u32 val;
-
- val = readl(priv->raminit_ctrlreg);
- if (enable)
- val |= CAN_RAMINIT_START_MASK(priv->instance);
- else
- val &= ~CAN_RAMINIT_START_MASK(priv->instance);
- writel(val, priv->raminit_ctrlreg);
+ u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
+ u32 ctrl;
+
+ spin_lock(&raminit_lock);
+
+ ctrl = readl(priv->raminit_ctrlreg);
+ /* We clear the done and start bit first. The start bit is
+ * looking at the 0 -> transition, but is not self clearing;
+ * And we clear the init done bit as well.
+ */
+ ctrl &= ~CAN_RAMINIT_START_MASK(priv->instance);
+ ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
+ writel(ctrl, priv->raminit_ctrlreg);
+ ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
+ c_can_hw_raminit_wait(priv, ctrl, mask);
+
+ if (enable) {
+ /* Set start bit and wait for the done bit. */
+ ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
+ writel(ctrl, priv->raminit_ctrlreg);
+ ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
+ c_can_hw_raminit_wait(priv, ctrl, mask);
+ }
+ spin_unlock(&raminit_lock);
}
static struct platform_device_id c_can_id_table[] = {
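
[Editor's note: for the raminit handshake above, the per-instance bit layout is the key detail. The start bits live in the low byte and the done bits one byte up, so c_can_hw_raminit_wait() only polls the pair belonging to this controller; the sequence first clears start and writes the done bit (which clears it), waits, then sets start and polls for done, all under raminit_lock because the register is shared. A compile-time illustration for instance 1:]

	static void example_raminit_masks(void)
	{
		BUILD_BUG_ON(CAN_RAMINIT_START_MASK(1) != 0x002);
		BUILD_BUG_ON(CAN_RAMINIT_DONE_MASK(1)  != 0x200);
		BUILD_BUG_ON(CAN_RAMINIT_ALL_MASK(1)   != 0x202);
	}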
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 0f12abf6591c..d8379278d648 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -823,6 +823,7 @@ static const struct net_device_ops cc770_netdev_ops = {
.ndo_open = cc770_open,
.ndo_stop = cc770_close,
.ndo_start_xmit = cc770_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
int register_cc770dev(struct net_device *dev)
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index fc59bc6f040b..c7a260478749 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -99,10 +99,10 @@ static int can_update_spt(const struct can_bittiming_const *btc,
return 1000 * (tseg + 1 - *tseg2) / (tseg + 1);
}
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
{
struct can_priv *priv = netdev_priv(dev);
- const struct can_bittiming_const *btc = priv->bittiming_const;
long rate, best_rate = 0;
long best_error = 1000000000, error = 0;
int best_tseg = 0, best_brp = 0, brp = 0;
@@ -110,9 +110,6 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
int spt_error = 1000, spt = 0, sampl_pt;
u64 v64;
- if (!priv->bittiming_const)
- return -ENOTSUPP;
-
/* Use CIA recommended sample points */
if (bt->sample_point) {
sampl_pt = bt->sample_point;
@@ -204,7 +201,8 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
return 0;
}
#else /* !CONFIG_CAN_CALC_BITTIMING */
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
{
netdev_err(dev, "bit-timing calculation not available\n");
return -EINVAL;
@@ -217,16 +215,13 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
* prescaler value brp. You can find more information in the header
* file linux/can/netlink.h.
*/
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
{
struct can_priv *priv = netdev_priv(dev);
- const struct can_bittiming_const *btc = priv->bittiming_const;
int tseg1, alltseg;
u64 brp64;
- if (!priv->bittiming_const)
- return -ENOTSUPP;
-
tseg1 = bt->prop_seg + bt->phase_seg1;
if (!bt->sjw)
bt->sjw = 1;
@@ -254,26 +249,29 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt)
return 0;
}
-static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
{
- struct can_priv *priv = netdev_priv(dev);
int err;
/* Check if the CAN device has bit-timing parameters */
- if (priv->bittiming_const) {
+ if (!btc)
+ return -ENOTSUPP;
- /* Non-expert mode? Check if the bitrate has been pre-defined */
- if (!bt->tq)
- /* Determine bit-timing parameters */
- err = can_calc_bittiming(dev, bt);
- else
- /* Check bit-timing params and calculate proper brp */
- err = can_fixup_bittiming(dev, bt);
- if (err)
- return err;
- }
+ /*
+ * Depending on the given can_bittiming parameter structure the CAN
+ * timing parameters are calculated based on the provided bitrate OR
+ * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
+ * provided directly which are then checked and fixed up.
+ */
+ if (!bt->tq && bt->bitrate)
+ err = can_calc_bittiming(dev, bt, btc);
+ else if (bt->tq && !bt->bitrate)
+ err = can_fixup_bittiming(dev, bt, btc);
+ else
+ err = -EINVAL;
- return 0;
+ return err;
}
/*
@@ -317,7 +315,9 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
BUG_ON(idx >= priv->echo_skb_max);
/* check flag whether this packet has to be looped back */
- if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) {
+ if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
+ (skb->protocol != htons(ETH_P_CAN) &&
+ skb->protocol != htons(ETH_P_CANFD))) {
kfree_skb(skb);
return;
}
@@ -329,7 +329,6 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
return;
/* make settings for echo to reduce code in irq context */
- skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->dev = dev;
@@ -512,6 +511,30 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
}
EXPORT_SYMBOL_GPL(alloc_can_skb);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct canfd_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CANFD);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
+ memset(*cfd, 0, sizeof(struct canfd_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+
struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -572,6 +595,39 @@ void free_candev(struct net_device *dev)
EXPORT_SYMBOL_GPL(free_candev);
/*
+ * changing MTU and control mode for CAN/CANFD devices
+ */
+int can_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Do not allow changing the MTU while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* allow change of MTU according to the CANFD ability of the device */
+ switch (new_mtu) {
+ case CAN_MTU:
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+ break;
+
+ case CANFD_MTU:
+ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
+ return -EINVAL;
+
+ priv->ctrlmode |= CAN_CTRLMODE_FD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_change_mtu);
+
+/*
* Common open function when the device gets opened.
*
* This function should be called in the open function of the device
@@ -581,11 +637,19 @@ int open_candev(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- if (!priv->bittiming.tq && !priv->bittiming.bitrate) {
+ if (!priv->bittiming.bitrate) {
netdev_err(dev, "bit-timing not yet defined\n");
return -EINVAL;
}
+ /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
+ if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
+ (!priv->data_bittiming.bitrate ||
+ (priv->data_bittiming.bitrate < priv->bittiming.bitrate))) {
+ netdev_err(dev, "incorrect/missing data bit-timing\n");
+ return -EINVAL;
+ }
+
/* Switch carrier on if device was stopped while in bus-off state */
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
@@ -624,6 +688,10 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
= { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
[IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
+ [IFLA_CAN_DATA_BITTIMING]
+ = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_DATA_BITTIMING_CONST]
+ = { .len = sizeof(struct can_bittiming_const) },
};
static int can_changelink(struct net_device *dev,
@@ -642,9 +710,7 @@ static int can_changelink(struct net_device *dev,
if (dev->flags & IFF_UP)
return -EBUSY;
memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- if ((!bt.bitrate && !bt.tq) || (bt.bitrate && bt.tq))
- return -EINVAL;
- err = can_get_bittiming(dev, &bt);
+ err = can_get_bittiming(dev, &bt, priv->bittiming_const);
if (err)
return err;
memcpy(&priv->bittiming, &bt, sizeof(bt));
@@ -668,6 +734,12 @@ static int can_changelink(struct net_device *dev,
return -EOPNOTSUPP;
priv->ctrlmode &= ~cm->mask;
priv->ctrlmode |= cm->flags;
+
+ /* CAN_CTRLMODE_FD can only be set when driver supports FD */
+ if (priv->ctrlmode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
+ else
+ dev->mtu = CAN_MTU;
}
if (data[IFLA_CAN_RESTART_MS]) {
@@ -686,6 +758,27 @@ static int can_changelink(struct net_device *dev,
return err;
}
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ struct can_bittiming dbt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
+ sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const);
+ if (err)
+ return err;
+ memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+
+ if (priv->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_data_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
return 0;
}
@@ -694,7 +787,8 @@ static size_t can_get_size(const struct net_device *dev)
struct can_priv *priv = netdev_priv(dev);
size_t size = 0;
- size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+ if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
size += nla_total_size(sizeof(struct can_bittiming_const));
size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
@@ -703,6 +797,10 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
size += nla_total_size(sizeof(struct can_berr_counter));
+ if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
+ if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(struct can_bittiming_const));
return size;
}
@@ -716,19 +814,34 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if (nla_put(skb, IFLA_CAN_BITTIMING,
- sizeof(priv->bittiming), &priv->bittiming) ||
+
+ if ((priv->bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_BITTIMING,
+ sizeof(priv->bittiming), &priv->bittiming)) ||
+
(priv->bittiming_const &&
nla_put(skb, IFLA_CAN_BITTIMING_CONST,
sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+
nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
nla_put_u32(skb, IFLA_CAN_STATE, state) ||
nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+
(priv->do_get_berr_counter &&
!priv->do_get_berr_counter(dev, &bec) &&
- nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)))
+ nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+
+ (priv->data_bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING,
+ sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+
+ (priv->data_bittiming_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ sizeof(*priv->data_bittiming_const),
+ priv->data_bittiming_const)))
return -EMSGSIZE;
+
return 0;
}
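
[Editor's note: taken together, the dev.c changes give the core what it needs for CAN FD: can_change_mtu() flips CAN_CTRLMODE_FD when the MTU moves between CAN_MTU (16) and CANFD_MTU (72), alloc_canfd_skb() builds ETH_P_CANFD skbs, and the data bit-timing travels over the new IFLA_CAN_DATA_BITTIMING attributes. A hedged sketch of what an FD-capable driver would wire up; the foo_* names are hypothetical placeholders, not part of this series:]

	static const struct net_device_ops foo_can_netdev_ops = {
		.ndo_open	= foo_can_open,		/* hypothetical driver hooks */
		.ndo_stop	= foo_can_close,
		.ndo_start_xmit	= foo_can_start_xmit,
		.ndo_change_mtu	= can_change_mtu,	/* from this series */
	};

	/* hypothetical probe-time setup enabling CAN FD on such a driver */
	static void foo_can_enable_fd(struct can_priv *can)
	{
		can->ctrlmode_supported |= CAN_CTRLMODE_FD;
		can->data_bittiming_const = &foo_can_data_bittiming_const;
		can->do_set_data_bittiming = foo_can_set_data_bittiming;
	}

[With that in place, "ip link set canX mtu 72" selects CAN FD framing and "mtu 16" falls back to classic CAN; can_change_mtu() rejects changes while the interface is up and rejects CANFD_MTU on drivers that do not advertise CAN_CTRLMODE_FD.]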
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 61376abdab39..f425ec2c7839 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1011,6 +1011,7 @@ static const struct net_device_ops flexcan_netdev_ops = {
.ndo_open = flexcan_open,
.ndo_stop = flexcan_close,
.ndo_start_xmit = flexcan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static int register_flexcandev(struct net_device *dev)
@@ -1132,9 +1133,9 @@ static int flexcan_probe(struct platform_device *pdev)
of_id = of_match_device(flexcan_of_match, &pdev->dev);
if (of_id) {
devtype_data = of_id->data;
- } else if (pdev->id_entry->driver_data) {
+ } else if (platform_get_device_id(pdev)->driver_data) {
devtype_data = (struct flexcan_devtype_data *)
- pdev->id_entry->driver_data;
+ platform_get_device_id(pdev)->driver_data;
} else {
return -ENODEV;
}
@@ -1201,8 +1202,7 @@ static int flexcan_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int flexcan_suspend(struct device *device)
+static int __maybe_unused flexcan_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
@@ -1221,7 +1221,7 @@ static int flexcan_suspend(struct device *device)
return 0;
}
-static int flexcan_resume(struct device *device)
+static int __maybe_unused flexcan_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
@@ -1233,7 +1233,6 @@ static int flexcan_resume(struct device *device)
}
return flexcan_chip_enable(priv);
}
-#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index ab506d6cab37..3fd9fd942c6e 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1578,6 +1578,7 @@ static const struct net_device_ops grcan_netdev_ops = {
.ndo_open = grcan_open,
.ndo_stop = grcan_close,
.ndo_start_xmit = grcan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static int grcan_setup_netdev(struct platform_device *ofdev,
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 71594e5676fd..2382c04dc780 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -198,9 +198,6 @@ struct ican3_dev {
struct net_device *ndev;
struct napi_struct napi;
- /* Device for printing */
- struct device *dev;
-
/* module number */
unsigned int num;
@@ -295,7 +292,7 @@ static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
xord = locl ^ peer;
if ((xord & MSYNC_RB_MASK) == 0x00) {
- dev_dbg(mod->dev, "no mbox for reading\n");
+ netdev_dbg(mod->ndev, "no mbox for reading\n");
return -ENOMEM;
}
@@ -340,7 +337,7 @@ static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
xord = locl ^ peer;
if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) {
- dev_err(mod->dev, "no mbox for writing\n");
+ netdev_err(mod->ndev, "no mbox for writing\n");
return -ENOMEM;
}
@@ -542,7 +539,7 @@ static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
memcpy_fromio(&desc, desc_addr, sizeof(desc));
if (!(desc.control & DESC_VALID)) {
- dev_dbg(mod->dev, "%s: no free buffers\n", __func__);
+ netdev_dbg(mod->ndev, "%s: no free buffers\n", __func__);
return -ENOMEM;
}
@@ -573,7 +570,7 @@ static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
memcpy_fromio(&desc, desc_addr, sizeof(desc));
if (!(desc.control & DESC_VALID)) {
- dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__);
+ netdev_dbg(mod->ndev, "%s: no buffers to recv\n", __func__);
return -ENOMEM;
}
@@ -883,7 +880,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod,
*/
static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg)
{
- dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data);
+ netdev_dbg(mod->ndev, "IDVERS response: %s\n", msg->data);
}
static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
@@ -899,7 +896,7 @@ static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
* error frame for userspace
*/
if (msg->spec == MSG_MSGLOST) {
- dev_err(mod->dev, "lost %d control messages\n", msg->data[0]);
+ netdev_err(mod->ndev, "lost %d control messages\n", msg->data[0]);
return;
}
@@ -939,13 +936,13 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* we can only handle the SJA1000 part */
if (msg->data[1] != CEVTIND_CHIP_SJA1000) {
- dev_err(mod->dev, "unable to handle errors on non-SJA1000\n");
+ netdev_err(mod->ndev, "unable to handle errors on non-SJA1000\n");
return -ENODEV;
}
/* check the message length for sanity */
if (le16_to_cpu(msg->len) < 6) {
- dev_err(mod->dev, "error message too short\n");
+ netdev_err(mod->ndev, "error message too short\n");
return -EINVAL;
}
@@ -967,7 +964,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
*/
if (isrc == CEVTIND_BEI) {
int ret;
- dev_dbg(mod->dev, "bus error interrupt\n");
+ netdev_dbg(mod->ndev, "bus error interrupt\n");
/* TX error */
if (!(ecc & ECC_DIR)) {
@@ -983,7 +980,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
*/
ret = ican3_set_buserror(mod, 1);
if (ret) {
- dev_err(mod->dev, "unable to re-enable bus-error\n");
+ netdev_err(mod->ndev, "unable to re-enable bus-error\n");
return ret;
}
@@ -998,7 +995,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* data overrun interrupt */
if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) {
- dev_dbg(mod->dev, "data overrun interrupt\n");
+ netdev_dbg(mod->ndev, "data overrun interrupt\n");
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_over_errors++;
@@ -1007,7 +1004,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* error warning + passive interrupt */
if (isrc == CEVTIND_EI) {
- dev_dbg(mod->dev, "error warning + passive interrupt\n");
+ netdev_dbg(mod->ndev, "error warning + passive interrupt\n");
if (status & SR_BS) {
state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
@@ -1088,7 +1085,7 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
complete(&mod->termination_comp);
break;
default:
- dev_err(mod->dev, "received an unknown inquiry response\n");
+ netdev_err(mod->ndev, "received an unknown inquiry response\n");
break;
}
}
@@ -1096,7 +1093,7 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
static void ican3_handle_unknown_message(struct ican3_dev *mod,
struct ican3_msg *msg)
{
- dev_warn(mod->dev, "received unknown message: spec 0x%.2x length %d\n",
+ netdev_warn(mod->ndev, "received unknown message: spec 0x%.2x length %d\n",
msg->spec, le16_to_cpu(msg->len));
}
@@ -1105,7 +1102,7 @@ static void ican3_handle_unknown_message(struct ican3_dev *mod,
*/
static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
{
- dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
+ netdev_dbg(mod->ndev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
mod->num, msg->spec, le16_to_cpu(msg->len));
switch (msg->spec) {
@@ -1406,7 +1403,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
msleep(10);
} while (time_before(jiffies, start + HZ / 4));
- dev_err(mod->dev, "failed to reset CAN module\n");
+ netdev_err(mod->ndev, "failed to reset CAN module\n");
return -ETIMEDOUT;
}
@@ -1425,7 +1422,7 @@ static int ican3_startup_module(struct ican3_dev *mod)
ret = ican3_reset_module(mod);
if (ret) {
- dev_err(mod->dev, "unable to reset module\n");
+ netdev_err(mod->ndev, "unable to reset module\n");
return ret;
}
@@ -1434,41 +1431,41 @@ static int ican3_startup_module(struct ican3_dev *mod)
ret = ican3_msg_connect(mod);
if (ret) {
- dev_err(mod->dev, "unable to connect to module\n");
+ netdev_err(mod->ndev, "unable to connect to module\n");
return ret;
}
ican3_init_new_host_interface(mod);
ret = ican3_msg_newhostif(mod);
if (ret) {
- dev_err(mod->dev, "unable to switch to new-style interface\n");
+ netdev_err(mod->ndev, "unable to switch to new-style interface\n");
return ret;
}
/* default to "termination on" */
ret = ican3_set_termination(mod, true);
if (ret) {
- dev_err(mod->dev, "unable to enable termination\n");
+ netdev_err(mod->ndev, "unable to enable termination\n");
return ret;
}
/* default to "bus errors enabled" */
ret = ican3_set_buserror(mod, 1);
if (ret) {
- dev_err(mod->dev, "unable to set bus-error\n");
+ netdev_err(mod->ndev, "unable to set bus-error\n");
return ret;
}
ican3_init_fast_host_interface(mod);
ret = ican3_msg_fasthostif(mod);
if (ret) {
- dev_err(mod->dev, "unable to switch to fast host interface\n");
+ netdev_err(mod->ndev, "unable to switch to fast host interface\n");
return ret;
}
ret = ican3_set_id_filter(mod, true);
if (ret) {
- dev_err(mod->dev, "unable to set acceptance filter\n");
+ netdev_err(mod->ndev, "unable to set acceptance filter\n");
return ret;
}
@@ -1487,14 +1484,14 @@ static int ican3_open(struct net_device *ndev)
/* open the CAN layer */
ret = open_candev(ndev);
if (ret) {
- dev_err(mod->dev, "unable to start CAN layer\n");
+ netdev_err(mod->ndev, "unable to start CAN layer\n");
return ret;
}
/* bring the bus online */
ret = ican3_set_bus_state(mod, true);
if (ret) {
- dev_err(mod->dev, "unable to set bus-on\n");
+ netdev_err(mod->ndev, "unable to set bus-on\n");
close_candev(ndev);
return ret;
}
@@ -1518,7 +1515,7 @@ static int ican3_stop(struct net_device *ndev)
/* bring the bus offline, stop receiving packets */
ret = ican3_set_bus_state(mod, false);
if (ret) {
- dev_err(mod->dev, "unable to set bus-off\n");
+ netdev_err(mod->ndev, "unable to set bus-off\n");
return ret;
}
@@ -1545,7 +1542,7 @@ static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
/* check that we can actually transmit */
if (!ican3_txok(mod)) {
- dev_err(mod->dev, "BUG: no free descriptors\n");
+ netdev_err(mod->ndev, "BUG: no free descriptors\n");
spin_unlock_irqrestore(&mod->lock, flags);
return NETDEV_TX_BUSY;
}
@@ -1597,6 +1594,7 @@ static const struct net_device_ops ican3_netdev_ops = {
.ndo_open = ican3_open,
.ndo_stop = ican3_stop,
.ndo_start_xmit = ican3_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
/*
@@ -1657,7 +1655,7 @@ static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
/* bring the bus online */
ret = ican3_set_bus_state(mod, true);
if (ret) {
- dev_err(mod->dev, "unable to set bus-on\n");
+ netdev_err(ndev, "unable to set bus-on\n");
return ret;
}
@@ -1682,7 +1680,7 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
if (ret == 0) {
- dev_info(mod->dev, "%s timed out\n", __func__);
+ netdev_info(mod->ndev, "%s timed out\n", __func__);
return -ETIMEDOUT;
}
@@ -1708,7 +1706,7 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
if (ret == 0) {
- dev_info(mod->dev, "%s timed out\n", __func__);
+ netdev_info(mod->ndev, "%s timed out\n", __func__);
return -ETIMEDOUT;
}
@@ -1778,7 +1776,6 @@ static int ican3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
mod = netdev_priv(ndev);
mod->ndev = ndev;
- mod->dev = &pdev->dev;
mod->num = pdata->modno;
netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
skb_queue_head_init(&mod->echoq);
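The janz-ican3 hunks above convert the driver's logging from dev_*() on the parent platform device to netdev_*() on the CAN interface itself, which is why mod->dev can be dropped from the probe path. As a rough sketch only (the structure and function below are invented for illustration and are not part of this patch), the resulting pattern looks like this:

#include <linux/netdevice.h>

struct demo_priv {
	struct net_device *ndev;	/* stored once at probe time */
};

static void demo_report_loss(struct demo_priv *priv, int lost)
{
	/* netdev_*() prefixes messages with the interface name ("can0"),
	 * so no separate struct device pointer needs to be carried around.
	 */
	netdev_err(priv->ndev, "lost %d control messages\n", lost);
	netdev_dbg(priv->ndev, "%s: done\n", __func__);
}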
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index cdb9808d12db..28c11f815245 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -601,10 +601,10 @@ static int mcp251x_do_set_bittiming(struct net_device *net)
(bt->prop_seg - 1));
mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
(bt->phase_seg2 - 1));
- dev_info(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
- mcp251x_read_reg(spi, CNF1),
- mcp251x_read_reg(spi, CNF2),
- mcp251x_read_reg(spi, CNF3));
+ dev_dbg(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
+ mcp251x_read_reg(spi, CNF1),
+ mcp251x_read_reg(spi, CNF2),
+ mcp251x_read_reg(spi, CNF3));
return 0;
}
@@ -672,7 +672,7 @@ static int mcp251x_hw_probe(struct spi_device *spi)
static int mcp251x_power_enable(struct regulator *reg, int enable)
{
- if (IS_ERR(reg))
+ if (IS_ERR_OR_NULL(reg))
return 0;
if (enable)
@@ -996,6 +996,7 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_open = mcp251x_open,
.ndo_stop = mcp251x_stop,
.ndo_start_xmit = mcp251x_hard_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct of_device_id mcp251x_of_match[] = {
@@ -1155,8 +1156,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
devm_can_led_init(net);
- dev_info(&spi->dev, "probed\n");
-
return ret;
error_probe:
@@ -1197,9 +1196,7 @@ static int mcp251x_can_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-
-static int mcp251x_can_suspend(struct device *dev)
+static int __maybe_unused mcp251x_can_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -1221,7 +1218,7 @@ static int mcp251x_can_suspend(struct device *dev)
priv->after_suspend = AFTER_SUSPEND_DOWN;
}
- if (!IS_ERR(priv->power)) {
+ if (!IS_ERR_OR_NULL(priv->power)) {
regulator_disable(priv->power);
priv->after_suspend |= AFTER_SUSPEND_POWER;
}
@@ -1229,7 +1226,7 @@ static int mcp251x_can_suspend(struct device *dev)
return 0;
}
-static int mcp251x_can_resume(struct device *dev)
+static int __maybe_unused mcp251x_can_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -1249,7 +1246,6 @@ static int mcp251x_can_resume(struct device *dev)
enable_irq(spi->irq);
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
mcp251x_can_resume);
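Replacing the CONFIG_PM_SLEEP #ifdef block with __maybe_unused keeps the suspend/resume callbacks visible to the compiler (so they stay build-tested) even when sleep support is disabled, while SIMPLE_DEV_PM_OPS only references them when CONFIG_PM_SLEEP is set; the IS_ERR_OR_NULL() changes likewise let a NULL regulator pointer be treated as "no regulator present". A minimal sketch of the PM pattern, with an invented driver name standing in for mcp251x:

#include <linux/pm.h>
#include <linux/platform_device.h>

static int __maybe_unused demo_suspend(struct device *dev)
{
	/* quiesce the hardware here */
	return 0;
}

static int __maybe_unused demo_resume(struct device *dev)
{
	/* bring the hardware back up here */
	return 0;
}

/* Evaluates to an empty dev_pm_ops when CONFIG_PM_SLEEP is not set,
 * at which point the compiler may discard the two callbacks above.
 */
static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name	= "demo",
		.pm	= &demo_pm_ops,
	},
};
module_platform_driver(demo_driver);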
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index b9f3faabb0f3..e0c9be5e2ab7 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -647,9 +647,10 @@ static int mscan_close(struct net_device *dev)
}
static const struct net_device_ops mscan_netdev_ops = {
- .ndo_open = mscan_open,
- .ndo_stop = mscan_close,
- .ndo_start_xmit = mscan_start_xmit,
+ .ndo_open = mscan_open,
+ .ndo_stop = mscan_close,
+ .ndo_start_xmit = mscan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
int register_mscandev(struct net_device *dev, int mscan_clksrc)
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 6c077eb87b5e..6472562efedc 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -950,6 +950,7 @@ static const struct net_device_ops pch_can_netdev_ops = {
.ndo_open = pch_can_open,
.ndo_stop = pch_close,
.ndo_start_xmit = pch_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static void pch_can_remove(struct pci_dev *pdev)
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index ff2ba86cd4a4..1e65cb6c2591 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -17,16 +17,9 @@ config CAN_SJA1000_PLATFORM
the "platform bus" (Linux abstraction for directly to the
processor attached devices). Which can be found on various
boards from Phytec (http://www.phytec.de) like the PCM027,
- PCM038.
-
-config CAN_SJA1000_OF_PLATFORM
- tristate "Generic OF Platform Bus based SJA1000 driver"
- depends on OF
- ---help---
- This driver adds support for the SJA1000 chips connected to
- the OpenFirmware "platform bus" found on embedded systems with
- OpenFirmware bindings, e.g. if you have a PowerPC based system
- you may want to enable this option.
+ PCM038. It also provides the OpenFirmware "platform bus" found
+ on embedded systems with OpenFirmware bindings, e.g. if you
+ have a PowerPC based system you may want to enable this option.
config CAN_EMS_PCMCIA
tristate "EMS CPC-CARD Card"
@@ -46,7 +39,7 @@ config CAN_EMS_PCI
config CAN_PEAK_PCMCIA
tristate "PEAK PCAN-PC Card"
depends on PCMCIA
- depends on HAS_IOPORT
+ depends on HAS_IOPORT_MAP
---help---
This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
from PEAK-System (http://www.peak-system.com). To compile this
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index b3d05cbfec36..531d5fcc97e5 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_CAN_SJA1000) += sja1000.o
obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o
obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
-obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index d790b874ca79..fd13dbf07d9c 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -323,6 +323,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
priv->cdr = EMS_PCI_CDR;
SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = i;
if (card->version == 1)
/* reset int flag of pita */
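The dev->dev_id assignments added here and in the other multi-channel SJA1000 and USB drivers below record the channel index of each port; the value is exported through sysfs (the interface's dev_id attribute), so userspace tools such as udev can distinguish several CAN ports that hang off the same parent PCI, PCMCIA or USB device. A hedged sketch of the idea, with an invented helper name:

#include <linux/netdevice.h>

/* Hypothetical: register one network interface per channel of a card. */
static int demo_register_channel(struct net_device *dev,
				 struct device *parent, int channel)
{
	SET_NETDEV_DEV(dev, parent);	/* every channel shares this parent */
	dev->dev_id = channel;		/* ...but reports a distinct dev_id */
	return register_netdev(dev);
}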
diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
index 9e535f2ef52b..381de998d2f1 100644
--- a/drivers/net/can/sja1000/ems_pcmcia.c
+++ b/drivers/net/can/sja1000/ems_pcmcia.c
@@ -211,6 +211,7 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
priv = netdev_priv(dev);
priv->priv = card;
SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = i;
priv->irq_flags = IRQF_SHARED;
dev->irq = pdev->irq;
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index c96eb14699d5..23b8e1324e25 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -270,6 +270,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
priv->reg_base, board->conf_addr, dev->irq);
SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = channel;
/* Register SJA1000 device */
err = register_sja1000dev(dev);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 065ca49eb45e..c540e3d12e3d 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -642,6 +642,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
icr |= chan->icr_mask;
SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = i;
/* Create chain of SJA1000 devices */
chan->prev_dev = pci_get_drvdata(pdev);
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index f7ad754dd2aa..dd56133cc461 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -550,6 +550,7 @@ static int pcan_add_channels(struct pcan_pccard *card)
priv = netdev_priv(netdev);
priv->priv = card;
SET_NETDEV_DEV(netdev, &pdev->dev);
+ netdev->dev_id = i;
priv->irq_flags = IRQF_SHARED;
netdev->irq = pdev->irq;
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index fbb61a0d901f..ec39b7cb2287 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -587,6 +587,7 @@ static int plx_pci_add_card(struct pci_dev *pdev,
priv->cdr = ci->cdr;
SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = i;
/* Register SJA1000 device */
err = register_sja1000dev(dev);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index f17c3018b7c7..f31499a32d7d 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -106,8 +106,7 @@ static int sja1000_probe_chip(struct net_device *dev)
struct sja1000_priv *priv = netdev_priv(dev);
if (priv->reg_base && sja1000_is_absent(priv)) {
- printk(KERN_INFO "%s: probing @0x%lX failed\n",
- DRV_NAME, dev->base_addr);
+ netdev_err(dev, "probing failed\n");
return 0;
}
return -1;
@@ -643,9 +642,10 @@ void free_sja1000dev(struct net_device *dev)
EXPORT_SYMBOL_GPL(free_sja1000dev);
static const struct net_device_ops sja1000_netdev_ops = {
- .ndo_open = sja1000_open,
- .ndo_stop = sja1000_close,
- .ndo_start_xmit = sja1000_start_xmit,
+ .ndo_open = sja1000_open,
+ .ndo_stop = sja1000_close,
+ .ndo_start_xmit = sja1000_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
int register_sja1000dev(struct net_device *dev)
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
deleted file mode 100644
index 2f6e24534231..000000000000
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Driver for SJA1000 CAN controllers on the OpenFirmware platform bus
- *
- * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
- * bus found on embedded PowerPC systems. You need a SJA1000 CAN node
- * definition in your flattened device tree source (DTS) file similar to:
- *
- * can@3,100 {
- * compatible = "nxp,sja1000";
- * reg = <3 0x100 0x80>;
- * interrupts = <2 0>;
- * interrupt-parent = <&mpic>;
- * nxp,external-clock-frequency = <16000000>;
- * };
- *
- * See "Documentation/devicetree/bindings/net/can/sja1000.txt" for further
- * information.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/can/dev.h>
-
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-
-#include "sja1000.h"
-
-#define DRV_NAME "sja1000_of_platform"
-
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the OF platform bus");
-MODULE_LICENSE("GPL v2");
-
-#define SJA1000_OFP_CAN_CLOCK (16000000 / 2)
-
-#define SJA1000_OFP_OCR OCR_TX0_PULLDOWN
-#define SJA1000_OFP_CDR (CDR_CBP | CDR_CLK_OFF)
-
-static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
-{
- return ioread8(priv->reg_base + reg);
-}
-
-static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
- int reg, u8 val)
-{
- iowrite8(val, priv->reg_base + reg);
-}
-
-static int sja1000_ofp_remove(struct platform_device *ofdev)
-{
- struct net_device *dev = platform_get_drvdata(ofdev);
- struct sja1000_priv *priv = netdev_priv(dev);
- struct device_node *np = ofdev->dev.of_node;
- struct resource res;
-
- unregister_sja1000dev(dev);
- free_sja1000dev(dev);
- iounmap(priv->reg_base);
- irq_dispose_mapping(dev->irq);
-
- of_address_to_resource(np, 0, &res);
- release_mem_region(res.start, resource_size(&res));
-
- return 0;
-}
-
-static int sja1000_ofp_probe(struct platform_device *ofdev)
-{
- struct device_node *np = ofdev->dev.of_node;
- struct net_device *dev;
- struct sja1000_priv *priv;
- struct resource res;
- u32 prop;
- int err, irq, res_size;
- void __iomem *base;
-
- err = of_address_to_resource(np, 0, &res);
- if (err) {
- dev_err(&ofdev->dev, "invalid address\n");
- return err;
- }
-
- res_size = resource_size(&res);
-
- if (!request_mem_region(res.start, res_size, DRV_NAME)) {
- dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
- return -EBUSY;
- }
-
- base = ioremap_nocache(res.start, res_size);
- if (!base) {
- dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
- err = -ENOMEM;
- goto exit_release_mem;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq == 0) {
- dev_err(&ofdev->dev, "no irq found\n");
- err = -ENODEV;
- goto exit_unmap_mem;
- }
-
- dev = alloc_sja1000dev(0);
- if (!dev) {
- err = -ENOMEM;
- goto exit_dispose_irq;
- }
-
- priv = netdev_priv(dev);
-
- priv->read_reg = sja1000_ofp_read_reg;
- priv->write_reg = sja1000_ofp_write_reg;
-
- err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
- else
- priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
-
- err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
- if (!err)
- priv->ocr |= prop & OCR_MODE_MASK;
- else
- priv->ocr |= OCR_MODE_NORMAL; /* default */
-
- err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
- if (!err)
- priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
- else
- priv->ocr |= OCR_TX0_PULLDOWN; /* default */
-
- err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
- if (!err && prop) {
- u32 divider = priv->can.clock.freq * 2 / prop;
-
- if (divider > 1)
- priv->cdr |= divider / 2 - 1;
- else
- priv->cdr |= CDR_CLKOUT_MASK;
- } else {
- priv->cdr |= CDR_CLK_OFF; /* default */
- }
-
- if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
- priv->cdr |= CDR_CBP; /* default */
-
- priv->irq_flags = IRQF_SHARED;
- priv->reg_base = base;
-
- dev->irq = irq;
-
- dev_info(&ofdev->dev,
- "reg_base=0x%p irq=%d clock=%d ocr=0x%02x cdr=0x%02x\n",
- priv->reg_base, dev->irq, priv->can.clock.freq,
- priv->ocr, priv->cdr);
-
- platform_set_drvdata(ofdev, dev);
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
- err = register_sja1000dev(dev);
- if (err) {
- dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
- DRV_NAME, err);
- goto exit_free_sja1000;
- }
-
- return 0;
-
-exit_free_sja1000:
- free_sja1000dev(dev);
-exit_dispose_irq:
- irq_dispose_mapping(irq);
-exit_unmap_mem:
- iounmap(base);
-exit_release_mem:
- release_mem_region(res.start, res_size);
-
- return err;
-}
-
-static struct of_device_id sja1000_ofp_table[] = {
- {.compatible = "nxp,sja1000"},
- {},
-};
-MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
-
-static struct platform_driver sja1000_ofp_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
- .of_match_table = sja1000_ofp_table,
- },
- .probe = sja1000_ofp_probe,
- .remove = sja1000_ofp_remove,
-};
-
-module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 943df645b459..95a844a7ee7b 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -26,12 +26,16 @@
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
#include "sja1000.h"
#define DRV_NAME "sja1000_platform"
+#define SP_CAN_CLOCK (16000000 / 2)
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_LICENSE("GPL v2");
@@ -66,59 +70,16 @@ static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
iowrite8(val, priv->reg_base + reg * 4);
}
-static int sp_probe(struct platform_device *pdev)
+static void sp_populate(struct sja1000_priv *priv,
+ struct sja1000_platform_data *pdata,
+ unsigned long resource_mem_flags)
{
- int err;
- void __iomem *addr;
- struct net_device *dev;
- struct sja1000_priv *priv;
- struct resource *res_mem, *res_irq;
- struct sja1000_platform_data *pdata;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data provided!\n");
- err = -ENODEV;
- goto exit;
- }
-
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_mem || !res_irq) {
- err = -ENODEV;
- goto exit;
- }
-
- if (!request_mem_region(res_mem->start, resource_size(res_mem),
- DRV_NAME)) {
- err = -EBUSY;
- goto exit;
- }
-
- addr = ioremap_nocache(res_mem->start, resource_size(res_mem));
- if (!addr) {
- err = -ENOMEM;
- goto exit_release;
- }
-
- dev = alloc_sja1000dev(0);
- if (!dev) {
- err = -ENOMEM;
- goto exit_iounmap;
- }
- priv = netdev_priv(dev);
-
- dev->irq = res_irq->start;
- priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
- if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
- priv->irq_flags |= IRQF_SHARED;
- priv->reg_base = addr;
/* The CAN clock frequency is half the oscillator clock frequency */
priv->can.clock.freq = pdata->osc_freq / 2;
priv->ocr = pdata->ocr;
priv->cdr = pdata->cdr;
- switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ switch (resource_mem_flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_32BIT:
priv->read_reg = sp_read_reg32;
priv->write_reg = sp_write_reg32;
@@ -133,6 +94,124 @@ static int sp_probe(struct platform_device *pdev)
priv->write_reg = sp_write_reg8;
break;
}
+}
+
+static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
+{
+ int err;
+ u32 prop;
+
+ err = of_property_read_u32(of, "reg-io-width", &prop);
+ if (err)
+ prop = 1; /* 8 bit is default */
+
+ switch (prop) {
+ case 4:
+ priv->read_reg = sp_read_reg32;
+ priv->write_reg = sp_write_reg32;
+ break;
+ case 2:
+ priv->read_reg = sp_read_reg16;
+ priv->write_reg = sp_write_reg16;
+ break;
+ case 1: /* fallthrough */
+ default:
+ priv->read_reg = sp_read_reg8;
+ priv->write_reg = sp_write_reg8;
+ }
+
+ err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
+ if (!err)
+ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+
+ err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
+ if (!err)
+ priv->ocr |= prop & OCR_MODE_MASK;
+ else
+ priv->ocr |= OCR_MODE_NORMAL; /* default */
+
+ err = of_property_read_u32(of, "nxp,tx-output-config", &prop);
+ if (!err)
+ priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+ else
+ priv->ocr |= OCR_TX0_PULLDOWN; /* default */
+
+ err = of_property_read_u32(of, "nxp,clock-out-frequency", &prop);
+ if (!err && prop) {
+ u32 divider = priv->can.clock.freq * 2 / prop;
+
+ if (divider > 1)
+ priv->cdr |= divider / 2 - 1;
+ else
+ priv->cdr |= CDR_CLKOUT_MASK;
+ } else {
+ priv->cdr |= CDR_CLK_OFF; /* default */
+ }
+
+ if (!of_property_read_bool(of, "nxp,no-comparator-bypass"))
+ priv->cdr |= CDR_CBP; /* default */
+}
+
+static int sp_probe(struct platform_device *pdev)
+{
+ int err, irq = 0;
+ void __iomem *addr;
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ struct resource *res_mem, *res_irq = NULL;
+ struct sja1000_platform_data *pdata;
+ struct device_node *of = pdev->dev.of_node;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata && !of) {
+ dev_err(&pdev->dev, "No platform data provided!\n");
+ return -ENODEV;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_mem)
+ return -ENODEV;
+
+ if (!devm_request_mem_region(&pdev->dev, res_mem->start,
+ resource_size(res_mem), DRV_NAME))
+ return -EBUSY;
+
+ addr = devm_ioremap_nocache(&pdev->dev, res_mem->start,
+ resource_size(res_mem));
+ if (!addr)
+ return -ENOMEM;
+
+ if (of)
+ irq = irq_of_parse_and_map(of, 0);
+ else
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!irq && !res_irq)
+ return -ENODEV;
+
+ dev = alloc_sja1000dev(0);
+ if (!dev)
+ return -ENOMEM;
+ priv = netdev_priv(dev);
+
+ if (res_irq) {
+ irq = res_irq->start;
+ priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+ priv->irq_flags |= IRQF_SHARED;
+ } else {
+ priv->irq_flags = IRQF_SHARED;
+ }
+
+ dev->irq = irq;
+ priv->reg_base = addr;
+
+ if (of)
+ sp_populate_of(priv, of);
+ else
+ sp_populate(priv, pdata, res_mem->flags);
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -150,39 +229,32 @@ static int sp_probe(struct platform_device *pdev)
exit_free:
free_sja1000dev(dev);
- exit_iounmap:
- iounmap(addr);
- exit_release:
- release_mem_region(res_mem->start, resource_size(res_mem));
- exit:
return err;
}
static int sp_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct sja1000_priv *priv = netdev_priv(dev);
- struct resource *res;
unregister_sja1000dev(dev);
-
- if (priv->reg_base)
- iounmap(priv->reg_base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
free_sja1000dev(dev);
return 0;
}
+static struct of_device_id sp_of_table[] = {
+ {.compatible = "nxp,sja1000"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, sp_of_table);
+
static struct platform_driver sp_driver = {
.probe = sp_probe,
.remove = sp_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = sp_of_table,
},
};
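Converting sp_probe() to devm_request_mem_region()/devm_ioremap_nocache() is what lets the explicit iounmap()/release_mem_region() unwinding vanish from both the error path and sp_remove(): device-managed (devres) resources are released automatically when probe fails or the device is unbound. An illustrative sketch of the pattern, not specific to the SJA1000 driver:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), "demo"))
		return -EBUSY;

	/* Both the region and the mapping are torn down automatically,
	 * so no exit_unmap/exit_release labels are needed.
	 */
	base = devm_ioremap_nocache(&pdev->dev, res->start,
				    resource_size(res));
	if (!base)
		return -ENOMEM;

	return 0;
}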
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 3fcdae266377..f5b16e0e3a12 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -411,10 +411,16 @@ static void slc_free_netdev(struct net_device *dev)
slcan_devs[i] = NULL;
}
+static int slcan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ return -EINVAL;
+}
+
static const struct net_device_ops slc_netdev_ops = {
.ndo_open = slc_open,
.ndo_stop = slc_close,
.ndo_start_xmit = slc_xmit,
+ .ndo_change_mtu = slcan_change_mtu,
};
static void slc_setup(struct net_device *dev)
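Most drivers touched by this series wire .ndo_change_mtu to the generic can_change_mtu() helper, whereas slcan installs a stub that rejects any change, since the serial-line protocol only carries classic CAN frames with a fixed MTU. Broadly, a CAN-aware change_mtu callback has to refuse changes while the interface is up and accept only the MTU matching the supported frame format; the sketch below is an approximation of that behaviour, not a copy of the helper in drivers/net/can/dev.c:

#include <linux/can.h>
#include <linux/netdevice.h>

static int demo_can_change_mtu(struct net_device *dev, int new_mtu)
{
	/* The MTU encodes the frame format, so don't let it flip
	 * underneath a running interface.
	 */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	/* Classic CAN only: CAN_MTU == sizeof(struct can_frame). */
	if (new_mtu != CAN_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}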
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 9ea0dcde94ce..7d8c8f3672dd 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -628,6 +628,7 @@ static const struct net_device_ops softing_netdev_ops = {
.ndo_open = softing_netdev_open,
.ndo_stop = softing_netdev_stop,
.ndo_start_xmit = softing_netdev_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const softing_btr_const = {
@@ -832,6 +833,7 @@ static int softing_pdev_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto netdev_failed;
}
+ netdev->dev_id = j;
priv = netdev_priv(card->net[j]);
priv->index = j;
ret = softing_netdev_register(netdev);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 2c62fe6c8fa9..258b9c4856ec 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -871,6 +871,7 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
.ndo_open = ti_hecc_open,
.ndo_stop = ti_hecc_close,
.ndo_start_xmit = ti_hecc_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static int ti_hecc_probe(struct platform_device *pdev)
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 52c42fd49510..00f2534dde73 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -883,6 +883,7 @@ static const struct net_device_ops ems_usb_netdev_ops = {
.ndo_open = ems_usb_open,
.ndo_stop = ems_usb_close,
.ndo_start_xmit = ems_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const ems_usb_bittiming_const = {
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 7fbe85935f1d..b7c9e8b11460 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -888,6 +888,7 @@ static const struct net_device_ops esd_usb2_netdev_ops = {
.ndo_open = esd_usb2_open,
.ndo_stop = esd_usb2_close,
.ndo_start_xmit = esd_usb2_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const esd_usb2_bittiming_const = {
@@ -1024,6 +1025,7 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
netdev->netdev_ops = &esd_usb2_netdev_ops;
SET_NETDEV_DEV(netdev, &intf->dev);
+ netdev->dev_id = index;
err = register_candev(netdev);
if (err) {
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index e77d11049747..4ca46edc061d 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1388,6 +1388,7 @@ static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_open = kvaser_usb_open,
.ndo_stop = kvaser_usb_close,
.ndo_start_xmit = kvaser_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const kvaser_usb_bittiming_const = {
@@ -1529,6 +1530,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
netdev->netdev_ops = &kvaser_usb_netdev_ops;
SET_NETDEV_DEV(netdev, &intf->dev);
+ netdev->dev_id = channel;
dev->nets[channel] = priv;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 0b7a4c3b01a2..644e6ab8a489 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -702,6 +702,7 @@ static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_open = peak_usb_ndo_open,
.ndo_stop = peak_usb_ndo_stop,
.ndo_start_xmit = peak_usb_ndo_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
/*
@@ -769,6 +770,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
usb_set_intfdata(intf, dev);
SET_NETDEV_DEV(netdev, &intf->dev);
+ netdev->dev_id = ctrl_idx;
err = register_candev(netdev);
if (err) {
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index a0fa1fd5092b..ef674ecb82f8 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -697,8 +697,8 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
nofreecontext:
- usb_unanchor_urb(urb);
usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+ usb_free_urb(urb);
netdev_warn(netdev, "couldn't find free context");
@@ -887,6 +887,7 @@ static const struct net_device_ops usb_8dev_netdev_ops = {
.ndo_open = usb_8dev_open,
.ndo_stop = usb_8dev_close,
.ndo_start_xmit = usb_8dev_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const usb_8dev_bittiming_const = {
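In the usb_8dev transmit path, the nofreecontext branch runs before the URB has been submitted (and, as far as this path is concerned, before anchoring it would matter), so unanchoring achieved nothing while the URB reference itself leaked; the fix frees the coherent buffer and then drops the URB with usb_free_urb(). A loose sketch of that kind of cleanup, with invented names and under the assumption that the URB came from usb_alloc_urb() and its buffer from usb_alloc_coherent():

#include <linux/usb.h>

/* Hypothetical: throw away a TX URB that was prepared but never submitted. */
static void demo_drop_unsubmitted_urb(struct usb_device *udev, struct urb *urb,
				       void *buf, size_t size)
{
	usb_free_coherent(udev, size, buf, urb->transfer_dma);
	usb_free_urb(urb);	/* releases the reference from usb_alloc_urb() */
}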
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index bd8f84b0b894..0932ffbf381b 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -63,10 +63,10 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
dstats = per_cpu_ptr(dev->dstats, i);
do {
- start = u64_stats_fetch_begin_bh(&dstats->syncp);
+ start = u64_stats_fetch_begin_irq(&dstats->syncp);
tbytes = dstats->tx_bytes;
tpackets = dstats->tx_packets;
- } while (u64_stats_fetch_retry_bh(&dstats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpackets;
}
@@ -88,16 +88,10 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- int i;
- dev->dstats = alloc_percpu(struct pcpu_dstats);
+ dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
if (!dev->dstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_dstats *dstats;
- dstats = per_cpu_ptr(dev->dstats, i);
- u64_stats_init(&dstats->syncp);
- }
return 0;
}
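netdev_alloc_pcpu_stats() wraps exactly the boilerplate dummy_dev_init() used to open-code: allocate the per-CPU stats structure and run u64_stats_init() on every CPU's sync point. Together with the *_irq variants of the u64_stats fetch helpers used in dummy_get_stats64(), the usual shape of such per-CPU stats handling is sketched below (structure and field names are illustrative, not the dummy driver's):

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	u64			tx_packets;
	struct u64_stats_sync	syncp;	/* must be named syncp for the macro */
};

static struct demo_pcpu_stats __percpu *demo_stats;

static int demo_init_stats(void)
{
	/* Allocates the per-CPU array and initialises each CPU's syncp,
	 * which is what the removed for_each_possible_cpu() loop did by hand.
	 */
	demo_stats = netdev_alloc_pcpu_stats(struct demo_pcpu_stats);
	return demo_stats ? 0 : -ENOMEM;
}

static u64 demo_sum_tx_packets(void)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct demo_pcpu_stats *s = per_cpu_ptr(demo_stats, cpu);
		unsigned int start;
		u64 packets;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			packets = s->tx_packets;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		total += packets;
	}
	return total;
}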
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index c53384d41c96..35df0b9e6848 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -749,7 +749,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
- dev_kfree_skb (skb);
+ dev_consume_skb_any (skb);
/* Clear the Tx status stack. */
{
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 5992860a39c9..063557e037f2 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -1,23 +1,24 @@
-/*======================================================================
-
- A PCMCIA ethernet driver for the 3com 3c589 card.
-
- Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
-
- 3c589_cs.c 1.162 2001/10/13 00:08:50
-
- The network driver code is based on Donald Becker's 3c589 code:
-
- Written 1994 by Donald Becker.
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
- Donald Becker may be reached at becker@scyld.com
-
- Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
-
-======================================================================*/
+/* ======================================================================
+ *
+ * A PCMCIA ethernet driver for the 3com 3c589 card.
+ *
+ * Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+ *
+ * 3c589_cs.c 1.162 2001/10/13 00:08:50
+ *
+ * The network driver code is based on Donald Becker's 3c589 code:
+ *
+ * Written 1994 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may be used and
+ * distributed according to the terms of the GNU General Public License,
+ * incorporated herein by reference.
+ * Donald Becker may be reached at becker@scyld.com
+ *
+ * Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * ======================================================================
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -41,18 +42,20 @@
#include <linux/ioport.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
/* To minimize the size of the driver source I only define operating
- constants if they are used several times. You'll need the manual
- if you want to understand driver details. */
+ * constants if they are used several times. You'll need the manual
+ * if you want to understand driver details.
+ */
+
/* Offsets from base I/O address. */
#define EL3_DATA 0x00
#define EL3_TIMER 0x0a
@@ -65,7 +68,9 @@
#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
/* The top five bits written to EL3_CMD are a command, the lower
- 11 bits are the parameter, if applicable. */
+ * 11 bits are the parameter, if applicable.
+ */
+
enum c509cmd {
TotalReset = 0<<11,
SelectWindow = 1<<11,
@@ -190,138 +195,142 @@ static const struct net_device_ops el3_netdev_ops = {
static int tc589_probe(struct pcmcia_device *link)
{
- struct el3_private *lp;
- struct net_device *dev;
+ struct el3_private *lp;
+ struct net_device *dev;
- dev_dbg(&link->dev, "3c589_attach()\n");
+ dev_dbg(&link->dev, "3c589_attach()\n");
- /* Create new ethernet device */
- dev = alloc_etherdev(sizeof(struct el3_private));
- if (!dev)
- return -ENOMEM;
- lp = netdev_priv(dev);
- link->priv = dev;
- lp->p_dev = link;
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(struct el3_private));
+ if (!dev)
+ return -ENOMEM;
+ lp = netdev_priv(dev);
+ link->priv = dev;
+ lp->p_dev = link;
- spin_lock_init(&lp->lock);
- link->resource[0]->end = 16;
- link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+ spin_lock_init(&lp->lock);
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
- link->config_flags |= CONF_ENABLE_IRQ;
- link->config_index = 1;
+ link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_index = 1;
- dev->netdev_ops = &el3_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
+ dev->netdev_ops = &el3_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
- return tc589_config(link);
+ return tc589_config(link);
}
static void tc589_detach(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
+ struct net_device *dev = link->priv;
- dev_dbg(&link->dev, "3c589_detach\n");
+ dev_dbg(&link->dev, "3c589_detach\n");
- unregister_netdev(dev);
+ unregister_netdev(dev);
- tc589_release(link);
+ tc589_release(link);
- free_netdev(dev);
+ free_netdev(dev);
} /* tc589_detach */
static int tc589_config(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
- __be16 *phys_addr;
- int ret, i, j, multi = 0, fifo;
- unsigned int ioaddr;
- static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
- u8 *buf;
- size_t len;
-
- dev_dbg(&link->dev, "3c589_config\n");
-
- phys_addr = (__be16 *)dev->dev_addr;
- /* Is this a 3c562? */
- if (link->manf_id != MANFID_3COM)
- dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
- multi = (link->card_id == PRODID_3COM_3C562);
-
- link->io_lines = 16;
-
- /* For the 3c562, the base address must be xx00-xx7f */
- for (i = j = 0; j < 0x400; j += 0x10) {
- if (multi && (j & 0x80)) continue;
- link->resource[0]->start = j ^ 0x300;
- i = pcmcia_request_io(link);
- if (i == 0)
- break;
- }
- if (i != 0)
- goto failed;
-
- ret = pcmcia_request_irq(link, el3_interrupt);
- if (ret)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- dev->irq = link->irq;
- dev->base_addr = link->resource[0]->start;
- ioaddr = dev->base_addr;
- EL3WINDOW(0);
-
- /* The 3c589 has an extra EEPROM for configuration info, including
- the hardware address. The 3c562 puts the address in the CIS. */
- len = pcmcia_get_tuple(link, 0x88, &buf);
- if (buf && len >= 6) {
- for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
- kfree(buf);
- } else {
- kfree(buf); /* 0 < len < 6 */
- for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i));
- if (phys_addr[0] == htons(0x6060)) {
- dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
- dev->base_addr, dev->base_addr+15);
- goto failed;
+ struct net_device *dev = link->priv;
+ __be16 *phys_addr;
+ int ret, i, j, multi = 0, fifo;
+ unsigned int ioaddr;
+ static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ u8 *buf;
+ size_t len;
+
+ dev_dbg(&link->dev, "3c589_config\n");
+
+ phys_addr = (__be16 *)dev->dev_addr;
+ /* Is this a 3c562? */
+ if (link->manf_id != MANFID_3COM)
+ dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
+ multi = (link->card_id == PRODID_3COM_3C562);
+
+ link->io_lines = 16;
+
+ /* For the 3c562, the base address must be xx00-xx7f */
+ for (i = j = 0; j < 0x400; j += 0x10) {
+ if (multi && (j & 0x80))
+ continue;
+ link->resource[0]->start = j ^ 0x300;
+ i = pcmcia_request_io(link);
+ if (i == 0)
+ break;
}
- }
-
- /* The address and resource configuration register aren't loaded from
- the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */
- outw(0x3f00, ioaddr + 8);
- fifo = inl(ioaddr);
-
- /* The if_port symbol can be set when the module is loaded */
- if ((if_port >= 0) && (if_port <= 3))
- dev->if_port = if_port;
- else
- dev_err(&link->dev, "invalid if_port requested\n");
-
- SET_NETDEV_DEV(dev, &link->dev);
-
- if (register_netdev(dev) != 0) {
- dev_err(&link->dev, "register_netdev() failed\n");
- goto failed;
- }
-
- netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
- (multi ? "562" : "589"), dev->base_addr, dev->irq,
- dev->dev_addr);
- netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
- (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
- if_names[dev->if_port]);
- return 0;
+ if (i != 0)
+ goto failed;
+
+ ret = pcmcia_request_irq(link, el3_interrupt);
+ if (ret)
+ goto failed;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+ ioaddr = dev->base_addr;
+ EL3WINDOW(0);
+
+ /* The 3c589 has an extra EEPROM for configuration info, including
+ * the hardware address. The 3c562 puts the address in the CIS.
+ */
+ len = pcmcia_get_tuple(link, 0x88, &buf);
+ if (buf && len >= 6) {
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
+ kfree(buf);
+ } else {
+ kfree(buf); /* 0 < len < 6 */
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+ if (phys_addr[0] == htons(0x6060)) {
+ dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
+ dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+
+ /* The address and resource configuration register aren't loaded from
+ * the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version.
+ */
+
+ outw(0x3f00, ioaddr + 8);
+ fifo = inl(ioaddr);
+
+ /* The if_port symbol can be set when the module is loaded */
+ if ((if_port >= 0) && (if_port <= 3))
+ dev->if_port = if_port;
+ else
+ dev_err(&link->dev, "invalid if_port requested\n");
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ if (register_netdev(dev) != 0) {
+ dev_err(&link->dev, "register_netdev() failed\n");
+ goto failed;
+ }
+
+ netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
+ (multi ? "562" : "589"), dev->base_addr, dev->irq,
+ dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
+ if_names[dev->if_port]);
+ return 0;
failed:
- tc589_release(link);
- return -ENODEV;
+ tc589_release(link);
+ return -ENODEV;
} /* tc589_config */
static void tc589_release(struct pcmcia_device *link)
@@ -353,113 +362,120 @@ static int tc589_resume(struct pcmcia_device *link)
/*====================================================================*/
-/*
- Use this for commands that may take time to finish
-*/
+/* Use this for commands that may take time to finish */
+
static void tc589_wait_for_completion(struct net_device *dev, int cmd)
{
- int i = 100;
- outw(cmd, dev->base_addr + EL3_CMD);
- while (--i > 0)
- if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
- if (i == 0)
- netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
+ int i = 100;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000))
+ break;
+ if (i == 0)
+ netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
}
-/*
- Read a word from the EEPROM using the regular EEPROM access register.
- Assume that we are in register window zero.
-*/
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ * Assume that we are in register window zero.
+ */
+
static u16 read_eeprom(unsigned int ioaddr, int index)
{
- int i;
- outw(EEPROM_READ + index, ioaddr + 10);
- /* Reading the eeprom takes 162 us */
- for (i = 1620; i >= 0; i--)
- if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
- break;
- return inw(ioaddr + 12);
+ int i;
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Reading the eeprom takes 162 us */
+ for (i = 1620; i >= 0; i--)
+ if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
+ break;
+ return inw(ioaddr + 12);
}
-/*
- Set transceiver type, perhaps to something other than what the user
- specified in dev->if_port.
-*/
+/* Set transceiver type, perhaps to something other than what the user
+ * specified in dev->if_port.
+ */
+
static void tc589_set_xcvr(struct net_device *dev, int if_port)
{
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr = dev->base_addr;
-
- EL3WINDOW(0);
- switch (if_port) {
- case 0: case 1: outw(0, ioaddr + 6); break;
- case 2: outw(3<<14, ioaddr + 6); break;
- case 3: outw(1<<14, ioaddr + 6); break;
- }
- /* On PCMCIA, this just turns on the LED */
- outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
- /* 10baseT interface, enable link beat and jabber check. */
- EL3WINDOW(4);
- outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
- EL3WINDOW(1);
- if (if_port == 2)
- lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
- else
- lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ EL3WINDOW(0);
+ switch (if_port) {
+ case 0:
+ case 1:
+ outw(0, ioaddr + 6);
+ break;
+ case 2:
+ outw(3<<14, ioaddr + 6);
+ break;
+ case 3:
+ outw(1<<14, ioaddr + 6);
+ break;
+ }
+ /* On PCMCIA, this just turns on the LED */
+ outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
+ /* 10baseT interface, enable link beat and jabber check. */
+ EL3WINDOW(4);
+ outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
+ EL3WINDOW(1);
+ if (if_port == 2)
+ lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
+ else
+ lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
}
static void dump_status(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- EL3WINDOW(1);
- netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
- inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
- inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
- EL3WINDOW(4);
- netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
- inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
- inw(ioaddr+0x0a));
- EL3WINDOW(1);
+ unsigned int ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
+ inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
+ EL3WINDOW(4);
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
+ inw(ioaddr+0x0a));
+ EL3WINDOW(1);
}
/* Reset and restore all of the 3c589 registers. */
static void tc589_reset(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int i;
-
- EL3WINDOW(0);
- outw(0x0001, ioaddr + 4); /* Activate board. */
- outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
-
- /* Set the station address in window 2. */
- EL3WINDOW(2);
- for (i = 0; i < 6; i++)
- outb(dev->dev_addr[i], ioaddr + i);
-
- tc589_set_xcvr(dev, dev->if_port);
-
- /* Switch to the stats window, and clear all stats by reading. */
- outw(StatsDisable, ioaddr + EL3_CMD);
- EL3WINDOW(6);
- for (i = 0; i < 9; i++)
- inb(ioaddr+i);
- inw(ioaddr + 10);
- inw(ioaddr + 12);
-
- /* Switch to register set 1 for normal use. */
- EL3WINDOW(1);
-
- set_rx_mode(dev);
- outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
- outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
- outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
- /* Allow status bits to be seen. */
- outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
- /* Ack all pending events, and set active indicator mask. */
- outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ EL3WINDOW(0);
+ outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
+
+ /* Set the station address in window 2. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ tc589_set_xcvr(dev, dev->if_port);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr+i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ set_rx_mode(dev);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
ioaddr + EL3_CMD);
- outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
| AdapterFailure, ioaddr + EL3_CMD);
}
@@ -478,381 +494,406 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static int el3_config(struct net_device *dev, struct ifmap *map)
{
- if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
- if (map->port <= 3) {
- dev->if_port = map->port;
- netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
- tc589_set_xcvr(dev, dev->if_port);
- } else
- return -EINVAL;
- }
- return 0;
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 3) {
+ dev->if_port = map->port;
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
+ tc589_set_xcvr(dev, dev->if_port);
+ } else {
+ return -EINVAL;
+ }
+ }
+ return 0;
}
static int el3_open(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- struct pcmcia_device *link = lp->p_dev;
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
- if (!pcmcia_dev_present(link))
- return -ENODEV;
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
- link->open++;
- netif_start_queue(dev);
+ link->open++;
+ netif_start_queue(dev);
- tc589_reset(dev);
- init_timer(&lp->media);
- lp->media.function = media_check;
- lp->media.data = (unsigned long) dev;
- lp->media.expires = jiffies + HZ;
- add_timer(&lp->media);
+ tc589_reset(dev);
+ init_timer(&lp->media);
+ lp->media.function = media_check;
+ lp->media.data = (unsigned long) dev;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
- dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
+ dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
dev->name, inw(dev->base_addr + EL3_STATUS));
- return 0;
+ return 0;
}
static void el3_tx_timeout(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
-
- netdev_warn(dev, "Transmit timed out!\n");
- dump_status(dev);
- dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
- /* Issue TX_RESET and TX_START commands. */
- tc589_wait_for_completion(dev, TxReset);
- outw(TxEnable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_warn(dev, "Transmit timed out!\n");
+ dump_status(dev);
+ dev->stats.tx_errors++;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ /* Issue TX_RESET and TX_START commands. */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
}
static void pop_tx_status(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int i;
-
- /* Clear the Tx status stack. */
- for (i = 32; i > 0; i--) {
- u_char tx_status = inb(ioaddr + TX_STATUS);
- if (!(tx_status & 0x84)) break;
- /* reset transmitter on jabber error or underrun */
- if (tx_status & 0x30)
- tc589_wait_for_completion(dev, TxReset);
- if (tx_status & 0x38) {
- netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
- outw(TxEnable, ioaddr + EL3_CMD);
- dev->stats.tx_aborted_errors++;
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TX_STATUS);
+ if (!(tx_status & 0x84))
+ break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc589_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
}
- outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
- }
}
static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- struct el3_private *priv = netdev_priv(dev);
- unsigned long flags;
+ unsigned int ioaddr = dev->base_addr;
+ struct el3_private *priv = netdev_priv(dev);
+ unsigned long flags;
- netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
+ netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
(long)skb->len, inw(ioaddr + EL3_STATUS));
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
- dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
- /* Put out the doubleword header... */
- outw(skb->len, ioaddr + TX_FIFO);
- outw(0x00, ioaddr + TX_FIFO);
- /* ... and the packet rounded to a doubleword. */
- outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- if (inw(ioaddr + TX_FREE) <= 1536) {
- netif_stop_queue(dev);
- /* Interrupt us when the FIFO has room for max-sized packet. */
- outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
- }
+ if (inw(ioaddr + TX_FREE) <= 1536) {
+ netif_stop_queue(dev);
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+ }
- pop_tx_status(dev);
- spin_unlock_irqrestore(&priv->lock, flags);
- dev_kfree_skb(skb);
+ pop_tx_status(dev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/* The EL3 interrupt handler. */
static irqreturn_t el3_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *) dev_id;
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr;
- __u16 status;
- int i = 0, handled = 1;
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr;
+ __u16 status;
+ int i = 0, handled = 1;
- if (!netif_device_present(dev))
- return IRQ_NONE;
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
- ioaddr = dev->base_addr;
+ ioaddr = dev->base_addr;
- netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
- spin_lock(&lp->lock);
- while ((status = inw(ioaddr + EL3_STATUS)) &
+ spin_lock(&lp->lock);
+ while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | StatsFull)) {
- if ((status & 0xe000) != 0x2000) {
- netdev_dbg(dev, "interrupt from dead card\n");
- handled = 0;
- break;
- }
- if (status & RxComplete)
- el3_rx(dev);
- if (status & TxAvailable) {
- netdev_dbg(dev, " TX room bit was handled.\n");
- /* There's room in the FIFO for a full-sized packet. */
- outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
- }
- if (status & TxComplete)
- pop_tx_status(dev);
- if (status & (AdapterFailure | RxEarly | StatsFull)) {
- /* Handle all uncommon interrupts. */
- if (status & StatsFull) /* Empty statistics. */
- update_stats(dev);
- if (status & RxEarly) { /* Rx early is unused. */
- el3_rx(dev);
- outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
- }
- if (status & AdapterFailure) {
- u16 fifo_diag;
- EL3WINDOW(4);
- fifo_diag = inw(ioaddr + 4);
- EL3WINDOW(1);
- netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
+ if ((status & 0xe000) != 0x2000) {
+ netdev_dbg(dev, "interrupt from dead card\n");
+ handled = 0;
+ break;
+ }
+ if (status & RxComplete)
+ el3_rx(dev);
+ if (status & TxAvailable) {
+ netdev_dbg(dev, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+ if (status & TxComplete)
+ pop_tx_status(dev);
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull) /* Empty statistics. */
+ update_stats(dev);
+ if (status & RxEarly) {
+ /* Rx early is unused. */
+ el3_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + 4);
+ EL3WINDOW(1);
+ netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
fifo_diag);
- if (fifo_diag & 0x0400) {
- /* Tx overrun */
- tc589_wait_for_completion(dev, TxReset);
- outw(TxEnable, ioaddr + EL3_CMD);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun */
+ tc589_wait_for_completion(dev, RxReset);
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
}
- if (fifo_diag & 0x2000) {
- /* Rx underrun */
- tc589_wait_for_completion(dev, RxReset);
- set_rx_mode(dev);
- outw(RxEnable, ioaddr + EL3_CMD);
+ if (++i > 10) {
+ netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
+ status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
}
- outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
- }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
}
- if (++i > 10) {
- netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
- status);
- /* Clear all interrupts */
- outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
- break;
- }
- /* Acknowledge the IRQ. */
- outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
- }
- lp->last_irq = jiffies;
- spin_unlock(&lp->lock);
- netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
- inw(ioaddr + EL3_STATUS));
- return IRQ_RETVAL(handled);
+ lp->last_irq = jiffies;
+ spin_unlock(&lp->lock);
+ netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS));
+ return IRQ_RETVAL(handled);
}
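The window checks above (and the matching test in el3_close() further down) read the top three status bits as the active register window; a minimal sketch of that probe, assuming bits 15..13 of EL3_STATUS report the currently selected window and the driver parks a live card in window 1 (the helper name is hypothetical, not part of this patch):

/* Hypothetical liveness probe: bits 15..13 of EL3_STATUS hold the active
 * register window, and the driver keeps an idle card in window 1 (0x2000).
 */
static inline int el3_card_alive(unsigned int ioaddr)
{
	return (inw(ioaddr + EL3_STATUS) & 0xe000) == 0x2000;
}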
static void media_check(unsigned long arg)
{
- struct net_device *dev = (struct net_device *)(arg);
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr = dev->base_addr;
- u16 media, errs;
- unsigned long flags;
+ struct net_device *dev = (struct net_device *)(arg);
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u16 media, errs;
+ unsigned long flags;
- if (!netif_device_present(dev)) goto reschedule;
+ if (!netif_device_present(dev))
+ goto reschedule;
- /* Check for pending interrupt with expired latency timer: with
- this, we can limp along even if the interrupt is blocked */
- if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
+ /* Check for pending interrupt with expired latency timer: with
+ * this, we can limp along even if the interrupt is blocked
+ */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
(inb(ioaddr + EL3_TIMER) == 0xff)) {
- if (!lp->fast_poll)
- netdev_warn(dev, "interrupt(s) dropped!\n");
-
- local_irq_save(flags);
- el3_interrupt(dev->irq, dev);
- local_irq_restore(flags);
-
- lp->fast_poll = HZ;
- }
- if (lp->fast_poll) {
- lp->fast_poll--;
- lp->media.expires = jiffies + HZ/100;
- add_timer(&lp->media);
- return;
- }
-
- /* lp->lock guards the EL3 window. Window should always be 1 except
- when the lock is held */
- spin_lock_irqsave(&lp->lock, flags);
- EL3WINDOW(4);
- media = inw(ioaddr+WN4_MEDIA) & 0xc810;
-
- /* Ignore collisions unless we've had no irq's recently */
- if (time_before(jiffies, lp->last_irq + HZ)) {
- media &= ~0x0010;
- } else {
- /* Try harder to detect carrier errors */
- EL3WINDOW(6);
- outw(StatsDisable, ioaddr + EL3_CMD);
- errs = inb(ioaddr + 0);
- outw(StatsEnable, ioaddr + EL3_CMD);
- dev->stats.tx_carrier_errors += errs;
- if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
- }
+ if (!lp->fast_poll)
+ netdev_warn(dev, "interrupt(s) dropped!\n");
+
+ local_irq_save(flags);
+ el3_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + HZ/100;
+ add_timer(&lp->media);
+ return;
+ }
+
+ /* lp->lock guards the EL3 window. Window should always be 1 except
+ * when the lock is held
+ */
+
+ spin_lock_irqsave(&lp->lock, flags);
+ EL3WINDOW(4);
+ media = inw(ioaddr+WN4_MEDIA) & 0xc810;
+
+ /* Ignore collisions unless we've had no irq's recently */
+ if (time_before(jiffies, lp->last_irq + HZ)) {
+ media &= ~0x0010;
+ } else {
+ /* Try harder to detect carrier errors */
+ EL3WINDOW(6);
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ errs = inb(ioaddr + 0);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_carrier_errors += errs;
+ if (errs || (lp->media_status & 0x0010))
+ media |= 0x0010;
+ }
- if (media != lp->media_status) {
- if ((media & lp->media_status & 0x8000) &&
- ((lp->media_status ^ media) & 0x0800))
+ if (media != lp->media_status) {
+ if ((media & lp->media_status & 0x8000) &&
+ ((lp->media_status ^ media) & 0x0800))
netdev_info(dev, "%s link beat\n",
- (lp->media_status & 0x0800 ? "lost" : "found"));
- else if ((media & lp->media_status & 0x4000) &&
+ (lp->media_status & 0x0800 ? "lost" : "found"));
+ else if ((media & lp->media_status & 0x4000) &&
((lp->media_status ^ media) & 0x0010))
netdev_info(dev, "coax cable %s\n",
- (lp->media_status & 0x0010 ? "ok" : "problem"));
- if (dev->if_port == 0) {
- if (media & 0x8000) {
- if (media & 0x0800)
- netdev_info(dev, "flipped to 10baseT\n");
- else
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
+ if (dev->if_port == 0) {
+ if (media & 0x8000) {
+ if (media & 0x0800)
+ netdev_info(dev, "flipped to 10baseT\n");
+ else
tc589_set_xcvr(dev, 2);
- } else if (media & 0x4000) {
- if (media & 0x0010)
- tc589_set_xcvr(dev, 1);
- else
- netdev_info(dev, "flipped to 10base2\n");
- }
+ } else if (media & 0x4000) {
+ if (media & 0x0010)
+ tc589_set_xcvr(dev, 1);
+ else
+ netdev_info(dev, "flipped to 10base2\n");
+ }
+ }
+ lp->media_status = media;
}
- lp->media_status = media;
- }
- EL3WINDOW(1);
- spin_unlock_irqrestore(&lp->lock, flags);
+ EL3WINDOW(1);
+ spin_unlock_irqrestore(&lp->lock, flags);
reschedule:
- lp->media.expires = jiffies + HZ;
- add_timer(&lp->media);
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
}
static struct net_device_stats *el3_get_stats(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- unsigned long flags;
- struct pcmcia_device *link = lp->p_dev;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ struct pcmcia_device *link = lp->p_dev;
- if (pcmcia_dev_present(link)) {
- spin_lock_irqsave(&lp->lock, flags);
- update_stats(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- return &dev->stats;
+ if (pcmcia_dev_present(link)) {
+ spin_lock_irqsave(&lp->lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return &dev->stats;
}
-/*
- Update statistics. We change to register window 6, so this should be run
- single-threaded if the device is active. This is expected to be a rare
- operation, and it's simpler for the rest of the driver to assume that
- window 1 is always valid rather than use a special window-state variable.
-
- Caller must hold the lock for this
+/* Update statistics. We change to register window 6, so this should be run
+ * single-threaded if the device is active. This is expected to be a rare
+ * operation, and it's simpler for the rest of the driver to assume that
+ * window 1 is always valid rather than use a special window-state variable.
+ *
+ * Caller must hold the lock for this.
*/
+
static void update_stats(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
-
- netdev_dbg(dev, "updating the statistics.\n");
- /* Turn off statistics updates while reading. */
- outw(StatsDisable, ioaddr + EL3_CMD);
- /* Switch to the stats window, and read everything. */
- EL3WINDOW(6);
- dev->stats.tx_carrier_errors += inb(ioaddr + 0);
- dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
- /* Multiple collisions. */ inb(ioaddr + 2);
- dev->stats.collisions += inb(ioaddr + 3);
- dev->stats.tx_window_errors += inb(ioaddr + 4);
- dev->stats.rx_fifo_errors += inb(ioaddr + 5);
- dev->stats.tx_packets += inb(ioaddr + 6);
- /* Rx packets */ inb(ioaddr + 7);
- /* Tx deferrals */ inb(ioaddr + 8);
- /* Rx octets */ inw(ioaddr + 10);
- /* Tx octets */ inw(ioaddr + 12);
-
- /* Back to window 1, and turn statistics back on. */
- EL3WINDOW(1);
- outw(StatsEnable, ioaddr + EL3_CMD);
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_dbg(dev, "updating the statistics.\n");
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */
+ inb(ioaddr + 2);
+ dev->stats.collisions += inb(ioaddr + 3);
+ dev->stats.tx_window_errors += inb(ioaddr + 4);
+ dev->stats.rx_fifo_errors += inb(ioaddr + 5);
+ dev->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */
+ inb(ioaddr + 7);
+ /* Tx deferrals */
+ inb(ioaddr + 8);
+ /* Rx octets */
+ inw(ioaddr + 10);
+ /* Tx octets */
+ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
}
static int el3_rx(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int worklimit = 32;
- short rx_status;
+ unsigned int ioaddr = dev->base_addr;
+ int worklimit = 32;
+ short rx_status;
- netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
- while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
+ while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
worklimit > 0) {
- worklimit--;
- if (rx_status & 0x4000) { /* Error, update stats. */
- short error = rx_status & 0x3800;
- dev->stats.rx_errors++;
- switch (error) {
- case 0x0000: dev->stats.rx_over_errors++; break;
- case 0x0800: dev->stats.rx_length_errors++; break;
- case 0x1000: dev->stats.rx_frame_errors++; break;
- case 0x1800: dev->stats.rx_length_errors++; break;
- case 0x2000: dev->stats.rx_frame_errors++; break;
- case 0x2800: dev->stats.rx_crc_errors++; break;
- }
- } else {
- short pkt_len = rx_status & 0x7ff;
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 5);
-
- netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
+ worklimit--;
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ dev->stats.rx_errors++;
+ switch (error) {
+ case 0x0000:
+ dev->stats.rx_over_errors++;
+ break;
+ case 0x0800:
+ dev->stats.rx_length_errors++;
+ break;
+ case 0x1000:
+ dev->stats.rx_frame_errors++;
+ break;
+ case 0x1800:
+ dev->stats.rx_length_errors++;
+ break;
+ case 0x2000:
+ dev->stats.rx_frame_errors++;
+ break;
+ case 0x2800:
+ dev->stats.rx_crc_errors++;
+ break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, pkt_len + 5);
+
+ netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
- if (skb != NULL) {
- skb_reserve(skb, 2);
- insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ if (skb != NULL) {
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
(pkt_len+3)>>2);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- } else {
- netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ } else {
+ netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
pkt_len);
- dev->stats.rx_dropped++;
- }
+ dev->stats.rx_dropped++;
+ }
+ }
+ /* Pop the top of the Rx FIFO */
+ tc589_wait_for_completion(dev, RxDiscard);
}
- /* Pop the top of the Rx FIFO */
- tc589_wait_for_completion(dev, RxDiscard);
- }
- if (worklimit == 0)
- netdev_warn(dev, "too much work in el3_rx!\n");
- return 0;
+ if (worklimit == 0)
+ netdev_warn(dev, "too much work in el3_rx!\n");
+ return 0;
}
static void set_rx_mode(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- u16 opts = SetRxFilter | RxStation | RxBroadcast;
-
- if (dev->flags & IFF_PROMISC)
- opts |= RxMulticast | RxProm;
- else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
- opts |= RxMulticast;
- outw(opts, ioaddr + EL3_CMD);
+ unsigned int ioaddr = dev->base_addr;
+ u16 opts = SetRxFilter | RxStation | RxBroadcast;
+
+ if (dev->flags & IFF_PROMISC)
+ opts |= RxMulticast | RxProm;
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
+ opts |= RxMulticast;
+ outw(opts, ioaddr + EL3_CMD);
}
static void set_multicast_list(struct net_device *dev)
@@ -867,44 +908,44 @@ static void set_multicast_list(struct net_device *dev)
static int el3_close(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- struct pcmcia_device *link = lp->p_dev;
- unsigned int ioaddr = dev->base_addr;
-
- dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+ unsigned int ioaddr = dev->base_addr;
+
+ dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+
+ if (pcmcia_dev_present(link)) {
+ /* Turn off statistics ASAP. We update dev->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 1) {
+ /* Disable link beat and jabber */
+ EL3WINDOW(4);
+ outw(0, ioaddr + WN4_MEDIA);
+ }
- if (pcmcia_dev_present(link)) {
- /* Turn off statistics ASAP. We update dev->stats below. */
- outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ /* But we explicitly zero the IRQ line select anyway. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
- /* Disable the receiver and transmitter. */
- outw(RxDisable, ioaddr + EL3_CMD);
- outw(TxDisable, ioaddr + EL3_CMD);
-
- if (dev->if_port == 2)
- /* Turn off thinnet power. Green! */
- outw(StopCoax, ioaddr + EL3_CMD);
- else if (dev->if_port == 1) {
- /* Disable link beat and jabber */
- EL3WINDOW(4);
- outw(0, ioaddr + WN4_MEDIA);
+ /* Check if the card still exists */
+ if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
+ update_stats(dev);
}
- /* Switching back to window 0 disables the IRQ. */
- EL3WINDOW(0);
- /* But we explicitly zero the IRQ line select anyway. */
- outw(0x0f00, ioaddr + WN0_IRQ);
-
- /* Check if the card still exists */
- if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
- update_stats(dev);
- }
-
- link->open--;
- netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->media);
- return 0;
+ return 0;
}
static const struct pcmcia_device_id tc589_ids[] = {
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 238ccea965c8..61477b8e8d24 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2086,7 +2086,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* ... and the packet rounded to a doubleword. */
skb_tx_timestamp(skb);
iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- dev_kfree_skb (skb);
+ dev_consume_skb_any (skb);
if (ioread16(ioaddr + TxFree) > 1536) {
netif_start_queue (dev); /* AKPM: redundant? */
} else {
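This hunk, like the lib8390, sun4i-emac and bfin_mac hunks below, replaces dev_kfree_skb() with dev_consume_skb_any() on the successful-transmit path. A hedged sketch of the distinction (example_tx_done() is illustrative only, not part of any of these drivers):

#include <linux/netdevice.h>

/* dev_consume_skb_any() frees an skb that was sent successfully, so drop
 * tracers do not count it as a drop; dev_kfree_skb_any() is the matching
 * call for genuine drops. Both are safe in hard-irq and process context.
 */
static void example_tx_done(struct sk_buff *skb, bool transmitted)
{
	if (transmitted)
		dev_consume_skb_any(skb);
	else
		dev_kfree_skb_any(skb);
}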
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 65b735d4a6ad..afaab4b2333f 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -66,7 +66,7 @@ config PCMCIA_3C589
config VORTEX
tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support"
- depends on (PCI || EISA) && HAS_IOPORT
+ depends on (PCI || EISA) && HAS_IOPORT_MAP
select MII
---help---
This option enables driver support for a large number of 10Mbps and
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index d2cd80444ade..599311f0e05c 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -404,7 +404,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
spin_unlock(&ei_local->page_lock);
enable_irq_lockdep_irqrestore(dev->irq, &flags);
skb_tx_timestamp(skb);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
dev->stats.tx_bytes += send_length;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 506b0248c400..39b26fe28d10 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -22,6 +22,7 @@ source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
+source "drivers/net/ethernet/altera/Kconfig"
source "drivers/net/ethernet/amd/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
@@ -149,6 +150,7 @@ config S6GMAC
To compile this driver as a module, choose M here. The module
will be called s6gmac.
+source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c0b8789952e7..545d0b3b9cb4 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
+obj-$(CONFIG_ALTERA_TSE) += altera/
obj-$(CONFIG_NET_VENDOR_AMD) += amd/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
@@ -60,6 +61,7 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_S6GMAC) += s6gmac.o
+obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
obj-$(CONFIG_NET_VENDOR_SIS) += sis/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index c0f68dcd1dc1..7ae74d450e8f 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -307,11 +307,6 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
return bfin_mdio_poll();
}
-static int bfin_mdiobus_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static void bfin_mac_adjust_link(struct net_device *dev)
{
struct bfin_mac_local *lp = netdev_priv(dev);
@@ -1040,6 +1035,7 @@ static struct ptp_clock_info bfin_ptp_caps = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = bfin_ptp_adjfreq,
.adjtime = bfin_ptp_adjtime,
@@ -1086,7 +1082,7 @@ static inline void _tx_reclaim_skb(void)
tx_list_head->desc_a.config &= ~DMAEN;
tx_list_head->status.status_word = 0;
if (tx_list_head->skb) {
- dev_kfree_skb(tx_list_head->skb);
+ dev_consume_skb_any(tx_list_head->skb);
tx_list_head->skb = NULL;
}
tx_list_head = tx_list_head->next;
@@ -1823,7 +1819,6 @@ static int bfin_mii_bus_probe(struct platform_device *pdev)
goto out_err_alloc;
miibus->read = bfin_mdiobus_read;
miibus->write = bfin_mdiobus_write;
- miibus->reset = bfin_mdiobus_reset;
miibus->parent = &pdev->dev;
miibus->name = "bfin_mii_bus";
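The empty bfin_mdiobus_reset() stub (and greth_mdio_reset() in the following hunk) can be dropped on the assumption that the MDIO core only invokes bus->reset when it is non-NULL; a minimal registration sketch under that assumption:

	/* Sketch only: leaving miibus->reset NULL makes the bus reset step a
	 * no-op inside mdiobus_register(), so the empty callback added nothing.
	 */
	miibus->read = bfin_mdiobus_read;
	miibus->write = bfin_mdiobus_write;
	/* no miibus->reset assignment needed */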
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index c5d75e7aeeb6..23578dfee249 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1213,11 +1213,6 @@ static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
return 0;
}
-static int greth_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static void greth_link_change(struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
@@ -1332,7 +1327,6 @@ static int greth_mdio_init(struct greth_private *greth)
snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
greth->mdio->read = greth_mdio_read;
greth->mdio->write = greth_mdio_write;
- greth->mdio->reset = greth_mdio_reset;
greth->mdio->priv = greth;
greth->mdio->irq = greth->mdio_irqs;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 511f6eecd58b..fcaeeb8a4929 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -476,7 +476,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&db->lock, flags);
/* free this SKB */
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
new file mode 100644
index 000000000000..80c1ab74a4b8
--- /dev/null
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -0,0 +1,8 @@
+config ALTERA_TSE
+ tristate "Altera Triple-Speed Ethernet MAC support"
+ select PHYLIB
+ ---help---
+ This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
+
+ To compile this driver as a module, choose M here. The module
+ will be called altera_tse.
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
new file mode 100644
index 000000000000..d4a187e45369
--- /dev/null
+++ b/drivers/net/ethernet/altera/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Altera device drivers.
+#
+
+obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
+altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
+altera_msgdma.o altera_sgdma.o altera_utils.o
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
new file mode 100644
index 000000000000..3df18669ea30
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -0,0 +1,202 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/netdevice.h>
+#include "altera_utils.h"
+#include "altera_tse.h"
+#include "altera_msgdmahw.h"
+
+/* No initialization work to do for MSGDMA */
+int msgdma_initialize(struct altera_tse_private *priv)
+{
+ return 0;
+}
+
+void msgdma_uninitialize(struct altera_tse_private *priv)
+{
+}
+
+void msgdma_reset(struct altera_tse_private *priv)
+{
+ int counter;
+ struct msgdma_csr *txcsr =
+ (struct msgdma_csr *)priv->tx_dma_csr;
+ struct msgdma_csr *rxcsr =
+ (struct msgdma_csr *)priv->rx_dma_csr;
+
+ /* Reset Rx mSGDMA */
+ iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+ iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+
+ counter = 0;
+ while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+ if (tse_bit_is_clear(&rxcsr->status,
+ MSGDMA_CSR_STAT_RESETTING))
+ break;
+ udelay(1);
+ }
+
+ if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR)
+ netif_warn(priv, drv, priv->dev,
+ "TSE Rx mSGDMA resetting bit never cleared!\n");
+
+ /* clear all status bits */
+ iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+
+ /* Reset Tx mSGDMA */
+ iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+ iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+
+ counter = 0;
+ while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+ if (tse_bit_is_clear(&txcsr->status,
+ MSGDMA_CSR_STAT_RESETTING))
+ break;
+ udelay(1);
+ }
+
+ if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR)
+ netif_warn(priv, drv, priv->dev,
+ "TSE Tx mSGDMA resetting bit never cleared!\n");
+
+ /* clear all status bits */
+ iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+}
+
+void msgdma_disable_rxirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->rx_dma_csr;
+ tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_enable_rxirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->rx_dma_csr;
+ tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_disable_txirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->tx_dma_csr;
+ tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_enable_txirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->tx_dma_csr;
+ tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_clear_rxirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->rx_dma_csr;
+ iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+}
+
+void msgdma_clear_txirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->tx_dma_csr;
+ iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+}
+
+/* return 0 to indicate transmit is pending */
+int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+ struct msgdma_extended_desc *desc = priv->tx_dma_desc;
+
+ iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
+ iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
+ iowrite32(0, &desc->write_addr_lo);
+ iowrite32(0, &desc->write_addr_hi);
+ iowrite32(buffer->len, &desc->len);
+ iowrite32(0, &desc->burst_seq_num);
+ iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
+ iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+ return 0;
+}
+
+u32 msgdma_tx_completions(struct altera_tse_private *priv)
+{
+ u32 ready = 0;
+ u32 inuse;
+ u32 status;
+ struct msgdma_csr *txcsr =
+ (struct msgdma_csr *)priv->tx_dma_csr;
+
+ /* Get number of sent descriptors */
+ inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+
+ if (inuse) { /* Tx FIFO is not empty */
+ ready = priv->tx_prod - priv->tx_cons - inuse - 1;
+ } else {
+ /* Check for buffered last packet */
+ status = ioread32(&txcsr->status);
+ if (status & MSGDMA_CSR_STAT_BUSY)
+ ready = priv->tx_prod - priv->tx_cons - 1;
+ else
+ ready = priv->tx_prod - priv->tx_cons;
+ }
+ return ready;
+}
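A worked example of the arithmetic above, with illustrative values: tx_prod = 10, tx_cons = 4 and a read fill level (inuse) of 2 means the FIFO is not empty, so ready = 10 - 4 - 2 - 1 = 3 descriptors are reported to the reclaim path; the extra -1 appears to hold back one still-in-flight descriptor while the FIFO is non-empty.

	/* Illustrative values only:
	 *	inuse = 2 (FIFO not empty)
	 *	ready = priv->tx_prod - priv->tx_cons - inuse - 1
	 *	      = 10 - 4 - 2 - 1 = 3
	 */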
+
+/* Put a buffer into the mSGDMA RX FIFO
+ */
+int msgdma_add_rx_desc(struct altera_tse_private *priv,
+ struct tse_buffer *rxbuffer)
+{
+ struct msgdma_extended_desc *desc = priv->rx_dma_desc;
+ u32 len = priv->rx_dma_buf_sz;
+ dma_addr_t dma_addr = rxbuffer->dma_addr;
+ u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
+ | MSGDMA_DESC_CTL_END_ON_LEN
+ | MSGDMA_DESC_CTL_TR_COMP_IRQ
+ | MSGDMA_DESC_CTL_EARLY_IRQ
+ | MSGDMA_DESC_CTL_TR_ERR_IRQ
+ | MSGDMA_DESC_CTL_GO);
+
+ iowrite32(0, &desc->read_addr_lo);
+ iowrite32(0, &desc->read_addr_hi);
+ iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
+ iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
+ iowrite32(len, &desc->len);
+ iowrite32(0, &desc->burst_seq_num);
+ iowrite32(0x00010001, &desc->stride);
+ iowrite32(control, &desc->control);
+ return 1;
+}
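The literal 0x00010001 stride and the OR'd control flags in this function match the MSGDMA_DESC_RX_STRIDE and MSGDMA_DESC_CTL_RX_SINGLE definitions in altera_msgdmahw.h (added below); an equivalent formulation using the named constants would be:

	iowrite32(MSGDMA_DESC_RX_STRIDE, &desc->stride);
	iowrite32(MSGDMA_DESC_CTL_RX_SINGLE, &desc->control);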
+
+/* status is returned on upper 16 bits,
+ * length is returned in lower 16 bits
+ */
+u32 msgdma_rx_status(struct altera_tse_private *priv)
+{
+ u32 rxstatus = 0;
+ u32 pktlength;
+ u32 pktstatus;
+ struct msgdma_csr *rxcsr =
+ (struct msgdma_csr *)priv->rx_dma_csr;
+ struct msgdma_response *rxresp =
+ (struct msgdma_response *)priv->rx_dma_resp;
+
+ if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
+ pktlength = ioread32(&rxresp->bytes_transferred);
+ pktstatus = ioread32(&rxresp->status);
+ rxstatus = pktstatus;
+ rxstatus = rxstatus << 16;
+ rxstatus |= (pktlength & 0xffff);
+ }
+ return rxstatus;
+}
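As the comment above notes, the return value packs the mSGDMA response status into the upper 16 bits and the byte count into the lower 16; a hedged sketch of caller-side unpacking (variable names are illustrative):

	u32 rxstatus = msgdma_rx_status(priv);

	if (rxstatus) {
		u16 pktstatus = rxstatus >> 16;		/* response status */
		u16 pktlength = rxstatus & 0xffff;	/* bytes transferred */
		/* inspect pktstatus for errors, hand pktlength bytes up */
	}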
diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h
new file mode 100644
index 000000000000..7f0f5bf2bba2
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdma.h
@@ -0,0 +1,34 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_MSGDMA_H__
+#define __ALTERA_MSGDMA_H__
+
+void msgdma_reset(struct altera_tse_private *);
+void msgdma_enable_txirq(struct altera_tse_private *);
+void msgdma_enable_rxirq(struct altera_tse_private *);
+void msgdma_disable_rxirq(struct altera_tse_private *);
+void msgdma_disable_txirq(struct altera_tse_private *);
+void msgdma_clear_rxirq(struct altera_tse_private *);
+void msgdma_clear_txirq(struct altera_tse_private *);
+u32 msgdma_tx_completions(struct altera_tse_private *);
+int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
+int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
+u32 msgdma_rx_status(struct altera_tse_private *);
+int msgdma_initialize(struct altera_tse_private *);
+void msgdma_uninitialize(struct altera_tse_private *);
+
+#endif /* __ALTERA_MSGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
new file mode 100644
index 000000000000..d7b59ba4019c
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -0,0 +1,167 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_MSGDMAHW_H__
+#define __ALTERA_MSGDMAHW_H__
+
+/* mSGDMA standard descriptor format
+ */
+struct msgdma_desc {
+ u32 read_addr; /* data buffer source address */
+ u32 write_addr; /* data buffer destination address */
+ u32 len; /* the number of bytes to transfer per descriptor */
+ u32 control; /* characteristics of the transfer */
+};
+
+/* mSGDMA extended descriptor format
+ */
+struct msgdma_extended_desc {
+ u32 read_addr_lo; /* data buffer source address low bits */
+ u32 write_addr_lo; /* data buffer destination address low bits */
+ u32 len; /* the number of bytes to transfer
+ * per descriptor
+ */
+ u32 burst_seq_num; /* bit 31:24 write burst
+ * bit 23:16 read burst
+ * bit 15:0 sequence number
+ */
+ u32 stride; /* bit 31:16 write stride
+ * bit 15:0 read stride
+ */
+ u32 read_addr_hi; /* data buffer source address high bits */
+ u32 write_addr_hi; /* data buffer destination address high bits */
+ u32 control; /* characteristics of the transfer */
+};
+
+/* mSGDMA descriptor control field bit definitions
+ */
+#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff)
+#define MSGDMA_DESC_CTL_GEN_SOP BIT(8)
+#define MSGDMA_DESC_CTL_GEN_EOP BIT(9)
+#define MSGDMA_DESC_CTL_PARK_READS BIT(10)
+#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11)
+#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12)
+#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13)
+#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14)
+#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15)
+#define MSGDMA_DESC_CTL_TR_ERR_IRQ (0xff << 16)
+#define MSGDMA_DESC_CTL_EARLY_DONE BIT(24)
+/* Writing ‘1’ to the ‘go’ bit commits the entire descriptor into the
+ * descriptor FIFO(s)
+ */
+#define MSGDMA_DESC_CTL_GO BIT(31)
+
+/* Tx buffer control flags
+ */
+#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
+ MSGDMA_DESC_CTL_GEN_EOP | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \
+ MSGDMA_DESC_CTL_END_ON_LEN | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_EARLY_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+/* mSGDMA extended descriptor stride definitions
+ */
+#define MSGDMA_DESC_TX_STRIDE (0x00010001)
+#define MSGDMA_DESC_RX_STRIDE (0x00010001)
+
+/* mSGDMA dispatcher control and status register map
+ */
+struct msgdma_csr {
+ u32 status; /* Read/Clear */
+ u32 control; /* Read/Write */
+ u32 rw_fill_level; /* bit 31:16 - write fill level
+ * bit 15:0 - read fill level
+ */
+ u32 resp_fill_level; /* bit 15:0 */
+ u32 rw_seq_num; /* bit 31:16 - write sequence number
+ * bit 15:0 - read sequence number
+ */
+ u32 pad[3]; /* reserved */
+};
+
+/* mSGDMA CSR status register bit definitions
+ */
+#define MSGDMA_CSR_STAT_BUSY BIT(0)
+#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1)
+#define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2)
+#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3)
+#define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4)
+#define MSGDMA_CSR_STAT_STOPPED BIT(5)
+#define MSGDMA_CSR_STAT_RESETTING BIT(6)
+#define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7)
+#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8)
+#define MSGDMA_CSR_STAT_IRQ BIT(9)
+#define MSGDMA_CSR_STAT_MASK 0x3FF
+#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ 0x1FF
+
+#define MSGDMA_CSR_STAT_BUSY_GET(v) GET_BIT_VALUE(v, 0)
+#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY_GET(v) GET_BIT_VALUE(v, 1)
+#define MSGDMA_CSR_STAT_DESC_BUF_FULL_GET(v) GET_BIT_VALUE(v, 2)
+#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY_GET(v) GET_BIT_VALUE(v, 3)
+#define MSGDMA_CSR_STAT_RESP_BUF_FULL_GET(v) GET_BIT_VALUE(v, 4)
+#define MSGDMA_CSR_STAT_STOPPED_GET(v) GET_BIT_VALUE(v, 5)
+#define MSGDMA_CSR_STAT_RESETTING_GET(v) GET_BIT_VALUE(v, 6)
+#define MSGDMA_CSR_STAT_STOPPED_ON_ERR_GET(v) GET_BIT_VALUE(v, 7)
+#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY_GET(v) GET_BIT_VALUE(v, 8)
+#define MSGDMA_CSR_STAT_IRQ_GET(v) GET_BIT_VALUE(v, 9)
+
+/* mSGDMA CSR control register bit definitions
+ */
+#define MSGDMA_CSR_CTL_STOP BIT(0)
+#define MSGDMA_CSR_CTL_RESET BIT(1)
+#define MSGDMA_CSR_CTL_STOP_ON_ERR BIT(2)
+#define MSGDMA_CSR_CTL_STOP_ON_EARLY BIT(3)
+#define MSGDMA_CSR_CTL_GLOBAL_INTR BIT(4)
+#define MSGDMA_CSR_CTL_STOP_DESCS BIT(5)
+
+/* mSGDMA CSR fill level bits
+ */
+#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v) (((v) & 0xffff0000) >> 16)
+#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
+#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
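For reference, the open-coded '& 0xffff' masks in altera_msgdma.c above could equally be written with these helpers; a small sketch:

	u32 level = ioread32(&txcsr->rw_fill_level);
	u32 inuse = MSGDMA_CSR_RD_FILL_LEVEL_GET(level);	/* level & 0xffff */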
+
+/* mSGDMA response register map
+ */
+struct msgdma_response {
+ u32 bytes_transferred;
+ u32 status;
+};
+
+/* mSGDMA response register bit definitions
+ */
+#define MSGDMA_RESP_EARLY_TERM BIT(8)
+#define MSGDMA_RESP_ERR_MASK 0xFF
+
+#endif /* __ALTERA_MSGDMAHW_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
new file mode 100644
index 000000000000..0ee96639ae44
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -0,0 +1,509 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/list.h>
+#include "altera_utils.h"
+#include "altera_tse.h"
+#include "altera_sgdmahw.h"
+#include "altera_sgdma.h"
+
+static void sgdma_descrip(struct sgdma_descrip *desc,
+ struct sgdma_descrip *ndesc,
+ dma_addr_t ndesc_phys,
+ dma_addr_t raddr,
+ dma_addr_t waddr,
+ u16 length,
+ int generate_eop,
+ int rfixed,
+ int wfixed);
+
+static int sgdma_async_write(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc);
+
+static int sgdma_async_read(struct altera_tse_private *priv);
+
+static dma_addr_t
+sgdma_txphysaddr(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc);
+
+static dma_addr_t
+sgdma_rxphysaddr(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc);
+
+static int sgdma_txbusy(struct altera_tse_private *priv);
+
+static int sgdma_rxbusy(struct altera_tse_private *priv);
+
+static void
+queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);
+
+static void
+queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);
+
+static struct tse_buffer *
+dequeue_tx(struct altera_tse_private *priv);
+
+static struct tse_buffer *
+dequeue_rx(struct altera_tse_private *priv);
+
+static struct tse_buffer *
+queue_rx_peekhead(struct altera_tse_private *priv);
+
+int sgdma_initialize(struct altera_tse_private *priv)
+{
+ priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
+
+ priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
+ SGDMA_CTRLREG_ILASTD;
+
+ INIT_LIST_HEAD(&priv->txlisthd);
+ INIT_LIST_HEAD(&priv->rxlisthd);
+
+ priv->rxdescphys = (dma_addr_t) 0;
+ priv->txdescphys = (dma_addr_t) 0;
+
+ priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+ priv->rxdescmem, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(priv->device, priv->rxdescphys)) {
+ sgdma_uninitialize(priv);
+ netdev_err(priv->dev, "error mapping rx descriptor memory\n");
+ return -EINVAL;
+ }
+
+ priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+ priv->txdescmem, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(priv->device, priv->txdescphys)) {
+ sgdma_uninitialize(priv);
+ netdev_err(priv->dev, "error mapping tx descriptor memory\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void sgdma_uninitialize(struct altera_tse_private *priv)
+{
+ if (priv->rxdescphys)
+ dma_unmap_single(priv->device, priv->rxdescphys,
+ priv->rxdescmem, DMA_BIDIRECTIONAL);
+
+ if (priv->txdescphys)
+ dma_unmap_single(priv->device, priv->txdescphys,
+ priv->txdescmem, DMA_TO_DEVICE);
+}
+
+/* This function resets the SGDMA controller and clears the
+ * descriptor memory used for transmits and receives.
+ */
+void sgdma_reset(struct altera_tse_private *priv)
+{
+ u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
+ u32 txdescriplen = priv->txdescmem;
+ u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
+ u32 rxdescriplen = priv->rxdescmem;
+ struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
+ struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
+
+ /* Initialize descriptor memory to 0 */
+ memset(ptxdescripmem, 0, txdescriplen);
+ memset(prxdescripmem, 0, rxdescriplen);
+
+ iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
+ iowrite32(0, &ptxsgdma->control);
+
+ iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
+ iowrite32(0, &prxsgdma->control);
+}
+
+void sgdma_enable_rxirq(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
+ tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
+}
+
+void sgdma_enable_txirq(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+ priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
+ tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
+}
+
+/* for SGDMA, RX interrupts remain enabled once enabled; disabling is a no-op */
+void sgdma_disable_rxirq(struct altera_tse_private *priv)
+{
+}
+
+/* for SGDMA, TX interrupts remain enabled once enabled; disabling is a no-op */
+void sgdma_disable_txirq(struct altera_tse_private *priv)
+{
+}
+
+void sgdma_clear_rxirq(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+}
+
+void sgdma_clear_txirq(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+ tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+}
+
+/* transmits a buffer through SGDMA. Returns the number of buffers
+ * transmitted, 0 if not possible.
+ *
+ * tx_lock is held by the caller
+ */
+int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+ int pktstx = 0;
+ struct sgdma_descrip *descbase =
+ (struct sgdma_descrip *)priv->tx_dma_desc;
+
+ struct sgdma_descrip *cdesc = &descbase[0];
+ struct sgdma_descrip *ndesc = &descbase[1];
+
+ /* wait 'til the tx sgdma is ready for the next transmit request */
+ if (sgdma_txbusy(priv))
+ return 0;
+
+ sgdma_descrip(cdesc, /* current descriptor */
+ ndesc, /* next descriptor */
+ sgdma_txphysaddr(priv, ndesc),
+ buffer->dma_addr, /* address of packet to xmit */
+ 0, /* write addr 0 for tx dma */
+ buffer->len, /* length of packet */
+ SGDMA_CONTROL_EOP, /* Generate EOP */
+ 0, /* read fixed */
+ SGDMA_CONTROL_WR_FIXED); /* write fixed */
+
+ pktstx = sgdma_async_write(priv, cdesc);
+
+ /* enqueue the request to the pending transmit queue */
+ queue_tx(priv, buffer);
+
+ return 1;
+}
+
+
+/* tx_lock held to protect access to queued tx list
+ */
+u32 sgdma_tx_completions(struct altera_tse_private *priv)
+{
+ u32 ready = 0;
+ struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
+
+ if (!sgdma_txbusy(priv) &&
+ ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+ (dequeue_tx(priv))) {
+ ready = 1;
+ }
+
+ return ready;
+}
+
+int sgdma_add_rx_desc(struct altera_tse_private *priv,
+ struct tse_buffer *rxbuffer)
+{
+ queue_rx(priv, rxbuffer);
+ return sgdma_async_read(priv);
+}
+
+/* status is returned on upper 16 bits,
+ * length is returned in lower 16 bits
+ */
+u32 sgdma_rx_status(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
+ struct sgdma_descrip *desc = NULL;
+ int pktsrx;
+ unsigned int rxstatus = 0;
+ unsigned int pktlength = 0;
+ unsigned int pktstatus = 0;
+ struct tse_buffer *rxbuffer = NULL;
+
+ dma_sync_single_for_cpu(priv->device,
+ priv->rxdescphys,
+ priv->rxdescmem,
+ DMA_BIDIRECTIONAL);
+
+ desc = &base[0];
+ if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
+ (desc->status & SGDMA_STATUS_EOP)) {
+ pktlength = desc->bytes_xferred;
+ pktstatus = desc->status & 0x3f;
+ rxstatus = pktstatus;
+ rxstatus = rxstatus << 16;
+ rxstatus |= (pktlength & 0xffff);
+
+ desc->status = 0;
+
+ rxbuffer = dequeue_rx(priv);
+ if (rxbuffer == NULL)
+ netdev_err(priv->dev,
+ "sgdma rx and rx queue empty!\n");
+
+ /* kick the rx sgdma after reaping this descriptor */
+ pktsrx = sgdma_async_read(priv);
+ }
+
+ return rxstatus;
+}
+
+
+/* Private functions */
+static void sgdma_descrip(struct sgdma_descrip *desc,
+ struct sgdma_descrip *ndesc,
+ dma_addr_t ndesc_phys,
+ dma_addr_t raddr,
+ dma_addr_t waddr,
+ u16 length,
+ int generate_eop,
+ int rfixed,
+ int wfixed)
+{
+ /* Clear the next descriptor as not owned by hardware */
+ u32 ctrl = ndesc->control;
+ ctrl &= ~SGDMA_CONTROL_HW_OWNED;
+ ndesc->control = ctrl;
+
+ ctrl = 0;
+ ctrl = SGDMA_CONTROL_HW_OWNED;
+ ctrl |= generate_eop;
+ ctrl |= rfixed;
+ ctrl |= wfixed;
+
+ /* Channel is implicitly zero, initialized to 0 by default */
+
+ desc->raddr = raddr;
+ desc->waddr = waddr;
+ desc->next = lower_32_bits(ndesc_phys);
+ desc->control = ctrl;
+ desc->status = 0;
+ desc->rburst = 0;
+ desc->wburst = 0;
+ desc->bytes = length;
+ desc->bytes_xferred = 0;
+}
+
+/* If the hardware is busy, don't restart the async read.
+ * If the status register is 0 (initial state), restart the async read,
+ * typically when a receive buffer is being populated for the first time.
+ * If the read status indicates not busy and reports a status, restart
+ * the async DMA read.
+ */
+static int sgdma_async_read(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ struct sgdma_descrip *descbase =
+ (struct sgdma_descrip *)priv->rx_dma_desc;
+
+ struct sgdma_descrip *cdesc = &descbase[0];
+ struct sgdma_descrip *ndesc = &descbase[1];
+
+ unsigned int sts = ioread32(&csr->status);
+ struct tse_buffer *rxbuffer = NULL;
+
+ if (!sgdma_rxbusy(priv)) {
+ rxbuffer = queue_rx_peekhead(priv);
+ if (rxbuffer == NULL)
+ return 0;
+
+ sgdma_descrip(cdesc, /* current descriptor */
+ ndesc, /* next descriptor */
+ sgdma_rxphysaddr(priv, ndesc),
+ 0, /* read addr 0 for rx dma */
+ rxbuffer->dma_addr, /* write addr for rx dma */
+ 0, /* read 'til EOP */
+ 0, /* EOP: NA for rx dma */
+ 0, /* read fixed: NA for rx dma */
+ 0); /* SOP: NA for rx DMA */
+
+ /* clear control and status */
+ iowrite32(0, &csr->control);
+
+ /* If status available, clear those bits */
+ if (sts & 0xf)
+ iowrite32(0xf, &csr->status);
+
+ dma_sync_single_for_device(priv->device,
+ priv->rxdescphys,
+ priv->rxdescmem,
+ DMA_BIDIRECTIONAL);
+
+ iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+ &csr->next_descrip);
+
+ iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+ &csr->control);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int sgdma_async_write(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+
+ if (sgdma_txbusy(priv))
+ return 0;
+
+ /* clear control and status */
+ iowrite32(0, &csr->control);
+ iowrite32(0x1f, &csr->status);
+
+ dma_sync_single_for_device(priv->device, priv->txdescphys,
+ priv->txdescmem, DMA_TO_DEVICE);
+
+ iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+ &csr->next_descrip);
+
+ iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
+ &csr->control);
+
+ return 1;
+}
+
+static dma_addr_t
+sgdma_txphysaddr(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc)
+{
+ dma_addr_t paddr = priv->txdescmem_busaddr;
+ uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
+ return (dma_addr_t)((uintptr_t)paddr + offs);
+}
+
+static dma_addr_t
+sgdma_rxphysaddr(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc)
+{
+ dma_addr_t paddr = priv->rxdescmem_busaddr;
+ uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
+ return (dma_addr_t)((uintptr_t)paddr + offs);
+}
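Both helpers map a CPU pointer into the descriptor region back to its bus address by plain offset arithmetic; a short worked example, assuming the packed struct sgdma_descrip above is 32 bytes:

	struct sgdma_descrip *descbase = (struct sgdma_descrip *)priv->rx_dma_desc;
	dma_addr_t busaddr = sgdma_rxphysaddr(priv, &descbase[1]);
	/* busaddr == priv->rxdescmem_busaddr + sizeof(struct sgdma_descrip),
	 * i.e. + 32 bytes with the packed layout above (assumption).
	 */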
+
+#define list_remove_head(list, entry, type, member) \
+ do { \
+ entry = NULL; \
+ if (!list_empty(list)) { \
+ entry = list_entry((list)->next, type, member); \
+ list_del_init(&entry->member); \
+ } \
+ } while (0)
+
+#define list_peek_head(list, entry, type, member) \
+ do { \
+ entry = NULL; \
+ if (!list_empty(list)) { \
+ entry = list_entry((list)->next, type, member); \
+ } \
+ } while (0)
+
+/* adds a tse_buffer to the tail of a tx buffer list.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static void
+queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+ list_add_tail(&buffer->lh, &priv->txlisthd);
+}
+
+
+/* adds a tse_buffer to the tail of a rx buffer list
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static void
+queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+ list_add_tail(&buffer->lh, &priv->rxlisthd);
+}
+
+/* dequeues a tse_buffer from the transmit buffer list, otherwise
+ * returns NULL if empty.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static struct tse_buffer *
+dequeue_tx(struct altera_tse_private *priv)
+{
+ struct tse_buffer *buffer = NULL;
+ list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
+ return buffer;
+}
+
+/* dequeues a tse_buffer from the receive buffer list, otherwise
+ * returns NULL if empty
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static struct tse_buffer *
+dequeue_rx(struct altera_tse_private *priv)
+{
+ struct tse_buffer *buffer = NULL;
+ list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
+ return buffer;
+}
+
+/* dequeues a tse_buffer from the receive buffer list, otherwise
+ * returns NULL if empty
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list while the
+ * head is being examined.
+ */
+static struct tse_buffer *
+queue_rx_peekhead(struct altera_tse_private *priv)
+{
+ struct tse_buffer *buffer = NULL;
+ list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
+ return buffer;
+}
+
+/* check and return rx sgdma status without polling
+ */
+static int sgdma_rxbusy(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+}
+
+/* waits for the tx sgdma to finish its current operation, returns 0
+ * when it transitions to nonbusy, returns 1 if the operation times out
+ */
+static int sgdma_txbusy(struct altera_tse_private *priv)
+{
+ int delay = 0;
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+
+ /* if DMA is busy, wait for the current transaction to finish */
+ while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+ udelay(1);
+
+ if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+ netdev_err(priv->dev, "timeout waiting for tx dma\n");
+ return 1;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
new file mode 100644
index 000000000000..07d471729dc4
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.h
@@ -0,0 +1,35 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_SGDMA_H__
+#define __ALTERA_SGDMA_H__
+
+void sgdma_reset(struct altera_tse_private *);
+void sgdma_enable_txirq(struct altera_tse_private *);
+void sgdma_enable_rxirq(struct altera_tse_private *);
+void sgdma_disable_rxirq(struct altera_tse_private *);
+void sgdma_disable_txirq(struct altera_tse_private *);
+void sgdma_clear_rxirq(struct altera_tse_private *);
+void sgdma_clear_txirq(struct altera_tse_private *);
+int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
+u32 sgdma_tx_completions(struct altera_tse_private *);
+int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
+void sgdma_status(struct altera_tse_private *);
+u32 sgdma_rx_status(struct altera_tse_private *);
+int sgdma_initialize(struct altera_tse_private *);
+void sgdma_uninitialize(struct altera_tse_private *);
+
+#endif /* __ALTERA_SGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
new file mode 100644
index 000000000000..ba3334f35383
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -0,0 +1,124 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_SGDMAHW_H__
+#define __ALTERA_SGDMAHW_H__
+
+/* SGDMA descriptor structure */
+struct sgdma_descrip {
+ unsigned int raddr; /* address of data to be read */
+ unsigned int pad1;
+ unsigned int waddr;
+ unsigned int pad2;
+ unsigned int next;
+ unsigned int pad3;
+ unsigned short bytes;
+ unsigned char rburst;
+ unsigned char wburst;
+ unsigned short bytes_xferred; /* 16 bits, bytes xferred */
+
+ /* bit 0: error
+ * bit 1: length error
+ * bit 2: crc error
+ * bit 3: truncated error
+ * bit 4: phy error
+ * bit 5: collision error
+ * bit 6: reserved
+ * bit 7: status eop for recv case
+ */
+ unsigned char status;
+
+ /* bit 0: eop
+ * bit 1: read_fixed
+ * bit 2: write fixed
+ * bits 3,4,5,6: Channel (always 0)
+ * bit 7: hardware owned
+ */
+ unsigned char control;
+} __packed;
+
+
+#define SGDMA_STATUS_ERR BIT(0)
+#define SGDMA_STATUS_LENGTH_ERR BIT(1)
+#define SGDMA_STATUS_CRC_ERR BIT(2)
+#define SGDMA_STATUS_TRUNC_ERR BIT(3)
+#define SGDMA_STATUS_PHY_ERR BIT(4)
+#define SGDMA_STATUS_COLL_ERR BIT(5)
+#define SGDMA_STATUS_EOP BIT(7)
+
+#define SGDMA_CONTROL_EOP BIT(0)
+#define SGDMA_CONTROL_RD_FIXED BIT(1)
+#define SGDMA_CONTROL_WR_FIXED BIT(2)
+
+/* Channel is always 0, so just zero initialize it */
+
+#define SGDMA_CONTROL_HW_OWNED BIT(7)
+
+/* SGDMA register space */
+struct sgdma_csr {
+ /* bit 0: error
+ * bit 1: eop
+ * bit 2: descriptor completed
+ * bit 3: chain completed
+ * bit 4: busy
+ * remainder reserved
+ */
+ u32 status;
+ u32 pad1[3];
+
+ /* bit 0: interrupt on error
+ * bit 1: interrupt on eop
+ * bit 2: interrupt after every descriptor
+ * bit 3: interrupt after last descrip in a chain
+ * bit 4: global interrupt enable
+ * bit 5: starts descriptor processing
+ * bit 6: stop core on dma error
+ * bit 7: interrupt on max descriptors
+ * bits 8-15: max descriptors to generate interrupt
+ * bit 16: Software reset
+ * bit 17: clears owned by hardware if 0, does not clear otherwise
+ * bit 18: enables descriptor polling mode
+ * bit 19-26: clocks before polling again
+ * bit 27-30: reserved
+ * bit 31: clear interrupt
+ */
+ u32 control;
+ u32 pad2[3];
+ u32 next_descrip;
+ u32 pad3[3];
+};
+
+
+#define SGDMA_STSREG_ERR BIT(0) /* Error */
+#define SGDMA_STSREG_EOP BIT(1) /* EOP */
+#define SGDMA_STSREG_DESCRIP BIT(2) /* Descriptor completed */
+#define SGDMA_STSREG_CHAIN BIT(3) /* Chain completed */
+#define SGDMA_STSREG_BUSY BIT(4) /* Controller busy */
+
+#define SGDMA_CTRLREG_IOE BIT(0) /* Interrupt on error */
+#define SGDMA_CTRLREG_IOEOP BIT(1) /* Interrupt on EOP */
+#define SGDMA_CTRLREG_IDESCRIP BIT(2) /* Interrupt after every descriptor */
+#define SGDMA_CTRLREG_ILASTD BIT(3) /* Interrupt after last descriptor */
+#define SGDMA_CTRLREG_INTEN BIT(4) /* Global Interrupt enable */
+#define SGDMA_CTRLREG_START BIT(5) /* starts descriptor processing */
+#define SGDMA_CTRLREG_STOPERR BIT(6) /* stop on dma error */
+#define SGDMA_CTRLREG_INTMAX BIT(7) /* Interrupt on max descriptors */
+#define SGDMA_CTRLREG_RESET BIT(16)/* Software reset */
+#define SGDMA_CTRLREG_COBHW BIT(17)/* Clears owned by hardware */
+#define SGDMA_CTRLREG_POLL BIT(18)/* enables descriptor polling mode */
+#define SGDMA_CTRLREG_CLRINT BIT(31)/* Clears interrupt */
+
+#endif /* __ALTERA_SGDMAHW_H__ */
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
new file mode 100644
index 000000000000..8feeed05de0e
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -0,0 +1,486 @@
+/* Altera Triple-Speed Ethernet MAC driver
+ * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
+ *
+ * Contributors:
+ * Dalon Westergreen
+ * Thomas Chou
+ * Ian Abbott
+ * Yuriy Kozlov
+ * Tobias Klauser
+ * Andriy Smolskyy
+ * Roman Bulgakov
+ * Dmytro Mytarchuk
+ * Matthew Gerlach
+ *
+ * Original driver contributed by SLS.
+ * Major updates contributed by GlobalLogic
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_TSE_H__
+#define __ALTERA_TSE_H__
+
+#define ALTERA_TSE_RESOURCE_NAME "altera_tse"
+
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000
+#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in
+ * bytes
+ */
+/* Rx FIFO default settings */
+#define ALTERA_TSE_RX_SECTION_EMPTY 16
+#define ALTERA_TSE_RX_SECTION_FULL 0
+#define ALTERA_TSE_RX_ALMOST_EMPTY 8
+#define ALTERA_TSE_RX_ALMOST_FULL 8
+
+/* Tx FIFO default settings */
+#define ALTERA_TSE_TX_SECTION_EMPTY 16
+#define ALTERA_TSE_TX_SECTION_FULL 0
+#define ALTERA_TSE_TX_ALMOST_EMPTY 8
+#define ALTERA_TSE_TX_ALMOST_FULL 3
+
+/* MAC function configuration default settings */
+#define ALTERA_TSE_TX_IPG_LENGTH 12
+
+#define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1)
+
+/* MAC Command_Config Register Bit Definitions
+ */
+#define MAC_CMDCFG_TX_ENA BIT(0)
+#define MAC_CMDCFG_RX_ENA BIT(1)
+#define MAC_CMDCFG_XON_GEN BIT(2)
+#define MAC_CMDCFG_ETH_SPEED BIT(3)
+#define MAC_CMDCFG_PROMIS_EN BIT(4)
+#define MAC_CMDCFG_PAD_EN BIT(5)
+#define MAC_CMDCFG_CRC_FWD BIT(6)
+#define MAC_CMDCFG_PAUSE_FWD BIT(7)
+#define MAC_CMDCFG_PAUSE_IGNORE BIT(8)
+#define MAC_CMDCFG_TX_ADDR_INS BIT(9)
+#define MAC_CMDCFG_HD_ENA BIT(10)
+#define MAC_CMDCFG_EXCESS_COL BIT(11)
+#define MAC_CMDCFG_LATE_COL BIT(12)
+#define MAC_CMDCFG_SW_RESET BIT(13)
+#define MAC_CMDCFG_MHASH_SEL BIT(14)
+#define MAC_CMDCFG_LOOP_ENA BIT(15)
+#define MAC_CMDCFG_TX_ADDR_SEL(v) (((v) & 0x7) << 16)
+#define MAC_CMDCFG_MAGIC_ENA BIT(19)
+#define MAC_CMDCFG_SLEEP BIT(20)
+#define MAC_CMDCFG_WAKEUP BIT(21)
+#define MAC_CMDCFG_XOFF_GEN BIT(22)
+#define MAC_CMDCFG_CNTL_FRM_ENA BIT(23)
+#define MAC_CMDCFG_NO_LGTH_CHECK BIT(24)
+#define MAC_CMDCFG_ENA_10 BIT(25)
+#define MAC_CMDCFG_RX_ERR_DISC BIT(26)
+#define MAC_CMDCFG_DISABLE_READ_TIMEOUT BIT(27)
+#define MAC_CMDCFG_CNT_RESET BIT(31)
+
+#define MAC_CMDCFG_TX_ENA_GET(v) GET_BIT_VALUE(v, 0)
+#define MAC_CMDCFG_RX_ENA_GET(v) GET_BIT_VALUE(v, 1)
+#define MAC_CMDCFG_XON_GEN_GET(v) GET_BIT_VALUE(v, 2)
+#define MAC_CMDCFG_ETH_SPEED_GET(v) GET_BIT_VALUE(v, 3)
+#define MAC_CMDCFG_PROMIS_EN_GET(v) GET_BIT_VALUE(v, 4)
+#define MAC_CMDCFG_PAD_EN_GET(v) GET_BIT_VALUE(v, 5)
+#define MAC_CMDCFG_CRC_FWD_GET(v) GET_BIT_VALUE(v, 6)
+#define MAC_CMDCFG_PAUSE_FWD_GET(v) GET_BIT_VALUE(v, 7)
+#define MAC_CMDCFG_PAUSE_IGNORE_GET(v) GET_BIT_VALUE(v, 8)
+#define MAC_CMDCFG_TX_ADDR_INS_GET(v) GET_BIT_VALUE(v, 9)
+#define MAC_CMDCFG_HD_ENA_GET(v) GET_BIT_VALUE(v, 10)
+#define MAC_CMDCFG_EXCESS_COL_GET(v) GET_BIT_VALUE(v, 11)
+#define MAC_CMDCFG_LATE_COL_GET(v) GET_BIT_VALUE(v, 12)
+#define MAC_CMDCFG_SW_RESET_GET(v) GET_BIT_VALUE(v, 13)
+#define MAC_CMDCFG_MHASH_SEL_GET(v) GET_BIT_VALUE(v, 14)
+#define MAC_CMDCFG_LOOP_ENA_GET(v) GET_BIT_VALUE(v, 15)
+#define MAC_CMDCFG_TX_ADDR_SEL_GET(v) (((v) >> 16) & 0x7)
+#define MAC_CMDCFG_MAGIC_ENA_GET(v) GET_BIT_VALUE(v, 19)
+#define MAC_CMDCFG_SLEEP_GET(v) GET_BIT_VALUE(v, 20)
+#define MAC_CMDCFG_WAKEUP_GET(v) GET_BIT_VALUE(v, 21)
+#define MAC_CMDCFG_XOFF_GEN_GET(v) GET_BIT_VALUE(v, 22)
+#define MAC_CMDCFG_CNTL_FRM_ENA_GET(v) GET_BIT_VALUE(v, 23)
+#define MAC_CMDCFG_NO_LGTH_CHECK_GET(v) GET_BIT_VALUE(v, 24)
+#define MAC_CMDCFG_ENA_10_GET(v) GET_BIT_VALUE(v, 25)
+#define MAC_CMDCFG_RX_ERR_DISC_GET(v) GET_BIT_VALUE(v, 26)
+#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
+#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31)
+
+/* MDIO registers within MAC register Space
+ */
+struct altera_tse_mdio {
+ u32 control; /* PHY device operation control register */
+ u32 status; /* PHY device operation status register */
+ u32 phy_id1; /* Bits 31:16 of PHY identifier */
+ u32 phy_id2; /* Bits 15:0 of PHY identifier */
+ u32 auto_negotiation_advertisement; /* Auto-negotiation
+ * advertisement
+ * register
+ */
+ u32 remote_partner_base_page_ability;
+
+ u32 reg6;
+ u32 reg7;
+ u32 reg8;
+ u32 reg9;
+ u32 rega;
+ u32 regb;
+ u32 regc;
+ u32 regd;
+ u32 rege;
+ u32 regf;
+ u32 reg10;
+ u32 reg11;
+ u32 reg12;
+ u32 reg13;
+ u32 reg14;
+ u32 reg15;
+ u32 reg16;
+ u32 reg17;
+ u32 reg18;
+ u32 reg19;
+ u32 reg1a;
+ u32 reg1b;
+ u32 reg1c;
+ u32 reg1d;
+ u32 reg1e;
+ u32 reg1f;
+};
+
+/* MAC register Space. Note that some of these registers may or may not be
+ * present depending upon options chosen by the user when the core was
+ * configured and built. Please consult the Altera Triple Speed Ethernet User
+ * Guide for details.
+ */
+struct altera_tse_mac {
+ /* Bits 15:0: MegaCore function revision (0x0800). Bit 31:16: Customer
+ * specific revision
+ */
+ u32 megacore_revision;
+ /* Provides a memory location for user applications to test the device
+ * memory operation.
+ */
+ u32 scratch_pad;
+ /* The host processor uses this register to control and configure the
+ * MAC block
+ */
+ u32 command_config;
+ /* 32-bit primary MAC address word 0 bits 0 to 31 of the primary
+ * MAC address
+ */
+ u32 mac_addr_0;
+ /* 32-bit primary MAC address word 1 bits 32 to 47 of the primary
+ * MAC address
+ */
+ u32 mac_addr_1;
+ /* 14-bit maximum frame length used by the MAC receive logic */
+ u32 frm_length;
+ /* The pause quanta is used in each pause frame sent to a remote
+ * Ethernet device, in increments of 512 Ethernet bit times
+ */
+ u32 pause_quanta;
+ /* 12-bit receive FIFO section-empty threshold */
+ u32 rx_section_empty;
+ /* 12-bit receive FIFO section-full threshold */
+ u32 rx_section_full;
+ /* 12-bit transmit FIFO section-empty threshold */
+ u32 tx_section_empty;
+ /* 12-bit transmit FIFO section-full threshold */
+ u32 tx_section_full;
+ /* 12-bit receive FIFO almost-empty threshold */
+ u32 rx_almost_empty;
+ /* 12-bit receive FIFO almost-full threshold */
+ u32 rx_almost_full;
+ /* 12-bit transmit FIFO almost-empty threshold */
+ u32 tx_almost_empty;
+ /* 12-bit transmit FIFO almost-full threshold */
+ u32 tx_almost_full;
+ /* MDIO address of PHY Device 0. Bits 0 to 4 hold a 5-bit PHY address */
+ u32 mdio_phy0_addr;
+ /* MDIO address of PHY Device 1. Bits 0 to 4 hold a 5-bit PHY address */
+ u32 mdio_phy1_addr;
+
+ /* Bits [15:0]: 16-bit holdoff quanta */
+ u32 holdoff_quant;
+
+ /* only if 100/1000 BaseX PCS, reserved otherwise */
+ u32 reserved1[5];
+
+ /* Minimum IPG between consecutive transmit frame in terms of bytes */
+ u32 tx_ipg_length;
+
+ /* IEEE 802.3 oEntity Managed Object Support */
+
+ /* The MAC addresses */
+ u32 mac_id_1;
+ u32 mac_id_2;
+
+ /* Number of frames transmitted without error including pause frames */
+ u32 frames_transmitted_ok;
+ /* Number of frames received without error including pause frames */
+ u32 frames_received_ok;
+ /* Number of frames received with a CRC error */
+ u32 frames_check_sequence_errors;
+ /* Frame received with an alignment error */
+ u32 alignment_errors;
+ /* Sum of payload and padding octets of frames transmitted without
+ * error
+ */
+ u32 octets_transmitted_ok;
+ /* Sum of payload and padding octets of frames received without error */
+ u32 octets_received_ok;
+
+ /* IEEE 802.3 oPausedEntity Managed Object Support */
+
+ /* Number of transmitted pause frames */
+ u32 tx_pause_mac_ctrl_frames;
+ /* Number of Received pause frames */
+ u32 rx_pause_mac_ctrl_frames;
+
+ /* IETF MIB (MIB-II) Object Support */
+
+ /* Number of frames received with error */
+ u32 if_in_errors;
+ /* Number of frames transmitted with error */
+ u32 if_out_errors;
+ /* Number of valid received unicast frames */
+ u32 if_in_ucast_pkts;
+ /* Number of valid received multicasts frames (without pause) */
+ u32 if_in_multicast_pkts;
+ /* Number of valid received broadcast frames */
+ u32 if_in_broadcast_pkts;
+ u32 if_out_discards;
+ /* The number of valid unicast frames transmitted */
+ u32 if_out_ucast_pkts;
+ /* The number of valid multicast frames transmitted,
+ * excluding pause frames
+ */
+ u32 if_out_multicast_pkts;
+ u32 if_out_broadcast_pkts;
+
+ /* IETF RMON MIB Object Support */
+
+ /* Counts the number of dropped packets due to internal errors
+ * of the MAC client.
+ */
+ u32 ether_stats_drop_events;
+ /* Total number of bytes received. Good and bad frames. */
+ u32 ether_stats_octets;
+ /* Total number of packets received. Counts good and bad packets. */
+ u32 ether_stats_pkts;
+ /* Number of packets received with less than 64 bytes. */
+ u32 ether_stats_undersize_pkts;
+ /* The number of frames received that are longer than the
+ * value configured in the frm_length register
+ */
+ u32 ether_stats_oversize_pkts;
+ /* Number of received packet with 64 bytes */
+ u32 ether_stats_pkts_64_octets;
+ /* Frames (good and bad) with 65 to 127 bytes */
+ u32 ether_stats_pkts_65to127_octets;
+ /* Frames (good and bad) with 128 to 255 bytes */
+ u32 ether_stats_pkts_128to255_octets;
+ /* Frames (good and bad) with 256 to 511 bytes */
+ u32 ether_stats_pkts_256to511_octets;
+ /* Frames (good and bad) with 512 to 1023 bytes */
+ u32 ether_stats_pkts_512to1023_octets;
+ /* Frames (good and bad) with 1024 to 1518 bytes */
+ u32 ether_stats_pkts_1024to1518_octets;
+
+ /* Any frame length from 1519 to the maximum length configured in the
+ * frm_length register, if it is greater than 1518
+ */
+ u32 ether_stats_pkts_1519tox_octets;
+ /* Too long frames with CRC error */
+ u32 ether_stats_jabbers;
+ /* Too short frames with CRC error */
+ u32 ether_stats_fragments;
+
+ u32 reserved2;
+
+ /* FIFO control register */
+ u32 tx_cmd_stat;
+ u32 rx_cmd_stat;
+
+ /* Extended Statistics Counters */
+ u32 msb_octets_transmitted_ok;
+ u32 msb_octets_received_ok;
+ u32 msb_ether_stats_octets;
+
+ u32 reserved3;
+
+ /* Multicast address resolution table, mapped in the controller address
+ * space
+ */
+ u32 hash_table[64];
+
+ /* Registers 0 to 31 within PHY device 0/1 connected to the MDIO PHY
+ * management interface
+ */
+ struct altera_tse_mdio mdio_phy0;
+ struct altera_tse_mdio mdio_phy1;
+
+ /* 4 Supplemental MAC Addresses */
+ u32 supp_mac_addr_0_0;
+ u32 supp_mac_addr_0_1;
+ u32 supp_mac_addr_1_0;
+ u32 supp_mac_addr_1_1;
+ u32 supp_mac_addr_2_0;
+ u32 supp_mac_addr_2_1;
+ u32 supp_mac_addr_3_0;
+ u32 supp_mac_addr_3_1;
+
+ u32 reserved4[8];
+
+ /* IEEE 1588v2 Feature */
+ u32 tx_period;
+ u32 tx_adjust_fns;
+ u32 tx_adjust_ns;
+ u32 rx_period;
+ u32 rx_adjust_fns;
+ u32 rx_adjust_ns;
+
+ u32 reserved5[42];
+};
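+
+/* The structure above is only ever accessed through an __iomem pointer;
+ * individual registers are read and written with ioread32()/iowrite32(),
+ * for example (as done throughout this driver):
+ *
+ *	u32 cfg = ioread32(&mac->command_config);
+ *	iowrite32(cfg | MAC_CMDCFG_TX_ENA, &mac->command_config);
+ */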
+
+/* Transmit and Receive Command Registers Bit Definitions
+ */
+#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17)
+#define ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 BIT(18)
+#define ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16 BIT(25)
+
+/* Wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct tse_buffer {
+ struct list_head lh;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 len;
+ int mapped_as_page;
+};
+
+struct altera_tse_private;
+
+#define ALTERA_DTYPE_SGDMA 1
+#define ALTERA_DTYPE_MSGDMA 2
+
+/* standard DMA interface for SGDMA and MSGDMA */
+struct altera_dmaops {
+ int altera_dtype;
+ int dmamask;
+ void (*reset_dma)(struct altera_tse_private *);
+ void (*enable_txirq)(struct altera_tse_private *);
+ void (*enable_rxirq)(struct altera_tse_private *);
+ void (*disable_txirq)(struct altera_tse_private *);
+ void (*disable_rxirq)(struct altera_tse_private *);
+ void (*clear_txirq)(struct altera_tse_private *);
+ void (*clear_rxirq)(struct altera_tse_private *);
+ int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
+ u32 (*tx_completions)(struct altera_tse_private *);
+ int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
+ u32 (*get_rx_status)(struct altera_tse_private *);
+ int (*init_dma)(struct altera_tse_private *);
+ void (*uninit_dma)(struct altera_tse_private *);
+};
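+
+/* Callers never invoke the SGDMA or MSGDMA routines directly; they go
+ * through the ops table selected at probe time, e.g.
+ *
+ *	priv->dmaops->tx_buffer(priv, buffer);
+ *	priv->dmaops->add_rx_desc(priv, rxbuffer);
+ *
+ * which keeps the MAC code identical for both DMA variants.
+ */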
+
+/* This structure is private to each device.
+ */
+struct altera_tse_private {
+ struct net_device *dev;
+ struct device *device;
+ struct napi_struct napi;
+
+ /* MAC address space */
+ struct altera_tse_mac __iomem *mac_dev;
+
+ /* TSE Revision */
+ u32 revision;
+
+ /* mSGDMA Rx Dispatcher address space */
+ void __iomem *rx_dma_csr;
+ void __iomem *rx_dma_desc;
+ void __iomem *rx_dma_resp;
+
+ /* mSGDMA Tx Dispatcher address space */
+ void __iomem *tx_dma_csr;
+ void __iomem *tx_dma_desc;
+
+ /* Rx buffers queue */
+ struct tse_buffer *rx_ring;
+ u32 rx_cons;
+ u32 rx_prod;
+ u32 rx_ring_size;
+ u32 rx_dma_buf_sz;
+
+ /* Tx ring buffer */
+ struct tse_buffer *tx_ring;
+ u32 tx_prod;
+ u32 tx_cons;
+ u32 tx_ring_size;
+
+ /* Interrupts */
+ u32 tx_irq;
+ u32 rx_irq;
+
+ /* RX/TX MAC FIFO configs */
+ u32 tx_fifo_depth;
+ u32 rx_fifo_depth;
+ u32 max_mtu;
+
+ /* Hash filter settings */
+ u32 hash_filter;
+ u32 added_unicast;
+
+ /* Descriptor memory info for managing SGDMA */
+ u32 txdescmem;
+ u32 rxdescmem;
+ dma_addr_t rxdescmem_busaddr;
+ dma_addr_t txdescmem_busaddr;
+ u32 txctrlreg;
+ u32 rxctrlreg;
+ dma_addr_t rxdescphys;
+ dma_addr_t txdescphys;
+
+ struct list_head txlisthd;
+ struct list_head rxlisthd;
+
+ /* MAC command_config register protection */
+ spinlock_t mac_cfg_lock;
+ /* Tx path protection */
+ spinlock_t tx_lock;
+ /* Rx DMA & interrupt control protection */
+ spinlock_t rxdma_irq_lock;
+
+ /* PHY */
+ int phy_addr; /* PHY's MDIO address, -1 for autodetection */
+ phy_interface_t phy_iface;
+ struct mii_bus *mdio;
+ struct phy_device *phydev;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+
+ /* ethtool msglvl option */
+ u32 msg_enable;
+
+ struct altera_dmaops *dmaops;
+};
+
+/* Function prototypes
+ */
+void altera_tse_set_ethtool_ops(struct net_device *);
+
+#endif /* __ALTERA_TSE_H__ */
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
new file mode 100644
index 000000000000..319ca74f5e74
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -0,0 +1,235 @@
+/* Ethtool support for Altera Triple-Speed Ethernet MAC driver
+ * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
+ *
+ * Contributors:
+ * Dalon Westergreen
+ * Thomas Chou
+ * Ian Abbott
+ * Yuriy Kozlov
+ * Tobias Klauser
+ * Andriy Smolskyy
+ * Roman Bulgakov
+ * Dmytro Mytarchuk
+ *
+ * Original driver contributed by SLS.
+ * Major updates contributed by GlobalLogic
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "altera_tse.h"
+
+#define TSE_STATS_LEN 31
+#define TSE_NUM_REGS 128
+
+static char const stat_gstrings[][ETH_GSTRING_LEN] = {
+ "tx_packets",
+ "rx_packets",
+ "rx_crc_errors",
+ "rx_align_errors",
+ "tx_bytes",
+ "rx_bytes",
+ "tx_pause",
+ "rx_pause",
+ "rx_errors",
+ "tx_errors",
+ "rx_unicast",
+ "rx_multicast",
+ "rx_broadcast",
+ "tx_discards",
+ "tx_unicast",
+ "tx_multicast",
+ "tx_broadcast",
+ "ether_drops",
+ "rx_total_bytes",
+ "rx_total_packets",
+ "rx_undersize",
+ "rx_oversize",
+ "rx_64_bytes",
+ "rx_65_127_bytes",
+ "rx_128_255_bytes",
+ "rx_256_511_bytes",
+ "rx_512_1023_bytes",
+ "rx_1024_1518_bytes",
+ "rx_gte_1519_bytes",
+ "rx_jabbers",
+ "rx_runts",
+};
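+
+/* Note: the order of these strings must match the order in which
+ * tse_fill_stats() below writes the counters, and TSE_STATS_LEN must stay
+ * equal to the number of entries in this array.
+ */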
+
+static void tse_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ u32 rev = ioread32(&priv->mac_dev->megacore_revision);
+
+ strcpy(info->driver, "Altera TSE MAC IP Driver");
+ strcpy(info->version, "v8.0");
+ snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
+ rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
+ sprintf(info->bus_info, "platform");
+}
+
+/* Fill in a buffer with the strings which correspond to the
+ * stats
+ */
+static void tse_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ memcpy(buf, stat_gstrings, TSE_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 *buf)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct altera_tse_mac *mac = priv->mac_dev;
+ u64 ext;
+
+ buf[0] = ioread32(&mac->frames_transmitted_ok);
+ buf[1] = ioread32(&mac->frames_received_ok);
+ buf[2] = ioread32(&mac->frames_check_sequence_errors);
+ buf[3] = ioread32(&mac->alignment_errors);
+
+ /* Extended aOctetsTransmittedOK counter */
+ ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
+ ext |= ioread32(&mac->octets_transmitted_ok);
+ buf[4] = ext;
+
+ /* Extended aOctetsReceivedOK counter */
+ ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
+ ext |= ioread32(&mac->octets_received_ok);
+ buf[5] = ext;
+
+ buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
+ buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
+ buf[8] = ioread32(&mac->if_in_errors);
+ buf[9] = ioread32(&mac->if_out_errors);
+ buf[10] = ioread32(&mac->if_in_ucast_pkts);
+ buf[11] = ioread32(&mac->if_in_multicast_pkts);
+ buf[12] = ioread32(&mac->if_in_broadcast_pkts);
+ buf[13] = ioread32(&mac->if_out_discards);
+ buf[14] = ioread32(&mac->if_out_ucast_pkts);
+ buf[15] = ioread32(&mac->if_out_multicast_pkts);
+ buf[16] = ioread32(&mac->if_out_broadcast_pkts);
+ buf[17] = ioread32(&mac->ether_stats_drop_events);
+
+ /* Extended etherStatsOctets counter */
+ ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
+ ext |= ioread32(&mac->ether_stats_octets);
+ buf[18] = ext;
+
+ buf[19] = ioread32(&mac->ether_stats_pkts);
+ buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
+ buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
+ buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
+ buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
+ buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
+ buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
+ buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
+ buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
+ buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
+ buf[29] = ioread32(&mac->ether_stats_jabbers);
+ buf[30] = ioread32(&mac->ether_stats_fragments);
+}
+
+static int tse_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return TSE_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 tse_get_msglevel(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void tse_set_msglevel(struct net_device *dev, u32 data)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ priv->msg_enable = data;
+}
+
+static int tse_reglen(struct net_device *dev)
+{
+ return TSE_NUM_REGS * sizeof(u32);
+}
+
+static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *regbuf)
+{
+ int i;
+ struct altera_tse_private *priv = netdev_priv(dev);
+ u32 *tse_mac_regs = (u32 *)priv->mac_dev;
+ u32 *buf = regbuf;
+
+ /* Set version to a known value, so ethtool knows
+ * how to do any special formatting of this data.
+ * This version number will need to change if and
+ * when this register table is changed.
+ */
+
+ regs->version = 1;
+
+ for (i = 0; i < TSE_NUM_REGS; i++)
+ buf[i] = ioread32(&tse_mac_regs[i]);
+}
+
+static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (phydev == NULL)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (phydev == NULL)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static const struct ethtool_ops tse_ethtool_ops = {
+ .get_drvinfo = tse_get_drvinfo,
+ .get_regs_len = tse_reglen,
+ .get_regs = tse_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_settings = tse_get_settings,
+ .set_settings = tse_set_settings,
+ .get_strings = tse_gstrings,
+ .get_sset_count = tse_sset_count,
+ .get_ethtool_stats = tse_fill_stats,
+ .get_msglevel = tse_get_msglevel,
+ .set_msglevel = tse_set_msglevel,
+};
+
+void altera_tse_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
new file mode 100644
index 000000000000..c70a29e0b9f7
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -0,0 +1,1543 @@
+/* Altera Triple-Speed Ethernet MAC driver
+ * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
+ *
+ * Contributors:
+ * Dalon Westergreen
+ * Thomas Chou
+ * Ian Abbott
+ * Yuriy Kozlov
+ * Tobias Klauser
+ * Andriy Smolskyy
+ * Roman Bulgakov
+ * Dmytro Mytarchuk
+ * Matthew Gerlach
+ *
+ * Original driver contributed by SLS.
+ * Major updates contributed by GlobalLogic
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <asm/cacheflush.h>
+
+#include "altera_utils.h"
+#include "altera_tse.h"
+#include "altera_sgdma.h"
+#include "altera_msgdma.h"
+
+static atomic_t instance_count = ATOMIC_INIT(~0);
+/* Module parameters */
+static int debug = -1;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
+
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN);
+
+#define RX_DESCRIPTORS 64
+static int dma_rx_num = RX_DESCRIPTORS;
+module_param(dma_rx_num, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");
+
+#define TX_DESCRIPTORS 64
+static int dma_tx_num = TX_DESCRIPTORS;
+module_param(dma_tx_num, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
+
+
+#define POLL_PHY (-1)
+
+/* Make sure the DMA buffer size is larger than the maximum frame size
+ * plus a VLAN header and some alignment headroom. With a maximum frame
+ * size of 1518 bytes, a 4-byte VLAN header and 2 bytes of alignment
+ * headroom add up to 1524 bytes, so 2048 leaves ample margin.
+ */
+#define ALTERA_RXDMABUFFER_SIZE 2048
+
+/* Allow network stack to resume queueing packets after we've
+ * finished transmitting at least 1/4 of the packets in the queue.
+ */
+#define TSE_TX_THRESH(x) (x->tx_ring_size / 4)
+
+#define TXQUEUESTOP_THRESHOLD 2
+
+static struct of_device_id altera_tse_ids[];
+
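+/* tse_tx_avail() computes the number of free Tx ring slots from the
+ * producer/consumer indices, which are allowed to run past the ring size
+ * (entries are taken modulo tx_ring_size elsewhere). For example, with
+ * tx_ring_size = 64, tx_prod = 10 and tx_cons = 5, 5 + 64 - 10 - 1 = 58
+ * slots are free; one slot is deliberately left unused, the usual way to
+ * tell a full ring apart from an empty one.
+ */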
+static inline u32 tse_tx_avail(struct altera_tse_private *priv)
+{
+ return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
+}
+
+/* MDIO specific functions
+ */
+static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
+ unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+ u32 data;
+
+ /* set MDIO address */
+ iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+
+ /* get the data */
+ data = ioread32(&mdio_regs[regnum]) & 0xffff;
+ return data;
+}
+
+static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
+ unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+
+ /* set MDIO address */
+ iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+
+ /* write the data */
+ iowrite32((u32) value, &mdio_regs[regnum]);
+ return 0;
+}
+
+static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ int ret;
+ int i;
+ struct device_node *mdio_node = NULL;
+ struct mii_bus *mdio = NULL;
+ struct device_node *child_node = NULL;
+
+ for_each_child_of_node(priv->device->of_node, child_node) {
+ if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
+ mdio_node = child_node;
+ break;
+ }
+ }
+
+ if (mdio_node) {
+ netdev_dbg(dev, "FOUND MDIO subnode\n");
+ } else {
+ netdev_dbg(dev, "NO MDIO subnode\n");
+ return 0;
+ }
+
+ mdio = mdiobus_alloc();
+ if (mdio == NULL) {
+ netdev_err(dev, "Error allocating MDIO bus\n");
+ return -ENOMEM;
+ }
+
+ mdio->name = ALTERA_TSE_RESOURCE_NAME;
+ mdio->read = &altera_tse_mdio_read;
+ mdio->write = &altera_tse_mdio_write;
+ snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);
+
+ mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+ if (mdio->irq == NULL) {
+ ret = -ENOMEM;
+ goto out_free_mdio;
+ }
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mdio->irq[i] = PHY_POLL;
+
+ mdio->priv = priv->mac_dev;
+ mdio->parent = priv->device;
+
+ ret = of_mdiobus_register(mdio, mdio_node);
+ if (ret != 0) {
+ netdev_err(dev, "Cannot register MDIO bus %s\n",
+ mdio->id);
+ goto out_free_mdio_irq;
+ }
+
+ if (netif_msg_drv(priv))
+ netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
+
+ priv->mdio = mdio;
+ return 0;
+out_free_mdio_irq:
+ kfree(mdio->irq);
+out_free_mdio:
+ mdiobus_free(mdio);
+ mdio = NULL;
+ return ret;
+}
+
+static void altera_tse_mdio_destroy(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ if (priv->mdio == NULL)
+ return;
+
+ if (netif_msg_drv(priv))
+ netdev_info(dev, "MDIO bus %s: removed\n",
+ priv->mdio->id);
+
+ mdiobus_unregister(priv->mdio);
+ kfree(priv->mdio->irq);
+ mdiobus_free(priv->mdio);
+ priv->mdio = NULL;
+}
+
+static int tse_init_rx_buffer(struct altera_tse_private *priv,
+ struct tse_buffer *rxbuffer, int len)
+{
+ rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
+ if (!rxbuffer->skb)
+ return -ENOMEM;
+
+ rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
+ len,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
+ netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
+ dev_kfree_skb_any(rxbuffer->skb);
+ return -EINVAL;
+ }
+ rxbuffer->len = len;
+ return 0;
+}
+
+static void tse_free_rx_buffer(struct altera_tse_private *priv,
+ struct tse_buffer *rxbuffer)
+{
+ struct sk_buff *skb = rxbuffer->skb;
+ dma_addr_t dma_addr = rxbuffer->dma_addr;
+
+ if (skb != NULL) {
+ if (dma_addr)
+ dma_unmap_single(priv->device, dma_addr,
+ rxbuffer->len,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ rxbuffer->skb = NULL;
+ rxbuffer->dma_addr = 0;
+ }
+}
+
+/* Unmap and free Tx buffer resources
+ */
+static void tse_free_tx_buffer(struct altera_tse_private *priv,
+ struct tse_buffer *buffer)
+{
+ if (buffer->dma_addr) {
+ if (buffer->mapped_as_page)
+ dma_unmap_page(priv->device, buffer->dma_addr,
+ buffer->len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device, buffer->dma_addr,
+ buffer->len, DMA_TO_DEVICE);
+ buffer->dma_addr = 0;
+ }
+ if (buffer->skb) {
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+}
+
+static int alloc_init_skbufs(struct altera_tse_private *priv)
+{
+ unsigned int rx_descs = priv->rx_ring_size;
+ unsigned int tx_descs = priv->tx_ring_size;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Create Rx ring buffer */
+ priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
+ GFP_KERNEL);
+ if (!priv->rx_ring)
+ goto err_rx_ring;
+
+ /* Create Tx ring buffer */
+ priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
+ GFP_KERNEL);
+ if (!priv->tx_ring)
+ goto err_tx_ring;
+
+ priv->tx_cons = 0;
+ priv->tx_prod = 0;
+
+ /* Init Rx ring */
+ for (i = 0; i < rx_descs; i++) {
+ ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
+ priv->rx_dma_buf_sz);
+ if (ret)
+ goto err_init_rx_buffers;
+ }
+
+ priv->rx_cons = 0;
+ priv->rx_prod = 0;
+
+ return 0;
+err_init_rx_buffers:
+ while (--i >= 0)
+ tse_free_rx_buffer(priv, &priv->rx_ring[i]);
+ kfree(priv->tx_ring);
+err_tx_ring:
+ kfree(priv->rx_ring);
+err_rx_ring:
+ return ret;
+}
+
+static void free_skbufs(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned int rx_descs = priv->rx_ring_size;
+ unsigned int tx_descs = priv->tx_ring_size;
+ int i;
+
+ /* Release the DMA TX/RX socket buffers */
+ for (i = 0; i < rx_descs; i++)
+ tse_free_rx_buffer(priv, &priv->rx_ring[i]);
+ for (i = 0; i < tx_descs; i++)
+ tse_free_tx_buffer(priv, &priv->tx_ring[i]);
+
+ kfree(priv->rx_ring);
+ kfree(priv->tx_ring);
+}
+
+/* Reallocate the skb for the reception process
+ */
+static inline void tse_rx_refill(struct altera_tse_private *priv)
+{
+ unsigned int rxsize = priv->rx_ring_size;
+ unsigned int entry;
+ int ret;
+
+ for (; priv->rx_cons - priv->rx_prod > 0;
+ priv->rx_prod++) {
+ entry = priv->rx_prod % rxsize;
+ if (likely(priv->rx_ring[entry].skb == NULL)) {
+ ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
+ priv->rx_dma_buf_sz);
+ if (unlikely(ret != 0))
+ break;
+ priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
+ }
+ }
+}
+
+/* Pull out the VLAN tag and fix up the packet
+ */
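+/* Illustrative layout (not a literal dump): an incoming tagged frame
+ *
+ *	| dst MAC | src MAC | 0x8100 | TCI | type | payload ... |
+ *
+ * is rewritten by moving the two MAC addresses forward by VLAN_HLEN and
+ * pulling VLAN_HLEN bytes, leaving
+ *
+ *	| dst MAC | src MAC | type | payload ... |
+ *
+ * while the extracted tag is handed to the stack via
+ * __vlan_hwaccel_put_tag().
+ */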
+static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ethhdr *eth_hdr;
+ u16 vid;
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ !__vlan_get_tag(skb, &vid)) {
+ eth_hdr = (struct ethhdr *)skb->data;
+ memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
+/* Receive a packet: retrieve and pass over to upper levels
+ */
+static int tse_rx(struct altera_tse_private *priv, int limit)
+{
+ unsigned int count = 0;
+ unsigned int next_entry;
+ struct sk_buff *skb;
+ unsigned int entry = priv->rx_cons % priv->rx_ring_size;
+ u32 rxstatus;
+ u16 pktlength;
+ u16 pktstatus;
+
+ while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+ pktstatus = rxstatus >> 16;
+ pktlength = rxstatus & 0xffff;
+
+ if ((pktstatus & 0xFF) || (pktlength == 0))
+ netdev_err(priv->dev,
+ "RCV pktstatus %08X pktlength %08X\n",
+ pktstatus, pktlength);
+
+ count++;
+ next_entry = (++priv->rx_cons) % priv->rx_ring_size;
+
+ skb = priv->rx_ring[entry].skb;
+ if (unlikely(!skb)) {
+ netdev_err(priv->dev,
+ "%s: Inconsistent Rx descriptor chain\n",
+ __func__);
+ priv->dev->stats.rx_dropped++;
+ break;
+ }
+ priv->rx_ring[entry].skb = NULL;
+
+ skb_put(skb, pktlength);
+
+ /* make cache consistent with receive packet buffer */
+ dma_sync_single_for_cpu(priv->device,
+ priv->rx_ring[entry].dma_addr,
+ priv->rx_ring[entry].len,
+ DMA_FROM_DEVICE);
+
+ dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
+ priv->rx_ring[entry].len, DMA_FROM_DEVICE);
+
+ if (netif_msg_pktdata(priv)) {
+ netdev_info(priv->dev, "frame received %d bytes\n",
+ pktlength);
+ print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
+ 16, 1, skb->data, pktlength, true);
+ }
+
+ tse_rx_vlan(priv->dev, skb);
+
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ skb_checksum_none_assert(skb);
+
+ napi_gro_receive(&priv->napi, skb);
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += pktlength;
+
+ entry = next_entry;
+ }
+
+ tse_rx_refill(priv);
+ return count;
+}
+
+/* Reclaim resources after transmission completes
+ */
+static int tse_tx_complete(struct altera_tse_private *priv)
+{
+ unsigned int txsize = priv->tx_ring_size;
+ u32 ready;
+ unsigned int entry;
+ struct tse_buffer *tx_buff;
+ int txcomplete = 0;
+
+ spin_lock(&priv->tx_lock);
+
+ ready = priv->dmaops->tx_completions(priv);
+
+ /* Free sent buffers */
+ while (ready && (priv->tx_cons != priv->tx_prod)) {
+ entry = priv->tx_cons % txsize;
+ tx_buff = &priv->tx_ring[entry];
+
+ if (netif_msg_tx_done(priv))
+ netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
+ __func__, priv->tx_prod, priv->tx_cons);
+
+ if (likely(tx_buff->skb))
+ priv->dev->stats.tx_packets++;
+
+ tse_free_tx_buffer(priv, tx_buff);
+ priv->tx_cons++;
+
+ txcomplete++;
+ ready--;
+ }
+
+ if (unlikely(netif_queue_stopped(priv->dev) &&
+ tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
+ netif_tx_lock(priv->dev);
+ if (netif_queue_stopped(priv->dev) &&
+ tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
+ if (netif_msg_tx_done(priv))
+ netdev_dbg(priv->dev, "%s: restart transmit\n",
+ __func__);
+ netif_wake_queue(priv->dev);
+ }
+ netif_tx_unlock(priv->dev);
+ }
+
+ spin_unlock(&priv->tx_lock);
+ return txcomplete;
+}
+
+/* NAPI polling function
+ */
+static int tse_poll(struct napi_struct *napi, int budget)
+{
+ struct altera_tse_private *priv =
+ container_of(napi, struct altera_tse_private, napi);
+ int rxcomplete = 0;
+ int txcomplete = 0;
+ unsigned long int flags;
+
+ txcomplete = tse_tx_complete(priv);
+
+ rxcomplete = tse_rx(priv, budget);
+
+ if (rxcomplete >= budget || txcomplete > 0)
+ return rxcomplete;
+
+ napi_gro_flush(napi, false);
+ __napi_complete(napi);
+
+ netdev_dbg(priv->dev,
+ "NAPI Complete, did %d packets with budget %d\n",
+ txcomplete + rxcomplete, budget);
+
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ priv->dmaops->enable_rxirq(priv);
+ priv->dmaops->enable_txirq(priv);
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+ return rxcomplete + txcomplete;
+}
+
+/* DMA TX & RX FIFO interrupt service routine, shared by both IRQ lines
+ */
+static irqreturn_t altera_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct altera_tse_private *priv;
+ unsigned long int flags;
+
+
+ if (unlikely(!dev)) {
+ pr_err("%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ priv = netdev_priv(dev);
+
+ /* turn off desc irqs and enable napi rx */
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ priv->dmaops->disable_rxirq(priv);
+ priv->dmaops->disable_txirq(priv);
+ __napi_schedule(&priv->napi);
+ }
+
+ /* reset IRQs */
+ priv->dmaops->clear_rxirq(priv);
+ priv->dmaops->clear_txirq(priv);
+
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/* Transmit a packet (called by the kernel). Dispatches to either the
+ * SGDMA or the MSGDMA transmit method. Scatter/gather is not supported,
+ * so the frame is assumed to be a single physically contiguous fragment
+ * starting at skb->data, of length skb_headlen(skb).
+ */
+static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned int txsize = priv->tx_ring_size;
+ unsigned int entry;
+ struct tse_buffer *buffer = NULL;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int nopaged_len = skb_headlen(skb);
+ enum netdev_tx ret = NETDEV_TX_OK;
+ dma_addr_t dma_addr;
+ int txcomplete = 0;
+
+ spin_lock_bh(&priv->tx_lock);
+
+ if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ /* This is a hard error, log it. */
+ netdev_err(priv->dev,
+ "%s: Tx list full when queue awake\n",
+ __func__);
+ }
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* Map the first skb fragment */
+ entry = priv->tx_prod % txsize;
+ buffer = &priv->tx_ring[entry];
+
+ dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, dma_addr)) {
+ netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
+ /* NETDEV_TX_OK tells the stack the skb was consumed, so free it */
+ dev_kfree_skb_any(skb);
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ buffer->skb = skb;
+ buffer->dma_addr = dma_addr;
+ buffer->len = nopaged_len;
+
+ /* Push data out of the cache hierarchy into main memory */
+ dma_sync_single_for_device(priv->device, buffer->dma_addr,
+ buffer->len, DMA_TO_DEVICE);
+
+ txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+
+ skb_tx_timestamp(skb);
+
+ priv->tx_prod++;
+ dev->stats.tx_bytes += skb->len;
+
+ if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHOLD)) {
+ if (netif_msg_hw(priv))
+ netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
+ __func__);
+ netif_stop_queue(dev);
+ }
+
+out:
+ spin_unlock_bh(&priv->tx_lock);
+
+ return ret;
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the phydev structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void altera_tse_adjust_link(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ int new_state = 0;
+
+ /* only change config if there is a link */
+ spin_lock(&priv->mac_cfg_lock);
+ if (phydev->link) {
+ /* Read old config */
+ u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
+
+ /* Check duplex */
+ if (phydev->duplex != priv->oldduplex) {
+ new_state = 1;
+ if (!(phydev->duplex))
+ cfg_reg |= MAC_CMDCFG_HD_ENA;
+ else
+ cfg_reg &= ~MAC_CMDCFG_HD_ENA;
+
+ netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
+ dev->name, phydev->duplex);
+
+ priv->oldduplex = phydev->duplex;
+ }
+
+ /* Check speed */
+ if (phydev->speed != priv->oldspeed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case 1000:
+ cfg_reg |= MAC_CMDCFG_ETH_SPEED;
+ cfg_reg &= ~MAC_CMDCFG_ENA_10;
+ break;
+ case 100:
+ cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
+ cfg_reg &= ~MAC_CMDCFG_ENA_10;
+ break;
+ case 10:
+ cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
+ cfg_reg |= MAC_CMDCFG_ENA_10;
+ break;
+ default:
+ if (netif_msg_link(priv))
+ netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
+ phydev->speed);
+ break;
+ }
+ priv->oldspeed = phydev->speed;
+ }
+ iowrite32(cfg_reg, &priv->mac_dev->command_config);
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static struct phy_device *connect_local_phy(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ char phy_id_fmt[MII_BUS_ID_SIZE + 3];
+ int ret;
+
+ if (priv->phy_addr != POLL_PHY) {
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
+ priv->mdio->id, priv->phy_addr);
+
+ netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
+
+ phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
+ priv->phy_iface);
+ if (IS_ERR(phydev))
+ netdev_err(dev, "Could not attach to PHY\n");
+
+ } else {
+ phydev = phy_find_first(priv->mdio);
+ if (phydev == NULL) {
+ netdev_err(dev, "No PHY found\n");
+ return phydev;
+ }
+
+ ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
+ priv->phy_iface);
+ if (ret != 0) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ phydev = NULL;
+ }
+ }
+ return phydev;
+}
+
+/* Initialize driver's PHY state, and attach to the PHY
+ */
+static int init_phy(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ struct device_node *phynode;
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
+
+ if (!phynode) {
+ netdev_dbg(dev, "no phy-handle found\n");
+ if (!priv->mdio) {
+ netdev_err(dev,
+ "No phy-handle nor local mdio specified\n");
+ return -ENODEV;
+ }
+ phydev = connect_local_phy(dev);
+ } else {
+ netdev_dbg(dev, "phy-handle found\n");
+ phydev = of_phy_connect(dev, phynode,
+ &altera_tse_adjust_link, 0, priv->phy_iface);
+ }
+
+ if (!phydev) {
+ netdev_err(dev, "Could not find the PHY\n");
+ return -ENODEV;
+ }
+
+ /* Stop Advertising 1000BASE Capability if interface is not GMII
+ * Note: Checkpatch throws CHECKs for the camel case defines below,
+ * it's ok to ignore.
+ */
+ if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
+ (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
+ phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ /* Broken HW is sometimes missing the pull-up resistor on the
+ * MDIO line, which results in reads to non-existent devices returning
+ * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+ * device as well.
+ * Note: phydev->phy_id is the result of reading the UID PHY registers.
+ */
+ if (phydev->phy_id == 0) {
+ netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
+ phy_disconnect(phydev);
+ return -ENODEV;
+ }
+
+ netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
+ phydev->addr, phydev->phy_id, phydev->link);
+
+ priv->phydev = phydev;
+ return 0;
+}
+
+static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
+{
+ struct altera_tse_mac *mac = priv->mac_dev;
+ u32 msb;
+ u32 lsb;
+
+ msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
+
+ /* Set primary MAC address */
+ iowrite32(msb, &mac->mac_addr_0);
+ iowrite32(lsb, &mac->mac_addr_1);
+}
+
+/* MAC software reset.
+ * When reset is triggered, the MAC function completes the current
+ * transmission or reception, and subsequently disables the transmit and
+ * receive logic, flushes the receive FIFO buffer, and resets the statistics
+ * counters.
+ */
+static int reset_mac(struct altera_tse_private *priv)
+{
+ void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
+ int counter;
+ u32 dat;
+
+ dat = ioread32(cmd_cfg_reg);
+ dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
+ dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
+ iowrite32(dat, cmd_cfg_reg);
+
+ counter = 0;
+ while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+ if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+ break;
+ udelay(1);
+ }
+
+ if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+ dat = ioread32(cmd_cfg_reg);
+ dat &= ~MAC_CMDCFG_SW_RESET;
+ iowrite32(dat, cmd_cfg_reg);
+ return -1;
+ }
+ return 0;
+}
+
+/* Initialize MAC core registers
+ */
+static int init_mac(struct altera_tse_private *priv)
+{
+ struct altera_tse_mac *mac = priv->mac_dev;
+ unsigned int cmd = 0;
+ u32 frm_length;
+
+ /* Setup Rx FIFO */
+ iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+ &mac->rx_section_empty);
+ iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
+ iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
+ iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+
+ /* Setup Tx FIFO */
+ iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+ &mac->tx_section_empty);
+ iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
+ iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
+ iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+
+ /* MAC Address Configuration */
+ tse_update_mac_addr(priv, priv->dev->dev_addr);
+
+ /* MAC Function Configuration */
+ frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
+ iowrite32(frm_length, &mac->frm_length);
+ iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+
+ /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
+ * start address
+ */
+ tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+ tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+ ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+
+ /* Set the MAC options */
+ cmd = ioread32(&mac->command_config);
+ cmd |= MAC_CMDCFG_PAD_EN; /* Padding Removal on Receive */
+ cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */
+ cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames
+ * with CRC errors
+ */
+ cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
+ cmd &= ~MAC_CMDCFG_TX_ENA;
+ cmd &= ~MAC_CMDCFG_RX_ENA;
+ iowrite32(cmd, &mac->command_config);
+
+ if (netif_msg_hw(priv))
+ dev_dbg(priv->device,
+ "MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
+
+ return 0;
+}
+
+/* Start/stop MAC transmission logic
+ */
+static void tse_set_mac(struct altera_tse_private *priv, bool enable)
+{
+ struct altera_tse_mac *mac = priv->mac_dev;
+ u32 value = ioread32(&mac->command_config);
+
+ if (enable)
+ value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
+ else
+ value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
+
+ iowrite32(value, &mac->command_config);
+}
+
+/* Change the MTU
+ */
+static int tse_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned int max_mtu = priv->max_mtu;
+ unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN;
+
+ if (netif_running(dev)) {
+ netdev_err(dev, "must be stopped to change its MTU\n");
+ return -EBUSY;
+ }
+
+ if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) {
+ netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu);
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
+ return 0;
+}
+
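+/* Program the 64-entry multicast hash table. Each address hashes to a
+ * 6-bit index built by XOR-folding every octet down to one parity bit,
+ * starting from the last address octet (addr[5]); a sketch of the same
+ * computation:
+ *
+ *	hash = 0;
+ *	for (octet = 5; octet >= 0; octet--)
+ *		hash = (hash << 1) | parity8(addr[octet]);
+ *
+ * (parity8() is a hypothetical helper returning the XOR of the eight bits
+ * of its argument; the inner loop below open-codes it.)
+ */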
+static void altera_tse_set_mcfilter(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct altera_tse_mac *mac = priv->mac_dev;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ /* clear the hash filter */
+ for (i = 0; i < 64; i++)
+ iowrite32(0, &(mac->hash_table[i]));
+
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int hash = 0;
+ int mac_octet;
+
+ for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
+ unsigned char xor_bit = 0;
+ unsigned char octet = ha->addr[mac_octet];
+ unsigned int bitshift;
+
+ for (bitshift = 0; bitshift < 8; bitshift++)
+ xor_bit ^= ((octet >> bitshift) & 0x01);
+
+ hash = (hash << 1) | xor_bit;
+ }
+ iowrite32(1, &(mac->hash_table[hash]));
+ }
+}
+
+
+static void altera_tse_set_mcfilterall(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct altera_tse_mac *mac = priv->mac_dev;
+ int i;
+
+ /* set the hash filter */
+ for (i = 0; i < 64; i++)
+ iowrite32(1, &(mac->hash_table[i]));
+}
+
+/* Set or clear the multicast filter for this adaptor
+ */
+static void tse_set_rx_mode_hashfilter(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct altera_tse_mac *mac = priv->mac_dev;
+
+ spin_lock(&priv->mac_cfg_lock);
+
+ if (dev->flags & IFF_PROMISC)
+ tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+
+ if (dev->flags & IFF_ALLMULTI)
+ altera_tse_set_mcfilterall(dev);
+ else
+ altera_tse_set_mcfilter(dev);
+
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+/* Set or clear the multicast filter for this adaptor
+ */
+static void tse_set_rx_mode(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct altera_tse_mac *mac = priv->mac_dev;
+
+ spin_lock(&priv->mac_cfg_lock);
+
+ if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
+ !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
+ tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+ else
+ tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+/* Open and initialize the interface
+ */
+static int tse_open(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ int ret = 0;
+ int i;
+ unsigned long int flags;
+
+ /* Reset and configure TSE MAC and probe associated PHY */
+ ret = priv->dmaops->init_dma(priv);
+ if (ret != 0) {
+ netdev_err(dev, "Cannot initialize DMA\n");
+ goto phy_error;
+ }
+
+ if (netif_msg_ifup(priv))
+ netdev_warn(dev, "device MAC address %pM\n",
+ dev->dev_addr);
+
+ if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
+ netdev_warn(dev, "TSE revision %x\n", priv->revision);
+
+ spin_lock(&priv->mac_cfg_lock);
+ ret = reset_mac(priv);
+ if (ret)
+ netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+
+ ret = init_mac(priv);
+ spin_unlock(&priv->mac_cfg_lock);
+ if (ret) {
+ netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
+ goto alloc_skbuf_error;
+ }
+
+ priv->dmaops->reset_dma(priv);
+
+ /* Create and initialize the TX/RX descriptors chains. */
+ priv->rx_ring_size = dma_rx_num;
+ priv->tx_ring_size = dma_tx_num;
+ ret = alloc_init_skbufs(priv);
+ if (ret) {
+ netdev_err(dev, "DMA descriptors initialization failed\n");
+ goto alloc_skbuf_error;
+ }
+
+
+ /* Register RX interrupt */
+ ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
+ dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "Unable to register RX interrupt %d\n",
+ priv->rx_irq);
+ goto init_error;
+ }
+
+ /* Register TX interrupt */
+ ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
+ dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "Unable to register TX interrupt %d\n",
+ priv->tx_irq);
+ goto tx_request_irq_error;
+ }
+
+ /* Enable DMA interrupts */
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ priv->dmaops->enable_rxirq(priv);
+ priv->dmaops->enable_txirq(priv);
+
+ /* Setup RX descriptor chain */
+ for (i = 0; i < priv->rx_ring_size; i++)
+ priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
+
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+
+ /* Start MAC Rx/Tx */
+ spin_lock(&priv->mac_cfg_lock);
+ tse_set_mac(priv, true);
+ spin_unlock(&priv->mac_cfg_lock);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
+tx_request_irq_error:
+ free_irq(priv->rx_irq, dev);
+init_error:
+ free_skbufs(dev);
+alloc_skbuf_error:
+ if (priv->phydev) {
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+phy_error:
+ return ret;
+}
+
+/* Stop TSE MAC interface and put the device in an inactive state
+ */
+static int tse_shutdown(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ int ret;
+ unsigned long int flags;
+
+ /* Stop and disconnect the PHY */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+
+ /* Disable DMA interrupts */
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ priv->dmaops->disable_rxirq(priv);
+ priv->dmaops->disable_txirq(priv);
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+
+ /* Free the IRQ lines */
+ free_irq(priv->rx_irq, dev);
+ free_irq(priv->tx_irq, dev);
+
+ /* disable and reset the MAC, empties fifo */
+ spin_lock(&priv->mac_cfg_lock);
+ spin_lock(&priv->tx_lock);
+
+ ret = reset_mac(priv);
+ if (ret)
+ netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+ priv->dmaops->reset_dma(priv);
+ free_skbufs(dev);
+
+ spin_unlock(&priv->tx_lock);
+ spin_unlock(&priv->mac_cfg_lock);
+
+ priv->dmaops->uninit_dma(priv);
+
+ return 0;
+}
+
+static struct net_device_ops altera_tse_netdev_ops = {
+ .ndo_open = tse_open,
+ .ndo_stop = tse_shutdown,
+ .ndo_start_xmit = tse_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_rx_mode = tse_set_rx_mode,
+ .ndo_change_mtu = tse_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+
+static int request_and_map(struct platform_device *pdev, const char *name,
+ struct resource **res, void __iomem **ptr)
+{
+ struct resource *region;
+ struct device *device = &pdev->dev;
+
+ *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (*res == NULL) {
+ dev_err(device, "resource %s not defined\n", name);
+ return -ENODEV;
+ }
+
+ region = devm_request_mem_region(device, (*res)->start,
+ resource_size(*res), dev_name(device));
+ if (region == NULL) {
+ dev_err(device, "unable to request %s\n", name);
+ return -EBUSY;
+ }
+
+ *ptr = devm_ioremap_nocache(device, region->start,
+ resource_size(region));
+ if (*ptr == NULL) {
+ dev_err(device, "ioremap_nocache of %s failed!", name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Probe Altera TSE MAC device
+ */
+static int altera_tse_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ int ret = -ENODEV;
+ struct resource *control_port;
+ struct resource *dma_res;
+ struct altera_tse_private *priv;
+ const unsigned char *macaddr;
+ struct device_node *np = pdev->dev.of_node;
+ void __iomem *descmap;
+ const struct of_device_id *of_id = NULL;
+
+ ndev = alloc_etherdev(sizeof(struct altera_tse_private));
+ if (!ndev) {
+ dev_err(&pdev->dev, "Could not allocate network device\n");
+ return -ENODEV;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ priv = netdev_priv(ndev);
+ priv->device = &pdev->dev;
+ priv->dev = ndev;
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ of_id = of_match_device(altera_tse_ids, &pdev->dev);
+
+ if (of_id)
+ priv->dmaops = (struct altera_dmaops *)of_id->data;
+
+
+ if (priv->dmaops &&
+ priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
+ /* Get the mapped address to the SGDMA descriptor memory */
+ ret = request_and_map(pdev, "s1", &dma_res, &descmap);
+ if (ret)
+ goto out_free;
+
+ /* Start of that memory is for transmit descriptors */
+ priv->tx_dma_desc = descmap;
+
+ /* First half is for tx descriptors, the other half for rx */
+ priv->txdescmem = resource_size(dma_res)/2;
+
+ priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
+
+ priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
+ priv->txdescmem));
+ priv->rxdescmem = resource_size(dma_res)/2;
+ priv->rxdescmem_busaddr = dma_res->start;
+ priv->rxdescmem_busaddr += priv->txdescmem;
+
+ if (upper_32_bits(priv->rxdescmem_busaddr)) {
+ dev_dbg(priv->device,
+ "SGDMA bus addresses greater than 32-bits\n");
+ goto out_free;
+ }
+ if (upper_32_bits(priv->txdescmem_busaddr)) {
+ dev_dbg(priv->device,
+ "SGDMA bus addresses greater than 32-bits\n");
+ goto out_free;
+ }
+ } else if (priv->dmaops &&
+ priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
+ ret = request_and_map(pdev, "rx_resp", &dma_res,
+ &priv->rx_dma_resp);
+ if (ret)
+ goto out_free;
+
+ ret = request_and_map(pdev, "tx_desc", &dma_res,
+ &priv->tx_dma_desc);
+ if (ret)
+ goto out_free;
+
+ priv->txdescmem = resource_size(dma_res);
+ priv->txdescmem_busaddr = dma_res->start;
+
+ ret = request_and_map(pdev, "rx_desc", &dma_res,
+ &priv->rx_dma_desc);
+ if (ret)
+ goto out_free;
+
+ priv->rxdescmem = resource_size(dma_res);
+ priv->rxdescmem_busaddr = dma_res->start;
+
+ } else {
+ goto out_free;
+ }
+
+ if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
+ dma_set_coherent_mask(priv->device,
+ DMA_BIT_MASK(priv->dmaops->dmamask));
+ else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
+ dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
+ else
+ goto out_free;
+
+ /* MAC address space */
+ ret = request_and_map(pdev, "control_port", &control_port,
+ (void __iomem **)&priv->mac_dev);
+ if (ret)
+ goto out_free;
+
+ /* xSGDMA Rx Dispatcher address space */
+ ret = request_and_map(pdev, "rx_csr", &dma_res,
+ &priv->rx_dma_csr);
+ if (ret)
+ goto out_free;
+
+
+ /* xSGDMA Tx Dispatcher address space */
+ ret = request_and_map(pdev, "tx_csr", &dma_res,
+ &priv->tx_dma_csr);
+ if (ret)
+ goto out_free;
+
+
+ /* Rx IRQ */
+ priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
+ if (priv->rx_irq == -ENXIO) {
+ dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
+ ret = -ENXIO;
+ goto out_free;
+ }
+
+ /* Tx IRQ */
+ priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
+ if (priv->tx_irq == -ENXIO) {
+ dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
+ ret = -ENXIO;
+ goto out_free;
+ }
+
+ /* get FIFO depths from device tree */
+ if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
+ &priv->rx_fifo_depth)) {
+ dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
+ ret = -ENXIO;
+ goto out_free;
+ }
+
+ if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+ &priv->tx_fifo_depth)) {
+ dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
+ ret = -ENXIO;
+ goto out_free;
+ }
+
+ /* get hash filter settings for this instance */
+ priv->hash_filter =
+ of_property_read_bool(pdev->dev.of_node,
+ "altr,has-hash-multicast-filter");
+
+ /* get supplemental address settings for this instance */
+ priv->added_unicast =
+ of_property_read_bool(pdev->dev.of_node,
+ "altr,has-supplementary-unicast");
+
+ /* Max MTU is 1500, ETH_DATA_LEN */
+ priv->max_mtu = ETH_DATA_LEN;
+
+ /* Get the max mtu from the device tree. Note that the
+ * "max-frame-size" parameter is actually max mtu. Definition
+ * in the ePAPR v1.1 spec and usage differ, so go with usage.
+ */
+ of_property_read_u32(pdev->dev.of_node, "max-frame-size",
+ &priv->max_mtu);
+
+ /* The DMA buffer size already accounts for an alignment bias
+ * to avoid unaligned access exceptions for the NIOS processor.
+ */
+ priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
+
+ /* get default MAC address from device tree */
+ macaddr = of_get_mac_address(pdev->dev.of_node);
+ if (macaddr)
+ ether_addr_copy(ndev->dev_addr, macaddr);
+ else
+ eth_hw_addr_random(ndev);
+
+ priv->phy_iface = of_get_phy_mode(np);
+
+	/* try to get PHY address from device tree; use PHY autodetection if
+ * no valid address is given
+ */
+ if (of_property_read_u32(pdev->dev.of_node, "phy-addr",
+ &priv->phy_addr)) {
+ priv->phy_addr = POLL_PHY;
+ }
+
+ if (!((priv->phy_addr == POLL_PHY) ||
+ ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
+ dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
+ priv->phy_addr);
+		ret = -ENXIO;
+		goto out_free;
+ }
+
+ /* Create/attach to MDIO bus */
+ ret = altera_tse_mdio_create(ndev,
+ atomic_add_return(1, &instance_count));
+
+ if (ret)
+ goto out_free;
+
+ /* initialize netdev */
+ ether_setup(ndev);
+ ndev->mem_start = control_port->start;
+ ndev->mem_end = control_port->end;
+ ndev->netdev_ops = &altera_tse_netdev_ops;
+ altera_tse_set_ethtool_ops(ndev);
+
+ altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
+
+ if (priv->hash_filter)
+ altera_tse_netdev_ops.ndo_set_rx_mode =
+ tse_set_rx_mode_hashfilter;
+
+ /* Scatter/gather IO is not supported,
+ * so it is turned off
+ */
+ ndev->hw_features &= ~NETIF_F_SG;
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+
+ /* VLAN offloading of tagging, stripping and filtering is not
+	 * supported by hardware, but the driver will accommodate the
+ * extra 4-byte VLAN tag for processing by upper layers
+ */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+ /* setup NAPI interface */
+ netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
+
+ spin_lock_init(&priv->mac_cfg_lock);
+ spin_lock_init(&priv->tx_lock);
+ spin_lock_init(&priv->rxdma_irq_lock);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register TSE net device\n");
+ goto out_free_mdio;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv->revision = ioread32(&priv->mac_dev->megacore_revision);
+
+ if (netif_msg_probe(priv))
+ dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
+ (priv->revision >> 8) & 0xff,
+ priv->revision & 0xff,
+ (unsigned long) control_port->start, priv->rx_irq,
+ priv->tx_irq);
+
+ ret = init_phy(ndev);
+ if (ret != 0) {
+ netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
+ goto out_free_mdio;
+ }
+ return 0;
+
+out_free_mdio:
+ altera_tse_mdio_destroy(ndev);
+out_free:
+ free_netdev(ndev);
+ return ret;
+}
+
+/* Remove Altera TSE MAC device
+ */
+static int altera_tse_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ altera_tse_mdio_destroy(ndev);
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+struct altera_dmaops altera_dtype_sgdma = {
+ .altera_dtype = ALTERA_DTYPE_SGDMA,
+ .dmamask = 32,
+ .reset_dma = sgdma_reset,
+ .enable_txirq = sgdma_enable_txirq,
+ .enable_rxirq = sgdma_enable_rxirq,
+ .disable_txirq = sgdma_disable_txirq,
+ .disable_rxirq = sgdma_disable_rxirq,
+ .clear_txirq = sgdma_clear_txirq,
+ .clear_rxirq = sgdma_clear_rxirq,
+ .tx_buffer = sgdma_tx_buffer,
+ .tx_completions = sgdma_tx_completions,
+ .add_rx_desc = sgdma_add_rx_desc,
+ .get_rx_status = sgdma_rx_status,
+ .init_dma = sgdma_initialize,
+ .uninit_dma = sgdma_uninitialize,
+};
+
+struct altera_dmaops altera_dtype_msgdma = {
+ .altera_dtype = ALTERA_DTYPE_MSGDMA,
+ .dmamask = 64,
+ .reset_dma = msgdma_reset,
+ .enable_txirq = msgdma_enable_txirq,
+ .enable_rxirq = msgdma_enable_rxirq,
+ .disable_txirq = msgdma_disable_txirq,
+ .disable_rxirq = msgdma_disable_rxirq,
+ .clear_txirq = msgdma_clear_txirq,
+ .clear_rxirq = msgdma_clear_rxirq,
+ .tx_buffer = msgdma_tx_buffer,
+ .tx_completions = msgdma_tx_completions,
+ .add_rx_desc = msgdma_add_rx_desc,
+ .get_rx_status = msgdma_rx_status,
+ .init_dma = msgdma_initialize,
+ .uninit_dma = msgdma_uninitialize,
+};
+
+static struct of_device_id altera_tse_ids[] = {
+ { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
+ { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
+ { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_tse_ids);
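The DMA backend is selected purely by compatible string: the .data pointer of the matching entry above carries the ops table used throughout the driver. The lookup itself happens earlier in altera_tse_probe(), outside this hunk; a minimal sketch of that step, assuming the usual of_match_device() idiom (example_pick_dmaops is a hypothetical name, not part of the patch):

static const struct altera_dmaops *
example_pick_dmaops(struct platform_device *pdev)
{
	const struct of_device_id *of_id;

	/* match against altera_tse_ids; .data holds the DMA ops vtable */
	of_id = of_match_device(altera_tse_ids, &pdev->dev);

	return of_id ? of_id->data : NULL;
}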
+
+static struct platform_driver altera_tse_driver = {
+ .probe = altera_tse_probe,
+ .remove = altera_tse_remove,
+ .suspend = NULL,
+ .resume = NULL,
+ .driver = {
+ .name = ALTERA_TSE_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = altera_tse_ids,
+ },
+};
+
+module_platform_driver(altera_tse_driver);
+
+MODULE_AUTHOR("Altera Corporation");
+MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
new file mode 100644
index 000000000000..70fa13f486b2
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -0,0 +1,44 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "altera_tse.h"
+#include "altera_utils.h"
+
+void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+{
+ u32 value = ioread32(ioaddr);
+ value |= bit_mask;
+ iowrite32(value, ioaddr);
+}
+
+void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+{
+ u32 value = ioread32(ioaddr);
+ value &= ~bit_mask;
+ iowrite32(value, ioaddr);
+}
+
+int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+{
+ u32 value = ioread32(ioaddr);
+ return (value & bit_mask) ? 1 : 0;
+}
+
+int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+{
+ u32 value = ioread32(ioaddr);
+ return (value & bit_mask) ? 0 : 1;
+}
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
new file mode 100644
index 000000000000..ce1db36d3583
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -0,0 +1,27 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+
+#ifndef __ALTERA_UTILS_H__
+#define __ALTERA_UTILS_H__
+
+void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+
+#endif /* __ALTERA_UTILS_H__*/
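These helpers are plain read-modify-write accessors built on ioread32()/iowrite32(); they are not atomic against concurrent writers, so callers serialize through the driver's spinlocks. A hedged usage sketch follows (the register offset and bit are invented purely for illustration):

static void example_toggle_bit(void __iomem *csr)
{
	/* set bit 0 of a hypothetical register at offset 4, then clear it */
	tse_set_bit(csr + 4, BIT(0));
	if (tse_bit_is_set(csr + 4, BIT(0)))
		tse_clear_bit(csr + 4, BIT(0));
}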
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 18e542f7853d..98a10d555b79 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -578,7 +578,7 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
outs++;
/* Kick the lance: transmit now */
WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
spin_lock_irqsave(&lp->devlock, flags);
if (TX_BUFFS_AVAIL)
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 9793767996a2..87e727b921dc 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -472,7 +472,7 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
netif_stop_queue(dev);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 2061b471fd16..26efaaa5e73f 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -720,6 +720,9 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
int rx_pkt_limit = budget;
unsigned long flags;
+ if (rx_pkt_limit <= 0)
+ goto rx_not_empty;
+
do{
/* process receive packets until we use the quota*/
/* If we own the next entry, it's a new packet. Send it up. */
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 9339cccfe05a..e7cc9174e364 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -549,35 +549,35 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
struct pcnet32_rx_head *new_rx_ring;
struct sk_buff **new_skb_list;
int new, overlap;
+ unsigned int entries = 1 << size;
new_rx_ring = pci_alloc_consistent(lp->pci_dev,
sizeof(struct pcnet32_rx_head) *
- (1 << size),
+ entries,
&new_ring_dma_addr);
if (new_rx_ring == NULL) {
netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return;
}
- memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+ memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries);
- new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+ new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
if (!new_dma_addr_list)
goto free_new_rx_ring;
- new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
if (!new_skb_list)
goto free_new_lists;
/* first copy the current receive buffers */
- overlap = min(size, lp->rx_ring_size);
+ overlap = min(entries, lp->rx_ring_size);
for (new = 0; new < overlap; new++) {
new_rx_ring[new] = lp->rx_ring[new];
new_dma_addr_list[new] = lp->rx_dma_addr[new];
new_skb_list[new] = lp->rx_skbuff[new];
}
/* now allocate any new buffers needed */
- for (; new < size; new++) {
+ for (; new < entries; new++) {
struct sk_buff *rx_skbuff;
new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = new_skb_list[new];
@@ -592,6 +592,13 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
new_dma_addr_list[new] =
pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev,
+ new_dma_addr_list[new])) {
+ netif_err(lp, drv, dev, "%s dma mapping failed\n",
+ __func__);
+ dev_kfree_skb(new_skb_list[new]);
+ goto free_all_new;
+ }
new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
new_rx_ring[new].status = cpu_to_le16(0x8000);
@@ -599,8 +606,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
/* and free any unneeded buffers */
for (; new < lp->rx_ring_size; new++) {
if (lp->rx_skbuff[new]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[new]))
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[new],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_skbuff[new]);
}
}
@@ -612,7 +623,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
lp->rx_ring_size, lp->rx_ring,
lp->rx_ring_dma_addr);
- lp->rx_ring_size = (1 << size);
+ lp->rx_ring_size = entries;
lp->rx_mod_mask = lp->rx_ring_size - 1;
lp->rx_len_bits = (size << 4);
lp->rx_ring = new_rx_ring;
@@ -624,8 +635,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
free_all_new:
while (--new >= lp->rx_ring_size) {
if (new_skb_list[new]) {
- pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ new_dma_addr_list[new]))
+ pci_unmap_single(lp->pci_dev,
+ new_dma_addr_list[new],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(new_skb_list[new]);
}
}
@@ -634,8 +649,7 @@ free_new_lists:
kfree(new_dma_addr_list);
free_new_rx_ring:
pci_free_consistent(lp->pci_dev,
- sizeof(struct pcnet32_rx_head) *
- (1 << size),
+ sizeof(struct pcnet32_rx_head) * entries,
new_rx_ring,
new_ring_dma_addr);
}
@@ -650,8 +664,12 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
lp->rx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
if (lp->rx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[i]))
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[i],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(lp->rx_skbuff[i]);
}
lp->rx_skbuff[i] = NULL;
@@ -930,6 +948,12 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
lp->tx_dma_addr[x] =
pci_map_single(lp->pci_dev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "DMA mapping error at line: %d!\n",
+ __LINE__);
+ goto clean_up;
+ }
lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[x].status = cpu_to_le16(status);
@@ -1142,24 +1166,36 @@ static void pcnet32_rx_entry(struct net_device *dev,
if (pkt_len > rx_copybreak) {
struct sk_buff *newskb;
+ dma_addr_t new_dma_addr;
newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
+ /*
+		 * Map the new buffer; if mapping fails, drop the packet and
+		 * reuse the old buffer.
+ */
if (newskb) {
skb_reserve(newskb, NET_IP_ALIGN);
- skb = lp->rx_skbuff[entry];
- pci_unmap_single(lp->pci_dev,
- lp->rx_dma_addr[entry],
- PKT_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- skb_put(skb, pkt_len);
- lp->rx_skbuff[entry] = newskb;
- lp->rx_dma_addr[entry] =
- pci_map_single(lp->pci_dev,
- newskb->data,
- PKT_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
- rx_in_place = 1;
+ new_dma_addr = pci_map_single(lp->pci_dev,
+ newskb->data,
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
+ netif_err(lp, rx_err, dev,
+ "DMA mapping error.\n");
+ dev_kfree_skb(newskb);
+ skb = NULL;
+ } else {
+ skb = lp->rx_skbuff[entry];
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[entry] = newskb;
+ lp->rx_dma_addr[entry] = new_dma_addr;
+ rxp->base = cpu_to_le32(new_dma_addr);
+ rx_in_place = 1;
+ }
} else
skb = NULL;
} else
@@ -2229,9 +2265,12 @@ static void pcnet32_purge_tx_ring(struct net_device *dev)
lp->tx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
if (lp->tx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
- lp->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->tx_dma_addr[i]))
+ pci_unmap_single(lp->pci_dev,
+ lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(lp->tx_skbuff[i]);
}
lp->tx_skbuff[i] = NULL;
@@ -2264,10 +2303,19 @@ static int pcnet32_init_ring(struct net_device *dev)
}
rmb();
- if (lp->rx_dma_addr[i] == 0)
+ if (lp->rx_dma_addr[i] == 0) {
lp->rx_dma_addr[i] =
pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[i])) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev,
+ "%s pci dma mapping error\n",
+ __func__);
+ return -1;
+ }
+ }
lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
wmb(); /* Make sure owner changes after all others are visible */
@@ -2397,9 +2445,14 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_ring[entry].misc = 0x00000000;
- lp->tx_skbuff[entry] = skb;
lp->tx_dma_addr[entry] =
pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ goto drop_packet;
+ }
+ lp->tx_skbuff[entry] = skb;
lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[entry].status = cpu_to_le16(status);
@@ -2414,6 +2467,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_full = 1;
netif_stop_queue(dev);
}
+drop_packet:
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
}
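The recurring change in this pcnet32 hunk is that every pci_map_single() is now followed by a pci_dma_mapping_error() check, and every unmap is guarded by the same test so an address that never mapped is not handed to pci_unmap_single(). A condensed sketch of the mapping side (example_map_tx_buffer is a hypothetical helper, not part of the patch):

static int example_map_tx_buffer(struct pci_dev *pdev, struct sk_buff *skb,
				 dma_addr_t *dma)
{
	*dma = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *dma))
		return -ENOMEM;	/* caller drops the skb and counts tx_dropped */
	return 0;
}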
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 380d24922049..17bb9ce96260 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -535,7 +535,7 @@ static int alx_alloc_descriptors(struct alx_priv *alx)
if (!alx->descmem.virt)
goto out_free;
- alx->txq.tpd = (void *)alx->descmem.virt;
+ alx->txq.tpd = alx->descmem.virt;
alx->txq.tpd_dma = alx->descmem.dma;
/* alignment requirement for next block */
@@ -1097,7 +1097,7 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
drop:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 4d3258dd0a88..e11bf18fbbd1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -832,7 +832,7 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
}
static inline void atl1c_clean_buffer(struct pci_dev *pdev,
- struct atl1c_buffer *buffer_info, int in_irq)
+ struct atl1c_buffer *buffer_info)
{
u16 pci_driection;
if (buffer_info->flags & ATL1C_BUFFER_FREE)
@@ -850,12 +850,8 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
pci_unmap_page(pdev, buffer_info->dma,
buffer_info->length, pci_driection);
}
- if (buffer_info->skb) {
- if (in_irq)
- dev_kfree_skb_irq(buffer_info->skb);
- else
- dev_kfree_skb(buffer_info->skb);
- }
+ if (buffer_info->skb)
+ dev_consume_skb_any(buffer_info->skb);
buffer_info->dma = 0;
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
@@ -875,7 +871,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
ring_count = tpd_ring->count;
for (index = 0; index < ring_count; index++) {
buffer_info = &tpd_ring->buffer_info[index];
- atl1c_clean_buffer(pdev, buffer_info, 0);
+ atl1c_clean_buffer(pdev, buffer_info);
}
/* Zero out Tx-buffers */
@@ -899,7 +895,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
for (j = 0; j < rfd_ring->count; j++) {
buffer_info = &rfd_ring->buffer_info[j];
- atl1c_clean_buffer(pdev, buffer_info, 0);
+ atl1c_clean_buffer(pdev, buffer_info);
}
/* zero out the descriptor ring */
memset(rfd_ring->desc, 0, rfd_ring->size);
@@ -1562,7 +1558,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
while (next_to_clean != hw_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[next_to_clean];
- atl1c_clean_buffer(pdev, buffer_info, 1);
+ atl1c_clean_buffer(pdev, buffer_info);
if (++next_to_clean == tpd_ring->count)
next_to_clean = 0;
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
@@ -1977,17 +1973,17 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
{
struct pci_dev *pdev = adapter->pdev;
+ unsigned short offload_type;
u8 hdr_len;
u32 real_len;
- unsigned short offload_type;
- int err;
if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (unlikely(err))
- return -1;
- }
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
offload_type = skb_shinfo(skb)->gso_type;
if (offload_type & SKB_GSO_TCPV4) {
@@ -2085,7 +2081,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
while (index != tpd_ring->next_to_use) {
tpd = ATL1C_TPD_DESC(tpd_ring, index);
buffer_info = &tpd_ring->buffer_info[index];
- atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
+ atl1c_clean_buffer(adpt->pdev, buffer_info);
memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
if (++index == tpd_ring->count)
index = 0;
@@ -2258,7 +2254,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
/* roll back tpd/buffer */
atl1c_tx_rollback(adapter, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
} else {
atl1c_tx_queue(adapter, skb, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 422aab27ea1b..4345332533ad 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1641,17 +1641,17 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
static int atl1e_tso_csum(struct atl1e_adapter *adapter,
struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
+ unsigned short offload_type;
u8 hdr_len;
u32 real_len;
- unsigned short offload_type;
- int err;
if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (unlikely(err))
- return -1;
- }
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
offload_type = skb_shinfo(skb)->gso_type;
if (offload_type & SKB_GSO_TCPV4) {
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 287272dd69da..dfd0e91fa726 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2118,18 +2118,17 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
}
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
- struct tx_packet_desc *ptpd)
+ struct tx_packet_desc *ptpd)
{
u8 hdr_len, ip_off;
u32 real_len;
- int err;
if (skb_shinfo(skb)->gso_size) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (unlikely(err))
- return -1;
- }
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -2175,7 +2174,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
return 3;
}
}
- return false;
+ return 0;
}
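skb_cow_head(skb, 0) folds the old skb_header_cloned()/pskb_expand_head() pair into a single call that un-shares the header only when needed and returns 0 or a negative errno, which the converted TSO paths in atl1c/atl1e/atl1 simply propagate instead of the old return -1. A sketch of the converted pattern:

	if (skb_is_gso(skb)) {
		int err = skb_cow_head(skb, 0);

		if (err < 0)
			return err;	/* header could not be un-shared */
		/* headers may now be modified in place */
	}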
static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 265ce1b752ed..78befb522a52 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -55,6 +55,7 @@ static const char atl2_driver_name[] = "atl2";
static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
static const char atl2_driver_version[] = ATL2_DRV_VERSION;
+static const struct ethtool_ops atl2_ethtool_ops;
MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
@@ -71,8 +72,6 @@ static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
-static void atl2_set_ethtool_ops(struct net_device *netdev);
-
static void atl2_check_options(struct atl2_adapter *adapter);
/**
@@ -1397,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
atl2_setup_pcicmd(pdev);
netdev->netdev_ops = &atl2_netdev_ops;
- atl2_set_ethtool_ops(netdev);
+ SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
netdev->watchdog_timeo = 5 * HZ;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
@@ -2105,11 +2104,6 @@ static const struct ethtool_ops atl2_ethtool_ops = {
.set_eeprom = atl2_set_eeprom,
};
-static void atl2_set_ethtool_ops(struct net_device *netdev)
-{
- SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
-}
-
#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \
(((a) & 0xff00ff00) >> 8))
#define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 3f97d9fd0a71..85dbddd03722 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -60,6 +60,17 @@ config BCM63XX_ENET
This driver supports the ethernet MACs in the Broadcom 63xx
MIPS chipset family (BCM63XX).
+config BCMGENET
+ tristate "Broadcom GENET internal MAC support"
+ depends on OF
+ select MII
+ select PHYLIB
+ select FIXED_PHY if BCMGENET=y
+ select BCM7XXX_PHY
+ help
+ This driver supports the built-in Ethernet MACs found in the
+ Broadcom BCM7xxx Set Top Box family chipset.
+
config BNX2
tristate "Broadcom NetXtremeII support"
depends on PCI
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 68efa1a3fb88..fd639a0d4c7d 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_BCMGENET) += genet/
obj-$(CONFIG_BNX2) += bnx2.o
obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x/
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 8a7bf7dad898..05ba62589017 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1685,7 +1685,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+ start = u64_stats_fetch_begin_irq(&hwstat->syncp);
/* Convert HW stats into rtnl_link_stats64 stats. */
nstat->rx_packets = hwstat->rx_pkts;
@@ -1719,7 +1719,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
/* Carrier lost counter seems to be broken for some devices */
nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
- } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
return nstat;
}
@@ -2073,12 +2073,12 @@ static void b44_get_ethtool_stats(struct net_device *dev,
do {
data_src = &hwstat->tx_good_octets;
data_dst = data;
- start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+ start = u64_stats_fetch_begin_irq(&hwstat->syncp);
for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
*data_dst++ = *data_src++;
- } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
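The stats readers above move from the _bh to the _irq fetch helpers as part of a tree-wide conversion of u64_stats users; the _irq variants are safe regardless of the context the writer runs in, and on 64-bit kernels the sequence counter compiles away entirely. Reader-side pattern (sketch, reusing the hwstat/syncp names from b44):

	unsigned int start;
	u64 rx_packets;

	do {
		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
		rx_packets = hwstat->rx_pkts;
	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));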
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b9a5fb6400d3..a7d11f5565d6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1722,9 +1722,6 @@ static const struct net_device_ops bcm_enet_ops = {
.ndo_set_rx_mode = bcm_enet_set_multicast_list,
.ndo_do_ioctl = bcm_enet_ioctl,
.ndo_change_mtu = bcm_enet_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = bcm_enet_netpoll,
-#endif
};
/*
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6c9e1c9bdeb8..a8efb18e42fa 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2886,7 +2886,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = BNX2_NEXT_TX_BD(sw_cons);
tx_bytes += skb->len;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
tx_pkt++;
if (tx_pkt == budget)
break;
@@ -3133,6 +3133,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct l2_fhdr *rx_hdr;
int rx_pkt = 0, pg_ring_used = 0;
+ if (budget <= 0)
+ return rx_pkt;
+
hw_cons = bnx2_get_hw_rx_cons(bnapi);
sw_cons = rxr->rx_cons;
sw_prod = rxr->rx_prod;
@@ -6235,7 +6238,7 @@ bnx2_free_irq(struct bnx2 *bp)
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
- int i, total_vecs, rc;
+ int i, total_vecs;
struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
struct net_device *dev = bp->dev;
const int len = sizeof(bp->irq_tbl[0].name);
@@ -6258,16 +6261,9 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
#ifdef BCM_CNIC
total_vecs++;
#endif
- rc = -ENOSPC;
- while (total_vecs >= BNX2_MIN_MSIX_VEC) {
- rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
- if (rc <= 0)
- break;
- if (rc > 0)
- total_vecs = rc;
- }
-
- if (rc != 0)
+ total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
+ BNX2_MIN_MSIX_VEC, total_vecs);
+ if (total_vecs < 0)
return;
msix_vecs = total_vecs;
@@ -6640,7 +6636,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -6733,7 +6729,7 @@ dma_error:
PCI_DMA_TODEVICE);
}
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
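pci_enable_msix_range() replaces the old "call pci_enable_msix(), then retry with the positive return value" loop: it allocates anywhere between minvec and maxvec vectors in one call and returns the count actually granted, or a negative errno (-ENOSPC when even minvec cannot be met). A sketch of the contract as bnx2 now relies on it (example_enable_msix is a hypothetical wrapper):

static int example_enable_msix(struct bnx2 *bp, struct msix_entry *ent,
			       int maxvec)
{
	int nvec = pci_enable_msix_range(bp->pdev, ent,
					 BNX2_MIN_MSIX_VEC, maxvec);

	if (nvec < 0)
		return nvec;	/* e.g. -ENOSPC, never a "try fewer" hint */

	/* nvec may be less than maxvec; callers size their rings to it */
	return nvec;
}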
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 391f29ef6d2e..4d8f8aba0ea5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -26,8 +26,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.17-0"
-#define DRV_MODULE_RELDATE "2013/04/11"
+#define DRV_MODULE_VERSION "1.78.19-0"
+#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -75,13 +75,22 @@ enum bnx2x_int_mode {
#define BNX2X_MSG_DCB 0x8000000
/* regular debug print */
+#define DP_INNER(fmt, ...) \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__);
+
#define DP(__mask, fmt, ...) \
do { \
if (unlikely(bp->msg_enable & (__mask))) \
- pr_notice("[%s:%d(%s)]" fmt, \
- __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", \
- ##__VA_ARGS__); \
+ DP_INNER(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DP_AND(__mask, fmt, ...) \
+do { \
+ if (unlikely((bp->msg_enable & (__mask)) == __mask)) \
+ DP_INNER(fmt, ##__VA_ARGS__); \
} while (0)
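The difference between the two macros is the test: DP() fires when any bit of __mask is enabled in bp->msg_enable, while the new DP_AND() fires only when every bit is, which is what the statistics-completion print in bnx2x_eq_int() later in this patch relies on. Usage sketch (with bp in scope):

	/* printed if either SP or STATS logging is enabled */
	DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, "got statistics comp event\n");

	/* printed only if both SP and STATS logging are enabled */
	DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS), "got statistics comp event\n");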
#define DP_CONT(__mask, fmt, ...) \
@@ -1146,10 +1155,6 @@ struct bnx2x_port {
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
/* slow path */
-
-/* slow path work-queue */
-extern struct workqueue_struct *bnx2x_wq;
-
#define BNX2X_MAX_NUM_OF_VFS 64
#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
@@ -1261,6 +1266,7 @@ struct bnx2x_slowpath {
union {
struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data;
+ struct tpa_update_ramrod_data tpa_data;
} q_rdata;
union {
@@ -1392,7 +1398,7 @@ struct bnx2x_fw_stats_data {
};
/* Public slow path states */
-enum {
+enum sp_rtnl_flag {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
BNX2X_SP_RTNL_FAN_FAILURE,
@@ -1403,6 +1409,12 @@ enum {
BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_SP_RTNL_TX_STOP,
+ BNX2X_SP_RTNL_GET_DRV_VERSION,
+};
+
+enum bnx2x_iov_flag {
+ BNX2X_IOV_HANDLE_VF_MSG,
+ BNX2X_IOV_HANDLE_FLR,
};
struct bnx2x_prev_path_list {
@@ -1603,6 +1615,8 @@ struct bnx2x {
int mrrs;
struct delayed_work sp_task;
+ struct delayed_work iov_task;
+
atomic_t interrupt_occurred;
struct delayed_work sp_rtnl_task;
@@ -1693,6 +1707,10 @@ struct bnx2x {
struct bnx2x_slowpath *slowpath;
dma_addr_t slowpath_mapping;
+ /* Mechanism protecting the drv_info_to_mcp */
+ struct mutex drv_info_mutex;
+ bool drv_info_mng_owner;
+
/* Total number of FW statistics requests */
u8 fw_stats_num;
@@ -1882,6 +1900,9 @@ struct bnx2x {
/* operation indication for the sp_rtnl task */
unsigned long sp_rtnl_state;
+ /* Indication of the IOV tasks */
+ unsigned long iov_task_state;
+
/* DCBX Negotiation results */
struct dcbx_features dcbx_local_feat;
u32 dcbx_error;
@@ -2525,6 +2546,8 @@ enum {
void bnx2x_set_local_cmng(struct bnx2x *bp);
+void bnx2x_update_mng_version(struct bnx2x *bp);
+
#define MCPR_SCRATCH_BASE(bp) \
(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dbcff509dc3f..9261d5313b5b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -61,10 +61,14 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
- return bnx2x_num_queues ?
- min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, netif_get_num_default_rss_queues(),
- BNX2X_MAX_QUEUES(bp));
+ int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
+
+ /* Reduce memory usage in kdump environment by using only one queue */
+ if (reset_devices)
+ nq = 1;
+
+ nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
+ return nq;
}
/**
@@ -868,6 +872,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (unlikely(bp->panic))
return 0;
#endif
+ if (budget <= 0)
+ return rx_pkt;
bd_cons = fp->rx_bd_cons;
bd_prod = fp->rx_bd_prod;
@@ -1638,36 +1644,16 @@ int bnx2x_enable_msix(struct bnx2x *bp)
DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
msix_vec);
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
-
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
+ BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
/*
* reconfigure number of tx/rx queues according to available
* MSI-X vectors
*/
- if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
- /* how less vectors we will have? */
- int diff = msix_vec - rc;
-
- BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
-
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
-
- if (rc) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
- goto no_msix;
- }
- /*
- * decrease number of queues by number of unallocated entries
- */
- bp->num_ethernet_queues -= diff;
- bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
-
- BNX2X_DEV_INFO("New queue configuration set: %d\n",
- bp->num_queues);
- } else if (rc > 0) {
+ if (rc == -ENOSPC) {
/* Get by with single vector */
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
- if (rc) {
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
+ if (rc < 0) {
BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
rc);
goto no_msix;
@@ -1680,8 +1666,22 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
} else if (rc < 0) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
goto no_msix;
+ } else if (rc < msix_vec) {
+ /* how less vectors we will have? */
+ int diff = msix_vec - rc;
+
+ BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
+
+ /*
+ * decrease number of queues by number of unallocated entries
+ */
+ bp->num_ethernet_queues -= diff;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+ BNX2X_DEV_INFO("New queue configuration set: %d\n",
+ bp->num_queues);
}
bp->flags |= USING_MSIX_FLAG;
@@ -2234,8 +2234,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
sizeof(struct per_queue_stats) * num_queue_stats +
sizeof(struct stats_counter);
- BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ if (!bp->fw_stats)
+ goto alloc_mem_err;
/* Set shortcuts */
bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
@@ -2802,6 +2804,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (CNIC_ENABLED(bp))
bnx2x_load_cnic(bp);
+ if (IS_PF(bp))
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
/* mark driver is loaded in shmem2 */
u32 val;
@@ -3028,6 +3033,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false;
+ /* Clear driver version indication in shmem */
+ if (IS_PF(bp))
+ bnx2x_update_mng_version(bp);
+
/* Check if there are pending parity attentions. If there are - set
* RECOVERY_IN_PROGRESS.
*/
@@ -4370,14 +4379,17 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
if (!IS_FCOE_IDX(index)) {
/* status blocks */
- if (!CHIP_IS_E1x(bp))
- BNX2X_PCI_ALLOC(sb->e2_sb,
- &bnx2x_fp(bp, index, status_blk_mapping),
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(sb->e1x_sb,
- &bnx2x_fp(bp, index, status_blk_mapping),
- sizeof(struct host_hc_status_block_e1x));
+ if (!CHIP_IS_E1x(bp)) {
+ sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ if (!sb->e2_sb)
+ goto alloc_mem_err;
+ } else {
+ sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+ if (!sb->e1x_sb)
+ goto alloc_mem_err;
+ }
}
/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
@@ -4396,35 +4408,49 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
"allocating tx memory of fp %d cos %d\n",
index, cos);
- BNX2X_ALLOC(txdata->tx_buf_ring,
- sizeof(struct sw_tx_bd) * NUM_TX_BD);
- BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
- &txdata->tx_desc_mapping,
- sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
+ sizeof(struct sw_tx_bd),
+ GFP_KERNEL);
+ if (!txdata->tx_buf_ring)
+ goto alloc_mem_err;
+ txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ if (!txdata->tx_desc_ring)
+ goto alloc_mem_err;
}
}
/* Rx */
if (!skip_rx_queue(bp, index)) {
/* fastpath rx rings: rx_buf rx_desc rx_comp */
- BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
- sizeof(struct sw_rx_bd) * NUM_RX_BD);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
- &bnx2x_fp(bp, index, rx_desc_mapping),
- sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ bnx2x_fp(bp, index, rx_buf_ring) =
+ kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_buf_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_desc_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ if (!bnx2x_fp(bp, index, rx_desc_ring))
+ goto alloc_mem_err;
/* Seed all CQEs by 1s */
- BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
- &bnx2x_fp(bp, index, rx_comp_mapping),
- sizeof(struct eth_fast_path_rx_cqe) *
- NUM_RCQ_BD);
+ bnx2x_fp(bp, index, rx_comp_ring) =
+ BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
+ if (!bnx2x_fp(bp, index, rx_comp_ring))
+ goto alloc_mem_err;
/* SGE ring */
- BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
- sizeof(struct sw_rx_page) * NUM_RX_SGE);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
- &bnx2x_fp(bp, index, rx_sge_mapping),
- BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ bnx2x_fp(bp, index, rx_page_ring) =
+ kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
+ GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_page_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_sge_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ if (!bnx2x_fp(bp, index, rx_sge_ring))
+ goto alloc_mem_err;
/* RX BD ring */
bnx2x_set_next_page_rx_bd(fp);
@@ -4780,12 +4806,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
bnx2x_panic();
#endif
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
/* This allows the netif to be shutdown gracefully before resetting */
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4913,3 +4935,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
disable = disable ? 1 : (usec ? 0 : 1);
storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+ u32 verbose)
+{
+ smp_mb__before_clear_bit();
+ set_bit(flag, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+ flag);
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a89a40f88c25..3448cc033ca5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -21,6 +21,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/irq.h>
#include "bnx2x.h"
#include "bnx2x_sriov.h"
@@ -47,31 +48,26 @@ extern int bnx2x_num_queues;
} \
} while (0)
-#define BNX2X_PCI_ALLOC(x, y, size) \
- do { \
- x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
- (unsigned long long)(*y), x); \
- } while (0)
-
-#define BNX2X_PCI_FALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- memset((void *)x, 0xFFFFFFFF, size); \
- DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\
- (unsigned long long)(*y), x); \
- } while (0)
-
-#define BNX2X_ALLOC(x, size) \
- do { \
- x = kzalloc(size, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- } while (0)
+#define BNX2X_PCI_ALLOC(y, size) \
+({ \
+ void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ x; \
+})
+#define BNX2X_PCI_FALLOC(y, size) \
+({ \
+ void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) { \
+ memset(x, 0xff, size); \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ } \
+ x; \
+})
/*********************** Interfaces ****************************
* Functions that need to be implemented by each driver version
@@ -1324,4 +1320,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
int bnx2x_drain_tx_queues(struct bnx2x *bp);
void bnx2x_squeeze_objects(struct bnx2x *bp);
+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+ u32 verbose);
+
#endif /* BNX2X_CMN_H */
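With the conversion above, BNX2X_PCI_ALLOC()/BNX2X_PCI_FALLOC() become statement expressions that hand back the pointer instead of hiding a goto alloc_mem_err inside the macro, so the failure path is written out at every call site. Caller-side pattern (sketch; the local names are illustrative):

	void *virt;
	dma_addr_t phys;

	virt = BNX2X_PCI_ALLOC(&phys, BCM_PAGE_SIZE);
	if (!virt)
		goto alloc_mem_err;	/* the goto now lives in the caller */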
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index fdace204b054..97ea5421dd96 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
* as we are handling an attention on a work queue which must be
* flushed at some rtnl-locked contexts (e.g. if down)
*/
- if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
}
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
@@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
- set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
-
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 38fc794c1655..b6de05e3149b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2969,8 +2969,9 @@ static void bnx2x_self_test(struct net_device *dev,
#define IS_PORT_STAT(i) \
((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
-#define IS_MF_MODE_STAT(bp) \
- (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+#define HIDE_PORT_STAT(bp) \
+ ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
+ IS_VF(bp))
/* ethtool statistics are displayed for all regular ethernet queues and the
* fcoe L2 queue if not disabled
@@ -2992,7 +2993,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
BNX2X_NUM_Q_STATS;
} else
num_strings = 0;
- if (IS_MF_MODE_STAT(bp)) {
+ if (HIDE_PORT_STAT(bp)) {
for (i = 0; i < BNX2X_NUM_STATS; i++)
if (IS_FUNC_STAT(i))
num_strings++;
@@ -3047,7 +3048,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
strcpy(buf + (k + j)*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
@@ -3105,7 +3106,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
hw_stats = (u32 *)&bp->eth_stats;
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
if (bnx2x_stats_arr[i].size == 0) {
/* skip this counter */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 84aecdf06f7a..95dc36543548 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -87,7 +87,6 @@
(IRO[156].base + ((vfId) * IRO[156].m1))
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
(IRO[203].base + ((pfId) * IRO[203].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index cf1df8b62e2c..5ba8af50c84f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2003,6 +2003,23 @@ struct shmem_lfa {
#define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
};
+/* Used to support the NCSI "get OS driver version" request.
+ * On driver load the version value will be set;
+ * on driver unload a value of 0x0 will be set.
+ */
+struct os_drv_ver {
+#define DRV_VER_NOT_LOADED 0
+
+	/* personalities order is important */
+#define DRV_PERS_ETHERNET 0
+#define DRV_PERS_ISCSI 1
+#define DRV_PERS_FCOE 2
+
+/* shmem2 struct is constant; can't add more personalities here */
+#define MAX_DRV_PERS 3
+ u32 versions[MAX_DRV_PERS];
+};
+
struct ncsi_oem_fcoe_features {
u32 fcoe_features1;
#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
@@ -2217,6 +2234,18 @@ struct shmem2_region {
u32 reserved4; /* Offset 0x150 */
u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
#define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
+
+ u32 reserved5[2];
+ u32 reserved6[PORT_MAX];
+
+ /* driver version for each personality */
+ struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
+
+ /* Flag to the driver that PF's drv_info_host_addr buffer was read */
+ u32 mfw_drv_indication;
+
+ /* We use indication for each PF (0..3) */
+#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
};
@@ -2848,7 +2877,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 17
+#define BCM_5710_FW_REVISION_VERSION 19
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7d4382286457..a78edaccceee 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -120,7 +120,8 @@ static int debug;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");
-struct workqueue_struct *bnx2x_wq;
+static struct workqueue_struct *bnx2x_wq;
+struct workqueue_struct *bnx2x_iov_wq;
struct bnx2x_mac_vals {
u32 xmac_addr;
@@ -918,7 +919,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
u16 start = 0, end = 0;
u8 cos;
#endif
- if (disable_int)
+ if (IS_PF(bp) && disable_int)
bnx2x_int_disable(bp);
bp->stats_state = STATS_STATE_DISABLED;
@@ -929,33 +930,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
/* Indices */
/* Common */
- BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
- bp->def_idx, bp->def_att_idx, bp->attn_state,
- bp->spq_prod_idx, bp->stats_counter);
- BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
- bp->def_status_blk->atten_status_block.attn_bits,
- bp->def_status_blk->atten_status_block.attn_bits_ack,
- bp->def_status_blk->atten_status_block.status_block_id,
- bp->def_status_blk->atten_status_block.attn_bits_index);
- BNX2X_ERR(" def (");
- for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
- pr_cont("0x%x%s",
- bp->def_status_blk->sp_sb.index_values[i],
- (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
-
- for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
- *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
- i*sizeof(u32));
-
- pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
- sp_sb_data.igu_sb_id,
- sp_sb_data.igu_seg_id,
- sp_sb_data.p_func.pf_id,
- sp_sb_data.p_func.vnic_id,
- sp_sb_data.p_func.vf_id,
- sp_sb_data.p_func.vf_valid,
- sp_sb_data.state);
+ if (IS_PF(bp)) {
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ int data_size, cstorm_offset;
+
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ bp->def_idx, bp->def_att_idx, bp->attn_state,
+ bp->spq_prod_idx, bp->stats_counter);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ def_sb->atten_status_block.attn_bits,
+ def_sb->atten_status_block.attn_bits_ack,
+ def_sb->atten_status_block.status_block_id,
+ def_sb->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ def_sb->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+ data_size = sizeof(struct hc_sp_status_block_data) /
+ sizeof(u32);
+ cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+ for (i = 0; i < data_size; i++)
+ *((u32 *)&sp_sb_data + i) =
+ REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+ i * sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid,
+ sp_sb_data.state);
+ }
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -1013,6 +1022,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
pr_cont("0x%x%s",
fp->sb_index_values[j],
(j == loop - 1) ? ")" : " ");
+
+		/* VF cannot access FW reflection for status block */
+ if (IS_VF(bp))
+ continue;
+
/* fw sb data */
data_size = CHIP_IS_E1x(bp) ?
sizeof(struct hc_status_block_data_e1x) :
@@ -1064,16 +1078,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
}
#ifdef BNX2X_STOP_ON_ERROR
-
- /* event queue */
- BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
- for (i = 0; i < NUM_EQ_DESC; i++) {
- u32 *data = (u32 *)&bp->eq_ring[i].message.data;
-
- BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
- i, bp->eq_ring[i].message.opcode,
- bp->eq_ring[i].message.error);
- BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+ if (IS_PF(bp)) {
+ /* event queue */
+ BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+ for (i = 0; i < NUM_EQ_DESC; i++) {
+ u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+ BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+ i, bp->eq_ring[i].message.opcode,
+ bp->eq_ring[i].message.error);
+ BNX2X_ERR("data: %x %x %x\n",
+ data[0], data[1], data[2]);
+ }
}
/* Rings */
@@ -1140,8 +1156,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
}
}
#endif
- bnx2x_fw_dump(bp);
- bnx2x_mc_assert(bp);
+ if (IS_PF(bp)) {
+ bnx2x_fw_dump(bp);
+ bnx2x_mc_assert(bp);
+ }
BNX2X_ERR("end crash dump -----------------\n");
}
@@ -1814,6 +1832,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
drv_cmd = BNX2X_Q_CMD_EMPTY;
break;
+ case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+ DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ break;
+
default:
BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
command, fp->index);
@@ -1834,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
#else
return;
#endif
- /* SRIOV: reschedule any 'in_progress' operations */
- bnx2x_iov_sp_event(bp, cid, true);
smp_mb__before_atomic_inc();
atomic_inc(&bp->cq_spq_left);
@@ -3460,10 +3481,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
}
+#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
+#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
+
static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
enum drv_info_opcode op_code;
u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+ bool release = false;
+ int wait;
/* if drv_info version supported by MFW doesn't match - send NACK */
if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
@@ -3474,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
DRV_INFO_CONTROL_OP_CODE_SHIFT;
+ /* Must prevent other flows from accessing drv_info_to_mcp */
+ mutex_lock(&bp->drv_info_mutex);
+
memset(&bp->slowpath->drv_info_to_mcp, 0,
sizeof(union drv_info_to_mcp));
@@ -3490,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
default:
/* if op code isn't supported - send NACK */
bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
- return;
+ goto out;
}
/* if we got drv_info attn from MFW then these fields are defined in
@@ -3502,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+
+	/* Since management may want both this and get_driver_version,
+	 * wait until it notifies us that it has finished utilizing
+	 * the buffer.
+ */
+ if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
+ DP(BNX2X_MSG_MCP, "Management does not support indication\n");
+ } else if (!bp->drv_info_mng_owner) {
+ u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
+
+ for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
+ u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
+
+ /* Management is done; need to clear indication */
+ if (indication & bit) {
+ SHMEM2_WR(bp, mfw_drv_indication,
+ indication & ~bit);
+ release = true;
+ break;
+ }
+
+ msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
+ }
+ }
+ if (!release) {
+ DP(BNX2X_MSG_MCP, "Management did not release indication\n");
+ bp->drv_info_mng_owner = true;
+ }
+
+out:
+ mutex_unlock(&bp->drv_info_mutex);
+}
+
+static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
+{
+ u8 vals[4];
+ int i = 0;
+
+ if (bnx2x_format) {
+ i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
+ &vals[0], &vals[1], &vals[2], &vals[3]);
+ if (i > 0)
+ vals[0] -= '0';
+ } else {
+ i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
+ &vals[0], &vals[1], &vals[2], &vals[3]);
+ }
+
+ while (i < 4)
+ vals[i++] = 0;
+
+ return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
+}
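A worked example of the packing above, assuming sscanf() stops at the trailing "-0" of DRV_MODULE_VERSION "1.78.19-0": the bnx2x-format branch matches three fields, '7' (reduced to 7 by the '0' subtraction), 8 and 19, zero-fills the remaining byte, and returns (7 << 24) | (8 << 16) | (19 << 8) | 0 = 0x07081300.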
+
+void bnx2x_update_mng_version(struct bnx2x *bp)
+{
+ u32 iscsiver = DRV_VER_NOT_LOADED;
+ u32 fcoever = DRV_VER_NOT_LOADED;
+ u32 ethver = DRV_VER_NOT_LOADED;
+ int idx = BP_FW_MB_IDX(bp);
+ u8 *version;
+
+ if (!SHMEM2_HAS(bp, func_os_drv_ver))
+ return;
+
+ mutex_lock(&bp->drv_info_mutex);
+ /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */
+ if (bp->drv_info_mng_owner)
+ goto out;
+
+ if (bp->state != BNX2X_STATE_OPEN)
+ goto out;
+
+ /* Parse ethernet driver version */
+ ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+ if (!CNIC_LOADED(bp))
+ goto out;
+
+ /* Try getting storage driver version via cnic */
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+ bnx2x_drv_info_iscsi_stat(bp);
+ version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
+ iscsiver = bnx2x_update_mng_version_utility(version, false);
+
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+ bnx2x_drv_info_fcoe_stat(bp);
+ version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
+ fcoever = bnx2x_update_mng_version_utility(version, false);
+
+out:
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
+
+ mutex_unlock(&bp->drv_info_mutex);
+
+ DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
+ ethver, iscsiver, fcoever);
}
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
@@ -3644,10 +3773,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid));
- type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
- type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
- SPE_HDR_FUNCTION_ID);
+ /* In some cases (mainly SRIOV flows), cmd_type may already
+ * contain the func-id, so we add it here only if it's not
+ * already set.
+ */
+ if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+ SPE_HDR_CONN_TYPE;
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+ } else {
+ type = cmd_type;
+ }
spe->hdr.type = cpu_to_le16(type);
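
The hunk above only folds the PF function id into the SPE header when the caller has not already supplied one (as the SR-IOV paths now do). A small sketch of that conditional bit-field composition, using illustrative masks and shifts rather than the real SPE header layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout, not the real SPE header definition. */
#define CONN_TYPE_MASK   0x00ff
#define CONN_TYPE_SHIFT  0
#define FUNC_ID_MASK     0xff00
#define FUNC_ID_SHIFT    8

static uint16_t build_spe_type(uint16_t cmd_type, uint8_t pf_func_id)
{
	uint16_t type;

	if (!(cmd_type & FUNC_ID_MASK)) {
		/* Caller gave only a connection type: add our own func-id. */
		type  = (cmd_type << CONN_TYPE_SHIFT) & CONN_TYPE_MASK;
		type |= (pf_func_id << FUNC_ID_SHIFT) & FUNC_ID_MASK;
	} else {
		/* Caller (e.g. a PF acting for a VF) already set the func-id. */
		type = cmd_type;
	}
	return type;
}

int main(void)
{
	printf("%04x\n", (unsigned)build_spe_type(0x0003, 2));            /* 0203 */
	printf("%04x\n", (unsigned)build_spe_type(0x0003 | (5 << 8), 2)); /* 0503 */
	return 0;
}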
@@ -3878,10 +4015,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
* This is due to some boards consuming sufficient power when driver is
* up to overheat if fan fails.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
}
static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -4025,7 +4159,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
bnx2x_handle_drv_info_req(bp);
if (val & DRV_STATUS_VF_DISABLED)
- bnx2x_vf_handle_flr_event(bp);
+ bnx2x_schedule_iov_task(bp,
+ BNX2X_IOV_HANDLE_FLR);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -5216,14 +5351,14 @@ static void bnx2x_eq_int(struct bnx2x *bp)
/* handle eq element */
switch (opcode) {
case EVENT_RING_OPCODE_VF_PF_CHANNEL:
- DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
- bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
+ bnx2x_vf_mbx_schedule(bp,
+ &elem->message.data.vf_pf_event);
continue;
case EVENT_RING_OPCODE_STAT_QUERY:
- DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
- "got statistics comp event %d\n",
- bp->stats_comp++);
+ DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
+ "got statistics comp event %d\n",
+ bp->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
@@ -5273,6 +5408,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
break;
} else {
+ int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
"AFEX: ramrod completed FUNCTION_UPDATE\n");
f_obj->complete_cmd(bp, f_obj,
@@ -5282,12 +5419,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
* sp_rtnl task as all Queue SP operations
* should run under rtnl_lock.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, cmd, 0);
}
goto next_spqe;
@@ -5435,13 +5567,6 @@ static void bnx2x_sp_task(struct work_struct *work)
le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
- /* must be called after the EQ processing (since eq leads to sriov
- * ramrod completion flows).
- * This flow may have been scheduled by the arrival of a ramrod
- * completion, or by the sriov code rescheduling itself.
- */
- bnx2x_iov_sp_task(bp);
-
/* afex - poll to check if VIFSET_ACK should be sent to MFW */
if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
&bp->sp_state)) {
@@ -6005,18 +6130,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
{
int i;
- if (IS_MF_SI(bp))
- /*
- * In switch independent mode, the TSTORM needs to accept
- * packets that failed classification, since approximate match
- * mac addresses aren't written to NIG LLH
- */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
- else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
-
/* Zero this manually as its initialization is
currently missing in the initTool */
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -7989,19 +8102,25 @@ void bnx2x_free_mem(struct bnx2x *bp)
int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
- if (!CHIP_IS_E1x(bp))
+ if (!CHIP_IS_E1x(bp)) {
/* size = the status block + ramrod buffers */
- BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
- &bp->cnic_sb_mapping,
- sizeof(struct
- host_hc_status_block_e1x));
+ bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ if (!bp->cnic_sb.e2_sb)
+ goto alloc_mem_err;
+ } else {
+ bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+ if (!bp->cnic_sb.e1x_sb)
+ goto alloc_mem_err;
+ }
- if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
+ if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
/* allocate searcher T2 table, as it wasn't allocated before */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
/* write address to which L5 should insert its values */
bp->cnic_eth_dev.addr_drv_info_to_mcp =
@@ -8022,15 +8141,22 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
{
int i, allocated, context_size;
- if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
+ if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
/* allocate searcher T2 table */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
- BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
- sizeof(struct host_sp_status_block));
+ bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+ if (!bp->def_status_blk)
+ goto alloc_mem_err;
- BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
- sizeof(struct bnx2x_slowpath));
+ bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+ if (!bp->slowpath)
+ goto alloc_mem_err;
/* Allocate memory for CDU context:
* This memory is allocated separately and not in the generic ILT
@@ -8050,12 +8176,16 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
for (i = 0, allocated = 0; allocated < context_size; i++) {
bp->context[i].size = min(CDU_ILT_PAGE_SZ,
(context_size - allocated));
- BNX2X_PCI_ALLOC(bp->context[i].vcxt,
- &bp->context[i].cxt_mapping,
- bp->context[i].size);
+ bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ if (!bp->context[i].vcxt)
+ goto alloc_mem_err;
allocated += bp->context[i].size;
}
- BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
+ bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
+ GFP_KERNEL);
+ if (!bp->ilt->lines)
+ goto alloc_mem_err;
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
goto alloc_mem_err;
@@ -8064,11 +8194,15 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
goto alloc_mem_err;
/* Slow path ring */
- BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+ bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
+ if (!bp->spq)
+ goto alloc_mem_err;
/* EQ */
- BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
- BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ if (!bp->eq_ring)
+ goto alloc_mem_err;
return 0;
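
The allocation hunks above trade the old macro with a hidden goto for explicit NULL checks that jump to a single error label. A compact sketch of that goto-unwind pattern with generic malloc-based helpers (hypothetical names; the driver uses DMA-coherent allocations and bnx2x_free_mem()):

#include <stdlib.h>
#include <stdio.h>

struct ctx {
	void *status_blk;
	void *slowpath;
	void *spq;
};

static void free_all(struct ctx *c)
{
	/* Freeing NULL is a no-op, so a single teardown path suffices. */
	free(c->spq);
	free(c->slowpath);
	free(c->status_blk);
}

static int alloc_all(struct ctx *c)
{
	c->status_blk = calloc(1, 256);
	if (!c->status_blk)
		goto alloc_mem_err;

	c->slowpath = calloc(1, 1024);
	if (!c->slowpath)
		goto alloc_mem_err;

	c->spq = calloc(1, 4096);
	if (!c->spq)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	free_all(c);
	return -1;	/* -ENOMEM in the driver */
}

int main(void)
{
	struct ctx c = { 0 };
	int rc = alloc_all(&c);

	printf("alloc_all: %d\n", rc);
	if (!rc)
		free_all(&c);
	return 0;
}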
@@ -8849,6 +8983,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
synchronize_irq(bp->pdev->irq);
flush_workqueue(bnx2x_wq);
+ flush_workqueue(bnx2x_iov_wq);
while (bnx2x_func_get_state(bp, &bp->func_obj) !=
BNX2X_F_STATE_STARTED && tout--)
@@ -9774,6 +9909,10 @@ sp_rtnl_not_reset:
bnx2x_dcbx_resume_hw_tx(bp);
}
+ if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
+ &bp->sp_rtnl_state))
+ bnx2x_update_mng_version(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -11724,12 +11863,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
+ mutex_init(&bp->drv_info_mutex);
+ bp->drv_info_mng_owner = false;
spin_lock_init(&bp->stats_lock);
sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+ INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
if (IS_PF(bp)) {
rc = bnx2x_get_hwinfo(bp);
if (rc)
@@ -11771,6 +11913,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->disable_tpa = disable_tpa;
bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
+ /* Reduce memory usage in kdump environment by disabling TPA */
+ bp->disable_tpa |= reset_devices;
/* Set TPA flags */
if (bp->disable_tpa) {
@@ -11942,7 +12086,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
{
int mc_count = netdev_mc_count(bp->dev);
struct bnx2x_mcast_list_elem *mc_mac =
- kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+ kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
struct netdev_hw_addr *ha;
if (!mc_mac)
@@ -12064,11 +12208,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
return;
} else {
/* Schedule an SP task to handle rest of change */
- DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
+ NETIF_MSG_IFUP);
}
}
@@ -12101,11 +12242,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
/* configuring mcast to a vf involves sleeping (when we
* wait for the pf's response).
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp,
+ BNX2X_SP_RTNL_VFPF_MCAST, 0);
}
}
@@ -13356,11 +13494,18 @@ static int __init bnx2x_init(void)
pr_err("Cannot create workqueue\n");
return -ENOMEM;
}
+ bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
+ if (!bnx2x_iov_wq) {
+ pr_err("Cannot create iov workqueue\n");
+ destroy_workqueue(bnx2x_wq);
+ return -ENOMEM;
+ }
ret = pci_register_driver(&bnx2x_pci_driver);
if (ret) {
pr_err("Cannot register driver\n");
destroy_workqueue(bnx2x_wq);
+ destroy_workqueue(bnx2x_iov_wq);
}
return ret;
}
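
Creating the second (iov) workqueue adds one more resource to unwind in the module init failure paths: a later failure must destroy everything created before it. A minimal sketch of that init/unwind ordering with placeholder create/destroy helpers (not the real workqueue API):

#include <stdbool.h>
#include <stdio.h>

/* Placeholders standing in for the two workqueues and driver registration. */
static bool create_wq(const char *name)  { printf("create %s\n", name); return true; }
static void destroy_wq(const char *name) { printf("destroy %s\n", name); }
static int  register_driver(void)        { return 0; /* or an error code */ }

static int module_init_sketch(void)
{
	int ret;

	if (!create_wq("bnx2x"))
		return -1;

	if (!create_wq("bnx2x_iov")) {
		destroy_wq("bnx2x");		/* unwind the first queue */
		return -1;
	}

	ret = register_driver();
	if (ret) {
		/* registration failed: unwind both queues */
		destroy_wq("bnx2x");
		destroy_wq("bnx2x_iov");
	}
	return ret;
}

int main(void)
{
	return module_init_sketch();
}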
@@ -13372,6 +13517,7 @@ static void __exit bnx2x_cleanup(void)
pci_unregister_driver(&bnx2x_pci_driver);
destroy_workqueue(bnx2x_wq);
+ destroy_workqueue(bnx2x_iov_wq);
/* Free globally allocated resources */
list_for_each_safe(pos, q, &bnx2x_prev_list) {
@@ -13765,6 +13911,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
REG_WR(bp, scratch_offset + i,
*(host_addr + i/4));
}
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
break;
}
@@ -13782,6 +13929,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
}
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
break;
}
@@ -13887,6 +14035,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
rcu_assign_pointer(bp->cnic_ops, ops);
+ /* Schedule driver to read CNIC driver versions */
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0fb6ff2ac8e3..31297266b743 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->header.rule_cnt, p->rx_accept_flags,
p->tx_accept_flags);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
raw->clear_pending(raw);
return 0;
} else {
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
raw->clear_pending(raw);
return 0;
} else {
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
}
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -4158,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
rss_obj->config_rss = bnx2x_setup_rss;
}
-int validate_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac)
-{
- if (!vlan_mac->get_n_elements) {
- BNX2X_ERR("vlan mac object was not intialized\n");
- return -EINVAL;
- }
- return 0;
-}
-
/********************** Queue state object ***********************************/
/**
@@ -4587,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4615,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
bnx2x_q_fill_setup_data_e2(bp, params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4659,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4760,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_update_data(bp, o, update_params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
o->cids[cid_index], U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4813,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
return bnx2x_q_send_update(bp, params);
}
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_update_tpa_params *params,
+ struct tpa_update_ramrod_data *data)
+{
+ data->client_id = obj->cl_id;
+ data->complete_on_both_clients = params->complete_on_both_clients;
+ data->dont_verify_rings_pause_thr_flg =
+ params->dont_verify_thr;
+ data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+ data->max_sges_for_packet = params->max_sges_pkt;
+ data->max_tpa_queues = params->max_tpa_queues;
+ data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+ data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+ data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+ data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+ data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+ data->tpa_mode = params->tpa_mode;
+ data->update_ipv4 = params->update_ipv4;
+ data->update_ipv6 = params->update_ipv6;
+}
+
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
struct bnx2x_queue_state_params *params)
{
- /* TODO: Not implemented yet. */
- return -1;
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct tpa_update_ramrod_data *rdata =
+ (struct tpa_update_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_queue_update_tpa_params *update_tpa_params =
+ &params->params.update_tpa;
+ u16 type;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+ /* Add the function id inside the type, so that the sp post
+ * function doesn't automatically add the PF func-id; this is
+ * required for operations done by PFs on behalf of their VFs.
+ */
+ type = ETH_CONNECTION_TYPE |
+ ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+ o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), type);
}
static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5647,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
rdata->tx_switch_suspend = switch_update_params->suspend;
rdata->echo = SWITCH_UPDATE;
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5674,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
rdata->allowed_priorities = afex_update_params->allowed_priorities;
rdata->echo = AFEX_UPDATE;
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
DP(BNX2X_MSG_SP,
"afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5763,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
rdata->traffic_type_to_priority_cos[i] =
tx_start_params->traffic_type_to_priority_cos[i];
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 00d7f214a40a..80f6c790ed88 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
u8 cid_index;
};
+struct bnx2x_queue_update_tpa_params {
+ dma_addr_t sge_map;
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_pkt;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u8 _pad;
+
+ u16 sge_buff_sz;
+ u16 max_agg_sz;
+
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+};
+
struct rxq_pause_params {
u16 bd_th_lo;
u16 bd_th_hi;
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
/* Params according to the current command */
union {
struct bnx2x_queue_update_params update;
+ struct bnx2x_queue_update_tpa_params update_tpa;
struct bnx2x_queue_setup_params setup;
struct bnx2x_queue_init_params init;
struct bnx2x_queue_setup_tx_only_params tx_only;
@@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table);
-int validate_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac);
#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e42f48df6e94..5c523b32db70 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -102,82 +102,22 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
mmiowb();
barrier();
}
-/* VFOP - VF slow-path operation support */
-#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000
+static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ bool print_err)
+{
+ if (!bnx2x_leading_vfq(vf, sp_initialized)) {
+ if (print_err)
+ BNX2X_ERR("Slowpath objects not yet initialized!\n");
+ else
+ DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
+ return false;
+ }
+ return true;
+}
/* VFOP operations states */
-enum bnx2x_vfop_qctor_state {
- BNX2X_VFOP_QCTOR_INIT,
- BNX2X_VFOP_QCTOR_SETUP,
- BNX2X_VFOP_QCTOR_INT_EN
-};
-
-enum bnx2x_vfop_qdtor_state {
- BNX2X_VFOP_QDTOR_HALT,
- BNX2X_VFOP_QDTOR_TERMINATE,
- BNX2X_VFOP_QDTOR_CFCDEL,
- BNX2X_VFOP_QDTOR_DONE
-};
-
-enum bnx2x_vfop_vlan_mac_state {
- BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
- BNX2X_VFOP_VLAN_MAC_CLEAR,
- BNX2X_VFOP_VLAN_MAC_CHK_DONE,
- BNX2X_VFOP_MAC_CONFIG_LIST,
- BNX2X_VFOP_VLAN_CONFIG_LIST,
- BNX2X_VFOP_VLAN_CONFIG_LIST_0
-};
-
-enum bnx2x_vfop_qsetup_state {
- BNX2X_VFOP_QSETUP_CTOR,
- BNX2X_VFOP_QSETUP_VLAN0,
- BNX2X_VFOP_QSETUP_DONE
-};
-
-enum bnx2x_vfop_mcast_state {
- BNX2X_VFOP_MCAST_DEL,
- BNX2X_VFOP_MCAST_ADD,
- BNX2X_VFOP_MCAST_CHK_DONE
-};
-enum bnx2x_vfop_qflr_state {
- BNX2X_VFOP_QFLR_CLR_VLAN,
- BNX2X_VFOP_QFLR_CLR_MAC,
- BNX2X_VFOP_QFLR_TERMINATE,
- BNX2X_VFOP_QFLR_DONE
-};
-
-enum bnx2x_vfop_flr_state {
- BNX2X_VFOP_FLR_QUEUES,
- BNX2X_VFOP_FLR_HW
-};
-
-enum bnx2x_vfop_close_state {
- BNX2X_VFOP_CLOSE_QUEUES,
- BNX2X_VFOP_CLOSE_HW
-};
-
-enum bnx2x_vfop_rxmode_state {
- BNX2X_VFOP_RXMODE_CONFIG,
- BNX2X_VFOP_RXMODE_DONE
-};
-
-enum bnx2x_vfop_qteardown_state {
- BNX2X_VFOP_QTEARDOWN_RXMODE,
- BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
- BNX2X_VFOP_QTEARDOWN_CLR_MAC,
- BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
- BNX2X_VFOP_QTEARDOWN_QDTOR,
- BNX2X_VFOP_QTEARDOWN_DONE
-};
-
-enum bnx2x_vfop_rss_state {
- BNX2X_VFOP_RSS_CONFIG,
- BNX2X_VFOP_RSS_DONE
-};
-
-#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
-
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
struct bnx2x_queue_setup_params *setup_params,
@@ -221,7 +161,7 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
- struct bnx2x_vfop_qctor_params *p,
+ struct bnx2x_vf_queue_construct_params *p,
unsigned long q_type)
{
struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
@@ -290,191 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
}
}
-/* VFOP queue construction */
-static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_queue_create(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
- struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
- enum bnx2x_vfop_qctor_state state = vfop->state;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_QCTOR_INIT:
-
- /* has this queue already been opened? */
- if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
- BNX2X_Q_LOGICAL_STATE_ACTIVE) {
- DP(BNX2X_MSG_IOV,
- "Entered qctor but queue was already up. Aborting gracefully\n");
- goto op_done;
- }
-
- /* next state */
- vfop->state = BNX2X_VFOP_QCTOR_SETUP;
-
- q_params->cmd = BNX2X_Q_CMD_INIT;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_QCTOR_SETUP:
- /* next state */
- vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
-
- /* copy pre-prepared setup params to the queue-state params */
- vfop->op_p->qctor.qstate.params.setup =
- vfop->op_p->qctor.prep_qsetup;
-
- q_params->cmd = BNX2X_Q_CMD_SETUP;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
+ struct bnx2x_queue_state_params *q_params;
+ int rc = 0;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- case BNX2X_VFOP_QCTOR_INT_EN:
+ /* Prepare ramrod information */
+ q_params = &qctor->qstate;
+ q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
- /* enable interrupts */
- bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
- USTORM_ID, 0, IGU_INT_ENABLE, 0);
- goto op_done;
- default:
- bnx2x_vfop_default(state);
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
+ goto out;
}
-op_err:
- BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
- vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
-op_done:
- bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
- return;
-}
-static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ /* Run Queue 'construction' ramrods */
+ q_params->cmd = BNX2X_Q_CMD_INIT;
+ rc = bnx2x_queue_state_change(bp, q_params);
+ if (rc)
+ goto out;
- vfop->args.qctor.qid = qid;
- vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
+ memcpy(&q_params->params.setup, &qctor->prep_qsetup,
+ sizeof(struct bnx2x_queue_setup_params));
+ q_params->cmd = BNX2X_Q_CMD_SETUP;
+ rc = bnx2x_queue_state_change(bp, q_params);
+ if (rc)
+ goto out;
- bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
- bnx2x_vfop_qctor, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
- cmd->block);
- }
- return -ENOMEM;
+ /* enable interrupts */
+ bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
+ USTORM_ID, 0, IGU_INT_ENABLE, 0);
+out:
+ return rc;
}
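
bnx2x_vf_queue_create() replaces the old VFOP state machine with a straight-line sequence: skip if the queue is already active, then issue INIT and SETUP state changes, bailing out on the first error. A generic sketch of that early-exit command sequence (illustrative types and names, not the driver's):

#include <stdio.h>

enum q_state { Q_RESET, Q_INIT, Q_ACTIVE };
enum q_cmd   { Q_CMD_INIT, Q_CMD_SETUP };

struct queue { enum q_state state; };

/* Stand-in for bnx2x_queue_state_change(): apply one command, report rc. */
static int queue_state_change(struct queue *q, enum q_cmd cmd)
{
	switch (cmd) {
	case Q_CMD_INIT:  q->state = Q_INIT;   return 0;
	case Q_CMD_SETUP: q->state = Q_ACTIVE; return 0;
	}
	return -1;
}

static int queue_create(struct queue *q)
{
	int rc = 0;

	if (q->state == Q_ACTIVE) {
		/* already up: abort gracefully, nothing to do */
		goto out;
	}

	rc = queue_state_change(q, Q_CMD_INIT);
	if (rc)
		goto out;

	rc = queue_state_change(q, Q_CMD_SETUP);
	if (rc)
		goto out;

	/* interrupts would be enabled here */
out:
	return rc;
}

int main(void)
{
	struct queue q = { Q_RESET };

	printf("rc=%d state=%d\n", queue_create(&q), q.state);
	return 0;
}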
-/* VFOP queue destruction */
-static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
- struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
- enum bnx2x_vfop_qdtor_state state = vfop->state;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_QDTOR_HALT:
-
- /* has this queue already been stopped? */
- if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
- BNX2X_Q_LOGICAL_STATE_STOPPED) {
- DP(BNX2X_MSG_IOV,
- "Entered qdtor but queue was already stopped. Aborting gracefully\n");
-
- /* next state */
- vfop->state = BNX2X_VFOP_QDTOR_DONE;
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
- }
-
- /* next state */
- vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
-
- q_params->cmd = BNX2X_Q_CMD_HALT;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_QDTOR_TERMINATE:
- /* next state */
- vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
-
- q_params->cmd = BNX2X_Q_CMD_TERMINATE;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
+ enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
+ BNX2X_Q_CMD_TERMINATE,
+ BNX2X_Q_CMD_CFC_DEL};
+ struct bnx2x_queue_state_params q_params;
+ int rc, i;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- case BNX2X_VFOP_QDTOR_CFCDEL:
- /* next state */
- vfop->state = BNX2X_VFOP_QDTOR_DONE;
+ /* Prepare ramrod information */
+ memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
+ q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED) {
+ DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
+ goto out;
+ }
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
- BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
- vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
-op_done:
- case BNX2X_VFOP_QDTOR_DONE:
- /* invalidate the context */
- if (qdtor->cxt) {
- qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
- qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ /* Run Queue 'destruction' ramrods */
+ for (i = 0; i < ARRAY_SIZE(cmds); i++) {
+ q_params.cmd = cmds[i];
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
+ return rc;
}
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
}
-op_pending:
- return;
-}
-
-static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- struct bnx2x_queue_state_params *qstate =
- &vf->op_params.qctor.qstate;
-
- memset(qstate, 0, sizeof(*qstate));
- qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
-
- vfop->args.qdtor.qid = qid;
- vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
-
- bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
- bnx2x_vfop_qdtor, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
- cmd->block);
- } else {
- BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
- return -ENOMEM;
+out:
+ /* Clean Context */
+ if (bnx2x_vfq(vf, qid, cxt)) {
+ bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
+ bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
}
+
+ return 0;
}
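
Queue teardown now walks a fixed array of state-change commands (HALT, TERMINATE, CFC_DEL) and stops at the first failure. The same iterate-an-array pattern, reduced to a standalone sketch with stand-in names:

#include <stdio.h>

enum q_cmd { Q_CMD_HALT, Q_CMD_TERMINATE, Q_CMD_CFC_DEL };

/* Stand-in for the queue state machine; always succeeds here. */
static int queue_state_change(enum q_cmd cmd)
{
	printf("running command %d\n", cmd);
	return 0;
}

static int queue_destroy(void)
{
	static const enum q_cmd cmds[] = {
		Q_CMD_HALT, Q_CMD_TERMINATE, Q_CMD_CFC_DEL,
	};
	unsigned int i;
	int rc;

	for (i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++) {
		rc = queue_state_change(cmds[i]);
		if (rc) {
			fprintf(stderr, "command %d failed: %d\n", cmds[i], rc);
			return rc;
		}
	}
	return 0;
}

int main(void)
{
	return queue_destroy();
}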
static void
@@ -496,751 +330,293 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
BP_VFDB(bp)->vf_sbs_pool++;
}
-/* VFOP MAC/VLAN helpers */
-static inline void bnx2x_vfop_credit(struct bnx2x *bp,
- struct bnx2x_vfop *vfop,
- struct bnx2x_vlan_mac_obj *obj)
+static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *obj,
+ atomic_t *counter)
{
- struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
-
- /* update credit only if there is no error
- * and a valid credit counter
- */
- if (!vfop->rc && args->credit) {
- struct list_head *pos;
- int read_lock;
- int cnt = 0;
-
- read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
- if (read_lock)
- DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
+ struct list_head *pos;
+ int read_lock;
+ int cnt = 0;
- list_for_each(pos, &obj->head)
- cnt++;
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+ if (read_lock)
+ DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
- if (!read_lock)
- bnx2x_vlan_mac_h_read_unlock(bp, obj);
+ list_for_each(pos, &obj->head)
+ cnt++;
- atomic_set(args->credit, cnt);
- }
-}
-
-static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
- struct bnx2x_vfop_filter *pos,
- struct bnx2x_vlan_mac_data *user_req)
-{
- user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
- BNX2X_VLAN_MAC_DEL;
+ if (!read_lock)
+ bnx2x_vlan_mac_h_read_unlock(bp, obj);
- switch (pos->type) {
- case BNX2X_VFOP_FILTER_MAC:
- memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
- break;
- case BNX2X_VFOP_FILTER_VLAN:
- user_req->u.vlan.vlan = pos->vid;
- break;
- default:
- BNX2X_ERR("Invalid filter type, skipping\n");
- return 1;
- }
- return 0;
+ atomic_set(counter, cnt);
}
-static int bnx2x_vfop_config_list(struct bnx2x *bp,
- struct bnx2x_vfop_filters *filters,
- struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
+static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, bool drv_only, bool mac)
{
- struct bnx2x_vfop_filter *pos, *tmp;
- struct list_head rollback_list, *filters_list = &filters->head;
- struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
- int rc = 0, cnt = 0;
-
- INIT_LIST_HEAD(&rollback_list);
-
- list_for_each_entry_safe(pos, tmp, filters_list, link) {
- if (bnx2x_vfop_set_user_req(bp, pos, user_req))
- continue;
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
+ int rc;
- rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- if (rc >= 0) {
- cnt += pos->add ? 1 : -1;
- list_move(&pos->link, &rollback_list);
- rc = 0;
- } else if (rc == -EEXIST) {
- rc = 0;
- } else {
- BNX2X_ERR("Failed to add a new vlan_mac command\n");
- break;
- }
- }
+ DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
+ mac ? "MACs" : "VLANs");
- /* rollback if error or too many rules added */
- if (rc || cnt > filters->add_cnt) {
- BNX2X_ERR("error or too many rules added. Performing rollback\n");
- list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
- pos->add = !pos->add; /* reverse op */
- bnx2x_vfop_set_user_req(bp, pos, user_req);
- bnx2x_config_vlan_mac(bp, vlan_mac);
- list_del(&pos->link);
- }
- cnt = 0;
- if (!rc)
- rc = -EINVAL;
+ /* Prepare ramrod params */
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+ if (mac) {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ } else {
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
}
- filters->add_cnt = cnt;
- return rc;
-}
-
-/* VFOP set VLAN/MAC */
-static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
- struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
- struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
-
- enum bnx2x_vfop_vlan_mac_state state = vfop->state;
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- bnx2x_vfop_reset_wq(vf);
-
- switch (state) {
- case BNX2X_VFOP_VLAN_MAC_CLEAR:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- /* do delete */
- vfop->rc = obj->delete_all(bp, obj,
- &vlan_mac->user_req.vlan_mac_flags,
- &vlan_mac->ramrod_flags);
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- /* do config */
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- if (vfop->rc == -EEXIST)
- vfop->rc = 0;
+ ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
- vfop->rc = !!obj->raw.check_pending(&obj->raw);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
- case BNX2X_VFOP_MAC_CONFIG_LIST:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- /* do list config */
- vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
- if (vfop->rc)
- goto op_err;
-
- set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_VLAN_CONFIG_LIST:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- /* do list config */
- vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
- if (!vfop->rc) {
- set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- }
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
- default:
- bnx2x_vfop_default(state);
+ /* Start deleting */
+ rc = ramrod.vlan_mac_obj->delete_all(bp,
+ ramrod.vlan_mac_obj,
+ &ramrod.user_req.vlan_mac_flags,
+ &ramrod.ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("Failed to delete all %s\n",
+ mac ? "MACs" : "VLANs");
+ return rc;
}
-op_err:
- BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
-op_done:
- kfree(filters);
- bnx2x_vfop_credit(bp, vfop, obj);
- bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
- return;
-}
-
-struct bnx2x_vfop_vlan_mac_flags {
- bool drv_only;
- bool dont_consume;
- bool single_cmd;
- bool add;
-};
-
-static void
-bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
- struct bnx2x_vfop_vlan_mac_flags *flags)
-{
- struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
-
- memset(ramrod, 0, sizeof(*ramrod));
- /* ramrod flags */
- if (flags->drv_only)
- set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
- if (flags->single_cmd)
- set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
+ /* Clear the vlan counters */
+ if (!mac)
+ atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
- /* mac_vlan flags */
- if (flags->dont_consume)
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
-
- /* cmd */
- ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
-}
-
-static inline void
-bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
- struct bnx2x_vfop_vlan_mac_flags *flags)
-{
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
- set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
+ return 0;
}
-static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, bool drv_only)
+static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_mac_vlan_filter *filter,
+ bool drv_only)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
int rc;
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = NULL, /* single */
- .credit = NULL, /* consume credit */
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = true,
- .add = false /* don't care */,
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
-
- /* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
- if (rc)
- return rc;
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
-
- /* set extra args */
- vfop->args.filters = filters;
-
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
+ DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
+ vf->abs_vfid, filter->add ? "Adding" : "Deleting",
+ filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+
+ /* Prepare ramrod params */
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+ if (filter->type == BNX2X_VF_FILTER_VLAN) {
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ ramrod.user_req.u.vlan.vlan = filter->vid;
+ } else {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+ }
+ ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
+ BNX2X_VLAN_MAC_DEL;
+
+ /* Verify there are available vlan credits */
+ if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
+ (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
+ vf_vlan_rules_cnt(vf))) {
+ BNX2X_ERR("No credits for vlan\n");
+ return -ENOMEM;
}
- return -ENOMEM;
-}
-
-int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *macs,
- int qid, bool drv_only)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = macs,
- .credit = NULL, /* consume credit */
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = false,
- .add = false, /* don't care since only the items in the
- * filters list affect the sp operation,
- * not the list itself
- */
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
-
- /* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
- if (rc)
- return rc;
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+
+ /* Add/Remove the filter */
+ rc = bnx2x_config_vlan_mac(bp, &ramrod);
+ if (rc && rc != -EEXIST) {
+ BNX2X_ERR("Failed to %s %s\n",
+ filter->add ? "add" : "delete",
+ filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
+ "VLAN");
+ return rc;
+ }
- /* set extra args */
- filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
- vfop->args.filters = filters;
+ /* Update the vlan counters */
+ if (filter->type == BNX2X_VF_FILTER_VLAN)
+ bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
+ &bnx2x_vfq(vf, qid, vlan_count));
- bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
- }
- return -ENOMEM;
+ return 0;
}
-static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, u16 vid, bool add)
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mac_vlan_filters *filters,
+ int qid, bool drv_only)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
+ int rc = 0, i;
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = NULL, /* single command */
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = false,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = true,
- .add = add,
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
- ramrod->user_req.u.vlan.vlan = vid;
-
- /* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- /* set extra args */
- vfop->args.filters = filters;
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
+ /* Prepare ramrod params */
+ for (i = 0; i < filters->count; i++) {
+ rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
+ &filters->filters[i], drv_only);
+ if (rc)
+ break;
}
- return -ENOMEM;
-}
-
-static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, bool drv_only)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = NULL, /* single command */
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = true,
- .add = false, /* don't care */
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
-
- /* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ /* Rollback if needed */
+ if (i != filters->count) {
+ BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
+ i, filters->count + 1);
+ while (--i >= 0) {
+ filters->filters[i].add = !filters->filters[i].add;
+ bnx2x_vf_mac_vlan_config(bp, vf, qid,
+ &filters->filters[i],
+ drv_only);
+ }
+ }
- /* set extra args */
- vfop->args.filters = filters;
+ /* It's our responsibility to free the filters */
+ kfree(filters);
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
- }
- return -ENOMEM;
+ return rc;
}
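
The list-configuration helper applies each filter in turn and, if one fails, rolls back by re-applying the already-configured entries with their add/delete sense inverted. A self-contained sketch of that rollback loop over a plain array (hypothetical filter type; no driver structures):

#include <stdbool.h>
#include <stdio.h>

struct filter {
	int  id;
	bool add;	/* true = add, false = delete */
};

/* Stand-in for bnx2x_vf_mac_vlan_config(): fail on a chosen id to
 * demonstrate the rollback path.
 */
static int apply_filter(struct filter *f)
{
	if (f->id == 3 && f->add)
		return -1;
	printf("%s filter %d\n", f->add ? "add" : "del", f->id);
	return 0;
}

static int config_list(struct filter *filters, int count)
{
	int rc = 0, i;

	for (i = 0; i < count; i++) {
		rc = apply_filter(&filters[i]);
		if (rc)
			break;
	}

	if (i != count) {
		/* Undo what was already applied, newest first. */
		while (--i >= 0) {
			filters[i].add = !filters[i].add;
			apply_filter(&filters[i]);
		}
	}
	return rc;
}

int main(void)
{
	struct filter list[] = {
		{ 1, true }, { 2, true }, { 3, true },
	};

	return config_list(list, 3) ? 1 : 0;
}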
-int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *vlans,
- int qid, bool drv_only)
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
int rc;
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = vlans,
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = false,
- .add = false, /* don't care */
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
-
- /* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
-
- /* set extra args */
- filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
- atomic_read(filters.credit);
-
- vfop->args.filters = filters;
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
- }
- return -ENOMEM;
-}
-
-/* VFOP queue setup (queue constructor + set vlan 0) */
-static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- int qid = vfop->args.qctor.qid;
- enum bnx2x_vfop_qsetup_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_qsetup,
- .block = false,
- };
-
- if (vfop->rc < 0)
+ rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
+ if (rc)
goto op_err;
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+ /* Configure vlan0 for leading queue */
+ if (!qid) {
+ struct bnx2x_vf_mac_vlan_filter filter;
- switch (state) {
- case BNX2X_VFOP_QSETUP_CTOR:
- /* init the queue ctor command */
- vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
- vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
- if (vfop->rc)
+ memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
+ filter.type = BNX2X_VF_FILTER_VLAN;
+ filter.add = true;
+ filter.vid = 0;
+ rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
+ if (rc)
goto op_err;
- return;
-
- case BNX2X_VFOP_QSETUP_VLAN0:
- /* skip if non-leading or FPGA/EMU*/
- if (qid)
- goto op_done;
+ }
- /* init the queue set-vlan command (for vlan 0) */
- vfop->state = BNX2X_VFOP_QSETUP_DONE;
- vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
- if (vfop->rc)
- goto op_err;
- return;
+ /* Schedule the configuration of any pending vlan filters */
+ vf->cfg_flags |= VF_CFG_VLAN;
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_MSG_IOV);
+ return 0;
op_err:
- BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
-op_done:
- case BNX2X_VFOP_QSETUP_DONE:
- vf->cfg_flags |= VF_CFG_VLAN;
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
- }
+ BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+ return rc;
}
-int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
+static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
- if (vfop) {
- vfop->args.qctor.qid = qid;
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
- bnx2x_vfop_qsetup, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
- cmd->block);
+ /* If needed, clean the filtering database */
+ if ((qid == LEADING_IDX) &&
+ bnx2x_validate_vf_sp_objs(bp, vf, false)) {
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+ if (rc)
+ goto op_err;
}
- return -ENOMEM;
-}
-
-/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
-static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- int qid = vfop->args.qx.qid;
- enum bnx2x_vfop_qflr_state state = vfop->state;
- struct bnx2x_queue_state_params *qstate;
- struct bnx2x_vfop_cmd cmd;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
- DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
+ /* Terminate queue */
+ if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
+ struct bnx2x_queue_state_params qstate;
- cmd.done = bnx2x_vfop_qflr;
- cmd.block = false;
-
- switch (state) {
- case BNX2X_VFOP_QFLR_CLR_VLAN:
- /* vlan-clear-all: driver-only, don't consume credit */
- vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
-
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
- /* the vlan_mac vfop will re-schedule us */
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
- qid, true);
- if (vfop->rc)
- goto op_err;
- return;
-
- } else {
- /* need to reschedule ourselves */
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
- }
-
- case BNX2X_VFOP_QFLR_CLR_MAC:
- /* mac-clear-all: driver only consume credit */
- vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
- /* the vlan_mac vfop will re-schedule us */
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
- qid, true);
- if (vfop->rc)
- goto op_err;
- return;
-
- } else {
- /* need to reschedule ourselves */
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
- }
-
- case BNX2X_VFOP_QFLR_TERMINATE:
- qstate = &vfop->op_p->qctor.qstate;
- memset(qstate , 0, sizeof(*qstate));
- qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
- vfop->state = BNX2X_VFOP_QFLR_DONE;
-
- DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
- vf->abs_vfid, qstate->q_obj->state);
-
- if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
- qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
- qstate->cmd = BNX2X_Q_CMD_TERMINATE;
- vfop->rc = bnx2x_queue_state_change(bp, qstate);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
- } else {
- goto op_done;
- }
-
-op_err:
- BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
- vf->abs_vfid, qid, vfop->rc);
-op_done:
- case BNX2X_VFOP_QFLR_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
+ memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+ qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
+ qstate.cmd = BNX2X_Q_CMD_TERMINATE;
+ set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
+ rc = bnx2x_queue_state_change(bp, &qstate);
+ if (rc)
+ goto op_err;
}
-op_pending:
- return;
-}
-
-static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
- bnx2x_vfop_qflr, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
- cmd->block);
- }
- return -ENOMEM;
+ return 0;
+op_err:
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+ return rc;
}
-/* VFOP multi-casts */
-static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
- struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
- struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
- enum bnx2x_vfop_mcast_state state = vfop->state;
- int i;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
+ struct bnx2x_mcast_list_elem *mc = NULL;
+ struct bnx2x_mcast_ramrod_params mcast;
+ int rc, i;
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_MCAST_DEL:
- /* clear existing mcasts */
- vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
- : BNX2X_VFOP_MCAST_CHK_DONE;
- mcast->mcast_list_len = vf->mcast_list_len;
- vf->mcast_list_len = args->mc_num;
- vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_MCAST_ADD:
- if (raw->check_pending(raw))
- goto op_pending;
-
- /* update mcast list on the ramrod params */
- INIT_LIST_HEAD(&mcast->mcast_list);
- for (i = 0; i < args->mc_num; i++)
- list_add_tail(&(args->mc[i].link),
- &mcast->mcast_list);
- mcast->mcast_list_len = args->mc_num;
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- /* add new mcasts */
- vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
- vfop->rc = bnx2x_config_mcast(bp, mcast,
- BNX2X_MCAST_CMD_ADD);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
- case BNX2X_VFOP_MCAST_CHK_DONE:
- vfop->rc = raw->check_pending(raw) ? 1 : 0;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
- default:
- bnx2x_vfop_default(state);
+ /* Prepare Multicast command */
+ memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
+ mcast.mcast_obj = &vf->mcast_obj;
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
+ if (mc_num) {
+ mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
+ GFP_KERNEL);
+ if (!mc) {
+ BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n");
+ return -ENOMEM;
+ }
}
-op_err:
- BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
-op_done:
- kfree(args->mc);
- bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
- return;
-}
-int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- bnx2x_mac_addr_t *mcasts,
- int mcast_num, bool drv_only)
-{
- struct bnx2x_vfop *vfop = NULL;
- size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
- struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
- NULL;
-
- if (!mc_sz || mc) {
- vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- int i;
- struct bnx2x_mcast_ramrod_params *ramrod =
- &vf->op_params.mcast;
-
- /* set ramrod params */
- memset(ramrod, 0, sizeof(*ramrod));
- ramrod->mcast_obj = &vf->mcast_obj;
- if (drv_only)
- set_bit(RAMROD_DRV_CLR_ONLY,
- &ramrod->ramrod_flags);
-
- /* copy mcasts pointers */
- vfop->args.mc_list.mc_num = mcast_num;
- vfop->args.mc_list.mc = mc;
- for (i = 0; i < mcast_num; i++)
- mc[i].mac = mcasts[i];
-
- bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
- bnx2x_vfop_mcast, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
- cmd->block);
- } else {
+ /* clear existing mcasts */
+ mcast.mcast_list_len = vf->mcast_list_len;
+ vf->mcast_list_len = mc_num;
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+ if (rc) {
+ BNX2X_ERR("Failed to remove multicasts\n");
+ if (mc)
kfree(mc);
- }
+ return rc;
}
- return -ENOMEM;
-}
-
-/* VFOP rx-mode */
-static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
- enum bnx2x_vfop_rxmode_state state = vfop->state;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_RXMODE_CONFIG:
- /* next state */
- vfop->state = BNX2X_VFOP_RXMODE_DONE;
+ /* update mcast list on the ramrod params */
+ if (mc_num) {
+ INIT_LIST_HEAD(&mcast.mcast_list);
+ for (i = 0; i < mc_num; i++) {
+ mc[i].mac = mcasts[i];
+ list_add_tail(&mc[i].link,
+ &mcast.mcast_list);
+ }
- /* record the accept flags in vfdb so hypervisor can modify them
- * if necessary
- */
- bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
- ramrod->rx_accept_flags;
- vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
- BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
-op_done:
- case BNX2X_VFOP_RXMODE_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
+ /* add new mcasts */
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+ if (rc)
+ BNX2X_ERR("Faled to add multicasts\n");
+ kfree(mc);
}
-op_pending:
- return;
+
+ return rc;
}
static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
@@ -1268,118 +644,56 @@ static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}
-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, unsigned long accept_flags)
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, unsigned long accept_flags)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- struct bnx2x_rx_mode_ramrod_params *ramrod =
- &vf->op_params.rx_mode;
+ struct bnx2x_rx_mode_ramrod_params ramrod;
- bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
- bnx2x_vfop_rxmode, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
- cmd->block);
- }
- return -ENOMEM;
+ bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+ vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
+ return bnx2x_config_rx_mode(bp, &ramrod);
}
-/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
- * queue destructor)
- */
-static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- int qid = vfop->args.qx.qid;
- enum bnx2x_vfop_qteardown_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd;
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- cmd.done = bnx2x_vfop_qdown;
- cmd.block = false;
-
- switch (state) {
- case BNX2X_VFOP_QTEARDOWN_RXMODE:
- /* Drop all */
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
- vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
- if (vfop->rc)
- goto op_err;
- return;
-
- case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
- /* vlan-clear-all: don't consume credit */
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
- if (vfop->rc)
- goto op_err;
- return;
-
- case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
- /* mac-clear-all: consume credit */
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
- if (vfop->rc)
- goto op_err;
- return;
+ int rc;
- case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
- vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
- if (vfop->rc)
- goto op_err;
- return;
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- case BNX2X_VFOP_QTEARDOWN_QDTOR:
- /* run the queue destruction flow */
- DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
- vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
- DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
- vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
- DP(BNX2X_MSG_IOV, "returned from cmd\n");
- if (vfop->rc)
+ /* Remove all classification configuration for leading queue */
+ if (qid == LEADING_IDX) {
+ rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
+ if (rc)
goto op_err;
- return;
-op_err:
- BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
- vf->abs_vfid, qid, vfop->rc);
-
- case BNX2X_VFOP_QTEARDOWN_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
- }
-}
-int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- /* for non leading queues skip directly to qdown sate */
- if (vfop) {
- vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(qid == LEADING_IDX ?
- BNX2X_VFOP_QTEARDOWN_RXMODE :
- BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
- cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
- cmd->block);
+ /* Remove filtering if feasible */
+ if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false, false);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false, true);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
+ if (rc)
+ goto op_err;
+ }
}
- return -ENOMEM;
+ /* Destroy queue */
+ rc = bnx2x_vf_queue_destroy(bp, vf, qid);
+ if (rc)
+ goto op_err;
+ return rc;
+op_err:
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n",
+ vf->abs_vfid, qid, rc);
+ return rc;
}
/* VF enable primitives
@@ -1579,120 +893,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
bnx2x_tx_hw_flushed(bp, poll_cnt);
}
-static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
- enum bnx2x_vfop_flr_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_flr,
- .block = false,
- };
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+ int rc, i;
- switch (state) {
- case BNX2X_VFOP_FLR_QUEUES:
- /* the cleanup operations are valid if and only if the VF
- * was first acquired.
- */
- if (++(qx->qid) < vf_rxq_count(vf)) {
- vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
- qx->qid);
- if (vfop->rc)
- goto op_err;
- return;
- }
- /* remove multicasts */
- vfop->state = BNX2X_VFOP_FLR_HW;
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
- 0, true);
- if (vfop->rc)
- goto op_err;
- return;
- case BNX2X_VFOP_FLR_HW:
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- /* dispatch final cleanup and wait for HW queues to flush */
- bnx2x_vf_flr_clnup_hw(bp, vf);
+ /* the cleanup operations are valid if and only if the VF
+ * was first acquired.
+ */
+ for (i = 0; i < vf_rxq_count(vf); i++) {
+ rc = bnx2x_vf_queue_flr(bp, vf, i);
+ if (rc)
+ goto out;
+ }
- /* release VF resources */
- bnx2x_vf_free_resc(bp, vf);
+ /* remove multicasts */
+ bnx2x_vf_mcast(bp, vf, NULL, 0, true);
- /* re-open the mailbox */
- bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ /* dispatch final cleanup and wait for HW queues to flush */
+ bnx2x_vf_flr_clnup_hw(bp, vf);
- goto op_done;
- default:
- bnx2x_vfop_default(state);
- }
-op_err:
- BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
- vf->flr_clnup_stage = VF_FLR_ACK;
- bnx2x_vfop_end(bp, vf, vfop);
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
-}
+ /* release VF resources */
+ bnx2x_vf_free_resc(bp, vf);
-static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- vfop_handler_t done)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- vfop->args.qx.qid = -1; /* loop */
- bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
- bnx2x_vfop_flr, done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
- }
- return -ENOMEM;
+ /* re-open the mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ return;
+out:
+ BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
+ vf->abs_vfid, i, rc);
}
-static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
- int i = prev_vf ? prev_vf->index + 1 : 0;
struct bnx2x_virtf *vf;
+ int i;
- /* find next VF to cleanup */
-next_vf_to_clean:
- for (;
- i < BNX2X_NR_VIRTFN(bp) &&
- (bnx2x_vf(bp, i, state) != VF_RESET ||
- bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
- i++)
- ;
+ for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
+ /* VF should be RESET & in FLR cleanup states */
+ if (bnx2x_vf(bp, i, state) != VF_RESET ||
+ !bnx2x_vf(bp, i, flr_clnup_stage))
+ continue;
- DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
- BNX2X_NR_VIRTFN(bp));
+ DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
+ i, BNX2X_NR_VIRTFN(bp));
- if (i < BNX2X_NR_VIRTFN(bp)) {
vf = BP_VF(bp, i);
/* lock the vf pf channel */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
/* invoke the VF FLR SM */
- if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
- BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
- vf->abs_vfid);
+ bnx2x_vf_flr(bp, vf);
- /* mark the VF to be ACKED and continue */
- vf->flr_clnup_stage = VF_FLR_ACK;
- goto next_vf_to_clean;
- }
- return;
- }
-
- /* we are done, update vf records */
- for_each_vf(bp, i) {
- vf = BP_VF(bp, i);
-
- if (vf->flr_clnup_stage != VF_FLR_ACK)
- continue;
-
- vf->flr_clnup_stage = VF_FLR_EPILOG;
+ /* mark the VF to be ACKED and continue */
+ vf->flr_clnup_stage = false;
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}
/* Acknowledge the handled VFs.
@@ -1742,7 +999,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
if (reset) {
/* set as reset and ready for cleanup */
vf->state = VF_RESET;
- vf->flr_clnup_stage = VF_FLR_CLN;
+ vf->flr_clnup_stage = true;
DP(BNX2X_MSG_IOV,
"Initiating Final cleanup for VF %d\n",
@@ -1751,7 +1008,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
}
/* do the FLR cleanup for all marked VFs*/
- bnx2x_vf_flr_clnup(bp, NULL);
+ bnx2x_vf_flr_clnup(bp);
}
/* IOV global initialization routines */
@@ -2018,7 +1275,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
bnx2x_vf(bp, i, index) = i;
bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
bnx2x_vf(bp, i, state) = VF_FREE;
- INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
mutex_init(&bnx2x_vf(bp, i, op_mutex));
bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
}
@@ -2039,6 +1295,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
goto failed;
}
+ /* Prepare the VFs event synchronization mechanism */
+ mutex_init(&bp->vfdb->event_mutex);
+
return 0;
failed:
DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -2117,7 +1376,9 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
if (cxt->size) {
- BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
+ cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
+ if (!cxt->addr)
+ goto alloc_mem_err;
} else {
cxt->addr = NULL;
cxt->mapping = 0;
@@ -2127,20 +1388,28 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
/* allocate vfs ramrods dma memory - client_init and set_mac */
tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
- BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
- tot_size);
+ BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
+ tot_size);
+ if (!BP_VFDB(bp)->sp_dma.addr)
+ goto alloc_mem_err;
BP_VFDB(bp)->sp_dma.size = tot_size;
/* allocate mailboxes */
tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
- BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
- tot_size);
+ BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_MBX_DMA(bp)->addr)
+ goto alloc_mem_err;
+
BP_VF_MBX_DMA(bp)->size = tot_size;
/* allocate local bulletin boards */
tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
- BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
- &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
+ BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_BULLETIN_DMA(bp)->addr)
+ goto alloc_mem_err;
+
BP_VF_BULLETIN_DMA(bp)->size = tot_size;
return 0;
@@ -2166,6 +1435,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_sp_map(bp, vf, q_data),
q_type);
+ /* sp indication is set only when vlan/mac/etc. are initialized */
+ q->sp_initialized = false;
+
DP(BNX2X_MSG_IOV,
"initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
vf->abs_vfid, q->sp_obj.func_id, q->cid);
@@ -2269,7 +1541,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
/* release all the VFs */
for_each_vf(bp, i)
- bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
+ bnx2x_vf_release(bp, BP_VF(bp, i));
return 0;
}
@@ -2359,6 +1631,12 @@ void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
smp_mb__after_clear_bit();
}
+static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
+}
+
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
struct bnx2x_virtf *vf;
@@ -2383,6 +1661,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
case EVENT_RING_OPCODE_MULTICAST_RULES:
case EVENT_RING_OPCODE_FILTERS_RULES:
+ case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
cid = (elem->message.data.eth_event.echo &
BNX2X_SWCID_MASK);
DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
@@ -2447,13 +1726,15 @@ get_vf:
vf->abs_vfid, qidx);
bnx2x_vf_handle_filters_eqe(bp, vf);
break;
+ case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_rss_update_eqe(bp, vf);
case EVENT_RING_OPCODE_VF_FLR:
case EVENT_RING_OPCODE_MALICIOUS_VF:
/* Do nothing for now */
return 0;
}
- /* SRIOV: reschedule any 'in_progress' operations */
- bnx2x_iov_sp_event(bp, cid, false);
return 0;
}
@@ -2490,23 +1771,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
}
}
-void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
-{
- struct bnx2x_virtf *vf;
-
- /* check if the cid is the VF range */
- if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
- return;
-
- vf = bnx2x_vf_by_cid(bp, vf_cid);
- if (vf) {
- /* set in_progress flag */
- atomic_set(&vf->op_in_progress, 1);
- if (queue_work)
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
- }
-}
-
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
int i;
@@ -2527,10 +1791,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
(is_fcoe ? 0 : 1);
- DP(BNX2X_MSG_IOV,
- "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
- BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
- first_queue_query_index + num_queues_req);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+ BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+ first_queue_query_index + num_queues_req);
cur_data_offset = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats) +
@@ -2544,9 +1808,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
struct bnx2x_virtf *vf = BP_VF(bp, i);
if (vf->state != VF_ENABLED) {
- DP(BNX2X_MSG_IOV,
- "vf %d not enabled so no stats for it\n",
- vf->abs_vfid);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "vf %d not enabled so no stats for it\n",
+ vf->abs_vfid);
continue;
}
@@ -2588,32 +1852,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
-void bnx2x_iov_sp_task(struct bnx2x *bp)
-{
- int i;
-
- if (!IS_SRIOV(bp))
- return;
- /* Iterate over all VFs and invoke state transition for VFs with
- * 'in-progress' slow-path operations
- */
- DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
- for_each_vf(bp, i) {
- struct bnx2x_virtf *vf = BP_VF(bp, i);
-
- if (!vf) {
- BNX2X_ERR("VF was null! skipping...\n");
- continue;
- }
-
- if (!list_empty(&vf->op_list_head) &&
- atomic_read(&vf->op_in_progress)) {
- DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
- bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
- }
- }
-}
-
static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
@@ -2849,52 +2087,26 @@ static void bnx2x_set_vf_state(void *cookie)
p->vf->state = p->state;
}
-/* VFOP close (teardown the queues, delete mcasts and close HW) */
-static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
- enum bnx2x_vfop_close_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_close,
- .block = false,
- };
+ int rc = 0, i;
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_CLOSE_QUEUES:
-
- if (++(qx->qid) < vf_rxq_count(vf)) {
- vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
- if (vfop->rc)
- goto op_err;
- return;
- }
- vfop->state = BNX2X_VFOP_CLOSE_HW;
- vfop->rc = 0;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- case BNX2X_VFOP_CLOSE_HW:
-
- /* disable the interrupts */
- DP(BNX2X_MSG_IOV, "disabling igu\n");
- bnx2x_vf_igu_disable(bp, vf);
+ /* Close all queues */
+ for (i = 0; i < vf_rxq_count(vf); i++) {
+ rc = bnx2x_vf_queue_teardown(bp, vf, i);
+ if (rc)
+ goto op_err;
+ }
- /* disable the VF */
- DP(BNX2X_MSG_IOV, "clearing qtbl\n");
- bnx2x_vf_clr_qtbl(bp, vf);
+ /* disable the interrupts */
+ DP(BNX2X_MSG_IOV, "disabling igu\n");
+ bnx2x_vf_igu_disable(bp, vf);
- goto op_done;
- default:
- bnx2x_vfop_default(state);
- }
-op_err:
- BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
+ /* disable the VF */
+ DP(BNX2X_MSG_IOV, "clearing qtbl\n");
+ bnx2x_vf_clr_qtbl(bp, vf);
/* need to make sure there are no outstanding stats ramrods which may
* cause the device to access the VF's stats buffer which it will free
@@ -2909,43 +2121,20 @@ op_done:
}
DP(BNX2X_MSG_IOV, "set state to acquired\n");
- bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
- /* Not supported at the moment; Exists for macros only */
- return;
-}
-int bnx2x_vfop_close_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- vfop->args.qx.qid = -1; /* loop */
- bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
- bnx2x_vfop_close, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
- cmd->block);
- }
- return -ENOMEM;
+ return 0;
+op_err:
+ BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
+ return rc;
}
/* VF release can be called either: 1. The VF was acquired but
* not enabled 2. the vf was enabled or in the process of being
* enabled
*/
-static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_release,
- .block = false,
- };
-
- DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
-
- if (vfop->rc < 0)
- goto op_err;
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
vf->state == VF_FREE ? "Free" :
@@ -2956,116 +2145,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
switch (vf->state) {
case VF_ENABLED:
- vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
- if (vfop->rc)
+ rc = bnx2x_vf_close(bp, vf);
+ if (rc)
goto op_err;
- return;
-
+ /* Fallthrough to release resources */
case VF_ACQUIRED:
DP(BNX2X_MSG_IOV, "about to free resources\n");
bnx2x_vf_free_resc(bp, vf);
- DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
- goto op_done;
+ break;
case VF_FREE:
case VF_RESET:
- /* do nothing */
- goto op_done;
default:
- bnx2x_vfop_default(vf->state);
+ break;
}
+ return 0;
op_err:
- BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
- bnx2x_vfop_end(bp, vf, vfop);
+ BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
+ return rc;
}
-static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_config_rss_params *rss)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- enum bnx2x_vfop_rss_state state;
-
- if (!vfop) {
- BNX2X_ERR("vfop was null\n");
- return;
- }
-
- state = vfop->state;
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_RSS_CONFIG:
- /* next state */
- vfop->state = BNX2X_VFOP_RSS_DONE;
- bnx2x_config_rss(bp, &vfop->op_p->rss);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
- BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
-op_done:
- case BNX2X_VFOP_RSS_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
- }
-op_pending:
- return;
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+ set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
+ return bnx2x_config_rss(bp, rss);
}
-int bnx2x_vfop_release_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd)
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vfpf_tpa_tlv *tlv,
+ struct bnx2x_queue_update_tpa_params *params)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- bnx2x_vfop_opset(-1, /* use vf->state */
- bnx2x_vfop_release, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
- cmd->block);
- }
- return -ENOMEM;
-}
+ aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
+ struct bnx2x_queue_state_params qstate;
+ int qid, rc = 0;
-int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Set ramrod params */
+ memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+ memcpy(&qstate.params.update_tpa, params,
+ sizeof(struct bnx2x_queue_update_tpa_params));
+ qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
- if (vfop) {
- bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
- cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
- cmd->block);
+ for (qid = 0; qid < vf_rxq_count(vf); qid++) {
+ qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ qstate.params.update_tpa.sge_map = sge_addr[qid];
+ DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
+ vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
+ U64_LO(sge_addr[qid]));
+ rc = bnx2x_queue_state_change(bp, &qstate);
+ if (rc) {
+ BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
+ U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
+ vf->abs_vfid, qid);
+ return rc;
+ }
}
- return -ENOMEM;
+
+ return rc;
}
/* VF release ~ VF close + VF release-resources
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
*/
-void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- struct bnx2x_vfop_cmd cmd = {
- .done = NULL,
- .block = block,
- };
int rc;
DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
- rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
+ rc = bnx2x_vf_free(bp, vf);
if (rc)
WARN(rc,
"VF[%d] Failed to allocate resources for release op- rc=%d\n",
vf->abs_vfid, rc);
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+ return rc;
}
static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
@@ -3074,16 +2234,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
*sbdf = vf->devfn | (vf->bus << 8);
}
-static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
- struct bnx2x_vf_bar_info *bar_info)
-{
- int n;
-
- bar_info->nr_bars = bp->vfdb->sriov.nres;
- for (n = 0; n < bar_info->nr_bars; n++)
- bar_info->bars[n] = vf->bars[n];
-}
-
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv)
{
@@ -3405,13 +2555,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
- if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+ if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
0, ETH_ALEN);
- if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
vlan_obj->get_n_elements(bp, vlan_obj, 1,
(u8 *)&ivi->vlan, 0,
VLAN_HLEN);
+ }
} else {
/* mac */
if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3485,17 +2635,17 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
unsigned long ramrod_flags = 0;
- struct bnx2x_vlan_mac_obj *mac_obj =
- &bnx2x_leading_vfq(vf, mac_obj);
+ struct bnx2x_vlan_mac_obj *mac_obj;
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
+ /* User should be able to see failure reason in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
/* remove existing eth macs */
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete eth macs\n");
@@ -3569,17 +2719,16 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
BNX2X_Q_LOGICAL_STATE_ACTIVE)
return rc;
- /* configure the vlan in device on this vf's queue */
- vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
+ /* User should be able to see error in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
/* remove existing vlans */
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc) {
@@ -3736,13 +2885,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp)
bnx2x_sample_bulletin(bp);
/* if channel is down we need to self destruct */
- if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- }
+ if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ BNX2X_MSG_IOV);
}
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
@@ -3756,12 +2901,16 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
mutex_init(&bp->vf2pf_mutex);
/* allocate vf2pf mailbox for vf to pf channel */
- BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
- sizeof(struct bnx2x_vf_mbx_msg));
+ bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ if (!bp->vf2pf_mbox)
+ goto alloc_mem_err;
/* allocate pf 2 vf bulletin board */
- BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
- sizeof(union pf_vf_bulletin));
+ bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+ if (!bp->pf2vf_bulletin)
+ goto alloc_mem_err;
return 0;
@@ -3792,3 +2941,28 @@ void bnx2x_iov_channel_down(struct bnx2x *bp)
bnx2x_post_vf_bulletin(bp, vf_idx);
}
}
+
+void bnx2x_iov_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
+
+ if (!netif_running(bp->dev))
+ return;
+
+ if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
+ &bp->iov_task_state))
+ bnx2x_vf_handle_flr_event(bp);
+
+ if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
+ &bp->iov_task_state))
+ bnx2x_vf_mbx(bp);
+}
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
+{
+ smp_mb__before_clear_bit();
+ set_bit(flag, &bp->iov_task_state);
+ smp_mb__after_clear_bit();
+ DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+ queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d9fcca1b5a9d..8bf764570eef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -30,6 +30,8 @@ enum sample_bulletin_result {
#ifdef CONFIG_BNX2X_SRIOV
+extern struct workqueue_struct *bnx2x_iov_wq;
+
/* The bnx2x device structure holds vfdb structure described below.
* The VF array is indexed by the relative vfid.
*/
@@ -83,108 +85,35 @@ struct bnx2x_vf_queue {
u16 index;
u16 sb_idx;
bool is_leading;
+ bool sp_initialized;
};
-/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
- * q-init, q-setup and SB index
+/* struct bnx2x_vf_queue_construct_params - prepare queue construction
+ * parameters: q-init, q-setup and SB index
*/
-struct bnx2x_vfop_qctor_params {
+struct bnx2x_vf_queue_construct_params {
struct bnx2x_queue_state_params qstate;
struct bnx2x_queue_setup_params prep_qsetup;
};
-/* VFOP parameters (one copy per VF) */
-union bnx2x_vfop_params {
- struct bnx2x_vlan_mac_ramrod_params vlan_mac;
- struct bnx2x_rx_mode_ramrod_params rx_mode;
- struct bnx2x_mcast_ramrod_params mcast;
- struct bnx2x_config_rss_params rss;
- struct bnx2x_vfop_qctor_params qctor;
-};
-
/* forward */
struct bnx2x_virtf;
/* VFOP definitions */
-typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
-
-struct bnx2x_vfop_cmd {
- vfop_handler_t done;
- bool block;
-};
-/* VFOP queue filters command additional arguments */
-struct bnx2x_vfop_filter {
- struct list_head link;
+struct bnx2x_vf_mac_vlan_filter {
int type;
-#define BNX2X_VFOP_FILTER_MAC 1
-#define BNX2X_VFOP_FILTER_VLAN 2
+#define BNX2X_VF_FILTER_MAC 1
+#define BNX2X_VF_FILTER_VLAN 2
bool add;
u8 *mac;
u16 vid;
};
-struct bnx2x_vfop_filters {
- int add_cnt;
- struct list_head head;
- struct bnx2x_vfop_filter filters[];
-};
-
-/* transient list allocated, built and saved until its
- * passed to the SP-VERBs layer.
- */
-struct bnx2x_vfop_args_mcast {
- int mc_num;
- struct bnx2x_mcast_list_elem *mc;
-};
-
-struct bnx2x_vfop_args_qctor {
- int qid;
- u16 sb_idx;
-};
-
-struct bnx2x_vfop_args_qdtor {
- int qid;
- struct eth_context *cxt;
-};
-
-struct bnx2x_vfop_args_defvlan {
- int qid;
- bool enable;
- u16 vid;
- u8 prio;
-};
-
-struct bnx2x_vfop_args_qx {
- int qid;
- bool en_add;
-};
-
-struct bnx2x_vfop_args_filters {
- struct bnx2x_vfop_filters *multi_filter;
- atomic_t *credit; /* non NULL means 'don't consume credit' */
-};
-
-union bnx2x_vfop_args {
- struct bnx2x_vfop_args_mcast mc_list;
- struct bnx2x_vfop_args_qctor qctor;
- struct bnx2x_vfop_args_qdtor qdtor;
- struct bnx2x_vfop_args_defvlan defvlan;
- struct bnx2x_vfop_args_qx qx;
- struct bnx2x_vfop_args_filters filters;
-};
-
-struct bnx2x_vfop {
- struct list_head link;
- int rc; /* return code */
- int state; /* next state */
- union bnx2x_vfop_args args; /* extra arguments */
- union bnx2x_vfop_params *op_p; /* ramrod params */
-
- /* state machine callbacks */
- vfop_handler_t transition;
- vfop_handler_t done;
+struct bnx2x_vf_mac_vlan_filters {
+ int count;
+ struct bnx2x_vf_mac_vlan_filter filters[];
};
/* vf context */
@@ -204,15 +133,7 @@ struct bnx2x_virtf {
#define VF_ENABLED 2 /* VF Enabled */
#define VF_RESET 3 /* VF FLR'd, pending cleanup */
- /* non 0 during flr cleanup */
- u8 flr_clnup_stage;
-#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup'
- * sans the end-wait
- */
-#define VF_FLR_ACK 2 /* ACK flr notification */
-#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW
- * ~ final cleanup' end wait
- */
+ bool flr_clnup_stage; /* true during flr cleanup */
/* dma */
dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
@@ -276,11 +197,6 @@ struct bnx2x_virtf {
struct bnx2x_rss_config_obj rss_conf_obj;
/* slow-path operations */
- atomic_t op_in_progress;
- int op_rc;
- bool op_wait_blocking;
- struct list_head op_list_head;
- union bnx2x_vfop_params op_params;
struct mutex op_mutex; /* one vfop at a time mutex */
enum channel_tlvs op_current;
};
@@ -338,11 +254,6 @@ struct bnx2x_vf_mbx {
u32 vf_addr_hi;
struct vfpf_first_tlv first_tlv; /* saved VF request header */
-
- u8 flags;
-#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
- * more then one pending msg
- */
};
struct bnx2x_vf_sp {
@@ -419,6 +330,10 @@ struct bnx2x_vfdb {
/* the number of msix vectors belonging to this PF designated for VFs */
u16 vf_sbs_pool;
u16 first_vf_igu_entry;
+
+ /* sp_rtnl synchronization */
+ struct mutex event_mutex;
+ u64 event_occur;
};
/* queue access */
@@ -468,13 +383,13 @@ void bnx2x_iov_init_dq(struct bnx2x *bp);
void bnx2x_iov_init_dmae(struct bnx2x *bp);
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj);
-void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
-void bnx2x_iov_sp_task(struct bnx2x *bp);
/* global vf mailbox routines */
-void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event);
+void bnx2x_vf_mbx(struct bnx2x *bp);
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event);
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
/* CORE VF API */
@@ -487,162 +402,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
dma_addr_t *sb_map);
-/* VFOP generic helpers */
-#define bnx2x_vfop_default(state) do { \
- BNX2X_ERR("Bad state %d\n", (state)); \
- vfop->rc = -EINVAL; \
- goto op_err; \
- } while (0)
-
-enum {
- VFOP_DONE,
- VFOP_CONT,
- VFOP_VERIFY_PEND,
-};
-
-#define bnx2x_vfop_finalize(vf, rc, next) do { \
- if ((rc) < 0) \
- goto op_err; \
- else if ((rc) > 0) \
- goto op_pending; \
- else if ((next) == VFOP_DONE) \
- goto op_done; \
- else if ((next) == VFOP_VERIFY_PEND) \
- BNX2X_ERR("expected pending\n"); \
- else { \
- DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \
- atomic_set(&vf->op_in_progress, 1); \
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \
- return; \
- } \
- } while (0)
-
-#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
- do { \
- vfop->state = first_state; \
- vfop->op_p = &vf->op_params; \
- vfop->transition = trans_hndlr; \
- vfop->done = done_hndlr; \
- } while (0)
-
-static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
-{
- WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
- WARN_ON(list_empty(&vf->op_list_head));
- return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
-}
-
-static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
-
- WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
- if (vfop) {
- INIT_LIST_HEAD(&vfop->link);
- list_add(&vfop->link, &vf->op_list_head);
- }
- return vfop;
-}
-
-static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
- struct bnx2x_vfop *vfop)
-{
- /* rc < 0 - error, otherwise set to 0 */
- DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
- if (vfop->rc >= 0)
- vfop->rc = 0;
- DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
-
- /* unlink the current op context and propagate error code
- * must be done before invoking the 'done()' handler
- */
- WARN(!mutex_is_locked(&vf->op_mutex),
- "about to access vf op linked list but mutex was not locked!");
- list_del(&vfop->link);
-
- if (list_empty(&vf->op_list_head)) {
- DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
- vf->op_rc = vfop->rc;
- DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
- vf->op_rc, vfop->rc);
- } else {
- struct bnx2x_vfop *cur_vfop;
-
- DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
- cur_vfop = bnx2x_vfop_cur(bp, vf);
- cur_vfop->rc = vfop->rc;
- DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
- vf->op_rc, vfop->rc);
- }
-
- /* invoke done handler */
- if (vfop->done) {
- DP(BNX2X_MSG_IOV, "calling done handler\n");
- vfop->done(bp, vf);
- } else {
- /* there is no done handler for the operation to unlock
- * the mutex. Must have gotten here from PF initiated VF RELEASE
- */
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
- }
-
- DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
- vf->op_rc, vfop->rc);
-
- /* if this is the last nested op reset the wait_blocking flag
- * to release any blocking wrappers, only after 'done()' is invoked
- */
- if (list_empty(&vf->op_list_head)) {
- DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
- vf->op_wait_blocking = false;
- }
-
- kfree(vfop);
-}
-
-static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
-{
- /* can take a while if any port is running */
- int cnt = 5000;
-
- might_sleep();
- while (cnt--) {
- if (vf->op_wait_blocking == false) {
-#ifdef BNX2X_STOP_ON_ERROR
- DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
-#endif
- return 0;
- }
- usleep_range(1000, 2000);
-
- if (bp->panic)
- return -EIO;
- }
-
- /* timeout! */
-#ifdef BNX2X_STOP_ON_ERROR
- bnx2x_panic();
-#endif
-
- return -EBUSY;
-}
-
-static inline int bnx2x_vfop_transition(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- vfop_handler_t transition,
- bool block)
-{
- if (block)
- vf->op_wait_blocking = true;
- transition(bp, vf);
- if (block)
- return bnx2x_vfop_wait_blocking(bp, vf);
- return 0;
-}
-
/* VFOP queue construction helpers */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
@@ -657,59 +416,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
- struct bnx2x_vfop_qctor_params *p,
+ struct bnx2x_vf_queue_construct_params *p,
unsigned long q_type);
-int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *macs,
- int qid, bool drv_only);
-
-int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *vlans,
- int qid, bool drv_only);
-
-int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid);
-
-int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid);
-
-int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- bnx2x_mac_addr_t *mcasts,
- int mcast_num, bool drv_only);
-
-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, unsigned long accept_flags);
-
-int bnx2x_vfop_close_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd);
-
-int bnx2x_vfop_release_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd);
-int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd);
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mac_vlan_filters *filters,
+ int qid, bool drv_only);
+
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor);
+
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
+
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);
+
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, unsigned long accept_flags);
+
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_config_rss_params *rss);
+
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vfpf_tpa_tlv *tlv,
+ struct bnx2x_queue_update_tpa_params *params);
/* VF release ~ VF close + VF release-resources
*
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
*/
-void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block);
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
@@ -772,18 +513,20 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
void bnx2x_iov_channel_down(struct bnx2x *bp);
+void bnx2x_iov_task(struct work_struct *work);
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
+
#else /* CONFIG_BNX2X_SRIOV */
static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj) {}
-static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid,
- bool queue_work) {}
static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
union event_ring_elem *elem) {return 1; }
-static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {}
-static inline void bnx2x_vf_mbx(struct bnx2x *bp,
- struct vf_pf_event_data *vfpf_event) {}
+static inline void bnx2x_vf_mbx(struct bnx2x *bp) {}
+static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event) {}
static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
@@ -830,5 +573,8 @@ static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
+static inline void bnx2x_iov_task(struct work_struct *work) {}
+static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
+
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 3fa6c2a2a5a9..0622884596b2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->leading_rss = cl_id;
q->is_leading = true;
+ q->sp_initialized = true;
}
/* ask the pf to open a queue for the vf */
@@ -672,6 +673,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
return rc;
}
@@ -894,29 +896,16 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
- switch (mode) {
- case BNX2X_RX_MODE_NONE: /* no Rx */
+	/* Ignore everything except MODE_NONE */
+ if (mode == BNX2X_RX_MODE_NONE) {
req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
- break;
- case BNX2X_RX_MODE_NORMAL:
+ } else {
+ /* Current PF driver will not look at the specific flags,
+ * but they are required when working with older drivers on hv.
+ */
req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- case BNX2X_RX_MODE_ALLMULTI:
- req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- case BNX2X_RX_MODE_PROMISC:
- req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- default:
- BNX2X_ERR("BAD rx mode (%d)\n", mode);
- rc = -EINVAL;
- goto out;
}
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -937,7 +926,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
rc = -EINVAL;
}
-out:
+
bnx2x_vfpf_finalize(bp, &req->first_tlv);
return rc;
@@ -1047,7 +1036,8 @@ static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
}
static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
+ struct bnx2x_virtf *vf,
+ int vf_rc)
{
struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
@@ -1059,7 +1049,7 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
- resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+ resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
/* send response */
vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
@@ -1088,9 +1078,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
mmiowb();
- /* initiate dmae to send the response */
- mbx->flags &= ~VF_MSG_INPROCESS;
-
/* copy the response header including status-done field,
* must be last dmae, must be after FW is acked
*/
@@ -1110,14 +1097,15 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
return;
mbx_error:
- bnx2x_vf_release(bp, vf, false); /* non blocking */
+ bnx2x_vf_release(bp, vf);
}
static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
+ struct bnx2x_virtf *vf,
+ int rc)
{
bnx2x_vf_mbx_resp_single_tlv(bp, vf);
- bnx2x_vf_mbx_resp_send_msg(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
}
static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
@@ -1159,7 +1147,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resp->pfdev_info.db_size = bp->db_size;
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
- /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+ PFVF_CAP_TPA |
+ PFVF_CAP_TPA_UPDATE);
bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
sizeof(resp->pfdev_info.fw_ver));
@@ -1240,8 +1229,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
sizeof(struct channel_list_end_tlv));
/* send the response */
- vf->op_rc = vfop_status;
- bnx2x_vf_mbx_resp_send_msg(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1273,19 +1261,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_init_tlv *init = &mbx->msg->req.init;
+ int rc;
/* record ghost addresses from vf message */
vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
vf->stats_stride = init->stats_stride;
- vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+ rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
/* set VF multiqueue statistics collection mode */
if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
vf->cfg_flags |= VF_CFG_STATS_COALESCE;
/* response */
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
/* convert MBX queue-flags to standard SP queue-flags */
@@ -1320,16 +1309,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ struct bnx2x_vf_queue_construct_params qctor;
+ int rc = 0;
/* verify vf_qid */
if (setup_q->vf_qid >= vf_rxq_count(vf)) {
BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
setup_q->vf_qid, vf_rxq_count(vf));
- vf->op_rc = -EINVAL;
+ rc = -EINVAL;
goto response;
}
@@ -1347,9 +1334,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_leading_vfq_init(bp, vf, q);
/* re-init the VF operation context */
- memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
- setup_p = &vf->op_params.qctor.prep_qsetup;
- init_p = &vf->op_params.qctor.qstate.params.init;
+		memset(&qctor, 0,
+ sizeof(struct bnx2x_vf_queue_construct_params));
+ setup_p = &qctor.prep_qsetup;
+ init_p = &qctor.qstate.params.init;
/* activate immediately */
__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
@@ -1435,44 +1423,34 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
q->index, q->sb_idx);
}
/* complete the preparations */
- bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
+ bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
- vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
- if (vf->op_rc)
+ rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
+ if (rc)
goto response;
- return;
}
response:
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
-enum bnx2x_vfop_filters_state {
- BNX2X_VFOP_MBX_Q_FILTERS_MACS,
- BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
- BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
- BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
- BNX2X_VFOP_MBX_Q_FILTERS_DONE
-};
-
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct vfpf_set_q_filters_tlv *tlv,
- struct bnx2x_vfop_filters **pfl,
+ struct bnx2x_vf_mac_vlan_filters **pfl,
u32 type_flag)
{
int i, j;
- struct bnx2x_vfop_filters *fl = NULL;
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
size_t fsz;
- fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
- sizeof(struct bnx2x_vfop_filters);
+ fsz = tlv->n_mac_vlan_filters *
+ sizeof(struct bnx2x_vf_mac_vlan_filter) +
+ sizeof(struct bnx2x_vf_mac_vlan_filters);
fl = kzalloc(fsz, GFP_KERNEL);
if (!fl)
return -ENOMEM;
- INIT_LIST_HEAD(&fl->head);
-
for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
@@ -1480,17 +1458,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
continue;
if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
fl->filters[j].mac = msg_filter->mac;
- fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
+ fl->filters[j].type = BNX2X_VF_FILTER_MAC;
} else {
fl->filters[j].vid = msg_filter->vlan_tag;
- fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
+ fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
}
fl->filters[j].add =
(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
true : false;
- list_add_tail(&fl->filters[j++].link, &fl->head);
+ fl->count++;
}
- if (list_empty(&fl->head))
+ if (!fl->count)
kfree(fl);
else
*pfl = fl;
@@ -1530,180 +1508,96 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
-static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- int rc;
+ int rc = 0;
struct vfpf_set_q_filters_tlv *msg =
&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- enum bnx2x_vfop_filters_state state = vfop->state;
-
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_mbx_qfilters,
- .block = false,
- };
-
- DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
-
- if (vfop->rc < 0)
- goto op_err;
+ /* check for any mac/vlan changes */
+ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+ /* build mac list */
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
- switch (state) {
- case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_MAC_FILTER);
+ if (rc)
+ goto op_err;
- /* check for any vlan/mac changes */
- if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
- /* build mac list */
- struct bnx2x_vfop_filters *fl = NULL;
+ if (fl) {
- vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_MAC_FILTER);
- if (vfop->rc)
+ /* set mac list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
goto op_err;
-
- if (fl) {
- /* set mac list */
- rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
- msg->vf_qid,
- false);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
- }
}
- /* fall through */
- case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
+ /* build vlan list */
+ fl = NULL;
- /* check for any vlan/mac changes */
- if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
- /* build vlan list */
- struct bnx2x_vfop_filters *fl = NULL;
-
- vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_VLAN_FILTER);
- if (vfop->rc)
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_FILTER);
+ if (rc)
+ goto op_err;
+
+ if (fl) {
+ /* set vlan list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
goto op_err;
-
- if (fl) {
- /* set vlan list */
- rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
- msg->vf_qid,
- false);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
- }
}
- /* fall through */
-
- case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
-
- if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
- unsigned long accept = 0;
- struct pf_vf_bulletin_content *bulletin =
- BP_VF_BULLETIN(bp, vf->index);
-
- /* covert VF-PF if mask to bnx2x accept flags */
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
- __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
-
- if (msg->rx_mask &
- VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
- __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
-
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
- __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
-
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
+ }
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
- __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+ if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
+ unsigned long accept = 0;
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
- /* A packet arriving the vf's mac should be accepted
- * with any vlan, unless a vlan has already been
- * configured.
- */
- if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
-
- /* set rx-mode */
- rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
- msg->vf_qid, accept);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
+ /* Ignore VF requested mode; instead set a regular mode */
+ if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
}
- /* fall through */
-
- case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
-
- if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
- /* set mcasts */
- rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
- msg->n_multicast, false);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
- }
- /* fall through */
-op_done:
- case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
-op_err:
- BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
- vf->abs_vfid, msg->vf_qid, vfop->rc);
- goto op_done;
- default:
- bnx2x_vfop_default(state);
+ /* A packet arriving at the vf's mac should be accepted
+ * with any vlan, unless a vlan has already been
+ * configured.
+ */
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+
+ /* set rx-mode */
+ rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
+ if (rc)
+ goto op_err;
}
-}
-static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
- bnx2x_vfop_mbx_qfilters, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
- cmd->block);
+ if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
+ /* set mcasts */
+ rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
+ msg->n_multicast, false);
+ if (rc)
+ goto op_err;
}
- return -ENOMEM;
+op_err:
+ if (rc)
+ BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
+ vf->abs_vfid, msg->vf_qid, rc);
+ return rc;
}
-static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vf_mbx *mbx)
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
{
- struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc = 0;
/* if a mac was already set for this VF via the set vf mac ndo, we only
* accept mac configurations of that mac. Why accept them at all?
@@ -1715,7 +1609,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
if (filters->n_mac_vlan_filters > 1) {
BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
vf->abs_vfid);
- vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
@@ -1725,10 +1619,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
- vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
}
+
+response:
+ return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ int rc = 0;
+
/* if vlan was set by hypervisor we don't allow guest to config vlan */
if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
int i;
@@ -1739,14 +1645,35 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
VFPF_Q_FILTER_VLAN_TAG_VALID) {
BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
vf->abs_vfid);
- vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
}
}
/* verify vf_qid */
- if (filters->vf_qid > vf_rxq_count(vf))
+ if (filters->vf_qid > vf_rxq_count(vf)) {
+ rc = -EPERM;
+ goto response;
+ }
+
+response:
+ return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+ int rc;
+
+ rc = bnx2x_filters_validate_mac(bp, vf, filters);
+ if (rc)
+ goto response;
+
+ rc = bnx2x_filters_validate_vlan(bp, vf, filters);
+ if (rc)
goto response;
DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1756,125 +1683,169 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
/* print q_filter message */
bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
- vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
- if (vf->op_rc)
- goto response;
- return;
-
+ rc = bnx2x_vf_mbx_qfilters(bp, vf);
response:
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
int qid = mbx->msg->req.q_op.vf_qid;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
vf->abs_vfid, qid);
- vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
- if (vf->op_rc)
- bnx2x_vf_mbx_resp(bp, vf);
+ rc = bnx2x_vf_queue_teardown(bp, vf, qid);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
- vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
- if (vf->op_rc)
- bnx2x_vf_mbx_resp(bp, vf);
+ rc = bnx2x_vf_close(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
- vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
- if (vf->op_rc)
- bnx2x_vf_mbx_resp(bp, vf);
+ rc = bnx2x_vf_free(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
- struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
+ struct bnx2x_config_rss_params rss;
struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+ int rc = 0;
if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
vf->index);
- vf->op_rc = -EINVAL;
+ rc = -EINVAL;
goto mbx_resp;
}
+ memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
+
/* set vfop params according to rss tlv */
- memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
+ memcpy(rss.ind_table, rss_tlv->ind_table,
T_ETH_INDIRECTION_TABLE_SIZE);
- memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
- sizeof(rss_tlv->rss_key));
- vf_op_params->rss_obj = &vf->rss_conf_obj;
- vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
+ memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
+ rss.rss_obj = &vf->rss_conf_obj;
+ rss.rss_result_mask = rss_tlv->rss_result_mask;
/* flags handled individually for backward/forward compatibility */
- vf_op_params->rss_flags = 0;
- vf_op_params->ramrod_flags = 0;
+ rss.rss_flags = 0;
+ rss.ramrod_flags = 0;
if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
- __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
- __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
- __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
- __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
- __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
- __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
- __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
- __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
- __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
(!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
BNX2X_ERR("about to hit a FW assert. aborting...\n");
- vf->op_rc = -EINVAL;
+ rc = -EINVAL;
goto mbx_resp;
}
- vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
+ rc = bnx2x_vf_rss_update(bp, vf, &rss);
+mbx_resp:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+ struct vfpf_tpa_tlv *tpa_tlv)
+{
+ int rc = 0;
+
+ if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+ U_ETH_MAX_SGES_FOR_PACKET) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_sges_for_packet,
+ U_ETH_MAX_SGES_FOR_PACKET);
+ }
+
+ if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_tpa_queues,
+ MAX_AGG_QS(bp));
+ }
+
+ return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_queue_update_tpa_params vf_op_params;
+ struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+ int rc = 0;
+
+ memset(&vf_op_params, 0, sizeof(vf_op_params));
+
+ if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+ goto mbx_resp;
+
+ vf_op_params.complete_on_both_clients =
+ tpa_tlv->tpa_client_info.complete_on_both_clients;
+ vf_op_params.dont_verify_thr =
+ tpa_tlv->tpa_client_info.dont_verify_thr;
+ vf_op_params.max_agg_sz =
+ tpa_tlv->tpa_client_info.max_agg_size;
+ vf_op_params.max_sges_pkt =
+ tpa_tlv->tpa_client_info.max_sges_for_packet;
+ vf_op_params.max_tpa_queues =
+ tpa_tlv->tpa_client_info.max_tpa_queues;
+ vf_op_params.sge_buff_sz =
+ tpa_tlv->tpa_client_info.sge_buff_size;
+ vf_op_params.sge_pause_thr_high =
+ tpa_tlv->tpa_client_info.sge_pause_thr_high;
+ vf_op_params.sge_pause_thr_low =
+ tpa_tlv->tpa_client_info.sge_pause_thr_low;
+ vf_op_params.tpa_mode =
+ tpa_tlv->tpa_client_info.tpa_mode;
+ vf_op_params.update_ipv4 =
+ tpa_tlv->tpa_client_info.update_ipv4;
+ vf_op_params.update_ipv6 =
+ tpa_tlv->tpa_client_info.update_ipv6;
+
+ rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
mbx_resp:
- if (vf->op_rc)
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
/* dispatch request */
@@ -1916,6 +1887,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
case CHANNEL_TLV_UPDATE_RSS:
bnx2x_vf_mbx_update_rss(bp, vf, mbx);
return;
+ case CHANNEL_TLV_UPDATE_TPA:
+ bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+ return;
}
} else {
@@ -1935,11 +1909,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* can we respond to VF (do we have an address for it?) */
if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
- /* mbx_resp uses the op_rc of the VF */
- vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
-
/* notify the VF that we do not support this request */
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
} else {
/* can't send a response since this VF is unknown to us
* just ack the FW to release the mailbox and unlock
@@ -1952,13 +1923,10 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
}
}
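The dispatcher above relies on the channel TLV enum keeping a CHANNEL_TLV_MAX sentinel and on answering unknown request ids with an explicit not-supported status. A rough standalone illustration of that pattern, using made-up DEMO_* ids instead of the real enum:

#include <stdio.h>

enum demo_tlv {
	DEMO_TLV_NONE = 0,
	DEMO_TLV_ACQUIRE,
	DEMO_TLV_UPDATE_RSS,
	DEMO_TLV_UPDATE_TPA,	/* new requests are appended before MAX */
	DEMO_TLV_MAX
};

#define DEMO_STATUS_SUCCESS        0
#define DEMO_STATUS_NOT_SUPPORTED  1

/* Dispatch a request id; anything outside the known range, or not handled
 * below, gets a "not supported" reply instead of being silently dropped.
 */
static int demo_dispatch(int tlv)
{
	if (tlv > DEMO_TLV_NONE && tlv < DEMO_TLV_MAX) {
		switch (tlv) {
		case DEMO_TLV_UPDATE_TPA:
			printf("handling TPA update\n");
			return DEMO_STATUS_SUCCESS;
		default:
			break;
		}
	}
	printf("unsupported tlv %d\n", tlv);
	return DEMO_STATUS_NOT_SUPPORTED;
}

int main(void)
{
	demo_dispatch(DEMO_TLV_UPDATE_TPA);
	demo_dispatch(42);
	return 0;
}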
-/* handle new vf-pf message */
-void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event)
{
- struct bnx2x_virtf *vf;
- struct bnx2x_vf_mbx *mbx;
u8 vf_idx;
- int rc;
DP(BNX2X_MSG_IOV,
"vf pf event received: vfid %d, address_hi %x, address lo %x",
@@ -1970,50 +1938,73 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
BNX2X_NR_VIRTFN(bp)) {
BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
- goto mbx_done;
+ return;
}
+
vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
- mbx = BP_VF_MBX(bp, vf_idx);
- /* verify an event is not currently being processed -
- * debug failsafe only
- */
- if (mbx->flags & VF_MSG_INPROCESS) {
- BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
- vfpf_event->vf_id);
- goto mbx_done;
- }
- vf = BP_VF(bp, vf_idx);
+ /* Update VFDB with current message and schedule its handling */
+ mutex_lock(&BP_VFDB(bp)->event_mutex);
+ BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
+ BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
+ BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
+ mutex_unlock(&BP_VFDB(bp)->event_mutex);
- /* save the VF message address */
- mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
- mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
- DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
- mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+ bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
+}
- /* dmae to get the VF request */
- rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
- mbx->vf_addr_hi, mbx->vf_addr_lo,
- sizeof(union vfpf_tlvs)/4);
- if (rc) {
- BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
- goto mbx_error;
- }
+/* handle new vf-pf messages */
+void bnx2x_vf_mbx(struct bnx2x *bp)
+{
+ struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
+ u64 events;
+ u8 vf_idx;
+ int rc;
- /* process the VF message header */
- mbx->first_tlv = mbx->msg->req.first_tlv;
+ if (!vfdb)
+ return;
- /* Clean response buffer to refrain from falsely seeing chains */
- memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+ mutex_lock(&vfdb->event_mutex);
+ events = vfdb->event_occur;
+ vfdb->event_occur = 0;
+ mutex_unlock(&vfdb->event_mutex);
- /* dispatch the request (will prepare the response) */
- bnx2x_vf_mbx_request(bp, vf, mbx);
- goto mbx_done;
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
-mbx_error:
- bnx2x_vf_release(bp, vf, false); /* non blocking */
-mbx_done:
- return;
+ /* Handle VFs which have pending events */
+ if (!(events & (1ULL << vf_idx)))
+ continue;
+
+ DP(BNX2X_MSG_IOV,
+ "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
+ vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
+ mbx->first_tlv.resp_msg_offset);
+
+ /* dmae to get the VF request */
+ rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
+ vf->abs_vfid, mbx->vf_addr_hi,
+ mbx->vf_addr_lo,
+ sizeof(union vfpf_tlvs)/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy request VF %d\n",
+ vf->abs_vfid);
+ bnx2x_vf_release(bp, vf);
+ return;
+ }
+
+ /* process the VF message header */
+ mbx->first_tlv = mbx->msg->req.first_tlv;
+
+ /* Clean response buffer to refrain from falsely
+ * seeing chains.
+ */
+ memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
+ /* dispatch the request (will prepare the response) */
+ bnx2x_vf_mbx_request(bp, vf, mbx);
+ }
}
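The two functions above split mailbox handling into a cheap producer (record the event in a per-VF bit under event_mutex, then schedule a task) and a deferred consumer (snapshot and clear the mask, then service each set bit). A simplified user-space sketch of that scheme, using pthreads and hypothetical demo_* names rather than the driver's VF database:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_VFS 64

/* Hypothetical stand-in for the per-PF VF database */
static struct {
	pthread_mutex_t event_mutex;
	uint64_t event_occur;
} demo_vfdb = { PTHREAD_MUTEX_INITIALIZER, 0 };

/* Fast path: just record which VF has a pending message and return */
static void demo_schedule(unsigned int vf_idx)
{
	pthread_mutex_lock(&demo_vfdb.event_mutex);
	demo_vfdb.event_occur |= 1ULL << vf_idx;
	pthread_mutex_unlock(&demo_vfdb.event_mutex);
	/* a real driver would now kick a workqueue/task here */
}

/* Deferred path: snapshot and clear the mask, then handle each set bit */
static void demo_handle_events(void)
{
	uint64_t events;
	unsigned int vf_idx;

	pthread_mutex_lock(&demo_vfdb.event_mutex);
	events = demo_vfdb.event_occur;
	demo_vfdb.event_occur = 0;
	pthread_mutex_unlock(&demo_vfdb.event_mutex);

	for (vf_idx = 0; vf_idx < DEMO_MAX_VFS; vf_idx++) {
		if (!(events & (1ULL << vf_idx)))
			continue;
		printf("processing mailbox of vf %u\n", vf_idx);
	}
}

int main(void)
{
	demo_schedule(3);
	demo_schedule(17);
	demo_handle_events();
	return 0;
}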
/* propagate local bulletin board to vf */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 208568bc7a71..c922b81170e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
#define PFVF_CAP_RSS 0x00000001
#define PFVF_CAP_DHC 0x00000002
#define PFVF_CAP_TPA 0x00000004
+#define PFVF_CAP_TPA_UPDATE 0x00000008
char fw_ver[32];
u16 db_size;
u8 indices_per_sb;
@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
u32 rx_mask; /* see mask constants at the top of the file */
};
+struct vfpf_tpa_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_tpa_client_info {
+ aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_for_packet;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u16 sge_buff_size;
+ u16 max_agg_size;
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+ } tpa_client_info;
+};
+
/* close VF (disable VF) */
struct vfpf_close_tlv {
struct vfpf_first_tlv first_tlv;
@@ -331,6 +351,7 @@ union vfpf_tlvs {
struct vfpf_set_q_filters_tlv set_q_filters;
struct vfpf_release_tlv release;
struct vfpf_rss_tlv update_rss;
+ struct vfpf_tpa_tlv update_tpa;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
@@ -405,6 +426,7 @@ enum channel_tlvs {
CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_UPDATE_RSS,
CHANNEL_TLV_PHYS_PORT_ID,
+ CHANNEL_TLV_UPDATE_TPA,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile
new file mode 100644
index 000000000000..31f55a90a197
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCMGENET) += genet.o
+genet-objs := bcmgenet.o bcmmii.o
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
new file mode 100644
index 000000000000..0966bd04375f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -0,0 +1,2583 @@
+/*
+ * Broadcom GENET (Gigabit Ethernet) controller driver
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "bcmgenet: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <net/arp.h>
+
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/phy.h>
+
+#include <asm/unaligned.h>
+
+#include "bcmgenet.h"
+
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT 4
+
+/* Default highest priority queue for multi queue support */
+#define GENET_Q0_PRIORITY 0
+
+#define GENET_DEFAULT_BD_CNT \
+ (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)
+
+#define RX_BUF_LENGTH 2048
+#define SKB_ALIGNMENT 32
+
+/* Tx/Rx DMA register offset, skip 256 descriptors */
+#define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
+#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32))
+
+#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
+ TOTAL_DESC * DMA_DESC_SIZE)
+
+#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
+ TOTAL_DESC * DMA_DESC_SIZE)
+
+static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
+ void __iomem *d, u32 value)
+{
+ __raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
+ void __iomem *d)
+{
+ return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
+ void __iomem *d,
+ dma_addr_t addr)
+{
+ __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
+
+ /* Register writes to GISB bus can take a couple hundred nanoseconds
+ * and are done for each packet, save these expensive writes unless
+ * the platform is explicitly configured for 64-bits/LPAE.
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (priv->hw_params->flags & GENET_HAS_40BITS)
+ __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
+#endif
+}
+
+/* Combined address + length/status setter */
+static inline void dmadesc_set(struct bcmgenet_priv *priv,
+ void __iomem *d, dma_addr_t addr, u32 val)
+{
+ dmadesc_set_length_status(priv, d, val);
+ dmadesc_set_addr(priv, d, addr);
+}
+
+static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
+ void __iomem *d)
+{
+ dma_addr_t addr;
+
+ addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);
+
+ /* Register writes to GISB bus can take a couple hundred nanoseconds
+ * and are done for each packet, save these expensive writes unless
+ * the platform is explicitly configured for 64-bits/LPAE.
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (priv->hw_params->flags & GENET_HAS_40BITS)
+ addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
+#endif
+ return addr;
+}
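dmadesc_set_addr()/dmadesc_get_addr() split a possibly 40-bit DMA address across two 32-bit descriptor words and skip the HI access when the platform does not need it. A small stand-alone sketch of the same split, with a plain struct standing in for the MMIO words:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 32-bit descriptor "registers" standing in for the
 * DMA_DESC_ADDRESS_LO/HI words.
 */
struct demo_desc {
	uint32_t addr_lo;
	uint32_t addr_hi;
};

static void demo_set_addr(struct demo_desc *d, uint64_t addr, int has_40bits)
{
	d->addr_lo = (uint32_t)(addr & 0xffffffffu);
	/* the HI write is skipped unless the platform really needs it,
	 * since each extra register write costs bus time per packet
	 */
	if (has_40bits)
		d->addr_hi = (uint32_t)(addr >> 32);
}

static uint64_t demo_get_addr(const struct demo_desc *d, int has_40bits)
{
	uint64_t addr = d->addr_lo;

	if (has_40bits)
		addr |= (uint64_t)d->addr_hi << 32;
	return addr;
}

int main(void)
{
	struct demo_desc d = { 0, 0 };

	demo_set_addr(&d, 0x12345678abULL, 1);
	printf("0x%llx\n", (unsigned long long)demo_get_addr(&d, 1));
	return 0;
}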
+
+#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"
+
+#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
+ else
+ return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
+}
+
+static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
+ else
+ bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
+}
+
+/* These macros are defined to deal with register map change
+ * between GENET1.1 and GENET2. Only those currently being used
+ * by the driver are defined.
+ */
+static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
+ else
+ return __raw_readl(priv->base +
+ priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
+ else
+ __raw_writel(val, priv->base +
+ priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
+ else
+ return __raw_readl(priv->base +
+ priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
+ else
+ __raw_writel(val, priv->base +
+ priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+/* RX/TX DMA register accessors */
+enum dma_reg {
+ DMA_RING_CFG = 0,
+ DMA_CTRL,
+ DMA_STATUS,
+ DMA_SCB_BURST_SIZE,
+ DMA_ARB_CTRL,
+ DMA_PRIORITY,
+ DMA_RING_PRIORITY,
+};
+
+static const u8 bcmgenet_dma_regs_v3plus[] = {
+ [DMA_RING_CFG] = 0x00,
+ [DMA_CTRL] = 0x04,
+ [DMA_STATUS] = 0x08,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x2C,
+ [DMA_PRIORITY] = 0x30,
+ [DMA_RING_PRIORITY] = 0x38,
+};
+
+static const u8 bcmgenet_dma_regs_v2[] = {
+ [DMA_RING_CFG] = 0x00,
+ [DMA_CTRL] = 0x04,
+ [DMA_STATUS] = 0x08,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x30,
+ [DMA_PRIORITY] = 0x34,
+ [DMA_RING_PRIORITY] = 0x3C,
+};
+
+static const u8 bcmgenet_dma_regs_v1[] = {
+ [DMA_CTRL] = 0x00,
+ [DMA_STATUS] = 0x04,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x30,
+ [DMA_PRIORITY] = 0x34,
+ [DMA_RING_PRIORITY] = 0x3C,
+};
+
+/* Set at runtime once bcmgenet version is known */
+static const u8 *bcmgenet_dma_regs;
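The per-version offset tables above let one set of accessors serve several register layouts: the right table is picked once, then every read or write indexes it by a symbolic register id. A compact illustration of the idea, with invented DEMO_* registers and a plain array instead of a mapped BAR:

#include <stdint.h>
#include <stdio.h>

enum demo_reg { DEMO_CTRL = 0, DEMO_STATUS, DEMO_REG_MAX };

/* Per-generation register offsets; only the layout differs, not the API */
static const uint8_t demo_regs_v1[] = {
	[DEMO_CTRL]   = 0x00,
	[DEMO_STATUS] = 0x04,
};

static const uint8_t demo_regs_v2[] = {
	[DEMO_CTRL]   = 0x04,
	[DEMO_STATUS] = 0x08,
};

/* Chosen once at probe time, then used by every accessor */
static const uint8_t *demo_regs;

static uint32_t demo_readl(const uint32_t *base, enum demo_reg r)
{
	return base[demo_regs[r] / 4];	/* an MMIO read in the real driver */
}

int main(void)
{
	uint32_t fake_bar[8] = { 0x11, 0x22, 0x33, 0x44 };
	int is_v2 = 1;

	demo_regs = is_v2 ? demo_regs_v2 : demo_regs_v1;
	printf("STATUS = 0x%x\n", demo_readl(fake_bar, DEMO_STATUS));
	return 0;
}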
+
+static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
+{
+ return netdev_priv(dev_get_drvdata(dev));
+}
+
+static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
+ enum dma_reg r)
+{
+ return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
+ u32 val, enum dma_reg r)
+{
+ __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
+ enum dma_reg r)
+{
+ return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
+ u32 val, enum dma_reg r)
+{
+ __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+/* RDMA/TDMA ring registers and accessors
+ * we merge the common fields and just prefix with T/D the registers
+ * having different meaning depending on the direction
+ */
+enum dma_ring_reg {
+ TDMA_READ_PTR = 0,
+ RDMA_WRITE_PTR = TDMA_READ_PTR,
+ TDMA_READ_PTR_HI,
+ RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
+ TDMA_CONS_INDEX,
+ RDMA_PROD_INDEX = TDMA_CONS_INDEX,
+ TDMA_PROD_INDEX,
+ RDMA_CONS_INDEX = TDMA_PROD_INDEX,
+ DMA_RING_BUF_SIZE,
+ DMA_START_ADDR,
+ DMA_START_ADDR_HI,
+ DMA_END_ADDR,
+ DMA_END_ADDR_HI,
+ DMA_MBUF_DONE_THRESH,
+ TDMA_FLOW_PERIOD,
+ RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
+ TDMA_WRITE_PTR,
+ RDMA_READ_PTR = TDMA_WRITE_PTR,
+ TDMA_WRITE_PTR_HI,
+ RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
+};
+
+/* GENET v4 supports 40-bit pointer addressing.
+ * For obvious reasons the LO and HI word parts
+ * are contiguous, but this offsets the other
+ * registers.
+ */
+static const u8 genet_dma_ring_regs_v4[] = {
+ [TDMA_READ_PTR] = 0x00,
+ [TDMA_READ_PTR_HI] = 0x04,
+ [TDMA_CONS_INDEX] = 0x08,
+ [TDMA_PROD_INDEX] = 0x0C,
+ [DMA_RING_BUF_SIZE] = 0x10,
+ [DMA_START_ADDR] = 0x14,
+ [DMA_START_ADDR_HI] = 0x18,
+ [DMA_END_ADDR] = 0x1C,
+ [DMA_END_ADDR_HI] = 0x20,
+ [DMA_MBUF_DONE_THRESH] = 0x24,
+ [TDMA_FLOW_PERIOD] = 0x28,
+ [TDMA_WRITE_PTR] = 0x2C,
+ [TDMA_WRITE_PTR_HI] = 0x30,
+};
+
+static const u8 genet_dma_ring_regs_v123[] = {
+ [TDMA_READ_PTR] = 0x00,
+ [TDMA_CONS_INDEX] = 0x04,
+ [TDMA_PROD_INDEX] = 0x08,
+ [DMA_RING_BUF_SIZE] = 0x0C,
+ [DMA_START_ADDR] = 0x10,
+ [DMA_END_ADDR] = 0x14,
+ [DMA_MBUF_DONE_THRESH] = 0x18,
+ [TDMA_FLOW_PERIOD] = 0x1C,
+ [TDMA_WRITE_PTR] = 0x20,
+};
+
+/* Set at runtime once GENET version is known */
+static const u8 *genet_dma_ring_regs;
+
+static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ enum dma_ring_reg r)
+{
+ return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ u32 val,
+ enum dma_ring_reg r)
+{
+ __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ enum dma_ring_reg r)
+{
+ return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ u32 val,
+ enum dma_ring_reg r)
+{
+ __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static int bcmgenet_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 rbuf_chk_ctrl;
+ bool rx_csum_en;
+
+ rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+
+ rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
+
+ /* enable rx checksumming */
+ if (rx_csum_en)
+ rbuf_chk_ctrl |= RBUF_RXCHK_EN;
+ else
+ rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
+ priv->desc_rxchk_en = rx_csum_en;
+
+ /* If UniMAC forwards CRC, we need to skip over it to get
+ * a valid CHK bit to be set in the per-packet status word
+ */
+ if (rx_csum_en && priv->crc_fwd_en)
+ rbuf_chk_ctrl |= RBUF_SKIP_FCS;
+ else
+ rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
+
+ bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
+
+ return 0;
+}
+
+static int bcmgenet_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ bool desc_64b_en;
+ u32 tbuf_ctrl, rbuf_ctrl;
+
+ tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
+ rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+
+ desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+
+ /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
+ if (desc_64b_en) {
+ tbuf_ctrl |= RBUF_64B_EN;
+ rbuf_ctrl |= RBUF_64B_EN;
+ } else {
+ tbuf_ctrl &= ~RBUF_64B_EN;
+ rbuf_ctrl &= ~RBUF_64B_EN;
+ }
+ priv->desc_64b_en = desc_64b_en;
+
+ bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
+ bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
+
+ return 0;
+}
+
+static int bcmgenet_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ dev->features;
+ netdev_features_t wanted = dev->wanted_features;
+ int ret = 0;
+
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+ ret = bcmgenet_set_tx_csum(dev, wanted);
+ if (changed & (NETIF_F_RXCSUM))
+ ret = bcmgenet_set_rx_csum(dev, wanted);
+
+ return ret;
+}
+
+static u32 bcmgenet_get_msglevel(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = level;
+}
+
+/* standard ethtool support functions. */
+enum bcmgenet_stat_type {
+ BCMGENET_STAT_NETDEV = -1,
+ BCMGENET_STAT_MIB_RX,
+ BCMGENET_STAT_MIB_TX,
+ BCMGENET_STAT_RUNT,
+ BCMGENET_STAT_MISC,
+};
+
+struct bcmgenet_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_sizeof;
+ int stat_offset;
+ enum bcmgenet_stat_type type;
+ /* reg offset from UMAC base for misc counters */
+ u16 reg_offset;
+};
+
+#define STAT_NETDEV(m) { \
+ .stat_string = __stringify(m), \
+ .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+ .stat_offset = offsetof(struct net_device_stats, m), \
+ .type = BCMGENET_STAT_NETDEV, \
+}
+
+#define STAT_GENET_MIB(str, m, _type) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, m), \
+ .type = _type, \
+}
+
+#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
+#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
+#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+
+#define STAT_GENET_MISC(str, m, offset) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, m), \
+ .type = BCMGENET_STAT_MISC, \
+ .reg_offset = offset, \
+}
+
+
+/* There is a 0xC gap between the end of RX and beginning of TX stats and then
+ * between the end of TX stats and the beginning of the RX RUNT
+ */
+#define BCMGENET_STAT_OFFSET 0xc
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
+ /* general stats */
+ STAT_NETDEV(rx_packets),
+ STAT_NETDEV(tx_packets),
+ STAT_NETDEV(rx_bytes),
+ STAT_NETDEV(tx_bytes),
+ STAT_NETDEV(rx_errors),
+ STAT_NETDEV(tx_errors),
+ STAT_NETDEV(rx_dropped),
+ STAT_NETDEV(tx_dropped),
+ STAT_NETDEV(multicast),
+ /* UniMAC RSV counters */
+ STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+ STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+ STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+ STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+ STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+ STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+ STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+ STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+ STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+ STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+ STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
+ STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
+ STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
+ STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
+ STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
+ STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
+ STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
+ STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
+ STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
+ STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
+ STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
+ STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
+ STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
+ STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
+ STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
+ STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
+ STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
+ STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
+ STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
+ /* UniMAC TSV counters */
+ STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+ STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+ STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+ STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+ STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+ STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+ STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+ STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+ STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+ STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+ STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
+ STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
+ STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
+ STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
+ STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
+ STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
+ STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
+ STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
+ STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
+ STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
+ STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
+ STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
+ STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
+ STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
+ STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
+ STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
+ STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
+ STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
+ STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
+ /* UniMAC RUNT counters */
+ STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+ STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+ STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+ STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+ /* Misc UniMAC counters */
+ STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
+ UMAC_RBUF_OVFL_CNT),
+ STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+ STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+};
+
+#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
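The stats table above pairs each counter name with sizeof/offsetof information so the ethtool code can walk the private structure generically. A minimal sketch of that descriptor-table pattern, with a hypothetical demo_priv in place of bcmgenet_priv:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_priv {
	uint32_t rx_packets;
	uint32_t tx_packets;
};

struct demo_stat {
	const char *name;
	size_t offset;
};

#define DEMO_STAT(m) { #m, offsetof(struct demo_priv, m) }

static const struct demo_stat demo_stats[] = {
	DEMO_STAT(rx_packets),
	DEMO_STAT(tx_packets),
};

int main(void)
{
	struct demo_priv priv = { .rx_packets = 10, .tx_packets = 7 };
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		/* read each counter through its recorded byte offset */
		const uint32_t *p = (const uint32_t *)
			((const char *)&priv + demo_stats[i].offset);
		printf("%-12s %u\n", demo_stats[i].name, *p);
	}
	return 0;
}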
+
+static void bcmgenet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+ strlcpy(info->version, "v2.0", sizeof(info->version));
+ info->n_stats = BCMGENET_STATS_LEN;
+
+}
+
+static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCMGENET_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcmgenet_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcmgenet_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
+{
+ int i, j = 0;
+
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ const struct bcmgenet_stats *s;
+ u8 offset = 0;
+ u32 val = 0;
+ char *p;
+
+ s = &bcmgenet_gstrings_stats[i];
+ switch (s->type) {
+ case BCMGENET_STAT_NETDEV:
+ continue;
+ case BCMGENET_STAT_MIB_RX:
+ case BCMGENET_STAT_MIB_TX:
+ case BCMGENET_STAT_RUNT:
+ if (s->type != BCMGENET_STAT_MIB_RX)
+ offset = BCMGENET_STAT_OFFSET;
+ val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
+ j + offset);
+ break;
+ case BCMGENET_STAT_MISC:
+ val = bcmgenet_umac_readl(priv, s->reg_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0, s->reg_offset);
+ break;
+ }
+
+ j += s->stat_sizeof;
+ p = (char *)priv + s->stat_offset;
+ *(u32 *)p = val;
+ }
+}
+
+static void bcmgenet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_running(dev))
+ bcmgenet_update_mib_counters(priv);
+
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ const struct bcmgenet_stats *s;
+ char *p;
+
+ s = &bcmgenet_gstrings_stats[i];
+ if (s->type == BCMGENET_STAT_NETDEV)
+ p = (char *)&dev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
+ data[i] = *(u32 *)p;
+ }
+}
+
+/* standard ethtool support functions. */
+static struct ethtool_ops bcmgenet_ethtool_ops = {
+ .get_strings = bcmgenet_get_strings,
+ .get_sset_count = bcmgenet_get_sset_count,
+ .get_ethtool_stats = bcmgenet_get_ethtool_stats,
+ .get_settings = bcmgenet_get_settings,
+ .set_settings = bcmgenet_set_settings,
+ .get_drvinfo = bcmgenet_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = bcmgenet_get_msglevel,
+ .set_msglevel = bcmgenet_set_msglevel,
+};
+
+/* Power down the unimac, based on mode. */
+static void bcmgenet_power_down(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
+{
+ u32 reg;
+
+ switch (mode) {
+ case GENET_POWER_CABLE_SENSE:
+ phy_detach(priv->phydev);
+ break;
+
+ case GENET_POWER_PASSIVE:
+ /* Power down LED */
+ bcmgenet_mii_reset(priv->dev);
+ if (priv->hw_params->flags & GENET_HAS_EXT) {
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= (EXT_PWR_DOWN_PHY |
+ EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void bcmgenet_power_up(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
+{
+ u32 reg;
+
+ if (!(priv->hw_params->flags & GENET_HAS_EXT))
+ return;
+
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+
+ switch (mode) {
+ case GENET_POWER_PASSIVE:
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
+ EXT_PWR_DOWN_BIAS);
+ /* fallthrough */
+ case GENET_POWER_CABLE_SENSE:
+ /* enable APD */
+ reg |= EXT_PWR_DN_EN_LD;
+ break;
+ default:
+ break;
+ }
+
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_mii_reset(priv->dev);
+}
+
+/* ioctl handle special commands that are not present in ethtool. */
+static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int val = 0;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!priv->phydev)
+ val = -ENODEV;
+ else
+ val = phy_mii_ioctl(priv->phydev, rq, cmd);
+ break;
+
+ default:
+ val = -EINVAL;
+ break;
+ }
+
+ return val;
+}
+
+static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct enet_cb *tx_cb_ptr;
+
+ tx_cb_ptr = ring->cbs;
+ tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+ tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
+ /* Advancing local write pointer */
+ if (ring->write_ptr == ring->end_ptr)
+ ring->write_ptr = ring->cb_ptr;
+ else
+ ring->write_ptr++;
+
+ return tx_cb_ptr;
+}
+
+/* Simple helper to free a control block's resources */
+static void bcmgenet_free_cb(struct enet_cb *cb)
+{
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_1_writel(priv,
+ (1 << ring->index), INTRL2_CPU_MASK_CLEAR);
+ priv->int1_mask &= ~(1 << ring->index);
+}
+
+static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_1_writel(priv,
+ (1 << ring->index), INTRL2_CPU_MASK_SET);
+ priv->int1_mask |= (1 << ring->index);
+}
+
+/* Unlocked version of the reclaim routine */
+static void __bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int last_tx_cn, last_c_index, num_tx_bds;
+ struct enet_cb *tx_cb_ptr;
+ struct netdev_queue *txq;
+ unsigned int c_index;
+
+ /* Compute how many buffers are transmitted since last xmit call */
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+ txq = netdev_get_tx_queue(dev, ring->queue);
+
+ last_c_index = ring->c_index;
+ num_tx_bds = ring->size;
+
+ c_index &= (num_tx_bds - 1);
+
+ if (c_index >= last_c_index)
+ last_tx_cn = c_index - last_c_index;
+ else
+ last_tx_cn = num_tx_bds - last_c_index + c_index;
+
+ netif_dbg(priv, tx_done, dev,
+ "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
+ __func__, ring->index,
+ c_index, last_tx_cn, last_c_index);
+
+ /* Reclaim transmitted buffers */
+ while (last_tx_cn-- > 0) {
+ tx_cb_ptr = ring->cbs + last_c_index;
+ if (tx_cb_ptr->skb) {
+ dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ dma_unmap_single(&dev->dev,
+ dma_unmap_addr(tx_cb_ptr, dma_addr),
+ tx_cb_ptr->skb->len,
+ DMA_TO_DEVICE);
+ bcmgenet_free_cb(tx_cb_ptr);
+ } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
+ dev->stats.tx_bytes +=
+ dma_unmap_len(tx_cb_ptr, dma_len);
+ dma_unmap_page(&dev->dev,
+ dma_unmap_addr(tx_cb_ptr, dma_addr),
+ dma_unmap_len(tx_cb_ptr, dma_len),
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+ }
+ dev->stats.tx_packets++;
+ ring->free_bds += 1;
+
+ last_c_index++;
+ last_c_index &= (num_tx_bds - 1);
+ }
+
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1))
+ ring->int_disable(priv, ring);
+
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
+
+ ring->c_index = c_index;
+}
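The reclaim path computes how many descriptors completed since the last pass from two indices that wrap at the ring size. For a power-of-two ring, the if/else above gives the same result as a single masked subtraction, sketched here:

#include <stdio.h>

/* Number of completed buffers between the last consumer index we saw and
 * the one just read from hardware, for a power-of-two ring size. The masked
 * subtraction handles the wrap in one expression.
 */
static unsigned int demo_done(unsigned int c_index,
			      unsigned int last_c_index,
			      unsigned int num_bds)	/* power of two */
{
	return (c_index - last_c_index) & (num_bds - 1);
}

int main(void)
{
	/* 256-entry ring: hardware wrapped from 250 back to 4 */
	printf("%u\n", demo_done(4, 250, 256));	/* prints 10 */
	return 0;
}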
+
+static void bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ __bcmgenet_tx_reclaim(dev, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+}
+
+static void bcmgenet_tx_reclaim_all(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_is_multiqueue(dev)) {
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
+ }
+
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+}
+
+/* Transmits a single SKB (either head of a fragment or a single SKB)
+ * caller must hold priv->lock
+ */
+static int bcmgenet_xmit_single(struct net_device *dev,
+ struct sk_buff *skb,
+ u16 dma_desc_flags,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct enet_cb *tx_cb_ptr;
+ unsigned int skb_len;
+ dma_addr_t mapping;
+ u32 length_status;
+ int ret;
+
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+ if (unlikely(!tx_cb_ptr))
+ BUG();
+
+ tx_cb_ptr->skb = skb;
+
+ skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+
+ mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
+ length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
+ DMA_TX_APPEND_CRC;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ length_status |= DMA_TX_DO_CSUM;
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
+
+ /* Decrement total BD count and advance our write pointer */
+ ring->free_bds -= 1;
+ ring->prod_index += 1;
+ ring->prod_index &= DMA_P_INDEX_MASK;
+
+ return 0;
+}
+
+/* Transmit an SKB fragment */
+static int bcmgenet_xmit_frag(struct net_device *dev,
+ skb_frag_t *frag,
+ u16 dma_desc_flags,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct enet_cb *tx_cb_ptr;
+ dma_addr_t mapping;
+ int ret;
+
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+ if (unlikely(!tx_cb_ptr))
+ BUG();
+ tx_cb_ptr->skb = NULL;
+
+ mapping = skb_frag_dma_map(kdev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
+ __func__);
+ return ret;
+ }
+
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
+ (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
+
+
+ ring->free_bds -= 1;
+ ring->prod_index += 1;
+ ring->prod_index &= DMA_P_INDEX_MASK;
+
+ return 0;
+}
+
+/* Reallocate the SKB to put enough headroom in front of it and insert
+ * the transmit checksum offsets in the descriptors
+ */
+static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
+{
+ struct status_64 *status = NULL;
+ struct sk_buff *new_skb;
+ u16 offset;
+ u8 ip_proto;
+ u16 ip_ver;
+ u32 tx_csum_info;
+
+ if (unlikely(skb_headroom(skb) < sizeof(*status))) {
+ /* If the 64 byte status block is enabled, we must make sure the skb
+ * has enough headroom for us to insert the 64B status block.
+ */
+ new_skb = skb_realloc_headroom(skb, sizeof(*status));
+ dev_kfree_skb(skb);
+ if (!new_skb) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+ skb = new_skb;
+ }
+
+ skb_push(skb, sizeof(*status));
+ status = (struct status_64 *)skb->data;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ip_ver = htons(skb->protocol);
+ switch (ip_ver) {
+ case ETH_P_IP:
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return 0;
+ }
+
+ offset = skb_checksum_start_offset(skb) - sizeof(*status);
+ tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
+ (offset + skb->csum_offset);
+
+ /* Set the length valid bit for TCP and UDP and just set
+ * the special UDP flag for IPv4, else just set to 0.
+ */
+ if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+ tx_csum_info |= STATUS_TX_CSUM_LV;
+ if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+ tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
+ } else
+ tx_csum_info = 0;
+
+ status->tx_csum_info = tx_csum_info;
+ }
+
+ return 0;
+}
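bcmgenet_put_tx_csum() reserves headroom and then moves the data pointer backwards (skb_push) so the 64-byte status block can be prepended without copying the payload. A toy buffer showing just that pointer arithmetic, not the skb API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy packet buffer with reserved headroom, mimicking skb_push(): the data
 * pointer moves backwards so a metadata block is prepended in place.
 */
struct demo_buf {
	unsigned char storage[256];
	unsigned char *data;	/* start of the live packet data */
	size_t len;
};

static void demo_buf_init(struct demo_buf *b, size_t headroom,
			  const void *payload, size_t len)
{
	b->data = b->storage + headroom;
	memcpy(b->data, payload, len);
	b->len = len;
}

static void *demo_buf_push(struct demo_buf *b, size_t n)
{
	b->data -= n;	/* caller must have reserved enough headroom */
	b->len += n;
	return b->data;
}

int main(void)
{
	struct demo_buf b;
	uint8_t *status;

	demo_buf_init(&b, 64, "payload", 7);
	status = demo_buf_push(&b, 64);	/* room for the 64B status block */
	memset(status, 0, 64);
	printf("len with status block: %zu\n", b.len);
	return 0;
}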
+
+static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_tx_ring *ring = NULL;
+ struct netdev_queue *txq;
+ unsigned long flags = 0;
+ int nr_frags, index;
+ u16 dma_desc_flags;
+ int ret;
+ int i;
+
+ index = skb_get_queue_mapping(skb);
+ /* Mapping strategy:
+ * queue_mapping = 0, unclassified, packet transmitted through ring 16
+ * queue_mapping = 1, goes to ring 0. (highest priority queue)
+ * queue_mapping = 2, goes to ring 1.
+ * queue_mapping = 3, goes to ring 2.
+ * queue_mapping = 4, goes to ring 3.
+ */
+ if (index == 0)
+ index = DESC_INDEX;
+ else
+ index -= 1;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ ring = &priv->tx_rings[index];
+ txq = netdev_get_tx_queue(dev, ring->queue);
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (ring->free_bds <= nr_frags + 1) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
+ __func__, index, ring->queue);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* set the SKB transmit checksum */
+ if (priv->desc_64b_en) {
+ ret = bcmgenet_put_tx_csum(dev, skb);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ dma_desc_flags = DMA_SOP;
+ if (nr_frags == 0)
+ dma_desc_flags |= DMA_EOP;
+
+ /* Transmit single SKB or head of fragment list */
+ ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ /* xmit fragment */
+ for (i = 0; i < nr_frags; i++) {
+ ret = bcmgenet_xmit_frag(dev,
+ &skb_shinfo(skb)->frags[i],
+ (i == nr_frags - 1) ? DMA_EOP : 0, ring);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ skb_tx_timestamp(skb);
+
+ /* we kept a software copy of how much we should advance the TDMA
+ * producer index, now write it down to the hardware
+ */
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+
+ if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+ netif_tx_stop_queue(txq);
+ ring->int_enable(priv, ring);
+ }
+
+out:
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return ret;
+}
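The mapping comment above boils down to: queue_mapping 0 uses the default ring, while 1..4 select priority rings 0..3. A tiny sketch of that translation, with DEMO_DESC_INDEX standing in for the driver's default ring index:

#include <stdio.h>

#define DEMO_DESC_INDEX 16	/* default ring, like DESC_INDEX above */

/* skb queue_mapping 0 -> default ring, 1..4 -> priority rings 0..3 */
static int demo_map_queue(int queue_mapping)
{
	return queue_mapping == 0 ? DEMO_DESC_INDEX : queue_mapping - 1;
}

int main(void)
{
	int q;

	for (q = 0; q <= 4; q++)
		printf("queue_mapping %d -> ring %d\n", q, demo_map_queue(q));
	return 0;
}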
+
+
+static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
+ struct enet_cb *cb)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int ret;
+
+ skb = netdev_alloc_skb(priv->dev,
+ priv->rx_buf_len + SKB_ALIGNMENT);
+ if (!skb)
+ return -ENOMEM;
+
+ /* a caller did not release this control block */
+ WARN_ON(cb->skb != NULL);
+ cb->skb = skb;
+ mapping = dma_map_single(kdev, skb->data,
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ bcmgenet_free_cb(cb);
+ netif_err(priv, rx_err, priv->dev,
+ "%s DMA map failed\n", __func__);
+ return ret;
+ }
+
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ /* assign packet, prepare descriptor, and advance pointer */
+
+ dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
+ /* turn on the newly assigned BD for DMA to use */
+ priv->rx_bd_assign_index++;
+ priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+
+ priv->rx_bd_assign_ptr = priv->rx_bds +
+ (priv->rx_bd_assign_index * DMA_DESC_SIZE);
+
+ return 0;
+}
+
+/* bcmgenet_desc_rx - descriptor based rx process.
+ * this could be called from bottom half, or from NAPI polling method.
+ */
+static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
+ unsigned int budget)
+{
+ struct net_device *dev = priv->dev;
+ struct enet_cb *cb;
+ struct sk_buff *skb;
+ u32 dma_length_status;
+ unsigned long dma_flag;
+ int len, err;
+ unsigned int rxpktprocessed = 0, rxpkttoprocess;
+ unsigned int p_index;
+ unsigned int chksum_ok = 0;
+
+ p_index = bcmgenet_rdma_ring_readl(priv,
+ DESC_INDEX, RDMA_PROD_INDEX);
+ p_index &= DMA_P_INDEX_MASK;
+
+ if (p_index < priv->rx_c_index)
+ rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
+ priv->rx_c_index + p_index;
+ else
+ rxpkttoprocess = p_index - priv->rx_c_index;
+
+ netif_dbg(priv, rx_status, dev,
+ "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
+
+ while ((rxpktprocessed < rxpkttoprocess) &&
+ (rxpktprocessed < budget)) {
+
+ /* Unmap the packet contents such that we can use the
+ * RSV from the 64-byte descriptor when enabled and save
+ * a 32-bit register read
+ */
+ cb = &priv->rx_cbs[priv->rx_read_ptr];
+ skb = cb->skb;
+ dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+
+ if (!priv->desc_64b_en) {
+ dma_length_status = dmadesc_get_length_status(priv,
+ priv->rx_bds +
+ (priv->rx_read_ptr *
+ DMA_DESC_SIZE));
+ } else {
+ struct status_64 *status;
+ status = (struct status_64 *)skb->data;
+ dma_length_status = status->length_status;
+ }
+
+ /* DMA flags and length are still valid no matter how
+ * we got the Receive Status Vector (64B RSB or register)
+ */
+ dma_flag = dma_length_status & 0xffff;
+ len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
+
+ netif_dbg(priv, rx_status, dev,
+ "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
+ __func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
+ dma_length_status);
+
+ rxpktprocessed++;
+
+ priv->rx_read_ptr++;
+ priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+ /* out of memory, just drop packets at the hardware level */
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+ goto refill;
+ }
+
+ if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
+ netif_err(priv, rx_status, dev,
+ "Droping fragmented packet!\n");
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ goto refill;
+ }
+ /* report errors */
+ if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
+ DMA_RX_OV |
+ DMA_RX_NO |
+ DMA_RX_LG |
+ DMA_RX_RXER))) {
+ netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
+ (unsigned int)dma_flag);
+ if (dma_flag & DMA_RX_CRC_ERROR)
+ dev->stats.rx_crc_errors++;
+ if (dma_flag & DMA_RX_OV)
+ dev->stats.rx_over_errors++;
+ if (dma_flag & DMA_RX_NO)
+ dev->stats.rx_frame_errors++;
+ if (dma_flag & DMA_RX_LG)
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+
+ /* discard the packet and advance consumer index.*/
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ goto refill;
+ } /* error packet */
+
+ chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
+ priv->desc_rxchk_en;
+
+ skb_put(skb, len);
+ if (priv->desc_64b_en) {
+ skb_pull(skb, 64);
+ len -= 64;
+ }
+
+ if (likely(chksum_ok))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* remove the hardware 2 bytes added for IP alignment */
+ skb_pull(skb, 2);
+ len -= 2;
+
+ if (priv->crc_fwd_en) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ /* Finish setting up the received SKB and send it to the kernel */
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ if (dma_flag & DMA_RX_MULT)
+ dev->stats.multicast++;
+
+ /* Notify kernel */
+ napi_gro_receive(&priv->napi, skb);
+ cb->skb = NULL;
+ netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
+
+ /* refill RX path on the current control block */
+refill:
+ err = bcmgenet_rx_refill(priv, cb);
+ if (err)
+ netif_err(priv, rx_err, dev, "Rx refill failed\n");
+ }
+
+ return rxpktprocessed;
+}
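+
+/* Illustrative sketch (not driver code): the producer/consumer arithmetic
+ * at the top of bcmgenet_desc_rx() works on 16-bit hardware indices that
+ * wrap at DMA_C_INDEX_MASK + 1 (65536). The explicit "p_index < rx_c_index"
+ * branch is equivalent to a masked subtraction, as this unused helper shows.
+ */
+static inline unsigned int bcmgenet_rx_pkts_to_process_sketch(u32 p_index,
+ u32 c_index)
+{
+ /* both indices are assumed to be already masked to DMA_C_INDEX_MASK */
+ return (p_index - c_index) & DMA_C_INDEX_MASK;
+}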
+
+/* Assign skb to RX DMA descriptor. */
+static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
+{
+ struct enet_cb *cb;
+ int ret = 0;
+ int i;
+
+ netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);
+
+ /* loop here for each buffer needing assignment */
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+ if (cb->skb)
+ continue;
+
+ /* Set the DMA descriptor length once and for all;
+ * it would only change if we supported dynamically sizing
+ * priv->rx_buf_len, which we do not.
+ */
+ dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
+ priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
+
+ ret = bcmgenet_rx_refill(priv, cb);
+ if (ret)
+ break;
+
+ }
+
+ return ret;
+}
+
+static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
+{
+ struct enet_cb *cb;
+ int i;
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[i];
+
+ if (dma_unmap_addr(cb, dma_addr)) {
+ dma_unmap_single(&priv->dev->dev,
+ dma_unmap_addr(cb, dma_addr),
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+
+ if (cb->skb)
+ bcmgenet_free_cb(cb);
+ }
+}
+
+static int reset_umac(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ unsigned int timeout = 0;
+ u32 reg;
+
+ /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
+ bcmgenet_rbuf_ctrl_set(priv, 0);
+ udelay(10);
+
+ /* disable MAC while updating its registers */
+ bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+
+ /* issue soft reset, wait for it to complete */
+ bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+ while (timeout++ < 1000) {
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (!(reg & CMD_SW_RESET))
+ return 0;
+
+ udelay(1);
+ }
+
+ dev_err(kdev,
+ "timeout waiting for MAC to come out of reset\n");
+
+ return -ETIMEDOUT;
+}
+
+static int init_umac(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ int ret;
+ u32 reg, cpu_mask_clear;
+
+ dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
+
+ ret = reset_umac(priv);
+ if (ret)
+ return ret;
+
+ bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+ /* clear tx/rx counter */
+ bcmgenet_umac_writel(priv,
+ MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
+ bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
+
+ bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+ /* init rx registers, enable ip header optimization */
+ reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+ reg |= RBUF_ALIGN_2B;
+ bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
+
+ if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
+ bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
+
+ /* Mask all interrupts.*/
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+ cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+
+ dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
+
+ /* Monitor cable plug/unplug events for internal PHY */
+ if (phy_is_internal(priv->phydev))
+ cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+ else if (priv->ext_phy)
+ cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+ else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ reg = bcmgenet_bp_mc_get(priv);
+ reg |= BIT(priv->hw_params->bp_in_en_shift);
+
+ /* bp_mask: back pressure mask */
+ if (netif_is_multiqueue(priv->dev))
+ reg |= priv->hw_params->bp_in_mask;
+ else
+ reg &= ~priv->hw_params->bp_in_mask;
+ bcmgenet_bp_mc_set(priv, reg);
+ }
+
+ /* Enable MDIO interrupts on GENET v3+ */
+ if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
+ cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
+
+ bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
+ INTRL2_CPU_MASK_CLEAR);
+
+ /* Enable rx/tx engine.*/
+ dev_dbg(kdev, "done init umac\n");
+
+ return 0;
+}
+
+/* Initialize all house-keeping variables for a TX ring, along
+ * with corresponding hardware registers
+ */
+static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
+ unsigned int index, unsigned int size,
+ unsigned int write_ptr, unsigned int end_ptr)
+{
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+ u32 words_per_bd = WORDS_PER_BD(priv);
+ u32 flow_period_val = 0;
+ unsigned int first_bd;
+
+ spin_lock_init(&ring->lock);
+ ring->index = index;
+ if (index == DESC_INDEX) {
+ ring->queue = 0;
+ ring->int_enable = bcmgenet_tx_ring16_int_enable;
+ ring->int_disable = bcmgenet_tx_ring16_int_disable;
+ } else {
+ ring->queue = index + 1;
+ ring->int_enable = bcmgenet_tx_ring_int_enable;
+ ring->int_disable = bcmgenet_tx_ring_int_disable;
+ }
+ ring->cbs = priv->tx_cbs + write_ptr;
+ ring->size = size;
+ ring->c_index = 0;
+ ring->free_bds = size;
+ ring->write_ptr = write_ptr;
+ ring->cb_ptr = write_ptr;
+ ring->end_ptr = end_ptr - 1;
+ ring->prod_index = 0;
+
+ /* Set flow period for ring != 16 */
+ if (index != DESC_INDEX)
+ flow_period_val = ENET_MAX_MTU_SIZE << 16;
+
+ bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
+ bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
+ bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
+ /* Disable rate control for now */
+ bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
+ TDMA_FLOW_PERIOD);
+ /* Unclassified traffic goes to ring 16 */
+ bcmgenet_tdma_ring_writel(priv, index,
+ ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
+ DMA_RING_BUF_SIZE);
+
+ first_bd = write_ptr;
+
+ /* Set start and end address, read and write pointers */
+ bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+ DMA_START_ADDR);
+ bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+ TDMA_READ_PTR);
+ bcmgenet_tdma_ring_writel(priv, index, first_bd,
+ TDMA_WRITE_PTR);
+ bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+ DMA_END_ADDR);
+}
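+
+/* Worked example (illustrative, assuming the GENETv1-v3 value
+ * words_per_bd = 2): for a ring covering buffer descriptors [32, 64),
+ * i.e. write_ptr = 32 and end_ptr = 64, the registers programmed above are
+ * DMA_START_ADDR = TDMA_READ_PTR = 32 * 2 = 64, TDMA_WRITE_PTR = 32 and
+ * DMA_END_ADDR = 64 * 2 - 1 = 127.
+ */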
+
+/* Initialize a RDMA ring */
+static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
+ unsigned int index, unsigned int size)
+{
+ u32 words_per_bd = WORDS_PER_BD(priv);
+ int ret;
+
+ priv->num_rx_bds = TOTAL_DESC;
+ priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
+ priv->rx_bd_assign_ptr = priv->rx_bds;
+ priv->rx_bd_assign_index = 0;
+ priv->rx_c_index = 0;
+ priv->rx_read_ptr = 0;
+ priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
+ GFP_KERNEL);
+ if (!priv->rx_cbs)
+ return -ENOMEM;
+
+ ret = bcmgenet_alloc_rx_buffers(priv);
+ if (ret) {
+ kfree(priv->rx_cbs);
+ return ret;
+ }
+
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
+ bcmgenet_rdma_ring_writel(priv, index,
+ ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
+ DMA_RING_BUF_SIZE);
+ bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
+ bcmgenet_rdma_ring_writel(priv, index,
+ words_per_bd * size - 1, DMA_END_ADDR);
+ bcmgenet_rdma_ring_writel(priv, index,
+ (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
+ DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
+
+ return ret;
+}
+
+/* Initialize the multi xmit queues, only available on GENETv2+.
+ * The queues are partitioned as follows:
+ *
+ * Queues 0 - 3 are priority-based, each one with 32 descriptors,
+ * with queue 0 being the highest priority queue.
+ *
+ * Queue 16 is the default TX queue with GENET_DEFAULT_BD_CNT
+ * descriptors: 256 - (number of tx queues * bds per queue) = 128
+ * descriptors (see the worked sketch after this function).
+ *
+ * The transmit control block pool is then partitioned as follows:
+ * - tx_cbs[0...127] are for queue 16
+ * - tx_ring_cbs[0] points to tx_cbs[128..159]
+ * - tx_ring_cbs[1] points to tx_cbs[160..191]
+ * - tx_ring_cbs[2] points to tx_cbs[192..223]
+ * - tx_ring_cbs[3] points to tx_cbs[224..255]
+ */
+static void bcmgenet_init_multiq(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned int i, dma_enable;
+ u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0;
+
+ if (!netif_is_multiqueue(dev)) {
+ netdev_warn(dev, "called with non multi queue aware HW\n");
+ return;
+ }
+
+ dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ dma_enable = dma_ctrl & DMA_EN;
+ dma_ctrl &= ~DMA_EN;
+ bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+ /* Enable strict priority arbiter mode */
+ bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
+
+ for (i = 0; i < priv->hw_params->tx_queues; i++) {
+ /* first 64 tx_cbs are reserved for default tx queue
+ * (ring 16)
+ */
+ bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
+ i * priv->hw_params->bds_cnt,
+ (i + 1) * priv->hw_params->bds_cnt);
+
+ /* Configure ring as descriptor ring and set up priority */
+ ring_cfg |= 1 << i;
+ dma_priority |= ((GENET_Q0_PRIORITY + i) <<
+ (GENET_MAX_MQ_CNT + 1) * i);
+ dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
+ }
+
+ /* Enable rings */
+ reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
+ reg |= ring_cfg;
+ bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
+
+ /* Use configured rings priority and set ring #16 priority */
+ reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY);
+ reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20);
+ reg |= dma_priority;
+ bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY);
+
+ /* Configure ring as descriptor ring and re-enable DMA if enabled */
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ if (dma_enable)
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+}
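+
+/* Illustrative sketch (not driver code): with the GENETv2+ parameters used
+ * here (tx_queues = 4, bds_cnt = 32, TOTAL_DESC = 256), the priority rings
+ * consume 4 * 32 = 128 descriptors and the default ring 16 keeps the
+ * remaining 256 - 128 = 128, matching the comment above this function.
+ */
+static inline unsigned int bcmgenet_default_q_bds_sketch(unsigned int total_desc,
+ unsigned int tx_queues, unsigned int bds_cnt)
+{
+ return total_desc - tx_queues * bds_cnt;
+}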
+
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+ int i;
+
+ /* disable DMA */
+ bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
+ bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+
+ for (i = 0; i < priv->num_tx_bds; i++) {
+ if (priv->tx_cbs[i].skb != NULL) {
+ dev_kfree_skb(priv->tx_cbs[i].skb);
+ priv->tx_cbs[i].skb = NULL;
+ }
+ }
+
+ bcmgenet_free_rx_buffers(priv);
+ kfree(priv->rx_cbs);
+ kfree(priv->tx_cbs);
+}
+
+/* bcmgenet_init_dma: Initialize DMA control registers */
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+{
+ int ret;
+
+ netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
+
+ /* by default, enable ring 16 (descriptor based) */
+ ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
+ if (ret) {
+ netdev_err(priv->dev, "failed to initialize RX ring\n");
+ return ret;
+ }
+
+ /* init rDma */
+ bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+ /* Init tDma */
+ bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+ /* Initialize common TX ring structures */
+ priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
+ priv->num_tx_bds = TOTAL_DESC;
+ priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
+ GFP_KERNEL);
+ if (!priv->tx_cbs) {
+ bcmgenet_fini_dma(priv);
+ return -ENOMEM;
+ }
+
+ /* initialize multi xmit queue */
+ bcmgenet_init_multiq(priv->dev);
+
+ /* initialize special ring 16 */
+ bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
+ priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
+ TOTAL_DESC);
+
+ return 0;
+}
+
+/* NAPI polling method */
+static int bcmgenet_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmgenet_priv *priv = container_of(napi,
+ struct bcmgenet_priv, napi);
+ unsigned int work_done;
+
+ /* tx reclaim */
+ bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+
+ work_done = bcmgenet_desc_rx(priv, budget);
+
+ /* Advancing our consumer index */
+ priv->rx_c_index += work_done;
+ priv->rx_c_index &= DMA_C_INDEX_MASK;
+ bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
+ priv->rx_c_index, RDMA_CONS_INDEX);
+ if (work_done < budget) {
+ napi_complete(napi);
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
+ }
+
+ return work_done;
+}
+
+/* Interrupt bottom half */
+static void bcmgenet_irq_task(struct work_struct *work)
+{
+ struct bcmgenet_priv *priv = container_of(
+ work, struct bcmgenet_priv, bcmgenet_irq_work);
+
+ netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
+
+ /* Link UP/DOWN event */
+ if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+ (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+ phy_mac_interrupt(priv->phydev,
+ priv->irq0_stat & UMAC_IRQ_LINK_UP);
+ priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
+ }
+}
+
+/* bcmgenet_isr1: interrupt handler for ring buffer. */
+static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+{
+ struct bcmgenet_priv *priv = dev_id;
+ unsigned int index;
+
+ /* Save irq status for bottom-half processing. */
+ priv->irq1_stat =
+ bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+ ~priv->int1_mask;
+ /* clear interrupts */
+ bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+
+ netif_dbg(priv, intr, priv->dev,
+ "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+ /* Check the MBDONE interrupts.
+ * packet is done, reclaim descriptors
+ */
+ if (priv->irq1_stat & 0x0000ffff) {
+ for (index = 0; index < 16; index++) {
+ if (priv->irq1_stat & (1 << index))
+ bcmgenet_tx_reclaim(priv->dev,
+ &priv->tx_rings[index]);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* bcmgenet_isr0: Handle various interrupts. */
+static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+{
+ struct bcmgenet_priv *priv = dev_id;
+
+ /* Save irq status for bottom-half processing. */
+ priv->irq0_stat =
+ bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+ ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ /* clear interrupts */
+ bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+ netif_dbg(priv, intr, priv->dev,
+ "IRQ=0x%x\n", priv->irq0_stat);
+
+ if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
+ /* We use NAPI (software interrupt throttling) when
+ * RX descriptor throttling is not used.
+ * Disable the interrupt; it will be re-enabled in the poll method.
+ */
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
+ __napi_schedule(&priv->napi);
+ }
+ }
+ if (priv->irq0_stat &
+ (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
+ /* Tx reclaim */
+ bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+ }
+ if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+ UMAC_IRQ_PHY_DET_F |
+ UMAC_IRQ_LINK_UP |
+ UMAC_IRQ_LINK_DOWN |
+ UMAC_IRQ_HFB_SM |
+ UMAC_IRQ_HFB_MM |
+ UMAC_IRQ_MPD_R)) {
+ /* all other interested interrupts handled in bottom half */
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
+ if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+ priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ wake_up(&priv->wq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ reg |= BIT(1);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+
+ reg &= ~BIT(1);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+}
+
+static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
+ unsigned char *addr)
+{
+ bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | addr[3], UMAC_MAC0);
+ bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
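+
+/* Worked example (illustrative): for the address 00:10:18:aa:bb:cc the
+ * writes above program UMAC_MAC0 = 0x001018aa and UMAC_MAC1 = 0x0000bbcc.
+ */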
+
+static int bcmgenet_wol_resume(struct bcmgenet_priv *priv)
+{
+ int ret;
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ clk_disable(priv->clk_wol);
+ /* init umac registers to synchronize s/w with h/w */
+ ret = init_umac(priv);
+ if (ret)
+ return ret;
+
+ phy_init_hw(priv->phydev);
+ /* Speed settings must be restored */
+ bcmgenet_mii_config(priv->dev);
+
+ return 0;
+}
+
+/* Returns a reusable dma control register value */
+static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+ u32 dma_ctrl;
+
+ /* disable DMA */
+ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+ return dma_ctrl;
+}
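+
+/* Worked value (illustrative): with DESC_INDEX = 16 and
+ * DMA_RING_BUF_EN_SHIFT = 1, the mask built above is
+ * (1 << 17) | DMA_EN = 0x20001, i.e. ring 16's buffer-enable bit plus the
+ * global DMA enable bit.
+ */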
+
+static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
+{
+ u32 reg;
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+}
+
+static int bcmgenet_open(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long dma_ctrl;
+ u32 reg;
+ int ret;
+
+ netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
+
+ /* Turn on the clock */
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ /* take MAC out of reset */
+ bcmgenet_umac_reset(priv);
+
+ ret = init_umac(priv);
+ if (ret)
+ goto err_clk_disable;
+
+ /* disable ethernet MAC while updating its registers */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~(CMD_TX_EN | CMD_RX_EN);
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+ if (priv->wol_enabled) {
+ ret = bcmgenet_wol_resume(priv);
+ if (ret)
+ goto err_clk_disable;
+ }
+
+ if (phy_is_internal(priv->phydev)) {
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= EXT_ENERGY_DET_MASK;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
+
+ /* Disable RX/TX DMA and flush TX queues */
+ dma_ctrl = bcmgenet_dma_disable(priv);
+
+ /* Reinitialize TDMA and RDMA and SW housekeeping */
+ ret = bcmgenet_init_dma(priv);
+ if (ret) {
+ netdev_err(dev, "failed to initialize DMA\n");
+ goto err_fini_dma;
+ }
+
+ /* Always enable ring 16 - descriptor ring */
+ bcmgenet_enable_dma(priv, dma_ctrl);
+
+ ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
+ dev->name, priv);
+ if (ret < 0) {
+ netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
+ goto err_fini_dma;
+ }
+
+ ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
+ dev->name, priv);
+ if (ret < 0) {
+ netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
+ goto err_irq0;
+ }
+
+ /* Start the network engine */
+ napi_enable(&priv->napi);
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg |= (CMD_TX_EN | CMD_RX_EN);
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ /* Make sure we reflect the value of CRC_CMD_FWD */
+ priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+
+ device_set_wakeup_capable(&dev->dev, 1);
+
+ if (phy_is_internal(priv->phydev))
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+ netif_tx_start_all_queues(dev);
+
+ phy_start(priv->phydev);
+
+ return 0;
+
+err_irq0:
+ free_irq(priv->irq0, priv);
+err_fini_dma:
+ bcmgenet_fini_dma(priv);
+err_clk_disable:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+ int ret = 0;
+ int timeout = 0;
+ u32 reg;
+
+ /* Disable TDMA to stop adding more frames to the TX DMA */
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check TDMA status register to confirm TDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout > DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev,
+ "Timed out while disabling TX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ /* Wait 10ms for packet drain in both tx and rx dma */
+ usleep_range(10000, 20000);
+
+ /* Disable RDMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ timeout = 0;
+ /* Check RDMA status register to confirm RDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout > DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev,
+ "Timed out while disabling RX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int bcmgenet_close(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret;
+ u32 reg;
+
+ netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
+
+ phy_stop(priv->phydev);
+
+ /* Disable MAC receive */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_RX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ netif_tx_stop_all_queues(dev);
+
+ ret = bcmgenet_dma_teardown(priv);
+ if (ret)
+ return ret;
+
+ /* Disable MAC transmit. TX DMA must be disabled before this. */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_TX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ napi_disable(&priv->napi);
+
+ /* tx reclaim */
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_dma(priv);
+
+ free_irq(priv->irq0, priv);
+ free_irq(priv->irq1, priv);
+
+ /* Wait for pending work items to complete - we are stopping
+ * the clock now. Since interrupts are disabled, no new work
+ * will be scheduled.
+ */
+ cancel_work_sync(&priv->bcmgenet_irq_work);
+
+ if (phy_is_internal(priv->phydev))
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+
+ if (priv->wol_enabled)
+ clk_enable(priv->clk_wol);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static void bcmgenet_timeout(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
+
+ dev->trans_start = jiffies;
+
+ dev->stats.tx_errors++;
+
+ netif_tx_wake_all_queues(dev);
+}
+
+#define MAX_MC_COUNT 16
+
+static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
+ unsigned char *addr,
+ int *i,
+ int *mc)
+{
+ u32 reg;
+
+ bcmgenet_umac_writel(priv,
+ addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
+ bcmgenet_umac_writel(priv,
+ addr[2] << 24 | addr[3] << 16 |
+ addr[4] << 8 | addr[5],
+ UMAC_MDF_ADDR + ((*i + 1) * 4));
+ reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
+ reg |= (1 << (MAX_MC_COUNT - *mc));
+ bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
+ *i += 2;
+ (*mc)++;
+}
+
+static void bcmgenet_set_rx_mode(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int i, mc;
+ u32 reg;
+
+ netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
+
+ /* Promiscuous mode */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (dev->flags & IFF_PROMISC) {
+ reg |= CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+ return;
+ } else {
+ reg &= ~CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ }
+
+ /* UniMac doesn't support ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI) {
+ netdev_warn(dev, "ALLMULTI is not supported\n");
+ return;
+ }
+
+ /* update MDF filter */
+ i = 0;
+ mc = 0;
+ /* Broadcast */
+ bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+ /* my own address */
+ bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
+ /* Unicast list */
+ if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
+ return;
+
+ if (!netdev_uc_empty(dev))
+ netdev_for_each_uc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+ /* Multicast */
+ if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
+ return;
+
+ netdev_for_each_mc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+}
+
+/* Set the hardware MAC address. */
+static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ /* Setting the MAC address at the hardware level is not possible
+ * without disabling the UniMAC RX/TX enable bits.
+ */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
+static const struct net_device_ops bcmgenet_netdev_ops = {
+ .ndo_open = bcmgenet_open,
+ .ndo_stop = bcmgenet_close,
+ .ndo_start_xmit = bcmgenet_xmit,
+ .ndo_tx_timeout = bcmgenet_timeout,
+ .ndo_set_rx_mode = bcmgenet_set_rx_mode,
+ .ndo_set_mac_address = bcmgenet_set_mac_addr,
+ .ndo_do_ioctl = bcmgenet_ioctl,
+ .ndo_set_features = bcmgenet_set_features,
+};
+
+/* Array of GENET hardware parameters/characteristics */
+static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
+ [GENET_V1] = {
+ .tx_queues = 0,
+ .rx_queues = 0,
+ .bds_cnt = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .qtag_mask = 0x1F,
+ .hfb_offset = 0x1000,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x3000,
+ .words_per_bd = 2,
+ },
+ [GENET_V2] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .qtag_mask = 0x1F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = 0x2000,
+ .rdma_offset = 0x3000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 2,
+ .flags = GENET_HAS_EXT,
+ },
+ [GENET_V3] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x10000,
+ .tdma_offset = 0x11000,
+ .words_per_bd = 2,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+ },
+ [GENET_V4] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+ },
+};
+
+/* Infer hardware parameters from the detected GENET version */
+static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
+{
+ struct bcmgenet_hw_params *params;
+ u32 reg;
+ u8 major;
+
+ if (GENET_IS_V4(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v4;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+ priv->version = GENET_V4;
+ } else if (GENET_IS_V3(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+ priv->version = GENET_V3;
+ } else if (GENET_IS_V2(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+ priv->version = GENET_V2;
+ } else if (GENET_IS_V1(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+ priv->version = GENET_V1;
+ }
+
+ /* enum genet_version starts at 1 */
+ priv->hw_params = &bcmgenet_hw_params[priv->version];
+ params = priv->hw_params;
+
+ /* Read GENET HW version */
+ reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
+ major = (reg >> 24 & 0x0f);
+ if (major == 5)
+ major = 4;
+ else if (major == 0)
+ major = 1;
+ if (major != priv->version) {
+ dev_err(&priv->pdev->dev,
+ "GENET version mismatch, got: %d, configured for: %d\n",
+ major, priv->version);
+ }
+
+ /* Print the GENET core version */
+ dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
+ major, (reg >> 16) & 0x0f, reg & 0xffff);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (!(params->flags & GENET_HAS_40BITS))
+ pr_warn("GENET does not support 40-bits PA\n");
+#endif
+
+ pr_debug("Configuration for version: %d\n"
+ "TXq: %1d, RXq: %1d, BDs: %1d\n"
+ "BP << en: %2d, BP msk: 0x%05x\n"
+ "HFB count: %2d, QTAQ msk: 0x%05x\n"
+ "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
+ "RDMA: 0x%05x, TDMA: 0x%05x\n"
+ "Words/BD: %d\n",
+ priv->version,
+ params->tx_queues, params->rx_queues, params->bds_cnt,
+ params->bp_in_en_shift, params->bp_in_mask,
+ params->hfb_filter_cnt, params->qtag_mask,
+ params->tbuf_offset, params->hfb_offset,
+ params->hfb_reg_offset,
+ params->rdma_offset, params->tdma_offset,
+ params->words_per_bd);
+}
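+
+/* Illustrative sketch (not driver code): the SYS_REV_CTRL decode above maps
+ * the raw major field onto the enum bcmgenet_version numbering; e.g. a
+ * register value of 0x05011234 yields major 5, reported as 4 (GENET_V4),
+ * while 0x00011234 yields major 0, reported as 1 (GENET_V1).
+ */
+static inline u8 bcmgenet_decode_major_sketch(u32 rev_ctrl)
+{
+ u8 major = (rev_ctrl >> 24) & 0x0f;
+
+ if (major == 5)
+ return 4;
+ if (major == 0)
+ return 1;
+ return major;
+}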
+
+static const struct of_device_id bcmgenet_match[] = {
+ { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
+ { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
+ { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
+ { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+ { },
+};
+
+static int bcmgenet_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ const struct of_device_id *of_id;
+ struct bcmgenet_priv *priv;
+ struct net_device *dev;
+ const void *macaddr;
+ struct resource *r;
+ int err = -EIO;
+
+ /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
+ dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
+ if (!dev) {
+ dev_err(&pdev->dev, "can't allocate net device\n");
+ return -ENOMEM;
+ }
+
+ of_id = of_match_node(bcmgenet_match, dn);
+ if (!of_id) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ priv = netdev_priv(dev);
+ priv->irq0 = platform_get_irq(pdev, 0);
+ priv->irq1 = platform_get_irq(pdev, 1);
+ if (!priv->irq0 || !priv->irq1) {
+ dev_err(&pdev->dev, "can't find IRQs\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ macaddr = of_get_mac_address(dn);
+ if (!macaddr) {
+ dev_err(&pdev->dev, "can't find MAC address\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->base)) {
+ err = PTR_ERR(priv->base);
+ goto err;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev_set_drvdata(&pdev->dev, dev);
+ ether_addr_copy(dev->dev_addr, macaddr);
+ dev->watchdog_timeo = 2 * HZ;
+ SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
+ dev->netdev_ops = &bcmgenet_netdev_ops;
+ netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
+
+ priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
+
+ /* Set hardware features */
+ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+
+ /* Set the needed headroom to account for any possible
+ * features enabling/disabling at runtime
+ */
+ dev->needed_headroom += 64;
+
+ netdev_boot_setup_check(dev);
+
+ priv->dev = dev;
+ priv->pdev = pdev;
+ priv->version = (enum bcmgenet_version)of_id->data;
+
+ bcmgenet_set_hw_params(priv);
+
+ /* Mii wait queue */
+ init_waitqueue_head(&priv->wq);
+ /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
+ priv->rx_buf_len = RX_BUF_LENGTH;
+ INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
+
+ priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
+ if (IS_ERR(priv->clk))
+ dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+
+ priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
+ if (IS_ERR(priv->clk_wol))
+ dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ err = reset_umac(priv);
+ if (err)
+ goto err_clk_disable;
+
+ err = bcmgenet_mii_init(dev);
+ if (err)
+ goto err_clk_disable;
+
+ /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware queues,
+ * just the ring 16 descriptor-based TX queue).
+ */
+ netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
+ netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_clk_disable;
+
+ /* Turn off the main clock, WOL clock is handled separately */
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
+ return err;
+
+err_clk_disable:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+err:
+ free_netdev(dev);
+ return err;
+}
+
+static int bcmgenet_remove(struct platform_device *pdev)
+{
+ struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+ unregister_netdev(priv->dev);
+ bcmgenet_mii_exit(priv->dev);
+ free_netdev(priv->dev);
+
+ return 0;
+}
+
+
+static struct platform_driver bcmgenet_driver = {
+ .probe = bcmgenet_probe,
+ .remove = bcmgenet_remove,
+ .driver = {
+ .name = "bcmgenet",
+ .owner = THIS_MODULE,
+ .of_match_table = bcmgenet_match,
+ },
+};
+module_platform_driver(bcmgenet_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
+MODULE_ALIAS("platform:bcmgenet");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
new file mode 100644
index 000000000000..0f117105fed1
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -0,0 +1,628 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *
+*/
+#ifndef __BCMGENET_H__
+#define __BCMGENET_H__
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+
+/* total number of Buffer Descriptors, same for Rx/Tx */
+#define TOTAL_DESC 256
+
+/* which ring is descriptor based */
+#define DESC_INDEX 16
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528;
+ * with ENET_PAD(8) this gives 1536, which is a multiple of 256 bytes.
+ */
+#define ENET_BRCM_TAG_LEN 6
+#define ENET_PAD 8
+#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+ ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+#define DMA_MAX_BURST_LENGTH 0x10
+
+/* misc. configuration */
+#define CLEAR_ALL_HFB 0xFF
+#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4)
+#define DMA_FC_THRESH_LO 5
+
+/* 64B receive/transmit status block */
+struct status_64 {
+ u32 length_status; /* length and peripheral status */
+ u32 ext_status; /* Extended status*/
+ u32 rx_csum; /* partial rx checksum */
+ u32 unused1[9]; /* unused */
+ u32 tx_csum_info; /* Tx checksum info. */
+ u32 unused2[3]; /* unused */
+};
+
+/* Rx status bits */
+#define STATUS_RX_EXT_MASK 0x1FFFFF
+#define STATUS_RX_CSUM_MASK 0xFFFF
+#define STATUS_RX_CSUM_OK 0x10000
+#define STATUS_RX_CSUM_FR 0x20000
+#define STATUS_RX_PROTO_TCP 0
+#define STATUS_RX_PROTO_UDP 1
+#define STATUS_RX_PROTO_ICMP 2
+#define STATUS_RX_PROTO_OTHER 3
+#define STATUS_RX_PROTO_MASK 3
+#define STATUS_RX_PROTO_SHIFT 18
+#define STATUS_FILTER_INDEX_MASK 0xFFFF
+/* Tx status bits */
+#define STATUS_TX_CSUM_START_MASK 0X7FFF
+#define STATUS_TX_CSUM_START_SHIFT 16
+#define STATUS_TX_CSUM_PROTO_UDP 0x8000
+#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF
+#define STATUS_TX_CSUM_LV 0x80000000
+
+/* DMA Descriptor */
+#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */
+#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */
+#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */
+
+/* Rx/Tx common counter group */
+struct bcmgenet_pkt_counters {
+ u32 cnt_64; /* RO Received/Transmitted 64 bytes packet */
+ u32 cnt_127; /* RO Rx/Tx 65-127 bytes packet */
+ u32 cnt_255; /* RO Rx/Tx 128-255 bytes packet */
+ u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */
+ u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */
+ u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */
+ u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */
+ u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/
+ u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/
+ u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/
+};
+
+/* RSV, Receive Status Vector */
+struct bcmgenet_rx_counters {
+ struct bcmgenet_pkt_counters pkt_cnt;
+ u32 pkt; /* RO (0x428) Received pkt count*/
+ u32 bytes; /* RO Received byte count */
+ u32 mca; /* RO # of Received multicast pkt */
+ u32 bca; /* RO # of Receive broadcast pkt */
+ u32 fcs; /* RO # of Received FCS error */
+ u32 cf; /* RO # of Received control frame pkt*/
+ u32 pf; /* RO # of Received pause frame pkt */
+ u32 uo; /* RO # of unknown op code pkt */
+ u32 aln; /* RO # of alignment error count */
+ u32 flr; /* RO # of frame length out of range count */
+ u32 cde; /* RO # of code error pkt */
+ u32 fcr; /* RO # of carrier sense error pkt */
+ u32 ovr; /* RO # of oversize pkt*/
+ u32 jbr; /* RO # of jabber count */
+ u32 mtue; /* RO # of MTU error pkt*/
+ u32 pok; /* RO # of Received good pkt */
+ u32 uc; /* RO # of unicast pkt */
+ u32 ppp; /* RO # of PPP pkt */
+ u32 rcrc; /* RO (0x470),# of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcmgenet_tx_counters {
+ struct bcmgenet_pkt_counters pkt_cnt;
+ u32 pkts; /* RO (0x4a8) Transmitted pkt */
+ u32 mca; /* RO # of xmited multicast pkt */
+ u32 bca; /* RO # of xmited broadcast pkt */
+ u32 pf; /* RO # of xmited pause frame count */
+ u32 cf; /* RO # of xmited control frame count */
+ u32 fcs; /* RO # of xmited FCS error count */
+ u32 ovr; /* RO # of xmited oversize pkt */
+ u32 drf; /* RO # of xmited deferral pkt */
+ u32 edf; /* RO # of xmited Excessive deferral pkt*/
+ u32 scl; /* RO # of xmited single collision pkt */
+ u32 mcl; /* RO # of xmited multiple collision pkt*/
+ u32 lcl; /* RO # of xmited late collision pkt */
+ u32 ecl; /* RO # of xmited excessive collision pkt*/
+ u32 frg; /* RO # of xmited fragments pkt*/
+ u32 ncl; /* RO # of xmited total collision count */
+ u32 jbr; /* RO # of xmited jabber count*/
+ u32 bytes; /* RO # of xmited byte count */
+ u32 pok; /* RO # of xmited good pkt */
+ u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcmgenet_mib_counters {
+ struct bcmgenet_rx_counters rx;
+ struct bcmgenet_tx_counters tx;
+ u32 rx_runt_cnt;
+ u32 rx_runt_fcs;
+ u32 rx_runt_fcs_align;
+ u32 rx_runt_bytes;
+ u32 rbuf_ovflow_cnt;
+ u32 rbuf_err_cnt;
+ u32 mdf_err_cnt;
+};
+
+#define UMAC_HD_BKP_CTRL 0x004
+#define HD_FC_EN (1 << 0)
+#define HD_FC_BKOFF_OK (1 << 1)
+#define IPG_CONFIG_RX_SHIFT 2
+#define IPG_CONFIG_RX_MASK 0x1F
+
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define UMAC_SPEED_10 0
+#define UMAC_SPEED_100 1
+#define UMAC_SPEED_1000 2
+#define UMAC_SPEED_2500 3
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+
+#define UMAC_MAC0 0x00C
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+
+#define UMAC_TX_FLUSH 0x334
+
+#define UMAC_MIB_START 0x400
+
+#define UMAC_MDIO_CMD 0x614
+#define MDIO_START_BUSY (1 << 29)
+#define MDIO_READ_FAIL (1 << 28)
+#define MDIO_RD (2 << 26)
+#define MDIO_WR (1 << 26)
+#define MDIO_PMD_SHIFT 21
+#define MDIO_PMD_MASK 0x1F
+#define MDIO_REG_SHIFT 16
+#define MDIO_REG_MASK 0x1F
+
+#define UMAC_RBUF_OVFL_CNT 0x61C
+
+#define UMAC_MPD_CTRL 0x620
+#define MPD_EN (1 << 0)
+#define MPD_PW_EN (1 << 27)
+#define MPD_MSEQ_LEN_SHIFT 16
+#define MPD_MSEQ_LEN_MASK 0xFF
+
+#define UMAC_MPD_PW_MS 0x624
+#define UMAC_MPD_PW_LS 0x628
+#define UMAC_RBUF_ERR_CNT 0x634
+#define UMAC_MDF_ERR_CNT 0x638
+#define UMAC_MDF_CTRL 0x650
+#define UMAC_MDF_ADDR 0x654
+#define UMAC_MIB_CTRL 0x580
+#define MIB_RESET_RX (1 << 0)
+#define MIB_RESET_RUNT (1 << 1)
+#define MIB_RESET_TX (1 << 2)
+
+#define RBUF_CTRL 0x00
+#define RBUF_64B_EN (1 << 0)
+#define RBUF_ALIGN_2B (1 << 1)
+#define RBUF_BAD_DIS (1 << 2)
+
+#define RBUF_STATUS 0x0C
+#define RBUF_STATUS_WOL (1 << 0)
+#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1)
+#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2)
+
+#define RBUF_CHK_CTRL 0x14
+#define RBUF_RXCHK_EN (1 << 0)
+#define RBUF_SKIP_FCS (1 << 4)
+
+#define RBUF_TBUF_SIZE_CTRL 0xb4
+
+#define RBUF_HFB_CTRL_V1 0x38
+#define RBUF_HFB_FILTER_EN_SHIFT 16
+#define RBUF_HFB_FILTER_EN_MASK 0xffff0000
+#define RBUF_HFB_EN (1 << 0)
+#define RBUF_HFB_256B (1 << 1)
+#define RBUF_ACPI_EN (1 << 2)
+
+#define RBUF_HFB_LEN_V1 0x3C
+#define RBUF_FLTR_LEN_MASK 0xFF
+#define RBUF_FLTR_LEN_SHIFT 8
+
+#define TBUF_CTRL 0x00
+#define TBUF_BP_MC 0x0C
+
+#define TBUF_CTRL_V1 0x80
+#define TBUF_BP_MC_V1 0xA0
+
+#define HFB_CTRL 0x00
+#define HFB_FLT_ENABLE_V3PLUS 0x04
+#define HFB_FLT_LEN_V2 0x04
+#define HFB_FLT_LEN_V3PLUS 0x1C
+
+/* UniMAC INTRL2 registers */
+#define INTRL2_CPU_STAT 0x00
+#define INTRL2_CPU_SET 0x04
+#define INTRL2_CPU_CLEAR 0x08
+#define INTRL2_CPU_MASK_STATUS 0x0C
+#define INTRL2_CPU_MASK_SET 0x10
+#define INTRL2_CPU_MASK_CLEAR 0x14
+
+/* INTRL2 instance 0 definitions */
+#define UMAC_IRQ_SCB (1 << 0)
+#define UMAC_IRQ_EPHY (1 << 1)
+#define UMAC_IRQ_PHY_DET_R (1 << 2)
+#define UMAC_IRQ_PHY_DET_F (1 << 3)
+#define UMAC_IRQ_LINK_UP (1 << 4)
+#define UMAC_IRQ_LINK_DOWN (1 << 5)
+#define UMAC_IRQ_UMAC (1 << 6)
+#define UMAC_IRQ_UMAC_TSV (1 << 7)
+#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8)
+#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9)
+#define UMAC_IRQ_HFB_SM (1 << 10)
+#define UMAC_IRQ_HFB_MM (1 << 11)
+#define UMAC_IRQ_MPD_R (1 << 12)
+#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
+#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
+#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
+#define UMAC_IRQ_TXDMA_MBDONE (1 << 16)
+#define UMAC_IRQ_TXDMA_PDONE (1 << 17)
+#define UMAC_IRQ_TXDMA_BDONE (1 << 18)
+/* Only valid for GENETv3+ */
+#define UMAC_IRQ_MDIO_DONE (1 << 23)
+#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+
+/* Register block offsets */
+#define GENET_SYS_OFF 0x0000
+#define GENET_GR_BRIDGE_OFF 0x0040
+#define GENET_EXT_OFF 0x0080
+#define GENET_INTRL2_0_OFF 0x0200
+#define GENET_INTRL2_1_OFF 0x0240
+#define GENET_RBUF_OFF 0x0300
+#define GENET_UMAC_OFF 0x0800
+
+/* SYS block offsets and register definitions */
+#define SYS_REV_CTRL 0x00
+#define SYS_PORT_CTRL 0x04
+#define PORT_MODE_INT_EPHY 0
+#define PORT_MODE_INT_GPHY 1
+#define PORT_MODE_EXT_EPHY 2
+#define PORT_MODE_EXT_GPHY 3
+#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4))
+#define PORT_MODE_EXT_RVMII_50 4
+#define LED_ACT_SOURCE_MAC (1 << 9)
+
+#define SYS_RBUF_FLUSH_CTRL 0x08
+#define SYS_TBUF_FLUSH_CTRL 0x0C
+#define RBUF_FLUSH_CTRL_V1 0x04
+
+/* Ext block register offsets and definitions */
+#define EXT_EXT_PWR_MGMT 0x00
+#define EXT_PWR_DOWN_BIAS (1 << 0)
+#define EXT_PWR_DOWN_DLL (1 << 1)
+#define EXT_PWR_DOWN_PHY (1 << 2)
+#define EXT_PWR_DN_EN_LD (1 << 3)
+#define EXT_ENERGY_DET (1 << 4)
+#define EXT_IDDQ_FROM_PHY (1 << 5)
+#define EXT_PHY_RESET (1 << 8)
+#define EXT_ENERGY_DET_MASK (1 << 12)
+
+#define EXT_RGMII_OOB_CTRL 0x0C
+#define RGMII_MODE_EN (1 << 0)
+#define RGMII_LINK (1 << 4)
+#define OOB_DISABLE (1 << 5)
+#define ID_MODE_DIS (1 << 16)
+
+#define EXT_GPHY_CTRL 0x1C
+#define EXT_CFG_IDDQ_BIAS (1 << 0)
+#define EXT_CFG_PWR_DOWN (1 << 1)
+#define EXT_GPHY_RESET (1 << 5)
+
+/* DMA rings size */
+#define DMA_RING_SIZE (0x40)
+#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1))
+
+/* DMA registers common definitions */
+#define DMA_RW_POINTER_MASK 0x1FF
+#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF
+#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16
+#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF
+#define DMA_BUFFER_DONE_CNT_SHIFT 16
+#define DMA_P_INDEX_MASK 0xFFFF
+#define DMA_C_INDEX_MASK 0xFFFF
+
+/* DMA ring size register */
+#define DMA_RING_SIZE_MASK 0xFFFF
+#define DMA_RING_SIZE_SHIFT 16
+#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF
+
+/* DMA interrupt threshold register */
+#define DMA_INTR_THRESHOLD_MASK 0x00FF
+
+/* DMA XON/XOFF register */
+#define DMA_XON_THREHOLD_MASK 0xFFFF
+#define DMA_XOFF_THRESHOLD_MASK 0xFFFF
+#define DMA_XOFF_THRESHOLD_SHIFT 16
+
+/* DMA flow period register */
+#define DMA_FLOW_PERIOD_MASK 0xFFFF
+#define DMA_MAX_PKT_SIZE_MASK 0xFFFF
+#define DMA_MAX_PKT_SIZE_SHIFT 16
+
+
+/* DMA control register */
+#define DMA_EN (1 << 0)
+#define DMA_RING_BUF_EN_SHIFT 0x01
+#define DMA_RING_BUF_EN_MASK 0xFFFF
+#define DMA_TSB_SWAP_EN (1 << 20)
+
+/* DMA status register */
+#define DMA_DISABLED (1 << 0)
+#define DMA_DESC_RAM_INIT_BUSY (1 << 1)
+
+/* DMA SCB burst size register */
+#define DMA_SCB_BURST_SIZE_MASK 0x1F
+
+/* DMA activity vector register */
+#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF
+
+/* DMA backpressure mask register */
+#define DMA_BACKPRESSURE_MASK 0x1FFFF
+#define DMA_PFC_ENABLE (1 << 31)
+
+/* DMA backpressure status register */
+#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF
+
+/* DMA override register */
+#define DMA_LITTLE_ENDIAN_MODE (1 << 0)
+#define DMA_REGISTER_MODE (1 << 1)
+
+/* DMA timeout register */
+#define DMA_TIMEOUT_MASK 0xFFFF
+#define DMA_TIMEOUT_VAL 5000 /* micro seconds */
+
+/* TDMA rate limiting control register */
+#define DMA_RATE_LIMIT_EN_MASK 0xFFFF
+
+/* TDMA arbitration control register */
+#define DMA_ARBITER_MODE_MASK 0x03
+#define DMA_RING_BUF_PRIORITY_MASK 0x1F
+#define DMA_RING_BUF_PRIORITY_SHIFT 5
+#define DMA_RATE_ADJ_MASK 0xFF
+
+/* Tx/Rx DMA descriptor common bits */
+#define DMA_BUFLENGTH_MASK 0x0fff
+#define DMA_BUFLENGTH_SHIFT 16
+#define DMA_OWN 0x8000
+#define DMA_EOP 0x4000
+#define DMA_SOP 0x2000
+#define DMA_WRAP 0x1000
+/* Tx specific Dma descriptor bits */
+#define DMA_TX_UNDERRUN 0x0200
+#define DMA_TX_APPEND_CRC 0x0040
+#define DMA_TX_OW_CRC 0x0020
+#define DMA_TX_DO_CSUM 0x0010
+#define DMA_TX_QTAG_SHIFT 7
+
+/* Rx Specific Dma descriptor bits */
+#define DMA_RX_CHK_V3PLUS 0x8000
+#define DMA_RX_CHK_V12 0x1000
+#define DMA_RX_BRDCAST 0x0040
+#define DMA_RX_MULT 0x0020
+#define DMA_RX_LG 0x0010
+#define DMA_RX_NO 0x0008
+#define DMA_RX_RXER 0x0004
+#define DMA_RX_CRC_ERROR 0x0002
+#define DMA_RX_OV 0x0001
+#define DMA_RX_FI_MASK 0x001F
+#define DMA_RX_FI_SHIFT 0x0007
+#define DMA_DESC_ALLOC_MASK 0x00FF
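+
+/* Illustrative helpers (sketch, not used by the driver): a descriptor's
+ * length_status word packs the buffer length above DMA_BUFLENGTH_SHIFT and
+ * the DMA_* flag bits defined here in the lower 16 bits, which is how
+ * bcmgenet_desc_rx() consumes it.
+ */
+static inline unsigned int bcmgenet_lenstat_to_len_sketch(u32 length_status)
+{
+ return length_status >> DMA_BUFLENGTH_SHIFT;
+}
+
+static inline unsigned int bcmgenet_lenstat_to_flags_sketch(u32 length_status)
+{
+ return length_status & 0xffff;
+}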
+
+#define DMA_ARBITER_RR 0x00
+#define DMA_ARBITER_WRR 0x01
+#define DMA_ARBITER_SP 0x02
+
+struct enet_cb {
+ struct sk_buff *skb;
+ void __iomem *bd_addr;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* power management mode */
+enum bcmgenet_power_mode {
+ GENET_POWER_CABLE_SENSE = 0,
+ GENET_POWER_PASSIVE,
+};
+
+struct bcmgenet_priv;
+
+/* We support both runtime GENET detection and compile-time
+ * optimization of code paths for a given hardware version
+ */
+enum bcmgenet_version {
+ GENET_V1 = 1,
+ GENET_V2,
+ GENET_V3,
+ GENET_V4
+};
+
+#define GENET_IS_V1(p) ((p)->version == GENET_V1)
+#define GENET_IS_V2(p) ((p)->version == GENET_V2)
+#define GENET_IS_V3(p) ((p)->version == GENET_V3)
+#define GENET_IS_V4(p) ((p)->version == GENET_V4)
+
+/* Hardware flags */
+#define GENET_HAS_40BITS (1 << 0)
+#define GENET_HAS_EXT (1 << 1)
+#define GENET_HAS_MDIO_INTR (1 << 2)
+
+/* BCMGENET hardware parameters, keep this structure nicely aligned
+ * since it is going to be used in hot paths
+ */
+struct bcmgenet_hw_params {
+ u8 tx_queues;
+ u8 rx_queues;
+ u8 bds_cnt;
+ u8 bp_in_en_shift;
+ u32 bp_in_mask;
+ u8 hfb_filter_cnt;
+ u8 qtag_mask;
+ u16 tbuf_offset;
+ u32 hfb_offset;
+ u32 hfb_reg_offset;
+ u32 rdma_offset;
+ u32 tdma_offset;
+ u32 words_per_bd;
+ u32 flags;
+};
+
+struct bcmgenet_tx_ring {
+ spinlock_t lock; /* ring lock */
+ unsigned int index; /* ring index */
+ unsigned int queue; /* queue index */
+ struct enet_cb *cbs; /* tx ring buffer control block*/
+ unsigned int size; /* size of each tx ring */
+ unsigned int c_index; /* last consumer index of each ring*/
+ unsigned int free_bds; /* # of free bds for each ring */
+ unsigned int write_ptr; /* Tx ring write pointer SW copy */
+ unsigned int prod_index; /* Tx ring producer index SW copy */
+ unsigned int cb_ptr; /* Tx ring initial CB ptr */
+ unsigned int end_ptr; /* Tx ring end CB ptr */
+ void (*int_enable)(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *);
+ void (*int_disable)(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *);
+};
+
+/* device context */
+struct bcmgenet_priv {
+ void __iomem *base;
+ enum bcmgenet_version version;
+ struct net_device *dev;
+ u32 int0_mask;
+ u32 int1_mask;
+
+ /* NAPI for descriptor based rx */
+ struct napi_struct napi ____cacheline_aligned;
+
+ /* transmit variables */
+ void __iomem *tx_bds;
+ struct enet_cb *tx_cbs;
+ unsigned int num_tx_bds;
+
+ struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+
+ /* receive variables */
+ void __iomem *rx_bds;
+ void __iomem *rx_bd_assign_ptr;
+ int rx_bd_assign_index;
+ struct enet_cb *rx_cbs;
+ unsigned int num_rx_bds;
+ unsigned int rx_buf_len;
+ unsigned int rx_read_ptr;
+ unsigned int rx_c_index;
+
+ /* other misc variables */
+ struct bcmgenet_hw_params *hw_params;
+
+ /* MDIO bus variables */
+ wait_queue_head_t wq;
+ struct phy_device *phydev;
+ struct device_node *phy_dn;
+ struct mii_bus *mii_bus;
+
+ /* PHY device variables */
+ int old_duplex;
+ int old_link;
+ int old_pause;
+ phy_interface_t phy_interface;
+ int phy_addr;
+ int ext_phy;
+
+ /* Interrupt variables */
+ struct work_struct bcmgenet_irq_work;
+ int irq0;
+ int irq1;
+ unsigned int irq0_stat;
+ unsigned int irq1_stat;
+
+ /* HW descriptors/checksum variables */
+ bool desc_64b_en;
+ bool desc_rxchk_en;
+ bool crc_fwd_en;
+
+ unsigned int dma_rx_chk_bit;
+
+ u32 msg_enable;
+
+ struct clk *clk;
+ struct platform_device *pdev;
+
+ /* WOL */
+ unsigned long wol_enabled;
+ struct clk *clk_wol;
+ u32 wolopts;
+
+ struct bcmgenet_mib_counters mib;
+};
+
+#define GENET_IO_MACRO(name, offset) \
+static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
+ u32 off) \
+{ \
+ return __raw_readl(priv->base + offset + off); \
+} \
+static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ __raw_writel(val, priv->base + offset + off); \
+}
+
+GENET_IO_MACRO(ext, GENET_EXT_OFF);
+GENET_IO_MACRO(umac, GENET_UMAC_OFF);
+GENET_IO_MACRO(sys, GENET_SYS_OFF);
+
+/* interrupt l2 registers accessors */
+GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF);
+GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF);
+
+/* HFB register accessors */
+GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset);
+
+/* GENET v2+ HFB control and filter len helpers */
+GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset);
+
+/* RBUF register accessors */
+GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
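+
+/* Illustrative expansion (no new code paths): GENET_IO_MACRO(umac,
+ * GENET_UMAC_OFF) above generates bcmgenet_umac_readl()/_writel(), so a
+ * call such as bcmgenet_umac_readl(priv, UMAC_CMD) performs
+ * __raw_readl(priv->base + GENET_UMAC_OFF + UMAC_CMD).
+ */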
+
+/* MDIO routines */
+int bcmgenet_mii_init(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev);
+void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_mii_reset(struct net_device *dev);
+
+#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
new file mode 100644
index 000000000000..4608673beaff
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -0,0 +1,464 @@
+/*
+ * Broadcom GENET MDIO routines
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/brcmphy.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+
+#include "bcmgenet.h"
+
+/* read a value from the MII */
+static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+ int ret;
+ struct net_device *dev = bus->priv;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
+ (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
+ /* Start MDIO transaction */
+ reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+ reg |= MDIO_START_BUSY;
+ bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
+ wait_event_timeout(priv->wq,
+ !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
+ & MDIO_START_BUSY),
+ HZ / 100);
+ ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+
+ if (ret & MDIO_READ_FAIL)
+ return -EIO;
+
+ return ret & 0xffff;
+}
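+
+/* Worked example (illustrative): for phy_id = 1 and location = MII_BMSR (1),
+ * the command word written above is
+ * MDIO_RD | (1 << MDIO_PMD_SHIFT) | (1 << MDIO_REG_SHIFT) =
+ * (2 << 26) | (1 << 21) | (1 << 16) = 0x08210000, after which
+ * MDIO_START_BUSY kicks off the transaction.
+ */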
+
+/* write a value to the MII */
+static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
+ int location, u16 val)
+{
+ struct net_device *dev = bus->priv;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
+ (location << MDIO_REG_SHIFT) | (0xffff & val)),
+ UMAC_MDIO_CMD);
+ reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+ reg |= MDIO_START_BUSY;
+ bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
+ wait_event_timeout(priv->wq,
+ !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
+ MDIO_START_BUSY),
+ HZ / 100);
+
+ return 0;
+}
+
+/* Set up netdev link state when the PHY link status changes, and
+ * update the UMAC and RGMII blocks when the link comes up
+ */
+static void bcmgenet_mii_setup(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ u32 reg, cmd_bits = 0;
+ unsigned int status_changed = 0;
+
+ if (priv->old_link != phydev->link) {
+ status_changed = 1;
+ priv->old_link = phydev->link;
+ }
+
+ if (phydev->link) {
+ /* program UMAC and RGMII block based on established link
+ * speed, duplex, and pause. The speed set in umac->cmd tells the
+ * RGMII block which clock to use for transmit (25MHz for 100Mbps,
+ * 125MHz for 1Gbps). The receive clock is provided by the PHY.
+ */
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
+ reg |= RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
+ /* speed */
+ if (phydev->speed == SPEED_1000)
+ cmd_bits = UMAC_SPEED_1000;
+ else if (phydev->speed == SPEED_100)
+ cmd_bits = UMAC_SPEED_100;
+ else
+ cmd_bits = UMAC_SPEED_10;
+ cmd_bits <<= CMD_SPEED_SHIFT;
+
+ if (priv->old_duplex != phydev->duplex) {
+ status_changed = 1;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ /* duplex */
+ if (phydev->duplex != DUPLEX_FULL)
+ cmd_bits |= CMD_HD_EN;
+
+ if (priv->old_pause != phydev->pause) {
+ status_changed = 1;
+ priv->old_pause = phydev->pause;
+ }
+
+ /* pause capability */
+ if (!phydev->pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ }
+
+ if (status_changed)
+ phy_print_status(phydev);
+}
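+
+/* Worked example (illustrative): a 1Gbps, full-duplex link with pause
+ * resolved leaves cmd_bits = UMAC_SPEED_1000 << CMD_SPEED_SHIFT = 0x8;
+ * neither CMD_HD_EN nor the pause-ignore bits are set before the
+ * read-modify-write of UMAC_CMD above.
+ */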
+
+void bcmgenet_mii_reset(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (priv->phydev) {
+ phy_init_hw(priv->phydev);
+ phy_start_aneg(priv->phydev);
+ }
+}
+
+static void bcmgenet_ephy_power_up(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg = 0;
+
+ /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
+ if (!GENET_IS_V4(priv))
+ return;
+
+ reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
+ reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(2);
+
+ reg &= ~EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ udelay(20);
+}
+
+static void bcmgenet_internal_phy_setup(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ /* Power up EPHY */
+ bcmgenet_ephy_power_up(dev);
+ /* enable APD */
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= EXT_PWR_DN_EN_LD;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_mii_reset(dev);
+}
+
+static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+
+ /* Speed settings are set in bcmgenet_mii_setup() */
+ reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+ reg |= LED_ACT_SOURCE_MAC;
+ bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+}
+
+int bcmgenet_mii_config(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ struct device *kdev = &priv->pdev->dev;
+ const char *phy_name = NULL;
+ u32 id_mode_dis = 0;
+ u32 port_ctrl;
+ u32 reg;
+
+ priv->ext_phy = !phy_is_internal(priv->phydev) &&
+ (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
+
+ if (phy_is_internal(priv->phydev))
+ priv->phy_interface = PHY_INTERFACE_MODE_NA;
+
+ switch (priv->phy_interface) {
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MOCA:
+ /* Irrespective of the actually configured PHY speed (100 or
+ * 1000), GENETv4 only has an internal GPHY, so we will just
+ * end up masking the Gigabit features from what we support
+ * rather than switching to the EPHY.
+ */
+ if (GENET_IS_V4(priv))
+ port_ctrl = PORT_MODE_INT_GPHY;
+ else
+ port_ctrl = PORT_MODE_INT_EPHY;
+
+ bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+
+ if (phy_is_internal(priv->phydev)) {
+ phy_name = "internal PHY";
+ bcmgenet_internal_phy_setup(dev);
+ } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ phy_name = "MoCA";
+ bcmgenet_moca_phy_setup(priv);
+ }
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ phy_name = "external MII";
+ phydev->supported &= PHY_BASIC_FEATURES;
+ bcmgenet_sys_writel(priv,
+ PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
+ break;
+
+ case PHY_INTERFACE_MODE_REVMII:
+ phy_name = "external RvMII";
+ /* of_mdiobus_register took care of reading the 'max-speed'
+ * PHY property for us, effectively limiting the PHY's supported
+ * capabilities; use that knowledge to configure the Reverse MII
+ * interface correctly as well.
+ */
+ if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
+ PHY_BASIC_FEATURES)
+ port_ctrl = PORT_MODE_EXT_RVMII_25;
+ else
+ port_ctrl = PORT_MODE_EXT_RVMII_50;
+ bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ /* RGMII_NO_ID: TXC transitions at the same time as TXD
+ * (requires PCB or receiver-side delay)
+ * RGMII: Add 2ns delay on TXC (90 degree shift)
+ *
+ * ID is implicitly disabled for 100Mbps (RG)MII operation.
+ */
+ id_mode_dis = BIT(16);
+ /* fall through */
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (id_mode_dis)
+ phy_name = "external RGMII (no delay)";
+ else
+ phy_name = "external RGMII (TX delay)";
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg |= RGMII_MODE_EN | id_mode_dis;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ bcmgenet_sys_writel(priv,
+ PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+ break;
+ default:
+ dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
+ return -EINVAL;
+ }
+
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
+
+ return 0;
+}
+
+static int bcmgenet_mii_probe(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ unsigned int phy_flags;
+ int ret;
+
+ if (priv->phydev) {
+ pr_info("PHY already attached\n");
+ return 0;
+ }
+
+ if (priv->phy_dn)
+ phydev = of_phy_connect(dev, priv->phy_dn,
+ bcmgenet_mii_setup, 0,
+ priv->phy_interface);
+ else
+ phydev = of_phy_connect_fixed_link(dev,
+ bcmgenet_mii_setup,
+ priv->phy_interface);
+
+ if (!phydev) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ priv->old_link = -1;
+ priv->old_duplex = -1;
+ priv->old_pause = -1;
+ priv->phydev = phydev;
+
+ /* Configure the port multiplexer based on the probed PHY device,
+ * since reading the 'max-speed' property determines the maximum
+ * supported PHY speed, which bcmgenet_mii_config() needs in order
+ * to configure things appropriately.
+ */
+ ret = bcmgenet_mii_config(dev);
+ if (ret) {
+ phy_disconnect(priv->phydev);
+ return ret;
+ }
+
+ phy_flags = PHY_BRCM_100MBPS_WAR;
+
+ /* Workarounds are only needed for 100Mbps PHYs, and
+ * never on GENET V1 hardware.
+ */
+ if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv))
+ phy_flags = 0;
+
+ phydev->dev_flags |= phy_flags;
+ phydev->advertising = phydev->supported;
+
+ /* The internal PHY has its link interrupts routed to the
+ * Ethernet MAC ISRs
+ */
+ if (phy_is_internal(priv->phydev))
+ priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
+ else
+ priv->mii_bus->irq[phydev->addr] = PHY_POLL;
+
+ pr_info("attached PHY at address %d [%s]\n",
+ phydev->addr, phydev->drv->name);
+
+ return 0;
+}
+
+static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
+{
+ struct mii_bus *bus;
+
+ if (priv->mii_bus)
+ return 0;
+
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus) {
+ pr_err("failed to allocate\n");
+ return -ENOMEM;
+ }
+
+ bus = priv->mii_bus;
+ bus->priv = priv->dev;
+ bus->name = "bcmgenet MII bus";
+ bus->parent = &priv->pdev->dev;
+ bus->read = bcmgenet_mii_read;
+ bus->write = bcmgenet_mii_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
+ priv->pdev->name, priv->pdev->id);
+
+ bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bus->irq) {
+ mdiobus_free(priv->mii_bus);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
+{
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct device *kdev = &priv->pdev->dev;
+ struct device_node *mdio_dn;
+ char *compat;
+ int ret;
+
+ compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
+ if (!compat)
+ return -ENOMEM;
+
+ mdio_dn = of_find_compatible_node(dn, NULL, compat);
+ kfree(compat);
+ if (!mdio_dn) {
+ dev_err(kdev, "unable to find MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ ret = of_mdiobus_register(priv->mii_bus, mdio_dn);
+ if (ret) {
+ dev_err(kdev, "failed to register MDIO bus\n");
+ return ret;
+ }
+
+ /* Fetch the PHY phandle */
+ priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
+
+ /* Get the link mode */
+ priv->phy_interface = of_get_phy_mode(dn);
+
+ return 0;
+}
+
+int bcmgenet_mii_init(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = bcmgenet_mii_alloc(priv);
+ if (ret)
+ return ret;
+
+ ret = bcmgenet_mii_of_init(priv);
+ if (ret)
+ goto out_free;
+
+ ret = bcmgenet_mii_probe(dev);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ mdiobus_unregister(priv->mii_bus);
+out_free:
+ kfree(priv->mii_bus->irq);
+ mdiobus_free(priv->mii_bus);
+ return ret;
+}
+
+void bcmgenet_mii_exit(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ mdiobus_unregister(priv->mii_bus);
+ kfree(priv->mii_bus->irq);
+ mdiobus_free(priv->mii_bus);
+}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 70a225c8df5c..b9f7022f4e81 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1401,11 +1401,6 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
return ret;
}
-static int tg3_mdio_reset(struct mii_bus *bp)
-{
- return 0;
-}
-
static void tg3_mdio_config_5785(struct tg3 *tp)
{
u32 val;
@@ -1542,7 +1537,6 @@ static int tg3_mdio_init(struct tg3 *tp)
tp->mdio_bus->parent = &tp->pdev->dev;
tp->mdio_bus->read = &tg3_mdio_read;
tp->mdio_bus->write = &tg3_mdio_write;
- tp->mdio_bus->reset = &tg3_mdio_reset;
tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
tp->mdio_bus->irq = &tp->mdio_irq[0];
@@ -6322,6 +6316,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 1,
+ .n_pins = 0,
.pps = 0,
.adjfreq = tg3_ptp_adjfreq,
.adjtime = tg3_ptp_adjtime,
@@ -6593,7 +6588,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
pkts_compl++;
bytes_compl += skb->len;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
if (unlikely(tx_bug)) {
tg3_tx_recover(tp);
@@ -6924,7 +6919,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
if (len > (tp->dev->mtu + ETH_HLEN) &&
skb->protocol != htons(ETH_P_8021Q)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
goto drop_it_no_recycle;
}
@@ -7807,7 +7802,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
PCI_DMA_TODEVICE);
/* Make sure the mapping succeeded */
if (pci_dma_mapping_error(tp->pdev, new_addr)) {
- dev_kfree_skb(new_skb);
+ dev_kfree_skb_any(new_skb);
ret = -1;
} else {
u32 save_entry = *entry;
@@ -7822,13 +7817,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
new_skb->len, base_flags,
mss, vlan)) {
tg3_tx_skb_unmap(tnapi, save_entry, -1);
- dev_kfree_skb(new_skb);
+ dev_kfree_skb_any(new_skb);
ret = -1;
}
}
}
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
*pskb = new_skb;
return ret;
}
@@ -7871,7 +7866,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
} while (segs);
tg3_tso_bug_end:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -7923,8 +7918,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct iphdr *iph;
u32 tcp_opt_len, hdr_len;
- if (skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_cow_head(skb, 0))
goto drop;
iph = ip_hdr(skb);
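
The skb_header_cloned()/pskb_expand_head() pair here (and in the bnad and later hunks) is folded into skb_cow_head(), which performs the same "unshare the header before the TSO code writes to it" check in a single call. A toy model of the before/after shape; the structure and helpers below are stand-ins for illustration, not the real skb API:

#include <stdio.h>

/* Toy stand-in for an skb whose header data may be shared with a clone. */
struct toy_skb {
	int header_shared;	/* 1 if another clone references the header */
};

static int toy_expand_head(struct toy_skb *skb)
{
	/* Pretend we reallocated private header space. */
	skb->header_shared = 0;
	return 0;		/* or a negative errno on allocation failure */
}

/* Old pattern: the caller checks for sharing, then expands. */
static int old_prepare(struct toy_skb *skb)
{
	if (skb->header_shared && toy_expand_head(skb))
		return -1;
	return 0;
}

/* New pattern: one helper owns both the check and the copy,
 * mirroring what skb_cow_head(skb, 0) does for the drivers above.
 */
static int toy_cow_head(struct toy_skb *skb)
{
	if (!skb->header_shared)
		return 0;
	return toy_expand_head(skb);
}

int main(void)
{
	struct toy_skb a = { .header_shared = 1 };
	struct toy_skb b = { .header_shared = 1 };

	printf("old: %d, new: %d\n", old_prepare(&a), toy_cow_head(&b));
	return 0;
}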
@@ -8093,7 +8087,7 @@ dma_error:
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
drop_nofree:
tp->tx_dropped++;
return NETDEV_TX_OK;
@@ -11361,12 +11355,10 @@ static bool tg3_enable_msix(struct tg3 *tp)
msix_ent[i].vector = 0;
}
- rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+ rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
if (rc < 0) {
return false;
- } else if (rc != 0) {
- if (pci_enable_msix(tp->pdev, msix_ent, rc))
- return false;
+ } else if (rc < tp->irq_cnt) {
netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
tp->irq_cnt, rc);
tp->irq_cnt = rc;
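
This hunk, like the bnad, cxgb3, cxgb4, and cxgb4vf hunks further down, drops the old retry loop around pci_enable_msix() in favour of pci_enable_msix_range(dev, entries, minvec, maxvec), which returns either a vector count somewhere in [minvec, maxvec] or a negative errno, so the caller never has to loop on partial allocations. A self-contained mock that imitates that contract (the mock stands in for the PCI core and is not the real API):

#include <stdio.h>

/* Pretend the platform can only hand out this many MSI-X vectors. */
#define MOCK_AVAILABLE_VECTORS 5

/* Mock of the pci_enable_msix_range() contract: returns a count in
 * [minvec, maxvec], or a negative error if even minvec can't be met.
 */
static int mock_enable_msix_range(int minvec, int maxvec)
{
	int granted = maxvec < MOCK_AVAILABLE_VECTORS ?
		      maxvec : MOCK_AVAILABLE_VECTORS;

	if (granted < minvec)
		return -28;	/* an -ENOSPC-style failure */
	return granted;
}

int main(void)
{
	int want = 9;	/* e.g. one vector per queue plus extras */
	int need = 2;	/* the bare minimum the driver can run with */
	int got = mock_enable_msix_range(need, want);

	if (got < 0) {
		printf("MSI-X unavailable (%d), fall back to MSI/INTx\n", got);
		return 0;
	}
	if (got < want)
		printf("asked for %d vectors, received %d: shrink queues\n",
		       want, got);
	else
		printf("got the full %d vectors\n", got);
	return 0;
}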
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 4ad1187e82fb..675550fe8ee9 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2496,12 +2496,10 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
int err;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err) {
- BNAD_UPDATE_CTR(bnad, tso_err);
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0) {
+ BNAD_UPDATE_CTR(bnad, tso_err);
+ return err;
}
/*
@@ -2669,9 +2667,11 @@ bnad_enable_msix(struct bnad *bnad)
for (i = 0; i < bnad->msix_num; i++)
bnad->msix_table[i].entry = i;
- ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
- if (ret > 0) {
- /* Not enough MSI-X vectors. */
+ ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
+ 1, bnad->msix_num);
+ if (ret < 0) {
+ goto intx_mode;
+ } else if (ret < bnad->msix_num) {
pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
ret, bnad->msix_num);
@@ -2684,18 +2684,11 @@ bnad_enable_msix(struct bnad *bnad)
bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
BNAD_MAILBOX_MSIX_VECTORS;
- if (bnad->msix_num > ret)
+ if (bnad->msix_num > ret) {
+ pci_disable_msix(bnad->pcidev);
goto intx_mode;
-
- /* Try once more with adjusted numbers */
- /* If this fails, fall back to INTx */
- ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
- bnad->msix_num);
- if (ret)
- goto intx_mode;
-
- } else if (ret < 0)
- goto intx_mode;
+ }
+ }
pci_intx(bnad->pcidev, 0);
@@ -2850,13 +2843,11 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
}
if (unlikely((gso_size + skb_transport_offset(skb) +
tcp_hdrlen(skb)) >= skb->len)) {
- txqent->hdr.wi.opcode =
- __constant_htons(BNA_TXQ_WI_SEND);
+ txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
} else {
- txqent->hdr.wi.opcode =
- __constant_htons(BNA_TXQ_WI_SEND_LSO);
+ txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
txqent->hdr.wi.lso_mss = htons(gso_size);
}
@@ -2870,7 +2861,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
} else {
- txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
+ txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
@@ -2881,11 +2872,10 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 proto = 0;
- if (skb->protocol == __constant_htons(ETH_P_IP))
+ if (skb->protocol == htons(ETH_P_IP))
proto = ip_hdr(skb)->protocol;
#ifdef NETIF_F_IPV6_CSUM
- else if (skb->protocol ==
- __constant_htons(ETH_P_IPV6)) {
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
/* nexthdr may not be TCP immediately. */
proto = ipv6_hdr(skb)->nexthdr;
}
@@ -2954,17 +2944,17 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Sanity checks for the skb */
if (unlikely(skb->len <= ETH_HLEN)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
return NETDEV_TX_OK;
}
if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
if (unlikely(len == 0)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
@@ -2976,7 +2966,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
* and the netif_tx_stop_all_queues() call.
*/
if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
@@ -2989,7 +2979,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
return NETDEV_TX_OK;
}
@@ -3029,7 +3019,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Program the opcode, flags, frame_len, num_vectors in WI */
if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
txqent->hdr.wi.reserved = 0;
@@ -3055,7 +3045,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Undo the changes starting at tcb->producer_index */
bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
tcb->producer_index);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
return NETDEV_TX_OK;
}
@@ -3067,8 +3057,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
vect_id = 0;
BNA_QE_INDX_INC(prod, q_depth);
txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
- txqent->hdr.wi_ext.opcode =
- __constant_htons(BNA_TXQ_WI_EXTENSION);
+ txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
unmap = &unmap_q[prod];
}
@@ -3085,7 +3074,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(len != skb->len)) {
/* Undo the changes starting at tcb->producer_index */
bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index ce75de9bae9e..4a79edaf3885 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -342,6 +342,9 @@ static int __init at91ether_probe(struct platform_device *pdev)
}
clk_enable(lp->pclk);
+ lp->hclk = ERR_PTR(-ENOENT);
+ lp->tx_clk = ERR_PTR(-ENOENT);
+
/* Install the interrupt handler */
dev->irq = platform_get_irq(pdev, 0);
res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index d0c38e01e99f..ca97005e24b4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -199,11 +199,6 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
-static int macb_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
/**
* macb_set_tx_clk() - Set a clock to a new frequency
* @clk Pointer to the clock to change
@@ -375,7 +370,6 @@ int macb_mii_init(struct macb *bp)
bp->mii_bus->name = "MACB_mii_bus";
bp->mii_bus->read = &macb_mdio_read;
bp->mii_bus->write = &macb_mdio_write;
- bp->mii_bus->reset = &macb_mdio_reset;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
@@ -1045,7 +1039,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
goto unlock;
}
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index d2a183c3a6ce..521dfea44b83 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -897,7 +897,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
/* Check tx error on the last segment */
if (desc_get_tx_ls(p)) {
desc_get_tx_status(priv, p);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
}
priv->tx_skbuff[entry] = NULL;
@@ -1105,7 +1105,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb);
paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, paddr)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
priv->tx_skbuff[entry] = skb;
@@ -1169,7 +1169,7 @@ dma_err:
desc = first;
dma_unmap_single(priv->device, desc_get_buf_addr(desc),
desc_get_buf_len(desc), DMA_TO_DEVICE);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 45d77334d7d9..07bbb711b7e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3088,30 +3088,22 @@ static int cxgb_enable_msix(struct adapter *adap)
{
struct msix_entry entries[SGE_QSETS + 1];
int vectors;
- int i, err;
+ int i;
vectors = ARRAY_SIZE(entries);
for (i = 0; i < vectors; ++i)
entries[i].entry = i;
- while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
- vectors = err;
-
- if (err < 0)
- pci_disable_msix(adap->pdev);
-
- if (!err && vectors < (adap->params.nports + 1)) {
- pci_disable_msix(adap->pdev);
- err = -1;
- }
+ vectors = pci_enable_msix_range(adap->pdev, entries,
+ adap->params.nports + 1, vectors);
+ if (vectors < 0)
+ return vectors;
- if (!err) {
- for (i = 0; i < vectors; ++i)
- adap->msix_info[i].vec = entries[i].vector;
- adap->msix_nvectors = vectors;
- }
+ for (i = 0; i < vectors; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ adap->msix_nvectors = vectors;
- return err;
+ return 0;
}
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 632b318eb38a..8b069f96e920 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -298,7 +298,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
if (d->eop) {
- kfree_skb(d->skb);
+ dev_consume_skb_any(d->skb);
d->skb = NULL;
}
}
@@ -1188,7 +1188,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
V_WR_TID(q->token));
wr_gen2(d, gen);
- kfree_skb(skb);
+ dev_consume_skb_any(skb);
return;
}
@@ -1233,7 +1233,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1f4b9b30b9ed..32db37709263 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -66,6 +66,7 @@ enum {
SERNUM_LEN = 24, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
+ PN_LEN = 16, /* Part Number length */
};
enum {
@@ -254,6 +255,7 @@ struct vpd_params {
u8 ec[EC_LEN + 1];
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
+ u8 pn[PN_LEN + 1];
};
struct pci_params {
@@ -306,6 +308,7 @@ struct adapter_params {
unsigned char bypass;
unsigned int ofldq_wr_cred;
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
};
#include "t4fw_api.h"
@@ -497,6 +500,7 @@ struct sge_txq {
spinlock_t db_lock;
int db_disabled;
unsigned short db_pidx;
+ unsigned short db_pidx_inc;
u64 udb;
};
@@ -553,8 +557,13 @@ struct sge {
u32 pktshift; /* padding between CPL & packet data */
u32 fl_align; /* response queue message alignment */
u32 fl_starve_thres; /* Free List starvation threshold */
- unsigned int starve_thres;
- u8 idma_state[2];
+
+ /* State variables for detecting an SGE Ingress DMA hang */
+ unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
+ unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
+ unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
+ unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
+
unsigned int egr_start;
unsigned int ingr_start;
void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
@@ -957,7 +966,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
-
+const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
@@ -1029,4 +1038,5 @@ void t4_db_dropped(struct adapter *adapter);
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
+void t4_sge_decode_idma_state(struct adapter *adapter, int state);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 34e2488767d9..6fe58913403a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -254,6 +254,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x5011, 4),
CH_DEVICE(0x5012, 4),
CH_DEVICE(0x5013, 4),
+ CH_DEVICE(0x5014, 4),
+ CH_DEVICE(0x5015, 4),
+ CH_DEVICE(0x5080, 4),
+ CH_DEVICE(0x5081, 4),
+ CH_DEVICE(0x5082, 4),
+ CH_DEVICE(0x5083, 4),
+ CH_DEVICE(0x5084, 4),
+ CH_DEVICE(0x5085, 4),
CH_DEVICE(0x5401, 4),
CH_DEVICE(0x5402, 4),
CH_DEVICE(0x5403, 4),
@@ -273,6 +281,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x5411, 4),
CH_DEVICE(0x5412, 4),
CH_DEVICE(0x5413, 4),
+ CH_DEVICE(0x5414, 4),
+ CH_DEVICE(0x5415, 4),
+ CH_DEVICE(0x5480, 4),
+ CH_DEVICE(0x5481, 4),
+ CH_DEVICE(0x5482, 4),
+ CH_DEVICE(0x5483, 4),
+ CH_DEVICE(0x5484, 4),
+ CH_DEVICE(0x5485, 4),
{ 0, }
};
@@ -423,15 +439,18 @@ static void link_report(struct net_device *dev)
const struct port_info *p = netdev_priv(dev);
switch (p->link_cfg.speed) {
- case SPEED_10000:
+ case 10000:
s = "10Gbps";
break;
- case SPEED_1000:
+ case 1000:
s = "1000Mbps";
break;
- case SPEED_100:
+ case 100:
s = "100Mbps";
break;
+ case 40000:
+ s = "40Gbps";
+ break;
}
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -2061,7 +2080,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x40200, 0x40298,
0x402ac, 0x4033c,
0x403f8, 0x403fc,
- 0x41300, 0x413c4,
+ 0x41304, 0x413c4,
0x41400, 0x4141c,
0x41480, 0x414d0,
0x44000, 0x44078,
@@ -2089,7 +2108,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x48200, 0x48298,
0x482ac, 0x4833c,
0x483f8, 0x483fc,
- 0x49300, 0x493c4,
+ 0x49304, 0x493c4,
0x49400, 0x4941c,
0x49480, 0x494d0,
0x4c000, 0x4c078,
@@ -2199,6 +2218,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
else if (type == FW_PORT_TYPE_FIBER_XFI ||
type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
v |= SUPPORTED_FIBRE;
+ else if (type == FW_PORT_TYPE_BP40_BA)
+ v |= SUPPORTED_40000baseSR4_Full;
if (caps & FW_PORT_CAP_ANEG)
v |= SUPPORTED_Autoneg;
@@ -2215,6 +2236,8 @@ static unsigned int to_fw_linkcaps(unsigned int caps)
v |= FW_PORT_CAP_SPEED_1G;
if (caps & ADVERTISED_10000baseT_Full)
v |= FW_PORT_CAP_SPEED_10G;
+ if (caps & ADVERTISED_40000baseSR4_Full)
+ v |= FW_PORT_CAP_SPEED_40G;
return v;
}
@@ -2263,12 +2286,14 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static unsigned int speed_to_caps(int speed)
{
- if (speed == SPEED_100)
+ if (speed == 100)
return FW_PORT_CAP_SPEED_100M;
- if (speed == SPEED_1000)
+ if (speed == 1000)
return FW_PORT_CAP_SPEED_1G;
- if (speed == SPEED_10000)
+ if (speed == 10000)
return FW_PORT_CAP_SPEED_10G;
+ if (speed == 40000)
+ return FW_PORT_CAP_SPEED_40G;
return 0;
}
@@ -2296,8 +2321,10 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->autoneg == AUTONEG_DISABLE) {
cap = speed_to_caps(speed);
- if (!(lc->supported & cap) || (speed == SPEED_1000) ||
- (speed == SPEED_10000))
+ if (!(lc->supported & cap) ||
+ (speed == 1000) ||
+ (speed == 10000) ||
+ (speed == 40000))
return -EINVAL;
lc->requested_speed = cap;
lc->advertising = 0;
@@ -3205,8 +3232,8 @@ static int cxgb4_clip_get(const struct net_device *dev,
c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE);
c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
- *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
- *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ c.ip_hi = *(__be64 *)(lip->s6_addr);
+ c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}
@@ -3221,8 +3248,8 @@ static int cxgb4_clip_release(const struct net_device *dev,
c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
- *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
- *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ c.ip_hi = *(__be64 *)(lip->s6_addr);
+ c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}
@@ -3563,14 +3590,25 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
static void disable_txq_db(struct sge_txq *q)
{
- spin_lock_irq(&q->db_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->db_lock, flags);
q->db_disabled = 1;
- spin_unlock_irq(&q->db_lock);
+ spin_unlock_irqrestore(&q->db_lock, flags);
}
-static void enable_txq_db(struct sge_txq *q)
+static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
spin_lock_irq(&q->db_lock);
+ if (q->db_pidx_inc) {
+ /* Make sure that all writes to the TX descriptors
+ * are committed before we tell HW about them.
+ */
+ wmb();
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
+ q->db_pidx_inc = 0;
+ }
q->db_disabled = 0;
spin_unlock_irq(&q->db_lock);
}
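
While doorbells are disabled for the drain, ring_tx_db() in sge.c (changed later in this patch) accumulates the pending producer-index increments in db_pidx_inc instead of writing SGE_PF_KDOORBELL; enable_txq_db() then flushes the accumulated count in one write once it is safe again. A small standalone model of that accumulate-then-flush idea, where the "hardware write" is just a counter:

#include <stdio.h>

struct toy_txq {
	int db_disabled;	/* doorbells suppressed during the drain   */
	int db_pidx_inc;	/* increments accumulated while disabled   */
};

static int hw_doorbell_writes;	/* stands in for SGE_PF_KDOORBELL updates */

static void toy_ring_db(struct toy_txq *q, int n)
{
	if (q->db_disabled)
		q->db_pidx_inc += n;	/* remember it for later */
	else
		hw_doorbell_writes += n;
}

static void toy_enable_db(struct toy_txq *q)
{
	if (q->db_pidx_inc) {
		/* one combined doorbell for everything we deferred */
		hw_doorbell_writes += q->db_pidx_inc;
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
}

int main(void)
{
	struct toy_txq q = { .db_disabled = 1 };

	toy_ring_db(&q, 3);	/* deferred */
	toy_ring_db(&q, 2);	/* deferred */
	toy_enable_db(&q);	/* flushed as a single update of 5 */
	toy_ring_db(&q, 1);	/* hits the "hardware" directly */

	printf("doorbell units written: %d\n", hw_doorbell_writes);
	return 0;
}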
@@ -3592,11 +3630,32 @@ static void enable_dbs(struct adapter *adap)
int i;
for_each_ethrxq(&adap->sge, i)
- enable_txq_db(&adap->sge.ethtxq[i].q);
+ enable_txq_db(adap, &adap->sge.ethtxq[i].q);
for_each_ofldrxq(&adap->sge, i)
- enable_txq_db(&adap->sge.ofldtxq[i].q);
+ enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
- enable_txq_db(&adap->sge.ctrlq[i].q);
+ enable_txq_db(adap, &adap->sge.ctrlq[i].q);
+}
+
+static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
+{
+ if (adap->uld_handle[CXGB4_ULD_RDMA])
+ ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
+ cmd);
+}
+
+static void process_db_full(struct work_struct *work)
+{
+ struct adapter *adap;
+
+ adap = container_of(work, struct adapter, db_full_task);
+
+ drain_db_fifo(adap, dbfifo_drain_delay);
+ enable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
+ t4_set_reg_field(adap, SGE_INT_ENABLE3,
+ DBFIFO_HP_INT | DBFIFO_LP_INT,
+ DBFIFO_HP_INT | DBFIFO_LP_INT);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3604,7 +3663,7 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
u16 hw_pidx, hw_cidx;
int ret;
- spin_lock_bh(&q->db_lock);
+ spin_lock_irq(&q->db_lock);
ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
if (ret)
goto out;
@@ -3621,7 +3680,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
}
out:
q->db_disabled = 0;
- spin_unlock_bh(&q->db_lock);
+ q->db_pidx_inc = 0;
+ spin_unlock_irq(&q->db_lock);
if (ret)
CH_WARN(adap, "DB drop recovery failed.\n");
}
@@ -3637,29 +3697,6 @@ static void recover_all_queues(struct adapter *adap)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
-static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
-{
- mutex_lock(&uld_mutex);
- if (adap->uld_handle[CXGB4_ULD_RDMA])
- ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
- cmd);
- mutex_unlock(&uld_mutex);
-}
-
-static void process_db_full(struct work_struct *work)
-{
- struct adapter *adap;
-
- adap = container_of(work, struct adapter, db_full_task);
-
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
- drain_db_fifo(adap, dbfifo_drain_delay);
- t4_set_reg_field(adap, SGE_INT_ENABLE3,
- DBFIFO_HP_INT | DBFIFO_LP_INT,
- DBFIFO_HP_INT | DBFIFO_LP_INT);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-}
-
static void process_db_drop(struct work_struct *work)
{
struct adapter *adap;
@@ -3667,11 +3704,13 @@ static void process_db_drop(struct work_struct *work)
adap = container_of(work, struct adapter, db_drop_task);
if (is_t4(adap->params.chip)) {
- disable_dbs(adap);
+ drain_db_fifo(adap, dbfifo_drain_delay);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
- drain_db_fifo(adap, 1);
+ drain_db_fifo(adap, dbfifo_drain_delay);
recover_all_queues(adap);
+ drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
} else {
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u16 qid = (dropped_db >> 15) & 0x1ffff;
@@ -3712,6 +3751,8 @@ static void process_db_drop(struct work_struct *work)
void t4_db_full(struct adapter *adap)
{
if (is_t4(adap->params.chip)) {
+ disable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
t4_set_reg_field(adap, SGE_INT_ENABLE3,
DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
queue_work(workq, &adap->db_full_task);
@@ -3720,8 +3761,11 @@ void t4_db_full(struct adapter *adap)
void t4_db_dropped(struct adapter *adap)
{
- if (is_t4(adap->params.chip))
- queue_work(workq, &adap->db_drop_task);
+ if (is_t4(adap->params.chip)) {
+ disable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
+ }
+ queue_work(workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -3765,6 +3809,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.dbfifo_int_thresh = dbfifo_int_thresh;
lli.sge_pktshift = adap->sge.pktshift;
lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+ lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -5370,6 +5415,21 @@ static int adap_init0(struct adapter *adap)
(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
/*
+ * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
+ * capability. Earlier versions of the firmware didn't have the
+ * ULPTX_MEMWRITE_DSGL parameter, so we'll interpret a query failure
+ * as no permission to use ULPTX MEMWRITE DSGL.
+ */
+ if (is_t4(adap->params.chip)) {
+ adap->params.ulptx_memwrite_dsgl = false;
+ } else {
+ params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+ 1, params, val);
+ adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
+ }
+
+ /*
* Get device capabilities so we can determine what resources we need
* to manage.
*/
@@ -5603,9 +5663,10 @@ static const struct pci_error_handlers cxgb4_eeh = {
.resume = eeh_resume,
};
-static inline bool is_10g_port(const struct link_config *lc)
+static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+ (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
@@ -5629,7 +5690,7 @@ static void cfg_queues(struct adapter *adap)
int i, q10g = 0, n10g = 0, qidx = 0;
for_each_port(adap, i)
- n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
/*
* We default to 1 queue per non-10G port and up to # of cores queues
@@ -5644,7 +5705,7 @@ static void cfg_queues(struct adapter *adap)
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
- pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+ pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
qidx += pi->nqsets;
}
@@ -5737,7 +5798,7 @@ static void reduce_ethqs(struct adapter *adap, int n)
static int enable_msix(struct adapter *adap)
{
int ofld_need = 0;
- int i, err, want, need;
+ int i, want, need;
struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports;
struct msix_entry entries[MAX_INGQ + 1];
@@ -5753,32 +5814,30 @@ static int enable_msix(struct adapter *adap)
}
need = adap->params.nports + EXTRA_VECS + ofld_need;
- while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
- want = err;
+ want = pci_enable_msix_range(adap->pdev, entries, need, want);
+ if (want < 0)
+ return want;
- if (!err) {
- /*
- * Distribute available vectors to the various queue groups.
- * Every group gets its minimum requirement and NIC gets top
- * priority for leftovers.
- */
- i = want - EXTRA_VECS - ofld_need;
- if (i < s->max_ethqsets) {
- s->max_ethqsets = i;
- if (i < s->ethqsets)
- reduce_ethqs(adap, i);
- }
- if (is_offload(adap)) {
- i = want - EXTRA_VECS - s->max_ethqsets;
- i -= ofld_need - nchan;
- s->ofldqsets = (i / nchan) * nchan; /* round down */
- }
- for (i = 0; i < want; ++i)
- adap->msix_info[i].vec = entries[i].vector;
- } else if (err > 0)
- dev_info(adap->pdev_dev,
- "only %d MSI-X vectors left, not using MSI-X\n", err);
- return err;
+ /*
+ * Distribute available vectors to the various queue groups.
+ * Every group gets its minimum requirement and NIC gets top
+ * priority for leftovers.
+ */
+ i = want - EXTRA_VECS - ofld_need;
+ if (i < s->max_ethqsets) {
+ s->max_ethqsets = i;
+ if (i < s->ethqsets)
+ reduce_ethqs(adap, i);
+ }
+ if (is_offload(adap)) {
+ i = want - EXTRA_VECS - s->max_ethqsets;
+ i -= ofld_need - nchan;
+ s->ofldqsets = (i / nchan) * nchan; /* round down */
+ }
+ for (i = 0; i < want; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+
+ return 0;
}
#undef EXTRA_VECS
@@ -5801,11 +5860,6 @@ static int init_rss(struct adapter *adap)
static void print_port_info(const struct net_device *dev)
{
- static const char *base[] = {
- "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
- "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
- };
-
char buf[80];
char *bufp = buf;
const char *spd = "";
@@ -5823,9 +5877,11 @@ static void print_port_info(const struct net_device *dev)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+ bufp += sprintf(bufp, "40G/");
if (bufp != buf)
--bufp;
- sprintf(bufp, "BASE-%s", base[pi->port_type]);
+ sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
adap->params.vpd.id,
@@ -5833,8 +5889,8 @@ static void print_port_info(const struct net_device *dev)
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
- netdev_info(dev, "S/N: %s, E/C: %s\n",
- adap->params.vpd.sn, adap->params.vpd.ec);
+ netdev_info(dev, "S/N: %s, P/N: %s\n",
+ adap->params.vpd.sn, adap->params.vpd.pn);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 4dd0a82533e4..e274a047528f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -253,6 +253,7 @@ struct cxgb4_lld_info {
/* packet data */
bool enable_fw_ofld_conn; /* Enable connection through fw */
/* WR */
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
};
struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 47ffa64fcf19..ca95cf2954eb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -93,6 +93,16 @@
*/
#define TX_QCHECK_PERIOD (HZ / 2)
+/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
+ * (in RX_QCHECK_PERIOD multiples). If we find one of the SGE Ingress DMA
+ * State Machines in the same state for this amount of time (in HZ) then we'll
+ * issue a warning about a potential hang. We'll repeat the warning every
+ * N RX_QCHECK_PERIODs for as long as the SGE Ingress DMA Channel appears
+ * to be hung, until the situation clears; when it clears, we'll note that too.
+ */
+#define SGE_IDMA_WARN_THRESH (1 * HZ)
+#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
+
/*
* Max number of Tx descriptors to be reclaimed by the Tx timer.
*/
@@ -373,7 +383,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
if (d->skb) { /* an SGL is present */
if (unmap)
unmap_sgl(dev, d->skb, d->sgl, q);
- kfree_skb(d->skb);
+ dev_consume_skb_any(d->skb);
d->skb = NULL;
}
++d;
@@ -706,11 +716,17 @@ static inline unsigned int flits_to_desc(unsigned int n)
* @skb: the packet
*
* Returns whether an Ethernet packet is small enough to fit as
- * immediate data.
+ * immediate data. Return value corresponds to headroom required.
*/
static inline int is_eth_imm(const struct sk_buff *skb)
{
- return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
+ int hdrlen = skb_shinfo(skb)->gso_size ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
}
/**
@@ -723,9 +739,10 @@ static inline int is_eth_imm(const struct sk_buff *skb)
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
unsigned int flits;
+ int hdrlen = is_eth_imm(skb);
- if (is_eth_imm(skb))
- return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
+ if (hdrlen)
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
if (skb_shinfo(skb)->gso_size)
@@ -843,9 +860,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
unsigned int *wr, index;
+ unsigned long flags;
wmb(); /* write descriptors before telling HW */
- spin_lock(&q->db_lock);
+ spin_lock_irqsave(&q->db_lock, flags);
if (!q->db_disabled) {
if (is_t4(adap->params.chip)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
@@ -861,9 +879,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
writel(n, adap->bar2 + q->udb + 8);
wmb();
}
- }
+ } else
+ q->db_pidx_inc += n;
q->db_pidx = q->pidx;
- spin_unlock(&q->db_lock);
+ spin_unlock_irqrestore(&q->db_lock, flags);
}
/**
@@ -971,6 +990,7 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ int len;
u32 wr_mid;
u64 cntrl, *end;
int qidx, credits;
@@ -982,13 +1002,14 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ bool immediate = false;
/*
* The chip min packet length is 10 octets but play safe and reject
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
-out_free: dev_kfree_skb(skb);
+out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1011,7 +1032,10 @@ out_free: dev_kfree_skb(skb);
return NETDEV_TX_BUSY;
}
- if (!is_eth_imm(skb) &&
+ if (is_eth_imm(skb))
+ immediate = true;
+
+ if (!immediate &&
unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
goto out_free;
@@ -1028,6 +1052,7 @@ out_free: dev_kfree_skb(skb);
wr->r3 = cpu_to_be64(0);
end = (u64 *)wr + flits;
+ len = immediate ? skb->len : 0;
ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
@@ -1035,8 +1060,9 @@ out_free: dev_kfree_skb(skb);
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
- FW_WR_IMMDLEN(sizeof(*lso)));
+ FW_WR_IMMDLEN(len));
lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
LSO_FIRST_SLICE | LSO_LAST_SLICE |
LSO_IPV6(v6) |
@@ -1054,9 +1080,7 @@ out_free: dev_kfree_skb(skb);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
- int len;
-
- len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+ len += sizeof(*cpl);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN(len));
cpl = (void *)(wr + 1);
@@ -1078,9 +1102,9 @@ out_free: dev_kfree_skb(skb);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
- if (is_eth_imm(skb)) {
+ if (immediate) {
inline_tx_skb(skb, &q->q, cpl + 1);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
} else {
int last_desc;
@@ -1467,8 +1491,12 @@ static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
{
unsigned int idx = skb_txq(skb);
- if (unlikely(is_ctrl_pkt(skb)))
+ if (unlikely(is_ctrl_pkt(skb))) {
+ /* Single ctrl queue is a requirement for LE workaround path */
+ if (adap->tids.nsftids)
+ idx = 0;
return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+ }
return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
}
@@ -1992,7 +2020,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
static void sge_rx_timer_cb(unsigned long data)
{
unsigned long m;
- unsigned int i, cnt[2];
+ unsigned int i, idma_same_state_cnt[2];
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
@@ -2015,21 +2043,64 @@ static void sge_rx_timer_cb(unsigned long data)
}
t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
- cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
- cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
-
- for (i = 0; i < 2; i++)
- if (cnt[i] >= s->starve_thres) {
- if (s->idma_state[i] || cnt[i] == 0xffffffff)
- continue;
- s->idma_state[i] = 1;
- t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
- m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
- dev_warn(adap->pdev_dev,
- "SGE idma%u starvation detected for "
- "queue %lu\n", i, m & 0xffff);
- } else if (s->idma_state[i])
- s->idma_state[i] = 0;
+ idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
+ idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+
+ for (i = 0; i < 2; i++) {
+ u32 debug0, debug11;
+
+ /* If the Ingress DMA Same State Counter ("timer") is less
+ * than 1s, then we can reset our synthesized Stall Timer and
+ * continue. If we have previously emitted warnings about a
+ * potential stalled Ingress Queue, issue a note indicating
+ * that the Ingress Queue has resumed forward progress.
+ */
+ if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
+ if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
+ CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n",
+ i, s->idma_qid[i],
+ s->idma_stalled[i]/HZ);
+ s->idma_stalled[i] = 0;
+ continue;
+ }
+
+ /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+ * domain. The first time we get here it'll be because we
+ * passed the 1s Threshold; each additional time it'll be
+ * because the RX Timer Callback is being fired on its regular
+ * schedule.
+ *
+ * If the stall is below our Potential Hung Ingress Queue
+ * Warning Threshold, continue.
+ */
+ if (s->idma_stalled[i] == 0)
+ s->idma_stalled[i] = HZ;
+ else
+ s->idma_stalled[i] += RX_QCHECK_PERIOD;
+
+ if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
+ continue;
+
+ /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
+ if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
+ continue;
+
+ /* Read and save the SGE IDMA State and Queue ID information.
+ * We do this every time in case it changes across time ...
+ */
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
+ debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
+ debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+ CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
+ i, s->idma_qid[i], s->idma_state[i],
+ s->idma_stalled[i]/HZ, debug0, debug11);
+ t4_sge_decode_idma_state(adap, s->idma_state[i]);
+ }
mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
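
The replacement stall detector works entirely in jiffies: once an IDMA channel has been seen in the same state past the 1 second threshold, idma_stalled[] starts at HZ and grows by RX_QCHECK_PERIOD on each timer tick, and a warning fires when it reaches SGE_IDMA_WARN_THRESH and then every SGE_IDMA_WARN_REPEAT thereafter. A small simulation of that schedule; the HZ and RX_QCHECK_PERIOD values below are assumptions made only for the printout:

#include <stdio.h>

int main(void)
{
	/* Assumed values: HZ = 250 and an RX queue check every HZ/64
	 * jiffies; the real constants come from the driver and the
	 * kernel configuration.
	 */
	int hz = 250;
	int rx_qcheck_period = hz / 64;
	int warn_thresh = 1 * hz;		  /* SGE_IDMA_WARN_THRESH */
	int warn_repeat = 20 * rx_qcheck_period;  /* SGE_IDMA_WARN_REPEAT */
	int stalled = 0;
	int tick;

	for (tick = 0; tick < 200; tick++) {
		/* Channel still stuck past the 1s threshold on this tick. */
		stalled = stalled ? stalled + rx_qcheck_period : hz;

		if (stalled < warn_thresh)
			continue;
		if (((stalled - hz) % warn_repeat) == 0)
			printf("warning at ~%d.%02d s of stall\n",
			       stalled / hz, (stalled % hz) * 100 / hz);
	}
	return 0;
}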
@@ -2580,11 +2651,19 @@ static int t4_sge_init_soft(struct adapter *adap)
fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
#undef READ_FL_BUF
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
if (fl_small_pg != PAGE_SIZE ||
- (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
- (fl_large_pg & (fl_large_pg-1)) != 0))) {
+ (fl_large_pg & (fl_large_pg-1)) != 0) {
dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
fl_small_pg, fl_large_pg);
return -EINVAL;
@@ -2699,8 +2778,8 @@ static int t4_sge_init_hard(struct adapter *adap)
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
- u32 sge_control;
- int ret;
+ u32 sge_control, sge_conm_ctrl;
+ int ret, egress_threshold;
/*
* Ingress Padding Boundary and Egress Status Page Size are set up by
@@ -2725,15 +2804,24 @@ int t4_sge_init(struct adapter *adap)
* SGE's Egress Congestion Threshold. If it isn't, then we can get
* stuck waiting for new packets while the SGE is waiting for us to
* give it more Free List entries. (Note that the SGE's Egress
- * Congestion Threshold is in units of 2 Free List pointers.)
+ * Congestion Threshold is in units of 2 Free List pointers.) For T4,
+ * there was only a single field to control this. For T5 there's the
+ * original field which now only applies to Unpacked Mode Free List
+ * buffers and a new field which only applies to Packed Mode Free List
+ * buffers.
*/
- s->fl_starve_thres
- = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
+ sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
+ if (is_t4(adap->params.chip))
+ egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
+ else
+ egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
+ s->fl_starve_thres = 2*egress_threshold + 1;
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
- s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
- s->idma_state[0] = s->idma_state[1] = 0;
+ s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */
+ s->idma_stalled[0] = 0;
+ s->idma_stalled[1] = 0;
spin_lock_init(&s->intrq_lock);
return 0;
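
For T5 the Free List starvation threshold is now derived from the packed-mode congestion field, extracted with the EGRTHRESHOLDPACKING_GET() macro added to t4_regs.h later in this patch (shift 14, 6-bit mask), then doubled plus one because the threshold is counted in units of 2 Free List pointers. A standalone version of that extraction and computation, using a made-up register reading for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the EGRTHRESHOLDPACKING_* definitions added in t4_regs.h. */
#define EGRTHRESHOLDPACKING_MASK	0x3fU
#define EGRTHRESHOLDPACKING_SHIFT	14
#define EGRTHRESHOLDPACKING_GET(x)	(((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
					 EGRTHRESHOLDPACKING_MASK)

int main(void)
{
	/* Example SGE_CONM_CTRL value with a packing threshold of 8. */
	uint32_t sge_conm_ctrl = 8u << EGRTHRESHOLDPACKING_SHIFT;
	unsigned int egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);

	/* The threshold is in units of 2 Free List pointers, hence 2*x + 1. */
	unsigned int fl_starve_thres = 2 * egress_threshold + 1;

	printf("egress threshold %u -> fl_starve_thres %u\n",
	       egress_threshold, fl_starve_thres);
	return 0;
}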
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2c109343d570..fb2fe65903c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -573,7 +573,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
u32 cclk_param, cclk_val;
int i, ret, addr;
- int ec, sn;
+ int ec, sn, pn;
u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
@@ -638,6 +638,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
+ FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW
memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -647,6 +648,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
+ memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+ strim(p->pn);
/*
* Ask firmware for the Core Clock since it knows how to translate the
@@ -1155,7 +1158,8 @@ out:
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_ANEG)
/**
* t4_link_start - apply link configuration to MAC/PHY
@@ -2247,6 +2251,36 @@ static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
}
/**
+ * t4_get_port_type_description - return Port Type string description
+ * @port_type: firmware Port Type enumeration
+ */
+const char *t4_get_port_type_description(enum fw_port_type port_type)
+{
+ static const char *const port_type_description[] = {
+ "R XFI",
+ "R XAUI",
+ "T SGMII",
+ "T XFI",
+ "T XAUI",
+ "KX4",
+ "CX4",
+ "KX",
+ "KR",
+ "R SFP+",
+ "KR/KX",
+ "KR/KX/KX4",
+ "R QSFP_10G",
+ "",
+ "R QSFP",
+ "R BP40_BA",
+ };
+
+ if (port_type < ARRAY_SIZE(port_type_description))
+ return port_type_description[port_type];
+ return "UNKNOWN";
+}
+
+/**
* t4_get_port_stats - collect port statistics
* @adap: the adapter
* @idx: the port index
@@ -2563,6 +2597,112 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
}
/**
+ * t4_sge_decode_idma_state - decode the idma state
+ * @adap: the adapter
+ * @state: the state idma is stuck in
+ */
+void t4_sge_decode_idma_state(struct adapter *adapter, int state)
+{
+ static const char * const t4_decode[] = {
+ "IDMA_IDLE",
+ "IDMA_PUSH_MORE_CPL_FIFO",
+ "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+ "Not used",
+ "IDMA_PHYSADDR_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+ "IDMA_PHYSADDR_SEND_PAYLOAD",
+ "IDMA_SEND_FIFO_TO_IMSG",
+ "IDMA_FL_REQ_DATA_FL_PREP",
+ "IDMA_FL_REQ_DATA_FL",
+ "IDMA_FL_DROP",
+ "IDMA_FL_H_REQ_HEADER_FL",
+ "IDMA_FL_H_SEND_PCIEHDR",
+ "IDMA_FL_H_PUSH_CPL_FIFO",
+ "IDMA_FL_H_SEND_CPL",
+ "IDMA_FL_H_SEND_IP_HDR_FIRST",
+ "IDMA_FL_H_SEND_IP_HDR",
+ "IDMA_FL_H_REQ_NEXT_HEADER_FL",
+ "IDMA_FL_H_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_H_SEND_IP_HDR_PADDING",
+ "IDMA_FL_D_SEND_PCIEHDR",
+ "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+ "IDMA_FL_D_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_PCIEHDR",
+ "IDMA_FL_PUSH_CPL_FIFO",
+ "IDMA_FL_SEND_CPL",
+ "IDMA_FL_SEND_PAYLOAD_FIRST",
+ "IDMA_FL_SEND_PAYLOAD",
+ "IDMA_FL_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_SEND_PADDING",
+ "IDMA_FL_SEND_COMPLETION_TO_IMSG",
+ "IDMA_FL_SEND_FIFO_TO_IMSG",
+ "IDMA_FL_REQ_DATAFL_DONE",
+ "IDMA_FL_REQ_HEADERFL_DONE",
+ };
+ static const char * const t5_decode[] = {
+ "IDMA_IDLE",
+ "IDMA_ALMOST_IDLE",
+ "IDMA_PUSH_MORE_CPL_FIFO",
+ "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+ "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+ "IDMA_PHYSADDR_SEND_PAYLOAD",
+ "IDMA_SEND_FIFO_TO_IMSG",
+ "IDMA_FL_REQ_DATA_FL",
+ "IDMA_FL_DROP",
+ "IDMA_FL_DROP_SEND_INC",
+ "IDMA_FL_H_REQ_HEADER_FL",
+ "IDMA_FL_H_SEND_PCIEHDR",
+ "IDMA_FL_H_PUSH_CPL_FIFO",
+ "IDMA_FL_H_SEND_CPL",
+ "IDMA_FL_H_SEND_IP_HDR_FIRST",
+ "IDMA_FL_H_SEND_IP_HDR",
+ "IDMA_FL_H_REQ_NEXT_HEADER_FL",
+ "IDMA_FL_H_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_H_SEND_IP_HDR_PADDING",
+ "IDMA_FL_D_SEND_PCIEHDR",
+ "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+ "IDMA_FL_D_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_PCIEHDR",
+ "IDMA_FL_PUSH_CPL_FIFO",
+ "IDMA_FL_SEND_CPL",
+ "IDMA_FL_SEND_PAYLOAD_FIRST",
+ "IDMA_FL_SEND_PAYLOAD",
+ "IDMA_FL_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_SEND_PADDING",
+ "IDMA_FL_SEND_COMPLETION_TO_IMSG",
+ };
+ static const u32 sge_regs[] = {
+ SGE_DEBUG_DATA_LOW_INDEX_2,
+ SGE_DEBUG_DATA_LOW_INDEX_3,
+ SGE_DEBUG_DATA_HIGH_INDEX_10,
+ };
+ const char **sge_idma_decode;
+ int sge_idma_decode_nstates;
+ int i;
+
+ if (is_t4(adapter->params.chip)) {
+ sge_idma_decode = (const char **)t4_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
+ } else {
+ sge_idma_decode = (const char **)t5_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
+ }
+
+ if (state < sge_idma_decode_nstates)
+ CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
+ else
+ CH_WARN(adapter, "idma state %d unknown\n", state);
+
+ for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
+ CH_WARN(adapter, "SGE register %#x value %#x\n",
+ sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
+}
+
+/**
* t4_fw_hello - establish communication with FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -3533,11 +3673,13 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
if (stat & FW_PORT_CMD_TXPAUSE)
fc |= PAUSE_TX;
if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
- speed = SPEED_100;
+ speed = 100;
else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
- speed = SPEED_1000;
+ speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
- speed = SPEED_10000;
+ speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ speed = 40000;
if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc) { /* something changed */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index cd6874b571ee..f2738c710789 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -116,6 +116,7 @@ enum CPL_error {
CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
CPL_ERR_RTX_NEG_ADVICE = 35,
CPL_ERR_PERSIST_NEG_ADVICE = 36,
+ CPL_ERR_KEEPALV_NEG_ADVICE = 37,
CPL_ERR_ABORT_FAILED = 42,
CPL_ERR_IWARP_FLM = 50,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 4082522d8140..225ad8a5722d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -230,6 +230,12 @@
#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
+#define EGRTHRESHOLDPACKING_MASK 0x3fU
+#define EGRTHRESHOLDPACKING_SHIFT 14
+#define EGRTHRESHOLDPACKING(x) ((x) << EGRTHRESHOLDPACKING_SHIFT)
+#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
+ EGRTHRESHOLDPACKING_MASK)
+
#define SGE_DBFIFO_STATUS 0x10a4
#define HP_INT_THRESH_SHIFT 28
#define HP_INT_THRESH_MASK 0xfU
@@ -278,6 +284,9 @@
#define SGE_DEBUG_INDEX 0x10cc
#define SGE_DEBUG_DATA_HIGH 0x10d0
#define SGE_DEBUG_DATA_LOW 0x10d4
+#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
+#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
+#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
#define S_HP_INT_THRESH 28
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 74fea74ce0aa..9cc973fbcf26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -932,6 +932,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
FW_PARAMS_PARAM_DEV_CF = 0x0D,
+ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
};
/*
@@ -1742,6 +1743,9 @@ enum fw_port_type {
FW_PORT_TYPE_SFP,
FW_PORT_TYPE_BP_AP,
FW_PORT_TYPE_BP4_AP,
+ FW_PORT_TYPE_QSFP_10G,
+ FW_PORT_TYPE_QSFP,
+ FW_PORT_TYPE_BP40_BA,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0899c0983594..52859288de7b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2444,7 +2444,7 @@ static void reduce_ethqs(struct adapter *adapter, int n)
*/
static int enable_msix(struct adapter *adapter)
{
- int i, err, want, need;
+ int i, want, need, nqsets;
struct msix_entry entries[MSIX_ENTRIES];
struct sge *s = &adapter->sge;
@@ -2460,26 +2460,23 @@ static int enable_msix(struct adapter *adapter)
*/
want = s->max_ethqsets + MSIX_EXTRAS;
need = adapter->params.nports + MSIX_EXTRAS;
- while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
- want = err;
- if (err == 0) {
- int nqsets = want - MSIX_EXTRAS;
- if (nqsets < s->max_ethqsets) {
- dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
- " for %d Queue Sets\n", nqsets);
- s->max_ethqsets = nqsets;
- if (nqsets < s->ethqsets)
- reduce_ethqs(adapter, nqsets);
- }
- for (i = 0; i < want; ++i)
- adapter->msix_info[i].vec = entries[i].vector;
- } else if (err > 0) {
- pci_disable_msix(adapter->pdev);
- dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
- " not using MSI-X\n", err);
+ want = pci_enable_msix_range(adapter->pdev, entries, need, want);
+ if (want < 0)
+ return want;
+
+ nqsets = want - MSIX_EXTRAS;
+ if (nqsets < s->max_ethqsets) {
+ dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
+ " for %d Queue Sets\n", nqsets);
+ s->max_ethqsets = nqsets;
+ if (nqsets < s->ethqsets)
+ reduce_ethqs(adapter, nqsets);
}
- return err;
+ for (i = 0; i < want; ++i)
+ adapter->msix_info[i].vec = entries[i].vector;
+
+ return 0;
}
static const struct net_device_ops cxgb4vf_netdev_ops = {
@@ -2947,6 +2944,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
CH_DEVICE(0x5811, 0), /* T520-lp-cr */
CH_DEVICE(0x5812, 0), /* T560-cr */
CH_DEVICE(0x5813, 0), /* T580-cr */
+ CH_DEVICE(0x5814, 0), /* T580-so-cr */
+ CH_DEVICE(0x5815, 0), /* T502-bt */
+ CH_DEVICE(0x5880, 0),
+ CH_DEVICE(0x5881, 0),
+ CH_DEVICE(0x5882, 0),
+ CH_DEVICE(0x5883, 0),
+ CH_DEVICE(0x5884, 0),
+ CH_DEVICE(0x5885, 0),
{ 0, }
};
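/*
 * Illustrative sketch, not from the patch: in the enable_msix() hunk above,
 * pci_enable_msix_range() replaces the old "call pci_enable_msix(), then
 * retry with the returned count" loop.  It allocates between @minvec and
 * @maxvec vectors and returns how many were granted, or a negative errno if
 * even @minvec cannot be met.  Generic shape of the conversion, with
 * hypothetical names:
 */
#include <linux/pci.h>

static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries,
			       int minvec, int maxvec)
{
	int nvec;

	nvec = pci_enable_msix_range(pdev, entries, minvec, maxvec);
	if (nvec < 0)
		return nvec;	/* not even minvec vectors available */

	/* nvec is in [minvec, maxvec]; size the queues to what was granted */
	return nvec;
}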
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0a89963c48ce..9cfa4b4bb089 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -401,7 +401,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
if (sdesc->skb) {
if (need_unmap)
unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
- kfree_skb(sdesc->skb);
+ dev_consume_skb_any(sdesc->skb);
sdesc->skb = NULL;
}
@@ -1275,7 +1275,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* need it any longer.
*/
inline_tx_skb(skb, &txq->q, cpl + 1);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
} else {
/*
* Write the skb's Scatter/Gather list into the TX Packet CPL
@@ -1354,7 +1354,7 @@ out_free:
* An error of some sort happened. Free the TX skb and tell the
* OS that we've "dealt" with the packet ...
*/
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
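/*
 * Editorial note, not from the patch: dev_consume_skb_any() is meant for skbs
 * released after a successful transmit, so drop-monitoring tools do not count
 * them as drops; dev_kfree_skb_any() stays on genuine error/drop paths such
 * as out_free above.  Both "_any" variants are safe from hard-IRQ or
 * IRQs-disabled context, unlike plain dev_kfree_skb().  Hypothetical xmit
 * tail showing the split:
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit_tail(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		dev_consume_skb_any(skb);	/* handed to hardware */
	else
		dev_kfree_skb_any(skb);		/* dropped */
	return NETDEV_TX_OK;
}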
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 19f642a45f40..fe84fbabc0d4 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1174,7 +1174,7 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
spin_unlock_irqrestore(&lp->lock, flags);
dev->stats.tx_bytes += skb->len;
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
/* We DO NOT call netif_wake_queue() here.
* We also DO NOT call netif_start_queue().
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index e9f7c656ddda..e35c8e0202ad 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -29,6 +29,7 @@
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
+#include <linux/irq.h>
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b740bfce72ef..2945718ce806 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -521,7 +521,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
unsigned int txq_map;
if (skb->len <= 0) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -536,7 +536,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (skb_shinfo(skb)->gso_size == 0 &&
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb_linearize(skb)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1086,14 +1086,15 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int intr = enic_legacy_io_intr();
unsigned int rq_work_to_do = budget;
unsigned int wq_work_to_do = -1; /* no limit */
- unsigned int work_done, rq_work_done, wq_work_done;
+ unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
/* Service RQ (first) and WQ
*/
- rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
- rq_work_to_do, enic_rq_service, NULL);
+ if (budget > 0)
+ rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
+ rq_work_to_do, enic_rq_service, NULL);
wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
wq_work_to_do, enic_wq_service, NULL);
@@ -1141,14 +1142,15 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
unsigned int cq = enic_cq_rq(enic, rq);
unsigned int intr = enic_msix_rq_intr(enic, rq);
unsigned int work_to_do = budget;
- unsigned int work_done;
+ unsigned int work_done = 0;
int err;
/* Service RQ
*/
- work_done = vnic_cq_service(&enic->cq[cq],
- work_to_do, enic_rq_service, NULL);
+ if (budget > 0)
+ work_done = vnic_cq_service(&enic->cq[cq],
+ work_to_do, enic_rq_service, NULL);
/* Return intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1796,7 +1798,8 @@ static int enic_set_intr_mode(struct enic *enic)
enic->cq_count >= n + m &&
enic->intr_count >= n + m + 2) {
- if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
+ if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ n + m + 2, n + m + 2) > 0) {
enic->rq_count = n;
enic->wq_count = m;
@@ -1815,7 +1818,8 @@ static int enic_set_intr_mode(struct enic *enic)
enic->wq_count >= m &&
enic->cq_count >= 1 + m &&
enic->intr_count >= 1 + m + 2) {
- if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
+ if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ 1 + m + 2, 1 + m + 2) > 0) {
enic->rq_count = 1;
enic->wq_count = m;
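/*
 * Editorial note, not from the patch: NAPI ->poll() may be invoked with a
 * zero budget (netpoll does this), in which case no RX completions may be
 * processed, while TX completion cleanup is still allowed.  That is why the
 * enic poll hunks above pre-initialise rq_work_done/work_done to 0 and guard
 * the RX service calls with "budget > 0".  Generic shape of the pattern,
 * with hypothetical helpers:
 */
#include <linux/netdevice.h>

static void example_clean_tx(struct napi_struct *napi);
static int example_clean_rx(struct napi_struct *napi, int budget);
static void example_unmask_irq(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;

	example_clean_tx(napi);		/* allowed even when budget == 0 */
	if (budget > 0)
		rx_done = example_clean_rx(napi, budget);

	if (rx_done < budget) {		/* all RX work done: re-arm interrupt */
		napi_complete(napi);
		example_unmask_irq(napi);
	}
	return rx_done;
}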
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a1a2b4028a5c..8c4b93be333b 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1033,7 +1033,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&db->lock, flags);
/* free this SKB */
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 5ad9e3e3c0b8..53f0c618045c 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -696,7 +696,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
pr_err("big packet = %d\n", (u16)skb->len);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -743,7 +743,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
dw32(DCR7, db->cr7_data);
/* free this SKB */
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index aa4ee385091f..aa801a6af7b9 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -607,7 +607,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
netdev_err(dev, "big packet = %d\n", (u16)skb->len);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -648,7 +648,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
uw32(DCR7, db->cr7_data);
/* free this SKB */
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 113cd799a131..d9e5ca0d48c1 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1137,7 +1137,7 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop_frame:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
np->tx_skbuff[entry] = NULL;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 8a79a32a5674..e9b0faba3078 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -170,11 +170,6 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
-static int dnet_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static void dnet_handle_link_change(struct net_device *dev)
{
struct dnet *bp = netdev_priv(dev);
@@ -322,7 +317,6 @@ static int dnet_mii_init(struct dnet *bp)
bp->mii_bus->name = "dnet_mii_bus";
bp->mii_bus->read = &dnet_mdio_read;
bp->mii_bus->write = &dnet_mdio_write;
- bp->mii_bus->reset = &dnet_mdio_reset;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
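/*
 * Editorial note, not from the patch: the ->reset hook in struct mii_bus is
 * optional -- mdiobus_register() only calls it when it is non-NULL -- so
 * stubs that merely return 0 can be deleted outright, as done here for dnet.
 * The same cleanup recurs below for ethoc, ftgmac100 and fec.  Hedged
 * registration sketch with hypothetical accessors:
 */
#include <linux/phy.h>

static int example_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
static int example_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
			      u16 val);

static int example_mdio_init(struct mii_bus *bus, void *priv)
{
	bus->name  = "example_mii_bus";
	bus->read  = example_mdio_read;
	bus->write = example_mdio_write;
	bus->priv  = priv;
	/* no bus->reset: the core skips the optional hook when it is NULL */
	return mdiobus_register(bus);
}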
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 231129dd1764..ea94a8eb6b35 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -4,3 +4,11 @@ config BE2NET
---help---
This driver implements the NIC functionality for ServerEngines'
10Gbps network adapter - BladeEngine.
+
+config BE2NET_VXLAN
+ bool "VXLAN offload support on be2net driver"
+ default y
+ depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m)
+ ---help---
+ Say Y here if you want to enable VXLAN offload support on
+ be2net driver.
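/*
 * Editorial note, not from the patch: the dependency expression
 * "BE2NET && VXLAN && !(BE2NET=y && VXLAN=m)" forbids the one combination
 * that cannot link, namely a built-in be2net calling into a modular vxlan.
 * The resulting CONFIG_BE2NET_VXLAN bool is then used as a plain
 * compile-time gate around the VXLAN offload paths later in this patch,
 * e.g.:
 *
 *	#ifdef CONFIG_BE2NET_VXLAN
 *		... vxlan_get_rx_port() and the ndo_{add,del}_vxlan_port hooks ...
 *	#endif
 */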
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 05529e273050..8ccaa2520dc3 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "10.0.600.0u"
+#define DRV_VER "10.2u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -88,7 +88,6 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE_MIN_MTU 256
#define BE_NUM_VLANS_SUPPORTED 64
-#define BE_UMC_NUM_VLANS_SUPPORTED 15
#define BE_MAX_EQD 128u
#define BE_MAX_TX_FRAG_COUNT 30
@@ -262,9 +261,10 @@ struct be_tx_obj {
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
struct page *page;
+ /* set to page-addr for last frag of the page & frag-addr otherwise */
DEFINE_DMA_UNMAP_ADDR(bus);
u16 page_offset;
- bool last_page_user;
+ bool last_frag; /* last frag of the page */
};
struct be_rx_stats {
@@ -293,9 +293,10 @@ struct be_rx_compl_info {
u8 ip_csum;
u8 l4_csum;
u8 ipv6;
- u8 vtm;
+ u8 qnq;
u8 pkt_type;
u8 ip_frag;
+ u8 tunneled;
};
struct be_rx_obj {
@@ -359,6 +360,7 @@ struct be_vf_cfg {
int pmac_id;
u16 vlan_tag;
u32 tx_rate;
+ u32 plink_tracking;
};
enum vf_state {
@@ -370,10 +372,11 @@ enum vf_state {
#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
#define BE_FLAGS_VLAN_PROMISC (1 << 4)
#define BE_FLAGS_NAPI_ENABLED (1 << 9)
-#define BE_UC_PMAC_COUNT 30
-#define BE_VF_UC_PMAC_COUNT 2
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
+#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
+#define BE_UC_PMAC_COUNT 30
+#define BE_VF_UC_PMAC_COUNT 2
/* Ethtool set_dump flags */
#define LANCER_INITIATE_FW_DUMP 0x1
@@ -467,6 +470,7 @@ struct be_adapter {
u32 port_num;
bool promiscuous;
+ u8 mc_type;
u32 function_mode;
u32 function_caps;
u32 rx_fc; /* Rx flow control */
@@ -492,6 +496,7 @@ struct be_adapter {
u32 sli_family;
u8 hba_port_num;
u16 pvid;
+ __be16 vxlan_port;
struct phy_info phy;
u8 wol_cap;
bool wol_en;
@@ -536,6 +541,14 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
return min_t(u16, num, num_online_cpus());
}
+/* Is BE in pvid_tagging mode */
+#define be_pvid_tagging_enabled(adapter) (adapter->pvid)
+
+/* Is BE in QNQ multi-channel mode */
+#define be_is_qnq_mode(adapter) (adapter->mc_type == FLEX10 || \
+ adapter->mc_type == vNIC1 || \
+ adapter->mc_type == UFP)
+
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 48076a6370c3..d1ec15af0d24 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -202,8 +202,12 @@ static void be_async_link_state_process(struct be_adapter *adapter,
/* When link status changes, link speed must be re-queried from FW */
adapter->phy.link_speed = -1;
- /* Ignore physical link event */
- if (lancer_chip(adapter) &&
+ /* On BEx the FW does not send a separate link status
+ * notification for physical and logical link.
+ * On other chips just process the logical link
+ * status notification
+ */
+ if (!BEx_chip(adapter) &&
!(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
return;
@@ -211,7 +215,8 @@ static void be_async_link_state_process(struct be_adapter *adapter,
* it may not be received in some cases.
*/
if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
- be_link_status_update(adapter, evt->port_link_status);
+ be_link_status_update(adapter,
+ evt->port_link_status & LINK_STATUS_MASK);
}
/* Grp5 CoS Priority evt */
@@ -239,10 +244,12 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
struct be_async_event_grp5_pvid_state *evt)
{
- if (evt->enabled)
+ if (evt->enabled) {
adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
- else
+ dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
+ } else {
adapter->pvid = 0;
+ }
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
@@ -3296,6 +3303,21 @@ static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
return NULL;
}
+static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
+ return (struct be_port_res_desc *)hdr;
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return NULL;
+}
+
static void be_copy_nic_desc(struct be_resources *res,
struct be_nic_res_desc *desc)
{
@@ -3439,6 +3461,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
{
struct be_cmd_resp_get_profile_config *resp;
struct be_pcie_res_desc *pcie;
+ struct be_port_res_desc *port;
struct be_nic_res_desc *nic;
struct be_queue_info *mccq = &adapter->mcc_obj.q;
struct be_dma_mem cmd;
@@ -3466,6 +3489,10 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
if (pcie)
res->max_vfs = le16_to_cpu(pcie->num_vfs);
+ port = be_get_port_desc(resp->func_param, desc_count);
+ if (port)
+ adapter->mc_type = port->mc_type;
+
nic = be_get_nic_desc(resp->func_param, desc_count);
if (nic)
be_copy_nic_desc(res, nic);
@@ -3476,14 +3503,11 @@ err:
return status;
}
-/* Currently only Lancer uses this command and it supports version 0 only
- * Uses sync mcc
- */
-int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
- u8 domain)
+int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+ int size, u8 version, u8 domain)
{
- struct be_mcc_wrb *wrb;
struct be_cmd_req_set_profile_config *req;
+ struct be_mcc_wrb *wrb;
int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -3495,44 +3519,116 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
}
req = embedded_payload(wrb);
-
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
wrb, NULL);
+ req->hdr.version = version;
req->hdr.domain = domain;
req->desc_count = cpu_to_le32(1);
- req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
- req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
- req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
- req->nic_desc.pf_num = adapter->pf_number;
- req->nic_desc.vf_num = domain;
-
- /* Mark fields invalid */
- req->nic_desc.unicast_mac_count = 0xFFFF;
- req->nic_desc.mcc_count = 0xFFFF;
- req->nic_desc.vlan_count = 0xFFFF;
- req->nic_desc.mcast_mac_count = 0xFFFF;
- req->nic_desc.txq_count = 0xFFFF;
- req->nic_desc.rq_count = 0xFFFF;
- req->nic_desc.rssq_count = 0xFFFF;
- req->nic_desc.lro_count = 0xFFFF;
- req->nic_desc.cq_count = 0xFFFF;
- req->nic_desc.toe_conn_count = 0xFFFF;
- req->nic_desc.eq_count = 0xFFFF;
- req->nic_desc.link_param = 0xFF;
- req->nic_desc.bw_min = 0xFFFFFFFF;
- req->nic_desc.acpi_params = 0xFF;
- req->nic_desc.wol_param = 0x0F;
-
- /* Change BW */
- req->nic_desc.bw_min = cpu_to_le32(bps);
- req->nic_desc.bw_max = cpu_to_le32(bps);
+ memcpy(req->desc, desc, size);
+
status = be_mcc_notify_wait(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+/* Mark all fields invalid */
+void be_reset_nic_desc(struct be_nic_res_desc *nic)
+{
+ memset(nic, 0, sizeof(*nic));
+ nic->unicast_mac_count = 0xFFFF;
+ nic->mcc_count = 0xFFFF;
+ nic->vlan_count = 0xFFFF;
+ nic->mcast_mac_count = 0xFFFF;
+ nic->txq_count = 0xFFFF;
+ nic->rq_count = 0xFFFF;
+ nic->rssq_count = 0xFFFF;
+ nic->lro_count = 0xFFFF;
+ nic->cq_count = 0xFFFF;
+ nic->toe_conn_count = 0xFFFF;
+ nic->eq_count = 0xFFFF;
+ nic->link_param = 0xFF;
+ nic->acpi_params = 0xFF;
+ nic->wol_param = 0x0F;
+ nic->bw_min = 0xFFFFFFFF;
+ nic->bw_max = 0xFFFFFFFF;
+}
+
+int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain)
+{
+ if (lancer_chip(adapter)) {
+ struct be_nic_res_desc nic_desc;
+
+ be_reset_nic_desc(&nic_desc);
+ nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
+ nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
+ nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
+ (1 << NOSV_SHIFT);
+ nic_desc.pf_num = adapter->pf_number;
+ nic_desc.vf_num = domain;
+ nic_desc.bw_max = cpu_to_le32(bps);
+
+ return be_cmd_set_profile_config(adapter, &nic_desc,
+ RESOURCE_DESC_SIZE_V0,
+ 0, domain);
+ } else {
+ return be_cmd_set_qos(adapter, bps, domain);
+ }
+}
+
+int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_manage_iface_filters *req;
+ int status;
+
+ if (iface == 0xFFFFFFFF)
+ return -1;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
+ wrb, NULL);
+ req->op = op;
+ req->target_iface_id = cpu_to_le32(iface);
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
+{
+ struct be_port_res_desc port_desc;
+
+ memset(&port_desc, 0, sizeof(port_desc));
+ port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
+ port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+ port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+ port_desc.link_num = adapter->hba_port_num;
+ if (port) {
+ port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
+ (1 << RCVID_SHIFT);
+ port_desc.nv_port = swab16(port);
+ } else {
+ port_desc.nv_flags = NV_TYPE_DISABLED;
+ port_desc.nv_port = 0;
+ }
+
+ return be_cmd_set_profile_config(adapter, &port_desc,
+ RESOURCE_DESC_SIZE_V1, 1, 0);
+}
+
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num)
{
@@ -3723,6 +3819,45 @@ err:
return status;
}
+int be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, u8 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_ll_link *req;
+ int status;
+
+ if (BEx_chip(adapter) || lancer_chip(adapter))
+ return 0;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
+ sizeof(*req), wrb, NULL);
+
+ req->hdr.version = 1;
+ req->hdr.domain = domain;
+
+ if (link_state == IFLA_VF_LINK_STATE_ENABLE)
+ req->link_config |= 1;
+
+ if (link_state == IFLA_VF_LINK_STATE_AUTO)
+ req->link_config |= 1 << PLINK_TRACK_SHIFT;
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index fc4e076dc202..b60e4d53c1c9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -203,6 +203,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PORT_NAME 77
+#define OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG 80
#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
#define OPCODE_COMMON_GET_PHY_DETAILS 102
@@ -221,6 +222,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
+#define OPCODE_COMMON_MANAGE_IFACE_FILTERS 193
#define OPCODE_COMMON_GET_IFACE_LIST 194
#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
@@ -1098,14 +1100,6 @@ struct be_cmd_resp_query_fw_cfg {
u32 function_caps;
};
-/* Is BE in a multi-channel mode */
-static inline bool be_is_mc(struct be_adapter *adapter)
-{
- return adapter->function_mode & FLEX10_MODE ||
- adapter->function_mode & VNIC_MODE ||
- adapter->function_mode & UMC_ENABLED;
-}
-
/******************** RSS Config ****************************************/
/* RSS type Input parameters used to compute RX hash
* RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
@@ -1828,20 +1822,36 @@ struct be_cmd_req_set_ext_fat_caps {
#define NIC_RESOURCE_DESC_TYPE_V0 0x41
#define PCIE_RESOURCE_DESC_TYPE_V1 0x50
#define NIC_RESOURCE_DESC_TYPE_V1 0x51
+#define PORT_RESOURCE_DESC_TYPE_V1 0x55
#define MAX_RESOURCE_DESC 264
-/* QOS unit number */
-#define QUN 4
-/* Immediate */
-#define IMM 6
-/* No save */
-#define NOSV 7
+#define IMM_SHIFT 6 /* Immediate */
+#define NOSV_SHIFT 7 /* No save */
struct be_res_desc_hdr {
u8 desc_type;
u8 desc_len;
} __packed;
+struct be_port_res_desc {
+ struct be_res_desc_hdr hdr;
+ u8 rsvd0;
+ u8 flags;
+ u8 link_num;
+ u8 mc_type;
+ u16 rsvd1;
+
+#define NV_TYPE_MASK 0x3 /* bits 0-1 */
+#define NV_TYPE_DISABLED 1
+#define NV_TYPE_VXLAN 3
+#define SOCVID_SHIFT 2 /* Strip outer vlan */
+#define RCVID_SHIFT 4 /* Report vlan */
+ u8 nv_flags;
+ u8 rsvd2;
+ __le16 nv_port; /* vxlan/gre port */
+ u32 rsvd3[19];
+} __packed;
+
struct be_pcie_res_desc {
struct be_res_desc_hdr hdr;
u8 rsvd0;
@@ -1862,6 +1872,8 @@ struct be_pcie_res_desc {
struct be_nic_res_desc {
struct be_res_desc_hdr hdr;
u8 rsvd1;
+
+#define QUN_SHIFT 4 /* QoS is in absolute units */
u8 flags;
u8 vf_num;
u8 rsvd2;
@@ -1891,6 +1903,23 @@ struct be_nic_res_desc {
u32 rsvd8[7];
} __packed;
+/************ Multi-Channel type ***********/
+enum mc_type {
+ MC_NONE = 0x01,
+ UMC = 0x02,
+ FLEX10 = 0x03,
+ vNIC1 = 0x04,
+ nPAR = 0x05,
+ UFP = 0x06,
+ vNIC2 = 0x07
+};
+
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter)
+{
+ return adapter->mc_type > MC_NONE;
+}
+
struct be_cmd_req_get_func_config {
struct be_cmd_req_hdr hdr;
};
@@ -1919,7 +1948,7 @@ struct be_cmd_req_set_profile_config {
struct be_cmd_req_hdr hdr;
u32 rsvd;
u32 desc_count;
- struct be_nic_res_desc nic_desc;
+ u8 desc[RESOURCE_DESC_SIZE_V1];
};
struct be_cmd_resp_set_profile_config {
@@ -1971,6 +2000,33 @@ struct be_cmd_resp_get_iface_list {
struct be_if_desc if_desc;
};
+/*************** Set logical link ********************/
+#define PLINK_TRACK_SHIFT 8
+struct be_cmd_req_set_ll_link {
+ struct be_cmd_req_hdr hdr;
+ u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */
+};
+
+/************** Manage IFACE Filters *******************/
+#define OP_CONVERT_NORMAL_TO_TUNNEL 0
+#define OP_CONVERT_TUNNEL_TO_NORMAL 1
+
+struct be_cmd_req_manage_iface_filters {
+ struct be_cmd_req_hdr hdr;
+ u8 op;
+ u8 rsvd0;
+ u8 flags;
+ u8 rsvd1;
+ u32 tunnel_iface_id;
+ u32 target_iface_id;
+ u8 mac[6];
+ u16 vlan_tag;
+ u32 tenant_id;
+ u32 filter_id;
+ u32 cap_flags;
+ u32 cap_control_flags;
+} __packed;
+
int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -2045,7 +2101,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
int be_cmd_get_phy_info(struct be_adapter *adapter);
-int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain);
void be_detect_error(struct be_adapter *adapter);
int be_cmd_get_die_temperature(struct be_adapter *adapter);
int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
@@ -2086,9 +2142,14 @@ int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
int be_cmd_get_profile_config(struct be_adapter *adapter,
struct be_resources *res, u8 domain);
-int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+ int size, u8 version, u8 domain);
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num);
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
+int be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, u8 domain);
+int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
+int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 05be0070f55f..15ba96cba65d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -357,10 +357,10 @@ be_get_ethtool_stats(struct net_device *netdev,
struct be_rx_stats *stats = rx_stats(rxo);
do {
- start = u64_stats_fetch_begin_bh(&stats->sync);
+ start = u64_stats_fetch_begin_irq(&stats->sync);
data[base] = stats->rx_bytes;
data[base + 1] = stats->rx_pkts;
- } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+ } while (u64_stats_fetch_retry_irq(&stats->sync, start));
for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
p = (u8 *)stats + et_rx_stats[i].offset;
@@ -373,19 +373,19 @@ be_get_ethtool_stats(struct net_device *netdev,
struct be_tx_stats *stats = tx_stats(txo);
do {
- start = u64_stats_fetch_begin_bh(&stats->sync_compl);
+ start = u64_stats_fetch_begin_irq(&stats->sync_compl);
data[base] = stats->tx_compl;
- } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
+ } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
do {
- start = u64_stats_fetch_begin_bh(&stats->sync);
+ start = u64_stats_fetch_begin_irq(&stats->sync);
for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
p = (u8 *)stats + et_tx_stats[i].offset;
data[base + i] =
(et_tx_stats[i].size == sizeof(u64)) ?
*(u64 *)p : *(u32 *)p;
}
- } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+ } while (u64_stats_fetch_retry_irq(&stats->sync, start));
base += ETHTOOL_TXSTATS_NUM;
}
}
@@ -802,16 +802,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
- &data[0]) != 0) {
+ &data[0]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
- }
+
if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
- &data[1]) != 0) {
- test->flags |= ETH_TEST_FL_FAILED;
- }
- if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
- &data[2]) != 0) {
+ &data[1]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
+
+ if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
+ &data[2]) != 0)
+ test->flags |= ETH_TEST_FL_FAILED;
+ test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
}
}
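/*
 * Editorial note, not from the patch: the changes above are part of a
 * tree-wide swap of the u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh()
 * reader helpers for their _irq counterparts; the retry-loop pattern itself
 * is unchanged.  Generic reader sketch with a hypothetical stats structure:
 */
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync sync;
};

static void example_read_stats(const struct example_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->sync);
		*packets = s->packets;
		*bytes   = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->sync, start));
}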
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index dc88782185f2..3bd198550edb 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -368,7 +368,7 @@ struct amap_eth_rx_compl_v0 {
u8 numfrags[3]; /* dword 1 */
u8 rss_flush; /* dword 2 */
u8 cast_enc[2]; /* dword 2 */
- u8 vtm; /* dword 2 */
+ u8 qnq; /* dword 2 */
u8 rss_bank; /* dword 2 */
u8 rsvd1[23]; /* dword 2 */
u8 lro_pkt; /* dword 2 */
@@ -401,13 +401,14 @@ struct amap_eth_rx_compl_v1 {
u8 numfrags[3]; /* dword 1 */
u8 rss_flush; /* dword 2 */
u8 cast_enc[2]; /* dword 2 */
- u8 vtm; /* dword 2 */
+ u8 qnq; /* dword 2 */
u8 rss_bank; /* dword 2 */
u8 port[2]; /* dword 2 */
u8 vntagp; /* dword 2 */
u8 header_len[8]; /* dword 2 */
u8 header_split[2]; /* dword 2 */
- u8 rsvd1[13]; /* dword 2 */
+ u8 rsvd1[12]; /* dword 2 */
+ u8 tunneled;
u8 valid; /* dword 2 */
u8 rsshash[32]; /* dword 3 */
} __packed;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 36c80612e21a..3e6df47b6973 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -23,6 +23,7 @@
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
+#include <net/vxlan.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -591,10 +592,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
for_all_rx_queues(adapter, rxo, i) {
const struct be_rx_stats *rx_stats = rx_stats(rxo);
do {
- start = u64_stats_fetch_begin_bh(&rx_stats->sync);
+ start = u64_stats_fetch_begin_irq(&rx_stats->sync);
pkts = rx_stats(rxo)->rx_pkts;
bytes = rx_stats(rxo)->rx_bytes;
- } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
+ } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
stats->rx_packets += pkts;
stats->rx_bytes += bytes;
stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
@@ -605,10 +606,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
for_all_tx_queues(adapter, txo, i) {
const struct be_tx_stats *tx_stats = tx_stats(txo);
do {
- start = u64_stats_fetch_begin_bh(&tx_stats->sync);
+ start = u64_stats_fetch_begin_irq(&tx_stats->sync);
pkts = tx_stats(txo)->tx_pkts;
bytes = tx_stats(txo)->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
+ } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
stats->tx_packets += pkts;
stats->tx_bytes += bytes;
}
@@ -652,7 +653,7 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
}
- if ((link_status & LINK_STATUS_MASK) == LINK_UP)
+ if (link_status)
netif_carrier_on(netdev);
else
netif_carrier_off(netdev);
@@ -718,10 +719,23 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
return vlan_tag;
}
+/* Used only for IP tunnel packets */
+static u16 skb_inner_ip_proto(struct sk_buff *skb)
+{
+ return (inner_ip_hdr(skb)->version == 4) ?
+ inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
+}
+
+static u16 skb_ip_proto(struct sk_buff *skb)
+{
+ return (ip_hdr(skb)->version == 4) ?
+ ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+}
+
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
- u16 vlan_tag;
+ u16 vlan_tag, proto;
memset(hdr, 0, sizeof(*hdr));
@@ -734,9 +748,15 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (is_tcp_pkt(skb))
+ if (skb->encapsulation) {
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+ proto = skb_inner_ip_proto(skb);
+ } else {
+ proto = skb_ip_proto(skb);
+ }
+ if (proto == IPPROTO_TCP)
AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
- else if (is_udp_pkt(skb))
+ else if (proto == IPPROTO_UDP)
AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
}
@@ -935,9 +955,9 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
}
/* If vlan tag is already inlined in the packet, skip HW VLAN
- * tagging in UMC mode
+ * tagging in pvid-tagging mode
*/
- if ((adapter->function_mode & UMC_ENABLED) &&
+ if (be_pvid_tagging_enabled(adapter) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
*skip_hw_vlan = true;
@@ -1138,7 +1158,10 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
- goto ret;
+ return status;
+
+ if (adapter->vlan_tag[vid])
+ return status;
adapter->vlan_tag[vid] = 1;
adapter->vlans_added++;
@@ -1148,7 +1171,7 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
adapter->vlans_added--;
adapter->vlan_tag[vid] = 0;
}
-ret:
+
return status;
}
@@ -1288,6 +1311,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
+ vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
return 0;
}
@@ -1342,11 +1366,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return -EINVAL;
}
- if (lancer_chip(adapter))
- status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
- else
- status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
-
+ status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
"tx rate %d on VF %d failed\n", rate, vf);
@@ -1354,6 +1374,24 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
adapter->vf_cfg[vf].tx_rate = rate;
return status;
}
+static int be_set_vf_link_state(struct net_device *netdev, int vf,
+ int link_state)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!sriov_enabled(adapter))
+ return -EPERM;
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
+ if (!status)
+ adapter->vf_cfg[vf].plink_tracking = link_state;
+
+ return status;
+}
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
ulong now)
@@ -1386,15 +1424,15 @@ static void be_eqd_update(struct be_adapter *adapter)
rxo = &adapter->rx_obj[eqo->idx];
do {
- start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
+ start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
rx_pkts = rxo->stats.rx_pkts;
- } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
+ } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
txo = &adapter->tx_obj[eqo->idx];
do {
- start = u64_stats_fetch_begin_bh(&txo->stats.sync);
+ start = u64_stats_fetch_begin_irq(&txo->stats.sync);
tx_pkts = txo->stats.tx_reqs;
- } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
+ } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
/* Skip, if wrapped around or first calculation */
@@ -1449,9 +1487,10 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
/* L4 checksum is not reliable for non TCP/UDP packets.
- * Also ignore ipcksm for ipv6 pkts */
+ * Also ignore ipcksm for ipv6 pkts
+ */
return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
- (rxcp->ip_csum || rxcp->ipv6);
+ (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
@@ -1464,11 +1503,15 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
rx_page_info = &rxo->page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
- if (rx_page_info->last_page_user) {
+ if (rx_page_info->last_frag) {
dma_unmap_page(&adapter->pdev->dev,
dma_unmap_addr(rx_page_info, bus),
adapter->big_page_size, DMA_FROM_DEVICE);
- rx_page_info->last_page_user = false;
+ rx_page_info->last_frag = false;
+ } else {
+ dma_sync_single_for_cpu(&adapter->pdev->dev,
+ dma_unmap_addr(rx_page_info, bus),
+ rx_frag_size, DMA_FROM_DEVICE);
}
queue_tail_inc(rxq);
@@ -1590,6 +1633,8 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
+
+ skb->encapsulation = rxcp->tunneled;
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1646,6 +1691,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (adapter->netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
+
+ skb->encapsulation = rxcp->tunneled;
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1676,12 +1723,14 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
rxcp->rss_hash =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
if (rxcp->vlanf) {
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
+ rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
compl);
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
+ rxcp->tunneled =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
@@ -1706,7 +1755,7 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
rxcp->rss_hash =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
if (rxcp->vlanf) {
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
+ rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
compl);
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
compl);
@@ -1739,9 +1788,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->l4_csum = 0;
if (rxcp->vlanf) {
- /* vlanf could be wrongly set in some cards.
- * ignore if vtm is not set */
- if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
+ /* In QNQ modes, if qnq bit is not set, then the packet was
+ * tagged only with the transparent outer vlan-tag and must
+ * not be treated as a vlan packet by host
+ */
+ if (be_is_qnq_mode(adapter) && !rxcp->qnq)
rxcp->vlanf = 0;
if (!lancer_chip(adapter))
@@ -1800,17 +1851,16 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
rx_stats(rxo)->rx_post_fail++;
break;
}
- page_info->page_offset = 0;
+ page_offset = 0;
} else {
get_page(pagep);
- page_info->page_offset = page_offset + rx_frag_size;
+ page_offset += rx_frag_size;
}
- page_offset = page_info->page_offset;
+ page_info->page_offset = page_offset;
page_info->page = pagep;
- dma_unmap_addr_set(page_info, bus, page_dmaaddr);
- frag_dmaaddr = page_dmaaddr + page_info->page_offset;
rxd = queue_head_node(rxq);
+ frag_dmaaddr = page_dmaaddr + page_info->page_offset;
rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
@@ -1818,15 +1868,24 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
if ((page_offset + rx_frag_size + rx_frag_size) >
adapter->big_page_size) {
pagep = NULL;
- page_info->last_page_user = true;
+ page_info->last_frag = true;
+ dma_unmap_addr_set(page_info, bus, page_dmaaddr);
+ } else {
+ dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
}
prev_page_info = page_info;
queue_head_inc(rxq);
page_info = &rxo->page_info_tbl[rxq->head];
}
- if (pagep)
- prev_page_info->last_page_user = true;
+
+ /* Mark the last frag of a page when we break out of the above loop
+ * with no more slots available in the RXQ
+ */
+ if (pagep) {
+ prev_page_info->last_frag = true;
+ dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
+ }
if (posted) {
atomic_add(posted, &rxq->used);
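/*
 * Illustrative sketch, not from the patch: with the hunks above, the RX page
 * is DMA-mapped only once, each completed fragment is made CPU-visible with
 * dma_sync_single_for_cpu(), and dma_unmap_page() is deferred to the
 * fragment flagged last_frag, whose saved address is the page address rather
 * than the fragment address (see the be_rx_page_info comment earlier in this
 * patch).  Generic lifecycle with hypothetical names:
 */
#include <linux/dma-mapping.h>

static dma_addr_t example_map_rx_page(struct device *dev, struct page *page,
				      size_t page_size)
{
	/* one mapping covers every fragment carved out of this page */
	return dma_map_page(dev, page, 0, page_size, DMA_FROM_DEVICE);
}

static void example_complete_frag(struct device *dev, dma_addr_t frag_addr,
				  size_t frag_size, bool last_frag,
				  dma_addr_t page_addr, size_t page_size)
{
	if (last_frag)
		dma_unmap_page(dev, page_addr, page_size, DMA_FROM_DEVICE);
	else
		dma_sync_single_for_cpu(dev, frag_addr, frag_size,
					DMA_FROM_DEVICE);
}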
@@ -1883,7 +1942,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
queue_tail_inc(txq);
} while (cur_index != last_index);
- kfree_skb(sent_skb);
+ dev_kfree_skb_any(sent_skb);
return num_wrbs;
}
@@ -2439,6 +2498,9 @@ void be_detect_error(struct be_adapter *adapter)
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
+ bool error_detected = false;
+ struct device *dev = &adapter->pdev->dev;
+ struct net_device *netdev = adapter->netdev;
if (be_hw_error(adapter))
return;
@@ -2450,6 +2512,21 @@ void be_detect_error(struct be_adapter *adapter)
SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
SLIPORT_ERROR2_OFFSET);
+ adapter->hw_error = true;
+ /* Do not log error messages if it's a FW reset */
+ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
+ sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
+ dev_info(dev, "Firmware update in progress\n");
+ } else {
+ error_detected = true;
+ dev_err(dev, "Error detected in the card\n");
+ dev_err(dev, "ERR: sliport status 0x%x\n",
+ sliport_status);
+ dev_err(dev, "ERR: sliport error1 0x%x\n",
+ sliport_err1);
+ dev_err(dev, "ERR: sliport error2 0x%x\n",
+ sliport_err2);
+ }
}
} else {
pci_read_config_dword(adapter->pdev,
@@ -2463,51 +2540,33 @@ void be_detect_error(struct be_adapter *adapter)
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
- }
-
- /* On certain platforms BE hardware can indicate spurious UEs.
- * Allow the h/w to stop working completely in case of a real UE.
- * Hence not setting the hw_error for UE detection.
- */
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- adapter->hw_error = true;
- /* Do not log error messages if its a FW reset */
- if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
- sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
- dev_info(&adapter->pdev->dev,
- "Firmware update in progress\n");
- return;
- } else {
- dev_err(&adapter->pdev->dev,
- "Error detected in the card\n");
- }
- }
-
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "ERR: sliport status 0x%x\n", sliport_status);
- dev_err(&adapter->pdev->dev,
- "ERR: sliport error1 0x%x\n", sliport_err1);
- dev_err(&adapter->pdev->dev,
- "ERR: sliport error2 0x%x\n", sliport_err2);
- }
- if (ue_lo) {
- for (i = 0; ue_lo; ue_lo >>= 1, i++) {
- if (ue_lo & 1)
- dev_err(&adapter->pdev->dev,
- "UE: %s bit set\n", ue_status_low_desc[i]);
- }
- }
+ /* On certain platforms BE hardware can indicate spurious UEs.
+ * Allow HW to stop working completely in case of a real UE.
+ * Hence not setting the hw_error for UE detection.
+ */
- if (ue_hi) {
- for (i = 0; ue_hi; ue_hi >>= 1, i++) {
- if (ue_hi & 1)
- dev_err(&adapter->pdev->dev,
- "UE: %s bit set\n", ue_status_hi_desc[i]);
+ if (ue_lo || ue_hi) {
+ error_detected = true;
+ dev_err(dev,
+ "Unrecoverable Error detected in the adapter");
+ dev_err(dev, "Please reboot server to recover");
+ if (skyhawk_chip(adapter))
+ adapter->hw_error = true;
+ for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+ if (ue_lo & 1)
+ dev_err(dev, "UE: %s bit set\n",
+ ue_status_low_desc[i]);
+ }
+ for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+ if (ue_hi & 1)
+ dev_err(dev, "UE: %s bit set\n",
+ ue_status_hi_desc[i]);
+ }
}
}
-
+ if (error_detected)
+ netif_carrier_off(netdev);
}
static void be_msix_disable(struct be_adapter *adapter)
@@ -2521,7 +2580,7 @@ static void be_msix_disable(struct be_adapter *adapter)
static int be_msix_enable(struct be_adapter *adapter)
{
- int i, status, num_vec;
+ int i, num_vec;
struct device *dev = &adapter->pdev->dev;
/* If RoCE is supported, program the max number of NIC vectors that
@@ -2537,24 +2596,11 @@ static int be_msix_enable(struct be_adapter *adapter)
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
- status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
- if (status == 0) {
- goto done;
- } else if (status >= MIN_MSIX_VECTORS) {
- num_vec = status;
- status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- num_vec);
- if (!status)
- goto done;
- }
-
- dev_warn(dev, "MSIx enable failed\n");
+ num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ MIN_MSIX_VECTORS, num_vec);
+ if (num_vec < 0)
+ goto fail;
- /* INTx is not supported in VFs, so fail probe if enable_msix fails */
- if (!be_physfn(adapter))
- return status;
- return 0;
-done:
if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
adapter->num_msix_roce_vec = num_vec / 2;
dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
@@ -2566,6 +2612,14 @@ done:
dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
adapter->num_msix_vec);
return 0;
+
+fail:
+ dev_warn(dev, "MSIx enable failed\n");
+
+ /* INTx is not supported in VFs, so fail probe if enable_msix fails */
+ if (!be_physfn(adapter))
+ return num_vec;
+ return 0;
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
@@ -2807,6 +2861,12 @@ static int be_open(struct net_device *netdev)
netif_tx_start_all_queues(netdev);
be_roce_dev_open(adapter);
+
+#ifdef CONFIG_BE2NET_VXLAN
+ if (skyhawk_chip(adapter))
+ vxlan_get_rx_port(netdev);
+#endif
+
return 0;
err:
be_close(adapter->netdev);
@@ -2962,6 +3022,21 @@ static void be_mac_clear(struct be_adapter *adapter)
}
}
+#ifdef CONFIG_BE2NET_VXLAN
+static void be_disable_vxlan_offloads(struct be_adapter *adapter)
+{
+ if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
+ be_cmd_manage_iface(adapter, adapter->if_handle,
+ OP_CONVERT_TUNNEL_TO_NORMAL);
+
+ if (adapter->vxlan_port)
+ be_cmd_set_vxlan_port(adapter, 0);
+
+ adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
+ adapter->vxlan_port = 0;
+}
+#endif
+
static int be_clear(struct be_adapter *adapter)
{
be_cancel_worker(adapter);
@@ -2969,6 +3044,9 @@ static int be_clear(struct be_adapter *adapter)
if (sriov_enabled(adapter))
be_vf_clear(adapter);
+#ifdef CONFIG_BE2NET_VXLAN
+ be_disable_vxlan_offloads(adapter);
+#endif
/* delete the primary mac along with the uc-mac list */
be_mac_clear(adapter);
@@ -3093,15 +3171,19 @@ static int be_vf_setup(struct be_adapter *adapter)
* Allow full available bandwidth
*/
if (BE3_chip(adapter) && !old_vfs)
- be_cmd_set_qos(adapter, 1000, vf+1);
+ be_cmd_config_qos(adapter, 1000, vf + 1);
status = be_cmd_link_status_query(adapter, &lnk_speed,
NULL, vf + 1);
if (!status)
vf_cfg->tx_rate = lnk_speed;
- if (!old_vfs)
+ if (!old_vfs) {
be_cmd_enable_vf(adapter, vf + 1);
+ be_cmd_set_logical_link_config(adapter,
+ IFLA_VF_LINK_STATE_AUTO,
+ vf+1);
+ }
}
if (!old_vfs) {
@@ -3119,19 +3201,38 @@ err:
return status;
}
+/* Converting function_mode bits on BE3 to SH mc_type enums */
+
+static u8 be_convert_mc_type(u32 function_mode)
+{
+ if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
+ return vNIC1;
+ else if (function_mode & FLEX10_MODE)
+ return FLEX10;
+ else if (function_mode & VNIC_MODE)
+ return vNIC2;
+ else if (function_mode & UMC_ENABLED)
+ return UMC;
+ else
+ return MC_NONE;
+}
+
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
struct be_resources *res)
{
struct pci_dev *pdev = adapter->pdev;
bool use_sriov = false;
- int max_vfs;
-
- max_vfs = pci_sriov_get_totalvfs(pdev);
-
- if (BE3_chip(adapter) && sriov_want(adapter)) {
- res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
- use_sriov = res->max_vfs;
+ int max_vfs = 0;
+
+ if (be_physfn(adapter) && BE3_chip(adapter)) {
+ be_cmd_get_profile_config(adapter, res, 0);
+ /* Some old versions of BE3 FW don't report max_vfs value */
+ if (res->max_vfs == 0) {
+ max_vfs = pci_sriov_get_totalvfs(pdev);
+ res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
+ }
+ use_sriov = res->max_vfs && sriov_want(adapter);
}
if (be_physfn(adapter))
@@ -3139,17 +3240,32 @@ static void BEx_get_resources(struct be_adapter *adapter,
else
res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
- if (adapter->function_mode & FLEX10_MODE)
- res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
- else if (adapter->function_mode & UMC_ENABLED)
- res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
- else
+ adapter->mc_type = be_convert_mc_type(adapter->function_mode);
+
+ if (be_is_mc(adapter)) {
+ /* Assuming that there are 4 channels per port,
+ * when multi-channel is enabled
+ */
+ if (be_is_qnq_mode(adapter))
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+ else
+ /* In a non-qnq multichannel mode, the pvid
+ * takes up one vlan entry
+ */
+ res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
+ } else {
res->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ }
+
res->max_mcast_mac = BE_MAX_MC;
- /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
- if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
- !be_physfn(adapter) || (adapter->port_num > 1))
+ /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
+ * 2) Create multiple TX rings on a BE3-R multi-channel interface
+ * *only* if it is RSS-capable.
+ */
+ if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
+ !be_physfn(adapter) || (be_is_mc(adapter) &&
+ !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
res->max_tx_qs = 1;
else
res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3161,7 +3277,7 @@ static void BEx_get_resources(struct be_adapter *adapter,
res->max_rx_qs = res->max_rss_qs + 1;
if (be_physfn(adapter))
- res->max_evt_qs = (max_vfs > 0) ?
+ res->max_evt_qs = (res->max_vfs > 0) ?
BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
else
res->max_evt_qs = 1;
@@ -3252,9 +3368,8 @@ static int be_get_config(struct be_adapter *adapter)
if (status)
return status;
- /* primary mac needs 1 pmac entry */
- adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
- GFP_KERNEL);
+ adapter->pmac_id = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->pmac_id), GFP_KERNEL);
if (!adapter->pmac_id)
return -ENOMEM;
@@ -3428,6 +3543,10 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
+ if (be_physfn(adapter))
+ be_cmd_set_logical_link_config(adapter,
+ IFLA_VF_LINK_STATE_AUTO, 0);
+
if (sriov_want(adapter)) {
if (be_max_vfs(adapter))
be_vf_setup(adapter);
@@ -4052,6 +4171,67 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
+#ifdef CONFIG_BE2NET_VXLAN
+static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ if (lancer_chip(adapter) || BEx_chip(adapter))
+ return;
+
+ if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
+ dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
+ be16_to_cpu(port));
+ dev_info(dev,
+ "Only one UDP port supported for VxLAN offloads\n");
+ return;
+ }
+
+ status = be_cmd_manage_iface(adapter, adapter->if_handle,
+ OP_CONVERT_NORMAL_TO_TUNNEL);
+ if (status) {
+ dev_warn(dev, "Failed to convert normal interface to tunnel\n");
+ goto err;
+ }
+
+ status = be_cmd_set_vxlan_port(adapter, port);
+ if (status) {
+ dev_warn(dev, "Failed to add VxLAN port\n");
+ goto err;
+ }
+ adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
+ adapter->vxlan_port = port;
+
+ dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
+ be16_to_cpu(port));
+ return;
+err:
+ be_disable_vxlan_offloads(adapter);
+ return;
+}
+
+static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (lancer_chip(adapter) || BEx_chip(adapter))
+ return;
+
+ if (adapter->vxlan_port != port)
+ return;
+
+ be_disable_vxlan_offloads(adapter);
+
+ dev_info(&adapter->pdev->dev,
+ "Disabled VxLAN offloads for UDP port %d\n",
+ be16_to_cpu(port));
+}
+#endif
+
static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
@@ -4067,13 +4247,18 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_set_vf_vlan = be_set_vf_vlan,
.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
.ndo_get_vf_config = be_get_vf_config,
+ .ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
.ndo_bridge_setlink = be_ndo_bridge_setlink,
.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = be_busy_poll
+ .ndo_busy_poll = be_busy_poll,
+#endif
+#ifdef CONFIG_BE2NET_VXLAN
+ .ndo_add_vxlan_port = be_add_vxlan_port,
+ .ndo_del_vxlan_port = be_del_vxlan_port,
#endif
};
@@ -4081,6 +4266,12 @@ static void be_netdev_init(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ if (skyhawk_chip(adapter)) {
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX;
@@ -4427,14 +4618,32 @@ static bool be_reset_required(struct be_adapter *adapter)
static char *mc_name(struct be_adapter *adapter)
{
- if (adapter->function_mode & FLEX10_MODE)
- return "FLEX10";
- else if (adapter->function_mode & VNIC_MODE)
- return "vNIC";
- else if (adapter->function_mode & UMC_ENABLED)
- return "UMC";
- else
- return "";
+ char *str = ""; /* default */
+
+ switch (adapter->mc_type) {
+ case UMC:
+ str = "UMC";
+ break;
+ case FLEX10:
+ str = "FLEX10";
+ break;
+ case vNIC1:
+ str = "vNIC-1";
+ break;
+ case nPAR:
+ str = "nPAR";
+ break;
+ case UFP:
+ str = "UFP";
+ break;
+ case vNIC2:
+ str = "vNIC-2";
+ break;
+ default:
+ str = "";
+ }
+
+ return str;
}
static inline char *func_name(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 9cd5415fe017..5bf16603a3e9 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -35,6 +35,12 @@ static void _be_roce_dev_add(struct be_adapter *adapter)
if (!ocrdma_drv)
return;
+
+ if (ocrdma_drv->be_abi_version != BE_ROCE_ABI_VERSION) {
+ dev_warn(&pdev->dev, "Cannot initialize RoCE due to ocrdma ABI mismatch\n");
+ return;
+ }
+
if (pdev->device == OC_DEVICE_ID5) {
/* only msix is supported on these devices */
if (!msix_enabled(adapter))
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index 2cd1129e19af..a3d9e96c18eb 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -21,6 +21,8 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#define BE_ROCE_ABI_VERSION 1
+
struct ocrdma_dev;
enum be_interrupt_mode {
@@ -52,6 +54,7 @@ struct be_dev_info {
/* ocrdma driver register's the callback functions with nic driver. */
struct ocrdma_driver {
unsigned char name[32];
+ u32 be_abi_version;
struct ocrdma_dev *(*add) (struct be_dev_info *dev_info);
void (*remove) (struct ocrdma_dev *);
void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 55e0fa03dc90..8b70ca7e342b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -660,11 +660,6 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
return -EBUSY;
}
-static int ethoc_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static void ethoc_mdio_poll(struct net_device *dev)
{
}
@@ -1210,7 +1205,6 @@ static int ethoc_probe(struct platform_device *pdev)
priv->mdio->name, pdev->id);
priv->mdio->read = ethoc_mdio_read;
priv->mdio->write = ethoc_mdio_write;
- priv->mdio->reset = ethoc_mdio_reset;
priv->mdio->priv = priv;
priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index c11ecbc98149..68069eabc4f8 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -940,11 +940,6 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
return -EIO;
}
-static int ftgmac100_mdiobus_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
/******************************************************************************
* struct ethtool_ops functions
*****************************************************************************/
@@ -1262,7 +1257,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->mii_bus->priv = netdev;
priv->mii_bus->read = ftgmac100_mdiobus_read;
priv->mii_bus->write = ftgmac100_mdiobus_write;
- priv->mii_bus->reset = ftgmac100_mdiobus_reset;
priv->mii_bus->irq = priv->phy_irq;
for (i = 0; i < PHY_MAX_ADDR; i++)
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 549ce13b92ac..71debd1c18c9 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
gianfar_driver-objs := gianfar.o \
- gianfar_ethtool.o \
- gianfar_sysfs.o
+ gianfar_ethtool.o
obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 03a351300013..8d69e439f0c5 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -338,7 +338,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Protocol checksum off-load for TCP and UDP. */
if (fec_enet_clear_csum(skb, ndev)) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1255,11 +1255,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
-static int fec_enet_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1384,7 +1379,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->name = "fec_enet_mii_bus";
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
- fep->mii_bus->reset = fec_enet_mdio_reset;
snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, fep->dev_id + 1);
fep->mii_bus->priv = fep;
@@ -1904,10 +1898,11 @@ fec_set_mac_address(struct net_device *ndev, void *p)
struct fec_enet_private *fep = netdev_priv(ndev);
struct sockaddr *addr = p;
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ if (addr) {
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ }
writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
@@ -2006,6 +2001,8 @@ static int fec_enet_init(struct net_device *ndev)
/* Get the Ethernet address */
fec_get_mac(ndev);
+ /* make sure the MAC we just acquired is programmed into the hw */
+ fec_set_mac_address(ndev, NULL);
/* init the tx & rx ring size */
fep->tx_ring_size = TX_RING_SIZE;
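The two fec_main.c hunks above let fec_set_mac_address() accept a NULL sockaddr so that fec_enet_init() can call it purely to push the already-chosen MAC into the registers. A small userspace analogue of that "NULL means re-program the current address" convention (not the driver code itself):

#include <stdio.h>
#include <string.h>

static unsigned char dev_addr[6];

static void program_mac_into_hw(const unsigned char *mac)
{
	printf("hw <- %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}

static int set_mac_address(const unsigned char *new_mac)
{
	if (new_mac)			/* NULL: keep the current address */
		memcpy(dev_addr, new_mac, sizeof(dev_addr));
	program_mac_into_hw(dev_addr);	/* always (re)program the hardware */
	return 0;
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };

	set_mac_address(mac);	/* normal path: store and program */
	set_mac_address(NULL);	/* init path: just re-program what we have */
	return 0;
}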
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 89ccb5b08708..82386b29914a 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -372,6 +372,7 @@ void fec_ptp_init(struct platform_device *pdev)
fep->ptp_caps.n_alarm = 0;
fep->ptp_caps.n_ext_ts = 0;
fep->ptp_caps.n_per_out = 0;
+ fep->ptp_caps.n_pins = 0;
fep->ptp_caps.pps = 0;
fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
fep->ptp_caps.adjtime = fec_ptp_adjtime;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 62f042d4aaa9..dc80db41d6b3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -91,6 +91,9 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
u16 pkt_len, sc;
int curidx;
+ if (budget <= 0)
+ return received;
+
/*
* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 7e69c983d12a..ebf5d6429a8d 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -95,12 +95,6 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location,
}
-static int fs_enet_fec_mii_reset(struct mii_bus *bus)
-{
- /* nothing here - for now */
- return 0;
-}
-
static struct of_device_id fs_enet_mdio_fec_match[];
static int fs_enet_mdio_probe(struct platform_device *ofdev)
{
@@ -128,7 +122,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
new_bus->name = "FEC MII Bus";
new_bus->read = &fs_enet_fec_mii_read;
new_bus->write = &fs_enet_fec_mii_write;
- new_bus->reset = &fs_enet_fec_mii_reset;
ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ad5a5aadc7e1..9125d9abf099 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
* Copyright 2007 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -121,7 +121,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
-static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
@@ -129,8 +128,10 @@ static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
-static int gfar_poll(struct napi_struct *napi, int budget);
-static int gfar_poll_sq(struct napi_struct *napi, int budget);
+static int gfar_poll_rx(struct napi_struct *napi, int budget);
+static int gfar_poll_tx(struct napi_struct *napi, int budget);
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
@@ -138,9 +139,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi);
-void gfar_halt(struct net_device *dev);
-static void gfar_halt_nodisable(struct net_device *dev);
-void gfar_start(struct net_device *dev);
+static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
const u8 *addr);
@@ -332,72 +331,76 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
}
}
-static void gfar_init_mac(struct net_device *ndev)
+static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(ndev);
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u32 rctrl = 0;
- u32 tctrl = 0;
- u32 attrs = 0;
-
- /* write the tx/rx base registers */
- gfar_init_tx_rx_base(priv);
-
- /* Configure the coalescing support */
- gfar_configure_coalescing_all(priv);
+ int frame_size = priv->ndev->mtu + ETH_HLEN;
/* set this when rx hw offload (TOE) functions are being used */
priv->uses_rxfcb = 0;
+ if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
+ priv->uses_rxfcb = 1;
+
+ if (priv->hwts_rx_en)
+ priv->uses_rxfcb = 1;
+
+ if (priv->uses_rxfcb)
+ frame_size += GMAC_FCB_LEN;
+
+ frame_size += priv->padding;
+
+ frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+ INCREMENTAL_BUFFER_SIZE;
+
+ priv->rx_buffer_size = frame_size;
+}
+
+static void gfar_mac_rx_config(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 rctrl = 0;
+
if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN;
/* Program the RIR0 reg with the required distribution */
- gfar_write(&regs->rir0, DEFAULT_RIR0);
+ if (priv->poll_mode == GFAR_SQ_POLLING)
+ gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
+ else /* GFAR_MQ_POLLING */
+ gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
}
/* Restore PROMISC mode */
- if (ndev->flags & IFF_PROMISC)
+ if (priv->ndev->flags & IFF_PROMISC)
rctrl |= RCTRL_PROM;
- if (ndev->features & NETIF_F_RXCSUM) {
+ if (priv->ndev->features & NETIF_F_RXCSUM)
rctrl |= RCTRL_CHECKSUMMING;
- priv->uses_rxfcb = 1;
- }
- if (priv->extended_hash) {
- rctrl |= RCTRL_EXTHASH;
-
- gfar_clear_exact_match(ndev);
- rctrl |= RCTRL_EMEN;
- }
+ if (priv->extended_hash)
+ rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
if (priv->padding) {
rctrl &= ~RCTRL_PAL_MASK;
rctrl |= RCTRL_PADDING(priv->padding);
}
- /* Insert receive time stamps into padding alignment bytes */
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
- rctrl &= ~RCTRL_PAL_MASK;
- rctrl |= RCTRL_PADDING(8);
- priv->padding = 8;
- }
-
/* Enable HW time stamping if requested from user space */
- if (priv->hwts_rx_en) {
+ if (priv->hwts_rx_en)
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
- priv->uses_rxfcb = 1;
- }
- if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
- priv->uses_rxfcb = 1;
- }
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
+}
+
+static void gfar_mac_tx_config(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tctrl = 0;
- if (ndev->features & NETIF_F_IP_CSUM)
+ if (priv->ndev->features & NETIF_F_IP_CSUM)
tctrl |= TCTRL_INIT_CSUM;
if (priv->prio_sched_en)
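gfar_rx_buff_size_config() in the hunk above rounds the worst-case frame size up to the next buffer increment, always adding one full increment even when the size is already aligned. A runnable sketch of that arithmetic, assuming the driver's INCREMENTAL_BUFFER_SIZE is 512:

#include <stdio.h>

#define INCREMENT 512u	/* assumed value of INCREMENTAL_BUFFER_SIZE */

static unsigned int rx_buff_size(unsigned int frame_size)
{
	return (frame_size & ~(INCREMENT - 1)) + INCREMENT;
}

int main(void)
{
	/* e.g. MTU 1500 + 14 byte header + 8 byte FCB + 8 byte padding */
	unsigned int frame = 1500 + 14 + 8 + 8;

	printf("%u -> %u\n", frame, rx_buff_size(frame));	/* 1530 -> 1536 */
	printf("%u -> %u\n", 1024u, rx_buff_size(1024));	/* 1024 -> 1536 */
	return 0;
}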
@@ -408,30 +411,51 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
}
- gfar_write(&regs->tctrl, tctrl);
+ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ tctrl |= TCTRL_VLINS;
- /* Set the extraction length and index */
- attrs = ATTRELI_EL(priv->rx_stash_size) |
- ATTRELI_EI(priv->rx_stash_index);
+ gfar_write(&regs->tctrl, tctrl);
+}
- gfar_write(&regs->attreli, attrs);
+static void gfar_configure_coalescing(struct gfar_private *priv,
+ unsigned long tx_mask, unsigned long rx_mask)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
- /* Start with defaults, and add stashing or locking
- * depending on the approprate variables
- */
- attrs = ATTR_INIT_SETTINGS;
+ if (priv->mode == MQ_MG_MODE) {
+ int i = 0;
- if (priv->bd_stash_en)
- attrs |= ATTR_BDSTASH;
+ baddr = &regs->txic0;
+ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
+ gfar_write(baddr + i, 0);
+ if (likely(priv->tx_queue[i]->txcoalescing))
+ gfar_write(baddr + i, priv->tx_queue[i]->txic);
+ }
- if (priv->rx_stash_size != 0)
- attrs |= ATTR_BUFSTASH;
+ baddr = &regs->rxic0;
+ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
+ gfar_write(baddr + i, 0);
+ if (likely(priv->rx_queue[i]->rxcoalescing))
+ gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+ }
+ } else {
+ /* Backward compatible case -- even if we enable
+ * multiple queues, there's only a single reg to program
+ */
+ gfar_write(&regs->txic, 0);
+ if (likely(priv->tx_queue[0]->txcoalescing))
+ gfar_write(&regs->txic, priv->tx_queue[0]->txic);
- gfar_write(&regs->attr, attrs);
+ gfar_write(&regs->rxic, 0);
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
+ gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+ }
+}
- gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
- gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
- gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
+void gfar_configure_coalescing_all(struct gfar_private *priv)
+{
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
@@ -479,12 +503,27 @@ static const struct net_device_ops gfar_netdev_ops = {
#endif
};
-void lock_rx_qs(struct gfar_private *priv)
+static void gfar_ints_disable(struct gfar_private *priv)
{
int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Clear IEVENT */
+ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
- for (i = 0; i < priv->num_rx_queues; i++)
- spin_lock(&priv->rx_queue[i]->rxlock);
+ /* Initialize IMASK */
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ }
+}
+
+static void gfar_ints_enable(struct gfar_private *priv)
+{
+ int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Unmask the interrupts we look for */
+ gfar_write(&regs->imask, IMASK_DEFAULT);
+ }
}
void lock_tx_qs(struct gfar_private *priv)
@@ -495,23 +534,50 @@ void lock_tx_qs(struct gfar_private *priv)
spin_lock(&priv->tx_queue[i]->txlock);
}
-void unlock_rx_qs(struct gfar_private *priv)
+void unlock_tx_qs(struct gfar_private *priv)
{
int i;
- for (i = 0; i < priv->num_rx_queues; i++)
- spin_unlock(&priv->rx_queue[i]->rxlock);
+ for (i = 0; i < priv->num_tx_queues; i++)
+ spin_unlock(&priv->tx_queue[i]->txlock);
}
-void unlock_tx_qs(struct gfar_private *priv)
+static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
int i;
- for (i = 0; i < priv->num_tx_queues; i++)
- spin_unlock(&priv->tx_queue[i]->txlock);
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+ GFP_KERNEL);
+ if (!priv->tx_queue[i])
+ return -ENOMEM;
+
+ priv->tx_queue[i]->tx_skbuff = NULL;
+ priv->tx_queue[i]->qindex = i;
+ priv->tx_queue[i]->dev = priv->ndev;
+ spin_lock_init(&(priv->tx_queue[i]->txlock));
+ }
+ return 0;
}
-static void free_tx_pointers(struct gfar_private *priv)
+static int gfar_alloc_rx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+ GFP_KERNEL);
+ if (!priv->rx_queue[i])
+ return -ENOMEM;
+
+ priv->rx_queue[i]->rx_skbuff = NULL;
+ priv->rx_queue[i]->qindex = i;
+ priv->rx_queue[i]->dev = priv->ndev;
+ }
+ return 0;
+}
+
+static void gfar_free_tx_queues(struct gfar_private *priv)
{
int i;
@@ -519,7 +585,7 @@ static void free_tx_pointers(struct gfar_private *priv)
kfree(priv->tx_queue[i]);
}
-static void free_rx_pointers(struct gfar_private *priv)
+static void gfar_free_rx_queues(struct gfar_private *priv)
{
int i;
@@ -553,23 +619,26 @@ static void disable_napi(struct gfar_private *priv)
{
int i;
- for (i = 0; i < priv->num_grps; i++)
- napi_disable(&priv->gfargrp[i].napi);
+ for (i = 0; i < priv->num_grps; i++) {
+ napi_disable(&priv->gfargrp[i].napi_rx);
+ napi_disable(&priv->gfargrp[i].napi_tx);
+ }
}
static void enable_napi(struct gfar_private *priv)
{
int i;
- for (i = 0; i < priv->num_grps; i++)
- napi_enable(&priv->gfargrp[i].napi);
+ for (i = 0; i < priv->num_grps; i++) {
+ napi_enable(&priv->gfargrp[i].napi_rx);
+ napi_enable(&priv->gfargrp[i].napi_tx);
+ }
}
static int gfar_parse_group(struct device_node *np,
struct gfar_private *priv, const char *model)
{
struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
- u32 *queue_mask;
int i;
for (i = 0; i < GFAR_NUM_IRQS; i++) {
@@ -598,16 +667,52 @@ static int gfar_parse_group(struct device_node *np,
grp->priv = priv;
spin_lock_init(&grp->grplock);
if (priv->mode == MQ_MG_MODE) {
- queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
- grp->rx_bit_map = queue_mask ?
- *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
- queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
- grp->tx_bit_map = queue_mask ?
- *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ u32 *rxq_mask, *txq_mask;
+ rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+ txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+
+ if (priv->poll_mode == GFAR_SQ_POLLING) {
+ /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
+ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+ grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+ } else { /* GFAR_MQ_POLLING */
+ grp->rx_bit_map = rxq_mask ?
+ *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ grp->tx_bit_map = txq_mask ?
+ *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ }
} else {
grp->rx_bit_map = 0xFF;
grp->tx_bit_map = 0xFF;
}
+
+ /* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
+ * right to left, so we need to reverse the 8 bits to get the q index
+ */
+ grp->rx_bit_map = bitrev8(grp->rx_bit_map);
+ grp->tx_bit_map = bitrev8(grp->tx_bit_map);
+
+ /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+ * also assign queues to groups
+ */
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ if (!grp->rx_queue)
+ grp->rx_queue = priv->rx_queue[i];
+ grp->num_rx_queues++;
+ grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
+ priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+ priv->rx_queue[i]->grp = grp;
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ if (!grp->tx_queue)
+ grp->tx_queue = priv->tx_queue[i];
+ grp->num_tx_queues++;
+ grp->tstat |= (TSTAT_CLEAR_THALT >> i);
+ priv->tqueue |= (TQUEUE_EN0 >> i);
+ priv->tx_queue[i]->grp = grp;
+ }
+
priv->num_grps++;
return 0;
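The bitrev8() calls above replace the driver's open-coded reverse_bitmap() (removed further down in this patch) for the 8-bit per-group queue masks. A userspace sketch checking that the two reversals agree over all 8-bit values (both reimplemented here for illustration):

#include <stdio.h>

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0, mask = 1u << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map |= 1u << i;
		mask >>= 1;
	}
	return new_bit_map;
}

static unsigned char bitrev8_sketch(unsigned char b)
{
	unsigned char r = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (b & (1u << i))
			r |= 1u << (7 - i);
	return r;
}

int main(void)
{
	unsigned int m;

	for (m = 0; m < 256; m++)
		if (reverse_bitmap(m, 8) != bitrev8_sketch((unsigned char)m))
			printf("mismatch at 0x%02x\n", m);
	printf("done\n");	/* prints only "done" if the two agree */
	return 0;
}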
@@ -628,13 +733,45 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
const u32 *stash_idx;
unsigned int num_tx_qs, num_rx_qs;
u32 *tx_queues, *rx_queues;
+ unsigned short mode, poll_mode;
if (!np || !of_device_is_available(np))
return -ENODEV;
- /* parse the num of tx and rx queues */
+ if (of_device_is_compatible(np, "fsl,etsec2")) {
+ mode = MQ_MG_MODE;
+ poll_mode = GFAR_SQ_POLLING;
+ } else {
+ mode = SQ_SG_MODE;
+ poll_mode = GFAR_SQ_POLLING;
+ }
+
+ /* parse the num of HW tx and rx queues */
tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
- num_tx_qs = tx_queues ? *tx_queues : 1;
+ rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+
+ if (mode == SQ_SG_MODE) {
+ num_tx_qs = 1;
+ num_rx_qs = 1;
+ } else { /* MQ_MG_MODE */
+ /* get the actual number of supported groups */
+ unsigned int num_grps = of_get_available_child_count(np);
+
+ if (num_grps == 0 || num_grps > MAXGROUPS) {
+ dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
+ num_grps);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ if (poll_mode == GFAR_SQ_POLLING) {
+ num_tx_qs = num_grps; /* one txq per int group */
+ num_rx_qs = num_grps; /* one rxq per int group */
+ } else { /* GFAR_MQ_POLLING */
+ num_tx_qs = tx_queues ? *tx_queues : 1;
+ num_rx_qs = rx_queues ? *rx_queues : 1;
+ }
+ }
if (num_tx_qs > MAX_TX_QS) {
pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
@@ -643,9 +780,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return -EINVAL;
}
- rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
- num_rx_qs = rx_queues ? *rx_queues : 1;
-
if (num_rx_qs > MAX_RX_QS) {
pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
num_rx_qs, MAX_RX_QS);
@@ -661,10 +795,20 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv = netdev_priv(dev);
priv->ndev = dev;
+ priv->mode = mode;
+ priv->poll_mode = poll_mode;
+
priv->num_tx_queues = num_tx_qs;
netif_set_real_num_rx_queues(dev, num_rx_qs);
priv->num_rx_queues = num_rx_qs;
- priv->num_grps = 0x0;
+
+ err = gfar_alloc_tx_queues(priv);
+ if (err)
+ goto tx_alloc_failed;
+
+ err = gfar_alloc_rx_queues(priv);
+ if (err)
+ goto rx_alloc_failed;
/* Init Rx queue filer rule set linked list */
INIT_LIST_HEAD(&priv->rx_list.list);
@@ -677,52 +821,18 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->gfargrp[i].regs = NULL;
/* Parse and initialize group specific information */
- if (of_device_is_compatible(np, "fsl,etsec2")) {
- priv->mode = MQ_MG_MODE;
+ if (priv->mode == MQ_MG_MODE) {
for_each_child_of_node(np, child) {
err = gfar_parse_group(child, priv, model);
if (err)
goto err_grp_init;
}
- } else {
- priv->mode = SQ_SG_MODE;
+ } else { /* SQ_SG_MODE */
err = gfar_parse_group(np, priv, model);
if (err)
goto err_grp_init;
}
- for (i = 0; i < priv->num_tx_queues; i++)
- priv->tx_queue[i] = NULL;
- for (i = 0; i < priv->num_rx_queues; i++)
- priv->rx_queue[i] = NULL;
-
- for (i = 0; i < priv->num_tx_queues; i++) {
- priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
- GFP_KERNEL);
- if (!priv->tx_queue[i]) {
- err = -ENOMEM;
- goto tx_alloc_failed;
- }
- priv->tx_queue[i]->tx_skbuff = NULL;
- priv->tx_queue[i]->qindex = i;
- priv->tx_queue[i]->dev = dev;
- spin_lock_init(&(priv->tx_queue[i]->txlock));
- }
-
- for (i = 0; i < priv->num_rx_queues; i++) {
- priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
- GFP_KERNEL);
- if (!priv->rx_queue[i]) {
- err = -ENOMEM;
- goto rx_alloc_failed;
- }
- priv->rx_queue[i]->rx_skbuff = NULL;
- priv->rx_queue[i]->qindex = i;
- priv->rx_queue[i]->dev = dev;
- spin_lock_init(&(priv->rx_queue[i]->rxlock));
- }
-
-
stash = of_get_property(np, "bd-stash", NULL);
if (stash) {
@@ -749,17 +859,16 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
FSL_GIANFAR_DEV_HAS_COALESCE |
FSL_GIANFAR_DEV_HAS_RMON |
FSL_GIANFAR_DEV_HAS_MULTI_INTR;
if (model && !strcasecmp(model, "eTSEC"))
- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
FSL_GIANFAR_DEV_HAS_COALESCE |
FSL_GIANFAR_DEV_HAS_RMON |
FSL_GIANFAR_DEV_HAS_MULTI_INTR |
- FSL_GIANFAR_DEV_HAS_PADDING |
FSL_GIANFAR_DEV_HAS_CSUM |
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
@@ -784,12 +893,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return 0;
-rx_alloc_failed:
- free_rx_pointers(priv);
-tx_alloc_failed:
- free_tx_pointers(priv);
err_grp_init:
unmap_group_regs(priv);
+rx_alloc_failed:
+ gfar_free_rx_queues(priv);
+tx_alloc_failed:
+ gfar_free_tx_queues(priv);
free_gfar_dev(priv);
return err;
}
@@ -822,18 +931,16 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
if (priv->hwts_rx_en) {
- stop_gfar(netdev);
priv->hwts_rx_en = 0;
- startup_gfar(netdev);
+ reset_gfar(netdev);
}
break;
default:
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
return -ERANGE;
if (!priv->hwts_rx_en) {
- stop_gfar(netdev);
priv->hwts_rx_en = 1;
- startup_gfar(netdev);
+ reset_gfar(netdev);
}
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
@@ -875,19 +982,6 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(priv->phydev, rq, cmd);
}
-static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
-{
- unsigned int new_bit_map = 0x0;
- int mask = 0x1 << (max_qs - 1), i;
-
- for (i = 0; i < max_qs; i++) {
- if (bit_map & mask)
- new_bit_map = new_bit_map + (1 << i);
- mask = mask >> 0x1;
- }
- return new_bit_map;
-}
-
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
u32 class)
{
@@ -1005,99 +1099,140 @@ static void gfar_detect_errata(struct gfar_private *priv)
priv->errata);
}
-/* Set up the ethernet device structure, private data,
- * and anything else we need before we start
- */
-static int gfar_probe(struct platform_device *ofdev)
+void gfar_mac_reset(struct gfar_private *priv)
{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- struct net_device *dev = NULL;
- struct gfar_private *priv = NULL;
- struct gfar __iomem *regs = NULL;
- int err = 0, i, grp_idx = 0;
- u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
- u32 isrg = 0;
- u32 __iomem *baddr;
-
- err = gfar_of_init(ofdev, &dev);
-
- if (err)
- return err;
-
- priv = netdev_priv(dev);
- priv->ndev = dev;
- priv->ofdev = ofdev;
- priv->dev = &ofdev->dev;
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
- spin_lock_init(&priv->bflock);
- INIT_WORK(&priv->reset_task, gfar_reset_task);
-
- platform_set_drvdata(ofdev, priv);
- regs = priv->gfargrp[0].regs;
-
- gfar_detect_errata(priv);
-
- /* Stop the DMA engine now, in case it was running before
- * (The firmware could have used it, and left it running).
- */
- gfar_halt(dev);
/* Reset MAC layer */
gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
/* We need to delay at least 3 TX clocks */
- udelay(2);
+ udelay(3);
- tempval = 0;
- if (!priv->pause_aneg_en && priv->tx_pause_en)
- tempval |= MACCFG1_TX_FLOW;
- if (!priv->pause_aneg_en && priv->rx_pause_en)
- tempval |= MACCFG1_RX_FLOW;
/* the soft reset bit is not self-resetting, so we need to
* clear it before resuming normal operation
*/
- gfar_write(&regs->maccfg1, tempval);
+ gfar_write(&regs->maccfg1, 0);
+
+ udelay(3);
+
+ /* Compute rx_buff_size based on config flags */
+ gfar_rx_buff_size_config(priv);
+
+ /* Initialize the max receive frame/buffer lengths */
+ gfar_write(&regs->maxfrm, priv->rx_buffer_size);
+ gfar_write(&regs->mrblr, priv->rx_buffer_size);
+
+ /* Initialize the Minimum Frame Length Register */
+ gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
/* Initialize MACCFG2. */
tempval = MACCFG2_INIT_SETTINGS;
- if (gfar_has_errata(priv, GFAR_ERRATA_74))
+
+ /* If the mtu is larger than the max size for standard
+ * ethernet frames (ie, a jumbo frame), then set maccfg2
+ * to allow huge frames, and to check the length
+ */
+ if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
+ gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+
gfar_write(&regs->maccfg2, tempval);
+ /* Clear mac addr hash registers */
+ gfar_write(&regs->igaddr0, 0);
+ gfar_write(&regs->igaddr1, 0);
+ gfar_write(&regs->igaddr2, 0);
+ gfar_write(&regs->igaddr3, 0);
+ gfar_write(&regs->igaddr4, 0);
+ gfar_write(&regs->igaddr5, 0);
+ gfar_write(&regs->igaddr6, 0);
+ gfar_write(&regs->igaddr7, 0);
+
+ gfar_write(&regs->gaddr0, 0);
+ gfar_write(&regs->gaddr1, 0);
+ gfar_write(&regs->gaddr2, 0);
+ gfar_write(&regs->gaddr3, 0);
+ gfar_write(&regs->gaddr4, 0);
+ gfar_write(&regs->gaddr5, 0);
+ gfar_write(&regs->gaddr6, 0);
+ gfar_write(&regs->gaddr7, 0);
+
+ if (priv->extended_hash)
+ gfar_clear_exact_match(priv->ndev);
+
+ gfar_mac_rx_config(priv);
+
+ gfar_mac_tx_config(priv);
+
+ gfar_set_mac_address(priv->ndev);
+
+ gfar_set_multi(priv->ndev);
+
+ /* clear ievent and imask before configuring coalescing */
+ gfar_ints_disable(priv);
+
+ /* Configure the coalescing support */
+ gfar_configure_coalescing_all(priv);
+}
+
+static void gfar_hw_init(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 attrs;
+
+ /* Stop the DMA engine now, in case it was running before
+ * (The firmware could have used it, and left it running).
+ */
+ gfar_halt(priv);
+
+ gfar_mac_reset(priv);
+
+ /* Zero out the rmon mib registers if it has them */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
+
+ /* Mask off the CAM interrupts */
+ gfar_write(&regs->rmon.cam1, 0xffffffff);
+ gfar_write(&regs->rmon.cam2, 0xffffffff);
+ }
+
/* Initialize ECNTRL */
gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
- /* Set the dev->base_addr to the gfar reg region */
- dev->base_addr = (unsigned long) regs;
+ /* Set the extraction length and index */
+ attrs = ATTRELI_EL(priv->rx_stash_size) |
+ ATTRELI_EI(priv->rx_stash_index);
- /* Fill in the dev structure */
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->mtu = 1500;
- dev->netdev_ops = &gfar_netdev_ops;
- dev->ethtool_ops = &gfar_ethtool_ops;
+ gfar_write(&regs->attreli, attrs);
- /* Register for napi ...We are registering NAPI for each grp */
- if (priv->mode == SQ_SG_MODE)
- netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
- GFAR_DEV_WEIGHT);
- else
- for (i = 0; i < priv->num_grps; i++)
- netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
- GFAR_DEV_WEIGHT);
+ /* Start with defaults, and add stashing
+ * depending on driver parameters
+ */
+ attrs = ATTR_INIT_SETTINGS;
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
- dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM;
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
- }
+ if (priv->bd_stash_en)
+ attrs |= ATTR_BDSTASH;
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
- dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- }
+ if (priv->rx_stash_size != 0)
+ attrs |= ATTR_BUFSTASH;
+
+ gfar_write(&regs->attr, attrs);
+
+ /* FIFO configs */
+ gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
+ gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
+ gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
+
+ /* Program the interrupt steering regs, only for MG devices */
+ if (priv->num_grps > 1)
+ gfar_write_isrg(priv);
+}
+
+static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
priv->extended_hash = 1;
@@ -1133,68 +1268,81 @@ static int gfar_probe(struct platform_device *ofdev)
priv->hash_regs[6] = &regs->gaddr6;
priv->hash_regs[7] = &regs->gaddr7;
}
+}
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
- priv->padding = DEFAULT_PADDING;
- else
- priv->padding = 0;
+/* Set up the ethernet device structure, private data,
+ * and anything else we need before we start
+ */
+static int gfar_probe(struct platform_device *ofdev)
+{
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ int err = 0, i;
- if (dev->features & NETIF_F_IP_CSUM ||
- priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->needed_headroom = GMAC_FCB_LEN;
+ err = gfar_of_init(ofdev, &dev);
- /* Program the isrg regs only if number of grps > 1 */
- if (priv->num_grps > 1) {
- baddr = &regs->isrg0;
- for (i = 0; i < priv->num_grps; i++) {
- isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
- isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
- gfar_write(baddr, isrg);
- baddr++;
- isrg = 0x0;
+ if (err)
+ return err;
+
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+ priv->ofdev = ofdev;
+ priv->dev = &ofdev->dev;
+ SET_NETDEV_DEV(dev, &ofdev->dev);
+
+ spin_lock_init(&priv->bflock);
+ INIT_WORK(&priv->reset_task, gfar_reset_task);
+
+ platform_set_drvdata(ofdev, priv);
+
+ gfar_detect_errata(priv);
+
+ /* Set the dev->base_addr to the gfar reg region */
+ dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
+
+ /* Fill in the dev structure */
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->mtu = 1500;
+ dev->netdev_ops = &gfar_netdev_ops;
+ dev->ethtool_ops = &gfar_ethtool_ops;
+
+ /* Register for napi ...We are registering NAPI for each grp */
+ for (i = 0; i < priv->num_grps; i++) {
+ if (priv->poll_mode == GFAR_SQ_POLLING) {
+ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+ gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
+ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+ gfar_poll_tx_sq, 2);
+ } else {
+ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+ gfar_poll_rx, GFAR_DEV_WEIGHT);
+ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+ gfar_poll_tx, 2);
}
}
- /* Need to reverse the bit maps as bit_map's MSB is q0
- * but, for_each_set_bit parses from right to left, which
- * basically reverses the queue numbers
- */
- for (i = 0; i< priv->num_grps; i++) {
- priv->gfargrp[i].tx_bit_map =
- reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
- priv->gfargrp[i].rx_bit_map =
- reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}
- /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
- * also assign queues to groups
- */
- for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
- priv->gfargrp[grp_idx].num_rx_queues = 0x0;
-
- for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
- priv->num_rx_queues) {
- priv->gfargrp[grp_idx].num_rx_queues++;
- priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
- rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
- rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
- }
- priv->gfargrp[grp_idx].num_tx_queues = 0x0;
-
- for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
- priv->num_tx_queues) {
- priv->gfargrp[grp_idx].num_tx_queues++;
- priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
- tstat = tstat | (TSTAT_CLEAR_THALT >> i);
- tqueue = tqueue | (TQUEUE_EN0 >> i);
- }
- priv->gfargrp[grp_idx].rstat = rstat;
- priv->gfargrp[grp_idx].tstat = tstat;
- rstat = tstat =0;
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
- gfar_write(&regs->rqueue, rqueue);
- gfar_write(&regs->tqueue, tqueue);
+ gfar_init_addr_hash_table(priv);
+
+ /* Insert receive time stamps into padding alignment bytes */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ priv->padding = 8;
+
+ if (dev->features & NETIF_F_IP_CSUM ||
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ dev->needed_headroom = GMAC_FCB_LEN;
priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
@@ -1220,8 +1368,9 @@ static int gfar_probe(struct platform_device *ofdev)
if (priv->num_tx_queues == 1)
priv->prio_sched_en = 1;
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
+ set_bit(GFAR_DOWN, &priv->state);
+
+ gfar_hw_init(priv);
err = register_netdev(dev);
@@ -1230,6 +1379,9 @@ static int gfar_probe(struct platform_device *ofdev)
goto register_fail;
}
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
device_init_wakeup(&dev->dev,
priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1251,9 +1403,6 @@ static int gfar_probe(struct platform_device *ofdev)
/* Initialize the filer table */
gfar_init_filer_table(priv);
- /* Create all the sysfs files */
- gfar_init_sysfs(dev);
-
/* Print out the device info */
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
@@ -1272,8 +1421,8 @@ static int gfar_probe(struct platform_device *ofdev)
register_fail:
unmap_group_regs(priv);
- free_tx_pointers(priv);
- free_rx_pointers(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
if (priv->phy_node)
of_node_put(priv->phy_node);
if (priv->tbi_node)
@@ -1293,6 +1442,8 @@ static int gfar_remove(struct platform_device *ofdev)
unregister_netdev(priv->ndev);
unmap_group_regs(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
free_gfar_dev(priv);
return 0;
@@ -1318,9 +1469,8 @@ static int gfar_suspend(struct device *dev)
local_irq_save(flags);
lock_tx_qs(priv);
- lock_rx_qs(priv);
- gfar_halt_nodisable(ndev);
+ gfar_halt_nodisable(priv);
/* Disable Tx, and Rx if wake-on-LAN is disabled. */
tempval = gfar_read(&regs->maccfg1);
@@ -1332,7 +1482,6 @@ static int gfar_suspend(struct device *dev)
gfar_write(&regs->maccfg1, tempval);
- unlock_rx_qs(priv);
unlock_tx_qs(priv);
local_irq_restore(flags);
@@ -1378,15 +1527,13 @@ static int gfar_resume(struct device *dev)
*/
local_irq_save(flags);
lock_tx_qs(priv);
- lock_rx_qs(priv);
tempval = gfar_read(&regs->maccfg2);
tempval &= ~MACCFG2_MPEN;
gfar_write(&regs->maccfg2, tempval);
- gfar_start(ndev);
+ gfar_start(priv);
- unlock_rx_qs(priv);
unlock_tx_qs(priv);
local_irq_restore(flags);
@@ -1413,10 +1560,11 @@ static int gfar_restore(struct device *dev)
return -ENOMEM;
}
- init_registers(ndev);
- gfar_set_mac_address(ndev);
- gfar_init_mac(ndev);
- gfar_start(ndev);
+ gfar_mac_reset(priv);
+
+ gfar_init_tx_rx_base(priv);
+
+ gfar_start(priv);
priv->oldlink = 0;
priv->oldspeed = 0;
@@ -1574,57 +1722,6 @@ static void gfar_configure_serdes(struct net_device *dev)
BMCR_SPEED1000);
}
-static void init_registers(struct net_device *dev)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = NULL;
- int i;
-
- for (i = 0; i < priv->num_grps; i++) {
- regs = priv->gfargrp[i].regs;
- /* Clear IEVENT */
- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
-
- /* Initialize IMASK */
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
- }
-
- regs = priv->gfargrp[0].regs;
- /* Init hash registers to zero */
- gfar_write(&regs->igaddr0, 0);
- gfar_write(&regs->igaddr1, 0);
- gfar_write(&regs->igaddr2, 0);
- gfar_write(&regs->igaddr3, 0);
- gfar_write(&regs->igaddr4, 0);
- gfar_write(&regs->igaddr5, 0);
- gfar_write(&regs->igaddr6, 0);
- gfar_write(&regs->igaddr7, 0);
-
- gfar_write(&regs->gaddr0, 0);
- gfar_write(&regs->gaddr1, 0);
- gfar_write(&regs->gaddr2, 0);
- gfar_write(&regs->gaddr3, 0);
- gfar_write(&regs->gaddr4, 0);
- gfar_write(&regs->gaddr5, 0);
- gfar_write(&regs->gaddr6, 0);
- gfar_write(&regs->gaddr7, 0);
-
- /* Zero out the rmon mib registers if it has them */
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
- memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
-
- /* Mask off the CAM interrupts */
- gfar_write(&regs->rmon.cam1, 0xffffffff);
- gfar_write(&regs->rmon.cam2, 0xffffffff);
- }
-
- /* Initialize the max receive buffer length */
- gfar_write(&regs->mrblr, priv->rx_buffer_size);
-
- /* Initialize the Minimum Frame Length Register */
- gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
-}
-
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
u32 res;
@@ -1648,23 +1745,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
}
/* Halt the receive and transmit queues */
-static void gfar_halt_nodisable(struct net_device *dev)
+static void gfar_halt_nodisable(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = NULL;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- int i;
-
- for (i = 0; i < priv->num_grps; i++) {
- regs = priv->gfargrp[i].regs;
- /* Mask all interrupts */
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
- /* Clear all interrupts */
- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
- }
+ gfar_ints_disable(priv);
- regs = priv->gfargrp[0].regs;
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
@@ -1685,56 +1772,41 @@ static void gfar_halt_nodisable(struct net_device *dev)
}
/* Halt the receive and transmit queues */
-void gfar_halt(struct net_device *dev)
+void gfar_halt(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- gfar_halt_nodisable(dev);
+ /* Disable the Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, 0);
+ gfar_write(&regs->tqueue, 0);
- /* Disable Rx and Tx */
+ mdelay(10);
+
+ gfar_halt_nodisable(priv);
+
+ /* Disable Rx/Tx DMA */
tempval = gfar_read(&regs->maccfg1);
tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
gfar_write(&regs->maccfg1, tempval);
}
-static void free_grp_irqs(struct gfar_priv_grp *grp)
-{
- free_irq(gfar_irq(grp, TX)->irq, grp);
- free_irq(gfar_irq(grp, RX)->irq, grp);
- free_irq(gfar_irq(grp, ER)->irq, grp);
-}
-
void stop_gfar(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- unsigned long flags;
- int i;
-
- phy_stop(priv->phydev);
+ netif_tx_stop_all_queues(dev);
- /* Lock it down */
- local_irq_save(flags);
- lock_tx_qs(priv);
- lock_rx_qs(priv);
+ smp_mb__before_clear_bit();
+ set_bit(GFAR_DOWN, &priv->state);
+ smp_mb__after_clear_bit();
- gfar_halt(dev);
+ disable_napi(priv);
- unlock_rx_qs(priv);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
+ /* disable ints and gracefully shut down Rx/Tx DMA */
+ gfar_halt(priv);
- /* Free the IRQs */
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
- for (i = 0; i < priv->num_grps; i++)
- free_grp_irqs(&priv->gfargrp[i]);
- } else {
- for (i = 0; i < priv->num_grps; i++)
- free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
- &priv->gfargrp[i]);
- }
+ phy_stop(priv->phydev);
free_skb_resources(priv);
}
@@ -1825,17 +1897,15 @@ static void free_skb_resources(struct gfar_private *priv)
priv->tx_queue[0]->tx_bd_dma_base);
}
-void gfar_start(struct net_device *dev)
+void gfar_start(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
int i = 0;
- /* Enable Rx and Tx in MACCFG1 */
- tempval = gfar_read(&regs->maccfg1);
- tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
- gfar_write(&regs->maccfg1, tempval);
+ /* Enable Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, priv->rqueue);
+ gfar_write(&regs->tqueue, priv->tqueue);
/* Initialize DMACTRL to have WWR and WOP */
tempval = gfar_read(&regs->dmactrl);
@@ -1852,52 +1922,23 @@ void gfar_start(struct net_device *dev)
/* Clear THLT/RHLT, so that the DMA starts polling now */
gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
- /* Unmask the interrupts we look for */
- gfar_write(&regs->imask, IMASK_DEFAULT);
}
- dev->trans_start = jiffies; /* prevent tx timeout */
-}
-
-static void gfar_configure_coalescing(struct gfar_private *priv,
- unsigned long tx_mask, unsigned long rx_mask)
-{
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u32 __iomem *baddr;
-
- if (priv->mode == MQ_MG_MODE) {
- int i = 0;
-
- baddr = &regs->txic0;
- for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
- gfar_write(baddr + i, 0);
- if (likely(priv->tx_queue[i]->txcoalescing))
- gfar_write(baddr + i, priv->tx_queue[i]->txic);
- }
+ /* Enable Rx/Tx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
- baddr = &regs->rxic0;
- for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
- gfar_write(baddr + i, 0);
- if (likely(priv->rx_queue[i]->rxcoalescing))
- gfar_write(baddr + i, priv->rx_queue[i]->rxic);
- }
- } else {
- /* Backward compatible case -- even if we enable
- * multiple queues, there's only single reg to program
- */
- gfar_write(&regs->txic, 0);
- if (likely(priv->tx_queue[0]->txcoalescing))
- gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+ gfar_ints_enable(priv);
- gfar_write(&regs->rxic, 0);
- if (unlikely(priv->rx_queue[0]->rxcoalescing))
- gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
- }
+ priv->ndev->trans_start = jiffies; /* prevent tx timeout */
}
-void gfar_configure_coalescing_all(struct gfar_private *priv)
+static void free_grp_irqs(struct gfar_priv_grp *grp)
{
- gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ free_irq(gfar_irq(grp, TX)->irq, grp);
+ free_irq(gfar_irq(grp, RX)->irq, grp);
+ free_irq(gfar_irq(grp, ER)->irq, grp);
}
static int register_grp_irqs(struct gfar_priv_grp *grp)
@@ -1956,46 +1997,65 @@ err_irq_fail:
}
-/* Bring the controller up and running */
-int startup_gfar(struct net_device *ndev)
+static void gfar_free_irq(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(ndev);
- struct gfar __iomem *regs = NULL;
- int err, i, j;
+ int i;
- for (i = 0; i < priv->num_grps; i++) {
- regs= priv->gfargrp[i].regs;
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ /* Free the IRQs */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ for (i = 0; i < priv->num_grps; i++)
+ free_grp_irqs(&priv->gfargrp[i]);
+ } else {
+ for (i = 0; i < priv->num_grps; i++)
+ free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
+ &priv->gfargrp[i]);
}
+}
- regs= priv->gfargrp[0].regs;
- err = gfar_alloc_skb_resources(ndev);
- if (err)
- return err;
-
- gfar_init_mac(ndev);
+static int gfar_request_irq(struct gfar_private *priv)
+{
+ int err, i, j;
for (i = 0; i < priv->num_grps; i++) {
err = register_grp_irqs(&priv->gfargrp[i]);
if (err) {
for (j = 0; j < i; j++)
free_grp_irqs(&priv->gfargrp[j]);
- goto irq_fail;
+ return err;
}
}
- /* Start the controller */
- gfar_start(ndev);
+ return 0;
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ int err;
+
+ gfar_mac_reset(priv);
+
+ err = gfar_alloc_skb_resources(ndev);
+ if (err)
+ return err;
+
+ gfar_init_tx_rx_base(priv);
+
+ smp_mb__before_clear_bit();
+ clear_bit(GFAR_DOWN, &priv->state);
+ smp_mb__after_clear_bit();
+
+ /* Start Rx/Tx DMA and enable the interrupts */
+ gfar_start(priv);
phy_start(priv->phydev);
- gfar_configure_coalescing_all(priv);
+ enable_napi(priv);
- return 0;
+ netif_tx_wake_all_queues(ndev);
-irq_fail:
- free_skb_resources(priv);
- return err;
+ return 0;
}
/* Called when something needs to use the ethernet device
@@ -2006,27 +2066,17 @@ static int gfar_enet_open(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
int err;
- enable_napi(priv);
-
- /* Initialize a bunch of registers */
- init_registers(dev);
-
- gfar_set_mac_address(dev);
-
err = init_phy(dev);
+ if (err)
+ return err;
- if (err) {
- disable_napi(priv);
+ err = gfar_request_irq(priv);
+ if (err)
return err;
- }
err = startup_gfar(dev);
- if (err) {
- disable_napi(priv);
+ if (err)
return err;
- }
-
- netif_tx_start_all_queues(dev);
device_set_wakeup_enable(&dev->dev, priv->wol_en);
@@ -2152,13 +2202,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_new = skb_realloc_headroom(skb, fcb_len);
if (!skb_new) {
dev->stats.tx_errors++;
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (skb->sk)
skb_set_owner_w(skb_new, skb->sk);
- consume_skb(skb);
+ dev_consume_skb_any(skb);
skb = skb_new;
}
@@ -2351,8 +2401,6 @@ static int gfar_close(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- disable_napi(priv);
-
cancel_work_sync(&priv->reset_task);
stop_gfar(dev);
@@ -2360,7 +2408,7 @@ static int gfar_close(struct net_device *dev)
phy_disconnect(priv->phydev);
priv->phydev = NULL;
- netif_tx_stop_all_queues(dev);
+ gfar_free_irq(priv);
return 0;
}
@@ -2373,77 +2421,9 @@ static int gfar_set_mac_address(struct net_device *dev)
return 0;
}
-/* Check if rx parser should be activated */
-void gfar_check_rx_parser_mode(struct gfar_private *priv)
-{
- struct gfar __iomem *regs;
- u32 tempval;
-
- regs = priv->gfargrp[0].regs;
-
- tempval = gfar_read(&regs->rctrl);
- /* If parse is no longer required, then disable parser */
- if (tempval & RCTRL_REQ_PARSER) {
- tempval |= RCTRL_PRSDEP_INIT;
- priv->uses_rxfcb = 1;
- } else {
- tempval &= ~RCTRL_PRSDEP_INIT;
- priv->uses_rxfcb = 0;
- }
- gfar_write(&regs->rctrl, tempval);
-}
-
-/* Enables and disables VLAN insertion/extraction */
-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = NULL;
- unsigned long flags;
- u32 tempval;
-
- regs = priv->gfargrp[0].regs;
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- if (features & NETIF_F_HW_VLAN_CTAG_TX) {
- /* Enable VLAN tag insertion */
- tempval = gfar_read(&regs->tctrl);
- tempval |= TCTRL_VLINS;
- gfar_write(&regs->tctrl, tempval);
- } else {
- /* Disable VLAN tag insertion */
- tempval = gfar_read(&regs->tctrl);
- tempval &= ~TCTRL_VLINS;
- gfar_write(&regs->tctrl, tempval);
- }
-
- if (features & NETIF_F_HW_VLAN_CTAG_RX) {
- /* Enable VLAN tag extraction */
- tempval = gfar_read(&regs->rctrl);
- tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
- gfar_write(&regs->rctrl, tempval);
- priv->uses_rxfcb = 1;
- } else {
- /* Disable VLAN tag extraction */
- tempval = gfar_read(&regs->rctrl);
- tempval &= ~RCTRL_VLEX;
- gfar_write(&regs->rctrl, tempval);
-
- gfar_check_rx_parser_mode(priv);
- }
-
- gfar_change_mtu(dev, dev->mtu);
-
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-}
-
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
- int tempsize, tempval;
struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- int oldsize = priv->rx_buffer_size;
int frame_size = new_mtu + ETH_HLEN;
if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
@@ -2451,45 +2431,33 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
- if (priv->uses_rxfcb)
- frame_size += GMAC_FCB_LEN;
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- frame_size += priv->padding;
-
- tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
- INCREMENTAL_BUFFER_SIZE;
-
- /* Only stop and start the controller if it isn't already
- * stopped, and we changed something
- */
- if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+ if (dev->flags & IFF_UP)
stop_gfar(dev);
- priv->rx_buffer_size = tempsize;
-
dev->mtu = new_mtu;
- gfar_write(&regs->mrblr, priv->rx_buffer_size);
- gfar_write(&regs->maxfrm, priv->rx_buffer_size);
+ if (dev->flags & IFF_UP)
+ startup_gfar(dev);
- /* If the mtu is larger than the max size for standard
- * ethernet frames (ie, a jumbo frame), then set maccfg2
- * to allow huge frames, and to check the length
- */
- tempval = gfar_read(&regs->maccfg2);
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
- if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
- gfar_has_errata(priv, GFAR_ERRATA_74))
- tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
- else
- tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+ return 0;
+}
- gfar_write(&regs->maccfg2, tempval);
+void reset_gfar(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
- if ((oldsize != tempsize) && (dev->flags & IFF_UP))
- startup_gfar(dev);
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- return 0;
+ stop_gfar(ndev);
+ startup_gfar(ndev);
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
}
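reset_gfar() above serializes full restarts with a spin-until-clear bit lock on GFAR_RESETTING (test_and_set_bit_lock / clear_bit_unlock) rather than the old queue spinlocks; gfar_change_mtu() uses the same pattern. A minimal C11 userspace analogue of that bit-lock idiom (not the kernel primitives themselves):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

static void reset_device(void)
{
	/* spin until we own the "resetting" bit; cpu_relax() in the kernel */
	while (atomic_flag_test_and_set_explicit(&resetting,
						 memory_order_acquire))
		;

	printf("stop + restart the controller\n");	/* stop_gfar()/startup_gfar() */

	atomic_flag_clear_explicit(&resetting, memory_order_release);
}

int main(void)
{
	reset_device();
	reset_device();
	return 0;
}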
/* gfar_reset_task gets scheduled when a packet has not been
@@ -2501,16 +2469,7 @@ static void gfar_reset_task(struct work_struct *work)
{
struct gfar_private *priv = container_of(work, struct gfar_private,
reset_task);
- struct net_device *dev = priv->ndev;
-
- if (dev->flags & IFF_UP) {
- netif_tx_stop_all_queues(dev);
- stop_gfar(dev);
- startup_gfar(dev);
- netif_tx_start_all_queues(dev);
- }
-
- netif_tx_schedule_all(dev);
+ reset_gfar(priv->ndev);
}
static void gfar_timeout(struct net_device *dev)
@@ -2623,8 +2582,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
}
/* If we freed a buffer, we can restart transmission, if necessary */
- if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
- netif_wake_subqueue(dev, tqi);
+ if (tx_queue->num_txbdfree &&
+ netif_tx_queue_stopped(txq) &&
+ !(test_bit(GFAR_DOWN, &priv->state)))
+ netif_wake_subqueue(priv->ndev, tqi);
/* Update dirty indicators */
tx_queue->skb_dirtytx = skb_dirtytx;
@@ -2633,31 +2594,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
-static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gfargrp->grplock, flags);
- if (napi_schedule_prep(&gfargrp->napi)) {
- gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
- __napi_schedule(&gfargrp->napi);
- } else {
- /* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived.
- */
- gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
- }
- spin_unlock_irqrestore(&gfargrp->grplock, flags);
-
-}
-
-/* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *grp_id)
-{
- gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
- return IRQ_HANDLED;
-}
-
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
struct sk_buff *skb)
{
@@ -2728,7 +2664,48 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
irqreturn_t gfar_receive(int irq, void *grp_id)
{
- gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
+ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+ unsigned long flags;
+ u32 imask;
+
+ if (likely(napi_schedule_prep(&grp->napi_rx))) {
+ spin_lock_irqsave(&grp->grplock, flags);
+ imask = gfar_read(&grp->regs->imask);
+ imask &= IMASK_RX_DISABLED;
+ gfar_write(&grp->regs->imask, imask);
+ spin_unlock_irqrestore(&grp->grplock, flags);
+ __napi_schedule(&grp->napi_rx);
+ } else {
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived.
+ */
+ gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+ unsigned long flags;
+ u32 imask;
+
+ if (likely(napi_schedule_prep(&grp->napi_tx))) {
+ spin_lock_irqsave(&grp->grplock, flags);
+ imask = gfar_read(&grp->regs->imask);
+ imask &= IMASK_TX_DISABLED;
+ gfar_write(&grp->regs->imask, imask);
+ spin_unlock_irqrestore(&grp->grplock, flags);
+ __napi_schedule(&grp->napi_tx);
+ } else {
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived.
+ */
+ gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
+ }
+
return IRQ_HANDLED;
}
@@ -2852,7 +2829,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
rx_queue->stats.rx_bytes += pkt_len;
skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull,
- &rx_queue->grp->napi);
+ &rx_queue->grp->napi_rx);
} else {
netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2881,66 +2858,81 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
return howmany;
}
-static int gfar_poll_sq(struct napi_struct *napi, int budget)
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
struct gfar_priv_grp *gfargrp =
- container_of(napi, struct gfar_priv_grp, napi);
+ container_of(napi, struct gfar_priv_grp, napi_rx);
struct gfar __iomem *regs = gfargrp->regs;
- struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
- struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
+ struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
int work_done = 0;
/* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived
*/
- gfar_write(&regs->ievent, IEVENT_RTX_MASK);
-
- /* run Tx cleanup to completion */
- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
- gfar_clean_tx_ring(tx_queue);
+ gfar_write(&regs->ievent, IEVENT_RX_MASK);
work_done = gfar_clean_rx_ring(rx_queue, budget);
if (work_done < budget) {
+ u32 imask;
napi_complete(napi);
/* Clear the halt bit in RSTAT */
gfar_write(&regs->rstat, gfargrp->rstat);
- gfar_write(&regs->imask, IMASK_DEFAULT);
-
- /* If we are coalescing interrupts, update the timer
- * Otherwise, clear it
- */
- gfar_write(&regs->txic, 0);
- if (likely(tx_queue->txcoalescing))
- gfar_write(&regs->txic, tx_queue->txic);
-
- gfar_write(&regs->rxic, 0);
- if (unlikely(rx_queue->rxcoalescing))
- gfar_write(&regs->rxic, rx_queue->rxic);
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_RX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
}
return work_done;
}
-static int gfar_poll(struct napi_struct *napi, int budget)
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
+{
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi_tx);
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
+ u32 imask;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+ gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
+ gfar_clean_tx_ring(tx_queue);
+
+ napi_complete(napi);
+
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_TX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
+
+ return 0;
+}
+
+static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
struct gfar_priv_grp *gfargrp =
- container_of(napi, struct gfar_priv_grp, napi);
+ container_of(napi, struct gfar_priv_grp, napi_rx);
struct gfar_private *priv = gfargrp->priv;
struct gfar __iomem *regs = gfargrp->regs;
- struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
int work_done = 0, work_done_per_q = 0;
int i, budget_per_q = 0;
- int has_tx_work = 0;
unsigned long rstat_rxf;
int num_act_queues;
/* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived
*/
- gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+ gfar_write(&regs->ievent, IEVENT_RX_MASK);
rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
@@ -2948,15 +2940,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
if (num_act_queues)
budget_per_q = budget/num_act_queues;
- for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
- tx_queue = priv->tx_queue[i];
- /* run Tx cleanup to completion */
- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
- gfar_clean_tx_ring(tx_queue);
- has_tx_work = 1;
- }
- }
-
for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
/* skip queue if not active */
if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
@@ -2979,25 +2962,62 @@ static int gfar_poll(struct napi_struct *napi, int budget)
}
}
- if (!num_act_queues && !has_tx_work) {
-
+ if (!num_act_queues) {
+ u32 imask;
napi_complete(napi);
/* Clear the halt bit in RSTAT */
gfar_write(&regs->rstat, gfargrp->rstat);
- gfar_write(&regs->imask, IMASK_DEFAULT);
-
- /* If we are coalescing interrupts, update the timer
- * Otherwise, clear it
- */
- gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
- gfargrp->tx_bit_map);
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_RX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
}
return work_done;
}
+static int gfar_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi_tx);
+ struct gfar_private *priv = gfargrp->priv;
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ int has_tx_work = 0;
+ int i;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+ gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+ for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+ tx_queue = priv->tx_queue[i];
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+ gfar_clean_tx_ring(tx_queue);
+ has_tx_work = 1;
+ }
+ }
+
+ if (!has_tx_work) {
+ u32 imask;
+ napi_complete(napi);
+
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_TX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
+ }
+
+ return 0;
+}
+
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
@@ -3101,12 +3121,11 @@ static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned long flags;
struct phy_device *phydev = priv->phydev;
int new_state = 0;
- local_irq_save(flags);
- lock_tx_qs(priv);
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return;
if (phydev->link) {
u32 tempval1 = gfar_read(&regs->maccfg1);
@@ -3178,8 +3197,6 @@ static void adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
}
/* Update the hash table based on the current list of multicast
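Aside: the Tx path above follows a mask-in-IRQ / re-enable-in-poll handshake — gfar_transmit() ANDs imask with IMASK_TX_DISABLED before scheduling NAPI, and gfar_poll_tx_sq() ORs IMASK_TX_DEFAULT back in once the ring is clean. The standalone C sketch below replays that round trip with made-up event bits (the EV_* names and values are illustrative stand-ins, not gianfar register definitions) to show that only the Tx sources get masked while Rx stays live:

/* Toy, single-file walk-through of the mask/re-enable handshake used by the
 * split Rx/Tx NAPI code above.  All EV_* names and values are illustrative
 * stand-ins, not gianfar register definitions. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EV_TX          0x1u                      /* hypothetical Tx event source */
#define EV_RX          0x2u                      /* hypothetical Rx event source */
#define EV_ERR         0x4u                      /* hypothetical error source    */
#define EV_DEFAULT     (EV_TX | EV_RX | EV_ERR)  /* everything enabled           */
#define EV_TX_DEFAULT  EV_TX
#define EV_TX_DISABLED ((~EV_TX_DEFAULT) & EV_DEFAULT) /* mirrors IMASK_TX_DISABLED */

static uint32_t imask = EV_DEFAULT; /* model of the interrupt enable register */

static void tx_irq(void)  /* hard-IRQ half: mask Tx sources, then schedule NAPI */
{
	imask &= EV_TX_DISABLED;
}

static void tx_poll(void) /* NAPI half: clean the ring, then re-enable Tx */
{
	imask |= EV_TX_DEFAULT;
}

int main(void)
{
	tx_irq();
	assert(imask == (EV_RX | EV_ERR)); /* only Tx is masked; Rx stays live */
	tx_poll();
	assert(imask == EV_DEFAULT);       /* back to the fully enabled state  */
	puts("mask/re-enable round trip ok");
	return 0;
}

Both asserts pass because EV_TX_DISABLED is, by construction, the default mask with only the Tx bits removed — the same property the IMASK_TX_DISABLED macro gives the real driver.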
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 52bb2b0195cc..84632c569f2c 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -377,8 +377,11 @@ extern const char gfar_driver_version[];
IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
| IMASK_PERR)
-#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
- & IMASK_DEFAULT)
+#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY)
+#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN)
+
+#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
+#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
/* Fifo management */
#define FIFO_TX_THR_MASK 0x01ff
@@ -409,7 +412,9 @@ extern const char gfar_driver_version[];
/* This default RIR value directly corresponds
* to the 3-bit hash value generated */
-#define DEFAULT_RIR0 0x05397700
+#define DEFAULT_8RXQ_RIR0 0x05397700
+/* Map even hash values to Q0, and odd ones to Q1 */
+#define DEFAULT_2RXQ_RIR0 0x04104100
/* RQFCR register bits */
#define RQFCR_GPI 0x80000000
@@ -880,7 +885,6 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
-#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
@@ -892,8 +896,8 @@ struct gfar {
#define DEFAULT_MAPPING 0xFF
#endif
-#define ISRG_SHIFT_TX 0x10
-#define ISRG_SHIFT_RX 0x18
+#define ISRG_RR0 0x80000000
+#define ISRG_TR0 0x00800000
/* The same driver can operate in two modes */
/* SQ_SG_MODE: Single Queue Single Group Mode
@@ -905,6 +909,22 @@ enum {
MQ_MG_MODE
};
+/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
+ * The driver supports a single pair of Rx/Tx queues
+ * per interrupt group (Rx/Tx int line). MQ_MG mode
+ * devices have 2 interrupt groups, so the device has
+ * a total of 2 Tx and 2 Rx queues in this case.
+ * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
+ * The driver supports all 8 Rx and Tx HW queues,
+ * each queue mapped by the Device Tree to one of
+ * the 2 interrupt groups. This mode implies significant
+ * processing overhead (at both CPU and controller level).
+ */
+enum gfar_poll_mode {
+ GFAR_SQ_POLLING = 0,
+ GFAR_MQ_POLLING
+};
+
/*
* Per TX queue stats
*/
@@ -966,7 +986,6 @@ struct rx_q_stats {
/**
* struct gfar_priv_rx_q - per rx queue structure
- * @rxlock: per queue rx spin lock
* @rx_skbuff: skb pointers
* @skb_currx: currently use skb pointer
* @rx_bd_base: First rx buffer descriptor
@@ -979,8 +998,7 @@ struct rx_q_stats {
*/
struct gfar_priv_rx_q {
- spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
- struct sk_buff ** rx_skbuff;
+ struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
dma_addr_t rx_bd_dma_base;
struct rxbd8 *rx_bd_base;
struct rxbd8 *cur_rx;
@@ -1016,17 +1034,20 @@ struct gfar_irqinfo {
*/
struct gfar_priv_grp {
- spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
- struct napi_struct napi;
- struct gfar_private *priv;
+ spinlock_t grplock __aligned(SMP_CACHE_BYTES);
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
struct gfar __iomem *regs;
- unsigned int rstat;
- unsigned long num_rx_queues;
- unsigned long rx_bit_map;
- /* cacheline 3 */
+ struct gfar_priv_tx_q *tx_queue;
+ struct gfar_priv_rx_q *rx_queue;
unsigned int tstat;
+ unsigned int rstat;
+
+ struct gfar_private *priv;
unsigned long num_tx_queues;
unsigned long tx_bit_map;
+ unsigned long num_rx_queues;
+ unsigned long rx_bit_map;
struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
};
@@ -1041,6 +1062,11 @@ enum gfar_errata {
GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
};
+enum gfar_dev_state {
+ GFAR_DOWN = 1,
+ GFAR_RESETTING
+};
+
/* Struct stolen almost completely (and shamelessly) from the FCC enet source
* (Ok, that's not so true anymore, but there is a family resemblance)
* The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -1051,8 +1077,6 @@ enum gfar_errata {
* the buffer descriptor determines the actual condition.
*/
struct gfar_private {
- unsigned int num_rx_queues;
-
struct device *dev;
struct net_device *ndev;
enum gfar_errata errata;
@@ -1060,6 +1084,7 @@ struct gfar_private {
u16 uses_rxfcb;
u16 padding;
+ u32 device_flags;
/* HW time stamping enabled flag */
int hwts_rx_en;
@@ -1069,10 +1094,12 @@ struct gfar_private {
struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
struct gfar_priv_grp gfargrp[MAXGROUPS];
- u32 device_flags;
+ unsigned long state;
- unsigned int mode;
+ unsigned short mode;
+ unsigned short poll_mode;
unsigned int num_tx_queues;
+ unsigned int num_rx_queues;
unsigned int num_grps;
/* Network Statistics */
@@ -1113,6 +1140,9 @@ struct gfar_private {
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
+ u32 rqueue;
+ u32 tqueue;
+
/* RX per device parameters */
unsigned int rx_stash_size;
unsigned int rx_stash_index;
@@ -1127,11 +1157,6 @@ struct gfar_private {
u32 __iomem *hash_regs[16];
int hash_width;
- /* global parameters */
- unsigned int fifo_threshold;
- unsigned int fifo_starve;
- unsigned int fifo_starve_off;
-
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
@@ -1176,21 +1201,42 @@ static inline void gfar_read_filer(struct gfar_private *priv,
*fpr = gfar_read(&regs->rqfpr);
}
-void lock_rx_qs(struct gfar_private *priv);
-void lock_tx_qs(struct gfar_private *priv);
-void unlock_rx_qs(struct gfar_private *priv);
-void unlock_tx_qs(struct gfar_private *priv);
+static inline void gfar_write_isrg(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr = &regs->isrg0;
+ u32 isrg = 0;
+ int grp_idx, i;
+
+ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx];
+
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ isrg |= (ISRG_RR0 >> i);
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ isrg |= (ISRG_TR0 >> i);
+ }
+
+ gfar_write(baddr, isrg);
+
+ baddr++;
+ isrg = 0;
+ }
+}
+
irqreturn_t gfar_receive(int irq, void *dev_id);
int startup_gfar(struct net_device *dev);
void stop_gfar(struct net_device *dev);
-void gfar_halt(struct net_device *dev);
+void reset_gfar(struct net_device *dev);
+void gfar_mac_reset(struct gfar_private *priv);
+void gfar_halt(struct gfar_private *priv);
+void gfar_start(struct gfar_private *priv);
void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
u32 regnum, u32 read);
void gfar_configure_coalescing_all(struct gfar_private *priv);
-void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, netdev_features_t features);
-void gfar_check_rx_parser_mode(struct gfar_private *priv);
-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
extern const struct ethtool_ops gfar_ethtool_ops;
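Aside: gfar_write_isrg() in the header above steers Rx queue i to its interrupt group via bit (ISRG_RR0 >> i) and Tx queue i via (ISRG_TR0 >> i). A quick standalone check of that bit layout — only the two ISRG_* constants come from the header; the queue bitmaps below are made-up inputs:

/* Standalone check of the interrupt-steering bit layout used by
 * gfar_write_isrg(): Rx queue i maps to (ISRG_RR0 >> i), Tx queue i
 * to (ISRG_TR0 >> i).  The bitmaps passed to isrg_for() are examples. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ISRG_RR0 0x80000000u
#define ISRG_TR0 0x00800000u

static uint32_t isrg_for(unsigned long rx_bit_map, unsigned long tx_bit_map)
{
	uint32_t isrg = 0;
	int i;

	for (i = 0; i < 8; i++) {
		if (rx_bit_map & (1ul << i))
			isrg |= ISRG_RR0 >> i;
		if (tx_bit_map & (1ul << i))
			isrg |= ISRG_TR0 >> i;
	}
	return isrg;
}

int main(void)
{
	/* group owning Rx/Tx queue 0 only (typical SQ polling layout) */
	assert(isrg_for(0x1, 0x1) == 0x80800000u);
	/* group owning Rx/Tx queues 0 and 1 */
	assert(isrg_for(0x3, 0x3) == 0xc0c00000u);
	printf("isrg(0x3, 0x3) = 0x%08x\n", isrg_for(0x3, 0x3));
	return 0;
}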
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 63d234419cc1..891dbee6e6c1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -44,10 +44,6 @@
#include "gianfar.h"
-extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
- int rx_work_limit);
-
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
@@ -364,25 +360,11 @@ static int gfar_scoalesce(struct net_device *dev,
struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
- int i = 0;
+ int i, err = 0;
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
return -EOPNOTSUPP;
- /* Set up rx coalescing */
- /* As of now, we will enable/disable coalescing for all
- * queues together in case of eTSEC2, this will be modified
- * along with the ethtool interface
- */
- if ((cvals->rx_coalesce_usecs == 0) ||
- (cvals->rx_max_coalesced_frames == 0)) {
- for (i = 0; i < priv->num_rx_queues; i++)
- priv->rx_queue[i]->rxcoalescing = 0;
- } else {
- for (i = 0; i < priv->num_rx_queues; i++)
- priv->rx_queue[i]->rxcoalescing = 1;
- }
-
if (NULL == priv->phydev)
return -ENODEV;
@@ -399,6 +381,32 @@ static int gfar_scoalesce(struct net_device *dev,
return -EINVAL;
}
+ /* Check the bounds of the values */
+ if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ netdev_info(dev, "Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ /* Set up rx coalescing */
+ if ((cvals->rx_coalesce_usecs == 0) ||
+ (cvals->rx_max_coalesced_frames == 0)) {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 0;
+ } else {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 1;
+ }
+
for (i = 0; i < priv->num_rx_queues; i++) {
priv->rx_queue[i]->rxic = mk_ic_value(
cvals->rx_max_coalesced_frames,
@@ -415,28 +423,22 @@ static int gfar_scoalesce(struct net_device *dev,
priv->tx_queue[i]->txcoalescing = 1;
}
- /* Check the bounds of the values */
- if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
- netdev_info(dev, "Coalescing is limited to %d microseconds\n",
- GFAR_MAX_COAL_USECS);
- return -EINVAL;
- }
-
- if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
- netdev_info(dev, "Coalescing is limited to %d frames\n",
- GFAR_MAX_COAL_FRAMES);
- return -EINVAL;
- }
-
for (i = 0; i < priv->num_tx_queues; i++) {
priv->tx_queue[i]->txic = mk_ic_value(
cvals->tx_max_coalesced_frames,
gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
}
- gfar_configure_coalescing_all(priv);
+ if (dev->flags & IFF_UP) {
+ stop_gfar(dev);
+ err = startup_gfar(dev);
+ } else {
+ gfar_mac_reset(priv);
+ }
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
- return 0;
+ return err;
}
/* Fills in rvals with the current ring parameters. Currently,
@@ -467,15 +469,13 @@ static void gfar_gringparam(struct net_device *dev,
}
/* Change the current ring parameters, stopping the controller if
- * necessary so that we don't mess things up while we're in
- * motion. We wait for the ring to be clean before reallocating
- * the rings.
+ * necessary so that we don't mess things up while we're in motion.
*/
static int gfar_sringparam(struct net_device *dev,
struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
- int err = 0, i = 0;
+ int err = 0, i;
if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
return -EINVAL;
@@ -493,44 +493,25 @@ static int gfar_sringparam(struct net_device *dev,
return -EINVAL;
}
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- if (dev->flags & IFF_UP) {
- unsigned long flags;
-
- /* Halt TX and RX, and process the frames which
- * have already been received
- */
- local_irq_save(flags);
- lock_tx_qs(priv);
- lock_rx_qs(priv);
-
- gfar_halt(dev);
-
- unlock_rx_qs(priv);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- for (i = 0; i < priv->num_rx_queues; i++)
- gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
-
- /* Now we take down the rings to rebuild them */
+ if (dev->flags & IFF_UP)
stop_gfar(dev);
- }
- /* Change the size */
- for (i = 0; i < priv->num_rx_queues; i++) {
+ /* Change the sizes */
+ for (i = 0; i < priv->num_rx_queues; i++)
priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
- priv->tx_queue[i]->num_txbdfree =
- priv->tx_queue[i]->tx_ring_size;
- }
/* Rebuild the rings with the new size */
- if (dev->flags & IFF_UP) {
+ if (dev->flags & IFF_UP)
err = startup_gfar(dev);
- netif_tx_wake_all_queues(dev);
- }
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
return err;
}
@@ -608,43 +589,29 @@ static int gfar_spauseparam(struct net_device *dev,
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
- struct gfar_private *priv = netdev_priv(dev);
- unsigned long flags;
- int err = 0, i = 0;
netdev_features_t changed = dev->features ^ features;
+ struct gfar_private *priv = netdev_priv(dev);
+ int err = 0;
- if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
- gfar_vlan_mode(dev, features);
-
- if (!(changed & NETIF_F_RXCSUM))
+ if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_RXCSUM)))
return 0;
- if (dev->flags & IFF_UP) {
- /* Halt TX and RX, and process the frames which
- * have already been received
- */
- local_irq_save(flags);
- lock_tx_qs(priv);
- lock_rx_qs(priv);
-
- gfar_halt(dev);
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- unlock_tx_qs(priv);
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- for (i = 0; i < priv->num_rx_queues; i++)
- gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
+ dev->features = features;
+ if (dev->flags & IFF_UP) {
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
-
- dev->features = features;
-
err = startup_gfar(dev);
- netif_tx_wake_all_queues(dev);
+ } else {
+ gfar_mac_reset(priv);
}
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
return err;
}
@@ -1610,9 +1577,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
if (tab->index > MAX_FILER_IDX - 1)
return -EBUSY;
- /* Avoid inconsistent filer table to be processed */
- lock_rx_qs(priv);
-
/* Fill regular entries */
for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
i++)
@@ -1625,8 +1589,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
*/
gfar_write_filer(priv, i, 0x20, 0x0);
- unlock_rx_qs(priv);
-
return 0;
}
@@ -1831,6 +1793,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
struct gfar_private *priv = netdev_priv(dev);
int ret = 0;
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return -EBUSY;
+
mutex_lock(&priv->rx_queue_access);
switch (cmd->cmd) {
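Aside: the ethtool paths above serialize device reconfiguration with a single GFAR_RESETTING bit, taken via test_and_set_bit_lock() and released with clear_bit_unlock(). The sketch below is a user-space analogue of that pattern using C11 atomics — the flag name and helpers are illustrative, not the kernel API:

/* User-space analogue of the GFAR_RESETTING bit lock: spin until the
 * flag is acquired, do the reconfiguration, then release it.  The real
 * driver uses test_and_set_bit_lock()/clear_bit_unlock() on priv->state
 * with cpu_relax() in the wait loop. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

static void reset_lock(void)
{
	/* spin until we own the flag; acquire semantics on success */
	while (atomic_flag_test_and_set_explicit(&resetting,
						 memory_order_acquire))
		; /* a cpu_relax() equivalent would go here */
}

static void reset_unlock(void)
{
	atomic_flag_clear_explicit(&resetting, memory_order_release);
}

int main(void)
{
	reset_lock();
	/* ... stop, reconfigure and restart the (imaginary) device ... */
	reset_unlock();
	puts("reset section serialized");
	return 0;
}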
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index abc28da27042..bb568006f37d 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -414,6 +414,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
.n_alarm = 0,
.n_ext_ts = N_EXT_TS,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 1,
.adjfreq = ptp_gianfar_adjfreq,
.adjtime = ptp_gianfar_adjtime,
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
deleted file mode 100644
index e02dd1378751..000000000000
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * drivers/net/ethernet/freescale/gianfar_sysfs.c
- *
- * Gianfar Ethernet Driver
- * This driver is designed for the non-CPM ethernet controllers
- * on the 85xx and 83xx family of integrated processors
- * Based on 8260_io/fcc_enet.c
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala (galak@kernel.crashing.org)
- * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
- *
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Sysfs file creation and management
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/device.h>
-
-#include <asm/uaccess.h>
-#include <linux/module.h>
-
-#include "gianfar.h"
-
-static ssize_t gfar_show_bd_stash(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
-}
-
-static ssize_t gfar_set_bd_stash(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- int new_setting = 0;
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
- return count;
-
-
- /* Find out the new setting */
- if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
- new_setting = 1;
- else if (!strncmp("off", buf, count - 1) ||
- !strncmp("0", buf, count - 1))
- new_setting = 0;
- else
- return count;
-
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- /* Set the new stashing value */
- priv->bd_stash_en = new_setting;
-
- temp = gfar_read(&regs->attr);
-
- if (new_setting)
- temp |= ATTR_BDSTASH;
- else
- temp &= ~(ATTR_BDSTASH);
-
- gfar_write(&regs->attr, temp);
-
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);
-
-static ssize_t gfar_show_rx_stash_size(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->rx_stash_size);
-}
-
-static ssize_t gfar_set_rx_stash_size(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int length = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
- return count;
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- if (length > priv->rx_buffer_size)
- goto out;
-
- if (length == priv->rx_stash_size)
- goto out;
-
- priv->rx_stash_size = length;
-
- temp = gfar_read(&regs->attreli);
- temp &= ~ATTRELI_EL_MASK;
- temp |= ATTRELI_EL(length);
- gfar_write(&regs->attreli, temp);
-
- /* Turn stashing on/off as appropriate */
- temp = gfar_read(&regs->attr);
-
- if (length)
- temp |= ATTR_BUFSTASH;
- else
- temp &= ~(ATTR_BUFSTASH);
-
- gfar_write(&regs->attr, temp);
-
-out:
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size,
- gfar_set_rx_stash_size);
-
-/* Stashing will only be enabled when rx_stash_size != 0 */
-static ssize_t gfar_show_rx_stash_index(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->rx_stash_index);
-}
-
-static ssize_t gfar_set_rx_stash_index(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned short index = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
- return count;
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- if (index > priv->rx_stash_size)
- goto out;
-
- if (index == priv->rx_stash_index)
- goto out;
-
- priv->rx_stash_index = index;
-
- temp = gfar_read(&regs->attreli);
- temp &= ~ATTRELI_EI_MASK;
- temp |= ATTRELI_EI(index);
- gfar_write(&regs->attreli, temp);
-
-out:
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index,
- gfar_set_rx_stash_index);
-
-static ssize_t gfar_show_fifo_threshold(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_threshold);
-}
-
-static ssize_t gfar_set_fifo_threshold(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int length = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (length > GFAR_MAX_FIFO_THRESHOLD)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_threshold = length;
-
- temp = gfar_read(&regs->fifo_tx_thr);
- temp &= ~FIFO_TX_THR_MASK;
- temp |= length;
- gfar_write(&regs->fifo_tx_thr, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold,
- gfar_set_fifo_threshold);
-
-static ssize_t gfar_show_fifo_starve(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_starve);
-}
-
-static ssize_t gfar_set_fifo_starve(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int num = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (num > GFAR_MAX_FIFO_STARVE)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_starve = num;
-
- temp = gfar_read(&regs->fifo_tx_starve);
- temp &= ~FIFO_TX_STARVE_MASK;
- temp |= num;
- gfar_write(&regs->fifo_tx_starve, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve,
- gfar_set_fifo_starve);
-
-static ssize_t gfar_show_fifo_starve_off(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_starve_off);
-}
-
-static ssize_t gfar_set_fifo_starve_off(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int num = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (num > GFAR_MAX_FIFO_STARVE_OFF)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_starve_off = num;
-
- temp = gfar_read(&regs->fifo_tx_starve_shutoff);
- temp &= ~FIFO_TX_STARVE_OFF_MASK;
- temp |= num;
- gfar_write(&regs->fifo_tx_starve_shutoff, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off,
- gfar_set_fifo_starve_off);
-
-void gfar_init_sysfs(struct net_device *dev)
-{
- struct gfar_private *priv = netdev_priv(dev);
- int rc;
-
- /* Initialize the default values */
- priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
- priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
- priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
-
- /* Create our sysfs files */
- rc = device_create_file(&dev->dev, &dev_attr_bd_stash);
- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size);
- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
- if (rc)
- dev_err(&dev->dev, "Error creating gianfar sysfs files\n");
-}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 72291a8904a9..c8299c31b21f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3261,7 +3261,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
dev->stats.tx_packets++;
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
ugeth->skb_dirtytx[txQ] =
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 17fca323c143..c984998b34a0 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -993,7 +993,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->name));
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
} else {
if (++lp->next_tx_cmd == TX_RING_SIZE)
lp->next_tx_cmd = 0;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 7628e0fd8455..538903bf13bc 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -490,7 +490,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
skb_arr[index] = skb;
tmp_addr = ehea_map_vaddr(skb->data);
if (tmp_addr == -1) {
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
q_skba->os_skbs = fill_wqes - i;
ret = 0;
break;
@@ -856,7 +856,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
skb = pr->sq_skba.arr[index];
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
pr->sq_skba.arr[index] = NULL;
}
@@ -2044,7 +2044,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
skb_copy_bits(skb, 0, imm_data, skb->len);
swqe->immediate_data_length = skb->len;
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
}
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 1fc8334fc181..c9127562bd22 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1044,7 +1044,7 @@ retry_bounce:
DMA_TO_DEVICE);
out:
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
map_failed_frags:
@@ -1072,7 +1072,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
unsigned long lpar_rc;
restart_poll:
- do {
+ while (frames_processed < budget) {
if (!ibmveth_rxq_pending_buffer(adapter))
break;
@@ -1121,7 +1121,7 @@ restart_poll:
netdev->stats.rx_bytes += length;
frames_processed++;
}
- } while (frames_processed < budget);
+ }
ibmveth_replenish_task(adapter);
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index bf7a01ef9a57..b56461ce674c 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1778,9 +1778,9 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
* testing, ie sending frames with bad CRC.
*/
if (unlikely(skb->no_fcs))
- cb->command |= __constant_cpu_to_le16(cb_tx_nc);
+ cb->command |= cpu_to_le16(cb_tx_nc);
else
- cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);
+ cb->command &= ~cpu_to_le16(cb_tx_nc);
/* interrupt every 16 packets regardless of delay */
if ((nic->cbs_avail & ~15) == nic->cbs_avail)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index ff2d806eaef7..a5f6b11d6992 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
* 80003ES2LAN Gigabit Ethernet Controller (Serdes)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index 90d363b2d280..535a9430976d 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_80003ES2LAN_H_
#define _E1000E_80003ES2LAN_H_
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 8fed74e3fa53..e0aa7f1efb08 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* 82571EB Gigabit Ethernet Controller
* 82571EB Gigabit Ethernet Controller (Copper)
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 08e24dc3dc0e..2e758f796d60 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_82571_H_
#define _E1000E_82571_H_
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index c2dcfcc10857..106de493373c 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
+# Copyright(c) 1999 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 351c94a0cf74..d18e89212575 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
@@ -35,9 +28,11 @@
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
-#define E1000_WUC_APME 0x00000001 /* APM Enable */
-#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
-#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
/* Wake Up Filter Control */
#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 0150f7fc893d..1471c5464a89 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* Linux PRO/1000 Ethernet Driver main header file */
@@ -269,6 +262,7 @@ struct e1000_adapter {
u32 tx_head_addr;
u32 tx_fifo_size;
u32 tx_dma_failed;
+ u32 tx_hwtstamp_timeouts;
/* Rx */
bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
@@ -333,7 +327,6 @@ struct e1000_adapter {
struct work_struct update_phy_task;
struct work_struct print_hang_task;
- bool idle_check;
int phy_hang_count;
u16 tx_ring_count;
@@ -342,6 +335,7 @@ struct e1000_adapter {
struct hwtstamp_config hwtstamp_config;
struct delayed_work systim_overflow_work;
struct sk_buff *tx_hwtstamp_skb;
+ unsigned long tx_hwtstamp_start;
struct work_struct tx_hwtstamp_work;
spinlock_t systim_lock; /* protects SYSTIML/H regsters */
struct cyclecounter cc;
@@ -476,7 +470,7 @@ void e1000e_check_options(struct e1000_adapter *adapter);
void e1000e_set_ethtool_ops(struct net_device *netdev);
int e1000e_up(struct e1000_adapter *adapter);
-void e1000e_down(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter, bool reset);
void e1000e_reinit_locked(struct e1000_adapter *adapter);
void e1000e_reset(struct e1000_adapter *adapter);
void e1000e_power_up_phy(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d14c8f53384c..cad250bc1b99 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* ethtool support for e1000 */
@@ -111,6 +104,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
E1000_STAT("uncorr_ecc_errors", uncorr_errors),
E1000_STAT("corr_ecc_errors", corr_errors),
+ E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
};
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
@@ -332,7 +326,7 @@ static int e1000_set_settings(struct net_device *netdev,
/* reset the link */
if (netif_running(adapter->netdev)) {
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000e_up(adapter);
} else {
e1000e_reset(adapter);
@@ -380,7 +374,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
if (netif_running(adapter->netdev)) {
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000e_up(adapter);
} else {
e1000e_reset(adapter);
@@ -726,7 +720,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
pm_runtime_get_sync(netdev->dev.parent);
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
/* We can't just free everything and then setup again, because the
* ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
@@ -924,15 +918,21 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
}
if (mac->type == e1000_pch2lan) {
/* SHRAH[0,1,2] different than previous */
- if (i == 7)
+ if (i == 1)
mask &= 0xFFF4FFFF;
/* SHRAH[3] different than SHRAH[0,1,2] */
- if (i == 10)
+ if (i == 4)
mask |= (1 << 30);
+ /* RAR[1-6] owned by management engine - skipping */
+ if (i > 0)
+ i += 6;
}
REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
0xFFFFFFFF);
+ /* reset index to actual value */
+ if ((mac->type == e1000_pch2lan) && (i > 6))
+ i -= 6;
}
for (i = 0; i < mac->mta_reg_count; i++)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index b7f38435d1fd..6b3de5f39a97 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
@@ -655,12 +648,20 @@ struct e1000_shadow_ram {
#define E1000_ICH8_SHADOW_RAM_WORDS 2048
+/* I218 PHY Ultra Low Power (ULP) states */
+enum e1000_ulp_state {
+ e1000_ulp_state_unknown,
+ e1000_ulp_state_off,
+ e1000_ulp_state_on,
+};
+
struct e1000_dev_spec_ich8lan {
bool kmrn_lock_loss_workaround_enabled;
struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
bool nvm_k1_enabled;
bool eee_disable;
u16 eee_lp_ability;
+ enum e1000_ulp_state ulp_state;
};
struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 42f0f6717511..9866f264f55e 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* 82562G 10/100 Network Connection
* 82562G-2 10/100 Network Connection
@@ -53,6 +46,14 @@
* 82578DC Gigabit Network Connection
* 82579LM Gigabit Network Connection
* 82579V Gigabit Network Connection
+ * Ethernet Connection I217-LM
+ * Ethernet Connection I217-V
+ * Ethernet Connection I218-V
+ * Ethernet Connection I218-LM
+ * Ethernet Connection (2) I218-LM
+ * Ethernet Connection (2) I218-V
+ * Ethernet Connection (3) I218-LM
+ * Ethernet Connection (3) I218-V
*/
#include "e1000.h"
@@ -142,7 +143,9 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -239,6 +242,47 @@ out:
}
/**
+ * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
+ * @hw: pointer to the HW structure
+ *
+ * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
+ * used to reset the PHY to a quiescent state when necessary.
+ **/
+static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+
+ /* Set Phy Config Counter to 50msec */
+ mac_reg = er32(FEXTNVM3);
+ mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+ mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+ ew32(FEXTNVM3, mac_reg);
+
+ /* Toggle LANPHYPC Value bit */
+ mac_reg = er32(CTRL);
+ mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
+ ew32(CTRL, mac_reg);
+ e1e_flush();
+ usleep_range(10, 20);
+ mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ ew32(CTRL, mac_reg);
+ e1e_flush();
+
+ if (hw->mac.type < e1000_pch_lpt) {
+ msleep(50);
+ } else {
+ u16 count = 20;
+
+ do {
+ usleep_range(5000, 10000);
+ } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
+
+ msleep(30);
+ }
+}
+
+/**
* e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
* @hw: pointer to the HW structure
*
@@ -247,6 +291,7 @@ out:
**/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
+ struct e1000_adapter *adapter = hw->adapter;
u32 mac_reg, fwsm = er32(FWSM);
s32 ret_val;
@@ -255,6 +300,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
e1000_gate_hw_phy_config_ich8lan(hw, true);
+ /* It is not possible to be certain of the current state of ULP
+ * so forcibly disable it.
+ */
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
+ e1000_disable_ulp_lpt_lp(hw, true);
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
e_dbg("Failed to initialize PHY flow\n");
@@ -300,33 +351,9 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
break;
}
- e_dbg("Toggling LANPHYPC\n");
-
- /* Set Phy Config Counter to 50msec */
- mac_reg = er32(FEXTNVM3);
- mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
- mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
- ew32(FEXTNVM3, mac_reg);
-
/* Toggle LANPHYPC Value bit */
- mac_reg = er32(CTRL);
- mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
- mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
- ew32(CTRL, mac_reg);
- e1e_flush();
- usleep_range(10, 20);
- mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
- ew32(CTRL, mac_reg);
- e1e_flush();
- if (hw->mac.type < e1000_pch_lpt) {
- msleep(50);
- } else {
- u16 count = 20;
- do {
- usleep_range(5000, 10000);
- } while (!(er32(CTRL_EXT) &
- E1000_CTRL_EXT_LPCD) && count--);
- usleep_range(30000, 60000);
+ e1000_toggle_lanphypc_pch_lpt(hw);
+ if (hw->mac.type >= e1000_pch_lpt) {
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -349,12 +376,31 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
hw->phy.ops.release(hw);
if (!ret_val) {
+
+ /* Check to see if able to reset PHY. Print error if not */
+ if (hw->phy.ops.check_reset_block(hw)) {
+ e_err("Reset blocked by ME\n");
+ goto out;
+ }
+
 /* Reset the PHY before any access to it. Doing so ensures
* that the PHY is in a known good state before we read/write
* PHY registers. The generic reset is sufficient here,
* because we haven't determined the PHY type yet.
*/
ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ /* On a successful reset, possibly need to wait for the PHY
+ * to quiesce to an accessible state before returning control
+ * to the calling function. If the PHY does not quiesce, then
+ * return E1000_BLK_PHY_RESET, as this is the condition that
+ * the PHY is in.
+ */
+ ret_val = hw->phy.ops.check_reset_block(hw);
+ if (ret_val)
+ e_err("ME blocked access to PHY after reset\n");
}
out:
@@ -724,8 +770,14 @@ s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
* Enable/disable EEE based on setting in dev_spec structure, the duplex of
* the link and the EEE capabilities of the link partner. The LPI Control
* register bits will remain set only if/when link is up.
+ *
+ * EEE LPI must not be asserted earlier than one second after link is up.
+ * On 82579, EEE LPI should not be enabled until such time; otherwise, there
+ * can be link issues with some switches. Other devices can have EEE LPI
+ * enabled immediately upon link up since they have a timer in hardware which
+ * prevents LPI from being asserted too early.
**/
-static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
s32 ret_val;
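
[Note] The one-second LPI constraint described above is met in two places in this patch: the link-check path now calls e1000_set_eee_pchlan() only for PHYs newer than the 82579, while the 82579 itself has EEE enabled from the delayed PHY-info work in netdev.c, which runs roughly two seconds after link up. A condensed sketch of that deferred call, mirroring the update_phy_task hunk later in this diff:

	/* 82579: enable EEE only from the delayed PHY info path (~2 s
	 * after link up), which satisfies the one-second LPI constraint
	 */
	if (hw->phy.type == e1000_phy_82579)
		e1000_set_eee_pchlan(hw);
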
@@ -979,6 +1031,253 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
}
/**
+ * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @to_sx: boolean indicating a system power state transition to Sx
+ *
+ * When link is down, configure ULP mode to significantly reduce the power
+ * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
+ * ME firmware to start the ULP configuration. If not on an ME enabled
+ * system, configure the ULP mode by software.
+ */
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
+{
+ u32 mac_reg;
+ s32 ret_val = 0;
+ u16 phy_reg;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
+ return 0;
+
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ /* Request ME configure ULP mode in the PHY */
+ mac_reg = er32(H2ME);
+ mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+
+ goto out;
+ }
+
+ if (!to_sx) {
+ int i = 0;
+
+ /* Poll up to 5 seconds for Cable Disconnected indication */
+ while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
+ /* Bail if link is re-acquired */
+ if (er32(STATUS) & E1000_STATUS_LU)
+ return -E1000_ERR_PHY;
+
+ if (i++ == 100)
+ break;
+
+ msleep(50);
+ }
+ e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
+ (er32(FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* Force SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Force SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ /* Set Inband ULP Exit, Reset to SMBus mode and
+ * Disable SMBus Release on PERST# in PHY
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ if (to_sx) {
+ if (er32(WUFC) & E1000_WUFC_LNKC)
+ phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+
+ phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+ } else {
+ phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+ }
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Set Disable SMBus Release on PERST# in MAC */
+ mac_reg = er32(FEXTNVM7);
+ mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ ew32(FEXTNVM7, mac_reg);
+
+ /* Commit ULP changes in PHY by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+release:
+ hw->phy.ops.release(hw);
+out:
+ if (ret_val)
+ e_dbg("Error in ULP enable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
+
+ return ret_val;
+}
+
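
[Note] For reference, the intended caller of this helper is the suspend path added later in this patch; a condensed sketch of that call site in __e1000_shutdown(), shown only to illustrate the to_sx argument (true for a system Sx transition, false for runtime suspend):

	/* ULP cannot wake on unicast, multicast or broadcast, so only
	 * request it when none of those wake filters are armed
	 */
	if (hw->mac.type == e1000_pch_lpt &&
	    !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
		retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
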
+/**
+ * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @force: boolean indicating whether or not to force disabling ULP
+ *
+ * Un-configure ULP mode when link is up, when the system transitions from
+ * Sx, or when the driver is unloaded. If on a Manageability Engine (ME) enabled
+ * system, poll for an indication from ME that ULP has been un-configured.
+ * If not on an ME enabled system, un-configure the ULP mode by software.
+ *
+ * During nominal operation, this function is called when link is acquired
+ * to disable ULP mode (force=false); otherwise, for example when unloading
+ * the driver or during Sx->S0 transitions, this is called with force=true
+ * to forcibly disable ULP.
+ */
+static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
+{
+ s32 ret_val = 0;
+ u32 mac_reg;
+ u16 phy_reg;
+ int i = 0;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
+ return 0;
+
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (force) {
+ /* Request ME un-configure ULP mode in the PHY */
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+ }
+
+ /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
+ while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
+ if (i++ == 10) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ usleep_range(10000, 20000);
+ }
+ e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+
+ if (force) {
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+ } else {
+ /* Clear H2ME.ULP after ME ULP configuration */
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ ew32(H2ME, mac_reg);
+ }
+
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ if (force)
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+
+ /* Unforce SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val) {
+ /* The MAC might be in PCIe mode, so temporarily force to
+ * SMBus mode in order to access the PHY.
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ msleep(50);
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ }
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ /* When ULP mode was previously entered, K1 was disabled by the
+ * hardware. Re-enable K1 in the PHY when exiting ULP.
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= HV_PM_CTRL_K1_ENABLE;
+ e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
+
+ /* Clear ULP enabled configuration */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg &= ~(I218_ULP_CONFIG1_IND |
+ I218_ULP_CONFIG1_STICKY_ULP |
+ I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_WOL_HOST |
+ I218_ULP_CONFIG1_INBAND_EXIT |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Commit ULP changes by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Clear Disable SMBus Release on PERST# in MAC */
+ mac_reg = er32(FEXTNVM7);
+ mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ ew32(FEXTNVM7, mac_reg);
+
+release:
+ hw->phy.ops.release(hw);
+ if (force) {
+ e1000_phy_hw_reset(hw);
+ msleep(50);
+ }
+out:
+ if (ret_val)
+ e_dbg("Error in ULP disable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
+
+ return ret_val;
+}
+
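
[Note] As a usage note, the force parameter is exercised earlier in this patch from the PHY init workarounds, where the previous ULP state cannot be trusted; a condensed sketch of that call site:

	/* state is unknown after a reset, so clear it and force the exit;
	 * on the software path, force also toggles LANPHYPC and resets the
	 * PHY once ULP has been unconfigured
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	e1000_disable_ulp_lpt_lp(hw, true);
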
+/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
*
@@ -1106,9 +1405,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
e1000e_check_downshift(hw);
/* Enable/Disable EEE after link up */
- ret_val = e1000_set_eee_pchlan(hw);
- if (ret_val)
- return ret_val;
+ if (hw->phy.type > e1000_phy_82579) {
+ ret_val = e1000_set_eee_pchlan(hw);
+ if (ret_val)
+ return ret_val;
+ }
/* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
@@ -1374,7 +1675,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
/* RAR[1-6] are owned by manageability. Skip those and program the
* next address into the SHRA register array.
*/
- if (index < (u32)(hw->mac.rar_entry_count - 6)) {
+ if (index < (u32)(hw->mac.rar_entry_count)) {
s32 ret_val;
ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1484,11 +1785,13 @@ out:
**/
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
- u32 fwsm;
+ bool blocked = false;
+ int i = 0;
- fwsm = er32(FWSM);
-
- return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
+ while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
+ (i++ < 10))
+ usleep_range(10000, 20000);
+ return blocked ? E1000_BLK_PHY_RESET : 0;
}
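
[Note] With this change the reset-block query retries roughly every 10-20 ms, up to ten times, before concluding that the Manageability Engine still owns the PHY; the return contract is unchanged, as in the caller added earlier in this patch:

	/* 0 means a PHY reset is allowed; E1000_BLK_PHY_RESET means the
	 * ME firmware is still holding the PHY
	 */
	if (hw->phy.ops.check_reset_block(hw)) {
		e_err("Reset blocked by ME\n");
		goto out;
	}
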
/**
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 217090df33e7..bead50f9187b 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_ICH8LAN_H_
#define _E1000E_ICH8LAN_H_
@@ -65,11 +58,16 @@
#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
#define E1000_FWSM_WLOCK_MAC_SHIFT 7
+#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */
/* Shared Receive Address Registers */
#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
+#define E1000_H2ME 0x05B50 /* Host to ME */
+#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */
+#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
+
#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_OFF1_ON2 << 4) | \
@@ -82,6 +80,9 @@
#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+/* FEXT register bit definition */
+#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004
+
#define E1000_FEXTNVM_SW_CONFIG 1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
@@ -95,10 +96,12 @@
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
+#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
-#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */
+#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
#define PHY_PAGE_SHIFT 5
@@ -161,6 +164,16 @@
#define CV_SMB_CTRL PHY_REG(769, 23)
#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
+/* I218 Ultra Low Power Configuration 1 Register */
+#define I218_ULP_CONFIG1 PHY_REG(779, 16)
+#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */
+#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */
+#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */
+#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */
+#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */
+#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
+#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
+
/* SMBus Address Phy Register */
#define HV_SMB_ADDR PHY_REG(768, 26)
#define HV_SMB_ADDR_MASK 0x007F
@@ -195,6 +208,7 @@
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_ENABLE 0x4000
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
@@ -268,4 +282,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw);
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx);
#endif /* _E1000E_ICH8LAN_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 2480c1091873..baa0a466d1d0 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index a61fee404ebe..4e81c2825b7a 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_MAC_H_
#define _E1000E_MAC_H_
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index e4b0f1ef92f6..cb37ff1f1321 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index 326897c29ea8..a8c27f98f7b0 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_MANAGE_H_
#define _E1000E_MANAGE_H_
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6d91933c4cdd..dce377b59b2c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -885,7 +878,7 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
struct sk_buff *skb)
{
if (netdev->features & NETIF_F_RXHASH)
- skb->rxhash = le32_to_cpu(rss);
+ skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}
/**
@@ -1097,8 +1090,14 @@ static void e1000_print_hw_hang(struct work_struct *work)
adapter->tx_hang_recheck = true;
return;
}
- /* Real hang detected */
adapter->tx_hang_recheck = false;
+
+ if (er32(TDH(0)) == er32(TDT(0))) {
+ e_dbg("false hang detected, ignoring\n");
+ return;
+ }
+
+ /* Real hang detected */
netif_stop_queue(netdev);
e1e_rphy(hw, MII_BMSR, &phy_status);
@@ -1128,6 +1127,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
phy_status, phy_1000t_status, phy_ext_status, pci_status);
+ e1000e_dump(adapter);
+
/* Suggest workaround for known h/w issue */
if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
e_err("Try turning off Tx pause (flow control) via ethtool\n");
@@ -1147,9 +1148,6 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
tx_hwtstamp_work);
struct e1000_hw *hw = &adapter->hw;
- if (!adapter->tx_hwtstamp_skb)
- return;
-
if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
struct skb_shared_hwtstamps shhwtstamps;
u64 txstmp;
@@ -1162,6 +1160,12 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
adapter->tx_hwtstamp_skb = NULL;
+ } else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ + adapter->tx_timeout_factor * HZ)) {
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ adapter->tx_hwtstamp_timeouts++;
+ e_warn("clearing Tx timestamp hang\n");
} else {
/* reschedule to check later */
schedule_work(&adapter->tx_hwtstamp_work);
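
[Note] The timeout above depends on tx_hwtstamp_start being recorded when the timestamp request is queued; the transmit-path hunk later in this patch does exactly that, sketched here for context:

	/* stamp the request time so the worker can give up after
	 * tx_timeout_factor seconds instead of rescheduling forever
	 */
	adapter->tx_hwtstamp_skb = skb_get(skb);
	adapter->tx_hwtstamp_start = jiffies;
	schedule_work(&adapter->tx_hwtstamp_work);
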
@@ -1701,7 +1705,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
writel(0, rx_ring->head);
- if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, 0);
else
writel(0, rx_ring->tail);
@@ -2038,13 +2042,16 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
msix_entry),
GFP_KERNEL);
if (adapter->msix_entries) {
+ struct e1000_adapter *a = adapter;
+
for (i = 0; i < adapter->num_vectors; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- adapter->num_vectors);
- if (err == 0)
+ err = pci_enable_msix_range(a->pdev,
+ a->msix_entries,
+ a->num_vectors,
+ a->num_vectors);
+ if (err > 0)
return;
}
/* MSI-X failed, so fall through and try MSI */
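
[Note] The switch from pci_enable_msix() to pci_enable_msix_range() changes the success convention: the old call returned 0 on success, while the range variant returns the number of vectors actually granted (a positive count) or a negative errno, hence the success test changes from err == 0 to err > 0. Passing num_vectors as both minimum and maximum keeps the previous all-or-nothing behaviour. A hedged sketch of the general pattern (pdev, entries, min_vecs and max_vecs are placeholder names):

	int nvec;

	/* a positive return is the vector count actually allocated
	 * (min_vecs <= nvec <= max_vecs); a negative return is an errno
	 */
	nvec = pci_enable_msix_range(pdev, entries, min_vecs, max_vecs);
	if (nvec < 0)
		return nvec;	/* MSI-X unavailable, fall back to MSI */
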
@@ -2402,7 +2409,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
tx_ring->next_to_clean = 0;
writel(0, tx_ring->head);
- if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_tdt_wa(tx_ring, 0);
else
writel(0, tx_ring->tail);
@@ -2894,7 +2901,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *tx_ring = adapter->tx_ring;
u64 tdba;
- u32 tdlen, tarc;
+ u32 tdlen, tctl, tarc;
/* Setup the HW Tx Head and Tail descriptor pointers */
tdba = tx_ring->dma;
@@ -2931,6 +2938,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* erratum work around: set txdctl the same for both queues */
ew32(TXDCTL(1), er32(TXDCTL(0)));
+ /* Program the Transmit Control Register */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
tarc = er32(TARC(0));
/* set the speed mode bit, we'll clear it if we're not at
@@ -2961,6 +2974,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* enable Report Status bit */
adapter->txd_cmd |= E1000_TXD_CMD_RS;
+ ew32(TCTL, tctl);
+
hw->mac.ops.config_collision_dist(hw);
}
@@ -2976,11 +2991,21 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
u32 rctl, rfctl;
u32 pages = 0;
- /* Workaround Si errata on PCHx - configure jumbo frame flow */
- if ((hw->mac.type >= e1000_pch2lan) &&
- (adapter->netdev->mtu > ETH_DATA_LEN) &&
- e1000_lv_jumbo_workaround_ich8lan(hw, true))
- e_dbg("failed to enable jumbo frame workaround mode\n");
+ /* Workaround Si errata on PCHx - configure jumbo frame flow.
+ * If jumbo frames not set, program related MAC/PHY registers
+ * to h/w defaults
+ */
+ if (hw->mac.type >= e1000_pch2lan) {
+ s32 ret_val;
+
+ if (adapter->netdev->mtu > ETH_DATA_LEN)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+ if (ret_val)
+ e_dbg("failed to enable|disable jumbo frame workaround mode\n");
+ }
/* Program MC offset vector base */
rctl = er32(RCTL);
@@ -3331,6 +3356,9 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
+ if (pm_runtime_suspended(netdev->dev.parent))
+ return;
+
/* Check for Promiscuous and All Multicast modes */
rctl = er32(RCTL);
@@ -3691,10 +3719,6 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
*/
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
- /* WoL is enabled */
- if (adapter->wol)
- return;
-
if (adapter->hw.phy.ops.power_down)
adapter->hw.phy.ops.power_down(&adapter->hw);
}
@@ -3911,10 +3935,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
}
if (!netif_running(adapter->netdev) &&
- !test_bit(__E1000_TESTING, &adapter->state)) {
+ !test_bit(__E1000_TESTING, &adapter->state))
e1000_power_down_phy(adapter);
- return;
- }
e1000_get_phy_info(hw);
@@ -3981,7 +4003,12 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
static void e1000e_update_stats(struct e1000_adapter *adapter);
-void e1000e_down(struct e1000_adapter *adapter)
+/**
+ * e1000e_down - quiesce the device and optionally reset the hardware
+ * @adapter: board private structure
+ * @reset: boolean flag to reset the hardware or not
+ */
+void e1000e_down(struct e1000_adapter *adapter, bool reset)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
@@ -4035,12 +4062,8 @@ void e1000e_down(struct e1000_adapter *adapter)
e1000_lv_jumbo_workaround_ich8lan(hw, false))
e_dbg("failed to disable jumbo frame workaround mode\n");
- if (!pci_channel_offline(adapter->pdev))
+ if (reset && !pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
-
- /* TODO: for power management, we could drop the link and
- * pci_disable_device here.
- */
}
void e1000e_reinit_locked(struct e1000_adapter *adapter)
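
[Note] The new reset flag lets the power-management paths quiesce the device without the full e1000e_reset() that normal teardown performs; the freeze and runtime-suspend hunks later in this patch rely on that, e.g.:

	/* Quiesce Tx/Rx and free the IRQ but keep the current hardware
	 * state, so resume does not have to rebuild it from scratch
	 */
	e1000e_down(adapter, false);
	e1000_free_irq(adapter);
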
@@ -4048,7 +4071,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
might_sleep();
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000e_up(adapter);
clear_bit(__E1000_RESETTING, &adapter->state);
}
@@ -4326,7 +4349,6 @@ static int e1000_open(struct net_device *netdev)
adapter->tx_hang_recheck = false;
netif_start_queue(netdev);
- adapter->idle_check = true;
hw->mac.get_link_status = true;
pm_runtime_put(&pdev->dev);
@@ -4376,14 +4398,15 @@ static int e1000_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000_free_irq(adapter);
+
+ /* Link status message must follow this format */
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name);
}
napi_disable(&adapter->napi);
- e1000_power_down_phy(adapter);
-
e1000e_free_tx_resources(adapter->tx_ring);
e1000e_free_rx_resources(adapter->rx_ring);
@@ -4460,11 +4483,16 @@ static void e1000e_update_phy_task(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
update_phy_task);
+ struct e1000_hw *hw = &adapter->hw;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
- e1000_get_phy_info(&adapter->hw);
+ e1000_get_phy_info(hw);
+
+ /* Enable EEE on 82579 after link up */
+ if (hw->phy.type == e1000_phy_82579)
+ e1000_set_eee_pchlan(hw);
}
/**
@@ -4799,6 +4827,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
if (adapter->phy_hang_count > 1) {
adapter->phy_hang_count = 0;
+ e_dbg("PHY appears hung - resetting\n");
schedule_work(&adapter->reset_task);
}
}
@@ -4957,15 +4986,11 @@ static void e1000_watchdog_task(struct work_struct *work)
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
- /* The link is lost so the controller stops DMA.
- * If there is queued Tx work that cannot be done
- * or if on an 8000ES2LAN which requires a Rx packet
- * buffer work-around on link down event, reset the
- * controller to flush the Tx/Rx packet buffers.
- * (Do the reset outside of interrupt context).
+ /* 8000ES2LAN requires a Rx packet buffer work-around
+ * on link down event; reset the controller to flush
+ * the Rx packet buffer.
*/
- if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
- (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ if (adapter->flags & FLAG_RX_NEEDS_RESTART)
adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
@@ -4988,6 +5013,15 @@ link_up:
adapter->gotc_old = adapter->stats.gotc;
spin_unlock(&adapter->stats64_lock);
+ /* If the link is lost the controller stops DMA, but
+ * if there is queued Tx work it cannot be done. So
+ * reset the controller to flush the Tx packet buffers.
+ */
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ adapter->flags |= FLAG_RESTART_NOW;
+
+ /* If reset is necessary, do it outside of interrupt context. */
if (adapter->flags & FLAG_RESTART_NOW) {
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
@@ -5546,6 +5580,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
adapter->tx_hwtstamp_skb = skb_get(skb);
+ adapter->tx_hwtstamp_start = jiffies;
schedule_work(&adapter->tx_hwtstamp_work);
} else {
skb_tx_timestamp(skb);
@@ -5684,8 +5719,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->max_frame_size = max_frame;
e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (netif_running(netdev))
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
@@ -5711,6 +5749,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
else
e1000e_reset(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
+
clear_bit(__E1000_RESETTING, &adapter->state);
return 0;
@@ -5852,7 +5892,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
struct e1000_hw *hw = &adapter->hw;
- u32 i, mac_reg;
+ u32 i, mac_reg, wuc;
u16 phy_reg, wuc_enable;
int retval;
@@ -5899,13 +5939,18 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
phy_reg |= BM_RCTL_RFCE;
hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
+ wuc = E1000_WUC_PME_EN;
+ if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
+ wuc |= E1000_WUC_APME;
+
/* enable PHY wakeup in MAC register */
ew32(WUFC, wufc);
- ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+ ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
+ E1000_WUC_PME_STATUS | wuc));
/* configure and enable PHY wakeup in PHY registers */
hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
- hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
/* activate PHY wakeup */
wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
@@ -5918,15 +5963,10 @@ release:
return retval;
}
-static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+static int e1000e_pm_freeze(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, ctrl_ext, rctl, status;
- /* Runtime suspend should only enable wakeup for link changes */
- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
- int retval = 0;
netif_device_detach(netdev);
@@ -5937,11 +5977,29 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
usleep_range(10000, 20000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
- e1000e_down(adapter);
+
+ /* Quiesce the device without resetting the hardware */
+ e1000e_down(adapter, false);
e1000_free_irq(adapter);
}
e1000e_reset_interrupt_capability(adapter);
+ /* Allow time for pending master requests to run */
+ e1000e_disable_pcie_master(&adapter->hw);
+
+ return 0;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, ctrl_ext, rctl, status;
+ /* Runtime suspend should only enable wakeup for link changes */
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+ int retval = 0;
+
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -5972,12 +6030,12 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
ew32(CTRL_EXT, ctrl_ext);
}
+ if (!runtime)
+ e1000e_power_up_phy(adapter);
+
if (adapter->flags & FLAG_IS_ICH)
e1000_suspend_workarounds_ich8lan(&adapter->hw);
- /* Allow time for pending master requests to run */
- e1000e_disable_pcie_master(&adapter->hw);
-
if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
/* enable wakeup by the PHY */
retval = e1000_init_phy_wakeup(adapter, wufc);
@@ -5991,10 +6049,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
} else {
ew32(WUC, 0);
ew32(WUFC, 0);
+
+ e1000_power_down_phy(adapter);
}
- if (adapter->hw.phy.type == e1000_phy_igp_3)
+ if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+ } else if (hw->mac.type == e1000_pch_lpt) {
+ if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ /* ULP does not support wake from unicast, multicast
+ * or broadcast.
+ */
+ retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
+
+ if (retval)
+ return retval;
+ }
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
@@ -6102,18 +6173,12 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
}
#ifdef CONFIG_PM
-static bool e1000e_pm_ready(struct e1000_adapter *adapter)
-{
- return !!adapter->tx_ring->buffer_info;
-}
-
static int __e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u16 aspm_disable_flag = 0;
- u32 err;
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -6124,13 +6189,6 @@ static int __e1000_resume(struct pci_dev *pdev)
pci_set_master(pdev);
- e1000e_set_interrupt_capability(adapter);
- if (netif_running(netdev)) {
- err = e1000_request_irq(adapter);
- if (err)
- return err;
- }
-
if (hw->mac.type >= e1000_pch2lan)
e1000_resume_workarounds_pchlan(&adapter->hw);
@@ -6169,11 +6227,6 @@ static int __e1000_resume(struct pci_dev *pdev)
e1000_init_manageability_pt(adapter);
- if (netif_running(netdev))
- e1000e_up(adapter);
-
- netif_device_attach(netdev);
-
/* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
@@ -6184,75 +6237,111 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
+static int e1000e_pm_thaw(struct device *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ e1000e_set_interrupt_capability(adapter);
+ if (netif_running(netdev)) {
+ u32 err = e1000_request_irq(adapter);
+
+ if (err)
+ return err;
+
+ e1000e_up(adapter);
+ }
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
-static int e1000_suspend(struct device *dev)
+static int e1000e_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ e1000e_pm_freeze(dev);
+
return __e1000_shutdown(pdev, false);
}
-static int e1000_resume(struct device *dev)
+static int e1000e_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc;
- if (e1000e_pm_ready(adapter))
- adapter->idle_check = true;
+ rc = __e1000_resume(pdev);
+ if (rc)
+ return rc;
- return __e1000_resume(pdev);
+ return e1000e_pm_thaw(dev);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
-static int e1000_runtime_suspend(struct device *dev)
+static int e1000e_pm_runtime_idle(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (!e1000e_pm_ready(adapter))
- return 0;
+ if (!e1000e_has_link(adapter))
+ pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
- return __e1000_shutdown(pdev, true);
+ return -EBUSY;
}
-static int e1000_idle(struct device *dev)
+static int e1000e_pm_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc;
- if (!e1000e_pm_ready(adapter))
- return 0;
+ rc = __e1000_resume(pdev);
+ if (rc)
+ return rc;
- if (adapter->idle_check) {
- adapter->idle_check = false;
- if (!e1000e_has_link(adapter))
- pm_schedule_suspend(dev, MSEC_PER_SEC);
- }
+ if (netdev->flags & IFF_UP)
+ rc = e1000e_up(adapter);
- return -EBUSY;
+ return rc;
}
-static int e1000_runtime_resume(struct device *dev)
+static int e1000e_pm_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (!e1000e_pm_ready(adapter))
- return 0;
+ if (netdev->flags & IFF_UP) {
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+ usleep_range(10000, 20000);
+
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+
+ /* Down the device without resetting the hardware */
+ e1000e_down(adapter, false);
+ }
- adapter->idle_check = !dev->power.runtime_auto;
- return __e1000_resume(pdev);
+ if (__e1000_shutdown(pdev, true)) {
+ e1000e_pm_runtime_resume(dev);
+ return -EBUSY;
+ }
+
+ return 0;
}
#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM */
static void e1000_shutdown(struct pci_dev *pdev)
{
+ e1000e_pm_freeze(&pdev->dev);
+
__e1000_shutdown(pdev, false);
}
@@ -6338,7 +6427,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
if (netif_running(netdev))
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
pci_disable_device(pdev);
 /* Request a slot reset. */
@@ -6350,7 +6439,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the e1000_resume routine.
+ * resembles the first-half of the e1000e_pm_resume routine.
*/
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
@@ -6397,7 +6486,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
- * second-half of the e1000_resume routine.
+ * second-half of the e1000e_pm_resume routine.
*/
static void e1000_io_resume(struct pci_dev *pdev)
{
@@ -6902,9 +6991,6 @@ static void e1000_remove(struct pci_dev *pdev)
}
}
- if (!(netdev->flags & IFF_UP))
- e1000_power_down_phy(adapter);
-
/* Don't lie to e1000_close() down the road. */
if (!down)
clear_bit(__E1000_DOWN, &adapter->state);
@@ -7026,9 +7112,16 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
static const struct dev_pm_ops e1000_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
- SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
- e1000_idle)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = e1000e_pm_suspend,
+ .resume = e1000e_pm_resume,
+ .freeze = e1000e_pm_freeze,
+ .thaw = e1000e_pm_thaw,
+ .poweroff = e1000e_pm_suspend,
+ .restore = e1000e_pm_resume,
+#endif
+ SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
+ e1000e_pm_runtime_idle)
};
/* PCI Device API Driver */
@@ -7055,7 +7148,7 @@ static int __init e1000_init_module(void)
int ret;
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index d70a03906ac0..a9a976f04bff 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 45fc69561627..342bf69efab5 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_NVM_H_
#define _E1000E_NVM_H_
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index c16bd75b6caa..d0ac0f3249c8 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/netdevice.h>
#include <linux/module.h>
@@ -381,6 +374,12 @@ void e1000e_check_options(struct e1000_adapter *adapter)
"%s set to dynamic mode\n", opt.name);
adapter->itr = 20000;
break;
+ case 2:
+ dev_info(&adapter->pdev->dev,
+ "%s Invalid mode - setting default\n",
+ opt.name);
+ adapter->itr_setting = opt.def;
+ /* fall-through */
case 3:
dev_info(&adapter->pdev->dev,
"%s set to dynamic conservative mode\n",
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 20e71f4ca426..00b3fc98bf30 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index f4f71b9991e3..3841bccf058c 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_PHY_H_
#define _E1000E_PHY_H_
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 065f8c80d4f2..fb1a914a3ad4 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* PTP 1588 Hardware Clock (PHC)
* Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
@@ -47,6 +40,7 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
ptp_clock_info);
struct e1000_hw *hw = &adapter->hw;
bool neg_adj = false;
+ unsigned long flags;
u64 adjustment;
u32 timinca, incvalue;
s32 ret_val;
@@ -64,6 +58,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
if (ret_val)
return ret_val;
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+
incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
adjustment = incvalue;
@@ -77,6 +73,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
ew32(TIMINCA, timinca);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
return 0;
}
@@ -191,6 +189,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = e1000e_phc_adjfreq,
.adjtime = e1000e_phc_adjtime,
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index a7e6a3e37257..ea235bbe50d3 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_REGS_H_
#define _E1000E_REGS_H_
@@ -39,6 +32,7 @@
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FEXT 0x0002C /* Future Extended - RW */
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 72dae4d97b43..beb7b4393a6c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -86,12 +86,12 @@
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT 8
-#define I40E_NVM_VERSION_HI_MASK (0xff << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 12
+#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
/* The values in here are decimal coded as hex, as is the case in the NVM map */
#define I40E_CURRENT_NVM_VERSION_HI 0x2
-#define I40E_CURRENT_NVM_VERSION_LO 0x30
+#define I40E_CURRENT_NVM_VERSION_LO 0x40
/* magic for getting defines into strings */
#define STRINGIFY(foo) #foo
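With this hunk the HI part of the NVM version word shrinks to the top nibble (4 bits at shift 12) while LO stays the low byte. The sketch below is not part of the patch; it uses a hypothetical version word of 0x2040, chosen to match the I40E_CURRENT_NVM_VERSION_HI/LO defaults above, to show how the new masks decode into the "n%02x.%02x" string later built by i40e_fw_version_str():

	/* Illustrative, standalone sketch of the new NVM version decode. */
	#include <stdio.h>

	#define I40E_NVM_VERSION_LO_SHIFT  0
	#define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
	#define I40E_NVM_VERSION_HI_SHIFT  12
	#define I40E_NVM_VERSION_HI_MASK   (0xf << I40E_NVM_VERSION_HI_SHIFT)

	int main(void)
	{
		unsigned int version = 0x2040;	/* hypothetical hw->nvm.version word */
		unsigned int hi = (version & I40E_NVM_VERSION_HI_MASK) >>
				  I40E_NVM_VERSION_HI_SHIFT;
		unsigned int lo = (version & I40E_NVM_VERSION_LO_MASK) >>
				  I40E_NVM_VERSION_LO_SHIFT;

		printf("n%02x.%02x\n", hi, lo);	/* prints "n02.40" */
		return 0;
	}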
@@ -136,6 +136,7 @@ enum i40e_state_t {
__I40E_EMP_RESET_REQUESTED,
__I40E_FILTER_OVERFLOW_PROMISC,
__I40E_SUSPENDED,
+ __I40E_BAD_EEPROM,
};
enum i40e_interrupt_policy {
@@ -152,8 +153,21 @@ struct i40e_lump_tracking {
};
#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
-#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512
-struct i40e_fdir_data {
+#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512
+#define I40E_FDIR_BUFFER_FULL_MARGIN 10
+#define I40E_FDIR_BUFFER_HEAD_ROOM 200
+
+struct i40e_fdir_filter {
+ struct hlist_node fdir_node;
+ /* filter input set */
+ u8 flow_type;
+ u8 ip4_proto;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 sctp_v_tag;
+ /* filter control */
u16 q_index;
u8 flex_off;
u8 pctype;
@@ -162,7 +176,6 @@ struct i40e_fdir_data {
u8 fd_status;
u16 cnt_index;
u32 fd_id;
- u8 *raw_packet;
};
#define I40E_ETH_P_LLDP 0x88cc
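The raw packet buffer has moved out of the filter structure, so struct i40e_fdir_filter now carries only the input set plus programming metadata. A hedged sketch of how an ethtool TCP/IPv4 flow spec might map onto these fields; the helper name i40e_fill_fdir_input is hypothetical, only fields visible in this hunk are used, and all values stay in network byte order to match the __be16/__be32 types:

	/* Sketch only; assumes the driver's "i40e.h" and <linux/ethtool.h>. */
	static void i40e_fill_fdir_input(struct i40e_fdir_filter *input,
					 const struct ethtool_rx_flow_spec *fsp)
	{
		input->flow_type = fsp->flow_type;	/* e.g. TCP_V4_FLOW */
		input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
		input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
		input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
		input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
		input->q_index = fsp->ring_cookie;	/* destination queue */
		input->fd_id = fsp->location;		/* software rule index */
	}

This is simply the inverse of the assignments made by i40e_get_ethtool_fdir_entry() further down in this patch.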
@@ -196,7 +209,7 @@ struct i40e_pf {
bool fc_autoneg_status;
u16 eeprom_version;
- u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */
+ u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
u16 num_req_vfs; /* num vfs requested for this vf */
@@ -210,6 +223,9 @@ struct i40e_pf {
u8 atr_sample_rate;
bool wol_en;
+ struct hlist_head fdir_filter_list;
+ u16 fdir_pf_active_filters;
+
#ifdef CONFIG_I40E_VXLAN
__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
u16 pending_vxlan_bitmap;
@@ -251,6 +267,9 @@ struct i40e_pf {
#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
#endif
+ /* tracks features that get auto disabled by errors */
+ u64 auto_disable_flags;
+
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
struct i40e_hw_port_stats stats_offsets;
@@ -477,10 +496,10 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
"f%d.%d a%d.%d n%02x.%02x e%08x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
- (hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
- >> I40E_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
- >> I40E_NVM_VERSION_LO_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
+ I40E_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
+ I40E_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack);
return buf;
@@ -534,9 +553,13 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
struct i40e_pf *pf, bool add);
-
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input, bool add);
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
+int i40e_get_current_fd_count(struct i40e_pf *pf);
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -575,6 +598,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index a50e6b3479ae..ed3902bf249b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -647,9 +647,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
- memset((void *)details, 0,
- sizeof(struct i40e_asq_cmd_details));
+ memset(desc, 0, sizeof(*desc));
+ memset(details, 0, sizeof(*details));
ntc++;
if (ntc == asq->count)
ntc = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index e7f38b57834d..922cdcc45c54 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -162,6 +162,372 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
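A minimal sketch of that work flow as code. The field names .known, .outer_ip and .inner_prot are inferred from the I40E_PTT() initializer order below and are assumptions, not a quote of the real struct i40e_rx_ptype_decoded layout:

	/* Sketch of a consumer of i40e_ptype_lookup[]; kernel context assumed. */
	static void i40e_example_decode_ptype(unsigned int ptype)
	{
		struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];

		if (!decoded.known) {
			/* row was built with I40E_PTT_UNUSED_ENTRY() */
			pr_debug("ptype %u: unknown packet type\n", ptype);
			return;
		}

		if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
			/* IP packet: tunnel and inner-protocol fields are valid */
			pr_debug("ptype %u: IP, inner prot %u\n",
				 ptype, decoded.inner_prot);
		else
			/* non-IP: fall back to the L2 ptype enumeration */
			pr_debug("ptype %u: L2 packet\n", ptype);
	}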
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ I40E_PTT_UNUSED_ENTRY(154),
+ I40E_PTT_UNUSED_ENTRY(155),
+ I40E_PTT_UNUSED_ENTRY(156),
+ I40E_PTT_UNUSED_ENTRY(157),
+ I40E_PTT_UNUSED_ENTRY(158),
+ I40E_PTT_UNUSED_ENTRY(159),
+
+ I40E_PTT_UNUSED_ENTRY(160),
+ I40E_PTT_UNUSED_ENTRY(161),
+ I40E_PTT_UNUSED_ENTRY(162),
+ I40E_PTT_UNUSED_ENTRY(163),
+ I40E_PTT_UNUSED_ENTRY(164),
+ I40E_PTT_UNUSED_ENTRY(165),
+ I40E_PTT_UNUSED_ENTRY(166),
+ I40E_PTT_UNUSED_ENTRY(167),
+ I40E_PTT_UNUSED_ENTRY(168),
+ I40E_PTT_UNUSED_ENTRY(169),
+
+ I40E_PTT_UNUSED_ENTRY(170),
+ I40E_PTT_UNUSED_ENTRY(171),
+ I40E_PTT_UNUSED_ENTRY(172),
+ I40E_PTT_UNUSED_ENTRY(173),
+ I40E_PTT_UNUSED_ENTRY(174),
+ I40E_PTT_UNUSED_ENTRY(175),
+ I40E_PTT_UNUSED_ENTRY(176),
+ I40E_PTT_UNUSED_ENTRY(177),
+ I40E_PTT_UNUSED_ENTRY(178),
+ I40E_PTT_UNUSED_ENTRY(179),
+
+ I40E_PTT_UNUSED_ENTRY(180),
+ I40E_PTT_UNUSED_ENTRY(181),
+ I40E_PTT_UNUSED_ENTRY(182),
+ I40E_PTT_UNUSED_ENTRY(183),
+ I40E_PTT_UNUSED_ENTRY(184),
+ I40E_PTT_UNUSED_ENTRY(185),
+ I40E_PTT_UNUSED_ENTRY(186),
+ I40E_PTT_UNUSED_ENTRY(187),
+ I40E_PTT_UNUSED_ENTRY(188),
+ I40E_PTT_UNUSED_ENTRY(189),
+
+ I40E_PTT_UNUSED_ENTRY(190),
+ I40E_PTT_UNUSED_ENTRY(191),
+ I40E_PTT_UNUSED_ENTRY(192),
+ I40E_PTT_UNUSED_ENTRY(193),
+ I40E_PTT_UNUSED_ENTRY(194),
+ I40E_PTT_UNUSED_ENTRY(195),
+ I40E_PTT_UNUSED_ENTRY(196),
+ I40E_PTT_UNUSED_ENTRY(197),
+ I40E_PTT_UNUSED_ENTRY(198),
+ I40E_PTT_UNUSED_ENTRY(199),
+
+ I40E_PTT_UNUSED_ENTRY(200),
+ I40E_PTT_UNUSED_ENTRY(201),
+ I40E_PTT_UNUSED_ENTRY(202),
+ I40E_PTT_UNUSED_ENTRY(203),
+ I40E_PTT_UNUSED_ENTRY(204),
+ I40E_PTT_UNUSED_ENTRY(205),
+ I40E_PTT_UNUSED_ENTRY(206),
+ I40E_PTT_UNUSED_ENTRY(207),
+ I40E_PTT_UNUSED_ENTRY(208),
+ I40E_PTT_UNUSED_ENTRY(209),
+
+ I40E_PTT_UNUSED_ENTRY(210),
+ I40E_PTT_UNUSED_ENTRY(211),
+ I40E_PTT_UNUSED_ENTRY(212),
+ I40E_PTT_UNUSED_ENTRY(213),
+ I40E_PTT_UNUSED_ENTRY(214),
+ I40E_PTT_UNUSED_ENTRY(215),
+ I40E_PTT_UNUSED_ENTRY(216),
+ I40E_PTT_UNUSED_ENTRY(217),
+ I40E_PTT_UNUSED_ENTRY(218),
+ I40E_PTT_UNUSED_ENTRY(219),
+
+ I40E_PTT_UNUSED_ENTRY(220),
+ I40E_PTT_UNUSED_ENTRY(221),
+ I40E_PTT_UNUSED_ENTRY(222),
+ I40E_PTT_UNUSED_ENTRY(223),
+ I40E_PTT_UNUSED_ENTRY(224),
+ I40E_PTT_UNUSED_ENTRY(225),
+ I40E_PTT_UNUSED_ENTRY(226),
+ I40E_PTT_UNUSED_ENTRY(227),
+ I40E_PTT_UNUSED_ENTRY(228),
+ I40E_PTT_UNUSED_ENTRY(229),
+
+ I40E_PTT_UNUSED_ENTRY(230),
+ I40E_PTT_UNUSED_ENTRY(231),
+ I40E_PTT_UNUSED_ENTRY(232),
+ I40E_PTT_UNUSED_ENTRY(233),
+ I40E_PTT_UNUSED_ENTRY(234),
+ I40E_PTT_UNUSED_ENTRY(235),
+ I40E_PTT_UNUSED_ENTRY(236),
+ I40E_PTT_UNUSED_ENTRY(237),
+ I40E_PTT_UNUSED_ENTRY(238),
+ I40E_PTT_UNUSED_ENTRY(239),
+
+ I40E_PTT_UNUSED_ENTRY(240),
+ I40E_PTT_UNUSED_ENTRY(241),
+ I40E_PTT_UNUSED_ENTRY(242),
+ I40E_PTT_UNUSED_ENTRY(243),
+ I40E_PTT_UNUSED_ENTRY(244),
+ I40E_PTT_UNUSED_ENTRY(245),
+ I40E_PTT_UNUSED_ENTRY(246),
+ I40E_PTT_UNUSED_ENTRY(247),
+ I40E_PTT_UNUSED_ENTRY(248),
+ I40E_PTT_UNUSED_ENTRY(249),
+
+ I40E_PTT_UNUSED_ENTRY(250),
+ I40E_PTT_UNUSED_ENTRY(251),
+ I40E_PTT_UNUSED_ENTRY(252),
+ I40E_PTT_UNUSED_ENTRY(253),
+ I40E_PTT_UNUSED_ENTRY(254),
+ I40E_PTT_UNUSED_ENTRY(255)
+};
+
+
/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
@@ -1409,9 +1775,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
- p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+ p = &hw->dev_caps;
else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
- p = (struct i40e_hw_capabilities *)&hw->func_caps;
+ p = &hw->func_caps;
else
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 50730141bb7b..036570d76176 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -332,6 +332,7 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
u16 type;
u16 length;
u16 typelength;
+ u16 offset = 0;
if (!lldpmib || !dcbcfg)
return I40E_ERR_PARAM;
@@ -339,15 +340,17 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
/* set to the start of LLDPDU */
lldpmib += ETH_HLEN;
tlv = (struct i40e_lldp_org_tlv *)lldpmib;
- while (tlv) {
+ while (1) {
typelength = ntohs(tlv->typelength);
type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
I40E_LLDP_TLV_TYPE_SHIFT);
length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
I40E_LLDP_TLV_LEN_SHIFT);
+ offset += sizeof(typelength) + length;
- if (type == I40E_TLV_TYPE_END)
- break;/* END TLV break out */
+ /* END TLV or beyond LLDPDU size */
+ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+ break;
switch (type) {
case I40E_TLV_TYPE_ORG:
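The reworked loop above bounds the walk by the accumulated offset instead of trusting a terminating END TLV alone. A standalone sketch of the same idea, using illustrative names and the standard 7-bit type / 9-bit length split of an LLDP TLV header (I40E_LLDPDU_SIZE itself is not shown in this hunk, so the limit below is an assumption):

	#include <stdint.h>
	#include <stddef.h>

	#define EXAMPLE_LLDPDU_SIZE	1500	/* assumed maximum LLDPDU length */
	#define TLV_TYPE_END		0

	static void example_walk_tlvs(const uint8_t *buf, size_t buflen)
	{
		size_t offset = 0;

		while (offset + 2 <= buflen) {
			uint16_t typelength = (buf[offset] << 8) | buf[offset + 1];
			uint16_t type = typelength >> 9;	/* upper 7 bits */
			uint16_t length = typelength & 0x1ff;	/* lower 9 bits */

			offset += 2 + length;
			/* stop on the END TLV or once past the LLDPDU size */
			if (type == TLV_TYPE_END || offset > EXAMPLE_LLDPDU_SIZE)
				break;

			/* a real parser would dispatch on 'type' here */
		}
	}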
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index da22c3fa2c00..3c37386fd138 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1011,10 +1011,12 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
**/
static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
{
- if (enable)
+ if (enable) {
pf->flags |= flag;
- else
+ } else {
pf->flags &= ~flag;
+ pf->auto_disable_flags |= flag;
+ }
dev_info(&pf->pdev->dev, "requesting a pf reset\n");
i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
}
@@ -1467,19 +1469,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->msg_enable);
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
- dev_info(&pf->pdev->dev, "forcing PFR\n");
+ dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
- dev_info(&pf->pdev->dev, "forcing CoreR\n");
+ dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
- dev_info(&pf->pdev->dev, "forcing GlobR\n");
+ dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "empr", 4) == 0) {
- dev_info(&pf->pdev->dev, "forcing EMPR\n");
+ dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
@@ -1663,28 +1665,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
desc = NULL;
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
- struct i40e_fdir_data fd_data;
+ struct i40e_fdir_filter fd_data;
u16 packet_len, i, j = 0;
char *asc_packet;
+ u8 *raw_packet;
bool add = false;
int ret;
- asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ goto command_write_done;
+
+ if (strncmp(cmd_buf, "add", 3) == 0)
+ add = true;
+
+ if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+ goto command_write_done;
+
+ asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
GFP_KERNEL);
if (!asc_packet)
goto command_write_done;
- fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
- GFP_KERNEL);
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
+ GFP_KERNEL);
- if (!fd_data.raw_packet) {
+ if (!raw_packet) {
kfree(asc_packet);
asc_packet = NULL;
goto command_write_done;
}
- if (strncmp(cmd_buf, "add", 3) == 0)
- add = true;
cnt = sscanf(&cmd_buf[13],
"%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
&fd_data.q_index,
@@ -1698,36 +1708,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt);
kfree(asc_packet);
asc_packet = NULL;
- kfree(fd_data.raw_packet);
+ kfree(raw_packet);
goto command_write_done;
}
/* fix packet length if user entered 0 */
if (packet_len == 0)
- packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP;
+ packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
/* make sure to check the max as well */
packet_len = min_t(u16,
- packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+ packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
for (i = 0; i < packet_len; i++) {
sscanf(&asc_packet[j], "%2hhx ",
- &fd_data.raw_packet[i]);
+ &raw_packet[i]);
j += 3;
}
dev_info(&pf->pdev->dev, "FD raw packet dump\n");
print_hex_dump(KERN_INFO, "FD raw packet: ",
DUMP_PREFIX_OFFSET, 16, 1,
- fd_data.raw_packet, packet_len, true);
- ret = i40e_program_fdir_filter(&fd_data, pf, add);
+ raw_packet, packet_len, true);
+ ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
if (!ret) {
dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
} else {
dev_info(&pf->pdev->dev,
"Filter command send failed %d\n", ret);
}
- kfree(fd_data.raw_packet);
- fd_data.raw_packet = NULL;
+ kfree(raw_packet);
+ raw_packet = NULL;
kfree(asc_packet);
asc_packet = NULL;
} else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
@@ -2077,9 +2087,13 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev,
"tx_timeout: VSI %d not found\n", vsi_seid);
- goto netdev_ops_write_done;
- }
- if (rtnl_trylock()) {
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
+ vsi_seid);
+ } else if (test_bit(__I40E_DOWN, &vsi->state)) {
+ dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
+ vsi_seid);
+ } else if (rtnl_trylock()) {
vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
rtnl_unlock();
dev_info(&pf->pdev->dev, "tx_timeout called\n");
@@ -2098,9 +2112,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev,
"change_mtu: VSI %d not found\n", vsi_seid);
- goto netdev_ops_write_done;
- }
- if (rtnl_trylock()) {
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
+ vsi_seid);
+ } else if (rtnl_trylock()) {
vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
mtu);
rtnl_unlock();
@@ -2119,9 +2134,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev,
"set_rx_mode: VSI %d not found\n", vsi_seid);
- goto netdev_ops_write_done;
- }
- if (rtnl_trylock()) {
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
+ vsi_seid);
+ } else if (rtnl_trylock()) {
vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
rtnl_unlock();
dev_info(&pf->pdev->dev, "set_rx_mode called\n");
@@ -2139,11 +2155,14 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
vsi_seid);
- goto netdev_ops_write_done;
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
+ vsi_seid);
+ } else {
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ napi_schedule(&vsi->q_vectors[i]->napi);
+ dev_info(&pf->pdev->dev, "napi called\n");
}
- for (i = 0; i < vsi->num_q_vectors; i++)
- napi_schedule(&vsi->q_vectors[i]->napi);
- dev_info(&pf->pdev->dev, "napi called\n");
} else {
dev_info(&pf->pdev->dev, "unknown command '%s'\n",
i40e_dbg_netdev_ops_buf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index b1d7d8c5cb9b..03d99cbc5c25 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -62,6 +62,9 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_crc_errors),
};
+static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd);
+
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
* but they are separate. This device supports Virtualization, and
* as such might have several netdevs supporting VMDq and FCoE going
@@ -84,6 +87,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
+ I40E_PF_STAT("tx_timeout", tx_timeout_count),
I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -110,6 +114,11 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ /* LPI stats */
+ I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
+ I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
+ I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
+ I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
};
#define I40E_QUEUE_STATS_LEN(n) \
@@ -387,7 +396,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
ret_val = i40e_aq_read_nvm(hw, 0x0,
eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
len,
- (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+ eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
last, NULL);
if (ret_val) {
dev_info(&pf->pdev->dev,
@@ -399,7 +408,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
release_nvm:
i40e_release_nvm(hw);
- memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
+ memcpy(bytes, eeprom_buff, eeprom->len);
free_buff:
kfree(eeprom_buff);
return ret_val;
@@ -649,18 +658,18 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
/* process Tx ring statistics */
do {
- start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+ start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
data[i] = tx_ring->stats.packets;
data[i + 1] = tx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
/* Rx ring is the 2nd half of the queue pair */
rx_ring = &tx_ring[1];
do {
- start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+ start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
data[i + 2] = rx_ring->stats.packets;
data[i + 3] = rx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
}
rcu_read_unlock();
if (vsi == pf->vsi[pf->lan_vsi]) {
@@ -1112,6 +1121,84 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
}
/**
+ * i40e_get_ethtool_fdir_all - Populates the rule count of a command
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ * @rule_locs: Array of used rule locations
+ *
+ * This function populates both the total and actual rule count of
+ * the ethtool flow classification command
+ *
+ * Returns 0 on success or -EMSGSIZE if there are more rules than fit in @rule_locs
+ **/
+static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = pf->hw.fdir_shared_filter_count +
+ pf->fdir_pf_filter_count;
+
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = rule->fd_id;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+/**
+ * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * This function looks up a filter based on the Rx flow classification
+ * command and fills the flow spec info for it if found
+ *
+ * Returns 0 on success or -EINVAL if filter not found
+ **/
+static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct i40e_fdir_filter *rule = NULL;
+ struct hlist_node *node2;
+
+ /* report total rule count */
+ cmd->data = pf->hw.fdir_shared_filter_count +
+ pf->fdir_pf_filter_count;
+
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (fsp->location <= rule->fd_id)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->fd_id)
+ return -EINVAL;
+
+ fsp->flow_type = rule->flow_type;
+ fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
+ fsp->ring_cookie = rule->q_index;
+
+ return 0;
+}
+
+/**
* i40e_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -1135,15 +1222,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = i40e_get_rss_hash_opts(pf, cmd);
break;
case ETHTOOL_GRXCLSRLCNT:
- cmd->rule_cnt = 10;
+ cmd->rule_cnt = pf->fdir_pf_active_filters;
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
- ret = 0;
+ ret = i40e_get_ethtool_fdir_entry(pf, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- cmd->data = 500;
- ret = 0;
+ ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
+ break;
default:
break;
}
@@ -1274,289 +1361,182 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return 0;
}
-#define IP_HEADER_OFFSET 14
-#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
- * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
+ * i40e_match_fdir_input_set - Match a new filter against an existing one
+ * @rule: The filter already added
+ * @input: The new filter to compare against
*
- * Returns 0 if the filters were successfully added or removed
+ * Returns true if the two input sets match
**/
-static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
+static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
+ struct i40e_fdir_filter *input)
{
- struct i40e_pf *pf = vsi->back;
- struct udphdr *udp;
- struct iphdr *ip;
- bool err = false;
- int ret;
- int i;
- char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0};
-
- memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
-
- ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
- udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
-
- ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
- ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
- udp->source = fsp->h_u.tcp_ip4_spec.psrc;
- udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
-
- for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
- i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, pf, add);
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- }
- }
-
- return err ? -EOPNOTSUPP : 0;
+ if ((rule->dst_ip[0] != input->dst_ip[0]) ||
+ (rule->src_ip[0] != input->src_ip[0]) ||
+ (rule->dst_port != input->dst_port) ||
+ (rule->src_port != input->src_port))
+ return false;
+ return true;
}
-#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
- * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
+ * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @input: The filter to update or NULL to indicate deletion
+ * @sw_idx: Software index to the filter
+ * @cmd: The command to get or set Rx flow classification rules
*
- * Returns 0 if the filters were successfully added or removed
+ * This function updates (or deletes) a Flow Director entry from
+ * the hlist of the corresponding PF
+ *
+ * Returns 0 on success
**/
-static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
+static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input,
+ u16 sw_idx,
+ struct ethtool_rxnfc *cmd)
{
+ struct i40e_fdir_filter *rule, *parent;
struct i40e_pf *pf = vsi->back;
- struct tcphdr *tcp;
- struct iphdr *ip;
- bool err = false;
- int ret;
- /* Dummy packet */
- char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
-
- memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
-
- ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
- tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
-
- ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
- tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
- ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
- tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
-
- if (add) {
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- }
- }
+ struct hlist_node *node2;
+ int err = -EINVAL;
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
- ret = i40e_program_fdir_filter(fd_data, pf, add);
+ parent = NULL;
+ rule = NULL;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ /* hash found, or no matching entry */
+ if (rule->fd_id >= sw_idx)
+ break;
+ parent = rule;
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
-
- ret = i40e_program_fdir_filter(fd_data, pf, add);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
+ /* if there is an old rule occupying our place remove it */
+ if (rule && (rule->fd_id == sw_idx)) {
+ if (input && !i40e_match_fdir_input_set(rule, input))
+ err = i40e_add_del_fdir(vsi, rule, false);
+ else if (!input)
+ err = i40e_add_del_fdir(vsi, rule, false);
+ hlist_del(&rule->fdir_node);
+ kfree(rule);
+ pf->fdir_pf_active_filters--;
}
- return err ? -EOPNOTSUPP : 0;
-}
+ /* If no input this was a delete, err should be 0 if a rule was
+ * successfully found and removed from the list else -EINVAL
+ */
+ if (!input)
+ return err;
-/**
- * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
- *
- * Returns 0 if the filters were successfully added or removed
- **/
-static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
-{
- return -EOPNOTSUPP;
+ /* initialize node and set software index */
+ INIT_HLIST_NODE(&input->fdir_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_after(&parent->fdir_node, &input->fdir_node);
+ else
+ hlist_add_head(&input->fdir_node,
+ &pf->fdir_filter_list);
+
+ /* update counts */
+ pf->fdir_pf_active_filters++;
+
+ return 0;
}
-#define I40E_IP_DUMMY_PACKET_LEN 34
/**
- * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required for the FDir descriptor
- * @fsp: the ethtool flow spec
- * @add: true adds a filter, false removes it
+ * i40e_del_fdir_entry - Deletes a Flow Director filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @cmd: The command to get or set Rx flow classification rules
*
- * Returns 0 if the filters were successfully added or removed
- **/
-static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
+ * The function removes a Flow Director filter entry from the
+ * hlist of the corresponding PF
+ *
+ * Returns 0 on success
+ */
+static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd)
{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
struct i40e_pf *pf = vsi->back;
- struct iphdr *ip;
- bool err = false;
- int ret;
- int i;
- char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-
- memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
- ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+ int ret = 0;
- ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
- ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
- ip->protocol = fsp->h_u.usr_ip4_spec.proto;
+ ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
- for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, pf, add);
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- }
- }
-
- return err ? -EOPNOTSUPP : 0;
+ i40e_fdir_check_and_reenable(pf);
+ return ret;
}
/**
- * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
- * a specific flow spec based on their protocol
+ * i40e_add_fdir_ethtool - Add Flow Director filters for a given flow spec
* @vsi: pointer to the targeted VSI
* @cmd: command to get or set RX flow classification rules
- * @add: true adds a filter, false removes it
*
- * Returns 0 if the filters were successfully added or removed
+ * Add Flow Director filters for a specific flow spec based on their
+ * protocol. Returns 0 if the filters were successfully added.
**/
-static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
- struct ethtool_rxnfc *cmd, bool add)
+static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd)
{
- struct i40e_fdir_data fd_data;
- int ret = -EINVAL;
+ struct ethtool_rx_flow_spec *fsp;
+ struct i40e_fdir_filter *input;
struct i40e_pf *pf;
- struct ethtool_rx_flow_spec *fsp =
- (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int ret = -EINVAL;
if (!vsi)
return -EINVAL;
pf = vsi->back;
- if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
- (fsp->ring_cookie >= vsi->num_queue_pairs))
- return -EINVAL;
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return -EOPNOTSUPP;
- /* Populate the Flow Director that we have at the moment
- * and allocate the raw packet buffer for the calling functions
- */
- fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
- GFP_KERNEL);
+ if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+ return -ENOSPC;
- if (!fd_data.raw_packet) {
- dev_info(&pf->pdev->dev, "Could not allocate memory\n");
- return -ENOMEM;
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
+ pf->hw.func_caps.fd_filters_guaranteed)) {
+ return -EINVAL;
}
- fd_data.q_index = fsp->ring_cookie;
- fd_data.flex_off = 0;
- fd_data.pctype = 0;
- fd_data.dest_vsi = vsi->id;
- fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
- fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
- fd_data.cnt_index = 0;
- fd_data.fd_id = 0;
+ if (fsp->ring_cookie >= vsi->num_queue_pairs)
+ return -EINVAL;
- switch (fsp->flow_type & ~FLOW_EXT) {
- case TCP_V4_FLOW:
- ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
- break;
- case UDP_V4_FLOW:
- ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
- break;
- case SCTP_V4_FLOW:
- ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
- break;
- case IPV4_FLOW:
- ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
- break;
- case IP_USER_FLOW:
- switch (fsp->h_u.usr_ip4_spec.proto) {
- case IPPROTO_TCP:
- ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
- break;
- case IPPROTO_UDP:
- ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
- break;
- case IPPROTO_SCTP:
- ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
- break;
- default:
- ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
- break;
- }
- break;
- default:
- dev_info(&pf->pdev->dev, "Could not specify spec type\n");
- ret = -EINVAL;
- }
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+
+ if (!input)
+ return -ENOMEM;
- kfree(fd_data.raw_packet);
- fd_data.raw_packet = NULL;
+ input->fd_id = fsp->location;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ else
+ input->dest_ctl =
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+
+ input->q_index = fsp->ring_cookie;
+ input->flex_off = 0;
+ input->pctype = 0;
+ input->dest_vsi = vsi->id;
+ input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
+ input->cnt_index = 0;
+ input->flow_type = fsp->flow_type;
+ input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
+ input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+ ret = i40e_add_del_fdir(vsi, input, true);
+ if (ret)
+ kfree(input);
+ else
+ i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
return ret;
}
@@ -1580,10 +1560,10 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
ret = i40e_set_rss_hash_opt(pf, cmd);
break;
case ETHTOOL_SRXCLSRLINS:
- ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
+ ret = i40e_add_fdir_ethtool(vsi, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
- ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
+ ret = i40e_del_fdir_entry(vsi, cmd);
break;
default:
break;
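
The ETHTOOL_SRXCLSRLINS / ETHTOOL_SRXCLSRLDEL cases dispatched above are what `ethtool -N <dev> flow-type tcp4 ... loc N` (and `ethtool -N <dev> delete N`) issue from userspace. A standalone sketch of the insert path via the ethtool ioctl; the interface name, addresses, ports and rule location below are placeholders.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.ip4src = inet_addr("192.168.1.1");
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.168.1.2");
	nfc.fs.h_u.tcp_ip4_spec.psrc = htons(5001);
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(5002);
	nfc.fs.ring_cookie = 0;		/* deliver to Rx queue 0 */
	nfc.fs.location = 10;		/* software rule id (fd_id) */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	return 0;
}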
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b901371ca361..861b722c2672 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -26,6 +26,7 @@
/* Local includes */
#include "i40e.h"
+#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif
@@ -38,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 30
+#define DRV_VERSION_BUILD 36
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -305,6 +306,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ set_bit(__I40E_DOWN, &vsi->state);
i40e_down(vsi);
break;
}
@@ -375,20 +377,20 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
continue;
do {
- start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+ start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
packets = tx_ring->stats.packets;
bytes = tx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
rx_ring = &tx_ring[1];
do {
- start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+ start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
packets = rx_ring->stats.packets;
bytes = rx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
@@ -739,6 +741,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
u32 rx_page, rx_buf;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
+ u32 val;
int i;
u16 q;
@@ -769,10 +772,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
p = ACCESS_ONCE(vsi->tx_rings[q]);
do {
- start = u64_stats_fetch_begin_bh(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
tx_b += bytes;
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
@@ -781,10 +784,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
/* Rx queue is part of the same block as Tx queue */
p = &p[1];
do {
- start = u64_stats_fetch_begin_bh(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
rx_b += bytes;
rx_p += packets;
rx_buf += p->rx_stats.alloc_buff_failed;
@@ -971,6 +974,20 @@ void i40e_update_stats(struct i40e_vsi *vsi)
i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
pf->stat_offsets_loaded,
&osd->rx_jabber, &nsd->rx_jabber);
+
+ val = rd32(hw, I40E_PRTPM_EEE_STAT);
+ nsd->tx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+ nsd->rx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+ i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+ pf->stat_offsets_loaded,
+ &osd->tx_lpi_count, &nsd->tx_lpi_count);
+ i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
+ pf->stat_offsets_loaded,
+ &osd->rx_lpi_count, &nsd->rx_lpi_count);
}
pf->stat_offsets_loaded = true;
@@ -1964,11 +1981,14 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
- /* If the network stack called us with vid = 0, we should
- * indicate to i40e_vsi_add_vlan() that we want to receive
- * any traffic (i.e. with any vlan tag, or untagged)
+ /* If the network stack called us with vid = 0 then
+ * it is asking to receive priority tagged packets with
+ * vlan id 0. Our HW receives them by default when configured
+ * to receive untagged packets so there is no need to add an
+ * extra filter for vlan 0 tagged packets.
*/
- ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
+ if (vid)
+ ret = i40e_vsi_add_vlan(vsi, vid);
if (!ret && (vid < VLAN_N_VID))
set_bit(vid, vsi->active_vlans);
@@ -1981,7 +2001,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
* @netdev: network interface to be adjusted
* @vid: vlan id to be removed
*
- * net_device_ops implementation for adding vlan ids
+ * net_device_ops implementation for removing vlan ids
**/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
@@ -2177,6 +2197,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED));
tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
+ /* FDIR VSI tx ring can still use RS bit and writebacks */
+ if (vsi->type != I40E_VSI_FDIR)
+ tx_ctx.head_wb_ena = 1;
+ tx_ctx.head_wb_addr = ring->dma +
+ (ring->count * sizeof(struct i40e_tx_desc));
/* As part of VSI creation/update, FW allocates certain
* Tx arbitration queue sets for each TC enabled for
@@ -2420,6 +2445,28 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
}
/**
+ * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
+ * @vsi: Pointer to the targeted VSI
+ *
+ * This function replays the hlist of saved sideband (SB) Flow Director
+ * filters onto the hardware.
+ **/
+static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
+{
+ struct i40e_fdir_filter *filter;
+ struct i40e_pf *pf = vsi->back;
+ struct hlist_node *node;
+
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return;
+
+ hlist_for_each_entry_safe(filter, node,
+ &pf->fdir_filter_list, fdir_node) {
+ i40e_add_del_fdir(vsi, filter, true);
+ }
+}
+
+/**
* i40e_vsi_configure - Set up the VSI for action
* @vsi: the VSI being configured
**/
@@ -2557,7 +2604,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
- /* Associate the queue pair to the vector and enable the q int */
+ /* Associate the queue pair to the vector and enable the queue int */
val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
(I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
(I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
@@ -2831,12 +2878,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
>> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
- if (val == I40E_RESET_CORER)
+ if (val == I40E_RESET_CORER) {
pf->corer_count++;
- else if (val == I40E_RESET_GLOBR)
+ } else if (val == I40E_RESET_GLOBR) {
pf->globr_count++;
- else if (val == I40E_RESET_EMPR)
+ } else if (val == I40E_RESET_EMPR) {
pf->empr_count++;
+ set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ }
}
if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
@@ -2866,8 +2915,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
icr0_remaining);
if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
+ (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
dev_info(&pf->pdev->dev, "device will be reset\n");
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
i40e_service_event_schedule(pf);
@@ -3107,13 +3155,13 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- j = 1000;
- do {
- usleep_range(1000, 2000);
+ for (j = 0; j < 50; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
- ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
-
+ if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
/* Skip if the queue is already in the requested state */
if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
continue;
@@ -3123,8 +3171,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
/* turn on/off the queue */
if (enable) {
wr32(hw, I40E_QTX_HEAD(pf_q), 0);
- tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
- I40E_QTX_ENA_QENA_STAT_MASK;
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
} else {
tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
}
@@ -3171,12 +3218,13 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- j = 1000;
- do {
- usleep_range(1000, 2000);
+ for (j = 0; j < 50; j++) {
rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
- ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
+ if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
if (enable) {
/* is STAT set ? */
@@ -3190,11 +3238,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
/* turn on/off the queue */
if (enable)
- rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK;
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
else
- rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK);
+ rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
/* wait for the change to finish */
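
The rewritten enable/disable loops above swap the open-ended do/while for a bounded poll that gives up after 50 tries. A standalone sketch of that pattern; read_status() and delay_ms() are stand-ins for rd32() and usleep_range().

#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_reg;			/* stand-in for the HW register */
static unsigned int read_status(void) { return fake_reg; }
static void delay_ms(unsigned int ms) { (void)ms; fake_reg |= 0x4; /* pretend HW settles */ }

static bool poll_until_set(unsigned int mask, int max_tries)
{
	for (int i = 0; i < max_tries; i++) {
		if (read_status() & mask)
			return true;		/* requested state reached */
		delay_ms(1);
	}
	return false;				/* bounded: give up after max_tries */
}

int main(void)
{
	printf("enabled: %d\n", poll_until_set(0x4, 50));
	return 0;
}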
@@ -3732,8 +3778,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
NULL);
if (aq_ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "AQ command Config VSI BW allocation per TC failed = %d\n",
+ vsi->back->hw.aq.asq_last_status);
return -EINVAL;
}
@@ -4062,6 +4108,10 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
} else if (vsi->netdev) {
netdev_info(vsi->netdev, "NIC Link is Down\n");
}
+
+ /* replay FDIR SB filters */
+ if (vsi->type == I40E_VSI_FDIR)
+ i40e_fdir_filter_restore(vsi);
i40e_service_event_schedule(pf);
return 0;
@@ -4208,15 +4258,40 @@ static int i40e_open(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- char int_name[IFNAMSIZ];
int err;
- /* disallow open during test */
- if (test_bit(__I40E_TESTING, &pf->state))
+ /* disallow open during test or if eeprom is broken */
+ if (test_bit(__I40E_TESTING, &pf->state) ||
+ test_bit(__I40E_BAD_EEPROM, &pf->state))
return -EBUSY;
netif_carrier_off(netdev);
+ err = i40e_vsi_open(vsi);
+ if (err)
+ return err;
+
+#ifdef CONFIG_I40E_VXLAN
+ vxlan_get_rx_port(netdev);
+#endif
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_open - Set up resources and bring the VSI up
+ * @vsi: the VSI to open
+ *
+ * Finish initialization of the VSI.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+int i40e_vsi_open(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ char int_name[IFNAMSIZ];
+ int err;
+
/* allocate descriptors */
err = i40e_vsi_setup_tx_resources(vsi);
if (err)
@@ -4229,18 +4304,22 @@ static int i40e_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ if (!vsi->netdev) {
+ err = -EINVAL;
+ goto err_setup_rx;
+ }
snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
- dev_driver_string(&pf->pdev->dev), netdev->name);
+ dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
err = i40e_vsi_request_irq(vsi, int_name);
if (err)
goto err_setup_rx;
/* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs);
+ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
if (err)
goto err_set_queues;
- err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs);
+ err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
if (err)
goto err_set_queues;
@@ -4248,10 +4327,6 @@ static int i40e_open(struct net_device *netdev)
if (err)
goto err_up_complete;
-#ifdef CONFIG_I40E_VXLAN
- vxlan_get_rx_port(netdev);
-#endif
-
return 0;
err_up_complete:
@@ -4269,6 +4344,26 @@ err_setup_tx:
}
/**
+ * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
+ * @pf: pointer to the PF structure
+ *
+ * This function destroys the hlist where all the Flow Director
+ * filters were saved.
+ **/
+static void i40e_fdir_filter_exit(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter *filter;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(filter, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ }
+ pf->fdir_pf_active_filters = 0;
+}
+
+/**
* i40e_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -4321,7 +4416,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* for the warning interrupt will deal with the shutdown
* and recovery of the switch setup.
*/
- dev_info(&pf->pdev->dev, "GlobalR requested\n");
+ dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4332,7 +4427,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
*
* Same as Global Reset, except does *not* include the MAC/PHY
*/
- dev_info(&pf->pdev->dev, "CoreR requested\n");
+ dev_dbg(&pf->pdev->dev, "CoreR requested\n");
val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
val |= I40E_GLGEN_RTRIG_CORER_MASK;
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4366,7 +4461,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* the switch, since we need to do all the recovery as
* for the Core Reset.
*/
- dev_info(&pf->pdev->dev, "PFR requested\n");
+ dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf);
} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
@@ -4415,18 +4510,18 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
&old_cfg->etscfg.prioritytable,
sizeof(new_cfg->etscfg.prioritytable))) {
need_reconfig = true;
- dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
}
if (memcmp(&new_cfg->etscfg.tcbwtable,
&old_cfg->etscfg.tcbwtable,
sizeof(new_cfg->etscfg.tcbwtable)))
- dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+ dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
if (memcmp(&new_cfg->etscfg.tsatable,
&old_cfg->etscfg.tsatable,
sizeof(new_cfg->etscfg.tsatable)))
- dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
}
/* Check if PFC configuration has changed */
@@ -4434,7 +4529,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
&old_cfg->pfc,
sizeof(new_cfg->pfc))) {
need_reconfig = true;
- dev_info(&pf->pdev->dev, "PFC config change detected.\n");
+ dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
}
/* Check if APP Table has changed */
@@ -4442,7 +4537,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
&old_cfg->app,
sizeof(new_cfg->app))) {
need_reconfig = true;
- dev_info(&pf->pdev->dev, "APP Table change detected.\n");
+ dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
}
return need_reconfig;
@@ -4492,7 +4587,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
- dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+ dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
goto exit;
}
@@ -4550,8 +4645,8 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
struct i40e_vf *vf;
u16 vf_id;
- dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
- __func__, queue, qtx_ctl);
+ dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
+ queue, qtx_ctl);
/* Queue belongs to VF, find the VF and issue VF reset */
if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
@@ -4581,6 +4676,54 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
}
/**
+ * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
+ * @pf: board private structure
+ **/
+int i40e_get_current_fd_count(struct i40e_pf *pf)
+{
+ int val, fcnt_prog;
+ val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+ fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
+ ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ return fcnt_prog;
+}
+
+/**
+ * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
+ * @pf: board private structure
+ **/
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
+{
+ u32 fcnt_prog, fcnt_avail;
+
+ /* Check if FD SB or ATR was auto-disabled and if there is enough room
+ * to re-enable them
+ */
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return;
+ fcnt_prog = i40e_get_current_fd_count(pf);
+ fcnt_avail = pf->hw.fdir_shared_filter_count +
+ pf->fdir_pf_filter_count;
+ if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+ }
+ }
+ /* ATR needs twice the headroom before it is re-enabled */
+ if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ }
+ }
+}
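
To make the headroom arithmetic above concrete: sideband is re-enabled once usage drops one headroom below capacity, ATR only after twice that margin. A standalone worked example; the headroom value of 200 is an assumption for illustration, not the driver's actual I40E_FDIR_BUFFER_HEAD_ROOM.

#include <stdbool.h>
#include <stdio.h>

#define HEAD_ROOM 200	/* assumed value, illustration only */

int main(void)
{
	unsigned int fcnt_avail = 8192, fcnt_prog = 7900;
	bool reenable_sb  = fcnt_prog < fcnt_avail - HEAD_ROOM;	/* < 7992 */
	bool reenable_atr = fcnt_prog < fcnt_avail - 2 * HEAD_ROOM;	/* < 7792 */

	printf("SB %d ATR %d\n", reenable_sb, reenable_atr);	/* SB 1 ATR 0 */
	return 0;
}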
+
+/**
* i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
* @pf: board private structure
**/
@@ -4589,11 +4732,14 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
return;
- pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
-
/* if interface is down do nothing */
if (test_bit(__I40E_DOWN, &pf->state))
return;
+ i40e_fdir_check_and_reenable(pf);
+
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
}
/**
@@ -4903,7 +5049,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
event.msg_size);
break;
case i40e_aqc_opc_lldp_update_mib:
- dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+ dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
rtnl_lock();
ret = i40e_handle_lldp_event(pf, &event);
@@ -4911,7 +5057,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
#endif /* CONFIG_I40E_DCB */
break;
case i40e_aqc_opc_event_lan_overflow:
- dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
+ dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
i40e_handle_lan_overflow_event(pf, &event);
break;
case i40e_aqc_opc_send_msg_to_peer:
@@ -4936,6 +5082,31 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
}
/**
+ * i40e_verify_eeprom - make sure eeprom is good to use
+ * @pf: board private structure
+ **/
+static void i40e_verify_eeprom(struct i40e_pf *pf)
+{
+ int err;
+
+ err = i40e_diag_eeprom_test(&pf->hw);
+ if (err) {
+ /* retry in case of garbage read */
+ err = i40e_diag_eeprom_test(&pf->hw);
+ if (err) {
+ dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
+ err);
+ set_bit(__I40E_BAD_EEPROM, &pf->state);
+ }
+ }
+
+ if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
+ clear_bit(__I40E_BAD_EEPROM, &pf->state);
+ }
+}
+
+/**
* i40e_reconstitute_veb - rebuild the VEB and anything connected to it
* @veb: pointer to the VEB instance
*
@@ -5053,6 +5224,12 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
/* increment MSI-X count because current FW skips one */
pf->hw.func_caps.num_msix_vectors++;
+ if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
+ (pf->hw.aq.fw_maj_ver < 2)) {
+ pf->hw.func_caps.num_msix_vectors++;
+ pf->hw.func_caps.num_msix_vectors_vf++;
+ }
+
if (pf->hw.debug_mask & I40E_DEBUG_USER)
dev_info(&pf->pdev->dev,
"pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -5132,9 +5309,9 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
err = i40e_up_complete(vsi);
if (err)
goto err_up_complete;
+ clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
}
- clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
return;
err_up_complete:
@@ -5157,6 +5334,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
{
int i;
+ i40e_fdir_filter_exit(pf);
for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
i40e_vsi_release(pf->vsi[i]);
@@ -5181,7 +5359,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
return 0;
- dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+ dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
if (i40e_check_asq_alive(hw))
i40e_vc_notify_reset(pf);
@@ -5228,7 +5406,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
if (test_bit(__I40E_DOWN, &pf->state))
goto end_core_reset;
- dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
+ dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
@@ -5237,6 +5415,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
goto end_core_reset;
}
+ /* re-verify the eeprom if we just had an EMP reset */
+ if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
+ clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ i40e_verify_eeprom(pf);
+ }
+
ret = i40e_get_capabilities(pf);
if (ret) {
dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -5278,7 +5462,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
* try to recover minimal use by getting the basic PF VSI working.
*/
if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
- dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
+ dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
/* find the one VEB connected to the MAC, and find orphans */
for (v = 0; v < I40E_MAX_VEB; v++) {
if (!pf->veb[v])
@@ -5331,6 +5515,11 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
+ if (pf->num_alloc_vfs) {
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ i40e_reset_vf(&pf->vf[v], true);
+ }
+
/* tell the firmware that we're starting */
dv.major_version = DRV_VERSION_MAJOR;
dv.minor_version = DRV_VERSION_MINOR;
@@ -5338,7 +5527,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
dv.subbuild_version = 0;
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
- dev_info(&pf->pdev->dev, "PF reset done\n");
+ dev_info(&pf->pdev->dev, "reset complete\n");
end_core_reset:
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5387,7 +5576,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
>> I40E_GL_MDET_TX_QUEUE_SHIFT;
dev_info(&pf->pdev->dev,
- "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
+ "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
event, queue, func);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
@@ -5401,7 +5590,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
>> I40E_GL_MDET_RX_QUEUE_SHIFT;
dev_info(&pf->pdev->dev,
- "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
+ "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
event, queue, func);
wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
mdd_detected = true;
@@ -5850,37 +6039,16 @@ err_out:
**/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
- int err = 0;
-
- pf->num_msix_entries = 0;
- while (vectors >= I40E_MIN_MSIX) {
- err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
- if (err == 0) {
- /* good to go */
- pf->num_msix_entries = vectors;
- break;
- } else if (err < 0) {
- /* total failure */
- dev_info(&pf->pdev->dev,
- "MSI-X vector reservation failed: %d\n", err);
- vectors = 0;
- break;
- } else {
- /* err > 0 is the hint for retry */
- dev_info(&pf->pdev->dev,
- "MSI-X vectors wanted %d, retrying with %d\n",
- vectors, err);
- vectors = err;
- }
- }
-
- if (vectors > 0 && vectors < I40E_MIN_MSIX) {
+ vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
+ I40E_MIN_MSIX, vectors);
+ if (vectors < 0) {
dev_info(&pf->pdev->dev,
- "Couldn't get enough vectors, only %d available\n",
- vectors);
+ "MSI-X vector reservation failed: %d\n", vectors);
vectors = 0;
}
+ pf->num_msix_entries = vectors;
+
return vectors;
}
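
pci_enable_msix_range() collapses the old try/retry loop into one call: it returns the number of vectors actually granted, somewhere in [minvec, maxvec], or a negative errno if even minvec cannot be met. A kernel-context sketch of that contract (not a standalone program; the wrapper name is hypothetical):

#include <linux/pci.h>

/* Hypothetical helper: ask for up to @wanted vectors, accept as few as 2. */
static int example_reserve_vectors(struct pci_dev *pdev,
				   struct msix_entry *entries, int wanted)
{
	int got = pci_enable_msix_range(pdev, entries, 2, wanted);

	if (got < 0)
		return got;	/* not even the minimum was available */

	return got;		/* 2 <= got <= wanted */
}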
@@ -5942,7 +6110,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else if (vec == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
- dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
+ dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
@@ -5978,13 +6146,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
/**
- * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
struct i40e_q_vector *q_vector;
@@ -6010,13 +6178,13 @@ static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
}
/**
- * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
+ * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
* We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
**/
-static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
+static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int v_idx, num_q_vectors;
@@ -6031,7 +6199,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
return -EINVAL;
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_alloc_q_vector(vsi, v_idx);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx);
if (err)
goto err_out;
}
@@ -6071,7 +6239,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) {
- dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
+ dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
err = pci_enable_msi(pf->pdev);
if (err) {
dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
@@ -6080,7 +6248,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
}
if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
- dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+ dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
/* track first vector for misc interrupts */
err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
@@ -6107,7 +6275,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
i40e_intr, 0, pf->misc_int_name, pf);
if (err) {
dev_info(&pf->pdev->dev,
- "request_irq for msix_misc failed: %d\n", err);
+ "request_irq for %s failed: %d\n",
+ pf->misc_int_name, err);
return -EFAULT;
}
}
@@ -6258,15 +6427,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- dev_info(&pf->pdev->dev,
- "Flow Director ATR mode Enabled\n");
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- dev_info(&pf->pdev->dev,
- "Flow Director Side Band mode Enabled\n");
} else {
dev_info(&pf->pdev->dev,
- "Flow Director Side Band mode Disabled in MFP mode\n");
+ "Flow Director Sideband mode Disabled in MFP mode\n");
}
pf->fdir_pf_filter_count =
pf->hw.func_caps.fd_filters_guaranteed;
@@ -6287,9 +6452,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_req_vfs = min_t(int,
pf->hw.func_caps.num_vfs,
I40E_MAX_VF_COUNT);
- dev_info(&pf->pdev->dev,
- "Number of VFs being requested for PF[%d] = %d\n",
- pf->hw.pf_id, pf->num_req_vfs);
}
#endif /* CONFIG_PCI_IOV */
pf->eeprom_version = 0xDEAD;
@@ -6326,6 +6488,39 @@ sw_init_done:
}
/**
+ * i40e_set_ntuple - set the ntuple feature flag and take action
+ * @pf: board private structure to initialize
+ * @features: the feature set that the stack is suggesting
+ *
+ * returns a bool to indicate if reset needs to happen
+ **/
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
+{
+ bool need_reset = false;
+
+ /* Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if (features & NETIF_F_NTUPLE) {
+ /* Enable filters and mark for reset */
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ need_reset = true;
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ } else {
+ /* turn off filters, mark for reset and clear SW filter list */
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ need_reset = true;
+ i40e_fdir_filter_exit(pf);
+ }
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ /* if ATR was disabled it can be re-enabled. */
+ if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ }
+ return need_reset;
+}
+
+/**
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -6335,12 +6530,19 @@ static int i40e_set_features(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ bool need_reset;
if (features & NETIF_F_HW_VLAN_CTAG_RX)
i40e_vlan_stripping_enable(vsi);
else
i40e_vlan_stripping_disable(vsi);
+ need_reset = i40e_set_ntuple(pf, features);
+
+ if (need_reset)
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
return 0;
}
@@ -6464,6 +6666,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
.ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
+ .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
#ifdef CONFIG_I40E_VXLAN
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -6495,10 +6698,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np = netdev_priv(netdev);
np->vsi = vsi;
- netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM |
NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_TSO |
- NETIF_F_SG;
+ NETIF_F_TSO;
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
@@ -6512,6 +6714,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXCSUM |
+ NETIF_F_NTUPLE |
NETIF_F_RXHASH |
0;
@@ -6771,8 +6974,6 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
if (vsi->netdev) {
/* results in a call to i40e_close() */
unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
}
} else {
if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
@@ -6791,6 +6992,10 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
i40e_vsi_delete(vsi);
i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev) {
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
i40e_vsi_clear_rings(vsi);
i40e_vsi_clear(vsi);
@@ -6845,13 +7050,12 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
}
if (vsi->base_vector) {
- dev_info(&pf->pdev->dev,
- "VSI %d has non-zero base vector %d\n",
+ dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
vsi->seid, vsi->base_vector);
return -EEXIST;
}
- ret = i40e_alloc_q_vectors(vsi);
+ ret = i40e_vsi_alloc_q_vectors(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
"failed to allocate %d q_vector for VSI %d, ret=%d\n",
@@ -6865,7 +7069,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
vsi->num_q_vectors, vsi->idx);
if (vsi->base_vector < 0) {
dev_info(&pf->pdev->dev,
- "failed to get q tracking for VSI %d, err=%d\n",
+ "failed to get queue tracking for VSI %d, err=%d\n",
vsi->seid, vsi->base_vector);
i40e_vsi_free_q_vectors(vsi);
ret = -ENOENT;
@@ -7822,6 +8026,44 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
return 0;
}
+#define INFO_STRING_LEN 255
+static void i40e_print_features(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ char *buf, *string;
+
+ string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
+ if (!string) {
+ dev_err(&pf->pdev->dev, "Features string allocation failed\n");
+ return;
+ }
+
+ buf = string;
+
+ buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
+#ifdef CONFIG_PCI_IOV
+ buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
+#endif
+ buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
+ pf->vsi[pf->lan_vsi]->num_queue_pairs);
+
+ if (pf->flags & I40E_FLAG_RSS_ENABLED)
+ buf += sprintf(buf, "RSS ");
+ buf += sprintf(buf, "FDir ");
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
+ buf += sprintf(buf, "ATR ");
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
+ buf += sprintf(buf, "NTUPLE ");
+ if (pf->flags & I40E_FLAG_DCB_ENABLED)
+ buf += sprintf(buf, "DCB ");
+ if (pf->flags & I40E_FLAG_PTP)
+ buf += sprintf(buf, "PTP ");
+
+ BUG_ON(buf > (string + INFO_STRING_LEN));
+ dev_info(&pf->pdev->dev, "%s\n", string);
+ kfree(string);
+}
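
As an editorial aside (not how this patch does it), the same feature string can be assembled with snprintf() and a running offset, which bounds each write instead of relying on the BUG_ON() overflow check. A standalone sketch that assumes the pieces fit in the buffer:

#include <stdio.h>

#define INFO_LEN 255

int main(void)
{
	char string[INFO_LEN];
	int len = 0;

	/* each write is bounded; assumes no truncation for this short string */
	len += snprintf(string + len, sizeof(string) - len, "Features: PF-id[%d] ", 0);
	len += snprintf(string + len, sizeof(string) - len, "RSS ");
	len += snprintf(string + len, sizeof(string) - len, "FDir ");
	printf("%s\n", string);
	return 0;
}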
+
/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -7848,17 +8090,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
/* set up for high or low dma */
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /* coherent mask for the same size will always succeed if
- * dma_set_mask does
- */
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- } else {
- dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
- err = -EIO;
- goto err_dma;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
}
/* set up pci connections */
@@ -7946,13 +8185,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_adminq(hw);
dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
- if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
- >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
- dev_info(&pdev->dev,
- "warning: NVM version not supported, supported version: %02x.%02x\n",
- I40E_CURRENT_NVM_VERSION_HI,
- I40E_CURRENT_NVM_VERSION_LO);
- }
if (err) {
dev_info(&pdev->dev,
"init_adminq failed: %d expecting API %02x.%02x\n",
@@ -7961,6 +8193,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ i40e_verify_eeprom(pf);
+
i40e_clear_pxe_mode(hw);
err = i40e_get_capabilities(pf);
if (err)
@@ -8062,7 +8296,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
- (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
u32 val;
/* disable link interrupts for VFs */
@@ -8070,6 +8305,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
i40e_flush(hw);
+
+ if (pci_num_vf(pdev)) {
+ dev_info(&pdev->dev,
+ "Active VFs found, allocating resources.\n");
+ err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
+ if (err)
+ dev_info(&pdev->dev,
+ "Error %d allocating resources for existing VFs\n",
+ err);
+ }
}
pfs_found++;
@@ -8092,7 +8337,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_set_pci_config_data(hw, link_status);
- dev_info(&pdev->dev, "PCI Express: %s %s\n",
+ dev_info(&pdev->dev, "PCI-Express: %s %s\n",
(hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
@@ -8109,6 +8354,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
}
+ /* print a string summarizing features */
+ i40e_print_features(pf);
+
return 0;
/* Unwind what we've done if something failed in the setup */
@@ -8165,16 +8413,16 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_ptp_stop(pf);
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
- i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
- }
-
/* no more scheduling of any task */
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
+
i40e_fdir_teardown(pf);
/* If there is a switch structure or any orphans, remove them.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 73f95b081927..262bdf11d221 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -27,14 +27,14 @@
#include "i40e_prototype.h"
/**
- * i40e_init_nvm_ops - Initialize NVM function pointers.
- * @hw: pointer to the HW structure.
+ * i40e_init_nvm_ops - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
*
- * Setups the function pointers and the NVM info structure. Should be called
- * once per NVM initialization, e.g. inside the i40e_init_shared_code().
- * Please notice that the NVM term is used here (& in all methods covered
- * in this file) as an equivalent of the FLASH part mapped into the SR.
- * We are accessing FLASH always thru the Shadow RAM.
+ * Set up the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside the i40e_init_shared_code().
+ * Note that the NVM term is used here (and in all methods covered
+ * in this file) as an equivalent of the FLASH part mapped into the SR.
+ * FLASH is always accessed through the Shadow RAM.
**/
i40e_status i40e_init_nvm(struct i40e_hw *hw)
{
@@ -49,16 +49,16 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
gens = rd32(hw, I40E_GLNVM_GENS);
sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
I40E_GLNVM_GENS_SR_SIZE_SHIFT);
- /* Switching to words (sr_size contains power of 2KB). */
+ /* Switching to words (sr_size contains power of 2KB) */
nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
- /* Check if we are in the normal or blank NVM programming mode. */
+ /* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, I40E_GLNVM_FLA);
- if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */
- /* Max NVM timeout. */
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+ /* Max NVM timeout */
nvm->timeout = I40E_MAX_NVM_TIMEOUT;
nvm->blank_nvm_mode = false;
- } else { /* Blank programming mode. */
+ } else { /* Blank programming mode */
nvm->blank_nvm_mode = true;
ret_code = I40E_ERR_NVM_BLANK_MODE;
hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
@@ -68,12 +68,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
}
/**
- * i40e_acquire_nvm - Generic request for acquiring the NVM ownership.
- * @hw: pointer to the HW structure.
- * @access: NVM access type (read or write).
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
*
- * This function will request NVM ownership for reading
- * via the proper Admin Command.
+ * This function will request NVM ownership for the given access type
+ * via the proper Admin Command.
**/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
enum i40e_aq_resource_access_type access)
@@ -87,20 +87,20 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
0, &time, NULL);
- /* Reading the Global Device Timer. */
+ /* Reading the Global Device Timer */
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
- /* Store the timeout. */
+ /* Store the timeout */
hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
if (ret_code) {
- /* Set the polling timeout. */
+ /* Set the polling timeout */
if (time > I40E_MAX_NVM_TIMEOUT)
timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
+ gtime;
else
timeout = hw->nvm.hw_semaphore_timeout;
- /* Poll until the current NVM owner timeouts. */
+ /* Poll until the current NVM owner times out */
while (gtime < timeout) {
usleep_range(10000, 20000);
ret_code = i40e_aq_request_resource(hw,
@@ -128,10 +128,10 @@ i40e_i40e_acquire_nvm_exit:
}
/**
- * i40e_release_nvm - Generic request for releasing the NVM ownership.
- * @hw: pointer to the HW structure.
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
*
- * This function will release NVM resource via the proper Admin Command.
+ * This function will release NVM resource via the proper Admin Command.
**/
void i40e_release_nvm(struct i40e_hw *hw)
{
@@ -140,17 +140,17 @@ void i40e_release_nvm(struct i40e_hw *hw)
}
/**
- * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit.
- * @hw: pointer to the HW structure.
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
*
- * Polls the SRCTL Shadow RAM register done bit.
+ * Polls the SRCTL Shadow RAM register done bit.
**/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 srctl, wait_cnt;
- /* Poll the I40E_GLNVM_SRCTL until the done bit is set. */
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
srctl = rd32(hw, I40E_GLNVM_SRCTL);
if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
@@ -165,12 +165,12 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
/**
- * i40e_read_nvm_word - Reads Shadow RAM
- * @hw: pointer to the HW structure.
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @data: word read from the Shadow RAM.
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
*
- * Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
@@ -184,15 +184,15 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
goto read_nvm_exit;
}
- /* Poll the done bit first. */
+ /* Poll the done bit first */
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
- /* Write the address and start reading. */
+ /* Write the address and start reading */
sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
(1 << I40E_GLNVM_SRCTL_START_SHIFT);
wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
- /* Poll I40E_GLNVM_SRCTL until the done bit is set. */
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set */
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
@@ -210,16 +210,15 @@ read_nvm_exit:
}
/**
- * i40e_read_nvm_buffer - Reads Shadow RAM buffer.
- * @hw: pointer to the HW structure.
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @words: number of words to read (in) &
- * number of words read before the NVM ownership timeout (out).
- * @data: words read from the Shadow RAM.
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
*
- * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
- * method. The buffer read is preceded by the NVM ownership take
- * and followed by the release.
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_word()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
**/
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
@@ -227,7 +226,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
i40e_status ret_code = 0;
u16 index, word;
- /* Loop thru the selected region. */
+ /* Loop thru the selected region */
for (word = 0; word < *words; word++) {
index = offset + word;
ret_code = i40e_read_nvm_word(hw, index, &data[word]);
@@ -235,21 +234,21 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
break;
}
- /* Update the number of words read from the Shadow RAM. */
+ /* Update the number of words read from the Shadow RAM */
*words = word;
return ret_code;
}
/**
- * i40e_calc_nvm_checksum - Calculates and returns the checksum
- * @hw: pointer to hardware structure
- * @checksum: pointer to the checksum
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
*
- * This function calculate SW Checksum that covers the whole 64kB shadow RAM
- * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
- * is customer specific and unknown. Therefore, this function skips all maximum
- * possible size of VPD (1kB).
+ * This function calculates SW Checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
+ * is customer specific and unknown. Therefore, this function skips the
+ * maximum possible size of the VPD area (1kB).
**/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
@@ -311,12 +310,12 @@ i40e_calc_nvm_checksum_exit:
}
/**
- * i40e_validate_nvm_checksum - Validate EEPROM checksum
- * @hw: pointer to hardware structure
- * @checksum: calculated checksum
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
*
- * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need the checksum, the pointer can be NULL.
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need checksum, the value can be NULL.
**/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index ed91f93ede2b..9cd57e617959 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -231,6 +231,13 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
+static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40e_ptype_lookup[ptype];
+}
+
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d4bb482b1a7f..0f5d96ad281d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -25,6 +25,7 @@
******************************************************************************/
#include "i40e.h"
+#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
@@ -39,11 +40,12 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
* i40e_program_fdir_filter - Program a Flow Director filter
- * @fdir_input: Packet data that will be filter parameters
+ * @fdir_data: Packet data that will be filter parameters
+ * @raw_packet: the pre-allocated packet buffer for FDir
* @pf: The pf pointer
* @add: True for add/update, False for remove
**/
-int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
struct i40e_pf *pf, bool add)
{
struct i40e_filter_program_desc *fdir_desc;
@@ -68,8 +70,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
tx_ring = vsi->tx_rings[0];
dev = tx_ring->dev;
- dma = dma_map_single(dev, fdir_data->raw_packet,
- I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+ dma = dma_map_single(dev, raw_packet,
+ I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto dma_fail;
@@ -132,14 +134,14 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
/* record length, and DMA address */
- dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+ dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
dma_unmap_addr_set(tx_buf, dma, dma);
tx_desc->buffer_addr = cpu_to_le64(dma);
td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+ build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
/* set the timestamp */
tx_buf->time_stamp = jiffies;
@@ -161,26 +163,329 @@ dma_fail:
return -1;
}
+#define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
+/**
+ * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct udphdr *udp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ int i;
+ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+ memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
+
+ ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+ udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->daddr = fd_data->dst_ip[0];
+ udp->dest = fd_data->dst_port;
+ ip->saddr = fd_data->src_ip[0];
+ udp->source = fd_data->src_port;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
+ i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
+ fd_data->pctype = i;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
+/**
+ * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct tcphdr *tcp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ /* 54-byte dummy TCP/IPv4 frame: 14-byte Ethernet header, 20-byte IPv4 header (protocol 0x06 = TCP), 20 TCP header bytes */
+ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
+ 0x0, 0x72, 0, 0, 0, 0};
+
+ memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
+
+ ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+ tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->daddr = fd_data->dst_ip[0];
+ tcp->dest = fd_data->dst_port;
+ ip->saddr = fd_data->src_ip[0];
+ tcp->source = fd_data->src_port;
+
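+ /* a sideband TCP/IPv4 rule is being added, so force ATR off (see the message below) */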
+ if (add) {
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ }
+ }
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Always returns -EOPNOTSUPP
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ return -EOPNOTSUPP;
+}
+
+#define I40E_IP_DUMMY_PACKET_LEN 34
+/**
+ * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ int i;
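+ /* 34-byte dummy IPv4 frame: 14-byte Ethernet header plus a 20-byte IPv4 header; the protocol field is overwritten below */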
+ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
+ ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+
+ ip->saddr = fd_data->src_ip[0];
+ ip->daddr = fd_data->dst_ip[0];
+ ip->protocol = 0;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+ fd_data->pctype = i;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir - Build raw packets to add/del fdir filter
+ * @vsi: pointer to the targeted VSI
+ * @input: flow director filter spec to add or delete
+ * @add: true adds a filter, false removes it
+ *
+ **/
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 *raw_packet;
+ int ret;
+
+ /* Populate the Flow Director that we have at the moment
+ * and allocate the raw packet buffer for the calling functions
+ */
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+ if (!raw_packet)
+ return -ENOMEM;
+
+ switch (input->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet,
+ add);
+ break;
+ case UDP_V4_FLOW:
+ ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet,
+ add);
+ break;
+ case SCTP_V4_FLOW:
+ ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet,
+ add);
+ break;
+ case IPV4_FLOW:
+ ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet,
+ add);
+ break;
+ case IP_USER_FLOW:
+ switch (input->ip4_proto) {
+ case IPPROTO_TCP:
+ ret = i40e_add_del_fdir_tcpv4(vsi, input,
+ raw_packet, add);
+ break;
+ case IPPROTO_UDP:
+ ret = i40e_add_del_fdir_udpv4(vsi, input,
+ raw_packet, add);
+ break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctpv4(vsi, input,
+ raw_packet, add);
+ break;
+ default:
+ ret = i40e_add_del_fdir_ipv4(vsi, input,
+ raw_packet, add);
+ break;
+ }
+ break;
+ default:
+ dev_info(&pf->pdev->dev, "Could not specify spec type %d",
+ input->flow_type);
+ ret = -EINVAL;
+ }
+
+ kfree(raw_packet);
+ return ret;
+}
+
/**
* i40e_fd_handle_status - check the Programming Status for FD
* @rx_ring: the Rx ring for this descriptor
- * @qw: the descriptor data
+ * @rx_desc: the Rx programming status descriptor, not a packet descriptor
* @prog_id: the id originally used for programming
*
* This is used to verify if the FD programming or invalidation
* requested by SW to the HW is successful or not and take actions accordingly.
**/
-static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, u8 prog_id)
{
- struct pci_dev *pdev = rx_ring->vsi->back->pdev;
+ struct i40e_pf *pf = rx_ring->vsi->back;
+ struct pci_dev *pdev = pf->pdev;
+ u32 fcnt_prog, fcnt_avail;
u32 error;
+ u64 qw;
+ qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
- /* for now just print the Status */
- dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
- prog_id, error);
+ if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
+ rx_desc->wb.qword0.hi_dword.fd_id);
+
+ /* filter programming failed most likely due to table full */
+ fcnt_prog = i40e_get_current_fd_count(pf);
+ fcnt_avail = pf->hw.fdir_shared_filter_count +
+ pf->fdir_pf_filter_count;
+
+ /* If ATR is running fcnt_prog can quickly change,
+ * if we are very close to full, it makes sense to disable
+ * FD ATR/SB and then re-enable it when there is room.
+ */
+ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
+ /* Turn off ATR first */
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
+ pf->auto_disable_flags |=
+ I40E_FLAG_FD_ATR_ENABLED;
+ pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
+ } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+ pf->auto_disable_flags |=
+ I40E_FLAG_FD_SB_ENABLED;
+ pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
+ }
+ } else {
+ dev_info(&pdev->dev, "FD filter programming error");
+ }
+ } else if (error ==
+ (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
+ rx_desc->wb.qword0.hi_dword.fd_id);
+ }
}
/**
@@ -315,6 +620,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
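+ /* HW writes the head value into the u32 placed just past the last descriptor */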
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: tx ring to clean
* @budget: how many cleans we're allowed
@@ -325,6 +644,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
unsigned int total_packets = 0;
unsigned int total_bytes = 0;
@@ -333,6 +653,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_desc = I40E_TX_DESC(tx_ring, i);
i -= tx_ring->count;
+ tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
+
do {
struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
@@ -343,9 +665,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
- /* if the descriptor isn't done, no work yet to do */
- if (!(eop_desc->cmd_type_offset_bsz &
- cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
break;
/* clear next_to_watch to prevent false hangs */
@@ -577,7 +898,7 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
- i40e_fd_handle_status(rx_ring, qw, id);
+ i40e_fd_handle_status(rx_ring, rx_desc, id);
}
/**
@@ -601,6 +922,10 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ /* add u32 for head writeback, align after this takes care of
+ * guaranteeing this is at least one cache line in size
+ */
+ tx_ring->size += sizeof(u32);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
@@ -892,7 +1217,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* likely incorrect csum if alternate IP extention headers found */
+ /* likely incorrect csum if alternate IP extension headers found */
if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
return;
@@ -956,6 +1281,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring,
}
/**
+ * i40e_ptype_to_hash - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+ if (!decoded.known)
+ return PKT_HASH_TYPE_NONE;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ return PKT_HASH_TYPE_L4;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+ return PKT_HASH_TYPE_L3;
+ else
+ return PKT_HASH_TYPE_L2;
+}
+
+/**
* i40e_clean_rx_irq - Reclaim resources after receive completes
* @rx_ring: rx ring to clean
* @budget: how many cleans we're allowed
@@ -972,8 +1320,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
+ u8 rx_ptype;
u64 qword;
- u16 rx_ptype;
+
+ if (budget <= 0)
+ return 0;
rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
@@ -1087,7 +1438,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
goto next_desc;
}
- skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+ skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+ i40e_ptype_to_hash(rx_ptype));
if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
@@ -1246,8 +1598,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!tx_ring->atr_sample_rate)
return;
- tx_ring->atr_count++;
-
/* snag network header to get L4 type and address */
hdr.network = skb_network_header(skb);
@@ -1269,8 +1619,17 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
th = (struct tcphdr *)(hdr.network + hlen);
- /* sample on all syn/fin packets or once every atr sample rate */
- if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate))
+ /* Due to lack of space, no more new filters can be programmed */
+ if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ return;
+
+ tx_ring->atr_count++;
+
+ /* sample on all syn/fin/rst packets or once every atr sample rate */
+ if (!th->fin &&
+ !th->syn &&
+ !th->rst &&
+ (tx_ring->atr_count < tx_ring->atr_sample_rate))
return;
tx_ring->atr_count = 0;
@@ -1294,7 +1653,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
- dtype_cmd |= th->fin ?
+ dtype_cmd |= (th->fin || th->rst) ?
(I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
(I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
@@ -1596,7 +1955,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
struct i40e_tx_context_desc *context_desc;
int i = tx_ring->next_to_use;
- if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+ !cd_tunneling && !cd_l2tag2)
return;
/* grab the next descriptor */
@@ -1707,9 +2067,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+ /* Place RS bit on last descriptor of any packet that spans across the
+ * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
+ */
+#define WB_STRIDE 0x3
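+ /* EOP alone when the packet starts and ends within one 4-descriptor stride and its last descriptor is not on the stride boundary; otherwise EOP plus RS */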
+ if (((i & WB_STRIDE) != WB_STRIDE) &&
+ (first <= &tx_ring->tx_bi[i]) &&
+ (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ } else {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ }
netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
@@ -1812,7 +2186,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
* + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
- * + 2 desc gap to keep tail from touching head,
+ * + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor,
* otherwise try next time
*/
@@ -1823,7 +2197,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
count += skb_shinfo(skb)->nr_frags;
#endif
count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 181a825d3160..71a968fe557f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -91,6 +91,7 @@ enum i40e_debug_mask {
I40E_DEBUG_FLOW = 0x00000200,
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -458,6 +459,10 @@ union i40e_32byte_rx_desc {
union {
__le32 rss; /* RSS Hash */
__le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
} hi_dword;
} qword0;
struct {
@@ -698,7 +703,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
enum i40e_rx_prog_status_desc_error_bits {
/* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
- I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
};
@@ -1010,6 +1015,11 @@ struct i40e_hw_port_stats {
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
+ /* EEE LPI */
+ bool tx_lpi_status;
+ bool rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
};
/* Checksum and Shadow RAM pointers */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b9d1c1c8ca5a..02c11a7f7d29 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -69,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
struct i40e_pf *pf = vf->pf;
- return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
+ return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}
/***********************vf resource mgmt routines*****************/
@@ -126,8 +126,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
else
reg_idx = I40E_VPINT_LNKLSTN(
- (pf->hw.func_caps.num_msix_vectors_vf
- * vf->vf_id) + (vector_id - 1));
+ ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
+ (vector_id - 1));
if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
/* Special case - No queues mapped on this vector */
@@ -230,6 +230,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
tx_ctx.qlen = info->ring_len;
tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
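+ /* point head write-back at the u32 the VF driver leaves just past its descriptor ring */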
+ tx_ctx.head_wb_ena = 1;
+ tx_ctx.head_wb_addr = info->dma_ring_addr +
+ (info->ring_len * sizeof(struct i40e_tx_desc));
/* clear the context in the HMC */
ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -408,18 +411,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
"Could not allocate VF broadcast filter\n");
}
- if (!f) {
- dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
- ret = -ENOMEM;
- goto error_alloc_vsi_res;
- }
-
/* program mac filter */
ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
+ if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
- goto error_alloc_vsi_res;
- }
error_alloc_vsi_res:
return ret;
@@ -514,7 +509,8 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
vf->lan_vsi_index = 0;
vf->lan_vsi_id = 0;
}
- msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
/* disable interrupts so the VF starts in a known state */
for (i = 0; i < msix_vf; i++) {
/* format is same for both registers */
@@ -679,9 +675,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
complete_reset:
/* reallocate vf resources to reset the VSI state */
i40e_free_vf_res(vf);
- mdelay(10);
i40e_alloc_vf_res(vf);
i40e_enable_vf_mappings(vf);
+ set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -847,7 +843,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
*
* allocate vf resources
**/
-static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
struct i40e_vf *vfs;
int i, ret = 0;
@@ -855,16 +851,18 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* Disable interrupt 0 so we don't try to handle the VFLR. */
i40e_irq_dynamic_disable_icr0(pf);
- ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "pci_enable_sriov failed with error %d!\n", ret);
- pf->num_alloc_vfs = 0;
- goto err_iov;
+ /* Check to see if we're just allocating resources for extant VFs */
+ if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to enable SR-IOV, error %d.\n", ret);
+ pf->num_alloc_vfs = 0;
+ goto err_iov;
+ }
}
-
/* allocate memory */
- vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
+ vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
if (!vfs) {
ret = -ENOMEM;
goto err_alloc;
@@ -1776,7 +1774,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
- int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+ unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
struct i40e_vf *vf;
int ret;
@@ -1873,7 +1871,8 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* clear the bit in GLGEN_VFLRSTAT */
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
- i40e_reset_vf(vf, true);
+ if (!test_bit(__I40E_DOWN, &pf->state))
+ i40e_reset_vf(vf, true);
}
}
@@ -1924,15 +1923,28 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
struct i40e_virtchnl_pf_event pfe;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf = pf->vf;
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ int i;
pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
- pfe.event_data.link_event.link_status =
- pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
- pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
-
- i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
- (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
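+ /* notify each VF individually: report a forced link state if set, otherwise the actual PHY link state */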
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (vf->link_forced) {
+ pfe.event_data.link_event.link_status = vf->link_up;
+ pfe.event_data.link_event.link_speed =
+ (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+ } else {
+ pfe.event_data.link_event.link_status =
+ ls->link_info & I40E_AQ_LINK_UP;
+ pfe.event_data.link_event.link_speed = ls->link_speed;
+ }
+ i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe, sizeof(pfe),
+ NULL);
+ vf++;
+ }
}
/**
@@ -2197,3 +2209,64 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
error_param:
return ret;
}
+
+/**
+ * i40e_ndo_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @link: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_virtchnl_pf_event pfe;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ pfe.event_data.link_event.link_status =
+ pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
+ pfe.event_data.link_event.link_speed =
+ pf->hw.phy.link_info.link_speed;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ pfe.event_data.link_event.link_status = true;
+ pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ pfe.event_data.link_event.link_status = false;
+ pfe.event_data.link_event.link_speed = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error_out;
+ }
+ /* Notify the VF of its new link state */
+ i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe, sizeof(pfe), NULL);
+
+error_out:
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index cc1feee36e12..389c47f396d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,10 +98,13 @@ struct i40e_vf {
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
+ bool link_forced;
+ bool link_up; /* only valid if vf link is forced */
};
void i40e_free_vfs(struct i40e_pf *pf);
int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
@@ -115,6 +118,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index f7cea1bca38d..97662b6bd98a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1229,7 +1229,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
- __le32 tenant_id ;
+ __le32 tenant_id;
u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7b13953b28c4..ae084378faab 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -160,6 +160,372 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
}
+/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40evf_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ I40E_PTT_UNUSED_ENTRY(154),
+ I40E_PTT_UNUSED_ENTRY(155),
+ I40E_PTT_UNUSED_ENTRY(156),
+ I40E_PTT_UNUSED_ENTRY(157),
+ I40E_PTT_UNUSED_ENTRY(158),
+ I40E_PTT_UNUSED_ENTRY(159),
+
+ I40E_PTT_UNUSED_ENTRY(160),
+ I40E_PTT_UNUSED_ENTRY(161),
+ I40E_PTT_UNUSED_ENTRY(162),
+ I40E_PTT_UNUSED_ENTRY(163),
+ I40E_PTT_UNUSED_ENTRY(164),
+ I40E_PTT_UNUSED_ENTRY(165),
+ I40E_PTT_UNUSED_ENTRY(166),
+ I40E_PTT_UNUSED_ENTRY(167),
+ I40E_PTT_UNUSED_ENTRY(168),
+ I40E_PTT_UNUSED_ENTRY(169),
+
+ I40E_PTT_UNUSED_ENTRY(170),
+ I40E_PTT_UNUSED_ENTRY(171),
+ I40E_PTT_UNUSED_ENTRY(172),
+ I40E_PTT_UNUSED_ENTRY(173),
+ I40E_PTT_UNUSED_ENTRY(174),
+ I40E_PTT_UNUSED_ENTRY(175),
+ I40E_PTT_UNUSED_ENTRY(176),
+ I40E_PTT_UNUSED_ENTRY(177),
+ I40E_PTT_UNUSED_ENTRY(178),
+ I40E_PTT_UNUSED_ENTRY(179),
+
+ I40E_PTT_UNUSED_ENTRY(180),
+ I40E_PTT_UNUSED_ENTRY(181),
+ I40E_PTT_UNUSED_ENTRY(182),
+ I40E_PTT_UNUSED_ENTRY(183),
+ I40E_PTT_UNUSED_ENTRY(184),
+ I40E_PTT_UNUSED_ENTRY(185),
+ I40E_PTT_UNUSED_ENTRY(186),
+ I40E_PTT_UNUSED_ENTRY(187),
+ I40E_PTT_UNUSED_ENTRY(188),
+ I40E_PTT_UNUSED_ENTRY(189),
+
+ I40E_PTT_UNUSED_ENTRY(190),
+ I40E_PTT_UNUSED_ENTRY(191),
+ I40E_PTT_UNUSED_ENTRY(192),
+ I40E_PTT_UNUSED_ENTRY(193),
+ I40E_PTT_UNUSED_ENTRY(194),
+ I40E_PTT_UNUSED_ENTRY(195),
+ I40E_PTT_UNUSED_ENTRY(196),
+ I40E_PTT_UNUSED_ENTRY(197),
+ I40E_PTT_UNUSED_ENTRY(198),
+ I40E_PTT_UNUSED_ENTRY(199),
+
+ I40E_PTT_UNUSED_ENTRY(200),
+ I40E_PTT_UNUSED_ENTRY(201),
+ I40E_PTT_UNUSED_ENTRY(202),
+ I40E_PTT_UNUSED_ENTRY(203),
+ I40E_PTT_UNUSED_ENTRY(204),
+ I40E_PTT_UNUSED_ENTRY(205),
+ I40E_PTT_UNUSED_ENTRY(206),
+ I40E_PTT_UNUSED_ENTRY(207),
+ I40E_PTT_UNUSED_ENTRY(208),
+ I40E_PTT_UNUSED_ENTRY(209),
+
+ I40E_PTT_UNUSED_ENTRY(210),
+ I40E_PTT_UNUSED_ENTRY(211),
+ I40E_PTT_UNUSED_ENTRY(212),
+ I40E_PTT_UNUSED_ENTRY(213),
+ I40E_PTT_UNUSED_ENTRY(214),
+ I40E_PTT_UNUSED_ENTRY(215),
+ I40E_PTT_UNUSED_ENTRY(216),
+ I40E_PTT_UNUSED_ENTRY(217),
+ I40E_PTT_UNUSED_ENTRY(218),
+ I40E_PTT_UNUSED_ENTRY(219),
+
+ I40E_PTT_UNUSED_ENTRY(220),
+ I40E_PTT_UNUSED_ENTRY(221),
+ I40E_PTT_UNUSED_ENTRY(222),
+ I40E_PTT_UNUSED_ENTRY(223),
+ I40E_PTT_UNUSED_ENTRY(224),
+ I40E_PTT_UNUSED_ENTRY(225),
+ I40E_PTT_UNUSED_ENTRY(226),
+ I40E_PTT_UNUSED_ENTRY(227),
+ I40E_PTT_UNUSED_ENTRY(228),
+ I40E_PTT_UNUSED_ENTRY(229),
+
+ I40E_PTT_UNUSED_ENTRY(230),
+ I40E_PTT_UNUSED_ENTRY(231),
+ I40E_PTT_UNUSED_ENTRY(232),
+ I40E_PTT_UNUSED_ENTRY(233),
+ I40E_PTT_UNUSED_ENTRY(234),
+ I40E_PTT_UNUSED_ENTRY(235),
+ I40E_PTT_UNUSED_ENTRY(236),
+ I40E_PTT_UNUSED_ENTRY(237),
+ I40E_PTT_UNUSED_ENTRY(238),
+ I40E_PTT_UNUSED_ENTRY(239),
+
+ I40E_PTT_UNUSED_ENTRY(240),
+ I40E_PTT_UNUSED_ENTRY(241),
+ I40E_PTT_UNUSED_ENTRY(242),
+ I40E_PTT_UNUSED_ENTRY(243),
+ I40E_PTT_UNUSED_ENTRY(244),
+ I40E_PTT_UNUSED_ENTRY(245),
+ I40E_PTT_UNUSED_ENTRY(246),
+ I40E_PTT_UNUSED_ENTRY(247),
+ I40E_PTT_UNUSED_ENTRY(248),
+ I40E_PTT_UNUSED_ENTRY(249),
+
+ I40E_PTT_UNUSED_ENTRY(250),
+ I40E_PTT_UNUSED_ENTRY(251),
+ I40E_PTT_UNUSED_ENTRY(252),
+ I40E_PTT_UNUSED_ENTRY(253),
+ I40E_PTT_UNUSED_ENTRY(254),
+ I40E_PTT_UNUSED_ENTRY(255)
+};
+
/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
@@ -199,8 +565,7 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
details.async = true;
cmd_details = &details;
}
- status = i40evf_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
- msglen, cmd_details);
+ status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
return status;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 7841573a58c9..97ab8c2b76f8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -63,6 +63,13 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
+
+static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40evf_ptype_lookup[ptype];
+}
+
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ffdb01d853db..53be5f44d015 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -24,6 +24,7 @@
#include <linux/prefetch.h>
#include "i40evf.h"
+#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
@@ -169,6 +170,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
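+ /* HW writes the head value into the u32 placed just past the last descriptor */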
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: tx ring to clean
* @budget: how many cleans we're allowed
@@ -179,6 +194,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
unsigned int total_packets = 0;
unsigned int total_bytes = 0;
@@ -187,6 +203,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_desc = I40E_TX_DESC(tx_ring, i);
i -= tx_ring->count;
+ tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
+
do {
struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
@@ -197,9 +215,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
- /* if the descriptor isn't done, no work yet to do */
- if (!(eop_desc->cmd_type_offset_bsz &
- cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
break;
/* clear next_to_watch to prevent false hangs */
@@ -431,6 +448,10 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ /* add u32 for head writeback, align after this takes care of
+ * guaranteeing this is at least one cache line in size
+ */
+ tx_ring->size += sizeof(u32);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
@@ -722,7 +743,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* likely incorrect csum if alternate IP extention headers found */
+ /* likely incorrect csum if alternate IP extension headers found */
if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
return;
@@ -786,6 +807,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring,
}
/**
+ * i40e_ptype_to_hash - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+ if (!decoded.known)
+ return PKT_HASH_TYPE_NONE;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ return PKT_HASH_TYPE_L4;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+ return PKT_HASH_TYPE_L3;
+ else
+ return PKT_HASH_TYPE_L2;
+}
+
+/**
* i40e_clean_rx_irq - Reclaim resources after receive completes
* @rx_ring: rx ring to clean
* @budget: how many cleans we're allowed
@@ -802,13 +846,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
+ u8 rx_ptype;
u64 qword;
- u16 rx_ptype;
rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
union i40e_rx_desc *next_rxd;
@@ -912,7 +956,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
goto next_desc;
}
- skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+ skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+ i40e_ptype_to_hash(rx_ptype));
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -1241,7 +1286,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
struct i40e_tx_context_desc *context_desc;
int i = tx_ring->next_to_use;
- if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+ !cd_tunneling && !cd_l2tag2)
return;
/* grab the next descriptor */
@@ -1352,9 +1398,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+ /* Place RS bit on last descriptor of any packet that spans across the
+ * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
+ */
+#define WB_STRIDE 0x3
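+ /* EOP alone when the packet starts and ends within one 4-descriptor stride and its last descriptor is not on the stride boundary; otherwise EOP plus RS */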
+ if (((i & WB_STRIDE) != WB_STRIDE) &&
+ (first <= &tx_ring->tx_bi[i]) &&
+ (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ } else {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ }
netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
@@ -1457,7 +1517,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
* + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
- * + 2 desc gap to keep tail from touching head,
+ * + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor,
* otherwise try next time
*/
@@ -1468,7 +1528,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
count += skb_shinfo(skb)->nr_frags;
#endif
count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 3bffac06592f..4673b3381edd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -64,8 +64,6 @@
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define ETH_ALEN 6
-
/* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \
@@ -90,6 +88,7 @@ enum i40e_debug_mask {
I40E_DEBUG_FLOW = 0x00000200,
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -466,6 +465,10 @@ union i40e_32byte_rx_desc {
union {
__le32 rss; /* RSS Hash */
__le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
} hi_dword;
} qword0;
struct {
@@ -706,7 +709,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
enum i40e_rx_prog_status_desc_error_bits {
/* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
- I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
};
@@ -1018,6 +1021,11 @@ struct i40e_hw_port_stats {
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
+ /* EEE LPI */
+ bool tx_lpi_status;
+ bool rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
};
/* Checksum and Shadow RAM pointers */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index ff6529b288a1..807807d62387 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,6 @@
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/udp.h>
-#include <linux/sctp.h>
-
#include "i40e_type.h"
#include "i40e_virtchnl.h"
@@ -164,15 +162,14 @@ struct i40evf_vlan_filter {
/* Driver state. The order of these is important! */
enum i40evf_state_t {
__I40EVF_STARTUP, /* driver loaded, probe complete */
- __I40EVF_FAILED, /* PF communication failed. Fatal. */
__I40EVF_REMOVE, /* driver is being unloaded */
__I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
__I40EVF_INIT_SW, /* got resources, setting up structs */
+ __I40EVF_RESETTING, /* in reset */
/* Below here, watchdog is running */
__I40EVF_DOWN, /* ready, can be opened */
__I40EVF_TESTING, /* in ethtool self-test */
- __I40EVF_RESETTING, /* in reset */
__I40EVF_RUNNING, /* opened, working */
};
@@ -185,47 +182,25 @@ enum i40evf_critical_section_t {
/* board specific private data structure */
struct i40evf_adapter {
struct timer_list watchdog_timer;
- struct vlan_group *vlgrp;
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work init_task;
struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
struct list_head vlan_filter_list;
- char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
-
- /* Interrupt Throttle Rate */
- u32 itr_setting;
- u16 eitr_low;
- u16 eitr_high;
+ char misc_vector_name[IFNAMSIZ + 9];
/* TX */
struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
- u64 restart_queue;
- u64 hw_csum_tx_good;
- u64 lsc_int;
- u64 hw_tso_ctxt;
- u64 hw_tso6_ctxt;
u32 tx_timeout_count;
struct list_head mac_filter_list;
-#ifdef DEBUG
- bool detect_tx_hung;
-#endif /* DEBUG */
/* RX */
struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
- int txd_count;
- int rxd_count;
u64 hw_csum_rx_error;
- u64 hw_rx_no_dma_resources;
- u64 hw_csum_rx_good;
- u64 non_eop_descs;
int num_msix_vectors;
struct msix_entry *msix_entries;
- u64 rx_hdr_split;
-
- u32 init_state;
- volatile unsigned long flags;
+ u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
@@ -234,6 +209,9 @@ struct i40evf_adapter {
#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
+#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
+#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10)
/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
@@ -251,21 +229,19 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
- /* structs defined in i40e_vf.h */
- struct i40e_hw hw;
+ struct i40e_hw hw; /* defined in i40e_type.h */
enum i40evf_state_t state;
volatile unsigned long crit_section;
- u64 tx_busy;
struct work_struct watchdog_task;
bool netdev_registered;
- bool dev_closed;
bool link_up;
enum i40e_virtchnl_ops current_op;
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -276,11 +252,6 @@ struct i40evf_adapter {
u32 aq_wait_count;
};
-struct i40evf_info {
- enum i40e_mac_type mac;
- unsigned int flags;
-};
-
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
@@ -315,6 +286,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter);
void i40evf_del_vlans(struct i40evf_adapter *adapter);
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_request_reset(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index b0b1f4bf5ac0..8b0db1ce179c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -241,6 +241,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
+ int i;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
@@ -256,12 +257,14 @@ static int i40evf_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == adapter->txd_count) &&
- (new_rx_count == adapter->rxd_count))
+ if ((new_tx_count == adapter->tx_rings[0]->count) &&
+ (new_rx_count == adapter->rx_rings[0]->count))
return 0;
- adapter->txd_count = new_tx_count;
- adapter->rxd_count = new_rx_count;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->tx_rings[i]->count = new_tx_count;
+ adapter->rx_rings[i]->count = new_rx_count;
+ }
if (netif_running(netdev))
i40evf_reinit_locked(adapter);
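For reference, ALIGN() in the hunk above rounds the requested descriptor count up to the driver's required multiple before it is written into every ring. A tiny standalone illustration of that rounding; the multiple of 32 is an assumed value for demonstration, not taken from the i40evf headers.

#include <stdio.h>

/* Same rounding the kernel ALIGN() macro performs for power-of-two multiples. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int multiple = 32;          /* assumed descriptor multiple */
	unsigned int requested[] = { 100, 512, 1000 };

	for (unsigned int i = 0; i < 3; i++)
		printf("requested %4u -> aligned %4u\n",
		       requested[i], ALIGN_UP(requested[i], multiple));
	return 0;
}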
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f5caf4419243..e35e66ffa782 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -31,10 +31,10 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710 X710 Virtual Function Network Driver";
-#define DRV_VERSION "0.9.11"
+#define DRV_VERSION "0.9.16"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
- "Copyright (c) 2013 Intel Corporation.";
+ "Copyright (c) 2013 - 2014 Intel Corporation.";
/* i40evf_pci_tbl - PCI Device ID Table
*
@@ -167,9 +167,11 @@ static void i40evf_tx_timeout(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
-
- /* Do the reset outside of interrupt context */
- schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+ schedule_work(&adapter->reset_task);
+ }
}
/**
@@ -211,6 +213,9 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
int i;
struct i40e_hw *hw = &adapter->hw;
+ if (!adapter->msix_entries)
+ return;
+
for (i = 1; i < adapter->num_msix_vectors; i++) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
synchronize_irq(adapter->msix_entries[i].vector);
@@ -511,12 +516,14 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int err;
- sprintf(adapter->name[0], "i40evf:mbx");
+ sprintf(adapter->misc_vector_name, "i40evf:mbx");
err = request_irq(adapter->msix_entries[0].vector,
- &i40evf_msix_aq, 0, adapter->name[0], netdev);
+ &i40evf_msix_aq, 0,
+ adapter->misc_vector_name, netdev);
if (err) {
dev_err(&adapter->pdev->dev,
- "request_irq for msix_aq failed: %d\n", err);
+ "request_irq for %s failed: %d\n",
+ adapter->misc_vector_name, err);
free_irq(adapter->msix_entries[0].vector, netdev);
}
return err;
@@ -963,16 +970,23 @@ void i40evf_down(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct i40evf_mac_filter *f;
- /* remove all MAC filters from the VSI */
+ /* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
}
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
- /* disable receives */
- adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
- mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- msleep(20);
-
+ /* remove all VLAN filters */
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ f->remove = true;
+ }
+ if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
+ adapter->state != __I40EVF_RESETTING) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ /* disable receives */
+ adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ msleep(20);
+ }
netif_tx_disable(netdev);
netif_tx_stop_all_queues(netdev);
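The teardown above now only marks MAC and VLAN filters for removal and defers the actual delete to the admin-queue task, and it skips queueing admin-queue work entirely when PF communication is down or a reset is in progress. A kernel-style sketch of that mark-then-defer idiom; struct sample_filter is illustrative, not a driver structure.

#include <linux/list.h>
#include <linux/types.h>

struct sample_filter {
	struct list_head list;
	u8 addr[6];
	bool remove;   /* picked up later by the admin-queue task */
};

/* Mark every filter on the list; the actual deletion happens asynchronously. */
static void mark_all_for_removal(struct list_head *filter_list)
{
	struct sample_filter *f;

	list_for_each_entry(f, filter_list, list)
		f->remove = true;
}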
@@ -1124,8 +1138,8 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
* than CPU's. So let's be conservative and only ask for
* (roughly) twice the number of vectors as there are CPU's.
*/
- v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
- v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1);
+ v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
+ v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter.
@@ -1291,19 +1305,47 @@ static void i40evf_watchdog_task(struct work_struct *work)
watchdog_task);
struct i40e_hw *hw = &adapter->hw;
- if (adapter->state < __I40EVF_DOWN)
+ if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ goto restart_watchdog;
+
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+ dev_info(&adapter->pdev->dev, "Checking for redemption\n");
+ if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
+ /* A chance for redemption! */
+ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
+ adapter->state = __I40EVF_STARTUP;
+ adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+ schedule_delayed_work(&adapter->init_task, 10);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ /* Don't reschedule the watchdog, since we've restarted
+ * the init task. When init_task contacts the PF and
+ * gets everything set up again, it'll restart the
+ * watchdog for us. Down, boy. Sit. Stay. Woof.
+ */
+ return;
+ }
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
+ }
- if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ if ((adapter->state < __I40EVF_DOWN) ||
+ (adapter->flags & I40EVF_FLAG_RESET_PENDING))
goto watchdog_done;
- /* check for unannounced reset */
- if ((adapter->state != __I40EVF_RESETTING) &&
+ /* check for reset */
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
(rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
adapter->state = __I40EVF_RESETTING;
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
+ dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
schedule_work(&adapter->reset_task);
- dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n",
- __func__);
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
}
@@ -1358,16 +1400,25 @@ static void i40evf_watchdog_task(struct work_struct *work)
i40evf_irq_enable(adapter, true);
i40evf_fire_sw_int(adapter, 0xFF);
+
watchdog_done:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+restart_watchdog:
if (adapter->aq_required)
mod_timer(&adapter->watchdog_timer,
jiffies + msecs_to_jiffies(20));
else
mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
schedule_work(&adapter->adminq_task);
}
+static int next_queue(struct i40evf_adapter *adapter, int j)
+{
+ j += 1;
+
+ return j >= adapter->vsi_res->num_queue_pairs ? 0 : j;
+}
+
/**
* i40evf_configure_rss - Prepare for RSS if used
* @adapter: board private structure
@@ -1398,19 +1449,19 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
/* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
- if (j == adapter->vsi_res->num_queue_pairs)
- j = 0;
- /* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (j &
- ((0x1 << 8) - 1));
- /* On i = 3, we have 4 entries in lut; write to the register */
- if ((i & 3) == 3)
- wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ j = adapter->vsi_res->num_queue_pairs;
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+ j = next_queue(adapter, j);
+ lut = j;
+ j = next_queue(adapter, j);
+ lut |= j << 8;
+ j = next_queue(adapter, j);
+ lut |= j << 16;
+ j = next_queue(adapter, j);
+ lut |= j << 24;
+ wr32(hw, I40E_VFQF_HLUT(i), lut);
}
i40e_flush(hw);
}
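Each I40E_VFQF_HLUT register packs four one-byte LUT entries, so the loop above fills the table four queue indices at a time in round-robin order. A standalone sketch of that packing; the queue and register counts are made-up values for illustration.

#include <stdio.h>
#include <stdint.h>

/* Advance a queue index round-robin over num_queues. */
static unsigned int next_queue(unsigned int j, unsigned int num_queues)
{
	return (j + 1) >= num_queues ? 0 : j + 1;
}

int main(void)
{
	unsigned int num_queues = 3;     /* assumed queue count */
	unsigned int num_regs = 4;       /* a few LUT registers for illustration */
	unsigned int j = num_queues;     /* first next_queue() call yields 0 */

	for (unsigned int i = 0; i < num_regs; i++) {
		uint32_t lut = 0;

		/* four byte lanes per 32-bit register, low byte first */
		for (int lane = 0; lane < 4; lane++) {
			j = next_queue(j, num_queues);
			lut |= (uint32_t)j << (8 * lane);
		}
		printf("HLUT[%u] = 0x%08x\n", i, lut);
	}
	return 0;
}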
+#define I40EVF_RESET_WAIT_MS 100
+#define I40EVF_RESET_WAIT_COUNT 200
/**
* i40evf_reset_task - Call-back task to handle hardware reset
* @work: pointer to work_struct
@@ -1421,8 +1472,9 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
**/
static void i40evf_reset_task(struct work_struct *work)
{
- struct i40evf_adapter *adapter =
- container_of(work, struct i40evf_adapter, reset_task);
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ reset_task);
struct i40e_hw *hw = &adapter->hw;
int i = 0, err;
uint32_t rstat_val;
@@ -1431,21 +1483,61 @@ static void i40evf_reset_task(struct work_struct *work)
&adapter->crit_section))
udelay(500);
- /* wait until the reset is complete */
- for (i = 0; i < 20; i++) {
+ if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
+ dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+ i40evf_request_reset(adapter);
+ }
+
+ /* poll until we see the reset actually happen */
+ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val == I40E_VFR_COMPLETED)
+ if (rstat_val != I40E_VFR_VFACTIVE) {
+ dev_info(&adapter->pdev->dev, "Reset now occurring\n");
break;
- else
- mdelay(100);
+ } else {
+ msleep(I40EVF_RESET_WAIT_MS);
+ }
+ }
+ if (i == I40EVF_RESET_WAIT_COUNT) {
+ dev_err(&adapter->pdev->dev, "Reset was not detected\n");
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ goto continue_reset; /* act like the reset happened */
+ }
+
+ /* wait until the reset is complete and the PF is responding to us */
+ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (rstat_val == I40E_VFR_VFACTIVE) {
+ dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
+ break;
+ } else {
+ msleep(I40EVF_RESET_WAIT_MS);
+ }
}
- if (i == 20) {
+ if (i == I40EVF_RESET_WAIT_COUNT) {
/* reset never finished */
- dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
- __func__, rstat_val);
- /* carry on anyway */
+ dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+ rstat_val);
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+
+ if (netif_running(adapter->netdev))
+ i40evf_close(adapter->netdev);
+
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_queues(adapter);
+ kfree(adapter->vf_res);
+ i40evf_shutdown_adminq(hw);
+ adapter->netdev->flags &= ~IFF_UP;
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return; /* Do not attempt to reinit. It's dead, Jim. */
}
+
+continue_reset:
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
i40evf_down(adapter);
adapter->state = __I40EVF_RESETTING;
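The reset task above now polls I40E_VFGEN_RSTAT in two bounded loops: first until the status leaves VFACTIVE (the reset has really started), then until it returns to VFACTIVE (the PF is back). A hedged kernel-style sketch of that bounded-poll shape; the callback and names are placeholders, not driver functions.

#include <linux/delay.h>
#include <linux/types.h>

#define POLL_INTERVAL_MS 100
#define POLL_COUNT       200

/* Poll a caller-supplied status read until it returns 'wanted' or the
 * budget runs out.  Mirrors the two bounded loops in the reset task above.
 */
static bool poll_for_status(u32 (*read_status)(void *ctx), void *ctx, u32 wanted)
{
	int i;

	for (i = 0; i < POLL_COUNT; i++) {
		if (read_status(ctx) == wanted)
			return true;
		msleep(POLL_INTERVAL_MS);
	}
	return false;	/* caller decides whether to carry on or bail out */
}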
@@ -1505,6 +1597,9 @@ static void i40evf_adminq_task(struct work_struct *work)
i40e_status ret;
u16 pending;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ return;
+
event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
if (!event.msg_buf) {
@@ -1636,6 +1731,10 @@ static int i40evf_open(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
int err;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+ dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+ return -EIO;
+ }
if (adapter->state != __I40EVF_DOWN)
return -EBUSY;
@@ -1690,8 +1789,12 @@ static int i40evf_close(struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+ if (adapter->state <= __I40EVF_DOWN)
+ return 0;
+
/* signal that we are down to the interrupt handler */
adapter->state = __I40EVF_DOWN;
+
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_down(adapter);
@@ -1842,16 +1945,18 @@ static void i40evf_init_task(struct work_struct *work)
switch (adapter->state) {
case __I40EVF_STARTUP:
/* driver loaded, probe complete */
+ adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
err = i40e_set_mac_type(hw);
if (err) {
- dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
- __func__, err);
+ dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+ err);
goto err;
}
err = i40evf_check_reset_complete(hw);
if (err) {
- dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
- __func__, err);
+ dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+ err);
goto err;
}
hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -1861,14 +1966,13 @@ static void i40evf_init_task(struct work_struct *work)
err = i40evf_init_adminq(hw);
if (err) {
- dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
- __func__, err);
+ dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+ err);
goto err;
}
err = i40evf_send_api_ver(adapter);
if (err) {
- dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
- __func__, err);
+ dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
i40evf_shutdown_adminq(hw);
goto err;
}
@@ -1876,19 +1980,21 @@ static void i40evf_init_task(struct work_struct *work)
goto restart;
break;
case __I40EVF_INIT_VERSION_CHECK:
- if (!i40evf_asq_done(hw))
+ if (!i40evf_asq_done(hw)) {
+ dev_err(&pdev->dev, "Admin queue command never completed.\n");
goto err;
+ }
/* aq msg sent, awaiting reply */
err = i40evf_verify_api_ver(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+ dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
err);
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable send config request, error %d\n",
+ dev_err(&pdev->dev, "Unable send config request (%d)\n",
err);
goto err;
}
@@ -1902,18 +2008,15 @@ static void i40evf_init_task(struct work_struct *work)
(I40E_MAX_VF_VSI *
sizeof(struct i40e_virtchnl_vsi_resource));
adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
- if (!adapter->vf_res) {
- dev_err(&pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!adapter->vf_res)
goto err;
- }
}
err = i40evf_get_vf_config(adapter);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
goto restart;
if (err) {
- dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
- __func__, err);
+ dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+ err);
goto err_alloc;
}
adapter->state = __I40EVF_INIT_SW;
@@ -1927,25 +2030,23 @@ static void i40evf_init_task(struct work_struct *work)
adapter->vsi_res = &adapter->vf_res->vsi_res[i];
}
if (!adapter->vsi_res) {
- dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+ dev_err(&pdev->dev, "No LAN VSI found\n");
goto err_alloc;
}
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
- adapter->txd_count = I40EVF_DEFAULT_TXD;
- adapter->rxd_count = I40EVF_DEFAULT_RXD;
-
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
-
- netdev->features |= NETIF_F_SG |
+ netdev->features |= NETIF_F_HIGHDMA |
+ NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_SCTP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
NETIF_F_GRO;
if (adapter->vf_res->vf_offload_flags
@@ -1956,11 +2057,13 @@ static void i40evf_init_task(struct work_struct *work)
NETIF_F_HW_VLAN_CTAG_FILTER;
}
- /* The HW MAC address was set and/or determined in sw_init */
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+ netdev->hw_features &= ~NETIF_F_RXCSUM;
+
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
- dev_info(&pdev->dev,
- "Invalid MAC address %pMAC, using random\n",
- adapter->hw.mac.addr);
+ dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+ adapter->hw.mac.addr);
random_ether_addr(adapter->hw.mac.addr);
}
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
@@ -1994,8 +2097,6 @@ static void i40evf_init_task(struct work_struct *work)
netif_carrier_off(netdev);
- strcpy(netdev->name, "eth%d");
-
adapter->vsi.id = adapter->vsi_res->vsi_id;
adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
adapter->vsi.back = adapter;
@@ -2005,9 +2106,11 @@ static void i40evf_init_task(struct work_struct *work)
adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
adapter->vsi.netdev = adapter->netdev;
- err = register_netdev(netdev);
- if (err)
- goto err_register;
+ if (!adapter->netdev_registered) {
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+ }
adapter->netdev_registered = true;
@@ -2031,7 +2134,6 @@ err_register:
i40evf_free_misc_irq(adapter);
err_sw_init:
i40evf_reset_interrupt_capability(adapter);
- adapter->state = __I40EVF_FAILED;
err_alloc:
kfree(adapter->vf_res);
adapter->vf_res = NULL;
@@ -2039,9 +2141,7 @@ err:
/* Things went into the weeds, so try again later */
if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
- if (hw->aq.asq.count)
- i40evf_shutdown_adminq(hw); /* ignore error */
- adapter->state = __I40EVF_FAILED;
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
return; /* do not reschedule */
}
schedule_delayed_work(&adapter->init_task, HZ * 3);
@@ -2084,26 +2184,20 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct i40evf_adapter *adapter = NULL;
struct i40e_hw *hw = NULL;
- int err, pci_using_dac;
+ int err;
err = pci_enable_device(pdev);
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = true;
- /* coherent mask for the same size will always succeed if
- * dma_set_mask does
- */
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- pci_using_dac = false;
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- } else {
- dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
- __func__, err);
- err = -EIO;
- goto err_dma;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
}
err = pci_request_regions(pdev, i40evf_driver_name);
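dma_set_mask_and_coherent(), used above, sets the streaming and coherent DMA masks in one call, so the separate pci_using_dac bookkeeping goes away. A minimal sketch of the 64-bit-with-32-bit-fallback pattern as it might appear in any PCI driver's probe path.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Try 64-bit DMA first; fall back to 32-bit before giving up. */
static int set_dma_masks(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err)
		return 0;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(&pdev->dev, "no usable DMA configuration\n");
	return err;
}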
@@ -2128,8 +2222,6 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -2271,6 +2363,7 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40e_hw *hw = &adapter->hw;
cancel_delayed_work_sync(&adapter->init_task);
+ cancel_work_sync(&adapter->reset_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
@@ -2278,17 +2371,15 @@ static void i40evf_remove(struct pci_dev *pdev)
}
adapter->state = __I40EVF_REMOVE;
- if (adapter->num_msix_vectors) {
+ if (adapter->msix_entries) {
i40evf_misc_irq_disable(adapter);
- del_timer_sync(&adapter->watchdog_timer);
-
- flush_scheduled_work();
-
i40evf_free_misc_irq(adapter);
-
i40evf_reset_interrupt_capability(adapter);
}
+ del_timer_sync(&adapter->watchdog_timer);
+ flush_scheduled_work();
+
if (hw->aq.asq.count)
i40evf_shutdown_adminq(hw);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e6978d79e62b..e294f012647d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -43,6 +43,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
struct i40e_hw *hw = &adapter->hw;
i40e_status err;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ return 0; /* nothing to see here, move along */
+
err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
if (err)
dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
@@ -651,6 +654,18 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
/* if the request failed, don't lock out others */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
+/**
+ * i40evf_request_reset
+ * @adapter: adapter structure
+ *
+ * Request that the PF reset this VF. No response is expected.
+ **/
+void i40evf_request_reset(struct i40evf_adapter *adapter)
+{
+ /* Don't check CURRENT_OP - this is always higher priority */
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
/**
* i40evf_virtchnl_completion
@@ -689,10 +704,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
- adapter->state = __I40EVF_RESETTING;
- schedule_work(&adapter->reset_task);
- dev_info(&adapter->pdev->dev,
- "%s: hardware reset pending\n", __func__);
+ dev_info(&adapter->pdev->dev, "PF reset warning received\n");
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+ schedule_work(&adapter->reset_task);
+ }
break;
default:
dev_err(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index f19700e285bb..5bcb2de75933 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
+# Copyright(c) 1999 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
# more details.
#
# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 06df6928f44c..fa36fe12e775 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -77,8 +76,6 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] =
{ 36, 72, 144, 1, 2, 4, 8, 16,
35, 70, 140 };
-#define E1000_82580_RXPBS_TABLE_SIZE \
- (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
/**
* igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -2308,7 +2305,7 @@ u16 igb_rxpbs_adjust_82580(u32 data)
{
u16 ret_val = 0;
- if (data < E1000_82580_RXPBS_TABLE_SIZE)
+ if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
ret_val = e1000_82580_rxpbs_table[data];
return ret_val;
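ARRAY_SIZE() from the kernel headers replaces the hand-rolled table-size macro and simply divides the array size by the element size. A one-file standalone equivalent using the same rxpbs table values.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	static const unsigned short table[] =
		{ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };

	printf("%zu entries\n", ARRAY_SIZE(table));   /* prints 11 */
	return 0;
}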
@@ -2714,13 +2711,14 @@ static const u8 e1000_emc_therm_limit[4] = {
E1000_EMC_DIODE3_THERM_LIMIT
};
+#ifdef CONFIG_IGB_HWMON
/**
* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
* @hw: pointer to hardware structure
*
* Updates the temperatures in mac.thermal_sensor_data
**/
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
u16 ets_offset;
@@ -2774,7 +2772,7 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
* Sets the thermal sensor thresholds according to the NVM map
* and save off the threshold and location values into mac.thermal_sensor_data
**/
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
u16 ets_offset;
@@ -2836,6 +2834,7 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
return status;
}
+#endif
static struct e1000_mac_operations e1000_mac_ops_82575 = {
.init_hw = igb_init_hw_82575,
.check_for_link = igb_check_for_link_82575,
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 8c2437722aad..09d78be72416 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -231,6 +230,10 @@ struct e1000_adv_tx_context_desc {
#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */
+#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
#define E1000_VLVF_ARRAY_SIZE 32
#define E1000_VLVF_VLANID_MASK 0x00000FFF
#define E1000_VLVF_POOLSEL_SHIFT 12
@@ -266,8 +269,7 @@ u16 igb_rxpbs_adjust_82580(u32 data);
s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
s32 igb_set_eee_i350(struct e1000_hw *);
s32 igb_set_eee_i354(struct e1000_hw *);
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);
#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
#define E1000_EMC_INTERNAL_DATA 0x00
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 0571b973be80..b05bf925ac72 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -44,7 +43,11 @@
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
/* Extended Device Control */
+#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Definable Pin 2 */
#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */
+#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
+
/* Physical Func Reset Done Indication */
#define E1000_CTRL_EXT_PFRSTD 0x00004000
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -191,7 +194,8 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
+#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
@@ -529,8 +533,67 @@
#define E1000_TIMINCA_16NS_SHIFT 24
-#define E1000_TSICR_TXTS 0x00000002
-#define E1000_TSIM_TXTS 0x00000002
+/* Time Sync Interrupt Cause/Mask Register Bits */
+
+#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */
+#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */
+#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */
+
+#define TSYNC_INTERRUPTS TSINTR_TXTS
+#define E1000_TSICR_TXTS TSINTR_TXTS
+
+/* TSAUXC Configuration Bits */
+#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */
+#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */
+#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 1. */
+#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */
+#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */
+
+/* SDP Configuration Bits */
+#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */
+#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */
+#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */
+#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */
+#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */
#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
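Going only by the comments on the new TSSDP/TSAUXC definitions added above, routing a Tsync target time out on an SDP pin would combine the pin's output-select and enable bits in E1000_TSSDP and set the pin direction in CTRL. The sketch below is a guess at that sequence, not code from this patch; the field mask and the ordering are assumptions.

/* Assumes the driver's usual e1000_hw.h/e1000_regs.h includes and wr32()/rd32(). */
static void route_tt0_to_sdp0(struct e1000_hw *hw)
{
	u32 ctrl = rd32(E1000_CTRL);
	u32 tssdp = rd32(E1000_TSSDP);

	ctrl |= E1000_CTRL_SDP0_DIR;            /* drive SDP0 as an output */
	tssdp &= ~(0x3 << 6);                   /* assumed 2-bit SDP0 select field */
	tssdp |= TS_SDP0_SEL_TT0 | TS_SDP0_EN;  /* target time 0 out on SDP0 */

	wr32(E1000_CTRL, ctrl);
	wr32(E1000_TSSDP, tssdp);
}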
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index ab99e2b582a8..10741d170f2d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 0c0393316a3a..db963397cc27 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -35,6 +34,8 @@
#include "e1000_hw.h"
#include "e1000_i210.h"
+static s32 igb_update_flash_i210(struct e1000_hw *hw);
+
/**
* igb_get_hw_semaphore_i210 - Acquire hardware semaphore
* @hw: pointer to the HW structure
@@ -111,7 +112,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -E1000_ERR_NVM (-1).
**/
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
+static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
@@ -123,7 +124,7 @@ s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
* Stop any current commands to the EEPROM and clear the EEPROM request bit,
* then release the semaphores acquired.
**/
-void igb_release_nvm_i210(struct e1000_hw *hw)
+static void igb_release_nvm_i210(struct e1000_hw *hw)
{
igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
@@ -206,8 +207,8 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
* Reads a 16 bit word from the Shadow Ram using the EERD register.
* Uses necessary synchronization semaphores.
**/
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
s32 status = E1000_SUCCESS;
u16 i, count;
@@ -306,8 +307,8 @@ out:
* If error code is returned, data and Shadow RAM may be inconsistent - buffer
* partially written.
**/
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
s32 status = E1000_SUCCESS;
u16 i, count;
@@ -555,7 +556,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
-s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
+static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
@@ -590,7 +591,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM. Next commit EEPROM data onto the Flash.
**/
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
+static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u16 checksum = 0;
@@ -684,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
* @hw: pointer to the HW structure
*
**/
-s32 igb_update_flash_i210(struct e1000_hw *hw)
+static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u32 flup;
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 2d913716573a..907fe99a9813 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -28,17 +27,8 @@
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
-s32 igb_update_flash_i210(struct e1000_hw *hw);
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-void igb_release_nvm_i210(struct e1000_hw *hw);
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
s32 igb_read_invm_version(struct e1000_hw *hw,
struct e1000_fw_version *invm_ver);
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 298f0ed50670..5910a932ea7c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e4cbe8ef67b3..99299ba8ee3a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index dac1447fabf7..d5b121771c31 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index de9bba41acf3..f52f5515e5a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index a7db7f3db914..9abf82919c65 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 433b7419cb98..5b101170b17e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index ad2b74d95138..4009bbab7407 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -394,77 +393,6 @@ s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
}
/**
- * e1000_write_sfp_data_byte - Writes SFP module data.
- * @hw: pointer to the HW structure
- * @offset: byte location offset to write to
- * @data: data to write
- *
- * Writes one byte to SFP module data stored
- * in SFP resided EEPROM memory or SFP diagnostic area.
- * Function should be called with
- * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
- * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
- * access
- **/
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
-{
- u32 i = 0;
- u32 i2ccmd = 0;
- u32 data_local = 0;
-
- if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
- hw_dbg("I2CCMD command address exceeds upper limit\n");
- return -E1000_ERR_PHY;
- }
- /* The programming interface is 16 bits wide
- * so we need to read the whole word first
- * then update appropriate byte lane and write
- * the updated word back.
- */
- /* Set up Op-code, EEPROM Address,in the I2CCMD
- * register. The MAC will take care of interfacing
- * with an EEPROM to write the data given.
- */
- i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_READ);
- /* Set a command to read single word */
- wr32(E1000_I2CCMD, i2ccmd);
- for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- udelay(50);
- /* Poll the ready bit to see if lastly
- * launched I2C operation completed
- */
- i2ccmd = rd32(E1000_I2CCMD);
- if (i2ccmd & E1000_I2CCMD_READY) {
- /* Check if this is READ or WRITE phase */
- if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
- E1000_I2CCMD_OPCODE_READ) {
- /* Write the selected byte
- * lane and update whole word
- */
- data_local = i2ccmd & 0xFF00;
- data_local |= data;
- i2ccmd = ((offset <<
- E1000_I2CCMD_REG_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_WRITE | data_local);
- wr32(E1000_I2CCMD, i2ccmd);
- } else {
- break;
- }
- }
- }
- if (!(i2ccmd & E1000_I2CCMD_READY)) {
- hw_dbg("I2CCMD Write did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (i2ccmd & E1000_I2CCMD_ERROR) {
- hw_dbg("I2CCMD Error bit set\n");
- return -E1000_ERR_PHY;
- }
- return 0;
-}
-
-/**
* igb_read_phy_reg_igp - Read igp PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6a0873f2095a..4c2c36c46a73 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -70,7 +69,6 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
s32 igb_get_phy_info_82580(struct e1000_hw *hw);
s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 82632c6c53af..bdb246e848e1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -41,6 +40,7 @@
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
@@ -102,6 +102,14 @@
#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */
+#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
+#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
+#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */
+#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
+#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */
+#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */
#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
@@ -349,16 +357,30 @@
#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
+#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
* Filter - RW */
#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
-#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
-#define rd32(reg) (readl(hw->hw_addr + reg))
+struct e1000_hw;
+
+u32 igb_rd32(struct e1000_hw *hw, u32 reg);
+
+/* write operations, indexed using DWORDS */
+#define wr32(reg, val) \
+do { \
+ u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+ if (!E1000_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
+} while (0)
+
+#define rd32(reg) (igb_rd32(hw, reg))
+
#define wrfl() ((void)rd32(E1000_STATUS))
#define array_wr32(reg, offset, value) \
- (writel(value, hw->hw_addr + reg + ((offset) << 2)))
+ wr32((reg) + ((offset) << 2), (value))
+
#define array_rd32(reg, offset) \
(readl(hw->hw_addr + reg + ((offset) << 2)))
@@ -397,4 +419,6 @@
#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
+#define E1000_REMOVED(h) unlikely(!(h))
+
#endif
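wr32() now snapshots hw_addr with ACCESS_ONCE() and drops the write when E1000_REMOVED() says the mapping is gone, and reads go through the new igb_rd32() helper. One plausible shape for such a helper is sketched below; this is a guess at the idea, not the driver's actual implementation.

/* Sketch only: guards a register read against surprise removal of the device.
 * Assumes e1000_hw.h and the macros above are in scope.
 */
u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
		return value;            /* device is gone; report zero */

	value = readl(&hw_addr[reg]);
	/* A real implementation might also treat an all-ones read as removal. */
	return value;
}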
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ccf472f073dd..7fbe1e925143 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -42,6 +41,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
+#include <linux/mdio.h>
struct igb_adapter;
@@ -434,6 +434,7 @@ struct igb_adapter {
struct delayed_work ptp_overflow_work;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock;
@@ -456,6 +457,7 @@ struct igb_adapter {
unsigned long link_check_timeout;
int copper_tries;
struct e1000_info ei;
+ u16 eee_advert;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -472,6 +474,7 @@ struct igb_adapter {
#define IGB_FLAG_MAS_CAPABLE (1 << 11)
#define IGB_FLAG_MAS_ENABLE (1 << 12)
#define IGB_FLAG_HAS_MSIX (1 << 13)
+#define IGB_FLAG_EEE (1 << 14)
/* Media Auto Sense */
#define IGB_MAS_ENABLE_0 0X0001
@@ -489,7 +492,8 @@ struct igb_adapter {
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
- __IGB_DOWN
+ __IGB_DOWN,
+ __IGB_PTP_TX_IN_PROGRESS,
};
enum igb_boards {
@@ -525,9 +529,7 @@ void igb_set_fw_version(struct igb_adapter *);
void igb_ptp_init(struct igb_adapter *adapter);
void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
-void igb_ptp_tx_work(struct work_struct *work);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
-void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
struct sk_buff *skb);
@@ -545,8 +547,8 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd);
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 1df02378de69..e5570acbeea8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -2274,15 +2273,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
ring = adapter->tx_ring[j];
do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
data[i] = ring->tx_stats.packets;
data[i+1] = ring->tx_stats.bytes;
data[i+2] = ring->tx_stats.restart_queue;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
restart2 = ring->tx_stats.restart_queue2;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
data[i+2] += restart2;
i += IGB_TX_QUEUE_STATS_LEN;
@@ -2290,13 +2289,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
for (j = 0; j < adapter->num_rx_queues; j++) {
ring = adapter->rx_ring[j];
do {
- start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
data[i] = ring->rx_stats.packets;
data[i+1] = ring->rx_stats.bytes;
data[i+2] = ring->rx_stats.drops;
data[i+3] = ring->rx_stats.csum_err;
data[i+4] = ring->rx_stats.alloc_failed;
- } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
i += IGB_RX_QUEUE_STATS_LEN;
}
spin_unlock(&adapter->stats64_lock);
@@ -2354,6 +2353,11 @@ static int igb_get_ts_info(struct net_device *dev,
{
struct igb_adapter *adapter = netdev_priv(dev);
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
switch (adapter->hw.mac.type) {
case e1000_82575:
info->so_timestamping =
@@ -2375,11 +2379,6 @@ static int igb_get_ts_info(struct net_device *dev,
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- if (adapter->ptp_clock)
- info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
-
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -2588,7 +2587,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 ipcnfg, eeer, ret_val;
+ u32 ret_val;
u16 phy_data;
if ((hw->mac.type < e1000_i350) ||
@@ -2597,16 +2596,25 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
edata->supported = (SUPPORTED_1000baseT_Full |
SUPPORTED_100baseT_Full);
+ if (!hw->dev_spec._82575.eee_disable)
+ edata->advertised =
+ mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+ /* The IPCNFG and EEER registers are not supported on I354. */
+ if (hw->mac.type == e1000_i354) {
+ igb_get_eee_status_i354(hw, (bool *)&edata->eee_active);
+ } else {
+ u32 eeer;
- ipcnfg = rd32(E1000_IPCNFG);
- eeer = rd32(E1000_EEER);
+ eeer = rd32(E1000_EEER);
- /* EEE status on negotiated link */
- if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
- edata->advertised = ADVERTISED_1000baseT_Full;
+ /* EEE status on negotiated link */
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
- if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
- edata->advertised |= ADVERTISED_100baseT_Full;
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+ }
/* EEE Link Partner Advertised */
switch (hw->mac.type) {
@@ -2617,8 +2625,8 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -ENODATA;
edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
-
break;
+ case e1000_i354:
case e1000_i210:
case e1000_i211:
ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
@@ -2634,12 +2642,10 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
break;
}
- if (eeer & E1000_EEER_EEE_NEG)
- edata->eee_active = true;
-
edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
- if (eeer & E1000_EEER_TX_LPI_EN)
+ if ((hw->mac.type == e1000_i354) &&
+ (edata->eee_enabled))
edata->tx_lpi_enabled = true;
/* Report correct negotiated EEE status for devices that
@@ -2687,9 +2693,10 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- if (eee_curr.advertised != edata->advertised) {
+ if (edata->advertised &
+ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
dev_err(&adapter->pdev->dev,
- "Setting EEE Advertisement is not supported\n");
+ "EEE Advertisement supports only 100Tx and or 100T full duplex\n");
return -EINVAL;
}
@@ -2699,9 +2706,14 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
- igb_set_eee_i350(hw);
+ adapter->flags |= IGB_FLAG_EEE;
+ if (hw->mac.type == e1000_i350)
+ igb_set_eee_i350(hw);
+ else
+ igb_set_eee_i354(hw);
/* reset link */
if (netif_running(netdev))
@@ -2779,9 +2791,11 @@ static int igb_get_module_eeprom(struct net_device *netdev,
/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
for (i = 0; i < last_word - first_word + 1; i++) {
status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
- if (status != E1000_SUCCESS)
+ if (status != E1000_SUCCESS) {
/* Error occurred while reading module */
+ kfree(dataword);
return -EIO;
+ }
be16_to_cpus(&dataword[i]);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index e0af5bc61613..8333f67acf96 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 46d31a49f5ea..30198185d19a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -70,7 +69,7 @@ char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
- "Copyright (c) 2007-2013 Intel Corporation.";
+ "Copyright (c) 2007-2014 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
@@ -752,6 +751,28 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
}
+u32 igb_rd32(struct e1000_hw *hw, u32 reg)
+{
+ struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
+ u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value = 0;
+
+ if (E1000_REMOVED(hw_addr))
+ return ~value;
+
+ value = readl(&hw_addr[reg]);
+
+ /* reads should not return all F's */
+ if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+ struct net_device *netdev = igb->netdev;
+ hw->hw_addr = NULL;
+ netif_device_detach(netdev);
+ netdev_err(netdev, "PCIe link lost, device now detached\n");
+ }
+
+ return value;
+}
+
/**
* igb_write_ivar - configure ivar for given MSI-X vector
* @hw: pointer to the HW structure
@@ -1014,6 +1035,12 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /* Coming from igb_set_interrupt_capability, the vectors are not yet
+ * allocated, so q_vector is still NULL and we should stop here.
+ */
+ if (!q_vector)
+ return;
+
if (q_vector->tx.ring)
adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
@@ -1111,16 +1138,18 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
for (i = 0; i < numvecs; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- numvecs);
- if (err == 0)
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries,
+ numvecs,
+ numvecs);
+ if (err > 0)
return;
igb_reset_interrupt_capability(adapter);
/* If we can't do MSI-X, try MSI */
msi_only:
+ adapter->flags &= ~IGB_FLAG_HAS_MSIX;
#ifdef CONFIG_PCI_IOV
/* disable SR-IOV for non MSI-X configurations */
if (adapter->vf_data) {
@@ -1726,6 +1755,10 @@ int igb_up(struct igb_adapter *adapter)
hw->mac.get_link_status = 1;
schedule_work(&adapter->watchdog_task);
+ if ((adapter->flags & IGB_FLAG_EEE) &&
+ (!hw->dev_spec._82575.eee_disable))
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
return 0;
}
@@ -1974,6 +2007,21 @@ void igb_reset(struct igb_adapter *adapter)
}
}
#endif
+ /* Re-establish EEE setting */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (mac->type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ igb_set_eee_i350(hw);
+ break;
+ case e1000_i354:
+ igb_set_eee_i354(hw);
+ break;
+ default:
+ break;
+ }
+ }
if (!netif_running(adapter->netdev))
igb_power_down_link(adapter);
@@ -2560,23 +2608,36 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
adapter->num_rx_queues, adapter->num_tx_queues);
- switch (hw->mac.type) {
- case e1000_i350:
- case e1000_i210:
- case e1000_i211:
- igb_set_eee_i350(hw);
- break;
- case e1000_i354:
- if (hw->phy.media_type == e1000_media_type_copper) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (hw->mac.type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable EEE for internal copper PHY devices */
+ err = igb_set_eee_i350(hw);
+ if ((!err) &&
+ (!hw->dev_spec._82575.eee_disable)) {
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ adapter->flags |= IGB_FLAG_EEE;
+ }
+ break;
+ case e1000_i354:
if ((rd32(E1000_CTRL_EXT) &
- E1000_CTRL_EXT_LINK_MODE_SGMII))
- igb_set_eee_i354(hw);
+ E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ err = igb_set_eee_i354(hw);
+ if ((!err) &&
+ (!hw->dev_spec._82575.eee_disable)) {
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ adapter->flags |= IGB_FLAG_EEE;
+ }
+ }
+ break;
+ default:
+ break;
}
- break;
- default:
- break;
}
-
pm_runtime_put_noidle(&pdev->dev);
return 0;
@@ -2591,7 +2652,7 @@ err_eeprom:
iounmap(hw->flash_address);
err_sw_init:
igb_clear_interrupt_scheme(adapter);
- iounmap(hw->hw_addr);
+ pci_iounmap(pdev, hw->hw_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -2758,7 +2819,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_disable_sriov(pdev);
#endif
- iounmap(hw->hw_addr);
+ pci_iounmap(pdev, hw->hw_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev,
@@ -3510,6 +3571,13 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
vmolr = rd32(E1000_VMOLR(vfn));
vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ if (hw->mac.type == e1000_i350) {
+ u32 dvmolr;
+
+ dvmolr = rd32(E1000_DVMOLR(vfn));
+ dvmolr |= E1000_DVMOLR_STRVLAN;
+ wr32(E1000_DVMOLR(vfn), dvmolr);
+ }
if (aupe)
vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
else
@@ -4158,6 +4226,15 @@ static void igb_watchdog_task(struct work_struct *work)
(ctrl & E1000_CTRL_RFCE) ? "RX" :
(ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
+ /* disable EEE if enabled */
+ if ((adapter->flags & IGB_FLAG_EEE) &&
+ (adapter->link_duplex == HALF_DUPLEX)) {
+ dev_info(&adapter->pdev->dev,
+ "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
+ adapter->hw.dev_spec._82575.eee_disable = true;
+ adapter->flags &= ~IGB_FLAG_EEE;
+ }
+
/* check if SmartSpeed worked */
igb_check_downshift(hw);
if (phy->speed_downgraded)
@@ -4306,8 +4383,7 @@ enum latency_range {
* were determined based on theoretical maximum wire speed and testing
* data, in order to minimize response time while increasing bulk
* throughput.
- * This functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
+ * This functionality is controlled by ethtool's coalescing settings.
* NOTE: This function is called only when operating in a multiqueue
* receive environment.
**/
@@ -4381,8 +4457,7 @@ clear_counts:
* based on theoretical maximum wire speed and thresholds were set based
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
- * this functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
+ * This functionality is controlled by ethtool's coalescing settings.
* NOTE: These calculations are only valid when operating in a single-
* queue environment.
**/
@@ -4546,7 +4621,7 @@ static int igb_tso(struct igb_ring *tx_ring,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -4602,12 +4677,12 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
l4_hdr = ip_hdr(skb)->protocol;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
l4_hdr = ipv6_hdr(skb)->nexthdr;
break;
@@ -4905,12 +4980,11 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
- skb_tx_timestamp(skb);
-
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
- if (!(adapter->ptp_tx_skb)) {
+ if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGB_TX_FLAGS_TSTAMP;
@@ -4921,6 +4995,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
}
}
+ skb_tx_timestamp(skb);
+
if (vlan_tx_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
@@ -5127,10 +5203,10 @@ void igb_update_stats(struct igb_adapter *adapter,
}
do {
- start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
_bytes = ring->rx_stats.bytes;
_packets = ring->rx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -5143,10 +5219,10 @@ void igb_update_stats(struct igb_adapter *adapter,
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
_bytes = ring->tx_stats.bytes;
_packets = ring->tx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -6620,7 +6696,9 @@ static inline void igb_rx_hash(struct igb_ring *ring,
struct sk_buff *skb)
{
if (ring->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ skb_set_hash(skb,
+ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ PKT_HASH_TYPE_L3);
}
/**
@@ -6690,7 +6768,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
hdr.network += ETH_HLEN;
/* handle any vlan tag if present */
- if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if (protocol == htons(ETH_P_8021Q)) {
if ((hdr.network - data) > (max_len - VLAN_HLEN))
return max_len;
@@ -6699,7 +6777,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
}
/* handle L3 protocols */
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
return max_len;
@@ -6713,7 +6791,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
/* record next protocol if header is present */
if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
nexthdr = hdr.ipv4->protocol;
- } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ } else if (protocol == htons(ETH_P_IPV6)) {
if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
return max_len;
@@ -6903,7 +6981,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
- do {
+ while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
/* return some buffers to hardware, one at a time is too slow */
@@ -6955,7 +7033,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* update budget accounting */
total_packets++;
- } while (likely(total_packets < budget));
+ }
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
@@ -7114,8 +7192,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
+ case SIOCGHWTSTAMP:
+ return igb_ptp_get_ts_config(netdev, ifr);
case SIOCSHWTSTAMP:
- return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+ return igb_ptp_set_ts_config(netdev, ifr);
default:
return -EOPNOTSUPP;
}
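
The ioctl hunk above wires the new SIOCGHWTSTAMP request to igb_ptp_get_ts_config(), so user space can read back the hardware timestamping configuration instead of only setting it via SIOCSHWTSTAMP. A minimal user-space sketch of the call follows; the interface name "eth0" is only an example, and drivers or kernels without this support are expected to fail the ioctl.

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>	/* SIOCGHWTSTAMP, SIOCSHWTSTAMP */
	#include <linux/net_tstamp.h>	/* struct hwtstamp_config */

	int main(void)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0) {
			perror("socket");
			return 1;
		}

		memset(&ifr, 0, sizeof(ifr));
		memset(&cfg, 0, sizeof(cfg));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */
		ifr.ifr_data = (void *)&cfg;

		/* With this patch igb returns its shadow copy of the last
		 * applied settings rather than decoding the registers. */
		if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0)
			perror("SIOCGHWTSTAMP");
		else
			printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

		close(fd);
		return 0;
	}
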
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 5a54e3dc535d..2cca8fd5e574 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -12,9 +12,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/device.h>
@@ -75,6 +74,8 @@
#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
+static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+
/* SYSTIM read access for the 82576 */
static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
{
@@ -372,7 +373,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
**/
-void igb_ptp_tx_work(struct work_struct *work)
+static void igb_ptp_tx_work(struct work_struct *work)
{
struct igb_adapter *adapter = container_of(work, struct igb_adapter,
ptp_tx_work);
@@ -386,6 +387,7 @@ void igb_ptp_tx_work(struct work_struct *work)
IGB_PTP_TX_TIMEOUT)) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
adapter->tx_hwtstamp_timeouts++;
dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
return;
@@ -466,7 +468,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
* available, then it must have been for this skb here because we only
* allow only one such packet into the queue.
**/
-void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
@@ -479,6 +481,7 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
/**
@@ -540,10 +543,26 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
}
/**
- * igb_ptp_hwtstamp_ioctl - control hardware time stamping
+ * igb_ptp_get_ts_config - get hardware time stamping config
+ * @netdev: pointer to the net device structure
+ * @ifreq: interface request data
+ *
+ * Get the hwtstamp_config settings to return to the user. Rather than attempt
+ * to deconstruct the settings from the registers, just return a shadow copy
+ * of the last known settings.
+ **/
+int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * igb_ptp_set_ts_config - control hardware time stamping
* @netdev:
* @ifreq:
- * @cmd:
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -557,12 +576,11 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
**/
-int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
+ struct hwtstamp_config *config = &adapter->tstamp_config;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 tsync_rx_cfg = 0;
@@ -570,14 +588,14 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
bool is_l2 = false;
u32 regval;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
return -EFAULT;
/* reserved for future extensions */
- if (config.flags)
+ if (config->flags)
return -EINVAL;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
case HWTSTAMP_TX_ON:
@@ -586,7 +604,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
break;
@@ -610,7 +628,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
break;
@@ -621,12 +639,12 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
*/
if (hw->mac.type != e1000_82576) {
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
/* fall through */
default:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -643,7 +661,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
is_l2 = true;
is_l4 = true;
@@ -707,7 +725,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
regval = rd32(E1000_RXSTMPL);
regval = rd32(E1000_RXSTMPH);
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
@@ -798,7 +816,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
/* Initialize the time sync interrupts for devices that support it. */
if (hw->mac.type >= e1000_82580) {
- wr32(E1000_TSIM, E1000_TSIM_TXTS);
+ wr32(E1000_TSIM, TSYNC_INTERRUPTS);
wr32(E1000_IMS, E1000_IMS_TS);
}
@@ -841,6 +859,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
if (adapter->ptp_tx_skb) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
if (adapter->ptp_clock) {
@@ -864,6 +883,9 @@ void igb_ptp_reset(struct igb_adapter *adapter)
if (!(adapter->flags & IGB_FLAG_PTP))
return;
+ /* reset the tstamp_config */
+ memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+
switch (adapter->hw.mac.type) {
case e1000_82576:
/* Dial the nominal frequency. */
@@ -876,7 +898,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
case e1000_i211:
/* Enable the timer functions and interrupts. */
wr32(E1000_TSAUXC, 0x0);
- wr32(E1000_TSIM, E1000_TSIM_TXTS);
+ wr32(E1000_TSIM, TSYNC_INTERRUPTS);
wr32(E1000_IMS, E1000_IMS_TS);
break;
default:
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 675435fc2e53..b7ab03a2f28f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1043,11 +1043,11 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
for (i = 0; i < 3; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries, 3);
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries, 3, 3);
}
- if (err) {
+ if (err < 0) {
/* MSI-X failed */
dev_err(&adapter->pdev->dev,
"Failed to initialize MSI-X interrupts.\n");
@@ -2014,12 +2014,12 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
break;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 57e390cbe6d0..f42c201f727f 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1521,12 +1521,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int tso;
if (test_bit(__IXGB_DOWN, &adapter->flags)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (skb->len <= 0) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1543,7 +1543,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = ixgb_tso(adapter, skb);
if (tso < 0) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 0186ea2969fe..55c53a1cbb62 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -765,6 +766,7 @@ struct ixgbe_adapter {
struct ptp_clock_info ptp_caps;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_overflow_check;
unsigned long last_rx_ptp_check;
@@ -806,10 +808,12 @@ enum ixgbe_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
+ __IXGBE_DISABLED,
__IXGBE_REMOVING,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
__IXGBE_PTP_RUNNING,
+ __IXGBE_PTP_TX_IN_PROGRESS,
};
struct ixgbe_cb {
@@ -884,7 +888,6 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -958,8 +961,8 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
- int cmd);
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
+int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index a26f3fee4f35..4c78ea8946c1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -57,10 +58,12 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
**/
static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
u16 pcie_devctl2;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
/* only take action if timeout value is defaulted to 0 */
if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
goto out;
@@ -79,11 +82,9 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
* directly in order to set the completion timeout value for
* 16ms to 55ms
*/
- pci_read_config_word(adapter->pdev,
- IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
+ pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
- pci_write_config_word(adapter->pdev,
- IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+ ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
@@ -100,6 +101,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -201,8 +203,6 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
- hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
-
/* set the completion timeout for interface */
if (ret_val == 0)
ixgbe_set_pcie_completion_timeout(hw);
@@ -1237,14 +1237,14 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
}
/**
- * ixgbe_set_rxpba_82598 - Configure packet buffers
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure packet buffers.
- */
-static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
- int strategy)
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy)
{
u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
u8 i = 0;
@@ -1315,7 +1315,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.release_swfw_sync = &ixgbe_release_swfw_sync,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
- .mng_fw_enabled = NULL,
+ .prot_autoc_read = &prot_autoc_read_generic,
+ .prot_autoc_write = &prot_autoc_write_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index edda6814108c..f32b3dd1ba8e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -63,8 +64,10 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
+static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
u32 fwsm, manc, factps;
@@ -91,7 +94,7 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
* and MNG not enabled
*/
if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- !hw->mng_fw_enabled) {
+ !ixgbe_mng_enabled(hw)) {
mac->ops.disable_tx_laser =
&ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
@@ -122,7 +125,6 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
u16 list_offset, data_offset, data_value;
- bool got_lock = false;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -160,30 +162,10 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
usleep_range(hw->eeprom.semaphore_delay * 1000,
hw->eeprom.semaphore_delay * 2000);
- /* Need SW/FW semaphore around AUTOC writes if LESM on,
- * likewise reset_pipeline requires lock as it also writes
- * AUTOC.
- */
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto setup_sfp_out;
-
- got_lock = true;
- }
-
/* Restart DSP and set SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
- IXGBE_AUTOC_LMS_10G_SERIAL));
- hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- ret_val = ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock) {
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- got_lock = false;
- }
+ ret_val = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
+ false);
if (ret_val) {
hw_dbg(hw, " sfp module setup not complete\n");
@@ -207,6 +189,81 @@ setup_sfp_err:
return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
}
+/**
+ * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: Return whether we took the lock for this read.
+ * @reg_val: Value we read from AUTOC
+ *
+ * For this part (82599) we need to wrap read-modify-writes with a possible
+ * FW/SW lock. It is assumed this lock will be freed with the next
+ * prot_autoc_write_82599(). Note that locked can only be true in cases
+ * where this function doesn't return an error.
+ **/
+static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+ u32 *reg_val)
+{
+ s32 ret_val;
+
+ *locked = false;
+ /* If LESM is on then we need to hold the SW/FW semaphore. */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ *locked = true;
+ }
+
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return 0;
+}
+
+/**
+ * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous prot_autoc_read_82599().
+ *
+ * This part (82599) may need to hold the SW/FW lock around all writes to
+ * AUTOC. Likewise after a write we need to do a pipeline reset.
+ **/
+static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+{
+ s32 ret_val = 0;
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
+ /* We only need to get the lock if:
+ * - We didn't do it already (in the read part of a read-modify-write)
+ * - LESM is enabled.
+ */
+ if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ locked = true;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+out:
+ /* Free the SW/FW semaphore as we either grabbed it here or
+ * already had it when this function was called.
+ */
+ if (locked)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+ return ret_val;
+}
+
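
The two helpers added above hide whether an AUTOC update needs the SW/FW semaphore: prot_autoc_read_82599() takes the lock when LESM is active and reports that through *locked, and prot_autoc_write_82599() reuses or acquires it, does the pipeline reset, and releases it on the way out. The stand-alone model below only illustrates how the locked flag travels through a read-modify-write; model_autoc_read(), model_autoc_write() and the fake_* variables are made-up stand-ins for the driver's helpers, AUTOC register and SW/FW semaphore, in the same shape as the ixgbe_setup_fc() hunk later in this patch.

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned int fake_autoc = 0x0000c000;	/* pretend AUTOC contents */
	static bool fake_sem_held;
	static bool lesm_enabled = true;		/* pretend LESM firmware is active */

	static int model_autoc_read(bool *locked, unsigned int *reg_val)
	{
		*locked = false;
		if (lesm_enabled) {		/* acquire_swfw_sync() in the driver */
			fake_sem_held = true;
			*locked = true;
		}
		*reg_val = fake_autoc;
		return 0;
	}

	static int model_autoc_write(unsigned int autoc, bool locked)
	{
		if (!locked && lesm_enabled) {	/* lock only if the read didn't */
			fake_sem_held = true;
			locked = true;
		}
		fake_autoc = autoc;		/* write + pipeline reset in the driver */
		if (locked)			/* release_swfw_sync() */
			fake_sem_held = false;
		return 0;
	}

	int main(void)
	{
		bool locked;
		unsigned int autoc;

		/* read-modify-write: the locked flag is handed from read to write */
		model_autoc_read(&locked, &autoc);
		autoc |= 0x1;			/* stand-in for a link-mode update */
		model_autoc_write(autoc, locked);

		printf("autoc=0x%08x sem_held=%d\n", fake_autoc, fake_sem_held);
		return 0;
	}
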
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -216,6 +273,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -456,12 +514,20 @@ out:
*
* Disables link, should be called during D3 power down sequence.
*
- */
+ **/
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
- u32 autoc2_reg;
+ u32 autoc2_reg, fwsm;
+ u16 ee_ctrl_2 = 0;
+
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
- if (!hw->mng_fw_enabled && !hw->wol_enabled) {
+ /* Check to see if MNG FW could be enabled */
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+
+ if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
+ !hw->wol_enabled &&
+ ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
@@ -542,6 +608,10 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
/* Disable tx laser; allow 100us to go dark per spec */
esdp_reg |= IXGBE_ESDP_SDP3;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
@@ -582,6 +652,10 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
**/
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
if (hw->mac.autotry_restart) {
ixgbe_disable_tx_laser_multispeed_fiber(hw);
ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -590,75 +664,6 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
/**
- * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
- * @hw: pointer to hardware structure
- * @speed: link speed to set
- *
- * We set the module speed differently for fixed fiber. For other
- * multi-speed devices we don't have an error value so here if we
- * detect an error we just log it and exit.
- */
-static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed)
-{
- s32 status;
- u8 rs, eeprom_data;
-
- switch (speed) {
- case IXGBE_LINK_SPEED_10GB_FULL:
- /* one bit mask same as setting on */
- rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
- break;
- case IXGBE_LINK_SPEED_1GB_FULL:
- rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
- break;
- default:
- hw_dbg(hw, "Invalid fixed module speed\n");
- return;
- }
-
- /* Set RS0 */
- status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- &eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
- goto out;
- }
-
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
-
- status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
- goto out;
- }
-
- /* Set RS1 */
- status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- &eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
- goto out;
- }
-
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
-
- status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
- goto out;
- }
-out:
- return;
-}
-
-/**
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -768,10 +773,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
- ixgbe_set_fiber_fixed_speed(hw,
- IXGBE_LINK_SPEED_1GB_FULL);
- break;
case ixgbe_media_type_fiber:
esdp_reg &= ~IXGBE_ESDP_SDP5;
esdp_reg |= IXGBE_ESDP_SDP5_DIR;
@@ -941,8 +942,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
out:
if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
- hw_dbg(hw, "Smartspeed has downgraded the link speed from "
- "the maximum advertised\n");
+ hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n");
return status;
}
@@ -958,16 +958,19 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
+ bool autoneg = false;
s32 status = 0;
- u32 autoc, pma_pmd_1g, link_mode, start_autoc;
+ u32 pma_pmd_1g, link_mode, links_reg, i;
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 orig_autoc = 0;
u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
- u32 links_reg;
- u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
- bool got_lock = false;
- bool autoneg = false;
+
+ /* holds the value of AUTOC register at this current point in time */
+ u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ /* holds the cached value of AUTOC register */
+ u32 orig_autoc = 0;
+ /* temporary variable used for comparison purposes */
+ u32 autoc = current_autoc;
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -984,12 +987,10 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
if (hw->mac.orig_link_settings_stored)
- autoc = hw->mac.orig_autoc;
+ orig_autoc = hw->mac.orig_autoc;
else
- autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ orig_autoc = autoc;
- orig_autoc = autoc;
- start_autoc = hw->mac.cached_autoc;
link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
@@ -1029,28 +1030,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
}
- if (autoc != start_autoc) {
- /* Need SW/FW semaphore around AUTOC writes if LESM is on,
- * likewise reset_pipeline requires us to hold this lock as
- * it also writes to AUTOC.
- */
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- status = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (status != 0)
- goto out;
-
- got_lock = true;
- }
-
+ if (autoc != current_autoc) {
/* Restart link */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
- hw->mac.cached_autoc = autoc;
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
+ if (status)
+ goto out;
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
@@ -1068,8 +1052,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
status =
IXGBE_ERR_AUTONEG_NOT_COMPLETE;
- hw_dbg(hw, "Autoneg did not "
- "complete.\n");
+ hw_dbg(hw, "Autoneg did not complete.\n");
}
}
}
@@ -1117,7 +1100,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
s32 status;
- u32 ctrl, i, autoc2;
+ u32 ctrl, i, autoc, autoc2;
u32 curr_lms;
bool link_up = false;
@@ -1151,11 +1134,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->phy.ops.reset(hw);
/* remember AUTOC from before we reset */
- if (hw->mac.cached_autoc)
- curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
- else
- curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
- IXGBE_AUTOC_LMS_MASK;
+ curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
mac_reset_top:
/*
@@ -1205,7 +1184,7 @@ mac_reset_top:
* stored off yet. Otherwise restore the stored original
* values since the reset operation sets back to defaults.
*/
- hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
/* Enable link if disabled in NVM */
@@ -1216,7 +1195,7 @@ mac_reset_top:
}
if (hw->mac.orig_link_settings_stored == false) {
- hw->mac.orig_autoc = hw->mac.cached_autoc;
+ hw->mac.orig_autoc = autoc;
hw->mac.orig_autoc2 = autoc2;
hw->mac.orig_link_settings_stored = true;
} else {
@@ -1227,34 +1206,18 @@ mac_reset_top:
* Likewise if we support WoL we don't want change the
* LMS state either.
*/
- if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
+ if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
hw->wol_enabled)
hw->mac.orig_autoc =
(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
curr_lms;
- if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
- /* Need SW/FW semaphore around AUTOC writes if LESM is
- * on, likewise reset_pipeline requires us to hold
- * this lock as it also writes to AUTOC.
- */
- bool got_lock = false;
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- status = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (status)
- goto reset_hw_out;
-
- got_lock = true;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
- hw->mac.cached_autoc = hw->mac.orig_autoc;
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ if (autoc != hw->mac.orig_autoc) {
+ status = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc,
+ false);
+ if (status)
+ goto reset_hw_out;
}
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
@@ -1634,35 +1597,20 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
{
u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
- u32 bucket_hash = 0;
+ u32 bucket_hash = 0, hi_dword = 0;
+ int i;
/* Apply masks to input data */
- input->dword_stream[0] &= input_mask->dword_stream[0];
- input->dword_stream[1] &= input_mask->dword_stream[1];
- input->dword_stream[2] &= input_mask->dword_stream[2];
- input->dword_stream[3] &= input_mask->dword_stream[3];
- input->dword_stream[4] &= input_mask->dword_stream[4];
- input->dword_stream[5] &= input_mask->dword_stream[5];
- input->dword_stream[6] &= input_mask->dword_stream[6];
- input->dword_stream[7] &= input_mask->dword_stream[7];
- input->dword_stream[8] &= input_mask->dword_stream[8];
- input->dword_stream[9] &= input_mask->dword_stream[9];
- input->dword_stream[10] &= input_mask->dword_stream[10];
+ for (i = 0; i <= 10; i++)
+ input->dword_stream[i] &= input_mask->dword_stream[i];
/* record the flow_vm_vlan bits as they are a key part to the hash */
flow_vm_vlan = ntohl(input->dword_stream[0]);
/* generate common hash dword */
- hi_hash_dword = ntohl(input->dword_stream[1] ^
- input->dword_stream[2] ^
- input->dword_stream[3] ^
- input->dword_stream[4] ^
- input->dword_stream[5] ^
- input->dword_stream[6] ^
- input->dword_stream[7] ^
- input->dword_stream[8] ^
- input->dword_stream[9] ^
- input->dword_stream[10]);
+ for (i = 1; i <= 10; i++)
+ hi_dword ^= input->dword_stream[i];
+ hi_hash_dword = ntohl(hi_dword);
/* low dword is word swapped version of common */
lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
@@ -1681,21 +1629,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
/* Process remaining 30 bit of the key */
- IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+ for (i = 1; i <= 15; i++)
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
/*
* Limit hash to 13 bits since max bucket count is 8K.
@@ -2001,7 +1936,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
/* We need to run link autotry after the driver loads */
hw->mac.autotry_restart = true;
- hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
if (ret_val == 0)
ret_val = ixgbe_verify_fw_version_82599(hw);
@@ -2260,7 +2194,7 @@ fw_version_err:
* Returns true if the LESM FW module is present and enabled. Otherwise
* returns false. Smart Speed must be disabled if LESM FW module is enabled.
**/
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
bool lesm_enabled = false;
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2366,7 +2300,7 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
* full pipeline reset. Note - We must hold the SW/FW semaphore before writing
* to AUTOC, so this function assumes the semaphore is held.
**/
-s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
s32 ret_val;
u32 anlp1_reg = 0;
@@ -2380,11 +2314,12 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
}
- autoc_reg = hw->mac.cached_autoc;
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
+ autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
/* Wait for AN to leave state 0 */
for (i = 0; i < 10; i++) {
@@ -2565,7 +2500,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.release_swfw_sync = &ixgbe_release_swfw_sync,
.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
- .mng_fw_enabled = &ixgbe_mng_enabled,
+ .prot_autoc_read = &prot_autoc_read_82599,
+ .prot_autoc_write = &prot_autoc_write_82599,
};
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b5c434b617b1..24fba39e194e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -72,7 +73,6 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
bool link_up;
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
hw->mac.ops.check_link(hw, &speed, &link_up, false);
/* if link is down, assume supported */
@@ -114,7 +114,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
u16 reg_cu = 0;
- bool got_lock = false;
+ bool locked = false;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -139,11 +139,16 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* we link at 10G, the 1G advertisement is harmless and vice versa.
*/
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
- case ixgbe_media_type_fiber:
case ixgbe_media_type_backplane:
+ /* some MACs need RMW protection on AUTOC */
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
+ if (ret_val)
+ goto out;
+
+ /* only backplane uses autoc so fall through */
+ case ixgbe_media_type_fiber:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
break;
case ixgbe_media_type_copper:
hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
@@ -240,27 +245,12 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* LESM is on, likewise reset_pipeline requires the lock as
* it also writes AUTOC.
*/
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
-
- got_lock = true;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
-
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
+ if (ret_val)
+ goto out;
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- ixgbe_device_supports_autoneg_fc(hw)) {
+ ixgbe_device_supports_autoneg_fc(hw)) {
hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN, reg_cu);
}
@@ -656,20 +646,17 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
**/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
- struct ixgbe_mac_info *mac = &hw->mac;
u16 link_status;
hw->bus.type = ixgbe_bus_type_pci_express;
/* Get the negotiated link width and speed from PCI config space */
- pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
- &link_status);
+ link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
hw->bus.width = ixgbe_convert_bus_width(link_status);
hw->bus.speed = ixgbe_convert_bus_speed(link_status);
- mac->ops.set_lan_id(hw);
+ hw->mac.ops.set_lan_id(hw);
return 0;
}
@@ -2406,7 +2393,6 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
- case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2437,6 +2423,53 @@ out:
}
/**
+ * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
+ * @hw: pointer to hardware structure
+ *
+ * System-wide timeout range is encoded in PCIe Device Control2 register.
+ *
+ * Add 10% to specified maximum and return the number of times to poll for
+ * completion timeout, in units of 100 microsec. Never return less than
+ * 800 = 80 millisec.
+ **/
+static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+{
+ s16 devctl2;
+ u32 pollcnt;
+
+ devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
+ devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
+
+ switch (devctl2) {
+ case IXGBE_PCIDEVCTRL2_65_130ms:
+ pollcnt = 1300; /* 130 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_260_520ms:
+ pollcnt = 5200; /* 520 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_1_2s:
+ pollcnt = 20000; /* 2 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_4_8s:
+ pollcnt = 80000; /* 8 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_17_34s:
+ pollcnt = 34000; /* 34 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
+ case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
+ case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
+ case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
+ default:
+ pollcnt = 800; /* 80 millisec minimum */
+ break;
+ }
+
+ /* add 10% to spec maximum */
+ return (pollcnt * 11) / 10;
+}
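/* Editorial note, not part of the patch: with the default 16-32 ms
 * timeout encoding the helper above returns (800 * 11) / 10 = 880 poll
 * iterations; at one udelay(100) per iteration in the caller, the wait
 * is bounded at roughly 88 ms (the 80 ms floor plus the 10% margin).
 */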
+
+/**
* ixgbe_disable_pcie_master - Disable PCI-express master access
* @hw: pointer to hardware structure
*
@@ -2447,16 +2480,16 @@ out:
**/
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
s32 status = 0;
- u32 i;
+ u32 i, poll;
u16 value;
/* Always set this bit to ensure any future transactions are blocked */
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
/* Exit if master requests are blocked */
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
+ ixgbe_removed(hw->hw_addr))
goto out;
/* Poll for master request bit to clear */
@@ -2481,10 +2514,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
*/
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ poll = ixgbe_pcie_timeout_poll(hw);
+ for (i = 0; i < poll; i++) {
udelay(100);
- pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
- &value);
+ value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (ixgbe_removed(hw->hw_addr))
+ goto out;
if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
goto out;
}
@@ -2564,6 +2599,35 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
}
/**
+ * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @reg_val: Value we read from AUTOC
+ * @locked: bool to indicate whether the SW/FW lock should be taken. Never
+ * true in the generic case.
+ *
+ * The default case requires no protection so just do the register read.
+ **/
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+ *locked = false;
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return 0;
+}
+
+/**
+ * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous read.
+ **/
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
+ return 0;
+}
+
+/**
* ixgbe_disable_rx_buff_generic - Stops the receive data path
* @hw: pointer to hardware structure
*
@@ -2641,6 +2705,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = 0;
+ bool locked = false;
/*
* Link must be up to auto-blink the LEDs;
@@ -2649,28 +2714,19 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (!link_up) {
- /* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on.
- */
- bool got_lock = false;
-
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val)
+ goto out;
- got_lock = true;
- }
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val)
+ goto out;
+
IXGBE_WRITE_FLUSH(hw);
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
usleep_range(10000, 20000);
}
@@ -2690,33 +2746,21 @@ out:
**/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
- u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc_reg = 0;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = 0;
- bool got_lock = false;
+ bool locked = false;
- /* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
-
- got_lock = true;
- }
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val)
+ goto out;
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_reset_pipeline_82599(hw);
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val)
+ goto out;
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
@@ -2817,7 +2861,6 @@ san_mac_addr_clr:
**/
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
u16 msix_count = 1;
u16 max_msix_count;
u16 pcie_offset;
@@ -2836,7 +2879,9 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
return msix_count;
}
- pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
+ msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
+ if (ixgbe_removed(hw->hw_addr))
+ msix_count = 0;
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
/* MSI-X count is zero-based in HW */
@@ -2868,6 +2913,9 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ if (ixgbe_removed(hw->hw_addr))
+ goto done;
+
if (!mpsar_lo && !mpsar_hi)
goto done;
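Editorial aside (not part of the patch): the new prot_autoc_read/prot_autoc_write callbacks thread a "locked" flag from the read to the matching write so that only the path that actually took the SW/FW semaphore releases it. A minimal user-space sketch of that contract, with a fake lock and register standing in for the real acquire_swfw_sync()/release_swfw_sync() and AUTOC access:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the SW/FW semaphore and the AUTOC register. */
static bool swfw_lock_held;
static unsigned int fake_autoc = 0x0000c000;

/* "82599-style" read: takes the lock when firmware (LESM) owns AUTOC. */
static int autoc_read(bool lesm_on, bool *locked, unsigned int *val)
{
	if (lesm_on) {
		swfw_lock_held = true;   /* acquire_swfw_sync() in the driver */
		*locked = true;
	} else {
		*locked = false;
	}
	*val = fake_autoc;
	return 0;
}

/* Matching write: drops the lock only if the paired read took it. */
static int autoc_write(unsigned int val, bool locked)
{
	fake_autoc = val;
	if (locked)
		swfw_lock_held = false;  /* release_swfw_sync() in the driver */
	return 0;
}

int main(void)
{
	bool locked;
	unsigned int reg;

	autoc_read(true, &locked, &reg);  /* read, taking the lock */
	reg |= 0x1000;                    /* e.g. set a restart-AN style bit */
	autoc_write(reg, locked);         /* write, dropping the lock */
	printf("AUTOC now 0x%x, lock held: %d\n", fake_autoc, swfw_lock_held);
	return 0;
}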
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f2e3919750ec..f12c40fb5537 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -98,6 +99,10 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
+
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
@@ -106,10 +111,10 @@ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
-s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
@@ -125,6 +130,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
#define IXGBE_FAILED_READ_REG 0xffffffffU
+#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
+#define IXGBE_FAILED_READ_CFG_WORD 0xffffU
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
static inline bool ixgbe_removed(void __iomem *addr)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 05e23b80b5e3..bdb99b3b0f30 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index d71d9ce3e394..d5a1e3db0774 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index c5933f6dceee..472b0f450bf9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 043307024c4a..6c55c14d082a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -1127,10 +1128,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
data[i] = ring->stats.packets;
data[i+1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
@@ -1155,10 +1156,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
data[i] = ring->stats.packets;
data[i+1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
@@ -1247,6 +1248,11 @@ static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
struct ixgbe_hw *hw = &adapter->hw;
bool link_up;
u32 link_speed = 0;
+
+ if (ixgbe_removed(hw->hw_addr)) {
+ *data = 1;
+ return 1;
+ }
*data = 0;
hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
@@ -1969,6 +1975,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
data[1] = 1;
data[2] = 1;
data[3] = 1;
+ data[4] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
return;
}
@@ -1988,6 +1995,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
data[1] = 1;
data[2] = 1;
data[3] = 1;
+ data[4] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__IXGBE_TESTING,
&adapter->state);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index f58db453a97e..25a3dfef33e8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -407,13 +408,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
/* return 0 to bypass going to ULD for DDPed data */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
/* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
rc = 0;
break;
/* unmap the sg list when FCPRSP is received */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
ddp->sgc, DMA_FROM_DEVICE);
ddp->err = ddp_err;
@@ -421,14 +422,14 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
ddp->sgc = 0;
/* fall through */
/* if DDP length is present pass it through to ULD */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
/* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
if (ddp->len)
rc = ddp->len;
break;
/* no match will return as an error */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
default:
break;
}
@@ -585,7 +586,7 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
struct dma_pool *pool;
char pool_name[32];
- snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
+ snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);
pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
IXGBE_FCPTR_ALIGN, PAGE_SIZE);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 3a02759b5e95..b16cc786750d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 32e3eaaa160a..2067d392cc3d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -698,7 +699,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
int vectors)
{
- int err, vector_threshold;
+ int vector_threshold;
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
@@ -712,18 +713,10 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err) /* Success in acquiring all requested vectors. */
- break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
+ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
- if (vectors < vector_threshold) {
+ if (vectors < 0) {
/* Can't allocate enough MSI-X interrupts? Oh well.
* This just means we'll go with either a single MSI
* vector or fall back to legacy interrupts.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 18076c4178b4..8436c651b735 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -67,7 +68,7 @@ static char ixgbe_default_device_descr[] =
#define DRV_VERSION "3.19.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2013 Intel Corporation.";
+ "Copyright (c) 1999-2014 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -151,6 +152,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
+
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
@@ -169,6 +172,9 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
return -1;
pcie_capability_read_word(parent_dev, reg, value);
+ if (*value == IXGBE_FAILED_READ_CFG_WORD &&
+ ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
+ return -1;
return 0;
}
@@ -313,6 +319,57 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
ixgbe_remove_adapter(hw);
}
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
+{
+ u16 value;
+
+ pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
+ if (value == IXGBE_FAILED_READ_CFG_WORD) {
+ ixgbe_remove_adapter(hw);
+ return true;
+ }
+ return false;
+}
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u16 value;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return IXGBE_FAILED_READ_CFG_WORD;
+ pci_read_config_word(adapter->pdev, reg, &value);
+ if (value == IXGBE_FAILED_READ_CFG_WORD &&
+ ixgbe_check_cfg_remove(hw, adapter->pdev))
+ return IXGBE_FAILED_READ_CFG_WORD;
+ return value;
+}
+
+#ifdef CONFIG_PCI_IOV
+static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u32 value;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return IXGBE_FAILED_READ_CFG_DWORD;
+ pci_read_config_dword(adapter->pdev, reg, &value);
+ if (value == IXGBE_FAILED_READ_CFG_DWORD &&
+ ixgbe_check_cfg_remove(hw, adapter->pdev))
+ return IXGBE_FAILED_READ_CFG_DWORD;
+ return value;
+}
+#endif /* CONFIG_PCI_IOV */
+
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+ pci_write_config_word(adapter->pdev, reg, value);
+}
+
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
@@ -1264,7 +1321,9 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
struct sk_buff *skb)
{
if (ring->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ skb_set_hash(skb,
+ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ PKT_HASH_TYPE_L3);
}
#ifdef IXGBE_FCOE
@@ -1480,7 +1539,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
hdr.network += ETH_HLEN;
/* handle any vlan tag if present */
- if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if (protocol == htons(ETH_P_8021Q)) {
if ((hdr.network - data) > (max_len - VLAN_HLEN))
return max_len;
@@ -1489,7 +1548,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
}
/* handle L3 protocols */
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
return max_len;
@@ -1503,7 +1562,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
/* record next protocol if header is present */
if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
nexthdr = hdr.ipv4->protocol;
- } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ } else if (protocol == htons(ETH_P_IPV6)) {
if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
return max_len;
@@ -1511,7 +1570,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
nexthdr = hdr.ipv6->nexthdr;
hlen = sizeof(struct ipv6hdr);
#ifdef IXGBE_FCOE
- } else if (protocol == __constant_htons(ETH_P_FCOE)) {
+ } else if (protocol == htons(ETH_P_FCOE)) {
if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
return max_len;
hlen = FCOE_HEADER_LEN;
@@ -2026,7 +2085,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
- do {
+ while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct sk_buff *skb;
@@ -2101,7 +2160,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* update budget accounting */
total_rx_packets++;
- } while (likely(total_rx_packets < budget));
+ }
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -2630,9 +2689,12 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_ECC)
- e_info(link, "Received unrecoverable ECC Err, please "
- "reboot\n");
+ if (eicr & IXGBE_EICR_ECC) {
+ e_info(link, "Received ECC Err, initiating reset\n");
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+ }
/* Handle Flow Director Full threshold interrupt */
if (eicr & IXGBE_EICR_FLOW_DIR) {
int reinit_count = 0;
@@ -2846,9 +2908,12 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_sfp_event(adapter, eicr);
/* Fall through */
case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_ECC)
- e_info(link, "Received unrecoverable ECC err, please "
- "reboot\n");
+ if (eicr & IXGBE_EICR_ECC) {
+ e_info(link, "Received ECC Err, initiating reset\n");
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+ }
ixgbe_check_overtemp_event(adapter, eicr);
break;
default:
@@ -4590,8 +4655,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *upper;
- struct list_head *iter;
int err;
u32 ctrl_ext;
@@ -4633,19 +4696,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
e_crit(drv, "Fan has stopped, replace the adapter\n");
}
- /* enable transmits */
- netif_tx_start_all_queues(adapter->netdev);
-
- /* enable any upper devices */
- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv)
- netif_tx_start_all_queues(upper);
- }
- }
-
/* bring the link up in the watchdog, this could race with our first
* link up interrupt but shouldn't be a problem */
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -5502,6 +5552,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
struct net_device *netdev = adapter->netdev;
u32 err;
+ adapter->hw.hw_addr = adapter->io_addr;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
@@ -5515,6 +5566,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
e_dev_err("Cannot enable PCI device from suspend\n");
return err;
}
+ smp_mb__before_clear_bit();
+ clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);
pci_wake_from_d3(pdev, false);
@@ -5612,7 +5665,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_release_hw_control(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return 0;
}
@@ -6016,6 +6070,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *upper;
+ struct list_head *iter;
u32 link_speed = adapter->link_speed;
bool flow_rx, flow_tx;
@@ -6067,6 +6123,21 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+ /* enable transmits */
+ netif_tx_wake_all_queues(adapter->netdev);
+
+ /* enable any upper devices */
+ rtnl_lock();
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv)
+ netif_tx_wake_all_queues(upper);
+ }
+ }
+ rtnl_unlock();
+
/* update the default user priority for VFs */
ixgbe_update_default_up(adapter);
@@ -6454,7 +6525,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -6514,12 +6585,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
l4_hdr = ip_hdr(skb)->protocol;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
l4_hdr = ipv6_hdr(skb)->nexthdr;
break;
@@ -6794,9 +6865,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
hdr.network = skb_network_header(first->skb);
/* Currently only IPv4/IPv6 with TCP is supported */
- if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
+ if ((first->protocol != htons(ETH_P_IPV6) ||
hdr.ipv6->nexthdr != IPPROTO_TCP) &&
- (first->protocol != __constant_htons(ETH_P_IP) ||
+ (first->protocol != htons(ETH_P_IP) ||
hdr.ipv4->protocol != IPPROTO_TCP))
return;
@@ -6829,12 +6900,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
* and write the value to source port portion of compressed dword
*/
if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
- common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+ common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
else
common.port.src ^= th->dest ^ first->protocol;
common.port.dst ^= th->source;
- if (first->protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
} else {
@@ -6900,8 +6971,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
* or FIP and we have FCoE enabled on the adapter
*/
switch (vlan_get_protocol(skb)) {
- case __constant_htons(ETH_P_FCOE):
- case __constant_htons(ETH_P_FIP):
+ case htons(ETH_P_FCOE):
+ case htons(ETH_P_FIP):
adapter = netdev_priv(dev);
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
@@ -6962,7 +7033,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN check the next protocol and store the tag */
- } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
@@ -6974,9 +7045,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
}
- skb_tx_timestamp(skb);
-
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ &adapter->state))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
@@ -6986,6 +7057,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
schedule_work(&adapter->ptp_tx_work);
}
+ skb_tx_timestamp(skb);
+
#ifdef CONFIG_PCI_IOV
/*
* Use the l2switch_enable flag - would be false if the DMA
@@ -7021,7 +7094,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#ifdef IXGBE_FCOE
/* setup tx offload for FCoE */
- if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+ if ((protocol == htons(ETH_P_FCOE)) &&
(tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
tso = ixgbe_fso(tx_ring, first, &hdr_len);
if (tso < 0)
@@ -7143,7 +7216,9 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
+ return ixgbe_ptp_set_ts_config(adapter, req);
+ case SIOCGHWTSTAMP:
+ return ixgbe_ptp_get_ts_config(adapter, req);
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
@@ -7234,10 +7309,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
if (ring) {
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
}
@@ -7250,10 +7325,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
if (ring) {
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
}
@@ -7792,6 +7867,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
case IXGBE_DEV_ID_82599_SFP:
 		/* Only these subdevices could support WOL */
switch (subdevice_id) {
+ case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
case IXGBE_SUBDEV_ID_82599_560FLR:
/* only support first port */
if (hw->bus.func != 0)
@@ -7969,10 +8045,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /* Cache if MNG FW is up so we don't have to read the REG later */
- if (hw->mac.ops.mng_fw_enabled)
- hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);
-
/* Make it possible the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -8223,7 +8295,7 @@ skip_sriov:
ixgbe_dbg_adapter_init(adapter);
/* Need link setup for MNG FW, else wait for IXGBE_UP */
- if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
+ if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw,
IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
true);
@@ -8244,7 +8316,8 @@ err_alloc_etherdev:
pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return err;
}
@@ -8313,7 +8386,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
}
/**
@@ -8331,6 +8405,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = adapter->netdev;
#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *bdev, *vfdev;
u32 dw0, dw1, dw2, dw3;
int vf, pos;
@@ -8351,10 +8426,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
if (!pos)
goto skip_bad_vf_detection;
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+ dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
+ dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
+ dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
+ dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
+ if (ixgbe_removed(hw->hw_addr))
+ goto skip_bad_vf_detection;
req_id = dw1 >> 16;
/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
@@ -8417,14 +8494,20 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
ixgbe_down(adapter);
- pci_disable_device(pdev);
+
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -8446,6 +8529,9 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
e_err(probe, "Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
+ smp_mb__before_clear_bit();
+ clear_bit(__IXGBE_DISABLED, &adapter->state);
+ adapter->hw.hw_addr = adapter->io_addr;
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
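Editorial aside (not part of the patch): ixgbe_check_cfg_remove() relies on the PCI convention that config reads to a surprise-removed device complete as all-ones, so a vendor ID of 0xFFFF is treated as proof the adapter is gone. A stand-alone sketch of that check, with a simulated config read in place of pci_read_config_word():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAILED_READ_CFG_WORD 0xffffU  /* all-ones: reads to a removed device */

/* Simulated config-space read; the driver uses pci_read_config_word(). */
static uint16_t cfg_read_word(bool device_present, uint32_t reg)
{
	(void)reg;
	return device_present ? 0x8086 : 0xffff;  /* vendor ID, or all-ones */
}

/* Mirrors the check: an all-ones vendor ID means the device was pulled. */
static bool check_cfg_remove(bool device_present)
{
	return cfg_read_word(device_present, 0x00 /* PCI_VENDOR_ID */) ==
	       FAILED_READ_CFG_WORD;
}

int main(void)
{
	printf("device present: removed=%d\n", check_cfg_remove(true));
	printf("device pulled:  removed=%d\n", check_cfg_remove(false));
	return 0;
}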
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index cc3101afd29f..f5c6af2b891b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index e44ff47659b5..a9b9ad69ed0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 132557c318f8..23f765263f12 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -98,6 +99,32 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability. For MACs that don't
+ * have this bit, just return false since the link cannot be blocked
+ * via this method.
+ **/
+bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
+{
+ u32 mmngc;
+
+ /* If we don't have this bit, it can't be blocking */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return false;
+
+ mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
+ if (mmngc & IXGBE_MMNGC_MNG_VETO) {
+ hw_dbg(hw, "MNG_VETO bit detected.\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
* ixgbe_get_phy_id - Get the phy type
* @hw: pointer to hardware structure
*
@@ -172,6 +199,10 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
(IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
goto out;
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
/*
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
@@ -476,6 +507,10 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
autoneg_reg);
}
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, &autoneg_reg);
@@ -682,6 +717,10 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
autoneg_reg);
}
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, &autoneg_reg);
@@ -759,6 +798,10 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 i;
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
/* reset the PHY and poll for completion */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index fffcbdd2bf0e..0bb047f751c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -65,9 +66,6 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
@@ -79,7 +77,6 @@
#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
-
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
#define IXGBE_TAF_ASM_PAUSE 0x800
@@ -131,6 +128,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
+bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 5184e2a1a7d8..63515a6f67fa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -492,6 +493,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
}
/**
@@ -511,13 +513,10 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
IXGBE_PTP_TX_TIMEOUT);
u32 tsynctxctl;
- /* we have to have a valid skb */
- if (!adapter->ptp_tx_skb)
- return;
-
if (timeout) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
e_warn(drv, "clearing Tx Timestamp hang");
return;
}
@@ -576,14 +575,21 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
shhwtstamps->hwtstamp = ns_to_ktime(ns);
}
+int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
+}
+
/**
- * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
+ * ixgbe_ptp_set_ts_config - control hardware time stamping
* @adapter: pointer to adapter struct
* @ifreq: ioctl data
- * @cmd: particular ioctl requested
*
* Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
+ * disable it when requested, although it shouldn't cause any overhead
* when no packet needs it. At most one packet in the queue may be
* marked for time stamping, otherwise it would be impossible to tell
* for sure to which packet the hardware time stamp belongs.
@@ -599,8 +605,7 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
* Event mode. This more accurately tells the user what the hardware is going
* to do anyways.
*/
-int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
- struct ifreq *ifr, int cmd)
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
{
struct ixgbe_hw *hw = &adapter->hw;
struct hwtstamp_config config;
@@ -702,6 +707,10 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
+
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -809,6 +818,9 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
IXGBE_WRITE_FLUSH(hw);
+ /* Reset the saved tstamp_config */
+ memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+
ixgbe_ptp_start_cyclecounter(adapter);
spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -840,7 +852,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
- snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+ snprintf(adapter->ptp_caps.name,
+ sizeof(adapter->ptp_caps.name),
+ "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -854,7 +868,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.enable = ixgbe_ptp_enable;
break;
case ixgbe_mac_82599EB:
- snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+ snprintf(adapter->ptp_caps.name,
+ sizeof(adapter->ptp_caps.name),
+ "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -911,6 +927,7 @@ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
if (adapter->ptp_tx_skb) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
}
if (adapter->ptp_clock) {
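Editorial aside (not part of the patch): the new __IXGBE_PTP_TX_IN_PROGRESS handling allows at most one Tx timestamp request in flight; the transmit path arms a timestamp only if it wins test_and_set_bit_lock(), and every path that frees ptp_tx_skb clears the bit. A minimal C11 model of that pattern using an atomic flag:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Models the in-progress bit: at most one timestamp request outstanding. */
static atomic_flag ptp_tx_in_progress = ATOMIC_FLAG_INIT;

/* Transmit path: only the caller that wins the flag arms a timestamp. */
static bool try_arm_tx_timestamp(void)
{
	return !atomic_flag_test_and_set(&ptp_tx_in_progress);
}

/* Completion, timeout and teardown paths: free the slot again. */
static void finish_tx_timestamp(void)
{
	atomic_flag_clear(&ptp_tx_in_progress);
}

int main(void)
{
	printf("first request armed:  %d\n", try_arm_tx_timestamp());  /* 1 */
	printf("second request armed: %d\n", try_arm_tx_timestamp());  /* 0 */
	finish_tx_timestamp();
	printf("after completion:     %d\n", try_arm_tx_timestamp());  /* 1 */
	return 0;
}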
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index dff0977876f7..e6c68d396c99 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 8bd29190514e..139eaddfb2ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index e74ae3682733..ef6df3d6437e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0d39cfc4a3bf..8a6ff2423f07 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -54,6 +55,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
@@ -1609,6 +1611,9 @@ enum {
#define IXGBE_MACC_FS 0x00040000
#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+/* Veto Bit definition */
+#define IXGBE_MMNGC_MNG_VETO 0x00000001
+
/* LINKS Bit Masks */
#define IXGBE_LINKS_KX_AN_COMP 0x80000000
#define IXGBE_LINKS_UP 0x40000000
@@ -1788,6 +1793,9 @@ enum {
#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
+#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */
+#define IXGBE_EEPROM_CCD_BIT 2 /* EEPROM Core Clock Disable bit */
+
#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
#endif
@@ -1853,8 +1861,19 @@ enum {
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
+#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf
+#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0
+#define IXGBE_PCIDEVCTRL2_50_100us 0x1
+#define IXGBE_PCIDEVCTRL2_1_2ms 0x2
+#define IXGBE_PCIDEVCTRL2_16_32ms 0x5
+#define IXGBE_PCIDEVCTRL2_65_130ms 0x6
+#define IXGBE_PCIDEVCTRL2_260_520ms 0x9
+#define IXGBE_PCIDEVCTRL2_1_2s 0xa
+#define IXGBE_PCIDEVCTRL2_4_8s 0xd
+#define IXGBE_PCIDEVCTRL2_17_34s 0xe
+
/* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
/* RAH */
#define IXGBE_RAH_VIND_MASK 0x003C0000
@@ -2645,7 +2664,6 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
- ixgbe_media_type_fiber_fixed,
ixgbe_media_type_fiber_qsfp,
ixgbe_media_type_fiber_lco,
ixgbe_media_type_copper,
@@ -2858,6 +2876,8 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+ s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2901,7 +2921,6 @@ struct ixgbe_mac_operations {
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
- bool (*mng_fw_enabled)(struct ixgbe_hw *hw);
};
struct ixgbe_phy_operations {
@@ -2957,7 +2976,6 @@ struct ixgbe_mac_info {
u32 max_tx_queues;
u32 max_rx_queues;
u32 orig_autoc;
- u32 cached_autoc;
u32 orig_autoc2;
bool orig_link_settings_stored;
bool autotry_restart;
@@ -3033,7 +3051,6 @@ struct ixgbe_hw {
bool adapter_stopped;
bool force_full_reset;
bool allow_unsupported_sfp;
- bool mng_fw_enabled;
bool wol_enabled;
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 24b80a6cfca4..188a5974b85c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -61,6 +62,7 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -187,7 +189,6 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
goto out;
ret_val = ixgbe_start_hw_gen2(hw);
- hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
out:
return ret_val;
}
@@ -854,7 +855,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.enable_rx_buff = &ixgbe_enable_rx_buff_generic,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
- .mng_fw_enabled = NULL,
+ .prot_autoc_read = &prot_autoc_read_generic,
+ .prot_autoc_write = &prot_autoc_write_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index f68b78c732a8..1baecb60f065 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -530,41 +530,55 @@ static const u32 register_test_patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};
-#define REG_PATTERN_TEST(R, M, W) \
-{ \
- u32 pat, val, before; \
- for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
- before = readl(adapter->hw.hw_addr + R); \
- writel((register_test_patterns[pat] & W), \
- (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if (val != (register_test_patterns[pat] & W & M)) { \
- hw_dbg(&adapter->hw, \
- "pattern test reg %04X failed: got " \
- "0x%08X expected 0x%08X\n", \
- R, val, (register_test_patterns[pat] & W & M)); \
- *data = R; \
- writel(before, adapter->hw.hw_addr + R); \
- return 1; \
- } \
- writel(before, adapter->hw.hw_addr + R); \
- } \
+static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ u32 pat, val, before;
+
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ *data = 1;
+ return true;
+ }
+ for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
+ before = ixgbevf_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg,
+ register_test_patterns[pat] & write);
+ val = ixgbevf_read_reg(&adapter->hw, reg);
+ if (val != (register_test_patterns[pat] & write & mask)) {
+ hw_dbg(&adapter->hw,
+ "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
+ reg, val,
+ register_test_patterns[pat] & write & mask);
+ *data = reg;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
+ }
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ }
+ return false;
}
-#define REG_SET_AND_CHECK(R, M, W) \
-{ \
- u32 val, before; \
- before = readl(adapter->hw.hw_addr + R); \
- writel((W & M), (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if ((W & M) != (val & M)) { \
- pr_err("set/check reg %04X test failed: got 0x%08X expected " \
- "0x%08X\n", R, (val & M), (W & M)); \
- *data = R; \
- writel(before, (adapter->hw.hw_addr + R)); \
- return 1; \
- } \
- writel(before, (adapter->hw.hw_addr + R)); \
+static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ u32 val, before;
+
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ *data = 1;
+ return true;
+ }
+ before = ixgbevf_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg, write & mask);
+ val = ixgbevf_read_reg(&adapter->hw, reg);
+ if ((write & mask) != (val & mask)) {
+ pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), write & mask);
+ *data = reg;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
+ }
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return false;
}
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
@@ -572,6 +586,12 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
const struct ixgbevf_reg_test *test;
u32 i;
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter removed - register test blocked\n");
+ *data = 1;
+ return 1;
+ }
test = reg_test_vf;
/*
@@ -580,38 +600,47 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
*/
while (test->reg) {
for (i = 0; i < test->array_len; i++) {
+ bool b = false;
+
switch (test->test_type) {
case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_set_and_check(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case WRITE_NO_TEST:
- writel(test->write,
- (adapter->hw.hw_addr + test->reg)
- + (i * 0x40));
+ ixgbe_write_reg(&adapter->hw,
+ test->reg + (i * 0x40),
+ test->write);
break;
case TABLE32_TEST:
- REG_PATTERN_TEST(test->reg + (i * 4),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 4),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_LO:
- REG_PATTERN_TEST(test->reg + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 8),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_HI:
- REG_PATTERN_TEST((test->reg + 4) + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + 4 + (i * 8),
+ test->mask,
+ test->write);
break;
}
+ if (b)
+ return 1;
}
test++;
}
@@ -626,6 +655,14 @@ static void ixgbevf_diag_test(struct net_device *netdev,
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter removed - test blocked\n");
+ data[0] = 1;
+ data[1] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
set_bit(__IXGBEVF_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
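
The hunk above turns the old REG_PATTERN_TEST/REG_SET_AND_CHECK macros into real functions that report failure through a return value instead of returning out of the caller. As a rough standalone sketch of the pattern-test idea, with an in-memory variable standing in for an MMIO register (all names and values below are invented for illustration, not part of the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for an MMIO register: a plain variable. */
static uint32_t fake_reg;

static const uint32_t patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

/* Write each pattern (masked by 'write'), read it back and compare the
 * bits selected by 'mask'; restore the original value afterwards.
 * Returns true on mismatch, mirroring the reg_pattern_test() helper. */
static bool pattern_test(uint32_t *reg, uint32_t mask, uint32_t write)
{
	uint32_t before = *reg;

	for (size_t i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
		*reg = patterns[i] & write;
		uint32_t val = *reg;
		if (val != (patterns[i] & write & mask)) {
			fprintf(stderr, "pattern 0x%08X failed: got 0x%08X\n",
				(unsigned)patterns[i], (unsigned)val);
			*reg = before;
			return true;
		}
	}
	*reg = before;
	return false;
}

int main(void)
{
	/* A well-behaved "register" reads back every pattern as written. */
	printf("failed: %d\n", pattern_test(&fake_reg, 0xFFFFFFFF, 0xFFFFFFFF));
	return 0;
}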
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 54829326bb09..e7e7d695816b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -315,6 +315,11 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
+static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
+{
+ writel(value, ring->tail);
+}
+
#define IXGBEVF_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBEVF_TX_DESC(R, i) \
@@ -401,6 +406,7 @@ struct ixgbevf_adapter {
u64 bp_tx_missed;
#endif
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 link_speed;
bool link_up;
@@ -412,7 +418,9 @@ struct ixgbevf_adapter {
enum ixbgevf_state_t {
__IXGBEVF_TESTING,
__IXGBEVF_RESETTING,
- __IXGBEVF_DOWN
+ __IXGBEVF_DOWN,
+ __IXGBEVF_DISABLED,
+ __IXGBEVF_REMOVING,
};
struct ixgbevf_cb {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9df28985eba7..4ba139b2d25a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -99,6 +99,49 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
+static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
+{
+ struct ixgbevf_adapter *adapter = hw->back;
+
+ if (!hw->hw_addr)
+ return;
+ hw->hw_addr = NULL;
+ dev_err(&adapter->pdev->dev, "Adapter removed\n");
+ schedule_work(&adapter->watchdog_task);
+}
+
+static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
+{
+ u32 value;
+
+	/* The following check not only optimizes a bit by not
+	 * performing a status register read when the register
+	 * just read was itself the status register and returned
+	 * IXGBE_FAILED_READ_REG. It also blocks any
+	 * potential recursion.
+	 */
+ if (reg == IXGBE_VFSTATUS) {
+ ixgbevf_remove_adapter(hw);
+ return;
+ }
+ value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
+ if (value == IXGBE_FAILED_READ_REG)
+ ixgbevf_remove_adapter(hw);
+}
+
+u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (IXGBE_REMOVED(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbevf_check_remove(hw, reg);
+ return value;
+}
+
static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
u32 val)
{
@@ -111,7 +154,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
* such as IA-64).
*/
wmb();
- writel(val, rx_ring->tail);
+ ixgbevf_write_tail(rx_ring, val);
}
/**
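
A hedged sketch of the surprise-removal detection introduced above: an MMIO read from a removed PCI device returns all ones, so 0xFFFFFFFF doubles as a failure sentinel and the mapping is dropped once it is seen. The structure and names below are invented for the example and simplify the real status-register recursion check:

#include <stdint.h>
#include <stdio.h>

#define FAILED_READ_REG 0xffffffffU

struct fake_hw {
	uint32_t *regs;   /* NULL once the device is considered removed */
	size_t   nregs;
};

/* Guarded read: if the mapping is already gone, return the sentinel
 * immediately; if the value read looks like the sentinel, treat the
 * device as removed and drop the mapping (very loosely mirroring
 * ixgbevf_read_reg() plus ixgbevf_check_remove()). */
static uint32_t guarded_read(struct fake_hw *hw, size_t reg)
{
	if (!hw->regs || reg >= hw->nregs)
		return FAILED_READ_REG;
	uint32_t value = hw->regs[reg];
	if (value == FAILED_READ_REG) {
		hw->regs = NULL;          /* "adapter removed" */
		fprintf(stderr, "device looks removed at reg %zu\n", reg);
	}
	return value;
}

int main(void)
{
	uint32_t regs[4] = { 0x1, 0x2, FAILED_READ_REG, 0x4 };
	struct fake_hw hw = { regs, 4 };

	printf("reg0 = 0x%x\n", (unsigned)guarded_read(&hw, 0)); /* normal read */
	printf("reg2 = 0x%x\n", (unsigned)guarded_read(&hw, 2)); /* triggers removal */
	printf("reg3 = 0x%x\n", (unsigned)guarded_read(&hw, 3)); /* short-circuits */
	return 0;
}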
@@ -516,7 +559,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* Workaround hardware that can't do proper VEPA multicast
* source pruning.
*/
- if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
+ if ((skb->pkt_type == PACKET_BROADCAST ||
+ skb->pkt_type == PACKET_MULTICAST) &&
ether_addr_equal(rx_ring->netdev->dev_addr,
eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
@@ -607,7 +651,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
if (adapter->rx_itr_setting & 1)
ixgbevf_set_itr(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
ixgbevf_irq_enable_queues(adapter,
1 << q_vector->v_idx);
@@ -832,7 +877,8 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
hw->mac.get_link_status = 1;
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -1136,7 +1182,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
/* reset head and tail pointers */
IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
/* reset ntu and ntc to place SW in sync with hardware */
ring->next_to_clean = 0;
@@ -1256,6 +1302,8 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (IXGBE_REMOVED(hw->hw_addr))
+ return;
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -1281,6 +1329,8 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (IXGBE_REMOVED(hw->hw_addr))
+ return;
do {
usleep_range(1000, 2000);
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
@@ -1315,7 +1365,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
/* reset head and tail pointers */
IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
/* reset ntu and ntc to place SW in sync with hardware */
ring->next_to_clean = 0;
@@ -1617,6 +1667,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
spin_unlock_bh(&adapter->mbx_lock);
+ smp_mb__before_clear_bit();
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -1741,7 +1792,8 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
int i;
/* signal that we are down to the interrupt handler */
- set_bit(__IXGBEVF_DOWN, &adapter->state);
+ if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
+ return; /* do nothing if already down */
/* disable all enabled rx queues */
for (i = 0; i < adapter->num_rx_queues; i++)
@@ -1817,7 +1869,6 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
int vectors)
{
- int err = 0;
int vector_threshold;
/* We'll want at least 2 (vector_threshold):
@@ -1831,33 +1882,24 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err || err < 0) /* Success or a nasty failure. */
- break;
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
-
- if (vectors < vector_threshold)
- err = -ENOMEM;
+ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
- if (err) {
+ if (vectors < 0) {
dev_err(&adapter->pdev->dev,
"Unable to allocate MSI-X interrupts\n");
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- } else {
- /*
- * Adjust for only the vectors we'll use, which is minimum
- * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
- * vectors we were allocated.
- */
- adapter->num_msix_vectors = vectors;
+ return vectors;
}
- return err;
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+
+ return 0;
}
/**
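
The MSI-X change above relies on pci_enable_msix_range(), which either succeeds with some vector count in [minvec, maxvec] or fails outright, replacing the old open-coded retry loop. A small userspace sketch of that contract, with a made-up allocator standing in for the PCI core:

#include <errno.h>
#include <stdio.h>

/* Pretend the platform can hand out at most this many vectors. */
static int vectors_available = 5;

/* Ranged allocation: succeed with any count in [minvec, maxvec],
 * preferring the largest; otherwise fail with a negative errno.
 * This mirrors the pci_enable_msix_range() contract the driver
 * now depends on. */
static int enable_msix_range(int minvec, int maxvec)
{
	if (minvec > maxvec)
		return -EINVAL;
	if (vectors_available < minvec)
		return -ENOSPC;
	return vectors_available < maxvec ? vectors_available : maxvec;
}

int main(void)
{
	int got = enable_msix_range(2, 8);

	if (got < 0)
		fprintf(stderr, "no MSI-X vectors: %d\n", got);
	else
		printf("using %d vectors\n", got); /* prints 5 here */
	return 0;
}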
@@ -2338,6 +2380,7 @@ static void ixgbevf_reset_task(struct work_struct *work)
/* If we're already down or resetting, just bail */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
return;
@@ -2361,6 +2404,14 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
bool link_up = adapter->link_up;
s32 need_reset;
+ if (IXGBE_REMOVED(hw->hw_addr)) {
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ rtnl_lock();
+ ixgbevf_down(adapter);
+ rtnl_unlock();
+ }
+ return;
+ }
ixgbevf_queue_reset_subtask(adapter);
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
@@ -2422,7 +2473,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
pf_has_reset:
/* Reset the timer */
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + (2 * HZ)));
@@ -2787,6 +2839,9 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -2857,12 +2912,12 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_hdr = 0;
switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
l4_hdr = ip_hdr(skb)->protocol;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
l4_hdr = ipv6_hdr(skb)->nexthdr;
break;
@@ -3060,7 +3115,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_ring->next_to_use = i;
/* notify HW of packet */
- writel(i, tx_ring->tail);
+ ixgbevf_write_tail(tx_ring, i);
return;
dma_error:
@@ -3165,7 +3220,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = ixgbevf_tso(tx_ring, first, &hdr_len);
if (tso < 0)
goto out_drop;
- else
+ else if (!tso)
ixgbevf_tx_csum(tx_ring, first);
ixgbevf_tx_map(tx_ring, first, hdr_len);
@@ -3274,7 +3329,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
return retval;
#endif
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return 0;
}
@@ -3286,7 +3342,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
u32 err;
- pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
* pci_restore_state clears dev->state_saved so call
@@ -3299,6 +3354,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
return err;
}
+ smp_mb__before_clear_bit();
+ clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
ixgbevf_reset(adapter);
@@ -3344,10 +3401,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
ring = adapter->rx_ring[i];
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
bytes = ring->stats.bytes;
packets = ring->stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->rx_bytes += bytes;
stats->rx_packets += packets;
}
@@ -3355,10 +3412,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
for (i = 0; i < adapter->num_tx_queues; i++) {
ring = adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
bytes = ring->stats.bytes;
packets = ring->stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->tx_bytes += bytes;
stats->tx_packets += packets;
}
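
The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() conversions in this and the following hunks all use the same seqcount retry pattern: snapshot the counters, then retry if the sequence number was odd or changed underneath the reader. A minimal single-threaded sketch of the reader loop (the writer side is not shown and the memory barriers real kernel code needs are omitted):

#include <stdint.h>
#include <stdio.h>

struct stats {
	unsigned int seq;     /* odd while a writer is mid-update */
	uint64_t bytes;
	uint64_t packets;
};

static unsigned int fetch_begin(const struct stats *s)
{
	return s->seq;
}

static int fetch_retry(const struct stats *s, unsigned int start)
{
	/* Retry if a writer was active (odd) or ran in between (changed). */
	return (start & 1) || s->seq != start;
}

int main(void)
{
	struct stats s = { .seq = 2, .bytes = 1500, .packets = 1 };
	uint64_t bytes, packets;
	unsigned int start;

	do {
		start = fetch_begin(&s);
		bytes = s.bytes;
		packets = s.packets;
	} while (fetch_retry(&s, start));

	printf("%llu bytes in %llu packets\n",
	       (unsigned long long)bytes, (unsigned long long)packets);
	return 0;
}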
@@ -3460,6 +3517,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
+ adapter->io_addr = hw->hw_addr;
if (!hw->hw_addr) {
err = -EIO;
goto err_ioremap;
@@ -3545,14 +3603,15 @@ err_register:
ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
ixgbevf_reset_interrupt_capability(adapter);
- iounmap(hw->hw_addr);
+ iounmap(adapter->io_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return err;
}
@@ -3570,7 +3629,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- set_bit(__IXGBEVF_DOWN, &adapter->state);
+ set_bit(__IXGBEVF_REMOVING, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
@@ -3583,14 +3642,15 @@ static void ixgbevf_remove(struct pci_dev *pdev)
ixgbevf_clear_interrupt_scheme(adapter);
ixgbevf_reset_interrupt_capability(adapter);
- iounmap(adapter->hw.hw_addr);
+ iounmap(adapter->io_addr);
pci_release_regions(pdev);
hw_dbg(&adapter->hw, "Remove complete\n");
free_netdev(netdev);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
}
/**
@@ -3607,15 +3667,20 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
ixgbevf_down(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -3639,6 +3704,8 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+ smp_mb__before_clear_bit();
+ clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
ixgbevf_reset(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index debd8c0e1f28..09dd8f698bea 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -70,16 +70,6 @@
#define IXGBE_VFGOTC_MSB 0x02024
#define IXGBE_VFMPRC 0x01034
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
-
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
- writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
-
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
- readl((a)->hw_addr + (reg) + ((offset) << 2)))
-
#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 7b1f502d1716..3061d1890471 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -172,6 +172,37 @@ struct ixgbevf_info {
const struct ixgbe_mac_operations *mac_ops;
};
+#define IXGBE_FAILED_READ_REG 0xffffffffU
+
+#define IXGBE_REMOVED(a) unlikely(!(a))
+
+static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+ if (IXGBE_REMOVED(reg_addr))
+ return;
+ writel(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v)
+
+u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg);
+#define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r)
+
+static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg,
+ u32 offset, u32 value)
+{
+ ixgbe_write_reg(hw, reg + (offset << 2), value);
+}
+#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v)
+
+static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
+ u32 offset)
+{
+ return ixgbevf_read_reg(hw, reg + (offset << 2));
+}
+#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
+
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f5685c0d0579..b0c6050479eb 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2054,19 +2054,6 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
}
static int
-jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
-{
- if (unlikely(skb_shinfo(skb)->gso_size &&
- skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
- dev_kfree_skb(skb);
- return -1;
- }
-
- return 0;
-}
-
-static int
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
@@ -2225,7 +2212,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
struct jme_adapter *jme = netdev_priv(netdev);
int idx;
- if (unlikely(jme_expand_header(jme, skb))) {
+ if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) {
+ dev_kfree_skb_any(skb);
++(NET_STAT(jme).tx_dropped);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a2565ce22b7c..b7b8d74c22d9 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -730,7 +730,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
unlikely(tag_bytes & ~12)) {
if (skb_checksum_help(skb) == 0)
goto no_csum;
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return 1;
}
@@ -819,7 +819,7 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
if (net_ratelimit())
netdev_err(dev, "tx queue full?!\n");
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index fd409d76b811..b161a525fc5b 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -167,11 +167,6 @@ out:
return ret;
}
-static int orion_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
{
struct orion_mdio_dev *dev = dev_id;
@@ -209,7 +204,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
bus->name = "orion_mdio_bus";
bus->read = orion_mdio_read;
bus->write = orion_mdio_write;
- bus->reset = orion_mdio_reset;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
dev_name(&pdev->dev));
bus->parent = &pdev->dev;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 8d76fca7fde7..d04b1c3c9b85 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -510,12 +510,12 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
cpu_stats = per_cpu_ptr(pp->stats, cpu);
do {
- start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
rx_packets = cpu_stats->rx_packets;
rx_bytes = cpu_stats->rx_bytes;
tx_packets = cpu_stats->tx_packets;
tx_bytes = cpu_stats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -2761,7 +2761,6 @@ static int mvneta_probe(struct platform_device *pdev)
const char *mac_from;
int phy_mode;
int err;
- int cpu;
/* Our multiqueue support is not complete, so for now, only
* allow the usage of the first RX queue
@@ -2816,30 +2815,19 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -ENODEV;
- goto err_clk;
- }
-
pp->base = devm_ioremap_resource(&pdev->dev, res);
- if (pp->base == NULL) {
+ if (IS_ERR(pp->base)) {
err = PTR_ERR(pp->base);
goto err_clk;
}
/* Alloc per-cpu stats */
- pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
+ pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
if (!pp->stats) {
err = -ENOMEM;
goto err_clk;
}
- for_each_possible_cpu(cpu) {
- struct mvneta_pcpu_stats *stats;
- stats = per_cpu_ptr(pp->stats, cpu);
- u64_stats_init(&stats->syncp);
- }
-
dt_mac_addr = of_get_mac_address(dn);
if (dt_mac_addr) {
mac_from = "device tree";
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 597846193869..7f81ae66cc89 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2845,7 +2845,7 @@ mapping_unwind:
mapping_error:
if (net_ratelimit())
dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -3172,7 +3172,7 @@ static void skge_tx_done(struct net_device *dev)
pkts_compl++;
bytes_compl += e->skb->len;
- dev_kfree_skb(e->skb);
+ dev_consume_skb_any(e->skb);
}
}
netdev_completed_queue(dev, pkts_compl, bytes_compl);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 55a37ae11440..b81106451a0a 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -44,6 +44,8 @@
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/mii.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
#include <asm/irq.h>
@@ -2000,7 +2002,7 @@ mapping_unwind:
mapping_error:
if (net_ratelimit())
dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2733,6 +2735,9 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
unsigned int total_bytes[2] = { 0 };
unsigned int total_packets[2] = { 0 };
+ if (to_do <= 0)
+ return work_done;
+
rmb();
do {
struct sky2_port *sky2;
@@ -3906,19 +3911,19 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
u64 _bytes, _packets;
do {
- start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp);
_bytes = sky2->rx_stats.bytes;
_packets = sky2->rx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start));
stats->rx_packets = _packets;
stats->rx_bytes = _bytes;
do {
- start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp);
_bytes = sky2->tx_stats.bytes;
_packets = sky2->tx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start));
stats->tx_packets = _packets;
stats->tx_bytes = _bytes;
@@ -4748,6 +4753,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
{
struct sky2_port *sky2;
struct net_device *dev = alloc_etherdev(sizeof(*sky2));
+ const void *iap;
if (!dev)
return NULL;
@@ -4805,8 +4811,16 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
dev->features |= dev->hw_features;
- /* read the mac address */
- memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
+ /* try to get mac address in the following order:
+ * 1) from device tree data
+ * 2) from internal registers set by bootloader
+ */
+ iap = of_get_mac_address(hw->pdev->dev.of_node);
+ if (iap)
+ memcpy(dev->dev_addr, iap, ETH_ALEN);
+ else
+ memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
+ ETH_ALEN);
return dev;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 563495d8975a..1486ce902a56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -3,7 +3,7 @@
#
config MLX4_EN
- tristate "Mellanox Technologies 10Gbit Ethernet support"
+ tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
depends on PCI
select MLX4_CORE
select PTP_1588_CLOCK
@@ -23,6 +23,13 @@ config MLX4_EN_DCB
If unsure, set to Y
+config MLX4_EN_VXLAN
+ bool "VXLAN offloads Support"
+ default y
+ depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m)
+ ---help---
+ Say Y here if you want to use VXLAN offloads in the driver.
+
config MLX4_CORE
tristate
depends on PCI
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 0d02fba94536..78099eab7673 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -800,16 +800,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
-static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
- struct mlx4_vhcr *vhcr,
- struct mlx4_cmd_mailbox *inbox,
- struct mlx4_cmd_mailbox *outbox,
- struct mlx4_cmd_info *cmd)
-{
- return -EPERM;
-}
-
-static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave,
+static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
@@ -964,6 +955,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.wrapper = NULL
},
{
+ .opcode = MLX4_CMD_CONFIG_DEV,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_CMD_EPERM_wrapper
+ },
+ {
.opcode = MLX4_CMD_ALLOC_RES,
.has_inbox = false,
.has_outbox = false,
@@ -1258,7 +1258,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = MLX4_CMD_UPDATE_QP_wrapper
+ .wrapper = mlx4_CMD_EPERM_wrapper
},
{
.opcode = MLX4_CMD_GET_OP_REQ,
@@ -1267,7 +1267,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = MLX4_CMD_GET_OP_REQ_wrapper,
+ .wrapper = mlx4_CMD_EPERM_wrapper,
},
{
.opcode = MLX4_CMD_CONF_SPECIAL_QP,
@@ -1378,7 +1378,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper
+ .wrapper = mlx4_CMD_EPERM_wrapper
},
};
@@ -1643,8 +1643,16 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
int port, err;
struct mlx4_vport_state *vp_admin;
struct mlx4_vport_oper_state *vp_oper;
-
- for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+ &priv->dev, slave);
+ int min_port = find_first_bit(actv_ports.ports,
+ priv->dev.caps.num_ports) + 1;
+ int max_port = min_port - 1 +
+ bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
+
+ for (port = min_port; port <= max_port; port++) {
+ if (!test_bit(port - 1, actv_ports.ports))
+ continue;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
vp_oper->state = *vp_admin;
@@ -1685,8 +1693,17 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
{
int port;
struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+ &priv->dev, slave);
+ int min_port = find_first_bit(actv_ports.ports,
+ priv->dev.caps.num_ports) + 1;
+ int max_port = min_port - 1 +
+ bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
+
- for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ for (port = min_port; port <= max_port; port++) {
+ if (!test_bit(port - 1, actv_ports.ports))
+ continue;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (NO_INDX != vp_oper->vlan_idx) {
__mlx4_unregister_vlan(&priv->dev,
@@ -2234,6 +2251,112 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
return vf+1;
}
+int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
+{
+ if (slave < 1 || slave > dev->num_vfs) {
+ mlx4_err(dev,
+ "Bad slave number:%d (number of activated slaves: %lu)\n",
+ slave, dev->num_slaves);
+ return -EINVAL;
+ }
+ return slave - 1;
+}
+
+struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_active_ports actv_ports;
+ int vf;
+
+ bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
+
+ if (slave == 0) {
+ bitmap_fill(actv_ports.ports, dev->caps.num_ports);
+ return actv_ports;
+ }
+
+ vf = mlx4_get_vf_indx(dev, slave);
+ if (vf < 0)
+ return actv_ports;
+
+ bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
+ min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
+ dev->caps.num_ports));
+
+ return actv_ports;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
+
+int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
+{
+ unsigned n;
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+ unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+ if (port <= 0 || port > m)
+ return -EINVAL;
+
+ n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
+ if (port <= n)
+ port = n + 1;
+
+ return port;
+}
+EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
+
+int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
+{
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+ if (test_bit(port - 1, actv_ports.ports))
+ return port -
+ find_first_bit(actv_ports.ports, dev->caps.num_ports);
+
+ return -1;
+}
+EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
+
+struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
+ int port)
+{
+ unsigned i;
+ struct mlx4_slaves_pport slaves_pport;
+
+ bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
+
+ if (port <= 0 || port > dev->caps.num_ports)
+ return slaves_pport;
+
+ for (i = 0; i < dev->num_vfs + 1; i++) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev, i);
+ if (test_bit(port - 1, actv_ports.ports))
+ set_bit(i, slaves_pport.slaves);
+ }
+
+ return slaves_pport;
+}
+EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
+
+struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
+ struct mlx4_dev *dev,
+ const struct mlx4_active_ports *crit_ports)
+{
+ unsigned i;
+ struct mlx4_slaves_pport slaves_pport;
+
+ bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
+
+ for (i = 0; i < dev->num_vfs + 1; i++) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev, i);
+ if (bitmap_equal(crit_ports->ports, actv_ports.ports,
+ dev->caps.num_ports))
+ set_bit(i, slaves_pport.slaves);
+ }
+
+ return slaves_pport;
+}
+EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
+
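
The helpers added above translate between a slave's own 1-based port numbering and the physical ports granted to it, driven by a per-slave active-port bitmap. A standalone sketch of the same arithmetic over a plain bitmask, assuming, as the driver does, that a slave's ports are contiguous; the function names are invented for the example:

#include <stdio.h>

/* One bit per physical port, bit 0 == port 1. */
static int first_set_bit(unsigned int mask)
{
	int n = 0;

	while (mask && !(mask & 1u)) {
		mask >>= 1;
		n++;
	}
	return mask ? n : -1;
}

static int popcount(unsigned int mask)
{
	int c = 0;

	for (; mask; mask &= mask - 1)
		c++;
	return c;
}

/* Map a slave-local port number onto the physical port. */
static int slave_to_phys(unsigned int actv, int slave_port)
{
	int first = first_set_bit(actv);
	int nports = popcount(actv);

	if (first < 0 || slave_port <= 0 || slave_port > nports)
		return -1;
	return slave_port <= first ? first + 1 : slave_port;
}

/* Inverse mapping: physical port back to the slave's local numbering. */
static int phys_to_slave(unsigned int actv, int phys_port)
{
	if (phys_port <= 0 || phys_port > 32 ||
	    !(actv & (1u << (phys_port - 1))))
		return -1;
	return phys_port - first_set_bit(actv);
}

int main(void)
{
	unsigned int actv = 0x2;  /* slave sees only physical port 2 */

	printf("slave port 1 -> phys %d\n", slave_to_phys(actv, 1)); /* 2 */
	printf("phys port 2 -> slave %d\n", phys_to_slave(actv, 2)); /* 1 */
	return 0;
}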
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2289,6 +2412,30 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
+ /* mlx4_get_slave_default_vlan -
+  * returns true if the slave uses VST (a default vlan is set);
+  * if so, also reports the vlan & qos values (when the pointers are not NULL)
+  */
+bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
+ u16 *vlan, u8 *qos)
+{
+ struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_priv *priv;
+
+ priv = mlx4_priv(dev);
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+
+ if (MLX4_VGT != vp_oper->state.default_vlan) {
+ if (vlan)
+ *vlan = vp_oper->state.default_vlan;
+ if (qos)
+ *qos = vp_oper->state.default_qos;
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
+
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index abaf6bb22416..57dda95b67d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -276,6 +276,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = mlx4_en_phc_adjfreq,
.adjtime = mlx4_en_phc_adjtime,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index b4881b686159..c95ca252187c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -62,7 +62,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
int has_ets_tc = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->prio_tc[i] > MLX4_EN_NUM_UP) {
+ if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
i, ets->prio_tc[i]);
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index d357bf5a4686..0c59d4fe7e3a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -72,6 +72,12 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
" Per priority bit mask");
+MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
+ "Threshold for using inline data (range: 17-104, default: 104)");
+
+#define MAX_PFC_TX 0xff
+#define MAX_PFC_RX 0xff
+
int en_print(const char *level, const struct mlx4_en_priv *priv,
const char *format, ...)
{
@@ -140,6 +146,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
MLX4_EN_NUM_UP;
params->prof[i].rss_rings = 0;
+ params->prof[i].inline_thold = inline_thold;
}
return 0;
@@ -274,19 +281,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_init_timestamp(mdev);
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- if (!dev->caps.comp_pool) {
- mdev->profile.prof[i].rx_ring_num =
- rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
- min_t(int,
- dev->caps.num_comp_vectors,
- DEF_RX_RINGS)));
- } else {
- mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
- min_t(int, dev->caps.comp_pool/
- dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
- }
- }
+	/* Set default number of RX rings */
+ mlx4_en_set_num_rx_rings(mdev);
/* Create our own workqueue for reset/multicast tasks
* Note: we cannot use the shared workqueue because of deadlocks caused
@@ -336,8 +332,31 @@ static struct mlx4_interface mlx4_en_interface = {
.protocol = MLX4_PROT_ETH,
};
+static void mlx4_en_verify_params(void)
+{
+ if (pfctx > MAX_PFC_TX) {
+ pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
+ pfctx, MAX_PFC_TX);
+ pfctx = 0;
+ }
+
+ if (pfcrx > MAX_PFC_RX) {
+ pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
+ pfcrx, MAX_PFC_RX);
+ pfcrx = 0;
+ }
+
+ if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
+ pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
+ inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
+ inline_thold = MAX_INLINE;
+ }
+}
+
static int __init mlx4_en_init(void)
{
+ mlx4_en_verify_params();
+
return mlx4_register_interface(&mlx4_en_interface);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 84a96f70dfb5..f085c2df5e69 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -39,6 +39,7 @@
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
+#include <net/vxlan.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -603,7 +604,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
int err = 0;
u64 reg_id;
int *qpn = &priv->base_qpn;
- u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
priv->dev->dev_addr);
@@ -672,7 +673,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
u64 mac;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+ mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
@@ -685,7 +686,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- mac = mlx4_en_mac_to_u64(entry->mac);
+ mac = mlx4_mac_to_u64(entry->mac);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
@@ -715,14 +716,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int err = 0;
- u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);
+ u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
struct hlist_head *bucket;
unsigned int mac_hash;
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
- u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
+ u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
@@ -759,18 +760,6 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
-u64 mlx4_en_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
-
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
{
int err = 0;
@@ -1089,7 +1078,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
- mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
+ mcast_addr = mlx4_mac_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -1181,7 +1170,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
found = true;
if (!found) {
- mac = mlx4_en_mac_to_u64(entry->mac);
+ mac = mlx4_mac_to_u64(entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
priv->base_qpn,
entry->reg_id);
@@ -1224,7 +1213,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
}
- mac = mlx4_en_mac_to_u64(ha->addr);
+ mac = mlx4_mac_to_u64(ha->addr);
memcpy(entry->mac, ha->addr, ETH_ALEN);
err = mlx4_register_mac(mdev->dev, priv->port, mac);
if (err < 0) {
@@ -1677,7 +1666,7 @@ int mlx4_en_start_port(struct net_device *dev)
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
if (err) {
en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
err);
@@ -1709,6 +1698,10 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+#ifdef CONFIG_MLX4_EN_VXLAN
+ if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+ vxlan_get_rx_port(dev);
+#endif
priv->port_up = true;
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -2216,7 +2209,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
- u64 mac_u64 = mlx4_en_mac_to_u64(mac);
+ u64 mac_u64 = mlx4_mac_to_u64(mac);
if (!is_valid_ether_addr(mac))
return -EINVAL;
@@ -2276,6 +2269,83 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
return 0;
}
+#ifdef CONFIG_MLX4_EN_VXLAN
+static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
+{
+ int ret;
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ vxlan_add_task);
+
+ ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
+ if (ret)
+ goto out;
+
+ ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+ VXLAN_STEER_BY_OUTER_MAC, 1);
+out:
+ if (ret)
+ en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+}
+
+static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+{
+ int ret;
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ vxlan_del_task);
+
+ ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+ VXLAN_STEER_BY_OUTER_MAC, 0);
+ if (ret)
+ en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+
+ priv->vxlan_port = 0;
+}
+
+static void mlx4_en_add_vxlan_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ __be16 current_port;
+
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
+ return;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ current_port = priv->vxlan_port;
+ if (current_port && current_port != port) {
+ en_warn(priv, "vxlan port %d configured, can't add port %d\n",
+ ntohs(current_port), ntohs(port));
+ return;
+ }
+
+ priv->vxlan_port = port;
+ queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
+}
+
+static void mlx4_en_del_vxlan_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ __be16 current_port;
+
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ return;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ current_port = priv->vxlan_port;
+ if (current_port != port) {
+ en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
+ return;
+ }
+
+ queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
+}
+#endif
+
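
The ndo_add/del_vxlan_port callbacks above track a single offloaded UDP port per device: a new port is accepted only when none is configured (or it matches the current one), and a delete is honoured only for the stored port. A tiny sketch of that bookkeeping with invented names and host-order port numbers:

#include <stdint.h>
#include <stdio.h>

static uint16_t vxlan_port;   /* 0 means "no port offloaded" */

static void add_vxlan_port(uint16_t port)
{
	if (vxlan_port && vxlan_port != port) {
		printf("port %u already offloaded, can't add %u\n",
		       vxlan_port, port);
		return;
	}
	vxlan_port = port;
	printf("offloading VXLAN port %u\n", port);
}

static void del_vxlan_port(uint16_t port)
{
	if (vxlan_port != port) {
		printf("port %u isn't configured, ignoring\n", port);
		return;
	}
	vxlan_port = 0;
	printf("stopped offloading port %u\n", port);
}

int main(void)
{
	add_vxlan_port(4789);   /* accepted */
	add_vxlan_port(8472);   /* rejected: a different port is set */
	del_vxlan_port(8472);   /* ignored */
	del_vxlan_port(4789);   /* clears the offload */
	return 0;
}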
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -2302,6 +2372,10 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
+#ifdef CONFIG_MLX4_EN_VXLAN
+ .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
+ .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
+#endif
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2351,7 +2425,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
- dev->dev_id = port - 1;
+ dev->dev_port = port - 1;
/*
* Initialize driver private data
@@ -2393,6 +2467,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
+#ifdef CONFIG_MLX4_EN_VXLAN
+ INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
+ INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
+#endif
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
@@ -2417,7 +2495,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mlx4_is_slave(priv->mdev->dev)) {
eth_hw_addr_random(dev);
en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
- mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
+ mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
mdev->dev->caps.def_mac[priv->port] = mac_u64;
} else {
en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
@@ -2526,7 +2604,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
if (err) {
en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index dae1a1f4ae55..c2cfb05e7290 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -148,10 +148,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->tx_packets = 0;
stats->tx_bytes = 0;
priv->port_stats.tx_chksum_offload = 0;
+ priv->port_stats.queue_stopped = 0;
+ priv->port_stats.wake_queue = 0;
+
for (i = 0; i < priv->tx_ring_num; i++) {
stats->tx_packets += priv->tx_ring[i]->packets;
stats->tx_bytes += priv->tx_ring[i]->bytes;
priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
+ priv->port_stats.queue_stopped +=
+ priv->tx_ring[i]->queue_stopped;
+ priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
}
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 890922c1c8ee..ba049ae88749 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -318,6 +318,31 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
}
}
+void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
+{
+ int i;
+ int num_of_eqs;
+ int num_rx_rings;
+ struct mlx4_dev *dev = mdev->dev;
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ if (!dev->caps.comp_pool)
+ num_of_eqs = max_t(int, MIN_RX_RINGS,
+ min_t(int,
+ dev->caps.num_comp_vectors,
+ DEF_RX_RINGS));
+ else
+ num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
+ dev->caps.comp_pool/
+ dev->caps.num_ports) - 1;
+
+ num_rx_rings = min_t(int, num_of_eqs,
+ netif_get_num_default_rss_queues());
+ mdev->profile.prof[i].rx_ring_num =
+ rounddown_pow_of_two(num_rx_rings);
+ }
+}
+
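
mlx4_en_set_num_rx_rings() above chooses the ring count as the largest power of two that does not exceed min(available EQs, default RSS queue count). The arithmetic in isolation, with made-up inputs:

#include <stdio.h>

/* Largest power of two <= n (n > 0), like rounddown_pow_of_two(). */
static unsigned int rounddown_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	int num_of_eqs = 6;    /* e.g. completion vectors available */
	int default_rss = 8;   /* e.g. netif_get_num_default_rss_queues() */
	int rings = num_of_eqs < default_rss ? num_of_eqs : default_rss;

	printf("rx rings = %u\n", rounddown_pow_of_two(rings)); /* 4 */
	return 0;
}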
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride, int node)
@@ -636,6 +661,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (!priv->port_up)
return 0;
+ if (budget <= 0)
+ return polled;
+
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
* reading 'cqe->index' */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index c11d063473e5..03e5f6ac67e7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -129,8 +129,10 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM;
- /* The device currently only supports 10G speed */
- if (priv->port_state.link_speed != SPEED_10000)
+ /* The device supports 1G, 10G and 40G speeds */
+ if (priv->port_state.link_speed != 1000 &&
+ priv->port_state.link_speed != 10000 &&
+ priv->port_state.link_speed != 40000)
return priv->port_state.link_speed;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 13457032d15f..dd1f6d346459 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,16 +44,6 @@
#include "mlx4_en.h"
-enum {
- MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
- MAX_BF = 256,
-};
-
-static int inline_thold __read_mostly = MAX_INLINE;
-
-module_param_named(inline_thold, inline_thold, int, 0444);
-MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
-
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, int qpn, u32 size,
u16 stride, int node, int queue_index)
@@ -75,8 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
-
- inline_thold = min(inline_thold, MAX_INLINE);
+ ring->inline_thold = priv->prof->inline_thold;
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = vmalloc_node(tmp, node);
@@ -325,7 +314,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
}
}
}
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return tx_info->nr_txbb;
}
@@ -456,7 +445,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
*/
if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
netif_tx_wake_queue(ring->tx_queue);
- priv->port_stats.wake_queue++;
+ ring->wake_queue++;
}
return done;
}
@@ -520,7 +509,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
return ring->buf + index * TXBB_SIZE;
}
-static int is_inline(struct sk_buff *skb, void **pfrag)
+static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag)
{
void *ptr;
@@ -580,7 +569,7 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
}
} else {
*lso_header_size = 0;
- if (!is_inline(skb, NULL))
+ if (!is_inline(priv->prof->inline_thold, skb, NULL))
real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
else
real_size = inline_size(skb);
@@ -596,7 +585,13 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
if (skb->len <= spc) {
- inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+ if (likely(skb->len >= MIN_PKT_LEN)) {
+ inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+ } else {
+ inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
+ memset(((void *)(inl + 1)) + skb->len, 0,
+ MIN_PKT_LEN - skb->len);
+ }
skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
if (skb_shinfo(skb)->nr_frags)
memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
@@ -696,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->size - HEADROOM - MAX_DESC_TXBBS)) {
/* every full Tx ring stops queue */
netif_tx_stop_queue(ring->tx_queue);
- priv->port_stats.queue_stopped++;
+ ring->queue_stopped++;
/* If queue was emptied after the if, and before the
* stop_queue - need to wake the queue, or else it will remain
@@ -709,7 +704,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(((int)(ring->prod - ring->cons)) <=
ring->size - HEADROOM - MAX_DESC_TXBBS)) {
netif_tx_wake_queue(ring->tx_queue);
- priv->port_stats.wake_queue++;
+ ring->wake_queue++;
} else {
return NETDEV_TX_BUSY;
}
@@ -747,11 +742,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->data_offset = (void *)data - (void *)tx_desc;
tx_info->linear = (lso_header_size < skb_headlen(skb) &&
- !is_inline(skb, NULL)) ? 1 : 0;
+ !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0;
data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
- if (is_inline(skb, &fragptr)) {
+ if (is_inline(ring->inline_thold, skb, &fragptr)) {
tx_info->inl = 1;
} else {
/* Map fragments */
@@ -881,7 +876,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
- *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
+ tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn);
+
op_own |= htonl((bf_index & 0xffff) << 8);
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW */
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8992b38578d5..d501a2b0fb79 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -271,7 +271,10 @@ enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave,
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
- if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) {
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+
+ if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
+ port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
pr_err("%s: Error: asking for slave:%d, port:%d\n",
__func__, slave, port);
return SLAVE_PORT_DOWN;
@@ -285,8 +288,10 @@ static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
- if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+ if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
+ port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
pr_err("%s: Error: asking for slave:%d, port:%d\n",
__func__, slave, port);
return -1;
@@ -300,9 +305,13 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
int i;
enum slave_port_gen_event gen_event;
+ struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
+ port);
- for (i = 0; i < dev->num_slaves; i++)
- set_and_calc_slave_port_state(dev, i, port, event, &gen_event);
+ for (i = 0; i < dev->num_vfs + 1; i++)
+ if (test_bit(i, slaves_pport.slaves))
+ set_and_calc_slave_port_state(dev, i, port,
+ event, &gen_event);
}
/**************************************************************************
The function get as input the new event to that port,
@@ -321,12 +330,14 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
struct mlx4_slave_state *ctx = NULL;
unsigned long flags;
int ret = -1;
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
enum slave_port_state cur_state =
mlx4_get_slave_port_state(dev, slave, port);
*gen_event = SLAVE_PORT_GEN_EVENT_NONE;
- if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+ if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
+ port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
pr_err("%s: Error: asking for slave:%d, port:%d\n",
__func__, slave, port);
return ret;
@@ -542,15 +553,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be64_to_cpu(eqe->event.cmd.out_param));
break;
- case MLX4_EVENT_TYPE_PORT_CHANGE:
+ case MLX4_EVENT_TYPE_PORT_CHANGE: {
+ struct mlx4_slaves_pport slaves_port;
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
+ slaves_port = mlx4_phys_to_slaves_pport(dev, port);
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
if (!mlx4_is_master(dev))
break;
- for (i = 0; i < dev->num_slaves; i++) {
+ for (i = 0; i < dev->num_vfs + 1; i++) {
+ if (!test_bit(i, slaves_port.slaves))
+ continue;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
if (i == mlx4_master_func_num(dev))
continue;
@@ -558,8 +573,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
" to slave: %d, port:%d\n",
__func__, i, port);
s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
- if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+ if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+ eqe->event.port_change.port =
+ cpu_to_be32(
+ (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+ | (mlx4_phys_to_slave_port(dev, i, port) << 28));
mlx4_slave_event(dev, i, eqe);
+ }
} else { /* IB port */
set_and_calc_slave_port_state(dev, i, port,
MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
@@ -580,12 +600,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (!mlx4_is_master(dev))
break;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
- for (i = 0; i < dev->num_slaves; i++) {
+ for (i = 0; i < dev->num_vfs + 1; i++) {
+ if (!test_bit(i, slaves_port.slaves))
+ continue;
if (i == mlx4_master_func_num(dev))
continue;
s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
- if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+ if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+ eqe->event.port_change.port =
+ cpu_to_be32(
+ (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+ | (mlx4_phys_to_slave_port(dev, i, port) << 28));
mlx4_slave_event(dev, i, eqe);
+ }
}
else /* IB port */
/* port-up event will be sent to a slave when the
@@ -594,6 +621,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
}
break;
+ }
case MLX4_EVENT_TYPE_CQ_ERROR:
mlx4_warn(dev, "CQ %s on CQN %06x\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 7e2995ecea6f..d16a4d118903 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -225,13 +225,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
if (vhcr->op_modifier == 1) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev, slave);
+ int converted_port = mlx4_slave_convert_port(
+ dev, slave, vhcr->in_modifier);
+
+ if (converted_port < 0)
+ return -EINVAL;
+
+ vhcr->in_modifier = converted_port;
/* Set nic_info bit to mark new fields support */
field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
- field = vhcr->in_modifier; /* phys-port = logical-port */
+ /* phys-port = logical-port */
+ field = vhcr->in_modifier -
+ find_first_bit(actv_ports.ports, dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+ field = vhcr->in_modifier;
/* size is now the QP number */
size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
@@ -249,12 +261,16 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
QUERY_FUNC_CAP_PHYS_PORT_ID);
} else if (vhcr->op_modifier == 0) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev, slave);
/* enable rdma and ethernet interfaces, and new quota locations */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
- field = dev->caps.num_ports;
+ field = min(
+ bitmap_weight(actv_ports.ports, dev->caps.num_ports),
+ dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
size = dev->caps.function_caps; /* set PF behaviours */
@@ -840,6 +856,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
int err = 0;
u8 field;
u32 bmme_flags;
+ int real_port;
+ int slave_port;
+ int first_port;
+ struct mlx4_active_ports actv_ports;
err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -852,8 +872,26 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
+ actv_ports = mlx4_get_active_ports(dev, slave);
+ first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
+ for (slave_port = 0, real_port = first_port;
+ real_port < first_port +
+ bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+ ++real_port, ++slave_port) {
+ if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
+ flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
+ else
+ flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
+ }
+ for (; slave_port < dev->caps.num_ports; ++slave_port)
+ flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
+ field &= ~0x0F;
+ field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
+
/* For guests, disable timestamp */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
field &= 0x7f;
@@ -903,12 +941,20 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
u16 short_field;
int err;
int admin_link_state;
+ int port = mlx4_slave_convert_port(dev, slave,
+ vhcr->in_modifier & 0xFF);
#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
#define MLX4_PORT_LINK_UP_MASK 0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
+ if (port < 0)
+ return -EINVAL;
+
+ vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
+ (port & 0xFF);
+
err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
@@ -935,7 +981,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, port_type,
QUERY_PORT_SUPPORTED_TYPE_OFFSET);
- short_field = 1; /* slave max gids */
+ if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
+ short_field = mlx4_get_slave_num_gids(dev, slave, port);
+ else
+ short_field = 1; /* slave max gids */
MLX4_PUT(outbox->buf, short_field,
QUERY_PORT_CUR_MAX_GID_OFFSET);
@@ -1585,9 +1634,12 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int port = vhcr->in_modifier;
+ int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
int err;
+ if (port < 0)
+ return -EINVAL;
+
if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
return 0;
@@ -1677,9 +1729,12 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int port = vhcr->in_modifier;
+ int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
int err;
+ if (port < 0)
+ return -EINVAL;
+
if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
(1 << port)))
return 0;
@@ -1724,6 +1779,46 @@ int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
MLX4_CMD_NATIVE);
}
+struct mlx4_config_dev {
+ __be32 update_flags;
+ __be32 rsdv1[3];
+ __be16 vxlan_udp_dport;
+ __be16 rsvd2;
+};
+
+#define MLX4_VXLAN_UDP_DPORT (1 << 0)
+
+static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
+{
+ int err;
+ struct mlx4_cmd_mailbox *mailbox;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, config_dev, sizeof(*config_dev));
+
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
+{
+ struct mlx4_config_dev config_dev;
+
+ memset(&config_dev, 0, sizeof(config_dev));
+ config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
+ config_dev.vxlan_udp_dport = udp_port;
+
+ return mlx4_CONFIG_DEV(dev, &config_dev);
+}
+EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
+
+
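mlx4_config_vxlan_port() gives callers a way to program the VXLAN UDP destination port through the new CONFIG_DEV command. A hedged usage sketch; the callback shape is an assumption, only the exported helper comes from this hunk:

static void mlx4_en_set_vxlan_port_sketch(struct mlx4_dev *dev, __be16 port)
{
	int err;

	/* The port is passed in network byte order, as the helper expects. */
	err = mlx4_config_vxlan_port(dev, port);
	if (err)
		pr_warn("mlx4: failed to configure VXLAN port %u (err %d)\n",
			be16_to_cpu(port), err);
}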
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
@@ -1891,7 +1986,8 @@ void mlx4_opreq_action(struct work_struct *work)
err = EINVAL;
break;
}
- err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
+ err = mlx4_cmd(dev, 0, ((u32) err |
+ (__force u32)cpu_to_be32(token) << 16),
1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d413e60071d4..f0ae95f66ceb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -41,7 +41,6 @@
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
#include <linux/kmod.h>
#include <linux/mlx4/device.h>
@@ -78,13 +77,17 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
-static int num_vfs;
-module_param(num_vfs, int, 0444);
-MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+static uint8_t num_vfs[3] = {0, 0, 0};
+static int num_vfs_argc = 3;
+module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
+ "num_vfs=port1,port2,port1+2");
-static int probe_vf;
-module_param(probe_vf, int, 0644);
-MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+static uint8_t probe_vf[3] = {0, 0, 0};
+static int probe_vfs_argc = 3;
+module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
+ "probe_vf=port1,port2,port1+2");
int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
@@ -1470,7 +1473,11 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
int i;
for (i = 1; i <= dev->caps.num_ports; i++) {
- dev->caps.gid_table_len[i] = 1;
+ if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
+ dev->caps.gid_table_len[i] =
+ mlx4_get_slave_num_gids(dev, 0, i);
+ else
+ dev->caps.gid_table_len[i] = 1;
dev->caps.pkey_table_len[i] =
dev->phys_caps.pkey_phys_table_len[i] - 1;
}
@@ -1495,7 +1502,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
if (mlx4_log_num_mgm_entry_size == -1 &&
dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
(!mlx4_is_mfunc(dev) ||
- (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) &&
+ (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
dev->oper_log_mgm_entry_size =
@@ -1981,9 +1988,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int nreq = min_t(int, dev->caps.num_ports *
- min_t(int, netif_get_num_default_rss_queues() + 1,
+ min_t(int, num_online_cpus() + 1,
MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
- int err;
int i;
if (msi_x) {
@@ -1997,23 +2003,13 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
for (i = 0; i < nreq; ++i)
entries[i].entry = i;
- retry:
- err = pci_enable_msix(dev->pdev, entries, nreq);
- if (err) {
- /* Try again if at least 2 vectors are available */
- if (err > 1) {
- mlx4_info(dev, "Requested %d vectors, "
- "but only %d MSI-X vectors available, "
- "trying again\n", nreq, err);
- nreq = err;
- goto retry;
- }
+ nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
+
+ if (nreq < 0) {
kfree(entries);
goto no_msi;
- }
-
- if (nreq <
- MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
+ } else if (nreq < MSIX_LEGACY_SZ +
+ dev->caps.num_ports * MIN_MSIX_P_PORT) {
/* Working in legacy mode, all EQs shared */
dev->caps.comp_pool = 0;
dev->caps.num_comp_vectors = nreq - 1;
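pci_enable_msix_range() replaces the old retry loop: it allocates anywhere between minvec and maxvec vectors in a single call and returns the number granted, or a negative errno if even minvec cannot be satisfied. The pattern in isolation, as a sketch:

static int enable_msix_sketch(struct pci_dev *pdev, struct msix_entry *entries,
			      int want)
{
	int i, got;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	/* Accept anything from 2 to `want` vectors; no retry loop needed. */
	got = pci_enable_msix_range(pdev, entries, 2, want);
	return got;	/* >= 2 on success, negative errno on failure */
}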
@@ -2201,6 +2197,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
struct mlx4_dev *dev;
int err;
int port;
+ int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
+ {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
+ unsigned total_vfs = 0;
+ int sriov_initialized = 0;
+ unsigned int i;
pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
@@ -2215,17 +2218,40 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
* per port, we must limit the number of VFs to 63 (since there are
* 128 MACs)
*/
- if (num_vfs >= MLX4_MAX_NUM_VF) {
+ for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
+ total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
+ nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
+ if (nvfs[i] < 0) {
+ dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
+ return -EINVAL;
+ }
+ }
+ for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
+ i++) {
+ prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
+ if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
+ dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
+ return -EINVAL;
+ }
+ }
+ if (total_vfs >= MLX4_MAX_NUM_VF) {
dev_err(&pdev->dev,
"Requested more VF's (%d) than allowed (%d)\n",
- num_vfs, MLX4_MAX_NUM_VF - 1);
+ total_vfs, MLX4_MAX_NUM_VF - 1);
return -EINVAL;
}
- if (num_vfs < 0) {
- pr_err("num_vfs module parameter cannot be negative\n");
- return -EINVAL;
+ for (i = 0; i < MLX4_MAX_PORTS; i++) {
+ if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+ dev_err(&pdev->dev,
+ "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+ nvfs[i] + nvfs[2], i + 1,
+ MLX4_MAX_NUM_VF_P_PORT - 1);
+ return -EINVAL;
+ }
}
+
+
/*
* Check for BARs.
*/
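The param_map table encodes how the num_vfs/probe_vf arrays are interpreted: one value configures dual-port VFs only, two values configure port1/port2, and three values configure port1, port2 and port1+2. A sketch of the same mapping with illustrative values (the modprobe lines in the comment are examples, not taken from the patch):

/* modprobe mlx4_core num_vfs=4      -> nvfs = {0, 0, 4}  (4 dual-port VFs)
 * modprobe mlx4_core num_vfs=2,3    -> nvfs = {2, 3, 0}  (per-port VFs)
 * modprobe mlx4_core num_vfs=2,3,1  -> nvfs = {2, 3, 1}  (per-port + dual) */
static void map_vf_args_sketch(const u8 *args, int argc, int nvfs[3])
{
	static const int param_map[3][3] = { {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	int i;

	for (i = 0; i < argc && i < 3; i++)
		nvfs[param_map[argc - 1][i]] = args[i];
}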
@@ -2300,11 +2326,23 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
/* When acting as pf, we normally skip vfs unless explicitly
* requested to probe them. */
- if (num_vfs && extended_func_num(pdev) > probe_vf) {
- mlx4_warn(dev, "Skipping virtual function:%d\n",
- extended_func_num(pdev));
- err = -ENODEV;
- goto err_free_dev;
+ if (total_vfs) {
+ unsigned vfs_offset = 0;
+ for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
+ vfs_offset + nvfs[i] < extended_func_num(pdev);
+ vfs_offset += nvfs[i], i++)
+ ;
+ if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
+ err = -ENODEV;
+ goto err_free_dev;
+ }
+ if ((extended_func_num(pdev) - vfs_offset)
+ > prb_vf[i]) {
+ mlx4_warn(dev, "Skipping virtual function:%d\n",
+ extended_func_num(pdev));
+ err = -ENODEV;
+ goto err_free_dev;
+ }
}
mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
dev->flags |= MLX4_FLAG_SLAVE;
@@ -2324,22 +2362,30 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
}
}
- if (num_vfs) {
- mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
-
- atomic_inc(&pf_loading);
- err = pci_enable_sriov(pdev, num_vfs);
- atomic_dec(&pf_loading);
-
- if (err) {
- mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
- err);
+ if (total_vfs) {
+ mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
+ total_vfs);
+ dev->dev_vfs = kzalloc(
+ total_vfs * sizeof(*dev->dev_vfs),
+ GFP_KERNEL);
+ if (NULL == dev->dev_vfs) {
+ mlx4_err(dev, "Failed to allocate memory for VFs\n");
err = 0;
} else {
- mlx4_warn(dev, "Running in master mode\n");
- dev->flags |= MLX4_FLAG_SRIOV |
- MLX4_FLAG_MASTER;
- dev->num_vfs = num_vfs;
+ atomic_inc(&pf_loading);
+ err = pci_enable_sriov(pdev, total_vfs);
+ atomic_dec(&pf_loading);
+ if (err) {
+ mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
+ err);
+ err = 0;
+ } else {
+ mlx4_warn(dev, "Running in master mode\n");
+ dev->flags |= MLX4_FLAG_SRIOV |
+ MLX4_FLAG_MASTER;
+ dev->num_vfs = total_vfs;
+ sriov_initialized = 1;
+ }
}
}
@@ -2404,12 +2450,37 @@ slave_start:
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
+ unsigned sum = 0;
err = mlx4_multi_func_init(dev);
if (err) {
mlx4_err(dev, "Failed to init master mfunc"
"interface, aborting.\n");
goto err_close;
}
+ if (sriov_initialized) {
+ int ib_ports = 0;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_ports++;
+
+ if (ib_ports &&
+ (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
+ mlx4_err(dev,
+ "Invalid syntax of num_vfs/probe_vfs "
+ "with IB port. Single port VFs syntax"
+ " is only supported when all ports "
+ "are configured as ethernet\n");
+ goto err_close;
+ }
+ for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
+ unsigned j;
+ for (j = 0; j < nvfs[i]; ++sum, ++j) {
+ dev->dev_vfs[sum].min_port =
+ i < 2 ? i + 1 : 1;
+ dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
+ dev->caps.num_ports;
+ }
+ }
+ }
}
err = mlx4_alloc_eq_table(dev);
@@ -2517,6 +2588,8 @@ err_rel_own:
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
+ kfree(priv->dev.dev_vfs);
+
err_free_dev:
kfree(priv);
@@ -2603,6 +2676,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
kfree(dev->caps.qp0_proxy);
kfree(dev->caps.qp1_tunnel);
kfree(dev->caps.qp1_proxy);
+ kfree(dev->dev_vfs);
kfree(priv);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index db7dc0b6667d..80ccb4edf825 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1387,9 +1387,12 @@ int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
u32 qpn = (u32) vhcr->in_param & 0xffffffff;
- u8 port = vhcr->in_param >> 62;
+ int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
enum mlx4_steer_type steer = vhcr->in_modifier;
+ if (port < 0)
+ return -EINVAL;
+
/* Promiscuous unicast is not allowed in mfunc */
if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 7aec6c833973..cf8be41abb36 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -788,6 +788,10 @@ enum {
MLX4_USE_RR = 1,
};
+struct mlx4_roce_gid_entry {
+ u8 raw[16];
+};
+
struct mlx4_priv {
struct mlx4_dev dev;
@@ -834,6 +838,7 @@ struct mlx4_priv {
int fs_hash_mode;
u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
__be64 slave_node_guids[MLX4_MFUNC_MAX];
+ struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
atomic_t opreq_count;
struct work_struct opreq_task;
@@ -1242,11 +1247,6 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
-int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
- struct mlx4_vhcr *vhcr,
- struct mlx4_cmd_mailbox *inbox,
- struct mlx4_cmd_mailbox *outbox,
- struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
@@ -1282,4 +1282,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
void mlx4_init_quotas(struct mlx4_dev *dev);
+int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
+/* Returns the VF index of slave */
+int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
+
#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index b57e8c87a34e..7a733c287744 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -187,6 +187,13 @@ enum {
#define GET_AVG_PERF_COUNTER(cnt) (0)
#endif /* MLX4_EN_PERF_STAT */
+/* Constants for TX flow */
+enum {
+ MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+ MAX_BF = 256,
+ MIN_PKT_LEN = 17,
+};
+
/*
* Configurables
*/
@@ -267,10 +274,13 @@ struct mlx4_en_tx_ring {
unsigned long bytes;
unsigned long packets;
unsigned long tx_csum;
+ unsigned long queue_stopped;
+ unsigned long wake_queue;
struct mlx4_bf bf;
bool bf_enabled;
struct netdev_queue *tx_queue;
int hwtstamp_tx_type;
+ int inline_thold;
};
struct mlx4_en_rx_desc {
@@ -346,6 +356,7 @@ struct mlx4_en_port_profile {
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
+ int inline_thold;
};
struct mlx4_en_profile {
@@ -548,6 +559,10 @@ struct mlx4_en_priv {
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
+#ifdef CONFIG_MLX4_EN_VXLAN
+ struct work_struct vxlan_add_task;
+ struct work_struct vxlan_del_task;
+#endif
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
@@ -574,6 +589,7 @@ struct mlx4_en_priv {
struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
u64 tunnel_reg_id;
+ __be16 vxlan_port;
};
enum mlx4_en_wol {
@@ -737,7 +753,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
int cq, int user_prio);
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring);
-
+void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride, int node);
@@ -786,7 +802,6 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
#define MLX4_EN_NUM_SELF_TEST 5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
-u64 mlx4_en_mac_to_u64(u8 *addr);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index a58bcbf1b806..cfcad26ed40f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -505,6 +505,84 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
mlx4_free_cmd_mailbox(dev, outmailbox);
return err;
}
+static struct mlx4_roce_gid_entry zgid_entry;
+
+int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
+{
+ int vfs;
+ int slave_gid = slave;
+ unsigned i;
+ struct mlx4_slaves_pport slaves_pport;
+ struct mlx4_active_ports actv_ports;
+ unsigned max_port_p_one;
+
+ if (slave == 0)
+ return MLX4_ROCE_PF_GIDS;
+
+ /* Slave is a VF */
+ slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
+ actv_ports = mlx4_get_active_ports(dev, slave);
+ max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
+ bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
+
+ for (i = 1; i < max_port_p_one; i++) {
+ struct mlx4_active_ports exclusive_ports;
+ struct mlx4_slaves_pport slaves_pport_actv;
+ bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+ set_bit(i - 1, exclusive_ports.ports);
+ if (i == port)
+ continue;
+ slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
+ dev, &exclusive_ports);
+ slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
+ dev->num_vfs + 1);
+ }
+ vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+ if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
+ return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
+ return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
+}
+
+int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
+{
+ int gids;
+ unsigned i;
+ int slave_gid = slave;
+ int vfs;
+
+ struct mlx4_slaves_pport slaves_pport;
+ struct mlx4_active_ports actv_ports;
+ unsigned max_port_p_one;
+
+ if (slave == 0)
+ return 0;
+
+ slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
+ actv_ports = mlx4_get_active_ports(dev, slave);
+ max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
+ bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
+
+ for (i = 1; i < max_port_p_one; i++) {
+ struct mlx4_active_ports exclusive_ports;
+ struct mlx4_slaves_pport slaves_pport_actv;
+ bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+ set_bit(i - 1, exclusive_ports.ports);
+ if (i == port)
+ continue;
+ slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
+ dev, &exclusive_ports);
+ slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
+ dev->num_vfs + 1);
+ }
+ gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
+ vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+ if (slave_gid <= gids % vfs)
+ return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
+
+ return MLX4_ROCE_PF_GIDS + (gids % vfs) +
+ ((gids / vfs) * (slave_gid - 1));
+}
+EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
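A worked example of the split implemented by mlx4_get_slave_num_gids() and mlx4_get_base_gid_ix(), assuming MLX4_ROCE_MAX_GIDS = 128 and MLX4_ROCE_PF_GIDS = 16 (these constants are assumed here, they are not restated in this hunk) and five VFs active on the port:

/* 128 - 16 = 112 VF GIDs over 5 VFs: 112 / 5 = 22 remainder 2, so the
 * first two VFs get one extra entry:
 *
 *   slave_gid   num_gids   base_gid_ix
 *       1          23          16
 *       2          23          39
 *       3          22          62
 *       4          22          84
 *       5          22         106   (106 + 22 = 128, table exactly filled)
 */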
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
u8 op_mod, struct mlx4_cmd_mailbox *inbox)
@@ -515,14 +593,18 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
struct mlx4_slave_state *slave_st = &master->slave_state[slave];
struct mlx4_set_port_rqp_calc_context *qpn_context;
struct mlx4_set_port_general_context *gen_context;
+ struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
int reset_qkey_viols;
int port;
int is_eth;
+ int num_gids;
+ int base;
u32 in_modifier;
u32 promisc;
u16 mtu, prev_mtu;
int err;
- int i;
+ int i, j;
+ int offset;
__be32 agg_cap_mask;
__be32 slave_cap_mask;
__be32 new_cap_mask;
@@ -535,7 +617,8 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
/* Slaves cannot perform SET_PORT operations except changing MTU */
if (is_eth) {
if (slave != dev->caps.function &&
- in_modifier != MLX4_SET_PORT_GENERAL) {
+ in_modifier != MLX4_SET_PORT_GENERAL &&
+ in_modifier != MLX4_SET_PORT_GID_TABLE) {
mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
slave);
return -EINVAL;
@@ -581,6 +664,67 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
break;
+ case MLX4_SET_PORT_GID_TABLE:
+ /* change to MULTIPLE entries: number of guest's gids
+ * need a FOR-loop here over number of gids the guest has.
+ * 1. Check no duplicates in gids passed by slave
+ */
+ num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+ base = mlx4_get_base_gid_ix(dev, slave, port);
+ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+ for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
+ if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
+ sizeof(zgid_entry)))
+ continue;
+ gid_entry_mb1 = gid_entry_mbox + 1;
+ for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
+ if (!memcmp(gid_entry_mb1->raw,
+ zgid_entry.raw, sizeof(zgid_entry)))
+ continue;
+ if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
+ sizeof(gid_entry_mbox->raw))) {
+ /* found duplicate */
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* 2. Check that do not have duplicates in OTHER
+ * entries in the port GID table
+ */
+ for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
+ if (i >= base && i < base + num_gids)
+ continue; /* don't compare to slave's current gids */
+ gid_entry_tbl = &priv->roce_gids[port - 1][i];
+ if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
+ continue;
+ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+ for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
+ if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
+ sizeof(zgid_entry)))
+ continue;
+ if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
+ sizeof(gid_entry_tbl->raw))) {
+ /* found duplicate */
+ mlx4_warn(dev, "requested gid entry for slave:%d "
+ "is a duplicate of gid at index %d\n",
+ slave, i);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* insert slave GIDs with memcpy, starting at slave's base index */
+ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+ for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
+ memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16);
+
+ /* Now, copy roce port gids table to current mailbox for passing to FW */
+ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+ for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
+ memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16);
+
+ break;
}
return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
@@ -646,6 +790,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
+ int port = mlx4_slave_convert_port(
+ dev, slave, vhcr->in_modifier & 0xFF);
+
+ if (port < 0)
+ return -EINVAL;
+
+ vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
+ (port & 0xFF);
+
return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
vhcr->op_modifier, inbox);
}
@@ -835,7 +988,7 @@ struct mlx4_set_port_vxlan_context {
u8 steering;
};
-int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
{
int err;
u32 in_mod;
@@ -849,7 +1002,8 @@ int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
memset(context, 0, sizeof(*context));
context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
- context->enable_flags = VXLAN_ENABLE;
+ if (enable)
+ context->enable_flags = VXLAN_ENABLE;
context->steering = steering;
in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
@@ -927,3 +1081,108 @@ void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
*stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
}
EXPORT_SYMBOL(mlx4_set_stats_bitmap);
+
+int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
+ int *slave_id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i, found_ix = -1;
+ int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
+ struct mlx4_slaves_pport slaves_pport;
+ unsigned num_vfs;
+ int slave_gid;
+
+ if (!mlx4_is_mfunc(dev))
+ return -EINVAL;
+
+ slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
+ num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+
+ for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
+ if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) {
+ found_ix = i;
+ break;
+ }
+ }
+
+ if (found_ix >= 0) {
+ if (found_ix < MLX4_ROCE_PF_GIDS)
+ slave_gid = 0;
+ else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
+ (vf_gids / num_vfs + 1))
+ slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
+ (vf_gids / num_vfs + 1)) + 1;
+ else
+ slave_gid =
+ ((found_ix - MLX4_ROCE_PF_GIDS -
+ ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
+ (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
+
+ if (slave_gid) {
+ struct mlx4_active_ports exclusive_ports;
+ struct mlx4_active_ports actv_ports;
+ struct mlx4_slaves_pport slaves_pport_actv;
+ unsigned max_port_p_one;
+ int num_slaves_before = 1;
+
+ for (i = 1; i < port; i++) {
+ bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+ set_bit(i, exclusive_ports.ports);
+ slaves_pport_actv =
+ mlx4_phys_to_slaves_pport_actv(
+ dev, &exclusive_ports);
+ num_slaves_before += bitmap_weight(
+ slaves_pport_actv.slaves,
+ dev->num_vfs + 1);
+ }
+
+ if (slave_gid < num_slaves_before) {
+ bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+ set_bit(port - 1, exclusive_ports.ports);
+ slaves_pport_actv =
+ mlx4_phys_to_slaves_pport_actv(
+ dev, &exclusive_ports);
+ slave_gid += bitmap_weight(
+ slaves_pport_actv.slaves,
+ dev->num_vfs + 1) -
+ num_slaves_before;
+ }
+ actv_ports = mlx4_get_active_ports(dev, slave_gid);
+ max_port_p_one = find_first_bit(
+ actv_ports.ports, dev->caps.num_ports) +
+ bitmap_weight(actv_ports.ports,
+ dev->caps.num_ports) + 1;
+
+ for (i = 1; i < max_port_p_one; i++) {
+ if (i == port)
+ continue;
+ bitmap_zero(exclusive_ports.ports,
+ dev->caps.num_ports);
+ set_bit(i - 1, exclusive_ports.ports);
+ slaves_pport_actv =
+ mlx4_phys_to_slaves_pport_actv(
+ dev, &exclusive_ports);
+ slave_gid += bitmap_weight(
+ slaves_pport_actv.slaves,
+ dev->num_vfs + 1);
+ }
+ }
+ *slave_id = slave_gid;
+ }
+
+ return (found_ix >= 0) ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
+
+int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
+ u8 *gid)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (!mlx4_is_master(dev))
+ return -EINVAL;
+
+ memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16);
+ return 0;
+}
+EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
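The two exports above act roughly as inverses on the master: one copies an entry out of the port's RoCE GID table, the other maps a GID back to the slave whose range contains it. A purely illustrative round-trip sketch:

static int roce_gid_roundtrip_sketch(struct mlx4_dev *dev, int port, int index)
{
	u8 gid[16];
	int slave = -1;
	int err;

	/* mlx4_get_roce_gid_from_slave() indexes the port's GID table directly. */
	err = mlx4_get_roce_gid_from_slave(dev, port, index, gid);
	if (err)
		return err;

	err = mlx4_get_slave_from_roce_gid(dev, port, gid, &slave);
	if (err)
		return err;

	return slave;	/* the function owning the range that contains `index` */
}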
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 57428a0cb9dd..3b5f53ef29b2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -52,6 +52,8 @@
struct mac_res {
struct list_head list;
u64 mac;
+ int ref_count;
+ u8 smac_index;
u8 port;
};
@@ -219,6 +221,11 @@ struct res_fs_rule {
int qpn;
};
+static int mlx4_is_eth(struct mlx4_dev *dev, int port)
+{
+ return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
+}
+
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
struct rb_node *node = root->rb_node;
@@ -461,6 +468,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
spin_lock_init(&res_alloc->alloc_lock);
for (t = 0; t < dev->num_vfs + 1; t++) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev, t);
switch (i) {
case RES_QP:
initialize_res_quotas(dev, res_alloc, RES_QP,
@@ -490,10 +499,27 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
break;
case RES_MAC:
if (t == mlx4_master_func_num(dev)) {
- res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+ int max_vfs_pport = 0;
+ /* Calculate the max vfs per port for both ports */
+ for (j = 0; j < dev->caps.num_ports;
+ j++) {
+ struct mlx4_slaves_pport slaves_pport =
+ mlx4_phys_to_slaves_pport(dev, j + 1);
+ unsigned current_slaves =
+ bitmap_weight(slaves_pport.slaves,
+ dev->caps.num_ports) - 1;
+ if (max_vfs_pport < current_slaves)
+ max_vfs_pport =
+ current_slaves;
+ }
+ res_alloc->quota[t] =
+ MLX4_MAX_MAC_NUM -
+ 2 * max_vfs_pport;
res_alloc->guaranteed[t] = 2;
for (j = 0; j < MLX4_MAX_PORTS; j++)
- res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
+ res_alloc->res_port_free[j] =
+ MLX4_MAX_MAC_NUM;
} else {
res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
res_alloc->guaranteed[t] = 2;
@@ -521,9 +547,10 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
break;
}
if (i == RES_MAC || i == RES_VLAN) {
- for (j = 0; j < MLX4_MAX_PORTS; j++)
- res_alloc->res_port_rsvd[j] +=
- res_alloc->guaranteed[t];
+ for (j = 0; j < dev->caps.num_ports; j++)
+ if (test_bit(j, actv_ports.ports))
+ res_alloc->res_port_rsvd[j] +=
+ res_alloc->guaranteed[t];
} else {
res_alloc->res_reserved += res_alloc->guaranteed[t];
}
@@ -600,15 +627,37 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+ int port;
- if (MLX4_QP_ST_UD == ts)
- qp_ctx->pri_path.mgid_index = 0x80 | slave;
-
- if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
- if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
- qp_ctx->pri_path.mgid_index = slave & 0x7F;
- if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
- qp_ctx->alt_path.mgid_index = slave & 0x7F;
+ if (MLX4_QP_ST_UD == ts) {
+ port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port))
+ qp_ctx->pri_path.mgid_index =
+ mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
+ else
+ qp_ctx->pri_path.mgid_index = slave | 0x80;
+
+ } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
+ if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
+ port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port)) {
+ qp_ctx->pri_path.mgid_index +=
+ mlx4_get_base_gid_ix(dev, slave, port);
+ qp_ctx->pri_path.mgid_index &= 0x7f;
+ } else {
+ qp_ctx->pri_path.mgid_index = slave & 0x7F;
+ }
+ }
+ if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+ port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port)) {
+ qp_ctx->alt_path.mgid_index +=
+ mlx4_get_base_gid_ix(dev, slave, port);
+ qp_ctx->alt_path.mgid_index &= 0x7f;
+ } else {
+ qp_ctx->alt_path.mgid_index = slave & 0x7F;
+ }
+ }
}
}
@@ -619,7 +668,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_qp_context *qpc = inbox->buf + 8;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
- u32 qp_type;
int port;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
@@ -627,12 +675,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (MLX4_VGT != vp_oper->state.default_vlan) {
- qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
- if (MLX4_QP_ST_RC == qp_type ||
- (MLX4_QP_ST_UD == qp_type &&
- !mlx4_is_qp_reserved(dev, qpn)))
- return -EINVAL;
-
/* the reserved QPs (special, proxy, tunnel)
* do not operate over vlans
*/
@@ -1659,11 +1701,39 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
}
-static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
+ u8 smac_index, u64 *mac)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ if (res->smac_index == smac_index && res->port == (u8) port) {
+ *mac = res->mac;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
- struct mac_res *res;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ if (res->mac == mac && res->port == (u8) port) {
+ /* mac found. update ref count */
+ ++res->ref_count;
+ return 0;
+ }
+ }
if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
return -EINVAL;
@@ -1674,6 +1744,8 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
}
res->mac = mac;
res->port = (u8) port;
+ res->smac_index = smac_index;
+ res->ref_count = 1;
list_add_tail(&res->list,
&tracker->slave_list[slave].res_list[RES_MAC]);
return 0;
@@ -1690,9 +1762,11 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
- list_del(&res->list);
- mlx4_release_resource(dev, slave, RES_MAC, 1, port);
- kfree(res);
+ if (!--res->ref_count) {
+ list_del(&res->list);
+ mlx4_release_resource(dev, slave, RES_MAC, 1, port);
+ kfree(res);
+ }
break;
}
}
@@ -1705,10 +1779,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;
+ int i;
list_for_each_entry_safe(res, tmp, mac_list, list) {
list_del(&res->list);
- __mlx4_unregister_mac(dev, res->port, res->mac);
+ /* dereference the mac the number of times the slave referenced it */
+ for (i = 0; i < res->ref_count; i++)
+ __mlx4_unregister_mac(dev, res->port, res->mac);
mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
kfree(res);
}
@@ -1720,21 +1797,28 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
int err = -EINVAL;
int port;
u64 mac;
+ u8 smac_index;
if (op != RES_OP_RESERVE_AND_MAP)
return err;
port = !in_port ? get_param_l(out_param) : in_port;
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
mac = in_param;
err = __mlx4_register_mac(dev, port, mac);
if (err >= 0) {
+ smac_index = err;
set_param_l(out_param, err);
err = 0;
}
if (!err) {
- err = mac_add_to_slave(dev, slave, mac, port);
+ err = mac_add_to_slave(dev, slave, mac, port, smac_index);
if (err)
__mlx4_unregister_mac(dev, port, mac);
}
@@ -1831,6 +1915,11 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (!port || op != RES_OP_RESERVE_AND_MAP)
return -EINVAL;
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
if (!in_port && port > 0 && port <= dev->caps.num_ports) {
slave_state[slave].old_vlan_api = true;
@@ -2128,6 +2217,11 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE_AND_MAP:
port = !in_port ? get_param_l(out_param) : in_port;
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
mac_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_mac(dev, port, in_param);
break;
@@ -2147,6 +2241,11 @@ static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
int err = 0;
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
if (slave_state[slave].old_vlan_api)
@@ -2734,6 +2833,8 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
u32 qp_type;
struct mlx4_qp_context *qp_ctx;
enum mlx4_qp_optpar optpar;
+ int port;
+ int num_gids;
qp_ctx = inbox->buf + 8;
qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
@@ -2741,6 +2842,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
switch (qp_type) {
case MLX4_QP_ST_RC:
+ case MLX4_QP_ST_XRC:
case MLX4_QP_ST_UC:
switch (transition) {
case QP_TRANS_INIT2RTR:
@@ -2749,13 +2851,24 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
case QP_TRANS_SQD2SQD:
case QP_TRANS_SQD2RTS:
if (slave != mlx4_master_func_num(dev))
- /* slaves have only gid index 0 */
- if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
- if (qp_ctx->pri_path.mgid_index)
+ if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
+ port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
+ num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+ else
+ num_gids = 1;
+ if (qp_ctx->pri_path.mgid_index >= num_gids)
return -EINVAL;
- if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
- if (qp_ctx->alt_path.mgid_index)
+ }
+ if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+ port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
+ if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
+ num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+ else
+ num_gids = 1;
+ if (qp_ctx->alt_path.mgid_index >= num_gids)
return -EINVAL;
+ }
break;
default:
break;
@@ -3268,6 +3381,58 @@ int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
+static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
+ struct mlx4_qp_context *qpc,
+ struct mlx4_cmd_mailbox *inbox)
+{
+ enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
+ u8 pri_sched_queue;
+ int port = mlx4_slave_convert_port(
+ dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
+
+ if (port < 0)
+ return -EINVAL;
+
+ pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
+ ((port & 1) << 6);
+
+ if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
+ mlx4_is_eth(dev, port + 1)) {
+ qpc->pri_path.sched_queue = pri_sched_queue;
+ }
+
+ if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+ port = mlx4_slave_convert_port(
+ dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
+ + 1) - 1;
+ if (port < 0)
+ return -EINVAL;
+ qpc->alt_path.sched_queue =
+ (qpc->alt_path.sched_queue & ~(1 << 6)) |
+ (port & 1) << 6;
+ }
+ return 0;
+}
+
+static int roce_verify_mac(struct mlx4_dev *dev, int slave,
+ struct mlx4_qp_context *qpc,
+ struct mlx4_cmd_mailbox *inbox)
+{
+ u64 mac;
+ int port;
+ u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+ u8 sched = *(u8 *)(inbox->buf + 64);
+ u8 smac_ix;
+
+ port = (sched >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
+ smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
+ if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
+ return -ENOENT;
+ }
+ return 0;
+}
+
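Both helpers above rely on bit 6 of sched_queue selecting the physical port (0 for port 1, 1 for port 2). The encoding in isolation, as illustrative inlines that are not part of the patch:

static inline int sched_queue_to_port(u8 sched_queue)
{
	return ((sched_queue >> 6) & 1) + 1;
}

static inline u8 port_to_sched_queue(u8 sched_queue, int port)
{
	return (sched_queue & ~(1 << 6)) | (((port - 1) & 1) << 6);
}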
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -3286,10 +3451,16 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
u8 orig_vlan_index = qpc->pri_path.vlan_index;
u8 orig_feup = qpc->pri_path.feup;
+ err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
+ if (err)
+ return err;
err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
if (err)
return err;
+ if (roce_verify_mac(dev, slave, qpc, inbox))
+ return -EINVAL;
+
update_pkey_index(dev, slave, inbox);
update_gid(dev, inbox, (u8)slave);
adjust_proxy_tun_qkey(dev, vhcr, qpc);
@@ -3334,6 +3505,9 @@ int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
if (err)
return err;
@@ -3353,6 +3527,9 @@ int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
if (err)
return err;
@@ -3371,6 +3548,9 @@ int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
struct mlx4_qp_context *context = inbox->buf + 8;
+ int err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
adjust_proxy_tun_qkey(dev, vhcr, context);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
@@ -3384,6 +3564,9 @@ int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
if (err)
return err;
@@ -3403,6 +3586,9 @@ int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
if (err)
return err;
@@ -3506,16 +3692,26 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
return err;
}
-static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- int block_loopback, enum mlx4_protocol prot,
+static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
+ u8 gid[16], int block_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type type, u64 *reg_id)
{
switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_DEVICE_MANAGED:
- return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ int port = mlx4_slave_convert_port(dev, slave, gid[5]);
+ if (port < 0)
+ return port;
+ return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
block_loopback, prot,
reg_id);
+ }
case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH) {
+ int port = mlx4_slave_convert_port(dev, slave, gid[5]);
+ if (port < 0)
+ return port;
+ gid[5] = port;
+ }
return mlx4_qp_attach_common(dev, qp, gid,
block_loopback, prot, type);
default:
@@ -3523,9 +3719,9 @@ static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
}
}
-static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- enum mlx4_protocol prot, enum mlx4_steer_type type,
- u64 reg_id)
+static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], enum mlx4_protocol prot,
+ enum mlx4_steer_type type, u64 reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
@@ -3562,7 +3758,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
qp.qpn = qpn;
if (attach) {
- err = qp_attach(dev, &qp, gid, block_loopback, prot,
+ err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
type, &reg_id);
if (err) {
pr_err("Fail to attach rule to qp 0x%x\n", qpn);
@@ -3698,6 +3894,9 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
return -EOPNOTSUPP;
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+ ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
+ if (ctrl->port <= 0)
+ return -EINVAL;
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
@@ -3816,16 +4015,6 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
-int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
- struct mlx4_vhcr *vhcr,
- struct mlx4_cmd_mailbox *inbox,
- struct mlx4_cmd_mailbox *outbox,
- struct mlx4_cmd_info *cmd)
-{
- return -EPERM;
-}
-
-
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
struct res_gid *rgid;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 23b7e2d35a93..c3eee5f70051 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -116,7 +116,6 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = &dev->priv.eq_table;
int num_eqs = 1 << dev->caps.log_max_eq;
int nvec;
- int err;
int i;
nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
@@ -131,17 +130,12 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
for (i = 0; i < nvec; i++)
table->msix_arr[i].entry = i;
-retry:
- table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
- err = pci_enable_msix(dev->pdev, table->msix_arr, nvec);
- if (err <= 0) {
- return err;
- } else if (err > 2) {
- nvec = err;
- goto retry;
- }
+ nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+ MLX5_EQ_VEC_COMP_BASE, nvec);
+ if (nvec < 0)
+ return nvec;
- mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec);
+ table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
}
@@ -446,6 +440,7 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
mlx5_init_cq_table(dev);
mlx5_init_qp_table(dev);
mlx5_init_srq_table(dev);
+ mlx5_init_mr_table(dev);
return 0;
@@ -537,7 +532,6 @@ static int __init init(void)
return 0;
- mlx5_health_cleanup();
err_debug:
mlx5_unregister_debugfs();
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 35e514dc7b7d..4cc927649404 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -36,11 +36,24 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
+void mlx5_init_mr_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_mr_table *table = &dev->priv.mr_table;
+
+ rwlock_init(&table->lock);
+ INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+}
+
+void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev)
+{
+}
+
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_create_mkey_mbox_in *in, int inlen,
mlx5_cmd_cbk_t callback, void *context,
struct mlx5_create_mkey_mbox_out *out)
{
+ struct mlx5_mr_table *table = &dev->priv.mr_table;
struct mlx5_create_mkey_mbox_out lout;
int err;
u8 key;
@@ -73,14 +86,21 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
be32_to_cpu(lout.mkey), key, mr->key);
+ /* connect to MR tree */
+ write_lock_irq(&table->lock);
+ err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
+ write_unlock_irq(&table->lock);
+
return err;
}
EXPORT_SYMBOL(mlx5_core_create_mkey);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
{
+ struct mlx5_mr_table *table = &dev->priv.mr_table;
struct mlx5_destroy_mkey_mbox_in in;
struct mlx5_destroy_mkey_mbox_out out;
+ unsigned long flags;
int err;
memset(&in, 0, sizeof(in));
@@ -95,6 +115,10 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
+ write_lock_irqsave(&table->lock, flags);
+ radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
+ write_unlock_irqrestore(&table->lock, flags);
+
return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
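With create/destroy now inserting into and deleting from the per-device radix tree, a caller can resolve an mkey reported by hardware back to its mlx5_core_mr. A hedged lookup sketch; the helper name is illustrative and the locking mirrors the insert/delete paths above:

static struct mlx5_core_mr *mlx5_mr_lookup_sketch(struct mlx5_core_dev *dev,
						  u32 key)
{
	struct mlx5_mr_table *table = &dev->priv.mr_table;
	struct mlx5_core_mr *mr;
	unsigned long flags;

	read_lock_irqsave(&table->lock, flags);
	mr = radix_tree_lookup(&table->tree, mlx5_base_mkey(key));
	read_unlock_irqrestore(&table->lock, flags);

	return mr;	/* NULL if the key is unknown */
}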
@@ -144,3 +168,64 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
return err;
}
EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
+
+int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
+ int npsvs, u32 *sig_index)
+{
+ struct mlx5_allocate_psv_in in;
+ struct mlx5_allocate_psv_out out;
+ int i, err;
+
+ if (npsvs > MLX5_MAX_PSVS)
+ return -EINVAL;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV);
+ in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err) {
+ mlx5_core_err(dev, "cmd exec failed %d\n", err);
+ return err;
+ }
+
+ if (out.hdr.status) {
+ mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status);
+ return mlx5_cmd_status_to_err(&out.hdr);
+ }
+
+ for (i = 0; i < npsvs; i++)
+ sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff;
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_psv);
+
+int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
+{
+ struct mlx5_destroy_psv_in in;
+ struct mlx5_destroy_psv_out out;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.psv_number = cpu_to_be32(psv_num);
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err) {
+ mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err);
+ goto out;
+ }
+
+ if (out.hdr.status) {
+ mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status);
+ err = mlx5_cmd_status_to_err(&out.hdr);
+ goto out;
+ }
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_destroy_psv);
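A hedged usage sketch for the new PSV commands: allocate a pair of PSVs against a PD and release them again. The surrounding error handling and the way the indexes are consumed are assumptions:

static int psv_pair_sketch(struct mlx5_core_dev *dev, u32 pdn)
{
	u32 psv_index[2];
	int err;

	err = mlx5_core_create_psv(dev, pdn, 2, psv_index);
	if (err)
		return err;

	/* ... psv_index[0]/[1] would be programmed into a signature context ... */

	mlx5_core_destroy_psv(dev, psv_index[0]);
	mlx5_core_destroy_psv(dev, psv_index[1]);
	return 0;
}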
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ce84dc289c8f..14ac0e2bc09f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4832,7 +4832,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
skb->csum = old->csum;
skb_set_network_header(skb, ETH_HLEN);
- dev_kfree_skb(old);
+ dev_consume_skb_any(old);
}
/**
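The skb-free conversions in this and the following drivers follow one rule: in paths that may run in hard-IRQ context, skbs must be freed with the _any() variants, and dev_consume_skb_any() is used for skbs that were handled successfully so they are not accounted as drops. A minimal sketch of that rule:

static void free_tx_skb_sketch(struct sk_buff *skb, bool transmitted)
{
	if (transmitted)
		dev_consume_skb_any(skb);	/* handled, not counted as a drop */
	else
		dev_kfree_skb_any(skb);		/* dropped on error */
}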
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 68026f7e8ba3..130f6b204efa 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2329,16 +2329,14 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp)
status = 0;
if (myri10ge_msi) {
if (mgp->num_slices > 1) {
- status =
- pci_enable_msix(pdev, mgp->msix_vectors,
- mgp->num_slices);
- if (status == 0) {
- mgp->msix_enabled = 1;
- } else {
+ status = pci_enable_msix_range(pdev, mgp->msix_vectors,
+ mgp->num_slices, mgp->num_slices);
+ if (status < 0) {
dev_err(&pdev->dev,
"Error %d setting up MSI-X\n", status);
return status;
}
+ mgp->msix_enabled = 1;
}
if (mgp->msix_enabled == 0) {
status = pci_enable_msi(pdev);
@@ -3895,32 +3893,34 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
GFP_KERNEL);
if (mgp->msix_vectors == NULL)
- goto disable_msix;
+ goto no_msix;
for (i = 0; i < mgp->num_slices; i++) {
mgp->msix_vectors[i].entry = i;
}
while (mgp->num_slices > 1) {
- /* make sure it is a power of two */
- while (!is_power_of_2(mgp->num_slices))
- mgp->num_slices--;
+ mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
if (mgp->num_slices == 1)
- goto disable_msix;
- status = pci_enable_msix(pdev, mgp->msix_vectors,
- mgp->num_slices);
- if (status == 0) {
- pci_disable_msix(pdev);
+ goto no_msix;
+ status = pci_enable_msix_range(pdev,
+ mgp->msix_vectors,
+ mgp->num_slices,
+ mgp->num_slices);
+ if (status < 0)
+ goto no_msix;
+
+ pci_disable_msix(pdev);
+
+ if (status == mgp->num_slices) {
if (old_allocated)
kfree(old_fw);
return;
- }
- if (status > 0)
+ } else {
mgp->num_slices = status;
- else
- goto disable_msix;
+ }
}
-disable_msix:
+no_msix:
if (mgp->msix_vectors != NULL) {
kfree(mgp->msix_vectors);
mgp->msix_vectors = NULL;
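This hunk is the first of several in this series (s2io, vxge, forcedeth, netxen and qlcnic follow) that move from pci_enable_msix() to pci_enable_msix_range(). The range variant returns the number of vectors actually allocated — somewhere between minvec and maxvec — on success and a negative errno on every failure, so the old "positive return means partial success, retry with fewer vectors" dance disappears. A sketch of both calling styles, with the helper names and vector count as placeholder assumptions:

	#include <linux/pci.h>

	/* Request exactly @nvec MSI-X vectors, or fail outright. */
	static int example_enable_exact_msix(struct pci_dev *pdev,
					     struct msix_entry *entries, int nvec)
	{
		int ret;

		/* minvec == maxvec: success implies ret == nvec */
		ret = pci_enable_msix_range(pdev, entries, nvec, nvec);
		if (ret < 0)
			return ret;	/* negative errno covers every failure */

		return 0;
	}

	/* Accept anything from 1 to @nvec vectors and report what was granted. */
	static int example_enable_some_msix(struct pci_dev *pdev,
					    struct msix_entry *entries, int nvec)
	{
		int ret;

		ret = pci_enable_msix_range(pdev, entries, 1, nvec);
		if (ret < 0)
			return ret;

		return ret;	/* number of vectors actually enabled */
	}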
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9eeddbd0b2c7..a2844ff322c4 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2914,6 +2914,9 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget)
struct RxD1 *rxdp1;
struct RxD3 *rxdp3;
+ if (budget <= 0)
+ return napi_pkts;
+
get_info = ring_data->rx_curr_get_info;
get_block = get_info.block_index;
memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
@@ -3792,9 +3795,10 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
writeq(rx_mat, &bar0->rx_mat);
readq(&bar0->rx_mat);
- ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
+ ret = pci_enable_msix_range(nic->pdev, nic->entries,
+ nic->num_entries, nic->num_entries);
/* We fail init if error or we get less vectors than min required */
- if (ret) {
+ if (ret < 0) {
DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
kfree(nic->entries);
swstats->mem_freed += nic->num_entries *
@@ -4045,7 +4049,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
if (!is_s2io_card_up(sp)) {
DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -4118,7 +4122,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
s2io_stop_tx_queue(sp, fifo->fifo_no);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -4240,7 +4244,7 @@ pci_map_failed:
swstats->pci_map_fail_cnt++;
s2io_stop_tx_queue(sp, fifo->fifo_no);
swstats->mem_freed += skb->truesize;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_OK;
}
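The dev_kfree_skb() → dev_kfree_skb_any() switches in the transmit paths above follow the same rule as the ksz884x change earlier in this series: ndo_start_xmit() and completion paths may run with hard interrupts disabled (for example under netpoll), where plain dev_kfree_skb()/kfree_skb() is not safe. dev_kfree_skb_any() picks the IRQ-safe free automatically, while dev_consume_skb_any() does the same but marks the skb as successfully consumed rather than dropped, which keeps drop monitoring accurate. A minimal sketch of a drop path, with the function name purely hypothetical:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical drop path inside an ndo_start_xmit handler. */
	static netdev_tx_t example_xmit_drop(struct sk_buff *skb,
					     struct net_device *dev)
	{
		/* may run in hard-IRQ context, so use the _any() variant */
		dev_kfree_skb_any(skb);		/* dropped, not consumed */
		return NETDEV_TX_OK;
	}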
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index e46e8698e630..d107bcbb8543 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -368,6 +368,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
ring->ndev->name, __func__, __LINE__);
+ if (ring->budget <= 0)
+ goto out;
+
do {
prefetch((char *)dtr + L1_CACHE_BYTES);
rx_priv = vxge_hw_ring_rxd_private_get(dtr);
@@ -525,6 +528,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
if (first_dtr)
vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
+out:
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...",
__func__, __LINE__);
@@ -820,7 +824,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len <= 0)) {
vxge_debug_tx(VXGE_ERR,
"%s: Buffer has no data..", dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -829,7 +833,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!is_vxge_card_up(vdev))) {
vxge_debug_tx(VXGE_ERR,
"%s: vdev not initialized", dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -839,7 +843,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
vxge_debug_tx(VXGE_ERR,
"%s: Failed to store the mac address",
dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
}
@@ -986,7 +990,7 @@ _exit1:
vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
netif_tx_stop_queue(fifo->txq);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2349,12 +2353,18 @@ start:
vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
vdev->vxge_entries[j].in_use = 0;
- ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
- if (ret > 0) {
+ ret = pci_enable_msix_range(vdev->pdev,
+ vdev->entries, 3, vdev->intr_cnt);
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto enable_msix_failed;
+ } else if (ret < vdev->intr_cnt) {
+ pci_disable_msix(vdev->pdev);
+
vxge_debug_init(VXGE_ERR,
"%s: MSI-X enable failed for %d vectors, ret: %d",
VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
- if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
+ if (max_config_vpath != VXGE_USE_DEFAULT) {
ret = -ENODEV;
goto enable_msix_failed;
}
@@ -2368,9 +2378,6 @@ start:
vxge_close_vpaths(vdev, temp);
vdev->no_of_vpath = temp;
goto start;
- } else if (ret < 0) {
- ret = -ENODEV;
- goto enable_msix_failed;
}
return 0;
@@ -3131,12 +3138,12 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
u64 packets, bytes, multicast;
do {
- start = u64_stats_fetch_begin_bh(&rxstats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxstats->syncp);
packets = rxstats->rx_frms;
multicast = rxstats->rx_mcast;
bytes = rxstats->rx_bytes;
- } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
net_stats->rx_packets += packets;
net_stats->rx_bytes += bytes;
@@ -3146,11 +3153,11 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
net_stats->rx_dropped += rxstats->rx_dropped;
do {
- start = u64_stats_fetch_begin_bh(&txstats->syncp);
+ start = u64_stats_fetch_begin_irq(&txstats->syncp);
packets = txstats->tx_frms;
bytes = txstats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&txstats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
net_stats->tx_packets += packets;
net_stats->tx_bytes += bytes;
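The u64_stats_fetch_begin_bh()/retry_bh() → _irq() conversions here (and in forcedeth below) change only the begin/retry helpers; the reader loop itself is unchanged. On 32-bit hosts the _irq variants protect against stats writers that run in hard-interrupt context, which the _bh variants do not; on 64-bit builds both effectively reduce to plain loads. A sketch of the reader pattern, assuming a hypothetical per-ring counter structure with its own u64_stats_sync:

	#include <linux/u64_stats_sync.h>

	struct example_ring_stats {
		u64			packets;
		u64			bytes;
		struct u64_stats_sync	syncp;
	};

	static void example_read_stats(struct example_ring_stats *s,
				       u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			*packets = s->packets;
			*bytes	 = s->bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
	}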
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 70cf97fe67f2..fddb464aeab3 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1753,19 +1753,19 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
/* software stats */
do {
- syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+ syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
storage->rx_packets = np->stat_rx_packets;
storage->rx_bytes = np->stat_rx_bytes;
storage->rx_dropped = np->stat_rx_dropped;
storage->rx_missed_errors = np->stat_rx_missed_errors;
- } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+ } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
do {
- syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+ syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
storage->tx_packets = np->stat_tx_packets;
storage->tx_bytes = np->stat_tx_bytes;
storage->tx_dropped = np->stat_tx_dropped;
- } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
+ } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
/* If the nic supports hw counters then retrieve latest values */
if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -2231,7 +2231,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pci_dma_mapping_error(np->pci_dev,
np->put_tx_ctx->dma)) {
/* on DMA mapping error - drop the packet */
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2277,7 +2277,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
tmp_tx_ctx = np->first_tx_ctx;
} while (tmp_tx_ctx != np->put_tx_ctx);
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
@@ -2380,7 +2380,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
if (pci_dma_mapping_error(np->pci_dev,
np->put_tx_ctx->dma)) {
/* on DMA mapping error - drop the packet */
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2427,7 +2427,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
tmp_tx_ctx = np->first_tx_ctx;
} while (tmp_tx_ctx != np->put_tx_ctx);
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
@@ -3930,7 +3930,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
- int ret = 1;
+ int ret;
int i;
irqreturn_t (*handler)(int foo, void *data);
@@ -3946,14 +3946,18 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
if (np->msi_flags & NV_MSI_X_CAPABLE) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
np->msi_x_entry[i].entry = i;
- ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
- if (ret == 0) {
+ ret = pci_enable_msix_range(np->pci_dev,
+ np->msi_x_entry,
+ np->msi_flags & NV_MSI_X_VECTORS_MASK,
+ np->msi_flags & NV_MSI_X_VECTORS_MASK);
+ if (ret > 0) {
np->msi_flags |= NV_MSI_X_ENABLED;
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
/* Request irq for rx handling */
sprintf(np->name_rx, "%s-rx", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
- nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
+ nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for rx %d\n",
ret);
@@ -3963,8 +3967,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
/* Request irq for tx handling */
sprintf(np->name_tx, "%s-tx", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
- nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
+ nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for tx %d\n",
ret);
@@ -3974,8 +3979,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
/* Request irq for link and timer handling */
sprintf(np->name_other, "%s-other", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
- nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
+ nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for link %d\n",
ret);
@@ -3991,7 +3997,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
} else {
/* Request irq for all interrupts */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
+ handler, IRQF_SHARED, dev->name, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed %d\n",
ret);
@@ -4005,13 +4013,15 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIXMap1);
}
netdev_info(dev, "MSI-X enabled\n");
+ return 0;
}
}
- if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+ if (np->msi_flags & NV_MSI_CAPABLE) {
ret = pci_enable_msi(np->pci_dev);
if (ret == 0) {
np->msi_flags |= NV_MSI_ENABLED;
- if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
+ ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
+ if (ret) {
netdev_info(dev, "request_irq failed %d\n",
ret);
pci_disable_msi(np->pci_dev);
@@ -4025,13 +4035,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
/* enable msi vector 0 */
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
netdev_info(dev, "MSI enabled\n");
+ return 0;
}
}
- if (ret != 0) {
- if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
- goto out_err;
- }
+ if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
+ goto out_err;
return 0;
out_free_tx:
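Besides the pci_enable_msix_range() conversion, the forcedeth rework fixes its error reporting: the old code logged "%d" with a ret that had never been assigned from request_irq() (it still held the zero returned by a successful pci_enable_msix()), so the messages printed a meaningless value. Capturing the return value first also gives the MSI-X and MSI success paths a clean return 0 instead of threading state through ret. The corrected pattern, with the handler name and label as placeholders:

	ret = request_irq(irq, example_irq_handler, IRQF_SHARED, dev->name, dev);
	if (ret) {
		/* ret now carries the real errno from request_irq() */
		netdev_info(dev, "request_irq failed %d\n", ret);
		goto out_err;
	}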
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 464e91058c81..73e66838cfef 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -120,10 +120,6 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
static void pch_gbe_set_multi(struct net_device *netdev);
-static struct sock_filter ptp_filter[] = {
- PTP_FILTER
-};
-
static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
u8 *data = skb->data;
@@ -131,7 +127,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
u16 *hi, *id;
u32 lo;
- if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
+ if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
return 0;
offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
@@ -2635,11 +2631,6 @@ static int pch_gbe_probe(struct pci_dev *pdev,
adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
PCI_DEVFN(12, 4));
- if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
- dev_err(&pdev->dev, "Bad ptp filter\n");
- ret = -EINVAL;
- goto err_free_netdev;
- }
netdev->netdev_ops = &pch_gbe_netdev_ops;
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
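Dropping the private ptp_filter[] array means pch_gbe no longer has to carry its own copy of PTP_FILTER, run ptp_filter_init() at probe time, or go through the BPF interpreter via sk_run_filter(); ptp_classify_raw() runs the kernel's shared PTP classifier and returns a PTP_CLASS_* value directly. A sketch of the resulting check, assuming only a received skb (the helper name is hypothetical):

	#include <linux/ptp_classify.h>

	/* Returns true if @skb classifies as a PTP message. */
	static bool example_is_ptp(struct sk_buff *skb)
	{
		return ptp_classify_raw(skb) != PTP_CLASS_NONE;
	}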
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index f59e6be4a66e..c14bd3116e45 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -56,6 +56,16 @@ config QLCNIC_DCB
mode of DCB is supported. PG and PFC values are related only
to Tx.
+config QLCNIC_VXLAN
+ bool "Virtual eXtensible Local Area Network (VXLAN) offload support"
+ default n
+ depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m)
+ ---help---
+	  This enables hardware offload support for the VXLAN protocol on
+	  QLogic's 84XX series adapters.
+	  Say Y here if you want to enable hardware offload support for
+	  Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 70849dea32b1..f09c35d669b3 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -643,8 +643,9 @@ static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
if (adapter->msix_supported) {
netxen_init_msix_entries(adapter, num_msix);
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
+ err = pci_enable_msix_range(pdev, adapter->msix_entries,
+ num_msix, num_msix);
+ if (err > 0) {
adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
netxen_set_msix_bit(pdev, 1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f19f81cde134..7b52a88923ef 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -23,6 +23,7 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/timer.h>
+#include <linux/irq.h>
#include <linux/vmalloc.h>
@@ -38,8 +39,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 55
-#define QLCNIC_LINUX_VERSIONID "5.3.55"
+#define _QLCNIC_LINUX_SUBVERSION 57
+#define QLCNIC_LINUX_VERSIONID "5.3.57"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -169,11 +170,20 @@ struct cmd_desc_type0 {
__le64 addr_buffer2;
- __le16 reference_handle;
+	__le16 encap_descr;	/* 15:10 offset of outer L3 header,
+				 * 9:6 number of 32-bit words in outer L3 header,
+				 * 5 offload outer L4 checksum,
+				 * 4 offload outer L3 checksum,
+				 * 3 Inner L4 type, TCP=0, UDP=1,
+				 * 2 Inner L3 type, IPv4=0, IPv6=1,
+				 * 1 Outer L3 type, IPv4=0, IPv6=1,
+				 * 0 type of encapsulation, GRE=0, VXLAN=1
+				 */
__le16 mss;
u8 port_ctxid; /* 7:4 ctxid 3:0 port */
- u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
- __le16 conn_id; /* IPSec offoad only */
+ u8 hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ u8 outer_hdr_length; /* Encapsulation only */
+ u8 rsvd1;
__le64 addr_buffer3;
__le64 addr_buffer1;
@@ -183,7 +193,9 @@ struct cmd_desc_type0 {
__le64 addr_buffer4;
u8 eth_addr[ETH_ALEN];
- __le16 vlan_TCI;
+ __le16 vlan_TCI; /* In case of encapsulation,
+ * this is for outer VLAN
+ */
} __attribute__ ((aligned(64)));
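The encap_descr comment above packs the whole outer-header description into one 16-bit word: bits 0–5 are type and checksum-offload flags, bits 9:6 carry the outer L3 header length in 32-bit words, and bits 15:10 its offset. A sketch of how a VXLAN-over-IPv4 frame with an inner TCP payload might be encoded, mirroring what qlcnic_tx_encap_pkt() does later in this series (skb and first_desc are assumed to come from the transmit path):

	u16 encap_descr = 0;

	encap_descr |= 1;	/* bit 0: VXLAN encapsulation               */
				/* bits 1-2 clear: outer and inner L3 = IPv4 */
				/* bit 3 clear: inner L4 = TCP               */
	/* bits 9:6 - outer IP header length in 32-bit words */
	encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
	/* bits 15:10 - offset of the outer IP header */
	encap_descr |= skb_network_offset(skb) << 10;

	first_desc->encap_descr = cpu_to_le16(encap_descr);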
@@ -394,7 +406,7 @@ struct qlcnic_nic_intr_coalesce {
u32 timer_out;
};
-struct qlcnic_dump_template_hdr {
+struct qlcnic_83xx_dump_template_hdr {
u32 type;
u32 offset;
u32 size;
@@ -411,15 +423,42 @@ struct qlcnic_dump_template_hdr {
u32 rsvd[0];
};
+struct qlcnic_82xx_dump_template_hdr {
+ u32 type;
+ u32 offset;
+ u32 size;
+ u32 cap_mask;
+ u32 num_entries;
+ u32 version;
+ u32 timestamp;
+ u32 checksum;
+ u32 drv_cap_mask;
+ u32 sys_info[3];
+ u32 saved_state[16];
+ u32 cap_sizes[8];
+ u32 rsvd[7];
+ u32 capabilities;
+ u32 rsvd1[0];
+};
+
struct qlcnic_fw_dump {
u8 clr; /* flag to indicate if dump is cleared */
bool enable; /* enable/disable dump */
u32 size; /* total size of the dump */
+ u32 cap_mask; /* Current capture mask */
void *data; /* dump data area */
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ void *tmpl_hdr;
dma_addr_t phys_addr;
void *dma_buffer;
bool use_pex_dma;
+	/* Read-only fields common to the 82xx and 83xx template headers.
+	 * Update these values immediately after reading the template header
+	 * from firmware.
+	 */
+ u32 tmpl_hdr_size;
+ u32 version;
+ u32 num_entries;
+ u32 offset;
};
/*
@@ -497,6 +536,7 @@ struct qlcnic_hardware_context {
u8 extend_lb_time;
u8 phys_port_id[ETH_ALEN];
u8 lb_mode;
+ u16 vxlan_port;
};
struct qlcnic_adapter_stats {
@@ -511,6 +551,9 @@ struct qlcnic_adapter_stats {
u64 txbytes;
u64 lrobytes;
u64 lso_frames;
+ u64 encap_lso_frames;
+ u64 encap_tx_csummed;
+ u64 encap_rx_csummed;
u64 xmit_on;
u64 xmit_off;
u64 skb_alloc_failure;
@@ -872,6 +915,10 @@ struct qlcnic_mac_vlan_list {
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_CKO_OFFLOAD BIT_4
+
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
@@ -966,6 +1013,11 @@ struct qlcnic_ipaddr {
#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
#define QLCNIC_TSS_RSS 0x80000
+#ifdef CONFIG_QLCNIC_VXLAN
+#define QLCNIC_ADD_VXLAN_PORT 0x100000
+#define QLCNIC_DEL_VXLAN_PORT 0x200000
+#endif
+
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -1769,10 +1821,28 @@ struct qlcnic_hardware_ops {
struct qlcnic_host_tx_ring *);
void (*disable_tx_intr) (struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *);
+ u32 (*get_saved_state)(void *, u32);
+ void (*set_saved_state)(void *, u32, u32);
+ void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *);
+ u32 (*get_cap_size)(void *, int);
+ void (*set_sys_info)(void *, int, u32);
+ void (*store_cap_mask)(void *, u32);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->extra_capability[0] &
+ QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->extra_capability[0] &
+ QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
+}
+
static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
return adapter->nic_ops->start_firmware(adapter);
@@ -2007,6 +2077,42 @@ static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
adapter->ahw->hw_ops->read_phys_port_id(adapter);
}
+static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index)
+{
+ return adapter->ahw->hw_ops->get_saved_state(t_hdr, index);
+}
+
+static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index, u32 value)
+{
+ adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value);
+}
+
+static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_dump *fw_dump)
+{
+ adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump);
+}
+
+static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int index)
+{
+ return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index);
+}
+
+static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int idx, u32 value)
+{
+ adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value);
+}
+
+static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, u32 mask)
+{
+ adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask);
+}
+
static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
u32 key)
{
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 27c4f131863b..b7cffb46a75d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -77,7 +77,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
{QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
{QLCNIC_CMD_IDC_ACK, 5, 1},
- {QLCNIC_CMD_INIT_NIC_FUNC, 2, 1},
+ {QLCNIC_CMD_INIT_NIC_FUNC, 3, 1},
{QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
{QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
{QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
@@ -87,6 +87,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
+ {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -203,7 +204,12 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.disable_sds_intr = qlcnic_83xx_disable_sds_intr,
.enable_tx_intr = qlcnic_83xx_enable_tx_intr,
.disable_tx_intr = qlcnic_83xx_disable_tx_intr,
-
+ .get_saved_state = qlcnic_83xx_get_saved_state,
+ .set_saved_state = qlcnic_83xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_83xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_83xx_get_cap_size,
+ .set_sys_info = qlcnic_83xx_set_sys_info,
+ .store_cap_mask = qlcnic_83xx_store_cap_mask,
};
static struct qlcnic_nic_template qlcnic_83xx_ops = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index f92485ca21d1..88d809c35633 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -308,6 +308,8 @@ struct qlc_83xx_reset {
#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020
struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
struct qlc_83xx_idc {
int (*state_entry) (struct qlcnic_adapter *);
u64 sec_counter;
@@ -526,8 +528,9 @@ enum qlc_83xx_ext_regs {
};
/* Initialize/Stop NIC command bit definitions */
-#define QLC_REGISTER_DCB_AEN BIT_1
#define QLC_REGISTER_LB_IDC BIT_0
+#define QLC_REGISTER_DCB_AEN BIT_1
+#define QLC_83XX_MULTI_TENANCY_INFO BIT_29
#define QLC_INIT_FW_RESOURCES BIT_31
/* 83xx funcitons */
@@ -650,4 +653,10 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+u32 qlcnic_83xx_get_saved_state(void *, u32);
+void qlcnic_83xx_set_saved_state(void *, u32, u32);
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_83xx_get_cap_size(void *, int);
+void qlcnic_83xx_set_sys_info(void *, int, u32);
+void qlcnic_83xx_store_cap_mask(void *, u32);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 90a2dda351ec..b48737dcd3c5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1020,10 +1020,99 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
return 0;
}
+#ifdef CONFIG_QLCNIC_VXLAN
+#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1
+#define QLC_83XX_MATCH_ENCAP_ID BIT_2
+#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3
+#define QLC_83XX_VXLAN_UDP_DPORT(PORT) ((PORT & 0xffff) << 16)
+
+#define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1
+#define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0
+
+static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter)
+{
+ u16 port = adapter->ahw->vxlan_port;
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_INIT_NIC_FUNC);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO;
+ cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN |
+ QLC_83XX_SET_VXLAN_UDP_DPORT |
+ QLC_83XX_VXLAN_UDP_DPORT(port);
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Failed to set VXLAN port %d in adapter\n",
+ port);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return ret;
+}
+
+static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
+ bool state)
+{
+ u16 vxlan_port = adapter->ahw->vxlan_port;
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_INGRESS_ENCAP);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
+ QLCNIC_DISABLE_INGRESS_ENCAP_PARSING;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Failed to %s VXLAN parsing for port %d\n",
+ state ? "enable" : "disable", vxlan_port);
+ else
+ netdev_info(adapter->netdev,
+ "%s VXLAN parsing for port %d\n",
+ state ? "Enabled" : "Disabled", vxlan_port);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return ret;
+}
+#endif
+
static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
{
if (adapter->fhash.fnum)
qlcnic_prune_lb_filters(adapter);
+
+#ifdef CONFIG_QLCNIC_VXLAN
+ if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
+ if (qlcnic_set_vxlan_port(adapter))
+ return;
+
+ if (qlcnic_set_vxlan_parsing(adapter, true))
+ return;
+
+ adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT;
+ } else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) {
+ if (qlcnic_set_vxlan_parsing(adapter, false))
+ return;
+
+ adapter->ahw->vxlan_port = 0;
+ adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
+ }
+#endif
}
/**
@@ -1301,7 +1390,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
addr = (u64)dest;
ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
- (u32 *)p_cache, size / 16);
+ p_cache, size / 16);
if (ret) {
dev_err(&adapter->pdev->dev, "MS memory write failed\n");
release_firmware(fw);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index acee1a5d80c6..5bacf5210aed 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -47,6 +47,12 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
{"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
{"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
{"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+ {"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames),
+ QLC_OFF(stats.encap_lso_frames)},
+ {"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed),
+ QLC_OFF(stats.encap_tx_csummed)},
+ {"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed),
+ QLC_OFF(stats.encap_rx_csummed)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
{"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
@@ -1639,14 +1645,14 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
}
if (fw_dump->clr)
- dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
+ dump->len = fw_dump->tmpl_hdr_size + fw_dump->size;
else
dump->len = 0;
if (!qlcnic_check_fw_dump_state(adapter))
dump->flag = ETH_FW_DUMP_DISABLE;
else
- dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+ dump->flag = fw_dump->cap_mask;
dump->version = adapter->fw_version;
return 0;
@@ -1671,9 +1677,10 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
netdev_info(netdev, "Dump not available\n");
return -EINVAL;
}
+
/* Copy template header first */
- copy_sz = fw_dump->tmpl_hdr->size;
- hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
+ copy_sz = fw_dump->tmpl_hdr_size;
+ hdr_ptr = (u32 *)fw_dump->tmpl_hdr;
data = buffer;
for (i = 0; i < copy_sz/sizeof(u32); i++)
*data++ = cpu_to_le32(*hdr_ptr++);
@@ -1681,7 +1688,7 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
/* Copy captured dump data */
memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
dump->len = copy_sz + fw_dump->size;
- dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+ dump->flag = fw_dump->cap_mask;
/* Free dump area once data has been captured */
vfree(fw_dump->data);
@@ -1703,7 +1710,11 @@ static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
return -EOPNOTSUPP;
}
- fw_dump->tmpl_hdr->drv_cap_mask = mask;
+ fw_dump->cap_mask = mask;
+
+	/* Store the new capture mask in the template header as well */
+ qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask);
+
netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 03d18a0be6ce..9f3adf4e70b5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -317,9 +317,7 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
- int timeout = 0;
- int err = 0;
- u32 done = 0;
+ int timeout = 0, err = 0, done = 0;
while (!done) {
done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
@@ -327,10 +325,20 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
if (done == 1)
break;
if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
- dev_err(&adapter->pdev->dev,
- "Failed to acquire sem=%d lock; holdby=%d\n",
- sem,
- id_reg ? QLCRD32(adapter, id_reg, &err) : -1);
+ if (id_reg) {
+ done = QLCRD32(adapter, id_reg, &err);
+ if (done != -1)
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock held by=%d\n",
+ sem, done);
+ else
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock",
+ sem);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock", sem);
+ }
return -EIO;
}
msleep(1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 63d75617d445..cbe2399c30a0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -98,6 +98,7 @@ enum qlcnic_regs {
#define QLCNIC_CMD_GET_LINK_EVENT 0x48
#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49
#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A
+#define QLCNIC_CMD_SET_INGRESS_ENCAP 0x4E
#define QLCNIC_CMD_INIT_NIC_FUNC 0x60
#define QLCNIC_CMD_STOP_NIC_FUNC 0x61
#define QLCNIC_CMD_IDC_ACK 0x63
@@ -161,6 +162,7 @@ struct qlcnic_host_sds_ring;
struct qlcnic_host_tx_ring;
struct qlcnic_hardware_context;
struct qlcnic_adapter;
+struct qlcnic_fw_dump;
int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
@@ -213,4 +215,11 @@ int qlcnic_82xx_shutdown(struct pci_dev *);
int qlcnic_82xx_resume(struct qlcnic_adapter *);
void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
void qlcnic_fw_poll_work(struct work_struct *work);
+
+u32 qlcnic_82xx_get_saved_state(void *, u32);
+void qlcnic_82xx_set_saved_state(void *, u32, u32);
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_82xx_get_cap_size(void *, int);
+void qlcnic_82xx_set_sys_info(void *, int, u32);
+void qlcnic_82xx_store_cap_mask(void *, u32);
#endif /* __QLCNIC_HW_H_ */
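With tmpl_hdr reduced to an opaque void * in struct qlcnic_fw_dump, every access to a generation-specific header field now goes through the 82xx accessors declared above or their 83xx counterparts in qlcnic_83xx_hw.h, selected via adapter->ahw->hw_ops and the inline wrappers added to qlcnic.h earlier in this patch. A sketch of that dispatch, assuming an adapter whose hw_ops table is populated and using an arbitrary saved-state slot index:

	void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
	u32 val;

	/* reads saved_state[3] from whichever header layout is in use */
	val = qlcnic_get_saved_state(adapter, hdr, 3);

	/* writes it back through the same per-generation accessor */
	qlcnic_set_saved_state(adapter, hdr, 3, val);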
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 54ebf300332a..173b3d12991f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -13,16 +13,19 @@
#include "qlcnic.h"
-#define TX_ETHER_PKT 0x01
-#define TX_TCP_PKT 0x02
-#define TX_UDP_PKT 0x03
-#define TX_IP_PKT 0x04
-#define TX_TCP_LSO 0x05
-#define TX_TCP_LSO6 0x06
-#define TX_TCPV6_PKT 0x0b
-#define TX_UDPV6_PKT 0x0c
-#define FLAGS_VLAN_TAGGED 0x10
-#define FLAGS_VLAN_OOB 0x40
+#define QLCNIC_TX_ETHER_PKT 0x01
+#define QLCNIC_TX_TCP_PKT 0x02
+#define QLCNIC_TX_UDP_PKT 0x03
+#define QLCNIC_TX_IP_PKT 0x04
+#define QLCNIC_TX_TCP_LSO 0x05
+#define QLCNIC_TX_TCP_LSO6 0x06
+#define QLCNIC_TX_ENCAP_PKT 0x07
+#define QLCNIC_TX_ENCAP_LSO 0x08
+#define QLCNIC_TX_TCPV6_PKT 0x0b
+#define QLCNIC_TX_UDPV6_PKT 0x0c
+
+#define QLCNIC_FLAGS_VLAN_TAGGED 0x10
+#define QLCNIC_FLAGS_VLAN_OOB 0x40
#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
(cmd_desc)->vlan_TCI = cpu_to_le16(v);
@@ -364,6 +367,101 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
spin_unlock(&adapter->mac_learn_lock);
}
+#define QLCNIC_ENCAP_VXLAN_PKT BIT_0
+#define QLCNIC_ENCAP_OUTER_L3_IP6 BIT_1
+#define QLCNIC_ENCAP_INNER_L3_IP6 BIT_2
+#define QLCNIC_ENCAP_INNER_L4_UDP BIT_3
+#define QLCNIC_ENCAP_DO_L3_CSUM BIT_4
+#define QLCNIC_ENCAP_DO_L4_CSUM BIT_5
+
+static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
+ int copied, copy_len, descr_size;
+ u32 producer = tx_ring->producer;
+ struct cmd_desc_type0 *hwdesc;
+ u16 flags = 0, encap_descr = 0;
+
+ opcode = QLCNIC_TX_ETHER_PKT;
+ encap_descr = QLCNIC_ENCAP_VXLAN_PKT;
+
+ if (skb_is_gso(skb)) {
+ inner_hdr_len = skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) -
+ skb_inner_mac_header(skb);
+
+ /* VXLAN header size = 8 */
+ outer_hdr_len = skb_transport_offset(skb) + 8 +
+ sizeof(struct udphdr);
+ first_desc->outer_hdr_length = outer_hdr_len;
+ total_hdr_len = inner_hdr_len + outer_hdr_len;
+ encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
+ QLCNIC_ENCAP_DO_L4_CSUM;
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->hdr_length = inner_hdr_len;
+
+		/* Copy the inner and outer headers into Tx descriptor(s).
+		 * If total_hdr_len > sizeof(struct cmd_desc_type0), use
+		 * multiple descriptors.
+		 */
+ copied = 0;
+ descr_size = (int)sizeof(struct cmd_desc_type0);
+ while (copied < total_hdr_len) {
+ copy_len = min(descr_size, (total_hdr_len - copied));
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc,
+ copy_len);
+ copied += copy_len;
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+
+		/* Make sure the updated tx_ring->producer is visible
+		 * to qlcnic_tx_avail()
+		 */
+ smp_mb();
+ adapter->stats.encap_lso_frames++;
+
+ opcode = QLCNIC_TX_ENCAP_LSO;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+ } else {
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+ encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+ }
+
+ adapter->stats.encap_tx_csummed++;
+ opcode = QLCNIC_TX_ENCAP_PKT;
+ }
+
+ /* Prepare first 16 bits of byte offset 16 of Tx descriptor */
+ if (ip_hdr(skb)->version == 6)
+ encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;
+
+	/* outer IP header size in 32-bit words */
+ encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
+
+ /* outer IP header offset */
+ encap_descr |= skb_network_offset(skb) << 10;
+ first_desc->encap_descr = cpu_to_le16(encap_descr);
+
+ first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
+ skb->data;
+ first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
+
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ return 0;
+}
+
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
struct qlcnic_host_tx_ring *tx_ring)
@@ -378,11 +476,11 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
if (protocol == ETH_P_8021Q) {
vh = (struct vlan_ethhdr *)skb->data;
- flags = FLAGS_VLAN_TAGGED;
+ flags = QLCNIC_FLAGS_VLAN_TAGGED;
vlan_tci = ntohs(vh->h_vlan_TCI);
protocol = ntohs(vh->h_vlan_encapsulated_proto);
} else if (vlan_tx_tag_present(skb)) {
- flags = FLAGS_VLAN_OOB;
+ flags = QLCNIC_FLAGS_VLAN_OOB;
vlan_tci = vlan_tx_tag_get(skb);
}
if (unlikely(adapter->tx_pvid)) {
@@ -391,7 +489,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
goto set_flags;
- flags = FLAGS_VLAN_OOB;
+ flags = QLCNIC_FLAGS_VLAN_OOB;
vlan_tci = adapter->tx_pvid;
}
set_flags:
@@ -402,25 +500,26 @@ set_flags:
flags |= BIT_0;
memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
}
- opcode = TX_ETHER_PKT;
+ opcode = QLCNIC_TX_ETHER_PKT;
if (skb_is_gso(skb)) {
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- first_desc->total_hdr_length = hdr_len;
- opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+ first_desc->hdr_length = hdr_len;
+ opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
+ QLCNIC_TX_TCP_LSO;
/* For LSO, we need to copy the MAC/IP/TCP headers into
* the descriptor ring */
copied = 0;
offset = 2;
- if (flags & FLAGS_VLAN_OOB) {
- first_desc->total_hdr_length += VLAN_HLEN;
+ if (flags & QLCNIC_FLAGS_VLAN_OOB) {
+ first_desc->hdr_length += VLAN_HLEN;
first_desc->tcp_hdr_offset = VLAN_HLEN;
first_desc->ip_hdr_offset = VLAN_HLEN;
/* Only in case of TSO on vlan device */
- flags |= FLAGS_VLAN_TAGGED;
+ flags |= QLCNIC_FLAGS_VLAN_TAGGED;
/* Create a TSO vlan header template for firmware */
hwdesc = &tx_ring->desc_head[producer];
@@ -464,16 +563,16 @@ set_flags:
l4proto = ip_hdr(skb)->protocol;
if (l4proto == IPPROTO_TCP)
- opcode = TX_TCP_PKT;
+ opcode = QLCNIC_TX_TCP_PKT;
else if (l4proto == IPPROTO_UDP)
- opcode = TX_UDP_PKT;
+ opcode = QLCNIC_TX_UDP_PKT;
} else if (protocol == ETH_P_IPV6) {
l4proto = ipv6_hdr(skb)->nexthdr;
if (l4proto == IPPROTO_TCP)
- opcode = TX_TCPV6_PKT;
+ opcode = QLCNIC_TX_TCPV6_PKT;
else if (l4proto == IPPROTO_UDP)
- opcode = TX_UDPV6_PKT;
+ opcode = QLCNIC_TX_UDPV6_PKT;
}
}
first_desc->tcp_hdr_offset += skb_transport_offset(skb);
@@ -563,6 +662,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct ethhdr *phdr;
int i, k, frag_count, delta = 0;
u32 producer, num_txd;
+ u16 protocol;
+ bool l4_is_udp = false;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_tx_stop_all_queues(netdev);
@@ -653,8 +754,23 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->producer = get_next_index(producer, num_txd);
smp_mb();
- if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
- goto unwind_buff;
+ protocol = ntohs(skb->protocol);
+ if (protocol == ETH_P_IP)
+ l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
+ else if (protocol == ETH_P_IPV6)
+ l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
+
+ /* Check if it is a VXLAN packet */
+ if (!skb->encapsulation || !l4_is_udp ||
+ !qlcnic_encap_tx_offload(adapter)) {
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
+ tx_ring)))
+ goto unwind_buff;
+ } else {
+ if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
+ skb, tx_ring)))
+ goto unwind_buff;
+ }
if (adapter->drv_mac_learn)
qlcnic_send_filter(adapter, first_desc, skb);
@@ -1587,6 +1703,13 @@ static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}
+#define QLCNIC_ENCAP_LENGTH_MASK 0x7f
+
+static inline u8 qlcnic_encap_length(u64 sts_data)
+{
+ return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
+}
+
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
struct qlcnic_host_sds_ring *sds_ring,
@@ -1637,6 +1760,12 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
+ if (qlcnic_encap_length(sts_data[1]) &&
+ skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ skb->encapsulation = 1;
+ adapter->stats.encap_rx_csummed++;
+ }
+
if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1222865cfb73..309d05640883 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -21,6 +21,9 @@
#include <linux/aer.h>
#include <linux/log2.h>
#include <linux/pci.h>
+#ifdef CONFIG_QLCNIC_VXLAN
+#include <net/vxlan.h>
+#endif
MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -90,7 +93,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *);
static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
pci_channel_state_t);
-
static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -462,6 +464,37 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_QLCNIC_VXLAN
+static void qlcnic_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+	/* The adapter supports only one VXLAN port. Use the very first
+	 * port for enabling offload.
+	 */
+ if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
+ return;
+
+ ahw->vxlan_port = ntohs(port);
+ adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
+}
+
+static void qlcnic_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
+ (ahw->vxlan_port != ntohs(port)))
+ return;
+
+ adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+}
+#endif
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
@@ -480,6 +513,10 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
.ndo_get_phys_port_id = qlcnic_get_phys_port_id,
+#ifdef CONFIG_QLCNIC_VXLAN
+ .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
+ .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -561,6 +598,12 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.disable_sds_intr = qlcnic_82xx_disable_sds_intr,
.enable_tx_intr = qlcnic_82xx_enable_tx_intr,
.disable_tx_intr = qlcnic_82xx_disable_tx_intr,
+ .get_saved_state = qlcnic_82xx_get_saved_state,
+ .set_saved_state = qlcnic_82xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_82xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_82xx_get_cap_size,
+ .set_sys_info = qlcnic_82xx_set_sys_info,
+ .store_cap_mask = qlcnic_82xx_store_cap_mask,
};
static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
@@ -684,7 +727,7 @@ restore:
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int err = -1, vector;
+ int err, vector;
if (!adapter->msix_entries) {
adapter->msix_entries = kcalloc(num_msix,
@@ -701,13 +744,17 @@ enable_msix:
for (vector = 0; vector < num_msix; vector++)
adapter->msix_entries[vector].entry = vector;
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
+ err = pci_enable_msix_range(pdev,
+ adapter->msix_entries, 1, num_msix);
+
+ if (err == num_msix) {
adapter->flags |= QLCNIC_MSIX_ENABLED;
adapter->ahw->num_msix = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
- return err;
+ return 0;
} else if (err > 0) {
+ pci_disable_msix(pdev);
+
dev_info(&pdev->dev,
"Unable to allocate %d MSI-X vectors, Available vectors %d\n",
num_msix, err);
@@ -715,12 +762,12 @@ enable_msix:
if (qlcnic_82xx_check(adapter)) {
num_msix = rounddown_pow_of_two(err);
if (err < QLCNIC_82XX_MINIMUM_VECTOR)
- return -EIO;
+ return -ENOSPC;
} else {
num_msix = rounddown_pow_of_two(err - 1);
num_msix += 1;
if (err < QLCNIC_83XX_MINIMUM_VECTOR)
- return -EIO;
+ return -ENOSPC;
}
if (qlcnic_82xx_check(adapter) &&
@@ -747,7 +794,7 @@ enable_msix:
}
}
- return err;
+ return -EIO;
}
static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
@@ -1934,6 +1981,11 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
qlcnic_create_sysfs_entries(adapter);
+#ifdef CONFIG_QLCNIC_VXLAN
+ if (qlcnic_encap_rx_offload(adapter))
+ vxlan_get_rx_port(netdev);
+#endif
+
adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
return 0;
@@ -2196,6 +2248,19 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
netdev->features |= NETIF_F_LRO;
+ if (qlcnic_encap_tx_offload(adapter)) {
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+		/* encapsulation Tx offload supported by the adapter */
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+ }
+
+ if (qlcnic_encap_rx_offload(adapter))
+ netdev->hw_enc_features |= NETIF_F_RXCSUM;
+
netdev->hw_features = netdev->features;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
@@ -2442,8 +2507,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
switch (err) {
case -ENOTRECOVERABLE:
- dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware. Please reboot\n");
- dev_err(&pdev->dev, "If reboot doesn't help, please replace the adapter with new one and return the faulty adapter for repair\n");
+ dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware\n");
+ dev_err(&pdev->dev, "Please replace the adapter with new one and return the faulty adapter for repair\n");
goto err_out_free_hw;
case -ENOMEM:
dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 7763962e2ec4..37b979b1266b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -211,6 +211,107 @@ enum qlcnic_minidump_opcode {
QLCNIC_DUMP_RDEND = 255
};
+inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+}
+
+inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->cap_sizes[index];
+}
+
+void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->sys_info[idx] = value;
+}
+
+void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
+
+ hdr->drv_cap_mask = mask;
+}
+
+inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+}
+
+inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->cap_sizes[index];
+}
+
+void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->sys_info[idx] = value;
+}
+
+void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = tmpl_hdr;
+ hdr->drv_cap_mask = mask;
+}
+
struct qlcnic_dump_operations {
enum qlcnic_minidump_opcode opcode;
u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
@@ -238,11 +339,11 @@ static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
+ void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
+ struct __ctrl *ctr = &entry->region.ctrl;
int i, k, timeout = 0;
- u32 addr, data;
+ u32 addr, data, temp;
u8 no_ops;
- struct __ctrl *ctr = &entry->region.ctrl;
- struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
addr = ctr->addr;
no_ops = ctr->no_ops;
@@ -285,29 +386,42 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
}
break;
case QLCNIC_DUMP_RD_SAVE:
- if (ctr->index_a)
- addr = t_hdr->saved_state[ctr->index_a];
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
data = qlcnic_ind_rd(adapter, addr);
- t_hdr->saved_state[ctr->index_v] = data;
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
break;
case QLCNIC_DUMP_WRT_SAVED:
- if (ctr->index_v)
- data = t_hdr->saved_state[ctr->index_v];
+ temp = ctr->index_v;
+ if (temp)
+ data = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
else
data = ctr->val1;
- if (ctr->index_a)
- addr = t_hdr->saved_state[ctr->index_a];
+
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_MOD_SAVE_ST:
- data = t_hdr->saved_state[ctr->index_v];
+ data = qlcnic_get_saved_state(adapter, hdr,
+ ctr->index_v);
data <<= ctr->shl_val;
data >>= ctr->shr_val;
if (ctr->val2)
data &= ctr->val2;
data |= ctr->val3;
data += ctr->val1;
- t_hdr->saved_state[ctr->index_v] = data;
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
break;
default:
dev_info(&adapter->pdev->dev,
@@ -544,7 +658,7 @@ out:
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
struct __mem *mem)
{
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
u32 dma_no, dma_base_addr, temp_addr;
int i, ret, dma_sts;
@@ -596,7 +710,7 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
u32 temp, dma_base_addr, size = 0, read_size = 0;
struct qlcnic_pex_dma_descriptor *dma_descr;
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
dma_addr_t dma_phys_addr;
void *dma_buffer;
@@ -938,8 +1052,8 @@ static int
qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_dump_template_hdr tmp_hdr;
- u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
+ struct qlcnic_83xx_dump_template_hdr tmp_hdr;
+ u32 size = sizeof(tmp_hdr) / sizeof(u32);
int ret = 0;
if (qlcnic_82xx_check(adapter))
@@ -1027,17 +1141,19 @@ free_mem:
return err;
}
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
- int err;
- u32 temp_size = 0;
- u32 version, csum, *tmp_buf;
struct qlcnic_hardware_context *ahw;
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ struct qlcnic_fw_dump *fw_dump;
+ u32 version, csum, *tmp_buf;
u8 use_flash_temp = 0;
+ u32 temp_size = 0;
+ int err;
ahw = adapter->ahw;
-
+ fw_dump = &ahw->fw_dump;
err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
&use_flash_temp);
if (err) {
@@ -1046,11 +1162,11 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
return -EIO;
}
- ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
- if (!ahw->fw_dump.tmpl_hdr)
+ fw_dump->tmpl_hdr = vzalloc(temp_size);
+ if (!fw_dump->tmpl_hdr)
return -ENOMEM;
- tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
+ tmp_buf = (u32 *)fw_dump->tmpl_hdr;
if (use_flash_temp)
goto flash_temp;
@@ -1065,8 +1181,8 @@ flash_temp:
dev_err(&adapter->pdev->dev,
"Failed to get minidump template header %d\n",
err);
- vfree(ahw->fw_dump.tmpl_hdr);
- ahw->fw_dump.tmpl_hdr = NULL;
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
return -EIO;
}
}
@@ -1076,21 +1192,22 @@ flash_temp:
if (csum) {
dev_err(&adapter->pdev->dev,
"Template header checksum validation failed\n");
- vfree(ahw->fw_dump.tmpl_hdr);
- ahw->fw_dump.tmpl_hdr = NULL;
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
return -EIO;
}
- tmpl_hdr = ahw->fw_dump.tmpl_hdr;
- tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
+ qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+
dev_info(&adapter->pdev->dev,
"Default minidump capture mask 0x%x\n",
- tmpl_hdr->cap_mask);
+ fw_dump->cap_mask);
- if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
- ahw->fw_dump.use_pex_dma = true;
+ if (qlcnic_83xx_check(adapter) &&
+ (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
+ fw_dump->use_pex_dma = true;
else
- ahw->fw_dump.use_pex_dma = false;
+ fw_dump->use_pex_dma = false;
qlcnic_enable_fw_dump_state(adapter);
@@ -1099,21 +1216,22 @@ flash_temp:
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
- __le32 *buffer;
- u32 ocm_window;
- char mesg[64];
- char *msg[] = {mesg, NULL};
- int i, k, ops_cnt, ops_index, dump_size = 0;
- u32 entry_offset, dump, no_entries, buf_offset = 0;
- struct qlcnic_dump_entry *entry;
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
static const struct qlcnic_dump_operations *fw_dump_ops;
+ struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
+ u32 entry_offset, dump, no_entries, buf_offset = 0;
+ int i, k, ops_cnt, ops_index, dump_size = 0;
struct device *dev = &adapter->pdev->dev;
struct qlcnic_hardware_context *ahw;
- void *temp_buffer;
+ struct qlcnic_dump_entry *entry;
+ void *temp_buffer, *tmpl_hdr;
+ u32 ocm_window;
+ __le32 *buffer;
+ char mesg[64];
+ char *msg[] = {mesg, NULL};
ahw = adapter->ahw;
+ tmpl_hdr = fw_dump->tmpl_hdr;
/* Return if we don't have firmware dump template header */
if (!tmpl_hdr)
@@ -1133,8 +1251,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
/* Calculate the size for dump data area only */
for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
- if (i & tmpl_hdr->drv_cap_mask)
- dump_size += tmpl_hdr->cap_sizes[k];
+ if (i & fw_dump->cap_mask)
+ dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
+
if (!dump_size)
return -EIO;
@@ -1144,10 +1263,10 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
buffer = fw_dump->data;
fw_dump->size = dump_size;
- no_entries = tmpl_hdr->num_entries;
- entry_offset = tmpl_hdr->offset;
- tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
- tmpl_hdr->sys_info[1] = adapter->fw_version;
+ no_entries = fw_dump->num_entries;
+ entry_offset = fw_dump->offset;
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
if (fw_dump->use_pex_dma) {
temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
@@ -1163,16 +1282,17 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
fw_dump_ops = qlcnic_fw_dump_ops;
} else {
+ hdr_83xx = tmpl_hdr;
ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
fw_dump_ops = qlcnic_83xx_fw_dump_ops;
- ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
- tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
- tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+ ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
+ hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+ hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
}
for (i = 0; i < no_entries; i++) {
- entry = (void *)tmpl_hdr + entry_offset;
- if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+ entry = tmpl_hdr + entry_offset;
+ if (!(entry->hdr.mask & fw_dump->cap_mask)) {
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
entry_offset += entry->hdr.offset;
continue;
@@ -1209,8 +1329,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
fw_dump->clr = 1;
snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
- dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n",
- adapter->netdev->name, fw_dump->size, tmpl_hdr->size);
+ netdev_info(adapter->netdev,
+ "Dump data %d bytes captured, template header size %d bytes\n",
+ fw_dump->size, fw_dump->tmpl_hdr_size);
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
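The hunks above stop dereferencing the chip-specific minidump template header directly and instead read values cached in struct qlcnic_fw_dump. A minimal sketch of what a caching helper such as qlcnic_cache_tmpl_hdr_values() is expected to do, inferred only from the fields this patch reads (version, cap_mask, num_entries, offset, tmpl_hdr_size); the 83xx header field names below are assumptions for illustration, not copied from the driver headers:

	static void cache_tmpl_hdr_values_sketch(struct qlcnic_fw_dump *fw_dump)
	{
		/* assumed: the 83xx template header carries the same
		 * bookkeeping fields the old code read from the 82xx header
		 */
		struct qlcnic_83xx_dump_template_hdr *hdr = fw_dump->tmpl_hdr;

		fw_dump->version       = hdr->version;
		fw_dump->cap_mask      = hdr->cap_mask;
		fw_dump->num_entries   = hdr->num_entries;
		fw_dump->offset        = hdr->offset;
		fw_dump->tmpl_hdr_size = hdr->size;
	}

With the values cached once, qlcnic_dump_fw() no longer needs to know whether it is looking at the 82xx or the 83xx header layout.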
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index e5277a632671..14f748cbf0de 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -15,6 +15,7 @@
#define QLC_MAC_OPCODE_MASK 0x7
#define QLC_VF_FLOOD_BIT BIT_16
#define QLC_FLOOD_MODE 0x5
+#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
@@ -335,8 +336,11 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
return err;
cmd.req.arg[1] = 0x4;
- if (enable)
+ if (enable) {
cmd.req.arg[1] |= BIT_16;
+ if (qlcnic_84xx_check(adapter))
+ cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+ }
err = qlcnic_issue_cmd(adapter, &cmd);
if (err)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 3d64113a35af..448d156c3d08 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -350,33 +350,15 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
-static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 count = 0;
-
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
- return ahw->total_nic_func;
-
- if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT)
- count = QLC_DEFAULT_VNIC_COUNT;
- else
- count = ahw->max_vnic_func;
-
- return count;
-}
-
int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int i;
- for (i = 0; i < pci_func_count; i++) {
+ for (i = 0; i < adapter->ahw->max_vnic_func; i++) {
if (adapter->npars[i].pci_func == pci_func)
return i;
}
-
- return -1;
+ return -EINVAL;
}
static int validate_pm_config(struct qlcnic_adapter *adapter,
@@ -464,23 +446,21 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_pm_func_cfg *pm_cfg;
- int i, pm_cfg_size;
u8 pci_func;
+ u32 count;
+ int i;
- pm_cfg_size = pci_func_count * sizeof(*pm_cfg);
- if (size != pm_cfg_size)
- return QL_STATUS_INVALID_PARAM;
-
- memset(buf, 0, pm_cfg_size);
+ memset(buf, 0, size);
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
-
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_pm_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
pci_func = adapter->npars[i].pci_func;
- if (!adapter->npars[i].active)
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
continue;
-
+ }
if (!adapter->npars[i].eswitch_status)
continue;
@@ -494,7 +474,6 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
static int validate_esw_config(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg, int count)
{
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_hardware_context *ahw = adapter->ahw;
int i, ret;
u32 op_mode;
@@ -507,7 +486,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- if (pci_func >= pci_func_count)
+ if (pci_func >= ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
@@ -642,23 +621,21 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_esw_func_cfg *esw_cfg;
- size_t esw_cfg_size;
- u8 i, pci_func;
-
- esw_cfg_size = pci_func_count * sizeof(*esw_cfg);
- if (size != esw_cfg_size)
- return QL_STATUS_INVALID_PARAM;
+ u8 pci_func;
+ u32 count;
+ int i;
- memset(buf, 0, esw_cfg_size);
+ memset(buf, 0, size);
esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
-
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_esw_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
pci_func = adapter->npars[i].pci_func;
- if (!adapter->npars[i].active)
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
continue;
-
+ }
if (!adapter->npars[i].eswitch_status)
continue;
@@ -741,23 +718,24 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
- size_t np_cfg_size;
int i, ret;
-
- np_cfg_size = pci_func_count * sizeof(*np_cfg);
- if (size != np_cfg_size)
- return QL_STATUS_INVALID_PARAM;
+ u32 count;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
- memset(buf, 0, np_cfg_size);
+ memset(buf, 0, size);
np_cfg = (struct qlcnic_npar_func_cfg *)buf;
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_npar_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (qlcnic_is_valid_nic_func(adapter, i) < 0)
continue;
+ if (adapter->npars[i].pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
+ continue;
+ }
ret = qlcnic_get_nic_info(adapter, &nic_info, i);
if (ret)
return ret;
@@ -783,7 +761,6 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_esw_statistics port_stats;
int ret;
@@ -793,7 +770,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
- if (offset >= pci_func_count)
+ if (offset >= adapter->ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
memset(&port_stats, 0, size);
@@ -884,13 +861,12 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int ret;
if (qlcnic_83xx_check(adapter))
return QLC_STATUS_UNSUPPORTED_CMD;
- if (offset >= pci_func_count)
+ if (offset >= adapter->ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -914,17 +890,12 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_pci_func_cfg *pci_cfg;
struct qlcnic_pci_info *pci_info;
- size_t pci_cfg_sz;
int i, ret;
+ u32 count;
- pci_cfg_sz = pci_func_count * sizeof(*pci_cfg);
- if (size != pci_cfg_sz)
- return QL_STATUS_INVALID_PARAM;
-
- pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -935,7 +906,8 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
}
pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_pci_func_cfg);
+ for (i = 0; i < count; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
pci_cfg[i].func_state = 0;
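Taken together, the sysfs changes above drop the requirement that the user buffer exactly match the kernel's PCI-function count; the entry count is instead derived from the buffer size and any function index that does not fit is skipped. A condensed sketch of that bounds-check pattern, assuming the qlcnic_pm_func_cfg layout from the hunk (other names are illustrative):

	u32 count = size / sizeof(struct qlcnic_pm_func_cfg);
	int i;

	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
		u8 pci_func = adapter->npars[i].pci_func;

		if (pci_func >= count)
			continue;	/* entry would not fit in the caller's buffer */
		/* fill pm_cfg[pci_func] for this function */
	}

Presumably this keeps management applications working even when their notion of the function count differs from what the adapter currently exposes.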
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 656c65ddadb4..0a1d76acab81 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2556,11 +2556,10 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
if (skb_is_gso(skb)) {
int err;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
@@ -3331,24 +3330,16 @@ static void ql_enable_msix(struct ql_adapter *qdev)
for (i = 0; i < qdev->intr_count; i++)
qdev->msi_x_entry[i].entry = i;
- /* Loop to get our vectors. We start with
- * what we want and settle for what we get.
- */
- do {
- err = pci_enable_msix(qdev->pdev,
- qdev->msi_x_entry, qdev->intr_count);
- if (err > 0)
- qdev->intr_count = err;
- } while (err > 0);
-
+ err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
+ 1, qdev->intr_count);
if (err < 0) {
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
netif_warn(qdev, ifup, qdev->ndev,
"MSI-X Enable failed, trying MSI.\n");
- qdev->intr_count = 1;
qlge_irq_type = MSI_IRQ;
- } else if (err == 0) {
+ } else {
+ qdev->intr_count = err;
set_bit(QL_MSIX_ENABLED, &qdev->flags);
netif_info(qdev, ifup, qdev->ndev,
"MSI-X Enabled, got %d vectors.\n",
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 819b74cefd64..cd045ecb9816 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -270,11 +270,6 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
return r6040_phy_write(ioaddr, phy_addr, reg, value);
}
-static int r6040_mdiobus_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static void r6040_free_txbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
@@ -1191,7 +1186,6 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
lp->mii_bus->priv = dev;
lp->mii_bus->read = r6040_mdiobus_read;
lp->mii_bus->write = r6040_mdiobus_write;
- lp->mii_bus->reset = r6040_mdiobus_reset;
lp->mii_bus->name = "r6040_eth_mii";
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
dev_name(&pdev->dev), card_idx);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 737c1a881f78..2bc728e65e24 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -476,7 +476,7 @@ rx_status_loop:
rx = 0;
cpw16(IntrStatus, cp_rx_intr_mask);
- while (1) {
+ while (rx < budget) {
u32 status, len;
dma_addr_t mapping, new_mapping;
struct sk_buff *skb, *new_skb;
@@ -554,9 +554,6 @@ rx_next:
else
desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
rx_tail = NEXT_RX(rx_tail);
-
- if (rx >= budget)
- break;
}
cp->rx_tail = rx_tail;
@@ -899,7 +896,7 @@ out_unlock:
return NETDEV_TX_OK;
out_dma_error:
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
cp->dev->stats.tx_dropped++;
goto out_unlock;
}
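The 8139cp RX hunk moves the budget test into the loop condition, so the poll routine can never process more than budget packets even on its first pass, and the TX error path switches to dev_kfree_skb_any(), which is safe whether the caller runs in process, softirq or hard-irq context. A minimal NAPI poll sketch following the same convention; rx_one() is a hypothetical per-descriptor handler, not part of the driver:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int rx = 0;

		while (rx < budget && rx_one(napi))	/* stop exactly at budget */
			rx++;

		if (rx < budget)
			napi_complete(napi);	/* no more work: re-enable interrupts */

		return rx;
	}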
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index da5972eefdd2..2e5df148af4c 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1717,9 +1717,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
if (len < ETH_ZLEN)
memset(tp->tx_buf[entry], 0, ETH_ZLEN);
skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
} else {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -2522,16 +2522,16 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
netdev_stats_to_stats64(stats, &dev->stats);
do {
- start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
stats->rx_packets = tp->rx_stats.packets;
stats->rx_bytes = tp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
do {
- start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
stats->tx_packets = tp->tx_stats.packets;
stats->tx_bytes = tp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
return stats;
}
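The 8139too hunks switch the statistics readers from the _bh to the _irq u64_stats helpers, matching counters that can be updated from hard-irq context, and free skbs in the transmit path with dev_kfree_skb_any() for the same context-safety reason. For completeness, a hedged sketch of the writer side these reader loops pair with (not part of this patch); 'tp' mirrors the driver's private-struct name:

	u64_stats_update_begin(&tp->rx_stats.syncp);
	tp->rx_stats.packets++;
	tp->rx_stats.bytes += skb->len;
	u64_stats_update_end(&tp->rx_stats.syncp);

The begin/end pair bumps a sequence count so the fetch/retry loop in the reader sees a consistent packets/bytes pair on 32-bit machines.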
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3ff7bc3e7a23..aa1c079f231d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5834,7 +5834,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
tp->TxDescArray + entry);
if (skb) {
tp->dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
tx_skb->skb = NULL;
}
}
@@ -6059,7 +6059,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
err_dma_1:
rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
err_update_stats:
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -6142,7 +6142,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
tp->tx_stats.packets++;
tp->tx_stats.bytes += tx_skb->skb->len;
u64_stats_update_end(&tp->tx_stats.syncp);
- dev_kfree_skb(tx_skb->skb);
+ dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
@@ -6590,17 +6590,17 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
rtl8169_rx_missed(dev, ioaddr);
do {
- start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
stats->rx_packets = tp->rx_stats.packets;
stats->rx_bytes = tp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
do {
- start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
stats->tx_packets = tp->tx_stats.packets;
stats->tx_bytes = tp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 040cb94e8219..6a9509ccd33b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,8 +1,9 @@
/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
- * Copyright (C) 2008-2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Cogent Embedded, Inc.
+ * Copyright (C) 2008-2014 Renesas Solutions Corp.
+ * Copyright (C) 2013-2014 Cogent Embedded, Inc.
+ * Copyright (C) 2014 Codethink Limited
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,6 +28,10 @@
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
@@ -36,6 +41,7 @@
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
+#include <linux/of_mdio.h>
#include "sh_eth.h"
@@ -394,7 +400,8 @@ static void sh_eth_select_mii(struct net_device *ndev)
value = 0x0;
break;
default:
- pr_warn("PHY interface mode was not setup. Set to MII.\n");
+ netdev_warn(ndev,
+ "PHY interface mode was not setup. Set to MII.\n");
value = 0x1;
break;
}
@@ -848,7 +855,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
cnt--;
}
if (cnt <= 0) {
- pr_err("Device reset failed\n");
+ netdev_err(ndev, "Device reset failed\n");
ret = -ETIMEDOUT;
}
return ret;
@@ -866,7 +873,7 @@ static int sh_eth_reset(struct net_device *ndev)
ret = sh_eth_check_reset(ndev);
if (ret)
- goto out;
+ return ret;
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
@@ -893,7 +900,6 @@ static int sh_eth_reset(struct net_device *ndev)
EDMR);
}
-out:
return ret;
}
@@ -1257,7 +1263,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
/* Soft Reset */
ret = sh_eth_reset(ndev);
if (ret)
- goto out;
+ return ret;
if (mdp->cd->rmiimode)
sh_eth_write(ndev, 0x1, RMIIMODE);
@@ -1336,7 +1342,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
netif_start_queue(ndev);
}
-out:
return ret;
}
@@ -1550,8 +1555,7 @@ ignore_link:
/* Unused write back interrupt */
if (intr_status & EESR_TABT) { /* Transmit Abort int */
ndev->stats.tx_aborted_errors++;
- if (netif_msg_tx_err(mdp))
- dev_err(&ndev->dev, "Transmit Abort\n");
+ netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
}
}
@@ -1560,45 +1564,38 @@ ignore_link:
if (intr_status & EESR_RFRMER) {
/* Receive Frame Overflow int */
ndev->stats.rx_frame_errors++;
- if (netif_msg_rx_err(mdp))
- dev_err(&ndev->dev, "Receive Abort\n");
+ netif_err(mdp, rx_err, ndev, "Receive Abort\n");
}
}
if (intr_status & EESR_TDE) {
/* Transmit Descriptor Empty int */
ndev->stats.tx_fifo_errors++;
- if (netif_msg_tx_err(mdp))
- dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+ netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
}
if (intr_status & EESR_TFE) {
/* FIFO under flow */
ndev->stats.tx_fifo_errors++;
- if (netif_msg_tx_err(mdp))
- dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
+ netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
}
if (intr_status & EESR_RDE) {
/* Receive Descriptor Empty int */
ndev->stats.rx_over_errors++;
-
- if (netif_msg_rx_err(mdp))
- dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+ netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
}
if (intr_status & EESR_RFE) {
/* Receive FIFO Overflow int */
ndev->stats.rx_fifo_errors++;
- if (netif_msg_rx_err(mdp))
- dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+ netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
}
if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
/* Address Error */
ndev->stats.tx_fifo_errors++;
- if (netif_msg_tx_err(mdp))
- dev_err(&ndev->dev, "Address Error\n");
+ netif_err(mdp, tx_err, ndev, "Address Error\n");
}
mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1609,9 +1606,9 @@ ignore_link:
u32 edtrr = sh_eth_read(ndev, EDTRR);
/* dmesg */
- dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
- intr_status, mdp->cur_tx, mdp->dirty_tx,
- (u32)ndev->state, edtrr);
+ netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
+ intr_status, mdp->cur_tx, mdp->dirty_tx,
+ (u32)ndev->state, edtrr);
/* dirty buffer free */
sh_eth_txfree(ndev);
@@ -1656,9 +1653,9 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
EESIPR);
__napi_schedule(&mdp->napi);
} else {
- dev_warn(&ndev->dev,
- "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
- intr_status, intr_enable);
+ netdev_warn(ndev,
+ "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
+ intr_status, intr_enable);
}
}
@@ -1757,27 +1754,42 @@ static void sh_eth_adjust_link(struct net_device *ndev)
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
+ struct device_node *np = ndev->dev.parent->of_node;
struct sh_eth_private *mdp = netdev_priv(ndev);
- char phy_id[MII_BUS_ID_SIZE + 3];
struct phy_device *phydev = NULL;
- snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
- mdp->mii_bus->id, mdp->phy_id);
-
mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;
/* Try connect to PHY */
- phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
- mdp->phy_interface);
+ if (np) {
+ struct device_node *pn;
+
+ pn = of_parse_phandle(np, "phy-handle", 0);
+ phydev = of_phy_connect(ndev, pn,
+ sh_eth_adjust_link, 0,
+ mdp->phy_interface);
+
+ if (!phydev)
+ phydev = ERR_PTR(-ENOENT);
+ } else {
+ char phy_id[MII_BUS_ID_SIZE + 3];
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ mdp->mii_bus->id, mdp->phy_id);
+
+ phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
+ mdp->phy_interface);
+ }
+
if (IS_ERR(phydev)) {
- dev_err(&ndev->dev, "phy_connect failed\n");
+ netdev_err(ndev, "failed to connect PHY\n");
return PTR_ERR(phydev);
}
- dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
- phydev->addr, phydev->irq, phydev->drv->name);
+ netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
+ phydev->addr, phydev->irq, phydev->drv->name);
mdp->phydev = phydev;
@@ -1958,12 +1970,12 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
ret = sh_eth_ring_init(ndev);
if (ret < 0) {
- dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
+ netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
return ret;
}
ret = sh_eth_dev_init(ndev, false);
if (ret < 0) {
- dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
+ netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
return ret;
}
@@ -2004,7 +2016,7 @@ static int sh_eth_open(struct net_device *ndev)
ret = request_irq(ndev->irq, sh_eth_interrupt,
mdp->cd->irq_flags, ndev->name, ndev);
if (ret) {
- dev_err(&ndev->dev, "Can not assign IRQ number\n");
+ netdev_err(ndev, "Can not assign IRQ number\n");
goto out_napi_off;
}
@@ -2042,10 +2054,9 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
netif_stop_queue(ndev);
- if (netif_msg_timer(mdp)) {
- dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
- ndev->name, (int)sh_eth_read(ndev, EESR));
- }
+ netif_err(mdp, timer, ndev,
+ "transmit timed out, status %8.8x, resetting...\n",
+ (int)sh_eth_read(ndev, EESR));
/* tx_errors count up */
ndev->stats.tx_errors++;
@@ -2080,8 +2091,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_lock_irqsave(&mdp->lock, flags);
if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
if (!sh_eth_txfree(ndev)) {
- if (netif_msg_tx_queued(mdp))
- dev_warn(&ndev->dev, "TxFD exhausted.\n");
+ netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
netif_stop_queue(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
return NETDEV_TX_BUSY;
@@ -2098,8 +2108,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb->len + 2);
txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (skb->len < ETHERSMALL)
- txdesc->buffer_length = ETHERSMALL;
+ if (skb->len < ETH_ZLEN)
+ txdesc->buffer_length = ETH_ZLEN;
else
txdesc->buffer_length = skb->len;
@@ -2251,7 +2261,7 @@ static int sh_eth_tsu_busy(struct net_device *ndev)
udelay(10);
timeout--;
if (timeout <= 0) {
- dev_err(&ndev->dev, "%s: timeout\n", __func__);
+ netdev_err(ndev, "%s: timeout\n", __func__);
return -ETIMEDOUT;
}
}
@@ -2571,37 +2581,30 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
}
/* MDIO bus release function */
-static int sh_mdio_release(struct net_device *ndev)
+static int sh_mdio_release(struct sh_eth_private *mdp)
{
- struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
-
/* unregister mdio bus */
- mdiobus_unregister(bus);
-
- /* remove mdio bus info from net_device */
- dev_set_drvdata(&ndev->dev, NULL);
+ mdiobus_unregister(mdp->mii_bus);
/* free bitbang info */
- free_mdio_bitbang(bus);
+ free_mdio_bitbang(mdp->mii_bus);
return 0;
}
/* MDIO bus init function */
-static int sh_mdio_init(struct net_device *ndev, int id,
+static int sh_mdio_init(struct sh_eth_private *mdp,
struct sh_eth_plat_data *pd)
{
int ret, i;
struct bb_info *bitbang;
- struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct platform_device *pdev = mdp->pdev;
+ struct device *dev = &mdp->pdev->dev;
/* create bit control struct for PHY */
- bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
- GFP_KERNEL);
- if (!bitbang) {
- ret = -ENOMEM;
- goto out;
- }
+ bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
+ if (!bitbang)
+ return -ENOMEM;
/* bitbang init */
bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
@@ -2614,44 +2617,42 @@ static int sh_mdio_init(struct net_device *ndev, int id,
/* MII controller setting */
mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
- if (!mdp->mii_bus) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!mdp->mii_bus)
+ return -ENOMEM;
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
- mdp->mii_bus->parent = &ndev->dev;
+ mdp->mii_bus->parent = dev;
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- mdp->pdev->name, id);
+ pdev->name, pdev->id);
/* PHY IRQ */
- mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
- sizeof(int) * PHY_MAX_ADDR,
+ mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
GFP_KERNEL);
if (!mdp->mii_bus->irq) {
ret = -ENOMEM;
goto out_free_bus;
}
- for (i = 0; i < PHY_MAX_ADDR; i++)
- mdp->mii_bus->irq[i] = PHY_POLL;
- if (pd->phy_irq > 0)
- mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
+ /* register MDIO bus */
+ if (dev->of_node) {
+ ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
+ } else {
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mdp->mii_bus->irq[i] = PHY_POLL;
+ if (pd->phy_irq > 0)
+ mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
+
+ ret = mdiobus_register(mdp->mii_bus);
+ }
- /* register mdio bus */
- ret = mdiobus_register(mdp->mii_bus);
if (ret)
goto out_free_bus;
- dev_set_drvdata(&ndev->dev, mdp->mii_bus);
-
return 0;
out_free_bus:
free_mdio_bitbang(mdp->mii_bus);
-
-out:
return ret;
}
@@ -2676,7 +2677,6 @@ static const u16 *sh_eth_get_register_offset(int register_type)
reg_offset = sh_eth_offset_fast_sh3_sh2;
break;
default:
- pr_err("Unknown register type (%d)\n", register_type);
break;
}
@@ -2710,6 +2710,48 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_change_mtu = eth_change_mtu,
};
+#ifdef CONFIG_OF
+static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct sh_eth_plat_data *pdata;
+ const char *mac_addr;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->phy_interface = of_get_phy_mode(np);
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+
+ pdata->no_ether_link =
+ of_property_read_bool(np, "renesas,no-ether-link");
+ pdata->ether_link_active_low =
+ of_property_read_bool(np, "renesas,ether-link-active-low");
+
+ return pdata;
+}
+
+static const struct of_device_id sh_eth_match_table[] = {
+ { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
+ { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
+ { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
+ { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sh_eth_match_table);
+#else
+static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int sh_eth_drv_probe(struct platform_device *pdev)
{
int ret, devno = 0;
@@ -2723,15 +2765,15 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(res == NULL)) {
dev_err(&pdev->dev, "invalid resource\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
ndev = alloc_etherdev(sizeof(struct sh_eth_private));
- if (!ndev) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!ndev)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
/* The sh Ether-specific entries in the device structure. */
ndev->base_addr = res->start;
@@ -2760,9 +2802,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
spin_lock_init(&mdp->lock);
mdp->pdev = pdev;
- pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
+ if (pdev->dev.of_node)
+ pd = sh_eth_parse_dt(&pdev->dev);
if (!pd) {
dev_err(&pdev->dev, "no platform data\n");
ret = -EINVAL;
@@ -2778,8 +2820,22 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp->ether_link_active_low = pd->ether_link_active_low;
/* set cpu data */
- mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ if (id) {
+ mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ } else {
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(sh_eth_match_table),
+ &pdev->dev);
+ mdp->cd = (struct sh_eth_cpu_data *)match->data;
+ }
mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
+ if (!mdp->reg_offset) {
+ dev_err(&pdev->dev, "Unknown register type (%d)\n",
+ mdp->cd->register_type);
+ ret = -EINVAL;
+ goto out_release;
+ }
sh_eth_set_default_cpu_data(mdp->cd);
/* set function */
@@ -2825,6 +2881,13 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
}
}
+ /* MDIO bus init */
+ ret = sh_mdio_init(mdp, pd);
+ if (ret) {
+ dev_err(&ndev->dev, "failed to initialise MDIO\n");
+ goto out_release;
+ }
+
netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
/* network device register */
@@ -2832,31 +2895,26 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
if (ret)
goto out_napi_del;
- /* mdio bus init */
- ret = sh_mdio_init(ndev, pdev->id, pd);
- if (ret)
- goto out_unregister;
-
/* print device information */
- pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
- (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+ netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+ pm_runtime_put(&pdev->dev);
platform_set_drvdata(pdev, ndev);
return ret;
-out_unregister:
- unregister_netdev(ndev);
-
out_napi_del:
netif_napi_del(&mdp->napi);
+ sh_mdio_release(mdp);
out_release:
/* net_dev free */
if (ndev)
free_netdev(ndev);
-out:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -2865,9 +2923,9 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct sh_eth_private *mdp = netdev_priv(ndev);
- sh_mdio_release(ndev);
unregister_netdev(ndev);
netif_napi_del(&mdp->napi);
+ sh_mdio_release(mdp);
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
@@ -2920,6 +2978,7 @@ static struct platform_driver sh_eth_driver = {
.driver = {
.name = CARDNAME,
.pm = SH_ETH_PM_OPS,
+ .of_match_table = of_match_ptr(sh_eth_match_table),
},
};
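The sh_eth changes add device-tree probing alongside the existing platform-data path: per-SoC data comes from the OF match table when no platform_device_id is available, the MDIO bus is registered with of_mdiobus_register() so PHY IRQs come from the DT, and the PHY itself is resolved through the "phy-handle" phandle. A condensed sketch of the resulting dual PHY-connect path, with error handling trimmed; example_adjust_link(), bus_id and phy_addr are placeholders:

	struct device_node *np = ndev->dev.parent->of_node;
	struct phy_device *phydev;

	if (np) {
		struct device_node *pn = of_parse_phandle(np, "phy-handle", 0);

		phydev = of_phy_connect(ndev, pn, example_adjust_link, 0, iface);
		if (!phydev)
			phydev = ERR_PTR(-ENOENT);
	} else {
		char phy_id[MII_BUS_ID_SIZE + 3];

		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, bus_id, phy_addr);
		phydev = phy_connect(ndev, phy_id, example_adjust_link, iface);
	}

In the DT case the manual PHY_POLL/phy_irq initialization of mii_bus->irq[] is unnecessary, which is why the hunk only keeps it on the platform-data branch.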
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 6075915b88ec..d55e37cd5fec 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,8 +27,7 @@
#define RX_RING_MIN 64
#define TX_RING_MAX 1024
#define RX_RING_MAX 1024
-#define ETHERSMALL 60
-#define PKT_BUF_SZ 1538
+#define PKT_BUF_SZ 1538
#define SH_ETH_TSU_TIMEOUT_MS 500
#define SH_ETH_TSU_CAM_ENTRIES 32
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
new file mode 100644
index 000000000000..2360d8150777
--- /dev/null
+++ b/drivers/net/ethernet/samsung/Kconfig
@@ -0,0 +1,32 @@
+#
+# Samsung Ethernet device configuration
+#
+
+config NET_VENDOR_SAMSUNG
+ bool "Samsung Ethernet devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) chipset belonging to this class,
+ say Y.
+
+ Note that the answer to this question does not directly affect
+ the kernel: saying N will just cause the configurator to skip all
+ the questions about Samsung chipsets. If you say Y, you will be asked
+ for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_SAMSUNG
+
+config SXGBE_ETH
+ tristate "Samsung 10G/2.5G/1G SXGBE Ethernet driver"
+ depends on HAS_IOMEM && HAS_DMA
+ select PHYLIB
+ select CRC32
+ select PTP_1588_CLOCK
+ ---help---
+ This is the driver for the SXGBE 10G Ethernet IP block found on
+ Samsung platforms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called samsung-sxgbe.
+
+endif # NET_VENDOR_SAMSUNG
diff --git a/drivers/net/ethernet/samsung/Makefile b/drivers/net/ethernet/samsung/Makefile
new file mode 100644
index 000000000000..1773c29b8d76
--- /dev/null
+++ b/drivers/net/ethernet/samsung/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Samsung Ethernet device drivers.
+#
+
+obj-$(CONFIG_SXGBE_ETH) += sxgbe/
diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile
new file mode 100644
index 000000000000..dcc80b9d4370
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o
+samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \
+ sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o sxgbe_mdio.o \
+ sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
new file mode 100644
index 000000000000..6203c7d8550f
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -0,0 +1,535 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SXGBE_COMMON_H__
+#define __SXGBE_COMMON_H__
+
+/* forward references */
+struct sxgbe_desc_ops;
+struct sxgbe_dma_ops;
+struct sxgbe_mtl_ops;
+
+#define SXGBE_RESOURCE_NAME "sam_sxgbeeth"
+#define DRV_MODULE_VERSION "November_2013"
+
+/* MAX HW feature words */
+#define SXGBE_HW_WORDS 3
+
+#define SXGBE_RX_COE_NONE 0
+
+/* CSR Frequency Access Defines */
+#define SXGBE_CSR_F_150M 150000000
+#define SXGBE_CSR_F_250M 250000000
+#define SXGBE_CSR_F_300M 300000000
+#define SXGBE_CSR_F_350M 350000000
+#define SXGBE_CSR_F_400M 400000000
+#define SXGBE_CSR_F_500M 500000000
+
+/* pause time */
+#define SXGBE_PAUSE_TIME 0x200
+
+/* tx queues */
+#define SXGBE_TX_QUEUES 8
+#define SXGBE_RX_QUEUES 16
+
+/* Calculated based on how much time it takes to fill 256KB of Rx memory
+ * at 10Gb speed with a 156MHz clock rate, then set to a little less than
+ * the computed value.
+ */
+#define SXGBE_MAX_DMA_RIWT 0x70
+#define SXGBE_MIN_DMA_RIWT 0x01
+
+/* Tx coalesce parameters */
+#define SXGBE_COAL_TX_TIMER 40000
+#define SXGBE_MAX_COAL_TX_TICK 100000
+#define SXGBE_TX_MAX_FRAMES 512
+#define SXGBE_TX_FRAMES 128
+
+/* SXGBE TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+#define SXGBE_DEFAULT_LIT_LS 0x3E8
+#define SXGBE_DEFAULT_TWT_LS 0x0
+
+/* Flow Control defines */
+#define SXGBE_FLOW_OFF 0
+#define SXGBE_FLOW_RX 1
+#define SXGBE_FLOW_TX 2
+#define SXGBE_FLOW_AUTO (SXGBE_FLOW_TX | SXGBE_FLOW_RX)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+/* errors */
+#define RX_GMII_ERR 0x01
+#define RX_WATCHDOG_ERR 0x02
+#define RX_CRC_ERR 0x03
+#define RX_GAINT_ERR 0x04
+#define RX_IP_HDR_ERR 0x05
+#define RX_PAYLOAD_ERR 0x06
+#define RX_OVERFLOW_ERR 0x07
+
+/* pkt type */
+#define RX_LEN_PKT 0x00
+#define RX_MACCTL_PKT 0x01
+#define RX_DCBCTL_PKT 0x02
+#define RX_ARP_PKT 0x03
+#define RX_OAM_PKT 0x04
+#define RX_UNTAG_PKT 0x05
+#define RX_OTHER_PKT 0x07
+#define RX_SVLAN_PKT 0x08
+#define RX_CVLAN_PKT 0x09
+#define RX_DVLAN_OCVLAN_ICVLAN_PKT 0x0A
+#define RX_DVLAN_OSVLAN_ISVLAN_PKT 0x0B
+#define RX_DVLAN_OSVLAN_ICVLAN_PKT 0x0C
+#define RX_DVLAN_OCVLAN_ISVLAN_PKT 0x0D
+
+#define RX_NOT_IP_PKT 0x00
+#define RX_IPV4_TCP_PKT 0x01
+#define RX_IPV4_UDP_PKT 0x02
+#define RX_IPV4_ICMP_PKT 0x03
+#define RX_IPV4_UNKNOWN_PKT 0x07
+#define RX_IPV6_TCP_PKT 0x09
+#define RX_IPV6_UDP_PKT 0x0A
+#define RX_IPV6_ICMP_PKT 0x0B
+#define RX_IPV6_UNKNOWN_PKT 0x0F
+
+#define RX_NO_PTP 0x00
+#define RX_PTP_SYNC 0x01
+#define RX_PTP_FOLLOW_UP 0x02
+#define RX_PTP_DELAY_REQ 0x03
+#define RX_PTP_DELAY_RESP 0x04
+#define RX_PTP_PDELAY_REQ 0x05
+#define RX_PTP_PDELAY_RESP 0x06
+#define RX_PTP_PDELAY_FOLLOW_UP 0x07
+#define RX_PTP_ANNOUNCE 0x08
+#define RX_PTP_MGMT 0x09
+#define RX_PTP_SIGNAL 0x0A
+#define RX_PTP_RESV_MSG 0x0F
+
+/* EEE-LPI mode flags */
+#define TX_ENTRY_LPI_MODE 0x10
+#define TX_EXIT_LPI_MODE 0x20
+#define RX_ENTRY_LPI_MODE 0x40
+#define RX_EXIT_LPI_MODE 0x80
+
+/* EEE-LPI Interrupt status flag */
+#define LPI_INT_STATUS BIT(5)
+
+/* EEE-LPI Default timer values */
+#define LPI_LINK_STATUS_TIMER 0x3E8
+#define LPI_MAC_WAIT_TIMER 0x00
+
+/* EEE-LPI Control and status definitions */
+#define LPI_CTRL_STATUS_TXA BIT(19)
+#define LPI_CTRL_STATUS_PLSDIS BIT(18)
+#define LPI_CTRL_STATUS_PLS BIT(17)
+#define LPI_CTRL_STATUS_LPIEN BIT(16)
+#define LPI_CTRL_STATUS_TXRSTP BIT(11)
+#define LPI_CTRL_STATUS_RXRSTP BIT(10)
+#define LPI_CTRL_STATUS_RLPIST BIT(9)
+#define LPI_CTRL_STATUS_TLPIST BIT(8)
+#define LPI_CTRL_STATUS_RLPIEX BIT(3)
+#define LPI_CTRL_STATUS_RLPIEN BIT(2)
+#define LPI_CTRL_STATUS_TLPIEX BIT(1)
+#define LPI_CTRL_STATUS_TLPIEN BIT(0)
+
+enum dma_irq_status {
+ tx_hard_error = BIT(0),
+ tx_bump_tc = BIT(1),
+ handle_tx = BIT(2),
+ rx_hard_error = BIT(3),
+ rx_bump_tc = BIT(4),
+ handle_rx = BIT(5),
+};
+
+#define NETIF_F_HW_VLAN_ALL (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_TX | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+/* MMC control defines */
+#define SXGBE_MMC_CTRL_CNT_FRZ 0x00000008
+
+/* SXGBE HW ADDR regs */
+#define SXGBE_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
+ (reg * 8))
+#define SXGBE_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
+ (reg * 8))
+#define SXGBE_MAX_PERFECT_ADDRESSES 32 /* Maximum unicast perfect filtering */
+#define SXGBE_FRAME_FILTER 0x00000004 /* Frame Filter */
+
+/* SXGBE Frame Filter defines */
+#define SXGBE_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define SXGBE_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define SXGBE_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define SXGBE_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define SXGBE_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define SXGBE_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define SXGBE_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define SXGBE_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define SXGBE_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define SXGBE_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+
+#define SXGBE_HASH_TABLE_SIZE 64
+#define SXGBE_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
+#define SXGBE_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
+
+#define SXGBE_HI_REG_AE 0x80000000
+
+/* Minimum and maximum MTU */
+#define MIN_MTU 68
+#define MAX_MTU 9000
+
+#define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \
+ for (queue_num = 0; queue_num < max_queues; queue_num++)
+
+#define DRV_VERSION "1.0.0"
+
+#define SXGBE_MAX_RX_CHANNELS 16
+#define SXGBE_MAX_TX_CHANNELS 16
+
+#define START_MAC_REG_OFFSET 0x0000
+#define MAX_MAC_REG_OFFSET 0x0DFC
+#define START_MTL_REG_OFFSET 0x1000
+#define MAX_MTL_REG_OFFSET 0x18FC
+#define START_DMA_REG_OFFSET 0x3000
+#define MAX_DMA_REG_OFFSET 0x38FC
+
+#define REG_SPACE_SIZE 0x2000
+
+/* sxgbe statistics counters */
+struct sxgbe_extra_stats {
+ /* TX/RX IRQ events */
+ unsigned long tx_underflow_irq;
+ unsigned long tx_process_stopped_irq;
+ unsigned long tx_ctxt_desc_err;
+ unsigned long tx_threshold;
+ unsigned long rx_threshold;
+ unsigned long tx_pkt_n;
+ unsigned long rx_pkt_n;
+ unsigned long normal_irq_n;
+ unsigned long tx_normal_irq_n;
+ unsigned long rx_normal_irq_n;
+ unsigned long napi_poll;
+ unsigned long tx_clean;
+ unsigned long tx_reset_ic_bit;
+ unsigned long rx_process_stopped_irq;
+ unsigned long rx_underflow_irq;
+
+ /* Bus access errors */
+ unsigned long fatal_bus_error_irq;
+ unsigned long tx_read_transfer_err;
+ unsigned long tx_write_transfer_err;
+ unsigned long tx_desc_access_err;
+ unsigned long tx_buffer_access_err;
+ unsigned long tx_data_transfer_err;
+ unsigned long rx_read_transfer_err;
+ unsigned long rx_write_transfer_err;
+ unsigned long rx_desc_access_err;
+ unsigned long rx_buffer_access_err;
+ unsigned long rx_data_transfer_err;
+
+ /* EEE-LPI stats */
+ unsigned long tx_lpi_entry_n;
+ unsigned long tx_lpi_exit_n;
+ unsigned long rx_lpi_entry_n;
+ unsigned long rx_lpi_exit_n;
+ unsigned long eee_wakeup_error_n;
+
+ /* RX specific */
+ /* L2 error */
+ unsigned long rx_code_gmii_err;
+ unsigned long rx_watchdog_err;
+ unsigned long rx_crc_err;
+ unsigned long rx_gaint_pkt_err;
+ unsigned long ip_hdr_err;
+ unsigned long ip_payload_err;
+ unsigned long overflow_error;
+
+ /* L2 Pkt type */
+ unsigned long len_pkt;
+ unsigned long mac_ctl_pkt;
+ unsigned long dcb_ctl_pkt;
+ unsigned long arp_pkt;
+ unsigned long oam_pkt;
+ unsigned long untag_okt;
+ unsigned long other_pkt;
+ unsigned long svlan_tag_pkt;
+ unsigned long cvlan_tag_pkt;
+ unsigned long dvlan_ocvlan_icvlan_pkt;
+ unsigned long dvlan_osvlan_isvlan_pkt;
+ unsigned long dvlan_osvlan_icvlan_pkt;
+ unsigned long dvan_ocvlan_icvlan_pkt;
+
+ /* L3/L4 Pkt type */
+ unsigned long not_ip_pkt;
+ unsigned long ip4_tcp_pkt;
+ unsigned long ip4_udp_pkt;
+ unsigned long ip4_icmp_pkt;
+ unsigned long ip4_unknown_pkt;
+ unsigned long ip6_tcp_pkt;
+ unsigned long ip6_udp_pkt;
+ unsigned long ip6_icmp_pkt;
+ unsigned long ip6_unknown_pkt;
+
+ /* Filter specific */
+ unsigned long vlan_filter_match;
+ unsigned long sa_filter_fail;
+ unsigned long da_filter_fail;
+ unsigned long hash_filter_pass;
+ unsigned long l3_filter_match;
+ unsigned long l4_filter_match;
+
+ /* RX context specific */
+ unsigned long timestamp_dropped;
+ unsigned long rx_msg_type_no_ptp;
+ unsigned long rx_ptp_type_sync;
+ unsigned long rx_ptp_type_follow_up;
+ unsigned long rx_ptp_type_delay_req;
+ unsigned long rx_ptp_type_delay_resp;
+ unsigned long rx_ptp_type_pdelay_req;
+ unsigned long rx_ptp_type_pdelay_resp;
+ unsigned long rx_ptp_type_pdelay_follow_up;
+ unsigned long rx_ptp_announce;
+ unsigned long rx_ptp_mgmt;
+ unsigned long rx_ptp_signal;
+ unsigned long rx_ptp_resv_msg_type;
+};
+
+struct mac_link {
+ int port;
+ int duplex;
+ int speed;
+};
+
+struct mii_regs {
+ unsigned int addr; /* MII Address */
+ unsigned int data; /* MII Data */
+};
+
+struct sxgbe_core_ops {
+ /* MAC core initialization */
+ void (*core_init)(void __iomem *ioaddr);
+ /* Dump MAC registers */
+ void (*dump_regs)(void __iomem *ioaddr);
+ /* Handle extra events on specific interrupts hw dependent */
+ int (*host_irq_status)(void __iomem *ioaddr,
+ struct sxgbe_extra_stats *x);
+ /* Set power management mode (e.g. magic frame) */
+ void (*pmt)(void __iomem *ioaddr, unsigned long mode);
+ /* Set/Get Unicast MAC addresses */
+ void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+ void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+ void (*enable_rx)(void __iomem *ioaddr, bool enable);
+ void (*enable_tx)(void __iomem *ioaddr, bool enable);
+
+ /* controller version specific operations */
+ int (*get_controller_version)(void __iomem *ioaddr);
+
+ /* If supported then get the optional core features */
+ unsigned int (*get_hw_feature)(void __iomem *ioaddr,
+ unsigned char feature_index);
+ /* adjust SXGBE speed */
+ void (*set_speed)(void __iomem *ioaddr, unsigned char speed);
+
+ /* EEE-LPI specific operations */
+ void (*set_eee_mode)(void __iomem *ioaddr);
+ void (*reset_eee_mode)(void __iomem *ioaddr);
+ void (*set_eee_timer)(void __iomem *ioaddr, const int ls,
+ const int tw);
+ void (*set_eee_pls)(void __iomem *ioaddr, const int link);
+
+ /* Enable disable checksum offload operations */
+ void (*enable_rx_csum)(void __iomem *ioaddr);
+ void (*disable_rx_csum)(void __iomem *ioaddr);
+};
+
+const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
+
+struct sxgbe_ops {
+ const struct sxgbe_core_ops *mac;
+ const struct sxgbe_desc_ops *desc;
+ const struct sxgbe_dma_ops *dma;
+ const struct sxgbe_mtl_ops *mtl;
+ struct mii_regs mii; /* MII register Addresses */
+ struct mac_link link;
+ unsigned int ctrl_uid;
+ unsigned int ctrl_id;
+};
+
+/* SXGBE private data structures */
+struct sxgbe_tx_queue {
+ unsigned int irq_no;
+ struct sxgbe_priv_data *priv_ptr;
+ struct sxgbe_tx_norm_desc *dma_tx;
+ dma_addr_t dma_tx_phy;
+ dma_addr_t *tx_skbuff_dma;
+ struct sk_buff **tx_skbuff;
+ struct timer_list txtimer;
+ spinlock_t tx_lock; /* lock for tx queues */
+ unsigned int cur_tx;
+ unsigned int dirty_tx;
+ u32 tx_count_frames;
+ u32 tx_coal_frames;
+ u32 tx_coal_timer;
+ int hwts_tx_en;
+ u16 prev_mss;
+ u8 queue_no;
+};
+
+struct sxgbe_rx_queue {
+ struct sxgbe_priv_data *priv_ptr;
+ struct sxgbe_rx_norm_desc *dma_rx;
+ struct sk_buff **rx_skbuff;
+ unsigned int cur_rx;
+ unsigned int dirty_rx;
+ unsigned int irq_no;
+ u32 rx_riwt;
+ dma_addr_t *rx_skbuff_dma;
+ dma_addr_t dma_rx_phy;
+ u8 queue_no;
+};
+
+/* SXGBE HW capabilities */
+struct sxgbe_hw_features {
+ /****** CAP [0] *******/
+ unsigned int pmt_remote_wake_up;
+ unsigned int pmt_magic_frame;
+ /* IEEE 1588-2008 */
+ unsigned int atime_stamp;
+
+ unsigned int eee;
+
+ unsigned int tx_csum_offload;
+ unsigned int rx_csum_offload;
+ unsigned int multi_macaddr;
+ unsigned int tstamp_srcselect;
+ unsigned int sa_vlan_insert;
+
+ /****** CAP [1] *******/
+ unsigned int rxfifo_size;
+ unsigned int txfifo_size;
+ unsigned int atstmap_hword;
+ unsigned int dcb_enable;
+ unsigned int splithead_enable;
+ unsigned int tcpseg_offload;
+ unsigned int debug_mem;
+ unsigned int rss_enable;
+ unsigned int hash_tsize;
+ unsigned int l3l4_filer_size;
+
+ /* These values are in bytes, as described in the HW features
+ * section of the SXGBE data book
+ */
+ unsigned int rx_mtl_qsize;
+ unsigned int tx_mtl_qsize;
+
+ /****** CAP [2] *******/
+ /* TX and RX number of channels */
+ unsigned int rx_mtl_queues;
+ unsigned int tx_mtl_queues;
+ unsigned int rx_dma_channels;
+ unsigned int tx_dma_channels;
+ unsigned int pps_output_count;
+ unsigned int aux_input_count;
+};
+
+struct sxgbe_priv_data {
+ /* DMA descriptors */
+ struct sxgbe_tx_queue *txq[SXGBE_TX_QUEUES];
+ struct sxgbe_rx_queue *rxq[SXGBE_RX_QUEUES];
+ u8 cur_rx_qnum;
+
+ unsigned int dma_tx_size;
+ unsigned int dma_rx_size;
+ unsigned int dma_buf_sz;
+ u32 rx_riwt;
+
+ struct napi_struct napi;
+
+ void __iomem *ioaddr;
+ struct net_device *dev;
+ struct device *device;
+ struct sxgbe_ops *hw; /* sxgbe specific ops */
+ int no_csum_insertion;
+ int irq;
+ int rxcsum_insertion;
+ spinlock_t stats_lock; /* lock for tx/rx statistics */
+
+ struct phy_device *phydev;
+ int oldlink;
+ int speed;
+ int oldduplex;
+ struct mii_bus *mii;
+ int mii_irq[PHY_MAX_ADDR];
+ u8 rx_pause;
+ u8 tx_pause;
+
+ struct sxgbe_extra_stats xstats;
+ struct sxgbe_plat_data *plat;
+ struct sxgbe_hw_features hw_cap;
+
+ u32 msg_enable;
+
+ struct clk *sxgbe_clk;
+ int clk_csr;
+ unsigned int mode;
+ unsigned int default_addend;
+
+ /* advanced time stamp support */
+ u32 adv_ts;
+ int use_riwt;
+ struct ptp_clock *ptp_clock;
+
+ /* tc control */
+ int tx_tc;
+ int rx_tc;
+ /* EEE-LPI specific members */
+ struct timer_list eee_ctrl_timer;
+ bool tx_path_in_lpi_mode;
+ int lpi_irq;
+ int eee_enabled;
+ int eee_active;
+ int tx_lpi_timer;
+};
+
+/* Function prototypes */
+struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
+ struct sxgbe_plat_data *plat_dat,
+ void __iomem *addr);
+int sxgbe_drv_remove(struct net_device *ndev);
+void sxgbe_set_ethtool_ops(struct net_device *netdev);
+int sxgbe_mdio_unregister(struct net_device *ndev);
+int sxgbe_mdio_register(struct net_device *ndev);
+int sxgbe_register_platform(void);
+void sxgbe_unregister_platform(void);
+
+#ifdef CONFIG_PM
+int sxgbe_suspend(struct net_device *ndev);
+int sxgbe_resume(struct net_device *ndev);
+int sxgbe_freeze(struct net_device *ndev);
+int sxgbe_restore(struct net_device *ndev);
+#endif /* CONFIG_PM */
+
+const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
+
+void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv);
+bool sxgbe_eee_init(struct sxgbe_priv_data * const priv);
+#endif /* __SXGBE_COMMON_H__ */
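A brief usage note for the helpers declared above: SXGBE_FOR_EACH_QUEUE() is a thin wrapper around a counted for loop over the per-direction queue arrays in struct sxgbe_priv_data. A one-line illustration, where init_one_txq() is hypothetical:

	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		init_one_txq(priv->txq[queue_num]);

Similarly, SXGBE_ADDR_HIGH()/SXGBE_ADDR_LOW() test reg > 15 because the MAC address filter registers switch to a second bank at filter index 16.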
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
new file mode 100644
index 000000000000..c4da7a2b002a
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -0,0 +1,262 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+
+/* MAC core initialization */
+static void sxgbe_core_init(void __iomem *ioaddr)
+{
+ u32 regval;
+
+ /* TX configuration */
+ regval = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+ /* Other configurable parameters IFP, IPG, ISR, ISM
+ * need to be set here if required
+ */
+ regval |= SXGBE_TX_JABBER_DISABLE;
+ writel(regval, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+
+ /* RX configuration */
+ regval = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+ /* Other configurable parameters CST, SPEN, USP, GPSLCE
+ * WD, LM, S2KP, HDSMS, GPSL, ELEN, ARPEN need to be
+ * set here if required
+ */
+ regval |= SXGBE_RX_JUMBPKT_ENABLE | SXGBE_RX_ACS_ENABLE;
+ writel(regval, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+/* Dump MAC registers */
+static void sxgbe_core_dump_regs(void __iomem *ioaddr)
+{
+}
+
+static int sxgbe_get_lpi_status(void __iomem *ioaddr, const u32 irq_status)
+{
+ int status = 0;
+ int lpi_status;
+
+ /* Reading this register shall clear all the LPI status bits */
+ lpi_status = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEN)
+ status |= TX_ENTRY_LPI_MODE;
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEX)
+ status |= TX_EXIT_LPI_MODE;
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEN)
+ status |= RX_ENTRY_LPI_MODE;
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEX)
+ status |= RX_EXIT_LPI_MODE;
+
+ return status;
+}
+
+/* Handle extra events on specific interrupts hw dependent */
+static int sxgbe_core_host_irq_status(void __iomem *ioaddr,
+ struct sxgbe_extra_stats *x)
+{
+ int irq_status, status = 0;
+
+ irq_status = readl(ioaddr + SXGBE_CORE_INT_STATUS_REG);
+
+ if (unlikely(irq_status & LPI_INT_STATUS))
+ status |= sxgbe_get_lpi_status(ioaddr, irq_status);
+
+ return status;
+}
+
+/* Set power management mode (e.g. magic frame) */
+static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+}
+
+/* Set/Get Unicast MAC addresses */
+static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ u32 high_word, low_word;
+
+ high_word = (addr[5] << 8) | (addr[4]);
+ low_word = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0]);
+ writel(high_word, ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
+ writel(low_word, ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
+}
+
+static void sxgbe_core_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ u32 high_word, low_word;
+
+ high_word = readl(ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
+ low_word = readl(ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
+
+ /* extract and assign address */
+ addr[5] = (high_word & 0x0000FF00) >> 8;
+ addr[4] = (high_word & 0x000000FF);
+ addr[3] = (low_word & 0xFF000000) >> 24;
+ addr[2] = (low_word & 0x00FF0000) >> 16;
+ addr[1] = (low_word & 0x0000FF00) >> 8;
+ addr[0] = (low_word & 0x000000FF);
+}
+
+static void sxgbe_enable_tx(void __iomem *ioaddr, bool enable)
+{
+ u32 tx_config;
+
+ tx_config = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+ tx_config &= ~SXGBE_TX_ENABLE;
+
+ if (enable)
+ tx_config |= SXGBE_TX_ENABLE;
+ writel(tx_config, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+}
+
+static void sxgbe_enable_rx(void __iomem *ioaddr, bool enable)
+{
+ u32 rx_config;
+
+ rx_config = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+ rx_config &= ~SXGBE_RX_ENABLE;
+
+ if (enable)
+ rx_config |= SXGBE_RX_ENABLE;
+ writel(rx_config, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+static int sxgbe_get_controller_version(void __iomem *ioaddr)
+{
+ return readl(ioaddr + SXGBE_CORE_VERSION_REG);
+}
+
+/* If supported then get the optional core features */
+static unsigned int sxgbe_get_hw_feature(void __iomem *ioaddr,
+ unsigned char feature_index)
+{
+ return readl(ioaddr + (SXGBE_CORE_HW_FEA_REG(feature_index)));
+}
+
+static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
+{
+ u32 tx_cfg = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+
+ /* clear the speed bits */
+ tx_cfg &= ~0x60000000;
+ tx_cfg |= (speed << SXGBE_SPEED_LSHIFT);
+
+ /* set the speed */
+ writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+}
+
+static void sxgbe_set_eee_mode(void __iomem *ioaddr)
+{
+ u32 ctrl;
+
+ /* Enable the LPI mode for transmit path with Tx automate bit set.
+ * When Tx Automate bit is set, MAC internally handles the entry
+ * to LPI mode after all outstanding and pending packets are
+ * transmitted.
+ */
+ ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+ ctrl |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA;
+ writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+}
+
+static void sxgbe_reset_eee_mode(void __iomem *ioaddr)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+ ctrl &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA);
+ writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+}
+
+static void sxgbe_set_eee_pls(void __iomem *ioaddr, const int link)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+
+ /* If the PHY link status is UP then set PLS */
+ if (link)
+ ctrl |= LPI_CTRL_STATUS_PLS;
+ else
+ ctrl &= ~LPI_CTRL_STATUS_PLS;
+
+ writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+}
+
+static void sxgbe_set_eee_timer(void __iomem *ioaddr,
+ const int ls, const int tw)
+{
+ int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
+
+ /* Program the timers in the LPI timer control register:
+ * LS: minimum time (ms) for which the link
+ * status from PHY should be ok before transmitting
+ * the LPI pattern.
+ * TW: minimum time (us) for which the core waits
+ * after it has stopped transmitting the LPI pattern.
+ */
+ writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL);
+}
+
+static void sxgbe_enable_rx_csum(void __iomem *ioaddr)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+ ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
+ writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+static void sxgbe_disable_rx_csum(void __iomem *ioaddr)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+ ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
+ writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+static const struct sxgbe_core_ops core_ops = {
+ .core_init = sxgbe_core_init,
+ .dump_regs = sxgbe_core_dump_regs,
+ .host_irq_status = sxgbe_core_host_irq_status,
+ .pmt = sxgbe_core_pmt,
+ .set_umac_addr = sxgbe_core_set_umac_addr,
+ .get_umac_addr = sxgbe_core_get_umac_addr,
+ .enable_rx = sxgbe_enable_rx,
+ .enable_tx = sxgbe_enable_tx,
+ .get_controller_version = sxgbe_get_controller_version,
+ .get_hw_feature = sxgbe_get_hw_feature,
+ .set_speed = sxgbe_core_set_speed,
+ .set_eee_mode = sxgbe_set_eee_mode,
+ .reset_eee_mode = sxgbe_reset_eee_mode,
+ .set_eee_timer = sxgbe_set_eee_timer,
+ .set_eee_pls = sxgbe_set_eee_pls,
+ .enable_rx_csum = sxgbe_enable_rx_csum,
+ .disable_rx_csum = sxgbe_disable_rx_csum,
+};
+
+const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
+{
+ return &core_ops;
+}
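As a usage sketch only (assumed, not part of this patch): a caller that obtains the ops table through sxgbe_get_core_ops() drives the MAC through these callbacks. example_bringup() is a hypothetical name, and ioaddr is assumed to be the already-mapped register base.

/* Illustrative sketch; error handling and locking omitted. */
static void example_bringup(void __iomem *ioaddr)
{
	const struct sxgbe_core_ops *mac = sxgbe_get_core_ops();
	int version = mac->get_controller_version(ioaddr);

	pr_info("sxgbe core version: 0x%08x\n", version);

	mac->enable_rx_csum(ioaddr);	/* turn on RX checksum offload */
	mac->enable_tx(ioaddr, true);	/* sets SXGBE_TX_ENABLE */
	mac->enable_rx(ioaddr, true);	/* sets SXGBE_RX_ENABLE */
}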
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
new file mode 100644
index 000000000000..e896dbbd2e15
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
@@ -0,0 +1,515 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_dma.h"
+#include "sxgbe_desc.h"
+
+/* DMA TX descriptor ring initialization */
+static void sxgbe_init_tx_desc(struct sxgbe_tx_norm_desc *p)
+{
+ p->tdes23.tx_rd_des23.own_bit = 0;
+}
+
+static void sxgbe_tx_desc_enable_tse(struct sxgbe_tx_norm_desc *p, u8 is_tse,
+ u32 total_hdr_len, u32 tcp_hdr_len,
+ u32 tcp_payload_len)
+{
+ p->tdes23.tx_rd_des23.tse_bit = is_tse;
+ p->tdes23.tx_rd_des23.buf1_size = total_hdr_len;
+ p->tdes23.tx_rd_des23.tcp_hdr_len = tcp_hdr_len / 4;
+ p->tdes23.tx_rd_des23.tx_pkt_len.tcp_payload_len = tcp_payload_len;
+}
+
+/* Assign buffer lengths for descriptor */
+static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
+ int buf1_len, int pkt_len, int cksum)
+{
+ p->tdes23.tx_rd_des23.first_desc = is_fd;
+ p->tdes23.tx_rd_des23.buf1_size = buf1_len;
+
+ p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len;
+
+ if (cksum)
+ p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full;
+}
+
+/* Set VLAN control information */
+static void sxgbe_tx_vlanctl_desc(struct sxgbe_tx_norm_desc *p, int vlan_ctl)
+{
+ p->tdes23.tx_rd_des23.vlan_tag_ctl = vlan_ctl;
+}
+
+/* Set the owner of Normal descriptor */
+static void sxgbe_set_tx_owner(struct sxgbe_tx_norm_desc *p)
+{
+ p->tdes23.tx_rd_des23.own_bit = 1;
+}
+
+/* Get the owner of Normal descriptor */
+static int sxgbe_get_tx_owner(struct sxgbe_tx_norm_desc *p)
+{
+ return p->tdes23.tx_rd_des23.own_bit;
+}
+
+/* Invoked by the xmit function to close the tx descriptor */
+static void sxgbe_close_tx_desc(struct sxgbe_tx_norm_desc *p)
+{
+ p->tdes23.tx_rd_des23.last_desc = 1;
+ p->tdes23.tx_rd_des23.int_on_com = 1;
+}
+
+/* Clean the tx descriptor as soon as the tx irq is received */
+static void sxgbe_release_tx_desc(struct sxgbe_tx_norm_desc *p)
+{
+ memset(p, 0, sizeof(*p));
+}
+
+/* Clear the interrupt-on-completion bit. When this bit is
+ * set, an interrupt is raised as soon as the frame is transmitted.
+ */
+static void sxgbe_clear_tx_ic(struct sxgbe_tx_norm_desc *p)
+{
+ p->tdes23.tx_rd_des23.int_on_com = 0;
+}
+
+/* Last tx segment reports the transmit status */
+static int sxgbe_get_tx_ls(struct sxgbe_tx_norm_desc *p)
+{
+ return p->tdes23.tx_rd_des23.last_desc;
+}
+
+/* Get the buffer size from the descriptor */
+static int sxgbe_get_tx_len(struct sxgbe_tx_norm_desc *p)
+{
+ return p->tdes23.tx_rd_des23.buf1_size;
+}
+
+/* Set tx timestamp enable bit */
+static void sxgbe_tx_enable_tstamp(struct sxgbe_tx_norm_desc *p)
+{
+ p->tdes23.tx_rd_des23.timestmp_enable = 1;
+}
+
+/* get tx timestamp status */
+static int sxgbe_get_tx_timestamp_status(struct sxgbe_tx_norm_desc *p)
+{
+ return p->tdes23.tx_rd_des23.timestmp_enable;
+}
+
+/* TX Context Descriptor specific */
+static void sxgbe_tx_ctxt_desc_set_ctxt(struct sxgbe_tx_ctxt_desc *p)
+{
+ p->ctxt_bit = 1;
+}
+
+/* Set the owner of TX context descriptor */
+static void sxgbe_tx_ctxt_desc_set_owner(struct sxgbe_tx_ctxt_desc *p)
+{
+ p->own_bit = 1;
+}
+
+/* Get the owner of TX context descriptor */
+static int sxgbe_tx_ctxt_desc_get_owner(struct sxgbe_tx_ctxt_desc *p)
+{
+ return p->own_bit;
+}
+
+/* Set TX mss in TX context Descriptor */
+static void sxgbe_tx_ctxt_desc_set_mss(struct sxgbe_tx_ctxt_desc *p, u16 mss)
+{
+ p->maxseg_size = mss;
+}
+
+/* Get TX mss from TX context Descriptor */
+static int sxgbe_tx_ctxt_desc_get_mss(struct sxgbe_tx_ctxt_desc *p)
+{
+ return p->maxseg_size;
+}
+
+/* Set TX tcmssv in TX context Descriptor */
+static void sxgbe_tx_ctxt_desc_set_tcmssv(struct sxgbe_tx_ctxt_desc *p)
+{
+ p->tcmssv = 1;
+}
+
+/* Reset TX ostc in TX context Descriptor */
+static void sxgbe_tx_ctxt_desc_reset_ostc(struct sxgbe_tx_ctxt_desc *p)
+{
+ p->ostc = 0;
+}
+
+/* Set IVLAN information */
+static void sxgbe_tx_ctxt_desc_set_ivlantag(struct sxgbe_tx_ctxt_desc *p,
+ int is_ivlanvalid, int ivlan_tag,
+ int ivlan_ctl)
+{
+ if (is_ivlanvalid) {
+ p->ivlan_tag_valid = is_ivlanvalid;
+ p->ivlan_tag = ivlan_tag;
+ p->ivlan_tag_ctl = ivlan_ctl;
+ }
+}
+
+/* Return IVLAN Tag */
+static int sxgbe_tx_ctxt_desc_get_ivlantag(struct sxgbe_tx_ctxt_desc *p)
+{
+ return p->ivlan_tag;
+}
+
+/* Set VLAN Tag */
+static void sxgbe_tx_ctxt_desc_set_vlantag(struct sxgbe_tx_ctxt_desc *p,
+ int is_vlanvalid, int vlan_tag)
+{
+ if (is_vlanvalid) {
+ p->vltag_valid = is_vlanvalid;
+ p->vlan_tag = vlan_tag;
+ }
+}
+
+/* Return VLAN Tag */
+static int sxgbe_tx_ctxt_desc_get_vlantag(struct sxgbe_tx_ctxt_desc *p)
+{
+ return p->vlan_tag;
+}
+
+/* Set Time stamp */
+static void sxgbe_tx_ctxt_desc_set_tstamp(struct sxgbe_tx_ctxt_desc *p,
+ u8 ostc_enable, u64 tstamp)
+{
+ if (ostc_enable) {
+ p->ostc = ostc_enable;
+ p->tstamp_lo = (u32) tstamp;
+ p->tstamp_hi = (u32) (tstamp >> 32);
+ }
+}
+
+/* Close TX context descriptor */
+static void sxgbe_tx_ctxt_desc_close(struct sxgbe_tx_ctxt_desc *p)
+{
+ p->own_bit = 1;
+}
+
+/* WB status of context descriptor */
+static int sxgbe_tx_ctxt_desc_get_cde(struct sxgbe_tx_ctxt_desc *p)
+{
+ return p->ctxt_desc_err;
+}
+
+/* DMA RX descriptor ring initialization */
+static void sxgbe_init_rx_desc(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
+ int mode, int end)
+{
+ p->rdes23.rx_rd_des23.own_bit = 1;
+ if (disable_rx_ic)
+ p->rdes23.rx_rd_des23.int_on_com = disable_rx_ic;
+}
+
+/* Get RX own bit */
+static int sxgbe_get_rx_owner(struct sxgbe_rx_norm_desc *p)
+{
+ return p->rdes23.rx_rd_des23.own_bit;
+}
+
+/* Set RX own bit */
+static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
+{
+ p->rdes23.rx_rd_des23.own_bit = 1;
+}
+
+/* Get the receive frame size */
+static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
+{
+ return p->rdes23.rx_wb_des23.pkt_len;
+}
+
+/* Return first Descriptor status */
+static int sxgbe_get_rx_fd_status(struct sxgbe_rx_norm_desc *p)
+{
+ return p->rdes23.rx_wb_des23.first_desc;
+}
+
+/* Return Last Descriptor status */
+static int sxgbe_get_rx_ld_status(struct sxgbe_rx_norm_desc *p)
+{
+ return p->rdes23.rx_wb_des23.last_desc;
+}
+
+
+/* Return the RX status looking at the WB fields */
+static int sxgbe_rx_wbstatus(struct sxgbe_rx_norm_desc *p,
+ struct sxgbe_extra_stats *x, int *checksum)
+{
+ int status = 0;
+
+ *checksum = CHECKSUM_UNNECESSARY;
+ if (p->rdes23.rx_wb_des23.err_summary) {
+ switch (p->rdes23.rx_wb_des23.err_l2_type) {
+ case RX_GMII_ERR:
+ status = -EINVAL;
+ x->rx_code_gmii_err++;
+ break;
+ case RX_WATCHDOG_ERR:
+ status = -EINVAL;
+ x->rx_watchdog_err++;
+ break;
+ case RX_CRC_ERR:
+ status = -EINVAL;
+ x->rx_crc_err++;
+ break;
+ case RX_GAINT_ERR:
+ status = -EINVAL;
+ x->rx_gaint_pkt_err++;
+ break;
+ case RX_IP_HDR_ERR:
+ *checksum = CHECKSUM_NONE;
+ x->ip_hdr_err++;
+ break;
+ case RX_PAYLOAD_ERR:
+ *checksum = CHECKSUM_NONE;
+ x->ip_payload_err++;
+ break;
+ case RX_OVERFLOW_ERR:
+ status = -EINVAL;
+ x->overflow_error++;
+ break;
+ default:
+ pr_err("Invalid Error type\n");
+ break;
+ }
+ } else {
+ switch (p->rdes23.rx_wb_des23.err_l2_type) {
+ case RX_LEN_PKT:
+ x->len_pkt++;
+ break;
+ case RX_MACCTL_PKT:
+ x->mac_ctl_pkt++;
+ break;
+ case RX_DCBCTL_PKT:
+ x->dcb_ctl_pkt++;
+ break;
+ case RX_ARP_PKT:
+ x->arp_pkt++;
+ break;
+ case RX_OAM_PKT:
+ x->oam_pkt++;
+ break;
+ case RX_UNTAG_PKT:
+ x->untag_okt++;
+ break;
+ case RX_OTHER_PKT:
+ x->other_pkt++;
+ break;
+ case RX_SVLAN_PKT:
+ x->svlan_tag_pkt++;
+ break;
+ case RX_CVLAN_PKT:
+ x->cvlan_tag_pkt++;
+ break;
+ case RX_DVLAN_OCVLAN_ICVLAN_PKT:
+ x->dvlan_ocvlan_icvlan_pkt++;
+ break;
+ case RX_DVLAN_OSVLAN_ISVLAN_PKT:
+ x->dvlan_osvlan_isvlan_pkt++;
+ break;
+ case RX_DVLAN_OSVLAN_ICVLAN_PKT:
+ x->dvlan_osvlan_icvlan_pkt++;
+ break;
+ case RX_DVLAN_OCVLAN_ISVLAN_PKT:
+ x->dvlan_ocvlan_icvlan_pkt++;
+ break;
+ default:
+ pr_err("Invalid L2 Packet type\n");
+ break;
+ }
+ }
+
+ /* L3/L4 Pkt type */
+ switch (p->rdes23.rx_wb_des23.layer34_pkt_type) {
+ case RX_NOT_IP_PKT:
+ x->not_ip_pkt++;
+ break;
+ case RX_IPV4_TCP_PKT:
+ x->ip4_tcp_pkt++;
+ break;
+ case RX_IPV4_UDP_PKT:
+ x->ip4_udp_pkt++;
+ break;
+ case RX_IPV4_ICMP_PKT:
+ x->ip4_icmp_pkt++;
+ break;
+ case RX_IPV4_UNKNOWN_PKT:
+ x->ip4_unknown_pkt++;
+ break;
+ case RX_IPV6_TCP_PKT:
+ x->ip6_tcp_pkt++;
+ break;
+ case RX_IPV6_UDP_PKT:
+ x->ip6_udp_pkt++;
+ break;
+ case RX_IPV6_ICMP_PKT:
+ x->ip6_icmp_pkt++;
+ break;
+ case RX_IPV6_UNKNOWN_PKT:
+ x->ip6_unknown_pkt++;
+ break;
+ default:
+ pr_err("Invalid L3/L4 Packet type\n");
+ break;
+ }
+
+ /* Filter */
+ if (p->rdes23.rx_wb_des23.vlan_filter_match)
+ x->vlan_filter_match++;
+
+ if (p->rdes23.rx_wb_des23.sa_filter_fail) {
+ status = -EINVAL;
+ x->sa_filter_fail++;
+ }
+ if (p->rdes23.rx_wb_des23.da_filter_fail) {
+ status = -EINVAL;
+ x->da_filter_fail++;
+ }
+ if (p->rdes23.rx_wb_des23.hash_filter_pass)
+ x->hash_filter_pass++;
+
+ if (p->rdes23.rx_wb_des23.l3_filter_match)
+ x->l3_filter_match++;
+
+ if (p->rdes23.rx_wb_des23.l4_filter_match)
+ x->l4_filter_match++;
+
+ return status;
+}
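A minimal sketch of how an RX clean routine might consume this write-back status (assumed shape; example_rx_handle() is a hypothetical helper, and the real consumer lives in sxgbe_main.c, outside this hunk):

/* Illustrative sketch: drop errored frames, propagate checksum state. */
static void example_rx_handle(struct sxgbe_rx_norm_desc *p,
			      struct sxgbe_extra_stats *stats,
			      struct sk_buff *skb)
{
	int checksum;
	int status = sxgbe_rx_wbstatus(p, stats, &checksum);

	if (status < 0) {
		dev_kfree_skb_any(skb);	/* the frame carried an error */
		return;
	}

	/* CHECKSUM_UNNECESSARY, or CHECKSUM_NONE on IP header/payload error */
	skb->ip_summed = checksum;
	/* ... then hand the skb to the stack ... */
}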
+
+/* Get own bit of context descriptor */
+static int sxgbe_get_rx_ctxt_owner(struct sxgbe_rx_ctxt_desc *p)
+{
+ return p->own_bit;
+}
+
+/* Set own bit for context descriptor */
+static void sxgbe_set_ctxt_rx_owner(struct sxgbe_rx_ctxt_desc *p)
+{
+ p->own_bit = 1;
+}
+
+
+/* Return the reception status looking at Context control information */
+static void sxgbe_rx_ctxt_wbstatus(struct sxgbe_rx_ctxt_desc *p,
+ struct sxgbe_extra_stats *x)
+{
+ if (p->tstamp_dropped)
+ x->timestamp_dropped++;
+
+ /* ptp */
+ if (p->ptp_msgtype == RX_NO_PTP)
+ x->rx_msg_type_no_ptp++;
+ else if (p->ptp_msgtype == RX_PTP_SYNC)
+ x->rx_ptp_type_sync++;
+ else if (p->ptp_msgtype == RX_PTP_FOLLOW_UP)
+ x->rx_ptp_type_follow_up++;
+ else if (p->ptp_msgtype == RX_PTP_DELAY_REQ)
+ x->rx_ptp_type_delay_req++;
+ else if (p->ptp_msgtype == RX_PTP_DELAY_RESP)
+ x->rx_ptp_type_delay_resp++;
+ else if (p->ptp_msgtype == RX_PTP_PDELAY_REQ)
+ x->rx_ptp_type_pdelay_req++;
+ else if (p->ptp_msgtype == RX_PTP_PDELAY_RESP)
+ x->rx_ptp_type_pdelay_resp++;
+ else if (p->ptp_msgtype == RX_PTP_PDELAY_FOLLOW_UP)
+ x->rx_ptp_type_pdelay_follow_up++;
+ else if (p->ptp_msgtype == RX_PTP_ANNOUNCE)
+ x->rx_ptp_announce++;
+ else if (p->ptp_msgtype == RX_PTP_MGMT)
+ x->rx_ptp_mgmt++;
+ else if (p->ptp_msgtype == RX_PTP_SIGNAL)
+ x->rx_ptp_signal++;
+ else if (p->ptp_msgtype == RX_PTP_RESV_MSG)
+ x->rx_ptp_resv_msg_type++;
+}
+
+/* Get rx timestamp status */
+static int sxgbe_get_rx_ctxt_tstamp_status(struct sxgbe_rx_ctxt_desc *p)
+{
+ if ((p->tstamp_hi == 0xffffffff) && (p->tstamp_lo == 0xffffffff)) {
+ pr_err("Time stamp corrupted\n");
+ return 0;
+ }
+
+ return p->tstamp_available;
+}
+
+
+static u64 sxgbe_get_rx_timestamp(struct sxgbe_rx_ctxt_desc *p)
+{
+ u64 ns;
+
+ ns = p->tstamp_lo;
+ ns |= ((u64)p->tstamp_hi) << 32;
+
+ return ns;
+}
+
+static const struct sxgbe_desc_ops desc_ops = {
+ .init_tx_desc = sxgbe_init_tx_desc,
+ .tx_desc_enable_tse = sxgbe_tx_desc_enable_tse,
+ .prepare_tx_desc = sxgbe_prepare_tx_desc,
+ .tx_vlanctl_desc = sxgbe_tx_vlanctl_desc,
+ .set_tx_owner = sxgbe_set_tx_owner,
+ .get_tx_owner = sxgbe_get_tx_owner,
+ .close_tx_desc = sxgbe_close_tx_desc,
+ .release_tx_desc = sxgbe_release_tx_desc,
+ .clear_tx_ic = sxgbe_clear_tx_ic,
+ .get_tx_ls = sxgbe_get_tx_ls,
+ .get_tx_len = sxgbe_get_tx_len,
+ .tx_enable_tstamp = sxgbe_tx_enable_tstamp,
+ .get_tx_timestamp_status = sxgbe_get_tx_timestamp_status,
+ .tx_ctxt_desc_set_ctxt = sxgbe_tx_ctxt_desc_set_ctxt,
+ .tx_ctxt_desc_set_owner = sxgbe_tx_ctxt_desc_set_owner,
+ .get_tx_ctxt_owner = sxgbe_tx_ctxt_desc_get_owner,
+ .tx_ctxt_desc_set_mss = sxgbe_tx_ctxt_desc_set_mss,
+ .tx_ctxt_desc_get_mss = sxgbe_tx_ctxt_desc_get_mss,
+ .tx_ctxt_desc_set_tcmssv = sxgbe_tx_ctxt_desc_set_tcmssv,
+ .tx_ctxt_desc_reset_ostc = sxgbe_tx_ctxt_desc_reset_ostc,
+ .tx_ctxt_desc_set_ivlantag = sxgbe_tx_ctxt_desc_set_ivlantag,
+ .tx_ctxt_desc_get_ivlantag = sxgbe_tx_ctxt_desc_get_ivlantag,
+ .tx_ctxt_desc_set_vlantag = sxgbe_tx_ctxt_desc_set_vlantag,
+ .tx_ctxt_desc_get_vlantag = sxgbe_tx_ctxt_desc_get_vlantag,
+ .tx_ctxt_set_tstamp = sxgbe_tx_ctxt_desc_set_tstamp,
+ .close_tx_ctxt_desc = sxgbe_tx_ctxt_desc_close,
+ .get_tx_ctxt_cde = sxgbe_tx_ctxt_desc_get_cde,
+ .init_rx_desc = sxgbe_init_rx_desc,
+ .get_rx_owner = sxgbe_get_rx_owner,
+ .set_rx_owner = sxgbe_set_rx_owner,
+ .get_rx_frame_len = sxgbe_get_rx_frame_len,
+ .get_rx_fd_status = sxgbe_get_rx_fd_status,
+ .get_rx_ld_status = sxgbe_get_rx_ld_status,
+ .rx_wbstatus = sxgbe_rx_wbstatus,
+ .get_rx_ctxt_owner = sxgbe_get_rx_ctxt_owner,
+ .set_rx_ctxt_owner = sxgbe_set_ctxt_rx_owner,
+ .rx_ctxt_wbstatus = sxgbe_rx_ctxt_wbstatus,
+ .get_rx_ctxt_tstamp_status = sxgbe_get_rx_ctxt_tstamp_status,
+ .get_timestamp = sxgbe_get_rx_timestamp,
+};
+
+const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void)
+{
+ return &desc_ops;
+}
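As an illustrative sketch (assumed, not taken from this patch), a transmit path using these callbacks for a single-buffer frame would roughly do the following; example_xmit_one() is a hypothetical name.

/* Illustrative sketch; DMA mapping and ring accounting omitted. */
static void example_xmit_one(struct sxgbe_tx_norm_desc *p,
			     unsigned int len, bool do_csum)
{
	const struct sxgbe_desc_ops *ops = sxgbe_get_desc_ops();

	ops->prepare_tx_desc(p, 1, len, len, do_csum);	/* first descriptor */
	ops->close_tx_desc(p);	/* last descriptor + interrupt on completion */
	ops->set_tx_owner(p);	/* hand ownership to the DMA engine */
}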
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
new file mode 100644
index 000000000000..838cb9fb0ea9
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
@@ -0,0 +1,298 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_DESC_H__
+#define __SXGBE_DESC_H__
+
+#define SXGBE_DESC_SIZE_BYTES 16
+
+/* forward declaration */
+struct sxgbe_extra_stats;
+
+/* Transmit checksum insertion control */
+enum tdes_csum_insertion {
+ cic_disabled = 0, /* Checksum Insertion Control */
+ cic_only_ip = 1, /* Only IP header */
+ /* IP header but pseudoheader is not calculated */
+ cic_no_pseudoheader = 2,
+ cic_full = 3, /* IP header and pseudoheader */
+};
+
+struct sxgbe_tx_norm_desc {
+ u64 tdes01; /* buf1 address */
+ union {
+ /* TX Read-Format Desc 2,3 */
+ struct {
+ /* TDES2 */
+ u32 buf1_size:14;
+ u32 vlan_tag_ctl:2;
+ u32 buf2_size:14;
+ u32 timestmp_enable:1;
+ u32 int_on_com:1;
+ /* TDES3 */
+ union {
+ u32 tcp_payload_len:18;
+ struct {
+ u32 total_pkt_len:15;
+ u32 reserved1:1;
+ u32 cksum_ctl:2;
+ } cksum_pktlen;
+ } tx_pkt_len;
+
+ u32 tse_bit:1;
+ u32 tcp_hdr_len:4;
+ u32 sa_insert_ctl:3;
+ u32 crc_pad_ctl:2;
+ u32 last_desc:1;
+ u32 first_desc:1;
+ u32 ctxt_bit:1;
+ u32 own_bit:1;
+ } tx_rd_des23;
+
+ /* tx write back Desc 2,3 */
+ struct {
+ /* WB TDES2 */
+ u32 reserved1;
+ /* WB TDES3 */
+ u32 reserved2:31;
+ u32 own_bit:1;
+ } tx_wb_des23;
+ } tdes23;
+};
+
+struct sxgbe_rx_norm_desc {
+ union {
+ u32 rdes0; /* buf1 address */
+ struct {
+ u32 out_vlan_tag:16;
+ u32 in_vlan_tag:16;
+ } wb_rx_des0;
+ } rd_wb_des0;
+
+ union {
+ u32 rdes1; /* buf2 address or buf1[63:32] */
+ u32 rss_hash; /* Write-back RX */
+ } rd_wb_des1;
+
+ union {
+ /* RX Read format Desc 2,3 */
+ struct {
+ /* RDES2 */
+ u32 buf2_addr;
+ /* RDES3 */
+ u32 buf2_hi_addr:30;
+ u32 int_on_com:1;
+ u32 own_bit:1;
+ } rx_rd_des23;
+
+ /* RX write back */
+ struct {
+ /* WB RDES2 */
+ u32 hdr_len:10;
+ u32 rdes2_reserved:2;
+ u32 elrd_val:1;
+ u32 iovt_sel:1;
+ u32 res_pkt:1;
+ u32 vlan_filter_match:1;
+ u32 sa_filter_fail:1;
+ u32 da_filter_fail:1;
+ u32 hash_filter_pass:1;
+ u32 macaddr_filter_match:8;
+ u32 l3_filter_match:1;
+ u32 l4_filter_match:1;
+ u32 l34_filter_num:3;
+
+ /* WB RDES3 */
+ u32 pkt_len:14;
+ u32 rdes3_reserved:1;
+ u32 err_summary:1;
+ u32 err_l2_type:4;
+ u32 layer34_pkt_type:4;
+ u32 no_coagulation_pkt:1;
+ u32 in_seq_pkt:1;
+ u32 rss_valid:1;
+ u32 context_des_avail:1;
+ u32 last_desc:1;
+ u32 first_desc:1;
+ u32 recv_context_desc:1;
+ u32 own_bit:1;
+ } rx_wb_des23;
+ } rdes23;
+};
+
+/* Context descriptor structure */
+struct sxgbe_tx_ctxt_desc {
+ u32 tstamp_lo;
+ u32 tstamp_hi;
+ u32 maxseg_size:15;
+ u32 reserved1:1;
+ u32 ivlan_tag:16;
+ u32 vlan_tag:16;
+ u32 vltag_valid:1;
+ u32 ivlan_tag_valid:1;
+ u32 ivlan_tag_ctl:2;
+ u32 reserved2:3;
+ u32 ctxt_desc_err:1;
+ u32 reserved3:2;
+ u32 ostc:1;
+ u32 tcmssv:1;
+ u32 reserved4:2;
+ u32 ctxt_bit:1;
+ u32 own_bit:1;
+};
+
+struct sxgbe_rx_ctxt_desc {
+ u32 tstamp_lo;
+ u32 tstamp_hi;
+ u32 reserved1;
+ u32 ptp_msgtype:4;
+ u32 tstamp_available:1;
+ u32 ptp_rsp_err:1;
+ u32 tstamp_dropped:1;
+ u32 reserved2:23;
+ u32 rx_ctxt_desc:1;
+ u32 own_bit:1;
+};
+
+struct sxgbe_desc_ops {
+ /* DMA TX descriptor ring initialization */
+ void (*init_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+ /* Invoked by the xmit function to prepare the tx descriptor */
+ void (*tx_desc_enable_tse)(struct sxgbe_tx_norm_desc *p, u8 is_tse,
+ u32 total_hdr_len, u32 tcp_hdr_len,
+ u32 tcp_payload_len);
+
+ /* Assign buffer lengths for descriptor */
+ void (*prepare_tx_desc)(struct sxgbe_tx_norm_desc *p, u8 is_fd,
+ int buf1_len, int pkt_len, int cksum);
+
+ /* Set VLAN control information */
+ void (*tx_vlanctl_desc)(struct sxgbe_tx_norm_desc *p, int vlan_ctl);
+
+ /* Set the owner of the descriptor */
+ void (*set_tx_owner)(struct sxgbe_tx_norm_desc *p);
+
+ /* Get the owner of the descriptor */
+ int (*get_tx_owner)(struct sxgbe_tx_norm_desc *p);
+
+ /* Invoked by the xmit function to close the tx descriptor */
+ void (*close_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+ /* Clean the tx descriptor as soon as the tx irq is received */
+ void (*release_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+ /* Clear the interrupt-on-completion bit. When this bit is
+ * set, an interrupt is raised as soon as the frame is transmitted.
+ */
+ void (*clear_tx_ic)(struct sxgbe_tx_norm_desc *p);
+
+ /* Last tx segment reports the transmit status */
+ int (*get_tx_ls)(struct sxgbe_tx_norm_desc *p);
+
+ /* Get the buffer size from the descriptor */
+ int (*get_tx_len)(struct sxgbe_tx_norm_desc *p);
+
+ /* Set tx timestamp enable bit */
+ void (*tx_enable_tstamp)(struct sxgbe_tx_norm_desc *p);
+
+ /* get tx timestamp status */
+ int (*get_tx_timestamp_status)(struct sxgbe_tx_norm_desc *p);
+
+ /* TX Context Descriptor specific */
+ void (*tx_ctxt_desc_set_ctxt)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set the owner of the TX context descriptor */
+ void (*tx_ctxt_desc_set_owner)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Get the owner of the TX context descriptor */
+ int (*get_tx_ctxt_owner)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set TX mss */
+ void (*tx_ctxt_desc_set_mss)(struct sxgbe_tx_ctxt_desc *p, u16 mss);
+
+ /* Get TX mss */
+ int (*tx_ctxt_desc_get_mss)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set TX tcmssv */
+ void (*tx_ctxt_desc_set_tcmssv)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Reset TX ostc */
+ void (*tx_ctxt_desc_reset_ostc)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set IVLAN information */
+ void (*tx_ctxt_desc_set_ivlantag)(struct sxgbe_tx_ctxt_desc *p,
+ int is_ivlanvalid, int ivlan_tag,
+ int ivlan_ctl);
+
+ /* Return IVLAN Tag */
+ int (*tx_ctxt_desc_get_ivlantag)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set VLAN Tag */
+ void (*tx_ctxt_desc_set_vlantag)(struct sxgbe_tx_ctxt_desc *p,
+ int is_vlanvalid, int vlan_tag);
+
+ /* Return VLAN Tag */
+ int (*tx_ctxt_desc_get_vlantag)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set Time stamp */
+ void (*tx_ctxt_set_tstamp)(struct sxgbe_tx_ctxt_desc *p,
+ u8 ostc_enable, u64 tstamp);
+
+ /* Close TX context descriptor */
+ void (*close_tx_ctxt_desc)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* WB status of context descriptor */
+ int (*get_tx_ctxt_cde)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* DMA RX descriptor ring initialization */
+ void (*init_rx_desc)(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
+ int mode, int end);
+
+ /* Get own bit */
+ int (*get_rx_owner)(struct sxgbe_rx_norm_desc *p);
+
+ /* Set own bit */
+ void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
+
+ /* Get the receive frame size */
+ int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
+
+ /* Return first Descriptor status */
+ int (*get_rx_fd_status)(struct sxgbe_rx_norm_desc *p);
+
+ /* Return last Descriptor status */
+ int (*get_rx_ld_status)(struct sxgbe_rx_norm_desc *p);
+
+ /* Return the reception status looking at the write-back fields */
+ int (*rx_wbstatus)(struct sxgbe_rx_norm_desc *p,
+ struct sxgbe_extra_stats *x, int *checksum);
+
+ /* Get own bit */
+ int (*get_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
+
+ /* Set own bit */
+ void (*set_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
+
+ /* Return the reception status looking at Context control information */
+ void (*rx_ctxt_wbstatus)(struct sxgbe_rx_ctxt_desc *p,
+ struct sxgbe_extra_stats *x);
+
+ /* Get rx timestamp status */
+ int (*get_rx_ctxt_tstamp_status)(struct sxgbe_rx_ctxt_desc *p);
+
+ /* Get timestamp value for rx, need to check this */
+ u64 (*get_timestamp)(struct sxgbe_rx_ctxt_desc *p);
+};
+
+const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void);
+
+#endif /* __SXGBE_DESC_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
new file mode 100644
index 000000000000..4d989ff6c978
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
@@ -0,0 +1,381 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_dma.h"
+#include "sxgbe_reg.h"
+#include "sxgbe_desc.h"
+
+/* DMA core initialization */
+static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
+{
+ int retry_count = 10;
+ u32 reg_val;
+
+ /* reset the DMA */
+ writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
+ while (retry_count--) {
+ if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
+ SXGBE_DMA_SOFT_RESET))
+ break;
+ mdelay(10);
+ }
+
+ if (retry_count < 0)
+ return -EBUSY;
+
+ reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
+
+ /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
+ * if fix_burst = 1, Set UNDEF = 0 of DMA_Sys_Mode Register.
+ * burst_map is bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
+ * Set burst_map irrespective of fix_burst value.
+ */
+ if (!fix_burst)
+ reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;
+
+ /* write burst len map */
+ reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);
+
+ writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
+
+ return 0;
+}
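To make the burst_map comment above concrete, here is a standalone sketch (not driver code) of how a bitmap selecting BLEN8 and BLEN16 would land in the SYSBUS_MODE register. The bit ordering (bit 0 = BLEN4 ... bit 6 = BLEN256) is an assumption inferred from the BLEN[4..256] list in the comment; SXGBE_DMA_BLENMAP_LSHIFT is 1, as defined in sxgbe_dma.h later in this patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed ordering: bit0=BLEN4, bit1=BLEN8, bit2=BLEN16, ... */
	uint32_t burst_map = (1u << 1) | (1u << 2);	/* BLEN8 | BLEN16 */
	uint32_t sysbus_bits = burst_map << 1;	/* SXGBE_DMA_BLENMAP_LSHIFT */

	printf("DMA_SYSBUS_MODE burst bits: 0x%08x\n", sysbus_bits);
	return 0;
}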
+
+static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
+ int fix_burst, int pbl, dma_addr_t dma_tx,
+ dma_addr_t dma_rx, int t_rsize, int r_rsize)
+{
+ u32 reg_val;
+ dma_addr_t dma_addr;
+
+ reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
+ /* set the pbl */
+ if (fix_burst) {
+ reg_val |= SXGBE_DMA_PBL_X8MODE;
+ writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
+ /* program the TX pbl */
+ reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+ reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
+ writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+ /* program the RX pbl */
+ reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
+ reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
+ writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
+ }
+
+ /* program desc registers */
+ writel(upper_32_bits(dma_tx),
+ ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
+ writel(lower_32_bits(dma_tx),
+ ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));
+
+ writel(upper_32_bits(dma_rx),
+ ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
+ writel(lower_32_bits(dma_rx),
+ ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
+
+ /* program tail pointers */
+ /* assumption: upper 32 bits are constant and
+ * same as TX/RX desc list
+ */
+ dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
+ writel(lower_32_bits(dma_addr),
+ ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));
+
+ dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
+ writel(lower_32_bits(dma_addr),
+ ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
+ /* program the ring sizes */
+ writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
+ writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));
+
+ /* Enable TX/RX interrupts */
+ writel(SXGBE_DMA_ENA_INT,
+ ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
+}
+
+static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
+{
+ u32 tx_config;
+
+ tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+ tx_config |= SXGBE_TX_START_DMA;
+ writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+}
+
+static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
+{
+ /* Enable TX/RX interrupts */
+ writel(SXGBE_DMA_ENA_INT,
+ ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
+}
+
+static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
+{
+ /* Disable TX/RX interrupts */
+ writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
+}
+
+static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
+{
+ int cnum;
+ u32 tx_ctl_reg;
+
+ for (cnum = 0; cnum < tchannels; cnum++) {
+ tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+ tx_ctl_reg |= SXGBE_TX_ENABLE;
+ writel(tx_ctl_reg,
+ ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+ }
+}
+
+static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
+{
+ u32 tx_ctl_reg;
+
+ tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+ tx_ctl_reg |= SXGBE_TX_ENABLE;
+ writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+}
+
+static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
+{
+ u32 tx_ctl_reg;
+
+ tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+ tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
+ writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+}
+
+static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
+{
+ int cnum;
+ u32 tx_ctl_reg;
+
+ for (cnum = 0; cnum < tchannels; cnum++) {
+ tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+ tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
+ writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+ }
+}
+
+static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
+{
+ int cnum;
+ u32 rx_ctl_reg;
+
+ for (cnum = 0; cnum < rchannels; cnum++) {
+ rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+ rx_ctl_reg |= SXGBE_RX_ENABLE;
+ writel(rx_ctl_reg,
+ ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+ }
+}
+
+static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
+{
+ int cnum;
+ u32 rx_ctl_reg;
+
+ for (cnum = 0; cnum < rchannels; cnum++) {
+ rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+ rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
+ writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+ }
+}
+
+static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
+ struct sxgbe_extra_stats *x)
+{
+ u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+ u32 clear_val = 0;
+ u32 ret_val = 0;
+
+ /* TX Normal Interrupt Summary */
+ if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
+ x->normal_irq_n++;
+ if (int_status & SXGBE_DMA_INT_STATUS_TI) {
+ ret_val |= handle_tx;
+ x->tx_normal_irq_n++;
+ clear_val |= SXGBE_DMA_INT_STATUS_TI;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
+ x->tx_underflow_irq++;
+ ret_val |= tx_bump_tc;
+ clear_val |= SXGBE_DMA_INT_STATUS_TBU;
+ }
+ } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
+ /* TX Abnormal Interrupt Summary */
+ if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
+ ret_val |= tx_hard_error;
+ clear_val |= SXGBE_DMA_INT_STATUS_TPS;
+ x->tx_process_stopped_irq++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
+ ret_val |= tx_hard_error;
+ x->fatal_bus_error_irq++;
+
+ /* Assumption: the FBE bit is the combination of
+ * all the bus access errors and is cleared when
+ * the respective error bits are cleared
+ */
+
+ /* check for actual cause */
+ if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
+ x->tx_read_transfer_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
+ } else {
+ x->tx_write_transfer_err++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
+ x->tx_desc_access_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
+ } else {
+ x->tx_buffer_access_err++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
+ x->tx_data_transfer_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
+ }
+ }
+
+ /* context descriptor error */
+ if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
+ x->tx_ctxt_desc_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
+ }
+ }
+
+ /* clear the served bits */
+ writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+
+ return ret_val;
+}
+
+static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
+ struct sxgbe_extra_stats *x)
+{
+ u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+ u32 clear_val = 0;
+ u32 ret_val = 0;
+
+ /* RX Normal Interrupt Summary */
+ if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
+ x->normal_irq_n++;
+ if (int_status & SXGBE_DMA_INT_STATUS_RI) {
+ ret_val |= handle_rx;
+ x->rx_normal_irq_n++;
+ clear_val |= SXGBE_DMA_INT_STATUS_RI;
+ }
+ } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
+ /* RX Abnormal Interrupt Summary */
+ if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
+ ret_val |= rx_bump_tc;
+ clear_val |= SXGBE_DMA_INT_STATUS_RBU;
+ x->rx_underflow_irq++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
+ ret_val |= rx_hard_error;
+ clear_val |= SXGBE_DMA_INT_STATUS_RPS;
+ x->rx_process_stopped_irq++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
+ ret_val |= rx_hard_error;
+ x->fatal_bus_error_irq++;
+
+ /* Assumption: the FBE bit is the combination of
+ * all the bus access errors and is cleared when
+ * the respective error bits are cleared
+ */
+
+ /* check for actual cause */
+ if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
+ x->rx_read_transfer_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_REB0;
+ } else {
+ x->rx_write_transfer_err++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
+ x->rx_desc_access_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_REB1;
+ } else {
+ x->rx_buffer_access_err++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
+ x->rx_data_transfer_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_REB2;
+ }
+ }
+ }
+
+ /* clear the served bits */
+ writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+
+ return ret_val;
+}
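Both *_dma_int_status() helpers return a bitmask rather than acting on it themselves. A hedged sketch of how a channel interrupt handler might consume it follows; example_rx_irq() is a hypothetical name, and the real handlers live in sxgbe_main.c.

/* Illustrative sketch only. */
static void example_rx_irq(struct sxgbe_priv_data *priv, int chan)
{
	int status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, chan,
						      &priv->xstats);

	if (status & handle_rx) {
		/* normal completion: schedule the NAPI poll for this channel */
	}
	if (status & rx_bump_tc) {
		/* RX buffer unavailable: bump the threshold control */
	}
	if (status & rx_hard_error) {
		/* fatal bus error or stopped process: restart the channel */
	}
}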
+
+/* Program the HW RX Watchdog */
+static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+{
+ u32 que_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
+ writel(riwt,
+ ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
+ }
+}
+
+static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
+ ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE;
+ writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
+}
+
+static const struct sxgbe_dma_ops sxgbe_dma_ops = {
+ .init = sxgbe_dma_init,
+ .cha_init = sxgbe_dma_channel_init,
+ .enable_dma_transmission = sxgbe_enable_dma_transmission,
+ .enable_dma_irq = sxgbe_enable_dma_irq,
+ .disable_dma_irq = sxgbe_disable_dma_irq,
+ .start_tx = sxgbe_dma_start_tx,
+ .start_tx_queue = sxgbe_dma_start_tx_queue,
+ .stop_tx = sxgbe_dma_stop_tx,
+ .stop_tx_queue = sxgbe_dma_stop_tx_queue,
+ .start_rx = sxgbe_dma_start_rx,
+ .stop_rx = sxgbe_dma_stop_rx,
+ .tx_dma_int_status = sxgbe_tx_dma_int_status,
+ .rx_dma_int_status = sxgbe_rx_dma_int_status,
+ .rx_watchdog = sxgbe_dma_rx_watchdog,
+ .enable_tso = sxgbe_enable_tso,
+};
+
+const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
+{
+ return &sxgbe_dma_ops;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h
new file mode 100644
index 000000000000..1607b54c9bb0
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h
@@ -0,0 +1,50 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_DMA_H__
+#define __SXGBE_DMA_H__
+
+/* forward declaration */
+struct sxgbe_extra_stats;
+
+#define SXGBE_DMA_BLENMAP_LSHIFT 1
+#define SXGBE_DMA_TXPBL_LSHIFT 16
+#define SXGBE_DMA_RXPBL_LSHIFT 16
+#define DEFAULT_DMA_PBL 8
+
+struct sxgbe_dma_ops {
+ /* DMA core initialization */
+ int (*init)(void __iomem *ioaddr, int fix_burst, int burst_map);
+ void (*cha_init)(void __iomem *ioaddr, int cha_num, int fix_burst,
+ int pbl, dma_addr_t dma_tx, dma_addr_t dma_rx,
+ int t_rsize, int r_rsize);
+ void (*enable_dma_transmission)(void __iomem *ioaddr, int dma_cnum);
+ void (*enable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
+ void (*disable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
+ void (*start_tx)(void __iomem *ioaddr, int tchannels);
+ void (*start_tx_queue)(void __iomem *ioaddr, int dma_cnum);
+ void (*stop_tx)(void __iomem *ioaddr, int tchannels);
+ void (*stop_tx_queue)(void __iomem *ioaddr, int dma_cnum);
+ void (*start_rx)(void __iomem *ioaddr, int rchannels);
+ void (*stop_rx)(void __iomem *ioaddr, int rchannels);
+ int (*tx_dma_int_status)(void __iomem *ioaddr, int channel_no,
+ struct sxgbe_extra_stats *x);
+ int (*rx_dma_int_status)(void __iomem *ioaddr, int channel_no,
+ struct sxgbe_extra_stats *x);
+ /* Program the HW RX Watchdog */
+ void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt);
+ /* Enable TSO for each DMA channel */
+ void (*enable_tso)(void __iomem *ioaddr, u8 chan_num);
+};
+
+const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void);
+
+#endif /* __SXGBE_DMA_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
new file mode 100644
index 000000000000..0415fa50eeb7
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -0,0 +1,524 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+#include "sxgbe_dma.h"
+
+struct sxgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define SXGBE_STAT(m) \
+{ \
+ #m, \
+ FIELD_SIZEOF(struct sxgbe_extra_stats, m), \
+ offsetof(struct sxgbe_priv_data, xstats.m) \
+}
+
+static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
+ /* TX/RX IRQ events */
+ SXGBE_STAT(tx_process_stopped_irq),
+ SXGBE_STAT(tx_ctxt_desc_err),
+ SXGBE_STAT(tx_threshold),
+ SXGBE_STAT(rx_threshold),
+ SXGBE_STAT(tx_pkt_n),
+ SXGBE_STAT(rx_pkt_n),
+ SXGBE_STAT(normal_irq_n),
+ SXGBE_STAT(tx_normal_irq_n),
+ SXGBE_STAT(rx_normal_irq_n),
+ SXGBE_STAT(napi_poll),
+ SXGBE_STAT(tx_clean),
+ SXGBE_STAT(tx_reset_ic_bit),
+ SXGBE_STAT(rx_process_stopped_irq),
+ SXGBE_STAT(rx_underflow_irq),
+
+ /* Bus access errors */
+ SXGBE_STAT(fatal_bus_error_irq),
+ SXGBE_STAT(tx_read_transfer_err),
+ SXGBE_STAT(tx_write_transfer_err),
+ SXGBE_STAT(tx_desc_access_err),
+ SXGBE_STAT(tx_buffer_access_err),
+ SXGBE_STAT(tx_data_transfer_err),
+ SXGBE_STAT(rx_read_transfer_err),
+ SXGBE_STAT(rx_write_transfer_err),
+ SXGBE_STAT(rx_desc_access_err),
+ SXGBE_STAT(rx_buffer_access_err),
+ SXGBE_STAT(rx_data_transfer_err),
+
+ /* EEE-LPI stats */
+ SXGBE_STAT(tx_lpi_entry_n),
+ SXGBE_STAT(tx_lpi_exit_n),
+ SXGBE_STAT(rx_lpi_entry_n),
+ SXGBE_STAT(rx_lpi_exit_n),
+ SXGBE_STAT(eee_wakeup_error_n),
+
+ /* RX specific */
+ /* L2 error */
+ SXGBE_STAT(rx_code_gmii_err),
+ SXGBE_STAT(rx_watchdog_err),
+ SXGBE_STAT(rx_crc_err),
+ SXGBE_STAT(rx_gaint_pkt_err),
+ SXGBE_STAT(ip_hdr_err),
+ SXGBE_STAT(ip_payload_err),
+ SXGBE_STAT(overflow_error),
+
+ /* L2 Pkt type */
+ SXGBE_STAT(len_pkt),
+ SXGBE_STAT(mac_ctl_pkt),
+ SXGBE_STAT(dcb_ctl_pkt),
+ SXGBE_STAT(arp_pkt),
+ SXGBE_STAT(oam_pkt),
+ SXGBE_STAT(untag_okt),
+ SXGBE_STAT(other_pkt),
+ SXGBE_STAT(svlan_tag_pkt),
+ SXGBE_STAT(cvlan_tag_pkt),
+ SXGBE_STAT(dvlan_ocvlan_icvlan_pkt),
+ SXGBE_STAT(dvlan_osvlan_isvlan_pkt),
+ SXGBE_STAT(dvlan_osvlan_icvlan_pkt),
+ SXGBE_STAT(dvan_ocvlan_icvlan_pkt),
+
+ /* L3/L4 Pkt type */
+ SXGBE_STAT(not_ip_pkt),
+ SXGBE_STAT(ip4_tcp_pkt),
+ SXGBE_STAT(ip4_udp_pkt),
+ SXGBE_STAT(ip4_icmp_pkt),
+ SXGBE_STAT(ip4_unknown_pkt),
+ SXGBE_STAT(ip6_tcp_pkt),
+ SXGBE_STAT(ip6_udp_pkt),
+ SXGBE_STAT(ip6_icmp_pkt),
+ SXGBE_STAT(ip6_unknown_pkt),
+
+ /* Filter specific */
+ SXGBE_STAT(vlan_filter_match),
+ SXGBE_STAT(sa_filter_fail),
+ SXGBE_STAT(da_filter_fail),
+ SXGBE_STAT(hash_filter_pass),
+ SXGBE_STAT(l3_filter_match),
+ SXGBE_STAT(l4_filter_match),
+
+ /* RX context specific */
+ SXGBE_STAT(timestamp_dropped),
+ SXGBE_STAT(rx_msg_type_no_ptp),
+ SXGBE_STAT(rx_ptp_type_sync),
+ SXGBE_STAT(rx_ptp_type_follow_up),
+ SXGBE_STAT(rx_ptp_type_delay_req),
+ SXGBE_STAT(rx_ptp_type_delay_resp),
+ SXGBE_STAT(rx_ptp_type_pdelay_req),
+ SXGBE_STAT(rx_ptp_type_pdelay_resp),
+ SXGBE_STAT(rx_ptp_type_pdelay_follow_up),
+ SXGBE_STAT(rx_ptp_announce),
+ SXGBE_STAT(rx_ptp_mgmt),
+ SXGBE_STAT(rx_ptp_signal),
+ SXGBE_STAT(rx_ptp_resv_msg_type),
+};
+#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)
+
+static int sxgbe_get_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ if (!priv->hw_cap.eee)
+ return -EOPNOTSUPP;
+
+ edata->eee_enabled = priv->eee_enabled;
+ edata->eee_active = priv->eee_active;
+ edata->tx_lpi_timer = priv->tx_lpi_timer;
+
+ return phy_ethtool_get_eee(priv->phydev, edata);
+}
+
+static int sxgbe_set_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ priv->eee_enabled = edata->eee_enabled;
+
+ if (!priv->eee_enabled) {
+ sxgbe_disable_eee_mode(priv);
+ } else {
+ /* EEE is being enabled, but it is safest to re-verify
+ * everything by invoking the eee_init function; on
+ * failure it returns an error.
+ */
+ priv->eee_enabled = sxgbe_eee_init(priv);
+ if (!priv->eee_enabled)
+ return -EOPNOTSUPP;
+
+ /* Do not change tx_lpi_timer in case of failure */
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ }
+
+ return phy_ethtool_set_eee(priv->phydev, edata);
+}
+
+static void sxgbe_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static int sxgbe_getsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ if (priv->phydev)
+ return phy_ethtool_gset(priv->phydev, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ if (priv->phydev)
+ return phy_ethtool_sset(priv->phydev, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static u32 sxgbe_getmsglevel(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void sxgbe_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+}
+
+static void sxgbe_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < SXGBE_STATS_LEN; i++) {
+ memcpy(p, sxgbe_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int sxgbe_get_sset_count(struct net_device *netdev, int sset)
+{
+ int len;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ len = SXGBE_STATS_LEN;
+ return len;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void sxgbe_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 *data)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int i;
+ char *p;
+
+ if (priv->eee_enabled) {
+ int val = phy_get_eee_err(priv->phydev);
+
+ if (val)
+ priv->xstats.eee_wakeup_error_n = val;
+ }
+
+ for (i = 0; i < SXGBE_STATS_LEN; i++) {
+ p = (char *)priv + sxgbe_gstrings_stats[i].stat_offset;
+ data[i] = (sxgbe_gstrings_stats[i].sizeof_stat == sizeof(u64))
+ ? (*(u64 *)p) : (*(u32 *)p);
+ }
+}
+
+static void sxgbe_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ channel->max_rx = SXGBE_MAX_RX_CHANNELS;
+ channel->max_tx = SXGBE_MAX_TX_CHANNELS;
+ channel->rx_count = SXGBE_RX_QUEUES;
+ channel->tx_count = SXGBE_TX_QUEUES;
+}
+
+static u32 sxgbe_riwt2usec(u32 riwt, struct sxgbe_priv_data *priv)
+{
+ unsigned long clk = clk_get_rate(priv->sxgbe_clk);
+
+ if (!clk)
+ return 0;
+
+ return (riwt * 256) / (clk / 1000000);
+}
+
+static u32 sxgbe_usec2riwt(u32 usec, struct sxgbe_priv_data *priv)
+{
+ unsigned long clk = clk_get_rate(priv->sxgbe_clk);
+
+ if (!clk)
+ return 0;
+
+ return (usec * (clk / 1000000)) / 256;
+}
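A quick arithmetic check of the two helpers above, as a standalone sketch (the 250 MHz CSR clock is an assumed example, not a value from this patch): one RIWT unit corresponds to 256 clock cycles, so 100 us converts to riwt = 100 * 250 / 256 = 97, which converts back to 97 * 256 / 250 = 99 us; the round trip loses a little precision to integer division.

#include <stdio.h>

int main(void)
{
	unsigned long clk = 250000000UL;	/* assumed 250 MHz CSR clock */
	unsigned int usec = 100;
	unsigned int riwt = (usec * (clk / 1000000)) / 256;	/* 97 */
	unsigned int back = (riwt * 256) / (clk / 1000000);	/* 99 */

	printf("%u us -> riwt %u -> %u us\n", usec, riwt, back);
	return 0;
}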
+
+static int sxgbe_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ if (priv->use_riwt)
+ ec->rx_coalesce_usecs = sxgbe_riwt2usec(priv->rx_riwt, priv);
+
+ return 0;
+}
+
+static int sxgbe_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ unsigned int rx_riwt;
+
+ if (!ec->rx_coalesce_usecs)
+ return -EINVAL;
+
+ rx_riwt = sxgbe_usec2riwt(ec->rx_coalesce_usecs, priv);
+
+ if ((rx_riwt > SXGBE_MAX_DMA_RIWT) || (rx_riwt < SXGBE_MIN_DMA_RIWT))
+ return -EINVAL;
+ else if (!priv->use_riwt)
+ return -EOPNOTSUPP;
+
+ priv->rx_riwt = rx_riwt;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
+
+ return 0;
+}
+
+static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on sxgbe */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = sxgbe_get_rss_hash_opts(priv, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
+ struct ethtool_rxnfc *cmd)
+{
+ u32 reg_val = 0;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(cmd->data & RXH_IP_SRC) ||
+ !(cmd->data & RXH_IP_DST) ||
+ !(cmd->data & RXH_L4_B_0_1) ||
+ !(cmd->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ reg_val = SXGBE_CORE_RSS_CTL_TCP4TE;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (!(cmd->data & RXH_IP_SRC) ||
+ !(cmd->data & RXH_IP_DST) ||
+ !(cmd->data & RXH_L4_B_0_1) ||
+ !(cmd->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ reg_val = SXGBE_CORE_RSS_CTL_UDP4TE;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ if (!(cmd->data & RXH_IP_SRC) ||
+ !(cmd->data & RXH_IP_DST) ||
+ (cmd->data & RXH_L4_B_0_1) ||
+ (cmd->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ reg_val = SXGBE_CORE_RSS_CTL_IP2TE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Read SXGBE RSS control register and update */
+ reg_val |= readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
+ writel(reg_val, priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
+ readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
+
+ return 0;
+}
+
+static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = sxgbe_set_rss_hash_opt(priv, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void sxgbe_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ u32 *reg_space = (u32 *)space;
+ int reg_offset;
+ int reg_ix = 0;
+ void __iomem *ioaddr = priv->ioaddr;
+
+ memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+ /* MAC registers */
+ for (reg_offset = START_MAC_REG_OFFSET;
+ reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = readl(ioaddr + reg_offset);
+ reg_ix++;
+ }
+
+ /* MTL registers */
+ for (reg_offset = START_MTL_REG_OFFSET;
+ reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = readl(ioaddr + reg_offset);
+ reg_ix++;
+ }
+
+ /* DMA registers */
+ for (reg_offset = START_DMA_REG_OFFSET;
+ reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = readl(ioaddr + reg_offset);
+ reg_ix++;
+ }
+
+ BUG_ON(reg_ix * 4 > REG_SPACE_SIZE);
+}
+
+static int sxgbe_get_regs_len(struct net_device *dev)
+{
+ return REG_SPACE_SIZE;
+}
+
+static const struct ethtool_ops sxgbe_ethtool_ops = {
+ .get_drvinfo = sxgbe_getdrvinfo,
+ .get_settings = sxgbe_getsettings,
+ .set_settings = sxgbe_setsettings,
+ .get_msglevel = sxgbe_getmsglevel,
+ .set_msglevel = sxgbe_setmsglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = sxgbe_get_strings,
+ .get_ethtool_stats = sxgbe_get_ethtool_stats,
+ .get_sset_count = sxgbe_get_sset_count,
+ .get_channels = sxgbe_get_channels,
+ .get_coalesce = sxgbe_get_coalesce,
+ .set_coalesce = sxgbe_set_coalesce,
+ .get_rxnfc = sxgbe_get_rxnfc,
+ .set_rxnfc = sxgbe_set_rxnfc,
+ .get_regs = sxgbe_get_regs,
+ .get_regs_len = sxgbe_get_regs_len,
+ .get_eee = sxgbe_get_eee,
+ .set_eee = sxgbe_set_eee,
+};
+
+void sxgbe_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
new file mode 100644
index 000000000000..27e8c824b204
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -0,0 +1,2324 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/sxgbe_platform.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_desc.h"
+#include "sxgbe_dma.h"
+#include "sxgbe_mtl.h"
+#include "sxgbe_reg.h"
+
+#define SXGBE_ALIGN(x) L1_CACHE_ALIGN(x)
+#define JUMBO_LEN 9000
+
+/* Module parameters */
+#define TX_TIMEO 5000
+#define DMA_TX_SIZE 512
+#define DMA_RX_SIZE 1024
+#define TC_DEFAULT 64
+#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
+/* The default timer value, as per the sxgbe specification, is 1 sec (1000 ms) */
+#define SXGBE_DEFAULT_LPI_TIMER 1000
+
+static int debug = -1;
+static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;
+
+module_param(eee_timer, int, S_IRUGO | S_IWUSR);
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
+static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
+static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);
+
+#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
+
+#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+
+/**
+ * sxgbe_verify_args - verify the driver parameters.
+ * Description: it verifies whether any invalid parameter was passed to the
+ * driver. Note that invalid parameters are replaced with the default values.
+ */
+static void sxgbe_verify_args(void)
+{
+ if (unlikely(eee_timer < 0))
+ eee_timer = SXGBE_DEFAULT_LPI_TIMER;
+}
+
+static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
+{
+ /* Check and enter in LPI mode */
+ if (!priv->tx_path_in_lpi_mode)
+ priv->hw->mac->set_eee_mode(priv->ioaddr);
+}
+
+void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
+{
+ /* Exit and disable EEE if we are in the LPI state. */
+ priv->hw->mac->reset_eee_mode(priv->ioaddr);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * sxgbe_eee_ctrl_timer
+ * @arg : data hook
+ * Description:
+ * If there is no data transfer and if we are not in LPI state,
+ * then MAC Transmitter can be moved to LPI state.
+ */
+static void sxgbe_eee_ctrl_timer(unsigned long arg)
+{
+ struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;
+
+ sxgbe_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
+}
+
+/**
+ * sxgbe_eee_init
+ * @priv: private device pointer
+ * Description:
+ * If EEE support was enabled while configuring the driver, the MAC core
+ * actually supports EEE (from the HW capability register) and the PHY
+ * can also manage EEE, then enable the LPI state and start the timer to
+ * verify whether the TX path can enter the LPI state.
+ */
+bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
+{
+ bool ret = false;
+
+ /* MAC core supports the EEE feature. */
+ if (priv->hw_cap.eee) {
+ /* Check if the PHY supports EEE */
+ if (phy_init_eee(priv->phydev, 1))
+ return false;
+
+ priv->eee_active = 1;
+ init_timer(&priv->eee_ctrl_timer);
+ priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
+ priv->eee_ctrl_timer.data = (unsigned long)priv;
+ priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
+ add_timer(&priv->eee_ctrl_timer);
+
+ priv->hw->mac->set_eee_timer(priv->ioaddr,
+ SXGBE_DEFAULT_LPI_TIMER,
+ priv->tx_lpi_timer);
+
+ pr_info("Energy-Efficient Ethernet initialized\n");
+
+ ret = true;
+ }
+
+ return ret;
+}
+
+static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
+{
+ /* Once EEE has been initialised, the PLS bit in the LPI
+ * control & status register must track the PHY link status.
+ */
+ if (priv->eee_enabled)
+ priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+}
+
+/**
+ * sxgbe_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: this is to dynamically set the MDC clock according to the csr
+ * clock input.
+ */
+static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
+{
+ u32 clk_rate = clk_get_rate(priv->sxgbe_clk);
+
+ /* assign the proper divider; it will be used during
+ * MDIO communication
+ */
+ if (clk_rate < SXGBE_CSR_F_150M)
+ priv->clk_csr = SXGBE_CSR_100_150M;
+ else if (clk_rate <= SXGBE_CSR_F_250M)
+ priv->clk_csr = SXGBE_CSR_150_250M;
+ else if (clk_rate <= SXGBE_CSR_F_300M)
+ priv->clk_csr = SXGBE_CSR_250_300M;
+ else if (clk_rate <= SXGBE_CSR_F_350M)
+ priv->clk_csr = SXGBE_CSR_300_350M;
+ else if (clk_rate <= SXGBE_CSR_F_400M)
+ priv->clk_csr = SXGBE_CSR_350_400M;
+ else if (clk_rate <= SXGBE_CSR_F_500M)
+ priv->clk_csr = SXGBE_CSR_400_500M;
+}
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define SXGBE_TX_THRESH(x) (x->dma_tx_size/4)
+
+static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
+{
+ return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
+}
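The helper above implements the usual ring-buffer occupancy formula; the "- 1" keeps one slot permanently unused so a full ring can be told apart from an empty one (a common convention, assumed here). A standalone worked example with made-up indices:

#include <stdio.h>

static unsigned int tx_avail(unsigned int dirty_tx, unsigned int cur_tx,
			     unsigned int tx_qsize)
{
	return dirty_tx + tx_qsize - cur_tx - 1;
}

int main(void)
{
	/* 512-entry ring, 10 descriptors produced, 4 already reclaimed */
	printf("free slots: %u\n", tx_avail(4, 10, 512));	/* prints 505 */
	return 0;
}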
+
+/**
+ * sxgbe_adjust_link
+ * @dev: net device structure
+ * Description: it adjusts the link parameters.
+ */
+static void sxgbe_adjust_link(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ u8 new_state = 0;
+ u8 speed = 0xff;
+
+ if (!phydev)
+ return;
+
+ /* SXGBE does not support auto-negotiation or half-duplex
+ * mode, so duplex changes are not handled here; only speed
+ * and link status are handled.
+ */
+ if (phydev->link) {
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case SPEED_10000:
+ speed = SXGBE_SPEED_10G;
+ break;
+ case SPEED_2500:
+ speed = SXGBE_SPEED_2_5G;
+ break;
+ case SPEED_1000:
+ speed = SXGBE_SPEED_1G;
+ break;
+ default:
+ netif_err(priv, link, dev,
+ "Speed (%d) not supported\n",
+ phydev->speed);
+ }
+
+ priv->speed = phydev->speed;
+ priv->hw->mac->set_speed(priv->ioaddr, speed);
+ }
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+
+ /* Alter the MAC settings for EEE */
+ sxgbe_eee_adjust(priv);
+}
+
+/**
+ * sxgbe_init_phy - PHY initialization
+ * @ndev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the mac driver.
+ * Return value:
+ * 0 on success
+ */
+static int sxgbe_init_phy(struct net_device *ndev)
+{
+ char phy_id_fmt[MII_BUS_ID_SIZE + 3];
+ char bus_id[MII_BUS_ID_SIZE];
+ struct phy_device *phydev;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+ int phy_iface = priv->plat->interface;
+
+ /* assign default link status */
+ priv->oldlink = 0;
+ priv->speed = SPEED_UNKNOWN;
+ priv->oldduplex = DUPLEX_UNKNOWN;
+
+ if (priv->plat->phy_bus_name)
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+ priv->plat->phy_bus_name, priv->plat->bus_id);
+ else
+ snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
+ priv->plat->bus_id);
+
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->plat->phy_addr);
+ netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);
+
+ phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* Stop Advertising 1000BASE Capability if interface is not GMII */
+ if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
+ (phy_iface == PHY_INTERFACE_MODE_RMII))
+ phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+ if (phydev->phy_id == 0) {
+ phy_disconnect(phydev);
+ return -ENODEV;
+ }
+
+ netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
+ __func__, phydev->phy_id, phydev->link);
+
+ /* save phy device in private structure */
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+/**
+ * sxgbe_clear_descriptors: clear descriptors
+ * @priv: driver private structure
+ * Description: this function is called to clear the TX and RX
+ * descriptors of every queue.
+ */
+static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
+{
+ int i, j;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+
+ /* Clear the Rx/Tx descriptors */
+ for (j = 0; j < SXGBE_RX_QUEUES; j++) {
+ for (i = 0; i < rxsize; i++)
+ priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
+ priv->use_riwt, priv->mode,
+ (i == rxsize - 1));
+ }
+
+ for (j = 0; j < SXGBE_TX_QUEUES; j++) {
+ for (i = 0; i < txsize; i++)
+ priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
+ }
+}
+
+static int sxgbe_init_rx_buffers(struct net_device *dev,
+ struct sxgbe_rx_norm_desc *p, int i,
+ unsigned int dma_buf_sz,
+ struct sxgbe_rx_queue *rx_ring)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ rx_ring->rx_skbuff[i] = skb;
+ rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+ dma_buf_sz, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
+ netdev_err(dev, "%s: DMA mapping error\n", __func__);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];
+
+ return 0;
+}
+
+/**
+ * init_tx_ring - init the TX descriptor ring
+ * @dev: device structure
+ * @queue_no: queue number
+ * @tx_ring: ring to be initialised
+ * @tx_rsize: ring size
+ * Description: this function initializes the TX DMA descriptor ring
+ */
+static int init_tx_ring(struct device *dev, u8 queue_no,
+ struct sxgbe_tx_queue *tx_ring, int tx_rsize)
+{
+ /* TX ring is not allocated */
+ if (!tx_ring) {
+ dev_err(dev, "No memory for TX queue of SXGBE\n");
+ return -ENOMEM;
+ }
+
+ /* allocate memory for TX descriptors */
+ tx_ring->dma_tx = dma_zalloc_coherent(dev,
+ tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+ &tx_ring->dma_tx_phy, GFP_KERNEL);
+ if (!tx_ring->dma_tx)
+ return -ENOMEM;
+
+ /* allocate memory for TX skbuff array */
+ tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!tx_ring->tx_skbuff_dma)
+ goto dmamem_err;
+
+ tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
+ sizeof(struct sk_buff *), GFP_KERNEL);
+
+ if (!tx_ring->tx_skbuff)
+ goto dmamem_err;
+
+ /* assign queue number */
+ tx_ring->queue_no = queue_no;
+
+ /* initialise counters */
+ tx_ring->dirty_tx = 0;
+ tx_ring->cur_tx = 0;
+
+ /* initialise TX queue lock */
+ spin_lock_init(&tx_ring->tx_lock);
+
+ return 0;
+
+dmamem_err:
+ dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+ tx_ring->dma_tx, tx_ring->dma_tx_phy);
+ return -ENOMEM;
+}
+
+/**
+ * free_rx_ring - free the RX descriptor ring
+ * @dev: device structure
+ * @rx_ring: ring to be freed
+ * @rx_rsize: ring size
+ * Description: this function frees the RX descriptor ring and buffers
+ */
+void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
+ int rx_rsize)
+{
+ dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ rx_ring->dma_rx, rx_ring->dma_rx_phy);
+ kfree(rx_ring->rx_skbuff_dma);
+ kfree(rx_ring->rx_skbuff);
+}
+
+/**
+ * init_rx_ring - init the RX descriptor ring
+ * @dev: net device structure
+ * @queue_no: queue number
+ * @rx_ring: ring to be initialised
+ * @rx_rsize: ring size
+ * Description: this function initializes the RX DMA descriptor ring
+ */
+static int init_rx_ring(struct net_device *dev, u8 queue_no,
+ struct sxgbe_rx_queue *rx_ring, int rx_rsize)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int desc_index;
+ unsigned int bfsize = 0;
+ unsigned int ret = 0;
+
+ /* Set the max buffer size according to the MTU. */
+ bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
+
+ netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);
+
+ /* RX ring is not allocated */
+ if (rx_ring == NULL) {
+ netdev_err(dev, "No memory for RX queue\n");
+ goto error;
+ }
+
+ /* assign queue number */
+ rx_ring->queue_no = queue_no;
+
+ /* allocate memory for RX descriptors */
+ rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
+ rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ &rx_ring->dma_rx_phy, GFP_KERNEL);
+
+ if (rx_ring->dma_rx == NULL)
+ goto error;
+
+ /* allocate memory for RX skbuff array */
+ rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (rx_ring->rx_skbuff_dma == NULL)
+ goto dmamem_err;
+
+ rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
+ sizeof(struct sk_buff *), GFP_KERNEL);
+ if (rx_ring->rx_skbuff == NULL)
+ goto rxbuff_err;
+
+ /* initialise the buffers */
+ for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
+ struct sxgbe_rx_norm_desc *p;
+ p = rx_ring->dma_rx + desc_index;
+ ret = sxgbe_init_rx_buffers(dev, p, desc_index,
+ bfsize, rx_ring);
+ if (ret)
+ goto err_init_rx_buffers;
+ }
+
+ /* initialise counters */
+ rx_ring->cur_rx = 0;
+ rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
+ priv->dma_buf_sz = bfsize;
+
+ return 0;
+
+err_init_rx_buffers:
+ while (--desc_index >= 0) {
+ dma_unmap_single(priv->device,
+ rx_ring->rx_skbuff_dma[desc_index],
+ bfsize, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_ring->rx_skbuff[desc_index]);
+ }
+ kfree(rx_ring->rx_skbuff);
+rxbuff_err:
+ kfree(rx_ring->rx_skbuff_dma);
+dmamem_err:
+ dma_free_coherent(priv->device,
+ rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ rx_ring->dma_rx, rx_ring->dma_rx_phy);
+error:
+ return -ENOMEM;
+}
+
+/**
+ * free_tx_ring - free the TX descriptor ring
+ * @dev: device structure
+ * @tx_ring: ring to be freed
+ * @tx_rsize: ring size
+ * Description: this function frees the TX descriptor ring
+ */
+void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
+ int tx_rsize)
+{
+ dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+ tx_ring->dma_tx, tx_ring->dma_tx_phy);
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @netd: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int init_dma_desc_rings(struct net_device *netd)
+{
+ int queue_num, ret;
+ struct sxgbe_priv_data *priv = netdev_priv(netd);
+ int tx_rsize = priv->dma_tx_size;
+ int rx_rsize = priv->dma_rx_size;
+
+ /* Allocate memory for queue structures and TX descs */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ ret = init_tx_ring(priv->device, queue_num,
+ priv->txq[queue_num], tx_rsize);
+ if (ret) {
+ dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
+ goto txalloc_err;
+ }
+
+ /* save the private pointer in each ring; it is
+ * needed when cleaning the TX queue
+ */
+ priv->txq[queue_num]->priv_ptr = priv;
+ }
+
+ /* Allocate memory for queue structures and RX descs */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+ ret = init_rx_ring(netd, queue_num,
+ priv->rxq[queue_num], rx_rsize);
+ if (ret) {
+ netdev_err(netd, "RX DMA ring allocation failed!!\n");
+ goto rxalloc_err;
+ }
+
+ /* save the private pointer in each ring; it is
+ * needed when cleaning the RX queue
+ */
+ priv->rxq[queue_num]->priv_ptr = priv;
+ }
+
+ sxgbe_clear_descriptors(priv);
+
+ return 0;
+
+txalloc_err:
+ while (queue_num--)
+ free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
+ return ret;
+
+rxalloc_err:
+ while (queue_num--)
+ free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
+ return ret;
+}
+
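+/* Unmap and free every socket buffer still attached to a TX ring */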
+static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
+{
+ int dma_desc;
+ struct sxgbe_priv_data *priv = txqueue->priv_ptr;
+ int tx_rsize = priv->dma_tx_size;
+
+ for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
+ struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;
+
+ if (txqueue->tx_skbuff_dma[dma_desc])
+ dma_unmap_single(priv->device,
+ txqueue->tx_skbuff_dma[dma_desc],
+ priv->hw->desc->get_tx_len(tdesc),
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
+ txqueue->tx_skbuff[dma_desc] = NULL;
+ txqueue->tx_skbuff_dma[dma_desc] = 0;
+ }
+}
+
+
+static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
+ tx_free_ring_skbufs(tqueue);
+ }
+}
+
+static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+ int tx_rsize = priv->dma_tx_size;
+ int rx_rsize = priv->dma_rx_size;
+
+ /* Release the DMA TX buffers */
+ dma_free_tx_skbufs(priv);
+
+ /* Release the TX ring memory also */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
+ }
+
+ /* Release the RX ring memory also */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+ free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
+ }
+}
+
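+/* Allocate the per-queue TX control structures (the descriptor rings
+ * themselves are allocated later, in init_tx_ring)
+ */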
+static int txring_mem_alloc(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ priv->txq[queue_num] = devm_kmalloc(priv->device,
+ sizeof(struct sxgbe_tx_queue), GFP_KERNEL);
+ if (!priv->txq[queue_num])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+ priv->rxq[queue_num] = devm_kmalloc(priv->device,
+ sizeof(struct sxgbe_rx_queue), GFP_KERNEL);
+ if (!priv->rxq[queue_num])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * sxgbe_mtl_operation_mode - HW MTL operation mode
+ * @priv: driver private structure
+ * Description: it sets the MTL operation mode: tx/rx MTL thresholds
+ * or Store-And-Forward capability.
+ */
+static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ /* TX/RX threshold control */
+ if (likely(priv->plat->force_sf_dma_mode)) {
+ /* set TC mode for TX QUEUES */
+ SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
+ priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
+ SXGBE_MTL_SFMODE);
+ priv->tx_tc = SXGBE_MTL_SFMODE;
+
+ /* set TC mode for RX QUEUES */
+ SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
+ priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
+ SXGBE_MTL_SFMODE);
+ priv->rx_tc = SXGBE_MTL_SFMODE;
+ } else if (unlikely(priv->plat->force_thresh_dma_mode)) {
+ /* set TC mode for TX QUEUES */
+ SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
+ priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
+ priv->tx_tc);
+ /* set TC mode for RX QUEUES */
+ SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
+ priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
+ priv->rx_tc);
+ } else {
+ pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
+ }
+}
+
+/**
+ * sxgbe_tx_queue_clean:
+ * @tqueue: TX queue to clean
+ * Description: it reclaims resources after transmission completes.
+ */
+static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
+{
+ struct sxgbe_priv_data *priv = tqueue->priv_ptr;
+ unsigned int tx_rsize = priv->dma_tx_size;
+ struct netdev_queue *dev_txq;
+ u8 queue_no = tqueue->queue_no;
+
+ dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
+
+ spin_lock(&tqueue->tx_lock);
+
+ priv->xstats.tx_clean++;
+ while (tqueue->dirty_tx != tqueue->cur_tx) {
+ unsigned int entry = tqueue->dirty_tx % tx_rsize;
+ struct sk_buff *skb = tqueue->tx_skbuff[entry];
+ struct sxgbe_tx_norm_desc *p;
+
+ p = tqueue->dma_tx + entry;
+
+ /* Check if the descriptor is owned by the DMA. */
+ if (priv->hw->desc->get_tx_owner(p))
+ break;
+
+ if (netif_msg_tx_done(priv))
+ pr_debug("%s: curr %d, dirty %d\n",
+ __func__, tqueue->cur_tx, tqueue->dirty_tx);
+
+ if (likely(tqueue->tx_skbuff_dma[entry])) {
+ dma_unmap_single(priv->device,
+ tqueue->tx_skbuff_dma[entry],
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ tqueue->tx_skbuff_dma[entry] = 0;
+ }
+
+ if (likely(skb)) {
+ dev_kfree_skb(skb);
+ tqueue->tx_skbuff[entry] = NULL;
+ }
+
+ priv->hw->desc->release_tx_desc(p);
+
+ tqueue->dirty_tx++;
+ }
+
+ /* wake up queue */
+ if (unlikely(netif_tx_queue_stopped(dev_txq) &&
+ sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
+ netif_tx_lock(priv->dev);
+ if (netif_tx_queue_stopped(dev_txq) &&
+ sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
+ if (netif_msg_tx_done(priv))
+ pr_debug("%s: restart transmit\n", __func__);
+ netif_tx_wake_queue(dev_txq);
+ }
+ netif_tx_unlock(priv->dev);
+ }
+
+ spin_unlock(&tqueue->tx_lock);
+}
+
+/**
+ * sxgbe_tx_all_clean:
+ * @priv: driver private structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
+{
+ u8 queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
+
+ sxgbe_tx_queue_clean(tqueue);
+ }
+
+ if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+ sxgbe_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
+ }
+}
+
+/**
+ * sxgbe_restart_tx_queue: irq TX error management function
+ * @priv: driver private structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
+{
+ struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
+ struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
+ queue_num);
+
+ /* stop the queue */
+ netif_tx_stop_queue(dev_txq);
+
+ /* stop the tx dma */
+ priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);
+
+ /* free the skbuffs of the ring */
+ tx_free_ring_skbufs(tx_ring);
+
+ /* initialise counters */
+ tx_ring->cur_tx = 0;
+ tx_ring->dirty_tx = 0;
+
+ /* start the tx dma */
+ priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);
+
+ priv->dev->stats.tx_errors++;
+
+ /* wakeup the queue */
+ netif_tx_wake_queue(dev_txq);
+}
+
+/**
+ * sxgbe_reset_all_tx_queues: irq TX error management function
+ * @priv: driver private structure
+ * Description: it cleans all the descriptors and
+ * restarts the transmission on all queues in case of errors.
+ */
+static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ /* On TX timeout of net device, resetting of all queues
+ * may not be proper way, revisit this later if needed
+ */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
+ sxgbe_restart_tx_queue(priv, queue_num);
+}
+
+/**
+ * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
+ * @priv: driver private structure
+ * Description:
+ * the XMAC exposes a set of capability registers that indicate the
+ * presence of optional features/functions.
+ * These can also be used to override the values passed through the
+ * platform data.
+ */
+static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
+{
+ int rval = 0;
+ struct sxgbe_hw_features *features = &priv->hw_cap;
+
+ /* Read First Capability Register CAP[0] */
+ rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
+ if (rval) {
+ features->pmt_remote_wake_up =
+ SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
+ features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
+ features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
+ features->tx_csum_offload =
+ SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
+ features->rx_csum_offload =
+ SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
+ features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
+ features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
+ features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
+ features->eee = SXGBE_HW_FEAT_EEE(rval);
+ }
+
+ /* Read Second Capability Register CAP[1] */
+ rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
+ if (rval) {
+ features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
+ features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
+ features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
+ features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
+ features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
+ features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
+ features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
+ features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
+ features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
+ features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
+ }
+
+ /* Read Third Capability Register CAP[2] */
+ rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
+ if (rval) {
+ features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
+ features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
+ features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
+ features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
+ features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
+ features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
+ }
+
+ return rval;
+}
+
+/**
+ * sxgbe_check_ether_addr: check if the MAC addr is valid
+ * @priv: driver private structure
+ * Description:
+ * verify that the MAC address is valid; if it is not, read it from the
+ * hardware and, failing that, generate a random one
+ */
+static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
+{
+ if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ priv->hw->mac->get_umac_addr((void __iomem *)
+ priv->ioaddr,
+ priv->dev->dev_addr, 0);
+ if (!is_valid_ether_addr(priv->dev->dev_addr))
+ eth_hw_addr_random(priv->dev);
+ }
+ dev_info(priv->device, "device MAC address %pM\n",
+ priv->dev->dev_addr);
+}
+
+/**
+ * sxgbe_init_dma_engine: DMA init.
+ * @priv: driver private structure
+ * Description:
+ * It inits the DMA invoking the specific SXGBE callback.
+ * Some DMA parameters can be passed from the platform;
+ * if they are not passed, sensible defaults are used.
+ */
+static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
+{
+ int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
+ int queue_num;
+
+ if (priv->plat->dma_cfg) {
+ pbl = priv->plat->dma_cfg->pbl;
+ fixed_burst = priv->plat->dma_cfg->fixed_burst;
+ burst_map = priv->plat->dma_cfg->burst_map;
+ }
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
+ priv->hw->dma->cha_init(priv->ioaddr, queue_num,
+ fixed_burst, pbl,
+ (priv->txq[queue_num])->dma_tx_phy,
+ (priv->rxq[queue_num])->dma_rx_phy,
+ priv->dma_tx_size, priv->dma_rx_size);
+
+ return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
+}
+
+/**
+ * sxgbe_init_mtl_engine: MTL init.
+ * @priv: driver private structure
+ * Description:
+ * It inits the MTL invoking the specific SXGBE callback.
+ */
+static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
+ priv->hw_cap.tx_mtl_qsize);
+ priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
+ }
+}
+
+/**
+ * sxgbe_disable_mtl_engine: MTL disable.
+ * @priv: driver private structure
+ * Description:
+ * It disables the MTL queues by invoking the specific SXGBE callback.
+ */
+static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
+ priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
+}
+
+
+/**
+ * sxgbe_tx_timer: mitigation SW timer for TX.
+ * @data: data pointer
+ * Description:
+ * This is the timer handler; it directly invokes sxgbe_tx_queue_clean.
+ */
+static void sxgbe_tx_timer(unsigned long data)
+{
+ struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data;
+ sxgbe_tx_queue_clean(p);
+}
+
+/**
+ * sxgbe_tx_init_coalesce: init TX mitigation options.
+ * @priv: driver private structure
+ * Description:
+ * This inits the transmit coalesce parameters: i.e. timer rate,
+ * timer handler and default threshold used for enabling the
+ * interrupt on completion bit.
+ */
+static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
+{
+ u8 queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ struct sxgbe_tx_queue *p = priv->txq[queue_num];
+ p->tx_coal_frames = SXGBE_TX_FRAMES;
+ p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
+ init_timer(&p->txtimer);
+ p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
+ p->txtimer.data = (unsigned long)p;
+ p->txtimer.function = sxgbe_tx_timer;
+ add_timer(&p->txtimer);
+ }
+}
+
+static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
+{
+ u8 queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ struct sxgbe_tx_queue *p = priv->txq[queue_num];
+ del_timer_sync(&p->txtimer);
+ }
+}
+
+/**
+ * sxgbe_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and a negative error code on failure.
+ */
+static int sxgbe_open(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int ret, queue_num;
+
+ clk_prepare_enable(priv->sxgbe_clk);
+
+ sxgbe_check_ether_addr(priv);
+
+ /* Init the phy */
+ ret = sxgbe_init_phy(dev);
+ if (ret) {
+ netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
+ __func__, ret);
+ goto phy_error;
+ }
+
+ /* Create and initialize the TX/RX descriptors chains. */
+ priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
+ priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
+ priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
+ priv->tx_tc = TC_DEFAULT;
+ priv->rx_tc = TC_DEFAULT;
+ init_dma_desc_rings(dev);
+
+ /* DMA initialization and SW reset */
+ ret = sxgbe_init_dma_engine(priv);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA initialization failed\n", __func__);
+ goto init_error;
+ }
+
+ /* MTL initialization */
+ sxgbe_init_mtl_engine(priv);
+
+ /* Copy the MAC addr into the HW */
+ priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
+
+ /* Initialize the MAC Core */
+ priv->hw->mac->core_init(priv->ioaddr);
+
+ /* Request the IRQ lines */
+ ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, priv->irq, ret);
+ goto init_error;
+ }
+
+ /* If the LPI irq is different from the mac irq
+ * register a dedicated handler
+ */
+ if (priv->lpi_irq != dev->irq) {
+ ret = devm_request_irq(priv->device, priv->lpi_irq,
+ sxgbe_common_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
+ goto init_error;
+ }
+ }
+
+ /* Request TX DMA irq lines */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ ret = devm_request_irq(priv->device,
+ (priv->txq[queue_num])->irq_no,
+ sxgbe_tx_interrupt, 0,
+ dev->name, priv->txq[queue_num]);
+ if (unlikely(ret < 0)) {
+ netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
+ __func__, (priv->txq[queue_num])->irq_no, ret);
+ goto init_error;
+ }
+ }
+
+ /* Request RX DMA irq lines */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+ ret = devm_request_irq(priv->device,
+ (priv->rxq[queue_num])->irq_no,
+ sxgbe_rx_interrupt, 0,
+ dev->name, priv->rxq[queue_num]);
+ if (unlikely(ret < 0)) {
+ netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
+ __func__, priv->irq, ret);
+ goto init_error;
+ }
+ }
+
+ /* Enable the MAC Rx/Tx */
+ priv->hw->mac->enable_tx(priv->ioaddr, true);
+ priv->hw->mac->enable_rx(priv->ioaddr, true);
+
+ /* Set the HW DMA mode and the COE */
+ sxgbe_mtl_operation_mode(priv);
+
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));
+
+ priv->xstats.tx_threshold = priv->tx_tc;
+ priv->xstats.rx_threshold = priv->rx_tc;
+
+ /* Start the ball rolling... */
+ netdev_dbg(dev, "DMA RX/TX processes started...\n");
+ priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
+ priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ /* initialise TX coalesce parameters */
+ sxgbe_tx_init_coalesce(priv);
+
+ if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
+ priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
+ }
+
+ priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
+ priv->eee_enabled = sxgbe_eee_init(priv);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
+init_error:
+ free_dma_desc_resources(priv);
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+phy_error:
+ clk_disable_unprepare(priv->sxgbe_clk);
+
+ return ret;
+}
+
+/**
+ * sxgbe_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int sxgbe_release(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ if (priv->eee_enabled)
+ del_timer_sync(&priv->eee_ctrl_timer);
+
+ /* Stop and disconnect the PHY */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ netif_tx_stop_all_queues(dev);
+
+ napi_disable(&priv->napi);
+
+ /* delete TX timers */
+ sxgbe_tx_del_timer(priv);
+
+ /* Stop TX/RX DMA and clear the descriptors */
+ priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
+ priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
+
+ /* disable MTL queue */
+ sxgbe_disable_mtl_engine(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC Rx/Tx */
+ priv->hw->mac->enable_tx(priv->ioaddr, false);
+ priv->hw->mac->enable_rx(priv->ioaddr, false);
+
+ clk_disable_unprepare(priv->sxgbe_clk);
+
+ return 0;
+}
+
+/* Prepare first Tx descriptor for doing TSO operation */
+void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
+ struct sxgbe_tx_norm_desc *first_desc,
+ struct sk_buff *skb)
+{
+ unsigned int total_hdr_len, tcp_hdr_len;
+
+ /* Write first Tx descriptor with appropriate value */
+ tcp_hdr_len = tcp_hdrlen(skb);
+ total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;
+
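+ /* only the Ethernet/IP/TCP headers are mapped here; the payload is
+ * attached from the skb fragments by the caller
+ */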
+ first_desc->tdes01 = dma_map_single(priv->device, skb->data,
+ total_hdr_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, first_desc->tdes01))
+ pr_err("%s: TX dma mapping failed!!\n", __func__);
+
+ first_desc->tdes23.tx_rd_des23.first_desc = 1;
+ priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
+ tcp_hdr_len,
+ skb->len - total_hdr_len);
+}
+
+/**
+ * sxgbe_xmit: Tx entry point of the driver
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : this is the tx entry point of the driver.
+ * It programs the chain or the ring and supports oversized frames
+ * and SG feature.
+ */
+static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned int entry, frag_num;
+ int cksum_flag = 0;
+ struct netdev_queue *dev_txq;
+ unsigned int txq_index = skb_get_queue_mapping(skb);
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ unsigned int tx_rsize = priv->dma_tx_size;
+ struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
+ struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
+ struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int no_pagedlen = skb_headlen(skb);
+ int is_jumbo = 0;
+ u16 cur_mss = skb_shinfo(skb)->gso_size;
+ u32 ctxt_desc_req = 0;
+
+ /* get the TX queue handle */
+ dev_txq = netdev_get_tx_queue(dev, txq_index);
+
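+ /* a context descriptor is needed when the MSS changes for TSO, or
+ * when VLAN insertion / hardware TX timestamping is requested
+ */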
+ if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
+ ctxt_desc_req = 1;
+
+ if (unlikely(vlan_tx_tag_present(skb) ||
+ ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ tqueue->hwts_tx_en)))
+ ctxt_desc_req = 1;
+
+ /* get the spinlock */
+ spin_lock(&tqueue->tx_lock);
+
+ if (priv->tx_path_in_lpi_mode)
+ sxgbe_disable_eee_mode(priv);
+
+ if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
+ if (!netif_tx_queue_stopped(dev_txq)) {
+ netif_tx_stop_queue(dev_txq);
+ netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
+ __func__, txq_index);
+ }
+ /* release the spin lock in case of BUSY */
+ spin_unlock(&tqueue->tx_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = tqueue->cur_tx % tx_rsize;
+ tx_desc = tqueue->dma_tx + entry;
+
+ first_desc = tx_desc;
+ if (ctxt_desc_req)
+ ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;
+
+ /* save the skb address */
+ tqueue->tx_skbuff[entry] = skb;
+
+ if (!is_jumbo) {
+ if (likely(skb_is_gso(skb))) {
+ /* TSO support */
+ if (unlikely(tqueue->prev_mss != cur_mss)) {
+ priv->hw->desc->tx_ctxt_desc_set_mss(
+ ctxt_desc, cur_mss);
+ priv->hw->desc->tx_ctxt_desc_set_tcmssv(
+ ctxt_desc);
+ priv->hw->desc->tx_ctxt_desc_reset_ostc(
+ ctxt_desc);
+ priv->hw->desc->tx_ctxt_desc_set_ctxt(
+ ctxt_desc);
+ priv->hw->desc->tx_ctxt_desc_set_owner(
+ ctxt_desc);
+
+ entry = (++tqueue->cur_tx) % tx_rsize;
+ first_desc = tqueue->dma_tx + entry;
+
+ tqueue->prev_mss = cur_mss;
+ }
+ sxgbe_tso_prepare(priv, first_desc, skb);
+ } else {
+ tx_desc->tdes01 = dma_map_single(priv->device,
+ skb->data, no_pagedlen, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, tx_desc->tdes01))
+ netdev_err(dev, "%s: TX dma mapping failed!!\n",
+ __func__);
+
+ priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
+ no_pagedlen, cksum_flag);
+ }
+ }
+
+ for (frag_num = 0; frag_num < nr_frags; frag_num++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
+ int len = skb_frag_size(frag);
+
+ entry = (++tqueue->cur_tx) % tx_rsize;
+ tx_desc = tqueue->dma_tx + entry;
+ tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+
+ tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
+ tqueue->tx_skbuff[entry] = NULL;
+
+ /* prepare the descriptor */
+ priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
+ len, cksum_flag);
+ /* memory barrier to flush descriptor */
+ wmb();
+
+ /* set the owner */
+ priv->hw->desc->set_tx_owner(tx_desc);
+ }
+
+ /* close the descriptors */
+ priv->hw->desc->close_tx_desc(tx_desc);
+
+ /* memory barrier to flush descriptor */
+ wmb();
+
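+ /* interrupt mitigation: once more than tx_coal_frames frames are
+ * pending, clear the IC bit and let the per-queue timer reclaim the
+ * descriptors instead
+ */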
+ tqueue->tx_count_frames += nr_frags + 1;
+ if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
+ priv->hw->desc->clear_tx_ic(tx_desc);
+ priv->xstats.tx_reset_ic_bit++;
+ mod_timer(&tqueue->txtimer,
+ SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
+ } else {
+ tqueue->tx_count_frames = 0;
+ }
+
+ /* set owner for first desc */
+ priv->hw->desc->set_tx_owner(first_desc);
+
+ /* memory barrier to flush descriptor */
+ wmb();
+
+ tqueue->cur_tx++;
+
+ /* display current ring */
+ netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
+ __func__, tqueue->cur_tx % tx_rsize,
+ tqueue->dirty_tx % tx_rsize, entry,
+ first_desc, nr_frags);
+
+ if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
+ netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
+ __func__);
+ netif_tx_stop_queue(dev_txq);
+ }
+
+ dev->stats.tx_bytes += skb->len;
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ tqueue->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->hw->desc->tx_enable_tstamp(first_desc);
+ }
+
+ if (!tqueue->hwts_tx_en)
+ skb_tx_timestamp(skb);
+
+ priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
+
+ spin_unlock(&tqueue->tx_lock);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * sxgbe_rx_refill: refill the used RX skb buffers
+ * @priv: driver private structure
+ * Description: reallocate the skbs consumed by the zero-copy
+ * reception process and hand the descriptors back to the DMA.
+ */
+static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
+{
+ unsigned int rxsize = priv->dma_rx_size;
+ int bfsize = priv->dma_buf_sz;
+ u8 qnum = priv->cur_rx_qnum;
+
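+ /* walk the entries consumed by sxgbe_rx() (dirty_rx .. cur_rx),
+ * attach a fresh skb where needed and hand the descriptor back to
+ * the DMA
+ */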
+ for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
+ priv->rxq[qnum]->dirty_rx++) {
+ unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
+ struct sxgbe_rx_norm_desc *p;
+
+ p = priv->rxq[qnum]->dma_rx + entry;
+
+ if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
+
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rxq[qnum]->rx_skbuff[entry] = skb;
+ priv->rxq[qnum]->rx_skbuff_dma[entry] =
+ dma_map_single(priv->device, skb->data, bfsize,
+ DMA_FROM_DEVICE);
+
+ p->rdes23.rx_rd_des23.buf2_addr =
+ priv->rxq[qnum]->rx_skbuff_dma[entry];
+ }
+
+ /* Added memory barrier for RX descriptor modification */
+ wmb();
+ priv->hw->desc->set_rx_owner(p);
+ /* Added memory barrier for RX descriptor modification */
+ wmb();
+ }
+}
+
+/**
+ * sxgbe_rx: receive the frames from the remote host
+ * @priv: driver private structure
+ * @limit: napi budget.
+ * Description: this is the function called by the napi poll method.
+ * It gets all the frames inside the ring.
+ */
+static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
+{
+ u8 qnum = priv->cur_rx_qnum;
+ unsigned int rxsize = priv->dma_rx_size;
+ unsigned int entry = priv->rxq[qnum]->cur_rx;
+ unsigned int next_entry = 0;
+ unsigned int count = 0;
+ int checksum;
+ int status;
+
+ while (count < limit) {
+ struct sxgbe_rx_norm_desc *p;
+ struct sk_buff *skb;
+ int frame_len;
+
+ p = priv->rxq[qnum]->dma_rx + entry;
+
+ if (priv->hw->desc->get_rx_owner(p))
+ break;
+
+ count++;
+
+ next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
+ prefetch(priv->rxq[qnum]->dma_rx + next_entry);
+
+ /* Read the status of the incoming frame and also get checksum
+ * value based on whether it is enabled in SXGBE hardware or
+ * not.
+ */
+ status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
+ &checksum);
+ if (unlikely(status < 0)) {
+ entry = next_entry;
+ continue;
+ }
+ if (unlikely(!priv->rxcsum_insertion))
+ checksum = CHECKSUM_NONE;
+
+ skb = priv->rxq[qnum]->rx_skbuff[entry];
+
+ if (unlikely(!skb)) {
+ netdev_err(priv->dev, "rx descriptor is not consistent\n");
+ break;
+ }
+
+ prefetch(skb->data - NET_IP_ALIGN);
+ priv->rxq[qnum]->rx_skbuff[entry] = NULL;
+
+ frame_len = priv->hw->desc->get_rx_frame_len(p);
+
+ skb_put(skb, frame_len);
+
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ skb->ip_summed = checksum;
+ if (checksum == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&priv->napi, skb);
+
+ entry = next_entry;
+ }
+
+ sxgbe_rx_refill(priv);
+
+ return count;
+}
+
+/**
+ * sxgbe_poll - sxgbe poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ * all interfaces.
+ * Description :
+ * To look at the incoming frames and clear the tx resources.
+ */
+static int sxgbe_poll(struct napi_struct *napi, int budget)
+{
+ struct sxgbe_priv_data *priv = container_of(napi,
+ struct sxgbe_priv_data, napi);
+ int work_done = 0;
+ u8 qnum = priv->cur_rx_qnum;
+
+ priv->xstats.napi_poll++;
+ /* first, clean the tx queues */
+ sxgbe_tx_all_clean(priv);
+
+ work_done = sxgbe_rx(priv, budget);
+ if (work_done < budget) {
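+ /* all pending work done: re-enable the RX DMA interrupt */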
+ napi_complete(napi);
+ priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
+ }
+
+ return work_done;
+}
+
+/**
+ * sxgbe_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void sxgbe_tx_timeout(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ sxgbe_reset_all_tx_queues(priv);
+}
+
+/**
+ * sxgbe_common_interrupt - main ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the main driver interrupt service routine.
+ * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
+ * interrupts.
+ */
+static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
+{
+ struct net_device *netdev = (struct net_device *)dev_id;
+ struct sxgbe_priv_data *priv = netdev_priv(netdev);
+ int status;
+
+ status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
+ /* For LPI we need to save the tx status */
+ if (status & TX_ENTRY_LPI_MODE) {
+ priv->xstats.tx_lpi_entry_n++;
+ priv->tx_path_in_lpi_mode = true;
+ }
+ if (status & TX_EXIT_LPI_MODE) {
+ priv->xstats.tx_lpi_exit_n++;
+ priv->tx_path_in_lpi_mode = false;
+ }
+ if (status & RX_ENTRY_LPI_MODE)
+ priv->xstats.rx_lpi_entry_n++;
+ if (status & RX_EXIT_LPI_MODE)
+ priv->xstats.rx_lpi_exit_n++;
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * sxgbe_tx_interrupt - TX DMA ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the TX queue pointer.
+ * Description: this is the tx dma interrupt service routine.
+ */
+static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
+{
+ int status;
+ struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
+ struct sxgbe_priv_data *priv = txq->priv_ptr;
+
+ /* get the channel status */
+ status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
+ &priv->xstats);
+ /* check for normal path */
+ if (likely((status & handle_tx)))
+ napi_schedule(&priv->napi);
+
+ /* check for unrecoverable error */
+ if (unlikely((status & tx_hard_error)))
+ sxgbe_restart_tx_queue(priv, txq->queue_no);
+
+ /* check for TC configuration change */
+ if (unlikely((status & tx_bump_tc) &&
+ (priv->tx_tc != SXGBE_MTL_SFMODE) &&
+ (priv->tx_tc < 512))) {
+ /* step of TX TC is 32 till 128, otherwise 64 */
+ priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
+ priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
+ txq->queue_no, priv->tx_tc);
+ priv->xstats.tx_threshold = priv->tx_tc;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * sxgbe_rx_interrupt - RX DMA ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the RX queue pointer.
+ * Description: this is the rx dma interrupt service routine.
+ */
+static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
+{
+ int status;
+ struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
+ struct sxgbe_priv_data *priv = rxq->priv_ptr;
+
+ /* get the channel status */
+ status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
+ &priv->xstats);
+
+ if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
+ priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
+ __napi_schedule(&priv->napi);
+ }
+
+ /* check for TC configuration change */
+ if (unlikely((status & rx_bump_tc) &&
+ (priv->rx_tc != SXGBE_MTL_SFMODE) &&
+ (priv->rx_tc < 128))) {
+ /* step of TC is 32 */
+ priv->rx_tc += 32;
+ priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
+ rxq->queue_no, priv->rx_tc);
+ priv->xstats.rx_threshold = priv->rx_tc;
+ }
+
+ return IRQ_HANDLED;
+}
+
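+/* Read a 64-bit MMC counter exposed as a low/high 32-bit register pair */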
+static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
+{
+ u64 val = readl(ioaddr + reg_lo);
+
+ val |= ((u64)readl(ioaddr + reg_hi)) << 32;
+
+ return val;
+}
+
+
+/* sxgbe_get_stats64 - entry point to see statistical information of device
+ * @dev : device pointer.
+ * @stats : pointer to hold all the statistical information of device.
+ * Description:
+ * This function is a driver entry point whenever ifconfig command gets
+ * executed to see device statistics. Statistics are number of
+ * bytes sent or received, errors occurred, etc.
+ * Return value:
+ * This function returns various statistical information of device.
+ */
+static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->ioaddr;
+ u64 count;
+
+ spin_lock(&priv->stats_lock);
+ /* Freeze the counter registers before reading value otherwise it may
+ * get updated by hardware while we are reading them
+ */
+ writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);
+
+ stats->rx_bytes = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXOCTETLO_GCNT_REG,
+ SXGBE_MMC_RXOCTETHI_GCNT_REG);
+
+ stats->rx_packets = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXFRAMELO_GBCNT_REG,
+ SXGBE_MMC_RXFRAMEHI_GBCNT_REG);
+
+ stats->multicast = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXMULTILO_GCNT_REG,
+ SXGBE_MMC_RXMULTIHI_GCNT_REG);
+
+ stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXCRCERRLO_REG,
+ SXGBE_MMC_RXCRCERRHI_REG);
+
+ stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXLENERRLO_REG,
+ SXGBE_MMC_RXLENERRHI_REG);
+
+ stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
+ SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);
+
+ stats->tx_bytes = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_TXOCTETLO_GCNT_REG,
+ SXGBE_MMC_TXOCTETHI_GCNT_REG);
+
+ count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
+ SXGBE_MMC_TXFRAMEHI_GBCNT_REG);
+
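+ /* TX errors = (good + bad frames) - good frames */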
+ stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
+ SXGBE_MMC_TXFRAMEHI_GCNT_REG);
+ stats->tx_errors = count - stats->tx_errors;
+ stats->tx_packets = count;
+ stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
+ SXGBE_MMC_TXUFLWHI_GBCNT_REG);
+ writel(0, ioaddr + SXGBE_MMC_CTL_REG);
+ spin_unlock(&priv->stats_lock);
+
+ return stats;
+}
+
+/* sxgbe_set_features - entry point to set offload features of the device.
+ * @dev : device pointer.
+ * @features : features which are required to be set.
+ * Description:
+ * This function is a driver entry point and called by Linux kernel whenever
+ * any device features are set or reset by user.
+ * Return value:
+ * This function returns 0 after setting or resetting device features.
+ */
+static int sxgbe_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ netdev_features_t changed = dev->features ^ features;
+
+ if (changed & NETIF_F_RXCSUM) {
+ if (features & NETIF_F_RXCSUM) {
+ priv->hw->mac->enable_rx_csum(priv->ioaddr);
+ priv->rxcsum_insertion = true;
+ } else {
+ priv->hw->mac->disable_rx_csum(priv->ioaddr);
+ priv->rxcsum_insertion = false;
+ }
+ }
+
+ return 0;
+}
+
+/* sxgbe_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transmission Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and a negative error code on failure.
+ */
+static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
+{
+ /* RFC 791, page 25, "Every internet module must be able to forward
+ * a datagram of 68 octets without further fragmentation."
+ */
+ if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
+ netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
+ MIN_MTU, MAX_MTU);
+ return -EINVAL;
+ }
+
+ /* Return if the buffer sizes will not change */
+ if (dev->mtu == new_mtu)
+ return 0;
+
+ dev->mtu = new_mtu;
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* The receive ring buffer size depends on the MTU, so if the MTU
+ * changes the receive ring buffers must be reinitialised. Hence
+ * bring the interface down and back up.
+ */
+ sxgbe_release(dev);
+ return sxgbe_open(dev);
+}
+
+static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ /* For MAC Addr registers we have to set the Address Enable (AE)
+ * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+ * is RO.
+ */
+ writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
+}
+
+/**
+ * sxgbe_set_rx_mode - entry point for setting the receive mode of
+ * a device (unicast, multicast, promiscuous addressing)
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever different receive mode like unicast, multicast and promiscuous
+ * must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void sxgbe_set_rx_mode(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
+ unsigned int value = 0;
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+ int reg = 1;
+
+ netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+
+ if (dev->flags & IFF_PROMISC) {
+ value = SXGBE_FRAME_FILTER_PR;
+
+ } else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ value = SXGBE_FRAME_FILTER_PM; /* pass all multi */
+ writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
+ writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);
+
+ } else if (!netdev_mc_empty(dev)) {
+ /* Hash filter for multicast */
+ value = SXGBE_FRAME_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table
+ */
+ int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register.
+ */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
+ writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
+ }
+
+ /* Handle multiple unicast addresses (perfect filtering) */
+ if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES) {
+ /* Switch to promiscuous mode if more than 16 addrs
+ * are required
+ */
+ value |= SXGBE_FRAME_FILTER_PR;
+ } else {
+ netdev_for_each_uc_addr(ha, dev) {
+ sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+#ifdef FRAME_FILTER_DEBUG
+ /* Enable Receive all mode (to debug filtering_fail errors) */
+ value |= SXGBE_FRAME_FILTER_RA;
+#endif
+ writel(value, ioaddr + SXGBE_FRAME_FILTER);
+
+ netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
+ readl(ioaddr + SXGBE_FRAME_FILTER),
+ readl(ioaddr + SXGBE_HASH_HIGH),
+ readl(ioaddr + SXGBE_HASH_LOW));
+}
+
+/**
+ * sxgbe_config - entry point for changing configuration mode passed on by
+ * ifconfig
+ * @dev : pointer to the device structure
+ * @map : pointer to the device mapping structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever some device configuration is changed.
+ * Return value:
+ * This function returns 0 if success and appropriate error otherwise.
+ */
+static int sxgbe_config(struct net_device *dev, struct ifmap *map)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ /* Can't act on a running interface */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != (unsigned long)priv->ioaddr) {
+ netdev_warn(dev, "can't change I/O address\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Don't allow changing the IRQ */
+ if (map->irq != priv->irq) {
+ netdev_warn(dev, "not change IRQ number %d\n", priv->irq);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * sxgbe_poll_controller - entry point for polling receive by device
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled.
+ * Return value:
+ * Void.
+ */
+static void sxgbe_poll_controller(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ disable_irq(priv->irq);
+ sxgbe_rx_interrupt(priv->irq, dev);
+ enable_irq(priv->irq);
+}
+#endif
+
+/* sxgbe_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently it supports the phy_mii_ioctl(...) commands.
+ */
+static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!priv->phydev)
+ return -EINVAL;
+ ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops sxgbe_netdev_ops = {
+ .ndo_open = sxgbe_open,
+ .ndo_start_xmit = sxgbe_xmit,
+ .ndo_stop = sxgbe_release,
+ .ndo_get_stats64 = sxgbe_get_stats64,
+ .ndo_change_mtu = sxgbe_change_mtu,
+ .ndo_set_features = sxgbe_set_features,
+ .ndo_set_rx_mode = sxgbe_set_rx_mode,
+ .ndo_tx_timeout = sxgbe_tx_timeout,
+ .ndo_do_ioctl = sxgbe_ioctl,
+ .ndo_set_config = sxgbe_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sxgbe_poll_controller,
+#endif
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+/* Get the hardware ops */
+static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
+{
+ ops_ptr->mac = sxgbe_get_core_ops();
+ ops_ptr->desc = sxgbe_get_desc_ops();
+ ops_ptr->dma = sxgbe_get_dma_ops();
+ ops_ptr->mtl = sxgbe_get_mtl_ops();
+
+ /* set the MDIO communication Address/Data registers */
+ ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG;
+ ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG;
+
+ /* Assign the default link settings: SXGBE defines no default
+ * register values, so port and duplex are set to 0
+ */
+ ops_ptr->link.port = 0;
+ ops_ptr->link.duplex = 0;
+ ops_ptr->link.speed = SXGBE_SPEED_10G;
+}
+
+/**
+ * sxgbe_hw_init - Init the SXGBE device
+ * @priv: driver private structure
+ * Description: this function checks the HW capability
+ * (if supported) and sets the driver's features.
+ */
+static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
+{
+ u32 ctrl_ids;
+
+ priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
+ if (!priv->hw)
+ return -ENOMEM;
+
+ /* get the hardware ops */
+ sxgbe_get_ops(priv->hw);
+
+ /* get the controller id */
+ ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
+ priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
+ priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
+ pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
+ priv->hw->ctrl_uid, priv->hw->ctrl_id);
+
+ /* get the H/W features */
+ if (!sxgbe_get_hw_features(priv))
+ pr_info("Hardware features not found\n");
+
+ if (priv->hw_cap.tx_csum_offload)
+ pr_info("TX Checksum offload supported\n");
+
+ if (priv->hw_cap.rx_csum_offload)
+ pr_info("RX Checksum offload supported\n");
+
+ return 0;
+}
+
+/**
+ * sxgbe_drv_probe
+ * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
+ * Description: this is the main probe function; it allocates the
+ * net device and the driver private structure and initialises them.
+ */
+struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
+ struct sxgbe_plat_data *plat_dat,
+ void __iomem *addr)
+{
+ struct sxgbe_priv_data *priv;
+ struct net_device *ndev;
+ int ret;
+ u8 queue_num;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
+ SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
+ if (!ndev)
+ return NULL;
+
+ SET_NETDEV_DEV(ndev, device);
+
+ priv = netdev_priv(ndev);
+ priv->device = device;
+ priv->dev = ndev;
+
+ sxgbe_set_ethtool_ops(ndev);
+ priv->plat = plat_dat;
+ priv->ioaddr = addr;
+
+ /* Verify driver arguments */
+ sxgbe_verify_args();
+
+ /* Init MAC and get the capabilities */
+ ret = sxgbe_hw_init(priv);
+ if (ret)
+ goto error_free_netdev;
+
+ /* allocate memory resources for Descriptor rings */
+ ret = txring_mem_alloc(priv);
+ if (ret)
+ goto error_free_hw;
+
+ ret = rxring_mem_alloc(priv);
+ if (ret)
+ goto error_free_hw;
+
+ ndev->netdev_ops = &sxgbe_netdev_ops;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GRO;
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);
+
+ /* assign filtering support */
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ /* Enable TCP segmentation offload for all DMA channels */
+ if (priv->hw_cap.tcpseg_offload) {
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
+ }
+ }
+
+ /* Enable Rx checksum offload */
+ if (priv->hw_cap.rx_csum_offload) {
+ priv->hw->mac->enable_rx_csum(priv->ioaddr);
+ priv->rxcsum_insertion = true;
+ }
+
+ /* Initialise pause frame settings */
+ priv->rx_pause = 1;
+ priv->tx_pause = 1;
+
+ /* The RX watchdog is available; enable it depending on the platform data */
+ if (!priv->plat->riwt_off) {
+ priv->use_riwt = 1;
+ pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
+ }
+
+ netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
+
+ spin_lock_init(&priv->stats_lock);
+
+ priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
+ if (IS_ERR(priv->sxgbe_clk)) {
+ netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
+ __func__);
+ goto error_napi_del;
+ }
+
+ /* If a specific clk_csr value is passed from the platform,
+ * the CSR Clock Range selection is fixed and cannot be
+ * changed at run-time. Otherwise the driver sets the MDC
+ * clock dynamically according to the actual CSR clock input.
+ */
+ if (!priv->plat->clk_csr)
+ sxgbe_clk_csr_set(priv);
+ else
+ priv->clk_csr = priv->plat->clk_csr;
+
+ /* MDIO bus Registration */
+ ret = sxgbe_mdio_register(ndev);
+ if (ret < 0) {
+ netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
+ __func__, priv->plat->bus_id);
+ goto error_clk_put;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ pr_err("%s: ERROR %i registering the device\n", __func__, ret);
+ goto error_mdio_unregister;
+ }
+
+ sxgbe_check_ether_addr(priv);
+
+ return priv;
+
+error_mdio_unregister:
+ sxgbe_mdio_unregister(ndev);
+error_clk_put:
+ clk_put(priv->sxgbe_clk);
+error_napi_del:
+ netif_napi_del(&priv->napi);
+error_free_hw:
+ kfree(priv->hw);
+error_free_netdev:
+ free_netdev(ndev);
+
+ return NULL;
+}
+
+/**
+ * sxgbe_drv_remove
+ * @ndev: net device pointer
+ * Description: this function stops the TX/RX DMA processes, disables the
+ * MAC RX/TX, unregisters the device and releases all resources.
+ */
+int sxgbe_drv_remove(struct net_device *ndev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ netdev_info(ndev, "%s: removing driver\n", __func__);
+
+ priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
+ priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
+
+ priv->hw->mac->enable_tx(priv->ioaddr, false);
+ priv->hw->mac->enable_rx(priv->ioaddr, false);
+
+ unregister_netdev(ndev);
+
+ sxgbe_mdio_unregister(ndev);
+
+ clk_put(priv->sxgbe_clk);
+
+ netif_napi_del(&priv->napi);
+
+ kfree(priv->hw);
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int sxgbe_suspend(struct net_device *ndev)
+{
+ return 0;
+}
+
+int sxgbe_resume(struct net_device *ndev)
+{
+ return 0;
+}
+
+int sxgbe_freeze(struct net_device *ndev)
+{
+ return -ENOSYS;
+}
+
+int sxgbe_restore(struct net_device *ndev)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/* Driver is configured as Platform driver */
+static int __init sxgbe_init(void)
+{
+ int ret;
+
+ ret = sxgbe_register_platform();
+ if (ret)
+ goto err;
+ return 0;
+err:
+ pr_err("driver registration failed\n");
+ return ret;
+}
+
+static void __exit sxgbe_exit(void)
+{
+ sxgbe_unregister_platform();
+}
+
+module_init(sxgbe_init);
+module_exit(sxgbe_exit);
+
+#ifndef MODULE
+static int __init sxgbe_cmdline_opt(char *str)
+{
+ char *opt;
+
+ if (!str || !*str)
+ return -EINVAL;
+ while ((opt = strsep(&str, ",")) != NULL) {
+ if (!strncmp(opt, "eee_timer:", 6)) {
+ if (kstrtoint(opt + 10, 0, &eee_timer))
+ goto err;
+ }
+ }
+ return 0;
+
+err:
+ pr_err("%s: ERROR broken module parameter conversion\n", __func__);
+ return -EINVAL;
+}
+
+__setup("sxgbeeth=", sxgbe_cmdline_opt);
+#endif /* MODULE */
+
+
+
+MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
+
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
+MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
+
+MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
+MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
+MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
+MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
new file mode 100644
index 000000000000..01af2cbb479d
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -0,0 +1,244 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/sxgbe_platform.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+
+#define SXGBE_SMA_WRITE_CMD 0x01 /* write command */
+#define SXGBE_SMA_PREAD_CMD	0x02 /* post read increment address */
+#define SXGBE_SMA_READ_CMD 0x03 /* read command */
+#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */
+#define SXGBE_MII_BUSY 0x00800000 /* mii busy */
+
+static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
+{
+ unsigned long fin_time = jiffies + 3 * HZ; /* 3 seconds */
+
+ while (!time_after(jiffies, fin_time)) {
+ if (!(readl(ioaddr + mii_data) & SXGBE_MII_BUSY))
+ return 0;
+ cpu_relax();
+ }
+
+ return -EBUSY;
+}
+
+static void sxgbe_mdio_ctrl_data(struct sxgbe_priv_data *sp, u32 cmd,
+ u16 phydata)
+{
+ u32 reg = phydata;
+
+ reg |= (cmd << 16) | SXGBE_SMA_SKIP_ADDRFRM |
+ ((sp->clk_csr & 0x7) << 19) | SXGBE_MII_BUSY;
+ writel(reg, sp->ioaddr + sp->hw->mii.data);
+}
+
+static void sxgbe_mdio_c45(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ u32 reg;
+
+ /* set mdio address register */
+ reg = ((phyreg >> 16) & 0x1f) << 21;
+ reg |= (phyaddr << 16) | (phyreg & 0xffff);
+ writel(reg, sp->ioaddr + sp->hw->mii.addr);
+
+ sxgbe_mdio_ctrl_data(sp, cmd, phydata);
+}
+
+static void sxgbe_mdio_c22(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ u32 reg;
+
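+	/* presumably flags this PHY address as a Clause 22 port for the
+	 * following access
+	 */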
+ writel(1 << phyaddr, sp->ioaddr + SXGBE_MDIO_CLAUSE22_PORT_REG);
+
+ /* set mdio address register */
+ reg = (phyaddr << 16) | (phyreg & 0x1f);
+ writel(reg, sp->ioaddr + sp->hw->mii.addr);
+
+ sxgbe_mdio_ctrl_data(sp, cmd, phydata);
+}
+
+static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ const struct mii_regs *mii = &sp->hw->mii;
+ int rc;
+
+ rc = sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+ if (rc < 0)
+ return rc;
+
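+	/* For Clause 45 accesses the MDIO core sets MII_ADDR_C45 (bit 30)
+	 * in phyreg, with the device address in bits 20:16 and the
+	 * register address in bits 15:0.
+	 */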
+ if (phyreg & MII_ADDR_C45) {
+ sxgbe_mdio_c45(sp, cmd, phyaddr, phyreg, phydata);
+ } else {
+ /* Ports 0-3 only support C22. */
+ if (phyaddr >= 4)
+ return -ENODEV;
+
+ sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata);
+ }
+
+ return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+}
+
+/**
+ * sxgbe_mdio_read
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of phy port
+ * @phyreg: address of the register within the PHY
+ * Description: this function is used for C45 and C22 MDIO read
+ */
+static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+ int rc;
+
+ rc = sxgbe_mdio_access(priv, SXGBE_SMA_READ_CMD, phyaddr, phyreg, 0);
+ if (rc < 0)
+ return rc;
+
+ return readl(priv->ioaddr + priv->hw->mii.data) & 0xffff;
+}
+
+/**
+ * sxgbe_mdio_write
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of phy port
+ * @phyreg: address of the PHY register
+ * @phydata: data to be written into phy register
+ * Description: this function is used for C45 and C22 MDIO write
+ */
+static int sxgbe_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ return sxgbe_mdio_access(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg,
+ phydata);
+}
+
+int sxgbe_mdio_register(struct net_device *ndev)
+{
+ struct mii_bus *mdio_bus;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+ struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
+ int err, phy_addr;
+ int *irqlist;
+ bool act;
+
+ /* allocate the new mdio bus */
+ mdio_bus = mdiobus_alloc();
+ if (!mdio_bus) {
+ netdev_err(ndev, "%s: mii bus allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (mdio_data->irqs)
+ irqlist = mdio_data->irqs;
+ else
+ irqlist = priv->mii_irq;
+
+ /* assign mii bus fields */
+ mdio_bus->name = "samsxgbe";
+ mdio_bus->read = &sxgbe_mdio_read;
+ mdio_bus->write = &sxgbe_mdio_write;
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ mdio_bus->name, priv->plat->bus_id);
+ mdio_bus->priv = ndev;
+ mdio_bus->phy_mask = mdio_data->phy_mask;
+ mdio_bus->parent = priv->device;
+
+ /* register with kernel subsystem */
+ err = mdiobus_register(mdio_bus);
+ if (err != 0) {
+ netdev_err(ndev, "mdiobus register failed\n");
+ goto mdiobus_err;
+ }
+
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ struct phy_device *phy = mdio_bus->phy_map[phy_addr];
+
+ if (phy) {
+ char irq_num[4];
+ char *irq_str;
+ /* If an IRQ was provided to be assigned after
+ * the bus probe, do it here.
+ */
+ if ((mdio_data->irqs == NULL) &&
+ (mdio_data->probed_phy_irq > 0)) {
+ irqlist[phy_addr] = mdio_data->probed_phy_irq;
+ phy->irq = mdio_data->probed_phy_irq;
+ }
+
+ /* If we're going to bind the MAC to this PHY bus,
+ * and no PHY number was provided to the MAC,
+ * use the one probed here.
+ */
+ if (priv->plat->phy_addr == -1)
+ priv->plat->phy_addr = phy_addr;
+
+ act = (priv->plat->phy_addr == phy_addr);
+ switch (phy->irq) {
+ case PHY_POLL:
+ irq_str = "POLL";
+ break;
+ case PHY_IGNORE_INTERRUPT:
+ irq_str = "IGNORE";
+ break;
+ default:
+ sprintf(irq_num, "%d", phy->irq);
+ irq_str = irq_num;
+ break;
+ }
+ netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
+ phy->phy_id, phy_addr, irq_str,
+ dev_name(&phy->dev), act ? " active" : "");
+ }
+ }
+
+ priv->mii = mdio_bus;
+
+ return 0;
+
+mdiobus_err:
+ mdiobus_free(mdio_bus);
+ return err;
+}
+
+int sxgbe_mdio_unregister(struct net_device *ndev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ if (!priv->mii)
+ return 0;
+
+ mdiobus_unregister(priv->mii);
+ priv->mii->priv = NULL;
+ mdiobus_free(priv->mii);
+ priv->mii = NULL;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
new file mode 100644
index 000000000000..324681c2bb74
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
@@ -0,0 +1,254 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/jiffies.h>
+
+#include "sxgbe_mtl.h"
+#include "sxgbe_reg.h"
+
+static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg,
+ unsigned int raa)
+{
+ u32 reg_val;
+
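+	/* ETS_RST keeps everything except the scheduling-algorithm field
+	 * (bits 6:5), which is cleared before the new algorithm is set.
+	 */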
+ reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG);
+ reg_val &= ETS_RST;
+
+	/* ETS Algorithm */
+ switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) {
+ case ETS_WRR:
+ reg_val &= ETS_WRR;
+ break;
+ case ETS_WFQ:
+ reg_val |= ETS_WFQ;
+ break;
+ case ETS_DWRR:
+ reg_val |= ETS_DWRR;
+ break;
+ }
+ writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
+
+ switch (raa & SXGBE_MTL_OPMODE_RAAMASK) {
+ case RAA_SP:
+ reg_val &= RAA_SP;
+ break;
+ case RAA_WSP:
+ reg_val |= RAA_WSP;
+ break;
+ }
+ writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
+}
+
+/* For Dynamic DMA channel mapping for Rx queue */
+static void sxgbe_mtl_dma_dm_rxqueue(void __iomem *ioaddr)
+{
+ writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP0_REG);
+ writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP1_REG);
+ writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP2_REG);
+}
+
+static void sxgbe_mtl_set_txfifosize(void __iomem *ioaddr, int queue_num,
+ int queue_fifo)
+{
+ u32 fifo_bits, reg_val;
+
+ /* 0 means 256 bytes */
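+	/* e.g. a 2048 byte queue FIFO gives fifo_bits = (2048 / 256) - 1 = 7 */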
+ fifo_bits = (queue_fifo / SXGBE_MTL_TX_FIFO_DIV) - 1;
+ reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+ reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
+ writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_set_rxfifosize(void __iomem *ioaddr, int queue_num,
+ int queue_fifo)
+{
+ u32 fifo_bits, reg_val;
+
+ /* 0 means 256 bytes */
+	fifo_bits = (queue_fifo / SXGBE_MTL_RX_FIFO_DIV) - 1;
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_enable_txqueue(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+ reg_val |= SXGBE_MTL_ENABLE_QUEUE;
+ writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_disable_txqueue(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+ reg_val &= ~SXGBE_MTL_ENABLE_QUEUE;
+ writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fc_active(void __iomem *ioaddr, int queue_num,
+ int threshold)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_ACTIVE);
+ reg_val |= (threshold << RX_FC_ACTIVE);
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fc_enable(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val |= SXGBE_MTL_ENABLE_FC;
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fc_deactive(void __iomem *ioaddr, int queue_num,
+ int threshold)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_DEACTIVE);
+ reg_val |= (threshold << RX_FC_DEACTIVE);
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fep_enable(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val |= SXGBE_MTL_RXQ_OP_FEP;
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fep_disable(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val &= ~(SXGBE_MTL_RXQ_OP_FEP);
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fup_enable(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val |= SXGBE_MTL_RXQ_OP_FUP;
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fup_disable(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val &= ~(SXGBE_MTL_RXQ_OP_FUP);
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_set_tx_mtl_mode(void __iomem *ioaddr, int queue_num,
+ int tx_mode)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+ /* TX specific MTL mode settings */
+ if (tx_mode == SXGBE_MTL_SFMODE) {
+ reg_val |= SXGBE_MTL_SFMODE;
+ } else {
+ /* set the TTC values */
+ if (tx_mode <= 64)
+ reg_val |= MTL_CONTROL_TTC_64;
+ else if (tx_mode <= 96)
+ reg_val |= MTL_CONTROL_TTC_96;
+ else if (tx_mode <= 128)
+ reg_val |= MTL_CONTROL_TTC_128;
+ else if (tx_mode <= 192)
+ reg_val |= MTL_CONTROL_TTC_192;
+ else if (tx_mode <= 256)
+ reg_val |= MTL_CONTROL_TTC_256;
+ else if (tx_mode <= 384)
+ reg_val |= MTL_CONTROL_TTC_384;
+ else
+ reg_val |= MTL_CONTROL_TTC_512;
+ }
+
+ /* write into TXQ operation register */
+ writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_set_rx_mtl_mode(void __iomem *ioaddr, int queue_num,
+ int rx_mode)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ /* RX specific MTL mode settings */
+ if (rx_mode == SXGBE_RX_MTL_SFMODE) {
+ reg_val |= SXGBE_RX_MTL_SFMODE;
+ } else {
+ if (rx_mode <= 64)
+ reg_val |= MTL_CONTROL_RTC_64;
+ else if (rx_mode <= 96)
+ reg_val |= MTL_CONTROL_RTC_96;
+ else if (rx_mode <= 128)
+ reg_val |= MTL_CONTROL_RTC_128;
+ }
+
+ /* write into RXQ operation register */
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static const struct sxgbe_mtl_ops mtl_ops = {
+ .mtl_set_txfifosize = sxgbe_mtl_set_txfifosize,
+ .mtl_set_rxfifosize = sxgbe_mtl_set_rxfifosize,
+ .mtl_enable_txqueue = sxgbe_mtl_enable_txqueue,
+ .mtl_disable_txqueue = sxgbe_mtl_disable_txqueue,
+ .mtl_dynamic_dma_rxqueue = sxgbe_mtl_dma_dm_rxqueue,
+ .set_tx_mtl_mode = sxgbe_set_tx_mtl_mode,
+ .set_rx_mtl_mode = sxgbe_set_rx_mtl_mode,
+ .mtl_init = sxgbe_mtl_init,
+ .mtl_fc_active = sxgbe_mtl_fc_active,
+ .mtl_fc_deactive = sxgbe_mtl_fc_deactive,
+ .mtl_fc_enable = sxgbe_mtl_fc_enable,
+ .mtl_fep_enable = sxgbe_mtl_fep_enable,
+ .mtl_fep_disable = sxgbe_mtl_fep_disable,
+ .mtl_fup_enable = sxgbe_mtl_fup_enable,
+ .mtl_fup_disable = sxgbe_mtl_fup_disable
+};
+
+const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void)
+{
+ return &mtl_ops;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h
new file mode 100644
index 000000000000..7e4810c4137e
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h
@@ -0,0 +1,104 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_MTL_H__
+#define __SXGBE_MTL_H__
+
+#define SXGBE_MTL_OPMODE_ESTMASK 0x3
+#define SXGBE_MTL_OPMODE_RAAMASK 0x1
+#define SXGBE_MTL_FCMASK 0x7
+#define SXGBE_MTL_TX_FIFO_DIV 256
+#define SXGBE_MTL_RX_FIFO_DIV 256
+
+#define SXGBE_MTL_RXQ_OP_FEP BIT(4)
+#define SXGBE_MTL_RXQ_OP_FUP BIT(3)
+#define SXGBE_MTL_ENABLE_FC 0x80
+
+#define ETS_WRR 0xFFFFFF9F
+#define ETS_RST 0xFFFFFF9F
+#define ETS_WFQ 0x00000020
+#define ETS_DWRR 0x00000040
+#define RAA_SP 0xFFFFFFFB
+#define RAA_WSP 0x00000004
+
+#define RX_QUEUE_DYNAMIC 0x80808080
+#define RX_FC_ACTIVE 8
+#define RX_FC_DEACTIVE 13
+
+enum ttc_control {
+ MTL_CONTROL_TTC_64 = 0x00000000,
+ MTL_CONTROL_TTC_96 = 0x00000020,
+ MTL_CONTROL_TTC_128 = 0x00000030,
+ MTL_CONTROL_TTC_192 = 0x00000040,
+ MTL_CONTROL_TTC_256 = 0x00000050,
+ MTL_CONTROL_TTC_384 = 0x00000060,
+ MTL_CONTROL_TTC_512 = 0x00000070,
+};
+
+enum rtc_control {
+ MTL_CONTROL_RTC_64 = 0x00000000,
+ MTL_CONTROL_RTC_96 = 0x00000002,
+ MTL_CONTROL_RTC_128 = 0x00000003,
+};
+
+enum flow_control_th {
+ MTL_FC_FULL_1K = 0x00000000,
+ MTL_FC_FULL_2K = 0x00000001,
+ MTL_FC_FULL_4K = 0x00000002,
+ MTL_FC_FULL_5K = 0x00000003,
+ MTL_FC_FULL_6K = 0x00000004,
+ MTL_FC_FULL_8K = 0x00000005,
+ MTL_FC_FULL_16K = 0x00000006,
+ MTL_FC_FULL_24K = 0x00000007,
+};
+
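+/* The mtl_fc_active/mtl_fc_deactive threshold argument below is presumably
+ * one of the flow_control_th values above; they fit the 3-bit
+ * SXGBE_MTL_FCMASK field written by those callbacks.
+ */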
+struct sxgbe_mtl_ops {
+ void (*mtl_init)(void __iomem *ioaddr, unsigned int etsalg,
+ unsigned int raa);
+
+ void (*mtl_set_txfifosize)(void __iomem *ioaddr, int queue_num,
+ int mtl_fifo);
+
+ void (*mtl_set_rxfifosize)(void __iomem *ioaddr, int queue_num,
+ int queue_fifo);
+
+ void (*mtl_enable_txqueue)(void __iomem *ioaddr, int queue_num);
+
+ void (*mtl_disable_txqueue)(void __iomem *ioaddr, int queue_num);
+
+ void (*set_tx_mtl_mode)(void __iomem *ioaddr, int queue_num,
+ int tx_mode);
+
+ void (*set_rx_mtl_mode)(void __iomem *ioaddr, int queue_num,
+ int rx_mode);
+
+ void (*mtl_dynamic_dma_rxqueue)(void __iomem *ioaddr);
+
+ void (*mtl_fc_active)(void __iomem *ioaddr, int queue_num,
+ int threshold);
+
+ void (*mtl_fc_deactive)(void __iomem *ioaddr, int queue_num,
+ int threshold);
+
+ void (*mtl_fc_enable)(void __iomem *ioaddr, int queue_num);
+
+ void (*mtl_fep_enable)(void __iomem *ioaddr, int queue_num);
+
+ void (*mtl_fep_disable)(void __iomem *ioaddr, int queue_num);
+
+ void (*mtl_fup_enable)(void __iomem *ioaddr, int queue_num);
+
+ void (*mtl_fup_disable)(void __iomem *ioaddr, int queue_num);
+};
+
+const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
+
+#endif /* __SXGBE_MTL_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
new file mode 100644
index 000000000000..b147d469a799
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -0,0 +1,259 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/sxgbe_platform.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+
+#ifdef CONFIG_OF
+static int sxgbe_probe_config_dt(struct platform_device *pdev,
+ struct sxgbe_plat_data *plat,
+ const char **mac)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sxgbe_dma_cfg *dma_cfg;
+
+ if (!np)
+ return -ENODEV;
+
+ *mac = of_get_mac_address(np);
+ plat->interface = of_get_phy_mode(np);
+
+ plat->bus_id = of_alias_get_id(np, "ethernet");
+ if (plat->bus_id < 0)
+ plat->bus_id = 0;
+
+	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+					   sizeof(*plat->mdio_bus_data),
+					   GFP_KERNEL);
+	if (!plat->mdio_bus_data)
+		return -ENOMEM;
+
+ dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
+ if (!dma_cfg)
+ return -ENOMEM;
+
+ plat->dma_cfg = dma_cfg;
+ of_property_read_u32(np, "samsung,pbl", &dma_cfg->pbl);
+ if (of_property_read_u32(np, "samsung,burst-map", &dma_cfg->burst_map) == 0)
+ dma_cfg->fixed_burst = true;
+
+ return 0;
+}
+#else
+static int sxgbe_probe_config_dt(struct platform_device *pdev,
+ struct sxgbe_plat_data *plat,
+ const char **mac)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_OF */
+
+/**
+ * sxgbe_platform_probe
+ * @pdev: platform device pointer
+ * Description: platform_device probe function. It allocates
+ * the necessary resources and invokes the main driver probe to
+ * initialise the net device, register the MDIO bus, etc.
+ */
+static int sxgbe_platform_probe(struct platform_device *pdev)
+{
+ int ret;
+ int i, chan;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void __iomem *addr;
+ struct sxgbe_priv_data *priv = NULL;
+ struct sxgbe_plat_data *plat_dat = NULL;
+ const char *mac = NULL;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct device_node *node = dev->of_node;
+
+ /* Get memory resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto err_out;
+
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+
+ if (pdev->dev.of_node) {
+ plat_dat = devm_kzalloc(&pdev->dev,
+ sizeof(struct sxgbe_plat_data),
+ GFP_KERNEL);
+ if (!plat_dat)
+ return -ENOMEM;
+
+ ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac);
+ if (ret) {
+ pr_err("%s: main dt probe failed\n", __func__);
+ return ret;
+ }
+ }
+
+	priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
+	if (!priv) {
+		pr_err("%s: main driver probe failed\n", __func__);
+		goto err_out;
+	}
+
+	/* Get MAC address if available (DT); priv is valid only after
+	 * the main probe above has succeeded.
+	 */
+	if (mac)
+		ether_addr_copy(priv->dev->dev_addr, mac);
+
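+	/* The DT "interrupts" list is expected to be ordered as: the common
+	 * IRQ, one IRQ per TX queue, one IRQ per RX queue and finally the
+	 * LPI IRQ (tracked by the chan index below).
+	 */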
+ /* Get the SXGBE common INT information */
+ priv->irq = irq_of_parse_and_map(node, 0);
+ if (priv->irq <= 0) {
+ dev_err(dev, "sxgbe common irq parsing failed\n");
+ goto err_drv_remove;
+ }
+
+ /* Get the TX/RX IRQ numbers */
+ for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
+ priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
+ if (priv->txq[i]->irq_no <= 0) {
+ dev_err(dev, "sxgbe tx irq parsing failed\n");
+ goto err_tx_irq_unmap;
+ }
+ }
+
+ for (i = 0; i < SXGBE_RX_QUEUES; i++) {
+ priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++);
+ if (priv->rxq[i]->irq_no <= 0) {
+ dev_err(dev, "sxgbe rx irq parsing failed\n");
+ goto err_rx_irq_unmap;
+ }
+ }
+
+ priv->lpi_irq = irq_of_parse_and_map(node, chan);
+ if (priv->lpi_irq <= 0) {
+ dev_err(dev, "sxgbe lpi irq parsing failed\n");
+ goto err_rx_irq_unmap;
+ }
+
+ platform_set_drvdata(pdev, priv->dev);
+
+ pr_debug("platform driver registration completed\n");
+
+ return 0;
+
+err_rx_irq_unmap:
+	while (i--)
+ irq_dispose_mapping(priv->rxq[i]->irq_no);
+ i = SXGBE_TX_QUEUES;
+err_tx_irq_unmap:
+	while (i--)
+ irq_dispose_mapping(priv->txq[i]->irq_no);
+ irq_dispose_mapping(priv->irq);
+err_drv_remove:
+ sxgbe_drv_remove(ndev);
+err_out:
+ return -ENODEV;
+}
+
+/**
+ * sxgbe_platform_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main driver remove function to free
+ * the net resources and then releases the platform resources (e.g. memory).
+ */
+static int sxgbe_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ int ret = sxgbe_drv_remove(ndev);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int sxgbe_platform_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return sxgbe_suspend(ndev);
+}
+
+static int sxgbe_platform_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return sxgbe_resume(ndev);
+}
+
+static int sxgbe_platform_freeze(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return sxgbe_freeze(ndev);
+}
+
+static int sxgbe_platform_restore(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return sxgbe_restore(ndev);
+}
+
+static const struct dev_pm_ops sxgbe_platform_pm_ops = {
+ .suspend = sxgbe_platform_suspend,
+ .resume = sxgbe_platform_resume,
+ .freeze = sxgbe_platform_freeze,
+ .thaw = sxgbe_platform_restore,
+ .restore = sxgbe_platform_restore,
+};
+#else
+static const struct dev_pm_ops sxgbe_platform_pm_ops;
+#endif /* CONFIG_PM */
+
+static const struct of_device_id sxgbe_dt_ids[] = {
+ { .compatible = "samsung,sxgbe-v2.0a"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sxgbe_dt_ids);
+
+static struct platform_driver sxgbe_platform_driver = {
+ .probe = sxgbe_platform_probe,
+ .remove = sxgbe_platform_remove,
+ .driver = {
+ .name = SXGBE_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &sxgbe_platform_pm_ops,
+ .of_match_table = of_match_ptr(sxgbe_dt_ids),
+ },
+};
+
+int sxgbe_register_platform(void)
+{
+ int err;
+
+ err = platform_driver_register(&sxgbe_platform_driver);
+ if (err)
+ pr_err("failed to register the platform driver\n");
+
+ return err;
+}
+
+void sxgbe_unregister_platform(void)
+{
+ platform_driver_unregister(&sxgbe_platform_driver);
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
new file mode 100644
index 000000000000..5a89acb4c505
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -0,0 +1,488 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_REGMAP_H__
+#define __SXGBE_REGMAP_H__
+
+/* SXGBE MAC Registers */
+#define SXGBE_CORE_TX_CONFIG_REG 0x0000
+#define SXGBE_CORE_RX_CONFIG_REG 0x0004
+#define SXGBE_CORE_PKT_FILTER_REG 0x0008
+#define SXGBE_CORE_WATCHDOG_TIMEOUT_REG 0x000C
+#define SXGBE_CORE_HASH_TABLE_REG0 0x0010
+#define SXGBE_CORE_HASH_TABLE_REG1 0x0014
+#define SXGBE_CORE_HASH_TABLE_REG2 0x0018
+#define SXGBE_CORE_HASH_TABLE_REG3 0x001C
+#define SXGBE_CORE_HASH_TABLE_REG4 0x0020
+#define SXGBE_CORE_HASH_TABLE_REG5 0x0024
+#define SXGBE_CORE_HASH_TABLE_REG6 0x0028
+#define SXGBE_CORE_HASH_TABLE_REG7 0x002C
+
+/* EEE-LPI Registers */
+#define SXGBE_CORE_LPI_CTRL_STATUS 0x00D0
+#define SXGBE_CORE_LPI_TIMER_CTRL 0x00D4
+
+/* VLAN Specific Registers */
+#define SXGBE_CORE_VLAN_TAG_REG 0x0050
+#define SXGBE_CORE_VLAN_HASHTAB_REG 0x0058
+#define SXGBE_CORE_VLAN_INSCTL_REG 0x0060
+#define SXGBE_CORE_VLAN_INNERCTL_REG 0x0064
+#define SXGBE_CORE_RX_ETHTYPE_MATCH_REG 0x006C
+
+/* Flow Control Registers */
+#define SXGBE_CORE_TX_Q0_FLOWCTL_REG 0x0070
+#define SXGBE_CORE_TX_Q1_FLOWCTL_REG 0x0074
+#define SXGBE_CORE_TX_Q2_FLOWCTL_REG 0x0078
+#define SXGBE_CORE_TX_Q3_FLOWCTL_REG 0x007C
+#define SXGBE_CORE_TX_Q4_FLOWCTL_REG 0x0080
+#define SXGBE_CORE_TX_Q5_FLOWCTL_REG 0x0084
+#define SXGBE_CORE_TX_Q6_FLOWCTL_REG 0x0088
+#define SXGBE_CORE_TX_Q7_FLOWCTL_REG 0x008C
+#define SXGBE_CORE_RX_FLOWCTL_REG 0x0090
+#define SXGBE_CORE_RX_CTL0_REG 0x00A0
+#define SXGBE_CORE_RX_CTL1_REG 0x00A4
+#define SXGBE_CORE_RX_CTL2_REG 0x00A8
+#define SXGBE_CORE_RX_CTL3_REG 0x00AC
+
+/* Interrupt Registers */
+#define SXGBE_CORE_INT_STATUS_REG 0x00B0
+#define SXGBE_CORE_INT_ENABLE_REG 0x00B4
+#define SXGBE_CORE_RXTX_ERR_STATUS_REG 0x00B8
+#define SXGBE_CORE_PMT_CTL_STATUS_REG 0x00C0
+#define SXGBE_CORE_RWK_PKT_FILTER_REG 0x00C4
+#define SXGBE_CORE_VERSION_REG 0x0110
+#define SXGBE_CORE_DEBUG_REG 0x0114
+#define SXGBE_CORE_HW_FEA_REG(index) (0x011C + index * 4)
+
+/* SMA(MDIO) module registers */
+#define SXGBE_MDIO_SCMD_ADD_REG 0x0200
+#define SXGBE_MDIO_SCMD_DATA_REG 0x0204
+#define SXGBE_MDIO_CCMD_WADD_REG 0x0208
+#define SXGBE_MDIO_CCMD_WDATA_REG 0x020C
+#define SXGBE_MDIO_CSCAN_PORT_REG 0x0210
+#define SXGBE_MDIO_INT_STATUS_REG 0x0214
+#define SXGBE_MDIO_INT_ENABLE_REG 0x0218
+#define SXGBE_MDIO_PORT_CONDCON_REG 0x021C
+#define SXGBE_MDIO_CLAUSE22_PORT_REG 0x0220
+
+/* port specific, addr = 0-3 */
+#define SXGBE_MDIO_DEV_BASE_REG 0x0230
+#define SXGBE_MDIO_PORT_DEV_REG(addr) \
+ (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x0)
+#define SXGBE_MDIO_PORT_LSTATUS_REG(addr) \
+ (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x4)
+#define SXGBE_MDIO_PORT_ALIVE_REG(addr) \
+ (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x8)
+
+#define SXGBE_CORE_GPIO_CTL_REG 0x0278
+#define SXGBE_CORE_GPIO_STATUS_REG 0x027C
+
+/* Address registers for filtering */
+#define SXGBE_CORE_ADD_BASE_REG 0x0300
+
+/* addr = 0-31 */
+#define SXGBE_CORE_ADD_HIGHOFFSET(addr) \
+ (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x0)
+#define SXGBE_CORE_ADD_LOWOFFSET(addr) \
+ (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x4)
+
+/* SXGBE MMC registers */
+#define SXGBE_MMC_CTL_REG 0x0800
+#define SXGBE_MMC_RXINT_STATUS_REG 0x0804
+#define SXGBE_MMC_TXINT_STATUS_REG 0x0808
+#define SXGBE_MMC_RXINT_ENABLE_REG 0x080C
+#define SXGBE_MMC_TXINT_ENABLE_REG 0x0810
+
+/* TX specific counters */
+#define SXGBE_MMC_TXOCTETHI_GBCNT_REG 0x0814
+#define SXGBE_MMC_TXOCTETLO_GBCNT_REG 0x0818
+#define SXGBE_MMC_TXFRAMELO_GBCNT_REG 0x081C
+#define SXGBE_MMC_TXFRAMEHI_GBCNT_REG 0x0820
+#define SXGBE_MMC_TXBROADLO_GCNT_REG 0x0824
+#define SXGBE_MMC_TXBROADHI_GCNT_REG 0x0828
+#define SXGBE_MMC_TXMULTILO_GCNT_REG 0x082C
+#define SXGBE_MMC_TXMULTIHI_GCNT_REG 0x0830
+#define SXGBE_MMC_TX64LO_GBCNT_REG 0x0834
+#define SXGBE_MMC_TX64HI_GBCNT_REG 0x0838
+#define SXGBE_MMC_TX65TO127LO_GBCNT_REG 0x083C
+#define SXGBE_MMC_TX65TO127HI_GBCNT_REG 0x0840
+#define SXGBE_MMC_TX128TO255LO_GBCNT_REG 0x0844
+#define SXGBE_MMC_TX128TO255HI_GBCNT_REG 0x0848
+#define SXGBE_MMC_TX256TO511LO_GBCNT_REG 0x084C
+#define SXGBE_MMC_TX256TO511HI_GBCNT_REG 0x0850
+#define SXGBE_MMC_TX512TO1023LO_GBCNT_REG 0x0854
+#define SXGBE_MMC_TX512TO1023HI_GBCNT_REG 0x0858
+#define SXGBE_MMC_TX1023TOMAXLO_GBCNT_REG 0x085C
+#define SXGBE_MMC_TX1023TOMAXHI_GBCNT_REG 0x0860
+#define SXGBE_MMC_TXUNICASTLO_GBCNT_REG 0x0864
+#define SXGBE_MMC_TXUNICASTHI_GBCNT_REG 0x0868
+#define SXGBE_MMC_TXMULTILO_GBCNT_REG 0x086C
+#define SXGBE_MMC_TXMULTIHI_GBCNT_REG 0x0870
+#define SXGBE_MMC_TXBROADLO_GBCNT_REG 0x0874
+#define SXGBE_MMC_TXBROADHI_GBCNT_REG 0x0878
+#define SXGBE_MMC_TXUFLWLO_GBCNT_REG 0x087C
+#define SXGBE_MMC_TXUFLWHI_GBCNT_REG 0x0880
+#define SXGBE_MMC_TXOCTETLO_GCNT_REG 0x0884
+#define SXGBE_MMC_TXOCTETHI_GCNT_REG 0x0888
+#define SXGBE_MMC_TXFRAMELO_GCNT_REG 0x088C
+#define SXGBE_MMC_TXFRAMEHI_GCNT_REG 0x0890
+#define SXGBE_MMC_TXPAUSELO_CNT_REG 0x0894
+#define SXGBE_MMC_TXPAUSEHI_CNT_REG 0x0898
+#define SXGBE_MMC_TXVLANLO_GCNT_REG 0x089C
+#define SXGBE_MMC_TXVLANHI_GCNT_REG 0x08A0
+
+/* RX specific counters */
+#define SXGBE_MMC_RXFRAMELO_GBCNT_REG 0x0900
+#define SXGBE_MMC_RXFRAMEHI_GBCNT_REG 0x0904
+#define SXGBE_MMC_RXOCTETLO_GBCNT_REG 0x0908
+#define SXGBE_MMC_RXOCTETHI_GBCNT_REG 0x090C
+#define SXGBE_MMC_RXOCTETLO_GCNT_REG 0x0910
+#define SXGBE_MMC_RXOCTETHI_GCNT_REG 0x0914
+#define SXGBE_MMC_RXBROADLO_GCNT_REG 0x0918
+#define SXGBE_MMC_RXBROADHI_GCNT_REG 0x091C
+#define SXGBE_MMC_RXMULTILO_GCNT_REG 0x0920
+#define SXGBE_MMC_RXMULTIHI_GCNT_REG 0x0924
+#define SXGBE_MMC_RXCRCERRLO_REG 0x0928
+#define SXGBE_MMC_RXCRCERRHI_REG 0x092C
+#define SXGBE_MMC_RXSHORT64BFRAME_ERR_REG 0x0930
+#define SXGBE_MMC_RXJABBERERR_REG 0x0934
+#define SXGBE_MMC_RXSHORT64BFRAME_COR_REG 0x0938
+#define SXGBE_MMC_RXOVERMAXFRAME_COR_REG 0x093C
+#define SXGBE_MMC_RX64LO_GBCNT_REG 0x0940
+#define SXGBE_MMC_RX64HI_GBCNT_REG 0x0944
+#define SXGBE_MMC_RX65TO127LO_GBCNT_REG 0x0948
+#define SXGBE_MMC_RX65TO127HI_GBCNT_REG 0x094C
+#define SXGBE_MMC_RX128TO255LO_GBCNT_REG 0x0950
+#define SXGBE_MMC_RX128TO255HI_GBCNT_REG 0x0954
+#define SXGBE_MMC_RX256TO511LO_GBCNT_REG 0x0958
+#define SXGBE_MMC_RX256TO511HI_GBCNT_REG 0x095C
+#define SXGBE_MMC_RX512TO1023LO_GBCNT_REG 0x0960
+#define SXGBE_MMC_RX512TO1023HI_GBCNT_REG 0x0964
+#define SXGBE_MMC_RX1023TOMAXLO_GBCNT_REG 0x0968
+#define SXGBE_MMC_RX1023TOMAXHI_GBCNT_REG 0x096C
+#define SXGBE_MMC_RXUNICASTLO_GCNT_REG 0x0970
+#define SXGBE_MMC_RXUNICASTHI_GCNT_REG 0x0974
+#define SXGBE_MMC_RXLENERRLO_REG 0x0978
+#define SXGBE_MMC_RXLENERRHI_REG 0x097C
+#define SXGBE_MMC_RXOUTOFRANGETYPELO_REG 0x0980
+#define SXGBE_MMC_RXOUTOFRANGETYPEHI_REG 0x0984
+#define SXGBE_MMC_RXPAUSELO_CNT_REG 0x0988
+#define SXGBE_MMC_RXPAUSEHI_CNT_REG 0x098C
+#define SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG 0x0990
+#define SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG 0x0994
+#define SXGBE_MMC_RXVLANLO_GBCNT_REG 0x0998
+#define SXGBE_MMC_RXVLANHI_GBCNT_REG 0x099C
+#define SXGBE_MMC_RXWATCHDOG_ERR_REG 0x09A0
+
+/* L3/L4 function registers */
+#define SXGBE_CORE_L34_ADDCTL_REG	0x0C00
+#define SXGBE_CORE_L34_DATA_REG 0x0C04
+
+/* ARP registers */
+#define SXGBE_CORE_ARP_ADD_REG 0x0C10
+
+/* RSS registers */
+#define SXGBE_CORE_RSS_CTL_REG 0x0C80
+#define SXGBE_CORE_RSS_ADD_REG 0x0C88
+#define SXGBE_CORE_RSS_DATA_REG 0x0C8C
+
+/* RSS control register bits */
+#define SXGBE_CORE_RSS_CTL_UDP4TE BIT(3)
+#define SXGBE_CORE_RSS_CTL_TCP4TE BIT(2)
+#define SXGBE_CORE_RSS_CTL_IP2TE BIT(1)
+#define SXGBE_CORE_RSS_CTL_RSSE BIT(0)
+
+/* IEEE 1588 registers */
+#define SXGBE_CORE_TSTAMP_CTL_REG 0x0D00
+#define SXGBE_CORE_SUBSEC_INC_REG 0x0D04
+#define SXGBE_CORE_SYSTIME_SEC_REG 0x0D0C
+#define SXGBE_CORE_SYSTIME_NSEC_REG 0x0D10
+#define SXGBE_CORE_SYSTIME_SECUP_REG 0x0D14
+#define SXGBE_CORE_TSTAMP_ADD_REG 0x0D18
+#define SXGBE_CORE_SYSTIME_HWORD_REG 0x0D1C
+#define SXGBE_CORE_TSTAMP_STATUS_REG 0x0D20
+#define SXGBE_CORE_TXTIME_STATUSNSEC_REG 0x0D30
+#define SXGBE_CORE_TXTIME_STATUSSEC_REG 0x0D34
+
+/* Auxiliary registers */
+#define SXGBE_CORE_AUX_CTL_REG 0x0D40
+#define SXGBE_CORE_AUX_TSTAMP_NSEC_REG 0x0D48
+#define SXGBE_CORE_AUX_TSTAMP_SEC_REG 0x0D4C
+#define SXGBE_CORE_AUX_TSTAMP_INGCOR_REG 0x0D50
+#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_REG 0x0D54
+#define SXGBE_CORE_AUX_TSTAMP_INGCOR_NSEC_REG 0x0D58
+#define SXGBE_CORE_AUX_TSTAMP_INGCOR_SUBNSEC_REG 0x0D5C
+#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_NSEC_REG 0x0D60
+#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_SUBNSEC_REG 0x0D64
+
+/* PPS registers */
+#define SXGBE_CORE_PPS_CTL_REG 0x0D70
+#define SXGBE_CORE_PPS_BASE 0x0D80
+
+/* addr = 0 - 3 */
+#define SXGBE_CORE_PPS_TTIME_SEC_REG(addr) \
+ (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x0)
+#define SXGBE_CORE_PPS_TTIME_NSEC_REG(addr) \
+ (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x4)
+#define SXGBE_CORE_PPS_INTERVAL_REG(addr) \
+ (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x8)
+#define SXGBE_CORE_PPS_WIDTH_REG(addr) \
+ (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0xC)
+#define SXGBE_CORE_PTO_CTL_REG 0x0DC0
+#define SXGBE_CORE_SRCPORT_ITY0_REG 0x0DC4
+#define SXGBE_CORE_SRCPORT_ITY1_REG 0x0DC8
+#define SXGBE_CORE_SRCPORT_ITY2_REG 0x0DCC
+#define SXGBE_CORE_LOGMSG_LEVEL_REG 0x0DD0
+
+/* SXGBE MTL Registers */
+#define SXGBE_MTL_BASE_REG 0x1000
+#define SXGBE_MTL_OP_MODE_REG (SXGBE_MTL_BASE_REG + 0x0000)
+#define SXGBE_MTL_DEBUG_CTL_REG (SXGBE_MTL_BASE_REG + 0x0008)
+#define SXGBE_MTL_DEBUG_STATUS_REG (SXGBE_MTL_BASE_REG + 0x000C)
+#define SXGBE_MTL_FIFO_DEBUGDATA_REG (SXGBE_MTL_BASE_REG + 0x0010)
+#define SXGBE_MTL_INT_STATUS_REG (SXGBE_MTL_BASE_REG + 0x0020)
+#define SXGBE_MTL_RXQ_DMAMAP0_REG (SXGBE_MTL_BASE_REG + 0x0030)
+#define SXGBE_MTL_RXQ_DMAMAP1_REG (SXGBE_MTL_BASE_REG + 0x0034)
+#define SXGBE_MTL_RXQ_DMAMAP2_REG (SXGBE_MTL_BASE_REG + 0x0038)
+#define SXGBE_MTL_TX_PRTYMAP0_REG (SXGBE_MTL_BASE_REG + 0x0040)
+#define SXGBE_MTL_TX_PRTYMAP1_REG (SXGBE_MTL_BASE_REG + 0x0044)
+
+/* TC/Queue registers, qnum=0-15 */
+#define SXGBE_MTL_TC_TXBASE_REG (SXGBE_MTL_BASE_REG + 0x0100)
+#define SXGBE_MTL_TXQ_OPMODE_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x00)
+#define SXGBE_MTL_SFMODE BIT(1)
+#define SXGBE_MTL_FIFO_LSHIFT 16
+#define SXGBE_MTL_ENABLE_QUEUE 0x00000008
+#define SXGBE_MTL_TXQ_UNDERFLOW_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x04)
+#define SXGBE_MTL_TXQ_DEBUG_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x08)
+#define SXGBE_MTL_TXQ_ETSCTL_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x10)
+#define SXGBE_MTL_TXQ_ETSSTATUS_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x14)
+#define SXGBE_MTL_TXQ_QUANTWEIGHT_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x18)
+
+#define SXGBE_MTL_TC_RXBASE_REG 0x1140
+#define SXGBE_RX_MTL_SFMODE BIT(5)
+#define SXGBE_MTL_RXQ_OPMODE_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x00)
+#define SXGBE_MTL_RXQ_MISPKTOVERFLOW_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x04)
+#define SXGBE_MTL_RXQ_DEBUG_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x08)
+#define SXGBE_MTL_RXQ_CTL_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x0C)
+#define SXGBE_MTL_RXQ_INTENABLE_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x30)
+#define SXGBE_MTL_RXQ_INTSTATUS_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x34)
+
+/* SXGBE DMA Registers */
+#define SXGBE_DMA_BASE_REG 0x3000
+#define SXGBE_DMA_MODE_REG (SXGBE_DMA_BASE_REG + 0x0000)
+#define SXGBE_DMA_SOFT_RESET BIT(0)
+#define SXGBE_DMA_SYSBUS_MODE_REG (SXGBE_DMA_BASE_REG + 0x0004)
+#define SXGBE_DMA_AXI_UNDEF_BURST BIT(0)
+#define SXGBE_DMA_ENHACE_ADDR_MODE BIT(11)
+#define SXGBE_DMA_INT_STATUS_REG (SXGBE_DMA_BASE_REG + 0x0008)
+#define SXGBE_DMA_AXI_ARCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0010)
+#define SXGBE_DMA_AXI_AWCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0018)
+#define SXGBE_DMA_DEBUG_STATUS0_REG (SXGBE_DMA_BASE_REG + 0x0020)
+#define SXGBE_DMA_DEBUG_STATUS1_REG (SXGBE_DMA_BASE_REG + 0x0024)
+#define SXGBE_DMA_DEBUG_STATUS2_REG (SXGBE_DMA_BASE_REG + 0x0028)
+#define SXGBE_DMA_DEBUG_STATUS3_REG (SXGBE_DMA_BASE_REG + 0x002C)
+#define SXGBE_DMA_DEBUG_STATUS4_REG (SXGBE_DMA_BASE_REG + 0x0030)
+#define SXGBE_DMA_DEBUG_STATUS5_REG (SXGBE_DMA_BASE_REG + 0x0034)
+
+/* Channel Registers, cha_num = 0-15 */
+#define SXGBE_DMA_CHA_BASE_REG \
+ (SXGBE_DMA_BASE_REG + 0x0100)
+#define SXGBE_DMA_CHA_CTL_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x00)
+#define SXGBE_DMA_PBL_X8MODE BIT(16)
+#define SXGBE_DMA_CHA_TXCTL_TSE_ENABLE BIT(12)
+#define SXGBE_DMA_CHA_TXCTL_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x04)
+#define SXGBE_DMA_CHA_RXCTL_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x08)
+#define SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x10)
+#define SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x14)
+#define SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x18)
+#define SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x1C)
+#define SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x24)
+#define SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x2C)
+#define SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x30)
+#define SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x34)
+#define SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x38)
+#define SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x3C)
+#define SXGBE_DMA_CHA_TXDESC_CURADDLO_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x44)
+#define SXGBE_DMA_CHA_RXDESC_CURADDLO_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x4C)
+#define SXGBE_DMA_CHA_CURTXBUF_ADDHI_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x50)
+#define SXGBE_DMA_CHA_CURTXBUF_ADDLO_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x54)
+#define SXGBE_DMA_CHA_CURRXBUF_ADDHI_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x58)
+#define SXGBE_DMA_CHA_CURRXBUF_ADDLO_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x5C)
+#define SXGBE_DMA_CHA_STATUS_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x60)
+
+/* TX DMA control register specific */
+#define SXGBE_TX_START_DMA BIT(0)
+
+/* sxgbe tx configuration register bitfields */
+#define SXGBE_SPEED_10G 0x0
+#define SXGBE_SPEED_2_5G 0x1
+#define SXGBE_SPEED_1G 0x2
+#define SXGBE_SPEED_LSHIFT 29
+
+#define SXGBE_TX_ENABLE BIT(0)
+#define SXGBE_TX_DISDIC_ALGO BIT(1)
+#define SXGBE_TX_JABBER_DISABLE BIT(16)
+
+/* sxgbe rx configuration register bitfields */
+#define SXGBE_RX_ENABLE BIT(0)
+#define SXGBE_RX_ACS_ENABLE BIT(1)
+#define SXGBE_RX_WATCHDOG_DISABLE BIT(7)
+#define SXGBE_RX_JUMBPKT_ENABLE BIT(8)
+#define SXGBE_RX_CSUMOFFLOAD_ENABLE BIT(9)
+#define SXGBE_RX_LOOPBACK_ENABLE BIT(10)
+#define SXGBE_RX_ARPOFFLOAD_ENABLE BIT(31)
+
+/* sxgbe vlan Tag Register bitfields */
+#define SXGBE_VLAN_SVLAN_ENABLE BIT(18)
+#define SXGBE_VLAN_DOUBLEVLAN_ENABLE BIT(26)
+#define SXGBE_VLAN_INNERVLAN_ENABLE BIT(27)
+
+/* XMAC VLAN Tag Inclusion Register (0x0060) bitfields.
+ * The fields below also apply to the Inner VLAN Tag
+ * Inclusion Register (0x0064).
+ */
+enum vlan_tag_ctl_tx {
+ VLAN_TAG_TX_NOP,
+ VLAN_TAG_TX_DEL,
+ VLAN_TAG_TX_INSERT,
+ VLAN_TAG_TX_REPLACE
+};
+#define SXGBE_VLAN_PRTY_CTL BIT(18)
+#define SXGBE_VLAN_CSVL_CTL BIT(19)
+
+/* SXGBE TX Q Flow Control Register bitfields */
+#define SXGBE_TX_FLOW_CTL_FCB BIT(0)
+#define SXGBE_TX_FLOW_CTL_TFB BIT(1)
+
+/* SXGBE RX Q Flow Control Register bitfields */
+#define SXGBE_RX_FLOW_CTL_ENABLE BIT(0)
+#define SXGBE_RX_UNICAST_DETECT BIT(1)
+#define SXGBE_RX_PRTYFLOW_CTL_ENABLE BIT(8)
+
+/* sxgbe rx Q control0 register bitfields */
+#define SXGBE_RX_Q_ENABLE 0x2
+
+/* SXGBE hardware features bitfield specific */
+/* Capability Register 0 */
+#define SXGBE_HW_FEAT_GMII(cap) ((cap & 0x00000002) >> 1)
+#define SXGBE_HW_FEAT_VLAN_HASH_FILTER(cap) ((cap & 0x00000010) >> 4)
+#define SXGBE_HW_FEAT_SMA(cap) ((cap & 0x00000020) >> 5)
+#define SXGBE_HW_FEAT_PMT_TEMOTE_WOP(cap) ((cap & 0x00000040) >> 6)
+#define SXGBE_HW_FEAT_PMT_MAGIC_PKT(cap) ((cap & 0x00000080) >> 7)
+#define SXGBE_HW_FEAT_RMON(cap) ((cap & 0x00000100) >> 8)
+#define SXGBE_HW_FEAT_ARP_OFFLOAD(cap) ((cap & 0x00000200) >> 9)
+#define SXGBE_HW_FEAT_IEEE1500_2008(cap) ((cap & 0x00001000) >> 12)
+#define SXGBE_HW_FEAT_EEE(cap) ((cap & 0x00002000) >> 13)
+#define SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(cap) ((cap & 0x00004000) >> 14)
+#define SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(cap) ((cap & 0x00010000) >> 16)
+#define SXGBE_HW_FEAT_MACADDR_COUNT(cap) ((cap & 0x007C0000) >> 18)
+#define SXGBE_HW_FEAT_TSTMAP_SRC(cap) ((cap & 0x06000000) >> 25)
+#define SXGBE_HW_FEAT_SRCADDR_VLAN(cap) ((cap & 0x08000000) >> 27)
+
+/* Capability Register 1 */
+#define SXGBE_HW_FEAT_RX_FIFO_SIZE(cap) ((cap & 0x0000001F))
+#define SXGBE_HW_FEAT_TX_FIFO_SIZE(cap) ((cap & 0x000007C0) >> 6)
+#define SXGBE_HW_FEAT_IEEE1588_HWORD(cap) ((cap & 0x00002000) >> 13)
+#define SXGBE_HW_FEAT_DCB(cap) ((cap & 0x00010000) >> 16)
+#define SXGBE_HW_FEAT_SPLIT_HDR(cap) ((cap & 0x00020000) >> 17)
+#define SXGBE_HW_FEAT_TSO(cap) ((cap & 0x00040000) >> 18)
+#define SXGBE_HW_FEAT_DEBUG_MEM_IFACE(cap) ((cap & 0x00080000) >> 19)
+#define SXGBE_HW_FEAT_RSS(cap) ((cap & 0x00100000) >> 20)
+#define SXGBE_HW_FEAT_HASH_TABLE_SIZE(cap) ((cap & 0x03000000) >> 24)
+#define SXGBE_HW_FEAT_L3L4_FILTER_NUM(cap) ((cap & 0x78000000) >> 27)
+
+/* Capability Register 2 */
+#define SXGBE_HW_FEAT_RX_MTL_QUEUES(cap) ((cap & 0x0000000F))
+#define SXGBE_HW_FEAT_TX_MTL_QUEUES(cap) ((cap & 0x000003C0) >> 6)
+#define SXGBE_HW_FEAT_RX_DMA_CHANNELS(cap) ((cap & 0x0000F000) >> 12)
+#define SXGBE_HW_FEAT_TX_DMA_CHANNELS(cap) ((cap & 0x003C0000) >> 18)
+#define SXGBE_HW_FEAT_PPS_OUTPUTS(cap) ((cap & 0x07000000) >> 24)
+#define SXGBE_HW_FEAT_AUX_SNAPSHOTS(cap) ((cap & 0x70000000) >> 28)
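+/* Usage sketch (assuming ioaddr is the mapped register base): the capability
+ * macros above decode the words read from SXGBE_CORE_HW_FEA_REG(0..2), e.g.
+ *	cap2 = readl(ioaddr + SXGBE_CORE_HW_FEA_REG(2));
+ *	tx_qcnt = SXGBE_HW_FEAT_TX_MTL_QUEUES(cap2);
+ */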
+
+/* DMA channel interrupt enable specific */
+/* DMA Normal interrupt */
+#define SXGBE_DMA_INT_ENA_NIE BIT(16) /* Normal Summary */
+#define SXGBE_DMA_INT_ENA_TIE BIT(0) /* Transmit Interrupt */
+#define SXGBE_DMA_INT_ENA_TUE BIT(2) /* Transmit Buffer Unavailable */
+#define SXGBE_DMA_INT_ENA_RIE BIT(6) /* Receive Interrupt */
+
+#define SXGBE_DMA_INT_NORMAL \
+ (SXGBE_DMA_INT_ENA_NIE | SXGBE_DMA_INT_ENA_RIE | \
+ SXGBE_DMA_INT_ENA_TIE | SXGBE_DMA_INT_ENA_TUE)
+
+/* DMA Abnormal interrupt */
+#define SXGBE_DMA_INT_ENA_AIE BIT(15) /* Abnormal Summary */
+#define SXGBE_DMA_INT_ENA_TSE BIT(1) /* Transmit Stopped */
+#define SXGBE_DMA_INT_ENA_RUE BIT(7) /* Receive Buffer Unavailable */
+#define SXGBE_DMA_INT_ENA_RSE BIT(8) /* Receive Stopped */
+#define SXGBE_DMA_INT_ENA_FBE BIT(12) /* Fatal Bus Error */
+#define SXGBE_DMA_INT_ENA_CDEE BIT(13) /* Context Descriptor Error */
+
+#define SXGBE_DMA_INT_ABNORMAL \
+ (SXGBE_DMA_INT_ENA_AIE | SXGBE_DMA_INT_ENA_TSE | \
+ SXGBE_DMA_INT_ENA_RUE | SXGBE_DMA_INT_ENA_RSE | \
+ SXGBE_DMA_INT_ENA_FBE | SXGBE_DMA_INT_ENA_CDEE)
+
+#define SXGBE_DMA_ENA_INT (SXGBE_DMA_INT_NORMAL | SXGBE_DMA_INT_ABNORMAL)
+
+/* DMA channel interrupt status specific */
+#define SXGBE_DMA_INT_STATUS_REB2 BIT(21)
+#define SXGBE_DMA_INT_STATUS_REB1 BIT(20)
+#define SXGBE_DMA_INT_STATUS_REB0 BIT(19)
+#define SXGBE_DMA_INT_STATUS_TEB2 BIT(18)
+#define SXGBE_DMA_INT_STATUS_TEB1 BIT(17)
+#define SXGBE_DMA_INT_STATUS_TEB0 BIT(16)
+#define SXGBE_DMA_INT_STATUS_NIS BIT(15)
+#define SXGBE_DMA_INT_STATUS_AIS BIT(14)
+#define SXGBE_DMA_INT_STATUS_CTXTERR BIT(13)
+#define SXGBE_DMA_INT_STATUS_FBE BIT(12)
+#define SXGBE_DMA_INT_STATUS_RPS BIT(8)
+#define SXGBE_DMA_INT_STATUS_RBU BIT(7)
+#define SXGBE_DMA_INT_STATUS_RI BIT(6)
+#define SXGBE_DMA_INT_STATUS_TBU BIT(2)
+#define SXGBE_DMA_INT_STATUS_TPS BIT(1)
+#define SXGBE_DMA_INT_STATUS_TI BIT(0)
+
+#endif /* __SXGBE_REGMAP_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
new file mode 100644
index 000000000000..51c32194ba88
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
@@ -0,0 +1,91 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include "sxgbe_common.h"
+#include "sxgbe_xpcs.h"
+
+static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg)
+{
+ u32 value;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ value = readl(priv->ioaddr + XPCS_OFFSET + reg);
+
+ return value;
+}
+
+static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ writel(data, priv->ioaddr + XPCS_OFFSET + reg);
+
+ return 0;
+}
+
+int sxgbe_xpcs_init(struct net_device *ndev)
+{
+ u32 value;
+
+ value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+ /* 10G XAUI mode */
+ sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
+ sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
+ sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13));
+ sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
+
+ do {
+ value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+ } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE);
+
+ value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+ sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
+
+ do {
+ value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+ } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
+
+ return 0;
+}
+
+int sxgbe_xpcs_init_1G(struct net_device *ndev)
+{
+ int value;
+
+ /* 10GBASE-X PCS (1G) mode */
+ sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
+ sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
+ value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+ sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13));
+
+ value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
+ sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6));
+ sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13));
+ value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+ sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
+
+ do {
+ value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+ } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
+
+ value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+ sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
+
+	/* Auto-negotiation clause 37 enable */
+ value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
+ sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
new file mode 100644
index 000000000000..6b26a50724d3
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
@@ -0,0 +1,38 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Byungho An <bh74.an@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_XPCS_H__
+#define __SXGBE_XPCS_H__
+
+/* XPCS Registers */
+#define XPCS_OFFSET 0x1A060000
+#define SR_PCS_MMD_CONTROL1 0x030000
+#define SR_PCS_CONTROL2 0x030007
+#define VR_PCS_MMD_XAUI_MODE_CONTROL 0x038004
+#define VR_PCS_MMD_DIGITAL_STATUS 0x038010
+#define SR_MII_MMD_CONTROL 0x1F0000
+#define SR_MII_MMD_AN_ADV 0x1F0004
+#define SR_MII_MMD_AN_LINK_PARTNER_BA 0x1F0005
+#define VR_MII_MMD_AN_CONTROL 0x1F8001
+#define VR_MII_MMD_AN_INT_STATUS 0x1F8002
+
+#define XPCS_QSEQ_STATE_STABLE 0x10
+#define XPCS_QSEQ_STATE_MPLLOFF 0x1c
+#define XPCS_TYPE_SEL_R 0x00
+#define XPCS_TYPE_SEL_X 0x01
+#define XPCS_TYPE_SEL_W 0x02
+#define XPCS_XAUI_MODE 0x00
+#define XPCS_RXAUI_MODE 0x01
+
+int sxgbe_xpcs_init(struct net_device *ndev);
+int sxgbe_xpcs_init_1G(struct net_device *ndev);
+
+#endif /* __SXGBE_XPCS_H__ */
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 174a92f5fe51..21c20ea0dad0 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -162,8 +162,8 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
return -EIO;
- memcpy(mac_address,
- MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
+ ether_addr_copy(mac_address,
+ MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
return 0;
}
@@ -172,8 +172,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data;
int i, rc;
- /* We can have one VI for each 8K region. However we need
- * multiple TX queues per channel.
+ /* We can have one VI for each 8K region. However, until we
+ * use TX option descriptors we need two TX queues per channel.
*/
efx->max_channels =
min_t(unsigned int,
@@ -565,10 +565,17 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
* several of each (in fact that's the only option if host
* page size is >4K). So we may allocate some extra VIs just
* for writing PIO buffers through.
+ *
+ * The UC mapping contains (min_vis - 1) complete VIs and the
+ * first half of the next VI. Then the WC mapping begins with
+ * the second half of this last VI.
*/
uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
ER_DZ_TX_PIOBUF);
if (nic_data->n_piobufs) {
+ /* pio_write_vi_base rounds down to give the number of complete
+ * VIs inside the UC mapping.
+ */
pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
nic_data->n_piobufs) *
@@ -1955,6 +1962,9 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
int tx_descs = 0;
int spent = 0;
+ if (quota <= 0)
+ return spent;
+
read_ptr = channel->eventq_read_ptr;
for (;;) {
@@ -3145,12 +3155,10 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
table->dev_uc_count = -1;
} else {
table->dev_uc_count = 1 + netdev_uc_count(net_dev);
- memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
- ETH_ALEN);
+ ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {
- memcpy(table->dev_uc_list[i].addr,
- uc->addr, ETH_ALEN);
+ ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
i++;
}
}
@@ -3162,8 +3170,7 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
eth_broadcast_addr(table->dev_mc_list[0].addr);
i = 1;
netdev_for_each_mc_addr(mc, net_dev) {
- memcpy(table->dev_mc_list[i].addr,
- mc->addr, ETH_ALEN);
+ ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
i++;
}
}
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 207ac9a1e3de..62a55dde61d5 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -227,36 +227,6 @@
#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
-/* RX_USER_DESC */
-#define ESF_DZ_RX_USR_RESERVED_LBN 62
-#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
-#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
-#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
-#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
-#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
-#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
-#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
-#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
-#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
-#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
-#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
-#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
-#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
-#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
-
/* TX_CSUM_TSTAMP_DESC */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
@@ -338,37 +308,6 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_USER_DESC */
-#define ESF_DZ_TX_USR_TYPE_LBN 63
-#define ESF_DZ_TX_USR_TYPE_WIDTH 1
-#define ESF_DZ_TX_USR_CONT_LBN 62
-#define ESF_DZ_TX_USR_CONT_WIDTH 1
-#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
-#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
-#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
-#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
-#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
-#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
-#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
-#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
-#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
-#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
-#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
-#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
-#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
/*************************************************************************/
/* TX_DESC_UPD_REG: Transmit descriptor update register.
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 83d464347021..57b971e5e6b2 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -503,8 +503,6 @@ static int efx_probe_channel(struct efx_channel *channel)
goto fail;
}
- channel->n_rx_frm_trunc = 0;
-
return 0;
fail:
@@ -1014,7 +1012,7 @@ static int efx_probe_port(struct efx_nic *efx)
return rc;
/* Initialise MAC address to permanent address */
- memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);
+ ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
return 0;
}
@@ -1346,20 +1344,23 @@ static int efx_probe_interrupts(struct efx_nic *efx)
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
- if (rc > 0) {
+ rc = pci_enable_msix_range(efx->pci_dev,
+ xentries, 1, n_channels);
+ if (rc < 0) {
+ /* Fall back to single channel MSI */
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ } else if (rc < n_channels) {
netif_err(efx, drv, efx->net_dev,
"WARNING: Insufficient MSI-X vectors"
" available (%d < %u).\n", rc, n_channels);
netif_err(efx, drv, efx->net_dev,
"WARNING: Performance may be reduced.\n");
- EFX_BUG_ON_PARANOID(rc >= n_channels);
n_channels = rc;
- rc = pci_enable_msix(efx->pci_dev, xentries,
- n_channels);
}
- if (rc == 0) {
+ if (rc > 0) {
efx->n_channels = n_channels;
if (n_channels > extra_channels)
n_channels -= extra_channels;
@@ -1375,11 +1376,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
for (i = 0; i < efx->n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
- } else {
- /* Fall back to single channel MSI */
- efx->interrupt_mode = EFX_INT_MODE_MSI;
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI-X\n");
}
}
@@ -1603,6 +1599,8 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
goto fail1;
+ efx_set_channels(efx);
+
rc = efx->type->dimension_resources(efx);
if (rc)
goto fail2;
@@ -1613,7 +1611,6 @@ static int efx_probe_nic(struct efx_nic *efx)
efx->rx_indir_table[i] =
ethtool_rxfh_indir_default(i, efx->rss_spread);
- efx_set_channels(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
@@ -2115,7 +2112,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
- char *new_addr = addr->sa_data;
+ u8 *new_addr = addr->sa_data;
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
@@ -2124,7 +2121,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
return -EADDRNOTAVAIL;
}
- memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
+ ether_addr_copy(net_dev->dev_addr, new_addr);
efx_sriov_mac_address_changed(efx);
/* Reconfigure the MAC */
@@ -3273,6 +3270,6 @@ module_exit(efx_exit_module);
MODULE_AUTHOR("Solarflare Communications and "
"Michael Brown <mbrown@fensystems.co.uk>");
-MODULE_DESCRIPTION("Solarflare Communications network driver");
+MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
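The MSI-X change in efx_probe_interrupts() (and the matching one in niu.c further down) is the standard conversion from the old pci_enable_msix() retry loop to pci_enable_msix_range(). A minimal sketch of the pattern, with illustrative names (example_enable_msix, want), not code from this patch:

#include <linux/pci.h>

static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries,
			       unsigned int want)
{
	unsigned int i;
	int got;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	/* Ask for up to 'want' vectors but accept as few as one. */
	got = pci_enable_msix_range(pdev, entries, 1, want);
	if (got < 0)
		return got;	/* MSI-X unavailable; caller falls back to MSI */

	return got;		/* may be fewer than 'want' */
}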
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index dbd7b78fe01c..99032581336f 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,7 +14,7 @@
#include "net_driver.h"
#include "filter.h"
-/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
#define EFX_MEM_BAR 2
/* TX */
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 229428915aa8..0de8b07c24c2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -251,6 +251,9 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
* @test_index: Starting index of the test
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries. Return new test
+ * index.
*/
static int efx_fill_loopback_test(struct efx_nic *efx,
struct efx_loopback_self_tests *lb_tests,
@@ -290,6 +293,12 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
* @tests: Efx self-test results structure, or %NULL
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
+ *
+ * Get the number of self-test strings, the strings themselves, and/or
+ * the test results. Return the number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
*/
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
@@ -444,7 +453,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_self_tests *efx_tests;
- int already_up;
+ bool already_up;
int rc = -ENOMEM;
efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
@@ -452,8 +461,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
goto fail;
if (efx->state != STATE_READY) {
- rc = -EIO;
- goto fail1;
+ rc = -EBUSY;
+ goto out;
}
netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
@@ -466,7 +475,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
- goto fail1;
+ goto out;
}
}
@@ -479,8 +488,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
rc == 0 ? "passed" : "failed",
(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
-fail1:
- /* Fill ethtool results structures */
+out:
efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
kfree(efx_tests);
fail:
@@ -691,7 +699,6 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
}
-
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
@@ -720,7 +727,7 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
}
/* MAC address mask including only I/G bit */
-static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
#define PORT_FULL_MASK ((__force __be16)~0)
@@ -780,16 +787,16 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
rule->flow_type = ETHER_FLOW;
if (spec.match_flags &
(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
- memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
+ ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
- memset(mac_mask->h_dest, ~0, ETH_ALEN);
+ eth_broadcast_addr(mac_mask->h_dest);
else
- memcpy(mac_mask->h_dest, mac_addr_ig_mask,
- ETH_ALEN);
+ ether_addr_copy(mac_mask->h_dest,
+ mac_addr_ig_mask);
}
if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
- memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
- memset(mac_mask->h_source, ~0, ETH_ALEN);
+ ether_addr_copy(mac_entry->h_source, spec.rem_mac);
+ eth_broadcast_addr(mac_mask->h_source);
}
if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
mac_entry->h_proto = spec.ether_type;
@@ -961,13 +968,13 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
else
return -EINVAL;
- memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
+ ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
}
if (!is_zero_ether_addr(mac_mask->h_source)) {
if (!is_broadcast_ether_addr(mac_mask->h_source))
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
- memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
+ ether_addr_copy(spec.rem_mac, mac_entry->h_source);
}
if (mac_mask->h_proto) {
if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
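The ether_addr_copy()/eth_broadcast_addr() conversions above rely on both operands being 16-bit aligned, which is why the static MAC constants gain __aligned(2). A hedged sketch of the idiom, using an illustrative locally administered address:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* __aligned(2) matters: ether_addr_copy() copies in 16-bit words. */
static const u8 example_mac[ETH_ALEN] __aligned(2) = {
	0x02, 0x00, 0x00, 0x00, 0x00, 0x01
};

static void example_set_mac(struct net_device *dev)
{
	/* dev->dev_addr is allocated with suitable alignment by the core. */
	ether_addr_copy(dev->dev_addr, example_mac);
}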
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 18d6f761f4d0..8ec20b713cc6 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -422,7 +422,6 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
-
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
@@ -467,6 +466,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
efx_schedule_channel_irq(efx_get_channel(efx, 1));
return IRQ_HANDLED;
}
+
/**************************************************************************
*
* RSS
@@ -1358,6 +1358,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
case 100: link_speed = 1; break;
default: link_speed = 0; break;
}
+
/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
* as advertised. Disable to ensure packets are not
* indefinitely held and TX queue can be flushed at any point
@@ -2182,7 +2183,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
}
/* Read the MAC addresses */
- memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
+ ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);
netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
efx->phy_type, efx->mdio.prtad);
@@ -2868,4 +2869,3 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
-
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index f72489a105ca..a08761360cdf 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -311,7 +311,6 @@ static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
*/
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
-
struct efx_tx_buffer *buffer;
efx_qword_t *txd;
unsigned write_ptr;
@@ -1249,6 +1248,9 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
int tx_packets = 0;
int spent = 0;
+ if (budget <= 0)
+ return spent;
+
read_ptr = channel->eventq_read_ptr;
for (;;) {
@@ -1609,7 +1611,6 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-
/* Setup RSS indirection table.
* This maps from the hash value of the packet to RXQ
*/
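The budget guard added to efx_farch_ev_process() (and to the tile and tc35815 poll routines later in this series) follows the usual NAPI shape; a minimal sketch with an illustrative example_poll():

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
	int spent = 0;

	/* netpoll may invoke the handler with a zero budget; do no work. */
	if (budget <= 0)
		return spent;

	/* ... process up to 'budget' completions, incrementing 'spent' ... */

	if (spent < budget)
		napi_complete(napi);
	return spent;
}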
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 3ef298d3c47e..d0ed7f71ea7e 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -243,7 +243,7 @@ static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
}
if (addr != NULL) {
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
- memcpy(spec->loc_mac, addr, ETH_ALEN);
+ ether_addr_copy(spec->loc_mac, addr);
}
return 0;
}
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index eb59abb57e85..7bd4b14bf3b3 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1187,6 +1187,9 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
int rc;
BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+ /* we need __aligned(2) for ether_addr_copy */
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
@@ -1199,11 +1202,10 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
}
if (mac_address)
- memcpy(mac_address,
- port_num ?
- MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
- MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
- ETH_ALEN);
+ ether_addr_copy(mac_address,
+ port_num ?
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
if (fw_subtype_list) {
for (i = 0;
i < MCDI_VAR_ARRAY_LEN(outlen,
@@ -1532,7 +1534,7 @@ static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
MC_CMD_FILTER_MODE_SIMPLE);
- memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
+ ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 91d23252f8fa..e5fc4e1574b5 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -854,8 +854,8 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
- memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
- efx->net_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+ efx->net_dev->dev_addr);
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index af2b8c59a903..8a400a0595eb 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1323,7 +1323,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
return &rx_queue->buffer[index];
}
-
/**
* EFX_MAX_FRAME_LEN - calculate maximum frame length
*
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 79226b19e3c4..32d969e857f7 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -530,4 +530,3 @@ void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}
-
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index d7a36829649a..6b861e3de4b0 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -223,7 +223,6 @@ struct efx_ptp_timeset {
* @evt_list: List of MC receive events awaiting packets
* @evt_free_list: List of free events
* @evt_lock: Lock for manipulating evt_list and evt_free_list
- * @evt_overflow: Boolean indicating that event list has overflowed
* @rx_evts: Instantiated events (on evt_list and evt_free_list)
* @workwq: Work queue for processing pending PTP operations
* @work: Work task
@@ -275,7 +274,6 @@ struct efx_ptp_data {
struct list_head evt_list;
struct list_head evt_free_list;
spinlock_t evt_lock;
- bool evt_overflow;
struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
struct workqueue_struct *workwq;
struct work_struct work;
@@ -768,37 +766,36 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
return -EAGAIN;
}
- /* Convert the NIC time into kernel time. No correction is required-
- * this time is the output of a firmware process.
- */
- mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
- ptp->timeset[last_good].minor, 0);
-
- /* Calculate delay from actual PPS to last_time */
- delta = ktime_to_timespec(mc_time);
- delta.tv_nsec +=
- last_time->ts_real.tv_nsec -
- (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
-
- /* It is possible that the seconds rolled over between taking
+ /* Calculate delay from last good sync (host time) to last_time.
+ * It is possible that the seconds rolled over between taking
* the start reading and the last value written by the host. The
* timescales are such that a gap of more than one second is never
- * expected.
+ * expected. delta is *not* normalised.
*/
start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
- if (start_sec != last_sec) {
- if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
- netif_warn(efx, hw, efx->net_dev,
- "PTP bad synchronisation seconds\n");
- return -EAGAIN;
- } else {
- delta.tv_sec = 1;
- }
- } else {
- delta.tv_sec = 0;
+ if (start_sec != last_sec &&
+ ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP bad synchronisation seconds\n");
+ return -EAGAIN;
}
+ delta.tv_sec = (last_sec - start_sec) & 1;
+ delta.tv_nsec =
+ last_time->ts_real.tv_nsec -
+ (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+ /* Convert the NIC time at last good sync into kernel time.
+ * No correction is required - this time is the output of a
+ * firmware process.
+ */
+ mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ ptp->timeset[last_good].minor, 0);
+
+ /* Calculate delay from NIC top of second to last_time */
+ delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
+ /* Set PPS timestamp to match NIC top of second */
ptp->host_time_pps = *last_time;
pps_sub_ts(&ptp->host_time_pps, delta);
@@ -941,11 +938,6 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
}
}
}
- /* If the event overflow flag is set and the event list is now empty
- * clear the flag to re-enable the overflow warning message.
- */
- if (ptp->evt_overflow && list_empty(&ptp->evt_list))
- ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
}
@@ -989,11 +981,6 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
break;
}
}
- /* If the event overflow flag is set and the event list is now empty
- * clear the flag to re-enable the overflow warning message.
- */
- if (ptp->evt_overflow && list_empty(&ptp->evt_list))
- ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
return rc;
@@ -1147,7 +1134,6 @@ static int efx_ptp_stop(struct efx_nic *efx)
list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
list_move(cursor, &efx->ptp_data->evt_free_list);
}
- ptp->evt_overflow = false;
spin_unlock_bh(&efx->ptp_data->evt_lock);
return rc;
@@ -1208,6 +1194,7 @@ static const struct ptp_clock_info efx_phc_clock_info = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 1,
.adjfreq = efx_phc_adjfreq,
.adjtime = efx_phc_adjtime,
@@ -1253,7 +1240,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
spin_lock_init(&ptp->evt_lock);
for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
- ptp->evt_overflow = false;
/* Get the NIC PTP attributes and set up time conversions */
rc = efx_ptp_get_attributes(efx);
@@ -1380,6 +1366,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
u8 *match_data_012, *match_data_345;
unsigned int version;
+ u8 *data;
match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
@@ -1388,7 +1375,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
return false;
}
- version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
+ data = skb->data;
+ version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]);
if (version != PTP_VERSION_V1) {
return false;
}
@@ -1396,13 +1384,14 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
/* PTP V1 uses all six bytes of the UUID to match the packet
* to the timestamp
*/
- match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
- match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
+ match_data_012 = data + PTP_V1_UUID_OFFSET;
+ match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
} else {
if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
return false;
}
- version = skb->data[PTP_V2_VERSION_OFFSET];
+ data = skb->data;
+ version = data[PTP_V2_VERSION_OFFSET];
if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
return false;
}
@@ -1414,17 +1403,17 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
* enhanced mode fixes this issue and uses bytes 0-2
* and byte 5-7 of the UUID.
*/
- match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
+ match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
if (ptp->mode == MC_CMD_PTP_MODE_V2) {
- match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
} else {
- match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
}
}
/* Does this packet require timestamping? */
- if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+ if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
match->state = PTP_PACKET_STATE_UNMATCHED;
/* We expect the sequence number to be in the same position in
@@ -1440,8 +1429,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
(match_data_345[0] << 24));
match->words[1] = (match_data_345[1] |
(match_data_345[2] << 8) |
- (skb->data[PTP_V1_SEQUENCE_OFFSET +
- PTP_V1_SEQUENCE_LENGTH - 1] <<
+ (data[PTP_V1_SEQUENCE_OFFSET +
+ PTP_V1_SEQUENCE_LENGTH - 1] <<
16));
} else {
match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
@@ -1635,13 +1624,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
list_add_tail(&evt->link, &ptp->evt_list);
queue_work(ptp->workwq, &ptp->work);
- } else if (!ptp->evt_overflow) {
- /* Log a warning message and set the event overflow flag.
- * The message won't be logged again until the event queue
- * becomes empty.
- */
+ } else if (net_ratelimit()) {
+ /* Log a rate-limited warning message. */
netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
- ptp->evt_overflow = true;
}
spin_unlock_bh(&ptp->evt_lock);
}
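Dropping the evt_overflow flag works because net_ratelimit() already bounds how often the warning can fire. A sketch of the pattern, using netdev_err() with an assumed net_device pointer rather than the driver's netif_err() wrapper:

#include <linux/net.h>
#include <linux/netdevice.h>

static void example_report_overflow(struct net_device *dev)
{
	/* net_ratelimit() keeps a global token bucket for network warnings. */
	if (net_ratelimit())
		netdev_err(dev, "PTP event queue overflow\n");
}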
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 26641817a9c7..0fc5baef45b1 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -50,7 +50,7 @@ struct efx_loopback_payload {
} __packed;
/* Loopback test source MAC address */
-static const unsigned char payload_source[ETH_ALEN] = {
+static const u8 payload_source[ETH_ALEN] __aligned(2) = {
0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};
@@ -366,8 +366,8 @@ static void efx_iterate_state(struct efx_nic *efx)
struct efx_loopback_payload *payload = &state->payload;
/* Initialise the layerII header */
- memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
- memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
+ ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
+ ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
payload->header.h_proto = htons(ETH_P_IP);
/* saddr set later and used as incrementing count */
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 0c38f926871e..9a9205e77896 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1095,7 +1095,7 @@ static void efx_sriov_peer_work(struct work_struct *data)
/* Fill the remaining addresses */
list_for_each_entry(local_addr, &efx->local_addr_list, link) {
- memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN);
+ ether_addr_copy(peer->mac_addr, local_addr->addr);
peer->tci = 0;
++peer;
++peer_count;
@@ -1303,8 +1303,7 @@ int efx_sriov_init(struct efx_nic *efx)
goto fail_vfs;
rtnl_lock();
- memcpy(vfdi_status->peers[0].mac_addr,
- net_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr);
efx->vf_init_count = efx->vf_count;
rtnl_unlock();
@@ -1452,8 +1451,8 @@ void efx_sriov_mac_address_changed(struct efx_nic *efx)
if (!efx->vf_init_count)
return;
- memcpy(vfdi_status->peers[0].mac_addr,
- efx->net_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(vfdi_status->peers[0].mac_addr,
+ efx->net_dev->dev_addr);
queue_work(vfdi_workqueue, &efx->peer_work);
}
@@ -1570,7 +1569,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
vf = efx->vf + vf_i;
mutex_lock(&vf->status_lock);
- memcpy(vf->addr.mac_addr, mac, ETH_ALEN);
+ ether_addr_copy(vf->addr.mac_addr, mac);
__efx_sriov_update_vf_addr(vf);
mutex_unlock(&vf->status_lock);
@@ -1633,7 +1632,7 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
vf = efx->vf + vf_i;
ivi->vf = vf_i;
- memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN);
+ ether_addr_copy(ivi->mac, vf->addr.mac_addr);
ivi->tx_rate = 0;
tci = ntohs(vf->addr.tci);
ivi->vlan = tci & VLAN_VID_MASK;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 75d11fa4eb0a..fa9475300411 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -787,15 +787,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
* Requires TX checksum offload support.
*/
-/* Number of bytes inserted at the start of a TSO header buffer,
- * similar to NET_IP_ALIGN.
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define TSOH_OFFSET 0
-#else
-#define TSOH_OFFSET NET_IP_ALIGN
-#endif
-
#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
/**
@@ -882,13 +873,13 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(buffer->flags);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
- if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+ if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
unsigned index =
(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
struct efx_buffer *page_buf =
&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
unsigned offset =
- TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+ TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
if (unlikely(!page_buf->addr) &&
efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
@@ -901,10 +892,10 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
} else {
tx_queue->tso_long_headers++;
- buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+ buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
if (unlikely(!buffer->heap_buf))
return NULL;
- result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+ result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
}
@@ -1011,7 +1002,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
static int tso_start(struct tso_state *st, struct efx_nic *efx,
const struct sk_buff *skb)
{
- bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+ bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
struct device *dma_dev = &efx->pci_dev->dev;
unsigned int header_len, in_len;
dma_addr_t dma_addr;
@@ -1037,7 +1028,7 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
st->out_len = skb->len - header_len;
- if (!use_options) {
+ if (!use_opt_desc) {
st->header_unmap_len = 0;
if (likely(in_len == 0)) {
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 5eb933c97bba..7daa7d433099 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -987,7 +987,7 @@ out_unlock:
spin_unlock(&priv->lock);
out:
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
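The skb-free conversions throughout this series split into two cases: dev_consume_skb_any() for packets that completed normally and dev_kfree_skb_any() for drops, both safe in any context. A hedged sketch with an illustrative helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_complete_tx(struct sk_buff *skb, bool transmitted)
{
	if (transmitted)
		dev_consume_skb_any(skb);	/* normal completion */
	else
		dev_kfree_skb_any(skb);		/* drop; visible to drop tracing */
}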
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index ff57a46388ee..6072f093e6b4 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1614,7 +1614,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
skb->data, skb->len, PCI_DMA_TODEVICE);
if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
sis_priv->tx_ring[entry].bufptr))) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
sis_priv->tx_skbuff[entry] = NULL;
net_dev->stats.tx_dropped++;
spin_unlock_irqrestore(&sis_priv->lock, flags);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index c50fb08c9905..1c44e67c3067 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -551,7 +551,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
spin_unlock_irqrestore(&lp->lock, flags);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1211,7 +1211,6 @@ static void
smc911x_rx_dma_irq(int dma, void *data)
{
struct net_device *dev = (struct net_device *)data;
- unsigned long ioaddr = dev->base_addr;
struct smc911x_local *lp = netdev_priv(dev);
struct sk_buff *skb = lp->current_rx_skb;
unsigned long flags;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 839c0e6cca01..d1b4dca53a9d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -621,7 +621,7 @@ static void smc_hardware_send_pkt(unsigned long data)
done: if (!THROTTLE_TX_PKTS)
netif_wake_queue(dev);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
}
/*
@@ -657,7 +657,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_warn(dev, "Far too big packet error.\n");
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 6382b7c416f4..a0fc151da40d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -439,7 +439,8 @@ static int smsc911x_request_resources(struct platform_device *pdev)
/* Request clock */
pdata->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk))
- netdev_warn(ndev, "couldn't get clock %li\n", PTR_ERR(pdata->clk));
+ dev_dbg(&pdev->dev, "couldn't get clock %li\n",
+ PTR_ERR(pdata->clk));
return ret;
}
@@ -1672,7 +1673,7 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
freespace -= (skb->len + 32);
skb_tx_timestamp(skb);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
smsc911x_tx_update_txcounters(dev);
@@ -2379,8 +2380,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
int res_size, irq_flags;
int retval;
- pr_info("Driver version %s\n", SMSC_DRV_VERSION);
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smsc911x-memory");
if (!res)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index f2d7c702c77f..2d09c116cbc8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -26,6 +26,16 @@ config STMMAC_PLATFORM
If unsure, say N.
+config DWMAC_SOCFPGA
+ bool "SOCFPGA dwmac support"
+ depends on STMMAC_PLATFORM && MFD_SYSCON && (ARCH_SOCFPGA || COMPILE_TEST)
+ help
+	  Support for the Ethernet controller on Altera SOCFPGA
+
+ This selects the Altera SOCFPGA SoC glue layer support
+ for the stmmac device driver. This driver is used for
+	  Arria V and Cyclone V FPGA SoCs.
+
config DWMAC_SUNXI
bool "Allwinner GMAC support"
depends on STMMAC_PLATFORM && ARCH_SUNXI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index dcef28775dad..18695ebef7e4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -3,6 +3,7 @@ stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
+stmmac-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
new file mode 100644
index 000000000000..fd8a217556a1
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -0,0 +1,130 @@
+/* Copyright Altera Corporation (C) 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Adapted from dwmac-sti.c
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
+
+struct socfpga_dwmac {
+ int interface;
+ u32 reg_offset;
+ u32 reg_shift;
+ struct device *dev;
+ struct regmap *sys_mgr_base_addr;
+};
+
+static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *sys_mgr_base_addr;
+ u32 reg_offset, reg_shift;
+ int ret;
+
+ dwmac->interface = of_get_phy_mode(np);
+
+ sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
+ if (IS_ERR(sys_mgr_base_addr)) {
+ dev_info(dev, "No sysmgr-syscon node found\n");
+ return PTR_ERR(sys_mgr_base_addr);
+ }
+
+ ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 1, &reg_offset);
+ if (ret) {
+ dev_info(dev, "Could not read reg_offset from sysmgr-syscon!\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 2, &reg_shift);
+ if (ret) {
+ dev_info(dev, "Could not read reg_shift from sysmgr-syscon!\n");
+ return -EINVAL;
+ }
+
+ dwmac->reg_offset = reg_offset;
+ dwmac->reg_shift = reg_shift;
+ dwmac->sys_mgr_base_addr = sys_mgr_base_addr;
+ dwmac->dev = dev;
+
+ return 0;
+}
+
+static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
+{
+ struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
+ int phymode = dwmac->interface;
+ u32 reg_offset = dwmac->reg_offset;
+ u32 reg_shift = dwmac->reg_shift;
+ u32 ctrl, val;
+
+ switch (phymode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+ break;
+ default:
+ dev_err(dwmac->dev, "bad phy mode %d\n", phymode);
+ return -EINVAL;
+ }
+
+ regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
+ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
+ ctrl |= val << reg_shift;
+
+ regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+ return 0;
+}
+
+static void *socfpga_dwmac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+ struct socfpga_dwmac *dwmac;
+
+ dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return ERR_PTR(-ENOMEM);
+
+ ret = socfpga_dwmac_parse_data(dwmac, dev);
+ if (ret) {
+ dev_err(dev, "Unable to parse OF data\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = socfpga_dwmac_setup(dwmac);
+ if (ret) {
+ dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ return dwmac;
+}
+
+const struct stmmac_of_data socfpga_gmac_data = {
+ .setup = socfpga_dwmac_probe,
+};
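The regmap read-modify-write in socfpga_dwmac_setup() could equally be expressed with regmap_update_bits(); a sketch under the same field names, assuming <linux/regmap.h> as already included above (example_set_phy_mode is illustrative):

static int example_set_phy_mode(struct socfpga_dwmac *dwmac, u32 val)
{
	u32 mask = SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << dwmac->reg_shift;

	/* Single call replaces the explicit regmap_read()/regmap_write() pair. */
	return regmap_update_bits(dwmac->sys_mgr_base_addr, dwmac->reg_offset,
				  mask, val << dwmac->reg_shift);
}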
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f9e60d7918c4..ca01035634a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -136,6 +136,9 @@ extern const struct stmmac_of_data sun7i_gmac_data;
#ifdef CONFIG_DWMAC_STI
extern const struct stmmac_of_data sti_gmac_data;
#endif
+#ifdef CONFIG_DWMAC_SOCFPGA
+extern const struct stmmac_of_data socfpga_gmac_data;
+#endif
extern struct platform_driver stmmac_pltfr_driver;
static inline int stmmac_register_platform(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8543e1cfd55e..d940034acdd4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1303,7 +1303,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->hw->mode->clean_desc3(priv, p);
if (likely(skb != NULL)) {
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
priv->tx_skbuff[entry] = NULL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 8fb32a80f1c1..46aef5108bea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -38,6 +38,9 @@ static const struct of_device_id stmmac_dt_ids[] = {
{ .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
{ .compatible = "st,stid127-dwmac", .data = &sti_gmac_data},
#endif
+#ifdef CONFIG_DWMAC_SOCFPGA
+ { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+#endif
/* SoC specific glue layers should come before generic bindings */
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.610"},
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 7680581ebe12..b7ad3565566c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -164,6 +164,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = stmmac_adjust_freq,
.adjtime = stmmac_adjust_time,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8e2266e1f260..79606f47a08e 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9041,7 +9041,7 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
struct msix_entry msi_vec[NIU_NUM_LDG];
struct niu_parent *parent = np->parent;
struct pci_dev *pdev = np->pdev;
- int i, num_irqs, err;
+ int i, num_irqs;
u8 first_ldg;
first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
@@ -9053,21 +9053,16 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
(np->port == 0 ? 3 : 1));
BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
-retry:
for (i = 0; i < num_irqs; i++) {
msi_vec[i].vector = 0;
msi_vec[i].entry = i;
}
- err = pci_enable_msix(pdev, msi_vec, num_irqs);
- if (err < 0) {
+ num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
+ if (num_irqs < 0) {
np->flags &= ~NIU_FLAGS_MSIX;
return;
}
- if (err > 0) {
- num_irqs = err;
- goto retry;
- }
np->flags |= NIU_FLAGS_MSIX;
for (i = 0; i < num_irqs; i++)
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index c2799dc46325..102a66fc54a2 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -688,7 +688,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
}
dev->stats.tx_packets++;
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
}
gp->tx_old = entry;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d6d8ec676c8..5d5fec6c4eb0 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -378,7 +378,6 @@ struct cpsw_priv {
u32 version;
u32 coal_intvl;
u32 bus_freq_mhz;
- struct net_device_stats stats;
int rx_packet_max;
int host_port;
struct clk *clk;
@@ -673,8 +672,8 @@ static void cpsw_tx_handler(void *token, int len, int status)
if (unlikely(netif_queue_stopped(ndev)))
netif_wake_queue(ndev);
cpts_tx_timestamp(priv->cpts, skb);
- priv->stats.tx_packets++;
- priv->stats.tx_bytes += len;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
}
@@ -700,10 +699,10 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpts_rx_timestamp(priv->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
- priv->stats.rx_bytes += len;
- priv->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
} else {
- priv->stats.rx_dropped++;
+ ndev->stats.rx_dropped++;
new_skb = skb;
}
@@ -1313,7 +1312,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
cpsw_err(priv, tx_err, "packet pad failed\n");
- priv->stats.tx_dropped++;
+ ndev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -1337,7 +1336,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
fail:
- priv->stats.tx_dropped++;
+ ndev->stats.tx_dropped++;
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
@@ -1477,7 +1476,6 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct cpsw_priv *priv = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(req);
int slave_no = cpsw_slave_index(priv);
if (!netif_running(dev))
@@ -1490,14 +1488,11 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
case SIOCGHWTSTAMP:
return cpsw_hwtstamp_get(dev, req);
#endif
- case SIOCGMIIPHY:
- data->phy_id = priv->slaves[slave_no].phy->addr;
- break;
- default:
- return -ENOTSUPP;
}
- return 0;
+ if (!priv->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd);
}
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -1505,7 +1500,7 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
- priv->stats.tx_errors++;
+ ndev->stats.tx_errors++;
cpsw_intr_disable(priv);
cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_chan_stop(priv->txch);
@@ -1544,12 +1539,6 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- return &priv->stats;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
@@ -1642,7 +1631,6 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
- .ndo_get_stats = cpsw_ndo_get_stats,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cpsw_ndo_poll_controller,
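The cpsw ioctl change is the generic "delegate MII ioctls to phylib" pattern; a minimal sketch with an assumed example_priv layout (the field name phy is illustrative):

#include <linux/netdevice.h>
#include <linux/phy.h>

struct example_priv {
	struct phy_device *phy;		/* attached PHY, assumed field name */
};

static int example_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;
	if (!priv->phy)
		return -EOPNOTSUPP;

	/* Let phylib handle SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG uniformly. */
	return phy_mii_ioctl(priv->phy, req, cmd);
}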
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 8c351f100aca..243513980b51 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -26,15 +26,13 @@
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include "cpts.h"
#ifdef CONFIG_TI_CPTS
-static struct sock_filter ptp_filter[] = {
- PTP_FILTER
-};
-
#define cpts_read32(c, r) __raw_readl(&c->reg->r)
#define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
@@ -217,6 +215,7 @@ static struct ptp_clock_info cpts_info = {
.name = "CTPS timer",
.max_adj = 1000000,
.n_ext_ts = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = cpts_ptp_adjfreq,
.adjtime = cpts_ptp_adjtime,
@@ -300,7 +299,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
u64 ns = 0;
struct cpts_event *event;
struct list_head *this, *next;
- unsigned int class = sk_run_filter(skb, ptp_filter);
+ unsigned int class = ptp_classify_raw(skb);
unsigned long flags;
u16 seqid;
u8 mtype;
@@ -371,10 +370,6 @@ int cpts_register(struct device *dev, struct cpts *cpts,
int err, i;
unsigned long flags;
- if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
- pr_err("cpts: bad ptp filter\n");
- return -EINVAL;
- }
cpts->info = cpts_info;
cpts->clock = ptp_clock_register(&cpts->info, dev);
if (IS_ERR(cpts->clock)) {
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 17503da9f7a5..7e1c91d41a87 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -659,6 +659,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
struct info_mpipe *info_mpipe =
container_of(napi, struct info_mpipe, napi);
+ if (budget <= 0)
+ goto done;
+
instance = info_mpipe->instance;
while ((n = gxio_mpipe_iqueue_try_peek(
&info_mpipe->iqueue,
@@ -870,6 +873,7 @@ static struct ptp_clock_info ptp_mpipe_caps = {
.name = "mPIPE clock",
.max_adj = 999999999,
.n_ext_ts = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = ptp_mpipe_adjfreq,
.adjtime = ptp_mpipe_adjtime,
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index edb2e12a0fe2..e5a5c5d4ce0c 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -831,6 +831,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
unsigned int work = 0;
+ if (budget <= 0)
+ goto done;
+
while (priv->active) {
int index = qup->__packet_receive_read;
if (index == qsp->__packet_receive_queue.__packet_write)
@@ -1821,7 +1824,7 @@ busy:
/* Handle completions. */
for (i = 0; i < nolds; i++)
- kfree_skb(olds[i]);
+ dev_consume_skb_any(olds[i]);
/* Update stats. */
u64_stats_update_begin(&stats->syncp);
@@ -2005,7 +2008,7 @@ busy:
/* Handle completions. */
for (i = 0; i < nolds; i++)
- kfree_skb(olds[i]);
+ dev_consume_skb_any(olds[i]);
/* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
u64_stats_update_begin(&stats->syncp);
@@ -2068,14 +2071,14 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
cpu_stats = &priv->cpu[i]->stats;
do {
- start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
trx_packets = cpu_stats->rx_packets;
ttx_packets = cpu_stats->tx_packets;
trx_bytes = cpu_stats->rx_bytes;
ttx_bytes = cpu_stats->tx_bytes;
trx_errors = cpu_stats->rx_errors;
trx_dropped = cpu_stats->rx_dropped;
- } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
rx_packets += trx_packets;
tx_packets += ttx_packets;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 3f4a32e39d27..0282d0161859 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -860,7 +860,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
if (skb) {
pci_unmap_single(card->pdev, buf_addr, skb->len,
PCI_DMA_TODEVICE);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
}
}
return 0;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 88e9c73cebc0..fef5573dbfca 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1645,6 +1645,9 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
int received = 0, handled;
u32 status;
+ if (budget <= 0)
+ return received;
+
spin_lock(&lp->rx_lock);
status = tc_readl(&tr->Int_Src);
do {
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 6ac20a6738f4..f61dc2b72bb2 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1022,7 +1022,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* The chip-specific entries in the device structure. */
dev->netdev_ops = &rhine_netdev_ops;
- dev->ethtool_ops = &netdev_ethtool_ops,
+ dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
@@ -1678,7 +1678,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
/* Must use alignment buffer. */
if (skb->len > PKT_BUF_SZ) {
/* packet too long, drop it */
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
rp->tx_skbuff[entry] = NULL;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -1698,7 +1698,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
pci_map_single(rp->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
rp->tx_skbuff_dma[entry] = 0;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -1836,7 +1836,7 @@ static void rhine_tx(struct net_device *dev)
rp->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
}
- dev_kfree_skb(rp->tx_skbuff[entry]);
+ dev_consume_skb_any(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}
@@ -2072,16 +2072,16 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
netdev_stats_to_stats64(stats, &dev->stats);
do {
- start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
stats->rx_packets = rp->rx_stats.packets;
stats->rx_bytes = rp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
do {
- start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
stats->tx_packets = rp->tx_stats.packets;
stats->tx_bytes = rp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
return stats;
}
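The *_bh to *_irq conversion of the u64_stats fetch loops here and in tilepro.c keeps the same reader shape; a sketch of that reader, with an assumed per-device counter block:

#include <linux/u64_stats_sync.h>

struct example_rx_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

static void example_read_stats(const struct example_rx_stats *rx,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	/* Retry if a writer updated the counters while we were reading. */
	do {
		start = u64_stats_fetch_begin_irq(&rx->syncp);
		*packets = rx->packets;
		*bytes = rx->bytes;
	} while (u64_stats_fetch_retry_irq(&rx->syncp, start));
}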
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ad61d26a44f3..de08e86db209 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2565,7 +2565,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
/* The hardware can handle at most 7 memory segments, so merge
* the skb if there are more */
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 0df36c6ec7f4..104d46f37969 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -641,11 +641,10 @@ static int w5100_hw_probe(struct platform_device *pdev)
if (!mem)
return -ENXIO;
mem_size = resource_size(mem);
- if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
- return -EBUSY;
- priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
- if (!priv->base)
- return -EBUSY;
+
+ priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
spin_lock_init(&priv->reg_lock);
priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 71c27b3292f1..1f33c4c86c20 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -561,11 +561,10 @@ static int w5300_hw_probe(struct platform_device *pdev)
if (!mem)
return -ENXIO;
mem_size = resource_size(mem);
- if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
- return -EBUSY;
- priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
- if (!priv->base)
- return -EBUSY;
+
+ priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
spin_lock_init(&priv->reg_lock);
priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
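The wiznet conversions collapse the request+map pair into devm_ioremap_resource(), which also reports failure as a proper ERR_PTR() instead of a blanket -EBUSY; a sketch of the resulting probe step (example_map_registers is illustrative):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *example_map_registers(struct platform_device *pdev)
{
	struct resource *mem;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Requests the region and maps it; returns an ERR_PTR() on failure. */
	return devm_ioremap_resource(&pdev->dev, mem);
}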
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index a4347508031c..fa193c4688da 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -771,8 +771,8 @@ static void ll_temac_recv(struct net_device *ndev)
/* if we're doing rx csum offload, set it up */
if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
- (skb->protocol == __constant_htons(ETH_P_IP)) &&
- (skb->len > 64)) {
+ (skb->protocol == htons(ETH_P_IP)) &&
+ (skb->len > 64)) {
skb->csum = cur_p->app3 & 0xFFFF;
skb->ip_summed = CHECKSUM_COMPLETE;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4bfdf8c7ada0..7b0a73556264 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -756,7 +756,7 @@ static void axienet_recv(struct net_device *ndev)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == __constant_htons(ETH_P_IP) &&
+ skb->protocol == htons(ETH_P_IP) &&
skb->len > 64) {
skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 36052b98b3fc..0d87c67a5ff7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -795,18 +795,6 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
}
/**
- * xemaclite_mdio_reset - Reset the mdio bus.
- * @bus: Pointer to the MII bus
- *
- * This function is required(?) as per Documentation/networking/phy.txt.
- * There is no reset in this device; this function always returns 0.
- */
-static int xemaclite_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
-/**
* xemaclite_mdio_setup - Register mii_bus for the Emaclite device
* @lp: Pointer to the Emaclite device private data
* @ofdev: Pointer to OF device structure
@@ -861,7 +849,6 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
bus->name = "Xilinx Emaclite MDIO";
bus->read = xemaclite_mdio_read;
bus->write = xemaclite_mdio_write;
- bus->reset = xemaclite_mdio_reset;
bus->parent = dev;
bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
@@ -1037,7 +1024,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
skb_tx_timestamp(new_skb);
dev->stats.tx_bytes += len;
- dev_kfree_skb(new_skb);
+ dev_consume_skb_any(new_skb);
return 0;
}
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index 3f431019e615..b81bc9fca378 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -23,6 +23,7 @@ config IXP4XX_ETH
tristate "Intel IXP4xx Ethernet support"
depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
select PHYLIB
+ select NET_PTP_CLASSIFY
---help---
Say Y here if you want to use built-in Ethernet ports
on IXP4xx processor.
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 25283f17d82f..f7e0f0f7c2e2 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -256,10 +256,6 @@ static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;
-static struct sock_filter ptp_filter[] = {
- PTP_FILTER
-};
-
static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
u8 *data = skb->data;
@@ -267,7 +263,7 @@ static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
u16 *hi, *id;
u32 lo;
- if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
+ if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
return 0;
offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
@@ -1413,11 +1409,6 @@ static int eth_init_one(struct platform_device *pdev)
char phy_id[MII_BUS_ID_SIZE + 3];
int err;
- if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
- pr_err("ixp4xx_eth: bad ptp filter\n");
- return -EINVAL;
- }
-
if (!(dev = alloc_etherdev(sizeof(struct port))))
return -ENOMEM;
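ptp_classify_raw() is the shared replacement for the sock_filter-based ptp_filter arrays dropped from cpts.c and ixp4xx_eth.c (hence the new NET_PTP_CLASSIFY select in the xscale Kconfig); a minimal sketch:

#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

static bool example_is_ptp_v1_ipv4(struct sk_buff *skb)
{
	/* Replaces the per-driver BPF ptp_filter arrays and sk_run_filter(). */
	return ptp_classify_raw(skb) == PTP_CLASS_V1_IPV4;
}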
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 61dd2447e1bb..81901659cc9e 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1184,7 +1184,7 @@ static void __exit yam_cleanup_driver(void)
struct yam_mcs *p;
int i;
- del_timer(&yam_timer);
+ del_timer_sync(&yam_timer);
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev = yam_devs[i];
if (dev) {
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 7b594ce3f21d..13010b4dae5b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -30,6 +30,7 @@
/* Fwd declaration */
struct hv_netvsc_packet;
+struct ndis_tcp_ip_checksum_info;
/* Represent the xfer page packet which contains 1 or more netvsc packet */
struct xferpage_packet {
@@ -73,7 +74,7 @@ struct hv_netvsc_packet {
} completion;
/* This points to the memory after page_buf */
- void *extension;
+ struct rndis_message *rndis_msg;
u32 total_data_buflen;
/* Points to the send/receive buffer where the ethernet frame is */
@@ -117,7 +118,8 @@ int netvsc_send(struct hv_device *device,
void netvsc_linkstatus_callback(struct hv_device *device_obj,
unsigned int status);
int netvsc_recv_callback(struct hv_device *device_obj,
- struct hv_netvsc_packet *packet);
+ struct hv_netvsc_packet *packet,
+ struct ndis_tcp_ip_checksum_info *csum_info);
int rndis_filter_open(struct hv_device *dev);
int rndis_filter_close(struct hv_device *dev);
int rndis_filter_device_add(struct hv_device *dev,
@@ -126,11 +128,6 @@ void rndis_filter_device_remove(struct hv_device *dev);
int rndis_filter_receive(struct hv_device *dev,
struct hv_netvsc_packet *pkt);
-
-
-int rndis_filter_send(struct hv_device *dev,
- struct hv_netvsc_packet *pkt);
-
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
@@ -139,6 +136,8 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
#define NVSP_PROTOCOL_VERSION_1 2
#define NVSP_PROTOCOL_VERSION_2 0x30002
+#define NVSP_PROTOCOL_VERSION_4 0x40000
+#define NVSP_PROTOCOL_VERSION_5 0x50000
enum {
NVSP_MSG_TYPE_NONE = 0,
@@ -193,6 +192,23 @@ enum {
NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+ NVSP_MSG2_MAX = NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+ /* Version 4 messages */
+ NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION,
+ NVSP_MSG4_TYPE_SWITCH_DATA_PATH,
+ NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+ NVSP_MSG4_MAX = NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+ /* Version 5 messages */
+ NVSP_MSG5_TYPE_OID_QUERY_EX,
+ NVSP_MSG5_TYPE_OID_QUERY_EX_COMP,
+ NVSP_MSG5_TYPE_SUBCHANNEL,
+ NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
+
+ NVSP_MSG5_MAX = NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
};
enum {
@@ -447,10 +463,44 @@ union nvsp_2_message_uber {
struct nvsp_2_free_rxbuf free_rxbuf;
} __packed;
+enum nvsp_subchannel_operation {
+ NVSP_SUBCHANNEL_NONE = 0,
+ NVSP_SUBCHANNEL_ALLOCATE,
+ NVSP_SUBCHANNEL_MAX
+};
+
+struct nvsp_5_subchannel_request {
+ u32 op;
+ u32 num_subchannels;
+} __packed;
+
+struct nvsp_5_subchannel_complete {
+ u32 status;
+ u32 num_subchannels; /* Actual number of subchannels allocated */
+} __packed;
+
+struct nvsp_5_send_indirect_table {
+ /* The number of entries in the send indirection table */
+ u32 count;
+
+	/* The offset of the send indirection table from the top of this struct.
+ * The send indirection table tells which channel to put the send
+ * traffic on. Each entry is a channel number.
+ */
+ u32 offset;
+} __packed;
+
+union nvsp_5_message_uber {
+ struct nvsp_5_subchannel_request subchn_req;
+ struct nvsp_5_subchannel_complete subchn_comp;
+ struct nvsp_5_send_indirect_table send_table;
+} __packed;
+
union nvsp_all_messages {
union nvsp_message_init_uber init_msg;
union nvsp_1_message_uber v1_msg;
union nvsp_2_message_uber v2_msg;
+ union nvsp_5_message_uber v5_msg;
} __packed;
/* ALL Messages */
@@ -463,6 +513,7 @@ struct nvsp_message {
#define NETVSC_MTU 65536
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
@@ -506,6 +557,8 @@ struct netvsc_device {
/* Holds rndis device info */
void *extension;
+ /* The receive buffer for this device */
+ unsigned char cb_buffer[NETVSC_PACKET_SIZE];
};
/* NdisInitialize message */
@@ -671,9 +724,133 @@ struct ndis_pkt_8021q_info {
};
};
+struct ndis_object_header {
+ u8 type;
+ u8 revision;
+ u16 size;
+};
+
+#define NDIS_OBJECT_TYPE_DEFAULT 0x80
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3
+#define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0
+#define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_LSOV1_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_TX_ENABLED_RX_DISABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_RX_ENABLED_TX_DISABLED 3
+#define NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED 4
+
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE 1
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4 0
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6 1
+
+/*
+ * New offload OIDs for NDIS 6
+ */
+#define OID_TCP_OFFLOAD_CURRENT_CONFIG 0xFC01020B /* query only */
+#define OID_TCP_OFFLOAD_PARAMETERS 0xFC01020C /* set only */
+#define OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020D/* query only */
+#define OID_TCP_CONNECTION_OFFLOAD_CURRENT_CONFIG 0xFC01020E /* query only */
+#define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */
+#define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */
+
+struct ndis_offload_params {
+ struct ndis_object_header header;
+ u8 ip_v4_csum;
+ u8 tcp_ip_v4_csum;
+ u8 udp_ip_v4_csum;
+ u8 tcp_ip_v6_csum;
+ u8 udp_ip_v6_csum;
+ u8 lso_v1;
+ u8 ip_sec_v1;
+ u8 lso_v2_ipv4;
+ u8 lso_v2_ipv6;
+ u8 tcp_connection_ip_v4;
+ u8 tcp_connection_ip_v6;
+ u32 flags;
+ u8 ip_sec_v2;
+ u8 ip_sec_v2_ip_v4;
+ struct {
+ u8 rsc_ip_v4;
+ u8 rsc_ip_v6;
+ };
+ struct {
+ u8 encapsulated_packet_task_offload;
+ u8 encapsulation_types;
+ };
+};
+
+struct ndis_tcp_ip_checksum_info {
+ union {
+ struct {
+ u32 is_ipv4:1;
+ u32 is_ipv6:1;
+ u32 tcp_checksum:1;
+ u32 udp_checksum:1;
+ u32 ip_header_checksum:1;
+ u32 reserved:11;
+ u32 tcp_header_offset:10;
+ } transmit;
+ struct {
+ u32 tcp_checksum_failed:1;
+ u32 udp_checksum_failed:1;
+ u32 ip_checksum_failed:1;
+ u32 tcp_checksum_succeeded:1;
+ u32 udp_checksum_succeeded:1;
+ u32 ip_checksum_succeeded:1;
+ u32 loopback:1;
+ u32 tcp_checksum_value_invalid:1;
+ u32 ip_checksum_value_invalid:1;
+ } receive;
+ u32 value;
+ };
+};
+
+struct ndis_tcp_lso_info {
+ union {
+ struct {
+ u32 unused:30;
+ u32 type:1;
+ u32 reserved2:1;
+ } transmit;
+ struct {
+ u32 mss:20;
+ u32 tcp_header_offset:10;
+ u32 type:1;
+ u32 reserved2:1;
+ } lso_v1_transmit;
+ struct {
+ u32 tcp_payload:30;
+ u32 type:1;
+ u32 reserved2:1;
+ } lso_v1_transmit_complete;
+ struct {
+ u32 mss:20;
+ u32 tcp_header_offset:10;
+ u32 type:1;
+ u32 ip_version:1;
+ } lso_v2_transmit;
+ struct {
+ u32 reserved:30;
+ u32 type:1;
+ u32 reserved2:1;
+ } lso_v2_transmit_complete;
+ u32 value;
+ };
+};
+
#define NDIS_VLAN_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
sizeof(struct ndis_pkt_8021q_info))
+#define NDIS_CSUM_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(struct ndis_tcp_ip_checksum_info))
+
+#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(struct ndis_tcp_lso_info))
+
/* Format of Information buffer passed in a SetRequest for the OID */
/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
struct rndis_config_parameter_info {
@@ -846,12 +1023,6 @@ struct rndis_message {
};
-struct rndis_filter_packet {
- void *completion_ctx;
- void (*completion)(void *context);
- struct rndis_message msg;
-};
-
/* Handy macros */
/* get the size of an RNDIS message. Pass in the message type, */
@@ -905,6 +1076,16 @@ struct rndis_filter_packet {
#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
+#define INFO_IPV4 2
+#define INFO_IPV6 4
+#define INFO_TCP 2
+#define INFO_UDP 4
+
+#define TRANSPORT_INFO_NOT_IP 0
+#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)
+#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP)
+#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP)
+#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP)
#endif /* _HYPERV_NET_H */
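
The TRANSPORT_INFO_* values defined above pack the network protocol in the upper 16 bits and the transport protocol in the lower 16 bits, which is how netvsc_start_xmit() later tests them. A minimal standalone sketch of that packing (plain C; the helper names are illustrative only, not kernel API):

/* Standalone sketch: decoding the TRANSPORT_INFO_* packing used above.
 * The macro values are copied from the header; is_ipv4()/is_tcp() are
 * illustrative helpers, not kernel functions.
 */
#include <stdio.h>

#define INFO_IPV4 2
#define INFO_IPV6 4
#define INFO_TCP  2
#define INFO_UDP  4

#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)

static int is_ipv4(unsigned int info) { return info & (INFO_IPV4 << 16); }
static int is_tcp(unsigned int info)  { return info & INFO_TCP; }

int main(void)
{
	unsigned int info = TRANSPORT_INFO_IPV4_TCP;

	/* Upper half selects the network protocol, lower half the transport. */
	printf("ipv4=%d tcp=%d\n", is_ipv4(info) != 0, is_tcp(info) != 0);
	return 0;
}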
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 03a2c6e17158..daddea2654ce 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -290,7 +290,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
NVSP_STAT_SUCCESS)
return -EINVAL;
- if (nvsp_ver != NVSP_PROTOCOL_VERSION_2)
+ if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
return 0;
/* NVSPv2 only: Send NDIS config */
@@ -314,6 +314,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
struct nvsp_message *init_packet;
int ndis_version;
struct net_device *ndev;
+ u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
+ NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
+ int i, num_ver = 4; /* number of different NVSP versions */
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -323,13 +326,14 @@ static int netvsc_connect_vsp(struct hv_device *device)
init_packet = &net_device->channel_init_pkt;
/* Negotiate the latest NVSP protocol supported */
- if (negotiate_nvsp_ver(device, net_device, init_packet,
- NVSP_PROTOCOL_VERSION_2) == 0) {
- net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2;
- } else if (negotiate_nvsp_ver(device, net_device, init_packet,
- NVSP_PROTOCOL_VERSION_1) == 0) {
- net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1;
- } else {
+ for (i = num_ver - 1; i >= 0; i--)
+ if (negotiate_nvsp_ver(device, net_device, init_packet,
+ ver_list[i]) == 0) {
+ net_device->nvsp_version = ver_list[i];
+ break;
+ }
+
+ if (i < 0) {
ret = -EPROTO;
goto cleanup;
}
@@ -339,7 +343,10 @@ static int netvsc_connect_vsp(struct hv_device *device)
/* Send the ndis version */
memset(init_packet, 0, sizeof(struct nvsp_message));
- ndis_version = 0x00050001;
+ if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
+ ndis_version = 0x00050001;
+ else
+ ndis_version = 0x0006001e;
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
init_packet->msg.v1_msg.
@@ -358,6 +365,11 @@ static int netvsc_connect_vsp(struct hv_device *device)
goto cleanup;
/* Post the big receive buffer to NetVSP */
+ if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+ net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
+ else
+ net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+
ret = netvsc_init_recv_buf(device);
cleanup:
@@ -432,17 +444,14 @@ static inline u32 hv_ringbuf_avail_percent(
return avail_write * 100 / ring_info->ring_datasize;
}
-static void netvsc_send_completion(struct hv_device *device,
+static void netvsc_send_completion(struct netvsc_device *net_device,
+ struct hv_device *device,
struct vmpacket_descriptor *packet)
{
- struct netvsc_device *net_device;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *nvsc_packet;
struct net_device *ndev;
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
ndev = net_device->ndev;
nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
@@ -561,13 +570,13 @@ int netvsc_send(struct hv_device *device,
}
static void netvsc_send_recv_completion(struct hv_device *device,
+ struct netvsc_device *net_device,
u64 transaction_id, u32 status)
{
struct nvsp_message recvcompMessage;
int retries = 0;
int ret;
struct net_device *ndev;
- struct netvsc_device *net_device = hv_get_drvdata(device);
ndev = net_device->ndev;
@@ -653,14 +662,15 @@ static void netvsc_receive_completion(void *context)
/* Send a receive completion for the xfer page packet */
if (fsend_receive_comp)
- netvsc_send_recv_completion(device, transaction_id, status);
+ netvsc_send_recv_completion(device, net_device, transaction_id,
+ status);
}
-static void netvsc_receive(struct hv_device *device,
- struct vmpacket_descriptor *packet)
+static void netvsc_receive(struct netvsc_device *net_device,
+ struct hv_device *device,
+ struct vmpacket_descriptor *packet)
{
- struct netvsc_device *net_device;
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *netvsc_packet = NULL;
@@ -673,9 +683,6 @@ static void netvsc_receive(struct hv_device *device,
LIST_HEAD(listHead);
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
ndev = net_device->ndev;
/*
@@ -741,7 +748,7 @@ static void netvsc_receive(struct hv_device *device,
spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
flags);
- netvsc_send_recv_completion(device,
+ netvsc_send_recv_completion(device, net_device,
vmxferpage_packet->d.trans_id,
NVSP_STAT_FAIL);
@@ -800,22 +807,16 @@ static void netvsc_channel_cb(void *context)
struct netvsc_device *net_device;
u32 bytes_recvd;
u64 request_id;
- unsigned char *packet;
struct vmpacket_descriptor *desc;
unsigned char *buffer;
int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
- packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
- GFP_ATOMIC);
- if (!packet)
- return;
- buffer = packet;
-
net_device = get_inbound_net_device(device);
if (!net_device)
- goto out;
+ return;
ndev = net_device->ndev;
+ buffer = net_device->cb_buffer;
do {
ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
@@ -825,11 +826,13 @@ static void netvsc_channel_cb(void *context)
desc = (struct vmpacket_descriptor *)buffer;
switch (desc->type) {
case VM_PKT_COMP:
- netvsc_send_completion(device, desc);
+ netvsc_send_completion(net_device,
+ device, desc);
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(device, desc);
+ netvsc_receive(net_device,
+ device, desc);
break;
default:
@@ -841,23 +844,16 @@ static void netvsc_channel_cb(void *context)
break;
}
- /* reset */
- if (bufferlen > NETVSC_PACKET_SIZE) {
- kfree(buffer);
- buffer = packet;
- bufferlen = NETVSC_PACKET_SIZE;
- }
} else {
- /* reset */
- if (bufferlen > NETVSC_PACKET_SIZE) {
- kfree(buffer);
- buffer = packet;
- bufferlen = NETVSC_PACKET_SIZE;
- }
-
+ /*
+ * We are done for this pass.
+ */
break;
}
+
} else if (ret == -ENOBUFS) {
+ if (bufferlen > NETVSC_PACKET_SIZE)
+ kfree(buffer);
/* Handle large packet */
buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
if (buffer == NULL) {
@@ -872,8 +868,8 @@ static void netvsc_channel_cb(void *context)
}
} while (1);
-out:
- kfree(buffer);
+ if (bufferlen > NETVSC_PACKET_SIZE)
+ kfree(buffer);
return;
}
@@ -907,7 +903,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
ndev = net_device->ndev;
/* Initialize the NetVSC channel extension */
- net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
spin_lock_init(&net_device->recv_pkt_list_lock);
INIT_LIST_HEAD(&net_device->recv_pkt_list);
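
netvsc_connect_vsp() above now walks the NVSP version list from newest to oldest and keeps the first version the host accepts. A minimal user-space sketch of the same fallback pattern; negotiate() is a stand-in for negotiate_nvsp_ver(), and the host here is assumed to speak at most NVSP 2:

/* Sketch of the newest-first version fallback used in netvsc_connect_vsp().
 * The values mirror the NVSP_PROTOCOL_VERSION_* constants.
 */
#include <stdio.h>

static int negotiate(unsigned int ver)
{
	/* Pretend the host only accepts versions up to 0x30002 (NVSP 2). */
	return ver <= 0x30002 ? 0 : -1;
}

int main(void)
{
	unsigned int ver_list[] = { 0x00002, 0x30002, 0x40000, 0x50000 };
	int num_ver = sizeof(ver_list) / sizeof(ver_list[0]);
	int i;

	for (i = num_ver - 1; i >= 0; i--)
		if (negotiate(ver_list[i]) == 0)
			break;

	if (i < 0)
		printf("no common version\n");	/* -EPROTO in the driver */
	else
		printf("negotiated 0x%x\n", ver_list[i]);
	return 0;
}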
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d6fce9750b95..4e4cf9e0c8d7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -128,6 +128,27 @@ static int netvsc_close(struct net_device *net)
return ret;
}
+static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
+ int pkt_type)
+{
+ struct rndis_packet *rndis_pkt;
+ struct rndis_per_packet_info *ppi;
+
+ rndis_pkt = &msg->msg.pkt;
+ rndis_pkt->data_offset += ppi_size;
+
+ ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
+ rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
+
+ ppi->size = ppi_size;
+ ppi->type = pkt_type;
+ ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
+
+ rndis_pkt->per_pkt_info_len += ppi_size;
+
+ return ppi;
+}
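
init_ppi_data() above appends one per-packet-info element to the RNDIS header: the element lands after the existing per-packet info, and both data_offset and per_pkt_info_len grow by its size. A standalone sketch of that layout arithmetic, with simplified stand-ins for the rndis structures:

/* Standalone sketch of the PPI append logic in init_ppi_data().  The
 * structs below are simplified stand-ins for the rndis_* definitions and
 * the sizes in main() are made-up examples.
 */
#include <stdio.h>
#include <stdint.h>

struct pkt_hdr {
	uint32_t data_offset;		/* start of payload, from this header */
	uint32_t per_pkt_info_offset;
	uint32_t per_pkt_info_len;
};

static uint32_t append_ppi(struct pkt_hdr *pkt, uint32_t ppi_size)
{
	uint32_t at = pkt->per_pkt_info_offset + pkt->per_pkt_info_len;

	pkt->data_offset += ppi_size;	/* payload moves further out */
	pkt->per_pkt_info_len += ppi_size;
	return at;			/* offset where the PPI lands */
}

int main(void)
{
	struct pkt_hdr pkt = {
		.data_offset = sizeof(struct pkt_hdr),
		.per_pkt_info_offset = sizeof(struct pkt_hdr),
		.per_pkt_info_len = 0,
	};
	uint32_t vlan_at = append_ppi(&pkt, 16);	/* e.g. 8021Q info */
	uint32_t csum_at = append_ppi(&pkt, 20);	/* e.g. checksum info */

	printf("vlan ppi at %u, csum ppi at %u, payload at %u\n",
	       vlan_at, csum_at, pkt.data_offset);
	return 0;
}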
+
static void netvsc_xmit_completion(void *context)
{
struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
@@ -140,22 +161,164 @@ static void netvsc_xmit_completion(void *context)
dev_kfree_skb_any(skb);
}
+static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
+ struct hv_page_buffer *pb)
+{
+ int j = 0;
+
+ /* Deal with compound pages by ignoring the unused part
+ * of the page.
+ */
+ page += (offset >> PAGE_SHIFT);
+ offset &= ~PAGE_MASK;
+
+ while (len > 0) {
+ unsigned long bytes;
+
+ bytes = PAGE_SIZE - offset;
+ if (bytes > len)
+ bytes = len;
+ pb[j].pfn = page_to_pfn(page);
+ pb[j].offset = offset;
+ pb[j].len = bytes;
+
+ offset += bytes;
+ len -= bytes;
+
+ if (offset == PAGE_SIZE && len) {
+ page++;
+ offset = 0;
+ j++;
+ }
+ }
+
+ return j + 1;
+}
+
+static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
+ struct hv_page_buffer *pb)
+{
+ u32 slots_used = 0;
+ char *data = skb->data;
+ int frags = skb_shinfo(skb)->nr_frags;
+ int i;
+
+ /* The packet is laid out thus:
+ * 1. hdr
+ * 2. skb linear data
+ * 3. skb fragment data
+ */
+ if (hdr != NULL)
+ slots_used += fill_pg_buf(virt_to_page(hdr),
+ offset_in_page(hdr),
+ len, &pb[slots_used]);
+
+ slots_used += fill_pg_buf(virt_to_page(data),
+ offset_in_page(data),
+ skb_headlen(skb), &pb[slots_used]);
+
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+
+ slots_used += fill_pg_buf(skb_frag_page(frag),
+ frag->page_offset,
+ skb_frag_size(frag), &pb[slots_used]);
+ }
+ return slots_used;
+}
+
+static int count_skb_frag_slots(struct sk_buff *skb)
+{
+ int i, frags = skb_shinfo(skb)->nr_frags;
+ int pages = 0;
+
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ unsigned long size = skb_frag_size(frag);
+ unsigned long offset = frag->page_offset;
+
+ /* Skip whole unused pages at the start of the fragment */
+ offset &= ~PAGE_MASK;
+ pages += PFN_UP(offset + size);
+ }
+ return pages;
+}
+
+static int netvsc_get_slots(struct sk_buff *skb)
+{
+ char *data = skb->data;
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+ int slots;
+ int frag_slots;
+
+ slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ frag_slots = count_skb_frag_slots(skb);
+ return slots + frag_slots;
+}
+
+static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
+{
+ u32 ret_val = TRANSPORT_INFO_NOT_IP;
+
+ if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
+ (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
+ goto not_ip;
+ }
+
+ *trans_off = skb_transport_offset(skb);
+
+ if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
+ struct iphdr *iphdr = ip_hdr(skb);
+
+ if (iphdr->protocol == IPPROTO_TCP)
+ ret_val = TRANSPORT_INFO_IPV4_TCP;
+ else if (iphdr->protocol == IPPROTO_UDP)
+ ret_val = TRANSPORT_INFO_IPV4_UDP;
+ } else {
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ ret_val = TRANSPORT_INFO_IPV6_TCP;
+ else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ ret_val = TRANSPORT_INFO_IPV6_UDP;
+ }
+
+not_ip:
+ return ret_val;
+}
+
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet;
int ret;
- unsigned int i, num_pages, npg_data;
-
- /* Add multipages for skb->data and additional 2 for RNDIS */
- npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
- >> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
- num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;
+ unsigned int num_data_pgs;
+ struct rndis_message *rndis_msg;
+ struct rndis_packet *rndis_pkt;
+ u32 rndis_msg_size;
+ bool isvlan;
+ struct rndis_per_packet_info *ppi;
+ struct ndis_tcp_ip_checksum_info *csum_info;
+ struct ndis_tcp_lso_info *lso_info;
+ int hdr_offset;
+ u32 net_trans_info;
+
+
+ /* We need at most two pages to describe the RNDIS
+ * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
+ * of pages in a single packet.
+ */
+ num_data_pgs = netvsc_get_slots(skb) + 2;
+ if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
+ netdev_err(net, "Packet too big: %u\n", skb->len);
+ dev_kfree_skb(skb);
+ net->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
- (num_pages * sizeof(struct hv_page_buffer)) +
- sizeof(struct rndis_filter_packet) +
+ (num_data_pgs * sizeof(struct hv_page_buffer)) +
+ sizeof(struct rndis_message) +
NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
if (!packet) {
/* out of memory, drop packet */
@@ -168,53 +331,111 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->vlan_tci = skb->vlan_tci;
- packet->extension = (void *)(unsigned long)packet +
+ packet->is_data_pkt = true;
+ packet->total_data_buflen = skb->len;
+
+ packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
sizeof(struct hv_netvsc_packet) +
- (num_pages * sizeof(struct hv_page_buffer));
+ (num_data_pgs * sizeof(struct hv_page_buffer)));
+
+ /* Set the completion routine */
+ packet->completion.send.send_completion = netvsc_xmit_completion;
+ packet->completion.send.send_completion_ctx = packet;
+ packet->completion.send.send_completion_tid = (unsigned long)skb;
- /* If the rndis msg goes beyond 1 page, we will add 1 later */
- packet->page_buf_cnt = num_pages - 1;
+ isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
+
+ /* Add the rndis header */
+ rndis_msg = packet->rndis_msg;
+ rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
+ rndis_msg->msg_len = packet->total_data_buflen;
+ rndis_pkt = &rndis_msg->msg.pkt;
+ rndis_pkt->data_offset = sizeof(struct rndis_packet);
+ rndis_pkt->data_len = packet->total_data_buflen;
+ rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
+
+ rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
+
+ if (isvlan) {
+ struct ndis_pkt_8021q_info *vlan;
+
+ rndis_msg_size += NDIS_VLAN_PPI_SIZE;
+ ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
+ IEEE_8021Q_INFO);
+ vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
+ ppi->ppi_offset);
+ vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
+ vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ }
- /* Initialize it from the skb */
- packet->total_data_buflen = skb->len;
+ net_trans_info = get_net_transport_info(skb, &hdr_offset);
+ if (net_trans_info == TRANSPORT_INFO_NOT_IP)
+ goto do_send;
+
+ /*
+ * Set up the send-side checksum offload only if this is not a
+ * GSO packet.
+ */
+ if (skb_is_gso(skb))
+ goto do_lso;
+
+ rndis_msg_size += NDIS_CSUM_PPI_SIZE;
+ ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+ TCPIP_CHKSUM_PKTINFO);
+
+ csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
+ ppi->ppi_offset);
- /* Start filling in the page buffers starting after RNDIS buffer. */
- packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
- packet->page_buf[1].offset
- = (unsigned long)skb->data & (PAGE_SIZE - 1);
- if (npg_data == 1)
- packet->page_buf[1].len = skb_headlen(skb);
+ if (net_trans_info & (INFO_IPV4 << 16))
+ csum_info->transmit.is_ipv4 = 1;
else
- packet->page_buf[1].len = PAGE_SIZE
- - packet->page_buf[1].offset;
-
- for (i = 2; i <= npg_data; i++) {
- packet->page_buf[i].pfn = virt_to_phys(skb->data
- + PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
- packet->page_buf[i].offset = 0;
- packet->page_buf[i].len = PAGE_SIZE;
+ csum_info->transmit.is_ipv6 = 1;
+
+ if (net_trans_info & INFO_TCP) {
+ csum_info->transmit.tcp_checksum = 1;
+ csum_info->transmit.tcp_header_offset = hdr_offset;
+ } else if (net_trans_info & INFO_UDP) {
+ csum_info->transmit.udp_checksum = 1;
}
- if (npg_data > 1)
- packet->page_buf[npg_data].len = (((unsigned long)skb->data
- + skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;
-
- /* Additional fragments are after SKB data */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-
- packet->page_buf[i+npg_data+1].pfn =
- page_to_pfn(skb_frag_page(f));
- packet->page_buf[i+npg_data+1].offset = f->page_offset;
- packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
+ goto do_send;
+
+do_lso:
+ rndis_msg_size += NDIS_LSO_PPI_SIZE;
+ ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
+ TCP_LARGESEND_PKTINFO);
+
+ lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
+ ppi->ppi_offset);
+
+ lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
+ if (net_trans_info & (INFO_IPV4 << 16)) {
+ lso_info->lso_v2_transmit.ip_version =
+ NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
+ ip_hdr(skb)->tot_len = 0;
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ } else {
+ lso_info->lso_v2_transmit.ip_version =
+ NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
}
+ lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
+ lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
- /* Set the completion routine */
- packet->completion.send.send_completion = netvsc_xmit_completion;
- packet->completion.send.send_completion_ctx = packet;
- packet->completion.send.send_completion_tid = (unsigned long)skb;
+do_send:
+ /* Start filling in the page buffers with the rndis hdr */
+ rndis_msg->msg_len += rndis_msg_size;
+ packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
+ skb, &packet->page_buf[0]);
+
+ ret = netvsc_send(net_device_ctx->device_ctx, packet);
- ret = rndis_filter_send(net_device_ctx->device_ctx,
- packet);
if (ret == 0) {
net->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
@@ -264,7 +485,8 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
* "wire" on the specified device.
*/
int netvsc_recv_callback(struct hv_device *device_obj,
- struct hv_netvsc_packet *packet)
+ struct hv_netvsc_packet *packet,
+ struct ndis_tcp_ip_checksum_info *csum_info)
{
struct net_device *net;
struct sk_buff *skb;
@@ -291,7 +513,17 @@ int netvsc_recv_callback(struct hv_device *device_obj,
packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
- skb->ip_summed = CHECKSUM_NONE;
+ if (csum_info) {
+ /* We only look at the IP checksum here.
+ * Should we be dropping the packet if the checksum
+ * failed? How do we deal with other checksums - TCP/UDP?
+ */
+ if (csum_info->receive.ip_checksum_succeeded)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
if (packet->vlan_tci & VLAN_TAG_PRESENT)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
packet->vlan_tci);
@@ -327,7 +559,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (nvdev == NULL || nvdev->destroy)
return -ENODEV;
- if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
+ if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
limit = NETVSC_MTU;
if (mtu < 68 || mtu > limit)
@@ -452,9 +684,10 @@ static int netvsc_probe(struct hv_device *dev,
net->netdev_ops = &device_ops;
- /* TODO: Add GSO and Checksum offload */
- net->hw_features = 0;
- net->features = NETIF_F_HW_VLAN_CTAG_TX;
+ net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO;
+ net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_TSO;
SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
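
fill_pg_buf() above splits a possibly page-crossing buffer into per-page (pfn, offset, len) entries. A minimal user-space sketch of the same arithmetic, assuming a 4096-byte page; pg_buf is a simplified stand-in for struct hv_page_buffer and the pfn is derived from the address rather than a real page:

/* Standalone sketch of the page-splitting arithmetic in fill_pg_buf(). */
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct pg_buf { unsigned long pfn, offset, len; };

static unsigned int fill(unsigned long start, unsigned long len,
			 struct pg_buf *pb)
{
	unsigned long offset = start % PAGE_SIZE;
	unsigned long pfn = start / PAGE_SIZE;
	unsigned int j = 0;

	while (len > 0) {
		unsigned long bytes = PAGE_SIZE - offset;

		if (bytes > len)
			bytes = len;
		pb[j].pfn = pfn;
		pb[j].offset = offset;
		pb[j].len = bytes;

		len -= bytes;
		offset += bytes;
		if (offset == PAGE_SIZE && len) {
			pfn++;
			offset = 0;
			j++;
		}
	}
	return j + 1;
}

int main(void)
{
	struct pg_buf pb[4];
	/* 6000 bytes starting 100 bytes before a page boundary -> 3 slots. */
	unsigned int n = fill(3 * PAGE_SIZE - 100, 6000, pb), i;

	for (i = 0; i < n; i++)
		printf("slot %u: pfn=%lu off=%lu len=%lu\n",
		       i, pb[i].pfn, pb[i].offset, pb[i].len);
	return 0;
}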
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index b54fd257652b..4a37e3db9e32 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -58,9 +58,6 @@ struct rndis_request {
u8 request_ext[RNDIS_EXT_LEN];
};
-static void rndis_filter_send_completion(void *ctx);
-
-
static struct rndis_device *get_rndis_device(void)
{
struct rndis_device *device;
@@ -297,7 +294,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
"rndis response buffer overflow "
"detected (size %u max %zu)\n",
resp->msg_len,
- sizeof(struct rndis_filter_packet));
+ sizeof(struct rndis_message));
if (resp->ndis_msg_type ==
RNDIS_MSG_RESET_C) {
@@ -373,6 +370,7 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
struct rndis_packet *rndis_pkt;
u32 data_offset;
struct ndis_pkt_8021q_info *vlan;
+ struct ndis_tcp_ip_checksum_info *csum_info;
rndis_pkt = &msg->msg.pkt;
@@ -411,7 +409,8 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
pkt->vlan_tci = 0;
}
- netvsc_recv_callback(dev->net_dev->dev, pkt);
+ csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
+ netvsc_recv_callback(dev->net_dev->dev, pkt, csum_info);
}
int rndis_filter_receive(struct hv_device *dev,
@@ -630,6 +629,61 @@ cleanup:
return ret;
}
+int rndis_filter_set_offload_params(struct hv_device *hdev,
+ struct ndis_offload_params *req_offloads)
+{
+ struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct rndis_device *rdev = nvdev->extension;
+ struct net_device *ndev = nvdev->ndev;
+ struct rndis_request *request;
+ struct rndis_set_request *set;
+ struct ndis_offload_params *offload_params;
+ struct rndis_set_complete *set_complete;
+ u32 extlen = sizeof(struct ndis_offload_params);
+ int ret, t;
+
+ request = get_rndis_request(rdev, RNDIS_MSG_SET,
+ RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+ if (!request)
+ return -ENOMEM;
+
+ set = &request->request_msg.msg.set_req;
+ set->oid = OID_TCP_OFFLOAD_PARAMETERS;
+ set->info_buflen = extlen;
+ set->info_buf_offset = sizeof(struct rndis_set_request);
+ set->dev_vc_handle = 0;
+
+ offload_params = (struct ndis_offload_params *)((ulong)set +
+ set->info_buf_offset);
+ *offload_params = *req_offloads;
+ offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
+ offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
+ offload_params->header.size = extlen;
+
+ ret = rndis_filter_send_request(rdev, request);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ netdev_err(ndev, "timeout before we got an OFFLOAD set response...\n");
+ /* can't put_rndis_request, since we may still receive a
+ * send-completion.
+ */
+ return -EBUSY;
+ } else {
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Failed to set offload parameters on host side: 0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
+ }
+ }
+
+cleanup:
+ put_rndis_request(rdev, request);
+ return ret;
+}
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
@@ -829,6 +883,7 @@ int rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct netvsc_device_info *device_info = additional_info;
+ struct ndis_offload_params offloads;
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -868,6 +923,26 @@ int rndis_filter_device_add(struct hv_device *dev,
memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
+ /* Turn on the offloads; the host supports all of the relevant
+ * offloads.
+ */
+ memset(&offloads, 0, sizeof(struct ndis_offload_params));
+ /* A value of zero means "no change"; now turn on what we
+ * want.
+ */
+ offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+
+
+ ret = rndis_filter_set_offload_params(dev, &offloads);
+ if (ret)
+ goto err_dev_remv;
+
+
rndis_filter_query_device_link_status(rndis_device);
device_info->link_state = rndis_device->link_state;
@@ -877,6 +952,10 @@ int rndis_filter_device_add(struct hv_device *dev,
device_info->link_state ? "down" : "up");
return ret;
+
+err_dev_remv:
+ rndis_filter_device_remove(dev);
+ return ret;
}
void rndis_filter_device_remove(struct hv_device *dev)
@@ -913,101 +992,3 @@ int rndis_filter_close(struct hv_device *dev)
return rndis_filter_close_device(nvdev->extension);
}
-
-int rndis_filter_send(struct hv_device *dev,
- struct hv_netvsc_packet *pkt)
-{
- int ret;
- struct rndis_filter_packet *filter_pkt;
- struct rndis_message *rndis_msg;
- struct rndis_packet *rndis_pkt;
- u32 rndis_msg_size;
- bool isvlan = pkt->vlan_tci & VLAN_TAG_PRESENT;
-
- /* Add the rndis header */
- filter_pkt = (struct rndis_filter_packet *)pkt->extension;
-
- rndis_msg = &filter_pkt->msg;
- rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
- if (isvlan)
- rndis_msg_size += NDIS_VLAN_PPI_SIZE;
-
- rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
- rndis_msg->msg_len = pkt->total_data_buflen +
- rndis_msg_size;
-
- rndis_pkt = &rndis_msg->msg.pkt;
- rndis_pkt->data_offset = sizeof(struct rndis_packet);
- if (isvlan)
- rndis_pkt->data_offset += NDIS_VLAN_PPI_SIZE;
- rndis_pkt->data_len = pkt->total_data_buflen;
-
- if (isvlan) {
- struct rndis_per_packet_info *ppi;
- struct ndis_pkt_8021q_info *vlan;
-
- rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
- rndis_pkt->per_pkt_info_len = NDIS_VLAN_PPI_SIZE;
-
- ppi = (struct rndis_per_packet_info *)((ulong)rndis_pkt +
- rndis_pkt->per_pkt_info_offset);
- ppi->size = NDIS_VLAN_PPI_SIZE;
- ppi->type = IEEE_8021Q_INFO;
- ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
-
- vlan = (struct ndis_pkt_8021q_info *)((ulong)ppi +
- ppi->ppi_offset);
- vlan->vlanid = pkt->vlan_tci & VLAN_VID_MASK;
- vlan->pri = (pkt->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- }
-
- pkt->is_data_pkt = true;
- pkt->page_buf[0].pfn = virt_to_phys(rndis_msg) >> PAGE_SHIFT;
- pkt->page_buf[0].offset =
- (unsigned long)rndis_msg & (PAGE_SIZE-1);
- pkt->page_buf[0].len = rndis_msg_size;
-
- /* Add one page_buf if the rndis msg goes beyond page boundary */
- if (pkt->page_buf[0].offset + rndis_msg_size > PAGE_SIZE) {
- int i;
- for (i = pkt->page_buf_cnt; i > 1; i--)
- pkt->page_buf[i] = pkt->page_buf[i-1];
- pkt->page_buf_cnt++;
- pkt->page_buf[0].len = PAGE_SIZE - pkt->page_buf[0].offset;
- pkt->page_buf[1].pfn = virt_to_phys((void *)((ulong)
- rndis_msg + pkt->page_buf[0].len)) >> PAGE_SHIFT;
- pkt->page_buf[1].offset = 0;
- pkt->page_buf[1].len = rndis_msg_size - pkt->page_buf[0].len;
- }
-
- /* Save the packet send completion and context */
- filter_pkt->completion = pkt->completion.send.send_completion;
- filter_pkt->completion_ctx =
- pkt->completion.send.send_completion_ctx;
-
- /* Use ours */
- pkt->completion.send.send_completion = rndis_filter_send_completion;
- pkt->completion.send.send_completion_ctx = filter_pkt;
-
- ret = netvsc_send(dev, pkt);
- if (ret != 0) {
- /*
- * Reset the completion to originals to allow retries from
- * above
- */
- pkt->completion.send.send_completion =
- filter_pkt->completion;
- pkt->completion.send.send_completion_ctx =
- filter_pkt->completion_ctx;
- }
-
- return ret;
-}
-
-static void rndis_filter_send_completion(void *ctx)
-{
- struct rndis_filter_packet *filter_pkt = ctx;
-
- /* Pass it back to the original handler */
- filter_pkt->completion(filter_pkt->completion_ctx);
-}
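
rndis_filter_set_offload_params() above builds an RNDIS set request whose info buffer, placed at info_buf_offset, is an NDIS offload-parameters object with a type/revision/size header. A standalone sketch of that layout; the structs are simplified stand-ins for the kernel definitions and the numeric values mirror the defines introduced in hyperv_net.h above:

/* Standalone sketch of the set-request layout. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct obj_header { uint8_t type, revision; uint16_t size; };

struct offload_params {
	struct obj_header header;
	uint8_t tcp_ip_v4_csum;
	uint8_t lso_v2_ipv4;
	/* remaining fields elided */
};

struct set_request {
	uint32_t oid;
	uint32_t info_buflen;
	uint32_t info_buf_offset;	/* from the start of this struct */
};

int main(void)
{
	union {
		struct set_request set;
		unsigned char raw[sizeof(struct set_request) +
				  sizeof(struct offload_params)];
	} msg;
	struct offload_params *params;

	memset(&msg, 0, sizeof(msg));		/* zero means "no change" */
	msg.set.oid = 0xFC01020C;		/* OID_TCP_OFFLOAD_PARAMETERS */
	msg.set.info_buflen = sizeof(*params);
	msg.set.info_buf_offset = sizeof(struct set_request);

	params = (struct offload_params *)(msg.raw + msg.set.info_buf_offset);
	params->header.type = 0x80;		/* NDIS_OBJECT_TYPE_DEFAULT */
	params->header.revision = 3;		/* PARAMETERS_REVISION_3 */
	params->header.size = sizeof(*params);
	params->tcp_ip_v4_csum = 4;		/* TX_RX_ENABLED */
	params->lso_v2_ipv4 = 2;		/* LSOV2_ENABLED */

	printf("info buffer at offset %u, %u bytes\n",
	       msg.set.info_buf_offset, msg.set.info_buflen);
	return 0;
}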
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 08ae4655423a..3e89beab64fd 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -15,9 +15,9 @@ config IEEE802154_FAKEHARD
depends on IEEE802154_DRIVERS
---help---
Say Y here to enable the fake driver that serves as an example
- of HardMAC device driver.
+ of a HardMAC device driver.
- This driver can also be built as a module. To do so say M here.
+ This driver can also be built as a module. To do so, say M here.
The module will be called 'fakehard'.
config IEEE802154_FAKELB
@@ -31,17 +31,23 @@ config IEEE802154_FAKELB
The module will be called 'fakelb'.
config IEEE802154_AT86RF230
- depends on IEEE802154_DRIVERS && MAC802154
- tristate "AT86RF230/231 transceiver driver"
- depends on SPI
+ depends on IEEE802154_DRIVERS && MAC802154
+ tristate "AT86RF230/231/233/212 transceiver driver"
+ depends on SPI
+ ---help---
+ Say Y here to enable the at86rf230/231/233/212 SPI 802.15.4 wireless
+ controller.
+
+ This driver can also be built as a module. To do so, say M here.
+ The module will be called 'at86rf230'.
config IEEE802154_MRF24J40
- tristate "Microchip MRF24J40 transceiver driver"
- depends on IEEE802154_DRIVERS && MAC802154
- depends on SPI
- ---help---
- Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
- controller.
-
- This driver can also be built as a module. To do so, say M here.
- the module will be called 'mrf24j40'.
+ tristate "Microchip MRF24J40 transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on SPI
+ ---help---
+ Say Y here to enable the MRF24J40 SPI 802.15.4 wireless
+ controller.
+
+ This driver can also be built as a module. To do so, say M here.
+ The module will be called 'mrf24j40'.
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index a30258aad139..430bb0db9bc4 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -31,13 +31,13 @@
#include <linux/spi/spi.h>
#include <linux/spi/at86rf230.h>
#include <linux/skbuff.h>
+#include <linux/of_gpio.h>
#include <net/mac802154.h>
#include <net/wpan-phy.h>
struct at86rf230_local {
struct spi_device *spi;
- int rstn, slp_tr, dig2;
u8 part;
u8 vers;
@@ -53,8 +53,16 @@ struct at86rf230_local {
spinlock_t lock;
bool irq_busy;
bool is_tx;
+ bool tx_aret;
+
+ int rssi_base_val;
};
+static bool is_rf212(struct at86rf230_local *local)
+{
+ return local->part == 7;
+}
+
#define RG_TRX_STATUS (0x01)
#define SR_TRX_STATUS 0x01, 0x1f, 0
#define SR_RESERVED_01_3 0x01, 0x20, 5
@@ -100,7 +108,10 @@ struct at86rf230_local {
#define SR_SFD_VALUE 0x0b, 0xff, 0
#define RG_TRX_CTRL_2 (0x0c)
#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
-#define SR_RESERVED_0c_2 0x0c, 0x7c, 2
+#define SR_SUB_MODE 0x0c, 0x04, 2
+#define SR_BPSK_QPSK 0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
+#define SR_RESERVED_0c_5 0x0c, 0x60, 5
#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
#define RG_ANT_DIV (0x0d)
#define SR_ANT_CTRL 0x0d, 0x03, 0
@@ -145,7 +156,7 @@ struct at86rf230_local {
#define SR_RESERVED_17_5 0x17, 0x08, 3
#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
-#define SR_RESERVED_17_2 0x17, 0x40, 6
+#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
#define SR_RESERVED_17_1 0x17, 0x80, 7
#define RG_FTN_CTRL (0x18)
#define SR_RESERVED_18_2 0x18, 0x7f, 0
@@ -234,6 +245,7 @@ struct at86rf230_local {
#define STATE_TX_ON 0x09
/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
#define STATE_SLEEP 0x0F
+#define STATE_PREP_DEEP_SLEEP 0x10
#define STATE_BUSY_RX_AACK 0x11
#define STATE_BUSY_TX_ARET 0x12
#define STATE_RX_AACK_ON 0x16
@@ -244,6 +256,57 @@ struct at86rf230_local {
#define STATE_TRANSITION_IN_PROGRESS 0x1F
static int
+__at86rf230_detect_device(struct spi_device *spi, u16 *man_id, u8 *part,
+ u8 *version)
+{
+ u8 data[4];
+ u8 *buf = kmalloc(2, GFP_KERNEL);
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ u8 reg;
+
+ if (!buf)
+ return -ENOMEM;
+
+ for (reg = RG_PART_NUM; reg <= RG_MAN_ID_1; reg++) {
+ buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
+ buf[1] = 0xff;
+ dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(spi, &msg);
+ dev_vdbg(&spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&spi->dev, "status = %d\n", status);
+ dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status == 0)
+ data[reg - RG_PART_NUM] = buf[1];
+ else
+ break;
+ }
+
+ if (status == 0) {
+ *part = data[0];
+ *version = data[1];
+ *man_id = (data[3] << 8) | data[2];
+ }
+
+ kfree(buf);
+
+ return status;
+}
+
+static int
__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
{
u8 *buf = lp->buf;
@@ -489,7 +552,9 @@ at86rf230_state(struct ieee802154_dev *dev, int state)
} while (val == STATE_TRANSITION_IN_PROGRESS);
- if (val == desired_status)
+ if (val == desired_status ||
+ (desired_status == STATE_RX_ON && val == STATE_BUSY_RX) ||
+ (desired_status == STATE_RX_AACK_ON && val == STATE_BUSY_RX_AACK))
return 0;
pr_err("unexpected state change: %d, asked for %d\n", val, state);
@@ -510,7 +575,11 @@ at86rf230_start(struct ieee802154_dev *dev)
if (rc)
return rc;
- return at86rf230_state(dev, STATE_RX_ON);
+ rc = at86rf230_state(dev, STATE_TX_ON);
+ if (rc)
+ return rc;
+
+ return at86rf230_state(dev, STATE_RX_AACK_ON);
}
static void
@@ -520,6 +589,39 @@ at86rf230_stop(struct ieee802154_dev *dev)
}
static int
+at86rf230_set_channel(struct at86rf230_local *lp, int page, int channel)
+{
+ lp->rssi_base_val = -91;
+
+ return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+}
+
+static int
+at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
+{
+ int rc;
+
+ if (channel == 0)
+ rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 0);
+ else
+ rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 1);
+ if (rc < 0)
+ return rc;
+
+ if (page == 0) {
+ rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 0);
+ lp->rssi_base_val = -100;
+ } else {
+ rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 1);
+ lp->rssi_base_val = -98;
+ }
+ if (rc < 0)
+ return rc;
+
+ return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+}
+
+static int
at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
{
struct at86rf230_local *lp = dev->priv;
@@ -527,14 +629,22 @@ at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
might_sleep();
- if (page != 0 || channel < 11 || channel > 26) {
+ if (page < 0 || page > 31 ||
+ !(lp->dev->phy->channels_supported[page] & BIT(channel))) {
WARN_ON(1);
return -EINVAL;
}
- rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+ if (is_rf212(lp))
+ rc = at86rf212_set_channel(lp, page, channel);
+ else
+ rc = at86rf230_set_channel(lp, page, channel);
+ if (rc < 0)
+ return rc;
+
msleep(1); /* Wait for PLL */
dev->phy->current_channel = channel;
+ dev->phy->current_page = page;
return 0;
}
@@ -568,6 +678,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
if (rc)
goto err_rx;
+ if (lp->tx_aret) {
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ARET_ON);
+ if (rc)
+ goto err_rx;
+ }
+
rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
if (rc)
goto err_rx;
@@ -630,30 +746,31 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
struct at86rf230_local *lp = dev->priv;
if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+ u16 addr = le16_to_cpu(filt->short_addr);
+
dev_vdbg(&lp->spi->dev,
"at86rf230_set_hw_addr_filt called for saddr\n");
- __at86rf230_write(lp, RG_SHORT_ADDR_0, filt->short_addr);
- __at86rf230_write(lp, RG_SHORT_ADDR_1, filt->short_addr >> 8);
+ __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
+ __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
}
if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+ u16 pan = le16_to_cpu(filt->pan_id);
+
dev_vdbg(&lp->spi->dev,
"at86rf230_set_hw_addr_filt called for pan id\n");
- __at86rf230_write(lp, RG_PAN_ID_0, filt->pan_id);
- __at86rf230_write(lp, RG_PAN_ID_1, filt->pan_id >> 8);
+ __at86rf230_write(lp, RG_PAN_ID_0, pan);
+ __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
}
if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+ u8 i, addr[8];
+
+ memcpy(addr, &filt->ieee_addr, 8);
dev_vdbg(&lp->spi->dev,
"at86rf230_set_hw_addr_filt called for IEEE addr\n");
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_0, filt->ieee_addr[7]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_1, filt->ieee_addr[6]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_2, filt->ieee_addr[5]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_3, filt->ieee_addr[4]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_4, filt->ieee_addr[3]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_5, filt->ieee_addr[2]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_6, filt->ieee_addr[1]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_7, filt->ieee_addr[0]);
+ for (i = 0; i < 8; i++)
+ __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
}
if (changed & IEEE802515_AFILT_PANC_CHANGED) {
@@ -668,6 +785,93 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
return 0;
}
+static int
+at86rf212_set_txpower(struct ieee802154_dev *dev, int db)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+ /* Typical maximum output is 5 dBm with RG_PHY_TX_PWR set to 0x60; the
+ * lower five bits decrease the power in 1 dB steps, and 0x60 represents
+ * an extra PA gain of 0 dB.
+ * Thus, supported values for db range from -26 to 5, i.e. from 31 dB of
+ * reduction down to no reduction.
+ */
+ if (db > 5 || db < -26)
+ return -EINVAL;
+
+ db = -(db - 5);
+
+ return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
+}
+
+static int
+at86rf212_set_lbt(struct ieee802154_dev *dev, bool on)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+ return at86rf230_write_subreg(lp, SR_CSMA_LBT_MODE, on);
+}
+
+static int
+at86rf212_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+ return at86rf230_write_subreg(lp, SR_CCA_MODE, mode);
+}
+
+static int
+at86rf212_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int desens_steps;
+
+ if (level < lp->rssi_base_val || level > 30)
+ return -EINVAL;
+
+ desens_steps = (level - lp->rssi_base_val) * 100 / 207;
+
+ return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, desens_steps);
+}
+
+static int
+at86rf212_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
+ u8 retries)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+
+ if (min_be > max_be || max_be > 8 || retries > 5)
+ return -EINVAL;
+
+ rc = at86rf230_write_subreg(lp, SR_MIN_BE, min_be);
+ if (rc)
+ return rc;
+
+ rc = at86rf230_write_subreg(lp, SR_MAX_BE, max_be);
+ if (rc)
+ return rc;
+
+ return at86rf230_write_subreg(lp, SR_MAX_CSMA_RETRIES, retries);
+}
+
+static int
+at86rf212_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc = 0;
+
+ if (retries < -1 || retries > 15)
+ return -EINVAL;
+
+ lp->tx_aret = retries >= 0;
+
+ if (retries >= 0)
+ rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
+
+ return rc;
+}
+
static struct ieee802154_ops at86rf230_ops = {
.owner = THIS_MODULE,
.xmit = at86rf230_xmit,
@@ -678,6 +882,22 @@ static struct ieee802154_ops at86rf230_ops = {
.set_hw_addr_filt = at86rf230_set_hw_addr_filt,
};
+static struct ieee802154_ops at86rf212_ops = {
+ .owner = THIS_MODULE,
+ .xmit = at86rf230_xmit,
+ .ed = at86rf230_ed,
+ .set_channel = at86rf230_channel,
+ .start = at86rf230_start,
+ .stop = at86rf230_stop,
+ .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
+ .set_txpower = at86rf212_set_txpower,
+ .set_lbt = at86rf212_set_lbt,
+ .set_cca_mode = at86rf212_set_cca_mode,
+ .set_cca_ed_level = at86rf212_set_cca_ed_level,
+ .set_csma_params = at86rf212_set_csma_params,
+ .set_frame_retries = at86rf212_set_frame_retries,
+};
+
static void at86rf230_irqwork(struct work_struct *work)
{
struct at86rf230_local *lp =
@@ -695,8 +915,8 @@ static void at86rf230_irqwork(struct work_struct *work)
status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
if (status & IRQ_TRX_END) {
- spin_lock_irqsave(&lp->lock, flags);
status &= ~IRQ_TRX_END;
+ spin_lock_irqsave(&lp->lock, flags);
if (lp->is_tx) {
lp->is_tx = 0;
spin_unlock_irqrestore(&lp->lock, flags);
@@ -753,22 +973,15 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
int rc, irq_pol;
u8 status;
+ u8 csma_seed[2];
rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
if (rc)
return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
- if (status == STATE_P_ON) {
- rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
- if (rc)
- return rc;
- msleep(1);
- rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
- if (rc)
- return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
- }
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
+ if (rc)
+ return rc;
/* configure irq polarity, defaults to high active */
if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
@@ -784,6 +997,14 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
if (rc)
return rc;
+ get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed));
+ rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]);
+ if (rc)
+ return rc;
+ rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_1, csma_seed[1]);
+ if (rc)
+ return rc;
+
/* CLKM changes are applied immediately */
rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
if (rc)
@@ -796,16 +1017,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
/* Wait the next SLEEP cycle */
msleep(100);
- rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
- if (rc)
- return rc;
- msleep(1);
-
- rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
- if (rc)
- return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
-
rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
if (rc)
return rc;
@@ -825,14 +1036,38 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return 0;
}
-static void at86rf230_fill_data(struct spi_device *spi)
+static struct at86rf230_platform_data *
+at86rf230_get_pdata(struct spi_device *spi)
{
- struct at86rf230_local *lp = spi_get_drvdata(spi);
- struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+ struct at86rf230_platform_data *pdata;
+ const char *irq_type;
+
+ if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
+ return spi->dev.platform_data;
+
+ pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ goto done;
+
+ pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
+ pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
+
+ pdata->irq_type = IRQF_TRIGGER_RISING;
+ of_property_read_string(spi->dev.of_node, "irq-type", &irq_type);
+ if (!strcmp(irq_type, "level-high"))
+ pdata->irq_type = IRQF_TRIGGER_HIGH;
+ else if (!strcmp(irq_type, "level-low"))
+ pdata->irq_type = IRQF_TRIGGER_LOW;
+ else if (!strcmp(irq_type, "edge-rising"))
+ pdata->irq_type = IRQF_TRIGGER_RISING;
+ else if (!strcmp(irq_type, "edge-falling"))
+ pdata->irq_type = IRQF_TRIGGER_FALLING;
+ else
+ dev_warn(&spi->dev, "wrong irq-type specified, using edge-rising\n");
- lp->rstn = pdata->rstn;
- lp->slp_tr = pdata->slp_tr;
- lp->dig2 = pdata->dig2;
+ spi->dev.platform_data = pdata;
+done:
+ return pdata;
}
static int at86rf230_probe(struct spi_device *spi)
@@ -840,133 +1075,146 @@ static int at86rf230_probe(struct spi_device *spi)
struct at86rf230_platform_data *pdata;
struct ieee802154_dev *dev;
struct at86rf230_local *lp;
- u8 man_id_0, man_id_1, status;
+ u16 man_id = 0;
+ u8 part = 0, version = 0, status;
irq_handler_t irq_handler;
work_func_t irq_worker;
- int rc, supported = 0;
+ int rc;
const char *chip;
+ struct ieee802154_ops *ops = NULL;
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ specified\n");
return -EINVAL;
}
- pdata = spi->dev.platform_data;
+ pdata = at86rf230_get_pdata(spi);
if (!pdata) {
dev_err(&spi->dev, "no platform_data\n");
return -EINVAL;
}
- dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
- if (!dev)
- return -ENOMEM;
-
- lp = dev->priv;
- lp->dev = dev;
-
- lp->spi = spi;
-
- dev->parent = &spi->dev;
- dev->extra_tx_headroom = 0;
- /* We do support only 2.4 Ghz */
- dev->phy->channels_supported[0] = 0x7FFF800;
- dev->flags = IEEE802154_HW_OMIT_CKSUM;
-
- if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- irq_worker = at86rf230_irqwork;
- irq_handler = at86rf230_isr;
- } else {
- irq_worker = at86rf230_irqwork_level;
- irq_handler = at86rf230_isr_level;
+ if (gpio_is_valid(pdata->rstn)) {
+ rc = gpio_request(pdata->rstn, "rstn");
+ if (rc)
+ return rc;
}
- mutex_init(&lp->bmux);
- INIT_WORK(&lp->irqwork, irq_worker);
- spin_lock_init(&lp->lock);
- init_completion(&lp->tx_complete);
-
- spi_set_drvdata(spi, lp);
-
- at86rf230_fill_data(spi);
-
- rc = gpio_request(lp->rstn, "rstn");
- if (rc)
- goto err_rstn;
-
- if (gpio_is_valid(lp->slp_tr)) {
- rc = gpio_request(lp->slp_tr, "slp_tr");
+ if (gpio_is_valid(pdata->slp_tr)) {
+ rc = gpio_request(pdata->slp_tr, "slp_tr");
if (rc)
goto err_slp_tr;
}
- rc = gpio_direction_output(lp->rstn, 1);
- if (rc)
- goto err_gpio_dir;
+ if (gpio_is_valid(pdata->rstn)) {
+ rc = gpio_direction_output(pdata->rstn, 1);
+ if (rc)
+ goto err_gpio_dir;
+ }
- if (gpio_is_valid(lp->slp_tr)) {
- rc = gpio_direction_output(lp->slp_tr, 0);
+ if (gpio_is_valid(pdata->slp_tr)) {
+ rc = gpio_direction_output(pdata->slp_tr, 0);
if (rc)
goto err_gpio_dir;
}
/* Reset */
- msleep(1);
- gpio_set_value(lp->rstn, 0);
- msleep(1);
- gpio_set_value(lp->rstn, 1);
- msleep(1);
+ if (gpio_is_valid(pdata->rstn)) {
+ udelay(1);
+ gpio_set_value(pdata->rstn, 0);
+ udelay(1);
+ gpio_set_value(pdata->rstn, 1);
+ usleep_range(120, 240);
+ }
- rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
- if (rc)
- goto err_gpio_dir;
- rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
- if (rc)
+ rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
+ if (rc < 0)
goto err_gpio_dir;
- if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
+ if (man_id != 0x001f) {
dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
- man_id_1, man_id_0);
+ man_id >> 8, man_id & 0xFF);
rc = -EINVAL;
goto err_gpio_dir;
}
- rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
- if (rc)
- goto err_gpio_dir;
-
- rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
- if (rc)
- goto err_gpio_dir;
-
- switch (lp->part) {
+ switch (part) {
case 2:
chip = "at86rf230";
- /* supported = 1; FIXME: should be easy to support; */
+ /* FIXME: should be easy to support; */
break;
case 3:
chip = "at86rf231";
- supported = 1;
+ ops = &at86rf230_ops;
+ break;
+ case 7:
+ chip = "at86rf212";
+ if (version == 1)
+ ops = &at86rf212_ops;
+ break;
+ case 11:
+ chip = "at86rf233";
+ ops = &at86rf230_ops;
break;
default:
chip = "UNKNOWN";
break;
}
- dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
- if (!supported) {
+ dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
+ if (!ops) {
rc = -ENOTSUPP;
goto err_gpio_dir;
}
+ dev = ieee802154_alloc_device(sizeof(*lp), ops);
+ if (!dev) {
+ rc = -ENOMEM;
+ goto err_gpio_dir;
+ }
+
+ lp = dev->priv;
+ lp->dev = dev;
+ lp->part = part;
+ lp->vers = version;
+
+ lp->spi = spi;
+
+ dev->parent = &spi->dev;
+ dev->extra_tx_headroom = 0;
+ dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
+
+ if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ irq_worker = at86rf230_irqwork;
+ irq_handler = at86rf230_isr;
+ } else {
+ irq_worker = at86rf230_irqwork_level;
+ irq_handler = at86rf230_isr_level;
+ }
+
+ mutex_init(&lp->bmux);
+ INIT_WORK(&lp->irqwork, irq_worker);
+ spin_lock_init(&lp->lock);
+ init_completion(&lp->tx_complete);
+
+ spi_set_drvdata(spi, lp);
+
+ if (is_rf212(lp)) {
+ dev->phy->channels_supported[0] = 0x00007FF;
+ dev->phy->channels_supported[2] = 0x00007FF;
+ } else {
+ dev->phy->channels_supported[0] = 0x7FFF800;
+ }
+
rc = at86rf230_hw_init(lp);
if (rc)
- goto err_gpio_dir;
+ goto err_hw_init;
rc = request_irq(spi->irq, irq_handler,
IRQF_SHARED | pdata->irq_type,
dev_name(&spi->dev), lp);
if (rc)
- goto err_gpio_dir;
+ goto err_hw_init;
/* Read irq status register to reset irq line */
rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
@@ -981,30 +1229,37 @@ static int at86rf230_probe(struct spi_device *spi)
err_irq:
free_irq(spi->irq, lp);
+err_hw_init:
flush_work(&lp->irqwork);
-err_gpio_dir:
- if (gpio_is_valid(lp->slp_tr))
- gpio_free(lp->slp_tr);
-err_slp_tr:
- gpio_free(lp->rstn);
-err_rstn:
+ spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
+
+err_gpio_dir:
+ if (gpio_is_valid(pdata->slp_tr))
+ gpio_free(pdata->slp_tr);
+err_slp_tr:
+ if (gpio_is_valid(pdata->rstn))
+ gpio_free(pdata->rstn);
return rc;
}
static int at86rf230_remove(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
+ struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+ /* mask all at86rf230 IRQs */
+ at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
ieee802154_unregister_device(lp->dev);
free_irq(spi->irq, lp);
flush_work(&lp->irqwork);
- if (gpio_is_valid(lp->slp_tr))
- gpio_free(lp->slp_tr);
- gpio_free(lp->rstn);
+ if (gpio_is_valid(pdata->slp_tr))
+ gpio_free(pdata->slp_tr);
+ if (gpio_is_valid(pdata->rstn))
+ gpio_free(pdata->rstn);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
@@ -1013,8 +1268,19 @@ static int at86rf230_remove(struct spi_device *spi)
return 0;
}
+#if IS_ENABLED(CONFIG_OF)
+static struct of_device_id at86rf230_of_match[] = {
+ { .compatible = "atmel,at86rf230", },
+ { .compatible = "atmel,at86rf231", },
+ { .compatible = "atmel,at86rf233", },
+ { .compatible = "atmel,at86rf212", },
+ { },
+};
+#endif
+
static struct spi_driver at86rf230_driver = {
.driver = {
+ .of_match_table = of_match_ptr(at86rf230_of_match),
.name = "at86rf230",
.owner = THIS_MODULE,
},
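
The at86rf212 helpers above encode TX power as 0x60 | (5 - dB) and express the CCA energy-detect threshold in roughly 2.07 dB "desensitization" steps above rssi_base_val. A standalone sketch with two worked values (plain C, not driver API):

/* Worked sketch of the at86rf212 register math. */
#include <stdio.h>

static int txpower_reg(int db)
{
	if (db > 5 || db < -26)
		return -1;
	return 0x60 | (5 - db);		/* same as db = -(db - 5) above */
}

static int cca_ed_steps(int level, int rssi_base_val)
{
	if (level < rssi_base_val || level > 30)
		return -1;
	return (level - rssi_base_val) * 100 / 207;
}

int main(void)
{
	printf("+5 dBm  -> 0x%02x\n", txpower_reg(5));	/* 0x60 */
	printf("-26 dBm -> 0x%02x\n", txpower_reg(-26));	/* 0x7f */
	/* -80 dBm threshold on a sub-GHz page (base -100 dBm) -> step 9 */
	printf("ED step  = %d\n", cca_ed_steps(-80, -100));
	return 0;
}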
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index bf0d55e2dd63..78f18be3bbf2 100644
--- a/drivers/net/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -63,11 +63,11 @@ static struct wpan_phy *fake_get_phy(const struct net_device *dev)
*
* Return the ID of the PAN from the PIB.
*/
-static u16 fake_get_pan_id(const struct net_device *dev)
+static __le16 fake_get_pan_id(const struct net_device *dev)
{
BUG_ON(dev->type != ARPHRD_IEEE802154);
- return 0xeba1;
+ return cpu_to_le16(0xeba1);
}
/**
@@ -78,11 +78,11 @@ static u16 fake_get_pan_id(const struct net_device *dev)
* device. If the device has not yet had a short address assigned
* then this should return 0xFFFF to indicate a lack of association.
*/
-static u16 fake_get_short_addr(const struct net_device *dev)
+static __le16 fake_get_short_addr(const struct net_device *dev)
{
BUG_ON(dev->type != ARPHRD_IEEE802154);
- return 0x1;
+ return cpu_to_le16(0x1);
}
/**
@@ -149,7 +149,7 @@ static int fake_assoc_req(struct net_device *dev,
* 802.15.4-2006 document.
*/
static int fake_assoc_resp(struct net_device *dev,
- struct ieee802154_addr *addr, u16 short_addr, u8 status)
+ struct ieee802154_addr *addr, __le16 short_addr, u8 status)
{
return 0;
}
@@ -191,10 +191,10 @@ static int fake_disassoc_req(struct net_device *dev,
* Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006
* document, with 7.3.8 describing coordinator realignment.
*/
-static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr,
- u8 channel, u8 page,
- u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
- u8 coord_realign)
+static int fake_start_req(struct net_device *dev,
+ struct ieee802154_addr *addr, u8 channel, u8 page,
+ u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
+ u8 coord_realign)
{
struct wpan_phy *phy = fake_to_phy(dev);
@@ -281,8 +281,8 @@ static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr,
switch (cmd) {
case SIOCGIFADDR:
/* FIXME: fixed here, get from device IRL */
- pan_id = fake_get_pan_id(dev);
- short_addr = fake_get_short_addr(dev);
+ pan_id = le16_to_cpu(fake_get_pan_id(dev));
+ short_addr = le16_to_cpu(fake_get_short_addr(dev));
if (pan_id == IEEE802154_PANID_BROADCAST ||
short_addr == IEEE802154_ADDR_BROADCAST)
return -EADDRNOTAVAIL;
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 246befa4ba05..78a6552ed707 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -465,8 +465,8 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
/* Short Addr */
u8 addrh, addrl;
- addrh = filt->short_addr >> 8 & 0xff;
- addrl = filt->short_addr & 0xff;
+ addrh = le16_to_cpu(filt->short_addr) >> 8 & 0xff;
+ addrl = le16_to_cpu(filt->short_addr) & 0xff;
write_short_reg(devrec, REG_SADRH, addrh);
write_short_reg(devrec, REG_SADRL, addrl);
@@ -476,15 +476,16 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
/* Device Address */
- int i;
+ u8 i, addr[8];
+
+ memcpy(addr, &filt->ieee_addr, 8);
for (i = 0; i < 8; i++)
- write_short_reg(devrec, REG_EADR0+i,
- filt->ieee_addr[7-i]);
+ write_short_reg(devrec, REG_EADR0 + i, addr[i]);
#ifdef DEBUG
printk(KERN_DEBUG "Set long addr to: ");
for (i = 0; i < 8; i++)
- printk("%02hhx ", filt->ieee_addr[i]);
+ printk("%02hhx ", addr[7 - i]);
printk(KERN_DEBUG "\n");
#endif
}
@@ -492,8 +493,8 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
if (changed & IEEE802515_AFILT_PANID_CHANGED) {
/* PAN ID */
u8 panidl, panidh;
- panidh = filt->pan_id >> 8 & 0xff;
- panidl = filt->pan_id & 0xff;
+ panidh = le16_to_cpu(filt->pan_id) >> 8 & 0xff;
+ panidl = le16_to_cpu(filt->pan_id) & 0xff;
write_short_reg(devrec, REG_PANIDH, panidh);
write_short_reg(devrec, REG_PANIDL, panidl);
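The two hunks above move the 802.15.4 PAN ID and short address to __le16, so drivers convert explicitly at the CPU boundary instead of relying on native byte order. A minimal sketch of the register-split pattern used by mrf24j40, with a hypothetical helper name (assumes <linux/types.h> and the usual byteorder helpers):

static void example_split_le16(__le16 addr, u8 *lo, u8 *hi)
{
	u16 v = le16_to_cpu(addr);	/* convert once, then mask */

	*lo = v & 0xff;
	*hi = (v >> 8) & 0xff;
}

Converting once into a local u16 keeps the endianness boundary in one place and avoids sparse warnings about mixing __le16 with plain integers.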
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index d7b2e947184b..46a7790be004 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -136,18 +136,18 @@ static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_bh(&dp->rsync);
+ start = u64_stats_fetch_begin_irq(&dp->rsync);
stats->rx_packets = dp->rx_packets;
stats->rx_bytes = dp->rx_bytes;
- } while (u64_stats_fetch_retry_bh(&dp->rsync, start));
+ } while (u64_stats_fetch_retry_irq(&dp->rsync, start));
do {
- start = u64_stats_fetch_begin_bh(&dp->tsync);
+ start = u64_stats_fetch_begin_irq(&dp->tsync);
stats->tx_packets = dp->tx_packets;
stats->tx_bytes = dp->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&dp->tsync, start));
+ } while (u64_stats_fetch_retry_irq(&dp->tsync, start));
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
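This and the per-driver hunks that follow switch 64-bit stats readers from the _bh to the _irq fetch helpers. The underlying pattern is unchanged: writers bracket updates with u64_stats_update_begin()/end(), and readers retry if a writer ran concurrently (only meaningful on 32-bit, where the seqcount is real). A hedged sketch with a hypothetical stats structure (assumes <linux/u64_stats_sync.h>):

struct example_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};

/* writer side, e.g. in the receive path */
static void example_count_rx(struct example_pcpu_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* reader side: loop until no writer interleaved with the snapshot */
static void example_read_rx(const struct example_pcpu_stats *s,
			    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->rx_packets;
		*bytes = s->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}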
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index c5011e078e1b..bb96409f8c05 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -111,10 +111,10 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
lb_stats = per_cpu_ptr(dev->lstats, i);
do {
- start = u64_stats_fetch_begin_bh(&lb_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
tbytes = lb_stats->bytes;
tpackets = lb_stats->packets;
- } while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
bytes += tbytes;
packets += tpackets;
}
@@ -136,16 +136,9 @@ static const struct ethtool_ops loopback_ethtool_ops = {
static int loopback_dev_init(struct net_device *dev)
{
- int i;
- dev->lstats = alloc_percpu(struct pcpu_lstats);
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
if (!dev->lstats)
return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct pcpu_lstats *lb_stats;
- lb_stats = per_cpu_ptr(dev->lstats, i);
- u64_stats_init(&lb_stats->syncp);
- }
return 0;
}
@@ -160,6 +153,7 @@ static const struct net_device_ops loopback_ops = {
.ndo_init = loopback_dev_init,
.ndo_start_xmit= loopback_xmit,
.ndo_get_stats64 = loopback_get_stats64,
+ .ndo_set_mac_address = eth_mac_addr,
};
/*
@@ -174,6 +168,7 @@ static void loopback_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
@@ -181,6 +176,7 @@ static void loopback_setup(struct net_device *dev)
| NETIF_F_UFO
| NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
+ | NETIF_F_SCTP_CSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
| NETIF_F_NETNS_LOCAL
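loopback (and macvlan, nlmon, and team further down) replace the open-coded alloc_percpu() plus per-CPU u64_stats_init() loop with netdev_alloc_pcpu_stats(). Roughly, the helper does the following; this is a sketch for illustration, not the exact macro body:

static struct pcpu_lstats __percpu *example_alloc_lstats(void)
{
	struct pcpu_lstats __percpu *lstats = alloc_percpu(struct pcpu_lstats);
	int cpu;

	if (!lstats)
		return NULL;

	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(lstats, cpu)->syncp);

	return lstats;
}

The helper exists largely so drivers cannot forget the u64_stats_init() step.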
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1831fb7cd017..753a8c23d15d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -537,7 +537,6 @@ static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
const struct net_device *lowerdev = vlan->lowerdev;
- int i;
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
@@ -549,16 +548,10 @@ static int macvlan_init(struct net_device *dev)
macvlan_set_lockdep_class(dev);
- vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+ vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->pcpu_stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct vlan_pcpu_stats *mvlstats;
- mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
- u64_stats_init(&mvlstats->syncp);
- }
-
return 0;
}
@@ -589,13 +582,13 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
for_each_possible_cpu(i) {
p = per_cpu_ptr(vlan->pcpu_stats, i);
do {
- start = u64_stats_fetch_begin_bh(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
rx_packets = p->rx_packets;
rx_bytes = p->rx_bytes;
rx_multicast = p->rx_multicast;
tx_packets = p->tx_packets;
tx_bytes = p->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index d2bb12bfabd5..34924dfadd00 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -47,16 +47,7 @@ static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
static int nlmon_dev_init(struct net_device *dev)
{
- int i;
-
- dev->lstats = alloc_percpu(struct pcpu_lstats);
-
- for_each_possible_cpu(i) {
- struct pcpu_lstats *nlmstats;
- nlmstats = per_cpu_ptr(dev->lstats, i);
- u64_stats_init(&nlmstats->syncp);
- }
-
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
return dev->lstats == NULL ? -ENOMEM : 0;
}
@@ -99,10 +90,10 @@ nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
nl_stats = per_cpu_ptr(dev->lstats, i);
do {
- start = u64_stats_fetch_begin_bh(&nl_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&nl_stats->syncp);
tbytes = nl_stats->bytes;
tpackets = nl_stats->packets;
- } while (u64_stats_fetch_retry_bh(&nl_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start));
packets += tpackets;
bytes += tbytes;
@@ -145,7 +136,8 @@ static void nlmon_setup(struct net_device *dev)
dev->ethtool_ops = &nlmon_ethtool_ops;
dev->destructor = free_netdev;
- dev->features = NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ NETIF_F_HIGHDMA | NETIF_F_LLTX;
dev->flags = IFF_NOARP;
/* That's rather a softlimit here, which, of course,
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9b5d46c03eed..6a17f92153b3 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -71,6 +71,12 @@ config BCM63XX_PHY
---help---
Currently supports the 6348 and 6358 PHYs.
+config BCM7XXX_PHY
+ tristate "Drivers for Broadcom 7xxx SOCs internal PHYs"
+ ---help---
+ Currently supports the BCM7366, BCM7439, BCM7445, and the
+ 40nm and 65nm generations of BCM7xxx Set Top Box SoCs.

+
config BCM87XX_PHY
tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 9013dfa12aa3..07d24024863e 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index bc71947b1ec3..643464d5a727 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -27,6 +27,9 @@
#define AT803X_MMD_ACCESS_CONTROL 0x0D
#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
#define AT803X_FUNC_DATA 0x4003
+#define AT803X_INER 0x0012
+#define AT803X_INER_INIT 0xec00
+#define AT803X_INSR 0x0013
#define AT803X_DEBUG_ADDR 0x1D
#define AT803X_DEBUG_DATA 0x1E
#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05
@@ -191,6 +194,31 @@ static int at803x_config_init(struct phy_device *phydev)
return 0;
}
+static int at803x_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, AT803X_INSR);
+
+ return (err < 0) ? err : 0;
+}
+
+static int at803x_config_intr(struct phy_device *phydev)
+{
+ int err;
+ int value;
+
+ value = phy_read(phydev, AT803X_INER);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, AT803X_INER,
+ value | AT803X_INER_INIT);
+ else
+ err = phy_write(phydev, AT803X_INER, 0);
+
+ return err;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* ATHEROS 8035 */
@@ -240,6 +268,8 @@ static struct phy_driver at803x_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = &at803x_ack_interrupt,
+ .config_intr = &at803x_config_intr,
.driver = {
.owner = THIS_MODULE,
},
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
new file mode 100644
index 000000000000..526b94cea569
--- /dev/null
+++ b/drivers/net/phy/bcm7xxx.c
@@ -0,0 +1,359 @@
+/*
+ * Broadcom BCM7xxx internal transceivers support.
+ *
+ * Copyright (C) 2014, Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/brcmphy.h>
+
+/* Broadcom BCM7xxx internal PHY registers */
+#define MII_BCM7XXX_CHANNEL_WIDTH 0x2000
+
+/* 40nm only register definitions */
+#define MII_BCM7XXX_100TX_AUX_CTL 0x10
+#define MII_BCM7XXX_100TX_FALSE_CAR 0x13
+#define MII_BCM7XXX_100TX_DISC 0x14
+#define MII_BCM7XXX_AUX_MODE 0x1d
+#define MII_BCM7XX_64CLK_MDIO BIT(12)
+#define MII_BCM7XXX_CORE_BASE1E 0x1e
+#define MII_BCM7XXX_TEST 0x1f
+#define MII_BCM7XXX_SHD_MODE_2 BIT(2)
+
+/* 28nm only register definitions */
+#define MISC_ADDR(base, channel) base, channel
+
+#define DSP_TAP10 MISC_ADDR(0x0a, 0)
+#define PLL_PLLCTRL_1 MISC_ADDR(0x32, 1)
+#define PLL_PLLCTRL_2 MISC_ADDR(0x32, 2)
+#define PLL_PLLCTRL_4 MISC_ADDR(0x33, 0)
+
+#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0)
+#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1)
+#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3)
+#define AFE_TX_CONFIG MISC_ADDR(0x39, 0)
+#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
+
+#define CORE_EXPB0 0xb0
+
+static int bcm7445_config_init(struct phy_device *phydev)
+{
+ int ret;
+ const struct bcm7445_regs {
+ int reg;
+ u16 value;
+ } bcm7445_regs_cfg[] = {
+ /* increases ADC latency by 24ns */
+ { MII_BCM54XX_EXP_SEL, 0x0038 },
+ { MII_BCM54XX_EXP_DATA, 0xAB95 },
+ /* increases internal 1V LDO voltage by 5% */
+ { MII_BCM54XX_EXP_SEL, 0x2038 },
+ { MII_BCM54XX_EXP_DATA, 0xBB22 },
+ /* reduce RX low pass filter corner frequency */
+ { MII_BCM54XX_EXP_SEL, 0x6038 },
+ { MII_BCM54XX_EXP_DATA, 0xFFC5 },
+ /* reduce RX high pass filter corner frequency */
+ { MII_BCM54XX_EXP_SEL, 0x003a },
+ { MII_BCM54XX_EXP_DATA, 0x2002 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm7445_regs_cfg); i++) {
+ ret = phy_write(phydev,
+ bcm7445_regs_cfg[i].reg,
+ bcm7445_regs_cfg[i].value);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void phy_write_exp(struct phy_device *phydev,
+ u16 reg, u16 value)
+{
+ phy_write(phydev, MII_BCM54XX_EXP_SEL, MII_BCM54XX_EXP_SEL_ER | reg);
+ phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
+}
+
+static void phy_write_misc(struct phy_device *phydev,
+ u16 reg, u16 chl, u16 value)
+{
+ int tmp;
+
+ phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+
+ tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
+ tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
+ phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
+
+ tmp = (chl * MII_BCM7XXX_CHANNEL_WIDTH) | reg;
+ phy_write(phydev, MII_BCM54XX_EXP_SEL, tmp);
+
+ phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
+}
+
+static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
+{
+ /* Increase VCO range to prevent unlocking problem of PLL at low
+ * temp
+ */
+ phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
+
+ /* Change Ki to 011 */
+ phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
+
+ /* Disable loading of TVCO buffer to bandgap, set bandgap trim
+ * to 111
+ */
+ phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
+
+ /* Adjust bias current trim by -3 */
+ phy_write_misc(phydev, DSP_TAP10, 0x690b);
+
+ /* Switch to CORE_BASE1E */
+ phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd);
+
+ /* Reset R_CAL/RC_CAL Engine */
+ phy_write_exp(phydev, CORE_EXPB0, 0x0010);
+
+ /* Disable Reset R_CAL/RC_CAL Engine */
+ phy_write_exp(phydev, CORE_EXPB0, 0x0000);
+
+ /* write AFE_RXCONFIG_0 */
+ phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
+
+ /* write AFE_RXCONFIG_1 */
+ phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
+
+ /* write AFE_RX_LP_COUNTER */
+ phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+
+ /* write AFE_HPF_TRIM_OTHERS */
+ phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
+
+ /* write AFE_TX_CONFIG */
+ phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
+
+ return 0;
+}
+
+static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = bcm7445_config_init(phydev);
+ if (ret)
+ return ret;
+
+ return bcm7xxx_28nm_afe_config_init(phydev);
+}
+
+static int phy_set_clr_bits(struct phy_device *dev, int location,
+ int set_mask, int clr_mask)
+{
+ int v, ret;
+
+ v = phy_read(dev, location);
+ if (v < 0)
+ return v;
+
+ v &= ~clr_mask;
+ v |= set_mask;
+
+ ret = phy_write(dev, location, v);
+ if (ret < 0)
+ return ret;
+
+ return v;
+}
+
+static int bcm7xxx_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable 64 clock MDIO */
+ phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
+ phy_read(phydev, MII_BCM7XXX_AUX_MODE);
+
+ /* Workaround only required for 100Mbits/sec */
+ if (!(phydev->dev_flags & PHY_BRCM_100MBPS_WAR))
+ return 0;
+
+ /* set shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
+ if (ret < 0)
+ return ret;
+
+ /* set iddq_clkbias */
+ phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0F00);
+ udelay(10);
+
+ /* reset iddq_clkbias */
+ phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0C00);
+
+ phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
+
+ /* reset shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* Workaround for putting the PHY in IDDQ mode, required
+ * for all BCM7XXX PHYs
+ */
+static int bcm7xxx_suspend(struct phy_device *phydev)
+{
+ int ret;
+ const struct bcm7xxx_regs {
+ int reg;
+ u16 value;
+ } bcm7xxx_suspend_cfg[] = {
+ { MII_BCM7XXX_TEST, 0x008b },
+ { MII_BCM7XXX_100TX_AUX_CTL, 0x01c0 },
+ { MII_BCM7XXX_100TX_DISC, 0x7000 },
+ { MII_BCM7XXX_TEST, 0x000f },
+ { MII_BCM7XXX_100TX_AUX_CTL, 0x20d0 },
+ { MII_BCM7XXX_TEST, 0x000b },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm7xxx_suspend_cfg); i++) {
+ ret = phy_write(phydev,
+ bcm7xxx_suspend_cfg[i].reg,
+ bcm7xxx_suspend_cfg[i].value);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static struct phy_driver bcm7xxx_driver[] = {
+{
+ .phy_id = PHY_ID_BCM7366,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7366",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_afe_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_afe_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_ID_BCM7439,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7439",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_afe_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_afe_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_ID_BCM7445,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7445",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .name = "Broadcom BCM7XXX 28nm",
+ .phy_id = PHY_ID_BCM7XXX_28,
+ .phy_id_mask = PHY_BCM_OUI_MASK,
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_BCM_OUI_4,
+ .phy_id_mask = 0xffff0000,
+ .name = "Broadcom BCM7XXX 40nm",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_BCM_OUI_5,
+ .phy_id_mask = 0xffffff00,
+ .name = "Broadcom BCM7XXX 65nm",
+ .features = PHY_BASIC_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_dummy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_config_init,
+ .driver = { .owner = THIS_MODULE },
+} };
+
+static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+ { PHY_ID_BCM7366, 0xfffffff0, },
+ { PHY_ID_BCM7439, 0xfffffff0, },
+ { PHY_ID_BCM7445, 0xfffffff0, },
+ { PHY_ID_BCM7XXX_28, 0xfffffc00 },
+ { PHY_BCM_OUI_4, 0xffff0000 },
+ { PHY_BCM_OUI_5, 0xffffff00 },
+ { }
+};
+
+static int __init bcm7xxx_phy_init(void)
+{
+ return phy_drivers_register(bcm7xxx_driver,
+ ARRAY_SIZE(bcm7xxx_driver));
+}
+
+static void __exit bcm7xxx_phy_exit(void)
+{
+ phy_drivers_unregister(bcm7xxx_driver,
+ ARRAY_SIZE(bcm7xxx_driver));
+}
+
+module_init(bcm7xxx_phy_init);
+module_exit(bcm7xxx_phy_exit);
+
+MODULE_DEVICE_TABLE(mdio, bcm7xxx_tbl);
+
+MODULE_DESCRIPTION("Broadcom BCM7xxx internal PHY driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
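For context on how the new driver gets used: a MAC driver on one of these set-top-box SoCs binds to the internal PHY through the normal phylib path and can request the 40nm 100Mbit/s workaround via the dev_flags passed at connect time. A hedged sketch with placeholder names and an assumed device-tree PHY node (needs <linux/of_mdio.h>, <linux/phy.h>, <linux/brcmphy.h>):

static void example_adjust_link(struct net_device *ndev)
{
	/* react to link changes reported by the phylib state machine */
}

static int example_attach_internal_phy(struct net_device *ndev,
				       struct device_node *phy_np)
{
	struct phy_device *phydev;

	phydev = of_phy_connect(ndev, phy_np, example_adjust_link,
				PHY_BRCM_100MBPS_WAR,	/* checked by bcm7xxx_config_init() */
				PHY_INTERFACE_MODE_MII);
	return phydev ? 0 : -ENODEV;
}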
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f8c90ea75108..34088d60da74 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -25,58 +25,6 @@
#define BRCM_PHY_REV(phydev) \
((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
-
-#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
-#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
-#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
-
-#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
-#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
-
-#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
-#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
-#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
-#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
-
-#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
-#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
-#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */
-#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */
-#define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */
-#define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */
-#define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */
-#define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */
-#define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */
-#define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */
-#define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */
-#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */
-#define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */
-#define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */
-#define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */
-#define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */
-#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */
-#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */
-
-#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */
-#define MII_BCM54XX_SHD_WRITE 0x8000
-#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
-#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
-
-/*
- * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
- */
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
-#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
-#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
-
-#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
-#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
-#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
-
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
-
-
/*
* Broadcom LED source encodings. These are used in BCM5461, BCM5481,
* BCM5482, and possibly some others.
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 98e7cbf720a5..6a999e6814a0 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
@@ -47,6 +48,7 @@
#define CAL_EVENT 7
#define CAL_TRIGGER 7
#define PER_TRIGGER 6
+#define DP83640_N_PINS 12
#define MII_DP83640_MICR 0x11
#define MII_DP83640_MISR 0x12
@@ -173,6 +175,37 @@ MODULE_PARM_DESC(chosen_phy, \
MODULE_PARM_DESC(gpio_tab, \
"Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
+static void dp83640_gpio_defaults(struct ptp_pin_desc *pd)
+{
+ int i, index;
+
+ for (i = 0; i < DP83640_N_PINS; i++) {
+ snprintf(pd[i].name, sizeof(pd[i].name), "GPIO%d", 1 + i);
+ pd[i].index = i;
+ }
+
+ for (i = 0; i < GPIO_TABLE_SIZE; i++) {
+ if (gpio_tab[i] < 1 || gpio_tab[i] > DP83640_N_PINS) {
+ pr_err("gpio_tab[%d]=%hu out of range", i, gpio_tab[i]);
+ return;
+ }
+ }
+
+ index = gpio_tab[CALIBRATE_GPIO] - 1;
+ pd[index].func = PTP_PF_PHYSYNC;
+ pd[index].chan = 0;
+
+ index = gpio_tab[PEROUT_GPIO] - 1;
+ pd[index].func = PTP_PF_PEROUT;
+ pd[index].chan = 0;
+
+ for (i = EXTTS0_GPIO; i < GPIO_TABLE_SIZE; i++) {
+ index = gpio_tab[i] - 1;
+ pd[index].func = PTP_PF_EXTTS;
+ pd[index].chan = i - EXTTS0_GPIO;
+ }
+}
+
/* a list of clocks and a mutex to protect it */
static LIST_HEAD(phyter_clocks);
static DEFINE_MUTEX(phyter_clocks_lock);
@@ -266,15 +299,22 @@ static u64 phy2txts(struct phy_txts *p)
return ns;
}
-static void periodic_output(struct dp83640_clock *clock,
- struct ptp_clock_request *clkreq, bool on)
+static int periodic_output(struct dp83640_clock *clock,
+ struct ptp_clock_request *clkreq, bool on)
{
struct dp83640_private *dp83640 = clock->chosen;
struct phy_device *phydev = dp83640->phydev;
- u32 sec, nsec, period;
+ u32 sec, nsec, pwidth;
u16 gpio, ptp_trig, trigger, val;
- gpio = on ? gpio_tab[PEROUT_GPIO] : 0;
+ if (on) {
+ gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT, 0);
+ if (gpio < 1)
+ return -EINVAL;
+ } else {
+ gpio = 0;
+ }
+
trigger = PER_TRIGGER;
ptp_trig = TRIG_WR |
@@ -291,13 +331,14 @@ static void periodic_output(struct dp83640_clock *clock,
ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
ext_write(0, phydev, PAGE4, PTP_CTL, val);
mutex_unlock(&clock->extreg_lock);
- return;
+ return 0;
}
sec = clkreq->perout.start.sec;
nsec = clkreq->perout.start.nsec;
- period = clkreq->perout.period.sec * 1000000000UL;
- period += clkreq->perout.period.nsec;
+ pwidth = clkreq->perout.period.sec * 1000000000UL;
+ pwidth += clkreq->perout.period.nsec;
+ pwidth /= 2;
mutex_lock(&clock->extreg_lock);
@@ -310,8 +351,8 @@ static void periodic_output(struct dp83640_clock *clock,
ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16); /* ns[31:16] */
ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff); /* sec[15:0] */
ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16); /* sec[31:16] */
- ext_write(0, phydev, PAGE4, PTP_TDR, period & 0xffff); /* ns[15:0] */
- ext_write(0, phydev, PAGE4, PTP_TDR, period >> 16); /* ns[31:16] */
+ ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff); /* ns[15:0] */
+ ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16); /* ns[31:16] */
/*enable trigger*/
val &= ~TRIG_LOAD;
@@ -319,6 +360,7 @@ static void periodic_output(struct dp83640_clock *clock,
ext_write(0, phydev, PAGE4, PTP_CTL, val);
mutex_unlock(&clock->extreg_lock);
+ return 0;
}
/* ptp clock methods */
@@ -424,18 +466,21 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
struct phy_device *phydev = clock->chosen->phydev;
- int index;
+ unsigned int index;
u16 evnt, event_num, gpio_num;
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
index = rq->extts.index;
- if (index < 0 || index >= N_EXT_TS)
+ if (index >= N_EXT_TS)
return -EINVAL;
event_num = EXT_EVENT + index;
evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
if (on) {
- gpio_num = gpio_tab[EXTTS0_GPIO + index];
+ gpio_num = 1 + ptp_find_pin(clock->ptp_clock,
+ PTP_PF_EXTTS, index);
+ if (gpio_num < 1)
+ return -EINVAL;
evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
if (rq->extts.flags & PTP_FALLING_EDGE)
evnt |= EVNT_FALL;
@@ -448,8 +493,7 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
case PTP_CLK_REQ_PEROUT:
if (rq->perout.index != 0)
return -EINVAL;
- periodic_output(clock, rq, on);
- return 0;
+ return periodic_output(clock, rq, on);
default:
break;
@@ -458,6 +502,12 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
+static int ptp_dp83640_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ return 0;
+}
+
static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
@@ -875,6 +925,7 @@ static void dp83640_free_clocks(void)
mutex_destroy(&clock->extreg_lock);
mutex_destroy(&clock->clock_lock);
put_device(&clock->bus->dev);
+ kfree(clock->caps.pin_config);
kfree(clock);
}
@@ -894,12 +945,18 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
clock->caps.n_alarm = 0;
clock->caps.n_ext_ts = N_EXT_TS;
clock->caps.n_per_out = 1;
+ clock->caps.n_pins = DP83640_N_PINS;
clock->caps.pps = 0;
clock->caps.adjfreq = ptp_dp83640_adjfreq;
clock->caps.adjtime = ptp_dp83640_adjtime;
clock->caps.gettime = ptp_dp83640_gettime;
clock->caps.settime = ptp_dp83640_settime;
clock->caps.enable = ptp_dp83640_enable;
+ clock->caps.verify = ptp_dp83640_verify;
+ /*
+ * Convert the module param defaults into a dynamic pin configuration.
+ */
+ dp83640_gpio_defaults(clock->caps.pin_config);
/*
* Get a reference to this bus instance.
*/
@@ -950,6 +1007,13 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
if (!clock)
goto out;
+ clock->caps.pin_config = kzalloc(sizeof(struct ptp_pin_desc) *
+ DP83640_N_PINS, GFP_KERNEL);
+ if (!clock->caps.pin_config) {
+ kfree(clock);
+ clock = NULL;
+ goto out;
+ }
dp83640_clock_init(clock, bus);
list_add_tail(&phyter_clocks, &clock->list);
out:
@@ -1363,7 +1427,7 @@ static void __exit dp83640_exit(void)
}
MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
-MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.at>");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_LICENSE("GPL");
module_init(dp83640_init);
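With the dp83640 GPIO table converted into a dynamic pin configuration, the assignment taken from the module parameters is only a default: userspace can reprogram a pin through the PTP pin ioctls merged alongside this change. A hedged userspace sketch (assumes PTP_PIN_SETFUNC and struct ptp_pin_desc from <linux/ptp_clock.h>; the clock index is an example):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc pd;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&pd, 0, sizeof(pd));
	pd.index = 2;			/* third pin, "GPIO3" */
	pd.func = PTP_PF_EXTTS;		/* use it for external timestamps */
	pd.chan = 0;

	if (ioctl(fd, PTP_PIN_SETFUNC, &pd))
		perror("PTP_PIN_SETFUNC");
	return 0;
}

Since ptp_dp83640_verify() accepts every request, any of the twelve pins can take any function.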
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 9367acc84fbb..15bc7f9ea224 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -90,11 +90,6 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
-static int sun4i_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static int sun4i_mdio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -110,7 +105,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
bus->name = "sun4i_mii_bus";
bus->read = &sun4i_mdio_read;
bus->write = &sun4i_mdio_write;
- bus->reset = &sun4i_mdio_reset;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
bus->parent = &pdev->dev;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 71e49000fbf3..76f54b32a120 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -432,8 +432,28 @@ phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(phy_id);
+static ssize_t
+phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "%s\n", phy_modes(phydev->interface));
+}
+static DEVICE_ATTR_RO(phy_interface);
+
+static ssize_t
+phy_has_fixups_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "%d\n", phydev->has_fixups);
+}
+static DEVICE_ATTR_RO(phy_has_fixups);
+
static struct attribute *mdio_dev_attrs[] = {
&dev_attr_phy_id.attr,
+ &dev_attr_phy_interface.attr,
+ &dev_attr_phy_has_fixups.attr,
NULL,
};
ATTRIBUTE_GROUPS(mdio_dev);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5a8993b0cafc..5ad971a55c5d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -148,15 +148,52 @@ static int ks8737_config_intr(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
+static int kszphy_setup_led(struct phy_device *phydev,
+ unsigned int reg, unsigned int shift)
+{
+
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+ int rc, temp;
+ u32 val;
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (of_property_read_u32(of_node, "micrel,led-mode", &val))
+ return 0;
+
+ temp = phy_read(phydev, reg);
+ if (temp < 0)
+ return temp;
+
+ temp &= ~(3 << shift);
+ temp |= val << shift;
+ rc = phy_write(phydev, reg, temp);
+
+ return rc < 0 ? rc : 0;
+}
+
static int kszphy_config_init(struct phy_device *phydev)
{
return 0;
}
+static int kszphy_config_init_led8041(struct phy_device *phydev)
+{
+ /* single led control, register 0x1e bits 15..14 */
+ return kszphy_setup_led(phydev, 0x1e, 14);
+}
+
static int ksz8021_config_init(struct phy_device *phydev)
{
- int rc;
const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
+ int rc;
+
+ rc = kszphy_setup_led(phydev, 0x1f, 4);
+ if (rc)
+ dev_err(&phydev->dev, "failed to set led mode\n");
+
phy_write(phydev, MII_KSZPHY_OMSO, val);
rc = ksz_config_flags(phydev);
return rc < 0 ? rc : 0;
@@ -166,6 +203,10 @@ static int ks8051_config_init(struct phy_device *phydev)
{
int rc;
+ rc = kszphy_setup_led(phydev, 0x1f, 4);
+ if (rc)
+ dev_err(&phydev->dev, "failed to set led mode\n");
+
rc = ksz_config_flags(phydev);
return rc < 0 ? rc : 0;
}
@@ -327,7 +368,7 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = kszphy_config_init_led8041,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -342,7 +383,7 @@ static struct phy_driver ksphy_driver[] = {
.features = PHY_BASIC_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = kszphy_config_init_led8041,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -371,7 +412,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = kszphy_config_init_led8041,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 76d96b9ebcdb..1d788f19135b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -38,6 +38,26 @@
#include <asm/irq.h>
+static const char *phy_speed_to_str(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return "10Mbps";
+ case SPEED_100:
+ return "100Mbps";
+ case SPEED_1000:
+ return "1Gbps";
+ case SPEED_2500:
+ return "2.5Gbps";
+ case SPEED_10000:
+ return "10Gbps";
+ case SPEED_UNKNOWN:
+ return "Unknown";
+ default:
+ return "Unsupported (update phy.c)";
+ }
+}
+
/**
* phy_print_status - Convenience function to print out the current phy status
* @phydev: the phy_device struct
@@ -45,12 +65,13 @@
void phy_print_status(struct phy_device *phydev)
{
if (phydev->link) {
- pr_info("%s - Link is Up - %d/%s\n",
- dev_name(&phydev->dev),
- phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ netdev_info(phydev->attached_dev,
+ "Link is Up - %s/%s - flow control %s\n",
+ phy_speed_to_str(phydev->speed),
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
+ phydev->pause ? "rx/tx" : "off");
} else {
- pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
+ netdev_info(phydev->attached_dev, "Link is Down\n");
}
}
EXPORT_SYMBOL(phy_print_status);
@@ -62,7 +83,7 @@ EXPORT_SYMBOL(phy_print_status);
* If the @phydev driver has an ack_interrupt function, call it to
* ack and clear the phy device's interrupt.
*
- * Returns 0 on success on < 0 on error.
+ * Returns 0 on success or < 0 on error.
*/
static int phy_clear_interrupt(struct phy_device *phydev)
{
@@ -77,7 +98,7 @@ static int phy_clear_interrupt(struct phy_device *phydev)
* @phydev: the phy_device struct
* @interrupts: interrupt flags to configure for this @phydev
*
- * Returns 0 on success on < 0 on error.
+ * Returns 0 on success or < 0 on error.
*/
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
@@ -93,15 +114,16 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
* phy_aneg_done - return auto-negotiation status
* @phydev: target phy_device struct
*
- * Description: Reads the status register and returns 0 either if
- * auto-negotiation is incomplete, or if there was an error.
- * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
+ * Description: Return the auto-negotiation status of this @phydev.
+ * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
+ * is still pending.
*/
static inline int phy_aneg_done(struct phy_device *phydev)
{
- int retval = phy_read(phydev, MII_BMSR);
+ if (phydev->drv->aneg_done)
+ return phydev->drv->aneg_done(phydev);
- return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+ return genphy_aneg_done(phydev);
}
/* A structure for mapping a particular speed and duplex
@@ -283,7 +305,10 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, phydev->speed);
cmd->duplex = phydev->duplex;
- cmd->port = PORT_MII;
+ if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
+ cmd->port = PORT_BNC;
+ else
+ cmd->port = PORT_MII;
cmd->phy_address = phydev->addr;
cmd->transceiver = phy_is_internal(phydev) ?
XCVR_INTERNAL : XCVR_EXTERNAL;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 2f6989b1e0dc..0ce606624296 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -139,6 +139,7 @@ static int phy_scan_fixups(struct phy_device *phydev)
mutex_unlock(&phy_fixup_lock);
return err;
}
+ phydev->has_fixups = true;
}
}
mutex_unlock(&phy_fixup_lock);
@@ -534,16 +535,16 @@ static int phy_poll_reset(struct phy_device *phydev)
int phy_init_hw(struct phy_device *phydev)
{
- int ret;
+ int ret = 0;
if (!phydev->drv || !phydev->drv->config_init)
return 0;
- ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
- if (ret < 0)
- return ret;
+ if (phydev->drv->soft_reset)
+ ret = phydev->drv->soft_reset(phydev);
+ else
+ ret = genphy_soft_reset(phydev);
- ret = phy_poll_reset(phydev);
if (ret < 0)
return ret;
@@ -864,6 +865,22 @@ int genphy_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_config_aneg);
+/**
+ * genphy_aneg_done - return auto-negotiation status
+ * @phydev: target phy_device struct
+ *
+ * Description: Reads the status register and returns 0 either if
+ * auto-negotiation is incomplete, or if there was an error.
+ * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
+ */
+int genphy_aneg_done(struct phy_device *phydev)
+{
+ int retval = phy_read(phydev, MII_BMSR);
+
+ return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+}
+EXPORT_SYMBOL(genphy_aneg_done);
+
static int gen10g_config_aneg(struct phy_device *phydev)
{
return 0;
@@ -1029,6 +1046,27 @@ static int gen10g_read_status(struct phy_device *phydev)
return 0;
}
+/**
+ * genphy_soft_reset - software reset the PHY via BMCR_RESET bit
+ * @phydev: target phy_device struct
+ *
+ * Description: Perform a software PHY reset using the standard
+ * BMCR_RESET bit and poll for the reset bit to be cleared.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int genphy_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+ return phy_poll_reset(phydev);
+}
+EXPORT_SYMBOL(genphy_soft_reset);
+
static int genphy_config_init(struct phy_device *phydev)
{
int val;
@@ -1075,6 +1113,12 @@ static int genphy_config_init(struct phy_device *phydev)
return 0;
}
+static int gen10g_soft_reset(struct phy_device *phydev)
+{
+ /* Do nothing for now */
+ return 0;
+}
+
static int gen10g_config_init(struct phy_device *phydev)
{
/* Temporarily just say we support everything */
@@ -1249,9 +1293,11 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
+ .soft_reset = genphy_soft_reset,
.config_init = genphy_config_init,
.features = 0,
.config_aneg = genphy_config_aneg,
+ .aneg_done = genphy_aneg_done,
.read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -1260,6 +1306,7 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic 10G PHY",
+ .soft_reset = gen10g_soft_reset,
.config_init = gen10g_config_init,
.features = 0,
.config_aneg = gen10g_config_aneg,
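The new soft_reset and aneg_done callbacks let a PHY driver override the BMCR-based defaults while the core keeps a single code path in phy_init_hw() and phy_aneg_done(). A hedged sketch of a driver wiring in the generic implementations (IDs and name are made up; registration via phy_driver_register() is omitted):

static struct phy_driver example_phy_driver = {
	.phy_id		= 0x00112233,		/* hypothetical OUI/model */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example PHY",
	.features	= PHY_BASIC_FEATURES,
	.soft_reset	= genphy_soft_reset,	/* BMCR_RESET plus poll */
	.config_aneg	= genphy_config_aneg,
	.aneg_done	= genphy_aneg_done,	/* BMSR_ANEGCOMPLETE based */
	.read_status	= genphy_read_status,
	.driver		= { .owner = THIS_MODULE },
};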
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 4cf5fb922e59..22b047f1fcdc 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -1,5 +1,5 @@
/*
- * SPI driver for Micrel/Kendin KS8995M ethernet switch
+ * SPI driver for Micrel/Kendin KS8995M and KSZ8864RMN ethernet switches
*
* Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
*
@@ -70,7 +70,10 @@
#define KS8995_REG_IAD1 0x76 /* Indirect Access Data 1 */
#define KS8995_REG_IAD0 0x77 /* Indirect Access Data 0 */
+#define KSZ8864_REG_ID1 0xfe /* Chip ID in bit 7 */
+
#define KS8995_REGS_SIZE 0x80
+#define KSZ8864_REGS_SIZE 0x100
#define ID1_CHIPID_M 0xf
#define ID1_CHIPID_S 4
@@ -94,6 +97,7 @@ struct ks8995_switch {
struct spi_device *spi;
struct mutex lock;
struct ks8995_pdata *pdata;
+ struct bin_attribute regs_attr;
};
static inline u8 get_chip_id(u8 val)
@@ -216,11 +220,11 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
dev = container_of(kobj, struct device, kobj);
ks8995 = dev_get_drvdata(dev);
- if (unlikely(off > KS8995_REGS_SIZE))
+ if (unlikely(off > ks8995->regs_attr.size))
return 0;
- if ((off + count) > KS8995_REGS_SIZE)
- count = KS8995_REGS_SIZE - off;
+ if ((off + count) > ks8995->regs_attr.size)
+ count = ks8995->regs_attr.size - off;
if (unlikely(!count))
return count;
@@ -238,11 +242,11 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
dev = container_of(kobj, struct device, kobj);
ks8995 = dev_get_drvdata(dev);
- if (unlikely(off >= KS8995_REGS_SIZE))
+ if (unlikely(off >= ks8995->regs_attr.size))
return -EFBIG;
- if ((off + count) > KS8995_REGS_SIZE)
- count = KS8995_REGS_SIZE - off;
+ if ((off + count) > ks8995->regs_attr.size)
+ count = ks8995->regs_attr.size - off;
if (unlikely(!count))
return count;
@@ -251,7 +255,7 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
}
-static struct bin_attribute ks8995_registers_attr = {
+static const struct bin_attribute ks8995_registers_attr = {
.attr = {
.name = "registers",
.mode = S_IRUSR | S_IWUSR,
@@ -306,20 +310,44 @@ static int ks8995_probe(struct spi_device *spi)
goto err_drvdata;
}
+ memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
+ if (get_chip_id(ids[1]) != CHIPID_M) {
+ u8 val;
+
+ /* Check if this is a KSZ8864RMN */
+ err = ks8995_read(ks, &val, KSZ8864_REG_ID1, sizeof(val));
+ if (err < 0) {
+ dev_err(&spi->dev,
+ "unable to read chip id register, err=%d\n",
+ err);
+ goto err_drvdata;
+ }
+ if ((val & 0x80) == 0) {
+ dev_err(&spi->dev, "unknown chip:%02x,0\n", ids[1]);
+ goto err_drvdata;
+ }
+ ks->regs_attr.size = KSZ8864_REGS_SIZE;
+ }
+
err = ks8995_reset(ks);
if (err)
goto err_drvdata;
- err = sysfs_create_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+ err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
if (err) {
dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
err);
goto err_drvdata;
}
- dev_info(&spi->dev, "KS89%02X device found, Chip ID:%01x, "
- "Revision:%01x\n", ids[0],
- get_chip_id(ids[1]), get_chip_rev(ids[1]));
+ if (get_chip_id(ids[1]) == CHIPID_M) {
+ dev_info(&spi->dev,
+ "KS8995 device found, Chip ID:%x, Revision:%x\n",
+ get_chip_id(ids[1]), get_chip_rev(ids[1]));
+ } else {
+ dev_info(&spi->dev, "KSZ8864 device found, Revision:%x\n",
+ get_chip_rev(ids[1]));
+ }
return 0;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 72ff14b811c6..e3923ebb693f 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -143,9 +143,8 @@ struct ppp {
struct sk_buff_head mrq; /* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
- struct sock_filter *pass_filter; /* filter for packets to pass */
- struct sock_filter *active_filter;/* filter for pkts to reset idle */
- unsigned pass_len, active_len;
+ struct sk_filter *pass_filter; /* filter for packets to pass */
+ struct sk_filter *active_filter;/* filter for pkts to reset idle */
#endif /* CONFIG_PPP_FILTER */
struct net *ppp_net; /* the net we belong to */
struct ppp_link_stats stats64; /* 64 bit network stats */
@@ -755,28 +754,42 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PPPIOCSPASS:
{
struct sock_filter *code;
+
err = get_filter(argp, &code);
if (err >= 0) {
+ struct sock_fprog fprog = {
+ .len = err,
+ .filter = code,
+ };
+
ppp_lock(ppp);
- kfree(ppp->pass_filter);
- ppp->pass_filter = code;
- ppp->pass_len = err;
+ if (ppp->pass_filter)
+ sk_unattached_filter_destroy(ppp->pass_filter);
+ err = sk_unattached_filter_create(&ppp->pass_filter,
+ &fprog);
+ kfree(code);
ppp_unlock(ppp);
- err = 0;
}
break;
}
case PPPIOCSACTIVE:
{
struct sock_filter *code;
+
err = get_filter(argp, &code);
if (err >= 0) {
+ struct sock_fprog fprog = {
+ .len = err,
+ .filter = code,
+ };
+
ppp_lock(ppp);
- kfree(ppp->active_filter);
- ppp->active_filter = code;
- ppp->active_len = err;
+ if (ppp->active_filter)
+ sk_unattached_filter_destroy(ppp->active_filter);
+ err = sk_unattached_filter_create(&ppp->active_filter,
+ &fprog);
+ kfree(code);
ppp_unlock(ppp);
- err = 0;
}
break;
}
@@ -1184,7 +1197,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
a four-byte PPP header on each packet */
*skb_push(skb, 2) = 1;
if (ppp->pass_filter &&
- sk_run_filter(skb, ppp->pass_filter) == 0) {
+ SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev,
"PPP: outbound frame "
@@ -1194,7 +1207,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
}
/* if this packet passes the active filter, record the time */
if (!(ppp->active_filter &&
- sk_run_filter(skb, ppp->active_filter) == 0))
+ SK_RUN_FILTER(ppp->active_filter, skb) == 0))
ppp->last_xmit = jiffies;
skb_pull(skb, 2);
#else
@@ -1818,7 +1831,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
*skb_push(skb, 2) = 0;
if (ppp->pass_filter &&
- sk_run_filter(skb, ppp->pass_filter) == 0) {
+ SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev,
"PPP: inbound frame "
@@ -1827,7 +1840,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
return;
}
if (!(ppp->active_filter &&
- sk_run_filter(skb, ppp->active_filter) == 0))
+ SK_RUN_FILTER(ppp->active_filter, skb) == 0))
ppp->last_recv = jiffies;
__skb_pull(skb, 2);
} else
@@ -2672,6 +2685,10 @@ ppp_create_interface(struct net *net, int unit, int *retp)
ppp->minseq = -1;
skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+ ppp->pass_filter = NULL;
+ ppp->active_filter = NULL;
+#endif /* CONFIG_PPP_FILTER */
/*
* drum roll: don't forget to set
@@ -2802,10 +2819,15 @@ static void ppp_destroy_interface(struct ppp *ppp)
skb_queue_purge(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
- kfree(ppp->pass_filter);
- ppp->pass_filter = NULL;
- kfree(ppp->active_filter);
- ppp->active_filter = NULL;
+ if (ppp->pass_filter) {
+ sk_unattached_filter_destroy(ppp->pass_filter);
+ ppp->pass_filter = NULL;
+ }
+
+ if (ppp->active_filter) {
+ sk_unattached_filter_destroy(ppp->active_filter);
+ ppp->active_filter = NULL;
+ }
#endif /* CONFIG_PPP_FILTER */
kfree_skb(ppp->xmit_pending);
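The PPP change above moves from storing raw sock_filter arrays and calling sk_run_filter() to fully constructed sk_filter objects, letting the core validate (and possibly JIT) the program once at attach time. A hedged sketch of the create/run/destroy lifecycle with a placeholder accept-all program (assumes <linux/filter.h>):

static struct sock_filter example_prog[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept the whole packet */
};

static int example_filter_skb(struct sk_buff *skb)
{
	struct sock_fprog fprog = {
		.len	= ARRAY_SIZE(example_prog),
		.filter	= example_prog,
	};
	struct sk_filter *filter;
	int err, keep;

	err = sk_unattached_filter_create(&filter, &fprog);
	if (err)
		return err;

	keep = SK_RUN_FILTER(filter, skb) != 0;	/* 0 means drop */
	sk_unattached_filter_destroy(filter);
	return keep;
}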
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 6d1f6ed3113f..a8497183ff8b 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -493,6 +493,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
ndev->netdev_ops = &rionet_netdev_ops;
ndev->mtu = RIO_MAX_MSG_SIZE - 14;
ndev->features = NETIF_F_LLTX;
+ SET_NETDEV_DEV(ndev, &mport->dev);
SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
spin_lock_init(&rnet->lock);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c8624a8235ab..33008c1d1d67 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1031,8 +1031,7 @@ static void team_port_leave(struct team *team, struct team_port *port)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
- gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
{
struct netpoll *np;
int err;
@@ -1040,11 +1039,11 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
if (!team->dev->npinfo)
return 0;
- np = kzalloc(sizeof(*np), gfp);
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;
- err = __netpoll_setup(np, port->dev, gfp);
+ err = __netpoll_setup(np, port->dev);
if (err) {
kfree(np);
return err;
@@ -1067,8 +1066,7 @@ static void team_port_disable_netpoll(struct team_port *port)
kfree(np);
}
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
- gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
{
return 0;
}
@@ -1156,7 +1154,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
- err = team_port_enable_netpoll(team, port, GFP_KERNEL);
+ err = team_port_enable_netpoll(team, port);
if (err) {
netdev_err(dev, "Failed to enable netpoll on device %s\n",
portname);
@@ -1540,16 +1538,10 @@ static int team_init(struct net_device *dev)
mutex_init(&team->lock);
team_set_no_mode(team);
- team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+ team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
if (!team->pcpu_stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct team_pcpu_stats *team_stats;
- team_stats = per_cpu_ptr(team->pcpu_stats, i);
- u64_stats_init(&team_stats->syncp);
- }
-
for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
INIT_HLIST_HEAD(&team->en_port_hlist[i]);
INIT_LIST_HEAD(&team->port_list);
@@ -1767,13 +1759,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
for_each_possible_cpu(i) {
p = per_cpu_ptr(team->pcpu_stats, i);
do {
- start = u64_stats_fetch_begin_bh(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
rx_packets = p->rx_packets;
rx_bytes = p->rx_bytes;
rx_multicast = p->rx_multicast;
tx_packets = p->tx_packets;
tx_bytes = p->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -1856,7 +1848,7 @@ static void team_netpoll_cleanup(struct net_device *dev)
}
static int team_netpoll_setup(struct net_device *dev,
- struct netpoll_info *npifo, gfp_t gfp)
+ struct netpoll_info *npifo)
{
struct team *team = netdev_priv(dev);
struct team_port *port;
@@ -1864,7 +1856,7 @@ static int team_netpoll_setup(struct net_device *dev,
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port, gfp);
+ err = team_port_enable_netpoll(team, port);
if (err) {
__team_netpoll_cleanup(team);
break;
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index d671fc3ac5ac..dbde3412ee5e 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -432,9 +432,9 @@ static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
struct lb_stats tmp;
do {
- start = u64_stats_fetch_begin_bh(syncp);
+ start = u64_stats_fetch_begin_irq(syncp);
tmp.tx_bytes = cpu_stats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(syncp, start));
+ } while (u64_stats_fetch_retry_irq(syncp, start));
acc_stats->tx_bytes += tmp.tx_bytes;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 26f8635b027d..ee328ba101e7 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -452,7 +452,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
--tun->numqueues;
if (clean) {
- rcu_assign_pointer(tfile->tun, NULL);
+ RCU_INIT_POINTER(tfile->tun, NULL);
sock_put(&tfile->sk);
} else
tun_disable_queue(tun, tfile);
@@ -499,12 +499,12 @@ static void tun_detach_all(struct net_device *dev)
tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
wake_up_all(&tfile->wq.wait);
- rcu_assign_pointer(tfile->tun, NULL);
+ RCU_INIT_POINTER(tfile->tun, NULL);
--tun->numqueues;
}
list_for_each_entry(tfile, &tun->disabled, next) {
wake_up_all(&tfile->wq.wait);
- rcu_assign_pointer(tfile->tun, NULL);
+ RCU_INIT_POINTER(tfile->tun, NULL);
}
BUG_ON(tun->numqueues != 0);
@@ -2194,7 +2194,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
&tun_proto);
if (!tfile)
return -ENOMEM;
- rcu_assign_pointer(tfile->tun, NULL);
+ RCU_INIT_POINTER(tfile->tun, NULL);
tfile->net = get_net(current->nsproxy->net_ns);
tfile->flags = 0;
tfile->ifindex = 0;
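The tun.c conversion follows the general RCU rule: rcu_assign_pointer() is needed only when publishing an initialized object to readers, while storing NULL (or initializing before the pointer is visible) carries nothing to order and can use RCU_INIT_POINTER(). A small sketch with a hypothetical structure:

struct example_state;

struct example_ctx {
	struct example_state __rcu *state;
};

static void example_clear(struct example_ctx *ctx)
{
	RCU_INIT_POINTER(ctx->state, NULL);		/* no barrier needed for NULL */
}

static void example_publish(struct example_ctx *ctx,
			    struct example_state *new_state)
{
	rcu_assign_pointer(ctx->state, new_state);	/* orders init before publish */
}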
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index bd363b27e854..9ea4bfe5d318 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -625,6 +625,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Novatel Expedite E371 - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0x9011, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* AnyDATA ADU960S - handled by qmi_wwan */
{
USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d350d2795e10..549dbac710ed 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -73,6 +73,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
u8 iface_no;
int err;
int eth_hlen;
+ u16 mbim_mtu;
u16 ntb_fmt_supported;
__le16 max_datagram_size;
@@ -252,6 +253,14 @@ out:
/* set MTU to max supported by the device if necessary */
if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
dev->net->mtu = ctx->max_datagram_size - eth_hlen;
+
+ /* do not exceed operator preferred MTU */
+ if (ctx->mbim_extended_desc) {
+ mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
+ if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
+ dev->net->mtu = mbim_mtu;
+ }
+
return 0;
}
@@ -390,6 +399,14 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
ctx->mbim_desc = (const struct usb_cdc_mbim_desc *)buf;
break;
+ case USB_CDC_MBIM_EXTENDED_TYPE:
+ if (buf[0] < sizeof(*(ctx->mbim_extended_desc)))
+ break;
+
+ ctx->mbim_extended_desc =
+ (const struct usb_cdc_mbim_extended_desc *)buf;
+ break;
+
default:
break;
}
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index acfcc32b323d..8f37efd2d2fb 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -210,7 +210,7 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
* (0x86dd) so Linux can understand it.
*/
if ((buf->data[sizeof(*ethhdr)] & 0xf0) == 0x60)
- ethhdr->h_proto = __constant_htons(ETH_P_IPV6);
+ ethhdr->h_proto = htons(ETH_P_IPV6);
}
if (count) {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 313cb6cd4848..e3458e3c44f1 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -500,6 +500,13 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Novatel Expedite E371 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0x9011,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* Dell Wireless 5800 (Novatel E362) */
USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8195,
USB_CLASS_COMM,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index adb12f349a61..18e12a3f7fc3 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -21,9 +21,10 @@
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
/* Version Information */
-#define DRIVER_VERSION "v1.04.0 (2014/01/15)"
+#define DRIVER_VERSION "v1.06.0 (2014/03/03)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@@ -59,9 +60,11 @@
#define PLA_TCR0 0xe610
#define PLA_TCR1 0xe612
#define PLA_TXFIFO_CTRL 0xe618
-#define PLA_RSTTELLY 0xe800
+#define PLA_RSTTALLY 0xe800
#define PLA_CR 0xe813
#define PLA_CRWECR 0xe81c
+#define PLA_CONFIG12 0xe81e /* CONFIG1, CONFIG2 */
+#define PLA_CONFIG34 0xe820 /* CONFIG3, CONFIG4 */
#define PLA_CONFIG5 0xe822
#define PLA_PHY_PWR 0xe84c
#define PLA_OOB_CTRL 0xe84f
@@ -69,7 +72,7 @@
#define PLA_MISC_0 0xe858
#define PLA_MISC_1 0xe85a
#define PLA_OCP_GPHY_BASE 0xe86c
-#define PLA_TELLYCNT 0xe890
+#define PLA_TALLYCNT 0xe890
#define PLA_SFF_STS_7 0xe8de
#define PLA_PHYSTATUS 0xe908
#define PLA_BP_BA 0xfc26
@@ -177,6 +180,9 @@
/* PLA_TCR1 */
#define VERSION_MASK 0x7cf0
+/* PLA_RSTTALLY */
+#define TALLY_RESET 0x0001
+
/* PLA_CR */
#define CR_RST 0x10
#define CR_RE 0x08
@@ -216,7 +222,14 @@
/* PAL_BDC_CR */
#define ALDPS_PROXY_MODE 0x0001
+/* PLA_CONFIG34 */
+#define LINK_ON_WAKE_EN 0x0010
+#define LINK_OFF_WAKE_EN 0x0008
+
/* PLA_CONFIG5 */
+#define BWF_EN 0x0040
+#define MWF_EN 0x0020
+#define UWF_EN 0x0010
#define LAN_WAKE_EN 0x0002
/* PLA_LED_FEATURE */
@@ -436,6 +449,9 @@ enum rtl8152_flags {
RTL8152_SET_RX_MODE,
WORK_ENABLE,
RTL8152_LINK_CHG,
+ SELECTIVE_SUSPEND,
+ PHY_RESET,
+ SCHEDULE_TASKLET,
};
/* Define these values to match your device */
@@ -449,11 +465,40 @@ enum rtl8152_flags {
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
+#define REALTEK_USB_DEVICE(vend, prod) \
+ USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
+
+struct tally_counter {
+ __le64 tx_packets;
+ __le64 rx_packets;
+ __le64 tx_errors;
+ __le32 rx_errors;
+ __le16 rx_missed;
+ __le16 align_errors;
+ __le32 tx_one_collision;
+ __le32 tx_multi_collision;
+ __le64 rx_unicast;
+ __le64 rx_broadcast;
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+ __le16 tx_underun;
+};
+
struct rx_desc {
__le32 opts1;
#define RX_LEN_MASK 0x7fff
+
__le32 opts2;
+#define RD_UDP_CS (1 << 23)
+#define RD_TCP_CS (1 << 22)
+#define RD_IPV6_CS (1 << 20)
+#define RD_IPV4_CS (1 << 19)
+
__le32 opts3;
+#define IPF (1 << 23) /* IP checksum fail */
+#define UDPF (1 << 22) /* UDP checksum fail */
+#define TCPF (1 << 21) /* TCP checksum fail */
+
__le32 opts4;
__le32 opts5;
__le32 opts6;
@@ -463,13 +508,21 @@ struct tx_desc {
__le32 opts1;
#define TX_FS (1 << 31) /* First segment of a packet */
#define TX_LS (1 << 30) /* Final segment of a packet */
-#define TX_LEN_MASK 0x3ffff
+#define GTSENDV4 (1 << 28)
+#define GTSENDV6 (1 << 27)
+#define GTTCPHO_SHIFT 18
+#define GTTCPHO_MAX 0x7fU
+#define TX_LEN_MAX 0x3ffffU
__le32 opts2;
#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */
+#define MSS_SHIFT 17
+#define MSS_MAX 0x7ffU
+#define TCPHO_SHIFT 17
+#define TCPHO_MAX 0x7ffU
};
struct r8152;
@@ -511,11 +564,13 @@ struct r8152 {
void (*init)(struct r8152 *);
int (*enable)(struct r8152 *);
void (*disable)(struct r8152 *);
+ void (*up)(struct r8152 *);
void (*down)(struct r8152 *);
void (*unload)(struct r8152 *);
} rtl_ops;
int intr_interval;
+ u32 saved_wolopts;
u32 msg_enable;
u32 tx_qlen;
u16 ocp_base;
@@ -534,12 +589,21 @@ enum rtl_version {
RTL_VER_MAX
};
+enum tx_csum_stat {
+ TX_CSUM_SUCCESS = 0,
+ TX_CSUM_TSO,
+ TX_CSUM_NONE
+};
+
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
* The RTL chips use a 64 element hash table based on the Ethernet CRC.
*/
static const int multicast_filter_limit = 32;
static unsigned int rx_buf_sz = 16384;
+#define RTL_LIMITED_TSO_SIZE (rx_buf_sz - sizeof(struct tx_desc) - \
+ VLAN_ETH_HLEN - VLAN_HLEN)
+
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
@@ -577,6 +641,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
value, index, tmp, size, 500);
kfree(tmp);
+
return ret;
}
@@ -862,11 +927,21 @@ static u16 sram_read(struct r8152 *tp, u16 addr)
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
{
struct r8152 *tp = netdev_priv(netdev);
+ int ret;
if (phy_id != R8152_PHY_ID)
return -EINVAL;
- return r8152_mdio_read(tp, reg);
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
+
+ ret = r8152_mdio_read(tp, reg);
+
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
}
static
@@ -877,7 +952,12 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
if (phy_id != R8152_PHY_ID)
return;
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
r8152_mdio_write(tp, reg, val);
+
+ usb_autopm_put_interface(tp->intf);
}
static
@@ -886,11 +966,26 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
static inline void set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
+ int ret;
u8 node_id[8] = {0};
- if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0)
+ if (tp->version == RTL_VER_01)
+ ret = pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id);
+ else
+ ret = pla_ocp_read(tp, PLA_BACKUP, sizeof(node_id), node_id);
+
+ if (ret < 0) {
netif_notice(tp, probe, dev, "inet addr fail\n");
- else {
+ } else {
+ if (tp->version != RTL_VER_01) {
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
+ CRWECR_CONFIG);
+ pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES,
+ sizeof(node_id), node_id);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
+ CRWECR_NORAML);
+ }
+
memcpy(dev->dev_addr, node_id, dev->addr_len);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
}
@@ -913,15 +1008,9 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
-static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
-{
- return &dev->stats;
-}
-
static void read_bulk_callback(struct urb *urb)
{
struct net_device *netdev;
- unsigned long flags;
int status = urb->status;
struct rx_agg *agg;
struct r8152 *tp;
@@ -948,14 +1037,16 @@ static void read_bulk_callback(struct urb *urb)
if (!netif_carrier_ok(netdev))
return;
+ usb_mark_last_busy(tp->udev);
+
switch (status) {
case 0:
if (urb->actual_length < ETH_ZLEN)
break;
- spin_lock_irqsave(&tp->rx_lock, flags);
+ spin_lock(&tp->rx_lock);
list_add_tail(&agg->list, &tp->rx_done);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
+ spin_unlock(&tp->rx_lock);
tasklet_schedule(&tp->tl);
return;
case -ESHUTDOWN:
@@ -978,9 +1069,9 @@ static void read_bulk_callback(struct urb *urb)
if (result == -ENODEV) {
netif_device_detach(tp->netdev);
} else if (result) {
- spin_lock_irqsave(&tp->rx_lock, flags);
+ spin_lock(&tp->rx_lock);
list_add_tail(&agg->list, &tp->rx_done);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
+ spin_unlock(&tp->rx_lock);
tasklet_schedule(&tp->tl);
}
}
@@ -988,7 +1079,7 @@ static void read_bulk_callback(struct urb *urb)
static void write_bulk_callback(struct urb *urb)
{
struct net_device_stats *stats;
- unsigned long flags;
+ struct net_device *netdev;
struct tx_agg *agg;
struct r8152 *tp;
int status = urb->status;
@@ -1001,21 +1092,24 @@ static void write_bulk_callback(struct urb *urb)
if (!tp)
return;
- stats = rtl8152_get_stats(tp->netdev);
+ netdev = tp->netdev;
+ stats = &netdev->stats;
if (status) {
if (net_ratelimit())
- netdev_warn(tp->netdev, "Tx status %d\n", status);
+ netdev_warn(netdev, "Tx status %d\n", status);
stats->tx_errors += agg->skb_num;
} else {
stats->tx_packets += agg->skb_num;
stats->tx_bytes += agg->skb_len;
}
- spin_lock_irqsave(&tp->tx_lock, flags);
+ spin_lock(&tp->tx_lock);
list_add_tail(&agg->list, &tp->tx_free);
- spin_unlock_irqrestore(&tp->tx_lock, flags);
+ spin_unlock(&tp->tx_lock);
- if (!netif_carrier_ok(tp->netdev))
+ usb_autopm_put_interface_async(tp->intf);
+
+ if (!netif_carrier_ok(netdev))
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -1220,6 +1314,9 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
struct tx_agg *agg = NULL;
unsigned long flags;
+ if (list_empty(&tp->tx_free))
+ return NULL;
+
spin_lock_irqsave(&tp->tx_lock, flags);
if (!list_empty(&tp->tx_free)) {
struct list_head *cursor;
@@ -1233,24 +1330,138 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
return agg;
}
-static void
-r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
+static inline __be16 get_protocol(struct sk_buff *skb)
+{
+ __be16 protocol;
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ return protocol;
+}
+
+/*
+ * r8152_csum_workaround()
+ * The hw limits the value of the transport offset. When the offset is out of
+ * range, calculate the checksum in software.
+ */
+static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
+ struct sk_buff_head *list)
+{
+ if (skb_shinfo(skb)->gso_size) {
+ netdev_features_t features = tp->netdev->features;
+ struct sk_buff_head seg_list;
+ struct sk_buff *segs, *nskb;
+
+ features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+ segs = skb_gso_segment(skb, features);
+ if (IS_ERR(segs) || !segs)
+ goto drop;
+
+ __skb_queue_head_init(&seg_list);
+
+ do {
+ nskb = segs;
+ segs = segs->next;
+ nskb->next = NULL;
+ __skb_queue_tail(&seg_list, nskb);
+ } while (segs);
+
+ skb_queue_splice(&seg_list, list);
+ dev_kfree_skb(skb);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb_checksum_help(skb) < 0)
+ goto drop;
+
+ __skb_queue_head(list, skb);
+ } else {
+ struct net_device_stats *stats;
+
+drop:
+ stats = &tp->netdev->stats;
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ }
+}
+
+/*
+ * msdn_giant_send_check()
+ * According to Microsoft's documentation, the TCP pseudo header excludes the
+ * packet length for IPv6 TCP large packets.
+ */
+static int msdn_giant_send_check(struct sk_buff *skb)
+{
+ const struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ int ret;
+
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ return ret;
+
+ ipv6h = ipv6_hdr(skb);
+ th = tcp_hdr(skb);
+
+ th->check = 0;
+ th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+
+ return ret;
+}
+
+static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
+ struct sk_buff *skb, u32 len, u32 transport_offset)
{
- memset(desc, 0, sizeof(*desc));
+ u32 mss = skb_shinfo(skb)->gso_size;
+ u32 opts1, opts2 = 0;
+ int ret = TX_CSUM_SUCCESS;
+
+ WARN_ON_ONCE(len > TX_LEN_MAX);
+
+ opts1 = len | TX_FS | TX_LS;
+
+ if (mss) {
+ if (transport_offset > GTTCPHO_MAX) {
+ netif_warn(tp, tx_err, tp->netdev,
+ "Invalid transport offset 0x%x for TSO\n",
+ transport_offset);
+ ret = TX_CSUM_TSO;
+ goto unavailable;
+ }
- desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+ switch (get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ opts1 |= GTSENDV4;
+ break;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- __be16 protocol;
+ case htons(ETH_P_IPV6):
+ if (msdn_giant_send_check(skb)) {
+ ret = TX_CSUM_TSO;
+ goto unavailable;
+ }
+ opts1 |= GTSENDV6;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ opts1 |= transport_offset << GTTCPHO_SHIFT;
+ opts2 |= min(mss, MSS_MAX) << MSS_SHIFT;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ip_protocol;
- u32 opts2 = 0;
- if (skb->protocol == htons(ETH_P_8021Q))
- protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
+ if (transport_offset > TCPHO_MAX) {
+ netif_warn(tp, tx_err, tp->netdev,
+ "Invalid transport offset 0x%x\n",
+ transport_offset);
+ ret = TX_CSUM_NONE;
+ goto unavailable;
+ }
- switch (protocol) {
+ switch (get_protocol(skb)) {
case htons(ETH_P_IP):
opts2 |= IPV4_CS;
ip_protocol = ip_hdr(skb)->protocol;
@@ -1266,24 +1477,34 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
break;
}
- if (ip_protocol == IPPROTO_TCP) {
+ if (ip_protocol == IPPROTO_TCP)
opts2 |= TCP_CS;
- opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17;
- } else if (ip_protocol == IPPROTO_UDP) {
+ else if (ip_protocol == IPPROTO_UDP)
opts2 |= UDP_CS;
- } else {
+ else
WARN_ON_ONCE(1);
- }
- desc->opts2 = cpu_to_le32(opts2);
+ opts2 |= transport_offset << TCPHO_SHIFT;
}
+
+ desc->opts2 = cpu_to_le32(opts2);
+ desc->opts1 = cpu_to_le32(opts1);
+
+unavailable:
+ return ret;
}
static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
{
- int remain;
+ struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
+ int remain, ret;
u8 *tx_data;
+ __skb_queue_head_init(&skb_head);
+ spin_lock(&tx_queue->lock);
+ skb_queue_splice_init(tx_queue, &skb_head);
+ spin_unlock(&tx_queue->lock);
+
tx_data = agg->head;
agg->skb_num = agg->skb_len = 0;
remain = rx_buf_sz;
@@ -1292,32 +1513,56 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
struct tx_desc *tx_desc;
struct sk_buff *skb;
unsigned int len;
+ u32 offset;
- skb = skb_dequeue(&tp->tx_queue);
+ skb = __skb_dequeue(&skb_head);
if (!skb)
break;
- remain -= sizeof(*tx_desc);
- len = skb->len;
- if (remain < len) {
- skb_queue_head(&tp->tx_queue, skb);
+ len = skb->len + sizeof(*tx_desc);
+
+ if (len > remain) {
+ __skb_queue_head(&skb_head, skb);
break;
}
tx_data = tx_agg_align(tx_data);
tx_desc = (struct tx_desc *)tx_data;
+
+ offset = (u32)skb_transport_offset(skb);
+
+ if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) {
+ r8152_csum_workaround(tp, skb, &skb_head);
+ continue;
+ }
+
tx_data += sizeof(*tx_desc);
- r8152_tx_csum(tp, tx_desc, skb);
- memcpy(tx_data, skb->data, len);
- agg->skb_num++;
+ len = skb->len;
+ if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
+ struct net_device_stats *stats = &tp->netdev->stats;
+
+ stats->tx_dropped++;
+ dev_kfree_skb_any(skb);
+ tx_data -= sizeof(*tx_desc);
+ continue;
+ }
+
+ tx_data += len;
agg->skb_len += len;
+ agg->skb_num++;
+
dev_kfree_skb_any(skb);
- tx_data += len;
remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
}
+ if (!skb_queue_empty(&skb_head)) {
+ spin_lock(&tx_queue->lock);
+ skb_queue_splice(&skb_head, tx_queue);
+ spin_unlock(&tx_queue->lock);
+ }
+
netif_tx_lock(tp->netdev);
if (netif_queue_stopped(tp->netdev) &&
@@ -1326,20 +1571,67 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
netif_tx_unlock(tp->netdev);
+ ret = usb_autopm_get_interface_async(tp->intf);
+ if (ret < 0)
+ goto out_tx_fill;
+
usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
agg->head, (int)(tx_data - (u8 *)agg->head),
(usb_complete_t)write_bulk_callback, agg);
- return usb_submit_urb(agg->urb, GFP_ATOMIC);
+ ret = usb_submit_urb(agg->urb, GFP_ATOMIC);
+ if (ret < 0)
+ usb_autopm_put_interface_async(tp->intf);
+
+out_tx_fill:
+ return ret;
+}
+
+static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
+{
+ u8 checksum = CHECKSUM_NONE;
+ u32 opts2, opts3;
+
+ if (tp->version == RTL_VER_01)
+ goto return_result;
+
+ opts2 = le32_to_cpu(rx_desc->opts2);
+ opts3 = le32_to_cpu(rx_desc->opts3);
+
+ if (opts2 & RD_IPV4_CS) {
+ if (opts3 & IPF)
+ checksum = CHECKSUM_NONE;
+ else if ((opts2 & RD_UDP_CS) && (opts3 & UDPF))
+ checksum = CHECKSUM_NONE;
+ else if ((opts2 & RD_TCP_CS) && (opts3 & TCPF))
+ checksum = CHECKSUM_NONE;
+ else
+ checksum = CHECKSUM_UNNECESSARY;
+ } else if (opts2 & RD_IPV6_CS) {
+ if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
+ checksum = CHECKSUM_UNNECESSARY;
+ else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
+ checksum = CHECKSUM_UNNECESSARY;
+ }
+
+return_result:
+ return checksum;
}
static void rx_bottom(struct r8152 *tp)
{
unsigned long flags;
- struct list_head *cursor, *next;
+ struct list_head *cursor, *next, rx_queue;
+ if (list_empty(&tp->rx_done))
+ return;
+
+ INIT_LIST_HEAD(&rx_queue);
spin_lock_irqsave(&tp->rx_lock, flags);
- list_for_each_safe(cursor, next, &tp->rx_done) {
+ list_splice_init(&tp->rx_done, &rx_queue);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ list_for_each_safe(cursor, next, &rx_queue) {
struct rx_desc *rx_desc;
struct rx_agg *agg;
int len_used = 0;
@@ -1348,7 +1640,6 @@ static void rx_bottom(struct r8152 *tp)
int ret;
list_del_init(cursor);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
agg = list_entry(cursor, struct rx_agg, list);
urb = agg->urb;
@@ -1361,7 +1652,7 @@ static void rx_bottom(struct r8152 *tp)
while (urb->actual_length > len_used) {
struct net_device *netdev = tp->netdev;
- struct net_device_stats *stats;
+ struct net_device_stats *stats = &netdev->stats;
unsigned int pkt_len;
struct sk_buff *skb;
@@ -1373,23 +1664,24 @@ static void rx_bottom(struct r8152 *tp)
if (urb->actual_length < len_used)
break;
- stats = rtl8152_get_stats(netdev);
-
pkt_len -= CRC_SIZE;
rx_data += sizeof(struct rx_desc);
skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
if (!skb) {
stats->rx_dropped++;
- break;
+ goto find_next_rx;
}
+
+ skb->ip_summed = r8152_rx_csum(tp, rx_desc);
memcpy(skb->data, rx_data, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
+ netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += pkt_len;
+find_next_rx:
rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
rx_desc = (struct rx_desc *)rx_data;
len_used = (int)(rx_data - (u8 *)agg->head);
@@ -1398,13 +1690,13 @@ static void rx_bottom(struct r8152 *tp)
submit:
ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
- spin_lock_irqsave(&tp->rx_lock, flags);
if (ret && ret != -ENODEV) {
- list_add_tail(&agg->list, next);
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
tasklet_schedule(&tp->tl);
}
}
- spin_unlock_irqrestore(&tp->rx_lock, flags);
}
static void tx_bottom(struct r8152 *tp)
@@ -1423,19 +1715,18 @@ static void tx_bottom(struct r8152 *tp)
res = r8152_tx_agg_fill(tp, agg);
if (res) {
- struct net_device_stats *stats;
- struct net_device *netdev;
- unsigned long flags;
-
- netdev = tp->netdev;
- stats = rtl8152_get_stats(netdev);
+ struct net_device *netdev = tp->netdev;
if (res == -ENODEV) {
netif_device_detach(netdev);
} else {
+ struct net_device_stats *stats = &netdev->stats;
+ unsigned long flags;
+
netif_warn(tp, tx_err, netdev,
"failed tx_urb %d\n", res);
stats->tx_dropped += agg->skb_num;
+
spin_lock_irqsave(&tp->tx_lock, flags);
list_add_tail(&agg->list, &tp->tx_free);
spin_unlock_irqrestore(&tp->tx_lock, flags);
@@ -1475,6 +1766,26 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
return usb_submit_urb(agg->urb, mem_flags);
}
+static void rtl_drop_queued_tx(struct r8152 *tp)
+{
+ struct net_device_stats *stats = &tp->netdev->stats;
+ struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(tx_queue))
+ return;
+
+ __skb_queue_head_init(&skb_head);
+ spin_lock_bh(&tx_queue->lock);
+ skb_queue_splice_init(tx_queue, &skb_head);
+ spin_unlock_bh(&tx_queue->lock);
+
+ while ((skb = __skb_dequeue(&skb_head))) {
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ }
+}
+
static void rtl8152_tx_timeout(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -1538,7 +1849,7 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
}
static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+ struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -1546,13 +1857,17 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
skb_queue_tail(&tp->tx_queue, skb);
- if (list_empty(&tp->tx_free) &&
- skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
+ if (!list_empty(&tp->tx_free)) {
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ set_bit(SCHEDULE_TASKLET, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ } else {
+ usb_mark_last_busy(tp->udev);
+ tasklet_schedule(&tp->tl);
+ }
+ } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
netif_stop_queue(netdev);
- if (!list_empty(&tp->tx_free))
- tasklet_schedule(&tp->tl);
-
return NETDEV_TX_OK;
}
@@ -1610,6 +1925,18 @@ static void rtl_set_eee_plus(struct r8152 *tp)
}
}
+static void rxdy_gated_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ if (enable)
+ ocp_data |= RXDY_GATED_EN;
+ else
+ ocp_data &= ~RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+}
+
static int rtl_enable(struct r8152 *tp)
{
u32 ocp_data;
@@ -1621,9 +1948,7 @@ static int rtl_enable(struct r8152 *tp)
ocp_data |= CR_RE | CR_TE;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
INIT_LIST_HEAD(&tp->rx_done);
ret = 0;
@@ -1678,8 +2003,6 @@ static int rtl8153_enable(struct r8152 *tp)
static void rtl8152_disable(struct r8152 *tp)
{
- struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
- struct sk_buff *skb;
u32 ocp_data;
int i;
@@ -1687,17 +2010,12 @@ static void rtl8152_disable(struct r8152 *tp)
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- while ((skb = skb_dequeue(&tp->tx_queue))) {
- dev_kfree_skb(skb);
- stats->tx_dropped++;
- }
+ rtl_drop_queued_tx(tp);
for (i = 0; i < RTL8152_MAX_TX; i++)
usb_kill_urb(tp->tx_info[i].urb);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, true);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -1718,18 +2036,209 @@ static void rtl8152_disable(struct r8152 *tp)
rtl8152_nic_reset(tp);
}
+static void r8152_power_cut_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+ if (enable)
+ ocp_data |= POWER_CUT;
+ else
+ ocp_data &= ~POWER_CUT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
+ ocp_data &= ~RESUME_INDICATE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+}
+
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+
+static u32 __rtl_get_wol(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u32 wolopts = 0;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ if (!(ocp_data & LAN_WAKE_EN))
+ return 0;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ if (ocp_data & LINK_ON_WAKE_EN)
+ wolopts |= WAKE_PHY;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ if (ocp_data & UWF_EN)
+ wolopts |= WAKE_UCAST;
+ if (ocp_data & BWF_EN)
+ wolopts |= WAKE_BCAST;
+ if (ocp_data & MWF_EN)
+ wolopts |= WAKE_MCAST;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ if (ocp_data & MAGIC_EN)
+ wolopts |= WAKE_MAGIC;
+
+ return wolopts;
+}
+
+static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
+{
+ u32 ocp_data;
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data &= ~LINK_ON_WAKE_EN;
+ if (wolopts & WAKE_PHY)
+ ocp_data |= LINK_ON_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+ if (wolopts & WAKE_UCAST)
+ ocp_data |= UWF_EN;
+ if (wolopts & WAKE_BCAST)
+ ocp_data |= BWF_EN;
+ if (wolopts & WAKE_MCAST)
+ ocp_data |= MWF_EN;
+ if (wolopts & WAKE_ANY)
+ ocp_data |= LAN_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ ocp_data &= ~MAGIC_EN;
+ if (wolopts & WAKE_MAGIC)
+ ocp_data |= MAGIC_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
+
+ if (wolopts & WAKE_ANY)
+ device_set_wakeup_enable(&tp->udev->dev, true);
+ else
+ device_set_wakeup_enable(&tp->udev->dev, false);
+}
+
+static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
+{
+ if (enable) {
+ u32 ocp_data;
+
+ __rtl_set_wol(tp, WAKE_ANY);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data |= LINK_OFF_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ } else {
+ __rtl_set_wol(tp, tp->saved_wolopts);
+ }
+}
+
+static void rtl_phy_reset(struct r8152 *tp)
+{
+ u16 data;
+ int i;
+
+ clear_bit(PHY_RESET, &tp->flags);
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+
+ /* don't reset again before the previous one completes */
+ if (data & BMCR_RESET)
+ return;
+
+ data |= BMCR_RESET;
+ r8152_mdio_write(tp, MII_BMCR, data);
+
+ for (i = 0; i < 50; i++) {
+ msleep(20);
+ if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
+ break;
+ }
+}
+
+static void rtl_clear_bp(struct r8152 *tp)
+{
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
+ mdelay(3);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
+}
+
+static void r8153_clear_bp(struct r8152 *tp)
+{
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
+ rtl_clear_bp(tp);
+}
+
+static void r8153_teredo_off(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
+}
+
+static void r8152b_disable_aldps(struct r8152 *tp)
+{
+ ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
+ msleep(20);
+}
+
+static inline void r8152b_enable_aldps(struct r8152 *tp)
+{
+ ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
+ LINKENA | DIS_SDSAVE);
+}
+
+static void r8152b_hw_phy_cfg(struct r8152 *tp)
+{
+ u16 data;
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ r8152b_disable_aldps(tp);
+
+ rtl_clear_bp(tp);
+
+ r8152b_enable_aldps(tp);
+ set_bit(PHY_RESET, &tp->flags);
+}
+
static void r8152b_exit_oob(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ u32 ocp_data;
+ int i;
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, true);
+ r8153_teredo_off(tp);
+ r8152b_hw_phy_cfg(tp);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
@@ -1835,10 +2344,6 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
- ocp_data |= MAGIC_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
ocp_data |= CPCR_RX_VLAN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
@@ -1851,36 +2356,26 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data |= RCR_APM | RCR_AM | RCR_AB;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
-static void r8152b_disable_aldps(struct r8152 *tp)
-{
- ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
- msleep(20);
-}
-
-static inline void r8152b_enable_aldps(struct r8152 *tp)
-{
- ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
- LINKENA | DIS_SDSAVE);
-}
-
static void r8153_hw_phy_cfg(struct r8152 *tp)
{
u32 ocp_data;
u16 data;
ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
- r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ r8153_clear_bp(tp);
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -1916,9 +2411,11 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
data = sram_read(tp, SRAM_10M_AMP2);
data |= AMP_DN;
sram_write(tp, SRAM_10M_AMP2, data);
+
+ set_bit(PHY_RESET, &tp->flags);
}
-static void r8153_u1u2en(struct r8152 *tp, int enable)
+static void r8153_u1u2en(struct r8152 *tp, bool enable)
{
u8 u1u2[8];
@@ -1930,7 +2427,7 @@ static void r8153_u1u2en(struct r8152 *tp, int enable)
usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
}
-static void r8153_u2p3en(struct r8152 *tp, int enable)
+static void r8153_u2p3en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
@@ -1942,7 +2439,7 @@ static void r8153_u2p3en(struct r8152 *tp, int enable)
ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
}
-static void r8153_power_cut_en(struct r8152 *tp, int enable)
+static void r8153_power_cut_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
@@ -1958,28 +2455,12 @@ static void r8153_power_cut_en(struct r8152 *tp, int enable)
ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
}
-static void r8153_teredo_off(struct r8152 *tp)
-{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
- ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
-}
-
static void r8153_first_init(struct r8152 *tp)
{
u32 ocp_data;
int i;
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
-
+ rxdy_gated_en(tp, true);
r8153_teredo_off(tp);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -2072,10 +2553,6 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
- ocp_data |= MAGIC_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
ocp_data &= ~TEREDO_WAKE_MASK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
@@ -2092,11 +2569,7 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data |= RCR_APM | RCR_AM | RCR_AB;
@@ -2187,12 +2660,26 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
}
+ if (test_bit(PHY_RESET, &tp->flags))
+ bmcr |= BMCR_RESET;
+
if (tp->mii.supports_gmii)
r8152_mdio_write(tp, MII_CTRL1000, gbcr);
r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
+ if (test_bit(PHY_RESET, &tp->flags)) {
+ int i;
+
+ clear_bit(PHY_RESET, &tp->flags);
+ for (i = 0; i < 50; i++) {
+ msleep(20);
+ if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
+ break;
+ }
+ }
+
out:
return ret;
@@ -2200,12 +2687,7 @@ out:
static void rtl8152_down(struct r8152 *tp)
{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data &= ~POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-
+ r8152_power_cut_en(tp, false);
r8152b_disable_aldps(tp);
r8152b_enter_oob(tp);
r8152b_enable_aldps(tp);
@@ -2213,8 +2695,8 @@ static void rtl8152_down(struct r8152 *tp)
static void rtl8153_down(struct r8152 *tp)
{
- r8153_u1u2en(tp, 0);
- r8153_power_cut_en(tp, 0);
+ r8153_u1u2en(tp, false);
+ r8153_power_cut_en(tp, false);
r8153_disable_aldps(tp);
r8153_enter_oob(tp);
r8153_enable_aldps(tp);
@@ -2249,6 +2731,9 @@ static void rtl_work_func_t(struct work_struct *work)
{
struct r8152 *tp = container_of(work, struct r8152, schedule.work);
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
if (!test_bit(WORK_ENABLE, &tp->flags))
goto out1;
@@ -2261,8 +2746,17 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
+ if (test_bit(SCHEDULE_TASKLET, &tp->flags) &&
+ (tp->speed & LINK_STATUS)) {
+ clear_bit(SCHEDULE_TASKLET, &tp->flags);
+ tasklet_schedule(&tp->tl);
+ }
+
+ if (test_bit(PHY_RESET, &tp->flags))
+ rtl_phy_reset(tp);
+
out1:
- return;
+ usb_autopm_put_interface(tp->intf);
}
static int rtl8152_open(struct net_device *netdev)
@@ -2270,6 +2764,27 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ res = alloc_all_mem(tp);
+ if (res)
+ goto out;
+
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0) {
+ free_all_mem(tp);
+ goto out;
+ }
+
+ /* The WORK_ENABLE may be set when autoresume occurs */
+ if (test_bit(WORK_ENABLE, &tp->flags)) {
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+ cancel_delayed_work_sync(&tp->schedule);
+ if (tp->speed & LINK_STATUS)
+ tp->rtl_ops.disable(tp);
+ }
+
+ tp->rtl_ops.up(tp);
+
rtl8152_set_speed(tp, AUTONEG_ENABLE,
tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
DUPLEX_FULL);
@@ -2277,15 +2792,19 @@ static int rtl8152_open(struct net_device *netdev)
netif_carrier_off(netdev);
netif_start_queue(netdev);
set_bit(WORK_ENABLE, &tp->flags);
+
res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
if (res) {
if (res == -ENODEV)
netif_device_detach(tp->netdev);
netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
res);
+ free_all_mem(tp);
}
+ usb_autopm_put_interface(tp->intf);
+out:
return res;
}
@@ -2298,33 +2817,30 @@ static int rtl8152_close(struct net_device *netdev)
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
- tasklet_disable(&tp->tl);
- tp->rtl_ops.disable(tp);
- tasklet_enable(&tp->tl);
- return res;
-}
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0) {
+ rtl_drop_queued_tx(tp);
+ } else {
+ /*
+ * The autosuspend may have been enabled and wouldn't
+ * be disabled when autoresume occurs, because the
+ * netif_running() would be false.
+ */
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, false);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ }
-static void rtl_clear_bp(struct r8152 *tp)
-{
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
- mdelay(3);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
- ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
-}
+ tasklet_disable(&tp->tl);
+ tp->rtl_ops.down(tp);
+ tasklet_enable(&tp->tl);
+ usb_autopm_put_interface(tp->intf);
+ }
-static void r8153_clear_bp(struct r8152 *tp)
-{
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
- ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
- rtl_clear_bp(tp);
+ free_all_mem(tp);
+
+ return res;
}
static void r8152b_enable_eee(struct r8152 *tp)
@@ -2375,18 +2891,18 @@ static void r8152b_enable_fc(struct r8152 *tp)
r8152_mdio_write(tp, MII_ADVERTISE, anar);
}
-static void r8152b_hw_phy_cfg(struct r8152 *tp)
+static void rtl_tally_reset(struct r8152 *tp)
{
- r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
- r8152b_disable_aldps(tp);
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY);
+ ocp_data |= TALLY_RESET;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY, ocp_data);
}
static void r8152b_init(struct r8152 *tp)
{
u32 ocp_data;
- int i;
-
- rtl_clear_bp(tp);
if (tp->version == RTL_VER_01) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
@@ -2394,17 +2910,7 @@ static void r8152b_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
}
- r8152b_hw_phy_cfg(tp);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data &= ~POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RESUME_INDICATE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
-
- r8152b_exit_oob(tp);
+ r8152_power_cut_en(tp, false);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH;
@@ -2420,14 +2926,7 @@ static void r8152b_init(struct r8152 *tp)
r8152b_enable_eee(tp);
r8152b_enable_aldps(tp);
r8152b_enable_fc(tp);
-
- r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
- BMCR_ANRESTART);
- for (i = 0; i < 100; i++) {
- udelay(100);
- if (!(r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET))
- break;
- }
+ rtl_tally_reset(tp);
/* enable rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
@@ -2440,7 +2939,7 @@ static void r8153_init(struct r8152 *tp)
u32 ocp_data;
int i;
- r8153_u1u2en(tp, 0);
+ r8153_u1u2en(tp, false);
for (i = 0; i < 500; i++) {
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
@@ -2456,14 +2955,12 @@ static void r8153_init(struct r8152 *tp)
msleep(20);
}
- r8153_u2p3en(tp, 0);
+ r8153_u2p3en(tp, false);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
ocp_data &= ~TIMER11_EN;
ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);
- r8153_clear_bp(tp);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
ocp_data &= ~LED_MODE_MASK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
@@ -2481,10 +2978,8 @@ static void r8153_init(struct r8152 *tp)
ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);
- r8153_power_cut_en(tp, 0);
- r8153_u1u2en(tp, 1);
-
- r8153_first_init(tp);
+ r8153_power_cut_en(tp, false);
+ r8153_u1u2en(tp, true);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
@@ -2499,26 +2994,31 @@ static void r8153_init(struct r8152 *tp)
r8153_enable_eee(tp);
r8153_enable_aldps(tp);
r8152b_enable_fc(tp);
-
- r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
- BMCR_ANRESTART);
+ rtl_tally_reset(tp);
}
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
struct r8152 *tp = usb_get_intfdata(intf);
- netif_device_detach(tp->netdev);
+ if (PMSG_IS_AUTO(message))
+ set_bit(SELECTIVE_SUSPEND, &tp->flags);
+ else
+ netif_device_detach(tp->netdev);
if (netif_running(tp->netdev)) {
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
- tasklet_disable(&tp->tl);
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, true);
+ } else {
+ tasklet_disable(&tp->tl);
+ tp->rtl_ops.down(tp);
+ tasklet_enable(&tp->tl);
+ }
}
- tp->rtl_ops.down(tp);
-
return 0;
}
@@ -2526,22 +3026,77 @@ static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
- tp->rtl_ops.init(tp);
- netif_device_attach(tp->netdev);
+ if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ tp->rtl_ops.init(tp);
+ netif_device_attach(tp->netdev);
+ }
+
if (netif_running(tp->netdev)) {
- rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, false);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ if (tp->speed & LINK_STATUS)
+ tp->rtl_ops.disable(tp);
+ } else {
+ tp->rtl_ops.up(tp);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
DUPLEX_FULL);
+ }
tp->speed = 0;
netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
- tasklet_enable(&tp->tl);
}
return 0;
}
+static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl_get_wol(tp);
+
+ usb_autopm_put_interface(tp->intf);
+}
+
+static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out_set_wol;
+
+ __rtl_set_wol(tp, wol->wolopts);
+ tp->saved_wolopts = wol->wolopts & WAKE_ANY;
+
+ usb_autopm_put_interface(tp->intf);
+
+out_set_wol:
+ return ret;
+}
+
+static u32 rtl8152_get_msglevel(struct net_device *dev)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ return tp->msg_enable;
+}
+
+static void rtl8152_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ tp->msg_enable = value;
+}
+
static void rtl8152_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -2566,8 +3121,76 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
- return rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+ ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
+}
+
+static const char rtl8152_gstrings[][ETH_GSTRING_LEN] = {
+ "tx_packets",
+ "rx_packets",
+ "tx_errors",
+ "rx_errors",
+ "rx_missed",
+ "align_errors",
+ "tx_single_collisions",
+ "tx_multi_collisions",
+ "rx_unicast",
+ "rx_broadcast",
+ "rx_multicast",
+ "tx_aborted",
+ "tx_underrun",
+};
+
+static int rtl8152_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(rtl8152_gstrings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void rtl8152_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct r8152 *tp = netdev_priv(dev);
+ struct tally_counter tally;
+
+ generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
+
+ data[0] = le64_to_cpu(tally.tx_packets);
+ data[1] = le64_to_cpu(tally.rx_packets);
+ data[2] = le64_to_cpu(tally.tx_errors);
+ data[3] = le32_to_cpu(tally.rx_errors);
+ data[4] = le16_to_cpu(tally.rx_missed);
+ data[5] = le16_to_cpu(tally.align_errors);
+ data[6] = le32_to_cpu(tally.tx_one_collision);
+ data[7] = le32_to_cpu(tally.tx_multi_collision);
+ data[8] = le64_to_cpu(tally.rx_unicast);
+ data[9] = le64_to_cpu(tally.rx_broadcast);
+ data[10] = le32_to_cpu(tally.rx_multicast);
+ data[11] = le16_to_cpu(tally.tx_aborted);
+ data[12] = le16_to_cpu(tally.tx_underun);
+}
+
+static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
+ break;
+ }
}
static struct ethtool_ops ops = {
@@ -2575,13 +3198,24 @@ static struct ethtool_ops ops = {
.get_settings = rtl8152_get_settings,
.set_settings = rtl8152_set_settings,
.get_link = ethtool_op_get_link,
+ .get_msglevel = rtl8152_get_msglevel,
+ .set_msglevel = rtl8152_set_msglevel,
+ .get_wol = rtl8152_get_wol,
+ .set_wol = rtl8152_set_wol,
+ .get_strings = rtl8152_get_strings,
+ .get_sset_count = rtl8152_get_sset_count,
+ .get_ethtool_stats = rtl8152_get_ethtool_stats,
};
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
struct r8152 *tp = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(rq);
- int res = 0;
+ int res;
+
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0)
+ goto out;
switch (cmd) {
case SIOCGMIIPHY:
@@ -2604,6 +3238,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
res = -EOPNOTSUPP;
}
+ usb_autopm_put_interface(tp->intf);
+
+out:
return res;
}
@@ -2656,22 +3293,13 @@ static void r8152b_get_version(struct r8152 *tp)
static void rtl8152_unload(struct r8152 *tp)
{
- u32 ocp_data;
-
- if (tp->version != RTL_VER_01) {
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data |= POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
- }
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RESUME_INDICATE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+ if (tp->version != RTL_VER_01)
+ r8152_power_cut_en(tp, true);
}
static void rtl8153_unload(struct r8152 *tp)
{
- r8153_power_cut_en(tp, 1);
+ r8153_power_cut_en(tp, true);
}
static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
@@ -2686,6 +3314,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8152b_init;
ops->enable = rtl8152_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8152b_exit_oob;
ops->down = rtl8152_down;
ops->unload = rtl8152_unload;
ret = 0;
@@ -2694,6 +3323,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8153_first_init;
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ret = 0;
@@ -2709,6 +3339,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8153_first_init;
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ret = 0;
@@ -2766,9 +3397,15 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
- netdev->features |= NETIF_F_IP_CSUM;
- netdev->hw_features = NETIF_F_IP_CSUM;
+ netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO6;
+ netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_FRAGLIST |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
SET_ETHTOOL_OPS(netdev, &ops);
+ netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
tp->mii.dev = netdev;
tp->mii.mdio_read = read_mii_word;
@@ -2778,14 +3415,12 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->mii.phy_id = R8152_PHY_ID;
tp->mii.supports_gmii = 0;
+ intf->needs_remote_wakeup = 1;
+
r8152b_get_version(tp);
tp->rtl_ops.init(tp);
set_ethernet_addr(tp);
- ret = alloc_all_mem(tp);
- if (ret)
- goto out;
-
usb_set_intfdata(intf, tp);
ret = register_netdev(netdev);
@@ -2794,6 +3429,12 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out1;
}
+ tp->saved_wolopts = __rtl_get_wol(tp);
+ if (tp->saved_wolopts)
+ device_set_wakeup_enable(&udev->dev, true);
+ else
+ device_set_wakeup_enable(&udev->dev, false);
+
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
@@ -2815,7 +3456,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
tp->rtl_ops.unload(tp);
- free_all_mem(tp);
free_netdev(tp->netdev);
}
}
@@ -2838,6 +3478,8 @@ static struct usb_driver rtl8152_driver = {
.suspend = rtl8152_suspend,
.resume = rtl8152_resume,
.reset_resume = rtl8152_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
};
module_usb_driver(rtl8152_driver);
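A minimal sketch of the fallback pattern used by the r8152 changes above: checksum and TSO are offloaded only while the transport header lies within the hardware limits (GTTCPHO_MAX for TSO, TCPHO_MAX otherwise), otherwise the packet is handled by skb_gso_segment()/skb_checksum_help(). This fragment is not code from the patch; the function name and EXAMPLE_* limits are illustrative stand-ins for the defines above.

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define EXAMPLE_TSO_OFFSET_MAX	0x7fU	/* mirrors GTTCPHO_MAX */
#define EXAMPLE_CSUM_OFFSET_MAX	0x7ffU	/* mirrors TCPHO_MAX */

/* Returns the original skb when hardware offload is usable, a list of
 * software-built segments when TSO has to be undone, or an ERR_PTR when
 * the packet should simply be dropped.
 */
static struct sk_buff *example_tx_fixup(struct net_device *dev,
					struct sk_buff *skb)
{
	unsigned int off = skb_transport_offset(skb);

	if (skb_is_gso(skb) && off > EXAMPLE_TSO_OFFSET_MAX) {
		netdev_features_t features = dev->features;

		/* Hardware cannot encode this offset; segment in software. */
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
		return skb_gso_segment(skb, features);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    off > EXAMPLE_CSUM_OFFSET_MAX && skb_checksum_help(skb) < 0)
		return ERR_PTR(-EIO);

	return skb;
}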
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index c0e7c64765ab..b4a10bcb66a0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -14,6 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
+#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/veth.h>
@@ -155,10 +156,10 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
unsigned int start;
do {
- start = u64_stats_fetch_begin_bh(&stats->syncp);
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
packets = stats->packets;
bytes = stats->bytes;
- } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
result->packets += packets;
result->bytes += bytes;
}
@@ -235,18 +236,9 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
static int veth_dev_init(struct net_device *dev)
{
- int i;
-
- dev->vstats = alloc_percpu(struct pcpu_vstats);
+ dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
if (!dev->vstats)
return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct pcpu_vstats *veth_stats;
- veth_stats = per_cpu_ptr(dev->vstats, i);
- u64_stats_init(&veth_stats->syncp);
- }
-
return 0;
}
@@ -336,10 +328,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
nla_peer = data[VETH_INFO_PEER];
ifmp = nla_data(nla_peer);
- err = nla_parse(peer_tb, IFLA_MAX,
- nla_data(nla_peer) + sizeof(struct ifinfomsg),
- nla_len(nla_peer) - sizeof(struct ifinfomsg),
- ifla_policy);
+ err = rtnl_nla_parse_ifla(peer_tb,
+ nla_data(nla_peer) + sizeof(struct ifinfomsg),
+ nla_len(nla_peer) - sizeof(struct ifinfomsg));
if (err < 0)
return err;
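The veth conversion above (and virtio_net below) keeps per-CPU counters allocated with netdev_alloc_pcpu_stats() and reads them with the _irq variants of the u64_stats helpers. A minimal sketch of that read pattern, assuming a device that stores a struct pcpu_sw_netstats per CPU in dev->tstats; the helper name is illustrative.

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Sum 64-bit per-CPU counters with the begin/retry loop so reads stay
 * consistent on 32-bit machines, as veth_stats_one() does above.
 */
static void example_sum_tx_stats(struct net_device *dev,
				 u64 *tx_packets, u64 *tx_bytes)
{
	int cpu;

	*tx_packets = 0;
	*tx_bytes = 0;

	for_each_possible_cpu(cpu) {
		const struct pcpu_sw_netstats *s =
			per_cpu_ptr(dev->tstats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			packets = s->tx_packets;
			bytes = s->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		*tx_packets += packets;
		*tx_bytes += bytes;
	}
}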
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 841b60831df1..7b687469199b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -882,7 +882,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_warn(&dev->dev,
"Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
dev->stats.tx_dropped++;
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
virtqueue_kick(sq->vq);
@@ -938,7 +938,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sgs[out_num] = &stat;
BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
- BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC) < 0);
+ virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
if (unlikely(!virtqueue_kick(vi->cvq)))
return status == VIRTIO_NET_OK;
@@ -1000,16 +1000,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
u64 tpackets, tbytes, rpackets, rbytes;
do {
- start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
+ start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
tpackets = stats->tx_packets;
tbytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
do {
- start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
+ start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
rpackets = stats->rx_packets;
rbytes = stats->rx_bytes;
- } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
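The kfree_skb() to dev_kfree_skb_any() switches here and in the vmxnet3 hunk below matter because a transmit drop path may run in hard-interrupt context or with interrupts disabled (netpoll being the usual case), where plain kfree_skb() is not allowed. A minimal sketch of the drop idiom; the function name is illustrative.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Drop an skb from an ndo_start_xmit handler. dev_kfree_skb_any() picks
 * the IRQ-safe free path when needed; NETDEV_TX_OK tells the stack the
 * skb was consumed and must not be requeued.
 */
static netdev_tx_t example_xmit_drop(struct sk_buff *skb,
				     struct net_device *dev)
{
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}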
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0fa3b44f7342..97394345e5dd 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1078,7 +1078,7 @@ unlock_drop_pkt:
spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
tq->stats.drop_total++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2738,47 +2738,35 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
/*
* Enable MSIx vectors.
* Returns :
- * 0 on successful enabling of required vectors,
* VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
- * could be enabled.
- * number of vectors which can be enabled otherwise (this number is smaller
+ * were enabled.
+ * number of vectors which were enabled otherwise (this number is greater
* than VMXNET3_LINUX_MIN_MSIX_VECT)
*/
static int
-vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
- int vectors)
-{
- int err = 0, vector_threshold;
- vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
-
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
- vectors);
- if (!err) {
- adapter->intr.num_intrs = vectors;
- return 0;
- } else if (err < 0) {
- dev_err(&adapter->netdev->dev,
- "Failed to enable MSI-X, error: %d\n", err);
- vectors = 0;
- } else if (err < vector_threshold) {
- break;
- } else {
- /* If fails to enable required number of MSI-x vectors
- * try enabling minimum number of vectors required.
- */
- dev_err(&adapter->netdev->dev,
- "Failed to enable %d MSI-X, trying %d instead\n",
- vectors, vector_threshold);
- vectors = vector_threshold;
- }
+vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
+{
+ int ret = pci_enable_msix_range(adapter->pdev,
+ adapter->intr.msix_entries, nvec, nvec);
+
+ if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable %d MSI-X, trying %d\n",
+ nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
+
+ ret = pci_enable_msix_range(adapter->pdev,
+ adapter->intr.msix_entries,
+ VMXNET3_LINUX_MIN_MSIX_VECT,
+ VMXNET3_LINUX_MIN_MSIX_VECT);
}
- dev_info(&adapter->pdev->dev,
- "Number of MSI-X interrupts which can be allocated "
- "is lower than min threshold required.\n");
- return err;
+ if (ret < 0) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable MSI-X, error: %d\n", ret);
+ }
+
+ return ret;
}
@@ -2805,56 +2793,50 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- int vector, err = 0;
-
- adapter->intr.num_intrs = (adapter->share_intr ==
- VMXNET3_INTR_TXSHARE) ? 1 :
- adapter->num_tx_queues;
- adapter->intr.num_intrs += (adapter->share_intr ==
- VMXNET3_INTR_BUDDYSHARE) ? 0 :
- adapter->num_rx_queues;
- adapter->intr.num_intrs += 1; /* for link event */
-
- adapter->intr.num_intrs = (adapter->intr.num_intrs >
- VMXNET3_LINUX_MIN_MSIX_VECT
- ? adapter->intr.num_intrs :
- VMXNET3_LINUX_MIN_MSIX_VECT);
-
- for (vector = 0; vector < adapter->intr.num_intrs; vector++)
- adapter->intr.msix_entries[vector].entry = vector;
-
- err = vmxnet3_acquire_msix_vectors(adapter,
- adapter->intr.num_intrs);
+ int i, nvec;
+
+ nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
+ 1 : adapter->num_tx_queues;
+ nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
+ 0 : adapter->num_rx_queues;
+ nvec += 1; /* for link event */
+ nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
+ nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
+
+ for (i = 0; i < nvec; i++)
+ adapter->intr.msix_entries[i].entry = i;
+
+ nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
+ if (nvec < 0)
+ goto msix_err;
+
/* If we cannot allocate one MSIx vector per queue
* then limit the number of rx queues to 1
*/
- if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
+ if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|| adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
netdev_err(adapter->netdev,
"Number of rx queues : 1\n");
adapter->num_rx_queues = 1;
- adapter->intr.num_intrs =
- VMXNET3_LINUX_MIN_MSIX_VECT;
}
- return;
}
- if (!err)
- return;
+ adapter->intr.num_intrs = nvec;
+ return;
+
+msix_err:
/* If we cannot allocate MSIx vectors use only one rx queue */
dev_info(&adapter->pdev->dev,
"Failed to enable MSI-X, error %d. "
- "Limiting #rx queues to 1, try MSI.\n", err);
+ "Limiting #rx queues to 1, try MSI.\n", nvec);
adapter->intr.type = VMXNET3_IT_MSI;
}
if (adapter->intr.type == VMXNET3_IT_MSI) {
- int err;
- err = pci_enable_msi(adapter->pdev);
- if (!err) {
+ if (!pci_enable_msi(adapter->pdev)) {
adapter->num_rx_queues = 1;
adapter->intr.num_intrs = 1;
return;
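The vmxnet3 rework above replaces the open-coded pci_enable_msix() retry loop with pci_enable_msix_range(), which allocates anywhere between a minimum and a maximum vector count or fails with a negative errno (-ENOSPC when even the minimum cannot be met). A minimal usage sketch; the function name is illustrative.

#include <linux/pci.h>

static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries,
			       int min_vecs, int max_vecs)
{
	int i, nvec;

	for (i = 0; i < max_vecs; i++)
		entries[i].entry = i;

	/* Returns the number of vectors granted (>= min_vecs) or < 0. */
	nvec = pci_enable_msix_range(pdev, entries, min_vecs, max_vecs);
	if (nvec < 0)
		return nvec;	/* caller falls back to MSI or INTx */

	return nvec;
}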
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1236812c7be6..c55e316373a1 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -871,6 +871,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (err)
return err;
+ if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
+ return -EAFNOSUPPORT;
+
spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
port, vni, ifindex, ndm->ndm_flags);
@@ -1132,7 +1135,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
- __be16 port;
/* Need Vxlan and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1150,8 +1152,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
goto drop;
- port = inet_sk(sk)->inet_sport;
-
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
@@ -2080,19 +2080,11 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_sock *vs;
- int i;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *vxlan_stats;
- vxlan_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&vxlan_stats->syncp);
- }
-
-
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
if (vs) {
@@ -2612,9 +2604,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
vni = nla_get_u32(data[IFLA_VXLAN_ID]);
dst->remote_vni = vni;
+ /* Unless IPv6 is explicitly requested, assume IPv4 */
+ dst->remote_ip.sa.sa_family = AF_INET;
if (data[IFLA_VXLAN_GROUP]) {
dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
- dst->remote_ip.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_GROUP6]) {
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
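The vxlan fix above defaults the remote address family to AF_INET and rejects fdb entries whose family differs from the device's default destination. A minimal sketch of that family check, with the union redeclared locally for illustration (vxlan's own type is union vxlan_addr).

#include <linux/errno.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/socket.h>

union example_addr {
	struct sockaddr_in	sin;
	struct sockaddr_in6	sin6;
	struct sockaddr		sa;
};

/* Reject a new fdb address whose family differs from the configured
 * default destination, as vxlan_fdb_add() now does above.
 */
static int example_check_family(const union example_addr *dflt,
				const union example_addr *ip)
{
	if (dflt->sa.sa_family != ip->sa.sa_family)
		return -EAFNOSUPPORT;
	return 0;
}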
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 48896138418f..a9970f1af976 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -374,8 +374,7 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
- if (skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_cow_head(skb, 0))
goto drop;
if (i2400m->state == I2400M_SS_IDLE)
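The i2400m change above folds the skb_header_cloned()/pskb_expand_head() pair into skb_cow_head(), which guarantees a private, writable header (and optionally extra headroom) before the driver touches it. A minimal sketch of the idiom; the function name and the header edit are illustrative.

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

static int example_prepare_header(struct sk_buff *skb)
{
	struct ethhdr *eth;

	/* 0 extra headroom requested; a copy is made only if needed. */
	if (skb_cow_head(skb, 0))
		return -ENOMEM;

	eth = eth_hdr(skb);		/* now safe to modify in place */
	eth->h_proto = htons(ETH_P_IP);	/* illustrative edit */
	return 0;
}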
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 200020eb3005..b2137e8f7ca6 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -53,7 +53,7 @@ config LIBERTAS_THINFIRM_USB
config AIRO
tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
- depends on ISA_DMA_API && (PCI || BROKEN)
+ depends on CFG80211 && ISA_DMA_API && (PCI || BROKEN)
select WIRELESS_EXT
select CRYPTO
select WEXT_SPY
@@ -73,7 +73,7 @@ config AIRO
config ATMEL
tristate "Atmel at76c50x chipset 802.11b support"
- depends on (PCI || PCMCIA)
+ depends on CFG80211 && (PCI || PCMCIA)
select WIRELESS_EXT
select WEXT_PRIV
select FW_LOADER
@@ -116,7 +116,7 @@ config AT76C50X_USB
config AIRO_CS
tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
- depends on PCMCIA && (BROKEN || !M32R)
+ depends on CFG80211 && PCMCIA && (BROKEN || !M32R)
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
@@ -138,7 +138,7 @@ config AIRO_CS
config PCMCIA_WL3501
tristate "Planet WL3501 PCMCIA cards"
- depends on PCMCIA
+ depends on CFG80211 && PCMCIA
select WIRELESS_EXT
select WEXT_SPY
help
@@ -168,7 +168,7 @@ config PRISM54
config USB_ZD1201
tristate "USB ZD1201 based Wireless device support"
- depends on USB
+ depends on CFG80211 && USB
select WIRELESS_EXT
select WEXT_PRIV
select FW_LOADER
@@ -281,5 +281,6 @@ source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"
source "drivers/net/wireless/mwifiex/Kconfig"
source "drivers/net/wireless/cw1200/Kconfig"
+source "drivers/net/wireless/rsi/Kconfig"
endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0fab227025be..0c8891686718 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -59,3 +59,4 @@ obj-$(CONFIG_BRCMFMAC) += brcm80211/
obj-$(CONFIG_BRCMSMAC) += brcm80211/
obj-$(CONFIG_CW1200) += cw1200/
+obj-$(CONFIG_RSI_91X) += rsi/
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index edf4b57c4aaa..64747d457bb3 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -36,7 +36,7 @@
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
#include <linux/netdevice.h>
@@ -45,11 +45,11 @@
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/pci.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
-#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
#include <net/iw_handler.h>
#include "airo.h"
@@ -5797,7 +5797,7 @@ static int airo_set_freq(struct net_device *dev,
/* Hack to fall through... */
fwrq->e = 0;
- fwrq->m = ieee80211_freq_to_dsss_chan(f);
+ fwrq->m = ieee80211_frequency_to_channel(f);
}
/* Setting by channel number */
if((fwrq->m > 1000) || (fwrq->e > 0))
@@ -5841,7 +5841,8 @@ static int airo_get_freq(struct net_device *dev,
ch = le16_to_cpu(status_rid.channel);
if((ch > 0) && (ch < 15)) {
- fwrq->m = ieee80211_dsss_chan_to_freq(ch) * 100000;
+ fwrq->m = 100000 *
+ ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ);
fwrq->e = 1;
} else {
fwrq->m = ch;
@@ -6898,7 +6899,8 @@ static int airo_get_range(struct net_device *dev,
k = 0;
for(i = 0; i < 14; i++) {
range->freq[k].i = i + 1; /* List index */
- range->freq[k].m = ieee80211_dsss_chan_to_freq(i + 1) * 100000;
+ range->freq[k].m = 100000 *
+ ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ);
range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
}
range->num_frequency = k;
@@ -7297,7 +7299,8 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Add frequency */
iwe.cmd = SIOCGIWFREQ;
iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
- iwe.u.freq.m = ieee80211_dsss_chan_to_freq(iwe.u.freq.m) * 100000;
+ iwe.u.freq.m = 100000 *
+ ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ);
iwe.u.freq.e = 1;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_FREQ_LEN);
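
The conversions above rely on the standard 2.4 GHz channel plan, and the extra factor of 100000 together with iwe.u.freq.e = 1 expresses the result in Hz for wireless extensions (value = m * 10^e). A quick standalone check of the mapping; the helper below reimplements only the 2.4 GHz case and is not the mac80211 function:

#include <stdio.h>

/* 2.4 GHz band mapping: channels 1-13 are spaced 5 MHz apart starting at
 * 2412 MHz, channel 14 sits at 2484 MHz */
static int chan_to_freq_2ghz(int chan)
{
        if (chan == 14)
                return 2484;
        if (chan >= 1 && chan <= 13)
                return 2407 + chan * 5;
        return -1;                      /* invalid channel */
}

int main(void)
{
        int ch;

        for (ch = 1; ch <= 14; ch++)
                printf("channel %2d -> %d MHz -> iwe.u.freq.m = %d (e = 1)\n",
                       ch, chan_to_freq_2ghz(ch), chan_to_freq_2ghz(ch) * 100000);
        return 0;
}
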
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index b59cfbe0276b..a889fd66fc63 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -56,6 +56,15 @@ enum ath_device_state {
ATH_HW_INITIALIZED,
};
+enum ath_op_flags {
+ ATH_OP_INVALID,
+ ATH_OP_BEACONS,
+ ATH_OP_ANI_RUN,
+ ATH_OP_PRIM_STA_VIF,
+ ATH_OP_HW_RESET,
+ ATH_OP_SCANNING,
+};
+
enum ath_bus_type {
ATH_PCI,
ATH_AHB,
@@ -63,7 +72,7 @@ enum ath_bus_type {
};
struct reg_dmn_pair_mapping {
- u16 regDmnEnum;
+ u16 reg_domain;
u16 reg_5ghz_ctl;
u16 reg_2ghz_ctl;
};
@@ -130,6 +139,7 @@ struct ath_common {
struct ieee80211_hw *hw;
int debug_mask;
enum ath_device_state state;
+ unsigned long op_flags;
struct ath_ani ani;
@@ -161,6 +171,9 @@ struct ath_common {
bool btcoex_enabled;
bool disable_ani;
bool bt_ant_diversity;
+
+ int last_rssi;
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
};
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
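
The new enum ath_op_flags plus the unsigned long op_flags field are presumably meant to be used as bit indices (set_bit()/test_bit() style) rather than as plain values, given the unsigned long type; the enum itself does not enforce that. A tiny standalone illustration of that convention, with plain C stand-ins for the kernel bitops:

#include <stdio.h>

enum ath_op_flags {
        ATH_OP_INVALID,
        ATH_OP_BEACONS,
        ATH_OP_ANI_RUN,
        ATH_OP_PRIM_STA_VIF,
        ATH_OP_HW_RESET,
        ATH_OP_SCANNING,
};

/* each enumerator names a bit position inside op_flags */
static void set_flag(unsigned long *flags, int bit)
{
        *flags |= 1UL << bit;
}

static void clear_flag(unsigned long *flags, int bit)
{
        *flags &= ~(1UL << bit);
}

static int test_flag(unsigned long flags, int bit)
{
        return !!(flags & (1UL << bit));
}

int main(void)
{
        unsigned long op_flags = 0;

        set_flag(&op_flags, ATH_OP_SCANNING);
        set_flag(&op_flags, ATH_OP_BEACONS);
        printf("scanning=%d hw_reset=%d\n",
               test_flag(op_flags, ATH_OP_SCANNING),
               test_flag(op_flags, ATH_OP_HW_RESET));
        clear_flag(&op_flags, ATH_OP_SCANNING);
        printf("scanning=%d\n", test_flag(op_flags, ATH_OP_SCANNING));
        return 0;
}
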
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index d44d618b05f9..a79499c82350 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -266,12 +266,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
* ath10k_ce_sendlist_send.
* The caller takes responsibility for any needed locking.
*/
-static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
- void *per_transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@@ -1067,9 +1067,9 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
*
* For the lack of a better place do the check here.
*/
- BUILD_BUG_ON(TARGET_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
ret = ath10k_pci_wake(ar);
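
BUILD_BUG_ON() in the hunk above turns the "two CE entries per MSDU must still fit in the ring" requirement into a compile-time failure. Standard C11 can express the same idea with _Static_assert; the descriptor count below is only a placeholder value:

#include <stdio.h>

#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
#define TARGET_NUM_MSDU_DESC        1424        /* illustrative value only */

/* fails the build, not the boot, if the ring can no longer hold two
 * entries per MSDU descriptor plus the reserved slot */
_Static_assert(2 * TARGET_NUM_MSDU_DESC <= CE_HTT_H2T_MSG_SRC_NENTRIES - 1,
               "HTT source ring too small for 2 entries per MSDU");

int main(void)
{
        printf("ring entries: %d, msdu descriptors: %d\n",
               CE_HTT_H2T_MSG_SRC_NENTRIES, TARGET_NUM_MSDU_DESC);
        return 0;
}
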
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 67dbde6a5c74..8eb7f99ed992 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -23,7 +23,7 @@
/* Maximum number of Copy Engine's supported */
#define CE_COUNT_MAX 8
-#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
@@ -152,6 +152,13 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
unsigned int transfer_id,
unsigned int flags);
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags);
+
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 3b59af3bddf4..ebc5fc2ede75 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -55,8 +55,7 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
{
ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
- ar->is_target_paused = true;
- wake_up(&ar->event_queue);
+ complete(&ar->target_suspend);
}
static int ath10k_init_connect_htc(struct ath10k *ar)
@@ -470,8 +469,12 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
if (index == ie_len)
break;
- if (data[index] & (1 << bit))
+ if (data[index] & (1 << bit)) {
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "Enabling feature bit: %i\n",
+ i);
__set_bit(i, ar->fw_features);
+ }
}
ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
@@ -699,6 +702,7 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
+ init_completion(&ar->target_suspend);
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
@@ -722,8 +726,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- init_waitqueue_head(&ar->event_queue);
-
INIT_WORK(&ar->restart_work, ath10k_core_restart);
return ar;
@@ -856,10 +858,34 @@ err:
}
EXPORT_SYMBOL(ath10k_core_start);
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
+{
+ int ret;
+
+ reinit_completion(&ar->target_suspend);
+
+ ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
+ if (ret) {
+ ath10k_warn("could not suspend target (%d)\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+
+ if (ret == 0) {
+ ath10k_warn("suspend timed out - target pause event never came\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
void ath10k_core_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ /* try to suspend target */
+ ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
ath10k_htt_detach(&ar->htt);
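
ath10k_wait_for_suspend() follows the usual completion pattern: re-arm the completion, issue the asynchronous request, then block with a timeout until the event handler calls complete(). A rough userspace analogue with a POSIX condition variable (a sketch only; the pthread details merely stand in for the kernel completion API; build with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;

/* stands in for the firmware event handler that ends up calling complete() */
static void *event_handler(void *arg)
{
        (void)arg;
        usleep(100 * 1000);             /* pretend the target needs 100 ms */
        pthread_mutex_lock(&lock);
        done = 1;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* stands in for reinit_completion() + wait_for_completion_timeout() */
static int wait_for_suspend(int timeout_sec)
{
        struct timespec ts;
        pthread_t tid;
        int ret = 0;

        pthread_mutex_lock(&lock);
        done = 0;                       /* re-arm before issuing the request */
        pthread_mutex_unlock(&lock);

        /* the asynchronous "suspend target" request: here just a thread
         * that will signal us later */
        pthread_create(&tid, NULL, event_handler, NULL);
        pthread_detach(tid);

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_sec;

        pthread_mutex_lock(&lock);
        while (!done && ret == 0)
                ret = pthread_cond_timedwait(&cond, &lock, &ts);
        pthread_mutex_unlock(&lock);

        return done ? 0 : -ETIMEDOUT;
}

int main(void)
{
        printf("wait_for_suspend returned %d\n", wait_for_suspend(1));
        return 0;
}
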
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index ade1781c7186..0e71979d837c 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -46,21 +46,35 @@
#define ATH10K_MAX_NUM_MGMT_PENDING 128
+/* number of failed packets */
+#define ATH10K_KICKOUT_THRESHOLD 50
+
+/*
+ * Use insanely high numbers to make sure that the firmware implementation
+ * won't kick in; we already have the same functionality in hostapd. The
+ * unit is seconds.
+ */
+#define ATH10K_KEEPALIVE_MIN_IDLE 3747
+#define ATH10K_KEEPALIVE_MAX_IDLE 3895
+#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
struct ath10k;
struct ath10k_skb_cb {
dma_addr_t paddr;
- bool is_mapped;
- bool is_aborted;
u8 vdev_id;
struct {
u8 tid;
bool is_offchan;
-
- u8 frag_len;
- u8 pad_len;
+ struct ath10k_htt_txbuf *txbuf;
+ u32 txbuf_paddr;
} __packed htt;
+
+ struct {
+ bool dtim_zero;
+ bool deliver_cab;
+ } bcn;
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -70,32 +84,6 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
}
-static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
-{
- if (ATH10K_SKB_CB(skb)->is_mapped)
- return -EINVAL;
-
- ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
- return -EIO;
-
- ATH10K_SKB_CB(skb)->is_mapped = true;
- return 0;
-}
-
-static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
-{
- if (!ATH10K_SKB_CB(skb)->is_mapped)
- return -EINVAL;
-
- dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
- DMA_TO_DEVICE);
- ATH10K_SKB_CB(skb)->is_mapped = false;
- return 0;
-}
-
static inline u32 host_interest_item_address(u32 item_offset)
{
return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
@@ -211,6 +199,18 @@ struct ath10k_peer {
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};
+struct ath10k_sta {
+ struct ath10k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+
+ struct work_struct update_wk;
+};
+
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
struct ath10k_vif {
@@ -222,10 +222,17 @@ struct ath10k_vif {
u32 beacon_interval;
u32 dtim_period;
struct sk_buff *beacon;
+ /* protected by data_lock */
+ bool beacon_sent;
struct ath10k *ar;
struct ieee80211_vif *vif;
+ bool is_started;
+ bool is_up;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+
struct work_struct wep_key_work;
struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
u8 def_wep_key_idx;
@@ -235,7 +242,6 @@ struct ath10k_vif {
union {
struct {
- u8 bssid[ETH_ALEN];
u32 uapsd;
} sta;
struct {
@@ -249,13 +255,11 @@ struct ath10k_vif {
u32 noa_len;
u8 *noa_data;
} ap;
- struct {
- u8 bssid[ETH_ALEN];
- } ibss;
} u;
u8 fixed_rate;
u8 fixed_nss;
+ u8 force_sgi;
};
struct ath10k_vif_iter {
@@ -355,8 +359,7 @@ struct ath10k {
const struct ath10k_hif_ops *ops;
} hif;
- wait_queue_head_t event_queue;
- bool is_target_paused;
+ struct completion target_suspend;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;
@@ -412,6 +415,9 @@ struct ath10k {
/* valid during scan; needed for mgmt rx during scan */
struct ieee80211_channel *scan_channel;
+ /* current operating channel definition */
+ struct cfg80211_chan_def chandef;
+
int free_vdev_map;
int monitor_vdev_id;
bool monitor_enabled;
@@ -470,6 +476,7 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
void ath10k_core_destroy(struct ath10k *ar);
int ath10k_core_start(struct ath10k *ar);
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);
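
The slimmed-down struct ath10k_skb_cb still has to fit inside the fixed-size driver_data scratch area that IEEE80211_SKB_CB() exposes for each skb, which is why it stays __packed and small. A standalone sketch of that "private struct overlaid on a fixed scratch buffer, with a build-time size check" pattern; the 40-byte size and all names below are assumptions for illustration only:

#include <stdio.h>
#include <string.h>

#define DRIVER_DATA_SIZE 40     /* illustrative scratch size, not mac80211's */

struct generic_cb {
        /* keep the scratch area pointer-aligned so the overlay cast is safe */
        unsigned char driver_data[DRIVER_DATA_SIZE]
                __attribute__((aligned(sizeof(unsigned long))));
};

struct my_skb_cb {
        unsigned long paddr;
        unsigned char vdev_id;
        unsigned char tid;
};

/* the overlay only works if the private struct fits in the scratch area */
_Static_assert(sizeof(struct my_skb_cb) <= DRIVER_DATA_SIZE,
               "my_skb_cb too large for driver_data");

static struct my_skb_cb *my_cb(struct generic_cb *cb)
{
        return (struct my_skb_cb *)cb->driver_data;
}

int main(void)
{
        struct generic_cb cb;

        memset(&cb, 0, sizeof(cb));
        my_cb(&cb)->vdev_id = 3;
        my_cb(&cb)->tid = 5;
        printf("vdev %d tid %d (cb uses %zu of %d bytes)\n",
               my_cb(&cb)->vdev_id, my_cb(&cb)->tid,
               sizeof(struct my_skb_cb), DRIVER_DATA_SIZE);
        return 0;
}
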
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 1773c36c71a0..a5824990bd2a 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -92,7 +92,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
#ifdef CONFIG_ATH10K_DEBUG
__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
- const char *fmt, ...);
+ const char *fmt, ...);
void ath10k_dbg_dump(enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len);
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index dcdea68bcc0a..2ac7beacddca 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -21,6 +21,14 @@
#include <linux/kernel.h>
#include "core.h"
+struct ath10k_hif_sg_item {
+ u16 transfer_id;
+ void *transfer_context; /* NULL = tx completion callback not called */
+ void *vaddr; /* for debugging mostly */
+ u32 paddr;
+ u16 len;
+};
+
struct ath10k_hif_cb {
int (*tx_completion)(struct ath10k *ar,
struct sk_buff *wbuf,
@@ -31,11 +39,9 @@ struct ath10k_hif_cb {
};
struct ath10k_hif_ops {
- /* Send the head of a buffer to HIF for transmission to the target. */
- int (*send_head)(struct ath10k *ar, u8 pipe_id,
- unsigned int transfer_id,
- unsigned int nbytes,
- struct sk_buff *buf);
+ /* send a scatter-gather list to the target */
+ int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items);
/*
* API to handle HIF-specific BMI message exchanges, this API is
@@ -86,12 +92,11 @@ struct ath10k_hif_ops {
};
-static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
- unsigned int transfer_id,
- unsigned int nbytes,
- struct sk_buff *buf)
+static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items,
+ int n_items)
{
- return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
+ return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
}
static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
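
Replacing send_head() with tx_sg() keeps the HIF layer as an ops table behind a thin inline forwarder; callers now describe a transfer as an array of ath10k_hif_sg_item entries. A compact C sketch of that shape, with a fake backend that just prints the items (names and values are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct sg_item {
        uint16_t transfer_id;
        void *vaddr;            /* for debugging mostly */
        uint32_t paddr;
        uint16_t len;
};

struct hif_ops {
        int (*tx_sg)(void *dev, uint8_t pipe_id,
                     struct sg_item *items, int n_items);
};

struct device {
        const struct hif_ops *ops;
};

/* thin wrapper, mirroring the static inline in hif.h */
static inline int hif_tx_sg(struct device *dev, uint8_t pipe_id,
                            struct sg_item *items, int n_items)
{
        return dev->ops->tx_sg(dev, pipe_id, items, n_items);
}

/* fake backend standing in for the bus-specific implementation */
static int fake_tx_sg(void *dev, uint8_t pipe_id,
                      struct sg_item *items, int n_items)
{
        int i;

        (void)dev;
        for (i = 0; i < n_items; i++)
                printf("pipe %u item %d: id %u len %u paddr 0x%x\n",
                       (unsigned)pipe_id, i, (unsigned)items[i].transfer_id,
                       (unsigned)items[i].len, (unsigned)items[i].paddr);
        return 0;
}

static const struct hif_ops fake_ops = { .tx_sg = fake_tx_sg };

int main(void)
{
        struct device dev = { .ops = &fake_ops };
        char hdr[8], payload[64];
        struct sg_item items[2] = {
                { .transfer_id = 0, .vaddr = hdr,     .paddr = 0x1000, .len = sizeof(hdr) },
                { .transfer_id = 0, .vaddr = payload, .paddr = 0x2000, .len = sizeof(payload) },
        };

        return hif_tx_sg(&dev, 4, items, 2);
}
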
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index edc57ab505c8..7f1bccd3597f 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -63,7 +63,9 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
struct sk_buff *skb)
{
- ath10k_skb_unmap(htc->ar->dev, skb);
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+
+ dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
@@ -122,6 +124,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct ath10k_hif_sg_item sg_item;
+ struct device *dev = htc->ar->dev;
int credits = 0;
int ret;
@@ -157,19 +162,25 @@ int ath10k_htc_send(struct ath10k_htc *htc,
ath10k_htc_prepare_tx_skb(ep, skb);
- ret = ath10k_skb_map(htc->ar->dev, skb);
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret)
goto err_credits;
- ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
- skb->len, skb);
+ sg_item.transfer_id = ep->eid;
+ sg_item.transfer_context = skb;
+ sg_item.vaddr = skb->data;
+ sg_item.paddr = skb_cb->paddr;
+ sg_item.len = skb->len;
+
+ ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
if (ret)
goto err_unmap;
return 0;
err_unmap:
- ath10k_skb_unmap(htc->ar->dev, skb);
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
@@ -191,10 +202,8 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
- if (!skb) {
- ath10k_warn("invalid sk_buff completion - NULL pointer. firmware crashed?\n");
+ if (WARN_ON_ONCE(!skb))
return 0;
- }
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index b93ae355bc08..654867fc1ae7 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -20,6 +20,7 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
+#include <linux/dmapool.h>
#include "htc.h"
#include "rx_desc.h"
@@ -1181,11 +1182,20 @@ struct htt_rx_info {
u32 info1;
u32 info2;
} rate;
+
+ u32 tsf;
bool fcs_err;
bool amsdu_more;
bool mic_err;
};
+struct ath10k_htt_txbuf {
+ struct htt_data_tx_desc_frag frags[2];
+ struct ath10k_htc_hdr htc_hdr;
+ struct htt_cmd_hdr cmd_hdr;
+ struct htt_data_tx_desc cmd_tx;
+} __packed;
+
struct ath10k_htt {
struct ath10k *ar;
enum ath10k_htc_ep_id eid;
@@ -1267,11 +1277,18 @@ struct ath10k_htt {
struct sk_buff **pending_tx;
unsigned long *used_msdu_ids; /* bitmap */
wait_queue_head_t empty_tx_wq;
+ struct dma_pool *tx_pool;
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
struct tasklet_struct rx_replenish_task;
+
+ /* This is used to group tx/rx completions separately and process them
+ * in batches to reduce cache stalls */
+ struct tasklet_struct txrx_compl_task;
+ struct sk_buff_head tx_compl_q;
+ struct sk_buff_head rx_compl_q;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1343,4 +1360,5 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+
#endif
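
struct ath10k_htt_txbuf packs the HTC header, HTT command header and tx descriptor into one __packed blob that is later carved out of a DMA pool and described to the device as a single fragment, so field order and the absence of padding matter. A small standalone layout check with stand-in types; the real structures are defined by the firmware ABI and differ from these:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* stand-ins only; the real headers are larger */
struct frag_desc { uint32_t paddr; uint32_t len; } __attribute__((packed));
struct htc_hdr   { uint8_t eid; uint8_t flags; uint16_t len; } __attribute__((packed));
struct cmd_hdr   { uint8_t msg_type; } __attribute__((packed));
struct cmd_tx    { uint8_t flags0; uint16_t flags1; uint16_t len; uint16_t id;
                   uint32_t frags_paddr; uint32_t peerid; } __attribute__((packed));

struct txbuf {
        struct frag_desc frags[2];
        struct htc_hdr htc_hdr;
        struct cmd_hdr cmd_hdr;
        struct cmd_tx cmd_tx;
} __attribute__((packed));

int main(void)
{
        /* with packing, every field lands at a fixed, gap-free offset */
        printf("frags   @ %zu\n", offsetof(struct txbuf, frags));
        printf("htc_hdr @ %zu\n", offsetof(struct txbuf, htc_hdr));
        printf("cmd_hdr @ %zu\n", offsetof(struct txbuf, cmd_hdr));
        printf("cmd_tx  @ %zu\n", offsetof(struct txbuf, cmd_tx));
        printf("total     %zu bytes\n", sizeof(struct txbuf));
        return 0;
}
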
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index fe8bd1b59f0e..cdcbe2de95f9 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -43,7 +43,7 @@
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
-
+static void ath10k_htt_txrx_compl_task(unsigned long ptr);
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
@@ -225,18 +225,16 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
ath10k_htt_rx_msdu_buff_replenish(htt);
}
-static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
-{
- return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
- htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
-}
-
void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
tasklet_kill(&htt->rx_replenish_task);
+ tasklet_kill(&htt->txrx_compl_task);
+
+ skb_queue_purge(&htt->tx_compl_q);
+ skb_queue_purge(&htt->rx_compl_q);
while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
struct sk_buff *skb =
@@ -270,10 +268,12 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
int idx;
struct sk_buff *msdu;
- spin_lock_bh(&htt->rx_ring.lock);
+ lockdep_assert_held(&htt->rx_ring.lock);
- if (ath10k_htt_rx_ring_elems(htt) == 0)
- ath10k_warn("htt rx ring is empty!\n");
+ if (htt->rx_ring.fill_cnt == 0) {
+ ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
+ return NULL;
+ }
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
@@ -283,7 +283,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = idx;
htt->rx_ring.fill_cnt--;
- spin_unlock_bh(&htt->rx_ring.lock);
return msdu;
}
@@ -307,8 +306,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
- if (ath10k_htt_rx_ring_elems(htt) == 0)
- ath10k_warn("htt rx ring is empty!\n");
+ lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused) {
ath10k_warn("htt is confused. refusing rx\n");
@@ -324,7 +322,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
msdu->data, msdu->len + skb_tailroom(msdu));
rx_desc = (struct htt_rx_desc *)msdu->data;
@@ -400,6 +398,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
msdu_chained = rx_desc->frag_info.ring2_more_count;
+ msdu_chaining = msdu_chained;
if (msdu_len_invalid)
msdu_len = 0;
@@ -417,8 +416,8 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
next->len + skb_tailroom(next),
DMA_FROM_DEVICE);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
- next->data,
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
+ "htt rx chained: ", next->data,
next->len + skb_tailroom(next));
skb_trim(next, 0);
@@ -427,13 +426,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu->next = next;
msdu = next;
- msdu_chaining = 1;
- }
-
- if (msdu_len > 0) {
- /* This may suggest FW bug? */
- ath10k_warn("htt rx msdu len not consumed (%d)\n",
- msdu_len);
}
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
@@ -535,6 +527,12 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
(unsigned long)htt);
+ skb_queue_head_init(&htt->tx_compl_q);
+ skb_queue_head_init(&htt->rx_compl_q);
+
+ tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
+ (unsigned long)htt);
+
ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@@ -638,6 +636,12 @@ struct amsdu_subframe_hdr {
__be16 len;
} __packed;
+static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
+{
+ /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
+ return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+}
+
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
struct htt_rx_info *info)
{
@@ -687,7 +691,7 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
case RX_MSDU_DECAP_NATIVE_WIFI:
/* pull decapped header and copy DA */
hdr = (struct ieee80211_hdr *)skb->data;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
skb_pull(skb, hdr_len);
@@ -751,7 +755,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
/* This shouldn't happen. If it does than it may be a FW bug. */
if (skb->next) {
- ath10k_warn("received chained non A-MSDU frame\n");
+ ath10k_warn("htt rx received chained non A-MSDU frame\n");
ath10k_htt_rx_free_msdu_chain(skb->next);
skb->next = NULL;
}
@@ -774,7 +778,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
case RX_MSDU_DECAP_NATIVE_WIFI:
/* Pull decapped header */
hdr = (struct ieee80211_hdr *)skb->data;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
skb_pull(skb, hdr_len);
/* Push original header */
@@ -852,6 +856,20 @@ static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
return false;
}
+static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ u32 flags;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ flags = __le32_to_cpu(rxd->attention.flags);
+
+ if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
+ return true;
+
+ return false;
+}
+
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
@@ -883,6 +901,57 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
return CHECKSUM_UNNECESSARY;
}
+static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
+{
+ struct sk_buff *next = msdu_head->next;
+ struct sk_buff *to_free = next;
+ int space;
+ int total_len = 0;
+
+ /* TODO: We might be able to optimize this by using
+ * skb_try_coalesce or a similar method to
+ * decrease copying, or maybe get mac80211 to
+ * provide a way to just receive a list of
+ * skbs?
+ */
+
+ msdu_head->next = NULL;
+
+ /* Allocate total length all at once. */
+ while (next) {
+ total_len += next->len;
+ next = next->next;
+ }
+
+ space = total_len - skb_tailroom(msdu_head);
+ if ((space > 0) &&
+ (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
+ /* TODO: bump some rx-oom error stat */
+ /* put it back together so we can free the
+ * whole list at once.
+ */
+ msdu_head->next = to_free;
+ return -1;
+ }
+
+ /* Walk list again, copying contents into
+ * msdu_head
+ */
+ next = to_free;
+ while (next) {
+ skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
+ next->len);
+ next = next->next;
+ }
+
+ /* If here, we have consolidated skb. Free the
+ * fragments and pass the main skb on up the
+ * stack.
+ */
+ ath10k_htt_rx_free_msdu_chain(to_free);
+ return 0;
+}
+
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
@@ -894,6 +963,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
u8 *fw_desc;
int i, j;
+ lockdep_assert_held(&htt->rx_ring.lock);
+
memset(&info, 0, sizeof(info));
fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
@@ -937,6 +1008,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
}
if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx dropping due to decrypt-err\n");
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
@@ -944,13 +1017,16 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
status = info.status;
/* Skip mgmt frames while we handle this in WMI */
- if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
+ if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
+ ath10k_htt_rx_is_mgmt(msdu_head)) {
+ ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
if (status != HTT_RX_IND_MPDU_STATUS_OK &&
status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
+ status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
!htt->ar->monitor_enabled) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx ignoring frame w/ status %d\n",
@@ -960,14 +1036,14 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
}
if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx CAC running\n");
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
- /* FIXME: we do not support chaining yet.
- * this needs investigation */
- if (msdu_chaining) {
- ath10k_warn("msdu_chaining is true\n");
+ if (msdu_chaining &&
+ (ath10k_unchain_msdu(msdu_head) < 0)) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
@@ -975,12 +1051,22 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
+
+ if (info.fcs_err)
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx has FCS err\n");
+
+ if (info.mic_err)
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx has MIC err\n");
+
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
info.signal += rx->ppdu.combined_rssi;
info.rate.info0 = rx->ppdu.info0;
info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
+ info.tsf = __le32_to_cpu(rx->ppdu.tsf);
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
@@ -1014,8 +1100,11 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
msdu_head = NULL;
msdu_tail = NULL;
+
+ spin_lock_bh(&htt->rx_ring.lock);
msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
&msdu_head, &msdu_tail);
+ spin_unlock_bh(&htt->rx_ring.lock);
ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
@@ -1095,7 +1184,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
skb_trim(info.skb, info.skb->len - trim);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
info.skb->data, info.skb->len);
ath10k_process_rx(htt->ar, &info);
@@ -1107,6 +1196,45 @@ end:
}
}
+static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct htt_tx_done tx_done = {};
+ int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
+ __le16 msdu_id;
+ int i;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ switch (status) {
+ case HTT_DATA_TX_STATUS_NO_ACK:
+ tx_done.no_ack = true;
+ break;
+ case HTT_DATA_TX_STATUS_OK:
+ break;
+ case HTT_DATA_TX_STATUS_DISCARD:
+ case HTT_DATA_TX_STATUS_POSTPONE:
+ case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
+ tx_done.discard = true;
+ break;
+ default:
+ ath10k_warn("unhandled tx completion status %d\n", status);
+ tx_done.discard = true;
+ break;
+ }
+
+ ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+ resp->data_tx_completion.num_msdus);
+
+ for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+ msdu_id = resp->data_tx_completion.msdus[i];
+ tx_done.msdu_id = __le16_to_cpu(msdu_id);
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ }
+}
+
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -1116,7 +1244,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath10k_warn("unaligned htt message, expect trouble\n");
- ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
+ ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
resp->hdr.msg_type);
switch (resp->hdr.msg_type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF: {
@@ -1125,10 +1253,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
complete(&htt->target_version_received);
break;
}
- case HTT_T2H_MSG_TYPE_RX_IND: {
- ath10k_htt_rx_handler(htt, &resp->rx_ind);
- break;
- }
+ case HTT_T2H_MSG_TYPE_RX_IND:
+ spin_lock_bh(&htt->rx_ring.lock);
+ __skb_queue_tail(&htt->rx_compl_q, skb);
+ spin_unlock_bh(&htt->rx_ring.lock);
+ tasklet_schedule(&htt->txrx_compl_task);
+ return;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
.vdev_id = resp->peer_map.vdev_id,
@@ -1163,44 +1293,17 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
+ spin_lock_bh(&htt->tx_lock);
ath10k_txrx_tx_unref(htt, &tx_done);
+ spin_unlock_bh(&htt->tx_lock);
break;
}
- case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
- struct htt_tx_done tx_done = {};
- int status = MS(resp->data_tx_completion.flags,
- HTT_DATA_TX_STATUS);
- __le16 msdu_id;
- int i;
-
- switch (status) {
- case HTT_DATA_TX_STATUS_NO_ACK:
- tx_done.no_ack = true;
- break;
- case HTT_DATA_TX_STATUS_OK:
- break;
- case HTT_DATA_TX_STATUS_DISCARD:
- case HTT_DATA_TX_STATUS_POSTPONE:
- case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
- tx_done.discard = true;
- break;
- default:
- ath10k_warn("unhandled tx completion status %d\n",
- status);
- tx_done.discard = true;
- break;
- }
-
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
- resp->data_tx_completion.num_msdus);
-
- for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
- msdu_id = resp->data_tx_completion.msdus[i];
- tx_done.msdu_id = __le16_to_cpu(msdu_id);
- ath10k_txrx_tx_unref(htt, &tx_done);
- }
- break;
- }
+ case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
+ spin_lock_bh(&htt->tx_lock);
+ __skb_queue_tail(&htt->tx_compl_q, skb);
+ spin_unlock_bh(&htt->tx_lock);
+ tasklet_schedule(&htt->txrx_compl_task);
+ return;
case HTT_T2H_MSG_TYPE_SEC_IND: {
struct ath10k *ar = htt->ar;
struct htt_security_indication *ev = &resp->security_indication;
@@ -1240,3 +1343,25 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
/* Free the indication buffer */
dev_kfree_skb_any(skb);
}
+
+static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+{
+ struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+ struct htt_resp *resp;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&htt->tx_lock);
+ while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+ ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_bh(&htt->tx_lock);
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
+ resp = (struct htt_resp *)skb->data;
+ ath10k_htt_rx_handler(htt, &resp->rx_ind);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_bh(&htt->rx_ring.lock);
+}
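
ath10k_unchain_msdu() above linearizes a chained MSDU: it sums the lengths of the trailing fragments, grows the head buffer once, copies each fragment in, and frees the chain. The same walk in plain, self-contained C, using a toy linked buffer list instead of sk_buffs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tbuf {
        struct tbuf *next;
        size_t len;
        size_t cap;
        unsigned char *data;
};

/* fold every fragment after `head` into `head`, then free the fragments */
static int unchain(struct tbuf *head)
{
        struct tbuf *frag = head->next;
        size_t total = head->len;
        unsigned char *grown;

        for (struct tbuf *p = frag; p; p = p->next)
                total += p->len;

        if (total > head->cap) {
                grown = realloc(head->data, total);
                if (!grown)
                        return -1;      /* keep the chain intact on failure */
                head->data = grown;
                head->cap = total;
        }

        while (frag) {
                struct tbuf *next = frag->next;

                memcpy(head->data + head->len, frag->data, frag->len);
                head->len += frag->len;
                free(frag->data);
                free(frag);
                frag = next;
        }
        head->next = NULL;
        return 0;
}

static struct tbuf *mkbuf(const char *s)
{
        struct tbuf *b = calloc(1, sizeof(*b));         /* error checks omitted */

        b->len = b->cap = strlen(s);
        b->data = malloc(b->len);
        memcpy(b->data, s, b->len);
        return b;
}

int main(void)
{
        struct tbuf *head = mkbuf("one ");

        head->next = mkbuf("chained ");
        head->next->next = mkbuf("msdu");
        if (unchain(head))
                return 1;
        printf("%.*s (%zu bytes)\n", (int)head->len, (char *)head->data, head->len);
        return 0;
}
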
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index f1d36d2d2723..7a3e2e40dd5c 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -109,6 +109,14 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
return -ENOMEM;
}
+ htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
+ sizeof(struct ath10k_htt_txbuf), 4, 0);
+ if (!htt->tx_pool) {
+ kfree(htt->used_msdu_ids);
+ kfree(htt->pending_tx);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -117,9 +125,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
struct htt_tx_done tx_done = {0};
int msdu_id;
- /* No locks needed. Called after communication with the device has
- * been stopped. */
-
+ spin_lock_bh(&htt->tx_lock);
for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
@@ -132,6 +138,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
ath10k_txrx_tx_unref(htt, &tx_done);
}
+ spin_unlock_bh(&htt->tx_lock);
}
void ath10k_htt_tx_detach(struct ath10k_htt *htt)
@@ -139,6 +146,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
ath10k_htt_tx_cleanup_pending(htt);
kfree(htt->pending_tx);
kfree(htt->used_msdu_ids);
+ dma_pool_destroy(htt->tx_pool);
return;
}
@@ -334,7 +342,9 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
goto err_free_msdu_id;
}
- res = ath10k_skb_map(dev, msdu);
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
if (res)
goto err_free_txdesc;
@@ -348,8 +358,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
- skb_cb->htt.frag_len = 0;
- skb_cb->htt.pad_len = 0;
+ skb_cb->htt.txbuf = NULL;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
@@ -358,7 +367,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
return 0;
err_unmap_msdu:
- ath10k_skb_unmap(dev, msdu);
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
@@ -375,19 +384,19 @@ err:
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
- struct htt_cmd *cmd;
- struct htt_data_tx_desc_frag *tx_frags;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
- struct sk_buff *txdesc = NULL;
- bool use_frags;
- u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
- u8 tid;
- int prefetch_len, desc_len;
- int msdu_id = -1;
+ struct ath10k_hif_sg_item sg_items[2];
+ struct htt_data_tx_desc_frag *frags;
+ u8 vdev_id = skb_cb->vdev_id;
+ u8 tid = skb_cb->htt.tid;
+ int prefetch_len;
int res;
- u8 flags0;
- u16 flags1;
+ u8 flags0 = 0;
+ u16 msdu_id, flags1 = 0;
+ dma_addr_t paddr;
+ u32 frags_paddr;
+ bool use_frags;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
@@ -406,114 +415,120 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
- desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
-
- txdesc = ath10k_htc_alloc_skb(desc_len);
- if (!txdesc) {
- res = -ENOMEM;
- goto err_free_msdu_id;
- }
-
/* Since HTT 3.0 there is no separate mgmt tx command. However, in the case
* of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
* fragment list the host driver specifies the frame pointer directly. */
use_frags = htt->target_version_major < 3 ||
!ieee80211_is_mgmt(hdr->frame_control);
- if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
- ath10k_warn("htt alignment check failed. dropping packet.\n");
- res = -EIO;
- goto err_free_txdesc;
- }
+ skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
+ &paddr);
+ if (!skb_cb->htt.txbuf)
+ goto err_free_msdu_id;
+ skb_cb->htt.txbuf_paddr = paddr;
- if (use_frags) {
- skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
- skb_cb->htt.pad_len = (unsigned long)msdu->data -
- round_down((unsigned long)msdu->data, 4);
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res)
+ goto err_free_txbuf;
- skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
- } else {
- skb_cb->htt.frag_len = 0;
- skb_cb->htt.pad_len = 0;
- }
+ if (likely(use_frags)) {
+ frags = skb_cb->htt.txbuf->frags;
- res = ath10k_skb_map(dev, msdu);
- if (res)
- goto err_pull_txfrag;
-
- if (use_frags) {
- dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
- DMA_TO_DEVICE);
-
- /* tx fragment list must be terminated with zero-entry */
- tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
- tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
- skb_cb->htt.frag_len +
- skb_cb->htt.pad_len);
- tx_frags[0].len = __cpu_to_le32(msdu->len -
- skb_cb->htt.frag_len -
- skb_cb->htt.pad_len);
- tx_frags[1].paddr = __cpu_to_le32(0);
- tx_frags[1].len = __cpu_to_le32(0);
-
- dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
- DMA_TO_DEVICE);
- }
+ frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
+ frags[0].len = __cpu_to_le32(msdu->len);
+ frags[1].paddr = 0;
+ frags[1].len = 0;
- ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
- (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
- msdu->data, msdu->len);
+ flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
- skb_put(txdesc, desc_len);
- cmd = (struct htt_cmd *)txdesc->data;
+ frags_paddr = skb_cb->htt.txbuf_paddr;
+ } else {
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
- tid = ATH10K_SKB_CB(msdu)->htt.tid;
+ frags_paddr = skb_cb->paddr;
+ }
- ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);
+ /* Normally all commands go through HTC which manages tx credits for
+ * each endpoint and notifies when tx is completed.
+ *
+ * HTT endpoint is creditless so there's no need to care about HTC
+ * flags. In that case it is trivial to fill the HTC header here.
+ *
+ * MSDU transmission is considered completed upon HTT event. This
+ * implies no relevant resources can be freed until after the event is
+ * received. That's why the HTC tx completion handler itself is ignored by
+ * setting transfer_context to NULL for all sg items.
+ *
+ * There is simply no point in pushing HTT TX_FRM through HTC tx path
+ * as it's a waste of resources. By bypassing HTC it is possible to
+ * avoid extra memory allocations, compress data structures and thus
+ * improve performance. */
+
+ skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
+ skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
+ sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+ sizeof(skb_cb->htt.txbuf->cmd_tx) +
+ prefetch_len);
+ skb_cb->htt.txbuf->htc_hdr.flags = 0;
- flags0 = 0;
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
- flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- if (use_frags)
- flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
- HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
- else
- flags0 |= SM(ATH10K_HW_TXRX_MGMT,
- HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- flags1 = 0;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
- cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
- cmd->data_tx.flags0 = flags0;
- cmd->data_tx.flags1 = __cpu_to_le16(flags1);
- cmd->data_tx.len = __cpu_to_le16(msdu->len -
- skb_cb->htt.frag_len -
- skb_cb->htt.pad_len);
- cmd->data_tx.id = __cpu_to_le16(msdu_id);
- cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
- cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
-
- memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
+ skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
+ skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+ skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+ skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
+ flags0, flags1, msdu->len, msdu_id, frags_paddr,
+ (u32)skb_cb->paddr, vdev_id, tid);
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ msdu->data, msdu->len);
- res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
+ sg_items[0].transfer_id = 0;
+ sg_items[0].transfer_context = NULL;
+ sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
+ sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
+ sizeof(skb_cb->htt.txbuf->frags);
+ sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
+ sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+ sizeof(skb_cb->htt.txbuf->cmd_tx);
+
+ sg_items[1].transfer_id = 0;
+ sg_items[1].transfer_context = NULL;
+ sg_items[1].vaddr = msdu->data;
+ sg_items[1].paddr = skb_cb->paddr;
+ sg_items[1].len = prefetch_len;
+
+ res = ath10k_hif_tx_sg(htt->ar,
+ htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+ sg_items, ARRAY_SIZE(sg_items));
if (res)
goto err_unmap_msdu;
return 0;
err_unmap_msdu:
- ath10k_skb_unmap(dev, msdu);
-err_pull_txfrag:
- skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
-err_free_txdesc:
- dev_kfree_skb_any(txdesc);
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_txbuf:
+ dma_pool_free(htt->tx_pool,
+ skb_cb->htt.txbuf,
+ skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
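
HTT tx now draws its per-packet ath10k_htt_txbuf descriptors from a dma_pool created at attach time and returns them on the error path and at detach, instead of building a fresh HTC skb per frame. A minimal userspace sketch of such a fixed-size pool with a free list; ordinary malloc stands in for DMA-coherent memory, so the bus-address half of dma_pool is not modelled, and all names are hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct pool {
        size_t obj_size;
        void *free_list;        /* linked through the first pointer-sized word */
};

static struct pool *pool_create(size_t obj_size)
{
        struct pool *p = malloc(sizeof(*p));

        if (!p)
                return NULL;
        p->obj_size = obj_size < sizeof(void *) ? sizeof(void *) : obj_size;
        p->free_list = NULL;
        return p;
}

static void *pool_alloc(struct pool *p)
{
        void *obj = p->free_list;

        if (obj) {                              /* reuse a returned object */
                p->free_list = *(void **)obj;
                return obj;
        }
        return malloc(p->obj_size);             /* pool empty: grab a fresh one */
}

static void pool_free(struct pool *p, void *obj)
{
        *(void **)obj = p->free_list;           /* push back onto the free list */
        p->free_list = obj;
}

struct txbuf { char bytes[36]; };               /* stand-in for the descriptor */

int main(void)
{
        struct pool *p = pool_create(sizeof(struct txbuf));
        void *a = pool_alloc(p);
        void *b = pool_alloc(p);

        pool_free(p, a);
        printf("recycled: %s\n", pool_alloc(p) == a ? "yes" : "no");
        pool_free(p, b);
        return 0;
}
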
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index f1505a25d810..35fc44e281f5 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -205,8 +205,11 @@ enum ath10k_mcast2ucast_mode {
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
+#define SOC_RESET_CONTROL_ADDRESS 0x00000000
#define SOC_RESET_CONTROL_OFFSET 0x00000000
#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
+#define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000
+#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
#define SOC_CPU_CLOCK_OFFSET 0x00000020
#define SOC_CPU_CLOCK_STANDARD_LSB 0
#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
@@ -216,6 +219,8 @@ enum ath10k_mcast2ucast_mode {
#define SOC_LPO_CAL_OFFSET 0x000000e0
#define SOC_LPO_CAL_ENABLE_LSB 20
#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
+#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
+#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
#define SOC_CHIP_ID_ADDRESS 0x000000ec
#define SOC_CHIP_ID_REV_LSB 8
@@ -273,6 +278,7 @@ enum ath10k_mcast2ucast_mode {
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
#define PCIE_INTR_CLR_ADDRESS 0x0014
#define SCRATCH_3_ADDRESS 0x0030
+#define CPU_INTR_ADDRESS 0x0010
/* Firmware indications to the Host via SCRATCH_3 register. */
#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 776e364eadcd..511a2f81e7af 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -323,13 +323,15 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("Failed to create wmi peer: %i\n", ret);
+ ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
+ addr, vdev_id, ret);
return ret;
}
ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("Failed to wait for created wmi peer: %i\n", ret);
+ ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
+ addr, vdev_id, ret);
return ret;
}
spin_lock_bh(&ar->data_lock);
@@ -339,6 +341,51 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
return 0;
}
+static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ param = ar->wmi.pdev_param->sta_kickout_th;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ ATH10K_KICKOUT_THRESHOLD);
+ if (ret) {
+ ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
struct ath10k *ar = arvif->ar;
@@ -444,8 +491,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
static int ath10k_vdev_start(struct ath10k_vif *arvif)
{
struct ath10k *ar = arvif->ar;
- struct ieee80211_conf *conf = &ar->hw->conf;
- struct ieee80211_channel *channel = conf->chandef.chan;
+ struct cfg80211_chan_def *chandef = &ar->chandef;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
@@ -457,16 +503,14 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
arg.dtim_period = arvif->dtim_period;
arg.bcn_intval = arvif->beacon_interval;
- arg.channel.freq = channel->center_freq;
-
- arg.channel.band_center_freq1 = conf->chandef.center_freq1;
-
- arg.channel.mode = chan_to_phymode(&conf->chandef);
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.mode = chan_to_phymode(chandef);
arg.channel.min_power = 0;
- arg.channel.max_power = channel->max_power * 2;
- arg.channel.max_reg_power = channel->max_reg_power * 2;
- arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+ arg.channel.max_power = chandef->chan->max_power * 2;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
@@ -475,7 +519,7 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
/* For now allow DFS for AP mode */
arg.channel.chan_radar =
- !!(channel->flags & IEEE80211_CHAN_RADAR);
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
arg.ssid = arvif->vif->bss_conf.ssid;
arg.ssid_len = arvif->vif->bss_conf.ssid_len;
@@ -488,13 +532,15 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
- ath10k_warn("WMI vdev start failed: ret %d\n", ret);
+ ath10k_warn("WMI vdev %i start failed: ret %d\n",
+ arg.vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("vdev setup failed %d\n", ret);
+ ath10k_warn("vdev %i setup failed %d\n",
+ arg.vdev_id, ret);
return ret;
}
@@ -512,13 +558,15 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
if (ret) {
- ath10k_warn("WMI vdev stop failed: ret %d\n", ret);
+ ath10k_warn("WMI vdev %i stop failed: ret %d\n",
+ arvif->vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("vdev setup failed %d\n", ret);
+ ath10k_warn("vdev %i setup sync failed %d\n",
+ arvif->vdev_id, ret);
return ret;
}
@@ -527,7 +575,8 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
{
- struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
+ struct cfg80211_chan_def *chandef = &ar->chandef;
+ struct ieee80211_channel *channel = chandef->chan;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
@@ -540,11 +589,11 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
- arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
/* TODO setup this dynamically, what in case we
don't have any vifs? */
- arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef);
+ arg.channel.mode = chan_to_phymode(chandef);
arg.channel.chan_radar =
!!(channel->flags & IEEE80211_CHAN_RADAR);
@@ -555,19 +604,22 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
- ath10k_warn("Monitor vdev start failed: ret %d\n", ret);
+ ath10k_warn("Monitor vdev %i start failed: ret %d\n",
+ vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("Monitor vdev setup failed %d\n", ret);
+ ath10k_warn("Monitor vdev %i setup failed %d\n",
+ vdev_id, ret);
return ret;
}
ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
- ath10k_warn("Monitor vdev up failed: %d\n", ret);
+ ath10k_warn("Monitor vdev %i up failed: %d\n",
+ vdev_id, ret);
goto vdev_stop;
}
@@ -579,7 +631,8 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
vdev_stop:
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+ ath10k_warn("Monitor vdev %i stop failed: %d\n",
+ ar->monitor_vdev_id, ret);
return ret;
}
@@ -602,15 +655,18 @@ static int ath10k_monitor_stop(struct ath10k *ar)
ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev down failed: %d\n", ret);
+ ath10k_warn("Monitor vdev %i down failed: %d\n",
+ ar->monitor_vdev_id, ret);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+ ath10k_warn("Monitor vdev %i stop failed: %d\n",
+ ar->monitor_vdev_id, ret);
ret = ath10k_vdev_setup_sync(ar);
if (ret)
- ath10k_warn("Monitor_down sync failed: %d\n", ret);
+ ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
ar->monitor_enabled = false;
return ret;
@@ -640,7 +696,8 @@ static int ath10k_monitor_create(struct ath10k *ar)
WMI_VDEV_TYPE_MONITOR,
0, ar->mac_addr);
if (ret) {
- ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret);
+ ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
+ ar->monitor_vdev_id, ret);
goto vdev_fail;
}
@@ -669,7 +726,8 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
if (ret) {
- ath10k_warn("WMI vdev monitor delete failed: %d\n", ret);
+ ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
+ ar->monitor_vdev_id, ret);
return ret;
}
@@ -791,6 +849,22 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
if (!info->enable_beacon) {
ath10k_vdev_stop(arvif);
+
+ arvif->is_started = false;
+ arvif->is_up = false;
+
+ spin_lock_bh(&arvif->ar->data_lock);
+ if (arvif->beacon) {
+ dma_unmap_single(arvif->ar->dev,
+ ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(arvif->beacon);
+
+ arvif->beacon = NULL;
+ arvif->beacon_sent = false;
+ }
+ spin_unlock_bh(&arvif->ar->data_lock);
+
return;
}
@@ -800,12 +874,21 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
if (ret)
return;
- ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid);
+ arvif->aid = 0;
+ memcpy(arvif->bssid, info->bssid, ETH_ALEN);
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
if (ret) {
- ath10k_warn("Failed to bring up VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ ath10k_vdev_stop(arvif);
return;
}
+
+ arvif->is_started = true;
+ arvif->is_up = true;
+
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
@@ -824,18 +907,18 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
self_peer, arvif->vdev_id, ret);
- if (is_zero_ether_addr(arvif->u.ibss.bssid))
+ if (is_zero_ether_addr(arvif->bssid))
return;
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
- arvif->u.ibss.bssid);
+ arvif->bssid);
if (ret) {
ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
- arvif->u.ibss.bssid, arvif->vdev_id, ret);
+ arvif->bssid, arvif->vdev_id, ret);
return;
}
- memset(arvif->u.ibss.bssid, 0, ETH_ALEN);
+ memset(arvif->bssid, 0, ETH_ALEN);
return;
}
@@ -878,8 +961,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
conf->dynamic_ps_timeout);
if (ret) {
- ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
return ret;
}
} else {
@@ -1017,7 +1100,6 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int smps;
int i, n;
lockdep_assert_held(&ar->conf_mutex);
@@ -1063,17 +1145,6 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_flags |= WMI_PEER_STBC;
}
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
-
- if (smps == WLAN_HT_CAP_SM_PS_STATIC) {
- arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
- arg->peer_flags |= WMI_PEER_STATIC_MIMOPS;
- } else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) {
- arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
- arg->peer_flags |= WMI_PEER_DYN_MIMOPS;
- }
-
if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
arg->peer_rate_caps |= WMI_RC_TS_FLAG;
else if (ht_cap->mcs.rx_mask[1])
@@ -1083,8 +1154,23 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
arg->peer_ht_rates.rates[n++] = i;
- arg->peer_ht_rates.num_rates = n;
- arg->peer_num_spatial_streams = max((n+7) / 8, 1);
+ /*
+ * This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+ * Firmware asserts if such a situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_num_spatial_streams = sta->rx_nss;
+ }
ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
arg->addr,
@@ -1092,27 +1178,20 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_num_spatial_streams);
}
-static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
- struct ath10k_vif *arvif,
- struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf,
- struct wmi_peer_assoc_complete_arg *arg)
+static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
{
u32 uapsd = 0;
u32 max_sp = 0;
+ int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- if (sta->wme)
- arg->peer_flags |= WMI_PEER_QOS;
-
if (sta->wme && sta->uapsd_queues) {
ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
sta->uapsd_queues, sta->max_sp);
- arg->peer_flags |= WMI_PEER_APSD;
- arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
-
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
@@ -1130,35 +1209,40 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
max_sp = sta->max_sp;
- ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
- sta->addr,
- WMI_AP_PS_PEER_PARAM_UAPSD,
- uapsd);
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_UAPSD,
+ uapsd);
+ if (ret) {
+ ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
- ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
- sta->addr,
- WMI_AP_PS_PEER_PARAM_MAX_SP,
- max_sp);
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_MAX_SP,
+ max_sp);
+ if (ret) {
+ ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
/* TODO setup this based on STA listen interval and
beacon interval. Currently we don't know
sta->listen_interval - mac80211 patch required.
Currently use 10 seconds */
- ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
- sta->addr,
- WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
- 10);
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10);
+ if (ret) {
+ ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
}
-}
-static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar,
- struct ath10k_vif *arvif,
- struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf,
- struct wmi_peer_assoc_complete_arg *arg)
-{
- if (bss_conf->qos)
- arg->peer_flags |= WMI_PEER_QOS;
+ return 0;
}
static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
@@ -1211,10 +1295,17 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
{
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
- ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg);
+ if (sta->wme)
+ arg->peer_flags |= WMI_PEER_QOS;
+
+ if (sta->wme && sta->uapsd_queues) {
+ arg->peer_flags |= WMI_PEER_APSD;
+ arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
+ }
break;
case WMI_VDEV_TYPE_STA:
- ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg);
+ if (bss_conf->qos)
+ arg->peer_flags |= WMI_PEER_QOS;
break;
default:
break;
@@ -1293,6 +1384,33 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
return 0;
}
+static const u32 ath10k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return 0;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (smps >= ARRAY_SIZE(ath10k_smps_map))
+ return -EINVAL;
+
+ return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
+ WMI_PEER_SMPS_STATE,
+ ath10k_smps_map[smps]);
+}
+
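
For reference, the SM power save state read by the new helper is a two-bit subfield of the HT capability info. A self-contained sketch of the extraction and table lookup follows; the mask/shift values mirror the usual mac80211 IEEE80211_HT_CAP_SM_PS definitions, and the WMI codes are placeholders, not the real enum values.

#include <stdint.h>

#define HT_CAP_SM_PS		0x000C	/* assumed: IEEE80211_HT_CAP_SM_PS */
#define HT_CAP_SM_PS_SHIFT	2	/* assumed: IEEE80211_HT_CAP_SM_PS_SHIFT */

enum { SMPS_STATIC, SMPS_DYNAMIC, SMPS_INVALID, SMPS_DISABLED, SMPS_MAX };

static const int wmi_smps_map[SMPS_MAX] = {
	[SMPS_STATIC]	= 1,	/* placeholder for WMI_PEER_SMPS_STATIC */
	[SMPS_DYNAMIC]	= 2,	/* placeholder for WMI_PEER_SMPS_DYNAMIC */
	[SMPS_INVALID]	= 0,	/* placeholder for WMI_PEER_SMPS_PS_NONE */
	[SMPS_DISABLED]	= 0,	/* placeholder for WMI_PEER_SMPS_PS_NONE */
};

/* Returns the WMI SMPS code for a peer's HT capability info, or -1. */
static int ht_cap_to_wmi_smps(uint16_t ht_cap_info)
{
	unsigned int smps = (ht_cap_info & HT_CAP_SM_PS) >> HT_CAP_SM_PS_SHIFT;

	if (smps >= SMPS_MAX)
		return -1;
	return wmi_smps_map[smps];
}
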
/* can be called only in mac80211 callbacks due to `key_count` usage */
static void ath10k_bss_assoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1300,6 +1418,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ieee80211_sta_ht_cap ht_cap;
struct wmi_peer_assoc_complete_arg peer_arg;
struct ieee80211_sta *ap_sta;
int ret;
@@ -1310,17 +1429,21 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!ap_sta) {
- ath10k_warn("Failed to find station entry for %pM\n",
- bss_conf->bssid);
+ ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
rcu_read_unlock();
return;
}
+ /* ap_sta must be accessed only within the RCU section, which must be
+ * left before calling ath10k_setup_peer_smps(), which might sleep. */
+ ht_cap = ap_sta->ht_cap;
+
ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
bss_conf, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc prepare failed for %pM\n: %d",
- bss_conf->bssid, ret);
+ ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d",
+ bss_conf->bssid, arvif->vdev_id, ret);
rcu_read_unlock();
return;
}
@@ -1329,8 +1452,15 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for %pM\n: %d",
- bss_conf->bssid, ret);
+ ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
+ if (ret) {
+ ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",
+ arvif->vdev_id, ret);
return;
}
@@ -1338,11 +1468,17 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
- ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
- bss_conf->bssid);
- if (ret)
+ arvif->aid = bss_conf->aid;
+ memcpy(arvif->bssid, bss_conf->bssid, ETH_ALEN);
+
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ if (ret) {
ath10k_warn("VDEV: %d up failed: ret %d\n",
arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
}
/*
@@ -1382,6 +1518,9 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
arvif->def_wep_key_idx = 0;
+
+ arvif->is_started = false;
+ arvif->is_up = false;
}
static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
@@ -1394,21 +1533,35 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
if (ret) {
- ath10k_warn("WMI peer assoc prepare failed for %pM\n",
- sta->addr);
+ ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
return ret;
}
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for STA %pM\n: %d",
- sta->addr, ret);
+ ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
+ if (ret) {
+ ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret);
return ret;
}
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("could not install peer wep keys (%d)\n", ret);
+ ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
return ret;
}
@@ -1424,7 +1577,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
ret = ath10k_clear_peer_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
+ ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
return ret;
}
@@ -1547,9 +1701,9 @@ static void ath10k_regd_update(struct ath10k *ar)
/* Target allows setting up per-band regdomain but ath_common provides
* a combined one only */
ret = ath10k_wmi_pdev_set_regdomain(ar,
- regpair->regDmnEnum,
- regpair->regDmnEnum, /* 2ghz */
- regpair->regDmnEnum, /* 5ghz */
+ regpair->reg_domain,
+ regpair->reg_domain, /* 2ghz */
+ regpair->reg_domain, /* 5ghz */
regpair->reg_2ghz_ctl,
regpair->reg_5ghz_ctl);
if (ret)
@@ -2100,11 +2254,29 @@ static int ath10k_start(struct ieee80211_hw *hw)
ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
ret);
+ /*
+ * By default the FW sets the AC of ARP frames to voice (6). In that
+ * case ARP exchange does not work properly for a UAPSD-enabled AP: ARP
+ * requests that arrive with access category 0 are processed by the
+ * network stack and sent back with access category 0, but the FW
+ * changes the access category to 6. Setting the access category of ARP
+ * frames to best effort (0) solves this problem.
+ */
+
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->arp_ac_override, 0);
+ if (ret) {
+ ath10k_warn("could not set arp ac override parameter: %d\n",
+ ret);
+ goto exit;
+ }
+
ath10k_regd_update(ar);
+ ret = 0;
exit:
mutex_unlock(&ar->conf_mutex);
- return 0;
+ return ret;
}
static void ath10k_stop(struct ieee80211_hw *hw)
@@ -2145,6 +2317,98 @@ static int ath10k_config_ps(struct ath10k *ar)
return ret;
}
+static const char *chandef_get_width(enum nl80211_chan_width width)
+{
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ return "20 (noht)";
+ case NL80211_CHAN_WIDTH_20:
+ return "20";
+ case NL80211_CHAN_WIDTH_40:
+ return "40";
+ case NL80211_CHAN_WIDTH_80:
+ return "80";
+ case NL80211_CHAN_WIDTH_80P80:
+ return "80+80";
+ case NL80211_CHAN_WIDTH_160:
+ return "160";
+ case NL80211_CHAN_WIDTH_5:
+ return "5";
+ case NL80211_CHAN_WIDTH_10:
+ return "10";
+ }
+ return "?";
+}
+
+static void ath10k_config_chan(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ bool monitor_was_enabled;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
+ ar->chandef.chan->center_freq,
+ ar->chandef.center_freq1,
+ ar->chandef.center_freq2,
+ chandef_get_width(ar->chandef.width));
+
+ /* First stop monitor interface. Some FW versions crash if there's a
+ * lone monitor interface. */
+ monitor_was_enabled = ar->monitor_enabled;
+
+ if (ar->monitor_enabled)
+ ath10k_monitor_stop(ar);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_started)
+ continue;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ continue;
+
+ ret = ath10k_vdev_stop(arvif);
+ if (ret) {
+ ath10k_warn("could not stop vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* all vdevs are now stopped - now attempt to restart them */
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_started)
+ continue;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ continue;
+
+ ret = ath10k_vdev_start(arvif);
+ if (ret) {
+ ath10k_warn("could not start vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ if (!arvif->is_up)
+ continue;
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn("could not bring vdev up %d (%d)\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ if (monitor_was_enabled)
+ ath10k_monitor_start(ar, ar->monitor_vdev_id);
+}
+
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath10k *ar = hw->priv;
@@ -2165,6 +2429,11 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_bh(&ar->data_lock);
ath10k_config_radar_detection(ar);
+
+ if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
+ ar->chandef = conf->chandef;
+ ath10k_config_chan(ar);
+ }
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -2214,7 +2483,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
enum wmi_sta_powersave_param param;
int ret = 0;
- u32 value, param_id;
+ u32 value;
int bit;
u32 vdev_param;
@@ -2276,7 +2545,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
if (ret) {
- ath10k_warn("WMI vdev create failed: ret %d\n", ret);
+ ath10k_warn("WMI vdev %i create failed: ret %d\n",
+ arvif->vdev_id, ret);
goto err;
}
@@ -2287,7 +2557,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
arvif->def_wep_key_idx);
if (ret) {
- ath10k_warn("Failed to set default keyid: %d\n", ret);
+ ath10k_warn("Failed to set vdev %i default keyid: %d\n",
+ arvif->vdev_id, ret);
goto err_vdev_delete;
}
@@ -2296,23 +2567,25 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ATH10K_HW_TXRX_NATIVE_WIFI);
/* 10.X firmware does not support this VDEV parameter. Do not warn */
if (ret && ret != -EOPNOTSUPP) {
- ath10k_warn("Failed to set TX encap: %d\n", ret);
+ ath10k_warn("Failed to set vdev %i TX encap: %d\n",
+ arvif->vdev_id, ret);
goto err_vdev_delete;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
- ath10k_warn("Failed to create peer for AP: %d\n", ret);
+ ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
+ arvif->vdev_id, ret);
goto err_vdev_delete;
}
- param_id = ar->wmi.pdev_param->sta_kickout_th;
-
- /* Disable STA KICKOUT functionality in FW */
- ret = ath10k_wmi_pdev_set_param(ar, param_id, 0);
- if (ret)
- ath10k_warn("Failed to disable STA KICKOUT\n");
+ ret = ath10k_mac_set_kickout(arvif);
+ if (ret) {
+ ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
}
if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
@@ -2321,7 +2594,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set RX wake policy: %d\n", ret);
+ ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
+ arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2330,7 +2604,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
+ ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
+ arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2339,7 +2614,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
+ ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
+ arvif->vdev_id, ret);
goto err_peer_delete;
}
}
@@ -2403,17 +2679,19 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
if (ret)
- ath10k_warn("Failed to remove peer for AP: %d\n", ret);
+ ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
+ arvif->vdev_id, ret);
kfree(arvif->u.ap.noa_data);
}
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
arvif->vdev_id);
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
- ath10k_warn("WMI vdev delete failed: %d\n", ret);
+ ath10k_warn("WMI vdev %i delete failed: %d\n",
+ arvif->vdev_id, ret);
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ar->monitor_present = false;
@@ -2502,8 +2780,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, arvif->beacon_interval);
if (ret)
- ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BEACON) {
@@ -2515,8 +2793,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
WMI_BEACON_STAGGERED_MODE);
if (ret)
- ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BEACON_INFO) {
@@ -2530,8 +2808,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->dtim_period);
if (ret)
- ath10k_warn("Failed to set dtim period for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_SSID &&
@@ -2551,7 +2829,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
- ath10k_warn("Failed to add peer %pM for vdev %d when changin bssid: %i\n",
+ ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
info->bssid, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2559,15 +2837,20 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
* this is never erased as we use it for crypto key
* clearing; this is a FW requirement
*/
- memcpy(arvif->u.sta.bssid, info->bssid,
- ETH_ALEN);
+ memcpy(arvif->bssid, info->bssid, ETH_ALEN);
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d start %pM\n",
arvif->vdev_id, info->bssid);
- /* FIXME: check return value */
ret = ath10k_vdev_start(arvif);
+ if (ret) {
+ ath10k_warn("failed to start vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto exit;
+ }
+
+ arvif->is_started = true;
}
/*
@@ -2576,7 +2859,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
* IBSS in order to remove BSSID peer.
*/
if (vif->type == NL80211_IFTYPE_ADHOC)
- memcpy(arvif->u.ibss.bssid, info->bssid,
+ memcpy(arvif->bssid, info->bssid,
ETH_ALEN);
}
}
@@ -2598,8 +2881,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
cts_prot);
if (ret)
- ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2617,8 +2900,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
slottime);
if (ret)
- ath10k_warn("Failed to set erp slot for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2636,8 +2919,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
preamble);
if (ret)
- ath10k_warn("Failed to set preamble for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set preamble for vdev %d: %i\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ASSOC) {
@@ -2645,6 +2928,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ath10k_bss_assoc(hw, vif, info);
}
+exit:
mutex_unlock(&ar->conf_mutex);
}
@@ -2767,8 +3051,8 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
key->keyidx);
if (ret)
- ath10k_warn("failed to set group key as default key: %d\n",
- ret);
+ ath10k_warn("failed to set vdev %i group key as default key: %d\n",
+ arvif->vdev_id, ret);
}
static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -2828,7 +3112,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = ath10k_install_key(arvif, key, cmd, peer_addr);
if (ret) {
- ath10k_warn("ath10k_install_key failed (%d)\n", ret);
+ ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret);
goto exit;
}
@@ -2850,6 +3135,69 @@ exit:
return ret;
}
+static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath10k *ar;
+ struct ath10k_vif *arvif;
+ struct ath10k_sta *arsta;
+ struct ieee80211_sta *sta;
+ u32 changed, bw, nss, smps;
+ int err;
+
+ arsta = container_of(wk, struct ath10k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
+ sta->addr, bw);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_CHAN_WIDTH, bw);
+ if (err)
+ ath10k_warn("failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath10k_warn("failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_SMPS_STATE, smps);
+ if (err)
+ ath10k_warn("failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2858,9 +3206,22 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
int max_num_peers;
int ret = 0;
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+ }
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ cancel_work_sync(&arsta->update_wk);
+
mutex_lock(&ar->conf_mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
@@ -2899,8 +3260,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
+ sta->addr, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION)
ath10k_bss_disassoc(hw, vif);
@@ -2916,8 +3277,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_station_assoc(ar, arvif, sta);
if (ret)
- ath10k_warn("Failed to associate station: %pM\n",
- sta->addr);
+ ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -2930,8 +3291,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
- ath10k_warn("Failed to disassociate station: %pM\n",
- sta->addr);
+ ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n",
+ sta->addr, arvif->vdev_id, ret);
}
exit:
mutex_unlock(&ar->conf_mutex);
@@ -3212,7 +3573,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
}), ATH10K_FLUSH_TIMEOUT_HZ);
if (ret <= 0 || skip)
- ath10k_warn("tx not flushed\n");
+ ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
+ skip, ar->state, ret);
skip:
mutex_unlock(&ar->conf_mutex);
@@ -3234,23 +3596,14 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
int ret;
- ar->is_target_paused = false;
+ mutex_lock(&ar->conf_mutex);
- ret = ath10k_wmi_pdev_suspend_target(ar);
+ ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);
if (ret) {
- ath10k_warn("could not suspend target (%d)\n", ret);
- return 1;
- }
-
- ret = wait_event_interruptible_timeout(ar->event_queue,
- ar->is_target_paused == true,
- 1 * HZ);
- if (ret < 0) {
- ath10k_warn("suspend interrupted (%d)\n", ret);
- goto resume;
- } else if (ret == 0) {
- ath10k_warn("suspend timed out - target pause event never came\n");
- goto resume;
+ if (ret == -ETIMEDOUT)
+ goto resume;
+ ret = 1;
+ goto exit;
}
ret = ath10k_hif_suspend(ar);
@@ -3259,12 +3612,17 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
goto resume;
}
- return 0;
+ ret = 0;
+ goto exit;
resume:
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret)
ath10k_warn("could not resume target (%d)\n", ret);
- return 1;
+
+ ret = 1;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
}
static int ath10k_resume(struct ieee80211_hw *hw)
@@ -3272,19 +3630,26 @@ static int ath10k_resume(struct ieee80211_hw *hw)
struct ath10k *ar = hw->priv;
int ret;
+ mutex_lock(&ar->conf_mutex);
+
ret = ath10k_hif_resume(ar);
if (ret) {
ath10k_warn("could not resume hif (%d)\n", ret);
- return 1;
+ ret = 1;
+ goto exit;
}
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret) {
ath10k_warn("could not resume target (%d)\n", ret);
- return 1;
+ ret = 1;
+ goto exit;
}
- return 0;
+ ret = 0;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
}
#endif
@@ -3575,7 +3940,8 @@ static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
u8 fixed_rate,
- u8 fixed_nss)
+ u8 fixed_nss,
+ u8 force_sgi)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
@@ -3584,12 +3950,16 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
mutex_lock(&ar->conf_mutex);
if (arvif->fixed_rate == fixed_rate &&
- arvif->fixed_nss == fixed_nss)
+ arvif->fixed_nss == fixed_nss &&
+ arvif->force_sgi == force_sgi)
goto exit;
if (fixed_rate == WMI_FIXED_RATE_NONE)
ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+ if (force_sgi)
+ ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n");
+
vdev_param = ar->wmi.vdev_param->fixed_rate;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
vdev_param, fixed_rate);
@@ -3615,6 +3985,19 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
arvif->fixed_nss = fixed_nss;
+ vdev_param = ar->wmi.vdev_param->sgi;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ force_sgi);
+
+ if (ret) {
+ ath10k_warn("Could not set sgi param %d: %d\n",
+ force_sgi, ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ arvif->force_sgi = force_sgi;
+
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -3629,6 +4012,11 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
u8 fixed_rate = WMI_FIXED_RATE_NONE;
u8 fixed_nss = ar->num_rf_chains;
+ u8 force_sgi;
+
+ force_sgi = mask->control[band].gi;
+ if (force_sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
if (!ath10k_default_bitrate_mask(ar, band, mask)) {
if (!ath10k_get_fixed_rate_nss(mask, band,
@@ -3637,7 +4025,113 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
return -EINVAL;
}
- return ath10k_set_fixed_rate_param(arvif, fixed_rate, fixed_nss);
+ if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
+ ath10k_warn("Could not force SGI usage for default rate settings\n");
+ return -EINVAL;
+ }
+
+ return ath10k_set_fixed_rate_param(arvif, fixed_rate,
+ fixed_nss, force_sgi);
+}
+
+static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ /* there's no need to do anything here. vif->csa_active is enough */
+ return;
+}
+
+static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ u32 bw, smps;
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->bandwidth, sta->rx_nss,
+ sta->smps_mode);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ ath10k_warn("mac sta rc update for %pM: invalid bw %d\n",
+ sta->addr, sta->bandwidth);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ case IEEE80211_SMPS_NUM_MODES:
+ ath10k_warn("mac sta rc update for %pM: invalid smps: %d\n",
+ sta->addr, sta->smps_mode);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ /* FIXME: Not implemented. Probably the only way to do it would
+ * be to re-assoc the peer. */
+ changed &= ~IEEE80211_RC_SUPP_RATES_CHANGED;
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac sta rc update for %pM: changing supported rates not implemented\n",
+ sta->addr);
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
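
The split between ath10k_sta_rc_update() and ath10k_sta_rc_update_wk() exists because mac80211 may invoke the rc_update callback in atomic context, while the WMI peer-set-param calls can sleep. A generic kernel-style sketch of that pattern, with illustrative (non-ath10k) names:

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct rc_state {
	spinlock_t lock;		/* protects the pending_* snapshot */
	struct mutex conf_mutex;	/* held while issuing sleeping calls */
	struct work_struct wk;
	u32 pending_changed;
	u32 pending_bw;
};

/* atomic-safe callback: only snapshot the new values and queue work */
static void rc_update_atomic(struct rc_state *st, u32 changed, u32 bw)
{
	spin_lock_bh(&st->lock);
	st->pending_changed |= changed;
	st->pending_bw = bw;
	spin_unlock_bh(&st->lock);

	schedule_work(&st->wk);	/* the driver uses ieee80211_queue_work() */
}

/* work item: re-read the snapshot, then do the sleeping work under the
 * configuration mutex */
static void rc_update_wk(struct work_struct *wk)
{
	struct rc_state *st = container_of(wk, struct rc_state, wk);
	u32 changed, bw;

	spin_lock_bh(&st->lock);
	changed = st->pending_changed;
	st->pending_changed = 0;
	bw = st->pending_bw;
	spin_unlock_bh(&st->lock);

	mutex_lock(&st->conf_mutex);
	/* sleeping firmware calls keyed off `changed` and `bw` go here */
	mutex_unlock(&st->conf_mutex);
}
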
+static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ /*
+ * FIXME: Return 0 for the time being. Need to figure out whether the
+ * FW has an API to fetch the 64-bit local TSF.
+ */
+
+ return 0;
}
static const struct ieee80211_ops ath10k_ops = {
@@ -3663,6 +4157,9 @@ static const struct ieee80211_ops ath10k_ops = {
.restart_complete = ath10k_restart_complete,
.get_survey = ath10k_get_survey,
.set_bitrate_mask = ath10k_set_bitrate_mask,
+ .channel_switch_beacon = ath10k_channel_switch_beacon,
+ .sta_rc_update = ath10k_sta_rc_update,
+ .get_tsf = ath10k_get_tsf,
#ifdef CONFIG_PM
.suspend = ath10k_suspend,
.resume = ath10k_resume,
@@ -3939,7 +4436,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
ath10k_get_arvif_iter,
&arvif_iter);
if (!arvif_iter.arvif) {
- ath10k_warn("No VIF found for VDEV: %d\n", vdev_id);
+ ath10k_warn("No VIF found for vdev %d\n", vdev_id);
return NULL;
}
@@ -4020,7 +4517,8 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_AP_LINK_PS;
+ IEEE80211_HW_AP_LINK_PS |
+ IEEE80211_HW_SPECTRUM_MGMT;
/* MSDU can have HTT TX fragment pushed in front. The additional 4
* bytes is used for padding/alignment if necessary. */
@@ -4038,10 +4536,12 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath10k_sta);
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
@@ -4076,7 +4576,7 @@ int ath10k_mac_register(struct ath10k *ar)
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
- ath10k_err("Regulatory initialization failed\n");
+ ath10k_err("Regulatory initialization failed: %i\n", ret);
goto err_free;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 29fd197d1fd8..9d242d801d9d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,13 +58,12 @@ static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
u32 *data);
-static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
-static void ath10k_pci_stop_ce(struct ath10k *ar);
-static int ath10k_pci_device_reset(struct ath10k *ar);
+static int ath10k_pci_cold_reset(struct ath10k *ar);
+static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
@@ -73,7 +72,6 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
struct ath10k_ce_pipe *rx_pipe,
struct bmi_xfer *xfer);
-static void ath10k_pci_cleanup_ce(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
@@ -678,34 +676,12 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
}
}
-/*
- * FIXME: Handle OOM properly.
- */
-static inline
-struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
-{
- struct ath10k_pci_compl *compl = NULL;
-
- spin_lock_bh(&pipe_info->pipe_lock);
- if (list_empty(&pipe_info->compl_free)) {
- ath10k_warn("Completion buffers are full\n");
- goto exit;
- }
- compl = list_first_entry(&pipe_info->compl_free,
- struct ath10k_pci_compl, list);
- list_del(&compl->list);
-exit:
- spin_unlock_bh(&pipe_info->pipe_lock);
- return compl;
-}
-
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
- struct ath10k_pci_compl *compl;
+ struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
void *transfer_context;
u32 ce_data;
unsigned int nbytes;
@@ -714,27 +690,12 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
&ce_data, &nbytes,
&transfer_id) == 0) {
- compl = get_free_compl(pipe_info);
- if (!compl)
- break;
-
- compl->state = ATH10K_PCI_COMPL_SEND;
- compl->ce_state = ce_state;
- compl->pipe_info = pipe_info;
- compl->skb = transfer_context;
- compl->nbytes = nbytes;
- compl->transfer_id = transfer_id;
- compl->flags = 0;
+ /* no need to call tx completion for NULL pointers */
+ if (transfer_context == NULL)
+ continue;
- /*
- * Add the completion to the processing queue.
- */
- spin_lock_bh(&ar_pci->compl_lock);
- list_add_tail(&compl->list, &ar_pci->compl_process);
- spin_unlock_bh(&ar_pci->compl_lock);
+ cb->tx_completion(ar, transfer_context, transfer_id);
}
-
- ath10k_pci_process_ce(ar);
}
/* Called by lower (CE) layer when data is received from the Target. */
@@ -743,77 +704,100 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
- struct ath10k_pci_compl *compl;
+ struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
struct sk_buff *skb;
void *transfer_context;
u32 ce_data;
- unsigned int nbytes;
+ unsigned int nbytes, max_nbytes;
unsigned int transfer_id;
unsigned int flags;
+ int err;
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
&ce_data, &nbytes, &transfer_id,
&flags) == 0) {
- compl = get_free_compl(pipe_info);
- if (!compl)
- break;
-
- compl->state = ATH10K_PCI_COMPL_RECV;
- compl->ce_state = ce_state;
- compl->pipe_info = pipe_info;
- compl->skb = transfer_context;
- compl->nbytes = nbytes;
- compl->transfer_id = transfer_id;
- compl->flags = flags;
+ err = ath10k_pci_post_rx_pipe(pipe_info, 1);
+ if (unlikely(err)) {
+ /* FIXME: retry */
+ ath10k_warn("failed to replenish CE rx ring %d: %d\n",
+ pipe_info->pipe_num, err);
+ }
skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
- /*
- * Add the completion to the processing queue.
- */
- spin_lock_bh(&ar_pci->compl_lock);
- list_add_tail(&compl->list, &ar_pci->compl_process);
- spin_unlock_bh(&ar_pci->compl_lock);
- }
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn("rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
- ath10k_pci_process_ce(ar);
+ skb_put(skb, nbytes);
+ cb->rx_completion(ar, skb, pipe_info->pipe_num);
+ }
}
-/* Send the first nbytes bytes of the buffer */
-static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
- unsigned int transfer_id,
- unsigned int bytes, struct sk_buff *nbuf)
+static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
{
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
- struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
- unsigned int len;
- u32 flags = 0;
- int ret;
+ struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
+ struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
+ struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int write_index = src_ring->write_index;
+ int err, i;
- len = min(bytes, nbuf->len);
- bytes -= len;
+ spin_lock_bh(&ar_pci->ce_lock);
- if (len & 3)
- ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) < n_items)) {
+ err = -ENOBUFS;
+ goto unlock;
+ }
- ath10k_dbg(ATH10K_DBG_PCI,
- "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
- nbuf->data, (unsigned long long) skb_cb->paddr,
- nbuf->len, len);
- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
- "ath10k tx: data: ",
- nbuf->data, nbuf->len);
-
- ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
- flags);
- if (ret)
- ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
+ for (i = 0; i < n_items - 1; i++) {
+ ath10k_dbg(ATH10K_DBG_PCI,
+ "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+ i, items[i].paddr, items[i].len, n_items);
+ ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+ items[i].vaddr, items[i].len);
- return ret;
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ CE_SEND_FLAG_GATHER);
+ if (err)
+ goto unlock;
+ }
+
+ /* `i` is equal to `n_items - 1` after the loop above */
+
+ ath10k_dbg(ATH10K_DBG_PCI,
+ "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+ i, items[i].paddr, items[i].len, n_items);
+ ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+ items[i].vaddr, items[i].len);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ 0);
+ if (err)
+ goto unlock;
+
+ err = 0;
+unlock:
+ spin_unlock_bh(&ar_pci->ce_lock);
+ return err;
}
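
The gather convention above is the core of the new tx_sg path: every descriptor except the last is queued with CE_SEND_FLAG_GATHER, so the copy engine treats the run as one logical transfer that only the final descriptor completes. Written as a single loop with simplified, hypothetical types (the driver unrolls the last iteration instead of branching inside the loop):

#include <stdint.h>

#define SEND_FLAG_GATHER 0x1		/* stands in for CE_SEND_FLAG_GATHER */

struct sg_item {			/* simplified ath10k_hif_sg_item */
	void *ctx;
	uint32_t paddr;
	uint16_t len;
	uint16_t id;
};

/* queue_one() stands in for ath10k_ce_send_nolock(): queue one descriptor,
 * return 0 on success */
int queue_one(void *ce_pipe, const struct sg_item *it, unsigned int flags);

int send_sg(void *ce_pipe, const struct sg_item *items, int n_items)
{
	int i, err;

	for (i = 0; i < n_items; i++) {
		/* all but the last descriptor carry the gather flag */
		unsigned int flags = (i < n_items - 1) ? SEND_FLAG_GATHER : 0;

		err = queue_one(ce_pipe, &items[i], flags);
		if (err)
			return err;
	}
	return 0;
}
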
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
@@ -833,9 +817,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
ath10k_err("firmware crashed!\n");
ath10k_err("hardware name %s version 0x%x\n",
ar->hw_params.name, ar->target_version);
- ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
- ar->fw_version_minor, ar->fw_version_release,
- ar->fw_version_build);
+ ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
ret = ath10k_pci_diag_read_mem(ar, host_addr,
@@ -904,52 +886,6 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
sizeof(ar_pci->msg_callbacks_current));
}
-static int ath10k_pci_alloc_compl(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- const struct ce_attr *attr;
- struct ath10k_pci_pipe *pipe_info;
- struct ath10k_pci_compl *compl;
- int i, pipe_num, completions;
-
- spin_lock_init(&ar_pci->compl_lock);
- INIT_LIST_HEAD(&ar_pci->compl_process);
-
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
-
- spin_lock_init(&pipe_info->pipe_lock);
- INIT_LIST_HEAD(&pipe_info->compl_free);
-
- /* Handle Diagnostic CE specially */
- if (pipe_info->ce_hdl == ar_pci->ce_diag)
- continue;
-
- attr = &host_ce_config_wlan[pipe_num];
- completions = 0;
-
- if (attr->src_nentries)
- completions += attr->src_nentries;
-
- if (attr->dest_nentries)
- completions += attr->dest_nentries;
-
- for (i = 0; i < completions; i++) {
- compl = kmalloc(sizeof(*compl), GFP_KERNEL);
- if (!compl) {
- ath10k_warn("No memory for completion state\n");
- ath10k_pci_cleanup_ce(ar);
- return -ENOMEM;
- }
-
- compl->state = ATH10K_PCI_COMPL_FREE;
- list_add_tail(&compl->list, &pipe_info->compl_free);
- }
- }
-
- return 0;
-}
-
static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -994,147 +930,6 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
tasklet_kill(&ar_pci->pipe_info[i].intr);
}
-static void ath10k_pci_stop_ce(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_compl *compl;
- struct sk_buff *skb;
-
- /* Mark pending completions as aborted, so that upper layers free up
- * their associated resources */
- spin_lock_bh(&ar_pci->compl_lock);
- list_for_each_entry(compl, &ar_pci->compl_process, list) {
- skb = compl->skb;
- ATH10K_SKB_CB(skb)->is_aborted = true;
- }
- spin_unlock_bh(&ar_pci->compl_lock);
-}
-
-static void ath10k_pci_cleanup_ce(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_compl *compl, *tmp;
- struct ath10k_pci_pipe *pipe_info;
- struct sk_buff *netbuf;
- int pipe_num;
-
- /* Free pending completions. */
- spin_lock_bh(&ar_pci->compl_lock);
- if (!list_empty(&ar_pci->compl_process))
- ath10k_warn("pending completions still present! possible memory leaks.\n");
-
- list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
- list_del(&compl->list);
- netbuf = compl->skb;
- dev_kfree_skb_any(netbuf);
- kfree(compl);
- }
- spin_unlock_bh(&ar_pci->compl_lock);
-
- /* Free unused completions for each pipe. */
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
-
- spin_lock_bh(&pipe_info->pipe_lock);
- list_for_each_entry_safe(compl, tmp,
- &pipe_info->compl_free, list) {
- list_del(&compl->list);
- kfree(compl);
- }
- spin_unlock_bh(&pipe_info->pipe_lock);
- }
-}
-
-static void ath10k_pci_process_ce(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ar->hif.priv;
- struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
- struct ath10k_pci_compl *compl;
- struct sk_buff *skb;
- unsigned int nbytes;
- int ret, send_done = 0;
-
- /* Upper layers aren't ready to handle tx/rx completions in parallel so
- * we must serialize all completion processing. */
-
- spin_lock_bh(&ar_pci->compl_lock);
- if (ar_pci->compl_processing) {
- spin_unlock_bh(&ar_pci->compl_lock);
- return;
- }
- ar_pci->compl_processing = true;
- spin_unlock_bh(&ar_pci->compl_lock);
-
- for (;;) {
- spin_lock_bh(&ar_pci->compl_lock);
- if (list_empty(&ar_pci->compl_process)) {
- spin_unlock_bh(&ar_pci->compl_lock);
- break;
- }
- compl = list_first_entry(&ar_pci->compl_process,
- struct ath10k_pci_compl, list);
- list_del(&compl->list);
- spin_unlock_bh(&ar_pci->compl_lock);
-
- switch (compl->state) {
- case ATH10K_PCI_COMPL_SEND:
- cb->tx_completion(ar,
- compl->skb,
- compl->transfer_id);
- send_done = 1;
- break;
- case ATH10K_PCI_COMPL_RECV:
- ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
- if (ret) {
- ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
- compl->pipe_info->pipe_num, ret);
- break;
- }
-
- skb = compl->skb;
- nbytes = compl->nbytes;
-
- ath10k_dbg(ATH10K_DBG_PCI,
- "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
- skb, nbytes);
- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
- "ath10k rx: ", skb->data, nbytes);
-
- if (skb->len + skb_tailroom(skb) >= nbytes) {
- skb_trim(skb, 0);
- skb_put(skb, nbytes);
- cb->rx_completion(ar, skb,
- compl->pipe_info->pipe_num);
- } else {
- ath10k_warn("rxed more than expected (nbytes %d, max %d)",
- nbytes,
- skb->len + skb_tailroom(skb));
- }
- break;
- case ATH10K_PCI_COMPL_FREE:
- ath10k_warn("free completion cannot be processed\n");
- break;
- default:
- ath10k_warn("invalid completion state (%d)\n",
- compl->state);
- break;
- }
-
- compl->state = ATH10K_PCI_COMPL_FREE;
-
- /*
- * Add completion back to the pipe's free list.
- */
- spin_lock_bh(&compl->pipe_info->pipe_lock);
- list_add_tail(&compl->list, &compl->pipe_info->compl_free);
- spin_unlock_bh(&compl->pipe_info->pipe_lock);
- }
-
- spin_lock_bh(&ar_pci->compl_lock);
- ar_pci->compl_processing = false;
- spin_unlock_bh(&ar_pci->compl_lock);
-}
-
/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id, u8 *ul_pipe,
@@ -1306,17 +1101,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
- ret = ath10k_pci_alloc_compl(ar);
- if (ret) {
- ath10k_warn("failed to allocate CE completions: %d\n", ret);
- goto err_early_irq;
- }
-
ret = ath10k_pci_request_irq(ar);
if (ret) {
ath10k_warn("failed to post RX buffers for all pipes: %d\n",
ret);
- goto err_free_compl;
+ goto err_early_irq;
}
ret = ath10k_pci_setup_ce_irq(ar);
@@ -1340,10 +1129,6 @@ err_stop:
ath10k_ce_disable_interrupts(ar);
ath10k_pci_free_irq(ar);
ath10k_pci_kill_tasklet(ar);
- ath10k_pci_stop_ce(ar);
- ath10k_pci_process_ce(ar);
-err_free_compl:
- ath10k_pci_cleanup_ce(ar);
err_early_irq:
/* Though there should be no interrupts (device was reset)
* power_down() expects the early IRQ to be installed as per the
@@ -1414,18 +1199,10 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
&ce_data, &nbytes, &id) == 0) {
- /*
- * Indicate the completion to higer layer to free
- * the buffer
- */
-
- if (!netbuf) {
- ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
- ce_hdl->id);
+ /* no need to call tx completion for NULL pointers */
+ if (!netbuf)
continue;
- }
- ATH10K_SKB_CB(netbuf)->is_aborted = true;
ar_pci->msg_callbacks_current.tx_completion(ar,
netbuf,
id);
@@ -1483,7 +1260,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_free_irq(ar);
ath10k_pci_kill_tasklet(ar);
- ath10k_pci_stop_ce(ar);
ret = ath10k_pci_request_early_irq(ar);
if (ret)
@@ -1493,8 +1269,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
* not DMA nor interrupt. We process the leftovers and then free
* everything else up. */
- ath10k_pci_process_ce(ar);
- ath10k_pci_cleanup_ce(ar);
ath10k_pci_buffer_cleanup(ar);
/* Make the sure the device won't access any structures on the host by
@@ -1502,7 +1276,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
* configuration during init. If ringbuffers are freed and the device
* were to access them this could lead to memory corruption on the
* host. */
- ath10k_pci_device_reset(ar);
+ ath10k_pci_warm_reset(ar);
ar_pci->started = 0;
}
@@ -1993,7 +1767,94 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
-static int ath10k_pci_hif_power_up(struct ath10k *ar)
+static int ath10k_pci_warm_reset(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret = 0;
+ u32 val;
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
+
+ ret = ath10k_do_pci_wake(ar);
+ if (ret) {
+ ath10k_err("failed to wake up target: %d\n", ret);
+ return ret;
+ }
+
+ /* debug */
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_CAUSE_ADDRESS);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CPU_INTR_ADDRESS);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+ val);
+
+ /* disable pending irqs */
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS, 0);
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_CLR_ADDRESS, ~0);
+
+ msleep(100);
+
+ /* clear fw indicator */
+ ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
+
+ /* clear target LF timer interrupts */
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_LF_TIMER_CONTROL0_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_LF_TIMER_CONTROL0_ADDRESS,
+ val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+
+ /* reset CE */
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CE_RST_MASK);
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ msleep(10);
+
+ /* unreset CE */
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ msleep(10);
+
+ /* debug */
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_CAUSE_ADDRESS);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CPU_INTR_ADDRESS);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+ val);
+
+ /* CPU warm reset */
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
+
+ msleep(100);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
+
+ ath10k_do_pci_sleep(ar);
+ return ret;
+}
+
+static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
const char *irq_mode;
@@ -2009,7 +1870,11 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
* is in an unexpected state. We try to catch that here in order to
* reset the Target and retry the probe.
*/
- ret = ath10k_pci_device_reset(ar);
+ if (cold_reset)
+ ret = ath10k_pci_cold_reset(ar);
+ else
+ ret = ath10k_pci_warm_reset(ar);
+
if (ret) {
ath10k_err("failed to reset target: %d\n", ret);
goto err;
@@ -2079,7 +1944,7 @@ err_deinit_irq:
ath10k_pci_deinit_irq(ar);
err_ce:
ath10k_pci_ce_deinit(ar);
- ath10k_pci_device_reset(ar);
+ ath10k_pci_warm_reset(ar);
err_ps:
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
@@ -2087,6 +1952,34 @@ err:
return ret;
}
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
+{
+ int ret;
+
+ /*
+ * Hardware CUS232 version 2 has some issues with cold reset and the
+ * preferred (and safer) way to perform a device reset is through a
+ * warm reset.
+ *
+ * Warm reset doesn't always work though (notably after a firmware
+ * crash) so fall back to cold reset if necessary.
+ */
+ ret = __ath10k_pci_hif_power_up(ar, false);
+ if (ret) {
+ ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
+ ret);
+
+ ret = __ath10k_pci_hif_power_up(ar, true);
+ if (ret) {
+ ath10k_err("failed to power up target using cold reset too (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -2094,7 +1987,7 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar);
- ath10k_pci_device_reset(ar);
+ ath10k_pci_warm_reset(ar);
ath10k_pci_ce_deinit(ar);
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
@@ -2151,7 +2044,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
#endif
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
- .send_head = ath10k_pci_hif_send_head,
+ .tx_sg = ath10k_pci_hif_tx_sg,
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
.start = ath10k_pci_hif_start,
.stop = ath10k_pci_hif_stop,
@@ -2411,11 +2304,10 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
/* Try MSI-X */
if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
- ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
- if (ret == 0)
- return 0;
+ ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
+ ar_pci->num_msi_intrs);
if (ret > 0)
- pci_disable_msi(ar_pci->pdev);
+ return 0;
/* fall-through */
}
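
The pci_enable_msi_block() to pci_enable_msi_range() switch also flips the success check: msi_block() returned 0 on success, while msi_range() returns the number of vectors actually enabled (a positive count) or a negative errno, so `ret > 0` is now the success case and there is nothing to disable on failure. A minimal illustration, assuming a struct pci_dev *pdev and a fixed vector count:

#include <linux/pci.h>

static int enable_exact_msi(struct pci_dev *pdev, int nvec)
{
	/* ask for exactly nvec vectors: returns nvec on success,
	 * a negative errno on failure (nothing to undo in that case) */
	int ret = pci_enable_msi_range(pdev, nvec, nvec);

	if (ret < 0)
		return ret;
	return 0;
}
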
@@ -2482,6 +2374,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
case MSI_NUM_REQUEST:
pci_disable_msi(ar_pci->pdev);
return 0;
+ default:
+ pci_disable_msi(ar_pci->pdev);
}
ath10k_warn("unknown irq configuration upon deinit\n");
@@ -2523,7 +2417,7 @@ out:
return ret;
}
-static int ath10k_pci_device_reset(struct ath10k *ar)
+static int ath10k_pci_cold_reset(struct ath10k *ar)
{
int i, ret;
u32 val;
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index a4f32038c440..b43fdb4f7319 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -43,23 +43,6 @@ struct bmi_xfer {
u32 resp_len;
};
-enum ath10k_pci_compl_state {
- ATH10K_PCI_COMPL_FREE = 0,
- ATH10K_PCI_COMPL_SEND,
- ATH10K_PCI_COMPL_RECV,
-};
-
-struct ath10k_pci_compl {
- struct list_head list;
- enum ath10k_pci_compl_state state;
- struct ath10k_ce_pipe *ce_state;
- struct ath10k_pci_pipe *pipe_info;
- struct sk_buff *skb;
- unsigned int nbytes;
- unsigned int transfer_id;
- unsigned int flags;
-};
-
/*
* PCI-specific Target state
*
@@ -175,9 +158,6 @@ struct ath10k_pci_pipe {
/* protects compl_free and num_send_allowed */
spinlock_t pipe_lock;
- /* List of free CE completion slots */
- struct list_head compl_free;
-
struct ath10k_pci *ar_pci;
struct tasklet_struct intr;
};
@@ -205,14 +185,6 @@ struct ath10k_pci {
atomic_t keep_awake_count;
bool verified_awake;
- /* List of CE completions to be processed */
- struct list_head compl_process;
-
- /* protects compl_processing and compl_process */
- spinlock_t compl_lock;
-
- bool compl_processing;
-
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 74f45fa6f428..0541dd939ce9 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -51,7 +51,8 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
struct ieee80211_tx_info *info;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
- int ret;
+
+ lockdep_assert_held(&htt->tx_lock);
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
@@ -65,12 +66,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
msdu = htt->pending_tx[tx_done->msdu_id];
skb_cb = ATH10K_SKB_CB(msdu);
- ret = ath10k_skb_unmap(dev, msdu);
- if (ret)
- ath10k_warn("data skb unmap failed (%d)\n", ret);
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->htt.frag_len)
- skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+ if (skb_cb->htt.txbuf)
+ dma_pool_free(htt->tx_pool,
+ skb_cb->htt.txbuf,
+ skb_cb->htt.txbuf_paddr);
ath10k_report_offchan_tx(htt->ar, msdu);
@@ -92,13 +93,11 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
/* we do not own the msdu anymore */
exit:
- spin_lock_bh(&htt->tx_lock);
htt->pending_tx[tx_done->msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
- spin_unlock_bh(&htt->tx_lock);
}
static const u8 rx_legacy_rate_idx[] = {
@@ -204,7 +203,7 @@ static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
break;
/* 80MHZ */
case 2:
- status->flag |= RX_FLAG_80MHZ;
+ status->vht_flag |= RX_VHT_FLAG_80MHZ;
}
status->flag |= RX_FLAG_VHT;
@@ -258,20 +257,26 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->band = ch->band;
status->freq = ch->center_freq;
+ if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
+ /* TSF available only in 32-bit */
+ status->mactime = info->tsf & 0xffffffff;
+ status->flag |= RX_FLAG_MACTIME_END;
+ }
+
ath10k_dbg(ATH10K_DBG_DATA,
- "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n",
+ "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
info->skb,
info->skb->len,
status->flag == 0 ? "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
- status->flag & RX_FLAG_80MHZ ? "80" : "",
+ status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->vht_nss,
status->freq,
- status->band);
+ status->band, status->flag, info->fcs_err);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
info->skb->data, info->skb->len);
@@ -378,7 +383,8 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
- ath10k_warn("unknown peer id %d\n", ev->peer_id);
+ ath10k_warn("peer-unmap-event: unknown peer id %d\n",
+ ev->peer_id);
goto exit;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 712a606a080a..cb1f7b5bcf4c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -213,7 +213,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
- .ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
@@ -420,7 +420,6 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
- .arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
.dcs = WMI_PDEV_PARAM_DCS,
.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
@@ -472,8 +471,7 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
.bcnflt_stats_update_period =
WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
- .arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
- .arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
.dcs = WMI_10X_PDEV_PARAM_DCS,
.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
@@ -561,7 +559,6 @@ err_pull:
static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
- struct wmi_bcn_tx_arg arg = {0};
int ret;
lockdep_assert_held(&arvif->ar->data_lock);
@@ -569,18 +566,16 @@ static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
if (arvif->beacon == NULL)
return;
- arg.vdev_id = arvif->vdev_id;
- arg.tx_rate = 0;
- arg.tx_power = 0;
- arg.bcn = arvif->beacon->data;
- arg.bcn_len = arvif->beacon->len;
+ if (arvif->beacon_sent)
+ return;
- ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
+ ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
if (ret)
return;
- dev_kfree_skb_any(arvif->beacon);
- arvif->beacon = NULL;
+ /* We need to retain the arvif->beacon reference for DMA unmapping and
+ * freeing the skbuff later. */
+ arvif->beacon_sent = true;
}
static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
@@ -1116,7 +1111,27 @@ static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
+ struct wmi_peer_sta_kickout_event *ev;
+ struct ieee80211_sta *sta;
+
+ ev = (struct wmi_peer_sta_kickout_event *)skb->data;
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
+ ev->peer_macaddr.addr);
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
+ if (!sta) {
+ ath10k_warn("Spurious quick kickout for STA %pM\n",
+ ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ rcu_read_unlock();
}
/*
@@ -1217,6 +1232,13 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
+ if (tim->dtim_count == 0) {
+ ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
+
+ if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
+ ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
+ }
+
ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
tim->dtim_count, tim->dtim_period,
tim->bitmap_ctrl, pvm_len);
@@ -1338,7 +1360,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
struct sk_buff *bcn;
- int vdev_id = 0;
+ int ret, vdev_id = 0;
ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
@@ -1385,6 +1407,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
continue;
}
+ /* There are no completions for beacons, so wait for the next SWBA
+ * before telling mac80211 to decrement the CSA counter.
+ *
+ * Once the CSA counter has run out, stop sending beacons until the
+ * actual channel switch is done. */

+ if (arvif->vif->csa_active &&
+ ieee80211_csa_is_complete(arvif->vif)) {
+ ieee80211_csa_finish(arvif->vif);
+ continue;
+ }
+
bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
if (!bcn) {
ath10k_warn("could not get mac80211 beacon\n");
@@ -1396,15 +1429,33 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
spin_lock_bh(&ar->data_lock);
+
if (arvif->beacon) {
- ath10k_warn("SWBA overrun on vdev %d\n",
- arvif->vdev_id);
+ if (!arvif->beacon_sent)
+ ath10k_warn("SWBA overrun on vdev %d\n",
+ arvif->vdev_id);
+
+ dma_unmap_single(arvif->ar->dev,
+ ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
}
+ ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
+ bcn->data, bcn->len,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(arvif->ar->dev,
+ ATH10K_SKB_CB(bcn)->paddr);
+ if (ret) {
+ ath10k_warn("failed to map beacon: %d\n", ret);
+ goto skip;
+ }
+
arvif->beacon = bcn;
+ arvif->beacon_sent = false;
ath10k_wmi_tx_beacon_nowait(arvif);
+skip:
spin_unlock_bh(&ar->data_lock);
}
}
@@ -2031,11 +2082,11 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+ "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
__le32_to_cpu(ev->sw_version),
__le32_to_cpu(ev->abi_version),
ev->mac_addr.addr,
- __le32_to_cpu(ev->status));
+ __le32_to_cpu(ev->status), skb->len, sizeof(*ev));
complete(&ar->wmi.unified_ready);
return 0;
@@ -2403,7 +2454,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
ar->wmi.cmd->pdev_set_channel_cmdid);
}
-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
struct wmi_pdev_suspend_cmd *cmd;
struct sk_buff *skb;
@@ -2413,7 +2464,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
return -ENOMEM;
cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
- cmd->suspend_opt = WMI_PDEV_SUSPEND;
+ cmd->suspend_opt = __cpu_to_le32(suspend_opt);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}
@@ -3342,7 +3393,6 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
ci->max_power = ch->max_power;
ci->reg_power = ch->max_reg_power;
ci->antenna_max = ch->max_antenna_gain;
- ci->antenna_max = 0;
/* mode & flags share storage */
ci->mode = ch->mode;
@@ -3411,25 +3461,41 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
-int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
- const struct wmi_bcn_tx_arg *arg)
+/* This function assumes the beacon is already DMA mapped */
+int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
{
- struct wmi_bcn_tx_cmd *cmd;
+ struct wmi_bcn_tx_ref_cmd *cmd;
struct sk_buff *skb;
+ struct sk_buff *beacon = arvif->beacon;
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hdr *hdr;
int ret;
+ u16 fc;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
- cmd = (struct wmi_bcn_tx_cmd *)skb->data;
- cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id);
- cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate);
- cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
- cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
- memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
+ hdr = (struct ieee80211_hdr *)beacon->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
+ cmd->data_len = __cpu_to_le32(beacon->len);
+ cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
+ cmd->msdu_id = 0;
+ cmd->frame_control = __cpu_to_le32(fc);
+ cmd->flags = 0;
+
+ if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+ if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb,
+ ar->wmi.cmd->pdev_send_bcn_cmdid);
- ret = ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
if (ret)
dev_kfree_skb(skb);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 4b5e7d3d32b6..4fcc96aa9513 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -2277,7 +2277,6 @@ struct wmi_pdev_param_map {
u32 bcnflt_stats_update_period;
u32 pmf_qos;
u32 arp_ac_override;
- u32 arpdhcp_ac_override;
u32 dcs;
u32 ani_enable;
u32 ani_poll_period;
@@ -3403,6 +3402,24 @@ struct wmi_bcn_tx_arg {
const void *bcn;
};
+enum wmi_bcn_tx_ref_flags {
+ WMI_BCN_TX_REF_FLAG_DTIM_ZERO = 0x1,
+ WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
+};
+
+struct wmi_bcn_tx_ref_cmd {
+ __le32 vdev_id;
+ __le32 data_len;
+ /* physical address of the frame - dma pointer */
+ __le32 data_ptr;
+ /* id for host to track */
+ __le32 msdu_id;
+ /* frame ctrl to setup PPDU desc */
+ __le32 frame_control;
+ /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
+ __le32 flags;
+} __packed;
+
/* Beacon filter */
#define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */
#define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */
@@ -3859,6 +3876,12 @@ enum wmi_peer_smps_state {
WMI_PEER_SMPS_DYNAMIC = 0x2
};
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+};
+
enum wmi_peer_param {
WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
WMI_PEER_AMPDU = 0x2,
@@ -4039,6 +4062,10 @@ struct wmi_chan_info_event {
__le32 cycle_count;
} __packed;
+struct wmi_peer_sta_kickout_event {
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
/* FIXME: empirically extrapolated */
@@ -4172,7 +4199,7 @@ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
int ath10k_wmi_connect_htc_service(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *);
-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g);
@@ -4219,8 +4246,7 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
enum wmi_ap_ps_peer_param param_id, u32 value);
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
-int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
- const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
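The wmi_bcn_tx_ref_cmd added above replaces the copy-based beacon command: only the DMA address, length and flags of the already-mapped beacon travel in the command, and the DTIM_ZERO / DELIVER_CAB flags mirror the TIM state computed in ath10k_wmi_update_tim(). The standalone sketch below packs such a descriptor with an explicit little-endian store; the struct and helpers are local stand-ins, not kernel APIs, and the numeric values are purely illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BCN_FLAG_DTIM_ZERO   0x1
#define BCN_FLAG_DELIVER_CAB 0x2

struct bcn_tx_ref_cmd {		/* mirrors the layout in the hunk above */
	uint8_t vdev_id[4];
	uint8_t data_len[4];
	uint8_t data_ptr[4];
	uint8_t msdu_id[4];
	uint8_t frame_control[4];
	uint8_t flags[4];
};

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

int main(void)
{
	struct bcn_tx_ref_cmd cmd;
	uint32_t flags = 0;
	int dtim_count = 0, tim_mcast = 1;	/* example TIM state */

	/* same conditions as ath10k_wmi_update_tim(): a DTIM beacon may
	 * also ask the firmware to deliver buffered multicast (CAB) */
	if (dtim_count == 0) {
		flags |= BCN_FLAG_DTIM_ZERO;
		if (tim_mcast == 1)
			flags |= BCN_FLAG_DELIVER_CAB;
	}

	memset(&cmd, 0, sizeof(cmd));
	put_le32(cmd.vdev_id, 0);
	put_le32(cmd.data_len, 180);		/* beacon length in bytes */
	put_le32(cmd.data_ptr, 0x3f000000);	/* DMA address of the beacon */
	put_le32(cmd.frame_control, 0x0080);	/* mgmt/beacon frame control */
	put_le32(cmd.flags, flags);

	printf("beacon ref flags = 0x%x\n", flags);
	return 0;
}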
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index ef35da84f63b..4b18434ba697 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -751,6 +751,9 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(ah->dev, bf->skbaddr))
+ return -ENOSPC;
+
ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates,
ARRAY_SIZE(bf->rates));
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 4ee01f654235..afb23b3cc7be 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -681,6 +681,7 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
survey->channel = conf->chandef.chan;
survey->noise = ah->ah_noise_floor;
survey->filled = SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_IN_USE |
SURVEY_INFO_CHANNEL_TIME |
SURVEY_INFO_CHANNEL_TIME_BUSY |
SURVEY_INFO_CHANNEL_TIME_RX |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index fd4c89df67e1..c2c6f4604958 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -790,7 +790,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
if (nw_type & ADHOC_NETWORK) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
nw_type & ADHOC_CREATOR ? "creator" : "joiner");
- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_KERNEL);
cfg80211_put_bss(ar->wiphy, bss);
return;
}
@@ -861,13 +861,9 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
}
if (vif->nw_type & ADHOC_NETWORK) {
- if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC)
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in ibss mode\n", __func__);
- return;
- }
- memset(bssid, 0, ETH_ALEN);
- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
return;
}
@@ -3256,6 +3252,15 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
struct ath6kl_vif *vif = netdev_priv(dev);
u16 interval;
int ret, rssi_thold;
+ int n_match_sets = request->n_match_sets;
+
+ /*
+ * If there's a matchset w/o an SSID, then assume it's just for
+ * the RSSI (nothing else is currently supported) and ignore it.
+ * The device only supports a global RSSI filter that we set below.
+ */
+ if (n_match_sets == 1 && !request->match_sets[0].ssid.ssid_len)
+ n_match_sets = 0;
if (ar->state != ATH6KL_STATE_ON)
return -EIO;
@@ -3268,11 +3273,11 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
request->n_ssids,
request->match_sets,
- request->n_match_sets);
+ n_match_sets);
if (ret < 0)
return ret;
- if (!request->n_match_sets) {
+ if (!n_match_sets) {
ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
ALL_BSS_FILTER, 0);
if (ret < 0)
@@ -3286,12 +3291,12 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
ar->fw_capabilities)) {
- if (request->rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
+ if (request->min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
rssi_thold = 0;
- else if (request->rssi_thold < -127)
+ else if (request->min_rssi_thold < -127)
rssi_thold = -127;
else
- rssi_thold = request->rssi_thold;
+ rssi_thold = request->min_rssi_thold;
ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
rssi_thold);
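The scheduled-scan hunk above switches to cfg80211's min_rssi_thold and clamps it into the range the firmware accepts. A small sketch of that clamping follows; the sentinel value is an assumption standing in for the real nl80211 constant.

#include <stdio.h>

#define SCAN_RSSI_THOLD_OFF (-300)	/* stand-in for the nl80211 sentinel */

static int clamp_rssi_thold(int min_rssi_thold)
{
	if (min_rssi_thold <= SCAN_RSSI_THOLD_OFF)
		return 0;		/* 0 disables the RSSI filter */
	if (min_rssi_thold < -127)
		return -127;		/* firmware takes a signed byte */
	return min_rssi_thold;
}

int main(void)
{
	int samples[] = { SCAN_RSSI_THOLD_OFF, -150, -80 };

	for (int i = 0; i < 3; i++)
		printf("%4d dBm -> %4d\n", samples[i],
		       clamp_rssi_thold(samples[i]));
	return 0;
}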
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index f38ff6a6255e..56c3fd5cef65 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -24,7 +24,7 @@
/* constants */
#define TX_URB_COUNT 32
#define RX_URB_COUNT 32
-#define ATH6KL_USB_RX_BUFFER_SIZE 1700
+#define ATH6KL_USB_RX_BUFFER_SIZE 4096
/* tx/rx pipes for usb */
enum ATH6KL_USB_PIPE_ID {
@@ -481,8 +481,8 @@ static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb)
* ATH6KL_USB_RX_BUFFER_SIZE);
*/
- ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh =
- ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_alloc / 2;
+ ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh = 1;
+
ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA],
ATH6KL_USB_RX_BUFFER_SIZE);
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 4f16d79c9eb1..8b4ce28e3ce8 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -914,7 +914,7 @@ ath6kl_get_regpair(u16 regdmn)
return NULL;
for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
- if (regDomainPairs[i].regDmnEnum == regdmn)
+ if (regDomainPairs[i].reg_domain == regdmn)
return &regDomainPairs[i];
}
@@ -954,7 +954,7 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
country = ath6kl_regd_find_country_by_rd((u16) reg_code);
if (regpair)
ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
- regpair->regDmnEnum);
+ regpair->reg_domain);
else
ath6kl_warn("Regpair not found reg_code 0x%0x\n",
reg_code);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7b96b3e5712d..8fcc029a76a6 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -120,18 +120,6 @@ config ATH9K_WOW
This option enables Wake on Wireless LAN support for certain cards.
Currently, AR9462 is supported.
-config ATH9K_LEGACY_RATE_CONTROL
- bool "Atheros ath9k rate control"
- depends on ATH9K
- default n
- ---help---
- Say Y, if you want to use the ath9k specific rate control
- module instead of minstrel_ht. Be warned that there are various
- issues with the ath9k RC and minstrel is a more robust algorithm.
- Note that even if this option is selected, "ath9k_rate_control"
- has to be passed to mac80211 using the module parameter,
- ieee80211_default_rc_algo.
-
config ATH9K_RFKILL
bool "Atheros ath9k rfkill support" if EXPERT
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index a40e5c5d7418..8e1c7b0fe76c 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -8,7 +8,6 @@ ath9k-y += beacon.o \
antenna.o
ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
-ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
@@ -52,7 +51,9 @@ ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
-ath9k_common-y:= common.o
+ath9k_common-y:= common.o \
+ common-init.o \
+ common-beacon.o
ath9k_htc-y += htc_hst.o \
hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 2dff2765769b..a0398fe3eb28 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -39,6 +39,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
.name = "qca955x_wmac",
.driver_data = AR9300_DEVID_QCA955X,
},
+ {
+ .name = "qca953x_wmac",
+ .driver_data = AR9300_DEVID_AR953X,
+ },
{},
};
@@ -82,6 +86,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
int irq;
int ret = 0;
struct ath_hw *ah;
+ struct ath_common *common;
char hw_name[64];
if (!dev_get_platdata(&pdev->dev)) {
@@ -124,9 +129,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
sc->mem = mem;
sc->irq = irq;
- /* Will be cleared in ath9k_start() */
- set_bit(SC_OP_INVALID, &sc->sc_flags);
-
ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
@@ -144,6 +146,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
hw_name, (unsigned long)mem, irq);
+ common = ath9k_hw_common(sc->sc_ah);
+ /* Will be cleared in ath9k_start() */
+ set_bit(ATH_OP_INVALID, &common->op_flags);
return 0;
err_irq:
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index d28923b7435b..6d47783f2e5b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -176,16 +176,26 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
if (ah->opmode == NL80211_IFTYPE_STATION &&
BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH)
weak_sig = true;
-
/*
- * OFDM Weak signal detection is always enabled for AP mode.
+ * Newer chipsets are better at dealing with high PHY error counts -
+ * keep weak signal detection enabled when no RSSI threshold is
+ * available to determine if it is needed (mode != STA)
*/
- if (ah->opmode != NL80211_IFTYPE_AP &&
- aniState->ofdmWeakSigDetect != weak_sig) {
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- entry_ofdm->ofdm_weak_signal_on);
- }
+ else if (AR_SREV_9300_20_OR_LATER(ah) &&
+ ah->opmode != NL80211_IFTYPE_STATION)
+ weak_sig = true;
+
+ /* Older chipsets are more sensitive to high PHY error counts */
+ else if (!AR_SREV_9300_20_OR_LATER(ah) &&
+ aniState->ofdmNoiseImmunityLevel >= 8)
+ weak_sig = false;
+
+ if (aniState->ofdmWeakSigDetect != weak_sig)
+ ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ weak_sig);
+
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ return;
if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
@@ -308,17 +318,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
BUG_ON(aniState == NULL);
ah->stats.ast_ani_reset++;
- /* only allow a subset of functions in AP mode */
- if (ah->opmode == NL80211_IFTYPE_AP) {
- if (IS_CHAN_2GHZ(chan)) {
- ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
- ATH9K_ANI_FIRSTEP_LEVEL);
- if (AR_SREV_9300_20_OR_LATER(ah))
- ah->ani_function |= ATH9K_ANI_MRC_CCK;
- } else
- ah->ani_function = 0;
- }
-
ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
aniState->ofdmNoiseImmunityLevel);
cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
@@ -483,10 +482,17 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ath_dbg(common, ANI, "Initialize ANI\n");
- ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
- ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
- ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
- ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
+ ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
+ ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
+ } else {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
+ ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
+ ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_OLD;
+ }
ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 21e7b83c3f6a..c40965b4c1e2 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -22,12 +22,16 @@
/* units are errors per second */
#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
+#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500
#define ATH9K_ANI_OFDM_TRIG_LOW 400
#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
+#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200
#define ATH9K_ANI_CCK_TRIG_HIGH 600
+#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200
#define ATH9K_ANI_CCK_TRIG_LOW 300
+#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100
#define ATH9K_ANI_SPUR_IMMUNE_LVL 3
#define ATH9K_ANI_FIRSTEP_LVL 2
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index ff415e863ee9..3b3e91057a4c 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -26,10 +26,6 @@ static const int firstep_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
-static const int cycpwrThr1_table[] =
-/* level: 0 1 2 3 4 5 6 7 8 */
- { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
-
/*
* register values to turn OFDM weak signal detection OFF
*/
@@ -921,7 +917,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
struct ar5416AniState *aniState = &ah->ani;
- s32 value, value2;
+ s32 value;
switch (cmd & ah->ani_function) {
case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
@@ -1008,42 +1004,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
case ATH9K_ANI_FIRSTEP_LEVEL:{
u32 level = param;
- if (level >= ARRAY_SIZE(firstep_table)) {
- ath_dbg(common, ANI,
- "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
- level, ARRAY_SIZE(firstep_table));
- return false;
- }
-
- /*
- * make register setting relative to default
- * from INI file & cap value
- */
- value = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
- aniState->iniDef.firstep;
- if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
- value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
- if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
- value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+ value = level * 2;
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRSTEP,
- value);
- /*
- * we need to set first step low register too
- * make register setting relative to default
- * from INI file & cap value
- */
- value2 = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
- aniState->iniDef.firstepLow;
- if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
- value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
- if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
- value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
-
+ AR_PHY_FIND_SIG_FIRSTEP, value);
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
- AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
+ AR_PHY_FIND_SIG_FIRSTEP_LOW, value);
if (level != aniState->firstepLevel) {
ath_dbg(common, ANI,
@@ -1060,7 +1025,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
aniState->firstepLevel,
level,
ATH9K_ANI_FIRSTEP_LVL,
- value2,
+ value,
aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
ah->stats.ast_ani_stepup++;
@@ -1073,41 +1038,13 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
u32 level = param;
- if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
- ath_dbg(common, ANI,
- "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
- level, ARRAY_SIZE(cycpwrThr1_table));
- return false;
- }
- /*
- * make register setting relative to default
- * from INI file & cap value
- */
- value = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
- aniState->iniDef.cycpwrThr1;
- if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
- value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
- if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
- value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+ value = (level + 1) * 2;
REG_RMW_FIELD(ah, AR_PHY_TIMING5,
- AR_PHY_TIMING5_CYCPWR_THR1,
- value);
+ AR_PHY_TIMING5_CYCPWR_THR1, value);
- /*
- * set AR_PHY_EXT_CCA for extension channel
- * make register setting relative to default
- * from INI file & cap value
- */
- value2 = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
- aniState->iniDef.cycpwrThr1Ext;
- if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
- value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
- if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
- value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
- REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
- AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
+ if (IS_CHAN_HT40(ah->curchan))
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+ AR_PHY_EXT_TIMING5_CYCPWR_THR1, value);
if (level != aniState->spurImmunityLevel) {
ath_dbg(common, ANI,
@@ -1124,7 +1061,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
aniState->spurImmunityLevel,
level,
ATH9K_ANI_SPUR_IMMUNE_LVL,
- value2,
+ value,
aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
ah->stats.ast_ani_spurup++;
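With the table lookups removed above, the ar5008 ANI hook now programs FIRSTEP as 2 * level and CYCPWR_THR1 as 2 * (level + 1). The short program below only prints the register values this linear mapping produces for the valid levels; the register field semantics themselves are unchanged.

#include <stdio.h>

int main(void)
{
	/* FIRSTEP levels 0..8, spur immunity levels 0..7, as in the driver */
	for (int level = 0; level <= 8; level++)
		printf("firstep level %d -> %2d\n", level, level * 2);
	for (int level = 0; level <= 7; level++)
		printf("cycpwr_thr1 level %d -> %2d\n", level, (level + 1) * 2);
	return 0;
}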
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index a352128c40ad..ac8301ef5242 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -23,10 +23,11 @@
#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
#define MAX_MAG_DELTA 11
#define MAX_PHS_DELTA 10
+#define MAXIQCAL 3
struct coeff {
- int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
- int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
+ int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
+ int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
int iqc_coeff[2];
};
@@ -655,9 +656,6 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (i2_m_q2_a0_d1 > 0x800)
i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
- if (i2_p_q2_a0_d1 > 0x1000)
- i2_p_q2_a0_d1 = -((0x1fff - i2_p_q2_a0_d1) + 1);
-
if (iq_corr_a0_d1 > 0x800)
iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
@@ -800,7 +798,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (q_q_coff > 63)
q_q_coff = 63;
- iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
+ iqc_coeff[0] = (q_q_coff * 128) + (0x7f & q_i_coff);
ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[0]);
@@ -831,7 +829,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (q_q_coff > 63)
q_q_coff = 63;
- iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
+ iqc_coeff[1] = (q_q_coff * 128) + (0x7f & q_i_coff);
ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[1]);
@@ -839,7 +837,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
return true;
}
-static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
+static void ar9003_hw_detect_outlier(int mp_coeff[][MAXIQCAL],
+ int nmeasurement,
int max_delta)
{
int mp_max = -64, max_idx = 0;
@@ -848,20 +847,20 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
/* find min/max mismatch across all calibrated gains */
for (i = 0; i < nmeasurement; i++) {
- if (mp_coeff[i] > mp_max) {
- mp_max = mp_coeff[i];
+ if (mp_coeff[i][0] > mp_max) {
+ mp_max = mp_coeff[i][0];
max_idx = i;
- } else if (mp_coeff[i] < mp_min) {
- mp_min = mp_coeff[i];
+ } else if (mp_coeff[i][0] < mp_min) {
+ mp_min = mp_coeff[i][0];
min_idx = i;
}
}
/* find average (exclude max abs value) */
for (i = 0; i < nmeasurement; i++) {
- if ((abs(mp_coeff[i]) < abs(mp_max)) ||
- (abs(mp_coeff[i]) < abs(mp_min))) {
- mp_avg += mp_coeff[i];
+ if ((abs(mp_coeff[i][0]) < abs(mp_max)) ||
+ (abs(mp_coeff[i][0]) < abs(mp_min))) {
+ mp_avg += mp_coeff[i][0];
mp_count++;
}
}
@@ -873,7 +872,7 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
if (mp_count)
mp_avg /= mp_count;
else
- mp_avg = mp_coeff[nmeasurement - 1];
+ mp_avg = mp_coeff[nmeasurement - 1][0];
/* detect outlier */
if (abs(mp_max - mp_min) > max_delta) {
@@ -882,15 +881,16 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
else
outlier_idx = min_idx;
- mp_coeff[outlier_idx] = mp_avg;
+ mp_coeff[outlier_idx][0] = mp_avg;
}
}
-static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
- struct coeff *coeff,
- bool is_reusable)
+static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
+ struct coeff *coeff,
+ bool is_reusable)
{
int i, im, nmeasurement;
+ int magnitude, phase;
u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
struct ath9k_hw_cal_data *caldata = ah->caldata;
@@ -920,21 +920,30 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
if (nmeasurement > MAX_MEASUREMENT)
nmeasurement = MAX_MEASUREMENT;
- /* detect outlier only if nmeasurement > 1 */
- if (nmeasurement > 1) {
- /* Detect magnitude outlier */
- ar9003_hw_detect_outlier(coeff->mag_coeff[i],
- nmeasurement, MAX_MAG_DELTA);
-
- /* Detect phase outlier */
- ar9003_hw_detect_outlier(coeff->phs_coeff[i],
- nmeasurement, MAX_PHS_DELTA);
+ /*
+ * Skip normal outlier detection for AR9550.
+ */
+ if (!AR_SREV_9550(ah)) {
+ /* detect outlier only if nmeasurement > 1 */
+ if (nmeasurement > 1) {
+ /* Detect magnitude outlier */
+ ar9003_hw_detect_outlier(coeff->mag_coeff[i],
+ nmeasurement,
+ MAX_MAG_DELTA);
+
+ /* Detect phase outlier */
+ ar9003_hw_detect_outlier(coeff->phs_coeff[i],
+ nmeasurement,
+ MAX_PHS_DELTA);
+ }
}
for (im = 0; im < nmeasurement; im++) {
+ magnitude = coeff->mag_coeff[i][im][0];
+ phase = coeff->phs_coeff[i][im][0];
- coeff->iqc_coeff[0] = (coeff->mag_coeff[i][im] & 0x7f) |
- ((coeff->phs_coeff[i][im] & 0x7f) << 7);
+ coeff->iqc_coeff[0] =
+ (phase & 0x7f) | ((magnitude & 0x7f) << 7);
if ((im % 2) == 0)
REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
@@ -991,7 +1000,63 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
return true;
}
-static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
+static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah,
+ struct coeff *coeff,
+ int i, int nmeasurement)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int im, ix, iy, temp;
+
+ for (im = 0; im < nmeasurement; im++) {
+ for (ix = 0; ix < MAXIQCAL - 1; ix++) {
+ for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) {
+ if (coeff->mag_coeff[i][im][iy] <
+ coeff->mag_coeff[i][im][ix]) {
+ temp = coeff->mag_coeff[i][im][ix];
+ coeff->mag_coeff[i][im][ix] =
+ coeff->mag_coeff[i][im][iy];
+ coeff->mag_coeff[i][im][iy] = temp;
+ }
+ if (coeff->phs_coeff[i][im][iy] <
+ coeff->phs_coeff[i][im][ix]) {
+ temp = coeff->phs_coeff[i][im][ix];
+ coeff->phs_coeff[i][im][ix] =
+ coeff->phs_coeff[i][im][iy];
+ coeff->phs_coeff[i][im][iy] = temp;
+ }
+ }
+ }
+ coeff->mag_coeff[i][im][0] = coeff->mag_coeff[i][im][MAXIQCAL / 2];
+ coeff->phs_coeff[i][im][0] = coeff->phs_coeff[i][im][MAXIQCAL / 2];
+
+ ath_dbg(common, CALIBRATE,
+ "IQCAL: Median [ch%d][gain%d]: mag = %d phase = %d\n",
+ i, im,
+ coeff->mag_coeff[i][im][0],
+ coeff->phs_coeff[i][im][0]);
+ }
+}
+
+static bool ar955x_tx_iq_cal_median(struct ath_hw *ah,
+ struct coeff *coeff,
+ int iqcal_idx,
+ int nmeasurement)
+{
+ int i;
+
+ if ((iqcal_idx + 1) != MAXIQCAL)
+ return false;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ __ar955x_tx_iq_cal_sort(ah, coeff, i, nmeasurement);
+ }
+
+ return true;
+}
+
+static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
+ int iqcal_idx,
+ bool is_reusable)
{
struct ath_common *common = ath9k_hw_common(ah);
const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
@@ -1004,10 +1069,11 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
AR_PHY_CHAN_INFO_TAB_1,
AR_PHY_CHAN_INFO_TAB_2,
};
- struct coeff coeff;
+ static struct coeff coeff;
s32 iq_res[6];
int i, im, j;
- int nmeasurement;
+ int nmeasurement = 0;
+ bool outlier_detect = true;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->txchainmask & (1 << i)))
@@ -1065,17 +1131,23 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
goto tx_iqcal_fail;
}
- coeff.mag_coeff[i][im] = coeff.iqc_coeff[0] & 0x7f;
- coeff.phs_coeff[i][im] =
+ coeff.phs_coeff[i][im][iqcal_idx] =
+ coeff.iqc_coeff[0] & 0x7f;
+ coeff.mag_coeff[i][im][iqcal_idx] =
(coeff.iqc_coeff[0] >> 7) & 0x7f;
- if (coeff.mag_coeff[i][im] > 63)
- coeff.mag_coeff[i][im] -= 128;
- if (coeff.phs_coeff[i][im] > 63)
- coeff.phs_coeff[i][im] -= 128;
+ if (coeff.mag_coeff[i][im][iqcal_idx] > 63)
+ coeff.mag_coeff[i][im][iqcal_idx] -= 128;
+ if (coeff.phs_coeff[i][im][iqcal_idx] > 63)
+ coeff.phs_coeff[i][im][iqcal_idx] -= 128;
}
}
- ar9003_hw_tx_iqcal_load_avg_2_passes(ah, &coeff, is_reusable);
+
+ if (AR_SREV_9550(ah))
+ outlier_detect = ar955x_tx_iq_cal_median(ah, &coeff,
+ iqcal_idx, nmeasurement);
+ if (outlier_detect)
+ ar9003_hw_tx_iq_cal_outlier_detection(ah, &coeff, is_reusable);
return;
@@ -1409,7 +1481,7 @@ skip_tx_iqcal:
}
if (txiqcal_done)
- ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
+ ar9003_hw_tx_iq_cal_post_proc(ah, 0, is_reusable);
else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
ar9003_hw_tx_iq_cal_reload(ah);
@@ -1455,14 +1527,38 @@ skip_tx_iqcal:
return true;
}
+static bool do_ar9003_agc_cal(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool status;
+
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in %d ms,"
+ "noisy environment?\n",
+ AH_WAIT_TIMEOUT / 1000);
+ return false;
+ }
+
+ return true;
+}
+
static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
bool txiqcal_done = false;
- bool is_reusable = true, status = true;
+ bool status = true;
bool run_agc_cal = false, sep_iq_cal = false;
+ int i = 0;
/* Use chip chainmask only for calibration */
ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
@@ -1485,7 +1581,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
* AGC calibration. Specifically, AR9550 in SoC chips.
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
- txiqcal_done = true;
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) {
+ txiqcal_done = true;
+ } else {
+ txiqcal_done = false;
+ }
run_agc_cal = true;
} else {
sep_iq_cal = true;
@@ -1512,27 +1613,37 @@ skip_tx_iqcal:
if (AR_SREV_9330_11(ah))
ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
- /* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_CAL);
-
- /* Poll for offset calibration complete */
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT);
- }
+ /*
+ * For non-AR9550 chips, we just trigger AGC calibration
+ * in the HW, poll for completion and then process
+ * the results.
+ *
+ * For AR955x, we run it multiple times and use
+ * median IQ correction.
+ */
+ if (!AR_SREV_9550(ah)) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
- if (!status) {
- ath_dbg(common, CALIBRATE,
- "offset calibration failed to complete in %d ms; noisy environment?\n",
- AH_WAIT_TIMEOUT / 1000);
- return false;
+ if (txiqcal_done)
+ ar9003_hw_tx_iq_cal_post_proc(ah, 0, false);
+ } else {
+ if (!txiqcal_done) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
+ } else {
+ for (i = 0; i < MAXIQCAL; i++) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
+ ar9003_hw_tx_iq_cal_post_proc(ah, i, false);
+ }
+ }
+ }
}
- if (txiqcal_done)
- ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
-
/* Revert chainmask to runtime parameters */
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
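For AR955x the Tx IQ calibration above now runs MAXIQCAL (3) times, and __ar955x_tx_iq_cal_sort() keeps the median sample per gain entry (slot MAXIQCAL/2 after sorting, copied back into slot 0 for the common write-back path). A standalone sketch of that median-of-three selection:

#include <stdio.h>

#define MAXIQCAL 3

static int iq_cal_median(int s[MAXIQCAL])
{
	/* same pairwise-swap sort as __ar955x_tx_iq_cal_sort() */
	for (int ix = 0; ix < MAXIQCAL - 1; ix++)
		for (int iy = ix + 1; iy <= MAXIQCAL - 1; iy++)
			if (s[iy] < s[ix]) {
				int tmp = s[ix];
				s[ix] = s[iy];
				s[iy] = tmp;
			}
	return s[MAXIQCAL / 2];
}

int main(void)
{
	int mag[MAXIQCAL] = { 7, -3, 5 };	/* three calibration passes */

	printf("median magnitude coefficient = %d\n", iq_cal_median(mag));
	return 0;
}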
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b8daff78b9d1..235053ba7737 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -23,8 +23,8 @@
#define COMP_HDR_LEN 4
#define COMP_CKSUM_LEN 2
-#define LE16(x) __constant_cpu_to_le16(x)
-#define LE32(x) __constant_cpu_to_le32(x)
+#define LE16(x) cpu_to_le16(x)
+#define LE32(x) cpu_to_le32(x)
/* Local defines to distinguish between extension and control CTL's */
#define EXT_ADDITIVE (0x8000)
@@ -4792,43 +4792,54 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah,
tempslope:
if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+ u8 txmask = (eep->baseEepHeader.txrxMask & 0xf0) >> 4;
+
/*
* AR955x has tempSlope register for each chain.
* Check whether temp_compensation feature is enabled or not.
*/
if (eep->baseEepHeader.featureEnable & 0x1) {
if (frequency < 4000) {
- REG_RMW_FIELD(ah, AR_PHY_TPC_19,
- AR_PHY_TPC_19_ALPHA_THERM,
- eep->base_ext2.tempSlopeLow);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
- AR_PHY_TPC_19_ALPHA_THERM,
- temp_slope);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
- AR_PHY_TPC_19_ALPHA_THERM,
- eep->base_ext2.tempSlopeHigh);
+ if (txmask & BIT(0))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ eep->base_ext2.tempSlopeLow);
+ if (txmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope);
+ if (txmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ eep->base_ext2.tempSlopeHigh);
} else {
- REG_RMW_FIELD(ah, AR_PHY_TPC_19,
- AR_PHY_TPC_19_ALPHA_THERM,
- temp_slope);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
- AR_PHY_TPC_19_ALPHA_THERM,
- temp_slope1);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
- AR_PHY_TPC_19_ALPHA_THERM,
- temp_slope2);
+ if (txmask & BIT(0))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope);
+ if (txmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope1);
+ if (txmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope2);
}
} else {
/*
* If temp compensation is not enabled,
* set all registers to 0.
*/
- REG_RMW_FIELD(ah, AR_PHY_TPC_19,
- AR_PHY_TPC_19_ALPHA_THERM, 0);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
- AR_PHY_TPC_19_ALPHA_THERM, 0);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
- AR_PHY_TPC_19_ALPHA_THERM, 0);
+ if (txmask & BIT(0))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ if (txmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ if (txmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
}
} else {
REG_RMW_FIELD(ah, AR_PHY_TPC_19,
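The tempslope hunk above gates each chain's register write on the Tx mask taken from the high nibble of txrxMask. A worked example of the extraction and the per-chain BIT() test, using 0x33 as an illustrative EEPROM value:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned char txrx_mask = 0x33;			/* rx = low nibble */
	unsigned char txmask = (txrx_mask & 0xf0) >> 4;	/* tx = high nibble */

	for (int chain = 0; chain < 3; chain++)
		printf("chain %d: %s\n", chain,
		       (txmask & BIT(chain)) ? "program tempSlope" : "skip");
	return 0;
}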
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 09facba1dc6d..8927fc34d84c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -868,10 +868,6 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
- if (IS_CHAN_QUARTER_RATE(chan))
- rfMode |= AR_PHY_MODE_QUARTER;
- if (IS_CHAN_HALF_RATE(chan))
- rfMode |= AR_PHY_MODE_HALF;
if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF))
REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL,
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index b5ac32cfbeb8..44d74495c4de 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -30,7 +30,6 @@
#include "spectral.h"
struct ath_node;
-struct ath_rate_table;
extern struct ieee80211_ops ath9k_ops;
extern int ath9k_modparam_nohwcrypt;
@@ -150,6 +149,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
#define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf))
+enum {
+ WLAN_RC_PHY_OFDM,
+ WLAN_RC_PHY_CCK,
+};
+
struct ath_txq {
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
u32 axq_qnum; /* ath9k hardware queue number */
@@ -399,21 +403,10 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
#define ATH_BCBUF 8
#define ATH_DEFAULT_BINTVAL 100 /* TU */
#define ATH_DEFAULT_BMISS_LIMIT 10
-#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
#define TSF_TO_TU(_h,_l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
-struct ath_beacon_config {
- int beacon_interval;
- u16 listen_interval;
- u16 dtim_period;
- u16 bmiss_timeout;
- u8 dtim_count;
- bool enable_beacon;
- bool ibss_creator;
-};
-
struct ath_beacon {
enum {
OK, /* no change needed */
@@ -423,11 +416,9 @@ struct ath_beacon {
u32 beaconq;
u32 bmisscnt;
- u32 bc_tstamp;
struct ieee80211_vif *bslot[ATH_BCBUF];
int slottime;
int slotupdate;
- struct ath9k_tx_queue_info beacon_qi;
struct ath_descdma bdma;
struct ath_txq *cabq;
struct list_head bbuf;
@@ -442,7 +433,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
-bool ath9k_csa_is_finished(struct ath_softc *sc);
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_csa_update(struct ath_softc *sc);
/*******************/
/* Link Monitoring */
@@ -693,15 +685,6 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define MAX_GTT_CNT 5
-enum sc_op_flags {
- SC_OP_INVALID,
- SC_OP_BEACONS,
- SC_OP_ANI_RUN,
- SC_OP_PRIM_STA_VIF,
- SC_OP_HW_RESET,
- SC_OP_SCANNING,
-};
-
/* Powersave flags */
#define PS_WAIT_FOR_BEACON BIT(0)
#define PS_WAIT_FOR_CAB BIT(1)
@@ -731,7 +714,6 @@ struct ath_softc {
struct completion paprd_complete;
wait_queue_head_t tx_wait;
- unsigned long sc_flags;
unsigned long driver_data;
u8 gtt_cnt;
@@ -748,7 +730,6 @@ struct ath_softc {
struct ath_rx rx;
struct ath_tx tx;
struct ath_beacon beacon;
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
#ifdef CONFIG_MAC80211_LEDS
bool led_registered;
@@ -757,7 +738,6 @@ struct ath_softc {
#endif
struct ath9k_hw_cal_data caldata;
- int last_rssi;
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath9k_debug debug;
@@ -774,7 +754,6 @@ struct ath_softc {
#endif
struct ath_descdma txsdma;
- struct ieee80211_vif *csa_vif;
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 2e8bba0eb361..471e0f624e81 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -80,7 +80,7 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
u8 chainmask = ah->txchainmask;
u8 rate = 0;
- sband = &sc->sbands[common->hw->conf.chandef.chan->band];
+ sband = &common->sbands[common->hw->conf.chandef.chan->band];
rate = sband->bitrates[rateidx].hw_value;
if (vif->bss_conf.use_short_preamble)
rate |= sband->bitrates[rateidx].hw_value_short;
@@ -292,11 +292,8 @@ static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
(unsigned long long)tsfadjust, avp->av_bslot);
}
-bool ath9k_csa_is_finished(struct ath_softc *sc)
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- struct ieee80211_vif *vif;
-
- vif = sc->csa_vif;
if (!vif || !vif->csa_active)
return false;
@@ -304,11 +301,23 @@ bool ath9k_csa_is_finished(struct ath_softc *sc)
return false;
ieee80211_csa_finish(vif);
-
- sc->csa_vif = NULL;
return true;
}
+static void ath9k_csa_update_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = data;
+ ath9k_csa_is_finished(sc, vif);
+}
+
+void ath9k_csa_update(struct ath_softc *sc)
+{
+ ieee80211_iterate_active_interfaces(sc->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath9k_csa_update_vif,
+ sc);
+}
+
void ath9k_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
@@ -319,7 +328,7 @@ void ath9k_beacon_tasklet(unsigned long data)
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int slot;
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
ath_dbg(common, RESET,
"reset work is pending, skip beaconing now\n");
return;
@@ -362,13 +371,13 @@ void ath9k_beacon_tasklet(unsigned long data)
return;
}
- /* EDMA devices check that in the tx completion function. */
- if (!edma && ath9k_csa_is_finished(sc))
- return;
-
slot = ath9k_beacon_choose_slot(sc);
vif = sc->beacon.bslot[slot];
+ /* EDMA devices check that in the tx completion function. */
+ if (!edma && ath9k_csa_is_finished(sc, vif))
+ return;
+
if (!vif || !vif->bss_conf.enable_beacon)
return;
@@ -438,33 +447,6 @@ static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
ath9k_hw_enable_interrupts(ah);
}
-/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
-static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
-{
- u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
-
- tsf_mod = tsf & (BIT(10) - 1);
- tsf_hi = tsf >> 32;
- tsf_lo = ((u32) tsf) >> 10;
-
- mod_hi = tsf_hi % div_tu;
- mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
-
- return (mod_lo << 10) | tsf_mod;
-}
-
-static u32 ath9k_get_next_tbtt(struct ath_softc *sc, u64 tsf,
- unsigned int interval)
-{
- struct ath_hw *ah = sc->sc_ah;
- unsigned int offset;
-
- tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
- offset = ath9k_mod_tsf64_tu(tsf, interval);
-
- return (u32) tsf + TU_TO_USEC(interval) - offset;
-}
-
/*
* For multi-bss ap support beacons are either staggered evenly over N slots or
* burst together. For the former arrange for the SWBA to be delivered for each
@@ -474,115 +456,18 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 nexttbtt, intval;
-
- /* NB: the beacon interval is kept internally in TU's */
- intval = TU_TO_USEC(conf->beacon_interval);
- intval /= ATH_BCBUF;
- nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
- conf->beacon_interval);
-
- if (conf->enable_beacon)
- ah->imask |= ATH9K_INT_SWBA;
- else
- ah->imask &= ~ATH9K_INT_SWBA;
-
- ath_dbg(common, BEACON,
- "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
- (conf->enable_beacon) ? "Enable" : "Disable",
- nexttbtt, intval, conf->beacon_interval);
- ath9k_beacon_init(sc, nexttbtt, intval, false);
+ ath9k_cmn_beacon_config_ap(ah, conf, ATH_BCBUF);
+ ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, false);
}
-/*
- * This sets up the beacon timers according to the timestamp of the last
- * received beacon and the current TSF, configures PCF and DTIM
- * handling, programs the sleep registers so the hardware will wakeup in
- * time to receive beacons, and configures the beacon miss handling so
- * we'll receive a BMISS interrupt when we stop seeing beacons from the AP
- * we've associated with.
- */
-static void ath9k_beacon_config_sta(struct ath_softc *sc,
+static void ath9k_beacon_config_sta(struct ath_hw *ah,
struct ath_beacon_config *conf)
{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_beacon_state bs;
- int dtim_intval, sleepduration;
- u32 nexttbtt = 0, intval;
- u64 tsf;
- /* No need to configure beacon if we are not associated */
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
- ath_dbg(common, BEACON,
- "STA is not yet associated..skipping beacon config\n");
+ if (ath9k_cmn_beacon_config_sta(ah, conf, &bs) == -EPERM)
return;
- }
-
- memset(&bs, 0, sizeof(bs));
- intval = conf->beacon_interval;
-
- /*
- * Setup dtim parameters according to
- * last beacon we received (which may be none).
- */
- dtim_intval = intval * conf->dtim_period;
- sleepduration = conf->listen_interval * intval;
-
- /*
- * Pull nexttbtt forward to reflect the current
- * TSF and calculate dtim state for the result.
- */
- tsf = ath9k_hw_gettsf64(ah);
- nexttbtt = ath9k_get_next_tbtt(sc, tsf, intval);
-
- bs.bs_intval = TU_TO_USEC(intval);
- bs.bs_dtimperiod = conf->dtim_period * bs.bs_intval;
- bs.bs_nexttbtt = nexttbtt;
- bs.bs_nextdtim = nexttbtt;
- if (conf->dtim_period > 1)
- bs.bs_nextdtim = ath9k_get_next_tbtt(sc, tsf, dtim_intval);
-
- /*
- * Calculate the number of consecutive beacons to miss* before taking
- * a BMISS interrupt. The configuration is specified in TU so we only
- * need calculate based on the beacon interval. Note that we clamp the
- * result to at most 15 beacons.
- */
- if (sleepduration > intval) {
- bs.bs_bmissthreshold = conf->listen_interval *
- ATH_DEFAULT_BMISS_LIMIT / 2;
- } else {
- bs.bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, intval);
- if (bs.bs_bmissthreshold > 15)
- bs.bs_bmissthreshold = 15;
- else if (bs.bs_bmissthreshold <= 0)
- bs.bs_bmissthreshold = 1;
- }
-
- /*
- * Calculate sleep duration. The configuration is given in ms.
- * We ensure a multiple of the beacon period is used. Also, if the sleep
- * duration is greater than the DTIM period then it makes senses
- * to make it a multiple of that.
- *
- * XXX fixed at 100ms
- */
-
- bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
- sleepduration));
- if (bs.bs_sleepduration > bs.bs_dtimperiod)
- bs.bs_sleepduration = bs.bs_dtimperiod;
-
- /* TSF out of range threshold fixed at 1 second */
- bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
-
- ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
- bs.bs_bmissthreshold, bs.bs_sleepduration);
-
- /* Set the computed STA beacon timers */
ath9k_hw_disable_interrupts(ah);
ath9k_hw_set_sta_beacon_timers(ah, &bs);
@@ -597,36 +482,19 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- u32 intval, nexttbtt;
ath9k_reset_beacon_status(sc);
- intval = TU_TO_USEC(conf->beacon_interval);
-
- if (conf->ibss_creator)
- nexttbtt = intval;
- else
- nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
- conf->beacon_interval);
-
- if (conf->enable_beacon)
- ah->imask |= ATH9K_INT_SWBA;
- else
- ah->imask &= ~ATH9K_INT_SWBA;
-
- ath_dbg(common, BEACON,
- "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
- (conf->enable_beacon) ? "Enable" : "Disable",
- nexttbtt, intval, conf->beacon_interval);
+ ath9k_cmn_beacon_config_adhoc(ah, conf);
- ath9k_beacon_init(sc, nexttbtt, intval, conf->ibss_creator);
+ ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, conf->ibss_creator);
/*
* Set the global 'beacon has been configured' flag for the
* joiner case in IBSS mode.
*/
if (!conf->ibss_creator && conf->enable_beacon)
- set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ set_bit(ATH_OP_BEACONS, &common->op_flags);
}
static bool ath9k_allow_beacon_config(struct ath_softc *sc,
@@ -646,7 +514,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
if ((vif->type == NL80211_IFTYPE_STATION) &&
- test_bit(SC_OP_BEACONS, &sc->sc_flags) &&
+ test_bit(ATH_OP_BEACONS, &common->op_flags) &&
!avp->primary_sta_vif) {
ath_dbg(common, CONFIG,
"Beacon already configured for a station interface\n");
@@ -668,7 +536,6 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
cur_conf->beacon_interval = bss_conf->beacon_int;
cur_conf->dtim_period = bss_conf->dtim_period;
- cur_conf->listen_interval = 1;
cur_conf->dtim_count = 1;
cur_conf->ibss_creator = bss_conf->ibss_creator;
cur_conf->bmiss_timeout =
@@ -698,6 +565,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
{
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
unsigned long flags;
bool skip_beacon = false;
@@ -710,7 +579,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_cache_beacon_config(sc, bss_conf);
ath9k_set_beacon(sc);
- set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ set_bit(ATH_OP_BEACONS, &common->op_flags);
return;
}
@@ -749,13 +618,13 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
}
/*
- * Do not set the SC_OP_BEACONS flag for IBSS joiner mode
+ * Do not set the ATH_OP_BEACONS flag for IBSS joiner mode
* here, it is done in ath9k_beacon_config_adhoc().
*/
if (cur_conf->enable_beacon && !skip_beacon)
- set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ set_bit(ATH_OP_BEACONS, &common->op_flags);
else
- clear_bit(SC_OP_BEACONS, &sc->sc_flags);
+ clear_bit(ATH_OP_BEACONS, &common->op_flags);
}
}
@@ -773,7 +642,7 @@ void ath9k_set_beacon(struct ath_softc *sc)
ath9k_beacon_config_adhoc(sc, cur_conf);
break;
case NL80211_IFTYPE_STATION:
- ath9k_beacon_config_sta(sc, cur_conf);
+ ath9k_beacon_config_sta(sc->sc_ah, cur_conf);
break;
default:
ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
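The next-TBTT helpers removed from beacon.c reappear in the new common-beacon.c below. The interesting part is ath9k_mod_tsf64_tu(), which reduces the 64-bit TSF modulo a TU divisor without a 64-bit division by handling the high and low halves separately. The standalone check below compares that split reduction against a direct 64-bit modulo, assuming a beacon-interval-sized divisor so the 32-bit intermediates cannot overflow.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mod_tsf64_tu(uint64_t tsf, uint32_t div_tu)
{
	uint32_t tsf_mod = tsf & (1024 - 1);	/* sub-TU microseconds */
	uint32_t tsf_hi = tsf >> 32;
	uint32_t tsf_lo = (uint32_t)tsf >> 10;	/* low half, in TU */
	uint32_t mod_hi = tsf_hi % div_tu;
	uint32_t mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;

	return (mod_lo << 10) | tsf_mod;
}

int main(void)
{
	uint64_t tsf = 0x00000123456789abULL;	/* arbitrary TSF snapshot */
	uint32_t div_tu = 100;			/* 100 TU beacon interval */

	uint32_t split = mod_tsf64_tu(tsf, div_tu);
	uint32_t direct = (uint32_t)(tsf % ((uint64_t)div_tu * 1024));

	assert(split == direct);
	printf("tsf mod %u TU = %u us\n", div_tu, split);
	return 0;
}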
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c
new file mode 100644
index 000000000000..775d1d20ce0b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-beacon.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "common.h"
+
+#define FUDGE 2
+
+/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
+static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
+{
+ u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
+
+ tsf_mod = tsf & (BIT(10) - 1);
+ tsf_hi = tsf >> 32;
+ tsf_lo = ((u32) tsf) >> 10;
+
+ mod_hi = tsf_hi % div_tu;
+ mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
+
+ return (mod_lo << 10) | tsf_mod;
+}
+
+static u32 ath9k_get_next_tbtt(struct ath_hw *ah, u64 tsf,
+ unsigned int interval)
+{
+ unsigned int offset;
+
+ tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
+ offset = ath9k_mod_tsf64_tu(tsf, interval);
+
+ return (u32) tsf + TU_TO_USEC(interval) - offset;
+}
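
A minimal userspace sketch of the arithmetic in the two helpers above, with arbitrary example values and ignoring the FUDGE/software beacon-response adjustment: the 64-bit TSF (in microseconds) is reduced modulo a TU-sized interval using only 32-bit divisions, by splitting it into a sub-TU remainder, a low 22-bit TU count and a high 32-bit part.

	#include <stdio.h>
	#include <stdint.h>

	/* Same split-modulo as ath9k_mod_tsf64_tu(), with userspace types. */
	static uint32_t mod_tsf64_tu(uint64_t tsf, uint32_t div_tu)
	{
		uint32_t tsf_mod = tsf & ((1u << 10) - 1);	/* sub-TU microseconds */
		uint32_t tsf_hi = tsf >> 32;
		uint32_t tsf_lo = (uint32_t)tsf >> 10;		/* low 22 bits of the TU count */
		uint32_t mod_hi = tsf_hi % div_tu;
		uint32_t mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;

		return (mod_lo << 10) | tsf_mod;		/* remainder, back in microseconds */
	}

	int main(void)
	{
		uint64_t tsf = 0x123456789abcULL;	/* arbitrary example TSF, in us */
		uint32_t intval = 100;			/* example beacon interval, in TU */
		uint32_t offset = mod_tsf64_tu(tsf, intval);
		uint32_t nexttbtt = (uint32_t)tsf + intval * 1024 - offset;
		/* Reference result using a plain 64-bit modulo of the TU count. */
		uint64_t ref = (((tsf >> 10) % intval) << 10) | (tsf & 1023);

		printf("offset %u us (reference %llu), next TBTT at %u\n",
		       (unsigned)offset, (unsigned long long)ref, (unsigned)nexttbtt);
		return 0;
	}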
+
+/*
+ * This sets up the beacon timers according to the timestamp of the last
+ * received beacon and the current TSF, configures PCF and DTIM
+ * handling, programs the sleep registers so the hardware will wake up in
+ * time to receive beacons, and configures the beacon miss handling so
+ * we'll receive a BMISS interrupt when we stop seeing beacons from the AP
+ * we've associated with.
+ */
+int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ struct ath9k_beacon_state *bs)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int dtim_intval;
+ u64 tsf;
+
+ /* No need to configure beacon if we are not associated */
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
+ ath_dbg(common, BEACON,
+ "STA is not yet associated..skipping beacon config\n");
+ return -EPERM;
+ }
+
+ memset(bs, 0, sizeof(*bs));
+ conf->intval = conf->beacon_interval;
+
+ /*
+ * Setup dtim parameters according to
+ * last beacon we received (which may be none).
+ */
+ dtim_intval = conf->intval * conf->dtim_period;
+
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * TSF and calculate dtim state for the result.
+ */
+ tsf = ath9k_hw_gettsf64(ah);
+ conf->nexttbtt = ath9k_get_next_tbtt(ah, tsf, conf->intval);
+
+ bs->bs_intval = TU_TO_USEC(conf->intval);
+ bs->bs_dtimperiod = conf->dtim_period * bs->bs_intval;
+ bs->bs_nexttbtt = conf->nexttbtt;
+ bs->bs_nextdtim = conf->nexttbtt;
+ if (conf->dtim_period > 1)
+ bs->bs_nextdtim = ath9k_get_next_tbtt(ah, tsf, dtim_intval);
+
+ /*
+ * Calculate the number of consecutive beacons to miss before taking
+ * a BMISS interrupt. The configuration is specified in TU, so we
+ * only need to calculate based on the beacon interval. Note that we
+ * clamp the result to at most 15 beacons.
+ */
+ bs->bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, conf->intval);
+ if (bs->bs_bmissthreshold > 15)
+ bs->bs_bmissthreshold = 15;
+ else if (bs->bs_bmissthreshold <= 0)
+ bs->bs_bmissthreshold = 1;
+
+ /*
+ * Calculate sleep duration. The configuration is given in ms.
+ * We ensure a multiple of the beacon period is used. Also, if the sleep
+ * duration is greater than the DTIM period then it makes sense
+ * to make it a multiple of that.
+ *
+ * XXX fixed at 100ms
+ */
+
+ bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
+ conf->intval));
+ if (bs->bs_sleepduration > bs->bs_dtimperiod)
+ bs->bs_sleepduration = bs->bs_dtimperiod;
+
+ /* TSF out of range threshold fixed at 1 second */
+ bs->bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
+
+ ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
+ bs->bs_bmissthreshold, bs->bs_sleepduration);
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_sta);
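
A rough userspace rendering of the bmiss-threshold and sleep-duration arithmetic above, with assumed example values (100 TU beacon interval, DTIM period 2, the default 10-beacon miss limit):

	#include <stdio.h>

	#define TU_TO_USEC(x)		((x) * 1024)
	#define IEEE80211_MS_TO_TU(x)	(((x) * 1000) / 1024)
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define ROUNDUP(x, y)		(DIV_ROUND_UP(x, y) * (y))

	int main(void)
	{
		unsigned int intval = 100;			/* example beacon interval, TU */
		unsigned int dtim_period = 2;
		unsigned int bmiss_timeout = 10 * intval;	/* 10 missed beacons, in TU */

		unsigned int bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
		if (bmissthreshold > 15)
			bmissthreshold = 15;
		else if (bmissthreshold == 0)
			bmissthreshold = 1;

		unsigned int dtimperiod = dtim_period * TU_TO_USEC(intval);
		unsigned int sleepduration =
			TU_TO_USEC(ROUNDUP(IEEE80211_MS_TO_TU(100), intval));
		if (sleepduration > dtimperiod)
			sleepduration = dtimperiod;

		printf("bmiss threshold: %u beacons, sleep: %u us (DTIM period %u us)\n",
		       bmissthreshold, sleepduration, dtimperiod);
		return 0;
	}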
+
+void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
+ struct ath_beacon_config *conf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ conf->intval = TU_TO_USEC(conf->beacon_interval);
+
+ if (conf->ibss_creator)
+ conf->nexttbtt = conf->intval;
+ else
+ conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
+ conf->beacon_interval);
+
+ if (conf->enable_beacon)
+ ah->imask |= ATH9K_INT_SWBA;
+ else
+ ah->imask &= ~ATH9K_INT_SWBA;
+
+ ath_dbg(common, BEACON,
+ "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
+ conf->nexttbtt, conf->intval, conf->beacon_interval);
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_adhoc);
+
+/*
+ * For multi-BSS AP support, beacons are either staggered evenly over N slots
+ * or sent out in a burst. For the staggered case, arrange for an SWBA to be
+ * delivered for each slot; slots that are not occupied generate nothing.
+ */
+void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ unsigned int bc_buf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* NB: the beacon interval is kept internally in TU's */
+ conf->intval = TU_TO_USEC(conf->beacon_interval);
+ conf->intval /= bc_buf;
+ conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
+ conf->beacon_interval);
+
+ if (conf->enable_beacon)
+ ah->imask |= ATH9K_INT_SWBA;
+ else
+ ah->imask &= ~ATH9K_INT_SWBA;
+
+ ath_dbg(common, BEACON,
+ "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
+ conf->nexttbtt, conf->intval, conf->beacon_interval);
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_ap);
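
Worked example of the slot arithmetic, assuming a 100 TU beacon interval and two beacon slots (the slot count passed in as bc_buf is the caller's choice):

	#include <stdio.h>

	#define TU_TO_USEC(x)	((x) * 1024)

	int main(void)
	{
		unsigned int beacon_interval = 100;	/* TU, example value */
		unsigned int bc_buf = 2;		/* assumed number of beacon slots */
		unsigned int intval = TU_TO_USEC(beacon_interval) / bc_buf;

		printf("SWBA every %u us; unoccupied slots transmit nothing\n", intval);
		return 0;
	}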
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.h b/drivers/net/wireless/ath/ath9k/common-beacon.h
new file mode 100644
index 000000000000..3665d27f0dc7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-beacon.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+struct ath_beacon_config;
+
+int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ struct ath9k_beacon_state *bs);
+void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
+ struct ath_beacon_config *conf);
+void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ unsigned int bc_buf);
diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c
new file mode 100644
index 000000000000..a006c1499728
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-init.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* We use the hw_value as an index into our private channel structure */
+
+#include "common.h"
+
+#define CHAN2G(_freq, _idx) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+/* Some 2 GHz radios are actually tunable on 2312-2732 MHz
+ * in 5 MHz steps; to keep this table static we only list the
+ * channels we know we have calibration data for on all cards. */
+static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Some 5 GHz radios are actually tunable on XXXX-YYYY
+ * in 5 MHz steps; to keep this table static we only list the
+ * channels we know we have calibration data for on all cards. */
+static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
+ /* _We_ call this UNII 1 */
+ CHAN5G(5180, 14), /* Channel 36 */
+ CHAN5G(5200, 15), /* Channel 40 */
+ CHAN5G(5220, 16), /* Channel 44 */
+ CHAN5G(5240, 17), /* Channel 48 */
+ /* _We_ call this UNII 2 */
+ CHAN5G(5260, 18), /* Channel 52 */
+ CHAN5G(5280, 19), /* Channel 56 */
+ CHAN5G(5300, 20), /* Channel 60 */
+ CHAN5G(5320, 21), /* Channel 64 */
+ /* _We_ call this "Middle band" */
+ CHAN5G(5500, 22), /* Channel 100 */
+ CHAN5G(5520, 23), /* Channel 104 */
+ CHAN5G(5540, 24), /* Channel 108 */
+ CHAN5G(5560, 25), /* Channel 112 */
+ CHAN5G(5580, 26), /* Channel 116 */
+ CHAN5G(5600, 27), /* Channel 120 */
+ CHAN5G(5620, 28), /* Channel 124 */
+ CHAN5G(5640, 29), /* Channel 128 */
+ CHAN5G(5660, 30), /* Channel 132 */
+ CHAN5G(5680, 31), /* Channel 136 */
+ CHAN5G(5700, 32), /* Channel 140 */
+ /* _We_ call this UNII 3 */
+ CHAN5G(5745, 33), /* Channel 149 */
+ CHAN5G(5765, 34), /* Channel 153 */
+ CHAN5G(5785, 35), /* Channel 157 */
+ CHAN5G(5805, 36), /* Channel 161 */
+ CHAN5G(5825, 37), /* Channel 165 */
+};
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+};
+
+int ath9k_cmn_init_channels_rates(struct ath_common *common)
+{
+ struct ath_hw *ah = (struct ath_hw *)common->ah;
+ void *channels;
+
+ BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
+ ARRAY_SIZE(ath9k_5ghz_chantable) !=
+ ATH9K_NUM_CHANNELS);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
+ channels = devm_kzalloc(ah->dev,
+ sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ memcpy(channels, ath9k_2ghz_chantable,
+ sizeof(ath9k_2ghz_chantable));
+ common->sbands[IEEE80211_BAND_2GHZ].channels = channels;
+ common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ common->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_chantable);
+ common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ common->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
+ channels = devm_kzalloc(ah->dev,
+ sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ memcpy(channels, ath9k_5ghz_chantable,
+ sizeof(ath9k_5ghz_chantable));
+ common->sbands[IEEE80211_BAND_5GHZ].channels = channels;
+ common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
+ common->sbands[IEEE80211_BAND_5GHZ].n_channels =
+ ARRAY_SIZE(ath9k_5ghz_chantable);
+ common->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ ath9k_legacy_rates + 4;
+ common->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates) - 4;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_init_channels_rates);
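
The 5 GHz band reuses the tail of ath9k_legacy_rates, skipping the first four (CCK) entries that are valid on 2.4 GHz only. A simplified stand-alone sketch of that pointer arithmetic (struct rate is a stand-in here; bitrates are in 100 kb/s units as in mac80211's rate table):

	#include <stdio.h>

	/* Stand-in for the rate table entries; bitrates in units of 100 kb/s. */
	struct rate { int bitrate; };

	/* Mirrors the ordering above: four CCK rates, then eight OFDM rates. */
	static const struct rate legacy[] = {
		{ 10 }, { 20 }, { 55 }, { 110 },	/* CCK, 2.4 GHz only */
		{ 60 }, { 90 }, { 120 }, { 180 },
		{ 240 }, { 360 }, { 480 }, { 540 },	/* OFDM, valid on both bands */
	};

	int main(void)
	{
		const struct rate *band5g = legacy + 4;	/* skip the CCK entries */
		size_t n5g = sizeof(legacy) / sizeof(legacy[0]) - 4;

		printf("5 GHz advertises %zu rates, starting at %d.%d Mb/s\n",
		       n5g, band5g[0].bitrate / 10, band5g[0].bitrate % 10);
		return 0;
	}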
+
+void ath9k_cmn_setup_ht_cap(struct ath_hw *ah,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 tx_streams, rx_streams;
+ int i, max_streams;
+
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
+ ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ if (AR_SREV_9271(ah) || AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
+ max_streams = 1;
+ else if (AR_SREV_9462(ah))
+ max_streams = 2;
+ else if (AR_SREV_9300_20_OR_LATER(ah))
+ max_streams = 3;
+ else
+ max_streams = 2;
+
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ if (max_streams >= 2)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ }
+
+ /* set up supported mcs set */
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
+ rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
+
+ ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
+
+ if (tx_streams != rx_streams) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+
+ for (i = 0; i < rx_streams; i++)
+ ht_info->mcs.rx_mask[i] = 0xff;
+
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
+EXPORT_SYMBOL(ath9k_cmn_setup_ht_cap);
+
+void ath9k_cmn_reload_chainmask(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_HT))
+ return;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+ ath9k_cmn_setup_ht_cap(ah,
+ &common->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
+ ath9k_cmn_setup_ht_cap(ah,
+ &common->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+}
+EXPORT_SYMBOL(ath9k_cmn_reload_chainmask);
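
Sketch of how the MCS RX mask follows the stream count; count_streams() below is a simplified stand-in for ath9k_cmn_count_streams() and the chainmasks are example values:

	#include <stdio.h>

	/* Simplified stand-in for ath9k_cmn_count_streams(): count chain bits, capped. */
	static int count_streams(unsigned int chainmask, int max)
	{
		int streams = 0;

		while (chainmask) {
			streams += chainmask & 1;
			chainmask >>= 1;
		}
		return streams > max ? max : streams;
	}

	int main(void)
	{
		unsigned char rx_mask[10] = { 0 };	/* HT MCS RX bitmap, one byte per stream */
		int tx_streams = count_streams(0x3, 3);	/* example 2-chain TX mask */
		int rx_streams = count_streams(0x7, 3);	/* example 3-chain RX mask */
		int i;

		for (i = 0; i < rx_streams; i++)
			rx_mask[i] = 0xff;		/* advertise MCS 0..(8 * rx_streams - 1) */

		printf("tx %d rx %d: rx_mask[0] = 0x%02x, MCS 0-%d, TX/RX differ: %s\n",
		       tx_streams, rx_streams, rx_mask[0], 8 * rx_streams - 1,
		       tx_streams != rx_streams ? "yes" : "no");
		return 0;
	}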
diff --git a/drivers/net/wireless/ath/ath9k/common-init.h b/drivers/net/wireless/ath/ath9k/common-init.h
new file mode 100644
index 000000000000..ac03fca5ffdd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-init.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+int ath9k_cmn_init_channels_rates(struct ath_common *common);
+void ath9k_cmn_setup_ht_cap(struct ath_hw *ah,
+ struct ieee80211_sta_ht_cap *ht_info);
+void ath9k_cmn_reload_chainmask(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 768c733cad31..c6dd7f1fed65 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -27,6 +27,250 @@ MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
+/* Assumes you've already done the endian to CPU conversion */
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rxs,
+ struct ath_rx_status *rx_stats,
+ bool *decrypt_error,
+ unsigned int rxfilter)
+{
+ struct ath_hw *ah = common->ah;
+ bool is_mc, is_valid_tkip, strip_mic, mic_error;
+ __le16 fc;
+
+ fc = hdr->frame_control;
+
+ is_mc = !!is_multicast_ether_addr(hdr->addr1);
+ is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
+ test_bit(rx_stats->rs_keyix, common->tkip_keymap);
+ strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
+ ieee80211_has_protected(fc) &&
+ !(rx_stats->rs_status &
+ (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS));
+
+ /*
+ * Key miss events are only relevant for pairwise keys where the
+ * descriptor does contain a valid key index. This has been observed
+ * mostly with CCMP encryption.
+ */
+ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+ !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
+ rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
+
+ mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
+ !ieee80211_has_morefrags(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
+ (rx_stats->rs_status & ATH9K_RXERR_MIC);
+
+ /*
+ * The rx_stats->rs_status will not be set until the end of the
+ * chained descriptors so it can be ignored if rs_more is set. The
+ * rs_more will be false at the last element of the chained
+ * descriptors.
+ */
+ if (rx_stats->rs_status != 0) {
+ u8 status_mask;
+
+ if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
+ rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+ mic_error = false;
+ }
+
+ if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
+ (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
+ *decrypt_error = true;
+ mic_error = false;
+ }
+
+ /*
+ * Reject error frames with the exception of
+ * decryption and MIC failures. For monitor mode,
+ * we also ignore the CRC error.
+ */
+ status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS;
+
+ if (ah->is_monitoring && (rxfilter & FIF_FCSFAIL))
+ status_mask |= ATH9K_RXERR_CRC;
+
+ if (rx_stats->rs_status & ~status_mask)
+ return false;
+ }
+
+ /*
+ * For unicast frames the MIC error bit can have false positives,
+ * so all MIC error reports need to be validated in software.
+ * False negatives are not common, so skip software verification
+ * if the hardware considers the MIC valid.
+ */
+ if (strip_mic)
+ rxs->flag |= RX_FLAG_MMIC_STRIPPED;
+ else if (is_mc && mic_error)
+ rxs->flag |= RX_FLAG_MMIC_ERROR;
+
+ return true;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_accept);
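
The accept/drop decision above boils down to masking rs_status against the set of tolerated errors. A toy sketch of that masking with invented bit values (not the driver's real ATH9K_RXERR_* definitions):

	#include <stdio.h>
	#include <stdbool.h>

	/* Illustrative error bits only -- not the driver's real ATH9K_RXERR_* values. */
	#define ERR_CRC		0x01
	#define ERR_DECRYPT	0x02
	#define ERR_MIC		0x04
	#define ERR_KEYMISS	0x08

	static bool accept(unsigned int rs_status, bool monitoring, bool fcsfail_filter)
	{
		unsigned int status_mask = ERR_DECRYPT | ERR_MIC | ERR_KEYMISS;

		if (monitoring && fcsfail_filter)
			status_mask |= ERR_CRC;		/* monitor mode may keep CRC-failed frames */

		return !(rs_status & ~status_mask);
	}

	int main(void)
	{
		printf("CRC error, normal mode: %s\n",
		       accept(ERR_CRC, false, false) ? "accept" : "drop");
		printf("CRC error, monitor + FIF_FCSFAIL: %s\n",
		       accept(ERR_CRC, true, true) ? "accept" : "drop");
		printf("MIC error only: %s\n",
		       accept(ERR_MIC, false, false) ? "accept" : "drop");
		return 0;
	}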
+
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error)
+{
+ struct ath_hw *ah = common->ah;
+ struct ieee80211_hdr *hdr;
+ int hdrlen, padpos, padsize;
+ u8 keyix;
+ __le16 fc;
+
+ /* see if any padding is done by the hw and remove it */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ fc = hdr->frame_control;
+ padpos = ieee80211_hdrlen(fc);
+
+ /* The MAC header is padded to a 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = (4 - padpos % 4) % 4; However, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. In addition, we must
+ * not try to remove padding from short control frames that do
+ * not have payload. */
+ padsize = padpos & 3;
+ if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ keyix = rx_stats->rs_keyix;
+
+ if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ } else if (ieee80211_has_protected(fc)
+ && !decrypt_error && skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+
+ if (test_bit(keyix, common->keymap))
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ }
+ if (ah->sw_mgmt_crypto &&
+ (rxs->flag & RX_FLAG_DECRYPTED) &&
+ ieee80211_is_mgmt(fc))
+ /* Use software decrypt for management frames. */
+ rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
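
Quick check that the padsize = padpos & 3 shortcut matches the general (4 - padpos % 4) % 4 formula for the even 802.11 header lengths:

	#include <stdio.h>

	int main(void)
	{
		/* 802.11 header lengths are always even, so (4 - len % 4) % 4 is either
		 * 0 or 2, which is exactly len & 3 for even len. */
		unsigned int hdrlens[] = { 24, 26, 30, 32 };	/* data, QoS, 4-addr, 4-addr QoS */
		unsigned int i;

		for (i = 0; i < sizeof(hdrlens) / sizeof(hdrlens[0]); i++) {
			unsigned int padpos = hdrlens[i];

			printf("hdrlen %2u: general %u, shortcut %u\n",
			       padpos, (4 - padpos % 4) % 4, padpos & 3);
		}
		return 0;
	}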
+
+int ath9k_cmn_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_band band;
+ unsigned int i = 0;
+ struct ath_hw *ah = common->ah;
+
+ band = ah->curchan->chan->band;
+ sband = hw->wiphy->bands[band];
+
+ if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_5MHZ;
+ else if (IS_CHAN_HALF_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_10MHZ;
+
+ if (rx_stats->rs_rate & 0x80) {
+ /* HT rate */
+ rxs->flag |= RX_FLAG_HT;
+ rxs->flag |= rx_stats->flag;
+ rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+ return 0;
+ }
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+ rxs->rate_idx = i;
+ return 0;
+ }
+ if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
+ rxs->flag |= RX_FLAG_SHORTPRE;
+ rxs->rate_idx = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rate);
+
+void ath9k_cmn_process_rssi(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ath_hw *ah = common->ah;
+ int last_rssi;
+ int rssi = rx_stats->rs_rssi;
+ int i, j;
+
+ /*
+ * RSSI is not available for subframes in an A-MPDU.
+ */
+ if (rx_stats->rs_moreaggr) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
+
+ /*
+ * Check if the RSSI for the last subframe in an A-MPDU
+ * or an unaggregated frame is valid.
+ */
+ if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
+ s8 rssi;
+
+ if (!(ah->rxchainmask & BIT(i)))
+ continue;
+
+ rssi = rx_stats->rs_rssi_ctl[i];
+ if (rssi != ATH9K_RSSI_BAD) {
+ rxs->chains |= BIT(j);
+ rxs->chain_signal[j] = ah->noise + rssi;
+ }
+ j++;
+ }
+
+ /*
+ * Update Beacon RSSI, this is used by ANI.
+ */
+ if (rx_stats->is_mybeacon &&
+ ((ah->opmode == NL80211_IFTYPE_STATION) ||
+ (ah->opmode == NL80211_IFTYPE_ADHOC))) {
+ ATH_RSSI_LPF(common->last_rssi, rx_stats->rs_rssi);
+ last_rssi = common->last_rssi;
+
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ if (rssi < 0)
+ rssi = 0;
+
+ ah->stats.avgbrssi = rssi;
+ }
+
+ rxs->signal = ah->noise + rx_stats->rs_rssi;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rssi);
+
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index eb85e1bdca88..ca38116838f0 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -21,6 +21,9 @@
#include "hw.h"
#include "hw-ops.h"
+#include "common-init.h"
+#include "common-beacon.h"
+
/* Common header for Atheros 802.11n base driver cores */
#define WME_BA_BMP_SIZE 64
@@ -42,6 +45,38 @@
#define ATH_EP_RND(x, mul) \
(((x) + ((mul)/2)) / (mul))
+#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+
+struct ath_beacon_config {
+ int beacon_interval;
+ u16 dtim_period;
+ u16 bmiss_timeout;
+ u8 dtim_count;
+ bool enable_beacon;
+ bool ibss_creator;
+ u32 nexttbtt;
+ u32 intval;
+};
+
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rxs,
+ struct ath_rx_status *rx_stats,
+ bool *decrypt_error,
+ unsigned int rxfilter);
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error);
+int ath9k_cmn_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs);
+void ath9k_cmn_process_rssi(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs);
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index ab7264c1d8f7..780ff1bee6f6 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -135,46 +135,45 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
struct ath_softc *sc = file->private_data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
- unsigned int len = 0, size = 1024;
+ unsigned int len = 0;
+ const unsigned int size = 1024;
ssize_t retval = 0;
char *buf;
+ int i;
+ struct {
+ const char *name;
+ unsigned int val;
+ } ani_info[] = {
+ { "ANI RESET", ah->stats.ast_ani_reset },
+ { "OFDM LEVEL", ah->ani.ofdmNoiseImmunityLevel },
+ { "CCK LEVEL", ah->ani.cckNoiseImmunityLevel },
+ { "SPUR UP", ah->stats.ast_ani_spurup },
+ { "SPUR DOWN", ah->stats.ast_ani_spurup },
+ { "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon },
+ { "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff },
+ { "MRC-CCK ON", ah->stats.ast_ani_ccklow },
+ { "MRC-CCK OFF", ah->stats.ast_ani_cckhigh },
+ { "FIR-STEP UP", ah->stats.ast_ani_stepup },
+ { "FIR-STEP DOWN", ah->stats.ast_ani_stepdown },
+ { "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero },
+ { "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs },
+ { "CCK ERRORS", ah->stats.ast_ani_cckerrs },
+ };
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- if (common->disable_ani) {
- len += scnprintf(buf + len, size - len, "%s: %s\n",
- "ANI", "DISABLED");
+ len += scnprintf(buf + len, size - len, "%15s: %s\n", "ANI",
+ common->disable_ani ? "DISABLED" : "ENABLED");
+
+ if (common->disable_ani)
goto exit;
- }
- len += scnprintf(buf + len, size - len, "%15s: %s\n",
- "ANI", "ENABLED");
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "ANI RESET", ah->stats.ast_ani_reset);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR UP", ah->stats.ast_ani_spurup);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR DOWN", ah->stats.ast_ani_spurup);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK ON", ah->stats.ast_ani_ccklow);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP UP", ah->stats.ast_ani_stepup);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "CCK ERRORS", ah->stats.ast_ani_cckerrs);
+ for (i = 0; i < ARRAY_SIZE(ani_info); i++)
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ ani_info[i].name, ani_info[i].val);
+
exit:
if (len > size)
len = size;
@@ -209,7 +208,7 @@ static ssize_t write_file_ani(struct file *file,
common->disable_ani = !ani;
if (common->disable_ani) {
- clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
ath_stop_ani(sc);
} else {
ath_check_ani(sc);
@@ -307,13 +306,13 @@ static ssize_t read_file_antenna_diversity(struct file *file,
struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
struct ath_hw_antcomb_conf div_ant_conf;
- unsigned int len = 0, size = 1024;
+ unsigned int len = 0;
+ const unsigned int size = 1024;
ssize_t retval = 0;
char *buf;
- char *lna_conf_str[4] = {"LNA1_MINUS_LNA2",
- "LNA2",
- "LNA1",
- "LNA1_PLUS_LNA2"};
+ static const char *lna_conf_str[4] = {
+ "LNA1_MINUS_LNA2", "LNA2", "LNA1", "LNA1_PLUS_LNA2"
+ };
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
@@ -716,10 +715,13 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
struct ath_softc *sc = file->private_data;
struct ath_txq *txq;
char *buf;
- unsigned int len = 0, size = 1024;
+ unsigned int len = 0;
+ const unsigned int size = 1024;
ssize_t retval = 0;
int i;
- char *qname[4] = {"VO", "VI", "BE", "BK"};
+ static const char *qname[4] = {
+ "VO", "VI", "BE", "BK"
+ };
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
@@ -866,6 +868,12 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
"%17s: %2d\n", "PLL RX Hang",
sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "MAC Hang",
+ sc->debug.stats.reset[RESET_TYPE_MAC_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Stuck Beacon",
+ sc->debug.stats.reset[RESET_TYPE_BEACON_STUCK]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "MCI Reset",
sc->debug.stats.reset[RESET_TYPE_MCI]);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index cc7a025d833e..559a68c2709c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -18,7 +18,6 @@
#define DEBUG_H
#include "hw.h"
-#include "rc.h"
#include "dfs_debug.h"
struct ath_txq;
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index 0a7ddf4c88c9..7936c9126a20 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -21,6 +21,8 @@
#include "hw.h"
+struct ath_softc;
+
/**
* struct ath_dfs_stats - DFS Statistics per wiphy
* @pulses_total: pulses reported by HW
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 6d5d716adc1b..8e7153b186ed 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -54,6 +54,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
.driver_info = AR9280_USB }, /* SMC Networks */
{ USB_DEVICE(0x0411, 0x017f),
.driver_info = AR9280_USB }, /* Sony UWA-BR100 */
+ { USB_DEVICE(0x0411, 0x0197),
+ .driver_info = AR9280_USB }, /* Buffalo WLI-UV-AG300P */
{ USB_DEVICE(0x04da, 0x3904),
.driver_info = AR9280_USB },
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 99a203174f45..dab1f0cab993 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -39,7 +39,6 @@
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
#define ATH_DEFAULT_BMISS_LIMIT 10
-#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
#define TSF_TO_TU(_h, _l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
@@ -277,7 +276,6 @@ struct ath9k_htc_rxbuf {
};
struct ath9k_htc_rx {
- int last_rssi; /* FIXME: per-STA */
struct list_head rxbuf;
spinlock_t rxbuflock;
};
@@ -407,12 +405,18 @@ static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
#define DEFAULT_SWBA_RESPONSE 40 /* in TUs */
#define MIN_SWBA_RESPONSE 10 /* in TUs */
-struct htc_beacon_config {
+struct htc_beacon {
+ enum {
+ OK, /* no change needed */
+ UPDATE, /* update pending */
+ COMMIT /* beacon sent, commit change */
+ } updateslot; /* slot time update fsm */
+
struct ieee80211_vif *bslot[ATH9K_HTC_MAX_BCN_VIF];
- u16 beacon_interval;
- u16 dtim_period;
- u16 bmiss_timeout;
- u32 bmiss_cnt;
+ u32 bmisscnt;
+ u32 beaconq;
+ int slottime;
+ int slotupdate;
};
struct ath_btcoex {
@@ -440,12 +444,8 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
-#define OP_INVALID BIT(0)
-#define OP_SCANNING BIT(1)
-#define OP_ENABLE_BEACON BIT(2)
#define OP_BT_PRIORITY_DETECTED BIT(3)
#define OP_BT_SCAN BIT(4)
-#define OP_ANI_RUNNING BIT(5)
#define OP_TSF_RESET BIT(6)
struct ath9k_htc_priv {
@@ -488,10 +488,10 @@ struct ath9k_htc_priv {
unsigned long op_flags;
struct ath9k_hw_cal_data caldata;
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
spinlock_t beacon_lock;
- struct htc_beacon_config cur_beacon_conf;
+ struct ath_beacon_config cur_beacon_conf;
+ struct htc_beacon beacon;
struct ath9k_htc_rx rx;
struct ath9k_htc_tx tx;
@@ -516,7 +516,6 @@ struct ath9k_htc_priv {
struct work_struct led_work;
#endif
- int beaconq;
int cabq;
int hwq_map[IEEE80211_NUM_ACS];
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 8b5757734596..e8b6ec3c1dbb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -26,7 +26,7 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
- ath9k_hw_get_txq_props(ah, priv->beaconq, &qi);
+ ath9k_hw_get_txq_props(ah, priv->beacon.beaconq, &qi);
if (priv->ah->opmode == NL80211_IFTYPE_AP ||
priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) {
@@ -54,220 +54,78 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
}
- if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) {
+ if (!ath9k_hw_set_txq_props(ah, priv->beacon.beaconq, &qi)) {
ath_err(ath9k_hw_common(ah),
- "Unable to update beacon queue %u!\n", priv->beaconq);
+ "Unable to update beacon queue %u!\n", priv->beacon.beaconq);
} else {
- ath9k_hw_resettxqueue(ah, priv->beaconq);
+ ath9k_hw_resettxqueue(ah, priv->beacon.beaconq);
}
}
-
-static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
- struct htc_beacon_config *bss_conf)
+/*
+ * Both nexttbtt and intval have to be in usecs.
+ */
+static void ath9k_htc_beacon_init(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *conf,
+ bool reset_tsf)
{
- struct ath_common *common = ath9k_hw_common(priv->ah);
- struct ath9k_beacon_state bs;
- enum ath9k_int imask = 0;
- int dtimperiod, dtimcount, sleepduration;
- int bmiss_timeout;
- u32 nexttbtt = 0, intval, tsftu;
- __be32 htc_imask = 0;
- u64 tsf;
- int num_beacons, offset, dtim_dec_count;
+ struct ath_hw *ah = priv->ah;
int ret __attribute__ ((unused));
+ __be32 htc_imask = 0;
u8 cmd_rsp;
- memset(&bs, 0, sizeof(bs));
-
- intval = bss_conf->beacon_interval;
- bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
-
- /*
- * Setup dtim parameters according to
- * last beacon we received (which may be none).
- */
- dtimperiod = bss_conf->dtim_period;
- if (dtimperiod <= 0) /* NB: 0 if not known */
- dtimperiod = 1;
- dtimcount = 1;
- if (dtimcount >= dtimperiod) /* NB: sanity check */
- dtimcount = 0;
-
- sleepduration = intval;
- if (sleepduration <= 0)
- sleepduration = intval;
-
- /*
- * Pull nexttbtt forward to reflect the current
- * TSF and calculate dtim state for the result.
- */
- tsf = ath9k_hw_gettsf64(priv->ah);
- tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
-
- num_beacons = tsftu / intval + 1;
- offset = tsftu % intval;
- nexttbtt = tsftu - offset;
- if (offset)
- nexttbtt += intval;
-
- /* DTIM Beacon every dtimperiod Beacon */
- dtim_dec_count = num_beacons % dtimperiod;
- dtimcount -= dtim_dec_count;
- if (dtimcount < 0)
- dtimcount += dtimperiod;
-
- bs.bs_intval = TU_TO_USEC(intval);
- bs.bs_nexttbtt = TU_TO_USEC(nexttbtt);
- bs.bs_dtimperiod = dtimperiod * bs.bs_intval;
- bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount * bs.bs_intval;
-
- /*
- * Calculate the number of consecutive beacons to miss* before taking
- * a BMISS interrupt. The configuration is specified in TU so we only
- * need calculate based on the beacon interval. Note that we clamp the
- * result to at most 15 beacons.
- */
- if (sleepduration > intval) {
- bs.bs_bmissthreshold = ATH_DEFAULT_BMISS_LIMIT / 2;
- } else {
- bs.bs_bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
- if (bs.bs_bmissthreshold > 15)
- bs.bs_bmissthreshold = 15;
- else if (bs.bs_bmissthreshold <= 0)
- bs.bs_bmissthreshold = 1;
- }
-
- /*
- * Calculate sleep duration. The configuration is given in ms.
- * We ensure a multiple of the beacon period is used. Also, if the sleep
- * duration is greater than the DTIM period then it makes senses
- * to make it a multiple of that.
- *
- * XXX fixed at 100ms
- */
-
- bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
- sleepduration));
- if (bs.bs_sleepduration > bs.bs_dtimperiod)
- bs.bs_sleepduration = bs.bs_dtimperiod;
-
- /* TSF out of range threshold fixed at 1 second */
- bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
-
- ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
- intval, tsf, tsftu);
- ath_dbg(common, CONFIG, "bmiss: %u sleep: %u\n",
- bs.bs_bmissthreshold, bs.bs_sleepduration);
-
- /* Set the computed STA beacon timers */
+ if (conf->intval >= TU_TO_USEC(DEFAULT_SWBA_RESPONSE))
+ ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
+ else
+ ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
WMI_CMD(WMI_DISABLE_INTR_CMDID);
- ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
- imask |= ATH9K_INT_BMISS;
- htc_imask = cpu_to_be32(imask);
+ if (reset_tsf)
+ ath9k_hw_reset_tsf(ah);
+ ath9k_htc_beaconq_config(priv);
+ ath9k_hw_beaconinit(ah, conf->nexttbtt, conf->intval);
+ priv->beacon.bmisscnt = 0;
+ htc_imask = cpu_to_be32(ah->imask);
WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
}
-static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
- struct htc_beacon_config *bss_conf)
+static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *bss_conf)
{
- struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_beacon_state bs;
enum ath9k_int imask = 0;
- u32 nexttbtt, intval, tsftu;
__be32 htc_imask = 0;
int ret __attribute__ ((unused));
u8 cmd_rsp;
- u64 tsf;
- intval = bss_conf->beacon_interval;
- intval /= ATH9K_HTC_MAX_BCN_VIF;
- nexttbtt = intval;
-
- /*
- * To reduce beacon misses under heavy TX load,
- * set the beacon response time to a larger value.
- */
- if (intval > DEFAULT_SWBA_RESPONSE)
- priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
- else
- priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
-
- if (test_bit(OP_TSF_RESET, &priv->op_flags)) {
- ath9k_hw_reset_tsf(priv->ah);
- clear_bit(OP_TSF_RESET, &priv->op_flags);
- } else {
- /*
- * Pull nexttbtt forward to reflect the current TSF.
- */
- tsf = ath9k_hw_gettsf64(priv->ah);
- tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
- do {
- nexttbtt += intval;
- } while (nexttbtt < tsftu);
- }
-
- if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
- imask |= ATH9K_INT_SWBA;
-
- ath_dbg(common, CONFIG,
- "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d imask: 0x%x\n",
- bss_conf->beacon_interval, nexttbtt,
- priv->ah->config.sw_beacon_response_time, imask);
-
- ath9k_htc_beaconq_config(priv);
+ if (ath9k_cmn_beacon_config_sta(priv->ah, bss_conf, &bs) == -EPERM)
+ return;
WMI_CMD(WMI_DISABLE_INTR_CMDID);
- ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
- priv->cur_beacon_conf.bmiss_cnt = 0;
+ ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
+ imask |= ATH9K_INT_BMISS;
htc_imask = cpu_to_be32(imask);
WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
}
-static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
- struct htc_beacon_config *bss_conf)
+static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *conf)
{
- struct ath_common *common = ath9k_hw_common(priv->ah);
- enum ath9k_int imask = 0;
- u32 nexttbtt, intval, tsftu;
- __be32 htc_imask = 0;
- int ret __attribute__ ((unused));
- u8 cmd_rsp;
- u64 tsf;
-
- intval = bss_conf->beacon_interval;
- nexttbtt = intval;
-
- /*
- * Pull nexttbtt forward to reflect the current TSF.
- */
- tsf = ath9k_hw_gettsf64(priv->ah);
- tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
- do {
- nexttbtt += intval;
- } while (nexttbtt < tsftu);
-
- /*
- * Only one IBSS interfce is allowed.
- */
- if (intval > DEFAULT_SWBA_RESPONSE)
- priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
- else
- priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
+ struct ath_hw *ah = priv->ah;
+ ah->imask = 0;
- if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
- imask |= ATH9K_INT_SWBA;
+ ath9k_cmn_beacon_config_ap(ah, conf, ATH9K_HTC_MAX_BCN_VIF);
+ ath9k_htc_beacon_init(priv, conf, false);
+}
- ath_dbg(common, CONFIG,
- "IBSS Beacon config, intval: %d, nexttbtt: %u, resp_time: %d, imask: 0x%x\n",
- bss_conf->beacon_interval, nexttbtt,
- priv->ah->config.sw_beacon_response_time, imask);
+static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *conf)
+{
+ struct ath_hw *ah = priv->ah;
+ ah->imask = 0;
- WMI_CMD(WMI_DISABLE_INTR_CMDID);
- ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
- priv->cur_beacon_conf.bmiss_cnt = 0;
- htc_imask = cpu_to_be32(imask);
- WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+ ath9k_cmn_beacon_config_adhoc(ah, conf);
+ ath9k_htc_beacon_init(priv, conf, conf->ibss_creator);
}
void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
@@ -287,7 +145,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
spin_lock_bh(&priv->beacon_lock);
- vif = priv->cur_beacon_conf.bslot[slot];
+ vif = priv->beacon.bslot[slot];
skb = ieee80211_get_buffered_bc(priv->hw, vif);
@@ -348,10 +206,10 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
spin_lock_bh(&priv->beacon_lock);
- vif = priv->cur_beacon_conf.bslot[slot];
+ vif = priv->beacon.bslot[slot];
avp = (struct ath9k_htc_vif *)vif->drv_priv;
- if (unlikely(test_bit(OP_SCANNING, &priv->op_flags))) {
+ if (unlikely(test_bit(ATH_OP_SCANNING, &common->op_flags))) {
spin_unlock_bh(&priv->beacon_lock);
return;
}
@@ -431,8 +289,8 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
int slot;
if (swba->beacon_pending != 0) {
- priv->cur_beacon_conf.bmiss_cnt++;
- if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) {
+ priv->beacon.bmisscnt++;
+ if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) {
ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n");
ieee80211_queue_work(priv->hw,
&priv->fatal_work);
@@ -440,16 +298,16 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
return;
}
- if (priv->cur_beacon_conf.bmiss_cnt) {
+ if (priv->beacon.bmisscnt) {
ath_dbg(common, BSTUCK,
"Resuming beacon xmit after %u misses\n",
- priv->cur_beacon_conf.bmiss_cnt);
- priv->cur_beacon_conf.bmiss_cnt = 0;
+ priv->beacon.bmisscnt);
+ priv->beacon.bmisscnt = 0;
}
slot = ath9k_htc_choose_bslot(priv, swba);
spin_lock_bh(&priv->beacon_lock);
- if (priv->cur_beacon_conf.bslot[slot] == NULL) {
+ if (priv->beacon.bslot[slot] == NULL) {
spin_unlock_bh(&priv->beacon_lock);
return;
}
@@ -468,13 +326,13 @@ void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
spin_lock_bh(&priv->beacon_lock);
for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++) {
- if (priv->cur_beacon_conf.bslot[i] == NULL) {
+ if (priv->beacon.bslot[i] == NULL) {
avp->bslot = i;
break;
}
}
- priv->cur_beacon_conf.bslot[avp->bslot] = vif;
+ priv->beacon.bslot[avp->bslot] = vif;
spin_unlock_bh(&priv->beacon_lock);
ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
@@ -488,7 +346,7 @@ void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
spin_lock_bh(&priv->beacon_lock);
- priv->cur_beacon_conf.bslot[avp->bslot] = NULL;
+ priv->beacon.bslot[avp->bslot] = NULL;
spin_unlock_bh(&priv->beacon_lock);
ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n",
@@ -504,7 +362,7 @@ void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
- struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
u64 tsfadjust;
if (avp->bslot == 0)
@@ -536,7 +394,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
- struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
bool beacon_configured;
@@ -591,7 +449,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
- struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
@@ -627,7 +485,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
- struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
switch (priv->ah->opmode) {
case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index c57d6b859c04..8a3bd5fe3a54 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -38,93 +38,6 @@ static int ath9k_ps_enable;
module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
-#define CHAN2G(_freq, _idx) { \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-static struct ieee80211_channel ath9k_2ghz_channels[] = {
- CHAN2G(2412, 0), /* Channel 1 */
- CHAN2G(2417, 1), /* Channel 2 */
- CHAN2G(2422, 2), /* Channel 3 */
- CHAN2G(2427, 3), /* Channel 4 */
- CHAN2G(2432, 4), /* Channel 5 */
- CHAN2G(2437, 5), /* Channel 6 */
- CHAN2G(2442, 6), /* Channel 7 */
- CHAN2G(2447, 7), /* Channel 8 */
- CHAN2G(2452, 8), /* Channel 9 */
- CHAN2G(2457, 9), /* Channel 10 */
- CHAN2G(2462, 10), /* Channel 11 */
- CHAN2G(2467, 11), /* Channel 12 */
- CHAN2G(2472, 12), /* Channel 13 */
- CHAN2G(2484, 13), /* Channel 14 */
-};
-
-static struct ieee80211_channel ath9k_5ghz_channels[] = {
- /* _We_ call this UNII 1 */
- CHAN5G(5180, 14), /* Channel 36 */
- CHAN5G(5200, 15), /* Channel 40 */
- CHAN5G(5220, 16), /* Channel 44 */
- CHAN5G(5240, 17), /* Channel 48 */
- /* _We_ call this UNII 2 */
- CHAN5G(5260, 18), /* Channel 52 */
- CHAN5G(5280, 19), /* Channel 56 */
- CHAN5G(5300, 20), /* Channel 60 */
- CHAN5G(5320, 21), /* Channel 64 */
- /* _We_ call this "Middle band" */
- CHAN5G(5500, 22), /* Channel 100 */
- CHAN5G(5520, 23), /* Channel 104 */
- CHAN5G(5540, 24), /* Channel 108 */
- CHAN5G(5560, 25), /* Channel 112 */
- CHAN5G(5580, 26), /* Channel 116 */
- CHAN5G(5600, 27), /* Channel 120 */
- CHAN5G(5620, 28), /* Channel 124 */
- CHAN5G(5640, 29), /* Channel 128 */
- CHAN5G(5660, 30), /* Channel 132 */
- CHAN5G(5680, 31), /* Channel 136 */
- CHAN5G(5700, 32), /* Channel 140 */
- /* _We_ call this UNII 3 */
- CHAN5G(5745, 33), /* Channel 149 */
- CHAN5G(5765, 34), /* Channel 153 */
- CHAN5G(5785, 35), /* Channel 157 */
- CHAN5G(5805, 36), /* Channel 161 */
- CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short premble */
-#define SHPCHECK(__hw_rate, __flags) \
- ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) { \
- .bitrate = (_bitrate), \
- .flags = (_flags), \
- .hw_value = (_hw_rate), \
- .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
- RATE(10, 0x1b, 0),
- RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp : 0x1e */
- RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1d */
- RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), /* short: 0x1c */
- RATE(60, 0x0b, 0),
- RATE(90, 0x0f, 0),
- RATE(120, 0x0a, 0),
- RATE(180, 0x0e, 0),
- RATE(240, 0x09, 0),
- RATE(360, 0x0d, 0),
- RATE(480, 0x08, 0),
- RATE(540, 0x0c, 0),
-};
-
#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = {
{ .throughput = 0 * 1024, .blink_time = 334 },
@@ -343,6 +256,25 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
}
}
+static void ath9k_regwrite_multi(struct ath_common *common)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &priv->wmi->multi_write,
+ sizeof(struct register_write) * priv->wmi->multi_write_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_dbg(common, WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
+ }
+ priv->wmi->multi_write_idx = 0;
+}
+
static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
{
struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -369,8 +301,6 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
struct ath_hw *ah = (struct ath_hw *) hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
- u32 rsp_status;
- int r;
mutex_lock(&priv->wmi->multi_write_mutex);
@@ -383,19 +313,8 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
priv->wmi->multi_write_idx++;
/* If the buffer is full, send it out. */
- if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER) {
- r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
- (u8 *) &priv->wmi->multi_write,
- sizeof(struct register_write) * priv->wmi->multi_write_idx,
- (u8 *) &rsp_status, sizeof(rsp_status),
- 100);
- if (unlikely(r)) {
- ath_dbg(common, WMI,
- "REGISTER WRITE FAILED, multi len: %d\n",
- priv->wmi->multi_write_idx);
- }
- priv->wmi->multi_write_idx = 0;
- }
+ if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER)
+ ath9k_regwrite_multi(common);
mutex_unlock(&priv->wmi->multi_write_mutex);
}
@@ -426,26 +345,13 @@ static void ath9k_regwrite_flush(void *hw_priv)
struct ath_hw *ah = (struct ath_hw *) hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
- u32 rsp_status;
- int r;
atomic_dec(&priv->wmi->mwrite_cnt);
mutex_lock(&priv->wmi->multi_write_mutex);
- if (priv->wmi->multi_write_idx) {
- r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
- (u8 *) &priv->wmi->multi_write,
- sizeof(struct register_write) * priv->wmi->multi_write_idx,
- (u8 *) &rsp_status, sizeof(rsp_status),
- 100);
- if (unlikely(r)) {
- ath_dbg(common, WMI,
- "REGISTER WRITE FAILED, multi len: %d\n",
- priv->wmi->multi_write_idx);
- }
- priv->wmi->multi_write_idx = 0;
- }
+ if (priv->wmi->multi_write_idx)
+ ath9k_regwrite_multi(common);
mutex_unlock(&priv->wmi->multi_write_mutex);
}
@@ -491,51 +397,6 @@ static const struct ath_bus_ops ath9k_usb_bus_ops = {
.eeprom_read = ath_usb_eeprom_read,
};
-static void setup_ht_cap(struct ath9k_htc_priv *priv,
- struct ieee80211_sta_ht_cap *ht_info)
-{
- struct ath_common *common = ath9k_hw_common(priv->ah);
- u8 tx_streams, rx_streams;
- int i;
-
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SM_PS |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
-
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
-
- ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
-
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
- /* ath9k_htc supports only 1 or 2 stream devices */
- tx_streams = ath9k_cmn_count_streams(priv->ah->txchainmask, 2);
- rx_streams = ath9k_cmn_count_streams(priv->ah->rxchainmask, 2);
-
- ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
-
- if (tx_streams >= 2)
- ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
-
- if (tx_streams != rx_streams) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_streams - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-
- for (i = 0; i < rx_streams; i++)
- ht_info->mcs.rx_mask[i] = 0xff;
-
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
-
static int ath9k_init_queues(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -544,8 +405,8 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
priv->hwq_map[i] = -1;
- priv->beaconq = ath9k_hw_beaconq_setup(priv->ah);
- if (priv->beaconq == -1) {
+ priv->beacon.beaconq = ath9k_hw_beaconq_setup(priv->ah);
+ if (priv->beacon.beaconq == -1) {
ath_err(common, "Unable to setup BEACON xmit queue\n");
goto err;
}
@@ -580,37 +441,13 @@ err:
return -EINVAL;
}
-static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
-{
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
- priv->sbands[IEEE80211_BAND_2GHZ].channels =
- ath9k_2ghz_channels;
- priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
- priv->sbands[IEEE80211_BAND_2GHZ].n_channels =
- ARRAY_SIZE(ath9k_2ghz_channels);
- priv->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
- priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates);
- }
-
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
- priv->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_channels;
- priv->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
- priv->sbands[IEEE80211_BAND_5GHZ].n_channels =
- ARRAY_SIZE(ath9k_5ghz_channels);
- priv->sbands[IEEE80211_BAND_5GHZ].bitrates =
- ath9k_legacy_rates + 4;
- priv->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates) - 4;
- }
-}
-
static void ath9k_init_misc(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
priv->ah->opmode = NL80211_IFTYPE_STATION;
}
@@ -622,12 +459,11 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
struct ath_common *common;
int i, ret = 0, csz = 0;
- set_bit(OP_INVALID, &priv->op_flags);
-
ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
if (!ah)
return -ENOMEM;
+ ah->dev = priv->dev;
ah->hw_version.devid = devid;
ah->hw_version.usbdev = drv_info;
ah->ah_flags |= AH_USE_EEPROM;
@@ -647,6 +483,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
common->priv = priv;
common->debug_mask = ath9k_debug;
common->btcoex_enabled = ath9k_htc_btcoex_enable == 1;
+ set_bit(ATH_OP_INVALID, &common->op_flags);
spin_lock_init(&priv->beacon_lock);
spin_lock_init(&priv->tx.tx_lock);
@@ -682,10 +519,11 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
goto err_queues;
for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++)
- priv->cur_beacon_conf.bslot[i] = NULL;
+ priv->beacon.bslot[i] = NULL;
+ priv->beacon.slottime = ATH9K_SLOT_TIME_9;
+ ath9k_cmn_init_channels_rates(common);
ath9k_cmn_init_crypto(ah);
- ath9k_init_channels_rates(priv);
ath9k_init_misc(priv);
ath9k_htc_init_btcoex(priv, product);
@@ -721,6 +559,7 @@ static const struct ieee80211_iface_combination if_comb = {
static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw)
{
+ struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(priv->ah);
struct base_eep_header *pBase;
@@ -765,19 +604,12 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->sbands[IEEE80211_BAND_2GHZ];
+ &common->sbands[IEEE80211_BAND_2GHZ];
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->sbands[IEEE80211_BAND_5GHZ];
-
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
- setup_ht_cap(priv,
- &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
- setup_ht_cap(priv,
- &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
- }
+ &common->sbands[IEEE80211_BAND_5GHZ];
+
+ ath9k_cmn_reload_chainmask(ah);
pBase = ath9k_htc_get_eeprom_base(priv);
if (pBase) {
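Illustrative sketch (not part of the patch): taken together, the hunks above
replace the driver-private band/rate setup with the shared ath9k_cmn_*
helpers. Using only names that appear in this diff, and assuming the
prototypes implied by their call sites, the resulting init flow is roughly:

	static int example_init_bands(struct ath_common *common,
				      struct ath_hw *ah,
				      struct ieee80211_hw *hw)
	{
		int ret;

		/* Fill common->sbands[] once, for both ath9k and ath9k_htc. */
		ret = ath9k_cmn_init_channels_rates(common);
		if (ret)
			return ret;

		/* Publish only the bands the hardware supports. */
		if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
			hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
				&common->sbands[IEEE80211_BAND_2GHZ];
		if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
			hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
				&common->sbands[IEEE80211_BAND_5GHZ];

		/* HT caps are now derived from the chainmask in common code. */
		ath9k_cmn_reload_chainmask(ah);
		return 0;
	}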
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index c9254a61ca52..f46cd0250e48 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -250,7 +250,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
u8 cmd_rsp;
int ret;
- if (test_bit(OP_INVALID, &priv->op_flags))
+ if (test_bit(ATH_OP_INVALID, &common->op_flags))
return -EIO;
fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -304,7 +304,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
htc_start(priv->htc);
- if (!test_bit(OP_SCANNING, &priv->op_flags) &&
+ if (!test_bit(ATH_OP_SCANNING, &common->op_flags) &&
!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
ath9k_htc_vif_reconfig(priv);
@@ -748,7 +748,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
common->ani.shortcal_timer = timestamp;
common->ani.checkani_timer = timestamp;
- set_bit(OP_ANI_RUNNING, &priv->op_flags);
+ set_bit(ATH_OP_ANI_RUN, &common->op_flags);
ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
@@ -756,8 +756,9 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
cancel_delayed_work_sync(&priv->ani_work);
- clear_bit(OP_ANI_RUNNING, &priv->op_flags);
+ clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
}
void ath9k_htc_ani_work(struct work_struct *work)
@@ -942,7 +943,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
ath_dbg(common, CONFIG,
"Failed to update capability in target\n");
- clear_bit(OP_INVALID, &priv->op_flags);
+ clear_bit(ATH_OP_INVALID, &common->op_flags);
htc_start(priv->htc);
spin_lock_bh(&priv->tx.tx_lock);
@@ -971,7 +972,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- if (test_bit(OP_INVALID, &priv->op_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&priv->mutex);
return;
@@ -1013,7 +1014,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
ath9k_htc_ps_restore(priv);
ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
- set_bit(OP_INVALID, &priv->op_flags);
+ set_bit(ATH_OP_INVALID, &common->op_flags);
ath_dbg(common, CONFIG, "Driver halt\n");
mutex_unlock(&priv->mutex);
@@ -1087,7 +1088,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
ath9k_htc_set_opmode(priv);
if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
- !test_bit(OP_ANI_RUNNING, &priv->op_flags)) {
+ !test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
ath9k_hw_set_tsfadjust(priv->ah, true);
ath9k_htc_start_ani(priv);
}
@@ -1245,13 +1246,14 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
u32 rfilt;
mutex_lock(&priv->mutex);
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
- if (test_bit(OP_INVALID, &priv->op_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(ath9k_hw_common(priv->ah), ANY,
"Unable to configure filter on invalid state\n");
mutex_unlock(&priv->mutex);
@@ -1474,7 +1476,9 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) {
common->curaid = bss_conf->aid;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
}
}
@@ -1496,6 +1500,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
+ int slottime;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
@@ -1507,6 +1512,9 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->assoc ?
priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
+ if (!bss_conf->assoc)
+ clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
+
if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_htc_choose_set_bssid(priv);
if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
@@ -1528,7 +1536,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
bss_conf->bssid);
ath9k_htc_set_tsfadjust(priv, vif);
- set_bit(OP_ENABLE_BEACON, &priv->op_flags);
+ priv->cur_beacon_conf.enable_beacon = 1;
ath9k_htc_beacon_config(priv, vif);
}
@@ -1542,7 +1550,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG,
"Beacon disabled for BSS: %pM\n",
bss_conf->bssid);
- clear_bit(OP_ENABLE_BEACON, &priv->op_flags);
+ priv->cur_beacon_conf.enable_beacon = 0;
ath9k_htc_beacon_config(priv, vif);
}
}
@@ -1568,11 +1576,21 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
- ah->slottime = 9;
+ slottime = 9;
else
- ah->slottime = 20;
-
- ath9k_hw_init_global_settings(ah);
+ slottime = 20;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * Defer update, so that connected stations can adjust
+ * their settings at the same time.
+ * See beacon.c for more details
+ */
+ priv->beacon.slottime = slottime;
+ priv->beacon.updateslot = UPDATE;
+ } else {
+ ah->slottime = slottime;
+ ath9k_hw_init_global_settings(ah);
+ }
}
if (changed & BSS_CHANGED_HT)
@@ -1669,10 +1687,11 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
mutex_lock(&priv->mutex);
spin_lock_bh(&priv->beacon_lock);
- set_bit(OP_SCANNING, &priv->op_flags);
+ set_bit(ATH_OP_SCANNING, &common->op_flags);
spin_unlock_bh(&priv->beacon_lock);
cancel_work_sync(&priv->ps_work);
ath9k_htc_stop_ani(priv);
@@ -1682,10 +1701,11 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
mutex_lock(&priv->mutex);
spin_lock_bh(&priv->beacon_lock);
- clear_bit(OP_SCANNING, &priv->op_flags);
+ clear_bit(ATH_OP_SCANNING, &common->op_flags);
spin_unlock_bh(&priv->beacon_lock);
ath9k_htc_ps_wakeup(priv);
ath9k_htc_vif_reconfig(priv);
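Illustrative sketch (not part of the patch): the substitutions above all
follow one pattern. The driver-private OP_* bits in priv->op_flags are
replaced by shared ATH_OP_* bits in struct ath_common, so ath9k and
ath9k_htc test the same flags the same way. Only identifiers visible in
this diff are used below:

	static bool example_is_scanning(struct ath9k_htc_priv *priv)
	{
		struct ath_common *common = ath9k_hw_common(priv->ah);

		/* Formerly: test_bit(OP_SCANNING, &priv->op_flags) */
		return test_bit(ATH_OP_SCANNING, &common->op_flags);
	}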
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 12e0f32a4905..e8149e3dbdd5 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -924,46 +924,43 @@ static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
ath9k_hw_rxena(priv->ah);
ath9k_htc_opmode_init(priv);
- ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags));
- priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
+ ath9k_hw_startpcureceive(priv->ah, test_bit(ATH_OP_SCANNING, &common->op_flags));
}
-static void ath9k_process_rate(struct ieee80211_hw *hw,
- struct ieee80211_rx_status *rxs,
- u8 rx_rate, u8 rs_flags)
+static inline void convert_htc_flag(struct ath_rx_status *rx_stats,
+ struct ath_htc_rx_status *rxstatus)
{
- struct ieee80211_supported_band *sband;
- enum ieee80211_band band;
- unsigned int i = 0;
-
- if (rx_rate & 0x80) {
- /* HT rate */
- rxs->flag |= RX_FLAG_HT;
- if (rs_flags & ATH9K_RX_2040)
- rxs->flag |= RX_FLAG_40MHZ;
- if (rs_flags & ATH9K_RX_GI)
- rxs->flag |= RX_FLAG_SHORT_GI;
- rxs->rate_idx = rx_rate & 0x7f;
- return;
- }
-
- band = hw->conf.chandef.chan->band;
- sband = hw->wiphy->bands[band];
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_rate) {
- rxs->rate_idx = i;
- return;
- }
- if (sband->bitrates[i].hw_value_short == rx_rate) {
- rxs->rate_idx = i;
- rxs->flag |= RX_FLAG_SHORTPRE;
- return;
- }
- }
+ rx_stats->flag = 0;
+ if (rxstatus->rs_flags & ATH9K_RX_2040)
+ rx_stats->flag |= RX_FLAG_40MHZ;
+ if (rxstatus->rs_flags & ATH9K_RX_GI)
+ rx_stats->flag |= RX_FLAG_SHORT_GI;
+}
+static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats,
+ struct ath_htc_rx_status *rxstatus)
+{
+ rx_stats->rs_datalen = rxstatus->rs_datalen;
+ rx_stats->rs_status = rxstatus->rs_status;
+ rx_stats->rs_phyerr = rxstatus->rs_phyerr;
+ rx_stats->rs_rssi = rxstatus->rs_rssi;
+ rx_stats->rs_keyix = rxstatus->rs_keyix;
+ rx_stats->rs_rate = rxstatus->rs_rate;
+ rx_stats->rs_antenna = rxstatus->rs_antenna;
+ rx_stats->rs_more = rxstatus->rs_more;
+
+ memcpy(rx_stats->rs_rssi_ctl, rxstatus->rs_rssi_ctl,
+ sizeof(rx_stats->rs_rssi_ctl));
+ memcpy(rx_stats->rs_rssi_ext, rxstatus->rs_rssi_ext,
+ sizeof(rx_stats->rs_rssi_ext));
+
+ rx_stats->rs_isaggr = rxstatus->rs_isaggr;
+ rx_stats->rs_moreaggr = rxstatus->rs_moreaggr;
+ rx_stats->rs_num_delims = rxstatus->rs_num_delims;
+ convert_htc_flag(rx_stats, rxstatus);
}
static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
@@ -975,10 +972,10 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw = priv->hw;
struct sk_buff *skb = rxbuf->skb;
struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_hw *ah = common->ah;
struct ath_htc_rx_status *rxstatus;
- int hdrlen, padsize;
- int last_rssi = ATH_RSSI_DUMMY_MARKER;
- __le16 fc;
+ struct ath_rx_status rx_stats;
+ bool decrypt_error;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@@ -999,103 +996,39 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
ath9k_htc_err_stat_rx(priv, rxstatus);
/* Get the RX status information */
- memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
- skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
-
- hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-
- padsize = hdrlen & 3;
- if (padsize && skb->len >= hdrlen+padsize+FCS_LEN) {
- memmove(skb->data + padsize, skb->data, hdrlen);
- skb_pull(skb, padsize);
- }
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
- if (rxbuf->rxstatus.rs_status != 0) {
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
- goto rx_next;
-
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
- /* FIXME */
- } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
- if (ieee80211_is_ctl(fc))
- /*
- * Sometimes, we get invalid
- * MIC failures on valid control frames.
- * Remove these mic errors.
- */
- rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
- else
- rx_status->flag |= RX_FLAG_MMIC_ERROR;
- }
-
- /*
- * Reject error frames with the exception of
- * decryption and MIC failures. For monitor mode,
- * we also ignore the CRC error.
- */
- if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
- if (rxbuf->rxstatus.rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
- ATH9K_RXERR_CRC))
- goto rx_next;
- } else {
- if (rxbuf->rxstatus.rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
- goto rx_next;
- }
- }
- }
-
- if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
- u8 keyix;
- keyix = rxbuf->rxstatus.rs_keyix;
- if (keyix != ATH9K_RXKEYIX_INVALID) {
- rx_status->flag |= RX_FLAG_DECRYPTED;
- } else if (ieee80211_has_protected(fc) &&
- skb->len >= hdrlen + 4) {
- keyix = skb->data[hdrlen + 3] >> 6;
- if (test_bit(keyix, common->keymap))
- rx_status->flag |= RX_FLAG_DECRYPTED;
- }
- }
-
- ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
- rxbuf->rxstatus.rs_flags);
-
- if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
- !rxbuf->rxstatus.rs_moreaggr)
- ATH_RSSI_LPF(priv->rx.last_rssi,
- rxbuf->rxstatus.rs_rssi);
-
- last_rssi = priv->rx.last_rssi;
+ /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
+ * After this, we can drop this part of the skb. */
+ rx_status_htc_to_ath(&rx_stats, rxstatus);
+ rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
+ skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
- if (ath_is_mybeacon(common, hdr)) {
- s8 rssi = rxbuf->rxstatus.rs_rssi;
+ /*
+ * everything but the rate is checked here, the rate check is done
+ * separately to avoid doing two lookups for a rate for each frame.
+ */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (!ath9k_cmn_rx_accept(common, hdr, rx_status, &rx_stats,
+ &decrypt_error, priv->rxfilter))
+ goto rx_next;
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ ath9k_cmn_rx_skb_postprocess(common, skb, &rx_stats,
+ rx_status, decrypt_error);
- if (rssi < 0)
- rssi = 0;
+ if (ath9k_cmn_process_rate(common, hw, &rx_stats, rx_status))
+ goto rx_next;
- priv->ah->stats.avgbrssi = rssi;
- }
+ rx_stats.is_mybeacon = ath_is_mybeacon(common, hdr);
+ ath9k_cmn_process_rssi(common, hw, &rx_stats, rx_status);
- rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
- rx_status->band = hw->conf.chandef.chan->band;
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
- rx_status->antenna = rxbuf->rxstatus.rs_antenna;
+ rx_status->band = ah->curchan->chan->band;
+ rx_status->freq = ah->curchan->chan->center_freq;
+ rx_status->antenna = rx_stats.rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_END;
return true;
-
rx_next:
return false;
}
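For reference, a minimal sketch of what the deleted RSSI handling above now
amounts to on the common side. This assumes ath9k_cmn_process_rssi() keeps
the same low-pass filter that the removed lines implemented, applied to the
new common->last_rssi field; the helper's body is not part of this diff:

	static void example_track_rssi(struct ath_common *common,
				       struct ath_rx_status *rx_stats)
	{
		/* Ignore bad samples and intermediate A-MPDU subframes,
		 * then fold the sample into the running average. */
		if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
		    !rx_stats->rs_moreaggr)
			ATH_RSSI_LPF(common->last_rssi, rx_stats->rs_rssi);
	}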
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index aac4a406a513..a0ff5b637054 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -358,6 +358,36 @@ ret:
kfree_skb(skb);
}
+static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
+ struct sk_buff *skb)
+{
+ uint32_t *pattern = (uint32_t *)skb->data;
+
+ switch (*pattern) {
+ case 0x33221199:
+ {
+ struct htc_panic_bad_vaddr *htc_panic;
+ htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
+ dev_err(htc_handle->dev, "ath: firmware panic! "
+ "exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
+ htc_panic->exccause, htc_panic->pc,
+ htc_panic->badvaddr);
+ break;
+ }
+ case 0x33221299:
+ {
+ struct htc_panic_bad_epid *htc_panic;
+ htc_panic = (struct htc_panic_bad_epid *) skb->data;
+ dev_err(htc_handle->dev, "ath: firmware panic! "
+ "bad epid: 0x%08x\n", htc_panic->epid);
+ break;
+ }
+ default:
+ dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
+ break;
+ }
+}
+
/*
* HTC Messages are handled directly here and the obtained SKB
* is freed.
@@ -379,6 +409,12 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
htc_hdr = (struct htc_frame_hdr *) skb->data;
epid = htc_hdr->endpoint_id;
+ if (epid == 0x99) {
+ ath9k_htc_fw_panic_report(htc_handle, skb);
+ kfree_skb(skb);
+ return;
+ }
+
if (epid >= ENDPOINT_MAX) {
if (pipe_id != USB_REG_IN_PIPE)
dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index e1ffbb6bd636..06474ccc7696 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -77,6 +77,18 @@ struct htc_config_pipe_msg {
u8 credits;
} __packed;
+struct htc_panic_bad_vaddr {
+ __be32 pattern;
+ __be32 exccause;
+ __be32 pc;
+ __be32 badvaddr;
+} __packed;
+
+struct htc_panic_bad_epid {
+ __be32 pattern;
+ __be32 epid;
+} __packed;
+
struct htc_ep_callbacks {
void *priv;
void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 9078a6c5a74e..c8a9dfab1fee 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -23,7 +23,6 @@
#include "hw.h"
#include "hw-ops.h"
-#include "rc.h"
#include "ar9003_mac.h"
#include "ar9003_mci.h"
#include "ar9003_phy.h"
@@ -883,7 +882,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
if (AR_SREV_9300_20_OR_LATER(ah)) {
@@ -3048,6 +3047,7 @@ static struct {
{ AR_SREV_VERSION_9462, "9462" },
{ AR_SREV_VERSION_9550, "9550" },
{ AR_SREV_VERSION_9565, "9565" },
+ { AR_SREV_VERSION_9531, "9531" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 1fc2e5a26b52..c0a4e866edca 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -62,111 +62,6 @@ module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
bool is_ath9k_unloaded;
-/* We use the hw_value as an index into our private channel structure */
-
-#define CHAN2G(_freq, _idx) { \
- .band = IEEE80211_BAND_2GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-/* Some 2 GHz radios are actually tunable on 2312-2732
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
- CHAN2G(2412, 0), /* Channel 1 */
- CHAN2G(2417, 1), /* Channel 2 */
- CHAN2G(2422, 2), /* Channel 3 */
- CHAN2G(2427, 3), /* Channel 4 */
- CHAN2G(2432, 4), /* Channel 5 */
- CHAN2G(2437, 5), /* Channel 6 */
- CHAN2G(2442, 6), /* Channel 7 */
- CHAN2G(2447, 7), /* Channel 8 */
- CHAN2G(2452, 8), /* Channel 9 */
- CHAN2G(2457, 9), /* Channel 10 */
- CHAN2G(2462, 10), /* Channel 11 */
- CHAN2G(2467, 11), /* Channel 12 */
- CHAN2G(2472, 12), /* Channel 13 */
- CHAN2G(2484, 13), /* Channel 14 */
-};
-
-/* Some 5 GHz radios are actually tunable on XXXX-YYYY
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
- /* _We_ call this UNII 1 */
- CHAN5G(5180, 14), /* Channel 36 */
- CHAN5G(5200, 15), /* Channel 40 */
- CHAN5G(5220, 16), /* Channel 44 */
- CHAN5G(5240, 17), /* Channel 48 */
- /* _We_ call this UNII 2 */
- CHAN5G(5260, 18), /* Channel 52 */
- CHAN5G(5280, 19), /* Channel 56 */
- CHAN5G(5300, 20), /* Channel 60 */
- CHAN5G(5320, 21), /* Channel 64 */
- /* _We_ call this "Middle band" */
- CHAN5G(5500, 22), /* Channel 100 */
- CHAN5G(5520, 23), /* Channel 104 */
- CHAN5G(5540, 24), /* Channel 108 */
- CHAN5G(5560, 25), /* Channel 112 */
- CHAN5G(5580, 26), /* Channel 116 */
- CHAN5G(5600, 27), /* Channel 120 */
- CHAN5G(5620, 28), /* Channel 124 */
- CHAN5G(5640, 29), /* Channel 128 */
- CHAN5G(5660, 30), /* Channel 132 */
- CHAN5G(5680, 31), /* Channel 136 */
- CHAN5G(5700, 32), /* Channel 140 */
- /* _We_ call this UNII 3 */
- CHAN5G(5745, 33), /* Channel 149 */
- CHAN5G(5765, 34), /* Channel 153 */
- CHAN5G(5785, 35), /* Channel 157 */
- CHAN5G(5805, 36), /* Channel 161 */
- CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short premble */
-#define SHPCHECK(__hw_rate, __flags) \
- ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) { \
- .bitrate = (_bitrate), \
- .flags = (_flags), \
- .hw_value = (_hw_rate), \
- .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
- RATE(10, 0x1b, 0),
- RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
-};
#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
@@ -258,64 +153,6 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl
/* Initialization */
/**************************/
-static void setup_ht_cap(struct ath_softc *sc,
- struct ieee80211_sta_ht_cap *ht_info)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- u8 tx_streams, rx_streams;
- int i, max_streams;
-
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SM_PS |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
- ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
-
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
- if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
- max_streams = 1;
- else if (AR_SREV_9462(ah))
- max_streams = 2;
- else if (AR_SREV_9300_20_OR_LATER(ah))
- max_streams = 3;
- else
- max_streams = 2;
-
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- if (max_streams >= 2)
- ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
- ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
- }
-
- /* set up supported mcs set */
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
- rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
-
- ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
-
- if (tx_streams != rx_streams) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_streams - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-
- for (i = 0; i < rx_streams; i++)
- ht_info->mcs.rx_mask[i] = 0xff;
-
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
-
static void ath9k_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
@@ -486,51 +323,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
return 0;
}
-static int ath9k_init_channels_rates(struct ath_softc *sc)
-{
- void *channels;
-
- BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
- ARRAY_SIZE(ath9k_5ghz_chantable) !=
- ATH9K_NUM_CHANNELS);
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
- channels = devm_kzalloc(sc->dev,
- sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- memcpy(channels, ath9k_2ghz_chantable,
- sizeof(ath9k_2ghz_chantable));
- sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
- sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
- sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
- ARRAY_SIZE(ath9k_2ghz_chantable);
- sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
- sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates);
- }
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
- channels = devm_kzalloc(sc->dev,
- sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- memcpy(channels, ath9k_5ghz_chantable,
- sizeof(ath9k_5ghz_chantable));
- sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
- sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
- sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
- ARRAY_SIZE(ath9k_5ghz_chantable);
- sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
- ath9k_legacy_rates + 4;
- sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates) - 4;
- }
- return 0;
-}
-
static void ath9k_init_misc(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -538,7 +330,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->config.txpowlimit = ATH_TXPOWER_MAX;
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
sc->beacon.slottime = ATH9K_SLOT_TIME_9;
@@ -793,7 +585,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (ret)
goto err_btcoex;
- ret = ath9k_init_channels_rates(sc);
+ ret = ath9k_cmn_init_channels_rates(common);
if (ret)
goto err_btcoex;
@@ -823,10 +615,11 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct cfg80211_chan_def chandef;
int i;
- sband = &sc->sbands[band];
+ sband = &common->sbands[band];
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
ah->curchan = &ah->channels[chan->hw_value];
@@ -849,17 +642,6 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
ah->curchan = curchan;
}
-void ath9k_reload_chainmask_settings(struct ath_softc *sc)
-{
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
- return;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
- setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
- setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
-}
-
static const struct ieee80211_iface_limit if_limits[] = {
{ .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -949,6 +731,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->queues = 4;
hw->max_rates = 4;
@@ -969,13 +752,13 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &sc->sbands[IEEE80211_BAND_2GHZ];
+ &common->sbands[IEEE80211_BAND_2GHZ];
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &sc->sbands[IEEE80211_BAND_5GHZ];
+ &common->sbands[IEEE80211_BAND_5GHZ];
ath9k_init_wow(hw);
- ath9k_reload_chainmask_settings(sc);
+ ath9k_cmn_reload_chainmask(ah);
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}
@@ -1106,19 +889,11 @@ static int __init ath9k_init(void)
{
int error;
- /* Register rate control algorithm */
- error = ath_rate_control_register();
- if (error != 0) {
- pr_err("Unable to register rate control algorithm: %d\n",
- error);
- goto err_out;
- }
-
error = ath_pci_init();
if (error < 0) {
pr_err("No PCI devices found, driver not installed\n");
error = -ENODEV;
- goto err_rate_unregister;
+ goto err_out;
}
error = ath_ahb_init();
@@ -1131,9 +906,6 @@ static int __init ath9k_init(void)
err_pci_exit:
ath_pci_exit();
-
- err_rate_unregister:
- ath_rate_control_unregister();
err_out:
return error;
}
@@ -1144,7 +916,6 @@ static void __exit ath9k_exit(void)
is_ath9k_unloaded = true;
ath_ahb_exit();
ath_pci_exit();
- ath_rate_control_unregister();
pr_info("%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 30dcef5aba10..72a715fe8f24 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -115,13 +115,14 @@ void ath_hw_pll_work(struct work_struct *work)
u32 pll_sqsum;
struct ath_softc *sc = container_of(work, struct ath_softc,
hw_pll_work.work);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
/*
* ensure that the PLL WAR is executed only
* after the STA is associated (or) if the
* beaconing had started in interfaces that
* uses beacons.
*/
- if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
+ if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
return;
if (sc->tx99_state)
@@ -414,7 +415,7 @@ void ath_start_ani(struct ath_softc *sc)
unsigned long timestamp = jiffies_to_msecs(jiffies);
if (common->disable_ani ||
- !test_bit(SC_OP_ANI_RUN, &sc->sc_flags) ||
+ !test_bit(ATH_OP_ANI_RUN, &common->op_flags) ||
(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
return;
@@ -438,6 +439,7 @@ void ath_stop_ani(struct ath_softc *sc)
void ath_check_ani(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
/*
@@ -453,23 +455,23 @@ void ath_check_ani(struct ath_softc *sc)
* Disable ANI only when there are no
* associated stations.
*/
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
goto stop_ani;
}
} else if (ah->opmode == NL80211_IFTYPE_STATION) {
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
goto stop_ani;
}
- if (!test_bit(SC_OP_ANI_RUN, &sc->sc_flags)) {
- set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ if (!test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
+ set_bit(ATH_OP_ANI_RUN, &common->op_flags);
ath_start_ani(sc);
}
return;
stop_ani:
- clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
ath_stop_ani(sc);
}
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 5f727588ca27..51ce36f108f9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -827,7 +827,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
return;
}
- if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
async_mask = AR_INTR_MAC_IRQ;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 10271373a0cd..89df634e81f9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -155,12 +155,8 @@ struct ath_htc_rx_status {
u8 rs_status;
u8 rs_phyerr;
int8_t rs_rssi;
- int8_t rs_rssi_ctl0;
- int8_t rs_rssi_ctl1;
- int8_t rs_rssi_ctl2;
- int8_t rs_rssi_ext0;
- int8_t rs_rssi_ext1;
- int8_t rs_rssi_ext2;
+ int8_t rs_rssi_ctl[3];
+ int8_t rs_rssi_ext[3];
u8 rs_keyix;
u8 rs_rate;
u8 rs_antenna;
@@ -170,6 +166,7 @@ struct ath_htc_rx_status {
u8 rs_num_delims;
u8 rs_flags;
u8 rs_dummy;
+ /* FIXME: evm* never used? */
__be32 evm0;
__be32 evm1;
__be32 evm2;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 5924f72dd493..d69853b848ce 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -229,16 +229,16 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath9k_cmn_update_txpow(ah, sc->curtxpow,
sc->config.txpowlimit, &sc->curtxpow);
- clear_bit(SC_OP_HW_RESET, &sc->sc_flags);
+ clear_bit(ATH_OP_HW_RESET, &common->op_flags);
ath9k_hw_set_interrupts(ah);
ath9k_hw_enable_interrupts(ah);
if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) {
- if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
+ if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
goto work;
if (ah->opmode == NL80211_IFTYPE_STATION &&
- test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
spin_lock_irqsave(&sc->sc_pm_lock, flags);
sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
@@ -336,7 +336,7 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
int old_pos = -1;
int r;
- if (test_bit(SC_OP_INVALID, &sc->sc_flags))
+ if (test_bit(ATH_OP_INVALID, &common->op_flags))
return -EIO;
offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -402,7 +402,7 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
chan->center_freq);
} else {
/* perform spectral scan if requested. */
- if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
sc->spectral_mode == SPECTRAL_CHANSCAN)
ath9k_spectral_scan_trigger(hw);
}
@@ -451,7 +451,7 @@ void ath9k_tasklet(unsigned long data)
* interrupts are enabled in the reset routine.
*/
atomic_inc(&ah->intr_ref_cnt);
- ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
+ ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
goto out;
}
@@ -471,7 +471,7 @@ void ath9k_tasklet(unsigned long data)
* interrupts are enabled in the reset routine.
*/
atomic_inc(&ah->intr_ref_cnt);
- ath_dbg(common, ANY,
+ ath_dbg(common, RESET,
"BB_WATCHDOG: Skipping interrupts\n");
goto out;
}
@@ -484,7 +484,7 @@ void ath9k_tasklet(unsigned long data)
type = RESET_TYPE_TX_GTT;
ath9k_queue_reset(sc, type);
atomic_inc(&ah->intr_ref_cnt);
- ath_dbg(common, ANY,
+ ath_dbg(common, RESET,
"GTT: Skipping interrupts\n");
goto out;
}
@@ -566,6 +566,7 @@ irqreturn_t ath_isr(int irq, void *dev)
struct ath_softc *sc = dev;
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
enum ath9k_int status;
u32 sync_cause = 0;
bool sched = false;
@@ -575,7 +576,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* touch anything. Note this can happen early
* on if the IRQ is shared.
*/
- if (test_bit(SC_OP_INVALID, &sc->sc_flags))
+ if (test_bit(ATH_OP_INVALID, &common->op_flags))
return IRQ_NONE;
/* shared irq, not for us */
@@ -583,7 +584,7 @@ irqreturn_t ath_isr(int irq, void *dev)
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
ath9k_hw_kill_interrupts(ah);
return IRQ_HANDLED;
}
@@ -684,10 +685,11 @@ int ath_reset(struct ath_softc *sc)
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
#ifdef CONFIG_ATH9K_DEBUGFS
RESET_STAT_INC(sc, type);
#endif
- set_bit(SC_OP_HW_RESET, &sc->sc_flags);
+ set_bit(ATH_OP_HW_RESET, &common->op_flags);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
@@ -768,7 +770,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath_mci_enable(sc);
- clear_bit(SC_OP_INVALID, &sc->sc_flags);
+ clear_bit(ATH_OP_INVALID, &common->op_flags);
sc->sc_ah->is_monitoring = false;
if (!ath_complete_reset(sc, false))
@@ -885,7 +887,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath_cancel_work(sc);
- if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
@@ -940,7 +942,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_ps_restore(sc);
- set_bit(SC_OP_INVALID, &sc->sc_flags);
+ set_bit(ATH_OP_INVALID, &common->op_flags);
sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
@@ -1081,7 +1083,7 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
*/
if (ah->opmode == NL80211_IFTYPE_STATION &&
old_opmode == NL80211_IFTYPE_AP &&
- test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
ieee80211_iterate_active_interfaces_atomic(
sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath9k_sta_vif_iter, sc);
@@ -1178,9 +1180,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
- if (sc->csa_vif == vif)
- sc->csa_vif = NULL;
-
ath9k_ps_wakeup(sc);
ath9k_calculate_summary_state(hw, NULL);
ath9k_ps_restore(sc);
@@ -1593,7 +1592,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
unsigned long flags;
- set_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
+ set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
avp->primary_sta_vif = true;
/*
@@ -1609,7 +1608,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah);
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
@@ -1628,8 +1627,9 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct ath_softc *sc = data;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- if (test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ if (test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
return;
if (bss_conf->assoc)
@@ -1660,18 +1660,18 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid, bss_conf->assoc);
if (avp->primary_sta_vif && !bss_conf->assoc) {
- clear_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
+ clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
avp->primary_sta_vif = false;
if (ah->opmode == NL80211_IFTYPE_STATION)
- clear_bit(SC_OP_BEACONS, &sc->sc_flags);
+ clear_bit(ATH_OP_BEACONS, &common->op_flags);
}
ieee80211_iterate_active_interfaces_atomic(
sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath9k_bss_assoc_iter, sc);
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) &&
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags) &&
ah->opmode == NL80211_IFTYPE_STATION) {
memset(common->curbssid, 0, ETH_ALEN);
common->curaid = 0;
@@ -1866,7 +1866,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
static bool ath9k_has_tx_pending(struct ath_softc *sc)
{
- int i, npend;
+ int i, npend = 0;
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (!ATH_TXQ_SETUP(sc, i))
@@ -1900,7 +1900,7 @@ static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
return;
}
- if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
@@ -2056,7 +2056,7 @@ static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant);
ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant);
- ath9k_reload_chainmask_settings(sc);
+ ath9k_cmn_reload_chainmask(ah);
return 0;
}
@@ -2073,26 +2073,23 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
- set_bit(SC_OP_SCANNING, &sc->sc_flags);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ set_bit(ATH_OP_SCANNING, &common->op_flags);
}
static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
- clear_bit(SC_OP_SCANNING, &sc->sc_flags);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ clear_bit(ATH_OP_SCANNING, &common->op_flags);
}
static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
- struct ath_softc *sc = hw->priv;
-
- /* mac80211 does not support CSA in multi-if cases (yet) */
- if (WARN_ON(sc->csa_vif))
- return;
-
- sc->csa_vif = vif;
+ /* depend on vif->csa_active only */
+ return;
}
struct ieee80211_ops ath9k_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 71799fcade54..a0dbcc412384 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -555,7 +555,7 @@ void ath_mci_intr(struct ath_softc *sc)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
while (more_data == MCI_GPM_MORE) {
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
return;
pgpm = mci->gpm_buf.bf_addr;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 55724b02316b..25304adece57 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -784,6 +784,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ath_softc *sc;
struct ieee80211_hw *hw;
+ struct ath_common *common;
u8 csz;
u32 val;
int ret = 0;
@@ -858,9 +859,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->mem = pcim_iomap_table(pdev)[0];
sc->driver_data = id->driver_data;
- /* Will be cleared in ath9k_start() */
- set_bit(SC_OP_INVALID, &sc->sc_flags);
-
ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
@@ -879,6 +877,10 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
hw_name, (unsigned long)sc->mem, pdev->irq);
+ /* Will be cleared in ath9k_start() */
+ common = ath9k_hw_common(sc->sc_ah);
+ set_bit(ATH_OP_INVALID, &common->op_flags);
+
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
deleted file mode 100644
index d829bb62a3fc..000000000000
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ /dev/null
@@ -1,1495 +0,0 @@
-/*
- * Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2004-2011 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "ath9k.h"
-
-static const struct ath_rate_table ar5416_11na_ratetable = {
- 68,
- 8, /* MCS start */
- {
- [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
- 5400, 0, 12 }, /* 6 Mb */
- [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
- 7800, 1, 18 }, /* 9 Mb */
- [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
- 10000, 2, 24 }, /* 12 Mb */
- [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
- 13900, 3, 36 }, /* 18 Mb */
- [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
- 17300, 4, 48 }, /* 24 Mb */
- [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
- 23000, 5, 72 }, /* 36 Mb */
- [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
- 27400, 6, 96 }, /* 48 Mb */
- [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
- 29300, 7, 108 }, /* 54 Mb */
- [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
- 6400, 0, 0 }, /* 6.5 Mb */
- [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
- 12700, 1, 1 }, /* 13 Mb */
- [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
- 18800, 2, 2 }, /* 19.5 Mb */
- [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
- 25000, 3, 3 }, /* 26 Mb */
- [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
- 36700, 4, 4 }, /* 39 Mb */
- [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
- 48100, 5, 5 }, /* 52 Mb */
- [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
- 53500, 6, 6 }, /* 58.5 Mb */
- [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
- 59000, 7, 7 }, /* 65 Mb */
- [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
- 65400, 7, 7 }, /* 75 Mb */
- [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
- 12700, 8, 8 }, /* 13 Mb */
- [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
- 24800, 9, 9 }, /* 26 Mb */
- [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
- 36600, 10, 10 }, /* 39 Mb */
- [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
- 48100, 11, 11 }, /* 52 Mb */
- [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
- 69500, 12, 12 }, /* 78 Mb */
- [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
- 89500, 13, 13 }, /* 104 Mb */
- [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
- 98900, 14, 14 }, /* 117 Mb */
- [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
- 108300, 15, 15 }, /* 130 Mb */
- [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
- 120000, 15, 15 }, /* 144.4 Mb */
- [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
- 17400, 16, 16 }, /* 19.5 Mb */
- [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
- 35100, 17, 17 }, /* 39 Mb */
- [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
- 52600, 18, 18 }, /* 58.5 Mb */
- [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
- 70400, 19, 19 }, /* 78 Mb */
- [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
- 104900, 20, 20 }, /* 117 Mb */
- [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
- 115800, 20, 20 }, /* 130 Mb*/
- [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
- 137200, 21, 21 }, /* 156 Mb */
- [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
- 151100, 21, 21 }, /* 173.3 Mb */
- [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
- 152800, 22, 22 }, /* 175.5 Mb */
- [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
- 168400, 22, 22 }, /* 195 Mb*/
- [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
- 168400, 23, 23 }, /* 195 Mb */
- [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
- 185000, 23, 23 }, /* 216.7 Mb */
- [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
- 13200, 0, 0 }, /* 13.5 Mb*/
- [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
- 25900, 1, 1 }, /* 27.0 Mb*/
- [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
- 38600, 2, 2 }, /* 40.5 Mb*/
- [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
- 49800, 3, 3 }, /* 54 Mb */
- [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
- 72200, 4, 4 }, /* 81 Mb */
- [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
- 92900, 5, 5 }, /* 108 Mb */
- [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
- 102700, 6, 6 }, /* 121.5 Mb*/
- [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
- 112000, 7, 7 }, /* 135 Mb */
- [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
- 122000, 7, 7 }, /* 150 Mb */
- [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
- 25800, 8, 8 }, /* 27 Mb */
- [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
- 49800, 9, 9 }, /* 54 Mb */
- [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
- 71900, 10, 10 }, /* 81 Mb */
- [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
- 92500, 11, 11 }, /* 108 Mb */
- [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
- 130300, 12, 12 }, /* 162 Mb */
- [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
- 162800, 13, 13 }, /* 216 Mb */
- [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
- 178200, 14, 14 }, /* 243 Mb */
- [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
- 192100, 15, 15 }, /* 270 Mb */
- [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
- 207000, 15, 15 }, /* 300 Mb */
- [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
- 36100, 16, 16 }, /* 40.5 Mb */
- [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
- 72900, 17, 17 }, /* 81 Mb */
- [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
- 108300, 18, 18 }, /* 121.5 Mb */
- [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
- 142000, 19, 19 }, /* 162 Mb */
- [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
- 205100, 20, 20 }, /* 243 Mb */
- [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
- 224700, 20, 20 }, /* 270 Mb */
- [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
- 263100, 21, 21 }, /* 324 Mb */
- [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
- 288000, 21, 21 }, /* 360 Mb */
- [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
- 290700, 22, 22 }, /* 364.5 Mb */
- [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
- 317200, 22, 22 }, /* 405 Mb */
- [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
- 317200, 23, 23 }, /* 405 Mb */
- [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
- 346400, 23, 23 }, /* 450 Mb */
- },
- 50, /* probe interval */
- WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
-};
-
-/* 4ms frame limit not used for NG mode. The values filled
- * for HT are the 64K max aggregate limit */
-
-static const struct ath_rate_table ar5416_11ng_ratetable = {
- 72,
- 12, /* MCS start */
- {
- [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
- 900, 0, 2 }, /* 1 Mb */
- [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
- 1900, 1, 4 }, /* 2 Mb */
- [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
- 4900, 2, 11 }, /* 5.5 Mb */
- [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
- 8100, 3, 22 }, /* 11 Mb */
- [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
- 5400, 4, 12 }, /* 6 Mb */
- [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
- 7800, 5, 18 }, /* 9 Mb */
- [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
- 10100, 6, 24 }, /* 12 Mb */
- [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
- 14100, 7, 36 }, /* 18 Mb */
- [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
- 17700, 8, 48 }, /* 24 Mb */
- [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
- 23700, 9, 72 }, /* 36 Mb */
- [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
- 27400, 10, 96 }, /* 48 Mb */
- [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
- 30900, 11, 108 }, /* 54 Mb */
- [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
- 6400, 0, 0 }, /* 6.5 Mb */
- [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
- 12700, 1, 1 }, /* 13 Mb */
- [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
- 18800, 2, 2 }, /* 19.5 Mb*/
- [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
- 25000, 3, 3 }, /* 26 Mb */
- [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
- 36700, 4, 4 }, /* 39 Mb */
- [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
- 48100, 5, 5 }, /* 52 Mb */
- [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
- 53500, 6, 6 }, /* 58.5 Mb */
- [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
- 59000, 7, 7 }, /* 65 Mb */
- [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
- 65400, 7, 7 }, /* 65 Mb*/
- [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
- 12700, 8, 8 }, /* 13 Mb */
- [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
- 24800, 9, 9 }, /* 26 Mb */
- [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
- 36600, 10, 10 }, /* 39 Mb */
- [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
- 48100, 11, 11 }, /* 52 Mb */
- [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
- 69500, 12, 12 }, /* 78 Mb */
- [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
- 89500, 13, 13 }, /* 104 Mb */
- [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
- 98900, 14, 14 }, /* 117 Mb */
- [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
- 108300, 15, 15 }, /* 130 Mb */
- [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
- 120000, 15, 15 }, /* 144.4 Mb */
- [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
- 17400, 16, 16 }, /* 19.5 Mb */
- [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
- 35100, 17, 17 }, /* 39 Mb */
- [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
- 52600, 18, 18 }, /* 58.5 Mb */
- [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
- 70400, 19, 19 }, /* 78 Mb */
- [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
- 104900, 20, 20 }, /* 117 Mb */
- [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
- 115800, 20, 20 }, /* 130 Mb */
- [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
- 137200, 21, 21 }, /* 156 Mb */
- [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
- 151100, 21, 21 }, /* 173.3 Mb */
- [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
- 152800, 22, 22 }, /* 175.5 Mb */
- [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
- 168400, 22, 22 }, /* 195 Mb */
- [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
- 168400, 23, 23 }, /* 195 Mb */
- [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
- 185000, 23, 23 }, /* 216.7 Mb */
- [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
- 13200, 0, 0 }, /* 13.5 Mb */
- [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
- 25900, 1, 1 }, /* 27.0 Mb */
- [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
- 38600, 2, 2 }, /* 40.5 Mb */
- [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
- 49800, 3, 3 }, /* 54 Mb */
- [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
- 72200, 4, 4 }, /* 81 Mb */
- [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
- 92900, 5, 5 }, /* 108 Mb */
- [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
- 102700, 6, 6 }, /* 121.5 Mb */
- [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
- 112000, 7, 7 }, /* 135 Mb */
- [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
- 122000, 7, 7 }, /* 150 Mb */
- [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
- 25800, 8, 8 }, /* 27 Mb */
- [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
- 49800, 9, 9 }, /* 54 Mb */
- [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
- 71900, 10, 10 }, /* 81 Mb */
- [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
- 92500, 11, 11 }, /* 108 Mb */
- [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
- 130300, 12, 12 }, /* 162 Mb */
- [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
- 162800, 13, 13 }, /* 216 Mb */
- [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
- 178200, 14, 14 }, /* 243 Mb */
- [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
- 192100, 15, 15 }, /* 270 Mb */
- [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
- 207000, 15, 15 }, /* 300 Mb */
- [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
- 36100, 16, 16 }, /* 40.5 Mb */
- [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
- 72900, 17, 17 }, /* 81 Mb */
- [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
- 108300, 18, 18 }, /* 121.5 Mb */
- [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
- 142000, 19, 19 }, /* 162 Mb */
- [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
- 205100, 20, 20 }, /* 243 Mb */
- [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
- 224700, 20, 20 }, /* 270 Mb */
- [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
- 263100, 21, 21 }, /* 324 Mb */
- [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
- 288000, 21, 21 }, /* 360 Mb */
- [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
- 290700, 22, 22 }, /* 364.5 Mb */
- [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
- 317200, 22, 22 }, /* 405 Mb */
- [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
- 317200, 23, 23 }, /* 405 Mb */
- [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
- 346400, 23, 23 }, /* 450 Mb */
- },
- 50, /* probe interval */
- WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
-};
-
-static const struct ath_rate_table ar5416_11a_ratetable = {
- 8,
- 0,
- {
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
- 5400, 0, 12},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
- 7800, 1, 18},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
- 10000, 2, 24},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
- 13900, 3, 36},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
- 17300, 4, 48},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
- 23000, 5, 72},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
- 27400, 6, 96},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
- 29300, 7, 108},
- },
- 50, /* probe interval */
- 0, /* Phy rates allowed initially */
-};
-
-static const struct ath_rate_table ar5416_11g_ratetable = {
- 12,
- 0,
- {
- { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
- 900, 0, 2},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
- 1900, 1, 4},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
- 4900, 2, 11},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
- 8100, 3, 22},
- { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
- 5400, 4, 12},
- { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
- 7800, 5, 18},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
- 10000, 6, 24},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
- 13900, 7, 36},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
- 17300, 8, 48},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
- 23000, 9, 72},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
- 27400, 10, 96},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
- 29300, 11, 108},
- },
- 50, /* probe interval */
- 0, /* Phy rates allowed initially */
-};
-
-static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_rate *rate)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- int rix, i, idx = 0;
-
- if (!(rate->flags & IEEE80211_TX_RC_MCS))
- return rate->idx;
-
- for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
- idx = ath_rc_priv->valid_rate_index[i];
-
- if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
- rate_table->info[idx].ratecode == rate->idx)
- break;
- }
-
- rix = idx;
-
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- rix++;
-
- return rix;
-}
-
-static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u8 i, j, idx, idx_next;
-
- for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
- for (j = 0; j <= i-1; j++) {
- idx = ath_rc_priv->valid_rate_index[j];
- idx_next = ath_rc_priv->valid_rate_index[j+1];
-
- if (rate_table->info[idx].ratekbps >
- rate_table->info[idx_next].ratekbps) {
- ath_rc_priv->valid_rate_index[j] = idx_next;
- ath_rc_priv->valid_rate_index[j+1] = idx;
- }
- }
- }
-}
-
-static inline
-int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
- struct ath_rate_priv *ath_rc_priv,
- u8 cur_valid_txrate,
- u8 *next_idx)
-{
- u8 i;
-
- for (i = 0; i < ath_rc_priv->max_valid_rate - 1; i++) {
- if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
- *next_idx = ath_rc_priv->valid_rate_index[i+1];
- return 1;
- }
- }
-
- /* No more valid rates */
- *next_idx = 0;
-
- return 0;
-}
-
-/* Return true only for single stream */
-
-static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
-{
- if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG))
- return 0;
- if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
- return 0;
- if (WLAN_RC_PHY_TS(phy) && !(capflag & WLAN_RC_TS_FLAG))
- return 0;
- if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
- return 0;
- if (!ignore_cw && WLAN_RC_PHY_HT(phy))
- if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG))
- return 0;
- return 1;
-}
-
-static inline int
-ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
- u8 cur_valid_txrate, u8 *next_idx)
-{
- int8_t i;
-
- for (i = 1; i < ath_rc_priv->max_valid_rate ; i++) {
- if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
- *next_idx = ath_rc_priv->valid_rate_index[i-1];
- return 1;
- }
- }
-
- return 0;
-}
-
-static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u8 i, hi = 0;
-
- for (i = 0; i < rate_table->rate_cnt; i++) {
- if (rate_table->info[i].rate_flags & RC_LEGACY) {
- u32 phy = rate_table->info[i].phy;
- u8 valid_rate_count = 0;
-
- if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
- continue;
-
- valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
-
- ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
- ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_priv->valid_rate_index[i] = true;
- hi = i;
- }
- }
-
- return hi;
-}
-
-static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
- u32 phy, u32 capflag)
-{
- if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
- return false;
-
- if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
- return false;
-
- if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
- return false;
-
- return true;
-}
-
-static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
- u32 phy, u32 capflag)
-{
- if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
- return false;
-
- if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
- return false;
-
- if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
- return false;
-
- return true;
-}
-
-static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- struct ath_rateset *rateset;
- u32 phy, capflag = ath_rc_priv->ht_cap;
- u16 rate_flags;
- u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
-
- if (legacy)
- rateset = &ath_rc_priv->neg_rates;
- else
- rateset = &ath_rc_priv->neg_ht_rates;
-
- for (i = 0; i < rateset->rs_nrates; i++) {
- for (j = 0; j < rate_table->rate_cnt; j++) {
- phy = rate_table->info[j].phy;
- rate_flags = rate_table->info[j].rate_flags;
- rate = rateset->rs_rates[i];
- dot11rate = rate_table->info[j].dot11rate;
-
- if (legacy &&
- !ath_rc_check_legacy(rate, dot11rate,
- rate_flags, phy, capflag))
- continue;
-
- if (!legacy &&
- !ath_rc_check_ht(rate, dot11rate,
- rate_flags, phy, capflag))
- continue;
-
- if (!ath_rc_valid_phyrate(phy, capflag, 0))
- continue;
-
- valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
- ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
- ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_priv->valid_rate_index[j] = true;
- hi = max(hi, j);
- }
- }
-
- return hi;
-}
-
-static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
- int *is_probing)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u32 best_thruput, this_thruput, now_msec;
- u8 rate, next_rate, best_rate, maxindex, minindex;
- int8_t index = 0;
-
- now_msec = jiffies_to_msecs(jiffies);
- *is_probing = 0;
- best_thruput = 0;
- maxindex = ath_rc_priv->max_valid_rate-1;
- minindex = 0;
- best_rate = minindex;
-
- /*
- * Try the higher rate first. It will reduce memory moving time
- * if we have very good channel characteristics.
- */
- for (index = maxindex; index >= minindex ; index--) {
- u8 per_thres;
-
- rate = ath_rc_priv->valid_rate_index[index];
- if (rate > ath_rc_priv->rate_max_phy)
- continue;
-
- /*
- * For TCP the average collision rate is around 11%,
- * so we ignore PERs less than this. This is to
- * prevent the rate we are currently using (whose
- * PER might be in the 10-15 range because of TCP
- * collisions) looking worse than the next lower
- * rate whose PER has decayed close to 0. If we
-		 * moved to the next lower rate, its PER would grow to
-		 * 10-15 and we would be worse off than staying
- * at the current rate.
- */
- per_thres = ath_rc_priv->per[rate];
- if (per_thres < 12)
- per_thres = 12;
-
- this_thruput = rate_table->info[rate].user_ratekbps *
- (100 - per_thres);
-
- if (best_thruput <= this_thruput) {
- best_thruput = this_thruput;
- best_rate = rate;
- }
- }
-
- rate = best_rate;
-
- /*
- * Must check the actual rate (ratekbps) to account for
-	 * non-monotonicity of 11g's rate table
- */
-
- if (rate >= ath_rc_priv->rate_max_phy) {
- rate = ath_rc_priv->rate_max_phy;
-
- /* Probe the next allowed phy state */
- if (ath_rc_get_nextvalid_txrate(rate_table,
- ath_rc_priv, rate, &next_rate) &&
- (now_msec - ath_rc_priv->probe_time >
- rate_table->probe_interval) &&
- (ath_rc_priv->hw_maxretry_pktcnt >= 1)) {
- rate = next_rate;
- ath_rc_priv->probe_rate = rate;
- ath_rc_priv->probe_time = now_msec;
- ath_rc_priv->hw_maxretry_pktcnt = 0;
- *is_probing = 1;
- }
- }
-
- if (rate > (ath_rc_priv->rate_table_size - 1))
- rate = ath_rc_priv->rate_table_size - 1;
-
- if (RC_TS_ONLY(rate_table->info[rate].rate_flags) &&
- (ath_rc_priv->ht_cap & WLAN_RC_TS_FLAG))
- return rate;
-
- if (RC_DS_OR_LATER(rate_table->info[rate].rate_flags) &&
- (ath_rc_priv->ht_cap & (WLAN_RC_DS_FLAG | WLAN_RC_TS_FLAG)))
- return rate;
-
- if (RC_SS_OR_LEGACY(rate_table->info[rate].rate_flags))
- return rate;
-
- /* This should not happen */
- WARN_ON_ONCE(1);
-
- rate = ath_rc_priv->valid_rate_index[0];
-
- return rate;
-}
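
The selection loop above weighs each candidate rate by its expected throughput, ratekbps scaled by (100 - PER), flooring the PER at 12% so the rate currently in use is not unfairly compared against a lower rate whose PER has decayed while idle. A minimal standalone sketch of that comparison, with hypothetical rate_kbps[] and per[] arrays standing in for the driver's rate table and PER state:

	/* Hypothetical sketch of the throughput comparison in
	 * ath_rc_get_highest_rix(); not the driver's actual helper. */
	static unsigned int pick_best_rate(const unsigned int *rate_kbps,
					   const unsigned char *per,
					   unsigned int n_rates)
	{
		unsigned int i, best = 0, best_thruput = 0;

		for (i = 0; i < n_rates; i++) {
			/* Floor the PER at ~12%, the typical TCP collision loss. */
			unsigned int p = per[i] < 12 ? 12 : per[i];
			unsigned int thruput = rate_kbps[i] * (100 - p);

			if (thruput >= best_thruput) {
				best_thruput = thruput;
				best = i;
			}
		}
		return best;
	}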
-
-static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
- struct ieee80211_tx_rate *rate,
- struct ieee80211_tx_rate_control *txrc,
- u8 tries, u8 rix, int rtsctsenable)
-{
- rate->count = tries;
- rate->idx = rate_table->info[rix].ratecode;
-
- if (txrc->rts || rtsctsenable)
- rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
-
- if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
- rate->flags |= IEEE80211_TX_RC_MCS;
- if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
- conf_is_ht40(&txrc->hw->conf))
- rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
- rate->flags |= IEEE80211_TX_RC_SHORT_GI;
- }
-}
-
-static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
- const struct ath_rate_table *rate_table,
- struct ieee80211_tx_info *tx_info)
-{
- struct ieee80211_bss_conf *bss_conf;
-
- if (!tx_info->control.vif)
- return;
- /*
- * For legacy frames, mac80211 takes care of CTS protection.
- */
- if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
- return;
-
- bss_conf = &tx_info->control.vif->bss_conf;
-
- if (!bss_conf->basic_rates)
- return;
-
- /*
- * For now, use the lowest allowed basic rate for HT frames.
- */
- tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
-}
-
-static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
- struct ieee80211_tx_rate_control *txrc)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- const struct ath_rate_table *rate_table;
- struct sk_buff *skb = txrc->skb;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->control.rates;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc = hdr->frame_control;
- u8 try_per_rate, i = 0, rix;
- int is_probe = 0;
-
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- /*
-	 * For Multi Rate Retry we use a different retry attempt
-	 * count for each series. This ends up looking like this:
- *
- * MRR[0] = 4
- * MRR[1] = 4
- * MRR[2] = 4
- * MRR[3] = 8
- *
- */
- try_per_rate = 4;
-
- rate_table = ath_rc_priv->rate_table;
- rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
-
- if (conf_is_ht(&sc->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
- tx_info->flags |= IEEE80211_TX_CTL_LDPC;
-
- if (conf_is_ht(&sc->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
- tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
-
- if (is_probe) {
- /*
-		 * Set one try for probe rates. For the
-		 * probes, don't enable RTS.
- */
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- 1, rix, 0);
- /*
- * Get the next tried/allowed rate.
- * No RTS for the next series after the probe rate.
- */
- ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, rix, 0);
-
- tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
- } else {
- /*
- * Set the chosen rate. No RTS for first series entry.
- */
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, rix, 0);
- }
-
- for ( ; i < 4; i++) {
- /*
- * Use twice the number of tries for the last MRR segment.
- */
- if (i + 1 == 4)
- try_per_rate = 8;
-
- ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
-
- /*
- * All other rates in the series have RTS enabled.
- */
- ath_rc_rate_set_series(rate_table, &rates[i], txrc,
- try_per_rate, rix, 1);
- }
-
- /*
-	 * NB: Change rate series to enable aggregation when operating
- * at lower MCS rates. When first rate in series is MCS2
- * in HT40 @ 2.4GHz, series should look like:
- *
- * {MCS2, MCS1, MCS0, MCS0}.
- *
- * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should
- * look like:
- *
- * {MCS3, MCS2, MCS1, MCS1}
- *
- * So, set fourth rate in series to be same as third one for
- * above conditions.
- */
- if ((sc->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) &&
- (conf_is_ht(&sc->hw->conf))) {
- u8 dot11rate = rate_table->info[rix].dot11rate;
- u8 phy = rate_table->info[rix].phy;
- if (i == 4 &&
- ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
- (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
- rates[3].idx = rates[2].idx;
- rates[3].flags = rates[2].flags;
- }
- }
-
- /*
- * Force hardware to use computed duration for next
- * fragment by disabling multi-rate retry, which
- * updates duration based on the multi-rate duration table.
- *
- * FIXME: Fix duration
- */
- if (ieee80211_has_morefrags(fc) ||
- (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
- rates[1].count = rates[2].count = rates[3].count = 0;
- rates[1].idx = rates[2].idx = rates[3].idx = 0;
- rates[0].count = ATH_TXMAXTRY;
- }
-
- ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
-}
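
ath_get_rate() above fills the four MRR entries by stepping down from the chosen index: 4 tries for each of the first three entries, 8 for the last, and RTS disabled only for the first entry (or for a probe). A hypothetical, self-contained sketch of that layout:

	/* Hypothetical sketch of the MRR series built above; start_idx plays
	 * the role of the index returned by ath_rc_get_highest_rix(). */
	struct mrr_entry {
		int rate_idx;
		int tries;
		int use_rts;
	};

	static void build_mrr_series(struct mrr_entry s[4], int start_idx)
	{
		int i, idx = start_idx;

		for (i = 0; i < 4; i++) {
			s[i].rate_idx = idx;
			s[i].tries = (i == 3) ? 8 : 4;	/* MRR = {4, 4, 4, 8} */
			s[i].use_rts = (i != 0);	/* no RTS on the first entry */
			if (idx > 0)
				idx--;			/* next lower valid rate */
		}
	}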
-
-static void ath_rc_update_per(struct ath_softc *sc,
- const struct ath_rate_table *rate_table,
- struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_info *tx_info,
- int tx_rate, int xretries, int retries,
- u32 now_msec)
-{
- int count, n_bad_frames;
- u8 last_per;
- static const u32 nretry_to_per_lookup[10] = {
- 100 * 0 / 1,
- 100 * 1 / 4,
- 100 * 1 / 2,
- 100 * 3 / 4,
- 100 * 4 / 5,
- 100 * 5 / 6,
- 100 * 6 / 7,
- 100 * 7 / 8,
- 100 * 8 / 9,
- 100 * 9 / 10
- };
-
- last_per = ath_rc_priv->per[tx_rate];
- n_bad_frames = tx_info->status.ampdu_len - tx_info->status.ampdu_ack_len;
-
- if (xretries) {
- if (xretries == 1) {
- ath_rc_priv->per[tx_rate] += 30;
- if (ath_rc_priv->per[tx_rate] > 100)
- ath_rc_priv->per[tx_rate] = 100;
- } else {
- /* xretries == 2 */
- count = ARRAY_SIZE(nretry_to_per_lookup);
- if (retries >= count)
- retries = count - 1;
-
- /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
- ath_rc_priv->per[tx_rate] =
- (u8)(last_per - (last_per >> 3) + (100 >> 3));
- }
-
- /* xretries == 1 or 2 */
-
- if (ath_rc_priv->probe_rate == tx_rate)
- ath_rc_priv->probe_rate = 0;
-
- } else { /* xretries == 0 */
- count = ARRAY_SIZE(nretry_to_per_lookup);
- if (retries >= count)
- retries = count - 1;
-
- if (n_bad_frames) {
- /* new_PER = 7/8*old_PER + 1/8*(currentPER)
- * Assuming that n_frames is not 0. The current PER
- * from the retries is 100 * retries / (retries+1),
-			 * since the first 'retries' attempts failed, and the
-			 * next one worked. For the one that worked,
-			 * n_bad_frames subframes out of n_frames were bad,
- * so the PER for that part is
- * 100 * n_bad_frames / n_frames, and it contributes
- * 100 * n_bad_frames / (n_frames * (retries+1)) to
- * the above PER. The expression below is a
- * simplified version of the sum of these two terms.
- */
- if (tx_info->status.ampdu_len > 0) {
- int n_frames, n_bad_tries;
- u8 cur_per, new_per;
-
- n_bad_tries = retries * tx_info->status.ampdu_len +
- n_bad_frames;
- n_frames = tx_info->status.ampdu_len * (retries + 1);
- cur_per = (100 * n_bad_tries / n_frames) >> 3;
- new_per = (u8)(last_per - (last_per >> 3) + cur_per);
- ath_rc_priv->per[tx_rate] = new_per;
- }
- } else {
- ath_rc_priv->per[tx_rate] =
- (u8)(last_per - (last_per >> 3) +
- (nretry_to_per_lookup[retries] >> 3));
- }
-
-
- /*
- * If we got at most one retry then increase the max rate if
- * this was a probe. Otherwise, ignore the probe.
- */
- if (ath_rc_priv->probe_rate && ath_rc_priv->probe_rate == tx_rate) {
- if (retries > 0 || 2 * n_bad_frames > tx_info->status.ampdu_len) {
- /*
- * Since we probed with just a single attempt,
- * any retries means the probe failed. Also,
- * if the attempt worked, but more than half
- * the subframes were bad then also consider
- * the probe a failure.
- */
- ath_rc_priv->probe_rate = 0;
- } else {
- u8 probe_rate = 0;
-
- ath_rc_priv->rate_max_phy =
- ath_rc_priv->probe_rate;
- probe_rate = ath_rc_priv->probe_rate;
-
- if (ath_rc_priv->per[probe_rate] > 30)
- ath_rc_priv->per[probe_rate] = 20;
-
- ath_rc_priv->probe_rate = 0;
-
- /*
- * Since this probe succeeded, we allow the next
- * probe twice as soon. This allows the maxRate
- * to move up faster if the probes are
- * successful.
- */
- ath_rc_priv->probe_time =
- now_msec - rate_table->probe_interval / 2;
- }
- }
-
- if (retries > 0) {
- /*
- * Don't update anything. We don't know if
- * this was because of collisions or poor signal.
- */
- ath_rc_priv->hw_maxretry_pktcnt = 0;
- } else {
- /*
- * It worked with no retries. First ignore bogus (small)
- * rssi_ack values.
- */
- if (tx_rate == ath_rc_priv->rate_max_phy &&
- ath_rc_priv->hw_maxretry_pktcnt < 255) {
- ath_rc_priv->hw_maxretry_pktcnt++;
- }
-
- }
- }
-}
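
Two pieces of arithmetic above are worth spelling out. The lookup table maps a retry count r to a current PER of 100 * r / (r + 1), i.e. r failed attempts out of r + 1; the running estimate is then an exponentially weighted moving average, new_PER = 7/8 * old_PER + 1/8 * current_PER, done with right shifts. A minimal sketch, assuming 0-100 percentage values:

	/* EWMA sketch: e.g. old_per = 40, cur_per = 100 gives
	 * 40 - (40 >> 3) + (100 >> 3) = 40 - 5 + 12 = 47,
	 * roughly 7/8 * 40 + 1/8 * 100 with integer truncation. */
	static unsigned int per_ewma(unsigned int old_per, unsigned int cur_per)
	{
		return old_per - (old_per >> 3) + (cur_per >> 3);
	}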
-
-static void ath_rc_update_ht(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_info *tx_info,
- int tx_rate, int xretries, int retries)
-{
- u32 now_msec = jiffies_to_msecs(jiffies);
- int rate;
- u8 last_per;
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- int size = ath_rc_priv->rate_table_size;
-
- if ((tx_rate < 0) || (tx_rate > rate_table->rate_cnt))
- return;
-
- last_per = ath_rc_priv->per[tx_rate];
-
- /* Update PER first */
- ath_rc_update_per(sc, rate_table, ath_rc_priv,
- tx_info, tx_rate, xretries,
- retries, now_msec);
-
- /*
- * If this rate looks bad (high PER) then stop using it for
- * a while (except if we are probing).
- */
- if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
- rate_table->info[tx_rate].ratekbps <=
- rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
- ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
- &ath_rc_priv->rate_max_phy);
-
- /* Don't probe for a little while. */
- ath_rc_priv->probe_time = now_msec;
- }
-
- /* Make sure the rates below this have lower PER */
- /* Monotonicity is kept only for rates below the current rate. */
- if (ath_rc_priv->per[tx_rate] < last_per) {
- for (rate = tx_rate - 1; rate >= 0; rate--) {
-
- if (ath_rc_priv->per[rate] >
- ath_rc_priv->per[rate+1]) {
- ath_rc_priv->per[rate] =
- ath_rc_priv->per[rate+1];
- }
- }
- }
-
- /* Maintain monotonicity for rates above the current rate */
- for (rate = tx_rate; rate < size - 1; rate++) {
- if (ath_rc_priv->per[rate+1] <
- ath_rc_priv->per[rate])
- ath_rc_priv->per[rate+1] =
- ath_rc_priv->per[rate];
- }
-
- /* Every so often, we reduce the thresholds
- * and PER (different for CCK and OFDM). */
- if (now_msec - ath_rc_priv->per_down_time >=
- rate_table->probe_interval) {
- for (rate = 0; rate < size; rate++) {
- ath_rc_priv->per[rate] =
- 7 * ath_rc_priv->per[rate] / 8;
- }
-
- ath_rc_priv->per_down_time = now_msec;
- }
-
- ath_debug_stat_retries(ath_rc_priv, tx_rate, xretries, retries,
- ath_rc_priv->per[tx_rate]);
-
-}
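
The two monotonicity loops above keep the PER array ordered by rate: after per[tx_rate] changes, entries below it are clamped down to it and entries above it are raised to it, so a lower rate never reports a worse PER than a higher one. A hypothetical standalone form of that pass (the driver additionally applies the downward clamp only when the PER has just decreased):

	static void enforce_per_monotonic(unsigned char *per, int tx_rate, int size)
	{
		int rate;

		/* Rates below the updated one must not have a higher PER. */
		for (rate = tx_rate - 1; rate >= 0; rate--)
			if (per[rate] > per[rate + 1])
				per[rate] = per[rate + 1];

		/* Rates above it must not have a lower PER. */
		for (rate = tx_rate; rate < size - 1; rate++)
			if (per[rate + 1] < per[rate])
				per[rate + 1] = per[rate];
	}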
-
-static void ath_rc_tx_status(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->status.rates;
- struct ieee80211_tx_rate *rate;
- int final_ts_idx = 0, xretries = 0, long_retry = 0;
- u8 flags;
- u32 i = 0, rix;
-
- for (i = 0; i < sc->hw->max_rates; i++) {
- rate = &tx_info->status.rates[i];
- if (rate->idx < 0 || !rate->count)
- break;
-
- final_ts_idx = i;
- long_retry = rate->count - 1;
- }
-
- if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
- xretries = 1;
-
- /*
- * If the first rate is not the final index, there
- * are intermediate rate failures to be processed.
- */
- if (final_ts_idx != 0) {
- for (i = 0; i < final_ts_idx ; i++) {
- if (rates[i].count != 0 && (rates[i].idx >= 0)) {
- flags = rates[i].flags;
-
- /* If HT40 and we have switched mode from
- * 40 to 20 => don't update */
-
- if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
- return;
-
- rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
- ath_rc_update_ht(sc, ath_rc_priv, tx_info,
- rix, xretries ? 1 : 2,
- rates[i].count);
- }
- }
- }
-
- flags = rates[final_ts_idx].flags;
-
- /* If HT40 and we have switched mode from 40 to 20 => don't update */
- if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
- return;
-
- rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
- ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
- ath_debug_stat_rc(ath_rc_priv, rix);
-}
-
-static const
-struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
- enum ieee80211_band band,
- bool is_ht)
-{
- switch(band) {
- case IEEE80211_BAND_2GHZ:
- if (is_ht)
- return &ar5416_11ng_ratetable;
- return &ar5416_11g_ratetable;
- case IEEE80211_BAND_5GHZ:
- if (is_ht)
- return &ar5416_11na_ratetable;
- return &ar5416_11a_ratetable;
- default:
- return NULL;
- }
-}
-
-static void ath_rc_init(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u8 i, j, k, hi = 0, hthi = 0;
-
- ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
-
- for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
- ath_rc_priv->per[i] = 0;
- ath_rc_priv->valid_rate_index[i] = 0;
- }
-
- for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
- for (j = 0; j < RATE_TABLE_SIZE; j++)
- ath_rc_priv->valid_phy_rateidx[i][j] = 0;
- ath_rc_priv->valid_phy_ratecnt[i] = 0;
- }
-
- if (!rateset->rs_nrates) {
- hi = ath_rc_init_validrates(ath_rc_priv);
- } else {
- hi = ath_rc_setvalid_rates(ath_rc_priv, true);
-
- if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
- hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
-
- hi = max(hi, hthi);
- }
-
- ath_rc_priv->rate_table_size = hi + 1;
- ath_rc_priv->rate_max_phy = 0;
- WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
-
- for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
- for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
- ath_rc_priv->valid_rate_index[k++] =
- ath_rc_priv->valid_phy_rateidx[i][j];
- }
-
- if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
- !ath_rc_priv->valid_phy_ratecnt[i])
- continue;
-
- ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
- }
- WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
- WARN_ON(k > RATE_TABLE_SIZE);
-
- ath_rc_priv->max_valid_rate = k;
- ath_rc_sort_validrates(ath_rc_priv);
- ath_rc_priv->rate_max_phy = (k > 4) ?
- ath_rc_priv->valid_rate_index[k-4] :
- ath_rc_priv->valid_rate_index[k-1];
-
- ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
- ath_rc_priv->ht_cap);
-}
-
-static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
-{
- u8 caps = 0;
-
- if (sta->ht_cap.ht_supported) {
- caps = WLAN_RC_HT_FLAG;
- if (sta->ht_cap.mcs.rx_mask[1] && sta->ht_cap.mcs.rx_mask[2])
- caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
- else if (sta->ht_cap.mcs.rx_mask[1])
- caps |= WLAN_RC_DS_FLAG;
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
- caps |= WLAN_RC_40_FLAG;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
- caps |= WLAN_RC_SGI_FLAG;
- } else {
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
- caps |= WLAN_RC_SGI_FLAG;
- }
- }
-
- return caps;
-}
-
-static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
- u8 tidno)
-{
- struct ath_node *an = (struct ath_node *)sta->drv_priv;
- struct ath_atx_tid *txtid;
-
- if (!sta->ht_cap.ht_supported)
- return false;
-
- txtid = ATH_AN_2_TID(an, tidno);
- return !txtid->active;
-}
-
-
-/***********************************/
-/* mac80211 Rate Control callbacks */
-/***********************************/
-
-static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc = hdr->frame_control;
-
- if (!priv_sta || !ieee80211_is_data(fc))
- return;
-
- /* This packet was aggregated but doesn't carry status info */
- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
- !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
- return;
-
- if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
- return;
-
- ath_rc_tx_status(sc, ath_rc_priv, skb);
-
- /* Check if aggregation has to be enabled for this tid */
- if (conf_is_ht(&sc->hw->conf) &&
- !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
- if (ieee80211_is_data_qos(fc) &&
- skb_get_queue_mapping(skb) != IEEE80211_AC_VO) {
- u8 *qc, tid;
-
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
-
-			if (ath_tx_aggr_check(sc, sta, tid))
- ieee80211_start_tx_ba_session(sta, tid, 0);
- }
- }
-}
-
-static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta)
-{
- struct ath_softc *sc = priv;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- int i, j = 0;
- u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef);
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sta->supp_rates[sband->band] & BIT(i)) {
- if ((rate_flags & sband->bitrates[i].flags)
- != rate_flags)
- continue;
-
- ath_rc_priv->neg_rates.rs_rates[j]
- = (sband->bitrates[i].bitrate * 2) / 10;
- j++;
- }
- }
- ath_rc_priv->neg_rates.rs_nrates = j;
-
- if (sta->ht_cap.ht_supported) {
- for (i = 0, j = 0; i < 77; i++) {
- if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
- ath_rc_priv->neg_ht_rates.rs_rates[j++] = i;
- if (j == ATH_RATE_MAX)
- break;
- }
- ath_rc_priv->neg_ht_rates.rs_nrates = j;
- }
-
- ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
- sta->ht_cap.ht_supported);
- if (!ath_rc_priv->rate_table) {
- ath_err(common, "No rate table chosen\n");
- return;
- }
-
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
- ath_rc_init(sc, priv_sta);
-}
-
-static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta,
- u32 changed)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
-
- if (changed & IEEE80211_RC_BW_CHANGED) {
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
- ath_rc_init(sc, priv_sta);
-
- ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
- "Operating Bandwidth changed to: %d\n",
- sc->hw->conf.chandef.width);
- }
-}
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-
-void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
- struct ath_rc_stats *stats;
-
- stats = &rc->rcstats[final_rate];
- stats->success++;
-}
-
-void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per)
-{
- struct ath_rc_stats *stats = &rc->rcstats[rix];
-
- stats->xretries += xretries;
- stats->retries += retries;
- stats->per = per;
-}
-
-static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_rate_priv *rc = file->private_data;
- char *buf;
- unsigned int len = 0, max;
- int rix;
- ssize_t retval;
-
- if (rc->rate_table == NULL)
- return 0;
-
- max = 80 + rc->rate_table_size * 1024 + 1;
- buf = kmalloc(max, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- len += sprintf(buf, "%6s %6s %6s "
- "%10s %10s %10s %10s\n",
- "HT", "MCS", "Rate",
- "Success", "Retries", "XRetries", "PER");
-
- for (rix = 0; rix < rc->max_valid_rate; rix++) {
- u8 i = rc->valid_rate_index[rix];
- u32 ratekbps = rc->rate_table->info[i].ratekbps;
- struct ath_rc_stats *stats = &rc->rcstats[i];
- char mcs[5];
- char htmode[5];
- int used_mcs = 0, used_htmode = 0;
-
- if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
- used_mcs = scnprintf(mcs, 5, "%d",
- rc->rate_table->info[i].ratecode);
-
- if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
- used_htmode = scnprintf(htmode, 5, "HT40");
- else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
- used_htmode = scnprintf(htmode, 5, "HT20");
- else
- used_htmode = scnprintf(htmode, 5, "????");
- }
-
- mcs[used_mcs] = '\0';
- htmode[used_htmode] = '\0';
-
- len += scnprintf(buf + len, max - len,
- "%6s %6s %3u.%d: "
- "%10u %10u %10u %10u\n",
- htmode,
- mcs,
- ratekbps / 1000,
- (ratekbps % 1000) / 100,
- stats->success,
- stats->retries,
- stats->xretries,
- stats->per);
- }
-
- if (len > max)
- len = max;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
- return retval;
-}
-
-static const struct file_operations fops_rcstat = {
- .read = read_file_rcstat,
- .open = simple_open,
- .owner = THIS_MODULE
-};
-
-static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct ath_rate_priv *rc = priv_sta;
- rc->debugfs_rcstats = debugfs_create_file("rc_stats", S_IRUGO,
- dir, rc, &fops_rcstat);
-}
-
-static void ath_rate_remove_sta_debugfs(void *priv, void *priv_sta)
-{
- struct ath_rate_priv *rc = priv_sta;
- debugfs_remove(rc->debugfs_rcstats);
-}
-
-#endif /* CONFIG_MAC80211_DEBUGFS && CONFIG_ATH9K_DEBUGFS */
-
-static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-
-static void ath_rate_free(void *priv)
-{
- return;
-}
-
-static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
- return kzalloc(sizeof(struct ath_rate_priv), gfp);
-}
-
-static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct ath_rate_priv *rate_priv = priv_sta;
- kfree(rate_priv);
-}
-
-static struct rate_control_ops ath_rate_ops = {
- .module = NULL,
- .name = "ath9k_rate_control",
- .tx_status = ath_tx_status,
- .get_rate = ath_get_rate,
- .rate_init = ath_rate_init,
- .rate_update = ath_rate_update,
- .alloc = ath_rate_alloc,
- .free = ath_rate_free,
- .alloc_sta = ath_rate_alloc_sta,
- .free_sta = ath_rate_free_sta,
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
- .add_sta_debugfs = ath_rate_add_sta_debugfs,
- .remove_sta_debugfs = ath_rate_remove_sta_debugfs,
-#endif
-};
-
-int ath_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&ath_rate_ops);
-}
-
-void ath_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&ath_rate_ops);
-}
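
For context, the register/unregister pair above is what a driver module would wrap around its own init/exit; a hypothetical skeleton (names invented, error handling trimmed):

	#include <linux/module.h>

	static int __init example_init(void)
	{
		int ret = ath_rate_control_register();

		if (ret)
			return ret;
		/* ... register the rest of the driver here ... */
		return 0;
	}

	static void __exit example_exit(void)
	{
		/* ... tear the driver down first ... */
		ath_rate_control_unregister();
	}

	module_init(example_init);
	module_exit(example_exit);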
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
deleted file mode 100644
index b9a87383cb43..000000000000
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (c) 2004 Sam Leffler, Errno Consulting
- * Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2008-2011 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef RC_H
-#define RC_H
-
-#include "hw.h"
-
-struct ath_softc;
-
-#define ATH_RATE_MAX 30
-#define RATE_TABLE_SIZE 72
-
-#define RC_INVALID 0x0000
-#define RC_LEGACY 0x0001
-#define RC_SS 0x0002
-#define RC_DS 0x0004
-#define RC_TS 0x0008
-#define RC_HT_20 0x0010
-#define RC_HT_40 0x0020
-
-#define RC_STREAM_MASK 0xe
-#define RC_DS_OR_LATER(f) ((((f) & RC_STREAM_MASK) == RC_DS) || \
- (((f) & RC_STREAM_MASK) == (RC_DS | RC_TS)))
-#define RC_TS_ONLY(f) (((f) & RC_STREAM_MASK) == RC_TS)
-#define RC_SS_OR_LEGACY(f) ((f) & (RC_SS | RC_LEGACY))
-
-#define RC_HT_2040 (RC_HT_20 | RC_HT_40)
-#define RC_ALL_STREAM (RC_SS | RC_DS | RC_TS)
-#define RC_L_SD (RC_LEGACY | RC_SS | RC_DS)
-#define RC_L_SDT (RC_LEGACY | RC_SS | RC_DS | RC_TS)
-#define RC_HT_S_20 (RC_HT_20 | RC_SS)
-#define RC_HT_D_20 (RC_HT_20 | RC_DS)
-#define RC_HT_T_20 (RC_HT_20 | RC_TS)
-#define RC_HT_S_40 (RC_HT_40 | RC_SS)
-#define RC_HT_D_40 (RC_HT_40 | RC_DS)
-#define RC_HT_T_40 (RC_HT_40 | RC_TS)
-
-#define RC_HT_SD_20 (RC_HT_20 | RC_SS | RC_DS)
-#define RC_HT_DT_20 (RC_HT_20 | RC_DS | RC_TS)
-#define RC_HT_SD_40 (RC_HT_40 | RC_SS | RC_DS)
-#define RC_HT_DT_40 (RC_HT_40 | RC_DS | RC_TS)
-
-#define RC_HT_SD_2040 (RC_HT_2040 | RC_SS | RC_DS)
-#define RC_HT_SDT_2040 (RC_HT_2040 | RC_SS | RC_DS | RC_TS)
-
-#define RC_HT_SDT_20 (RC_HT_20 | RC_SS | RC_DS | RC_TS)
-#define RC_HT_SDT_40 (RC_HT_40 | RC_SS | RC_DS | RC_TS)
-
-#define RC_ALL (RC_LEGACY | RC_HT_2040 | RC_ALL_STREAM)
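
The stream-mask helpers above classify a rate purely from its RC_* flag bits. For example, a dual/triple-stream HT40 entry (RC_HT_DT_40 = RC_HT_40 | RC_DS | RC_TS) is matched by RC_DS_OR_LATER() but not by RC_TS_ONLY() or RC_SS_OR_LEGACY(); a small hypothetical check:

	static inline bool rc_flags_example(void)
	{
		u16 flags = RC_HT_DT_40;

		return RC_DS_OR_LATER(flags) &&	/* (flags & 0xe) == (RC_DS | RC_TS) */
		       !RC_TS_ONLY(flags) &&
		       !RC_SS_OR_LEGACY(flags);	/* no RC_SS/RC_LEGACY bit set */
	}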
-
-enum {
- WLAN_RC_PHY_OFDM,
- WLAN_RC_PHY_CCK,
- WLAN_RC_PHY_HT_20_SS,
- WLAN_RC_PHY_HT_20_DS,
- WLAN_RC_PHY_HT_20_TS,
- WLAN_RC_PHY_HT_40_SS,
- WLAN_RC_PHY_HT_40_DS,
- WLAN_RC_PHY_HT_40_TS,
- WLAN_RC_PHY_HT_20_SS_HGI,
- WLAN_RC_PHY_HT_20_DS_HGI,
- WLAN_RC_PHY_HT_20_TS_HGI,
- WLAN_RC_PHY_HT_40_SS_HGI,
- WLAN_RC_PHY_HT_40_DS_HGI,
- WLAN_RC_PHY_HT_40_TS_HGI,
- WLAN_RC_PHY_MAX
-};
-
-#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
- || (_phy == WLAN_RC_PHY_HT_40_DS) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
-#define WLAN_RC_PHY_TS(_phy) ((_phy == WLAN_RC_PHY_HT_20_TS) \
- || (_phy == WLAN_RC_PHY_HT_40_TS) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
- || (_phy == WLAN_RC_PHY_HT_20_DS) \
- || (_phy == WLAN_RC_PHY_HT_20_TS) \
- || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI))
-#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
- || (_phy == WLAN_RC_PHY_HT_40_DS) \
- || (_phy == WLAN_RC_PHY_HT_40_TS) \
- || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-
-#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
-
-#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
- ((capflag & WLAN_RC_40_FLAG) ? RC_HT_40 : RC_HT_20) : RC_LEGACY))
-
-#define WLAN_RC_CAP_STREAM(capflag) (((capflag & WLAN_RC_TS_FLAG) ? \
- (RC_TS) : ((capflag & WLAN_RC_DS_FLAG) ? RC_DS : RC_SS)))
-
-/* Return TRUE if flag supports HT20 && client supports HT20 or
- * return TRUE if flag supports HT40 && client supports HT40.
- * This is used because some rates overlap between HT20/HT40.
- */
-#define WLAN_RC_PHY_HT_VALID(flag, capflag) \
- (((flag & RC_HT_20) && !(capflag & WLAN_RC_40_FLAG)) || \
- ((flag & RC_HT_40) && (capflag & WLAN_RC_40_FLAG)))
-
-#define WLAN_RC_DS_FLAG (0x01)
-#define WLAN_RC_TS_FLAG (0x02)
-#define WLAN_RC_40_FLAG (0x04)
-#define WLAN_RC_SGI_FLAG (0x08)
-#define WLAN_RC_HT_FLAG (0x10)
-
-/**
- * struct ath_rate_table - Rate Control table
- * @rate_cnt: total number of rates for the given wireless mode
- * @mcs_start: MCS rate index offset
- * @rate_flags: Rate Control flags
- * @phy: CCK/OFDM/HT20/HT40
- * @ratekbps: rate in Kbits per second
- * @user_ratekbps: user rate in Kbits per second
- * @ratecode: rate that goes into HW descriptors
- * @dot11rate: value that goes into supported
- * rates info element of MLME
- * @ctrl_rate: Index of next lower basic rate, used for duration computation
- * @cw40index: Index of rates having 40MHz channel width
- * @sgi_index: Index of rates having Short Guard Interval
- * @ht_index: high throughput rates having 40MHz channel width and
- * Short Guard Interval
- * @probe_interval: interval for rate control to probe for other rates
- * @initial_ratemax: initial ratemax value
- */
-struct ath_rate_table {
- int rate_cnt;
- int mcs_start;
- struct {
- u16 rate_flags;
- u8 phy;
- u32 ratekbps;
- u32 user_ratekbps;
- u8 ratecode;
- u8 dot11rate;
- } info[RATE_TABLE_SIZE];
- u32 probe_interval;
- u8 initial_ratemax;
-};
-
-struct ath_rateset {
- u8 rs_nrates;
- u8 rs_rates[ATH_RATE_MAX];
-};
-
-struct ath_rc_stats {
- u32 success;
- u32 retries;
- u32 xretries;
- u8 per;
-};
-
-/**
- * struct ath_rate_priv - Rate Control priv data
- * @state: RC state
- * @probe_rate: rate we are probing at
- * @probe_time: msec timestamp for last probe
- * @hw_maxretry_pktcnt: num of packets since we got HW max retry error
- * @max_valid_rate: maximum number of valid rates
- * @per_down_time: msec timestamp for last PER down step
- * @valid_phy_ratecnt: valid rate count
- * @rate_max_phy: phy index for the max rate
- * @per: PER for every valid rate in %
- * @probe_interval: interval for ratectrl to probe for other rates
- * @ht_cap: HT capabilities
- * @neg_rates: Negotiated rates
- * @neg_ht_rates: Negotiated HT rates
- */
-struct ath_rate_priv {
- u8 rate_table_size;
- u8 probe_rate;
- u8 hw_maxretry_pktcnt;
- u8 max_valid_rate;
- u8 valid_rate_index[RATE_TABLE_SIZE];
- u8 ht_cap;
- u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
- u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][RATE_TABLE_SIZE];
- u8 rate_max_phy;
- u8 per[RATE_TABLE_SIZE];
- u32 probe_time;
- u32 per_down_time;
- u32 probe_interval;
- struct ath_rateset neg_rates;
- struct ath_rateset neg_ht_rates;
- const struct ath_rate_table *rate_table;
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
- struct dentry *debugfs_rcstats;
- struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
-#endif
-};
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate);
-void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per);
-#else
-static inline void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
-}
-static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per)
-{
-}
-#endif
-
-#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
-int ath_rate_control_register(void);
-void ath_rate_control_unregister(void);
-#else
-static inline int ath_rate_control_register(void)
-{
- return 0;
-}
-
-static inline void ath_rate_control_unregister(void)
-{
-}
-#endif
-
-#endif /* RC_H */
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 82e340d3ec60..6c9accdb52e4 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -762,204 +762,6 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
return bf;
}
-/* Assumes you've already done the endian to CPU conversion */
-static bool ath9k_rx_accept(struct ath_common *common,
- struct ieee80211_hdr *hdr,
- struct ieee80211_rx_status *rxs,
- struct ath_rx_status *rx_stats,
- bool *decrypt_error)
-{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- bool is_mc, is_valid_tkip, strip_mic, mic_error;
- struct ath_hw *ah = common->ah;
- __le16 fc;
-
- fc = hdr->frame_control;
-
- is_mc = !!is_multicast_ether_addr(hdr->addr1);
- is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
- test_bit(rx_stats->rs_keyix, common->tkip_keymap);
- strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
- ieee80211_has_protected(fc) &&
- !(rx_stats->rs_status &
- (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
- ATH9K_RXERR_KEYMISS));
-
- /*
- * Key miss events are only relevant for pairwise keys where the
- * descriptor does contain a valid key index. This has been observed
- * mostly with CCMP encryption.
- */
- if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
- !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
- rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
-
- mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
- !ieee80211_has_morefrags(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
- (rx_stats->rs_status & ATH9K_RXERR_MIC);
-
- /*
- * The rx_stats->rs_status will not be set until the end of the
- * chained descriptors so it can be ignored if rs_more is set. The
- * rs_more will be false at the last element of the chained
- * descriptors.
- */
- if (rx_stats->rs_status != 0) {
- u8 status_mask;
-
- if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
- rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
- mic_error = false;
- }
-
- if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
- (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
- *decrypt_error = true;
- mic_error = false;
- }
-
- /*
- * Reject error frames with the exception of
- * decryption and MIC failures. For monitor mode,
- * we also ignore the CRC error.
- */
- status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
- ATH9K_RXERR_KEYMISS;
-
- if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
- status_mask |= ATH9K_RXERR_CRC;
-
- if (rx_stats->rs_status & ~status_mask)
- return false;
- }
-
- /*
- * For unicast frames the MIC error bit can have false positives,
- * so all MIC error reports need to be validated in software.
- * False negatives are not common, so skip software verification
- * if the hardware considers the MIC valid.
- */
- if (strip_mic)
- rxs->flag |= RX_FLAG_MMIC_STRIPPED;
- else if (is_mc && mic_error)
- rxs->flag |= RX_FLAG_MMIC_ERROR;
-
- return true;
-}
-
-static int ath9k_process_rate(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs)
-{
- struct ieee80211_supported_band *sband;
- enum ieee80211_band band;
- unsigned int i = 0;
- struct ath_softc __maybe_unused *sc = common->priv;
- struct ath_hw *ah = sc->sc_ah;
-
- band = ah->curchan->chan->band;
- sband = hw->wiphy->bands[band];
-
- if (IS_CHAN_QUARTER_RATE(ah->curchan))
- rxs->flag |= RX_FLAG_5MHZ;
- else if (IS_CHAN_HALF_RATE(ah->curchan))
- rxs->flag |= RX_FLAG_10MHZ;
-
- if (rx_stats->rs_rate & 0x80) {
- /* HT rate */
- rxs->flag |= RX_FLAG_HT;
- rxs->flag |= rx_stats->flag;
- rxs->rate_idx = rx_stats->rs_rate & 0x7f;
- return 0;
- }
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
- rxs->rate_idx = i;
- return 0;
- }
- if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
- rxs->flag |= RX_FLAG_SHORTPRE;
- rxs->rate_idx = i;
- return 0;
- }
- }
-
- /*
- * No valid hardware bitrate found -- we should not get here
- * because hardware has already validated this frame as OK.
- */
- ath_dbg(common, ANY,
- "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
- rx_stats->rs_rate);
- RX_STAT_INC(rx_rate_err);
- return -EINVAL;
-}
-
-static void ath9k_process_rssi(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = common->ah;
- int last_rssi;
- int rssi = rx_stats->rs_rssi;
- int i, j;
-
- /*
- * RSSI is not available for subframes in an A-MPDU.
- */
- if (rx_stats->rs_moreaggr) {
- rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
- return;
- }
-
- /*
- * Check if the RSSI for the last subframe in an A-MPDU
- * or an unaggregated frame is valid.
- */
- if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
- rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
- return;
- }
-
- for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
- s8 rssi;
-
- if (!(ah->rxchainmask & BIT(i)))
- continue;
-
- rssi = rx_stats->rs_rssi_ctl[i];
- if (rssi != ATH9K_RSSI_BAD) {
- rxs->chains |= BIT(j);
- rxs->chain_signal[j] = ah->noise + rssi;
- }
- j++;
- }
-
- /*
- * Update Beacon RSSI, this is used by ANI.
- */
- if (rx_stats->is_mybeacon &&
- ((ah->opmode == NL80211_IFTYPE_STATION) ||
- (ah->opmode == NL80211_IFTYPE_ADHOC))) {
- ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
- last_rssi = sc->last_rssi;
-
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
- if (rssi < 0)
- rssi = 0;
-
- ah->stats.avgbrssi = rssi;
- }
-
- rxs->signal = ah->noise + rx_stats->rs_rssi;
-}
-
static void ath9k_process_tsf(struct ath_rx_status *rs,
struct ieee80211_rx_status *rxs,
u64 tsf)
@@ -1055,7 +857,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
*/
- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
+ if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->rx.rxfilter))
return -EINVAL;
if (ath_is_mybeacon(common, hdr)) {
@@ -1069,10 +871,18 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
if (WARN_ON(!ah->curchan))
return -EINVAL;
- if (ath9k_process_rate(common, hw, rx_stats, rx_status))
+ if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
+ /*
+ * No valid hardware bitrate found -- we should not get here
+ * because hardware has already validated this frame as OK.
+ */
+ ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
+ rx_stats->rs_rate);
+ RX_STAT_INC(rx_rate_err);
return -EINVAL;
+ }
- ath9k_process_rssi(common, hw, rx_stats, rx_status);
+ ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);
rx_status->band = ah->curchan->chan->band;
rx_status->freq = ah->curchan->chan->center_freq;
@@ -1092,57 +902,6 @@ corrupt:
return -EINVAL;
}
-static void ath9k_rx_skb_postprocess(struct ath_common *common,
- struct sk_buff *skb,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- bool decrypt_error)
-{
- struct ath_hw *ah = common->ah;
- struct ieee80211_hdr *hdr;
- int hdrlen, padpos, padsize;
- u8 keyix;
- __le16 fc;
-
- /* see if any padding is done by the hw and remove it */
- hdr = (struct ieee80211_hdr *) skb->data;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- fc = hdr->frame_control;
- padpos = ieee80211_hdrlen(fc);
-
- /* The MAC header is padded to have 32-bit boundary if the
- * packet payload is non-zero. The general calculation for
- * padsize would take into account odd header lengths:
- * padsize = (4 - padpos % 4) % 4; However, since only
- * even-length headers are used, padding can only be 0 or 2
- * bytes and we can optimize this a bit. In addition, we must
- * not try to remove padding from short control frames that do
- * not have payload. */
- padsize = padpos & 3;
-	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
- memmove(skb->data + padsize, skb->data, padpos);
- skb_pull(skb, padsize);
- }
-
- keyix = rx_stats->rs_keyix;
-
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
- ieee80211_has_protected(fc)) {
- rxs->flag |= RX_FLAG_DECRYPTED;
- } else if (ieee80211_has_protected(fc)
- && !decrypt_error && skb->len >= hdrlen + 4) {
- keyix = skb->data[hdrlen + 3] >> 6;
-
- if (test_bit(keyix, common->keymap))
- rxs->flag |= RX_FLAG_DECRYPTED;
- }
- if (ah->sw_mgmt_crypto &&
- (rxs->flag & RX_FLAG_DECRYPTED) &&
- ieee80211_is_mgmt(fc))
- /* Use software decrypt for management frames. */
- rxs->flag &= ~RX_FLAG_DECRYPTED;
-}
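
The padding logic removed above (now in common code) relies on 802.11 headers always having an even length, so (4 - padpos % 4) % 4 collapses to padpos & 3 and the only possible pad sizes are 0 and 2: a 26-byte QoS data header gives 2 and the two hardware-inserted pad bytes are stripped, while a 24-byte header gives 0 and the frame is left alone. A hypothetical standalone helper:

	static int rx_pad_size(unsigned int hdr_len)
	{
		/* Equivalent to (4 - hdr_len % 4) % 4 for even header lengths. */
		return hdr_len & 3;
	}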
-
/*
* Run the LNA combining algorithm only in these cases:
*
@@ -1292,8 +1051,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb_pull(skb, ah->caps.rx_status_len);
if (!rs.rs_more)
- ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
- rxs, decrypt_error);
+ ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
+ rxs, decrypt_error);
if (rs.rs_more) {
RX_STAT_INC(rx_frags);
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index b686a7498450..a65cfb91adca 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -108,7 +108,7 @@ static int ath9k_tx99_init(struct ath_softc *sc)
struct ath_tx_control txctl;
int r;
- if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_err(common,
"driver is in invalid state unable to use TX99");
return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 1b3230fa3651..2879887f5691 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -198,7 +198,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
ath_cancel_work(sc);
ath_stop_ani(sc);
- if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
ret = -EINVAL;
goto fail_wow;
@@ -224,7 +224,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
* STA.
*/
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
ath_dbg(common, WOW, "None of the STA vifs are associated\n");
ret = 1;
goto fail_wow;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 55897d508a76..87cbec47fb48 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1040,11 +1040,11 @@ static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
int symbols, bits;
int bytes = 0;
+ usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
bits -= OFDM_PLCP_BITS;
bytes = bits / 8;
- bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
if (bytes > 65532)
bytes = 65532;
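
The hunk above moves the preamble correction to the correct side of the unit conversion: L_STF, L_LTF, L_SIG, HT_SIG, HT_STF and HT_LTF are durations in microseconds, so they must be subtracted from the airtime budget before it is converted into symbols and bytes, not from the resulting byte count. A hypothetical single-stream HT20 long-GI sketch (assuming ~36 us of preamble, 4 us symbols, 260 bits per symbol for MCS 7 and 22 PLCP bits):

	static int max_framelen_sketch(int usec)
	{
		int symbols, bits, bytes;

		usec -= 36;			/* L-STF..HT-LTF preamble time */
		symbols = usec / 4;		/* 4 us OFDM symbols, long GI */
		bits = symbols * 260 - 22;	/* MCS 7, HT20, minus PLCP bits */
		bytes = bits / 8;

		return bytes > 65532 ? 65532 : bytes;
	}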
@@ -1076,6 +1076,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_info *info, int len, bool rts)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct sk_buff *skb;
struct ieee80211_tx_info *tx_info;
struct ieee80211_tx_rate *rates;
@@ -1145,7 +1146,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
}
/* legacy rates */
- rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
+ rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
!(rate->flags & IEEE80211_RATE_ERP_G))
phy = WLAN_RC_PHY_CCK;
@@ -1698,7 +1699,7 @@ int ath_cabq_update(struct ath_softc *sc)
ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
- qi.tqi_readyTime = (cur_conf->beacon_interval *
+ qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
ATH_CABQ_READY_TIME) / 100;
ath_txq_update(sc, qnum, &qi);
@@ -1768,7 +1769,7 @@ bool ath_drain_all_txq(struct ath_softc *sc)
int i;
u32 npend = 0;
- if (test_bit(SC_OP_INVALID, &sc->sc_flags))
+ if (test_bit(ATH_OP_INVALID, &common->op_flags))
return true;
ath9k_hw_abort_tx_dma(ah);
@@ -1816,11 +1817,12 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
*/
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_atx_ac *ac, *last_ac;
struct ath_atx_tid *tid, *last_tid;
bool sent = false;
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
list_empty(&txq->axq_acq))
return;
@@ -2470,7 +2472,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_txq_lock(sc, txq);
for (;;) {
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
break;
if (list_empty(&txq->axq_q)) {
@@ -2553,7 +2555,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
int status;
for (;;) {
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
break;
status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
@@ -2569,7 +2571,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
sc->beacon.tx_processed = true;
sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
- ath9k_csa_is_finished(sc);
+ ath9k_csa_update(sc);
continue;
}
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 536bc46a2912..924135b8e575 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -572,7 +572,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
{
- struct ieee80211_bar *bar = (void *) data;
+ struct ieee80211_bar *bar = data;
struct carl9170_bar_list_entry *entry;
unsigned int queue;
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index e5e905910db4..415393dfb6fc 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -222,7 +222,7 @@ static const struct ieee80211_regdomain *ath_default_world_regdomain(void)
static const struct
ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
{
- switch (reg->regpair->regDmnEnum) {
+ switch (reg->regpair->reg_domain) {
case 0x60:
case 0x61:
case 0x62:
@@ -431,7 +431,7 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator,
struct ath_regulatory *reg)
{
- switch (reg->regpair->regDmnEnum) {
+ switch (reg->regpair->reg_domain) {
case 0x60:
case 0x63:
case 0x66:
@@ -560,7 +560,7 @@ static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
printk(KERN_DEBUG "ath: EEPROM indicates we "
"should expect a direct regpair map\n");
for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
- if (regDomainPairs[i].regDmnEnum == rd)
+ if (regDomainPairs[i].reg_domain == rd)
return true;
}
printk(KERN_DEBUG
@@ -617,7 +617,7 @@ ath_get_regpair(int regdmn)
if (regdmn == NO_ENUMRD)
return NULL;
for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
- if (regDomainPairs[i].regDmnEnum == regdmn)
+ if (regDomainPairs[i].reg_domain == regdmn)
return &regDomainPairs[i];
}
return NULL;
@@ -741,7 +741,7 @@ static int __ath_regd_init(struct ath_regulatory *reg)
printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n",
reg->alpha2[0], reg->alpha2[1]);
printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
- reg->regpair->regDmnEnum);
+ reg->regpair->reg_domain);
return 0;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index ee25786b4447..73f12f196f14 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -44,6 +44,14 @@ static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
writel(data, wcn->mmio + addr);
}
+#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \
+do { \
+ if (wcn->chip_version == WCN36XX_CHIP_3680) \
+ wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
+ else \
+ wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
+} while (0)
+
static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
*data = readl(wcn->mmio + addr);
@@ -680,7 +688,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/* Setting interrupt path */
reg_data = WCN36XX_DXE_CCU_INT;
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+ wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
/***************************************/
/* Init descriptors for TX LOW channel */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
index c88562f85de1..35ee7e966bd2 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.h
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -28,11 +28,11 @@ H2H_TEST_RX_TX = DMA2
*/
/* DXE registers */
-#define WCN36XX_DXE_MEM_BASE 0x03000000
#define WCN36XX_DXE_MEM_REG 0x202000
#define WCN36XX_DXE_CCU_INT 0xA0011
-#define WCN36XX_DXE_REG_CCU_INT 0x200b10
+#define WCN36XX_DXE_REG_CCU_INT_3660 0x200b10
+#define WCN36XX_DXE_REG_CCU_INT_3680 0x2050dc
/* TODO This must calculated properly but not hardcoded */
#define WCN36XX_DXE_CTRL_TX_L 0x328a44
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 3c2ef0c32f72..a1f1127d7808 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -4384,11 +4384,13 @@ enum place_holder_in_cap_bitmap {
MAX_FEATURE_SUPPORTED = 128,
};
+#define WCN36XX_HAL_CAPS_SIZE 4
+
struct wcn36xx_hal_feat_caps_msg {
struct wcn36xx_hal_msg_header header;
- u32 feat_caps[4];
+ u32 feat_caps[WCN36XX_HAL_CAPS_SIZE];
} __packed;
/* status codes to help debug rekey failures */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e64a6784079e..4ab5370ab7a6 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "wcn36xx.h"
@@ -177,6 +178,60 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
sta_priv->sta_index;
}
+static const char * const wcn36xx_caps_names[] = {
+ "MCC", /* 0 */
+ "P2P", /* 1 */
+ "DOT11AC", /* 2 */
+ "SLM_SESSIONIZATION", /* 3 */
+ "DOT11AC_OPMODE", /* 4 */
+ "SAP32STA", /* 5 */
+ "TDLS", /* 6 */
+ "P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */
+ "WLANACTIVE_OFFLOAD", /* 8 */
+ "BEACON_OFFLOAD", /* 9 */
+ "SCAN_OFFLOAD", /* 10 */
+ "ROAM_OFFLOAD", /* 11 */
+ "BCN_MISS_OFFLOAD", /* 12 */
+ "STA_POWERSAVE", /* 13 */
+ "STA_ADVANCED_PWRSAVE", /* 14 */
+ "AP_UAPSD", /* 15 */
+ "AP_DFS", /* 16 */
+ "BLOCKACK", /* 17 */
+ "PHY_ERR", /* 18 */
+ "BCN_FILTER", /* 19 */
+ "RTT", /* 20 */
+ "RATECTRL", /* 21 */
+ "WOW" /* 22 */
+};
+
+static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
+{
+ if (x >= ARRAY_SIZE(wcn36xx_caps_names))
+ return "UNKNOWN";
+ return wcn36xx_caps_names[x];
+}
+
+static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
+{
+ int i;
+
+ for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
+ if (get_feat_caps(wcn->fw_feat_caps, i))
+ wcn36xx_info("FW Cap %s\n", wcn36xx_get_cap_name(i));
+ }
+}
+
+static void wcn36xx_detect_chip_version(struct wcn36xx *wcn)
+{
+ if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) {
+ wcn36xx_info("Chip is 3680\n");
+ wcn->chip_version = WCN36XX_CHIP_3680;
+ } else {
+ wcn36xx_info("Chip is 3660\n");
+ wcn->chip_version = WCN36XX_CHIP_3660;
+ }
+}
+
static int wcn36xx_start(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
@@ -223,6 +278,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
goto out_free_smd_buf;
}
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_feature_caps_exchange(wcn);
+ if (ret)
+ wcn36xx_warn("Exchange feature caps failed\n");
+ else
+ wcn36xx_feat_caps_info(wcn);
+ }
+
+ wcn36xx_detect_chip_version(wcn);
+
/* DMA channel initialization */
ret = wcn36xx_dxe_init(wcn);
if (ret) {
@@ -232,11 +297,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
wcn36xx_debugfs_init(wcn);
- if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
- ret = wcn36xx_smd_feature_caps_exchange(wcn);
- if (ret)
- wcn36xx_warn("Exchange feature caps failed\n");
- }
INIT_LIST_HEAD(&wcn->vif_list);
return 0;
@@ -648,6 +708,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->enable_beacon);
if (bss_conf->enable_beacon) {
+ vif_priv->dtim_period = bss_conf->dtim_period;
vif_priv->bss_index = 0xff;
wcn36xx_smd_config_bss(wcn, vif, NULL,
vif->addr, false);
@@ -992,6 +1053,7 @@ static int wcn36xx_remove(struct platform_device *pdev)
struct wcn36xx *wcn = hw->priv;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
+ release_firmware(wcn->nv);
mutex_destroy(&wcn->hal_mutex);
ieee80211_unregister_hw(hw);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 750626b0e22d..7bf0ef8a1f56 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -195,9 +195,11 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
{
int ret = 0;
+ unsigned long start;
wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
init_completion(&wcn->hal_rsp_compl);
+ start = jiffies;
ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
if (ret) {
wcn36xx_err("HAL TX failed\n");
@@ -205,10 +207,13 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
}
if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
- wcn36xx_err("Timeout while waiting SMD response\n");
+ wcn36xx_err("Timeout! No SMD response in %dms\n",
+ HAL_MSG_TIMEOUT);
ret = -ETIME;
goto out;
}
+ wcn36xx_dbg(WCN36XX_DBG_SMD, "SMD command completed in %dms",
+ jiffies_to_msecs(jiffies - start));
out:
return ret;
}
@@ -246,21 +251,22 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
{
- const struct firmware *nv;
struct nv_data *nv_d;
struct wcn36xx_hal_nv_img_download_req_msg msg_body;
int fw_bytes_left;
int ret;
u16 fm_offset = 0;
- ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
- if (ret) {
- wcn36xx_err("Failed to load nv file %s: %d\n",
- WLAN_NV_FILE, ret);
- goto out_free_nv;
+ if (!wcn->nv) {
+ ret = request_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev);
+ if (ret) {
+ wcn36xx_err("Failed to load nv file %s: %d\n",
+ WLAN_NV_FILE, ret);
+ goto out;
+ }
}
- nv_d = (struct nv_data *)nv->data;
+ nv_d = (struct nv_data *)wcn->nv->data;
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
@@ -270,7 +276,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
do {
- fw_bytes_left = nv->size - fm_offset - 4;
+ fw_bytes_left = wcn->nv->size - fm_offset - 4;
if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
msg_body.last_fragment = 0;
msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
@@ -308,10 +314,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
out_unlock:
mutex_unlock(&wcn->hal_mutex);
-out_free_nv:
- release_firmware(nv);
-
- return ret;
+out: return ret;
}
static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
@@ -899,11 +902,12 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
sta_priv->sta_index = params->sta_index;
sta_priv->dpu_desc_index = params->dpu_index;
+ sta_priv->ucast_dpu_sign = params->uc_ucast_sig;
wcn36xx_dbg(WCN36XX_DBG_HAL,
- "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
+ "hal config sta rsp status %d sta_index %d bssid_index %d uc_ucast_sig %d p2p %d\n",
params->status, params->sta_index, params->bssid_index,
- params->p2p);
+ params->uc_ucast_sig, params->p2p);
return 0;
}
@@ -1118,7 +1122,7 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
}
- priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
+ priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
return 0;
}
@@ -1637,12 +1641,12 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
- wcn36xx_err("Sending hal_exit_bmps failed\n");
+ wcn36xx_err("Sending hal_keep_alive failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
- wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+ wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
goto out;
}
out:
@@ -1682,8 +1686,7 @@ out:
return ret;
}
-static inline void set_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
@@ -1697,8 +1700,7 @@ static inline void set_feat_caps(u32 *bitmap,
bitmap[arr_idx] |= (1 << bit_idx);
}
-static inline int get_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
int ret = 0;
@@ -1714,8 +1716,7 @@ static inline int get_feat_caps(u32 *bitmap,
return ret;
}
-static inline void clear_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
@@ -1731,8 +1732,8 @@ static inline void clear_feat_caps(u32 *bitmap,
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
- struct wcn36xx_hal_feat_caps_msg msg_body;
- int ret = 0;
+ struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
+ int ret = 0, i;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@@ -1746,12 +1747,15 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
- if (ret) {
- wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
- ret);
+ if (wcn->hal_rsp_len != sizeof(*rsp)) {
+ wcn36xx_err("Invalid hal_feature_caps_exchange response");
goto out;
}
+
+ rsp = (struct wcn36xx_hal_feat_caps_msg *) wcn->hal_buf;
+
+ for (i = 0; i < WCN36XX_HAL_CAPS_SIZE; i++)
+ wcn->fw_feat_caps[i] = rsp->feat_caps[i];
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
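A minimal stand-alone sketch of the bitmap indexing that the now-exported set_feat_caps()/get_feat_caps() helpers rely on; the cap/32 word split and cap%32 bit position are assumptions consistent with the u32 feat_caps[WCN36XX_HAL_CAPS_SIZE] layout and the visible "bitmap[arr_idx] |= (1 << bit_idx)" fragment, not a copy of the driver code:

	#include <stdint.h>

	#define CAPS_WORDS 4	/* mirrors WCN36XX_HAL_CAPS_SIZE: 4 x u32 = 128 caps */

	/* assumed word/bit split: cap / 32 selects the u32, cap % 32 the bit */
	static int caps_get(const uint32_t *bitmap, unsigned int cap)
	{
		unsigned int arr_idx = cap / 32;
		unsigned int bit_idx = cap % 32;

		if (arr_idx >= CAPS_WORDS)
			return 0;
		return !!(bitmap[arr_idx] & (1u << bit_idx));
	}

	static void caps_set(uint32_t *bitmap, unsigned int cap)
	{
		unsigned int arr_idx = cap / 32;
		unsigned int bit_idx = cap % 32;

		if (arr_idx < CAPS_WORDS)
			bitmap[arr_idx] |= 1u << bit_idx;
	}

wcn36xx_detect_chip_version() in main.c then simply tests the DOT11AC bit of wcn->fw_feat_caps to tell a 3680 from a 3660.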
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index e7c39019c6f1..008d03423dbf 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -24,7 +24,7 @@
#define WCN36XX_HAL_BUF_SIZE 4096
-#define HAL_MSG_TIMEOUT 200
+#define HAL_MSG_TIMEOUT 500
#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
/* The PNO version info be contained in the rsp msg */
@@ -112,6 +112,9 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5);
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index b2b60e30caaf..32bb26a0db2a 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,8 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
- wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
- status.flag, status.vendor_radiotap_len);
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
@@ -132,6 +131,7 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
struct ieee80211_vif,
drv_priv);
+ bd->dpu_sign = sta_priv->ucast_dpu_sign;
if (vif->type == NL80211_IFTYPE_STATION) {
bd->sta_index = sta_priv->bss_sta_index;
bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
@@ -145,10 +145,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
__vif_priv = get_vif_by_addr(wcn, hdr->addr2);
bd->sta_index = __vif_priv->self_sta_index;
bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ bd->dpu_sign = __vif_priv->self_ucast_dpu_sign;
}
- bd->dpu_sign = __vif_priv->ucast_dpu_signature;
-
if (ieee80211_is_nullfunc(hdr->frame_control) ||
(sta_priv && !sta_priv->is_data_encrypted))
bd->dpu_ne = 1;
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 8fa5cbace5ab..f0fb81dfd17b 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -125,10 +125,10 @@ struct wcn36xx_vif {
enum wcn36xx_power_state pw_state;
u8 bss_index;
- u8 ucast_dpu_signature;
/* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
u8 self_sta_index;
u8 self_dpu_desc_index;
+ u8 self_ucast_dpu_sign;
};
/**
@@ -159,6 +159,7 @@ struct wcn36xx_sta {
u16 tid;
u8 sta_index;
u8 dpu_desc_index;
+ u8 ucast_dpu_sign;
u8 bss_sta_index;
u8 bss_dpu_desc_index;
bool is_data_encrypted;
@@ -171,10 +172,14 @@ struct wcn36xx {
struct device *dev;
struct list_head vif_list;
+ const struct firmware *nv;
+
u8 fw_revision;
u8 fw_version;
u8 fw_minor;
u8 fw_major;
+ u32 fw_feat_caps[WCN36XX_HAL_CAPS_SIZE];
+ u32 chip_version;
/* extra byte for the NULL termination */
u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
@@ -222,6 +227,9 @@ struct wcn36xx {
};
+#define WCN36XX_CHIP_3660 0
+#define WCN36XX_CHIP_3680 1
+
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
u8 major,
u8 minor,
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 990dd42ae79e..c7a3465fd02a 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -9,6 +9,7 @@ wil6210-y += wmi.o
wil6210-y += interrupt.o
wil6210-y += txrx.o
wil6210-y += debug.o
+wil6210-y += rx_reorder.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
# for tracing framework to find trace.h
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 5b340769d5bb..4806a49cb61b 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -104,41 +104,125 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type)
return -EOPNOTSUPP;
}
-static int wil_cfg80211_get_station(struct wiphy *wiphy,
- struct net_device *ndev,
- u8 *mac, struct station_info *sinfo)
+static int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
+ struct station_info *sinfo)
{
- struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- int rc;
struct wmi_notify_req_cmd cmd = {
- .cid = 0,
+ .cid = cid,
.interval_usec = 0,
};
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_notify_req_done_event evt;
+ } __packed reply;
+ struct wil_net_stats *stats = &wil->sta[cid].stats;
+ int rc;
- if (memcmp(mac, wil->dst_addr[0], ETH_ALEN))
- return -ENOENT;
-
- /* WMI_NOTIFY_REQ_DONE_EVENTID handler fills wil->stats.bf_mcs */
rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
- WMI_NOTIFY_REQ_DONE_EVENTID, NULL, 0, 20);
+ WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20);
if (rc)
return rc;
+ wil_dbg_wmi(wil, "Link status for CID %d: {\n"
+ " MCS %d TSF 0x%016llx\n"
+ " BF status 0x%08x SNR 0x%08x SQI %d%%\n"
+ " Tx Tpt %d goodput %d Rx goodput %d\n"
+ " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n",
+ cid, le16_to_cpu(reply.evt.bf_mcs),
+ le64_to_cpu(reply.evt.tsf), reply.evt.status,
+ le32_to_cpu(reply.evt.snr_val),
+ reply.evt.sqi,
+ le32_to_cpu(reply.evt.tx_tpt),
+ le32_to_cpu(reply.evt.tx_goodput),
+ le32_to_cpu(reply.evt.rx_goodput),
+ le16_to_cpu(reply.evt.my_rx_sector),
+ le16_to_cpu(reply.evt.my_tx_sector),
+ le16_to_cpu(reply.evt.other_rx_sector),
+ le16_to_cpu(reply.evt.other_tx_sector));
+
sinfo->generation = wil->sinfo_gen;
- sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->filled = STATION_INFO_RX_BYTES |
+ STATION_INFO_TX_BYTES |
+ STATION_INFO_RX_PACKETS |
+ STATION_INFO_TX_PACKETS |
+ STATION_INFO_RX_BITRATE |
+ STATION_INFO_TX_BITRATE |
+ STATION_INFO_RX_DROP_MISC |
+ STATION_INFO_TX_FAILED;
+
sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
- sinfo->txrate.mcs = wil->stats.bf_mcs;
- sinfo->filled |= STATION_INFO_RX_BITRATE;
+ sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
- sinfo->rxrate.mcs = wil->stats.last_mcs_rx;
+ sinfo->rxrate.mcs = stats->last_mcs_rx;
+ sinfo->rx_bytes = stats->rx_bytes;
+ sinfo->rx_packets = stats->rx_packets;
+ sinfo->rx_dropped_misc = stats->rx_dropped;
+ sinfo->tx_bytes = stats->tx_bytes;
+ sinfo->tx_packets = stats->tx_packets;
+ sinfo->tx_failed = stats->tx_errors;
if (test_bit(wil_status_fwconnected, &wil->status)) {
sinfo->filled |= STATION_INFO_SIGNAL;
- sinfo->signal = 12; /* TODO: provide real value */
+ sinfo->signal = reply.evt.sqi;
}
- return 0;
+ return rc;
+}
+
+static int wil_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ int cid = wil_find_cid(wil, mac);
+
+ wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+ if (cid < 0)
+ return cid;
+
+ rc = wil_cid_fill_sinfo(wil, cid, sinfo);
+
+ return rc;
+}
+
+/*
+ * Find @idx-th active STA for station dump.
+ */
+static int wil_find_cid_by_idx(struct wil6210_priv *wil, int idx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ if (wil->sta[i].status == wil_sta_unused)
+ continue;
+ if (idx == 0)
+ return i;
+ idx--;
+ }
+
+ return -ENOENT;
+}
+
+static int wil_cfg80211_dump_station(struct wiphy *wiphy,
+ struct net_device *dev, int idx,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ int cid = wil_find_cid_by_idx(wil, idx);
+
+ if (cid < 0)
+ return -ENOENT;
+
+ memcpy(mac, wil->sta[cid].addr, ETH_ALEN);
+ wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+
+ rc = wil_cid_fill_sinfo(wil, cid, sinfo);
+
+ return rc;
}
static int wil_cfg80211_change_iface(struct wiphy *wiphy,
@@ -181,6 +265,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
u16 chnl[4];
} __packed cmd;
uint i, n;
+ int rc;
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
@@ -198,7 +283,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
/* FW don't support scan after connection attempt */
if (test_bit(wil_status_dontscan, &wil->status)) {
- wil_err(wil, "Scan after connect attempt not supported\n");
+ wil_err(wil, "Can't scan now\n");
return -EBUSY;
}
@@ -221,8 +306,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
request->channels[i]->center_freq);
}
- return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
+ rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+
+ if (rc)
+ wil->scan_request = NULL;
+
+ return rc;
}
static int wil_cfg80211_connect(struct wiphy *wiphy,
@@ -237,6 +327,10 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
int ch;
int rc = 0;
+ if (test_bit(wil_status_fwconnecting, &wil->status) ||
+ test_bit(wil_status_fwconnected, &wil->status))
+ return -EALREADY;
+
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
@@ -318,10 +412,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
memcpy(conn.bssid, bss->bssid, ETH_ALEN);
memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
- /*
- * FW don't support scan after connection attempt
- */
- set_bit(wil_status_dontscan, &wil->status);
+
set_bit(wil_status_fwconnecting, &wil->status);
rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
@@ -330,7 +421,6 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
mod_timer(&wil->connect_timer,
jiffies + msecs_to_jiffies(2000));
} else {
- clear_bit(wil_status_dontscan, &wil->status);
clear_bit(wil_status_fwconnecting, &wil->status);
}
@@ -352,6 +442,40 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
return rc;
}
+static int wil_cfg80211_mgmt_tx(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params,
+ u64 *cookie)
+{
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ struct ieee80211_mgmt *mgmt_frame = (void *)buf;
+ struct wmi_sw_tx_req_cmd *cmd;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_sw_tx_complete_event evt;
+ } __packed evt;
+
+ cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
+ cmd->len = cpu_to_le16(len);
+ memcpy(cmd->payload, buf, len);
+
+ rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+ WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
+ if (rc == 0)
+ rc = evt.evt.status;
+
+ kfree(cmd);
+
+ return rc;
+}
+
static int wil_cfg80211_set_channel(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef)
{
@@ -402,6 +526,41 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
return 0;
}
+static int wil_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ unsigned int duration,
+ u64 *cookie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ /* TODO: handle duration */
+ wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration);
+
+ rc = wmi_set_channel(wil, chan->hw_value);
+ if (rc)
+ return rc;
+
+ rc = wmi_rxon(wil, true);
+
+ return rc;
+}
+
+static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ wil_info(wil, "%s()\n", __func__);
+
+ rc = wmi_rxon(wil, false);
+
+ return rc;
+}
+
static int wil_fix_bcon(struct wil6210_priv *wil,
struct cfg80211_beacon_data *bcon)
{
@@ -450,18 +609,20 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
if (wil_fix_bcon(wil, bcon))
wil_dbg_misc(wil, "Fixed bcon\n");
+ mutex_lock(&wil->mutex);
+
rc = wil_reset(wil);
if (rc)
- return rc;
+ goto out;
/* Rx VRING. */
rc = wil_rx_init(wil);
if (rc)
- return rc;
+ goto out;
rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
if (rc)
- return rc;
+ goto out;
/* MAC address - pre-requisite for other commands */
wmi_set_mac_address(wil, ndev->dev_addr);
@@ -485,11 +646,13 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
channel->hw_value);
if (rc)
- return rc;
+ goto out;
netif_carrier_on(ndev);
+out:
+ mutex_unlock(&wil->mutex);
return rc;
}
@@ -499,17 +662,36 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
int rc = 0;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ mutex_lock(&wil->mutex);
+
rc = wmi_pcp_stop(wil);
+ mutex_unlock(&wil->mutex);
return rc;
}
+static int wil_cfg80211_del_station(struct wiphy *wiphy,
+ struct net_device *dev, u8 *mac)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ mutex_lock(&wil->mutex);
+ wil6210_disconnect(wil, mac);
+ mutex_unlock(&wil->mutex);
+
+ return 0;
+}
+
static struct cfg80211_ops wil_cfg80211_ops = {
.scan = wil_cfg80211_scan,
.connect = wil_cfg80211_connect,
.disconnect = wil_cfg80211_disconnect,
.change_virtual_intf = wil_cfg80211_change_iface,
.get_station = wil_cfg80211_get_station,
+ .dump_station = wil_cfg80211_dump_station,
+ .remain_on_channel = wil_remain_on_channel,
+ .cancel_remain_on_channel = wil_cancel_remain_on_channel,
+ .mgmt_tx = wil_cfg80211_mgmt_tx,
.set_monitor_channel = wil_cfg80211_set_channel,
.add_key = wil_cfg80211_add_key,
.del_key = wil_cfg80211_del_key,
@@ -517,6 +699,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
/* AP mode */
.start_ap = wil_cfg80211_start_ap,
.stop_ap = wil_cfg80211_stop_ap,
+ .del_station = wil_cfg80211_del_station,
};
static void wil_wiphy_init(struct wiphy *wiphy)
@@ -542,7 +725,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
/* TODO: figure this out */
- wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
wiphy->cipher_suites = wil_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
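The new .dump_station hook above leans on wil_find_cid_by_idx(): cfg80211 keeps calling the dump callback with idx = 0, 1, 2, ... until it gets -ENOENT. A stand-alone sketch of that "idx-th used slot" walk, with generic, hypothetical names used purely for illustration:

	#include <stddef.h>

	enum slot_status { SLOT_UNUSED, SLOT_USED };

	/* Return the array index of the idx-th used slot, or -1 when exhausted. */
	static int find_nth_used(const enum slot_status *slots, size_t n, int idx)
	{
		size_t i;

		for (i = 0; i < n; i++) {
			if (slots[i] == SLOT_UNUSED)
				continue;
			if (idx == 0)
				return (int)i;
			idx--;
		}
		return -1;	/* caller maps this to -ENOENT */
	}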
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 1caa31992a7e..ecdabe4adec3 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -26,9 +26,11 @@
/* Nasty hack. Better have per device instances */
static u32 mem_addr;
static u32 dbg_txdesc_index;
+static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
- const char *name, struct vring *vring)
+ const char *name, struct vring *vring,
+ char _s, char _h)
{
void __iomem *x = wmi_addr(wil, vring->hwtail);
@@ -50,8 +52,8 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
volatile struct vring_tx_desc *d = &vring->va[i].tx;
if ((i % 64) == 0 && (i != 0))
seq_printf(s, "\n");
- seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
- "S" : (vring->ctx[i].skb ? "H" : "h"));
+ seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
+ _s : (vring->ctx[i].skb ? _h : 'h'));
}
seq_printf(s, "\n");
}
@@ -63,14 +65,19 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
uint i;
struct wil6210_priv *wil = s->private;
- wil_print_vring(s, wil, "rx", &wil->vring_rx);
+ wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_');
for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
struct vring *vring = &(wil->vring_tx[i]);
if (vring->va) {
+ int cid = wil->vring2cid_tid[i][0];
+ int tid = wil->vring2cid_tid[i][1];
char name[10];
snprintf(name, sizeof(name), "tx_%2d", i);
- wil_print_vring(s, wil, name, vring);
+
+ seq_printf(s, "\n%pM CID %d TID %d\n",
+ wil->sta[cid].addr, cid, tid);
+ wil_print_vring(s, wil, name, vring, '_', 'H');
}
}
@@ -390,25 +397,78 @@ static const struct file_operations fops_reset = {
.write = wil_write_file_reset,
.open = simple_open,
};
-/*---------Tx descriptor------------*/
+static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
+ const char *prefix)
+{
+ char printbuf[16 * 3 + 2];
+ int i = 0;
+ while (i < len) {
+ int l = min(len - i, 16);
+ hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
+ sizeof(printbuf), false);
+ seq_printf(s, "%s%s\n", prefix, printbuf);
+ i += l;
+ }
+}
+
+static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
+{
+ int i = 0;
+ int len = skb_headlen(skb);
+ void *p = skb->data;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ seq_printf(s, " len = %d\n", len);
+ wil_seq_hexdump(s, p, len, " : ");
+
+ if (nr_frags) {
+ seq_printf(s, " nr_frags = %d\n", nr_frags);
+ for (i = 0; i < nr_frags; i++) {
+ const struct skb_frag_struct *frag =
+ &skb_shinfo(skb)->frags[i];
+
+ len = skb_frag_size(frag);
+ p = skb_frag_address_safe(frag);
+ seq_printf(s, " [%2d] : len = %d\n", i, len);
+ wil_seq_hexdump(s, p, len, " : ");
+ }
+ }
+}
+
+/*---------Tx/Rx descriptor------------*/
static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- struct vring *vring = &(wil->vring_tx[0]);
+ struct vring *vring;
+ bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS);
+ if (tx)
+ vring = &(wil->vring_tx[dbg_vring_index]);
+ else
+ vring = &wil->vring_rx;
if (!vring->va) {
- seq_printf(s, "No Tx VRING\n");
+ if (tx)
+ seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index);
+ else
+ seq_puts(s, "No Rx VRING\n");
return 0;
}
if (dbg_txdesc_index < vring->size) {
+ /* use struct vring_tx_desc for Rx as well;
+ * the only field used, .dma.length, is the same
+ */
volatile struct vring_tx_desc *d =
&(vring->va[dbg_txdesc_index].tx);
volatile u32 *u = (volatile u32 *)d;
struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
- seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
+ if (tx)
+ seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index,
+ dbg_txdesc_index);
+ else
+ seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index);
seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[0], u[1], u[2], u[3]);
seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
@@ -416,31 +476,19 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, " SKB = %p\n", skb);
if (skb) {
- char printbuf[16 * 3 + 2];
- int i = 0;
- int len = le16_to_cpu(d->dma.length);
- void *p = skb->data;
-
- if (len != skb_headlen(skb)) {
- seq_printf(s, "!!! len: desc = %d skb = %d\n",
- len, skb_headlen(skb));
- len = min_t(int, len, skb_headlen(skb));
- }
-
- seq_printf(s, " len = %d\n", len);
-
- while (i < len) {
- int l = min(len - i, 16);
- hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
- sizeof(printbuf), false);
- seq_printf(s, " : %s\n", printbuf);
- i += l;
- }
+ skb_get(skb);
+ wil_seq_print_skb(s, skb);
+ kfree_skb(skb);
}
seq_printf(s, "}\n");
} else {
- seq_printf(s, "TxDesc index (%d) >= size (%d)\n",
- dbg_txdesc_index, vring->size);
+ if (tx)
+ seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
+ dbg_vring_index, dbg_txdesc_index,
+ vring->size);
+ else
+ seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
+ dbg_txdesc_index, vring->size);
}
return 0;
@@ -570,6 +618,69 @@ static const struct file_operations fops_temp = {
.llseek = seq_lseek,
};
+/*---------Station matrix------------*/
+static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
+{
+ int i;
+ u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
+ seq_printf(s, "0x%03x [", r->head_seq_num);
+ for (i = 0; i < r->buf_size; i++) {
+ if (i == index)
+ seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|');
+ else
+ seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
+ }
+ seq_puts(s, "]\n");
+}
+
+static int wil_sta_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int i, tid;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ struct wil_sta_info *p = &wil->sta[i];
+ char *status = "unknown";
+ switch (p->status) {
+ case wil_sta_unused:
+ status = "unused ";
+ break;
+ case wil_sta_conn_pending:
+ status = "pending ";
+ break;
+ case wil_sta_connected:
+ status = "connected";
+ break;
+ }
+ seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
+ (p->data_port_open ? " data_port_open" : ""));
+
+ if (p->status == wil_sta_connected) {
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
+ if (r) {
+ seq_printf(s, "[%2d] ", tid);
+ wil_print_rxtid(s, r);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int wil_sta_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_sta_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_sta = {
+ .open = wil_sta_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
/*----------------*/
int wil6210_debugfs_init(struct wil6210_priv *wil)
{
@@ -581,9 +692,13 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox);
debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring);
- debugfs_create_file("txdesc", S_IRUGO, dbg, wil, &fops_txdesc);
- debugfs_create_u32("txdesc_index", S_IRUGO | S_IWUSR, dbg,
+ debugfs_create_file("stations", S_IRUGO, dbg, wil, &fops_sta);
+ debugfs_create_file("desc", S_IRUGO, dbg, wil, &fops_txdesc);
+ debugfs_create_u32("desc_index", S_IRUGO | S_IWUSR, dbg,
&dbg_txdesc_index);
+ debugfs_create_u32("vring_index", S_IRUGO | S_IWUSR, dbg,
+ &dbg_vring_index);
+
debugfs_create_file("bf", S_IRUGO, dbg, wil, &fops_bf);
debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid);
debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg,
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 10919f95a83c..5824cd41e4ba 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -195,8 +195,12 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
wil_dbg_irq(wil, "RX done\n");
isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
- wil_dbg_txrx(wil, "NAPI schedule\n");
- napi_schedule(&wil->napi_rx);
+ if (test_bit(wil_status_reset_done, &wil->status)) {
+ wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+ napi_schedule(&wil->napi_rx);
+ } else {
+ wil_err(wil, "Got Rx interrupt while in reset\n");
+ }
}
if (isr)
@@ -226,10 +230,15 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
wil_dbg_irq(wil, "TX done\n");
- napi_schedule(&wil->napi_tx);
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
/* clear also all VRING interrupts */
isr &= ~(BIT(25) - 1UL);
+ if (test_bit(wil_status_reset_done, &wil->status)) {
+ wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+ napi_schedule(&wil->napi_tx);
+ } else {
+ wil_err(wil, "Got Tx interrupt while in reset\n");
+ }
}
if (isr)
@@ -319,6 +328,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
if (isr & ISR_MISC_FW_ERROR) {
wil_notify_fw_error(wil);
isr &= ~ISR_MISC_FW_ERROR;
+ wil_fw_error_recovery(wil);
}
if (isr & ISR_MISC_MBOX_EVT) {
@@ -493,6 +503,23 @@ free0:
return rc;
}
+/* can't use wil_ioread32_and_clear because ICC value is not set yet */
+static inline void wil_clear32(void __iomem *addr)
+{
+ u32 x = ioread32(addr);
+
+ iowrite32(x, addr);
+}
+
+void wil6210_clear_irq(struct wil6210_priv *wil)
+{
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+}
int wil6210_init_irq(struct wil6210_priv *wil, int irq)
{
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index fd30cddd5882..95f4efe9ef37 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -16,8 +16,14 @@
#include <linux/moduleparam.h>
#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
#include "wil6210.h"
+#include "txrx.h"
+
+static bool no_fw_recovery;
+module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_fw_recovery, " disable FW error recovery");
/*
* Due to a hardware issue,
@@ -52,29 +58,74 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
__raw_writel(*s++, d++);
}
-static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
{
uint i;
- struct net_device *ndev = wil_to_ndev(wil);
+ struct wil_sta_info *sta = &wil->sta[cid];
- wil_dbg_misc(wil, "%s()\n", __func__);
+ sta->data_port_open = false;
+ if (sta->status != wil_sta_unused) {
+ wmi_disconnect_sta(wil, sta->addr, WLAN_REASON_DEAUTH_LEAVING);
+ sta->status = wil_sta_unused;
+ }
- wil_link_off(wil);
- if (test_bit(wil_status_fwconnected, &wil->status)) {
- clear_bit(wil_status_fwconnected, &wil->status);
- cfg80211_disconnected(ndev,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- NULL, 0, GFP_KERNEL);
- } else if (test_bit(wil_status_fwconnecting, &wil->status)) {
- cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
+ for (i = 0; i < WIL_STA_TID_NUM; i++) {
+ struct wil_tid_ampdu_rx *r = sta->tid_rx[i];
+ sta->tid_rx[i] = NULL;
+ wil_tid_ampdu_rx_free(wil, r);
+ }
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ if (wil->vring2cid_tid[i][0] == cid)
+ wil_vring_fini_tx(wil, i);
}
- clear_bit(wil_status_fwconnecting, &wil->status);
- for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
- wil_vring_fini_tx(wil, i);
+ memset(&sta->stats, 0, sizeof(sta->stats));
+}
- clear_bit(wil_status_dontscan, &wil->status);
+static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+ int cid = -ENOENT;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+
+ might_sleep();
+ if (bssid) {
+ cid = wil_find_cid(wil, bssid);
+ wil_dbg_misc(wil, "%s(%pM, CID %d)\n", __func__, bssid, cid);
+ } else {
+ wil_dbg_misc(wil, "%s(all)\n", __func__);
+ }
+
+ if (cid >= 0) /* disconnect 1 peer */
+ wil_disconnect_cid(wil, cid);
+ else /* disconnect all */
+ for (cid = 0; cid < WIL6210_MAX_CID; cid++)
+ wil_disconnect_cid(wil, cid);
+
+ /* link state */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wil_link_off(wil);
+ if (test_bit(wil_status_fwconnected, &wil->status)) {
+ clear_bit(wil_status_fwconnected, &wil->status);
+ cfg80211_disconnected(ndev,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ NULL, 0, GFP_KERNEL);
+ } else if (test_bit(wil_status_fwconnecting, &wil->status)) {
+ cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ }
+ clear_bit(wil_status_fwconnecting, &wil->status);
+ break;
+ default:
+ /* AP-like interface and monitor:
+ * never scan, always connected
+ */
+ if (bssid)
+ cfg80211_del_sta(ndev, bssid, GFP_KERNEL);
+ break;
+ }
}
static void wil_disconnect_worker(struct work_struct *work)
@@ -82,7 +133,9 @@ static void wil_disconnect_worker(struct work_struct *work)
struct wil6210_priv *wil = container_of(work,
struct wil6210_priv, disconnect_worker);
+ mutex_lock(&wil->mutex);
_wil6210_disconnect(wil, NULL);
+ mutex_unlock(&wil->mutex);
}
static void wil_connect_timer_fn(ulong x)
@@ -97,12 +150,55 @@ static void wil_connect_timer_fn(ulong x)
schedule_work(&wil->disconnect_worker);
}
+static void wil_fw_error_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work,
+ struct wil6210_priv, fw_error_worker);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wil_dbg_misc(wil, "fw error worker\n");
+
+ if (no_fw_recovery)
+ return;
+
+ mutex_lock(&wil->mutex);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_MONITOR:
+ wil_info(wil, "fw error recovery started...\n");
+ wil_reset(wil);
+
+ /* need to re-allocate Rx ring after reset */
+ wil_rx_init(wil);
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ /* recovery in these modes is done by upper layers */
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&wil->mutex);
+}
+
+static int wil_find_free_vring(struct wil6210_priv *wil)
+{
+ int i;
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ if (!wil->vring_tx[i].va)
+ return i;
+ }
+ return -EINVAL;
+}
+
static void wil_connect_worker(struct work_struct *work)
{
int rc;
struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
connect_worker);
int cid = wil->pending_connect_cid;
+ int ringid = wil_find_free_vring(wil);
if (cid < 0) {
wil_err(wil, "No connection pending\n");
@@ -111,16 +207,22 @@ static void wil_connect_worker(struct work_struct *work)
wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid);
- rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE, cid, 0);
+ rc = wil_vring_init_tx(wil, ringid, WIL6210_TX_RING_SIZE, cid, 0);
wil->pending_connect_cid = -1;
- if (rc == 0)
+ if (rc == 0) {
+ wil->sta[cid].status = wil_sta_connected;
wil_link_on(wil);
+ } else {
+ wil->sta[cid].status = wil_sta_unused;
+ }
}
int wil_priv_init(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "%s()\n", __func__);
+ memset(wil->sta, 0, sizeof(wil->sta));
+
mutex_init(&wil->mutex);
mutex_init(&wil->wmi_mutex);
@@ -132,6 +234,7 @@ int wil_priv_init(struct wil6210_priv *wil)
INIT_WORK(&wil->connect_worker, wil_connect_worker);
INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
+ INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
INIT_LIST_HEAD(&wil->pending_wmi_ev);
spin_lock_init(&wil->wmi_ev_lock);
@@ -158,7 +261,10 @@ void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
void wil_priv_deinit(struct wil6210_priv *wil)
{
cancel_work_sync(&wil->disconnect_worker);
+ cancel_work_sync(&wil->fw_error_worker);
+ mutex_lock(&wil->mutex);
wil6210_disconnect(wil, NULL);
+ mutex_unlock(&wil->mutex);
wmi_event_flush(wil);
destroy_workqueue(wil->wmi_wq_conn);
destroy_workqueue(wil->wmi_wq);
@@ -166,40 +272,78 @@ void wil_priv_deinit(struct wil6210_priv *wil)
static void wil_target_reset(struct wil6210_priv *wil)
{
+ int delay = 0;
+ u32 hw_state;
+ u32 rev_id;
+
wil_dbg_misc(wil, "Resetting...\n");
+ /* register read */
+#define R(a) ioread32(wil->csr + HOSTADDR(a))
/* register write */
#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
/* register set = read, OR, write */
-#define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \
- wil->csr + HOSTADDR(a))
+#define S(a, v) W(a, R(a) | v)
+ /* register clear = read, AND with inverted, write */
+#define C(a, v) W(a, R(a) & ~v)
+ wil->hw_version = R(RGF_USER_FW_REV_ID);
+ rev_id = wil->hw_version & 0xff;
/* hpal_perst_from_pad_src_n_mask */
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
/* car_perst_rst_src_n_mask */
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
+ wmb(); /* order is important here */
W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */
W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
+ wmb(); /* order is important here */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
+ wmb(); /* order is important here */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+ wmb(); /* order is important here */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+ if (rev_id == 1) {
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+ } else {
+ W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
+ }
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+ wmb(); /* order is important here */
+
+ /* wait until device ready */
+ do {
+ msleep(1);
+ hw_state = R(RGF_USER_HW_MACHINE_STATE);
+ if (delay++ > 100) {
+ wil_err(wil, "Reset not completed, hw_state 0x%08x\n",
+ hw_state);
+ return;
+ }
+ } while (hw_state != HW_MACHINE_BOOT_DONE);
+
+ if (rev_id == 2)
+ W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
+
+ C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ wmb(); /* order is important here */
- wil_dbg_misc(wil, "Reset completed\n");
+ wil_dbg_misc(wil, "Reset completed in %d ms\n", delay);
+#undef R
#undef W
#undef S
+#undef C
}
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -234,11 +378,24 @@ int wil_reset(struct wil6210_priv *wil)
{
int rc;
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL);
+ wil->status = 0; /* prevent NAPI from being scheduled */
+ if (test_bit(wil_status_napi_en, &wil->status)) {
+ napi_synchronize(&wil->napi_rx);
+ }
+
+ if (wil->scan_request) {
+ wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
+ wil->scan_request);
+ cfg80211_scan_done(wil->scan_request, true);
+ wil->scan_request = NULL;
+ }
+
wil6210_disable_irq(wil);
- wil->status = 0;
wmi_event_flush(wil);
@@ -248,6 +405,8 @@ int wil_reset(struct wil6210_priv *wil)
/* TODO: put MAC in reset */
wil_target_reset(wil);
+ wil_rx_fini(wil);
+
/* init after reset */
wil->pending_connect_cid = -1;
reinit_completion(&wil->wmi_ready);
@@ -261,6 +420,11 @@ int wil_reset(struct wil6210_priv *wil)
return rc;
}
+void wil_fw_error_recovery(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "starting fw error recovery\n");
+ schedule_work(&wil->fw_error_worker);
+}
void wil_link_on(struct wil6210_priv *wil)
{
@@ -288,6 +452,8 @@ static int __wil_up(struct wil6210_priv *wil)
struct wireless_dev *wdev = wil->wdev;
int rc;
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
rc = wil_reset(wil);
if (rc)
return rc;
@@ -329,6 +495,7 @@ static int __wil_up(struct wil6210_priv *wil)
napi_enable(&wil->napi_rx);
napi_enable(&wil->napi_tx);
+ set_bit(wil_status_napi_en, &wil->status);
return 0;
}
@@ -346,6 +513,9 @@ int wil_up(struct wil6210_priv *wil)
static int __wil_down(struct wil6210_priv *wil)
{
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
+ clear_bit(wil_status_napi_en, &wil->status);
napi_disable(&wil->napi_rx);
napi_disable(&wil->napi_tx);
@@ -370,3 +540,19 @@ int wil_down(struct wil6210_priv *wil)
return rc;
}
+
+int wil_find_cid(struct wil6210_priv *wil, const u8 *mac)
+{
+ int i;
+ int rc = -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ if ((wil->sta[i].status != wil_sta_unused) &&
+ ether_addr_equal(wil->sta[i].addr, mac)) {
+ rc = i;
+ break;
+ }
+ }
+
+ return rc;
+}
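For readability, here is the preprocessor view of the register shorthands added to wil_target_reset() above (R/W/S/C are local macros defined and #undef'd inside that function); shown for two of the new calls, expansion only:

	/* C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD) expands to: */
	iowrite32(ioread32(wil->csr + HOSTADDR(RGF_USER_CLKS_CTL_0)) &
		  ~BIT_USER_CLKS_RST_PWGD,
		  wil->csr + HOSTADDR(RGF_USER_CLKS_CTL_0));

	/* S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6)) expands to: */
	iowrite32(ioread32(wil->csr + HOSTADDR(RGF_USER_CLKS_CTL_SW_RST_MASK_0)) |
		  BIT(6),
		  wil->csr + HOSTADDR(RGF_USER_CLKS_CTL_SW_RST_MASK_0));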
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 717178f09aa8..fdcaeb820e75 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -127,8 +127,9 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
ndev->netdev_ops = &wil_netdev_ops;
ndev->ieee80211_ptr = wdev;
- ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
- ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_SG | NETIF_F_GRO;
+ ndev->features |= ndev->hw_features;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index eeceab39cda2..f1e1bb338d68 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -41,39 +41,41 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
switch (use_msi) {
case 3:
case 1:
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
+ break;
case 0:
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
break;
default:
- wil_err(wil, "Invalid use_msi=%d, default to 1\n",
- use_msi);
+ wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
use_msi = 1;
}
- wil->n_msi = use_msi;
- if (wil->n_msi) {
- wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
- rc = pci_enable_msi_block(pdev, wil->n_msi);
- if (rc && (wil->n_msi == 3)) {
- wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
- wil->n_msi = 1;
- rc = pci_enable_msi_block(pdev, wil->n_msi);
- }
- if (rc) {
- wil_err(wil, "pci_enable_msi failed, use INTx\n");
- wil->n_msi = 0;
- }
- } else {
- wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+
+ if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ use_msi = 1;
+ }
+
+ if (use_msi == 1 && pci_enable_msi(pdev)) {
+ wil_err(wil, "pci_enable_msi failed, use INTx\n");
+ use_msi = 0;
}
+ wil->n_msi = use_msi;
+
rc = wil6210_init_irq(wil, pdev->irq);
if (rc)
goto stop_master;
/* need reset here to obtain MAC */
+ mutex_lock(&wil->mutex);
rc = wil_reset(wil);
+ mutex_unlock(&wil->mutex);
if (rc)
goto release_irq;
+ wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
+
return 0;
release_irq:
@@ -151,6 +153,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, wil);
wil->pdev = pdev;
+ wil6210_clear_irq(wil);
/* FW should raise IRQ when ready */
rc = wil_if_pcie_enable(wil);
if (rc) {
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
new file mode 100644
index 000000000000..d04629fe053f
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -0,0 +1,177 @@
+#include "wil6210.h"
+#include "txrx.h"
+
+#define SEQ_MODULO 0x1000
+#define SEQ_MASK 0xfff
+
+static inline int seq_less(u16 sq1, u16 sq2)
+{
+ return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
+}
+
+static inline u16 seq_inc(u16 sq)
+{
+ return (sq + 1) & SEQ_MASK;
+}
+
+static inline u16 seq_sub(u16 sq1, u16 sq2)
+{
+ return (sq1 - sq2) & SEQ_MASK;
+}
+
+static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
+{
+ return seq_sub(seq, r->ssn) % r->buf_size;
+}
+
+static void wil_release_reorder_frame(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r,
+ int index)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct sk_buff *skb = r->reorder_buf[index];
+
+ if (!skb)
+ goto no_frame;
+
+ /* release the frame from the reorder ring buffer */
+ r->stored_mpdu_num--;
+ r->reorder_buf[index] = NULL;
+ wil_netif_rx_any(skb, ndev);
+
+no_frame:
+ r->head_seq_num = seq_inc(r->head_seq_num);
+}
+
+static void wil_release_reorder_frames(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r,
+ u16 hseq)
+{
+ int index;
+
+ while (seq_less(r->head_seq_num, hseq)) {
+ index = reorder_index(r, r->head_seq_num);
+ wil_release_reorder_frame(wil, r, index);
+ }
+}
+
+static void wil_reorder_release(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r)
+{
+ int index = reorder_index(r, r->head_seq_num);
+
+ while (r->reorder_buf[index]) {
+ wil_release_reorder_frame(wil, r, index);
+ index = reorder_index(r, r->head_seq_num);
+ }
+}
+
+void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ int tid = wil_rxdesc_tid(d);
+ int cid = wil_rxdesc_cid(d);
+ int mid = wil_rxdesc_mid(d);
+ u16 seq = wil_rxdesc_seq(d);
+ struct wil_sta_info *sta = &wil->sta[cid];
+ struct wil_tid_ampdu_rx *r = sta->tid_rx[tid];
+ u16 hseq;
+ int index;
+
+ wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x\n",
+ mid, cid, tid, seq);
+
+ if (!r) {
+ wil_netif_rx_any(skb, ndev);
+ return;
+ }
+
+ hseq = r->head_seq_num;
+
+ spin_lock(&r->reorder_lock);
+
+ /* frame with out of date sequence number */
+ if (seq_less(seq, r->head_seq_num)) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ /*
+ * If the frame sequence number exceeds our buffering window
+ * size, release some previous frames to make room for this one.
+ */
+ if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
+ hseq = seq_inc(seq_sub(seq, r->buf_size));
+ /* release stored frames up to new head to stack */
+ wil_release_reorder_frames(wil, r, hseq);
+ }
+
+ /* Now the new frame is always in the range of the reordering buffer */
+
+ index = reorder_index(r, seq);
+
+ /* check if we already stored this frame */
+ if (r->reorder_buf[index]) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ /*
+ * If the current MPDU is in the right order and nothing else
+ * is stored we can process it directly, no need to buffer it.
+ * If it is first but there's something stored, we may be able
+ * to release frames after this one.
+ */
+ if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
+ r->head_seq_num = seq_inc(r->head_seq_num);
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+
+ /* put the frame in the reordering buffer */
+ r->reorder_buf[index] = skb;
+ r->reorder_time[index] = jiffies;
+ r->stored_mpdu_num++;
+ wil_reorder_release(wil, r);
+
+out:
+ spin_unlock(&r->reorder_lock);
+}
+
+struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
+ int size, u16 ssn)
+{
+ struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ r->reorder_buf =
+ kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
+ r->reorder_time =
+ kcalloc(size, sizeof(unsigned long), GFP_KERNEL);
+ if (!r->reorder_buf || !r->reorder_time) {
+ kfree(r->reorder_buf);
+ kfree(r->reorder_time);
+ kfree(r);
+ return NULL;
+ }
+
+ spin_lock_init(&r->reorder_lock);
+ r->ssn = ssn;
+ r->head_seq_num = ssn;
+ r->buf_size = size;
+ r->stored_mpdu_num = 0;
+ return r;
+}
+
+void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r)
+{
+ if (!r)
+ return;
+ wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size);
+ kfree(r->reorder_buf);
+ kfree(r->reorder_time);
+ kfree(r);
+}
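The reorder logic above rests on the modulo-4096 sequence arithmetic of seq_less()/seq_inc()/seq_sub(). A small self-contained, user-space demo of how seq_less() stays correct across the 0xfff -> 0x000 wrap: a difference larger than half the 0x1000 sequence space is treated as "older".

	#include <stdio.h>
	#include <stdint.h>

	#define SEQ_MODULO 0x1000
	#define SEQ_MASK   0xfff

	static int seq_less(uint16_t sq1, uint16_t sq2)
	{
		return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
	}

	int main(void)
	{
		printf("%d\n", seq_less(0x010, 0x020)); /* 1: 0x010 is older             */
		printf("%d\n", seq_less(0xffe, 0x002)); /* 1: still older across wrap    */
		printf("%d\n", seq_less(0x002, 0xffe)); /* 0: newer despite smaller value */
		return 0;
	}

reorder_index() then maps a sequence number into the circular buffer as seq_sub(seq, r->ssn) % r->buf_size, which is how wil_rx_reorder() slots out-of-order frames.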
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 0b0975d88b43..c8c547457eb4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -104,6 +104,23 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
return 0;
}
+static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
+ struct wil_ctx *ctx)
+{
+ dma_addr_t pa = wil_desc_addr(&d->dma.addr);
+ u16 dmalen = le16_to_cpu(d->dma.length);
+ switch (ctx->mapped_as) {
+ case wil_mapped_as_single:
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ case wil_mapped_as_page:
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
+
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
int tx)
{
@@ -122,15 +139,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
ctx = &vring->ctx[vring->swtail];
*d = *_d;
- pa = wil_desc_addr(&d->dma.addr);
- dmalen = le16_to_cpu(d->dma.length);
- if (vring->ctx[vring->swtail].mapped_as_page) {
- dma_unmap_page(dev, pa, dmalen,
- DMA_TO_DEVICE);
- } else {
- dma_unmap_single(dev, pa, dmalen,
- DMA_TO_DEVICE);
- }
+ wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
vring->swtail = wil_vring_next_tail(vring);
@@ -344,6 +353,9 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
u16 dmalen;
u8 ftype;
u8 ds_bits;
+ int cid;
+ struct wil_net_stats *stats;
+
BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
@@ -383,8 +395,10 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
-
- wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
+ cid = wil_rxdesc_cid(d);
+ stats = &wil->sta[cid].stats;
+ stats->last_mcs_rx = wil_rxdesc_mcs(d);
+ wil->stats.last_mcs_rx = stats->last_mcs_rx;
/* use radiotap header only if required */
if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
@@ -472,21 +486,28 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
* Pass Rx packet to the netif. Update statistics.
* Called in softirq context (NAPI poll).
*/
-static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
- int rc;
+ gro_result_t rc;
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
unsigned int len = skb->len;
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ int cid = wil_rxdesc_cid(d);
+ struct wil_net_stats *stats = &wil->sta[cid].stats;
skb_orphan(skb);
- rc = netif_receive_skb(skb);
+ rc = napi_gro_receive(&wil->napi_rx, skb);
- if (likely(rc == NET_RX_SUCCESS)) {
+ if (unlikely(rc == GRO_DROP)) {
+ ndev->stats.rx_dropped++;
+ stats->rx_dropped++;
+ wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+ } else {
ndev->stats.rx_packets++;
+ stats->rx_packets++;
ndev->stats.rx_bytes += len;
-
- } else {
- ndev->stats.rx_dropped++;
+ stats->rx_bytes += len;
}
}
@@ -515,12 +536,18 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
-
+ wil_netif_rx_any(skb, ndev);
} else {
+ struct ethhdr *eth = (void *)skb->data;
+
skb->protocol = eth_type_trans(skb, ndev);
+
+ if (is_unicast_ether_addr(eth->h_dest))
+ wil_rx_reorder(wil, skb);
+ else
+ wil_netif_rx_any(skb, ndev);
}
- wil_netif_rx_any(skb, ndev);
}
wil_rx_refill(wil, v->size);
}
@@ -530,6 +557,11 @@ int wil_rx_init(struct wil6210_priv *wil)
struct vring *vring = &wil->vring_rx;
int rc;
+ if (vring->va) {
+ wil_err(wil, "Rx ring already allocated\n");
+ return -EINVAL;
+ }
+
vring->size = WIL6210_RX_RING_SIZE;
rc = wil_vring_alloc(wil, vring);
if (rc)
@@ -570,7 +602,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
.ring_size = cpu_to_le16(size),
},
.ringid = id,
- .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
+ .cidxtid = mk_cidxtid(cid, tid),
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
.mac_ctrl = 0,
.to_resolution = 0,
@@ -586,6 +618,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
struct wmi_vring_cfg_done_event cmd;
} __packed reply;
struct vring *vring = &wil->vring_tx[id];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[id];
if (vring->va) {
wil_err(wil, "Tx ring [%d] already allocated\n", id);
@@ -593,11 +626,15 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
goto out;
}
+ memset(txdata, 0, sizeof(*txdata));
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
goto out;
+ wil->vring2cid_tid[id][0] = cid;
+ wil->vring2cid_tid[id][1] = tid;
+
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
@@ -613,6 +650,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
}
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+ txdata->enabled = 1;
+
return 0;
out_free:
wil_vring_free(wil, vring, 1);
@@ -625,23 +664,116 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
struct vring *vring = &wil->vring_tx[id];
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
if (!vring->va)
return;
+ /* make sure NAPI won't touch this vring */
+ wil->vring_tx_data[id].enabled = 0;
+ if (test_bit(wil_status_napi_en, &wil->status))
+ napi_synchronize(&wil->napi_tx);
+
wil_vring_free(wil, vring, 1);
}
static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
struct sk_buff *skb)
{
- struct vring *v = &wil->vring_tx[0];
+ int i;
+ struct ethhdr *eth = (void *)skb->data;
+ int cid = wil_find_cid(wil, eth->h_dest);
+
+ if (cid < 0)
+ return NULL;
- if (v->va)
- return v;
+ if (!wil->sta[cid].data_port_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ return NULL;
+
+ /* TODO: fix for multiple TID */
+ for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
+ if (wil->vring2cid_tid[i][0] == cid) {
+ struct vring *v = &wil->vring_tx[i];
+ wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
+ __func__, eth->h_dest, i);
+ if (v->va) {
+ return v;
+ } else {
+ wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
+ return NULL;
+ }
+ }
+ }
return NULL;
}
+static void wil_set_da_for_vring(struct wil6210_priv *wil,
+ struct sk_buff *skb, int vring_index)
+{
+ struct ethhdr *eth = (void *)skb->data;
+ int cid = wil->vring2cid_tid[vring_index][0];
+ memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
+}
+
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb);
+/*
+ * Find 1-st vring and return it; set dest address for this vring in skb;
+ * duplicate skb and send it to other active vrings
+ */
+static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct vring *v, *v2;
+ struct sk_buff *skb2;
+ int i;
+ u8 cid;
+
+ /* find 1-st vring eligible for data */
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ v = &wil->vring_tx[i];
+ if (!v->va)
+ continue;
+
+ cid = wil->vring2cid_tid[i][0];
+ if (!wil->sta[cid].data_port_open)
+ continue;
+
+ goto found;
+ }
+
+ wil_err(wil, "Tx while no vrings active?\n");
+
+ return NULL;
+
+found:
+ wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
+ wil_set_da_for_vring(wil, skb, i);
+
+ /* find other active vrings and duplicate skb for each */
+ for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
+ v2 = &wil->vring_tx[i];
+ if (!v2->va)
+ continue;
+ cid = wil->vring2cid_tid[i][0];
+ if (!wil->sta[cid].data_port_open)
+ continue;
+
+ skb2 = skb_copy(skb, GFP_ATOMIC);
+ if (skb2) {
+ wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
+ wil_set_da_for_vring(wil, skb2, i);
+ wil_tx_vring(wil, v2, skb2);
+ } else {
+ wil_err(wil, "skb_copy failed\n");
+ }
+ }
+
+ return v;
+}
+
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
int vring_index)
{
@@ -667,6 +799,13 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
return 0;
}
+static inline
+void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
+{
+ d->mac.d[2] |= ((nr_frags + 1) <<
+ MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+}
+
static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
struct vring_tx_desc *d,
struct sk_buff *skb)
@@ -731,8 +870,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_dbg_txrx(wil, "%s()\n", __func__);
- if (avail < vring->size/8)
- netif_tx_stop_all_queues(wil_to_ndev(wil));
if (avail < 1 + nr_frags) {
wil_err(wil, "Tx ring full. No space for %d fragments\n",
1 + nr_frags);
@@ -740,9 +877,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
}
_d = &(vring->va[i].tx);
- /* FIXME FW can accept only unicast frames for the peer */
- memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
-
pa = dma_map_single(dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
@@ -753,6 +887,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
if (unlikely(dma_mapping_error(dev, pa)))
return -EINVAL;
+ vring->ctx[i].mapped_as = wil_mapped_as_single;
+ /* first segment */
wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
/* Process TCP/UDP checksum offloading */
@@ -762,8 +897,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
goto dma_error;
}
- d->mac.d[2] |= ((nr_frags + 1) <<
- MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+ vring->ctx[i].nr_frags = nr_frags;
+ wil_tx_desc_set_nr_frags(d, nr_frags);
if (nr_frags)
*_d = *d;
@@ -778,8 +913,13 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa)))
goto dma_error;
+ vring->ctx[i].mapped_as = wil_mapped_as_page;
wil_tx_desc_map(d, pa, len, vring_index);
- vring->ctx[i].mapped_as_page = 1;
+ /* no need to check the return code -
+ * if it succeeded for the first descriptor,
+ * it will succeed here too
+ */
+ wil_tx_desc_offload_cksum_set(wil, d, skb);
*_d = *d;
}
/* for the last seg only */
@@ -808,7 +948,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
/* unmap what we have mapped */
nr_frags = f + 1; /* frags mapped + one for skb head */
for (f = 0; f < nr_frags; f++) {
- u16 dmalen;
struct wil_ctx *ctx;
i = (swhead + f) % vring->size;
@@ -816,12 +955,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
_d = &(vring->va[i].tx);
*d = *_d;
_d->dma.status = TX_DMA_STATUS_DU;
- pa = wil_desc_addr(&d->dma.addr);
- dmalen = le16_to_cpu(d->dma.length);
- if (ctx->mapped_as_page)
- dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
- else
- dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
@@ -836,12 +970,17 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct ethhdr *eth = (void *)skb->data;
struct vring *vring;
+ static bool pr_once_fw;
int rc;
wil_dbg_txrx(wil, "%s()\n", __func__);
if (!test_bit(wil_status_fwready, &wil->status)) {
- wil_err(wil, "FW not ready\n");
+ if (!pr_once_fw) {
+ wil_err(wil, "FW not ready\n");
+ pr_once_fw = true;
+ }
goto drop;
}
if (!test_bit(wil_status_fwconnected, &wil->status)) {
@@ -852,16 +991,25 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
wil_err(wil, "Xmit in monitor mode not supported\n");
goto drop;
}
+ pr_once_fw = false;
/* find vring */
- vring = wil_find_tx_vring(wil, skb);
+ if (is_unicast_ether_addr(eth->h_dest)) {
+ vring = wil_find_tx_vring(wil, skb);
+ } else {
+ vring = wil_tx_bcast(wil, skb);
+ }
if (!vring) {
- wil_err(wil, "No Tx VRING available\n");
+ wil_err(wil, "No Tx VRING found for %pM\n", eth->h_dest);
goto drop;
}
/* set up vring entry */
rc = wil_tx_vring(wil, vring, skb);
+ /* do we still have enough room in the vring? */
+ if (wil_vring_avail_tx(vring) < vring->size/8)
+ netif_tx_stop_all_queues(wil_to_ndev(wil));
+
switch (rc) {
case 0:
/* statistics will be updated on the tx_complete */
@@ -891,64 +1039,82 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
struct net_device *ndev = wil_to_ndev(wil);
struct device *dev = wil_to_dev(wil);
struct vring *vring = &wil->vring_tx[ringid];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
int done = 0;
+ int cid = wil->vring2cid_tid[ringid][0];
+ struct wil_net_stats *stats = &wil->sta[cid].stats;
+ volatile struct vring_tx_desc *_d;
if (!vring->va) {
wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
return 0;
}
+ if (!txdata->enabled) {
+ wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
+ return 0;
+ }
+
wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
while (!wil_vring_is_empty(vring)) {
- volatile struct vring_tx_desc *_d =
- &vring->va[vring->swtail].tx;
- struct vring_tx_desc dd, *d = &dd;
- dma_addr_t pa;
- u16 dmalen;
+ int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
- struct sk_buff *skb = ctx->skb;
-
- *d = *_d;
+ /* For a fragmented skb, HW will set the DU bit only for the
+ * last fragment. Look for it.
+ */
+ int lf = (vring->swtail + ctx->nr_frags) % vring->size;
+ /* TODO: check we are not past head */
- if (!(d->dma.status & TX_DMA_STATUS_DU))
+ _d = &vring->va[lf].tx;
+ if (!(_d->dma.status & TX_DMA_STATUS_DU))
break;
- dmalen = le16_to_cpu(d->dma.length);
- trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
- d->dma.error);
- wil_dbg_txrx(wil,
- "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
- vring->swtail, dmalen, d->dma.status,
- d->dma.error);
- wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
- (const void *)d, sizeof(*d), false);
-
- pa = wil_desc_addr(&d->dma.addr);
- if (ctx->mapped_as_page)
- dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
- else
- dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
-
- if (skb) {
- if (d->dma.error == 0) {
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- } else {
- ndev->stats.tx_errors++;
- }
+ new_swtail = (lf + 1) % vring->size;
+ while (vring->swtail != new_swtail) {
+ struct vring_tx_desc dd, *d = &dd;
+ u16 dmalen;
+ struct wil_ctx *ctx = &vring->ctx[vring->swtail];
+ struct sk_buff *skb = ctx->skb;
+ _d = &vring->va[vring->swtail].tx;
+
+ *d = *_d;
+
+ dmalen = le16_to_cpu(d->dma.length);
+ trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
+ d->dma.error);
+ wil_dbg_txrx(wil,
+ "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+ vring->swtail, dmalen, d->dma.status,
+ d->dma.error);
+ wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ wil_txdesc_unmap(dev, d, ctx);
+
+ if (skb) {
+ if (d->dma.error == 0) {
+ ndev->stats.tx_packets++;
+ stats->tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ stats->tx_bytes += skb->len;
+ } else {
+ ndev->stats.tx_errors++;
+ stats->tx_errors++;
+ }
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ /* There is no need to touch HW descriptor:
+ * - status bit TX_DMA_STATUS_DU is set by design,
+ * so hardware will not try to process this desc.,
+ * - rest of descriptor will be initialized on Tx.
+ */
+ vring->swtail = wil_vring_next_tail(vring);
+ done++;
}
- memset(ctx, 0, sizeof(*ctx));
- /*
- * There is no need to touch HW descriptor:
- * - ststus bit TX_DMA_STATUS_DU is set by design,
- * so hardware will not try to process this desc.,
- * - rest of descriptor will be initialized on Tx.
- */
- vring->swtail = wil_vring_next_tail(vring);
- done++;
}
if (wil_vring_avail_tx(vring) > vring->size/4)
netif_tx_wake_all_queues(wil_to_ndev(wil));
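For reference, a minimal sketch of what the new wil_txdesc_unmap() helper presumably does, reconstructed from the open-coded unmap logic this patch removes and from the new wil_ctx.mapped_as field; the actual helper is defined elsewhere in the driver and may differ:

	static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
				     struct wil_ctx *ctx)
	{
		dma_addr_t pa = wil_desc_addr(&d->dma.addr);
		u16 dmalen = le16_to_cpu(d->dma.length);

		switch (ctx->mapped_as) {
		case wil_mapped_as_single:
			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
			break;
		case wil_mapped_as_page:
			dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
			break;
		default:
			/* wil_mapped_as_none: nothing was mapped */
			break;
		}
	}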
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index b3828279204c..bc5706a4f007 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -436,4 +436,11 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
return (void *)skb->cb;
}
+void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
+void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
+struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
+ int size, u16 ssn);
+void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r);
+
#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 1f91eaf95bbe..2a2dec75f026 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -74,23 +74,21 @@ struct RGF_ICR {
} __packed;
/* registers - FW addresses */
-#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
-#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
- #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
-#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
-#define RGF_USER_MAC_CPU_0 (0x8801fc)
+#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
+ #define HW_MACHINE_BOOT_DONE (0x3fffffd)
#define RGF_USER_USER_CPU_0 (0x8801e0)
+#define RGF_USER_MAC_CPU_0 (0x8801fc)
+#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
+#define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */
+#define RGF_USER_CLKS_CTL_0 (0x880abc)
+ #define BIT_USER_CLKS_RST_PWGD BIT(11) /* reset on "power good" */
#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
-
-#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
-#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
-#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
- #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
- #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
- #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
+#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
+#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
+ #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
#define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
@@ -105,13 +103,22 @@ struct RGF_ICR {
/* Interrupt moderation control */
#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
#define RGF_DMA_ITR_CNT_DATA (0x881c60)
-#define RGF_DMA_ITR_CNT_CRL (0x881C64)
+#define RGF_DMA_ITR_CNT_CRL (0x881c64)
#define BIT_DMA_ITR_CNT_CRL_EN BIT(0)
#define BIT_DMA_ITR_CNT_CRL_EXT_TICK BIT(1)
#define BIT_DMA_ITR_CNT_CRL_FOREVER BIT(2)
#define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
#define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
+#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
+ #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
+ #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
+ #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
+
+#define RGF_PCIE_LOS_COUNTER_CTL (0x882dc4)
+
/* popular locations */
#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
@@ -125,6 +132,31 @@ struct RGF_ICR {
/* Hardware definitions end */
+/**
+ * mk_cidxtid - construct @cidxtid field
+ * @cid: CID value
+ * @tid: TID value
+ *
+ * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ */
+static inline u8 mk_cidxtid(u8 cid, u8 tid)
+{
+ return ((tid & 0xf) << 4) | (cid & 0xf);
+}
+
+/**
+ * parse_cidxtid - parse @cidxtid field
+ * @cidxtid: combined value, as built by mk_cidxtid()
+ * @cid: store CID value here
+ * @tid: store TID value here
+ *
+ * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ */
+static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
+{
+ *cid = cidxtid & 0xf;
+ *tid = (cidxtid >> 4) & 0xf;
+}
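Worked example for the encoding above: mk_cidxtid(3, 5) yields 0x53 (TID in the high nibble, CID in the low one), and parse_cidxtid(0x53, &cid, &tid) recovers cid == 3 and tid == 5.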
+
struct wil6210_mbox_ring {
u32 base;
u16 entry_size; /* max. size of mbox entry, incl. all headers */
@@ -184,12 +216,19 @@ struct pending_wmi_event {
} __packed event;
};
+enum { /* for wil_ctx.mapped_as */
+ wil_mapped_as_none = 0,
+ wil_mapped_as_single = 1,
+ wil_mapped_as_page = 2,
+};
+
/**
* struct wil_ctx - software context for Vring descriptor
*/
struct wil_ctx {
struct sk_buff *skb;
- u8 mapped_as_page:1;
+ u8 nr_frags;
+ u8 mapped_as;
};
union vring_desc;
@@ -204,6 +243,14 @@ struct vring {
struct wil_ctx *ctx; /* ctx[size] - software context */
};
+/**
+ * Additional data for Tx Vring
+ */
+struct vring_tx_data {
+ int enabled;
+};
+
enum { /* for wil6210_priv.status */
wil_status_fwready = 0,
wil_status_fwconnecting,
@@ -211,10 +258,51 @@ enum { /* for wil6210_priv.status */
wil_status_dontscan,
wil_status_reset_done,
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+ wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
};
struct pci_dev;
+/**
+ * struct tid_ampdu_rx - TID aggregation information (Rx).
+ *
+ * @reorder_buf: buffer to reorder incoming aggregated MPDUs
+ * @reorder_time: jiffies when skb was added
+ * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
+ * @reorder_timer: releases expired frames from the reorder buffer.
+ * @last_rx: jiffies of last rx activity
+ * @head_seq_num: head sequence number in reordering buffer.
+ * @stored_mpdu_num: number of MPDUs in reordering buffer
+ * @ssn: Starting Sequence Number expected to be aggregated.
+ * @buf_size: buffer size for incoming A-MPDUs
+ * @timeout: reset timer value (in TUs).
+ * @dialog_token: dialog token for aggregation session
+ * @rcu_head: RCU head used for freeing this struct
+ * @reorder_lock: serializes access to reorder buffer, see below.
+ *
+ * This structure's lifetime is managed by RCU; assignments to
+ * the array holding it must hold the aggregation mutex.
+ *
+ * The @reorder_lock is used to protect the members of this
+ * struct, except for @timeout, @buf_size and @dialog_token,
+ * which are constant across the lifetime of the struct (the
+ * dialog token being used only for debugging).
+ */
+struct wil_tid_ampdu_rx {
+ spinlock_t reorder_lock; /* see above */
+ struct sk_buff **reorder_buf;
+ unsigned long *reorder_time;
+ struct timer_list session_timer;
+ struct timer_list reorder_timer;
+ unsigned long last_rx;
+ u16 head_seq_num;
+ u16 stored_mpdu_num;
+ u16 ssn;
+ u16 buf_size;
+ u16 timeout;
+ u8 dialog_token;
+};
+
struct wil6210_stats {
u64 tsf;
u32 snr;
@@ -226,6 +314,43 @@ struct wil6210_stats {
u16 peer_tx_sector;
};
+enum wil_sta_status {
+ wil_sta_unused = 0,
+ wil_sta_conn_pending = 1,
+ wil_sta_connected = 2,
+};
+
+#define WIL_STA_TID_NUM (16)
+
+struct wil_net_stats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long tx_errors;
+ unsigned long rx_dropped;
+ u16 last_mcs_rx;
+};
+
+/**
+ * struct wil_sta_info - data for peer
+ *
+ * A peer is identified by its CID (connection ID).
+ * The NIC performs beam forming for each peer;
+ * if no beam forming is done, frame exchange is not
+ * possible.
+ */
+struct wil_sta_info {
+ u8 addr[ETH_ALEN];
+ enum wil_sta_status status;
+ struct wil_net_stats stats;
+ bool data_port_open; /* can send any data, not only EAPOL */
+ /* Rx BACK */
+ struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
+ unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+ unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+};
+
struct wil6210_priv {
struct pci_dev *pdev;
int n_msi;
@@ -233,6 +358,7 @@ struct wil6210_priv {
void __iomem *csr;
ulong status;
u32 fw_version;
+ u32 hw_version;
u8 n_mids; /* number of additional MIDs as reported by FW */
/* profile */
u32 monitor_flags;
@@ -253,6 +379,7 @@ struct wil6210_priv {
struct workqueue_struct *wmi_wq_conn; /* for connect worker */
struct work_struct connect_worker;
struct work_struct disconnect_worker;
+ struct work_struct fw_error_worker; /* for FW error recovery */
struct timer_list connect_timer;
int pending_connect_cid;
struct list_head pending_wmi_ev;
@@ -267,7 +394,9 @@ struct wil6210_priv {
/* DMA related */
struct vring vring_rx;
struct vring vring_tx[WIL6210_MAX_TX_RINGS];
- u8 dst_addr[WIL6210_MAX_TX_RINGS][ETH_ALEN];
+ struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
+ u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
+ struct wil_sta_info sta[WIL6210_MAX_CID];
/* scan */
struct cfg80211_scan_request *scan_request;
@@ -329,11 +458,13 @@ void wil_if_remove(struct wil6210_priv *wil);
int wil_priv_init(struct wil6210_priv *wil);
void wil_priv_deinit(struct wil6210_priv *wil);
int wil_reset(struct wil6210_priv *wil);
+void wil_fw_error_recovery(struct wil6210_priv *wil);
void wil_link_on(struct wil6210_priv *wil);
void wil_link_off(struct wil6210_priv *wil);
int wil_up(struct wil6210_priv *wil);
int wil_down(struct wil6210_priv *wil);
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
+int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
@@ -357,8 +488,11 @@ int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
+int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason);
+void wil6210_clear_irq(struct wil6210_priv *wil);
int wil6210_init_irq(struct wil6210_priv *wil, int irq);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
void wil6210_disable_irq(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 063963ee422a..2ba56eef0c45 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -307,14 +307,14 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
u32 freq = ieee80211_channel_to_frequency(ch_no,
IEEE80211_BAND_60GHZ);
struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
- /* TODO convert LE to CPU */
- s32 signal = 0; /* TODO */
+ s32 signal = data->info.sqi;
__le16 fc = rx_mgmt_frame->frame_control;
u32 d_len = le32_to_cpu(data->info.len);
u16 d_status = le16_to_cpu(data->info.status);
- wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
- data->info.channel, data->info.mcs, data->info.snr);
+ wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
+ data->info.channel, data->info.mcs, data->info.snr,
+ data->info.sqi);
wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
@@ -384,6 +384,11 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
evt->assoc_req_len, evt->assoc_resp_len);
return;
}
+ if (evt->cid >= WIL6210_MAX_CID) {
+ wil_err(wil, "Connect CID invalid : %d\n", evt->cid);
+ return;
+ }
+
ch = evt->channel + 1;
wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
evt->bssid, ch, evt->cid);
@@ -439,7 +444,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
/* FIXME FW can transmit only ucast frames to peer */
/* FIXME real ring_id instead of hard coded 0 */
- memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
+ memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN);
+ wil->sta[evt->cid].status = wil_sta_conn_pending;
wil->pending_connect_cid = evt->cid;
queue_work(wil->wmi_wq_conn, &wil->connect_worker);
@@ -456,7 +462,9 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
wil->sinfo_gen++;
+ mutex_lock(&wil->mutex);
wil6210_disconnect(wil, evt->bssid);
+ mutex_unlock(&wil->mutex);
}
static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
@@ -476,11 +484,11 @@ static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n"
- "BF status 0x%08x SNR 0x%08x\n"
+ "BF status 0x%08x SNR 0x%08x SQI %d%%\n"
"Tx Tpt %d goodput %d Rx goodput %d\n"
"Sectors(rx:tx) my %d:%d peer %d:%d\n",
wil->stats.bf_mcs, wil->stats.tsf, evt->status,
- wil->stats.snr, le32_to_cpu(evt->tx_tpt),
+ wil->stats.snr, evt->sqi, le32_to_cpu(evt->tx_tpt),
le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
wil->stats.my_rx_sector, wil->stats.my_tx_sector,
wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
@@ -499,10 +507,16 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
int sz = eapol_len + ETH_HLEN;
struct sk_buff *skb;
struct ethhdr *eth;
+ int cid;
+ struct wil_net_stats *stats = NULL;
wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
evt->src_mac);
+ cid = wil_find_cid(wil, evt->src_mac);
+ if (cid >= 0)
+ stats = &wil->sta[cid].stats;
+
if (eapol_len > 196) { /* TODO: revisit size limit */
wil_err(wil, "EAPOL too large\n");
return;
@@ -513,6 +527,7 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
wil_err(wil, "Failed to allocate skb\n");
return;
}
+
eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
@@ -521,9 +536,15 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
skb->protocol = eth_type_trans(skb, ndev);
if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
+ ndev->stats.rx_bytes += sz;
+ if (stats) {
+ stats->rx_packets++;
+ stats->rx_bytes += sz;
+ }
} else {
ndev->stats.rx_dropped++;
+ if (stats)
+ stats->rx_dropped++;
}
}
@@ -531,9 +552,16 @@ static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wmi_data_port_open_event *evt = d;
+ u8 cid = evt->cid;
- wil_dbg_wmi(wil, "Link UP for CID %d\n", evt->cid);
+ wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
+ if (cid >= ARRAY_SIZE(wil->sta)) {
+ wil_err(wil, "Link UP for invalid CID %d\n", cid);
+ return;
+ }
+
+ wil->sta[cid].data_port_open = true;
netif_carrier_on(ndev);
}
@@ -541,10 +569,17 @@ static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wmi_wbe_link_down_event *evt = d;
+ u8 cid = evt->cid;
wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
- evt->cid, le32_to_cpu(evt->reason));
+ cid, le32_to_cpu(evt->reason));
+
+ if (cid >= ARRAY_SIZE(wil->sta)) {
+ wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
+ return;
+ }
+ wil->sta[cid].data_port_open = false;
netif_carrier_off(ndev);
}
@@ -552,10 +587,42 @@ static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
int len)
{
struct wmi_vring_ba_status_event *evt = d;
+ struct wil_sta_info *sta;
+ uint i, cid;
+
+ /* TODO: use Rx BA status, not Tx one */
wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
- evt->ringid, evt->status ? "N/A" : "OK", evt->agg_wsize,
- __le16_to_cpu(evt->ba_timeout));
+ evt->ringid,
+ evt->status == WMI_BA_AGREED ? "OK" : "N/A",
+ evt->agg_wsize, __le16_to_cpu(evt->ba_timeout));
+
+ if (evt->ringid >= WIL6210_MAX_TX_RINGS) {
+ wil_err(wil, "invalid ring id %d\n", evt->ringid);
+ return;
+ }
+
+ cid = wil->vring2cid_tid[evt->ringid][0];
+ if (cid >= WIL6210_MAX_CID) {
+ wil_err(wil, "invalid CID %d for vring %d\n", cid, evt->ringid);
+ return;
+ }
+
+ sta = &wil->sta[cid];
+ if (sta->status == wil_sta_unused) {
+ wil_err(wil, "CID %d unused\n", cid);
+ return;
+ }
+
+ wil_dbg_wmi(wil, "BACK for CID %d %pM\n", cid, sta->addr);
+ for (i = 0; i < WIL_STA_TID_NUM; i++) {
+ struct wil_tid_ampdu_rx *r = sta->tid_rx[i];
+ sta->tid_rx[i] = NULL;
+ wil_tid_ampdu_rx_free(wil, r);
+ if ((evt->status == WMI_BA_AGREED) && evt->agg_wsize)
+ sta->tid_rx[i] = wil_tid_ampdu_rx_alloc(wil,
+ evt->agg_wsize, 0);
+ }
}
static const struct {
@@ -893,6 +960,38 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
return rc;
}
+/**
+ * wmi_rxon - turn radio on/off
+ * @on: turn on if true, off otherwise
+ *
+ * Only switches the radio; the channel should be set separately.
+ * There is no timeout for rxon - the radio stays on until some other call
+ * turns it off.
+ */
+int wmi_rxon(struct wil6210_priv *wil, bool on)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_listen_started_event evt;
+ } __packed reply;
+
+ wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off");
+
+ if (on) {
+ rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
+ WMI_LISTEN_STARTED_EVENTID,
+ &reply, sizeof(reply), 100);
+ if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS))
+ rc = -EINVAL;
+ } else {
+ rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0,
+ WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 20);
+ }
+
+ return rc;
+}
+
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
{
struct wireless_dev *wdev = wil->wdev;
@@ -906,6 +1005,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
},
.mid = 0, /* TODO - what is it? */
.decap_trans_type = WMI_DECAP_TYPE_802_3,
+ .reorder_type = WMI_RX_SW_REORDER,
};
struct {
struct wil6210_mbox_hdr_wmi wmi;
@@ -973,6 +1073,18 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
return 0;
}
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
+{
+ struct wmi_disconnect_sta_cmd cmd = {
+ .disconnect_reason = cpu_to_le16(reason),
+ };
+ memcpy(cmd.dst_mac, mac, ETH_ALEN);
+
+ wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
+
+ return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
+}
+
void wmi_event_flush(struct wil6210_priv *wil)
{
struct pending_wmi_event *evt, *t;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index bf93ea859f2d..1fe41af81a59 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -67,7 +67,7 @@
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
-#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
#include "atmel.h"
#define DRIVER_MAJOR 0
@@ -2273,7 +2273,7 @@ static int atmel_set_freq(struct net_device *dev,
/* Hack to fall through... */
fwrq->e = 0;
- fwrq->m = ieee80211_freq_to_dsss_chan(f);
+ fwrq->m = ieee80211_frequency_to_channel(f);
}
/* Setting by channel number */
if ((fwrq->m > 1000) || (fwrq->e > 0))
@@ -2434,8 +2434,8 @@ static int atmel_get_range(struct net_device *dev,
range->freq[k].i = i; /* List index */
/* Values in MHz -> * 10^5 * 10 */
- range->freq[k].m = (ieee80211_dsss_chan_to_freq(i) *
- 100000);
+ range->freq[k].m = 100000 *
+ ieee80211_channel_to_frequency(i, IEEE80211_BAND_2GHZ);
range->freq[k++].e = 1;
}
range->num_frequency = k;
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 51ff0b198d0a..088d544ec63f 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -92,7 +92,7 @@ config B43_SDIO
# if we can do DMA.
config B43_BCMA_PIO
bool
- depends on B43_BCMA
+ depends on B43 && B43_BCMA
select BCMA_BLOCKIO
default y
diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h
index 822aad8842f4..50517b801cb4 100644
--- a/drivers/net/wireless/b43/debugfs.h
+++ b/drivers/net/wireless/b43/debugfs.h
@@ -86,7 +86,7 @@ void b43_debugfs_log_txstat(struct b43_wldev *dev,
static inline bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature)
{
- return 0;
+ return false;
}
static inline void b43_debugfs_init(void)
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c75237eb55a1..69fc3d65531a 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1549,7 +1549,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
- len = min((size_t) dev->wl->current_beacon->len,
+ len = min_t(size_t, dev->wl->current_beacon->len,
0x200 - sizeof(struct b43_plcp_hdr6));
rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index abac25ee958d..f476fc337d64 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -58,41 +58,6 @@ enum b43_verbosity {
#endif
};
-
-/* Lightweight function to convert a frequency (in Mhz) to a channel number. */
-static inline u8 b43_freq_to_channel_5ghz(int freq)
-{
- return ((freq - 5000) / 5);
-}
-static inline u8 b43_freq_to_channel_2ghz(int freq)
-{
- u8 channel;
-
- if (freq == 2484)
- channel = 14;
- else
- channel = (freq - 2407) / 5;
-
- return channel;
-}
-
-/* Lightweight function to convert a channel number to a frequency (in Mhz). */
-static inline int b43_channel_to_freq_5ghz(u8 channel)
-{
- return (5000 + (5 * channel));
-}
-static inline int b43_channel_to_freq_2ghz(u8 channel)
-{
- int freq;
-
- if (channel == 14)
- freq = 2484;
- else
- freq = 2407 + (5 * channel);
-
- return freq;
-}
-
static inline int b43_is_cck_rate(int rate)
{
return (rate == B43_CCK_RATE_1MB ||
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index f01676ac481b..dbaa51890198 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -133,9 +133,9 @@ void b43_phy_exit(struct b43_wldev *dev)
bool b43_has_hardware_pctl(struct b43_wldev *dev)
{
if (!dev->phy.hardware_power_control)
- return 0;
+ return false;
if (!dev->phy.ops->supports_hwpctl)
- return 0;
+ return false;
return dev->phy.ops->supports_hwpctl(dev);
}
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index a73ff8c9deb5..a4ff5e2a42b9 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -637,7 +637,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
- return 0;
+ return false;
b43_piorx_write32(q, B43_PIO8_RXCTL,
B43_PIO8_RXCTL_FRAMERDY);
for (i = 0; i < 10; i++) {
@@ -651,7 +651,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
- return 0;
+ return false;
b43_piorx_write16(q, B43_PIO_RXCTL,
B43_PIO_RXCTL_FRAMERDY);
for (i = 0; i < 10; i++) {
@@ -662,7 +662,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
}
}
b43dbg(q->dev->wl, "PIO RX timed out\n");
- return 1;
+ return true;
data_ready:
/* Get the preamble (RX header) */
@@ -759,7 +759,7 @@ data_ready:
b43_rx(q->dev, skb, rxhdr);
- return 1;
+ return true;
rx_error:
if (err_msg)
@@ -769,7 +769,7 @@ rx_error:
else
b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
- return 1;
+ return true;
}
void b43_pio_rx(struct b43_pio_rxqueue *q)
diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c
index 8e8431d4eb0c..3190493bd07f 100644
--- a/drivers/net/wireless/b43/sysfs.c
+++ b/drivers/net/wireless/b43/sysfs.c
@@ -40,7 +40,7 @@ static int get_integer(const char *buf, size_t count)
if (count == 0)
goto out;
- count = min(count, (size_t) 10);
+ count = min_t(size_t, count, 10);
memcpy(tmp, buf, count);
ret = simple_strtol(tmp, NULL, 10);
out:
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 50e5ddb12fb3..31adb8cf0291 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -337,7 +337,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
/* iv16 */
memcpy(txhdr->iv + 10, ((u8 *) wlhdr) + wlhdr_len, 3);
} else {
- iv_len = min((size_t) info->control.hw_key->iv_len,
+ iv_len = min_t(size_t, info->control.hw_key->iv_len,
ARRAY_SIZE(txhdr->iv));
memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len);
}
@@ -806,7 +806,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
B43_WARN_ON(1);
/* FIXME: We don't really know which value the "chanid" contains.
* So the following assignment might be wrong. */
- status.freq = b43_channel_to_freq_5ghz(chanid);
+ status.freq =
+ ieee80211_channel_to_frequency(chanid, status.band);
break;
case B43_PHYTYPE_G:
status.band = IEEE80211_BAND_2GHZ;
@@ -819,13 +820,12 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
case B43_PHYTYPE_HT:
/* chanid is the SHM channel cookie. Which is the plain
* channel number in b43. */
- if (chanstat & B43_RX_CHAN_5GHZ) {
+ if (chanstat & B43_RX_CHAN_5GHZ)
status.band = IEEE80211_BAND_5GHZ;
- status.freq = b43_channel_to_freq_5ghz(chanid);
- } else {
+ else
status.band = IEEE80211_BAND_2GHZ;
- status.freq = b43_channel_to_freq_2ghz(chanid);
- }
+ status.freq =
+ ieee80211_channel_to_frequency(chanid, status.band);
break;
default:
B43_WARN_ON(1);
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 349c77605231..1aec2146a2bf 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -978,7 +978,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
- len = min((size_t)dev->wl->current_beacon->len,
+ len = min_t(size_t, dev->wl->current_beacon->len,
0x200 - sizeof(struct b43legacy_plcp_hdr6));
rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
@@ -1155,7 +1155,7 @@ static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev,
b43legacy_write_probe_resp_plcp(dev, 0x350, size,
&b43legacy_b_ratetable[3]);
- size = min((size_t)size,
+ size = min_t(size_t, size,
0x200 - sizeof(struct b43legacy_plcp_hdr6));
b43legacy_write_template_common(dev, probe_resp_data,
size, ram_offset,
diff --git a/drivers/net/wireless/b43legacy/sysfs.c b/drivers/net/wireless/b43legacy/sysfs.c
index 57f8b089767c..2a1da15c913b 100644
--- a/drivers/net/wireless/b43legacy/sysfs.c
+++ b/drivers/net/wireless/b43legacy/sysfs.c
@@ -42,7 +42,7 @@ static int get_integer(const char *buf, size_t count)
if (count == 0)
goto out;
- count = min(count, (size_t)10);
+ count = min_t(size_t, count, 10);
memcpy(tmp, buf, count);
ret = simple_strtol(tmp, NULL, 10);
out:
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 86588c9ff0f2..34bf3f0b729f 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -254,7 +254,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
B43legacy_TX4_MAC_KEYALG_SHIFT) &
B43legacy_TX4_MAC_KEYALG;
wlhdr_len = ieee80211_hdrlen(wlhdr->frame_control);
- iv_len = min((size_t)info->control.hw_key->iv_len,
+ iv_len = min_t(size_t, info->control.hw_key->iv_len,
ARRAY_SIZE(txhdr->iv));
memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
} else {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 57cddee03252..1d2ceac3a221 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -24,6 +24,7 @@ ccflags-y += -D__CHECK_ENDIAN__
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
wl_cfg80211.o \
+ chip.o \
fwil.o \
fweh.o \
fwsignal.o \
@@ -36,8 +37,7 @@ brcmfmac-objs += \
btcoex.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
dhd_sdio.o \
- bcmsdh.o \
- sdio_chip.o
+ bcmsdh.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMDBG) += \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index fa35b23bbaa7..a16e644e7c08 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -43,7 +43,6 @@
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
-#include "sdio_chip.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
@@ -54,6 +53,12 @@
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY 3000
+#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
+#define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */
+
+static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
+module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
+MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
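Since the limit is exposed via module_param_named(), it can presumably be tuned at load time; for example (assuming the SDIO glue is built into the brcmfmac module, as in the Makefile change above), something like "modprobe brcmfmac txglomsz=16" would cap tx glom chains at 16 frames.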
static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
@@ -264,26 +269,17 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
break;
}
- if (ret) {
- /*
- * SleepCSR register access can fail when
- * waking up the device so reduce this noise
- * in the logs.
- */
- if (addr != SBSDIO_FUNC1_SLEEPCSR)
- brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
- write ? "write" : "read", fn, addr, ret);
- else
- brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
- write ? "write" : "read", fn, addr, ret);
- }
+ if (ret)
+ brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+ write ? "write" : "read", fn, addr, ret);
+
return ret;
}
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
u8 regsz, void *data, bool write)
{
- u8 func_num;
+ u8 func;
s32 retry = 0;
int ret;
@@ -297,9 +293,9 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
* The rest: function 1 silicon backplane core registers
*/
if ((addr & ~REG_F0_REG_MASK) == 0)
- func_num = SDIO_FUNC_0;
+ func = SDIO_FUNC_0;
else
- func_num = SDIO_FUNC_1;
+ func = SDIO_FUNC_1;
do {
if (!write)
@@ -307,16 +303,26 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
/* for retry wait for 1 ms till bus get settled down */
if (retry)
usleep_range(1000, 2000);
- ret = brcmf_sdiod_request_data(sdiodev, func_num, addr, regsz,
+ ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
data, write);
} while (ret != 0 && ret != -ENOMEDIUM &&
retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
if (ret == -ENOMEDIUM)
brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
- else if (ret != 0)
- brcmf_err("failed with %d\n", ret);
-
+ else if (ret != 0) {
+ /*
+ * SleepCSR register access can fail when
+ * waking up the device so reduce this noise
+ * in the logs.
+ */
+ if (addr != SBSDIO_FUNC1_SLEEPCSR)
+ brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
+ write ? "write" : "read", func, addr, ret);
+ else
+ brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+ write ? "write" : "read", func, addr, ret);
+ }
return ret;
}
@@ -488,7 +494,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
struct mmc_request mmc_req;
struct mmc_command mmc_cmd;
struct mmc_data mmc_dat;
- struct sg_table st;
struct scatterlist *sgl;
int ret = 0;
@@ -533,16 +538,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
pkt_offset = 0;
pkt_next = target_list->next;
- if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto exit;
- }
-
memset(&mmc_req, 0, sizeof(struct mmc_request));
memset(&mmc_cmd, 0, sizeof(struct mmc_command));
memset(&mmc_dat, 0, sizeof(struct mmc_data));
- mmc_dat.sg = st.sgl;
+ mmc_dat.sg = sdiodev->sgtable.sgl;
mmc_dat.blksz = func_blk_sz;
mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
mmc_cmd.opcode = SD_IO_RW_EXTENDED;
@@ -558,7 +558,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
while (seg_sz) {
req_sz = 0;
sg_cnt = 0;
- sgl = st.sgl;
+ sgl = sdiodev->sgtable.sgl;
/* prep sg table */
while (pkt_next != (struct sk_buff *)target_list) {
pkt_data = pkt_next->data + pkt_offset;
@@ -640,7 +640,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
}
exit:
- sg_free_table(&st);
+ sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
brcmu_pkt_buf_free_skb(pkt_next);
@@ -827,7 +827,7 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
}
if (!write)
memcpy(data, pkt->data, dsize);
- skb_trim(pkt, dsize);
+ skb_trim(pkt, 0);
/* Adjust for next transfer (if any) */
size -= dsize;
@@ -864,6 +864,29 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
return 0;
}
+static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
+{
+ uint nents;
+ int err;
+
+ if (!sdiodev->sg_support)
+ return;
+
+ nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, brcmf_sdiod_txglomsz);
+ nents += (nents >> 4) + 1;
+
+ WARN_ON(nents > sdiodev->max_segment_count);
+
+ brcmf_dbg(TRACE, "nents=%d\n", nents);
+ err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
+ if (err < 0) {
+ brcmf_err("allocation failed: disable scatter-gather");
+ sdiodev->sg_support = false;
+ }
+
+ sdiodev->txglomsz = brcmf_sdiod_txglomsz;
+}
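Worked example of the sizing above: with the default txglomsz of 32 and BRCMF_DEFAULT_RXGLOM_SIZE of 32, nents starts at max(32, 32) = 32, the (nents >> 4) + 1 headroom adds 3, and the table is allocated with 35 scatterlist entries.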
+
static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
if (sdiodev->bus) {
@@ -881,6 +904,7 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
sdio_disable_func(sdiodev->func[1]);
sdio_release_host(sdiodev->func[1]);
+ sg_free_table(&sdiodev->sgtable);
sdiodev->sbwad = 0;
return 0;
@@ -936,6 +960,11 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
SG_MAX_SINGLE_ALLOC);
sdiodev->max_segment_size = host->max_seg_size;
+ /* allocate scatter-gather table. sg support
+ * will be disabled upon allocation failure.
+ */
+ brcmf_sdiod_sgtable_alloc(sdiodev);
+
/* try to attach to the target device */
sdiodev->bus = brcmf_sdio_probe(sdiodev);
if (!sdiodev->bus) {
@@ -960,6 +989,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
SDIO_DEVICE_ID_BROADCOM_4335_4339)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4354)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -1073,9 +1103,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
int ret = 0;
- brcmf_dbg(SDIO, "\n");
-
- atomic_set(&sdiodev->suspend, true);
+ brcmf_dbg(SDIO, "Enter\n");
sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
@@ -1083,9 +1111,12 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
return -EINVAL;
}
+ atomic_set(&sdiodev->suspend, true);
+
ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
if (ret) {
brcmf_err("Failed to set pm_flags\n");
+ atomic_set(&sdiodev->suspend, false);
return ret;
}
@@ -1099,6 +1130,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ brcmf_dbg(SDIO, "Enter\n");
brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
atomic_set(&sdiodev->suspend, false);
return 0;
@@ -1115,14 +1147,15 @@ static struct sdio_driver brcmf_sdmmc_driver = {
.remove = brcmf_ops_sdio_remove,
.name = BRCMFMAC_SDIO_PDATA_NAME,
.id_table = brcmf_sdmmc_ids,
-#ifdef CONFIG_PM_SLEEP
.drv = {
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
.pm = &brcmf_sdio_pm_ops,
- },
#endif /* CONFIG_PM_SLEEP */
+ },
};
-static int brcmf_sdio_pd_probe(struct platform_device *pdev)
+static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
{
brcmf_dbg(SDIO, "Enter\n");
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
new file mode 100644
index 000000000000..df130ef53d1c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -0,0 +1,1034 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
+
+#include <defs.h>
+#include <soc.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <chipcommon.h>
+#include "dhd_dbg.h"
+#include "chip.h"
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0
+#define SOCI_AI 1
+
+/* PL-368 DMP definitions */
+#define DMP_DESC_TYPE_MSK 0x0000000F
+#define DMP_DESC_EMPTY 0x00000000
+#define DMP_DESC_VALID 0x00000001
+#define DMP_DESC_COMPONENT 0x00000001
+#define DMP_DESC_MASTER_PORT 0x00000003
+#define DMP_DESC_ADDRESS 0x00000005
+#define DMP_DESC_ADDRSIZE_GT32 0x00000008
+#define DMP_DESC_EOT 0x0000000F
+
+#define DMP_COMP_DESIGNER 0xFFF00000
+#define DMP_COMP_DESIGNER_S 20
+#define DMP_COMP_PARTNUM 0x000FFF00
+#define DMP_COMP_PARTNUM_S 8
+#define DMP_COMP_CLASS 0x000000F0
+#define DMP_COMP_CLASS_S 4
+#define DMP_COMP_REVISION 0xFF000000
+#define DMP_COMP_REVISION_S 24
+#define DMP_COMP_NUM_SWRAP 0x00F80000
+#define DMP_COMP_NUM_SWRAP_S 19
+#define DMP_COMP_NUM_MWRAP 0x0007C000
+#define DMP_COMP_NUM_MWRAP_S 14
+#define DMP_COMP_NUM_SPORT 0x00003E00
+#define DMP_COMP_NUM_SPORT_S 9
+#define DMP_COMP_NUM_MPORT 0x000001F0
+#define DMP_COMP_NUM_MPORT_S 4
+
+#define DMP_MASTER_PORT_UID 0x0000FF00
+#define DMP_MASTER_PORT_UID_S 8
+#define DMP_MASTER_PORT_NUM 0x000000F0
+#define DMP_MASTER_PORT_NUM_S 4
+
+#define DMP_SLAVE_ADDR_BASE 0xFFFFF000
+#define DMP_SLAVE_ADDR_BASE_S 12
+#define DMP_SLAVE_PORT_NUM 0x00000F00
+#define DMP_SLAVE_PORT_NUM_S 8
+#define DMP_SLAVE_TYPE 0x000000C0
+#define DMP_SLAVE_TYPE_S 6
+#define DMP_SLAVE_TYPE_SLAVE 0
+#define DMP_SLAVE_TYPE_BRIDGE 1
+#define DMP_SLAVE_TYPE_SWRAP 2
+#define DMP_SLAVE_TYPE_MWRAP 3
+#define DMP_SLAVE_SIZE_TYPE 0x00000030
+#define DMP_SLAVE_SIZE_TYPE_S 4
+#define DMP_SLAVE_SIZE_4K 0
+#define DMP_SLAVE_SIZE_8K 1
+#define DMP_SLAVE_SIZE_16K 2
+#define DMP_SLAVE_SIZE_DESC 3
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+
+/* ARM CR4 core specific control flag bits */
+#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
+
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
+#define D11_BCMA_IOCTL_PHYRESET 0x0008
+
+/* chip core base & ramsize */
+/* bcm4329 */
+/* SDIO device core, ID 0x829 */
+#define BCM4329_CORE_BUS_BASE 0x18011000
+/* internal memory core, ID 0x80e */
+#define BCM4329_CORE_SOCRAM_BASE 0x18003000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM4329_CORE_ARM_BASE 0x18002000
+#define BCM4329_RAMSIZE 0x48000
+
+/* bcm43143 */
+/* SDIO device core */
+#define BCM43143_CORE_BUS_BASE 0x18002000
+/* internal memory core */
+#define BCM43143_CORE_SOCRAM_BASE 0x18004000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM43143_CORE_ARM_BASE 0x18003000
+#define BCM43143_RAMSIZE 0x70000
+
+#define CORE_SB(base, field) \
+ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+ ((sbidh) & SSB_IDHIGH_RCLO))
+
+struct sbconfig {
+ u32 PAD[2];
+ u32 sbipsflag; /* initiator port ocp slave flag */
+ u32 PAD[3];
+ u32 sbtpsflag; /* target port ocp slave flag */
+ u32 PAD[11];
+ u32 sbtmerrloga; /* (sonics >= 2.3) */
+ u32 PAD;
+ u32 sbtmerrlog; /* (sonics >= 2.3) */
+ u32 PAD[3];
+ u32 sbadmatch3; /* address match3 */
+ u32 PAD;
+ u32 sbadmatch2; /* address match2 */
+ u32 PAD;
+ u32 sbadmatch1; /* address match1 */
+ u32 PAD[7];
+ u32 sbimstate; /* initiator agent state */
+ u32 sbintvec; /* interrupt mask */
+ u32 sbtmstatelow; /* target state */
+ u32 sbtmstatehigh; /* target state */
+ u32 sbbwa0; /* bandwidth allocation table0 */
+ u32 PAD;
+ u32 sbimconfiglow; /* initiator configuration */
+ u32 sbimconfighigh; /* initiator configuration */
+ u32 sbadmatch0; /* address match0 */
+ u32 PAD;
+ u32 sbtmconfiglow; /* target configuration */
+ u32 sbtmconfighigh; /* target configuration */
+ u32 sbbconfig; /* broadcast configuration */
+ u32 PAD;
+ u32 sbbstate; /* broadcast state */
+ u32 PAD[3];
+ u32 sbactcnfg; /* activate configuration */
+ u32 PAD[3];
+ u32 sbflagst; /* current sbflags */
+ u32 PAD[3];
+ u32 sbidlow; /* identification */
+ u32 sbidhigh; /* identification */
+};
+
+struct brcmf_core_priv {
+ struct brcmf_core pub;
+ u32 wrapbase;
+ struct list_head list;
+ struct brcmf_chip_priv *chip;
+};
+
+/* ARM CR4 core specific control flag bits */
+#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
+
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
+#define D11_BCMA_IOCTL_PHYRESET 0x0008
+
+struct brcmf_chip_priv {
+ struct brcmf_chip pub;
+ const struct brcmf_buscore_ops *ops;
+ void *ctx;
+ /* assured first core is chipcommon, second core is buscore */
+ struct list_head cores;
+ u16 num_cores;
+
+ bool (*iscoreup)(struct brcmf_core_priv *core);
+ void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset);
+ void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
+ u32 postreset);
+};
+
+static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
+ struct brcmf_core *core)
+{
+ u32 regdata;
+
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
+ core->rev = SBCOREREV(regdata);
+}
+
+static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ u32 address;
+
+ ci = core->chip;
+ address = CORE_SB(core->pub.base, sbtmstatelow);
+ regdata = ci->ops->read32(ci->ctx, address);
+ regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+ SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+ return SSB_TMSLOW_CLOCK == regdata;
+}
+
+static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ bool ret;
+
+ ci = core->chip;
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+ ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+ ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+
+ return ret;
+}
+
+static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
+ u32 prereset, u32 reset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 val, base;
+
+ ci = core->chip;
+ base = core->pub.base;
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ if (val & SSB_TMSLOW_RESET)
+ return;
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ if ((val & SSB_TMSLOW_CLOCK) != 0) {
+ /*
+ * set target reject and spin until busy is clear
+ * (preserve core-specific bits)
+ */
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ val | SSB_TMSLOW_REJECT);
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+ SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
+ & SSB_TMSHIGH_BUSY), 100000);
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+ if (val & SSB_TMSHIGH_BUSY)
+ brcmf_err("core state still busy\n");
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+ if (val & SSB_IDLOW_INITIATOR) {
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ val |= SSB_IMSTATE_REJECT;
+ ci->ops->write32(ci->ctx,
+ CORE_SB(base, sbimstate), val);
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ udelay(1);
+ SPINWAIT((ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate)) &
+ SSB_IMSTATE_BUSY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(10);
+
+ /* clear the initiator reject bit */
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+ if (val & SSB_IDLOW_INITIATOR) {
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ val &= ~SSB_IMSTATE_REJECT;
+ ci->ops->write32(ci->ctx,
+ CORE_SB(base, sbimstate), val);
+ }
+ }
+
+ /* leave reset and reject asserted */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ udelay(1);
+}
+
+static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
+ u32 prereset, u32 reset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+
+ ci = core->chip;
+
+ /* if core is already in reset, just return */
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+ if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+ return;
+
+ /* configure reset */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+
+ /* put in reset */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
+ BCMA_RESET_CTL_RESET);
+ usleep_range(10, 20);
+
+ /* wait till reset is 1 */
+ SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
+ BCMA_RESET_CTL_RESET, 300);
+
+ /* in-reset configure */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset, u32 postreset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ u32 base;
+
+ ci = core->chip;
+ base = core->pub.base;
+ /*
+ * Must do the disable sequence first to work for
+ * arbitrary current core state.
+ */
+ brcmf_chip_sb_coredisable(core, 0, 0);
+
+ /*
+ * Now do the initialization sequence.
+ * set reset while enabling the clock and
+ * forcing them on throughout the core
+ */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_RESET);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+
+ /* clear any serror */
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+ if (regdata & SSB_TMSHIGH_SERR)
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);
+
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
+ if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
+ regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
+ }
+
+ /* clear reset and allow it to propagate throughout the core */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+
+ /* leave clock enabled */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_CLOCK);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+}
+
+static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset, u32 postreset)
+{
+ struct brcmf_chip_priv *ci;
+ int count;
+
+ ci = core->chip;
+
+ /* must disable first to work for arbitrary current core state */
+ brcmf_chip_ai_coredisable(core, prereset, reset);
+
+ count = 0;
+ while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
+ BCMA_RESET_CTL_RESET) {
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
+ count++;
+ if (count > 50)
+ break;
+ usleep_range(40, 60);
+ }
+
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ postreset | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static char *brcmf_chip_name(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
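The format heuristic above follows Broadcom's chip naming: ids that read as hex digits (e.g. BCM4329, id 0x4329, which falls between 0x4000 and 0xa000) are printed with "%x", while decimally named chips (e.g. BCM43143, id 43143 == 0xa887) fall outside that range and are printed with "%d".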
+
+static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
+ u16 coreid, u32 base,
+ u32 wrapbase)
+{
+ struct brcmf_core_priv *core;
+
+ core = kzalloc(sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return ERR_PTR(-ENOMEM);
+
+ core->pub.id = coreid;
+ core->pub.base = base;
+ core->chip = ci;
+ core->wrapbase = wrapbase;
+
+ list_add_tail(&core->list, &ci->cores);
+ return &core->pub;
+}
+
+#ifdef DEBUG
+/* safety check for chipinfo */
+static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core_priv *core;
+ bool need_socram = false;
+ bool has_socram = false;
+ int idx = 1;
+
+ list_for_each_entry(core, &ci->cores, list) {
+ brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
+ idx++, core->pub.id, core->pub.rev, core->pub.base,
+ core->wrapbase);
+
+ switch (core->pub.id) {
+ case BCMA_CORE_ARM_CM3:
+ need_socram = true;
+ break;
+ case BCMA_CORE_INTERNAL_MEM:
+ has_socram = true;
+ break;
+ case BCMA_CORE_ARM_CR4:
+ if (ci->pub.rambase == 0) {
+ brcmf_err("RAM base not provided with ARM CR4 core\n");
+ return -ENOMEM;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* check RAM core presence for ARM CM3 core */
+ if (need_socram && !has_socram) {
+ brcmf_err("RAM core not provided with ARM CM3 core\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+#else /* DEBUG */
+static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+{
+ return 0;
+}
+#endif
+
+static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+{
+ switch (ci->pub.chip) {
+ case BCM4329_CHIP_ID:
+ ci->pub.ramsize = BCM4329_RAMSIZE;
+ break;
+ case BCM43143_CHIP_ID:
+ ci->pub.ramsize = BCM43143_RAMSIZE;
+ break;
+ case BCM43241_CHIP_ID:
+ ci->pub.ramsize = 0x90000;
+ break;
+ case BCM4330_CHIP_ID:
+ ci->pub.ramsize = 0x48000;
+ break;
+ case BCM4334_CHIP_ID:
+ ci->pub.ramsize = 0x80000;
+ break;
+ case BCM4335_CHIP_ID:
+ ci->pub.ramsize = 0xc0000;
+ ci->pub.rambase = 0x180000;
+ break;
+ case BCM43362_CHIP_ID:
+ ci->pub.ramsize = 0x3c000;
+ break;
+ case BCM4339_CHIP_ID:
+ case BCM4354_CHIP_ID:
+ ci->pub.ramsize = 0xc0000;
+ ci->pub.rambase = 0x180000;
+ break;
+ default:
+ brcmf_err("unknown chip: %s\n", ci->pub.name);
+ break;
+ }
+}
+
+static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
+ u8 *type)
+{
+ u32 val;
+
+ /* read next descriptor */
+ val = ci->ops->read32(ci->ctx, *eromaddr);
+ *eromaddr += 4;
+
+ if (!type)
+ return val;
+
+ /* determine descriptor type */
+ *type = (val & DMP_DESC_TYPE_MSK);
+ if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
+ *type = DMP_DESC_ADDRESS;
+
+ return val;
+}
+
+static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
+ u32 *regbase, u32 *wrapbase)
+{
+ u8 desc;
+ u32 val;
+ u8 mpnum = 0;
+ u8 stype, sztype, wraptype;
+
+ *regbase = 0;
+ *wrapbase = 0;
+
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+ if (desc == DMP_DESC_MASTER_PORT) {
+ mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
+ wraptype = DMP_SLAVE_TYPE_MWRAP;
+ } else if (desc == DMP_DESC_ADDRESS) {
+ /* revert erom address */
+ *eromaddr -= 4;
+ wraptype = DMP_SLAVE_TYPE_SWRAP;
+ } else {
+ *eromaddr -= 4;
+ return -EILSEQ;
+ }
+
+ do {
+ /* locate address descriptor */
+ do {
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+ /* unexpected table end */
+ if (desc == DMP_DESC_EOT) {
+ *eromaddr -= 4;
+ return -EFAULT;
+ }
+ } while (desc != DMP_DESC_ADDRESS);
+
+ /* skip upper 32-bit address descriptor */
+ if (val & DMP_DESC_ADDRSIZE_GT32)
+ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+
+ sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;
+
+ /* next size descriptor can be skipped */
+ if (sztype == DMP_SLAVE_SIZE_DESC) {
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+ /* skip upper size descriptor if present */
+ if (val & DMP_DESC_ADDRSIZE_GT32)
+ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+ }
+
+ /* only look for 4K register regions */
+ if (sztype != DMP_SLAVE_SIZE_4K)
+ continue;
+
+ stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;
+
+ /* only regular slave and wrapper */
+ if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
+ *regbase = val & DMP_SLAVE_ADDR_BASE;
+ if (*wrapbase == 0 && stype == wraptype)
+ *wrapbase = val & DMP_SLAVE_ADDR_BASE;
+ } while (*regbase == 0 || *wrapbase == 0);
+
+ return 0;
+}
+
+static
+int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core *core;
+ u32 eromaddr;
+ u8 desc_type = 0;
+ u32 val;
+ u16 id;
+ u8 nmp, nsp, nmw, nsw, rev;
+ u32 base, wrap;
+ int err;
+
+ eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));
+
+ while (desc_type != DMP_DESC_EOT) {
+ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+ if (!(val & DMP_DESC_VALID))
+ continue;
+
+ if (desc_type == DMP_DESC_EMPTY)
+ continue;
+
+ /* need a component descriptor */
+ if (desc_type != DMP_DESC_COMPONENT)
+ continue;
+
+ id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;
+
+ /* next descriptor must be component as well */
+ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+ if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
+ return -EFAULT;
+
+ /* only look at cores with master port(s) */
+ nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
+ nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
+ nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
+ nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
+ rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
+
+ /* need core with ports */
+ if (nmw + nsw == 0)
+ continue;
+
+ /* try to obtain register address info */
+ err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
+ if (err)
+ continue;
+
+ /* finally a core to be added */
+ core = brcmf_chip_add_core(ci, id, base, wrap);
+ if (IS_ERR(core))
+ return PTR_ERR(core);
+
+ core->rev = rev;
+ }
+
+ return 0;
+}
+
+static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core *core;
+ u32 regdata;
+ u32 socitype;
+
+ /* Get CC core rev
+ * Chipid is assumed to be at offset 0 from SI_ENUM_BASE.
+ * For different chiptypes or old sdio hosts w/o chipcommon,
+ * other ways of recognition should be added here.
+ */
+ regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
+ ci->pub.chip = regdata & CID_ID_MASK;
+ ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+ socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+
+ brcmf_chip_name(ci->pub.chip, ci->pub.name, sizeof(ci->pub.name));
+ brcmf_dbg(INFO, "found %s chip: BCM%s, rev=%d\n",
+ socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name,
+ ci->pub.chiprev);
+
+ if (socitype == SOCI_SB) {
+ if (ci->pub.chip != BCM4329_CHIP_ID) {
+ brcmf_err("SB chip is not supported\n");
+ return -ENODEV;
+ }
+ ci->iscoreup = brcmf_chip_sb_iscoreup;
+ ci->coredisable = brcmf_chip_sb_coredisable;
+ ci->resetcore = brcmf_chip_sb_resetcore;
+
+ core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
+ SI_ENUM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
+ BCM4329_CORE_BUS_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
+ BCM4329_CORE_SOCRAM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
+ BCM4329_CORE_ARM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+
+ core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ } else if (socitype == SOCI_AI) {
+ ci->iscoreup = brcmf_chip_ai_iscoreup;
+ ci->coredisable = brcmf_chip_ai_coredisable;
+ ci->resetcore = brcmf_chip_ai_resetcore;
+
+ brcmf_chip_dmp_erom_scan(ci);
+ } else {
+ brcmf_err("chip backplane type %u is not supported\n",
+ socitype);
+ return -ENODEV;
+ }
+
+ brcmf_chip_get_raminfo(ci);
+
+ return brcmf_chip_cores_check(ci);
+}
+
+static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
+{
+ struct brcmf_core *core;
+ struct brcmf_core_priv *cr4;
+ u32 val;
+
+
+ core = brcmf_chip_get_core(&chip->pub, id);
+ if (!core)
+ return;
+
+ switch (id) {
+ case BCMA_CORE_ARM_CM3:
+ brcmf_chip_coredisable(core, 0, 0);
+ break;
+ case BCMA_CORE_ARM_CR4:
+ cr4 = container_of(core, struct brcmf_core_priv, pub);
+
+ /* clear all IOCTL bits except HALT bit */
+ val = chip->ops->read32(chip->ctx, cr4->wrapbase + BCMA_IOCTL);
+ val &= ARMCR4_BCMA_IOCTL_CPUHALT;
+ brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
+ ARMCR4_BCMA_IOCTL_CPUHALT);
+ break;
+ default:
+ brcmf_err("unknown id: %u\n", id);
+ break;
+ }
+}
+
+static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_chip *pub;
+ struct brcmf_core_priv *cc;
+ u32 base;
+ u32 val;
+ int ret = 0;
+
+ pub = &chip->pub;
+ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+ base = cc->pub.base;
+
+ /* get chipcommon capabilities */
+ pub->cc_caps = chip->ops->read32(chip->ctx,
+ CORE_CC_REG(base, capabilities));
+
+ /* get pmu caps & rev */
+ if (pub->cc_caps & CC_CAP_PMU) {
+ val = chip->ops->read32(chip->ctx,
+ CORE_CC_REG(base, pmucapabilities));
+ pub->pmurev = val & PCAP_REV_MASK;
+ pub->pmucaps = val;
+ }
+
+ brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
+ cc->pub.rev, pub->pmurev, pub->pmucaps);
+
+ /* execute bus core specific setup */
+ if (chip->ops->setup)
+ ret = chip->ops->setup(chip->ctx, pub);
+
+ /*
+ * Make sure any on-chip ARM is off (in case strapping is wrong
+ * or downloaded code was already running).
+ */
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
+ return ret;
+}
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+ const struct brcmf_buscore_ops *ops)
+{
+ struct brcmf_chip_priv *chip;
+ int err = 0;
+
+ if (WARN_ON(!ops->read32))
+ err = -EINVAL;
+ if (WARN_ON(!ops->write32))
+ err = -EINVAL;
+ if (WARN_ON(!ops->prepare))
+ err = -EINVAL;
+ if (WARN_ON(!ops->exit_dl))
+ err = -EINVAL;
+ if (err < 0)
+ return ERR_PTR(-EINVAL);
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&chip->cores);
+ chip->num_cores = 0;
+ chip->ops = ops;
+ chip->ctx = ctx;
+
+ err = ops->prepare(ctx);
+ if (err < 0)
+ goto fail;
+
+ err = brcmf_chip_recognition(chip);
+ if (err < 0)
+ goto fail;
+
+ err = brcmf_chip_setup(chip);
+ if (err < 0)
+ goto fail;
+
+ return &chip->pub;
+
+fail:
+ brcmf_chip_detach(&chip->pub);
+ return ERR_PTR(err);
+}
+
+void brcmf_chip_detach(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *core;
+ struct brcmf_core_priv *tmp;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ list_for_each_entry_safe(core, tmp, &chip->cores, list) {
+ list_del(&core->list);
+ kfree(core);
+ }
+ kfree(chip);
+}
+
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *core;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ list_for_each_entry(core, &chip->cores, list)
+ if (core->pub.id == coreid)
+ return &core->pub;
+
+ return NULL;
+}
+
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *cc;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+ if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
+ return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
+ return &cc->pub;
+}
+
+bool brcmf_chip_iscoreup(struct brcmf_core *pub)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ return core->chip->iscoreup(core);
+}
+
+void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ core->chip->coredisable(core, prereset, reset);
+}
+
+void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
+ u32 postreset)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ core->chip->resetcore(core, prereset, reset, postreset);
+}
+
+static void
+brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN);
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+ brcmf_chip_resetcore(core, 0, 0, 0);
+}
+
+static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+ if (!brcmf_chip_iscoreup(core)) {
+ brcmf_err("SOCRAM core is down after reset?\n");
+ return false;
+ }
+
+ chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
+ brcmf_chip_resetcore(core, 0, 0, 0);
+
+ return true;
+}
+
+static inline void
+brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN);
+}
+
+static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
+{
+ struct brcmf_core *core;
+
+ chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
+
+ /* restore ARM */
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
+ brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
+
+ return true;
+}
+
+void brcmf_chip_enter_download(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core *arm;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+ if (arm) {
+ brcmf_chip_cr4_enterdl(chip);
+ return;
+ }
+
+ brcmf_chip_cm3_enterdl(chip);
+}
+
+bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core *arm;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+ if (arm)
+ return brcmf_chip_cr4_exitdl(chip, rstvec);
+
+ return brcmf_chip_cm3_exitdl(chip);
+}
+
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
+{
+ u32 base, addr, reg, pmu_cc3_mask = ~0;
+ struct brcmf_chip_priv *chip;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* old chips with PMU version less than 17 don't support save restore */
+ if (pub->pmurev < 17)
+ return false;
+
+ base = brcmf_chip_get_chipcommon(pub)->base;
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+
+ switch (pub->chip) {
+ case BCM4354_CHIP_ID:
+ /* explicitly check SR engine enable bit */
+ pmu_cc3_mask = BIT(2);
+ /* fall-through */
+ case BCM43241_CHIP_ID:
+ case BCM4335_CHIP_ID:
+ case BCM4339_CHIP_ID:
+ /* read PMU chipcontrol register 3 */
+ addr = CORE_CC_REG(base, chipcontrol_addr);
+ chip->ops->write32(chip->ctx, addr, 3);
+ addr = CORE_CC_REG(base, chipcontrol_data);
+ reg = chip->ops->read32(chip->ctx, addr);
+ return (reg & pmu_cc3_mask) != 0;
+ default:
+ addr = CORE_CC_REG(base, pmucapabilities_ext);
+ reg = chip->ops->read32(chip->ctx, addr);
+ if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
+ return false;
+
+ addr = CORE_CC_REG(base, retention_ctl);
+ reg = chip->ops->read32(chip->ctx, addr);
+ return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+ PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+ }
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
new file mode 100644
index 000000000000..c32908da90c8
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMF_CHIP_H
+#define BRCMF_CHIP_H
+
+#include <linux/types.h>
+
+#define CORE_CC_REG(base, field) \
+ (base + offsetof(struct chipcregs, field))
+
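CORE_CC_REG() composes a backplane register address by adding the offset of a chipcommon register to the core base address. A minimal sketch with a two-field stand-in for struct chipcregs and an assumed enumeration base of 0x18000000:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct chipcregs {			/* stand-in for the real layout */
	uint32_t chipid;		/* offset 0x00 */
	uint32_t capabilities;		/* offset 0x04 */
};

#define CORE_CC_REG(base, field) \
	((base) + offsetof(struct chipcregs, field))

int main(void)
{
	uint32_t si_enum_base = 0x18000000;	/* assumed enumeration base */

	printf("chipid       at 0x%08x\n",
	       (unsigned int)CORE_CC_REG(si_enum_base, chipid));
	printf("capabilities at 0x%08x\n",
	       (unsigned int)CORE_CC_REG(si_enum_base, capabilities));
	return 0;
}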
+/**
+ * struct brcmf_chip - chip level information.
+ *
+ * @chip: chip identifier.
+ * @chiprev: chip revision.
+ * @cc_caps: chipcommon core capabilities.
+ * @pmucaps: PMU capabilities.
+ * @pmurev: PMU revision.
+ * @rambase: RAM base address (only applicable for ARM CR4 chips).
+ * @ramsize: amount of RAM on chip.
+ * @name: string representation of the chip identifier.
+ */
+struct brcmf_chip {
+ u32 chip;
+ u32 chiprev;
+ u32 cc_caps;
+ u32 pmucaps;
+ u32 pmurev;
+ u32 rambase;
+ u32 ramsize;
+ char name[8];
+};
+
+/**
+ * struct brcmf_core - core related information.
+ *
+ * @id: core identifier.
+ * @rev: core revision.
+ * @base: base address of core register space.
+ */
+struct brcmf_core {
+ u16 id;
+ u16 rev;
+ u32 base;
+};
+
+/**
+ * struct brcmf_buscore_ops - buscore specific callbacks.
+ *
+ * @read32: read 32-bit value over bus.
+ * @write32: write 32-bit value over bus.
+ * @prepare: prepare bus for core configuration.
+ * @setup: bus-specific core setup.
+ * @exit_dl: exit download state.
+ * The callback should use the provided @rstvec when non-zero.
+ */
+struct brcmf_buscore_ops {
+ u32 (*read32)(void *ctx, u32 addr);
+ void (*write32)(void *ctx, u32 addr, u32 value);
+ int (*prepare)(void *ctx);
+ int (*setup)(void *ctx, struct brcmf_chip *chip);
+ void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
+};
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+ const struct brcmf_buscore_ops *ops);
+void brcmf_chip_detach(struct brcmf_chip *chip);
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
+bool brcmf_chip_iscoreup(struct brcmf_core *core);
+void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
+void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
+ u32 postreset);
+void brcmf_chip_enter_download(struct brcmf_chip *ci);
+bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
+
+#endif /* BRCMF_CHIP_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index d4d966beb840..7d28cd385092 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1040,12 +1040,12 @@ void brcmf_detach(struct device *dev)
brcmf_cfg80211_detach(drvr->config);
+ brcmf_fws_deinit(drvr);
+
brcmf_bus_detach(drvr);
brcmf_proto_detach(drvr);
- brcmf_fws_deinit(drvr);
-
brcmf_debugfs_detach(drvr);
bus_if->drvr = NULL;
kfree(drvr);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index ddaa9efd053d..13c89a0c4ba7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/semaphore.h>
@@ -40,7 +41,7 @@
#include <brcm_hw_ids.h>
#include <soc.h>
#include "sdio_host.h"
-#include "sdio_chip.h"
+#include "chip.h"
#include "nvram.h"
#define DCMD_RESP_TIMEOUT 2000 /* in milliseconds */
@@ -112,8 +113,6 @@ struct rte_console {
#define BRCMF_TXBOUND 20 /* Default for max tx frames in
one scheduling */
-#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
-
#define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
#define MEMBLOCK 2048 /* Block size used for downloading
@@ -156,6 +155,34 @@ struct rte_console {
/* manfid tuple length, include tuple, link bytes */
#define SBSDIO_CIS_MANFID_TUPLE_LEN 6
+#define CORE_BUS_REG(base, field) \
+ (base + offsetof(struct sdpcmd_regs, field))
+
+/* SDIO function 1 register CHIPCLKCSR */
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP 0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT 0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP 0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ 0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ 0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL 0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL 0x80
+#define SBSDIO_CSR_MASK 0x1F
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) \
+ (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
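Taken together, SBSDIO_CLKAV() answers "is the clock I asked for available?": ALP availability is always required, and HT availability is additionally required unless alponly is set. A small sketch exercising exactly the macros added above on a CSR value where only ALP is ready:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define SBSDIO_ALP_AVAIL	0x40	/* status: ALP is ready */
#define SBSDIO_HT_AVAIL		0x80	/* status: HT is ready */
#define SBSDIO_AVBITS		(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
#define SBSDIO_ALPAV(regval)	((regval) & SBSDIO_AVBITS)
#define SBSDIO_HTAV(regval)	(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
#define SBSDIO_CLKAV(regval, alponly) \
	(SBSDIO_ALPAV(regval) && ((alponly) ? 1 : SBSDIO_HTAV(regval)))

int main(void)
{
	uint8_t csr = SBSDIO_ALP_AVAIL;		/* ALP up, HT still pending */

	printf("ALP-only request satisfied: %d\n", SBSDIO_CLKAV(csr, true) ? 1 : 0);
	printf("HT request satisfied:       %d\n", SBSDIO_CLKAV(csr, false) ? 1 : 0);
	return 0;
}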
/* intstatus */
#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
@@ -276,7 +303,6 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
-#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
* when idle
*/
@@ -433,10 +459,11 @@ struct brcmf_sdio {
bool alp_only; /* Don't use HT clock (ALP only) */
u8 *ctrl_frame_buf;
- u32 ctrl_frame_len;
+ u16 ctrl_frame_len;
bool ctrl_frame_stat;
- spinlock_t txqlock;
+ spinlock_t txq_lock; /* protect bus->txq */
+ struct semaphore tx_seq_lock; /* protect bus->tx_seq */
wait_queue_head_t ctrl_wait;
wait_queue_head_t dcmd_resp_wait;
@@ -483,16 +510,58 @@ static const uint max_roundup = 512;
#define ALIGNMENT 4
-static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
-module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
-MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
-
enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_NORMAL,
BRCMF_SDIO_FT_SUPER,
BRCMF_SDIO_FT_SUB,
};
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+ u8 strength; /* Pad Drive Strength in mA */
+ u8 sel; /* Chip-specific select value */
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
+ {6, 0x7},
+ {5, 0x6},
+ {4, 0x5},
+ {3, 0x4},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
+ {3, 0x3},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
+ {16, 0x7},
+ {12, 0x5},
+ {8, 0x3},
+ {4, 0x1}
+};
+
#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
@@ -511,6 +580,8 @@ enum brcmf_sdio_frmtype {
#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt"
#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
+#define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin"
+#define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt"
MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
@@ -530,6 +601,8 @@ MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
struct brcmf_firmware_names {
u32 chipid;
@@ -555,7 +628,8 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
{ BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
{ BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
- { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }
+ { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
+ { BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
};
@@ -618,27 +692,24 @@ static bool data_ok(struct brcmf_sdio *bus)
* Reads a register in the SDIO hardware block. This block occupies a series of
* addresses on the 32-bit backplane bus.
*/
-static int
-r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
+static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
{
- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ struct brcmf_core *core;
int ret;
- *regvar = brcmf_sdiod_regrl(bus->sdiodev,
- bus->ci->c_inf[idx].base + offset, &ret);
+ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
return ret;
}
-static int
-w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
+static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
{
- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ struct brcmf_core *core;
int ret;
- brcmf_sdiod_regwl(bus->sdiodev,
- bus->ci->c_inf[idx].base + reg_offset,
- regval, &ret);
+ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
return ret;
}
@@ -650,16 +721,12 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
int err = 0;
int try_cnt = 0;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(TRACE, "Enter: on=%d\n", on);
wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
/* 1st KSO write goes to AOS wake up core if device is asleep */
brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
wr_val, &err);
- if (err) {
- brcmf_err("SDIO_AOS KSO write error: %d\n", err);
- return err;
- }
if (on) {
/* device WAKEUP through KSO:
@@ -689,18 +756,22 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
&err);
if (((rd_val & bmask) == cmp_val) && !err)
break;
- brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
- try_cnt, MAX_KSO_ATTEMPTS, err);
+
udelay(KSO_WAIT_US);
brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
wr_val, &err);
} while (try_cnt++ < MAX_KSO_ATTEMPTS);
+ if (try_cnt > 2)
+ brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt,
+ rd_val, err);
+
+ if (try_cnt > MAX_KSO_ATTEMPTS)
+ brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
+
return err;
}
-#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
-
#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
/* Turn backplane clock on or off */
@@ -799,7 +870,6 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
}
#endif /* defined (DEBUG) */
- bus->activity = true;
} else {
clkreq = 0;
@@ -899,8 +969,9 @@ static int
brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
{
int err = 0;
- brcmf_dbg(TRACE, "Enter\n");
- brcmf_dbg(SDIO, "request %s currently %s\n",
+ u8 clkcsr;
+
+ brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
(sleep ? "SLEEP" : "WAKE"),
(bus->sleeping ? "SLEEP" : "WAKE"));
@@ -917,8 +988,20 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
atomic_read(&bus->ipend) > 0 ||
(!atomic_read(&bus->fcstate) &&
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
- data_ok(bus)))
- return -EBUSY;
+ data_ok(bus))) {
+ err = -EBUSY;
+ goto done;
+ }
+
+ clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
+ if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
+ brcmf_dbg(SDIO, "no clock, set ALP\n");
+ brcmf_sdiod_regwb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_ALP_AVAIL_REQ, &err);
+ }
err = brcmf_sdio_kso_control(bus, false);
/* disable watchdog */
if (!err)
@@ -935,7 +1018,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
} else {
brcmf_err("error while changing bus sleep state %d\n",
err);
- return err;
+ goto done;
}
}
@@ -947,11 +1030,92 @@ end:
} else {
brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
}
-
+done:
+ brcmf_dbg(SDIO, "Exit: err=%d\n", err);
return err;
}
+#ifdef DEBUG
+static inline bool brcmf_sdio_valid_shared_address(u32 addr)
+{
+ return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
+}
+
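The check rejects 0 and any word whose upper half equals the one's complement of its lower half, the layout assumed here for the NVRAM length token that the firmware download leaves at the end of RAM (see the comment below about the length being overwritten). A stand-alone sketch; the token layout and the sample address are illustrative:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool valid_shared_address(uint32_t addr)
{
	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}

int main(void)
{
	uint32_t len = 0x0123;				/* illustrative NVRAM length */
	uint32_t token = (~len << 16) | (len & 0xffff);	/* 0xfedc0123 */

	/* the length token is rejected, a plausible RAM address passes */
	printf("0x%08x -> %d\n", token, valid_shared_address(token));
	printf("0x%08x -> %d\n", 0x0018a530u, valid_shared_address(0x0018a530));
	return 0;
}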
+static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh)
+{
+ u32 addr;
+ int rv;
+ u32 shaddr = 0;
+ struct sdpcm_shared_le sh_le;
+ __le32 addr_le;
+
+ shaddr = bus->ci->rambase + bus->ramsize - 4;
+
+ /*
+ * Read last word in socram to determine
+ * address of sdpcm_shared structure
+ */
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_bus_sleep(bus, false, false);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
+ sdio_release_host(bus->sdiodev->func[1]);
+ if (rv < 0)
+ return rv;
+
+ addr = le32_to_cpu(addr_le);
+
+ brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
+
+ /*
+ * Check if addr is valid.
+ * NVRAM length at the end of memory should have been overwritten.
+ */
+ if (!brcmf_sdio_valid_shared_address(addr)) {
+ brcmf_err("invalid sdpcm_shared address 0x%08X\n",
+ addr);
+ return -EINVAL;
+ }
+
+ /* Read hndrte_shared structure */
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+ sizeof(struct sdpcm_shared_le));
+ if (rv < 0)
+ return rv;
+
+ /* Endianness */
+ sh->flags = le32_to_cpu(sh_le.flags);
+ sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
+ sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
+ sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
+ sh->assert_line = le32_to_cpu(sh_le.assert_line);
+ sh->console_addr = le32_to_cpu(sh_le.console_addr);
+ sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
+ brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
+ SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+ struct sdpcm_shared sh;
+
+ if (brcmf_sdio_readshared(bus, &sh) == 0)
+ bus->console_addr = sh.console_addr;
+}
+#else
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+}
+#endif /* DEBUG */
+
static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
@@ -995,6 +1159,12 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
else
brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
bus->sdpcm_ver);
+
+ /*
+ * Retrieve console state address now that firmware should have
+ * updated it.
+ */
+ brcmf_sdio_get_console_addr(bus);
}
/*
@@ -1083,6 +1253,28 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
bus->cur_read.len = 0;
}
+static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
+{
+ struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
+ u8 i, hi, lo;
+
+ /* On failure, abort the command and terminate the frame */
+ brcmf_err("sdio error, abort command and terminate frame\n");
+ bus->sdcnt.tx_sderrs++;
+
+ brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
+ bus->sdcnt.f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->sdcnt.f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+}
+
/* return total length of buffer chain */
static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
{
@@ -1955,7 +2147,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
memcpy(pkt_pad->data,
pkt->data + pkt->len - tail_chop,
tail_chop);
- *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+ *(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
skb_trim(pkt, pkt->len - tail_chop);
skb_trim(pkt_pad, tail_pad + tail_chop);
__skb_queue_after(pktq, pkt, pkt_pad);
@@ -2003,7 +2195,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
* already properly aligned and does not
* need an sdpcm header.
*/
- if (*(u32 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
+ if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
continue;
/* align packet data pointer */
@@ -2037,10 +2229,10 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
if (BRCMF_BYTES_ON() &&
((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
(BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
- brcmf_dbg_hex_dump(true, pkt_next, hd_info.len,
+ brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
"Tx Frame:\n");
else if (BRCMF_HDRS_ON())
- brcmf_dbg_hex_dump(true, pkt_next,
+ brcmf_dbg_hex_dump(true, pkt_next->data,
head_pad + bus->tx_hdrlen,
"Tx Header:\n");
}
@@ -2067,11 +2259,11 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
u8 *hdr;
u32 dat_offset;
u16 tail_pad;
- u32 dummy_flags, chop_len;
+ u16 dummy_flags, chop_len;
struct sk_buff *pkt_next, *tmp, *pkt_prev;
skb_queue_walk_safe(pktq, pkt_next, tmp) {
- dummy_flags = *(u32 *)(pkt_next->cb);
+ dummy_flags = *(u16 *)(pkt_next->cb);
if (dummy_flags & ALIGN_SKB_FLAG) {
chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
if (chop_len) {
@@ -2100,7 +2292,6 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
uint chan)
{
int ret;
- int i;
struct sk_buff *pkt_next, *tmp;
brcmf_dbg(TRACE, "Enter\n");
@@ -2113,28 +2304,9 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
bus->sdcnt.f2txdata++;
- if (ret < 0) {
- /* On failure, abort the command and terminate the frame */
- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
- ret);
- bus->sdcnt.tx_sderrs++;
+ if (ret < 0)
+ brcmf_sdio_txfail(bus);
- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
- bus->sdcnt.f1regdata++;
-
- for (i = 0; i < 3; i++) {
- u8 hi, lo;
- hi = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
- bus->sdcnt.f1regdata += 2;
- if ((hi == 0) && (lo == 0))
- break;
- }
- }
sdio_release_host(bus->sdiodev->func[1]);
done:
@@ -2164,13 +2336,15 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
/* Send frames until the limit or some other event */
for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
pkt_num = 1;
- __skb_queue_head_init(&pktq);
+ if (down_interruptible(&bus->tx_seq_lock))
+ return cnt;
if (bus->txglom)
pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
- brcmf_sdio_txglomsz);
+ bus->sdiodev->txglomsz);
pkt_num = min_t(u32, pkt_num,
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
- spin_lock_bh(&bus->txqlock);
+ __skb_queue_head_init(&pktq);
+ spin_lock_bh(&bus->txq_lock);
for (i = 0; i < pkt_num; i++) {
pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
&prec_out);
@@ -2178,15 +2352,19 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
break;
__skb_queue_tail(&pktq, pkt);
}
- spin_unlock_bh(&bus->txqlock);
- if (i == 0)
+ spin_unlock_bh(&bus->txq_lock);
+ if (i == 0) {
+ up(&bus->tx_seq_lock);
break;
+ }
ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
+ up(&bus->tx_seq_lock);
+
cnt += i;
/* In poll mode, need to check for other events */
- if (!bus->intr && cnt) {
+ if (!bus->intr) {
/* Check device status, signal pending interrupt */
sdio_claim_host(bus->sdiodev->func[1]);
ret = r_sdreg32(bus, &intstatus,
@@ -2211,6 +2389,68 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
return cnt;
}
+static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
+{
+ u8 doff;
+ u16 pad;
+ uint retries = 0;
+ struct brcmf_sdio_hdrinfo hd_info = {0};
+ int ret;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Back the pointer to make room for bus header */
+ frame -= bus->tx_hdrlen;
+ len += bus->tx_hdrlen;
+
+ /* Add alignment padding (optional for ctl frames) */
+ doff = ((unsigned long)frame % bus->head_align);
+ if (doff) {
+ frame -= doff;
+ len += doff;
+ memset(frame + bus->tx_hdrlen, 0, doff);
+ }
+
+ /* Round send length to next SDIO block */
+ pad = 0;
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad > bus->roundup) || (pad >= bus->blocksize))
+ pad = 0;
+ } else if (len % bus->head_align) {
+ pad = bus->head_align - (len % bus->head_align);
+ }
+ len += pad;
+
+ hd_info.len = len - pad;
+ hd_info.channel = SDPCM_CONTROL_CHANNEL;
+ hd_info.dat_offset = doff + bus->tx_hdrlen;
+ hd_info.seq_num = bus->tx_seq;
+ hd_info.lastfrm = true;
+ hd_info.tail_pad = pad;
+ brcmf_sdio_hdpack(bus, frame, &hd_info);
+
+ if (bus->txglom)
+ brcmf_sdio_update_hwhdr(frame, len);
+
+ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
+ frame, len, "Tx Frame:\n");
+ brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
+ BRCMF_HDRS_ON(),
+ frame, min_t(u16, len, 16), "TxHdr:\n");
+
+ do {
+ ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
+
+ if (ret < 0)
+ brcmf_sdio_txfail(bus);
+ else
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
+ } while (ret < 0 && retries++ < TXRETRIES);
+
+ return ret;
+}
+
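The length rounding above prefers whole SDIO blocks for large control frames, falls back to head-alignment padding for small ones, and gives up on block rounding when the padding would exceed the roundup limit. A small sketch of just that computation, with made-up blocksize/roundup/alignment values:

#include <stdio.h>
#include <stdint.h>

static uint16_t ctrl_frame_pad(uint16_t len, uint16_t blocksize,
			       uint16_t roundup, uint16_t head_align)
{
	uint16_t pad = 0;

	if (roundup && blocksize && len > blocksize) {
		/* round up to a whole SDIO block ... */
		pad = blocksize - (len % blocksize);
		/* ... unless that costs more than the roundup limit allows */
		if (pad > roundup || pad >= blocksize)
			pad = 0;
	} else if (len % head_align) {
		/* otherwise just pad to the head alignment */
		pad = head_align - (len % head_align);
	}
	return pad;
}

int main(void)
{
	/* 700 + 324 = 1024, i.e. two 512-byte blocks */
	printf("%u\n", (unsigned int)ctrl_frame_pad(700, 512, 512, 4));
	/* small frame: only 2 bytes to reach 4-byte alignment */
	printf("%u\n", (unsigned int)ctrl_frame_pad(30, 512, 512, 4));
	return 0;
}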
static void brcmf_sdio_bus_stop(struct device *dev)
{
u32 local_hostintmask;
@@ -2292,21 +2532,29 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
}
}
+static void atomic_orr(int val, atomic_t *v)
+{
+ int old_val;
+
+ old_val = atomic_read(v);
+ while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
+ old_val = atomic_read(v);
+}
+
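atomic_orr() ORs bits into an atomic_t through a compare-exchange retry loop, so status bits set concurrently from the ISR and the DPC are not lost. A user-space sketch of the same pattern using C11 atomics (a fetch-or would also do; this mirrors the loop above):

#include <stdatomic.h>
#include <stdio.h>

static void atomic_orr(int val, atomic_int *v)
{
	int old_val = atomic_load(v);

	/* on failure the current value is written back into old_val,
	 * so simply retry until the OR lands on an unchanged snapshot */
	while (!atomic_compare_exchange_weak(v, &old_val, old_val | val))
		;
}

int main(void)
{
	atomic_int intstatus = 0x05;

	atomic_orr(0x12, &intstatus);
	printf("0x%02x\n", (unsigned int)atomic_load(&intstatus));	/* 0x17 */
	return 0;
}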
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
- u8 idx;
+ struct brcmf_core *buscore;
u32 addr;
unsigned long val;
- int n, ret;
+ int ret;
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- addr = bus->ci->c_inf[idx].base +
- offsetof(struct sdpcmd_regs, intstatus);
+ buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
bus->sdcnt.f1regdata++;
if (ret != 0)
- val = 0;
+ return ret;
val &= bus->hostintmask;
atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
@@ -2315,13 +2563,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
if (val) {
brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
bus->sdcnt.f1regdata++;
- }
-
- if (ret) {
- atomic_set(&bus->intstatus, 0);
- } else if (val) {
- for_each_set_bit(n, &val, 32)
- set_bit(n, (unsigned long *)&bus->intstatus.counter);
+ atomic_orr(val, &bus->intstatus);
}
return ret;
@@ -2331,10 +2573,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{
u32 newstatus = 0;
unsigned long intstatus;
- uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
uint txlimit = bus->txbound; /* Tx frames to send before resched */
- uint framecnt = 0; /* Temporary counter of tx/rx frames */
- int err = 0, n;
+ uint framecnt; /* Temporary counter of tx/rx frames */
+ int err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -2431,70 +2672,38 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
intstatus &= ~I_HMB_FRAME_IND;
/* On frame indication, read available frames */
- if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
- framecnt = brcmf_sdio_readframes(bus, rxlimit);
+ if ((intstatus & I_HMB_FRAME_IND) && (bus->clkstate == CLK_AVAIL)) {
+ brcmf_sdio_readframes(bus, bus->rxbound);
if (!bus->rxpending)
intstatus &= ~I_HMB_FRAME_IND;
- rxlimit -= min(framecnt, rxlimit);
}
/* Keep still-pending events for next scheduling */
- if (intstatus) {
- for_each_set_bit(n, &intstatus, 32)
- set_bit(n, (unsigned long *)&bus->intstatus.counter);
- }
+ if (intstatus)
+ atomic_orr(intstatus, &bus->intstatus);
brcmf_sdio_clrintr(bus);
- if (data_ok(bus) && bus->ctrl_frame_stat &&
- (bus->clkstate == CLK_AVAIL)) {
- int i;
-
- sdio_claim_host(bus->sdiodev->func[1]);
- err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf,
- (u32)bus->ctrl_frame_len);
-
- if (err < 0) {
- /* On failure, abort the command and
- terminate the frame */
- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
- err);
- bus->sdcnt.tx_sderrs++;
-
- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
-
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, &err);
- bus->sdcnt.f1regdata++;
-
- for (i = 0; i < 3; i++) {
- u8 hi, lo;
- hi = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI,
- &err);
- lo = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO,
- &err);
- bus->sdcnt.f1regdata += 2;
- if ((hi == 0) && (lo == 0))
- break;
- }
+ if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
+ (down_interruptible(&bus->tx_seq_lock) == 0)) {
+ if (data_ok(bus)) {
+ sdio_claim_host(bus->sdiodev->func[1]);
+ err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
+ bus->ctrl_frame_len);
+ sdio_release_host(bus->sdiodev->func[1]);
- } else {
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
+ bus->ctrl_frame_stat = false;
+ brcmf_sdio_wait_event_wakeup(bus);
}
- sdio_release_host(bus->sdiodev->func[1]);
- bus->ctrl_frame_stat = false;
- brcmf_sdio_wait_event_wakeup(bus);
+ up(&bus->tx_seq_lock);
}
/* Send queued frames (limit 1 if rx may still be pending) */
- else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
- brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
- && data_ok(bus)) {
+ if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
+ data_ok(bus)) {
framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
txlimit;
- framecnt = brcmf_sdio_sendfromq(bus, framecnt);
- txlimit -= framecnt;
+ brcmf_sdio_sendfromq(bus, framecnt);
}
if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
@@ -2504,19 +2713,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
atomic_read(&bus->ipend) > 0 ||
(!atomic_read(&bus->fcstate) &&
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
- data_ok(bus)) || PKT_AVAILABLE()) {
+ data_ok(bus))) {
atomic_inc(&bus->dpc_tskcnt);
}
-
- /* If we're done for now, turn off clock request. */
- if ((bus->clkstate != CLK_PENDING)
- && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
- bus->activity = false;
- brcmf_dbg(SDIO, "idle state\n");
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdio_bus_sleep(bus, true, false);
- sdio_release_host(bus->sdiodev->func[1]);
- }
}
static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
@@ -2531,15 +2730,12 @@ static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
int ret = -EBADE;
- uint datalen, prec;
+ uint prec;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
- ulong flags;
- brcmf_dbg(TRACE, "Enter\n");
-
- datalen = pkt->len;
+ brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
/* Add space for the header */
skb_push(pkt, bus->tx_hdrlen);
@@ -2553,7 +2749,9 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->sdcnt.fcqueued++;
/* Priority based enq */
- spin_lock_irqsave(&bus->txqlock, flags);
+ spin_lock_bh(&bus->txq_lock);
+ /* reset bus_flags in packet cb */
+ *(u16 *)(pkt->cb) = 0;
if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
skb_pull(pkt, bus->tx_hdrlen);
brcmf_err("out of bus->txq !!!\n");
@@ -2566,7 +2764,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->txoff = true;
brcmf_txflowblock(bus->sdiodev->dev, true);
}
- spin_unlock_irqrestore(&bus->txqlock, flags);
+ spin_unlock_bh(&bus->txq_lock);
#ifdef DEBUG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2661,110 +2859,27 @@ break2:
}
#endif /* DEBUG */
-static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
-{
- int i;
- int ret;
-
- bus->ctrl_frame_stat = false;
- ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
-
- if (ret < 0) {
- /* On failure, abort the command and terminate the frame */
- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
- ret);
- bus->sdcnt.tx_sderrs++;
-
- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
-
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
- bus->sdcnt.f1regdata++;
-
- for (i = 0; i < 3; i++) {
- u8 hi, lo;
- hi = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
- bus->sdcnt.f1regdata += 2;
- if (hi == 0 && lo == 0)
- break;
- }
- return ret;
- }
-
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
-
- return ret;
-}
-
static int
brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
- u8 *frame;
- u16 len, pad;
- uint retries = 0;
- u8 doff = 0;
- int ret = -1;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
- struct brcmf_sdio_hdrinfo hd_info = {0};
+ int ret = -1;
brcmf_dbg(TRACE, "Enter\n");
- /* Back the pointer to make a room for bus header */
- frame = msg - bus->tx_hdrlen;
- len = (msglen += bus->tx_hdrlen);
-
- /* Add alignment padding (optional for ctl frames) */
- doff = ((unsigned long)frame % bus->head_align);
- if (doff) {
- frame -= doff;
- len += doff;
- msglen += doff;
- memset(frame, 0, doff + bus->tx_hdrlen);
- }
- /* precondition: doff < bus->head_align */
- doff += bus->tx_hdrlen;
-
- /* Round send length to next SDIO block */
- pad = 0;
- if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
- pad = bus->blocksize - (len % bus->blocksize);
- if ((pad > bus->roundup) || (pad >= bus->blocksize))
- pad = 0;
- } else if (len % bus->head_align) {
- pad = bus->head_align - (len % bus->head_align);
- }
- len += pad;
-
- /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
-
- /* Make sure backplane clock is on */
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdio_bus_sleep(bus, false, false);
- sdio_release_host(bus->sdiodev->func[1]);
-
- hd_info.len = (u16)msglen;
- hd_info.channel = SDPCM_CONTROL_CHANNEL;
- hd_info.dat_offset = doff;
- hd_info.seq_num = bus->tx_seq;
- hd_info.lastfrm = true;
- hd_info.tail_pad = pad;
- brcmf_sdio_hdpack(bus, frame, &hd_info);
-
- if (bus->txglom)
- brcmf_sdio_update_hwhdr(frame, len);
+ if (down_interruptible(&bus->tx_seq_lock))
+ return -EINTR;
if (!data_ok(bus)) {
brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
bus->tx_max, bus->tx_seq);
- bus->ctrl_frame_stat = true;
+ up(&bus->tx_seq_lock);
/* Send from dpc */
- bus->ctrl_frame_buf = frame;
- bus->ctrl_frame_len = len;
+ bus->ctrl_frame_buf = msg;
+ bus->ctrl_frame_len = msglen;
+ bus->ctrl_frame_stat = true;
wait_event_interruptible_timeout(bus->ctrl_wait,
!bus->ctrl_frame_stat,
@@ -2775,31 +2890,18 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
ret = 0;
} else {
brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
+ bus->ctrl_frame_stat = false;
+ if (down_interruptible(&bus->tx_seq_lock))
+ return -EINTR;
ret = -1;
}
}
-
if (ret == -1) {
- brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
- frame, len, "Tx Frame:\n");
- brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
- BRCMF_HDRS_ON(),
- frame, min_t(u16, len, 16), "TxHdr:\n");
-
- do {
- sdio_claim_host(bus->sdiodev->func[1]);
- ret = brcmf_sdio_tx_frame(bus, frame, len);
- sdio_release_host(bus->sdiodev->func[1]);
- } while (ret < 0 && retries++ < TXRETRIES);
- }
-
- if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
- atomic_read(&bus->dpc_tskcnt) == 0) {
- bus->activity = false;
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_dbg(INFO, "idle\n");
- brcmf_sdio_clkctl(bus, CLK_NONE, true);
+ brcmf_sdio_bus_sleep(bus, false, false);
+ ret = brcmf_sdio_tx_ctrlframe(bus, msg, msglen);
sdio_release_host(bus->sdiodev->func[1]);
+ up(&bus->tx_seq_lock);
}
if (ret)
@@ -2811,72 +2913,6 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
}
#ifdef DEBUG
-static inline bool brcmf_sdio_valid_shared_address(u32 addr)
-{
- return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
-}
-
-static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
- struct sdpcm_shared *sh)
-{
- u32 addr;
- int rv;
- u32 shaddr = 0;
- struct sdpcm_shared_le sh_le;
- __le32 addr_le;
-
- shaddr = bus->ci->rambase + bus->ramsize - 4;
-
- /*
- * Read last word in socram to determine
- * address of sdpcm_shared structure
- */
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdio_bus_sleep(bus, false, false);
- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
- sdio_release_host(bus->sdiodev->func[1]);
- if (rv < 0)
- return rv;
-
- addr = le32_to_cpu(addr_le);
-
- brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
-
- /*
- * Check if addr is valid.
- * NVRAM length at the end of memory should have been overwritten.
- */
- if (!brcmf_sdio_valid_shared_address(addr)) {
- brcmf_err("invalid sdpcm_shared address 0x%08X\n",
- addr);
- return -EINVAL;
- }
-
- /* Read hndrte_shared structure */
- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
- sizeof(struct sdpcm_shared_le));
- if (rv < 0)
- return rv;
-
- /* Endianness */
- sh->flags = le32_to_cpu(sh_le.flags);
- sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
- sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
- sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
- sh->assert_line = le32_to_cpu(sh_le.assert_line);
- sh->console_addr = le32_to_cpu(sh_le.console_addr);
- sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
-
- if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
- brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
- SDPCM_SHARED_VERSION,
- sh->flags & SDPCM_SHARED_VERSION_MASK);
- return -EPROTO;
- }
-
- return 0;
-}
-
static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
struct sdpcm_shared *sh, char __user *data,
size_t count)
@@ -3106,6 +3142,8 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
debugfs_create_file("forensics", S_IRUGO, dentry, bus,
&brcmf_sdio_forensic_ops);
brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
+ debugfs_create_u32("console_interval", 0644, dentry,
+ &bus->console_interval);
}
#else
static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
@@ -3224,32 +3262,17 @@ static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
const struct firmware *fw)
{
int err;
- int offset;
- int address;
- int len;
brcmf_dbg(TRACE, "Enter\n");
- err = 0;
- offset = 0;
- address = bus->ci->rambase;
- while (offset < fw->size) {
- len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
- fw->size - offset;
- err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
- (u8 *)&fw->data[offset], len);
- if (err) {
- brcmf_err("error %d on writing %d membytes at 0x%08x\n",
- err, len, address);
- return err;
- }
- offset += len;
- address += len;
- }
- if (!err)
- if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
- (u8 *)fw->data, fw->size))
- err = -EIO;
+ err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
+ (u8 *)fw->data, fw->size);
+ if (err)
+ brcmf_err("error %d on writing %d membytes at 0x%08x\n",
+ err, (int)fw->size, bus->ci->rambase);
+ else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
+ (u8 *)fw->data, fw->size))
+ err = -EIO;
return err;
}
@@ -3292,7 +3315,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Keep arm in reset */
- brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);
+ brcmf_chip_enter_download(bus->ci);
fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
if (fw == NULL) {
@@ -3324,7 +3347,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
}
/* Take arm out of reset */
- if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
+ if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
brcmf_err("error getting out of ARM core reset\n");
goto err;
}
@@ -3339,40 +3362,6 @@ err:
return bcmerror;
}
-static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
-{
- u32 addr, reg, pmu_cc3_mask = ~0;
- int err;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- /* old chips with PMU version less than 17 don't support save restore */
- if (bus->ci->pmurev < 17)
- return false;
-
- switch (bus->ci->chip) {
- case BCM43241_CHIP_ID:
- case BCM4335_CHIP_ID:
- case BCM4339_CHIP_ID:
- /* read PMU chipcontrol register 3 */
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
- brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
- return (reg & pmu_cc3_mask) != 0;
- default:
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
- if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
- return false;
-
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
- return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
- PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
- }
-}
-
static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
{
int err = 0;
@@ -3424,7 +3413,7 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
/* KSO bit added in SDIO core rev 12 */
- if (bus->ci->c_inf[1].rev < 12)
+ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
return 0;
val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
@@ -3455,15 +3444,13 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
struct brcmf_sdio *bus = sdiodev->bus;
uint pad_size;
u32 value;
- u8 idx;
int err;
/* the commands below use the terms tx and rx from
* a device perspective, ie. bus:txglom affects the
* bus transfers from device to host.
*/
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- if (bus->ci->c_inf[idx].rev < 12) {
+ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
/* for sdio core rev < 12, disable txgloming */
value = 0;
err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@@ -3570,7 +3557,7 @@ static int brcmf_sdio_bus_init(struct device *dev)
ret = -ENODEV;
}
- if (brcmf_sdio_sr_capable(bus)) {
+ if (brcmf_chip_sr_capable(bus->ci)) {
brcmf_sdio_sr_init(bus);
} else {
/* Restore previous clock setting */
@@ -3714,11 +3701,175 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
datawork);
while (atomic_read(&bus->dpc_tskcnt)) {
+ atomic_set(&bus->dpc_tskcnt, 0);
brcmf_sdio_dpc(bus);
- atomic_dec(&bus->dpc_tskcnt);
}
}
+static void
+brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct brcmf_chip *ci, u32 drivestrength)
+{
+ const struct sdiod_drive_str *str_tab = NULL;
+ u32 str_mask;
+ u32 str_shift;
+ u32 base;
+ u32 i;
+ u32 drivestrength_sel = 0;
+ u32 cc_data_temp;
+ u32 addr;
+
+ if (!(ci->cc_caps & CC_CAP_PMU))
+ return;
+
+ switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+ str_tab = sdiod_drvstr_tab1_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+ str_tab = sdiod_drvstr_tab6_1v8;
+ str_mask = 0x00001800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+ /* note: 43143 does not support tristate */
+ i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
+ if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
+ str_tab = sdiod_drvstr_tab2_3v3;
+ str_mask = 0x00000007;
+ str_shift = 0;
+ } else
+ brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
+ ci->name, drivestrength);
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+ str_tab = sdiod_drive_strength_tab5_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ default:
+ brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ ci->name, ci->chiprev, ci->pmurev);
+ break;
+ }
+
+ if (str_tab != NULL) {
+ for (i = 0; str_tab[i].strength != 0; i++) {
+ if (drivestrength >= str_tab[i].strength) {
+ drivestrength_sel = str_tab[i].sel;
+ break;
+ }
+ }
+ base = brcmf_chip_get_chipcommon(ci)->base;
+ addr = CORE_CC_REG(base, chipcontrol_addr);
+ brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+ cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+ cc_data_temp &= ~str_mask;
+ drivestrength_sel <<= str_shift;
+ cc_data_temp |= drivestrength_sel;
+ brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+
+ brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
+ str_tab[i].strength, drivestrength, cc_data_temp);
+ }
+}
+
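The selection walks the per-chip table until it finds the first entry not exceeding the requested strength, then shifts the select value into the chipcontrol field described by str_mask/str_shift. A stand-alone sketch using the PMU rev 11 (1.8V) table and the 4330 mask/shift from this patch; the requested strength and register readback are invented:

#include <stdio.h>
#include <stdint.h>

struct sdiod_drive_str {
	uint8_t strength;	/* pad drive strength in mA */
	uint8_t sel;		/* chip-specific select value */
};

/* PMU rev 11 (1.8V) table, as added above */
static const struct sdiod_drive_str tab1_1v8[] = {
	{32, 0x6}, {26, 0x7}, {22, 0x4}, {16, 0x5},
	{12, 0x2}, {8, 0x3}, {4, 0x0}, {0, 0x1}
};

int main(void)
{
	uint32_t drivestrength = 6;	/* invented platform request, in mA */
	uint32_t str_mask = 0x00003800;	/* chipcontrol field for the 4330 case */
	uint32_t str_shift = 11;
	uint32_t cc_data = 0x00005a00;	/* invented chipcontrol readback */
	unsigned int i;

	/* first entry not above the request wins: 6 mA -> 4 mA, sel 0x0 */
	for (i = 0; tab1_1v8[i].strength != 0; i++)
		if (drivestrength >= tab1_1v8[i].strength)
			break;

	cc_data = (cc_data & ~str_mask) |
		  ((uint32_t)tab1_1v8[i].sel << str_shift);
	printf("%u mA selected, chipcontrol -> 0x%08x\n",
	       (unsigned int)tab1_1v8[i].strength, cc_data);
	return 0;
}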
+static int brcmf_sdio_buscoreprep(void *ctx)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ int err = 0;
+ u8 clkval, clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (err) {
+ brcmf_err("error writing for HT off\n");
+ return err;
+ }
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ /* This may take up to 15 milliseconds */
+ clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+
+ if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+ brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+ clkset, clkval);
+ return -EACCES;
+ }
+
+ SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval);
+ return -EBUSY;
+ }
+
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ udelay(65);
+
+ /* Also, disable the extra SDIO pull-ups */
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+ return 0;
+}
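
brcmf_sdio_buscoreprep() is a three-step handshake: request ALP only, verify the CHIPCLKCSR readback, then poll via SPINWAIT for ALP-available before forcing ALP and dropping the extra pull-ups. A self-contained model of the polling step; the byte variable stands in for the function-1 CHIPCLKCSR register and the simulated hardware raises the bit on the third read, so none of the calls below are real driver APIs.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ALP_AVAIL	0x40		/* mirrors SBSDIO_ALP_AVAIL */
	#define POLL_STEP_US	10

	static uint8_t fake_csr;		/* stands in for func1 CHIPCLKCSR */
	static unsigned int polls;

	static uint8_t read_csr(void)
	{
		if (++polls == 3)		/* pretend the PMU raises ALP here */
			fake_csr |= ALP_AVAIL;
		return fake_csr;
	}

	static bool wait_alp_avail(unsigned int budget_us)
	{
		uint8_t val = read_csr();

		while (!(val & ALP_AVAIL) && budget_us >= POLL_STEP_US) {
			budget_us -= POLL_STEP_US;	/* udelay() elided here */
			val = read_csr();
		}
		return val & ALP_AVAIL;		/* false -> caller returns -EBUSY */
	}

	int main(void)
	{
		printf("ALP %s\n", wait_alp_avail(15000) ? "available" : "timed out");
		return 0;
	}
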
+
+static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
+ u32 rstvec)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ struct brcmf_core *core;
+ u32 reg_addr;
+
+ /* clear all interrupts */
+ core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
+ reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
+ brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+
+ if (rstvec)
+ /* Write reset vector to address 0 */
+ brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
+ sizeof(rstvec));
+}
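
The interrupt acknowledge above forms a backplane address as the core base plus the offset of a field inside struct sdpcmd_regs, the same offsetof() trick the CORE_CC_REG/CORE_BUS_REG macros rely on. A small sketch of that addressing scheme over a cut-down register layout; the 0x18002000 base and the demo struct are illustrative, not the full register map.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* cut-down stand-in for struct sdpcmd_regs; only the layout idea matters */
	struct demo_sdio_regs {
		uint32_t corecontrol;	/* 0x00 */
		uint32_t corestatus;	/* 0x04 */
		uint32_t pad[6];
		uint32_t intstatus;	/* 0x20, as in the real core */
	};

	#define CORE_BUS_REG(base, field) \
		((base) + offsetof(struct demo_sdio_regs, field))

	int main(void)
	{
		uint32_t core_base = 0x18002000;	/* assumed SDIO core base */
		uint32_t addr = CORE_BUS_REG(core_base, intstatus);

		/* the driver would now write 0xFFFFFFFF to addr to ack everything */
		printf("intstatus at 0x%08x\n", (unsigned int)addr);
		return 0;
	}
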
+
+static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ u32 val, rev;
+
+ val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+ if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+ addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
+ rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
+ if (rev >= 2) {
+ val &= ~CID_ID_MASK;
+ val |= BCM4339_CHIP_ID;
+ }
+ }
+ return val;
+}
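
The read hook above intercepts reads of the chipcommon chipid register and, when a 4335-family part reports revision 2 or later, rewrites the id field in place so later code treats it as a BCM4339. A sketch of that field rewrite; the DEMO_* mask and shift values are assumed for illustration rather than copied from the real CID_* definitions.

	#include <stdint.h>
	#include <stdio.h>

	/* assumed layout: id in bits 0..15, revision in bits 16..19 */
	#define DEMO_ID_MASK	0x0000ffffu
	#define DEMO_REV_MASK	0x000f0000u
	#define DEMO_REV_SHIFT	16

	static uint32_t fixup_chipid(uint32_t raw, uint32_t new_id)
	{
		uint32_t rev = (raw & DEMO_REV_MASK) >> DEMO_REV_SHIFT;

		if (rev >= 2) {			/* same condition as the hook above */
			raw &= ~DEMO_ID_MASK;
			raw |= new_id;
		}
		return raw;
	}

	int main(void)
	{
		uint32_t raw = (3u << DEMO_REV_SHIFT) | 0x4335;	/* rev 3, id 0x4335 */

		printf("0x%08x\n", (unsigned int)fixup_chipid(raw, 0x4339));
		return 0;
	}
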
+
+static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+
+ brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
+}
+
+static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
+ .prepare = brcmf_sdio_buscoreprep,
+ .exit_dl = brcmf_sdio_buscore_exitdl,
+ .read32 = brcmf_sdio_buscore_read32,
+ .write32 = brcmf_sdio_buscore_write32,
+};
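
This ops table is what allows the old sdio_chip code to move into a bus-independent chip layer: the common code only ever calls prepare/exit_dl/read32/write32 through the table, and the SDIO glue above supplies the implementations. A toy version of the same inversion; bus_ops, fake_bus and chip_read_id() are invented names standing in for brcmf_buscore_ops and its consumer.

	#include <stdint.h>
	#include <stdio.h>

	struct bus_ops {
		uint32_t (*read32)(void *ctx, uint32_t addr);
		void (*write32)(void *ctx, uint32_t addr, uint32_t val);
	};

	struct fake_bus { uint32_t regs[16]; };

	static uint32_t fake_read32(void *ctx, uint32_t addr)
	{
		return ((struct fake_bus *)ctx)->regs[addr & 0xf];
	}

	static void fake_write32(void *ctx, uint32_t addr, uint32_t val)
	{
		((struct fake_bus *)ctx)->regs[addr & 0xf] = val;
	}

	/* bus-agnostic "chip" code: only ever talks to the ops table */
	static uint32_t chip_read_id(const struct bus_ops *ops, void *ctx)
	{
		return ops->read32(ctx, 0x0);	/* chipid sits at enum base + 0 */
	}

	int main(void)
	{
		struct fake_bus bus = { { 0 } };
		static const struct bus_ops ops = {
			.read32 = fake_read32,
			.write32 = fake_write32,
		};

		ops.write32(&bus, 0x0, 0x4339);	/* pretend hardware state */
		printf("chip id 0x%x\n", (unsigned int)chip_read_id(&ops, &bus));
		return 0;
	}
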
+
static bool
brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
{
@@ -3734,7 +3885,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
/*
- * Force PLL off until brcmf_sdio_chip_attach()
+ * Force PLL off until brcmf_chip_attach()
* programs PLL control regs
*/
@@ -3755,8 +3906,10 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
*/
brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
- if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
- brcmf_err("brcmf_sdio_chip_attach failed!\n");
+ bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
+ if (IS_ERR(bus->ci)) {
+ brcmf_err("brcmf_chip_attach failed!\n");
+ bus->ci = NULL;
goto fail;
}
@@ -3769,7 +3922,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
drivestrength = bus->sdiodev->pdata->drive_strength;
else
drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
- brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
+ brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
/* Get info on the SOCRAM cores... */
bus->ramsize = bus->ci->ramsize;
@@ -3792,24 +3945,18 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
goto fail;
/* set PMUControl so a backplane reset does PMU state reload */
- reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
+ reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
pmucontrol);
- reg_val = brcmf_sdiod_regrl(bus->sdiodev,
- reg_addr,
- &err);
+ reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
if (err)
goto fail;
reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
- brcmf_sdiod_regwl(bus->sdiodev,
- reg_addr,
- reg_val,
- &err);
+ brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
if (err)
goto fail;
-
sdio_release_host(bus->sdiodev->func[1]);
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@@ -3849,6 +3996,7 @@ brcmf_sdio_watchdog_thread(void *data)
brcmf_sdio_bus_watchdog(bus);
/* Count the tick for reference */
bus->sdcnt.tickcnt++;
+ reinit_completion(&bus->watchdog_wait);
} else
break;
}
@@ -3925,7 +4073,8 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
}
spin_lock_init(&bus->rxctl_lock);
- spin_lock_init(&bus->txqlock);
+ spin_lock_init(&bus->txq_lock);
+ sema_init(&bus->tx_seq_lock, 1);
init_waitqueue_head(&bus->ctrl_wait);
init_waitqueue_head(&bus->dcmd_resp_wait);
@@ -4024,14 +4173,14 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
/* De-register interrupt handler */
brcmf_sdiod_intr_unregister(bus->sdiodev);
- cancel_work_sync(&bus->datawork);
- if (bus->brcmf_wq)
- destroy_workqueue(bus->brcmf_wq);
-
if (bus->sdiodev->bus_if->drvr) {
brcmf_detach(bus->sdiodev->dev);
}
+ cancel_work_sync(&bus->datawork);
+ if (bus->brcmf_wq)
+ destroy_workqueue(bus->brcmf_wq);
+
if (bus->ci) {
if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
sdio_claim_host(bus->sdiodev->func[1]);
@@ -4042,12 +4191,11 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
* all necessary cores.
*/
msleep(20);
- brcmf_sdio_chip_enter_download(bus->sdiodev,
- bus->ci);
+ brcmf_chip_enter_download(bus->ci);
brcmf_sdio_clkctl(bus, CLK_NONE, false);
sdio_release_host(bus->sdiodev->func[1]);
}
- brcmf_sdio_chip_detach(&bus->ci);
+ brcmf_chip_detach(bus->ci);
}
kfree(bus->rxbuf);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index 22adbe311d20..59a5af5bf994 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -124,7 +124,8 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
}
static u32
-brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
+brcmf_create_iovar(char *name, const char *data, u32 datalen,
+ char *buf, u32 buflen)
{
u32 len;
@@ -144,7 +145,7 @@ brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
s32
-brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index 77eae86e55c2..a30be683f4a1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -83,7 +83,7 @@ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
u32 len);
s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
u32 len);
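
Const-qualifying the iovar data pointer works because this path only reads the caller's buffer: the helper copies the NUL-terminated iovar name followed by the raw payload into a scratch buffer before it goes to firmware. A minimal sketch of that name-then-payload packing with an invented pack_iovar(); buffer-size checking is kept, the firmware call is not.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* returns total length written, or 0 if buf is too small */
	static uint32_t pack_iovar(const char *name, const void *data,
				   uint32_t datalen, char *buf, uint32_t buflen)
	{
		uint32_t namelen = (uint32_t)strlen(name) + 1;	/* include the NUL */

		if (namelen + datalen > buflen)
			return 0;

		memcpy(buf, name, namelen);			/* "name\0" first ... */
		if (datalen)
			memcpy(buf + namelen, data, datalen);	/* ... then payload */

		return namelen + datalen;
	}

	int main(void)
	{
		char buf[64];
		uint32_t value = 0;				/* e.g. bus:txglom = 0 */
		uint32_t len = pack_iovar("bus:txglom", &value, sizeof(value),
					  buf, sizeof(buf));

		printf("iovar frame is %u bytes\n", (unsigned int)len);
		return 0;
	}
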
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index af17a5bc8b83..614e4888504f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -48,6 +48,11 @@
#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
+/* OBSS Coex Auto/On/Off */
+#define BRCMF_OBSS_COEX_AUTO (-1)
+#define BRCMF_OBSS_COEX_OFF 0
+#define BRCMF_OBSS_COEX_ON 1
+
enum brcmf_fil_p2p_if_types {
BRCMF_FIL_P2P_IF_CLIENT,
BRCMF_FIL_P2P_IF_GO,
@@ -87,6 +92,11 @@ struct brcmf_fil_bss_enable_le {
__le32 enable;
};
+struct brcmf_fil_bwcap_le {
+ __le32 band;
+ __le32 bw_cap;
+};
+
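
Like the other *_le firmware structures, the new bwcap structure is declared with explicit little-endian fields, so the host fills it through cpu_to_le32() regardless of its native byte order. A self-contained illustration of that convention; put_le32() is a local helper rather than the kernel macro, and the band/bw_cap values are placeholders.

	#include <stdint.h>
	#include <stdio.h>

	/* host-side mirror of a two-field __le32 firmware struct */
	struct demo_bwcap_le {
		uint8_t band[4];
		uint8_t bw_cap[4];
	};

	/* store a host u32 as little-endian bytes, whatever the host order is */
	static void put_le32(uint8_t out[4], uint32_t v)
	{
		out[0] = v & 0xff;
		out[1] = (v >> 8) & 0xff;
		out[2] = (v >> 16) & 0xff;
		out[3] = (v >> 24) & 0xff;
	}

	int main(void)
	{
		struct demo_bwcap_le msg;

		put_le32(msg.band, 1);		/* placeholder band selector */
		put_le32(msg.bw_cap, 0x3);	/* placeholder capability bits */

		printf("first payload byte: 0x%02x\n", msg.band[0]);
		return 0;
	}
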
/**
* struct tdls_iovar - common structure for tdls iovars.
*
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index fc4f98b275d7..f3445ac627e4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -797,7 +797,8 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
/* SOCIAL CHANNELS 1, 6, 11 */
search_state = WL_P2P_DISC_ST_SEARCH;
brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
- } else if (dev != NULL && vif->mode == WL_MODE_AP) {
+ } else if (dev != NULL &&
+ vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
/* If you are already a GO, then do SEARCH only */
brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
search_state = WL_P2P_DISC_ST_SEARCH;
@@ -2256,7 +2257,6 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct brcmf_cfg80211_vif *vif;
enum brcmf_fil_p2p_if_types iftype;
- enum wl_mode mode;
int err;
if (brcmf_cfg80211_vif_event_armed(cfg))
@@ -2267,11 +2267,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
switch (type) {
case NL80211_IFTYPE_P2P_CLIENT:
iftype = BRCMF_FIL_P2P_IF_CLIENT;
- mode = WL_MODE_BSS;
break;
case NL80211_IFTYPE_P2P_GO:
iftype = BRCMF_FIL_P2P_IF_GO;
- mode = WL_MODE_AP;
break;
case NL80211_IFTYPE_P2P_DEVICE:
return brcmf_p2p_create_p2pdev(&cfg->p2p, wiphy,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
deleted file mode 100644
index 82bf3c5d3cdc..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ /dev/null
@@ -1,972 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-/* ***** SDIO interface chip backplane handle functions ***** */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/ssb/ssb_regs.h>
-#include <linux/bcma/bcma.h>
-
-#include <chipcommon.h>
-#include <brcm_hw_ids.h>
-#include <brcmu_wifi.h>
-#include <brcmu_utils.h>
-#include <soc.h>
-#include "dhd_dbg.h"
-#include "sdio_host.h"
-#include "sdio_chip.h"
-
-/* chip core base & ramsize */
-/* bcm4329 */
-/* SDIO device core, ID 0x829 */
-#define BCM4329_CORE_BUS_BASE 0x18011000
-/* internal memory core, ID 0x80e */
-#define BCM4329_CORE_SOCRAM_BASE 0x18003000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM4329_CORE_ARM_BASE 0x18002000
-#define BCM4329_RAMSIZE 0x48000
-
-/* bcm43143 */
-/* SDIO device core */
-#define BCM43143_CORE_BUS_BASE 0x18002000
-/* internal memory core */
-#define BCM43143_CORE_SOCRAM_BASE 0x18004000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM43143_CORE_ARM_BASE 0x18003000
-#define BCM43143_RAMSIZE 0x70000
-
-/* All D11 cores, ID 0x812 */
-#define BCM43xx_CORE_D11_BASE 0x18001000
-
-#define SBCOREREV(sbidh) \
- ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
- ((sbidh) & SSB_IDHIGH_RCLO))
-
-/* SOC Interconnect types (aka chip types) */
-#define SOCI_SB 0
-#define SOCI_AI 1
-
-/* EROM CompIdentB */
-#define CIB_REV_MASK 0xff000000
-#define CIB_REV_SHIFT 24
-
-/* ARM CR4 core specific control flag bits */
-#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
-
-/* D11 core specific control flag bits */
-#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
-#define D11_BCMA_IOCTL_PHYRESET 0x0008
-
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
-/* SDIO Pad drive strength to select value mappings */
-struct sdiod_drive_str {
- u8 strength; /* Pad Drive Strength in mA */
- u8 sel; /* Chip-specific select value */
-};
-/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
-static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
- {32, 0x6},
- {26, 0x7},
- {22, 0x4},
- {16, 0x5},
- {12, 0x2},
- {8, 0x3},
- {4, 0x0},
- {0, 0x1}
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
- {6, 0x7},
- {5, 0x6},
- {4, 0x5},
- {3, 0x4},
- {2, 0x2},
- {1, 0x1},
- {0, 0x0}
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
-static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
- {3, 0x3},
- {2, 0x2},
- {1, 0x1},
- {0, 0x0} };
-
-/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
-static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
- {16, 0x7},
- {12, 0x5},
- {8, 0x3},
- {4, 0x1}
-};
-
-u8
-brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
-{
- u8 idx;
-
- for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
- if (coreid == ci->c_inf[idx].id)
- return idx;
-
- return BRCMF_MAX_CORENUM;
-}
-
-static u32
-brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidhigh),
- NULL);
- return SBCOREREV(regdata);
-}
-
-static u32
-brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-
- return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-static bool
-brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return false;
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
- SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
- return (SSB_TMSLOW_CLOCK == regdata);
-}
-
-static bool
-brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
- bool ret;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return false;
-
- regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
- ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- NULL);
- ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
-
- return ret;
-}
-
-static void
-brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits)
-{
- u32 regdata, base;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- base = ci->c_inf[idx].base;
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
- if (regdata & SSB_TMSLOW_RESET)
- return;
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
- if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
- /*
- * set target reject and spin until busy is clear
- * (preserve core-specific bits)
- */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata | SSB_TMSLOW_REJECT, NULL);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- udelay(1);
- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL) &
- SSB_TMSHIGH_BUSY), 100000);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL);
- if (regdata & SSB_TMSHIGH_BUSY)
- brcmf_err("core state still busy\n");
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
- if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- regdata |= SSB_IMSTATE_REJECT;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- udelay(1);
- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL) &
- SSB_IMSTATE_BUSY), 100000);
- }
-
- /* set reset and reject while enabling the clocks */
- regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
- SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- udelay(10);
-
- /* clear the initiator reject bit */
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
- if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- regdata &= ~SSB_IMSTATE_REJECT;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
- }
- }
-
- /* leave reset and reject asserted */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
- udelay(1);
-}
-
-static void
-brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
-
- /* if core is already in reset, just return */
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
- if ((regdata & BCMA_RESET_CTL_RESET) != 0)
- return;
-
- /* configure reset */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-
- /* put in reset */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
- BCMA_RESET_CTL_RESET, NULL);
- usleep_range(10, 20);
-
- /* wait till reset is 1 */
- SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
- BCMA_RESET_CTL_RESET, 300);
-
- /* post reset configure */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-}
-
-static void
-brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- /*
- * Must do the disable sequence first to work for
- * arbitrary current core state.
- */
- brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
- in_resetbits);
-
- /*
- * Now do the initialization sequence.
- * set reset while enabling the clock and
- * forcing them on throughout the core
- */
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
- NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-
- /* clear any serror */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- NULL);
- if (regdata & SSB_TMSHIGH_SERR)
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- 0, NULL);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- NULL);
- if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
- NULL);
-
- /* clear reset and allow it to propagate throughout the core */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-
- /* leave clock enabled */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-}
-
-static void
-brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
-
- /* must disable first to work for arbitrary current core state */
- brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
- in_resetbits);
-
- while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
- BCMA_RESET_CTL_RESET) {
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
- usleep_range(40, 60);
- }
-
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
- BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-}
-
-#ifdef DEBUG
-/* safety check for chipinfo */
-static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
-{
- u8 core_idx;
-
- /* check RAM core presence for ARM CM3 core */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != core_idx) {
- core_idx = brcmf_sdio_chip_getinfidx(ci,
- BCMA_CORE_INTERNAL_MEM);
- if (BRCMF_MAX_CORENUM == core_idx) {
- brcmf_err("RAM core not provided with ARM CM3 core\n");
- return -ENODEV;
- }
- }
-
- /* check RAM base for ARM CR4 core */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
- if (BRCMF_MAX_CORENUM != core_idx) {
- if (ci->rambase == 0) {
- brcmf_err("RAM base not provided with ARM CR4 core\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-#else /* DEBUG */
-static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
-{
- return 0;
-}
-#endif
-
-static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u32 regdata;
- u32 socitype;
-
- /* Get CC core rev
- * Chipid is assume to be at offset 0 from SI_ENUM_BASE
- * For different chiptypes or old sdio hosts w/o chipcommon,
- * other ways of recognition should be added here.
- */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(SI_ENUM_BASE, chipid),
- NULL);
- ci->chip = regdata & CID_ID_MASK;
- ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
- if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
- ci->chiprev >= 2)
- ci->chip = BCM4339_CHIP_ID;
- socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
-
- brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
- socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);
-
- if (socitype == SOCI_SB) {
- if (ci->chip != BCM4329_CHIP_ID) {
- brcmf_err("SB chip is not supported\n");
- return -ENODEV;
- }
- ci->iscoreup = brcmf_sdio_sb_iscoreup;
- ci->corerev = brcmf_sdio_sb_corerev;
- ci->coredisable = brcmf_sdio_sb_coredisable;
- ci->resetcore = brcmf_sdio_sb_resetcore;
-
- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
- ci->c_inf[0].base = SI_ENUM_BASE;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->ramsize = BCM4329_RAMSIZE;
- } else if (socitype == SOCI_AI) {
- ci->iscoreup = brcmf_sdio_ai_iscoreup;
- ci->corerev = brcmf_sdio_ai_corerev;
- ci->coredisable = brcmf_sdio_ai_coredisable;
- ci->resetcore = brcmf_sdio_ai_resetcore;
-
- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
- ci->c_inf[0].base = SI_ENUM_BASE;
-
- /* Address of cores for new chips should be added here */
- switch (ci->chip) {
- case BCM43143_CHIP_ID:
- ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
- ci->c_inf[0].cib = 0x2b000000;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
- ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
- ci->c_inf[1].cib = 0x18000000;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
- ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
- ci->c_inf[2].cib = 0x14000000;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->c_inf[3].cib = 0x07000000;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = BCM43143_RAMSIZE;
- break;
- case BCM43241_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2a084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0e004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x14080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x90000;
- break;
- case BCM4330_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x27004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x07004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x0d080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x03004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x48000;
- break;
- case BCM4334_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x29004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0d004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x13080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x80000;
- break;
- case BCM4335_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2b084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x0f004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x01084411;
- ci->c_inf[3].id = BCMA_CORE_80211;
- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- case BCM43362_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x27004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0a004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x08080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x03004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x3C000;
- break;
- case BCM4339_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2e084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x15004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x04084411;
- ci->c_inf[3].id = BCMA_CORE_80211;
- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- default:
- brcmf_err("AXI chip is not supported\n");
- return -ENODEV;
- }
- } else {
- brcmf_err("chip backplane type %u is not supported\n",
- socitype);
- return -ENODEV;
- }
-
- return brcmf_sdio_chip_cichk(ci);
-}
-
-static int
-brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
-{
- int err = 0;
- u8 clkval, clkset;
-
- /* Try forcing SDIO core to do ALPAvail request only */
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
- if (err) {
- brcmf_err("error writing for HT off\n");
- return err;
- }
-
- /* If register supported, wait for ALPAvail and then force ALP */
- /* This may take up to 15 milliseconds */
- clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
-
- if ((clkval & ~SBSDIO_AVBITS) != clkset) {
- brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
- clkset, clkval);
- return -EACCES;
- }
-
- SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
- !SBSDIO_ALPAV(clkval)),
- PMU_MAX_TRANSITION_DLY);
- if (!SBSDIO_ALPAV(clkval)) {
- brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
- clkval);
- return -EBUSY;
- }
-
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
- udelay(65);
-
- /* Also, disable the extra SDIO pull-ups */
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
-
- return 0;
-}
-
-static void
-brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u32 base = ci->c_inf[0].base;
-
- /* get chipcommon rev */
- ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
-
- /* get chipcommon capabilites */
- ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(base, capabilities),
- NULL);
-
- /* get pmu caps & rev */
- if (ci->c_inf[0].caps & CC_CAP_PMU) {
- ci->pmucaps =
- brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(base, pmucapabilities),
- NULL);
- ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
- }
-
- ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
-
- brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
- ci->c_inf[0].rev, ci->pmurev,
- ci->c_inf[1].rev, ci->c_inf[1].id);
-
- /*
- * Make sure any on-chip ARM is off (in case strapping is wrong),
- * or downloaded code was already running.
- */
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
-}
-
-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip **ci_ptr)
-{
- int ret;
- struct brcmf_chip *ci;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
- if (!ci)
- return -ENOMEM;
-
- ret = brcmf_sdio_chip_buscoreprep(sdiodev);
- if (ret != 0)
- goto err;
-
- ret = brcmf_sdio_chip_recognition(sdiodev, ci);
- if (ret != 0)
- goto err;
-
- brcmf_sdio_chip_buscoresetup(sdiodev, ci);
-
- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
- 0, NULL);
- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
- 0, NULL);
-
- *ci_ptr = ci;
- return 0;
-
-err:
- kfree(ci);
- return ret;
-}
-
-void
-brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- kfree(*ci_ptr);
- *ci_ptr = NULL;
-}
-
-static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
-{
- const char *fmt;
-
- fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
- snprintf(buf, len, fmt, chipid);
- return buf;
-}
-
-void
-brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 drivestrength)
-{
- const struct sdiod_drive_str *str_tab = NULL;
- u32 str_mask;
- u32 str_shift;
- char chn[8];
- u32 base = ci->c_inf[0].base;
- u32 i;
- u32 drivestrength_sel = 0;
- u32 cc_data_temp;
- u32 addr;
-
- if (!(ci->c_inf[0].caps & CC_CAP_PMU))
- return;
-
- switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
- case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
- str_tab = sdiod_drvstr_tab1_1v8;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
- str_tab = sdiod_drvstr_tab6_1v8;
- str_mask = 0x00001800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
- /* note: 43143 does not support tristate */
- i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
- if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
- str_tab = sdiod_drvstr_tab2_3v3;
- str_mask = 0x00000007;
- str_shift = 0;
- } else
- brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
- brcmf_sdio_chip_name(ci->chip, chn, 8),
- drivestrength);
- break;
- case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
- str_tab = sdiod_drive_strength_tab5_1v8;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- default:
- brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
- brcmf_sdio_chip_name(ci->chip, chn, 8),
- ci->chiprev, ci->pmurev);
- break;
- }
-
- if (str_tab != NULL) {
- for (i = 0; str_tab[i].strength != 0; i++) {
- if (drivestrength >= str_tab[i].strength) {
- drivestrength_sel = str_tab[i].sel;
- break;
- }
- }
- addr = CORE_CC_REG(base, chipcontrol_addr);
- brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
- cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
- cc_data_temp &= ~str_mask;
- drivestrength_sel <<= str_shift;
- cc_data_temp |= drivestrength_sel;
- brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
-
- brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
- str_tab[i].strength, drivestrength, cc_data_temp);
- }
-}
-
-static void
-brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
- ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
-}
-
-static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 core_idx;
- u32 reg_addr;
-
- if (!ci->iscoreup(sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
- brcmf_err("SOCRAM core is down after reset?\n");
- return false;
- }
-
- /* clear all interrupts */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
- reg_addr = ci->c_inf[core_idx].base;
- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
-
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
-
- return true;
-}
-
-static inline void
-brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
- idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
-
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
- regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
- ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
-}
-
-static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec)
-{
- u8 core_idx;
- u32 reg_addr;
-
- /* clear all interrupts */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
- reg_addr = ci->c_inf[core_idx].base;
- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
-
- /* Write reset vector to address 0 */
- brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
- sizeof(rstvec));
-
- /* restore ARM */
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
- 0, 0);
-
- return true;
-}
-
-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 arm_core_idx;
-
- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != arm_core_idx) {
- brcmf_sdio_chip_cm3_enterdl(sdiodev, ci);
- return;
- }
-
- brcmf_sdio_chip_cr4_enterdl(sdiodev, ci);
-}
-
-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec)
-{
- u8 arm_core_idx;
-
- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != arm_core_idx)
- return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
-
- return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
deleted file mode 100644
index fb0614329ede..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMFMAC_SDIO_CHIP_H_
-#define _BRCMFMAC_SDIO_CHIP_H_
-
-/*
- * Core reg address translation.
- * Both macro's returns a 32 bits byte address on the backplane bus.
- */
-#define CORE_CC_REG(base, field) \
- (base + offsetof(struct chipcregs, field))
-#define CORE_BUS_REG(base, field) \
- (base + offsetof(struct sdpcmd_regs, field))
-#define CORE_SB(base, field) \
- (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
-
-/* SDIO function 1 register CHIPCLKCSR */
-/* Force ALP request to backplane */
-#define SBSDIO_FORCE_ALP 0x01
-/* Force HT request to backplane */
-#define SBSDIO_FORCE_HT 0x02
-/* Force ILP request to backplane */
-#define SBSDIO_FORCE_ILP 0x04
-/* Make ALP ready (power up xtal) */
-#define SBSDIO_ALP_AVAIL_REQ 0x08
-/* Make HT ready (power up PLL) */
-#define SBSDIO_HT_AVAIL_REQ 0x10
-/* Squelch clock requests from HW */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
-/* Status: ALP is ready */
-#define SBSDIO_ALP_AVAIL 0x40
-/* Status: HT is ready */
-#define SBSDIO_HT_AVAIL 0x80
-#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-#define SBSDIO_CLKAV(regval, alponly) \
- (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
-
-#define BRCMF_MAX_CORENUM 6
-
-struct brcmf_core {
- u16 id;
- u16 rev;
- u32 base;
- u32 wrapbase;
- u32 caps;
- u32 cib;
-};
-
-struct brcmf_chip {
- u32 chip;
- u32 chiprev;
- /* core info */
- /* always put chipcommon core at 0, bus core at 1 */
- struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
- u32 pmurev;
- u32 pmucaps;
- u32 ramsize;
- u32 rambase;
- u32 rst_vec; /* reset vertor for ARM CR4 core */
-
- bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
- u16 coreid);
- u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
- u16 coreid);
- void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits);
- void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits);
-};
-
-struct sbconfig {
- u32 PAD[2];
- u32 sbipsflag; /* initiator port ocp slave flag */
- u32 PAD[3];
- u32 sbtpsflag; /* target port ocp slave flag */
- u32 PAD[11];
- u32 sbtmerrloga; /* (sonics >= 2.3) */
- u32 PAD;
- u32 sbtmerrlog; /* (sonics >= 2.3) */
- u32 PAD[3];
- u32 sbadmatch3; /* address match3 */
- u32 PAD;
- u32 sbadmatch2; /* address match2 */
- u32 PAD;
- u32 sbadmatch1; /* address match1 */
- u32 PAD[7];
- u32 sbimstate; /* initiator agent state */
- u32 sbintvec; /* interrupt mask */
- u32 sbtmstatelow; /* target state */
- u32 sbtmstatehigh; /* target state */
- u32 sbbwa0; /* bandwidth allocation table0 */
- u32 PAD;
- u32 sbimconfiglow; /* initiator configuration */
- u32 sbimconfighigh; /* initiator configuration */
- u32 sbadmatch0; /* address match0 */
- u32 PAD;
- u32 sbtmconfiglow; /* target configuration */
- u32 sbtmconfighigh; /* target configuration */
- u32 sbbconfig; /* broadcast configuration */
- u32 PAD;
- u32 sbbstate; /* broadcast state */
- u32 PAD[3];
- u32 sbactcnfg; /* activate configuration */
- u32 PAD[3];
- u32 sbflagst; /* current sbflags */
- u32 PAD[3];
- u32 sbidlow; /* identification */
- u32 sbidhigh; /* identification */
-};
-
-/* sdio core registers */
-struct sdpcmd_regs {
- u32 corecontrol; /* 0x00, rev8 */
- u32 corestatus; /* rev8 */
- u32 PAD[1];
- u32 biststatus; /* rev8 */
-
- /* PCMCIA access */
- u16 pcmciamesportaladdr; /* 0x010, rev8 */
- u16 PAD[1];
- u16 pcmciamesportalmask; /* rev8 */
- u16 PAD[1];
- u16 pcmciawrframebc; /* rev8 */
- u16 PAD[1];
- u16 pcmciaunderflowtimer; /* rev8 */
- u16 PAD[1];
-
- /* interrupt */
- u32 intstatus; /* 0x020, rev8 */
- u32 hostintmask; /* rev8 */
- u32 intmask; /* rev8 */
- u32 sbintstatus; /* rev8 */
- u32 sbintmask; /* rev8 */
- u32 funcintmask; /* rev4 */
- u32 PAD[2];
- u32 tosbmailbox; /* 0x040, rev8 */
- u32 tohostmailbox; /* rev8 */
- u32 tosbmailboxdata; /* rev8 */
- u32 tohostmailboxdata; /* rev8 */
-
- /* synchronized access to registers in SDIO clock domain */
- u32 sdioaccess; /* 0x050, rev8 */
- u32 PAD[3];
-
- /* PCMCIA frame control */
- u8 pcmciaframectrl; /* 0x060, rev8 */
- u8 PAD[3];
- u8 pcmciawatermark; /* rev8 */
- u8 PAD[155];
-
- /* interrupt batching control */
- u32 intrcvlazy; /* 0x100, rev8 */
- u32 PAD[3];
-
- /* counters */
- u32 cmd52rd; /* 0x110, rev8 */
- u32 cmd52wr; /* rev8 */
- u32 cmd53rd; /* rev8 */
- u32 cmd53wr; /* rev8 */
- u32 abort; /* rev8 */
- u32 datacrcerror; /* rev8 */
- u32 rdoutofsync; /* rev8 */
- u32 wroutofsync; /* rev8 */
- u32 writebusy; /* rev8 */
- u32 readwait; /* rev8 */
- u32 readterm; /* rev8 */
- u32 writeterm; /* rev8 */
- u32 PAD[40];
- u32 clockctlstatus; /* rev8 */
- u32 PAD[7];
-
- u32 PAD[128]; /* DMA engines */
-
- /* SDIO/PCMCIA CIS region */
- char cis[512]; /* 0x400-0x5ff, rev6 */
-
- /* PCMCIA function control registers */
- char pcmciafcr[256]; /* 0x600-6ff, rev6 */
- u16 PAD[55];
-
- /* PCMCIA backplane access */
- u16 backplanecsr; /* 0x76E, rev6 */
- u16 backplaneaddr0; /* rev6 */
- u16 backplaneaddr1; /* rev6 */
- u16 backplaneaddr2; /* rev6 */
- u16 backplaneaddr3; /* rev6 */
- u16 backplanedata0; /* rev6 */
- u16 backplanedata1; /* rev6 */
- u16 backplanedata2; /* rev6 */
- u16 backplanedata3; /* rev6 */
- u16 PAD[31];
-
- /* sprom "size" & "blank" info */
- u16 spromstatus; /* 0x7BE, rev2 */
- u32 PAD[464];
-
- u16 PAD[0x80];
-};
-
-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip **ci_ptr);
-void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
-void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci,
- u32 drivestrength);
-u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci);
-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec);
-
-#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 092e9c824992..3deab7959a0d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -180,6 +180,97 @@ struct brcmf_sdio_dev {
uint max_request_size;
ushort max_segment_count;
uint max_segment_size;
+ uint txglomsz;
+ struct sg_table sgtable;
+};
+
+/* sdio core registers */
+struct sdpcmd_regs {
+ u32 corecontrol; /* 0x00, rev8 */
+ u32 corestatus; /* rev8 */
+ u32 PAD[1];
+ u32 biststatus; /* rev8 */
+
+ /* PCMCIA access */
+ u16 pcmciamesportaladdr; /* 0x010, rev8 */
+ u16 PAD[1];
+ u16 pcmciamesportalmask; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciawrframebc; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciaunderflowtimer; /* rev8 */
+ u16 PAD[1];
+
+ /* interrupt */
+ u32 intstatus; /* 0x020, rev8 */
+ u32 hostintmask; /* rev8 */
+ u32 intmask; /* rev8 */
+ u32 sbintstatus; /* rev8 */
+ u32 sbintmask; /* rev8 */
+ u32 funcintmask; /* rev4 */
+ u32 PAD[2];
+ u32 tosbmailbox; /* 0x040, rev8 */
+ u32 tohostmailbox; /* rev8 */
+ u32 tosbmailboxdata; /* rev8 */
+ u32 tohostmailboxdata; /* rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ u32 sdioaccess; /* 0x050, rev8 */
+ u32 PAD[3];
+
+ /* PCMCIA frame control */
+ u8 pcmciaframectrl; /* 0x060, rev8 */
+ u8 PAD[3];
+ u8 pcmciawatermark; /* rev8 */
+ u8 PAD[155];
+
+ /* interrupt batching control */
+ u32 intrcvlazy; /* 0x100, rev8 */
+ u32 PAD[3];
+
+ /* counters */
+ u32 cmd52rd; /* 0x110, rev8 */
+ u32 cmd52wr; /* rev8 */
+ u32 cmd53rd; /* rev8 */
+ u32 cmd53wr; /* rev8 */
+ u32 abort; /* rev8 */
+ u32 datacrcerror; /* rev8 */
+ u32 rdoutofsync; /* rev8 */
+ u32 wroutofsync; /* rev8 */
+ u32 writebusy; /* rev8 */
+ u32 readwait; /* rev8 */
+ u32 readterm; /* rev8 */
+ u32 writeterm; /* rev8 */
+ u32 PAD[40];
+ u32 clockctlstatus; /* rev8 */
+ u32 PAD[7];
+
+ u32 PAD[128]; /* DMA engines */
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* 0x600-6ff, rev6 */
+ u16 PAD[55];
+
+ /* PCMCIA backplane access */
+ u16 backplanecsr; /* 0x76E, rev6 */
+ u16 backplaneaddr0; /* rev6 */
+ u16 backplaneaddr1; /* rev6 */
+ u16 backplaneaddr2; /* rev6 */
+ u16 backplaneaddr3; /* rev6 */
+ u16 backplanedata0; /* rev6 */
+ u16 backplanedata1; /* rev6 */
+ u16 backplanedata2; /* rev6 */
+ u16 backplanedata3; /* rev6 */
+ u16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ u16 spromstatus; /* 0x7BE, rev2 */
+ u32 PAD[464];
+
+ u16 PAD[0x80];
};
/* Register/deregister interrupt handler. */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index d7718a5fa2f0..afb3d15e38ff 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/etherdevice.h>
+#include <linux/module.h>
#include <net/cfg80211.h>
#include <net/netlink.h>
@@ -190,6 +191,7 @@ static struct ieee80211_supported_band __wl_band_2ghz = {
.n_channels = ARRAY_SIZE(__wl_2ghz_channels),
.bitrates = wl_g_rates,
.n_bitrates = wl_g_rates_size,
+ .ht_cap = {IEEE80211_HT_CAP_SUP_WIDTH_20_40, true},
};
static struct ieee80211_supported_band __wl_band_5ghz_a = {
@@ -251,6 +253,10 @@ struct parsed_vndr_ies {
struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
};
+static int brcmf_roamoff;
+module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
+MODULE_PARM_DESC(roamoff, "do not use internal roaming engine");
+
/* Quarter dBm units to mW
* Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
* Table is offset so the last entry is largest mW value that fits in
@@ -351,13 +357,11 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
* triples, returning a pointer to the substring whose first element
* matches tag
*/
-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key)
{
- struct brcmf_tlv *elt;
- int totlen;
-
- elt = (struct brcmf_tlv *)buf;
- totlen = buflen;
+ const struct brcmf_tlv *elt = buf;
+ int totlen = buflen;
/* find tagged parameter */
while (totlen >= TLV_HDR_LEN) {
@@ -378,8 +382,8 @@ struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
* not update the tlvs buffer pointer/length.
*/
static bool
-brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
- u8 *oui, u32 oui_len, u8 type)
+brcmf_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len,
+ const u8 *oui, u32 oui_len, u8 type)
{
/* If the contents match the OUI and the type */
if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
@@ -401,12 +405,12 @@ brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
}
static struct brcmf_vs_tlv *
-brcmf_find_wpaie(u8 *parse, u32 len)
+brcmf_find_wpaie(const u8 *parse, u32 len)
{
- struct brcmf_tlv *ie;
+ const struct brcmf_tlv *ie;
while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
- if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+ if (brcmf_tlv_has_ie((const u8 *)ie, &parse, &len,
WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
return (struct brcmf_vs_tlv *)ie;
}
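
brcmf_parse_tlvs(), now operating on const buffers, is a plain id/length/value walk: return the first element whose tag matches, otherwise skip length + 2 bytes and continue while at least a header remains. A standalone version of that loop; the two-byte header matches the TLV_HDR_LEN convention, and the sample buffer uses the standard RSN element id 48.

	#include <stdint.h>
	#include <stdio.h>

	#define TLV_HDR_LEN	2		/* id byte + length byte */

	struct tlv {
		uint8_t id;
		uint8_t len;
		uint8_t data[];
	};

	static const struct tlv *find_tlv(const void *buf, int buflen, uint8_t key)
	{
		const uint8_t *p = buf;

		while (buflen >= TLV_HDR_LEN) {
			const struct tlv *elt = (const struct tlv *)p;
			int eltlen = TLV_HDR_LEN + elt->len;

			if (elt->id == key && eltlen <= buflen)
				return elt;	/* first matching element wins */

			p += eltlen;
			buflen -= eltlen;
		}
		return NULL;
	}

	int main(void)
	{
		/* two elements: id 0 ("SSID", 4 bytes) then id 48 (RSN, 2 bytes) */
		static const uint8_t ies[] = { 0, 4, 't', 'e', 's', 't', 48, 2, 1, 0 };
		const struct tlv *rsn = find_tlv(ies, sizeof(ies), 48);

		printf("RSN IE %sfound\n", rsn ? "" : "not ");
		return 0;
	}
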
@@ -414,9 +418,9 @@ brcmf_find_wpaie(u8 *parse, u32 len)
}
static struct brcmf_vs_tlv *
-brcmf_find_wpsie(u8 *parse, u32 len)
+brcmf_find_wpsie(const u8 *parse, u32 len)
{
- struct brcmf_tlv *ie;
+ const struct brcmf_tlv *ie;
while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
@@ -491,6 +495,19 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
return err;
}
+static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif)
+{
+ enum nl80211_iftype iftype;
+
+ iftype = vif->wdev.iftype;
+ return iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO;
+}
+
+static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
+{
+ return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
+}
+
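
These two helpers replace the driver-private WL_MODE_* bookkeeping that the rest of this patch removes: whether an interface is AP-like or IBSS is now derived directly from the cfg80211 interface type instead of being cached in the vif. A toy restatement of that mapping with a local enum, only to make the collapsed state explicit; the real code reads vif->wdev.iftype.

	#include <stdbool.h>
	#include <stdio.h>

	/* local subset of the nl80211 interface types relevant here */
	enum demo_iftype {
		DEMO_IFTYPE_ADHOC,
		DEMO_IFTYPE_STATION,
		DEMO_IFTYPE_AP,
		DEMO_IFTYPE_P2P_CLIENT,
		DEMO_IFTYPE_P2P_GO,
	};

	static bool is_apmode(enum demo_iftype t)
	{
		return t == DEMO_IFTYPE_AP || t == DEMO_IFTYPE_P2P_GO;
	}

	static bool is_ibssmode(enum demo_iftype t)
	{
		return t == DEMO_IFTYPE_ADHOC;
	}

	int main(void)
	{
		printf("P2P GO is AP-like: %d\n", is_apmode(DEMO_IFTYPE_P2P_GO));
		printf("station is IBSS: %d\n", is_ibssmode(DEMO_IFTYPE_STATION));
		return 0;
	}
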
static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
const char *name,
enum nl80211_iftype type,
@@ -651,7 +668,6 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
type);
return -EOPNOTSUPP;
case NL80211_IFTYPE_ADHOC:
- vif->mode = WL_MODE_IBSS;
infra = 0;
break;
case NL80211_IFTYPE_STATION:
@@ -667,12 +683,10 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
*/
return 0;
}
- vif->mode = WL_MODE_BSS;
infra = 1;
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
- vif->mode = WL_MODE_AP;
ap = 1;
break;
default:
@@ -696,7 +710,7 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
err = -EAGAIN;
goto done;
}
- brcmf_dbg(INFO, "IF Type = %s\n", (vif->mode == WL_MODE_IBSS) ?
+ brcmf_dbg(INFO, "IF Type = %s\n", brcmf_is_ibssmode(vif) ?
"Adhoc" : "Infra");
}
ndev->ieee80211_ptr->iftype = type;
@@ -1340,13 +1354,14 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
}
static s32
-brcmf_set_set_cipher(struct net_device *ndev,
- struct cfg80211_connect_params *sme)
+brcmf_set_wsec_mode(struct net_device *ndev,
+ struct cfg80211_connect_params *sme, bool mfp)
{
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
s32 pval = 0;
s32 gval = 0;
+ s32 wsec;
s32 err = 0;
if (sme->crypto.n_ciphers_pairwise) {
@@ -1398,7 +1413,12 @@ brcmf_set_set_cipher(struct net_device *ndev,
if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
sme->privacy)
pval = AES_ENABLED;
- err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", pval | gval);
+
+ if (mfp)
+ wsec = pval | gval | MFP_CAPABLE;
+ else
+ wsec = pval | gval;
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", wsec);
if (err) {
brcmf_err("error (%d)\n", err);
return err;
@@ -1562,13 +1582,12 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct ieee80211_channel *chan = sme->channel;
struct brcmf_join_params join_params;
size_t join_params_size;
- struct brcmf_tlv *rsn_ie;
- struct brcmf_vs_tlv *wpa_ie;
- void *ie;
+ const struct brcmf_tlv *rsn_ie;
+ const struct brcmf_vs_tlv *wpa_ie;
+ const void *ie;
u32 ie_len;
struct brcmf_ext_join_params_le *ext_join_params;
u16 chanspec;
-
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -1591,7 +1610,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ie_len = wpa_ie->len + TLV_HDR_LEN;
} else {
/* find the RSN_IE */
- rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+ rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie,
+ sme->ie_len,
WLAN_EID_RSN);
if (rsn_ie) {
ie = rsn_ie;
@@ -1636,7 +1656,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
- err = brcmf_set_set_cipher(ndev, sme);
+ err = brcmf_set_wsec_mode(ndev, sme, sme->mfp == NL80211_MFP_REQUIRED);
if (err) {
brcmf_err("wl_set_set_cipher failed (%d)\n", err);
goto done;
@@ -1678,22 +1698,9 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
profile->ssid.SSID_len);
- /*increase dwell time to receive probe response or detect Beacon
- * from target AP at a noisy air only during connect command
- */
- ext_join_params->scan_le.active_time =
- cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
- ext_join_params->scan_le.passive_time =
- cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+
/* Set up join scan parameters */
ext_join_params->scan_le.scan_type = -1;
- /* to sync with presence period of VSDB GO.
- * Send probe request more frequently. Probe request will be stopped
- * when it gets probe response from target AP/GO.
- */
- ext_join_params->scan_le.nprobes =
- cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
- BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
ext_join_params->scan_le.home_time = cpu_to_le32(-1);
if (sme->bssid)
@@ -1706,6 +1713,25 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ext_join_params->assoc_le.chanspec_list[0] =
cpu_to_le16(chanspec);
+ /* Increase dwell time to receive probe response or detect
+ * beacon from target AP at a noisy air only during connect
+ * command.
+ */
+ ext_join_params->scan_le.active_time =
+ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
+ ext_join_params->scan_le.passive_time =
+ cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+ /* To sync with presence period of VSDB GO send probe request
+ * more frequently. Probe request will be stopped when it gets
+ * probe response from target AP/GO.
+ */
+ ext_join_params->scan_le.nprobes =
+ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
+ BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
+ } else {
+ ext_join_params->scan_le.active_time = cpu_to_le32(-1);
+ ext_join_params->scan_le.passive_time = cpu_to_le32(-1);
+ ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
}
err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
@@ -1913,7 +1939,7 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
memcpy(key.data, params->key, key.len);
- if ((ifp->vif->mode != WL_MODE_AP) &&
+ if (!brcmf_is_apmode(ifp->vif) &&
(params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
@@ -1981,7 +2007,9 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (!check_vif_up(ifp->vif))
return -EIO;
- if (mac_addr) {
+ if (mac_addr &&
+ (params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+ (params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
brcmf_dbg(TRACE, "Exit");
return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
}
@@ -2010,7 +2038,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
- if (ifp->vif->mode != WL_MODE_AP) {
+ if (!brcmf_is_apmode(ifp->vif)) {
brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
@@ -2164,12 +2192,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
u8 *bssid = profile->bssid;
struct brcmf_sta_info_le sta_info_le;
+ u32 beacon_period;
+ u32 dtim_period;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
if (!check_vif_up(ifp->vif))
return -EIO;
- if (ifp->vif->mode == WL_MODE_AP) {
+ if (brcmf_is_apmode(ifp->vif)) {
memcpy(&sta_info_le, mac, ETH_ALEN);
err = brcmf_fil_iovar_data_get(ifp, "sta_info",
&sta_info_le,
@@ -2186,7 +2216,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
}
brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
sinfo->inactive_time, sinfo->connected_time);
- } else if (ifp->vif->mode == WL_MODE_BSS) {
+ } else if (ifp->vif->wdev.iftype == NL80211_IFTYPE_STATION) {
if (memcmp(mac, bssid, ETH_ALEN)) {
brcmf_err("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
mac, bssid);
@@ -2218,6 +2248,30 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->signal = rssi;
brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
}
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_BCNPRD,
+ &beacon_period);
+ if (err) {
+ brcmf_err("Could not get beacon period (%d)\n",
+ err);
+ goto done;
+ } else {
+ sinfo->bss_param.beacon_interval =
+ beacon_period;
+ brcmf_dbg(CONN, "Beacon peroid %d\n",
+ beacon_period);
+ }
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_DTIMPRD,
+ &dtim_period);
+ if (err) {
+ brcmf_err("Could not get DTIM period (%d)\n",
+ err);
+ goto done;
+ } else {
+ sinfo->bss_param.dtim_period = dtim_period;
+ brcmf_dbg(CONN, "DTIM peroid %d\n",
+ dtim_period);
+ }
+ sinfo->filled |= STATION_INFO_BSS_PARAM;
}
} else
err = -EPERM;
@@ -2444,18 +2498,13 @@ CleanUp:
return err;
}
-static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
-{
- return vif->mode == WL_MODE_IBSS;
-}
-
static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp)
{
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
- struct brcmf_tlv *tim;
+ const struct brcmf_tlv *tim;
u16 beacon_interval;
u8 dtim_period;
size_t ie_len;
@@ -3220,8 +3269,9 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
}
static s32
-brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
- bool is_rsn_ie)
+brcmf_configure_wpaie(struct net_device *ndev,
+ const struct brcmf_vs_tlv *wpa_ie,
+ bool is_rsn_ie)
{
struct brcmf_if *ifp = netdev_priv(ndev);
u32 auth = 0; /* d11 open authentication */
@@ -3707,11 +3757,11 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
s32 ie_offset;
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_tlv *ssid_ie;
+ const struct brcmf_tlv *ssid_ie;
struct brcmf_ssid_le ssid_le;
s32 err = -EPERM;
- struct brcmf_tlv *rsn_ie;
- struct brcmf_vs_tlv *wpa_ie;
+ const struct brcmf_tlv *rsn_ie;
+ const struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
@@ -4220,32 +4270,6 @@ static struct cfg80211_ops wl_cfg80211_ops = {
CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
};
-static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
-{
- switch (type) {
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_MESH_POINT:
- return -ENOTSUPP;
- case NL80211_IFTYPE_ADHOC:
- return WL_MODE_IBSS;
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_P2P_CLIENT:
- return WL_MODE_BSS;
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_P2P_GO:
- return WL_MODE_AP;
- case NL80211_IFTYPE_P2P_DEVICE:
- return WL_MODE_P2P;
- case NL80211_IFTYPE_UNSPECIFIED:
- default:
- break;
- }
-
- return -EINVAL;
-}
-
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
{
/* scheduled scan settings */
@@ -4370,7 +4394,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
vif->wdev.wiphy = cfg->wiphy;
vif->wdev.iftype = type;
- vif->mode = brcmf_nl80211_iftype_to_mode(type);
vif->pm_block = pm_block;
vif->roam_off = -1;
@@ -4416,7 +4439,9 @@ static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
u32 event = e->event_code;
u16 flags = e->flags;
- if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) {
+ if ((event == BRCMF_E_DEAUTH) || (event == BRCMF_E_DEAUTH_IND) ||
+ (event == BRCMF_E_DISASSOC_IND) ||
+ ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
brcmf_dbg(CONN, "Processing link down\n");
return true;
}
@@ -4658,16 +4683,19 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct net_device *ndev = ifp->ndev;
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+ struct ieee80211_channel *chan;
s32 err = 0;
+ u16 reason;
- if (ifp->vif->mode == WL_MODE_AP) {
+ if (brcmf_is_apmode(ifp->vif)) {
err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
} else if (brcmf_is_linkup(e)) {
brcmf_dbg(CONN, "Linkup\n");
if (brcmf_is_ibssmode(ifp->vif)) {
+ chan = ieee80211_get_channel(cfg->wiphy, cfg->channel);
memcpy(profile->bssid, e->addr, ETH_ALEN);
wl_inform_ibss(cfg, ndev, e->addr);
- cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
+ cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL);
clear_bit(BRCMF_VIF_STATUS_CONNECTING,
&ifp->vif->sme_state);
set_bit(BRCMF_VIF_STATUS_CONNECTED,
@@ -4679,9 +4707,15 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
if (!brcmf_is_ibssmode(ifp->vif)) {
brcmf_bss_connect_done(cfg, ndev, e, false);
if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
- &ifp->vif->sme_state))
- cfg80211_disconnected(ndev, 0, NULL, 0,
+ &ifp->vif->sme_state)) {
+ reason = 0;
+ if (((e->event_code == BRCMF_E_DEAUTH_IND) ||
+ (e->event_code == BRCMF_E_DISASSOC_IND)) &&
+ (e->reason != WLAN_REASON_UNSPECIFIED))
+ reason = e->reason;
+ cfg80211_disconnected(ndev, reason, NULL, 0,
GFP_KERNEL);
+ }
}
brcmf_link_down(ifp->vif);
brcmf_init_prof(ndev_to_prof(ndev));
@@ -4875,11 +4909,8 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
cfg->scan_request = NULL;
cfg->pwr_save = true;
- cfg->roam_on = true; /* roam on & off switch.
- we enable roam per default */
- cfg->active_scan = true; /* we do active scan for
- specific scan per default */
- cfg->dongle_up = false; /* dongle is not up yet */
+ cfg->active_scan = true; /* we do active scan per default */
+ cfg->dongle_up = false; /* dongle is not up yet */
err = brcmf_init_priv_mem(cfg);
if (err)
return err;
@@ -4904,6 +4935,30 @@ static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
mutex_init(&event->vif_event_lock);
}
+static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
+{
+ struct brcmf_fil_bwcap_le band_bwcap;
+ u32 val;
+ int err;
+
+ /* verify support for bw_cap command */
+ val = WLC_BAND_5G;
+ err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
+
+ if (!err) {
+ /* only set 2G bandwidth using bw_cap command */
+ band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
+ band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+ err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
+ sizeof(band_bwcap));
+ } else {
+ brcmf_dbg(INFO, "fallback to mimo_bw_cap\n");
+ val = WLC_N_BW_40ALL;
+ err = brcmf_fil_iovar_int_set(ifp, "mimo_bw_cap", val);
+ }
+ return err;
+}
+
struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
struct device *busdev)
{
@@ -4961,6 +5016,17 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
goto cfg80211_p2p_attach_out;
}
+ /* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
+ * setup 40MHz in 2GHz band and enable OBSS scanning.
+ */
+ if (wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+ err = brcmf_enable_bw40_2g(ifp);
+ if (!err)
+ err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
+ BRCMF_OBSS_COEX_AUTO);
+ }
+
err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
if (err) {
brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
@@ -4999,7 +5065,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
}
static s32
-brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
+brcmf_dongle_roam(struct brcmf_if *ifp, u32 bcn_timeout)
{
s32 err = 0;
__le32 roamtrigger[2];
@@ -5009,7 +5075,7 @@ brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
* Setup timeout if Beacons are lost and roam is
* off to report link down
*/
- if (roamvar) {
+ if (brcmf_roamoff) {
err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
if (err) {
brcmf_err("bcn_timeout error (%d)\n", err);
@@ -5021,8 +5087,9 @@ brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
* Enable/Disable built-in roaming to allow supplicant
* to take care of roaming
*/
- brcmf_dbg(INFO, "Internal Roaming = %s\n", roamvar ? "Off" : "On");
- err = brcmf_fil_iovar_int_set(ifp, "roam_off", roamvar);
+ brcmf_dbg(INFO, "Internal Roaming = %s\n",
+ brcmf_roamoff ? "Off" : "On");
+ err = brcmf_fil_iovar_int_set(ifp, "roam_off", !!(brcmf_roamoff));
if (err) {
brcmf_err("roam_off error (%d)\n", err);
goto dongle_rom_out;
@@ -5164,9 +5231,6 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
ieee80211_channel_to_frequency(ch.chnum, band);
band_chan_arr[index].hw_value = ch.chnum;
- brcmf_err("channel %d: f=%d bw=%d sb=%d\n",
- ch.chnum, band_chan_arr[index].center_freq,
- ch.bw, ch.sb);
if (ch.bw == BRCMU_CHAN_BW_40) {
/* assuming the order is HT20, HT40 Upper,
* HT40 lower from chanspecs
@@ -5267,6 +5331,8 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
u32 band_list[3];
u32 nmode;
u32 bw_cap[2] = { 0, 0 };
+ u32 rxchain;
+ u32 nchain;
s8 phy;
s32 err;
u32 nband;
@@ -5303,6 +5369,16 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
+ err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
+ if (err) {
+ brcmf_err("rxchain error (%d)\n", err);
+ nchain = 1;
+ } else {
+ for (nchain = 0; rxchain; nchain++)
+ rxchain = rxchain & (rxchain - 1);
+ }
+ brcmf_dbg(INFO, "nchain=%d\n", nchain);
+
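The for loop above counts how many bits are set in the rxchain bitmask, one bit per receive chain, by repeatedly clearing the lowest set bit. A standalone sketch of the same idea, for illustration only:

	/* Illustrative only: count RX chains by clearing the lowest set bit. */
	static unsigned int count_rx_chains(u32 rxchain)
	{
		unsigned int nchain;

		for (nchain = 0; rxchain; nchain++)
			rxchain &= rxchain - 1;	/* drop the lowest set bit */
		return nchain;
	}

So rxchain = 0x3 (two chains) gives nchain = 2, which later sizes the HT MCS rx_mask via the memset() further down in this patch.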
err = brcmf_construct_reginfo(cfg, bw_cap);
if (err) {
brcmf_err("brcmf_construct_reginfo failed (%d)\n", err);
@@ -5331,10 +5407,7 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
band->ht_cap.ht_supported = true;
band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
- /* An HT shall support all EQM rates for one spatial
- * stream
- */
- band->ht_cap.mcs.rx_mask[0] = 0xff;
+ memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
bands[band->band] = band;
}
@@ -5381,7 +5454,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
brcmf_dbg(INFO, "power save set to %s\n",
(power_mode ? "enabled" : "disabled"));
- err = brcmf_dongle_roam(ifp, (cfg->roam_on ? 0 : 1), WL_BEACON_TIMEOUT);
+ err = brcmf_dongle_roam(ifp, WL_BEACON_TIMEOUT);
if (err)
goto default_conf_out;
err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 2dc6a074e8ed..283c525a44f7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -89,21 +89,6 @@ enum brcmf_scan_status {
BRCMF_SCAN_STATUS_SUPPRESS,
};
-/**
- * enum wl_mode - driver mode of virtual interface.
- *
- * @WL_MODE_BSS: connects to BSS.
- * @WL_MODE_IBSS: operate as ad-hoc.
- * @WL_MODE_AP: operate as access-point.
- * @WL_MODE_P2P: provide P2P discovery.
- */
-enum wl_mode {
- WL_MODE_BSS,
- WL_MODE_IBSS,
- WL_MODE_AP,
- WL_MODE_P2P
-};
-
/* dongle configuration */
struct brcmf_cfg80211_conf {
u32 frag_threshold;
@@ -193,7 +178,6 @@ struct vif_saved_ie {
* @ifp: lower layer interface pointer
* @wdev: wireless device.
* @profile: profile information.
- * @mode: operating mode.
* @roam_off: roaming state.
* @sme_state: SME state using enum brcmf_vif_status bits.
* @pm_block: power-management blocked.
@@ -204,7 +188,6 @@ struct brcmf_cfg80211_vif {
struct brcmf_if *ifp;
struct wireless_dev wdev;
struct brcmf_cfg80211_profile profile;
- s32 mode;
s32 roam_off;
unsigned long sme_state;
bool pm_block;
@@ -402,7 +385,6 @@ struct brcmf_cfg80211_info {
bool ibss_starter;
bool pwr_save;
bool dongle_up;
- bool roam_on;
bool scan_tried;
u8 *dcmd_buf;
u8 *extra_buf;
@@ -491,7 +473,8 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len);
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key);
u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
struct ieee80211_channel *ch);
u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 925034b80e9c..8c5fa4e58139 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -426,6 +426,12 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
bool blocked;
int err;
+ if (!wl->ucode.bcm43xx_bomminor) {
+ err = brcms_request_fw(wl, wl->wlc->hw->d11core);
+ if (err)
+ return -ENOENT;
+ }
+
ieee80211_wake_queues(hw);
spin_lock_bh(&wl->lock);
blocked = brcms_rfkill_set_hw_state(wl);
@@ -433,14 +439,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
if (!blocked)
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
- if (!wl->ucode.bcm43xx_bomminor) {
- err = brcms_request_fw(wl, wl->wlc->hw->d11core);
- if (err) {
- brcms_remove(wl->wlc->hw->d11core);
- return -ENOENT;
- }
- }
-
spin_lock_bh(&wl->lock);
/* avoid acknowledging frames before a non-monitor device is added */
wl->mute_tx = true;
@@ -1094,12 +1092,6 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
* Attach to the WL device identified by vendor and device parameters.
* regs is a host accessible memory address pointing to WL device registers.
*
- * brcms_attach is not defined as static because in the case where no bus
- * is defined, wl_attach will never be called, and thus, gcc will issue
- * a warning that this function is defined but not used if we declare
- * it as static.
- *
- *
* is called in brcms_bcma_probe() context, therefore no locking required.
*/
static struct brcms_info *brcms_attach(struct bcma_device *pdev)
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 6fa5d4863782..d816270db3be 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -43,5 +43,6 @@
#define BCM4335_CHIP_ID 0x4335
#define BCM43362_CHIP_ID 43362
#define BCM4339_CHIP_ID 0x4339
+#define BCM4354_CHIP_ID 0x4354
#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index 7ca2aa1035b2..74419d4bd123 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -217,6 +217,9 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
#define WSEC_SWFLAG 0x0008
/* to go into transition mode without setting wep */
#define SES_OW_ENABLED 0x0040
+/* MFP */
+#define MFP_CAPABLE 0x0200
+#define MFP_REQUIRED 0x0400
/* WPA authentication mode bitvec */
#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index 5a9ffd3a6a6c..e23d67e0bfe6 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -202,8 +202,8 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
}
/* calculate the block size */
- tx_size = block_size = min((size_t)(firmware->size - put),
- (size_t)DOWNLOAD_BLOCK_SIZE);
+ tx_size = block_size = min_t(size_t, firmware->size - put,
+ DOWNLOAD_BLOCK_SIZE);
memcpy(buf, &firmware->data[put], block_size);
if (block_size < DOWNLOAD_BLOCK_SIZE) {
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 9f825f2620da..b6ec51923b20 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -677,6 +677,8 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12(
"ZoomAir 11Mbps High", "Rate wireless Networking",
0x273fe3db, 0x32a1eaee),
+ PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card",
+ 0xa37434e9, 0x9762e8f1),
PCMCIA_DEVICE_PROD_ID123(
"Pretec", "CompactWLAN Card 802.11b", "2.5",
0x1cadd3e5, 0xe697636c, 0x7a5bfcf1),
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 3aba49259ef1..dfc6dfc56d52 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -7065,7 +7065,7 @@ static int ipw2100_wx_set_nick(struct net_device *dev,
if (wrqu->data.length > IW_ESSID_MAX_SIZE)
return -E2BIG;
- wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
+ wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, wrqu->data.length);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 139326065bd9..c5aa404069f3 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -9169,7 +9169,7 @@ static int ipw_wx_set_nick(struct net_device *dev,
if (wrqu->data.length > IW_ESSID_MAX_SIZE)
return -E2BIG;
mutex_lock(&priv->mutex);
- wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
+ wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, wrqu->data.length);
IPW_DEBUG_TRACE("<<\n");
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 0487461ae4da..dc1d20cf64ee 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -1248,14 +1248,7 @@ il3945_rx_handle(struct il_priv *il)
len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
len += sizeof(u32); /* account for status word */
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
+ reclaim = il_need_reclaim(il, pkt);
/* Based on type of command response or notification,
* handle those that need handling via function in
@@ -1495,12 +1488,14 @@ il3945_irq_tasklet(struct il_priv *il)
if (inta & CSR_INT_BIT_WAKEUP) {
D_ISR("Wakeup interrupt\n");
il_rx_queue_update_write_ptr(il, &il->rxq);
+
+ spin_lock_irqsave(&il->lock, flags);
il_txq_update_write_ptr(il, &il->txq[0]);
il_txq_update_write_ptr(il, &il->txq[1]);
il_txq_update_write_ptr(il, &il->txq[2]);
il_txq_update_write_ptr(il, &il->txq[3]);
il_txq_update_write_ptr(il, &il->txq[4]);
- il_txq_update_write_ptr(il, &il->txq[5]);
+ spin_unlock_irqrestore(&il->lock, flags);
il->isr_stats.wakeup++;
handled |= CSR_INT_BIT_WAKEUP;
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index 9a45f6f626f6..76b0729ade17 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -891,8 +891,7 @@ il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
{
}
-static struct rate_control_ops rs_ops = {
- .module = NULL,
+static const struct rate_control_ops rs_ops = {
.name = RS_NAME,
.tx_status = il3945_rs_tx_status,
.get_rate = il3945_rs_get_rate,
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 43f488a8cda2..888ad5c74639 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -92,7 +92,6 @@ il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
* EEPROM
*/
struct il_mod_params il4965_mod_params = {
- .amsdu_size_8K = 1,
.restart_fw = 1,
/* the rest are 0 by default */
};
@@ -4274,17 +4273,7 @@ il4965_rx_handle(struct il_priv *il)
len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
len += sizeof(u32); /* account for status word */
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
- (pkt->hdr.cmd != N_RX_MPDU) &&
- (pkt->hdr.cmd != N_COMPRESSED_BA) &&
- (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
+ reclaim = il_need_reclaim(il, pkt);
/* Based on type of command response or notification,
* handle those that need handling via function in
@@ -6876,6 +6865,6 @@ module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0 [disabled])");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 4d5e33259ca8..eaaeea19d8c5 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2807,8 +2807,7 @@ il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
{
}
-static struct rate_control_ops rs_4965_ops = {
- .module = NULL,
+static const struct rate_control_ops rs_4965_ops = {
.name = IL4965_RS_NAME,
.tx_status = il4965_rs_tx_status,
.get_rate = il4965_rs_get_rate,
diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 048421511988..dd744135c956 100644
--- a/drivers/net/wireless/iwlegacy/commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -2270,7 +2270,8 @@ struct il_spectrum_notification {
*/
#define IL_POWER_VEC_SIZE 5
-#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
+#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
+#define IL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
struct il3945_powertable_cmd {
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 02e8233ccf29..4f42174d9994 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1078,29 +1078,82 @@ EXPORT_SYMBOL(il_get_channel_info);
* Setting power level allows the card to go to sleep when not busy.
*
* We calculate a sleep command based on the required latency, which
- * we get from mac80211. In order to handle thermal throttling, we can
- * also use pre-defined power levels.
+ * we get from mac80211.
*/
-/*
- * This defines the old power levels. They are still used by default
- * (level 1) and for thermal throttle (levels 3 through 5)
- */
-
-struct il_power_vec_entry {
- struct il_powertable_cmd cmd;
- u8 no_dtim; /* number of skip dtim */
-};
+#define SLP_VEC(X0, X1, X2, X3, X4) { \
+ cpu_to_le32(X0), \
+ cpu_to_le32(X1), \
+ cpu_to_le32(X2), \
+ cpu_to_le32(X3), \
+ cpu_to_le32(X4) \
+}
static void
-il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
+il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
+ const __le32 interval[3][IL_POWER_VEC_SIZE] = {
+ SLP_VEC(2, 2, 4, 6, 0xFF),
+ SLP_VEC(2, 4, 7, 10, 10),
+ SLP_VEC(4, 7, 10, 10, 0xFF)
+ };
+ int i, dtim_period, no_dtim;
+ u32 max_sleep;
+ bool skip;
+
memset(cmd, 0, sizeof(*cmd));
if (il->power_data.pci_pm)
cmd->flags |= IL_POWER_PCI_PM_MSK;
- D_POWER("Sleep command for CAM\n");
+ /* if no Power Save, we are done */
+ if (il->power_data.ps_disabled)
+ return;
+
+ cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
+ cmd->keep_alive_seconds = 0;
+ cmd->debug_flags = 0;
+ cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
+ cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
+ cmd->keep_alive_beacons = 0;
+
+ dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;
+
+ if (dtim_period <= 2) {
+ memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
+ no_dtim = 2;
+ } else if (dtim_period <= 10) {
+ memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
+ no_dtim = 2;
+ } else {
+ memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
+ no_dtim = 0;
+ }
+
+ if (dtim_period == 0) {
+ dtim_period = 1;
+ skip = false;
+ } else {
+ skip = !!no_dtim;
+ }
+
+ if (skip) {
+ __le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];
+
+ max_sleep = le32_to_cpu(tmp);
+ if (max_sleep == 0xFF)
+ max_sleep = dtim_period * (skip + 1);
+ else if (max_sleep > dtim_period)
+ max_sleep = (max_sleep / dtim_period) * dtim_period;
+ cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
+ } else {
+ max_sleep = dtim_period;
+ cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
+ }
+
+ for (i = 0; i < IL_POWER_VEC_SIZE; i++)
+ if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
+ cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
}
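Worked example of the new power-table logic above: with dtim_period = 3, the middle interval table {2, 4, 7, 10, 10} is chosen and no_dtim = 2, so skip is true; the last entry (10) is not 0xFF and is larger than the DTIM period, so max_sleep is rounded down to a multiple of it, (10 / 3) * 3 = 9, and IL_POWER_SLEEP_OVER_DTIM_MSK is set; the clamp loop then yields the sleep vector {2, 4, 7, 9, 9}. With dtim_period = 0 (not associated) the period is forced to 1, skip stays false, and every entry is clamped to 1.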
static int
@@ -1173,7 +1226,8 @@ il_power_update_mode(struct il_priv *il, bool force)
{
struct il_powertable_cmd cmd;
- il_power_sleep_cam_cmd(il, &cmd);
+ il_build_powertable_cmd(il, &cmd);
+
return il_power_set_mode(il, &cmd, force);
}
EXPORT_SYMBOL(il_power_update_mode);
@@ -5081,6 +5135,7 @@ set_ch_out:
}
if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
+ il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
ret = il_power_update_mode(il, false);
if (ret)
D_MAC80211("Error setting sleep level\n");
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index ad123d66ab6c..dfb13c70efe8 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1123,6 +1123,7 @@ struct il_power_mgr {
struct il_powertable_cmd sleep_cmd_next;
int debug_sleep_level_override;
bool pci_pm;
+ bool ps_disabled;
};
struct il_priv {
@@ -1597,7 +1598,7 @@ struct il_mod_params {
int disable_hw_scan; /* def: 0 = use h/w scan */
int num_of_queues; /* def: HW dependent */
int disable_11n; /* def: 0 = 11n capabilities enabled */
- int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
+ int amsdu_size_8K; /* def: 0 = disable 8K amsdu size */
int antenna; /* def: 0 = both antennas (use diversity) */
int restart_fw; /* def: 1 = restart firmware */
};
@@ -1978,6 +1979,20 @@ void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
u32 il_read_targ_mem(struct il_priv *il, u32 addr);
void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+static inline bool il_need_reclaim(struct il_priv *il, struct il_rx_pkt *pkt)
+{
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command. If the packet (e.g. Rx frame)
+ * originated from uCode, there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated, but
+ * apparently a few don't get set; catch them here.
+ */
+ return !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX &&
+ pkt->hdr.cmd != N_RX_PHY && pkt->hdr.cmd != N_RX &&
+ pkt->hdr.cmd != N_RX_MPDU && pkt->hdr.cmd != N_COMPRESSED_BA;
+}
+
static inline void
_il_write8(struct il_priv *il, u32 ofs, u8 val)
{
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 3eb2102ce236..74b3b4de7bb7 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -68,6 +68,19 @@ config IWLWIFI_OPMODE_MODULAR
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLWIFI && IWLDVM=n && IWLMVM=n
+config IWLWIFI_BCAST_FILTERING
+ bool "Enable broadcast filtering"
+ depends on IWLMVM
+ help
+ Say Y here to enable the default broadcast filtering
+ configuration.
+
+ Enabling broadcast filtering will drop any incoming wireless
+ broadcast frames, except for a few very specific predefined
+ patterns (e.g. incoming ARP requests).
+
+ If unsure, don't enable this option, as some programs might
+ expect incoming broadcasts for their normal operation.
+
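In a kernel .config this would read, for instance:

	CONFIG_IWLMVM=m
	CONFIG_IWLWIFI_BCAST_FILTERING=y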
menu "Debugging Options"
depends on IWLWIFI
@@ -111,6 +124,7 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
+
bool "iwlwifi device access tracing"
depends on IWLWIFI
depends on EVENT_TRACING
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 1fa64429bcc2..3d32f4120174 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -8,7 +8,7 @@ iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
-iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
+iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
iwlwifi-objs += $(iwlwifi-m)
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 562772d85102..c160dad03037 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -109,7 +109,7 @@ extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
struct iwl_ucode_capabilities;
-extern struct ieee80211_ops iwlagn_hw_ops;
+extern const struct ieee80211_ops iwlagn_hw_ops;
static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
{
@@ -480,7 +480,7 @@ do { \
} while (0)
#endif /* CONFIG_IWLWIFI_DEBUG */
-extern const char *iwl_dvm_cmd_strings[REPLY_MAX];
+extern const char *const iwl_dvm_cmd_strings[REPLY_MAX];
static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
{
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 7b140e487deb..758c54eeb206 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -317,7 +317,7 @@ static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.nrg_th_cca = 62,
};
-static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
+static const struct iwl_sensitivity_ranges iwl5150_sensitivity = {
.min_nrg_cck = 95,
.auto_corr_min_ofdm = 90,
.auto_corr_min_ofdm_mrc = 170,
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 73086c1629ca..dd55c9cf7ba8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1582,7 +1582,7 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-struct ieee80211_ops iwlagn_hw_ops = {
+const struct ieee80211_ops iwlagn_hw_ops = {
.tx = iwlagn_mac_tx,
.start = iwlagn_mac_start,
.stop = iwlagn_mac_stop,
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index ba1b1ea54252..6a6df71af1d7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -252,13 +252,17 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
struct iwl_priv *priv =
container_of(work, struct iwl_priv, bt_runtime_config);
+ mutex_lock(&priv->mutex);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
+ goto out;
/* dont send host command if rf-kill is on */
if (!iwl_is_ready_rf(priv))
- return;
+ goto out;
+
iwlagn_send_advance_bt_config(priv);
+out:
+ mutex_unlock(&priv->mutex);
}
static void iwl_bg_bt_full_concurrency(struct work_struct *work)
@@ -2035,7 +2039,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
ieee80211_free_txskb(priv->hw, skb);
}
-static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@@ -2045,6 +2049,8 @@ static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
+
+ return false;
}
static const struct iwl_op_mode_ops iwl_dvm_ops = {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 0977d93b529d..aa773a2da4ab 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -176,46 +176,46 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
* (2.4 GHz) band.
*/
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
{0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
{0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
};
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
{0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
{0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
{0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
{0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
{0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
};
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
{0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
{0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
{0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
@@ -1111,7 +1111,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl)
{
/* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+ const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
/* Check for invalid LQ type */
if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
@@ -1173,9 +1173,8 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
-
/* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
+ const u16 *tpt_tbl = tbl->expected_tpt;
s32 new_rate, high, low, start_hi;
u16 high_low;
@@ -3319,8 +3318,8 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
struct ieee80211_sta *sta, void *priv_sta)
{
}
-static struct rate_control_ops rs_ops = {
- .module = NULL,
+
+static const struct rate_control_ops rs_ops = {
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index bdd5644a400b..f6bd25cad203 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -315,7 +315,7 @@ struct iwl_scale_tbl_info {
u8 is_dup; /* 1 = duplicated data streams */
u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
u8 max_search; /* maximum number of tables we can search */
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
u32 current_rate; /* rate_n_flags, uCode API format */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 7a1bc1c547e1..cd8377346aff 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -39,7 +39,7 @@
#define IWL_CMD_ENTRY(x) [x] = #x
-const char *iwl_dvm_cmd_strings[REPLY_MAX] = {
+const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
IWL_CMD_ENTRY(REPLY_ALIVE),
IWL_CMD_ENTRY(REPLY_ERROR),
IWL_CMD_ENTRY(REPLY_ECHO),
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 2a59da2ff87a..003a546571d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -71,8 +71,8 @@
#define IWL3160_UCODE_API_MAX 8
/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 7
-#define IWL3160_UCODE_API_OK 7
+#define IWL7260_UCODE_API_OK 8
+#define IWL3160_UCODE_API_OK 8
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 7
@@ -95,6 +95,8 @@
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define NVM_HW_SECTION_NUM_FAMILY_7000 0
+
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -120,7 +122,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl7000_base_params, \
- .led_mode = IWL_LED_RF_STATE
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000
const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -131,6 +134,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
+ .lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
@@ -142,6 +146,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.high_temp = true,
.host_interrupt_operation_mode = true,
+ .lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_2n_cfg = {
@@ -152,6 +157,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
+ .lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_n_cfg = {
@@ -162,6 +168,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
+ .lp_xtal_workaround = true,
};
const struct iwl_cfg iwl3160_2ac_cfg = {
@@ -194,6 +201,17 @@ const struct iwl_cfg iwl3160_n_cfg = {
.host_interrupt_operation_mode = true,
};
+static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
+ {.pwr = 1600, .backoff = 0},
+ {.pwr = 1300, .backoff = 467},
+ {.pwr = 900, .backoff = 1900},
+ {.pwr = 800, .backoff = 2630},
+ {.pwr = 700, .backoff = 3720},
+ {.pwr = 600, .backoff = 5550},
+ {.pwr = 500, .backoff = 9350},
+ {0},
+};
+
const struct iwl_cfg iwl7265_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7265",
.fw_name_pre = IWL7265_FW_PRE,
@@ -201,6 +219,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265_2n_cfg = {
@@ -210,6 +229,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265_n_cfg = {
@@ -219,6 +239,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
new file mode 100644
index 000000000000..f5bd82b88592
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL8000_UCODE_API_MAX 8
+
+/* Oldest version we won't warn about */
+#define IWL8000_UCODE_API_OK 8
+
+/* Lowest firmware API version supported */
+#define IWL8000_UCODE_API_MIN 8
+
+/* NVM versions */
+#define IWL8000_NVM_VERSION 0x0a1d
+#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
+
+#define IWL8000_FW_PRE "iwlwifi-8000-"
+#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
+
+#define NVM_HW_SECTION_NUM_FAMILY_8000 10
+
+static const struct iwl_base_params iwl8000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .pll_cfg_val = 0,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl8000_ht_params = {
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_8000 \
+ .ucode_api_max = IWL8000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL8000_UCODE_API_OK, \
+ .ucode_api_min = IWL8000_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_8000, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .base_params = &iwl8000_base_params, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000
+
+const struct iwl_cfg iwl8260_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 8260",
+ .fw_name_pre = IWL8000_FW_PRE,
+ IWL_DEVICE_8000,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl8260_n_cfg = {
+ .name = "Intel(R) Dual Band Wireless-AC 8260",
+ .fw_name_pre = IWL8000_FW_PRE,
+ IWL_DEVICE_8000,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+};
+
+MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 1ced525157dc..3f17dc3f2c8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -84,6 +84,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_6050,
IWL_DEVICE_FAMILY_6150,
IWL_DEVICE_FAMILY_7000,
+ IWL_DEVICE_FAMILY_8000,
};
/*
@@ -192,6 +193,15 @@ struct iwl_eeprom_params {
bool enhanced_txpower;
};
+/* Tx-backoff power threshold
+ * @pwr: The power limit in mW
+ * @backoff: The tx-backoff in usec
+ */
+struct iwl_pwr_tx_backoff {
+ u32 pwr;
+ u32 backoff;
+};
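The hunks that actually consume this table are not part of this diff; the following is just one plausible lookup over the zero-terminated array, purely as a sketch (the function name is invented):

	/* Sketch only: return the backoff for the first entry whose power limit
	 * does not exceed the given platform limit; 0 if none matches.
	 */
	static u32 iwl_pwr_tx_backoff_sketch(const struct iwl_pwr_tx_backoff *tbl,
					     u32 pwr_limit_mw)
	{
		for (; tbl->pwr; tbl++)
			if (pwr_limit_mw >= tbl->pwr)
				return tbl->backoff;
		return 0;
	}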
+
/**
* struct iwl_cfg
* @name: Official name of the device
@@ -217,6 +227,9 @@ struct iwl_eeprom_params {
* @high_temp: Is this NIC designated to be in high temperature.
* @host_interrupt_operation_mode: device needs host interrupt operation
* mode set
+ * @d0i3: device uses d0i3 instead of d3
+ * @nvm_hw_section_num: the ID of the HW NVM section
+ * @pwr_tx_backoffs: translation table between power limits and backoffs
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -247,6 +260,10 @@ struct iwl_cfg {
const bool internal_wimax_coex;
const bool host_interrupt_operation_mode;
bool high_temp;
+ bool d0i3;
+ u8 nvm_hw_section_num;
+ bool lp_xtal_workaround;
+ const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
};
/*
@@ -307,6 +324,8 @@ extern const struct iwl_cfg iwl3160_n_cfg;
extern const struct iwl_cfg iwl7265_2ac_cfg;
extern const struct iwl_cfg iwl7265_2n_cfg;
extern const struct iwl_cfg iwl7265_n_cfg;
+extern const struct iwl_cfg iwl8260_2ac_cfg;
+extern const struct iwl_cfg iwl8260_n_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 9d325516c42d..fe129c94ae3e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -139,6 +139,13 @@
#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
/*
+ * CSR HW resources monitor registers
+ */
+#define CSR_MONITOR_CFG_REG (CSR_BASE+0x214)
+#define CSR_MONITOR_STATUS_REG (CSR_BASE+0x228)
+#define CSR_MONITOR_XTAL_RESOURCES (0x00000010)
+
+/*
* CSR Hardware Revision Workaround Register. Indicates hardware rev;
* "step" determines CCK backoff for txpower calculation. Used for 4965 only.
* See also CSR_HW_REV register.
@@ -173,6 +180,7 @@
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
@@ -240,6 +248,7 @@
* 001 -- MAC power-down
* 010 -- PHY (radio) power-down
* 011 -- Error
+ * 10: XTAL ON request
* 9-6: SYS_CONFIG
* Indicates current system configuration, reflecting pins on chip
* as forced high/low by device circuit board.
@@ -271,6 +280,7 @@
#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
+#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
@@ -395,37 +405,33 @@
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
-/* SECURE boot registers */
-#define CSR_SECURE_BOOT_CONFIG_ADDR (0x100)
-enum secure_boot_config_reg {
- CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
- CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
-};
-
-#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR (0x100)
-#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR (0x100)
-enum secure_boot_status_reg {
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000003,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
- CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
-};
-
-#define CSR_UCODE_LOAD_STATUS_ADDR (0x100)
-enum secure_load_status_reg {
- CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
- CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
- CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
- CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
-};
-
-#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
-#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
-
-#define CSR_SECURE_TIME_OUT (100)
+/*
+ * SHR target access (Shared block memory space)
+ *
+ * Shared internal registers can be accessed directly from the PCI bus
+ * through the SHR arbiter without the MAC HW having to be powered up.
+ * This is possible thanks to indirect read/write via the
+ * HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and HEEP_CTRL_WRD_PCIEX_DATA (0xF4)
+ * registers.
+ *
+ * Use the iwl_write32()/iwl_read32() family to access these registers. The
+ * MAC HW need not be powered up, so no "grab NIC access" is required.
+ */
-#define FH_TCSR_0_REG0 (0x1D00)
+/*
+ * Registers for accessing shared registers (e.g. SHR_APMG_GP1,
+ * SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC),
+ * first, write to the control register:
+ * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
+ * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access)
+ * second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0].
+ *
+ * To write the register, first, write to the data register
+ * HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then:
+ * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
+ * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access)
+ */
+#define HEEP_CTRL_WRD_PCIEX_CTRL_REG (CSR_BASE+0x0ec)
+#define HEEP_CTRL_WRD_PCIEX_DATA_REG (CSR_BASE+0x0f4)
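A minimal sketch of the access sequence described in the comment above, assuming the usual iwl_write32()/iwl_read32() helpers and a struct iwl_trans handle (the helper names here are invented for illustration; the 2/3 access codes in bits 29:28 are taken from the comment):

	/* Illustration only, not part of the patch. */
	static u32 iwl_shr_read_sketch(struct iwl_trans *trans, u16 shr_offset)
	{
		/* [15:0] = SHR register offset, [29:28] = 2 for a read access */
		iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
			    shr_offset | (2 << 28));
		return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
	}

	static void iwl_shr_write_sketch(struct iwl_trans *trans, u16 shr_offset,
					 u32 val)
	{
		/* write the data first, then [29:28] = 3 for a write access */
		iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
		iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
			    shr_offset | (3 << 28));
	}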
/*
* HBUS (Host-side Bus)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index a75aac986a23..c8cbdbe15924 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -126,6 +126,7 @@ do { \
/* 0x00000F00 - 0x00000100 */
#define IWL_DL_POWER 0x00000100
#define IWL_DL_TEMP 0x00000200
+#define IWL_DL_RPM 0x00000400
#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC 0x00001000
@@ -189,5 +190,6 @@ do { \
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
+#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 75103554cd63..0a3e841b44a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -128,7 +128,7 @@ struct iwl_drv {
const struct iwl_cfg *cfg;
int fw_index; /* firmware we're trying to load */
- char firmware_name[25]; /* name of firmware file to load */
+ char firmware_name[32]; /* name of firmware file to load */
struct completion request_firmware_complete;
@@ -237,7 +237,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -ENOENT;
}
- sprintf(drv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+ snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
+ name_pre, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
@@ -403,6 +404,38 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
return 0;
}
+static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
+ struct iwl_ucode_capabilities *capa)
+{
+ const struct iwl_ucode_api *ucode_api = (void *)data;
+ u32 api_index = le32_to_cpu(ucode_api->api_index);
+
+ if (api_index >= IWL_API_ARRAY_SIZE) {
+ IWL_ERR(drv, "api_index larger than supported by driver\n");
+ return -EINVAL;
+ }
+
+ capa->api[api_index] = le32_to_cpu(ucode_api->api_flags);
+
+ return 0;
+}
+
+static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
+ struct iwl_ucode_capabilities *capa)
+{
+ const struct iwl_ucode_capa *ucode_capa = (void *)data;
+ u32 api_index = le32_to_cpu(ucode_capa->api_index);
+
+ if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) {
+ IWL_ERR(drv, "api_index larger than supported by driver\n");
+ return -EINVAL;
+ }
+
+ capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa);
+
+ return 0;
+}
+
static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces)
@@ -637,6 +670,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
*/
capa->flags = le32_to_cpup((__le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_API_CHANGES_SET:
+ if (tlv_len != sizeof(struct iwl_ucode_api))
+ goto invalid_tlv_len;
+ if (iwl_set_ucode_api_flags(drv, tlv_data, capa))
+ goto tlv_error;
+ break;
+ case IWL_UCODE_TLV_ENABLED_CAPABILITIES:
+ if (tlv_len != sizeof(struct iwl_ucode_capa))
+ goto invalid_tlv_len;
+ if (iwl_set_ucode_capabilities(drv, tlv_data, capa))
+ goto tlv_error;
+ break;
case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
@@ -727,6 +772,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
+ drv->fw.valid_tx_ant = (drv->fw.phy_config &
+ FW_PHY_CFG_TX_CHAIN) >>
+ FW_PHY_CFG_TX_CHAIN_POS;
+ drv->fw.valid_rx_ant = (drv->fw.phy_config &
+ FW_PHY_CFG_RX_CHAIN) >>
+ FW_PHY_CFG_RX_CHAIN_POS;
break;
case IWL_UCODE_TLV_SECURE_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
@@ -1300,8 +1351,7 @@ MODULE_PARM_DESC(antenna_coupling,
module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
MODULE_PARM_DESC(wd_disable,
- "Disable stuck queue watchdog timer 0=system default, "
- "1=disable, 2=enable (default: 0)");
+ "Disable stuck queue watchdog timer 0=system default, 1=disable (default: 1)");
module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
MODULE_PARM_DESC(nvm_file, "NVM file name");
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 592c01e11013..3c72cb710b0c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -70,6 +70,20 @@
#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
+/* radio config bits (actual values from NVM definition) */
+#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
+#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+#define NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(x) (x & 0xF)
+#define NVM_RF_CFG_DASH_MSK_FAMILY_8000(x) ((x >> 4) & 0xF)
+#define NVM_RF_CFG_STEP_MSK_FAMILY_8000(x) ((x >> 8) & 0xF)
+#define NVM_RF_CFG_TYPE_MSK_FAMILY_8000(x) ((x >> 12) & 0xFFF)
+#define NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(x) ((x >> 24) & 0xF)
+#define NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(x) ((x >> 28) & 0xF)
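To illustrate how the pre-family-8000 masks above split a radio configuration word, a minimal stand-alone sketch (the sample value is hypothetical, not taken from any NVM):

#include <stdio.h>
#include <stdint.h>

#define NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)          /* bits 0-1 */
#define NVM_RF_CFG_STEP_MSK(x)   ((x >> 2) & 0x3)   /* bits 2-3 */
#define NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4) & 0x3)   /* bits 4-5 */
#define NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6) & 0x3)   /* bits 6-7 */
#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF)   /* bits 8-11 */
#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF)  /* bits 12-15 */

int main(void)
{
	uint16_t radio_cfg = 0x3304;	/* hypothetical sample word */

	/* decodes as dash=0 step=1 type=0 pnum=0 tx_ant=0x3 rx_ant=0x3 */
	printf("dash=%d step=%d type=%d pnum=%d tx_ant=0x%x rx_ant=0x%x\n",
	       NVM_RF_CFG_DASH_MSK(radio_cfg),
	       NVM_RF_CFG_STEP_MSK(radio_cfg),
	       NVM_RF_CFG_TYPE_MSK(radio_cfg),
	       NVM_RF_CFG_PNUM_MSK(radio_cfg),
	       (unsigned)NVM_RF_CFG_TX_ANT_MSK(radio_cfg),
	       (unsigned)NVM_RF_CFG_RX_ANT_MSK(radio_cfg));
	return 0;
}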
/**
* DOC: Driver system flows - drv component
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index e3c7deafabe6..f0548b8a64b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -81,16 +81,17 @@ struct iwl_nvm_data {
bool sku_cap_band_24GHz_enable;
bool sku_cap_band_52GHz_enable;
bool sku_cap_11n_enable;
+ bool sku_cap_11ac_enable;
bool sku_cap_amt_enable;
bool sku_cap_ipan_enable;
- u8 radio_cfg_type;
+ u16 radio_cfg_type;
u8 radio_cfg_step;
u8 radio_cfg_dash;
u8 radio_cfg_pnum;
u8 valid_tx_ant, valid_rx_ant;
- u16 nvm_version;
+ u32 nvm_version;
s8 max_tx_pwr_half_dbm;
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 88e2d6eb569f..b45e576a4b57 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -126,6 +126,8 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
IWL_UCODE_TLV_NUM_OF_CPU = 27,
IWL_UCODE_TLV_CSCHEME = 28,
+ IWL_UCODE_TLV_API_CHANGES_SET = 29,
+ IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
};
struct iwl_ucode_tlv {
@@ -158,4 +160,19 @@ struct iwl_tlv_ucode_header {
u8 data[0];
};
+/*
+ * ucode TLVs
+ *
+ * ability to retrieve extensions (flags & capabilities) from ucode binary files
+ */
+struct iwl_ucode_api {
+ __le32 api_index;
+ __le32 api_flags;
+} __packed;
+
+struct iwl_ucode_capa {
+ __le32 api_index;
+ __le32 api_capa;
+} __packed;
+
#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 5f1493c44097..d14f19339d61 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -92,9 +92,11 @@
* @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
* @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
* containing CAM (Continuous Active Mode) indication.
- * @IWL_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
- * single bound interface).
+ * @IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM: support power save on BSS station and
+ * P2P client interfaces simultaneously if they are in different bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -116,9 +118,27 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
- IWL_UCODE_TLV_FLAGS_P2P_PS = BIT(21),
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
+ IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
+ IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
+};
+
+/**
+ * enum iwl_ucode_tlv_api - ucode api
+ * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
+ */
+enum iwl_ucode_tlv_api {
+ IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
+};
+
+/**
+ * enum iwl_ucode_tlv_capa - ucode capabilities
+ * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
+ */
+enum iwl_ucode_tlv_capa {
+ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
};
/* The default calibrate table size if not specified by firmware file */
@@ -160,13 +180,16 @@ enum iwl_ucode_sec {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
-#define IWL_UCODE_SECTION_MAX 6
-#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWL_UCODE_SECTION_MAX/2)
+#define IWL_UCODE_SECTION_MAX 12
+#define IWL_API_ARRAY_SIZE 1
+#define IWL_CAPABILITIES_ARRAY_SIZE 1
struct iwl_ucode_capabilities {
u32 max_probe_length;
u32 standard_phy_calibration_size;
u32 flags;
+ u32 api[IWL_API_ARRAY_SIZE];
+ u32 capa[IWL_CAPABILITIES_ARRAY_SIZE];
};
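Once the new IWL_UCODE_TLV_API_CHANGES_SET / IWL_UCODE_TLV_ENABLED_CAPABILITIES handlers above have filled these arrays, consumers only need a bit test. A minimal stand-alone sketch of that pattern, with the structure reduced to the field used here (names marked as illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define BIT(n)			(1U << (n))
#define IWL_API_ARRAY_SIZE	1

/* reduced, illustrative copy of the api[] field of iwl_ucode_capabilities */
struct ucode_capa_sketch {
	uint32_t api[IWL_API_ARRAY_SIZE];
};

/* mirrors IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID */
#define TLV_API_WOWLAN_CONFIG_TID	BIT(0)

static bool fw_has_api(const struct ucode_capa_sketch *capa, uint32_t flag)
{
	/* with IWL_API_ARRAY_SIZE == 1 the api_index is always 0 */
	return capa->api[0] & flag;
}

int main(void)
{
	struct ucode_capa_sketch capa = { .api = { TLV_API_WOWLAN_CONFIG_TID } };

	printf("wowlan config carries tid: %s\n",
	       fw_has_api(&capa, TLV_API_WOWLAN_CONFIG_TID) ? "yes" : "no");
	return 0;
}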
/* one for each uCode image (inst/data, init/runtime/wowlan) */
@@ -285,22 +308,12 @@ struct iwl_fw {
struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
u32 phy_config;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
bool mvm_fw;
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
};
-static inline u8 iwl_fw_valid_tx_ant(const struct iwl_fw *fw)
-{
- return (fw->phy_config & FW_PHY_CFG_TX_CHAIN) >>
- FW_PHY_CFG_TX_CHAIN_POS;
-}
-
-static inline u8 iwl_fw_valid_rx_ant(const struct iwl_fw *fw)
-{
- return (fw->phy_config & FW_PHY_CFG_RX_CHAIN) >>
- FW_PHY_CFG_RX_CHAIN_POS;
-}
-
#endif /* __iwl_fw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index f98175a0d35b..44cc3cf45762 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -93,14 +93,14 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
}
IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
-static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
+u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
u32 val = iwl_trans_read_prph(trans, ofs);
trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
return val;
}
-static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
iwl_trans_write_prph(trans, ofs, val);
@@ -130,6 +130,21 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
}
IWL_EXPORT_SYMBOL(iwl_write_prph);
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+ u32 bits, u32 mask, int timeout)
+{
+ int t = 0;
+
+ do {
+ if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
+ return t;
+ udelay(IWL_POLL_INTERVAL);
+ t += IWL_POLL_INTERVAL;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index c339c1bed080..665ddd9dbbc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -70,8 +70,12 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
+u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
+void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+ u32 bits, u32 mask, int timeout);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index b29075c3da8e..d994317db85b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -96,7 +96,7 @@ enum iwl_disable_11n {
* use IWL_[DIS,EN]ABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 0
* @restart_fw: restart firmware, default = 1
- * @wd_disable: enable stuck queue check, default = 0
+ * @wd_disable: disable stuck queue check, default = 1
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
* @power_save: disable power save, default = false
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 725e954d8475..6be30c698506 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -71,7 +71,7 @@ enum wkp_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
HW_ADDR = 0x15,
-/* NVM SW-Section offset (in words) definitions */
+ /* NVM SW-Section offset (in words) definitions */
NVM_SW_SECTION = 0x1C0,
NVM_VERSION = 0,
RADIO_CFG = 1,
@@ -79,11 +79,32 @@ enum wkp_nvm_offsets {
N_HW_ADDRS = 3,
NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
-/* NVM calibration section offset (in words) definitions */
+ /* NVM calibration section offset (in words) definitions */
NVM_CALIB_SECTION = 0x2B8,
XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
};
+enum family_8000_nvm_offsets {
+ /* NVM HW-Section offset (in words) definitions */
+ HW_ADDR0_FAMILY_8000 = 0x12,
+ HW_ADDR1_FAMILY_8000 = 0x16,
+ MAC_ADDRESS_OVERRIDE_FAMILY_8000 = 1,
+
+ /* NVM SW-Section offset (in words) definitions */
+ NVM_SW_SECTION_FAMILY_8000 = 0x1C0,
+ NVM_VERSION_FAMILY_8000 = 0,
+ RADIO_CFG_FAMILY_8000 = 2,
+ SKU_FAMILY_8000 = 4,
+ N_HW_ADDRS_FAMILY_8000 = 5,
+
+ /* NVM REGULATORY-Section offset (in words) definitions */
+ NVM_CHANNELS_FAMILY_8000 = 0,
+
+ /* NVM calibration section offset (in words) definitions */
+ NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
+ XTAL_CALIB_FAMILY_8000 = 0x316 - NVM_CALIB_SECTION_FAMILY_8000
+};
+
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
NVM_SKU_CAP_BAND_24GHZ = BIT(0),
@@ -92,14 +113,6 @@ enum nvm_sku_bits {
NVM_SKU_CAP_11AC_ENABLE = BIT(3),
};
-/* radio config bits (actual values from NVM definition) */
-#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
-#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
-#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
-#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
-#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
-#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
/*
* These are the channel numbers in the order that they are stored in the NVM
*/
@@ -112,7 +125,17 @@ static const u8 iwl_nvm_channels[] = {
149, 153, 157, 161, 165
};
+static const u8 iwl_nvm_channels_family_8000[] = {
+ /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ /* 5 GHz */
+ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
+ 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+ 149, 153, 157, 161, 165, 169, 173, 177, 181
+};
+
#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
+#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000)
#define NUM_2GHZ_CHANNELS 14
#define FIRST_2GHZ_HT_MINUS 5
#define LAST_2GHZ_HT_PLUS 9
@@ -179,8 +202,18 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct ieee80211_channel *channel;
u16 ch_flags;
bool is_5ghz;
+ int num_of_ch;
+ const u8 *nvm_chan;
+
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ num_of_ch = IWL_NUM_CHANNELS;
+ nvm_chan = &iwl_nvm_channels[0];
+ } else {
+ num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
+ nvm_chan = &iwl_nvm_channels_family_8000[0];
+ }
- for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
+ for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
if (ch_idx >= NUM_2GHZ_CHANNELS &&
@@ -190,7 +223,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
if (!(ch_flags & NVM_CHANNEL_VALID)) {
IWL_DEBUG_EEPROM(dev,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
- iwl_nvm_channels[ch_idx],
+ nvm_chan[ch_idx],
ch_flags,
(ch_idx >= NUM_2GHZ_CHANNELS) ?
"5.2" : "2.4");
@@ -200,7 +233,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel = &data->channels[n_channels];
n_channels++;
- channel->hw_value = iwl_nvm_channels[ch_idx];
+ channel->hw_value = nvm_chan[ch_idx];
channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
channel->center_freq =
@@ -211,11 +244,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags = IEEE80211_CHAN_NO_HT40;
if (ch_idx < NUM_2GHZ_CHANNELS &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
- if (iwl_nvm_channels[ch_idx] <= LAST_2GHZ_HT_PLUS)
+ if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
- if (iwl_nvm_channels[ch_idx] >= FIRST_2GHZ_HT_MINUS)
+ if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
- } else if (iwl_nvm_channels[ch_idx] <= LAST_5GHZ_HT &&
+ } else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -266,9 +299,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
- struct ieee80211_sta_vht_cap *vht_cap)
+ struct ieee80211_sta_vht_cap *vht_cap,
+ u8 tx_chains, u8 rx_chains)
{
- int num_ants = num_of_ant(data->valid_rx_ant);
+ int num_rx_ants = num_of_ant(rx_chains);
+ int num_tx_ants = num_of_ant(tx_chains);
vht_cap->vht_supported = true;
@@ -278,8 +313,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
- if (num_ants > 1)
+ if (num_tx_ants > 1)
vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
+ else
+ vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
if (iwlwifi_mod_params.amsdu_size_8K)
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
@@ -294,10 +331,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
- if (num_ants == 1 ||
- cfg->rx_with_siso_diversity) {
- vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
- IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+ if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
+ vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
/* this works because NOT_SUPPORTED == 3 */
vht_cap->vht_mcs.rx_mcs_map |=
cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
@@ -307,14 +342,23 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
}
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data, const __le16 *nvm_sw,
- bool enable_vht, u8 tx_chains, u8 rx_chains)
+ struct iwl_nvm_data *data,
+ const __le16 *ch_section, bool enable_vht,
+ u8 tx_chains, u8 rx_chains)
{
- int n_channels = iwl_init_channel_map(dev, cfg, data,
- &nvm_sw[NVM_CHANNELS]);
+ int n_channels;
int n_used = 0;
struct ieee80211_supported_band *sband;
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ n_channels = iwl_init_channel_map(
+ dev, cfg, data,
+ &ch_section[NVM_CHANNELS]);
+ else
+ n_channels = iwl_init_channel_map(
+ dev, cfg, data,
+ &ch_section[NVM_CHANNELS_FAMILY_8000]);
+
sband = &data->bands[IEEE80211_BAND_2GHZ];
sband->band = IEEE80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@@ -333,80 +377,160 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
tx_chains, rx_chains);
if (enable_vht)
- iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
+ iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
+ tx_chains, rx_chains);
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
}
+static int iwl_get_sku(const struct iwl_cfg *cfg,
+ const __le16 *nvm_sw)
+{
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + SKU);
+ else
+ return le32_to_cpup((__le32 *)(nvm_sw + SKU_FAMILY_8000));
+}
+
+static int iwl_get_nvm_version(const struct iwl_cfg *cfg,
+ const __le16 *nvm_sw)
+{
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + NVM_VERSION);
+ else
+ return le32_to_cpup((__le32 *)(nvm_sw +
+ NVM_VERSION_FAMILY_8000));
+}
+
+static int iwl_get_radio_cfg(const struct iwl_cfg *cfg,
+ const __le16 *nvm_sw)
+{
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + RADIO_CFG);
+ else
+ return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+}
+
+#define N_HW_ADDRS_MASK_FAMILY_8000 0xF
+static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg,
+ const __le16 *nvm_sw)
+{
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + N_HW_ADDRS);
+ else
+ return le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000))
+ & N_HW_ADDRS_MASK_FAMILY_8000;
+}
+
+static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ u32 radio_cfg)
+{
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
+ data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
+ data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
+ data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
+ return;
+ }
+
+ /* set the radio configuration for family 8000 */
+ data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK_FAMILY_8000(radio_cfg);
+ data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg);
+ data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg);
+ data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg);
+}
+
+static void iwl_set_hw_address(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ const __le16 *nvm_sec)
+{
+ u8 hw_addr[ETH_ALEN];
+
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ memcpy(hw_addr, nvm_sec + HW_ADDR, ETH_ALEN);
+ else
+ memcpy(hw_addr, nvm_sec + MAC_ADDRESS_OVERRIDE_FAMILY_8000,
+ ETH_ALEN);
+
+ /* The byte order is little endian 16 bit, meaning 214365 */
+ data->hw_addr[0] = hw_addr[1];
+ data->hw_addr[1] = hw_addr[0];
+ data->hw_addr[2] = hw_addr[3];
+ data->hw_addr[3] = hw_addr[2];
+ data->hw_addr[4] = hw_addr[5];
+ data->hw_addr[5] = hw_addr[4];
+}
+
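The swap above turns the 16-bit little-endian word stream read from the NVM into over-the-air byte order. A stand-alone sketch with a made-up address showing the "214365" effect:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

int main(void)
{
	/* hypothetical NVM content: words 0x0201, 0x0403, 0x0605 stored LE */
	uint8_t nvm_bytes[ETH_ALEN] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	uint8_t hw_addr[ETH_ALEN], mac[ETH_ALEN];
	int i;

	memcpy(hw_addr, nvm_bytes, ETH_ALEN);

	/* "214365": swap the two bytes inside each 16-bit word */
	for (i = 0; i < ETH_ALEN; i += 2) {
		mac[i] = hw_addr[i + 1];
		mac[i + 1] = hw_addr[i];
	}

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 02:01:04:03:06:05 */
	return 0;
}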
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
- const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains)
+ const __le16 *nvm_calib, const __le16 *regulatory,
+ const __le16 *mac_override, u8 tx_chains, u8 rx_chains)
{
struct iwl_nvm_data *data;
- u8 hw_addr[ETH_ALEN];
- u16 radio_cfg, sku;
-
- data = kzalloc(sizeof(*data) +
- sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
- GFP_KERNEL);
+ u32 sku;
+ u32 radio_cfg;
+
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ data = kzalloc(sizeof(*data) +
+ sizeof(struct ieee80211_channel) *
+ IWL_NUM_CHANNELS,
+ GFP_KERNEL);
+ else
+ data = kzalloc(sizeof(*data) +
+ sizeof(struct ieee80211_channel) *
+ IWL_NUM_CHANNELS_FAMILY_8000,
+ GFP_KERNEL);
if (!data)
return NULL;
- data->nvm_version = le16_to_cpup(nvm_sw + NVM_VERSION);
+ data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
- radio_cfg = le16_to_cpup(nvm_sw + RADIO_CFG);
- data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
- data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
- data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
- data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
- data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
- data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw);
+ iwl_set_radio_cfg(cfg, data, radio_cfg);
- sku = le16_to_cpup(nvm_sw + SKU);
+ sku = iwl_get_sku(cfg, nvm_sw);
data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
+ data->sku_cap_11ac_enable = sku & NVM_SKU_CAP_11AC_ENABLE;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
data->sku_cap_11n_enable = false;
- /* check overrides (some devices have wrong NVM) */
- if (cfg->valid_tx_ant)
- data->valid_tx_ant = cfg->valid_tx_ant;
- if (cfg->valid_rx_ant)
- data->valid_rx_ant = cfg->valid_rx_ant;
+ data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
- if (!data->valid_tx_ant || !data->valid_rx_ant) {
- IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
- data->valid_tx_ant, data->valid_rx_ant);
- kfree(data);
- return NULL;
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ /* Checking for required sections */
+ if (!nvm_calib) {
+ IWL_ERR_DEV(dev,
+ "Can't parse empty Calib NVM sections\n");
+ kfree(data);
+ return NULL;
+ }
+ /* in family 8000 Xtal calibration values moved to OTP */
+ data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
+ data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
}
- data->n_hw_addrs = le16_to_cpup(nvm_sw + N_HW_ADDRS);
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ iwl_set_hw_address(cfg, data, nvm_hw);
- data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
- data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
+ iwl_init_sbands(dev, cfg, data, nvm_sw,
+ sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
+ rx_chains);
+ } else {
+ /* MAC address in family 8000 */
+ iwl_set_hw_address(cfg, data, mac_override);
- /* The byte order is little endian 16 bit, meaning 214365 */
- memcpy(hw_addr, nvm_hw + HW_ADDR, ETH_ALEN);
- data->hw_addr[0] = hw_addr[1];
- data->hw_addr[1] = hw_addr[0];
- data->hw_addr[2] = hw_addr[3];
- data->hw_addr[3] = hw_addr[2];
- data->hw_addr[4] = hw_addr[5];
- data->hw_addr[5] = hw_addr[4];
-
- iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE,
- tx_chains, rx_chains);
+ iwl_init_sbands(dev, cfg, data, regulatory,
+ sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
+ rx_chains);
+ }
- data->calib_version = 255; /* TODO:
- this value will prevent some checks from
- failing, we need to check if this
- field is still needed, and if it does,
- where is it in the NVM*/
+ data->calib_version = 255;
return data;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index 0c4399aba8c6..c9c45a39d212 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -75,6 +75,7 @@
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
- const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains);
+ const __le16 *nvm_calib, const __le16 *regulatory,
+ const __le16 *mac_override, u8 tx_chains, u8 rx_chains);
#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index b5be51f3cd3d..ea29504ac617 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -119,7 +119,8 @@ struct iwl_cfg;
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
* @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
- * the radio is killed. May sleep.
+ * the radio is killed. Return %true if the device should be stopped by
+ * the transport immediately after the call. May sleep.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
@@ -131,6 +132,8 @@ struct iwl_cfg;
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
+ * @enter_d0i3: configure the fw to enter d0i3. May sleep.
+ * @exit_d0i3: configure the fw to exit d0i3. May sleep.
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -142,12 +145,14 @@ struct iwl_op_mode_ops {
struct iwl_device_cmd *cmd);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
- void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
+ bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
void (*nic_error)(struct iwl_op_mode *op_mode);
void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
void (*nic_config)(struct iwl_op_mode *op_mode);
void (*wimax_active)(struct iwl_op_mode *op_mode);
+ int (*enter_d0i3)(struct iwl_op_mode *op_mode);
+ int (*exit_d0i3)(struct iwl_op_mode *op_mode);
};
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -155,7 +160,7 @@ void iwl_opmode_deregister(const char *name);
/**
* struct iwl_op_mode - operational mode
- * @ops - pointer to its own ops
+ * @ops: pointer to its own ops
*
* This holds an implementation of the mac80211 / fw API.
*/
@@ -191,11 +196,11 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
op_mode->ops->queue_not_full(op_mode, queue);
}
-static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
- bool state)
+static inline bool __must_check
+iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
might_sleep();
- op_mode->ops->hw_rf_kill(op_mode, state);
+ return op_mode->ops->hw_rf_kill(op_mode, state);
}
static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
@@ -226,4 +231,22 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
op_mode->ops->wimax_active(op_mode);
}
+static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+ might_sleep();
+
+ if (!op_mode->ops->enter_d0i3)
+ return 0;
+ return op_mode->ops->enter_d0i3(op_mode);
+}
+
+static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ might_sleep();
+
+ if (!op_mode->ops->exit_d0i3)
+ return 0;
+ return op_mode->ops->exit_d0i3(op_mode);
+}
+
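These wrappers make the new enter_d0i3/exit_d0i3 hooks optional: an op_mode that does not provide them behaves as if the transition always succeeds. A stand-alone sketch of that optional-callback pattern (structure and names invented for illustration):

#include <stdio.h>

struct op_mode;

struct op_mode_ops {
	int (*enter_d0i3)(struct op_mode *op_mode);	/* may be NULL */
};

struct op_mode {
	const struct op_mode_ops *ops;
};

static int op_mode_enter_d0i3(struct op_mode *op_mode)
{
	if (!op_mode->ops->enter_d0i3)
		return 0;		/* not implemented: report success */
	return op_mode->ops->enter_d0i3(op_mode);
}

/* an op_mode with no d0i3 support */
static const struct op_mode_ops minimal_ops = { .enter_d0i3 = NULL };

int main(void)
{
	struct op_mode op = { .ops = &minimal_ops };

	printf("enter_d0i3 -> %d\n", op_mode_enter_d0i3(&op));
	return 0;
}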
#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index fa77d63a277a..b761ac4822a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -72,7 +72,7 @@
#include "iwl-trans.h"
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-#define IWL_NUM_PAPD_CH_GROUPS 4
+#define IWL_NUM_PAPD_CH_GROUPS 7
#define IWL_NUM_TXP_CH_GROUPS 9
struct iwl_phy_db_entry {
@@ -383,7 +383,7 @@ static int iwl_phy_db_send_all_channel_groups(
if (!entry)
return -EINVAL;
- if (WARN_ON_ONCE(!entry->size))
+ if (!entry->size)
continue;
/* Send the requested PHY DB section */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 100bd0d79681..5f657c501406 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -95,7 +95,8 @@
#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
-#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+#define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200)
+#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
#define APMG_RTC_INT_STT_RFKILL (0x10000000)
@@ -105,6 +106,33 @@
/* Device NMI register */
#define DEVICE_SET_NMI_REG 0x00a01c30
+/* Shared registers (0x0..0x3ff), via target indirect or periphery */
+#define SHR_BASE 0x00a10000
+
+/* Shared GP1 register */
+#define SHR_APMG_GP1_REG 0x01dc
+#define SHR_APMG_GP1_REG_PRPH (SHR_BASE + SHR_APMG_GP1_REG)
+#define SHR_APMG_GP1_WF_XTAL_LP_EN 0x00000004
+#define SHR_APMG_GP1_CHICKEN_BIT_SELECT 0x80000000
+
+/* Shared DL_CFG register */
+#define SHR_APMG_DL_CFG_REG 0x01c4
+#define SHR_APMG_DL_CFG_REG_PRPH (SHR_BASE + SHR_APMG_DL_CFG_REG)
+#define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK 0x000000c0
+#define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL 0x00000080
+#define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP 0x00000100
+
+/* Shared APMG_XTAL_CFG register */
+#define SHR_APMG_XTAL_CFG_REG 0x1c0
+#define SHR_APMG_XTAL_CFG_XTAL_ON_REQ 0x80000000
+
+/*
+ * Device reset for family 8000
+ * write to bit 24 in order to reset the CPU
+ */
+#define RELEASE_CPU_RESET (0x300C)
+#define RELEASE_CPU_RESET_BIT BIT(24)
+
/*****************************************************************************
* 7000/3000 series SHR DTS addresses *
*****************************************************************************/
@@ -281,4 +309,43 @@ static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
+/* SECURE boot registers */
+#define LMPM_SECURE_BOOT_CONFIG_ADDR (0x100)
+enum secure_boot_config_reg {
+ LMPM_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
+ LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
+};
+
+#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR (0x1E30)
+#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR (0x1E34)
+enum secure_boot_status_reg {
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000001,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
+ LMPM_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS = 0x00000003,
+};
+
+#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
+enum secure_load_status_reg {
+ LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
+ LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
+ LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
+ LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
+ LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
+};
+
+#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38)
+#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C)
+#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
+#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
+
+#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
+#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
+#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
+#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
+
+#define LMPM_SECURE_TIME_OUT (100)
+
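Together with the new iwl_poll_prph_bit() helper added in iwl-io.c above, the transport layer can wait for the verification status defined here. A hypothetical fragment, assuming it lives inside the driver next to these definitions and treating LMPM_SECURE_TIME_OUT as the poll budget for the sake of the sketch:

/* hypothetical sketch: wait for CPU1 secure boot verification to finish */
static int wait_secure_boot_cpu1(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
				LMPM_SECURE_BOOT_STATUS_SUCCESS,
				LMPM_SECURE_BOOT_STATUS_SUCCESS,
				LMPM_SECURE_TIME_OUT);
	if (ret < 0) {
		IWL_ERR(trans, "secure boot CPU1 verification timed out\n");
		return ret;
	}

	return 0;
}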
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 1f065cf4a4ba..8cdb0dd618a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -193,12 +193,23 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
* response. The caller needs to call iwl_free_resp when done.
+ * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
+ * command queue, but after other high priority commands. Valid only
+ * with CMD_ASYNC.
+ * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
+ * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
+ * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
+ * (i.e. mark it as non-idle).
*/
enum CMD_MODE {
CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
+ CMD_HIGH_PRIO = BIT(3),
+ CMD_SEND_IN_IDLE = BIT(4),
+ CMD_MAKE_TRANS_IDLE = BIT(5),
+ CMD_WAKE_UP_TRANS = BIT(6),
};
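A short sketch of how the new flags are meant to combine on a host command; per the kernel-doc above, CMD_HIGH_PRIO is only valid together with CMD_ASYNC (fragment for illustration, the command id is just reused from elsewhere in this patch):

struct iwl_host_cmd cmd = {
	.id = BT_CONFIG,			/* placeholder async-capable command */
	.flags = CMD_ASYNC | CMD_HIGH_PRIO,	/* jump ahead of regular async commands */
};

/* a command that must still go out while the transport is idle (d0i3): */
/*	.flags = CMD_ASYNC | CMD_SEND_IN_IDLE,					*/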
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -335,6 +346,9 @@ enum iwl_d3_status {
* @STATUS_INT_ENABLED: interrupts are enabled
* @STATUS_RFKILL: the HW RFkill switch is in KILL position
* @STATUS_FW_ERROR: the fw is in error state
+ * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
+ * are sent
+ * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -343,6 +357,8 @@ enum iwl_trans_status {
STATUS_INT_ENABLED,
STATUS_RFKILL,
STATUS_FW_ERROR,
+ STATUS_TRANS_GOING_IDLE,
+ STATUS_TRANS_IDLE,
};
/**
@@ -377,7 +393,7 @@ struct iwl_trans_config {
bool rx_buf_size_8k;
bool bc_table_dword;
unsigned int queue_watchdog_timeout;
- const char **command_names;
+ const char *const *command_names;
};
struct iwl_trans;
@@ -443,6 +459,11 @@ struct iwl_trans;
* @release_nic_access: let the NIC go to sleep. The "flags" parameter
* must be the same one that was sent before to the grab_nic_access.
* @set_bits_mask - set SRAM register according to value and mask.
+ * @ref: grab a reference to the transport/FW layers, disallowing
+ * certain low power states
+ * @unref: release a reference previously taken with @ref. Note that
+ * initially the reference count is 1, making an initial @unref
+ * necessary to allow low power states.
*/
struct iwl_trans_ops {
@@ -489,6 +510,8 @@ struct iwl_trans_ops {
unsigned long *flags);
void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
u32 value);
+ void (*ref)(struct iwl_trans *trans);
+ void (*unref)(struct iwl_trans *trans);
};
/**
@@ -523,6 +546,7 @@ enum iwl_trans_state {
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
+ * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -551,6 +575,8 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif
+ u64 dflt_pwr_limit;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -627,6 +653,18 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
return trans->ops->d3_resume(trans, status, test);
}
+static inline void iwl_trans_ref(struct iwl_trans *trans)
+{
+ if (trans->ops->ref)
+ trans->ops->ref(trans);
+}
+
+static inline void iwl_trans_unref(struct iwl_trans *trans)
+{
+ if (trans->ops->unref)
+ trans->ops->unref(trans);
+}
+
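As the kernel-doc above notes, the reference count starts at 1, so low power states are only reachable after the initial reference is dropped. A stand-alone sketch of that counting scheme (simplified, no locking, names invented):

#include <stdio.h>
#include <stdbool.h>

static int trans_refs = 1;	/* initial reference held from init time */

static void trans_ref(void)   { trans_refs++; }
static void trans_unref(void) { trans_refs--; }

static bool low_power_allowed(void)
{
	return trans_refs == 0;
}

int main(void)
{
	printf("after init: %d\n", low_power_allowed());	/* 0: blocked */

	trans_unref();		/* owner releases the initial reference */
	printf("idle: %d\n", low_power_allowed());		/* 1: allowed */

	trans_ref();		/* e.g. work that must keep the NIC awake */
	printf("busy: %d\n", low_power_allowed());		/* 0: blocked */
	trans_unref();
	return 0;
}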
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index f98ec2b23898..ccdd3b7c4cce 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -2,8 +2,8 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o power_legacy.o bt-coex.o
-iwlmvm-y += led.o tt.o
+iwlmvm-y += power.o coex.o
+iwlmvm-y += led.o tt.o offloading.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 18a895a949d4..685f7e8e6943 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -61,9 +61,11 @@
*
*****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
#include <net/mac80211.h>
-#include "fw-api-bt-coex.h"
+#include "fw-api-coex.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-debug.h"
@@ -305,6 +307,215 @@ static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
cpu_to_le32(0x33113311),
};
+struct corunning_block_luts {
+ u8 range;
+ __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
+};
+
+/*
+ * Ranges for the antenna coupling calibration / co-running block LUT:
+ * LUT0: [ 0, 12[
+ * LUT1: [12, 20[
+ * LUT2: [20, 21[
+ * LUT3: [21, 23[
+ * LUT4: [23, 27[
+ * LUT5: [27, 30[
+ * LUT6: [30, 32[
+ * LUT7: [32, 33[
+ * LUT8: [33, - [
+ */
+static const struct corunning_block_luts antenna_coupling_ranges[] = {
+ {
+ .range = 0,
+ .lut20 = {
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 12,
+ .lut20 = {
+ cpu_to_le32(0x00000001), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 20,
+ .lut20 = {
+ cpu_to_le32(0x00000002), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 21,
+ .lut20 = {
+ cpu_to_le32(0x00000003), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 23,
+ .lut20 = {
+ cpu_to_le32(0x00000004), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 27,
+ .lut20 = {
+ cpu_to_le32(0x00000005), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 30,
+ .lut20 = {
+ cpu_to_le32(0x00000006), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 32,
+ .lut20 = {
+ cpu_to_le32(0x00000007), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 33,
+ .lut20 = {
+ cpu_to_le32(0x00000008), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+};
+
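The iwl_mvm_rx_ant_coupling_notif() handler further down selects one of these LUTs by walking the .range boundaries. A stand-alone sketch of that lookup using the boundaries listed above (the isolation values fed in are hypothetical):

#include <stdio.h>

/* .range boundaries of antenna_coupling_ranges[], as listed above */
static const unsigned char range_start[] = { 0, 12, 20, 21, 23, 27, 30, 32, 33 };
#define N_RANGES (sizeof(range_start) / sizeof(range_start[0]))

static unsigned int pick_lut(unsigned int ant_isolation)
{
	unsigned int lut;

	for (lut = 0; lut < N_RANGES - 1; lut++)
		if (ant_isolation < range_start[lut + 1])
			break;
	return lut;
}

int main(void)
{
	/* isolation 25 falls in [23, 27[ -> LUT4, 40 falls in [33, -[ -> LUT8 */
	printf("25 -> LUT%u, 40 -> LUT%u\n", pick_lut(25), pick_lut(40));
	return 0;
}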
static enum iwl_bt_coex_lut_type
iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
{
@@ -378,7 +589,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
- flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
bt_cmd->flags = cpu_to_le32(flags);
bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
@@ -391,14 +601,26 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_LUT |
BT_VALID_WIFI_RX_SW_PRIO_BOOST |
BT_VALID_WIFI_TX_SW_PRIO_BOOST |
- BT_VALID_CORUN_LUT_20 |
- BT_VALID_CORUN_LUT_40 |
BT_VALID_ANT_ISOLATION |
BT_VALID_ANT_ISOLATION_THRS |
BT_VALID_TXTX_DELTA_FREQ_THRS |
BT_VALID_TXRX_MAX_FREQ_0 |
BT_VALID_SYNC_TO_SCO);
+ if (IWL_MVM_BT_COEX_SYNC2SCO)
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
+
+ if (IWL_MVM_BT_COEX_CORUNNING) {
+ bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+ BT_VALID_CORUN_LUT_40);
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
+ }
+
+ if (IWL_MVM_BT_COEX_MPLUT) {
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
+ bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+ }
+
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
sizeof(iwl_single_shared_ant));
@@ -406,6 +628,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
sizeof(iwl_combined_lookup));
+ /* Take first Co-running block LUT to get started */
+ memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
+ sizeof(bt_cmd->bt4_corun_lut20));
+ memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
+ sizeof(bt_cmd->bt4_corun_lut40));
+
memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
sizeof(iwl_bt_prio_boost));
memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
@@ -489,36 +717,26 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
return ret;
}
-static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
- bool enable)
+int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
{
struct iwl_bt_coex_cmd *bt_cmd;
/* Send ASYNC since this can be sent from an atomic context */
struct iwl_host_cmd cmd = {
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_DUP, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
.flags = CMD_ASYNC,
};
-
- struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
int ret;
- if (sta_id == IWL_MVM_STATION_COUNT)
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (!mvmsta)
return 0;
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
-
- /* This can happen if the station has been removed right now */
- if (IS_ERR_OR_NULL(sta))
- return 0;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
/* nothing to do */
- if (mvmsta->bt_reduced_txpower == enable)
+ if (mvmsta->bt_reduced_txpower_dbg ||
+ mvmsta->bt_reduced_txpower == enable)
return 0;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
@@ -552,6 +770,7 @@ struct iwl_bt_iterator_data {
bool reduced_tx_power;
struct ieee80211_chanctx_conf *primary;
struct ieee80211_chanctx_conf *secondary;
+ bool primary_ll;
};
static inline
@@ -577,72 +796,113 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct iwl_mvm *mvm = data->mvm;
struct ieee80211_chanctx_conf *chanctx_conf;
enum ieee80211_smps_mode smps_mode;
+ u32 bt_activity_grading;
int ave_rssi;
lockdep_assert_held(&mvm->mutex);
- if (vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_AP)
- return;
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ /* default smps_mode for BSS / P2P client is AUTOMATIC */
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ data->num_bss_ifaces++;
- smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ /*
+ * Count unassoc BSSes, relax SMPS constraints
+ * and disable reduced Tx Power
+ */
+ if (!vif->bss_conf.assoc) {
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ if (iwl_mvm_bt_coex_reduced_txp(mvm,
+ mvmvif->ap_sta_id,
+ false))
+ IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+ return;
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ /* default smps_mode for AP / GO is OFF */
+ smps_mode = IEEE80211_SMPS_OFF;
+ if (!mvmvif->ap_ibss_active) {
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ return;
+ }
+
+ /* the Ack / Cts kill mask must be default if AP / GO */
+ data->reduced_tx_power = false;
+ break;
+ default:
+ return;
+ }
chanctx_conf = rcu_dereference(vif->chanctx_conf);
/* If channel context is invalid or not on 2.4GHz .. */
if ((!chanctx_conf ||
chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
- /* ... and it is an associated STATION, relax constraints */
- if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ /* ... relax constraints and disable rssi events */
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
}
- /* SoftAP / GO will always be primary */
- if (vif->type == NL80211_IFTYPE_AP) {
- if (!mvmvif->ap_ibss_active)
- return;
+ bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
+ if (bt_activity_grading >= BT_HIGH_TRAFFIC)
+ smps_mode = IEEE80211_SMPS_STATIC;
+ else if (bt_activity_grading >= BT_LOW_TRAFFIC)
+ smps_mode = vif->type == NL80211_IFTYPE_AP ?
+ IEEE80211_SMPS_OFF :
+ IEEE80211_SMPS_DYNAMIC;
+ IWL_DEBUG_COEX(data->mvm,
+ "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
+ mvmvif->id, data->notif->bt_status, bt_activity_grading,
+ smps_mode);
- /* the Ack / Cts kill mask must be default if AP / GO */
- data->reduced_tx_power = false;
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
- if (chanctx_conf == data->primary)
- return;
+ /* low latency is always primary */
+ if (iwl_mvm_vif_low_latency(mvmvif)) {
+ data->primary_ll = true;
- /* downgrade the current primary no matter what its type is */
data->secondary = data->primary;
data->primary = chanctx_conf;
- return;
}
- data->num_bss_ifaces++;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (!mvmvif->ap_ibss_active)
+ return;
- /* we are now a STA / P2P Client, and take associated ones only */
- if (!vif->bss_conf.assoc)
+ if (chanctx_conf == data->primary)
+ return;
+
+ if (!data->primary_ll) {
+ /*
+ * downgrade the current primary no matter what its
+ * type is.
+ */
+ data->secondary = data->primary;
+ data->primary = chanctx_conf;
+ } else {
+ /* there is a low-latency vif - we will be secondary */
+ data->secondary = chanctx_conf;
+ }
return;
+ }
- /* STA / P2P Client, try to be primary if first vif */
+ /*
+ * STA / P2P Client, try to be primary if first vif. If we are in low
+ * latency mode, we are already in primary and just don't do much
+ */
if (!data->primary || data->primary == chanctx_conf)
data->primary = chanctx_conf;
else if (!data->secondary)
/* if secondary is not NULL, it might be a GO */
data->secondary = chanctx_conf;
- if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
- smps_mode = IEEE80211_SMPS_STATIC;
- else if (le32_to_cpu(data->notif->bt_activity_grading) >=
- BT_LOW_TRAFFIC)
- smps_mode = IEEE80211_SMPS_DYNAMIC;
-
- IWL_DEBUG_COEX(data->mvm,
- "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
- mvmvif->id, data->notif->bt_status,
- data->notif->bt_activity_grading, smps_mode);
-
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
-
/* don't reduce the Tx power if in loose scheme */
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
mvm->cfg->bt_shared_single_ant) {
@@ -918,8 +1178,8 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
-u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta)
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
enum iwl_bt_coex_lut_type lut_type;
@@ -955,6 +1215,38 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
}
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info, u8 ac)
+{
+ __le16 fc = hdr->frame_control;
+
+ if (info->band != IEEE80211_BAND_2GHZ)
+ return 0;
+
+ if (unlikely(mvm->bt_tx_prio))
+ return mvm->bt_tx_prio - 1;
+
+ /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
+ if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
+ is_multicast_ether_addr(hdr->addr1) ||
+ ieee80211_is_ctl(fc) || ieee80211_is_mgmt(fc) ||
+ ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc))
+ return 3;
+
+ switch (ac) {
+ case IEEE80211_AC_BE:
+ return 1;
+ case IEEE80211_AC_VO:
+ return 3;
+ case IEEE80211_AC_VI:
+ return 2;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
@@ -962,3 +1254,69 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
iwl_mvm_bt_coex_notif_handle(mvm);
}
+
+int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *dev_cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u32 ant_isolation = le32_to_cpup((void *)pkt->data);
+ u8 __maybe_unused lower_bound, upper_bound;
+ u8 lut;
+
+ struct iwl_bt_coex_cmd *bt_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = BT_CONFIG,
+ .len = { sizeof(*bt_cmd), },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ .flags = CMD_SYNC,
+ };
+
+ if (!IWL_MVM_BT_COEX_CORUNNING)
+ return 0;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (ant_isolation == mvm->last_ant_isol)
+ return 0;
+
+ for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
+ if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
+ break;
+
+ lower_bound = antenna_coupling_ranges[lut].range;
+
+ if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
+ upper_bound = antenna_coupling_ranges[lut + 1].range;
+ else
+ upper_bound = antenna_coupling_ranges[lut].range;
+
+ IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
+ ant_isolation, lower_bound, upper_bound, lut);
+
+ mvm->last_ant_isol = ant_isolation;
+
+ if (mvm->last_corun_lut == lut)
+ return 0;
+
+ mvm->last_corun_lut = lut;
+
+ bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+ if (!bt_cmd)
+ return 0;
+ cmd.data[0] = bt_cmd;
+
+ bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
+ BT_VALID_CORUN_LUT_20 |
+ BT_VALID_CORUN_LUT_40);
+
+ /* For the moment, use the same LUT for 20MHz and 40MHz */
+ memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
+ sizeof(bt_cmd->bt4_corun_lut20));
+
+ memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
+ sizeof(bt_cmd->bt4_corun_lut40));
+
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 036857698565..51685693af2e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -78,5 +78,9 @@
#define IWL_MVM_PS_SNOOZE_INTERVAL 25
#define IWL_MVM_PS_SNOOZE_WINDOW 50
#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
+#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64
+#define IWL_MVM_BT_COEX_SYNC2SCO 1
+#define IWL_MVM_BT_COEX_CORUNNING 1
+#define IWL_MVM_BT_COEX_MPLUT 1
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index f36a7ee0267f..e56f5a0edf85 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -376,139 +376,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return err;
}
-static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- union {
- struct iwl_proto_offload_cmd_v1 v1;
- struct iwl_proto_offload_cmd_v2 v2;
- struct iwl_proto_offload_cmd_v3_small v3s;
- struct iwl_proto_offload_cmd_v3_large v3l;
- } cmd = {};
- struct iwl_host_cmd hcmd = {
- .id = PROT_OFFLOAD_CONFIG_CMD,
- .flags = CMD_SYNC,
- .data[0] = &cmd,
- .dataflags[0] = IWL_HCMD_DFL_DUP,
- };
- struct iwl_proto_offload_cmd_common *common;
- u32 enabled = 0, size;
- u32 capa_flags = mvm->fw->ucode_capa.flags;
-#if IS_ENABLED(CONFIG_IPV6)
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int i;
-
- if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
- capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
- struct iwl_ns_config *nsc;
- struct iwl_targ_addr *addrs;
- int n_nsc, n_addrs;
- int c;
-
- if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
- nsc = cmd.v3s.ns_config;
- n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
- addrs = cmd.v3s.targ_addrs;
- n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
- } else {
- nsc = cmd.v3l.ns_config;
- n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
- addrs = cmd.v3l.targ_addrs;
- n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
- }
-
- if (mvmvif->num_target_ipv6_addrs)
- enabled |= IWL_D3_PROTO_OFFLOAD_NS;
-
- /*
- * For each address we have (and that will fit) fill a target
- * address struct and combine for NS offload structs with the
- * solicited node addresses.
- */
- for (i = 0, c = 0;
- i < mvmvif->num_target_ipv6_addrs &&
- i < n_addrs && c < n_nsc; i++) {
- struct in6_addr solicited_addr;
- int j;
-
- addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
- &solicited_addr);
- for (j = 0; j < c; j++)
- if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
- &solicited_addr) == 0)
- break;
- if (j == c)
- c++;
- addrs[i].addr = mvmvif->target_ipv6_addrs[i];
- addrs[i].config_num = cpu_to_le32(j);
- nsc[j].dest_ipv6_addr = solicited_addr;
- memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
- }
-
- if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
- cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
- else
- cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
- } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
- if (mvmvif->num_target_ipv6_addrs) {
- enabled |= IWL_D3_PROTO_OFFLOAD_NS;
- memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
- }
-
- BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
- sizeof(mvmvif->target_ipv6_addrs[0]));
-
- for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
- IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
- memcpy(cmd.v2.target_ipv6_addr[i],
- &mvmvif->target_ipv6_addrs[i],
- sizeof(cmd.v2.target_ipv6_addr[i]));
- } else {
- if (mvmvif->num_target_ipv6_addrs) {
- enabled |= IWL_D3_PROTO_OFFLOAD_NS;
- memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
- }
-
- BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
- sizeof(mvmvif->target_ipv6_addrs[0]));
-
- for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
- IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
- memcpy(cmd.v1.target_ipv6_addr[i],
- &mvmvif->target_ipv6_addrs[i],
- sizeof(cmd.v1.target_ipv6_addr[i]));
- }
-#endif
-
- if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
- common = &cmd.v3s.common;
- size = sizeof(cmd.v3s);
- } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
- common = &cmd.v3l.common;
- size = sizeof(cmd.v3l);
- } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
- common = &cmd.v2.common;
- size = sizeof(cmd.v2);
- } else {
- common = &cmd.v1.common;
- size = sizeof(cmd.v1);
- }
-
- if (vif->bss_conf.arp_addr_cnt) {
- enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
- common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
- memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
- }
-
- if (!enabled)
- return 0;
-
- common->enabled = cpu_to_le32(enabled);
-
- hcmd.len[0] = size;
- return iwl_mvm_send_cmd(mvm, &hcmd);
-}
-
enum iwl_mvm_tcp_packet_type {
MVM_TCP_TX_SYN,
MVM_TCP_RX_SYNACK,
@@ -846,8 +713,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
quota_cmd.quotas[0].id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
mvmvif->phy_ctxt->color));
- quota_cmd.quotas[0].quota = cpu_to_le32(100);
- quota_cmd.quotas[0].max_duration = cpu_to_le32(1000);
+ quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
for (i = 1; i < MAX_BINDINGS; i++)
quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
@@ -927,6 +794,20 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}
+static int
+iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
+ const struct iwl_wowlan_config_cmd_v3 *cmd)
+{
+ /* start only with the v2 part of the command */
+ u16 cmd_len = sizeof(cmd->common);
+
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
+ cmd_len = sizeof(*cmd);
+
+ return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, CMD_SYNC,
+ cmd_len, cmd);
+}
+
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan,
bool test)
@@ -939,7 +820,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif;
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
- struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
struct iwl_d3_manager_config d3_cfg_cmd_data = {
@@ -961,9 +842,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.tkip = &tkip_cmd,
.use_tkip = false,
};
- int ret, i;
+ int ret;
int len __maybe_unused;
- u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
if (!wowlan) {
/*
@@ -980,8 +860,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- old_aux_sta_id = mvm->aux_sta.sta_id;
-
/* see if there's only a single BSS vif and it's associated */
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -1005,49 +883,41 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
- /* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
+ /* TODO: wowlan_config_cmd.common.wowlan_ba_teardown_tids */
- wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
+ wowlan_config_cmd.common.is_11n_connection =
+ ap_sta->ht_cap.ht_supported;
/* Query the last used seqno and set it */
ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
if (ret < 0)
goto out_noreset;
- wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);
+ wowlan_config_cmd.common.non_qos_seq = cpu_to_le16(ret);
- /*
- * For QoS counters, we store the one to use next, so subtract 0x10
- * since the uCode will add 0x10 *before* using the value while we
- * increment after using the value (i.e. store the next value to use).
- */
- for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
- u16 seq = mvm_ap_sta->tid_data[i].seq_number;
- seq -= 0x10;
- wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
- }
+ iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &wowlan_config_cmd.common);
if (wowlan->disconnect)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE);
if (wowlan->magic_pkt)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
if (wowlan->gtk_rekey_failure)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
if (wowlan->eap_identity_req)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
if (wowlan->four_way_handshake)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
if (wowlan->n_patterns)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
if (wowlan->rfkill_release)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
if (wowlan->tcp) {
@@ -1055,7 +925,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* Set the "link change" (really "link lost") flag as well
* since that implies losing the TCP connection.
*/
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
@@ -1067,16 +937,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_stop_device(mvm->trans);
/*
- * The D3 firmware still hardcodes the AP station ID for the
- * BSS we're associated with as 0. Store the real STA ID here
- * and assign 0. When we leave this function, we'll restore
- * the original value for the resume code.
- */
- old_ap_sta_id = mvm_ap_sta->sta_id;
- mvm_ap_sta->sta_id = 0;
- mvmvif->ap_sta_id = 0;
-
- /*
* Set the HW restart bit -- this is mostly true as we're
* going to load new firmware and reprogram that, though
* the reprogramming is going to be manual to avoid adding
@@ -1096,16 +956,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->ptk_ivlen = 0;
mvm->ptk_icvlen = 0;
- /*
- * The D3 firmware still hardcodes the AP station ID for the
- * BSS we're associated with as 0. As a result, we have to move
- * the auxiliary station to ID 1 so the ID 0 remains free for
- * the AP station for later.
- * We set the sta_id to 1 here, and reset it to its previous
- * value (that we stored above) later.
- */
- mvm->aux_sta.sta_id = 1;
-
ret = iwl_mvm_load_d3_fw(mvm);
if (ret)
goto out;
@@ -1173,9 +1023,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
}
}
- ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION,
- CMD_SYNC, sizeof(wowlan_config_cmd),
- &wowlan_config_cmd);
+ ret = iwl_mvm_send_wowlan_config_cmd(mvm, &wowlan_config_cmd);
if (ret)
goto out;
@@ -1183,7 +1031,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
- ret = iwl_mvm_send_proto_offload(mvm, vif);
+ ret = iwl_mvm_send_proto_offload(mvm, vif, false, CMD_SYNC);
if (ret)
goto out;
@@ -1191,11 +1039,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
- ret = iwl_mvm_power_update_device_mode(mvm);
+ ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto out;
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
if (ret)
goto out;
@@ -1222,10 +1070,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_d3_suspend(mvm->trans, test);
out:
- mvm->aux_sta.sta_id = old_aux_sta_id;
- mvm_ap_sta->sta_id = old_ap_sta_id;
- mvmvif->ap_sta_id = old_ap_sta_id;
-
if (ret < 0)
ieee80211_restart_hw(mvm->hw);
out_noreset:
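The new iwl_mvm_send_wowlan_config_cmd() above sizes the command at runtime: only the embedded v2 portion is sent unless the firmware advertises IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID. A hedged sketch of the same prefix-compatible versioning trick, using stand-in struct and capability names rather than the real firmware API:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the v2/v3 layout: v3 embeds v2 and appends new fields. */
struct cfg_v2 {
	uint32_t wakeup_filter;
	uint16_t non_qos_seq;
	uint16_t qos_seq[8];
};

struct cfg_v3 {
	struct cfg_v2 common;	/* must stay first so a v2 prefix is valid */
	uint8_t offloading_tid;
	uint8_t reserved[3];
};

/* Pretend capability bit advertised by the firmware TLVs. */
#define CAP_WOWLAN_CONFIG_TID (1u << 0)

static size_t wowlan_cmd_len(uint32_t fw_api_caps, const struct cfg_v3 *cmd)
{
	/* Old firmware only parses the v2 prefix; send just that much. */
	if (!(fw_api_caps & CAP_WOWLAN_CONFIG_TID))
		return sizeof(cmd->common);
	return sizeof(*cmd);
}

int main(void)
{
	struct cfg_v3 cmd = { 0 };

	printf("v2-only length: %zu\n", wowlan_cmd_len(0, &cmd));
	printf("v3 length:      %zu\n",
	       wowlan_cmd_len(CAP_WOWLAN_CONFIG_TID, &cmd));
	return 0;
}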
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 0e29cd83a06a..9b59e1d7ae71 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -185,7 +185,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_pm(mvm, vif, param, val);
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -202,7 +202,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
int bufsz = sizeof(buf);
int pos;
- pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
+ pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -225,6 +225,29 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
ap_sta_id = mvmvif->ap_sta_id;
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_ADHOC:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
+ break;
+ case NL80211_IFTYPE_STATION:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
+ break;
+ case NL80211_IFTYPE_AP:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
+ break;
+ default:
+ break;
+ }
+
pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
mvmvif->id, mvmvif->color);
pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
@@ -249,9 +272,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
pos += scnprintf(buf+pos, bufsz-pos,
- "ap_sta_id %d - reduced Tx power %d\n",
+ "ap_sta_id %d - reduced Tx power %d force %d\n",
ap_sta_id,
- mvm_sta->bt_reduced_txpower);
+ mvm_sta->bt_reduced_txpower,
+ mvm_sta->bt_reduced_txpower_dbg);
}
}
@@ -269,6 +293,41 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_mvm_sta *mvmsta;
+ bool reduced_tx_power;
+ int ret;
+
+ if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return -ENOTCONN;
+
+ if (strtobool(buf, &reduced_tx_power) != 0)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
+ if (IS_ERR_OR_NULL(mvmsta)) {
+ mutex_unlock(&mvm->mutex);
+ return -ENOTCONN;
+ }
+
+ mvmsta->bt_reduced_txpower_dbg = false;
+ ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+ reduced_tx_power);
+ if (!ret)
+ mvmsta->bt_reduced_txpower_dbg = true;
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret ? : count;
+}
+
static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
enum iwl_dbgfs_bf_mask param, int value)
{
@@ -403,9 +462,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
else
- ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -460,6 +519,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u8 value;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &value);
+ if (ret)
+ return ret;
+ if (value > 1)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_update_low_latency(mvm, vif, value);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[3];
+
+ buf[0] = mvmvif->low_latency ? '1' : '0';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -473,6 +567,8 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -496,15 +592,18 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
}
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
+ iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
(vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
- mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS)))
+ mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
- MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
- S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
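The low_latency debugfs entry added above follows the usual boolean-knob pattern: the write handler parses a 0/1 value and rejects anything else, while the read handler reports the state as a tiny text buffer. A rough userspace analogue of that round trip (plain strtoul/snprintf in place of the kernel's kstrtou8 and simple_read_from_buffer):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int low_latency;	/* stands in for mvmvif->low_latency */

/* Accept only "0" or "1" (optionally newline-terminated), like the
 * kstrtou8 + range check in the write handler above.
 */
static int low_latency_write(const char *buf)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(buf, &end, 0);
	if (errno || end == buf || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	if (val > 1)
		return -EINVAL;

	low_latency = (int)val;
	return 0;
}

/* Mirror the read handler: report "0\n" or "1\n". */
static void low_latency_read(char *buf, size_t bufsz)
{
	snprintf(buf, bufsz, "%c\n", low_latency ? '1' : '0');
}

int main(void)
{
	char out[3];

	low_latency_write("1\n");
	low_latency_read(out, sizeof(out));
	printf("low_latency: %s", out);
	printf("bad input -> %d\n", low_latency_write("2"));
	return 0;
}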
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 369d4c90e669..1b52deea6081 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -60,11 +60,14 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
+#include <linux/vmalloc.h>
+
#include "mvm.h"
#include "sta.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "debugfs.h"
+#include "fw-error-dump.h"
static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
@@ -90,7 +93,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
int sta_id, drain, ret;
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
@@ -105,19 +108,63 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
mutex_lock(&mvm->mutex);
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(sta))
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+
+ if (!mvmsta)
ret = -ENOENT;
else
- ret = iwl_mvm_drain_sta(mvm, (void *)sta->drv_priv, drain) ? :
- count;
+ ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
mutex_unlock(&mvm->mutex);
return ret;
}
+static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
+{
+ struct iwl_mvm *mvm = inode->i_private;
+ int ret;
+
+ if (!mvm)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ if (!mvm->fw_error_dump) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ file->private_data = mvm->fw_error_dump;
+ mvm->fw_error_dump = NULL;
+ kfree(mvm->fw_error_sram);
+ mvm->fw_error_sram = NULL;
+ mvm->fw_error_sram_len = 0;
+ ret = 0;
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_fw_error_dump_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_fw_error_dump_file *dump_file = file->private_data;
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ dump_file,
+ le32_to_cpu(dump_file->file_len));
+}
+
+static int iwl_dbgfs_fw_error_dump_release(struct inode *inode,
+ struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -251,7 +298,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
}
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_power_update_device_mode(mvm);
+ ret = iwl_mvm_power_update_device(mvm);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -351,6 +398,9 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
le32_to_cpu(notif->secondary_ch_lut));
pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
le32_to_cpu(notif->bt_activity_grading));
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "antenna isolation = %d CORUN LUT index = %d\n",
+ mvm->last_ant_isol, mvm->last_corun_lut);
mutex_unlock(&mvm->mutex);
@@ -393,6 +443,22 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t
+iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ u32 bt_tx_prio;
+
+ if (sscanf(buf, "%u", &bt_tx_prio) != 1)
+ return -EINVAL;
+ if (bt_tx_prio > 4)
+ return -EINVAL;
+
+ mvm->bt_tx_prio = bt_tx_prio;
+
+ return count;
+}
+
#define PRINT_STATS_LE32(_str, _val) \
pos += scnprintf(buf + pos, bufsz - pos, \
fmt_table, _str, \
@@ -532,6 +598,80 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
}
#undef PRINT_STAT_LE32
+static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
+ char __user *user_buf, size_t count,
+ loff_t *ppos,
+ struct iwl_mvm_frame_stats *stats)
+{
+ char *buff, *pos, *endpos;
+ int idx, i;
+ int ret;
+ static const size_t bufsz = 1024;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ spin_lock_bh(&mvm->drv_stats_lock);
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ pos += scnprintf(pos, endpos - pos,
+ "Legacy/HT/VHT\t:\t%d/%d/%d\n",
+ stats->legacy_frames,
+ stats->ht_frames,
+ stats->vht_frames);
+ pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n",
+ stats->bw_20_frames,
+ stats->bw_40_frames,
+ stats->bw_80_frames);
+ pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n",
+ stats->ngi_frames,
+ stats->sgi_frames);
+ pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n",
+ stats->siso_frames,
+ stats->mimo2_frames);
+ pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n",
+ stats->fail_frames,
+ stats->success_frames);
+ pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n",
+ stats->agg_frames);
+ pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n",
+ stats->ampdu_count);
+ pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
+ stats->ampdu_count > 0 ?
+ (stats->agg_frames / stats->ampdu_count) : 0);
+
+ pos += scnprintf(pos, endpos - pos, "Last Rates\n");
+
+ idx = stats->last_frame_idx - 1;
+ for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
+ idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
+ if (stats->last_rates[idx] == 0)
+ continue;
+ pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
+ (int)(ARRAY_SIZE(stats->last_rates) - i));
+ pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
+ }
+ spin_unlock_bh(&mvm->drv_stats_lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+
+ return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos,
+ &mvm->drv_rx_stats);
+}
+
static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
@@ -592,7 +732,7 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
return -EINVAL;
if (scan_rx_ant > ANT_ABC)
return -EINVAL;
- if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
+ if (scan_rx_ant & ~mvm->fw->valid_rx_ant)
return -EINVAL;
mvm->scan_rx_ant = scan_rx_ant;
@@ -600,6 +740,187 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
return count;
}
+#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bcast_filter_cmd cmd;
+ const struct iwl_fw_bcast_filter *filter;
+ char *buf;
+ int bufsz = 1024;
+ int i, j, pos = 0;
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+ ADD_TEXT("None\n");
+ mutex_unlock(&mvm->mutex);
+ goto out;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
+ filter = &cmd.filters[i];
+
+ ADD_TEXT("Filter [%d]:\n", i);
+ ADD_TEXT("\tDiscard=%d\n", filter->discard);
+ ADD_TEXT("\tFrame Type: %s\n",
+ filter->frame_type ? "IPv4" : "Generic");
+
+ for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
+ const struct iwl_fw_bcast_filter_attr *attr;
+
+ attr = &filter->attrs[j];
+ if (!attr->mask)
+ break;
+
+ ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
+ j, attr->offset,
+ attr->offset_type ? "IP End" :
+ "Payload Start",
+ be32_to_cpu(attr->mask),
+ be32_to_cpu(attr->val),
+ le16_to_cpu(attr->reserved1));
+ }
+ }
+out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int pos, next_pos;
+ struct iwl_fw_bcast_filter filter = {};
+ struct iwl_bcast_filter_cmd cmd;
+ u32 filter_id, attr_id, mask, value;
+ int err = 0;
+
+ if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
+ &filter.frame_type, &pos) != 3)
+ return -EINVAL;
+
+ if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
+ filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
+ return -EINVAL;
+
+ for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
+ attr_id++) {
+ struct iwl_fw_bcast_filter_attr *attr =
+ &filter.attrs[attr_id];
+
+ if (pos >= count)
+ break;
+
+ if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
+ &attr->offset, &attr->offset_type,
+ &mask, &value, &next_pos) != 4)
+ return -EINVAL;
+
+ attr->mask = cpu_to_be32(mask);
+ attr->val = cpu_to_be32(value);
+ if (mask)
+ filter.num_attrs++;
+
+ pos += next_pos;
+ }
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
+ &filter, sizeof(filter));
+
+ /* send updated bcast filtering configuration */
+ if (mvm->dbgfs_bcast_filtering.override &&
+ iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return err ?: count;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bcast_filter_cmd cmd;
+ char *buf;
+ int bufsz = 1024;
+ int i, pos = 0;
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+ ADD_TEXT("None\n");
+ mutex_unlock(&mvm->mutex);
+ goto out;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
+ const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
+
+ ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
+ i, mac->default_discard, mac->attached_filters);
+ }
+out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_bcast_filter_cmd cmd;
+ struct iwl_fw_bcast_mac mac = {};
+ u32 mac_id, attached_filters;
+ int err = 0;
+
+ if (!mvm->bcast_filters)
+ return -ENOENT;
+
+ if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
+ &attached_filters) != 3)
+ return -EINVAL;
+
+ if (mac_id >= ARRAY_SIZE(cmd.macs) ||
+ mac.default_discard > 1 ||
+ attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
+ return -EINVAL;
+
+ mac.attached_filters = cpu_to_le16(attached_filters);
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
+ &mac, sizeof(mac));
+
+ /* send updated bcast filtering configuration */
+ if (mvm->dbgfs_bcast_filtering.override &&
+ iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return err ?: count;
+}
+#endif
+
#ifdef CONFIG_PM_SLEEP
static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
@@ -658,15 +979,117 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
}
#endif
+#define PRINT_MVM_REF(ref) do { \
+ if (test_bit(ref, mvm->ref_bitmap)) \
+ pos += scnprintf(buf + pos, bufsz - pos, \
+ "\t(0x%lx) %s\n", \
+ BIT(ref), #ref); \
+} while (0)
+
+static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[256];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%lx\n",
+ mvm->ref_bitmap[0]);
+
+ PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
+ PRINT_MVM_REF(IWL_MVM_REF_SCAN);
+ PRINT_MVM_REF(IWL_MVM_REF_ROC);
+ PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
+ PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
+ PRINT_MVM_REF(IWL_MVM_REF_USER);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long value;
+ int ret;
+ bool taken;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+
+ taken = test_bit(IWL_MVM_REF_USER, mvm->ref_bitmap);
+ if (value == 1 && !taken)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
+ else if (value == 0 && taken)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&mvm->mutex);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
-#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, mvm, \
+#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
+ if (!debugfs_create_file(alias, mode, parent, mvm, \
&iwl_dbgfs_##name##_ops)) \
goto err; \
} while (0)
+#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
+ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+static ssize_t
+iwl_dbgfs_prph_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[32];
+ const size_t bufsz = sizeof(buf);
+
+ if (!mvm->dbgfs_prph_reg_addr)
+ return -EINVAL;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n",
+ mvm->dbgfs_prph_reg_addr,
+ iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 args;
+ u32 value;
+
+ args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
+ /* if we only want to set the reg address - nothing more to do */
+ if (args == 1)
+ goto out;
+
+ /* otherwise, make sure we have both address and value */
+ if (args != 2)
+ return -EINVAL;
+
+ iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
+out:
+ return count;
+}
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
@@ -677,9 +1100,23 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
+MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
+
+static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
+ .open = iwl_dbgfs_fw_error_dump_open,
+ .read = iwl_dbgfs_fw_error_dump_read,
+ .release = iwl_dbgfs_fw_error_dump_release,
+};
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
+#endif
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
@@ -687,24 +1124,52 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
+ struct dentry *bcast_dir __maybe_unused;
char buf[100];
+ spin_lock_init(&mvm->drv_stats_lock);
+
mvm->debugfs_dir = dbgfs_dir;
MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
+ bcast_dir = debugfs_create_dir("bcast_filtering",
+ mvm->debugfs_dir);
+ if (!bcast_dir)
+ goto err;
+
+ if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR,
+ bcast_dir,
+ &mvm->dbgfs_bcast_filtering.override))
+ goto err;
+
+ MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
+ bcast_dir, S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
+ bcast_dir, S_IWUSR | S_IRUSR);
+ }
+#endif
+
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
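Among the additions above, iwl_dbgfs_frame_stats_read() prints the last_rates array as a ring buffer: it starts from last_frame_idx (presumably the next slot to be overwritten, i.e. the oldest entry), wraps around, and skips slots that were never filled. A self-contained demo of that traversal order, with invented contents:

#include <stdio.h>

#define N_LAST_RATES 8

int main(void)
{
	/* Pretend rates recorded so far; 0 means "slot not used yet". */
	int last_rates[N_LAST_RATES] = { 60, 70, 80, 0, 0, 10, 20, 50 };
	int last_frame_idx = 3;	/* next slot to be written */
	int idx, i;

	/* Walk from the oldest entry forward, wrapping around the array
	 * and skipping empty slots, exactly as the debugfs read does.
	 */
	idx = last_frame_idx - 1;
	for (i = 0; i < N_LAST_RATES; i++) {
		idx = (idx + 1) % N_LAST_RATES;
		if (last_rates[idx] == 0)
			continue;
		printf("Rate[%d]: %d\n", N_LAST_RATES - i, last_rates[idx]);
	}
	return 0;
}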
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index 1b4e54d416b0..21877e5966a8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -70,37 +70,28 @@
/**
* enum iwl_bt_coex_flags - flags for BT_COEX command
- * @BT_CH_PRIMARY_EN:
- * @BT_CH_SECONDARY_EN:
- * @BT_NOTIF_COEX_OFF:
* @BT_COEX_MODE_POS:
* @BT_COEX_MODE_MSK:
* @BT_COEX_DISABLE:
* @BT_COEX_2W:
* @BT_COEX_3W:
* @BT_COEX_NW:
- * @BT_USE_DEFAULTS:
- * @BT_SYNC_2_BT_DISABLE:
- * @BT_COEX_CORUNNING_TBL_EN:
+ * @BT_COEX_SYNC2SCO:
+ * @BT_COEX_CORUNNING:
+ * @BT_COEX_MPLUT:
*
 * The COEX_MODE must be set for each command, even if it is not changed.
*/
enum iwl_bt_coex_flags {
- BT_CH_PRIMARY_EN = BIT(0),
- BT_CH_SECONDARY_EN = BIT(1),
- BT_NOTIF_COEX_OFF = BIT(2),
BT_COEX_MODE_POS = 3,
BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
BT_COEX_DISABLE = 0x0 << BT_COEX_MODE_POS,
BT_COEX_2W = 0x1 << BT_COEX_MODE_POS,
BT_COEX_3W = 0x2 << BT_COEX_MODE_POS,
BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
- BT_USE_DEFAULTS = BIT(6),
- BT_SYNC_2_BT_DISABLE = BIT(7),
- BT_COEX_CORUNNING_TBL_EN = BIT(8),
- BT_COEX_MPLUT_TBL_EN = BIT(9),
- /* Bit 10 is reserved */
- BT_COEX_WF_PRIO_BOOST_CHECK_EN = BIT(11),
+ BT_COEX_SYNC2SCO = BIT(7),
+ BT_COEX_CORUNNING = BIT(8),
+ BT_COEX_MPLUT = BIT(9),
};
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 8415ff312d0e..10fcc1a79ebd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -231,11 +231,15 @@ enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
- /* BIT(11) reserved */
+ IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11),
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
+ IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13),
+ IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14),
+ IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15),
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
-struct iwl_wowlan_config_cmd {
+struct iwl_wowlan_config_cmd_v2 {
__le32 wakeup_filter;
__le16 non_qos_seq;
__le16 qos_seq[8];
@@ -243,6 +247,12 @@ struct iwl_wowlan_config_cmd {
u8 is_11n_connection;
} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
+struct iwl_wowlan_config_cmd_v3 {
+ struct iwl_wowlan_config_cmd_v2 common;
+ u8 offloading_tid;
+ u8 reserved[3];
+} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */
+
/*
* WOWLAN_TSC_RSC_PARAMS
*/
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 884c08725308..cbbcd8e284e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -301,54 +301,65 @@ struct iwl_beacon_filter_cmd {
/* Beacon filtering and beacon abort */
#define IWL_BF_ENERGY_DELTA_DEFAULT 5
+#define IWL_BF_ENERGY_DELTA_D0I3 20
#define IWL_BF_ENERGY_DELTA_MAX 255
#define IWL_BF_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20
#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_STATE_DEFAULT 72
+#define IWL_BF_ROAMING_STATE_D0I3 72
#define IWL_BF_ROAMING_STATE_MAX 255
#define IWL_BF_ROAMING_STATE_MIN 0
#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWL_BF_TEMP_THRESHOLD_D0I3 112
#define IWL_BF_TEMP_THRESHOLD_MAX 255
#define IWL_BF_TEMP_THRESHOLD_MIN 0
#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWL_BF_TEMP_FAST_FILTER_D0I3 1
#define IWL_BF_TEMP_FAST_FILTER_MAX 255
#define IWL_BF_TEMP_FAST_FILTER_MIN 0
#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
#define IWL_BF_DEBUG_FLAG_DEFAULT 0
+#define IWL_BF_DEBUG_FLAG_D0I3 0
#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWL_BF_ESCAPE_TIMER_D0I3 1024
#define IWL_BF_ESCAPE_TIMER_MAX 1024
#define IWL_BF_ESCAPE_TIMER_MIN 0
#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWL_BA_ESCAPE_TIMER_D0I3 6
#define IWL_BA_ESCAPE_TIMER_D3 9
#define IWL_BA_ESCAPE_TIMER_MAX 1024
#define IWL_BA_ESCAPE_TIMER_MIN 0
#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
-#define IWL_BF_CMD_CONFIG_DEFAULTS \
- .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT), \
- .bf_roaming_energy_delta = \
- cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
- .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT), \
- .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT), \
- .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \
- .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \
- .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT), \
- .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
- .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
+#define IWL_BF_CMD_CONFIG(mode) \
+ .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \
+ .bf_roaming_energy_delta = \
+ cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \
+ .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \
+ .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \
+ .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \
+ .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \
+ .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \
+ .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \
+ .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)
+#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
+#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
#endif
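IWL_BF_CMD_CONFIG(mode) above relies on token pasting (##) so a single macro body can expand to either the *_DEFAULT or the *_D0I3 set of beacon-filter constants. A tiny standalone illustration of the same preprocessor technique, with made-up constant names:

#include <stdio.h>

#define THRESHOLD_DEFAULT 5
#define THRESHOLD_D0I3    20
#define TIMER_DEFAULT     50
#define TIMER_D0I3        1024

struct bf_cfg {
	int threshold;
	int timer;
};

/* "mode" is pasted onto each constant name, so CFG(_DEFAULT) picks the
 * *_DEFAULT values and CFG(_D0I3) the *_D0I3 ones from one template.
 */
#define CFG(mode)					\
	{ .threshold = THRESHOLD ## mode,		\
	  .timer = TIMER ## mode }

int main(void)
{
	struct bf_cfg def = CFG(_DEFAULT);
	struct bf_cfg d0i3 = CFG(_D0I3);

	printf("default: threshold=%d timer=%d\n", def.threshold, def.timer);
	printf("d0i3:    threshold=%d timer=%d\n", d0i3.threshold, d0i3.timer);
	return 0;
}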
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 85057219cc43..39148b5bb332 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -257,7 +257,8 @@ enum {
/* Bit 17-18: (0) SS, (1) SS*2 */
#define RATE_MCS_STBC_POS 17
-#define RATE_MCS_STBC_MSK (1 << RATE_MCS_STBC_POS)
+#define RATE_MCS_HT_STBC_MSK (3 << RATE_MCS_STBC_POS)
+#define RATE_MCS_VHT_STBC_MSK (1 << RATE_MCS_STBC_POS)
/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
#define RATE_MCS_BF_POS 19
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 1b60fdff6a56..d63647867262 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -199,11 +199,14 @@ enum iwl_sta_modify_flag {
* @STA_SLEEP_STATE_AWAKE:
* @STA_SLEEP_STATE_PS_POLL:
* @STA_SLEEP_STATE_UAPSD:
+ * @STA_SLEEP_STATE_MOREDATA: set more-data bit on
+ * (last) released frame
*/
enum iwl_sta_sleep_flag {
- STA_SLEEP_STATE_AWAKE = 0,
- STA_SLEEP_STATE_PS_POLL = BIT(0),
- STA_SLEEP_STATE_UAPSD = BIT(1),
+ STA_SLEEP_STATE_AWAKE = 0,
+ STA_SLEEP_STATE_PS_POLL = BIT(0),
+ STA_SLEEP_STATE_UAPSD = BIT(1),
+ STA_SLEEP_STATE_MOREDATA = BIT(2),
};
/* STA ID and color bits definitions */
@@ -318,13 +321,15 @@ struct iwl_mvm_add_sta_cmd_v5 {
} __packed; /* ADD_STA_CMD_API_S_VER_5 */
/**
- * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
- * VER_6 of this command is quite similar to VER_5 except
+ * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
+ * VER_7 of this command is quite similar to VER_5 except
* exclusion of all fields related to the security key installation.
+ * It differs from VER_6 only in the "awake_acs" field, which is
+ * reserved and ignored in VER_6.
*/
-struct iwl_mvm_add_sta_cmd_v6 {
+struct iwl_mvm_add_sta_cmd_v7 {
u8 add_modify;
- u8 reserved1;
+ u8 awake_acs;
__le16 tid_disable_tx;
__le32 mac_id_n_color;
u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
@@ -342,7 +347,7 @@ struct iwl_mvm_add_sta_cmd_v6 {
__le16 assoc_id;
__le16 beamform_flags;
__le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_6 */
+} __packed; /* ADD_STA_CMD_API_S_VER_7 */
/**
* struct iwl_mvm_add_sta_key_cmd - add/modify sta key
@@ -432,5 +437,15 @@ struct iwl_mvm_wep_key_cmd {
struct iwl_mvm_wep_key wep_key[0];
} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+/**
+ * struct iwl_mvm_eosp_notification - EOSP notification from firmware
+ * @remain_frame_count: # of frames remaining, non-zero if SP was cut
+ * short by GO absence
+ * @sta_id: station ID
+ */
+struct iwl_mvm_eosp_notification {
+ __le32 remain_frame_count;
+ __le32 sta_id;
+} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
#endif /* __fw_api_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index b674c2a2b51c..8e122f3a7a74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -76,6 +76,8 @@
* @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
* @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
* @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
+ * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
+ * on old firmwares).
* @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
* @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
* Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
@@ -107,6 +109,7 @@ enum iwl_tx_flags {
TX_CMD_FLG_VHT_NDPA = BIT(8),
TX_CMD_FLG_HT_NDPA = BIT(9),
TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
+ TX_CMD_FLG_BT_PRIO_POS = 11,
TX_CMD_FLG_BT_DIS = BIT(12),
TX_CMD_FLG_SEQ_CTL = BIT(13),
TX_CMD_FLG_MORE_FRAG = BIT(14),
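Unlike its neighbours, TX_CMD_FLG_BT_PRIO_POS above is a bit position rather than a BIT() mask: the BT priority is a small multi-bit field starting at bit 11 of tx_flags. A minimal sketch of packing such a field next to ordinary flag bits (the two-bit width is an assumption made for illustration):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))

#define FLG_SEQ_CTL		BIT(13)
#define FLG_BT_PRIO_POS		11
#define FLG_BT_PRIO_MSK		(3u << FLG_BT_PRIO_POS)	/* assumed 2-bit field */

static uint32_t build_tx_flags(unsigned int bt_prio, int override_seq)
{
	uint32_t flags = 0;

	/* Insert the priority field at its position, then OR in plain flags. */
	flags |= (bt_prio << FLG_BT_PRIO_POS) & FLG_BT_PRIO_MSK;
	if (override_seq)
		flags |= FLG_SEQ_CTL;
	return flags;
}

int main(void)
{
	uint32_t flags = build_tx_flags(2, 1);

	printf("tx_flags = 0x%08x (bt_prio=%u)\n", flags,
	       (flags & FLG_BT_PRIO_MSK) >> FLG_BT_PRIO_POS);
	return 0;
}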
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 989d7dbdca6c..6e75b52588de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -70,7 +70,7 @@
#include "fw-api-mac.h"
#include "fw-api-power.h"
#include "fw-api-d3.h"
-#include "fw-api-bt-coex.h"
+#include "fw-api-coex.h"
/* maximal number of Tx queues in any platform */
#define IWL_MVM_MAX_QUEUES 20
@@ -95,6 +95,7 @@ enum {
/* PHY context commands */
PHY_CONTEXT_CMD = 0x8,
DBG_CFG = 0x9,
+ ANTENNA_COUPLING_NOTIFICATION = 0xa,
/* station table */
ADD_STA_KEY = 0x17,
@@ -163,6 +164,7 @@ enum {
TX_ANT_CONFIGURATION_CMD = 0x98,
BT_CONFIG = 0x9b,
STATISTICS_NOTIFICATION = 0x9d,
+ EOSP_NOTIFICATION = 0x9e,
REDUCE_TX_POWER_CMD = 0x9f,
/* RF-KILL commands and notifications */
@@ -190,6 +192,7 @@ enum {
REPLY_DEBUG_CMD = 0xf0,
DEBUG_LOG_MSG = 0xf7,
+ BCAST_FILTER_CMD = 0xcf,
MCAST_FILTER_CMD = 0xd0,
/* D3 commands/notifications */
@@ -197,6 +200,7 @@ enum {
PROT_OFFLOAD_CONFIG_CMD = 0xd4,
OFFLOADS_QUERY_CMD = 0xd5,
REMOTE_WAKE_CONFIG_CMD = 0xd6,
+ D0I3_END_CMD = 0xed,
/* for WoWLAN in particular */
WOWLAN_PATTERNS = 0xe0,
@@ -313,14 +317,12 @@ enum {
/* Section types for NVM_ACCESS_CMD */
enum {
- NVM_SECTION_TYPE_HW = 0,
- NVM_SECTION_TYPE_SW,
- NVM_SECTION_TYPE_PAPD,
- NVM_SECTION_TYPE_BT,
- NVM_SECTION_TYPE_CALIBRATION,
- NVM_SECTION_TYPE_PRODUCTION,
- NVM_SECTION_TYPE_POST_FCS_CALIB,
- NVM_NUM_OF_SECTIONS,
+ NVM_SECTION_TYPE_SW = 1,
+ NVM_SECTION_TYPE_REGULATORY = 3,
+ NVM_SECTION_TYPE_CALIBRATION = 4,
+ NVM_SECTION_TYPE_PRODUCTION = 5,
+ NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
+ NVM_MAX_NUM_SECTIONS = 12,
};
/**
@@ -412,6 +414,35 @@ struct mvm_alive_resp {
__le32 scd_base_ptr; /* SRAM address for SCD */
} __packed; /* ALIVE_RES_API_S_VER_1 */
+struct mvm_alive_resp_ver2 {
+ __le16 status;
+ __le16 flags;
+ u8 ucode_minor;
+ u8 ucode_major;
+ __le16 id;
+ u8 api_minor;
+ u8 api_major;
+ u8 ver_subtype;
+ u8 ver_type;
+ u8 mac;
+ u8 opt;
+ __le16 reserved2;
+ __le32 timestamp;
+ __le32 error_event_table_ptr; /* SRAM address for error log */
+ __le32 log_event_table_ptr; /* SRAM address for LMAC event log */
+ __le32 cpu_register_ptr;
+ __le32 dbgm_config_ptr;
+ __le32 alive_counter_ptr;
+ __le32 scd_base_ptr; /* SRAM address for SCD */
+ __le32 st_fwrd_addr; /* pointer to Store and forward */
+ __le32 st_fwrd_size;
+ u8 umac_minor; /* UMAC version: minor */
+ u8 umac_major; /* UMAC version: major */
+ __le16 umac_id; /* UMAC version: id */
+ __le32 error_info_addr; /* SRAM address for UMAC error log */
+ __le32 dbg_print_buff_addr;
+} __packed; /* ALIVE_RES_API_S_VER_2 */
+
/* Error response/notification */
enum {
FW_ERR_UNKNOWN_CMD = 0x0,
@@ -682,6 +713,7 @@ enum {
TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
+ T2_V2_START_IMMEDIATELY = BIT(11),
TE_V2_NOTIF_MSK = 0xff,
@@ -1159,6 +1191,90 @@ struct iwl_mcast_filter_cmd {
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
+#define MAX_BCAST_FILTERS 8
+#define MAX_BCAST_FILTER_ATTRS 2
+
+/**
+ * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
+ * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
+ * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
+ * start of ip payload).
+ */
+enum iwl_mvm_bcast_filter_attr_offset {
+ BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
+ BCAST_FILTER_OFFSET_IP_END = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
+ * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
+ * @offset: starting offset of this pattern.
+ * @val: value to match - big endian (MSB is the first
+ * byte to match from offset pos).
+ * @mask: mask to match (big endian).
+ */
+struct iwl_fw_bcast_filter_attr {
+ u8 offset_type;
+ u8 offset;
+ __le16 reserved1;
+ __be32 val;
+ __be32 mask;
+} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
+
+/**
+ * enum iwl_mvm_bcast_filter_frame_type - filter frame type
+ * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
+ * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
+ */
+enum iwl_mvm_bcast_filter_frame_type {
+ BCAST_FILTER_FRAME_TYPE_ALL = 0,
+ BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter - broadcast filter
+ * @discard: discard frame (1) or let it pass (0).
+ * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
+ * @num_attrs: number of valid attributes in this filter.
+ * @attrs: attributes of this filter. A filter is considered matched
+ * only when all its attributes are matched (i.e. AND relationship).
+ */
+struct iwl_fw_bcast_filter {
+ u8 discard;
+ u8 frame_type;
+ u8 num_attrs;
+ u8 reserved1;
+ struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
+} __packed; /* BCAST_FILTER_S_VER_1 */
+
+/**
+ * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
+ * @default_discard: default action for this mac (discard (1) / pass (0)).
+ * @attached_filters: bitmap of relevant filters for this mac.
+ */
+struct iwl_fw_bcast_mac {
+ u8 default_discard;
+ u8 reserved1;
+ __le16 attached_filters;
+} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
+
+/**
+ * struct iwl_bcast_filter_cmd - broadcast filtering configuration
+ * @disable: enable (0) / disable (1)
+ * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
+ * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
+ * @filters: broadcast filters
+ * @macs: broadcast filtering configuration per-mac
+ */
+struct iwl_bcast_filter_cmd {
+ u8 disable;
+ u8 max_bcast_filters;
+ u8 max_macs;
+ u8 reserved1;
+ struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
+ struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
+} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
+
struct mvm_statistics_dbg {
__le32 burst_check;
__le32 burst_count;
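The broadcast-filter structures above match a big-endian value/mask pair at an offset taken from either the payload start or the end of the IP header. As a hedged illustration, with the attribute layout mirrored locally and the matched pattern invented, one such attribute could be filled in like this:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl()/ntohl() for the big-endian val/mask */

/* Local mirror of the attribute layout, for illustration only. */
struct bcast_filter_attr {
	uint8_t offset_type;	/* 0: payload start, 1: IP header end */
	uint8_t offset;
	uint16_t reserved1;
	uint32_t val;		/* big endian on the wire */
	uint32_t mask;		/* big endian on the wire */
};

enum { OFFSET_PAYLOAD_START = 0, OFFSET_IP_END = 1 };

int main(void)
{
	/* Match 0xAABBxxxx in the first 4 bytes of the IP payload. */
	struct bcast_filter_attr attr = {
		.offset_type = OFFSET_IP_END,
		.offset = 0,
		.val = htonl(0xAABB0000),
		.mask = htonl(0xFFFF0000),
	};

	printf("offset_type=%u offset=%u val=0x%08x mask=0x%08x\n",
	       attr.offset_type, attr.offset,
	       ntohl(attr.val), ntohl(attr.mask));
	return 0;
}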
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h b/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
new file mode 100644
index 000000000000..58c8941c0d95
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
@@ -0,0 +1,106 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_error_dump_h__
+#define __fw_error_dump_h__
+
+#include <linux/types.h>
+
+#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
+
+/**
+ * enum iwl_fw_error_dump_type - types of data in the dump file
+ * @IWL_FW_ERROR_DUMP_SRAM: SRAM contents of the device
+ * @IWL_FW_ERROR_DUMP_REG: device registers
+ */
+enum iwl_fw_error_dump_type {
+ IWL_FW_ERROR_DUMP_SRAM = 0,
+ IWL_FW_ERROR_DUMP_REG = 1,
+
+ IWL_FW_ERROR_DUMP_MAX,
+};
+
+/**
+ * struct iwl_fw_error_dump_data - data for one type
+ * @type: %enum iwl_fw_error_dump_type
+ * @len: the length starting from %data - must be a multiple of 4.
+ * @data: the data itself, padded to a multiple of 4.
+ */
+struct iwl_fw_error_dump_data {
+ __le32 type;
+ __le32 len;
+ __u8 data[];
+} __packed __aligned(4);
+
+/**
+ * struct iwl_fw_error_dump_file - the layout of the header of the file
+ * @barker: must be %IWL_FW_ERROR_DUMP_BARKER
+ * @file_len: the length of the whole file, starting from %barker
+ * @data: array of %struct iwl_fw_error_dump_data
+ */
+struct iwl_fw_error_dump_file {
+ __le32 barker;
+ __le32 file_len;
+ u8 data[0];
+} __packed __aligned(4);
+
+#endif /* __fw_error_dump_h__ */
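The new fw-error-dump.h defines a simple container format: a header carrying a barker and the total file length, followed by (type, len, data) records whose lengths are multiples of 4. A userspace sketch of walking such a buffer, with the layouts re-declared locally and endianness handling omitted:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DUMP_BARKER 0x14789632u

struct dump_rec {		/* one (type, len, data[len]) record */
	uint32_t type;
	uint32_t len;		/* length of data[], already a multiple of 4 */
	uint8_t data[];
};

struct dump_hdr {		/* file header */
	uint32_t barker;
	uint32_t file_len;	/* whole file length, header included */
	uint8_t data[];
};

static void walk_dump(const uint8_t *buf, size_t buf_len)
{
	const struct dump_hdr *hdr = (const struct dump_hdr *)buf;
	size_t pos = sizeof(*hdr);

	if (buf_len < sizeof(*hdr) || hdr->barker != DUMP_BARKER) {
		printf("not a dump file\n");
		return;
	}

	while (pos + sizeof(struct dump_rec) <= hdr->file_len &&
	       pos + sizeof(struct dump_rec) <= buf_len) {
		const struct dump_rec *rec =
			(const struct dump_rec *)(buf + pos);

		printf("record type %u, %u bytes of data\n",
		       (unsigned int)rec->type, (unsigned int)rec->len);
		pos += sizeof(*rec) + rec->len;
	}
}

int main(void)
{
	uint32_t storage[16] = { 0 };
	uint8_t *buf = (uint8_t *)storage;
	struct dump_hdr hdr = { DUMP_BARKER, 0 };
	struct dump_rec rec = { 0 /* e.g. SRAM */, 8 };

	hdr.file_len = sizeof(hdr) + sizeof(rec) + 8;
	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), &rec, sizeof(rec));

	walk_dump(buf, sizeof(storage));
	return 0;
}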
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index c03d39541f9e..7ce20062f32d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -110,18 +110,48 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
struct mvm_alive_resp *palive;
-
- palive = (void *)pkt->data;
-
- mvm->error_event_table = le32_to_cpu(palive->error_event_table_ptr);
- mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
- alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
-
- alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK;
- IWL_DEBUG_FW(mvm,
- "Alive ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
- le16_to_cpu(palive->status), palive->ver_type,
- palive->ver_subtype, palive->flags);
+ struct mvm_alive_resp_ver2 *palive2;
+
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+ palive = (void *)pkt->data;
+
+ mvm->support_umac_log = false;
+ mvm->error_event_table =
+ le32_to_cpu(palive->error_event_table_ptr);
+ mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+
+ alive_data->valid = le16_to_cpu(palive->status) ==
+ IWL_ALIVE_STATUS_OK;
+ IWL_DEBUG_FW(mvm,
+ "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+ le16_to_cpu(palive->status), palive->ver_type,
+ palive->ver_subtype, palive->flags);
+ } else {
+ palive2 = (void *)pkt->data;
+
+ mvm->error_event_table =
+ le32_to_cpu(palive2->error_event_table_ptr);
+ mvm->log_event_table =
+ le32_to_cpu(palive2->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
+ mvm->umac_error_event_table =
+ le32_to_cpu(palive2->error_info_addr);
+
+ alive_data->valid = le16_to_cpu(palive2->status) ==
+ IWL_ALIVE_STATUS_OK;
+ if (mvm->umac_error_event_table)
+ mvm->support_umac_log = true;
+
+ IWL_DEBUG_FW(mvm,
+ "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+ le16_to_cpu(palive2->status), palive2->ver_type,
+ palive2->ver_subtype, palive2->flags);
+
+ IWL_DEBUG_FW(mvm,
+ "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+ palive2->umac_major, palive2->umac_minor);
+ }
return true;
}
@@ -292,7 +322,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
}
/* Send TX valid antennas before triggering calibrations */
- ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
+ ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
if (ret)
goto error;
@@ -328,8 +358,6 @@ out:
GFP_KERNEL);
if (!mvm->nvm_data)
return -ENOMEM;
- mvm->nvm_data->valid_rx_ant = 1;
- mvm->nvm_data->valid_tx_ant = 1;
mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
@@ -341,8 +369,6 @@ out:
return ret;
}
-#define UCODE_CALIB_TIMEOUT (2*HZ)
-
int iwl_mvm_up(struct iwl_mvm *mvm)
{
int ret, i;
@@ -394,7 +420,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
- ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
+ ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
if (ret)
goto error;
@@ -439,10 +465,23 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- ret = iwl_mvm_power_update_device_mode(mvm);
+ /* Initialize tx backoffs to the minimal possible */
+ iwl_mvm_tt_tx_backoff(mvm, 0);
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
+ ret = iwl_power_legacy_set_cam_mode(mvm);
+ if (ret)
+ goto error;
+ }
+
+ ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto error;
+ /* allow FW/transport low power modes if not during restart */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
@@ -466,7 +505,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
goto error;
}
- ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
+ ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
if (ret)
goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
index 6b4ea6bf8ffe..e3b3cf4dbd77 100644
--- a/drivers/net/wireless/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/iwlwifi/mvm/led.c
@@ -94,6 +94,8 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
int ret;
switch (mode) {
+ case IWL_LED_BLINK:
+ IWL_ERR(mvm, "Blink led mode not supported, using default\n");

case IWL_LED_DEFAULT:
case IWL_LED_RF_STATE:
mode = IWL_LED_RF_STATE;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index ba723d50939a..9ccec10bba16 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -90,6 +90,7 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
{
struct iwl_mvm_mac_iface_iterator_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 min_bi;
/* Skip the interface for which we are trying to assign a tsf_id */
if (vif == data->vif)
@@ -114,42 +115,57 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
switch (data->vif->type) {
case NL80211_IFTYPE_STATION:
/*
- * The new interface is client, so if the existing one
- * we're iterating is an AP, and both interfaces have the
- * same beacon interval, the same TSF should be used to
- * avoid drift between the new client and existing AP,
- * the existing AP will get drift updates from the new
- * client context in this case
+ * The new interface is a client, so if the one we're iterating
+ * is an AP, and the beacon interval of the AP is a multiple or
+ * divisor of the beacon interval of the client, the same TSF
+ * should be used to avoid drift between the new client and
+ * existing AP. The existing AP will get drift updates from the
+ * new client context in this case.
*/
- if (vif->type == NL80211_IFTYPE_AP) {
- if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
- (vif->bss_conf.beacon_int ==
- data->vif->bss_conf.beacon_int)) {
- data->preferred_tsf = mvmvif->tsf_id;
- return;
- }
+ if (vif->type != NL80211_IFTYPE_AP ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
}
break;
+
case NL80211_IFTYPE_AP:
/*
- * The new interface is AP/GO, so in case both interfaces
- * have the same beacon interval, it should get drift
- * updates from an existing client or use the same
- * TSF as an existing GO. There's no drift between
- * TSFs internally but if they used different TSFs
- * then a new client MAC could update one of them
- * and cause drift that way.
+ * The new interface is AP/GO, so if its beacon interval is a
+ * multiple or a divisor of the beacon interval of an existing
+ * interface, it should get drift updates from an existing
+ * client or use the same TSF as an existing GO. There's no
+ * drift between TSFs internally but if they used different
+ * TSFs then a new client MAC could update one of them and
+ * cause drift that way.
*/
- if (vif->type == NL80211_IFTYPE_STATION ||
- vif->type == NL80211_IFTYPE_AP) {
- if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
- (vif->bss_conf.beacon_int ==
- data->vif->bss_conf.beacon_int)) {
- data->preferred_tsf = mvmvif->tsf_id;
- return;
- }
+ if ((vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_STATION) ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
}
break;
default:
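
The rewritten check above shares a TSF whenever one beacon interval is a multiple (or divisor) of the other: since max = min + diff, testing diff % min == 0 is equivalent to max % min == 0. A standalone sketch with two worked cases:

#include <stdbool.h>
#include <stdio.h>

/* Same test as the hunk above: the difference of the two beacon
 * intervals must be a multiple of the smaller one, and neither may
 * be zero. 100/300 passes, 100/150 does not. */
static bool beacon_int_compatible(unsigned int a, unsigned int b)
{
    unsigned int min_bi = a < b ? a : b;

    if (!min_bi)
        return false;
    return ((a > b ? a - b : b - a) % min_bi) == 0;
}

int main(void)
{
    printf("100 vs 300: %d\n", beacon_int_compatible(100, 300)); /* 1 */
    printf("100 vs 150: %d\n", beacon_int_compatible(100, 150)); /* 0 */
    return 0;
}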
@@ -936,7 +952,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
TX_CMD_FLG_TSF);
mvm->mgmt_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
+ iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
mvm->mgmt_last_antenna_idx);
beacon_cmd.tx.rate_n_flags =
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c35b8661b395..4dd9ff43b8b6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -66,7 +66,9 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
+#include <linux/if_arp.h>
#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
#include <net/tcp.h>
#include "iwl-op-mode.h"
@@ -128,6 +130,117 @@ static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
};
#endif
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+/*
+ * Use the reserved field to indicate magic values.
+ * These values are only used internally by the driver and won't
+ * make it to the fw (reserved will be 0).
+ * BC_FILTER_MAGIC_IP - configure the val of this attribute to
+ * be the vif's IP address. If the vif doesn't have exactly one
+ * IP address (0, or more than 1), this attribute is skipped.
+ * BC_FILTER_MAGIC_MAC - set the val of this attribute to
+ * the LSB bytes of the vif's MAC address.
+ */
+enum {
+ BC_FILTER_MAGIC_NONE = 0,
+ BC_FILTER_MAGIC_IP,
+ BC_FILTER_MAGIC_MAC,
+};
+
+static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
+ {
+ /* arp */
+ .discard = 0,
+ .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
+ .attrs = {
+ {
+ /* frame type - arp, hw type - ethernet */
+ .offset_type =
+ BCAST_FILTER_OFFSET_PAYLOAD_START,
+ .offset = sizeof(rfc1042_header),
+ .val = cpu_to_be32(0x08060001),
+ .mask = cpu_to_be32(0xffffffff),
+ },
+ {
+ /* arp dest ip */
+ .offset_type =
+ BCAST_FILTER_OFFSET_PAYLOAD_START,
+ .offset = sizeof(rfc1042_header) + 2 +
+ sizeof(struct arphdr) +
+ ETH_ALEN + sizeof(__be32) +
+ ETH_ALEN,
+ .mask = cpu_to_be32(0xffffffff),
+ /* mark it as special field */
+ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
+ },
+ },
+ },
+ {
+ /* dhcp offer bcast */
+ .discard = 0,
+ .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
+ .attrs = {
+ {
+ /* udp dest port - 68 (bootp client) */
+ .offset_type = BCAST_FILTER_OFFSET_IP_END,
+ .offset = offsetof(struct udphdr, dest),
+ .val = cpu_to_be32(0x00440000),
+ .mask = cpu_to_be32(0xffff0000),
+ },
+ {
+ /* dhcp - lsb bytes of client hw address */
+ .offset_type = BCAST_FILTER_OFFSET_IP_END,
+ .offset = 38,
+ .mask = cpu_to_be32(0xffffffff),
+ /* mark it as special field */
+ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
+ },
+ },
+ },
+ /* last filter must be empty */
+ {},
+};
+#endif
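
Each attribute in the default filters above boils down to a masked 32-bit compare at a fixed offset into the frame. A user-space sketch of that matching rule (the 6-byte SNAP header and the sample buffer are illustrative assumptions, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htonl() */

/* Match one attribute: read 4 bytes at 'offset', mask them and compare
 * with the expected big-endian value - the rule applied for each
 * iwl_fw_bcast_filter_attr. */
static bool attr_matches(const uint8_t *frame, size_t len,
                         size_t offset, uint32_t be_mask, uint32_t be_val)
{
    uint32_t word;

    if (offset + sizeof(word) > len)
        return false;
    memcpy(&word, frame + offset, sizeof(word));
    return (word & be_mask) == be_val;
}

int main(void)
{
    /* Fake payload: 6-byte SNAP header followed by an ARP frame. */
    uint8_t frame[64] = { 0xaa, 0xaa, 0x03, 0, 0, 0, 0x08, 0x06, 0x00, 0x01 };

    /* "ethertype is ARP, hardware type is Ethernet" at payload + 6 */
    printf("arp filter hits: %d\n",
           attr_matches(frame, sizeof(frame), 6,
                        htonl(0xffffffff), htonl(0x08060001)));
    return 0;
}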
+
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return;
+
+ IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
+ WARN_ON(test_and_set_bit(ref_type, mvm->ref_bitmap));
+ iwl_trans_ref(mvm->trans);
+}
+
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return;
+
+ IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
+ WARN_ON(!test_and_clear_bit(ref_type, mvm->ref_bitmap));
+ iwl_trans_unref(mvm->trans);
+}
+
+static void
+iwl_mvm_unref_all_except(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref)
+{
+ int i;
+
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return;
+
+ for_each_set_bit(i, mvm->ref_bitmap, IWL_MVM_REF_COUNT) {
+ if (ref == i)
+ continue;
+
+ IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d\n", i);
+ clear_bit(i, mvm->ref_bitmap);
+ iwl_trans_unref(mvm->trans);
+ }
+}
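
iwl_mvm_ref()/iwl_mvm_unref() pair a named-reference bitmap with the transport's runtime-PM refcount, so a double take or a forgotten release shows up as a warning. A toy model of that bookkeeping (plain C, no locking, hypothetical names):

#include <stdio.h>

enum ref_type { REF_UCODE_DOWN, REF_SCAN, REF_TX, REF_COUNT };

static unsigned long ref_bitmap;   /* one bit per named reference */
static int trans_refcount;         /* stand-in for iwl_trans_ref/unref */

static void take_ref(enum ref_type t)
{
    if (ref_bitmap & (1UL << t))
        fprintf(stderr, "WARN: ref %d already held\n", t);
    ref_bitmap |= 1UL << t;
    trans_refcount++;
}

static void drop_ref(enum ref_type t)
{
    if (!(ref_bitmap & (1UL << t)))
        fprintf(stderr, "WARN: ref %d not held\n", t);
    ref_bitmap &= ~(1UL << t);
    trans_refcount--;
}

int main(void)
{
    take_ref(REF_SCAN);     /* scan starts: block low-power states */
    drop_ref(REF_SCAN);     /* scan done: allow them again */
    printf("refs left: %d\n", trans_refcount);
    return 0;
}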
+
static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
{
int i;
@@ -168,6 +281,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->queues = mvm->first_agg_queue;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
+ hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+ IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+ hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
hw->rate_control_algorithm = "iwl-mvm-rs";
/*
@@ -179,7 +295,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
+ if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
hw->uapsd_queues = IWL_UAPSD_AC_INFO;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -203,6 +319,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
ARRAY_SIZE(iwl_mvm_iface_combinations);
@@ -246,7 +365,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
@@ -256,8 +375,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
- NL80211_FEATURE_P2P_GO_OPPPS |
- NL80211_FEATURE_LOW_PRIORITY_SCAN;
+ NL80211_FEATURE_P2P_GO_OPPPS;
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
@@ -289,6 +407,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#endif
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ /* assign default bcast filtering configuration */
+ mvm->bcast_filters = iwl_mvm_default_bcast_filters;
+#endif
+
ret = iwl_mvm_leds_init(mvm);
if (ret)
return ret;
@@ -300,11 +423,55 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
return ret;
}
+static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct iwl_mvm_sta *mvmsta;
+ bool defer = false;
+
+ /*
+ * double check the IN_D0I3 flag both before and after
+ * taking the spinlock, in order to prevent taking
+ * the spinlock when not needed.
+ */
+ if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
+ return false;
+
+ spin_lock(&mvm->d0i3_tx_lock);
+ /*
+ * testing the flag again ensures the skb dequeue
+ * loop (on d0i3 exit) hasn't run yet.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
+ goto out;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
+ mvmsta->sta_id != mvm->d0i3_ap_sta_id)
+ goto out;
+
+ __skb_queue_tail(&mvm->d0i3_tx, skb);
+ ieee80211_stop_queues(mvm->hw);
+
+ /* trigger wakeup */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
+
+ defer = true;
+out:
+ spin_unlock(&mvm->d0i3_tx_lock);
+ return defer;
+}
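
iwl_mvm_defer_tx() is a double-checked test: peek at the D0i3 flag without the lock for the common fast path, then re-check under the lock before queuing. A minimal pthread sketch of the same pattern (hypothetical names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool in_low_power;   /* stands in for IWL_MVM_STATUS_IN_D0I3 */
static int deferred;                 /* stands in for the d0i3_tx queue */

static bool defer_if_low_power(void)
{
    bool defer = false;

    if (!in_low_power)          /* fast path: no lock taken */
        return false;

    pthread_mutex_lock(&lock);
    if (in_low_power) {         /* re-check: the exit path may have run */
        deferred++;
        defer = true;
    }
    pthread_mutex_unlock(&lock);
    return defer;
}

int main(void)
{
    in_low_power = true;
    printf("deferred: %d\n", defer_if_low_power());
    return 0;
}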
+
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
if (iwl_mvm_is_radio_killed(mvm)) {
IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
@@ -315,8 +482,18 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
!test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
goto drop;
- if (control->sta) {
- if (iwl_mvm_tx_skb(mvm, skb, control->sta))
+ /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
+ if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
+ ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_deauth(hdr->frame_control) &&
+ !ieee80211_is_disassoc(hdr->frame_control) &&
+ !ieee80211_is_action(hdr->frame_control)))
+ sta = NULL;
+
+ if (sta) {
+ if (iwl_mvm_defer_tx(mvm, sta, skb))
+ return;
+ if (iwl_mvm_tx_skb(mvm, skb, sta))
goto drop;
return;
}
@@ -354,6 +531,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
+ bool tx_agg_ref = false;
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
sta->addr, tid, action);
@@ -361,6 +539,23 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
if (!(mvm->nvm_data->sku_cap_11n_enable))
return -EACCES;
+ /* return from D0i3 before starting a new Tx aggregation */
+ if (action == IEEE80211_AMPDU_TX_START) {
+ iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
+ tx_agg_ref = true;
+
+ /*
+ * wait synchronously until D0i3 exit to get the correct
+ * sequence number for the tid
+ */
+ if (!wait_event_timeout(mvm->d0i3_exit_waitq,
+ !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
+ WARN_ON_ONCE(1);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
+ return -EIO;
+ }
+ }
+
mutex_lock(&mvm->mutex);
switch (action) {
@@ -398,6 +593,13 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
}
mutex_unlock(&mvm->mutex);
+ /*
+ * If the tid is marked as started, we won't use it for offloaded
+ * traffic on the next D0i3 entry. It's safe to unref.
+ */
+ if (tx_agg_ref)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
+
return ret;
}
@@ -422,6 +624,15 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL };
+
+ iwl_mvm_fw_error_dump(mvm);
+
+ /* notify the userspace about the error we had */
+ kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env);
+#endif
+
iwl_trans_stop_device(mvm->trans);
mvm->scan_status = IWL_MVM_SCAN_NONE;
@@ -434,6 +645,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_cleanup_iterator, mvm);
mvm->p2p_device_vif = NULL;
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@@ -441,6 +653,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_wake_queues(mvm->hw);
+ /* cleanup all stale references (scan, roc), but keep the
+ * ucode_down ref until reconfig is complete */
+ iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
+
mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
}
@@ -470,11 +686,15 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
mutex_lock(&mvm->mutex);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ iwl_mvm_d0i3_enable_tx(mvm, NULL);
ret = iwl_mvm_update_quotas(mvm, NULL);
if (ret)
IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
ret);
+ /* allow transport/FW low power modes */
+ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
mutex_unlock(&mvm->mutex);
}
@@ -482,9 +702,14 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ flush_work(&mvm->d0i3_exit_work);
flush_work(&mvm->async_handlers_wk);
mutex_lock(&mvm->mutex);
+
+ /* disallow low power states when the FW is down */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
/* async_handlers_wk is now blocked */
/*
@@ -510,14 +735,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_work_sync(&mvm->async_handlers_wk);
}
-static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = data;
-
- iwl_mvm_power_update_mode(mvm, vif);
-}
-
static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
{
u16 i;
@@ -585,7 +802,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC) {
u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
- qmask);
+ qmask,
+ ieee80211_vif_type_p2p(vif));
if (ret) {
IWL_ERR(mvm, "Failed to allocate bcast sta\n");
goto out_release;
@@ -599,10 +817,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_release;
- iwl_mvm_power_disable(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
+ if (ret)
+ goto out_release;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
if (ret)
goto out_remove_mac;
@@ -661,11 +881,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- /* TODO: remove this when legacy PM will be discarded */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
-
iwl_mvm_mac_ctxt_release(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
@@ -754,11 +969,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- /* TODO: remove this when legacy PM will be discarded */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
-
+ iwl_mvm_power_update_mac(mvm, vif);
iwl_mvm_mac_ctxt_remove(mvm, vif);
out_release:
@@ -876,6 +1087,156 @@ out:
*total_flags = 0;
}
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+struct iwl_bcast_iter_data {
+ struct iwl_mvm *mvm;
+ struct iwl_bcast_filter_cmd *cmd;
+ u8 current_filter;
+};
+
+static void
+iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
+ const struct iwl_fw_bcast_filter *in_filter,
+ struct iwl_fw_bcast_filter *out_filter)
+{
+ struct iwl_fw_bcast_filter_attr *attr;
+ int i;
+
+ memcpy(out_filter, in_filter, sizeof(*out_filter));
+
+ for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
+ attr = &out_filter->attrs[i];
+
+ if (!attr->mask)
+ break;
+
+ switch (attr->reserved1) {
+ case cpu_to_le16(BC_FILTER_MAGIC_IP):
+ if (vif->bss_conf.arp_addr_cnt != 1) {
+ attr->mask = 0;
+ continue;
+ }
+
+ attr->val = vif->bss_conf.arp_addr_list[0];
+ break;
+ case cpu_to_le16(BC_FILTER_MAGIC_MAC):
+ attr->val = *(__be32 *)&vif->addr[2];
+ break;
+ default:
+ break;
+ }
+ attr->reserved1 = 0;
+ out_filter->num_attrs++;
+ }
+}
+
+static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bcast_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_bcast_filter_cmd *cmd = data->cmd;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_fw_bcast_mac *bcast_mac;
+ int i;
+
+ if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
+ return;
+
+ bcast_mac = &cmd->macs[mvmvif->id];
+
+ /* enable filtering only for associated stations */
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ return;
+
+ bcast_mac->default_discard = 1;
+
+ /* copy all configured filters */
+ for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
+ /*
+ * Make sure we don't exceed our filters limit.
+ * If there is still a valid filter to be configured, be on the
+ * safe side and just allow bcast for this mac.
+ */
+ if (WARN_ON_ONCE(data->current_filter >=
+ ARRAY_SIZE(cmd->filters))) {
+ bcast_mac->default_discard = 0;
+ bcast_mac->attached_filters = 0;
+ break;
+ }
+
+ iwl_mvm_set_bcast_filter(vif,
+ &mvm->bcast_filters[i],
+ &cmd->filters[data->current_filter]);
+
+ /* skip current filter if it contains no attributes */
+ if (!cmd->filters[data->current_filter].num_attrs)
+ continue;
+
+ /* attach the filter to current mac */
+ bcast_mac->attached_filters |=
+ cpu_to_le16(BIT(data->current_filter));
+
+ data->current_filter++;
+ }
+}
+
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+ struct iwl_bcast_filter_cmd *cmd)
+{
+ struct iwl_bcast_iter_data iter_data = {
+ .mvm = mvm,
+ .cmd = cmd,
+ };
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
+ cmd->max_macs = ARRAY_SIZE(cmd->macs);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* use debugfs filters/macs if override is configured */
+ if (mvm->dbgfs_bcast_filtering.override) {
+ memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
+ sizeof(cmd->filters));
+ memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
+ sizeof(cmd->macs));
+ return true;
+ }
+#endif
+
+ /* if no filters are configured, do nothing */
+ if (!mvm->bcast_filters)
+ return false;
+
+ /* configure and attach these filters for each associated sta vif */
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bcast_filter_iterator, &iter_data);
+
+ return true;
+}
+static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bcast_filter_cmd cmd;
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
+ return 0;
+
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ return 0;
+
+ return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
+#else
+static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ return 0;
+}
+#endif
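
iwl_mvm_bcast_filter_build_cmd() instantiates the filter templates per associated MAC and, if it runs out of command slots, fails open by re-allowing broadcast for that MAC. A condensed standalone sketch of that flow (fixed-size toy tables instead of the mac80211 iterator):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_FILTERS   4
#define NUM_TEMPLATES 2

struct mac_cfg {
    uint8_t  default_discard;   /* drop unmatched broadcast */
    uint16_t attached_filters;  /* bitmask into the shared filter table */
};

static void attach_filters(struct mac_cfg *mac, unsigned int *next_slot)
{
    int i;

    mac->default_discard = 1;
    for (i = 0; i < NUM_TEMPLATES; i++) {
        if (*next_slot >= MAX_FILTERS) {
            /* out of slots: fail open for this MAC */
            mac->default_discard = 0;
            mac->attached_filters = 0;
            return;
        }
        mac->attached_filters |= 1U << *next_slot;
        (*next_slot)++;
    }
}

int main(void)
{
    struct mac_cfg macs[3];
    unsigned int slot = 0;
    int i;

    memset(macs, 0, sizeof(macs));
    for (i = 0; i < 3; i++)
        attach_filters(&macs[i], &slot);
    for (i = 0; i < 3; i++)
        printf("mac %d: discard=%u filters=0x%x\n",
               i, macs[i].default_discard, macs[i].attached_filters);
    return 0;
}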
+
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -928,6 +1289,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_sf_update(mvm, vif, false);
iwl_mvm_power_vif_assoc(mvm, vif);
+ if (vif->p2p)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/*
* If update fails - SF might be running in associated
@@ -940,27 +1303,25 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
if (ret)
IWL_ERR(mvm, "failed to remove AP station\n");
+
+ if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, NULL);
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");
+
+ if (vif->p2p)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
}
iwl_mvm_recalc_multicast(mvm);
+ iwl_mvm_configure_bcast_filter(mvm, vif);
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
- if (!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
- /* Workaround for FW bug, otherwise FW disables device
- * power save upon disassociation
- */
- ret = iwl_mvm_power_update_mode(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to update power mode\n");
- }
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
IEEE80211_SMPS_AUTOMATIC);
@@ -971,9 +1332,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
BSS_CHANGED_QOS)) {
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
@@ -987,10 +1349,15 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
/* reset cqm events tracking */
mvmvif->bf_data.last_cqm_event = 0;
- ret = iwl_mvm_update_beacon_filter(mvm, vif);
+ ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC);
if (ret)
IWL_ERR(mvm, "failed to update CQM thresholds\n");
}
+
+ if (changes & BSS_CHANGED_ARP_FILTER) {
+ IWL_DEBUG_MAC80211(mvm, "arp filter changed");
+ iwl_mvm_configure_bcast_filter(mvm, vif);
+ }
}
static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
@@ -1024,8 +1391,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_remove;
- mvmvif->ap_ibss_active = true;
-
/* Send the bcast station. At this stage the TBTT and DTIM time events
* are added and applied to the scheduler */
ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
@@ -1036,8 +1401,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
mvmvif->ap_ibss_active = true;
/* power updated needs to be done before quotas */
- mvm->bound_vif_cnt++;
- iwl_mvm_power_update_binding(mvm, vif, true);
+ iwl_mvm_power_update_mac(mvm, vif);
ret = iwl_mvm_update_quotas(mvm, vif);
if (ret)
@@ -1047,14 +1411,15 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
+
iwl_mvm_bt_coex_vif_change(mvm);
mutex_unlock(&mvm->mutex);
return 0;
out_quota_failed:
- mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
out_unbind:
@@ -1080,6 +1445,8 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_bt_coex_vif_change(mvm);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
+
/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
@@ -1088,8 +1455,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
iwl_mvm_binding_remove_vif(mvm, vif);
- mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -1103,26 +1469,20 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
u32 changes)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- enum ieee80211_bss_change ht_change = BSS_CHANGED_ERP_CTS_PROT |
- BSS_CHANGED_HT |
- BSS_CHANGED_BANDWIDTH;
- int ret;
/* Changes will be applied when the AP/IBSS is started */
if (!mvmvif->ap_ibss_active)
return;
- if (changes & ht_change) {
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
- }
+ if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
+ BSS_CHANGED_BANDWIDTH) &&
+ iwl_mvm_mac_ctxt_changed(mvm, vif))
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
/* Need to send a new beacon template to the FW */
- if (changes & BSS_CHANGED_BEACON) {
- if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
- IWL_WARN(mvm, "Failed updating beacon data\n");
- }
+ if (changes & BSS_CHANGED_BEACON &&
+ iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
+ IWL_WARN(mvm, "Failed updating beacon data\n");
}
static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -1162,13 +1522,30 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- ret = iwl_mvm_scan_request(mvm, vif, req);
- else
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_SCHED:
+ ret = iwl_mvm_sched_scan_stop(mvm);
+ if (ret) {
+ ret = -EBUSY;
+ goto out;
+ }
+ break;
+ case IWL_MVM_SCAN_NONE:
+ break;
+ default:
ret = -EBUSY;
+ goto out;
+ }
- mutex_unlock(&mvm->mutex);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
+ ret = iwl_mvm_scan_request(mvm, vif, req);
+ if (ret)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+out:
+ mutex_unlock(&mvm->mutex);
+ /* make sure to flush the Rx handler before the next scan arrives */
+ iwl_mvm_wait_for_async_handlers(mvm);
return ret;
}
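
The new hw_scan entry path above stops a running scheduled scan to make room and rejects anything else as busy. A toy state-machine sketch of that decision:

#include <stdio.h>

enum scan_status { SCAN_NONE, SCAN_OS, SCAN_SCHED };

/* Mirror of the rewritten entry logic: a scheduled scan is stopped
 * to make room, an OS scan (or anything else) reports busy. */
static int start_hw_scan(enum scan_status *status)
{
    switch (*status) {
    case SCAN_SCHED:
        *status = SCAN_NONE;    /* stop the sched scan first */
        break;
    case SCAN_NONE:
        break;
    default:
        return -16;             /* -EBUSY */
    }
    *status = SCAN_OS;
    return 0;
}

int main(void)
{
    enum scan_status s = SCAN_SCHED;
    int ret = start_hw_scan(&s);

    printf("start while sched scan ran: ret=%d, status now %d\n", ret, s);
    printf("start again while OS scan runs: ret=%d\n", start_hw_scan(&s));
    return 0;
}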
@@ -1186,20 +1563,32 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
static void
iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u16 tid,
+ struct ieee80211_sta *sta, u16 tids,
int num_frames,
enum ieee80211_frame_release_type reason,
bool more_data)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- /* TODO: how do we tell the fw to send frames for a specific TID */
+ /* Called when we need to transmit (a) frame(s) from mac80211 */
- /*
- * The fw will send EOSP notification when the last frame will be
- * transmitted.
- */
- iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames);
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, false);
+}
+
+static void
+iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tids,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* Called when we need to transmit (a) frame(s) from agg queue */
+
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, true);
}
static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
@@ -1209,11 +1598,25 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int tid;
switch (cmd) {
case STA_NOTIFY_SLEEP:
if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
ieee80211_sta_block_awake(hw, sta, true);
+ spin_lock_bh(&mvmsta->lock);
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct iwl_mvm_tid_data *tid_data;
+
+ tid_data = &mvmsta->tid_data[tid];
+ if (tid_data->state != IWL_AGG_ON &&
+ tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
+ continue;
+ if (iwl_mvm_tid_queued(tid_data) == 0)
+ continue;
+ ieee80211_sta_set_buffered(sta, tid, true);
+ }
+ spin_unlock_bh(&mvmsta->lock);
/*
* The fw updates the STA to be asleep. Tx packets on the Tx
* queues to this station will not be transmitted. The fw will
@@ -1304,12 +1707,14 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
+ if (vif->bss_conf.dtim_period)
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif,
+ CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
/* disable beacon filtering */
- WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif));
+ WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
@@ -1401,9 +1806,26 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
- IWL_DEBUG_SCAN(mvm,
- "SCHED SCAN request during internal scan - abort\n");
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_OS:
+ IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
+ ret = iwl_mvm_cancel_scan(mvm);
+ if (ret) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * iwl_mvm_rx_scan_complete() will be called soon but will
+ * not reset the scan status as it won't be IWL_MVM_SCAN_OS
+ * any more since we queue the next scan immediately (below).
+ * We make sure it is called before the next scan starts by
+ * flushing the async-handlers work.
+ */
+ break;
+ case IWL_MVM_SCAN_NONE:
+ break;
+ default:
ret = -EBUSY;
goto out;
}
@@ -1425,17 +1847,23 @@ err:
mvm->scan_status = IWL_MVM_SCAN_NONE;
out:
mutex_unlock(&mvm->mutex);
+ /* make sure to flush the Rx handler before the next scan arrives */
+ iwl_mvm_wait_for_async_handlers(mvm);
return ret;
}
-static void iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
mutex_lock(&mvm->mutex);
- iwl_mvm_sched_scan_stop(mvm);
+ ret = iwl_mvm_sched_scan_stop(mvm);
mutex_unlock(&mvm->mutex);
+ iwl_mvm_wait_for_async_handlers(mvm);
+
+ return ret;
}
static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
@@ -1773,8 +2201,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
* Power state must be updated before quotas,
* otherwise fw will complain.
*/
- mvm->bound_vif_cnt++;
- iwl_mvm_power_update_binding(mvm, vif, true);
+ iwl_mvm_power_update_mac(mvm, vif);
/* Setting the quota at this stage is only required for monitor
* interfaces. For the other types, the bss_info changed flow
@@ -1791,8 +2218,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
out_remove_binding:
iwl_mvm_binding_remove_vif(mvm, vif);
- mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
if (ret)
@@ -1824,8 +2250,7 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
iwl_mvm_binding_remove_vif(mvm, vif);
- mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mvmvif->phy_ctxt = NULL;
@@ -1892,8 +2317,9 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif);
- return iwl_mvm_disable_beacon_filter(mvm, vif);
+ return iwl_mvm_enable_beacon_filter(mvm, vif,
+ CMD_SYNC);
+ return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
}
return -EOPNOTSUPP;
@@ -1914,7 +2340,7 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
}
#endif
-struct ieee80211_ops iwl_mvm_hw_ops = {
+const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
.start = iwl_mvm_mac_start,
@@ -1932,6 +2358,7 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
.sta_state = iwl_mvm_mac_sta_state,
.sta_notify = iwl_mvm_mac_sta_notify,
.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+ .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
.sta_rc_update = iwl_mvm_sta_rc_update,
.conf_tx = iwl_mvm_mac_conf_tx,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 2b0ba1fc3c82..d564233a65da 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -91,9 +91,7 @@ enum iwl_mvm_tx_fifo {
IWL_MVM_TX_FIFO_MCAST = 5,
};
-extern struct ieee80211_ops iwl_mvm_hw_ops;
-extern const struct iwl_mvm_power_ops pm_legacy_ops;
-extern const struct iwl_mvm_power_ops pm_mac_ops;
+extern const struct ieee80211_ops iwl_mvm_hw_ops;
/**
* struct iwl_mvm_mod_params - module parameters for iwlmvm
@@ -159,20 +157,6 @@ enum iwl_power_scheme {
IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
-struct iwl_mvm_power_ops {
- int (*power_update_mode)(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
- int (*power_update_device_mode)(struct iwl_mvm *mvm);
- int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
- void (*power_update_binding)(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool assign);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- char *buf, int bufsz);
-#endif
-};
-
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
@@ -239,6 +223,19 @@ enum iwl_mvm_smps_type_request {
NUM_IWL_MVM_SMPS_REQ,
};
+enum iwl_mvm_ref_type {
+ IWL_MVM_REF_UCODE_DOWN,
+ IWL_MVM_REF_SCAN,
+ IWL_MVM_REF_ROC,
+ IWL_MVM_REF_P2P_CLIENT,
+ IWL_MVM_REF_AP_IBSS,
+ IWL_MVM_REF_USER,
+ IWL_MVM_REF_TX,
+ IWL_MVM_REF_TX_AGG,
+
+ IWL_MVM_REF_COUNT,
+};
+
/**
* struct iwl_mvm_vif_bf_data - beacon filtering related data
* @bf_enabled: indicates if beacon filtering is enabled
@@ -269,7 +266,9 @@ struct iwl_mvm_vif_bf_data {
* @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
* should get quota etc.
* @monitor_active: indicates that monitor context is configured, and that the
- * interface should get quota etc.
+ * interface should get quota etc.
+ * @low_latency: indicates that this interface is in low-latency mode
+ * (VMACLowLatencyMode)
* @queue_params: QoS params for this MAC
* @bcast_sta: station used for broadcast packets. Used by the following
* vifs: P2P_DEVICE, GO and AP.
@@ -285,6 +284,7 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_ibss_active;
bool monitor_active;
+ bool low_latency;
struct iwl_mvm_vif_bf_data bf_data;
u32 ap_beacon_time;
@@ -319,13 +319,13 @@ struct iwl_mvm_vif {
bool seqno_valid;
u16 seqno;
+#endif
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 addresses for WoWLAN */
struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
int num_target_ipv6_addrs;
#endif
-#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl_mvm *mvm;
@@ -333,14 +333,13 @@ struct iwl_mvm_vif {
struct dentry *dbgfs_slink;
struct iwl_dbgfs_pm dbgfs_pm;
struct iwl_dbgfs_bf dbgfs_bf;
+ struct iwl_mac_power_cmd mac_pwr_cmd;
#endif
enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
/* FW identified misbehaving AP */
u8 uapsd_misbehaving_bssid[ETH_ALEN];
-
- bool pm_prevented;
};
static inline struct iwl_mvm_vif *
@@ -349,6 +348,8 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
return (void *)vif->drv_priv;
}
+extern const u8 tid_to_mac80211_ac[];
+
enum iwl_scan_status {
IWL_MVM_SCAN_NONE,
IWL_MVM_SCAN_OS,
@@ -415,6 +416,7 @@ struct iwl_tt_params {
* @ct_kill_exit: worker to exit thermal kill
* @dynamic_smps: Has thermal throttling enabled dynamic SMPS?
* @tx_backoff: The current thermal throttling tx backoff in uSec.
+ * @min_backoff: The minimal tx backoff due to power restrictions
* @params: Parameters to configure the thermal throttling algorithm.
* @throttle: Is thermal throttling active?
*/
@@ -422,10 +424,33 @@ struct iwl_mvm_tt_mgmt {
struct delayed_work ct_kill_exit;
bool dynamic_smps;
u32 tx_backoff;
+ u32 min_backoff;
const struct iwl_tt_params *params;
bool throttle;
};
+#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8
+
+struct iwl_mvm_frame_stats {
+ u32 legacy_frames;
+ u32 ht_frames;
+ u32 vht_frames;
+ u32 bw_20_frames;
+ u32 bw_40_frames;
+ u32 bw_80_frames;
+ u32 bw_160_frames;
+ u32 sgi_frames;
+ u32 ngi_frames;
+ u32 siso_frames;
+ u32 mimo2_frames;
+ u32 agg_frames;
+ u32 ampdu_count;
+ u32 success_frames;
+ u32 fail_frames;
+ u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES];
+ int last_frame_idx;
+};
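
last_rates[] together with last_frame_idx is a small ring buffer of the most recent rate words, presumably feeding the new debugfs rate statistics. A standalone sketch of that ring (names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NUM_LAST_RATES 8

struct frame_stats {
    uint32_t success, fail;
    uint32_t last_rates[NUM_LAST_RATES];  /* ring of recent rate words */
    int last_frame_idx;
};

static void record_rate(struct frame_stats *s, uint32_t rate, int success)
{
    if (success)
        s->success++;
    else
        s->fail++;
    s->last_rates[s->last_frame_idx] = rate;
    s->last_frame_idx = (s->last_frame_idx + 1) % NUM_LAST_RATES;
}

int main(void)
{
    struct frame_stats s = { 0 };

    for (uint32_t i = 0; i < 11; i++)
        record_rate(&s, 0x4000 + i, 1);
    printf("success=%u, oldest slot now holds 0x%x\n",
           s.success, s.last_rates[s.last_frame_idx]);
    return 0;
}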
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -457,6 +482,8 @@ struct iwl_mvm {
bool init_ucode_complete;
u32 error_event_table;
u32 log_event_table;
+ u32 umac_error_event_table;
+ bool support_umac_log;
u32 ampdu_ref;
@@ -470,7 +497,7 @@ struct iwl_mvm {
struct iwl_nvm_data *nvm_data;
/* NVM sections */
- struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS];
+ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
/* EEPROM MAC addresses */
struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
@@ -494,6 +521,17 @@ struct iwl_mvm {
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ /* broadcast filters to configure for each associated station */
+ const struct iwl_fw_bcast_filter *bcast_filters;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct {
+ u32 override; /* u32 for debugfs_create_bool */
+ struct iwl_bcast_filter_cmd cmd;
+ } dbgfs_bcast_filtering;
+#endif
+#endif
+
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
@@ -506,6 +544,7 @@ struct iwl_mvm {
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct dentry *debugfs_dir;
u32 dbgfs_sram_offset, dbgfs_sram_len;
+ u32 dbgfs_prph_reg_addr;
bool disable_power_off;
bool disable_power_off_d3;
@@ -513,6 +552,9 @@ struct iwl_mvm {
struct debugfs_blob_wrapper nvm_sw_blob;
struct debugfs_blob_wrapper nvm_calib_blob;
struct debugfs_blob_wrapper nvm_prod_blob;
+
+ struct iwl_mvm_frame_stats drv_rx_stats;
+ spinlock_t drv_stats_lock;
#endif
struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -526,10 +568,16 @@ struct iwl_mvm {
*/
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+ /* A bitmap of reference types taken by the driver. */
+ unsigned long ref_bitmap[BITS_TO_LONGS(IWL_MVM_REF_COUNT)];
+
u8 vif_count;
/* -1 for always, 0 for never, >0 for that many times */
s8 restart_fw;
+ void *fw_error_dump;
+ void *fw_error_sram;
+ u32 fw_error_sram_len;
struct led_classdev led;
@@ -548,17 +596,27 @@ struct iwl_mvm {
#endif
#endif
+ /* d0i3 */
+ u8 d0i3_ap_sta_id;
+ bool d0i3_offloading;
+ struct work_struct d0i3_exit_work;
+ struct sk_buff_head d0i3_tx;
+ /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
+ spinlock_t d0i3_tx_lock;
+ wait_queue_head_t d0i3_exit_waitq;
+
/* BT-Coex */
u8 bt_kill_msk;
struct iwl_bt_coex_profile_notif last_bt_notif;
struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
+ u32 last_ant_isol;
+ u8 last_corun_lut;
+ u8 bt_tx_prio;
/* Thermal Throttling and CTkill */
struct iwl_mvm_tt_mgmt thermal_throttle;
s32 temperature; /* Celsius */
- const struct iwl_mvm_power_ops *pm_ops;
-
#ifdef CONFIG_NL80211_TESTMODE
u32 noa_duration;
struct ieee80211_vif *noa_vif;
@@ -569,10 +627,10 @@ struct iwl_mvm {
u8 first_agg_queue;
u8 last_agg_queue;
- u8 bound_vif_cnt;
-
/* Indicate if device power save is allowed */
- bool ps_prevented;
+ bool ps_disabled;
+ /* Indicate if device power management is allowed */
+ bool pm_disabled;
};
/* Extract MVM priv from op_mode and _hw */
@@ -587,6 +645,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_HW_CTKILL,
IWL_MVM_STATUS_ROC_RUNNING,
IWL_MVM_STATUS_IN_HW_RESTART,
+ IWL_MVM_STATUS_IN_D0I3,
};
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -595,6 +654,30 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
}
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+
+ if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return NULL;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* This can happen if the station has just been removed */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+}
+
+static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
+{
+ return mvm->trans->cfg->d0i3 &&
+ (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
+}
+
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
@@ -619,7 +702,10 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
-void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
+void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
+#endif
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@ -645,6 +731,11 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
+static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
+{
+ flush_work(&mvm->async_handlers_wk);
+}
+
/* Statistics */
int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
@@ -661,6 +752,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+ struct iwl_bcast_filter_cmd *cmd);
/*
* FW notifications / CMD responses handlers
@@ -676,6 +769,9 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
@@ -730,7 +826,7 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
+int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
/* Scheduled scan */
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
@@ -744,7 +840,7 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
-void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
@@ -772,49 +868,24 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
+ struct iwl_mvm_frame_stats *stats,
+ u32 rate, bool agg);
+int rs_pretty_print_rate(char *buf, const u32 rate);
-/* power managment */
-static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- return mvm->pm_ops->power_update_mode(mvm, vif);
-}
-
-static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- return mvm->pm_ops->power_disable(mvm, vif);
-}
-
-static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
-{
- if (mvm->pm_ops->power_update_device_mode)
- return mvm->pm_ops->power_update_device_mode(mvm);
- return 0;
-}
+/* power management */
+int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
-static inline void iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool assign)
-{
- if (mvm->pm_ops->power_update_binding)
- mvm->pm_ops->power_update_binding(mvm, vif, assign);
-}
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ char *buf, int bufsz);
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- char *buf, int bufsz)
-{
- return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
-}
-#endif
-
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
@@ -840,6 +911,17 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
#endif
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+ struct iwl_wowlan_config_cmd_v2 *cmd);
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool disable_offloading,
+ u32 cmd_flags);
+
+/* D0i3 */
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
/* BT Coex */
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -850,10 +932,13 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
-u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta);
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info, u8 ac);
+int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
enum iwl_bt_kill_msk {
BT_KILL_MSK_DEFAULT,
@@ -875,25 +960,53 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
struct iwl_beacon_filter_cmd *cmd)
{}
#endif
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, u32 flags);
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ u32 flags);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
-int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd);
+ struct ieee80211_vif *vif,
+ u32 flags);
int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, bool enable);
int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ bool force,
+ u32 flags);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
enum ieee80211_smps_mode smps_request);
+/* Low latency */
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool value);
+/* get SystemLowLatencyMode - only needed for beacon threshold? */
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
+/* get VMACLowLatencyMode */
+static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
+{
+ /*
+ * should this consider associated/active/... state?
+ *
+ * Normally low-latency should only be active on interfaces
+ * that are active, but at least with debugfs it can also be
+ * enabled on interfaces that aren't active. However, when
+ * interfaces aren't active they aren't added into the
+ * binding, so this has no real impact. For now, just return
+ * the current desired low-latency state.
+ */
+
+ return mvmvif->low_latency;
+}
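
The per-vif flag above feeds the system-wide iwl_mvm_low_latency(), which presumably ORs the flags of all interfaces; a toy model of that aggregation with a plain array instead of the mac80211 iterator:

#include <stdbool.h>
#include <stdio.h>

struct vif { bool low_latency; };

/* System low latency = at least one interface requested it. */
static bool system_low_latency(const struct vif *vifs, int n)
{
    for (int i = 0; i < n; i++)
        if (vifs[i].low_latency)
            return true;
    return false;
}

int main(void)
{
    struct vif vifs[3] = { { false }, { true }, { false } };

    printf("%d\n", system_low_latency(vifs, 3));   /* 1 */
    return 0;
}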
+
/* Thermal management and CT-kill */
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 35b71af78d02..cf2d09f53782 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -67,14 +67,6 @@
#include "iwl-eeprom-read.h"
#include "iwl-nvm-parse.h"
-/* list of NVM sections we are allowed/need to read */
-static const int nvm_to_read[] = {
- NVM_SECTION_TYPE_HW,
- NVM_SECTION_TYPE_SW,
- NVM_SECTION_TYPE_CALIBRATION,
- NVM_SECTION_TYPE_PRODUCTION,
-};
-
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE 7000
@@ -236,24 +228,39 @@ static struct iwl_nvm_data *
iwl_parse_nvm_sections(struct iwl_mvm *mvm)
{
struct iwl_nvm_section *sections = mvm->nvm_sections;
- const __le16 *hw, *sw, *calib;
+ const __le16 *hw, *sw, *calib, *regulatory, *mac_override;
/* Checking for required sections */
- if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
- !mvm->nvm_sections[NVM_SECTION_TYPE_HW].data) {
- IWL_ERR(mvm, "Can't parse empty NVM sections\n");
- return NULL;
+ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+ !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
+ IWL_ERR(mvm, "Can't parse empty NVM sections\n");
+ return NULL;
+ }
+ } else {
+ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+ !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data ||
+ !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
+ IWL_ERR(mvm,
+ "Can't parse empty family 8000 NVM sections\n");
+ return NULL;
+ }
}
if (WARN_ON(!mvm->cfg))
return NULL;
- hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
+ hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
+ regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+ mac_override =
+ (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+
return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
- iwl_fw_valid_tx_ant(mvm->fw),
- iwl_fw_valid_rx_ant(mvm->fw));
+ regulatory, mac_override,
+ mvm->fw->valid_tx_ant,
+ mvm->fw->valid_rx_ant);
}
#define MAX_NVM_FILE_LEN 16384
@@ -293,6 +300,8 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
+#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
+#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
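
The family-8000 external NVM header swaps the roles of the two words: the length is byte-swapped and lives in word2, the ID moves to word1. A worked example of both decodings (the header words are made-up samples; the driver then multiplies the length by 2 to get section_size in bytes):

#include <stdint.h>
#include <stdio.h>

#define NVM_WORD1_LEN(x)              (8 * ((x) & 0x03FF))
#define NVM_WORD2_ID(x)               ((x) >> 12)
#define NVM_WORD2_LEN_FAMILY_8000(x)  (2 * (((x) & 0xFF) << 8 | (x) >> 8))
#define NVM_WORD1_ID_FAMILY_8000(x)   ((x) >> 4)

int main(void)
{
    uint16_t word1 = 0x0040, word2 = 0x3002;   /* sample header words */

    /* pre-8000 layout: length in word1, ID in the top nibble of word2 */
    printf("pre-8000:    len=%d id=%d\n",
           NVM_WORD1_LEN(word1), NVM_WORD2_ID(word2));       /* 512, 3 */

    /* family 8000 layout: byte-swapped length in word2, ID in word1 */
    printf("family 8000: len=%d id=%d\n",
           NVM_WORD2_LEN_FAMILY_8000(word2),
           NVM_WORD1_ID_FAMILY_8000(word1));                 /* 1120, 4 */
    return 0;
}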
@@ -343,8 +352,16 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
- section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
- section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
+ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ section_size =
+ 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
+ section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
+ } else {
+ section_size = 2 * NVM_WORD2_LEN_FAMILY_8000(
+ le16_to_cpu(file_sec->word2));
+ section_id = NVM_WORD1_ID_FAMILY_8000(
+ le16_to_cpu(file_sec->word1));
+ }
if (section_size > IWL_MAX_NVM_SECTION_SIZE) {
IWL_ERR(mvm, "ERROR - section too large (%d)\n",
@@ -367,7 +384,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
- if (WARN(section_id >= NVM_NUM_OF_SECTIONS,
+ if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
"Invalid NVM section ID %d\n", section_id)) {
ret = -EINVAL;
break;
@@ -414,6 +431,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
{
int ret, i, section;
u8 *nvm_buffer, *temp;
+ int nvm_to_read[NVM_MAX_NUM_SECTIONS];
+ int num_of_sections_to_read;
+
+ if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
+ return -EINVAL;
/* load external NVM if configured */
if (iwlwifi_mod_params.nvm_file) {
@@ -422,6 +444,22 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
if (ret)
return ret;
} else {
+ /* list of NVM sections we are allowed/need to read */
+ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ nvm_to_read[0] = mvm->cfg->nvm_hw_section_num;
+ nvm_to_read[1] = NVM_SECTION_TYPE_SW;
+ nvm_to_read[2] = NVM_SECTION_TYPE_CALIBRATION;
+ nvm_to_read[3] = NVM_SECTION_TYPE_PRODUCTION;
+ num_of_sections_to_read = 4;
+ } else {
+ nvm_to_read[0] = NVM_SECTION_TYPE_SW;
+ nvm_to_read[1] = NVM_SECTION_TYPE_CALIBRATION;
+ nvm_to_read[2] = NVM_SECTION_TYPE_PRODUCTION;
+ nvm_to_read[3] = NVM_SECTION_TYPE_REGULATORY;
+ nvm_to_read[4] = NVM_SECTION_TYPE_MAC_OVERRIDE;
+ num_of_sections_to_read = 5;
+ }
+
/* Read From FW NVM */
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
@@ -430,7 +468,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
GFP_KERNEL);
if (!nvm_buffer)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+ for (i = 0; i < num_of_sections_to_read; i++) {
section = nvm_to_read[i];
/* we override the constness for initial read */
ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
@@ -446,10 +484,6 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
#ifdef CONFIG_IWLWIFI_DEBUGFS
switch (section) {
- case NVM_SECTION_TYPE_HW:
- mvm->nvm_hw_blob.data = temp;
- mvm->nvm_hw_blob.size = ret;
- break;
case NVM_SECTION_TYPE_SW:
mvm->nvm_sw_blob.data = temp;
mvm->nvm_sw_blob.size = ret;
@@ -463,6 +497,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_prod_blob.size = ret;
break;
default:
+ if (section == mvm->cfg->nvm_hw_section_num) {
+ mvm->nvm_hw_blob.data = temp;
+ mvm->nvm_hw_blob.size = ret;
+ break;
+ }
WARN(1, "section: %d", section);
}
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/iwlwifi/mvm/offloading.c
new file mode 100644
index 000000000000..9bfb95e89cfb
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/offloading.c
@@ -0,0 +1,215 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include "mvm.h"
+
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+ struct iwl_wowlan_config_cmd_v2 *cmd)
+{
+ int i;
+
+	/*
+	 * For the QoS counters we store the value to use next, while the
+	 * uCode adds 0x10 *before* using a value; subtract 0x10 here so the
+	 * uCode ends up using the sequence number we intended to use next.
+	 */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = mvm_ap_sta->tid_data[i].seq_number;
+ seq -= 0x10;
+ cmd->qos_seq[i] = cpu_to_le16(seq);
+ }
+}
+
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool disable_offloading,
+ u32 cmd_flags)
+{
+ union {
+ struct iwl_proto_offload_cmd_v1 v1;
+ struct iwl_proto_offload_cmd_v2 v2;
+ struct iwl_proto_offload_cmd_v3_small v3s;
+ struct iwl_proto_offload_cmd_v3_large v3l;
+ } cmd = {};
+ struct iwl_host_cmd hcmd = {
+ .id = PROT_OFFLOAD_CONFIG_CMD,
+ .flags = cmd_flags,
+ .data[0] = &cmd,
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ };
+ struct iwl_proto_offload_cmd_common *common;
+ u32 enabled = 0, size;
+ u32 capa_flags = mvm->fw->ucode_capa.flags;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int i;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+ capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ struct iwl_ns_config *nsc;
+ struct iwl_targ_addr *addrs;
+ int n_nsc, n_addrs;
+ int c;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ nsc = cmd.v3s.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+ addrs = cmd.v3s.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
+ } else {
+ nsc = cmd.v3l.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+ addrs = cmd.v3l.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+ }
+
+ if (mvmvif->num_target_ipv6_addrs)
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+
+		/*
+		 * For each target address that fits, fill a target address
+		 * struct and point it at the NS offload config entry for its
+		 * solicited-node address (configs are shared when addresses
+		 * map to the same solicited-node address).
+		 */
+ for (i = 0, c = 0;
+ i < mvmvif->num_target_ipv6_addrs &&
+ i < n_addrs && c < n_nsc; i++) {
+ struct in6_addr solicited_addr;
+ int j;
+
+ addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
+ &solicited_addr);
+ for (j = 0; j < c; j++)
+ if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+ &solicited_addr) == 0)
+ break;
+ if (j == c)
+ c++;
+ addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+ addrs[i].config_num = cpu_to_le32(j);
+ nsc[j].dest_ipv6_addr = solicited_addr;
+ memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
+ cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
+ else
+ cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ if (mvmvif->num_target_ipv6_addrs) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+ memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
+
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
+ memcpy(cmd.v2.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v2.target_ipv6_addr[i]));
+ } else {
+ if (mvmvif->num_target_ipv6_addrs) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+ memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
+
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
+ memcpy(cmd.v1.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v1.target_ipv6_addr[i]));
+ }
+#endif
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ common = &cmd.v3s.common;
+ size = sizeof(cmd.v3s);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ common = &cmd.v3l.common;
+ size = sizeof(cmd.v3l);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ common = &cmd.v2.common;
+ size = sizeof(cmd.v2);
+ } else {
+ common = &cmd.v1.common;
+ size = sizeof(cmd.v1);
+ }
+
+ if (vif->bss_conf.arp_addr_cnt) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
+ common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+ memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (!disable_offloading)
+ common->enabled = cpu_to_le32(enabled);
+
+ hcmd.len[0] = size;
+ return iwl_mvm_send_cmd(mvm, &hcmd);
+}
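
The ±0x10 convention in iwl_mvm_set_wowlan_qos_seq() above (and its counterpart when re-enabling tx after D0i3, further down in ops.c) is easy to get backwards: the driver tracks the next sequence number to use, while the firmware stores the last used one and adds 0x10 before transmitting. A minimal sketch of the two conversions, with made-up helper names:

#include <stdint.h>

static inline uint16_t drv_to_fw_seq(uint16_t next_to_use)
{
	return next_to_use - 0x10;	/* fw adds 0x10 before using it */
}

static inline uint16_t fw_to_drv_seq(uint16_t last_used)
{
	return last_used + 0x10;	/* store the next value to use */
}
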
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index a3d43de342d7..9545d7fdd4bf 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -61,6 +61,7 @@
*
*****************************************************************************/
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include <net/mac80211.h>
#include "iwl-notif-wait.h"
@@ -78,6 +79,7 @@
#include "iwl-prph.h"
#include "rs.h"
#include "fw-api-scan.h"
+#include "fw-error-dump.h"
#include "time-event.h"
/*
@@ -185,9 +187,10 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and not being able to obtain it back.
*/
- iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
- ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
struct iwl_rx_handlers {
@@ -219,13 +222,17 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
+ RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
+ iwl_mvm_rx_ant_coupling_notif, true),
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
+ RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
+
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
- RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+ RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
- iwl_mvm_rx_scan_offload_complete_notif, false),
+ iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
false),
@@ -242,7 +249,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
#undef RX_HANDLER
#define CMD(x) [x] = #x
-static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
+static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(MVM_ALIVE),
CMD(REPLY_ERROR),
CMD(INIT_COMPLETE_NOTIF),
@@ -284,9 +291,11 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BEACON_NOTIFICATION),
CMD(BEACON_TEMPLATE_CMD),
CMD(STATISTICS_NOTIFICATION),
+ CMD(EOSP_NOTIFICATION),
CMD(REDUCE_TX_POWER_CMD),
CMD(TX_ANT_CONFIGURATION_CMD),
CMD(D3_CONFIG_CMD),
+ CMD(D0I3_END_CMD),
CMD(PROT_OFFLOAD_CONFIG_CMD),
CMD(OFFLOADS_QUERY_CMD),
CMD(REMOTE_WAKE_CONFIG_CMD),
@@ -309,17 +318,37 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BT_PROFILE_NOTIFICATION),
CMD(BT_CONFIG),
CMD(MCAST_FILTER_CMD),
+ CMD(BCAST_FILTER_CMD),
CMD(REPLY_SF_CFG_CMD),
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
CMD(MAC_PM_POWER_TABLE),
CMD(BT_COEX_CI),
CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
+ CMD(ANTENNA_COUPLING_NOTIFICATION),
};
#undef CMD
/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
+
+static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
+{
+ const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
+
+ if (!pwr_tx_backoff)
+ return 0;
+
+ while (pwr_tx_backoff->pwr) {
+ if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
+ return pwr_tx_backoff->backoff;
+
+ pwr_tx_backoff++;
+ }
+
+ return 0;
+}
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
@@ -333,6 +362,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
TX_CMD,
};
int err, scan_size;
+ u32 min_backoff;
+
+ /*
+ * We use IWL_MVM_STATION_COUNT to check the validity of the station
+ * index all over the driver - check that its value corresponds to the
+ * array size.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
/********************************
* 1. Allocating and configuring HW data
@@ -373,6 +410,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
+ INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
+
+ spin_lock_init(&mvm->d0i3_tx_lock);
+ skb_queue_head_init(&mvm->d0i3_tx);
+ init_waitqueue_head(&mvm->d0i3_exit_waitq);
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
@@ -421,7 +463,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
mvm->cfg->name, mvm->trans->hw_rev);
- iwl_mvm_tt_initialize(mvm);
+ min_backoff = calc_min_backoff(trans, cfg);
+ iwl_mvm_tt_initialize(mvm, min_backoff);
/*
* If the NVM exists in an external file,
@@ -462,13 +505,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_unregister;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)
- mvm->pm_ops = &pm_mac_ops;
- else
- mvm->pm_ops = &pm_legacy_ops;
-
memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+ /* rpm starts with a taken ref. only set the appropriate bit here. */
+ set_bit(IWL_MVM_REF_UCODE_DOWN, mvm->ref_bitmap);
+
return op_mode;
out_unregister:
@@ -495,6 +536,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
+ vfree(mvm->fw_error_dump);
+ kfree(mvm->fw_error_sram);
kfree(mvm->mcast_filter_cmd);
mvm->mcast_filter_cmd = NULL;
@@ -508,7 +551,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
mvm->phy_db = NULL;
iwl_free_nvm_data(mvm->nvm_data);
- for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
+ for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
ieee80211_free_hw(mvm->hw);
@@ -658,7 +701,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
}
-static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -667,9 +710,9 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
else
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- if (state && mvm->cur_ucode != IWL_UCODE_INIT)
- iwl_trans_stop_device(mvm->trans);
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
+
+ return state && mvm->cur_ucode != IWL_UCODE_INIT;
}
static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
@@ -703,6 +746,29 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
iwl_abort_notification_waits(&mvm->notif_wait);
/*
+	 * This is a bit racy, but in the worst case we tell mac80211 about
+	 * a stopped/aborted scan that was already reported, which is not a
+	 * problem. Any OS scan must be aborted here because mac80211
+	 * requires the scan to be cleared before restarting.
+	 * We'll reset scan_status to NONE in the restart cleanup in the
+	 * next start() call from mac80211. If restart isn't called (no fw
+	 * restart), the scan status will stay busy.
+	 */
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_NONE:
+ break;
+ case IWL_MVM_SCAN_OS:
+ ieee80211_scan_completed(mvm->hw, true);
+ break;
+ case IWL_MVM_SCAN_SCHED:
+ /* Sched scan will be restarted by mac80211 in restart_hw. */
+ if (!mvm->restart_fw)
+ ieee80211_sched_scan_stopped(mvm->hw);
+ break;
+ }
+
+ /*
* If we're restarting already, don't cycle restarts.
* If INIT fw asserted, it will likely fail again.
* If WoWLAN fw asserted, don't restart either, mac80211
@@ -733,25 +799,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
- /*
- * This is a bit racy, but worst case we tell mac80211 about
- * a stopped/aborted (sched) scan when that was already done
- * which is not a problem. It is necessary to abort any scan
- * here because mac80211 requires having the scan cleared
- * before restarting.
- * We'll reset the scan_status to NONE in restart cleanup in
- * the next start() call from mac80211.
- */
- switch (mvm->scan_status) {
- case IWL_MVM_SCAN_NONE:
- break;
- case IWL_MVM_SCAN_OS:
- ieee80211_scan_completed(mvm->hw, true);
- break;
- case IWL_MVM_SCAN_SCHED:
- ieee80211_sched_scan_stopped(mvm->hw);
- break;
- }
+ /* don't let the transport/FW power down */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
if (mvm->restart_fw > 0)
mvm->restart_fw--;
@@ -759,13 +808,52 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
}
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
+{
+ struct iwl_fw_error_dump_file *dump_file;
+ struct iwl_fw_error_dump_data *dump_data;
+ u32 file_len;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->fw_error_dump)
+ return;
+
+ file_len = mvm->fw_error_sram_len +
+ sizeof(*dump_file) +
+ sizeof(*dump_data);
+
+ dump_file = vmalloc(file_len);
+ if (!dump_file)
+ return;
+
+ mvm->fw_error_dump = dump_file;
+
+ dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
+ dump_file->file_len = cpu_to_le32(file_len);
+ dump_data = (void *)dump_file->data;
+ dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
+ dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
+
+	/*
+	 * No need for a lock since at this stage the FW isn't loaded, so it
+	 * can't assert - we are the only ones who could possibly be
+	 * accessing mvm->fw_error_sram right now.
+	 */
+ memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
+}
+#endif
+
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
iwl_mvm_dump_nic_error_log(mvm);
- if (!mvm->restart_fw)
- iwl_mvm_dump_sram(mvm);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_fw_error_sram_dump(mvm);
+#endif
iwl_mvm_nic_restart(mvm);
}
@@ -778,6 +866,323 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
iwl_mvm_nic_restart(mvm);
}
+struct iwl_d0i3_iter_data {
+ struct iwl_mvm *mvm;
+ u8 ap_sta_id;
+ u8 vif_count;
+ u8 offloading_tid;
+ bool disable_offloading;
+};
+
+static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_d0i3_iter_data *iter_data)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvmsta;
+ u32 available_tids = 0;
+ u8 tid;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
+ mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+ return false;
+
+ ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
+ if (IS_ERR_OR_NULL(ap_sta))
+ return false;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
+ spin_lock_bh(&mvmsta->lock);
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+ /*
+ * in case of pending tx packets, don't use this tid
+ * for offloading in order to prevent reuse of the same
+ * qos seq counters.
+ */
+ if (iwl_mvm_tid_queued(tid_data))
+ continue;
+
+ if (tid_data->state != IWL_AGG_OFF)
+ continue;
+
+ available_tids |= BIT(tid);
+ }
+ spin_unlock_bh(&mvmsta->lock);
+
+	/*
+	 * disallow protocol offloading if we have no TID that is both free
+	 * of pending frames and not under active aggregation, as we don't
+	 * handle "holes" properly - the scheduler needs the frame's seq
+	 * number and TFD index to match
+	 */
+ if (!available_tids)
+ return true;
+
+ /* for simplicity, just use the first available tid */
+ iter_data->offloading_tid = ffs(available_tids) - 1;
+ return false;
+}
+
+static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_d0i3_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+
+ IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ /*
+ * in case of pending tx packets or active aggregations,
+ * avoid offloading features in order to prevent reuse of
+ * the same qos seq counters.
+ */
+ if (iwl_mvm_disallow_offloading(mvm, vif, data))
+ data->disable_offloading = true;
+
+ iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
+ iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, flags);
+
+ /*
+ * on init/association, mvm already configures POWER_TABLE_CMD
+ * and REPLY_MCAST_FILTER_CMD, so currently don't
+ * reconfigure them (we might want to use different
+ * params later on, though).
+ */
+ data->ap_sta_id = mvmvif->ap_sta_id;
+ data->vif_count++;
+}
+
+static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
+ struct iwl_wowlan_config_cmd_v3 *cmd,
+ struct iwl_d0i3_iter_data *iter_data)
+{
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvm_ap_sta;
+
+ if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
+ return;
+
+ rcu_read_lock();
+
+ ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
+ if (IS_ERR_OR_NULL(ap_sta))
+ goto out;
+
+ mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
+ cmd->common.is_11n_connection = ap_sta->ht_cap.ht_supported;
+ cmd->offloading_tid = iter_data->offloading_tid;
+
+ /*
+ * The d0i3 uCode takes care of the nonqos counters,
+ * so configure only the qos seq ones.
+ */
+ iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &cmd->common);
+out:
+ rcu_read_unlock();
+}
+static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+ int ret;
+ struct iwl_d0i3_iter_data d0i3_iter_data = {
+ .mvm = mvm,
+ };
+ struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {
+ .common = {
+ .wakeup_filter =
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING),
+ },
+ };
+ struct iwl_d3_manager_config d3_cfg_cmd = {
+ .min_sleep_time = cpu_to_le32(1000),
+ };
+
+ IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
+
+ /* make sure we have no running tx while configuring the qos */
+ set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+ synchronize_net();
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_enter_d0i3_iterator,
+ &d0i3_iter_data);
+ if (d0i3_iter_data.vif_count == 1) {
+ mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
+ mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
+ } else {
+ WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_offloading = false;
+ }
+
+ iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
+ sizeof(wowlan_config_cmd),
+ &wowlan_config_cmd);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
+ flags | CMD_MAKE_TRANS_IDLE,
+ sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+}
+
+static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
+
+ IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
+}
+
+static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
+ mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+ ieee80211_connection_loss(vif);
+}
+
+void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
+{
+ struct ieee80211_sta *sta = NULL;
+ struct iwl_mvm_sta *mvm_ap_sta;
+ int i;
+ bool wake_queues = false;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->d0i3_tx_lock);
+
+ if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
+ goto out;
+
+ IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
+
+ /* get the sta in order to update seq numbers and re-enqueue skbs */
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (IS_ERR_OR_NULL(sta)) {
+ sta = NULL;
+ goto out;
+ }
+
+ if (mvm->d0i3_offloading && qos_seq) {
+ /* update qos seq numbers if offloading was enabled */
+ mvm_ap_sta = (struct iwl_mvm_sta *)sta->drv_priv;
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = le16_to_cpu(qos_seq[i]);
+ /* firmware stores last-used one, we store next one */
+ seq += 0x10;
+ mvm_ap_sta->tid_data[i].seq_number = seq;
+ }
+ }
+out:
+ /* re-enqueue (or drop) all packets */
+ while (!skb_queue_empty(&mvm->d0i3_tx)) {
+ struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
+
+ if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
+ ieee80211_free_txskb(mvm->hw, skb);
+
+ /* if the skb_queue is not empty, we need to wake queues */
+ wake_queues = true;
+ }
+ clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+ wake_up(&mvm->d0i3_exit_waitq);
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ if (wake_queues)
+ ieee80211_wake_queues(mvm->hw);
+
+ spin_unlock_bh(&mvm->d0i3_tx_lock);
+}
+
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
+ struct iwl_host_cmd get_status_cmd = {
+ .id = WOWLAN_GET_STATUSES,
+ .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
+ };
+ struct iwl_wowlan_status_v6 *status;
+ int ret;
+ u32 disconnection_reasons, wakeup_reasons;
+ __le16 *qos_seq = NULL;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
+ if (ret)
+ goto out;
+
+ if (!get_status_cmd.resp_pkt)
+ goto out;
+
+ status = (void *)get_status_cmd.resp_pkt->data;
+ wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
+ qos_seq = status->qos_seq_ctr;
+
+ IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
+
+ disconnection_reasons =
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+ if (wakeup_reasons & disconnection_reasons)
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d0i3_disconnect_iter, mvm);
+
+ iwl_free_resp(&get_status_cmd);
+out:
+ iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
+ CMD_WAKE_UP_TRANS;
+ int ret;
+
+ IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
+ if (ret)
+ goto out;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_exit_d0i3_iterator,
+ mvm);
+out:
+ schedule_work(&mvm->d0i3_exit_work);
+ return ret;
+}
+
static const struct iwl_op_mode_ops iwl_mvm_ops = {
.start = iwl_op_mode_mvm_start,
.stop = iwl_op_mode_mvm_stop,
@@ -789,4 +1194,6 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
.nic_error = iwl_mvm_nic_error,
.cmd_queue_full = iwl_mvm_cmd_queue_full,
.nic_config = iwl_mvm_nic_config,
+ .enter_d0i3 = iwl_mvm_enter_d0i3,
+ .exit_d0i3 = iwl_mvm_exit_d0i3,
};
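
One detail of iwl_mvm_disallow_offloading() above worth spelling out: TIDs with no pending frames and no active aggregation are collected as bits in a mask, and the lowest-numbered one is picked with ffs() (find first set, 1-based). A standalone sketch with a hypothetical helper name:

#include <strings.h>	/* ffs() */
#include <stdint.h>

static int pick_offloading_tid(uint32_t available_tids)
{
	if (!available_tids)
		return -1;	/* no usable TID - offloading is disallowed */

	/* e.g. available_tids == 0x14 (TIDs 2 and 4): ffs() == 3, so TID 2 */
	return ffs(available_tids) - 1;
}
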
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index b7268c0b3333..237efe0ac1c4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -156,13 +156,13 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
idle_cnt = chains_static;
active_cnt = chains_dynamic;
- cmd->rxchain_info = cpu_to_le32(iwl_fw_valid_rx_ant(mvm->fw) <<
+ cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant <<
PHY_RX_CHAIN_VALID_POS);
cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
cmd->rxchain_info |= cpu_to_le32(active_cnt <<
PHY_RX_CHAIN_MIMO_CNT_POS);
- cmd->txchain_info = cpu_to_le32(iwl_fw_valid_tx_ant(mvm->fw));
+ cmd->txchain_info = cpu_to_le32(mvm->fw->valid_tx_ant);
}
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d9eab3b7bb9f..6b636eab3339 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -74,39 +74,36 @@
#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+static
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd)
+ struct iwl_beacon_filter_cmd *cmd,
+ u32 flags)
{
- int ret;
-
- ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC,
- sizeof(struct iwl_beacon_filter_cmd), cmd);
-
- if (!ret) {
- IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
- le32_to_cpu(cmd->ba_enable_beacon_abort));
- IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
- le32_to_cpu(cmd->ba_escape_timer));
- IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
- le32_to_cpu(cmd->bf_debug_flag));
- IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
- le32_to_cpu(cmd->bf_enable_beacon_filter));
- IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
- le32_to_cpu(cmd->bf_energy_delta));
- IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
- le32_to_cpu(cmd->bf_escape_timer));
- IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
- le32_to_cpu(cmd->bf_roaming_energy_delta));
- IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
- le32_to_cpu(cmd->bf_roaming_state));
- IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
- le32_to_cpu(cmd->bf_temp_threshold));
- IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
- le32_to_cpu(cmd->bf_temp_fast_filter));
- IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
- le32_to_cpu(cmd->bf_temp_slow_filter));
- }
- return ret;
+ IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
+ le32_to_cpu(cmd->ba_enable_beacon_abort));
+ IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
+ le32_to_cpu(cmd->ba_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
+ le32_to_cpu(cmd->bf_debug_flag));
+ IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
+ le32_to_cpu(cmd->bf_enable_beacon_filter));
+ IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
+ le32_to_cpu(cmd->bf_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_state));
+ IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
+ le32_to_cpu(cmd->bf_temp_threshold));
+ IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_fast_filter));
+ IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_slow_filter));
+
+ return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+ sizeof(struct iwl_beacon_filter_cmd), cmd);
}
static
@@ -145,7 +142,7 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
mvmvif->bf_data.ba_enabled = enable;
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
}
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
@@ -301,8 +298,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
- mvm->ps_prevented)
+ if (mvm->ps_disabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -312,7 +308,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
mvmvif->dbgfs_pm.disable_power_off)
cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
- if (!vif->bss_conf.ps || mvmvif->pm_prevented)
+ if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
+ mvm->pm_disabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -419,72 +416,44 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
-static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
+static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
- int ret;
- bool ba_enable;
struct iwl_mac_power_cmd cmd = {};
if (vif->type != NL80211_IFTYPE_STATION)
return 0;
if (vif->p2p &&
- !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS))
+ !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
return 0;
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
- sizeof(cmd), &cmd);
- if (ret)
- return ret;
-
- ba_enable = !!(cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
-
- return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
-}
-
-static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mac_power_cmd cmd = {};
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
- mvmvif->color));
-
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
- cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+ memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
- iwl_mvm_power_log(mvm, &cmd);
- return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
sizeof(cmd), &cmd);
}
-static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
{
struct iwl_device_power_cmd cmd = {
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
};
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
+ return 0;
+
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
return 0;
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
- force_disable)
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ mvm->ps_disabled = true;
+
+ if (mvm->ps_disabled)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -501,11 +470,6 @@ static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
&cmd);
}
-static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
-{
- return _iwl_mvm_power_update_device(mvm, false);
-}
-
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -544,44 +508,176 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
return 0;
}
-static void iwl_mvm_power_binding_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+struct iwl_power_constraint {
+ struct ieee80211_vif *bf_vif;
+ struct ieee80211_vif *bss_vif;
+ struct ieee80211_vif *p2p_vif;
+ u16 bss_phyctx_id;
+ u16 p2p_phyctx_id;
+ bool pm_disabled;
+ bool ps_disabled;
+ struct iwl_mvm *mvm;
+};
+
+static void iwl_mvm_power_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = _data;
- int ret;
+ struct iwl_power_constraint *power_iterator = _data;
+ struct iwl_mvm *mvm = power_iterator->mvm;
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ break;
+
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ /* no BSS power mgmt if we have an active AP */
+ if (mvmvif->ap_ibss_active)
+ power_iterator->pm_disabled = true;
+ break;
+
+ case NL80211_IFTYPE_MONITOR:
+ /* no BSS power mgmt and no device power save */
+ power_iterator->pm_disabled = true;
+ power_iterator->ps_disabled = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (mvmvif->phy_ctxt)
+ power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
+
+ /* we should have only one P2P vif */
+ WARN_ON(power_iterator->p2p_vif);
+ power_iterator->p2p_vif = vif;
+
+ IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
+ power_iterator->p2p_phyctx_id,
+ power_iterator->bss_phyctx_id);
+ if (!(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
+			/* no BSS power mgmt if we have a P2P client */
+ power_iterator->pm_disabled = true;
+ } else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
+ power_iterator->bss_phyctx_id < MAX_PHYS &&
+ power_iterator->p2p_phyctx_id ==
+ power_iterator->bss_phyctx_id) {
+ power_iterator->pm_disabled = true;
+ }
+ break;
+
+ case NL80211_IFTYPE_STATION:
+ if (mvmvif->phy_ctxt)
+ power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
+
+ /* we should have only one BSS vif */
+ WARN_ON(power_iterator->bss_vif);
+ power_iterator->bss_vif = vif;
+
+ if (mvmvif->bf_data.bf_enabled &&
+ !WARN_ON(power_iterator->bf_vif))
+ power_iterator->bf_vif = vif;
+
+ IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
+ power_iterator->p2p_phyctx_id,
+ power_iterator->bss_phyctx_id);
+ if (mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
+ (power_iterator->p2p_phyctx_id < MAX_PHYS &&
+ power_iterator->bss_phyctx_id < MAX_PHYS &&
+ power_iterator->p2p_phyctx_id ==
+ power_iterator->bss_phyctx_id))
+ power_iterator->pm_disabled = true;
+ break;
+
+ default:
+ break;
+ }
+}
- mvmvif->pm_prevented = (mvm->bound_vif_cnt <= 1) ? false : true;
+static void
+iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
+ struct iwl_power_constraint *constraint)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
+ constraint->pm_disabled = true;
+ constraint->ps_disabled = true;
+ }
- ret = iwl_mvm_power_mac_update_mode(mvm, vif);
- WARN_ONCE(ret, "Failed to update power parameters on a specific vif\n");
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_iterator, constraint);
}
-static void _iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool assign)
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_power_constraint constraint = {
+ .p2p_phyctx_id = MAX_PHYS,
+ .bss_phyctx_id = MAX_PHYS,
+ .mvm = mvm,
+ };
+ bool ba_enable;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
+ return 0;
+
+ iwl_mvm_power_get_global_constraint(mvm, &constraint);
+ mvm->ps_disabled = constraint.ps_disabled;
+ mvm->pm_disabled = constraint.pm_disabled;
+
+ /* don't update device power state unless we add / remove monitor */
if (vif->type == NL80211_IFTYPE_MONITOR) {
- int ret = _iwl_mvm_power_update_device(mvm, assign);
- mvm->ps_prevented = assign;
- WARN_ONCE(ret, "Failed to update power device state\n");
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ return ret;
}
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_binding_iterator,
- mvm);
+ if (constraint.bss_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (constraint.p2p_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (!constraint.bf_vif)
+ return 0;
+
+ vif = constraint.bf_vif;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
+ !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
+
+ return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, char *buf,
- int bufsz)
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, char *buf,
+ int bufsz)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_power_cmd cmd = {};
int pos = 0;
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+ if (WARN_ON(!(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
+ return 0;
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
+ mutex_unlock(&mvm->mutex);
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
@@ -685,32 +781,46 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
}
#endif
-int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd,
+ u32 cmd_flags,
+ bool d0i3)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_beacon_filter_cmd cmd = {
- IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter = cpu_to_le32(1),
- };
int ret;
if (mvmvif != mvm->bf_allowed_vif ||
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
- iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+ if (!d0i3)
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
- if (!ret)
+ /* don't change bf_enabled in case of temporary d0i3 configuration */
+ if (!ret && !d0i3)
mvmvif->bf_data.bf_enabled = true;
return ret;
}
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 flags)
+{
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
+}
+
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ u32 flags)
{
struct iwl_beacon_filter_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -720,7 +830,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
if (!ret)
mvmvif->bf_data.bf_enabled = false;
@@ -728,23 +838,89 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
return ret;
}
-int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, u32 flags)
{
+ int ret;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_power_cmd cmd = {};
- if (!mvmvif->bf_data.bf_enabled)
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- return iwl_mvm_enable_beacon_filter(mvm, vif);
-}
+ if (!vif->bss_conf.assoc)
+ return 0;
-const struct iwl_mvm_power_ops pm_mac_ops = {
- .power_update_mode = iwl_mvm_power_mac_update_mode,
- .power_update_device_mode = iwl_mvm_power_update_device,
- .power_disable = iwl_mvm_power_mac_disable,
- .power_update_binding = _iwl_mvm_power_update_binding,
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+ if (enable) {
+ /* configure skip over dtim up to 300 msec */
+ int dtimper = mvm->hw->conf.ps_dtim_period ?: 1;
+ int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+ if (WARN_ON(!dtimper_msec))
+ return 0;
+
+ cmd.flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ cmd.skip_dtim_periods = 300 / dtimper_msec;
+ }
+ iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
+ memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
-};
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags,
+ sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ /* configure beacon filtering */
+ if (mvmvif != mvm->bf_allowed_vif)
+ return 0;
+
+ if (enable) {
+ struct iwl_beacon_filter_cmd cmd_bf = {
+ IWL_BF_CMD_CONFIG_D0I3,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+ ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
+ flags, true);
+ } else {
+ if (mvmvif->bf_data.bf_enabled)
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+ else
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ }
+
+ return ret;
+}
+
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool force,
+ u32 flags)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif != mvm->bf_allowed_vif)
+ return 0;
+
+ if (!mvmvif->bf_data.bf_enabled) {
+ /* disable beacon filtering explicitly if force is true */
+ if (force)
+ return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ return 0;
+ }
+
+ return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+}
+
+int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
+{
+ struct iwl_powertable_cmd cmd = {
+ .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
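
For reference, the skip-over-DTIM budget in iwl_mvm_update_d0i3_power_mode() above amounts to sleeping through as many DTIM periods as fit in roughly 300 ms, with the beacon interval (nominally in TU) treated as milliseconds, as the code does. A small sketch with illustrative names:

static unsigned int d0i3_skip_dtim_periods(unsigned int dtim_period,
					   unsigned int beacon_int)
{
	unsigned int dtim_msec;

	if (!dtim_period)
		dtim_period = 1;	/* same "?: 1" fallback as above */

	dtim_msec = dtim_period * beacon_int;
	if (!dtim_msec)
		return 0;		/* guard against a zero interval */

	/* e.g. DTIM period 1, beacon interval 100: skip 3 DTIM periods */
	return 300 / dtim_msec;
}
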
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
deleted file mode 100644
index ef712ae5bc62..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-debug.h"
-#include "mvm.h"
-#include "iwl-modparams.h"
-#include "fw-api-power.h"
-
-#define POWER_KEEP_ALIVE_PERIOD_SEC 25
-
-static void iwl_mvm_power_log(struct iwl_mvm *mvm,
- struct iwl_powertable_cmd *cmd)
-{
- IWL_DEBUG_POWER(mvm,
- "Sending power table command for power level %d, flags = 0x%X\n",
- iwlmvm_mod_params.power_scheme,
- le16_to_cpu(cmd->flags));
- IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
-
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
- le32_to_cpu(cmd->rx_data_timeout));
- IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
- le32_to_cpu(cmd->tx_data_timeout));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
- IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
- le32_to_cpu(cmd->skip_dtim_periods));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
- le32_to_cpu(cmd->lprx_rssi_threshold));
- }
-}
-
-static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct iwl_powertable_cmd *cmd)
-{
- struct ieee80211_hw *hw = mvm->hw;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_channel *chan;
- int dtimper, dtimper_msec;
- int keep_alive;
- bool radar_detect = false;
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- /*
- * Regardless of power management state the driver must set
- * keep alive period. FW will use it for sending keep alive NDPs
- * immediately after association.
- */
- cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
-
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
- return;
-
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
- if (!vif->bss_conf.assoc)
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
- if (!vif->bss_conf.ps)
- return;
-
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
-
- if (vif->bss_conf.beacon_rate &&
- (vif->bss_conf.beacon_rate->bitrate == 10 ||
- vif->bss_conf.beacon_rate->bitrate == 60)) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
- cmd->lprx_rssi_threshold =
- cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
- }
-
- dtimper = hw->conf.ps_dtim_period ?: 1;
-
- /* Check if radar detection is required on current channel */
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- WARN_ON(!chanctx_conf);
- if (chanctx_conf) {
- chan = chanctx_conf->def.chan;
- radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
- }
- rcu_read_unlock();
-
- /* Check skip over DTIM conditions */
- if (!radar_detect && (dtimper <= 10) &&
- (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
- mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- cmd->skip_dtim_periods = cpu_to_le32(3);
- }
-
- /* Check that keep alive period is at least 3 * DTIM */
- dtimper_msec = dtimper * vif->bss_conf.beacon_int;
- keep_alive = max_t(int, 3 * dtimper_msec,
- MSEC_PER_SEC * cmd->keep_alive_seconds);
- keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
- cmd->keep_alive_seconds = keep_alive;
-
- if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
- cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
- } else {
- cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
- cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
- if (mvmvif->dbgfs_pm.skip_over_dtim)
- cmd->flags |=
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- else
- cmd->flags &=
- cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- }
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
- cmd->rx_data_timeout =
- cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
- cmd->tx_data_timeout =
- cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
- cmd->skip_dtim_periods =
- cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
- if (mvmvif->dbgfs_pm.lprx_ena)
- cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
- else
- cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
- }
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
- cmd->lprx_rssi_threshold =
- cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
-#endif /* CONFIG_IWLWIFI_DEBUGFS */
-}
-
-static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- int ret;
- bool ba_enable;
- struct iwl_powertable_cmd cmd = {};
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- /*
- * TODO: The following vif_count verification is temporary condition.
- * Avoid power mode update if more than one interface is currently
- * active. Remove this condition when FW will support power management
- * on multiple MACs.
- */
- IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
- mvm->vif_count);
- if (mvm->vif_count > 1)
- return 0;
-
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
- iwl_mvm_power_log(mvm, &cmd);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
- sizeof(cmd), &cmd);
- if (ret)
- return ret;
-
- ba_enable = !!(cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
-
- return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
-}
-
-static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_powertable_cmd cmd = {};
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
- cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
- iwl_mvm_power_log(mvm, &cmd);
-
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
- sizeof(cmd), &cmd);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, char *buf,
- int bufsz)
-{
- struct iwl_powertable_cmd cmd = {};
- int pos = 0;
-
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
- pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
- le32_to_cpu(cmd.skip_dtim_periods));
- pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
- iwlmvm_mod_params.power_scheme);
- pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
- le16_to_cpu(cmd.flags));
- pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
- cmd.keep_alive_seconds);
-
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
- 1 : 0);
- pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
- le32_to_cpu(cmd.rx_data_timeout));
- pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
- le32_to_cpu(cmd.tx_data_timeout));
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- pos += scnprintf(buf+pos, bufsz-pos,
- "lprx_rssi_threshold = %d\n",
- le32_to_cpu(cmd.lprx_rssi_threshold));
- }
- return pos;
-}
-#endif
-
-const struct iwl_mvm_power_ops pm_legacy_ops = {
- .power_update_mode = iwl_mvm_power_legacy_update_mode,
- .power_disable = iwl_mvm_power_legacy_disable,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- .power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read,
-#endif
-};
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index ce5db6c4ef7e..35e86e06dffd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -65,9 +65,14 @@
#include "fw-api.h"
#include "mvm.h"
+#define QUOTA_100 IWL_MVM_MAX_QUOTA
+#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
+
struct iwl_mvm_quota_iterator_data {
int n_interfaces[MAX_BINDINGS];
int colors[MAX_BINDINGS];
+ int low_latency[MAX_BINDINGS];
+ int n_low_latency_bindings;
struct ieee80211_vif *new_vif;
};
@@ -107,22 +112,29 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
if (vif->bss_conf.assoc)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
if (mvmvif->ap_ibss_active)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_MONITOR:
if (mvmvif->monitor_active)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_P2P_DEVICE:
- break;
+ return;
default:
WARN_ON_ONCE(1);
- break;
+ return;
+ }
+
+ data->n_interfaces[id]++;
+
+ if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
+ data->n_low_latency_bindings++;
+ data->low_latency[id] = true;
}
}
@@ -162,7 +174,7 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
struct iwl_time_quota_cmd cmd = {};
- int i, idx, ret, num_active_macs, quota, quota_rem;
+ int i, idx, ret, num_active_macs, quota, quota_rem, n_non_lowlat;
struct iwl_mvm_quota_iterator_data data = {
.n_interfaces = {},
.colors = { -1, -1, -1, -1 },
@@ -197,11 +209,39 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
num_active_macs += data.n_interfaces[i];
}
- quota = 0;
- quota_rem = 0;
- if (num_active_macs) {
- quota = IWL_MVM_MAX_QUOTA / num_active_macs;
- quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs;
+ n_non_lowlat = num_active_macs;
+
+ if (data.n_low_latency_bindings == 1) {
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ if (data.low_latency[i]) {
+ n_non_lowlat -= data.n_interfaces[i];
+ break;
+ }
+ }
+ }
+
+ if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
+ /*
+ * Reserve quota for the low latency binding when there
+ * are several data bindings but only a single low
+ * latency one. Split the rest of the quota equally
+ * between the other data interfaces.
+ */
+ quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
+ quota_rem = QUOTA_100 - n_non_lowlat * quota -
+ QUOTA_LOWLAT_MIN;
+ } else if (num_active_macs) {
+ /*
+ * There are 0 or more than 1 low latency bindings, or all the
+ * data interfaces belong to the single low latency binding.
+ * Split the quota equally between the data interfaces.
+ */
+ quota = QUOTA_100 / num_active_macs;
+ quota_rem = QUOTA_100 % num_active_macs;
+ } else {
+ /* values don't really matter - won't be used */
+ quota = 0;
+ quota_rem = 0;
}
for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
@@ -211,19 +251,37 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
cmd.quotas[idx].id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
- if (data.n_interfaces[i] <= 0) {
+ if (data.n_interfaces[i] <= 0)
cmd.quotas[idx].quota = cpu_to_le32(0);
- cmd.quotas[idx].max_duration = cpu_to_le32(0);
- } else {
+ else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
+ data.low_latency[i])
+ /*
+ * There is more than one binding, but only one of the
+ * bindings is low latency. In this case, allocate
+ * the minimum required quota for the low latency
+ * binding.
+ */
+ cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
+ else
cmd.quotas[idx].quota =
cpu_to_le32(quota * data.n_interfaces[i]);
- cmd.quotas[idx].max_duration = cpu_to_le32(0);
- }
+
+ WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100,
+ "Binding=%d, quota=%u > max=%u\n",
+ idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);
+
+ cmd.quotas[idx].max_duration = cpu_to_le32(0);
+
idx++;
}
- /* Give the remainder of the session to the first binding */
- le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
+ /* Give the remainder of the session to the first data binding */
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ if (le32_to_cpu(cmd.quotas[i].quota) != 0) {
+ le32_add_cpu(&cmd.quotas[i].quota, quota_rem);
+ break;
+ }
+ }
iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
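A standalone sketch of the quota split added above. The values for QUOTA_100 and the low-latency minimum percentage are illustrative (the driver takes both from constants not shown in this hunk), and the two-binding layout is hypothetical:

#include <stdio.h>

/* Illustrative values only: the driver's IWL_MVM_MAX_QUOTA and
 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT are defined in headers not shown here. */
#define QUOTA_100        128
#define LOWLAT_PERCENT   64
#define QUOTA_LOWLAT_MIN ((QUOTA_100 * LOWLAT_PERCENT) / 100)
#define MAX_BINDINGS     4

int main(void)
{
	/* Hypothetical layout: binding 0 is low latency with one interface,
	 * binding 1 carries two ordinary data interfaces. */
	int n_interfaces[MAX_BINDINGS] = { 1, 2, 0, 0 };
	int low_latency[MAX_BINDINGS]  = { 1, 0, 0, 0 };
	int n_low_latency_bindings = 1;
	int num_active_macs = 3;
	int n_non_lowlat = num_active_macs;
	int i, quota, quota_rem;

	if (n_low_latency_bindings == 1) {
		for (i = 0; i < MAX_BINDINGS; i++) {
			if (low_latency[i]) {
				n_non_lowlat -= n_interfaces[i];
				break;
			}
		}
	}

	if (n_low_latency_bindings == 1 && n_non_lowlat) {
		/* Reserve the minimum for the low latency binding, split
		 * the rest among the remaining data interfaces. */
		quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
		quota_rem = QUOTA_100 - n_non_lowlat * quota - QUOTA_LOWLAT_MIN;
	} else if (num_active_macs) {
		quota = QUOTA_100 / num_active_macs;
		quota_rem = QUOTA_100 % num_active_macs;
	} else {
		quota = 0;
		quota_rem = 0;
	}

	for (i = 0; i < MAX_BINDINGS; i++) {
		int q;

		if (n_interfaces[i] <= 0)
			q = 0;
		else if (n_low_latency_bindings == 1 && n_non_lowlat &&
			 low_latency[i])
			q = QUOTA_LOWLAT_MIN;
		else
			q = quota * n_interfaces[i];
		printf("binding %d: quota %d\n", i, q);
	}
	printf("remainder for the first data binding: %d\n", quota_rem);
	return 0;
}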
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 6abf74e1351f..568abd61b14f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -166,7 +166,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return false;
- if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
+ if (num_of_ant(mvm->fw->valid_tx_ant) < 2)
return false;
if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
@@ -211,9 +211,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
.next_columns = {
RS_COLUMN_LEGACY_ANT_B,
RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_MIMO2_SGI,
},
},
[RS_COLUMN_LEGACY_ANT_B] = {
@@ -221,10 +221,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
.ant = ANT_B,
.next_columns = {
RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_MIMO2_SGI,
},
},
[RS_COLUMN_SISO_ANT_A] = {
@@ -234,8 +234,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_A_SGI,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_MIMO2_SGI,
},
.checks = {
rs_siso_allow,
@@ -248,8 +248,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_B_SGI,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_MIMO2_SGI,
},
.checks = {
rs_siso_allow,
@@ -263,8 +263,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
},
.checks = {
rs_siso_allow,
@@ -279,8 +279,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_SISO_ANT_B,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2,
},
.checks = {
rs_siso_allow,
@@ -292,10 +292,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
.ant = ANT_AB,
.next_columns = {
RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
},
.checks = {
rs_mimo_allow,
@@ -307,10 +307,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
.sgi = true,
.next_columns = {
RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
},
.checks = {
rs_mimo_allow,
@@ -380,49 +380,49 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
* (2.4 GHz) band.
*/
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
};
/* Expected TpT tables. 4 indexes:
* 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
*/
-static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
{0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
{0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
};
-static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
{0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
{0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
};
-static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
{0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
{0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
{0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
};
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
{0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
{0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
};
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
{0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
{0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
};
-static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
{0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
{0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
@@ -503,6 +503,14 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
window->average_tpt = IWL_INVALID_VALUE;
}
+static void rs_rate_scale_clear_tbl_windows(struct iwl_scale_tbl_info *tbl)
+{
+ int i;
+
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(&tbl->win[i]);
+}
+
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
{
return (ant_type & valid_antenna) == ant_type;
@@ -566,19 +574,13 @@ static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
* at this rate. window->data contains the bitmask of successful
* packets.
*/
-static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- int scale_index, int attempts, int successes)
+static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes,
+ struct iwl_rate_scale_data *window)
{
- struct iwl_rate_scale_data *window = NULL;
static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
s32 fail_count, tpt;
- if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
- return -EINVAL;
-
- /* Select window for current tx bit rate */
- window = &(tbl->win[scale_index]);
-
/* Get expected throughput */
tpt = get_expected_tpt(tbl, scale_index);
@@ -636,6 +638,21 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
return 0;
}
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes)
+{
+ struct iwl_rate_scale_data *window = NULL;
+
+ if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+ return -EINVAL;
+
+ /* Select window for current tx bit rate */
+ window = &(tbl->win[scale_index]);
+
+ return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+ window);
+}
+
/* Convert rs_rate object into ucode rate bitmask */
static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
struct rs_rate *rate)
@@ -905,7 +922,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
rate->bw = RATE_MCS_CHAN_WIDTH_20;
- WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX &&
+ WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
rate->index > IWL_RATE_MCS_9_INDEX);
rate->index = rs_ht_to_legacy[rate->index];
@@ -917,7 +934,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
if (num_of_ant(rate->ant) > 1)
- rate->ant = first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+ rate->ant = first_antenna(mvm->fw->valid_tx_ant);
/* Relevant in both switching to SISO or Legacy */
rate->sgi = false;
@@ -1169,12 +1186,12 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
lq_sta->visited_columns = 0;
}
-static s32 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
const struct rs_tx_column *column,
u32 bw)
{
/* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+ const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
column->mode != RS_SISO &&
@@ -1262,9 +1279,8 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
-
/* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
+ const u16 *tpt_tbl = tbl->expected_tpt;
s32 new_rate, high, low, start_hi;
u16 high_low;
@@ -1362,7 +1378,6 @@ static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
{
struct iwl_scale_tbl_info *tbl;
- int i;
int active_tbl;
int flush_interval_passed = 0;
struct iwl_mvm *mvm;
@@ -1423,9 +1438,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
IWL_DEBUG_RATE(mvm,
"LQ: stay in table clear win\n");
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(
- &(tbl->win[i]));
+ rs_rate_scale_clear_tbl_windows(tbl);
}
}
@@ -1434,8 +1447,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
* "search" table). */
if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&(tbl->win[i]));
+ rs_rate_scale_clear_tbl_windows(tbl);
}
}
}
@@ -1478,8 +1490,8 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
const struct rs_tx_column *next_col;
allow_column_func_t allow_func;
- u8 valid_ants = iwl_fw_valid_tx_ant(mvm->fw);
- s32 *expected_tpt_tbl;
+ u8 valid_ants = mvm->fw->valid_tx_ant;
+ const u16 *expected_tpt_tbl;
s32 tpt, max_expected_tpt;
for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
@@ -1725,7 +1737,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
int low = IWL_RATE_INVALID;
int high = IWL_RATE_INVALID;
int index;
- int i;
struct iwl_rate_scale_data *window = NULL;
int current_tpt = IWL_INVALID_VALUE;
int low_tpt = IWL_INVALID_VALUE;
@@ -2010,8 +2021,7 @@ lq_update:
if (lq_sta->search_better_tbl) {
/* Access the "search" table, clear its history. */
tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&(tbl->win[i]));
+ rs_rate_scale_clear_tbl_windows(tbl);
/* Use new "search" start rate */
index = tbl->rate.index;
@@ -2090,7 +2100,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
i = lq_sta->last_txrate_idx;
- valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
+ valid_tx_ant = mvm->fw->valid_tx_ant;
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
@@ -2241,6 +2251,73 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
}
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm,
+ struct iwl_mvm_frame_stats *stats)
+{
+ spin_lock_bh(&mvm->drv_stats_lock);
+ memset(stats, 0, sizeof(*stats));
+ spin_unlock_bh(&mvm->drv_stats_lock);
+}
+
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
+ struct iwl_mvm_frame_stats *stats,
+ u32 rate, bool agg)
+{
+ u8 nss = 0, mcs = 0;
+
+ spin_lock(&mvm->drv_stats_lock);
+
+ if (agg)
+ stats->agg_frames++;
+
+ stats->success_frames++;
+
+ switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ stats->bw_20_frames++;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ stats->bw_40_frames++;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ stats->bw_80_frames++;
+ break;
+ default:
+ WARN_ONCE(1, "bad BW. rate 0x%x", rate);
+ }
+
+ if (rate & RATE_MCS_HT_MSK) {
+ stats->ht_frames++;
+ mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_VHT_MSK) {
+ stats->vht_frames++;
+ mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ } else {
+ stats->legacy_frames++;
+ }
+
+ if (nss == 1)
+ stats->siso_frames++;
+ else if (nss == 2)
+ stats->mimo2_frames++;
+
+ if (rate & RATE_MCS_SGI_MSK)
+ stats->sgi_frames++;
+ else
+ stats->ngi_frames++;
+
+ stats->last_rates[stats->last_frame_idx] = rate;
+ stats->last_frame_idx = (stats->last_frame_idx + 1) %
+ ARRAY_SIZE(stats->last_rates);
+
+ spin_unlock(&mvm->drv_stats_lock);
+}
+#endif
+
/*
* Called after adding a new station to initialize rate scaling
*/
@@ -2265,8 +2342,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->lq.sta_id = sta_priv->sta_id;
for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
+ rs_rate_scale_clear_tbl_windows(&lq_sta->lq_info[j]);
lq_sta->flush_timer = 0;
@@ -2320,7 +2396,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
- first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+ first_antenna(mvm->fw->valid_tx_ant);
lq_sta->lq.dual_stream_ant_msk = ANT_AB;
/* as default allow aggregation for all tids */
@@ -2335,7 +2411,9 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
#ifdef CONFIG_MAC80211_DEBUGFS
lq_sta->dbg_fixed_rate = 0;
#endif
-
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
+#endif
rs_initialize_lq(mvm, sta, lq_sta, band, init);
}
@@ -2446,7 +2524,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
memcpy(&rate, initial_rate, sizeof(rate));
- valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
+ valid_tx_ant = mvm->fw->valid_tx_ant;
if (is_siso(&rate)) {
num_rates = RS_INITIAL_SISO_NUM_RATES;
@@ -2523,7 +2601,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
if (sta)
lq_cmd->agg_time_limit =
- cpu_to_le16(iwl_mvm_bt_coex_agg_time_limit(mvm, sta));
+ cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2547,7 +2625,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
}
#ifdef CONFIG_MAC80211_DEBUGFS
-static int rs_pretty_print_rate(char *buf, const u32 rate)
+int rs_pretty_print_rate(char *buf, const u32 rate)
{
char *type, *bw;
@@ -2596,7 +2674,7 @@ static int rs_pretty_print_rate(char *buf, const u32 rate)
return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
- (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "",
(rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
(rate & RATE_MCS_BF_MSK) ? "BF " : "",
(rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
@@ -2677,9 +2755,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
lq_sta->dbg_fixed_rate);
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (iwl_fw_valid_tx_ant(mvm->fw) & ANT_A) ? "ANT_A," : "",
- (iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
- (iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
+ (mvm->fw->valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (mvm->fw->valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (mvm->fw->valid_tx_ant & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
(is_legacy(rate)) ? "legacy" :
is_vht(rate) ? "VHT" : "HT");
@@ -2815,8 +2893,8 @@ static void rs_rate_init_stub(void *mvm_r,
struct ieee80211_sta *sta, void *mvm_sta)
{
}
-static struct rate_control_ops rs_mvm_ops = {
- .module = NULL,
+
+static const struct rate_control_ops rs_mvm_ops = {
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
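The refactored _rs_collect_tx_data() above keeps a per-rate success history in a 62-bit window, shifting each new attempt in and evicting the oldest bit (the `mask` constant). A simplified standalone model of such a window, not the driver's exact bookkeeping:

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE 62 /* mirrors IWL_RATE_MAX_WINDOW in the driver */

struct tx_window {
	uint64_t data;       /* bitmask of recent attempts, 1 = success */
	int counter;         /* attempts currently tracked (<= WINDOW_SIZE) */
	int success_counter;
};

/* Push one attempt into the history, evicting the oldest bit once the
 * window is full. */
static void window_push(struct tx_window *w, int success)
{
	const uint64_t oldest = (uint64_t)1 << (WINDOW_SIZE - 1);

	if (w->counter == WINDOW_SIZE) {
		if (w->data & oldest)
			w->success_counter--;
		w->counter--;
	}
	w->data = (w->data << 1) & ~(~(uint64_t)0 << WINDOW_SIZE);
	if (success) {
		w->data |= 1;
		w->success_counter++;
	}
	w->counter++;
}

int main(void)
{
	struct tx_window w = { 0, 0, 0 };
	int i;

	for (i = 0; i < 100; i++)
		window_push(&w, i % 4 != 0); /* roughly 75% success pattern */

	printf("attempts=%d successes=%d ratio=%d%%\n",
	       w.counter, w.success_counter,
	       w.counter ? 100 * w.success_counter / w.counter : 0);
	return 0;
}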
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 7bc6404f6986..3332b396011e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -277,7 +277,7 @@ enum rs_column {
struct iwl_scale_tbl_info {
struct rs_rate rate;
enum rs_column column;
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index a85b60f7e67e..6061553a5e44 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -77,6 +77,15 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
mvm->ampdu_ref++;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+ spin_lock(&mvm->drv_stats_lock);
+ mvm->drv_rx_stats.ampdu_count++;
+ spin_unlock(&mvm->drv_stats_lock);
+ }
+#endif
+
return 0;
}
@@ -129,22 +138,16 @@ static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
struct ieee80211_rx_status *rx_status)
{
int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
- int rssi_all_band_a, rssi_all_band_b;
- u32 agc_a, agc_b, max_agc;
+ u32 agc_a, agc_b;
u32 val;
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
- max_agc = max_t(u32, agc_a, agc_b);
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
- rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
- IWL_OFDM_RSSI_ALLBAND_A_POS;
- rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
- IWL_OFDM_RSSI_ALLBAND_B_POS;
/*
* dBm = rssi dB - agc dB - constant.
@@ -364,31 +367,43 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rx_status.flag |= RX_FLAG_40MHZ;
break;
case RATE_MCS_CHAN_WIDTH_80:
- rx_status.flag |= RX_FLAG_80MHZ;
+ rx_status.vht_flag |= RX_VHT_FLAG_80MHZ;
break;
case RATE_MCS_CHAN_WIDTH_160:
- rx_status.flag |= RX_FLAG_160MHZ;
+ rx_status.vht_flag |= RX_VHT_FLAG_160MHZ;
break;
}
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
rx_status.flag |= RX_FLAG_HT_GF;
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ rx_status.flag |= RX_FLAG_LDPC;
if (rate_n_flags & RATE_MCS_HT_MSK) {
+ u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
rx_status.flag |= RX_FLAG_HT;
rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
rx_status.vht_nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
rx_status.flag |= RX_FLAG_VHT;
+ rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
} else {
rx_status.rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status.band);
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags,
+ rx_status.flag & RX_FLAG_AMPDU_DETAILS);
+#endif
iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status,
rxb, &rx_status);
return 0;
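The Rx path above classifies each frame by pulling bandwidth, HT/VHT, NSS, STBC and guard-interval bits out of rate_n_flags. A standalone sketch of that kind of decoding; the bit positions below are illustrative placeholders, not the firmware's real RATE_MCS_* layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit layout only; the real RATE_MCS_* masks live in the
 * firmware API headers and are not reproduced here. */
#define CHAN_WIDTH_MSK  (3u << 11)
#define CHAN_WIDTH_40   (1u << 11)
#define CHAN_WIDTH_80   (2u << 11)
#define HT_MSK          (1u << 8)
#define VHT_MSK         (1u << 26)
#define SGI_MSK         (1u << 13)
#define VHT_NSS_MSK     (3u << 4)
#define VHT_NSS_POS     4
#define VHT_RATE_MSK    0xfu

static void describe_rate(uint32_t rate)
{
	const char *bw =
		(rate & CHAN_WIDTH_MSK) == CHAN_WIDTH_80 ? "80MHz" :
		(rate & CHAN_WIDTH_MSK) == CHAN_WIDTH_40 ? "40MHz" : "20MHz";

	if (rate & VHT_MSK) {
		unsigned int nss = ((rate & VHT_NSS_MSK) >> VHT_NSS_POS) + 1;

		printf("VHT MCS%u NSS%u %s %s\n",
		       (unsigned int)(rate & VHT_RATE_MSK), nss, bw,
		       (rate & SGI_MSK) ? "SGI" : "NGI");
	} else if (rate & HT_MSK) {
		printf("HT %s %s\n", bw, (rate & SGI_MSK) ? "SGI" : "NGI");
	} else {
		printf("legacy\n");
	}
}

int main(void)
{
	describe_rate(VHT_MSK | CHAN_WIDTH_80 | (1u << VHT_NSS_POS) | 7u);
	describe_rate(HT_MSK | CHAN_WIDTH_40 | SGI_MSK);
	return 0;
}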
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 742afc429c94..c91dc8498852 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -70,9 +70,16 @@
#define IWL_PLCP_QUIET_THRESH 1
#define IWL_ACTIVE_QUIET_TIME 10
-#define LONG_OUT_TIME_PERIOD 600
-#define SHORT_OUT_TIME_PERIOD 200
-#define SUSPEND_TIME_PERIOD 100
+
+struct iwl_mvm_scan_params {
+ u32 max_out_time;
+ u32 suspend_time;
+ bool passive_fragmented;
+ struct _dwell {
+ u16 passive;
+ u16 active;
+ } dwell[IEEE80211_NUM_BANDS];
+};
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
@@ -82,7 +89,7 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
if (mvm->scan_rx_ant != ANT_NONE)
rx_ant = mvm->scan_rx_ant;
else
- rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
+ rx_ant = mvm->fw->valid_rx_ant;
rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -90,24 +97,6 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
return cpu_to_le16(rx_chain);
}
-static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif,
- u32 flags, bool is_assoc)
-{
- if (!is_assoc)
- return 0;
- if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
- return cpu_to_le32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
- return cpu_to_le32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
-}
-
-static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif,
- bool is_assoc)
-{
- if (!is_assoc)
- return 0;
- return cpu_to_le32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
-}
-
static inline __le32
iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req)
{
@@ -124,7 +113,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
u32 tx_ant;
mvm->scan_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
+ iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
mvm->scan_last_antenna_idx);
tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
@@ -181,15 +170,14 @@ static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
struct cfg80211_scan_request *req,
- bool basic_ssid)
+ bool basic_ssid,
+ struct iwl_mvm_scan_params *params)
{
- u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
- u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
- req->n_ssids);
struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
int i;
int type = BIT(req->n_ssids) - 1;
+ enum ieee80211_band band = req->channels[0]->band;
if (!basic_ssid)
type |= BIT(req->n_ssids);
@@ -199,8 +187,8 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
chan->type = cpu_to_le32(type);
if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
- chan->active_dwell = cpu_to_le16(active_dwell);
- chan->passive_dwell = cpu_to_le16(passive_dwell);
+ chan->active_dwell = cpu_to_le16(params->dwell[band].active);
+ chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
chan->iteration_count = cpu_to_le16(1);
chan++;
}
@@ -267,13 +255,76 @@ static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
return (u16)len;
}
-static void iwl_mvm_vif_assoc_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
+static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- bool *is_assoc = data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool *global_bound = data;
+
+ if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
+ *global_bound = true;
+}
+
+static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int n_ssids,
+ struct iwl_mvm_scan_params *params)
+{
+ bool global_bound = false;
+ enum ieee80211_band band;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_scan_condition_iterator,
+ &global_bound);
+ /*
+ * Under low latency traffic the passive scan is fragmented, i.e. the
+ * dwell on a particular channel is split into fragments. Each fragment
+ * dwells for 20ms and the fragment period is 105ms. Skipping to the
+ * next channel is delayed by the same 105ms period, so the suspend_time
+ * parameter, which covers both the fragment and the channel-skipping
+ * periods, is set to 105ms. This keeps the overall passive scan
+ * duration reasonably short. max_out_time is set to 70ms here, so for
+ * active scanning the operating channel is left for 70ms, while for
+ * passive scanning it is still left only for 20ms (the fragment dwell).
+ */
+ if (global_bound) {
+ if (!iwl_mvm_low_latency(mvm)) {
+ params->suspend_time = ieee80211_tu_to_usec(100);
+ params->max_out_time = ieee80211_tu_to_usec(600);
+ } else {
+ params->suspend_time = ieee80211_tu_to_usec(105);
+ /* P2P doesn't support fragmented passive scan, so
+ * configure max_out_time to be at least the longest
+ * passive dwell time.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
+ params->max_out_time = ieee80211_tu_to_usec(70);
+ params->passive_fragmented = true;
+ } else {
+ u32 passive_dwell;
- if (vif->bss_conf.assoc)
- *is_assoc = true;
+ /*
+ * Use band G so that passive channel dwell time
+ * will be assigned with maximum value.
+ */
+ band = IEEE80211_BAND_2GHZ;
+ passive_dwell = iwl_mvm_get_passive_dwell(band);
+ params->max_out_time =
+ ieee80211_tu_to_usec(passive_dwell);
+ }
+ }
+ }
+
+ for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ if (params->passive_fragmented)
+ params->dwell[band].passive = 20;
+ else
+ params->dwell[band].passive =
+ iwl_mvm_get_passive_dwell(band);
+ params->dwell[band].active = iwl_mvm_get_active_dwell(band,
+ n_ssids);
+ }
}
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
@@ -288,13 +339,13 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_cmd *cmd = mvm->scan_cmd;
- bool is_assoc = false;
int ret;
u32 status;
int ssid_len = 0;
u8 *ssid = NULL;
bool basic_ssid = !(mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
+ struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex);
BUG_ON(mvm->scan_cmd == NULL);
@@ -304,17 +355,18 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
mvm->fw->ucode_capa.max_probe_length +
(MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_vif_assoc_iterator,
- &is_assoc);
+
cmd->channel_count = (u8)req->n_channels;
cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
- cmd->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
- is_assoc);
- cmd->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
+
+ iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
+ cmd->max_out_time = cpu_to_le32(params.max_out_time);
+ cmd->suspend_time = cpu_to_le32(params.suspend_time);
+ if (params.passive_fragmented)
+ cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
+
cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
@@ -360,7 +412,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
req->ie, req->ie_len,
mvm->fw->ucode_capa.max_probe_length));
- iwl_mvm_scan_fill_channels(cmd, req, basic_ssid);
+ iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);
cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
le16_to_cpu(cmd->tx_cmd.len) +
@@ -402,12 +454,17 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scan_complete_notif *notif = (void *)pkt->data;
+ lockdep_assert_held(&mvm->mutex);
+
IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
notif->status, notif->scanned_channels);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ if (mvm->scan_status == IWL_MVM_SCAN_OS)
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+
return 0;
}
@@ -464,7 +521,7 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
};
}
-void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
+int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
{
struct iwl_notification_wait wait_scan_abort;
static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
@@ -472,12 +529,13 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
int ret;
if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- return;
+ return 0;
if (iwl_mvm_is_radio_killed(mvm)) {
ieee80211_scan_completed(mvm->hw, true);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
mvm->scan_status = IWL_MVM_SCAN_NONE;
- return;
+ return 0;
}
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
@@ -488,18 +546,15 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
if (ret) {
IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
- /* mac80211's state will be cleaned in the fw_restart flow */
+ /* mac80211's state will be cleaned in the nic_restart flow */
goto out_remove_notif;
}
- ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, 1 * HZ);
- if (ret)
- IWL_ERR(mvm, "%s - failed on timeout\n", __func__);
-
- return;
+ return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);
out_remove_notif:
iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
+ return ret;
}
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
@@ -509,12 +564,18 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
+ /* scan status must be locked for proper checking */
+ lockdep_assert_held(&mvm->mutex);
+
IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted");
- mvm->scan_status = IWL_MVM_SCAN_NONE;
- ieee80211_sched_scan_stopped(mvm->hw);
+ /* only call mac80211 completion if the stop was initiated by FW */
+ if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ ieee80211_sched_scan_stopped(mvm->hw);
+ }
return 0;
}
@@ -545,14 +606,9 @@ static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
- struct iwl_scan_offload_cmd *scan)
+ struct iwl_scan_offload_cmd *scan,
+ struct iwl_mvm_scan_params *params)
{
- bool is_assoc = false;
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_vif_assoc_iterator,
- &is_assoc);
scan->channel_count =
mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
@@ -560,13 +616,17 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
- scan->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
- is_assoc);
- scan->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
+
+ scan->max_out_time = cpu_to_le32(params->max_out_time);
+ scan->suspend_time = cpu_to_le32(params->suspend_time);
+
scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
scan->rep_count = cpu_to_le32(1);
+
+ if (params->passive_fragmented)
+ scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
}
static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
@@ -596,6 +656,9 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
* config match list.
*/
for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+ /* skip empty SSID matchsets */
+ if (!req->match_sets[i].ssid.ssid_len)
+ continue;
scan->direct_scan[i].id = WLAN_EID_SSID;
scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
@@ -628,12 +691,11 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
struct iwl_scan_channel_cfg *channels,
enum ieee80211_band band,
int *head, int *tail,
- u32 ssid_bitmap)
+ u32 ssid_bitmap,
+ struct iwl_mvm_scan_params *params)
{
struct ieee80211_supported_band *s_band;
- int n_probes = req->n_ssids;
int n_channels = req->n_channels;
- u8 active_dwell, passive_dwell;
int i, j, index = 0;
bool partial;
@@ -643,8 +705,6 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
* to scan. So add requested channels to head of the list and others to
* the end.
*/
- active_dwell = iwl_mvm_get_active_dwell(band, n_probes);
- passive_dwell = iwl_mvm_get_passive_dwell(band);
s_band = &mvm->nvm_data->bands[band];
for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
@@ -668,8 +728,8 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
channels->channel_number[index] =
cpu_to_le16(ieee80211_frequency_to_channel(
s_band->channels[i].center_freq));
- channels->dwell_time[index][0] = active_dwell;
- channels->dwell_time[index][1] = passive_dwell;
+ channels->dwell_time[index][0] = params->dwell[band].active;
+ channels->dwell_time[index][1] = params->dwell[band].passive;
channels->iter_count[index] = cpu_to_le16(1);
channels->iter_interval[index] = 0;
@@ -698,7 +758,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies)
{
- int supported_bands = 0;
int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
int head = 0;
@@ -712,22 +771,19 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
.id = SCAN_OFFLOAD_CONFIG_CMD,
.flags = CMD_SYNC,
};
+ struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex);
- if (band_2ghz)
- supported_bands++;
- if (band_5ghz)
- supported_bands++;
-
cmd_len = sizeof(struct iwl_scan_offload_cfg) +
- supported_bands * SCAN_OFFLOAD_PROBE_REQ_SIZE;
+ 2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
if (!scan_cfg)
return -ENOMEM;
- iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd);
+ iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
+ iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
@@ -739,7 +795,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
scan_cfg->data);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
IEEE80211_BAND_2GHZ, &head, &tail,
- ssid_bitmap);
+ ssid_bitmap, &params);
}
if (band_5ghz) {
iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
@@ -749,7 +805,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
SCAN_OFFLOAD_PROBE_REQ_SIZE);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
IEEE80211_BAND_5GHZ, &head, &tail,
- ssid_bitmap);
+ ssid_bitmap, &params);
}
cmd.data[0] = scan_cfg;
@@ -889,26 +945,49 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
* microcode has notified us that a scan is completed.
*/
IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
- ret = -EIO;
+ ret = -ENOENT;
}
return ret;
}
-void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
{
int ret;
+ struct iwl_notification_wait wait_scan_done;
+ static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
lockdep_assert_held(&mvm->mutex);
if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
- return;
+ return 0;
}
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
+ scan_done_notif,
+ ARRAY_SIZE(scan_done_notif),
+ NULL, NULL);
+
ret = iwl_mvm_send_sched_scan_abort(mvm);
- if (ret)
+ if (ret) {
IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
- else
- IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+ return ret;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
+ if (ret)
+ return ret;
+
+ /*
+ * Clear the scan status so the next scan requests will succeed. This
+ * also ensures the Rx handler doesn't do anything, as the scan was
+ * stopped from above.
+ */
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+
+ return 0;
}
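A standalone model of the decision tree implemented by iwl_mvm_scan_calc_params() above. The per-band passive dwell is replaced by a placeholder value, and the TU-to-microsecond conversion follows the usual 1 TU = 1024 usec:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TU_TO_USEC(tu) ((uint32_t)(tu) * 1024) /* 1 TU = 1024 usec */

struct scan_params {
	uint32_t max_out_time; /* usec allowed away from the operating channel */
	uint32_t suspend_time; /* usec between fragments / channel hops */
	bool passive_fragmented;
};

/* Models the decision tree only; the per-band passive dwell below is a
 * placeholder. */
static void calc_params(bool bound, bool low_latency, bool station_vif,
			struct scan_params *p)
{
	const uint16_t passive_dwell_tu = 110; /* placeholder */

	p->max_out_time = 0;
	p->suspend_time = 0;
	p->passive_fragmented = false;

	if (!bound)
		return;

	if (!low_latency) {
		p->suspend_time = TU_TO_USEC(100);
		p->max_out_time = TU_TO_USEC(600);
		return;
	}

	p->suspend_time = TU_TO_USEC(105);
	if (station_vif) {
		/* Fragmented passive scan: 20ms dwell fragments. */
		p->max_out_time = TU_TO_USEC(70);
		p->passive_fragmented = true;
	} else {
		/* P2P: no fragmented passive scan, leave the channel for
		 * at least one full passive dwell. */
		p->max_out_time = TU_TO_USEC(passive_dwell_tu);
	}
}

int main(void)
{
	struct scan_params p;

	calc_params(true, true, true, &p);
	printf("max_out_time=%u us suspend_time=%u us fragmented=%d\n",
	       (unsigned int)p.max_out_time, (unsigned int)p.suspend_time,
	       p.passive_fragmented);
	return 0;
}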
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 3397f59cd4e4..f339ef884250 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,27 +66,27 @@
#include "sta.h"
#include "rs.h"
-static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
+static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
{
memset(cmd_v5, 0, sizeof(*cmd_v5));
- cmd_v5->add_modify = cmd_v6->add_modify;
- cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
- cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
- memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
- cmd_v5->sta_id = cmd_v6->sta_id;
- cmd_v5->modify_mask = cmd_v6->modify_mask;
- cmd_v5->station_flags = cmd_v6->station_flags;
- cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
- cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
- cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
- cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
- cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
- cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
- cmd_v5->assoc_id = cmd_v6->assoc_id;
- cmd_v5->beamform_flags = cmd_v6->beamform_flags;
- cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
+ cmd_v5->add_modify = cmd_v7->add_modify;
+ cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
+ cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
+ memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
+ cmd_v5->sta_id = cmd_v7->sta_id;
+ cmd_v5->modify_mask = cmd_v7->modify_mask;
+ cmd_v5->station_flags = cmd_v7->station_flags;
+ cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
+ cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
+ cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
+ cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
+ cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
+ cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
+ cmd_v5->assoc_id = cmd_v7->assoc_id;
+ cmd_v5->beamform_flags = cmd_v7->beamform_flags;
+ cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
}
static void
@@ -110,7 +110,7 @@ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
}
static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
- struct iwl_mvm_add_sta_cmd_v6 *cmd,
+ struct iwl_mvm_add_sta_cmd_v7 *cmd,
int *status)
{
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@@ -119,14 +119,14 @@ static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
cmd, status);
- iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+ iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
&cmd_v5, status);
}
static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
- struct iwl_mvm_add_sta_cmd_v6 *cmd)
+ struct iwl_mvm_add_sta_cmd_v7 *cmd)
{
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@@ -134,7 +134,7 @@ static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
sizeof(*cmd), cmd);
- iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+ iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
&cmd_v5);
@@ -175,19 +175,30 @@ static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
&sta_cmd);
}
-static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
+static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
+ enum nl80211_iftype iftype)
{
int sta_id;
+ u32 reserved_ids = 0;
+ BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
lockdep_assert_held(&mvm->mutex);
+ /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
+ if (iftype != NL80211_IFTYPE_STATION)
+ reserved_ids = BIT(0);
+
/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
- for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++)
+ for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
+ if (BIT(sta_id) & reserved_ids)
+ continue;
+
if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex)))
return sta_id;
+ }
return IWL_MVM_STATION_COUNT;
}
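A compact model of the reserved-ID scan added to iwl_mvm_find_free_sta_id() above: station interfaces may use any slot, while every other interface type skips slot 0, which is reserved for the d0i3/d3 assumption. The table size here is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATION_COUNT 16 /* illustrative station table size */

static int find_free_sta_id(const bool used[STATION_COUNT], bool is_station)
{
	uint32_t reserved_ids = is_station ? 0 : 1u << 0;
	int sta_id;

	for (sta_id = 0; sta_id < STATION_COUNT; sta_id++) {
		if ((1u << sta_id) & reserved_ids)
			continue;
		if (!used[sta_id])
			return sta_id;
	}
	return STATION_COUNT; /* no free slot */
}

int main(void)
{
	bool used[STATION_COUNT] = { false };

	printf("AP vif gets id %d, station vif gets id %d\n",
	       find_free_sta_id(used, false), find_free_sta_id(used, true));
	return 0;
}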
@@ -196,7 +207,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
+ struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
int ret;
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
@@ -312,7 +323,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- sta_id = iwl_mvm_find_free_sta_id(mvm);
+ sta_id = iwl_mvm_find_free_sta_id(mvm,
+ ieee80211_vif_type_p2p(vif));
else
sta_id = mvm_sta->sta_id;
@@ -368,7 +380,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -522,6 +534,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+ /* clear d0i3_ap_sta_id if no longer relevant */
+ if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
}
/*
@@ -560,10 +576,10 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
- u32 qmask)
+ u32 qmask, enum nl80211_iftype iftype)
{
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- sta->sta_id = iwl_mvm_find_free_sta_id(mvm);
+ sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
return -ENOSPC;
}
@@ -587,13 +603,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
const u8 *addr,
u16 mac_id, u16 color)
{
- struct iwl_mvm_add_sta_cmd_v6 cmd;
+ struct iwl_mvm_add_sta_cmd_v7 cmd;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
- memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
+ memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
@@ -627,7 +643,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
/* Add the aux station, but without any queues */
- ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0);
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0,
+ NL80211_IFTYPE_UNSPECIFIED);
if (ret)
return ret;
@@ -699,7 +716,8 @@ int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
- ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask);
+ ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask,
+ ieee80211_vif_type_p2p(vif));
if (ret)
return ret;
@@ -735,7 +753,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -794,7 +812,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -833,7 +851,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
-static const u8 tid_to_ac[] = {
+const u8 tid_to_mac80211_ac[] = {
IEEE80211_AC_BE,
IEEE80211_AC_BK,
IEEE80211_AC_BK,
@@ -844,6 +862,17 @@ static const u8 tid_to_ac[] = {
IEEE80211_AC_VO,
};
+static const u8 tid_to_ucode_ac[] = {
+ AC_BE,
+ AC_BK,
+ AC_BK,
+ AC_BE,
+ AC_VI,
+ AC_VI,
+ AC_VO,
+ AC_VO,
+};
+
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
@@ -873,10 +902,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO;
}
+ spin_lock_bh(&mvmsta->lock);
+
+ /* possible race condition - we entered D0i3 while starting agg */
+ if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
+ spin_unlock_bh(&mvmsta->lock);
+ IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
+ return -EIO;
+ }
+
/* the new tx queue is still connected to the same mac80211 queue */
- mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_ac[tid]];
+ mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
- spin_lock_bh(&mvmsta->lock);
tid_data = &mvmsta->tid_data[tid];
tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
tid_data->txq_id = txq_id;
@@ -916,7 +953,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->ssn = 0xffff;
spin_unlock_bh(&mvmsta->lock);
- fifo = iwl_mvm_ac_to_tx_fifo[tid_to_ac[tid]];
+ fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
@@ -1411,7 +1448,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v6 cmd = {
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1427,28 +1464,102 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
- u16 cnt)
+ u16 cnt, u16 tids, bool more_data,
+ bool agg)
{
- u16 sleep_state_flags =
- (reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
- STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v6 cmd = {
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
.sleep_tx_count = cpu_to_le16(cnt),
.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
- /*
- * Same modify mask for sleep_tx_count and sleep_state_flags so
- * we must set the sleep_state_flags too.
- */
- .sleep_state_flags = cpu_to_le16(sleep_state_flags),
};
- int ret;
+ int tid, ret;
+ unsigned long _tids = tids;
+
+ /* convert TIDs to ACs - we don't support TSPEC so that's OK
+ * Note that this field is reserved and unused by firmware not
+ * supporting GO uAPSD, so it's safe to always do this.
+ */
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
+ cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
+
+ /* If we're releasing frames from aggregation queues then check if
+ * all the queues combined that we're releasing frames from have
+ * - more frames than the service period, in which case more_data
+ * needs to be set
+ * - fewer than 'cnt' frames, in which case we need to adjust the
+ * firmware command (but do that unconditionally)
+ */
+ if (agg) {
+ int remaining = cnt;
+
+ spin_lock_bh(&mvmsta->lock);
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
+ struct iwl_mvm_tid_data *tid_data;
+ u16 n_queued;
+
+ tid_data = &mvmsta->tid_data[tid];
+ if (WARN(tid_data->state != IWL_AGG_ON &&
+ tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
+ "TID %d state is %d\n",
+ tid, tid_data->state)) {
+ spin_unlock_bh(&mvmsta->lock);
+ ieee80211_sta_eosp(sta);
+ return;
+ }
+
+ n_queued = iwl_mvm_tid_queued(tid_data);
+ if (n_queued > remaining) {
+ more_data = true;
+ remaining = 0;
+ break;
+ }
+ remaining -= n_queued;
+ }
+ spin_unlock_bh(&mvmsta->lock);
+
+ cmd.sleep_tx_count = cpu_to_le16(cnt - remaining);
+ if (WARN_ON(cnt - remaining == 0)) {
+ ieee80211_sta_eosp(sta);
+ return;
+ }
+ }
+
+ /* Note: this is ignored by firmware not supporting GO uAPSD */
+ if (more_data)
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
+
+ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
+ mvmsta->next_status_eosp = true;
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
+ } else {
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
+ }
- /* TODO: somehow the fw doesn't seem to take PS_POLL into account */
ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
+
+int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ u32 sta_id = le32_to_cpu(notif->sta_id);
+
+ if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
+ return 0;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (!IS_ERR_OR_NULL(sta))
+ ieee80211_sta_eosp(sta);
+ rcu_read_unlock();
+
+ return 0;
+}
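A standalone model of the release-count adjustment performed in iwl_mvm_sta_modify_sleep_tx_count() above: walk the TIDs named in the trigger, stop once a queue holds more frames than the service period allows, and report whether more data remains. The per-TID queue depths are hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TID 8

static int adjust_release_count(const int queued[MAX_TID], unsigned int tids,
				int cnt, bool *more_data)
{
	int remaining = cnt;
	int tid;

	for (tid = 0; tid < MAX_TID; tid++) {
		if (!(tids & (1u << tid)))
			continue;
		if (queued[tid] > remaining) {
			*more_data = true;
			remaining = 0;
			break;
		}
		remaining -= queued[tid];
	}
	return cnt - remaining; /* frames the firmware is told to release */
}

int main(void)
{
	int queued[MAX_TID] = { 0, 0, 0, 0, 1, 0, 3, 0 }; /* TIDs 4 and 6 */
	bool more_data = false;
	int released = adjust_release_count(queued, (1u << 4) | (1u << 6),
					    2, &more_data);

	printf("release %d frame(s), more_data=%d\n", released, more_data);
	return 0;
}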
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 4968d0237dc5..2ed84c421481 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -195,24 +195,33 @@ struct iwl_mvm;
/**
* DOC: AP mode - PS
*
- * When a station is asleep, the fw will set it as "asleep". All the
- * non-aggregation frames to that station will be dropped by the fw
- * (%TX_STATUS_FAIL_DEST_PS failure code).
+ * When a station is asleep, the fw will set it as "asleep". All frames on
+ * shared queues (i.e. non-aggregation queues) to that station will be dropped
+ * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
+ *
* AMPDUs are in a separate queue that is stopped by the fw. We just need to
- * let mac80211 know how many frames we have in these queues so that it can
+ * let mac80211 know when there are frames in these queues so that it can
* properly handle trigger frames.
- * When the a trigger frame is received, mac80211 tells the driver to send
- * frames from the AMPDU queues or AC queue depending on which queue are
- * delivery-enabled and what TID has frames to transmit (Note that mac80211 has
- * all the knowledege since all the non-agg frames are buffered / filtered, and
- * the driver tells mac80211 about agg frames). The driver needs to tell the fw
- * to let frames out even if the station is asleep. This is done by
- * %iwl_mvm_sta_modify_sleep_tx_count.
- * When we receive a frame from that station with PM bit unset, the
- * driver needs to let the fw know that this station isn't alseep any more.
- * This is done by %iwl_mvm_sta_modify_ps_wake.
- *
- * TODO - EOSP handling
+ *
+ * When a trigger frame is received, mac80211 tells the driver to send frames
+ * from the AMPDU queues or sends frames to non-aggregation queues itself,
+ * depending on which ACs are delivery-enabled and what TID has frames to
+ * transmit. Note that mac80211 has all the knowledge since all the non-agg
+ * frames are buffered / filtered, and the driver tells mac80211 about agg
+ * frames. The driver needs to tell the fw to let frames out even if the
+ * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
+ *
+ * When we receive a frame from that station with PM bit unset, the driver
+ * needs to let the fw know that this station isn't asleep any more. This is
+ * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signalling the
+ * station's wakeup.
+ *
+ * For a GO, the Service Period might be cut short due to an absence period
+ * of the GO. In this (and all other cases) the firmware notifies us with the
+ * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
+ * already sent to the device will be rejected again.
+ *
+ * See also "AP support for powersaving clients" in mac80211.h.
*/
/**
@@ -261,6 +270,12 @@ struct iwl_mvm_tid_data {
u16 ssn;
};
+static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
+{
+ return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
+ tid_data->next_reclaimed);
+}
+
/**
* struct iwl_mvm_sta - representation of a station in the driver
* @sta_id: the index of the station in the fw (will be replaced by id_n_color)
@@ -269,7 +284,11 @@ struct iwl_mvm_tid_data {
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
* @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
+ * by debugfs.
* @bt_reduced_txpower: is reduced tx power enabled for this station
+ * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
+ * we need to signal the EOSP
* @lock: lock to protect the whole struct. Since %tid_data is access from Tx
* and from Tx response flow, it needs a spinlock.
* @tid_data: per tid data. Look at %iwl_mvm_tid_data.
@@ -287,7 +306,9 @@ struct iwl_mvm_sta {
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
+ bool bt_reduced_txpower_dbg;
bool bt_reduced_txpower;
+ bool next_status_eosp;
spinlock_t lock;
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
struct iwl_lq_sta lq_sta;
@@ -345,6 +366,10 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u32 iv32,
u16 *phase1key);
+int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start);
@@ -359,7 +384,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
- u32 qmask);
+ u32 qmask, enum nl80211_iftype iftype);
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta);
int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -375,7 +400,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
- u16 cnt);
+ u16 cnt, u16 tids, bool more_data,
+ bool agg);
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain);
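
As a reading aid for the power-save comment block rewritten above: a minimal, illustrative sketch of how the pieces named there fit together. It is not part of the patch; example_ps_flow() is an invented name and the glue is simplified, but the helpers it calls are the mac80211/mvm symbols referenced in this header.

/* Illustrative only -- not from the patch. */
static void example_ps_flow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			    struct iwl_mvm_sta *mvmsta, int tid)
{
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

	/* keep mac80211's "buffered" state in sync for the AMPDU queues */
	ieee80211_sta_set_buffered(sta, tid, iwl_mvm_tid_queued(tid_data) > 0);

	/*
	 * On a trigger frame, mac80211 asks the driver to release frames;
	 * the driver then lets the fw send them despite the sleep state:
	 *   iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, cnt,
	 *                                     tids, more_data, agg);
	 * When the last released frame's Tx status arrives (or the fw sends
	 * EOSP_NOTIFICATION for a GO absence), the service period ends:
	 */
	if (mvmsta->next_status_eosp) {
		mvmsta->next_status_eosp = false;
		ieee80211_sta_eosp(sta);
	}
}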
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index b4c2abaa297b..61331245ad93 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -126,6 +126,7 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
* in iwl_mvm_te_handle_notif).
*/
clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
/*
* Of course, our status bit is just as racy as mac80211, so in
@@ -210,6 +211,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
ieee80211_ready_on_channel(mvm->hw);
}
} else {
@@ -436,7 +438,8 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
time_cmd.duration = cpu_to_le32(duration);
time_cmd.repeat = 1;
time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
- TE_V2_NOTIF_HOST_EVENT_END);
+ TE_V2_NOTIF_HOST_EVENT_END |
+ T2_V2_START_IMMEDIATELY);
iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
@@ -551,7 +554,8 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
time_cmd.repeat = 1;
time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
- TE_V2_NOTIF_HOST_EVENT_END);
+ TE_V2_NOTIF_HOST_EVENT_END |
+ T2_V2_START_IMMEDIATELY);
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 3afa6b6bf835..7a99fa361954 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -403,7 +403,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
}
}
-static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
{
struct iwl_host_cmd cmd = {
.id = REPLY_THERMAL_MNG_BACKOFF,
@@ -412,6 +412,8 @@ static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
.flags = CMD_SYNC,
};
+ backoff = max(backoff, mvm->thermal_throttle.min_backoff);
+
if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
backoff);
@@ -534,7 +536,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
.support_tx_backoff = true,
};
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
@@ -546,6 +548,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
tt->params = &iwl7000_tt_params;
tt->throttle = false;
+ tt->min_backoff = min_backoff;
INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 76ee486039d7..879aeac46cc1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -79,6 +79,7 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
__le16 fc = hdr->frame_control;
u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
u32 len = skb->len + FCS_LEN;
+ u8 ac;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
tx_flags |= TX_CMD_FLG_ACK;
@@ -90,13 +91,6 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
- /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
- if (info->band == IEEE80211_BAND_2GHZ &&
- (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
- is_multicast_ether_addr(hdr->addr1) ||
- ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc)))
- tx_flags |= TX_CMD_FLG_BT_DIS;
-
if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -112,6 +106,11 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
}
+ /* tid_tspec will default to 0 = BE when QOS isn't enabled */
+ ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
+ TX_CMD_FLG_BT_PRIO_POS;
+
if (ieee80211_is_mgmt(fc)) {
if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
tx_cmd->pm_frame_timeout = cpu_to_le16(3);
@@ -122,15 +121,12 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
* it
*/
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
- } else if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+ } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
tx_cmd->pm_frame_timeout = cpu_to_le16(2);
} else {
tx_cmd->pm_frame_timeout = 0;
}
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
-
if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
!is_multicast_ether_addr(ieee80211_get_DA(hdr)))
tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
@@ -207,7 +203,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
mvm->mgmt_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
+ iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
mvm->mgmt_last_antenna_idx);
rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
@@ -377,6 +373,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* From now on, we cannot access info->control */
+ /*
+ * we handle that entirely ourselves -- for uAPSD the firmware
+ * will always send a notification, and for PS-Poll responses
+ * we'll notify mac80211 when getting frame status
+ */
+ info->flags &= ~IEEE80211_TX_STATUS_EOSP;
+
spin_lock(&mvmsta->lock);
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
@@ -437,6 +440,17 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
+ if ((tid_data->state == IWL_AGG_ON ||
+ tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+ iwl_mvm_tid_queued(tid_data) == 0) {
+ /*
+ * Now that this aggregation queue is empty tell mac80211 so it
+ * knows we no longer have frames buffered for the station on
+ * this TID (for the TIM bitmap calculation.)
+ */
+ ieee80211_sta_set_buffered(sta, tid, false);
+ }
+
if (tid_data->ssn != tid_data->next_reclaimed)
return;
@@ -680,6 +694,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_check_ratid_empty(mvm, sta, tid);
spin_unlock_bh(&mvmsta->lock);
}
+
+ if (mvmsta->next_status_eosp) {
+ mvmsta->next_status_eosp = false;
+ ieee80211_sta_eosp(sta);
+ }
} else {
mvmsta = NULL;
}
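
A short worked note on the new empty-queue check above: iwl_mvm_tid_queued() is plain modular sequence-number arithmetic, sketched here with invented values.

/*
 * Worked example (illustrative values only):
 *
 *   tid_data->seq_number     = 0x0050  ->  IEEE80211_SEQ_TO_SN() == 5
 *   tid_data->next_reclaimed = 2
 *   iwl_mvm_tid_queued()     = ieee80211_sn_sub(5, 2) == 3 frames in flight
 *
 * ieee80211_sn_sub() masks the difference to the 12-bit sequence-number
 * space, so the count stays correct across wrap-around. Once it reaches 0,
 * iwl_mvm_check_ratid_empty() calls ieee80211_sta_set_buffered(sta, tid,
 * false) so mac80211 can clear the TIM bit for this TID.
 */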
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 86989df69356..d619851745a1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -289,8 +289,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}
-static struct {
- char *name;
+static const struct {
+ const char *name;
u8 num;
} advanced_lookup[] = {
{ "NMI_INTERRUPT_WDG", 0x34 },
@@ -376,9 +376,67 @@ struct iwl_error_event_table {
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed;
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_umac_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 pc; /* program counter */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 line; /* source code line of error */
+ u32 umac_ver; /* umac version */
+} __packed;
+
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ struct iwl_umac_error_event_table table;
+ u32 base;
+
+ base = mvm->umac_error_event_table;
+
+ if (base < 0x800000 || base >= 0x80C000) {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (mvm->cur_ucode == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+ mvm->status, table.valid);
+ }
+
+ IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+ desc_lookup(table.error_id));
+ IWL_ERR(mvm, "0x%08X | umac uPc\n", table.pc);
+ IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
+ IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
+ IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
+ IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
+ IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
+ IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
+ IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_ver);
+}
+
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
@@ -394,7 +452,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
base = mvm->fw->inst_errlog_ptr;
}
- if (base < 0x800000 || base >= 0x80C000) {
+ if (base < 0x800000) {
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
@@ -453,29 +511,31 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+
+ if (mvm->support_umac_log)
+ iwl_mvm_dump_umac_error_log(mvm);
}
-void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
+void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
{
const struct fw_img *img;
- int ofs, len = 0;
- u8 *buf;
+ u32 ofs, sram_len;
+ void *sram;
- if (!mvm->ucode_loaded)
+ if (!mvm->ucode_loaded || mvm->fw_error_sram)
return;
img = &mvm->fw->img[mvm->cur_ucode];
ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
- len = img->sec[IWL_UCODE_SECTION_DATA].len;
+ sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
- buf = kzalloc(len, GFP_ATOMIC);
- if (!buf)
+ sram = kzalloc(sram_len, GFP_ATOMIC);
+ if (!sram)
return;
- iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len);
- iwl_print_hex_error(mvm->trans, buf, len);
-
- kfree(buf);
+ iwl_trans_read_mem_bytes(mvm->trans, ofs, sram, sram_len);
+ mvm->fw_error_sram = sram;
+ mvm->fw_error_sram_len = sram_len;
}
/**
@@ -516,15 +576,20 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_smps_mode smps_request)
{
struct iwl_mvm_vif *mvmvif;
- enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ enum ieee80211_smps_mode smps_mode;
int i;
lockdep_assert_held(&mvm->mutex);
/* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
- if (num_of_ant(iwl_fw_valid_rx_ant(mvm->fw)) == 1)
+ if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
return;
+ if (vif->type == NL80211_IFTYPE_AP)
+ smps_mode = IEEE80211_SMPS_OFF;
+ else
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->smps_requests[req_type] = smps_request;
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
@@ -538,3 +603,44 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_request_smps(vif, smps_mode);
}
+
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool value)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int res;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvmvif->low_latency == value)
+ return 0;
+
+ mvmvif->low_latency = value;
+
+ res = iwl_mvm_update_quotas(mvm, NULL);
+ if (res)
+ return res;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+
+ return iwl_mvm_power_update_mac(mvm, vif);
+}
+
+static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ bool *result = _data;
+
+ if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
+ *result = true;
+}
+
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
+{
+ bool result = false;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_ll_iter, &result);
+
+ return result;
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3872ead75488..edb015c99049 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -66,6 +66,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
+#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-drv.h"
@@ -389,12 +390,92 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+
+/* 8000 Series */
+ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+#ifdef CONFIG_ACPI
+#define SPL_METHOD "SPLC"
+#define SPL_DOMAINTYPE_MODULE BIT(0)
+#define SPL_DOMAINTYPE_WIFI BIT(1)
+#define SPL_DOMAINTYPE_WIGIG BIT(2)
+#define SPL_DOMAINTYPE_RFEM BIT(3)
+
+static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+{
+ union acpi_object *limits, *domain_type, *power_limit;
+
+ if (splx->type != ACPI_TYPE_PACKAGE ||
+ splx->package.count != 2 ||
+ splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ splx->package.elements[0].integer.value != 0) {
+ IWL_ERR(trans, "Unsupported splx structure");
+ return 0;
+ }
+
+ limits = &splx->package.elements[1];
+ if (limits->type != ACPI_TYPE_PACKAGE ||
+ limits->package.count < 2 ||
+ limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ IWL_ERR(trans, "Invalid limits element");
+ return 0;
+ }
+
+ domain_type = &limits->package.elements[0];
+ power_limit = &limits->package.elements[1];
+ if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
+ IWL_DEBUG_INFO(trans, "WiFi power is not limited");
+ return 0;
+ }
+
+ return power_limit->integer.value;
+}
+
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+{
+ acpi_handle pxsx_handle;
+ acpi_handle handle;
+ struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+
+ pxsx_handle = ACPI_HANDLE(&pdev->dev);
+ if (!pxsx_handle) {
+ IWL_DEBUG_INFO(trans,
+ "Could not retrieve root port ACPI handle");
+ return;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_INFO(trans, "SPL method not found");
+ return;
+ }
+
+ /* Call SPLC with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+ if (ACPI_FAILURE(status)) {
+ IWL_ERR(trans, "SPLC invocation failed (0x%x)", status);
+ return;
+ }
+
+ trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+ IWL_DEBUG_INFO(trans, "Default power limit set to %lld",
+ trans->dflt_pwr_limit);
+ kfree(splx.pointer);
+}
+
+#else /* CONFIG_ACPI */
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
+#endif
+
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
@@ -419,6 +500,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_trans;
}
+ set_dflt_pwr_limit(iwl_trans, pdev);
+
/* register transport layer debugfs here */
ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
if (ret)
@@ -477,7 +560,7 @@ static int iwl_pci_resume(struct device *device)
iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
return 0;
}
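
To make the SPLC parsing added above easier to follow, this is the package shape splx_get_pwr_limit() accepts, reconstructed purely from its checks; field meanings beyond what the code tests are assumptions, not taken from a specification.

/*
 * SPLX layout accepted by splx_get_pwr_limit() (reconstructed):
 *
 *   Package {                     // exactly 2 elements
 *       Integer 0,                // must be the integer 0
 *       Package {                 // "limits", at least 2 elements
 *           Integer domain_type,  // bitmask; SPL_DOMAINTYPE_WIFI must be set
 *           Integer power_limit,  // returned and stored in
 *                                 // trans->dflt_pwr_limit
 *       }
 *   }
 *
 * Any deviation makes the helper return 0, i.e. "power not limited".
 */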
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index e851f26fd44c..9091513ea738 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -304,7 +304,7 @@ struct iwl_trans_pcie {
bool bc_table_dword;
u32 rx_page_order;
- const char **command_names;
+ const char *const *command_names;
/* queue watchdog */
unsigned long wd_timeout;
@@ -488,4 +488,6 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 08c23d497a02..fdfa3969cac9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -155,37 +155,26 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
if (rxq->need_update == 0)
goto exit_unlock;
- if (trans->cfg->base_params->shadow_reg_enable) {
- /* shadow register enabled */
- /* Device expects a multiple of 8 */
- rxq->write_actual = (rxq->write & ~0x7);
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
- } else {
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
- reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(trans,
- "Rx queue requesting wakeup,"
- " GP1 = 0x%x\n", reg);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
-
- rxq->write_actual = (rxq->write & ~0x7);
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
- rxq->write_actual);
-
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- rxq->write_actual = (rxq->write & ~0x7);
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
- rxq->write_actual);
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+ * 2. there is a chance that the NIC is asleep
+ */
+ if (!trans->cfg->base_params->shadow_reg_enable &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+ reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
}
}
+
+ rxq->write_actual = round_down(rxq->write, 8);
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
rxq->need_update = 0;
exit_unlock:
@@ -802,10 +791,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 inta;
- lockdep_assert_held(&trans_pcie->irq_lock);
+ lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
trace_iwlwifi_dev_irq(trans->dev);
@@ -1006,7 +994,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
isr_stats->rfkill++;
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
if (hw_rfkill) {
set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index f9507807b486..dcfd6d866d09 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -75,6 +75,20 @@
#include "iwl-agn-hw.h"
#include "internal.h"
+static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
+{
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (2 << 28)));
+ return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+}
+
+static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
+{
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (3 << 28)));
+}
+
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -89,6 +103,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
@@ -132,8 +147,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
/*
* Disable L0s without affecting L1;
@@ -203,19 +219,23 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
/*
* Enable DMA clock and wait for it to stabilize.
*
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
- * do not disable clocks. This preserves any hardware bits already
- * set by default in "CLK_CTRL_REG" after reset.
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+ * bits do not disable clocks. This preserves any hardware
+ * bits already set by default in "CLK_CTRL_REG" after reset.
*/
- iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ iwl_write_prph(trans, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
- /* Disable L1-Active */
- iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ /* Disable L1-Active */
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- /* Clear the interrupt in APMG if the NIC is in RFKILL */
- iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+ APMG_RTC_INT_STT_RFKILL);
+ }
set_bit(STATUS_DEVICE_ENABLED, &trans->status);
@@ -223,6 +243,116 @@ out:
return ret;
}
+/*
+ * Enable LP XTAL to avoid HW bug where device may consume much power if
+ * FW is not loaded after device reset. LP XTAL is disabled by default
+ * after device HW reset. Do it only if XTAL is fed by internal source.
+ * Configure device's "persistence" mode to avoid resetting XTAL again when
+ * SHRD_HW_RST occurs in S3.
+ */
+static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+{
+ int ret;
+ u32 apmg_gp1_reg;
+ u32 apmg_xtal_cfg_reg;
+ u32 dl_cfg_reg;
+
+ /* Force XTAL ON */
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+
+ /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is possible.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (WARN_ON(ret < 0)) {
+ IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ return;
+ }
+
+ /*
+ * Clear "disable persistence" to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+
+ /*
+ * Force APMG XTAL to be active to prevent its disabling by HW
+ * caused by APMG idle state.
+ */
+ apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+ SHR_APMG_XTAL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg |
+ SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+
+ /*
+ * Reset entire device again - do controller reset (results in
+ * SHRD_HW_RST). Turn MAC off before proceeding.
+ */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /* Enable LP XTAL by indirect access through CSR */
+ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+ SHR_APMG_GP1_WF_XTAL_LP_EN |
+ SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+
+ /* Clear delay line clock power up */
+ dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+ ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+
+ /*
+ * Enable persistence mode to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /* Activates XTAL resources monitor */
+ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+ CSR_MONITOR_XTAL_RESOURCES);
+
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ udelay(10);
+
+ /* Release APMG XTAL */
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg &
+ ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+}
+
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
int ret = 0;
@@ -250,6 +380,11 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
+ if (trans->cfg->lp_xtal_workaround) {
+ iwl_pcie_apm_lp_xtal_enable(trans);
+ return;
+ }
+
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
@@ -273,7 +408,8 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
- iwl_pcie_set_pwr(trans, false);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_pcie_set_pwr(trans, false);
iwl_op_mode_nic_config(trans->op_mode);
@@ -435,78 +571,106 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
{
int shift_param;
- u32 address;
- int ret = 0;
+ int i, ret = 0;
+ u32 last_read_idx = 0;
if (cpu == 1) {
shift_param = 0;
- address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+ *first_ucode_section = 0;
} else {
shift_param = 16;
- address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+ (*first_ucode_section)++;
}
- /* set CPU to started */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_LOADING_STARTED << shift_param,
- 1);
-
- /* set last complete descriptor number */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
- << shift_param,
- 1);
-
- /* set last loaded block */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
- << shift_param,
- 1);
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ if (i == (*first_ucode_section) + 1)
+ /* set CPU to started */
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_HDRS_LOADING_COMPLETED
+ << shift_param);
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
/* image loading complete */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_LOADING_COMPLETED
- << shift_param,
- 1);
-
- /* set FH_TCSR_0_REG */
- iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
-
- /* verify image verification started */
- ret = iwl_poll_bit(trans, address,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
- CSR_SECURE_TIME_OUT);
- if (ret < 0) {
- IWL_ERR(trans, "secure boot process didn't start\n");
- return ret;
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
+
+ *first_ucode_section = last_read_idx;
+
+ return 0;
+}
+
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
+{
+ int shift_param;
+ int i, ret = 0;
+ u32 last_read_idx = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
}
- /* wait for image verification to complete */
- ret = iwl_poll_bit(trans, address,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
- CSR_SECURE_TIME_OUT);
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
- if (ret < 0) {
- IWL_ERR(trans, "Time out on secure boot process\n");
- return ret;
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
}
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ (LMPM_CPU_UCODE_LOADING_COMPLETED |
+ LMPM_CPU_HDRS_LOADING_COMPLETED |
+ LMPM_CPU_UCODE_LOADING_STARTED) <<
+ shift_param);
+
+ *first_ucode_section = last_read_idx;
+
return 0;
}
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
- int i, ret = 0;
+ int ret = 0;
+ int first_ucode_section;
IWL_DEBUG_FW(trans,
"working with %s image\n",
@@ -518,53 +682,68 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* configure the ucode to be ready to get the secured image */
if (image->is_secure) {
/* set secure boot inspector addresses */
- iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
- iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
-
- /* release CPU1 reset if secure inspector image burned in OTP */
- iwl_write32(trans, CSR_RESET, 0);
- }
-
- /* load to FW the binary sections of CPU1 */
- IWL_DEBUG_INFO(trans, "Loading CPU1\n");
- for (i = 0;
- i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
- i++) {
- if (!image->sec[i].data)
- break;
- ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_CODE_ADDR,
+ LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
+
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_DATA_ADDR,
+ LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
+
+ /* set CPU1 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
+ LMPM_SECURE_CPU1_HDR_MEM_SPACE);
+
+ /* load to FW the binary Secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
+ &first_ucode_section);
if (ret)
return ret;
- }
- /* configure the ucode to start secure process on CPU1 */
- if (image->is_secure) {
- /* config CPU1 to start secure protocol */
- ret = iwl_pcie_secure_set(trans, 1);
+ } else {
+ /* load to FW the binary Non secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 1,
+ &first_ucode_section);
if (ret)
return ret;
- } else {
- /* Remove all resets to allow NIC to operate */
- iwl_write32(trans, CSR_RESET, 0);
}
if (image->is_dual_cpus) {
+ /* set CPU2 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+ LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+
/* load to FW the binary sections of CPU2 */
- IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
- for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
- i < IWL_UCODE_SECTION_MAX; i++) {
- if (!image->sec[i].data)
- break;
- ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
- if (ret)
- return ret;
- }
+ if (image->is_secure)
+ ret = iwl_pcie_load_cpu_secured_sections(
+ trans, image, 2,
+ &first_ucode_section);
+ else
+ ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+ }
+
+ /* release CPU reset */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+ else
+ iwl_write32(trans, CSR_RESET, 0);
- if (image->is_secure) {
- /* set CPU2 for secure protocol */
- ret = iwl_pcie_secure_set(trans, 2);
- if (ret)
- return ret;
+ if (image->is_secure) {
+ /* wait for image verification to complete */
+ ret = iwl_poll_prph_bit(trans,
+ LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_TIME_OUT);
+
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out on secure boot process\n");
+ return ret;
}
}
@@ -591,7 +770,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
set_bit(STATUS_RFKILL, &trans->status);
else
clear_bit(STATUS_RFKILL, &trans->status);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
@@ -706,7 +885,13 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
else
clear_bit(STATUS_RFKILL, &trans->status);
if (hw_rfkill != was_hw_rfkill)
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+{
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
+ iwl_trans_pcie_stop_device(trans);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -815,7 +1000,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
set_bit(STATUS_RFKILL, &trans->status);
else
clear_bit(STATUS_RFKILL, &trans->status);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
return 0;
}
@@ -1158,6 +1343,7 @@ static const char *get_csr_string(int cmd)
IWL_CMD(CSR_GIO_CHICKEN_BITS);
IWL_CMD(CSR_ANA_PLL_CFG);
IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_MONITOR_STATUS_REG);
IWL_CMD(CSR_DBG_HPET_MEM_REG);
default:
return "UNKNOWN";
@@ -1190,6 +1376,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
CSR_DRAM_INT_TBL_REG,
CSR_GIO_CHICKEN_BITS,
CSR_ANA_PLL_CFG,
+ CSR_MONITOR_STATUS_REG,
CSR_HW_REV_WA_REG,
CSR_DBG_HPET_MEM_REG
};
@@ -1407,16 +1594,15 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
char *buf = NULL;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- ret = pos = iwl_dump_fh(trans, &buf);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
+ ssize_t ret;
+ ret = iwl_dump_fh(trans, &buf);
+ if (ret < 0)
+ return ret;
+ if (!buf)
+ return -EINVAL;
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 3d549008b3e2..3b0c72c10054 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
le32_to_cpu(txq->scratchbufs[i].scratch));
- iwl_trans_fw_error(trans);
+ iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
}
/*
@@ -296,43 +296,38 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
if (txq->need_update == 0)
return;
- if (trans->cfg->base_params->shadow_reg_enable ||
- txq_id == trans_pcie->cmd_queue) {
- /* shadow register enabled */
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
- } else {
- /* if we're trying to save power */
- if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
- /* wake up nic if it's powered down ...
- * uCode will wake up, and interrupt us again, so next
- * time we'll skip this part. */
- reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(trans,
- "Tx queue %d requesting wakeup,"
- " GP1 = 0x%x\n", txq_id, reg);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return;
- }
-
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
- txq->q.write_ptr);
-
- iwl_write_direct32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
-
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+ * 2. NIC is woken up for CMD regardless of shadow outside this function
+ * 3. there is a chance that the NIC is asleep
+ */
+ if (!trans->cfg->base_params->shadow_reg_enable &&
+ txq_id != trans_pcie->cmd_queue &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
- * else not in power-save mode,
- * uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
+ * wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part.
*/
- } else
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+ txq_id, reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ return;
+ }
}
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+
txq->need_update = 0;
}
@@ -705,8 +700,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
@@ -1028,7 +1024,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
idx, q->write_ptr, q->read_ptr);
- iwl_trans_fw_error(trans);
+ iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
}
}
@@ -1587,6 +1583,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
get_cmd_string(trans_pcie, cmd->id));
ret = -ETIMEDOUT;
+ iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
iwl_trans_fw_error(trans);
goto cancel;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index cb6d189bc3e6..54e344aed6e0 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1766,7 +1766,8 @@ static void lbs_join_post(struct lbs_private *priv,
memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
priv->wdev->ssid_len = params->ssid_len;
- cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(priv->dev, bssid, params->chandef.chan,
+ GFP_KERNEL);
/* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */
priv->connect_status = LBS_CONNECTED;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 58c6ee5de98f..33ceda296c9c 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -498,7 +498,7 @@ static int if_sdio_prog_helper(struct if_sdio_card *card,
*/
mdelay(2);
- chunk_size = min(size, (size_t)60);
+ chunk_size = min_t(size_t, size, 60);
*((__le32*)chunk_buffer) = cpu_to_le32(chunk_size);
memcpy(chunk_buffer + 4, firmware, chunk_size);
@@ -639,7 +639,7 @@ static int if_sdio_prog_real(struct if_sdio_card *card,
req_size = size;
while (req_size) {
- chunk_size = min(req_size, (size_t)512);
+ chunk_size = min_t(size_t, req_size, 512);
memcpy(chunk_buffer, firmware, chunk_size);
/*
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 69d4c3179d04..9d7a52f5a410 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -57,6 +57,10 @@ static bool rctbl = false;
module_param(rctbl, bool, 0444);
MODULE_PARM_DESC(rctbl, "Handle rate control table");
+static bool support_p2p_device = true;
+module_param(support_p2p_device, bool, 0444);
+MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -335,7 +339,8 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
#endif
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) },
- { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
+ /* must be last, see hwsim_if_comb */
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }
};
static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
@@ -345,6 +350,27 @@ static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
static const struct ieee80211_iface_combination hwsim_if_comb[] = {
{
.limits = hwsim_if_limits,
+ /* remove the last entry which is P2P_DEVICE */
+ .n_limits = ARRAY_SIZE(hwsim_if_limits) - 1,
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+ },
+ {
+ .limits = hwsim_if_dfs_limits,
+ .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+ }
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
+ {
+ .limits = hwsim_if_limits,
.n_limits = ARRAY_SIZE(hwsim_if_limits),
.max_interfaces = 2048,
.num_different_channels = 1,
@@ -385,6 +411,7 @@ struct mac80211_hwsim_data {
struct mac_address addresses[2];
int channels, idx;
+ bool use_chanctx;
struct ieee80211_channel *tmp_chan;
struct delayed_work roc_done;
@@ -451,7 +478,7 @@ static struct genl_family hwsim_genl_family = {
/* MAC80211_HWSIM netlink policy */
-static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
+static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
[HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
[HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
@@ -468,6 +495,7 @@ static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 },
[HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
[HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG },
};
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@@ -1035,32 +1063,6 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
ack = true;
rx_status.mactime = now + data2->tsf_offset;
-#if 0
- /*
- * Don't enable this code by default as the OUI 00:00:00
- * is registered to Xerox so we shouldn't use it here, it
- * might find its way into pcap files.
- * Note that this code requires the headroom in the SKB
- * that was allocated earlier.
- */
- rx_status.vendor_radiotap_oui[0] = 0x00;
- rx_status.vendor_radiotap_oui[1] = 0x00;
- rx_status.vendor_radiotap_oui[2] = 0x00;
- rx_status.vendor_radiotap_subns = 127;
- /*
- * Radiotap vendor namespaces can (and should) also be
- * split into fields by using the standard radiotap
- * presence bitmap mechanism. Use just BIT(0) here for
- * the presence bitmap.
- */
- rx_status.vendor_radiotap_bitmap = BIT(0);
- /* We have 8 bytes of (dummy) data */
- rx_status.vendor_radiotap_len = 8;
- /* For testing, also require it to be aligned */
- rx_status.vendor_radiotap_align = 8;
- /* push the data */
- memcpy(skb_push(nskb, 8), "ABCDEFGH", 8);
-#endif
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(data2->hw, nskb);
@@ -1087,7 +1089,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return;
}
- if (data->channels == 1) {
+ if (!data->use_chanctx) {
channel = data->channel;
} else if (txi->hw_queue == 4) {
channel = data->tmp_chan;
@@ -1275,6 +1277,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
mac80211_hwsim_tx_frame(hw, skb,
rcu_dereference(vif->chanctx_conf)->def.chan);
+
+ if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ ieee80211_csa_finish(vif);
}
static enum hrtimer_restart
@@ -1350,7 +1355,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
data->channel = conf->chandef.chan;
- WARN_ON(data->channel && data->channels > 1);
+ WARN_ON(data->channel && data->use_chanctx);
data->power_level = conf->power_level;
if (!data->started || !data->beacon_int)
@@ -1936,7 +1941,8 @@ static struct ieee80211_ops mac80211_hwsim_mchan_ops;
static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
const struct ieee80211_regdomain *regd,
- bool reg_strict)
+ bool reg_strict, bool p2p_device,
+ bool use_chanctx)
{
int err;
u8 addr[ETH_ALEN];
@@ -1946,11 +1952,14 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
int idx;
+ if (WARN_ON(channels > 1 && !use_chanctx))
+ return -EINVAL;
+
spin_lock_bh(&hwsim_radio_lock);
idx = hwsim_radio_idx++;
spin_unlock_bh(&hwsim_radio_lock);
- if (channels > 1)
+ if (use_chanctx)
ops = &mac80211_hwsim_mchan_ops;
hw = ieee80211_alloc_hw(sizeof(*data), ops);
if (!hw) {
@@ -1991,17 +2000,25 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
hw->wiphy->addresses = data->addresses;
data->channels = channels;
+ data->use_chanctx = use_chanctx;
data->idx = idx;
- if (data->channels > 1) {
+ if (data->use_chanctx) {
hw->wiphy->max_scan_ssids = 255;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
hw->wiphy->max_remain_on_channel_duration = 1000;
/* For channels > 1 DFS is not allowed */
hw->wiphy->n_iface_combinations = 1;
hw->wiphy->iface_combinations = &data->if_combination;
- data->if_combination = hwsim_if_comb[0];
+ if (p2p_device)
+ data->if_combination = hwsim_if_comb_p2p_dev[0];
+ else
+ data->if_combination = hwsim_if_comb[0];
data->if_combination.num_different_channels = data->channels;
+ } else if (p2p_device) {
+ hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(hwsim_if_comb_p2p_dev);
} else {
hw->wiphy->iface_combinations = hwsim_if_comb;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
@@ -2017,8 +2034,10 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ if (p2p_device)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_SIGNAL_DBM |
@@ -2027,13 +2046,15 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
+ IEEE80211_HW_CHANCTX_STA_CSA;
if (rctbl)
hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_AP_UAPSD;
+ WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
/* ask mac80211 to reserve space for magic */
@@ -2141,7 +2162,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
debugfs_create_file("group", 0666, data->debugfs, data,
&hwsim_fops_group);
- if (data->channels == 1)
+ if (!data->use_chanctx)
debugfs_create_file("dfs_simulate_radar", 0222,
data->debugfs,
data, &hwsim_simulate_radar);
@@ -2407,10 +2428,17 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
const char *alpha2 = NULL;
const struct ieee80211_regdomain *regd = NULL;
bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+ bool p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
+ bool use_chanctx;
if (info->attrs[HWSIM_ATTR_CHANNELS])
chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+ if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
+ use_chanctx = true;
+ else
+ use_chanctx = (chans > 1);
+
if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
@@ -2422,7 +2450,8 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
regd = hwsim_world_regdom_custom[idx];
}
- return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict);
+ return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict,
+ p2p_device, use_chanctx);
}
static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
@@ -2640,7 +2669,9 @@ static int __init init_mac80211_hwsim(void)
}
err = mac80211_hwsim_create_radio(channels, reg_alpha2,
- regd, reg_strict);
+ regd, reg_strict,
+ support_p2p_device,
+ channels > 1);
if (err < 0)
goto out_free_radios;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 2747cce5a269..c9d0315575ba 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -107,6 +107,10 @@ enum {
* (nla string, length 2)
* @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute)
* @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute)
+ * @HWSIM_ATTR_SUPPORT_P2P_DEVICE: support P2P Device virtual interface (flag)
+ * @HWSIM_ATTR_USE_CHANCTX: used with the %HWSIM_CMD_CREATE_RADIO
+ * command to force use of channel contexts even when only a
+ * single channel is supported
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -126,6 +130,8 @@ enum {
HWSIM_ATTR_REG_HINT_ALPHA2,
HWSIM_ATTR_REG_CUSTOM_REG,
HWSIM_ATTR_REG_STRICT_REG,
+ HWSIM_ATTR_SUPPORT_P2P_DEVICE,
+ HWSIM_ATTR_USE_CHANCTX,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index 5d9a8084665d..c92f27aa71ed 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -23,6 +23,31 @@
#include "main.h"
#include "11ac.h"
+/* Tables of the MCS map to the highest data rate (in Mbps) supported
+ * for long GI.
+ */
+static const u16 max_rate_lgi_80MHZ[8][3] = {
+ {0x124, 0x15F, 0x186}, /* NSS = 1 */
+ {0x249, 0x2BE, 0x30C}, /* NSS = 2 */
+ {0x36D, 0x41D, 0x492}, /* NSS = 3 */
+ {0x492, 0x57C, 0x618}, /* NSS = 4 */
+ {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */
+ {0x6DB, 0x83A, 0x0}, /* NSS = 6 */
+ {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */
+ {0x924, 0xAF8, 0xC30} /* NSS = 8 */
+};
+
+static const u16 max_rate_lgi_160MHZ[8][3] = {
+ {0x249, 0x2BE, 0x30C}, /* NSS = 1 */
+ {0x492, 0x57C, 0x618}, /* NSS = 2 */
+ {0x6DB, 0x83A, 0x0}, /* NSS = 3 */
+ {0x924, 0xAF8, 0xC30}, /* NSS = 4 */
+ {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */
+ {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
+ {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
+ {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
+};
+
/* This function converts the 2-bit MCS map to the highest long GI
* VHT data rate.
*/
@@ -30,33 +55,10 @@ static u16
mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
u8 bands, u16 mcs_map)
{
- u8 i, nss, max_mcs;
+ u8 i, nss, mcs;
u16 max_rate = 0;
u32 usr_vht_cap_info = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- /* tables of the MCS map to the highest data rate (in Mbps)
- * supported for long GI
- */
- u16 max_rate_lgi_80MHZ[8][3] = {
- {0x124, 0x15F, 0x186}, /* NSS = 1 */
- {0x249, 0x2BE, 0x30C}, /* NSS = 2 */
- {0x36D, 0x41D, 0x492}, /* NSS = 3 */
- {0x492, 0x57C, 0x618}, /* NSS = 4 */
- {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */
- {0x6DB, 0x83A, 0x0}, /* NSS = 6 */
- {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */
- {0x924, 0xAF8, 0xC30} /* NSS = 8 */
- };
- u16 max_rate_lgi_160MHZ[8][3] = {
- {0x249, 0x2BE, 0x30C}, /* NSS = 1 */
- {0x492, 0x57C, 0x618}, /* NSS = 2 */
- {0x6DB, 0x83A, 0x0}, /* NSS = 3 */
- {0x924, 0xAF8, 0xC30}, /* NSS = 4 */
- {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */
- {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
- {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
- {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
- };
if (bands & BAND_AAC)
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
@@ -64,29 +66,29 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
/* find the max NSS supported */
- nss = 0;
- for (i = 0; i < 8; i++) {
- max_mcs = (mcs_map >> (2 * i)) & 0x3;
- if (max_mcs < 3)
+ nss = 1;
+ for (i = 1; i <= 8; i++) {
+ mcs = GET_VHTNSSMCS(mcs_map, i);
+ if (mcs < IEEE80211_VHT_MCS_NOT_SUPPORTED)
nss = i;
}
- max_mcs = (mcs_map >> (2 * nss)) & 0x3;
+ mcs = GET_VHTNSSMCS(mcs_map, nss);
- /* if max_mcs is 3, nss must be 0 (SS = 1). Thus, max mcs is MCS 9 */
- if (max_mcs >= 3)
- max_mcs = 2;
+ /* if mcs is 3, nss must be 1 (NSS = 1). Default mcs to MCS 0~9 */
+ if (mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
if (GET_VHTCAP_CHWDSET(usr_vht_cap_info)) {
/* support 160 MHz */
- max_rate = max_rate_lgi_160MHZ[nss][max_mcs];
+ max_rate = max_rate_lgi_160MHZ[nss - 1][mcs];
if (!max_rate)
/* MCS9 is not supported in NSS3 */
- max_rate = max_rate_lgi_160MHZ[nss][max_mcs - 1];
+ max_rate = max_rate_lgi_160MHZ[nss - 1][mcs - 1];
} else {
- max_rate = max_rate_lgi_80MHZ[nss][max_mcs];
+ max_rate = max_rate_lgi_80MHZ[nss - 1][mcs];
if (!max_rate)
/* MCS9 is not supported in NSS6 */
- max_rate = max_rate_lgi_80MHZ[nss][max_mcs - 1];
+ max_rate = max_rate_lgi_80MHZ[nss - 1][mcs - 1];
}
return max_rate;
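
A quick worked example of the reworked NSS/MCS loop above, with an invented mcs_map value; the 2-bit field extraction done by GET_VHTNSSMCS() is assumed from its use here.

/*
 * Worked example (illustrative): mcs_map = 0xfffa
 *
 *   NSS 1: GET_VHTNSSMCS(0xfffa, 1) == 2   (IEEE80211_VHT_MCS_SUPPORT_0_9)
 *   NSS 2: GET_VHTNSSMCS(0xfffa, 2) == 2   (IEEE80211_VHT_MCS_SUPPORT_0_9)
 *   NSS 3..8:                         3    (IEEE80211_VHT_MCS_NOT_SUPPORTED)
 *
 * The loop ends with nss == 2 and mcs == 2, so for an 80 MHz peer
 * max_rate = max_rate_lgi_80MHZ[1][2] = 0x30C = 780 Mbps -- VHT80,
 * 2 spatial streams, MCS 9, long GI.
 */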
@@ -94,21 +96,20 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
static void
mwifiex_fill_vht_cap_info(struct mwifiex_private *priv,
- struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+ struct ieee80211_vht_cap *vht_cap, u8 bands)
{
struct mwifiex_adapter *adapter = priv->adapter;
if (bands & BAND_A)
- vht_cap->vht_cap.vht_cap_info =
+ vht_cap->vht_cap_info =
cpu_to_le32(adapter->usr_dot_11ac_dev_cap_a);
else
- vht_cap->vht_cap.vht_cap_info =
+ vht_cap->vht_cap_info =
cpu_to_le32(adapter->usr_dot_11ac_dev_cap_bg);
}
-static void
-mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
- struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
+ struct ieee80211_vht_cap *vht_cap, u8 bands)
{
struct mwifiex_adapter *adapter = priv->adapter;
u16 mcs_map_user, mcs_map_resp, mcs_map_result;
@@ -119,46 +120,48 @@ mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
/* rx MCS Set: find the minimum of the user rx mcs and ap rx mcs */
mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
- mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.rx_mcs_map);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
mcs_map_result = 0;
for (nss = 1; nss <= 8; nss++) {
mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
- if ((mcs_user == NO_NSS_SUPPORT) ||
- (mcs_resp == NO_NSS_SUPPORT))
- SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
else
SET_VHTNSSMCS(mcs_map_result, nss,
min(mcs_user, mcs_resp));
}
- vht_cap->vht_cap.supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
+ vht_cap->supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
- vht_cap->vht_cap.supp_mcs.rx_highest = cpu_to_le16(tmp);
+ vht_cap->supp_mcs.rx_highest = cpu_to_le16(tmp);
/* tx MCS Set: find the minimum of the user tx mcs and ap tx mcs */
mcs_map_user = GET_DEVTXMCSMAP(adapter->usr_dot_11ac_mcs_support);
- mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.tx_mcs_map);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map);
mcs_map_result = 0;
for (nss = 1; nss <= 8; nss++) {
mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
- if ((mcs_user == NO_NSS_SUPPORT) ||
- (mcs_resp == NO_NSS_SUPPORT))
- SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
else
SET_VHTNSSMCS(mcs_map_result, nss,
min(mcs_user, mcs_resp));
}
- vht_cap->vht_cap.supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
+ vht_cap->supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
- vht_cap->vht_cap.supp_mcs.tx_highest = cpu_to_le16(tmp);
+ vht_cap->supp_mcs.tx_highest = cpu_to_le16(tmp);
return;
}
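
Both loops above intersect two VHT MCS maps two bits per spatial stream, keeping the weaker capability and marking a stream unsupported if either side does. A minimal sketch of that merge, with local stand-ins assumed to match the GET_VHTNSSMCS()/SET_VHTNSSMCS() accessors (NSS 1 in bits 0-1 up to NSS 8 in bits 14-15, value 3 meaning not supported):

#include <stdio.h>

/* Local stand-ins for the driver macros (assumed semantics): two bits per
 * NSS, NSS 1 in bits 0-1 ... NSS 8 in bits 14-15; 3 means "not supported".
 */
#define GET_NSSMCS(map, nss)	(((map) >> (2 * ((nss) - 1))) & 0x3)
#define SET_NSSMCS(map, nss, v)	((map) |= ((v) & 0x3) << (2 * ((nss) - 1)))

int main(void)
{
	unsigned short user = 0xfffa;	/* MCS 0-9 on NSS 1-2, rest unsupported */
	unsigned short peer = 0xfffe;	/* MCS 0-9 on NSS 1 only */
	unsigned short out = 0;
	int nss;

	for (nss = 1; nss <= 8; nss++) {
		unsigned mu = GET_NSSMCS(user, nss);
		unsigned mp = GET_NSSMCS(peer, nss);

		SET_NSSMCS(out, nss, (mu == 3 || mp == 3) ? 3 :
			   (mu < mp ? mu : mp));
	}
	printf("merged MCS map: 0x%04x\n", out);	/* prints 0xfffe */
	return 0;
}
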
@@ -192,7 +195,8 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
(u8 *)bss_desc->bcn_vht_cap,
le16_to_cpu(vht_cap->header.len));
- mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
+ mwifiex_fill_vht_cap_tlv(priv, &vht_cap->vht_cap,
+ bss_desc->bss_band);
*buffer += sizeof(*vht_cap);
ret_len += sizeof(*vht_cap);
}
@@ -299,3 +303,81 @@ void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv)
return;
}
+
+bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv)
+{
+ struct mwifiex_bssdescriptor *bss_desc;
+ struct ieee80211_vht_operation *vht_oper;
+
+ bss_desc = &priv->curr_bss_params.bss_descriptor;
+ vht_oper = bss_desc->bcn_vht_oper;
+
+ if (!bss_desc->bcn_vht_cap || !vht_oper)
+ return false;
+
+ if (vht_oper->chan_width == IEEE80211_VHT_CHANWIDTH_USE_HT)
+ return false;
+
+ return true;
+}
+
+u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
+ u32 pri_chan, u8 chan_bw)
+{
+ u8 center_freq_idx = 0;
+
+ if (band & BAND_AAC) {
+ switch (pri_chan) {
+ case 36:
+ case 40:
+ case 44:
+ case 48:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 42;
+ break;
+ case 52:
+ case 56:
+ case 60:
+ case 64:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 58;
+ else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
+ center_freq_idx = 50;
+ break;
+ case 100:
+ case 104:
+ case 108:
+ case 112:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 106;
+ break;
+ case 116:
+ case 120:
+ case 124:
+ case 128:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 122;
+ else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
+ center_freq_idx = 114;
+ break;
+ case 132:
+ case 136:
+ case 140:
+ case 144:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 138;
+ break;
+ case 149:
+ case 153:
+ case 157:
+ case 161:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 155;
+ break;
+ default:
+ center_freq_idx = 42;
+ }
+ }
+
+ return center_freq_idx;
+}
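
The values returned above are IEEE channel indices for the centre of the 80/160 MHz block; converting with the usual 5 GHz mapping (freq = 5000 + 5 * channel, assuming the standard channelisation) shows, for example, that index 42 sits at 5210 MHz, the middle of the 5170-5250 MHz block covered by primary channels 36-48:

#include <stdio.h>

int main(void)
{
	unsigned idx = 42;		/* returned for primary channels 36/40/44/48 @ 80 MHz */
	unsigned mhz = 5000 + 5 * idx;	/* standard 5 GHz channel-to-frequency mapping */

	/* 5210 MHz: the centre of the 5170-5250 MHz span (channels 36-48) */
	printf("center segment 0: channel %u -> %u MHz\n", idx, mhz);
	return 0;
}
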
diff --git a/drivers/net/wireless/mwifiex/11ac.h b/drivers/net/wireless/mwifiex/11ac.h
index 7c2c69b5b3eb..0b02cb6cfcb4 100644
--- a/drivers/net/wireless/mwifiex/11ac.h
+++ b/drivers/net/wireless/mwifiex/11ac.h
@@ -40,4 +40,6 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
int mwifiex_cmd_11ac_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, u16 cmd_action,
struct mwifiex_11ac_vht_cfg *cfg);
+void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
+ struct ieee80211_vht_cap *vht_cap, u8 bands);
#endif /* _MWIFIEX_11AC_H_ */
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c
index 8d683070bdb3..e76b0db4e3e6 100644
--- a/drivers/net/wireless/mwifiex/11h.c
+++ b/drivers/net/wireless/mwifiex/11h.c
@@ -73,8 +73,8 @@ static int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag)
{
u32 enable = flag;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, DOT11H_I, &enable);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, DOT11H_I, &enable, true);
}
/* This functions processes TLV buffer for a pending BSS Join command.
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 7db1a89fdd95..d14ead8beca8 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -34,22 +34,26 @@
*
* The RD responder bit is cleared in the extended capability header.
*/
-void
-mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
- struct mwifiex_ie_types_htcap *ht_cap)
+int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
+ struct ieee80211_ht_cap *ht_cap)
{
- uint16_t ht_ext_cap = le16_to_cpu(ht_cap->ht_cap.extended_ht_cap_info);
+ uint16_t ht_ext_cap = le16_to_cpu(ht_cap->extended_ht_cap_info);
struct ieee80211_supported_band *sband =
priv->wdev->wiphy->bands[radio_type];
- ht_cap->ht_cap.ampdu_params_info =
+ if (WARN_ON_ONCE(!sband)) {
+ dev_err(priv->adapter->dev, "Invalid radio type!\n");
+ return -EINVAL;
+ }
+
+ ht_cap->ampdu_params_info =
(sband->ht_cap.ampdu_factor &
IEEE80211_HT_AMPDU_PARM_FACTOR) |
((sband->ht_cap.ampdu_density <<
IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT) &
IEEE80211_HT_AMPDU_PARM_DENSITY);
- memcpy((u8 *) &ht_cap->ht_cap.mcs, &sband->ht_cap.mcs,
+ memcpy((u8 *)&ht_cap->mcs, &sband->ht_cap.mcs,
sizeof(sband->ht_cap.mcs));
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
@@ -57,13 +61,18 @@ mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
(priv->adapter->sec_chan_offset !=
IEEE80211_HT_PARAM_CHA_SEC_NONE)))
/* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
- SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
+ SETHT_MCS32(ht_cap->mcs.rx_mask);
/* Clear RD responder bit */
ht_ext_cap &= ~IEEE80211_HT_EXT_CAP_RD_RESPONDER;
- ht_cap->ht_cap.cap_info = cpu_to_le16(sband->ht_cap.cap);
- ht_cap->ht_cap.extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
+ ht_cap->cap_info = cpu_to_le16(sband->ht_cap.cap);
+ ht_cap->extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
+
+ if (ISSUPP_BEAMFORMING(priv->adapter->hw_dot_11n_dev_cap))
+ ht_cap->tx_BF_cap_info = cpu_to_le32(MWIFIEX_DEF_11N_TX_BF_CAP);
+
+ return 0;
}
/*
@@ -150,28 +159,34 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
int tid;
struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
+ u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
& SSN_MASK);
- tid = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
- & IEEE80211_ADDBA_PARAM_TID_MASK)
- >> BLOCKACKPARAM_TID_POS;
- if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
- tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid,
- add_ba_rsp->peer_mac_addr);
- if (tx_ba_tbl) {
- dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
- tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
- } else {
- dev_err(priv->adapter->dev, "BA stream not created\n");
- }
- } else {
+ tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
+ >> BLOCKACKPARAM_TID_POS;
+ if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr,
TYPE_DELBA_SENT, true);
if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
priv->aggr_prio_tbl[tid].ampdu_ap =
BA_STREAM_NOT_ALLOWED;
+ return 0;
+ }
+
+ tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr);
+ if (tx_ba_tbl) {
+ dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
+ tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
+ if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
+ priv->add_ba_param.tx_amsdu &&
+ (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
+ tx_ba_tbl->amsdu = true;
+ else
+ tx_ba_tbl->amsdu = false;
+ } else {
+ dev_err(priv->adapter->dev, "BA stream not created\n");
}
return 0;
@@ -311,7 +326,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
(u8 *)bss_desc->bcn_ht_cap,
le16_to_cpu(ht_cap->header.len));
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
*buffer += sizeof(struct mwifiex_ie_types_htcap);
ret_len += sizeof(struct mwifiex_ie_types_htcap);
@@ -527,16 +542,39 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
{
struct host_cmd_ds_11n_addba_req add_ba_req;
+ struct mwifiex_sta_node *sta_ptr;
+ u32 tx_win_size = priv->add_ba_param.tx_win_size;
static u8 dialog_tok;
int ret;
+ u16 block_ack_param_set;
dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
- add_ba_req.block_ack_param_set = cpu_to_le16(
- (u16) ((tid << BLOCKACKPARAM_TID_POS) |
- (priv->add_ba_param.
- tx_win_size << BLOCKACKPARAM_WINSIZE_POS) |
- IMMEDIATE_BLOCK_ACK));
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->is_hw_11ac_capable &&
+ memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
+ sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
+ if (!sta_ptr) {
+ dev_warn(priv->adapter->dev,
+ "BA setup with unknown TDLS peer %pM!\n",
+ peer_mac);
+ return -1;
+ }
+ if (sta_ptr->is_11ac_enabled)
+ tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
+ }
+
+ block_ack_param_set = (u16)((tid << BLOCKACKPARAM_TID_POS) |
+ tx_win_size << BLOCKACKPARAM_WINSIZE_POS |
+ IMMEDIATE_BLOCK_ACK);
+
+ /* enable AMSDU inside AMPDU */
+ if (priv->add_ba_param.tx_amsdu &&
+ (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
+ block_ack_param_set |= BLOCKACKPARAM_AMSDU_SUPP_MASK;
+
+ add_ba_req.block_ack_param_set = cpu_to_le16(block_ack_param_set);
add_ba_req.block_ack_tmo = cpu_to_le16((u16)priv->add_ba_param.timeout);
++dialog_tok;
@@ -548,8 +586,8 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
memcpy(&add_ba_req.peer_mac_addr, peer_mac, ETH_ALEN);
/* We don't wait for the response of this command */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_REQ,
- 0, 0, &add_ba_req);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_REQ,
+ 0, 0, &add_ba_req, false);
return ret;
}
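
block_ack_param_set above is packed following the IEEE 802.11 ADDBA Parameter Set layout: bit 0 A-MSDU permitted, bit 1 immediate block ack, bits 2-5 TID, bits 6-15 buffer (window) size. A minimal sketch with local constants assumed to match the driver's BLOCKACKPARAM_* / IMMEDIATE_BLOCK_ACK definitions:

#include <stdio.h>

/* Assumed to mirror the driver's definitions */
#define AMSDU_SUPP	0x1	/* bit 0: A-MSDU permitted */
#define IMMEDIATE_BA	0x2	/* bit 1: immediate block ack policy */
#define TID_POS		2	/* bits 2-5: TID */
#define WINSIZE_POS	6	/* bits 6-15: buffer size */

int main(void)
{
	unsigned tid = 6, win = 64;
	unsigned short set = (tid << TID_POS) | (win << WINSIZE_POS) |
			     IMMEDIATE_BA | AMSDU_SUPP;

	/* 64 << 6 = 0x1000, 6 << 2 = 0x18, plus 0x2 and 0x1 -> 0x101b */
	printf("block_ack_param_set = 0x%04x\n", set);
	return 0;
}
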
@@ -576,8 +614,8 @@ int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
memcpy(&delba.peer_mac_addr, peer_mac, ETH_ALEN);
/* We don't wait for the response of this command */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA,
- HostCmd_ACT_GEN_SET, 0, &delba);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_DELBA,
+ HostCmd_ACT_GEN_SET, 0, &delba, false);
return ret;
}
@@ -651,6 +689,7 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
dev_dbg(priv->adapter->dev, "data: %s tid=%d\n",
__func__, rx_reo_tbl->tid);
memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN);
+ rx_reo_tbl->amsdu = tx_ba_tsr_tbl->amsdu;
rx_reo_tbl++;
count++;
if (count >= MWIFIEX_MAX_TX_BASTREAM_SUPPORTED)
@@ -706,5 +745,8 @@ void mwifiex_set_ba_params(struct mwifiex_private *priv)
MWIFIEX_STA_AMPDU_DEF_RXWINSIZE;
}
+ priv->add_ba_param.tx_amsdu = true;
+ priv->add_ba_param.rx_amsdu = true;
+
return;
}
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 375db01442bf..40b007a00f4b 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -34,8 +34,8 @@ int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv,
int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc,
u8 **buffer);
-void mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
- struct mwifiex_ie_types_htcap *);
+int mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
+ struct ieee80211_ht_cap *);
int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv,
u16 action, int *htcap_cfg);
void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
@@ -64,14 +64,46 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
-/*
- * This function checks whether AMPDU is allowed or not for a particular TID.
- */
static inline u8
-mwifiex_is_ampdu_allowed(struct mwifiex_private *priv, int tid)
+mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
+ struct mwifiex_ra_list_tbl *ptr, int tid)
{
- return ((priv->aggr_prio_tbl[tid].ampdu_ap != BA_STREAM_NOT_ALLOWED)
- ? true : false);
+ struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ptr->ra);
+
+ if (unlikely(!node))
+ return false;
+
+ return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
+}
+
+/* This function checks whether AMSDU is allowed for a given BA stream. */
+static inline u8
+mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv,
+ struct mwifiex_ra_list_tbl *ptr, int tid)
+{
+ struct mwifiex_tx_ba_stream_tbl *tx_tbl;
+
+ tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
+ if (tx_tbl)
+ return tx_tbl->amsdu;
+
+ return false;
+}
+
+/* This function checks whether AMPDU is allowed or not for a particular TID. */
+static inline u8
+mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
+ struct mwifiex_ra_list_tbl *ptr, int tid)
+{
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
+ } else {
+ if (ptr->tdls_link)
+ return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
+
+ return (priv->aggr_prio_tbl[tid].ampdu_ap !=
+ BA_STREAM_NOT_ALLOWED) ? true : false;
+ }
}
/*
@@ -165,4 +197,14 @@ static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
return node->is_11n_enabled;
}
+
+static inline u8
+mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, u8 *ra)
+{
+ struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
+ if (node)
+ return node->is_11n_enabled;
+
+ return false;
+}
#endif /* !_MWIFIEX_11N_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index ada809f576fe..0c3571f830b0 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -26,6 +26,56 @@
#include "11n.h"
#include "11n_rxreorder.h"
+/* This function dispatches an A-MSDU packet and forwards it to the
+ * kernel/upper layer.
+ */
+static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct rxpd *local_rx_pd = (struct rxpd *)(skb->data);
+ int ret;
+
+ if (le16_to_cpu(local_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
+ struct sk_buff_head list;
+ struct sk_buff *rx_skb;
+
+ __skb_queue_head_init(&list);
+
+ skb_pull(skb, le16_to_cpu(local_rx_pd->rx_pkt_offset));
+ skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
+
+ ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
+ priv->wdev->iftype, 0, false);
+
+ while (!skb_queue_empty(&list)) {
+ rx_skb = __skb_dequeue(&list);
+ ret = mwifiex_recv_packet(priv, rx_skb);
+ if (ret == -1)
+ dev_err(priv->adapter->dev,
+ "Rx of A-MSDU failed");
+ }
+ return 0;
+ }
+
+ return -1;
+}
+
+/* This function processes the Rx packet and forwards it to the kernel/upper
+ * layer.
+ */
+static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
+{
+ int ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
+
+ if (!ret)
+ return 0;
+
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+ return mwifiex_handle_uap_rx_forward(priv, payload);
+
+ return mwifiex_process_rx_packet(priv, payload);
+}
+
/*
* This function dispatches all packets in the Rx reorder table until the
* start window.
@@ -35,8 +85,9 @@
* circular buffer.
*/
static void
-mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
- struct mwifiex_rx_reorder_tbl *tbl, int start_win)
+mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
+ struct mwifiex_rx_reorder_tbl *tbl,
+ int start_win)
{
int pkt_to_send, i;
void *rx_tmp_ptr;
@@ -54,12 +105,8 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
tbl->rx_reorder_ptr[i] = NULL;
}
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
- if (rx_tmp_ptr) {
- if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
- mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
- else
- mwifiex_process_rx_packet(priv, rx_tmp_ptr);
- }
+ if (rx_tmp_ptr)
+ mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -101,11 +148,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-
- if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
- mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
- else
- mwifiex_process_rx_packet(priv, rx_tmp_ptr);
+ mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -135,14 +178,15 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *tbl)
{
unsigned long flags;
+ int start_win;
if (!tbl)
return;
- mwifiex_11n_dispatch_pkt(priv, tbl, (tbl->start_win + tbl->win_size) &
- (MAX_TID_VALUE - 1));
+ start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
+ mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
- del_timer(&tbl->timer_context.timer);
+ del_timer_sync(&tbl->timer_context.timer);
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_del(&tbl->list);
@@ -228,17 +272,17 @@ mwifiex_flush_data(unsigned long context)
{
struct reorder_tmr_cnxt *ctx =
(struct reorder_tmr_cnxt *) context;
- int start_win;
+ int start_win, seq_num;
- start_win = mwifiex_11n_find_last_seq_num(ctx->ptr);
+ seq_num = mwifiex_11n_find_last_seq_num(ctx->ptr);
- if (start_win < 0)
+ if (seq_num < 0)
return;
- dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", start_win);
- mwifiex_11n_dispatch_pkt(ctx->priv, ctx->ptr,
- (ctx->ptr->start_win + start_win + 1) &
- (MAX_TID_VALUE - 1));
+ dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num);
+ start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
+ mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
+ start_win);
}
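
The flush handler above advances the window just past the last buffered frame, wrapping within the 12-bit sequence space (MAX_TID_VALUE is assumed to be 4096 here). Worked example of the wraparound: start_win 4090 plus a last buffered offset of 10 yields a new start_win of 5:

#include <stdio.h>

#define SEQ_SPACE	4096	/* assumed MAX_TID_VALUE (12-bit sequence numbers) */

int main(void)
{
	unsigned start_win = 4090;	/* current window start */
	unsigned last_off = 10;		/* offset of last buffered frame in the window */
	unsigned new_start = (start_win + last_off + 1) & (SEQ_SPACE - 1);

	/* (4090 + 10 + 1) mod 4096 = 5: the window wraps past sequence 4095 */
	printf("new start_win = %u\n", new_start);
	return 0;
}
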
/*
@@ -267,7 +311,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
*/
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (tbl) {
- mwifiex_11n_dispatch_pkt(priv, tbl, seq_num);
+ mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
return;
}
/* if !tbl then create one */
@@ -279,6 +323,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
new_node->tid = tid;
memcpy(new_node->ta, ta, ETH_ALEN);
new_node->start_win = seq_num;
+ new_node->init_win = seq_num;
+ new_node->flags = 0;
if (mwifiex_queuing_ra_based(priv)) {
dev_dbg(priv->adapter->dev,
@@ -290,15 +336,20 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
last_seq = node->rx_seq[tid];
}
} else {
- last_seq = priv->rx_seq[tid];
+ node = mwifiex_get_sta_entry(priv, ta);
+ if (node)
+ last_seq = node->rx_seq[tid];
+ else
+ last_seq = priv->rx_seq[tid];
}
if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
- last_seq >= new_node->start_win)
+ last_seq >= new_node->start_win) {
new_node->start_win = last_seq + 1;
+ new_node->flags |= RXREOR_INIT_WINDOW_SHIFT;
+ }
new_node->win_size = win_size;
- new_node->flags = 0;
new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
GFP_KERNEL);
@@ -358,10 +409,28 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
*cmd_addba_req)
{
struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
+ struct mwifiex_sta_node *sta_ptr;
+ u32 rx_win_size = priv->add_ba_param.rx_win_size;
u8 tid;
int win_size;
uint16_t block_ack_param_set;
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->is_hw_11ac_capable &&
+ memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
+ sta_ptr = mwifiex_get_sta_entry(priv,
+ cmd_addba_req->peer_mac_addr);
+ if (!sta_ptr) {
+ dev_warn(priv->adapter->dev,
+ "BA setup with unknown TDLS peer %pM!\n",
+ cmd_addba_req->peer_mac_addr);
+ return -1;
+ }
+ if (sta_ptr->is_11ac_enabled)
+ rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
+ }
+
cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);
@@ -376,10 +445,12 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
>> BLOCKACKPARAM_TID_POS;
add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
- /* We donot support AMSDU inside AMPDU, hence reset the bit */
- block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
- block_ack_param_set |= (priv->add_ba_param.rx_win_size <<
- BLOCKACKPARAM_WINSIZE_POS);
+
+ /* If we don't support AMSDU inside AMPDU, reset the bit */
+ if (!priv->add_ba_param.rx_amsdu ||
+ (priv->aggr_prio_tbl[tid].amsdu == BA_STREAM_NOT_ALLOWED))
+ block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
+ block_ack_param_set |= rx_win_size << BLOCKACKPARAM_WINSIZE_POS;
add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
& IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
@@ -431,33 +502,46 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *tbl;
int start_win, end_win, win_size;
u16 pkt_index;
+ bool init_window_shift = false;
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
- if (pkt_type != PKT_TYPE_BAR) {
- if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
- mwifiex_handle_uap_rx_forward(priv, payload);
- else
- mwifiex_process_rx_packet(priv, payload);
- }
+ if (pkt_type != PKT_TYPE_BAR)
+ mwifiex_11n_dispatch_pkt(priv, payload);
return 0;
}
+
+ if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
+ mwifiex_11n_dispatch_pkt(priv, payload);
+ return 0;
+ }
+
start_win = tbl->start_win;
win_size = tbl->win_size;
end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
- del_timer(&tbl->timer_context.timer);
+ if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) {
+ init_window_shift = true;
+ tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT;
+ }
mod_timer(&tbl->timer_context.timer,
jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size));
- /*
- * If seq_num is less then starting win then ignore and drop the
- * packet
- */
if (tbl->flags & RXREOR_FORCE_NO_DROP) {
dev_dbg(priv->adapter->dev,
"RXREOR_FORCE_NO_DROP when HS is activated\n");
tbl->flags &= ~RXREOR_FORCE_NO_DROP;
+ } else if (init_window_shift && seq_num < start_win &&
+ seq_num >= tbl->init_win) {
+ dev_dbg(priv->adapter->dev,
+ "Sender TID sequence number reset %d->%d for SSN %d\n",
+ start_win, seq_num, tbl->init_win);
+ tbl->start_win = start_win = seq_num;
+ end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
} else {
+ /*
+ * If seq_num is less than the start window, ignore and drop
+ * the packet
+ */
if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
if (seq_num >= ((start_win + TWOPOW11) &
(MAX_TID_VALUE - 1)) &&
@@ -485,7 +569,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
start_win = (end_win - win_size) + 1;
else
start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
- mwifiex_11n_dispatch_pkt(priv, tbl, start_win);
+ mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
}
if (pkt_type != PKT_TYPE_BAR) {
@@ -576,16 +660,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
* Check if we had rejected the ADDBA, if yes then do not create
* the stream
*/
- if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
- win_size = (block_ack_param_set &
- IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
- >> BLOCKACKPARAM_WINSIZE_POS;
-
- dev_dbg(priv->adapter->dev,
- "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
- add_ba_rsp->peer_mac_addr, tid,
- add_ba_rsp->ssn, win_size);
- } else {
+ if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
add_ba_rsp->peer_mac_addr, tid);
@@ -593,8 +668,28 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
add_ba_rsp->peer_mac_addr);
if (tbl)
mwifiex_del_rx_reorder_entry(priv, tbl);
+
+ return 0;
}
+ win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
+ >> BLOCKACKPARAM_WINSIZE_POS;
+
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
+ add_ba_rsp->peer_mac_addr);
+ if (tbl) {
+ if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
+ priv->add_ba_param.rx_amsdu &&
+ (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
+ tbl->amsdu = true;
+ else
+ tbl->amsdu = false;
+ }
+
+ dev_dbg(priv->adapter->dev,
+ "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
+ add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size);
+
return 0;
}
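
win_size a few lines above is recovered from the same ADDBA parameter set by masking bits 6-15 (IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK is assumed to be 0xffc0, the standard buffer-size field). A minimal sketch of that unpacking, reusing the value built in the earlier ADDBA request sketch:

#include <stdio.h>

int main(void)
{
	unsigned short set = 0x101b;		/* value from the packing sketch above */
	unsigned win = (set & 0xffc0) >> 6;	/* buffer size field, bits 6-15 */
	unsigned tid = (set >> 2) & 0xf;	/* TID field, bits 2-5 */

	printf("tid=%u win_size=%u\n", tid, win);	/* tid=6 win_size=64 */
	return 0;
}
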
@@ -615,7 +710,7 @@ void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
delba.del_ba_param_set |= cpu_to_le16(
(u16) event->origninator << DELBA_INITIATOR_POS);
delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
- mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba);
+ mwifiex_send_cmd(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba, false);
}
/*
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index 4064041ac852..0fc76e4a60f8 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -42,7 +42,8 @@
#define BA_SETUP_PACKET_OFFSET 16
enum mwifiex_rxreor_flags {
- RXREOR_FORCE_NO_DROP = 1<<0,
+ RXREOR_FORCE_NO_DROP = 1<<0,
+ RXREOR_INIT_WINDOW_SHIFT = 1<<1,
};
static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index a42a506fd32b..2aa208ffbe23 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -41,6 +41,7 @@ mwifiex-y += uap_txrx.o
mwifiex-y += cfg80211.o
mwifiex-y += ethtool.o
mwifiex-y += 11h.o
+mwifiex-y += tdls.o
mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_MWIFIEX) += mwifiex.o
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index 3d64613ebb29..b9242c3dca43 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -131,7 +131,7 @@ info
hs_configured = <0/1, host sleep not configured/configured>
hs_activated = <0/1, extended host sleep not activated/activated>
num_tx_timeout = <number of Tx timeout>
- num_cmd_timeout = <number of timeout commands>
+ is_cmd_timedout = <0/1 command timeout not occurred/occurred>
timeout_cmd_id = <command id of the last timeout command>
timeout_cmd_act = <command action of the last timeout command>
last_cmd_id = <command id of the last several commands sent to device>
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8bfc07cd330e..21ee27ab7b74 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -252,9 +252,9 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
if (mask != priv->mgmt_frame_mask) {
priv->mgmt_frame_mask = mask;
- mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
- HostCmd_ACT_GEN_SET, 0,
- &priv->mgmt_frame_mask);
+ mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->mgmt_frame_mask, false);
wiphy_dbg(wiphy, "info: mgmt frame registered\n");
}
}
@@ -515,8 +515,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
- HostCmd_ACT_GEN_SET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
+ HostCmd_ACT_GEN_SET, 0, NULL, false)) {
wiphy_err(wiphy, "11D: setting domain info in FW\n");
return -1;
}
@@ -580,9 +580,9 @@ mwifiex_set_frag(struct mwifiex_private *priv, u32 frag_thr)
frag_thr > MWIFIEX_FRAG_MAX_VALUE)
frag_thr = MWIFIEX_FRAG_MAX_VALUE;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
- &frag_thr);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
+ &frag_thr, true);
}
/*
@@ -597,9 +597,9 @@ mwifiex_set_rts(struct mwifiex_private *priv, u32 rts_thr)
if (rts_thr < MWIFIEX_RTS_MIN_VALUE || rts_thr > MWIFIEX_RTS_MAX_VALUE)
rts_thr = MWIFIEX_RTS_MAX_VALUE;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, RTS_THRESH_I,
- &rts_thr);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, RTS_THRESH_I,
+ &rts_thr, true);
}
/*
@@ -637,20 +637,19 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
bss_started = priv->bss_started;
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_UAP_BSS_STOP,
- HostCmd_ACT_GEN_SET, 0,
- NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0,
+ NULL, true);
if (ret) {
wiphy_err(wiphy, "Failed to stop the BSS\n");
kfree(bss_cfg);
return ret;
}
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_UAP_SYS_CONFIG,
- HostCmd_ACT_GEN_SET,
- UAP_BSS_PARAMS_I, bss_cfg);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg,
+ false);
kfree(bss_cfg);
@@ -662,10 +661,9 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
if (!bss_started)
break;
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_UAP_BSS_START,
- HostCmd_ACT_GEN_SET, 0,
- NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
+ HostCmd_ACT_GEN_SET, 0,
+ NULL, false);
if (ret) {
wiphy_err(wiphy, "Failed to start BSS\n");
return ret;
@@ -700,8 +698,8 @@ mwifiex_cfg80211_deinit_p2p(struct mwifiex_private *priv)
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA)
mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA);
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
- HostCmd_ACT_GEN_SET, 0, &mode))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
return 0;
@@ -721,13 +719,13 @@ mwifiex_cfg80211_init_p2p_client(struct mwifiex_private *priv)
return -1;
mode = P2P_MODE_DEVICE;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
- HostCmd_ACT_GEN_SET, 0, &mode))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
mode = P2P_MODE_CLIENT;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
- HostCmd_ACT_GEN_SET, 0, &mode))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
return 0;
@@ -747,13 +745,13 @@ mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
return -1;
mode = P2P_MODE_DEVICE;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
- HostCmd_ACT_GEN_SET, 0, &mode))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
mode = P2P_MODE_GO;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
- HostCmd_ACT_GEN_SET, 0, &mode))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
@@ -853,8 +851,8 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
return ret;
}
@@ -942,8 +940,8 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
/* Get signal information from the firmware */
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO,
- HostCmd_ACT_GEN_GET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
+ HostCmd_ACT_GEN_GET, 0, NULL, true)) {
dev_err(priv->adapter->dev, "failed to get signal information\n");
return -EFAULT;
}
@@ -954,9 +952,9 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
}
/* Get DTIM period information from firmware */
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
- &priv->dtim_period);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
+ &priv->dtim_period, true);
mwifiex_parse_htinfo(priv, priv->tx_htinfo, &sinfo->txrate);
@@ -1160,9 +1158,10 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
enum ieee80211_band band;
+ struct mwifiex_adapter *adapter = priv->adapter;
if (!priv->media_connected) {
- dev_err(priv->adapter->dev,
+ dev_err(adapter->dev,
"Can not set Tx data rate in disconnected state\n");
return -EINVAL;
}
@@ -1183,11 +1182,18 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
/* Fill HT MCS rates */
bitmap_rates[2] = mask->control[band].ht_mcs[0];
- if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2)
+ if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
bitmap_rates[2] |= mask->control[band].ht_mcs[1] << 8;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
- HostCmd_ACT_GEN_SET, 0, bitmap_rates);
+ /* Fill VHT MCS rates */
+ if (adapter->fw_api_ver == MWIFIEX_FW_V15) {
+ bitmap_rates[10] = mask->control[band].vht_mcs[0];
+ if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
+ bitmap_rates[11] = mask->control[band].vht_mcs[1];
+ }
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TX_RATE_CFG,
+ HostCmd_ACT_GEN_SET, 0, bitmap_rates, true);
}
/*
@@ -1216,14 +1222,14 @@ static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
subsc_evt.bcn_h_rssi_cfg.abs_value = abs(rssi_thold);
subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
- return mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
- 0, 0, &subsc_evt);
+ return mwifiex_send_cmd(priv,
+ HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+ 0, 0, &subsc_evt, true);
} else {
subsc_evt.action = HostCmd_ACT_BITWISE_CLR;
- return mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
- 0, 0, &subsc_evt);
+ return mwifiex_send_cmd(priv,
+ HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+ 0, 0, &subsc_evt, true);
}
return 0;
@@ -1276,10 +1282,9 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
if (!mac || is_broadcast_ether_addr(mac)) {
wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__);
list_for_each_entry(sta_node, &priv->sta_list, list) {
- if (mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_UAP_STA_DEAUTH,
- HostCmd_ACT_GEN_SET, 0,
- sta_node->mac_addr))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
+ HostCmd_ACT_GEN_SET, 0,
+ sta_node->mac_addr, true))
return -1;
mwifiex_uap_del_sta_data(priv, sta_node);
}
@@ -1289,10 +1294,9 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
sta_node = mwifiex_get_sta_entry(priv, mac);
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
if (sta_node) {
- if (mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_UAP_STA_DEAUTH,
- HostCmd_ACT_GEN_SET, 0,
- sta_node->mac_addr))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
+ HostCmd_ACT_GEN_SET, 0,
+ sta_node->mac_addr, true))
return -1;
mwifiex_uap_del_sta_data(priv, sta_node);
}
@@ -1328,13 +1332,40 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
tx_ant = RF_ANTENNA_AUTO;
rx_ant = RF_ANTENNA_AUTO;
}
+ } else {
+ struct ieee80211_sta_ht_cap *ht_info;
+ int rx_mcs_supp;
+ enum ieee80211_band band;
+
+ if ((tx_ant == 0x1 && rx_ant == 0x1)) {
+ adapter->user_dev_mcs_support = HT_STREAM_1X1;
+ if (adapter->is_hw_11ac_capable)
+ adapter->usr_dot_11ac_mcs_support =
+ MWIFIEX_11AC_MCS_MAP_1X1;
+ } else {
+ adapter->user_dev_mcs_support = HT_STREAM_2X2;
+ if (adapter->is_hw_11ac_capable)
+ adapter->usr_dot_11ac_mcs_support =
+ MWIFIEX_11AC_MCS_MAP_2X2;
+ }
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!adapter->wiphy->bands[band])
+ continue;
+
+ ht_info = &adapter->wiphy->bands[band]->ht_cap;
+ rx_mcs_supp =
+ GET_RXMCSSUPP(adapter->user_dev_mcs_support);
+ memset(&ht_info->mcs, 0, adapter->number_of_antenna);
+ memset(&ht_info->mcs, 0xff, rx_mcs_supp);
+ }
}
ant_cfg.tx_ant = tx_ant;
ant_cfg.rx_ant = rx_ant;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_ANTENNA,
- HostCmd_ACT_GEN_SET, 0, &ant_cfg);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_RF_ANTENNA,
+ HostCmd_ACT_GEN_SET, 0, &ant_cfg, true);
}
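
The per-band loop above advertises the configured stream count by writing 0xff into the first one or two bytes of the 10-byte HT MCS Rx mask (one byte of MCS bits per spatial stream: MCS 0-7 for 1x1, MCS 0-15 for 2x2); GET_RXMCSSUPP() is assumed to yield that stream count. A minimal user-space sketch, which clears the whole mask for simplicity:

#include <stdio.h>
#include <string.h>

#define HT_MCS_MASK_LEN	10	/* size of the ieee80211_mcs_info Rx mask */

static void fill_rx_mask(unsigned char *mask, int rx_streams)
{
	memset(mask, 0, HT_MCS_MASK_LEN);
	memset(mask, 0xff, rx_streams);	/* one 0xff byte per supported stream */
}

int main(void)
{
	unsigned char mask[HT_MCS_MASK_LEN];
	int i;

	fill_rx_mask(mask, 2);		/* 2x2: MCS 0-15 */
	for (i = 0; i < HT_MCS_MASK_LEN; i++)
		printf("%02x ", mask[i]);
	printf("\n");			/* ff ff 00 00 00 00 00 00 00 00 */
	return 0;
}
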
/* cfg80211 operation handler for stop ap.
@@ -1349,8 +1380,8 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
priv->ap_11n_enabled = 0;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL, true)) {
wiphy_err(wiphy, "Failed to stop the BSS\n");
return -1;
}
@@ -1416,9 +1447,6 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_GN;
-
- if (params->chandef.width > NL80211_CHAN_WIDTH_40)
- config_bands |= BAND_GAC;
} else {
bss_cfg->band_cfg = BAND_CONFIG_A;
config_bands = BAND_A;
@@ -1464,16 +1492,16 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
}
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL, true)) {
wiphy_err(wiphy, "Failed to stop the BSS\n");
kfree(bss_cfg);
return -1;
}
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
- HostCmd_ACT_GEN_SET,
- UAP_BSS_PARAMS_I, bss_cfg)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg, false)) {
wiphy_err(wiphy, "Failed to set the SSID\n");
kfree(bss_cfg);
return -1;
@@ -1481,8 +1509,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
kfree(bss_cfg);
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_BSS_START,
- HostCmd_ACT_GEN_SET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
+ HostCmd_ACT_GEN_SET, 0, NULL, false)) {
wiphy_err(wiphy, "Failed to start the BSS\n");
return -1;
}
@@ -1492,9 +1520,9 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
else
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &priv->curr_pkt_filter))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter, true))
return -1;
return 0;
@@ -1583,8 +1611,9 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
* the function notifies the CFG802.11 subsystem of the new BSS connection.
*/
static int
-mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
- u8 *bssid, int mode, struct ieee80211_channel *channel,
+mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
+ const u8 *ssid, const u8 *bssid, int mode,
+ struct ieee80211_channel *channel,
struct cfg80211_connect_params *sme, bool privacy)
{
struct cfg80211_ssid req_ssid;
@@ -1881,7 +1910,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
params->privacy);
done:
if (!ret) {
- cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
+ params->chandef.chan, GFP_KERNEL);
dev_dbg(priv->adapter->dev,
"info: joined/created adhoc network with bssid"
" %pM successfully\n", priv->cfg_bssid);
@@ -2070,10 +2100,10 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
else
ht_info->cap &= ~IEEE80211_HT_CAP_SGI_40;
- if (ISSUPP_RXSTBC(adapter->hw_dot_11n_dev_cap))
- ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ if (adapter->user_dev_mcs_support == HT_STREAM_2X2)
+ ht_info->cap |= 3 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
else
- ht_info->cap &= ~(3 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
if (ISSUPP_TXSTBC(adapter->hw_dot_11n_dev_cap))
ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
@@ -2098,8 +2128,8 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
ht_info->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
ht_info->cap |= IEEE80211_HT_CAP_SM_PS;
- rx_mcs_supp = GET_RXMCSSUPP(adapter->hw_dev_mcs_support);
- /* Set MCS for 1x1 */
+ rx_mcs_supp = GET_RXMCSSUPP(adapter->user_dev_mcs_support);
+ /* Set MCS for 1x1/2x2 */
memset(mcs, 0xff, rx_mcs_supp);
/* Clear all the other values */
memset(&mcs[rx_mcs_supp], 0,
@@ -2460,9 +2490,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
MWIFIEX_CRITERIA_UNICAST |
MWIFIEX_CRITERIA_MULTICAST;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MEF_CFG,
- HostCmd_ACT_GEN_SET, 0,
- &mef_cfg);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mef_cfg, true);
kfree(mef_entry);
return ret;
@@ -2574,9 +2603,9 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
if (!coalesce) {
dev_dbg(adapter->dev,
"Disable coalesce and reset all previous rules\n");
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
- HostCmd_ACT_GEN_SET, 0,
- &coalesce_cfg);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
+ HostCmd_ACT_GEN_SET, 0,
+ &coalesce_cfg, true);
}
coalesce_cfg.num_of_rules = coalesce->n_rules;
@@ -2591,8 +2620,172 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
}
}
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
- HostCmd_ACT_GEN_SET, 0, &coalesce_cfg);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &coalesce_cfg, true);
+}
+
+/* cfg80211 ops handler for tdls_mgmt.
+ * Function prepares TDLS action frame packets and forwards them to FW
+ */
+static int
+mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, u32 peer_capability,
+ const u8 *extra_ies, size_t extra_ies_len)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ int ret;
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
+ return -ENOTSUPP;
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Setup Request to %pM status_code=%d\n", peer,
+ status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Setup Response to %pM status_code=%d\n",
+ peer, status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_SETUP_CONFIRM:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Confirm to %pM status_code=%d\n", peer,
+ status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_TEARDOWN:
+ dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n",
+ peer);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Discovery Request to %pM\n", peer);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Discovery Response to %pM\n", peer);
+ ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ default:
+ dev_warn(priv->adapter->dev,
+ "Unknown TDLS mgmt/action frame %pM\n", peer);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation action)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+ !(wiphy->flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
+ return -ENOTSUPP;
+
+ dev_dbg(priv->adapter->dev,
+ "TDLS peer=%pM, oper=%d\n", peer, action);
+
+ switch (action) {
+ case NL80211_TDLS_ENABLE_LINK:
+ action = MWIFIEX_TDLS_ENABLE_LINK;
+ break;
+ case NL80211_TDLS_DISABLE_LINK:
+ action = MWIFIEX_TDLS_DISABLE_LINK;
+ break;
+ case NL80211_TDLS_TEARDOWN:
+ /* shouldn't happen!*/
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: teardown from driver not supported\n");
+ return -EINVAL;
+ case NL80211_TDLS_SETUP:
+ /* shouldn't happen!*/
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: setup from driver not supported\n");
+ return -EINVAL;
+ case NL80211_TDLS_DISCOVERY_REQ:
+ /* shouldn't happen!*/
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: discovery from driver not supported\n");
+ return -EINVAL;
+ default:
+ dev_err(priv->adapter->dev,
+ "tdls_oper: operation not supported\n");
+ return -ENOTSUPP;
+ }
+
+ return mwifiex_tdls_oper(priv, peer, action);
+}
+
+static int
+mwifiex_cfg80211_add_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 *mac, struct station_parameters *params)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
+ return -ENOTSUPP;
+
+ return mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CREATE_LINK);
+}
+
+static int
+mwifiex_cfg80211_change_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 *mac, struct station_parameters *params)
+{
+ int ret;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ /* we support the change_station handler only for TDLS peers */
+ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
+ return -ENOTSUPP;
+
+ priv->sta_params = params;
+
+ ret = mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CONFIG_LINK);
+ priv->sta_params = NULL;
+
+ return ret;
}
/* station cfg80211 operations */
@@ -2630,6 +2823,10 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_wakeup = mwifiex_cfg80211_set_wakeup,
#endif
.set_coalesce = mwifiex_cfg80211_set_coalesce,
+ .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
+ .tdls_oper = mwifiex_cfg80211_tdls_oper,
+ .add_station = mwifiex_cfg80211_add_station,
+ .change_station = mwifiex_cfg80211_change_station,
};
#ifdef CONFIG_PM
@@ -2715,6 +2912,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+ if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+
wiphy->regulatory_flags |=
REGULATORY_CUSTOM_REG |
REGULATORY_STRICT_REG;
@@ -2736,7 +2938,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->features |= NL80211_FEATURE_HT_IBSS |
NL80211_FEATURE_INACTIVITY_TIMER |
- NL80211_FEATURE_LOW_PRIORITY_SCAN;
+ NL80211_FEATURE_LOW_PRIORITY_SCAN |
+ NL80211_FEATURE_NEED_OBSS_SCAN;
/* Reserve space for mwifiex specific private data for BSS */
wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -2767,17 +2970,17 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
country_code);
}
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_GET, FRAG_THRESH_I, &thr);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, FRAG_THRESH_I, &thr, true);
wiphy->frag_threshold = thr;
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_GET, RTS_THRESH_I, &thr);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, RTS_THRESH_I, &thr, true);
wiphy->rts_threshold = thr;
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_GET, SHORT_RETRY_LIM_I, &retry);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, SHORT_RETRY_LIM_I, &retry, true);
wiphy->retry_short = (u8) retry;
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_GET, LONG_RETRY_LIM_I, &retry);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, LONG_RETRY_LIM_I, &retry, true);
wiphy->retry_long = (u8) retry;
adapter->wiphy = wiphy;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 9eefacbc844b..0ddec3d4b059 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -71,6 +71,95 @@ u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30,
static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
+/* For every mcs_rate row, the first 8 entries are for stream 1x1,
+ * and all 16 entries are for stream 2x2.
+ */
+static const u16 mcs_rate[4][16] = {
+ /* LGI 40M */
+ { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
+ 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
+
+ /* SGI 40M */
+ { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
+ 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
+
+ /* LGI 20M */
+ { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
+ 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
+
+ /* SGI 20M */
+ { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
+ 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
+};
+
+/* AC rates */
+static const u16 ac_mcs_rate_nss1[8][10] = {
+ /* LG 160M */
+ { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+ 0x492, 0x57C, 0x618 },
+
+ /* SG 160M */
+ { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+ 0x514, 0x618, 0x6C6 },
+
+ /* LG 80M */
+ { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
+ 0x249, 0x2BE, 0x30C },
+
+ /* SG 80M */
+ { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
+ 0x28A, 0x30C, 0x363 },
+
+ /* LG 40M */
+ { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
+ 0x10E, 0x144, 0x168 },
+
+ /* SG 40M */
+ { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
+ 0x12C, 0x168, 0x190 },
+
+ /* LG 20M */
+ { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
+
+ /* SG 20M */
+ { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
+};
+
+/* NSS2 note: the values in the table are twice the actual rate */
+static const u16 ac_mcs_rate_nss2[8][10] = {
+ /* LG 160M */
+ { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
+ 0x924, 0xAF8, 0xC30 },
+
+ /* SG 160M */
+ { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
+ 0xA28, 0xC30, 0xD8B },
+
+ /* LG 80M */
+ { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+ 0x492, 0x57C, 0x618 },
+
+ /* SG 80M */
+ { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+ 0x514, 0x618, 0x6C6 },
+
+ /* LG 40M */
+ { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
+ 0x21C, 0x288, 0x2D0 },
+
+ /* SG 40M */
+ { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
+ 0x258, 0x2D0, 0x320 },
+
+ /* LG 20M */
+ { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
+ 0x138, 0x00 },
+
+ /* SG 20M */
+ { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
+ 0x15B, 0x00 },
+};
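
Per the NSS2 note above, halving a table entry gives the rate in Mbps with 0.5 Mbps resolution: in the LG 80M row, 0x75 = 117 is twice 58.5 Mbps (VHT MCS 0, NSS 2, 80 MHz, long GI) and 0x618 = 1560 is twice 780 Mbps (MCS 9). A minimal sketch of that conversion:

#include <stdio.h>

int main(void)
{
	/* First and last entries of the "LG 80M" NSS2 row above */
	unsigned short entries[2] = { 0x75, 0x618 };
	int i;

	for (i = 0; i < 2; i++)
		/* 0x75 -> 58.5 Mbps (MCS 0), 0x618 -> 780.0 Mbps (MCS 9) */
		printf("entry 0x%x -> %.1f Mbps\n", entries[i],
		       entries[i] / 2.0);
	return 0;
}
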
+
struct region_code_mapping {
u8 code;
u8 region[IEEE80211_COUNTRY_STRING_LEN];
@@ -109,95 +198,6 @@ u8 *mwifiex_11d_code_2_region(u8 code)
u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
u8 index, u8 ht_info)
{
- /*
- * For every mcs_rate line, the first 8 bytes are for stream 1x1,
- * and all 16 bytes are for stream 2x2.
- */
- u16 mcs_rate[4][16] = {
- /* LGI 40M */
- { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
- 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
-
- /* SGI 40M */
- { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
- 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
-
- /* LGI 20M */
- { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
- 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
-
- /* SGI 20M */
- { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
- 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
- };
- /* AC rates */
- u16 ac_mcs_rate_nss1[8][10] = {
- /* LG 160M */
- { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
- 0x492, 0x57C, 0x618 },
-
- /* SG 160M */
- { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
- 0x514, 0x618, 0x6C6 },
-
- /* LG 80M */
- { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
- 0x249, 0x2BE, 0x30C },
-
- /* SG 80M */
- { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
- 0x28A, 0x30C, 0x363 },
-
- /* LG 40M */
- { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
- 0x10E, 0x144, 0x168 },
-
- /* SG 40M */
- { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
- 0x12C, 0x168, 0x190 },
-
- /* LG 20M */
- { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
-
- /* SG 20M */
- { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
- };
- /* NSS2 note: the value in the table is 2 multiplier of the actual
- * rate
- */
- u16 ac_mcs_rate_nss2[8][10] = {
- /* LG 160M */
- { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
- 0x924, 0xAF8, 0xC30 },
-
- /* SG 160M */
- { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
- 0xA28, 0xC30, 0xD8B },
-
- /* LG 80M */
- { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
- 0x492, 0x57C, 0x618 },
-
- /* SG 80M */
- { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
- 0x514, 0x618, 0x6C6 },
-
- /* LG 40M */
- { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
- 0x21C, 0x288, 0x2D0 },
-
- /* SG 40M */
- { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
- 0x258, 0x2D0, 0x320 },
-
- /* LG 20M */
- { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
- 0x138, 0x00 },
-
- /* SG 20M */
- { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
- 0x15B, 0x00 },
- };
u32 rate = 0;
u8 mcs_index = 0;
u8 bw = 0;
@@ -252,28 +252,8 @@ u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv,
u8 index, u8 ht_info)
{
- /* For every mcs_rate line, the first 8 bytes are for stream 1x1,
- * and all 16 bytes are for stream 2x2.
- */
- u16 mcs_rate[4][16] = {
- /* LGI 40M */
- { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
- 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
-
- /* SGI 40M */
- { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
- 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
-
- /* LGI 20M */
- { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
- 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
-
- /* SGI 20M */
- { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
- 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
- };
u32 mcs_num_supp =
- (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
+ (priv->adapter->user_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
u32 rate;
if (priv->adapter->is_hw_11ac_capable)
@@ -458,7 +438,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
break;
case BAND_G:
case BAND_G | BAND_GN:
- case BAND_G | BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_g\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_g,
@@ -469,10 +448,7 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
case BAND_A | BAND_B:
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC:
- case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN |
- BAND_AAC | BAND_GAC:
case BAND_B | BAND_G | BAND_GN:
- case BAND_B | BAND_G | BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_bg\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_bg,
@@ -496,7 +472,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
sizeof(supported_rates_a));
break;
case BAND_GN:
- case BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_n\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_n,
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1ddc8b2e3722..1062c918a7bf 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -37,13 +37,12 @@
static void
mwifiex_init_cmd_node(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node,
- u32 cmd_oid, void *data_buf)
+ u32 cmd_oid, void *data_buf, bool sync)
{
cmd_node->priv = priv;
cmd_node->cmd_oid = cmd_oid;
- if (priv->adapter->cmd_wait_q_required) {
- cmd_node->wait_q_enabled = priv->adapter->cmd_wait_q_required;
- priv->adapter->cmd_wait_q_required = false;
+ if (sync) {
+ cmd_node->wait_q_enabled = true;
cmd_node->cmd_wait_q_woken = false;
cmd_node->condition = &cmd_node->cmd_wait_q_woken;
}
@@ -166,8 +165,10 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
dev_err(adapter->dev,
"DNLD_CMD: FW in reset state, ignore cmd %#x\n",
cmd_code);
- mwifiex_complete_cmd(adapter, cmd_node);
+ if (cmd_node->wait_q_enabled)
+ mwifiex_complete_cmd(adapter, cmd_node);
mwifiex_recycle_cmd_node(adapter, cmd_node);
+ queue_work(adapter->workqueue, &adapter->main_work);
return -1;
}
@@ -276,11 +277,11 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ adapter->seq_num++;
sleep_cfm_buf->seq_num =
cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
(adapter->seq_num, priv->bss_num,
priv->bss_type)));
- adapter->seq_num++;
if (adapter->iface_type == MWIFIEX_USB) {
sleep_cfm_tmp =
@@ -480,28 +481,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
}
/*
- * This function is used to send synchronous command to the firmware.
- *
- * it allocates a wait queue for the command and wait for the command
- * response.
- */
-int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
- u16 cmd_action, u32 cmd_oid, void *data_buf)
-{
- int ret = 0;
- struct mwifiex_adapter *adapter = priv->adapter;
-
- adapter->cmd_wait_q_required = true;
-
- ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
- data_buf);
-
- return ret;
-}
-
-
-/*
- * This function prepares a command and asynchronously send it to the firmware.
+ * This function prepares a command and sends it to the firmware.
*
* Preparation includes -
* - Sanity tests to make sure the card is still present or the FW
@@ -511,8 +491,8 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
* - Fill up the non-default parameters and buffer pointers
* - Add the command to pending queue
*/
-int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
- u16 cmd_action, u32 cmd_oid, void *data_buf)
+int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
+ u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync)
{
int ret;
struct mwifiex_adapter *adapter = priv->adapter;
@@ -529,11 +509,21 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
return -1;
}
+ if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
+ dev_err(adapter->dev, "PREP_CMD: host entering sleep state\n");
+ return -1;
+ }
+
if (adapter->surprise_removed) {
dev_err(adapter->dev, "PREP_CMD: card is removed\n");
return -1;
}
+ if (adapter->is_cmd_timedout) {
+ dev_err(adapter->dev, "PREP_CMD: FW is in bad state\n");
+ return -1;
+ }
+
if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) {
if (cmd_no != HostCmd_CMD_FUNC_INIT) {
dev_err(adapter->dev, "PREP_CMD: FW in reset state\n");
@@ -550,7 +540,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
}
/* Initialize the command node */
- mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf);
+ mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync);
if (!cmd_node->cmd_skb) {
dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n");
@@ -595,7 +585,8 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
}
/* Send command */
- if (cmd_no == HostCmd_CMD_802_11_SCAN) {
+ if (cmd_no == HostCmd_CMD_802_11_SCAN ||
+ cmd_no == HostCmd_CMD_802_11_SCAN_EXT) {
mwifiex_queue_scan_cmd(priv, cmd_node);
} else {
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
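
For readers tracking the API consolidation above: mwifiex_send_cmd_sync() and mwifiex_send_cmd_async() are folded into a single mwifiex_send_cmd() whose final bool selects whether the caller blocks on cmd_wait_q. A minimal, hypothetical caller is sketched below; the helper name is illustrative and not part of this patch, and the MAC_CONTROL usage mirrors the join.c hunks later in this diff.

	/* Issue MAC_CONTROL either fire-and-forget (sync == false) or
	 * blocking until the command response arrives (sync == true).
	 */
	static int example_set_mac_control(struct mwifiex_private *priv,
					   bool sync)
	{
		return mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
					HostCmd_ACT_GEN_SET, 0,
					&priv->curr_pkt_filter, sync);
	}
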
@@ -785,7 +776,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
unsigned long flags;
/* Now we got response from FW, cancel the command timer */
- del_timer(&adapter->cmd_timer);
+ del_timer_sync(&adapter->cmd_timer);
if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
resp = (struct host_cmd_ds_command *) adapter->upld_buf;
@@ -794,7 +785,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
return -1;
}
- adapter->num_cmd_timeout = 0;
+ adapter->is_cmd_timedout = 0;
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
@@ -905,8 +896,7 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
struct cmd_ctrl_node *cmd_node;
struct timeval tstamp;
- adapter->num_cmd_timeout++;
- adapter->dbg.num_cmd_timeout++;
+ adapter->is_cmd_timedout = 1;
if (!adapter->curr_cmd) {
dev_dbg(adapter->dev, "cmd: empty curr_cmd\n");
return;
@@ -929,8 +919,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
adapter->dbg.num_cmd_host_to_card_failure);
- dev_err(adapter->dev, "num_cmd_timeout = %d\n",
- adapter->dbg.num_cmd_timeout);
+ dev_err(adapter->dev, "is_cmd_timedout = %d\n",
+ adapter->is_cmd_timedout);
dev_err(adapter->dev, "num_tx_timeout = %d\n",
adapter->dbg.num_tx_timeout);
@@ -987,13 +977,14 @@ void
mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
- unsigned long flags;
+ unsigned long flags, cmd_flags;
+ struct mwifiex_private *priv;
+ int i;
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
/* Cancel current cmd */
if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd->wait_q_enabled = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
adapter->cmd_wait_q.status = -1;
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
@@ -1013,6 +1004,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
}
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
/* Cancel all pending scan command */
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
@@ -1027,9 +1019,21 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
}
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
- adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ if (adapter->scan_processing) {
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ adapter->scan_processing = false;
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (!priv)
+ continue;
+ if (priv->scan_request) {
+ dev_dbg(adapter->dev, "info: aborting scan\n");
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ }
+ }
+ }
}
/*
@@ -1048,7 +1052,8 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
unsigned long scan_pending_q_flags;
- bool cancel_scan_cmd = false;
+ struct mwifiex_private *priv;
+ int i;
if ((adapter->curr_cmd) &&
(adapter->curr_cmd->wait_q_enabled)) {
@@ -1074,15 +1079,24 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
spin_lock_irqsave(&adapter->scan_pending_q_lock,
scan_pending_q_flags);
- cancel_scan_cmd = true;
}
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
scan_pending_q_flags);
- if (cancel_scan_cmd) {
+ if (adapter->scan_processing) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (!priv)
+ continue;
+ if (priv->scan_request) {
+ dev_dbg(adapter->dev, "info: aborting scan\n");
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ }
+ }
}
adapter->cmd_wait_q.status = -1;
}
@@ -1454,7 +1468,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
{
struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec;
struct mwifiex_adapter *adapter = priv->adapter;
- int i;
+ struct mwifiex_ie_types_header *tlv;
+ struct hw_spec_fw_api_rev *api_rev;
+ u16 resp_size, api_id;
+ int i, left_len, parsed_len = 0;
adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info);
@@ -1490,6 +1507,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
}
adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number);
+ adapter->fw_api_ver = (adapter->fw_release_number >> 16) & 0xff;
adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna);
if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) {
@@ -1498,8 +1516,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
/* Copy 11AC cap */
adapter->hw_dot_11ac_dev_cap =
le32_to_cpu(hw_spec->dot_11ac_dev_cap);
- adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap;
- adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap;
+ adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap
+ & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
+ adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap
+ & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
/* Copy 11AC mcs */
adapter->hw_dot_11ac_mcs_support =
@@ -1510,6 +1530,46 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
adapter->is_hw_11ac_capable = false;
}
+ resp_size = le16_to_cpu(resp->size) - S_DS_GEN;
+ if (resp_size > sizeof(struct host_cmd_ds_get_hw_spec)) {
+ /* we have variable HW SPEC information */
+ left_len = resp_size - sizeof(struct host_cmd_ds_get_hw_spec);
+ while (left_len > sizeof(struct mwifiex_ie_types_header)) {
+ tlv = (void *)&hw_spec->tlvs + parsed_len;
+ switch (le16_to_cpu(tlv->type)) {
+ case TLV_TYPE_FW_API_REV:
+ api_rev = (struct hw_spec_fw_api_rev *)tlv;
+ api_id = le16_to_cpu(api_rev->api_id);
+ switch (api_id) {
+ case KEY_API_VER_ID:
+ adapter->fw_key_api_major_ver =
+ api_rev->major_ver;
+ adapter->fw_key_api_minor_ver =
+ api_rev->minor_ver;
+ dev_dbg(adapter->dev,
+ "fw_key_api v%d.%d\n",
+ adapter->fw_key_api_major_ver,
+ adapter->fw_key_api_minor_ver);
+ break;
+ default:
+ dev_warn(adapter->dev,
+ "Unknown FW api_id: %d\n",
+ api_id);
+ break;
+ }
+ break;
+ default:
+ dev_warn(adapter->dev,
+ "Unknown GET_HW_SPEC TLV type: %#x\n",
+ le16_to_cpu(tlv->type));
+ break;
+ }
+ parsed_len += le16_to_cpu(tlv->len) +
+ sizeof(struct mwifiex_ie_types_header);
+ left_len -= le16_to_cpu(tlv->len) + sizeof(struct mwifiex_ie_types_header);
+ }
+ }
+
dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
adapter->fw_release_number);
dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
@@ -1538,6 +1598,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
adapter->hw_dev_mcs_support = hw_spec->dev_mcs_support;
+ adapter->user_dev_mcs_support = adapter->hw_dev_mcs_support;
if (adapter->if_ops.update_mp_end_port)
adapter->if_ops.update_mp_end_port(adapter,
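
The variable-length GET_HW_SPEC handling added above follows the driver's usual TLV walk: a fixed struct host_cmd_ds_get_hw_spec followed by zero or more {type, len, payload} records. A stripped-down sketch of that walk, with illustrative names and only a debug print in place of the per-type dispatch:

	static void example_walk_hw_spec_tlvs(struct mwifiex_adapter *adapter,
					      u8 *tlv_buf, int left_len)
	{
		struct mwifiex_ie_types_header *hdr;
		u16 len;

		while (left_len >= (int)sizeof(*hdr)) {
			hdr = (struct mwifiex_ie_types_header *)tlv_buf;
			len = le16_to_cpu(hdr->len);

			dev_dbg(adapter->dev, "TLV type %#x, len %u\n",
				le16_to_cpu(hdr->type), len);

			/* advance by exactly this TLV (header + payload) */
			tlv_buf += sizeof(*hdr) + len;
			left_len -= sizeof(*hdr) + len;
		}
	}
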
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index a5f9875cfd6e..b8a49aad12fd 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -85,8 +85,8 @@ static struct mwifiex_debug_data items[] = {
item_addr(hs_activated), 1},
{"num_tx_timeout", item_size(num_tx_timeout),
item_addr(num_tx_timeout), 1},
- {"num_cmd_timeout", item_size(num_cmd_timeout),
- item_addr(num_cmd_timeout), 1},
+ {"is_cmd_timedout", item_size(is_cmd_timedout),
+ item_addr(is_cmd_timedout), 1},
{"timeout_cmd_id", item_size(timeout_cmd_id),
item_addr(timeout_cmd_id), 1},
{"timeout_cmd_act", item_size(timeout_cmd_act),
@@ -493,7 +493,7 @@ mwifiex_regrdwr_write(struct file *file,
{
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *) addr;
- size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1));
+ size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1);
int ret;
u32 reg_type = 0, reg_offset = 0, reg_value = UINT_MAX;
@@ -594,7 +594,7 @@ mwifiex_rdeeprom_write(struct file *file,
{
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *) addr;
- size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1));
+ size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1);
int ret = 0;
int offset = -1, bytes = -1;
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 3a21bd03d6db..e7b3e16e5d34 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -75,10 +75,16 @@
#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
+#define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2)
#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024
#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128
+#define MWIFIEX_TDLS_DISABLE_LINK 0x00
+#define MWIFIEX_TDLS_ENABLE_LINK 0x01
+#define MWIFIEX_TDLS_CREATE_LINK 0x02
+#define MWIFIEX_TDLS_CONFIG_LINK 0x03
+
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
MWIFIEX_BSS_TYPE_UAP = 1,
@@ -92,6 +98,23 @@ enum mwifiex_bss_role {
MWIFIEX_BSS_ROLE_ANY = 0xff,
};
+enum mwifiex_tdls_status {
+ TDLS_NOT_SETUP = 0,
+ TDLS_SETUP_INPROGRESS,
+ TDLS_SETUP_COMPLETE,
+ TDLS_SETUP_FAILURE,
+ TDLS_LINK_TEARDOWN,
+};
+
+enum mwifiex_tdls_error_code {
+ TDLS_ERR_NO_ERROR = 0,
+ TDLS_ERR_INTERNAL_ERROR,
+ TDLS_ERR_MAX_LINKS_EST,
+ TDLS_ERR_LINK_EXISTS,
+ TDLS_ERR_LINK_NONEXISTENT,
+ TDLS_ERR_PEER_STA_UNREACHABLE = 25,
+};
+
#define BSS_ROLE_BIT_MASK BIT(0)
#define GET_BSS_ROLE(priv) ((priv)->bss_role & BSS_ROLE_BIT_MASK)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 5fa932d5f905..b485dc1ae5eb 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -50,21 +50,23 @@ struct tx_packet_hdr {
#define HOSTCMD_SUPPORTED_RATES 14
#define N_SUPPORTED_RATES 3
#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN | \
- BAND_AN | BAND_GAC | BAND_AAC)
+ BAND_AN | BAND_AAC)
#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11) | \
- BIT(12) | BIT(13))
+ BIT(13))
#define IS_SUPPORT_MULTI_BANDS(adapter) \
(adapter->fw_cap_info & FW_MULTI_BANDS_SUPPORT)
-/* shift bit 12 and bit 13 in fw_cap_info from the firmware to bit 13 and 14
- * for 11ac so that bit 11 is for GN, bit 12 for AN, bit 13 for GAC, and bit
- * bit 14 for AAC, in order to be compatible with the band capability
- * defined in the driver after right shift of 8 bits.
+/* bit 13: 11ac BAND_AAC
+ * bit 12: reserved for lab testing, will be reused for BAND_AN
+ * bit 11: 11n BAND_GN
+ * bit 10: 11a BAND_A
+ * bit 9: 11g BAND_G
+ * bit 8: 11b BAND_B
+ * Map these bits to band capability by right shifting 8 bits.
*/
#define GET_FW_DEFAULT_BANDS(adapter) \
- (((((adapter->fw_cap_info & 0x3000) << 1) | \
- (adapter->fw_cap_info & ~0xF000)) >> 8) & \
+ (((adapter->fw_cap_info & 0x2f00) >> 8) & \
ALL_802_11_BANDS)
#define HostCmd_WEP_KEY_INDEX_MASK 0x3fff
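
As a worked example of the simplified band macro (the fw_cap_info value here is made up): with fw_cap_info = 0x2f00, (0x2f00 & 0x2f00) >> 8 = 0x2f = BAND_B | BAND_G | BAND_A | BAND_GN | BAND_AAC. Bit 12 is masked out, so BAND_AN is never derived through this path, consistent with the "reserved for lab testing" note above.
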
@@ -77,12 +79,21 @@ enum KEY_TYPE_ID {
KEY_TYPE_ID_WAPI,
KEY_TYPE_ID_AES_CMAC,
};
+
+#define WPA_PN_SIZE 8
+#define KEY_PARAMS_FIXED_LEN 10
+#define KEY_INDEX_MASK 0xf
+#define FW_KEY_API_VER_MAJOR_V2 2
+
#define KEY_MCAST BIT(0)
#define KEY_UNICAST BIT(1)
#define KEY_ENABLED BIT(2)
+#define KEY_DEFAULT BIT(3)
+#define KEY_TX_KEY BIT(4)
+#define KEY_RX_KEY BIT(5)
#define KEY_IGTK BIT(10)
-#define WAPI_KEY_LEN 50
+#define WAPI_KEY_LEN (WLAN_KEY_LEN_SMS4 + PN_LEN + 2)
#define MAX_POLL_TRIES 100
#define MAX_FIRMWARE_POLL_TRIES 100
@@ -130,6 +141,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
#define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
+#define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35)
#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
@@ -144,6 +156,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82)
#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83)
#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
+#define TLV_TYPE_BSS_SCAN_RSP (PROPRIETARY_TLV_BASE_ID + 86)
+#define TLV_TYPE_BSS_SCAN_INFO (PROPRIETARY_TLV_BASE_ID + 87)
#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93)
#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104)
@@ -154,6 +168,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
+#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -176,13 +192,21 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define MWIFIEX_TX_DATA_BUF_SIZE_8K 8192
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
+#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
IEEE80211_HT_CAP_SM_PS)
+#define MWIFIEX_DEF_11N_TX_BF_CAP 0x09E1E008
+
#define MWIFIEX_DEF_AMPDU IEEE80211_HT_AMPDU_PARM_FACTOR
+#define GET_RXSTBC(x) (x & IEEE80211_HT_CAP_RX_STBC)
+#define MWIFIEX_RX_STBC1 0x0100
+#define MWIFIEX_RX_STBC12 0x0200
+#define MWIFIEX_RX_STBC123 0x0300
+
/* dev_cap bitmap
* BIT
* 0-16 reserved
@@ -204,6 +228,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29))
#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
+#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
/* httxcfg bitmap
* 0 reserved
@@ -216,8 +241,21 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
*/
#define MWIFIEX_FW_DEF_HTTXCFG (BIT(1) | BIT(4) | BIT(5) | BIT(6))
+/* 11AC Tx and Rx MCS map for 1x1 mode:
+ * IEEE80211_VHT_MCS_SUPPORT_0_9 for stream 1
+ * IEEE80211_VHT_MCS_NOT_SUPPORTED for remaining 7 streams
+ */
+#define MWIFIEX_11AC_MCS_MAP_1X1 0xfffefffe
+
+/* 11AC Tx and Rx MCS map for 2x2 mode:
+ * IEEE80211_VHT_MCS_SUPPORT_0_9 for stream 1 and 2
+ * IEEE80211_VHT_MCS_NOT_SUPPORTED for remaining 6 streams
+ */
+#define MWIFIEX_11AC_MCS_MAP_2X2 0xfffafffa
+
#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
#define SETHT_MCS32(x) (x[4] |= 1)
+#define HT_STREAM_1X1 0x11
#define HT_STREAM_2X2 0x22
#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
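
Decoding the new MCS maps (in the 2-bit-per-stream encoding, IEEE80211_VHT_MCS_SUPPORT_0_9 is 2 and IEEE80211_VHT_MCS_NOT_SUPPORTED is 3): 0xfffe is 0b1111111111111110, so stream 1 reads 2 (MCS 0-9) and streams 2-8 read 3 (unsupported); 0xfffa is 0b1111111111111010, enabling streams 1 and 2. The same 16-bit pattern is repeated for the Tx and Rx halves of each 32-bit define.
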
@@ -226,17 +264,24 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
/* HW_SPEC fw_cap_info */
-#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(12)|BIT(13)))
+#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & BIT(13))
#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \
(2 * (nss - 1)))
-#define NO_NSS_SUPPORT 0x3
-
#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16)
#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF)
+/* Clear SU beamformer, MU beamformer, MU beamformee and
+ * sounding dimensions bits
+ */
+#define MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK \
+ (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | \
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE | \
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | \
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK)
+
#define MOD_CLASS_HR_DSSS 0x03
#define MOD_CLASS_OFDM 0x07
#define MOD_CLASS_HT 0x08
@@ -295,10 +340,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
#define HostCmd_CMD_SET_BSS_MODE 0x00f7
#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
+#define HostCmd_CMD_802_11_SCAN_EXT 0x0107
#define HostCmd_CMD_COALESCE_CFG 0x010a
#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
#define HostCmd_CMD_11AC_CFG 0x0112
+#define HostCmd_CMD_TDLS_OPER 0x0122
#define PROTOCOL_NO_SECURITY 0x01
#define PROTOCOL_STATIC_WEP 0x02
@@ -440,6 +487,7 @@ enum P2P_MODES {
#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
#define EVENT_HOSTWAKE_STAIE 0x0000004d
#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
+#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_ID_MASK 0xffff
@@ -468,6 +516,12 @@ enum P2P_MODES {
#define MWIFIEX_CRITERIA_UNICAST BIT(1)
#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
+#define ACT_TDLS_DELETE 0x00
+#define ACT_TDLS_CREATE 0x01
+#define ACT_TDLS_CONFIG 0x02
+
+#define MWIFIEX_FW_V15 15
+
struct mwifiex_ie_types_header {
__le16 type;
__le16 len;
@@ -480,6 +534,7 @@ struct mwifiex_ie_types_data {
#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
+#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10
struct txpd {
u8 bss_type;
@@ -676,6 +731,56 @@ struct mwifiex_cmac_param {
u8 key[WLAN_KEY_LEN_AES_CMAC];
} __packed;
+struct mwifiex_wep_param {
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_WEP104];
+} __packed;
+
+struct mwifiex_tkip_param {
+ u8 pn[WPA_PN_SIZE];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_TKIP];
+} __packed;
+
+struct mwifiex_aes_param {
+ u8 pn[WPA_PN_SIZE];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_CCMP];
+} __packed;
+
+struct mwifiex_wapi_param {
+ u8 pn[PN_LEN];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_SMS4];
+} __packed;
+
+struct mwifiex_cmac_aes_param {
+ u8 ipn[IGTK_PN_LEN];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_AES_CMAC];
+} __packed;
+
+struct mwifiex_ie_type_key_param_set_v2 {
+ __le16 type;
+ __le16 len;
+ u8 mac_addr[ETH_ALEN];
+ u8 key_idx;
+ u8 key_type;
+ __le16 key_info;
+ union {
+ struct mwifiex_wep_param wep;
+ struct mwifiex_tkip_param tkip;
+ struct mwifiex_aes_param aes;
+ struct mwifiex_wapi_param wapi;
+ struct mwifiex_cmac_aes_param cmac_aes;
+ } key_params;
+} __packed;
+
+struct host_cmd_ds_802_11_key_material_v2 {
+ __le16 action;
+ struct mwifiex_ie_type_key_param_set_v2 key_param_set;
+} __packed;
+
struct host_cmd_ds_802_11_key_material {
__le16 action;
struct mwifiex_ie_type_key_param_set key_param_set;
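
To make the v2 key layout concrete, here is a hypothetical fill of the AES member; the length arithmetic and key_info flags are assumptions for illustration only, not values mandated by this patch:

	static void example_fill_aes_key_v2(
			struct mwifiex_ie_type_key_param_set_v2 *p,
			const u8 *mac, const u8 *key, u16 key_len)
	{
		key_len = min_t(u16, key_len, WLAN_KEY_LEN_CCMP);

		p->type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
		/* assumed: fixed fields plus the AES member */
		p->len = cpu_to_le16(KEY_PARAMS_FIXED_LEN +
				     sizeof(p->key_params.aes));
		memcpy(p->mac_addr, mac, ETH_ALEN);
		p->key_idx = 0;
		p->key_type = KEY_TYPE_ID_AES;
		p->key_info = cpu_to_le16(KEY_UNICAST | KEY_ENABLED |
					  KEY_TX_KEY | KEY_RX_KEY);
		p->key_params.aes.key_len = cpu_to_le16(key_len);
		memcpy(p->key_params.aes.key, key, key_len);
	}
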
@@ -727,6 +832,17 @@ struct host_cmd_ds_802_11_ps_mode_enh {
} params;
} __packed;
+enum FW_API_VER_ID {
+ KEY_API_VER_ID = 1,
+};
+
+struct hw_spec_fw_api_rev {
+ struct mwifiex_ie_types_header header;
+ __le16 api_id;
+ u8 major_ver;
+ u8 minor_ver;
+} __packed;
+
struct host_cmd_ds_get_hw_spec {
__le16 hw_if_version;
__le16 version;
@@ -748,6 +864,7 @@ struct host_cmd_ds_get_hw_spec {
__le32 reserved_6;
__le32 dot_11ac_dev_cap;
__le32 dot_11ac_mcs_support;
+ u8 tlvs[0];
} __packed;
struct host_cmd_ds_802_11_rssi_info {
@@ -993,6 +1110,7 @@ struct mwifiex_rate_scope {
__le16 hr_dsss_rate_bitmap;
__le16 ofdm_rate_bitmap;
__le16 ht_mcs_rate_bitmap[8];
+ __le16 vht_mcs_rate_bitmap[8];
} __packed;
struct mwifiex_rate_drop_pattern {
@@ -1047,14 +1165,28 @@ struct host_cmd_ds_rf_ant_siso {
__le16 ant_mode;
};
-struct mwifiex_bcn_param {
- u8 bssid[ETH_ALEN];
- u8 rssi;
+struct host_cmd_ds_tdls_oper {
+ __le16 tdls_action;
+ __le16 reason;
+ u8 peer_mac[ETH_ALEN];
+} __packed;
+
+struct mwifiex_fixed_bcn_param {
__le64 timestamp;
__le16 beacon_period;
__le16 cap_info_bitmap;
} __packed;
+struct mwifiex_event_scan_result {
+ __le16 event_id;
+ u8 bss_index;
+ u8 bss_type;
+ u8 more_event;
+ u8 reserved[3];
+ __le16 buf_size;
+ u8 num_of_set;
+} __packed;
+
#define MWIFIEX_USER_SCAN_CHAN_MAX 50
#define MWIFIEX_MAX_SSID_LIST_LENGTH 10
@@ -1124,6 +1256,28 @@ struct host_cmd_ds_802_11_scan_rsp {
u8 bss_desc_and_tlv_buffer[1];
} __packed;
+struct host_cmd_ds_802_11_scan_ext {
+ u32 reserved;
+ u8 tlv_buffer[1];
+} __packed;
+
+struct mwifiex_ie_types_bss_scan_rsp {
+ struct mwifiex_ie_types_header header;
+ u8 bssid[ETH_ALEN];
+ u8 frame_body[1];
+} __packed;
+
+struct mwifiex_ie_types_bss_scan_info {
+ struct mwifiex_ie_types_header header;
+ __le16 rssi;
+ __le16 anpi;
+ u8 cca_busy_fraction;
+ u8 radio_type;
+ u8 channel;
+ u8 reserved;
+ __le64 tsf;
+} __packed;
+
struct host_cmd_ds_802_11_bg_scan_query {
u8 flush;
} __packed;
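
Putting the extended-scan additions together: an EVENT_EXT_SCAN_REPORT buffer begins with a struct mwifiex_event_scan_result header followed by buf_size bytes of TLVs, typically a TLV_TYPE_BSS_SCAN_RSP entry carrying the raw beacon/probe-response frame paired with a TLV_TYPE_BSS_SCAN_INFO entry carrying RSSI, channel and TSF. The real handler is mwifiex_handle_event_ext_scan_report(), declared later in this diff but not shown; the sketch below only illustrates the expected layout, using made-up helper names:

	static void example_parse_ext_scan_event(struct mwifiex_adapter *adapter,
						 u8 *buf)
	{
		struct mwifiex_event_scan_result *ev = (void *)buf;
		u8 *tlv = buf + sizeof(*ev);
		u16 left = le16_to_cpu(ev->buf_size);
		struct mwifiex_ie_types_header *hdr;
		u16 len;

		while (left >= sizeof(*hdr)) {
			hdr = (struct mwifiex_ie_types_header *)tlv;
			len = le16_to_cpu(hdr->len);
			if (sizeof(*hdr) + len > left)
				break;

			if (le16_to_cpu(hdr->type) == TLV_TYPE_BSS_SCAN_INFO) {
				struct mwifiex_ie_types_bss_scan_info *info =
					(void *)hdr;

				dev_dbg(adapter->dev,
					"ext scan: chan %d, rssi %d\n",
					info->channel,
					le16_to_cpu(info->rssi));
			}

			tlv += sizeof(*hdr) + len;
			left -= sizeof(*hdr) + len;
		}
	}
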
@@ -1296,6 +1450,11 @@ struct mwifiex_ie_types_vhtcap {
struct ieee80211_vht_cap vht_cap;
} __packed;
+struct mwifiex_ie_types_aid {
+ struct mwifiex_ie_types_header header;
+ __le16 aid;
+} __packed;
+
struct mwifiex_ie_types_oper_mode_ntf {
struct mwifiex_ie_types_header header;
u8 oper_mode;
@@ -1331,6 +1490,11 @@ struct mwifiex_ie_types_extcap {
u8 ext_capab[0];
} __packed;
+struct mwifiex_ie_types_qos_info {
+ struct mwifiex_ie_types_header header;
+ u8 qos_info;
+} __packed;
+
struct host_cmd_ds_mac_reg_access {
__le16 action;
__le16 offset;
@@ -1441,6 +1605,11 @@ struct host_cmd_tlv_rates {
u8 rates[0];
} __packed;
+struct mwifiex_ie_types_bssid_list {
+ struct mwifiex_ie_types_header header;
+ u8 bssid[ETH_ALEN];
+} __packed;
+
struct host_cmd_tlv_bcast_ssid {
struct mwifiex_ie_types_header header;
u8 bcast_ctl;
@@ -1634,6 +1803,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
struct host_cmd_ds_802_11_scan scan;
+ struct host_cmd_ds_802_11_scan_ext ext_scan;
struct host_cmd_ds_802_11_scan_rsp scan_resp;
struct host_cmd_ds_802_11_bg_scan_query bg_scan_query;
struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp;
@@ -1653,6 +1823,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_11n_cfg htcfg;
struct host_cmd_ds_wmm_get_status get_wmm_status;
struct host_cmd_ds_802_11_key_material key_material;
+ struct host_cmd_ds_802_11_key_material_v2 key_material_v2;
struct host_cmd_ds_version_ext verext;
struct host_cmd_ds_mgmt_frame_reg reg_mask;
struct host_cmd_ds_remain_on_chan roc_cfg;
@@ -1671,6 +1842,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_sta_deauth sta_deauth;
struct host_cmd_11ac_vht_cfg vht_cfg;
struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+ struct host_cmd_ds_tdls_oper tdls_oper;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 81ac001ee741..3bf3d58bbc02 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -138,9 +138,9 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
}
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
- return mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
- HostCmd_ACT_GEN_SET,
- UAP_CUSTOM_IE_I, ie_list);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_CUSTOM_IE_I, ie_list, false);
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 1d0a817f2bf0..4ecd0b208ac6 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -137,6 +137,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->csa_expire_time = 0;
priv->del_list_idx = 0;
priv->hs2_enabled = false;
+ memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID);
return mwifiex_add_bss_prio_tbl(priv);
}
@@ -233,7 +234,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->pm_wakeup_fw_try = false;
- adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
adapter->is_hs_configured = false;
@@ -281,6 +281,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->arp_filter_size = 0;
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
adapter->empty_tx_q_cnt = 0;
+ adapter->ext_scan = true;
+ adapter->fw_key_api_major_ver = 0;
+ adapter->fw_key_api_minor_ver = 0;
}
/*
@@ -450,6 +453,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
INIT_LIST_HEAD(&priv->sta_list);
+ skb_queue_head_init(&priv->tdls_txq);
spin_lock_init(&priv->tx_ba_stream_tbl_lock);
spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -615,7 +619,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
/* cancel current command */
if (adapter->curr_cmd) {
dev_warn(adapter->dev, "curr_cmd is still in processing\n");
- del_timer(&adapter->cmd_timer);
+ del_timer_sync(&adapter->cmd_timer);
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
adapter->curr_cmd = NULL;
}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 00a95f4c6a6c..ee494db54060 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -60,8 +60,7 @@ enum {
BAND_A = 4,
BAND_GN = 8,
BAND_AN = 16,
- BAND_GAC = 32,
- BAND_AAC = 64,
+ BAND_AAC = 32,
};
#define MWIFIEX_WPA_PASSHPHRASE_LEN 64
@@ -86,6 +85,10 @@ struct wep_key {
#define BAND_CONFIG_A 0x01
#define MWIFIEX_SUPPORTED_RATES 14
#define MWIFIEX_SUPPORTED_RATES_EXT 32
+#define MWIFIEX_TDLS_SUPPORTED_RATES 8
+#define MWIFIEX_TDLS_DEF_QOS_CAPAB 0xf
+#define MWIFIEX_PRIO_BK 2
+#define MWIFIEX_PRIO_VI 5
struct mwifiex_uap_bss_param {
u8 channel;
@@ -174,6 +177,7 @@ struct mwifiex_ds_rx_reorder_tbl {
struct mwifiex_ds_tx_ba_stream_tbl {
u16 tid;
u8 ra[ETH_ALEN];
+ u8 amsdu;
};
#define DBG_CMD_NUM 5
@@ -206,7 +210,7 @@ struct mwifiex_debug_info {
u32 num_cmd_assoc_success;
u32 num_cmd_assoc_failure;
u32 num_tx_timeout;
- u32 num_cmd_timeout;
+ u8 is_cmd_timedout;
u16 timeout_cmd_id;
u16 timeout_cmd_act;
u16 last_cmd_id[DBG_CMD_NUM];
@@ -233,7 +237,10 @@ struct mwifiex_ds_encrypt_key {
u8 mac_addr[ETH_ALEN];
u32 is_wapi_key;
u8 pn[PN_LEN]; /* packet number */
+ u8 pn_len;
u8 is_igtk_key;
+ u8 is_current_wep_key;
+ u8 is_rx_seq_valid;
};
struct mwifiex_power_cfg {
@@ -432,4 +439,16 @@ struct mwifiex_ds_coalesce_cfg {
struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES];
};
+struct mwifiex_ds_tdls_oper {
+ u16 tdls_action;
+ u8 peer_mac[ETH_ALEN];
+ u16 capability;
+ u8 qos_info;
+ u8 *ext_capab;
+ u8 ext_capab_len;
+ u8 *supp_rates;
+ u8 supp_rates_len;
+ u8 *ht_capab;
+};
+
#endif /* !_MWIFIEX_IOCTL_H_ */
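
A hypothetical consumer of the new TDLS plumbing, tying the MWIFIEX_TDLS_* actions from decl.h to the mwifiex_tdls_oper() prototype added in main.h later in this diff (which of the two new action namespaces that helper expects is an assumption here):

	static int example_tdls_create(struct mwifiex_private *priv, u8 *peer)
	{
		/* create the link entry; enabling it would follow the same
		 * pattern with MWIFIEX_TDLS_ENABLE_LINK
		 */
		return mwifiex_tdls_oper(priv, peer, MWIFIEX_TDLS_CREATE_LINK);
	}
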
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 4e4686e6ac09..89dc62a467f4 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -515,8 +515,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_cmd_append_11ac_tlv(priv, bss_desc, &pos);
/* Append vendor specific IE TLV */
@@ -902,9 +901,9 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
mwifiex_get_active_data_rates(priv, adhoc_start->data_rate);
if ((adapter->adhoc_start_band & BAND_G) &&
(priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &priv->curr_pkt_filter)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter, false)) {
dev_err(adapter->dev,
"ADHOC_S_CMD: G Protection config failed\n");
return -1;
@@ -983,7 +982,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
radio_type = mwifiex_band_to_radio_type(
priv->adapter->config_bands);
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
if (adapter->sec_chan_offset ==
IEEE80211_HT_PARAM_CHA_SEC_NONE) {
@@ -1074,9 +1073,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
priv->
curr_pkt_filter | HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON;
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &curr_pkt_filter)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &curr_pkt_filter, false)) {
dev_err(priv->adapter->dev,
"ADHOC_J_CMD: G Protection config failed\n");
return -1;
@@ -1300,8 +1299,7 @@ int mwifiex_associate(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
@@ -1314,8 +1312,8 @@ int mwifiex_associate(struct mwifiex_private *priv,
retrieval */
priv->assoc_rsp_size = 0;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_ASSOCIATE,
- HostCmd_ACT_GEN_SET, 0, bss_desc);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_ASSOCIATE,
+ HostCmd_ACT_GEN_SET, 0, bss_desc, true);
}
/*
@@ -1335,14 +1333,13 @@ mwifiex_adhoc_start(struct mwifiex_private *priv,
priv->curr_bss_params.band);
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_START,
- HostCmd_ACT_GEN_SET, 0, adhoc_ssid);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_START,
+ HostCmd_ACT_GEN_SET, 0, adhoc_ssid, true);
}
/*
@@ -1376,8 +1373,7 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
@@ -1387,8 +1383,8 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n",
priv->curr_bss_params.band);
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
- HostCmd_ACT_GEN_SET, 0, bss_desc);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
+ HostCmd_ACT_GEN_SET, 0, bss_desc, true);
}
/*
@@ -1407,8 +1403,8 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
else
memcpy(mac_address, mac, ETH_ALEN);
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
- HostCmd_ACT_GEN_SET, 0, mac_address);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
+ HostCmd_ACT_GEN_SET, 0, mac_address, true);
return ret;
}
@@ -1436,19 +1432,31 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
GFP_KERNEL);
break;
case NL80211_IFTYPE_ADHOC:
- return mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_AD_HOC_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
case NL80211_IFTYPE_AP:
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
default:
break;
}
return ret;
}
-EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
+
+/* This function deauthenticates/disconnects from all BSS. */
+void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter)
+{
+ struct mwifiex_private *priv;
+ int i;
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (priv)
+ mwifiex_deauthenticate(priv, NULL);
+ }
+}
+EXPORT_SYMBOL_GPL(mwifiex_deauthenticate_all);
/*
* This function converts band to radio type used in channel TLV.
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9d3d2758ec35..77db0886c6e2 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -38,7 +38,8 @@ static void scan_delay_timer_fn(unsigned long data)
if (adapter->surprise_removed)
return;
- if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+ if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT ||
+ !adapter->scan_processing) {
/*
* Abort scan operation by cancelling all pending scan
* commands
@@ -194,7 +195,7 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
if (adapter->if_ops.cleanup_if)
adapter->if_ops.cleanup_if(adapter);
- del_timer(&adapter->cmd_timer);
+ del_timer_sync(&adapter->cmd_timer);
/* Free private structures */
for (i = 0; i < adapter->priv_num; i++) {
@@ -678,8 +679,8 @@ mwifiex_set_mac_address(struct net_device *dev, void *addr)
memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN);
/* Send request to firmware */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_MAC_ADDRESS,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_MAC_ADDRESS,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
if (!ret)
memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN);
@@ -871,7 +872,6 @@ mwifiex_add_card(void *card, struct semaphore *sem,
adapter->is_suspended = false;
adapter->hs_activated = false;
init_waitqueue_head(&adapter->hs_activate_wait_q);
- adapter->cmd_wait_q_required = false;
init_waitqueue_head(&adapter->cmd_wait_q.wait);
adapter->cmd_wait_q.status = 0;
adapter->scan_wait_q_woken = false;
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index d8ad554ce39f..d53e1e8c9467 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -59,7 +59,7 @@ enum {
#define MWIFIEX_UPLD_SIZE (2312)
-#define MAX_EVENT_SIZE 1024
+#define MAX_EVENT_SIZE 2048
#define ARP_FILTER_MAX_BUF_SIZE 68
@@ -116,7 +116,7 @@ enum {
#define MWIFIEX_TYPE_DATA 0
#define MWIFIEX_TYPE_EVENT 3
-#define MAX_BITMAP_RATES_SIZE 10
+#define MAX_BITMAP_RATES_SIZE 18
#define MAX_CHANNEL_BAND_BG 14
#define MAX_CHANNEL_BAND_A 165
@@ -145,7 +145,6 @@ struct mwifiex_dbg {
u32 num_cmd_assoc_success;
u32 num_cmd_assoc_failure;
u32 num_tx_timeout;
- u32 num_cmd_timeout;
u16 timeout_cmd_id;
u16 timeout_cmd_act;
u16 last_cmd_id[DBG_CMD_NUM];
@@ -193,6 +192,8 @@ struct mwifiex_add_ba_param {
u32 tx_win_size;
u32 rx_win_size;
u32 timeout;
+ u8 tx_amsdu;
+ u8 rx_amsdu;
};
struct mwifiex_tx_aggr {
@@ -210,6 +211,7 @@ struct mwifiex_ra_list_tbl {
u16 ba_pkt_count;
u8 ba_packet_thr;
u16 total_pkt_count;
+ bool tdls_link;
};
struct mwifiex_tid_tbl {
@@ -262,6 +264,31 @@ struct ieee_types_generic {
u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header)];
} __packed;
+struct ieee_types_bss_co_2040 {
+ struct ieee_types_header ieee_hdr;
+ u8 bss_2040co;
+} __packed;
+
+struct ieee_types_extcap {
+ struct ieee_types_header ieee_hdr;
+ u8 ext_capab[8];
+} __packed;
+
+struct ieee_types_vht_cap {
+ struct ieee_types_header ieee_hdr;
+ struct ieee80211_vht_cap vhtcap;
+} __packed;
+
+struct ieee_types_vht_oper {
+ struct ieee_types_header ieee_hdr;
+ struct ieee80211_vht_operation vhtoper;
+} __packed;
+
+struct ieee_types_aid {
+ struct ieee_types_header ieee_hdr;
+ u16 aid;
+} __packed;
+
struct mwifiex_bssdescriptor {
u8 mac_address[ETH_ALEN];
struct cfg80211_ssid ssid;
@@ -443,6 +470,7 @@ struct mwifiex_private {
u8 wpa_ie_len;
u8 wpa_is_gtk_set;
struct host_cmd_ds_802_11_key_material aes_key;
+ struct host_cmd_ds_802_11_key_material_v2 aes_key_v2;
u8 wapi_ie[256];
u8 wapi_ie_len;
u8 *wps_ie;
@@ -461,6 +489,7 @@ struct mwifiex_private {
struct mwifiex_tx_aggr aggr_prio_tbl[MAX_NUM_TID];
struct mwifiex_add_ba_param add_ba_param;
u16 rx_seq[MAX_NUM_TID];
+ u8 tos_to_tid_inv[MAX_NUM_TID];
struct list_head rx_reorder_tbl_ptr;
/* spin lock for rx_reorder_tbl_ptr queue */
spinlock_t rx_reorder_tbl_lock;
@@ -518,6 +547,8 @@ struct mwifiex_private {
unsigned long csa_expire_time;
u8 del_list_idx;
bool hs2_enabled;
+ struct station_parameters *sta_params;
+ struct sk_buff_head tdls_txq;
};
enum mwifiex_ba_status {
@@ -531,6 +562,7 @@ struct mwifiex_tx_ba_stream_tbl {
int tid;
u8 ra[ETH_ALEN];
enum mwifiex_ba_status ba_status;
+ u8 amsdu;
};
struct mwifiex_rx_reorder_tbl;
@@ -545,10 +577,12 @@ struct mwifiex_rx_reorder_tbl {
struct list_head list;
int tid;
u8 ta[ETH_ALEN];
+ int init_win;
int start_win;
int win_size;
void **rx_reorder_ptr;
struct reorder_tmr_cnxt timer_context;
+ u8 amsdu;
u8 flags;
};
@@ -583,17 +617,35 @@ struct mwifiex_bss_priv {
u64 fw_tsf;
};
-/* This is AP specific structure which stores information
- * about associated STA
+struct mwifiex_tdls_capab {
+ __le16 capab;
+ u8 rates[32];
+ u8 rates_len;
+ u8 qos_info;
+ u8 coex_2040;
+ u16 aid;
+ struct ieee80211_ht_cap ht_capb;
+ struct ieee80211_ht_operation ht_oper;
+ struct ieee_types_extcap extcap;
+ struct ieee_types_generic rsn_ie;
+ struct ieee80211_vht_cap vhtcap;
+ struct ieee80211_vht_operation vhtoper;
+};
+
+/* This is an AP/TDLS specific structure which stores information
+ * about associated/peer STA
*/
struct mwifiex_sta_node {
struct list_head list;
u8 mac_addr[ETH_ALEN];
u8 is_wmm_enabled;
u8 is_11n_enabled;
+ u8 is_11ac_enabled;
u8 ampdu_sta[MAX_NUM_TID];
u16 rx_seq[MAX_NUM_TID];
u16 max_amsdu;
+ u8 tdls_status;
+ struct mwifiex_tdls_capab tdls_cap;
};
struct mwifiex_if_ops {
@@ -671,7 +723,7 @@ struct mwifiex_adapter {
struct cmd_ctrl_node *curr_cmd;
/* spin lock for command */
spinlock_t mwifiex_cmd_lock;
- u32 num_cmd_timeout;
+ u8 is_cmd_timedout;
u16 last_init_cmd;
struct timer_list cmd_timer;
struct list_head cmd_free_q;
@@ -722,15 +774,16 @@ struct mwifiex_adapter {
u16 hs_activate_wait_q_woken;
wait_queue_head_t hs_activate_wait_q;
bool is_suspended;
+ bool hs_enabling;
u8 event_body[MAX_EVENT_SIZE];
u32 hw_dot_11n_dev_cap;
u8 hw_dev_mcs_support;
+ u8 user_dev_mcs_support;
u8 adhoc_11n_enabled;
u8 sec_chan_offset;
struct mwifiex_dbg dbg;
u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
u32 arp_filter_size;
- u16 cmd_wait_q_required;
struct mwifiex_wait_queue cmd_wait_q;
u8 scan_wait_q_woken;
spinlock_t queue_lock; /* lock for tx queues */
@@ -753,6 +806,9 @@ struct mwifiex_adapter {
atomic_t is_tx_received;
atomic_t pending_bridged_pkts;
struct semaphore *card_sem;
+ bool ext_scan;
+ u8 fw_api_ver;
+ u8 fw_key_api_major_ver, fw_key_api_minor_ver;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -788,11 +844,8 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter);
int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node);
-int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
- u16 cmd_action, u32 cmd_oid, void *data_buf);
-
-int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
- u16 cmd_action, u32 cmd_oid, void *data_buf);
+int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
+ u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync);
void mwifiex_cmd_timeout_func(unsigned long function_context);
@@ -880,6 +933,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason);
u8 mwifiex_band_to_radio_type(u8 band);
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter);
int mwifiex_adhoc_start(struct mwifiex_private *priv,
struct cfg80211_ssid *adhoc_ssid);
int mwifiex_adhoc_join(struct mwifiex_private *priv,
@@ -938,6 +992,12 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
struct cfg80211_ap_settings *params);
void mwifiex_set_ba_params(struct mwifiex_private *priv);
void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv);
+int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf);
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv);
+int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
+ void *buf);
/*
* This function checks if the queuing is RA based or not.
@@ -1078,7 +1138,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
const u8 *key, int key_len, u8 key_index,
const u8 *mac_addr, int disable);
-int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
+int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len);
int mwifiex_get_ver_ext(struct mwifiex_private *priv);
@@ -1159,6 +1219,32 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
extern const struct ethtool_ops mwifiex_ethtool_ops;
+void mwifiex_del_all_sta_list(struct mwifiex_private *priv);
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+ int ies_len, struct mwifiex_sta_node *node);
+struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac);
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len);
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len);
+void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *buf, int len);
+int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action);
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
+bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
+u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
+ u32 pri_chan, u8 chan_bw);
+
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 7fe7b53fb17a..a7e8b96b2d90 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -39,20 +39,31 @@ static struct semaphore add_remove_card_sem;
static int
mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
- int size, int flags)
+ size_t size, int flags)
{
struct pcie_service_card *card = adapter->card;
- dma_addr_t buf_pa;
+ struct mwifiex_dma_mapping mapping;
- buf_pa = pci_map_single(card->dev, skb->data, size, flags);
- if (pci_dma_mapping_error(card->dev, buf_pa)) {
+ mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
+ if (pci_dma_mapping_error(card->dev, mapping.addr)) {
dev_err(adapter->dev, "failed to map pci memory!\n");
return -1;
}
- memcpy(skb->cb, &buf_pa, sizeof(dma_addr_t));
+ mapping.len = size;
+ memcpy(skb->cb, &mapping, sizeof(mapping));
return 0;
}
+static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb, int flags)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct mwifiex_dma_mapping mapping;
+
+ MWIFIEX_SKB_PACB(skb, &mapping);
+ pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
+}
+
/*
* This function reads sleep cookie and checks if FW is ready
*/
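
The mapping cached in skb->cb is now an {addr, len} pair instead of a bare dma_addr_t, which is what lets mwifiex_unmap_pci_memory() recover the original length. The MWIFIEX_SKB_PACB() and MWIFIEX_SKB_DMA_ADDR() accessors used throughout the rest of this file are defined elsewhere in the driver; conceptually they behave like this approximation (not the in-tree definitions):

	static inline void example_get_mapping(struct sk_buff *skb,
					       struct mwifiex_dma_mapping *m)
	{
		/* mirror of the memcpy(skb->cb, &mapping, ...) done at map time */
		memcpy(m, skb->cb, sizeof(*m));
	}

	static inline dma_addr_t example_dma_addr(struct sk_buff *skb)
	{
		struct mwifiex_dma_mapping m;

		example_get_mapping(skb, &m);
		return m.addr;
	}
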
@@ -109,6 +120,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
/* Indicate device suspended */
adapter->is_suspended = true;
+ adapter->hs_enabling = false;
return 0;
}
@@ -179,6 +191,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
card->pcie.firmware = data->firmware;
card->pcie.reg = data->reg;
card->pcie.blksz_fw_dl = data->blksz_fw_dl;
+ card->pcie.tx_buf_size = data->tx_buf_size;
}
if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
@@ -199,7 +212,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
struct pcie_service_card *card;
struct mwifiex_adapter *adapter;
struct mwifiex_private *priv;
- int i;
card = pci_get_drvdata(pdev);
if (!card)
@@ -218,11 +230,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
mwifiex_pcie_resume(&pdev->dev);
#endif
- for (i = 0; i < adapter->priv_num; i++)
- if ((GET_BSS_ROLE(adapter->priv[i]) ==
- MWIFIEX_BSS_ROLE_STA) &&
- adapter->priv[i]->media_connected)
- mwifiex_deauthenticate(adapter->priv[i], NULL);
+ mwifiex_deauthenticate_all(adapter);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -320,6 +328,30 @@ static void mwifiex_pcie_dev_wakeup_delay(struct mwifiex_adapter *adapter)
return;
}
+static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
+ u32 max_delay_loop_cnt)
+{
+ struct pcie_service_card *card = adapter->card;
+ u8 *buffer;
+ u32 sleep_cookie, count;
+
+ for (count = 0; count < max_delay_loop_cnt; count++) {
+ buffer = card->cmdrsp_buf->data - INTF_HEADER_LEN;
+ sleep_cookie = *(u32 *)buffer;
+
+ if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
+ dev_dbg(adapter->dev,
+ "sleep cookie found at count %d\n", count);
+ break;
+ }
+ usleep_range(20, 30);
+ }
+
+ if (count >= max_delay_loop_cnt)
+ dev_dbg(adapter->dev,
+ "max count reached while accessing sleep cookie\n");
+}
+
/* This function wakes up the card by reading fw_status register. */
static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
@@ -456,7 +488,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
dev_dbg(adapter->dev,
"info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -513,7 +545,7 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
dev_dbg(adapter->dev,
"info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -549,8 +581,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
desc2 = card->txbd_ring[i];
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
- pci_unmap_single(card->dev, desc2->paddr,
- skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -558,8 +590,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
desc = card->txbd_ring[i];
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr,
- skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -587,8 +619,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
desc2 = card->rxbd_ring[i];
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
- pci_unmap_single(card->dev, desc2->paddr,
- skb->len, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -596,8 +628,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
desc = card->rxbd_ring[i];
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr,
- skb->len, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -622,8 +654,8 @@ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
desc = card->evtbd_ring[i];
if (card->evt_buf_list[i]) {
skb = card->evt_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
card->evt_buf_list[i] = NULL;
@@ -861,7 +893,6 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card;
- dma_addr_t buf_pa;
if (!adapter)
return 0;
@@ -869,16 +900,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
card = adapter->card;
if (card && card->cmdrsp_buf) {
- MWIFIEX_SKB_PACB(card->cmdrsp_buf, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(card->cmdrsp_buf);
}
if (card && card->cmd_buf) {
- MWIFIEX_SKB_PACB(card->cmd_buf, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, card->cmd_buf->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+ PCI_DMA_TODEVICE);
}
return 0;
}
@@ -956,7 +985,6 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
{
struct sk_buff *skb;
- dma_addr_t buf_pa;
u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0;
struct mwifiex_pcie_buf_desc *desc;
struct mwifiex_pfu_buf_desc *desc2;
@@ -986,13 +1014,13 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
reg->tx_start_ptr;
skb = card->tx_buf_list[wrdoneidx];
+
if (skb) {
dev_dbg(adapter->dev,
"SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
skb, wrdoneidx);
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
unmap_count++;
@@ -1006,7 +1034,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
card->tx_buf_list[wrdoneidx] = NULL;
if (reg->pfu_enabled) {
- desc2 = (void *)card->txbd_ring[wrdoneidx];
+ desc2 = card->txbd_ring[wrdoneidx];
memset(desc2, 0, sizeof(*desc2));
} else {
desc = card->txbd_ring[wrdoneidx];
@@ -1082,16 +1110,16 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
tmp = (__le16 *)&payload[2];
*tmp = cpu_to_le16(MWIFIEX_TYPE_DATA);
- if (mwifiex_map_pci_memory(adapter, skb, skb->len ,
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len,
PCI_DMA_TODEVICE))
return -1;
wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
card->tx_buf_list[wrindx] = skb;
if (reg->pfu_enabled) {
- desc2 = (void *)card->txbd_ring[wrindx];
+ desc2 = card->txbd_ring[wrindx];
desc2->paddr = buf_pa;
desc2->len = (u16)skb->len;
desc2->frag_len = (u16)skb->len;
@@ -1162,8 +1190,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
return -EINPROGRESS;
done_unmap:
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
card->tx_buf_list[wrindx] = NULL;
if (reg->pfu_enabled)
memset(desc2, 0, sizeof(*desc2));
@@ -1217,9 +1244,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
if (!skb_data)
return -ENOMEM;
- MWIFIEX_SKB_PACB(skb_data, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE);
card->rx_buf_list[rd_index] = NULL;
/* Get data length from interface header -
@@ -1246,7 +1271,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
dev_dbg(adapter->dev,
"RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
@@ -1254,7 +1279,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
card->rx_buf_list[rd_index] = skb_tmp;
if (reg->pfu_enabled) {
- desc2 = (void *)card->rxbd_ring[rd_index];
+ desc2 = card->rxbd_ring[rd_index];
desc2->paddr = buf_pa;
desc2->len = skb_tmp->len;
desc2->frag_len = skb_tmp->len;
@@ -1322,7 +1347,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
if (mwifiex_map_pci_memory(adapter, skb, skb->len , PCI_DMA_TODEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
/* Write the lower 32bits of the physical address to low command
* address scratch register
@@ -1331,8 +1356,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1344,8 +1368,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1354,8 +1377,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write command len to cmd_size scratch reg\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1364,8 +1386,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
CPU_INTR_DOOR_BELL)) {
dev_err(adapter->dev,
"%s: failed to assert door-bell intr\n", __func__);
- pci_unmap_single(card->dev, buf_pa,
- MWIFIEX_UPLD_SIZE, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1439,7 +1460,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
*/
if (card->cmdrsp_buf) {
- MWIFIEX_SKB_PACB(card->cmdrsp_buf, &cmdrsp_buf_pa);
+ cmdrsp_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmdrsp_buf);
/* Write the lower 32bits of the cmdrsp buffer physical
address */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
@@ -1460,7 +1481,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
}
}
- MWIFIEX_SKB_PACB(card->cmd_buf, &cmd_buf_pa);
+ cmd_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmd_buf);
/* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
(u32)cmd_buf_pa)) {
@@ -1514,13 +1535,17 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
int count = 0;
u16 rx_len;
__le16 pkt_len;
- dma_addr_t buf_pa;
dev_dbg(adapter->dev, "info: Rx CMD Response\n");
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+
+ /* Unmap the command as a response has been received. */
+ if (card->cmd_buf) {
+ mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+ PCI_DMA_TODEVICE);
+ card->cmd_buf = NULL;
+ }
pkt_len = *((__le16 *)skb->data);
rx_len = le16_to_cpu(pkt_len);
@@ -1539,6 +1564,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
"Write register failed\n");
return -1;
}
+ mwifiex_delay_for_sleep_cookie(adapter,
+ MWIFIEX_MAX_DELAY_COUNT);
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
@@ -1552,8 +1579,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
-
- MWIFIEX_SKB_PACB(skb, &buf_pa);
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
adapter->curr_cmd->resp_skb = skb;
adapter->cmd_resp_received = true;
@@ -1588,8 +1613,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
struct sk_buff *skb)
{
struct pcie_service_card *card = adapter->card;
- dma_addr_t buf_pa;
- struct sk_buff *skb_tmp;
if (skb) {
card->cmdrsp_buf = skb;
@@ -1599,14 +1622,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
return -1;
}
- skb_tmp = card->cmd_buf;
- if (skb_tmp) {
- MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb_tmp->len,
- PCI_DMA_FROMDEVICE);
- card->cmd_buf = NULL;
- }
-
return 0;
}
@@ -1619,7 +1634,6 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr, event;
- dma_addr_t buf_pa;
struct mwifiex_evt_buf_desc *desc;
if (!mwifiex_pcie_ok_to_access_hw(adapter))
@@ -1655,9 +1669,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
skb_cmd = card->evt_buf_list[rdptr];
- MWIFIEX_SKB_PACB(skb_cmd, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
/* Take the pointer and set it to event pointer in adapter
and will return back after event handling callback */
@@ -1703,7 +1715,6 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
int ret = 0;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr;
- dma_addr_t buf_pa;
struct mwifiex_evt_buf_desc *desc;
if (!skb)
@@ -1728,11 +1739,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
MAX_EVENT_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
card->evt_buf_list[rdptr] = skb;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
desc = card->evtbd_ring[rdptr];
- desc->paddr = buf_pa;
+ desc->paddr = MWIFIEX_SKB_DMA_ADDR(skb);
desc->len = (u16)skb->len;
desc->flags = 0;
skb = NULL;
@@ -1782,7 +1791,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
struct sk_buff *skb;
u32 txlen, tx_blocks = 0, tries, len;
u32 block_retry_cnt = 0;
- dma_addr_t buf_pa;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@@ -1880,8 +1888,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
goto done;
}
- MWIFIEX_SKB_PACB(skb, &buf_pa);
-
/* Wait for the command done interrupt */
do {
if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
@@ -1889,16 +1895,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dev_err(adapter->dev, "%s: Failed to read "
"interrupt status during fw dnld.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
ret = -1;
goto done;
}
} while ((ireg_intr & CPU_INTR_DOOR_BELL) ==
CPU_INTR_DOOR_BELL);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
offset += txlen;
} while (true);
@@ -2338,6 +2343,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
}
adapter->dev = &pdev->dev;
+ adapter->tx_buf_size = card->pcie.tx_buf_size;
strcpy(adapter->fw_name, card->pcie.firmware);
return 0;
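
The pcie.c hunks above replace every open-coded MWIFIEX_SKB_PACB() + pci_unmap_single() pair with a single mwifiex_unmap_pci_memory() call and fetch the bus address back through MWIFIEX_SKB_DMA_ADDR(). The helper itself is defined elsewhere in pcie.c and is not part of this excerpt; a minimal stand-alone sketch of the idea, with simplified stand-in types rather than the kernel's, might look like this:

#include <stdint.h>
#include <stddef.h>

typedef uint64_t dma_addr_t;                /* stand-in */

struct skb_dma_cb {                         /* stand-in for the skb cb area */
    dma_addr_t paddr;                       /* bus address from the map call */
    size_t     len;                         /* length that was mapped */
};

struct sk_buff {                            /* stand-in */
    struct skb_dma_cb cb;
    size_t len;
};

#define MWIFIEX_SKB_DMA_ADDR(skb) ((skb)->cb.paddr)

/* map: remember what was mapped in the skb control buffer ... */
void map_pci_memory_sketch(struct sk_buff *skb, dma_addr_t bus_addr)
{
    skb->cb.paddr = bus_addr;
    skb->cb.len = skb->len;
}

/* ... unmap: read it back instead of recomputing it at every call site;
 * the real helper would call pci_unmap_single(card->dev, cb.paddr, cb.len,
 * direction) here. */
void unmap_pci_memory_sketch(struct sk_buff *skb)
{
    skb->cb.paddr = 0;
    skb->cb.len = 0;
}
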
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index d322ab8604ea..e8ec561f8a64 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -97,6 +97,8 @@
#define MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD 256
/* FW awake cookie after FW ready */
#define FW_AWAKE_COOKIE (0xAA55AA55)
+#define MWIFIEX_DEF_SLEEP_COOKIE 0xBEEFBEEF
+#define MWIFIEX_MAX_DELAY_COUNT 5
struct mwifiex_pcie_card_reg {
u16 cmd_addr_lo;
@@ -195,18 +197,21 @@ struct mwifiex_pcie_device {
const char *firmware;
const struct mwifiex_pcie_card_reg *reg;
u16 blksz_fw_dl;
+ u16 tx_buf_size;
};
static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
.firmware = PCIE8766_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_8766,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
};
static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
.firmware = PCIE8897_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_8897,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
};
struct mwifiex_evt_buf_desc {
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 668547c2de84..7b3af3d29ded 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -591,11 +591,13 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
*chan_tlv_out,
struct mwifiex_chan_scan_param_set *scan_chan_list)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
int ret = 0;
struct mwifiex_chan_scan_param_set *tmp_chan_list;
struct mwifiex_chan_scan_param_set *start_chan;
-
- u32 tlv_idx, rates_size;
+ struct cmd_ctrl_node *cmd_node, *tmp_node;
+ unsigned long flags;
+ u32 tlv_idx, rates_size, cmd_no;
u32 total_scan_time;
u32 done_early;
u8 radio_type;
@@ -733,9 +735,13 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Send the scan command to the firmware with the specified
cfg */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN,
- HostCmd_ACT_GEN_SET, 0,
- scan_cfg_out);
+ if (priv->adapter->ext_scan)
+ cmd_no = HostCmd_CMD_802_11_SCAN_EXT;
+ else
+ cmd_no = HostCmd_CMD_802_11_SCAN;
+
+ ret = mwifiex_send_cmd(priv, cmd_no, HostCmd_ACT_GEN_SET,
+ 0, scan_cfg_out, false);
/* rate IE is updated per scan command but same starting
* pointer is used each time so that rate IE from earlier
@@ -744,8 +750,19 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
scan_cfg_out->tlv_buf_len -=
sizeof(struct mwifiex_ie_types_header) + rates_size;
- if (ret)
+ if (ret) {
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ list_for_each_entry_safe(cmd_node, tmp_node,
+ &adapter->scan_pending_q,
+ list) {
+ list_del(&cmd_node->list);
+ cmd_node->wait_q_enabled = false;
+ mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ }
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
break;
+ }
}
if (ret)
@@ -786,6 +803,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_num_probes *num_probes_tlv;
struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
+ struct mwifiex_ie_types_bssid_list *bssid_tlv;
u8 *tlv_pos;
u32 num_probes;
u32 ssid_len;
@@ -848,6 +866,17 @@ mwifiex_config_scan(struct mwifiex_private *priv,
user_scan_in->specific_bssid,
sizeof(scan_cfg_out->specific_bssid));
+ if (adapter->ext_scan &&
+ !is_zero_ether_addr(scan_cfg_out->specific_bssid)) {
+ bssid_tlv =
+ (struct mwifiex_ie_types_bssid_list *)tlv_pos;
+ bssid_tlv->header.type = cpu_to_le16(TLV_TYPE_BSSID);
+ bssid_tlv->header.len = cpu_to_le16(ETH_ALEN);
+ memcpy(bssid_tlv->bssid, user_scan_in->specific_bssid,
+ ETH_ALEN);
+ tlv_pos += sizeof(struct mwifiex_ie_types_bssid_list);
+ }
+
for (i = 0; i < user_scan_in->num_ssids; i++) {
ssid_len = user_scan_in->ssid_list[i].ssid_len;
@@ -941,7 +970,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
radio_type =
mwifiex_band_to_radio_type(priv->adapter->config_bands);
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
tlv_pos += sizeof(struct mwifiex_ie_types_htcap);
}
@@ -1576,6 +1605,228 @@ done:
return 0;
}
+static int
+mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
+ u32 *bytes_left, u64 fw_tsf, u8 *radio_type,
+ bool ext_scan, s32 rssi_val)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_chan_freq_power *cfp;
+ struct cfg80211_bss *bss;
+ u8 bssid[ETH_ALEN];
+ s32 rssi;
+ const u8 *ie_buf;
+ size_t ie_len;
+ u16 channel = 0;
+ u16 beacon_size = 0;
+ u32 curr_bcn_bytes;
+ u32 freq;
+ u16 beacon_period;
+ u16 cap_info_bitmap;
+ u8 *current_ptr;
+ u64 timestamp;
+ struct mwifiex_fixed_bcn_param *bcn_param;
+ struct mwifiex_bss_priv *bss_priv;
+
+ if (*bytes_left >= sizeof(beacon_size)) {
+ /* Extract & convert beacon size from command buffer */
+ memcpy(&beacon_size, *bss_info, sizeof(beacon_size));
+ *bytes_left -= sizeof(beacon_size);
+ *bss_info += sizeof(beacon_size);
+ }
+
+ if (!beacon_size || beacon_size > *bytes_left) {
+ *bss_info += *bytes_left;
+ *bytes_left = 0;
+ return -EFAULT;
+ }
+
+ /* Initialize the current working beacon pointer for this BSS
+ * iteration
+ */
+ current_ptr = *bss_info;
+
+ /* Advance the return beacon pointer past the current beacon */
+ *bss_info += beacon_size;
+ *bytes_left -= beacon_size;
+
+ curr_bcn_bytes = beacon_size;
+
+ /* First 5 fields are bssid, RSSI(for legacy scan only),
+ * time stamp, beacon interval, and capability information
+ */
+ if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) +
+ sizeof(struct mwifiex_fixed_bcn_param)) {
+ dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
+ return -EFAULT;
+ }
+
+ memcpy(bssid, current_ptr, ETH_ALEN);
+ current_ptr += ETH_ALEN;
+ curr_bcn_bytes -= ETH_ALEN;
+
+ if (!ext_scan) {
+ rssi = (s32) *current_ptr;
+ rssi = (-rssi) * 100; /* Convert dBm to mBm */
+ current_ptr += sizeof(u8);
+ curr_bcn_bytes -= sizeof(u8);
+ dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+ } else {
+ rssi = rssi_val;
+ }
+
+ bcn_param = (struct mwifiex_fixed_bcn_param *)current_ptr;
+ current_ptr += sizeof(*bcn_param);
+ curr_bcn_bytes -= sizeof(*bcn_param);
+
+ timestamp = le64_to_cpu(bcn_param->timestamp);
+ beacon_period = le16_to_cpu(bcn_param->beacon_period);
+
+ cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
+ dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
+ cap_info_bitmap);
+
+ /* Rest of the current buffer are IE's */
+ ie_buf = current_ptr;
+ ie_len = curr_bcn_bytes;
+ dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
+ curr_bcn_bytes);
+
+ while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
+ u8 element_id, element_len;
+
+ element_id = *current_ptr;
+ element_len = *(current_ptr + 1);
+ if (curr_bcn_bytes < element_len +
+ sizeof(struct ieee_types_header)) {
+ dev_err(adapter->dev,
+ "%s: bytes left < IE length\n", __func__);
+ return -EFAULT;
+ }
+ if (element_id == WLAN_EID_DS_PARAMS) {
+ channel = *(current_ptr +
+ sizeof(struct ieee_types_header));
+ break;
+ }
+
+ current_ptr += element_len + sizeof(struct ieee_types_header);
+ curr_bcn_bytes -= element_len +
+ sizeof(struct ieee_types_header);
+ }
+
+ if (channel) {
+ struct ieee80211_channel *chan;
+ u8 band;
+
+ /* Skip entry if on csa closed channel */
+ if (channel == priv->csa_chan) {
+ dev_dbg(adapter->dev,
+ "Dropping entry on csa closed channel\n");
+ return 0;
+ }
+
+ band = BAND_G;
+ if (radio_type)
+ band = mwifiex_radio_type_to_band(*radio_type &
+ (BIT(0) | BIT(1)));
+
+ cfp = mwifiex_get_cfp(priv, band, channel, 0);
+
+ freq = cfp ? cfp->freq : 0;
+
+ chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
+
+ if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
+ bss = cfg80211_inform_bss(priv->wdev->wiphy,
+ chan, bssid, timestamp,
+ cap_info_bitmap, beacon_period,
+ ie_buf, ie_len, rssi, GFP_KERNEL);
+ bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+ bss_priv->band = band;
+ bss_priv->fw_tsf = fw_tsf;
+ if (priv->media_connected &&
+ !memcmp(bssid, priv->curr_bss_params.bss_descriptor
+ .mac_address, ETH_ALEN))
+ mwifiex_update_curr_bss_params(priv, bss);
+ cfg80211_put_bss(priv->wdev->wiphy, bss);
+ }
+ } else {
+ dev_dbg(adapter->dev, "missing BSS channel IE\n");
+ }
+
+ return 0;
+}
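
mwifiex_parse_single_response_buf() above consumes one beacon/probe-response entry per call and advances *bss_info and *bytes_left past it. From its length checks, a legacy entry is a 2-byte beacon_size, the 6-byte BSSID, a 1-byte RSSI (absent on the extended-scan path), the 12-byte fixed beacon parameters, and then the IEs. A stand-alone sketch of that framing, with widths inferred from the code rather than copied from the firmware headers:

#include <stdint.h>

/* Reading aid only: byte arrays keep the on-wire packing and little-endian
 * layout explicit; this is not the driver's struct. */
struct legacy_scan_entry {
    uint8_t beacon_size[2];    /* le16: number of bytes that follow */
    uint8_t bssid[6];
    uint8_t rssi;              /* legacy scan only */
    uint8_t timestamp[8];      /* fixed beacon parameters: le64 TSF ...  */
    uint8_t beacon_period[2];  /* ... le16 interval ...                  */
    uint8_t cap_info[2];       /* ... le16 capabilities                  */
    /* remaining (beacon_size - 19) bytes: information elements */
};

/* IE length for one legacy entry; the extended-scan entry has no RSSI
 * byte, so there it would be beacon_size - 18. */
uint32_t legacy_ie_len(uint16_t beacon_size)
{
    return (uint32_t)beacon_size - (6 + 1 + 8 + 2 + 2);
}
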
+
+static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct cmd_ctrl_node *cmd_node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ if (list_empty(&adapter->scan_pending_q)) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ adapter->scan_processing = false;
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+ /* Need to indicate IOCTL complete */
+ if (adapter->curr_cmd->wait_q_enabled) {
+ adapter->cmd_wait_q.status = 0;
+ if (!priv->scan_request) {
+ dev_dbg(adapter->dev,
+ "complete internal scan\n");
+ mwifiex_complete_cmd(adapter,
+ adapter->curr_cmd);
+ }
+ }
+ if (priv->report_scan_result)
+ priv->report_scan_result = false;
+
+ if (priv->scan_request) {
+ dev_dbg(adapter->dev, "info: notifying scan done\n");
+ cfg80211_scan_done(priv->scan_request, 0);
+ priv->scan_request = NULL;
+ } else {
+ priv->scan_aborting = false;
+ dev_dbg(adapter->dev, "info: scan already aborted\n");
+ }
+ } else {
+ if ((priv->scan_aborting && !priv->scan_request) ||
+ priv->scan_block) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
+ mod_timer(&priv->scan_delay_timer, jiffies);
+ dev_dbg(priv->adapter->dev,
+ "info: %s: triggerring scan abort\n", __func__);
+ } else if (!mwifiex_wmm_lists_empty(adapter) &&
+ (priv->scan_request && (priv->scan_request->flags &
+ NL80211_SCAN_FLAG_LOW_PRIORITY))) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ adapter->scan_delay_cnt = 1;
+ mod_timer(&priv->scan_delay_timer, jiffies +
+ msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+ dev_dbg(priv->adapter->dev,
+ "info: %s: deferring scan\n", __func__);
+ } else {
+ /* Get scan command from scan_pending_q and put to
+ * cmd_pending_q
+ */
+ cmd_node = list_first_entry(&adapter->scan_pending_q,
+ struct cmd_ctrl_node, list);
+ list_del(&cmd_node->list);
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+ true);
+ }
+ }
+
+ return;
+}
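
mwifiex_check_next_scan_command() above picks one of four outcomes after each scan response: finish the scan when the pending queue is empty, trigger an immediate abort, defer a low-priority scan while traffic is queued, or push the next per-channel scan command. A condensed sketch of that branch structure, with plain booleans standing in for the driver state and all locking and queue handling omitted:

#include <stdbool.h>

enum scan_step { SCAN_DONE, SCAN_ABORT_NOW, SCAN_DEFER, SCAN_NEXT_CHANNEL };

enum scan_step next_scan_step(bool pending_empty, bool aborting,
                              bool have_scan_request, bool scan_block,
                              bool low_prio_scan, bool traffic_pending)
{
    if (pending_empty)
        return SCAN_DONE;          /* report to cfg80211 or the ioctl waiter */
    if ((aborting && !have_scan_request) || scan_block)
        return SCAN_ABORT_NOW;     /* scan_delay_cnt = MAX, fire timer now */
    if (traffic_pending && have_scan_request && low_prio_scan)
        return SCAN_DEFER;         /* re-arm after MWIFIEX_SCAN_DELAY_MSEC */
    return SCAN_NEXT_CHANNEL;      /* move next scan cmd to the pending queue */
}
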
+
/*
* This function handles the command response of scan.
*
@@ -1600,7 +1851,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
{
int ret = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- struct cmd_ctrl_node *cmd_node;
struct host_cmd_ds_802_11_scan_rsp *scan_rsp;
struct mwifiex_ie_types_data *tlv_data;
struct mwifiex_ie_types_tsf_timestamp *tsf_tlv;
@@ -1609,12 +1859,11 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
u32 bytes_left;
u32 idx;
u32 tlv_buf_size;
- struct mwifiex_chan_freq_power *cfp;
struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv;
struct chan_band_param_set *chan_band;
u8 is_bgscan_resp;
- unsigned long flags;
- struct cfg80211_bss *bss;
+ __le64 fw_tsf = 0;
+ u8 *radio_type;
is_bgscan_resp = (le16_to_cpu(resp->command)
== HostCmd_CMD_802_11_BG_SCAN_QUERY);
@@ -1676,220 +1925,194 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
&chan_band_tlv);
for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
- u8 bssid[ETH_ALEN];
- s32 rssi;
- const u8 *ie_buf;
- size_t ie_len;
- u16 channel = 0;
- __le64 fw_tsf = 0;
- u16 beacon_size = 0;
- u32 curr_bcn_bytes;
- u32 freq;
- u16 beacon_period;
- u16 cap_info_bitmap;
- u8 *current_ptr;
- u64 timestamp;
- struct mwifiex_bcn_param *bcn_param;
- struct mwifiex_bss_priv *bss_priv;
-
- if (bytes_left >= sizeof(beacon_size)) {
- /* Extract & convert beacon size from command buffer */
- memcpy(&beacon_size, bss_info, sizeof(beacon_size));
- bytes_left -= sizeof(beacon_size);
- bss_info += sizeof(beacon_size);
- }
+ /*
+ * If the TSF TLV was appended to the scan results, save this
+ * entry's TSF value in the fw_tsf field. It is the firmware's
+ * TSF value at the time the beacon or probe response was
+ * received.
+ */
+ if (tsf_tlv)
+ memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
+ sizeof(fw_tsf));
- if (!beacon_size || beacon_size > bytes_left) {
- bss_info += bytes_left;
- bytes_left = 0;
- ret = -1;
- goto check_next_scan;
+ if (chan_band_tlv) {
+ chan_band = &chan_band_tlv->chan_band_param[idx];
+ radio_type = &chan_band->radio_type;
+ } else {
+ radio_type = NULL;
}
- /* Initialize the current working beacon pointer for this BSS
- * iteration */
- current_ptr = bss_info;
+ ret = mwifiex_parse_single_response_buf(priv, &bss_info,
+ &bytes_left,
+ le64_to_cpu(fw_tsf),
+ radio_type, false, 0);
+ if (ret)
+ goto check_next_scan;
+ }
- /* Advance the return beacon pointer past the current beacon */
- bss_info += beacon_size;
- bytes_left -= beacon_size;
+check_next_scan:
+ mwifiex_check_next_scan_command(priv);
+ return ret;
+}
- curr_bcn_bytes = beacon_size;
+/*
+ * This function prepares an extended scan command to be sent to the firmware.
+ *
+ * It uses the scan command configuration sent to the command processing
+ * module in the command preparation stage to configure an extended scan
+ * command structure to send to the firmware.
+ */
+int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf)
+{
+ struct host_cmd_ds_802_11_scan_ext *ext_scan = &cmd->params.ext_scan;
+ struct mwifiex_scan_cmd_config *scan_cfg = data_buf;
- /*
- * First 5 fields are bssid, RSSI, time stamp, beacon interval,
- * and capability information
- */
- if (curr_bcn_bytes < sizeof(struct mwifiex_bcn_param)) {
- dev_err(adapter->dev,
- "InterpretIE: not enough bytes left\n");
- continue;
- }
- bcn_param = (struct mwifiex_bcn_param *)current_ptr;
- current_ptr += sizeof(*bcn_param);
- curr_bcn_bytes -= sizeof(*bcn_param);
+ memcpy(ext_scan->tlv_buffer, scan_cfg->tlv_buf, scan_cfg->tlv_buf_len);
- memcpy(bssid, bcn_param->bssid, ETH_ALEN);
+ cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SCAN_EXT);
- rssi = (s32) bcn_param->rssi;
- rssi = (-rssi) * 100; /* Convert dBm to mBm */
- dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+ /* Size is equal to the sizeof(fixed portions) + the TLV len + header */
+ cmd->size = cpu_to_le16((u16)(sizeof(ext_scan->reserved)
+ + scan_cfg->tlv_buf_len + S_DS_GEN));
- timestamp = le64_to_cpu(bcn_param->timestamp);
- beacon_period = le16_to_cpu(bcn_param->beacon_period);
+ return 0;
+}
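
The size written into cmd->size above is sizeof(ext_scan->reserved) plus the TLV buffer length plus S_DS_GEN (the generic host command header). Neither constant is visible in this hunk; assuming a 4-byte reserved field and an 8-byte generic header purely for illustration, a 120-byte TLV buffer gives:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint16_t reserved_len = 4;    /* sizeof(ext_scan->reserved), assumed */
    const uint16_t s_ds_gen = 8;        /* generic command header, assumed */
    const uint16_t tlv_buf_len = 120;   /* scan_cfg->tlv_buf_len for this scan */

    printf("HostCmd_CMD_802_11_SCAN_EXT size = %d bytes\n",
           reserved_len + tlv_buf_len + s_ds_gen);   /* 132 */
    return 0;
}
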
- cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
- dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
- cap_info_bitmap);
+/* This function handles the command response of extended scan */
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
+{
+ dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+ return 0;
+}
- /* Rest of the current buffer are IE's */
- ie_buf = current_ptr;
- ie_len = curr_bcn_bytes;
- dev_dbg(adapter->dev,
- "info: InterpretIE: IELength for this AP = %d\n",
- curr_bcn_bytes);
+/* This function handles the extended scan report event. It
+ * parses extended scan results and informs the cfg80211 stack.
+ */
+int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
+ void *buf)
+{
+ int ret = 0;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 *bss_info;
+ u32 bytes_left, bytes_left_for_tlv, idx;
+ u16 type, len;
+ struct mwifiex_ie_types_data *tlv;
+ struct mwifiex_ie_types_bss_scan_rsp *scan_rsp_tlv;
+ struct mwifiex_ie_types_bss_scan_info *scan_info_tlv;
+ u8 *radio_type;
+ u64 fw_tsf = 0;
+ s32 rssi = 0;
+ struct mwifiex_event_scan_result *event_scan = buf;
+ u8 num_of_set = event_scan->num_of_set;
+ u8 *scan_resp = buf + sizeof(struct mwifiex_event_scan_result);
+ u16 scan_resp_size = le16_to_cpu(event_scan->buf_size);
+
+ if (num_of_set > MWIFIEX_MAX_AP) {
+ dev_err(adapter->dev,
+ "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
+ num_of_set);
+ ret = -1;
+ goto check_next_scan;
+ }
- while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
- u8 element_id, element_len;
+ bytes_left = scan_resp_size;
+ dev_dbg(adapter->dev,
+ "EXT_SCAN: size %d, returned %d APs...",
+ scan_resp_size, num_of_set);
- element_id = *current_ptr;
- element_len = *(current_ptr + 1);
- if (curr_bcn_bytes < element_len +
- sizeof(struct ieee_types_header)) {
- dev_err(priv->adapter->dev,
- "%s: bytes left < IE length\n",
- __func__);
- goto check_next_scan;
- }
- if (element_id == WLAN_EID_DS_PARAMS) {
- channel = *(current_ptr + sizeof(struct ieee_types_header));
- break;
- }
+ tlv = (struct mwifiex_ie_types_data *)scan_resp;
- current_ptr += element_len +
- sizeof(struct ieee_types_header);
- curr_bcn_bytes -= element_len +
- sizeof(struct ieee_types_header);
+ for (idx = 0; idx < num_of_set && bytes_left; idx++) {
+ type = le16_to_cpu(tlv->header.type);
+ len = le16_to_cpu(tlv->header.len);
+ if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) {
+ dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n");
+ break;
}
+ scan_rsp_tlv = NULL;
+ scan_info_tlv = NULL;
+ bytes_left_for_tlv = bytes_left;
- /*
- * If the TSF TLV was appended to the scan results, save this
- * entry's TSF value in the fw_tsf field. It is the firmware's
- * TSF value at the time the beacon or probe response was
- * received.
+ /* BSS response TLV with beacon or probe response buffer
+ * at the initial position of each descriptor
*/
- if (tsf_tlv)
- memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
- sizeof(fw_tsf));
-
- if (channel) {
- struct ieee80211_channel *chan;
- u8 band;
+ if (type != TLV_TYPE_BSS_SCAN_RSP)
+ break;
- /* Skip entry if on csa closed channel */
- if (channel == priv->csa_chan) {
- dev_dbg(adapter->dev,
- "Dropping entry on csa closed channel\n");
+ bss_info = (u8 *)tlv;
+ scan_rsp_tlv = (struct mwifiex_ie_types_bss_scan_rsp *)tlv;
+ tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
+ bytes_left_for_tlv -=
+ (len + sizeof(struct mwifiex_ie_types_header));
+
+ while (bytes_left_for_tlv >=
+ sizeof(struct mwifiex_ie_types_header) &&
+ le16_to_cpu(tlv->header.type) != TLV_TYPE_BSS_SCAN_RSP) {
+ type = le16_to_cpu(tlv->header.type);
+ len = le16_to_cpu(tlv->header.len);
+ if (bytes_left_for_tlv <
+ sizeof(struct mwifiex_ie_types_header) + len) {
+ dev_err(adapter->dev,
+ "EXT_SCAN: Error in processing TLV, bytes left < TLV length\n");
+ scan_rsp_tlv = NULL;
+ bytes_left_for_tlv = 0;
continue;
}
-
- band = BAND_G;
- if (chan_band_tlv) {
- chan_band =
- &chan_band_tlv->chan_band_param[idx];
- band = mwifiex_radio_type_to_band(
- chan_band->radio_type
- & (BIT(0) | BIT(1)));
- }
-
- cfp = mwifiex_get_cfp(priv, band, channel, 0);
-
- freq = cfp ? cfp->freq : 0;
-
- chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
-
- if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
- bss = cfg80211_inform_bss(priv->wdev->wiphy,
- chan, bssid, timestamp,
- cap_info_bitmap, beacon_period,
- ie_buf, ie_len, rssi, GFP_KERNEL);
- bss_priv = (struct mwifiex_bss_priv *)bss->priv;
- bss_priv->band = band;
- bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
- if (priv->media_connected &&
- !memcmp(bssid,
- priv->curr_bss_params.bss_descriptor
- .mac_address, ETH_ALEN))
- mwifiex_update_curr_bss_params(priv,
- bss);
- cfg80211_put_bss(priv->wdev->wiphy, bss);
+ switch (type) {
+ case TLV_TYPE_BSS_SCAN_INFO:
+ scan_info_tlv =
+ (struct mwifiex_ie_types_bss_scan_info *)tlv;
+ if (len !=
+ sizeof(struct mwifiex_ie_types_bss_scan_info) -
+ sizeof(struct mwifiex_ie_types_header)) {
+ bytes_left_for_tlv = 0;
+ continue;
+ }
+ break;
+ default:
+ break;
}
- } else {
- dev_dbg(adapter->dev, "missing BSS channel IE\n");
+ tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
+ bytes_left -=
+ (len + sizeof(struct mwifiex_ie_types_header));
+ bytes_left_for_tlv -=
+ (len + sizeof(struct mwifiex_ie_types_header));
}
- }
-check_next_scan:
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- if (list_empty(&adapter->scan_pending_q)) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
- adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ if (!scan_rsp_tlv)
+ break;
- /* Need to indicate IOCTL complete */
- if (adapter->curr_cmd->wait_q_enabled) {
- adapter->cmd_wait_q.status = 0;
- if (!priv->scan_request) {
- dev_dbg(adapter->dev,
- "complete internal scan\n");
- mwifiex_complete_cmd(adapter,
- adapter->curr_cmd);
- }
- }
- if (priv->report_scan_result)
- priv->report_scan_result = false;
+ /* Advance pointer to the beacon buffer length and
+ * update the bytes count so that the function
+ * wlan_interpret_bss_desc_with_ie() can handle the
+ * scan buffer without any change
+ */
+ bss_info += sizeof(u16);
+ bytes_left -= sizeof(u16);
- if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: notifying scan done\n");
- cfg80211_scan_done(priv->scan_request, 0);
- priv->scan_request = NULL;
- } else {
- priv->scan_aborting = false;
- dev_dbg(adapter->dev, "info: scan already aborted\n");
- }
- } else {
- if ((priv->scan_aborting && !priv->scan_request) ||
- priv->scan_block) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
- mod_timer(&priv->scan_delay_timer, jiffies);
- dev_dbg(priv->adapter->dev,
- "info: %s: triggerring scan abort\n", __func__);
- } else if (!mwifiex_wmm_lists_empty(adapter) &&
- (priv->scan_request && (priv->scan_request->flags &
- NL80211_SCAN_FLAG_LOW_PRIORITY))) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- adapter->scan_delay_cnt = 1;
- mod_timer(&priv->scan_delay_timer, jiffies +
- msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
- dev_dbg(priv->adapter->dev,
- "info: %s: deferring scan\n", __func__);
+ if (scan_info_tlv) {
+ rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi));
+ rssi *= 100; /* Convert dBm to mBm */
+ dev_dbg(adapter->dev,
+ "info: InterpretIE: RSSI=%d\n", rssi);
+ fw_tsf = le64_to_cpu(scan_info_tlv->tsf);
+ radio_type = &scan_info_tlv->radio_type;
} else {
- /* Get scan command from scan_pending_q and put to
- cmd_pending_q */
- cmd_node = list_first_entry(&adapter->scan_pending_q,
- struct cmd_ctrl_node, list);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
- true);
+ radio_type = NULL;
}
+ ret = mwifiex_parse_single_response_buf(priv, &bss_info,
+ &bytes_left, fw_tsf,
+ radio_type, true, rssi);
+ if (ret)
+ goto check_next_scan;
}
+check_next_scan:
+ if (!event_scan->more_event)
+ mwifiex_check_next_scan_command(priv);
+
return ret;
}
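
mwifiex_handle_event_ext_scan_report() above walks num_of_set descriptors, each opening with a TLV_TYPE_BSS_SCAN_RSP TLV that carries the raw beacon or probe response, optionally followed by a TLV_TYPE_BSS_SCAN_INFO TLV holding the RSSI, TSF and radio type for that BSS. A simplified, flat version of that walk; the TLV header layout and type values here are placeholders, not the driver's definitions:

#include <stdint.h>
#include <string.h>

struct tlv_hdr { uint16_t type; uint16_t len; };   /* placeholder layout */

#define TLV_BSS_SCAN_RSP  1   /* placeholder value */
#define TLV_BSS_SCAN_INFO 2   /* placeholder value */

void walk_ext_scan_event(const uint8_t *buf, uint32_t bytes_left)
{
    while (bytes_left >= sizeof(struct tlv_hdr)) {
        struct tlv_hdr hdr;

        memcpy(&hdr, buf, sizeof(hdr));
        if (bytes_left < sizeof(hdr) + hdr.len)
            break;                       /* truncated TLV, stop */
        if (hdr.type == TLV_BSS_SCAN_RSP) {
            /* hdr.len bytes of beacon_size + beacon follow; feed them
             * to the single-response parser */
        } else if (hdr.type == TLV_BSS_SCAN_INFO) {
            /* RSSI, TSF and radio type for the preceding BSS */
        }
        buf += sizeof(hdr) + hdr.len;
        bytes_left -= sizeof(hdr) + hdr.len;
    }
}
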
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index b44a31523461..d206f04d4994 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -84,6 +84,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->mp_agg_pkt_limit = data->mp_agg_pkt_limit;
card->supports_sdio_new_mode = data->supports_sdio_new_mode;
card->has_control_mask = data->has_control_mask;
+ card->tx_buf_size = data->tx_buf_size;
}
sdio_claim_host(func);
@@ -165,7 +166,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
struct mwifiex_private *priv;
- int i;
pr_debug("info: SDIO func num=%d\n", func->num);
@@ -184,11 +184,7 @@ mwifiex_sdio_remove(struct sdio_func *func)
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);
- for (i = 0; i < adapter->priv_num; i++)
- if ((GET_BSS_ROLE(adapter->priv[i]) ==
- MWIFIEX_BSS_ROLE_STA) &&
- adapter->priv[i]->media_connected)
- mwifiex_deauthenticate(adapter->priv[i], NULL);
+ mwifiex_deauthenticate_all(adapter);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
mwifiex_disable_auto_ds(priv);
@@ -241,6 +237,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
/* Enable the Host Sleep */
if (!mwifiex_enable_hs(adapter)) {
dev_err(adapter->dev, "cmd: failed to suspend\n");
+ adapter->hs_enabling = false;
return -EFAULT;
}
@@ -249,6 +246,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
/* Indicate device suspended */
adapter->is_suspended = true;
+ adapter->hs_enabling = false;
return ret;
}
@@ -1760,6 +1758,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
/* save adapter pointer in card */
card->adapter = adapter;
+ adapter->tx_buf_size = card->tx_buf_size;
sdio_claim_host(func);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 532ae0ac4dfb..c71201b2e2a3 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -233,6 +233,7 @@ struct sdio_mmc_card {
u8 mp_agg_pkt_limit;
bool supports_sdio_new_mode;
bool has_control_mask;
+ u16 tx_buf_size;
u32 mp_rd_bitmap;
u32 mp_wr_bitmap;
@@ -256,6 +257,7 @@ struct mwifiex_sdio_device {
u8 mp_agg_pkt_limit;
bool supports_sdio_new_mode;
bool has_control_mask;
+ u16 tx_buf_size;
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -312,6 +314,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.mp_agg_pkt_limit = 8,
.supports_sdio_new_mode = false,
.has_control_mask = true,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -321,6 +324,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.mp_agg_pkt_limit = 8,
.supports_sdio_new_mode = false,
.has_control_mask = true,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -330,6 +334,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.mp_agg_pkt_limit = 8,
.supports_sdio_new_mode = false,
.has_control_mask = true,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -339,6 +344,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.mp_agg_pkt_limit = 16,
.supports_sdio_new_mode = true,
.has_control_mask = false,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
};
/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 9208a8816b80..e3cac1495cc7 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -185,6 +185,13 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
i++)
rate_scope->ht_mcs_rate_bitmap[i] =
cpu_to_le16(pbitmap_rates[2 + i]);
+ if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
+ for (i = 0;
+ i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
+ i++)
+ rate_scope->vht_mcs_rate_bitmap[i] =
+ cpu_to_le16(pbitmap_rates[10 + i]);
+ }
} else {
rate_scope->hr_dsss_rate_bitmap =
cpu_to_le16(priv->bitmap_rates[0]);
@@ -195,6 +202,13 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
i++)
rate_scope->ht_mcs_rate_bitmap[i] =
cpu_to_le16(priv->bitmap_rates[2 + i]);
+ if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
+ for (i = 0;
+ i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
+ i++)
+ rate_scope->vht_mcs_rate_bitmap[i] =
+ cpu_to_le16(priv->bitmap_rates[10 + i]);
+ }
}
rate_drop = (struct mwifiex_rate_drop_pattern *) ((u8 *) rate_scope +
@@ -532,8 +546,228 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
return 0;
}
+/* This function populates key material v2 command
+ * to set network key for AES & CMAC AES.
+ */
+static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ struct mwifiex_ds_encrypt_key *enc_key,
+ struct host_cmd_ds_802_11_key_material_v2 *km)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u16 size, len = KEY_PARAMS_FIXED_LEN;
+
+ if (enc_key->is_igtk_key) {
+ dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.cmac_aes.ipn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_info &= cpu_to_le16(~KEY_MCAST);
+ km->key_param_set.key_info |= cpu_to_le16(KEY_IGTK);
+ km->key_param_set.key_type = KEY_TYPE_ID_AES_CMAC;
+ km->key_param_set.key_params.cmac_aes.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.cmac_aes.key,
+ enc_key->key_material, enc_key->key_len);
+ len += sizeof(struct mwifiex_cmac_aes_param);
+ } else {
+ dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.aes.pn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_type = KEY_TYPE_ID_AES;
+ km->key_param_set.key_params.aes.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.aes.key,
+ enc_key->key_material, enc_key->key_len);
+ len += sizeof(struct mwifiex_aes_param);
+ }
+
+ km->key_param_set.len = cpu_to_le16(len);
+ size = len + sizeof(struct mwifiex_ie_types_header) +
+ sizeof(km->action) + S_DS_GEN;
+ cmd->size = cpu_to_le16(size);
+
+ return 0;
+}
+
+/* This function prepares command to set/get/reset network key(s).
+ * This function prepares key material command for V2 format.
+ * Preparation includes -
+ * - Setting command ID, action and proper size
+ * - Setting WEP keys, WAPI keys or WPA keys along with required
+ * encryption (TKIP, AES) (as required)
+ * - Ensuring correct endian-ness
+ */
+static int
+mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 *mac = enc_key->mac_addr;
+ u16 key_info, len = KEY_PARAMS_FIXED_LEN;
+ struct host_cmd_ds_802_11_key_material_v2 *km =
+ &cmd->params.key_material_v2;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
+ km->action = cpu_to_le16(cmd_action);
+
+ if (cmd_action == HostCmd_ACT_GEN_GET) {
+ dev_dbg(adapter->dev, "%s: Get key\n", __func__);
+ km->key_param_set.key_idx =
+ enc_key->key_index & KEY_INDEX_MASK;
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+
+ if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
+ key_info = KEY_UNICAST;
+ else
+ key_info = KEY_MCAST;
+
+ if (enc_key->is_igtk_key)
+ key_info |= KEY_IGTK;
+
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ S_DS_GEN + KEY_PARAMS_FIXED_LEN +
+ sizeof(km->action));
+ return 0;
+ }
+
+ memset(&km->key_param_set, 0,
+ sizeof(struct mwifiex_ie_type_key_param_set_v2));
+
+ if (enc_key->key_disable) {
+ dev_dbg(adapter->dev, "%s: Remove key\n", __func__);
+ km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE);
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
+ km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
+ key_info = KEY_MCAST | KEY_UNICAST;
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ S_DS_GEN + KEY_PARAMS_FIXED_LEN +
+ sizeof(km->action));
+ return 0;
+ }
+
+ km->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
+ km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ key_info = KEY_ENABLED;
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+
+ if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) {
+ dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__);
+ len += sizeof(struct mwifiex_wep_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ km->key_param_set.key_type = KEY_TYPE_ID_WEP;
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ key_info |= KEY_MCAST | KEY_UNICAST;
+ } else {
+ if (enc_key->is_current_wep_key) {
+ key_info |= KEY_MCAST | KEY_UNICAST;
+ if (km->key_param_set.key_idx ==
+ (priv->wep_key_curr_index & KEY_INDEX_MASK))
+ key_info |= KEY_DEFAULT;
+ } else {
+ if (mac) {
+ if (is_broadcast_ether_addr(mac))
+ key_info |= KEY_MCAST;
+ else
+ key_info |= KEY_UNICAST |
+ KEY_DEFAULT;
+ } else {
+ key_info |= KEY_MCAST;
+ }
+ }
+ }
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ km->key_param_set.key_params.wep.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.wep.key,
+ enc_key->key_material, enc_key->key_len);
+
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ return 0;
+ }
+
+ if (is_broadcast_ether_addr(mac))
+ key_info |= KEY_MCAST | KEY_RX_KEY;
+ else
+ key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY;
+
+ if (enc_key->is_wapi_key) {
+ dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__);
+ km->key_param_set.key_type = KEY_TYPE_ID_WAPI;
+ memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn,
+ PN_LEN);
+ km->key_param_set.key_params.wapi.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.wapi.key,
+ enc_key->key_material, enc_key->key_len);
+ if (is_broadcast_ether_addr(mac))
+ priv->sec_info.wapi_key_on = true;
+
+ if (!priv->sec_info.wapi_key_on)
+ key_info |= KEY_DEFAULT;
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ len += sizeof(struct mwifiex_wapi_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ return 0;
+ }
+
+ if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+ key_info |= KEY_DEFAULT;
+ /* Enable unicast bit for WPA-NONE/ADHOC_AES */
+ if (!priv->sec_info.wpa2_enabled &&
+ !is_broadcast_ether_addr(mac))
+ key_info |= KEY_UNICAST;
+ } else {
+ /* Enable default key for WPA/WPA2 */
+ if (!priv->wpa_is_gtk_set)
+ key_info |= KEY_DEFAULT;
+ }
+
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ if (enc_key->key_len == WLAN_KEY_LEN_CCMP)
+ return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km);
+
+ if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
+ dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.tkip.pn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_type = KEY_TYPE_ID_TKIP;
+ km->key_param_set.key_params.tkip.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.tkip.key,
+ enc_key->key_material, enc_key->key_len);
+
+ len += sizeof(struct mwifiex_tkip_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ }
+
+ return 0;
+}
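
In the v2 command above, key_param_set.key_info encodes the key's role: KEY_ENABLED is always set, group keys get KEY_MCAST plus KEY_RX_KEY, pairwise keys get KEY_UNICAST with both KEY_TX_KEY and KEY_RX_KEY, the default WEP or group key adds KEY_DEFAULT, and the CMAC-AES (IGTK) path swaps the multicast bit for KEY_IGTK. A compact restatement of that logic; the bit values are placeholders, the real flags live in the driver's fw.h:

#include <stdint.h>
#include <stdbool.h>

/* placeholder bit values */
#define KEY_ENABLED  (1 << 0)
#define KEY_DEFAULT  (1 << 1)
#define KEY_TX_KEY   (1 << 2)
#define KEY_RX_KEY   (1 << 3)
#define KEY_MCAST    (1 << 4)
#define KEY_UNICAST  (1 << 5)
#define KEY_IGTK     (1 << 6)

uint16_t key_info_for(bool group, bool igtk, bool dflt)
{
    uint16_t info = KEY_ENABLED;

    if (group)
        info |= KEY_MCAST | KEY_RX_KEY;
    else
        info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY;
    if (dflt)
        info |= KEY_DEFAULT;
    if (igtk) {
        info &= ~KEY_MCAST;        /* as in mwifiex_set_aes_key_v2() above */
        info |= KEY_IGTK;
    }
    return info;
}
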
+
/*
* This function prepares command to set/get/reset network key(s).
+ * This function prepares key material command for V1 format.
*
* Preparation includes -
* - Setting command ID, action and proper size
@@ -542,10 +776,10 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
* - Ensuring correct endian-ness
*/
static int
-mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd,
- u16 cmd_action, u32 cmd_oid,
- struct mwifiex_ds_encrypt_key *enc_key)
+mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
{
struct host_cmd_ds_802_11_key_material *key_material =
&cmd->params.key_material;
@@ -724,6 +958,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
return ret;
}
+/* Wrapper function for setting network key depending upon FW KEY API version */
+static int
+mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
+{
+ if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ return mwifiex_cmd_802_11_key_material_v2(priv, cmd,
+ cmd_action, cmd_oid,
+ enc_key);
+
+ else
+ return mwifiex_cmd_802_11_key_material_v1(priv, cmd,
+ cmd_action, cmd_oid,
+ enc_key);
+}
+
/*
* This function prepares command to set/get 11d domain information.
*
@@ -1173,9 +1425,9 @@ int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
/* property header is 6 bytes, data must fit in cmd buffer */
if (prop && prop->value && prop->length > 6 &&
prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) {
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
- HostCmd_ACT_GEN_SET, 0,
- prop);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
+ HostCmd_ACT_GEN_SET, 0,
+ prop, true);
if (ret)
return ret;
}
@@ -1280,6 +1532,127 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
return 0;
}
+static int
+mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf)
+{
+ struct host_cmd_ds_tdls_oper *tdls_oper = &cmd->params.tdls_oper;
+ struct mwifiex_ds_tdls_oper *oper = data_buf;
+ struct mwifiex_sta_node *sta_ptr;
+ struct host_cmd_tlv_rates *tlv_rates;
+ struct mwifiex_ie_types_htcap *ht_capab;
+ struct mwifiex_ie_types_qos_info *wmm_qos_info;
+ struct mwifiex_ie_types_extcap *extcap;
+ struct mwifiex_ie_types_vhtcap *vht_capab;
+ struct mwifiex_ie_types_aid *aid;
+ u8 *pos, qos_info;
+ u16 config_len = 0;
+ struct station_parameters *params = priv->sta_params;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_OPER);
+ cmd->size = cpu_to_le16(S_DS_GEN);
+ le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_tdls_oper));
+
+ tdls_oper->reason = 0;
+ memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN);
+ sta_ptr = mwifiex_get_sta_entry(priv, oper->peer_mac);
+
+ pos = (u8 *)tdls_oper + sizeof(struct host_cmd_ds_tdls_oper);
+
+ switch (oper->tdls_action) {
+ case MWIFIEX_TDLS_DISABLE_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_DELETE);
+ break;
+ case MWIFIEX_TDLS_CREATE_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CREATE);
+ break;
+ case MWIFIEX_TDLS_CONFIG_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG);
+
+ if (!params) {
+ dev_err(priv->adapter->dev,
+ "TDLS config params not available for %pM\n",
+ oper->peer_mac);
+ return -ENODATA;
+ }
+
+ *(__le16 *)pos = cpu_to_le16(params->capability);
+ config_len += sizeof(params->capability);
+
+ qos_info = params->uapsd_queues | (params->max_sp << 5);
+ wmm_qos_info = (struct mwifiex_ie_types_qos_info *)(pos +
+ config_len);
+ wmm_qos_info->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA);
+ wmm_qos_info->header.len = cpu_to_le16(sizeof(qos_info));
+ wmm_qos_info->qos_info = qos_info;
+ config_len += sizeof(struct mwifiex_ie_types_qos_info);
+
+ if (params->ht_capa) {
+ ht_capab = (struct mwifiex_ie_types_htcap *)(pos +
+ config_len);
+ ht_capab->header.type =
+ cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ ht_capab->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ memcpy(&ht_capab->ht_cap, params->ht_capa,
+ sizeof(struct ieee80211_ht_cap));
+ config_len += sizeof(struct mwifiex_ie_types_htcap);
+ }
+
+ if (params->supported_rates && params->supported_rates_len) {
+ tlv_rates = (struct host_cmd_tlv_rates *)(pos +
+ config_len);
+ tlv_rates->header.type =
+ cpu_to_le16(WLAN_EID_SUPP_RATES);
+ tlv_rates->header.len =
+ cpu_to_le16(params->supported_rates_len);
+ memcpy(tlv_rates->rates, params->supported_rates,
+ params->supported_rates_len);
+ config_len += sizeof(struct host_cmd_tlv_rates) +
+ params->supported_rates_len;
+ }
+
+ if (params->ext_capab && params->ext_capab_len) {
+ extcap = (struct mwifiex_ie_types_extcap *)(pos +
+ config_len);
+ extcap->header.type =
+ cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
+ extcap->header.len = cpu_to_le16(params->ext_capab_len);
+ memcpy(extcap->ext_capab, params->ext_capab,
+ params->ext_capab_len);
+ config_len += sizeof(struct mwifiex_ie_types_extcap) +
+ params->ext_capab_len;
+ }
+ if (params->vht_capa) {
+ vht_capab = (struct mwifiex_ie_types_vhtcap *)(pos +
+ config_len);
+ vht_capab->header.type =
+ cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
+ vht_capab->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_vht_cap));
+ memcpy(&vht_capab->vht_cap, params->vht_capa,
+ sizeof(struct ieee80211_vht_cap));
+ config_len += sizeof(struct mwifiex_ie_types_vhtcap);
+ }
+ if (params->aid) {
+ aid = (struct mwifiex_ie_types_aid *)(pos + config_len);
+ aid->header.type = cpu_to_le16(WLAN_EID_AID);
+ aid->header.len = cpu_to_le16(sizeof(params->aid));
+ aid->aid = cpu_to_le16(params->aid);
+ config_len += sizeof(struct mwifiex_ie_types_aid);
+ }
+
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
+ return -ENOTSUPP;
+ }
+
+ le16_add_cpu(&cmd->size, config_len);
+
+ return 0;
+}
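
For MWIFIEX_TDLS_CONFIG_LINK the function above appends the peer capability field and then a run of variable-length TLVs (QoS info, HT capabilities, supported rates, extended capabilities, VHT capabilities, AID) straight after the fixed host_cmd_ds_tdls_oper body, finally growing cmd->size by the accumulated config_len. The generic append-and-account step looks like the following, with a stand-in TLV header and the little-endian conversions omitted:

#include <stdint.h>
#include <string.h>

struct tlv_hdr { uint16_t type; uint16_t len; };   /* stand-in header */

/* Copy one TLV at pos + config_len and return the new config_len; the
 * caller adds the final total to cmd->size (le16_add_cpu in the driver). */
uint16_t append_tlv(uint8_t *pos, uint16_t config_len,
                    uint16_t type, const void *data, uint16_t len)
{
    struct tlv_hdr hdr = { .type = type, .len = len };

    memcpy(pos + config_len, &hdr, sizeof(hdr));
    memcpy(pos + config_len + sizeof(hdr), data, len);
    return config_len + sizeof(hdr) + len;
}
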
/*
* This function prepares the commands before sending them to the firmware.
*
@@ -1472,6 +1845,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_ibss_coalescing_status(cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_802_11_SCAN_EXT:
+ ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf);
+ break;
case HostCmd_CMD_MAC_REG_ACCESS:
case HostCmd_CMD_BBP_REG_ACCESS:
case HostCmd_CMD_RF_REG_ACCESS:
@@ -1507,6 +1883,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_TDLS_OPER:
+ ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1547,15 +1926,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (first_sta) {
if (priv->adapter->iface_type == MWIFIEX_PCIE) {
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_PCIE_DESC_DETAILS,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv,
+ HostCmd_CMD_PCIE_DESC_DETAILS,
+ HostCmd_ACT_GEN_SET, 0, NULL,
+ true);
if (ret)
return -1;
}
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_FUNC_INIT,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_FUNC_INIT,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
if (ret)
return -1;
@@ -1573,55 +1953,57 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
}
if (adapter->cal_data) {
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
+ HostCmd_ACT_GEN_SET, 0, NULL,
+ true);
if (ret)
return -1;
}
/* Read MAC address from HW */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_GET_HW_SPEC,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
if (ret)
return -1;
/* Reconfigure tx buf size */
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_RECONFIGURE_TX_BUFF,
- HostCmd_ACT_GEN_SET, 0,
- &priv->adapter->tx_buf_size);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->adapter->tx_buf_size, true);
if (ret)
return -1;
if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
/* Enable IEEE PS by default */
priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
- ret = mwifiex_send_cmd_sync(
- priv, HostCmd_CMD_802_11_PS_MODE_ENH,
- EN_AUTO_PS, BITMAP_STA_PS, NULL);
+ ret = mwifiex_send_cmd(priv,
+ HostCmd_CMD_802_11_PS_MODE_ENH,
+ EN_AUTO_PS, BITMAP_STA_PS, NULL,
+ true);
if (ret)
return -1;
}
}
/* get tx rate */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_TX_RATE_CFG,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
if (ret)
return -1;
priv->data_rate = 0;
/* get tx power */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_TX_PWR,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_RF_TX_PWR,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
if (ret)
return -1;
if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
/* set ibss coalescing_status */
- ret = mwifiex_send_cmd_sync(
- priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
- HostCmd_ACT_GEN_SET, 0, &enable);
+ ret = mwifiex_send_cmd(
+ priv,
+ HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
+ HostCmd_ACT_GEN_SET, 0, &enable, true);
if (ret)
return -1;
}
@@ -1629,16 +2011,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
amsdu_aggr_ctrl.enable = true;
/* Send request to firmware */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
- HostCmd_ACT_GEN_SET, 0,
- &amsdu_aggr_ctrl);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
+ HostCmd_ACT_GEN_SET, 0,
+ &amsdu_aggr_ctrl, true);
if (ret)
return -1;
/* MAC Control must be the last command in init_fw */
/* set MAC Control */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &priv->curr_pkt_filter);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter, true);
if (ret)
return -1;
@@ -1647,10 +2029,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
/* Enable auto deep sleep */
auto_ds.auto_ds = DEEP_SLEEP_ON;
auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_PS_MODE_ENH,
- EN_AUTO_PS, BITMAP_AUTO_DS,
- &auto_ds);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ EN_AUTO_PS, BITMAP_AUTO_DS,
+ &auto_ds, true);
if (ret)
return -1;
}
@@ -1658,9 +2039,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
/* Send cmd to FW to enable/disable 11D function */
state_11d = ENABLE_11D;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, DOT11D_I,
- &state_11d);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, DOT11D_I,
+ &state_11d, true);
if (ret)
dev_err(priv->adapter->dev,
"11D: failed to enable 11D\n");
@@ -1673,8 +2054,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
* (Short GI, Channel BW, Green field support etc.) for transmit
*/
tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_11N_CFG,
- HostCmd_ACT_GEN_SET, 0, &tx_cfg);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_CFG,
+ HostCmd_ACT_GEN_SET, 0, &tx_cfg, true);
ret = -EINPROGRESS;
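
The sta_cmd.c hunks above (and the sta_cmdresp.c and sta_event.c ones below) fold the old mwifiex_send_cmd_sync()/mwifiex_send_cmd_async() pair into one mwifiex_send_cmd() that takes a trailing synchronous flag. The prototype is not part of this diff; judging purely from the call sites it is roughly the following sketch, which may differ from the real declaration in the driver headers:

#include <stdbool.h>
#include <stdint.h>

struct mwifiex_private;     /* opaque here */

/* sync == true: queue the command and wait for its response (old _sync);
 * sync == false: fire and forget (old _async). */
int mwifiex_send_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                     uint16_t cmd_action, uint32_t cmd_oid,
                     void *data_buf, bool sync);
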
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 24523e4015cb..bfebb0144df5 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -69,6 +69,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
break;
case HostCmd_CMD_802_11_SCAN:
+ case HostCmd_CMD_802_11_SCAN_EXT:
/* Cancel all pending scan command */
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
list_for_each_entry_safe(cmd_node, tmp_node,
@@ -157,8 +158,8 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
priv->subsc_evt_rssi_state = EVENT_HANDLED;
- mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
- 0, 0, subsc_evt);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+ 0, 0, subsc_evt, false);
return 0;
}
@@ -303,6 +304,15 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
priv->bitmap_rates[2 + i] =
le16_to_cpu(rate_scope->
ht_mcs_rate_bitmap[i]);
+
+ if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
+ for (i = 0; i < ARRAY_SIZE(rate_scope->
+ vht_mcs_rate_bitmap);
+ i++)
+ priv->bitmap_rates[10 + i] =
+ le16_to_cpu(rate_scope->
+ vht_mcs_rate_bitmap[i]);
+ }
break;
/* Add RATE_DROP tlv here */
}
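
Both the command builder in sta_cmd.c above and this response handler treat priv->bitmap_rates as one flat array: index 0 holds the HR/DSSS bitmap, index 1 presumably the OFDM bitmap, the HT MCS bitmaps start at index 2, and with a V15 firmware API the VHT MCS bitmaps start at index 10. A small sketch of that index map; the array length and the exact HT/VHT word counts are assumptions:

#include <stdint.h>

enum {
    RATE_IDX_HR_DSSS  = 0,
    RATE_IDX_OFDM     = 1,
    RATE_IDX_HT_BASE  = 2,    /* ht_mcs_rate_bitmap[i]  -> bitmap_rates[2 + i]  */
    RATE_IDX_VHT_BASE = 10,   /* vht_mcs_rate_bitmap[i] -> bitmap_rates[10 + i] */
};

void fill_bitmap_rates(uint16_t *bitmap_rates,
                       const uint16_t *ht, int n_ht,
                       const uint16_t *vht, int n_vht)
{
    int i;

    for (i = 0; i < n_ht; i++)
        bitmap_rates[RATE_IDX_HT_BASE + i] = ht[i];
    for (i = 0; i < n_vht; i++)
        bitmap_rates[RATE_IDX_VHT_BASE + i] = vht[i];
}
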
@@ -316,9 +326,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
if (priv->is_data_rate_auto)
priv->data_rate = 0;
else
- return mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_TX_RATE_QUERY,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
+ HostCmd_ACT_GEN_GET, 0, NULL, false);
return 0;
}
@@ -561,13 +570,13 @@ static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
}
/*
- * This function handles the command response of set/get key material.
+ * This function handles the command response of set/get v1 key material.
*
* Handling includes updating the driver parameters to reflect the
* changes.
*/
-static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
- struct host_cmd_ds_command *resp)
+static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_802_11_key_material *key =
&resp->params.key_material;
@@ -590,6 +599,51 @@ static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
}
/*
+ * This function handles the command response of set/get v2 key material.
+ *
+ * Handling includes updating the driver parameters to reflect the
+ * changes.
+ */
+static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_802_11_key_material_v2 *key_v2;
+ __le16 len;
+
+ key_v2 = &resp->params.key_material_v2;
+ if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
+ if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
+ dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+ priv->wpa_is_gtk_set = true;
+ priv->scan_block = false;
+ }
+ }
+
+ if (key_v2->key_param_set.key_type != KEY_TYPE_ID_AES)
+ return 0;
+
+ memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
+ WLAN_KEY_LEN_CCMP);
+ priv->aes_key_v2.key_param_set.key_params.aes.key_len =
+ key_v2->key_param_set.key_params.aes.key_len;
+ len = priv->aes_key_v2.key_param_set.key_params.aes.key_len;
+ memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
+ key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len));
+
+ return 0;
+}
+
+/* Wrapper function for processing response of key material command */
+static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ return mwifiex_ret_802_11_key_material_v2(priv, resp);
+ else
+ return mwifiex_ret_802_11_key_material_v1(priv, resp);
+}
+
+/*
* This function handles the command response of get 11d domain information.
*/
static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
@@ -800,7 +854,60 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
return 0;
}
+static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_tdls_oper *cmd_tdls_oper = &resp->params.tdls_oper;
+ u16 reason = le16_to_cpu(cmd_tdls_oper->reason);
+ u16 action = le16_to_cpu(cmd_tdls_oper->tdls_action);
+ struct mwifiex_sta_node *node =
+ mwifiex_get_sta_entry(priv, cmd_tdls_oper->peer_mac);
+ switch (action) {
+ case ACT_TDLS_DELETE:
+ if (reason)
+ dev_err(priv->adapter->dev,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ else
+ dev_dbg(priv->adapter->dev,
+ "TDLS link config for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
+ break;
+ case ACT_TDLS_CREATE:
+ if (reason) {
+ dev_err(priv->adapter->dev,
+ "TDLS link creation for %pM failed: reason %d",
+ cmd_tdls_oper->peer_mac, reason);
+ if (node && reason != TDLS_ERR_LINK_EXISTS)
+ node->tdls_status = TDLS_SETUP_FAILURE;
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "TDLS link creation for %pM successful",
+ cmd_tdls_oper->peer_mac);
+ }
+ break;
+ case ACT_TDLS_CONFIG:
+ if (reason) {
+ dev_err(priv->adapter->dev,
+ "TDLS link config for %pM failed, reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ if (node)
+ node->tdls_status = TDLS_SETUP_FAILURE;
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "TDLS link config for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
+ }
+ break;
+ default:
+ dev_err(priv->adapter->dev,
+ "Unknown TDLS command action respnse %d", action);
+ return -1;
+ }
+
+ return 0;
+}
/*
* This function handles the command response for subscribe event command.
*/
@@ -871,6 +978,10 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
ret = mwifiex_ret_802_11_scan(priv, resp);
adapter->curr_cmd->wait_q_enabled = false;
break;
+ case HostCmd_CMD_802_11_SCAN_EXT:
+ ret = mwifiex_ret_802_11_scan_ext(priv);
+ adapter->curr_cmd->wait_q_enabled = false;
+ break;
case HostCmd_CMD_802_11_BG_SCAN_QUERY:
ret = mwifiex_ret_802_11_scan(priv, resp);
dev_dbg(adapter->dev,
@@ -999,6 +1110,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_COALESCE_CFG:
break;
+ case HostCmd_CMD_TDLS_OPER:
+ ret = mwifiex_ret_tdls_oper(priv, resp);
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 8c351f71f72f..368450cc56c7 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -54,6 +54,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
priv->scan_block = false;
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ mwifiex_disable_all_tdls_links(priv);
+
/* Free Tx and Rx packets, report disconnect to upper layer */
mwifiex_clean_txrx(priv);
@@ -112,7 +116,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
adapter->tx_lock_flag = false;
adapter->pps_uapsd_mode = false;
- if (adapter->num_cmd_timeout && adapter->curr_cmd)
+ if (adapter->is_cmd_timedout && adapter->curr_cmd)
return;
priv->media_connected = false;
dev_dbg(adapter->dev,
@@ -289,9 +293,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_HS_ACT_REQ:
dev_dbg(adapter->dev, "event: HS_ACT_REQ\n");
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_HS_CFG_ENH,
- 0, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_HS_CFG_ENH,
+ 0, 0, NULL, false);
break;
case EVENT_MIC_ERR_UNICAST:
@@ -322,27 +325,34 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_BG_SCAN_REPORT:
dev_dbg(adapter->dev, "event: BGS_REPORT\n");
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_BG_SCAN_QUERY,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_QUERY,
+ HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
case EVENT_PORT_RELEASE:
dev_dbg(adapter->dev, "event: PORT RELEASE\n");
break;
+ case EVENT_EXT_SCAN_REPORT:
+ dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+ if (adapter->ext_scan)
+ ret = mwifiex_handle_event_ext_scan_report(priv,
+ adapter->event_skb->data);
+
+ break;
+
case EVENT_WMM_STATUS_CHANGE:
dev_dbg(adapter->dev, "event: WMM status changed\n");
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_WMM_GET_STATUS,
- 0, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_WMM_GET_STATUS,
+ 0, 0, NULL, false);
break;
case EVENT_RSSI_LOW:
cfg80211_cqm_rssi_notify(priv->netdev,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
GFP_KERNEL);
- mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
+ HostCmd_ACT_GEN_GET, 0, NULL, false);
priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
break;
@@ -356,8 +366,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
cfg80211_cqm_rssi_notify(priv->netdev,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
GFP_KERNEL);
- mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
+ HostCmd_ACT_GEN_GET, 0, NULL, false);
priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
break;
@@ -384,15 +394,15 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_IBSS_COALESCED:
dev_dbg(adapter->dev, "event: IBSS_COALESCED\n");
- ret = mwifiex_send_cmd_async(priv,
+ ret = mwifiex_send_cmd(priv,
HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
case EVENT_ADDBA:
dev_dbg(adapter->dev, "event: ADDBA Request\n");
- mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
- HostCmd_ACT_GEN_SET, 0,
- adapter->event_body);
+ mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
+ HostCmd_ACT_GEN_SET, 0,
+ adapter->event_body, false);
break;
case EVENT_DELBA:
dev_dbg(adapter->dev, "event: DELBA Request\n");
@@ -443,10 +453,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
priv->csa_expire_time =
jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME);
priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel;
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_DEAUTHENTICATE,
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
HostCmd_ACT_GEN_SET, 0,
- priv->curr_bss_params.bss_descriptor.mac_address);
+ priv->curr_bss_params.bss_descriptor.mac_address,
+ false);
break;
default:
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index c5cb2ed19ec2..894270611f2c 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -64,6 +64,7 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
*(cmd_queued->condition));
if (status) {
dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
+ mwifiex_cancel_all_pending_cmd(adapter);
return status;
}
@@ -108,19 +109,19 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
"info: Set multicast list=%d\n",
mcast_list->num_multicast_addr);
/* Send multicast addresses to firmware */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_MAC_MULTICAST_ADR,
- HostCmd_ACT_GEN_SET, 0,
- mcast_list);
+ ret = mwifiex_send_cmd(priv,
+ HostCmd_CMD_MAC_MULTICAST_ADR,
+ HostCmd_ACT_GEN_SET, 0,
+ mcast_list, false);
}
}
dev_dbg(priv->adapter->dev,
"info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
old_pkt_filter, priv->curr_pkt_filter);
if (old_pkt_filter != priv->curr_pkt_filter) {
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET,
- 0, &priv->curr_pkt_filter);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET,
+ 0, &priv->curr_pkt_filter, false);
}
return ret;
@@ -237,8 +238,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
rcu_read_unlock();
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
- HostCmd_ACT_GEN_SET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
+ HostCmd_ACT_GEN_SET, 0, NULL, false)) {
wiphy_err(priv->adapter->wiphy,
"11D: setting domain info in FW\n");
return -1;
@@ -290,7 +291,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
if (mwifiex_band_to_radio_type(bss_desc->bss_band) ==
HostCmd_SCAN_RADIO_TYPE_BG)
- config_bands = BAND_B | BAND_G | BAND_GN | BAND_GAC;
+ config_bands = BAND_B | BAND_G | BAND_GN;
else
config_bands = BAND_A | BAND_AN | BAND_AAC;
@@ -429,16 +430,13 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
status = -1;
break;
}
- if (cmd_type == MWIFIEX_SYNC_CMD)
- status = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_HS_CFG_ENH,
- HostCmd_ACT_GEN_SET, 0,
- &adapter->hs_cfg);
- else
- status = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_HS_CFG_ENH,
- HostCmd_ACT_GEN_SET, 0,
- &adapter->hs_cfg);
+
+ status = mwifiex_send_cmd(priv,
+ HostCmd_CMD_802_11_HS_CFG_ENH,
+ HostCmd_ACT_GEN_SET, 0,
+ &adapter->hs_cfg,
+ cmd_type == MWIFIEX_SYNC_CMD);
+
if (hs_cfg->conditions == HS_CFG_CANCEL)
/* Restore previous condition */
adapter->hs_cfg.conditions =
@@ -511,6 +509,9 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
hscfg.is_invoke_hostcmd = true;
+ adapter->hs_enabling = true;
+ mwifiex_cancel_all_pending_cmd(adapter);
+
if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_STA),
HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
@@ -519,8 +520,9 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
return false;
}
- if (wait_event_interruptible(adapter->hs_activate_wait_q,
- adapter->hs_activate_wait_q_woken)) {
+ if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
+ adapter->hs_activate_wait_q_woken,
+ (10 * HZ)) <= 0) {
dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
return false;
}
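
The host-sleep activation wait above switches from wait_event_interruptible() to wait_event_interruptible_timeout(), which changes the failure test: the timeout variant returns the remaining jiffies (positive) on success, 0 on timeout, and a negative value when interrupted by a signal, so "<= 0" covers both failure cases. A short sketch of that convention (standard kernel semantics, not mwifiex-specific; the log messages are illustrative):

long left;

left = wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
                                        adapter->hs_activate_wait_q_woken,
                                        10 * HZ);
if (left > 0)
        pr_debug("host sleep activated with %ld jiffies to spare\n", left);
else if (left == 0)
        pr_err("host sleep activation timed out\n");
else
        pr_err("host sleep activation interrupted (%ld)\n", left);
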
@@ -586,8 +588,8 @@ int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
auto_ds.auto_ds = DEEP_SLEEP_OFF;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
- DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, true);
}
EXPORT_SYMBOL_GPL(mwifiex_disable_auto_ds);
@@ -601,8 +603,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, u32 *rate)
{
int ret;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
if (!ret) {
if (priv->is_data_rate_auto)
@@ -698,8 +700,8 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
pg->power_max = (s8) dbm;
pg->ht_bandwidth = HT_BW_40;
}
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TXPWR_CFG,
- HostCmd_ACT_GEN_SET, 0, buf);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_TXPWR_CFG,
+ HostCmd_ACT_GEN_SET, 0, buf, true);
kfree(buf);
return ret;
@@ -722,12 +724,11 @@ int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode)
else
adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
sub_cmd = (*ps_mode) ? EN_AUTO_PS : DIS_AUTO_PS;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
- sub_cmd, BITMAP_STA_PS, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ sub_cmd, BITMAP_STA_PS, NULL, true);
if ((!ret) && (sub_cmd == DIS_AUTO_PS))
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_PS_MODE_ENH,
- GET_PS, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ GET_PS, 0, NULL, false);
return ret;
}
@@ -851,9 +852,9 @@ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
struct mwifiex_ds_encrypt_key *encrypt_key)
{
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
- encrypt_key);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
+ encrypt_key, true);
}
/*
@@ -865,6 +866,7 @@ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
struct mwifiex_ds_encrypt_key *encrypt_key)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
int ret;
struct mwifiex_wep_key *wep_key;
int index;
@@ -879,10 +881,17 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
/* Copy the required key as the current key */
wep_key = &priv->wep_key[index];
if (!wep_key->key_length) {
- dev_err(priv->adapter->dev,
+ dev_err(adapter->dev,
"key not set, so cannot enable it\n");
return -1;
}
+
+ if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) {
+ memcpy(encrypt_key->key_material,
+ wep_key->key_material, wep_key->key_length);
+ encrypt_key->key_len = wep_key->key_length;
+ }
+
priv->wep_key_curr_index = (u16) index;
priv->sec_info.wep_enabled = 1;
} else {
@@ -897,21 +906,32 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
priv->sec_info.wep_enabled = 1;
}
if (wep_key->key_length) {
+ void *enc_key;
+
+ if (encrypt_key->key_disable)
+ memset(&priv->wep_key[index], 0,
+ sizeof(struct mwifiex_wep_key));
+
+ if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ enc_key = encrypt_key;
+ else
+ enc_key = NULL;
+
/* Send request to firmware */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET, 0, enc_key, false);
if (ret)
return ret;
}
+
if (priv->sec_info.wep_enabled)
priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
else
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &priv->curr_pkt_filter);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter, true);
return ret;
}
@@ -946,10 +966,9 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
*/
/* Send the key as PTK to firmware */
encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET,
- KEY_INFO_ENABLED, encrypt_key);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET,
+ KEY_INFO_ENABLED, encrypt_key, false);
if (ret)
return ret;
@@ -973,15 +992,13 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
if (remove_key)
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET,
- !KEY_INFO_ENABLED, encrypt_key);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET,
+ !KEY_INFO_ENABLED, encrypt_key, true);
else
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET,
- KEY_INFO_ENABLED, encrypt_key);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET,
+ KEY_INFO_ENABLED, encrypt_key, true);
return ret;
}
@@ -1044,19 +1061,27 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
encrypt_key.key_len = key_len;
+ encrypt_key.key_index = key_index;
if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
encrypt_key.is_igtk_key = true;
if (!disable) {
- encrypt_key.key_index = key_index;
if (key_len)
memcpy(encrypt_key.key_material, key, key_len);
+ else
+ encrypt_key.is_current_wep_key = true;
+
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
- if (kp && kp->seq && kp->seq_len)
+ if (kp && kp->seq && kp->seq_len) {
memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
+ encrypt_key.pn_len = kp->seq_len;
+ encrypt_key.is_rx_seq_valid = true;
+ }
} else {
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
+ return 0;
encrypt_key.key_disable = true;
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
@@ -1077,8 +1102,8 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv)
struct mwifiex_ver_ext ver_ext;
memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext));
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_VERSION_EXT,
- HostCmd_ACT_GEN_GET, 0, &ver_ext))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
+ HostCmd_ACT_GEN_GET, 0, &ver_ext, true))
return -1;
return 0;
@@ -1103,8 +1128,8 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
ieee80211_frequency_to_channel(chan->center_freq);
roc_cfg.duration = cpu_to_le32(duration);
}
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_REMAIN_ON_CHAN,
- action, 0, &roc_cfg)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_REMAIN_ON_CHAN,
+ action, 0, &roc_cfg, true)) {
dev_err(priv->adapter->dev, "failed to remain on channel\n");
return -1;
}
@@ -1136,8 +1161,8 @@ mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role)
break;
}
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
return mwifiex_sta_init_cmd(priv, false);
}
@@ -1152,8 +1177,8 @@ int
mwifiex_get_stats_info(struct mwifiex_private *priv,
struct mwifiex_ds_get_stats *log)
{
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_GET_LOG,
- HostCmd_ACT_GEN_GET, 0, log);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_GET_LOG,
+ HostCmd_ACT_GEN_GET, 0, log, true);
}
/*
@@ -1195,8 +1220,7 @@ static int mwifiex_reg_mem_ioctl_reg_rw(struct mwifiex_private *priv,
return -1;
}
- return mwifiex_send_cmd_sync(priv, cmd_no, action, 0, reg_rw);
-
+ return mwifiex_send_cmd(priv, cmd_no, action, 0, reg_rw, true);
}
/*
@@ -1261,8 +1285,8 @@ mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes,
rd_eeprom.byte_count = cpu_to_le16((u16) bytes);
/* Send request to firmware */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_EEPROM_ACCESS,
- HostCmd_ACT_GEN_GET, 0, &rd_eeprom);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_EEPROM_ACCESS,
+ HostCmd_ACT_GEN_GET, 0, &rd_eeprom, true);
if (!ret)
memcpy(value, rd_eeprom.value, MAX_EEPROM_DATA);
@@ -1391,7 +1415,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
* with requisite parameters and calls the IOCTL handler.
*/
int
-mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len)
+mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len)
{
struct mwifiex_ds_misc_gen_ie gen_ie;
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 4651d676df38..ed26387eccf5 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -88,11 +88,14 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
struct rxpd *local_rx_pd;
int hdr_chop;
struct ethhdr *eth;
+ u16 rx_pkt_off, rx_pkt_len;
+ u8 *offset;
local_rx_pd = (struct rxpd *) (skb->data);
- rx_pkt_hdr = (void *)local_rx_pd +
- le16_to_cpu(local_rx_pd->rx_pkt_offset);
+ rx_pkt_off = le16_to_cpu(local_rx_pd->rx_pkt_offset);
+ rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
+ rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) ||
@@ -142,6 +145,12 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return 0;
}
+ if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ ntohs(rx_pkt_hdr->eth803_hdr.h_proto) == ETH_P_TDLS) {
+ offset = (u8 *)local_rx_pd + rx_pkt_off;
+ mwifiex_process_tdls_action_frame(priv, offset, rx_pkt_len);
+ }
+
priv->rxpd_rate = local_rx_pd->rx_rate;
priv->rxpd_htinfo = local_rx_pd->ht_info;
@@ -192,26 +201,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
return ret;
}
- if (rx_pkt_type == PKT_TYPE_AMSDU) {
- struct sk_buff_head list;
- struct sk_buff *rx_skb;
-
- __skb_queue_head_init(&list);
-
- skb_pull(skb, rx_pkt_offset);
- skb_trim(skb, rx_pkt_length);
-
- ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev->iftype, 0, false);
-
- while (!skb_queue_empty(&list)) {
- rx_skb = __skb_dequeue(&list);
- ret = mwifiex_recv_packet(priv, rx_skb);
- if (ret == -1)
- dev_err(adapter->dev, "Rx of A-MSDU failed");
- }
- return 0;
- } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+ if (rx_pkt_type == PKT_TYPE_MGMT) {
ret = mwifiex_process_mgmt_packet(priv, skb);
if (ret)
dev_err(adapter->dev, "Rx of mgmt packet failed");
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 354d64c9606f..1236a5de7bca 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -95,6 +95,9 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
}
}
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
+ local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
+
/* Offset of actual data */
pkt_offset = sizeof(struct txpd) + pad;
if (pkt_type == PKT_TYPE_MGMT) {
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
new file mode 100644
index 000000000000..97662a1ba58c
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -0,0 +1,1044 @@
+/* Marvell Wireless LAN device driver: TDLS handling
+ *
+ * Copyright (C) 2014, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+#include "wmm.h"
+#include "11n.h"
+#include "11n_rxreorder.h"
+#include "11ac.h"
+
+#define TDLS_REQ_FIX_LEN 6
+#define TDLS_RESP_FIX_LEN 8
+#define TDLS_CONFIRM_FIX_LEN 6
+
+static void
+mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ struct list_head *tid_list;
+ struct sk_buff *skb, *tmp;
+ struct mwifiex_txinfo *tx_info;
+ unsigned long flags;
+ u32 tid;
+ u8 tid_down;
+
+ dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
+ if (!ether_addr_equal(mac, skb->data))
+ continue;
+
+ __skb_unlink(skb, &priv->tdls_txq);
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tid = skb->priority;
+ tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
+
+ if (status == TDLS_SETUP_COMPLETE) {
+ ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
+ ra_list->tdls_link = true;
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+ } else {
+ tid_list = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
+ if (!list_empty(tid_list))
+ ra_list = list_first_entry(tid_list,
+ struct mwifiex_ra_list_tbl, list);
+ else
+ ra_list = NULL;
+ tx_info->flags &= ~MWIFIEX_BUF_FLAG_TDLS_PKT;
+ }
+
+ if (!ra_list) {
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+ continue;
+ }
+
+ skb_queue_tail(&ra_list->skb_head, skb);
+
+ ra_list->ba_pkt_count++;
+ ra_list->total_pkt_count++;
+
+ if (atomic_read(&priv->wmm.highest_queued_prio) <
+ tos_to_tid_inv[tid_down])
+ atomic_set(&priv->wmm.highest_queued_prio,
+ tos_to_tid_inv[tid_down]);
+
+ atomic_inc(&priv->wmm.tx_pkts_queued);
+ }
+
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ return;
+}
+
+static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ struct list_head *ra_list_head;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ int i;
+
+ dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; i++) {
+ if (!list_empty(&priv->wmm.tid_tbl_ptr[i].ra_list)) {
+ ra_list_head = &priv->wmm.tid_tbl_ptr[i].ra_list;
+ list_for_each_entry(ra_list, ra_list_head, list) {
+ skb_queue_walk_safe(&ra_list->skb_head, skb,
+ tmp) {
+ if (!ether_addr_equal(mac, skb->data))
+ continue;
+ __skb_unlink(skb, &ra_list->skb_head);
+ atomic_dec(&priv->wmm.tx_pkts_queued);
+ ra_list->total_pkt_count--;
+ skb_queue_tail(&priv->tdls_txq, skb);
+ }
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ return;
+}
+
+/* This function appends supported-rates IEs to a TDLS frame. */
+static int
+mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ u8 rates[MWIFIEX_SUPPORTED_RATES], *pos;
+ u16 rates_size, supp_rates_size, ext_rates_size;
+
+ memset(rates, 0, sizeof(rates));
+ rates_size = mwifiex_get_supported_rates(priv, rates);
+
+ supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES);
+
+ if (skb_tailroom(skb) < rates_size + 4) {
+ dev_err(priv->adapter->dev,
+ "Insuffient space while adding rates\n");
+ return -ENOMEM;
+ }
+
+ pos = skb_put(skb, supp_rates_size + 2);
+ *pos++ = WLAN_EID_SUPP_RATES;
+ *pos++ = supp_rates_size;
+ memcpy(pos, rates, supp_rates_size);
+
+ if (rates_size > MWIFIEX_TDLS_SUPPORTED_RATES) {
+ ext_rates_size = rates_size - MWIFIEX_TDLS_SUPPORTED_RATES;
+ pos = skb_put(skb, ext_rates_size + 2);
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = ext_rates_size;
+ memcpy(pos, rates + MWIFIEX_TDLS_SUPPORTED_RATES,
+ ext_rates_size);
+ }
+
+ return 0;
+}
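
mwifiex_tdls_append_rates_ie() above splits the station's rate set between the Supported Rates and Extended Supported Rates elements, keeping at most MWIFIEX_TDLS_SUPPORTED_RATES entries in the first. A standalone sketch of that split (hypothetical helper for illustration; the real limit constant is defined outside this diff):

static u8 *append_rate_ies(u8 *pos, const u8 *rates, u8 n_rates, u8 limit)
{
        u8 first = min(n_rates, limit);

        *pos++ = WLAN_EID_SUPP_RATES;           /* element: [id][len][payload] */
        *pos++ = first;
        memcpy(pos, rates, first);
        pos += first;

        if (n_rates > limit) {                  /* overflow goes into the ext IE */
                *pos++ = WLAN_EID_EXT_SUPP_RATES;
                *pos++ = n_rates - limit;
                memcpy(pos, rates + limit, n_rates - limit);
                pos += n_rates - limit;
        }

        return pos;
}
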
+
+static void mwifiex_tdls_add_aid(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee_types_assoc_rsp *assoc_rsp;
+ u8 *pos;
+
+ assoc_rsp = (struct ieee_types_assoc_rsp *)&priv->assoc_rsp_buf;
+ pos = (void *)skb_put(skb, 4);
+ *pos++ = WLAN_EID_AID;
+ *pos++ = 2;
+ *pos++ = le16_to_cpu(assoc_rsp->a_id);
+
+ return;
+}
+
+static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_vht_cap vht_cap;
+ u8 *pos;
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
+ *pos++ = WLAN_EID_VHT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_vht_cap);
+
+ memset(&vht_cap, 0, sizeof(struct ieee80211_vht_cap));
+
+ mwifiex_fill_vht_cap_tlv(priv, &vht_cap, priv->curr_bss_params.band);
+ memcpy(pos, &vht_cap, sizeof(vht_cap));
+
+ return 0;
+}
+
+static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
+ u8 *mac, struct sk_buff *skb)
+{
+ struct mwifiex_bssdescriptor *bss_desc;
+ struct ieee80211_vht_operation *vht_oper;
+ struct ieee80211_vht_cap *vht_cap, *ap_vht_cap = NULL;
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 supp_chwd_set, peer_supp_chwd_set;
+ u8 *pos, ap_supp_chwd_set, chan_bw;
+ u16 mcs_map_user, mcs_map_resp, mcs_map_result;
+ u16 mcs_user, mcs_resp, nss;
+ u32 usr_vht_cap_info;
+
+ bss_desc = &priv->curr_bss_params.bss_descriptor;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, mac);
+ if (unlikely(!sta_ptr)) {
+ dev_warn(adapter->dev, "TDLS peer station not found in list\n");
+ return -1;
+ }
+
+ if (!mwifiex_is_bss_in_11ac_mode(priv)) {
+ if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
+ WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
+ dev_dbg(adapter->dev,
+ "TDLS peer doesn't support wider bandwitdh\n");
+ return 0;
+ }
+ } else {
+ ap_vht_cap = bss_desc->bcn_vht_cap;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_operation) + 2);
+ *pos++ = WLAN_EID_VHT_OPERATION;
+ *pos++ = sizeof(struct ieee80211_vht_operation);
+ vht_oper = (struct ieee80211_vht_operation *)pos;
+
+ if (bss_desc->bss_band & BAND_A)
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
+ else
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
+
+ /* find the minimum bandwidth between the AP and TDLS peers */
+ vht_cap = &sta_ptr->tdls_cap.vhtcap;
+ supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
+ peer_supp_chwd_set =
+ GET_VHTCAP_CHWDSET(le32_to_cpu(vht_cap->vht_cap_info));
+ supp_chwd_set = min_t(u8, supp_chwd_set, peer_supp_chwd_set);
+
+ /* We need to check the AP's bandwidth when TDLS_WIDER_BANDWIDTH is off */
+
+ if (ap_vht_cap && sta_ptr->tdls_cap.extcap.ext_capab[7] &
+ WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
+ ap_supp_chwd_set =
+ GET_VHTCAP_CHWDSET(le32_to_cpu(ap_vht_cap->vht_cap_info));
+ supp_chwd_set = min_t(u8, supp_chwd_set, ap_supp_chwd_set);
+ }
+
+ switch (supp_chwd_set) {
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+ break;
+ default:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+
+ mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
+ mcs_map_result = 0;
+
+ for (nss = 1; nss <= 8; nss++) {
+ mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
+ mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
+
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
+ else
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ min_t(u16, mcs_user, mcs_resp));
+ }
+
+ vht_oper->basic_mcs_set = cpu_to_le16(mcs_map_result);
+
+ switch (vht_oper->chan_width) {
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_160MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ default:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+ vht_oper->center_freq_seg1_idx =
+ mwifiex_get_center_freq_index(priv, BAND_AAC,
+ bss_desc->channel,
+ chan_bw);
+
+ return 0;
+}
+
+static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee_types_extcap *extcap;
+
+ extcap = (void *)skb_put(skb, sizeof(struct ieee_types_extcap));
+ extcap->ieee_hdr.element_id = WLAN_EID_EXT_CAPABILITY;
+ extcap->ieee_hdr.len = 8;
+ memset(extcap->ext_capab, 0, 8);
+ extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
+
+ if (priv->adapter->is_hw_11ac_capable)
+ extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
+}
+
+static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
+{
+ u8 *pos = (void *)skb_put(skb, 3);
+
+ *pos++ = WLAN_EID_QOS_CAPA;
+ *pos++ = 1;
+ *pos++ = MWIFIEX_TDLS_DEF_QOS_CAPAB;
+}
+
+static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_tdls_data *tf;
+ int ret;
+ u16 capab;
+ struct ieee80211_ht_cap *ht_cap;
+ u8 radio, *pos;
+
+ capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+
+ tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
+ memcpy(tf->da, peer, ETH_ALEN);
+ memcpy(tf->sa, priv->curr_addr, ETH_ALEN);
+ tf->ether_type = cpu_to_be16(ETH_P_TDLS);
+ tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_REQUEST;
+ skb_put(skb, sizeof(tf->u.setup_req));
+ tf->u.setup_req.dialog_token = dialog_token;
+ tf->u.setup_req.capability = cpu_to_le16(capab);
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+
+ case WLAN_TDLS_SETUP_RESPONSE:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
+ skb_put(skb, sizeof(tf->u.setup_resp));
+ tf->u.setup_resp.status_code = cpu_to_le16(status_code);
+ tf->u.setup_resp.dialog_token = dialog_token;
+ tf->u.setup_resp.capability = cpu_to_le16(capab);
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+
+ case WLAN_TDLS_SETUP_CONFIRM:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
+ skb_put(skb, sizeof(tf->u.setup_cfm));
+ tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
+ tf->u.setup_cfm.dialog_token = dialog_token;
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_oper(priv, peer, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ }
+ break;
+
+ case WLAN_TDLS_TEARDOWN:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_TEARDOWN;
+ skb_put(skb, sizeof(tf->u.teardown));
+ tf->u.teardown.reason_code = cpu_to_le16(status_code);
+ break;
+
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
+ skb_put(skb, sizeof(tf->u.discover_req));
+ tf->u.discover_req.dialog_token = dialog_token;
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
+{
+ struct ieee80211_tdls_lnkie *lnkid;
+
+ lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
+ lnkid->ie_type = WLAN_EID_LINK_ID;
+ lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) -
+ sizeof(struct ieee_types_header);
+
+ memcpy(lnkid->bssid, bssid, ETH_ALEN);
+ memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
+ memcpy(lnkid->resp_sta, peer, ETH_ALEN);
+}
+
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct sk_buff *skb;
+ struct mwifiex_txinfo *tx_info;
+ struct timeval tv;
+ int ret;
+ u16 skb_len;
+
+ skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ MWIFIEX_SUPPORTED_RATES +
+ 3 + /* Qos Info */
+ sizeof(struct ieee_types_extcap) +
+ sizeof(struct ieee80211_ht_cap) +
+ sizeof(struct ieee_types_bss_co_2040) +
+ sizeof(struct ieee80211_ht_operation) +
+ sizeof(struct ieee80211_tdls_lnkie) +
+ extra_ies_len;
+
+ if (priv->adapter->is_hw_11ac_capable)
+ skb_len += sizeof(struct ieee_types_vht_cap) +
+ sizeof(struct ieee_types_vht_oper) +
+ sizeof(struct ieee_types_aid);
+
+ skb = dev_alloc_skb(skb_len);
+ if (!skb) {
+ dev_err(priv->adapter->dev,
+ "allocate skb failed for management frame\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_CONFIRM:
+ case WLAN_TDLS_TEARDOWN:
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
+ dialog_token, status_code,
+ skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies,
+ extra_ies_len);
+ mwifiex_tdls_add_link_ie(skb, priv->curr_addr, peer,
+ priv->cfg_bssid);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
+ dialog_token, status_code,
+ skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies,
+ extra_ies_len);
+ mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
+ priv->cfg_bssid);
+ break;
+ }
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_RESPONSE:
+ skb->priority = MWIFIEX_PRIO_BK;
+ break;
+ default:
+ skb->priority = MWIFIEX_PRIO_VI;
+ break;
+ }
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+
+ do_gettimeofday(&tv);
+ skb->tstamp = timeval_to_ktime(tv);
+ mwifiex_queue_tx_pkt(priv, skb);
+
+ return 0;
+}
+
+static int
+mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ int ret;
+ u16 capab;
+ struct ieee80211_ht_cap *ht_cap;
+ u8 radio, *pos;
+
+ capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+
+ mgmt = (void *)skb_put(skb, offsetof(struct ieee80211_mgmt, u));
+
+ memset(mgmt, 0, 24);
+ memcpy(mgmt->da, peer, ETH_ALEN);
+ memcpy(mgmt->sa, priv->curr_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, priv->cfg_bssid, ETH_ALEN);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+
+ /* add address 4 */
+ pos = skb_put(skb, ETH_ALEN);
+
+ switch (action_code) {
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1);
+ mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+ mgmt->u.action.u.tdls_discover_resp.action_code =
+ WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+ mgmt->u.action.u.tdls_discover_resp.dialog_token =
+ dialog_token;
+ mgmt->u.action.u.tdls_discover_resp.capability =
+ cpu_to_le16(capab);
+ /* move back for addr4 */
+ memmove(pos + ETH_ALEN, &mgmt->u.action.category,
+ sizeof(mgmt->u.action.u.tdls_discover_resp));
+ /* init address 4 */
+ memcpy(pos, bc_addr, ETH_ALEN);
+
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct sk_buff *skb;
+ struct mwifiex_txinfo *tx_info;
+ struct timeval tv;
+ u8 *pos;
+ u32 pkt_type, tx_control;
+ u16 pkt_len, skb_len;
+
+ skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ MWIFIEX_SUPPORTED_RATES +
+ sizeof(struct ieee_types_extcap) +
+ sizeof(struct ieee80211_ht_cap) +
+ sizeof(struct ieee_types_bss_co_2040) +
+ sizeof(struct ieee80211_ht_operation) +
+ sizeof(struct ieee80211_tdls_lnkie) +
+ extra_ies_len +
+ 3 + /* Qos Info */
+ ETH_ALEN; /* Address4 */
+
+ if (priv->adapter->is_hw_11ac_capable)
+ skb_len += sizeof(struct ieee_types_vht_cap) +
+ sizeof(struct ieee_types_vht_oper) +
+ sizeof(struct ieee_types_aid);
+
+ skb = dev_alloc_skb(skb_len);
+ if (!skb) {
+ dev_err(priv->adapter->dev,
+ "allocate skb failed for management frame\n");
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+ pkt_type = PKT_TYPE_MGMT;
+ tx_control = 0;
+ pos = skb_put(skb, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ memset(pos, 0, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ memcpy(pos, &pkt_type, sizeof(pkt_type));
+ memcpy(pos + sizeof(pkt_type), &tx_control, sizeof(tx_control));
+
+ if (mwifiex_construct_tdls_action_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ skb)) {
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+
+ /* the TDLS link IE is always added last since we are the responder */
+
+ mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
+ priv->cfg_bssid);
+
+ skb->priority = MWIFIEX_PRIO_VI;
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+
+ pkt_len = skb->len - MWIFIEX_MGMT_FRAME_HEADER_SIZE - sizeof(pkt_len);
+ memcpy(skb->data + MWIFIEX_MGMT_FRAME_HEADER_SIZE, &pkt_len,
+ sizeof(pkt_len));
+ do_gettimeofday(&tv);
+ skb->tstamp = timeval_to_ktime(tv);
+ mwifiex_queue_tx_pkt(priv, skb);
+
+ return 0;
+}
+
+/* This function processes a TDLS action frame received from a peer.
+ * Peer capabilities are stored in the station node structure.
+ */
+void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *buf, int len)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ u8 *peer, *pos, *end;
+ u8 i, action, basic;
+ int ie_len = 0;
+
+ if (len < (sizeof(struct ethhdr) + 3))
+ return;
+ if (*(buf + sizeof(struct ethhdr)) != WLAN_TDLS_SNAP_RFTYPE)
+ return;
+ if (*(buf + sizeof(struct ethhdr) + 1) != WLAN_CATEGORY_TDLS)
+ return;
+
+ peer = buf + ETH_ALEN;
+ action = *(buf + sizeof(struct ethhdr) + 2);
+
+ /* just handle TDLS setup request/response/confirm */
+ if (action > WLAN_TDLS_SETUP_CONFIRM)
+ return;
+
+ dev_dbg(priv->adapter->dev,
+ "rx:tdls action: peer=%pM, action=%d\n", peer, action);
+
+ sta_ptr = mwifiex_add_sta_entry(priv, peer);
+ if (!sta_ptr)
+ return;
+
+ switch (action) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ if (len < (sizeof(struct ethhdr) + TDLS_REQ_FIX_LEN))
+ return;
+
+ pos = buf + sizeof(struct ethhdr) + 4;
+ /* payload 1 + category 1 + action 1 + dialog 1 */
+ sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+ ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
+ pos += 2;
+ break;
+
+ case WLAN_TDLS_SETUP_RESPONSE:
+ if (len < (sizeof(struct ethhdr) + TDLS_RESP_FIX_LEN))
+ return;
+ /* payload 1 + category 1 + action 1 + dialog 1 + status code 2 */
+ pos = buf + sizeof(struct ethhdr) + 6;
+ sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+ ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
+ pos += 2;
+ break;
+
+ case WLAN_TDLS_SETUP_CONFIRM:
+ if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
+ return;
+ pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
+ ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
+ break;
+ default:
+ dev_warn(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ return;
+ }
+
+ for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
+ if (pos + 2 + pos[1] > end)
+ break;
+
+ switch (*pos) {
+ case WLAN_EID_SUPP_RATES:
+ sta_ptr->tdls_cap.rates_len = pos[1];
+ for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates[i] = pos[i + 2];
+ break;
+
+ case WLAN_EID_EXT_SUPP_RATES:
+ basic = sta_ptr->tdls_cap.rates_len;
+ for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
+ sta_ptr->tdls_cap.rates_len += pos[1];
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
+ sizeof(struct ieee80211_ht_cap));
+ sta_ptr->is_11n_enabled = 1;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
+ sizeof(struct ieee80211_ht_operation));
+ break;
+ case WLAN_EID_BSS_COEX_2040:
+ sta_ptr->tdls_cap.coex_2040 = pos[2];
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
+ memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
+ sizeof(struct ieee_types_header) +
+ min_t(u8, pos[1], 8));
+ break;
+ case WLAN_EID_RSN:
+ memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
+ sizeof(struct ieee_types_header) + pos[1]);
+ break;
+ case WLAN_EID_QOS_CAPA:
+ sta_ptr->tdls_cap.qos_info = pos[2];
+ break;
+ case WLAN_EID_VHT_OPERATION:
+ if (priv->adapter->is_hw_11ac_capable)
+ memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
+ sizeof(struct ieee80211_vht_operation));
+ break;
+ case WLAN_EID_VHT_CAPABILITY:
+ if (priv->adapter->is_hw_11ac_capable) {
+ memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
+ sizeof(struct ieee80211_vht_cap));
+ sta_ptr->is_11ac_enabled = 1;
+ }
+ break;
+ case WLAN_EID_AID:
+ if (priv->adapter->is_hw_11ac_capable)
+ sta_ptr->tdls_cap.aid =
+ le16_to_cpu(*(__le16 *)(pos + 2));
+ default:
+ break;
+ }
+ }
+
+ return;
+}
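
The parser above walks the peer's information elements in the usual [id][len][data] layout, advancing by 2 + pos[1] and rejecting any element whose declared length runs past the buffer. The same walk as a tiny standalone helper (hypothetical, for illustration only):

static const u8 *next_element(const u8 *pos, const u8 *end)
{
        /* need room for id + len, and the payload must stay inside the buffer */
        if (pos + 2 > end || pos + 2 + pos[1] > end)
                return NULL;
        return pos + 2 + pos[1];
}
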
+
+static int
+mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) {
+ dev_err(priv->adapter->dev,
+ "link absent for peer %pM; cannot config\n", peer);
+ return -EINVAL;
+ }
+
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_CONFIG_LINK;
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
+}
+
+static int
+mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) {
+ dev_dbg(priv->adapter->dev,
+ "Setup already in progress for peer %pM\n", peer);
+ return 0;
+ }
+
+ sta_ptr = mwifiex_add_sta_entry(priv, peer);
+ if (!sta_ptr)
+ return -ENOMEM;
+
+ sta_ptr->tdls_status = TDLS_SETUP_INPROGRESS;
+ mwifiex_hold_tdls_packets(priv, peer);
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_CREATE_LINK;
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
+}
+
+static int
+mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+ unsigned long flags;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr) {
+ if (sta_ptr->is_11n_enabled) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ }
+ mwifiex_del_sta_entry(priv, peer);
+ }
+
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
+}
+
+static int
+mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct ieee80211_mcs_info mcs;
+ unsigned long flags;
+ int i;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) {
+ dev_dbg(priv->adapter->dev,
+ "tdls: enable link %pM success\n", peer);
+
+ sta_ptr->tdls_status = TDLS_SETUP_COMPLETE;
+
+ mcs = sta_ptr->tdls_cap.ht_capb.mcs;
+ if (mcs.rx_mask[0] != 0xff)
+ sta_ptr->is_11n_enabled = true;
+ if (sta_ptr->is_11n_enabled) {
+ if (le16_to_cpu(sta_ptr->tdls_cap.ht_capb.cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU)
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_8K;
+ else
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] =
+ priv->aggr_prio_tbl[i].ampdu_user;
+ } else {
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+ }
+
+ memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "tdls: enable link %pM failed\n", peer);
+ if (sta_ptr) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_del_sta_entry(priv, peer);
+ }
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
+{
+ switch (action) {
+ case MWIFIEX_TDLS_ENABLE_LINK:
+ return mwifiex_tdls_process_enable_link(priv, peer);
+ case MWIFIEX_TDLS_DISABLE_LINK:
+ return mwifiex_tdls_process_disable_link(priv, peer);
+ case MWIFIEX_TDLS_CREATE_LINK:
+ return mwifiex_tdls_process_create_link(priv, peer);
+ case MWIFIEX_TDLS_CONFIG_LINK:
+ return mwifiex_tdls_process_config_link(priv, peer);
+ }
+ return 0;
+}
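
mwifiex_tdls_oper() is the single entry point that maps the MWIFIEX_TDLS_* actions onto the handlers above. A hedged sketch of how a cfg80211 ->tdls_oper() callback might forward into it (the actual cfg80211 glue is not part of this file; the function below is hypothetical):

static int example_forward_tdls_oper(struct mwifiex_private *priv, u8 *peer,
                                     enum nl80211_tdls_operation oper)
{
        switch (oper) {
        case NL80211_TDLS_ENABLE_LINK:
                return mwifiex_tdls_oper(priv, peer, MWIFIEX_TDLS_ENABLE_LINK);
        case NL80211_TDLS_DISABLE_LINK:
                return mwifiex_tdls_oper(priv, peer, MWIFIEX_TDLS_DISABLE_LINK);
        default:
                /* setup/teardown are carried in TDLS frames, not this path */
                return -EOPNOTSUPP;
        }
}
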
+
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *sta_ptr;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, mac);
+ if (sta_ptr)
+ return sta_ptr->tdls_status;
+
+ return TDLS_NOT_SETUP;
+}
+
+void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+ unsigned long flags;
+
+ if (list_empty(&priv->sta_list))
+ return;
+
+ list_for_each_entry(sta_ptr, &priv->sta_list, list) {
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+
+ if (sta_ptr->is_11n_enabled) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ }
+
+ mwifiex_restore_tdls_packets(priv, sta_ptr->mac_addr,
+ TDLS_LINK_TEARDOWN);
+ memcpy(&tdls_oper.peer_mac, sta_ptr->mac_addr, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper, false))
+ dev_warn(priv->adapter->dev,
+ "Disable link failed for TDLS peer %pM",
+ sta_ptr->mac_addr);
+ }
+
+ mwifiex_del_all_sta_list(priv);
+}
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 64424c81b44f..9be6544bdded 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -159,6 +159,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
struct cfg80211_ap_settings *params)
{
const u8 *ht_ie;
+ u16 cap_info;
if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
return;
@@ -168,6 +169,25 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
if (ht_ie) {
memcpy(&bss_cfg->ht_cap, ht_ie + 2,
sizeof(struct ieee80211_ht_cap));
+ cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
+ memset(&bss_cfg->ht_cap.mcs, 0,
+ priv->adapter->number_of_antenna);
+ switch (GET_RXSTBC(cap_info)) {
+ case MWIFIEX_RX_STBC1:
+ /* HT_CAP 1X1 mode */
+ memset(&bss_cfg->ht_cap.mcs, 0xff, 1);
+ break;
+ case MWIFIEX_RX_STBC12: /* fall through */
+ case MWIFIEX_RX_STBC123:
+ /* HT_CAP 2X2 mode */
+ memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+ break;
+ default:
+ dev_warn(priv->adapter->dev,
+ "Unsupported RX-STBC, default to 2x2\n");
+ memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+ break;
+ }
priv->ap_11n_enabled = 1;
} else {
memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
@@ -226,8 +246,8 @@ void mwifiex_set_vht_width(struct mwifiex_private *priv,
if (ap_11ac_enable && width >= NL80211_CHAN_WIDTH_80)
vht_cfg.misc_config |= VHT_BW_80_160_80P80;
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_11AC_CFG,
- HostCmd_ACT_GEN_SET, 0, &vht_cfg);
+ mwifiex_send_cmd(priv, HostCmd_CMD_11AC_CFG,
+ HostCmd_ACT_GEN_SET, 0, &vht_cfg, true);
return;
}
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 718066577c6c..92e77a398ecf 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -21,126 +21,8 @@
#include "main.h"
#include "11n.h"
-/*
- * This function will return the pointer to station entry in station list
- * table which matches specified mac address.
- * This function should be called after acquiring RA list spinlock.
- * NULL is returned if station entry is not found in associated STA list.
- */
-struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
-
- if (!mac)
- return NULL;
-
- list_for_each_entry(node, &priv->sta_list, list) {
- if (!memcmp(node->mac_addr, mac, ETH_ALEN))
- return node;
- }
-
- return NULL;
-}
-
-/*
- * This function will add a sta_node entry to associated station list
- * table with the given mac address.
- * If entry exist already, existing entry is returned.
- * If received mac address is NULL, NULL is returned.
- */
-static struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
- unsigned long flags;
-
- if (!mac)
- return NULL;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
- node = mwifiex_get_sta_entry(priv, mac);
- if (node)
- goto done;
-
- node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
- if (!node)
- goto done;
-
- memcpy(node->mac_addr, mac, ETH_ALEN);
- list_add_tail(&node->list, &priv->sta_list);
-
-done:
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return node;
-}
-
-/*
- * This function will search for HT IE in association request IEs
- * and set station HT parameters accordingly.
- */
-static void
-mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
- int ies_len, struct mwifiex_sta_node *node)
-{
- const struct ieee80211_ht_cap *ht_cap;
-
- if (!ies)
- return;
- ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
- if (ht_cap) {
- node->is_11n_enabled = 1;
- node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
- IEEE80211_HT_CAP_MAX_AMSDU ?
- MWIFIEX_TX_DATA_BUF_SIZE_8K :
- MWIFIEX_TX_DATA_BUF_SIZE_4K;
- } else {
- node->is_11n_enabled = 0;
- }
- return;
-}
-
-/*
- * This function will delete a station entry from station list
- */
-static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
-
- node = mwifiex_get_sta_entry(priv, mac);
- if (node) {
- list_del(&node->list);
- kfree(node);
- }
-
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return;
-}
-
-/*
- * This function will delete all stations from associated station list.
- */
-static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
-{
- struct mwifiex_sta_node *node, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
-
- list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
- list_del(&node->list);
- kfree(node);
- }
-
- INIT_LIST_HEAD(&priv->sta_list);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return;
-}
/*
* This function handles AP interface specific events generated by firmware.
@@ -268,9 +150,9 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
case EVENT_ADDBA:
dev_dbg(adapter->dev, "event: ADDBA Request\n");
if (priv->media_connected)
- mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
- HostCmd_ACT_GEN_SET, 0,
- adapter->event_body);
+ mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
+ HostCmd_ACT_GEN_SET, 0,
+ adapter->event_body, false);
break;
case EVENT_DELBA:
dev_dbg(adapter->dev, "event: DELBA Request\n");
@@ -284,6 +166,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
}
break;
+ case EVENT_EXT_SCAN_REPORT:
+ dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+ if (adapter->ext_scan)
+ return mwifiex_handle_event_ext_scan_report(priv,
+ adapter->event_skb->data);
+ break;
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 3c74eb254927..9a56bc61cb1d 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -284,27 +284,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
return 0;
}
- if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
- struct sk_buff_head list;
- struct sk_buff *rx_skb;
-
- __skb_queue_head_init(&list);
- skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
- skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));
-
- ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev->iftype, 0, false);
-
- while (!skb_queue_empty(&list)) {
- rx_skb = __skb_dequeue(&list);
- ret = mwifiex_recv_packet(priv, rx_skb);
- if (ret)
- dev_err(adapter->dev,
- "AP:Rx A-MSDU failed");
- }
-
- return 0;
- } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+ if (rx_pkt_type == PKT_TYPE_MGMT) {
ret = mwifiex_process_mgmt_packet(priv, skb);
if (ret)
dev_err(adapter->dev, "Rx of mgmt packet failed");
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 208748804a55..edbe4aff00d8 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -459,6 +459,7 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
* 'suspended' state and a 'disconnect' one.
*/
adapter->is_suspended = true;
+ adapter->hs_enabling = false;
if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
usb_kill_urb(card->rx_cmd.urb);
@@ -766,11 +767,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
case USB8897_PID_1:
case USB8897_PID_2:
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
break;
case USB8797_PID_1:
case USB8797_PID_2:
default:
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
break;
}
@@ -1024,7 +1027,6 @@ static void mwifiex_usb_cleanup_module(void)
if (usb_card && usb_card->adapter) {
struct mwifiex_adapter *adapter = usb_card->adapter;
- int i;
/* In case driver is removed when asynchronous FW downloading is
* in progress
@@ -1035,11 +1037,8 @@ static void mwifiex_usb_cleanup_module(void)
if (adapter->is_suspended)
mwifiex_usb_resume(usb_card->intf);
#endif
- for (i = 0; i < adapter->priv_num; i++)
- if ((GET_BSS_ROLE(adapter->priv[i]) ==
- MWIFIEX_BSS_ROLE_STA) &&
- adapter->priv[i]->media_connected)
- mwifiex_deauthenticate(adapter->priv[i], NULL);
+
+ mwifiex_deauthenticate_all(adapter);
mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_ANY),
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 9b82e225880c..c3824e37f3f2 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -72,7 +72,7 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
return -1;
}
- return mwifiex_send_cmd_sync(priv, cmd, HostCmd_ACT_GEN_SET, 0, NULL);
+ return mwifiex_send_cmd(priv, cmd, HostCmd_ACT_GEN_SET, 0, NULL, true);
}
EXPORT_SYMBOL_GPL(mwifiex_init_shutdown_fw);
@@ -104,6 +104,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
info->is_hs_configured = adapter->is_hs_configured;
info->hs_activated = adapter->hs_activated;
+ info->is_cmd_timedout = adapter->is_cmd_timedout;
info->num_cmd_host_to_card_failure
= adapter->dbg.num_cmd_host_to_card_failure;
info->num_cmd_sleep_cfm_host_to_card_failure
@@ -119,7 +120,6 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
info->num_cmd_assoc_failure =
adapter->dbg.num_cmd_assoc_failure;
info->num_tx_timeout = adapter->dbg.num_tx_timeout;
- info->num_cmd_timeout = adapter->dbg.num_cmd_timeout;
info->timeout_cmd_id = adapter->dbg.timeout_cmd_id;
info->timeout_cmd_act = adapter->dbg.timeout_cmd_act;
memcpy(info->last_cmd_id, adapter->dbg.last_cmd_id,
@@ -252,3 +252,117 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
return 0;
}
+
+/* This function will return the pointer to station entry in station list
+ * table which matches specified mac address.
+ * This function should be called after acquiring RA list spinlock.
+ * NULL is returned if station entry is not found in associated STA list.
+ */
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+
+ if (!mac)
+ return NULL;
+
+ list_for_each_entry(node, &priv->sta_list, list) {
+ if (!memcmp(node->mac_addr, mac, ETH_ALEN))
+ return node;
+ }
+
+ return NULL;
+}
+
+/* This function will add a sta_node entry to associated station list
+ * table with the given mac address.
+ * If the entry already exists, the existing entry is returned.
+ * If received mac address is NULL, NULL is returned.
+ */
+struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+ unsigned long flags;
+
+ if (!mac)
+ return NULL;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ node = mwifiex_get_sta_entry(priv, mac);
+ if (node)
+ goto done;
+
+ node = kzalloc(sizeof(*node), GFP_ATOMIC);
+ if (!node)
+ goto done;
+
+ memcpy(node->mac_addr, mac, ETH_ALEN);
+ list_add_tail(&node->list, &priv->sta_list);
+
+done:
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return node;
+}
+
+/* This function will search for HT IE in association request IEs
+ * and set station HT parameters accordingly.
+ */
+void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+ int ies_len, struct mwifiex_sta_node *node)
+{
+ const struct ieee80211_ht_cap *ht_cap;
+
+ if (!ies)
+ return;
+
+ ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
+ if (ht_cap) {
+ node->is_11n_enabled = 1;
+ node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU ?
+ MWIFIEX_TX_DATA_BUF_SIZE_8K :
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ } else {
+ node->is_11n_enabled = 0;
+ }
+
+ return;
+}
+
+/* This function will delete a station entry from station list */
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+ node = mwifiex_get_sta_entry(priv, mac);
+ if (node) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return;
+}
+
+/* This function will delete all stations from associated station list. */
+void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *node, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+ list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ INIT_LIST_HEAD(&priv->sta_list);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return;
+}
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index cb2d0582bd36..ddae57021397 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -30,8 +30,24 @@ static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t));
}
-static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb, dma_addr_t *buf_pa)
+struct mwifiex_dma_mapping {
+ dma_addr_t addr;
+ size_t len;
+};
+
+static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb,
+ struct mwifiex_dma_mapping *mapping)
{
- memcpy(buf_pa, skb->cb, sizeof(dma_addr_t));
+ memcpy(mapping, skb->cb, sizeof(*mapping));
}
+
+static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
+{
+ struct mwifiex_dma_mapping mapping;
+
+ MWIFIEX_SKB_PACB(skb, &mapping);
+
+ return mapping.addr;
+}
+
#endif /* !_MWIFIEX_UTIL_H_ */
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 981cf6e7c73b..0a7cc742aed7 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -37,8 +37,8 @@
/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5
-static bool enable_tx_amsdu;
-module_param(enable_tx_amsdu, bool, 0644);
+static bool disable_tx_amsdu;
+module_param(disable_tx_amsdu, bool, 0644);
/* WMM information IE */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
@@ -64,21 +64,6 @@ static u8 tos_to_tid[] = {
0x07 /* 1 1 1 AC_VO */
};
-/*
- * This table inverses the tos_to_tid operation to get a priority
- * which is in sequential order, and can be compared.
- * Use this to compare the priority of two different TIDs.
- */
-static u8 tos_to_tid_inv[] = {
- 0x02, /* from tos_to_tid[2] = 0 */
- 0x00, /* from tos_to_tid[0] = 1 */
- 0x01, /* from tos_to_tid[1] = 2 */
- 0x03,
- 0x04,
- 0x05,
- 0x06,
- 0x07};
-
static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
/*
@@ -175,8 +160,15 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
break;
ra_list->is_11n_enabled = 0;
+ ra_list->tdls_link = false;
if (!mwifiex_queuing_ra_based(priv)) {
- ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
+ if (mwifiex_get_tdls_link_status(priv, ra) ==
+ TDLS_SETUP_COMPLETE) {
+ ra_list->is_11n_enabled =
+ mwifiex_tdls_peer_11n_enabled(priv, ra);
+ } else {
+ ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
+ }
} else {
ra_list->is_11n_enabled =
mwifiex_is_sta_11n_enabled(priv, node);
@@ -213,8 +205,9 @@ static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
* This function maps ACs to TIDs.
*/
static void
-mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
+mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
+ struct mwifiex_wmm_desc *wmm = &priv->wmm;
u8 *queue_priority = wmm->queue_priority;
int i;
@@ -224,7 +217,7 @@ mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
}
for (i = 0; i < MAX_NUM_TID; ++i)
- tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
+ priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}
@@ -285,7 +278,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
}
}
- mwifiex_wmm_queue_priorities_tid(&priv->wmm);
+ mwifiex_wmm_queue_priorities_tid(priv);
}
/*
@@ -388,8 +381,7 @@ mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
* AP is disabled (due to call admission control (ACM bit). Mapping
* of TID to AC is taken care of internally.
*/
-static u8
-mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
+u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
enum mwifiex_wmm_ac_e ac, ac_down;
u8 new_tid;
@@ -421,9 +413,17 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
continue;
for (i = 0; i < MAX_NUM_TID; ++i) {
- priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
- priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
- priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
+ if (!disable_tx_amsdu &&
+ adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
+ priv->aggr_prio_tbl[i].amsdu =
+ priv->tos_to_tid_inv[i];
+ else
+ priv->aggr_prio_tbl[i].amsdu =
+ BA_STREAM_NOT_ALLOWED;
+ priv->aggr_prio_tbl[i].ampdu_ap =
+ priv->tos_to_tid_inv[i];
+ priv->aggr_prio_tbl[i].ampdu_user =
+ priv->tos_to_tid_inv[i];
}
priv->aggr_prio_tbl[6].amsdu
@@ -546,6 +546,7 @@ void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
unsigned long flags;
+ struct sk_buff *skb, *tmp;
mwifiex_11n_cleanup_reorder_tbl(priv);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
@@ -563,6 +564,9 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
!priv->adapter->surprise_removed)
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+
+ skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
}
/*
@@ -591,7 +595,7 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
* If no such node is found, a new node is added first and then
* retrieved.
*/
-static struct mwifiex_ra_list_tbl *
+struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
{
struct mwifiex_ra_list_tbl *ra_list;
@@ -641,6 +645,21 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra_list;
u8 ra[ETH_ALEN], tid_down;
unsigned long flags;
+ struct list_head list_head;
+ int tdls_status = TDLS_NOT_SETUP;
+ struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+ struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
+
+ memcpy(ra, eth_hdr->h_dest, ETH_ALEN);
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+ ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
+ if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
+ dev_dbg(adapter->dev,
+ "TDLS setup packet for %pM. Don't block\n", ra);
+ else
+ tdls_status = mwifiex_get_tdls_link_status(priv, ra);
+ }
if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
@@ -659,12 +678,27 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
have only 1 raptr for a tid in case of infra */
if (!mwifiex_queuing_ra_based(priv) &&
!mwifiex_is_skb_mgmt_frame(skb)) {
- if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
- ra_list = list_first_entry(
- &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
- struct mwifiex_ra_list_tbl, list);
- else
- ra_list = NULL;
+ switch (tdls_status) {
+ case TDLS_SETUP_COMPLETE:
+ ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
+ ra);
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+ break;
+ case TDLS_SETUP_INPROGRESS:
+ skb_queue_tail(&priv->tdls_txq, skb);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ return;
+ default:
+ list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
+ if (!list_empty(&list_head))
+ ra_list = list_first_entry(
+ &list_head, struct mwifiex_ra_list_tbl,
+ list);
+ else
+ ra_list = NULL;
+ break;
+ }
} else {
memcpy(ra, skb->data, ETH_ALEN);
if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
@@ -684,9 +718,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
ra_list->total_pkt_count++;
if (atomic_read(&priv->wmm.highest_queued_prio) <
- tos_to_tid_inv[tid_down])
+ priv->tos_to_tid_inv[tid_down])
atomic_set(&priv->wmm.highest_queued_prio,
- tos_to_tid_inv[tid_down]);
+ priv->tos_to_tid_inv[tid_down]);
atomic_inc(&priv->wmm.tx_pkts_queued);
@@ -1219,15 +1253,24 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
if (!ptr->is_11n_enabled ||
mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
- priv->wps.session_enable ||
- ((priv->sec_info.wpa_enabled ||
- priv->sec_info.wpa2_enabled) &&
- !priv->wpa_is_gtk_set)) {
- mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
- /* ra_list_spinlock has been freed in
- mwifiex_send_single_packet() */
+ priv->wps.session_enable) {
+ if (ptr->is_11n_enabled &&
+ mwifiex_is_ba_stream_setup(priv, ptr, tid) &&
+ mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) &&
+ mwifiex_is_amsdu_allowed(priv, tid) &&
+ mwifiex_is_11n_aggragation_possible(priv, ptr,
+ adapter->tx_buf_size))
+ mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
+ /* ra_list_spinlock has been freed in
+ * mwifiex_11n_aggregate_pkt()
+ */
+ else
+ mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+ /* ra_list_spinlock has been freed in
+ * mwifiex_send_single_packet()
+ */
} else {
- if (mwifiex_is_ampdu_allowed(priv, tid) &&
+ if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
ptr->ba_pkt_count > ptr->ba_packet_thr) {
if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
mwifiex_create_ba_tbl(priv, ptr->ra, tid,
@@ -1240,7 +1283,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
mwifiex_send_delba(priv, tid_del, ra, 1);
}
}
- if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
+ if (mwifiex_is_amsdu_allowed(priv, tid) &&
mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 0f129d498fb1..83e42083ebff 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -34,6 +34,21 @@ enum ieee_types_wmm_ecw_bitmasks {
static const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
/*
+ * This table inverses the tos_to_tid operation to get a priority
+ * which is in sequential order, and can be compared.
+ * Use this to compare the priority of two different TIDs.
+ */
+static const u8 tos_to_tid_inv[] = {
+ 0x02, /* from tos_to_tid[2] = 0 */
+ 0x00, /* from tos_to_tid[0] = 1 */
+ 0x01, /* from tos_to_tid[1] = 2 */
+ 0x03,
+ 0x04,
+ 0x05,
+ 0x06,
+ 0x07};
+
+/*
* This function retrieves the TID of the given RA list.
*/
static inline int
@@ -107,5 +122,8 @@ void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
const struct host_cmd_ds_command *resp);
+struct mwifiex_ra_list_tbl *
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr);
+u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 4987c3f942ce..3c0a0a86ba12 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -81,6 +81,9 @@ MODULE_PARM_DESC(ap_mode_default,
*/
#define MWL8K_HW_TIMER_REGISTER 0x0000a600
+#define BBU_RXRDY_CNT_REG 0x0000a860
+#define NOK_CCA_CNT_REG 0x0000a6a0
+#define BBU_AVG_NOISE_VAL 0x67
#define MWL8K_A2H_EVENTS (MWL8K_A2H_INT_DUMMY | \
MWL8K_A2H_INT_CHNL_SWITCHED | \
@@ -112,6 +115,8 @@ MODULE_PARM_DESC(ap_mode_default,
*/
#define MWL8K_NUM_AMPDU_STREAMS (TOTAL_HW_TX_QUEUES - 1)
+#define MWL8K_NUM_CHANS 18
+
struct rxd_ops {
int rxd_size;
void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr);
@@ -289,6 +294,12 @@ struct mwl8k_priv {
/* bitmap of running BSSes */
u32 running_bsses;
+
+ /* ACS related */
+ bool sw_scan_start;
+ struct ieee80211_channel *acs_chan;
+ unsigned long channel_time;
+ struct survey_info survey[MWL8K_NUM_CHANS];
};
#define MAX_WEP_KEY_LEN 13
@@ -396,6 +407,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
#define MWL8K_CMD_SET_HW_SPEC 0x0004
#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010
#define MWL8K_CMD_GET_STAT 0x0014
+#define MWL8K_CMD_BBP_REG_ACCESS 0x001a
#define MWL8K_CMD_RADIO_CONTROL 0x001c
#define MWL8K_CMD_RF_TX_POWER 0x001e
#define MWL8K_CMD_TX_POWER 0x001f
@@ -2987,6 +2999,47 @@ static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
}
/*
+ * CMD_BBP_REG_ACCESS.
+ */
+struct mwl8k_cmd_bbp_reg_access {
+ struct mwl8k_cmd_pkt header;
+ __le16 action;
+ __le16 offset;
+ u8 value;
+ u8 rsrv[3];
+} __packed;
+
+static int
+mwl8k_cmd_bbp_reg_access(struct ieee80211_hw *hw,
+ u16 action,
+ u16 offset,
+ u8 *value)
+{
+ struct mwl8k_cmd_bbp_reg_access *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_BBP_REG_ACCESS);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le16(action);
+ cmd->offset = cpu_to_le16(offset);
+
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+
+ if (!rc)
+ *value = cmd->value;
+ else
+ *value = 0;
+
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
* CMD_SET_POST_SCAN.
*/
struct mwl8k_cmd_set_post_scan {
@@ -3016,6 +3069,64 @@ mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
return rc;
}
+static int freq_to_idx(struct mwl8k_priv *priv, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ sband = priv->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
+static void mwl8k_update_survey(struct mwl8k_priv *priv,
+ struct ieee80211_channel *channel)
+{
+ u32 cca_cnt, rx_rdy;
+ s8 nf = 0, idx;
+ struct survey_info *survey;
+
+ idx = freq_to_idx(priv, priv->acs_chan->center_freq);
+ if (idx >= MWL8K_NUM_CHANS) {
+ wiphy_err(priv->hw->wiphy, "Failed to update survey\n");
+ return;
+ }
+
+ survey = &priv->survey[idx];
+
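+ /* NOK_CCA_CNT_REG and BBU_RXRDY_CNT_REG accumulate channel-busy and
+ * RX air time in microseconds; convert them to milliseconds for the
+ * survey.
+ */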
+ cca_cnt = ioread32(priv->regs + NOK_CCA_CNT_REG);
+ cca_cnt /= 1000; /* uSecs to mSecs */
+ survey->channel_time_busy = (u64) cca_cnt;
+
+ rx_rdy = ioread32(priv->regs + BBU_RXRDY_CNT_REG);
+ rx_rdy /= 1000; /* uSecs to mSecs */
+ survey->channel_time_rx = (u64) rx_rdy;
+
+ priv->channel_time = jiffies - priv->channel_time;
+ survey->channel_time = jiffies_to_msecs(priv->channel_time);
+
+ survey->channel = channel;
+
+ mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &nf);
+
+ /* Make sure sign is negative else ACS at hostapd fails */
+ survey->noise = nf * -1;
+
+ survey->filled = SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_BUSY |
+ SURVEY_INFO_CHANNEL_TIME_RX;
+}
+
/*
* CMD_SET_RF_CHANNEL.
*/
@@ -3033,6 +3144,7 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
enum nl80211_channel_type channel_type =
cfg80211_get_chandef_type(&conf->chandef);
struct mwl8k_cmd_set_rf_channel *cmd;
+ struct mwl8k_priv *priv = hw->priv;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -3049,13 +3161,29 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
else if (channel->band == IEEE80211_BAND_5GHZ)
cmd->channel_flags |= cpu_to_le32(0x00000004);
- if (channel_type == NL80211_CHAN_NO_HT ||
- channel_type == NL80211_CHAN_HT20)
+ if (!priv->sw_scan_start) {
+ if (channel_type == NL80211_CHAN_NO_HT ||
+ channel_type == NL80211_CHAN_HT20)
+ cmd->channel_flags |= cpu_to_le32(0x00000080);
+ else if (channel_type == NL80211_CHAN_HT40MINUS)
+ cmd->channel_flags |= cpu_to_le32(0x000001900);
+ else if (channel_type == NL80211_CHAN_HT40PLUS)
+ cmd->channel_flags |= cpu_to_le32(0x000000900);
+ } else {
cmd->channel_flags |= cpu_to_le32(0x00000080);
- else if (channel_type == NL80211_CHAN_HT40MINUS)
- cmd->channel_flags |= cpu_to_le32(0x000001900);
- else if (channel_type == NL80211_CHAN_HT40PLUS)
- cmd->channel_flags |= cpu_to_le32(0x000000900);
+ }
+
+ if (priv->sw_scan_start) {
+ /* Store current channel stats
+ * before switching to newer one.
+ * This will be processed only for AP fw.
+ */
+ if (priv->channel_time != 0)
+ mwl8k_update_survey(priv, priv->acs_chan);
+
+ priv->channel_time = jiffies;
+ priv->acs_chan = channel;
+ }
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -5263,6 +5391,27 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
{
struct mwl8k_priv *priv = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_supported_band *sband;
+
+ if (priv->ap_fw) {
+ sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels)
+ return -ENOENT;
+
+ memcpy(survey, &priv->survey[idx], sizeof(*survey));
+ survey->channel = &sband->channels[idx];
+
+ return 0;
+ }
if (idx != 0)
return -ENOENT;
@@ -5406,6 +5555,40 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return rc;
}
+static void mwl8k_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ u8 tmp;
+
+ if (!priv->ap_fw)
+ return;
+
+ /* clear all stats */
+ priv->channel_time = 0;
+ ioread32(priv->regs + BBU_RXRDY_CNT_REG);
+ ioread32(priv->regs + NOK_CCA_CNT_REG);
+ mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &tmp);
+
+ priv->sw_scan_start = true;
+}
+
+static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ u8 tmp;
+
+ if (!priv->ap_fw)
+ return;
+
+ priv->sw_scan_start = false;
+
+ /* clear all stats */
+ priv->channel_time = 0;
+ ioread32(priv->regs + BBU_RXRDY_CNT_REG);
+ ioread32(priv->regs + NOK_CCA_CNT_REG);
+ mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &tmp);
+}
+
static const struct ieee80211_ops mwl8k_ops = {
.tx = mwl8k_tx,
.start = mwl8k_start,
@@ -5424,6 +5607,8 @@ static const struct ieee80211_ops mwl8k_ops = {
.get_stats = mwl8k_get_stats,
.get_survey = mwl8k_get_survey,
.ampdu_action = mwl8k_ampdu_action,
+ .sw_scan_start = mwl8k_sw_scan_start,
+ .sw_scan_complete = mwl8k_sw_scan_complete,
};
static void mwl8k_finalize_join_worker(struct work_struct *work)
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index d01edd2c50c5..a9e94b6db5b7 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -59,7 +59,8 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
for (i = 0; i < NUM_CHANNELS; i++) {
if (priv->channel_mask & (1 << i)) {
priv->channels[i].center_freq =
- ieee80211_dsss_chan_to_freq(i + 1);
+ ieee80211_channel_to_frequency(i + 1,
+ IEEE80211_BAND_2GHZ);
channels++;
}
}
@@ -177,7 +178,7 @@ static int orinoco_set_monitor_channel(struct wiphy *wiphy,
if (chandef->chan->band != IEEE80211_BAND_2GHZ)
return -EINVAL;
- channel = ieee80211_freq_to_dsss_chan(chandef->chan->center_freq);
+ channel = ieee80211_frequency_to_channel(chandef->chan->center_freq);
if ((channel < 1) || (channel > NUM_CHANNELS) ||
!(priv->channel_mask & (1 << (channel - 1))))
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index c09c8437c0b8..49300d04efdf 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -1193,7 +1193,7 @@ int orinoco_hw_get_freq(struct orinoco_private *priv)
goto out;
}
- freq = ieee80211_dsss_chan_to_freq(channel);
+ freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
out:
orinoco_unlock(priv, &flags);
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index e8c5714bfd11..e175b9b8561b 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -110,7 +110,8 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
break;
}
- freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel));
+ freq = ieee80211_channel_to_frequency(
+ le16_to_cpu(bss->a.channel), IEEE80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
if (!channel) {
printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
@@ -146,7 +147,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
ie_len = len - sizeof(*bss);
ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
chan = ie ? ie[2] : 0;
- freq = ieee80211_dsss_chan_to_freq(chan);
+ freq = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
timestamp = le64_to_cpu(bss->timestamp);
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 3b5508f982e8..b7a867b50b94 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -444,7 +444,7 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
for (i = 0; i < (6 - frq->e); i++)
denom *= 10;
- chan = ieee80211_freq_to_dsss_chan(frq->m / denom);
+ chan = ieee80211_frequency_to_channel(frq->m / denom);
}
if ((chan < 1) || (chan > NUM_CHANNELS) ||
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 6e635cfa24c8..043bd1c23c19 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -513,7 +513,7 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
if (!buf)
return -ENOMEM;
- left = block_size = min((size_t)P54U_FW_BLOCK, priv->fw->size);
+ left = block_size = min_t(size_t, P54U_FW_BLOCK, priv->fw->size);
strcpy(buf, p54u_firmware_upload_3887);
left -= strlen(p54u_firmware_upload_3887);
tmp += strlen(p54u_firmware_upload_3887);
@@ -1053,6 +1053,10 @@ static int p54u_probe(struct usb_interface *intf,
priv->upload_fw = p54u_upload_firmware_net2280;
}
err = p54u_load_firmware(dev, intf);
+ if (err) {
+ usb_put_dev(udev);
+ p54_free_common(dev);
+ }
return err;
}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 78fa64d3f223..ecbb0546cf3e 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -644,7 +644,7 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie);
if (wpa_ie_len > 0) {
iwe.cmd = IWEVGENIE;
- iwe.u.data.length = min(wpa_ie_len, (size_t)MAX_WPA_IE_LEN);
+ iwe.u.data.length = min_t(size_t, wpa_ie_len, MAX_WPA_IE_LEN);
current_ev = iwe_stream_add_point(info, current_ev, end_buf,
&iwe, wpa_ie);
}
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 5028557aa18a..39d22a154341 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1290,7 +1290,8 @@ static int set_channel(struct usbnet *usbdev, int channel)
if (is_associated(usbdev))
return 0;
- dsconfig = ieee80211_dsss_chan_to_freq(channel) * 1000;
+ dsconfig = 1000 *
+ ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
len = sizeof(config);
ret = rndis_query_oid(usbdev,
@@ -2835,7 +2836,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
bssid, req_ie, req_ie_len,
resp_ie, resp_ie_len, GFP_KERNEL);
} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
- cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(usbdev->net, bssid,
+ get_current_channel(usbdev, NULL),
+ GFP_KERNEL);
kfree(info);
diff --git a/drivers/net/wireless/rsi/Kconfig b/drivers/net/wireless/rsi/Kconfig
new file mode 100644
index 000000000000..35245f994c10
--- /dev/null
+++ b/drivers/net/wireless/rsi/Kconfig
@@ -0,0 +1,30 @@
+config RSI_91X
+ tristate "Redpine Signals Inc 91x WLAN driver support"
+ depends on MAC80211
+ ---help---
+ This option enables support for RSI 1x1 devices.
+ Select M (recommended) if you have an RSI 1x1 wireless module.
+
+config RSI_DEBUGFS
+ bool "Redpine Signals Inc debug support"
+ depends on RSI_91X
+ default y
+ ---help---
+ Say Y if you would like to enable debug support. This option
+ creates debugfs entries.
+
+config RSI_SDIO
+ tristate "Redpine Signals SDIO bus support"
+ depends on MMC && RSI_91X
+ default m
+ ---help---
+ This option enables the SDIO bus support in rsi drivers.
+ Select M (recommended) if you have an RSI 1x1 wireless module.
+
+config RSI_USB
+ tristate "Redpine Signals USB bus support"
+ depends on USB && RSI_91X
+ default m
+ ---help---
+ This option enables the USB bus support in rsi drivers.
+ Select M (recommended) if you have an RSI 1x1 wireless module.
diff --git a/drivers/net/wireless/rsi/Makefile b/drivers/net/wireless/rsi/Makefile
new file mode 100644
index 000000000000..25828b692756
--- /dev/null
+++ b/drivers/net/wireless/rsi/Makefile
@@ -0,0 +1,12 @@
+rsi_91x-y += rsi_91x_main.o
+rsi_91x-y += rsi_91x_core.o
+rsi_91x-y += rsi_91x_mac80211.o
+rsi_91x-y += rsi_91x_mgmt.o
+rsi_91x-y += rsi_91x_pkt.o
+rsi_91x-$(CONFIG_RSI_DEBUGFS) += rsi_91x_debugfs.o
+
+rsi_usb-y += rsi_91x_usb.o rsi_91x_usb_ops.o
+rsi_sdio-y += rsi_91x_sdio.o rsi_91x_sdio_ops.o
+obj-$(CONFIG_RSI_91X) += rsi_91x.o
+obj-$(CONFIG_RSI_SDIO) += rsi_sdio.o
+obj-$(CONFIG_RSI_USB) += rsi_usb.o
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
new file mode 100644
index 000000000000..e89535e86caf
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -0,0 +1,342 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
+/**
+ * rsi_determine_min_weight_queue() - This function determines the queue with
+ * the min weight.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: q_num: Corresponding queue number.
+ */
+static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
+{
+ struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
+ u32 q_len = 0;
+ u8 ii = 0;
+
+ for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
+ q_len = skb_queue_len(&common->tx_queue[ii]);
+ if ((tx_qinfo[ii].pkt_contended) && q_len) {
+ common->min_weight = tx_qinfo[ii].weight;
+ break;
+ }
+ }
+ return ii;
+}
+
+/**
+ * rsi_recalculate_weights() - This function recalculates the weights
+ * corresponding to each queue.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: recontend_queue bool variable
+ */
+static bool rsi_recalculate_weights(struct rsi_common *common)
+{
+ struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
+ bool recontend_queue = false;
+ u8 ii = 0;
+ u32 q_len = 0;
+
+ for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
+ q_len = skb_queue_len(&common->tx_queue[ii]);
+ /* Check for the need of contention */
+ if (q_len) {
+ if (tx_qinfo[ii].pkt_contended) {
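+ /* Already contended: reduce this queue's backoff weight
+ * by the current minimum so relative priorities are kept.
+ */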
+ tx_qinfo[ii].weight =
+ ((tx_qinfo[ii].weight > common->min_weight) ?
+ tx_qinfo[ii].weight - common->min_weight : 0);
+ } else {
+ tx_qinfo[ii].pkt_contended = 1;
+ tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
+ recontend_queue = true;
+ }
+ } else { /* No packets so no contention */
+ tx_qinfo[ii].weight = 0;
+ tx_qinfo[ii].pkt_contended = 0;
+ }
+ }
+
+ return recontend_queue;
+}
+
+/**
+ * rsi_core_determine_hal_queue() - This function determines the queue from
+ * which packet has to be dequeued.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: q_num: Corresponding queue number on success.
+ */
+static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
+{
+ bool recontend_queue = false;
+ u32 q_len = 0;
+ u8 q_num = INVALID_QUEUE;
+ u8 ii, min = 0;
+
+ if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
+ if (!common->mgmt_q_block)
+ q_num = MGMT_SOFT_Q;
+ return q_num;
+ }
+
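+ /* A non-zero pkt_cnt means the previously selected queue still has
+ * a burst budget left, so keep servicing it before re-contending.
+ */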
+ if (common->pkt_cnt != 0) {
+ --common->pkt_cnt;
+ return common->selected_qnum;
+ }
+
+get_queue_num:
+ q_num = 0;
+ recontend_queue = false;
+
+ q_num = rsi_determine_min_weight_queue(common);
+ ii = q_num;
+ q_len = skb_queue_len(&common->tx_queue[ii]);
+
+ /* Selecting the queue with least back off */
+ for (; ii < NUM_EDCA_QUEUES; ii++) {
+ if (((common->tx_qinfo[ii].pkt_contended) &&
+ (common->tx_qinfo[ii].weight < min)) && q_len) {
+ min = common->tx_qinfo[ii].weight;
+ q_num = ii;
+ }
+ }
+
+ common->tx_qinfo[q_num].pkt_contended = 0;
+ /* Adjust the back off values for all queues again */
+ recontend_queue = rsi_recalculate_weights(common);
+
+ q_len = skb_queue_len(&common->tx_queue[q_num]);
+ if (!q_len) {
+ /* If any queues are freshly contended and the selected queue
+ * doesn't have any packets
+ * then get the queue number again with fresh values
+ */
+ if (recontend_queue)
+ goto get_queue_num;
+
+ q_num = INVALID_QUEUE;
+ return q_num;
+ }
+
+ common->selected_qnum = q_num;
+ q_len = skb_queue_len(&common->tx_queue[q_num]);
+
+ switch (common->selected_qnum) {
+ case VO_Q:
+ if (q_len > MAX_CONTINUOUS_VO_PKTS)
+ common->pkt_cnt = (MAX_CONTINUOUS_VO_PKTS - 1);
+ else
+ common->pkt_cnt = --q_len;
+ break;
+
+ case VI_Q:
+ if (q_len > MAX_CONTINUOUS_VI_PKTS)
+ common->pkt_cnt = (MAX_CONTINUOUS_VI_PKTS - 1);
+ else
+ common->pkt_cnt = --q_len;
+
+ break;
+
+ default:
+ common->pkt_cnt = 0;
+ break;
+ }
+
+ return q_num;
+}
+
+/**
+ * rsi_core_queue_pkt() - This function enqueues the packet to the queue
+ * specified by the queue number.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None.
+ */
+static void rsi_core_queue_pkt(struct rsi_common *common,
+ struct sk_buff *skb)
+{
+ u8 q_num = skb->priority;
+ if (q_num >= NUM_SOFT_QUEUES) {
+ rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
+ __func__, q_num);
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ skb_queue_tail(&common->tx_queue[q_num], skb);
+}
+
+/**
+ * rsi_core_dequeue_pkt() - This function dequeues the packet from the queue
+ * specified by the queue number.
+ * @common: Pointer to the driver private structure.
+ * @q_num: Queue number.
+ *
+ * Return: Pointer to sk_buff structure.
+ */
+static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
+ u8 q_num)
+{
+ if (q_num >= NUM_SOFT_QUEUES) {
+ rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
+ __func__, q_num);
+ return NULL;
+ }
+
+ return skb_dequeue(&common->tx_queue[q_num]);
+}
+
+/**
+ * rsi_core_qos_processor() - This function is used to determine the wmm queue
+ * based on the backoff procedure. Data packets are
+ * dequeued from the selected hal queue and sent to
+ * the lower layers.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+void rsi_core_qos_processor(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct sk_buff *skb;
+ unsigned long tstamp_1, tstamp_2;
+ u8 q_num;
+ int status;
+
+ tstamp_1 = jiffies;
+ while (1) {
+ q_num = rsi_core_determine_hal_queue(common);
+ rsi_dbg(DATA_TX_ZONE,
+ "%s: Queue number = %d\n", __func__, q_num);
+
+ if (q_num == INVALID_QUEUE) {
+ rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
+ break;
+ }
+
+ mutex_lock(&common->tx_rxlock);
+
+ status = adapter->check_hw_queue_status(adapter, q_num);
+ if ((status <= 0)) {
+ mutex_unlock(&common->tx_rxlock);
+ break;
+ }
+
+ if ((q_num < MGMT_SOFT_Q) &&
+ ((skb_queue_len(&common->tx_queue[q_num])) <=
+ MIN_DATA_QUEUE_WATER_MARK)) {
+ if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
+ ieee80211_wake_queue(adapter->hw,
+ WME_AC(q_num));
+ }
+
+ skb = rsi_core_dequeue_pkt(common, q_num);
+ if (skb == NULL) {
+ mutex_unlock(&common->tx_rxlock);
+ break;
+ }
+
+ if (q_num == MGMT_SOFT_Q)
+ status = rsi_send_mgmt_pkt(common, skb);
+ else
+ status = rsi_send_data_pkt(common, skb);
+
+ if (status) {
+ mutex_unlock(&common->tx_rxlock);
+ break;
+ }
+
+ common->tx_stats.total_tx_pkt_send[q_num]++;
+
+ tstamp_2 = jiffies;
+ mutex_unlock(&common->tx_rxlock);
+
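+ /* Yield the CPU if this TX loop has been running for
+ * more than 300 ms.
+ */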
+ if (tstamp_2 > tstamp_1 + (300 * HZ / 1000))
+ schedule();
+ }
+}
+
+/**
+ * rsi_core_xmit() - This function transmits the packets received from mac80211
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None.
+ */
+void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_tx_info *info;
+ struct skb_info *tx_params;
+ struct ieee80211_hdr *tmp_hdr = NULL;
+ u8 q_num, tid = 0;
+
+ if ((!skb) || (!skb->len)) {
+ rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
+ __func__);
+ goto xmit_fail;
+ }
+ info = IEEE80211_SKB_CB(skb);
+ tx_params = (struct skb_info *)info->driver_data;
+ tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
+
+ if (common->fsm_state != FSM_MAC_INIT_DONE) {
+ rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
+ goto xmit_fail;
+ }
+
+ if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) ||
+ (ieee80211_is_ctl(tmp_hdr->frame_control))) {
+ q_num = MGMT_SOFT_Q;
+ skb->priority = q_num;
+ } else {
+ if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
+ tid = (skb->data[24] & IEEE80211_QOS_TID);
+ skb->priority = TID_TO_WME_AC(tid);
+ } else {
+ tid = IEEE80211_NONQOS_TID;
+ skb->priority = BE_Q;
+ }
+ q_num = skb->priority;
+ tx_params->tid = tid;
+ tx_params->sta_id = 0;
+ }
+
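+ /* Soft queue is at its water mark: stop the corresponding
+ * mac80211 queue, wake the TX thread to drain it and drop
+ * this packet.
+ */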
+ if ((q_num != MGMT_SOFT_Q) &&
+ ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
+ DATA_QUEUE_WATER_MARK)) {
+ if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
+ ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
+ rsi_set_event(&common->tx_thread.event);
+ goto xmit_fail;
+ }
+
+ rsi_core_queue_pkt(common, skb);
+ rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
+ rsi_set_event(&common->tx_thread.event);
+
+ return;
+
+xmit_fail:
+ rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
+ /* Dropping pkt here */
+ ieee80211_free_txskb(common->priv->hw, skb);
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
new file mode 100644
index 000000000000..7e4ef4554411
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
@@ -0,0 +1,339 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "rsi_debugfs.h"
+#include "rsi_sdio.h"
+
+/**
+ * rsi_sdio_stats_read() - This function dumps the SDIO statistics of the driver.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_stats_read(struct seq_file *seq, void *data)
+{
+ struct rsi_common *common = seq->private;
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+
+ seq_printf(seq, "total_sdio_interrupts: %d\n",
+ dev->rx_info.sdio_int_counter);
+ seq_printf(seq, "sdio_msdu_pending_intr_count: %d\n",
+ dev->rx_info.total_sdio_msdu_pending_intr);
+ seq_printf(seq, "sdio_buff_full_count : %d\n",
+ dev->rx_info.buf_full_counter);
+ seq_printf(seq, "sdio_buf_semi_full_count %d\n",
+ dev->rx_info.buf_semi_full_counter);
+ seq_printf(seq, "sdio_unknown_intr_count: %d\n",
+ dev->rx_info.total_sdio_unknown_intr);
+ /* RX Path Stats */
+ seq_printf(seq, "BUFFER FULL STATUS : %d\n",
+ dev->rx_info.buffer_full);
+ seq_printf(seq, "SEMI BUFFER FULL STATUS : %d\n",
+ dev->rx_info.semi_buffer_full);
+ seq_printf(seq, "MGMT BUFFER FULL STATUS : %d\n",
+ dev->rx_info.mgmt_buffer_full);
+ seq_printf(seq, "BUFFER FULL COUNTER : %d\n",
+ dev->rx_info.buf_full_counter);
+ seq_printf(seq, "BUFFER SEMI FULL COUNTER : %d\n",
+ dev->rx_info.buf_semi_full_counter);
+ seq_printf(seq, "MGMT BUFFER FULL COUNTER : %d\n",
+ dev->rx_info.mgmt_buf_full_counter);
+
+ return 0;
+}
+
+/**
+ * rsi_sdio_stats_open() - This function calls single open function of seq_file
+ * to open file and read contents from it.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: Pointer to the opened file status: 0 on success, ENOMEM on failure.
+ */
+static int rsi_sdio_stats_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, rsi_sdio_stats_read, inode->i_private);
+}
+
+/**
+ * rsi_version_read() - This function gives the driver and firmware version numbers.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_version_read(struct seq_file *seq, void *data)
+{
+ struct rsi_common *common = seq->private;
+
+ common->driver_ver.major = 0;
+ common->driver_ver.minor = 1;
+ common->driver_ver.release_num = 0;
+ common->driver_ver.patch_num = 0;
+ seq_printf(seq, "Driver : %x.%d.%d.%d\nLMAC : %d.%d.%d.%d\n",
+ common->driver_ver.major,
+ common->driver_ver.minor,
+ common->driver_ver.release_num,
+ common->driver_ver.patch_num,
+ common->fw_ver.major,
+ common->fw_ver.minor,
+ common->fw_ver.release_num,
+ common->fw_ver.patch_num);
+ return 0;
+}
+
+/**
+ * rsi_version_open() - This function calls single open function of seq_file to
+ * open file and read contents from it.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: Pointer to the opened file status: 0 on success, ENOMEM on failure.
+ */
+static int rsi_version_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, rsi_version_read, inode->i_private);
+}
+
+/**
+ * rsi_stats_read() - This function returns the status of the driver.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_stats_read(struct seq_file *seq, void *data)
+{
+ struct rsi_common *common = seq->private;
+
+ unsigned char fsm_state[][32] = {
+ "FSM_CARD_NOT_READY",
+ "FSM_BOOT_PARAMS_SENT",
+ "FSM_EEPROM_READ_MAC_ADDR",
+ "FSM_RESET_MAC_SENT",
+ "FSM_RADIO_CAPS_SENT",
+ "FSM_BB_RF_PROG_SENT",
+ "FSM_MAC_INIT_DONE"
+ };
+ seq_puts(seq, "==> RSI STA DRIVER STATUS <==\n");
+ seq_puts(seq, "DRIVER_FSM_STATE: ");
+
+ if (common->fsm_state <= FSM_MAC_INIT_DONE)
+ seq_printf(seq, "%s", fsm_state[common->fsm_state]);
+
+ seq_printf(seq, "(%d)\n\n", common->fsm_state);
+
+ /* Mgmt TX Path Stats */
+ seq_printf(seq, "total_mgmt_pkt_send : %d\n",
+ common->tx_stats.total_tx_pkt_send[MGMT_SOFT_Q]);
+ seq_printf(seq, "total_mgmt_pkt_queued : %d\n",
+ skb_queue_len(&common->tx_queue[4]));
+ seq_printf(seq, "total_mgmt_pkt_freed : %d\n",
+ common->tx_stats.total_tx_pkt_freed[MGMT_SOFT_Q]);
+
+ /* Data TX Path Stats */
+ seq_printf(seq, "total_data_vo_pkt_send: %8d\t",
+ common->tx_stats.total_tx_pkt_send[VO_Q]);
+ seq_printf(seq, "total_data_vo_pkt_queued: %8d\t",
+ skb_queue_len(&common->tx_queue[0]));
+ seq_printf(seq, "total_vo_pkt_freed: %8d\n",
+ common->tx_stats.total_tx_pkt_freed[VO_Q]);
+ seq_printf(seq, "total_data_vi_pkt_send: %8d\t",
+ common->tx_stats.total_tx_pkt_send[VI_Q]);
+ seq_printf(seq, "total_data_vi_pkt_queued: %8d\t",
+ skb_queue_len(&common->tx_queue[1]));
+ seq_printf(seq, "total_vi_pkt_freed: %8d\n",
+ common->tx_stats.total_tx_pkt_freed[VI_Q]);
+ seq_printf(seq, "total_data_be_pkt_send: %8d\t",
+ common->tx_stats.total_tx_pkt_send[BE_Q]);
+ seq_printf(seq, "total_data_be_pkt_queued: %8d\t",
+ skb_queue_len(&common->tx_queue[2]));
+ seq_printf(seq, "total_be_pkt_freed: %8d\n",
+ common->tx_stats.total_tx_pkt_freed[BE_Q]);
+ seq_printf(seq, "total_data_bk_pkt_send: %8d\t",
+ common->tx_stats.total_tx_pkt_send[BK_Q]);
+ seq_printf(seq, "total_data_bk_pkt_queued: %8d\t",
+ skb_queue_len(&common->tx_queue[3]));
+ seq_printf(seq, "total_bk_pkt_freed: %8d\n",
+ common->tx_stats.total_tx_pkt_freed[BK_Q]);
+
+ seq_puts(seq, "\n");
+ return 0;
+}
+
+/**
+ * rsi_stats_open() - This function calls single open function of seq_file to
+ * open file and read contents from it.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: Pointer to the opened file status: 0 on success, ENOMEM on failure.
+ */
+static int rsi_stats_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, rsi_stats_read, inode->i_private);
+}
+
+/**
+ * rsi_debug_zone_read() - This function displays the currently enabled debug zones.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_debug_zone_read(struct seq_file *seq, void *data)
+{
+ rsi_dbg(FSM_ZONE, "%x: rsi_enabled zone", rsi_zone_enabled);
+ seq_printf(seq, "The zones available are %#x\n",
+ rsi_zone_enabled);
+ return 0;
+}
+
+/**
+ * rsi_debug_read() - This function calls single open function of seq_file to
+ * open file and read contents from it.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: Pointer to the opened file status: 0 on success, ENOMEM on failure.
+ */
+static int rsi_debug_read(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, rsi_debug_zone_read, inode->i_private);
+}
+
+/**
+ * rsi_debug_zone_write() - This function updates the enabled debug zones
+ * as per user input.
+ * @filp: Pointer to the file structure.
+ * @buff: Pointer to the character buffer.
+ * @len: Length of the data to be written into buffer.
+ * @data: Pointer to the data.
+ *
+ * Return: len on success, negative error code on failure.
+ */
+static ssize_t rsi_debug_zone_write(struct file *filp,
+ const char __user *buff,
+ size_t len,
+ loff_t *data)
+{
+ unsigned long dbg_zone;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = kstrtoul_from_user(buff, len, 16, &dbg_zone);
+
+ if (ret)
+ return ret;
+
+ rsi_zone_enabled = dbg_zone;
+ return len;
+}
+
+#define FOPS(fopen) { \
+ .owner = THIS_MODULE, \
+ .open = (fopen), \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+#define FOPS_RW(fopen, fwrite) { \
+ .owner = THIS_MODULE, \
+ .open = (fopen), \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .write = (fwrite), \
+}
+
+static const struct rsi_dbg_files dev_debugfs_files[] = {
+ {"version", 0644, FOPS(rsi_version_open),},
+ {"stats", 0644, FOPS(rsi_stats_open),},
+ {"debug_zone", 0666, FOPS_RW(rsi_debug_read, rsi_debug_zone_write),},
+ {"sdio_stats", 0644, FOPS(rsi_sdio_stats_open),},
+};
+
+/**
+ * rsi_init_dbgfs() - This function initializes the dbgfs entry.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_init_dbgfs(struct rsi_hw *adapter)
+{
+ struct rsi_common *common = adapter->priv;
+ struct rsi_debugfs *dev_dbgfs;
+ char devdir[6];
+ int ii;
+ const struct rsi_dbg_files *files;
+
+ dev_dbgfs = kzalloc(sizeof(*dev_dbgfs), GFP_KERNEL);
+ if (!dev_dbgfs)
+ return -ENOMEM;
+
+ adapter->dfsentry = dev_dbgfs;
+
+ snprintf(devdir, sizeof(devdir), "%s",
+ wiphy_name(adapter->hw->wiphy));
+ dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL);
+
+ if (IS_ERR(dev_dbgfs->subdir)) {
+ if (dev_dbgfs->subdir == ERR_PTR(-ENODEV))
+ rsi_dbg(ERR_ZONE,
+ "%s:Debugfs has not been mounted\n", __func__);
+ else
+ rsi_dbg(ERR_ZONE, "debugfs:%s not created\n", devdir);
+
+ adapter->dfsentry = NULL;
+ kfree(dev_dbgfs);
+ return (int)PTR_ERR(dev_dbgfs->subdir);
+ } else {
+ for (ii = 0; ii < adapter->num_debugfs_entries; ii++) {
+ files = &dev_debugfs_files[ii];
+ dev_dbgfs->rsi_files[ii] =
+ debugfs_create_file(files->name,
+ files->perms,
+ dev_dbgfs->subdir,
+ common,
+ &files->fops);
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rsi_init_dbgfs);
+
+/**
+ * rsi_remove_dbgfs() - Removes the previously created dbgfs file entries
+ * in the reverse order of creation.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_remove_dbgfs(struct rsi_hw *adapter)
+{
+ struct rsi_debugfs *dev_dbgfs = adapter->dfsentry;
+
+ if (!dev_dbgfs)
+ return;
+
+ debugfs_remove_recursive(dev_dbgfs->subdir);
+}
+EXPORT_SYMBOL_GPL(rsi_remove_dbgfs);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
new file mode 100644
index 000000000000..84164747ace0
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -0,0 +1,1008 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "rsi_debugfs.h"
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
+static const struct ieee80211_channel rsi_2ghz_channels[] = {
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+ .hw_value = 1 }, /* Channel 1 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+ .hw_value = 2 }, /* Channel 2 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+ .hw_value = 3 }, /* Channel 3 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+ .hw_value = 4 }, /* Channel 4 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+ .hw_value = 5 }, /* Channel 5 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+ .hw_value = 6 }, /* Channel 6 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+ .hw_value = 7 }, /* Channel 7 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+ .hw_value = 8 }, /* Channel 8 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+ .hw_value = 9 }, /* Channel 9 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+ .hw_value = 10 }, /* Channel 10 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+ .hw_value = 11 }, /* Channel 11 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+ .hw_value = 12 }, /* Channel 12 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+ .hw_value = 13 }, /* Channel 13 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+ .hw_value = 14 }, /* Channel 14 */
+};
+
+static const struct ieee80211_channel rsi_5ghz_channels[] = {
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180,
+ .hw_value = 36, }, /* Channel 36 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200,
+ .hw_value = 40, }, /* Channel 40 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220,
+ .hw_value = 44, }, /* Channel 44 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240,
+ .hw_value = 48, }, /* Channel 48 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5260,
+ .hw_value = 52, }, /* Channel 52 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5280,
+ .hw_value = 56, }, /* Channel 56 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5300,
+ .hw_value = 60, }, /* Channel 60 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5320,
+ .hw_value = 64, }, /* Channel 64 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5500,
+ .hw_value = 100, }, /* Channel 100 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5520,
+ .hw_value = 104, }, /* Channel 104 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5540,
+ .hw_value = 108, }, /* Channel 108 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5560,
+ .hw_value = 112, }, /* Channel 112 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5580,
+ .hw_value = 116, }, /* Channel 116 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5600,
+ .hw_value = 120, }, /* Channel 120 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5620,
+ .hw_value = 124, }, /* Channel 124 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5640,
+ .hw_value = 128, }, /* Channel 128 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5660,
+ .hw_value = 132, }, /* Channel 132 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5680,
+ .hw_value = 136, }, /* Channel 136 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5700,
+ .hw_value = 140, }, /* Channel 140 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5745,
+ .hw_value = 149, }, /* Channel 149 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5765,
+ .hw_value = 153, }, /* Channel 153 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5785,
+ .hw_value = 157, }, /* Channel 157 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5805,
+ .hw_value = 161, }, /* Channel 161 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5825,
+ .hw_value = 165, }, /* Channel 165 */
+};
+
+struct ieee80211_rate rsi_rates[12] = {
+ { .bitrate = STD_RATE_01 * 5, .hw_value = RSI_RATE_1 },
+ { .bitrate = STD_RATE_02 * 5, .hw_value = RSI_RATE_2 },
+ { .bitrate = STD_RATE_5_5 * 5, .hw_value = RSI_RATE_5_5 },
+ { .bitrate = STD_RATE_11 * 5, .hw_value = RSI_RATE_11 },
+ { .bitrate = STD_RATE_06 * 5, .hw_value = RSI_RATE_6 },
+ { .bitrate = STD_RATE_09 * 5, .hw_value = RSI_RATE_9 },
+ { .bitrate = STD_RATE_12 * 5, .hw_value = RSI_RATE_12 },
+ { .bitrate = STD_RATE_18 * 5, .hw_value = RSI_RATE_18 },
+ { .bitrate = STD_RATE_24 * 5, .hw_value = RSI_RATE_24 },
+ { .bitrate = STD_RATE_36 * 5, .hw_value = RSI_RATE_36 },
+ { .bitrate = STD_RATE_48 * 5, .hw_value = RSI_RATE_48 },
+ { .bitrate = STD_RATE_54 * 5, .hw_value = RSI_RATE_54 },
+};
+
+const u16 rsi_mcsrates[8] = {
+ RSI_RATE_MCS0, RSI_RATE_MCS1, RSI_RATE_MCS2, RSI_RATE_MCS3,
+ RSI_RATE_MCS4, RSI_RATE_MCS5, RSI_RATE_MCS6, RSI_RATE_MCS7
+};
+
+/**
+ * rsi_is_cipher_wep() - This function determines if the cipher is WEP or not.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: If cipher type is WEP, a value of 1 is returned, else 0.
+ */
+
+bool rsi_is_cipher_wep(struct rsi_common *common)
+{
+ if (((common->secinfo.gtk_cipher == WLAN_CIPHER_SUITE_WEP104) ||
+ (common->secinfo.gtk_cipher == WLAN_CIPHER_SUITE_WEP40)) &&
+ (!common->secinfo.ptk_cipher))
+ return true;
+ else
+ return false;
+}
+
+/**
+ * rsi_register_rates_channels() - This function registers channels and rates.
+ * @adapter: Pointer to the adapter structure.
+ * @band: Operating band to be set.
+ *
+ * Return: None.
+ */
+static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
+{
+ struct ieee80211_supported_band *sbands = &adapter->sbands[band];
+ void *channels = NULL;
+
+ if (band == IEEE80211_BAND_2GHZ) {
+ channels = kmemdup(rsi_2ghz_channels,
+ sizeof(rsi_2ghz_channels), GFP_KERNEL);
+ sbands->band = IEEE80211_BAND_2GHZ;
+ sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
+ sbands->bitrates = rsi_rates;
+ sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
+ } else {
+ channels = kmemdup(rsi_5ghz_channels,
+ sizeof(rsi_5ghz_channels), GFP_KERNEL);
+ sbands->band = IEEE80211_BAND_5GHZ;
+ sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
+ sbands->bitrates = &rsi_rates[4];
+ sbands->n_bitrates = ARRAY_SIZE(rsi_rates) - 4;
+ }
+
+ sbands->channels = channels;
+
+ memset(&sbands->ht_cap, 0, sizeof(struct ieee80211_sta_ht_cap));
+ sbands->ht_cap.ht_supported = true;
+ sbands->ht_cap.cap = (IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40);
+ sbands->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K;
+ sbands->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ sbands->ht_cap.mcs.rx_mask[0] = 0xff;
+ sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ /* sbands->ht_cap.mcs.rx_highest = 0x82; */
+}
+
+/**
+ * rsi_mac80211_detach() - This function is used to de-initialize the
+ * mac80211 stack.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_mac80211_detach(struct rsi_hw *adapter)
+{
+ struct ieee80211_hw *hw = adapter->hw;
+
+ if (hw) {
+ ieee80211_stop_queues(hw);
+ ieee80211_unregister_hw(hw);
+ ieee80211_free_hw(hw);
+ }
+
+ rsi_remove_dbgfs(adapter);
+}
+EXPORT_SYMBOL_GPL(rsi_mac80211_detach);
+
+/**
+ * rsi_indicate_tx_status() - This function indicates the transmit status.
+ * @adapter: Pointer to the adapter structure.
+ * @skb: Pointer to the socket buffer structure.
+ * @status: TX status; 0 indicates the frame was acknowledged.
+ *
+ * Return: None.
+ */
+void rsi_indicate_tx_status(struct rsi_hw *adapter,
+ struct sk_buff *skb,
+ int status)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ memset(info->driver_data, 0, IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+
+ if (!status)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(adapter->hw, skb);
+}
+
+/**
+ * rsi_mac80211_tx() - This is the handler that the 802.11 module calls for
+ * each frame to be transmitted. The skb contains the
+ * buffer starting at the IEEE 802.11 header.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @control: Pointer to the ieee80211_tx_control structure
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None
+ */
+static void rsi_mac80211_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ rsi_core_xmit(common, skb);
+}
+
+/**
+ * rsi_mac80211_start() - This is the first handler that the 802.11 module
+ * calls. Driver init is complete by then, so it only
+ * marks the interface as up and returns success.
+ * @hw: Pointer to the ieee80211_hw structure.
+ *
+ * Return: 0 as success.
+ */
+static int rsi_mac80211_start(struct ieee80211_hw *hw)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ common->iface_down = false;
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+/**
+ * rsi_mac80211_stop() - This is the last handler that 802.11 module calls.
+ * @hw: Pointer to the ieee80211_hw structure.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_stop(struct ieee80211_hw *hw)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ common->iface_down = true;
+ mutex_unlock(&common->mutex);
+}
+
+/**
+ * rsi_mac80211_add_interface() - This function is called when a netdevice
+ * attached to the hardware is enabled.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ *
+ * Return: ret: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ int ret = -EOPNOTSUPP;
+
+ mutex_lock(&common->mutex);
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (!adapter->sc_nvifs) {
+ ++adapter->sc_nvifs;
+ adapter->vifs[0] = vif;
+ ret = rsi_set_vap_capabilities(common, STA_OPMODE);
+ }
+ break;
+ default:
+ rsi_dbg(ERR_ZONE,
+ "%s: Interface type %d not supported\n", __func__,
+ vif->type);
+ }
+ mutex_unlock(&common->mutex);
+
+ return ret;
+}
+
+/**
+ * rsi_mac80211_remove_interface() - This function notifies driver that an
+ * interface is going down.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ adapter->sc_nvifs--;
+
+ if (!memcmp(adapter->vifs[0], vif, sizeof(struct ieee80211_vif)))
+ adapter->vifs[0] = NULL;
+ mutex_unlock(&common->mutex);
+}
+
+/**
+ * rsi_mac80211_config() - This function is a handler for configuration
+ * requests. The stack calls this function to
+ * change hardware configuration, e.g., channel.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @changed: Changed flags set.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_config(struct ieee80211_hw *hw,
+ u32 changed)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ int status = -EOPNOTSUPP;
+
+ mutex_lock(&common->mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+ u16 channel = curchan->hw_value;
+
+ rsi_dbg(INFO_ZONE,
+ "%s: Set channel: %d MHz type: %d channel_no %d\n",
+ __func__, curchan->center_freq,
+ curchan->flags, channel);
+ common->band = curchan->band;
+ status = rsi_set_channel(adapter->priv, channel);
+ }
+ mutex_unlock(&common->mutex);
+
+ return status;
+}
+
+/**
+ * rsi_get_connected_channel() - This function is used to get the current
+ * connected channel number.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: Current connected AP's channel number is returned.
+ */
+u16 rsi_get_connected_channel(struct rsi_hw *adapter)
+{
+ struct ieee80211_vif *vif = adapter->vifs[0];
+ if (vif) {
+ struct ieee80211_bss_conf *bss = &vif->bss_conf;
+ struct ieee80211_channel *channel = bss->chandef.chan;
+ return channel->hw_value;
+ }
+
+ return 0;
+}
+
+/**
+ * rsi_mac80211_bss_info_changed() - This function is a handler for config
+ * requests related to BSS parameters that
+ * may vary during BSS's lifespan.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @bss_conf: Pointer to the ieee80211_bss_conf structure.
+ * @changed: Changed flags set.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ if (changed & BSS_CHANGED_ASSOC) {
+ rsi_dbg(INFO_ZONE, "%s: Changed Association status: %d\n",
+ __func__, bss_conf->assoc);
+ rsi_inform_bss_status(common,
+ bss_conf->assoc,
+ bss_conf->bssid,
+ bss_conf->qos,
+ bss_conf->aid);
+ }
+ mutex_unlock(&common->mutex);
+}
+
+/**
+ * rsi_mac80211_conf_filter() - This function configures the device's RX filter.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @changed_flags: Changed flags set.
+ * @total_flags: Total initial flags set.
+ * @multicast: Multicast filter.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_conf_filter(struct ieee80211_hw *hw,
+ u32 changed_flags,
+ u32 *total_flags,
+ u64 multicast)
+{
+ /* Not doing much here as of now */
+ *total_flags &= RSI_SUPP_FILTERS;
+}
+
+/**
+ * rsi_mac80211_conf_tx() - This function configures TX queue parameters
+ * (EDCF (aifs, cw_min, cw_max), bursting)
+ * for a hardware TX queue.
+ * @hw: Pointer to the ieee80211_hw structure
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @queue: Queue number.
+ * @params: Pointer to ieee80211_tx_queue_params structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ u8 idx = 0;
+
+ if (queue >= IEEE80211_NUM_ACS)
+ return 0;
+
+ rsi_dbg(INFO_ZONE,
+ "%s: Conf queue %d, aifs: %d, cwmin: %d cwmax: %d, txop: %d\n",
+ __func__, queue, params->aifs,
+ params->cw_min, params->cw_max, params->txop);
+
+ mutex_lock(&common->mutex);
+ /* Map into the way the f/w expects */
+ switch (queue) {
+ case IEEE80211_AC_VO:
+ idx = VO_Q;
+ break;
+ case IEEE80211_AC_VI:
+ idx = VI_Q;
+ break;
+ case IEEE80211_AC_BE:
+ idx = BE_Q;
+ break;
+ case IEEE80211_AC_BK:
+ idx = BK_Q;
+ break;
+ default:
+ idx = BE_Q;
+ break;
+ }
+
+ memcpy(&common->edca_params[idx],
+ params,
+ sizeof(struct ieee80211_tx_queue_params));
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+/**
+ * rsi_hal_key_config() - This function loads the keys into the firmware.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @key: Pointer to the ieee80211_key_conf structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_hal_key_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key)
+{
+ struct rsi_hw *adapter = hw->priv;
+ int status;
+ u8 key_type;
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ key_type = RSI_PAIRWISE_KEY;
+ else
+ key_type = RSI_GROUP_KEY;
+
+ rsi_dbg(ERR_ZONE, "%s: Cipher 0x%x key_type: %d key_len: %d\n",
+ __func__, key->cipher, key_type, key->keylen);
+
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP104) ||
+ (key->cipher == WLAN_CIPHER_SUITE_WEP40)) {
+ status = rsi_hal_load_key(adapter->priv,
+ key->key,
+ key->keylen,
+ RSI_PAIRWISE_KEY,
+ key->keyidx,
+ key->cipher);
+ if (status)
+ return status;
+ }
+ return rsi_hal_load_key(adapter->priv,
+ key->key,
+ key->keylen,
+ key_type,
+ key->keyidx,
+ key->cipher);
+}
+
+/**
+ * rsi_mac80211_set_key() - This function sets type of key to be loaded.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @cmd: enum set_key_cmd.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @sta: Pointer to the ieee80211_sta structure.
+ * @key: Pointer to the ieee80211_key_conf structure.
+ *
+ * Return: status: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ struct security_info *secinfo = &common->secinfo;
+ int status;
+
+ mutex_lock(&common->mutex);
+ switch (cmd) {
+ case SET_KEY:
+ secinfo->security_enable = true;
+ status = rsi_hal_key_config(hw, vif, key);
+ if (status) {
+ mutex_unlock(&common->mutex);
+ return status;
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ secinfo->ptk_cipher = key->cipher;
+ else
+ secinfo->gtk_cipher = key->cipher;
+
+ key->hw_key_idx = key->keyidx;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ rsi_dbg(ERR_ZONE, "%s: RSI set_key\n", __func__);
+ break;
+
+ case DISABLE_KEY:
+ secinfo->security_enable = false;
+ rsi_dbg(ERR_ZONE, "%s: RSI del key\n", __func__);
+ memset(key, 0, sizeof(struct ieee80211_key_conf));
+ status = rsi_hal_key_config(hw, vif, key);
+ break;
+
+ default:
+ status = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&common->mutex);
+ return status;
+}
+
+/**
+ * rsi_mac80211_ampdu_action() - This function selects the AMPDU action for
+ * the corresponding mlme_action flag and
+ * informs the f/w regarding this.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @action: ieee80211_ampdu_mlme_action enum.
+ * @sta: Pointer to the ieee80211_sta structure.
+ * @tid: Traffic identifier.
+ * @ssn: Pointer to ssn value.
+ * @buf_size: Buffer size (for kernel version > 2.6.38).
+ *
+ * Return: status: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta,
+ unsigned short tid,
+ unsigned short *ssn,
+ unsigned char buf_size)
+{
+ int status = -EOPNOTSUPP;
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ u16 seq_no = 0;
+ u8 ii = 0;
+
+ for (ii = 0; ii < RSI_MAX_VIFS; ii++) {
+ if (vif == adapter->vifs[ii])
+ break;
+ }
+
+ mutex_lock(&common->mutex);
+ rsi_dbg(INFO_ZONE, "%s: AMPDU action %d called\n", __func__, action);
+ if (ssn != NULL)
+ seq_no = *ssn;
+
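+ /* mac80211 supplies the starting sequence number at TX_START; remember
+ * it per vif and report it to the firmware once the session becomes
+ * operational.
+ */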
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ status = rsi_send_aggregation_params_frame(common,
+ tid,
+ seq_no,
+ buf_size,
+ STA_RX_ADDBA_DONE);
+ break;
+
+ case IEEE80211_AMPDU_RX_STOP:
+ status = rsi_send_aggregation_params_frame(common,
+ tid,
+ 0,
+ buf_size,
+ STA_RX_DELBA);
+ break;
+
+ case IEEE80211_AMPDU_TX_START:
+ common->vif_info[ii].seq_start = seq_no;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ status = rsi_send_aggregation_params_frame(common,
+ tid,
+ seq_no,
+ buf_size,
+ STA_TX_DELBA);
+ if (!status)
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ status = rsi_send_aggregation_params_frame(common,
+ tid,
+ common->vif_info[ii]
+ .seq_start,
+ buf_size,
+ STA_TX_ADDBA_DONE);
+ break;
+
+ default:
+ rsi_dbg(ERR_ZONE, "%s: Uknown AMPDU action\n", __func__);
+ break;
+ }
+
+ mutex_unlock(&common->mutex);
+ return status;
+}
+
+/**
+ * rsi_mac80211_set_rts_threshold() - This function sets the RTS threshold.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @value: RTS threshold value.
+ *
+ * Return: 0 on success.
+ */
+static int rsi_mac80211_set_rts_threshold(struct ieee80211_hw *hw,
+ u32 value)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ common->rts_threshold = value;
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+/**
+ * rsi_mac80211_set_rate_mask() - This function sets bitrate_mask to be used.
+ * @hw: Pointer to the ieee80211_hw structure
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @mask: Pointer to the cfg80211_bitrate_mask structure.
+ *
+ * Return: 0 on success.
+ */
+static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+
+ common->fixedrate_mask[IEEE80211_BAND_2GHZ] = 0;
+
+ if (mask->control[IEEE80211_BAND_2GHZ].legacy == 0xfff) {
+ common->fixedrate_mask[IEEE80211_BAND_2GHZ] =
+ (mask->control[IEEE80211_BAND_2GHZ].ht_mcs[0] << 12);
+ } else {
+ common->fixedrate_mask[IEEE80211_BAND_2GHZ] =
+ mask->control[IEEE80211_BAND_2GHZ].legacy;
+ }
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+/**
+ * rsi_fill_rx_status() - This function fills rx status in
+ * ieee80211_rx_status structure.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @skb: Pointer to the socket buffer structure.
+ * @common: Pointer to the driver private structure.
+ * @rxs: Pointer to the ieee80211_rx_status structure.
+ *
+ * Return: None.
+ */
+static void rsi_fill_rx_status(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rsi_common *common,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct skb_info *rx_params = (struct skb_info *)info->driver_data;
+ struct ieee80211_hdr *hdr;
+ char rssi = rx_params->rssi;
+ u8 hdrlen = 0;
+ u8 channel = rx_params->channel;
+ s32 freq;
+
+ hdr = ((struct ieee80211_hdr *)(skb->data));
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+ memset(info, 0, sizeof(struct ieee80211_tx_info));
+
+ rxs->signal = -(rssi);
+
+ if (channel <= 14)
+ rxs->band = IEEE80211_BAND_2GHZ;
+ else
+ rxs->band = IEEE80211_BAND_5GHZ;
+
+ freq = ieee80211_channel_to_frequency(channel, rxs->band);
+
+ if (freq)
+ rxs->freq = freq;
+
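+ /* Strip the security header (4-byte WEP IV, 8 bytes otherwise) by
+ * sliding the 802.11 header forward, and tell mac80211 the frame is
+ * already decrypted.
+ */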
+ if (ieee80211_has_protected(hdr->frame_control)) {
+ if (rsi_is_cipher_wep(common)) {
+ memmove(skb->data + 4, skb->data, hdrlen);
+ skb_pull(skb, 4);
+ } else {
+ memmove(skb->data + 8, skb->data, hdrlen);
+ skb_pull(skb, 8);
+ rxs->flag |= RX_FLAG_MMIC_STRIPPED;
+ }
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ rxs->flag |= RX_FLAG_IV_STRIPPED;
+ }
+}
+
+/**
+ * rsi_indicate_pkt_to_os() - This function sends the received packet to mac80211.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None.
+ */
+void rsi_indicate_pkt_to_os(struct rsi_common *common,
+ struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_hw *hw = adapter->hw;
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+
+ if ((common->iface_down) || (!adapter->sc_nvifs)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /* filling in the ieee80211_rx_status flags */
+ rsi_fill_rx_status(hw, skb, common, rx_status);
+
+ ieee80211_rx_irqsafe(hw, skb);
+}
+
+static void rsi_set_min_rate(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct rsi_common *common)
+{
+ u8 band = hw->conf.chandef.chan->band;
+ u8 ii;
+ u32 rate_bitmap;
+ bool matched = false;
+
+ common->bitrate_mask[band] = sta->supp_rates[band];
+
+ rate_bitmap = (common->fixedrate_mask[band] & sta->supp_rates[band]);
+
+ if (rate_bitmap & 0xfff) {
+ /* Find out the min rate */
+ for (ii = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
+ if (rate_bitmap & BIT(ii)) {
+ common->min_rate = rsi_rates[ii].hw_value;
+ matched = true;
+ break;
+ }
+ }
+ }
+
+ common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
+
+ if ((common->vif_info[0].is_ht) && (rate_bitmap >> 12)) {
+ for (ii = 0; ii < ARRAY_SIZE(rsi_mcsrates); ii++) {
+ if ((rate_bitmap >> 12) & BIT(ii)) {
+ common->min_rate = rsi_mcsrates[ii];
+ matched = true;
+ break;
+ }
+ }
+ }
+
+ if (!matched)
+ common->min_rate = 0xffff;
+}
+
+/**
+ * rsi_mac80211_sta_add() - This function notifies driver about a peer getting
+ * connected.
+ * @hw: pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @sta: Pointer to the ieee80211_sta structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+
+ rsi_set_min_rate(hw, sta, common);
+
+ if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) {
+ common->vif_info[0].sgi = true;
+ }
+
+ if (sta->ht_cap.ht_supported)
+ ieee80211_start_tx_ba_session(sta, 0, 0);
+
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+/**
+ * rsi_mac80211_sta_remove() - This function notifies driver about a peer
+ * getting disconnected.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @sta: Pointer to the ieee80211_sta structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ /* Resetting all the fields to default values */
+ common->bitrate_mask[IEEE80211_BAND_2GHZ] = 0;
+ common->bitrate_mask[IEEE80211_BAND_5GHZ] = 0;
+ common->min_rate = 0xffff;
+ common->vif_info[0].is_ht = false;
+ common->vif_info[0].sgi = false;
+ common->vif_info[0].seq_start = 0;
+ common->secinfo.ptk_cipher = 0;
+ common->secinfo.gtk_cipher = 0;
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+static struct ieee80211_ops mac80211_ops = {
+ .tx = rsi_mac80211_tx,
+ .start = rsi_mac80211_start,
+ .stop = rsi_mac80211_stop,
+ .add_interface = rsi_mac80211_add_interface,
+ .remove_interface = rsi_mac80211_remove_interface,
+ .config = rsi_mac80211_config,
+ .bss_info_changed = rsi_mac80211_bss_info_changed,
+ .conf_tx = rsi_mac80211_conf_tx,
+ .configure_filter = rsi_mac80211_conf_filter,
+ .set_key = rsi_mac80211_set_key,
+ .set_rts_threshold = rsi_mac80211_set_rts_threshold,
+ .set_bitrate_mask = rsi_mac80211_set_rate_mask,
+ .ampdu_action = rsi_mac80211_ampdu_action,
+ .sta_add = rsi_mac80211_sta_add,
+ .sta_remove = rsi_mac80211_sta_remove,
+};
+
+/**
+ * rsi_mac80211_attach() - This function is used to initialize the mac80211 stack.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_mac80211_attach(struct rsi_common *common)
+{
+ int status = 0;
+ struct ieee80211_hw *hw = NULL;
+ struct wiphy *wiphy = NULL;
+ struct rsi_hw *adapter = common->priv;
+ u8 addr_mask[ETH_ALEN] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x3};
+
+ rsi_dbg(INIT_ZONE, "%s: Performing mac80211 attach\n", __func__);
+
+ hw = ieee80211_alloc_hw(sizeof(struct rsi_hw), &mac80211_ops);
+ if (!hw) {
+ rsi_dbg(ERR_ZONE, "%s: ieee80211 hw alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ wiphy = hw->wiphy;
+
+ SET_IEEE80211_DEV(hw, adapter->device);
+
+ hw->priv = adapter;
+ adapter->hw = hw;
+
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ 0;
+
+ hw->queues = MAX_HW_QUEUES;
+ hw->extra_tx_headroom = RSI_NEEDED_HEADROOM;
+
+ hw->max_rates = 1;
+ hw->max_rate_tries = MAX_RETRIES;
+
+ hw->max_tx_aggregation_subframes = 6;
+ rsi_register_rates_channels(adapter, IEEE80211_BAND_2GHZ);
+ hw->rate_control_algorithm = "AARF";
+
+ SET_IEEE80211_PERM_ADDR(hw, common->mac_addr);
+ ether_addr_copy(hw->wiphy->addr_mask, addr_mask);
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wiphy->retry_short = RETRY_SHORT;
+ wiphy->retry_long = RETRY_LONG;
+ wiphy->frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
+ wiphy->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+ wiphy->flags = 0;
+
+ wiphy->available_antennas_rx = 1;
+ wiphy->available_antennas_tx = 1;
+ wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &adapter->sbands[IEEE80211_BAND_2GHZ];
+
+ status = ieee80211_register_hw(hw);
+ if (status)
+ return status;
+
+ return rsi_init_dbgfs(adapter);
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
new file mode 100644
index 000000000000..8810862ae826
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -0,0 +1,295 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
+u32 rsi_zone_enabled = /* INFO_ZONE |
+ INIT_ZONE |
+ MGMT_TX_ZONE |
+ MGMT_RX_ZONE |
+ DATA_TX_ZONE |
+ DATA_RX_ZONE |
+ FSM_ZONE |
+ ISR_ZONE | */
+ ERR_ZONE |
+ 0;
+EXPORT_SYMBOL_GPL(rsi_zone_enabled);
+
+/**
+ * rsi_dbg() - This function outputs informational messages.
+ * @zone: Zone of interest for output message.
+ * @fmt: printf-style format for output message.
+ *
+ * Return: none
+ */
+void rsi_dbg(u32 zone, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (zone & rsi_zone_enabled)
+ pr_info("%pV", &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL_GPL(rsi_dbg);
+
+/**
+ * rsi_prepare_skb() - This function prepares the skb.
+ * @common: Pointer to the driver private structure.
+ * @buffer: Pointer to the packet data.
+ * @pkt_len: Length of the packet.
+ * @extended_desc: Length of the extended descriptor, in bytes.
+ *
+ * Return: Prepared skb on success, NULL on failure.
+ */
+static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
+ u8 *buffer,
+ u32 pkt_len,
+ u8 extended_desc)
+{
+ struct ieee80211_tx_info *info;
+ struct skb_info *rx_params;
+ struct sk_buff *skb = NULL;
+ u8 payload_offset;
+
+ if (WARN(!pkt_len, "%s: Dummy pkt received", __func__))
+ return NULL;
+
+ if (pkt_len > (RSI_RCV_BUFFER_LEN * 4)) {
+ rsi_dbg(ERR_ZONE, "%s: Pkt size > max rx buf size %d\n",
+ __func__, pkt_len);
+ pkt_len = RSI_RCV_BUFFER_LEN * 4;
+ }
+
+ pkt_len -= extended_desc;
+ skb = dev_alloc_skb(pkt_len + FRAME_DESC_SZ);
+ if (skb == NULL)
+ return NULL;
+
+ payload_offset = (extended_desc + FRAME_DESC_SZ);
+ skb_put(skb, pkt_len);
+ memcpy((skb->data), (buffer + payload_offset), skb->len);
+
+ info = IEEE80211_SKB_CB(skb);
+ rx_params = (struct skb_info *)info->driver_data;
+ rx_params->rssi = rsi_get_rssi(buffer);
+ rx_params->channel = rsi_get_connected_channel(common->priv);
+
+ return skb;
+}
+
+/**
+ * rsi_read_pkt() - This function reads frames from the card.
+ * @common: Pointer to the driver private structure.
+ * @rcv_pkt_len: Received pkt length. In case of USB it is 0.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len)
+{
+ u8 *frame_desc = NULL, extended_desc = 0;
+ u32 index, length = 0, queueno = 0;
+ u16 actual_length = 0, offset;
+ struct sk_buff *skb = NULL;
+
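+ /* The receive buffer can hold several frames back to back; each one
+ * begins with a descriptor carrying the length used to step to the
+ * next frame and the offset of its payload.
+ */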
+ index = 0;
+ do {
+ frame_desc = &common->rx_data_pkt[index];
+ actual_length = *(u16 *)&frame_desc[0];
+ offset = *(u16 *)&frame_desc[2];
+
+ queueno = rsi_get_queueno(frame_desc, offset);
+ length = rsi_get_length(frame_desc, offset);
+ extended_desc = rsi_get_extended_desc(frame_desc, offset);
+
+ switch (queueno) {
+ case RSI_WIFI_DATA_Q:
+ skb = rsi_prepare_skb(common,
+ (frame_desc + offset),
+ length,
+ extended_desc);
+ if (skb == NULL)
+ goto fail;
+
+ rsi_indicate_pkt_to_os(common, skb);
+ break;
+
+ case RSI_WIFI_MGMT_Q:
+ rsi_mgmt_pkt_recv(common, (frame_desc + offset));
+ break;
+
+ default:
+ rsi_dbg(ERR_ZONE, "%s: pkt from invalid queue: %d\n",
+ __func__, queueno);
+ goto fail;
+ }
+
+ index += actual_length;
+ rcv_pkt_len -= actual_length;
+ } while (rcv_pkt_len > 0);
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(rsi_read_pkt);
+
+/**
+ * rsi_tx_scheduler_thread() - This function is a kernel thread to send the
+ * packets to the device.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+static void rsi_tx_scheduler_thread(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ u32 timeout = EVENT_WAIT_FOREVER;
+
+ do {
+ if (adapter->determine_event_timeout)
+ timeout = adapter->determine_event_timeout(adapter);
+ rsi_wait_event(&common->tx_thread.event, timeout);
+ rsi_reset_event(&common->tx_thread.event);
+
+ if (common->init_done)
+ rsi_core_qos_processor(common);
+ } while (atomic_read(&common->tx_thread.thread_done) == 0);
+ complete_and_exit(&common->tx_thread.completion, 0);
+}
+
+/**
+ * rsi_91x_init() - This function initializes os interface operations.
+ * @void: Void.
+ *
+ * Return: Pointer to the adapter structure on success, NULL on failure.
+ */
+struct rsi_hw *rsi_91x_init(void)
+{
+ struct rsi_hw *adapter = NULL;
+ struct rsi_common *common = NULL;
+ u8 ii = 0;
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return NULL;
+
+ adapter->priv = kzalloc(sizeof(*common), GFP_KERNEL);
+ if (adapter->priv == NULL) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of memory\n",
+ __func__);
+ kfree(adapter);
+ return NULL;
+ } else {
+ common = adapter->priv;
+ common->priv = adapter;
+ }
+
+ for (ii = 0; ii < NUM_SOFT_QUEUES; ii++)
+ skb_queue_head_init(&common->tx_queue[ii]);
+
+ rsi_init_event(&common->tx_thread.event);
+ mutex_init(&common->mutex);
+ mutex_init(&common->tx_rxlock);
+
+ if (rsi_create_kthread(common,
+ &common->tx_thread,
+ rsi_tx_scheduler_thread,
+ "Tx-Thread")) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__);
+ goto err;
+ }
+
+ common->init_done = true;
+ return adapter;
+
+err:
+ kfree(common);
+ kfree(adapter);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rsi_91x_init);
+
+/**
+ * rsi_91x_deinit() - This function de-initializes the OS interface operations.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_91x_deinit(struct rsi_hw *adapter)
+{
+ struct rsi_common *common = adapter->priv;
+ u8 ii;
+
+ rsi_dbg(INFO_ZONE, "%s: Performing deinit os ops\n", __func__);
+
+ rsi_kill_thread(&common->tx_thread);
+
+ for (ii = 0; ii < NUM_SOFT_QUEUES; ii++)
+ skb_queue_purge(&common->tx_queue[ii]);
+
+ common->init_done = false;
+
+ kfree(common);
+ kfree(adapter->rsi_dev);
+ kfree(adapter);
+}
+EXPORT_SYMBOL_GPL(rsi_91x_deinit);
+
+/**
+ * rsi_91x_hal_module_init() - This function is invoked when the module is
+ * loaded into the kernel.
+ * It registers the client driver.
+ * @void: Void.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_91x_hal_module_init(void)
+{
+ rsi_dbg(INIT_ZONE, "%s: Module init called\n", __func__);
+ return 0;
+}
+
+/**
+ * rsi_91x_hal_module_exit() - This function is called at the time of
+ * removing/unloading the module.
+ * It unregisters the client driver.
+ * @void: Void.
+ *
+ * Return: None.
+ */
+static void rsi_91x_hal_module_exit(void)
+{
+ rsi_dbg(INIT_ZONE, "%s: Module exit called\n", __func__);
+}
+
+module_init(rsi_91x_hal_module_init);
+module_exit(rsi_91x_hal_module_exit);
+MODULE_AUTHOR("Redpine Signals Inc");
+MODULE_DESCRIPTION("Station driver for RSI 91x devices");
+MODULE_SUPPORTED_DEVICE("RSI-91x");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
new file mode 100644
index 000000000000..2361a6849ad7
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -0,0 +1,1304 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
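+/*
+ * Boot-up parameter blobs describing the PLL and clock configuration handed
+ * to the firmware; one set for 20 MHz and one for 40 MHz channel operation.
+ */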
+static struct bootup_params boot_params_20 = {
+ .magic_number = cpu_to_le16(0x5aa5),
+ .crystal_good_time = 0x0,
+ .valid = cpu_to_le32(VALID_20),
+ .reserved_for_valids = 0x0,
+ .bootup_mode_info = 0x0,
+ .digital_loop_back_params = 0x0,
+ .rtls_timestamp_en = 0x0,
+ .host_spi_intr_cfg = 0x0,
+ .device_clk_info = {{
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
+ (TA_PLL_M_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
+ (PLL960_N_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = cpu_to_le16(BIT(3)),
+ .bbp_lmac_clk_reg_val = cpu_to_le16(0x121),
+ .umac_clock_reg_config = 0x0,
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ },
+ {
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
+ (TA_PLL_M_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
+ (PLL960_N_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = 0x0,
+ .bbp_lmac_clk_reg_val = 0x0,
+ .umac_clock_reg_config = 0x0,
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ },
+ {
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
+ (TA_PLL_M_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
+ (PLL960_N_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = 0x0,
+ .bbp_lmac_clk_reg_val = 0x0,
+ .umac_clock_reg_config = 0x0,
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ } },
+ .buckboost_wakeup_cnt = 0x0,
+ .pmu_wakeup_wait = 0x0,
+ .shutdown_wait_time = 0x0,
+ .pmu_slp_clkout_sel = 0x0,
+ .wdt_prog_value = 0x0,
+ .wdt_soc_rst_delay = 0x0,
+ .dcdc_operation_mode = 0x0,
+ .soc_reset_wait_cnt = 0x0
+};
+
+static struct bootup_params boot_params_40 = {
+ .magic_number = cpu_to_le16(0x5aa5),
+ .crystal_good_time = 0x0,
+ .valid = cpu_to_le32(VALID_40),
+ .reserved_for_valids = 0x0,
+ .bootup_mode_info = 0x0,
+ .digital_loop_back_params = 0x0,
+ .rtls_timestamp_en = 0x0,
+ .host_spi_intr_cfg = 0x0,
+ .device_clk_info = {{
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
+ (TA_PLL_M_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
+ (PLL960_N_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = cpu_to_le16(0x09),
+ .bbp_lmac_clk_reg_val = cpu_to_le16(0x1121),
+ .umac_clock_reg_config = cpu_to_le16(0x48),
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ },
+ {
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
+ (TA_PLL_M_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
+ (PLL960_N_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = 0x0,
+ .bbp_lmac_clk_reg_val = 0x0,
+ .umac_clock_reg_config = 0x0,
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ },
+ {
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
+ (TA_PLL_M_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
+ (PLL960_N_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = 0x0,
+ .bbp_lmac_clk_reg_val = 0x0,
+ .umac_clock_reg_config = 0x0,
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ } },
+ .buckboost_wakeup_cnt = 0x0,
+ .pmu_wakeup_wait = 0x0,
+ .shutdown_wait_time = 0x0,
+ .pmu_slp_clkout_sel = 0x0,
+ .wdt_prog_value = 0x0,
+ .wdt_soc_rst_delay = 0x0,
+ .dcdc_operation_mode = 0x0,
+ .soc_reset_wait_cnt = 0x0
+};
+
+static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130};
+
+/**
+ * rsi_set_default_parameters() - This function sets default parameters.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: none
+ */
+static void rsi_set_default_parameters(struct rsi_common *common)
+{
+ common->band = IEEE80211_BAND_2GHZ;
+ common->channel_width = BW_20MHZ;
+ common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+ common->channel = 1;
+ common->min_rate = 0xffff;
+ common->fsm_state = FSM_CARD_NOT_READY;
+ common->iface_down = true;
+}
+
+/**
+ * rsi_set_contention_vals() - This function sets the contention values for the
+ * backoff procedure.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+static void rsi_set_contention_vals(struct rsi_common *common)
+{
+ u8 ii = 0;
+
+ for (; ii < NUM_EDCA_QUEUES; ii++) {
+ common->tx_qinfo[ii].wme_params =
+ (((common->edca_params[ii].cw_min / 2) +
+ (common->edca_params[ii].aifs)) *
+ WMM_SHORT_SLOT_TIME + SIFS_DURATION);
+ common->tx_qinfo[ii].weight = common->tx_qinfo[ii].wme_params;
+ common->tx_qinfo[ii].pkt_contended = 0;
+ }
+}
+
+/**
+ * rsi_send_internal_mgmt_frame() - This function sends management frames to
+ * the firmware. It also queues the packet
+ * to the TX queue for transmission.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_send_internal_mgmt_frame(struct rsi_common *common,
+ struct sk_buff *skb)
+{
+ struct skb_info *tx_params;
+
+ if (skb == NULL) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__);
+ return -ENOMEM;
+ }
+ tx_params = (struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data;
+ tx_params->flags |= INTERNAL_MGMT_PKT;
+ skb_queue_tail(&common->tx_queue[MGMT_SOFT_Q], skb);
+ rsi_set_event(&common->tx_thread.event);
+ return 0;
+}
+
+/**
+ * rsi_load_radio_caps() - This function is used to send the radio capabilities
+ * to the firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+static int rsi_load_radio_caps(struct rsi_common *common)
+{
+ struct rsi_radio_caps *radio_caps;
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_hw *hw = adapter->hw;
+ u16 inx = 0;
+ u8 ii;
+ u8 radio_id = 0;
+ u16 gc[20] = {0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0};
+ struct ieee80211_conf *conf = &hw->conf;
+ struct sk_buff *skb;
+
+ rsi_dbg(INFO_ZONE, "%s: Sending rate symbol req frame\n", __func__);
+
+ skb = dev_alloc_skb(sizeof(struct rsi_radio_caps));
+
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, sizeof(struct rsi_radio_caps));
+ radio_caps = (struct rsi_radio_caps *)skb->data;
+
+ radio_caps->desc_word[1] = cpu_to_le16(RADIO_CAPABILITIES);
+ radio_caps->desc_word[4] = cpu_to_le16(RSI_RF_TYPE << 8);
+
+ if (common->channel_width == BW_40MHZ) {
+ radio_caps->desc_word[7] |= cpu_to_le16(RSI_LMAC_CLOCK_80MHZ);
+ radio_caps->desc_word[7] |= cpu_to_le16(RSI_ENABLE_40MHZ);
+ if (common->channel_width) {
+ radio_caps->desc_word[5] =
+ cpu_to_le16(common->channel_width << 12);
+ radio_caps->desc_word[5] |= cpu_to_le16(FULL40M_ENABLE);
+ }
+
+ if (conf_is_ht40_minus(conf)) {
+ radio_caps->desc_word[5] = 0;
+ radio_caps->desc_word[5] |=
+ cpu_to_le16(LOWER_20_ENABLE);
+ radio_caps->desc_word[5] |=
+ cpu_to_le16(LOWER_20_ENABLE >> 12);
+ }
+
+ if (conf_is_ht40_plus(conf)) {
+ radio_caps->desc_word[5] = 0;
+ radio_caps->desc_word[5] |=
+ cpu_to_le16(UPPER_20_ENABLE);
+ radio_caps->desc_word[5] |=
+ cpu_to_le16(UPPER_20_ENABLE >> 12);
+ }
+ }
+
+ radio_caps->desc_word[7] |= cpu_to_le16(radio_id << 8);
+
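+ /* Program default contention parameters for every hardware queue, then
+ * overwrite the EDCA queues with the values negotiated via conf_tx.
+ */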
+ for (ii = 0; ii < MAX_HW_QUEUES; ii++) {
+ radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(3);
+ radio_caps->qos_params[ii].cont_win_max_q = cpu_to_le16(0x3f);
+ radio_caps->qos_params[ii].aifsn_val_q = cpu_to_le16(2);
+ radio_caps->qos_params[ii].txop_q = 0;
+ }
+
+ for (ii = 0; ii < MAX_HW_QUEUES - 4; ii++) {
+ radio_caps->qos_params[ii].cont_win_min_q =
+ cpu_to_le16(common->edca_params[ii].cw_min);
+ radio_caps->qos_params[ii].cont_win_max_q =
+ cpu_to_le16(common->edca_params[ii].cw_max);
+ radio_caps->qos_params[ii].aifsn_val_q =
+ cpu_to_le16((common->edca_params[ii].aifs) << 8);
+ radio_caps->qos_params[ii].txop_q =
+ cpu_to_le16(common->edca_params[ii].txop);
+ }
+
+ memcpy(&common->rate_pwr[0], &gc[0], 40);
+ for (ii = 0; ii < 20; ii++)
+ radio_caps->gcpd_per_rate[inx++] =
+ cpu_to_le16(common->rate_pwr[ii] & 0x00FF);
+
+ radio_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_radio_caps) -
+ FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+
+
+ skb_put(skb, (sizeof(struct rsi_radio_caps)));
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_mgmt_pkt_to_core() - This function is the entry point for Mgmt module.
+ * @common: Pointer to the driver private structure.
+ * @msg: Pointer to received packet.
+ * @msg_len: Length of the received packet.
+ * @type: Type of the received packet.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
+ u8 *msg,
+ s32 msg_len,
+ u8 type)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_tx_info *info;
+ struct skb_info *rx_params;
+ u8 pad_bytes = msg[4];
+ u8 pkt_recv;
+ struct sk_buff *skb;
+ char *buffer;
+
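+ /* Only received 802.11 management frames are handed up to mac80211;
+ * frames of any other type are treated as internal packets and are
+ * only logged here.
+ */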
+ if (type == RX_DOT11_MGMT) {
+ if (!adapter->sc_nvifs)
+ return -ENOLINK;
+
+ msg_len -= pad_bytes;
+ if ((msg_len <= 0) || (!msg)) {
+ rsi_dbg(MGMT_RX_ZONE,
+ "%s: Invalid rx msg of len = %d\n",
+ __func__, msg_len);
+ return -EINVAL;
+ }
+
+ skb = dev_alloc_skb(msg_len);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to allocate skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ buffer = skb_put(skb, msg_len);
+
+ memcpy(buffer,
+ (u8 *)(msg + FRAME_DESC_SZ + pad_bytes),
+ msg_len);
+
+ pkt_recv = buffer[0];
+
+ info = IEEE80211_SKB_CB(skb);
+ rx_params = (struct skb_info *)info->driver_data;
+ rx_params->rssi = rsi_get_rssi(msg);
+ rx_params->channel = rsi_get_channel(msg);
+ rsi_indicate_pkt_to_os(common, skb);
+ } else {
+ rsi_dbg(MGMT_TX_ZONE, "%s: Internal Packet\n", __func__);
+ }
+
+ return 0;
+}
+
+/**
+ * rsi_hal_send_sta_notify_frame() - This function sends the station notify
+ * frame to firmware.
+ * @common: Pointer to the driver private structure.
+ * @opmode: Operating mode of device.
+ * @notify_event: Notification about station connection.
+ * @bssid: bssid.
+ * @qos_enable: Whether QoS is enabled for the peer.
+ * @aid: Association ID (unique per station).
+ *
+ * Return: status: 0 on success, corresponding negative error code on failure.
+ */
+static int rsi_hal_send_sta_notify_frame(struct rsi_common *common,
+ u8 opmode,
+ u8 notify_event,
+ const unsigned char *bssid,
+ u8 qos_enable,
+ u16 aid)
+{
+ struct sk_buff *skb = NULL;
+ struct rsi_peer_notify *peer_notify;
+ u16 vap_id = 0;
+ int status;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending sta notify frame\n", __func__);
+
+ skb = dev_alloc_skb(sizeof(struct rsi_peer_notify));
+
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, sizeof(struct rsi_peer_notify));
+ peer_notify = (struct rsi_peer_notify *)skb->data;
+
+ peer_notify->command = cpu_to_le16(opmode << 1);
+
+ switch (notify_event) {
+ case STA_CONNECTED:
+ peer_notify->command |= cpu_to_le16(RSI_ADD_PEER);
+ break;
+ case STA_DISCONNECTED:
+ peer_notify->command |= cpu_to_le16(RSI_DELETE_PEER);
+ break;
+ default:
+ break;
+ }
+
+ peer_notify->command |= cpu_to_le16((aid & 0xfff) << 4);
+ ether_addr_copy(peer_notify->mac_addr, bssid);
+
+ peer_notify->sta_flags = cpu_to_le32((qos_enable) ? 1 : 0);
+
+ peer_notify->desc_word[0] =
+ cpu_to_le16((sizeof(struct rsi_peer_notify) - FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+ peer_notify->desc_word[1] = cpu_to_le16(PEER_NOTIFY);
+ peer_notify->desc_word[7] |= cpu_to_le16(vap_id << 8);
+
+ skb_put(skb, sizeof(struct rsi_peer_notify));
+
+ status = rsi_send_internal_mgmt_frame(common, skb);
+
+ if (!status && qos_enable) {
+ rsi_set_contention_vals(common);
+ status = rsi_load_radio_caps(common);
+ }
+ return status;
+}
+
+/**
+ * rsi_send_aggregation_params_frame() - This function sends the ampdu
+ * indication frame to firmware.
+ * @common: Pointer to the driver private structure.
+ * @tid: Traffic identifier.
+ * @ssn: Starting sequence number.
+ * @buf_size: Block-ack buffer size.
+ * @event: ADDBA/DELBA event (STA_TX/RX_ADDBA_DONE or STA_TX/RX_DELBA).
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+int rsi_send_aggregation_params_frame(struct rsi_common *common,
+ u16 tid,
+ u16 ssn,
+ u8 buf_size,
+ u8 event)
+{
+ struct sk_buff *skb = NULL;
+ struct rsi_mac_frame *mgmt_frame;
+ u8 peer_id = 0;
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending AMPDU indication frame\n", __func__);
+
+ mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ mgmt_frame->desc_word[1] = cpu_to_le16(AMPDU_IND);
+
+ if (event == STA_TX_ADDBA_DONE) {
+ mgmt_frame->desc_word[4] = cpu_to_le16(ssn);
+ mgmt_frame->desc_word[5] = cpu_to_le16(buf_size);
+ mgmt_frame->desc_word[7] =
+ cpu_to_le16((tid | (START_AMPDU_AGGR << 4) | (peer_id << 8)));
+ } else if (event == STA_RX_ADDBA_DONE) {
+ mgmt_frame->desc_word[4] = cpu_to_le16(ssn);
+ mgmt_frame->desc_word[7] = cpu_to_le16(tid |
+ (START_AMPDU_AGGR << 4) |
+ (RX_BA_INDICATION << 5) |
+ (peer_id << 8));
+ } else if (event == STA_TX_DELBA) {
+ mgmt_frame->desc_word[7] = cpu_to_le16(tid |
+ (STOP_AMPDU_AGGR << 4) |
+ (peer_id << 8));
+ } else if (event == STA_RX_DELBA) {
+ mgmt_frame->desc_word[7] = cpu_to_le16(tid |
+ (STOP_AMPDU_AGGR << 4) |
+ (RX_BA_INDICATION << 5) |
+ (peer_id << 8));
+ }
+
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_program_bb_rf() - This function starts baseband and RF programming.
+ * This is called after initial configurations are done.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+static int rsi_program_bb_rf(struct rsi_common *common)
+{
+ struct sk_buff *skb;
+ struct rsi_mac_frame *mgmt_frame;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending program BB/RF frame\n", __func__);
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+ mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ mgmt_frame->desc_word[1] = cpu_to_le16(BBP_PROG_IN_TA);
+ mgmt_frame->desc_word[4] = cpu_to_le16(common->endpoint << 8);
+
+ if (common->rf_reset) {
+ mgmt_frame->desc_word[7] = cpu_to_le16(RF_RESET_ENABLE);
+ rsi_dbg(MGMT_TX_ZONE, "%s: ===> RF RESET REQUEST SENT <===\n",
+ __func__);
+ common->rf_reset = 0;
+ }
+ common->bb_rf_prog_count = 1;
+ mgmt_frame->desc_word[7] |= cpu_to_le16(PUT_BBP_RESET |
+ BBP_REG_WRITE | (RSI_RF_TYPE << 4));
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_set_vap_capabilities() - This function sends the VAP capabilities to the
+ * firmware.
+ * @common: Pointer to the driver private structure.
+ * @mode: Operating mode of the device.
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
+{
+ struct sk_buff *skb = NULL;
+ struct rsi_vap_caps *vap_caps;
+ u16 vap_id = 0;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending VAP capabilities frame\n", __func__);
+
+ skb = dev_alloc_skb(sizeof(struct rsi_vap_caps));
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, sizeof(struct rsi_vap_caps));
+ vap_caps = (struct rsi_vap_caps *)skb->data;
+
+ vap_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_vap_caps) -
+ FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+ vap_caps->desc_word[1] = cpu_to_le16(VAP_CAPABILITIES);
+ vap_caps->desc_word[4] = cpu_to_le16(mode |
+ (common->channel_width << 8));
+ vap_caps->desc_word[7] = cpu_to_le16((vap_id << 8) |
+ (common->mac_id << 4) |
+ common->radio_id);
+
+ memcpy(vap_caps->mac_addr, common->mac_addr, IEEE80211_ADDR_LEN);
+ vap_caps->keep_alive_period = cpu_to_le16(90);
+ vap_caps->frag_threshold = cpu_to_le16(IEEE80211_MAX_FRAG_THRESHOLD);
+
+ vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold);
+ vap_caps->default_mgmt_rate = 0;
+ if (conf_is_ht40(&common->priv->hw->conf)) {
+ vap_caps->default_ctrl_rate =
+ cpu_to_le32(RSI_RATE_6 | FULL40M_ENABLE << 16);
+ } else {
+ vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6);
+ }
+ vap_caps->default_data_rate = 0;
+ vap_caps->beacon_interval = cpu_to_le16(200);
+ vap_caps->dtim_period = cpu_to_le16(4);
+
+ skb_put(skb, sizeof(*vap_caps));
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_hal_load_key() - This function is used to load keys within the firmware.
+ * @common: Pointer to the driver private structure.
+ * @data: Pointer to the key data.
+ * @key_len: Key length to be loaded.
+ * @key_type: Type of key: GROUP/PAIRWISE.
+ * @key_id: Key index.
+ * @cipher: Type of cipher used.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_hal_load_key(struct rsi_common *common,
+ u8 *data,
+ u16 key_len,
+ u8 key_type,
+ u8 key_id,
+ u32 cipher)
+{
+ struct sk_buff *skb = NULL;
+ struct rsi_set_key *set_key;
+ u16 key_descriptor = 0;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending load key frame\n", __func__);
+
+ skb = dev_alloc_skb(sizeof(struct rsi_set_key));
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, sizeof(struct rsi_set_key));
+ set_key = (struct rsi_set_key *)skb->data;
+
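+ /* Build the key descriptor: BIT(2) flags WEP (with BIT(3) for a
+ * 104-bit key), BIT(4) any other cipher and BIT(5) TKIP; the key
+ * type, BIT(13) and the key index are folded in below.
+ */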
+ if ((cipher == WLAN_CIPHER_SUITE_WEP40) ||
+ (cipher == WLAN_CIPHER_SUITE_WEP104)) {
+ key_len += 1;
+ key_descriptor |= BIT(2);
+ if (key_len >= 13)
+ key_descriptor |= BIT(3);
+ } else if (cipher != KEY_TYPE_CLEAR) {
+ key_descriptor |= BIT(4);
+ if (key_type == RSI_PAIRWISE_KEY)
+ key_id = 0;
+ if (cipher == WLAN_CIPHER_SUITE_TKIP)
+ key_descriptor |= BIT(5);
+ }
+ key_descriptor |= (key_type | BIT(13) | (key_id << 14));
+
+ set_key->desc_word[0] = cpu_to_le16((sizeof(struct rsi_set_key) -
+ FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+ set_key->desc_word[1] = cpu_to_le16(SET_KEY_REQ);
+ set_key->desc_word[4] = cpu_to_le16(key_descriptor);
+
+ if ((cipher == WLAN_CIPHER_SUITE_WEP40) ||
+ (cipher == WLAN_CIPHER_SUITE_WEP104)) {
+ memcpy(&set_key->key[key_id][1],
+ data,
+ key_len * 2);
+ } else {
+ memcpy(&set_key->key[0][0], data, key_len);
+ }
+
+ memcpy(set_key->tx_mic_key, &data[16], 8);
+ memcpy(set_key->rx_mic_key, &data[24], 8);
+
+ skb_put(skb, sizeof(struct rsi_set_key));
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_load_bootup_params() - This function sends the boot-up parameters to the
+ * firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+static u8 rsi_load_bootup_params(struct rsi_common *common)
+{
+ struct sk_buff *skb;
+ struct rsi_boot_params *boot_params;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending boot params frame\n", __func__);
+ skb = dev_alloc_skb(sizeof(struct rsi_boot_params));
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, sizeof(struct rsi_boot_params));
+ boot_params = (struct rsi_boot_params *)skb->data;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s:\n", __func__);
+
+ if (common->channel_width == BW_40MHZ) {
+ memcpy(&boot_params->bootup_params,
+ &boot_params_40,
+ sizeof(struct bootup_params));
+ rsi_dbg(MGMT_TX_ZONE, "%s: Packet 40MHZ <=== %d\n", __func__,
+ UMAC_CLK_40BW);
+ boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40BW);
+ } else {
+ memcpy(&boot_params->bootup_params,
+ &boot_params_20,
+ sizeof(struct bootup_params));
+ if (boot_params_20.valid != cpu_to_le32(VALID_20)) {
+ boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_20BW);
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Packet 20MHZ <=== %d\n", __func__,
+ UMAC_CLK_20BW);
+ } else {
+ boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40MHZ);
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Packet 20MHZ <=== %d\n", __func__,
+ UMAC_CLK_40MHZ);
+ }
+ }
+
+ /*
+ * Bits {0:11} indicate the length of the packet
+ * Bits {12:15} indicate the host queue number
+ */
+ boot_params->desc_word[0] = cpu_to_le16(sizeof(struct bootup_params) |
+ (RSI_WIFI_MGMT_Q << 12));
+ boot_params->desc_word[1] = cpu_to_le16(BOOTUP_PARAMS_REQUEST);
+
+ skb_put(skb, sizeof(struct rsi_boot_params));
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_send_reset_mac() - This function prepares reset MAC request and sends an
+ * internal management frame to indicate it to firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+static int rsi_send_reset_mac(struct rsi_common *common)
+{
+ struct sk_buff *skb;
+ struct rsi_mac_frame *mgmt_frame;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending reset MAC frame\n", __func__);
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+ mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ mgmt_frame->desc_word[1] = cpu_to_le16(RESET_MAC_REQ);
+ mgmt_frame->desc_word[4] = cpu_to_le16(RETRY_COUNT << 8);
+
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_set_channel() - This function programs the channel.
+ * @common: Pointer to the driver private structure.
+ * @channel: Channel value to be set.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+int rsi_set_channel(struct rsi_common *common, u16 channel)
+{
+ struct sk_buff *skb = NULL;
+ struct rsi_mac_frame *mgmt_frame;
+
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Sending scan req frame\n", __func__);
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
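+ /* Fold the 5 GHz channel number down to the compact index carried in
+ * the scan request; unsupported channel numbers are rejected.
+ */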
+ if (common->band == IEEE80211_BAND_5GHZ) {
+ if ((channel >= 36) && (channel <= 64))
+ channel = ((channel - 32) / 4);
+ else if ((channel > 64) && (channel <= 140))
+ channel = ((channel - 102) / 4) + 8;
+ else if (channel >= 149)
+ channel = ((channel - 151) / 4) + 18;
+ else
+ return -EINVAL;
+ } else {
+ if (channel > 14) {
+ rsi_dbg(ERR_ZONE, "%s: Invalid chno %d, band = %d\n",
+ __func__, channel, common->band);
+ return -EINVAL;
+ }
+ }
+
+ mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST);
+ mgmt_frame->desc_word[4] = cpu_to_le16(channel);
+
+ mgmt_frame->desc_word[7] = cpu_to_le16(PUT_BBP_RESET |
+ BBP_REG_WRITE |
+ (RSI_RF_TYPE << 4));
+
+ mgmt_frame->desc_word[5] = cpu_to_le16(0x01);
+
+ if (common->channel_width == BW_40MHZ)
+ mgmt_frame->desc_word[5] |= cpu_to_le16(0x1 << 8);
+
+ common->channel = channel;
+
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_compare() - This function is used to compare two integers
+ * @a: pointer to the first integer
+ * @b: pointer to the second integer
+ *
+ * Return: 0 if equal, -1 if the first is larger, else 1 (descending order).
+ */
+static int rsi_compare(const void *a, const void *b)
+{
+ u16 _a = *(const u16 *)(a);
+ u16 _b = *(const u16 *)(b);
+
+ if (_a > _b)
+ return -1;
+
+ if (_a < _b)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * rsi_map_rates() - This function is used to map selected rates to hw rates.
+ * @rate: The standard rate to be mapped.
+ * @offset: Offset that will be returned.
+ *
+ * Return: false if it is an MCS rate, else true.
+ */
+static bool rsi_map_rates(u16 rate, int *offset)
+{
+ int kk;
+ for (kk = 0; kk < ARRAY_SIZE(rsi_mcsrates); kk++) {
+ if (rate == mcs[kk]) {
+ *offset = kk;
+ return false;
+ }
+ }
+
+ for (kk = 0; kk < ARRAY_SIZE(rsi_rates); kk++) {
+ if (rate == rsi_rates[kk].bitrate / 5) {
+ *offset = kk;
+ break;
+ }
+ }
+ return true;
+}
+
+/**
+ * rsi_send_auto_rate_request() - This function sets the rates for the
+ * connection and sends the autorate request to firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+static int rsi_send_auto_rate_request(struct rsi_common *common)
+{
+ struct sk_buff *skb;
+ struct rsi_auto_rate *auto_rate;
+ int ii = 0, jj = 0, kk = 0;
+ struct ieee80211_hw *hw = common->priv->hw;
+ u8 band = hw->conf.chandef.chan->band;
+ u8 num_supported_rates = 0;
+ u8 rate_offset = 0;
+ u32 rate_bitmap = common->bitrate_mask[band];
+
+ u16 *selected_rates, min_rate;
+
+ skb = dev_alloc_skb(sizeof(struct rsi_auto_rate));
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ selected_rates = kmalloc(2 * RSI_TBL_SZ, GFP_KERNEL);
+ if (!selected_rates) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n",
+ __func__);
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, sizeof(struct rsi_auto_rate));
+ memset(selected_rates, 0, 2 * RSI_TBL_SZ);
+
+ auto_rate = (struct rsi_auto_rate *)skb->data;
+
+ auto_rate->aarf_rssi = cpu_to_le16(((u16)3 << 6) | (u16)(18 & 0x3f));
+ auto_rate->collision_tolerance = cpu_to_le16(3);
+ auto_rate->failure_limit = cpu_to_le16(3);
+ auto_rate->initial_boundary = cpu_to_le16(3);
+ auto_rate->max_threshold_limt = cpu_to_le16(27);
+
+ auto_rate->desc_word[1] = cpu_to_le16(AUTO_RATE_IND);
+
+ if (common->channel_width == BW_40MHZ)
+ auto_rate->desc_word[7] |= cpu_to_le16(1);
+
+ if (band == IEEE80211_BAND_2GHZ)
+ min_rate = STD_RATE_01;
+ else
+ min_rate = STD_RATE_06;
+
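+ /* Collect the legacy rates enabled in the configured bitrate mask */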
+ for (ii = 0, jj = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
+ if (rate_bitmap & BIT(ii)) {
+ selected_rates[jj++] = (rsi_rates[ii].bitrate / 5);
+ rate_offset++;
+ }
+ }
+ num_supported_rates = jj;
+
+ if (common->vif_info[0].is_ht) {
+ for (ii = 0; ii < ARRAY_SIZE(mcs); ii++)
+ selected_rates[jj++] = mcs[ii];
+ num_supported_rates += ARRAY_SIZE(mcs);
+ rate_offset += ARRAY_SIZE(mcs);
+ }
+
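+ /* Pad the remaining slots of the first half of the table with the
+ * minimum rate so the firmware always receives a fully populated list.
+ */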
+ if (rate_offset < (RSI_TBL_SZ / 2) - 1) {
+ for (ii = jj; ii < (RSI_TBL_SZ / 2); ii++) {
+ selected_rates[jj++] = min_rate;
+ rate_offset++;
+ }
+ }
+
+ sort(selected_rates, jj, sizeof(u16), &rsi_compare, NULL);
+
+ /* mapping the rates to RSI rates */
+ for (ii = 0; ii < jj; ii++) {
+ if (rsi_map_rates(selected_rates[ii], &kk)) {
+ auto_rate->supported_rates[ii] =
+ cpu_to_le16(rsi_rates[kk].hw_value);
+ } else {
+ auto_rate->supported_rates[ii] =
+ cpu_to_le16(rsi_mcsrates[kk]);
+ }
+ }
+
+ /* loading HT rates in the bottom half of the auto rate table */
+ if (common->vif_info[0].is_ht) {
+ if (common->vif_info[0].sgi)
+ auto_rate->supported_rates[rate_offset++] =
+ cpu_to_le16(RSI_RATE_MCS7_SG);
+
+ for (ii = rate_offset, kk = ARRAY_SIZE(rsi_mcsrates) - 1;
+ ii < rate_offset + 2 * ARRAY_SIZE(rsi_mcsrates); ii++) {
+ if (common->vif_info[0].sgi)
+ auto_rate->supported_rates[ii++] =
+ cpu_to_le16(rsi_mcsrates[kk] | BIT(9));
+ auto_rate->supported_rates[ii] =
+ cpu_to_le16(rsi_mcsrates[kk--]);
+ }
+
+ for (; ii < RSI_TBL_SZ; ii++) {
+ auto_rate->supported_rates[ii] =
+ cpu_to_le16(rsi_mcsrates[0]);
+ }
+ }
+
+ auto_rate->num_supported_rates = cpu_to_le16(num_supported_rates * 2);
+ auto_rate->moderate_rate_inx = cpu_to_le16(num_supported_rates / 2);
+ auto_rate->desc_word[7] |= cpu_to_le16(0 << 8);
+ num_supported_rates *= 2;
+
+ auto_rate->desc_word[0] = cpu_to_le16((sizeof(*auto_rate) -
+ FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+
+ skb_put(skb,
+ sizeof(struct rsi_auto_rate));
+ kfree(selected_rates);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_inform_bss_status() - This function informs the firmware about the bss
+ * status by sending a station notify internal
+ * management frame.
+ * @common: Pointer to the driver private structure.
+ * @status: Bss status type.
+ * @bssid: Bssid.
+ * @qos_enable: Qos is enabled.
+ * @aid: Aid (unique for all STAs).
+ *
+ * Return: None.
+ */
+void rsi_inform_bss_status(struct rsi_common *common,
+ u8 status,
+ const unsigned char *bssid,
+ u8 qos_enable,
+ u16 aid)
+{
+ if (status) {
+ rsi_hal_send_sta_notify_frame(common,
+ NL80211_IFTYPE_STATION,
+ STA_CONNECTED,
+ bssid,
+ qos_enable,
+ aid);
+ if (common->min_rate == 0xffff)
+ rsi_send_auto_rate_request(common);
+ } else {
+ rsi_hal_send_sta_notify_frame(common,
+ NL80211_IFTYPE_STATION,
+ STA_DISCONNECTED,
+ bssid,
+ qos_enable,
+ aid);
+ }
+}
+
+/**
+ * rsi_eeprom_read() - This function sends a frame to read the mac address
+ * from the eeprom.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_eeprom_read(struct rsi_common *common)
+{
+ struct rsi_mac_frame *mgmt_frame;
+ struct sk_buff *skb;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending EEPROM read req frame\n", __func__);
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+ /* FrameType */
+ mgmt_frame->desc_word[1] = cpu_to_le16(EEPROM_READ_TYPE);
+ mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ /* Number of bytes to read */
+ mgmt_frame->desc_word[3] = cpu_to_le16(ETH_ALEN +
+ WLAN_MAC_MAGIC_WORD_LEN +
+ WLAN_HOST_MODE_LEN +
+ WLAN_FW_VERSION_LEN);
+ /* Address to read */
+ mgmt_frame->desc_word[4] = cpu_to_le16(WLAN_MAC_EEPROM_ADDR);
+
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_handle_ta_confirm_type() - This function handles the confirm frames.
+ * @common: Pointer to the driver private structure.
+ * @msg: Pointer to received packet.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_handle_ta_confirm_type(struct rsi_common *common,
+ u8 *msg)
+{
+ u8 sub_type = (msg[15] & 0xff);
+
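+ /* Each TA confirmation advances the init state machine:
+ * boot params -> EEPROM read -> reset MAC -> radio caps ->
+ * BB/RF programming -> MAC init done.
+ */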
+ switch (sub_type) {
+ case BOOTUP_PARAMS_REQUEST:
+ rsi_dbg(FSM_ZONE, "%s: Boot up params confirm received\n",
+ __func__);
+ if (common->fsm_state == FSM_BOOT_PARAMS_SENT) {
+ if (rsi_eeprom_read(common)) {
+ common->fsm_state = FSM_CARD_NOT_READY;
+ goto out;
+ } else {
+ common->fsm_state = FSM_EEPROM_READ_MAC_ADDR;
+ }
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Received bootup params cfm in %d state\n",
+ __func__, common->fsm_state);
+ return 0;
+ }
+ break;
+
+ case EEPROM_READ_TYPE:
+ if (common->fsm_state == FSM_EEPROM_READ_MAC_ADDR) {
+ if (msg[16] == MAGIC_WORD) {
+ u8 offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN
+ + WLAN_MAC_MAGIC_WORD_LEN);
+ memcpy(common->mac_addr,
+ &msg[offset],
+ ETH_ALEN);
+ memcpy(&common->fw_ver,
+ &msg[offset + ETH_ALEN],
+ sizeof(struct version_info));
+
+ } else {
+ common->fsm_state = FSM_CARD_NOT_READY;
+ break;
+ }
+ if (rsi_send_reset_mac(common))
+ goto out;
+ else
+ common->fsm_state = FSM_RESET_MAC_SENT;
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Received eeprom mac addr in %d state\n",
+ __func__, common->fsm_state);
+ return 0;
+ }
+ break;
+
+ case RESET_MAC_REQ:
+ if (common->fsm_state == FSM_RESET_MAC_SENT) {
+ rsi_dbg(FSM_ZONE, "%s: Reset MAC cfm received\n",
+ __func__);
+
+ if (rsi_load_radio_caps(common))
+ goto out;
+ else
+ common->fsm_state = FSM_RADIO_CAPS_SENT;
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Received reset mac cfm in %d state\n",
+ __func__, common->fsm_state);
+ return 0;
+ }
+ break;
+
+ case RADIO_CAPABILITIES:
+ if (common->fsm_state == FSM_RADIO_CAPS_SENT) {
+ common->rf_reset = 1;
+ if (rsi_program_bb_rf(common)) {
+ goto out;
+ } else {
+ common->fsm_state = FSM_BB_RF_PROG_SENT;
+ rsi_dbg(FSM_ZONE, "%s: Radio cap cfm received\n",
+ __func__);
+ }
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Received radio caps cfm in %d state\n",
+ __func__, common->fsm_state);
+ return 0;
+ }
+ break;
+
+ case BB_PROG_VALUES_REQUEST:
+ case RF_PROG_VALUES_REQUEST:
+ case BBP_PROG_IN_TA:
+ rsi_dbg(FSM_ZONE, "%s: BB/RF cfm received\n", __func__);
+ if (common->fsm_state == FSM_BB_RF_PROG_SENT) {
+ common->bb_rf_prog_count--;
+ if (!common->bb_rf_prog_count) {
+ common->fsm_state = FSM_MAC_INIT_DONE;
+ return rsi_mac80211_attach(common);
+ }
+ } else {
+ goto out;
+ }
+ break;
+
+ default:
+ rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n",
+ __func__);
+ break;
+ }
+ return 0;
+out:
+ rsi_dbg(ERR_ZONE, "%s: Unable to send pkt/Invalid frame received\n",
+ __func__);
+ return -EINVAL;
+}
+
+/**
+ * rsi_mgmt_pkt_recv() - This function processes the management packets
+ * received from the hardware.
+ * @common: Pointer to the driver private structure.
+ * @msg: Pointer to the received packet.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
+{
+ s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff);
+ u16 msg_type = (msg[2]);
+
+ rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n",
+ __func__, msg_len, msg_type);
+
+ if (msg_type == TA_CONFIRM_TYPE) {
+ return rsi_handle_ta_confirm_type(common, msg);
+ } else if (msg_type == CARD_READY_IND) {
+ rsi_dbg(FSM_ZONE, "%s: Card ready indication received\n",
+ __func__);
+ if (common->fsm_state == FSM_CARD_NOT_READY) {
+ rsi_set_default_parameters(common);
+
+ if (rsi_load_bootup_params(common))
+ return -ENOMEM;
+ else
+ common->fsm_state = FSM_BOOT_PARAMS_SENT;
+ } else {
+ return -EINVAL;
+ }
+ } else if (msg_type == TX_STATUS_IND) {
+ if (msg[15] == PROBEREQ_CONFIRM) {
+ common->mgmt_q_block = false;
+ rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n",
+ __func__);
+ }
+ } else {
+ return rsi_mgmt_pkt_to_core(common, msg, msg_len, msg_type);
+ }
+ return 0;
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c
new file mode 100644
index 000000000000..8e48e72bae20
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_pkt.c
@@ -0,0 +1,196 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "rsi_mgmt.h"
+
+/**
+ * rsi_send_data_pkt() - This function sends the received data packet from
+ * driver to device.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_hdr *tmp_hdr = NULL;
+ struct ieee80211_tx_info *info;
+ struct skb_info *tx_params;
+ struct ieee80211_bss_conf *bss = NULL;
+ int status = -EINVAL;
+ u8 ieee80211_size = MIN_802_11_HDR_LEN;
+ u8 extnd_size = 0;
+ __le16 *frame_desc;
+ u16 seq_num = 0;
+
+ info = IEEE80211_SKB_CB(skb);
+ bss = &info->control.vif->bss_conf;
+ tx_params = (struct skb_info *)info->driver_data;
+
+ if (!bss->assoc)
+ goto err;
+
+ tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
+ seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4);
+
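+ /* skb->data is pushed by FRAME_DESC_SZ plus the current misalignment
+ * so that the frame descriptor starts on a 4-byte boundary.
+ */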
+ extnd_size = ((uintptr_t)skb->data & 0x3);
+
+ if ((FRAME_DESC_SZ + extnd_size) > skb_headroom(skb)) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
+ status = -ENOSPC;
+ goto err;
+ }
+
+ skb_push(skb, (FRAME_DESC_SZ + extnd_size));
+ frame_desc = (__le16 *)&skb->data[0];
+ memset((u8 *)frame_desc, 0, FRAME_DESC_SZ);
+
+ if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
+ ieee80211_size += 2;
+ frame_desc[6] |= cpu_to_le16(BIT(12));
+ }
+
+ if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
+ (common->secinfo.security_enable)) {
+ if (rsi_is_cipher_wep(common))
+ ieee80211_size += 4;
+ else
+ ieee80211_size += 8;
+ frame_desc[6] |= cpu_to_le16(BIT(15));
+ }
+
+ frame_desc[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
+ (RSI_WIFI_DATA_Q << 12));
+ frame_desc[2] = cpu_to_le16((extnd_size) | (ieee80211_size) << 8);
+
+ if (common->min_rate != 0xffff) {
+ /* Send fixed rate */
+ frame_desc[3] = cpu_to_le16(RATE_INFO_ENABLE);
+ frame_desc[4] = cpu_to_le16(common->min_rate);
+ }
+
+ frame_desc[6] |= cpu_to_le16(seq_num & 0xfff);
+ frame_desc[7] = cpu_to_le16(((tx_params->tid & 0xf) << 4) |
+ (skb->priority & 0xf) |
+ (tx_params->sta_id << 8));
+
+ status = adapter->host_intf_write_pkt(common->priv,
+ skb->data,
+ skb->len);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n",
+ __func__);
+
+err:
+ ++common->tx_stats.total_tx_pkt_freed[skb->priority];
+ rsi_indicate_tx_status(common->priv, skb, status);
+ return status;
+}
+
+/**
+ * rsi_send_mgmt_pkt() - This function sends the received management packet
+ * from driver to device.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_send_mgmt_pkt(struct rsi_common *common,
+ struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_hdr *wh = NULL;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_bss_conf *bss = NULL;
+ struct skb_info *tx_params;
+ int status = -E2BIG;
+ __le16 *msg = NULL;
+ u8 extnd_size = 0;
+ u8 vap_id = 0;
+
+ info = IEEE80211_SKB_CB(skb);
+ tx_params = (struct skb_info *)info->driver_data;
+ extnd_size = ((uintptr_t)skb->data & 0x3);
+
+ if (tx_params->flags & INTERNAL_MGMT_PKT) {
+ if ((extnd_size) > skb_headroom(skb)) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
+ dev_kfree_skb(skb);
+ return -ENOSPC;
+ }
+ skb_push(skb, extnd_size);
+ skb->data[extnd_size + 4] = extnd_size;
+ status = adapter->host_intf_write_pkt(common->priv,
+ (u8 *)skb->data,
+ skb->len);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write the packet\n", __func__);
+ }
+ dev_kfree_skb(skb);
+ return status;
+ }
+
+ bss = &info->control.vif->bss_conf;
+ wh = (struct ieee80211_hdr *)&skb->data[0];
+
+ if (FRAME_DESC_SZ > skb_headroom(skb))
+ goto err;
+
+ skb_push(skb, FRAME_DESC_SZ);
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ msg = (__le16 *)skb->data;
+
+ if (skb->len > MAX_MGMT_PKT_SIZE) {
+ rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__);
+ goto err;
+ }
+
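+ /* Build the TX descriptor: length and queue number in word 0,
+ * frame type in word 1, header length in word 2, rate info in
+ * words 3 and 4 and the sequence number in word 6.
+ */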
+ msg[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+ msg[1] = cpu_to_le16(TX_DOT11_MGMT);
+ msg[2] = cpu_to_le16(MIN_802_11_HDR_LEN << 8);
+ msg[3] = cpu_to_le16(RATE_INFO_ENABLE);
+ msg[6] = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4);
+
+ if (wh->addr1[0] & BIT(0))
+ msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT);
+
+ if (common->band == IEEE80211_BAND_2GHZ)
+ msg[4] = cpu_to_le16(RSI_11B_MODE);
+ else
+ msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
+
+ /* Indicate to firmware to give cfm */
+ if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) {
+ msg[1] |= cpu_to_le16(BIT(10));
+ msg[7] = cpu_to_le16(PROBEREQ_CONFIRM);
+ common->mgmt_q_block = true;
+ }
+
+ msg[7] |= cpu_to_le16(vap_id << 8);
+
+ status = adapter->host_intf_write_pkt(common->priv,
+ (u8 *)msg,
+ skb->len);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__);
+
+err:
+ rsi_indicate_tx_status(common->priv, skb, status);
+ return status;
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
new file mode 100644
index 000000000000..852453f386e2
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -0,0 +1,850 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include "rsi_sdio.h"
+#include "rsi_common.h"
+
+/**
+ * rsi_sdio_set_cmd52_arg() - This function prepares cmd 52 read/write arg.
+ * @rw: Read/write
+ * @func: function number
+ * @raw: indicates whether to perform read after write
+ * @address: address to which to read/write
+ * @writedata: data to write
+ *
+ * Return: argument
+ */
+static u32 rsi_sdio_set_cmd52_arg(bool rw,
+ u8 func,
+ u8 raw,
+ u32 address,
+ u8 writedata)
+{
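+ /* Pack the CMD52 argument: R/W flag, function number, RAW flag,
+ * 17-bit register address and the byte to write.
+ */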
+ return ((rw & 1) << 31) | ((func & 0x7) << 28) |
+ ((raw & 1) << 27) | (1 << 26) |
+ ((address & 0x1FFFF) << 9) | (1 << 8) |
+ (writedata & 0xFF);
+}
+
+/**
+ * rsi_cmd52writebyte() - This function issues cmd52 byte write onto the card.
+ * @card: Pointer to the mmc_card.
+ * @address: Address to write.
+ * @byte: Data to write.
+ *
+ * Return: Write status.
+ */
+static int rsi_cmd52writebyte(struct mmc_card *card,
+ u32 address,
+ u8 byte)
+{
+ struct mmc_command io_cmd;
+ u32 arg;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ arg = rsi_sdio_set_cmd52_arg(1, 0, 0, address, byte);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.arg = arg;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &io_cmd, 0);
+}
+
+/**
+ * rsi_cmd52readbyte() - This function issues cmd52 byte read onto the card.
+ * @card: Pointer to the mmc_card.
+ * @address: Address to read from.
+ * @byte: Variable to store read value.
+ *
+ * Return: Read status.
+ */
+static int rsi_cmd52readbyte(struct mmc_card *card,
+ u32 address,
+ u8 *byte)
+{
+ struct mmc_command io_cmd;
+ u32 arg;
+ int err;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ arg = rsi_sdio_set_cmd52_arg(0, 0, 0, address, 0);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.arg = arg;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(card->host, &io_cmd, 0);
+ if ((!err) && (byte))
+ *byte = io_cmd.resp[0] & 0xFF;
+ return err;
+}
+
+/**
+ * rsi_issue_sdiocommand() - This function issues sdio commands.
+ * @func: Pointer to the sdio_func structure.
+ * @opcode: Opcode value.
+ * @arg: Arguments to pass.
+ * @flags: Flags which are set.
+ * @resp: Pointer to store response.
+ *
+ * Return: err: command status as 0 or -1.
+ */
+static int rsi_issue_sdiocommand(struct sdio_func *func,
+ u32 opcode,
+ u32 arg,
+ u32 flags,
+ u32 *resp)
+{
+ struct mmc_command cmd;
+ struct mmc_host *host;
+ int err;
+
+ host = func->card->host;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = opcode;
+ cmd.arg = arg;
+ cmd.flags = flags;
+ err = mmc_wait_for_cmd(host, &cmd, 3);
+
+ if ((!err) && (resp))
+ *resp = cmd.resp[0];
+
+ return err;
+}
+
+/**
+ * rsi_handle_interrupt() - This function is called upon the occurrence
+ * of an interrupt.
+ * @function: Pointer to the sdio_func structure.
+ *
+ * Return: None.
+ */
+static void rsi_handle_interrupt(struct sdio_func *function)
+{
+ struct rsi_hw *adapter = sdio_get_drvdata(function);
+
+ sdio_release_host(function);
+ rsi_interrupt_handler(adapter);
+ sdio_claim_host(function);
+}
+
+/**
+ * rsi_reset_card() - This function resets and re-initializes the card.
+ * @pfunction: Pointer to the sdio_func structure.
+ *
+ * Return: None.
+ */
+static void rsi_reset_card(struct sdio_func *pfunction)
+{
+ int ret = 0;
+ int err;
+ struct mmc_card *card = pfunction->card;
+ struct mmc_host *host = card->host;
+ s32 bit = (fls(host->ocr_avail) - 1);
+ u8 cmd52_resp;
+ u32 clock, resp, i;
+ u16 rca;
+
+ /* Reset 9110 chip */
+ ret = rsi_cmd52writebyte(pfunction->card,
+ SDIO_CCCR_ABORT,
+ (1 << 3));
+
+ /* Card will not send any response as it is getting reset immediately
+ * Hence expect a timeout status from host controller
+ */
+ if (ret != -ETIMEDOUT)
+ rsi_dbg(ERR_ZONE, "%s: Reset failed : %d\n", __func__, ret);
+
+ /* Wait for a few milliseconds to get rid of residual charges, if any */
+ msleep(20);
+
+ /* Initialize the SDIO card */
+ host->ios.vdd = bit;
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ host->ios.power_mode = MMC_POWER_UP;
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
+ host->ops->set_ios(host, &host->ios);
+
+ /*
+ * This delay should be sufficient to allow the power supply
+ * to reach the minimum voltage.
+ */
+ msleep(20);
+
+ host->ios.clock = host->f_min;
+ host->ios.power_mode = MMC_POWER_ON;
+ host->ops->set_ios(host, &host->ios);
+
+ /*
+ * This delay must be at least 74 clock cycles, or 1 ms, or the
+ * time required to reach a stable voltage.
+ */
+ msleep(20);
+
+ /* Issue CMD0. Goto idle state */
+ host->ios.chip_select = MMC_CS_HIGH;
+ host->ops->set_ios(host, &host->ios);
+ msleep(20);
+ err = rsi_issue_sdiocommand(pfunction,
+ MMC_GO_IDLE_STATE,
+ 0,
+ (MMC_RSP_NONE | MMC_CMD_BC),
+ NULL);
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ops->set_ios(host, &host->ios);
+ msleep(20);
+ host->use_spi_crc = 0;
+
+ if (err)
+ rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err);
+
+ if (!host->ocr_avail) {
+ /* Issue CMD5, arg = 0 */
+ err = rsi_issue_sdiocommand(pfunction,
+ SD_IO_SEND_OP_COND,
+ 0,
+ (MMC_RSP_R4 | MMC_CMD_BCR),
+ &resp);
+ if (err)
+ rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
+ __func__, err);
+ host->ocr_avail = resp;
+ }
+
+ /* Issue CMD5, arg = ocr. Wait till card is ready */
+ for (i = 0; i < 100; i++) {
+ err = rsi_issue_sdiocommand(pfunction,
+ SD_IO_SEND_OP_COND,
+ host->ocr_avail,
+ (MMC_RSP_R4 | MMC_CMD_BCR),
+ &resp);
+ if (err) {
+ rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
+ __func__, err);
+ break;
+ }
+
+ if (resp & MMC_CARD_BUSY)
+ break;
+ msleep(20);
+ }
+
+ if ((i == 100) || (err)) {
+ rsi_dbg(ERR_ZONE, "%s: card in not ready : %d %d\n",
+ __func__, i, err);
+ return;
+ }
+
+ /* Issue CMD3, get RCA */
+ err = rsi_issue_sdiocommand(pfunction,
+ SD_SEND_RELATIVE_ADDR,
+ 0,
+ (MMC_RSP_R6 | MMC_CMD_BCR),
+ &resp);
+ if (err) {
+ rsi_dbg(ERR_ZONE, "%s: CMD3 failed : %d\n", __func__, err);
+ return;
+ }
+ rca = resp >> 16;
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ host->ops->set_ios(host, &host->ios);
+
+ /* Issue CMD7, select card */
+ err = rsi_issue_sdiocommand(pfunction,
+ MMC_SELECT_CARD,
+ (rca << 16),
+ (MMC_RSP_R1 | MMC_CMD_AC),
+ NULL);
+ if (err) {
+ rsi_dbg(ERR_ZONE, "%s: CMD7 failed : %d\n", __func__, err);
+ return;
+ }
+
+ /* Enable high speed */
+ if (card->host->caps & MMC_CAP_SD_HIGHSPEED) {
+ rsi_dbg(ERR_ZONE, "%s: Set high speed mode\n", __func__);
+ err = rsi_cmd52readbyte(card, SDIO_CCCR_SPEED, &cmd52_resp);
+ if (err) {
+ rsi_dbg(ERR_ZONE, "%s: CCCR speed reg read failed: %d\n",
+ __func__, err);
+ card->state &= ~MMC_STATE_HIGHSPEED;
+ } else {
+ err = rsi_cmd52writebyte(card,
+ SDIO_CCCR_SPEED,
+ (cmd52_resp | SDIO_SPEED_EHS));
+ if (err) {
+ rsi_dbg(ERR_ZONE,
+ "%s: CCR speed regwrite failed %d\n",
+ __func__, err);
+ return;
+ }
+ mmc_card_set_highspeed(card);
+ host->ios.timing = MMC_TIMING_SD_HS;
+ host->ops->set_ios(host, &host->ios);
+ }
+ }
+
+ /* Set clock */
+ if (mmc_card_highspeed(card))
+ clock = 50000000;
+ else
+ clock = card->cis.max_dtr;
+
+ if (clock > host->f_max)
+ clock = host->f_max;
+
+ host->ios.clock = clock;
+ host->ops->set_ios(host, &host->ios);
+
+ if (card->host->caps & MMC_CAP_4_BIT_DATA) {
+ /* CMD52: Set bus width & disable card detect resistor */
+ err = rsi_cmd52writebyte(card,
+ SDIO_CCCR_IF,
+ (SDIO_BUS_CD_DISABLE |
+ SDIO_BUS_WIDTH_4BIT));
+ if (err) {
+ rsi_dbg(ERR_ZONE, "%s: Set bus mode failed : %d\n",
+ __func__, err);
+ return;
+ }
+ host->ios.bus_width = MMC_BUS_WIDTH_4;
+ host->ops->set_ios(host, &host->ios);
+ }
+}
+
+/**
+ * rsi_setclock() - This function sets the clock frequency.
+ * @adapter: Pointer to the adapter structure.
+ * @freq: Clock frequency.
+ *
+ * Return: None.
+ */
+static void rsi_setclock(struct rsi_hw *adapter, u32 freq)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ struct mmc_host *host = dev->pfunction->card->host;
+ u32 clock;
+
+ clock = freq * 1000;
+ if (clock > host->f_max)
+ clock = host->f_max;
+ host->ios.clock = clock;
+ host->ops->set_ios(host, &host->ios);
+}
+
+/**
+ * rsi_setblocklength() - This function sets the host block length.
+ * @adapter: Pointer to the adapter structure.
+ * @length: Block length to be set.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_setblocklength(struct rsi_hw *adapter, u32 length)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ int status;
+ rsi_dbg(INIT_ZONE, "%s: Setting the block length\n", __func__);
+
+ status = sdio_set_block_size(dev->pfunction, length);
+ dev->pfunction->max_blksize = 256;
+
+ rsi_dbg(INFO_ZONE,
+ "%s: Operational blk length is %d\n", __func__, length);
+ return status;
+}
+
+/**
+ * rsi_setupcard() - This function queries and sets the card's features.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_setupcard(struct rsi_hw *adapter)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ int status = 0;
+
+ rsi_setclock(adapter, 50000);
+
+ dev->tx_blk_size = 256;
+ status = rsi_setblocklength(adapter, dev->tx_blk_size);
+ if (status)
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set block length\n", __func__);
+ return status;
+}
+
+/**
+ * rsi_sdio_read_register() - This function reads one byte of information
+ * from a register.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the register.
+ * @data: Pointer to the data that stores the data read.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_read_register(struct rsi_hw *adapter,
+ u32 addr,
+ u8 *data)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u8 fun_num = 0;
+ int status;
+
+ sdio_claim_host(dev->pfunction);
+
+ if (fun_num == 0)
+ *data = sdio_f0_readb(dev->pfunction, addr, &status);
+ else
+ *data = sdio_readb(dev->pfunction, addr, &status);
+
+ sdio_release_host(dev->pfunction);
+
+ return status;
+}
+
+/**
+ * rsi_sdio_write_register() - This function writes one byte of information
+ * into a register.
+ * @adapter: Pointer to the adapter structure.
+ * @function: Function Number.
+ * @addr: Address of the register.
+ * @data: Pointer to the data that has to be written.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_write_register(struct rsi_hw *adapter,
+ u8 function,
+ u32 addr,
+ u8 *data)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ int status = 0;
+
+ sdio_claim_host(dev->pfunction);
+
+ if (function == 0)
+ sdio_f0_writeb(dev->pfunction, *data, addr, &status);
+ else
+ sdio_writeb(dev->pfunction, *data, addr, &status);
+
+ sdio_release_host(dev->pfunction);
+
+ return status;
+}
+
+/**
+ * rsi_sdio_ack_intr() - This function acks the interrupt received.
+ * @adapter: Pointer to the adapter structure.
+ * @int_bit: Interrupt bit to write into register.
+ *
+ * Return: None.
+ */
+void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit)
+{
+ int status;
+ status = rsi_sdio_write_register(adapter,
+ 1,
+ (SDIO_FUN1_INTR_CLR_REG |
+ RSI_SD_REQUEST_MASTER),
+ &int_bit);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: unable to send ack\n", __func__);
+}
+
+
+
+/**
+ * rsi_sdio_read_register_multiple() - This function reads multiple bytes of
+ * information from the SD card.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the register.
+ * @count: Number of multiple bytes to be read.
+ * @data: Pointer to the read data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_read_register_multiple(struct rsi_hw *adapter,
+ u32 addr,
+ u32 count,
+ u8 *data)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u32 status;
+
+ sdio_claim_host(dev->pfunction);
+
+ status = sdio_readsb(dev->pfunction, data, addr, count);
+
+ sdio_release_host(dev->pfunction);
+
+ if (status != 0)
+ rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 read failed\n", __func__);
+ return status;
+}
+
+/**
+ * rsi_sdio_write_register_multiple() - This function writes multiple bytes of
+ * information to the SD card.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the register.
+ * @data: Pointer to the data that has to be written.
+ * @count: Number of multiple bytes to be written.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_write_register_multiple(struct rsi_hw *adapter,
+ u32 addr,
+ u8 *data,
+ u32 count)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ int status;
+
+ if (dev->write_fail > 1) {
+ rsi_dbg(ERR_ZONE, "%s: Stopping card writes\n", __func__);
+ return 0;
+ } else if (dev->write_fail == 1) {
+ /*
+ * Assuming it is a CRC failure, we want to allow another
+ * card write
+ */
+ rsi_dbg(ERR_ZONE, "%s: Continue card writes\n", __func__);
+ dev->write_fail++;
+ }
+
+ sdio_claim_host(dev->pfunction);
+
+ status = sdio_writesb(dev->pfunction, addr, data, count);
+
+ sdio_release_host(dev->pfunction);
+
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 write failed %d\n",
+ __func__, status);
+ dev->write_fail = 2;
+ } else {
+ memcpy(dev->prev_desc, data, FRAME_DESC_SZ);
+ }
+ return status;
+}
+
+/**
+ * rsi_sdio_host_intf_write_pkt() - This function writes the packet to device.
+ * @adapter: Pointer to the adapter structure.
+ * @pkt: Pointer to the data to be written on to the device.
+ * @len: length of the data to be written on to the device.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_host_intf_write_pkt(struct rsi_hw *adapter,
+ u8 *pkt,
+ u32 len)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u32 block_size = dev->tx_blk_size;
+ u32 num_blocks, address, length;
+ u32 queueno;
+ int status;
+
+ queueno = ((pkt[1] >> 4) & 0xf);
+
+ num_blocks = len / block_size;
+
+ if (len % block_size)
+ num_blocks++;
+
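+ /* The CMD53 address encodes the queue number in bits [15:12];
+ * the lower bits carry the block-aligned transfer length.
+ */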
+ address = (num_blocks * block_size | (queueno << 12));
+ length = num_blocks * block_size;
+
+ status = rsi_sdio_write_register_multiple(adapter,
+ address,
+ (u8 *)pkt,
+ length);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Unable to write onto the card: %d\n",
+ __func__, status);
+ rsi_dbg(DATA_TX_ZONE, "%s: Successfully written onto card\n", __func__);
+ return status;
+}
+
+/**
+ * rsi_sdio_host_intf_read_pkt() - This function reads the packet
+ * from the device.
+ * @adapter: Pointer to the adapter data structure.
+ * @pkt: Pointer to the packet data to be read from the device.
+ * @length: Length of the data to be read from the device.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter,
+ u8 *pkt,
+ u32 length)
+{
+ int status = -EINVAL;
+
+ if (!length) {
+ rsi_dbg(ERR_ZONE, "%s: Pkt size is zero\n", __func__);
+ return status;
+ }
+
+ status = rsi_sdio_read_register_multiple(adapter,
+ length,
+ length, /*num of bytes*/
+ (u8 *)pkt);
+
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Failed to read frame: %d\n", __func__,
+ status);
+ return status;
+}
+
+/**
+ * rsi_init_sdio_interface() - This function does init specific to SDIO.
+ * @adapter: Pointer to the adapter data structure.
+ * @pfunction: Pointer to the sdio_func structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_init_sdio_interface(struct rsi_hw *adapter,
+ struct sdio_func *pfunction)
+{
+ struct rsi_91x_sdiodev *rsi_91x_dev;
+ int status = -ENOMEM;
+
+ rsi_91x_dev = kzalloc(sizeof(*rsi_91x_dev), GFP_KERNEL);
+ if (!rsi_91x_dev)
+ return status;
+
+ adapter->rsi_dev = rsi_91x_dev;
+
+ sdio_claim_host(pfunction);
+
+ pfunction->enable_timeout = 100;
+ status = sdio_enable_func(pfunction);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to enable interface\n", __func__);
+ sdio_release_host(pfunction);
+ return status;
+ }
+
+ rsi_dbg(INIT_ZONE, "%s: Enabled the interface\n", __func__);
+
+ rsi_91x_dev->pfunction = pfunction;
+ adapter->device = &pfunction->dev;
+
+ sdio_set_drvdata(pfunction, adapter);
+
+ status = rsi_setupcard(adapter);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to setup card\n", __func__);
+ goto fail;
+ }
+
+ rsi_dbg(INIT_ZONE, "%s: Setup card succesfully\n", __func__);
+
+ status = rsi_init_sdio_slave_regs(adapter);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to init slave regs\n", __func__);
+ goto fail;
+ }
+ sdio_release_host(pfunction);
+
+ adapter->host_intf_write_pkt = rsi_sdio_host_intf_write_pkt;
+ adapter->host_intf_read_pkt = rsi_sdio_host_intf_read_pkt;
+ adapter->determine_event_timeout = rsi_sdio_determine_event_timeout;
+ adapter->check_hw_queue_status = rsi_sdio_read_buffer_status_register;
+
+#ifdef CONFIG_RSI_DEBUGFS
+ adapter->num_debugfs_entries = MAX_DEBUGFS_ENTRIES;
+#endif
+ return status;
+fail:
+ sdio_disable_func(pfunction);
+ sdio_release_host(pfunction);
+ return status;
+}
+
+/**
+ * rsi_probe() - This function is called by kernel when the driver provided
+ * Vendor and device IDs are matched. All the initialization
+ * work is done here.
+ * @pfunction: Pointer to the sdio_func structure.
+ * @id: Pointer to sdio_device_id structure.
+ *
+ * Return: 0 on success, 1 on failure.
+ */
+static int rsi_probe(struct sdio_func *pfunction,
+ const struct sdio_device_id *id)
+{
+ struct rsi_hw *adapter;
+
+ rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);
+
+ adapter = rsi_91x_init();
+ if (!adapter) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n",
+ __func__);
+ return 1;
+ }
+
+ if (rsi_init_sdio_interface(adapter, pfunction)) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to init sdio interface\n",
+ __func__);
+ goto fail;
+ }
+
+ if (rsi_sdio_device_init(adapter->priv)) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in device init\n", __func__);
+ sdio_claim_host(pfunction);
+ sdio_disable_func(pfunction);
+ sdio_release_host(pfunction);
+ goto fail;
+ }
+
+ sdio_claim_host(pfunction);
+ if (sdio_claim_irq(pfunction, rsi_handle_interrupt)) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to request IRQ\n", __func__);
+ sdio_release_host(pfunction);
+ goto fail;
+ }
+
+ sdio_release_host(pfunction);
+ rsi_dbg(INIT_ZONE, "%s: Registered Interrupt handler\n", __func__);
+
+ return 0;
+fail:
+ rsi_91x_deinit(adapter);
+ rsi_dbg(ERR_ZONE, "%s: Failed in probe...Exiting\n", __func__);
+ return 1;
+}
+
+/**
+ * rsi_disconnect() - This function performs the reverse of the probe function.
+ * @pfunction: Pointer to the sdio_func structure.
+ *
+ * Return: void.
+ */
+static void rsi_disconnect(struct sdio_func *pfunction)
+{
+ struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+
+ if (!adapter)
+ return;
+
+ dev->write_fail = 2;
+ rsi_mac80211_detach(adapter);
+
+ sdio_claim_host(pfunction);
+ sdio_release_irq(pfunction);
+ sdio_disable_func(pfunction);
+ rsi_91x_deinit(adapter);
+ /* Resetting to take care of the case, where-in driver is re-loaded */
+ rsi_reset_card(pfunction);
+ sdio_release_host(pfunction);
+}
+
+#ifdef CONFIG_PM
+static int rsi_suspend(struct device *dev)
+{
+ /* Not yet implemented */
+ return -ENOSYS;
+}
+
+static int rsi_resume(struct device *dev)
+{
+ /* Not yet implemented */
+ return -ENOSYS;
+}
+
+static const struct dev_pm_ops rsi_pm_ops = {
+ .suspend = rsi_suspend,
+ .resume = rsi_resume,
+};
+#endif
+
+static const struct sdio_device_id rsi_dev_table[] = {
+ { SDIO_DEVICE(0x303, 0x100) },
+ { SDIO_DEVICE(0x041B, 0x0301) },
+ { SDIO_DEVICE(0x041B, 0x0201) },
+ { SDIO_DEVICE(0x041B, 0x9330) },
+ { /* Blank */},
+};
+
+static struct sdio_driver rsi_driver = {
+ .name = "RSI-SDIO WLAN",
+ .probe = rsi_probe,
+ .remove = rsi_disconnect,
+ .id_table = rsi_dev_table,
+#ifdef CONFIG_PM
+ .drv = {
+ .pm = &rsi_pm_ops,
+ }
+#endif
+};
+
+/**
+ * rsi_module_init() - This function registers the sdio module.
+ * @void: Void.
+ *
+ * Return: 0 on success.
+ */
+static int rsi_module_init(void)
+{
+ sdio_register_driver(&rsi_driver);
+ rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
+ return 0;
+}
+
+/**
+ * rsi_module_exit() - This function unregisters the sdio module.
+ * @void: Void.
+ *
+ * Return: None.
+ */
+static void rsi_module_exit(void)
+{
+ sdio_unregister_driver(&rsi_driver);
+ rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__);
+}
+
+module_init(rsi_module_init);
+module_exit(rsi_module_exit);
+
+MODULE_AUTHOR("Redpine Signals Inc");
+MODULE_DESCRIPTION("Common SDIO layer for RSI drivers");
+MODULE_SUPPORTED_DEVICE("RSI-91x");
+MODULE_DEVICE_TABLE(sdio, rsi_dev_table);
+MODULE_FIRMWARE(FIRMWARE_RSI9113);
+MODULE_VERSION("0.1");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
new file mode 100644
index 000000000000..f1cb99cafed8
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -0,0 +1,566 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "rsi_sdio.h"
+#include "rsi_common.h"
+
+/**
+ * rsi_sdio_master_access_msword() - This function sets the AHB master access
+ * MS word in the SDIO slave registers.
+ * @adapter: Pointer to the adapter structure.
+ * @ms_word: MS word to be initialized.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_master_access_msword(struct rsi_hw *adapter,
+ u16 ms_word)
+{
+ u8 byte;
+ u8 function = 0;
+ int status = 0;
+
+ byte = (u8)(ms_word & 0x00FF);
+
+ rsi_dbg(INIT_ZONE,
+ "%s: MASTER_ACCESS_MSBYTE:0x%x\n", __func__, byte);
+
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_MASTER_ACCESS_MSBYTE,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: fail to access MASTER_ACCESS_MSBYTE\n",
+ __func__);
+ return -1;
+ }
+
+ byte = (u8)(ms_word >> 8);
+
+ rsi_dbg(INIT_ZONE, "%s:MASTER_ACCESS_LSBYTE:0x%x\n", __func__, byte);
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_MASTER_ACCESS_LSBYTE,
+ &byte);
+ return status;
+}
+
+/**
+ * rsi_copy_to_card() - This function includes the actual functionality of
+ * copying the TA firmware to the card. Basically this
+ * function includes opening the TA file, reading the
+ * TA file and writing its contents in blocks of data.
+ * @common: Pointer to the driver private structure.
+ * @fw: Pointer to the firmware value to be written.
+ * @len: length of firmware file.
+ * @num_blocks: Number of blocks to be written to the card.
+ *
+ * Return: 0 on success and -1 on failure.
+ */
+static int rsi_copy_to_card(struct rsi_common *common,
+ const u8 *fw,
+ u32 len,
+ u32 num_blocks)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u32 indx, ii;
+ u32 block_size = dev->tx_blk_size;
+ u32 lsb_address;
+ __le32 data[] = { TA_HOLD_THREAD_VALUE, TA_SOFT_RST_CLR,
+ TA_PC_ZERO, TA_RELEASE_THREAD_VALUE };
+ u32 address[] = { TA_HOLD_THREAD_REG, TA_SOFT_RESET_REG,
+ TA_TH0_PC_REG, TA_RELEASE_THREAD_REG };
+ u32 base_address;
+ u16 msb_address;
+
+ base_address = TA_LOAD_ADDRESS;
+ msb_address = base_address >> 16;
+
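+ /* Write the firmware image block by block, bumping the master access
+ * MS word register whenever the write address crosses a 64KB boundary.
+ */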
+ for (indx = 0, ii = 0; ii < num_blocks; ii++, indx += block_size) {
+ lsb_address = ((u16) base_address | RSI_SD_REQUEST_MASTER);
+ if (rsi_sdio_write_register_multiple(adapter,
+ lsb_address,
+ (u8 *)(fw + indx),
+ block_size)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to load %s blk\n", __func__,
+ FIRMWARE_RSI9113);
+ return -1;
+ }
+ rsi_dbg(INIT_ZONE, "%s: loading block: %d\n", __func__, ii);
+ base_address += block_size;
+ if ((base_address >> 16) != msb_address) {
+ msb_address += 1;
+ if (rsi_sdio_master_access_msword(adapter,
+ msb_address)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word reg\n",
+ __func__);
+ return -1;
+ }
+ }
+ }
+
+ if (len % block_size) {
+ lsb_address = ((u16) base_address | RSI_SD_REQUEST_MASTER);
+ if (rsi_sdio_write_register_multiple(adapter,
+ lsb_address,
+ (u8 *)(fw + indx),
+ len % block_size)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to load f/w\n", __func__);
+ return -1;
+ }
+ }
+ rsi_dbg(INIT_ZONE,
+ "%s: Succesfully loaded TA instructions\n", __func__);
+
+ if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word to common reg\n",
+ __func__);
+ return -1;
+ }
+
+ for (ii = 0; ii < ARRAY_SIZE(data); ii++) {
+ /* Bringing TA out of reset */
+ if (rsi_sdio_write_register_multiple(adapter,
+ (address[ii] |
+ RSI_SD_REQUEST_MASTER),
+ (u8 *)&data[ii],
+ 4)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to hold TA threads\n", __func__);
+ return -1;
+ }
+ }
+
+ rsi_dbg(INIT_ZONE, "%s: loaded firmware\n", __func__);
+ return 0;
+}
+
+/**
+ * rsi_load_ta_instructions() - This function includes the actual functionality
+ * of loading the TA firmware. This function also
+ * includes opening the TA file, reading the TA
+ * file and writing its contents in blocks of data.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_load_ta_instructions(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u32 len;
+ u32 num_blocks;
+ const u8 *fw;
+ const struct firmware *fw_entry = NULL;
+ u32 block_size = dev->tx_blk_size;
+ int status = 0;
+ u32 base_address;
+ u16 msb_address;
+
+ if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word to common reg\n",
+ __func__);
+ return -1;
+ }
+ base_address = TA_LOAD_ADDRESS;
+ msb_address = (base_address >> 16);
+
+ if (rsi_sdio_master_access_msword(adapter, msb_address)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word reg\n", __func__);
+ return -1;
+ }
+
+ status = request_firmware(&fw_entry, FIRMWARE_RSI9113, adapter->device);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s Firmware file %s not found\n",
+ __func__, FIRMWARE_RSI9113);
+ return status;
+ }
+
+ fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!fw) {
+ release_firmware(fw_entry);
+ return -ENOMEM;
+ }
+ len = fw_entry->size;
+
+ if (len % 4)
+ len += (4 - (len % 4));
+
+ num_blocks = (len / block_size);
+
+ rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
+ rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
+
+ status = rsi_copy_to_card(common, fw, len, num_blocks);
+ kfree(fw);
+ release_firmware(fw_entry);
+ return status;
+}
+
+/**
+ * rsi_process_pkt() - This function reads the rx_blocks register and figures out
+ * the size of the rx pkt.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_process_pkt(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ u8 num_blks = 0;
+ u32 rcv_pkt_len = 0;
+ int status = 0;
+
+ status = rsi_sdio_read_register(adapter,
+ SDIO_RX_NUM_BLOCKS_REG,
+ &num_blks);
+
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to read pkt length from the card:\n",
+ __func__);
+ return status;
+ }
+ rcv_pkt_len = (num_blks * 256);
+
+ common->rx_data_pkt = kmalloc(rcv_pkt_len, GFP_KERNEL);
+ if (!common->rx_data_pkt) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in memory allocation\n",
+ __func__);
+ return -1;
+ }
+
+ status = rsi_sdio_host_intf_read_pkt(adapter,
+ common->rx_data_pkt,
+ rcv_pkt_len);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to read packet from card\n",
+ __func__);
+ goto fail;
+ }
+
+ status = rsi_read_pkt(common, rcv_pkt_len);
+ kfree(common->rx_data_pkt);
+ return status;
+
+fail:
+ kfree(common->rx_data_pkt);
+ return -1;
+}
+
+/**
+ * rsi_init_sdio_slave_regs() - This function does the actual initialization
+ * of SDBUS slave registers.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_init_sdio_slave_regs(struct rsi_hw *adapter)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u8 function = 0;
+ u8 byte;
+ int status = 0;
+
+ if (dev->next_read_delay) {
+ byte = dev->next_read_delay;
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_NXT_RD_DELAY2,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write SDIO_NXT_RD_DELAY2\n",
+ __func__);
+ return -1;
+ }
+ }
+
+ if (dev->sdio_high_speed_enable) {
+ rsi_dbg(INIT_ZONE, "%s: Enabling SDIO High speed\n", __func__);
+ byte = 0x3;
+
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_REG_HIGH_SPEED,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to enable SDIO high speed\n",
+ __func__);
+ return -1;
+ }
+ }
+
+ /* This tells SDIO FIFO when to start read to host */
+ rsi_dbg(INIT_ZONE, "%s: Initialzing SDIO read start level\n", __func__);
+ byte = 0x24;
+
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_READ_START_LVL,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write SDIO_READ_START_LVL\n", __func__);
+ return -1;
+ }
+
+ rsi_dbg(INIT_ZONE, "%s: Initialzing FIFO ctrl registers\n", __func__);
+ byte = (128 - 32);
+
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_READ_FIFO_CTL,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write SDIO_READ_FIFO_CTL\n", __func__);
+ return -1;
+ }
+
+ byte = 32;
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_WRITE_FIFO_CTL,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write SDIO_WRITE_FIFO_CTL\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * rsi_interrupt_handler() - This function read and process SDIO interrupts.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_interrupt_handler(struct rsi_hw *adapter)
+{
+ struct rsi_common *common = adapter->priv;
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ int status;
+ enum sdio_interrupt_type isr_type;
+ u8 isr_status = 0;
+ u8 fw_status = 0;
+
+ dev->rx_info.sdio_int_counter++;
+
+ do {
+ mutex_lock(&common->tx_rxlock);
+ status = rsi_sdio_read_register(common->priv,
+ RSI_FN1_INT_REGISTER,
+ &isr_status);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to Read Intr Status Register\n",
+ __func__);
+ mutex_unlock(&common->tx_rxlock);
+ return;
+ }
+
+ if (isr_status == 0) {
+ rsi_set_event(&common->tx_thread.event);
+ dev->rx_info.sdio_intr_status_zero++;
+ mutex_unlock(&common->tx_rxlock);
+ return;
+ }
+
+ rsi_dbg(ISR_ZONE, "%s: Intr_status = %x %d %d\n",
+ __func__, isr_status, (1 << MSDU_PKT_PENDING),
+ (1 << FW_ASSERT_IND));
+
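+ /* Service every interrupt cause packed in the status byte,
+ * clearing one bit per iteration.
+ */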
+ do {
+ RSI_GET_SDIO_INTERRUPT_TYPE(isr_status, isr_type);
+
+ switch (isr_type) {
+ case BUFFER_AVAILABLE:
+ dev->rx_info.watch_bufferfull_count = 0;
+ dev->rx_info.buffer_full = false;
+ dev->rx_info.mgmt_buffer_full = false;
+ rsi_sdio_ack_intr(common->priv,
+ (1 << PKT_BUFF_AVAILABLE));
+ rsi_set_event((&common->tx_thread.event));
+ rsi_dbg(ISR_ZONE,
+ "%s: ==> BUFFER_AVILABLE <==\n",
+ __func__);
+ dev->rx_info.buf_avilable_counter++;
+ break;
+
+ case FIRMWARE_ASSERT_IND:
+ rsi_dbg(ERR_ZONE,
+ "%s: ==> FIRMWARE Assert <==\n",
+ __func__);
+ status = rsi_sdio_read_register(common->priv,
+ SDIO_FW_STATUS_REG,
+ &fw_status);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to read f/w reg\n",
+ __func__);
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Firmware Status is 0x%x\n",
+ __func__, fw_status);
+ rsi_sdio_ack_intr(common->priv,
+ (1 << FW_ASSERT_IND));
+ }
+
+ common->fsm_state = FSM_CARD_NOT_READY;
+ break;
+
+ case MSDU_PACKET_PENDING:
+ rsi_dbg(ISR_ZONE, "Pkt pending interrupt\n");
+ dev->rx_info.total_sdio_msdu_pending_intr++;
+
+ status = rsi_process_pkt(common);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to read pkt\n",
+ __func__);
+ mutex_unlock(&common->tx_rxlock);
+ return;
+ }
+ break;
+ default:
+ rsi_sdio_ack_intr(common->priv, isr_status);
+ dev->rx_info.total_sdio_unknown_intr++;
+ isr_status = 0;
+ rsi_dbg(ISR_ZONE,
+ "Unknown Interrupt %x\n",
+ isr_status);
+ break;
+ }
+ isr_status ^= BIT(isr_type - 1);
+ } while (isr_status);
+ mutex_unlock(&common->tx_rxlock);
+ } while (1);
+}
+
+/**
+ * rsi_sdio_device_init() - This function initializes the HAL.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_device_init(struct rsi_common *common)
+{
+ if (rsi_load_ta_instructions(common))
+ return -1;
+
+ if (rsi_sdio_master_access_msword(common->priv, MISC_CFG_BASE_ADDR)) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to set ms word reg\n",
+ __func__);
+ return -1;
+ }
+ rsi_dbg(INIT_ZONE,
+ "%s: Setting ms word to 0x41050000\n", __func__);
+
+ return 0;
+}
+
+/**
+ * rsi_sdio_read_buffer_status_register() - This function is used to read the
+ * buffer status register and set
+ * the relevant fields in the
+ * rsi_91x_sdiodev struct.
+ * @adapter: Pointer to the driver hw structure.
+ * @q_num: The Q number whose status is to be found.
+ *
+ * Return: status: -1 on failure or else queue full/stop is indicated.
+ */
+int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num)
+{
+ struct rsi_common *common = adapter->priv;
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u8 buf_status = 0;
+ int status = 0;
+
+ status = rsi_sdio_read_register(common->priv,
+ RSI_DEVICE_BUFFER_STATUS_REGISTER,
+ &buf_status);
+
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to read status register\n", __func__);
+ return -1;
+ }
+
+ if (buf_status & (BIT(PKT_MGMT_BUFF_FULL))) {
+ if (!dev->rx_info.mgmt_buffer_full)
+ dev->rx_info.mgmt_buf_full_counter++;
+ dev->rx_info.mgmt_buffer_full = true;
+ } else {
+ dev->rx_info.mgmt_buffer_full = false;
+ }
+
+ if (buf_status & (BIT(PKT_BUFF_FULL))) {
+ if (!dev->rx_info.buffer_full)
+ dev->rx_info.buf_full_counter++;
+ dev->rx_info.buffer_full = true;
+ } else {
+ dev->rx_info.buffer_full = false;
+ }
+
+ if (buf_status & (BIT(PKT_BUFF_SEMI_FULL))) {
+ if (!dev->rx_info.semi_buffer_full)
+ dev->rx_info.buf_semi_full_counter++;
+ dev->rx_info.semi_buffer_full = true;
+ } else {
+ dev->rx_info.semi_buffer_full = false;
+ }
+
+ if ((q_num == MGMT_SOFT_Q) && (dev->rx_info.mgmt_buffer_full))
+ return QUEUE_FULL;
+
+ if (dev->rx_info.buffer_full)
+ return QUEUE_FULL;
+
+ return QUEUE_NOT_FULL;
+}
+
+/**
+ * rsi_sdio_determine_event_timeout() - This Function determines the event
+ * timeout duration.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: timeout duration is returned.
+ */
+int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+
+ /* Once buffer full is seen, event timeout to occur every 2 msecs */
+ if (dev->rx_info.buffer_full)
+ return 2;
+
+ return EVENT_WAIT_FOREVER;
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
new file mode 100644
index 000000000000..bb1bf96670eb
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -0,0 +1,575 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include "rsi_usb.h"
+
+/**
+ * rsi_usb_card_write() - This function writes to the USB Card.
+ * @adapter: Pointer to the adapter structure.
+ * @buf: Pointer to the buffer from where the data has to be taken.
+ * @len: Length to be written.
+ * @endpoint: Type of endpoint.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_usb_card_write(struct rsi_hw *adapter,
+ void *buf,
+ u16 len,
+ u8 endpoint)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ int status;
+ s32 transfer;
+
+ status = usb_bulk_msg(dev->usbdev,
+ usb_sndbulkpipe(dev->usbdev,
+ dev->bulkout_endpoint_addr[endpoint - 1]),
+ buf,
+ len,
+ &transfer,
+ HZ * 5);
+
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Card write failed with error code :%10d\n", status);
+ dev->write_fail = 1;
+ }
+ return status;
+}
+
+/**
+ * rsi_write_multiple() - This function writes multiple bytes of information
+ * to the USB card.
+ * @adapter: Pointer to the adapter structure.
+ * @endpoint: Endpoint number to write to.
+ * @data: Pointer to the data that has to be written.
+ * @count: Number of multiple bytes to be written.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_write_multiple(struct rsi_hw *adapter,
+ u8 endpoint,
+ u8 *data,
+ u32 count)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ u8 *seg = dev->tx_buffer;
+
+ if (dev->write_fail)
+ return 0;
+
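+ /* Management frames are copied behind RSI_USB_TX_HEAD_ROOM in the
+ * bounce buffer; data frames are assumed to already have that much
+ * headroom in front of the payload.
+ */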
+ if (endpoint == MGMT_EP) {
+ memset(seg, 0, RSI_USB_TX_HEAD_ROOM);
+ memcpy(seg + RSI_USB_TX_HEAD_ROOM, data, count);
+ } else {
+ seg = ((u8 *)data - RSI_USB_TX_HEAD_ROOM);
+ }
+
+ return rsi_usb_card_write(adapter,
+ seg,
+ count + RSI_USB_TX_HEAD_ROOM,
+ endpoint);
+}
+
+/**
+ * rsi_find_bulk_in_and_out_endpoints() - This function initializes the bulk
+ * endpoints to the device.
+ * @interface: Pointer to the USB interface structure.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: ret_val: 0 on success, -ENOMEM on failure.
+ */
+static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
+ struct rsi_hw *adapter)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ __le16 buffer_size;
+ int ii, bep_found = 0;
+
+ iface_desc = &(interface->altsetting[0]);
+
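+ /* Walk the endpoint descriptors, recording the first bulk-IN endpoint
+ * and up to MAX_BULK_EP bulk-OUT endpoints.
+ */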
+ for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) {
+ endpoint = &(iface_desc->endpoint[ii].desc);
+
+ if ((!(dev->bulkin_endpoint_addr)) &&
+ (endpoint->bEndpointAddress & USB_DIR_IN) &&
+ ((endpoint->bmAttributes &
+ USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK)) {
+ buffer_size = endpoint->wMaxPacketSize;
+ dev->bulkin_size = buffer_size;
+ dev->bulkin_endpoint_addr =
+ endpoint->bEndpointAddress;
+ }
+
+ if (!dev->bulkout_endpoint_addr[bep_found] &&
+ !(endpoint->bEndpointAddress & USB_DIR_IN) &&
+ ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK)) {
+ dev->bulkout_endpoint_addr[bep_found] =
+ endpoint->bEndpointAddress;
+ buffer_size = endpoint->wMaxPacketSize;
+ dev->bulkout_size[bep_found] = buffer_size;
+ bep_found++;
+ }
+
+ if (bep_found >= MAX_BULK_EP)
+ break;
+ }
+
+ if (!(dev->bulkin_endpoint_addr) &&
+ (dev->bulkout_endpoint_addr[0]))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * rsi_usb_reg_read() - This function reads data from the given register address.
+ * @usbdev: Pointer to the usb_device structure.
+ * @reg: Address of the register to be read.
+ * @value: Pointer to the value read from the register.
+ * @len: Length of the data to be read.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int rsi_usb_reg_read(struct usb_device *usbdev,
+ u32 reg,
+ u16 *value,
+ u16 len)
+{
+ u8 temp_buf[4];
+ int status = 0;
+
+ status = usb_control_msg(usbdev,
+ usb_rcvctrlpipe(usbdev, 0),
+ USB_VENDOR_REGISTER_READ,
+ USB_TYPE_VENDOR,
+ ((reg & 0xffff0000) >> 16), (reg & 0xffff),
+ (void *)temp_buf,
+ len,
+ HZ * 5);
+
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Reg read failed with error code :%d\n",
+ __func__, status);
+ return status;
+ }
+ *value = (temp_buf[0] | (temp_buf[1] << 8));
+ return status;
+}
+
+/**
+ * rsi_usb_reg_write() - This function writes the given data into the given
+ * register address.
+ * @usbdev: Pointer to the usb_device structure.
+ * @reg: Address of the register.
+ * @value: Value to write.
+ * @len: Length of data to be written.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int rsi_usb_reg_write(struct usb_device *usbdev,
+ u32 reg,
+ u16 value,
+ u16 len)
+{
+ u8 usb_reg_buf[4];
+ int status = 0;
+
+ usb_reg_buf[0] = (value & 0x00ff);
+ usb_reg_buf[1] = (value & 0xff00) >> 8;
+ usb_reg_buf[2] = 0x0;
+ usb_reg_buf[3] = 0x0;
+
+ status = usb_control_msg(usbdev,
+ usb_sndctrlpipe(usbdev, 0),
+ USB_VENDOR_REGISTER_WRITE,
+ USB_TYPE_VENDOR,
+ ((reg & 0xffff0000) >> 16),
+ (reg & 0xffff),
+ (void *)usb_reg_buf,
+ len,
+ HZ * 5);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Reg write failed with error code :%d\n",
+ __func__, status);
+ }
+ return status;
+}
+
+/**
+ * rsi_rx_done_handler() - This function is called when a packet is received
+ * from the USB stack. This is the receive-done callback.
+ * @urb: Received URB.
+ *
+ * Return: None.
+ */
+static void rsi_rx_done_handler(struct urb *urb)
+{
+ struct rsi_hw *adapter = urb->context;
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+
+ if (urb->status)
+ return;
+
+ rsi_set_event(&dev->rx_thread.event);
+}
+
+/**
+ * rsi_rx_urb_submit() - This function submits the given URB to the USB stack.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_rx_urb_submit(struct rsi_hw *adapter)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ struct urb *urb = dev->rx_usb_urb[0];
+ int status;
+
+ usb_fill_bulk_urb(urb,
+ dev->usbdev,
+ usb_rcvbulkpipe(dev->usbdev,
+ dev->bulkin_endpoint_addr),
+ urb->transfer_buffer,
+ 3000,
+ rsi_rx_done_handler,
+ adapter);
+
+ status = usb_submit_urb(urb, GFP_KERNEL);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Failed in urb submission\n", __func__);
+
+ return status;
+}
+
+/**
+ * rsi_usb_write_register_multiple() - This function writes a block of data
+ * to the device registers starting at the given address.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the first register.
+ * @data: Pointer to the data that has to be written.
+ * @count: Number of bytes to be written.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int rsi_usb_write_register_multiple(struct rsi_hw *adapter,
+ u32 addr,
+ u8 *data,
+ u32 count)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ u8 *buf;
+ u32 transfer;
+ int status = 0;
+
+ buf = kzalloc(4096, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ while (count) {
+ transfer = min_t(int, count, 4096);
+ memcpy(buf, data, transfer);
+ status = usb_control_msg(dev->usbdev,
+ usb_sndctrlpipe(dev->usbdev, 0),
+ USB_VENDOR_REGISTER_WRITE,
+ USB_TYPE_VENDOR,
+ ((addr & 0xffff0000) >> 16),
+ (addr & 0xffff),
+ (void *)buf,
+ transfer,
+ HZ * 5);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Reg write failed with error code :%d\n",
+ status);
+ kfree(buf);
+ return status;
+ }
+ count -= transfer;
+ data += transfer;
+ addr += transfer;
+ }
+
+ kfree(buf);
+ return 0;
+}
+
+/**
+ * rsi_usb_host_intf_write_pkt() - This function writes the packet to the
+ * USB card.
+ * @adapter: Pointer to the adapter structure.
+ * @pkt: Pointer to the data to be written on to the card.
+ * @len: Length of the data to be written on to the card.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter,
+ u8 *pkt,
+ u32 len)
+{
+ u32 queueno = ((pkt[1] >> 4) & 0xf);
+ u8 endpoint;
+
+ endpoint = ((queueno == RSI_WIFI_MGMT_Q) ? MGMT_EP : DATA_EP);
+
+ return rsi_write_multiple(adapter,
+ endpoint,
+ (u8 *)pkt,
+ len);
+}
+
+/**
+ * rsi_deinit_usb_interface() - This function deinitializes the usb interface.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+static void rsi_deinit_usb_interface(struct rsi_hw *adapter)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+
+ rsi_kill_thread(&dev->rx_thread);
+ kfree(adapter->priv->rx_data_pkt);
+ kfree(dev->tx_buffer);
+}
+
+/**
+ * rsi_init_usb_interface() - This function initializes the usb interface.
+ * @adapter: Pointer to the adapter structure.
+ * @pfunction: Pointer to USB interface structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_init_usb_interface(struct rsi_hw *adapter,
+ struct usb_interface *pfunction)
+{
+ struct rsi_91x_usbdev *rsi_dev;
+ struct rsi_common *common = adapter->priv;
+ int status;
+
+ rsi_dev = kzalloc(sizeof(*rsi_dev), GFP_KERNEL);
+ if (!rsi_dev)
+ return -ENOMEM;
+
+ adapter->rsi_dev = rsi_dev;
+ rsi_dev->usbdev = interface_to_usbdev(pfunction);
+
+ if (rsi_find_bulk_in_and_out_endpoints(pfunction, adapter))
+ return -EINVAL;
+
+ adapter->device = &pfunction->dev;
+ usb_set_intfdata(pfunction, adapter);
+
+ common->rx_data_pkt = kmalloc(2048, GFP_KERNEL);
+ if (!common->rx_data_pkt) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to allocate memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ rsi_dev->tx_buffer = kmalloc(2048, GFP_ATOMIC);
+ rsi_dev->rx_usb_urb[0] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rsi_dev->tx_buffer || !rsi_dev->rx_usb_urb[0]) {
+ status = -ENOMEM;
+ goto fail;
+ }
+ rsi_dev->rx_usb_urb[0]->transfer_buffer = adapter->priv->rx_data_pkt;
+ rsi_dev->tx_blk_size = 252;
+
+ /* Initializing function callbacks */
+ adapter->rx_urb_submit = rsi_rx_urb_submit;
+ adapter->host_intf_write_pkt = rsi_usb_host_intf_write_pkt;
+ adapter->check_hw_queue_status = rsi_usb_check_queue_status;
+ adapter->determine_event_timeout = rsi_usb_event_timeout;
+
+ rsi_init_event(&rsi_dev->rx_thread.event);
+ status = rsi_create_kthread(common, &rsi_dev->rx_thread,
+ rsi_usb_rx_thread, "RX-Thread");
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__);
+ goto fail;
+ }
+
+#ifdef CONFIG_RSI_DEBUGFS
+ /* USB uses one less than MAX_DEBUGFS_ENTRIES debugfs entries */
+ adapter->num_debugfs_entries = (MAX_DEBUGFS_ENTRIES - 1);
+#endif
+
+ rsi_dbg(INIT_ZONE, "%s: Enabled the interface\n", __func__);
+ return 0;
+
+fail:
+ kfree(rsi_dev->tx_buffer);
+ kfree(common->rx_data_pkt);
+ return status;
+}
+
+/**
+ * rsi_probe() - This function is called by kernel when the driver provided
+ * Vendor and device IDs are matched. All the initialization
+ * work is done here.
+ * @pfunction: Pointer to the USB interface structure.
+ * @id: Pointer to the usb_device_id structure.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int rsi_probe(struct usb_interface *pfunction,
+ const struct usb_device_id *id)
+{
+ struct rsi_hw *adapter;
+ struct rsi_91x_usbdev *dev;
+ u16 fw_status;
+
+ rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);
+
+ adapter = rsi_91x_init();
+ if (!adapter) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n",
+ __func__);
+ return 1;
+ }
+
+ if (rsi_init_usb_interface(adapter, pfunction)) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to init usb interface\n",
+ __func__);
+ goto err;
+ }
+
+ rsi_dbg(ERR_ZONE, "%s: Initialized os intf ops\n", __func__);
+
+ dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+
+ if (rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2) < 0)
+ goto err1;
+ else
+ fw_status &= 1;
+
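+ /* Only bit 0 of FW_STATUS_REG is checked here: when it is clear the
+ * firmware has not been loaded yet, so the device is initialized and
+ * the ready magic number is written.
+ */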
+ if (!fw_status) {
+ if (rsi_usb_device_init(adapter->priv)) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in device init\n",
+ __func__);
+ goto err1;
+ }
+
+ if (rsi_usb_reg_write(dev->usbdev,
+ USB_INTERNAL_REG_1,
+ RSI_USB_READY_MAGIC_NUM, 1) < 0)
+ goto err1;
+ rsi_dbg(INIT_ZONE, "%s: Performed device init\n", __func__);
+ }
+
+ if (rsi_rx_urb_submit(adapter))
+ goto err1;
+
+ return 0;
+err1:
+ rsi_deinit_usb_interface(adapter);
+err:
+ rsi_91x_deinit(adapter);
+ rsi_dbg(ERR_ZONE, "%s: Failed in probe...Exiting\n", __func__);
+ return 1;
+}
+
+/**
+ * rsi_disconnect() - This function performs the reverse of the probe function;
+ * it deinitializes the driver structures.
+ * @pfunction: Pointer to the USB interface structure.
+ *
+ * Return: None.
+ */
+static void rsi_disconnect(struct usb_interface *pfunction)
+{
+ struct rsi_hw *adapter = usb_get_intfdata(pfunction);
+
+ if (!adapter)
+ return;
+
+ rsi_mac80211_detach(adapter);
+ rsi_deinit_usb_interface(adapter);
+ rsi_91x_deinit(adapter);
+
+ rsi_dbg(INFO_ZONE, "%s: Deinitialization completed\n", __func__);
+}
+
+#ifdef CONFIG_PM
+static int rsi_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ /* Not yet implemented */
+ return -ENOSYS;
+}
+
+static int rsi_resume(struct usb_interface *intf)
+{
+ /* Not yet implemented */
+ return -ENOSYS;
+}
+#endif
+
+static const struct usb_device_id rsi_dev_table[] = {
+ { USB_DEVICE(0x0303, 0x0100) },
+ { USB_DEVICE(0x041B, 0x0301) },
+ { USB_DEVICE(0x041B, 0x0201) },
+ { USB_DEVICE(0x041B, 0x9330) },
+ { /* Blank */},
+};
+
+static struct usb_driver rsi_driver = {
+ .name = "RSI-USB WLAN",
+ .probe = rsi_probe,
+ .disconnect = rsi_disconnect,
+ .id_table = rsi_dev_table,
+#ifdef CONFIG_PM
+ .suspend = rsi_suspend,
+ .resume = rsi_resume,
+#endif
+};
+
+/**
+ * rsi_module_init() - This function registers the client driver.
+ * @void: Void.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int rsi_module_init(void)
+{
+ rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
+ return usb_register(&rsi_driver);
+}
+
+/**
+ * rsi_module_exit() - This function unregisters the client driver.
+ * @void: Void.
+ *
+ * Return: None.
+ */
+static void rsi_module_exit(void)
+{
+ usb_deregister(&rsi_driver);
+ rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__);
+}
+
+module_init(rsi_module_init);
+module_exit(rsi_module_exit);
+
+MODULE_AUTHOR("Redpine Signals Inc");
+MODULE_DESCRIPTION("Common USB layer for RSI drivers");
+MODULE_SUPPORTED_DEVICE("RSI-91x");
+MODULE_DEVICE_TABLE(usb, rsi_dev_table);
+MODULE_FIRMWARE(FIRMWARE_RSI9113);
+MODULE_VERSION("0.1");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
new file mode 100644
index 000000000000..1106ce76707e
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -0,0 +1,177 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "rsi_usb.h"
+
+/**
+ * rsi_copy_to_card() - This function copies the TA firmware to the card by
+ * writing it out in blocks of data.
+ * @common: Pointer to the driver private structure.
+ * @fw: Pointer to the firmware data to be written.
+ * @len: Length of the firmware data.
+ * @num_blocks: Number of blocks to be written to the card.
+ *
+ * Return: 0 on success, -EIO on failure.
+ */
+static int rsi_copy_to_card(struct rsi_common *common,
+ const u8 *fw,
+ u32 len,
+ u32 num_blocks)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ u32 indx, ii;
+ u32 block_size = dev->tx_blk_size;
+ u32 lsb_address;
+ u32 base_address;
+
+ base_address = TA_LOAD_ADDRESS;
+
+ for (indx = 0, ii = 0; ii < num_blocks; ii++, indx += block_size) {
+ lsb_address = base_address;
+ if (rsi_usb_write_register_multiple(adapter,
+ lsb_address,
+ (u8 *)(fw + indx),
+ block_size)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to load %s blk\n", __func__,
+ FIRMWARE_RSI9113);
+ return -EIO;
+ }
+ rsi_dbg(INIT_ZONE, "%s: loading block: %d\n", __func__, ii);
+ base_address += block_size;
+ }
+
+ if (len % block_size) {
+ lsb_address = base_address;
+ if (rsi_usb_write_register_multiple(adapter,
+ lsb_address,
+ (u8 *)(fw + indx),
+ len % block_size)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to load %s blk\n", __func__,
+ FIRMWARE_RSI9113);
+ return -EIO;
+ }
+ }
+ rsi_dbg(INIT_ZONE,
+ "%s: Succesfully loaded %s instructions\n", __func__,
+ FIRMWARE_RSI9113);
+
+ rsi_dbg(INIT_ZONE, "%s: loaded firmware\n", __func__);
+ return 0;
+}
+
+/**
+ * rsi_usb_rx_thread() - This is a kernel thread to receive the packets from
+ * the USB device.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+void rsi_usb_rx_thread(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ int status;
+
+ do {
+ rsi_wait_event(&dev->rx_thread.event, EVENT_WAIT_FOREVER);
+
+ if (atomic_read(&dev->rx_thread.thread_done))
+ goto out;
+
+ mutex_lock(&common->tx_rxlock);
+ status = rsi_read_pkt(common, 0);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Failed To read data", __func__);
+ mutex_unlock(&common->tx_rxlock);
+ return;
+ }
+ mutex_unlock(&common->tx_rxlock);
+ rsi_reset_event(&dev->rx_thread.event);
+ if (adapter->rx_urb_submit(adapter)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed in urb submission", __func__);
+ return;
+ }
+ } while (1);
+
+out:
+ rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__);
+ complete_and_exit(&dev->rx_thread.completion, 0);
+}
+
+
+/**
+ * rsi_load_ta_instructions() - This function loads the TA firmware by
+ * requesting the firmware file and writing it to
+ * the card in blocks of data.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int rsi_load_ta_instructions(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ const struct firmware *fw_entry = NULL;
+ u32 block_size = dev->tx_blk_size;
+ const u8 *fw;
+ u32 num_blocks, len;
+ int status = 0;
+
+ status = request_firmware(&fw_entry, FIRMWARE_RSI9113, adapter->device);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s Firmware file %s not found\n",
+ __func__, FIRMWARE_RSI9113);
+ return status;
+ }
+
+ fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!fw) {
+ release_firmware(fw_entry);
+ return -ENOMEM;
+ }
+ len = fw_entry->size;
+
+ if (len % 4)
+ len += (4 - (len % 4));
+
+ num_blocks = (len / block_size);
+
+ rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
+ rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
+
+ status = rsi_copy_to_card(common, fw, len, num_blocks);
+ kfree(fw);
+ release_firmware(fw_entry);
+ return status;
+}
+
+/**
+ * rsi_usb_device_init() - This function initializes the HAL.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -EIO on failure.
+ */
+int rsi_usb_device_init(struct rsi_common *common)
+{
+ if (rsi_load_ta_instructions(common))
+ return -EIO;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h
new file mode 100644
index 000000000000..5e2721f7909c
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_boot_params.h
@@ -0,0 +1,126 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_BOOTPARAMS_HEADER_H__
+#define __RSI_BOOTPARAMS_HEADER_H__
+
+#define CRYSTAL_GOOD_TIME BIT(0)
+#define BOOTUP_MODE_INFO BIT(1)
+#define WIFI_TAPLL_CONFIGS BIT(5)
+#define WIFI_PLL960_CONFIGS BIT(6)
+#define WIFI_AFEPLL_CONFIGS BIT(7)
+#define WIFI_SWITCH_CLK_CONFIGS BIT(8)
+
+#define TA_PLL_M_VAL_20 8
+#define TA_PLL_N_VAL_20 1
+#define TA_PLL_P_VAL_20 4
+
+#define PLL960_M_VAL_20 0x14
+#define PLL960_N_VAL_20 0
+#define PLL960_P_VAL_20 5
+
+#define UMAC_CLK_40MHZ 40
+
+#define TA_PLL_M_VAL_40 46
+#define TA_PLL_N_VAL_40 3
+#define TA_PLL_P_VAL_40 3
+
+#define PLL960_M_VAL_40 0x14
+#define PLL960_N_VAL_40 0
+#define PLL960_P_VAL_40 5
+
+#define UMAC_CLK_20BW \
+ (((TA_PLL_M_VAL_20 + 1) * 40) / \
+ ((TA_PLL_N_VAL_20 + 1) * (TA_PLL_P_VAL_20 + 1)))
+#define VALID_20 \
+ (WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | WIFI_SWITCH_CLK_CONFIGS)
+#define UMAC_CLK_40BW \
+ (((TA_PLL_M_VAL_40 + 1) * 40) / \
+ ((TA_PLL_N_VAL_40 + 1) * (TA_PLL_P_VAL_40 + 1)))
+#define VALID_40 \
+ (WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | WIFI_SWITCH_CLK_CONFIGS | \
+ WIFI_TAPLL_CONFIGS | CRYSTAL_GOOD_TIME | BOOTUP_MODE_INFO)
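+
+/* For reference, with the values above these macros evaluate to:
+ * UMAC_CLK_20BW = ((8 + 1) * 40) / ((1 + 1) * (4 + 1)) = 36
+ * UMAC_CLK_40BW = ((46 + 1) * 40) / ((3 + 1) * (3 + 1)) = 117 (integer division)
+ */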
+
+/* structure to store configs related to TAPLL programming */
+struct tapll_info {
+ __le16 pll_reg_1;
+ __le16 pll_reg_2;
+} __packed;
+
+/* structure to store configs related to PLL960 programming */
+struct pll960_info {
+ __le16 pll_reg_1;
+ __le16 pll_reg_2;
+ __le16 pll_reg_3;
+} __packed;
+
+/* structure to store configs related to AFEPLL programming */
+struct afepll_info {
+ __le16 pll_reg;
+} __packed;
+
+/* structure to store all the PLL configs */
+struct pll_config {
+ struct tapll_info tapll_info_g;
+ struct pll960_info pll960_info_g;
+ struct afepll_info afepll_info_g;
+} __packed;
+
+/* structure to store configs related to UMAC clk programming */
+struct switch_clk {
+ __le16 switch_clk_info;
+ /* If switch_bbp_lmac_clk_reg is set then this value will be programmed
+ * into reg
+ */
+ __le16 bbp_lmac_clk_reg_val;
+ /* if switch_umac_clk is set then this value will be programmed */
+ __le16 umac_clock_reg_config;
+ /* if switch_qspi_clk is set then this value will be programmed */
+ __le16 qspi_uart_clock_reg_config;
+} __packed;
+
+struct device_clk_info {
+ struct pll_config pll_config_g;
+ struct switch_clk switch_clk_g;
+} __packed;
+
+struct bootup_params {
+ __le16 magic_number;
+ __le16 crystal_good_time;
+ __le32 valid;
+ __le32 reserved_for_valids;
+ __le16 bootup_mode_info;
+ /* configuration used for digital loop back */
+ __le16 digital_loop_back_params;
+ __le16 rtls_timestamp_en;
+ __le16 host_spi_intr_cfg;
+ struct device_clk_info device_clk_info[3];
+ /* ulp buckboost wait time */
+ __le32 buckboost_wakeup_cnt;
+ /* pmu wakeup wait time & WDT EN info */
+ __le16 pmu_wakeup_wait;
+ u8 shutdown_wait_time;
+ /* Sleep clock source selection */
+ u8 pmu_slp_clkout_sel;
+ /* WDT programming values */
+ __le32 wdt_prog_value;
+ /* WDT soc reset delay */
+ __le32 wdt_soc_rst_delay;
+ /* dcdc modes configs */
+ __le32 dcdc_operation_mode;
+ __le32 soc_reset_wait_cnt;
+} __packed;
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
new file mode 100644
index 000000000000..f2f70784d4ad
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -0,0 +1,87 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_COMMON_H__
+#define __RSI_COMMON_H__
+
+#include <linux/kthread.h>
+
+#define EVENT_WAIT_FOREVER 0
+#define TA_LOAD_ADDRESS 0x00
+#define FIRMWARE_RSI9113 "rsi_91x.fw"
+#define QUEUE_NOT_FULL 1
+#define QUEUE_FULL 0
+
+static inline int rsi_init_event(struct rsi_event *pevent)
+{
+ atomic_set(&pevent->event_condition, 1);
+ init_waitqueue_head(&pevent->event_queue);
+ return 0;
+}
+
+static inline int rsi_wait_event(struct rsi_event *event, u32 timeout)
+{
+ int status = 0;
+
+ if (!timeout)
+ status = wait_event_interruptible(event->event_queue,
+ (atomic_read(&event->event_condition) == 0));
+ else
+ status = wait_event_interruptible_timeout(event->event_queue,
+ (atomic_read(&event->event_condition) == 0),
+ timeout);
+ return status;
+}
+
+static inline void rsi_set_event(struct rsi_event *event)
+{
+ atomic_set(&event->event_condition, 0);
+ wake_up_interruptible(&event->event_queue);
+}
+
+static inline void rsi_reset_event(struct rsi_event *event)
+{
+ atomic_set(&event->event_condition, 1);
+}
+
+static inline int rsi_create_kthread(struct rsi_common *common,
+ struct rsi_thread *thread,
+ void *func_ptr,
+ u8 *name)
+{
+ init_completion(&thread->completion);
+ thread->task = kthread_run(func_ptr, common, name);
+ if (IS_ERR(thread->task))
+ return (int)PTR_ERR(thread->task);
+
+ return 0;
+}
+
+static inline int rsi_kill_thread(struct rsi_thread *handle)
+{
+ atomic_inc(&handle->thread_done);
+ rsi_set_event(&handle->event);
+
+ wait_for_completion(&handle->completion);
+ return kthread_stop(handle->task);
+}
+
+void rsi_mac80211_detach(struct rsi_hw *hw);
+u16 rsi_get_connected_channel(struct rsi_hw *adapter);
+struct rsi_hw *rsi_91x_init(void);
+void rsi_91x_deinit(struct rsi_hw *adapter);
+int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len);
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h
new file mode 100644
index 000000000000..580ad3b3f710
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_debugfs.h
@@ -0,0 +1,48 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_DEBUGFS_H__
+#define __RSI_DEBUGFS_H__
+
+#include "rsi_main.h"
+#include <linux/debugfs.h>
+
+#ifndef CONFIG_RSI_DEBUGFS
+static inline int rsi_init_dbgfs(struct rsi_hw *adapter)
+{
+ return 0;
+}
+
+static inline void rsi_remove_dbgfs(struct rsi_hw *adapter)
+{
+ return;
+}
+#else
+struct rsi_dbg_files {
+ const char *name;
+ umode_t perms;
+ const struct file_operations fops;
+};
+
+struct rsi_debugfs {
+ struct dentry *subdir;
+ struct rsi_dbg_ops *dfs_get_ops;
+ struct dentry *rsi_files[MAX_DEBUGFS_ENTRIES];
+};
+int rsi_init_dbgfs(struct rsi_hw *adapter);
+void rsi_remove_dbgfs(struct rsi_hw *adapter);
+#endif
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
new file mode 100644
index 000000000000..2cb73e7edb98
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -0,0 +1,218 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_MAIN_H__
+#define __RSI_MAIN_H__
+
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/mac80211.h>
+
+#define ERR_ZONE BIT(0) /* For Error Msgs */
+#define INFO_ZONE BIT(1) /* For General Status Msgs */
+#define INIT_ZONE BIT(2) /* For Driver Init Seq Msgs */
+#define MGMT_TX_ZONE BIT(3) /* For TX Mgmt Path Msgs */
+#define MGMT_RX_ZONE BIT(4) /* For RX Mgmt Path Msgs */
+#define DATA_TX_ZONE BIT(5) /* For TX Data Path Msgs */
+#define DATA_RX_ZONE BIT(6) /* For RX Data Path Msgs */
+#define FSM_ZONE BIT(7) /* For State Machine Msgs */
+#define ISR_ZONE BIT(8) /* For Interrupt Msgs */
+
+#define FSM_CARD_NOT_READY 0
+#define FSM_BOOT_PARAMS_SENT 1
+#define FSM_EEPROM_READ_MAC_ADDR 2
+#define FSM_RESET_MAC_SENT 3
+#define FSM_RADIO_CAPS_SENT 4
+#define FSM_BB_RF_PROG_SENT 5
+#define FSM_MAC_INIT_DONE 6
+
+extern u32 rsi_zone_enabled;
+extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
+
+#define RSI_MAX_VIFS 1
+#define NUM_EDCA_QUEUES 4
+#define IEEE80211_ADDR_LEN 6
+#define FRAME_DESC_SZ 16
+#define MIN_802_11_HDR_LEN 24
+
+#define DATA_QUEUE_WATER_MARK 400
+#define MIN_DATA_QUEUE_WATER_MARK 300
+#define MULTICAST_WATER_MARK 200
+#define MAC_80211_HDR_FRAME_CONTROL 0
+#define WME_NUM_AC 4
+#define NUM_SOFT_QUEUES 5
+#define MAX_HW_QUEUES 8
+#define INVALID_QUEUE 0xff
+#define MAX_CONTINUOUS_VO_PKTS 8
+#define MAX_CONTINUOUS_VI_PKTS 4
+
+/* Queue information */
+#define RSI_WIFI_MGMT_Q 0x4
+#define RSI_WIFI_DATA_Q 0x5
+#define IEEE80211_MGMT_FRAME 0x00
+#define IEEE80211_CTL_FRAME 0x04
+
+#define IEEE80211_QOS_TID 0x0f
+#define IEEE80211_NONQOS_TID 16
+
+#define MAX_DEBUGFS_ENTRIES 4
+
+#define TID_TO_WME_AC(_tid) ( \
+ ((_tid) == 0 || (_tid) == 3) ? BE_Q : \
+ ((_tid) < 3) ? BK_Q : \
+ ((_tid) < 6) ? VI_Q : \
+ VO_Q)
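+/* The mapping above resolves to:
+ * TID 0, 3 -> BE_Q; TID 1, 2 -> BK_Q; TID 4, 5 -> VI_Q; TID >= 6 -> VO_Q
+ */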
+
+#define WME_AC(_q) ( \
+ ((_q) == BK_Q) ? IEEE80211_AC_BK : \
+ ((_q) == BE_Q) ? IEEE80211_AC_BE : \
+ ((_q) == VI_Q) ? IEEE80211_AC_VI : \
+ IEEE80211_AC_VO)
+
+struct version_info {
+ u16 major;
+ u16 minor;
+ u16 release_num;
+ u16 patch_num;
+} __packed;
+
+struct skb_info {
+ s8 rssi;
+ u32 flags;
+ u16 channel;
+ s8 tid;
+ s8 sta_id;
+};
+
+enum edca_queue {
+ BK_Q,
+ BE_Q,
+ VI_Q,
+ VO_Q,
+ MGMT_SOFT_Q
+};
+
+struct security_info {
+ bool security_enable;
+ u32 ptk_cipher;
+ u32 gtk_cipher;
+};
+
+struct wmm_qinfo {
+ s32 weight;
+ s32 wme_params;
+ s32 pkt_contended;
+};
+
+struct transmit_q_stats {
+ u32 total_tx_pkt_send[NUM_EDCA_QUEUES + 1];
+ u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 1];
+};
+
+struct vif_priv {
+ bool is_ht;
+ bool sgi;
+ u16 seq_start;
+};
+
+struct rsi_event {
+ atomic_t event_condition;
+ wait_queue_head_t event_queue;
+};
+
+struct rsi_thread {
+ void (*thread_function)(void *);
+ struct completion completion;
+ struct task_struct *task;
+ struct rsi_event event;
+ atomic_t thread_done;
+};
+
+struct rsi_hw;
+
+struct rsi_common {
+ struct rsi_hw *priv;
+ struct vif_priv vif_info[RSI_MAX_VIFS];
+
+ bool mgmt_q_block;
+ struct version_info driver_ver;
+ struct version_info fw_ver;
+
+ struct rsi_thread tx_thread;
+ struct sk_buff_head tx_queue[NUM_EDCA_QUEUES + 1];
+ /* Mutex declaration */
+ struct mutex mutex;
+ /* Mutex used between tx/rx threads */
+ struct mutex tx_rxlock;
+ u8 endpoint;
+
+ /* Channel/band related */
+ u8 band;
+ u8 channel_width;
+
+ u16 rts_threshold;
+ u16 bitrate_mask[2];
+ u32 fixedrate_mask[2];
+
+ u8 rf_reset;
+ struct transmit_q_stats tx_stats;
+ struct security_info secinfo;
+ struct wmm_qinfo tx_qinfo[NUM_EDCA_QUEUES];
+ struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
+ u8 mac_addr[IEEE80211_ADDR_LEN];
+
+ /* state related */
+ u32 fsm_state;
+ bool init_done;
+ u8 bb_rf_prog_count;
+ bool iface_down;
+
+ /* Generic */
+ u8 channel;
+ u8 *rx_data_pkt;
+ u8 mac_id;
+ u8 radio_id;
+ u16 rate_pwr[20];
+ u16 min_rate;
+
+ /* WMM algo related */
+ u8 selected_qnum;
+ u32 pkt_cnt;
+ u8 min_weight;
+};
+
+struct rsi_hw {
+ struct rsi_common *priv;
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vifs[RSI_MAX_VIFS];
+ struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+
+ struct device *device;
+ u8 sc_nvifs;
+
+#ifdef CONFIG_RSI_DEBUGFS
+ struct rsi_debugfs *dfsentry;
+ u8 num_debugfs_entries;
+#endif
+ void *rsi_dev;
+ int (*host_intf_read_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
+ int (*host_intf_write_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
+ int (*check_hw_queue_status)(struct rsi_hw *adapter, u8 q_num);
+ int (*rx_urb_submit)(struct rsi_hw *adapter);
+ int (*determine_event_timeout)(struct rsi_hw *adapter);
+};
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
new file mode 100644
index 000000000000..ac67c4ad63c2
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -0,0 +1,285 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_MGMT_H__
+#define __RSI_MGMT_H__
+
+#include <linux/sort.h>
+#include "rsi_boot_params.h"
+#include "rsi_main.h"
+
+#define MAX_MGMT_PKT_SIZE 512
+#define RSI_NEEDED_HEADROOM 80
+#define RSI_RCV_BUFFER_LEN 2000
+
+#define RSI_11B_MODE 0
+#define RSI_11G_MODE BIT(7)
+#define RETRY_COUNT 8
+#define RETRY_LONG 4
+#define RETRY_SHORT 7
+#define WMM_SHORT_SLOT_TIME 9
+#define SIFS_DURATION 16
+
+#define KEY_TYPE_CLEAR 0
+#define RSI_PAIRWISE_KEY 1
+#define RSI_GROUP_KEY 2
+
+/* EEPROM_READ_ADDRESS */
+#define WLAN_MAC_EEPROM_ADDR 40
+#define WLAN_MAC_MAGIC_WORD_LEN 0x01
+#define WLAN_HOST_MODE_LEN 0x04
+#define WLAN_FW_VERSION_LEN 0x08
+#define MAGIC_WORD 0x5A
+
+/* Receive Frame Types */
+#define TA_CONFIRM_TYPE 0x01
+#define RX_DOT11_MGMT 0x02
+#define TX_STATUS_IND 0x04
+#define PROBEREQ_CONFIRM 2
+#define CARD_READY_IND 0x00
+
+#define RSI_DELETE_PEER 0x0
+#define RSI_ADD_PEER 0x1
+#define START_AMPDU_AGGR 0x1
+#define STOP_AMPDU_AGGR 0x0
+#define INTERNAL_MGMT_PKT 0x99
+
+#define PUT_BBP_RESET 0
+#define BBP_REG_WRITE 0
+#define RF_RESET_ENABLE BIT(3)
+#define RATE_INFO_ENABLE BIT(0)
+#define RSI_BROADCAST_PKT BIT(9)
+
+#define UPPER_20_ENABLE (0x2 << 12)
+#define LOWER_20_ENABLE (0x4 << 12)
+#define FULL40M_ENABLE 0x6
+
+#define RSI_LMAC_CLOCK_80MHZ 0x1
+#define RSI_ENABLE_40MHZ (0x1 << 3)
+
+#define RX_BA_INDICATION 1
+#define RSI_TBL_SZ 40
+#define MAX_RETRIES 8
+
+#define STD_RATE_MCS7 0x07
+#define STD_RATE_MCS6 0x06
+#define STD_RATE_MCS5 0x05
+#define STD_RATE_MCS4 0x04
+#define STD_RATE_MCS3 0x03
+#define STD_RATE_MCS2 0x02
+#define STD_RATE_MCS1 0x01
+#define STD_RATE_MCS0 0x00
+#define STD_RATE_54 0x6c
+#define STD_RATE_48 0x60
+#define STD_RATE_36 0x48
+#define STD_RATE_24 0x30
+#define STD_RATE_18 0x24
+#define STD_RATE_12 0x18
+#define STD_RATE_11 0x16
+#define STD_RATE_09 0x12
+#define STD_RATE_06 0x0C
+#define STD_RATE_5_5 0x0B
+#define STD_RATE_02 0x04
+#define STD_RATE_01 0x02
+
+#define RSI_RF_TYPE 1
+#define RSI_RATE_00 0x00
+#define RSI_RATE_1 0x0
+#define RSI_RATE_2 0x2
+#define RSI_RATE_5_5 0x4
+#define RSI_RATE_11 0x6
+#define RSI_RATE_6 0x8b
+#define RSI_RATE_9 0x8f
+#define RSI_RATE_12 0x8a
+#define RSI_RATE_18 0x8e
+#define RSI_RATE_24 0x89
+#define RSI_RATE_36 0x8d
+#define RSI_RATE_48 0x88
+#define RSI_RATE_54 0x8c
+#define RSI_RATE_MCS0 0x100
+#define RSI_RATE_MCS1 0x101
+#define RSI_RATE_MCS2 0x102
+#define RSI_RATE_MCS3 0x103
+#define RSI_RATE_MCS4 0x104
+#define RSI_RATE_MCS5 0x105
+#define RSI_RATE_MCS6 0x106
+#define RSI_RATE_MCS7 0x107
+#define RSI_RATE_MCS7_SG 0x307
+
+#define BW_20MHZ 0
+#define BW_40MHZ 1
+
+#define RSI_SUPP_FILTERS (FIF_ALLMULTI | FIF_PROBE_REQ |\
+ FIF_BCN_PRBRESP_PROMISC)
+enum opmode {
+ STA_OPMODE = 1,
+ AP_OPMODE = 2
+};
+
+extern struct ieee80211_rate rsi_rates[12];
+extern const u16 rsi_mcsrates[8];
+
+enum sta_notify_events {
+ STA_CONNECTED = 0,
+ STA_DISCONNECTED,
+ STA_TX_ADDBA_DONE,
+ STA_TX_DELBA,
+ STA_RX_ADDBA_DONE,
+ STA_RX_DELBA
+};
+
+/* Send Frames Types */
+enum cmd_frame_type {
+ TX_DOT11_MGMT,
+ RESET_MAC_REQ,
+ RADIO_CAPABILITIES,
+ BB_PROG_VALUES_REQUEST,
+ RF_PROG_VALUES_REQUEST,
+ WAKEUP_SLEEP_REQUEST,
+ SCAN_REQUEST,
+ TSF_UPDATE,
+ PEER_NOTIFY,
+ BLOCK_UNBLOCK,
+ SET_KEY_REQ,
+ AUTO_RATE_IND,
+ BOOTUP_PARAMS_REQUEST,
+ VAP_CAPABILITIES,
+ EEPROM_READ_TYPE,
+ EEPROM_WRITE,
+ GPIO_PIN_CONFIG,
+ SET_RX_FILTER,
+ AMPDU_IND,
+ STATS_REQUEST_FRAME,
+ BB_BUF_PROG_VALUES_REQ,
+ BBP_PROG_IN_TA,
+ BG_SCAN_PARAMS,
+ BG_SCAN_PROBE_REQ,
+ CW_MODE_REQ,
+ PER_CMD_PKT
+};
+
+struct rsi_mac_frame {
+ __le16 desc_word[8];
+} __packed;
+
+struct rsi_boot_params {
+ __le16 desc_word[8];
+ struct bootup_params bootup_params;
+} __packed;
+
+struct rsi_peer_notify {
+ __le16 desc_word[8];
+ u8 mac_addr[6];
+ __le16 command;
+ __le16 mpdu_density;
+ __le16 reserved;
+ __le32 sta_flags;
+} __packed;
+
+struct rsi_vap_caps {
+ __le16 desc_word[8];
+ u8 mac_addr[6];
+ __le16 keep_alive_period;
+ u8 bssid[6];
+ __le16 reserved;
+ __le32 flags;
+ __le16 frag_threshold;
+ __le16 rts_threshold;
+ __le32 default_mgmt_rate;
+ __le32 default_ctrl_rate;
+ __le32 default_data_rate;
+ __le16 beacon_interval;
+ __le16 dtim_period;
+} __packed;
+
+struct rsi_set_key {
+ __le16 desc_word[8];
+ u8 key[4][32];
+ u8 tx_mic_key[8];
+ u8 rx_mic_key[8];
+} __packed;
+
+struct rsi_auto_rate {
+ __le16 desc_word[8];
+ __le16 failure_limit;
+ __le16 initial_boundary;
+ __le16 max_threshold_limt;
+ __le16 num_supported_rates;
+ __le16 aarf_rssi;
+ __le16 moderate_rate_inx;
+ __le16 collision_tolerance;
+ __le16 supported_rates[40];
+} __packed;
+
+struct qos_params {
+ __le16 cont_win_min_q;
+ __le16 cont_win_max_q;
+ __le16 aifsn_val_q;
+ __le16 txop_q;
+} __packed;
+
+struct rsi_radio_caps {
+ __le16 desc_word[8];
+ struct qos_params qos_params[MAX_HW_QUEUES];
+ u8 num_11n_rates;
+ u8 num_11ac_rates;
+ __le16 gcpd_per_rate[20];
+} __packed;
+
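+/* The helpers below parse the 16-byte frame descriptor: the 16-bit word at
+ * 'offset' carries the payload length in bits [11:0] and the queue number in
+ * bits [14:12]; the word at 'offset + 4' carries the extended descriptor size
+ * in its low byte.
+ */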
+static inline u32 rsi_get_queueno(u8 *addr, u16 offset)
+{
+ return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12;
+}
+
+static inline u32 rsi_get_length(u8 *addr, u16 offset)
+{
+ return (le16_to_cpu(*(__le16 *)&addr[offset])) & 0x0fff;
+}
+
+static inline u8 rsi_get_extended_desc(u8 *addr, u16 offset)
+{
+ return le16_to_cpu(*((__le16 *)&addr[offset + 4])) & 0x00ff;
+}
+
+static inline u8 rsi_get_rssi(u8 *addr)
+{
+ return *(u8 *)(addr + FRAME_DESC_SZ);
+}
+
+static inline u8 rsi_get_channel(u8 *addr)
+{
+ return *(char *)(addr + 15);
+}
+
+int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg);
+int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode);
+int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid,
+ u16 ssn, u8 buf_size, u8 event);
+int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len,
+ u8 key_type, u8 key_id, u32 cipher);
+int rsi_set_channel(struct rsi_common *common, u16 chno);
+void rsi_inform_bss_status(struct rsi_common *common, u8 status,
+ const u8 *bssid, u8 qos_enable, u16 aid);
+void rsi_indicate_pkt_to_os(struct rsi_common *common, struct sk_buff *skb);
+int rsi_mac80211_attach(struct rsi_common *common);
+void rsi_indicate_tx_status(struct rsi_hw *common, struct sk_buff *skb,
+ int status);
+bool rsi_is_cipher_wep(struct rsi_common *common);
+void rsi_core_qos_processor(struct rsi_common *common);
+void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb);
+int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb);
+int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb);
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
new file mode 100644
index 000000000000..df4b5e20e05f
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -0,0 +1,129 @@
+/**
+ * @section LICENSE
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef __RSI_SDIO_INTF__
+#define __RSI_SDIO_INTF__
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio_ids.h>
+#include "rsi_main.h"
+
+enum sdio_interrupt_type {
+ BUFFER_FULL = 0x0,
+ BUFFER_AVAILABLE = 0x1,
+ FIRMWARE_ASSERT_IND = 0x3,
+ MSDU_PACKET_PENDING = 0x4,
+ UNKNOWN_INT = 0XE
+};
+
+/* Buffer status register related info */
+#define PKT_BUFF_SEMI_FULL 0
+#define PKT_BUFF_FULL 1
+#define PKT_MGMT_BUFF_FULL 2
+#define MSDU_PKT_PENDING 3
+/* Interrupt Bit Related Macros */
+#define PKT_BUFF_AVAILABLE 0
+#define FW_ASSERT_IND 2
+
+#define RSI_DEVICE_BUFFER_STATUS_REGISTER 0xf3
+#define RSI_FN1_INT_REGISTER 0xf9
+#define RSI_SD_REQUEST_MASTER 0x10000
+
+/* FOR SD CARD ONLY */
+#define SDIO_RX_NUM_BLOCKS_REG 0x000F1
+#define SDIO_FW_STATUS_REG 0x000F2
+#define SDIO_NXT_RD_DELAY2 0x000F5
+#define SDIO_MASTER_ACCESS_MSBYTE 0x000FA
+#define SDIO_MASTER_ACCESS_LSBYTE 0x000FB
+#define SDIO_READ_START_LVL 0x000FC
+#define SDIO_READ_FIFO_CTL 0x000FD
+#define SDIO_WRITE_FIFO_CTL 0x000FE
+#define SDIO_FUN1_INTR_CLR_REG 0x0008
+#define SDIO_REG_HIGH_SPEED 0x0013
+
+#define RSI_GET_SDIO_INTERRUPT_TYPE(_I, TYPE) \
+ { \
+ TYPE = \
+ (_I & (1 << PKT_BUFF_AVAILABLE)) ? \
+ BUFFER_AVAILABLE : \
+ (_I & (1 << MSDU_PKT_PENDING)) ? \
+ MSDU_PACKET_PENDING : \
+ (_I & (1 << FW_ASSERT_IND)) ? \
+ FIRMWARE_ASSERT_IND : UNKNOWN_INT; \
+ }
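+
+/* Example: an interrupt status of BIT(3) decodes to MSDU_PACKET_PENDING,
+ * while BIT(0) | BIT(2) decodes to BUFFER_AVAILABLE, since the
+ * buffer-available bit is checked first.
+ */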
+
+/* common registers in SDIO function1 */
+#define TA_SOFT_RESET_REG 0x0004
+#define TA_TH0_PC_REG 0x0400
+#define TA_HOLD_THREAD_REG 0x0844
+#define TA_RELEASE_THREAD_REG 0x0848
+
+#define TA_SOFT_RST_CLR 0
+#define TA_SOFT_RST_SET BIT(0)
+#define TA_PC_ZERO 0
+#define TA_HOLD_THREAD_VALUE cpu_to_le32(0xF)
+#define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF)
+#define TA_BASE_ADDR 0x2200
+#define MISC_CFG_BASE_ADDR 0x4150
+
+struct receive_info {
+ bool buffer_full;
+ bool semi_buffer_full;
+ bool mgmt_buffer_full;
+ u32 mgmt_buf_full_counter;
+ u32 buf_semi_full_counter;
+ u8 watch_bufferfull_count;
+ u32 sdio_intr_status_zero;
+ u32 sdio_int_counter;
+ u32 total_sdio_msdu_pending_intr;
+ u32 total_sdio_unknown_intr;
+ u32 buf_full_counter;
+ u32 buf_avilable_counter;
+};
+
+struct rsi_91x_sdiodev {
+ struct sdio_func *pfunction;
+ struct task_struct *in_sdio_litefi_irq;
+ struct receive_info rx_info;
+ u32 next_read_delay;
+ u32 sdio_high_speed_enable;
+ u8 sdio_clock_speed;
+ u32 cardcapability;
+ u8 prev_desc[16];
+ u32 tx_blk_size;
+ u8 write_fail;
+};
+
+void rsi_interrupt_handler(struct rsi_hw *adapter);
+int rsi_init_sdio_slave_regs(struct rsi_hw *adapter);
+int rsi_sdio_device_init(struct rsi_common *common);
+int rsi_sdio_read_register(struct rsi_hw *adapter, u32 addr, u8 *data);
+int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter, u8 *pkt, u32 length);
+int rsi_sdio_write_register(struct rsi_hw *adapter, u8 function,
+ u32 addr, u8 *data);
+int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, u32 addr,
+ u8 *data, u32 count);
+void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit);
+int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter);
+int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num);
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
new file mode 100644
index 000000000000..ebea0c411ead
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -0,0 +1,68 @@
+/**
+ * @section LICENSE
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_USB_INTF__
+#define __RSI_USB_INTF__
+
+#include <linux/usb.h>
+#include "rsi_main.h"
+#include "rsi_common.h"
+
+#define USB_INTERNAL_REG_1 0x25000
+#define RSI_USB_READY_MAGIC_NUM 0xab
+#define FW_STATUS_REG 0x41050012
+
+#define USB_VENDOR_REGISTER_READ 0x15
+#define USB_VENDOR_REGISTER_WRITE 0x16
+#define RSI_USB_TX_HEAD_ROOM 128
+
+#define MAX_RX_URBS 1
+#define MAX_BULK_EP 8
+#define MGMT_EP 1
+#define DATA_EP 2
+
+struct rsi_91x_usbdev {
+ struct rsi_thread rx_thread;
+ u8 endpoint;
+ struct usb_device *usbdev;
+ struct usb_interface *pfunction;
+ struct urb *rx_usb_urb[MAX_RX_URBS];
+ u8 *tx_buffer;
+ __le16 bulkin_size;
+ u8 bulkin_endpoint_addr;
+ __le16 bulkout_size[MAX_BULK_EP];
+ u8 bulkout_endpoint_addr[MAX_BULK_EP];
+ u32 tx_blk_size;
+ u8 write_fail;
+};
+
+static inline int rsi_usb_check_queue_status(struct rsi_hw *adapter, u8 q_num)
+{
+ /* In USB, there isn't any need to check the queue status */
+ return QUEUE_NOT_FULL;
+}
+
+static inline int rsi_usb_event_timeout(struct rsi_hw *adapter)
+{
+ return EVENT_WAIT_FOREVER;
+}
+
+int rsi_usb_device_init(struct rsi_common *common);
+int rsi_usb_write_register_multiple(struct rsi_hw *adapter, u32 addr,
+ u8 *data, u32 count);
+void rsi_usb_rx_thread(struct rsi_common *common);
+#endif
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index caddc1b427a9..a49c3d73ea2c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -125,9 +125,9 @@ static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
if (unlikely(tout))
- rt2x00_warn(entry->queue->rt2x00dev,
- "TX status timeout for entry %d in queue %d\n",
- entry->entry_idx, entry->queue->qid);
+ rt2x00_dbg(entry->queue->rt2x00dev,
+ "TX status timeout for entry %d in queue %d\n",
+ entry->entry_idx, entry->queue->qid);
return tout;
}
@@ -566,8 +566,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
if (unlikely(rt2x00queue_empty(queue))) {
- rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
- qid);
+ rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+ qid);
break;
}
@@ -764,7 +764,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Overwrite TX done handler
*/
- PREPARE_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
+ INIT_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
return 0;
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 2e3d1645e68b..90fdb02b55e7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -286,7 +286,7 @@ static ssize_t rt2x00debug_read_queue_dump(struct file *file,
if (retval)
return retval;
- status = min((size_t)skb->len, length);
+ status = min_t(size_t, skb->len, length);
if (copy_to_user(buf, skb->data, status)) {
status = -EFAULT;
goto exit;
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/rtl818x/Kconfig
index 30332175bcd8..1ce1d55f0010 100644
--- a/drivers/net/wireless/rtl818x/Kconfig
+++ b/drivers/net/wireless/rtl818x/Kconfig
@@ -2,11 +2,11 @@
# RTL818X Wireless LAN device configuration
#
config RTL8180
- tristate "Realtek 8180/8185 PCI support"
+ tristate "Realtek 8180/8185/8187SE PCI support"
depends on MAC80211 && PCI
select EEPROM_93CX6
---help---
- This is a driver for RTL8180 and RTL8185 based cards.
+ This is a driver for RTL8180, RTL8185 and RTL8187SE based cards.
These are PCI based chips found in cards such as:
(RTL8185 802.11g)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/rtl818x/rtl8180/Makefile
index cb4fb8596f0b..08b056db4a3b 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/rtl818x/rtl8180/Makefile
@@ -1,4 +1,4 @@
-rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o
+rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
obj-$(CONFIG_RTL8180) += rtl8180.o
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 3867d1470b36..98d8256f0377 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1,15 +1,42 @@
-/*
- * Linux device driver for RTL8180 / RTL8185
+/* Linux device driver for RTL8180 / RTL8185 / RTL8187SE
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
+ * Copyright 2007,2014 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* Thanks to Realtek for their support!
*
+ ************************************************************************
+ *
+ * The driver was extended to the RTL8187SE in 2014 by
+ * Andrea Merello <andrea.merello@gmail.com>
+ *
+ * based also on:
+ * - portions of rtl8187se Linux staging driver, Copyright Realtek corp.
+ * - other GPL, unpublished (until now), Linux driver code,
+ * Copyright Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ * A huge thanks goes to Sara V. Nari who forgives me when I'm
+ * sitting in front of my laptop at evening, week-end, night...
+ *
+ * A special thanks goes to Antonio Cuni, who helped me with
+ * some python userspace stuff I used to debug RTL8187SE code, and who
+ * bought a laptop with an unsupported Wi-Fi card some years ago...
+ *
+ * Thanks to Larry Finger for writing some code for rtl8187se and for
+ * his suggestions.
+ *
+ * Thanks to Dan Carpenter for reviewing my initial patch and for his
+ * suggestions.
+ *
+ * Thanks to Bernhard Schiffner for his help in testing and for his
+ * suggestions.
+ *
+ ************************************************************************
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -29,13 +56,18 @@
#include "sa2400.h"
#include "max2820.h"
#include "grf5101.h"
+#include "rtl8225se.h"
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
-MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
+MODULE_DESCRIPTION("RTL8180 / RTL8185 / RTL8187SE PCI wireless driver");
MODULE_LICENSE("GPL");
static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
+
+ /* rtl8187se */
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8199) },
+
/* rtl8185 */
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
{ PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -85,6 +117,76 @@ static const struct ieee80211_channel rtl818x_channels[] = {
{ .center_freq = 2484 },
};
+/* Queues for rtl8187se card
+ *
+ * name | reg | queue
+ * BC | 7 | 6
+ * MG | 1 | 0
+ * HI | 6 | 1
+ * VO | 5 | 2
+ * VI | 4 | 3
+ * BE | 3 | 4
+ * BK | 2 | 5
+ *
+ * The complete map for the DMA kick reg using all queues is:
+ * static const int rtl8187se_queues_map[RTL8187SE_NR_TX_QUEUES] =
+ * {1, 6, 5, 4, 3, 2, 7};
+ *
+ * ... but, because 4 queues are enough for mac80211 QoS, we use this mapping:
+ *
+ * name | reg | queue
+ * BC | 7 | 4 <- currently not used yet
+ * MG | 1 | x <- Not used
+ * HI | 6 | x <- Not used
+ * VO | 5 | 0 <- used
+ * VI | 4 | 1 <- used
+ * BE | 3 | 2 <- used
+ * BK | 2 | 3 <- used
+ *
+ * Beacon queue could be used, but this is not finished yet.
+ *
+ * I thought about using the other two queues but I decided not to do this:
+ *
+ * - I'm unsure whether the mac80211 will ever try to use more than 4 queues
+ * by itself.
+ *
+ * - I could route MGMT frames (currently sent over VO queue) to the MGMT
+ * queue, but since mac80211 would not know about it, I would probably gain
+ * some HW priority whenever the VO queue is not empty. This gain is
+ * limited by the fact that I would have to stop the mac80211 queue whenever
+ * one of the VO or MGMT queues is full, which also stops submission of MGMT
+ * frames to the driver.
+ *
+ * - I don't know how to set the contention window params in the HW for the
+ * MGMT and HI-prio queues.
+ */
+
+static const int rtl8187se_queues_map[RTL8187SE_NR_TX_QUEUES] = {5, 4, 3, 2, 7};
+
+/* Queues for rtl8180/rtl8185 cards
+ *
+ * name | reg | prio
+ * BC | 7 | 3
+ * HI | 6 | 0
+ * NO | 5 | 1
+ * LO | 4 | 2
+ *
+ * The complete map for the DMA kick reg using all queues is:
+ * static const int rtl8180_queues_map[RTL8180_NR_TX_QUEUES] = {6, 5, 4, 7};
+ *
+ * ... but, because mac80211 needs at least 4 queues for QoS (otherwise QoS
+ * can't be done at all), we use just one.
+ * Beacon queue could be used, but this is not finished yet.
+ * Actual map is:
+ *
+ * name | reg | prio
+ * BC | 7 | 1 <- currently not used yet.
+ * HI | 6 | x <- not used
+ * NO | 5 | x <- not used
+ * LO | 4 | 0 <- used
+ */
+
+static const int rtl8180_queues_map[RTL8180_NR_TX_QUEUES] = {4, 7};
void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
{
@@ -105,14 +207,30 @@ void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
static void rtl8180_handle_rx(struct ieee80211_hw *dev)
{
struct rtl8180_priv *priv = dev->priv;
+ struct rtl818x_rx_cmd_desc *cmd_desc;
unsigned int count = 32;
u8 signal, agc, sq;
dma_addr_t mapping;
while (count--) {
- struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
+ void *entry = priv->rx_ring + priv->rx_idx * priv->rx_ring_sz;
struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
- u32 flags = le32_to_cpu(entry->flags);
+ u32 flags, flags2;
+ u64 tsft;
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ struct rtl8187se_rx_desc *desc = entry;
+
+ flags = le32_to_cpu(desc->flags);
+ flags2 = le32_to_cpu(desc->flags2);
+ tsft = le64_to_cpu(desc->tsft);
+ } else {
+ struct rtl8180_rx_desc *desc = entry;
+
+ flags = le32_to_cpu(desc->flags);
+ flags2 = le32_to_cpu(desc->flags2);
+ tsft = le64_to_cpu(desc->tsft);
+ }
if (flags & RTL818X_RX_DESC_FLAG_OWN)
return;
@@ -122,7 +240,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
RTL818X_RX_DESC_FLAG_RX_ERR)))
goto done;
else {
- u32 flags2 = le32_to_cpu(entry->flags2);
struct ieee80211_rx_status rx_status = {0};
struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_SIZE);
@@ -148,19 +265,24 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
rx_status.antenna = (flags2 >> 15) & 1;
rx_status.rate_idx = (flags >> 20) & 0xF;
agc = (flags2 >> 17) & 0x7F;
- if (priv->r8185) {
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
if (rx_status.rate_idx > 3)
signal = 90 - clamp_t(u8, agc, 25, 90);
else
signal = 95 - clamp_t(u8, agc, 30, 95);
- } else {
+ } else if (priv->chip_family ==
+ RTL818X_CHIP_FAMILY_RTL8180) {
sq = flags2 & 0xff;
signal = priv->rf->calc_rssi(agc, sq);
+ } else {
+ /* TODO: rtl8187se rssi */
+ signal = 10;
}
rx_status.signal = signal;
rx_status.freq = dev->conf.chandef.chan->center_freq;
rx_status.band = dev->conf.chandef.chan->band;
- rx_status.mactime = le64_to_cpu(entry->tsft);
+ rx_status.mactime = tsft;
rx_status.flag |= RX_FLAG_MACTIME_START;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -174,11 +296,13 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
}
done:
- entry->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
- entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
+ cmd_desc = entry;
+ cmd_desc->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
+ cmd_desc->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
MAX_RX_SIZE);
if (priv->rx_idx == 31)
- entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
+ cmd_desc->flags |=
+ cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
priv->rx_idx = (priv->rx_idx + 1) % 32;
}
}
@@ -218,6 +342,55 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
}
}
+static irqreturn_t rtl8187se_interrupt(int irq, void *dev_id)
+{
+ struct ieee80211_hw *dev = dev_id;
+ struct rtl8180_priv *priv = dev->priv;
+ u32 reg;
+ unsigned long flags;
+ static int desc_err;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ /* Note: 32-bit interrupt status */
+ reg = rtl818x_ioread32(priv, &priv->map->INT_STATUS_SE);
+ if (unlikely(reg == 0xFFFFFFFF)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ rtl818x_iowrite32(priv, &priv->map->INT_STATUS_SE, reg);
+
+ if (reg & IMR_TIMEOUT1)
+ rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
+
+ if (reg & (IMR_TBDOK | IMR_TBDER))
+ rtl8180_handle_tx(dev, 4);
+
+ if (reg & (IMR_TVODOK | IMR_TVODER))
+ rtl8180_handle_tx(dev, 0);
+
+ if (reg & (IMR_TVIDOK | IMR_TVIDER))
+ rtl8180_handle_tx(dev, 1);
+
+ if (reg & (IMR_TBEDOK | IMR_TBEDER))
+ rtl8180_handle_tx(dev, 2);
+
+ if (reg & (IMR_TBKDOK | IMR_TBKDER))
+ rtl8180_handle_tx(dev, 3);
+
+ if (reg & (IMR_ROK | IMR_RER | RTL818X_INT_SE_RX_DU | IMR_RQOSOK))
+ rtl8180_handle_rx(dev);
+ /* The interface sometimes generates several RX DMA descriptor errors
+ * at startup. Do not report these.
+ */
+ if ((reg & RTL818X_INT_SE_RX_DU) && desc_err++ > 2)
+ if (net_ratelimit())
+ wiphy_err(dev->wiphy, "No RX DMA Descriptor avail\n");
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+}
+
static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
{
struct ieee80211_hw *dev = dev_id;
@@ -234,12 +407,6 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg);
if (reg & (RTL818X_INT_TXB_OK | RTL818X_INT_TXB_ERR))
- rtl8180_handle_tx(dev, 3);
-
- if (reg & (RTL818X_INT_TXH_OK | RTL818X_INT_TXH_ERR))
- rtl8180_handle_tx(dev, 2);
-
- if (reg & (RTL818X_INT_TXN_OK | RTL818X_INT_TXN_ERR))
rtl8180_handle_tx(dev, 1);
if (reg & (RTL818X_INT_TXL_OK | RTL818X_INT_TXL_ERR))
@@ -263,12 +430,14 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
struct rtl8180_tx_ring *ring;
struct rtl8180_tx_desc *entry;
unsigned long flags;
- unsigned int idx, prio;
+ unsigned int idx, prio, hw_prio;
dma_addr_t mapping;
u32 tx_flags;
u8 rc_flags;
u16 plcp_len = 0;
__le16 rts_duration = 0;
+ /* do arithmetic and then convert to le16 */
+ u16 frame_duration = 0;
prio = skb_get_queue_mapping(skb);
ring = &priv->tx_ring[prio];
@@ -280,7 +449,6 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
kfree_skb(skb);
dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
return;
-
}
tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
@@ -288,7 +456,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
(ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
skb->len;
- if (priv->r8185)
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180)
tx_flags |= RTL818X_TX_DESC_FLAG_DMA |
RTL818X_TX_DESC_FLAG_NO_ENC;
@@ -305,7 +473,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
info);
- if (!priv->r8185) {
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
unsigned int remainder;
plcp_len = DIV_ROUND_UP(16 * (skb->len + 4),
@@ -316,6 +484,18 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
plcp_len |= 1 << 15;
}
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ __le16 duration;
+ /* SIFS time (required by HW) is already included by
+ * ieee80211_generic_frame_duration
+ */
+ duration = ieee80211_generic_frame_duration(dev, priv->vif,
+ IEEE80211_BAND_2GHZ, skb->len,
+ ieee80211_get_tx_rate(dev, info));
+
+ frame_duration = priv->ack_time + le16_to_cpu(duration);
+ }
+
spin_lock_irqsave(&priv->lock, flags);
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -328,21 +508,91 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
entry = &ring->desc[idx];
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ entry->frame_duration = cpu_to_le16(frame_duration);
+ entry->frame_len_se = cpu_to_le16(skb->len);
+
+ /* tpc polarity */
+ entry->flags3 = cpu_to_le16(1<<4);
+ } else
+ entry->frame_len = cpu_to_le32(skb->len);
+
entry->rts_duration = rts_duration;
entry->plcp_len = cpu_to_le16(plcp_len);
entry->tx_buf = cpu_to_le32(mapping);
- entry->frame_len = cpu_to_le32(skb->len);
+
entry->flags2 = info->control.rates[1].idx >= 0 ?
ieee80211_get_alt_retry_rate(dev, info, 0)->bitrate << 4 : 0;
entry->retry_limit = info->control.rates[0].count;
+
+ /* We must be sure that tx_flags is written last because the HW
+ * looks at it to check whether the rest of the data is valid or not
+ */
+ wmb();
entry->flags = cpu_to_le32(tx_flags);
+ /* We must be sure this has been written before the following HW
+ * register write, because this write will make the HW attempt
+ * to DMA the just-written data
+ */
+ wmb();
+
__skb_queue_tail(&ring->queue, skb);
if (ring->entries - skb_queue_len(&ring->queue) < 2)
ieee80211_stop_queue(dev, prio);
spin_unlock_irqrestore(&priv->lock, flags);
- rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ /* just poll: rings are stopped with TPPollStop reg */
+ hw_prio = rtl8187se_queues_map[prio];
+ rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+ (1 << hw_prio));
+ } else {
+ hw_prio = rtl8180_queues_map[prio];
+ rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+ (1 << hw_prio) | /* ring to poll */
+ (1<<1) | (1<<2));/* stopped rings */
+ }
+}
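An illustrative sketch, not part of the patch, of the ordering contract described in the comments above: fill every descriptor field, barrier, hand ownership to the HW, barrier again, then ring the doorbell. A compiler barrier stands in for the kernel's wmb() here.

#include <stdio.h>
#include <stdint.h>

#define OWN_FLAG (1u << 31)
/* stand-in for the kernel's wmb(); here only a compiler barrier */
#define demo_wmb() __asm__ __volatile__("" ::: "memory")

struct demo_tx_desc {
	uint32_t flags;		/* OWN bit lives here, written last */
	uint32_t tx_buf;
	uint32_t frame_len;
};

static void demo_kick_dma(void) { printf("poke TX_DMA_POLLING\n"); }

static void demo_queue_frame(struct demo_tx_desc *d, uint32_t buf, uint32_t len)
{
	d->tx_buf = buf;
	d->frame_len = len;
	demo_wmb();			/* all fields visible before OWN */
	d->flags = OWN_FLAG | len;
	demo_wmb();			/* OWN visible before the doorbell */
	demo_kick_dma();
}

int main(void)
{
	struct demo_tx_desc d = {0};

	demo_queue_frame(&d, 0x1000, 64);
	return 0;
}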
+
+static void rtl8180_set_anaparam3(struct rtl8180_priv *priv, u16 anaparam3)
+{
+ u8 reg;
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_CONFIG);
+
+ reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+ reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+ rtl818x_iowrite16(priv, &priv->map->ANAPARAM3, anaparam3);
+
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+ reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_NORMAL);
+}
+
+void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2)
+{
+ u8 reg;
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_CONFIG);
+
+ reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+ reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+ rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
+
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+ reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_NORMAL);
}
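A hypothetical refactor sketch, not part of the patch: set_anaparam, set_anaparam2 and set_anaparam3 above all share the same unlock/write/relock sequence, so a single helper could carry it. Printf stubs stand in for the register I/O and the CONFIG3 bit position used here is a placeholder.

#include <stdio.h>
#include <stdint.h>

/* stubs standing in for rtl818x_ioread8/iowrite* */
static uint8_t demo_read_config3(void)           { return 0x00; }
static void demo_write_config3(uint8_t v)        { printf("CONFIG3 <- 0x%02x\n", v); }
static void demo_write_eeprom_cmd(const char *m) { printf("EEPROM_CMD <- %s\n", m); }
static void demo_write_anaparam(int n, uint32_t v)
{
	printf("ANAPARAM%d <- 0x%08x\n", n, v);
}

#define DEMO_ANAPARAM_WRITE (1u << 6)	/* placeholder bit position */

/* hypothetical helper: same sequence as set_anaparam/2/3 above */
static void demo_set_anaparam(int which, uint32_t val)
{
	uint8_t reg;

	demo_write_eeprom_cmd("CONFIG");		/* unlock config regs */
	reg = demo_read_config3();
	demo_write_config3(reg | DEMO_ANAPARAM_WRITE);	/* open ANAPARAM write */
	demo_write_anaparam(which, val);
	demo_write_config3(reg & ~DEMO_ANAPARAM_WRITE);	/* close it again */
	demo_write_eeprom_cmd("NORMAL");		/* back to normal mode */
}

int main(void)
{
	demo_set_anaparam(2, 0x000004c6);	/* e.g. RTL8225SE_ANAPARAM2_ON */
	return 0;
}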
void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
@@ -359,17 +609,171 @@ void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
}
+static void rtl8187se_mac_config(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u8 reg;
+
+ rtl818x_iowrite32(priv, REG_ADDR4(0x1F0), 0);
+ rtl818x_ioread32(priv, REG_ADDR4(0x1F0));
+ rtl818x_iowrite32(priv, REG_ADDR4(0x1F4), 0);
+ rtl818x_ioread32(priv, REG_ADDR4(0x1F4));
+ rtl818x_iowrite8(priv, REG_ADDR1(0x1F8), 0);
+ rtl818x_ioread8(priv, REG_ADDR1(0x1F8));
+ /* Enable DA10 TX power saving */
+ reg = rtl818x_ioread8(priv, &priv->map->PHY_PR);
+ rtl818x_iowrite8(priv, &priv->map->PHY_PR, reg | 0x04);
+ /* Power */
+ rtl818x_iowrite16(priv, PI_DATA_REG, 0x1000);
+ rtl818x_iowrite16(priv, SI_DATA_REG, 0x1000);
+ /* AFE - default to power ON */
+ rtl818x_iowrite16(priv, REG_ADDR2(0x370), 0x0560);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x372), 0x0560);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x374), 0x0DA4);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x376), 0x0DA4);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x378), 0x0560);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x37A), 0x0560);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x37C), 0x00EC);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x37E), 0x00EC);
+ rtl818x_iowrite8(priv, REG_ADDR1(0x24E), 0x01);
+ /* unknown, needed for suspend to RAM resume */
+ rtl818x_iowrite8(priv, REG_ADDR1(0x0A), 0x72);
+}
+
+static void rtl8187se_set_antenna_config(struct ieee80211_hw *dev, u8 def_ant,
+ bool diversity)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ rtl8225_write_phy_cck(dev, 0x0C, 0x09);
+ if (diversity) {
+ if (def_ant == 1) {
+ rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00);
+ rtl8225_write_phy_cck(dev, 0x11, 0xBB);
+ rtl8225_write_phy_cck(dev, 0x01, 0xC7);
+ rtl8225_write_phy_ofdm(dev, 0x0D, 0x54);
+ rtl8225_write_phy_ofdm(dev, 0x18, 0xB2);
+ } else { /* main antenna */
+ rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
+ rtl8225_write_phy_cck(dev, 0x11, 0x9B);
+ rtl8225_write_phy_cck(dev, 0x01, 0xC7);
+ rtl8225_write_phy_ofdm(dev, 0x0D, 0x5C);
+ rtl8225_write_phy_ofdm(dev, 0x18, 0xB2);
+ }
+ } else { /* disable antenna diversity */
+ if (def_ant == 1) {
+ rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00);
+ rtl8225_write_phy_cck(dev, 0x11, 0xBB);
+ rtl8225_write_phy_cck(dev, 0x01, 0x47);
+ rtl8225_write_phy_ofdm(dev, 0x0D, 0x54);
+ rtl8225_write_phy_ofdm(dev, 0x18, 0x32);
+ } else { /* main antenna */
+ rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
+ rtl8225_write_phy_cck(dev, 0x11, 0x9B);
+ rtl8225_write_phy_cck(dev, 0x01, 0x47);
+ rtl8225_write_phy_ofdm(dev, 0x0D, 0x5C);
+ rtl8225_write_phy_ofdm(dev, 0x18, 0x32);
+ }
+ }
+ /* priv->curr_ant = def_ant; */
+}
+
+static void rtl8180_int_enable(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ rtl818x_iowrite32(priv, &priv->map->IMR, IMR_TMGDOK |
+ IMR_TBDER | IMR_TBDOK |
+ IMR_THPDER | IMR_THPDOK |
+ IMR_TVODER | IMR_TVODOK |
+ IMR_TVIDER | IMR_TVIDOK |
+ IMR_TBEDER | IMR_TBEDOK |
+ IMR_TBKDER | IMR_TBKDOK |
+ IMR_RDU | IMR_RER |
+ IMR_ROK | IMR_RQOSOK);
+ } else {
+ rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
+ }
+}
+
+static void rtl8180_int_disable(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ rtl818x_iowrite32(priv, &priv->map->IMR, 0);
+ } else {
+ rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
+ }
+}
+
+static void rtl8180_conf_basic_rates(struct ieee80211_hw *dev,
+ u32 rates_mask)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ u8 max, min;
+ u16 reg;
+
+ max = fls(rates_mask) - 1;
+ min = ffs(rates_mask) - 1;
+
+ switch (priv->chip_family) {
+
+ case RTL818X_CHIP_FAMILY_RTL8180:
+ /* in 8180 this is NOT a BITMAP */
+ reg = rtl818x_ioread16(priv, &priv->map->BRSR);
+ reg &= ~3;
+ reg |= max;
+ rtl818x_iowrite16(priv, &priv->map->BRSR, reg);
+ break;
+
+ case RTL818X_CHIP_FAMILY_RTL8185:
+ /* in 8185 this is a BITMAP */
+ rtl818x_iowrite16(priv, &priv->map->BRSR, rates_mask);
+ rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (max << 4) | min);
+ break;
+
+ case RTL818X_CHIP_FAMILY_RTL8187SE:
+ /* in 8187se this is a BITMAP */
+ rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, rates_mask);
+ break;
+ }
+}
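An illustrative sketch, not part of the patch: how the max/min rate indexes fall out of the basic-rates bitmask via fls()/ffs(), plus the RESP_RATE byte built from them in the 8185 case. For the 0x1f3 mask used later in init (1, 2, 6, 9, 12, 18, 24 Mbps) this gives max 8 and min 0; for the rtl8180 mask 0x3 it gives 1 and 0.

#include <stdio.h>

/* userspace stand-ins for the kernel's fls()/ffs() */
static int demo_fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }
static int demo_ffs(unsigned int x) { return x ? __builtin_ctz(x) + 1 : 0; }

static void show(unsigned int rates_mask)
{
	int max = demo_fls(rates_mask) - 1;	/* highest basic rate index */
	int min = demo_ffs(rates_mask) - 1;	/* lowest basic rate index  */

	printf("mask 0x%04x -> max %d, min %d, RESP_RATE byte 0x%02x\n",
	       rates_mask, max, min, (max << 4) | min);
}

int main(void)
{
	show(0x0003);	/* rtl8180 default: 1, 2 Mbps   */
	show(0x01f3);	/* rtl8185/8187se default rates */
	return 0;
}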
+
+static void rtl8180_config_cardbus(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u16 reg16;
+ u8 reg8;
+
+ reg8 = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+ reg8 |= 1 << 1;
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg8);
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ rtl818x_iowrite16(priv, FEMR_SE, 0xffff);
+ } else {
+ reg16 = rtl818x_ioread16(priv, &priv->map->FEMR);
+ reg16 |= (1 << 15) | (1 << 14) | (1 << 4);
+ rtl818x_iowrite16(priv, &priv->map->FEMR, reg16);
+ }
+
+}
+
static int rtl8180_init_hw(struct ieee80211_hw *dev)
{
struct rtl8180_priv *priv = dev->priv;
u16 reg;
+ u32 reg32;
rtl818x_iowrite8(priv, &priv->map->CMD, 0);
rtl818x_ioread8(priv, &priv->map->CMD);
msleep(10);
/* reset */
- rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
+ rtl8180_int_disable(dev);
rtl818x_ioread8(priv, &priv->map->CMD);
reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -390,31 +794,45 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
msleep(200);
if (rtl818x_ioread8(priv, &priv->map->CONFIG3) & (1 << 3)) {
- /* For cardbus */
- reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
- reg |= 1 << 1;
- rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
- reg = rtl818x_ioread16(priv, &priv->map->FEMR);
- reg |= (1 << 15) | (1 << 14) | (1 << 4);
- rtl818x_iowrite16(priv, &priv->map->FEMR, reg);
+ rtl8180_config_cardbus(dev);
}
- rtl818x_iowrite8(priv, &priv->map->MSR, 0);
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
+ else
+ rtl818x_iowrite8(priv, &priv->map->MSR, 0);
- if (!priv->r8185)
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
rtl8180_set_anaparam(priv, priv->anaparam);
rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma);
- rtl818x_iowrite32(priv, &priv->map->TBDA, priv->tx_ring[3].dma);
- rtl818x_iowrite32(priv, &priv->map->THPDA, priv->tx_ring[2].dma);
- rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma);
- rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma);
+ /* mac80211 queues have higher prio for lower indexes. The last queue
+ * (that mac80211 is not aware of) is reserved for beacons (and has
+ * the highest priority on the NIC)
+ */
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8187SE) {
+ rtl818x_iowrite32(priv, &priv->map->TBDA,
+ priv->tx_ring[1].dma);
+ rtl818x_iowrite32(priv, &priv->map->TLPDA,
+ priv->tx_ring[0].dma);
+ } else {
+ rtl818x_iowrite32(priv, &priv->map->TBDA,
+ priv->tx_ring[4].dma);
+ rtl818x_iowrite32(priv, &priv->map->TVODA,
+ priv->tx_ring[0].dma);
+ rtl818x_iowrite32(priv, &priv->map->TVIDA,
+ priv->tx_ring[1].dma);
+ rtl818x_iowrite32(priv, &priv->map->TBEDA,
+ priv->tx_ring[2].dma);
+ rtl818x_iowrite32(priv, &priv->map->TBKDA,
+ priv->tx_ring[3].dma);
+ }
/* TODO: necessary? specs indicate not */
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
reg = rtl818x_ioread8(priv, &priv->map->CONFIG2);
rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg & ~(1 << 3));
- if (priv->r8185) {
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
reg = rtl818x_ioread8(priv, &priv->map->CONFIG2);
rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg | (1 << 4));
}
@@ -426,13 +844,17 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
- if (priv->r8185) {
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0);
rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0x81);
- rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0);
+ } else {
+ rtl818x_iowrite8(priv, &priv->map->SECURITY, 0);
- rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
+ rtl818x_iowrite8(priv, &priv->map->PHY_DELAY, 0x6);
+ rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, 0x4C);
+ }
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
/* TODO: set ClkRun enable? necessary? */
reg = rtl818x_ioread8(priv, &priv->map->GP_ENABLE);
rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, reg & ~(1 << 6));
@@ -440,28 +862,90 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | (1 << 2));
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
- } else {
- rtl818x_iowrite16(priv, &priv->map->BRSR, 0x1);
- rtl818x_iowrite8(priv, &priv->map->SECURITY, 0);
+ }
- rtl818x_iowrite8(priv, &priv->map->PHY_DELAY, 0x6);
- rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, 0x4C);
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+
+ /* set the auto rate fallback bitmask from 1M to 54 Mb/s */
+ rtl818x_iowrite16(priv, ARFR, 0xFFF);
+ rtl818x_ioread16(priv, ARFR);
+
+ /* stop unused queues (no dma alloc) */
+ rtl818x_iowrite8(priv, &priv->map->TPPOLL_STOP,
+ RTL818x_TPPOLL_STOP_MG | RTL818x_TPPOLL_STOP_HI);
+
+ rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0x00);
+ rtl818x_iowrite16(priv, &priv->map->TID_AC_MAP, 0xFA50);
+
+ rtl818x_iowrite16(priv, &priv->map->INT_MIG, 0);
+
+ /* some black magic here.. */
+ rtl8187se_mac_config(dev);
+
+ rtl818x_iowrite16(priv, RFSW_CTRL, 0x569A);
+ rtl818x_ioread16(priv, RFSW_CTRL);
+
+ rtl8180_set_anaparam(priv, RTL8225SE_ANAPARAM_ON);
+ rtl8180_set_anaparam2(priv, RTL8225SE_ANAPARAM2_ON);
+ rtl8180_set_anaparam3(priv, RTL8225SE_ANAPARAM3);
+
+
+ rtl818x_iowrite8(priv, &priv->map->CONFIG5,
+ rtl818x_ioread8(priv, &priv->map->CONFIG5) & 0x7F);
+
+ /* probably this switches the LED on */
+ rtl818x_iowrite8(priv, &priv->map->PGSELECT,
+ rtl818x_ioread8(priv, &priv->map->PGSELECT) | 0x08);
+
+ rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
+ rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1BFF);
+ rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488);
+
+ rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x4003);
+
+ /* the reference code (MAC hardcode table) writes
+ * this reg by doing byte-wide accesses.
+ * It does so just for the lowest and highest bytes..
+ */
+ reg32 = rtl818x_ioread32(priv, &priv->map->RF_PARA);
+ reg32 &= 0x00ffff00;
+ reg32 |= 0xb8000054;
+ rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
}
priv->rf->init(dev);
- if (priv->r8185)
- rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
+
+ /* default basic rates are 1,2 Mbps for rtl8180. 1,2,6,9,12,18,24 Mbps
+ * otherwise. bitmask 0x3 and 0x01f3 respectively.
+ * NOTE: currently the rtl8225 RF code changes basic rates, so we need to do
+ * this after rf init.
+ * TODO: try to find out whether RF code really needs to do this..
+ */
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
+ rtl8180_conf_basic_rates(dev, 0x3);
+ else
+ rtl8180_conf_basic_rates(dev, 0x1f3);
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ rtl8187se_set_antenna_config(dev,
+ priv->antenna_diversity_default,
+ priv->antenna_diversity_en);
return 0;
}
static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
{
struct rtl8180_priv *priv = dev->priv;
- struct rtl8180_rx_desc *entry;
+ struct rtl818x_rx_cmd_desc *entry;
int i;
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ priv->rx_ring_sz = sizeof(struct rtl8187se_rx_desc);
+ else
+ priv->rx_ring_sz = sizeof(struct rtl8180_rx_desc);
+
priv->rx_ring = pci_alloc_consistent(priv->pdev,
- sizeof(*priv->rx_ring) * 32,
+ priv->rx_ring_sz * 32,
&priv->rx_ring_dma);
if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
@@ -469,20 +953,28 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
return -ENOMEM;
}
- memset(priv->rx_ring, 0, sizeof(*priv->rx_ring) * 32);
+ memset(priv->rx_ring, 0, priv->rx_ring_sz * 32);
priv->rx_idx = 0;
for (i = 0; i < 32; i++) {
struct sk_buff *skb = dev_alloc_skb(MAX_RX_SIZE);
dma_addr_t *mapping;
- entry = &priv->rx_ring[i];
- if (!skb)
- return 0;
-
+ entry = priv->rx_ring + priv->rx_ring_sz*i;
+ if (!skb) {
+ wiphy_err(dev->wiphy, "Cannot allocate RX skb\n");
+ return -ENOMEM;
+ }
priv->rx_buf[i] = skb;
mapping = (dma_addr_t *)skb->cb;
*mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, *mapping)) {
+ kfree_skb(skb);
+ wiphy_err(dev->wiphy, "Cannot map DMA for RX skb\n");
+ return -ENOMEM;
+ }
+
entry->rx_buf = cpu_to_le32(*mapping);
entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
MAX_RX_SIZE);
@@ -507,7 +999,7 @@ static void rtl8180_free_rx_ring(struct ieee80211_hw *dev)
kfree_skb(skb);
}
- pci_free_consistent(priv->pdev, sizeof(*priv->rx_ring) * 32,
+ pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
priv->rx_ring, priv->rx_ring_dma);
priv->rx_ring = NULL;
}
@@ -571,7 +1063,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
if (ret)
return ret;
- for (i = 0; i < 4; i++)
+ for (i = 0; i < (dev->queues + 1); i++)
if ((ret = rtl8180_init_tx_ring(dev, i, 16)))
goto err_free_rings;
@@ -579,23 +1071,28 @@ static int rtl8180_start(struct ieee80211_hw *dev)
if (ret)
goto err_free_rings;
- rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma);
- rtl818x_iowrite32(priv, &priv->map->TBDA, priv->tx_ring[3].dma);
- rtl818x_iowrite32(priv, &priv->map->THPDA, priv->tx_ring[2].dma);
- rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma);
- rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma);
-
- ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ ret = request_irq(priv->pdev->irq, rtl8187se_interrupt,
IRQF_SHARED, KBUILD_MODNAME, dev);
+ } else {
+ ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ }
+
if (ret) {
wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
goto err_free_rings;
}
- rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
+ rtl8180_int_enable(dev);
- rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
- rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
+ /* in the rtl8187se, the MAR regs offset holds the management
+ * TX descriptor DMA address..
+ */
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8187SE) {
+ rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
+ rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
+ }
reg = RTL818X_RX_CONF_ONLYERLPKT |
RTL818X_RX_CONF_RX_AUTORESETPHY |
@@ -605,27 +1102,42 @@ static int rtl8180_start(struct ieee80211_hw *dev)
RTL818X_RX_CONF_BROADCAST |
RTL818X_RX_CONF_NICMAC;
- if (priv->r8185)
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185)
reg |= RTL818X_RX_CONF_CSDM1 | RTL818X_RX_CONF_CSDM2;
- else {
+ else if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE1)
? RTL818X_RX_CONF_CSDM1 : 0;
reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE2)
? RTL818X_RX_CONF_CSDM2 : 0;
+ } else {
+ reg &= ~(RTL818X_RX_CONF_CSDM1 | RTL818X_RX_CONF_CSDM2);
}
priv->rx_conf = reg;
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
- if (priv->r8185) {
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+
+ /* CW is not on a per-packet basis.
+ * in rtl8185 the CW_VALUE reg is used.
+ * in rtl8187se the AC param regs are used.
+ */
+ reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
+ /* the retry limit IS on a per-packet basis.
+ * the short and long retry limits in the
+ * TX_CONF reg are ignored
+ */
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ /* TX antenna and TX gain are not on a per-packet basis.
+ * The TX antenna is selected by the ANTSEL reg (RX in BB regs).
+ * TX gain is selected with the CCK_TX_AGC and OFDM_TX_AGC regs
+ */
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg |= RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
@@ -637,11 +1149,16 @@ static int rtl8180_start(struct ieee80211_hw *dev)
reg |= (6 << 21 /* MAX TX DMA */) |
RTL818X_TX_CONF_NO_ICV;
- if (priv->r8185)
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ reg |= 1<<30; /* "duration procedure mode" */
+
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180)
reg &= ~RTL818X_TX_CONF_PROBE_DTS;
else
reg &= ~RTL818X_TX_CONF_HW_SEQNUM;
+ reg &= ~RTL818X_TX_CONF_DISCW;
+
/* different meaning, same value on both rtl8185 and rtl8180 */
reg &= ~RTL818X_TX_CONF_SAT_HWPLCP;
@@ -656,7 +1173,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
err_free_rings:
rtl8180_free_rx_ring(dev);
- for (i = 0; i < 4; i++)
+ for (i = 0; i < (dev->queues + 1); i++)
if (priv->tx_ring[i].desc)
rtl8180_free_tx_ring(dev, i);
@@ -669,7 +1186,7 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
u8 reg;
int i;
- rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
+ rtl8180_int_disable(dev);
reg = rtl818x_ioread8(priv, &priv->map->CMD);
reg &= ~RTL818X_CMD_TX_ENABLE;
@@ -686,7 +1203,7 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
free_irq(priv->pdev->irq, dev);
rtl8180_free_rx_ring(dev);
- for (i = 0; i < 4; i++)
+ for (i = 0; i < (dev->queues + 1); i++)
rtl8180_free_tx_ring(dev, i);
}
@@ -794,6 +1311,123 @@ static int rtl8180_config(struct ieee80211_hw *dev, u32 changed)
return 0;
}
+static void rtl8187se_conf_ac_parm(struct ieee80211_hw *dev, u8 queue)
+{
+ const struct ieee80211_tx_queue_params *params;
+ struct rtl8180_priv *priv = dev->priv;
+
+ /* hw value */
+ u32 ac_param;
+
+ u8 aifs;
+ u8 txop;
+ u8 cw_min, cw_max;
+
+ params = &priv->queue_param[queue];
+
+ cw_min = fls(params->cw_min);
+ cw_max = fls(params->cw_max);
+
+ aifs = 10 + params->aifs * priv->slot_time;
+
+ /* TODO: check if the HW txop is in us (mult by 32) */
+ txop = params->txop;
+
+ ac_param = txop << AC_PARAM_TXOP_LIMIT_SHIFT |
+ cw_max << AC_PARAM_ECW_MAX_SHIFT |
+ cw_min << AC_PARAM_ECW_MIN_SHIFT |
+ aifs << AC_PARAM_AIFS_SHIFT;
+
+ switch (queue) {
+ case IEEE80211_AC_BK:
+ rtl818x_iowrite32(priv, &priv->map->AC_BK_PARAM, ac_param);
+ break;
+ case IEEE80211_AC_BE:
+ rtl818x_iowrite32(priv, &priv->map->AC_BE_PARAM, ac_param);
+ break;
+ case IEEE80211_AC_VI:
+ rtl818x_iowrite32(priv, &priv->map->AC_VI_PARAM, ac_param);
+ break;
+ case IEEE80211_AC_VO:
+ rtl818x_iowrite32(priv, &priv->map->AC_VO_PARAM, ac_param);
+ break;
+ }
+}
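An illustrative sketch, not part of the patch: how rtl8187se_conf_ac_parm() assembles the 32-bit AC parameter word. The shift values below are assumptions for the demo only (the real AC_PARAM_*_SHIFT constants live in rtl8180.h and are not shown in this hunk); the aifs arithmetic (10us SIFS plus aifsn slot times) and the fls() encoding of cw_min/cw_max mirror the function above.

#include <stdio.h>
#include <stdint.h>

/* assumed layout for the demo only; check AC_PARAM_*_SHIFT in rtl8180.h */
#define DEMO_TXOP_SHIFT    16
#define DEMO_ECW_MAX_SHIFT 12
#define DEMO_ECW_MIN_SHIFT  8
#define DEMO_AIFS_SHIFT     0

static int demo_fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

static uint32_t demo_ac_param(uint16_t cw_min, uint16_t cw_max,
			      uint8_t aifsn, uint8_t txop, uint8_t slot_time)
{
	uint8_t ecw_min = demo_fls(cw_min);	/* CW stored as exponent */
	uint8_t ecw_max = demo_fls(cw_max);
	uint8_t aifs = 10 + aifsn * slot_time;	/* in us: SIFS + n slots */

	return (uint32_t)txop << DEMO_TXOP_SHIFT |
	       (uint32_t)ecw_max << DEMO_ECW_MAX_SHIFT |
	       (uint32_t)ecw_min << DEMO_ECW_MIN_SHIFT |
	       (uint32_t)aifs << DEMO_AIFS_SHIFT;
}

int main(void)
{
	/* typical BE parameters: cw 15..1023, aifsn 3, no txop, 9us slots */
	printf("AC_BE param = 0x%08x\n", demo_ac_param(15, 1023, 3, 0, 9));
	return 0;
}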
+
+static int rtl8180_conf_tx(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u8 cw_min, cw_max;
+
+ /* nothing to do ? */
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
+ return 0;
+
+ cw_min = fls(params->cw_min);
+ cw_max = fls(params->cw_max);
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ priv->queue_param[queue] = *params;
+ rtl8187se_conf_ac_parm(dev, queue);
+ } else
+ rtl818x_iowrite8(priv, &priv->map->CW_VAL,
+ (cw_max << 4) | cw_min);
+ return 0;
+}
+
+static void rtl8180_conf_erp(struct ieee80211_hw *dev,
+ struct ieee80211_bss_conf *info)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u8 sifs, difs;
+ int eifs;
+ u8 hw_eifs;
+
+ /* TODO: should we do something ? */
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
+ return;
+
+ /* I _hope_ this means 10uS for the HW.
+ * In reference code it is 0x22 for
+ * both rtl8187L and rtl8187SE
+ */
+ sifs = 0x22;
+
+ if (info->use_short_slot)
+ priv->slot_time = 9;
+ else
+ priv->slot_time = 20;
+
+ /* 10 is SIFS time in uS */
+ difs = 10 + 2 * priv->slot_time;
+ eifs = 10 + difs + priv->ack_time;
+
+ /* HW should use 4uS units for EIFS (I'm sure for rtl8185)*/
+ hw_eifs = DIV_ROUND_UP(eifs, 4);
+
+
+ rtl818x_iowrite8(priv, &priv->map->SLOT, priv->slot_time);
+ rtl818x_iowrite8(priv, &priv->map->SIFS, sifs);
+ rtl818x_iowrite8(priv, &priv->map->DIFS, difs);
+
+ /* from reference code. set ack timeout reg = eifs reg */
+ rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, hw_eifs);
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ rtl818x_iowrite8(priv, &priv->map->EIFS_8187SE, hw_eifs);
+ else if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
+ /* rtl8187/rtl8185 HW bug. After EIFS has elapsed,
+ * the HW still waits for DIFS.
+ * HW uses 4uS units for EIFS.
+ */
+ hw_eifs = DIV_ROUND_UP(eifs - difs, 4);
+
+ rtl818x_iowrite8(priv, &priv->map->EIFS, hw_eifs);
+ }
+}
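An illustrative sketch, not part of the patch: the DIFS/EIFS arithmetic from rtl8180_conf_erp() worked through for both slot times. ack_time is treated as an input here (the driver derives it from ieee80211_generic_frame_duration() minus SIFS), and the 8185 quirk of programming EIFS minus DIFS is shown alongside the plain 8187SE value.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void demo_erp(int short_slot, int ack_time)
{
	int slot = short_slot ? 9 : 20;		/* us */
	int difs = 10 + 2 * slot;		/* SIFS + 2 slots */
	int eifs = 10 + difs + ack_time;	/* SIFS + DIFS + ACK time */

	printf("slot %2d: DIFS %2d, EIFS %3d -> 4us units: %d (8187se), %d (8185)\n",
	       slot, difs, eifs,
	       DIV_ROUND_UP(eifs, 4),		/* EIFS_8187SE register      */
	       DIV_ROUND_UP(eifs - difs, 4));	/* 8185: the HW re-adds DIFS */
}

int main(void)
{
	int ack_time = 112;	/* example input only; the driver gets this from mac80211 */

	demo_erp(1, ack_time);
	demo_erp(0, ack_time);
	return 0;
}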
+
static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -818,11 +1452,40 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
reg = RTL818X_MSR_INFRA;
} else
reg = RTL818X_MSR_NO_LINK;
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ reg |= RTL818X_MSR_ENEDCA;
+
rtl818x_iowrite8(priv, &priv->map->MSR, reg);
}
- if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp)
- priv->rf->conf_erp(dev, info);
+ if (changed & BSS_CHANGED_BASIC_RATES)
+ rtl8180_conf_basic_rates(dev, info->basic_rates);
+
+ if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_ERP_PREAMBLE)) {
+
+ /* when the preamble changes, the acktime duration changes, and erp
+ * must be recalculated. ACK time is calculated at the lowest rate.
+ * Since mac80211 includes the SIFS time we remove it (-10)
+ */
+ priv->ack_time =
+ le16_to_cpu(ieee80211_generic_frame_duration(dev,
+ priv->vif,
+ IEEE80211_BAND_2GHZ, 10,
+ &priv->rates[0])) - 10;
+
+ rtl8180_conf_erp(dev, info);
+
+ /* mac80211 supplies aifs_n to the driver and calls the
+ * conf_tx callback when aifs_n changes, NOT
+ * when aifs changes.
+ * Aifs should be recalculated if the slot time changes.
+ */
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ for (i = 0; i < 4; i++)
+ rtl8187se_conf_ac_parm(dev, i);
+ }
+ }
if (changed & BSS_CHANGED_BEACON_ENABLED)
vif_priv->enable_beacon = info->enable_beacon;
@@ -880,6 +1543,7 @@ static const struct ieee80211_ops rtl8180_ops = {
.remove_interface = rtl8180_remove_interface,
.config = rtl8180_config,
.bss_info_changed = rtl8180_bss_info_changed,
+ .conf_tx = rtl8180_conf_tx,
.prepare_multicast = rtl8180_prepare_multicast,
.configure_filter = rtl8180_configure_filter,
.get_tsf = rtl8180_get_tsf,
@@ -887,8 +1551,7 @@ static const struct ieee80211_ops rtl8180_ops = {
static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
- struct ieee80211_hw *dev = eeprom->data;
- struct rtl8180_priv *priv = dev->priv;
+ struct rtl8180_priv *priv = eeprom->data;
u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
eeprom->reg_data_in = reg & RTL818X_EEPROM_CMD_WRITE;
@@ -899,8 +1562,7 @@ static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
static void rtl8180_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
- struct ieee80211_hw *dev = eeprom->data;
- struct rtl8180_priv *priv = dev->priv;
+ struct rtl8180_priv *priv = eeprom->data;
u8 reg = 2 << 6;
if (eeprom->reg_data_in)
@@ -917,6 +1579,83 @@ static void rtl8180_eeprom_register_write(struct eeprom_93cx6 *eeprom)
udelay(10);
}
+static void rtl8180_eeprom_read(struct rtl8180_priv *priv)
+{
+ struct eeprom_93cx6 eeprom;
+ int eeprom_cck_table_adr;
+ u16 eeprom_val;
+ int i;
+
+ eeprom.data = priv;
+ eeprom.register_read = rtl8180_eeprom_register_read;
+ eeprom.register_write = rtl8180_eeprom_register_write;
+ if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
+ eeprom.width = PCI_EEPROM_WIDTH_93C66;
+ else
+ eeprom.width = PCI_EEPROM_WIDTH_93C46;
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_PROGRAM);
+ rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
+ udelay(10);
+
+ eeprom_93cx6_read(&eeprom, 0x06, &eeprom_val);
+ eeprom_val &= 0xFF;
+ priv->rf_type = eeprom_val;
+
+ eeprom_93cx6_read(&eeprom, 0x17, &eeprom_val);
+ priv->csthreshold = eeprom_val >> 8;
+
+ eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)priv->mac_addr, 3);
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ eeprom_cck_table_adr = 0x30;
+ else
+ eeprom_cck_table_adr = 0x10;
+
+ /* CCK TX power */
+ for (i = 0; i < 14; i += 2) {
+ u16 txpwr;
+ eeprom_93cx6_read(&eeprom, eeprom_cck_table_adr + (i >> 1),
+ &txpwr);
+ priv->channels[i].hw_value = txpwr & 0xFF;
+ priv->channels[i + 1].hw_value = txpwr >> 8;
+ }
+
+ /* OFDM TX power */
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
+ for (i = 0; i < 14; i += 2) {
+ u16 txpwr;
+ eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr);
+ priv->channels[i].hw_value |= (txpwr & 0xFF) << 8;
+ priv->channels[i + 1].hw_value |= txpwr & 0xFF00;
+ }
+ }
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
+ __le32 anaparam;
+ eeprom_93cx6_multiread(&eeprom, 0xD, (__le16 *)&anaparam, 2);
+ priv->anaparam = le32_to_cpu(anaparam);
+ eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
+ }
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ eeprom_93cx6_read(&eeprom, 0x3F, &eeprom_val);
+ priv->antenna_diversity_en = !!(eeprom_val & 0x100);
+ priv->antenna_diversity_default = (eeprom_val & 0xC00) == 0x400;
+
+ eeprom_93cx6_read(&eeprom, 0x7C, &eeprom_val);
+ priv->xtal_out = eeprom_val & 0xF;
+ priv->xtal_in = (eeprom_val & 0xF0) >> 4;
+ priv->xtal_cal = !!(eeprom_val & 0x1000);
+ priv->thermal_meter_val = (eeprom_val & 0xF00) >> 8;
+ priv->thermal_meter_en = !!(eeprom_val & 0x2000);
+ }
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_NORMAL);
+}
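An illustrative sketch, not part of the patch: how the per-channel TX power read above ends up packed into channels[].hw_value. Each EEPROM word carries the CCK power for two adjacent channels (low byte, high byte), and the OFDM power is later OR'ed into the upper byte of each hw_value; the EEPROM contents below are made up for the demo.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t hw_value[14] = {0};
	/* pretend EEPROM contents: one 16-bit word per pair of channels */
	uint16_t cck_words[7]  = {0x1211, 0x1413, 0x1615, 0x1817,
				  0x1a19, 0x1c1b, 0x1e1d};
	uint16_t ofdm_words[7] = {0x2221, 0x2423, 0x2625, 0x2827,
				  0x2a29, 0x2c2b, 0x2e2d};
	int i;

	for (i = 0; i < 14; i += 2) {
		uint16_t txpwr = cck_words[i >> 1];

		hw_value[i]     = txpwr & 0xFF;		/* CCK, even channel */
		hw_value[i + 1] = txpwr >> 8;		/* CCK, odd channel  */
	}
	for (i = 0; i < 14; i += 2) {
		uint16_t txpwr = ofdm_words[i >> 1];

		hw_value[i]     |= (txpwr & 0xFF) << 8;	/* OFDM in high byte */
		hw_value[i + 1] |= txpwr & 0xFF00;
	}
	for (i = 0; i < 14; i++)
		printf("chan %2d: cck 0x%02x ofdm 0x%02x\n",
		       i + 1, hw_value[i] & 0xFF, hw_value[i] >> 8);
	return 0;
}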
+
static int rtl8180_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -924,12 +1663,9 @@ static int rtl8180_probe(struct pci_dev *pdev,
struct rtl8180_priv *priv;
unsigned long mem_addr, mem_len;
unsigned int io_addr, io_len;
- int err, i;
- struct eeprom_93cx6 eeprom;
+ int err;
const char *chip_name, *rf_name = NULL;
u32 reg;
- u16 eeprom_val;
- u8 mac_addr[ETH_ALEN];
err = pci_enable_device(pdev);
if (err) {
@@ -1011,7 +1747,6 @@ static int rtl8180_probe(struct pci_dev *pdev,
dev->vif_data_size = sizeof(struct rtl8180_vif);
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
- dev->queues = 1;
dev->max_signal = 65;
reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
@@ -1019,43 +1754,55 @@ static int rtl8180_probe(struct pci_dev *pdev,
switch (reg) {
case RTL818X_TX_CONF_R8180_ABCD:
chip_name = "RTL8180";
+ priv->chip_family = RTL818X_CHIP_FAMILY_RTL8180;
break;
+
case RTL818X_TX_CONF_R8180_F:
chip_name = "RTL8180vF";
+ priv->chip_family = RTL818X_CHIP_FAMILY_RTL8180;
break;
+
case RTL818X_TX_CONF_R8185_ABC:
chip_name = "RTL8185";
+ priv->chip_family = RTL818X_CHIP_FAMILY_RTL8185;
break;
+
case RTL818X_TX_CONF_R8185_D:
chip_name = "RTL8185vD";
+ priv->chip_family = RTL818X_CHIP_FAMILY_RTL8185;
+ break;
+
+ case RTL818X_TX_CONF_RTL8187SE:
+ chip_name = "RTL8187SE";
+ priv->chip_family = RTL818X_CHIP_FAMILY_RTL8187SE;
break;
+
default:
printk(KERN_ERR "%s (rtl8180): Unknown chip! (0x%x)\n",
pci_name(pdev), reg >> 25);
goto err_iounmap;
}
- priv->r8185 = reg & RTL818X_TX_CONF_R8185_ABC;
- if (priv->r8185) {
+ /* we declare to mac80211 all the queues except the beacon queue,
+ * which will eventually be handled by the driver.
+ * TX rings are arranged in such a way that the lower the IDX,
+ * the higher the priority, in order to achieve a direct mapping
+ * with mac80211; however the beacon queue is an exception and it
+ * is mapped on the highest tx ring IDX.
+ */
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ dev->queues = RTL8187SE_NR_TX_QUEUES - 1;
+ else
+ dev->queues = RTL8180_NR_TX_QUEUES - 1;
+
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
pci_try_set_mwi(pdev);
}
- eeprom.data = dev;
- eeprom.register_read = rtl8180_eeprom_register_read;
- eeprom.register_write = rtl8180_eeprom_register_write;
- if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
- eeprom.width = PCI_EEPROM_WIDTH_93C66;
- else
- eeprom.width = PCI_EEPROM_WIDTH_93C46;
-
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_PROGRAM);
- rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
- udelay(10);
+ rtl8180_eeprom_read(priv);
- eeprom_93cx6_read(&eeprom, 0x06, &eeprom_val);
- eeprom_val &= 0xFF;
- switch (eeprom_val) {
+ switch (priv->rf_type) {
case 1: rf_name = "Intersil";
break;
case 2: rf_name = "RFMD";
@@ -1066,14 +1813,18 @@ static int rtl8180_probe(struct pci_dev *pdev,
break;
case 5: priv->rf = &grf5101_rf_ops;
break;
- case 9: priv->rf = rtl8180_detect_rf(dev);
+ case 9:
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ priv->rf = rtl8187se_detect_rf(dev);
+ else
+ priv->rf = rtl8180_detect_rf(dev);
break;
case 10:
rf_name = "RTL8255";
break;
default:
printk(KERN_ERR "%s (rtl8180): Unknown RF! (0x%x)\n",
- pci_name(pdev), eeprom_val);
+ pci_name(pdev), priv->rf_type);
goto err_iounmap;
}
@@ -1083,42 +1834,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
goto err_iounmap;
}
- eeprom_93cx6_read(&eeprom, 0x17, &eeprom_val);
- priv->csthreshold = eeprom_val >> 8;
- if (!priv->r8185) {
- __le32 anaparam;
- eeprom_93cx6_multiread(&eeprom, 0xD, (__le16 *)&anaparam, 2);
- priv->anaparam = le32_to_cpu(anaparam);
- eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
- }
-
- eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)mac_addr, 3);
- if (!is_valid_ether_addr(mac_addr)) {
+ if (!is_valid_ether_addr(priv->mac_addr)) {
printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
" randomly generated MAC addr\n", pci_name(pdev));
- eth_random_addr(mac_addr);
- }
- SET_IEEE80211_PERM_ADDR(dev, mac_addr);
-
- /* CCK TX power */
- for (i = 0; i < 14; i += 2) {
- u16 txpwr;
- eeprom_93cx6_read(&eeprom, 0x10 + (i >> 1), &txpwr);
- priv->channels[i].hw_value = txpwr & 0xFF;
- priv->channels[i + 1].hw_value = txpwr >> 8;
- }
-
- /* OFDM TX power */
- if (priv->r8185) {
- for (i = 0; i < 14; i += 2) {
- u16 txpwr;
- eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr);
- priv->channels[i].hw_value |= (txpwr & 0xFF) << 8;
- priv->channels[i + 1].hw_value |= txpwr & 0xFF00;
- }
+ eth_random_addr(priv->mac_addr);
}
-
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+ SET_IEEE80211_PERM_ADDR(dev, priv->mac_addr);
spin_lock_init(&priv->lock);
@@ -1130,12 +1851,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
}
wiphy_info(dev->wiphy, "hwaddr %pm, %s + %s\n",
- mac_addr, chip_name, priv->rf->name);
+ priv->mac_addr, chip_name, priv->rf->name);
return 0;
err_iounmap:
- iounmap(priv->map);
+ pci_iounmap(pdev, priv->map);
err_free_dev:
ieee80211_free_hw(dev);
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
index 30523314da43..291a55970d1a 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
@@ -24,27 +24,64 @@
#define ANAPARAM_PWR1_SHIFT 20
#define ANAPARAM_PWR1_MASK (0x7F << ANAPARAM_PWR1_SHIFT)
+/* rtl8180/rtl8185 have 3 queues + the beacon queue.
+ * mac80211 can use just one, + beacon = 2 total.
+ */
+#define RTL8180_NR_TX_QUEUES 2
+
+/* rtl8187SE has 6 queues + the beacon queue.
+ * mac80211 can use 4 QoS data queues, + beacon = 5 total
+ */
+#define RTL8187SE_NR_TX_QUEUES 5
+
+/* for static array allocation, this is the max of the above */
+#define RTL818X_NR_TX_QUEUES 5
+
struct rtl8180_tx_desc {
__le32 flags;
__le16 rts_duration;
__le16 plcp_len;
__le32 tx_buf;
- __le32 frame_len;
+ union{
+ __le32 frame_len;
+ struct {
+ __le16 frame_len_se;
+ __le16 frame_duration;
+ } __packed;
+ } __packed;
__le32 next_tx_desc;
u8 cw;
u8 retry_limit;
u8 agc;
u8 flags2;
- u32 reserved[2];
+ /* rsvd for 8180/8185.
+ * valid for 8187se but we don't use it
+ */
+ u32 reserved;
+ /* all rsvd for 8180/8185 */
+ __le16 flags3;
+ __le16 frag_qsize;
+} __packed;
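An illustrative sketch, not part of the patch: a compile-time check of the TX descriptor layout declared above, with the __le types swapped for plain fixed-width integers. The point is that the 8187SE's 16-bit frame_len_se/frame_duration pair exactly overlays the legacy 32-bit frame_len word, so the descriptor keeps the same 32-byte size for all chips.

#include <stdint.h>
#include <stddef.h>

struct demo_tx_desc {
	uint32_t flags;
	uint16_t rts_duration;
	uint16_t plcp_len;
	uint32_t tx_buf;
	union {
		uint32_t frame_len;		/* rtl8180/rtl8185 */
		struct {
			uint16_t frame_len_se;	/* rtl8187se */
			uint16_t frame_duration;
		} __attribute__((packed));
	} __attribute__((packed));
	uint32_t next_tx_desc;
	uint8_t cw;
	uint8_t retry_limit;
	uint8_t agc;
	uint8_t flags2;
	uint32_t reserved;
	uint16_t flags3;
	uint16_t frag_qsize;
} __attribute__((packed));

/* both views of the length field start at the same offset and the
 * union adds no padding, so the descriptor is 32 bytes either way
 */
_Static_assert(offsetof(struct demo_tx_desc, frame_len) ==
	       offsetof(struct demo_tx_desc, frame_len_se), "overlay");
_Static_assert(sizeof(struct demo_tx_desc) == 32, "tx desc size");

int main(void) { return 0; }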
+
+struct rtl818x_rx_cmd_desc {
+ __le32 flags;
+ u32 reserved;
+ __le32 rx_buf;
} __packed;
struct rtl8180_rx_desc {
__le32 flags;
__le32 flags2;
- union {
- __le32 rx_buf;
- __le64 tsft;
- };
+ __le64 tsft;
+
+} __packed;
+
+struct rtl8187se_rx_desc {
+ __le32 flags;
+ __le64 tsft;
+ __le32 flags2;
+ __le32 flags3;
+ u32 reserved[3];
} __packed;
struct rtl8180_tx_ring {
@@ -71,28 +108,45 @@ struct rtl8180_priv {
/* rtl8180 driver specific */
spinlock_t lock;
- struct rtl8180_rx_desc *rx_ring;
+ void *rx_ring;
+ u8 rx_ring_sz;
dma_addr_t rx_ring_dma;
unsigned int rx_idx;
struct sk_buff *rx_buf[32];
- struct rtl8180_tx_ring tx_ring[4];
+ struct rtl8180_tx_ring tx_ring[RTL818X_NR_TX_QUEUES];
struct ieee80211_channel channels[14];
struct ieee80211_rate rates[12];
struct ieee80211_supported_band band;
+ struct ieee80211_tx_queue_params queue_param[4];
struct pci_dev *pdev;
u32 rx_conf;
-
- int r8185;
+ u8 slot_time;
+ u16 ack_time;
+
+ enum {
+ RTL818X_CHIP_FAMILY_RTL8180,
+ RTL818X_CHIP_FAMILY_RTL8185,
+ RTL818X_CHIP_FAMILY_RTL8187SE,
+ } chip_family;
u32 anaparam;
u16 rfparam;
u8 csthreshold;
-
+ u8 mac_addr[ETH_ALEN];
+ u8 rf_type;
+ u8 xtal_out;
+ u8 xtal_in;
+ u8 xtal_cal;
+ u8 thermal_meter_val;
+ u8 thermal_meter_en;
+ u8 antenna_diversity_en;
+ u8 antenna_diversity_default;
/* sequence # */
u16 seqno;
};
void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam);
+void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2);
static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, u8 __iomem *addr)
{
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index d60a5f399022..9bda5bc78eda 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -282,6 +282,7 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
msleep(1); /* FIXME: optional? */
+ /* TODO: use the set_anaparam2 function in dev.c */
/* anaparam2 on */
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
@@ -730,32 +731,11 @@ static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
msleep(10);
}
-static void rtl8225_rf_conf_erp(struct ieee80211_hw *dev,
- struct ieee80211_bss_conf *info)
-{
- struct rtl8180_priv *priv = dev->priv;
-
- if (info->use_short_slot) {
- rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9);
- rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
- rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14);
- rtl818x_iowrite8(priv, &priv->map->EIFS, 81);
- rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0x73);
- } else {
- rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14);
- rtl818x_iowrite8(priv, &priv->map->SIFS, 0x44);
- rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24);
- rtl818x_iowrite8(priv, &priv->map->EIFS, 81);
- rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0xa5);
- }
-}
-
static const struct rtl818x_rf_ops rtl8225_ops = {
.name = "rtl8225",
.init = rtl8225_rf_init,
.stop = rtl8225_rf_stop,
.set_chan = rtl8225_rf_set_channel,
- .conf_erp = rtl8225_rf_conf_erp,
};
static const struct rtl818x_rf_ops rtl8225z2_ops = {
@@ -763,7 +743,6 @@ static const struct rtl818x_rf_ops rtl8225z2_ops = {
.init = rtl8225z2_rf_init,
.stop = rtl8225_rf_stop,
.set_chan = rtl8225_rf_set_channel,
- .conf_erp = rtl8225_rf_conf_erp,
};
const struct rtl818x_rf_ops * rtl8180_detect_rf(struct ieee80211_hw *dev)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c
new file mode 100644
index 000000000000..fde89866fa8d
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c
@@ -0,0 +1,475 @@
+
+/* Radio tuning for RTL8225 on RTL8187SE
+ *
+ * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
+ * Copyright 2014 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * Based on the r8180 and Realtek r8187se drivers, which are:
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
+ *
+ * Also based on the rtl8187 driver, which is:
+ * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+
+#include "rtl8180.h"
+#include "rtl8225se.h"
+
+#define PFX "rtl8225 (se) "
+
+static const u32 RF_GAIN_TABLE[] = {
+ 0x0096, 0x0076, 0x0056, 0x0036, 0x0016, 0x01f6, 0x01d6, 0x01b6,
+ 0x0196, 0x0176, 0x00F7, 0x00D7, 0x00B7, 0x0097, 0x0077, 0x0057,
+ 0x0037, 0x00FB, 0x00DB, 0x00BB, 0x00FF, 0x00E3, 0x00C3, 0x00A3,
+ 0x0083, 0x0063, 0x0043, 0x0023, 0x0003, 0x01E3, 0x01C3, 0x01A3,
+ 0x0183, 0x0163, 0x0143, 0x0123, 0x0103
+};
+
+static const u8 cck_ofdm_gain_settings[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
+ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
+ 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
+ 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
+};
+
+static const u8 rtl8225se_tx_gain_cck_ofdm[] = {
+ 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e
+};
+
+static const u8 rtl8225se_tx_power_cck[] = {
+ 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02,
+ 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02,
+ 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02,
+ 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02,
+ 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03,
+ 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03
+};
+
+static const u8 rtl8225se_tx_power_cck_ch14[] = {
+ 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00,
+ 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00,
+ 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00,
+ 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00,
+ 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00
+};
+
+static const u8 rtl8225se_tx_power_ofdm[] = {
+ 0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4
+};
+
+static const u32 rtl8225se_chan[] = {
+ 0x0080, 0x0100, 0x0180, 0x0200, 0x0280, 0x0300, 0x0380,
+ 0x0400, 0x0480, 0x0500, 0x0580, 0x0600, 0x0680, 0x074A,
+};
+
+static const u8 rtl8225sez2_tx_power_cck_ch14[] = {
+ 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00
+};
+
+static const u8 rtl8225sez2_tx_power_cck_B[] = {
+ 0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x04
+};
+
+static const u8 rtl8225sez2_tx_power_cck_A[] = {
+ 0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04
+};
+
+static const u8 rtl8225sez2_tx_power_cck[] = {
+ 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04
+};
+
+static const u8 ZEBRA_AGC[] = {
+ 0x7E, 0x7E, 0x7E, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A,
+ 0x79, 0x78, 0x77, 0x76, 0x75, 0x74, 0x73, 0x72,
+ 0x71, 0x70, 0x6F, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A,
+ 0x69, 0x68, 0x67, 0x66, 0x65, 0x64, 0x63, 0x62,
+ 0x48, 0x47, 0x46, 0x45, 0x44, 0x29, 0x28, 0x27,
+ 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x08, 0x07,
+ 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+ 0x0f, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x15, 0x16,
+ 0x17, 0x17, 0x18, 0x18, 0x19, 0x1a, 0x1a, 0x1b,
+ 0x1b, 0x1c, 0x1c, 0x1d, 0x1d, 0x1d, 0x1e, 0x1e,
+ 0x1f, 0x1f, 0x1f, 0x20, 0x20, 0x20, 0x20, 0x21,
+ 0x21, 0x21, 0x22, 0x22, 0x22, 0x23, 0x23, 0x24,
+ 0x24, 0x25, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27,
+ 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F
+};
+
+static const u8 OFDM_CONFIG[] = {
+ 0x10, 0x0F, 0x0A, 0x0C, 0x14, 0xFA, 0xFF, 0x50,
+ 0x00, 0x50, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00,
+ 0x40, 0x00, 0x40, 0x00, 0x00, 0x00, 0xA8, 0x26,
+ 0x32, 0x33, 0x06, 0xA5, 0x6F, 0x55, 0xC8, 0xBB,
+ 0x0A, 0xE1, 0x2C, 0x4A, 0x86, 0x83, 0x34, 0x00,
+ 0x4F, 0x24, 0x6F, 0xC2, 0x03, 0x40, 0x80, 0x00,
+ 0xC0, 0xC1, 0x58, 0xF1, 0x00, 0xC4, 0x90, 0x3e,
+ 0xD8, 0x3C, 0x7B, 0x10, 0x10
+};
+
+static void rtl8187se_three_wire_io(struct ieee80211_hw *dev, u8 *data,
+ u8 len, bool write)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ int i;
+ u8 tmp;
+
+ do {
+ for (i = 0; i < 5; i++) {
+ tmp = rtl818x_ioread8(priv, SW_3W_CMD1);
+ if (!(tmp & 0x3))
+ break;
+ udelay(10);
+ }
+ if (i == 5)
+ wiphy_err(dev->wiphy, PFX
+ "CmdReg: 0x%x RE/WE bits aren't clear\n", tmp);
+
+ tmp = rtl818x_ioread8(priv, &priv->map->rf_sw_config) | 0x02;
+ rtl818x_iowrite8(priv, &priv->map->rf_sw_config, tmp);
+
+ tmp = rtl818x_ioread8(priv, REG_ADDR1(0x84)) & 0xF7;
+ rtl818x_iowrite8(priv, REG_ADDR1(0x84), tmp);
+ if (write) {
+ if (len == 16) {
+ rtl818x_iowrite16(priv, SW_3W_DB0,
+ *(u16 *)data);
+ } else if (len == 64) {
+ rtl818x_iowrite32(priv, SW_3W_DB0_4,
+ *((u32 *)data));
+ rtl818x_iowrite32(priv, SW_3W_DB1_4,
+ *((u32 *)(data + 4)));
+ } else
+ wiphy_err(dev->wiphy, PFX
+ "Unimplemented length\n");
+ } else {
+ rtl818x_iowrite16(priv, SW_3W_DB0, *(u16 *)data);
+ }
+ if (write)
+ tmp = 2;
+ else
+ tmp = 1;
+ rtl818x_iowrite8(priv, SW_3W_CMD1, tmp);
+ for (i = 0; i < 5; i++) {
+ tmp = rtl818x_ioread8(priv, SW_3W_CMD1);
+ if (!(tmp & 0x3))
+ break;
+ udelay(10);
+ }
+ rtl818x_iowrite8(priv, SW_3W_CMD1, 0);
+ if (!write) {
+ *((u16 *)data) = rtl818x_ioread16(priv, SI_DATA_REG);
+ *((u16 *)data) &= 0x0FFF;
+ }
+ } while (0);
+}
+
+static u32 rtl8187se_rf_readreg(struct ieee80211_hw *dev, u8 addr)
+{
+ u32 dataread = addr & 0x0F;
+ rtl8187se_three_wire_io(dev, (u8 *)&dataread, 16, 0);
+ return dataread;
+}
+
+static void rtl8187se_rf_writereg(struct ieee80211_hw *dev, u8 addr, u32 data)
+{
+ u32 outdata = (data << 4) | (u32)(addr & 0x0F);
+ rtl8187se_three_wire_io(dev, (u8 *)&outdata, 16, 1);
+}
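An illustrative sketch, not part of the patch: the 16-bit word format used by the three-wire RF helpers above. The register address sits in the low 4 bits, the data in the upper 12, and read results are masked with 0x0FFF; the 0x074A example value is the channel-14 entry of rtl8225se_chan.

#include <stdio.h>
#include <stdint.h>

/* write word: data in bits 15..4, register address in bits 3..0 */
static uint16_t demo_3w_pack(uint8_t addr, uint16_t data)
{
	return (uint16_t)((data << 4) | (addr & 0x0F));
}

/* read result: only the low 12 bits are valid RF register data */
static uint16_t demo_3w_unpack(uint16_t raw)
{
	return raw & 0x0FFF;
}

int main(void)
{
	uint16_t w = demo_3w_pack(0x07, 0x074A);  /* e.g. channel 14 word */

	printf("packed write word: 0x%04x\n", w);
	printf("read-back data:    0x%04x\n", demo_3w_unpack(0x874A));
	return 0;
}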
+
+
+static void rtl8225se_write_zebra_agc(struct ieee80211_hw *dev)
+{
+ int i;
+
+ for (i = 0; i < 128; i++) {
+ rtl8225se_write_phy_ofdm(dev, 0xF, ZEBRA_AGC[i]);
+ rtl8225se_write_phy_ofdm(dev, 0xE, i+0x80);
+ rtl8225se_write_phy_ofdm(dev, 0xE, 0);
+ }
+}
+
+static void rtl8187se_write_ofdm_config(struct ieee80211_hw *dev)
+{
+ /* write OFDM_CONFIG table */
+ int i;
+
+ for (i = 0; i < 60; i++)
+ rtl8225se_write_phy_ofdm(dev, i, OFDM_CONFIG[i]);
+
+}
+
+static void rtl8225sez2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u8 cck_power, ofdm_power;
+
+ cck_power = priv->channels[channel - 1].hw_value & 0xFF;
+ if (cck_power > 35)
+ cck_power = 35;
+ rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
+ cck_ofdm_gain_settings[cck_power]);
+
+ usleep_range(1000, 5000);
+ ofdm_power = priv->channels[channel - 1].hw_value >> 8;
+ if (ofdm_power > 35)
+ ofdm_power = 35;
+
+ rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
+ cck_ofdm_gain_settings[ofdm_power]);
+ if (ofdm_power < 12) {
+ rtl8225se_write_phy_ofdm(dev, 7, 0x5C);
+ rtl8225se_write_phy_ofdm(dev, 9, 0x5C);
+ }
+ if (ofdm_power < 18) {
+ rtl8225se_write_phy_ofdm(dev, 7, 0x54);
+ rtl8225se_write_phy_ofdm(dev, 9, 0x54);
+ } else {
+ rtl8225se_write_phy_ofdm(dev, 7, 0x50);
+ rtl8225se_write_phy_ofdm(dev, 9, 0x50);
+ }
+
+ usleep_range(1000, 5000);
+}
+
+static void rtl8187se_write_rf_gain(struct ieee80211_hw *dev)
+{
+ int i;
+
+ for (i = 0; i <= 36; i++) {
+ rtl8187se_rf_writereg(dev, 0x01, i); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x02, RF_GAIN_TABLE[i]); mdelay(1);
+ }
+}
+
+static void rtl8187se_write_initial_gain(struct ieee80211_hw *dev,
+ int init_gain)
+{
+ switch (init_gain) {
+ default:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x26); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFA); mdelay(1);
+ break;
+ case 2:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x36); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFA); mdelay(1);
+ break;
+ case 3:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x36); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
+ break;
+ case 4:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x46); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
+ break;
+ case 5:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x46); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x96); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
+ break;
+ case 6:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x56); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x96); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
+ break;
+ case 7:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x56); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0xA6); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
+ break;
+ case 8:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x66); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0xB6); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
+ break;
+ }
+}
+
+void rtl8225se_rf_init(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u32 rf23, rf24;
+ u8 d_cut = 0;
+ u8 tmp;
+
+ /* Page 1 */
+ rtl8187se_rf_writereg(dev, 0x00, 0x013F); mdelay(1);
+ rf23 = rtl8187se_rf_readreg(dev, 0x08); mdelay(1);
+ rf24 = rtl8187se_rf_readreg(dev, 0x09); mdelay(1);
+ if (rf23 == 0x0818 && rf24 == 0x070C)
+ d_cut = 1;
+
+ wiphy_info(dev->wiphy, "RTL8225-SE version %s\n",
+ d_cut ? "D" : "not-D");
+
+ /* Page 0: reg 0 - 15 */
+ rtl8187se_rf_writereg(dev, 0x00, 0x009F); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x01, 0x06E0); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x03, 0x07F1); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x05, 0x0C72); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x06, 0x0AE6); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x07, 0x00CA); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x08, 0x0E1C); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x09, 0x02F0); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0A, 0x09D0); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0B, 0x01BA); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0C, 0x0640); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0E, 0x0020); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0F, 0x0990); mdelay(1);
+ /* page 1: reg 16-30 */
+ rtl8187se_rf_writereg(dev, 0x00, 0x013F); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x03, 0x0806); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x04, 0x03A7); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x05, 0x059B); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x06, 0x0081); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x07, 0x01A0); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0A, 0x0001); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0B, 0x0418); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0C, 0x0FBE); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0D, 0x0008); mdelay(1);
+ if (d_cut)
+ rtl8187se_rf_writereg(dev, 0x0E, 0x0807);
+ else
+ rtl8187se_rf_writereg(dev, 0x0E, 0x0806);
+ mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0F, 0x0ACC); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x00, 0x01D7); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x03, 0x0E00); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0E50); mdelay(1);
+
+ rtl8187se_write_rf_gain(dev);
+
+ rtl8187se_rf_writereg(dev, 0x05, 0x0203); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x06, 0x0200); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x0D, 0x0008); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x02, 0x088D); mdelay(221);
+ rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x07, 0x0220); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x07, 0x03E0); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x06, 0x00C1); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0A, 0x0001); mdelay(1);
+ if (priv->xtal_cal) {
+ tmp = (priv->xtal_in << 4) | (priv->xtal_out << 1) |
+ (1 << 11) | (1 << 9);
+ rtl8187se_rf_writereg(dev, 0x0F, tmp);
+ wiphy_info(dev->wiphy, "Xtal cal\n");
+ mdelay(1);
+ } else {
+ wiphy_info(dev->wiphy, "NO Xtal cal\n");
+ rtl8187se_rf_writereg(dev, 0x0F, 0x0ACC);
+ mdelay(1);
+ }
+ /* page 0 */
+ rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(31);
+ rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1);
+
+ rtl8187se_rf_writereg(dev, 0x00, 0x009F); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x01, 0x0000); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x02, 0x0000); mdelay(1);
+ /* power save parameters */
+ /* TODO: move to dev.c */
+ rtl818x_iowrite8(priv, REG_ADDR1(0x024E),
+ rtl818x_ioread8(priv, REG_ADDR1(0x24E)) & 0x9F);
+ rtl8225se_write_phy_cck(dev, 0x00, 0xC8);
+ rtl8225se_write_phy_cck(dev, 0x06, 0x1C);
+ rtl8225se_write_phy_cck(dev, 0x10, 0x78);
+ rtl8225se_write_phy_cck(dev, 0x2E, 0xD0);
+ rtl8225se_write_phy_cck(dev, 0x2F, 0x06);
+ rtl8225se_write_phy_cck(dev, 0x01, 0x46);
+
+ /* power control */
+ rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x10);
+ rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x1B);
+
+ rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
+ rtl8225se_write_phy_ofdm(dev, 0x00, 0x12);
+
+ rtl8225se_write_zebra_agc(dev);
+
+ rtl8225se_write_phy_ofdm(dev, 0x10, 0x00);
+
+ rtl8187se_write_ofdm_config(dev);
+
+ /* turn on RF */
+ rtl8187se_rf_writereg(dev, 0x00, 0x009F); udelay(500);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0972); udelay(500);
+ /* turn on RF again */
+ rtl8187se_rf_writereg(dev, 0x00, 0x009F); udelay(500);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0972); udelay(500);
+ /* turn on BB */
+ rtl8225se_write_phy_ofdm(dev, 0x10, 0x40);
+ rtl8225se_write_phy_ofdm(dev, 0x12, 0x40);
+
+ rtl8187se_write_initial_gain(dev, 4);
+}
+
+void rtl8225se_rf_stop(struct ieee80211_hw *dev)
+{
+ /* checked for 8187se */
+ struct rtl8180_priv *priv = dev->priv;
+
+ /* turn off BB RXIQ matrix to cut off rx signal */
+ rtl8225se_write_phy_ofdm(dev, 0x10, 0x00);
+ rtl8225se_write_phy_ofdm(dev, 0x12, 0x00);
+ /* turn off RF */
+ rtl8187se_rf_writereg(dev, 0x04, 0x0000);
+ rtl8187se_rf_writereg(dev, 0x00, 0x0000);
+
+ usleep_range(1000, 5000);
+ /* turn off A/D and D/A */
+ rtl8180_set_anaparam(priv, RTL8225SE_ANAPARAM_OFF);
+ rtl8180_set_anaparam2(priv, RTL8225SE_ANAPARAM2_OFF);
+}
+
+void rtl8225se_rf_set_channel(struct ieee80211_hw *dev,
+ struct ieee80211_conf *conf)
+{
+ int chan =
+ ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
+
+ rtl8225sez2_rf_set_tx_power(dev, chan);
+ rtl8187se_rf_writereg(dev, 0x7, rtl8225se_chan[chan - 1]);
+ if ((rtl8187se_rf_readreg(dev, 0x7) & 0x0F80) !=
+ rtl8225se_chan[chan - 1])
+ rtl8187se_rf_writereg(dev, 0x7, rtl8225se_chan[chan - 1]);
+ usleep_range(10000, 20000);
+}
+
+static const struct rtl818x_rf_ops rtl8225se_ops = {
+ .name = "rtl8225-se",
+ .init = rtl8225se_rf_init,
+ .stop = rtl8225se_rf_stop,
+ .set_chan = rtl8225se_rf_set_channel,
+};
+
+const struct rtl818x_rf_ops *rtl8187se_detect_rf(struct ieee80211_hw *dev)
+{
+ return &rtl8225se_ops;
+}
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h
new file mode 100644
index 000000000000..229400264088
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h
@@ -0,0 +1,61 @@
+
+/* Definitions for RTL8187SE hardware
+ *
+ * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
+ * Copyright 2014 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * Based on the r8180 and Realtek r8187se drivers, which are:
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
+ *
+ * Also based on the rtl8187 driver, which is:
+ * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RTL8187SE_RTL8225_H
+#define RTL8187SE_RTL8225_H
+
+#define RTL8225SE_ANAPARAM_ON 0xb0054d00
+#define RTL8225SE_ANAPARAM2_ON 0x000004c6
+
+/* all off except PLL */
+#define RTL8225SE_ANAPARAM_OFF 0xb0054dec
+/* all on including PLL */
+#define RTL8225SE_ANAPARAM_OFF2 0xb0054dfc
+
+#define RTL8225SE_ANAPARAM2_OFF 0x00ff04c6
+
+#define RTL8225SE_ANAPARAM3 0x10
+
+enum rtl8187se_power_state {
+ RTL8187SE_POWER_ON,
+ RTL8187SE_POWER_OFF,
+ RTL8187SE_POWER_SLEEP
+};
+
+static inline void rtl8225se_write_phy_ofdm(struct ieee80211_hw *dev,
+ u8 addr, u8 data)
+{
+ rtl8180_write_phy(dev, addr, data);
+}
+
+static inline void rtl8225se_write_phy_cck(struct ieee80211_hw *dev,
+ u8 addr, u8 data)
+{
+ rtl8180_write_phy(dev, addr, data | 0x10000);
+}
+
+
+const struct rtl818x_rf_ops *rtl8187se_detect_rf(struct ieee80211_hw *);
+void rtl8225se_rf_stop(struct ieee80211_hw *dev);
+void rtl8225se_rf_set_channel(struct ieee80211_hw *dev,
+ struct ieee80211_conf *conf);
+void rtl8225se_rf_conf_erp(struct ieee80211_hw *dev,
+ struct ieee80211_bss_conf *info);
+void rtl8225se_rf_init(struct ieee80211_hw *dev);
+
+#endif /* RTL8187SE_RTL8225_H */
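A minimal sketch of what the two inline helpers above boil down to, assuming rtl8180_write_phy() follows the usual rtl818x convention that OR-ing 0x10000 into the data word steers the write to the CCK rather than the OFDM register bank:

        /* Both helpers funnel into rtl8180_write_phy(); the CCK variant
         * only differs by setting bit 16 of the data word.
         */
        rtl8225se_write_phy_ofdm(dev, 0x10, 0x40);  /* OFDM reg 0x10 <- 0x40 */
        rtl8225se_write_phy_cck(dev, 0x00, 0xC8);   /* CCK  reg 0x00 <- 0xC8 (data | 0x10000) */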
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index fd78df813a85..0ca17cda48fa 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -592,7 +592,7 @@ static void rtl8187_set_anaparam(struct rtl8187_priv *priv, bool rfon)
rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam);
rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
if (priv->is_rtl8187b)
- rtl818x_iowrite8(priv, &priv->map->ANAPARAM3, anaparam3);
+ rtl818x_iowrite8(priv, &priv->map->ANAPARAM3A, anaparam3);
reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
@@ -785,7 +785,7 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
rtl818x_iowrite16(priv, (__le16 *)0xFF34, 0x0FFF);
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
/* Auto Rate Fallback Register (ARFR): 1M-54M setting */
@@ -943,8 +943,8 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
@@ -986,13 +986,13 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+ reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
@@ -1636,10 +1636,10 @@ static int rtl8187_probe(struct usb_interface *intf,
err_free_dmabuf:
kfree(priv->io_dmabuf);
- err_free_dev:
- ieee80211_free_hw(dev);
usb_set_intfdata(intf, NULL);
usb_put_dev(udev);
+ err_free_dev:
+ ieee80211_free_hw(dev);
return err;
}
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index ce23dfd42381..45ea4e1c4abe 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -16,30 +16,82 @@
#define RTL818X_H
struct rtl818x_csr {
+
u8 MAC[6];
u8 reserved_0[2];
- __le32 MAR[2];
- u8 RX_FIFO_COUNT;
- u8 reserved_1;
- u8 TX_FIFO_COUNT;
- u8 BQREQ;
- u8 reserved_2[4];
+
+ union {
+ __le32 MAR[2]; /* 0x8 */
+
+ struct{ /* rtl8187se */
+ u8 rf_sw_config; /* 0x8 */
+ u8 reserved_01[3];
+ __le32 TMGDA; /* 0xc */
+ } __packed;
+ } __packed;
+
+ union { /* 0x10 */
+ struct {
+ u8 RX_FIFO_COUNT;
+ u8 reserved_1;
+ u8 TX_FIFO_COUNT;
+ u8 BQREQ;
+ } __packed;
+
+ __le32 TBKDA; /* for 8187se */
+ } __packed;
+
+ __le32 TBEDA; /* 0x14 - for rtl8187se */
+
__le32 TSFT[2];
- __le32 TLPDA;
- __le32 TNPDA;
- __le32 THPDA;
- __le16 BRSR;
- u8 BSSID[6];
- u8 RESP_RATE;
- u8 EIFS;
- u8 reserved_3[1];
- u8 CMD;
+
+ union { /* 0x20 */
+ __le32 TLPDA;
+ __le32 TVIDA; /* for 8187se */
+ } __packed;
+
+ union { /* 0x24 */
+ __le32 TNPDA;
+ __le32 TVODA; /* for 8187se */
+ } __packed;
+
+ /* hi pri ring for all cards */
+ __le32 THPDA; /* 0x28 */
+
+ union { /* 0x2c */
+ struct {
+ u8 reserved_2a;
+ u8 EIFS_8187SE;
+ } __packed;
+
+ __le16 BRSR;
+ } __packed;
+
+ u8 BSSID[6]; /* 0x2e */
+
+ union { /* 0x34 */
+ struct {
+ u8 RESP_RATE;
+ u8 EIFS;
+ } __packed;
+ __le16 BRSR_8187SE;
+ } __packed;
+
+ u8 reserved_3[1]; /* 0x36 */
+ u8 CMD; /* 0x37 */
#define RTL818X_CMD_TX_ENABLE (1 << 2)
#define RTL818X_CMD_RX_ENABLE (1 << 3)
#define RTL818X_CMD_RESET (1 << 4)
- u8 reserved_4[4];
- __le16 INT_MASK;
- __le16 INT_STATUS;
+ u8 reserved_4[4]; /* 0x38 */
+ union {
+ struct {
+ __le16 INT_MASK;
+ __le16 INT_STATUS;
+ } __packed;
+
+ __le32 INT_STATUS_SE; /* 0x3c */
+ } __packed;
+/* status bits for rtl8187 and rtl8180/8185 */
#define RTL818X_INT_RX_OK (1 << 0)
#define RTL818X_INT_RX_ERR (1 << 1)
#define RTL818X_INT_TXL_OK (1 << 2)
@@ -56,7 +108,34 @@ struct rtl818x_csr {
#define RTL818X_INT_BEACON (1 << 13)
#define RTL818X_INT_TIME_OUT (1 << 14)
#define RTL818X_INT_TX_FO (1 << 15)
- __le32 TX_CONF;
+/* status bits for rtl8187se */
+#define RTL818X_INT_SE_TIMER3 (1 << 0)
+#define RTL818X_INT_SE_TIMER2 (1 << 1)
+#define RTL818X_INT_SE_RQ0SOR (1 << 2)
+#define RTL818X_INT_SE_TXBED_OK (1 << 3)
+#define RTL818X_INT_SE_TXBED_ERR (1 << 4)
+#define RTL818X_INT_SE_TXBE_OK (1 << 5)
+#define RTL818X_INT_SE_TXBE_ERR (1 << 6)
+#define RTL818X_INT_SE_RX_OK (1 << 7)
+#define RTL818X_INT_SE_RX_ERR (1 << 8)
+#define RTL818X_INT_SE_TXL_OK (1 << 9)
+#define RTL818X_INT_SE_TXL_ERR (1 << 10)
+#define RTL818X_INT_SE_RX_DU (1 << 11)
+#define RTL818X_INT_SE_RX_FIFO (1 << 12)
+#define RTL818X_INT_SE_TXN_OK (1 << 13)
+#define RTL818X_INT_SE_TXN_ERR (1 << 14)
+#define RTL818X_INT_SE_TXH_OK (1 << 15)
+#define RTL818X_INT_SE_TXH_ERR (1 << 16)
+#define RTL818X_INT_SE_TXB_OK (1 << 17)
+#define RTL818X_INT_SE_TXB_ERR (1 << 18)
+#define RTL818X_INT_SE_ATIM_TO (1 << 19)
+#define RTL818X_INT_SE_BK_TO (1 << 20)
+#define RTL818X_INT_SE_TIMER1 (1 << 21)
+#define RTL818X_INT_SE_TX_FIFO (1 << 22)
+#define RTL818X_INT_SE_WAKEUP (1 << 23)
+#define RTL818X_INT_SE_BK_DMA (1 << 24)
+#define RTL818X_INT_SE_TMGD_OK (1 << 30)
+ __le32 TX_CONF; /* 0x40 */
#define RTL818X_TX_CONF_LOOPBACK_MAC (1 << 17)
#define RTL818X_TX_CONF_LOOPBACK_CONT (3 << 17)
#define RTL818X_TX_CONF_NO_ICV (1 << 19)
@@ -68,6 +147,7 @@ struct rtl818x_csr {
#define RTL818X_TX_CONF_R8185_D (5 << 25)
#define RTL818X_TX_CONF_R8187vD (5 << 25)
#define RTL818X_TX_CONF_R8187vD_B (6 << 25)
+#define RTL818X_TX_CONF_RTL8187SE (6 << 25)
#define RTL818X_TX_CONF_HWVER_MASK (7 << 25)
#define RTL818X_TX_CONF_DISREQQSIZE (1 << 28)
#define RTL818X_TX_CONF_PROBE_DTS (1 << 29)
@@ -122,31 +202,67 @@ struct rtl818x_csr {
u8 PGSELECT;
u8 SECURITY;
__le32 ANAPARAM2;
- u8 reserved_10[12];
- __le16 BEACON_INTERVAL;
- __le16 ATIM_WND;
- __le16 BEACON_INTERVAL_TIME;
- __le16 ATIMTR_INTERVAL;
- u8 PHY_DELAY;
- u8 CARRIER_SENSE_COUNTER;
- u8 reserved_11[2];
- u8 PHY[4];
- __le16 RFPinsOutput;
- __le16 RFPinsEnable;
- __le16 RFPinsSelect;
- __le16 RFPinsInput;
- __le32 RF_PARA;
- __le32 RF_TIMING;
- u8 GP_ENABLE;
- u8 GPIO0;
- u8 GPIO1;
- u8 reserved_12;
- __le32 HSSI_PARA;
- u8 reserved_13[4];
- u8 TX_AGC_CTL;
-#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT (1 << 0)
-#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT (1 << 1)
-#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2)
+ u8 reserved_10[8];
+ __le32 IMR; /* 0x6c - Interrupt mask reg for 8187se */
+#define IMR_TMGDOK ((1 << 30))
+#define IMR_DOT11HINT ((1 << 25)) /* 802.11h Measurement Interrupt */
+#define IMR_BCNDMAINT ((1 << 24)) /* Beacon DMA Interrupt */
+#define IMR_WAKEINT ((1 << 23)) /* Wake Up Interrupt */
+#define IMR_TXFOVW ((1 << 22)) /* Tx FIFO Overflow */
+#define IMR_TIMEOUT1 ((1 << 21)) /* Time Out Interrupt 1 */
+#define IMR_BCNINT ((1 << 20)) /* Beacon Time out */
+#define IMR_ATIMINT ((1 << 19)) /* ATIM Time Out */
+#define IMR_TBDER ((1 << 18)) /* Tx Beacon Descriptor Error */
+#define IMR_TBDOK ((1 << 17)) /* Tx Beacon Descriptor OK */
+#define IMR_THPDER ((1 << 16)) /* Tx High Priority Descriptor Error */
+#define IMR_THPDOK ((1 << 15)) /* Tx High Priority Descriptor OK */
+#define IMR_TVODER ((1 << 14)) /* Tx AC_VO Descriptor Error Int */
+#define IMR_TVODOK ((1 << 13)) /* Tx AC_VO Descriptor OK Interrupt */
+#define IMR_FOVW ((1 << 12)) /* Rx FIFO Overflow Interrupt */
+#define IMR_RDU ((1 << 11)) /* Rx Descriptor Unavailable */
+#define IMR_TVIDER ((1 << 10)) /* Tx AC_VI Descriptor Error */
+#define IMR_TVIDOK ((1 << 9)) /* Tx AC_VI Descriptor OK Interrupt */
+#define IMR_RER ((1 << 8)) /* Rx Error Interrupt */
+#define IMR_ROK ((1 << 7)) /* Receive OK Interrupt */
+#define IMR_TBEDER ((1 << 6)) /* Tx AC_BE Descriptor Error */
+#define IMR_TBEDOK ((1 << 5)) /* Tx AC_BE Descriptor OK */
+#define IMR_TBKDER ((1 << 4)) /* Tx AC_BK Descriptor Error */
+#define IMR_TBKDOK ((1 << 3)) /* Tx AC_BK Descriptor OK */
+#define IMR_RQOSOK ((1 << 2)) /* Rx QoS OK Interrupt */
+#define IMR_TIMEOUT2 ((1 << 1)) /* Time Out Interrupt 2 */
+#define IMR_TIMEOUT3 ((1 << 0)) /* Time Out Interrupt 3 */
+ __le16 BEACON_INTERVAL; /* 0x70 */
+ __le16 ATIM_WND; /* 0x72 */
+ __le16 BEACON_INTERVAL_TIME; /* 0x74 */
+ __le16 ATIMTR_INTERVAL; /* 0x76 */
+ u8 PHY_DELAY; /* 0x78 */
+ u8 CARRIER_SENSE_COUNTER; /* 0x79 */
+ u8 reserved_11[2]; /* 0x7a */
+ u8 PHY[4]; /* 0x7c */
+ __le16 RFPinsOutput; /* 0x80 */
+ __le16 RFPinsEnable; /* 0x82 */
+ __le16 RFPinsSelect; /* 0x84 */
+ __le16 RFPinsInput; /* 0x86 */
+ __le32 RF_PARA; /* 0x88 */
+ __le32 RF_TIMING; /* 0x8c */
+ u8 GP_ENABLE; /* 0x90 */
+ u8 GPIO0; /* 0x91 */
+ u8 GPIO1; /* 0x92 */
+ u8 TPPOLL_STOP; /* 0x93 - rtl8187se only */
+#define RTL818x_TPPOLL_STOP_BQ (1 << 7)
+#define RTL818x_TPPOLL_STOP_VI (1 << 4)
+#define RTL818x_TPPOLL_STOP_VO (1 << 5)
+#define RTL818x_TPPOLL_STOP_BE (1 << 3)
+#define RTL818x_TPPOLL_STOP_BK (1 << 2)
+#define RTL818x_TPPOLL_STOP_MG (1 << 1)
+#define RTL818x_TPPOLL_STOP_HI (1 << 6)
+
+ __le32 HSSI_PARA; /* 0x94 */
+ u8 reserved_13[4]; /* 0x98 */
+ u8 TX_AGC_CTL; /* 0x9c */
+#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN (1 << 0)
+#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL (1 << 1)
+#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2)
u8 TX_GAIN_CCK;
u8 TX_GAIN_OFDM;
u8 TX_ANTENNA;
@@ -158,8 +274,8 @@ struct rtl818x_csr {
u8 SLOT;
u8 reserved_16[5];
u8 CW_CONF;
-#define RTL818X_CW_CONF_PERPACKET_CW_SHIFT (1 << 0)
-#define RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT (1 << 1)
+#define RTL818X_CW_CONF_PERPACKET_CW (1 << 0)
+#define RTL818X_CW_CONF_PERPACKET_RETRY (1 << 1)
u8 CW_VAL;
u8 RATE_FALLBACK;
#define RTL818X_RATE_FALLBACK_ENABLE (1 << 7)
@@ -167,7 +283,8 @@ struct rtl818x_csr {
u8 reserved_17[24];
u8 CONFIG5;
u8 TX_DMA_POLLING;
- u8 reserved_18[2];
+ u8 PHY_PR;
+ u8 reserved_18;
__le16 CWR;
u8 RETRY_CTR;
u8 reserved_19[3];
@@ -179,20 +296,64 @@ struct rtl818x_csr {
__le32 RDSAR;
__le16 TID_AC_MAP;
u8 reserved_20[4];
- u8 ANAPARAM3;
- u8 reserved_21[5];
- __le16 FEMR;
- u8 reserved_22[4];
- __le16 TALLY_CNT;
- u8 TALLY_SEL;
+ union {
+ __le16 ANAPARAM3; /* 0xee */
+ u8 ANAPARAM3A; /* for rtl8187 */
+ };
+
+#define AC_PARAM_TXOP_LIMIT_SHIFT 16
+#define AC_PARAM_ECW_MAX_SHIFT 12
+#define AC_PARAM_ECW_MIN_SHIFT 8
+#define AC_PARAM_AIFS_SHIFT 0
+
+ __le32 AC_VO_PARAM; /* 0xf0 */
+
+ union { /* 0xf4 */
+ __le32 AC_VI_PARAM;
+ __le16 FEMR;
+ } __packed;
+
+ union{ /* 0xf8 */
+ __le32 AC_BE_PARAM; /* rtl8187se */
+ struct{
+ u8 reserved_21[2];
+ __le16 TALLY_CNT; /* 0xfa */
+ } __packed;
+ } __packed;
+
+ union {
+ u8 TALLY_SEL; /* 0xfc */
+ __le32 AC_BK_PARAM;
+
+ } __packed;
+
} __packed;
+/* These are addresses with NON-standard usage.
+ * Their offsets lie far beyond the end of this struct, so padding it
+ * out with a ton of "reserved" fields is not worthwhile.
+ * They are RTL8187SE-only registers.
+ */
+#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + addr)
+#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + (addr >> 1))
+#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + (addr >> 2))
+
+#define FEMR_SE REG_ADDR2(0x1D4)
+#define ARFR REG_ADDR2(0x1E0)
+#define RFSW_CTRL REG_ADDR2(0x272)
+#define SW_3W_DB0 REG_ADDR2(0x274)
+#define SW_3W_DB0_4 REG_ADDR4(0x274)
+#define SW_3W_DB1 REG_ADDR2(0x278)
+#define SW_3W_DB1_4 REG_ADDR4(0x278)
+#define SW_3W_CMD1 REG_ADDR1(0x27D)
+#define PI_DATA_REG REG_ADDR2(0x360)
+#define SI_DATA_REG REG_ADDR2(0x362)
+
struct rtl818x_rf_ops {
char *name;
void (*init)(struct ieee80211_hw *);
void (*stop)(struct ieee80211_hw *);
void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *);
- void (*conf_erp)(struct ieee80211_hw *, struct ieee80211_bss_conf *);
u8 (*calc_rssi)(u8 agc, u8 sq);
};
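A short sketch of the pointer arithmetic behind the REG_ADDRn() helpers above (illustrative only; `priv` is assumed to be the usual struct rtl8180_priv * visible at the call sites, as in the REG_ADDR1(0x024E) access earlier in this patch):

        /* REG_ADDR2(0x1D4) expands to
         *      (__le16 __iomem *)priv->map + (0x1D4 >> 1)
         * i.e. byte offset 0x1D4 from the start of the register window;
         * the >> 1 (>> 2 for REG_ADDR4) only undoes the pointer element
         * size, so both writes below hit the same register:
         */
        rtl818x_iowrite16(priv, FEMR_SE, 0xffff);
        rtl818x_iowrite16(priv,
                          (__le16 __iomem *)((u8 __iomem *)priv->map + 0x1D4),
                          0xffff);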
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index c2ffce7a907c..bf3cf124e4ea 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -5,7 +5,7 @@ menuconfig RTL_CARDS
---help---
This option will enable support for the Realtek mac80211-based
wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de,
- rtl8723eu, and rtl8188eu share some common code.
+ rtl8723ae, rtl8723be, and rtl8188ee share some common code.
if RTL_CARDS
@@ -48,12 +48,27 @@ config RTL8723AE
depends on PCI
select RTLWIFI
select RTLWIFI_PCI
+ select RTL8723_COMMON
+ select RTLBTCOEXIST
---help---
This is the driver for Realtek RTL8723AE 802.11n PCIe
wireless network adapters.
If you choose to build it as a module, it will be called rtl8723ae
+config RTL8723BE
+ tristate "Realtek RTL8723BE PCIe Wireless Network Adapter"
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
+ select RTL8723_COMMON
+ select RTLBTCOEXIST
+ ---help---
+ This is the driver for Realtek RTL8723BE 802.11n PCIe
+ wireless network adapters.
+
+ If you choose to build it as a module, it will be called rtl8723be
+
config RTL8188EE
tristate "Realtek RTL8188EE Wireless Network Adapter"
depends on PCI
@@ -101,4 +116,14 @@ config RTL8192C_COMMON
depends on RTL8192CE || RTL8192CU
default y
+config RTL8723_COMMON
+ tristate
+ depends on RTL8723AE || RTL8723BE
+ default y
+
+config RTLBTCOEXIST
+ tristate
+ depends on RTL8723AE || RTL8723BE
+ default y
+
endif
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index d56f023a4b90..bba36a06abcc 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -24,6 +24,9 @@ obj-$(CONFIG_RTL8192CU) += rtl8192cu/
obj-$(CONFIG_RTL8192SE) += rtl8192se/
obj-$(CONFIG_RTL8192DE) += rtl8192de/
obj-$(CONFIG_RTL8723AE) += rtl8723ae/
+obj-$(CONFIG_RTL8723BE) += rtl8723be/
obj-$(CONFIG_RTL8188EE) += rtl8188ee/
+obj-$(CONFIG_RTLBTCOEXIST) += btcoexist/
+obj-$(CONFIG_RTL8723_COMMON) += rtl8723com/
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/rtlwifi/btcoexist/Makefile
new file mode 100644
index 000000000000..47ceecfcb7dc
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/Makefile
@@ -0,0 +1,7 @@
+btcoexist-objs := halbtc8723b2ant.o \
+ halbtcoutsrc.o \
+ rtl_btc.o
+
+obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h
new file mode 100644
index 000000000000..d76684eb24d0
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
+
+#ifndef __HALBT_PRECOMP_H__
+#define __HALBT_PRECOMP_H__
+/*************************************************************
+ * include files
+ *************************************************************/
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+
+#include "halbtcoutsrc.h"
+
+#include "halbtc8723b2ant.h"
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+
+#endif /* __HALBT_PRECOMP_H__ */
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
new file mode 100644
index 000000000000..d916ab9f3c38
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -0,0 +1,3698 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2012 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+/***************************************************************
+ * Description:
+ *
+ * This file is for the RTL8723B BT-coexistence mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ **************************************************************/
+/**************************************************************
+ * include files
+ **************************************************************/
+#include "halbt_precomp.h"
+/**************************************************************
+ * Global variables; all are file-scope statics
+ **************************************************************/
+static struct coex_dm_8723b_2ant glcoex_dm_8723b_2ant;
+static struct coex_dm_8723b_2ant *coex_dm = &glcoex_dm_8723b_2ant;
+static struct coex_sta_8723b_2ant glcoex_sta_8723b_2ant;
+static struct coex_sta_8723b_2ant *coex_sta = &glcoex_sta_8723b_2ant;
+
+static const char *const glbt_info_src_8723b_2ant[] = {
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+static u32 glcoex_ver_date_8723b_2ant = 20130731;
+static u32 glcoex_ver_8723b_2ant = 0x3b;
+
+/**************************************************************
+ * local function prototypes, if needed
+ **************************************************************/
+/**************************************************************
+ * local functions start with btc8723b2ant_
+ **************************************************************/
+static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
+ u8 rssi_thresh1)
+{
+ s32 bt_rssi = 0;
+ u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
+
+ bt_rssi = coex_sta->bt_rssi;
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
+ return coex_sta->pre_bt_rssi_state;
+ }
+
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (bt_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh1) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+ return bt_rssi_state;
+}
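A worked example of the hysteresis implemented above, using the two-level call this file actually makes later, btc8723b2ant_bt_rssi_state(2, 35, 0):

        /* level_num == 2, rssi_thresh == 35, tolerance ==
         * BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT:
         *
         *   previous state LOW/STAY_LOW  : switch to HIGH only when
         *                                  bt_rssi >= 35 + tolerance,
         *                                  otherwise STAY_LOW;
         *   previous state HIGH/STAY_HIGH: drop to LOW only when
         *                                  bt_rssi < 35,
         *                                  otherwise STAY_HIGH.
         *
         * An RSSI hovering between 35 and 35 + tolerance therefore keeps
         * the previously reported state instead of flapping every poll.
         */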
+
+static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+ u8 index, u8 level_num,
+ u8 rssi_thresh, u8 rssi_thresh1)
+{
+ s32 wifi_rssi = 0;
+ u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
+ return coex_sta->pre_wifi_rssi_state[index];
+ }
+
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (wifi_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh1) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+ return wifi_rssi_state;
+}
+
+static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+ u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+ u32 reg_hp_tx = 0, reg_hp_rx = 0;
+ u32 reg_lp_tx = 0, reg_lp_rx = 0;
+
+ reg_hp_txrx = 0x770;
+ reg_lp_txrx = 0x774;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+ reg_hp_tx = u32tmp & MASKLWORD;
+ reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+ reg_lp_tx = u32tmp & MASKLWORD;
+ reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ coex_sta->high_priority_tx = reg_hp_tx;
+ coex_sta->high_priority_rx = reg_hp_rx;
+ coex_sta->low_priority_tx = reg_lp_tx;
+ coex_sta->low_priority_rx = reg_lp_rx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+
+ /* reset counter */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
+static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+ static bool pre_wifi_busy;
+ static bool pre_under_4way;
+ static bool pre_bt_hs_on;
+ bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
+ if (wifi_connected) {
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ return true;
+ }
+
+ if (under_4way != pre_under_4way) {
+ pre_under_4way = under_4way;
+ return true;
+ }
+
+ if (bt_hs_on != pre_bt_hs_on) {
+ pre_bt_hs_on = bt_hs_on;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
+{
+ /*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+ bt_link_info->sco_exist = coex_sta->sco_exist;
+ bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+ bt_link_info->pan_exist = coex_sta->pan_exist;
+ bt_link_info->hid_exist = coex_sta->hid_exist;
+
+ /* workaround for HS mode */
+ if (bt_hs_on) {
+ bt_link_info->pan_exist = true;
+ bt_link_info->bt_link_exist = true;
+ }
+#else /* profile from bt stack */
+ bt_link_info->bt_link_exist = stack_info->bt_link_exist;
+ bt_link_info->sco_exist = stack_info->sco_exist;
+ bt_link_info->a2dp_exist = stack_info->a2dp_exist;
+ bt_link_info->pan_exist = stack_info->pan_exist;
+ bt_link_info->hid_exist = stack_info->hid_exist;
+
+ /* workaround for the Win-8 stack HID report error */
+ if (!stack_info->hid_exist)
+ stack_info->hid_exist = coex_sta->hid_exist;
+ /* sync BTInfo with BT firmware and stack */
+ /* when the stack HID report is wrong, use the info from BT fw */
+ if (!stack_info->bt_link_exist)
+ stack_info->bt_link_exist = coex_sta->bt_link_exist;
+#endif
+ /* check if Sco only */
+ if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->sco_only = true;
+ else
+ bt_link_info->sco_only = false;
+
+ /* check if A2dp only */
+ if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->a2dp_only = true;
+ else
+ bt_link_info->a2dp_only = false;
+
+ /* check if Pan only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->pan_only = true;
+ else
+ bt_link_info->pan_only = false;
+
+ /* check if Hid only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && bt_link_info->hid_exist)
+ bt_link_info->hid_only = true;
+ else
+ bt_link_info->hid_only = false;
+}
+
+static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+ u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED;
+ u8 num_of_diff_profile = 0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ if (!bt_link_info->bt_link_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
+ return algorithm;
+ }
+
+ if (bt_link_info->sco_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->hid_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->pan_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->a2dp_exist)
+ num_of_diff_profile++;
+
+ if (num_of_diff_profile == 1) {
+ if (bt_link_info->sco_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+ } else {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], A2DP only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], PAN(HS) only\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], PAN(EDR) only\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ } else if (num_of_diff_profile == 2) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + PAN(HS)\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + PAN(HS)\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex],A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ } else if (num_of_diff_profile == 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP"
+ " ==> HID\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + "
+ "PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + "
+ "PAN(EDR) ==> HID\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP + "
+ "PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ } else if (num_of_diff_profile >= 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Error!!! SCO + HID"
+ " + A2DP + PAN(HS)\n");
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP +"
+ " PAN(EDR)==>PAN(EDR)+HID\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+ return algorithm;
+}
+
+static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
+{
+ bool ret = false;
+ bool bt_hs_on = false, wifi_connected = false;
+ s32 bt_hs_rssi = 0;
+ u8 bt_rssi_state;
+
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+ return false;
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected))
+ return false;
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+ return false;
+
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ if (wifi_connected) {
+ if (bt_hs_on) {
+ if (bt_hs_rssi > 37) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt "
+ "power for HS mode!!\n");
+ ret = true;
+ }
+ } else {
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt "
+ "power for Wifi is connected!!\n");
+ ret = true;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
+ u8 dac_swing_lvl)
+{
+ u8 h2c_parameter[1] = {0};
+
+ /* There are several types of DAC swing
+ * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+ */
+ h2c_parameter[0] = dac_swing_lvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool dec_bt_pwr)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (dec_bt_pwr)
+ h2c_parameter[0] |= BIT1;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+ (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
+static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool force_exec, bool dec_bt_pwr)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power = %s\n",
+ (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+ coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+
+ if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+ return;
+ }
+ btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+ coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
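The cache-and-compare shape of the wrapper above repeats in the other btc8723b2ant_*() setters that follow (fw_dac_swing_lvl, rf_shrink, low_penalty_ra, dac_swing, agc_table, coex_table, ignore_wlan_act, ps_tdma); a minimal sketch of the pattern, with X standing in for the particular setting:

        /*      coex_dm->cur_X = X;
         *      if (!force_exec && coex_dm->pre_X == coex_dm->cur_X)
         *              return;            // unchanged: skip the H2C/register write
         *      btc8723b2ant_set_X(btcoexist, coex_dm->cur_X);
         *      coex_dm->pre_X = coex_dm->cur_X;   // remember what was programmed
         *
         * so repeated calls with an unchanged value are cheap, while
         * FORCE_EXEC always re-programs the firmware/hardware.
         */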
+
+static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+ bool force_exec, u8 fw_dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preFwDacSwingLvl=%d, "
+ "curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+ if (coex_dm->pre_fw_dac_swing_lvl ==
+ coex_dm->cur_fw_dac_swing_lvl)
+ return;
+ }
+
+ btc8723b2ant_set_fw_dac_swing_level(btcoexist,
+ coex_dm->cur_fw_dac_swing_lvl);
+ coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+ bool rx_rf_shrink_on)
+{
+ if (rx_rf_shrink_on) {
+ /* Shrink RF Rx LPF corner */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff, 0xffffc);
+ } else {
+ /* Resume RF Rx LPF corner */
+ /* After initialization, we can use coex_dm->bt_rf0x1e_backup */
+ if (btcoexist->initilized) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff,
+ coex_dm->bt_rf0x1e_backup);
+ }
+ }
+}
+
+static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
+ bool force_exec, bool rx_rf_shrink_on)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
+ "ON" : "OFF"));
+ coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreRfRxLpfShrink=%d, "
+ "bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ if (coex_dm->pre_rf_rx_lpf_shrink ==
+ coex_dm->cur_rf_rx_lpf_shrink)
+ return;
+ }
+ btc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
+ bool low_penalty_ra)
+{
+ u8 h2c_parameter[6] = {0};
+
+ h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/
+
+ if (low_penalty_ra) {
+ h2c_parameter[1] |= BIT0;
+ /*normal rate except MCS7/6/5, OFDM54/48/36*/
+ h2c_parameter[2] = 0x00;
+ h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/
+ h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/
+ h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
+ bool force_exec, bool low_penalty_ra)
+{
+ /*return; */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""), (low_penalty_ra ?
+ "ON" : "OFF"));
+ coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreLowPenaltyRa=%d, "
+ "bCurLowPenaltyRa=%d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
+
+ if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
+ return;
+ }
+ btc8723b_set_penalty_txrate(btcoexist, coex_dm->cur_low_penalty_ra);
+
+ coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
+ u32 level)
+{
+ u8 val = (u8) level;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+static void btc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoex,
+ bool sw_dac_swing_on,
+ u32 sw_dac_swing_lvl)
+{
+ if (sw_dac_swing_on)
+ btc8723b2ant_set_dac_swing_reg(btcoex, sw_dac_swing_lvl);
+ else
+ btc8723b2ant_set_dac_swing_reg(btcoex, 0x18);
+}
+
+
+static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
+ bool force_exec, bool dac_swing_on,
+ u32 dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec ? "force to" : ""),
+ (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+ coex_dm->cur_dac_swing_on = dac_swing_on;
+ coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
+ " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
+
+ if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+ (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+ return;
+ }
+ mdelay(30);
+ btc8723b2ant_set_sw_fulltime_dac_swing(btcoexist, dac_swing_on,
+ dac_swing_lvl);
+
+ coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+ coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
+ bool agc_table_en)
+{
+ u8 rssi_adjust_val = 0;
+
+ /* BB AGC Gain Table */
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table On!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table Off!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001);
+ }
+
+
+ /* RF Gain */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x38fff);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x38ffe);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x380c3);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x28ce6);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
+
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x38fff);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x38ffe);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x380c3);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x28ce6);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0);
+
+ /* set rssiAdjustVal for wifi module. */
+ if (agc_table_en)
+ rssi_adjust_val = 8;
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+ &rssi_adjust_val);
+}
+
+static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
+ bool force_exec, bool agc_table_en)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ (agc_table_en ? "Enable" : "Disable"));
+ coex_dm->cur_agc_table_en = agc_table_en;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+
+ if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+ return;
+ }
+ btc8723b2ant_set_agc_table(btcoexist, agc_table_en);
+
+ coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
+ bool force_exec, u32 val0x6c0,
+ u32 val0x6c4, u32 val0x6c8,
+ u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
+ " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ (force_exec ? "force to" : ""), val0x6c0,
+ val0x6c4, val0x6c8, val0x6cc);
+ coex_dm->cur_val0x6c0 = val0x6c0;
+ coex_dm->cur_val0x6c4 = val0x6c4;
+ coex_dm->cur_val0x6c8 = val0x6c8;
+ coex_dm->cur_val0x6cc = val0x6cc;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], preVal0x6c0=0x%x, "
+ "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
+ "preVal0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], curVal0x6c0=0x%x, "
+ "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
+ "curVal0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+
+ if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+ (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+ (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+ (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+ return;
+ }
+ btc8723b2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
+
+ coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+ coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+ coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+ coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
+static void btc8723b_coex_tbl_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ switch (type) {
+ case 0:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x55555555, 0xffff, 0x3);
+ break;
+ case 1:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5afa5afa, 0xffff, 0x3);
+ break;
+ case 2:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0x5a5a5a5a, 0xffff, 0x3);
+ break;
+ case 3:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
+ 0xaaaaaaaa, 0xffff, 0x3);
+ break;
+ case 4:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff,
+ 0xffffffff, 0xffff, 0x3);
+ break;
+ case 5:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+ 0x5fff5fff, 0xffff, 0x3);
+ break;
+ case 6:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5a5a5a5a, 0xffff, 0x3);
+ break;
+ case 7:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5afa5afa, 0xffff, 0x3);
+ break;
+ case 8:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea,
+ 0x5aea5aea, 0xffff, 0x3);
+ break;
+ case 9:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5aea5aea, 0xffff, 0x3);
+ break;
+ case 10:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5aff5aff, 0xffff, 0x3);
+ break;
+ case 11:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5a5f5a5f, 0xffff, 0x3);
+ break;
+ case 12:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5f5f5f5f, 0xffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
+
+static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool enable)
+{
+ u8 h2c_parameter[1] = {0};
+
+ if (enable)
+ h2c_parameter[0] |= BIT0;/* function enable*/
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, "
+ "FW write 0x63=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
+static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ coex_dm->cur_ignore_wlan_act = enable;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d, "
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
+
+ if (coex_dm->pre_ignore_wlan_act ==
+ coex_dm->cur_ignore_wlan_act)
+ return;
+ }
+ btc8723b2ant_set_fw_ignore_wlan_act(btcoexist, enable);
+
+ coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
+static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
+ u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+ u8 h2c_parameter[5];
+
+ h2c_parameter[0] = byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = byte5;
+
+ coex_dm->ps_tdma_para[0] = byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+static void btc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+ bool shrink_rx_lpf, bool low_penalty_ra,
+ bool limited_dig, bool bt_lna_constrain)
+{
+ btc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+ btc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
+}
+
+static void btc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+ bool agc_table_shift, bool adc_backoff,
+ bool sw_dac_swing, u32 dac_swing_lvl)
+{
+ btc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift);
+ btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+ dac_swing_lvl);
+}
+
+static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
+ bool turn_on, u8 type)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+
+ if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+ (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+ return;
+ }
+ if (turn_on) {
+ switch (type) {
+ case 1:
+ default:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ case 2:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x90);
+ break;
+ case 3:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1, 0x90);
+ break;
+ case 4:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+ 0x03, 0xf1, 0x90);
+ break;
+ case 5:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0x60, 0x90);
+ break;
+ case 6:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0x60, 0x90);
+ break;
+ case 7:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0x70, 0x90);
+ break;
+ case 8:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
+ 0x3, 0x70, 0x90);
+ break;
+ case 9:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ case 10:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x90);
+ break;
+ case 11:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0xa, 0xe1, 0x90);
+ break;
+ case 12:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0xe1, 0x90);
+ break;
+ case 13:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0x60, 0x90);
+ break;
+ case 14:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0x60, 0x90);
+ break;
+ case 15:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0xa, 0x60, 0x90);
+ break;
+ case 16:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0x60, 0x90);
+ break;
+ case 17:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f,
+ 0x2f, 0x60, 0x90);
+ break;
+ case 18:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0xe1, 0x90);
+ break;
+ case 19:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0xe1, 0x90);
+ break;
+ case 20:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0x60, 0x90);
+ break;
+ case 21:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+ 0x03, 0x70, 0x90);
+ break;
+ case 71:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ }
+ } else {
+ /* disable PS tdma */
+ switch (type) {
+ case 0:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x40, 0x0);
+ break;
+ case 1:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x48, 0x0);
+ break;
+ default:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x40, 0x0);
+ break;
+ }
+ }
+
+ /* update pre state */
+ coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+ coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
+static void btc8723b2ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+ /* fw all off */
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ /* sw all off */
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ /* hw all off */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+static void btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+	/* forcibly reset the coex mechanism */
+
+ btc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ btc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
+static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+ bool wifi_connected = false;
+ bool low_pwr_disable = true;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ if (wifi_connected) {
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ } else {
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ }
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
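+	/* remember the current 0x948 value so run_coexist_mechanism() can
+	 * restore it once the inquiry/page scan ends, then force 0x280 for
+	 * the scan period
+	 */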
+ coex_dm->need_recover_0x948 = true;
+ coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948);
+
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+}
+
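+/*
+ * Handle the "common" cases (WiFi not connected, or one side idle) with a
+ * fixed coexist setting; returns true when such a case was applied so the
+ * caller can skip the per-profile action routines.
+ */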
+static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
+{
+ bool common = false, wifi_connected = false;
+ bool wifi_busy = false;
+ bool bt_hs_on = false, low_pwr_disable = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ if (!wifi_connected) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi non-connected idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false, false,
+ false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false, false,
+ 0x18);
+
+ common = true;
+ } else {
+ if (BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + "
+ "BT non connected-idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 0xb);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (bt_hs_on)
+ return false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + "
+ "BT connected-idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 0xb);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (wifi_busy) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy + "
+ "BT Busy!!\n");
+ common = false;
+ } else {
+ if (bt_hs_on)
+ return false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle + "
+ "BT Busy!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
+ 0x1, 0xfffff, 0x0);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC,
+ 7);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 21);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist,
+ NORMAL_EXEC,
+ 0xb);
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC,
+ true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC,
+ false);
+ btc8723b2ant_sw_mechanism1(btcoexist, false,
+ false, false,
+ false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false,
+ false, false,
+ 0x18);
+ common = true;
+ }
+ }
+ }
+
+ return common;
+}
+
+static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
+ s32 result)
+{
+ /* Set PS TDMA for max interval == 1 */
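+	/* map the current type into the matching TxPause family, then step it
+	 * to increase (result == 1) or decrease (result == -1) the WiFi
+	 * duration
+	 */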
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+
+ if (coex_dm->cur_ps_tdma == 71) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 5);
+ coex_dm->tdma_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 5);
+ coex_dm->tdma_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 13);
+ coex_dm->tdma_adj_type = 13;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 5);
+ coex_dm->tdma_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 13);
+ coex_dm->tdma_adj_type = 13;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
+ coex_dm->tdma_adj_type = 71;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
+ coex_dm->tdma_adj_type = 4;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ coex_dm->tdma_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
+ coex_dm->tdma_adj_type = 12;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 71) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 1);
+ coex_dm->tdma_adj_type = 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->tdma_adj_type = 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->tdma_adj_type = 12;
+ }
+ } else if (result == 1) {
+ int tmp = coex_dm->cur_ps_tdma;
+ switch (tmp) {
+ case 4:
+ case 3:
+ case 2:
+ case 12:
+ case 11:
+ case 10:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, tmp - 1);
+ coex_dm->tdma_adj_type = tmp - 1;
+ break;
+ case 1:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 71);
+ coex_dm->tdma_adj_type = 71;
+ break;
+ }
+ }
+ }
+}
+
+static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
+ s32 result)
+{
+ /* Set PS TDMA for max interval == 2 */
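+	/* same stepping scheme as set_tdma_int1(), restricted to the
+	 * max-interval-2 type set
+	 */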
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
+ coex_dm->tdma_adj_type = 4;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
+ coex_dm->tdma_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->tdma_adj_type = 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->tdma_adj_type = 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->tdma_adj_type = 10;
+ }
+ }
+ }
+}
+
+static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
+ s32 result)
+{
+ /* Set PS TDMA for max interval == 3 */
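+	/* same stepping scheme as set_tdma_int1(), restricted to the
+	 * max-interval-3 type set
+	 */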
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ switch (coex_dm->cur_ps_tdma) {
+ case 5:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 6:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 7:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 8:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
+ coex_dm->tdma_adj_type = 4;
+ break;
+ case 13:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 14:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 15:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 16:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
+ coex_dm->tdma_adj_type = 12;
+ break;
+ }
+ if (result == -1) {
+ switch (coex_dm->cur_ps_tdma) {
+ case 1:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 2:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 3:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->tdma_adj_type = 4;
+ break;
+ case 9:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 10:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 11:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->tdma_adj_type = 12;
+ break;
+ }
+ } else if (result == 1) {
+ switch (coex_dm->cur_ps_tdma) {
+ case 4:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 3:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 2:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 12:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 11:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 10:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+				coex_dm->tdma_adj_type = 11;
+				break;
+			}
+ }
+ }
+}
+
+static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+ bool sco_hid, bool tx_pause,
+ u8 max_interval)
+{
+ static s32 up, dn, m, n, wait_count;
+	/* 0: no change, +1: increase WiFi duration, -1: decrease WiFi duration */
+ s32 result;
+ u8 retry_count = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
+
+ if (!coex_dm->auto_tdma_adjust) {
+ coex_dm->auto_tdma_adjust = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ if (sco_hid) {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->tdma_adj_type = 13;
+ } else if (max_interval == 2) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ }
+ } else {
+ if (max_interval == 1) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->tdma_adj_type = 9;
+ } else if (max_interval == 2) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ }
+ }
+ } else {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->tdma_adj_type = 5;
+ } else if (max_interval == 2) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ }
+ } else {
+ if (max_interval == 1) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->tdma_adj_type = 1;
+ } else if (max_interval == 2) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ }
+ }
+ }
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ wait_count = 0;
+ } else {
+		/* acquire the BT TRx retry count from BT_Info byte2 */
+ retry_count = coex_sta->bt_retry_cnt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+ up, dn, m, n, wait_count);
+ result = 0;
+ wait_count++;
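+		/* up counts consecutive retry-free 2-second windows; the WiFi
+		 * duration is increased only after n (= 3 * m) of them, and m
+		 * grows (lengthening n) when BT retries reappear shortly after
+		 * an adjustment
+		 */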
+ /* no retry in the last 2-second duration*/
+ if (retry_count == 0) {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if (up >= n) {
+ wait_count = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi "
+ "duration!!\n");
+ } /* <=3 retry in the last 2-second duration*/
+ } else if (retry_count <= 3) {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) {
+ if (wait_count <= 2)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration "
+ "for retry_counter<3!!\n");
+ }
+ } else {
+ if (wait_count == 1)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration "
+ "for retry_counter>3!!\n");
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
+ if (max_interval == 1)
+ set_tdma_int1(btcoexist, tx_pause, result);
+ else if (max_interval == 2)
+ set_tdma_int2(btcoexist, tx_pause, result);
+ else if (max_interval == 3)
+ set_tdma_int3(btcoexist, tx_pause, result);
+ }
+
+	/* if the current PS TDMA type does not match the recorded one (e.g.
+	 * during scan or DHCP), adjust it back to the previously recorded
+	 * type.
+	 */
+ if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
+ bool scan = false, link = false, roam = false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], PsTdma type dismatch!!!, "
+ "curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (!scan && !link && !roam)
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ coex_dm->tdma_adj_type);
+ else
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], roaming/link/scan is under"
+ " progress, will adjust next time!!!\n");
+ }
+}
+
+/* SCO only or SCO+PAN(HS) */
+static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ /*for SCO quality at 11b/g mode*/
+ if (BTC_WIFI_BW_LEGACY == wifi_bw)
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 2);
+ else /*for SCO quality & wifi performance balance at 11n mode*/
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 8);
+
+ /*for voice quality */
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x4);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x4);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x4);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x4);
+ }
+ }
+}
+
+static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_LEGACY == wifi_bw) /* for HID at 11b/g mode */
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+ else /*for HID quality & wifi performance balance at 11n mode*/
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 9);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ else
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false,
+ false, 1);
+ else
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 1);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 10);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+ else
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/*PAN(HS) only*/
+static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/*PAN(EDR)+A2DP*/
+static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 12);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false,
+ true, 3);
+ else
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false,
+ false, 3);
+ } else {
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 3);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x780);
+ } else {
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 6);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ }
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ } else {
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* HID+A2DP+PAN(EDR) */
+static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ true, 2);
+ else
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ false, 3);
+ } else {
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ else
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+ u8 algorithm = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), "
+ "return for Manual CTRL <===\n");
+ return;
+ }
+
+ if (coex_sta->under_ips) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
+ return;
+ }
+
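+	/* classify the current BT profile combination; the result selects the
+	 * action routine dispatched further below
+	 */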
+ algorithm = btc8723b2ant_action_algorithm(btcoexist);
+ if (coex_sta->c2h_bt_inquiry_page &&
+ (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
+ btc8723b2ant_action_bt_inquiry(btcoexist);
+ return;
+ } else {
+ if (coex_dm->need_recover_0x948) {
+ coex_dm->need_recover_0x948 = false;
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ coex_dm->backup_0x948);
+ }
+ }
+
+ coex_dm->cur_algorithm = algorithm;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
+ coex_dm->cur_algorithm);
+
+ if (btc8723b2ant_is_common_action(btcoexist)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common.\n");
+ coex_dm->auto_tdma_adjust = false;
+ } else {
+ if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], preAlgorithm=%d, "
+ "curAlgorithm=%d\n", coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
+ coex_dm->auto_tdma_adjust = false;
+ }
+ switch (coex_dm->cur_algorithm) {
+ case BT_8723B_2ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+ btc8723b2ant_action_sco(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+ btc8723b2ant_action_hid(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = A2DP.\n");
+ btc8723b2ant_action_a2dp(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = A2DP+PAN(HS).\n");
+ btc8723b2ant_action_a2dp_pan_hs(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN(EDR).\n");
+ btc8723b2ant_action_pan_edr(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HS mode.\n");
+ btc8723b2ant_action_pan_hs(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN+A2DP.\n");
+ btc8723b2ant_action_pan_edr_a2dp(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN(EDR)+HID.\n");
+ btc8723b2ant_action_pan_edr_hid(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HID+A2DP+PAN.\n");
+ btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HID+A2DP.\n");
+ btc8723b2ant_action_hid_a2dp(btcoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = coexist All Off!!\n");
+ btc8723b2ant_coex_alloff(btcoexist);
+ break;
+ }
+ coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+ }
+}
+
+
+
+/*********************************************************************
+ * workaround functions start with wa_btc8723b2ant_
+ *********************************************************************/
+/*********************************************************************
+ * extern functions start with ex_halbtc8723b2ant_
+ *********************************************************************/
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ u32 u32tmp = 0, fw_ver;
+ u8 u8tmp = 0;
+ u8 h2c_parameter[2] = {0};
+
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
+
+ /* backup rf 0x1e value */
+ coex_dm->bt_rf0x1e_backup = btcoexist->btc_get_rf_reg(btcoexist,
+ BTC_RF_A, 0x1e,
+ 0xfffff);
+
+ /* 0x4c[23]=0, 0x4c[24]=1 Antenna control by WL/BT */
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp &= ~BIT23;
+ u32tmp |= BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+ btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
+ btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
+
+ /* Antenna switch control parameter */
+ /* btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555);*/
+
+ /*Force GNT_BT to low*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+
+ /* 0x790[5:0]=0x5 */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+ u8tmp &= 0xc0;
+ u8tmp |= 0x5;
+ btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+
+ /*Antenna config */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+
+	/* ext switch setting for fw ver < 0xc00 */
+ if (fw_ver < 0xc00) {
+ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+ 0x3, 0x1);
+ /*Main Ant to BT for IPS case 0x4c[23]=1*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1,
+ 0x1);
+
+ /*tell firmware "no antenna inverse"*/
+ h2c_parameter[0] = 0;
+ h2c_parameter[1] = 1; /* ext switch type */
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ } else {
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+ 0x3, 0x2);
+ /*Aux Ant to BT for IPS case 0x4c[23]=1*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1,
+ 0x0);
+
+ /*tell firmware "antenna inverse"*/
+ h2c_parameter[0] = 1;
+ h2c_parameter[1] = 1; /*ext switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ }
+ } else {
+		/* ext switch always at S1 (if it exists) */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1);
+ /*Main Ant to BT for IPS case 0x4c[23]=1*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x1);
+
+ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
+ /*tell firmware "no antenna inverse"*/
+ h2c_parameter[0] = 0;
+ h2c_parameter[1] = 0; /*ext switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ } else {
+ /*tell firmware "antenna inverse"*/
+ h2c_parameter[0] = 1;
+ h2c_parameter[1] = 0; /*ext switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ }
+ }
+
+ /* PTA parameter */
+ btc8723b_coex_tbl_type(btcoexist, FORCE_EXEC, 0);
+
+ /* Enable counter statistics */
+	/* 0x76e[3] = 1, WLAN_Act controlled by PTA */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+ btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
+}
+
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
+ btc8723b2ant_init_coex_dm(btcoexist);
+}
+
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ u8 *cli_buf = btcoexist->cli_buf;
+ u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
+ u32 u32tmp[4];
+ bool roam = false, scan = false;
+ bool link = false, wifi_under_5g = false;
+ bool bt_hs_on = false, wifi_busy = false;
+ s32 wifi_rssi = 0, bt_hs_rssi = 0;
+ u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck;
+ u8 wifi_dot11_chnl, wifi_hs_chnl;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cli_buf);
+
+ if (btcoexist->manual_control) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========[Under Manual Control]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+
+ if (!board_info->bt_exist) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cli_buf);
+ return;
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+ "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+ &wifi_dot11_chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0],
+ coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ &wifi_traffic_dir);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
+ (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
+ ((!wifi_busy) ? "idle" :
+ ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
+ "uplink" : "downlink")));
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP",
+ bt_link_info->sco_exist, bt_link_info->hid_exist,
+ bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
+ CL_PRINTF(cli_buf);
+
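+ /* dump the raw C2H BT info bytes last received from each source */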
+ for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
+ if (coex_sta->bt_info_c2h_cnt[i]) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x "
+ "%02x %02x %02x %02x(%d)",
+ glbt_info_src_8723b_2ant[i],
+ coex_sta->bt_info_c2h[i][0],
+ coex_sta->bt_info_c2h[i][1],
+ coex_sta->bt_info_c2h[i][2],
+ coex_sta->bt_info_c2h[i][3],
+ coex_sta->bt_info_c2h[i][4],
+ coex_sta->bt_info_c2h[i][5],
+ coex_sta->bt_info_c2h[i][6],
+ coex_sta->bt_info_c2h_cnt[i]);
+ CL_PRINTF(cli_buf);
+ }
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ /* Sw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ CL_PRINTF(cli_buf);
+
+ /* Fw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Fw mechanism]============");
+ CL_PRINTF(cli_buf);
+
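+ /* PS TDMA parameters most recently handed to the firmware */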
+ ps_tdma_case = coex_dm->cur_ps_tdma;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para[0],
+ coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+ coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+ coex_dm->cur_ignore_wlan_act);
+ CL_PRINTF(cli_buf);
+
+ /* Hw setting */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Hw setting]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x778/0x880[29:25]", u8tmp[0],
+ (u32tmp[0]&0x3e000000) >> 25);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0]&0x20) >> 5), u8tmp[1]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3);
+ CL_PRINTF(cli_buf);
+
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8)>>3), u8tmp[1],
+ ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8);
+ u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
+
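+ /* sum the OFDM false-alarm counter fields read above; the CCK
+ * false-alarm count is the 16-bit value split across 0xa5b/0xa5c
+ */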
+ fa_ofdm = ((u32tmp[0]&0xffff0000) >> 16) +
+ ((u32tmp[1]&0xffff0000) >> 16) +
+ (u32tmp[1] & 0xffff) +
+ (u32tmp[2] & 0xffff) +
+ ((u32tmp[3]&0xffff0000) >> 16) +
+ (u32tmp[3] & 0xffff);
+ fa_cck = (u8tmp[0] << 8) + u8tmp[1];
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "OFDM-CCA/OFDM-FA/CCK-FA",
+ u32tmp[0]&0xffff, fa_ofdm, fa_cck);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x770(high-pri rx/tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
+ CL_PRINTF(cli_buf);
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1)
+ btc8723b2ant_monitor_bt_ctr(btcoexist);
+#endif
+ btcoexist->btc_disp_dbg_msg(btcoexist,
+ BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_IPS_ENTER == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
+ coex_sta->under_ips = true;
+ btc8723b2ant_coex_alloff(btcoexist);
+ } else if (BTC_IPS_LEAVE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
+ coex_sta->under_ips = false;
+ }
+}
+
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_LPS_ENABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
+ coex_sta->under_lps = true;
+ } else if (BTC_LPS_DISABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
+ coex_sta->under_lps = false;
+ }
+}
+
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_SCAN_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
+ else if (BTC_SCAN_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
+}
+
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_ASSOCIATE_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
+ else if (BTC_ASSOCIATE_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
+}
+
+void btc8723b_med_stat_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ u8 h2c_parameter[3] = {0};
+ u32 wifi_bw;
+ u8 wifi_central_chnl;
+
+ if (BTC_MEDIA_CONNECT == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
+
+ /* only 2.4G: we need to inform BT of the channel mask */
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl);
+ if ((BTC_MEDIA_CONNECT == type) &&
+ (wifi_central_chnl <= 14)) {
+ h2c_parameter[0] = 0x1;
+ h2c_parameter[1] = wifi_central_chnl;
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
+
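+ /* cache the channel info last reported to BT for the coex info dump */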
+ coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+ coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+ coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ if (type == BTC_PACKET_DHCP)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
+}
+
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length)
+{
+ u8 bt_info = 0;
+ u8 i, rsp_source = 0;
+ bool bt_busy = false, limited_dig = false;
+ bool wifi_connected = false;
+
+ coex_sta->c2h_bt_info_req_sent = false;
+
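+ /* the low nibble of byte 0 identifies which entity sent this BT info */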
+ rsp_source = tmpbuf[0]&0xf;
+ if (rsp_source >= BT_INFO_SRC_8723B_2ANT_MAX)
+ rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
+ coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rsp_source, length);
+ for (i = 0; i < length; i++) {
+ coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
+ if (i == 1)
+ bt_info = tmpbuf[i];
+ if (i == length-1)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x]\n", tmpbuf[i]);
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x, ", tmpbuf[i]);
+ }
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "return for Manual CTRL<===\n");
+ return;
+ }
+
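+ /* reports coming from BT itself carry retry count, RSSI and an ext byte */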
+ if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) {
+ coex_sta->bt_retry_cnt = /* [3:0]*/
+ coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+
+ coex_sta->bt_rssi =
+ coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+
+ coex_sta->bt_info_ext =
+ coex_sta->bt_info_c2h[rsp_source][4];
+
+ /* Here we need to resend some wifi info to BT
+ * because BT was reset and lost the info.
+ */
+ if ((coex_sta->bt_info_ext & BIT1)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT ext info bit1 check,"
+ " send wifi BW&Chnl to BT!!\n");
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ if (wifi_connected)
+ btc8723b_med_stat_notify(btcoexist,
+ BTC_MEDIA_CONNECT);
+ else
+ btc8723b_med_stat_notify(btcoexist,
+ BTC_MEDIA_DISCONNECT);
+ }
+
+ if ((coex_sta->bt_info_ext & BIT3)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, "
+ "set BT NOT to ignore Wlan active!!\n");
+ btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
+ false);
+ } else {
+ /* BT already does NOT ignore WLAN active; do nothing here. */
+ }
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+ if ((coex_sta->bt_info_ext & BIT4)) {
+ /* BT auto report already enabled, do nothing*/
+ } else {
+ btc8723b2ant_bt_auto_report(btcoexist, FORCE_EXEC,
+ true);
+ }
+#endif
+ }
+
+ /* check BIT2 first ==> check if bt is under inquiry or page scan*/
+ if (bt_info & BT_INFO_8723B_2ANT_B_INQ_PAGE)
+ coex_sta->c2h_bt_inquiry_page = true;
+ else
+ coex_sta->c2h_bt_inquiry_page = false;
+
+ /* set link exist status*/
+ if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+ coex_sta->bt_link_exist = false;
+ coex_sta->pan_exist = false;
+ coex_sta->a2dp_exist = false;
+ coex_sta->hid_exist = false;
+ coex_sta->sco_exist = false;
+ } else { /* connection exists */
+ coex_sta->bt_link_exist = true;
+ if (bt_info & BT_INFO_8723B_2ANT_B_FTP)
+ coex_sta->pan_exist = true;
+ else
+ coex_sta->pan_exist = false;
+ if (bt_info & BT_INFO_8723B_2ANT_B_A2DP)
+ coex_sta->a2dp_exist = true;
+ else
+ coex_sta->a2dp_exist = false;
+ if (bt_info & BT_INFO_8723B_2ANT_B_HID)
+ coex_sta->hid_exist = true;
+ else
+ coex_sta->hid_exist = false;
+ if (bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO)
+ coex_sta->sco_exist = true;
+ else
+ coex_sta->sco_exist = false;
+ }
+
+ btc8723b2ant_update_bt_link_info(btcoexist);
+
+ if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT Non-Connected idle!!!\n");
+ /* connection exists but is not busy */
+ } else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ } else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
+ (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ } else if (bt_info & BT_INFO_8723B_2ANT_B_ACL_BUSY) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ } else {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT Non-Defined state!!!\n");
+ }
+
+ if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+ bt_busy = true;
+ limited_dig = true;
+ } else {
+ bt_busy = false;
+ limited_dig = false;
+ }
+
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+ coex_dm->limited_dig = limited_dig;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+ btc8723b2ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ if (BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex],StackOP Inquiry/page/pair start notify\n");
+ else if (BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex],StackOP Inquiry/page/pair finish notify\n");
+}
+
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+ btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ btc8723b_med_stat_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ static u8 dis_ver_info_cnt;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], =========================="
+ "Periodical===========================\n");
+
+ if (dis_ver_info_cnt <= 5) {
+ dis_ver_info_cnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], ****************************"
+ "************************************\n");
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ "
+ "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num,
+ board_info->btdm_ant_num, board_info->btdm_ant_pos);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+ &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = "
+ "%d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], *****************************"
+ "***********************************\n");
+ }
+
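+ /* without BT auto-report, poll BT info and counters here; otherwise only
+ * re-run the coex mechanism when the Wi-Fi status changed or TDMA
+ * auto-adjust is active
+ */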
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+ btc8723b2ant_query_bt_info(btcoexist);
+ btc8723b2ant_monitor_bt_ctr(btcoexist);
+ btc8723b2ant_monitor_bt_enable_disable(btcoexist);
+#else
+ if (btc8723b2ant_is_wifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust)
+ btc8723b2ant_run_coexist_mechanism(btcoexist);
+#endif
+}
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h
new file mode 100644
index 000000000000..e0ad8e545f82
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h
@@ -0,0 +1,173 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2012 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#ifndef _HAL8723B_2_ANT
+#define _HAL8723B_2_ANT
+
+/************************************************************************
+ * The following is for 8723B 2Ant BT Co-exist definition
+ ************************************************************************/
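+/* 1: rely on BT auto-report C2H events,
+ * 0: poll BT info from the periodical callback
+ */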
+#define BT_AUTO_REPORT_ONLY_8723B_2ANT 1
+
+#define BT_INFO_8723B_2ANT_B_FTP BIT7
+#define BT_INFO_8723B_2ANT_B_A2DP BIT6
+#define BT_INFO_8723B_2ANT_B_HID BIT5
+#define BT_INFO_8723B_2ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723B_2ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723B_2ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723B_2ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723B_2ANT_B_CONNECTION BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT 2
+
+enum BT_INFO_SRC_8723B_2ANT {
+ BT_INFO_SRC_8723B_2ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723B_2ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723B_2ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723B_2ANT_MAX
+};
+
+enum BT_8723B_2ANT_BT_STATUS {
+ BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723B_2ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8723B_2ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8723B_2ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8723B_2ANT_BT_STATUS_MAX
+};
+
+enum BT_8723B_2ANT_COEX_ALGO {
+ BT_8723B_2ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723B_2ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723B_2ANT_COEX_ALGO_HID = 0x2,
+ BT_8723B_2ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8723B_2ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8723B_2ANT_COEX_ALGO_MAX = 0xb,
+};
+
+struct coex_dm_8723b_2ant {
+ /* fw mechanism */
+ bool pre_dec_bt_pwr;
+ bool cur_dec_bt_pwr;
+ u8 pre_fw_dac_swing_lvl;
+ u8 cur_fw_dac_swing_lvl;
+ bool cur_ignore_wlan_act;
+ bool pre_ignore_wlan_act;
+ u8 pre_ps_tdma;
+ u8 cur_ps_tdma;
+ u8 ps_tdma_para[5];
+ u8 tdma_adj_type;
+ bool reset_tdma_adjust;
+ bool auto_tdma_adjust;
+ bool pre_ps_tdma_on;
+ bool cur_ps_tdma_on;
+ bool pre_bt_auto_report;
+ bool cur_bt_auto_report;
+
+ /* sw mechanism */
+ bool pre_rf_rx_lpf_shrink;
+ bool cur_rf_rx_lpf_shrink;
+ u32 bt_rf0x1e_backup;
+ bool pre_low_penalty_ra;
+ bool cur_low_penalty_ra;
+ bool pre_dac_swing_on;
+ u32 pre_dac_swing_lvl;
+ bool cur_dac_swing_on;
+ u32 cur_dac_swing_lvl;
+ bool pre_adc_back_off;
+ bool cur_adc_back_off;
+ bool pre_agc_table_en;
+ bool cur_agc_table_en;
+ u32 pre_val0x6c0;
+ u32 cur_val0x6c0;
+ u32 pre_val0x6c4;
+ u32 cur_val0x6c4;
+ u32 pre_val0x6c8;
+ u32 cur_val0x6c8;
+ u8 pre_val0x6cc;
+ u8 cur_val0x6cc;
+ bool limited_dig;
+
+ /* algorithm related */
+ u8 pre_algorithm;
+ u8 cur_algorithm;
+ u8 bt_status;
+ u8 wifi_chnl_info[3];
+
+ bool need_recover_0x948;
+ u16 backup_0x948;
+};
+
+struct coex_sta_8723b_2ant {
+ bool bt_link_exist;
+ bool sco_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ bool pan_exist;
+
+ bool under_lps;
+ bool under_ips;
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 bt_rssi;
+ u8 pre_bt_rssi_state;
+ u8 pre_wifi_rssi_state[4];
+ bool c2h_bt_info_req_sent;
+ u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10];
+ u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX];
+ bool c2h_bt_inquiry_page;
+ u8 bt_retry_cnt;
+ u8 bt_info_ext;
+};
+
+/*********************************************************************
+ * The following is interface which will notify coex module.
+ *********************************************************************/
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void btc8723b_med_stat_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length);
+void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
new file mode 100644
index 000000000000..b6722de64a31
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -0,0 +1,1011 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
+
+#include "halbt_precomp.h"
+
+/***********************************************
+ * Global variables
+ ***********************************************/
+
+struct btc_coexist gl_bt_coexist;
+
+u32 btc_dbg_type[BTC_MSG_MAX];
+static u8 btc_dbg_buf[100];
+
+/***************************************************
+ * Debug related function
+ ***************************************************/
+static bool halbtc_is_bt_coexist_available(struct btc_coexist *btcoexist)
+{
+ if (!btcoexist->binded || NULL == btcoexist->adapter)
+ return false;
+
+ return true;
+}
+
+static bool halbtc_is_wifi_busy(struct rtl_priv *rtlpriv)
+{
+ if (rtlpriv->link_info.busytraffic)
+ return true;
+ else
+ return false;
+}
+
+static void halbtc_dbg_init(void)
+{
+ u8 i;
+
+ for (i = 0; i < BTC_MSG_MAX; i++)
+ btc_dbg_type[i] = 0;
+
+ btc_dbg_type[BTC_MSG_INTERFACE] =
+/* INTF_INIT | */
+/* INTF_NOTIFY | */
+ 0;
+
+ btc_dbg_type[BTC_MSG_ALGORITHM] =
+/* ALGO_BT_RSSI_STATE | */
+/* ALGO_WIFI_RSSI_STATE | */
+/* ALGO_BT_MONITOR | */
+/* ALGO_TRACE | */
+/* ALGO_TRACE_FW | */
+/* ALGO_TRACE_FW_DETAIL | */
+/* ALGO_TRACE_FW_EXEC | */
+/* ALGO_TRACE_SW | */
+/* ALGO_TRACE_SW_DETAIL | */
+/* ALGO_TRACE_SW_EXEC | */
+ 0;
+}
+
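+/* despite the name, this reports whether the Wi-Fi channel width is HT40 */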
+static bool halbtc_is_bt40(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool is_ht40 = true;
+ enum ht_channel_width bw = rtlphy->current_chan_bw;
+
+ if (bw == HT_CHANNEL_WIDTH_20)
+ is_ht40 = false;
+ else if (bw == HT_CHANNEL_WIDTH_20_40)
+ is_ht40 = true;
+
+ return is_ht40;
+}
+
+static bool halbtc_legacy(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ bool is_legacy = false;
+
+ if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
+ is_legacy = true;
+
+ return is_legacy;
+}
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+
+ if (rtlpriv->link_info.tx_busy_traffic)
+ return true;
+ else
+ return false;
+}
+
+static u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv =
+ (struct rtl_priv *)btcoexist->adapter;
+ u32 wifi_bw = BTC_WIFI_BW_HT20;
+
+ if (halbtc_is_bt40(rtlpriv)) {
+ wifi_bw = BTC_WIFI_BW_HT40;
+ } else {
+ if (halbtc_legacy(rtlpriv))
+ wifi_bw = BTC_WIFI_BW_LEGACY;
+ else
+ wifi_bw = BTC_WIFI_BW_HT20;
+ }
+ return wifi_bw;
+}
+
+static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 chnl = 1;
+
+ if (rtlphy->current_channel != 0)
+ chnl = rtlphy->current_channel;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "static halbtc_get_wifi_central_chnl:%d\n", chnl);
+ return chnl;
+}
+
+static void halbtc_leave_lps(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv;
+ struct rtl_ps_ctl *ppsc;
+ bool ap_enable = false;
+
+ rtlpriv = btcoexist->adapter;
+ ppsc = rtl_psc(rtlpriv);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+
+ if (ap_enable) {
+ pr_info("halbtc_leave_lps()<--dont leave lps under AP mode\n");
+ return;
+ }
+
+ btcoexist->bt_info.bt_ctrl_lps = true;
+ btcoexist->bt_info.bt_lps_on = false;
+}
+
+static void halbtc_enter_lps(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv;
+ struct rtl_ps_ctl *ppsc;
+ bool ap_enable = false;
+
+ rtlpriv = btcoexist->adapter;
+ ppsc = rtl_psc(rtlpriv);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+
+ if (ap_enable) {
+ pr_info("halbtc_enter_lps()<--dont enter lps under AP mode\n");
+ return;
+ }
+
+ btcoexist->bt_info.bt_ctrl_lps = true;
+ btcoexist->bt_info.bt_lps_on = true;
+}
+
+static void halbtc_normal_lps(struct btc_coexist *btcoexist)
+{
+ if (btcoexist->bt_info.bt_ctrl_lps) {
+ btcoexist->bt_info.bt_lps_on = false;
+ btcoexist->bt_info.bt_ctrl_lps = false;
+ }
+}
+
+static void halbtc_leave_low_power(void)
+{
+}
+
+static void halbtc_nomal_low_power(void)
+{
+}
+
+static void halbtc_disable_low_power(void)
+{
+}
+
+static void halbtc_aggregation_check(void)
+{
+}
+
+static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
+{
+ return 0;
+}
+
+static s32 halbtc_get_wifi_rssi(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ s32 undec_sm_pwdb = 0;
+
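+ /* TODO: both branches currently return the smoothed PWDB; the
+ * associated-entry case is not differentiated yet
+ */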
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+ else /* associated entry pwdb */
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+ return undec_sm_pwdb;
+}
+
+static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ bool *bool_tmp = (bool *)out_buf;
+ int *s32_tmp = (int *)out_buf;
+ u32 *u32_tmp = (u32 *)out_buf;
+ u8 *u8_tmp = (u8 *)out_buf;
+ bool tmp = false;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return false;
+
+ switch (get_type) {
+ case BTC_GET_BL_HS_OPERATION:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_HS_CONNECTING:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_CONNECTED:
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ tmp = true;
+ *bool_tmp = tmp;
+ break;
+ case BTC_GET_BL_WIFI_BUSY:
+ if (halbtc_is_wifi_busy(rtlpriv))
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_SCAN:
+ if (mac->act_scanning)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_LINK:
+ if (mac->link_state == MAC80211_LINKING)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_ROAM: /*TODO*/
+ if (mac->link_state == MAC80211_LINKING)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_4_WAY_PROGRESS: /*TODO*/
+ *bool_tmp = false;
+
+ break;
+ case BTC_GET_BL_WIFI_UNDER_5G:
+ *bool_tmp = false; /*TODO*/
+ break;
+ case BTC_GET_BL_WIFI_DHCP: /*TODO*/
+ break;
+ case BTC_GET_BL_WIFI_SOFTAP_IDLE:
+ *bool_tmp = true;
+ break;
+ case BTC_GET_BL_WIFI_SOFTAP_LINKING:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_IN_EARLY_SUSPEND:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
+ if (NO_ENCRYPTION == rtlpriv->sec.pairwise_enc_algorithm)
+ *bool_tmp = false;
+ else
+ *bool_tmp = true;
+ break;
+ case BTC_GET_BL_WIFI_UNDER_B_MODE:
+ *bool_tmp = false; /*TODO*/
+ break;
+ case BTC_GET_BL_EXT_SWITCH:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_S4_WIFI_RSSI:
+ *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+ break;
+ case BTC_GET_S4_HS_RSSI: /*TODO*/
+ *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+ break;
+ case BTC_GET_U4_WIFI_BW:
+ *u32_tmp = halbtc_get_wifi_bw(btcoexist);
+ break;
+ case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
+ if (halbtc_is_wifi_uplink(rtlpriv))
+ *u32_tmp = BTC_WIFI_TRAFFIC_TX;
+ else
+ *u32_tmp = BTC_WIFI_TRAFFIC_RX;
+ break;
+ case BTC_GET_U4_WIFI_FW_VER:
+ *u32_tmp = rtlhal->fw_version;
+ break;
+ case BTC_GET_U4_BT_PATCH_VER:
+ *u32_tmp = halbtc_get_bt_patch_version(btcoexist);
+ break;
+ case BTC_GET_U1_WIFI_DOT11_CHNL:
+ *u8_tmp = rtlphy->current_channel;
+ break;
+ case BTC_GET_U1_WIFI_CENTRAL_CHNL:
+ *u8_tmp = halbtc_get_wifi_central_chnl(btcoexist);
+ break;
+ case BTC_GET_U1_WIFI_HS_CHNL:
+ *u8_tmp = 1;/*BT_OperateChnl(rtlpriv);*/
+ break;
+ case BTC_GET_U1_MAC_PHY_MODE:
+ *u8_tmp = BTC_MP_UNKNOWN;
+ break;
+
+ /************* 1Ant **************/
+ case BTC_GET_U1_LPS_MODE:
+ *u8_tmp = btcoexist->pwr_mode_val[0];
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+ bool *bool_tmp = (bool *)in_buf;
+ u8 *u8_tmp = (u8 *)in_buf;
+ u32 *u32_tmp = (u32 *)in_buf;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return false;
+
+ switch (set_type) {
+ /* set some bool type variables. */
+ case BTC_SET_BL_BT_DISABLE:
+ btcoexist->bt_info.bt_disabled = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_TRAFFIC_BUSY:
+ btcoexist->bt_info.bt_busy = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_LIMITED_DIG:
+ btcoexist->bt_info.limited_dig = *bool_tmp;
+ break;
+ case BTC_SET_BL_FORCE_TO_ROAM:
+ btcoexist->bt_info.force_to_roam = *bool_tmp;
+ break;
+ case BTC_SET_BL_TO_REJ_AP_AGG_PKT:
+ btcoexist->bt_info.reject_agg_pkt = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_CTRL_AGG_SIZE:
+ btcoexist->bt_info.b_bt_ctrl_buf_size = *bool_tmp;
+ break;
+ case BTC_SET_BL_INC_SCAN_DEV_NUM:
+ btcoexist->bt_info.increase_scan_dev_num = *bool_tmp;
+ break;
+ /* set some u1Byte type variables. */
+ case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON:
+ btcoexist->bt_info.rssi_adjust_for_agc_table_on = *u8_tmp;
+ break;
+ case BTC_SET_U1_AGG_BUF_SIZE:
+ btcoexist->bt_info.agg_buf_size = *u8_tmp;
+ break;
+ /* the following are actions which will be triggered */
+ case BTC_SET_ACT_GET_BT_RSSI:
+ /*BTHCI_SendGetBtRssiEvent(rtlpriv);*/
+ break;
+ case BTC_SET_ACT_AGGREGATE_CTRL:
+ halbtc_aggregation_check();
+ break;
+
+ /* 1Ant */
+ case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE:
+ btcoexist->bt_info.rssi_adjust_for_1ant_coex_type = *u8_tmp;
+ break;
+ case BTC_SET_UI_SCAN_SIG_COMPENSATION:
+ /* rtlpriv->mlmepriv.scan_compensation = *u8_tmp; */
+ break;
+ case BTC_SET_U1_1ANT_LPS:
+ btcoexist->bt_info.lps_1ant = *u8_tmp;
+ break;
+ case BTC_SET_U1_1ANT_RPWM:
+ btcoexist->bt_info.rpwm_1ant = *u8_tmp;
+ break;
+ /* the following are actions which will be triggered */
+ case BTC_SET_ACT_LEAVE_LPS:
+ halbtc_leave_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_ENTER_LPS:
+ halbtc_enter_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_NORMAL_LPS:
+ halbtc_normal_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_DISABLE_LOW_POWER:
+ halbtc_disable_low_power();
+ break;
+ case BTC_SET_ACT_UPDATE_ra_mask:
+ btcoexist->bt_info.ra_mask = *u32_tmp;
+ break;
+ case BTC_SET_ACT_SEND_MIMO_PS:
+ break;
+ case BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT:
+ btcoexist->bt_info.force_exec_pwr_cmd_cnt++;
+ break;
+ case BTC_SET_ACT_CTRL_BT_INFO: /*wait for 8812/8821*/
+ break;
+ case BTC_SET_ACT_CTRL_BT_COEX:
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist)
+{
+}
+
+static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist)
+{
+}
+
+static void halbtc_display_bt_fw_info(struct btc_coexist *btcoexist)
+{
+}
+
+static void halbtc_display_fw_pwr_mode_cmd(struct btc_coexist *btcoexist)
+{
+}
+
+/************************************************************
+ * IO related function
+ ************************************************************/
+static u8 halbtc_read_1byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_byte(rtlpriv, reg_addr);
+}
+
+static u16 halbtc_read_2byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_word(rtlpriv, reg_addr);
+}
+
+static u32 halbtc_read_4byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_dword(rtlpriv, reg_addr);
+}
+
+static void halbtc_write_1byte(void *bt_context, u32 reg_addr, u8 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
+static void halbtc_bitmask_write_1byte(void *bt_context, u32 reg_addr,
+ u32 bit_mask, u8 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 original_value, bit_shift = 0;
+ u8 i;
+
+ if (bit_mask != MASKDWORD) {/*if not "double word" write*/
+ original_value = rtl_read_byte(rtlpriv, reg_addr);
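+ /* find the shift of the least-significant set bit in the mask */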
+ for (i = 0; i <= 7; i++) {
+ if ((bit_mask>>i) & 0x1)
+ break;
+ }
+ bit_shift = i;
+ data = (original_value & (~bit_mask)) |
+ ((data << bit_shift) & bit_mask);
+ }
+ rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
+static void halbtc_write_2byte(void *bt_context, u32 reg_addr, u16 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_word(rtlpriv, reg_addr, data);
+}
+
+static void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
+{
+ struct btc_coexist *btcoexist =
+ (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_dword(rtlpriv, reg_addr, data);
+}
+
+static void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask,
+ u32 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
+}
+
+static u32 halbtc_get_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
+}
+
+static void halbtc_set_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask, u32 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_set_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask, data);
+}
+
+static u32 halbtc_get_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_get_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask);
+}
+
+static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id,
+ u32 cmd_len, u8 *cmd_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, element_id,
+ cmd_len, cmd_buf);
+}
+
+static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ switch (disp_type) {
+ case BTC_DBG_DISP_COEX_STATISTICS:
+ halbtc_display_coex_statistics(btcoexist);
+ break;
+ case BTC_DBG_DISP_BT_LINK_INFO:
+ halbtc_display_bt_link_info(btcoexist);
+ break;
+ case BTC_DBG_DISP_BT_FW_VER:
+ halbtc_display_bt_fw_info(btcoexist);
+ break;
+ case BTC_DBG_DISP_FW_PWR_MODE_CMD:
+ halbtc_display_fw_pwr_mode_cmd(btcoexist);
+ break;
+ default:
+ break;
+ }
+}
+
+/*****************************************************************
+ * Extern functions called by other module
+ *****************************************************************/
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ btcoexist->statistics.cnt_bind++;
+
+ halbtc_dbg_init();
+
+ if (btcoexist->binded)
+ return false;
+ else
+ btcoexist->binded = true;
+
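+ /* the host bus interface is fixed at build time by the HCI config options */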
+#if (defined(CONFIG_PCI_HCI))
+ btcoexist->chip_interface = BTC_INTF_PCI;
+#elif (defined(CONFIG_USB_HCI))
+ btcoexist->chip_interface = BTC_INTF_USB;
+#elif (defined(CONFIG_SDIO_HCI))
+ btcoexist->chip_interface = BTC_INTF_SDIO;
+#elif (defined(CONFIG_GSPI_HCI))
+ btcoexist->chip_interface = BTC_INTF_GSPI;
+#else
+ btcoexist->chip_interface = BTC_INTF_UNKNOWN;
+#endif
+
+ if (NULL == btcoexist->adapter)
+ btcoexist->adapter = adapter;
+
+ btcoexist->stack_info.profile_notified = false;
+
+ btcoexist->btc_read_1byte = halbtc_read_1byte;
+ btcoexist->btc_write_1byte = halbtc_write_1byte;
+ btcoexist->btc_write_1byte_bitmask = halbtc_bitmask_write_1byte;
+ btcoexist->btc_read_2byte = halbtc_read_2byte;
+ btcoexist->btc_write_2byte = halbtc_write_2byte;
+ btcoexist->btc_read_4byte = halbtc_read_4byte;
+ btcoexist->btc_write_4byte = halbtc_write_4byte;
+
+ btcoexist->btc_set_bb_reg = halbtc_set_bbreg;
+ btcoexist->btc_get_bb_reg = halbtc_get_bbreg;
+
+ btcoexist->btc_set_rf_reg = halbtc_set_rfreg;
+ btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
+
+ btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
+ btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
+
+ btcoexist->btc_get = halbtc_get;
+ btcoexist->btc_set = halbtc_set;
+
+ btcoexist->cli_buf = &btc_dbg_buf[0];
+
+ btcoexist->bt_info.b_bt_ctrl_buf_size = false;
+ btcoexist->bt_info.agg_buf_size = 5;
+
+ btcoexist->bt_info.increase_scan_dev_num = false;
+ return true;
+}
+
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->statistics.cnt_init_hw_config++;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_init_hwconfig(btcoexist);
+}
+
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->statistics.cnt_init_coex_dm++;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_init_coex_dm(btcoexist);
+
+ btcoexist->initilized = true;
+}
+
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 ips_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_ips_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (ERFOFF == type)
+ ips_type = BTC_IPS_ENTER;
+ else
+ ips_type = BTC_IPS_LEAVE;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_ips_notify(btcoexist, ips_type);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 lps_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_lps_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (EACTIVE == type)
+ lps_type = BTC_LPS_DISABLE;
+ else
+ lps_type = BTC_LPS_ENABLE;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_lps_notify(btcoexist, lps_type);
+}
+
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 scan_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_scan_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (type)
+ scan_type = BTC_SCAN_START;
+ else
+ scan_type = BTC_SCAN_FINISH;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_scan_notify(btcoexist, scan_type);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 asso_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_connect_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (action)
+ asso_type = BTC_ASSOCIATE_START;
+ else
+ asso_type = BTC_ASSOCIATE_FINISH;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_connect_notify(btcoexist, asso_type);
+}
+
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
+ enum _RT_MEDIA_STATUS media_status)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 status;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_media_status_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (RT_MEDIA_CONNECT == media_status)
+ status = BTC_MEDIA_CONNECT;
+ else
+ status = BTC_MEDIA_DISCONNECT;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ btc8723b_med_stat_notify(btcoexist, status);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 packet_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_special_packet_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ packet_type = BTC_PACKET_DHCP;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_special_packet_notify(btcoexist,
+ packet_type);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_bt_info_notify++;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length);
+}
+
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 stack_op_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_stack_operation_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ stack_op_type = BTC_STACK_OP_NONE;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_stack_operation_notify(btcoexist,
+ stack_op_type);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_halt_notify(btcoexist);
+}
+
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+}
+
+void exhalbtc_periodical(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_periodical++;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_periodical(btcoexist);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist,
+ u8 code, u8 len, u8 *data)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_dbg_ctrl++;
+}
+
+void exhalbtc_stack_update_profile_info(void)
+{
+}
+
+void exhalbtc_update_min_bt_rssi(char bt_rssi)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->stack_info.min_bt_rssi = bt_rssi;
+}
+
+void exhalbtc_set_hci_version(u16 hci_version)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->stack_info.hci_version = hci_version;
+}
+
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->bt_info.bt_real_fw_ver = bt_patch_version;
+ btcoexist->bt_info.bt_hci_ver = bt_hci_version;
+}
+
+void exhalbtc_set_bt_exist(bool bt_exist)
+{
+ gl_bt_coexist.board_info.bt_exist = bt_exist;
+}
+
+void exhalbtc_set_chip_type(u8 chip_type)
+{
+ switch (chip_type) {
+ default:
+ case BT_2WIRE:
+ case BT_ISSC_3WIRE:
+ case BT_ACCEL:
+ case BT_RTL8756:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF;
+ break;
+ case BT_CSR_BC4:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
+ break;
+ case BT_CSR_BC8:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
+ break;
+ case BT_RTL8723A:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A;
+ break;
+ case BT_RTL8821A:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821;
+ break;
+ case BT_RTL8723B:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B;
+ break;
+ }
+}
+
+void exhalbtc_set_ant_num(u8 type, u8 ant_num)
+{
+ if (BT_COEX_ANT_TYPE_PG == type) {
+ gl_bt_coexist.board_info.pg_ant_num = ant_num;
+ gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ } else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
+ gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ }
+}
+
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_display_coex_info(btcoexist);
+}
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
new file mode 100644
index 000000000000..871fc3c6d559
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -0,0 +1,559 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#ifndef __HALBTC_OUT_SRC_H__
+#define __HALBTC_OUT_SRC_H__
+
+#include "../wifi.h"
+
+#define NORMAL_EXEC false
+#define FORCE_EXEC true
+
+#define BTC_RF_A RF90_PATH_A
+#define BTC_RF_B RF90_PATH_B
+#define BTC_RF_C RF90_PATH_C
+#define BTC_RF_D RF90_PATH_D
+
+#define BTC_SMSP SINGLEMAC_SINGLEPHY
+#define BTC_DMDP DUALMAC_DUALPHY
+#define BTC_DMSP DUALMAC_SINGLEPHY
+#define BTC_MP_UNKNOWN 0xff
+
+#define IN
+#define OUT
+
+#define BT_TMP_BUF_SIZE 100
+
+#define BT_COEX_ANT_TYPE_PG 0
+#define BT_COEX_ANT_TYPE_ANTDIV 1
+#define BT_COEX_ANT_TYPE_DETECTED 2
+
+#define BTC_MIMO_PS_STATIC 0
+#define BTC_MIMO_PS_DYNAMIC 1
+
+#define BTC_RATE_DISABLE 0
+#define BTC_RATE_ENABLE 1
+
+#define BTC_ANT_PATH_WIFI 0
+#define BTC_ANT_PATH_BT 1
+#define BTC_ANT_PATH_PTA 2
+
+enum btc_chip_interface {
+ BTC_INTF_UNKNOWN = 0,
+ BTC_INTF_PCI = 1,
+ BTC_INTF_USB = 2,
+ BTC_INTF_SDIO = 3,
+ BTC_INTF_GSPI = 4,
+ BTC_INTF_MAX
+};
+
+enum BTC_CHIP_TYPE {
+ BTC_CHIP_UNDEF = 0,
+ BTC_CHIP_CSR_BC4 = 1,
+ BTC_CHIP_CSR_BC8 = 2,
+ BTC_CHIP_RTL8723A = 3,
+ BTC_CHIP_RTL8821 = 4,
+ BTC_CHIP_RTL8723B = 5,
+ BTC_CHIP_MAX
+};
+
+enum BTC_MSG_TYPE {
+ BTC_MSG_INTERFACE = 0x0,
+ BTC_MSG_ALGORITHM = 0x1,
+ BTC_MSG_MAX
+};
+extern u32 btc_dbg_type[];
+
+/* following is for BTC_MSG_INTERFACE */
+#define INTF_INIT BIT0
+#define INTF_NOTIFY BIT2
+
+/* following is for BTC_ALGORITHM */
+#define ALGO_BT_RSSI_STATE BIT0
+#define ALGO_WIFI_RSSI_STATE BIT1
+#define ALGO_BT_MONITOR BIT2
+#define ALGO_TRACE BIT3
+#define ALGO_TRACE_FW BIT4
+#define ALGO_TRACE_FW_DETAIL BIT5
+#define ALGO_TRACE_FW_EXEC BIT6
+#define ALGO_TRACE_SW BIT7
+#define ALGO_TRACE_SW_DETAIL BIT8
+#define ALGO_TRACE_SW_EXEC BIT9
+
+
+#define CL_SPRINTF snprintf
+#define CL_PRINTF printk
+
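+/* debug output is gated per message type by the btc_dbg_type[] bit masks */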
+#define BTC_PRINT(dbgtype, dbgflag, printstr, ...) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+ printk(printstr, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_F(dbgtype, dbgflag, printstr, ...) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+ pr_info("%s: ", __func__); \
+ printk(printstr, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _ptr) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \
+ int __i; \
+ u8 *__ptr = (u8 *)_ptr; \
+ printk printstr; \
+ for (__i = 0; __i < 6; __i++) \
+ printk("%02X%s", __ptr[__i], (__i == 5) ? \
+ "" : "-"); \
+ pr_info("\n"); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_DATA(dbgtype, dbgflag, _titlestring, _hexdata, _hexdatalen) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \
+ int __i; \
+ u8 *__ptr = (u8 *)_hexdata; \
+ printk(_titlestring); \
+ for (__i = 0; __i < (int)_hexdatalen; __i++) { \
+ printk("%02X%s", __ptr[__i], (((__i + 1) % 4) \
+ == 0) ? " " : " ");\
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ pr_debug("\n"); \
+ } \
+ } while (0)
+
+enum btc_power_save_type {
+ BTC_PS_WIFI_NATIVE = 0,
+ BTC_PS_LPS_ON = 1,
+ BTC_PS_LPS_OFF = 2,
+ BTC_PS_LPS_MAX
+};
+
+struct btc_board_info {
+ /* The following is some board information */
+ u8 bt_chip_type;
+ u8 pg_ant_num; /* pg ant number */
+ u8 btdm_ant_num; /* ant number for btdm */
+ u8 btdm_ant_pos;
+ bool bt_exist;
+};
+
+enum btc_dbg_opcode {
+ BTC_DBG_SET_COEX_NORMAL = 0x0,
+ BTC_DBG_SET_COEX_WIFI_ONLY = 0x1,
+ BTC_DBG_SET_COEX_BT_ONLY = 0x2,
+ BTC_DBG_MAX
+};
+
+enum btc_rssi_state {
+ BTC_RSSI_STATE_HIGH = 0x0,
+ BTC_RSSI_STATE_MEDIUM = 0x1,
+ BTC_RSSI_STATE_LOW = 0x2,
+ BTC_RSSI_STATE_STAY_HIGH = 0x3,
+ BTC_RSSI_STATE_STAY_MEDIUM = 0x4,
+ BTC_RSSI_STATE_STAY_LOW = 0x5,
+ BTC_RSSI_MAX
+};
+
+enum btc_wifi_role {
+ BTC_ROLE_STATION = 0x0,
+ BTC_ROLE_AP = 0x1,
+ BTC_ROLE_IBSS = 0x2,
+ BTC_ROLE_HS_MODE = 0x3,
+ BTC_ROLE_MAX
+};
+
+enum btc_wifi_bw_mode {
+ BTC_WIFI_BW_LEGACY = 0x0,
+ BTC_WIFI_BW_HT20 = 0x1,
+ BTC_WIFI_BW_HT40 = 0x2,
+ BTC_WIFI_BW_MAX
+};
+
+enum btc_wifi_traffic_dir {
+ BTC_WIFI_TRAFFIC_TX = 0x0,
+ BTC_WIFI_TRAFFIC_RX = 0x1,
+ BTC_WIFI_TRAFFIC_MAX
+};
+
+enum btc_wifi_pnp {
+ BTC_WIFI_PNP_WAKE_UP = 0x0,
+ BTC_WIFI_PNP_SLEEP = 0x1,
+ BTC_WIFI_PNP_MAX
+};
+
+
+enum btc_get_type {
+ /* type bool */
+ BTC_GET_BL_HS_OPERATION,
+ BTC_GET_BL_HS_CONNECTING,
+ BTC_GET_BL_WIFI_CONNECTED,
+ BTC_GET_BL_WIFI_BUSY,
+ BTC_GET_BL_WIFI_SCAN,
+ BTC_GET_BL_WIFI_LINK,
+ BTC_GET_BL_WIFI_DHCP,
+ BTC_GET_BL_WIFI_SOFTAP_IDLE,
+ BTC_GET_BL_WIFI_SOFTAP_LINKING,
+ BTC_GET_BL_WIFI_IN_EARLY_SUSPEND,
+ BTC_GET_BL_WIFI_ROAM,
+ BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ BTC_GET_BL_WIFI_UNDER_5G,
+ BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
+ BTC_GET_BL_WIFI_UNDER_B_MODE,
+ BTC_GET_BL_EXT_SWITCH,
+
+ /* type s4Byte */
+ BTC_GET_S4_WIFI_RSSI,
+ BTC_GET_S4_HS_RSSI,
+
+ /* type u32 */
+ BTC_GET_U4_WIFI_BW,
+ BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ BTC_GET_U4_WIFI_FW_VER,
+ BTC_GET_U4_BT_PATCH_VER,
+
+ /* type u1Byte */
+ BTC_GET_U1_WIFI_DOT11_CHNL,
+ BTC_GET_U1_WIFI_CENTRAL_CHNL,
+ BTC_GET_U1_WIFI_HS_CHNL,
+ BTC_GET_U1_MAC_PHY_MODE,
+
+ /* for 1Ant */
+ BTC_GET_U1_LPS_MODE,
+ BTC_GET_BL_BT_SCO_BUSY,
+
+ /* for test mode */
+ BTC_GET_DRIVER_TEST_CFG,
+ BTC_GET_MAX
+};
+
+enum btc_set_type {
+ /* type bool */
+ BTC_SET_BL_BT_DISABLE,
+ BTC_SET_BL_BT_TRAFFIC_BUSY,
+ BTC_SET_BL_BT_LIMITED_DIG,
+ BTC_SET_BL_FORCE_TO_ROAM,
+ BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+ BTC_SET_BL_BT_CTRL_AGG_SIZE,
+ BTC_SET_BL_INC_SCAN_DEV_NUM,
+
+ /* type u1Byte */
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
+ BTC_SET_UI_SCAN_SIG_COMPENSATION,
+ BTC_SET_U1_AGG_BUF_SIZE,
+
+ /* type trigger some action */
+ BTC_SET_ACT_GET_BT_RSSI,
+ BTC_SET_ACT_AGGREGATE_CTRL,
+
+ /********* for 1Ant **********/
+ /* type bool */
+ BTC_SET_BL_BT_SCO_BUSY,
+ /* type u1Byte */
+ BTC_SET_U1_1ANT_LPS,
+ BTC_SET_U1_1ANT_RPWM,
+ /* type trigger some action */
+ BTC_SET_ACT_LEAVE_LPS,
+ BTC_SET_ACT_ENTER_LPS,
+ BTC_SET_ACT_NORMAL_LPS,
+ BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ BTC_SET_ACT_UPDATE_ra_mask,
+ BTC_SET_ACT_SEND_MIMO_PS,
+ /* BT Coex related */
+ BTC_SET_ACT_CTRL_BT_INFO,
+ BTC_SET_ACT_CTRL_BT_COEX,
+ /***************************/
+ BTC_SET_MAX
+};
+
+enum btc_dbg_disp_type {
+ BTC_DBG_DISP_COEX_STATISTICS = 0x0,
+ BTC_DBG_DISP_BT_LINK_INFO = 0x1,
+ BTC_DBG_DISP_BT_FW_VER = 0x2,
+ BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
+ BTC_DBG_DISP_MAX
+};
+
+enum btc_notify_type_ips {
+ BTC_IPS_LEAVE = 0x0,
+ BTC_IPS_ENTER = 0x1,
+ BTC_IPS_MAX
+};
+
+enum btc_notify_type_lps {
+ BTC_LPS_DISABLE = 0x0,
+ BTC_LPS_ENABLE = 0x1,
+ BTC_LPS_MAX
+};
+
+enum btc_notify_type_scan {
+ BTC_SCAN_FINISH = 0x0,
+ BTC_SCAN_START = 0x1,
+ BTC_SCAN_MAX
+};
+
+enum btc_notify_type_associate {
+ BTC_ASSOCIATE_FINISH = 0x0,
+ BTC_ASSOCIATE_START = 0x1,
+ BTC_ASSOCIATE_MAX
+};
+
+enum btc_notify_type_media_status {
+ BTC_MEDIA_DISCONNECT = 0x0,
+ BTC_MEDIA_CONNECT = 0x1,
+ BTC_MEDIA_MAX
+};
+
+enum btc_notify_type_special_packet {
+ BTC_PACKET_UNKNOWN = 0x0,
+ BTC_PACKET_DHCP = 0x1,
+ BTC_PACKET_ARP = 0x2,
+ BTC_PACKET_EAPOL = 0x3,
+ BTC_PACKET_MAX
+};
+
+enum btc_notify_type_stack_operation {
+ BTC_STACK_OP_NONE = 0x0,
+ BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1,
+ BTC_STACK_OP_INQ_PAGE_PAIR_FINISH = 0x2,
+ BTC_STACK_OP_MAX
+};
+
+typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr);
+
+typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr);
+
+typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr);
+
+typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u8 data);
+
+typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr,
+ u32 bit_mask, u8 data1b);
+
+typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
+
+typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
+
+typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
+ u8 bit_mask, u8 data);
+
+typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask);
+
+typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path,
+ u32 reg_addr, u32 bit_mask);
+
+typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id,
+ u32 cmd_len, u8 *cmd_buffer);
+
+typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
+
+typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
+
+typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
+
+struct btc_bt_info {
+ bool bt_disabled;
+ u8 rssi_adjust_for_agc_table_on;
+ u8 rssi_adjust_for_1ant_coex_type;
+ bool bt_busy;
+ u8 agg_buf_size;
+ bool limited_dig;
+ bool reject_agg_pkt;
+ bool b_bt_ctrl_buf_size;
+ bool increase_scan_dev_num;
+ u16 bt_hci_ver;
+ u16 bt_real_fw_ver;
+ u8 bt_fw_ver;
+
+ /* the following is for 1Ant solution */
+ bool bt_ctrl_lps;
+ bool bt_pwr_save_mode;
+ bool bt_lps_on;
+ bool force_to_roam;
+ u8 force_exec_pwr_cmd_cnt;
+ u8 lps_1ant;
+ u8 rpwm_1ant;
+ u32 ra_mask;
+};
+
+struct btc_stack_info {
+ bool profile_notified;
+ u16 hci_version; /* stack hci version */
+ u8 num_of_link;
+ bool bt_link_exist;
+ bool sco_exist;
+ bool acl_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ u8 num_of_hid;
+ bool pan_exist;
+ bool unknown_acl_exist;
+ char min_bt_rssi;
+};
+
+struct btc_statistics {
+ u32 cnt_bind;
+ u32 cnt_init_hw_config;
+ u32 cnt_init_coex_dm;
+ u32 cnt_ips_notify;
+ u32 cnt_lps_notify;
+ u32 cnt_scan_notify;
+ u32 cnt_connect_notify;
+ u32 cnt_media_status_notify;
+ u32 cnt_special_packet_notify;
+ u32 cnt_bt_info_notify;
+ u32 cnt_periodical;
+ u32 cnt_stack_operation_notify;
+ u32 cnt_dbg_ctrl;
+};
+
+struct btc_bt_link_info {
+ bool bt_link_exist;
+ bool sco_exist;
+ bool sco_only;
+ bool a2dp_exist;
+ bool a2dp_only;
+ bool hid_exist;
+ bool hid_only;
+ bool pan_exist;
+ bool pan_only;
+};
+
+enum btc_antenna_pos {
+ BTC_ANTENNA_AT_MAIN_PORT = 0x1,
+ BTC_ANTENNA_AT_AUX_PORT = 0x2,
+};
+
+struct btc_coexist {
+ /* make sure only one adapter can bind the data context */
+ bool binded;
+ /* default adapter */
+ void *adapter;
+ struct btc_board_info board_info;
+ /* some bt info referenced by non-bt module */
+ struct btc_bt_info bt_info;
+ struct btc_stack_info stack_info;
+ enum btc_chip_interface chip_interface;
+ struct btc_bt_link_info bt_link_info;
+
+ bool initilized;
+ bool stop_coex_dm;
+ bool manual_control;
+ u8 *cli_buf;
+ struct btc_statistics statistics;
+ u8 pwr_mode_val[10];
+
+ /* function pointers - io related */
+ bfp_btc_r1 btc_read_1byte;
+ bfp_btc_w1 btc_write_1byte;
+ bfp_btc_w1_bit_mak btc_write_1byte_bitmask;
+ bfp_btc_r2 btc_read_2byte;
+ bfp_btc_w2 btc_write_2byte;
+ bfp_btc_r4 btc_read_4byte;
+ bfp_btc_w4 btc_write_4byte;
+
+ bfp_btc_set_bb_reg btc_set_bb_reg;
+ bfp_btc_get_bb_reg btc_get_bb_reg;
+
+ bfp_btc_set_rf_reg btc_set_rf_reg;
+ bfp_btc_get_rf_reg btc_get_rf_reg;
+
+ bfp_btc_fill_h2c btc_fill_h2c;
+
+ bfp_btc_disp_dbg_msg btc_disp_dbg_msg;
+
+ bfp_btc_get btc_get;
+ bfp_btc_set btc_set;
+};
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
+
+extern struct btc_coexist gl_bt_coexist;
+
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter);
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist);
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action);
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
+ enum _RT_MEDIA_STATUS media_status);
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
+ u8 length);
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void exhalbtc_periodical(struct btc_coexist *btcoexist);
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
+ u8 *data);
+void exhalbtc_stack_update_profile_info(void);
+void exhalbtc_set_hci_version(u16 hci_version);
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
+void exhalbtc_update_min_bt_rssi(char bt_rssi);
+void exhalbtc_set_bt_exist(bool bt_exist);
+void exhalbtc_set_chip_type(u8 chip_type);
+void exhalbtc_set_ant_num(u8 type, u8 ant_num);
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
+ u8 *rssi_wifi, u8 *rssi_bt);
+void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
+void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
+
+#endif
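How a coex algorithm is meant to consume the callback table above, as a minimal hedged sketch; the register offset 0x778 and the s32 buffer type for the RSSI query are illustrative assumptions, not taken from this patch:

    /* Illustrative only - not part of this patch. */
    static void example_query_wifi_state(struct btc_coexist *btcoexist)
    {
        bool wifi_connected = false;
        s32 wifi_rssi = 0;

        /* All WiFi state is read through the registered callbacks. */
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                           &wifi_connected);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);

        /* Register access likewise goes through the I/O hooks. */
        if (wifi_connected)
            btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
    }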
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
new file mode 100644
index 000000000000..0ab94fe4cbbe
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
@@ -0,0 +1,218 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2013 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "rtl_btc.h"
+#include "halbt_precomp.h"
+
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+static struct rtl_btc_ops rtl_btc_operation = {
+ .btc_init_variables = rtl_btc_init_variables,
+ .btc_init_hal_vars = rtl_btc_init_hal_vars,
+ .btc_init_hw_config = rtl_btc_init_hw_config,
+ .btc_ips_notify = rtl_btc_ips_notify,
+ .btc_scan_notify = rtl_btc_scan_notify,
+ .btc_connect_notify = rtl_btc_connect_notify,
+ .btc_mediastatus_notify = rtl_btc_mediastatus_notify,
+ .btc_periodical = rtl_btc_periodical,
+ .btc_halt_notify = rtl_btc_halt_notify,
+ .btc_btinfo_notify = rtl_btc_btinfo_notify,
+ .btc_is_limited_dig = rtl_btc_is_limited_dig,
+ .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
+ .btc_is_bt_disabled = rtl_btc_is_bt_disabled,
+};
+
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
+{
+ exhalbtc_initlize_variables(rtlpriv);
+}
+
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
+{
+ u8 ant_num;
+ u8 bt_exist;
+ u8 bt_type;
+
+ ant_num = rtl_get_hwpg_ant_num(rtlpriv);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ "%s, antNum is %d\n", __func__, ant_num);
+
+ bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ "%s, bt_exist is %d\n", __func__, bt_exist);
+ exhalbtc_set_bt_exist(bt_exist);
+
+ bt_type = rtl_get_hwpg_bt_type(rtlpriv);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s, bt_type is %d\n",
+ __func__, bt_type);
+ exhalbtc_set_chip_type(bt_type);
+
+ exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num);
+}
+
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
+{
+ exhalbtc_init_hw_config(&gl_bt_coexist);
+ exhalbtc_init_coex_dm(&gl_bt_coexist);
+}
+
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type)
+{
+ exhalbtc_ips_notify(&gl_bt_coexist, type);
+}
+
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype)
+{
+ exhalbtc_scan_notify(&gl_bt_coexist, scantype);
+}
+
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action)
+{
+ exhalbtc_connect_notify(&gl_bt_coexist, action);
+}
+
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
+ enum _RT_MEDIA_STATUS mstatus)
+{
+ exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus);
+}
+
+void rtl_btc_periodical(struct rtl_priv *rtlpriv)
+{
+ exhalbtc_periodical(&gl_bt_coexist);
+}
+
+void rtl_btc_halt_notify(void)
+{
+ exhalbtc_halt_notify(&gl_bt_coexist);
+}
+
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
+{
+ exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length);
+}
+
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv)
+{
+ return gl_bt_coexist.bt_info.limited_dig;
+}
+
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
+{
+ bool bt_change_edca = false;
+ u32 cur_edca_val;
+ u32 edca_bt_hs_uplink = 0x5ea42b, edca_bt_hs_downlink = 0x5ea42b;
+ u32 edca_hs;
+ u32 edca_addr = 0x504;
+
+ cur_edca_val = rtl_read_dword(rtlpriv, edca_addr);
+ if (halbtc_is_wifi_uplink(rtlpriv)) {
+ if (cur_edca_val != edca_bt_hs_uplink) {
+ edca_hs = edca_bt_hs_uplink;
+ bt_change_edca = true;
+ }
+ } else {
+ if (cur_edca_val != edca_bt_hs_downlink) {
+ edca_hs = edca_bt_hs_downlink;
+ bt_change_edca = true;
+ }
+ }
+
+ if (bt_change_edca)
+ rtl_write_dword(rtlpriv, edca_addr, edca_hs);
+
+ return true;
+}
+
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
+{
+ return gl_bt_coexist.bt_info.bt_disabled;
+}
+
+struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
+{
+ return &rtl_btc_operation;
+}
+EXPORT_SYMBOL(rtl_btc_get_ops_pointer);
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+{
+ u8 num;
+
+ if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
+ num = 2;
+ else
+ num = 1;
+
+ return num;
+}
+
+enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum _RT_MEDIA_STATUS m_status = RT_MEDIA_DISCONNECT;
+
+ u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+ if (bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ m_status = RT_MEDIA_CONNECT;
+
+ return m_status;
+}
+
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv)
+{
+ return rtlpriv->btcoexist.btc_info.btcoexist;
+}
+
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+{
+ return rtlpriv->btcoexist.btc_info.bt_type;
+}
+
+MODULE_AUTHOR("Page He <page_he@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+
+static int __init rtl_btcoexist_module_init(void)
+{
+ return 0;
+}
+
+static void __exit rtl_btcoexist_module_exit(void)
+{
+ return;
+}
+
+module_init(rtl_btcoexist_module_init);
+module_exit(rtl_btcoexist_module_exit);
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h
new file mode 100644
index 000000000000..805b22cc8fc8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h
@@ -0,0 +1,52 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_BTC_H__
+#define __RTL_BTC_H__
+
+#include "halbt_precomp.h"
+
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv);
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type);
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype);
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action);
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
+ enum _RT_MEDIA_STATUS mstatus);
+void rtl_btc_periodical(struct rtl_priv *rtlpriv);
+void rtl_btc_halt_notify(void);
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length);
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
+
+struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv);
+enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw);
+
+#endif
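A minimal consumer-side sketch of how a chip driver would wire these exports in; it assumes struct rtl_priv exposes a btcoexist.btc_ops pointer for the ops table, which is not shown in this hunk:

    /* Illustrative only - not part of this patch. */
    static void example_attach_btcoex(struct rtl_priv *rtlpriv)
    {
        /* Fetch the shared ops table exported by the btcoexist module. */
        rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();

        /* Run the usual init sequence through it. */
        rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
        rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
    }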
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 2d337a0c3df0..4ec424f26672 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -36,6 +36,66 @@
#include <linux/export.h>
+void rtl_addr_delay(u32 addr)
+{
+ if (addr == 0xfe)
+ mdelay(50);
+ else if (addr == 0xfd)
+ mdelay(5);
+ else if (addr == 0xfc)
+ mdelay(1);
+ else if (addr == 0xfb)
+ udelay(50);
+ else if (addr == 0xfa)
+ udelay(5);
+ else if (addr == 0xf9)
+ udelay(1);
+}
+EXPORT_SYMBOL(rtl_addr_delay);
+
+void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
+ u32 mask, u32 data)
+{
+ if (addr == 0xfe) {
+ mdelay(50);
+ } else if (addr == 0xfd) {
+ mdelay(5);
+ } else if (addr == 0xfc) {
+ mdelay(1);
+ } else if (addr == 0xfb) {
+ udelay(50);
+ } else if (addr == 0xfa) {
+ udelay(5);
+ } else if (addr == 0xf9) {
+ udelay(1);
+ } else {
+ rtl_set_rfreg(hw, rfpath, addr, mask, data);
+ udelay(1);
+ }
+}
+EXPORT_SYMBOL(rtl_rfreg_delay);
+
+void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
+{
+ if (addr == 0xfe) {
+ mdelay(50);
+ } else if (addr == 0xfd) {
+ mdelay(5);
+ } else if (addr == 0xfc) {
+ mdelay(1);
+ } else if (addr == 0xfb) {
+ udelay(50);
+ } else if (addr == 0xfa) {
+ udelay(5);
+ } else if (addr == 0xf9) {
+ udelay(1);
+ } else {
+ rtl_set_bbreg(hw, addr, MASKDWORD, data);
+ udelay(1);
+ }
+}
+EXPORT_SYMBOL(rtl_bb_delay);
+
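For context, the 0xfe..0xf9 values handled above are sentinel "addresses" embedded in the chip init tables rather than real registers; a hypothetical table, with all offsets and values as made-up placeholders, would be walked like this:

    /* Illustrative only - not part of this patch. */
    static const u32 example_bb_table[] = {
        0x800, 0x80040000,	/* ordinary (offset, value) pair */
        0xfe,  0x0,		/* sentinel: rtl_bb_delay() -> mdelay(50) */
        0x804, 0x00000003,
        0xfb,  0x0,		/* sentinel: rtl_bb_delay() -> udelay(50) */
    };

    static void example_load_bb_table(struct ieee80211_hw *hw)
    {
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(example_bb_table); i += 2)
            rtl_bb_delay(hw, example_bb_table[i], example_bb_table[i + 1]);
    }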
void rtl_fw_cb(const struct firmware *firmware, void *context)
{
struct ieee80211_hw *hw = context;
@@ -475,20 +535,40 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 rx_conf;
*new_flags &= RTL_SUPPORTED_FILTERS;
if (!changed_flags)
return;
+ /* If the SSID has not been set to the hardware, do not check the BSSID.
+ * This path is only used for scanning while linked; for both the linked
+ * and no-link cases, BSSID checking is configured in set network_type. */
+ if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
+ (mac->link_state >= MAC80211_LINKED)) {
+ if (mac->opmode != NL80211_IFTYPE_AP &&
+ mac->opmode != NL80211_IFTYPE_MESH_POINT) {
+ if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
+ rtlpriv->cfg->ops->set_chk_bssid(hw, false);
+ } else {
+ rtlpriv->cfg->ops->set_chk_bssid(hw, true);
+ }
+ }
+ }
+
+ /* must be called after set_chk_bssid since that function modifies the
+ * RCR register too. */
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&rx_conf));
+
/*TODO: we disable broadcast now, so enable here */
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
+ rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Enable receive multicast frame\n");
} else {
- mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
+ rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB]);
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive multicast frame\n");
@@ -497,39 +577,25 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_FCSFAIL) {
if (*new_flags & FIF_FCSFAIL) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+ rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Enable receive FCS error frame\n");
} else {
- mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+ rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive FCS error frame\n");
}
}
- /* if ssid not set to hw don't check bssid
- * here just used for linked scanning, & linked
- * and nolink check bssid is set in set network_type */
- if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
- (mac->link_state >= MAC80211_LINKED)) {
- if (mac->opmode != NL80211_IFTYPE_AP &&
- mac->opmode != NL80211_IFTYPE_MESH_POINT) {
- if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
- rtlpriv->cfg->ops->set_chk_bssid(hw, false);
- } else {
- rtlpriv->cfg->ops->set_chk_bssid(hw, true);
- }
- }
- }
if (changed_flags & FIF_CONTROL) {
if (*new_flags & FIF_CONTROL) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
+ rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Enable receive control frame\n");
} else {
- mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
+ rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive control frame\n");
}
@@ -537,15 +603,17 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_OTHER_BSS) {
if (*new_flags & FIF_OTHER_BSS) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
+ rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Enable receive other BSS's frame\n");
} else {
- mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
+ rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive other BSS's frame\n");
}
}
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&rx_conf));
}
static int rtl_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -738,6 +806,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->linked_set_reg(hw);
rcu_read_lock();
sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
+ if (!sta) {
+ pr_err("ieee80211_find_sta returned NULL\n");
+ rcu_read_unlock();
+ goto out;
+ }
if (vif->type == NL80211_IFTYPE_STATION && sta)
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
@@ -892,7 +965,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->basic_rates = basic_rates;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
- (u8 *) (&basic_rates));
+ (u8 *)(&basic_rates));
}
rcu_read_unlock();
}
@@ -906,6 +979,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (bss_conf->assoc) {
if (ppsc->fwctrl_lps) {
u8 mstatus = RT_MEDIA_CONNECT;
+ u8 keep_alive = 10;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_KEEP_ALIVE,
+ &keep_alive);
+
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_JOINBSSRPT,
&mstatus);
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 2fe46a1b4f1f..027e75374dcc 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -41,5 +41,9 @@
extern const struct ieee80211_ops rtl_ops;
void rtl_fw_cb(const struct firmware *firmware, void *context);
+void rtl_addr_delay(u32 addr);
+void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
+ u32 mask, u32 data);
+void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index d7aa165fe677..dae55257f0e8 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -811,19 +811,19 @@ done:
if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
return;
tmp_one = 1;
- rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
HW_DESC_RXBUFF_ADDR,
(u8 *)&bufferaddress);
- rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
HW_DESC_RXPKT_LEN,
(u8 *)&rtlpci->rxbuffersize);
if (index == rtlpci->rxringcount - 1)
- rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
HW_DESC_RXERO,
&tmp_one);
- rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false, HW_DESC_RXOWN,
&tmp_one);
index = (index + 1) % rtlpci->rxringcount;
@@ -983,6 +983,8 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
struct sk_buff *pskb = NULL;
struct rtl_tx_desc *pdesc = NULL;
struct rtl_tcb_desc tcb_desc;
+ /* This is for the new TRX flow */
+ struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
u8 temp_one = 1;
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
@@ -1004,11 +1006,12 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
info = IEEE80211_SKB_CB(pskb);
pdesc = &ring->desc[0];
rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
- info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
+ (u8 *)pbuffer_desc, info, NULL, pskb,
+ BEACON_QUEUE, &tcb_desc);
__skb_queue_tail(&ring->queue, pskb);
- rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
&temp_one);
return;
@@ -1066,7 +1069,7 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
mac->current_ampdu_factor = 3;
/*QOS*/
- rtlpci->acm_method = eAcmWay2_SW;
+ rtlpci->acm_method = EACMWAY2_SW;
/*task */
tasklet_init(&rtlpriv->works.irq_tasklet,
@@ -1113,7 +1116,7 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
((i + 1) % entries) *
sizeof(*ring);
- rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)&(ring[i]),
true, HW_DESC_TX_NEXTDESC_ADDR,
(u8 *)&nextdescaddress);
}
@@ -1188,19 +1191,19 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
dev_kfree_skb_any(skb);
return 1;
}
- rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RXBUFF_ADDR,
(u8 *)&bufferaddress);
- rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RXPKT_LEN,
(u8 *)&rtlpci->
rxbuffersize);
- rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RXOWN,
&tmp_one);
}
- rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RXERO, &tmp_one);
}
return 0;
@@ -1331,7 +1334,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
- rtlpriv->cfg->ops->set_desc((u8 *) entry,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry,
false,
HW_DESC_RXOWN,
&tmp_one);
@@ -1424,6 +1427,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct rtl8192_tx_ring *ring;
struct rtl_tx_desc *pdesc;
+ struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
u8 idx;
u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
unsigned long flags;
@@ -1464,17 +1468,22 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
idx = 0;
pdesc = &ring->desc[idx];
- own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
- true, HW_DESC_OWN);
+ if (rtlpriv->use_new_trx_flow) {
+ ptx_bd_desc = &ring->buffer_desc[idx];
+ } else {
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc,
+ true, HW_DESC_OWN);
- if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
- hw_queue, ring->idx, idx,
- skb_queue_len(&ring->queue));
+ if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue));
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return skb->len;
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
+ flags);
+ return skb->len;
+ }
}
if (ieee80211_is_data_qos(fc)) {
@@ -1494,17 +1503,20 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
- info, sta, skb, hw_queue, ptcb_desc);
+ (u8 *)ptx_bd_desc, info, sta, skb, hw_queue, ptcb_desc);
__skb_queue_tail(&ring->queue, skb);
- rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
- HW_DESC_OWN, &temp_one);
-
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
+ HW_DESC_OWN, &hw_queue);
+ } else {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
+ HW_DESC_OWN, &temp_one);
+ }
if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
hw_queue != BEACON_QUEUE) {
-
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
"less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
hw_queue, ring->idx, idx,
@@ -1841,6 +1853,65 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
return true;
}
+static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+
+ ret = pci_enable_msi(rtlpci->pdev);
+ if (ret < 0)
+ return ret;
+
+ ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, hw);
+ if (ret < 0) {
+ pci_disable_msi(rtlpci->pdev);
+ return ret;
+ }
+
+ rtlpci->using_msi = true;
+
+ RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
+ "MSI Interrupt Mode!\n");
+ return 0;
+}
+
+static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+
+ ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, hw);
+ if (ret < 0)
+ return ret;
+
+ rtlpci->using_msi = false;
+ RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
+ "Pin-based Interrupt Mode!\n");
+ return 0;
+}
+
+static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+
+ if (rtlpci->msi_support) {
+ ret = rtl_pci_intr_mode_msi(hw);
+ if (ret < 0)
+ ret = rtl_pci_intr_mode_legacy(hw);
+ } else {
+ ret = rtl_pci_intr_mode_legacy(hw);
+ }
+ return ret;
+}
+
int rtl_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -1983,8 +2054,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
}
rtlpci = rtl_pcidev(pcipriv);
- err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
- IRQF_SHARED, KBUILD_MODNAME, hw);
+ err = rtl_pci_intr_mode_decide(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"%s: failed to register IRQ handler\n",
@@ -2052,6 +2122,9 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
rtlpci->irq_alloc = 0;
}
+ if (rtlpci->using_msi)
+ pci_disable_msi(rtlpci->pdev);
+
list_del(&rtlpriv->list);
if (rtlpriv->io.pci_mem_start != 0) {
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index d3262ec45d23..90174a814a6d 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -137,12 +137,22 @@ struct rtl_tx_cmd_desc {
u32 dword[16];
} __packed;
+/* In the new TRX flow, the buffer descriptor is a new concept,
+ * but TX wifi info == TX descriptor in the old flow and
+ * RX wifi info == RX descriptor in the old flow.
+ */
+struct rtl_tx_buffer_desc {
+ u32 dword[8]; /*seg = 4*/
+} __packed;
+
struct rtl8192_tx_ring {
struct rtl_tx_desc *desc;
dma_addr_t dma;
unsigned int idx;
unsigned int entries;
struct sk_buff_head queue;
+ /* added for the new TRX flow */
+ struct rtl_tx_buffer_desc *buffer_desc; /* TX buffer descriptor */
};
struct rtl8192_rx_ring {
@@ -199,6 +209,10 @@ struct rtl_pci {
u16 shortretry_limit;
u16 longretry_limit;
+
+ /* MSI support */
+ bool msi_support;
+ bool using_msi;
};
struct mp_adapter {
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index d1c0191a195b..50504942ded1 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -32,6 +32,106 @@
#include "base.h"
#include "ps.h"
+/* Description:
+ * This routine parses the power configuration command table
+ * for the RTL8723/RTL8188E series ICs.
+ * Assumption:
+ * The table must follow the specific format released by the HW SD team.
+ */
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+ u8 faversion, u8 interface_type,
+ struct wlan_pwr_cfg pwrcfgcmd[])
+{
+ struct wlan_pwr_cfg cfg_cmd = {0};
+ bool polling_bit = false;
+ u32 ary_idx = 0;
+ u8 value = 0;
+ u32 offset = 0;
+ u32 polling_count = 0;
+ u32 max_polling_cnt = 5000;
+
+ do {
+ cfg_cmd = pwrcfgcmd[ary_idx];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x),"
+ "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
+ GET_PWR_CFG_OFFSET(cfg_cmd),
+ GET_PWR_CFG_CUT_MASK(cfg_cmd),
+ GET_PWR_CFG_FAB_MASK(cfg_cmd),
+ GET_PWR_CFG_INTF_MASK(cfg_cmd),
+ GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
+ GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
+
+ if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) &&
+ (GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) &&
+ (GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) {
+ switch (GET_PWR_CFG_CMD(cfg_cmd)) {
+ case PWR_CMD_READ:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
+ break;
+ case PWR_CMD_WRITE:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
+ offset = GET_PWR_CFG_OFFSET(cfg_cmd);
+
+ /*Read the value from system register*/
+ value = rtl_read_byte(rtlpriv, offset);
+ value &= (~(GET_PWR_CFG_MASK(cfg_cmd)));
+ value |= (GET_PWR_CFG_VALUE(cfg_cmd) &
+ GET_PWR_CFG_MASK(cfg_cmd));
+
+ /*Write the value back to the system register*/
+ rtl_write_byte(rtlpriv, offset, value);
+ break;
+ case PWR_CMD_POLLING:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
+ polling_bit = false;
+ offset = GET_PWR_CFG_OFFSET(cfg_cmd);
+
+ do {
+ value = rtl_read_byte(rtlpriv, offset);
+
+ value &= GET_PWR_CFG_MASK(cfg_cmd);
+ if (value ==
+ (GET_PWR_CFG_VALUE(cfg_cmd)
+ & GET_PWR_CFG_MASK(cfg_cmd)))
+ polling_bit = true;
+ else
+ udelay(10);
+
+ if (polling_count++ > max_polling_cnt)
+ return false;
+ } while (!polling_bit);
+ break;
+ case PWR_CMD_DELAY:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
+ if (GET_PWR_CFG_VALUE(cfg_cmd) ==
+ PWRSEQ_DELAY_US)
+ udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
+ else
+ mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
+ break;
+ case PWR_CMD_END:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
+ return true;
+ default:
+ RT_ASSERT(false,
+ "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
+ break;
+ }
+
+ }
+ ary_idx++;
+ } while (1);
+
+ return true;
+}
+EXPORT_SYMBOL(rtl_hal_pwrseqcmdparsing);
+
bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -659,7 +759,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
unsigned int len)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_mgmt *mgmt = (void *)data;
+ struct ieee80211_mgmt *mgmt = data;
struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
u8 *pos, *end, *ie;
u16 noa_len;
@@ -758,7 +858,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
unsigned int len)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_mgmt *mgmt = (void *)data;
+ struct ieee80211_mgmt *mgmt = data;
struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
u8 noa_num, index, i, noa_index = 0;
u8 *pos, *end, *ie;
@@ -850,9 +950,8 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
p2pinfo->p2p_ps_state = p2p_ps_state;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- (u8 *)(&p2p_ps_state));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ &p2p_ps_state);
p2pinfo->noa_index = 0;
p2pinfo->ctwindow = 0;
@@ -864,7 +963,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
rtlps->smart_ps = 2;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&rtlps->pwr_mode));
+ &rtlps->pwr_mode);
}
}
break;
@@ -877,12 +976,12 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
rtlps->smart_ps = 0;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&rtlps->pwr_mode));
+ &rtlps->pwr_mode);
}
}
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- (u8 *)(&p2p_ps_state));
+ &p2p_ps_state);
}
break;
case P2P_PS_SCAN:
@@ -892,7 +991,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
p2pinfo->p2p_ps_state = p2p_ps_state;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- (u8 *)(&p2p_ps_state));
+ &p2p_ps_state);
}
break;
default:
@@ -912,7 +1011,7 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct ieee80211_hdr *hdr = (void *)data;
+ struct ieee80211_hdr *hdr = data;
if (!mac->p2p)
return;
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 88bd76ea88f7..3bd41f958974 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -32,6 +32,66 @@
#define MAX_SW_LPS_SLEEP_INTV 5
+/*---------------------------------------------
+ * 3 The value of cmd: 4 bits
+ *---------------------------------------------
+ */
+#define PWR_CMD_READ 0x00
+#define PWR_CMD_WRITE 0x01
+#define PWR_CMD_POLLING 0x02
+#define PWR_CMD_DELAY 0x03
+#define PWR_CMD_END 0x04
+
+/* define the base address of each block */
+#define PWR_BASEADDR_MAC 0x00
+#define PWR_BASEADDR_USB 0x01
+#define PWR_BASEADDR_PCIE 0x02
+#define PWR_BASEADDR_SDIO 0x03
+
+#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+#define PWR_CUT_TESTCHIP_MSK BIT(0)
+#define PWR_CUT_A_MSK BIT(1)
+#define PWR_CUT_B_MSK BIT(2)
+#define PWR_CUT_C_MSK BIT(3)
+#define PWR_CUT_D_MSK BIT(4)
+#define PWR_CUT_E_MSK BIT(5)
+#define PWR_CUT_F_MSK BIT(6)
+#define PWR_CUT_G_MSK BIT(7)
+#define PWR_CUT_ALL_MSK 0xFF
+#define PWR_INTF_SDIO_MSK BIT(0)
+#define PWR_INTF_USB_MSK BIT(1)
+#define PWR_INTF_PCI_MSK BIT(2)
+#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+enum pwrseq_delay_unit {
+ PWRSEQ_DELAY_US,
+ PWRSEQ_DELAY_MS,
+};
+
+struct wlan_pwr_cfg {
+ u16 offset;
+ u8 cut_msk;
+ u8 fab_msk:4;
+ u8 interface_msk:4;
+ u8 base:4;
+ u8 cmd:4;
+ u8 msk;
+ u8 value;
+};
+
+#define GET_PWR_CFG_OFFSET(__PWR_CMD) (__PWR_CMD.offset)
+#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) (__PWR_CMD.cut_msk)
+#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) (__PWR_CMD.fab_msk)
+#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) (__PWR_CMD.interface_msk)
+#define GET_PWR_CFG_BASE(__PWR_CMD) (__PWR_CMD.base)
+#define GET_PWR_CFG_CMD(__PWR_CMD) (__PWR_CMD.cmd)
+#define GET_PWR_CFG_MASK(__PWR_CMD) (__PWR_CMD.msk)
+#define GET_PWR_CFG_VALUE(__PWR_CMD) (__PWR_CMD.value)
+
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+ u8 fab_version, u8 interface_type,
+ struct wlan_pwr_cfg pwrcfgcmd[]);
+
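To make the table format concrete, a hypothetical power sequence using the layout above; register 0x0005 and the bit positions are placeholders, and the real flows live in each chip's pwrseq tables:

    /* Illustrative only - not part of this patch. */
    static struct wlan_pwr_cfg example_enable_flow[] = {
        /* set BIT(0) of (made-up) register 0x0005 ... */
        {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,
         PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},
        /* ... poll until the hardware clears it again ... */
        {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,
         PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0},
        /* ... and terminate the table. */
        {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,
         0, PWR_CMD_END, 0, 0},
    };

    /* Parsed by:
     * rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
     *                          PWR_INTF_PCI_MSK, example_enable_flow);
     */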
bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
enum rf_pwrstate state_toset, u32 changesource);
bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index a98acefb8c06..ee28a1a3d010 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -260,8 +260,7 @@ static void rtl_rate_free_sta(void *rtlpriv,
kfree(rate_priv);
}
-static struct rate_control_ops rtl_rate_ops = {
- .module = NULL,
+static const struct rate_control_ops rtl_rate_ops = {
.name = "rtl_rc",
.alloc = rtl_rate_alloc,
.free = rtl_rate_free,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
index 5b194e97f4b3..a85419a37651 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
@@ -5,7 +5,6 @@ rtl8188ee-objs := \
led.o \
phy.o \
pwrseq.o \
- pwrseqcmd.o \
rf.o \
sw.o \
table.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
index a6184b6e1d57..f8daa61cf1c3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
@@ -235,7 +235,7 @@ void rtl88e_dm_txpower_track_adjust(struct ieee80211_hw *hw,
u8 pwr_val = 0;
u8 cck_base = rtldm->swing_idx_cck_base;
u8 cck_val = rtldm->swing_idx_cck;
- u8 ofdm_base = rtldm->swing_idx_ofdm_base;
+ u8 ofdm_base = rtldm->swing_idx_ofdm_base[0];
u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A];
if (type == 0) {
@@ -726,7 +726,7 @@ static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw)
static u64 last_rx;
long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
- if (rtlhal->oem_id == RT_CID_819x_HP) {
+ if (rtlhal->oem_id == RT_CID_819X_HP) {
u64 cur_txok_cnt = 0;
u64 cur_rxok_cnt = 0;
cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok;
@@ -851,9 +851,8 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (u8 *)(&tmp));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
@@ -912,7 +911,7 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_old[0] = (u8) i;
- rtldm->swing_idx_ofdm_base = (u8)i;
+ rtldm->swing_idx_ofdm_base[0] = (u8)i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
ROFDM0_XATXIQIMBAL,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
index 557bc5b8327e..4f9376ad4739 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
@@ -119,7 +119,7 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw,
enum version_8188e version, u8 *buffer, u32 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 *buf_ptr = (u8 *)buffer;
+ u8 *buf_ptr = buffer;
u32 page_no, remain;
u32 page, offset;
@@ -213,7 +213,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
return 1;
pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
- pfwdata = (u8 *)rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
"normal Firmware SIZE %d\n", fwsize);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index e06971be7df7..94cd9df98381 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -41,7 +41,6 @@
#include "fw.h"
#include "led.h"
#include "hw.h"
-#include "pwrseqcmd.h"
#include "pwrseq.h"
#define LLT_CONFIG 5
@@ -148,8 +147,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
}
if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) {
- rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
if (FW_PS_IS_ACK(rpwm_val)) {
isr_regaddr = REG_HISR;
content = rtl_read_dword(rtlpriv, isr_regaddr);
@@ -226,7 +224,7 @@ static void _rtl88ee_set_fw_clock_off(struct ieee80211_hw *hw,
rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
rtl_write_word(rtlpriv, REG_HISR, 0x0100);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ &rpwm_val);
spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
rtlhal->fw_clk_change_in_progress = false;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
@@ -274,15 +272,14 @@ static void _rtl88ee_fwlps_leave(struct ieee80211_hw *hw)
_rtl88ee_set_fw_clock_on(hw, rpwm_val, false);
rtlhal->allow_sw_to_change_hwclc = false;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
+ &fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
} else {
rpwm_val = FW_PS_STATE_ALL_ON_88E; /* RF on */
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
+ &fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
}
@@ -301,7 +298,7 @@ static void _rtl88ee_fwlps_enter(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
+ &ppsc->fwctrl_psmode);
rtlhal->allow_sw_to_change_hwclc = true;
_rtl88ee_set_fw_clock_off(hw, rpwm_val);
} else {
@@ -309,9 +306,8 @@ static void _rtl88ee_fwlps_enter(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ &ppsc->fwctrl_psmode);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
}
}
@@ -420,12 +416,12 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
- (u8 *)(&e_aci));
+ &e_aci);
}
break; }
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *)val);
+ u8 short_preamble = (bool)*val;
reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2);
if (short_preamble) {
reg_tmp |= 0x02;
@@ -436,13 +432,13 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
break; }
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *)val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_AMPDU_MIN_SPACE:{
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *)val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
sec_min_space = 0;
@@ -465,7 +461,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *)val);
+ density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -483,7 +479,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
reg = regtoset_normal;
- factor = *((u8 *)val);
+ factor = *val;
if (factor <= 3) {
factor = (1 << (factor + 2));
if (factor > 0xf)
@@ -506,15 +502,15 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
break; }
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *)val);
+ u8 e_aci = *val;
rtl88e_dm_init_edca_turbo(hw);
- if (rtlpci->acm_method != eAcmWay2_SW)
+ if (rtlpci->acm_method != EACMWAY2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
- (u8 *)(&e_aci));
+ &e_aci);
break; }
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *)val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn =
(union aci_aifsn *)(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -567,7 +563,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlpci->receive_config = ((u32 *)(val))[0];
break;
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *)(val))[0];
+ u8 retry_limit = *val;
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -580,7 +576,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *)val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *)val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl88e_phy_set_io_cmd(hw, (*(enum io_type *)val));
@@ -592,15 +588,13 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
udelay(1);
if (rpwm_val & BIT(7)) {
- rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- (*(u8 *)val));
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
} else {
- rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- ((*(u8 *)val) | BIT(7)));
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
}
break; }
case HW_VAR_H2C_FW_PWRMODE:
- rtl88e_set_fw_pwrmode_cmd(hw, (*(u8 *)val));
+ rtl88e_set_fw_pwrmode_cmd(hw, *val);
break;
case HW_VAR_FW_PSMODE_STATUS:
ppsc->fw_current_inpsmode = *((bool *)val);
@@ -617,7 +611,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
_rtl88ee_fwlps_leave(hw);
break; }
case HW_VAR_H2C_FW_JOINBSSRPT:{
- u8 mstatus = (*(u8 *)val);
+ u8 mstatus = *val;
u8 tmp, tmp_reg422, uval;
u8 count = 0, dlbcn_count = 0;
bool recover = false;
@@ -668,10 +662,10 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
rtl_write_byte(rtlpriv, REG_CR + 1, (tmp & ~(BIT(0))));
}
- rtl88e_set_fw_joinbss_report_cmd(hw, (*(u8 *)val));
+ rtl88e_set_fw_joinbss_report_cmd(hw, *val);
break; }
case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
- rtl88e_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
+ rtl88e_set_p2p_ps_offload_cmd(hw, *val);
break;
case HW_VAR_AID:{
u16 u2btmp;
@@ -681,7 +675,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->assoc_id));
break; }
case HW_VAR_CORRECT_TSF:{
- u8 btype_ibss = ((u8 *)(val))[0];
+ u8 btype_ibss = *val;
if (btype_ibss == true)
_rtl88ee_stop_tx_beacon(hw);
@@ -815,11 +809,11 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
/* HW Power on sequence */
- if (!rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
- PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
- Rtl8188E_NIC_ENABLE_FLOW)) {
+ if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
+ PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
+ Rtl8188E_NIC_ENABLE_FLOW)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "init MAC Fail as rtl88_hal_pwrseqcmdparsing\n");
+ "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
return false;
}
@@ -1025,9 +1019,20 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
bool rtstatus = true;
int err = 0;
u8 tmp_u1b, u1byte;
+ unsigned long flags;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Rtl8188EE hw init\n");
rtlpriv->rtlhal.being_init_adapter = true;
+ /* As this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe to do so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
+
rtlpriv->intf_ops->disable_aspm(hw);
tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CLKR+1);
@@ -1043,7 +1048,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
if (rtstatus != true) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
err = 1;
- return err;
+ goto exit;
}
err = rtl88e_download_fw(hw, false);
@@ -1051,8 +1056,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now..\n");
err = 1;
- rtlhal->fw_ready = false;
- return err;
+ goto exit;
} else {
rtlhal->fw_ready = true;
}
@@ -1097,7 +1101,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
if (ppsc->rfpwr_state == ERFON) {
if ((rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) ||
((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) &&
- (rtlhal->oem_id == RT_CID_819x_HP))) {
+ (rtlhal->oem_id == RT_CID_819X_HP))) {
rtl88e_phy_set_rfpath_switch(hw, true);
rtlpriv->dm.fat_table.rx_idle_ant = MAIN_ANT;
} else {
@@ -1135,10 +1139,12 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
}
rtl_write_byte(rtlpriv, REG_NAV_CTRL+2, ((30000+127)/128));
rtl88e_dm_init(hw);
+exit:
+ local_irq_restore(flags);
rtlpriv->rtlhal.being_init_adapter = false;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "end of Rtl8188EE hw init %x\n",
err);
- return 0;
+ return err;
}
static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw)
@@ -1235,12 +1241,13 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
void rtl88ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u32 reg_rcr = rtlpci->receive_config;
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid == true) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
@@ -1345,9 +1352,9 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
}
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0xFF);
- rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
- PWR_INTF_PCI_MSK,
- Rtl8188E_NIC_LPS_ENTER_FLOW);
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK,
+ Rtl8188E_NIC_LPS_ENTER_FLOW);
rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
@@ -1361,8 +1368,8 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_32K_CTRL);
rtl_write_byte(rtlpriv, REG_32K_CTRL, (u1b_tmp & (~BIT(0))));
- rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
- PWR_INTF_PCI_MSK, Rtl8188E_NIC_DISABLE_FLOW);
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, Rtl8188E_NIC_DISABLE_FLOW);
u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3))));
@@ -1816,7 +1823,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
/*customer ID*/
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
if (rtlefuse->eeprom_oemid == 0xFF)
rtlefuse->eeprom_oemid = 0;
@@ -1833,7 +1840,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"dev_addr: %pM\n", rtlefuse->dev_addr);
/*channel plan */
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
/* set channel paln to world wide 13 */
rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
/*tx power*/
@@ -1845,7 +1852,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag,
hwinfo);
/*board type*/
- rtlefuse->board_type = (((*(u8 *)&hwinfo[jj]) & 0xE0) >> 5);
+ rtlefuse->board_type = (hwinfo[jj] & 0xE0) >> 5;
/*Wake on wlan*/
rtlefuse->wowlan_enable = ((hwinfo[kk] & 0x40) >> 6);
/*parse xtal*/
@@ -1872,15 +1879,15 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
case EEPROM_CID_DEFAULT:
if (rtlefuse->eeprom_did == 0x8179) {
if (rtlefuse->eeprom_svid == 0x1025) {
- rtlhal->oem_id = RT_CID_819x_Acer;
+ rtlhal->oem_id = RT_CID_819X_ACER;
} else if ((rtlefuse->eeprom_svid == 0x10EC &&
rtlefuse->eeprom_smid == 0x0179) ||
(rtlefuse->eeprom_svid == 0x17AA &&
rtlefuse->eeprom_smid == 0x0179)) {
- rtlhal->oem_id = RT_CID_819x_Lenovo;
+ rtlhal->oem_id = RT_CID_819X_LENOVO;
} else if (rtlefuse->eeprom_svid == 0x103c &&
rtlefuse->eeprom_smid == 0x197d) {
- rtlhal->oem_id = RT_CID_819x_HP;
+ rtlhal->oem_id = RT_CID_819X_HP;
} else {
rtlhal->oem_id = RT_CID_DEFAULT;
}
@@ -1892,7 +1899,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
rtlhal->oem_id = RT_CID_TOSHIBA;
break;
case EEPROM_CID_QMI:
- rtlhal->oem_id = RT_CID_819x_QMI;
+ rtlhal->oem_id = RT_CID_819X_QMI;
break;
case EEPROM_CID_WHQL:
default:
@@ -1911,14 +1918,14 @@ static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
pcipriv->ledctl.led_opendrain = true;
switch (rtlhal->oem_id) {
- case RT_CID_819x_HP:
+ case RT_CID_819X_HP:
pcipriv->ledctl.led_opendrain = true;
break;
- case RT_CID_819x_Lenovo:
+ case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
case RT_CID_TOSHIBA:
case RT_CID_CCX:
- case RT_CID_819x_Acer:
+ case RT_CID_819X_ACER:
case RT_CID_WHQL:
default:
break;
@@ -2211,8 +2218,7 @@ void rtl88ee_update_channel_access_setting(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 sifs_timer;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
index d67f9c731cc4..1cd6c16d597e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
@@ -29,6 +29,7 @@
#include "../wifi.h"
#include "../pci.h"
+#include "../core.h"
#include "../ps.h"
#include "reg.h"
#include "def.h"
@@ -151,18 +152,7 @@ static bool config_bb_with_pgheader(struct ieee80211_hw *hw,
v2 = table_pg[i + 1];
if (v1 < 0xcdcdcdcd) {
- if (table_pg[i] == 0xfe)
- mdelay(50);
- else if (table_pg[i] == 0xfd)
- mdelay(5);
- else if (table_pg[i] == 0xfc)
- mdelay(1);
- else if (table_pg[i] == 0xfb)
- udelay(50);
- else if (table_pg[i] == 0xfa)
- udelay(5);
- else if (table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(table_pg[i]);
store_pwrindex_offset(hw, table_pg[i],
table_pg[i + 1],
@@ -672,24 +662,9 @@ static void _rtl8188e_config_rf_reg(struct ieee80211_hw *hw,
u32 addr, u32 data, enum radio_path rfpath,
u32 regaddr)
{
- if (addr == 0xffe) {
- mdelay(50);
- } else if (addr == 0xfd) {
- mdelay(5);
- } else if (addr == 0xfc) {
- mdelay(1);
- } else if (addr == 0xfb) {
- udelay(50);
- } else if (addr == 0xfa) {
- udelay(5);
- } else if (addr == 0xf9) {
- udelay(1);
- } else {
- rtl_set_rfreg(hw, rfpath, regaddr,
- RFREG_OFFSET_MASK,
- data);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, regaddr,
+ RFREG_OFFSET_MASK,
+ data);
}
static void rtl88_config_s(struct ieee80211_hw *hw,
@@ -702,28 +677,6 @@ static void rtl88_config_s(struct ieee80211_hw *hw,
addr | maskforphyset);
}
-static void _rtl8188e_config_bb_reg(struct ieee80211_hw *hw,
- u32 addr, u32 data)
-{
- if (addr == 0xfe) {
- mdelay(50);
- } else if (addr == 0xfd) {
- mdelay(5);
- } else if (addr == 0xfc) {
- mdelay(1);
- } else if (addr == 0xfb) {
- udelay(50);
- } else if (addr == 0xfa) {
- udelay(5);
- } else if (addr == 0xf9) {
- udelay(1);
- } else {
- rtl_set_bbreg(hw, addr, MASKDWORD, data);
- udelay(1);
- }
-}
-
-
#define NEXT_PAIR(v1, v2, i) \
do { \
i += 2; v1 = array_table[i]; \
@@ -795,7 +748,7 @@ static void set_baseband_phy_config(struct ieee80211_hw *hw)
v1 = array_table[i];
v2 = array_table[i + 1];
if (v1 < 0xcdcdcdcd) {
- _rtl8188e_config_bb_reg(hw, v1, v2);
+ rtl_bb_delay(hw, v1, v2);
} else {/*This line is the start line of branch.*/
if (!check_cond(hw, array_table[i])) {
/*Discard the following (offset, data) pairs*/
@@ -811,7 +764,7 @@ static void set_baseband_phy_config(struct ieee80211_hw *hw)
while (v2 != 0xDEAD &&
v2 != 0xCDEF &&
v2 != 0xCDCD && i < arraylen - 2) {
- _rtl8188e_config_bb_reg(hw, v1, v2);
+ rtl_bb_delay(hw, v1, v2);
NEXT_PAIR(v1, v2, i);
}
@@ -1002,7 +955,7 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
}
}
- if (rtlhal->oem_id == RT_CID_819x_HP)
+ if (rtlhal->oem_id == RT_CID_819X_HP)
rtl88_config_s(hw, 0x52, 0x7E4BD);
break;
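
[Editor's note] The repeated mdelay()/udelay() ladders removed above are replaced by shared helpers pulled in via the new "../core.h" include. A minimal sketch of what those helpers presumably do, reconstructed from the removed if/else chains (the real implementations live in the common rtlwifi core code and are not part of this diff; signatures are assumptions):

void rtl_addr_delay(u32 addr)
{
	/* magic table "addresses" 0xf9..0xfe encode a delay request */
	if (addr == 0xfe)
		mdelay(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		udelay(50);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
}

void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath,
		     u32 addr, u32 mask, u32 data)
{
	if (addr >= 0xf9 && addr <= 0xfe) {
		rtl_addr_delay(addr);
	} else {
		rtl_set_rfreg(hw, rfpath, addr, mask, data);
		udelay(1);	/* settle time between RF writes */
	}
}

void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
{
	if (addr >= 0xf9 && addr <= 0xfe) {
		rtl_addr_delay(addr);
	} else {
		rtl_set_bbreg(hw, addr, MASKDWORD, data);
		udelay(1);	/* settle time between BB writes */
	}
}

With these in place, table walkers such as set_baseband_phy_config() above shrink to a single rtl_bb_delay(hw, v1, v2) or rtl_rfreg_delay(...) call per table entry.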
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
index 028ec6dd52b4..32e135ab9a63 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
@@ -30,7 +30,6 @@
#ifndef __RTL8723E_PWRSEQ_H__
#define __RTL8723E_PWRSEQ_H__
-#include "pwrseqcmd.h"
/*
Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
There are 6 HW Power States:
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
index d849abf7d94a..7af85cfa8f87 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
@@ -2215,22 +2215,6 @@
#define BWORD1 0xc
#define BWORD 0xf
-#define MASKBYTE0 0xff
-#define MASKBYTE1 0xff00
-#define MASKBYTE2 0xff0000
-#define MASKBYTE3 0xff000000
-#define MASKHWORD 0xffff0000
-#define MASKLWORD 0x0000ffff
-#define MASKDWORD 0xffffffff
-#define MASK12BITS 0xfff
-#define MASKH4BITS 0xf0000000
-#define MASKOFDM_D 0xffc00000
-#define MASKCCK 0x3f3f3f3f
-
-#define MASK4BITS 0x0f
-#define MASK20BITS 0xfffff
-#define RFREG_OFFSET_MASK 0xfffff
-
#define BENABLE 0x1
#define BDISABLE 0x0
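
[Editor's note] The MASK*/RFREG_OFFSET_MASK definitions deleted here were duplicated in each per-chip reg.h; presumably they are now provided once by a shared rtlwifi header (an assumption — the shared header itself is not shown in this diff). The values are unchanged, e.g.:

#define MASKBYTE0		0xff
#define MASKDWORD		0xffffffff
#define RFREG_OFFSET_MASK	0xfffff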
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index 347af1e4f438..1b4101bf9974 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -93,6 +93,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
rtl8188ee_bt_reg_init(hw);
+ rtlpci->msi_support = true;
rtlpriv->dm.dm_initialgain_enable = 1;
rtlpriv->dm.dm_flag = 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index aece6c9cccf1..06ef47cd6203 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -452,7 +452,7 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
/* During testing, hdr was NULL */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
@@ -489,16 +489,15 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta,
- struct sk_buff *skb,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- u8 *pdesc = (u8 *)pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
unsigned int buf_len = 0;
@@ -717,7 +716,7 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_OWN(pdesc, 1);
- SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
@@ -734,7 +733,8 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
pdesc, TX_DESC_SIZE);
}
-void rtl88ee_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
{
if (istx == true) {
switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
index 21ca33a7c770..8c2609412d2c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
@@ -777,15 +777,15 @@ struct rx_desc_88e {
void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta,
- struct sk_buff *skb,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *status,
struct ieee80211_rx_status *rx_status,
u8 *pdesc, struct sk_buff *skb);
-void rtl88ee_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name);
void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
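
[Editor's note] With the added hw and pbd_desc_tx parameters, callers now pass the driver context explicitly. A hedged usage sketch — HW_DESC_OWN and the NULL buffer-descriptor argument are illustrative assumptions for a chip that does not use TX buffer descriptors:

	u8 own = 1;

	rtl88ee_tx_fill_desc(hw, hdr, pdesc, NULL /* no TX buffer descriptor */,
			     info, sta, skb, hw_queue, ptcb_desc);
	rtl88ee_set_desc(hw, pdesc, true, HW_DESC_OWN, &own);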
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 2eb0b38384dd..55adf043aef7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -319,7 +319,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 e_aci = *(val);
rtl92c_dm_init_edca_turbo(hw);
- if (rtlpci->acm_method != eAcmWay2_SW)
+ if (rtlpci->acm_method != EACMWAY2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL,
(&e_aci));
@@ -476,7 +476,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
- rtl92c_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
+ rtl92c_set_p2p_ps_offload_cmd(hw, *val);
break;
case HW_VAR_AID:{
u16 u2btmp;
@@ -521,30 +521,32 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
+ &ppsc->fwctrl_psmode);
rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ HW_VAR_SET_RPWM,
+ &rpwm_val);
} else {
rpwm_val = 0x0C; /* RF on */
fw_pwrmode = FW_PS_ACTIVE_MODE;
fw_current_inps = false;
rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ HW_VAR_SET_RPWM,
+ &rpwm_val);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
+ &fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
}
break; }
+ case HW_VAR_KEEP_ALIVE:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %d not processed\n", variable);
break;
}
}
@@ -1214,11 +1216,13 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
void rtl92ce_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
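
[Editor's note] Instead of reading REG_RCR directly (or trusting a cached copy), the receive-config word is now fetched through the HAL accessor before being modified. A condensed sketch of the pattern; the else branch clearing the bits is inferred from the unshown remainder of the hunk:

	u32 reg_rcr;

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
	if (check_bssid)
		reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
	else
		reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));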
@@ -1734,7 +1738,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
if (rtlefuse->eeprom_did == 0x8176) {
if ((rtlefuse->eeprom_svid == 0x103C &&
rtlefuse->eeprom_smid == 0x1629))
- rtlhal->oem_id = RT_CID_819x_HP;
+ rtlhal->oem_id = RT_CID_819X_HP;
else
rtlhal->oem_id = RT_CID_DEFAULT;
} else {
@@ -1745,7 +1749,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
rtlhal->oem_id = RT_CID_TOSHIBA;
break;
case EEPROM_CID_QMI:
- rtlhal->oem_id = RT_CID_819x_QMI;
+ rtlhal->oem_id = RT_CID_819X_QMI;
break;
case EEPROM_CID_WHQL:
default:
@@ -1764,14 +1768,14 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
switch (rtlhal->oem_id) {
- case RT_CID_819x_HP:
+ case RT_CID_819X_HP:
pcipriv->ledctl.led_opendrain = true;
break;
- case RT_CID_819x_Lenovo:
+ case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
case RT_CID_TOSHIBA:
case RT_CID_CCX:
- case RT_CID_819x_Acer:
+ case RT_CID_819X_ACER:
case RT_CID_WHQL:
default:
break;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 73262ca3864b..98b22303c84d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "hw.h"
@@ -198,18 +199,7 @@ bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
}
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_arraylen; i = i + 2) {
- if (phy_regarray_table[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table[i]);
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
@@ -245,18 +235,7 @@ bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
- if (phy_regarray_table_pg[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table_pg[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table_pg[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table_pg[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table_pg[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table_pg[i]);
_rtl92c_store_pwrIndex_diffrate_offset(hw,
phy_regarray_table_pg[i],
@@ -305,46 +284,16 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
- if (radioa_array_table[i] == 0xfe)
- mdelay(50);
- else if (radioa_array_table[i] == 0xfd)
- mdelay(5);
- else if (radioa_array_table[i] == 0xfc)
- mdelay(1);
- else if (radioa_array_table[i] == 0xfb)
- udelay(50);
- else if (radioa_array_table[i] == 0xfa)
- udelay(5);
- else if (radioa_array_table[i] == 0xf9)
- udelay(1);
- else {
- rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
- RFREG_OFFSET_MASK,
- radioa_array_table[i + 1]);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
}
break;
case RF90_PATH_B:
for (i = 0; i < radiob_arraylen; i = i + 2) {
- if (radiob_array_table[i] == 0xfe) {
- mdelay(50);
- } else if (radiob_array_table[i] == 0xfd)
- mdelay(5);
- else if (radiob_array_table[i] == 0xfc)
- mdelay(1);
- else if (radiob_array_table[i] == 0xfb)
- udelay(50);
- else if (radiob_array_table[i] == 0xfa)
- udelay(5);
- else if (radiob_array_table[i] == 0xf9)
- udelay(1);
- else {
- rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
- RFREG_OFFSET_MASK,
- radiob_array_table[i + 1]);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
}
break;
case RF90_PATH_C:
@@ -355,6 +304,8 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not processed\n");
break;
+ default:
+ break;
}
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index 8922ecb47ad2..ed703a1b3b7c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -2044,22 +2044,6 @@
#define BWORD1 0xc
#define BWORD 0xf
-#define MASKBYTE0 0xff
-#define MASKBYTE1 0xff00
-#define MASKBYTE2 0xff0000
-#define MASKBYTE3 0xff000000
-#define MASKHWORD 0xffff0000
-#define MASKLWORD 0x0000ffff
-#define MASKDWORD 0xffffffff
-#define MASK12BITS 0xfff
-#define MASKH4BITS 0xf0000000
-#define MASKOFDM_D 0xffc00000
-#define MASKCCK 0x3f3f3f3f
-
-#define MASK4BITS 0x0f
-#define MASK20BITS 0xfffff
-#define RFREG_OFFSET_MASK 0xfffff
-
#define BENABLE 0x1
#define BDISABLE 0x0
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 52abf0a862fa..8f04817cb7ec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -393,7 +393,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
/* In testing, hdr was NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
@@ -426,7 +426,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
@@ -666,7 +666,8 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
"H2C Tx Cmd Content", pdesc, TX_DESC_SIZE);
}
-void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
{
if (istx) {
switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index a7cdd514cb2e..9a39ec4204dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -711,8 +711,8 @@ struct rx_desc_92c {
} __packed;
void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr,
- u8 *pdesc, struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
@@ -720,7 +720,8 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
u8 *pdesc, struct sk_buff *skb);
-void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name);
void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 468bf73cc883..68b5c7e92cfb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -394,7 +394,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
if (rtlefuse->eeprom_did == 0x8176) {
if ((rtlefuse->eeprom_svid == 0x103C &&
rtlefuse->eeprom_smid == 0x1629))
- rtlhal->oem_id = RT_CID_819x_HP;
+ rtlhal->oem_id = RT_CID_819X_HP;
else
rtlhal->oem_id = RT_CID_DEFAULT;
} else {
@@ -405,7 +405,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
rtlhal->oem_id = RT_CID_TOSHIBA;
break;
case EEPROM_CID_QMI:
- rtlhal->oem_id = RT_CID_819x_QMI;
+ rtlhal->oem_id = RT_CID_819X_QMI;
break;
case EEPROM_CID_WHQL:
default:
@@ -423,14 +423,14 @@ static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
switch (rtlhal->oem_id) {
- case RT_CID_819x_HP:
+ case RT_CID_819X_HP:
usb_priv->ledctl.led_opendrain = true;
break;
- case RT_CID_819x_Lenovo:
+ case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
case RT_CID_TOSHIBA:
case RT_CID_CCX:
- case RT_CID_819x_Acer:
+ case RT_CID_819X_ACER:
case RT_CID_WHQL:
default:
break;
@@ -985,6 +985,17 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
int err = 0;
static bool iqk_initialized;
+ unsigned long flags;
+
+ /* As this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe doing so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
err = _rtl92cu_init_mac(hw);
@@ -997,7 +1008,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now..\n");
err = 1;
- return err;
+ goto exit;
}
rtlhal->last_hmeboxnum = 0; /* h2c */
_rtl92cu_phy_param_tab_init(hw);
@@ -1034,6 +1045,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
_InitPABias(hw);
_update_mac_setting(hw);
rtl92c_dm_init(hw);
+exit:
+ local_irq_restore(flags);
return err;
}
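
[Editor's note] The interrupt handling introduced in rtl92cu_hw_init() above, shown in isolation; do_long_init() is a placeholder for the body of the function:

	unsigned long flags;
	int err;

	local_save_flags(flags);	/* remember the caller's irq state */
	local_irq_enable();		/* let other devices be serviced */

	err = do_long_init(hw);		/* placeholder: the ~350 ms init sequence */

	local_irq_restore(flags);	/* restore irq state on every exit path */
	return err;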
@@ -1379,11 +1392,13 @@ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid) {
u8 tmp;
if (IS_NORMAL_CHIP(rtlhal->version)) {
@@ -1795,7 +1810,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
e_aci);
break;
}
- if (rtlusb->acm_method != eAcmWay2_SW)
+ if (rtlusb->acm_method != EACMWAY2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL, &e_aci);
break;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 0c09240eadcc..9831ff1128ca 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -188,18 +189,7 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
}
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_arraylen; i = i + 2) {
- if (phy_regarray_table[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table[i]);
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
@@ -236,18 +226,7 @@ bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata;
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
- if (phy_regarray_table_pg[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table_pg[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table_pg[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table_pg[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table_pg[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table_pg[i]);
_rtl92c_store_pwrIndex_diffrate_offset(hw,
phy_regarray_table_pg[i],
phy_regarray_table_pg[i + 1],
@@ -294,46 +273,16 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
- if (radioa_array_table[i] == 0xfe)
- mdelay(50);
- else if (radioa_array_table[i] == 0xfd)
- mdelay(5);
- else if (radioa_array_table[i] == 0xfc)
- mdelay(1);
- else if (radioa_array_table[i] == 0xfb)
- udelay(50);
- else if (radioa_array_table[i] == 0xfa)
- udelay(5);
- else if (radioa_array_table[i] == 0xf9)
- udelay(1);
- else {
- rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
- RFREG_OFFSET_MASK,
- radioa_array_table[i + 1]);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
}
break;
case RF90_PATH_B:
for (i = 0; i < radiob_arraylen; i = i + 2) {
- if (radiob_array_table[i] == 0xfe) {
- mdelay(50);
- } else if (radiob_array_table[i] == 0xfd)
- mdelay(5);
- else if (radiob_array_table[i] == 0xfc)
- mdelay(1);
- else if (radiob_array_table[i] == 0xfb)
- udelay(50);
- else if (radiob_array_table[i] == 0xfa)
- udelay(5);
- else if (radiob_array_table[i] == 0xf9)
- udelay(1);
- else {
- rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
- RFREG_OFFSET_MASK,
- radiob_array_table[i + 1]);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
}
break;
case RF90_PATH_C:
@@ -344,6 +293,8 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not processed\n");
break;
+ default:
+ break;
}
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 1bc21ccfa71b..035e0dc3922c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -495,7 +495,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 queue_index,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
index 725c53accc58..fd8051dcd98a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -420,7 +420,7 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
struct sk_buff_head *);
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 queue_index,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 7908e1c85819..304c443b89b2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -194,15 +194,15 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */
- ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, BMASKDWORD);
+ ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, BMASKDWORD);
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, BMASKDWORD);
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, BMASKDWORD);
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
falsealm_cnt->cnt_rate_illegal +
@@ -214,9 +214,9 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
/* hold cck counter */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, BMASKBYTE0);
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
falsealm_cnt->cnt_cck_fail = ret_value;
- ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, BMASKBYTE3);
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
} else {
@@ -331,11 +331,11 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) {
if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83);
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
} else {
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd);
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
@@ -722,7 +722,7 @@ static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"===> Rx Gain %x\n", u4tmp);
for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
- rtl_set_rfreg(hw, i, 0x3C, BRFREGOFFSETMASK,
+ rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK,
(rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
}
@@ -737,7 +737,7 @@ static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
/* Query CCK default setting From 0xa24 */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
- BMASKDWORD) & BMASKCCK;
+ MASKDWORD) & MASKCCK;
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
for (i = 0; i < CCK_TABLE_LENGTH; i++) {
if (rtlpriv->dm.cck_inch14) {
@@ -896,9 +896,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
rf = 1;
if (thermalvalue) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
- BMASKDWORD) & BMASKOFDM_D;
+ MASKDWORD) & MASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
- if (ele_d == (ofdmswing_table[i] & BMASKOFDM_D)) {
+ if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[0] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
@@ -910,10 +910,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
}
if (is2t) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
- BMASKDWORD) & BMASKOFDM_D;
+ MASKDWORD) & MASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
if (ele_d ==
- (ofdmswing_table[i] & BMASKOFDM_D)) {
+ (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[1] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
@@ -1091,10 +1091,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
value32 = (ele_d << 22) | ((ele_c & 0x3F) <<
16) | ele_a;
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
- BMASKDWORD, value32);
+ MASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
+ rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS,
value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
@@ -1103,10 +1103,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
} else {
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
- BMASKDWORD,
+ MASKDWORD,
ofdmswing_table
[(u8)ofdm_index[0]]);
- rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
+ rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS,
0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(24), 0x00);
@@ -1204,21 +1204,21 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
ele_a;
rtl_set_bbreg(hw,
ROFDM0_XBTxIQIMBALANCE,
- BMASKDWORD, value32);
+ MASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
- BMASKH4BITS, value32);
+ MASKH4BITS, value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), value32);
} else {
rtl_set_bbreg(hw,
ROFDM0_XBTxIQIMBALANCE,
- BMASKDWORD,
+ MASKDWORD,
ofdmswing_table
[(u8) ofdm_index[1]]);
rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
- BMASKH4BITS, 0x00);
+ MASKH4BITS, 0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), 0x00);
}
@@ -1229,10 +1229,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
- rtl_get_bbreg(hw, 0xc80, BMASKDWORD),
- rtl_get_bbreg(hw, 0xc94, BMASKDWORD),
+ rtl_get_bbreg(hw, 0xc80, MASKDWORD),
+ rtl_get_bbreg(hw, 0xc94, MASKDWORD),
rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
- BRFREGOFFSETMASK));
+ RFREG_OFFSET_MASK));
}
if ((delta_iqk > rtlefuse->delta_iqk) &&
(rtlefuse->delta_iqk != 0)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index c4a7db9135d6..2b08671004a0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -318,7 +318,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_AC_PARAM: {
u8 e_aci = *val;
rtl92d_dm_init_edca_turbo(hw);
- if (rtlpci->acm_method != eAcmWay2_SW)
+ if (rtlpci->acm_method != EACMWAY2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
&e_aci);
break;
@@ -985,9 +985,9 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
/* set default value after initialize RF, */
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0);
rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
- RF_CHNLBW, BRFREGOFFSETMASK);
+ RF_CHNLBW, RFREG_OFFSET_MASK);
rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
- RF_CHNLBW, BRFREGOFFSETMASK);
+ RF_CHNLBW, RFREG_OFFSET_MASK);
/*---- Set CCK and OFDM Block "ON"----*/
if (rtlhal->current_bandtype == BAND_ON_2_4G)
@@ -1035,7 +1035,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
tmp_rega = rtl_get_rfreg(hw,
(enum radio_path)RF90_PATH_A,
- 0x2a, BMASKDWORD);
+ 0x2a, MASKDWORD);
if (((tmp_rega & BIT(11)) == BIT(11)))
break;
@@ -1138,11 +1138,13 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
void rtl92de_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u32 reg_rcr = rtlpci->receive_config;
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
@@ -1332,13 +1334,13 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
/* c. ========RF OFF sequence========== */
/* 0x88c[23:20] = 0xf. */
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
/* APSD_CTRL 0x600[7:0] = 0x40 */
rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
/* Close antenna 0,0xc04,0xd04 */
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0);
rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0);
/* SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB state machine */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 13196cc4b1d3..3d1f0dd4e52d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -242,7 +243,7 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
else if (rtlhal->during_mac0init_radiob)
/* mac0 use phy1 write radio_b. */
dbi_direct = BIT(3) | BIT(2);
- if (bitmask != BMASKDWORD) {
+ if (bitmask != MASKDWORD) {
if (rtlhal->during_mac1init_radioa ||
rtlhal->during_mac0init_radiob)
originalvalue = rtl92de_read_dword_dbi(hw,
@@ -275,20 +276,20 @@ static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
u32 retvalue;
newoffset = offset;
- tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD);
+ tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
if (rfpath == RF90_PATH_A)
tmplong2 = tmplong;
else
- tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, BMASKDWORD);
+ tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
(newoffset << 23) | BLSSIREADEDGE;
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong & (~BLSSIREADEDGE));
udelay(10);
- rtl_set_bbreg(hw, pphyreg->rfhssi_para2, BMASKDWORD, tmplong2);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
udelay(50);
udelay(50);
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong | BLSSIREADEDGE);
udelay(10);
if (rfpath == RF90_PATH_A)
@@ -321,7 +322,7 @@ static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
newoffset = offset;
/* T65 RF */
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
- rtl_set_bbreg(hw, pphyreg->rf3wire_offset, BMASKDWORD, data_and_addr);
+ rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
@@ -362,7 +363,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
return;
spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
- if (bitmask != BRFREGOFFSETMASK) {
+ if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92d_phy_rf_serial_read(hw,
rfpath, regaddr);
bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
@@ -567,19 +568,8 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
" ===> phy:Rtl819XPHY_REG_Array_PG\n");
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_arraylen; i = i + 2) {
- if (phy_regarray_table[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table[i] == 0xf9)
- udelay(1);
- rtl_set_bbreg(hw, phy_regarray_table[i], BMASKDWORD,
+ rtl_addr_delay(phy_regarray_table[i]);
+ rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
@@ -591,7 +581,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
if (rtlhal->interfaceindex == 0) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
rtl_set_bbreg(hw, agctab_array_table[i],
- BMASKDWORD,
+ MASKDWORD,
agctab_array_table[i + 1]);
/* Add 1us delay between BB/RF register
* setting. */
@@ -607,7 +597,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
rtl_set_bbreg(hw, agctab_array_table[i],
- BMASKDWORD,
+ MASKDWORD,
agctab_array_table[i + 1]);
/* Add 1us delay between BB/RF register
* setting. */
@@ -623,7 +613,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
for (i = 0; i < agctab_5garraylen; i = i + 2) {
rtl_set_bbreg(hw,
agctab_5garray_table[i],
- BMASKDWORD,
+ MASKDWORD,
agctab_5garray_table[i + 1]);
/* Add 1us delay between BB/RF register
* setting. */
@@ -705,18 +695,7 @@ static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
phy_regarray_table_pg = rtl8192de_phy_reg_array_pg;
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
- if (phy_regarray_table_pg[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table_pg[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table_pg[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table_pg[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table_pg[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table_pg[i]);
_rtl92d_store_pwrindex_diffrate_offset(hw,
phy_regarray_table_pg[i],
phy_regarray_table_pg[i + 1],
@@ -843,54 +822,16 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
- if (radioa_array_table[i] == 0xfe) {
- mdelay(50);
- } else if (radioa_array_table[i] == 0xfd) {
- /* delay_ms(5); */
- mdelay(5);
- } else if (radioa_array_table[i] == 0xfc) {
- /* delay_ms(1); */
- mdelay(1);
- } else if (radioa_array_table[i] == 0xfb) {
- udelay(50);
- } else if (radioa_array_table[i] == 0xfa) {
- udelay(5);
- } else if (radioa_array_table[i] == 0xf9) {
- udelay(1);
- } else {
- rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
- BRFREGOFFSETMASK,
- radioa_array_table[i + 1]);
- /* Add 1us delay between BB/RF register set. */
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
}
break;
case RF90_PATH_B:
for (i = 0; i < radiob_arraylen; i = i + 2) {
- if (radiob_array_table[i] == 0xfe) {
- /* Delay specific ms. Only RF configuration
- * requires delay. */
- mdelay(50);
- } else if (radiob_array_table[i] == 0xfd) {
- /* delay_ms(5); */
- mdelay(5);
- } else if (radiob_array_table[i] == 0xfc) {
- /* delay_ms(1); */
- mdelay(1);
- } else if (radiob_array_table[i] == 0xfb) {
- udelay(50);
- } else if (radiob_array_table[i] == 0xfa) {
- udelay(5);
- } else if (radiob_array_table[i] == 0xf9) {
- udelay(1);
- } else {
- rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
- BRFREGOFFSETMASK,
- radiob_array_table[i + 1]);
- /* Add 1us delay between BB/RF register set. */
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
}
break;
case RF90_PATH_C:
@@ -911,13 +852,13 @@ void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
rtlphy->default_initialgain[0] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, BMASKBYTE0);
+ (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
rtlphy->default_initialgain[1] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, BMASKBYTE0);
+ (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
rtlphy->default_initialgain[2] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, BMASKBYTE0);
+ (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
rtlphy->default_initialgain[3] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, BMASKBYTE0);
+ (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
rtlphy->default_initialgain[0],
@@ -925,9 +866,9 @@ void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[2],
rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
- BMASKBYTE0);
+ MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
- BMASKDWORD);
+ MASKDWORD);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Default framesync (0x%x) = 0x%x\n",
ROFDM0_RXDETECTOR3, rtlphy->framesync);
@@ -1106,7 +1047,7 @@ static void _rtl92d_phy_stop_trx_before_changeband(struct ieee80211_hw *hw)
{
rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0);
rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0);
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x00);
rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x0);
}
@@ -1168,7 +1109,7 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 imr_num = MAX_RF_IMR_INDEX;
- u32 rfmask = BRFREGOFFSETMASK;
+ u32 rfmask = RFREG_OFFSET_MASK;
u8 group, i;
unsigned long flag = 0;
@@ -1211,7 +1152,7 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
for (i = 0; i < imr_num; i++) {
rtl_set_rfreg(hw, (enum radio_path)rfpath,
rf_reg_for_5g_swchnl_normal[i],
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rf_imr_param_normal[0][0][i]);
}
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
@@ -1329,7 +1270,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
if (i == 0 && (rtlhal->macphymode == DUALMAC_DUALPHY)) {
rtl_set_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_5g[i],
- BRFREGOFFSETMASK, 0xE439D);
+ RFREG_OFFSET_MASK, 0xE439D);
} else if (rf_reg_for_c_cut_5g[i] == RF_SYN_G4) {
u4tmp2 = (rf_reg_pram_c_5g[index][i] &
0x7FF) | (u4tmp << 11);
@@ -1337,11 +1278,11 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
u4tmp2 &= ~(BIT(7) | BIT(6));
rtl_set_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_5g[i],
- BRFREGOFFSETMASK, u4tmp2);
+ RFREG_OFFSET_MASK, u4tmp2);
} else {
rtl_set_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_5g[i],
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rf_reg_pram_c_5g[index][i]);
}
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -1351,7 +1292,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
path, index,
rtl_get_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_5g[i],
- BRFREGOFFSETMASK));
+ RFREG_OFFSET_MASK));
}
if (need_pwr_down)
_rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
@@ -1381,7 +1322,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
i++) {
rtl_set_rfreg(hw, rfpath,
rf_for_c_cut_5g_internal_pa[i],
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rf_pram_c_5g_int_pa[index][i]);
RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
"offset 0x%x value 0x%x path %d index %d\n",
@@ -1422,13 +1363,13 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
if (rf_reg_for_c_cut_2g[i] == RF_SYN_G7)
rtl_set_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_2g[i],
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
(rf_reg_param_for_c_cut_2g[index][i] |
BIT(17)));
else
rtl_set_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_2g[i],
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rf_reg_param_for_c_cut_2g
[index][i]);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -1438,14 +1379,14 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
rf_reg_mask_for_c_cut_2g[i], path, index,
rtl_get_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_2g[i],
- BRFREGOFFSETMASK));
+ RFREG_OFFSET_MASK));
}
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
rtl_set_rfreg(hw, (enum radio_path)path, RF_SYN_G4,
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
if (need_pwr_down)
_rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
@@ -1493,41 +1434,41 @@ static u8 _rtl92d_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
/* path-A IQK setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
if (rtlhal->interfaceindex == 0) {
- rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x10008c1f);
- rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x10008c1f);
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
} else {
- rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x10008c22);
- rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c22);
}
- rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82140102);
- rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x28160206);
+ rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x28160206);
/* path-B IQK setting */
if (configpathb) {
- rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x10008c22);
- rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x10008c22);
- rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82140102);
- rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x28160206);
+ rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160206);
}
/* LO calibration setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
- rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+ rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
/* One shot, path A LOK & IQK */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n");
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000);
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Delay %d ms for One shot, path A LOK & IQK\n",
IQK_DELAY_TIME);
mdelay(IQK_DELAY_TIME);
/* Check failed */
- regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
- rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD);
+ rege94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
- rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD);
+ rege9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
- regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD);
+ regea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
if (!(regeac & BIT(28)) && (((rege94 & 0x03FF0000) >> 16) != 0x142) &&
(((rege9c & 0x03FF0000) >> 16) != 0x42))
@@ -1563,42 +1504,42 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK!\n");
/* path-A IQK setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
- rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f);
- rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f);
- rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82140307);
- rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x68160960);
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140307);
+ rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x68160960);
/* path-B IQK setting */
if (configpathb) {
- rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x18008c2f);
- rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x18008c2f);
- rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82110000);
- rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68110000);
+ rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82110000);
+ rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x68110000);
}
/* LO calibration setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
- rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+ rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
/* path-A PA on */
- rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 0x07000f60);
- rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BMASKDWORD, 0x66e60e30);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x07000f60);
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD, 0x66e60e30);
for (i = 0; i < retrycount; i++) {
/* One shot, path A LOK & IQK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"One shot, path A LOK & IQK!\n");
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000);
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Delay %d ms for One shot, path A LOK & IQK.\n",
IQK_DELAY_TIME);
mdelay(IQK_DELAY_TIME * 10);
/* Check failed */
- regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
- rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD);
+ rege94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
- rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD);
+ rege9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
- regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD);
+ regea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
if (!(regeac & TxOKBit) &&
(((rege94 & 0x03FF0000) >> 16) != 0x142)) {
@@ -1620,9 +1561,9 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
}
}
/* path A PA off */
- rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
rtlphy->iqk_bb_backup[0]);
- rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD,
rtlphy->iqk_bb_backup[1]);
return result;
}
@@ -1637,22 +1578,22 @@ static u8 _rtl92d_phy_pathb_iqk(struct ieee80211_hw *hw)
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQK!\n");
/* One shot, path B LOK & IQK */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n");
- rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000002);
- rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000000);
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Delay %d ms for One shot, path B LOK & IQK\n", IQK_DELAY_TIME);
mdelay(IQK_DELAY_TIME);
/* Check failed */
- regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
- regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD);
+ regeb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
- regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD);
+ regebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
- regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD);
+ regec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
- regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD);
+ regecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
if (!(regeac & BIT(31)) && (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
(((regebc & 0x03FF0000) >> 16) != 0x42))
@@ -1680,31 +1621,31 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQK!\n");
/* path-A IQK setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
- rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f);
- rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f);
- rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82110000);
- rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x68110000);
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82110000);
+ rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x68110000);
/* path-B IQK setting */
- rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x18008c2f);
- rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x18008c2f);
- rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82140307);
- rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68160960);
+ rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140307);
+ rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x68160960);
/* LO calibration setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
- rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+ rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
/* path-B PA on */
- rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 0x0f600700);
- rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BMASKDWORD, 0x061f0d30);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x0f600700);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD, 0x061f0d30);
for (i = 0; i < retrycount; i++) {
/* One shot, path B LOK & IQK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"One shot, path A LOK & IQK!\n");
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xfa000000);
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xfa000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
@@ -1712,15 +1653,15 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
mdelay(IQK_DELAY_TIME * 10);
/* Check failed */
- regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
- regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD);
+ regeb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
- regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD);
+ regebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
- regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD);
+ regec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
- regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD);
+ regecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
if (!(regeac & BIT(31)) &&
(((regeb4 & 0x03FF0000) >> 16) != 0x142))
@@ -1738,9 +1679,9 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
}
/* path B PA off */
- rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
rtlphy->iqk_bb_backup[0]);
- rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD,
rtlphy->iqk_bb_backup[2]);
return result;
}
@@ -1754,7 +1695,7 @@ static void _rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save ADDA parameters.\n");
for (i = 0; i < regnum; i++)
- adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], BMASKDWORD);
+ adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], MASKDWORD);
}
static void _rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
@@ -1779,7 +1720,7 @@ static void _rtl92d_phy_reload_adda_registers(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Reload ADDA power saving parameters !\n");
for (i = 0; i < regnum; i++)
- rtl_set_bbreg(hw, adda_reg[i], BMASKDWORD, adda_backup[i]);
+ rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, adda_backup[i]);
}
static void _rtl92d_phy_reload_mac_registers(struct ieee80211_hw *hw,
@@ -1807,7 +1748,7 @@ static void _rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
pathon = rtlpriv->rtlhal.interfaceindex == 0 ?
0x04db25a4 : 0x0b1b25a4;
for (i = 0; i < IQK_ADDA_REG_NUM; i++)
- rtl_set_bbreg(hw, adda_reg[i], BMASKDWORD, pathon);
+ rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, pathon);
}
static void _rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
@@ -1830,9 +1771,9 @@ static void _rtl92d_phy_patha_standby(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A standby mode!\n");
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x0);
- rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, BMASKDWORD, 0x00010000);
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
+ rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD, 0x00010000);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
}
static void _rtl92d_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
@@ -1843,8 +1784,8 @@ static void _rtl92d_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"BB Switch to %s mode!\n", pi_mode ? "PI" : "SI");
mode = pi_mode ? 0x01000100 : 0x01000000;
- rtl_set_bbreg(hw, 0x820, BMASKDWORD, mode);
- rtl_set_bbreg(hw, 0x828, BMASKDWORD, mode);
+ rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
+ rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
}
static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
@@ -1875,7 +1816,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 2.4G :Start!!!\n");
if (t == 0) {
- bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD);
+ bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
is2t ? "2T2R" : "1T1R");
@@ -1898,40 +1839,40 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
_rtl92d_phy_pimode_switch(hw, true);
rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00);
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKDWORD, 0x03a05600);
- rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, BMASKDWORD, 0x000800e4);
- rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BMASKDWORD, 0x22204000);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22204000);
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
if (is2t) {
- rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD,
0x00010000);
- rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, MASKDWORD,
0x00010000);
}
/* MAC settings */
_rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
rtlphy->iqk_mac_backup);
/* Page B init */
- rtl_set_bbreg(hw, 0xb68, BMASKDWORD, 0x0f600000);
+ rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
if (is2t)
- rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000);
+ rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x0f600000);
/* IQ calibration setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
- rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x01007c00);
- rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
+ rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
for (i = 0; i < retrycount; i++) {
patha_ok = _rtl92d_phy_patha_iqk(hw, is2t);
if (patha_ok == 0x03) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path A IQK Success!!\n");
- result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][2] = (rtl_get_bbreg(hw, 0xea4, BMASKDWORD) &
+ result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][3] = (rtl_get_bbreg(hw, 0xeac, BMASKDWORD) &
+ result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
0x3FF0000) >> 16;
break;
} else if (i == (retrycount - 1) && patha_ok == 0x01) {
@@ -1939,9 +1880,9 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path A IQK Only Tx Success!!\n");
- result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
0x3FF0000) >> 16;
}
}
@@ -1957,22 +1898,22 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path B IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
result[t][5] = (rtl_get_bbreg(hw, 0xebc,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
result[t][6] = (rtl_get_bbreg(hw, 0xec4,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
result[t][7] = (rtl_get_bbreg(hw, 0xecc,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
break;
} else if (i == (retrycount - 1) && pathb_ok == 0x01) {
/* Tx IQK OK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path B Only Tx IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
result[t][5] = (rtl_get_bbreg(hw, 0xebc,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
}
}
if (0x00 == pathb_ok)
@@ -1984,7 +1925,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"IQK:Back to BB mode, load original value!\n");
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
if (t != 0) {
/* Switch back BB to SI mode after finish IQ Calibration. */
if (!rtlphy->rfpi_enable)
@@ -2004,8 +1945,8 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
rtlphy->iqk_bb_backup,
IQK_BB_REG_NUM - 1);
/* load 0xe30 IQC default value */
- rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x01008c00);
- rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x01008c00);
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
}
RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n");
}
@@ -2042,7 +1983,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 5G NORMAL:Start!!!\n");
mdelay(IQK_DELAY_TIME * 20);
if (t == 0) {
- bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD);
+ bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
is2t ? "2T2R" : "1T1R");
@@ -2072,38 +2013,38 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
if (!rtlphy->rfpi_enable)
_rtl92d_phy_pimode_switch(hw, true);
rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00);
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKDWORD, 0x03a05600);
- rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, BMASKDWORD, 0x000800e4);
- rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BMASKDWORD, 0x22208000);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22208000);
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
/* Page B init */
- rtl_set_bbreg(hw, 0xb68, BMASKDWORD, 0x0f600000);
+ rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
if (is2t)
- rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000);
+ rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x0f600000);
/* IQ calibration setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
- rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x10007c00);
- rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x10007c00);
+ rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
patha_ok = _rtl92d_phy_patha_iqk_5g_normal(hw, is2t);
if (patha_ok == 0x03) {
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Success!!\n");
- result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][2] = (rtl_get_bbreg(hw, 0xea4, BMASKDWORD) &
+ result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][3] = (rtl_get_bbreg(hw, 0xeac, BMASKDWORD) &
+ result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
0x3FF0000) >> 16;
} else if (patha_ok == 0x01) { /* Tx IQK OK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path A IQK Only Tx Success!!\n");
- result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
0x3FF0000) >> 16;
} else {
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Fail!!\n");
@@ -2116,20 +2057,20 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
if (pathb_ok == 0x03) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path B IQK Success!!\n");
- result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) &
+ result[t][4] = (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) &
+ result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][6] = (rtl_get_bbreg(hw, 0xec4, BMASKDWORD) &
+ result[t][6] = (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][7] = (rtl_get_bbreg(hw, 0xecc, BMASKDWORD) &
+ result[t][7] = (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
0x3FF0000) >> 16;
} else if (pathb_ok == 0x01) { /* Tx IQK OK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path B Only Tx IQK Success!!\n");
- result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) &
+ result[t][4] = (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) &
+ result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
0x3FF0000) >> 16;
} else {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
@@ -2140,7 +2081,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
/* Back to BB mode, load original value */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"IQK:Back to BB mode, load original value!\n");
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
if (t != 0) {
if (is2t)
_rtl92d_phy_reload_adda_registers(hw, iqk_bb_reg,
@@ -2240,7 +2181,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
return;
} else if (iqk_ok) {
oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
- BMASKDWORD) >> 22) & 0x3FF; /* OFDM0_D */
+ MASKDWORD) >> 22) & 0x3FF; /* OFDM0_D */
val_x = result[final_candidate][0];
if ((val_x & 0x00000200) != 0)
val_x = val_x | 0xFFFFFC00;
@@ -2271,7 +2212,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
((val_y * oldval_0 >> 7) & 0x1));
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xC80 = 0x%x\n",
rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
- BMASKDWORD));
+ MASKDWORD));
if (txonly) {
RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n");
return;
@@ -2299,7 +2240,7 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
return;
} else if (iqk_ok) {
oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
- BMASKDWORD) >> 22) & 0x3FF;
+ MASKDWORD) >> 22) & 0x3FF;
val_x = result[final_candidate][4];
if ((val_x & 0x00000200) != 0)
val_x = val_x | 0xFFFFFC00;
@@ -2657,7 +2598,7 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
rf_mode[index] = rtl_read_byte(rtlpriv, offset);
/* 2. Set RF mode = standby mode */
rtl_set_rfreg(hw, (enum radio_path)index, RF_AC,
- BRFREGOFFSETMASK, 0x010000);
+ RFREG_OFFSET_MASK, 0x010000);
if (rtlpci->init_ready) {
/* switch CV-curve control by LC-calibration */
rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
@@ -2667,16 +2608,16 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
0x08000, 0x01);
}
u4tmp = rtl_get_rfreg(hw, (enum radio_path)index, RF_SYN_G6,
- BRFREGOFFSETMASK);
+ RFREG_OFFSET_MASK);
while ((!(u4tmp & BIT(11))) && timecount <= timeout) {
mdelay(50);
timecount += 50;
u4tmp = rtl_get_rfreg(hw, (enum radio_path)index,
- RF_SYN_G6, BRFREGOFFSETMASK);
+ RF_SYN_G6, RFREG_OFFSET_MASK);
}
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"PHY_LCK finish delay for %d ms=2\n", timecount);
- u4tmp = rtl_get_rfreg(hw, index, RF_SYN_G4, BRFREGOFFSETMASK);
+ u4tmp = rtl_get_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK);
if (index == 0 && rtlhal->interfaceindex == 0) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"path-A / 5G LCK\n");
@@ -2696,9 +2637,9 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
0x7f, i);
rtl_set_rfreg(hw, (enum radio_path)index, 0x4D,
- BRFREGOFFSETMASK, 0x0);
+ RFREG_OFFSET_MASK, 0x0);
readval = rtl_get_rfreg(hw, (enum radio_path)index,
- 0x4F, BRFREGOFFSETMASK);
+ 0x4F, RFREG_OFFSET_MASK);
curvecount_val[2 * i + 1] = (readval & 0xfffe0) >> 5;
/* reg 0x4f [4:0] */
/* reg 0x50 [19:10] */
@@ -2912,7 +2853,7 @@ static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
}
rtl_set_rfreg(hw, (enum radio_path)rfpath,
currentcmd->para1,
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rtlphy->rfreg_chnlval[rfpath]);
_rtl92d_phy_reload_imr_setting(hw, channel,
rfpath);
@@ -2960,7 +2901,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
rtlhal->bandset == BAND_ON_BOTH) {
ret_value = rtl_get_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
- BMASKDWORD);
+ MASKDWORD);
if (rtlphy->current_channel > 14 && !(ret_value & BIT(0)))
rtl92d_phy_switch_wirelessband(hw, BAND_ON_5G);
else if (rtlphy->current_channel <= 14 && (ret_value & BIT(0)))
@@ -3112,7 +3053,7 @@ static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
/* a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue */
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
/* b. RF path 0 offset 0x00 = 0x00 disable RF */
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
/* c. APSD_CTRL 0x600[7:0] = 0x40 */
rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
/* d. APSD_CTRL 0x600[7:0] = 0x00
@@ -3120,12 +3061,12 @@ static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
* RF path 0 offset 0x00 = 0x00
* APSD_CTRL 0x600[7:0] = 0x40
* */
- u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, BRFREGOFFSETMASK);
+ u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
while (u4btmp != 0 && delay > 0) {
rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
- u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, BRFREGOFFSETMASK);
+ u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
delay--;
}
if (delay == 0) {
@@ -3468,9 +3409,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
/* 5G LAN ON */
rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0xa);
/* TX BB gain shift*1,Just for testchip,0xc80,0xc88 */
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
0x40000100);
- rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
0x40000100);
if (rtlhal->macphymode == DUALMAC_DUALPHY) {
rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
@@ -3524,16 +3465,16 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0x0);
/* TX BB gain shift,Just for testchip,0xc80,0xc88 */
if (rtlefuse->internal_pa_5g[0])
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
0x2d4000b5);
else
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
0x20000080);
if (rtlefuse->internal_pa_5g[1])
- rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
0x2d4000b5);
else
- rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
0x20000080);
if (rtlhal->macphymode == DUALMAC_DUALPHY) {
rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
@@ -3560,8 +3501,8 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
}
}
/* update IQK related settings */
- rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, BMASKDWORD, 0x40000100);
- rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, BMASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100);
rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000, 0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30) | BIT(28) |
BIT(26) | BIT(24), 0x00);
@@ -3590,7 +3531,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
/* DMDP */
if (rtlphy->rf_type == RF_1T1R) {
/* Use antenna 0,0xc04,0xd04 */
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x11);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x11);
rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x1);
/* enable ad/da clock1 for dual-phy reg0x888 */
@@ -3612,7 +3553,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
} else {
/* Single PHY */
/* Use antenna 0 & 1,0xc04,0xd04 */
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x33);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x33);
rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x3);
/* disable ad/da clock1,0x888 */
rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) | BIT(13), 0);
@@ -3620,9 +3561,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
rfpath++) {
rtlphy->rfreg_chnlval[rfpath] = rtl_get_rfreg(hw, rfpath,
- RF_CHNLBW, BRFREGOFFSETMASK);
+ RF_CHNLBW, RFREG_OFFSET_MASK);
rtlphy->reg_rf3c[rfpath] = rtl_get_rfreg(hw, rfpath, 0x3C,
- BRFREGOFFSETMASK);
+ RFREG_OFFSET_MASK);
}
for (i = 0; i < 2; i++)
RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
index b7498c5bafc5..7f29b8d765b3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
@@ -1295,18 +1295,4 @@
#define BWORD1 0xc
#define BDWORD 0xf
-#define BMASKBYTE0 0xff
-#define BMASKBYTE1 0xff00
-#define BMASKBYTE2 0xff0000
-#define BMASKBYTE3 0xff000000
-#define BMASKHWORD 0xffff0000
-#define BMASKLWORD 0x0000ffff
-#define BMASKDWORD 0xffffffff
-#define BMASK12BITS 0xfff
-#define BMASKH4BITS 0xf0000000
-#define BMASKOFDM_D 0xffc00000
-#define BMASKCCK 0x3f3f3f3f
-
-#define BRFREGOFFSETMASK 0xfffff
-
#endif
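
The hunks above rename the rtl8192de-private B-prefixed masks (BMASKDWORD, BMASKBYTE0, BRFREGOFFSETMASK, ...) to the driver-wide names and drop the duplicate definitions from reg.h. A minimal sketch of the shared definitions the renamed call sites now rely on, with the values copied from the macros removed above; the header that actually provides them (presumably the common rtlwifi wifi.h) is not part of this diff:

/* Driver-wide register masks assumed by the MASKDWORD/MASKBYTE0/... call sites. */
#define MASKBYTE0		0xff
#define MASKBYTE1		0xff00
#define MASKBYTE2		0xff0000
#define MASKBYTE3		0xff000000
#define MASKHWORD		0xffff0000
#define MASKLWORD		0x0000ffff
#define MASKDWORD		0xffffffff
#define RFREG_OFFSET_MASK	0xfffff	/* 20-bit RF register mask */
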
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
index 20144e0b4142..6a6ac540d5b5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
@@ -125,7 +125,7 @@ void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
}
tmpval = tx_agc[RF90_PATH_A] & 0xff;
- rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, BMASKBYTE1, tmpval);
+ rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
"CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
tmpval, RTXAGC_A_CCK1_MCS32);
@@ -135,7 +135,7 @@ void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
"CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
tmpval, RTXAGC_B_CCK11_A_CCK2_11);
tmpval = tx_agc[RF90_PATH_B] >> 24;
- rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, BMASKBYTE0, tmpval);
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
"CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
tmpval, RTXAGC_B_CCK11_A_CCK2_11);
@@ -360,7 +360,7 @@ static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw,
regoffset = regoffset_a[index];
else
regoffset = regoffset_b[index];
- rtl_set_bbreg(hw, regoffset, BMASKDWORD, writeval);
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
"Set 0x%x = %08x\n", regoffset, writeval);
if (((get_rf_type(rtlphy) == RF_2T2R) &&
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 0eb0f4ae5920..99c2ab5dfceb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -545,7 +545,7 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
@@ -786,7 +786,8 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_OWN(pdesc, 1);
}
-void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
{
if (istx) {
switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index c1b5dfb79d53..fb5cf0634e8d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -728,8 +728,8 @@ struct rx_desc_92d {
} __packed;
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr,
- u8 *pdesc, struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
@@ -737,7 +737,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
u8 *pdesc, struct sk_buff *skb);
-void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 rtl92de_get_desc(u8 *pdesc, bool istx, u8 desc_name);
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 4f461786a7eb..9098558d916d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -251,7 +251,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 e_aci = *val;
rtl92s_dm_init_edca_turbo(hw);
- if (rtlpci->acm_method != eAcmWay2_SW)
+ if (rtlpci->acm_method != EACMWAY2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL,
&e_aci);
@@ -413,20 +413,18 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
+ &ppsc->fwctrl_psmode);
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+ &rpwm_val);
} else {
rpwm_val = 0x0C; /* RF on */
fw_pwrmode = FW_PS_ACTIVE_MODE;
fw_current_inps = false;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
+ &rpwm_val);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_FW_PSMODE_STATUS,
@@ -955,7 +953,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 tmp_byte = 0;
-
+ unsigned long flags;
bool rtstatus = true;
u8 tmp_u1b;
int err = false;
@@ -967,6 +965,16 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
rtlpci->being_init_adapter = true;
+ /* As this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe doing so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
+
rtlpriv->intf_ops->disable_aspm(hw);
/* 1. MAC Initialize */
@@ -984,7 +992,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now... "
"Please copy FW into /lib/firmware/rtlwifi\n");
- return 1;
+ err = 1;
+ goto exit;
}
/* After FW download, we have to reset MAC register */
@@ -997,7 +1006,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
/* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */
if (!rtl92s_phy_mac_config(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n");
- return rtstatus;
+ err = rtstatus;
+ goto exit;
}
/* because last function modify RCR, so we update
@@ -1016,7 +1026,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
/* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */
if (!rtl92s_phy_bb_config(hw)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n");
- return rtstatus;
+ err = rtstatus;
+ goto exit;
}
/* 5. Initiailze RF RAIO_A.txt RF RAIO_B.txt */
@@ -1033,7 +1044,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
if (!rtl92s_phy_rf_config(hw)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n");
- return rtstatus;
+ err = rtstatus;
+ goto exit;
}
/* After read predefined TXT, we must set BB/MAC/RF
@@ -1122,8 +1134,9 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_ON);
rtl92s_dm_init(hw);
+exit:
+ local_irq_restore(flags);
rtlpci->being_init_adapter = false;
-
return err;
}
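
The rtl92se_hw_init() changes above follow a single pattern: the function can run for up to roughly 350 ms and may be entered with interrupts disabled, so it saves the caller's irq state, enables interrupts for the duration of the init, and funnels every early return through one exit label so local_irq_restore() always runs. A minimal sketch of that bracket, using a hypothetical do_long_configuration() stand-in for the MAC/BB/RF steps:

static bool do_long_configuration(struct ieee80211_hw *hw)
{
	return true;	/* stand-in for the firmware/MAC/BB/RF config steps */
}

static int example_hw_init(struct ieee80211_hw *hw)
{
	unsigned long flags;
	int err = 0;

	local_save_flags(flags);	/* remember the caller's irq state */
	local_irq_enable();		/* let other devices keep being serviced */

	if (!do_long_configuration(hw)) {
		err = 1;
		goto exit;		/* never return without the restore below */
	}

exit:
	local_irq_restore(flags);	/* back to the caller's irq state */
	return err;
}
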
@@ -1135,12 +1148,13 @@ void rtl92se_set_mac_addr(struct rtl_io *io, const u8 *addr)
void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u32 reg_rcr = rtlpci->receive_config;
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid) {
reg_rcr |= (RCR_CBSSID);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
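
Both this hunk and the matching rtl8723ae change later in the series stop using the RCR value cached in rtl_pci->receive_config and instead query the current register through the hal ops before modifying it. A sketch of the resulting read-modify-write, assuming the else branch clears the same CBSSID bits it sets (that branch is truncated in this excerpt):

u32 reg_rcr;

rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)&reg_rcr);
if (check_bssid)
	reg_rcr |= RCR_CBSSID;		/* filter on the associated BSSID */
else
	reg_rcr &= ~RCR_CBSSID;		/* accept frames from any BSSID */
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)&reg_rcr);
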
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 9c092e6eb3fe..77c5b5f35244 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -833,18 +834,7 @@ static bool _rtl92s_phy_config_bb(struct ieee80211_hw *hw, u8 configtype)
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_len; i = i + 2) {
- if (phy_reg_table[i] == 0xfe)
- mdelay(50);
- else if (phy_reg_table[i] == 0xfd)
- mdelay(5);
- else if (phy_reg_table[i] == 0xfc)
- mdelay(1);
- else if (phy_reg_table[i] == 0xfb)
- udelay(50);
- else if (phy_reg_table[i] == 0xfa)
- udelay(5);
- else if (phy_reg_table[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_reg_table[i]);
/* Add delay for ECS T20 & LG malow platform, */
udelay(1);
@@ -886,18 +876,7 @@ static bool _rtl92s_phy_set_bb_to_diff_rf(struct ieee80211_hw *hw,
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray2xtxr_len; i = i + 3) {
- if (phy_regarray2xtxr_table[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray2xtxr_table[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray2xtxr_table[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray2xtxr_table[i] == 0xfb)
- udelay(50);
- else if (phy_regarray2xtxr_table[i] == 0xfa)
- udelay(5);
- else if (phy_regarray2xtxr_table[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray2xtxr_table[i]);
rtl92s_phy_set_bb_reg(hw, phy_regarray2xtxr_table[i],
phy_regarray2xtxr_table[i + 1],
@@ -920,18 +899,7 @@ static bool _rtl92s_phy_config_bb_with_pg(struct ieee80211_hw *hw,
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_pg_len; i = i + 3) {
- if (phy_table_pg[i] == 0xfe)
- mdelay(50);
- else if (phy_table_pg[i] == 0xfd)
- mdelay(5);
- else if (phy_table_pg[i] == 0xfc)
- mdelay(1);
- else if (phy_table_pg[i] == 0xfb)
- udelay(50);
- else if (phy_table_pg[i] == 0xfa)
- udelay(5);
- else if (phy_table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_table_pg[i]);
_rtl92s_store_pwrindex_diffrate_offset(hw,
phy_table_pg[i],
@@ -1034,28 +1002,9 @@ u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radio_a_tblen; i = i + 2) {
- if (radio_a_table[i] == 0xfe)
- /* Delay specific ms. Only RF configuration
- * requires delay. */
- mdelay(50);
- else if (radio_a_table[i] == 0xfd)
- mdelay(5);
- else if (radio_a_table[i] == 0xfc)
- mdelay(1);
- else if (radio_a_table[i] == 0xfb)
- udelay(50);
- else if (radio_a_table[i] == 0xfa)
- udelay(5);
- else if (radio_a_table[i] == 0xf9)
- udelay(1);
- else
- rtl92s_phy_set_rf_reg(hw, rfpath,
- radio_a_table[i],
- MASK20BITS,
- radio_a_table[i + 1]);
+ rtl_rfreg_delay(hw, rfpath, radio_a_table[i],
+ MASK20BITS, radio_a_table[i + 1]);
- /* Add delay for ECS T20 & LG malow platform */
- udelay(1);
}
/* PA Bias current for inferiority IC */
@@ -1063,28 +1012,8 @@ u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
break;
case RF90_PATH_B:
for (i = 0; i < radio_b_tblen; i = i + 2) {
- if (radio_b_table[i] == 0xfe)
- /* Delay specific ms. Only RF configuration
- * requires delay.*/
- mdelay(50);
- else if (radio_b_table[i] == 0xfd)
- mdelay(5);
- else if (radio_b_table[i] == 0xfc)
- mdelay(1);
- else if (radio_b_table[i] == 0xfb)
- udelay(50);
- else if (radio_b_table[i] == 0xfa)
- udelay(5);
- else if (radio_b_table[i] == 0xf9)
- udelay(1);
- else
- rtl92s_phy_set_rf_reg(hw, rfpath,
- radio_b_table[i],
- MASK20BITS,
- radio_b_table[i + 1]);
-
- /* Add delay for ECS T20 & LG malow platform */
- udelay(1);
+ rtl_rfreg_delay(hw, rfpath, radio_b_table[i],
+ MASK20BITS, radio_b_table[i + 1]);
}
break;
case RF90_PATH_C:
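
The three baseband hunks and the two RF-path hunks above replace open-coded delay ladders with rtl_addr_delay() and rtl_rfreg_delay() from the shared core (hence the new "../core.h" include at the top of this file). A sketch of what those helpers are assumed to do, reconstructed from the ladders they replace; the actual core.c implementation is not part of this diff:

/* 0xf9..0xfe are not registers but encoded delays in the config tables. */
void rtl_addr_delay(u32 addr)
{
	if (addr == 0xfe)
		mdelay(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		udelay(50);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
}

void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath,
		     u32 addr, u32 mask, u32 data)
{
	if (addr >= 0xf9 && addr <= 0xfe) {
		rtl_addr_delay(addr);	/* encoded delay, no register write */
	} else {
		rtl_set_rfreg(hw, rfpath, addr, mask, data);
		udelay(1);		/* keep the ECS T20 / LG platform delay */
	}
}
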
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index c81c83591940..e13043479b71 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -1165,16 +1165,4 @@
#define BTX_AGCRATECCK 0x7f00
-#define MASKBYTE0 0xff
-#define MASKBYTE1 0xff00
-#define MASKBYTE2 0xff0000
-#define MASKBYTE3 0xff000000
-#define MASKHWORD 0xffff0000
-#define MASKLWORD 0x0000ffff
-#define MASKDWORD 0xffffffff
-
-#define MAKS12BITS 0xfffff
-#define MASK20BITS 0xfffff
-#define RFREG_OFFSET_MASK 0xfffff
-
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
index 92d38ab3c60e..78a81c1e390b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
@@ -52,7 +52,7 @@ static void _rtl92s_get_powerbase(struct ieee80211_hw *hw, u8 *p_pwrlevel,
/* We only care about the path A for legacy. */
if (rtlefuse->eeprom_version < 2) {
pwrbase0 = pwrlevel[0] + (rtlefuse->legacy_httxpowerdiff & 0xf);
- } else if (rtlefuse->eeprom_version >= 2) {
+ } else {
legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff
[RF90_PATH_A][chnl - 1];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 27efbcdac6a9..36b48be8329c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -310,7 +310,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
/* during testing, hdr was NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
@@ -336,7 +336,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
@@ -573,7 +573,8 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
}
}
-void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
{
if (istx) {
switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
index 64dd66f287c1..5a13f17e3b41 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
@@ -29,8 +29,9 @@
#ifndef __REALTEK_PCI92SE_TRX_H__
#define __REALTEK_PCI92SE_TRX_H__
-void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
- u8 *pdesc, struct ieee80211_tx_info *info,
+void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
@@ -39,7 +40,8 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status, u8 *pdesc,
struct sk_buff *skb);
-void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 rtl92se_get_desc(u8 *pdesc, bool istx, u8 desc_name);
void rtl92se_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
index 4ed731f09b1f..9c34a85fdb89 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
@@ -10,7 +10,6 @@ rtl8723ae-objs := \
led.o \
phy.o \
pwrseq.o \
- pwrseqcmd.o \
rf.o \
sw.o \
table.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
index 8c110356dff9..debe261a7eeb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
@@ -46,11 +46,6 @@
#define E_CUT_VERSION BIT(14)
#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
-enum version_8723e {
- VERSION_TEST_UMC_CHIP_8723 = 0x0081,
- VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089,
- VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089,
-};
/* MASK */
#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2))
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
index a36eee28f9e7..25cc83058b01 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
@@ -35,6 +35,7 @@
#include "def.h"
#include "phy.h"
#include "dm.h"
+#include "../rtl8723com/dm_common.h"
#include "fw.h"
#include "hal_btc.h"
@@ -483,16 +484,6 @@ static void rtl8723ae_dm_dig(struct ieee80211_hw *hw)
rtl8723ae_dm_ctrl_initgain_by_twoport(hw);
}
-static void rtl8723ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm.dynamic_txpower_enable = false;
-
- rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
- rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
-}
-
static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -585,19 +576,6 @@ void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw)
}
}
-static void rtl8723ae_dm_pwdmonitor(struct ieee80211_hw *hw)
-{
-}
-
-void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm.current_turbo_edca = false;
- rtlpriv->dm.is_any_nonbepkts = false;
- rtlpriv->dm.is_cur_rdlstate = false;
-}
-
static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -669,9 +647,8 @@ static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (u8 *) (&tmp));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
@@ -778,17 +755,6 @@ static void rtl8723ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
}
}
-static void rtl8723ae_dm_init_dynamic_bpowersaving(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
- rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
- rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
- rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
- rtlpriv->dm_pstable.rssi_val_min = 0;
-}
-
void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -905,11 +871,11 @@ void rtl8723ae_dm_init(struct ieee80211_hw *hw)
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
rtl8723ae_dm_diginit(hw);
- rtl8723ae_dm_init_dynamic_txpower(hw);
- rtl8723ae_dm_init_edca_turbo(hw);
+ rtl8723_dm_init_dynamic_txpower(hw);
+ rtl8723_dm_init_edca_turbo(hw);
rtl8723ae_dm_init_rate_adaptive_mask(hw);
rtl8723ae_dm_initialize_txpower_tracking(hw);
- rtl8723ae_dm_init_dynamic_bpowersaving(hw);
+ rtl8723_dm_init_dynamic_bb_powersaving(hw);
}
void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
@@ -930,7 +896,6 @@ void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
if ((ppsc->rfpwr_state == ERFON) &&
((!fw_current_inpsmode) && fw_ps_awake) &&
(!ppsc->rfchange_inprogress)) {
- rtl8723ae_dm_pwdmonitor(hw);
rtl8723ae_dm_dig(hw);
rtl8723ae_dm_false_alarm_counter_statistics(hw);
rtl8723ae_dm_dynamic_bpowersaving(hw);
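
The dm.c hunks above drop the rtl8723ae-local init helpers and the empty power monitor, and rtl8723ae_dm_init() now calls the rtl8723com versions (rtl8723_dm_init_dynamic_txpower(), rtl8723_dm_init_edca_turbo(), rtl8723_dm_init_dynamic_bb_powersaving()). A sketch of one of those shared helpers, reconstructed from the per-driver body removed above; the real dm_common.c is not shown in this excerpt:

void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* Same defaults the removed rtl8723ae_dm_init_edca_turbo() set up. */
	rtlpriv->dm.current_turbo_edca = false;
	rtlpriv->dm.is_any_nonbepkts = false;
	rtlpriv->dm.is_cur_rdlstate = false;
}
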
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
index a372b0204456..d253bb53d03e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
@@ -147,7 +147,6 @@ enum dm_dig_connect_e {
void rtl8723ae_dm_init(struct ieee80211_hw *hw);
void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw);
void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw);
-void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw);
void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
index ba1502b172a6..728b7563ad36 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
@@ -34,199 +34,7 @@
#include "reg.h"
#include "def.h"
#include "fw.h"
-
-static void _rtl8723ae_enable_fw_download(struct ieee80211_hw *hw, bool enable)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp;
- if (enable) {
- tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
-
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
- rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
-
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
- rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
- } else {
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
- rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
-
- rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
- }
-}
-
-static void _rtl8723ae_fw_block_write(struct ieee80211_hw *hw,
- const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 blockSize = sizeof(u32);
- u8 *bufferPtr = (u8 *) buffer;
- u32 *pu4BytePtr = (u32 *) buffer;
- u32 i, offset, blockCount, remainSize;
-
- blockCount = size / blockSize;
- remainSize = size % blockSize;
-
- for (i = 0; i < blockCount; i++) {
- offset = i * blockSize;
- rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
- *(pu4BytePtr + i));
- }
-
- if (remainSize) {
- offset = blockCount * blockSize;
- bufferPtr += offset;
- for (i = 0; i < remainSize; i++) {
- rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
- offset + i), *(bufferPtr + i));
- }
- }
-}
-
-static void _rtl8723ae_fw_page_write(struct ieee80211_hw *hw,
- u32 page, const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 value8;
- u8 u8page = (u8) (page & 0x07);
-
- value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
- rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
- _rtl8723ae_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl8723ae_write_fw(struct ieee80211_hw *hw,
- enum version_8723e version, u8 *buffer,
- u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 *bufferPtr = (u8 *) buffer;
- u32 page_nums, remain_size;
- u32 page, offset;
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
-
- page_nums = size / FW_8192C_PAGE_SIZE;
- remain_size = size % FW_8192C_PAGE_SIZE;
-
- if (page_nums > 6) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Page numbers should not be greater then 6\n");
- }
-
- for (page = 0; page < page_nums; page++) {
- offset = page * FW_8192C_PAGE_SIZE;
- _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
- FW_8192C_PAGE_SIZE);
- }
-
- if (remain_size) {
- offset = page_nums * FW_8192C_PAGE_SIZE;
- page = page_nums;
- _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
- remain_size);
- }
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
-}
-
-static int _rtl8723ae_fw_free_to_go(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int err = -EIO;
- u32 counter = 0;
- u32 value32;
-
- do {
- value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
- (!(value32 & FWDL_ChkSum_rpt)));
-
- if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
- value32);
- goto exit;
- }
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
- value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- value32 |= MCUFWDL_RDY;
- value32 &= ~WINTINI_RDY;
- rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
-
- counter = 0;
-
- do {
- value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- if (value32 & WINTINI_RDY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
- value32);
- err = 0;
- goto exit;
- }
-
- mdelay(FW_8192C_POLLING_DELAY);
-
- } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
-
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32);
-
-exit:
- return err;
-}
-
-int rtl8723ae_download_fw(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl8723ae_firmware_header *pfwheader;
- u8 *pfwdata;
- u32 fwsize;
- int err;
- enum version_8723e version = rtlhal->version;
-
- if (!rtlhal->pfirmware)
- return 1;
-
- pfwheader = (struct rtl8723ae_firmware_header *)rtlhal->pfirmware;
- pfwdata = (u8 *) rtlhal->pfirmware;
- fwsize = rtlhal->fwsize;
-
- if (IS_FW_HEADER_EXIST(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware Version(%d), Signature(%#x),Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtl8723ae_firmware_header));
-
- pfwdata = pfwdata + sizeof(struct rtl8723ae_firmware_header);
- fwsize = fwsize - sizeof(struct rtl8723ae_firmware_header);
- }
-
- if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
- rtl8723ae_firmware_selfreset(hw);
- rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
- }
- _rtl8723ae_enable_fw_download(hw, true);
- _rtl8723ae_write_fw(hw, version, pfwdata, fwsize);
- _rtl8723ae_enable_fw_download(hw, false);
-
- err = _rtl8723ae_fw_free_to_go(hw);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Firmware is not ready to run!\n");
- } else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Firmware is ready to run!\n");
- }
- return 0;
-}
+#include "../rtl8723com/fw_common.h"
static bool rtl8723ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
{
@@ -463,50 +271,6 @@ void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw,
return;
}
-void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw)
-{
- u8 u1tmp;
- u8 delay = 100;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
- u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
-
- while (u1tmp & BIT(2)) {
- delay--;
- if (delay == 0)
- break;
- udelay(50);
- u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- }
- if (delay == 0) {
- u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2)));
- }
-}
-
-void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 u1_h2c_set_pwrmode[3] = { 0 };
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
-
- SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
- SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
- (rtlpriv->mac80211.p2p) ?
- ppsc->smart_ps : 1);
- SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
- ppsc->reg_max_lps_awakeintvl);
-
- RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
- u1_h2c_set_pwrmode, 3);
- rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
-
-}
-
static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
@@ -812,7 +576,6 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
p2p_ps_offload->offload_en = 1;
-
if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
p2p_ps_offload->role = 1;
p2p_ps_offload->allstasleep = 0;
@@ -836,3 +599,24 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
rtl8723ae_fill_h2c_cmd(hw, H2C_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload);
}
+
+void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 u1_h2c_set_pwrmode[3] = { 0 };
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+
+ SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
+ SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(u1_h2c_set_pwrmode,
+ (rtlpriv->mac80211.p2p) ?
+ ppsc->smart_ps : 1);
+ SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
+ ppsc->reg_max_lps_awakeintvl);
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
+ u1_h2c_set_pwrmode, 3);
+ rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
index ed3b795e6980..d355b85dd9fe 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
@@ -34,7 +34,7 @@
#define FW_8192C_END_ADDRESS 0x3FFF
#define FW_8192C_PAGE_SIZE 4096
#define FW_8192C_POLLING_DELAY 5
-#define FW_8192C_POLLING_TIMEOUT_COUNT 1000
+#define FW_8192C_POLLING_TIMEOUT_COUNT 6000
#define BEACON_PG 0
#define PSPOLL_PG 2
@@ -65,21 +65,9 @@ struct rtl8723ae_firmware_header {
u32 rsvd5;
};
-enum rtl8192c_h2c_cmd {
- H2C_AP_OFFLOAD = 0,
- H2C_SETPWRMODE = 1,
- H2C_JOINBSSRPT = 2,
- H2C_RSVDPAGE = 3,
- H2C_RSSI_REPORT = 4,
- H2C_P2P_PS_CTW_CMD = 5,
- H2C_P2P_PS_OFFLOAD = 6,
- H2C_RA_MASK = 7,
- MAX_H2CCMD
-};
-
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
@@ -92,10 +80,8 @@ enum rtl8192c_h2c_cmd {
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
-int rtl8723ae_download_fw(struct ieee80211_hw *hw);
void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
-void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
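
With the download and selfreset code removed from fw.c and the prototypes dropped from fw.h, firmware loading goes through rtl8723com/fw_common. Only the call site is visible in this excerpt (hw.c switches to rtl8723_download_fw(hw, false)), so the prototype below is an assumption inferred from that call: an int return checked as an error code, and a flag that presumably selects between the 8723A- and 8723B-style download paths.

/* Assumed shared helper from ../rtl8723com/fw_common.h (not in this diff). */
int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be);
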
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
index 3d092e4b0b7f..48fee1be78c2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -31,6 +31,8 @@
#include "../pci.h"
#include "dm.h"
#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "../rtl8723com/fw_common.h"
#include "phy.h"
#include "reg.h"
#include "hal_btc.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
index 68c28340f791..5d534df8d90c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
@@ -30,7 +30,9 @@
#include "hal_btc.h"
#include "../pci.h"
#include "phy.h"
+#include "../rtl8723com/phy_common.h"
#include "fw.h"
+#include "../rtl8723com/fw_common.h"
#include "reg.h"
#include "def.h"
@@ -391,13 +393,13 @@ static void rtl8723ae_dm_bt_set_sw_full_time_dac_swing(struct ieee80211_hw *hw,
if (sw_dac_swing_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl);
- rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000,
- sw_dac_swing_lvl);
+ rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000,
+ sw_dac_swing_lvl);
rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
} else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], SwDacSwing Off!\n");
- rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
+ rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index c333dfd116b8..65c9e80e1f78 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -38,10 +38,11 @@
#include "def.h"
#include "phy.h"
#include "dm.h"
+#include "../rtl8723com/dm_common.h"
#include "fw.h"
+#include "../rtl8723com/fw_common.h"
#include "led.h"
#include "hw.h"
-#include "pwrseqcmd.h"
#include "pwrseq.h"
#include "btc.h"
@@ -206,14 +207,13 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (u8 *) (&e_aci));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &e_aci);
}
break; }
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool)*val;
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
reg_tmp |= 0x80;
@@ -224,7 +224,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *) val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
sec_min_space = 0;
@@ -248,7 +248,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -272,7 +272,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
else
p_regtoset = regtoset_normal;
- factor_toset = *((u8 *) val);
+ factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -303,16 +303,15 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
break; }
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *) val);
- rtl8723ae_dm_init_edca_turbo(hw);
+ u8 e_aci = *val;
+ rtl8723_dm_init_edca_turbo(hw);
- if (rtlpci->acm_method != eAcmWay2_SW)
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_ACM_CTRL,
- (u8 *) (&e_aci));
+ if (rtlpci->acm_method != EACMWAY2_SW)
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
+ &e_aci);
break; }
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn =
(union aci_aifsn *)(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -365,7 +364,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlpci->receive_config = ((u32 *) (val))[0];
break;
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = *val;
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -378,13 +377,13 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *) val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl8723ae_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_SET_RPWM:{
u8 rpwm_val;
@@ -393,27 +392,25 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
udelay(1);
if (rpwm_val & BIT(7)) {
- rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- (*(u8 *) val));
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
} else {
- rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- ((*(u8 *) val) | BIT(7)));
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
}
break; }
case HW_VAR_H2C_FW_PWRMODE:{
- u8 psmode = (*(u8 *) val);
+ u8 psmode = *val;
if (psmode != FW_PS_ACTIVE_MODE)
rtl8723ae_dm_rf_saving(hw, true);
- rtl8723ae_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ rtl8723ae_set_fw_pwrmode_cmd(hw, *val);
break; }
case HW_VAR_FW_PSMODE_STATUS:
ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT:{
- u8 mstatus = (*(u8 *) val);
+ u8 mstatus = *val;
u8 tmp_regcr, tmp_reg422;
bool recover = false;
@@ -446,11 +443,11 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_CR + 1,
(tmp_regcr & ~(BIT(0))));
}
- rtl8723ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ rtl8723ae_set_fw_joinbss_report_cmd(hw, *val);
break; }
case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
- rtl8723ae_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
+ rtl8723ae_set_p2p_ps_offload_cmd(hw, *val);
break;
case HW_VAR_AID:{
u16 u2btmp;
@@ -460,7 +457,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->assoc_id));
break; }
case HW_VAR_CORRECT_TSF:{
- u8 btype_ibss = ((u8 *) (val))[0];
+ u8 btype_ibss = *val;
if (btype_ibss == true)
_rtl8723ae_stop_tx_beacon(hw);
@@ -490,20 +487,18 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
+ &ppsc->fwctrl_psmode);
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+ &rpwm_val);
} else {
rpwm_val = 0x0C; /* RF on */
fw_pwrmode = FW_PS_ACTIVE_MODE;
fw_current_inps = false;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
+ &rpwm_val);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_FW_PSMODE_STATUS,
@@ -880,23 +875,33 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw)
bool rtstatus = true;
int err;
u8 tmp_u1b;
+ unsigned long flags;
rtlpriv->rtlhal.being_init_adapter = true;
+ /* As this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe doing so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
+
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl8712e_init_mac(hw);
if (rtstatus != true) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
err = 1;
- return err;
+ goto exit;
}
- err = rtl8723ae_download_fw(hw);
+ err = rtl8723_download_fw(hw, false);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now..\n");
err = 1;
- rtlhal->fw_ready = false;
- return err;
+ goto exit;
} else {
rtlhal->fw_ready = true;
}
@@ -971,6 +976,8 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
}
rtl8723ae_dm_init(hw);
+exit:
+ local_irq_restore(flags);
rtlpriv->rtlhal.being_init_adapter = false;
return err;
}
@@ -1112,12 +1119,13 @@ static int _rtl8723ae_set_media_status(struct ieee80211_hw *hw,
void rtl8723ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u32 reg_rcr = rtlpci->receive_config;
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid == true) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
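
Rather than trusting the receive_config value cached in rtl_pci, the function now reads the live RCR through the HW_VAR_RCR accessor before toggling the CBSSID bits. A sketch of the resulting read-modify-write; the check_bssid == false branch is inferred from the flag symmetry and is not part of the hunk shown:

/* Sketch of the new RCR handling, assuming get_hw_reg/set_hw_reg copy
 * the 32-bit register value through the byte pointer. */
static void example_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 reg_rcr;

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
	if (check_bssid)
		reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
	else
		reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
}
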
@@ -1153,7 +1161,7 @@ void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- rtl8723ae_dm_init_edca_turbo(hw);
+ rtl8723_dm_init_edca_turbo(hw);
switch (aci) {
case AC1_BK:
rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
@@ -1614,10 +1622,10 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
rtl8723ae_read_bt_coexist_info_from_hwpg(hw,
rtlefuse->autoload_failflag, hwinfo);
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
@@ -1655,7 +1663,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
CHK_SVID_SMID(0x10EC, 0x9185))
rtlhal->oem_id = RT_CID_TOSHIBA;
else if (rtlefuse->eeprom_svid == 0x1025)
- rtlhal->oem_id = RT_CID_819x_Acer;
+ rtlhal->oem_id = RT_CID_819X_ACER;
else if (CHK_SVID_SMID(0x10EC, 0x6191) ||
CHK_SVID_SMID(0x10EC, 0x6192) ||
CHK_SVID_SMID(0x10EC, 0x6193) ||
@@ -1665,7 +1673,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
CHK_SVID_SMID(0x10EC, 0x8191) ||
CHK_SVID_SMID(0x10EC, 0x8192) ||
CHK_SVID_SMID(0x10EC, 0x8193))
- rtlhal->oem_id = RT_CID_819x_SAMSUNG;
+ rtlhal->oem_id = RT_CID_819X_SAMSUNG;
else if (CHK_SVID_SMID(0x10EC, 0x8195) ||
CHK_SVID_SMID(0x10EC, 0x9195) ||
CHK_SVID_SMID(0x10EC, 0x7194) ||
@@ -1673,24 +1681,24 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
CHK_SVID_SMID(0x10EC, 0x8201) ||
CHK_SVID_SMID(0x10EC, 0x8202) ||
CHK_SVID_SMID(0x10EC, 0x9200))
- rtlhal->oem_id = RT_CID_819x_Lenovo;
+ rtlhal->oem_id = RT_CID_819X_LENOVO;
else if (CHK_SVID_SMID(0x10EC, 0x8197) ||
CHK_SVID_SMID(0x10EC, 0x9196))
- rtlhal->oem_id = RT_CID_819x_CLEVO;
+ rtlhal->oem_id = RT_CID_819X_CLEVO;
else if (CHK_SVID_SMID(0x1028, 0x8194) ||
CHK_SVID_SMID(0x1028, 0x8198) ||
CHK_SVID_SMID(0x1028, 0x9197) ||
CHK_SVID_SMID(0x1028, 0x9198))
- rtlhal->oem_id = RT_CID_819x_DELL;
+ rtlhal->oem_id = RT_CID_819X_DELL;
else if (CHK_SVID_SMID(0x103C, 0x1629))
- rtlhal->oem_id = RT_CID_819x_HP;
+ rtlhal->oem_id = RT_CID_819X_HP;
else if (CHK_SVID_SMID(0x1A32, 0x2315))
- rtlhal->oem_id = RT_CID_819x_QMI;
+ rtlhal->oem_id = RT_CID_819X_QMI;
else if (CHK_SVID_SMID(0x10EC, 0x8203))
- rtlhal->oem_id = RT_CID_819x_PRONETS;
+ rtlhal->oem_id = RT_CID_819X_PRONETS;
else if (CHK_SVID_SMID(0x1043, 0x84B5))
rtlhal->oem_id =
- RT_CID_819x_Edimax_ASUS;
+ RT_CID_819X_EDIMAX_ASUS;
else
rtlhal->oem_id = RT_CID_DEFAULT;
} else if (rtlefuse->eeprom_did == 0x8178) {
@@ -1712,12 +1720,12 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
CHK_SVID_SMID(0x10EC, 0x9185))
rtlhal->oem_id = RT_CID_TOSHIBA;
else if (rtlefuse->eeprom_svid == 0x1025)
- rtlhal->oem_id = RT_CID_819x_Acer;
+ rtlhal->oem_id = RT_CID_819X_ACER;
else if (CHK_SVID_SMID(0x10EC, 0x8186))
- rtlhal->oem_id = RT_CID_819x_PRONETS;
+ rtlhal->oem_id = RT_CID_819X_PRONETS;
else if (CHK_SVID_SMID(0x1043, 0x8486))
rtlhal->oem_id =
- RT_CID_819x_Edimax_ASUS;
+ RT_CID_819X_EDIMAX_ASUS;
else
rtlhal->oem_id = RT_CID_DEFAULT;
} else {
@@ -1731,7 +1739,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
rtlhal->oem_id = RT_CID_CCX;
break;
case EEPROM_CID_QMI:
- rtlhal->oem_id = RT_CID_819x_QMI;
+ rtlhal->oem_id = RT_CID_819X_QMI;
break;
case EEPROM_CID_WHQL:
break;
@@ -2037,8 +2045,7 @@ void rtl8723ae_update_channel_access_setting(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 sifs_timer;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index 5d318a85eda4..3ea78afdec73 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -30,12 +30,14 @@
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"
#include "table.h"
+#include "../rtl8723com/phy_common.h"
/* static forward definitions */
static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
@@ -43,72 +45,17 @@ static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath,
u32 offset, u32 data);
-static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset);
-static void _phy_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset, u32 data);
-static u32 _phy_calculate_bit_shift(u32 bitmask);
static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw);
static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype);
static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype);
-static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw);
-static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
- u32 cmdtableidx, u32 cmdtablesz,
- enum swchnlcmd_id cmdid,
- u32 para1, u32 para2,
- u32 msdelay);
static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
u8 *stage, u8 *step, u32 *delay);
static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
enum wireless_mode wirelessmode,
long power_indbm);
-static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
- enum wireless_mode wirelessmode, u8 txpwridx);
static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw);
-u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
- u32 bitmask)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 returnvalue, originalvalue, bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
- originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _phy_calculate_bit_shift(bitmask);
- returnvalue = (originalvalue & bitmask) >> bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask, regaddr,
- originalvalue);
-
- return returnvalue;
-}
-
-void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 originalvalue, bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr,
- bitmask, data);
-
- if (bitmask != MASKDWORD) {
- originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _phy_calculate_bit_shift(bitmask);
- data = ((originalvalue & (~bitmask)) | (data << bitshift));
- }
-
- rtl_write_dword(rtlpriv, regaddr, data);
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
-}
-
u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr, u32 bitmask)
{
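
The removed rtl8723ae_phy_query_bb_reg()/rtl8723ae_phy_set_bb_reg() pair is superseded by the shared rtl8723_phy_query_bb_reg()/rtl8723_phy_set_bb_reg() wired up in sw.c later in this patch. The technique itself is unchanged: the lowest set bit of the mask gives the field's shift, and writes are read-modify-write so neighbouring bits survive. A condensed sketch based on the deleted code:

/* Lowest-set-bit position of the mask (logic from the deleted helper). */
static u32 calc_bit_shift(u32 bitmask)
{
	u32 i;

	for (i = 0; i <= 31; i++)
		if ((bitmask >> i) & 0x1)
			break;
	return i;
}

/* Read-modify-write of one masked register field, as the deleted
 * set_bb_reg did: bits outside the mask are preserved. */
static void example_set_field(struct rtl_priv *rtlpriv, u32 regaddr,
			      u32 bitmask, u32 data)
{
	u32 reg = rtl_read_dword(rtlpriv, regaddr);

	reg = (reg & ~bitmask) | (data << calc_bit_shift(bitmask));
	rtl_write_dword(rtlpriv, regaddr, reg);
}
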
@@ -124,11 +71,11 @@ u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
if (rtlphy->rf_mode != RF_OP_BY_FW)
- original_value = _phy_rf_serial_read(hw, rfpath, regaddr);
+ original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
else
original_value = _phy_fw_rf_serial_read(hw, rfpath, regaddr);
- bitshift = _phy_calculate_bit_shift(bitmask);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
@@ -157,19 +104,19 @@ void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
if (rtlphy->rf_mode != RF_OP_BY_FW) {
if (bitmask != RFREG_OFFSET_MASK) {
- original_value = _phy_rf_serial_read(hw, rfpath,
- regaddr);
- bitshift = _phy_calculate_bit_shift(bitmask);
+ original_value = rtl8723_phy_rf_serial_read(hw, rfpath,
+ regaddr);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
data = ((original_value & (~bitmask)) |
(data << bitshift));
}
- _phy_rf_serial_write(hw, rfpath, regaddr, data);
+ rtl8723_phy_rf_serial_write(hw, rfpath, regaddr, data);
} else {
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _phy_fw_rf_serial_read(hw, rfpath,
regaddr);
- bitshift = _phy_calculate_bit_shift(bitmask);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
data = ((original_value & (~bitmask)) |
(data << bitshift));
}
@@ -197,87 +144,6 @@ static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
RT_ASSERT(false, "deprecated!\n");
}
-static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
- u32 newoffset;
- u32 tmplong, tmplong2;
- u8 rfpi_enable = 0;
- u32 retvalue;
-
- offset &= 0x3f;
- newoffset = offset;
- if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
- return 0xFFFFFFFF;
- }
- tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
- if (rfpath == RF90_PATH_A)
- tmplong2 = tmplong;
- else
- tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
- tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
- (newoffset << 23) | BLSSIREADEDGE;
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
- tmplong & (~BLSSIREADEDGE));
- mdelay(1);
- rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
- mdelay(1);
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
- tmplong | BLSSIREADEDGE);
- mdelay(1);
- if (rfpath == RF90_PATH_A)
- rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
- BIT(8));
- else if (rfpath == RF90_PATH_B)
- rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
- BIT(8));
- if (rfpi_enable)
- retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
- BLSSIREADBACKDATA);
- else
- retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
- BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
- return retvalue;
-}
-
-static void _phy_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset, u32 data)
-{
- u32 data_and_addr;
- u32 newoffset;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
- if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
- return;
- }
- offset &= 0x3f;
- newoffset = offset;
- data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
- rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
-}
-
-static u32 _phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
-}
-
static void _rtl8723ae_phy_bb_config_1t(struct ieee80211_hw *hw)
{
rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
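
The LSSI 3-wire serial accessors move to rtl8723com as well. For reference, the write path packs a 6-bit RF register offset and 20 bits of data into one baseband write, exactly as the deleted _phy_rf_serial_write() did. A worked example of that encoding with illustrative values:

/* Illustrative values only: RF register offset 0x18, data 0x00241. */
static u32 example_3wire_word(void)
{
	u32 offset = 0x18 & 0x3f;	/* 6-bit RF register index */
	u32 data = 0x00241;		/* 20-bit payload */

	/* ((0x18 << 20) | 0x00241) & 0x0fffffff == 0x01800241, which the
	 * deleted helper wrote to rf3wire_offset with MASKDWORD. */
	return ((offset << 20) | (data & 0x000fffff)) & 0x0fffffff;
}
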
@@ -307,7 +173,7 @@ bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw)
u8 tmpu1b;
u8 reg_hwparafile = 1;
- _phy_init_bb_rf_reg_def(hw);
+ rtl8723_phy_init_bb_rf_reg_def(hw);
/* 1. 0x28[1] = 1 */
tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_PLL_CTRL);
@@ -412,18 +278,7 @@ static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype)
phy_regarray_table = RTL8723EPHY_REG_1TARRAY;
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_arraylen; i = i + 2) {
- if (phy_regarray_table[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table[i]);
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
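
The open-coded delay ladder keyed on the magic addresses 0xfe..0xf9 is folded into rtl_addr_delay() from ../core.c (now included at the top of this file). The helper presumably keeps the mapping of the deleted code; a sketch of that assumed behaviour:

/* Assumed behaviour, mirroring the ladder deleted above. */
static void addr_delay_sketch(u32 addr)
{
	if (addr == 0xfe)
		mdelay(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		udelay(50);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
	/* any other value is a real register address: no delay */
}
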
@@ -585,18 +440,7 @@ static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype)
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
- if (phy_regarray_table_pg[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table_pg[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table_pg[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table_pg[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table_pg[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table_pg[i]);
_st_pwrIdx_dfrate_off(hw, phy_regarray_table_pg[i],
phy_regarray_table_pg[i + 1],
@@ -623,24 +467,9 @@ bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
- if (radioa_array_table[i] == 0xfe)
- mdelay(50);
- else if (radioa_array_table[i] == 0xfd)
- mdelay(5);
- else if (radioa_array_table[i] == 0xfc)
- mdelay(1);
- else if (radioa_array_table[i] == 0xfb)
- udelay(50);
- else if (radioa_array_table[i] == 0xfa)
- udelay(5);
- else if (radioa_array_table[i] == 0xf9)
- udelay(1);
- else {
- rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
- RFREG_OFFSET_MASK,
- radioa_array_table[i + 1]);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
}
break;
case RF90_PATH_B:
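
rtl_rfreg_delay() likewise absorbs the per-entry handling of the RF table: magic addresses turn into delays, anything else becomes an RF register write followed by a 1 us settle, as in the loop body deleted above. A sketch of the assumed behaviour (the range test is an inference; the write and delay come straight from the removed code):

/* Assumed behaviour, based on the deleted per-entry loop body. */
static void rfreg_delay_sketch(struct ieee80211_hw *hw,
			       enum radio_path rfpath, u32 addr,
			       u32 mask, u32 data)
{
	if (addr >= 0xf9 && addr <= 0xfe) {
		addr_delay_sketch(addr);	/* sketch from above */
	} else {
		rtl_set_rfreg(hw, rfpath, addr, mask, data);
		udelay(1);
	}
}
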
@@ -690,92 +519,6 @@ void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
-static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
- rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
- rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
- rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
- rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
- RFPGA0_XA_LSSIPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
- RFPGA0_XB_LSSIPARAMETER;
-
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
-
- rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
- rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
- rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
- rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
- rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
- rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
-
- rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
-}
-
void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -785,17 +528,17 @@ void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
long txpwr_dbm;
txpwr_level = rtlphy->cur_cck_txpwridx;
- txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level);
+ txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level);
txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
rtlefuse->legacy_ht_txpowerdiff;
- if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm)
- txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+ if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm)
+ txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
txpwr_level);
txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
- if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) >
+ if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) >
txpwr_dbm)
- txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
- txpwr_level);
+ txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+ txpwr_level);
*powerlevel = txpwr_dbm;
}
@@ -912,28 +655,6 @@ static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
return txpwridx;
}
-static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
- enum wireless_mode wirelessmode, u8 txpwridx)
-{
- long offset;
- long pwrout_dbm;
-
- switch (wirelessmode) {
- case WIRELESS_MODE_B:
- offset = -7;
- break;
- case WIRELESS_MODE_G:
- case WIRELESS_MODE_N_24G:
- offset = -8;
- break;
- default:
- offset = -8;
- break;
- }
- pwrout_dbm = txpwridx / 2 + offset;
- return pwrout_dbm;
-}
-
void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
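
The index-to-dBm conversion now comes from rtl8723_phy_txpwr_idx_to_dbm() in phy_common. The deleted local copy documents the formula: dBm = index / 2 + offset, with offset -7 for CCK (802.11b) and -8 for the OFDM modes; for example, index 0x22 (34) gives 34 / 2 - 7 = 10 dBm in CCK and 9 dBm otherwise. A sketch of the same arithmetic:

/* Formula from the deleted helper: dBm = txpwridx / 2 + offset,
 * where offset is -7 for 802.11b (CCK) and -8 for G/N rates. */
static long txpwr_idx_to_dbm_sketch(enum wireless_mode mode, u8 txpwridx)
{
	long offset = (mode == WIRELESS_MODE_B) ? -7 : -8;

	return txpwridx / 2 + offset;	/* e.g. 0x22 -> 10 dBm in CCK */
}
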
@@ -1117,26 +838,26 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
u8 num_total_rfpath = rtlphy->num_total_rfpath;
precommoncmdcnt = 0;
- _phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
- MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL,
- 0, 0, 0);
- _phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
- MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+ rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL,
+ 0, 0, 0);
+ rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
postcommoncmdcnt = 0;
- _phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
- MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
+ rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+ MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
rfdependcmdcnt = 0;
RT_ASSERT((channel >= 1 && channel <= 14),
"illegal channel for Zebra: %d\n", channel);
- _phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
- MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
+ rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
RF_CHNLBW, channel, 10);
- _phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
- MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
+ rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
do {
switch (*stage) {
@@ -1204,29 +925,6 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
return false;
}
-static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
- u32 cmdtableidx, u32 cmdtablesz,
- enum swchnlcmd_id cmdid, u32 para1,
- u32 para2, u32 msdelay)
-{
- struct swchnlcmd *pcmd;
-
- if (cmdtable == NULL) {
- RT_ASSERT(false, "cmdtable cannot be NULL.\n");
- return false;
- }
-
- if (cmdtableidx >= cmdtablesz)
- return false;
-
- pcmd = cmdtable + cmdtableidx;
- pcmd->cmdid = cmdid;
- pcmd->para1 = para1;
- pcmd->para2 = para2;
- pcmd->msdelay = msdelay;
- return true;
-}
-
static u8 _rtl8723ae_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
{
u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
@@ -1297,136 +995,6 @@ static u8 _rtl8723ae_phy_path_b_iqk(struct ieee80211_hw *hw)
return result;
}
-static void phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, bool iqk_ok,
- long result[][8], u8 final_candidate,
- bool btxonly)
-{
- u32 oldval_0, x, tx0_a, reg;
- long y, tx0_c;
-
- if (final_candidate == 0xFF) {
- return;
- } else if (iqk_ok) {
- oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD) >> 22) & 0x3FF;
- x = result[final_candidate][0];
- if ((x & 0x00000200) != 0)
- x = x | 0xFFFFFC00;
- tx0_a = (x * oldval_0) >> 8;
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
- ((x * oldval_0 >> 7) & 0x1));
- y = result[final_candidate][1];
- if ((y & 0x00000200) != 0)
- y = y | 0xFFFFFC00;
- tx0_c = (y * oldval_0) >> 8;
- rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
- ((tx0_c & 0x3C0) >> 6));
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
- (tx0_c & 0x3F));
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
- ((y * oldval_0 >> 7) & 0x1));
- if (btxonly)
- return;
- reg = result[final_candidate][2];
- rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
- reg = result[final_candidate][3] & 0x3F;
- rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
- reg = (result[final_candidate][3] >> 6) & 0xF;
- rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
- }
-}
-
-static void phy_save_adda_regs(struct ieee80211_hw *hw,
- u32 *addareg, u32 *addabackup,
- u32 registernum)
-{
- u32 i;
-
- for (i = 0; i < registernum; i++)
- addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
-}
-
-static void phy_save_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
- u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
- macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
- macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
-}
-
-static void phy_reload_adda_regs(struct ieee80211_hw *hw, u32 *addareg,
- u32 *addabackup, u32 regiesternum)
-{
- u32 i;
-
- for (i = 0; i < regiesternum; i++)
- rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
-}
-
-static void phy_reload_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
- u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
- rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
- rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
-}
-
-static void _rtl8723ae_phy_path_adda_on(struct ieee80211_hw *hw,
- u32 *addareg, bool is_patha_on,
- bool is2t)
-{
- u32 pathOn;
- u32 i;
-
- pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
- if (false == is2t) {
- pathOn = 0x0bdb25a0;
- rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
- } else {
- rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
- }
-
- for (i = 1; i < IQK_ADDA_REG_NUM; i++)
- rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
-}
-
-static void _rtl8723ae_phy_mac_setting_calibration(struct ieee80211_hw *hw,
- u32 *macreg, u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i = 0;
-
- rtl_write_byte(rtlpriv, macreg[i], 0x3F);
-
- for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
- rtl_write_byte(rtlpriv, macreg[i],
- (u8) (macbackup[i] & (~BIT(3))));
- rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
-}
-
-static void _rtl8723ae_phy_path_a_standby(struct ieee80211_hw *hw)
-{
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
- rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
-}
-
-static void _rtl8723ae_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
-{
- u32 mode;
-
- mode = pi_mode ? 0x01000100 : 0x01000000;
- rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
- rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
-}
-
static bool phy_simularity_comp(struct ieee80211_hw *hw, long result[][8],
u8 c1, u8 c2)
{
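
All of the IQK bookkeeping helpers (ADDA/MAC register save and reload, path setup, path-A standby, PI-mode switch, IQK matrix fill) now live in rtl8723com/phy_common as rtl8723_* functions. The save/reload pair is a plain array round-trip; a condensed sketch of that pattern, taken from the deleted phy_save_adda_regs()/phy_reload_adda_regs():

/* Back up a list of baseband registers before IQK, restore them after. */
static void save_regs_sketch(struct ieee80211_hw *hw, const u32 *addrs,
			     u32 *backup, u32 num)
{
	u32 i;

	for (i = 0; i < num; i++)
		backup[i] = rtl_get_bbreg(hw, addrs[i], MASKDWORD);
}

static void reload_regs_sketch(struct ieee80211_hw *hw, const u32 *addrs,
			       const u32 *backup, u32 num)
{
	u32 i;

	for (i = 0; i < num; i++)
		rtl_set_bbreg(hw, addrs[i], MASKDWORD, backup[i]);
}
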
@@ -1498,10 +1066,12 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
const u32 retrycount = 2;
if (t == 0) {
- phy_save_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
- phy_save_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
+ rtl8723_save_adda_registers(hw, adda_reg, rtlphy->adda_backup,
+ 16);
+ rtl8723_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
}
- _rtl8723ae_phy_path_adda_on(hw, adda_reg, true, is2t);
+ rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t);
if (t == 0) {
rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
RFPGA0_XA_HSSIPARAMETER1,
@@ -1509,7 +1079,7 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
}
if (!rtlphy->rfpi_enable)
- _rtl8723ae_phy_pi_mode_switch(hw, true);
+ rtl8723_phy_pi_mode_switch(hw, true);
if (t == 0) {
rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
@@ -1522,7 +1092,7 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
}
- _rtl8723ae_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg,
rtlphy->iqk_mac_backup);
rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
if (is2t)
@@ -1552,8 +1122,8 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
}
if (is2t) {
- _rtl8723ae_phy_path_a_standby(hw);
- _rtl8723ae_phy_path_adda_on(hw, adda_reg, false, is2t);
+ rtl8723_phy_path_a_standby(hw);
+ rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t);
for (i = 0; i < retrycount; i++) {
pathb_ok = _rtl8723ae_phy_path_b_iqk(hw);
if (pathb_ok == 0x03) {
@@ -1588,9 +1158,11 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
if (t != 0) {
if (!rtlphy->rfpi_enable)
- _rtl8723ae_phy_pi_mode_switch(hw, false);
- phy_reload_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
- phy_reload_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
+ rtl8723_phy_pi_mode_switch(hw, false);
+ rtl8723_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, 16);
+ rtl8723_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
}
}
@@ -1691,7 +1263,8 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
};
if (recovery) {
- phy_reload_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
+ rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup, 10);
return;
}
if (start_conttx || singletone)
@@ -1756,9 +1329,10 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
}
if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
- phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
- final_candidate, (reg_ea4 == 0));
- phy_save_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
+ rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
+ final_candidate,
+ (reg_ea4 == 0));
+ rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
}
void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
index 007ebdbbe108..cd43139ed332 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
@@ -76,23 +76,6 @@
#define RTL92C_MAX_PATH_NUM 2
-enum swchnlcmd_id {
- CMDID_END,
- CMDID_SET_TXPOWEROWER_LEVEL,
- CMDID_BBREGWRITE10,
- CMDID_WRITEPORT_ULONG,
- CMDID_WRITEPORT_USHORT,
- CMDID_WRITEPORT_UCHAR,
- CMDID_RF_WRITEREG,
-};
-
-struct swchnlcmd {
- enum swchnlcmd_id cmdid;
- u32 para1;
- u32 para2;
- u32 msdelay;
-};
-
enum hw90_block_e {
HW90_BLOCK_MAC = 0,
HW90_BLOCK_PHY0 = 1,
@@ -183,10 +166,6 @@ struct tx_power_struct {
u32 mcs_original_offset[4][16];
};
-u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr,
u32 bitmask);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
index 7a46f9fdf558..a418acb4d0ca 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
@@ -30,7 +30,6 @@
#ifndef __RTL8723E_PWRSEQ_H__
#define __RTL8723E_PWRSEQ_H__
-#include "pwrseqcmd.h"
/*
Check document WM-20110607-Paul-RTL8723A_Power_Architecture-R02.vsd
There are 6 HW Power States:
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
index 199da366c6da..64376b38708b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
@@ -2059,22 +2059,6 @@
#define BWORD1 0xc
#define BWORD 0xf
-#define MASKBYTE0 0xff
-#define MASKBYTE1 0xff00
-#define MASKBYTE2 0xff0000
-#define MASKBYTE3 0xff000000
-#define MASKHWORD 0xffff0000
-#define MASKLWORD 0x0000ffff
-#define MASKDWORD 0xffffffff
-#define MASK12BITS 0xfff
-#define MASKH4BITS 0xf0000000
-#define MASKOFDM_D 0xffc00000
-#define MASKCCK 0x3f3f3f3f
-
-#define MASK4BITS 0x0f
-#define MASK20BITS 0xfffff
-#define RFREG_OFFSET_MASK 0xfffff
-
#define BENABLE 0x1
#define BDISABLE 0x0
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 62b204faf773..1087a3bd07fa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -37,8 +37,11 @@
#include "reg.h"
#include "def.h"
#include "phy.h"
+#include "../rtl8723com/phy_common.h"
#include "dm.h"
#include "hw.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
#include "sw.h"
#include "trx.h"
#include "led.h"
@@ -193,6 +196,11 @@ void rtl8723ae_deinit_sw_vars(struct ieee80211_hw *hw)
}
}
+static bool is_fw_header(struct rtl92c_firmware_header *hdr)
+{
+ return (hdr->signature & 0xfff0) == 0x2300;
+}
+
static struct rtl_hal_ops rtl8723ae_hal_ops = {
.init_sw_vars = rtl8723ae_init_sw_vars,
.deinit_sw_vars = rtl8723ae_deinit_sw_vars,
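
The new is_fw_header() hook lets the shared rtl8723 firmware loader confirm that the blob carries an RTL8723A-style header: only the upper bits of the 16-bit signature are compared, so 0x2300 through 0x230f all match (the low nibble presumably encodes the chip cut). A self-contained illustration of the mask test:

/* 0x2301 & 0xfff0 == 0x2300 -> accepted;  0x5300 & 0xfff0 -> rejected. */
static int sig_matches_8723a(unsigned int signature)
{
	return (signature & 0xfff0) == 0x2300;
}
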
@@ -231,13 +239,14 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
.set_key = rtl8723ae_set_key,
.init_sw_leds = rtl8723ae_init_sw_leds,
.allow_all_destaddr = rtl8723ae_allow_all_destaddr,
- .get_bbreg = rtl8723ae_phy_query_bb_reg,
- .set_bbreg = rtl8723ae_phy_set_bb_reg,
+ .get_bbreg = rtl8723_phy_query_bb_reg,
+ .set_bbreg = rtl8723_phy_set_bb_reg,
.get_rfreg = rtl8723ae_phy_query_rf_reg,
.set_rfreg = rtl8723ae_phy_set_rf_reg,
.c2h_command_handle = rtl_8723e_c2h_command_handle,
.bt_wifi_media_status_notify = rtl_8723e_bt_wifi_media_status_notify,
.bt_coex_off_before_lps = rtl8723ae_bt_coex_off_before_lps,
+ .is_fw_header = is_fw_header,
};
static struct rtl_mod_params rtl8723ae_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index 50b7be3f3a60..10b7577b6ae5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -334,7 +334,7 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
/* during testing, hdr could be NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
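
_ieee80211_is_robust_mgmt_frame() is the mac80211 variant that works on the ieee80211_hdr the driver already holds (the un-prefixed helper was changed to take an skb). The decision itself is unchanged; a hedged sketch with the rationale spelled out as a comment:

/* Sketch of the decision above: protected robust management frames are
 * handed to mac80211 undecrypted (presumably so the stack handles their
 * decryption itself), everything else is marked already decrypted. */
static void example_mark_decrypted(struct ieee80211_hdr *hdr,
				   struct ieee80211_rx_status *rx_status)
{
	if (_ieee80211_is_robust_mgmt_frame(hdr) &&
	    ieee80211_has_protected(hdr->frame_control))
		rx_status->flag &= ~RX_FLAG_DECRYPTED;
	else
		rx_status->flag |= RX_FLAG_DECRYPTED;
}
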
@@ -365,7 +365,7 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcdesc)
@@ -375,7 +375,7 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool defaultadapter = true;
- u8 *pdesc = (u8 *) pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 fw_qsel = _rtl8723ae_map_hwqueue_to_fwqueue(skb, hw_queue);
@@ -577,7 +577,7 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_OWN(pdesc, 1);
- SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
@@ -597,7 +597,8 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
pdesc, TX_DESC_SIZE);
}
-void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl8723ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
{
if (istx == true) {
switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
index ad05b54bc0f1..4380b7d3a91a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
@@ -521,12 +521,6 @@ do { \
memset(__pdesc, 0, _size); \
} while (0)
-#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs) \
- ((rxmcs) == DESC92_RATE1M || \
- (rxmcs) == DESC92_RATE2M || \
- (rxmcs) == DESC92_RATE5_5M || \
- (rxmcs) == DESC92_RATE11M)
-
struct rx_fwinfo_8723e {
u8 gain_trsw[4];
u8 pwdb_all;
@@ -706,8 +700,8 @@ struct rx_desc_8723e {
} __packed;
void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
@@ -715,7 +709,8 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *status,
struct ieee80211_rx_status *rx_status,
u8 *pdesc, struct sk_buff *skb);
-void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl8723ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name);
void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
new file mode 100644
index 000000000000..59e416abd93a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
@@ -0,0 +1,19 @@
+obj-m := rtl8723be.o
+
+
+rtl8723be-objs := \
+ dm.o \
+ fw.o \
+ hw.o \
+ led.o \
+ phy.o \
+ pwrseq.o \
+ rf.o \
+ sw.o \
+ table.o \
+ trx.o \
+
+
+obj-$(CONFIG_RTL8723BE) += rtl8723be.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/rtlwifi/rtl8723be/def.h
new file mode 100644
index 000000000000..3c30b74e983d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/def.h
@@ -0,0 +1,248 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_DEF_H__
+#define __RTL8723BE_DEF_H__
+
+#define HAL_RETRY_LIMIT_INFRA 48
+#define HAL_RETRY_LIMIT_AP_ADHOC 7
+
+#define RESET_DELAY_8185 20
+
+#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
+#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
+
+#define NUM_OF_FIRMWARE_QUEUE 10
+#define NUM_OF_PAGES_IN_FW 0x100
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
+
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
+
+#define MAX_LINES_HWCONFIG_TXT 1000
+#define MAX_BYTES_LINE_HWCONFIG_TXT 256
+
+#define SW_THREE_WIRE 0
+#define HW_THREE_WIRE 2
+
+#define BT_DEMO_BOARD 0
+#define BT_QA_BOARD 1
+#define BT_FPGA 2
+
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
+#define HAL_PRIME_CHNL_OFFSET_LOWER 1
+#define HAL_PRIME_CHNL_OFFSET_UPPER 2
+
+#define MAX_H2C_QUEUE_NUM 10
+
+#define RX_MPDU_QUEUE 0
+#define RX_CMD_QUEUE 1
+#define RX_MAX_QUEUE 2
+#define AC2QUEUEID(_AC) (_AC)
+
+#define C2H_RX_CMD_HDR_LEN 8
+#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
+#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
+#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
+#define GET_C2H_CMD_CONTINUE(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
+#define GET_C2H_CMD_CONTENT(__prxhdr) \
+ ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
+
+#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
+#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
+#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
+
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
+#define CHIP_BONDING_92C_1T2R 0x1
+
+#define CHIP_8723 BIT(0)
+#define CHIP_8723B (BIT(1) | BIT(2))
+#define NORMAL_CHIP BIT(3)
+#define RF_TYPE_1T1R (~(BIT(4) | BIT(5) | BIT(6)))
+#define RF_TYPE_1T2R BIT(4)
+#define RF_TYPE_2T2R BIT(5)
+#define CHIP_VENDOR_UMC BIT(7)
+#define B_CUT_VERSION BIT(12)
+#define C_CUT_VERSION BIT(13)
+#define D_CUT_VERSION ((BIT(12) | BIT(13)))
+#define E_CUT_VERSION BIT(14)
+#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
+
+/* MASK */
+#define IC_TYPE_MASK (BIT(0) | BIT(1) | BIT(2))
+#define CHIP_TYPE_MASK BIT(3)
+#define RF_TYPE_MASK (BIT(4) | BIT(5) | BIT(6))
+#define MANUFACTUER_MASK BIT(7)
+#define ROM_VERSION_MASK (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+#define CUT_VERSION_MASK (BIT(15) | BIT(14) | BIT(13) | BIT(12))
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK)
+#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK)
+#define GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK)
+#define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK)
+#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK)
+#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK)
+
+#define IS_92C_SERIAL(version) ((IS_81XXC(version) && IS_2T2R(version)) ?\
+ true : false)
+#define IS_81XXC(version) ((GET_CVID_IC_TYPE(version) == 0) ?\
+ true : false)
+#define IS_8723_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8723) ?\
+ true : false)
+#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version)) ? false : true)
+#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\
+ ? true : false)
+#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\
+ ? true : false)
+enum rf_optype {
+ RF_OP_BY_SW_3WIRE = 0,
+ RF_OP_BY_FW,
+ RF_OP_MAX
+};
+
+enum rf_power_state {
+ RF_ON,
+ RF_OFF,
+ RF_SLEEP,
+ RF_SHUT_DOWN,
+};
+
+enum power_save_mode {
+ POWER_SAVE_MODE_ACTIVE,
+ POWER_SAVE_MODE_SAVE,
+};
+
+enum power_polocy_config {
+ POWERCFG_MAX_POWER_SAVINGS,
+ POWERCFG_GLOBAL_POWER_SAVINGS,
+ POWERCFG_LOCAL_POWER_SAVINGS,
+ POWERCFG_LENOVO,
+};
+
+enum interface_select_pci {
+ INTF_SEL1_MINICARD = 0,
+ INTF_SEL0_PCIE = 1,
+ INTF_SEL2_RSV = 2,
+ INTF_SEL3_RSV = 3,
+};
+
+enum rtl_desc_qsel {
+ QSLT_BK = 0x2,
+ QSLT_BE = 0x0,
+ QSLT_VI = 0x5,
+ QSLT_VO = 0x7,
+ QSLT_BEACON = 0x10,
+ QSLT_HIGH = 0x11,
+ QSLT_MGNT = 0x12,
+ QSLT_CMD = 0x13,
+};
+
+enum rtl_desc8723e_rate {
+ DESC92C_RATE1M = 0x00,
+ DESC92C_RATE2M = 0x01,
+ DESC92C_RATE5_5M = 0x02,
+ DESC92C_RATE11M = 0x03,
+
+ DESC92C_RATE6M = 0x04,
+ DESC92C_RATE9M = 0x05,
+ DESC92C_RATE12M = 0x06,
+ DESC92C_RATE18M = 0x07,
+ DESC92C_RATE24M = 0x08,
+ DESC92C_RATE36M = 0x09,
+ DESC92C_RATE48M = 0x0a,
+ DESC92C_RATE54M = 0x0b,
+
+ DESC92C_RATEMCS0 = 0x0c,
+ DESC92C_RATEMCS1 = 0x0d,
+ DESC92C_RATEMCS2 = 0x0e,
+ DESC92C_RATEMCS3 = 0x0f,
+ DESC92C_RATEMCS4 = 0x10,
+ DESC92C_RATEMCS5 = 0x11,
+ DESC92C_RATEMCS6 = 0x12,
+ DESC92C_RATEMCS7 = 0x13,
+ DESC92C_RATEMCS8 = 0x14,
+ DESC92C_RATEMCS9 = 0x15,
+ DESC92C_RATEMCS10 = 0x16,
+ DESC92C_RATEMCS11 = 0x17,
+ DESC92C_RATEMCS12 = 0x18,
+ DESC92C_RATEMCS13 = 0x19,
+ DESC92C_RATEMCS14 = 0x1a,
+ DESC92C_RATEMCS15 = 0x1b,
+ DESC92C_RATEMCS15_SG = 0x1c,
+ DESC92C_RATEMCS32 = 0x20,
+};
+
+enum rx_packet_type {
+ NORMAL_RX,
+ TX_REPORT1,
+ TX_REPORT2,
+ HIS_REPORT,
+};
+
+struct phy_sts_cck_8723e_t {
+ u8 adc_pwdb_X[4];
+ u8 sq_rpt;
+ u8 cck_agc_rpt;
+};
+
+struct h2c_cmd_8723e {
+ u8 element_id;
+ u32 cmd_len;
+ u8 *p_cmdbuffer;
+};
+
+#endif
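
The C2H (chip-to-host) accessors in def.h above pull the command header fields out of the leading dword with LE_BITS_TO_4BYTE and place the payload C2H_RX_CMD_HDR_LEN (8) bytes into the buffer. A hedged decoding sketch using only the macros defined above:

/* Illustrative decode of a received C2H buffer; field widths follow
 * the macro definitions above. */
static void example_decode_c2h(u8 *c2h_buf)
{
	u32 cmd_len = GET_C2H_CMD_CMD_LEN(c2h_buf);	/* bits 0..15  */
	u32 elem_id = GET_C2H_CMD_ELEMENT_ID(c2h_buf);	/* bits 16..23 */
	u32 seq     = GET_C2H_CMD_CMD_SEQ(c2h_buf);	/* bits 24..30 */
	u32 more    = GET_C2H_CMD_CONTINUE(c2h_buf);	/* bit 31      */
	u8 *payload = GET_C2H_CMD_CONTENT(c2h_buf);	/* buf + 8 bytes */

	(void)cmd_len; (void)elem_id; (void)seq; (void)more; (void)payload;
}
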
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
new file mode 100644
index 000000000000..13d53a1df789
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
@@ -0,0 +1,1325 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "../rtl8723com/dm_common.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "trx.h"
+#include "../btcoexist/rtl_btc.h"
+
+static const u32 ofdmswing_table[] = {
+ 0x0b40002d, /* 0, -15.0dB */
+ 0x0c000030, /* 1, -14.5dB */
+ 0x0cc00033, /* 2, -14.0dB */
+ 0x0d800036, /* 3, -13.5dB */
+ 0x0e400039, /* 4, -13.0dB */
+ 0x0f00003c, /* 5, -12.5dB */
+ 0x10000040, /* 6, -12.0dB */
+ 0x11000044, /* 7, -11.5dB */
+ 0x12000048, /* 8, -11.0dB */
+ 0x1300004c, /* 9, -10.5dB */
+ 0x14400051, /* 10, -10.0dB */
+ 0x15800056, /* 11, -9.5dB */
+ 0x16c0005b, /* 12, -9.0dB */
+ 0x18000060, /* 13, -8.5dB */
+ 0x19800066, /* 14, -8.0dB */
+ 0x1b00006c, /* 15, -7.5dB */
+ 0x1c800072, /* 16, -7.0dB */
+ 0x1e400079, /* 17, -6.5dB */
+ 0x20000080, /* 18, -6.0dB */
+ 0x22000088, /* 19, -5.5dB */
+ 0x24000090, /* 20, -5.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x288000a2, /* 22, -4.0dB */
+ 0x2ac000ab, /* 23, -3.5dB */
+ 0x2d4000b5, /* 24, -3.0dB */
+ 0x300000c0, /* 25, -2.5dB */
+ 0x32c000cb, /* 26, -2.0dB */
+ 0x35c000d7, /* 27, -1.5dB */
+ 0x390000e4, /* 28, -1.0dB */
+ 0x3c8000f2, /* 29, -0.5dB */
+ 0x40000100, /* 30, +0dB */
+ 0x43c0010f, /* 31, +0.5dB */
+ 0x47c0011f, /* 32, +1.0dB */
+ 0x4c000130, /* 33, +1.5dB */
+ 0x50800142, /* 34, +2.0dB */
+ 0x55400155, /* 35, +2.5dB */
+ 0x5a400169, /* 36, +3.0dB */
+ 0x5fc0017f, /* 37, +3.5dB */
+ 0x65400195, /* 38, +4.0dB */
+ 0x6b8001ae, /* 39, +4.5dB */
+ 0x71c001c7, /* 40, +5.0dB */
+ 0x788001e2, /* 41, +5.5dB */
+ 0x7f8001fe /* 42, +6.0dB */
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, /* 0, -16.0dB */
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 1, -15.5dB */
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 2, -15.0dB */
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 3, -14.5dB */
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 4, -14.0dB */
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 5, -13.5dB */
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 6, -13.0dB */
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 7, -12.5dB */
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 8, -12.0dB */
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 9, -11.5dB */
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 10, -11.0dB */
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 11, -10.5dB */
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 12, -10.0dB */
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 13, -9.5dB */
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 14, -9.0dB */
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 15, -8.5dB */
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 17, -7.5dB */
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 18, -7.0dB */
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 19, -6.5dB */
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 20, -6.0dB */
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 21, -5.5dB */
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 22, -5.0dB */
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 23, -4.5dB */
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 24, -4.0dB */
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 25, -3.5dB */
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 26, -3.0dB */
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 27, -2.5dB */
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 28, -2.0dB */
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 29, -1.5dB */
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 30, -1.0dB */
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 31, -0.5dB */
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} /* 32, +0dB */
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, /* 0, -16.0dB */
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 1, -15.5dB */
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 2, -15.0dB */
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 3, -14.5dB */
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 4, -14.0dB */
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 5, -13.5dB */
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 6, -13.0dB */
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 7, -12.5dB */
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 8, -12.0dB */
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 9, -11.5dB */
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 10, -11.0dB */
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 11, -10.5dB */
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 12, -10.0dB */
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 13, -9.5dB */
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 14, -9.0dB */
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 15, -8.5dB */
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 17, -7.5dB */
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 18, -7.0dB */
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 19, -6.5dB */
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 20, -6.0dB */
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 21, -5.5dB */
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 22, -5.0dB */
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 23, -4.5dB */
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 24, -4.0dB */
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 25, -3.5dB */
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 26, -3.0dB */
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 27, -2.5dB */
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 28, -2.0dB */
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 29, -1.5dB */
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 30, -1.0dB */
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 31, -0.5dB */
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} /* 32, +0dB */
+};
+
+static const u32 edca_setting_dl[PEER_MAX] = {
+ 0xa44f, /* 0 UNKNOWN */
+ 0x5ea44f, /* 1 REALTEK_90 */
+ 0x5e4322, /* 2 REALTEK_92SE */
+ 0x5ea42b, /* 3 BROAD */
+ 0xa44f, /* 4 RAL */
+ 0xa630, /* 5 ATH */
+ 0x5ea630, /* 6 CISCO */
+ 0x5ea42b, /* 7 MARVELL */
+};
+
+static const u32 edca_setting_ul[PEER_MAX] = {
+ 0x5e4322, /* 0 UNKNOWN */
+ 0xa44f, /* 1 REALTEK_90 */
+ 0x5ea44f, /* 2 REALTEK_92SE */
+ 0x5ea32b, /* 3 BROAD */
+ 0x5ea422, /* 4 RAL */
+ 0x5ea322, /* 5 ATH */
+ 0x3ea430, /* 6 CISCO */
+ 0x5ea44f, /* 7 MARV */
+};
+
+void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type,
+ u8 *pdirection, u32 *poutwrite_val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 pwr_val = 0;
+ u8 ofdm_base = rtlpriv->dm.swing_idx_ofdm_base[RF90_PATH_A];
+ u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A];
+ u8 cck_base = rtldm->swing_idx_cck_base;
+ u8 cck_val = rtldm->swing_idx_cck;
+
+ if (type == 0) {
+ if (ofdm_val <= ofdm_base) {
+ *pdirection = 1;
+ pwr_val = ofdm_base - ofdm_val;
+ } else {
+ *pdirection = 2;
+ pwr_val = ofdm_val - ofdm_base;
+ }
+ } else if (type == 1) {
+ if (cck_val <= cck_base) {
+ *pdirection = 1;
+ pwr_val = cck_base - cck_val;
+ } else {
+ *pdirection = 2;
+ pwr_val = cck_val - cck_base;
+ }
+ }
+
+ if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1))
+ pwr_val = TXPWRTRACK_MAX_IDX;
+
+ *poutwrite_val = pwr_val | (pwr_val << 8) |
+ (pwr_val << 16) | (pwr_val << 24);
+}
+
+static void rtl8723be_dm_diginit(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+ dm_digtable->dig_enable_flag = true;
+ dm_digtable->cur_igvalue = rtl_get_bbreg(hw,
+ ROFDM0_XAAGCCORE1, 0x7f);
+ dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
+ dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
+ dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+ dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+ dm_digtable->rx_gain_max = DM_DIG_MAX;
+ dm_digtable->rx_gain_min = DM_DIG_MIN;
+ dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+ dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
+ dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
+ dm_digtable->pre_cck_cca_thres = 0xff;
+ dm_digtable->cur_cck_cca_thres = 0x83;
+ dm_digtable->forbidden_igi = DM_DIG_MIN;
+ dm_digtable->large_fa_hit = 0;
+ dm_digtable->recover_cnt = 0;
+ dm_digtable->dig_min_0 = DM_DIG_MIN;
+ dm_digtable->dig_min_1 = DM_DIG_MIN;
+ dm_digtable->media_connect_0 = false;
+ dm_digtable->media_connect_1 = false;
+ rtlpriv->dm.dm_initialgain_enable = true;
+ dm_digtable->bt30_cur_igi = 0x32;
+}
+
+void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rate_adaptive *ra = &(rtlpriv->ra);
+
+ ra->ratr_state = DM_RATR_STA_INIT;
+ ra->pre_ratr_state = DM_RATR_STA_INIT;
+
+ if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+ rtlpriv->dm.useramask = true;
+ else
+ rtlpriv->dm.useramask = false;
+
+ ra->high_rssi_thresh_for_ra = 50;
+ ra->low_rssi_thresh_for_ra40m = 20;
+}
+
+static void rtl8723be_dm_init_txpower_tracking(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.txpower_tracking = true;
+ rtlpriv->dm.txpower_track_control = true;
+ rtlpriv->dm.thermalvalue = 0;
+
+ rtlpriv->dm.ofdm_index[0] = 30;
+ rtlpriv->dm.cck_index = 20;
+
+ rtlpriv->dm.swing_idx_cck_base = rtlpriv->dm.cck_index;
+
+ rtlpriv->dm.swing_idx_ofdm_base[0] = rtlpriv->dm.ofdm_index[0];
+ rtlpriv->dm.delta_power_index[RF90_PATH_A] = 0;
+ rtlpriv->dm.delta_power_index_last[RF90_PATH_A] = 0;
+ rtlpriv->dm.power_index_offset[RF90_PATH_A] = 0;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ " rtlpriv->dm.txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
+}
+
+static void rtl8723be_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap;
+ rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, 0x800);
+ rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL;
+}
+
+void rtl8723be_dm_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ rtl8723be_dm_diginit(hw);
+ rtl8723be_dm_init_rate_adaptive_mask(hw);
+ rtl8723_dm_init_edca_turbo(hw);
+ rtl8723_dm_init_dynamic_bb_powersaving(hw);
+ rtl8723_dm_init_dynamic_txpower(hw);
+ rtl8723be_dm_init_txpower_tracking(hw);
+ rtl8723be_dm_init_dynamic_atc_switch(hw);
+}
+
+static void rtl8723be_dm_find_minimum_rssi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *rtl_dm_dig = &(rtlpriv->dm_digtable);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ /* Determine the minimum RSSI */
+ if ((mac->link_state < MAC80211_LINKED) &&
+ (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
+ rtl_dm_dig->min_undec_pwdb_for_dm = 0;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "Not connected to any\n");
+ }
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_dm_dig->min_undec_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undec_sm_pwdb;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.entry_min_undec_sm_pwdb);
+ } else {
+ rtl_dm_dig->min_undec_pwdb_for_dm =
+ rtlpriv->dm.undec_sm_pwdb;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
+ }
+ } else {
+ rtl_dm_dig->min_undec_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undec_sm_pwdb;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Ext Port or disconnet PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
+ }
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
+}
+
+static void rtl8723be_dm_check_rssi_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *drv_priv;
+ u8 h2c_parameter[3] = { 0 };
+ long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
+
+ /* AP & ADHOC & MESH */
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+ if (drv_priv->rssi_stat.undec_sm_pwdb <
+ tmp_entry_min_pwdb)
+ tmp_entry_min_pwdb =
+ drv_priv->rssi_stat.undec_sm_pwdb;
+ if (drv_priv->rssi_stat.undec_sm_pwdb >
+ tmp_entry_max_pwdb)
+ tmp_entry_max_pwdb =
+ drv_priv->rssi_stat.undec_sm_pwdb;
+ }
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+ /* If associated entry is found */
+ if (tmp_entry_max_pwdb != 0) {
+ rtlpriv->dm.entry_max_undec_sm_pwdb = tmp_entry_max_pwdb;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "EntryMaxPWDB = 0x%lx(%ld)\n",
+ tmp_entry_max_pwdb, tmp_entry_max_pwdb);
+ } else {
+ rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
+ }
+ /* If associated entry is found */
+ if (tmp_entry_min_pwdb != 0xff) {
+ rtlpriv->dm.entry_min_undec_sm_pwdb = tmp_entry_min_pwdb;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "EntryMinPWDB = 0x%lx(%ld)\n",
+ tmp_entry_min_pwdb, tmp_entry_min_pwdb);
+ } else {
+ rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
+ }
+ /* Indicate Rx signal strength to FW. */
+ if (rtlpriv->dm.useramask) {
+ h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
+ h2c_parameter[1] = 0x20;
+ h2c_parameter[0] = 0;
+ rtl8723be_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
+ } else {
+ rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb);
+ }
+ rtl8723be_dm_find_minimum_rssi(hw);
+ rtlpriv->dm_digtable.rssi_val_min =
+ rtlpriv->dm_digtable.min_undec_pwdb_for_dm;
+}
+
+void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->dm_digtable.cur_igvalue != current_igi) {
+ rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, current_igi);
+ if (rtlpriv->phy.rf_type != RF_1T1R)
+ rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, current_igi);
+ }
+ rtlpriv->dm_digtable.pre_igvalue = rtlpriv->dm_digtable.cur_igvalue;
+ rtlpriv->dm_digtable.cur_igvalue = current_igi;
+}
+
+static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct dig_t *dm_digtable = &(rtlpriv->dm_digtable);
+ u8 dig_dynamic_min, dig_maxofmin;
+ bool firstconnect, firstdisconnect;
+ u8 dm_dig_max, dm_dig_min;
+ u8 current_igi = dm_digtable->cur_igvalue;
+ u8 offset;
+
+ /* AP, BT */
+ if (mac->act_scanning)
+ return;
+
+ dig_dynamic_min = dm_digtable->dig_min_0;
+ firstconnect = (mac->link_state >= MAC80211_LINKED) &&
+ !dm_digtable->media_connect_0;
+ firstdisconnect = (mac->link_state < MAC80211_LINKED) &&
+ dm_digtable->media_connect_0;
+
+ dm_dig_max = 0x5a;
+ dm_dig_min = DM_DIG_MIN;
+ dig_maxofmin = DM_DIG_MAX_AP;
+
+ if (mac->link_state >= MAC80211_LINKED) {
+ if ((dm_digtable->rssi_val_min + 10) > dm_dig_max)
+ dm_digtable->rx_gain_max = dm_dig_max;
+ else if ((dm_digtable->rssi_val_min + 10) < dm_dig_min)
+ dm_digtable->rx_gain_max = dm_dig_min;
+ else
+ dm_digtable->rx_gain_max =
+ dm_digtable->rssi_val_min + 10;
+
+ if (rtlpriv->dm.one_entry_only) {
+ offset = 12;
+ if (dm_digtable->rssi_val_min - offset < dm_dig_min)
+ dig_dynamic_min = dm_dig_min;
+ else if (dm_digtable->rssi_val_min - offset >
+ dig_maxofmin)
+ dig_dynamic_min = dig_maxofmin;
+ else
+ dig_dynamic_min =
+ dm_digtable->rssi_val_min - offset;
+ } else {
+ dig_dynamic_min = dm_dig_min;
+ }
+ } else {
+ dm_digtable->rx_gain_max = dm_dig_max;
+ dig_dynamic_min = dm_dig_min;
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
+ }
+
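+ /* False-alarm guard: once the total FA count passes 10000, latch the
+ * current IGI as a forbidden floor and hold rx_gain_min above it
+ * until recover_cnt (3600 watchdog passes) runs out, then step the
+ * floor back down toward dig_dynamic_min.
+ */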
+ if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
+ if (dm_digtable->large_fa_hit != 3)
+ dm_digtable->large_fa_hit++;
+ if (dm_digtable->forbidden_igi < current_igi) {
+ dm_digtable->forbidden_igi = current_igi;
+ dm_digtable->large_fa_hit = 1;
+ }
+
+ if (dm_digtable->large_fa_hit >= 3) {
+ if ((dm_digtable->forbidden_igi + 1) >
+ dm_digtable->rx_gain_max)
+ dm_digtable->rx_gain_min =
+ dm_digtable->rx_gain_max;
+ else
+ dm_digtable->rx_gain_min =
+ dm_digtable->forbidden_igi + 1;
+ dm_digtable->recover_cnt = 3600;
+ }
+ } else {
+ if (dm_digtable->recover_cnt != 0) {
+ dm_digtable->recover_cnt--;
+ } else {
+ if (dm_digtable->large_fa_hit < 3) {
+ if ((dm_digtable->forbidden_igi - 1) <
+ dig_dynamic_min) {
+ dm_digtable->forbidden_igi =
+ dig_dynamic_min;
+ dm_digtable->rx_gain_min =
+ dig_dynamic_min;
+ } else {
+ dm_digtable->forbidden_igi--;
+ dm_digtable->rx_gain_min =
+ dm_digtable->forbidden_igi + 1;
+ }
+ } else {
+ dm_digtable->large_fa_hit = 0;
+ }
+ }
+ }
+ if (dm_digtable->rx_gain_min > dm_digtable->rx_gain_max)
+ dm_digtable->rx_gain_min = dm_digtable->rx_gain_max;
+
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (firstconnect) {
+ if (dm_digtable->rssi_val_min <= dig_maxofmin)
+ current_igi = dm_digtable->rssi_val_min;
+ else
+ current_igi = dig_maxofmin;
+
+ dm_digtable->large_fa_hit = 0;
+ } else {
+ if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2)
+ current_igi += 4;
+ else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1)
+ current_igi += 2;
+ else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
+ current_igi -= 2;
+ }
+ } else {
+ if (firstdisconnect) {
+ current_igi = dm_digtable->rx_gain_min;
+ } else {
+ if (rtlpriv->falsealm_cnt.cnt_all > 10000)
+ current_igi += 4;
+ else if (rtlpriv->falsealm_cnt.cnt_all > 8000)
+ current_igi += 2;
+ else if (rtlpriv->falsealm_cnt.cnt_all < 500)
+ current_igi -= 2;
+ }
+ }
+
+ if (current_igi > dm_digtable->rx_gain_max)
+ current_igi = dm_digtable->rx_gain_max;
+ else if (current_igi < dm_digtable->rx_gain_min)
+ current_igi = dm_digtable->rx_gain_min;
+
+ rtl8723be_dm_write_dig(hw, current_igi);
+ dm_digtable->media_connect_0 =
+ ((mac->link_state >= MAC80211_LINKED) ? true : false);
+ dm_digtable->dig_min_0 = dig_dynamic_min;
+}
+
+static void rtl8723be_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+ u32 ret_value;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1);
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 1);
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE1_11N, MASKDWORD);
+ falsealm_cnt->cnt_fast_fsync_fail = ret_value & 0xffff;
+ falsealm_cnt->cnt_sb_search_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE2_11N, MASKDWORD);
+ falsealm_cnt->cnt_ofdm_cca = ret_value & 0xffff;
+ falsealm_cnt->cnt_parity_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE3_11N, MASKDWORD);
+ falsealm_cnt->cnt_rate_illegal = ret_value & 0xffff;
+ falsealm_cnt->cnt_crc8_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE4_11N, MASKDWORD);
+ falsealm_cnt->cnt_mcs_fail = ret_value & 0xffff;
+
+ falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_fast_fsync_fail +
+ falsealm_cnt->cnt_sb_search_fail;
+
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(12), 1);
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(14), 1);
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_RST_11N, MASKBYTE0);
+ falsealm_cnt->cnt_cck_fail = ret_value;
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_MSB_11N, MASKBYTE3);
+ falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_CCK_CCA_CNT_11N, MASKDWORD);
+ falsealm_cnt->cnt_cck_cca = ((ret_value & 0xff) << 8) |
+ ((ret_value & 0xff00) >> 8);
+
+ falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail +
+ falsealm_cnt->cnt_sb_search_fail +
+ falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_cck_fail;
+
+ falsealm_cnt->cnt_cca_all = falsealm_cnt->cnt_ofdm_cca +
+ falsealm_cnt->cnt_cck_cca;
+
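+ /* The hold bits set above latched the counters while they were read;
+ * pulse the reset bits and release the holds so the next watchdog
+ * period counts false alarms from zero again.
+ */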
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 1);
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 0);
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 1);
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 0);
+
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 0);
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 0);
+
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 0);
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 2);
+
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 0);
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, "
+ "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail,
+ falsealm_cnt->cnt_mcs_fail);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x,"
+ " cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail,
+ falsealm_cnt->cnt_all);
+}
+
+static void rtl8723be_dm_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ /* 8723BE does not support ODM_BB_DYNAMIC_TXPWR */
+ return;
+}
+
+static void rtl8723be_set_iqk_matrix(struct ieee80211_hw *hw, u8 ofdm_index,
+ u8 rfpath, long iqk_result_x,
+ long iqk_result_y)
+{
+ long ele_a = 0, ele_d, ele_c = 0, value32;
+
+ if (ofdm_index >= 43)
+ ofdm_index = 43 - 1;
+
+ ele_d = (ofdmswing_table[ofdm_index] & 0xFFC00000) >> 22;
+
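+ /* iqk_result_x/y are 10-bit signed values from the IQK result;
+ * sign-extend them before scaling against ele_d.
+ */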
+ if (iqk_result_x != 0) {
+ if ((iqk_result_x & 0x00000200) != 0)
+ iqk_result_x = iqk_result_x | 0xFFFFFC00;
+ ele_a = ((iqk_result_x * ele_d) >> 8) & 0x000003FF;
+
+ if ((iqk_result_y & 0x00000200) != 0)
+ iqk_result_y = iqk_result_y | 0xFFFFFC00;
+ ele_c = ((iqk_result_y * ele_d) >> 8) & 0x000003FF;
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ value32 = (ele_d << 22) |
+ ((ele_c & 0x3F) << 16) | ele_a;
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+ value32);
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32);
+ value32 = ((iqk_result_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
+ value32);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (rfpath) {
+ case RF90_PATH_A:
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+ ofdmswing_table[ofdm_index]);
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0x00);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void rtl8723be_dm_tx_power_track_set_power(struct ieee80211_hw *hw,
+ enum pwr_track_control_method method,
+ u8 rfpath, u8 idx)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 swing_idx_ofdm_limit = 36;
+
+ if (method == TXAGC) {
+ rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel);
+ } else if (method == BBSWING) {
+ if (rtldm->swing_idx_cck >= CCK_TABLE_SIZE)
+ rtldm->swing_idx_cck = CCK_TABLE_SIZE - 1;
+
+ if (!rtldm->cck_inch14) {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][7]);
+ } else {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch14[rtldm->swing_idx_cck][0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch14[rtldm->swing_idx_cck][1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch14[rtldm->swing_idx_cck][2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch14[rtldm->swing_idx_cck][3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch14[rtldm->swing_idx_cck][4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch14[rtldm->swing_idx_cck][5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch14[rtldm->swing_idx_cck][6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch14[rtldm->swing_idx_cck][7]);
+ }
+
+ if (rfpath == RF90_PATH_A) {
+ if (rtldm->swing_idx_ofdm[RF90_PATH_A] <
+ swing_idx_ofdm_limit)
+ swing_idx_ofdm_limit =
+ rtldm->swing_idx_ofdm[RF90_PATH_A];
+
+ rtl8723be_set_iqk_matrix(hw,
+ rtldm->swing_idx_ofdm[rfpath], rfpath,
+ rtlphy->iqk_matrix[idx].value[0][0],
+ rtlphy->iqk_matrix[idx].value[0][1]);
+ } else if (rfpath == RF90_PATH_B) {
+ if (rtldm->swing_idx_ofdm[RF90_PATH_B] <
+ swing_idx_ofdm_limit)
+ swing_idx_ofdm_limit =
+ rtldm->swing_idx_ofdm[RF90_PATH_B];
+
+ rtl8723be_set_iqk_matrix(hw,
+ rtldm->swing_idx_ofdm[rfpath], rfpath,
+ rtlphy->iqk_matrix[idx].value[0][4],
+ rtlphy->iqk_matrix[idx].value[0][5]);
+ }
+ } else {
+ return;
+ }
+}
+
+static void txpwr_track_cb_therm(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 thermalvalue = 0, delta, delta_lck, delta_iqk;
+ u8 thermalvalue_avg_count = 0;
+ u32 thermalvalue_avg = 0;
+ int i = 0;
+
+ u8 ofdm_min_index = 6;
+ u8 index = 0;
+
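+ /* BB swing index offsets for path A, indexed by how far the thermal
+ * meter has drifted above (tup) or below (tdown) the eFuse
+ * calibration value.
+ */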
+ char delta_swing_table_idx_tup_a[] = {
+ 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 14, 15};
+ char delta_swing_table_idx_tdown_a[] = {
+ 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5,
+ 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 12, 13, 14, 15};
+
+ /* Initialization (7 steps in total) */
+ rtlpriv->dm.txpower_trackinginit = true;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "rtl8723be_dm_txpower_tracking"
+ "_callback_thermalmeter\n");
+
+ thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xfc00);
+ if (!rtlpriv->dm.txpower_track_control || thermalvalue == 0 ||
+ rtlefuse->eeprom_thermalmeter == 0xFF)
+ return;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+ "eeprom_thermalmeter 0x%x\n",
+ thermalvalue, rtldm->thermalvalue,
+ rtlefuse->eeprom_thermalmeter);
+ /* 3 Initialize ThermalValues of RFCalibrateInfo */
+ if (!rtldm->thermalvalue) {
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+ }
+
+ /* 4 Calculate average thermal meter */
+ rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermalvalue;
+ rtldm->thermalvalue_avg_index++;
+ if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8723BE)
+ rtldm->thermalvalue_avg_index = 0;
+
+ for (i = 0; i < AVG_THERMAL_NUM_8723BE; i++) {
+ if (rtldm->thermalvalue_avg[i]) {
+ thermalvalue_avg += rtldm->thermalvalue_avg[i];
+ thermalvalue_avg_count++;
+ }
+ }
+
+ if (thermalvalue_avg_count)
+ thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count);
+
+ /* 5 Calculate delta, delta_LCK, delta_IQK.*/
+ delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue) :
+ (rtlpriv->dm.thermalvalue - thermalvalue);
+ delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
+ (rtlpriv->dm.thermalvalue_lck - thermalvalue);
+ delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
+ (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+ "eeprom_thermalmeter 0x%x delta 0x%x "
+ "delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk);
+ /* 6 If necessary, do LCK.*/
+ if (delta_lck >= IQK_THRESHOLD) {
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtl8723be_phy_lc_calibrate(hw);
+ }
+
+ /* 7 If necessary, move the swing table
+ * index to adjust Tx power.
+ */
+ if (delta > 0 && rtlpriv->dm.txpower_track_control) {
+ delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
+ (thermalvalue - rtlefuse->eeprom_thermalmeter) :
+ (rtlefuse->eeprom_thermalmeter - thermalvalue);
+
+ if (delta >= TXSCALE_TABLE_SIZE)
+ delta = TXSCALE_TABLE_SIZE - 1;
+ /* 7.1 Get the final CCK_index and
+ * OFDM_index for each swing table.
+ */
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
+ rtldm->delta_power_index_last[RF90_PATH_A] =
+ rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] =
+ delta_swing_table_idx_tup_a[delta];
+ } else {
+ rtldm->delta_power_index_last[RF90_PATH_A] =
+ rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] =
+ -1 * delta_swing_table_idx_tdown_a[delta];
+ }
+
+ /* 7.2 Handle boundary conditions of index.*/
+ if (rtldm->delta_power_index[RF90_PATH_A] ==
+ rtldm->delta_power_index_last[RF90_PATH_A])
+ rtldm->power_index_offset[RF90_PATH_A] = 0;
+ else
+ rtldm->power_index_offset[RF90_PATH_A] =
+ rtldm->delta_power_index[RF90_PATH_A] -
+ rtldm->delta_power_index_last[RF90_PATH_A];
+
+ rtldm->ofdm_index[0] =
+ rtldm->swing_idx_ofdm_base[RF90_PATH_A] +
+ rtldm->power_index_offset[RF90_PATH_A];
+ rtldm->cck_index = rtldm->swing_idx_cck_base +
+ rtldm->power_index_offset[RF90_PATH_A];
+
+ rtldm->swing_idx_cck = rtldm->cck_index;
+ rtldm->swing_idx_ofdm[0] = rtldm->ofdm_index[0];
+
+ if (rtldm->ofdm_index[0] > OFDM_TABLE_SIZE - 1)
+ rtldm->ofdm_index[0] = OFDM_TABLE_SIZE - 1;
+ else if (rtldm->ofdm_index[0] < ofdm_min_index)
+ rtldm->ofdm_index[0] = ofdm_min_index;
+
+ if (rtldm->cck_index > CCK_TABLE_SIZE - 1)
+ rtldm->cck_index = CCK_TABLE_SIZE - 1;
+ else if (rtldm->cck_index < 0)
+ rtldm->cck_index = 0;
+ } else {
+ rtldm->power_index_offset[RF90_PATH_A] = 0;
+ }
+
+ if ((rtldm->power_index_offset[RF90_PATH_A] != 0) &&
+ (rtldm->txpower_track_control)) {
+ rtldm->done_txpower = true;
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter)
+ rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0,
+ index);
+ else
+ rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0,
+ index);
+
+ rtldm->swing_idx_cck_base = rtldm->swing_idx_cck;
+ rtldm->swing_idx_ofdm_base[RF90_PATH_A] =
+ rtldm->swing_idx_ofdm[0];
+ rtldm->thermalvalue = thermalvalue;
+ }
+
+ if (delta_iqk >= IQK_THRESHOLD) {
+ rtldm->thermalvalue_iqk = thermalvalue;
+ rtl8723be_phy_iq_calibrate(hw, false);
+ }
+
+ rtldm->txpowercount = 0;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
+}
+
+void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ static u8 tm_trigger;
+
+ if (!rtlpriv->dm.txpower_tracking)
+ return;
+
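+ /* Two-phase operation: the first watchdog pass only arms the RF
+ * thermal meter, the following pass reads it back and runs the
+ * tracking callback.
+ */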
+ if (!tm_trigger) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) | BIT(16),
+ 0x03);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 8723be Thermal Meter!!\n");
+ tm_trigger = 1;
+ return;
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking !!\n");
+ txpwr_track_cb_therm(hw);
+ tm_trigger = 0;
+ }
+}
+
+static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rate_adaptive *ra = &(rtlpriv->ra);
+ struct ieee80211_sta *sta = NULL;
+ u32 low_rssithresh_for_ra = ra->low2high_rssi_thresh_for_ra40m;
+ u32 high_rssithresh_for_ra = ra->high_rssi_thresh_for_ra;
+ u8 go_up_gap = 5;
+
+ if (is_hal_stop(rtlhal)) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver is going to unload\n");
+ return;
+ }
+
+ if (!rtlpriv->dm.useramask) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver does not control rate adaptive mask\n");
+ return;
+ }
+
+ if (mac->link_state == MAC80211_LINKED &&
+ mac->opmode == NL80211_IFTYPE_STATION) {
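+ /* Apply a small hysteresis gap before promoting the RA state so
+ * the rate mask does not flap around the RSSI thresholds.
+ */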
+ switch (ra->pre_ratr_state) {
+ case DM_RATR_STA_MIDDLE:
+ high_rssithresh_for_ra += go_up_gap;
+ break;
+ case DM_RATR_STA_LOW:
+ high_rssithresh_for_ra += go_up_gap;
+ low_rssithresh_for_ra += go_up_gap;
+ break;
+ default:
+ break;
+ }
+
+ if (rtlpriv->dm.undec_sm_pwdb >
+ (long)high_rssithresh_for_ra)
+ ra->ratr_state = DM_RATR_STA_HIGH;
+ else if (rtlpriv->dm.undec_sm_pwdb >
+ (long)low_rssithresh_for_ra)
+ ra->ratr_state = DM_RATR_STA_MIDDLE;
+ else
+ ra->ratr_state = DM_RATR_STA_LOW;
+
+ if (ra->pre_ratr_state != ra->ratr_state) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
+ rtlpriv->dm.undec_sm_pwdb);
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", ra->ratr_state);
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ ra->pre_ratr_state, ra->ratr_state);
+
+ rcu_read_lock();
+ sta = rtl_find_sta(hw, mac->bssid);
+ if (sta)
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
+ ra->ratr_state);
+ rcu_read_unlock();
+
+ ra->pre_ratr_state = ra->ratr_state;
+ }
+ }
+}
+
+static bool rtl8723be_dm_is_edca_turbo_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->cfg->ops->get_btc_status()) {
+ if (rtlpriv->btcoexist.btc_ops->btc_is_disable_edca_turbo(rtlpriv))
+ return true;
+ }
+ if (rtlpriv->mac80211.mode == WIRELESS_MODE_B)
+ return true;
+
+ return false;
+}
+
+static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ static u64 last_txok_cnt;
+ static u64 last_rxok_cnt;
+ u64 cur_txok_cnt = 0;
+ u64 cur_rxok_cnt = 0;
+ u32 edca_be_ul = 0x6ea42b;
+ u32 edca_be_dl = 0x6ea42b;/*not sure*/
+ u32 edca_be = 0x5ea42b;
+ u32 iot_peer = 0;
+ bool is_cur_rdlstate;
+ bool last_is_cur_rdlstate = false;
+ bool bias_on_rx = false;
+ bool edca_turbo_on = false;
+
+ last_is_cur_rdlstate = rtlpriv->dm.is_cur_rdlstate;
+
+ cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
+ cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
+
+ iot_peer = rtlpriv->mac80211.vendor;
+ bias_on_rx = (iot_peer == PEER_RAL || iot_peer == PEER_ATH) ?
+ true : false;
+ edca_turbo_on = ((!rtlpriv->dm.is_any_nonbepkts) &&
+ (!rtlpriv->dm.disable_framebursting)) ?
+ true : false;
+
+ if ((iot_peer == PEER_CISCO) &&
+ (mac->mode == WIRELESS_MODE_N_24G)) {
+ edca_be_dl = edca_setting_dl[iot_peer];
+ edca_be_ul = edca_setting_ul[iot_peer];
+ }
+ if (rtl8723be_dm_is_edca_turbo_disable(hw))
+ goto exit;
+
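+ /* Treat the link as download-dominated when unicast RX bytes exceed
+ * TX by more than 4:1 (for RX-biased peers, stay in the download
+ * state unless TX dominates instead) and load the matching EDCA
+ * parameter set.
+ */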
+ if (edca_turbo_on) {
+ if (bias_on_rx)
+ is_cur_rdlstate = (cur_txok_cnt > cur_rxok_cnt * 4) ?
+ false : true;
+ else
+ is_cur_rdlstate = (cur_rxok_cnt > cur_txok_cnt * 4) ?
+ true : false;
+
+ edca_be = (is_cur_rdlstate) ? edca_be_dl : edca_be_ul;
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be);
+ rtlpriv->dm.is_cur_rdlstate = is_cur_rdlstate;
+ rtlpriv->dm.current_turbo_edca = true;
+ } else {
+ if (rtlpriv->dm.current_turbo_edca) {
+ u8 tmp = AC0_BE;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &tmp);
+ }
+ rtlpriv->dm.current_turbo_edca = false;
+ }
+
+exit:
+ rtlpriv->dm.is_any_nonbepkts = false;
+ last_txok_cnt = rtlpriv->stats.txbytesunicast;
+ last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
+static void rtl8723be_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 cur_cck_cca_thresh;
+
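+ /* Raise the CCK CCA threshold as the minimum RSSI improves; when
+ * RSSI is low or there is no link, fall back on the CCK false-alarm
+ * count to choose between 0x83 and 0x40.
+ */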
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ if (rtlpriv->dm_digtable.rssi_val_min > 25) {
+ cur_cck_cca_thresh = 0xcd;
+ } else if ((rtlpriv->dm_digtable.rssi_val_min <= 25) &&
+ (rtlpriv->dm_digtable.rssi_val_min > 10)) {
+ cur_cck_cca_thresh = 0x83;
+ } else {
+ if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+ cur_cck_cca_thresh = 0x83;
+ else
+ cur_cck_cca_thresh = 0x40;
+ }
+ } else {
+ if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+ cur_cck_cca_thresh = 0x83;
+ else
+ cur_cck_cca_thresh = 0x40;
+ }
+
+ if (rtlpriv->dm_digtable.cur_cck_cca_thres != cur_cck_cca_thresh)
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh);
+
+ rtlpriv->dm_digtable.pre_cck_cca_thres = rtlpriv->dm_digtable.cur_cck_cca_thres;
+ rtlpriv->dm_digtable.cur_cck_cca_thres = cur_cck_cca_thresh;
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ "CCK cca thresh hold =%x\n",
+ rtlpriv->dm_digtable.cur_cck_cca_thres);
+}
+
+static void rtl8723be_dm_dynamic_edcca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 reg_c50, reg_c58;
+ bool fw_current_in_ps_mode = false;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_in_ps_mode));
+ if (fw_current_in_ps_mode)
+ return;
+
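+ /* Adjust the energy-detect CCA thresholds from the current IGI of
+ * both paths, with hysteresis between 0x25 and 0x28.
+ */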
+ reg_c50 = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+ reg_c58 = rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+
+ if (reg_c50 > 0x28 && reg_c58 > 0x28) {
+ if (!rtlpriv->rtlhal.pre_edcca_enable) {
+ rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x03);
+ rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x00);
+ }
+ } else if (reg_c50 < 0x25 && reg_c58 < 0x25) {
+ if (rtlpriv->rtlhal.pre_edcca_enable) {
+ rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x7f);
+ rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x7f);
+ }
+ }
+}
+
+static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 crystal_cap;
+ u32 packet_count;
+ int cfo_khz_a, cfo_khz_b, cfo_ave = 0, adjust_xtal = 0;
+ int cfo_ave_diff;
+
+ if (rtlpriv->mac80211.link_state < MAC80211_LINKED) {
+ if (rtldm->atc_status == ATC_STATUS_OFF) {
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
+ ATC_STATUS_ON);
+ rtldm->atc_status = ATC_STATUS_ON;
+ }
+ if (rtlpriv->cfg->ops->get_btc_status()) {
+ if (!rtlpriv->btcoexist.btc_ops->btc_is_bt_disabled(rtlpriv)) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "odm_DynamicATCSwitch(): Disable"
+ " CFO tracking for BT!!\n");
+ return;
+ }
+ }
+
+ if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap) {
+ rtldm->crystal_cap = rtlpriv->efuse.crystalcap;
+ crystal_cap = rtldm->crystal_cap & 0x3f;
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
+ (crystal_cap | (crystal_cap << 6)));
+ }
+ } else {
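+ /* Convert the raw CFO tail reports to kHz so they can be compared
+ * against the CFO_THRESHOLD_* constants below.
+ */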
+ cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280;
+ cfo_khz_b = (int)(rtldm->cfo_tail[1] * 3125) / 1280;
+ packet_count = rtldm->packet_count;
+
+ if (packet_count == rtldm->packet_count_pre)
+ return;
+
+ rtldm->packet_count_pre = packet_count;
+
+ if (rtlpriv->phy.rf_type == RF_1T1R)
+ cfo_ave = cfo_khz_a;
+ else
+ cfo_ave = (int)(cfo_khz_a + cfo_khz_b) >> 1;
+
+ cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave) ?
+ (rtldm->cfo_ave_pre - cfo_ave) :
+ (cfo_ave - rtldm->cfo_ave_pre);
+
+ if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) {
+ rtldm->large_cfo_hit = 1;
+ return;
+ } else {
+ rtldm->large_cfo_hit = 0;
+ }
+
+ rtldm->cfo_ave_pre = cfo_ave;
+
+ if (cfo_ave >= -rtldm->cfo_threshold &&
+ cfo_ave <= rtldm->cfo_threshold && rtldm->is_freeze == 0) {
+ if (rtldm->cfo_threshold == CFO_THRESHOLD_XTAL) {
+ rtldm->cfo_threshold = CFO_THRESHOLD_XTAL + 10;
+ rtldm->is_freeze = 1;
+ } else {
+ rtldm->cfo_threshold = CFO_THRESHOLD_XTAL;
+ }
+ }
+
+ if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f)
+ adjust_xtal = ((cfo_ave - CFO_THRESHOLD_XTAL) >> 1) + 1;
+ else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) &&
+ rtlpriv->dm.crystal_cap > 0)
+ adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 1) - 1;
+
+ if (adjust_xtal != 0) {
+ rtldm->is_freeze = 0;
+ rtldm->crystal_cap += adjust_xtal;
+
+ if (rtldm->crystal_cap > 0x3f)
+ rtldm->crystal_cap = 0x3f;
+ else if (rtldm->crystal_cap < 0)
+ rtldm->crystal_cap = 0;
+
+ crystal_cap = rtldm->crystal_cap & 0x3f;
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
+ (crystal_cap | (crystal_cap << 6)));
+ }
+
+ if (cfo_ave < CFO_THRESHOLD_ATC &&
+ cfo_ave > -CFO_THRESHOLD_ATC) {
+ if (rtldm->atc_status == ATC_STATUS_ON) {
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
+ ATC_STATUS_OFF);
+ rtldm->atc_status = ATC_STATUS_OFF;
+ }
+ } else {
+ if (rtldm->atc_status == ATC_STATUS_OFF) {
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
+ ATC_STATUS_ON);
+ rtldm->atc_status = ATC_STATUS_ON;
+ }
+ }
+ }
+}
+
+static void rtl8723be_dm_common_info_self_update(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *drv_priv;
+ u8 cnt = 0;
+
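+ /* Track whether exactly one peer is associated; DIG only tightens
+ * its dynamic minimum in that case.
+ */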
+ rtlpriv->dm.one_entry_only = false;
+
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION &&
+ rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ rtlpriv->dm.one_entry_only = true;
+ return;
+ }
+
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP ||
+ rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC ||
+ rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) {
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+ cnt++;
+ }
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+ if (cnt == 1)
+ rtlpriv->dm.one_entry_only = true;
+ }
+}
+
+void rtl8723be_dm_watchdog(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool fw_current_inpsmode = false;
+ bool fw_ps_awake = true;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_inpsmode));
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+ (u8 *)(&fw_ps_awake));
+
+ if (ppsc->p2p_ps_info.p2p_ps_mode)
+ fw_ps_awake = false;
+
+ if ((ppsc->rfpwr_state == ERFON) &&
+ ((!fw_current_inpsmode) && fw_ps_awake) &&
+ (!ppsc->rfchange_inprogress)) {
+ rtl8723be_dm_common_info_self_update(hw);
+ rtl8723be_dm_false_alarm_counter_statistics(hw);
+ rtl8723be_dm_check_rssi_monitor(hw);
+ rtl8723be_dm_dig(hw);
+ rtl8723be_dm_dynamic_edcca(hw);
+ rtl8723be_dm_cck_packet_detection_thresh(hw);
+ rtl8723be_dm_refresh_rate_adaptive_mask(hw);
+ rtl8723be_dm_check_edca_turbo(hw);
+ rtl8723be_dm_dynamic_atc_switch(hw);
+ rtl8723be_dm_check_txpower_tracking(hw);
+ rtl8723be_dm_dynamic_txpower(hw);
+ if (rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv);
+ }
+ rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
new file mode 100644
index 000000000000..c6c2f2a78a66
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
@@ -0,0 +1,310 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_DM_H__
+#define __RTL8723BE_DM_H__
+
+#define MAIN_ANT 0
+#define AUX_ANT 1
+#define MAIN_ANT_CG_TRX 1
+#define AUX_ANT_CG_TRX 0
+#define MAIN_ANT_CGCS_RX 0
+#define AUX_ANT_CGCS_RX 1
+
+#define TXSCALE_TABLE_SIZE 30
+
+/*RF REG LIST*/
+#define DM_REG_RF_MODE_11N 0x00
+#define DM_REG_RF_0B_11N 0x0B
+#define DM_REG_CHNBW_11N 0x18
+#define DM_REG_T_METER_11N 0x24
+#define DM_REG_RF_25_11N 0x25
+#define DM_REG_RF_26_11N 0x26
+#define DM_REG_RF_27_11N 0x27
+#define DM_REG_RF_2B_11N 0x2B
+#define DM_REG_RF_2C_11N 0x2C
+#define DM_REG_RXRF_A3_11N 0x3C
+#define DM_REG_T_METER_92D_11N 0x42
+#define DM_REG_T_METER_88E_11N 0x42
+
+/*BB REG LIST*/
+/*PAGE 8 */
+#define DM_REG_BB_CTRL_11N 0x800
+#define DM_REG_RF_PIN_11N 0x804
+#define DM_REG_PSD_CTRL_11N 0x808
+#define DM_REG_TX_ANT_CTRL_11N 0x80C
+#define DM_REG_BB_PWR_SAV5_11N 0x818
+#define DM_REG_CCK_RPT_FORMAT_11N 0x824
+#define DM_REG_RX_DEFUALT_A_11N 0x858
+#define DM_REG_RX_DEFUALT_B_11N 0x85A
+#define DM_REG_BB_PWR_SAV3_11N 0x85C
+#define DM_REG_ANTSEL_CTRL_11N 0x860
+#define DM_REG_RX_ANT_CTRL_11N 0x864
+#define DM_REG_PIN_CTRL_11N 0x870
+#define DM_REG_BB_PWR_SAV1_11N 0x874
+#define DM_REG_ANTSEL_PATH_11N 0x878
+#define DM_REG_BB_3WIRE_11N 0x88C
+#define DM_REG_SC_CNT_11N 0x8C4
+#define DM_REG_PSD_DATA_11N 0x8B4
+/*PAGE 9*/
+#define DM_REG_ANT_MAPPING1_11N 0x914
+#define DM_REG_ANT_MAPPING2_11N 0x918
+/*PAGE A*/
+#define DM_REG_CCK_ANTDIV_PARA1_11N 0xA00
+#define DM_REG_CCK_CCA_11N 0xA0A
+#define DM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
+#define DM_REG_CCK_ANTDIV_PARA3_11N 0xA10
+#define DM_REG_CCK_ANTDIV_PARA4_11N 0xA14
+#define DM_REG_CCK_FILTER_PARA1_11N 0xA22
+#define DM_REG_CCK_FILTER_PARA2_11N 0xA23
+#define DM_REG_CCK_FILTER_PARA3_11N 0xA24
+#define DM_REG_CCK_FILTER_PARA4_11N 0xA25
+#define DM_REG_CCK_FILTER_PARA5_11N 0xA26
+#define DM_REG_CCK_FILTER_PARA6_11N 0xA27
+#define DM_REG_CCK_FILTER_PARA7_11N 0xA28
+#define DM_REG_CCK_FILTER_PARA8_11N 0xA29
+#define DM_REG_CCK_FA_RST_11N 0xA2C
+#define DM_REG_CCK_FA_MSB_11N 0xA58
+#define DM_REG_CCK_FA_LSB_11N 0xA5C
+#define DM_REG_CCK_CCA_CNT_11N 0xA60
+#define DM_REG_BB_PWR_SAV4_11N 0xA74
+/*PAGE B */
+#define DM_REG_LNA_SWITCH_11N 0xB2C
+#define DM_REG_PATH_SWITCH_11N 0xB30
+#define DM_REG_RSSI_CTRL_11N 0xB38
+#define DM_REG_CONFIG_ANTA_11N 0xB68
+#define DM_REG_RSSI_BT_11N 0xB9C
+/*PAGE C */
+#define DM_REG_OFDM_FA_HOLDC_11N 0xC00
+#define DM_REG_RX_PATH_11N 0xC04
+#define DM_REG_TRMUX_11N 0xC08
+#define DM_REG_OFDM_FA_RSTC_11N 0xC0C
+#define DM_REG_RXIQI_MATRIX_11N 0xC14
+#define DM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
+#define DM_REG_IGI_A_11N 0xC50
+#define DM_REG_ANTDIV_PARA2_11N 0xC54
+#define DM_REG_IGI_B_11N 0xC58
+#define DM_REG_ANTDIV_PARA3_11N 0xC5C
+#define DM_REG_BB_PWR_SAV2_11N 0xC70
+#define DM_REG_RX_OFF_11N 0xC7C
+#define DM_REG_TXIQK_MATRIXA_11N 0xC80
+#define DM_REG_TXIQK_MATRIXB_11N 0xC88
+#define DM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
+#define DM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
+#define DM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
+#define DM_REG_ANTDIV_PARA1_11N 0xCA4
+#define DM_REG_OFDM_FA_TYPE1_11N 0xCF0
+/*PAGE D */
+#define DM_REG_OFDM_FA_RSTD_11N 0xD00
+#define DM_REG_OFDM_FA_TYPE2_11N 0xDA0
+#define DM_REG_OFDM_FA_TYPE3_11N 0xDA4
+#define DM_REG_OFDM_FA_TYPE4_11N 0xDA8
+/*PAGE E */
+#define DM_REG_TXAGC_A_6_18_11N 0xE00
+#define DM_REG_TXAGC_A_24_54_11N 0xE04
+#define DM_REG_TXAGC_A_1_MCS32_11N 0xE08
+#define DM_REG_TXAGC_A_MCS0_3_11N 0xE10
+#define DM_REG_TXAGC_A_MCS4_7_11N 0xE14
+#define DM_REG_TXAGC_A_MCS8_11_11N 0xE18
+#define DM_REG_TXAGC_A_MCS12_15_11N 0xE1C
+#define DM_REG_FPGA0_IQK_11N 0xE28
+#define DM_REG_TXIQK_TONE_A_11N 0xE30
+#define DM_REG_RXIQK_TONE_A_11N 0xE34
+#define DM_REG_TXIQK_PI_A_11N 0xE38
+#define DM_REG_RXIQK_PI_A_11N 0xE3C
+#define DM_REG_TXIQK_11N 0xE40
+#define DM_REG_RXIQK_11N 0xE44
+#define DM_REG_IQK_AGC_PTS_11N 0xE48
+#define DM_REG_IQK_AGC_RSP_11N 0xE4C
+#define DM_REG_BLUETOOTH_11N 0xE6C
+#define DM_REG_RX_WAIT_CCA_11N 0xE70
+#define DM_REG_TX_CCK_RFON_11N 0xE74
+#define DM_REG_TX_CCK_BBON_11N 0xE78
+#define DM_REG_OFDM_RFON_11N 0xE7C
+#define DM_REG_OFDM_BBON_11N 0xE80
+#define DM_REG_TX2RX_11N 0xE84
+#define DM_REG_TX2TX_11N 0xE88
+#define DM_REG_RX_CCK_11N 0xE8C
+#define DM_REG_RX_OFDM_11N 0xED0
+#define DM_REG_RX_WAIT_RIFS_11N 0xED4
+#define DM_REG_RX2RX_11N 0xED8
+#define DM_REG_STANDBY_11N 0xEDC
+#define DM_REG_SLEEP_11N 0xEE0
+#define DM_REG_PMPD_ANAEN_11N 0xEEC
+
+/*MAC REG LIST*/
+#define DM_REG_BB_RST_11N 0x02
+#define DM_REG_ANTSEL_PIN_11N 0x4C
+#define DM_REG_EARLY_MODE_11N 0x4D0
+#define DM_REG_RSSI_MONITOR_11N 0x4FE
+#define DM_REG_EDCA_VO_11N 0x500
+#define DM_REG_EDCA_VI_11N 0x504
+#define DM_REG_EDCA_BE_11N 0x508
+#define DM_REG_EDCA_BK_11N 0x50C
+#define DM_REG_TXPAUSE_11N 0x522
+#define DM_REG_RESP_TX_11N 0x6D8
+#define DM_REG_ANT_TRAIN_PARA1_11N 0x7b0
+#define DM_REG_ANT_TRAIN_PARA2_11N 0x7b4
+
+/*DIG Related*/
+#define DM_BIT_IGI_11N 0x0000007F
+
+#define HAL_DM_DIG_DISABLE BIT(0)
+#define HAL_DM_HIPWR_DISABLE BIT(1)
+
+#define OFDM_TABLE_LENGTH 43
+#define CCK_TABLE_LENGTH 33
+
+#define OFDM_TABLE_SIZE 37
+#define CCK_TABLE_SIZE 33
+
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_FALSEALARM_THRESH_LOW 400
+#define DM_FALSEALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX 0x3e
+#define DM_DIG_MIN 0x1e
+
+#define DM_DIG_MAX_AP 0x32
+#define DM_DIG_MIN_AP 0x20
+
+#define DM_DIG_FA_UPPER 0x3e
+#define DM_DIG_FA_LOWER 0x1e
+#define DM_DIG_FA_TH0 0x200
+#define DM_DIG_FA_TH1 0x300
+#define DM_DIG_FA_TH2 0x400
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+#define RXPATHSELECTION_DIFF_TH 18
+
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+#define CTS2SELF_THVAL 30
+#define REGC38_TH 20
+
+#define TXHIGHPWRLEVEL_NORMAL 0
+#define TXHIGHPWRLEVEL_LEVEL1 1
+#define TXHIGHPWRLEVEL_LEVEL2 2
+#define TXHIGHPWRLEVEL_BT1 3
+#define TXHIGHPWRLEVEL_BT2 4
+
+#define DM_TYPE_BYFW 0
+#define DM_TYPE_BYDRIVER 1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define TXPWRTRACK_MAX_IDX 6
+
+/* Dynamic ATC switch */
+#define ATC_STATUS_OFF 0x0 /* enable */
+#define ATC_STATUS_ON 0x1 /* disable */
+#define CFO_THRESHOLD_XTAL 10 /* kHz */
+#define CFO_THRESHOLD_ATC 80 /* kHz */
+
+enum FAT_STATE {
+ FAT_NORMAL_STATE = 0,
+ FAT_TRAINING_STATE = 1,
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+ DIG_TYPE_THRESH_HIGH = 0,
+ DIG_TYPE_THRESH_LOW = 1,
+ DIG_TYPE_BACKOFF = 2,
+ DIG_TYPE_RX_GAIN_MIN = 3,
+ DIG_TYPE_RX_GAIN_MAX = 4,
+ DIG_TYPE_ENABLE = 5,
+ DIG_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+};
+
+enum dm_1r_cca_e {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf_e {
+ RF_SAVE = 0,
+ RF_NORMAL = 1,
+ RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch_e {
+ ANS_ANTENNA_B = 1,
+ ANS_ANTENNA_A = 2,
+ ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+ DIG_EXT_PORT_STAGE_0 = 0,
+ DIG_EXT_PORT_STAGE_1 = 1,
+ DIG_EXT_PORT_STAGE_2 = 2,
+ DIG_EXT_PORT_STAGE_3 = 3,
+ DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+ DIG_STA_DISCONNECT = 0,
+ DIG_STA_CONNECT = 1,
+ DIG_STA_BEFORE_CONNECT = 2,
+ DIG_MULTISTA_DISCONNECT = 3,
+ DIG_MULTISTA_CONNECT = 4,
+ DIG_CONNECT_MAX
+};
+
+enum pwr_track_control_method {
+ BBSWING,
+ TXAGC
+};
+
+#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
+#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
+#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
+#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
+#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
+
+void rtl8723be_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, u8 *pdesc,
+ u32 mac_id);
+void rtl8723be_dm_ant_sel_statistics(struct ieee80211_hw *hw, u8 antsel_tr_mux,
+ u32 mac_id, u32 rx_pwdb_all);
+void rtl8723be_dm_fast_antenna_trainning_callback(unsigned long data);
+void rtl8723be_dm_init(struct ieee80211_hw *hw);
+void rtl8723be_dm_watchdog(struct ieee80211_hw *hw);
+void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi);
+void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw);
+void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type,
+ u8 *pdirection, u32 *poutwrite_val);
+void rtl8723be_dm_init_edca_turbo(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
new file mode 100644
index 000000000000..f856be6fc138
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
@@ -0,0 +1,620 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+
+static bool _rtl8723be_check_fw_read_last_h2c(struct ieee80211_hw *hw,
+ u8 boxnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val_hmetfr;
+ bool result = false;
+
+ val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+ if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
+ result = true;
+ return result;
+}
+
+static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 boxnum;
+ u16 box_reg = 0, box_extreg = 0;
+ u8 u1b_tmp;
+ bool isfw_read = false;
+ u8 buf_index = 0;
+ bool bwrite_success = false;
+ u8 wait_h2c_limit = 100;
+ u8 wait_writeh2c_limit = 100;
+ u8 boxcontent[4], boxextcontent[4];
+ u32 h2c_waitcounter = 0;
+ unsigned long flag;
+ u8 idx;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+
+ while (true) {
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ if (rtlhal->h2c_setinprogress) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set.."
+ "element_id(%d).\n", element_id);
+
+ while (rtlhal->h2c_setinprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+ flag);
+ h2c_waitcounter++;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
+ udelay(100);
+
+ if (h2c_waitcounter > 1000)
+ return;
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+ flag);
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ } else {
+ rtlhal->h2c_setinprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ break;
+ }
+ }
+ while (!bwrite_success) {
+ wait_writeh2c_limit--;
+ if (wait_writeh2c_limit == 0) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Write H2C fail because no trigger "
+ "for FW INT!\n");
+ break;
+ }
+ boxnum = rtlhal->last_hmeboxnum;
+ switch (boxnum) {
+ case 0:
+ box_reg = REG_HMEBOX_0;
+ box_extreg = REG_HMEBOX_EXT_0;
+ break;
+ case 1:
+ box_reg = REG_HMEBOX_1;
+ box_extreg = REG_HMEBOX_EXT_1;
+ break;
+ case 2:
+ box_reg = REG_HMEBOX_2;
+ box_extreg = REG_HMEBOX_EXT_2;
+ break;
+ case 3:
+ box_reg = REG_HMEBOX_3;
+ box_extreg = REG_HMEBOX_EXT_3;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not processed\n");
+ break;
+ }
+ isfw_read = _rtl8723be_check_fw_read_last_h2c(hw, boxnum);
+ while (!isfw_read) {
+ wait_h2c_limit--;
+ if (wait_h2c_limit == 0) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wating too long for FW read "
+ "clear HMEBox(%d)!\n", boxnum);
+ break;
+ }
+ udelay(10);
+
+ isfw_read = _rtl8723be_check_fw_read_last_h2c(hw,
+ boxnum);
+ u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wating for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
+ boxnum, u1b_tmp);
+ }
+ if (!isfw_read) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! "
+ "Fw do not read.\n", boxnum);
+ break;
+ }
+ memset(boxcontent, 0, sizeof(boxcontent));
+ memset(boxextcontent, 0, sizeof(boxextcontent));
+ boxcontent[0] = element_id;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
+
+ switch (cmd_len) {
+ case 1:
+ case 2:
+ case 3:
+ /*boxcontent[0] &= ~(BIT(7));*/
+ memcpy((u8 *)(boxcontent) + 1,
+ p_cmdbuffer + buf_index, cmd_len);
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ /*boxcontent[0] |= (BIT(7));*/
+ memcpy((u8 *)(boxextcontent),
+ p_cmdbuffer + buf_index+3, cmd_len-3);
+ memcpy((u8 *)(boxcontent) + 1,
+ p_cmdbuffer + buf_index, 3);
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_extreg + idx,
+ boxextcontent[idx]);
+ }
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+ bwrite_success = true;
+
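+ /* H2C commands rotate through the four HMEBOX registers; remember
+ * which box was used so the next command picks the following one.
+ */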
+ rtlhal->last_hmeboxnum = boxnum + 1;
+ if (rtlhal->last_hmeboxnum == 4)
+ rtlhal->last_hmeboxnum = 0;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
+ }
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ rtlhal->h2c_setinprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+}
+
+void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 tmp_cmdbuf[2];
+
+ if (!rtlhal->fw_ready) {
+ RT_ASSERT(false,
+ "return H2C cmd because of Fw download fail!!!\n");
+ return;
+ }
+ memset(tmp_cmdbuf, 0, 8);
+ memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
+ _rtl8723be_fill_h2c_command(hw, element_id, cmd_len,
+ (u8 *)&tmp_cmdbuf);
+ return;
+}
+
+void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 u1_h2c_set_pwrmode[H2C_8723BE_PWEMODE_LENGTH] = { 0 };
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 rlbm, power_state = 0;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+
+ SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
+ rlbm = 0; /* YJ, temp, 120316. FW does not yet support RLBM = 2. */
+ SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
+ SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
+ (rtlpriv->mac80211.p2p) ?
+ ppsc->smart_ps : 1);
+ SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode,
+ ppsc->reg_max_lps_awakeintvl);
+ SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
+ if (mode == FW_PS_ACTIVE_MODE)
+ power_state |= FW_PWR_STATE_ACTIVE;
+ else
+ power_state |= FW_PWR_STATE_RF_OFF;
+ SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n",
+ u1_h2c_set_pwrmode, H2C_8723BE_PWEMODE_LENGTH);
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_SETPWRMODE,
+ H2C_8723BE_PWEMODE_LENGTH,
+ u1_h2c_set_pwrmode);
+}
+
+static bool _rtl8723be_cmd_send_packet(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ struct sk_buff *pskb = NULL;
+ u8 own;
+ unsigned long flags;
+
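+ /* The reserved-page payload is sent through beacon-queue descriptor
+ * 0; drop any stale beacon skb that is still queued first.
+ */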
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+ pdesc = &ring->desc[0];
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN);
+
+ rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
+
+ __skb_queue_tail(&ring->queue, skb);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+ return true;
+}
+#define BEACON_PG 0 /* ->1 */
+#define PSPOLL_PG 2
+#define NULL_PG 3
+#define PROBERSP_PG 4 /* ->5 */
+
+#define TOTAL_RESERVED_PKT_LEN 768
+
+static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
+ /* page 0 beacon */
+ 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
+ 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x10, 0x04, 0x00, 0x05, 0x54, 0x65,
+ 0x73, 0x74, 0x32, 0x01, 0x08, 0x82, 0x84, 0x0B,
+ 0x16, 0x24, 0x30, 0x48, 0x6C, 0x03, 0x01, 0x06,
+ 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x02, 0x32,
+ 0x04, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C,
+ 0x09, 0x03, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x3D, 0x00, 0xDD, 0x07, 0x00, 0xE0, 0x4C,
+ 0x02, 0x02, 0x00, 0x00, 0xDD, 0x18, 0x00, 0x50,
+ 0xF2, 0x01, 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04,
+ 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04, 0x01, 0x00,
+
+ /* page 1 beacon */
+ 0x00, 0x50, 0xF2, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 2 ps-poll */
+ 0xA4, 0x10, 0x01, 0xC0, 0xEC, 0x1A, 0x59, 0x0B,
+ 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 3 null */
+ 0x48, 0x01, 0x00, 0x00, 0xEC, 0x1A, 0x59, 0x0B,
+ 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
+ 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x72, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 4 probe_resp */
+ 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
+ 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+ 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
+ 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
+ 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
+ 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
+ 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
+ 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
+ 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
+ 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
+ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
+ 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 5 probe_resp */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
+ bool dl_finished)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+
+ u32 totalpacketlen;
+ bool rtstatus;
+ u8 u1rsvdpageloc[5] = { 0 };
+ bool dlok = false;
+
+ u8 *beacon;
+ u8 *p_pspoll;
+ u8 *nullfunc;
+ u8 *p_probersp;
+ /*---------------------------------------------------------
+ * (1) beacon
+ *---------------------------------------------------------
+ */
+ beacon = &reserved_page_packet[BEACON_PG * 128];
+ SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+ /*-------------------------------------------------------
+ * (2) ps-poll
+ *-------------------------------------------------------
+ */
+ p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
+ SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+ SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+ SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG);
+
+ /*--------------------------------------------------------
+ * (3) null data
+ *--------------------------------------------------------
+ */
+ nullfunc = &reserved_page_packet[NULL_PG * 128];
+ SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+ SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG);
+
+ /*---------------------------------------------------------
+ * (4) probe response
+ *---------------------------------------------------------
+ */
+ p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
+ SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+ SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG);
+
+ totalpacketlen = TOTAL_RESERVED_PKT_LEN;
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "rtl8723be_set_fw_rsvdpagepkt(): "
+ "HW_VAR_SET_TX_CMD: ALL\n",
+ &reserved_page_packet[0], totalpacketlen);
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl8723be_set_fw_rsvdpagepkt(): "
+ "HW_VAR_SET_TX_CMD: ALL\n", u1rsvdpageloc, 3);
+
+
+ skb = dev_alloc_skb(totalpacketlen);
+ memcpy((u8 *)skb_put(skb, totalpacketlen),
+ &reserved_page_packet, totalpacketlen);
+
+ rtstatus = _rtl8723be_cmd_send_packet(hw, skb);
+
+ if (rtstatus)
+ dlok = true;
+
+ if (dlok) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n",
+ u1rsvdpageloc, 3);
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RSVDPAGE,
+ sizeof(u1rsvdpageloc), u1rsvdpageloc);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ }
+}
+
+/* Should check whether the FW supports P2P or not. */
+static void rtl8723be_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw,
+ u8 ctwindow)
+{
+ u8 u1_ctwindow_period[1] = {ctwindow};
+
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_CTW_CMD, 1,
+ u1_ctwindow_period);
+}
+
+void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
+ u8 p2p_ps_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info);
+ struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
+ u8 i;
+ u16 ctwindow;
+ u32 start_time, tsf_low;
+
+ switch (p2p_ps_state) {
+ case P2P_PS_DISABLE:
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
+ memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t));
+ break;
+ case P2P_PS_ENABLE:
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
+ /* update CTWindow value. */
+ if (p2pinfo->ctwindow > 0) {
+ p2p_ps_offload->ctwindow_en = 1;
+ ctwindow = p2pinfo->ctwindow;
+ rtl8723be_set_p2p_ctw_period_cmd(hw, ctwindow);
+ }
+ /* hw only supports 2 sets of NoA */
+ for (i = 0; i < p2pinfo->noa_num; i++) {
+ /* Select which NoA descriptor set the following
+ * register writes configure.
+ */
+ rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
+ if (i == 0)
+ p2p_ps_offload->noa0_en = 1;
+ else
+ p2p_ps_offload->noa1_en = 1;
+
+ /* config P2P NoA Descriptor Register */
+ rtl_write_dword(rtlpriv, 0x5E0,
+ p2pinfo->noa_duration[i]);
+ rtl_write_dword(rtlpriv, 0x5E4,
+ p2pinfo->noa_interval[i]);
+
+ /*Get Current TSF value */
+ tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+ start_time = p2pinfo->noa_start_time[i];
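+ /* For a non-one-shot NoA, push the start time until it is at
+ * least 50 TUs (50 * 1024 us) beyond the current TSF,
+ * decrementing the remaining NoA count for each skipped
+ * interval (a count of 255 is continuous and not decremented).
+ */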
+ if (p2pinfo->noa_count_type[i] != 1) {
+ while (start_time <= (tsf_low + (50 * 1024))) {
+ start_time += p2pinfo->noa_interval[i];
+ if (p2pinfo->noa_count_type[i] != 255)
+ p2pinfo->noa_count_type[i]--;
+ }
+ }
+ rtl_write_dword(rtlpriv, 0x5E8, start_time);
+ rtl_write_dword(rtlpriv, 0x5EC,
+ p2pinfo->noa_count_type[i]);
+ }
+ if ((p2pinfo->opp_ps == 1) ||
+ (p2pinfo->noa_num > 0)) {
+ /* rst p2p circuit */
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
+
+ p2p_ps_offload->offload_en = 1;
+
+ if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
+ p2p_ps_offload->role = 1;
+ p2p_ps_offload->allstasleep = 0;
+ } else {
+ p2p_ps_offload->role = 0;
+ }
+ p2p_ps_offload->discovery = 0;
+ }
+ break;
+ case P2P_PS_SCAN:
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ p2p_ps_offload->discovery = 1;
+ break;
+ case P2P_PS_SCAN_DONE:
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
+ p2p_ps_offload->discovery = 0;
+ p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
+ break;
+ default:
+ break;
+ }
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_OFFLOAD, 1,
+ (u8 *)p2p_ps_offload);
+}
+
+void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+ u8 u1_joinbssrpt_parm[1] = { 0 };
+
+ SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
+
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_JOINBSSRPT, 1,
+ u1_joinbssrpt_parm);
+}
+
+void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw,
+ u8 ap_offload_enable)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 u1_apoffload_parm[H2C_8723BE_AP_OFFLOAD_LENGTH] = { 0 };
+
+ SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable);
+ SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->hiddenssid);
+ SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0);
+
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_AP_OFFLOAD,
+ H2C_8723BE_AP_OFFLOAD_LENGTH, u1_apoffload_parm);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/rtlwifi/rtl8723be/fw.h
new file mode 100644
index 000000000000..31eec281e446
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/fw.h
@@ -0,0 +1,248 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE__FW__H__
+#define __RTL8723BE__FW__H__
+
+#define FW_8192C_SIZE 0x8000
+#define FW_8192C_START_ADDRESS 0x1000
+#define FW_8192C_END_ADDRESS 0x5FFF
+#define FW_8192C_PAGE_SIZE 4096
+#define FW_8192C_POLLING_DELAY 5
+#define FW_8192C_POLLING_TIMEOUT_COUNT 6000
+
+#define IS_FW_HEADER_EXIST(_pfwhdr) \
+ ((_pfwhdr->signature&0xFFF0) == 0x5300)
+#define USE_OLD_WOWLAN_DEBUG_FW 0
+
+#define H2C_8723BE_RSVDPAGE_LOC_LEN 5
+#define H2C_8723BE_PWEMODE_LENGTH 5
+#define H2C_8723BE_JOINBSSRPT_LENGTH 1
+#define H2C_8723BE_AP_OFFLOAD_LENGTH 3
+#define H2C_8723BE_WOWLAN_LENGTH 3
+#define H2C_8723BE_KEEP_ALIVE_CTRL_LENGTH 3
+#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN 1
+#else
+#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN 3
+#endif
+#define H2C_8723BE_AOAC_GLOBAL_INFO_LEN 2
+#define H2C_8723BE_AOAC_RSVDPAGE_LOC_LEN 7
+
+
+/* Fw PS state for RPWM.
+ * BIT[2:0] = HW state
+ * BIT[3]   = Protocol PS state, 1: register active state, 0: register sleep state
+ * BIT[4]   = sub-state
+ */
+#define FW_PS_GO_ON BIT(0)
+#define FW_PS_TX_NULL BIT(1)
+#define FW_PS_RF_ON BIT(2)
+#define FW_PS_REGISTER_ACTIVE BIT(3)
+
+#define FW_PS_DPS BIT(0)
+#define FW_PS_LCLK (FW_PS_DPS)
+#define FW_PS_RF_OFF BIT(1)
+#define FW_PS_ALL_ON BIT(2)
+#define FW_PS_ST_ACTIVE BIT(3)
+#define FW_PS_ISR_ENABLE BIT(4)
+#define FW_PS_IMR_ENABLE BIT(5)
+
+
+#define FW_PS_ACK BIT(6)
+#define FW_PS_TOGGLE BIT(7)
+
+/* 88E RPWM value */
+/* BIT[0] = 1: 32k, 0: 40M */
+#define FW_PS_CLOCK_OFF BIT(0) /* 32k */
+#define FW_PS_CLOCK_ON 0 /* 40M */
+
+#define FW_PS_STATE_MASK (0x0F)
+#define FW_PS_STATE_HW_MASK (0x07)
+/*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/
+#define FW_PS_STATE_INT_MASK (0x3F)
+
+#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x))
+#define FW_PS_STATE_HW(x) (FW_PS_STATE_HW_MASK & (x))
+#define FW_PS_STATE_INT(x) (FW_PS_STATE_INT_MASK & (x))
+#define FW_PS_ISR_VAL(x) ((x) & 0x70)
+#define FW_PS_IMR_MASK(x) ((x) & 0xDF)
+#define FW_PS_KEEP_IMR(x) ((x) & 0x20)
+
+
+#define FW_PS_STATE_S0 (FW_PS_DPS)
+#define FW_PS_STATE_S1 (FW_PS_LCLK)
+#define FW_PS_STATE_S2 (FW_PS_RF_OFF)
+#define FW_PS_STATE_S3 (FW_PS_ALL_ON)
+#define FW_PS_STATE_S4 ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON))
+
+/* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/
+#define FW_PS_STATE_ALL_ON_88E (FW_PS_CLOCK_ON)
+/* (FW_PS_RF_ON)*/
+#define FW_PS_STATE_RF_ON_88E (FW_PS_CLOCK_ON)
+/* 0x0*/
+#define FW_PS_STATE_RF_OFF_88E (FW_PS_CLOCK_ON)
+/* (FW_PS_STATE_RF_OFF)*/
+#define FW_PS_STATE_RF_OFF_LOW_PWR_88E (FW_PS_CLOCK_OFF)
+
+#define FW_PS_STATE_ALL_ON_92C (FW_PS_STATE_S4)
+#define FW_PS_STATE_RF_ON_92C (FW_PS_STATE_S3)
+#define FW_PS_STATE_RF_OFF_92C (FW_PS_STATE_S2)
+#define FW_PS_STATE_RF_OFF_LOW_PWR_92C (FW_PS_STATE_S1)
+
+
+/* For 88E H2C PwrMode Cmd ID 5.*/
+#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
+#define FW_PWR_STATE_RF_OFF 0
+
+#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK)
+#define FW_PS_IS_CLK_ON(x) ((x) & (FW_PS_RF_OFF | FW_PS_ALL_ON))
+#define FW_PS_IS_RF_ON(x) ((x) & (FW_PS_ALL_ON))
+#define FW_PS_IS_ACTIVE(x) ((x) & (FW_PS_ST_ACTIVE))
+#define FW_PS_IS_CPWM_INT(x) ((x) & 0x40)
+
+#define FW_CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))
+
+#define IS_IN_LOW_POWER_STATE_88E(fwpsstate) \
+ (FW_PS_STATE(fwpsstate) == FW_PS_CLOCK_OFF)
+
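+/* Number of 128-byte pages needed to hold _len bytes, rounded up;
+ * e.g. pagenum_128(200) == 2.
+ */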
+#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
+
+#define SET_88E_H2CCMD_WOWLAN_FUNC_ENABLE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_ALL_PKT_DROP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 4, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_GPIO_ACTIVE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 5, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_REKEY_WAKE_UP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 6, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 7, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_GPIONUM(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_88E_H2CCMD_WOWLAN_GPIO_DURATION(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+
+
+#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_RLBM(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 4, __val)
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 4, 4, __val)
+#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
+#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd) \
+ LE_BITS_TO_1BYTE(__ph2ccmd, 0, 8)
+
+#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+
+/* AP_OFFLOAD */
+#define SET_H2CCMD_AP_OFFLOAD_ON(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+
+/* Keep Alive Control*/
+#define SET_88E_H2CCMD_KEEP_ALIVE_ENABLE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+#define SET_88E_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+#define SET_88E_H2CCMD_KEEP_ALIVE_PERIOD(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+
+/*REMOTE_WAKE_CTRL */
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_EN(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val)
+#else
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#endif
+
+/* GTK_OFFLOAD */
+#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+
+/* AOAC_RSVDPAGE_LOC */
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_REM_WAKE_CTRL_INFO(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd), 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
+
+void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw,
+ u8 ap_offload_enable);
+void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
+void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
+ bool dl_finished);
+void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+int rtl8723be_download_fw(struct ieee80211_hw *hw,
+ bool buse_wake_on_wlan_fw);
+void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
+ u8 p2p_ps_state);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
new file mode 100644
index 000000000000..0fdf0909321f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -0,0 +1,2523 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "../rtl8723com/dm_common.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "led.h"
+#include "hw.h"
+#include "pwrseq.h"
+#include "../btcoexist/rtl_btc.h"
+
+#define LLT_CONFIG 5
+
+static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ while (skb_queue_len(&ring->queue)) {
+ struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+ struct sk_buff *skb = __skb_dequeue(&ring->queue);
+
+ pci_unmap_single(rtlpci->pdev,
+ rtlpriv->cfg->ops->get_desc(
+ (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+}
+
+static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+ u8 set_bits, u8 clear_bits)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpci->reg_bcn_ctrl_val |= set_bits;
+ rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
+
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+}
+
+static void _rtl8723be_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte &= ~(BIT(0));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl8723be_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte |= BIT(1);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl8723be_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(1));
+}
+
+static void _rtl8723be_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(1), 0);
+}
+
+static void _rtl8723be_set_fw_clock_on(struct ieee80211_hw *hw, u8 rpwm_val,
+ bool need_turn_off_ckk)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool support_remote_wake_up;
+ u32 count = 0, isr_regaddr, content;
+ bool schedule_timer = need_turn_off_ckk;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
+ (u8 *)(&support_remote_wake_up));
+
+ if (!rtlhal->fw_ready)
+ return;
+ if (!rtlpriv->psc.fw_current_inpsmode)
+ return;
+
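+ /* Wait (bounded at roughly 100 ms) until no other firmware clock
+ * change is in progress; give up silently if the flag never clears.
+ */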
+ while (1) {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (rtlhal->fw_clk_change_in_progress) {
+ while (rtlhal->fw_clk_change_in_progress) {
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ count++;
+ udelay(100);
+ if (count > 1000)
+ return;
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ }
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ } else {
+ rtlhal->fw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ break;
+ }
+ }
+ if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) {
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
+ &rpwm_val);
+ if (FW_PS_IS_ACK(rpwm_val)) {
+ isr_regaddr = REG_HISR;
+ content = rtl_read_dword(rtlpriv, isr_regaddr);
+ while (!(content & IMR_CPWM) && (count < 500)) {
+ udelay(50);
+ count++;
+ content = rtl_read_dword(rtlpriv, isr_regaddr);
+ }
+
+ if (content & IMR_CPWM) {
+ rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
+ rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_88E;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Receive CPWM INT!!! Set "
+ "pHalData->FwPSState = %X\n",
+ rtlhal->fw_ps_state);
+ }
+ }
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->fw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (schedule_timer) {
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ }
+ } else {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->fw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ }
+}
+
+static void _rtl8723be_set_fw_clock_off(struct ieee80211_hw *hw, u8 rpwm_val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ enum rf_pwrstate rtstate;
+ bool schedule_timer = false;
+ u8 queue;
+
+ if (!rtlhal->fw_ready)
+ return;
+ if (!rtlpriv->psc.fw_current_inpsmode)
+ return;
+ if (!rtlhal->allow_sw_to_change_hwclc)
+ return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *)(&rtstate));
+ if (rtstate == ERFOFF || rtlpriv->psc.inactive_pwrstate == ERFOFF)
+ return;
+
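+ /* If any TX queue still has frames queued, postpone the clock-off
+ * request and retry from the fw_clockoff_timer in about 10 ms.
+ */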
+ for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
+ ring = &rtlpci->tx_ring[queue];
+ if (skb_queue_len(&ring->queue)) {
+ schedule_timer = true;
+ break;
+ }
+ }
+ if (schedule_timer) {
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ return;
+ }
+ if (FW_PS_STATE(rtlhal->fw_ps_state) !=
+ FW_PS_STATE_RF_OFF_LOW_PWR_88E) {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (!rtlhal->fw_clk_change_in_progress) {
+ rtlhal->fw_clk_change_in_progress = true;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
+ rtl_write_word(rtlpriv, REG_HISR, 0x0100);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+ &rpwm_val);
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->fw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ } else {
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ }
+ }
+}
+
+static void _rtl8723be_set_fw_ps_rf_on(struct ieee80211_hw *hw)
+{
+ u8 rpwm_val = 0;
+ rpwm_val |= (FW_PS_STATE_RF_OFF_88E | FW_PS_ACK);
+ _rtl8723be_set_fw_clock_on(hw, rpwm_val, true);
+}
+
+static void _rtl8723be_fwlps_leave(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool fw_current_inps = false;
+ u8 rpwm_val = 0, fw_pwrmode = FW_PS_ACTIVE_MODE;
+
+ if (ppsc->low_power_enable) {
+ rpwm_val = (FW_PS_STATE_ALL_ON_88E | FW_PS_ACK);/* RF on */
+ _rtl8723be_set_fw_clock_on(hw, rpwm_val, false);
+ rtlhal->allow_sw_to_change_hwclc = false;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &fw_pwrmode);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_inps));
+ } else {
+ rpwm_val = FW_PS_STATE_ALL_ON_88E; /* RF on */
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &fw_pwrmode);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_inps));
+ }
+}
+
+static void _rtl8723be_fwlps_enter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool fw_current_inps = true;
+ u8 rpwm_val;
+
+ if (ppsc->low_power_enable) {
+ rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_88E; /* RF off */
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_inps));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &ppsc->fwctrl_psmode);
+ rtlhal->allow_sw_to_change_hwclc = true;
+ _rtl8723be_set_fw_clock_off(hw, rpwm_val);
+
+ } else {
+ rpwm_val = FW_PS_STATE_RF_OFF_88E; /* RF off */
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_inps));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &ppsc->fwctrl_psmode);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
+ }
+}
+
+void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ switch (variable) {
+ case HW_VAR_RCR:
+ *((u32 *)(val)) = rtlpci->receive_config;
+ break;
+ case HW_VAR_RF_STATE:
+ *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+ break;
+ case HW_VAR_FWLPS_RF_ON: {
+ enum rf_pwrstate rfstate;
+ u32 val_rcr;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
+ (u8 *)(&rfstate));
+ if (rfstate == ERFOFF) {
+ *((bool *)(val)) = true;
+ } else {
+ val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ val_rcr &= 0x00070000;
+ if (val_rcr)
+ *((bool *)(val)) = false;
+ else
+ *((bool *)(val)) = true;
+ }
+ break; }
+ case HW_VAR_FW_PSMODE_STATUS:
+ *((bool *)(val)) = ppsc->fw_current_inpsmode;
+ break;
+ case HW_VAR_CORRECT_TSF: {
+ u64 tsf;
+ u32 *ptsf_low = (u32 *)&tsf;
+ u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
+ *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+ *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+ *((u64 *)(val)) = tsf;
+
+ break; }
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process %x\n", variable);
+ break;
+ }
+}
+
+void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 idx;
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:
+ for (idx = 0; idx < ETH_ALEN; idx++)
+ rtl_write_byte(rtlpriv, (REG_MACID + idx), val[idx]);
+ break;
+ case HW_VAR_BASIC_RATE: {
+ u16 rate_cfg = ((u16 *)val)[0];
+ u8 rate_index = 0;
+ rate_cfg = rate_cfg & 0x15f;
+ rate_cfg |= 0x01;
+ rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+ rtl_write_byte(rtlpriv, REG_RRSR + 1, (rate_cfg >> 8) & 0xff);
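+ /* Use the index of the highest rate remaining in rate_cfg as
+ * the initial RTS rate.
+ */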
+ while (rate_cfg > 0x1) {
+ rate_cfg = (rate_cfg >> 1);
+ rate_index++;
+ }
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, rate_index);
+ break; }
+ case HW_VAR_BSSID:
+ for (idx = 0; idx < ETH_ALEN; idx++)
+ rtl_write_byte(rtlpriv, (REG_BSSID + idx), val[idx]);
+ break;
+ case HW_VAR_SIFS:
+ rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
+
+ rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+
+ if (!mac->ht_enable)
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, 0x0e0e);
+ else
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ *((u16 *)val));
+ break;
+ case HW_VAR_SLOT_TIME: {
+ u8 e_aci;
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
+
+ rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &e_aci);
+ }
+ break; }
+ case HW_VAR_ACK_PREAMBLE: {
+ u8 reg_tmp;
+ u8 short_preamble = (bool)*val;
+ reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL + 2);
+ if (short_preamble) {
+ reg_tmp |= 0x02;
+ rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+ } else {
+ reg_tmp &= 0xFD;
+ rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+ }
+ break; }
+ case HW_VAR_WPA_CONFIG:
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
+ break;
+ case HW_VAR_AMPDU_MIN_SPACE: {
+ u8 min_spacing_to_set;
+ u8 sec_min_space;
+
+ min_spacing_to_set = *val;
+ if (min_spacing_to_set <= 7) {
+ sec_min_space = 0;
+
+ if (min_spacing_to_set < sec_min_space)
+ min_spacing_to_set = sec_min_space;
+
+ mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) |
+ min_spacing_to_set);
+
+ *val = min_spacing_to_set;
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ }
+ break; }
+ case HW_VAR_SHORTGI_DENSITY: {
+ u8 density_to_set;
+
+ density_to_set = *val;
+ mac->min_space_cfg |= (density_to_set << 3);
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ break; }
+ case HW_VAR_AMPDU_FACTOR: {
+ u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
+ u8 factor_toset;
+ u8 *p_regtoset = NULL;
+ u8 index = 0;
+
+ p_regtoset = regtoset_normal;
+
+ factor_toset = *val;
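+ /* Map factor 0-3 to an aggregation limit of 4/8/16/32 (capped
+ * at 0xf) and apply it to both nibbles of each REG_AGGLEN_LMT
+ * byte, only ever lowering an existing limit.
+ */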
+ if (factor_toset <= 3) {
+ factor_toset = (1 << (factor_toset + 2));
+ if (factor_toset > 0xf)
+ factor_toset = 0xf;
+
+ for (index = 0; index < 4; index++) {
+ if ((p_regtoset[index] & 0xf0) >
+ (factor_toset << 4))
+ p_regtoset[index] =
+ (p_regtoset[index] & 0x0f) |
+ (factor_toset << 4);
+
+ if ((p_regtoset[index] & 0x0f) > factor_toset)
+ p_regtoset[index] =
+ (p_regtoset[index] & 0xf0) |
+ (factor_toset);
+
+ rtl_write_byte(rtlpriv,
+ (REG_AGGLEN_LMT + index),
+ p_regtoset[index]);
+ }
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
+ }
+ break; }
+ case HW_VAR_AC_PARAM: {
+ u8 e_aci = *val;
+ rtl8723_dm_init_edca_turbo(hw);
+
+ if (rtlpci->acm_method != EACMWAY2_SW)
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
+ &e_aci);
+ break; }
+ case HW_VAR_ACM_CTRL: {
+ u8 e_aci = *val;
+ union aci_aifsn *p_aci_aifsn =
+ (union aci_aifsn *)(&(mac->ac[0].aifs));
+ u8 acm = p_aci_aifsn->f.acm;
+ u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+ acm_ctrl =
+ acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
+
+ if (acm) {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl |= ACMHW_BEQEN;
+ break;
+ case AC2_VI:
+ acm_ctrl |= ACMHW_VIQEN;
+ break;
+ case AC3_VO:
+ acm_ctrl |= ACMHW_VOQEN;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set "
+ "failed: eACI is %d\n", acm);
+ break;
+ }
+ } else {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl &= (~ACMHW_BEQEN);
+ break;
+ case AC2_VI:
+ acm_ctrl &= (~ACMHW_VIQEN);
+ break;
+ case AC3_VO:
+ acm_ctrl &= (~ACMHW_BEQEN);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
+ "Write 0x%X\n", acm_ctrl);
+ rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+ break; }
+ case HW_VAR_RCR:
+ rtl_write_dword(rtlpriv, REG_RCR, ((u32 *)(val))[0]);
+ rtlpci->receive_config = ((u32 *)(val))[0];
+ break;
+ case HW_VAR_RETRY_LIMIT: {
+ u8 retry_limit = *val;
+
+ rtl_write_word(rtlpriv, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+ retry_limit << RETRY_LIMIT_LONG_SHIFT);
+ break; }
+ case HW_VAR_DUAL_TSF_RST:
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ rtlefuse->efuse_usedbytes = *((u16 *)val);
+ break;
+ case HW_VAR_EFUSE_USAGE:
+ rtlefuse->efuse_usedpercentage = *val;
+ break;
+ case HW_VAR_IO_CMD:
+ rtl8723be_phy_set_io_cmd(hw, (*(enum io_type *)val));
+ break;
+ case HW_VAR_SET_RPWM: {
+ u8 rpwm_val;
+
+ rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
+ udelay(1);
+
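+ /* Invert BIT(7) relative to the last written RPWM value so
+ * that the firmware sees the toggle bit change on every
+ * request.
+ */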
+ if (rpwm_val & BIT(7)) {
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
+ } else {
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
+ }
+ break; }
+ case HW_VAR_H2C_FW_PWRMODE:
+ rtl8723be_set_fw_pwrmode_cmd(hw, *val);
+ break;
+ case HW_VAR_FW_PSMODE_STATUS:
+ ppsc->fw_current_inpsmode = *((bool *)val);
+ break;
+ case HW_VAR_RESUME_CLK_ON:
+ _rtl8723be_set_fw_ps_rf_on(hw);
+ break;
+ case HW_VAR_FW_LPS_ACTION: {
+ bool enter_fwlps = *((bool *)val);
+
+ if (enter_fwlps)
+ _rtl8723be_fwlps_enter(hw);
+ else
+ _rtl8723be_fwlps_leave(hw);
+
+ break; }
+ case HW_VAR_H2C_FW_JOINBSSRPT: {
+ u8 mstatus = *val;
+ u8 tmp_regcr, tmp_reg422, bcnvalid_reg;
+ u8 count = 0, dlbcn_count = 0;
+ bool recover = false;
+
+ if (mstatus == RT_MEDIA_CONNECT) {
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL);
+
+ tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr | BIT(0)));
+
+ _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3));
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0);
+
+ tmp_reg422 = rtl_read_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 & (~BIT(6)));
+ if (tmp_reg422 & BIT(6))
+ recover = true;
+
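+ /* Write BIT(0) of REG_TDECTRL + 2, free the old beacon queue
+ * entries and download the reserved pages, then poll the same
+ * bit (up to 20 x 10 us) until the hardware reports the beacon
+ * as valid; retry the whole download at most 5 times.
+ */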
+ do {
+ bcnvalid_reg = rtl_read_byte(rtlpriv,
+ REG_TDECTRL + 2);
+ rtl_write_byte(rtlpriv, REG_TDECTRL + 2,
+ (bcnvalid_reg | BIT(0)));
+ _rtl8723be_return_beacon_queue_skb(hw);
+
+ rtl8723be_set_fw_rsvdpagepkt(hw, 0);
+ bcnvalid_reg = rtl_read_byte(rtlpriv,
+ REG_TDECTRL + 2);
+ count = 0;
+ while (!(bcnvalid_reg & BIT(0)) && count < 20) {
+ count++;
+ udelay(10);
+ bcnvalid_reg = rtl_read_byte(rtlpriv,
+ REG_TDECTRL + 2);
+ }
+ dlbcn_count++;
+ } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5);
+
+ if (bcnvalid_reg & BIT(0))
+ rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0));
+
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
+ _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4));
+
+ if (recover) {
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422);
+ }
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr & ~(BIT(0))));
+ }
+ rtl8723be_set_fw_joinbss_report_cmd(hw, *val);
+ break; }
+ case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
+ rtl8723be_set_p2p_ps_offload_cmd(hw, *val);
+ break;
+ case HW_VAR_AID: {
+ u16 u2btmp;
+ u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+ u2btmp &= 0xC000;
+ rtl_write_word(rtlpriv, REG_BCN_PSR_RPT,
+ (u2btmp | mac->assoc_id));
+ break; }
+ case HW_VAR_CORRECT_TSF: {
+ u8 btype_ibss = *val;
+
+ if (btype_ibss)
+ _rtl8723be_stop_tx_beacon(hw);
+
+ _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3));
+
+ rtl_write_dword(rtlpriv, REG_TSFTR,
+ (u32) (mac->tsf & 0xffffffff));
+ rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+ (u32) ((mac->tsf >> 32) & 0xffffffff));
+
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
+
+ if (btype_ibss)
+ _rtl8723be_resume_tx_beacon(hw);
+ break; }
+ case HW_VAR_KEEP_ALIVE: {
+ u8 array[2];
+ array[0] = 0xff;
+ array[1] = *val;
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_KEEP_ALIVE_CTRL,
+ 2, array);
+ break; }
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process %x\n",
+ variable);
+ break;
+ }
+}
+
+static bool _rtl8723be_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool status = true;
+ int count = 0;
+ u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) |
+ _LLT_OP(_LLT_WRITE_ACCESS);
+
+ rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+
+ do {
+ value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+ break;
+
+ if (count > POLLING_LLT_THRESHOLD) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Failed to polling write LLT done at "
+ "address %d!\n", address);
+ status = false;
+ break;
+ }
+ } while (++count);
+
+ return status;
+}
+
+static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned short i;
+ u8 txpktbuf_bndy;
+ u8 maxpage;
+ bool status;
+
+ maxpage = 255;
+ txpktbuf_bndy = 245;
+
+ rtl_write_dword(rtlpriv, REG_TRXFF_BNDY,
+ (0x27FF0000 | txpktbuf_bndy));
+ rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_PBP, 0x31);
+ rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
+
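+ /* Build the LLT: chain pages 0 .. txpktbuf_bndy - 1 for the TX
+ * packet buffer (terminated with 0xFF) and link the remaining
+ * pages up to maxpage into a ring.
+ */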
+ for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+ status = _rtl8723be_llt_write(hw, i, i + 1);
+ if (!status)
+ return status;
+ }
+ status = _rtl8723be_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+
+ if (!status)
+ return status;
+
+ for (i = txpktbuf_bndy; i < maxpage; i++) {
+ status = _rtl8723be_llt_write(hw, i, (i + 1));
+ if (!status)
+ return status;
+ }
+ status = _rtl8723be_llt_write(hw, maxpage, txpktbuf_bndy);
+ if (!status)
+ return status;
+
+ rtl_write_dword(rtlpriv, REG_RQPN, 0x80e40808);
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x00);
+
+ return true;
+}
+
+static void _rtl8723be_gen_refresh_led_state(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
+
+ if (rtlpriv->rtlhal.up_first_time)
+ return;
+
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
+ rtl8723be_sw_led_on(hw, pled0);
+ else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
+ rtl8723be_sw_led_on(hw, pled0);
+ else
+ rtl8723be_sw_led_off(hw, pled0);
+}
+
+static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ unsigned char bytetmp;
+ unsigned short wordtmp;
+ u16 retry = 0;
+ bool mac_func_enable;
+
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+
+ /*Auto Power Down to CHIP-off State*/
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7));
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_CR);
+ if (bytetmp == 0xFF)
+ mac_func_enable = true;
+ else
+ mac_func_enable = false;
+
+ /* HW Power on sequence */
+ if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
+ PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
+ RTL8723_NIC_ENABLE_FLOW)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "init MAC Fail as power on failure\n");
+ return false;
+ }
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4);
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_CR);
+ bytetmp = 0xff;
+ rtl_write_byte(rtlpriv, REG_CR, bytetmp);
+ mdelay(2);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_HWSEQ_CTRL);
+ bytetmp |= 0x7f;
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, bytetmp);
+ mdelay(2);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CFG + 3);
+ if (bytetmp & BIT(0)) {
+ bytetmp = rtl_read_byte(rtlpriv, 0x7c);
+ bytetmp |= BIT(6);
+ rtl_write_byte(rtlpriv, 0x7c, bytetmp);
+ }
+ bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CLKR);
+ bytetmp |= BIT(3);
+ rtl_write_byte(rtlpriv, REG_SYS_CLKR, bytetmp);
+ bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1);
+ bytetmp &= ~BIT(4);
+ rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+3);
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+3, bytetmp | 0x77);
+
+ rtl_write_word(rtlpriv, REG_CR, 0x2ff);
+
+ if (!mac_func_enable) {
+ if (!_rtl8723be_llt_table_init(hw))
+ return false;
+ }
+ rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
+ rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff);
+
+ /* Enable FW Beamformer Interrupt */
+ bytetmp = rtl_read_byte(rtlpriv, REG_FWIMR + 3);
+ rtl_write_byte(rtlpriv, REG_FWIMR + 3, bytetmp | BIT(6));
+
+ wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
+ wordtmp &= 0xf;
+ wordtmp |= 0xF5B1;
+ rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);
+
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+ rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF);
+ rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);
+
+ rtl_write_byte(rtlpriv, 0x4d0, 0x0);
+
+ rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
+ ((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
+ DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_MGQ_DESA,
+ (u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
+ DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_VOQ_DESA,
+ (u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_VIQ_DESA,
+ (u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_BEQ_DESA,
+ (u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_BKQ_DESA,
+ (u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_HQ_DESA,
+ (u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
+ DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_RX_DESA,
+ (u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma &
+ DMA_BIT_MASK(32));
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG + 3);
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, bytetmp | 0x77);
+
+ rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6));
+
+ rtl_write_byte(rtlpriv, REG_SECONDARY_CCA_CTRL, 0x3);
+
+ do {
+ retry++;
+ bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+ } while ((retry < 200) && (bytetmp & BIT(7)));
+
+ _rtl8723be_gen_refresh_led_state(hw);
+
+ rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
+ rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, bytetmp & ~BIT(2));
+
+ return true;
+}
+
+static void _rtl8723be_hw_configure(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 reg_bw_opmode;
+ u32 reg_ratr, reg_prsr;
+
+ reg_bw_opmode = BW_OPMODE_20MHZ;
+ reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
+ RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+
+ rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+}
+
+static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ rtl_write_byte(rtlpriv, 0x34b, 0x93);
+ rtl_write_word(rtlpriv, 0x350, 0x870c);
+ rtl_write_byte(rtlpriv, 0x352, 0x1);
+
+ if (ppsc->support_backdoor)
+ rtl_write_byte(rtlpriv, 0x349, 0x1b);
+ else
+ rtl_write_byte(rtlpriv, 0x349, 0x03);
+
+ rtl_write_word(rtlpriv, 0x350, 0x2718);
+ rtl_write_byte(rtlpriv, 0x352, 0x1);
+}
+
+void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 sec_reg_value;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
+
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
+ return;
+ }
+ sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE;
+
+ if (rtlpriv->sec.use_defaultkey) {
+ sec_reg_value |= SCR_TXUSEDK;
+ sec_reg_value |= SCR_RXUSEDK;
+ }
+ sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "The SECR-value %x\n",
+ sec_reg_value);
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+}
+
+int rtl8723be_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ bool rtstatus = true;
+ int err;
+ u8 tmp_u1b;
+ unsigned long flags;
+
+ /* reenable interrupts to not interfere with other devices */
+ local_save_flags(flags);
+ local_irq_enable();
+
+ rtlpriv->rtlhal.being_init_adapter = true;
+ rtlpriv->intf_ops->disable_aspm(hw);
+ rtstatus = _rtl8723be_init_mac(hw);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ err = 1;
+ goto exit;
+ }
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CFG);
+ tmp_u1b &= 0x7F;
+ rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b);
+
+ err = rtl8723_download_fw(hw, true);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
+ err = 1;
+ rtlhal->fw_ready = false;
+ goto exit;
+ } else {
+ rtlhal->fw_ready = true;
+ }
+ rtlhal->last_hmeboxnum = 0;
+ rtl8723be_phy_mac_config(hw);
+ /* The previous call modified RCR, so refresh the cached
+ * receive_config here; otherwise throughput becomes unstable
+ * because receive_config is stale: RX RCR_ACRC32 causes unstable
+ * throughput and RX RCR_APP_ICV makes mac80211 disassociate from
+ * Cisco 1252 APs.
+ */
+ rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
+ rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+
+ rtl8723be_phy_bb_config(hw);
+ rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+ rtl8723be_phy_rf_config(hw);
+
+ rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
+ RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
+ RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtlphy->rfreg_chnlval[0] &= 0xFFF03FF;
+ rtlphy->rfreg_chnlval[0] |= (BIT(10) | BIT(11));
+
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
+ _rtl8723be_hw_configure(hw);
+ rtl_cam_reset_all_entry(hw);
+ rtl8723be_enable_hw_security_config(hw);
+
+ ppsc->rfpwr_state = ERFON;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+ _rtl8723be_enable_aspm_back_door(hw);
+ rtlpriv->intf_ops->enable_aspm(hw);
+
+ rtl8723be_bt_hw_init(hw);
+
+ rtl_set_bbreg(hw, 0x64, BIT(20), 0);
+ rtl_set_bbreg(hw, 0x64, BIT(24), 0);
+
+ rtl_set_bbreg(hw, 0x40, BIT(4), 0);
+ rtl_set_bbreg(hw, 0x40, BIT(3), 1);
+
+ rtl_set_bbreg(hw, 0x944, BIT(0)|BIT(1), 0x3);
+ rtl_set_bbreg(hw, 0x930, 0xff, 0x77);
+
+ rtl_set_bbreg(hw, 0x38, BIT(11), 0x1);
+
+ rtl_set_bbreg(hw, 0xb2c, 0xffffffff, 0x80000000);
+
+ if (ppsc->rfpwr_state == ERFON) {
+ rtl8723be_dm_check_txpower_tracking(hw);
+ rtl8723be_phy_lc_calibrate(hw);
+ }
+ tmp_u1b = efuse_read_1byte(hw, 0x1FA);
+ if (!(tmp_u1b & BIT(0))) {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
+ }
+ if (!(tmp_u1b & BIT(4))) {
+ tmp_u1b = rtl_read_byte(rtlpriv, 0x16);
+ tmp_u1b &= 0x0F;
+ rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
+ udelay(10);
+ rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
+ }
+ rtl8723be_dm_init(hw);
+exit:
+ local_irq_restore(flags);
+ rtlpriv->rtlhal.being_init_adapter = false;
+ return err;
+}
+
+static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ enum version_8723e version = VERSION_UNKNOWN;
+ u8 count = 0;
+ u8 value8;
+ u32 value32;
+
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0);
+
+ value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 2);
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 2, value8 | BIT(0));
+
+ value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, value8 | BIT(0));
+
+ value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+ while (((value8 & BIT(0))) && (count++ < 100)) {
+ udelay(10);
+ value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+ }
+ count = 0;
+ value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
+ while ((value8 == 0) && (count++ < 50)) {
+ value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
+ mdelay(1);
+ }
+ value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1);
+ if ((value32 & (CHIP_8723B)) != CHIP_8723B)
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "unkown chip version\n");
+ else
+ version = (enum version_8723e) VERSION_TEST_CHIP_1T1R_8723B;
+
+ rtlphy->rf_type = RF_1T1R;
+
+ value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
+ if (value8 >= 0x02)
+ version |= BIT(3);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R");
+
+ return version;
+}
+
+static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc;
+ enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+
+ rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
+ RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
+ "clear 0x550 when set HW_VAR_MEDIA_STATUS\n");
+
+ if (type == NL80211_IFTYPE_UNSPECIFIED ||
+ type == NL80211_IFTYPE_STATION) {
+ _rtl8723be_stop_tx_beacon(hw);
+ _rtl8723be_enable_bcn_sub_func(hw);
+ } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
+ _rtl8723be_resume_tx_beacon(hw);
+ _rtl8723be_disable_bcn_sub_func(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: "
+ "No such media status(%x).\n", type);
+ }
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ bt_msr |= MSR_NOLINK;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ bt_msr |= MSR_ADHOC;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
+ break;
+ case NL80211_IFTYPE_STATION:
+ bt_msr |= MSR_INFRA;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
+ break;
+ case NL80211_IFTYPE_AP:
+ bt_msr |= MSR_AP;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Network type %d not support!\n", type);
+ return 1;
+ }
+ rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ rtlpriv->cfg->ops->led_control(hw, ledaction);
+ if ((bt_msr & 0x03) == MSR_AP)
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ else
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+ return 0;
+}
+
+void rtl8723be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u32 reg_rcr = rtlpci->receive_config;
+
+ if (rtlpriv->psc.rfpwr_state != ERFON)
+ return;
+
+ if (check_bssid) {
+ reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&reg_rcr));
+ _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4));
+ } else if (!check_bssid) {
+ reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&reg_rcr));
+ }
+}
+
+int rtl8723be_set_network_type(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (_rtl8723be_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+
+ if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+ if (type != NL80211_IFTYPE_AP)
+ rtl8723be_set_check_bssid(hw, true);
+ } else {
+ rtl8723be_set_check_bssid(hw, false);
+ }
+ return 0;
+}
+
+/* Do not set REG_EDCA_BE_PARAM here because mac80211
+ * will still send packets while scanning.
+ */
+void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ rtl8723_dm_init_edca_turbo(hw);
+ switch (aci) {
+ case AC1_BK:
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
+ break;
+ case AC0_BE:
+ break;
+ case AC2_VI:
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
+ break;
+ case AC3_VO:
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
+ break;
+ default:
+ RT_ASSERT(false, "invalid aci: %d !\n", aci);
+ break;
+ }
+}
+
+void rtl8723be_enable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
+ /* Some C2H commands (e.g. C2H, CPWM) may have been sent before
+ * the system interrupt was enabled, so clear all C2H events that
+ * the FW has already notified; otherwise the FW will not schedule
+ * any further commands.
+ */
+ rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0);
+ /*enable system interrupt*/
+ rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF);
+}
+
+void rtl8723be_disable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED);
+ rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED);
+ rtlpci->irq_enabled = false;
+ synchronize_irq(rtlpci->pdev->irq);
+}
+
+static void _rtl8723be_poweroff_adapter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 u1b_tmp;
+
+ /* Combo (PCIe + USB) Card and PCIe-MF Card */
+ /* 1. Run LPS WL RFOFF flow */
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8723_NIC_LPS_ENTER_FLOW);
+
+ /* 2. 0x1F[7:0] = 0 */
+ /* turn off RF */
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+ if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) &&
+ rtlhal->fw_ready)
+ rtl8723be_firmware_selfreset(hw);
+
+ /* Reset MCU. Suggested by Filen. */
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2))));
+
+ /* g. MCUFWDL 0x80[1:0]= 0 */
+ /* reset MCU ready status */
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+
+ /* HW card disable configuration. */
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8723_NIC_DISABLE_FLOW);
+
+ /* Reset MCU IO Wrapper */
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0))));
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0));
+
+ /* 7. RSV_CTRL 0x1C[7:0] = 0x0E */
+ /* lock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
+}
+
+void rtl8723be_card_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum nl80211_iftype opmode;
+
+ mac->link_state = MAC80211_NOLINK;
+ opmode = NL80211_IFTYPE_UNSPECIFIED;
+ _rtl8723be_set_media_status(hw, opmode);
+ if (rtlpriv->rtlhal.driver_is_goingto_unload ||
+ ppsc->rfoff_reason > RF_CHANGE_BY_PS)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ _rtl8723be_poweroff_adapter(hw);
+
+ /* after power off we should do iqk again */
+ rtlpriv->phy.iqk_initialized = false;
+}
+
+void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
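+ /* Read the enabled interrupt status bits and write them back to
+ * acknowledge them (these status registers appear to be
+ * write-one-to-clear).
+ */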
+ *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, *p_inta);
+
+ *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) &
+ rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+}
+
+void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval, atim_window;
+
+ bcn_interval = mac->beacon_interval;
+ atim_window = 2; /*FIX MERGE */
+ rtl8723be_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
+ rtl_write_byte(rtlpriv, 0x606, 0x30);
+ rtl8723be_enable_interrupt(hw);
+}
+
+void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval = mac->beacon_interval;
+
+ RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
+ rtl8723be_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl8723be_enable_interrupt(hw);
+}
+
+void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+
+ if (add_msr)
+ rtlpci->irq_mask[0] |= add_msr;
+ if (rm_msr)
+ rtlpci->irq_mask[0] &= (~rm_msr);
+ rtl8723be_disable_interrupt(hw);
+ rtl8723be_enable_interrupt(hw);
+}
+
+static u8 _rtl8723be_get_chnl_group(u8 chnl)
+{
+ u8 group;
+
+ if (chnl < 3)
+ group = 0;
+ else if (chnl < 9)
+ group = 1;
+ else
+ group = 2;
+ return group;
+}
+
+static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw,
+ struct txpower_info_2g *pw2g,
+ struct txpower_info_5g *pw5g,
+ bool autoload_fail, u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 path, addr = EEPROM_TX_PWR_INX, group, cnt = 0;
+
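+ /* Walk the TX power index block of the efuse map: per RF path,
+ * read the 2.4G CCK and BW40 base indexes and then the packed
+ * 4-bit diff nibbles (an 0xFF byte means 'not programmed' and
+ * falls back to a default), followed by the 5G section.
+ */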
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "hal_ReadPowerValueFromPROM8723BE(): "
+ "PROMContent[0x%x]= 0x%x\n",
+ (addr + 1), hwinfo[addr + 1]);
+ if (0xFF == hwinfo[addr + 1]) /*YJ, add, 120316*/
+ autoload_fail = true;
+
+ if (autoload_fail) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "auto load fail : Use Default value!\n");
+ for (path = 0; path < MAX_RF_PATH; path++) {
+ /* 2.4G default value */
+ for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+ pw2g->index_cck_base[path][group] = 0x2D;
+ pw2g->index_bw40_base[path][group] = 0x2D;
+ }
+ for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
+ if (cnt == 0) {
+ pw2g->bw20_diff[path][0] = 0x02;
+ pw2g->ofdm_diff[path][0] = 0x04;
+ } else {
+ pw2g->bw20_diff[path][cnt] = 0xFE;
+ pw2g->bw40_diff[path][cnt] = 0xFE;
+ pw2g->cck_diff[path][cnt] = 0xFE;
+ pw2g->ofdm_diff[path][cnt] = 0xFE;
+ }
+ }
+ }
+ return;
+ }
+ for (path = 0; path < MAX_RF_PATH; path++) {
+ /*2.4G default value*/
+ for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+ pw2g->index_cck_base[path][group] = hwinfo[addr++];
+ if (pw2g->index_cck_base[path][group] == 0xFF)
+ pw2g->index_cck_base[path][group] = 0x2D;
+ }
+ for (group = 0; group < MAX_CHNL_GROUP_24G - 1; group++) {
+ pw2g->index_bw40_base[path][group] = hwinfo[addr++];
+ if (pw2g->index_bw40_base[path][group] == 0xFF)
+ pw2g->index_bw40_base[path][group] = 0x2D;
+ }
+ for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
+ if (cnt == 0) {
+ pw2g->bw40_diff[path][cnt] = 0;
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->bw20_diff[path][cnt] = 0x02;
+ } else {
+ pw2g->bw20_diff[path][cnt] =
+ (hwinfo[addr] & 0xf0) >> 4;
+ /* 4 bit sign number to 8 bit sign number */
+ if (pw2g->bw20_diff[path][cnt] & BIT(3))
+ pw2g->bw20_diff[path][cnt] |= 0xF0;
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->ofdm_diff[path][cnt] = 0x04;
+ } else {
+ pw2g->ofdm_diff[path][cnt] =
+ (hwinfo[addr] & 0x0f);
+ /* 4 bit sign number to 8 bit sign number */
+ if (pw2g->ofdm_diff[path][cnt] & BIT(3))
+ pw2g->ofdm_diff[path][cnt] |=
+ 0xF0;
+ }
+ pw2g->cck_diff[path][cnt] = 0;
+ addr++;
+ } else {
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->bw40_diff[path][cnt] = 0xFE;
+ } else {
+ pw2g->bw40_diff[path][cnt] =
+ (hwinfo[addr] & 0xf0) >> 4;
+ if (pw2g->bw40_diff[path][cnt] & BIT(3))
+ pw2g->bw40_diff[path][cnt] |=
+ 0xF0;
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->bw20_diff[path][cnt] = 0xFE;
+ } else {
+ pw2g->bw20_diff[path][cnt] =
+ (hwinfo[addr] & 0x0f);
+ if (pw2g->bw20_diff[path][cnt] & BIT(3))
+ pw2g->bw20_diff[path][cnt] |=
+ 0xF0;
+ }
+ addr++;
+
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->ofdm_diff[path][cnt] = 0xFE;
+ } else {
+ pw2g->ofdm_diff[path][cnt] =
+ (hwinfo[addr] & 0xf0) >> 4;
+ if (pw2g->ofdm_diff[path][cnt] & BIT(3))
+ pw2g->ofdm_diff[path][cnt] |=
+ 0xF0;
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->cck_diff[path][cnt] = 0xFE;
+ } else {
+ pw2g->cck_diff[path][cnt] =
+ (hwinfo[addr] & 0x0f);
+ if (pw2g->cck_diff[path][cnt] & BIT(3))
+ pw2g->cck_diff[path][cnt] |=
+ 0xF0;
+ }
+ addr++;
+ }
+ }
+ /*5G default value*/
+ for (group = 0; group < MAX_CHNL_GROUP_5G; group++) {
+ pw5g->index_bw40_base[path][group] = hwinfo[addr++];
+ if (pw5g->index_bw40_base[path][group] == 0xFF)
+ pw5g->index_bw40_base[path][group] = 0xFE;
+ }
+ for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
+ if (cnt == 0) {
+ pw5g->bw40_diff[path][cnt] = 0;
+
+ if (hwinfo[addr] == 0xFF) {
+ pw5g->bw20_diff[path][cnt] = 0;
+ } else {
+ pw5g->bw20_diff[path][cnt] =
+ (hwinfo[addr] & 0xf0) >> 4;
+ if (pw5g->bw20_diff[path][cnt] & BIT(3))
+ pw5g->bw20_diff[path][cnt] |=
+ 0xF0;
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw5g->ofdm_diff[path][cnt] = 0x04;
+ } else {
+ pw5g->ofdm_diff[path][cnt] =
+ (hwinfo[addr] & 0x0f);
+ if (pw5g->ofdm_diff[path][cnt] & BIT(3))
+ pw5g->ofdm_diff[path][cnt] |=
+ 0xF0;
+ }
+ addr++;
+ } else {
+ if (hwinfo[addr] == 0xFF) {
+ pw5g->bw40_diff[path][cnt] = 0xFE;
+ } else {
+ pw5g->bw40_diff[path][cnt] =
+ (hwinfo[addr] & 0xf0) >> 4;
+ if (pw5g->bw40_diff[path][cnt] & BIT(3))
+ pw5g->bw40_diff[path][cnt] |= 0xF0;
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw5g->bw20_diff[path][cnt] = 0xFE;
+ } else {
+ pw5g->bw20_diff[path][cnt] =
+ (hwinfo[addr] & 0x0f);
+ if (pw5g->bw20_diff[path][cnt] & BIT(3))
+ pw5g->bw20_diff[path][cnt] |= 0xF0;
+ }
+ addr++;
+ }
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw5g->ofdm_diff[path][1] = 0xFE;
+ pw5g->ofdm_diff[path][2] = 0xFE;
+ } else {
+ pw5g->ofdm_diff[path][1] = (hwinfo[addr] & 0xf0) >> 4;
+ pw5g->ofdm_diff[path][2] = (hwinfo[addr] & 0x0f);
+ }
+ addr++;
+
+ if (hwinfo[addr] == 0xFF)
+ pw5g->ofdm_diff[path][3] = 0xFE;
+ else
+ pw5g->ofdm_diff[path][3] = (hwinfo[addr] & 0x0f);
+ addr++;
+
+ for (cnt = 1; cnt < MAX_TX_COUNT; cnt++) {
+ if (pw5g->ofdm_diff[path][cnt] == 0xFF)
+ pw5g->ofdm_diff[path][cnt] = 0xFE;
+ else if (pw5g->ofdm_diff[path][cnt] & BIT(3))
+ pw5g->ofdm_diff[path][cnt] |= 0xF0;
+ }
+ }
+}
+
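+/* Copy the parsed PROM power data into rtl_efuse: per-channel CCK and
+ * HT40_1S base levels, HT20/HT40/legacy-OFDM diffs, the thermal meter and
+ * the regulatory setting, substituting defaults when autoload failed.
+ */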
+static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct txpower_info_2g pw2g;
+ struct txpower_info_5g pw5g;
+ u8 rf_path, index;
+ u8 i;
+
+ _rtl8723be_read_power_value_fromprom(hw, &pw2g, &pw5g, autoload_fail,
+ hwinfo);
+
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < 14; i++) {
+ index = _rtl8723be_get_chnl_group(i+1);
+
+ rtlefuse->txpwrlevel_cck[rf_path][i] =
+ pw2g.index_cck_base[rf_path][index];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+ pw2g.index_bw40_base[rf_path][index];
+ }
+ for (i = 0; i < MAX_TX_COUNT; i++) {
+ rtlefuse->txpwr_ht20diff[rf_path][i] =
+ pw2g.bw20_diff[rf_path][i];
+ rtlefuse->txpwr_ht40diff[rf_path][i] =
+ pw2g.bw40_diff[rf_path][i];
+ rtlefuse->txpwr_legacyhtdiff[rf_path][i] =
+ pw2g.ofdm_diff[rf_path][i];
+ }
+ for (i = 0; i < 14; i++) {
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ "RF(%d)-Ch(%d) [CCK / HT40_1S ] = [0x%x / 0x%x ]\n",
+ rf_path, i,
+ rtlefuse->txpwrlevel_cck[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i]);
+ }
+ }
+ if (!autoload_fail)
+ rtlefuse->eeprom_thermalmeter =
+ hwinfo[EEPROM_THERMAL_METER_88E];
+ else
+ rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
+
+ if (rtlefuse->eeprom_thermalmeter == 0xff || autoload_fail) {
+ rtlefuse->apk_thermalmeterignore = true;
+ rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
+ }
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
+
+ if (!autoload_fail) {
+ rtlefuse->eeprom_regulatory =
+ hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0x07;/*bit0~2*/
+ if (hwinfo[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
+ rtlefuse->eeprom_regulatory = 0;
+ } else {
+ rtlefuse->eeprom_regulatory = 0;
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
+}
+
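+/* Read the shadowed eFuse map and extract the adapter identity: EEPROM ID,
+ * PCI VID/DID/SVID/SMID, MAC address, crystal cap, TX power and BT-coexist
+ * data, then map the subsystem IDs to a customer (OEM) ID.
+ */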
+static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
+ bool pseudo_test)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 i, usvalue;
+ u8 hwinfo[HWSET_MAX_SIZE];
+ u16 eeprom_id;
+ bool is_toshiba_smid1 = false;
+ bool is_toshiba_smid2 = false;
+ bool is_samsung_smid = false;
+ bool is_lenovo_smid = false;
+ u16 toshiba_smid1[] = {
+ 0x6151, 0x6152, 0x6154, 0x6155, 0x6177, 0x6178, 0x6179, 0x6180,
+ 0x7151, 0x7152, 0x7154, 0x7155, 0x7177, 0x7178, 0x7179, 0x7180,
+ 0x8151, 0x8152, 0x8154, 0x8155, 0x8181, 0x8182, 0x8184, 0x8185,
+ 0x9151, 0x9152, 0x9154, 0x9155, 0x9181, 0x9182, 0x9184, 0x9185
+ };
+ u16 toshiba_smid2[] = {
+ 0x6181, 0x6184, 0x6185, 0x7181, 0x7182, 0x7184, 0x7185, 0x8181,
+ 0x8182, 0x8184, 0x8185, 0x9181, 0x9182, 0x9184, 0x9185
+ };
+ u16 samsung_smid[] = {
+ 0x6191, 0x6192, 0x6193, 0x7191, 0x7192, 0x7193, 0x8191, 0x8192,
+ 0x8193, 0x9191, 0x9192, 0x9193
+ };
+ u16 lenovo_smid[] = {
+ 0x8195, 0x9195, 0x7194, 0x8200, 0x8201, 0x8202, 0x9199, 0x9200
+ };
+
+ if (pseudo_test) {
+ /* needs to be added */
+ return;
+ }
+ if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ rtl_efuse_shadow_map_update(hw);
+
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE);
+ } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "RTL819X Not boot from eeprom, check it !!");
+ }
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
+ hwinfo, HWSET_MAX_SIZE);
+
+ eeprom_id = *((u16 *)&hwinfo[0]);
+ if (eeprom_id != RTL8723BE_EEPROM_ID) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
+ rtlefuse->autoload_failflag = true;
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtlefuse->autoload_failflag = false;
+ }
+ if (rtlefuse->autoload_failflag)
+ return;
+
+ rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
+ rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+ rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
+ rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROMId = 0x%4x\n", eeprom_id);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
+ *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "dev_addr: %pM\n",
+ rtlefuse->dev_addr);
+
+ /*parse xtal*/
+ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8723BE];
+ if (rtlefuse->crystalcap == 0xFF)
+ rtlefuse->crystalcap = 0x20;
+
+ _rtl8723be_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
+ hwinfo);
+
+ rtl8723be_read_bt_coexist_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag,
+ hwinfo);
+
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+ rtlefuse->txpwr_fromeprom = true;
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+
+ /* set channel plan to world wide 13 */
+ rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+
+ if (rtlhal->oem_id == RT_CID_DEFAULT) {
+ /* Does this one have a Toshiba SMID from group 1? */
+ for (i = 0; i < ARRAY_SIZE(toshiba_smid1); i++) {
+ if (rtlefuse->eeprom_smid == toshiba_smid1[i]) {
+ is_toshiba_smid1 = true;
+ break;
+ }
+ }
+ /* Does this one have a Toshiba SMID from group 2? */
+ for (i = 0; i < ARRAY_SIZE(toshiba_smid2); i++) {
+ if (rtlefuse->eeprom_smid == toshiba_smid2[i]) {
+ is_toshiba_smid2 = true;
+ break;
+ }
+ }
+ /* Does this one have a Samsung SMID? */
+ for (i = 0; i < ARRAY_SIZE(samsung_smid); i++) {
+ if (rtlefuse->eeprom_smid == samsung_smid[i]) {
+ is_samsung_smid = true;
+ break;
+ }
+ }
+ /* Does this one have a Lenovo SMID? */
+ for (i = 0; i < ARRAY_SIZE(lenovo_smid); i++) {
+ if (rtlefuse->eeprom_smid == lenovo_smid[i]) {
+ is_lenovo_smid = true;
+ break;
+ }
+ }
+ switch (rtlefuse->eeprom_oemid) {
+ case EEPROM_CID_DEFAULT:
+ if (rtlefuse->eeprom_did == 0x8176) {
+ if (rtlefuse->eeprom_svid == 0x10EC &&
+ is_toshiba_smid1) {
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ } else if (rtlefuse->eeprom_svid == 0x1025) {
+ rtlhal->oem_id = RT_CID_819X_ACER;
+ } else if (rtlefuse->eeprom_svid == 0x10EC &&
+ is_samsung_smid) {
+ rtlhal->oem_id = RT_CID_819X_SAMSUNG;
+ } else if (rtlefuse->eeprom_svid == 0x10EC &&
+ is_lenovo_smid) {
+ rtlhal->oem_id = RT_CID_819X_LENOVO;
+ } else if ((rtlefuse->eeprom_svid == 0x10EC &&
+ rtlefuse->eeprom_smid == 0x8197) ||
+ (rtlefuse->eeprom_svid == 0x10EC &&
+ rtlefuse->eeprom_smid == 0x9196)) {
+ rtlhal->oem_id = RT_CID_819X_CLEVO;
+ } else if ((rtlefuse->eeprom_svid == 0x1028 &&
+ rtlefuse->eeprom_smid == 0x8194) ||
+ (rtlefuse->eeprom_svid == 0x1028 &&
+ rtlefuse->eeprom_smid == 0x8198) ||
+ (rtlefuse->eeprom_svid == 0x1028 &&
+ rtlefuse->eeprom_smid == 0x9197) ||
+ (rtlefuse->eeprom_svid == 0x1028 &&
+ rtlefuse->eeprom_smid == 0x9198)) {
+ rtlhal->oem_id = RT_CID_819X_DELL;
+ } else if ((rtlefuse->eeprom_svid == 0x103C &&
+ rtlefuse->eeprom_smid == 0x1629)) {
+ rtlhal->oem_id = RT_CID_819X_HP;
+ } else if ((rtlefuse->eeprom_svid == 0x1A32 &&
+ rtlefuse->eeprom_smid == 0x2315)) {
+ rtlhal->oem_id = RT_CID_819X_QMI;
+ } else if ((rtlefuse->eeprom_svid == 0x10EC &&
+ rtlefuse->eeprom_smid == 0x8203)) {
+ rtlhal->oem_id = RT_CID_819X_PRONETS;
+ } else if ((rtlefuse->eeprom_svid == 0x1043 &&
+ rtlefuse->eeprom_smid == 0x84B5)) {
+ rtlhal->oem_id = RT_CID_819X_EDIMAX_ASUS;
+ } else {
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ }
+ } else if (rtlefuse->eeprom_did == 0x8178) {
+ if (rtlefuse->eeprom_svid == 0x10EC &&
+ is_toshiba_smid2)
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ else if (rtlefuse->eeprom_svid == 0x1025)
+ rtlhal->oem_id = RT_CID_819X_ACER;
+ else if ((rtlefuse->eeprom_svid == 0x10EC &&
+ rtlefuse->eeprom_smid == 0x8186))
+ rtlhal->oem_id = RT_CID_819X_PRONETS;
+ else if ((rtlefuse->eeprom_svid == 0x1043 &&
+ rtlefuse->eeprom_smid == 0x84B6))
+ rtlhal->oem_id =
+ RT_CID_819X_EDIMAX_ASUS;
+ else
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ } else {
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ }
+ break;
+ case EEPROM_CID_TOSHIBA:
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ break;
+ case EEPROM_CID_CCX:
+ rtlhal->oem_id = RT_CID_CCX;
+ break;
+ case EEPROM_CID_QMI:
+ rtlhal->oem_id = RT_CID_819X_QMI;
+ break;
+ case EEPROM_CID_WHQL:
+ break;
+ default:
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ break;
+ }
+ }
+}
+
+static void _rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ pcipriv->ledctl.led_opendrain = true;
+ switch (rtlhal->oem_id) {
+ case RT_CID_819X_HP:
+ pcipriv->ledctl.led_opendrain = true;
+ break;
+ case RT_CID_819X_LENOVO:
+ case RT_CID_DEFAULT:
+ case RT_CID_TOSHIBA:
+ case RT_CID_CCX:
+ case RT_CID_819X_ACER:
+ case RT_CID_WHQL:
+ default:
+ break;
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+}
+
+void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_u1b;
+
+ rtlhal->version = _rtl8723be_read_chip_version(hw);
+ if (get_rf_type(rtlphy) == RF_1T1R)
+ rtlpriv->dm.rfpath_rxenable[0] = true;
+ else
+ rtlpriv->dm.rfpath_rxenable[0] =
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+ if (tmp_u1b & BIT(4)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtlefuse->epromtype = EEPROM_93C46;
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
+ }
+ if (tmp_u1b & BIT(5)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtlefuse->autoload_failflag = false;
+ _rtl8723be_read_adapter_info(hw, false);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+ }
+ _rtl8723be_hal_customized_behavior(hw);
+}
+
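+/* Legacy (non-RA-mask) rate control: build one rate bitmap for the station,
+ * trim it by wireless mode, RF type and BT-coexist state, and program it
+ * into REG_ARFR0.
+ */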
+static void rtl8723be_update_hal_rate_table(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 ratr_value;
+ u8 ratr_index = 0;
+ u8 nmode = mac->ht_enable;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+ u16 shortgi_rate;
+ u32 tmp_ratr_value;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = mac->mode;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_value = sta->supp_rates[1] << 4;
+ else
+ ratr_value = sta->supp_rates[0];
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_value = 0xfff;
+ ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ if (ratr_value & 0x0000000c)
+ ratr_value &= 0x0000000d;
+ else
+ ratr_value &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_value &= 0x00000FF5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ nmode = 1;
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
+ ratr_value &= 0x0007F005;
+ } else {
+ u32 ratr_mask;
+
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R)
+ ratr_mask = 0x000ff005;
+ else
+ ratr_mask = 0x0f0ff005;
+ ratr_value &= ratr_mask;
+ }
+ break;
+ default:
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_value &= 0x000ff0ff;
+ else
+ ratr_value &= 0x0f0ff0ff;
+ break;
+ }
+ if ((rtlpriv->btcoexist.bt_coexistence) &&
+ (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) &&
+ (rtlpriv->btcoexist.bt_cur_state) &&
+ (rtlpriv->btcoexist.bt_ant_isolation) &&
+ ((rtlpriv->btcoexist.bt_service == BT_SCO) ||
+ (rtlpriv->btcoexist.bt_service == BT_BUSY)))
+ ratr_value &= 0x0fffcfc0;
+ else
+ ratr_value &= 0x0FFFFFFF;
+
+ if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz))) {
+ ratr_value |= 0x10000000;
+ tmp_ratr_value = (ratr_value >> 12);
+
+ for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+ if ((1 << shortgi_rate) & tmp_ratr_value)
+ break;
+ }
+ shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
+}
+
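+/* Translate the driver's RATR index into the ARFR table id carried in the
+ * firmware RA-mask command.
+ */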
+static u8 _rtl8723be_mrate_idx_to_arfr_id(struct ieee80211_hw *hw,
+ u8 rate_index)
+{
+ u8 ret = 0;
+
+ switch (rate_index) {
+ case RATR_INX_WIRELESS_NGB:
+ ret = 1;
+ break;
+ case RATR_INX_WIRELESS_N:
+ case RATR_INX_WIRELESS_NG:
+ ret = 5;
+ break;
+ case RATR_INX_WIRELESS_NB:
+ ret = 3;
+ break;
+ case RATR_INX_WIRELESS_GB:
+ ret = 6;
+ break;
+ case RATR_INX_WIRELESS_G:
+ ret = 7;
+ break;
+ case RATR_INX_WIRELESS_B:
+ ret = 8;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
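+/* Firmware rate adaptation: compose the 7-byte H2C_8723BE_RA_MASK command
+ * (macid, ARFR id plus short-GI flag, bandwidth, then the 28-bit rate
+ * bitmap) after trimming the bitmap by wireless mode, RF type and RSSI.
+ */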
+static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_sta_info *sta_entry = NULL;
+ u32 ratr_bitmap;
+ u8 ratr_index;
+ u8 curtxbw_40mhz = (sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
+ u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = 0;
+ bool shortgi = false;
+ u8 rate_mask[7];
+ u8 macid = 0;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ wirelessmode = sta_entry->wireless_mode;
+ if (mac->opmode == NL80211_IFTYPE_STATION ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT)
+ curtxbw_40mhz = mac->bw_40;
+ else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC)
+ macid = sta->aid + 1;
+
+ ratr_bitmap = sta->supp_rates[0];
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_bitmap = 0xfff;
+
+ ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ ratr_index = RATR_INX_WIRELESS_B;
+ if (ratr_bitmap & 0x0000000c)
+ ratr_bitmap &= 0x0000000d;
+ else
+ ratr_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_index = RATR_INX_WIRELESS_GB;
+
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00000f00;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x00000ff0;
+ else
+ ratr_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_A:
+ ratr_index = RATR_INX_WIRELESS_A;
+ ratr_bitmap &= 0x00000ff0;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+
+ if (mimo_ps == IEEE80211_SMPS_STATIC ||
+ mimo_ps == IEEE80211_SMPS_DYNAMIC) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00070000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0007f000;
+ else
+ ratr_bitmap &= 0x0007f005;
+ } else {
+ if (rtlphy->rf_type == RF_1T1R) {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
+ } else {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f8f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f8ff000;
+ else
+ ratr_bitmap &= 0x0f8ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f8f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f8ff000;
+ else
+ ratr_bitmap &= 0x0f8ff005;
+ }
+ }
+ }
+ if ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz)) {
+ if (macid == 0)
+ shortgi = true;
+ else if (macid == 1)
+ shortgi = false;
+ }
+ break;
+ default:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_bitmap &= 0x000ff0ff;
+ else
+ ratr_bitmap &= 0x0f0ff0ff;
+ break;
+ }
+ sta_entry->ratr_index = ratr_index;
+
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
+ *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);
+ rate_mask[0] = macid;
+ rate_mask[1] = _rtl8723be_mrate_idx_to_arfr_id(hw, ratr_index) |
+ (shortgi ? 0x80 : 0x00);
+ rate_mask[2] = curtxbw_40mhz;
+ /* if (prox_priv->proxim_modeinfo->power_output > 0)
+ * rate_mask[2] |= BIT(6);
+ */
+
+ rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff);
+ rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8);
+ rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
+ rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
+
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4], rate_mask[5],
+ rate_mask[6]);
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RA_MASK, 7, rate_mask);
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
+}
+
+void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if (rtlpriv->dm.useramask)
+ rtl8723be_update_hal_rate_mask(hw, sta, rssi_level);
+ else
+ rtl8723be_update_hal_rate_table(hw, sta);
+}
+
+void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 sifs_timer;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
+ if (!mac->ht_enable)
+ sifs_timer = 0x0a0a;
+ else
+ sifs_timer = 0x0e0e;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+
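+/* Poll the hardware RF-kill input on REG_GPIO_PIN_CTRL_2 bit 1 (polarity
+ * selectable) and update ppsc->hwradiooff; returns true while the radio
+ * is on.
+ */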
+bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ u8 u1tmp;
+ bool actuallyset = false;
+
+ if (rtlpriv->rtlhal.being_init_adapter)
+ return false;
+
+ if (ppsc->swrf_processing)
+ return false;
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ return false;
+ } else {
+ ppsc->rfchange_inprogress = true;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ }
+ cur_rfstate = ppsc->rfpwr_state;
+
+ rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
+ rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2) & ~(BIT(1)));
+
+ u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2);
+
+ if (rtlphy->polarity_ctl)
+ e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON;
+ else
+ e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
+
+ if (ppsc->hwradiooff &&
+ (e_rfpowerstate_toset == ERFON)) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
+
+ e_rfpowerstate_toset = ERFON;
+ ppsc->hwradiooff = false;
+ actuallyset = true;
+ } else if (!ppsc->hwradiooff &&
+ (e_rfpowerstate_toset == ERFOFF)) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+
+ e_rfpowerstate_toset = ERFOFF;
+ ppsc->hwradiooff = true;
+ actuallyset = true;
+ }
+ if (actuallyset) {
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ } else {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ }
+ *valid = 1;
+ return !ppsc->hwradiooff;
+}
+
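+/* Program the hardware CAM: clear_all wipes the first entries; otherwise
+ * the key is mapped to a CAM slot (default-key index, broadcast entry for
+ * group keys, or a per-station entry in AP mode) and added or deleted
+ * depending on whether a key of that index is currently stored.
+ */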
+void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 *macaddr = p_macaddr;
+ u32 entry_id = 0;
+ bool is_pairwise = false;
+
+ static u8 cam_const_addr[4][6] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+ };
+ static u8 cam_const_broad[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ if (clear_all) {
+ u8 idx = 0;
+ u8 cam_offset = 0;
+ u8 clear_number = 5;
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+
+ for (idx = 0; idx < clear_number; idx++) {
+ rtl_cam_mark_invalid(hw, cam_offset + idx);
+ rtl_cam_empty_entry(hw, cam_offset + idx);
+
+ if (idx < 5) {
+ memset(rtlpriv->sec.key_buf[idx], 0,
+ MAX_KEY_LEN);
+ rtlpriv->sec.key_len[idx] = 0;
+ }
+ }
+ } else {
+ switch (enc_algo) {
+ case WEP40_ENCRYPTION:
+ enc_algo = CAM_WEP40;
+ break;
+ case WEP104_ENCRYPTION:
+ enc_algo = CAM_WEP104;
+ break;
+ case TKIP_ENCRYPTION:
+ enc_algo = CAM_TKIP;
+ break;
+ case AESCCMP_ENCRYPTION:
+ enc_algo = CAM_AES;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ enc_algo = CAM_TKIP;
+ break;
+ }
+
+ if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+ macaddr = cam_const_addr[key_index];
+ entry_id = key_index;
+ } else {
+ if (is_group) {
+ macaddr = cam_const_broad;
+ entry_id = key_index;
+ } else {
+ if (mac->opmode == NL80211_IFTYPE_AP) {
+ entry_id = rtl_cam_get_free_entry(hw,
+ p_macaddr);
+ if (entry_id >= TOTAL_CAM_ENTRY) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
+ "Can not find free hw security cam entry\n");
+ return;
+ }
+ } else {
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ }
+ key_index = PAIRWISE_KEYIDX;
+ is_pairwise = true;
+ }
+ }
+ if (rtlpriv->sec.key_len[key_index] == 0) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
+ if (mac->opmode == NL80211_IFTYPE_AP)
+ rtl_cam_del_entry(hw, p_macaddr);
+ rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
+ if (is_pairwise) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
+
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[key_index]);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_cam_add_one_entry(hw,
+ rtlefuse->dev_addr,
+ PAIRWISE_KEYIDX,
+ CAM_PAIRWISE_KEY_POSITION,
+ enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf
+ [entry_id]);
+ }
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[entry_id]);
+ }
+ }
+ }
+}
+
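+/* BT-coexist info from the eFuse: bit 18 of REG_MULTI_FUNC_CTRL flags BT
+ * coexistence, the low bit of RF_OPTION4 selects the antenna number, and
+ * defaults are used when autoload failed.
+ */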
+void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool auto_load_fail, u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value;
+ u32 tmpu_32;
+
+ if (!auto_load_fail) {
+ tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
+ if (tmpu_32 & BIT(18))
+ rtlpriv->btcoexist.btc_info.btcoexist = 1;
+ else
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ value = hwinfo[RF_OPTION4];
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
+ rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+ } else {
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
+ rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+ }
+}
+
+void rtl8723be_bt_reg_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* 0:Low, 1:High, 2:From Efuse. */
+ rtlpriv->btcoexist.reg_bt_iso = 2;
+ /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
+ rtlpriv->btcoexist.reg_bt_sco = 3;
+ /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
+ rtlpriv->btcoexist.reg_bt_sco = 0;
+}
+
+void rtl8723be_bt_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv);
+}
+
+void rtl8723be_suspend(struct ieee80211_hw *hw)
+{
+}
+
+void rtl8723be_resume(struct ieee80211_hw *hw)
+{
+}
+
+/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
+void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
+ bool write_into_reg)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ if (allow_all_da) /* Set BIT0 */
+ rtlpci->receive_config |= RCR_AAP;
+ else /* Clear BIT0 */
+ rtlpci->receive_config &= ~RCR_AAP;
+
+ if (write_into_reg)
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+
+ RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
+ "receive_config = 0x%08X, write_into_reg =%d\n",
+ rtlpci->receive_config, write_into_reg);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
new file mode 100644
index 000000000000..b7449a9b57e4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
@@ -0,0 +1,64 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_HW_H__
+#define __RTL8723BE_HW_H__
+
+void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw);
+
+void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb);
+int rtl8723be_hw_init(struct ieee80211_hw *hw);
+void rtl8723be_card_disable(struct ieee80211_hw *hw);
+void rtl8723be_enable_interrupt(struct ieee80211_hw *hw);
+void rtl8723be_disable_interrupt(struct ieee80211_hw *hw);
+int rtl8723be_set_network_type(struct ieee80211_hw *hw,
+ enum nl80211_iftype type);
+void rtl8723be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level);
+void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo);
+void rtl8723be_bt_reg_init(struct ieee80211_hw *hw);
+void rtl8723be_bt_hw_init(struct ieee80211_hw *hw);
+void rtl8723be_suspend(struct ieee80211_hw *hw);
+void rtl8723be_resume(struct ieee80211_hw *hw);
+void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
+ bool write_into_reg);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/rtlwifi/rtl8723be/led.c
new file mode 100644
index 000000000000..cb931a38dc48
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/led.c
@@ -0,0 +1,153 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "reg.h"
+#include "led.h"
+
+static void _rtl8723be_init_led(struct ieee80211_hw *hw, struct rtl_led *pled,
+ enum rtl_led_pin ledpin)
+{
+ pled->hw = hw;
+ pled->ledpin = ledpin;
+ pled->ledon = false;
+}
+
+void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u8 ledcfg;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin);
+
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+ ledcfg &= ~BIT(6);
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5));
+ break;
+ case LED_PIN_LED1:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+ rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+ pled->ledon = true;
+}
+
+void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u8 ledcfg;
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin);
+
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg &= 0xf0;
+ if (pcipriv->ledctl.led_opendrain) {
+ ledcfg &= 0x90; /* Set to software control. */
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
+ ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
+ ledcfg &= 0xFE;
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg);
+ } else {
+ ledcfg &= ~BIT(6);
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(3) | BIT(5)));
+ }
+ break;
+ case LED_PIN_LED1:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+ ledcfg &= 0x10; /* Set to software control. */
+ rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3));
+
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not processed\n");
+ break;
+ }
+ pled->ledon = false;
+}
+
+void rtl8723be_init_sw_leds(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
+ _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+static void _rtl8723be_sw_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
+ switch (ledaction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ rtl8723be_sw_led_on(hw, pled0);
+ break;
+ case LED_CTL_POWER_OFF:
+ rtl8723be_sw_led_off(hw, pled0);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtl8723be_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+ (ledaction == LED_CTL_TX ||
+ ledaction == LED_CTL_RX ||
+ ledaction == LED_CTL_SITE_SURVEY ||
+ ledaction == LED_CTL_LINK ||
+ ledaction == LED_CTL_NO_LINK ||
+ ledaction == LED_CTL_START_TO_LINK ||
+ ledaction == LED_CTL_POWER_ON)) {
+ return;
+ }
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
+ _rtl8723be_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.h b/drivers/net/wireless/rtlwifi/rtl8723be/led.h
new file mode 100644
index 000000000000..c57de379ee8d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/led.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_LED_H__
+#define __RTL8723BE_LED_H__
+
+void rtl8723be_init_sw_leds(struct ieee80211_hw *hw);
+void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8723be_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
new file mode 100644
index 000000000000..1575ef9ece9f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
@@ -0,0 +1,2156 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "../core.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "../rtl8723com/phy_common.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+#include "trx.h"
+
+static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw);
+static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype);
+static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw,
+ u8 channel, u8 *stage,
+ u8 *step, u32 *delay);
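+
+/* A condition word in the vendor config tables encodes the required board
+ * type (bits 0-7), interface (bits 8-15) and platform (bits 16-23);
+ * 0xCDCDCDCD marks an unconditional entry.
+ */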
+static bool _rtl8723be_check_condition(struct ieee80211_hw *hw,
+ const u32 condition)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 _board = rtlefuse->board_type; /*need efuse define*/
+ u32 _interface = rtlhal->interface;
+ u32 _platform = 0x08;/*SupportPlatform */
+ u32 cond = condition;
+
+ if (condition == 0xCDCDCDCD)
+ return true;
+
+ cond = condition & 0xFF;
+ if ((_board & cond) == 0 && cond != 0x1F)
+ return false;
+
+ cond = condition & 0xFF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = condition & 0xFF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+ u32 arraylength;
+ u32 *ptrarray;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n");
+ arraylength = RTL8723BEMAC_1T_ARRAYLEN;
+ ptrarray = RTL8723BEMAC_1T_ARRAY;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Img:RTL8723bEMAC_1T_ARRAY LEN %d\n", arraylength);
+ for (i = 0; i < arraylength; i = i + 2)
+ rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
+ return true;
+}
+
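+/* Walk a PHY_REG or AGC table as (address, value) pairs.  Values below
+ * 0xCDCDCDCD are written directly; larger values open a conditional branch
+ * whose body is applied or skipped, based on _rtl8723be_check_condition(),
+ * until a 0xDEAD/0xCDEF/0xCDCD terminator.
+ */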
+static bool _rtl8723be_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ #define READ_NEXT_PAIR(v1, v2, i) \
+ do { \
+ i += 2; \
+ v1 = array_table[i];\
+ v2 = array_table[i+1]; \
+ } while (0)
+
+ int i;
+ u32 *array_table;
+ u16 arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 v1 = 0, v2 = 0;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ arraylen = RTL8723BEPHY_REG_1TARRAYLEN;
+ array_table = RTL8723BEPHY_REG_1TARRAY;
+
+ for (i = 0; i < arraylen; i = i + 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1 < 0xcdcdcdcd) {
+ rtl_bb_delay(hw, v1, v2);
+ } else {/*This line is the start line of branch.*/
+ if (!_rtl8723be_check_condition(hw, array_table[i])) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < arraylen - 2) {
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ i -= 2; /* prevent from for-loop += 2*/
+ /* Configure matched pairs and
+ * skip to end of if-else.
+ */
+ } else {
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < arraylen - 2) {
+ rtl_bb_delay(hw,
+ v1, v2);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ arraylen = RTL8723BEAGCTAB_1TARRAYLEN;
+ array_table = RTL8723BEAGCTAB_1TARRAY;
+
+ for (i = 0; i < arraylen; i = i + 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1 < 0xCDCDCDCD) {
+ rtl_set_bbreg(hw, array_table[i],
+ MASKDWORD,
+ array_table[i + 1]);
+ udelay(1);
+ continue;
+ } else {/*This line is the start line of branch.*/
+ if (!_rtl8723be_check_condition(hw, array_table[i])) {
+ /* Discard the following
+ * (offset, data) pairs
+ */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < arraylen - 2) {
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ i -= 2; /* prevent from for-loop += 2*/
+ /*Configure matched pairs and
+ *skip to end of if-else.
+ */
+ } else {
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < arraylen - 2) {
+ rtl_set_bbreg(hw, array_table[i],
+ MASKDWORD,
+ array_table[i + 1]);
+ udelay(1);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The agctab_array_table[0] is "
+ "%x Rtl818EEPHY_REGArray[1] is %x\n",
+ array_table[i], array_table[i + 1]);
+ }
+ }
+ return true;
+}
+
+static u8 _rtl8723be_get_rate_section_index(u32 regaddr)
+{
+ u8 index = 0;
+
+ switch (regaddr) {
+ case RTXAGC_A_RATE18_06:
+ case RTXAGC_B_RATE18_06:
+ index = 0;
+ break;
+ case RTXAGC_A_RATE54_24:
+ case RTXAGC_B_RATE54_24:
+ index = 1;
+ break;
+ case RTXAGC_A_CCK1_MCS32:
+ case RTXAGC_B_CCK1_55_MCS32:
+ index = 2;
+ break;
+ case RTXAGC_B_CCK11_A_CCK2_11:
+ index = 3;
+ break;
+ case RTXAGC_A_MCS03_MCS00:
+ case RTXAGC_B_MCS03_MCS00:
+ index = 4;
+ break;
+ case RTXAGC_A_MCS07_MCS04:
+ case RTXAGC_B_MCS07_MCS04:
+ index = 5;
+ break;
+ case RTXAGC_A_MCS11_MCS08:
+ case RTXAGC_B_MCS11_MCS08:
+ index = 6;
+ break;
+ case RTXAGC_A_MCS15_MCS12:
+ case RTXAGC_B_MCS15_MCS12:
+ index = 7;
+ break;
+ default:
+ regaddr &= 0xFFF;
+ if (regaddr >= 0xC20 && regaddr <= 0xC4C)
+ index = (u8) ((regaddr - 0xC20) / 4);
+ else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
+ index = (u8) ((regaddr - 0xE20) / 4);
+ break;
+ }
+ return index;
+}
+
+u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, readback_value, bitshift;
+ unsigned long flags;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+ original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), "
+ "bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
+
+ return readback_value;
+}
+
+void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, bitshift;
+ unsigned long flags;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, path);
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = rtl8723_phy_rf_serial_read(hw, path,
+ regaddr);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ data = ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+
+ rtl8723_phy_rf_serial_write(hw, path, regaddr, data);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, path);
+}
+
+bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool rtstatus = _rtl8723be_phy_config_mac_with_headerfile(hw);
+
+ rtl_write_byte(rtlpriv, 0x04CA, 0x0B);
+ return rtstatus;
+}
+
+bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw)
+{
+ bool rtstatus = true;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 regval;
+ u8 reg_hwparafile = 1;
+ u32 tmp;
+ u8 crystalcap = rtlpriv->efuse.crystalcap;
+
+ rtl8723_phy_init_bb_rf_reg_def(hw);
+ regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
+ regval | BIT(13) | BIT(0) | BIT(1));
+
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
+ FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE |
+ FEN_BB_GLB_RSTN | FEN_BBRSTB);
+ tmp = rtl_read_dword(rtlpriv, 0x4c);
+ rtl_write_dword(rtlpriv, 0x4c, tmp | BIT(23));
+
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+
+ if (reg_hwparafile == 1)
+ rtstatus = _rtl8723be_phy_bb8723b_config_parafile(hw);
+
+ crystalcap = crystalcap & 0x3F;
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
+ (crystalcap | crystalcap << 6));
+
+ return rtstatus;
+}
+
+bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw)
+{
+ return rtl8723be_phy_rf6052_config(hw);
+}
+
+static void _rtl8723be_config_rf_reg(struct ieee80211_hw *hw, u32 addr,
+ u32 data, enum radio_path rfpath,
+ u32 regaddr)
+{
+ if (addr == 0xfe || addr == 0xffe) {
+ mdelay(50);
+ } else {
+ rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data);
+ udelay(1);
+ }
+}
+
+static void _rtl8723be_config_rf_radio_a(struct ieee80211_hw *hw,
+ u32 addr, u32 data)
+{
+ u32 content = 0x1000; /*RF Content: radio_a_txt*/
+ u32 maskforphyset = (u32)(content & 0xE000);
+
+ _rtl8723be_config_rf_reg(hw, addr, data, RF90_PATH_A,
+ addr | maskforphyset);
+}
+
+static void _rtl8723be_phy_init_tx_power_by_rate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ u8 band, path, txnum, section;
+
+ for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band)
+ for (path = 0; path < TX_PWR_BY_RATE_NUM_RF; ++path)
+ for (txnum = 0; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum)
+ for (section = 0;
+ section < TX_PWR_BY_RATE_NUM_SECTION;
+ ++section)
+ rtlphy->tx_power_by_rate_offset[band]
+ [path][txnum][section] = 0;
+}
+
+static void phy_set_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band,
+ u8 path, u8 rate_section,
+ u8 txnum, u8 value)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (path > RF90_PATH_D) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n",
+ path);
+ return;
+ }
+
+ if (band == BAND_ON_2_4G) {
+ switch (rate_section) {
+ case CCK:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value;
+ break;
+ case OFDM:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value;
+ break;
+ case HT_MCS0_MCS7:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value;
+ break;
+ case HT_MCS8_MCS15:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 2.4G, Rf Path"
+ " %d, %dTx in PHY_SetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
+ break;
+ }
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d in PHY_SetTxPowerByRateBase()\n",
+ band);
+ }
+}
+
+static u8 phy_get_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path,
+ u8 txnum, u8 rate_section)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 value = 0;
+ if (path > RF90_PATH_D) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
+ path);
+ return 0;
+ }
+
+ if (band == BAND_ON_2_4G) {
+ switch (rate_section) {
+ case CCK:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0];
+ break;
+ case OFDM:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1];
+ break;
+ case HT_MCS0_MCS7:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2];
+ break;
+ case HT_MCS8_MCS15:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3];
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 2.4G, Rf Path"
+ " %d, %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
+ break;
+ }
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d in PHY_GetTxPowerByRateBase()\n",
+ band);
+ }
+
+ return value;
+}
+
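+/* Derive the per-section base TX power from the cached power-by-rate words:
+ * each byte is BCD-coded (high nibble tens, low nibble units) and one byte
+ * per section is decoded and stored via phy_set_txpwr_by_rate_base().
+ */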
+static void _rtl8723be_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u16 raw_value = 0;
+ u8 base = 0, path = 0;
+
+ for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) {
+ if (path == RF90_PATH_A) {
+ raw_value = (u16) (rtlphy->tx_power_by_rate_offset
+ [BAND_ON_2_4G][path][RF_1TX][3] >> 24) & 0xFF;
+ base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+ phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, CCK,
+ RF_1TX, base);
+ } else if (path == RF90_PATH_B) {
+ raw_value = (u16) (rtlphy->tx_power_by_rate_offset
+ [BAND_ON_2_4G][path][RF_1TX][3] >> 0) & 0xFF;
+ base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+ phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path,
+ CCK, RF_1TX, base);
+ }
+ raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [path][RF_1TX][1] >> 24) & 0xFF;
+ base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+ phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX,
+ base);
+
+ raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [path][RF_1TX][5] >> 24) & 0xFF;
+ base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+ phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7,
+ RF_1TX, base);
+
+ raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [path][RF_2TX][7] >> 24) & 0xFF;
+ base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+ phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path,
+ HT_MCS8_MCS15, RF_2TX, base);
+ }
+}
+
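+/* Convert bytes start..end of a packed power-by-rate word from absolute
+ * BCD-coded values into their offset from base_val, leaving the other
+ * bytes untouched.
+ */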
+static void phy_conv_dbm_to_rel(u32 *data, u8 start, u8 end, u8 base_val)
+{
+ s8 i = 0; /* must be signed: the loop counts down past zero */
+ u8 temp_value = 0;
+ u32 temp_data = 0;
+
+ for (i = 3; i >= 0; --i) {
+ if (i >= start && i <= end) {
+ /* Get the exact value */
+ temp_value = (u8) (*data >> (i * 8)) & 0xF;
+ temp_value += ((u8) ((*data >> (i*8 + 4)) & 0xF)) * 10;
+
+ /* Change the value to a relative value */
+ temp_value = (temp_value > base_val) ?
+ temp_value - base_val :
+ base_val - temp_value;
+ } else {
+ temp_value = (u8) (*data >> (i * 8)) & 0xFF;
+ }
+ temp_data <<= 8;
+ temp_data |= temp_value;
+ }
+ *data = temp_data;
+}
+
+static void conv_dbm_to_rel(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 base = 0, rfpath = RF90_PATH_A;
+
+ base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+ RF_1TX, CCK);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][2]), 1, 1, base);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][3]), 1, 3, base);
+
+ base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+ RF_1TX, OFDM);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][0]), 0, 3, base);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][1]), 0, 3, base);
+
+ base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+ RF_1TX, HT_MCS0_MCS7);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][4]), 0, 3, base);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][5]), 0, 3, base);
+
+ base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+ RF_2TX, HT_MCS8_MCS15);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_2TX][6]), 0, 3, base);
+
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_2TX][7]), 0, 3, base);
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+ "<=== conv_dbm_to_rel()\n");
+}
+
+static void _rtl8723be_phy_txpower_by_rate_configuration(
+ struct ieee80211_hw *hw)
+{
+ _rtl8723be_phy_store_txpower_by_rate_base(hw);
+ conv_dbm_to_rel(hw);
+}
+
+static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ bool rtstatus;
+
+ rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
+ return false;
+ }
+ _rtl8723be_phy_init_tx_power_by_rate(hw);
+ if (!rtlefuse->autoload_failflag) {
+ rtlphy->pwrgroup_cnt = 0;
+ rtstatus = _rtl8723be_phy_config_bb_with_pgheaderfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ }
+ _rtl8723be_phy_txpower_by_rate_configuration(hw);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
+ return false;
+ }
+ rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_AGC_TAB);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+ return false;
+ }
+ rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ 0x200));
+ return true;
+}
+
+static void _rtl8723be_store_tx_power_by_rate(struct ieee80211_hw *hw,
+ u32 band, u32 rfpath,
+ u32 txnum, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 rate_section = _rtl8723be_get_rate_section_index(regaddr);
+
+ if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
+ RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
+ "Invalid Band %d\n", band);
+ return;
+ }
+
+ if (rfpath > TX_PWR_BY_RATE_NUM_RF) {
+ RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
+ "Invalid RfPath %d\n", rfpath);
+ return;
+ }
+ if (txnum > TX_PWR_BY_RATE_NUM_RF) {
+ RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
+ "Invalid TxNum %d\n", txnum);
+ return;
+ }
+ rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] =
+ data;
+}
+
+static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i;
+ u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+ u32 v1 = 0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0;
+
+ phy_regarray_pg_len = RTL8723BEPHY_REG_ARRAY_PGLEN;
+ phy_regarray_table_pg = RTL8723BEPHY_REG_ARRAY_PG;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i = i + 6) {
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ v4 = phy_regarray_table_pg[i+3];
+ v5 = phy_regarray_table_pg[i+4];
+ v6 = phy_regarray_table_pg[i+5];
+
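+ /* Each 6-tuple is (band, path, txnum, regaddr, bitmask, data);
+ * v1 values of 0xfe/0xffe are delay markers, while values of
+ * 0xcdcdcdcd and above introduce a conditional block that is
+ * skipped up to its 0xDEAD terminator when
+ * _rtl8723be_check_condition() is not met.
+ */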
+ if (v1 < 0xcdcdcdcd) {
+ if (phy_regarray_table_pg[i] == 0xfe ||
+ phy_regarray_table_pg[i] == 0xffe)
+ mdelay(50);
+ else
+ _rtl8723be_store_tx_power_by_rate(hw,
+ v1, v2, v3, v4, v5, v6);
+ continue;
+ } else {
+ /*don't need the hw_body*/
+ if (!_rtl8723be_check_condition(hw,
+ phy_regarray_table_pg[i])) {
+ i += 2; /* skip the expression pair */
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ while (v2 != 0xDEAD) {
+ i += 3;
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ }
+ }
+ }
+ }
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
+ }
+ return true;
+}
+
+bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath)
+{
+ #define READ_NEXT_RF_PAIR(v1, v2, i) \
+ do { \
+ i += 2; \
+ v1 = radioa_array_table[i]; \
+ v2 = radioa_array_table[i+1]; \
+ } while (0)
+
+ int i;
+ bool rtstatus = true;
+ u32 *radioa_array_table;
+ u16 radioa_arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 v1 = 0, v2 = 0;
+
+ radioa_arraylen = RTL8723BE_RADIOA_1TARRAYLEN;
+ radioa_array_table = RTL8723BE_RADIOA_1TARRAY;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_A:RTL8723BE_RADIOA_1TARRAY %d\n", radioa_arraylen);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtstatus = true;
+ switch (rfpath) {
+ case RF90_PATH_A:
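+ /* The radio-A table is a list of (offset, value) pairs; entries
+ * of 0xcdcdcdcd and above start conditional branches that end at
+ * the 0xDEAD/0xCDEF/0xCDCD markers checked below.
+ */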
+ for (i = 0; i < radioa_arraylen; i = i + 2) {
+ v1 = radioa_array_table[i];
+ v2 = radioa_array_table[i+1];
+ if (v1 < 0xcdcdcdcd) {
+ _rtl8723be_config_rf_radio_a(hw, v1, v2);
+ } else { /*This line is the start line of branch.*/
+ if (!_rtl8723be_check_condition(hw,
+ radioa_array_table[i])) {
+ /* Discard the following
+ * (offset, data) pairs
+ */
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < radioa_arraylen - 2)
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ i -= 2; /* compensate for the for-loop's i += 2 */
+ } else {
+ /* Configure matched pairs
+ * and skip to end of if-else.
+ */
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < radioa_arraylen - 2) {
+ _rtl8723be_config_rf_radio_a(hw,
+ v1, v2);
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD &&
+ i < radioa_arraylen - 2) {
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ }
+ }
+ }
+ }
+
+ if (rtlhal->oem_id == RT_CID_819X_HP)
+ _rtl8723be_config_rf_radio_a(hw, 0x52, 0x7E4BD);
+
+ break;
+ case RF90_PATH_B:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ case RF90_PATH_C:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ case RF90_PATH_D:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+ return true;
+}
+
+void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->default_initialgain[0] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[1] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[2] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[3] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50 = 0x%x, "
+ "c58 = 0x%x, c60 = 0x%x, c68 = 0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
+
+ rtlphy->framesync = (u8) rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
+ MASKBYTE0);
+ rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
+ MASKDWORD);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
+}
+
+void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 txpwr_level;
+ long txpwr_dbm;
+
+ txpwr_level = rtlphy->cur_cck_txpwridx;
+ txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B,
+ txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+ if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) >
+ txpwr_dbm)
+ txpwr_dbm =
+ rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+ txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+ if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+ txpwr_level) > txpwr_dbm)
+ txpwr_dbm =
+ rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+ txpwr_level);
+ *powerlevel = txpwr_dbm;
+}
+
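+/* Map a rate to the last index of tx_power_by_rate_offset[][][][]:
+ * 0-1 legacy OFDM, 2-3 CCK, 4-7 HT MCS0-MCS15 in groups of four.
+ */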
+static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
+ u8 rate)
+{
+ u8 rate_section = 0;
+
+ switch (rate) {
+ case DESC92C_RATE1M:
+ rate_section = 2;
+ break;
+ case DESC92C_RATE2M:
+ case DESC92C_RATE5_5M:
+ if (path == RF90_PATH_A)
+ rate_section = 3;
+ else if (path == RF90_PATH_B)
+ rate_section = 2;
+ break;
+ case DESC92C_RATE11M:
+ rate_section = 3;
+ break;
+ case DESC92C_RATE6M:
+ case DESC92C_RATE9M:
+ case DESC92C_RATE12M:
+ case DESC92C_RATE18M:
+ rate_section = 0;
+ break;
+ case DESC92C_RATE24M:
+ case DESC92C_RATE36M:
+ case DESC92C_RATE48M:
+ case DESC92C_RATE54M:
+ rate_section = 1;
+ break;
+ case DESC92C_RATEMCS0:
+ case DESC92C_RATEMCS1:
+ case DESC92C_RATEMCS2:
+ case DESC92C_RATEMCS3:
+ rate_section = 4;
+ break;
+ case DESC92C_RATEMCS4:
+ case DESC92C_RATEMCS5:
+ case DESC92C_RATEMCS6:
+ case DESC92C_RATEMCS7:
+ rate_section = 5;
+ break;
+ case DESC92C_RATEMCS8:
+ case DESC92C_RATEMCS9:
+ case DESC92C_RATEMCS10:
+ case DESC92C_RATEMCS11:
+ rate_section = 6;
+ break;
+ case DESC92C_RATEMCS12:
+ case DESC92C_RATEMCS13:
+ case DESC92C_RATEMCS14:
+ case DESC92C_RATEMCS15:
+ rate_section = 7;
+ break;
+ default:
+ RT_ASSERT(false, "Rate_Section is Illegal\n");
+ break;
+ }
+ return rate_section;
+}
+
+static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw,
+ enum band_type band,
+ enum radio_path rfpath, u8 rate)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 shift = 0, rate_section, tx_num;
+ char tx_pwr_diff = 0;
+
+ rate_section = _rtl8723be_phy_get_ratesection_intxpower_byrate(rfpath,
+ rate);
+ tx_num = RF_TX_NUM_NONIMPLEMENT;
+
+ if (tx_num == RF_TX_NUM_NONIMPLEMENT) {
+ if (rate >= DESC92C_RATEMCS8 && rate <= DESC92C_RATEMCS15)
+ tx_num = RF_2TX;
+ else
+ tx_num = RF_1TX;
+ }
+
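+ /* Each 32-bit word of tx_power_by_rate_offset packs four per-rate
+ * bytes; the shift selects the byte that belongs to the requested
+ * rate.
+ */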
+ switch (rate) {
+ case DESC92C_RATE6M:
+ case DESC92C_RATE24M:
+ case DESC92C_RATEMCS0:
+ case DESC92C_RATEMCS4:
+ case DESC92C_RATEMCS8:
+ case DESC92C_RATEMCS12:
+ shift = 0;
+ break;
+ case DESC92C_RATE1M:
+ case DESC92C_RATE2M:
+ case DESC92C_RATE9M:
+ case DESC92C_RATE36M:
+ case DESC92C_RATEMCS1:
+ case DESC92C_RATEMCS5:
+ case DESC92C_RATEMCS9:
+ case DESC92C_RATEMCS13:
+ shift = 8;
+ break;
+ case DESC92C_RATE5_5M:
+ case DESC92C_RATE12M:
+ case DESC92C_RATE48M:
+ case DESC92C_RATEMCS2:
+ case DESC92C_RATEMCS6:
+ case DESC92C_RATEMCS10:
+ case DESC92C_RATEMCS14:
+ shift = 16;
+ break;
+ case DESC92C_RATE11M:
+ case DESC92C_RATE18M:
+ case DESC92C_RATE54M:
+ case DESC92C_RATEMCS3:
+ case DESC92C_RATEMCS7:
+ case DESC92C_RATEMCS11:
+ case DESC92C_RATEMCS15:
+ shift = 24;
+ break;
+ default:
+ RT_ASSERT(false, "Rate_Section is Illegal\n");
+ break;
+ }
+ tx_pwr_diff = (u8)(rtlphy->tx_power_by_rate_offset[band][rfpath][tx_num]
+ [rate_section] >> shift) & 0xff;
+
+ return tx_pwr_diff;
+}
+
+static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path,
+ u8 rate, u8 bandwidth, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 index = (channel - 1);
+ u8 txpower;
+ u8 power_diff_byrate = 0;
+
+ if (channel > 14 || channel < 1) {
+ index = 0;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Illegal channel!\n");
+ }
+ if (RTL8723E_RX_HAL_IS_CCK_RATE(rate))
+ txpower = rtlefuse->txpwrlevel_cck[path][index];
+ else if (DESC92C_RATE6M <= rate)
+ txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
+ else
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "invalid rate\n");
+
+ if (DESC92C_RATE6M <= rate && rate <= DESC92C_RATE54M &&
+ !RTL8723E_RX_HAL_IS_CCK_RATE(rate))
+ txpower += rtlefuse->txpwr_legacyhtdiff[0][TX_1S];
+
+ if (bandwidth == HT_CHANNEL_WIDTH_20) {
+ if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15)
+ txpower += rtlefuse->txpwr_ht20diff[0][TX_1S];
+ if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15)
+ txpower += rtlefuse->txpwr_ht20diff[0][TX_2S];
+ } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
+ if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15)
+ txpower += rtlefuse->txpwr_ht40diff[0][TX_1S];
+ if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15)
+ txpower += rtlefuse->txpwr_ht40diff[0][TX_2S];
+ }
+ if (rtlefuse->eeprom_regulatory != 2)
+ power_diff_byrate = _rtl8723be_get_txpower_by_rate(hw,
+ BAND_ON_2_4G,
+ path, rate);
+
+ txpower += power_diff_byrate;
+
+ if (txpower > MAX_POWER_INDEX)
+ txpower = MAX_POWER_INDEX;
+
+ return txpower;
+}
+
+static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw,
+ u8 power_index, u8 path, u8 rate)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if (path == RF90_PATH_A) {
+ switch (rate) {
+ case DESC92C_RATE1M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_CCK1_MCS32,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATE2M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATE5_5M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATE11M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
+ MASKBYTE3, power_index);
+ break;
+ case DESC92C_RATE6M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+ MASKBYTE0, power_index);
+ break;
+ case DESC92C_RATE9M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATE12M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATE18M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+ MASKBYTE3, power_index);
+ break;
+ case DESC92C_RATE24M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+ MASKBYTE0, power_index);
+ break;
+ case DESC92C_RATE36M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATE48M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATE54M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+ MASKBYTE3, power_index);
+ break;
+ case DESC92C_RATEMCS0:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+ MASKBYTE0, power_index);
+ break;
+ case DESC92C_RATEMCS1:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATEMCS2:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATEMCS3:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+ MASKBYTE3, power_index);
+ break;
+ case DESC92C_RATEMCS4:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+ MASKBYTE0, power_index);
+ break;
+ case DESC92C_RATEMCS5:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATEMCS6:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATEMCS7:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+ MASKBYTE3, power_index);
+ break;
+ case DESC92C_RATEMCS8:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+ MASKBYTE0, power_index);
+ break;
+ case DESC92C_RATEMCS9:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATEMCS10:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATEMCS11:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+ MASKBYTE3, power_index);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid Rate!!\n");
+ break;
+ }
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
+ }
+}
+
+void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 cck_rates[] = {DESC92C_RATE1M, DESC92C_RATE2M,
+ DESC92C_RATE5_5M, DESC92C_RATE11M};
+ u8 ofdm_rates[] = {DESC92C_RATE6M, DESC92C_RATE9M,
+ DESC92C_RATE12M, DESC92C_RATE18M,
+ DESC92C_RATE24M, DESC92C_RATE36M,
+ DESC92C_RATE48M, DESC92C_RATE54M};
+ u8 ht_rates_1t[] = {DESC92C_RATEMCS0, DESC92C_RATEMCS1,
+ DESC92C_RATEMCS2, DESC92C_RATEMCS3,
+ DESC92C_RATEMCS4, DESC92C_RATEMCS5,
+ DESC92C_RATEMCS6, DESC92C_RATEMCS7};
+ u8 i, size;
+ u8 power_index;
+
+ if (!rtlefuse->txpwr_fromeprom)
+ return;
+
+ size = sizeof(cck_rates) / sizeof(u8);
+ for (i = 0; i < size; i++) {
+ power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
+ cck_rates[i],
+ rtl_priv(hw)->phy.current_chan_bw,
+ channel);
+ _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
+ cck_rates[i]);
+ }
+ size = sizeof(ofdm_rates) / sizeof(u8);
+ for (i = 0; i < size; i++) {
+ power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
+ ofdm_rates[i],
+ rtl_priv(hw)->phy.current_chan_bw,
+ channel);
+ _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
+ ofdm_rates[i]);
+ }
+ size = sizeof(ht_rates_1t) / sizeof(u8);
+ for (i = 0; i < size; i++) {
+ power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
+ ht_rates_1t[i],
+ rtl_priv(hw)->phy.current_chan_bw,
+ channel);
+ _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
+ ht_rates_1t[i]);
+ }
+}
+
+void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum io_type iotype;
+
+ if (!is_hal_stop(rtlhal)) {
+ switch (operation) {
+ case SCAN_OPT_BACKUP:
+ iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ case SCAN_OPT_RESTORE:
+ iotype = IO_CMD_RESUME_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Unknown Scan Backup operation.\n");
+ break;
+ }
+ }
+}
+
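+/* Apply the bandwidth selected in rtlphy->current_chan_bw: program the MAC
+ * BWOPMODE/RRSR sideband bytes, the FPGA0/1 RFMOD bits (plus CCK sideband and
+ * OFDM LSTF fields for 40MHz) and finally the RF6052 bandwidth.
+ */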
+void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 reg_bw_opmode;
+ u8 reg_prsr_rsc;
+
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
+
+ if (is_hal_stop(rtlhal)) {
+ rtlphy->set_bwmode_inprogress = false;
+ return;
+ }
+
+ reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+ reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ reg_bw_opmode |= BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ reg_prsr_rsc = (reg_prsr_rsc & 0x90) |
+ (mac->cur_40_prime_sc << 5);
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ break;
+ }
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
+ (mac->cur_40_prime_sc >> 1));
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+ rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
+ (mac->cur_40_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ break;
+ }
+ rtl8723be_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+ rtlphy->set_bwmode_inprogress = false;
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
+}
+
+void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_bw = rtlphy->current_chan_bw;
+
+ if (rtlphy->set_bwmode_inprogress)
+ return;
+ rtlphy->set_bwmode_inprogress = true;
+ if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+ rtl8723be_phy_set_bw_mode_callback(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "false driver sleep or unload\n");
+ rtlphy->set_bwmode_inprogress = false;
+ rtlphy->current_chan_bw = tmp_bw;
+ }
+}
+
+void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 delay;
+
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
+ if (is_hal_stop(rtlhal))
+ return;
+ do {
+ if (!rtlphy->sw_chnl_inprogress)
+ break;
+ if (!rtl8723be_phy_sw_chn_step_by_step(hw,
+ rtlphy->current_channel,
+ &rtlphy->sw_chnl_stage,
+ &rtlphy->sw_chnl_step,
+ &delay)) {
+ if (delay > 0)
+ mdelay(delay);
+ else
+ continue;
+ } else {
+ rtlphy->sw_chnl_inprogress = false;
+ }
+ break;
+ } while (true);
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+}
+
+u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlphy->sw_chnl_inprogress)
+ return 0;
+ if (rtlphy->set_bwmode_inprogress)
+ return 0;
+ RT_ASSERT((rtlphy->current_channel <= 14),
+ "WIRELESS_MODE_G but channel>14");
+ rtlphy->sw_chnl_inprogress = true;
+ rtlphy->sw_chnl_stage = 0;
+ rtlphy->sw_chnl_step = 0;
+ if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+ rtl8723be_phy_sw_chnl_callback(hw);
+ RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false schdule "
+ "workitem current channel %d\n",
+ rtlphy->current_channel);
+ rtlphy->sw_chnl_inprogress = false;
+ } else {
+ RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or"
+ " unload\n");
+ rtlphy->sw_chnl_inprogress = false;
+ }
+ return 1;
+}
+
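+/* Channel switch state machine: *stage selects the pre-common, RF-dependent
+ * or post-common command array, *step indexes into it, and one command is
+ * executed per call. A false return with *delay set asks the caller to wait
+ * and call again; true means the switch is complete.
+ */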
+static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw,
+ u8 channel, u8 *stage,
+ u8 *step, u32 *delay)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
+ u32 precommoncmdcnt;
+ struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
+ u32 postcommoncmdcnt;
+ struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
+ u32 rfdependcmdcnt;
+ struct swchnlcmd *currentcmd = NULL;
+ u8 rfpath;
+ u8 num_total_rfpath = rtlphy->num_total_rfpath;
+
+ precommoncmdcnt = 0;
+ rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT,
+ CMDID_SET_TXPOWEROWER_LEVEL,
+ 0, 0, 0);
+ rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+ postcommoncmdcnt = 0;
+ rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+ MAX_POSTCMD_CNT, CMDID_END,
+ 0, 0, 0);
+ rfdependcmdcnt = 0;
+
+ RT_ASSERT((channel >= 1 && channel <= 14),
+ "illegal channel for Zebra: %d\n", channel);
+
+ rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT,
+ CMDID_RF_WRITEREG,
+ RF_CHNLBW, channel, 10);
+
+ rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT,
+ CMDID_END, 0, 0, 0);
+
+ do {
+ switch (*stage) {
+ case 0:
+ currentcmd = &precommoncmd[*step];
+ break;
+ case 1:
+ currentcmd = &rfdependcmd[*step];
+ break;
+ case 2:
+ currentcmd = &postcommoncmd[*step];
+ break;
+ }
+
+ if (currentcmd->cmdid == CMDID_END) {
+ if ((*stage) == 2) {
+ return true;
+ } else {
+ (*stage)++;
+ (*step) = 0;
+ continue;
+ }
+ }
+
+ switch (currentcmd->cmdid) {
+ case CMDID_SET_TXPOWEROWER_LEVEL:
+ rtl8723be_phy_set_txpower_level(hw, channel);
+ break;
+ case CMDID_WRITEPORT_ULONG:
+ rtl_write_dword(rtlpriv, currentcmd->para1,
+ currentcmd->para2);
+ break;
+ case CMDID_WRITEPORT_USHORT:
+ rtl_write_word(rtlpriv, currentcmd->para1,
+ (u16) currentcmd->para2);
+ break;
+ case CMDID_WRITEPORT_UCHAR:
+ rtl_write_byte(rtlpriv, currentcmd->para1,
+ (u8) currentcmd->para2);
+ break;
+ case CMDID_RF_WRITEREG:
+ for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] =
+ ((rtlphy->rfreg_chnlval[rfpath] &
+ 0xfffffc00) | currentcmd->para2);
+
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ currentcmd->para1,
+ RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[rfpath]);
+ }
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+
+ break;
+ } while (true);
+
+ (*delay) = currentcmd->msdelay;
+ (*step)++;
+ return false;
+}
+
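+/* Run one path-A TX IQK: program the 0xe30-0xe4c IQK registers, trigger the
+ * calibration through 0xe48 and, after IQK_DELAY_TIME, read back
+ * 0xeac/0xe94/0xe9c. Bit 0 of the result is set when BIT(28) of 0xeac is
+ * clear and the e94/e9c readings are not 0x142/0x42.
+ */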
+static u8 _rtl8723be_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
+{
+ u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
+ u8 result = 0x00;
+
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1c);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x30008c1c);
+ rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x8214032a);
+ rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x28160000);
+
+ rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
+
+ mdelay(IQK_DELAY_TIME);
+
+ reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+ reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
+ reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
+ reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
+
+ if (!(reg_eac & BIT(28)) &&
+ (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ return result;
+}
+
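+/* Compare two IQK runs (rows c1 and c2 of result[][]) element by element:
+ * differences above MAX_TOLERANCE mark the corresponding bit in
+ * simularity_bitmap, recoverable values are merged into result[3], and the
+ * function returns true only when the two runs fully agree.
+ */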
+static bool phy_similarity_cmp(struct ieee80211_hw *hw, long result[][8],
+ u8 c1, u8 c2)
+{
+ u32 i, j, diff, simularity_bitmap, bound;
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u8 final_candidate[2] = { 0xFF, 0xFF };
+ bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
+
+ if (is2t)
+ bound = 8;
+ else
+ bound = 4;
+
+ simularity_bitmap = 0;
+
+ for (i = 0; i < bound; i++) {
+ diff = (result[c1][i] > result[c2][i]) ?
+ (result[c1][i] - result[c2][i]) :
+ (result[c2][i] - result[c1][i]);
+
+ if (diff > MAX_TOLERANCE) {
+ if ((i == 2 || i == 6) && !simularity_bitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ final_candidate[(i / 4)] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ final_candidate[(i / 4)] = c1;
+ else
+ simularity_bitmap |= (1 << i);
+ } else {
+ simularity_bitmap |= (1 << i);
+ }
+ }
+ }
+
+ if (simularity_bitmap == 0) {
+ for (i = 0; i < (bound / 4); i++) {
+ if (final_candidate[i] != 0xFF) {
+ for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+ result[3][j] =
+ result[final_candidate[i]][j];
+ bresult = false;
+ }
+ }
+ return bresult;
+ } else if (!(simularity_bitmap & 0x0F)) {
+ for (i = 0; i < 4; i++)
+ result[3][i] = result[c1][i];
+ return false;
+ } else if (!(simularity_bitmap & 0xF0) && is2t) {
+ for (i = 4; i < 8; i++)
+ result[3][i] = result[c1][i];
+ return false;
+ } else {
+ return false;
+ }
+}
+
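+/* One IQK pass: on the first pass (t == 0) back up the ADDA, MAC and BB
+ * registers, switch the baseband to PI mode if needed, run the path-A TX IQK
+ * up to retrycount times, and on later passes restore everything that was
+ * saved together with the RF path selection and the 0xc50/0xc58 gain bytes.
+ */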
+static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
+ long result[][8], u8 t, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 i;
+ u8 patha_ok;
+ u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ 0x85c, 0xe6c, 0xe70, 0xe74,
+ 0xe78, 0xe7c, 0xe80, 0xe84,
+ 0xe88, 0xe8c, 0xed0, 0xed4,
+ 0xed8, 0xedc, 0xee0, 0xeec
+ };
+
+ u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ 0x522, 0x550, 0x551, 0x040
+ };
+ u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
+ ROFDM0_TRXPATHENABLE, ROFDM0_TRMUXPAR,
+ RFPGA0_XCD_RFINTERFACESW, 0xb68, 0xb6c,
+ 0x870, 0x860,
+ 0x864, 0x800
+ };
+ const u32 retrycount = 2;
+ u32 path_sel_bb, path_sel_rf;
+ u8 tmp_reg_c50, tmp_reg_c58;
+
+ tmp_reg_c50 = rtl_get_bbreg(hw, 0xc50, MASKBYTE0);
+ tmp_reg_c58 = rtl_get_bbreg(hw, 0xc58, MASKBYTE0);
+
+ if (t == 0) {
+ rtl8723_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, 16);
+ rtl8723_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl8723_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ }
+ rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t);
+ if (t == 0) {
+ rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ }
+ if (!rtlphy->rfpi_enable)
+ rtl8723_phy_pi_mode_switch(hw, true);
+
+ path_sel_bb = rtl_get_bbreg(hw, 0x948, MASKDWORD);
+ path_sel_rf = rtl_get_rfreg(hw, RF90_PATH_A, 0xb0, 0xfffff);
+
+ /*BB Setting*/
+ rtl_set_bbreg(hw, 0x800, BIT(24), 0x00);
+ rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
+
+ rtl_set_bbreg(hw, 0x870, BIT(10), 0x01);
+ rtl_set_bbreg(hw, 0x870, BIT(26), 0x01);
+ rtl_set_bbreg(hw, 0x860, BIT(10), 0x00);
+ rtl_set_bbreg(hw, 0x864, BIT(10), 0x00);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASKDWORD, 0x10000);
+ rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
+
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
+ rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x81004800);
+ for (i = 0; i < retrycount; i++) {
+ patha_ok = _rtl8723be_phy_path_a_iqk(hw, is2t);
+ if (patha_ok == 0x01) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Tx IQK Success!!\n");
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ break;
+ }
+ }
+
+ if (0 == patha_ok)
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A IQK Success!!\n");
+ if (is2t) {
+ rtl8723_phy_path_a_standby(hw);
+ rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t);
+ }
+
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
+
+ if (t != 0) {
+ if (!rtlphy->rfpi_enable)
+ rtl8723_phy_pi_mode_switch(hw, false);
+ rtl8723_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, 16);
+ rtl8723_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+
+ rtl_set_bbreg(hw, 0x948, MASKDWORD, path_sel_bb);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0xb0, 0xfffff, path_sel_rf);
+
+ rtl_set_bbreg(hw, 0xc50, MASKBYTE0, 0x50);
+ rtl_set_bbreg(hw, 0xc50, MASKBYTE0, tmp_reg_c50);
+ if (is2t) {
+ rtl_set_bbreg(hw, 0xc58, MASKBYTE0, 0x50);
+ rtl_set_bbreg(hw, 0xc58, MASKBYTE0, tmp_reg_c58);
+ }
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "8723be IQK Finish!!\n");
+}
+
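+/* LC calibration: pause TX (via 0xd03 or REG_TXPAUSE), temporarily adjust
+ * the RF mode register(s) if required, kick the calibration through RF regs
+ * 0xb0/0x18, wait 100 ms for it to finish, then restore the saved TX and RF
+ * settings.
+ */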
+static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmpreg;
+ u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+
+ tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+ if ((tmpreg & 0x70) != 0)
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+ else
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+ if ((tmpreg & 0x70) != 0) {
+ rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+
+ if (is2t)
+ rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+ MASK12BITS);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+ (rf_a_mode & 0x8FFFF) | 0x10000);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+ (rf_b_mode & 0x8FFFF) | 0x10000);
+ }
+ lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdfbe0);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, 0x8c0a);
+
+ mdelay(100);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdffe0);
+
+ if ((tmpreg & 0x70) != 0) {
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00,
+ MASK12BITS, rf_b_mode);
+ } else {
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+}
+
+static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw,
+ bool bmain, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+
+ if (is_hal_stop(rtlhal)) {
+ u8 u1btmp;
+ u1btmp = rtl_read_byte(rtlpriv, REG_LEDCFG0);
+ rtl_write_byte(rtlpriv, REG_LEDCFG0, u1btmp | BIT(7));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+ }
+ if (is2t) {
+ if (bmain)
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(6), 0x1);
+ else
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(6), 0x2);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BIT(8) | BIT(9), 0);
+ rtl_set_bbreg(hw, 0x914, MASKLWORD, 0x0201);
+
+ /* We use the RF definition of MAIN and AUX,
+ * left antenna and right antenna respectively.
+ * Default output at AUX.
+ */
+ if (bmain) {
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+ BIT(14) | BIT(13) | BIT(12), 0);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(4) | BIT(3), 0);
+ if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
+ rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 0);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+ BIT(14) | BIT(13) | BIT(12), 1);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(4) | BIT(3), 1);
+ if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
+ rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 1);
+ }
+ }
+}
+
+#undef IQK_ADDA_REG_NUM
+#undef IQK_DELAY_TIME
+
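+/* Full IQ calibration: unless only a recovery of the saved BB registers is
+ * requested, run up to three IQK passes, pick a final candidate by pairwise
+ * similarity of the results, write the chosen values back to rtlphy and save
+ * the IQK BB registers for the next recovery.
+ */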
+void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ long result[4][8];
+ u8 i, final_candidate;
+ bool patha_ok, pathb_ok;
+ long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
+ reg_ecc, reg_tmp = 0;
+ bool is12simular, is13simular, is23simular;
+ u32 iqk_bb_reg[9] = {
+ ROFDM0_XARXIQIMBALANCE,
+ ROFDM0_XBRXIQIMBALANCE,
+ ROFDM0_ECCATHRESHOLD,
+ ROFDM0_AGCRSSITABLE,
+ ROFDM0_XATXIQIMBALANCE,
+ ROFDM0_XBTXIQIMBALANCE,
+ ROFDM0_XCTXAFE,
+ ROFDM0_XDTXAFE,
+ ROFDM0_RXIQEXTANTA
+ };
+
+ if (recovery) {
+ rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup, 9);
+ return;
+ }
+
+ for (i = 0; i < 8; i++) {
+ result[0][i] = 0;
+ result[1][i] = 0;
+ result[2][i] = 0;
+ result[3][i] = 0;
+ }
+ final_candidate = 0xff;
+ patha_ok = false;
+ pathb_ok = false;
+ is12simular = false;
+ is23simular = false;
+ is13simular = false;
+ for (i = 0; i < 3; i++) {
+ if (get_rf_type(rtlphy) == RF_2T2R)
+ _rtl8723be_phy_iq_calibrate(hw, result, i, true);
+ else
+ _rtl8723be_phy_iq_calibrate(hw, result, i, false);
+ if (i == 1) {
+ is12simular = phy_similarity_cmp(hw, result, 0, 1);
+ if (is12simular) {
+ final_candidate = 0;
+ break;
+ }
+ }
+ if (i == 2) {
+ is13simular = phy_similarity_cmp(hw, result, 0, 2);
+ if (is13simular) {
+ final_candidate = 0;
+ break;
+ }
+ is23simular = phy_similarity_cmp(hw, result, 1, 2);
+ if (is23simular) {
+ final_candidate = 1;
+ } else {
+ for (i = 0; i < 8; i++)
+ reg_tmp += result[3][i];
+
+ if (reg_tmp != 0)
+ final_candidate = 3;
+ else
+ final_candidate = 0xFF;
+ }
+ }
+ }
+ for (i = 0; i < 4; i++) {
+ reg_e94 = result[i][0];
+ reg_e9c = result[i][1];
+ reg_ea4 = result[i][2];
+ reg_eac = result[i][3];
+ reg_eb4 = result[i][4];
+ reg_ebc = result[i][5];
+ reg_ec4 = result[i][6];
+ reg_ecc = result[i][7];
+ }
+ if (final_candidate != 0xff) {
+ reg_e94 = result[final_candidate][0];
+ rtlphy->reg_e94 = reg_e94;
+ reg_e9c = result[final_candidate][1];
+ rtlphy->reg_e9c = reg_e9c;
+ reg_ea4 = result[final_candidate][2];
+ reg_eac = result[final_candidate][3];
+ reg_eb4 = result[final_candidate][4];
+ rtlphy->reg_eb4 = reg_eb4;
+ reg_ebc = result[final_candidate][5];
+ rtlphy->reg_ebc = reg_ebc;
+ reg_ec4 = result[final_candidate][6];
+ reg_ecc = result[final_candidate][7];
+ patha_ok = true;
+ pathb_ok = true;
+ } else {
+ rtlphy->reg_e94 = 0x100;
+ rtlphy->reg_eb4 = 0x100;
+ rtlphy->reg_e9c = 0x0;
+ rtlphy->reg_ebc = 0x0;
+ }
+ if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
+ rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
+ final_candidate,
+ (reg_ea4 == 0));
+ if (final_candidate != 0xFF) {
+ for (i = 0; i < IQK_MATRIX_REG_NUM; i++)
+ rtlphy->iqk_matrix[0].value[0][i] =
+ result[final_candidate][i];
+ rtlphy->iqk_matrix[0].iqk_done = true;
+ }
+ rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 9);
+}
+
+void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u32 timeout = 2000, timecount = 0;
+
+ while (rtlpriv->mac80211.act_scanning && timecount < timeout) {
+ udelay(50);
+ timecount += 50;
+ }
+
+ rtlphy->lck_inprogress = true;
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ "LCK:Start!!! currentband %x delay %d ms\n",
+ rtlhal->current_bandtype, timecount);
+
+ _rtl8723be_phy_lc_calibrate(hw, false);
+
+ rtlphy->lck_inprogress = false;
+}
+
+void rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (rtlphy->apk_done)
+ return;
+
+ return;
+}
+
+void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
+{
+ _rtl8723be_phy_set_rfpath_switch(hw, bmain, false);
+}
+
+static void rtl8723be_phy_set_io(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ switch (rtlphy->current_io_type) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ rtlpriv->dm_digtable.cur_igvalue =
+ rtlphy->initgain_backup.xaagccore1;
+ /*rtl92c_dm_write_dig(hw);*/
+ rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel);
+ rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x83);
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ rtlphy->initgain_backup.xaagccore1 =
+ rtlpriv->dm_digtable.cur_igvalue;
+ rtlpriv->dm_digtable.cur_igvalue = 0x17;
+ rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+ rtlphy->set_io_inprogress = false;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
+}
+
+bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool postprocessing = false;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
+ do {
+ switch (iotype) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
+ postprocessing = true;
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
+ postprocessing = true;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+ } while (false);
+ if (postprocessing && !rtlphy->set_io_inprogress) {
+ rtlphy->set_io_inprogress = true;
+ rtlphy->current_io_type = iotype;
+ } else {
+ return false;
+ }
+ rtl8723be_phy_set_io(hw);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ return true;
+}
+
+static void rtl8723be_phy_set_rf_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+
+static void _rtl8723be_phy_set_rf_sleep(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+
+static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool bresult = true;
+ u8 i, queue_id;
+ struct rtl8192_tx_ring *ring = NULL;
+
+ switch (rfpwr_state) {
+ case ERFON:
+ if ((ppsc->rfpwr_state == ERFOFF) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+ bool rtstatus;
+ u32 initialize_count = 0;
+ do {
+ initialize_count++;
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
+ rtstatus = rtl_ps_enable_nic(hw);
+ } while (!rtstatus && (initialize_count < 10));
+ RT_CLEAR_PS_LEVEL(ppsc,
+ RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON sleeped:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
+ ppsc->last_awake_jiffies = jiffies;
+ rtl8723be_phy_set_rf_on(hw);
+ }
+ if (mac->link_state == MAC80211_LINKED)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK);
+ else
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK);
+ break;
+ case ERFOFF:
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] =%d before "
+ "doze!\n", (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
+
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
+ break;
+ }
+ }
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
+ rtl_ps_disable_nic(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_POWER_OFF);
+ }
+ }
+ break;
+ case ERFSLEEP:
+ if (ppsc->rfpwr_state == ERFOFF)
+ break;
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] =%d before "
+ "doze!\n", (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
+
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
+ break;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
+ ppsc->last_sleep_jiffies = jiffies;
+ _rtl8723be_phy_set_rf_sleep(hw);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ bresult = false;
+ break;
+ }
+ if (bresult)
+ ppsc->rfpwr_state = rfpwr_state;
+ return bresult;
+}
+
+bool rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ bool bresult = false;
+
+ if (rfpwr_state == ppsc->rfpwr_state)
+ return bresult;
+ bresult = _rtl8723be_phy_set_rf_power_state(hw, rfpwr_state);
+ return bresult;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
new file mode 100644
index 000000000000..444ef95bb6af
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
@@ -0,0 +1,217 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_PHY_H__
+#define __RTL8723BE_PHY_H__
+
+/* It must always be set to 4, otherwise the efuse table read sequence will be wrong. */
+#define MAX_TX_COUNT 4
+#define TX_1S 0
+#define TX_2S 1
+
+#define MAX_POWER_INDEX 0x3F
+
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+#define RT_CANNOT_IO(hw) false
+#define HIGHPOWER_RADIOA_ARRAYLEN 22
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM 9
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 10
+#define index_mapping_NUM 15
+
+#define APK_BB_REG_NUM 5
+#define APK_AFE_REG_NUM 16
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 1
+
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50
+#define ANTENNADIVERSITYVALUE 0x80
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define RESET_CNT_LIMIT 3
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_MAC_REG_NUM 4
+
+#define RF6052_MAX_PATH 2
+
+#define CT_OFFSET_MAC_ADDR 0X16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
+
+#define CT_OFFSET_CHANNEL_PLAH 0x75
+#define CT_OFFSET_THERMAL_METER 0x78
+#define CT_OFFSET_RF_OPTION 0x79
+#define CT_OFFSET_VERSION 0x7E
+#define CT_OFFSET_CUSTOMER_ID 0x7F
+
+#define RTL92C_MAX_PATH_NUM 2
+
+enum hw90_block_e {
+ HW90_BLOCK_MAC = 0,
+ HW90_BLOCK_PHY0 = 1,
+ HW90_BLOCK_PHY1 = 2,
+ HW90_BLOCK_RF = 3,
+ HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+ BASEBAND_CONFIG_PHY_REG = 0,
+ BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+ RA_OFFSET_LEGACY_OFDM1,
+ RA_OFFSET_LEGACY_OFDM2,
+ RA_OFFSET_HT_OFDM1,
+ RA_OFFSET_HT_OFDM2,
+ RA_OFFSET_HT_OFDM3,
+ RA_OFFSET_HT_OFDM4,
+ RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+ ANTENNA_NONE,
+ ANTENNA_D,
+ ANTENNA_C,
+ ANTENNA_CD,
+ ANTENNA_B,
+ ANTENNA_BD,
+ ANTENNA_BC,
+ ANTENNA_BCD,
+ ANTENNA_A,
+ ANTENNA_AD,
+ ANTENNA_AC,
+ ANTENNA_ACD,
+ ANTENNA_AB,
+ ANTENNA_ABD,
+ ANTENNA_ABC,
+ ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+ u32 r_tx_antenna:4;
+ u32 r_ant_l:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_non_ht_s1:4;
+ u32 ofdm_txsc:2;
+ u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+ u8 r_cckrx_enable_2:2;
+ u8 r_cckrx_enable:2;
+ u8 r_ccktx_enable:4;
+};
+
+
+struct efuse_contents {
+ u8 mac_addr[ETH_ALEN];
+ u8 cck_tx_power_idx[6];
+ u8 ht40_1s_tx_power_idx[6];
+ u8 ht40_2s_tx_power_idx_diff[3];
+ u8 ht20_tx_power_idx_diff[3];
+ u8 ofdm_tx_power_idx_diff[3];
+ u8 ht40_max_power_offset[3];
+ u8 ht20_max_power_offset[3];
+ u8 channel_plan;
+ u8 thermal_meter;
+ u8 rf_option[5];
+ u8 version;
+ u8 oem_id;
+ u8 regulatory;
+};
+
+struct tx_power_struct {
+ u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_txpowerdiff;
+ u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 pwrgroup_cnt;
+ u32 mcs_original_offset[4][16];
+};
+
+enum _ANT_DIV_TYPE {
+ NO_ANTDIV = 0xFF,
+ CG_TRX_HW_ANTDIV = 0x01,
+ CGCS_RX_HW_ANTDIV = 0x02,
+ FIXED_HW_ANTDIV = 0x03,
+ CG_TRX_SMART_ANTDIV = 0x04,
+ CGCS_RX_SW_ANTDIV = 0x05,
+};
+
+u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
+bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw);
+void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw,
+ u8 channel);
+void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw,
+ u8 operation);
+void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
+ bool b_recovery);
+void rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+bool rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c
new file mode 100644
index 000000000000..b5167e73fecf
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c
@@ -0,0 +1,106 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseqcmd.h"
+#include "pwrseq.h"
+
+
+/* drivers should parse the arrays below and perform the corresponding actions */
+/*3 Power on Array*/
+struct wlan_pwr_cfg rtl8723B_power_on_flow[RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_CARDEMU_TO_ACT
+ RTL8723B_TRANS_END
+};
+
+/*3Radio off GPIO Array */
+struct wlan_pwr_cfg rtl8723B_radio_off_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_END
+};
+
+/*3Card Disable Array*/
+struct wlan_pwr_cfg rtl8723B_card_disable_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_CARDDIS
+ RTL8723B_TRANS_END
+};
+
+/*3 Card Enable Array*/
+struct wlan_pwr_cfg rtl8723B_card_enable_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_CARDDIS_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_ACT
+ RTL8723B_TRANS_END
+};
+
+/*3Suspend Array*/
+struct wlan_pwr_cfg rtl8723B_suspend_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_SUS
+ RTL8723B_TRANS_END
+};
+
+/*3 Resume Array*/
+struct wlan_pwr_cfg rtl8723B_resume_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_SUS_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_ACT
+ RTL8723B_TRANS_END
+};
+
+/*3HWPDN Array*/
+struct wlan_pwr_cfg rtl8723B_hwpdn_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_PDN
+ RTL8723B_TRANS_END
+};
+
+/*3 Enter LPS */
+struct wlan_pwr_cfg rtl8723B_enter_lps_flow[RTL8723B_TRANS_ACT_TO_LPS_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ /*FW behavior*/
+ RTL8723B_TRANS_ACT_TO_LPS
+ RTL8723B_TRANS_END
+};
+
+/*3 Leave LPS */
+struct wlan_pwr_cfg rtl8723B_leave_lps_flow[RTL8723B_TRANS_LPS_TO_ACT_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ /*FW behavior*/
+ RTL8723B_TRANS_LPS_TO_ACT
+ RTL8723B_TRANS_END
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h
new file mode 100644
index 000000000000..a62f43ed8d32
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h
@@ -0,0 +1,304 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_PWRSEQ_H__
+#define __RTL8723BE_PWRSEQ_H__
+
+/* Check document WM-20130425-JackieLau-RTL8723B_Power_Architecture v05.vsd
+ * There are 6 HW Power States:
+ * 0: POFF--Power Off
+ * 1: PDN--Power Down
+ * 2: CARDEMU--Card Emulation
+ * 3: ACT--Active Mode
+ * 4: LPS--Low Power State
+ * 5: SUS--Suspend
+ *
+ * The transitions between the different states are defined below
+ * TRANS_CARDEMU_TO_ACT
+ * TRANS_ACT_TO_CARDEMU
+ * TRANS_CARDEMU_TO_SUS
+ * TRANS_SUS_TO_CARDEMU
+ * TRANS_CARDEMU_TO_PDN
+ * TRANS_ACT_TO_LPS
+ * TRANS_LPS_TO_ACT
+ *
+ * TRANS_END
+ */
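+
+/* Each RTL8723B_TRANS_* macro below expands to a list of wlan_pwr_cfg
+ * initializers of the form {offset, cut mask, fab mask, interface mask,
+ * base-address selector, command, bit mask, value}; the flows in pwrseq.c
+ * chain these lists and terminate them with RTL8723B_TRANS_END.
+ */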
+#define RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS 23
+#define RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS 15
+#define RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS 15
+#define RTL8723B_TRANS_SUS_TO_CARDEMU_STEPS 15
+#define RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS 15
+#define RTL8723B_TRANS_PDN_TO_CARDEMU_STEPS 15
+#define RTL8723B_TRANS_ACT_TO_LPS_STEPS 15
+#define RTL8723B_TRANS_LPS_TO_ACT_STEPS 15
+#define RTL8723B_TRANS_END_STEPS 1
+
+#define RTL8723B_TRANS_CARDEMU_TO_ACT \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
+ {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS}, \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)|BIT(2)), 0}, \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , 0}, \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , BIT(0)}, \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, \
+ {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)}, \
+ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
+ {0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x005A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0068, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3), BIT(3)}, \
+ {0x0069, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)},
+
+#define RTL8723B_TRANS_ACT_TO_CARDEMU \
+ {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, \
+ {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, \
+ {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), 0}, \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
+ PWR_CMD_WRITE, BIT(5), BIT(5)}, \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
+ PWR_CMD_WRITE, BIT(0), 0},
+
+#define RTL8723B_TRANS_CARDEMU_TO_SUS \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4) | BIT(3), (BIT(4) | BIT(3))}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
+ PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) | BIT(4)},\
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0},
+
+#define RTL8723B_TRANS_SUS_TO_CARDEMU \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0},
+
+#define RTL8723B_TRANS_CARDEMU_TO_CARDDIS \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), BIT(2)}, \
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 1}, \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0},
+
+#define RTL8723B_TRANS_CARDDIS_TO_CARDEMU \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},
+
+#define RTL8723B_TRANS_CARDEMU_TO_PDN \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_SDIO_MSK | PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, \
+ PWR_CMD_WRITE, 0xFF, 0x20}, \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)},
+
+#define RTL8723B_TRANS_PDN_TO_CARDEMU \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},
+
+#define RTL8723B_TRANS_ACT_TO_LPS \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \
+ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03}, \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
+ {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00}, \
+ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)},
+
+#define RTL8723B_TRANS_LPS_TO_ACT \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, \
+ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \
+ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, \
+ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0}, \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1) | BIT(0), BIT(1) | BIT(0)}, \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},
+
+#define RTL8723B_TRANS_END \
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, \
+ PWR_CMD_END, 0, 0},
+
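+/*
+ * Illustrative sketch (the actual definitions live in pwrseq.c): each flow
+ * table declared below is expected to be built by expanding the *_TRANS_*
+ * macros back to back and terminating the list with RTL8723B_TRANS_END, so
+ * that the number of entries matches the *_STEPS constants used here, e.g.
+ *
+ *	struct wlan_pwr_cfg rtl8723B_power_on_flow
+ *		[RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS +
+ *		 RTL8723B_TRANS_END_STEPS] = {
+ *		RTL8723B_TRANS_CARDEMU_TO_ACT
+ *		RTL8723B_TRANS_END
+ *	};
+ */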
+extern struct wlan_pwr_cfg rtl8723B_power_on_flow
+ [RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_radio_off_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_card_disable_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_card_enable_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_suspend_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_resume_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_hwpdn_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_enter_lps_flow
+ [RTL8723B_TRANS_ACT_TO_LPS_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_leave_lps_flow
+ [RTL8723B_TRANS_LPS_TO_ACT_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+
+/* RTL8723 Power Configuration CMDs for PCIe interface */
+#define RTL8723_NIC_PWR_ON_FLOW rtl8723B_power_on_flow
+#define RTL8723_NIC_RF_OFF_FLOW rtl8723B_radio_off_flow
+#define RTL8723_NIC_DISABLE_FLOW rtl8723B_card_disable_flow
+#define RTL8723_NIC_ENABLE_FLOW rtl8723B_card_enable_flow
+#define RTL8723_NIC_SUSPEND_FLOW rtl8723B_suspend_flow
+#define RTL8723_NIC_RESUME_FLOW rtl8723B_resume_flow
+#define RTL8723_NIC_PDN_FLOW rtl8723B_hwpdn_flow
+#define RTL8723_NIC_LPS_ENTER_FLOW rtl8723B_enter_lps_flow
+#define RTL8723_NIC_LPS_LEAVE_FLOW rtl8723B_leave_lps_flow
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c
new file mode 100644
index 000000000000..e4a507a756fb
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseq.h"
+
+/* Description:
+ *	This routine deals with parsing the Power Configuration CMDs
+ *	for the RTL8723/RTL8188E series ICs.
+ * Assumption:
+ *	We should follow the specific format released by HW SD.
+ *
+ * 2011.07.07, added by Roger.
+ */
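+
+/* Usage sketch (illustrative only; the real callers live elsewhere in the
+ * driver): the HAL code would hand one of the flow tables from pwrseq.h to
+ * this parser, e.g.
+ *
+ *	rtlbe_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ *				   PWR_INTF_PCI_MSK, RTL8723_NIC_ENABLE_FLOW);
+ *
+ * Each table entry is matched against the cut/fab/interface arguments, so
+ * only the commands relevant to this chip revision and host interface are
+ * executed.
+ */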
+bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+ u8 fab_version, u8 interface_type,
+ struct wlan_pwr_cfg pwrcfgcmd[])
+{
+ struct wlan_pwr_cfg pwr_cfg_cmd = {0};
+ bool b_polling_bit = false;
+ u32 ary_idx = 0;
+ u8 value = 0;
+ u32 offset = 0;
+ u32 polling_count = 0;
+ u32 max_polling_cnt = 5000;
+
+ do {
+ pwr_cfg_cmd = pwrcfgcmd[ary_idx];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "offset(%#x),cut_msk(%#x), fab_msk(%#x),"
+ "interface_msk(%#x), base(%#x), "
+ "cmd(%#x), msk(%#x), value(%#x)\n",
+ GET_PWR_CFG_OFFSET(pwr_cfg_cmd),
+ GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_BASE(pwr_cfg_cmd),
+ GET_PWR_CFG_CMD(pwr_cfg_cmd),
+ GET_PWR_CFG_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_VALUE(pwr_cfg_cmd));
+
+		if ((GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd) & fab_version) &&
+		    (GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd) & cut_version) &&
+		    (GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd) & interface_type)) {
+ switch (GET_PWR_CFG_CMD(pwr_cfg_cmd)) {
+ case PWR_CMD_READ:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "PWR_CMD_READ\n");
+ break;
+ case PWR_CMD_WRITE:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "PWR_CMD_WRITE\n");
+ offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+ /*Read the value from system register*/
+ value = rtl_read_byte(rtlpriv, offset);
+ value &= (~(GET_PWR_CFG_MASK(pwr_cfg_cmd)));
+ value = value | (GET_PWR_CFG_VALUE(pwr_cfg_cmd)
+ & GET_PWR_CFG_MASK(pwr_cfg_cmd));
+
+				/* Write the value back to system register */
+ rtl_write_byte(rtlpriv, offset, value);
+ break;
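+			/* Poll until the masked read matches the expected
+			 * value; give up after max_polling_cnt * 10 us.
+			 */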
+ case PWR_CMD_POLLING:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "PWR_CMD_POLLING\n");
+ b_polling_bit = false;
+ offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+ do {
+ value = rtl_read_byte(rtlpriv, offset);
+
+ value &= GET_PWR_CFG_MASK(pwr_cfg_cmd);
+ if (value ==
+ (GET_PWR_CFG_VALUE(pwr_cfg_cmd) &
+ GET_PWR_CFG_MASK(pwr_cfg_cmd)))
+ b_polling_bit = true;
+ else
+ udelay(10);
+
+ if (polling_count++ > max_polling_cnt)
+ return false;
+
+ } while (!b_polling_bit);
+ break;
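+			/* For a delay command the offset field carries the
+			 * duration and the value field selects the unit
+			 * (PWRSEQ_DELAY_US or PWRSEQ_DELAY_MS).
+			 */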
+ case PWR_CMD_DELAY:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "PWR_CMD_DELAY\n");
+ if (GET_PWR_CFG_VALUE(pwr_cfg_cmd) ==
+ PWRSEQ_DELAY_US)
+ udelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+ else
+ mdelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+ break;
+ case PWR_CMD_END:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "PWR_CMD_END\n");
+ return true;
+ default:
+ RT_ASSERT(false,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "Unknown CMD!!\n");
+ break;
+ }
+ }
+
+ ary_idx++;
+ } while (1);
+
+ return true;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h
new file mode 100644
index 000000000000..ce14a3b5cb71
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h
@@ -0,0 +1,95 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_PWRSEQCMD_H__
+#define __RTL8723BE_PWRSEQCMD_H__
+
+#include "../wifi.h"
+/*---------------------------------------------*/
+/* The value of cmd: 4 bits */
+/*---------------------------------------------*/
+#define PWR_CMD_READ 0x00
+#define PWR_CMD_WRITE 0x01
+#define PWR_CMD_POLLING 0x02
+#define PWR_CMD_DELAY 0x03
+#define PWR_CMD_END 0x04
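+
+/* A power-configuration sequence is an array of struct wlan_pwr_cfg entries
+ * terminated by an entry whose cmd field is PWR_CMD_END (see the
+ * RTL8723B_TRANS_END macro in pwrseq.h).
+ */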
+
+/* define the base address of each block */
+#define PWR_BASEADDR_MAC 0x00
+#define PWR_BASEADDR_USB 0x01
+#define PWR_BASEADDR_PCIE 0x02
+#define PWR_BASEADDR_SDIO 0x03
+
+#define PWR_INTF_SDIO_MSK BIT(0)
+#define PWR_INTF_USB_MSK BIT(1)
+#define PWR_INTF_PCI_MSK BIT(2)
+#define PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+#define PWR_FAB_TSMC_MSK BIT(0)
+#define PWR_FAB_UMC_MSK BIT(1)
+#define PWR_FAB_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+#define PWR_CUT_TESTCHIP_MSK BIT(0)
+#define PWR_CUT_A_MSK BIT(1)
+#define PWR_CUT_B_MSK BIT(2)
+#define PWR_CUT_C_MSK BIT(3)
+#define PWR_CUT_D_MSK BIT(4)
+#define PWR_CUT_E_MSK BIT(5)
+#define PWR_CUT_F_MSK BIT(6)
+#define PWR_CUT_G_MSK BIT(7)
+#define PWR_CUT_ALL_MSK 0xFF
+
+enum pwrseq_delay_unit {
+ PWRSEQ_DELAY_US,
+ PWRSEQ_DELAY_MS,
+};
+
+struct wlan_pwr_cfg {
+ u16 offset;
+ u8 cut_msk;
+ u8 fab_msk:4;
+ u8 interface_msk:4;
+ u8 base:4;
+ u8 cmd:4;
+ u8 msk;
+ u8 value;
+};
+
+#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
+#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
+#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
+#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
+#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
+#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
+#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
+#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
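+
+/* Example (illustrative): for the table entry
+ *	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,
+ *	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}
+ * GET_PWR_CFG_OFFSET() yields 0x0005 and GET_PWR_CFG_CMD() yields
+ * PWR_CMD_WRITE, so the parser clears BIT(7) at offset 0x0005 (mask BIT(7),
+ * value 0).
+ */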
+
+bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+ u8 fab_version, u8 interface_type,
+ struct wlan_pwr_cfg pwrcfgcmd[]);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/rtlwifi/rtl8723be/reg.h
new file mode 100644
index 000000000000..4c653fab8795
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/reg.h
@@ -0,0 +1,2277 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_REG_H__
+#define __RTL8723BE_REG_H__
+
+#define TXPKT_BUF_SELECT 0x69
+#define RXPKT_BUF_SELECT 0xA5
+#define DISABLE_TRXPKT_BUF_ACCESS 0x0
+
+#define REG_SYS_ISO_CTRL 0x0000
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_APS_FSMCO 0x0004
+#define REG_SYS_CLKR 0x0008
+#define REG_9346CR 0x000A
+#define REG_EE_VPD 0x000C
+#define REG_AFE_MISC 0x0010
+#define REG_SPS0_CTRL 0x0011
+#define REG_SPS_OCP_CFG 0x0018
+#define REG_RSV_CTRL 0x001C
+#define REG_RF_CTRL 0x001F
+#define REG_LDOA15_CTRL 0x0020
+#define REG_LDOV12D_CTRL 0x0021
+#define REG_LDOHCI12_CTRL 0x0022
+#define REG_LPLDO_CTRL 0x0023
+#define REG_AFE_XTAL_CTRL 0x0024
+/* 1.5v for 8188EE test chip, 1.4v for MP chip */
+#define REG_AFE_LDO_CTRL 0x0027
+#define REG_AFE_PLL_CTRL 0x0028
+#define REG_MAC_PHY_CTRL 0x002c
+#define REG_EFUSE_CTRL 0x0030
+#define REG_EFUSE_TEST 0x0034
+#define REG_PWR_DATA 0x0038
+#define REG_CAL_TIMER 0x003C
+#define REG_ACLK_MON 0x003E
+#define REG_GPIO_MUXCFG 0x0040
+#define REG_GPIO_IO_SEL 0x0042
+#define REG_MAC_PINMUX_CFG 0x0043
+#define REG_GPIO_PIN_CTRL 0x0044
+#define REG_GPIO_INTM 0x0048
+#define REG_LEDCFG0 0x004C
+#define REG_LEDCFG1 0x004D
+#define REG_LEDCFG2 0x004E
+#define REG_LEDCFG3 0x004F
+#define REG_FSIMR 0x0050
+#define REG_FSISR 0x0054
+#define REG_HSIMR 0x0058
+#define REG_HSISR 0x005c
+#define REG_GPIO_PIN_CTRL_2 0x0060
+#define REG_GPIO_IO_SEL_2 0x0062
+#define REG_MULTI_FUNC_CTRL 0x0068
+#define REG_GPIO_OUTPUT 0x006c
+#define REG_AFE_XTAL_CTRL_EXT 0x0078
+#define REG_XCK_OUT_CTRL 0x007c
+#define REG_MCUFWDL 0x0080
+#define REG_WOL_EVENT 0x0081
+#define REG_MCUTSTCFG 0x0084
+
+
+#define REG_HIMR 0x00B0
+#define REG_HISR 0x00B4
+#define REG_HIMRE 0x00B8
+#define REG_HISRE 0x00BC
+
+#define REG_EFUSE_ACCESS 0x00CF
+
+#define REG_BIST_SCAN 0x00D0
+#define REG_BIST_RPT 0x00D4
+#define REG_BIST_ROM_RPT 0x00D8
+#define REG_USB_SIE_INTF 0x00E0
+#define REG_PCIE_MIO_INTF 0x00E4
+#define REG_PCIE_MIO_INTD 0x00E8
+#define REG_HPON_FSM 0x00EC
+#define REG_SYS_CFG 0x00F0
+#define REG_GPIO_OUTSTS 0x00F4
+#define REG_SYS_CFG1 0x00F0
+#define REG_ROM_VERSION 0x00FD
+
+#define REG_CR 0x0100
+#define REG_PBP 0x0104
+#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
+#define REG_TRXDMA_CTRL 0x010C
+#define REG_TRXFF_BNDY 0x0114
+#define REG_TRXFF_STATUS 0x0118
+#define REG_RXFF_PTR 0x011C
+
+#define REG_CPWM 0x012F
+#define REG_FWIMR 0x0130
+#define REG_FWISR 0x0134
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_PKTBUF_DBG_DATA_L 0x0144
+#define REG_PKTBUF_DBG_DATA_H 0x0148
+#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL + 2)
+
+#define REG_TC0_CTRL 0x0150
+#define REG_TC1_CTRL 0x0154
+#define REG_TC2_CTRL 0x0158
+#define REG_TC3_CTRL 0x015C
+#define REG_TC4_CTRL 0x0160
+#define REG_TCUNIT_BASE 0x0164
+#define REG_MBIST_START 0x0174
+#define REG_MBIST_DONE 0x0178
+#define REG_MBIST_FAIL 0x017C
+#define REG_32K_CTRL 0x0194
+#define REG_C2HEVT_MSG_NORMAL 0x01A0
+#define REG_C2HEVT_CLEAR 0x01AF
+#define REG_C2HEVT_MSG_TEST 0x01B8
+#define REG_MCUTST_1 0x01c0
+#define REG_FMETHR 0x01C8
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX_0 0x01D0
+#define REG_HMEBOX_1 0x01D4
+#define REG_HMEBOX_2 0x01D8
+#define REG_HMEBOX_3 0x01DC
+
+#define REG_LLT_INIT 0x01E0
+#define REG_BB_ACCEESS_CTRL 0x01E8
+#define REG_BB_ACCESS_DATA 0x01EC
+
+#define REG_HMEBOX_EXT_0 0x01F0
+#define REG_HMEBOX_EXT_1 0x01F4
+#define REG_HMEBOX_EXT_2 0x01F8
+#define REG_HMEBOX_EXT_3 0x01FC
+
+#define REG_RQPN 0x0200
+#define REG_FIFOPAGE 0x0204
+#define REG_TDECTRL 0x0208
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define REG_RQPN_NPQ 0x0214
+
+#define REG_RXDMA_AGG_PG_TH 0x0280
+/* FW shall update this register before FW writes RXPKT_RELEASE_POLL to 1 */
+#define REG_FW_UPD_RDPTR 0x0284
+/* Control the RX DMA.*/
+#define REG_RXDMA_CONTROL 0x0286
+/* The number of packets in RXPKTBUF. */
+#define REG_RXPKT_NUM 0x0287
+
+#define REG_PCIE_CTRL_REG 0x0300
+#define REG_INT_MIG 0x0304
+#define REG_BCNQ_DESA 0x0308
+#define REG_HQ_DESA 0x0310
+#define REG_MGQ_DESA 0x0318
+#define REG_VOQ_DESA 0x0320
+#define REG_VIQ_DESA 0x0328
+#define REG_BEQ_DESA 0x0330
+#define REG_BKQ_DESA 0x0338
+#define REG_RX_DESA 0x0340
+
+#define REG_DBI 0x0348
+#define REG_MDIO 0x0354
+#define REG_DBG_SEL 0x0360
+#define REG_PCIE_HRPWM 0x0361
+#define REG_PCIE_HCPWM 0x0363
+#define REG_UART_CTRL 0x0364
+#define REG_WATCH_DOG 0x0368
+#define REG_UART_TX_DESA 0x0370
+#define REG_UART_RX_DESA 0x0378
+
+
+#define REG_HDAQ_DESA_NODEF 0x0000
+#define REG_CMDQ_DESA_NODEF 0x0000
+
+#define REG_VOQ_INFORMATION 0x0400
+#define REG_VIQ_INFORMATION 0x0404
+#define REG_BEQ_INFORMATION 0x0408
+#define REG_BKQ_INFORMATION 0x040C
+#define REG_MGQ_INFORMATION 0x0410
+#define REG_HGQ_INFORMATION 0x0414
+#define REG_BCNQ_INFORMATION 0x0418
+#define REG_TXPKT_EMPTY 0x041A
+
+
+#define REG_CPU_MGQ_INFORMATION 0x041C
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define REG_HWSEQ_CTRL 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY 0x0425
+#define REG_MULTI_BCNQ_EN 0x0426
+#define REG_MULTI_BCNQ_OFFSET 0x0427
+#define REG_SPEC_SIFS 0x0428
+#define REG_RL 0x042A
+#define REG_DARFRC 0x0430
+#define REG_RARFRC 0x0438
+#define REG_RRSR 0x0440
+#define REG_ARFR0 0x0444
+#define REG_ARFR1 0x0448
+#define REG_ARFR2 0x044C
+#define REG_ARFR3 0x0450
+#define REG_AMPDU_MAX_TIME 0x0456
+#define REG_AGGLEN_LMT 0x0458
+#define REG_AMPDU_MIN_SPACE 0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
+#define REG_FAST_EDCA_CTRL 0x0460
+#define REG_RD_RESP_PKT_TH 0x0463
+#define REG_INIRTS_RATE_SEL 0x0480
+#define REG_INIDATA_RATE_SEL 0x0484
+#define REG_POWER_STATUS 0x04A4
+#define REG_POWER_STAGE1 0x04B4
+#define REG_POWER_STAGE2 0x04B8
+#define REG_PKT_LIFE_TIME 0x04C0
+#define REG_STBC_SETTING 0x04C4
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
+#define REG_EARLY_MODE_CONTROL 0x04D0
+#define REG_NQOS_SEQ 0x04DC
+#define REG_QOS_SEQ 0x04DE
+#define REG_NEED_CPU_HANDLE 0x04E0
+#define REG_PKT_LOSE_RPT 0x04E1
+#define REG_PTCL_ERR_STATUS 0x04E2
+#define REG_TX_RPT_CTRL 0x04EC
+#define REG_TX_RPT_TIME 0x04F0
+#define REG_DUMMY 0x04FC
+
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_BCNTCFG 0x0510
+#define REG_PIFS 0x0512
+#define REG_RDG_PIFS 0x0513
+#define REG_SIFS_CTX 0x0514
+#define REG_SIFS_TRX 0x0516
+#define REG_AGGR_BREAK_TIME 0x051A
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define REG_TXPAUSE 0x0522
+#define REG_DIS_TXREQ_CLR 0x0523
+#define REG_RD_CTRL 0x0524
+#define REG_TBTT_PROHIBIT 0x0540
+#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
+#define REG_BCN_CTRL 0x0550
+#define REG_USTIME_TSF 0x0551
+#define REG_MBID_NUM 0x0552
+#define REG_DUAL_TSF_RST 0x0553
+#define REG_BCN_INTERVAL 0x0554
+#define REG_MBSSID_BCN_SPACE 0x0554
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_RXTSF_OFFSET_OFDM 0x055F
+#define REG_TSFTR 0x0560
+#define REG_INIT_TSFTR 0x0564
+#define REG_SECONDARY_CCA_CTRL 0x0577
+#define REG_PSTIMER 0x0580
+#define REG_TIMER0 0x0584
+#define REG_TIMER1 0x0588
+#define REG_ACMHWCTRL 0x05C0
+#define REG_ACMRSTCTRL 0x05C1
+#define REG_ACMAVG 0x05C2
+#define REG_VO_ADMTIME 0x05C4
+#define REG_VI_ADMTIME 0x05C6
+#define REG_BE_ADMTIME 0x05C8
+#define REG_EDCA_RANDOM_GEN 0x05CC
+#define REG_SCH_TXCMD 0x05D0
+
+#define REG_APSD_CTRL 0x0600
+#define REG_BWOPMODE 0x0603
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DLK_TIME 0x060D
+#define REG_RX_DRVINFO_SZ 0x060F
+
+#define REG_MACID 0x0610
+#define REG_BSSID 0x0618
+#define REG_MAR 0x0620
+#define REG_MBIDCAMCFG 0x0628
+
+#define REG_USTIME_EDCA 0x0638
+#define REG_MAC_SPEC_SIFS 0x063A
+#define REG_RESP_SIFS_CCK 0x063C
+#define REG_RESP_SIFS_OFDM 0x063E
+#define REG_ACKTO 0x0640
+#define REG_CTS2TO 0x0641
+#define REG_EIFS 0x0642
+
+#define REG_NAV_CTRL 0x0650
+#define REG_BACAMCMD 0x0654
+#define REG_BACAMCONTENT 0x0658
+#define REG_LBDLY 0x0660
+#define REG_FWDLY 0x0661
+#define REG_RXERR_RPT 0x0664
+#define REG_TRXPTCL_CTL 0x0668
+
+#define REG_CAMCMD 0x0670
+#define REG_CAMWRITE 0x0674
+#define REG_CAMREAD 0x0678
+#define REG_CAMDBG 0x067C
+#define REG_SECCFG 0x0680
+
+#define REG_WOW_CTRL 0x0690
+#define REG_PSSTATUS 0x0691
+#define REG_PS_RX_INFO 0x0692
+#define REG_UAPSD_TID 0x0693
+#define REG_LPNAV_CTRL 0x0694
+#define REG_WKFMCAM_NUM 0x0698
+#define REG_WKFMCAM_RWD 0x069C
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BCN_PSR_RPT 0x06A8
+#define REG_CALB32K_CTRL 0x06AC
+#define REG_PKT_MON_CTRL 0x06B4
+#define REG_BT_COEX_TABLE 0x06C0
+#define REG_WMAC_RESP_TXINFO 0x06D8
+
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_TEST_USB_TXQS 0xFE48
+#define REG_TEST_SIE_VID 0xFE60
+#define REG_TEST_SIE_PID 0xFE62
+#define REG_TEST_SIE_OPTIONAL 0xFE64
+#define REG_TEST_SIE_CHIRP_K 0xFE65
+#define REG_TEST_SIE_PHY 0xFE66
+#define REG_TEST_SIE_MAC_ADDR 0xFE70
+#define REG_TEST_SIE_STRING 0xFE80
+
+#define REG_NORMAL_SIE_VID 0xFE60
+#define REG_NORMAL_SIE_PID 0xFE62
+#define REG_NORMAL_SIE_OPTIONAL 0xFE64
+#define REG_NORMAL_SIE_EP 0xFE65
+#define REG_NORMAL_SIE_PHY 0xFE68
+#define REG_NORMAL_SIE_MAC_ADDR 0xFE70
+#define REG_NORMAL_SIE_STRING 0xFE80
+
+#define CR9346 REG_9346CR
+#define MSR (REG_CR + 2)
+#define ISR REG_HISR
+#define TSFR REG_TSFTR
+
+#define MACIDR0 REG_MACID
+#define MACIDR4 (REG_MACID + 4)
+
+#define PBP REG_PBP
+
+#define IDR0 MACIDR0
+#define IDR4 MACIDR4
+
+#define UNUSED_REGISTER 0x1BF
+#define DCAM UNUSED_REGISTER
+#define PSR UNUSED_REGISTER
+#define BBADDR UNUSED_REGISTER
+#define PHYDATAR UNUSED_REGISTER
+
+#define INVALID_BBRF_VALUE 0x12345678
+
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+#define CMDEEPROM_EN BIT(5)
+#define CMDEEPROM_SEL BIT(4)
+#define CMD9346CR_9356SEL BIT(4)
+#define AUTOLOAD_EEPROM (CMDEEPROM_EN | CMDEEPROM_SEL)
+#define AUTOLOAD_EFUSE CMDEEPROM_EN
+
+#define GPIOSEL_GPIO 0
+#define GPIOSEL_ENBT BIT(5)
+
+#define GPIO_IN REG_GPIO_PIN_CTRL
+#define GPIO_OUT (REG_GPIO_PIN_CTRL + 1)
+#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL + 2)
+#define GPIO_MOD (REG_GPIO_PIN_CTRL + 3)
+
+/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 bits) */
+#define HSIMR_GPIO12_0_INT_EN BIT(0)
+#define HSIMR_SPS_OCP_INT_EN BIT(5)
+#define HSIMR_RON_INT_EN BIT(6)
+#define HSIMR_PDN_INT_EN BIT(7)
+#define HSIMR_GPIO9_INT_EN BIT(25)
+
+/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 bits) */
+
+#define HSISR_GPIO12_0_INT BIT(0)
+#define HSISR_SPS_OCP_INT BIT(5)
+#define HSISR_RON_INT_EN BIT(6)
+#define HSISR_PDNINT BIT(7)
+#define HSISR_GPIO9_INT BIT(25)
+
+#define MSR_NOLINK 0x00
+#define MSR_ADHOC 0x01
+#define MSR_INFRA 0x02
+#define MSR_AP 0x03
+
+#define RRSR_RSC_OFFSET 21
+#define RRSR_SHORT_OFFSET 23
+#define RRSR_RSC_BW_40M 0x600000
+#define RRSR_RSC_UPSUBCHNL 0x400000
+#define RRSR_RSC_LOWSUBCHNL 0x200000
+#define RRSR_SHORT 0x800000
+#define RRSR_1M BIT(0)
+#define RRSR_2M BIT(1)
+#define RRSR_5_5M BIT(2)
+#define RRSR_11M BIT(3)
+#define RRSR_6M BIT(4)
+#define RRSR_9M BIT(5)
+#define RRSR_12M BIT(6)
+#define RRSR_18M BIT(7)
+#define RRSR_24M BIT(8)
+#define RRSR_36M BIT(9)
+#define RRSR_48M BIT(10)
+#define RRSR_54M BIT(11)
+#define RRSR_MCS0 BIT(12)
+#define RRSR_MCS1 BIT(13)
+#define RRSR_MCS2 BIT(14)
+#define RRSR_MCS3 BIT(15)
+#define RRSR_MCS4 BIT(16)
+#define RRSR_MCS5 BIT(17)
+#define RRSR_MCS6 BIT(18)
+#define RRSR_MCS7 BIT(19)
+#define BRSR_ACKSHORTPMB BIT(23)
+
+#define RATR_1M 0x00000001
+#define RATR_2M 0x00000002
+#define RATR_55M 0x00000004
+#define RATR_11M 0x00000008
+#define RATR_6M 0x00000010
+#define RATR_9M 0x00000020
+#define RATR_12M 0x00000040
+#define RATR_18M 0x00000080
+#define RATR_24M 0x00000100
+#define RATR_36M 0x00000200
+#define RATR_48M 0x00000400
+#define RATR_54M 0x00000800
+#define RATR_MCS0 0x00001000
+#define RATR_MCS1 0x00002000
+#define RATR_MCS2 0x00004000
+#define RATR_MCS3 0x00008000
+#define RATR_MCS4 0x00010000
+#define RATR_MCS5 0x00020000
+#define RATR_MCS6 0x00040000
+#define RATR_MCS7 0x00080000
+#define RATR_MCS8 0x00100000
+#define RATR_MCS9 0x00200000
+#define RATR_MCS10 0x00400000
+#define RATR_MCS11 0x00800000
+#define RATR_MCS12 0x01000000
+#define RATR_MCS13 0x02000000
+#define RATR_MCS14 0x04000000
+#define RATR_MCS15 0x08000000
+
+#define RATE_1M BIT(0)
+#define RATE_2M BIT(1)
+#define RATE_5_5M BIT(2)
+#define RATE_11M BIT(3)
+#define RATE_6M BIT(4)
+#define RATE_9M BIT(5)
+#define RATE_12M BIT(6)
+#define RATE_18M BIT(7)
+#define RATE_24M BIT(8)
+#define RATE_36M BIT(9)
+#define RATE_48M BIT(10)
+#define RATE_54M BIT(11)
+#define RATE_MCS0 BIT(12)
+#define RATE_MCS1 BIT(13)
+#define RATE_MCS2 BIT(14)
+#define RATE_MCS3 BIT(15)
+#define RATE_MCS4 BIT(16)
+#define RATE_MCS5 BIT(17)
+#define RATE_MCS6 BIT(18)
+#define RATE_MCS7 BIT(19)
+#define RATE_MCS8 BIT(20)
+#define RATE_MCS9 BIT(21)
+#define RATE_MCS10 BIT(22)
+#define RATE_MCS11 BIT(23)
+#define RATE_MCS12 BIT(24)
+#define RATE_MCS13 BIT(25)
+#define RATE_MCS14 BIT(26)
+#define RATE_MCS15 BIT(27)
+
+#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\
+ RATR_24M | RATR_36M | RATR_48M | RATR_54M)
+#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\
+ RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\
+ RATR_MCS6 | RATR_MCS7)
+#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\
+ RATR_MCS11 | RATR_MCS12 | RATR_MCS13 |\
+ RATR_MCS14 | RATR_MCS15)
+
+#define BW_OPMODE_20MHZ BIT(2)
+#define BW_OPMODE_5G BIT(1)
+#define BW_OPMODE_11J BIT(0)
+
+#define CAM_VALID BIT(15)
+#define CAM_NOTVALID 0x0000
+#define CAM_USEDK BIT(5)
+
+#define CAM_NONE 0x0
+#define CAM_WEP40 0x01
+#define CAM_TKIP 0x02
+#define CAM_AES 0x04
+#define CAM_WEP104 0x05
+
+#define TOTAL_CAM_ENTRY 32
+#define HALF_CAM_ENTRY 16
+
+#define CAM_WRITE BIT(16)
+#define CAM_READ 0x00000000
+#define CAM_POLLINIG BIT(31)
+
+#define SCR_USEDK 0x01
+#define SCR_TXSEC_ENABLE 0x02
+#define SCR_RXSEC_ENABLE 0x04
+
+#define WOW_PMEN BIT(0)
+#define WOW_WOMEN BIT(1)
+#define WOW_MAGIC BIT(2)
+#define WOW_UWF BIT(3)
+
+/*********************************************
+ * 8723BE IMR/ISR bits
+ *********************************************/
+#define IMR_DISABLED 0x0
+/* IMR DW0(0x0060-0063) Bit 0-31 */
+#define IMR_TXCCK BIT(30) /* TXRPT interrupt when
+ * CCX bit of the packet is set
+ */
+#define IMR_PSTIMEOUT BIT(29) /* Power Save Time Out Interrupt */
+#define IMR_GTINT4 BIT(28) /* When GTIMER4 expires,
+ * this bit is set to 1
+ */
+#define IMR_GTINT3 BIT(27) /* When GTIMER3 expires,
+ * this bit is set to 1
+ */
+#define IMR_TBDER BIT(26) /* Transmit Beacon0 Error */
+#define IMR_TBDOK BIT(25) /* Transmit Beacon0 OK */
+#define IMR_TSF_BIT32_TOGGLE BIT(24) /* TSF Timer BIT32 toggle
+ * indication interrupt
+ */
+#define IMR_BCNDMAINT0 BIT(20) /* Beacon DMA Interrupt 0 */
+#define IMR_BCNDOK0 BIT(16) /* Beacon Queue DMA OK0 */
+#define IMR_HSISR_IND_ON_INT BIT(15) /* HSISR Indicator (HSIMR & HSISR is
+ * true, this bit is set to 1)
+ */
+#define IMR_BCNDMAINT_E BIT(14) /* Beacon DMA Interrupt
+ * Extension for Win7
+ */
+#define IMR_ATIMEND			BIT(12)	/* CTWindow End or ATIM Window End */
+#define IMR_HISR1_IND_INT BIT(11) /* HISR1 Indicator (HISR1 & HIMR1 is
+ * true, this bit is set to 1)
+ */
+#define IMR_C2HCMD BIT(10) /* CPU to Host Command INT Status,
+ * Write 1 clear
+ */
+#define IMR_CPWM2 BIT(9) /* CPU power Mode exchange INT Status,
+ * Write 1 clear
+ */
+#define IMR_CPWM BIT(8) /* CPU power Mode exchange INT Status,
+ * Write 1 clear
+ */
+#define IMR_HIGHDOK BIT(7) /* High Queue DMA OK */
+#define IMR_MGNTDOK BIT(6) /* Management Queue DMA OK */
+#define IMR_BKDOK BIT(5) /* AC_BK DMA OK */
+#define IMR_BEDOK BIT(4) /* AC_BE DMA OK */
+#define IMR_VIDOK BIT(3) /* AC_VI DMA OK */
+#define IMR_VODOK BIT(2) /* AC_VO DMA OK */
+#define IMR_RDU BIT(1) /* Rx Descriptor Unavailable */
+#define IMR_ROK BIT(0) /* Receive DMA OK */
+
+/* IMR DW1(0x00B4-00B7) Bit 0-31 */
+#define IMR_BCNDMAINT7 BIT(27) /* Beacon DMA Interrupt 7 */
+#define IMR_BCNDMAINT6 BIT(26) /* Beacon DMA Interrupt 6 */
+#define IMR_BCNDMAINT5 BIT(25) /* Beacon DMA Interrupt 5 */
+#define IMR_BCNDMAINT4 BIT(24) /* Beacon DMA Interrupt 4 */
+#define IMR_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */
+#define IMR_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */
+#define IMR_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */
+#define IMR_BCNDOK7			BIT(20)	/* Beacon Queue DMA OK Interrupt 7 */
+#define IMR_BCNDOK6			BIT(19)	/* Beacon Queue DMA OK Interrupt 6 */
+#define IMR_BCNDOK5			BIT(18)	/* Beacon Queue DMA OK Interrupt 5 */
+#define IMR_BCNDOK4			BIT(17)	/* Beacon Queue DMA OK Interrupt 4 */
+#define IMR_BCNDOK3			BIT(16)	/* Beacon Queue DMA OK Interrupt 3 */
+#define IMR_BCNDOK2			BIT(15)	/* Beacon Queue DMA OK Interrupt 2 */
+#define IMR_BCNDOK1			BIT(14)	/* Beacon Queue DMA OK Interrupt 1 */
+#define IMR_ATIMEND_E BIT(13) /* ATIM Window End Extension for Win7 */
+#define IMR_TXERR BIT(11) /* Tx Error Flag Interrupt Status,
+ * write 1 clear.
+ */
+#define IMR_RXERR BIT(10) /* Rx Error Flag INT Status,
+ * Write 1 clear
+ */
+#define IMR_TXFOVW BIT(9) /* Transmit FIFO Overflow */
+#define IMR_RXFOVW BIT(8) /* Receive FIFO Overflow */
+
+#define HWSET_MAX_SIZE 512
+#define EFUSE_MAX_SECTION 64
+#define EFUSE_REAL_CONTENT_LEN 256
+#define EFUSE_OOB_PROTECT_BYTES		18	/* PG data excludes the header,
+						 * 7 dummy bytes from the CP test
+						 * and 1 reserved byte.
+ */
+
+#define EEPROM_DEFAULT_TSSI 0x0
+#define EEPROM_DEFAULT_TXPOWERDIFF 0x0
+#define EEPROM_DEFAULT_CRYSTALCAP 0x5
+#define EEPROM_DEFAULT_BOARDTYPE 0x02
+#define EEPROM_DEFAULT_TXPOWER 0x1010
+#define EEPROM_DEFAULT_HT2T_TXPWR 0x10
+
+#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
+#define EEPROM_DEFAULT_THERMALMETER 0x18
+#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0
+#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5
+#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
+#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
+#define EEPROM_DEFAULT_HT20_DIFF 2
+#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
+#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
+#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
+
+#define RF_OPTION1 0x79
+#define RF_OPTION2 0x7A
+#define RF_OPTION3 0x7B
+#define RF_OPTION4 0xC3
+
+#define EEPROM_DEFAULT_PID 0x1234
+#define EEPROM_DEFAULT_VID 0x5678
+#define EEPROM_DEFAULT_CUSTOMERID 0xAB
+#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD
+#define EEPROM_DEFAULT_VERSION 0
+
+#define EEPROM_CHANNEL_PLAN_FCC 0x0
+#define EEPROM_CHANNEL_PLAN_IC 0x1
+#define EEPROM_CHANNEL_PLAN_ETSI 0x2
+#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
+#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
+#define EEPROM_CHANNEL_PLAN_MKK 0x5
+#define EEPROM_CHANNEL_PLAN_MKK1 0x6
+#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
+#define EEPROM_CHANNEL_PLAN_TELEC 0x8
+#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
+#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
+#define EEPROM_CHANNEL_PLAN_NCC 0xB
+#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_TOSHIBA 0x4
+#define EEPROM_CID_CCX 0x10
+#define EEPROM_CID_QMI 0x0D
+#define EEPROM_CID_WHQL 0xFE
+
+#define RTL8723BE_EEPROM_ID 0x8129
+
+#define EEPROM_HPON 0x02
+#define EEPROM_CLK 0x06
+#define EEPROM_TESTR 0x08
+
+
+#define EEPROM_TXPOWERCCK 0x10
+#define EEPROM_TXPOWERHT40_1S 0x16
+#define EEPROM_TXPOWERHT20DIFF 0x1B
+#define EEPROM_TXPOWER_OFDMDIFF 0x1B
+
+#define EEPROM_TX_PWR_INX 0x10
+
+#define EEPROM_CHANNELPLAN 0xB8
+#define EEPROM_XTAL_8723BE 0xB9
+#define EEPROM_THERMAL_METER_88E 0xBA
+#define EEPROM_IQK_LCK_88E 0xBB
+
+#define EEPROM_RF_BOARD_OPTION_88E 0xC1
+#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
+#define EEPROM_RF_BT_SETTING_88E 0xC3
+#define EEPROM_VERSION 0xC4
+#define EEPROM_CUSTOMER_ID 0xC5
+#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
+
+#define EEPROM_MAC_ADDR 0xD0
+#define EEPROM_VID 0xD6
+#define EEPROM_DID 0xD8
+#define EEPROM_SVID 0xDA
+#define EEPROM_SMID 0xDC
+
+#define STOPBECON BIT(6)
+#define STOPHIGHT BIT(5)
+#define STOPMGT BIT(4)
+#define STOPVO BIT(3)
+#define STOPVI BIT(2)
+#define STOPBE BIT(1)
+#define STOPBK BIT(0)
+
+#define RCR_APPFCS BIT(31)
+#define RCR_APP_MIC BIT(30)
+#define RCR_APP_ICV BIT(29)
+#define RCR_APP_PHYST_RXFF BIT(28)
+#define RCR_APP_BA_SSN BIT(27)
+#define RCR_ENMBID BIT(24)
+#define RCR_LSIGEN BIT(23)
+#define RCR_MFBEN BIT(22)
+#define RCR_HTC_LOC_CTRL BIT(14)
+#define RCR_AMF BIT(13)
+#define RCR_ACF BIT(12)
+#define RCR_ADF BIT(11)
+#define RCR_AICV BIT(9)
+#define RCR_ACRC32 BIT(8)
+#define RCR_CBSSID_BCN BIT(7)
+#define RCR_CBSSID_DATA BIT(6)
+#define RCR_CBSSID RCR_CBSSID_DATA
+#define RCR_APWRMGT BIT(5)
+#define RCR_ADD3 BIT(4)
+#define RCR_AB BIT(3)
+#define RCR_AM BIT(2)
+#define RCR_APM BIT(1)
+#define RCR_AAP BIT(0)
+#define RCR_MXDMA_OFFSET 8
+#define RCR_FIFO_OFFSET 13
+
+#define RSV_CTRL 0x001C
+#define RD_CTRL 0x0524
+
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_USB_VID 0xFE60
+#define REG_USB_PID 0xFE62
+#define REG_USB_OPTIONAL 0xFE64
+#define REG_USB_CHIRP_K 0xFE65
+#define REG_USB_PHY 0xFE66
+#define REG_USB_MAC_ADDR 0xFE70
+#define REG_USB_HRPWM 0xFE58
+#define REG_USB_HCPWM 0xFE57
+
+#define SW18_FPWM BIT(3)
+
+#define ISO_MD2PP BIT(0)
+#define ISO_UA2USB BIT(1)
+#define ISO_UD2CORE BIT(2)
+#define ISO_PA2PCIE BIT(3)
+#define ISO_PD2CORE BIT(4)
+#define ISO_IP2MAC BIT(5)
+#define ISO_DIOP BIT(6)
+#define ISO_DIOE BIT(7)
+#define ISO_EB2CORE BIT(8)
+#define ISO_DIOR BIT(9)
+
+#define PWC_EV25V BIT(14)
+#define PWC_EV12V BIT(15)
+
+#define FEN_BBRSTB BIT(0)
+#define FEN_BB_GLB_RSTN BIT(1)
+#define FEN_USBA BIT(2)
+#define FEN_UPLL BIT(3)
+#define FEN_USBD BIT(4)
+#define FEN_DIO_PCIE BIT(5)
+#define FEN_PCIEA BIT(6)
+#define FEN_PPLL BIT(7)
+#define FEN_PCIED BIT(8)
+#define FEN_DIOE BIT(9)
+#define FEN_CPUEN BIT(10)
+#define FEN_DCORE BIT(11)
+#define FEN_ELDR BIT(12)
+#define FEN_DIO_RF BIT(13)
+#define FEN_HWPDN BIT(14)
+#define FEN_MREGEN BIT(15)
+
+#define PFM_LDALL BIT(0)
+#define PFM_ALDN BIT(1)
+#define PFM_LDKP BIT(2)
+#define PFM_WOWL BIT(3)
+#define ENPDN BIT(4)
+#define PDN_PL BIT(5)
+#define APFM_ONMAC BIT(8)
+#define APFM_OFF BIT(9)
+#define APFM_RSM BIT(10)
+#define AFSM_HSUS BIT(11)
+#define AFSM_PCIE BIT(12)
+#define APDM_MAC BIT(13)
+#define APDM_HOST BIT(14)
+#define APDM_HPDN BIT(15)
+#define RDY_MACON BIT(16)
+#define SUS_HOST BIT(17)
+#define ROP_ALD BIT(20)
+#define ROP_PWR BIT(21)
+#define ROP_SPS BIT(22)
+#define SOP_MRST BIT(25)
+#define SOP_FUSE BIT(26)
+#define SOP_ABG BIT(27)
+#define SOP_AMB BIT(28)
+#define SOP_RCK BIT(29)
+#define SOP_A8M BIT(30)
+#define XOP_BTCK BIT(31)
+
+#define ANAD16V_EN BIT(0)
+#define ANA8M BIT(1)
+#define MACSLP BIT(4)
+#define LOADER_CLK_EN BIT(5)
+#define _80M_SSC_DIS BIT(7)
+#define _80M_SSC_EN_HO BIT(8)
+#define PHY_SSC_RSTB BIT(9)
+#define SEC_CLK_EN BIT(10)
+#define MAC_CLK_EN BIT(11)
+#define SYS_CLK_EN BIT(12)
+#define RING_CLK_EN BIT(13)
+
+#define BOOT_FROM_EEPROM BIT(4)
+#define EEPROM_EN BIT(5)
+
+#define AFE_BGEN BIT(0)
+#define AFE_MBEN BIT(1)
+#define MAC_ID_EN BIT(7)
+
+#define WLOCK_ALL BIT(0)
+#define WLOCK_00 BIT(1)
+#define WLOCK_04 BIT(2)
+#define WLOCK_08 BIT(3)
+#define WLOCK_40 BIT(4)
+#define R_DIS_PRST_0 BIT(5)
+#define R_DIS_PRST_1 BIT(6)
+#define LOCK_ALL_EN BIT(7)
+
+#define RF_EN BIT(0)
+#define RF_RSTB BIT(1)
+#define RF_SDMRSTB BIT(2)
+
+#define LDA15_EN BIT(0)
+#define LDA15_STBY BIT(1)
+#define LDA15_OBUF BIT(2)
+#define LDA15_REG_VOS BIT(3)
+#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
+
+#define LDV12_EN BIT(0)
+#define LDV12_SDBY BIT(1)
+#define LPLDO_HSM BIT(2)
+#define LPLDO_LSM_DIS BIT(3)
+#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
+
+#define XTAL_EN BIT(0)
+#define XTAL_BSEL BIT(1)
+#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
+#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
+#define XTAL_GATE_USB BIT(8)
+#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
+#define XTAL_GATE_AFE BIT(11)
+#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
+#define XTAL_RF_GATE BIT(14)
+#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
+#define XTAL_GATE_DIG BIT(17)
+#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
+#define XTAL_BT_GATE BIT(20)
+#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
+#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
+
+#define CKDLY_AFE BIT(26)
+#define CKDLY_USB BIT(27)
+#define CKDLY_DIG BIT(28)
+#define CKDLY_BT BIT(29)
+
+#define APLL_EN BIT(0)
+#define APLL_320_EN BIT(1)
+#define APLL_FREF_SEL BIT(2)
+#define APLL_EDGE_SEL BIT(3)
+#define APLL_WDOGB BIT(4)
+#define APLL_LPFEN BIT(5)
+
+#define APLL_REF_CLK_13MHZ 0x1
+#define APLL_REF_CLK_19_2MHZ 0x2
+#define APLL_REF_CLK_20MHZ 0x3
+#define APLL_REF_CLK_25MHZ 0x4
+#define APLL_REF_CLK_26MHZ 0x5
+#define APLL_REF_CLK_38_4MHZ 0x6
+#define APLL_REF_CLK_40MHZ 0x7
+
+#define APLL_320EN BIT(14)
+#define APLL_80EN BIT(15)
+#define APLL_1MEN BIT(24)
+
+#define ALD_EN BIT(18)
+#define EF_PD BIT(19)
+#define EF_FLAG BIT(31)
+
+#define EF_TRPT BIT(7)
+#define LDOE25_EN BIT(31)
+
+#define RSM_EN BIT(0)
+#define TIMER_EN BIT(4)
+
+#define TRSW0EN BIT(2)
+#define TRSW1EN BIT(3)
+#define EROM_EN BIT(4)
+#define ENBT BIT(5)
+#define ENUART BIT(8)
+#define UART_910 BIT(9)
+#define ENPMAC BIT(10)
+#define SIC_SWRST BIT(11)
+#define ENSIC BIT(12)
+#define SIC_23 BIT(13)
+#define ENHDP BIT(14)
+#define SIC_LBK BIT(15)
+
+#define LED0PL BIT(4)
+#define LED1PL BIT(12)
+#define LED0DIS BIT(7)
+
+#define MCUFWDL_EN BIT(0)
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_CHKSUM_RPT BIT(2)
+#define MACINI_RDY BIT(3)
+#define BBINI_RDY BIT(4)
+#define RFINI_RDY BIT(5)
+#define WINTINI_RDY BIT(6)
+#define CPRST BIT(23)
+
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define TRP_B15V_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23)
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+#define CHIP_VER_RTL_MASK 0xF000
+#define CHIP_VER_RTL_SHIFT 12
+
+#define REG_LBMODE (REG_CR + 3)
+
+#define HCI_TXDMA_EN BIT(0)
+#define HCI_RXDMA_EN BIT(1)
+#define TXDMA_EN BIT(2)
+#define RXDMA_EN BIT(3)
+#define PROTOCOL_EN BIT(4)
+#define SCHEDULE_EN BIT(5)
+#define MACTXEN BIT(6)
+#define MACRXEN BIT(7)
+#define ENSWBCN BIT(8)
+#define ENSEC BIT(9)
+
+#define _NETTYPE(x) (((x) & 0x3) << 16)
+#define MASK_NETTYPE 0x30000
+#define NT_NO_LINK 0x0
+#define NT_LINK_AD_HOC 0x1
+#define NT_LINK_AP 0x2
+#define NT_AS_AP 0x3
+
+#define _LBMODE(x) (((x) & 0xF) << 24)
+#define MASK_LBMODE 0xF000000
+#define LOOPBACK_NORMAL 0x0
+#define LOOPBACK_IMMEDIATELY 0xB
+#define LOOPBACK_MAC_DELAY 0x3
+#define LOOPBACK_PHY 0x1
+#define LOOPBACK_DMA 0x7
+
+#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
+#define _PSRX_MASK 0xF
+#define _PSTX_MASK 0xF0
+#define _PSRX(x) (x)
+#define _PSTX(x) ((x) << 4)
+
+#define PBP_64 0x0
+#define PBP_128 0x1
+#define PBP_256 0x2
+#define PBP_512 0x3
+#define PBP_1024 0x4
+
+#define RXDMA_ARBBW_EN BIT(0)
+#define RXSHFT_EN BIT(1)
+#define RXDMA_AGG_EN BIT(2)
+#define QS_VO_QUEUE BIT(8)
+#define QS_VI_QUEUE BIT(9)
+#define QS_BE_QUEUE BIT(10)
+#define QS_BK_QUEUE BIT(11)
+#define QS_MANAGER_QUEUE BIT(12)
+#define QS_HIGH_QUEUE BIT(13)
+
+#define HQSEL_VOQ BIT(0)
+#define HQSEL_VIQ BIT(1)
+#define HQSEL_BEQ BIT(2)
+#define HQSEL_BKQ BIT(3)
+#define HQSEL_MGTQ BIT(4)
+#define HQSEL_HIQ BIT(5)
+
+#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
+#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
+#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
+#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
+#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
+#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
+
+#define QUEUE_LOW 1
+#define QUEUE_NORMAL 2
+#define QUEUE_HIGH 3
+
+#define _LLT_NO_ACTIVE 0x0
+#define _LLT_WRITE_ACCESS 0x1
+#define _LLT_READ_ACCESS 0x2
+
+#define _LLT_INIT_DATA(x) ((x) & 0xFF)
+#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
+#define _LLT_OP(x) (((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
+
+#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
+#define BB_WRITE_EN BIT(30)
+#define BB_READ_EN BIT(31)
+
+#define _HPQ(x) ((x) & 0xFF)
+#define _LPQ(x) (((x) & 0xFF) << 8)
+#define _PUBQ(x) (((x) & 0xFF) << 16)
+#define _NPQ(x) ((x) & 0xFF)
+
+#define HPQ_PUBLIC_DIS BIT(24)
+#define LPQ_PUBLIC_DIS BIT(25)
+#define LD_RQPN BIT(31)
+
+#define BCN_VALID BIT(16)
+#define BCN_HEAD(x) (((x) & 0xFF) << 8)
+#define BCN_HEAD_MASK 0xFF00
+
+#define BLK_DESC_NUM_SHIFT 4
+#define BLK_DESC_NUM_MASK 0xF
+
+#define DROP_DATA_EN BIT(9)
+
+#define EN_AMPDU_RTY_NEW BIT(7)
+
+#define _INIRTSMCS_SEL(x) ((x) & 0x3F)
+
+#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
+
+#define RATE_REG_BITMAP_ALL 0xFFFFF
+
+#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)
+
+#define _RRSR_RSC(x) (((x) & 0x3) << 21)
+#define RRSR_RSC_RESERVED 0x0
+#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
+#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
+#define RRSR_RSC_DUPLICATE_MODE 0x3
+
+#define USE_SHORT_G1 BIT(20)
+
+#define _AGGLMT_MCS0(x) ((x) & 0xF)
+#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
+#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
+#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
+#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
+#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
+#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
+#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)
+
+#define RETRY_LIMIT_SHORT_SHIFT 8
+#define RETRY_LIMIT_LONG_SHIFT 0
+
+#define _DARF_RC1(x) ((x) & 0x1F)
+#define _DARF_RC2(x) (((x) & 0x1F) << 8)
+#define _DARF_RC3(x) (((x) & 0x1F) << 16)
+#define _DARF_RC4(x) (((x) & 0x1F) << 24)
+#define _DARF_RC5(x) ((x) & 0x1F)
+#define _DARF_RC6(x) (((x) & 0x1F) << 8)
+#define _DARF_RC7(x) (((x) & 0x1F) << 16)
+#define _DARF_RC8(x) (((x) & 0x1F) << 24)
+
+#define _RARF_RC1(x) ((x) & 0x1F)
+#define _RARF_RC2(x) (((x) & 0x1F) << 8)
+#define _RARF_RC3(x) (((x) & 0x1F) << 16)
+#define _RARF_RC4(x) (((x) & 0x1F) << 24)
+#define _RARF_RC5(x) ((x) & 0x1F)
+#define _RARF_RC6(x) (((x) & 0x1F) << 8)
+#define _RARF_RC7(x) (((x) & 0x1F) << 16)
+#define _RARF_RC8(x) (((x) & 0x1F) << 24)
+
+#define AC_PARAM_TXOP_LIMIT_OFFSET 16
+#define AC_PARAM_ECW_MAX_OFFSET 12
+#define AC_PARAM_ECW_MIN_OFFSET 8
+#define AC_PARAM_AIFS_OFFSET 0
+
+#define _AIFS(x) (x)
+#define _ECW_MAX_MIN(x) ((x) << 8)
+#define _TXOP_LIMIT(x) ((x) << 16)
+
+#define _BCNIFS(x) ((x) & 0xFF)
+#define _BCNECW(x) ((((x) & 0xF)) << 8)
+
+#define _LRL(x) ((x) & 0x3F)
+#define _SRL(x) (((x) & 0x3F) << 8)
+
+#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
+#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8)
+
+#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
+#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8)
+
+#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
+
+#define DIS_EDCA_CNT_DWN BIT(11)
+
+#define EN_MBSSID BIT(1)
+#define EN_TXBCN_RPT BIT(2)
+#define EN_BCN_FUNCTION BIT(3)
+
+#define TSFTR_RST BIT(0)
+#define TSFTR1_RST BIT(1)
+
+#define STOP_BCNQ BIT(6)
+
+#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
+#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
+
+#define ACMHW_HWEN BIT(0)
+#define ACMHW_BEQEN BIT(1)
+#define ACMHW_VIQEN BIT(2)
+#define ACMHW_VOQEN BIT(3)
+#define ACMHW_BEQSTATUS BIT(4)
+#define ACMHW_VIQSTATUS BIT(5)
+#define ACMHW_VOQSTATUS BIT(6)
+
+#define APSDOFF BIT(6)
+#define APSDOFF_STATUS BIT(7)
+
+#define BW_20MHZ BIT(2)
+
+#define RATE_BITMAP_ALL 0xFFFFF
+
+#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
+
+#define TSFRST BIT(0)
+#define DIS_GCLK BIT(1)
+#define PAD_SEL BIT(2)
+#define PWR_ST BIT(6)
+#define PWRBIT_OW_EN BIT(7)
+#define ACRC BIT(8)
+#define CFENDFORM BIT(9)
+#define ICV BIT(10)
+
+#define AAP BIT(0)
+#define APM BIT(1)
+#define AM BIT(2)
+#define AB BIT(3)
+#define ADD3 BIT(4)
+#define APWRMGT BIT(5)
+#define CBSSID BIT(6)
+#define CBSSID_DATA BIT(6)
+#define CBSSID_BCN BIT(7)
+#define ACRC32 BIT(8)
+#define AICV BIT(9)
+#define ADF BIT(11)
+#define ACF BIT(12)
+#define AMF BIT(13)
+#define HTC_LOC_CTRL BIT(14)
+#define UC_DATA_EN BIT(16)
+#define BM_DATA_EN BIT(17)
+#define MFBEN BIT(22)
+#define LSIGEN BIT(23)
+#define ENMBID BIT(24)
+#define APP_BASSN BIT(27)
+#define APP_PHYSTS BIT(28)
+#define APP_ICV BIT(29)
+#define APP_MIC BIT(30)
+#define APP_FCS BIT(31)
+
+#define _MIN_SPACE(x) ((x) & 0x7)
+#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)
+
+#define RXERR_TYPE_OFDM_PPDU 0
+#define RXERR_TYPE_OFDM_FALSE_ALARM 1
+#define RXERR_TYPE_OFDM_MPDU_OK 2
+#define RXERR_TYPE_OFDM_MPDU_FAIL 3
+#define RXERR_TYPE_CCK_PPDU 4
+#define RXERR_TYPE_CCK_FALSE_ALARM 5
+#define RXERR_TYPE_CCK_MPDU_OK 6
+#define RXERR_TYPE_CCK_MPDU_FAIL 7
+#define RXERR_TYPE_HT_PPDU 8
+#define RXERR_TYPE_HT_FALSE_ALARM 9
+#define RXERR_TYPE_HT_MPDU_TOTAL 10
+#define RXERR_TYPE_HT_MPDU_OK 11
+#define RXERR_TYPE_HT_MPDU_FAIL 12
+#define RXERR_TYPE_RX_FULL_DROP 15
+
+#define RXERR_COUNTER_MASK 0xFFFFF
+#define RXERR_RPT_RST BIT(27)
+#define _RXERR_RPT_SEL(type) ((type) << 28)
+
+#define SCR_TXUSEDK BIT(0)
+#define SCR_RXUSEDK BIT(1)
+#define SCR_TXENCENABLE BIT(2)
+#define SCR_RXDECENABLE BIT(3)
+#define SCR_SKBYA2 BIT(4)
+#define SCR_NOSKMC BIT(5)
+#define SCR_TXBCUSEDK BIT(6)
+#define SCR_RXBCUSEDK BIT(7)
+
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define TRP_B15V_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define BT_FUNC BIT(16)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23)
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+#define USB_IS_HIGH_SPEED 0
+#define USB_IS_FULL_SPEED 1
+#define USB_SPEED_MASK BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK 0xF
+#define USB_NORMAL_SIE_EP_SHIFT 4
+
+#define USB_TEST_EP_MASK 0x30
+#define USB_TEST_EP_SHIFT 4
+
+#define USB_AGG_EN BIT(3)
+
+#define MAC_ADDR_LEN 6
+#define LAST_ENTRY_OF_TX_PKT_BUFFER	175	/* 255 for 88e */
+
+#define POLLING_LLT_THRESHOLD 20
+#define POLLING_READY_TIMEOUT_COUNT 3000
+
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
+#define EPROM_CMD_CONFIG 0x3
+#define EPROM_CMD_LOAD 1
+
+#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE
+
+#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
+
+#define RPMAC_RESET 0x100
+#define RPMAC_TXSTART 0x104
+#define RPMAC_TXLEGACYSIG 0x108
+#define RPMAC_TXHTSIG1 0x10c
+#define RPMAC_TXHTSIG2 0x110
+#define RPMAC_PHYDEBUG 0x114
+#define RPMAC_TXPACKETNUM 0x118
+#define RPMAC_TXIDLE 0x11c
+#define RPMAC_TXMACHEADER0 0x120
+#define RPMAC_TXMACHEADER1 0x124
+#define RPMAC_TXMACHEADER2 0x128
+#define RPMAC_TXMACHEADER3 0x12c
+#define RPMAC_TXMACHEADER4 0x130
+#define RPMAC_TXMACHEADER5 0x134
+#define RPMAC_TXDADATYPE 0x138
+#define RPMAC_TXRANDOMSEED 0x13c
+#define RPMAC_CCKPLCPPREAMBLE 0x140
+#define RPMAC_CCKPLCPHEADER 0x144
+#define RPMAC_CCKCRC16 0x148
+#define RPMAC_OFDMRXCRC32OK 0x170
+#define RPMAC_OFDMRXCRC32ER 0x174
+#define RPMAC_OFDMRXPARITYER 0x178
+#define RPMAC_OFDMRXCRC8ER 0x17c
+#define RPMAC_CCKCRXRC16ER 0x180
+#define RPMAC_CCKCRXRC32ER 0x184
+#define RPMAC_CCKCRXRC32OK 0x188
+#define RPMAC_TXSTATUS 0x18c
+
+#define RFPGA0_RFMOD 0x800
+
+#define RFPGA0_TXINFO 0x804
+#define RFPGA0_PSDFUNCTION 0x808
+
+#define RFPGA0_TXGAINSTAGE 0x80c
+
+#define RFPGA0_RFTIMING1 0x810
+#define RFPGA0_RFTIMING2 0x814
+
+#define RFPGA0_XA_HSSIPARAMETER1 0x820
+#define RFPGA0_XA_HSSIPARAMETER2 0x824
+#define RFPGA0_XB_HSSIPARAMETER1 0x828
+#define RFPGA0_XB_HSSIPARAMETER2 0x82c
+
+#define RFPGA0_XA_LSSIPARAMETER 0x840
+#define RFPGA0_XB_LSSIPARAMETER 0x844
+
+#define RFPGA0_RFWAKEUPPARAMETER 0x850
+#define RFPGA0_RFSLEEPUPPARAMETER 0x854
+
+#define RFPGA0_XAB_SWITCHCONTROL 0x858
+#define RFPGA0_XCD_SWITCHCONTROL 0x85c
+
+#define RFPGA0_XA_RFINTERFACEOE 0x860
+#define RFPGA0_XB_RFINTERFACEOE 0x864
+
+#define RFPGA0_XAB_RFINTERFACESW 0x870
+#define RFPGA0_XCD_RFINTERFACESW 0x874
+
+#define RFPGA0_XAB_RFPARAMETER 0x878
+#define RFPGA0_XCD_RFPARAMETER 0x87c
+
+#define RFPGA0_ANALOGPARAMETER1 0x880
+#define RFPGA0_ANALOGPARAMETER2 0x884
+#define RFPGA0_ANALOGPARAMETER3 0x888
+#define RFPGA0_ANALOGPARAMETER4 0x88c
+
+#define RFPGA0_XA_LSSIREADBACK 0x8a0
+#define RFPGA0_XB_LSSIREADBACK 0x8a4
+#define RFPGA0_XC_LSSIREADBACK 0x8a8
+#define RFPGA0_XD_LSSIREADBACK 0x8ac
+
+#define RFPGA0_PSDREPORT 0x8b4
+#define TRANSCEIVEA_HSPI_READBACK 0x8b8
+#define TRANSCEIVEB_HSPI_READBACK 0x8bc
+#define REG_SC_CNT 0x8c4
+#define RFPGA0_XAB_RFINTERFACERB 0x8e0
+#define RFPGA0_XCD_RFINTERFACERB 0x8e4
+
+#define RFPGA1_RFMOD 0x900
+
+#define RFPGA1_TXBLOCK 0x904
+#define RFPGA1_DEBUGSELECT 0x908
+#define RFPGA1_TXINFO 0x90c
+
+#define RCCK0_SYSTEM 0xa00
+
+#define RCCK0_AFESETTING 0xa04
+#define RCCK0_CCA 0xa08
+
+#define RCCK0_RXAGC1 0xa0c
+#define RCCK0_RXAGC2 0xa10
+
+#define RCCK0_RXHP 0xa14
+
+#define RCCK0_DSPPARAMETER1 0xa18
+#define RCCK0_DSPPARAMETER2 0xa1c
+
+#define RCCK0_TXFILTER1 0xa20
+#define RCCK0_TXFILTER2 0xa24
+#define RCCK0_DEBUGPORT 0xa28
+#define RCCK0_FALSEALARMREPORT 0xa2c
+#define RCCK0_TRSSIREPORT 0xa50
+#define RCCK0_RXREPORT 0xa54
+#define RCCK0_FACOUNTERLOWER 0xa5c
+#define RCCK0_FACOUNTERUPPER 0xa58
+#define RCCK0_CCA_CNT 0xa60
+
+
+/* PageB(0xB00) */
+#define RPDP_ANTA 0xb00
+#define RPDP_ANTA_4 0xb04
+#define RPDP_ANTA_8 0xb08
+#define RPDP_ANTA_C 0xb0c
+#define RPDP_ANTA_10 0xb10
+#define RPDP_ANTA_14 0xb14
+#define RPDP_ANTA_18 0xb18
+#define RPDP_ANTA_1C 0xb1c
+#define RPDP_ANTA_20 0xb20
+#define RPDP_ANTA_24 0xb24
+
+#define RCONFIG_PMPD_ANTA 0xb28
+#define CONFIG_RAM64X16 0xb2c
+
+#define RBNDA 0xb30
+#define RHSSIPAR 0xb34
+
+#define RCONFIG_ANTA 0xb68
+#define RCONFIG_ANTB 0xb6c
+
+#define RPDP_ANTB 0xb70
+#define RPDP_ANTB_4 0xb74
+#define RPDP_ANTB_8 0xb78
+#define RPDP_ANTB_C 0xb7c
+#define RPDP_ANTB_10 0xb80
+#define RPDP_ANTB_14 0xb84
+#define RPDP_ANTB_18 0xb88
+#define RPDP_ANTB_1C 0xb8c
+#define RPDP_ANTB_20 0xb90
+#define RPDP_ANTB_24 0xb94
+
+#define RCONFIG_PMPD_ANTB 0xb98
+
+#define RBNDB 0xba0
+
+#define RAPK 0xbd8
+#define RPM_RX0_ANTA 0xbdc
+#define RPM_RX1_ANTA 0xbe0
+#define RPM_RX2_ANTA 0xbe4
+#define RPM_RX3_ANTA 0xbe8
+#define RPM_RX0_ANTB 0xbec
+#define RPM_RX1_ANTB 0xbf0
+#define RPM_RX2_ANTB 0xbf4
+#define RPM_RX3_ANTB 0xbf8
+
+/*Page C*/
+#define ROFDM0_LSTF 0xc00
+
+#define ROFDM0_TRXPATHENABLE 0xc04
+#define ROFDM0_TRMUXPAR 0xc08
+#define ROFDM0_TRSWISOLATION 0xc0c
+
+#define ROFDM0_XARXAFE 0xc10
+#define ROFDM0_XARXIQIMBALANCE 0xc14
+#define ROFDM0_XBRXAFE 0xc18
+#define ROFDM0_XBRXIQIMBALANCE 0xc1c
+#define ROFDM0_XCRXAFE 0xc20
+#define ROFDM0_XCRXIQIMBANLANCE 0xc24
+#define ROFDM0_XDRXAFE 0xc28
+#define ROFDM0_XDRXIQIMBALANCE 0xc2c
+
+#define ROFDM0_RXDETECTOR1 0xc30
+#define ROFDM0_RXDETECTOR2 0xc34
+#define ROFDM0_RXDETECTOR3 0xc38
+#define ROFDM0_RXDETECTOR4 0xc3c
+
+#define ROFDM0_RXDSP 0xc40
+#define ROFDM0_CFOANDDAGC 0xc44
+#define ROFDM0_CCADROPTHRESHOLD 0xc48
+#define ROFDM0_ECCATHRESHOLD 0xc4c
+
+#define ROFDM0_XAAGCCORE1 0xc50
+#define ROFDM0_XAAGCCORE2 0xc54
+#define ROFDM0_XBAGCCORE1 0xc58
+#define ROFDM0_XBAGCCORE2 0xc5c
+#define ROFDM0_XCAGCCORE1 0xc60
+#define ROFDM0_XCAGCCORE2 0xc64
+#define ROFDM0_XDAGCCORE1 0xc68
+#define ROFDM0_XDAGCCORE2 0xc6c
+
+#define ROFDM0_AGCPARAMETER1 0xc70
+#define ROFDM0_AGCPARAMETER2 0xc74
+#define ROFDM0_AGCRSSITABLE 0xc78
+#define ROFDM0_HTSTFAGC 0xc7c
+
+#define ROFDM0_XATXIQIMBALANCE 0xc80
+#define ROFDM0_XATXAFE 0xc84
+#define ROFDM0_XBTXIQIMBALANCE 0xc88
+#define ROFDM0_XBTXAFE 0xc8c
+#define ROFDM0_XCTXIQIMBALANCE 0xc90
+#define ROFDM0_XCTXAFE 0xc94
+#define ROFDM0_XDTXIQIMBALANCE 0xc98
+#define ROFDM0_XDTXAFE 0xc9c
+
+#define ROFDM0_RXIQEXTANTA 0xca0
+#define ROFDM0_TXCOEFF1 0xca4
+#define ROFDM0_TXCOEFF2 0xca8
+#define ROFDM0_TXCOEFF3 0xcac
+#define ROFDM0_TXCOEFF4 0xcb0
+#define ROFDM0_TXCOEFF5 0xcb4
+#define ROFDM0_TXCOEFF6 0xcb8
+
+#define ROFDM0_RXHPPARAMETER 0xce0
+#define ROFDM0_TXPSEUDONOISEWGT 0xce4
+#define ROFDM0_FRAMESYNC 0xcf0
+#define ROFDM0_DFSREPORT 0xcf4
+
+
+#define ROFDM1_LSTF 0xd00
+#define ROFDM1_TRXPATHENABLE 0xd04
+
+#define ROFDM1_CF0 0xd08
+#define ROFDM1_CSI1 0xd10
+#define ROFDM1_SBD 0xd14
+#define ROFDM1_CSI2 0xd18
+#define ROFDM1_CFOTRACKING 0xd2c
+#define ROFDM1_TRXMESAURE1 0xd34
+#define ROFDM1_INTFDET 0xd3c
+#define ROFDM1_PSEUDONOISESTATEAB 0xd50
+#define ROFDM1_PSEUDONOISESTATECD 0xd54
+#define ROFDM1_RXPSEUDONOISEWGT 0xd58
+
+#define ROFDM_PHYCOUNTER1 0xda0
+#define ROFDM_PHYCOUNTER2 0xda4
+#define ROFDM_PHYCOUNTER3 0xda8
+
+#define ROFDM_SHORTCFOAB 0xdac
+#define ROFDM_SHORTCFOCD 0xdb0
+#define ROFDM_LONGCFOAB 0xdb4
+#define ROFDM_LONGCFOCD 0xdb8
+#define ROFDM_TAILCF0AB 0xdbc
+#define ROFDM_TAILCF0CD 0xdc0
+#define ROFDM_PWMEASURE1 0xdc4
+#define ROFDM_PWMEASURE2 0xdc8
+#define ROFDM_BWREPORT 0xdcc
+#define ROFDM_AGCREPORT 0xdd0
+#define ROFDM_RXSNR 0xdd4
+#define ROFDM_RXEVMCSI 0xdd8
+#define ROFDM_SIGREPORT 0xddc
+
+#define RTXAGC_A_RATE18_06 0xe00
+#define RTXAGC_A_RATE54_24 0xe04
+#define RTXAGC_A_CCK1_MCS32 0xe08
+#define RTXAGC_A_MCS03_MCS00 0xe10
+#define RTXAGC_A_MCS07_MCS04 0xe14
+#define RTXAGC_A_MCS11_MCS08 0xe18
+#define RTXAGC_A_MCS15_MCS12 0xe1c
+
+#define RTXAGC_B_RATE18_06 0x830
+#define RTXAGC_B_RATE54_24 0x834
+#define RTXAGC_B_CCK1_55_MCS32 0x838
+#define RTXAGC_B_MCS03_MCS00 0x83c
+#define RTXAGC_B_MCS07_MCS04 0x848
+#define RTXAGC_B_MCS11_MCS08 0x84c
+#define RTXAGC_B_MCS15_MCS12 0x868
+#define RTXAGC_B_CCK11_A_CCK2_11 0x86c
+
+#define RFPGA0_IQK 0xe28
+#define RTX_IQK_TONE_A 0xe30
+#define RRX_IQK_TONE_A 0xe34
+#define RTX_IQK_PI_A 0xe38
+#define RRX_IQK_PI_A 0xe3c
+
+#define RTX_IQK 0xe40
+#define RRX_IQK 0xe44
+#define RIQK_AGC_PTS 0xe48
+#define RIQK_AGC_RSP 0xe4c
+#define RTX_IQK_TONE_B 0xe50
+#define RRX_IQK_TONE_B 0xe54
+#define RTX_IQK_PI_B 0xe58
+#define RRX_IQK_PI_B 0xe5c
+#define RIQK_AGC_CONT 0xe60
+
+#define RBLUE_TOOTH 0xe6c
+#define RRX_WAIT_CCA 0xe70
+#define RTX_CCK_RFON 0xe74
+#define RTX_CCK_BBON 0xe78
+#define RTX_OFDM_RFON 0xe7c
+#define RTX_OFDM_BBON 0xe80
+#define RTX_TO_RX 0xe84
+#define RTX_TO_TX 0xe88
+#define RRX_CCK 0xe8c
+
+#define RTX_POWER_BEFORE_IQK_A 0xe94
+#define RTX_POWER_AFTER_IQK_A 0xe9c
+
+#define RRX_POWER_BEFORE_IQK_A 0xea0
+#define RRX_POWER_BEFORE_IQK_A_2 0xea4
+#define RRX_POWER_AFTER_IQK_A 0xea8
+#define RRX_POWER_AFTER_IQK_A_2 0xeac
+
+#define RTX_POWER_BEFORE_IQK_B 0xeb4
+#define RTX_POWER_AFTER_IQK_B 0xebc
+
+#define RRX_POWER_BEFORE_IQK_B 0xec0
+#define RRX_POWER_BEFORE_IQK_B_2 0xec4
+#define RRX_POWER_AFTER_IQK_B 0xec8
+#define RRX_POWER_AFTER_IQK_B_2 0xecc
+
+#define RRX_OFDM 0xed0
+#define RRX_WAIT_RIFS 0xed4
+#define RRX_TO_RX 0xed8
+#define RSTANDBY 0xedc
+#define RSLEEP 0xee0
+#define RPMPD_ANAEN 0xeec
+
+#define RZEBRA1_HSSIENABLE 0x0
+#define RZEBRA1_TRXENABLE1 0x1
+#define RZEBRA1_TRXENABLE2 0x2
+#define RZEBRA1_AGC 0x4
+#define RZEBRA1_CHARGEPUMP 0x5
+#define RZEBRA1_CHANNEL 0x7
+
+#define RZEBRA1_TXGAIN 0x8
+#define RZEBRA1_TXLPF 0x9
+#define RZEBRA1_RXLPF 0xb
+#define RZEBRA1_RXHPFCORNER 0xc
+
+#define RGLOBALCTRL 0
+#define RRTL8256_TXLPF 19
+#define RRTL8256_RXLPF 11
+#define RRTL8258_TXLPF 0x11
+#define RRTL8258_RXLPF 0x13
+#define RRTL8258_RSSILPF 0xa
+
+#define RF_AC 0x00
+
+#define RF_IQADJ_G1 0x01
+#define RF_IQADJ_G2 0x02
+#define RF_POW_TRSW 0x05
+
+#define RF_GAIN_RX 0x06
+#define RF_GAIN_TX 0x07
+
+#define RF_TXM_IDAC 0x08
+#define RF_BS_IQGEN 0x0F
+
+#define RF_MODE1 0x10
+#define RF_MODE2 0x11
+
+#define RF_RX_AGC_HP 0x12
+#define RF_TX_AGC 0x13
+#define RF_BIAS 0x14
+#define RF_IPA 0x15
+#define RF_POW_ABILITY 0x17
+#define RF_MODE_AG 0x18
+#define RRFCHANNEL 0x18
+#define RF_CHNLBW 0x18
+#define RF_TOP 0x19
+
+#define RF_RX_G1 0x1A
+#define RF_RX_G2 0x1B
+
+#define RF_RX_BB2 0x1C
+#define RF_RX_BB1 0x1D
+
+#define RF_RCK1 0x1E
+#define RF_RCK2 0x1F
+
+#define RF_TX_G1 0x20
+#define RF_TX_G2 0x21
+#define RF_TX_G3 0x22
+
+#define RF_TX_BB1 0x23
+#define RF_T_METER 0x42
+
+#define RF_SYN_G1 0x25
+#define RF_SYN_G2 0x26
+#define RF_SYN_G3 0x27
+#define RF_SYN_G4 0x28
+#define RF_SYN_G5 0x29
+#define RF_SYN_G6 0x2A
+#define RF_SYN_G7 0x2B
+#define RF_SYN_G8 0x2C
+
+#define RF_RCK_OS 0x30
+#define RF_TXPA_G1 0x31
+#define RF_TXPA_G2 0x32
+#define RF_TXPA_G3 0x33
+
+#define RF_TX_BIAS_A 0x35
+#define RF_TX_BIAS_D 0x36
+#define RF_LOBF_9 0x38
+#define RF_RXRF_A3 0x3C
+#define RF_TRSW 0x3F
+
+#define RF_TXRF_A2 0x41
+#define RF_TXPA_G4 0x46
+#define RF_TXPA_A4 0x4B
+
+#define RF_WE_LUT 0xEF
+
+#define BBBRESETB 0x100
+#define BGLOBALRESETB 0x200
+#define BOFDMTXSTART 0x4
+#define BCCKTXSTART 0x8
+#define BCRC32DEBUG 0x100
+#define BPMACLOOPBACK 0x10
+#define BTXLSIG 0xffffff
+#define BOFDMTXRATE 0xf
+#define BOFDMTXRESERVED 0x10
+#define BOFDMTXLENGTH 0x1ffe0
+#define BOFDMTXPARITY 0x20000
+#define BTXHTSIG1 0xffffff
+#define BTXHTMCSRATE 0x7f
+#define BTXHTBW 0x80
+#define BTXHTLENGTH 0xffff00
+#define BTXHTSIG2 0xffffff
+#define BTXHTSMOOTHING 0x1
+#define BTXHTSOUNDING 0x2
+#define BTXHTRESERVED 0x4
+#define BTXHTAGGREATION 0x8
+#define BTXHTSTBC 0x30
+#define BTXHTADVANCECODING 0x40
+#define BTXHTSHORTGI 0x80
+#define BTXHTNUMBERHT_LTF 0x300
+#define BTXHTCRC8 0x3fc00
+#define BCOUNTERRESET 0x10000
+#define BNUMOFOFDMTX 0xffff
+#define BNUMOFCCKTX 0xffff0000
+#define BTXIDLEINTERVAL 0xffff
+#define BOFDMSERVICE 0xffff0000
+#define BTXMACHEADER 0xffffffff
+#define BTXDATAINIT 0xff
+#define BTXHTMODE 0x100
+#define BTXDATATYPE 0x30000
+#define BTXRANDOMSEED 0xffffffff
+#define BCCKTXPREAMBLE 0x1
+#define BCCKTXSFD 0xffff0000
+#define BCCKTXSIG 0xff
+#define BCCKTXSERVICE 0xff00
+#define BCCKLENGTHEXT 0x8000
+#define BCCKTXLENGHT 0xffff0000
+#define BCCKTXCRC16 0xffff
+#define BCCKTXSTATUS 0x1
+#define BOFDMTXSTATUS 0x2
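+/* True when _offset falls inside the baseband register range 0x800-0xfff. */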
+#define IS_BB_REG_OFFSET_92S(_offset) \
+ ((_offset >= 0x800) && (_offset <= 0xfff))
+
+#define BRFMOD 0x1
+#define BJAPANMODE 0x2
+#define BCCKTXSC 0x30
+#define BCCKEN 0x1000000
+#define BOFDMEN 0x2000000
+
+#define BOFDMRXADCPHASE 0x10000
+#define BOFDMTXDACPHASE 0x40000
+#define BXATXAGC 0x3f
+
+#define BXBTXAGC 0xf00
+#define BXCTXAGC 0xf000
+#define BXDTXAGC 0xf0000
+
+#define BPASTART 0xf0000000
+#define BTRSTART 0x00f00000
+#define BRFSTART 0x0000f000
+#define BBBSTART 0x000000f0
+#define BBBCCKSTART 0x0000000f
+#define BPAEND 0xf
+#define BTREND 0x0f000000
+#define BRFEND 0x000f0000
+#define BCCAMASK 0x000000f0
+#define BR2RCCAMASK 0x00000f00
+#define BHSSI_R2TDELAY 0xf8000000
+#define BHSSI_T2RDELAY 0xf80000
+#define BCONTXHSSI 0x400
+#define BIGFROMCCK 0x200
+#define BAGCADDRESS 0x3f
+#define BRXHPTX 0x7000
+#define BRXHP2RX 0x38000
+#define BRXHPCCKINI 0xc0000
+#define BAGCTXCODE 0xc00000
+#define BAGCRXCODE 0x300000
+
+#define B3WIREDATALENGTH 0x800
+#define B3WIREADDREAALENGTH 0x400
+
+#define B3WIRERFPOWERDOWN 0x1
+#define B5GPAPEPOLARITY 0x40000000
+#define B2GPAPEPOLARITY 0x80000000
+#define BRFSW_TXDEFAULTANT 0x3
+#define BRFSW_TXOPTIONANT 0x30
+#define BRFSW_RXDEFAULTANT 0x300
+#define BRFSW_RXOPTIONANT 0x3000
+#define BRFSI_3WIREDATA 0x1
+#define BRFSI_3WIRECLOCK 0x2
+#define BRFSI_3WIRELOAD 0x4
+#define BRFSI_3WIRERW 0x8
+#define BRFSI_3WIRE 0xf
+
+#define BRFSI_RFENV 0x10
+
+#define BRFSI_TRSW 0x20
+#define BRFSI_TRSWB 0x40
+#define BRFSI_ANTSW 0x100
+#define BRFSI_ANTSWB 0x200
+#define BRFSI_PAPE 0x400
+#define BRFSI_PAPE5G 0x800
+#define BBANDSELECT 0x1
+#define BHTSIG2_GI 0x80
+#define BHTSIG2_SMOOTHING 0x01
+#define BHTSIG2_SOUNDING 0x02
+#define BHTSIG2_AGGREATON 0x08
+#define BHTSIG2_STBC 0x30
+#define BHTSIG2_ADVCODING 0x40
+#define BHTSIG2_NUMOFHTLTF 0x300
+#define BHTSIG2_CRC8 0x3fc
+#define BHTSIG1_MCS 0x7f
+#define BHTSIG1_BANDWIDTH 0x80
+#define BHTSIG1_HTLENGTH 0xffff
+#define BLSIG_RATE 0xf
+#define BLSIG_RESERVED 0x10
+#define BLSIG_LENGTH 0x1fffe
+#define BLSIG_PARITY 0x20
+#define BCCKRXPHASE 0x4
+
+#define BLSSIREADADDRESS 0x7f800000
+#define BLSSIREADEDGE 0x80000000
+
+#define BLSSIREADBACKDATA 0xfffff
+
+#define BLSSIREADOKFLAG 0x1000
+#define BCCKSAMPLERATE 0x8
+#define BREGULATOR0STANDBY 0x1
+#define BREGULATORPLLSTANDBY 0x2
+#define BREGULATOR1STANDBY 0x4
+#define BPLLPOWERUP 0x8
+#define BDPLLPOWERUP 0x10
+#define BDA10POWERUP 0x20
+#define BAD7POWERUP 0x200
+#define BDA6POWERUP 0x2000
+#define BXTALPOWERUP 0x4000
+#define B40MDCLKPOWERUP 0x8000
+#define BDA6DEBUGMODE 0x20000
+#define BDA6SWING 0x380000
+
+#define BADCLKPHASE 0x4000000
+#define B80MCLKDELAY 0x18000000
+#define BAFEWATCHDOGENABLE 0x20000000
+
+#define BXTALCAP01 0xc0000000
+#define BXTALCAP23 0x3
+#define BXTALCAP92X 0x0f000000
+#define BXTALCAP 0x0f000000
+
+#define BINTDIFCLKENABLE 0x400
+#define BEXTSIGCLKENABLE 0x800
+#define BBANDGAP_MBIAS_POWERUP 0x10000
+#define BAD11SH_GAIN 0xc0000
+#define BAD11NPUT_RANGE 0x700000
+#define BAD110P_CURRENT 0x3800000
+#define BLPATH_LOOPBACK 0x4000000
+#define BQPATH_LOOPBACK 0x8000000
+#define BAFE_LOOPBACK 0x10000000
+#define BDA10_SWING 0x7e0
+#define BDA10_REVERSE 0x800
+#define BDA_CLK_SOURCE 0x1000
+#define BDA7INPUT_RANGE 0x6000
+#define BDA7_GAIN 0x38000
+#define BDA7OUTPUT_CM_MODE 0x40000
+#define BDA7INPUT_CM_MODE 0x380000
+#define BDA7CURRENT 0xc00000
+#define BREGULATOR_ADJUST 0x7000000
+#define BAD11POWERUP_ATTX 0x1
+#define BDA10PS_ATTX 0x10
+#define BAD11POWERUP_ATRX 0x100
+#define BDA10PS_ATRX 0x1000
+#define BCCKRX_AGC_FORMAT 0x200
+#define BPSDFFT_SAMPLE_POINT 0xc000
+#define BPSD_AVERAGE_NUM 0x3000
+#define BIQPATH_CONTROL 0xc00
+#define BPSD_FREQ 0x3ff
+#define BPSD_ANTENNA_PATH 0x30
+#define BPSD_IQ_SWITCH 0x40
+#define BPSD_RX_TRIGGER 0x400000
+#define BPSD_TX_TRIGGER 0x80000000
+#define BPSD_SINE_TONE_SCALE 0x7f000000
+#define BPSD_REPORT 0xffff
+
+#define BOFDM_TXSC 0x30000000
+#define BCCK_TXON 0x1
+#define BOFDM_TXON 0x2
+#define BDEBUG_PAGE 0xfff
+#define BDEBUG_ITEM 0xff
+#define BANTL 0x10
+#define BANT_NONHT 0x100
+#define BANT_HT1 0x1000
+#define BANT_HT2 0x10000
+#define BANT_HT1S1 0x100000
+#define BANT_NONHTS1 0x1000000
+
+#define BCCK_BBMODE 0x3
+#define BCCK_TXPOWERSAVING 0x80
+#define BCCK_RXPOWERSAVING 0x40
+
+#define BCCK_SIDEBAND 0x10
+
+#define BCCK_SCRAMBLE 0x8
+#define BCCK_ANTDIVERSITY 0x8000
+#define BCCK_CARRIER_RECOVERY 0x4000
+#define BCCK_TXRATE 0x3000
+#define BCCK_DCCANCEL 0x0800
+#define BCCK_ISICANCEL 0x0400
+#define BCCK_MATCH_FILTER 0x0200
+#define BCCK_EQUALIZER 0x0100
+#define BCCK_PREAMBLE_DETECT 0x800000
+#define BCCK_FAST_FALSECCA 0x400000
+#define BCCK_CH_ESTSTART 0x300000
+#define BCCK_CCA_COUNT 0x080000
+#define BCCK_CS_LIM 0x070000
+#define BCCK_BIST_MODE 0x80000000
+#define BCCK_CCAMASK 0x40000000
+#define BCCK_TX_DAC_PHASE 0x4
+#define BCCK_RX_ADC_PHASE 0x20000000
+#define BCCKR_CP_MODE 0x0100
+#define BCCK_TXDC_OFFSET 0xf0
+#define BCCK_RXDC_OFFSET 0xf
+#define BCCK_CCA_MODE 0xc000
+#define BCCK_FALSECS_LIM 0x3f00
+#define BCCK_CS_RATIO 0xc00000
+#define BCCK_CORGBIT_SEL 0x300000
+#define BCCK_PD_LIM 0x0f0000
+#define BCCK_NEWCCA 0x80000000
+#define BCCK_RXHP_OF_IG 0x8000
+#define BCCK_RXIG 0x7f00
+#define BCCK_LNA_POLARITY 0x800000
+#define BCCK_RX1ST_BAIN 0x7f0000
+#define BCCK_RF_EXTEND 0x20000000
+#define BCCK_RXAGC_SATLEVEL 0x1f000000
+#define BCCK_RXAGC_SATCOUNT 0xe0
+#define BCCKRXRFSETTLE 0x1f
+#define BCCK_FIXED_RXAGC 0x8000
+#define BCCK_ANTENNA_POLARITY 0x2000
+#define BCCK_TXFILTER_TYPE 0x0c00
+#define BCCK_RXAGC_REPORTTYPE 0x0300
+#define BCCK_RXDAGC_EN 0x80000000
+#define BCCK_RXDAGC_PERIOD 0x20000000
+#define BCCK_RXDAGC_SATLEVEL 0x1f000000
+#define BCCK_TIMING_RECOVERY 0x800000
+#define BCCK_TXC0 0x3f0000
+#define BCCK_TXC1 0x3f000000
+#define BCCK_TXC2 0x3f
+#define BCCK_TXC3 0x3f00
+#define BCCK_TXC4 0x3f0000
+#define BCCK_TXC5 0x3f000000
+#define BCCK_TXC6 0x3f
+#define BCCK_TXC7 0x3f00
+#define BCCK_DEBUGPORT 0xff0000
+#define BCCK_DAC_DEBUG 0x0f000000
+#define BCCK_FALSEALARM_ENABLE 0x8000
+#define BCCK_FALSEALARM_READ 0x4000
+#define BCCK_TRSSI 0x7f
+#define BCCK_RXAGC_REPORT 0xfe
+#define BCCK_RXREPORT_ANTSEL 0x80000000
+#define BCCK_RXREPORT_MFOFF 0x40000000
+#define BCCK_RXREPORT_SQLOSS 0x20000000
+#define BCCK_RXREPORT_PKTLOSS 0x10000000
+#define BCCK_RXREPORT_LOCKEDBIT 0x08000000
+#define BCCK_RXREPORT_RATEERROR 0x04000000
+#define BCCK_RXREPORT_RXRATE 0x03000000
+#define BCCK_RXFA_COUNTER_LOWER 0xff
+#define BCCK_RXFA_COUNTER_UPPER 0xff000000
+#define BCCK_RXHPAGC_START 0xe000
+#define BCCK_RXHPAGC_FINAL 0x1c00
+#define BCCK_RXFALSEALARM_ENABLE 0x8000
+#define BCCK_FACOUNTER_FREEZE 0x4000
+#define BCCK_TXPATH_SEL 0x10000000
+#define BCCK_DEFAULT_RXPATH 0xc000000
+#define BCCK_OPTION_RXPATH 0x3000000
+
+#define BNUM_OFSTF 0x3
+#define BSHIFT_L 0xc0
+#define BGI_TH 0xc
+#define BRXPATH_A 0x1
+#define BRXPATH_B 0x2
+#define BRXPATH_C 0x4
+#define BRXPATH_D 0x8
+#define BTXPATH_A 0x1
+#define BTXPATH_B 0x2
+#define BTXPATH_C 0x4
+#define BTXPATH_D 0x8
+#define BTRSSI_FREQ 0x200
+#define BADC_BACKOFF 0x3000
+#define BDFIR_BACKOFF 0xc000
+#define BTRSSI_LATCH_PHASE 0x10000
+#define BRX_LDC_OFFSET 0xff
+#define BRX_QDC_OFFSET 0xff00
+#define BRX_DFIR_MODE 0x1800000
+#define BRX_DCNF_TYPE 0xe000000
+#define BRXIQIMB_A 0x3ff
+#define BRXIQIMB_B 0xfc00
+#define BRXIQIMB_C 0x3f0000
+#define BRXIQIMB_D 0xffc00000
+#define BDC_DC_NOTCH 0x60000
+#define BRXNB_NOTCH 0x1f000000
+#define BPD_TH 0xf
+#define BPD_TH_OPT2 0xc000
+#define BPWED_TH 0x700
+#define BIFMF_WIN_L 0x800
+#define BPD_OPTION 0x1000
+#define BMF_WIN_L 0xe000
+#define BBW_SEARCH_L 0x30000
+#define BWIN_ENH_L 0xc0000
+#define BBW_TH 0x700000
+#define BED_TH2 0x3800000
+#define BBW_OPTION 0x4000000
+#define BRADIO_TH 0x18000000
+#define BWINDOW_L 0xe0000000
+#define BSBD_OPTION 0x1
+#define BFRAME_TH 0x1c
+#define BFS_OPTION 0x60
+#define BDC_SLOPE_CHECK 0x80
+#define BFGUARD_COUNTER_DC_L 0xe00
+#define BFRAME_WEIGHT_SHORT 0x7000
+#define BSUB_TUNE 0xe00000
+#define BFRAME_DC_LENGTH 0xe000000
+#define BSBD_START_OFFSET 0x30000000
+#define BFRAME_TH_2 0x7
+#define BFRAME_GI2_TH 0x38
+#define BGI2_SYNC_EN 0x40
+#define BSARCH_SHORT_EARLY 0x300
+#define BSARCH_SHORT_LATE 0xc00
+#define BSARCH_GI2_LATE 0x70000
+#define BCFOANTSUM 0x1
+#define BCFOACC 0x2
+#define BCFOSTARTOFFSET 0xc
+#define BCFOLOOPBACK 0x70
+#define BCFOSUMWEIGHT 0x80
+#define BDAGCENABLE 0x10000
+#define BTXIQIMB_A 0x3ff
+#define BTXIQIMB_b 0xfc00
+#define BTXIQIMB_C 0x3f0000
+#define BTXIQIMB_D 0xffc00000
+#define BTXIDCOFFSET 0xff
+#define BTXIQDCOFFSET 0xff00
+#define BTXDFIRMODE 0x10000
+#define BTXPESUDO_NOISEON 0x4000000
+#define BTXPESUDO_NOISE_A 0xff
+#define BTXPESUDO_NOISE_B 0xff00
+#define BTXPESUDO_NOISE_C 0xff0000
+#define BTXPESUDO_NOISE_D 0xff000000
+#define BCCA_DROPOPTION 0x20000
+#define BCCA_DROPTHRES 0xfff00000
+#define BEDCCA_H 0xf
+#define BEDCCA_L 0xf0
+#define BLAMBDA_ED 0x300
+#define BRX_INITIALGAIN 0x7f
+#define BRX_ANTDIV_EN 0x80
+#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00
+#define BRX_HIGHPOWER_FLOW 0x8000
+#define BRX_AGC_FREEZE_THRES 0xc0000
+#define BRX_FREEZESTEP_AGC1 0x300000
+#define BRX_FREEZESTEP_AGC2 0xc00000
+#define BRX_FREEZESTEP_AGC3 0x3000000
+#define BRX_FREEZESTEP_AGC0 0xc000000
+#define BRXRSSI_CMP_EN 0x10000000
+#define BRXQUICK_AGCEN 0x20000000
+#define BRXAGC_FREEZE_THRES_MODE 0x40000000
+#define BRX_OVERFLOW_CHECKTYPE 0x80000000
+#define BRX_AGCSHIFT 0x7f
+#define BTRSW_TRI_ONLY 0x80
+#define BPOWER_THRES 0x300
+#define BRXAGC_EN 0x1
+#define BRXAGC_TOGETHER_EN 0x2
+#define BRXAGC_MIN 0x4
+#define BRXHP_INI 0x7
+#define BRXHP_TRLNA 0x70
+#define BRXHP_RSSI 0x700
+#define BRXHP_BBP1 0x7000
+#define BRXHP_BBP2 0x70000
+#define BRXHP_BBP3 0x700000
+#define BRSSI_H 0x7f0000
+#define BRSSI_GEN 0x7f000000
+#define BRXSETTLE_TRSW 0x7
+#define BRXSETTLE_LNA 0x38
+#define BRXSETTLE_RSSI 0x1c0
+#define BRXSETTLE_BBP 0xe00
+#define BRXSETTLE_RXHP 0x7000
+#define BRXSETTLE_ANTSW_RSSI 0x38000
+#define BRXSETTLE_ANTSW 0xc0000
+#define BRXPROCESS_TIME_DAGC 0x300000
+#define BRXSETTLE_HSSI 0x400000
+#define BRXPROCESS_TIME_BBPPW 0x800000
+#define BRXANTENNA_POWER_SHIFT 0x3000000
+#define BRSSI_TABLE_SELECT 0xc000000
+#define BRXHP_FINAL 0x7000000
+#define BRXHPSETTLE_BBP 0x7
+#define BRXHTSETTLE_HSSI 0x8
+#define BRXHTSETTLE_RXHP 0x70
+#define BRXHTSETTLE_BBPPW 0x80
+#define BRXHTSETTLE_IDLE 0x300
+#define BRXHTSETTLE_RESERVED 0x1c00
+#define BRXHT_RXHP_EN 0x8000
+#define BRXAGC_FREEZE_THRES 0x30000
+#define BRXAGC_TOGETHEREN 0x40000
+#define BRXHTAGC_MIN 0x80000
+#define BRXHTAGC_EN 0x100000
+#define BRXHTDAGC_EN 0x200000
+#define BRXHT_RXHP_BBP 0x1c00000
+#define BRXHT_RXHP_FINAL 0xe0000000
+#define BRXPW_RADIO_TH 0x3
+#define BRXPW_RADIO_EN 0x4
+#define BRXMF_HOLD 0x3800
+#define BRXPD_DELAY_TH1 0x38
+#define BRXPD_DELAY_TH2 0x1c0
+#define BRXPD_DC_COUNT_MAX 0x600
+#define BRXPD_DELAY_TH 0x8000
+#define BRXPROCESS_DELAY 0xf0000
+#define BRXSEARCHRANGE_GI2_EARLY 0x700000
+#define BRXFRAME_FUARD_COUNTER_L 0x3800000
+#define BRXSGI_GUARD_L 0xc000000
+#define BRXSGI_SEARCH_L 0x30000000
+#define BRXSGI_TH 0xc0000000
+#define BDFSCNT0 0xff
+#define BDFSCNT1 0xff00
+#define BDFSFLAG 0xf0000
+#define BMF_WEIGHT_SUM 0x300000
+#define BMINIDX_TH 0x7f000000
+#define BDAFORMAT 0x40000
+#define BTXCH_EMU_ENABLE 0x01000000
+#define BTRSW_ISOLATION_A 0x7f
+#define BTRSW_ISOLATION_B 0x7f00
+#define BTRSW_ISOLATION_C 0x7f0000
+#define BTRSW_ISOLATION_D 0x7f000000
+#define BEXT_LNA_GAIN 0x7c00
+
+#define BSTBC_EN 0x4
+#define BANTENNA_MAPPING 0x10
+#define BNSS 0x20
+#define BCFO_ANTSUM_ID 0x200
+#define BPHY_COUNTER_RESET 0x8000000
+#define BCFO_REPORT_GET 0x4000000
+#define BOFDM_CONTINUE_TX 0x10000000
+#define BOFDM_SINGLE_CARRIER 0x20000000
+#define BOFDM_SINGLE_TONE 0x40000000
+#define BHT_DETECT 0x100
+#define BCFOEN 0x10000
+#define BCFOVALUE 0xfff00000
+#define BSIGTONE_RE 0x3f
+#define BSIGTONE_IM 0x7f00
+#define BCOUNTER_CCA 0xffff
+#define BCOUNTER_PARITYFAIL 0xffff0000
+#define BCOUNTER_RATEILLEGAL 0xffff
+#define BCOUNTER_CRC8FAIL 0xffff0000
+#define BCOUNTER_MCSNOSUPPORT 0xffff
+#define BCOUNTER_FASTSYNC 0xffff
+#define BSHORTCFO 0xfff
+#define BSHORTCFOT_LENGTH 12
+#define BSHORTCFOF_LENGTH 11
+#define BLONGCFO 0x7ff
+#define BLONGCFOT_LENGTH 11
+#define BLONGCFOF_LENGTH 11
+#define BTAILCFO 0x1fff
+#define BTAILCFOT_LENGTH 13
+#define BTAILCFOF_LENGTH 12
+#define BNOISE_EN_PWDB 0xffff
+#define BCC_POWER_DB 0xffff0000
+#define BMOISE_PWDB 0xffff
+#define BPOWERMEAST_LENGTH 10
+#define BPOWERMEASF_LENGTH 3
+#define BRX_HT_BW 0x1
+#define BRXSC 0x6
+#define BRX_HT 0x8
+#define BNB_INTF_DET_ON 0x1
+#define BINTF_WIN_LEN_CFG 0x30
+#define BNB_INTF_TH_CFG 0x1c0
+#define BRFGAIN 0x3f
+#define BTABLESEL 0x40
+#define BTRSW 0x80
+#define BRXSNR_A 0xff
+#define BRXSNR_B 0xff00
+#define BRXSNR_C 0xff0000
+#define BRXSNR_D 0xff000000
+#define BSNR_EVMT_LENGTH 8
+#define BSNR_EVMF_LENGTH 1
+#define BCSI1ST 0xff
+#define BCSI2ND 0xff00
+#define BRXEVM1ST 0xff0000
+#define BRXEVM2ND 0xff000000
+#define BSIGEVM 0xff
+#define BPWDB 0xff00
+#define BSGIEN 0x10000
+
+#define BSFACTOR_QMA1 0xf
+#define BSFACTOR_QMA2 0xf0
+#define BSFACTOR_QMA3 0xf00
+#define BSFACTOR_QMA4 0xf000
+#define BSFACTOR_QMA5 0xf0000
+#define BSFACTOR_QMA6 0xf0000
+#define BSFACTOR_QMA7 0xf00000
+#define BSFACTOR_QMA8 0xf000000
+#define BSFACTOR_QMA9 0xf0000000
+#define BCSI_SCHEME 0x100000
+
+#define BNOISE_LVL_TOP_SET 0x3
+#define BCHSMOOTH 0x4
+#define BCHSMOOTH_CFG1 0x38
+#define BCHSMOOTH_CFG2 0x1c0
+#define BCHSMOOTH_CFG3 0xe00
+#define BCHSMOOTH_CFG4 0x7000
+#define BMRCMODE 0x800000
+#define BTHEVMCFG 0x7000000
+
+#define BLOOP_FIT_TYPE 0x1
+#define BUPD_CFO 0x40
+#define BUPD_CFO_OFFDATA 0x80
+#define BADV_UPD_CFO 0x100
+#define BADV_TIME_CTRL 0x800
+#define BUPD_CLKO 0x1000
+#define BFC 0x6000
+#define BTRACKING_MODE 0x8000
+#define BPHCMP_ENABLE 0x10000
+#define BUPD_CLKO_LTF 0x20000
+#define BCOM_CH_CFO 0x40000
+#define BCSI_ESTI_MODE 0x80000
+#define BADV_UPD_EQZ 0x100000
+#define BUCHCFG 0x7000000
+#define BUPDEQZ 0x8000000
+
+#define BRX_PESUDO_NOISE_ON 0x20000000
+#define BRX_PESUDO_NOISE_A 0xff
+#define BRX_PESUDO_NOISE_B 0xff00
+#define BRX_PESUDO_NOISE_C 0xff0000
+#define BRX_PESUDO_NOISE_D 0xff000000
+#define BRX_PESUDO_NOISESTATE_A 0xffff
+#define BRX_PESUDO_NOISESTATE_B 0xffff0000
+#define BRX_PESUDO_NOISESTATE_C 0xffff
+#define BRX_PESUDO_NOISESTATE_D 0xffff0000
+
+#define BZEBRA1_HSSIENABLE 0x8
+#define BZEBRA1_TRXCONTROL 0xc00
+#define BZEBRA1_TRXGAINSETTING 0x07f
+#define BZEBRA1_RXCOUNTER 0xc00
+#define BZEBRA1_TXCHANGEPUMP 0x38
+#define BZEBRA1_RXCHANGEPUMP 0x7
+#define BZEBRA1_CHANNEL_NUM 0xf80
+#define BZEBRA1_TXLPFBW 0x400
+#define BZEBRA1_RXLPFBW 0x600
+
+#define BRTL8256REG_MODE_CTRL1 0x100
+#define BRTL8256REG_MODE_CTRL0 0x40
+#define BRTL8256REG_TXLPFBW 0x18
+#define BRTL8256REG_RXLPFBW 0x600
+
+#define BRTL8258_TXLPFBW 0xc
+#define BRTL8258_RXLPFBW 0xc00
+#define BRTL8258_RSSILPFBW 0xc0
+
+#define BBYTE0 0x1
+#define BBYTE1 0x2
+#define BBYTE2 0x4
+#define BBYTE3 0x8
+#define BWORD0 0x3
+#define BWORD1 0xc
+#define BWORD 0xf
+
+#define BENABLE 0x1
+#define BDISABLE 0x0
+
+#define LEFT_ANTENNA 0x0
+#define RIGHT_ANTENNA 0x1
+
+#define TCHECK_TXSTATUS 500
+#define TUPDATE_RXCOUNTER 100
+
+#define REG_UN_used_register 0x01bf
+
+/* WOL bit information */
+#define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0)
+#define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1)
+#define HAL92C_WOL_DISASSOC_EVENT BIT(2)
+#define HAL92C_WOL_DEAUTH_EVENT BIT(3)
+#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT(4)
+
+#define WOL_REASON_PTK_UPDATE BIT(0)
+#define WOL_REASON_GTK_UPDATE BIT(1)
+#define WOL_REASON_DISASSOC BIT(2)
+#define WOL_REASON_DEAUTH BIT(3)
+#define WOL_REASON_FW_DISCONNECT BIT(4)
+
+/* 2 EFUSE_TEST (For RTL8723 partially) */
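+/* EFUSE_SEL(x) places a 2-bit source selector into bits [9:8]; EFUSE_SEL_MASK covers the same field. */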
+#define EFUSE_SEL(x) (((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK 0x300
+#define EFUSE_WIFI_SEL_0 0x0
+
+#define WL_HWPDN_EN BIT(0) /* Enable GPIO[9] as WiFi HW PDn source*/
+#define WL_HWPDN_SL BIT(1) /* WiFi HW PDn polarity control*/
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/rtlwifi/rtl8723be/rf.c
new file mode 100644
index 000000000000..486294930a7b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/rf.c
@@ -0,0 +1,504 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
+
+void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
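+ /* Clear bits [11:10] of the RF_CHNLBW value, then set both
+ * of them for 20 MHz or only BIT(10) for 40 MHz.
+ */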
+ switch (bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+ 0xfffff3ff) | BIT(10) | BIT(11));
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[0]);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+ 0xfffff3ff) | BIT(10));
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[0]);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+ "unknown bandwidth: %#X\n", bandwidth);
+ break;
+ }
+}
+
+void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 tx_agc[2] = {0, 0}, tmpval;
+ bool turbo_scanoff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+ u8 direction;
+ u32 pwrtrac_value;
+
+ if (rtlefuse->eeprom_regulatory != 0)
+ turbo_scanoff = true;
+
+ if (mac->act_scanning) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+
+ if (turbo_scanoff) {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ }
+ } else {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ if (rtlefuse->eeprom_regulatory == 0) {
+ tmpval =
+ (rtlphy->mcs_offset[0][6]) +
+ (rtlphy->mcs_offset[0][7] << 8);
+ tx_agc[RF90_PATH_A] += tmpval;
+
+ tmpval = (rtlphy->mcs_offset[0][14]) +
+ (rtlphy->mcs_offset[0][15] <<
+ 24);
+ tx_agc[RF90_PATH_B] += tmpval;
+ }
+ }
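+ /* Clamp each per-rate byte of the CCK TX AGC value to RF6052_MAX_TX_PWR. */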
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ ptr = (u8 *)(&(tx_agc[idx1]));
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
+ rtl8723be_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+ if (direction == 1) {
+ tx_agc[0] += pwrtrac_value;
+ tx_agc[1] += pwrtrac_value;
+ } else if (direction == 2) {
+ tx_agc[0] -= pwrtrac_value;
+ tx_agc[1] -= pwrtrac_value;
+ }
+ tmpval = tx_agc[RF90_PATH_A] & 0xff;
+ rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_A_CCK1_MCS32);
+
+ tmpval = tx_agc[RF90_PATH_A] >> 8;
+
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11);
+
+ tmpval = tx_agc[RF90_PATH_B] >> 24;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11);
+
+ tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK1_55_MCS32);
+}
+
+static void rtl8723be_phy_get_power_base(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm,
+ u8 *ppowerlevel_bw20,
+ u8 *ppowerlevel_bw40,
+ u8 channel, u32 *ofdmbase,
+ u32 *mcsbase)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 powerbase0, powerbase1;
+ u8 i, powerlevel[2];
+
+ for (i = 0; i < 2; i++) {
+ powerbase0 = ppowerlevel_ofdm[i];
+
+ powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
+ (powerbase0 << 8) | powerbase0;
+ *(ofdmbase + i) = powerbase0;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ " [OFDM power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(ofdmbase + i));
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
+ powerlevel[i] = ppowerlevel_bw20[i];
+ else
+ powerlevel[i] = ppowerlevel_bw40[i];
+ powerbase1 = powerlevel[i];
+ powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) |
+ (powerbase1 << 8) | powerbase1;
+
+ *(mcsbase + i) = powerbase1;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ " [MCS power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(mcsbase + i));
+ }
+}
+
+static void txpwr_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index,
+ u32 *powerbase0, u32 *powerbase1,
+ u32 *p_outwriteval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 i, chnlgroup = 0, pwr_diff_limit[4];
+ u8 pwr_diff = 0, customer_pwr_diff;
+ u32 writeval, customer_limit, rf;
+
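+ /* eeprom_regulatory selects how the per-rate value is built:
+ * 0/default - Realtek-tuned MCS offsets, 1 - offsets per channel
+ * group, 2 - power base only, 3 - customer limits from EFUSE.
+ */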
+ for (rf = 0; rf < 2; rf++) {
+ switch (rtlefuse->eeprom_regulatory) {
+ case 0:
+ chnlgroup = 0;
+
+ writeval =
+ rtlphy->mcs_offset[chnlgroup][index + (rf ? 8 : 0)]
+ + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "RTK better performance, "
+ "writeval(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval);
+ break;
+ case 1:
+ if (rtlphy->pwrgroup_cnt == 1) {
+ chnlgroup = 0;
+ } else {
+ if (channel < 3)
+ chnlgroup = 0;
+ else if (channel < 6)
+ chnlgroup = 1;
+ else if (channel < 9)
+ chnlgroup = 2;
+ else if (channel < 12)
+ chnlgroup = 3;
+ else if (channel < 14)
+ chnlgroup = 4;
+ else if (channel == 14)
+ chnlgroup = 5;
+ }
+ writeval = rtlphy->mcs_offset[chnlgroup]
+ [index + (rf ? 8 : 0)] + ((index < 2) ?
+ powerbase0[rf] :
+ powerbase1[rf]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Realtek regulatory, 20MHz, "
+ "writeval(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval);
+
+ break;
+ case 2:
+ writeval =
+ ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Better regulatory, "
+ "writeval(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval);
+ break;
+ case 3:
+ chnlgroup = 0;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "customer's limit, 40MHz "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht40[rf]
+ [channel-1]);
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "customer's limit, 20MHz "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht20[rf]
+ [channel-1]);
+ }
+
+ if (index < 2)
+ pwr_diff =
+ rtlefuse->txpwr_legacyhtdiff[rf][channel-1];
+ else if (rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20)
+ pwr_diff =
+ rtlefuse->txpwr_ht20diff[rf][channel-1];
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+ customer_pwr_diff =
+ rtlefuse->pwrgroup_ht40[rf][channel-1];
+ else
+ customer_pwr_diff =
+ rtlefuse->pwrgroup_ht20[rf][channel-1];
+
+ if (pwr_diff > customer_pwr_diff)
+ pwr_diff = 0;
+ else
+ pwr_diff = customer_pwr_diff - pwr_diff;
+
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] =
+ (u8)((rtlphy->mcs_offset
+ [chnlgroup][index + (rf ? 8 : 0)] &
+ (0x7f << (i * 8))) >> (i * 8));
+
+ if (pwr_diff_limit[i] > pwr_diff)
+ pwr_diff_limit[i] = pwr_diff;
+ }
+
+ customer_limit = (pwr_diff_limit[3] << 24) |
+ (pwr_diff_limit[2] << 16) |
+ (pwr_diff_limit[1] << 8) |
+ (pwr_diff_limit[0]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Customer's limit rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), customer_limit);
+
+ writeval = customer_limit + ((index < 2) ?
+ powerbase0[rf] :
+ powerbase1[rf]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Customer, writeval rf(%c)= 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval);
+ break;
+ default:
+ chnlgroup = 0;
+ writeval =
+ rtlphy->mcs_offset[chnlgroup]
+ [index + (rf ? 8 : 0)]
+ + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "RTK better performance, writeval "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval);
+ break;
+ }
+
+ if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+ writeval = writeval - 0x06060606;
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_BT2)
+ writeval = writeval - 0x0c0c0c0c;
+ *(p_outwriteval + rf) = writeval;
+ }
+}
+
+static void _rtl8723be_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ u8 index, u32 *value)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 regoffset_a[6] = {
+ RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+ RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+ RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+ };
+ u16 regoffset_b[6] = {
+ RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+ RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+ RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+ };
+ u8 i, rf, pwr_val[4];
+ u32 writeval;
+ u16 regoffset;
+
+ for (rf = 0; rf < 2; rf++) {
+ writeval = value[rf];
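+ /* Split the value into four per-rate bytes and clamp each
+ * to RF6052_MAX_TX_PWR before writing the register.
+ */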
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8) ((writeval & (0x7f <<
+ (i * 8))) >> (i * 8));
+
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ (pwr_val[1] << 8) | pwr_val[0];
+
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Set 0x%x = %08x\n", regoffset, writeval);
+ }
+}
+
+void rtl8723be_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm,
+ u8 *ppowerlevel_bw20,
+ u8 *ppowerlevel_bw40, u8 channel)
+{
+ u32 writeval[2], powerbase0[2], powerbase1[2];
+ u8 index;
+ u8 direction;
+ u32 pwrtrac_value;
+
+ rtl8723be_phy_get_power_base(hw, ppowerlevel_ofdm, ppowerlevel_bw20,
+ ppowerlevel_bw40, channel,
+ &powerbase0[0], &powerbase1[0]);
+
+ rtl8723be_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+
+ for (index = 0; index < 6; index++) {
+ txpwr_by_regulatory(hw, channel, index, &powerbase0[0],
+ &powerbase1[0], &writeval[0]);
+ if (direction == 1) {
+ writeval[0] += pwrtrac_value;
+ writeval[1] += pwrtrac_value;
+ } else if (direction == 2) {
+ writeval[0] -= pwrtrac_value;
+ writeval[1] -= pwrtrac_value;
+ }
+ _rtl8723be_write_ofdm_power_reg(hw, index, &writeval[0]);
+ }
+}
+
+bool rtl8723be_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (rtlphy->rf_type == RF_1T1R)
+ rtlphy->num_total_rfpath = 1;
+ else
+ rtlphy->num_total_rfpath = 2;
+
+ return _rtl8723be_phy_rf6052_config_parafile(hw);
+}
+
+static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg;
+ u32 u4_regvalue = 0;
+ u8 rfpath;
+ bool rtstatus = true;
+
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ pphyreg = &rtlphy->phyreg_def[rfpath];
+
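+ /* Save the RF_ENV setting for this path; it is restored
+ * after the RF registers are loaded from the header file.
+ */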
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16);
+ break;
+ }
+
+ rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
+
+ rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
+
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+ B3WIREADDREAALENGTH, 0x0);
+ udelay(1);
+
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ rtstatus = rtl8723be_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ break;
+ case RF90_PATH_B:
+ rtstatus = rtl8723be_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ break;
+ case RF90_PATH_C:
+ break;
+ case RF90_PATH_D:
+ break;
+ }
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl_set_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV, u4_regvalue);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl_set_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16, u4_regvalue);
+ break;
+ }
+
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!", rfpath);
+ return false;
+ }
+ }
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ return rtstatus;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/rtlwifi/rtl8723be/rf.h
new file mode 100644
index 000000000000..a6fea106ced4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/rf.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_RF_H__
+#define __RTL8723BE_RF_H__
+
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG 0x3F
+
+void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+ u8 bandwidth);
+void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl8723be_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm,
+ u8 *ppowerlevel_bw20,
+ u8 *ppowerlevel_bw40,
+ u8 channel);
+bool rtl8723be_phy_rf6052_config(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
new file mode 100644
index 000000000000..b4577ebc4bb0
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -0,0 +1,384 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "../rtl8723com/phy_common.h"
+#include "dm.h"
+#include "hw.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+#include "../btcoexist/rtl_btc.h"
+
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ /* Close ASPM for AMD by default */
+ rtlpci->const_amdpci_aspm = 0;
+
+ /* ASPM PS mode.
+ * 0 - Disable ASPM,
+ * 1 - Enable ASPM without Clock Req,
+ * 2 - Enable ASPM with Clock Req,
+ * 3 - Always Enable ASPM with Clock Req,
+ * 4 - Always Enable ASPM without Clock Req.
+ * set default to RTL8192CE:3 RTL8192E:2
+ */
+ rtlpci->const_pci_aspm = 3;
+
+ /*Setting for PCI-E device */
+ rtlpci->const_devicepci_aspm_setting = 0x03;
+
+ /*Setting for PCI-E bridge */
+ rtlpci->const_hostpci_aspm_setting = 0x02;
+
+ /* Behavior in the HW/SW radio-off situation.
+ * 0 - Default,
+ * 1 - From ASPM setting without low Mac Pwr,
+ * 2 - From ASPM setting with low Mac Pwr,
+ * 3 - Bus D3
+ * set default to RTL8192CE:0 RTL8192SE:2
+ */
+ rtlpci->const_hwsw_rfoff_d3 = 0;
+
+ /* This setting applies to devices with a backdoor
+ * ASPM setting, such as an EPHY setting.
+ * 0 - Not support ASPM,
+ * 1 - Support ASPM,
+ * 2 - According to chipset.
+ */
+ rtlpci->const_support_pciaspm = 1;
+}
+
+int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+{
+ int err = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ rtl8723be_bt_reg_init(hw);
+ rtlpci->msi_support = true;
+ rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+
+ rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_flag = 0;
+ rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.thermalvalue = 0;
+ rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
+
+ mac->ht_enable = true;
+
+ /* Unlike the 5G-capable 88ce, this chip supports only the 2.4G band and single MAC / single PHY */
+ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
+ rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
+ rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
+
+ rtlpci->receive_config = (RCR_APPFCS |
+ RCR_APP_MIC |
+ RCR_APP_ICV |
+ RCR_APP_PHYST_RXFF |
+ RCR_HTC_LOC_CTRL |
+ RCR_AMF |
+ RCR_ACF |
+ RCR_ADF |
+ RCR_AICV |
+ RCR_AB |
+ RCR_AM |
+ RCR_APM |
+ 0);
+
+ rtlpci->irq_mask[0] = (u32) (IMR_PSTIMEOUT |
+ IMR_HSISR_IND_ON_INT |
+ IMR_C2HCMD |
+ IMR_HIGHDOK |
+ IMR_MGNTDOK |
+ IMR_BKDOK |
+ IMR_BEDOK |
+ IMR_VIDOK |
+ IMR_VODOK |
+ IMR_RDU |
+ IMR_ROK |
+ 0);
+
+ rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0);
+
+ /* for debug level */
+ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
+ /* for LPS & IPS */
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ rtlpriv->psc.reg_fwctrl_lps = 3;
+ rtlpriv->psc.reg_max_lps_awakeintvl = 5;
+ /* For ASPM: ASPM can be disabled by setting
+ * const_support_pciaspm = 0.
+ */
+ rtl8723be_init_aspm_vars(hw);
+
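+ /* reg_fwctrl_lps: 1 - min mode, 2 - max mode, 3 - DTIM-based FW power-save mode. */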
+ if (rtlpriv->psc.reg_fwctrl_lps == 1)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
+ else if (rtlpriv->psc.reg_fwctrl_lps == 2)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
+ else if (rtlpriv->psc.reg_fwctrl_lps == 3)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
+
+ /* for firmware buf */
+ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
+ if (!rtlpriv->rtlhal.pfirmware) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Can't alloc buffer for fw.\n");
+ return 1;
+ }
+
+ rtlpriv->max_fw_size = 0x8000;
+ pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_fw_cb);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Failed to request firmware!\n");
+ return 1;
+ }
+ return 0;
+}
+
+void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->btcoexist.btc_ops->btc_halt_notify();
+ if (rtlpriv->rtlhal.pfirmware) {
+ vfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ }
+}
+
+/* get bt coexist status */
+bool rtl8723be_get_btc_status(void)
+{
+ return true;
+}
+
+static bool is_fw_header(struct rtl92c_firmware_header *hdr)
+{
+ return (hdr->signature & 0xfff0) == 0x5300;
+}
+
+static struct rtl_hal_ops rtl8723be_hal_ops = {
+ .init_sw_vars = rtl8723be_init_sw_vars,
+ .deinit_sw_vars = rtl8723be_deinit_sw_vars,
+ .read_eeprom_info = rtl8723be_read_eeprom_info,
+ .interrupt_recognized = rtl8723be_interrupt_recognized,
+ .hw_init = rtl8723be_hw_init,
+ .hw_disable = rtl8723be_card_disable,
+ .hw_suspend = rtl8723be_suspend,
+ .hw_resume = rtl8723be_resume,
+ .enable_interrupt = rtl8723be_enable_interrupt,
+ .disable_interrupt = rtl8723be_disable_interrupt,
+ .set_network_type = rtl8723be_set_network_type,
+ .set_chk_bssid = rtl8723be_set_check_bssid,
+ .set_qos = rtl8723be_set_qos,
+ .set_bcn_reg = rtl8723be_set_beacon_related_registers,
+ .set_bcn_intv = rtl8723be_set_beacon_interval,
+ .update_interrupt_mask = rtl8723be_update_interrupt_mask,
+ .get_hw_reg = rtl8723be_get_hw_reg,
+ .set_hw_reg = rtl8723be_set_hw_reg,
+ .update_rate_tbl = rtl8723be_update_hal_rate_tbl,
+ .fill_tx_desc = rtl8723be_tx_fill_desc,
+ .fill_tx_cmddesc = rtl8723be_tx_fill_cmddesc,
+ .query_rx_desc = rtl8723be_rx_query_desc,
+ .set_channel_access = rtl8723be_update_channel_access_setting,
+ .radio_onoff_checking = rtl8723be_gpio_radio_on_off_checking,
+ .set_bw_mode = rtl8723be_phy_set_bw_mode,
+ .switch_channel = rtl8723be_phy_sw_chnl,
+ .dm_watchdog = rtl8723be_dm_watchdog,
+ .scan_operation_backup = rtl8723be_phy_scan_operation_backup,
+ .set_rf_power_state = rtl8723be_phy_set_rf_power_state,
+ .led_control = rtl8723be_led_control,
+ .set_desc = rtl8723be_set_desc,
+ .get_desc = rtl8723be_get_desc,
+ .is_tx_desc_closed = rtl8723be_is_tx_desc_closed,
+ .tx_polling = rtl8723be_tx_polling,
+ .enable_hw_sec = rtl8723be_enable_hw_security_config,
+ .set_key = rtl8723be_set_key,
+ .init_sw_leds = rtl8723be_init_sw_leds,
+ .get_bbreg = rtl8723_phy_query_bb_reg,
+ .set_bbreg = rtl8723_phy_set_bb_reg,
+ .get_rfreg = rtl8723be_phy_query_rf_reg,
+ .set_rfreg = rtl8723be_phy_set_rf_reg,
+ .fill_h2c_cmd = rtl8723be_fill_h2c_cmd,
+ .get_btc_status = rtl8723be_get_btc_status,
+ .is_fw_header = is_fw_header,
+};
+
+static struct rtl_mod_params rtl8723be_mod_params = {
+ .sw_crypto = false,
+ .inactiveps = true,
+ .swctrl_lps = false,
+ .fwctrl_lps = true,
+ .debug = DBG_EMERG,
+};
+
+static struct rtl_hal_cfg rtl8723be_hal_cfg = {
+ .bar_id = 2,
+ .write_readback = true,
+ .name = "rtl8723be_pci",
+ .fw_name = "rtlwifi/rtl8723befw.bin",
+ .ops = &rtl8723be_hal_ops,
+ .mod_params = &rtl8723be_mod_params,
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+ .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+ .maps[SYS_CLK] = REG_SYS_CLKR,
+ .maps[MAC_RCR_AM] = AM,
+ .maps[MAC_RCR_AB] = AB,
+ .maps[MAC_RCR_ACRC32] = ACRC32,
+ .maps[MAC_RCR_ACF] = ACF,
+ .maps[MAC_RCR_AAP] = AAP,
+
+ .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS,
+
+ .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+ .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_CLK] = 0,
+ .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+ .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+ .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+ .maps[EFUSE_ANA8M] = ANA8M,
+ .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+ .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+ .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+ .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,
+
+ .maps[RWCAM] = REG_CAMCMD,
+ .maps[WCAMI] = REG_CAMWRITE,
+ .maps[RCAMO] = REG_CAMREAD,
+ .maps[CAMDBG] = REG_CAMDBG,
+ .maps[SECR] = REG_SECCFG,
+ .maps[SEC_CAM_NONE] = CAM_NONE,
+ .maps[SEC_CAM_WEP40] = CAM_WEP40,
+ .maps[SEC_CAM_TKIP] = CAM_TKIP,
+ .maps[SEC_CAM_AES] = CAM_AES,
+ .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+ .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+ .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+ .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+ .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+ .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+ .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+ .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+ .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+ .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+ .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+ .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+ .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+
+ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+ .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+ .maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0,
+ .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+ .maps[RTL_IMR_RDU] = IMR_RDU,
+ .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+ .maps[RTL_IMR_BDOK] = IMR_BCNDOK0,
+ .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+ .maps[RTL_IMR_TBDER] = IMR_TBDER,
+ .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+ .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+ .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+ .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+ .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+ .maps[RTL_IMR_VODOK] = IMR_VODOK,
+ .maps[RTL_IMR_ROK] = IMR_ROK,
+ .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER),
+
+ .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(rtl8723be_pci_id) = {
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb723, rtl8723be_hal_cfg)},
+ {},
+};
+
+MODULE_DEVICE_TABLE(pci, rtl8723be_pci_id);
+
+MODULE_AUTHOR("PageHe <page_he@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin");
+
+module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444);
+module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
+module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
+module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
+module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0 [hardware crypto])\n");
+MODULE_PARM_DESC(ips, "Set to 0 to disable inactive power save (default 1 [enabled])\n");
+MODULE_PARM_DESC(fwlps, "Set to 0 to disable FW-controlled power save when linked (default 1 [enabled])\n");
+MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+
+static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+
+static struct pci_driver rtl8723be_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtl8723be_pci_id,
+ .probe = rtl_pci_probe,
+ .remove = rtl_pci_disconnect,
+
+ .driver.pm = &rtlwifi_pm_ops,
+};
+
+module_pci_driver(rtl8723be_driver);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/rtlwifi/rtl8723be/sw.h
new file mode 100644
index 000000000000..a7b25e769950
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_SW_H__
+#define __RTL8723BE_SW_H__
+
+int rtl8723be_init_sw_vars(struct ieee80211_hw *hw);
+void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw);
+void rtl8723be_init_var_map(struct ieee80211_hw *hw);
+bool rtl8723be_get_btc_status(void);
+
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/rtlwifi/rtl8723be/table.c
new file mode 100644
index 000000000000..4b283cde042e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/table.c
@@ -0,0 +1,572 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on 2010/ 5/18, 1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
+u32 RTL8723BEPHY_REG_1TARRAY[] = {
+ 0x800, 0x80040000,
+ 0x804, 0x00000003,
+ 0x808, 0x0000FC00,
+ 0x80C, 0x0000000A,
+ 0x810, 0x10001331,
+ 0x814, 0x020C3D10,
+ 0x818, 0x02200385,
+ 0x81C, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390204,
+ 0x828, 0x00000000,
+ 0x82C, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83C, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84C, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569A11A9,
+ 0x85C, 0x01000014,
+ 0x860, 0x66F60110,
+ 0x864, 0x061F0649,
+ 0x868, 0x00000000,
+ 0x86C, 0x27272700,
+ 0x870, 0x07000760,
+ 0x874, 0x25004000,
+ 0x878, 0x00000808,
+ 0x87C, 0x00000000,
+ 0x880, 0xB0000C1C,
+ 0x884, 0x00000001,
+ 0x888, 0x00000000,
+ 0x88C, 0xCCC000C0,
+ 0x890, 0x00000800,
+ 0x894, 0xFFFFFFFE,
+ 0x898, 0x40302010,
+ 0x89C, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90C, 0x81121111,
+ 0x910, 0x00000002,
+ 0x914, 0x00000201,
+ 0x948, 0x00000000,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x80FF000C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E7F120F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x1114D028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0000,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00D30000,
+ 0xA70, 0x101FBF00,
+ 0xA74, 0x00000007,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x21806490,
+ 0xB2C, 0x00000000,
+ 0xC00, 0x48071D40,
+ 0xC04, 0x03A05611,
+ 0xC08, 0x000000E4,
+ 0xC0C, 0x6C6C6C6C,
+ 0xC10, 0x08800000,
+ 0xC14, 0x40000100,
+ 0xC18, 0x08800000,
+ 0xC1C, 0x40000100,
+ 0xC20, 0x00000000,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x69E9AC44,
+ 0xC34, 0x469652AF,
+ 0xC38, 0x49795994,
+ 0xC3C, 0x0A97971C,
+ 0xC40, 0x1F7C403F,
+ 0xC44, 0x000100B7,
+ 0xC48, 0xEC020107,
+ 0xC4C, 0x007F037F,
+ 0xC50, 0x69553420,
+ 0xC54, 0x43BC0094,
+ 0xC58, 0x00023169,
+ 0xC5C, 0x00250492,
+ 0xC60, 0x00000000,
+ 0xC64, 0x7112848B,
+ 0xC68, 0x47C00BFF,
+ 0xC6C, 0x00000036,
+ 0xC70, 0x2C7F000D,
+ 0xC74, 0x020610DB,
+ 0xC78, 0x0000001F,
+ 0xC7C, 0x00B91612,
+ 0xC80, 0x390000E4,
+ 0xC84, 0x20F60000,
+ 0xC88, 0x40000100,
+ 0xC8C, 0x20200000,
+ 0xC90, 0x00020E1A,
+ 0xC94, 0x00000000,
+ 0xC98, 0x00020E1A,
+ 0xC9C, 0x00007F7F,
+ 0xCA0, 0x00000000,
+ 0xCA4, 0x000300A0,
+ 0xCA8, 0x00000000,
+ 0xCAC, 0x00000000,
+ 0xCB0, 0x00000000,
+ 0xCB4, 0x00000000,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x28000000,
+ 0xCC0, 0x00000000,
+ 0xCC4, 0x00000000,
+ 0xCC8, 0x00000000,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x00000000,
+ 0xCD4, 0x00000000,
+ 0xCD8, 0x64B22427,
+ 0xCDC, 0x00766932,
+ 0xCE0, 0x00222222,
+ 0xCE4, 0x00000000,
+ 0xCE8, 0x37644302,
+ 0xCEC, 0x2F97D40C,
+ 0xD00, 0x00000740,
+ 0xD04, 0x40020401,
+ 0xD08, 0x0000907F,
+ 0xD0C, 0x20010201,
+ 0xD10, 0xA0633333,
+ 0xD14, 0x3333BC53,
+ 0xD18, 0x7A8F5B6F,
+ 0xD2C, 0xCC979975,
+ 0xD30, 0x00000000,
+ 0xD34, 0x80608000,
+ 0xD38, 0x00000000,
+ 0xD3C, 0x00127353,
+ 0xD40, 0x00000000,
+ 0xD44, 0x00000000,
+ 0xD48, 0x00000000,
+ 0xD4C, 0x00000000,
+ 0xD50, 0x6437140A,
+ 0xD54, 0x00000000,
+ 0xD58, 0x00000282,
+ 0xD5C, 0x30032064,
+ 0xD60, 0x4653DE68,
+ 0xD64, 0x04518A3C,
+ 0xD68, 0x00002101,
+ 0xD6C, 0x2A201C16,
+ 0xD70, 0x1812362E,
+ 0xD74, 0x322C2220,
+ 0xD78, 0x000E3C24,
+ 0xE00, 0x2D2D2D2D,
+ 0xE04, 0x2D2D2D2D,
+ 0xE08, 0x0390272D,
+ 0xE10, 0x2D2D2D2D,
+ 0xE14, 0x2D2D2D2D,
+ 0xE18, 0x2D2D2D2D,
+ 0xE1C, 0x2D2D2D2D,
+ 0xE28, 0x00000000,
+ 0xE30, 0x1000DC1F,
+ 0xE34, 0x10008C1F,
+ 0xE38, 0x02140102,
+ 0xE3C, 0x681604C2,
+ 0xE40, 0x01007C00,
+ 0xE44, 0x01004800,
+ 0xE48, 0xFB000000,
+ 0xE4C, 0x000028D1,
+ 0xE50, 0x1000DC1F,
+ 0xE54, 0x10008C1F,
+ 0xE58, 0x02140102,
+ 0xE5C, 0x28160D05,
+ 0xE60, 0x00000008,
+ 0xE68, 0x001B2556,
+ 0xE6C, 0x00C00096,
+ 0xE70, 0x00C00096,
+ 0xE74, 0x01000056,
+ 0xE78, 0x01000014,
+ 0xE7C, 0x01000056,
+ 0xE80, 0x01000014,
+ 0xE84, 0x00C00096,
+ 0xE88, 0x01000056,
+ 0xE8C, 0x00C00096,
+ 0xED0, 0x00C00096,
+ 0xED4, 0x00C00096,
+ 0xED8, 0x00C00096,
+ 0xEDC, 0x000000D6,
+ 0xEE0, 0x000000D6,
+ 0xEEC, 0x01C00016,
+ 0xF14, 0x00000003,
+ 0xF4C, 0x00000000,
+ 0xF00, 0x00000300,
+ 0x820, 0x01000100,
+ 0x800, 0x83040000,
+};
+
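+/* TX power-by-rate (PG) table: rows of six values; the last three are
+ * register offset, bitmask and data, while the leading entries select the
+ * band/path/rate group.
+ */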
+u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
+ 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00004000,
+ 0, 0, 0, 0x0000086c, 0xffffff00, 0x34363800,
+ 0, 0, 0, 0x00000e00, 0xffffffff, 0x42444646,
+ 0, 0, 0, 0x00000e04, 0xffffffff, 0x30343840,
+ 0, 0, 0, 0x00000e10, 0xffffffff, 0x38404244,
+ 0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
+};
+
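+/* RF path A register table: (RF register offset, value) pairs. */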
+u32 RTL8723BE_RADIOA_1TARRAY[] = {
+ 0x000, 0x00010000,
+ 0x0B0, 0x000DFFE0,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0B1, 0x00000018,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0B2, 0x00084C00,
+ 0x0B5, 0x0000D2CC,
+ 0x0B6, 0x000925AA,
+ 0x0B7, 0x00000010,
+ 0x0B8, 0x0000907F,
+ 0x05C, 0x00000002,
+ 0x07C, 0x00000002,
+ 0x07E, 0x00000005,
+ 0x08B, 0x0006FC00,
+ 0x0B0, 0x000FF9F0,
+ 0x01C, 0x000739D2,
+ 0x01E, 0x00000000,
+ 0x0DF, 0x00000780,
+ 0x050, 0x00067435,
+ 0x051, 0x0006B04E,
+ 0x052, 0x000007D2,
+ 0x053, 0x00000000,
+ 0x054, 0x00050400,
+ 0x055, 0x0004026E,
+ 0x0DD, 0x0000004C,
+ 0x070, 0x00067435,
+ 0x071, 0x0006B04E,
+ 0x072, 0x000007D2,
+ 0x073, 0x00000000,
+ 0x074, 0x00050400,
+ 0x075, 0x0004026E,
+ 0x0EF, 0x00000100,
+ 0x034, 0x0000ADD7,
+ 0x035, 0x00005C00,
+ 0x034, 0x00009DD4,
+ 0x035, 0x00005000,
+ 0x034, 0x00008DD1,
+ 0x035, 0x00004400,
+ 0x034, 0x00007DCE,
+ 0x035, 0x00003800,
+ 0x034, 0x00006CD1,
+ 0x035, 0x00004400,
+ 0x034, 0x00005CCE,
+ 0x035, 0x00003800,
+ 0x034, 0x000048CE,
+ 0x035, 0x00004400,
+ 0x034, 0x000034CE,
+ 0x035, 0x00003800,
+ 0x034, 0x00002451,
+ 0x035, 0x00004400,
+ 0x034, 0x0000144E,
+ 0x035, 0x00003800,
+ 0x034, 0x00000051,
+ 0x035, 0x00004400,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x0ED, 0x00000010,
+ 0x044, 0x0000ADD7,
+ 0x044, 0x00009DD4,
+ 0x044, 0x00008DD1,
+ 0x044, 0x00007DCE,
+ 0x044, 0x00006CC1,
+ 0x044, 0x00005CCE,
+ 0x044, 0x000044D1,
+ 0x044, 0x000034CE,
+ 0x044, 0x00002451,
+ 0x044, 0x0000144E,
+ 0x044, 0x00000051,
+ 0x0EF, 0x00000000,
+ 0x0ED, 0x00000000,
+ 0x0EF, 0x00002000,
+ 0x03B, 0x000380EF,
+ 0x03B, 0x000302FE,
+ 0x03B, 0x00028CE6,
+ 0x03B, 0x000200BC,
+ 0x03B, 0x000188A5,
+ 0x03B, 0x00010FBC,
+ 0x03B, 0x00008F71,
+ 0x03B, 0x00000900,
+ 0x0EF, 0x00000000,
+ 0x0ED, 0x00000001,
+ 0x040, 0x000380EF,
+ 0x040, 0x000302FE,
+ 0x040, 0x00028CE6,
+ 0x040, 0x000200BC,
+ 0x040, 0x000188A5,
+ 0x040, 0x00010FBC,
+ 0x040, 0x00008F71,
+ 0x040, 0x00000900,
+ 0x0ED, 0x00000000,
+ 0x082, 0x00080000,
+ 0x083, 0x00008000,
+ 0x084, 0x00048D80,
+ 0x085, 0x00068000,
+ 0x0A2, 0x00080000,
+ 0x0A3, 0x00008000,
+ 0x0A4, 0x00048D80,
+ 0x0A5, 0x00068000,
+ 0x000, 0x00033D80,
+};
+
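+/* MAC register table: (register offset, value) pairs; every value fits
+ * in a single byte.
+ */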
+u32 RTL8723BEMAC_1T_ARRAY[] = {
+ 0x02F, 0x00000030,
+ 0x035, 0x00000000,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000007,
+ 0x43F, 0x00000008,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x00000000,
+ 0x446, 0x00000000,
+ 0x447, 0x00000000,
+ 0x448, 0x00000000,
+ 0x449, 0x000000F0,
+ 0x44A, 0x0000000F,
+ 0x44B, 0x0000003E,
+ 0x44C, 0x00000010,
+ 0x44D, 0x00000000,
+ 0x44E, 0x00000000,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x000000F0,
+ 0x452, 0x0000000F,
+ 0x453, 0x00000000,
+ 0x456, 0x0000005E,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000050,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000050,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+};
+
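+/* AGC table: (register, value) pairs, mostly successive writes to 0xC78
+ * that program the gain table one index at a time.
+ */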
+u32 RTL8723BEAGCTAB_1TARRAY[] = {
+ 0xC78, 0xFD000001,
+ 0xC78, 0xFC010001,
+ 0xC78, 0xFB020001,
+ 0xC78, 0xFA030001,
+ 0xC78, 0xF9040001,
+ 0xC78, 0xF8050001,
+ 0xC78, 0xF7060001,
+ 0xC78, 0xF6070001,
+ 0xC78, 0xF5080001,
+ 0xC78, 0xF4090001,
+ 0xC78, 0xF30A0001,
+ 0xC78, 0xF20B0001,
+ 0xC78, 0xF10C0001,
+ 0xC78, 0xF00D0001,
+ 0xC78, 0xEF0E0001,
+ 0xC78, 0xEE0F0001,
+ 0xC78, 0xED100001,
+ 0xC78, 0xEC110001,
+ 0xC78, 0xEB120001,
+ 0xC78, 0xEA130001,
+ 0xC78, 0xE9140001,
+ 0xC78, 0xE8150001,
+ 0xC78, 0xE7160001,
+ 0xC78, 0xAA170001,
+ 0xC78, 0xA9180001,
+ 0xC78, 0xA8190001,
+ 0xC78, 0xA71A0001,
+ 0xC78, 0xA61B0001,
+ 0xC78, 0xA51C0001,
+ 0xC78, 0xA41D0001,
+ 0xC78, 0xA31E0001,
+ 0xC78, 0x671F0001,
+ 0xC78, 0x66200001,
+ 0xC78, 0x65210001,
+ 0xC78, 0x64220001,
+ 0xC78, 0x63230001,
+ 0xC78, 0x62240001,
+ 0xC78, 0x61250001,
+ 0xC78, 0x47260001,
+ 0xC78, 0x46270001,
+ 0xC78, 0x45280001,
+ 0xC78, 0x44290001,
+ 0xC78, 0x432A0001,
+ 0xC78, 0x422B0001,
+ 0xC78, 0x292C0001,
+ 0xC78, 0x282D0001,
+ 0xC78, 0x272E0001,
+ 0xC78, 0x262F0001,
+ 0xC78, 0x25300001,
+ 0xC78, 0x24310001,
+ 0xC78, 0x09320001,
+ 0xC78, 0x08330001,
+ 0xC78, 0x07340001,
+ 0xC78, 0x06350001,
+ 0xC78, 0x05360001,
+ 0xC78, 0x04370001,
+ 0xC78, 0x03380001,
+ 0xC78, 0x02390001,
+ 0xC78, 0x013A0001,
+ 0xC78, 0x003B0001,
+ 0xC78, 0x003C0001,
+ 0xC78, 0x003D0001,
+ 0xC78, 0x003E0001,
+ 0xC78, 0x003F0001,
+ 0xC78, 0xFC400001,
+ 0xC78, 0xFB410001,
+ 0xC78, 0xFA420001,
+ 0xC78, 0xF9430001,
+ 0xC78, 0xF8440001,
+ 0xC78, 0xF7450001,
+ 0xC78, 0xF6460001,
+ 0xC78, 0xF5470001,
+ 0xC78, 0xF4480001,
+ 0xC78, 0xF3490001,
+ 0xC78, 0xF24A0001,
+ 0xC78, 0xF14B0001,
+ 0xC78, 0xF04C0001,
+ 0xC78, 0xEF4D0001,
+ 0xC78, 0xEE4E0001,
+ 0xC78, 0xED4F0001,
+ 0xC78, 0xEC500001,
+ 0xC78, 0xEB510001,
+ 0xC78, 0xEA520001,
+ 0xC78, 0xE9530001,
+ 0xC78, 0xE8540001,
+ 0xC78, 0xE7550001,
+ 0xC78, 0xE6560001,
+ 0xC78, 0xE5570001,
+ 0xC78, 0xAA580001,
+ 0xC78, 0xA9590001,
+ 0xC78, 0xA85A0001,
+ 0xC78, 0xA75B0001,
+ 0xC78, 0xA65C0001,
+ 0xC78, 0xA55D0001,
+ 0xC78, 0xA45E0001,
+ 0xC78, 0x675F0001,
+ 0xC78, 0x66600001,
+ 0xC78, 0x65610001,
+ 0xC78, 0x64620001,
+ 0xC78, 0x63630001,
+ 0xC78, 0x62640001,
+ 0xC78, 0x61650001,
+ 0xC78, 0x47660001,
+ 0xC78, 0x46670001,
+ 0xC78, 0x45680001,
+ 0xC78, 0x44690001,
+ 0xC78, 0x436A0001,
+ 0xC78, 0x426B0001,
+ 0xC78, 0x296C0001,
+ 0xC78, 0x286D0001,
+ 0xC78, 0x276E0001,
+ 0xC78, 0x266F0001,
+ 0xC78, 0x25700001,
+ 0xC78, 0x24710001,
+ 0xC78, 0x09720001,
+ 0xC78, 0x08730001,
+ 0xC78, 0x07740001,
+ 0xC78, 0x06750001,
+ 0xC78, 0x05760001,
+ 0xC78, 0x04770001,
+ 0xC78, 0x03780001,
+ 0xC78, 0x02790001,
+ 0xC78, 0x017A0001,
+ 0xC78, 0x007B0001,
+ 0xC78, 0x007C0001,
+ 0xC78, 0x007D0001,
+ 0xC78, 0x007E0001,
+ 0xC78, 0x007F0001,
+ 0xC50, 0x69553422,
+ 0xC50, 0x69553420,
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/rtlwifi/rtl8723be/table.h
new file mode 100644
index 000000000000..932760a84827
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/table.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on 2010/ 5/18, 1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_TABLE__H_
+#define __RTL8723BE_TABLE__H_
+
+#include <linux/types.h>
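+
+/* The *ARRAYLEN values below count individual u32 entries,
+ * not (register, value) pairs.
+ */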
+#define RTL8723BEPHY_REG_1TARRAYLEN 388
+extern u32 RTL8723BEPHY_REG_1TARRAY[];
+#define RTL8723BEPHY_REG_ARRAY_PGLEN 36
+extern u32 RTL8723BEPHY_REG_ARRAY_PG[];
+#define RTL8723BE_RADIOA_1TARRAYLEN 206
+extern u32 RTL8723BE_RADIOA_1TARRAY[];
+#define RTL8723BEMAC_1T_ARRAYLEN 194
+extern u32 RTL8723BEMAC_1T_ARRAY[];
+#define RTL8723BEAGCTAB_1TARRAYLEN 260
+extern u32 RTL8723BEAGCTAB_1TARRAY[];
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
new file mode 100644
index 000000000000..e0a0d8c8fed5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
@@ -0,0 +1,960 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "../stats.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "trx.h"
+#include "led.h"
+#include "dm.h"
+#include "phy.h"
+
+static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
+{
+ __le16 fc = rtl_get_fc(skb);
+
+ if (unlikely(ieee80211_is_beacon(fc)))
+ return QSLT_BEACON;
+ if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
+ return QSLT_MGNT;
+
+ return skb->priority;
+}
+
+/* mac80211's rate_idx is like this:
+ *
+ * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
+ *
+ * B/G rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
+ *
+ * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
+ * A rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
+ */
+static int _rtl8723be_rate_mapping(struct ieee80211_hw *hw,
+ bool isht, u8 desc_rate)
+{
+ int rate_idx;
+
+ if (!isht) {
+ if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
+ switch (desc_rate) {
+ case DESC92C_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC92C_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC92C_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC92C_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC92C_RATE54M:
+ rate_idx = 11;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ } else {
+ switch (desc_rate) {
+ case DESC92C_RATE6M:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATE9M:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATE12M:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATE18M:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATE24M:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATE36M:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATE48M:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATE54M:
+ rate_idx = 7;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ }
+ } else {
+ switch (desc_rate) {
+ case DESC92C_RATEMCS0:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATEMCS1:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATEMCS2:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATEMCS3:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATEMCS4:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATEMCS5:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATEMCS6:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATEMCS7:
+ rate_idx = 7;
+ break;
+ case DESC92C_RATEMCS8:
+ rate_idx = 8;
+ break;
+ case DESC92C_RATEMCS9:
+ rate_idx = 9;
+ break;
+ case DESC92C_RATEMCS10:
+ rate_idx = 10;
+ break;
+ case DESC92C_RATEMCS11:
+ rate_idx = 11;
+ break;
+ case DESC92C_RATEMCS12:
+ rate_idx = 12;
+ break;
+ case DESC92C_RATEMCS13:
+ rate_idx = 13;
+ break;
+ case DESC92C_RATEMCS14:
+ rate_idx = 14;
+ break;
+ case DESC92C_RATEMCS15:
+ rate_idx = 15;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ }
+ return rate_idx;
+}
+
+static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstatus, u8 *pdesc,
+ struct rx_fwinfo_8723be *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+ bool packet_beacon)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct phy_sts_cck_8723e_t *cck_buf;
+ struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo;
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ char rx_pwr_all = 0, rx_pwr[4];
+ u8 rf_rx_num = 0, evm, pwdb_all;
+ u8 i, max_spatial_stream;
+ u32 rssi, total_rssi = 0;
+ bool is_cck = pstatus->is_cck;
+ u8 lan_idx, vga_idx;
+
+ /* Record it for next packet processing */
+ pstatus->packet_matchbssid = packet_match_bssid;
+ pstatus->packet_toself = packet_toself;
+ pstatus->packet_beacon = packet_beacon;
+ pstatus->rx_mimo_sig_qual[0] = -1;
+ pstatus->rx_mimo_sig_qual[1] = -1;
+
+ if (is_cck) {
+ u8 cck_highpwr;
+ u8 cck_agc_rpt;
+ /* The CCK driver-info structure is not the same as for OFDM packets. */
+ cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo;
+ cck_agc_rpt = cck_buf->cck_agc_rpt;
+
+ /* (1)Hardware does not provide RSSI for CCK
+ * (2)PWDB, Average PWDB calculated by
+ * hardware (for rate adaptive)
+ */
+ if (ppsc->rfpwr_state == ERFON)
+ cck_highpwr = (u8) rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ BIT(9));
+ else
+ cck_highpwr = false;
+
+ lan_idx = ((cck_agc_rpt & 0xE0) >> 5);
+ vga_idx = (cck_agc_rpt & 0x1f);
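+ /* cck_agc_rpt packs the LNA index (bits 7:5) and the VGA index
+ * (bits 4:0); map them to an absolute RX power in dBm using
+ * per-LNA base offsets.
+ */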
+ switch (lan_idx) {
+ case 7:
+ if (vga_idx <= 27)/*VGA_idx = 27~2*/
+ rx_pwr_all = -100 + 2 * (27 - vga_idx);
+ else
+ rx_pwr_all = -100;
+ break;
+ case 6:/*VGA_idx = 2~0*/
+ rx_pwr_all = -48 + 2 * (2 - vga_idx);
+ break;
+ case 5:/*VGA_idx = 7~5*/
+ rx_pwr_all = -42 + 2 * (7 - vga_idx);
+ break;
+ case 4:/*VGA_idx = 7~4*/
+ rx_pwr_all = -36 + 2 * (7 - vga_idx);
+ break;
+ case 3:/*VGA_idx = 7~0*/
+ rx_pwr_all = -24 + 2 * (7 - vga_idx);
+ break;
+ case 2:
+ if (cck_highpwr)/*VGA_idx = 5~0*/
+ rx_pwr_all = -12 + 2 * (5 - vga_idx);
+ else
+ rx_pwr_all = -6 + 2 * (5 - vga_idx);
+ break;
+ case 1:
+ rx_pwr_all = 8 - 2 * vga_idx;
+ break;
+ case 0:
+ rx_pwr_all = 14 - 2 * vga_idx;
+ break;
+ default:
+ break;
+ }
+ rx_pwr_all += 6;
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+ /* CCK gain is smaller than OFDM/MCS gain, so add an
+ * empirically determined gain difference of 6.
+ */
+ pwdb_all += 6;
+ if (pwdb_all > 100)
+ pwdb_all = 100;
+ /* modify the offset to make the same gain index with OFDM. */
+ if (pwdb_all > 34 && pwdb_all <= 42)
+ pwdb_all -= 2;
+ else if (pwdb_all > 26 && pwdb_all <= 34)
+ pwdb_all -= 6;
+ else if (pwdb_all > 14 && pwdb_all <= 26)
+ pwdb_all -= 8;
+ else if (pwdb_all > 4 && pwdb_all <= 14)
+ pwdb_all -= 4;
+ if (!cck_highpwr) {
+ if (pwdb_all >= 80)
+ pwdb_all = ((pwdb_all - 80) << 1) +
+ ((pwdb_all - 80) >> 1) + 80;
+ else if ((pwdb_all <= 78) && (pwdb_all >= 20))
+ pwdb_all += 3;
+ if (pwdb_all > 100)
+ pwdb_all = 100;
+ }
+
+ pstatus->rx_pwdb_all = pwdb_all;
+ pstatus->recvsignalpower = rx_pwr_all;
+
+ /* (3) Get Signal Quality (EVM) */
+ if (packet_match_bssid) {
+ u8 sq;
+
+ if (pstatus->rx_pwdb_all > 40) {
+ sq = 100;
+ } else {
+ sq = cck_buf->sq_rpt;
+ if (sq > 64)
+ sq = 0;
+ else if (sq < 20)
+ sq = 100;
+ else
+ sq = ((64 - sq) * 100) / 44;
+ }
+
+ pstatus->signalquality = sq;
+ pstatus->rx_mimo_sig_qual[0] = sq;
+ pstatus->rx_mimo_sig_qual[1] = -1;
+ }
+ } else {
+ rtlpriv->dm.rfpath_rxenable[0] = true;
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+
+ /* (1)Get RSSI for HT rate */
+ for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
+ /* count this RF RX path if it is enabled */
+ if (rtlpriv->dm.rfpath_rxenable[i])
+ rf_rx_num++;
+
+ rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f)*2) - 110;
+
+ /* Translate dBm to percentage. */
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
+ total_rssi += rssi;
+
+ /* Get RX SNR value in dB */
+ rtlpriv->stats.rx_snr_db[i] =
+ (long)(p_drvinfo->rxsnr[i] / 2);
+
+ /* Record Signal Strength for next packet */
+ if (packet_match_bssid)
+ pstatus->rx_mimo_signalstrength[i] = (u8) rssi;
+ }
+
+ /* (2)PWDB, Avg calculated by hardware (for rate adaptive) */
+ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+ pstatus->rx_pwdb_all = pwdb_all;
+ pstatus->rxpower = rx_pwr_all;
+ pstatus->recvsignalpower = rx_pwr_all;
+
+ /* (3)EVM of HT rate */
+ if (pstatus->is_ht && pstatus->rate >= DESC92C_RATEMCS8 &&
+ pstatus->rate <= DESC92C_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+
+ for (i = 0; i < max_spatial_stream; i++) {
+ evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+
+ if (packet_match_bssid) {
+ /* Fill value in RFD, Get the first
+ * spatial stream only
+ */
+ if (i == 0)
+ pstatus->signalquality =
+ (u8) (evm & 0xff);
+ pstatus->rx_mimo_sig_qual[i] =
+ (u8) (evm & 0xff);
+ }
+ }
+ if (packet_match_bssid) {
+ for (i = RF90_PATH_A; i <= RF90_PATH_B; i++)
+ rtl_priv(hw)->dm.cfo_tail[i] =
+ (char)p_phystrpt->path_cfotail[i];
+
+ rtl_priv(hw)->dm.packet_count++;
+ if (rtl_priv(hw)->dm.packet_count == 0xffffffff)
+ rtl_priv(hw)->dm.packet_count = 0;
+ }
+ }
+
+ /* UI BSS list signal strength (in percentage):
+ * scale it to look reasonable, from 0~100.
+ */
+ if (is_cck)
+ pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ pwdb_all));
+ else if (rf_rx_num != 0)
+ pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ total_rssi /= rf_rx_num));
+ /*HW antenna diversity*/
+ rtldm->fat_table.antsel_rx_keep_0 = p_phystrpt->ant_sel;
+ rtldm->fat_table.antsel_rx_keep_1 = p_phystrpt->ant_sel_b;
+ rtldm->fat_table.antsel_rx_keep_2 = p_phystrpt->antsel_rx_keep_2;
+}
+
+static void _rtl8723be_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstatus,
+ u8 *pdesc,
+ struct rx_fwinfo_8723be *p_drvinfo)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct ieee80211_hdr *hdr;
+ u8 *tmp_buf;
+ u8 *praddr;
+ u8 *psaddr;
+ u16 fc, type;
+ bool packet_matchbssid, packet_toself, packet_beacon;
+
+ tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
+
+ hdr = (struct ieee80211_hdr *)tmp_buf;
+ fc = le16_to_cpu(hdr->frame_control);
+ type = WLAN_FC_GET_TYPE(hdr->frame_control);
+ praddr = hdr->addr1;
+ psaddr = ieee80211_get_SA(hdr);
+ memcpy(pstatus->psaddr, psaddr, ETH_ALEN);
+
+ packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
+ (!ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ?
+ hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ?
+ hdr->addr2 : hdr->addr3)) &&
+ (!pstatus->hwerror) &&
+ (!pstatus->crc) && (!pstatus->icv));
+
+ packet_toself = packet_matchbssid &&
+ (!ether_addr_equal(praddr, rtlefuse->dev_addr));
+
+ /* YP: packet_beacon is not initialized,
+ * so this assignment is necessary,
+ * otherwise it could randomly be true here.
+ * The situation is much worse on kernel 3.10.
+ */
+ if (ieee80211_is_beacon(hdr->frame_control))
+ packet_beacon = true;
+ else
+ packet_beacon = false;
+
+ if (packet_beacon && packet_matchbssid)
+ rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++;
+
+ _rtl8723be_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo,
+ packet_matchbssid,
+ packet_toself,
+ packet_beacon);
+
+ rtl_process_phyinfo(hw, tmp_buf, pstatus);
+}
+
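+/* Build the 8-byte early-mode header used for AMPDU early mode: store the
+ * number of queued packets and their lengths, combining each pair of lengths
+ * with the first one rounded up to a 4-byte boundary plus 4 bytes of overhead.
+ */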
+static void _rtl8723be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
+ u8 *virtualaddress)
+{
+ u32 dwtmp = 0;
+ memset(virtualaddress, 0, 8);
+
+ SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
+ if (ptcb_desc->empkt_num == 1) {
+ dwtmp = ptcb_desc->empkt_len[0];
+ } else {
+ dwtmp = ptcb_desc->empkt_len[0];
+ dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+ dwtmp += ptcb_desc->empkt_len[1];
+ }
+ SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
+
+ if (ptcb_desc->empkt_num <= 3) {
+ dwtmp = ptcb_desc->empkt_len[2];
+ } else {
+ dwtmp = ptcb_desc->empkt_len[2];
+ dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+ dwtmp += ptcb_desc->empkt_len[3];
+ }
+ SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
+ if (ptcb_desc->empkt_num <= 5) {
+ dwtmp = ptcb_desc->empkt_len[4];
+ } else {
+ dwtmp = ptcb_desc->empkt_len[4];
+ dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+ dwtmp += ptcb_desc->empkt_len[5];
+ }
+ SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
+ SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
+ if (ptcb_desc->empkt_num <= 7) {
+ dwtmp = ptcb_desc->empkt_len[6];
+ } else {
+ dwtmp = ptcb_desc->empkt_len[6];
+ dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+ dwtmp += ptcb_desc->empkt_len[7];
+ }
+ SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
+ if (ptcb_desc->empkt_num <= 9) {
+ dwtmp = ptcb_desc->empkt_len[8];
+ } else {
+ dwtmp = ptcb_desc->empkt_len[8];
+ dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+ dwtmp += ptcb_desc->empkt_len[9];
+ }
+ SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
+}
+
+bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *status,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rx_fwinfo_8723be *p_drvinfo;
+ struct ieee80211_hdr *hdr;
+
+ u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc);
+ if (status->packet_report_type == TX_REPORT2)
+ status->length = (u16) GET_RX_RPT2_DESC_PKT_LEN(pdesc);
+ else
+ status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+ status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+ status->icv = (u16) GET_RX_DESC_ICV(pdesc);
+ status->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ status->hwerror = (status->crc | status->icv);
+ status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+ status->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
+ status->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+ status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+ status->isfirst_ampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+ if (status->packet_report_type == NORMAL_RX)
+ status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+ status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+
+ status->is_cck = RTL8723E_RX_HAL_IS_CCK_RATE(status->rate);
+
+ status->macid = GET_RX_DESC_MACID(pdesc);
+ if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ status->wake_match = BIT(2);
+ else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
+ status->wake_match = BIT(1);
+ else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
+ status->wake_match = BIT(0);
+ else
+ status->wake_match = 0;
+ if (status->wake_match)
+ RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
+ "Get Wakeup Packet!! WakeMatch=%d\n",
+ status->wake_match);
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size +
+ status->rx_bufshift);
+
+ if (status->crc)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (status->rx_is40Mhzpacket)
+ rx_status->flag |= RX_FLAG_40MHZ;
+
+ if (status->is_ht)
+ rx_status->flag |= RX_FLAG_HT;
+
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+
+ /* The hw sets status->decrypted true if it finds the
+ * frame is an open data frame or a mgmt frame.
+ * So the hw does not decrypt robust management frames
+ * for IEEE 802.11w but still sets status->decrypted
+ * true; set it back to undecrypted here for such
+ * frames and let mac80211 decrypt them in software.
+ */
+ if (status->decrypted) {
+ if (!hdr) {
+ WARN_ON_ONCE(true);
+ pr_err("decrypted is true but hdr NULL in skb %p\n",
+ rtl_get_hdr(skb));
+ return false;
+ }
+
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
+ (ieee80211_has_protected(hdr->frame_control)))
+ rx_status->flag &= ~RX_FLAG_DECRYPTED;
+ else
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ }
+
+ /* rate_idx: index of the data rate within the band's
+ * supported rates, or the MCS index if HT rates
+ * are used (RX_FLAG_HT).
+ * Notice: this differs from the Windows definition.
+ */
+ rx_status->rate_idx = _rtl8723be_rate_mapping(hw, status->is_ht,
+ status->rate);
+
+ rx_status->mactime = status->timestamp_low;
+ if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_8723be *)(skb->data +
+ status->rx_bufshift);
+
+ _rtl8723be_translate_rx_signal_stuff(hw, skb, status,
+ pdesc, p_drvinfo);
+ }
+
+ /*rx_status->qual = status->signal; */
+ rx_status->signal = status->recvsignalpower + 10;
+ if (status->packet_report_type == TX_REPORT2) {
+ status->macid_valid_entry[0] =
+ GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
+ status->macid_valid_entry[1] =
+ GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
+ }
+ return true;
+}
+
+void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 *pdesc = pdesc_tx;
+ u16 seq_number;
+ __le16 fc = hdr->frame_control;
+ unsigned int buf_len = 0;
+ unsigned int skb_len = skb->len;
+ u8 fw_qsel = _rtl8723be_map_hwqueue_to_fwqueue(skb, hw_queue);
+ bool firstseg = ((hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+ bool lastseg = ((hdr->frame_control &
+ cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+ dma_addr_t mapping;
+ u8 bw_40 = 0;
+ u8 short_gi = 0;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION) {
+ bw_40 = mac->bw_40;
+ } else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (sta)
+ bw_40 = sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ }
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
+ /* reserve 8 byte for AMPDU early mode */
+ if (rtlhal->earlymode_enable) {
+ skb_push(skb, EM_HDR_LEN);
+ memset(skb->data, 0, EM_HDR_LEN);
+ }
+ buf_len = skb->len;
+ mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error");
+ return;
+ }
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723be));
+ if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
+ firstseg = true;
+ lastseg = true;
+ }
+ if (firstseg) {
+ if (rtlhal->earlymode_enable) {
+ SET_TX_DESC_PKT_OFFSET(pdesc, 1);
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
+ EM_HDR_LEN);
+ if (ptcb_desc->empkt_num) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
+ _rtl8723be_insert_emcontent(ptcb_desc,
+ (u8 *)(skb->data));
+ }
+ } else {
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ }
+
+ /* ptcb_desc->use_driver_rate = true; */
+ SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ if (ptcb_desc->hw_rate > DESC92C_RATEMCS0)
+ short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
+ else
+ short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
+
+ SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ SET_TX_DESC_AGG_ENABLE(pdesc, 1);
+ SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ }
+ SET_TX_DESC_SEQ(pdesc, seq_number);
+ SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
+ !ptcb_desc->cts_enable) ?
+ 1 : 0));
+ SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0);
+ SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ?
+ 1 : 0));
+
+ SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
+
+ SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
+ SET_TX_DESC_RTS_SHORT(pdesc,
+ ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ?
+ (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
+ (ptcb_desc->rts_use_shortgi ? 1 : 0)));
+
+ if (ptcb_desc->btx_enable_sw_calc_duration)
+ SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
+
+ if (bw_40) {
+ if (ptcb_desc->packet_bw) {
+ SET_TX_DESC_DATA_BW(pdesc, 1);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, mac->cur_40_prime_sc);
+ }
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ }
+
+ SET_TX_DESC_LINIP(pdesc, 0);
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
+ if (sta) {
+ u8 ampdu_density = sta->ht_cap.ampdu_density;
+ SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ }
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *keyconf =
+ info->control.hw_key;
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ break;
+ default:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ break;
+ }
+ }
+
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+ SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+ SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
+ SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
+ 1 : 0);
+ SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+
+ if (ieee80211_is_data_qos(fc)) {
+ if (mac->rdg_en) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function.\n");
+ SET_TX_DESC_RDG_ENABLE(pdesc, 1);
+ SET_TX_DESC_HTC(pdesc, 1);
+ }
+ }
+ }
+
+ SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
+ SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
+ SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+
+ if (!ieee80211_is_data_qos(fc)) {
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ SET_TX_DESC_HWSEQ_SEL(pdesc, 0);
+ }
+ SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
+ SET_TX_DESC_BMC(pdesc, 1);
+ }
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+}
+
+void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+ bool b_firstseg, bool b_lastseg,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 fw_queue = QSLT_BEACON;
+
+ dma_addr_t mapping = pci_map_single(rtlpci->pdev,
+ skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error");
+ return;
+ }
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+
+ SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+
+ SET_TX_DESC_SEQ(pdesc, 0);
+
+ SET_TX_DESC_LINIP(pdesc, 0);
+
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
+
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+
+ SET_TX_DESC_RATE_ID(pdesc, 0);
+ SET_TX_DESC_MACID(pdesc, 0);
+
+ SET_TX_DESC_OWN(pdesc, 1);
+
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
+
+ SET_TX_DESC_USE_RATE(pdesc, 1);
+}
+
+void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
+{
+ if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ SET_TX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_TX_NEXTDESC_ADDR:
+ SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
+ break;
+ default:
+ RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ desc_name);
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_RXOWN:
+ SET_RX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val);
+ break;
+ case HW_DESC_RXERO:
+ SET_RX_DESC_EOR(pdesc, 1);
+ break;
+ default:
+ RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ desc_name);
+ break;
+ }
+ }
+}
+
+u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+{
+ u32 ret = 0;
+
+ if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_TX_DESC_OWN(pdesc);
+ break;
+ case HW_DESC_TXBUFF_ADDR:
+ ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
+ break;
+ default:
+ RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ desc_name);
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_RX_DESC_OWN(pdesc);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ ret = GET_RX_DESC_PKT_LEN(pdesc);
+ break;
+ default:
+ RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ desc_name);
+ break;
+ }
+ }
+ return ret;
+}
+
+bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+ u8 own = (u8) rtl8723be_get_desc(entry, true, HW_DESC_OWN);
+
+ /* A beacon packet will only use the first
+ * descriptor by default, and the OWN bit may not
+ * be cleared by the hardware.
+ */
+ return !own;
+}
+
+void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if (hw_queue == BEACON_QUEUE) {
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
+ } else {
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
+ BIT(0) << (hw_queue));
+ }
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/rtlwifi/rtl8723be/trx.h
new file mode 100644
index 000000000000..102f33dcc988
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.h
@@ -0,0 +1,617 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_TRX_H__
+#define __RTL8723BE_TRX_H__
+
+#define TX_DESC_SIZE 40
+#define TX_DESC_AGGR_SUBFRAME_SIZE 32
+
+#define RX_DESC_SIZE 32
+#define RX_DRV_INFO_SIZE_UNIT 8
+
+#define TX_DESC_NEXT_DESC_OFFSET 40
+#define USB_HWDESC_HEADER_LEN 40
+#define CRCLENGTH 4
+
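+/* Descriptor field accessors: SET_BITS_TO_LE_4BYTE(addr, bit offset, bit
+ * width, value) and LE_BITS_TO_4BYTE(addr, bit offset, bit width) operate
+ * on a little-endian 32-bit word; the __pdesc+N forms address successive
+ * descriptor dwords by byte offset.
+ */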
+#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
+#define SET_TX_DESC_OFFSET(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
+#define SET_TX_DESC_BMC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
+#define SET_TX_DESC_HTC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
+#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
+#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
+#define SET_TX_DESC_LINIP(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
+#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
+#define SET_TX_DESC_GF(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_TX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_TX_DESC_PKT_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 0, 16)
+#define GET_TX_DESC_OFFSET(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 16, 8)
+#define GET_TX_DESC_BMC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 24, 1)
+#define GET_TX_DESC_HTC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 25, 1)
+#define GET_TX_DESC_LAST_SEG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_TX_DESC_FIRST_SEG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_TX_DESC_LINIP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_TX_DESC_NO_ACM(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_TX_DESC_GF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_TX_DESC_OWN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_TX_DESC_MACID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val)
+#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
+#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
+#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
+#define SET_TX_DESC_PIFS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
+#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val)
+#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
+#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
+#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val)
+
+
+#define SET_TX_DESC_PAID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val)
+#define SET_TX_DESC_CCA_RTS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val)
+#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
+#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
+#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
+#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
+#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
+#define SET_TX_DESC_RAW(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
+#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
+#define SET_TX_DESC_BT_INT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
+#define SET_TX_DESC_GID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val)
+
+
+#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val)
+#define SET_TX_DESC_CHK_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val)
+#define SET_TX_DESC_EARLY_MODE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val)
+#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val)
+#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val)
+#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val)
+#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val)
+#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val)
+#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val)
+#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val)
+#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val)
+#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val)
+#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val)
+#define SET_TX_DESC_NDPA(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val)
+#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val)
+
+
+#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val)
+#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val)
+
+
+#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 4, 1, __val)
+#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val)
+#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_STBC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val)
+#define SET_TX_DESC_CTROL_STBC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val)
+#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val)
+#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+
+
+#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
+
+#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
+
+#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val)
+
+#define SET_TX_DESC_SEQ(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val)
+
+#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
+
+#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
+
+
+#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val)
+
+#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+48, 0, 32)
+
+#define GET_RX_DESC_PKT_LEN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 0, 14)
+#define GET_RX_DESC_CRC32(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 14, 1)
+#define GET_RX_DESC_ICV(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 15, 1)
+#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 20, 3)
+#define GET_RX_DESC_QOS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 24, 2)
+#define GET_RX_DESC_PHYST(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_RX_DESC_LS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_RX_DESC_FS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_RX_DESC_EOR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_RX_DESC_OWN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
+#define SET_RX_DESC_EOR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_RX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_RX_DESC_MACID(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 0, 7)
+#define GET_RX_DESC_TID(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 8, 4)
+#define GET_RX_DESC_AMSDU(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
+#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+#define GET_RX_DESC_PAGGR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+#define GET_RX_DESC_CHKERR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
+#define GET_RX_DESC_IPVER(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
+#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 22, 1)
+#define GET_RX_STATUS_DESC_CHK_VLD(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 23, 1)
+#define GET_RX_DESC_PAM(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
+#define GET_RX_DESC_MD(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
+#define GET_RX_DESC_MF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
+
+
+#define GET_RX_DESC_SEQ(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
+#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 18, 6)
+#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
+
+
+#define GET_RX_DESC_RXMCS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
+#define GET_RX_DESC_RXHT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
+#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 7, 1)
+#define GET_RX_DESC_HTC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
+#define GET_RX_STATUS_DESC_EOSP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 11, 1)
+#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 12, 2)
+
+#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 29, 1)
+#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 30, 1)
+#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 31, 1)
+
+#define GET_RX_DESC_SPLCP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 0, 1)
+#define GET_RX_STATUS_DESC_LDPC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 1, 1)
+#define GET_RX_STATUS_DESC_STBC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 2, 1)
+#define GET_RX_DESC_BW(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 4, 2)
+
+#define GET_RX_DESC_TSFL(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
+
+#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
+#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
+
+#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
+#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+
+
+/* TX report 2 format in Rx desc*/
+
+#define GET_RX_RPT2_DESC_PKT_LEN(__rxstatusdesc) \
+ LE_BITS_TO_4BYTE(__rxstatusdesc, 0, 9)
+#define GET_RX_RPT2_DESC_MACID_VALID_1(__rxstatusdesc) \
+ LE_BITS_TO_4BYTE(__rxstatusdesc+16, 0, 32)
+#define GET_RX_RPT2_DESC_MACID_VALID_2(__rxstatusdesc) \
+ LE_BITS_TO_4BYTE(__rxstatusdesc+20, 0, 32)
+
+#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value)
+#define SET_EARLYMODE_LEN0(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value)
+#define SET_EARLYMODE_LEN1(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value)
+#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value)
+#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value)
+#define SET_EARLYMODE_LEN3(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value)
+#define SET_EARLYMODE_LEN4(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value)
+
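+/* Clear at most TX_DESC_NEXT_DESC_OFFSET bytes so that the buffer-address
+ * and next-descriptor fields stored beyond that offset are preserved.
+ */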
+#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
+do { \
+ if (_size > TX_DESC_NEXT_DESC_OFFSET) \
+ memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
+ else \
+ memset(__pdesc, 0, _size); \
+} while (0)
+
+struct phy_rx_agc_info_t {
+ #ifdef __LITTLE_ENDIAN
+ u8 gain:7, trsw:1;
+ #else
+ u8 trsw:1, gain:7;
+ #endif
+};
+struct phy_status_rpt {
+ struct phy_rx_agc_info_t path_agc[2];
+ u8 ch_corr[2];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ u8 cck_rpt_b_ofdm_cfosho_b;
+ u8 rsvd_1;/* ch_corr_msb; */
+ u8 noise_power_db_msb;
+ char path_cfotail[2];
+ u8 pcts_mask[2];
+ char stream_rxevm[2];
+ u8 path_rxsnr[2];
+ u8 noise_power_db_lsb;
+ u8 rsvd_2[3];
+ u8 stream_csi[2];
+ u8 stream_target_csi[2];
+ u8 sig_evm;
+ u8 rsvd_3;
+#ifdef __LITTLE_ENDIAN
+ u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 idle_long:1;
+ u8 r_ant_train_en:1;
+ u8 ant_sel_b:1;
+ u8 ant_sel:1;
+#else /* _BIG_ENDIAN_ */
+ u8 ant_sel:1;
+ u8 ant_sel_b:1;
+ u8 r_ant_train_en:1;
+ u8 idle_long:1;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
+#endif
+} __packed;
+
+struct rx_fwinfo_8723be {
+ u8 gain_trsw[4];
+ u8 pwdb_all;
+ u8 cfosho[4];
+ u8 cfotail[4];
+ char rxevm[2];
+ char rxsnr[4];
+ u8 pdsnr[2];
+ u8 csi_current[2];
+ u8 csi_target[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+} __packed;
+
+struct tx_desc_8723be {
+ u32 pktsize:16;
+ u32 offset:8;
+ u32 bmc:1;
+ u32 htc:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 linip:1;
+ u32 noacm:1;
+ u32 gf:1;
+ u32 own:1;
+
+ u32 macid:6;
+ u32 rsvd0:2;
+ u32 queuesel:5;
+ u32 rd_nav_ext:1;
+ u32 lsig_txop_en:1;
+ u32 pifs:1;
+ u32 rateid:4;
+ u32 nav_usehdr:1;
+ u32 en_descid:1;
+ u32 sectype:2;
+ u32 pktoffset:8;
+
+ u32 rts_rc:6;
+ u32 data_rc:6;
+ u32 agg_en:1;
+ u32 rdg_en:1;
+ u32 bar_retryht:2;
+ u32 agg_break:1;
+ u32 morefrag:1;
+ u32 raw:1;
+ u32 ccx:1;
+ u32 ampdudensity:3;
+ u32 bt_int:1;
+ u32 ant_sela:1;
+ u32 ant_selb:1;
+ u32 txant_cck:2;
+ u32 txant_l:2;
+ u32 txant_ht:2;
+
+ u32 nextheadpage:8;
+ u32 tailpage:8;
+ u32 seq:12;
+ u32 cpu_handle:1;
+ u32 tag1:1;
+ u32 trigger_int:1;
+ u32 hwseq_en:1;
+
+ u32 rtsrate:5;
+ u32 apdcfe:1;
+ u32 qos:1;
+ u32 hwseq_ssn:1;
+ u32 userrate:1;
+ u32 dis_rtsfb:1;
+ u32 dis_datafb:1;
+ u32 cts2self:1;
+ u32 rts_en:1;
+ u32 hwrts_en:1;
+ u32 portid:1;
+ u32 pwr_status:3;
+ u32 waitdcts:1;
+ u32 cts2ap_en:1;
+ u32 txsc:2;
+ u32 stbc:2;
+ u32 txshort:1;
+ u32 txbw:1;
+ u32 rtsshort:1;
+ u32 rtsbw:1;
+ u32 rtssc:2;
+ u32 rtsstbc:2;
+
+ u32 txrate:6;
+ u32 shortgi:1;
+ u32 ccxt:1;
+ u32 txrate_fb_lmt:5;
+ u32 rtsrate_fb_lmt:4;
+ u32 retrylmt_en:1;
+ u32 txretrylmt:6;
+ u32 usb_txaggnum:8;
+
+ u32 txagca:5;
+ u32 txagcb:5;
+ u32 usemaxlen:1;
+ u32 maxaggnum:5;
+ u32 mcsg1maxlen:4;
+ u32 mcsg2maxlen:4;
+ u32 mcsg3maxlen:4;
+ u32 mcs7sgimaxlen:4;
+
+ u32 txbuffersize:16;
+ u32 sw_offset30:8;
+ u32 sw_offset31:4;
+ u32 rsvd1:1;
+ u32 antsel_c:1;
+ u32 null_0:1;
+ u32 null_1:1;
+
+ u32 txbuffaddr;
+ u32 txbufferaddr64;
+ u32 nextdescaddress;
+ u32 nextdescaddress64;
+
+ u32 reserve_pass_pcie_mm_limit[4];
+} __packed;
+
+struct rx_desc_8723be {
+ u32 length:14;
+ u32 crc32:1;
+ u32 icverror:1;
+ u32 drv_infosize:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 phystatus:1;
+ u32 swdec:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 eor:1;
+ u32 own:1;
+
+ u32 macid:6;
+ u32 tid:4;
+ u32 hwrsvd:5;
+ u32 paggr:1;
+ u32 faggr:1;
+ u32 a1_fit:4;
+ u32 a2_fit:4;
+ u32 pam:1;
+ u32 pwr:1;
+ u32 moredata:1;
+ u32 morefrag:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+
+ u32 seq:12;
+ u32 frag:4;
+ u32 nextpktlen:14;
+ u32 nextind:1;
+ u32 rsvd:1;
+
+ u32 rxmcs:6;
+ u32 rxht:1;
+ u32 amsdu:1;
+ u32 splcp:1;
+ u32 bandwidth:1;
+ u32 htc:1;
+ u32 tcpchk_rpt:1;
+ u32 ipcchk_rpt:1;
+ u32 tcpchk_valid:1;
+ u32 hwpcerr:1;
+ u32 hwpcind:1;
+ u32 iv0:16;
+
+ u32 iv1;
+
+ u32 tsfl;
+
+ u32 bufferaddress;
+ u32 bufferaddress64;
+
+} __packed;
+
+void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
+bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *status,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
+u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
+void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
+void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+ bool b_firstseg, bool b_lastseg,
+ struct sk_buff *skb);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/rtlwifi/rtl8723com/Makefile
new file mode 100644
index 000000000000..345a68adcf38
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/Makefile
@@ -0,0 +1,9 @@
+rtl8723-common-objs := \
+ main.o \
+ dm_common.o \
+ fw_common.o \
+ phy_common.o
+
+obj-$(CONFIG_RTL8723_COMMON) += rtl8723-common.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c
new file mode 100644
index 000000000000..4e254b72bf45
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c
@@ -0,0 +1,65 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "dm_common.h"
+#include "../rtl8723ae/dm.h"
+#include <linux/module.h>
+
+/* These routines are common to RTL8723AE and RTL8723BE */
+
+void rtl8723_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dynamic_txpower_enable = false;
+
+ rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_txpower);
+
+void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.current_turbo_edca = false;
+ rtlpriv->dm.is_any_nonbepkts = false;
+ rtlpriv->dm.is_cur_rdlstate = false;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_edca_turbo);
+
+void rtl8723_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
+ rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
+ rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
+ rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
+ rtlpriv->dm_pstable.rssi_val_min = 0;
+ rtlpriv->dm_pstable.initialize = 0;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_bb_powersaving);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
new file mode 100644
index 000000000000..5c1b94ce2f86
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __DM_COMMON_H__
+#define __DM_COMMON_H__
+
+void rtl8723_dm_init_dynamic_txpower(struct ieee80211_hw *hw);
+void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl8723_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
new file mode 100644
index 000000000000..540278ff462b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
@@ -0,0 +1,329 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "fw_common.h"
+#include <linux/module.h>
+
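+/* Enable or disable MCU firmware download: when enabling, switch on the
+ * MCU function in REG_SYS_FUNC_EN, set the download-enable bit in
+ * REG_MCUFWDL and clear bit 3 of REG_MCUFWDL + 2; when disabling, clear
+ * the download-enable bit and zero REG_MCUFWDL + 1.
+ */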
+void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp;
+
+ if (enable) {
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+ } else {
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+
+ rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8723_enable_fw_download);
+
+void rtl8723_fw_block_write(struct ieee80211_hw *hw,
+ const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 blocksize = sizeof(u32);
+ u8 *bufferptr = (u8 *)buffer;
+ u32 *pu4byteptr = (u32 *)buffer;
+ u32 i, offset, blockcount, remainsize;
+
+ blockcount = size / blocksize;
+ remainsize = size % blocksize;
+
+ for (i = 0; i < blockcount; i++) {
+ offset = i * blocksize;
+ rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
+ *(pu4byteptr + i));
+ }
+ if (remainsize) {
+ offset = blockcount * blocksize;
+ bufferptr += offset;
+ for (i = 0; i < remainsize; i++) {
+ rtl_write_byte(rtlpriv,
+ (FW_8192C_START_ADDRESS + offset + i),
+ *(bufferptr + i));
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8723_fw_block_write);
+
+void rtl8723_fw_page_write(struct ieee80211_hw *hw,
+ u32 page, const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value8;
+ u8 u8page = (u8) (page & 0x07);
+
+ value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+ rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+ rtl8723_fw_block_write(hw, buffer, size);
+}
+EXPORT_SYMBOL_GPL(rtl8723_fw_page_write);
+
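+/* Pad the firmware image with zero bytes so its length becomes a multiple
+ * of four, matching the dword-sized writes used during download.
+ */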
+static void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+ u32 fwlen = *pfwlen;
+ u8 remain = (u8) (fwlen % 4);
+
+ remain = (remain == 0) ? 0 : (4 - remain);
+
+ while (remain > 0) {
+ pfwbuf[fwlen] = 0;
+ fwlen++;
+ remain--;
+ }
+ *pfwlen = fwlen;
+}
+
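+/* Download the firmware image: pad it to a dword boundary, then write it
+ * out in FW_8192C_PAGE_SIZE chunks, selecting the download page before
+ * each chunk and finishing with any remaining partial page.
+ */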
+void rtl8723_write_fw(struct ieee80211_hw *hw,
+ enum version_8723e version,
+ u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 *bufferptr = buffer;
+ u32 pagenums, remainsize;
+ u32 page, offset;
+
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes\n", size);
+
+ rtl8723_fill_dummy(bufferptr, &size);
+
+ pagenums = size / FW_8192C_PAGE_SIZE;
+ remainsize = size % FW_8192C_PAGE_SIZE;
+
+ if (pagenums > 8) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Page numbers should not greater then 8\n");
+ }
+ for (page = 0; page < pagenums; page++) {
+ offset = page * FW_8192C_PAGE_SIZE;
+ rtl8723_fw_page_write(hw, page, (bufferptr + offset),
+ FW_8192C_PAGE_SIZE);
+ }
+ if (remainsize) {
+ offset = pagenums * FW_8192C_PAGE_SIZE;
+ page = pagenums;
+ rtl8723_fw_page_write(hw, page, (bufferptr + offset),
+ remainsize);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8723_write_fw);
+
+void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw)
+{
+ u8 u1tmp;
+ u8 delay = 100;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+ u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+
+ while (u1tmp & BIT(2)) {
+ delay--;
+ if (delay == 0)
+ break;
+ udelay(50);
+ u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ }
+ if (delay == 0) {
+ u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2)));
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8723ae_firmware_selfreset);
+
+void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw)
+{
+ u8 u1b_tmp;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0))));
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2))));
+ udelay(50);
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp | BIT(0)));
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp | BIT(2)));
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ " _8051Reset8723be(): 8051 reset success .\n");
+}
+EXPORT_SYMBOL_GPL(rtl8723be_firmware_selfreset);
+
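+/* Wait for the firmware to come up: poll REG_MCUFWDL for the checksum
+ * report, acknowledge it by setting MCUFWDL_RDY and clearing WINTINI_RDY,
+ * reset the 8051 on RTL8723BE, then poll until the firmware signals
+ * WINTINI_RDY or the polling count runs out.
+ */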
+int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err = -EIO;
+ u32 counter = 0;
+ u32 value32;
+
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
+ (!(value32 & FWDL_CHKSUM_RPT)));
+
+ if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "chksum report fail ! REG_MCUFWDL:0x%08x .\n",
+ value32);
+ goto exit;
+ }
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
+
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL) | MCUFWDL_RDY;
+ value32 &= ~WINTINI_RDY;
+ rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+
+ if (is_8723be)
+ rtl8723be_firmware_selfreset(hw);
+ counter = 0;
+
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ if (value32 & WINTINI_RDY) {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ "Polling FW ready success!! "
+ "REG_MCUFWDL:0x%08x .\n",
+ value32);
+ err = 0;
+ goto exit;
+ }
+ udelay(FW_8192C_POLLING_DELAY);
+
+ } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
+
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n",
+ value32);
+
+exit:
+ return err;
+}
+EXPORT_SYMBOL_GPL(rtl8723_fw_free_to_go);
+
+int rtl8723_download_fw(struct ieee80211_hw *hw,
+ bool is_8723be)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl92c_firmware_header *pfwheader;
+ u8 *pfwdata;
+ u32 fwsize;
+ int err;
+ enum version_8723e version = rtlhal->version;
+
+ if (!rtlhal->pfirmware)
+ return 1;
+
+ pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
+ fwsize = rtlhal->fwsize;
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ "normal Firmware SIZE %d\n", fwsize);
+
+ if (rtlpriv->cfg->ops->is_fw_header(pfwheader)) {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
+ "Firmware Version(%d), Signature(%#x), Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtl92c_firmware_header));
+
+ pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
+ fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+ }
+ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+ if (is_8723be)
+ rtl8723be_firmware_selfreset(hw);
+ else
+ rtl8723ae_firmware_selfreset(hw);
+ }
+ rtl8723_enable_fw_download(hw, true);
+ rtl8723_write_fw(hw, version, pfwdata, fwsize);
+ rtl8723_enable_fw_download(hw, false);
+
+ err = rtl8723_fw_free_to_go(hw, is_8723be);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Firmware is not ready to run!\n");
+ } else {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
+ "Firmware is ready to run!\n");
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8723_download_fw);
+
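+/* Send a command packet through the beacon queue: free any frame still
+ * queued there, fill descriptor 0 with the new skb under the irq lock,
+ * then kick the hardware with tx_polling on BEACON_QUEUE.
+ */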
+bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ struct sk_buff *pskb = NULL;
+ u8 own;
+ unsigned long flags;
+
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+ pdesc = &ring->desc[0];
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN);
+
+ rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
+
+ __skb_queue_tail(&ring->queue, skb);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(rtl8723_cmd_send_packet);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
new file mode 100644
index 000000000000..cf1cc5804d06
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
@@ -0,0 +1,126 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __FW_COMMON_H__
+#define __FW_COMMON_H__
+
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_MCUFWDL 0x0080
+#define FW_8192C_START_ADDRESS 0x1000
+#define FW_8192C_PAGE_SIZE 4096
+#define FW_8192C_POLLING_TIMEOUT_COUNT 6000
+#define FW_8192C_POLLING_DELAY 5
+
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_CHKSUM_RPT BIT(2)
+#define WINTINI_RDY BIT(6)
+
+#define REG_RSV_CTRL 0x001C
+#define REG_HMETFR 0x01CC
+
+enum version_8723e {
+ VERSION_TEST_UMC_CHIP_8723 = 0x0081,
+ VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089,
+ VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089,
+ VERSION_TEST_CHIP_1T1R_8723B = 0x0106,
+ VERSION_NORMAL_SMIC_CHIP_1T1R_8723B = 0x010E,
+ VERSION_UNKNOWN = 0xFF,
+};
+
+enum rtl8723ae_h2c_cmd {
+ H2C_AP_OFFLOAD = 0,
+ H2C_SETPWRMODE = 1,
+ H2C_JOINBSSRPT = 2,
+ H2C_RSVDPAGE = 3,
+ H2C_RSSI_REPORT = 4,
+ H2C_P2P_PS_CTW_CMD = 5,
+ H2C_P2P_PS_OFFLOAD = 6,
+ H2C_RA_MASK = 7,
+ MAX_H2CCMD
+};
+
+enum rtl8723be_cmd {
+ H2C_8723BE_RSVDPAGE = 0,
+ H2C_8723BE_JOINBSSRPT = 1,
+ H2C_8723BE_SCAN = 2,
+ H2C_8723BE_KEEP_ALIVE_CTRL = 3,
+ H2C_8723BE_DISCONNECT_DECISION = 4,
+ H2C_8723BE_INIT_OFFLOAD = 6,
+ H2C_8723BE_AP_OFFLOAD = 8,
+ H2C_8723BE_BCN_RSVDPAGE = 9,
+ H2C_8723BE_PROBERSP_RSVDPAGE = 10,
+
+ H2C_8723BE_SETPWRMODE = 0x20,
+ H2C_8723BE_PS_TUNING_PARA = 0x21,
+ H2C_8723BE_PS_TUNING_PARA2 = 0x22,
+ H2C_8723BE_PS_LPS_PARA = 0x23,
+ H2C_8723BE_P2P_PS_OFFLOAD = 0x24,
+
+ H2C_8723BE_WO_WLAN = 0x80,
+ H2C_8723BE_REMOTE_WAKE_CTRL = 0x81,
+ H2C_8723BE_AOAC_GLOBAL_INFO = 0x82,
+ H2C_8723BE_AOAC_RSVDPAGE = 0x83,
+ H2C_8723BE_RSSI_REPORT = 0x42,
+ H2C_8723BE_RA_MASK = 0x40,
+ H2C_8723BE_SELECTIVE_SUSPEND_ROF_CMD,
+ H2C_8723BE_P2P_PS_MODE,
+ H2C_8723BE_PSD_RESULT,
+ /*Not defined CTW CMD for P2P yet*/
+ H2C_8723BE_P2P_PS_CTW_CMD,
+ MAX_8723BE_H2CCMD
+};
+
+struct rtl92c_firmware_header {
+ u16 signature;
+ u8 category;
+ u8 function;
+ u16 version;
+ u8 subversion;
+ u8 rsvd1;
+ u8 month;
+ u8 date;
+ u8 hour;
+ u8 minute;
+ u16 ramcodesize;
+ u16 rsvd2;
+ u32 svnindex;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+};
+
+void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable);
+void rtl8723_fw_block_write(struct ieee80211_hw *hw,
+ const u8 *buffer, u32 size);
+void rtl8723_fw_page_write(struct ieee80211_hw *hw,
+ u32 page, const u8 *buffer, u32 size);
+void rtl8723_write_fw(struct ieee80211_hw *hw,
+ enum version_8723e version,
+ u8 *buffer, u32 size);
+int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be);
+int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/main.c b/drivers/net/wireless/rtlwifi/rtl8723com/main.c
new file mode 100644
index 000000000000..9014a94fac6a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/main.c
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include <linux/module.h>
+
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek RTL8723AE/RTL8723BE 802.11n PCI wireless common routines");
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
new file mode 100644
index 000000000000..d73b659bd2b5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
@@ -0,0 +1,434 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "phy_common.h"
+#include "../rtl8723ae/reg.h"
+#include <linux/module.h>
+
+/* These routines are common to RTL8723AE and RTL8723BE */
+
+u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 returnvalue, originalvalue, bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK = 0x%x Addr[0x%x]= 0x%x\n",
+ bitmask, regaddr, originalvalue);
+
+ return returnvalue;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_query_bb_reg);
+
+void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 originalvalue, bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+ rtl_write_dword(rtlpriv, regaddr, data);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_set_bb_reg);
+
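+/* Return the position of the least significant set bit in bitmask; the
+ * register accessors use it to shift the field selected by the mask.
+ */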
+u32 rtl8723_phy_calculate_bit_shift(u32 bitmask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((bitmask >> i) & 0x1) == 1)
+ break;
+ }
+ return i;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_calculate_bit_shift);
+
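+/* Read an RF register through the LSSI interface: program the read address
+ * into the path's HSSI parameter-2 register, toggle the read edge, wait for
+ * the transfer, then fetch the value from the PI or SI readback register,
+ * depending on whether parallel-interface mode is enabled.
+ */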
+u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+ u32 newoffset;
+ u32 tmplong, tmplong2;
+ u8 rfpi_enable = 0;
+ u32 retvalue;
+
+ offset &= 0xff;
+ newoffset = offset;
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+ return 0xFFFFFFFF;
+ }
+ tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+ if (rfpath == RF90_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+ tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+ (newoffset << 23) | BLSSIREADEDGE;
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong & (~BLSSIREADEDGE));
+ mdelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+ mdelay(2);
+ if (rfpath == RF90_PATH_A)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ else if (rfpath == RF90_PATH_B)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+ BIT(8));
+ if (rfpi_enable)
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
+ BLSSIREADBACKDATA);
+ else
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
+ BLSSIREADBACKDATA);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFR-%d Addr[0x%x]= 0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
+ return retvalue;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_read);
+
+void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 offset, u32 data)
+{
+ u32 data_and_addr;
+ u32 newoffset;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+ return;
+ }
+ offset &= 0xff;
+ newoffset = offset;
+ data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+ rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFW-%d Addr[0x%x]= 0x%x\n", rfpath,
+ pphyreg->rf3wire_offset, data_and_addr);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_write);
+
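+/* Convert a TX power index to dBm: each index step is roughly 0.5 dB, with
+ * a mode-dependent base offset (-7 for 802.11b, -8 for the other modes).
+ */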
+long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx)
+{
+ long offset;
+ long pwrout_dbm;
+
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ offset = -7;
+ break;
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ default:
+ offset = -8;
+ break;
+ }
+ pwrout_dbm = txpwridx / 2 + offset;
+ return pwrout_dbm;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_txpwr_idx_to_dbm);
+
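+/* Fill in the per-RF-path register map (interface, 3-wire, AGC, IQ
+ * imbalance and readback registers) used by the generic BB/RF accessors.
+ */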
+void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
+ RFPGA0_XA_LSSIPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
+ RFPGA0_XB_LSSIPARAMETER;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_init_bb_rf_reg_def);
+
+bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+ u32 cmdtableidx,
+ u32 cmdtablesz,
+ enum swchnlcmd_id cmdid,
+ u32 para1, u32 para2,
+ u32 msdelay)
+{
+ struct swchnlcmd *pcmd;
+
+ if (cmdtable == NULL) {
+ RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+ return false;
+ }
+
+ if (cmdtableidx >= cmdtablesz)
+ return false;
+
+ pcmd = cmdtable + cmdtableidx;
+ pcmd->cmdid = cmdid;
+ pcmd->para1 = para1;
+ pcmd->para2 = para2;
+ pcmd->msdelay = msdelay;
+ return true;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_set_sw_chnl_cmdarray);
+
+void rtl8723_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok,
+ long result[][8],
+ u8 final_candidate,
+ bool btxonly)
+{
+ u32 oldval_0, x, tx0_a, reg;
+ long y, tx0_c;
+
+ if (final_candidate == 0xFF) {
+ return;
+ } else if (iqk_ok) {
+ oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD) >> 22) & 0x3FF;
+ x = result[final_candidate][0];
+ if ((x & 0x00000200) != 0)
+ x = x | 0xFFFFFC00;
+ tx0_a = (x * oldval_0) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
+ ((x * oldval_0 >> 7) & 0x1));
+ y = result[final_candidate][1];
+ if ((y & 0x00000200) != 0)
+ y = y | 0xFFFFFC00;
+ tx0_c = (y * oldval_0) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
+ ((tx0_c & 0x3C0) >> 6));
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
+ (tx0_c & 0x3F));
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
+ ((y * oldval_0 >> 7) & 0x1));
+ if (btxonly)
+ return;
+ reg = result[final_candidate][2];
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][3] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_fill_iqk_matrix);
+
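+/* Save the current values of the listed ADDA (BB) registers into
+ * addabackup so they can be restored after IQ calibration.
+ */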
+void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg,
+ u32 *addabackup, u32 registernum)
+{
+ u32 i;
+
+ for (i = 0; i < registernum; i++)
+ addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
+}
+EXPORT_SYMBOL_GPL(rtl8723_save_adda_registers);
+
+void rtl8723_phy_save_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
+ macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_save_mac_registers);
+
+void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw,
+ u32 *addareg, u32 *addabackup,
+ u32 registernum)
+{
+ u32 i;
+
+ for (i = 0; i < registernum; i++)
+ rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_reload_adda_registers);
+
+void rtl8723_phy_reload_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
+ rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_reload_mac_registers);
+
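+/* Set the ADDA registers for IQ calibration: addareg[0] receives a
+ * path-dependent value (or a dedicated 1T value when is2t is false), and
+ * the remaining registers are all written with the common path-on value.
+ */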
+void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
+ bool is_patha_on, bool is2t)
+{
+ u32 pathon;
+ u32 i;
+
+ pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
+ if (!is2t) {
+ pathon = 0x0bdb25a0;
+ rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
+ } else {
+ rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon);
+ }
+
+ for (i = 1; i < IQK_ADDA_REG_NUM; i++)
+ rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_path_adda_on);
+
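+/* MAC-side setup for IQ calibration: write 0x3F to the first register in
+ * macreg[], clear BIT(3) in each of the middle registers and BIT(5) in the
+ * last one, using the previously saved backup values as the base.
+ */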
+void rtl8723_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i = 0;
+
+ rtl_write_byte(rtlpriv, macreg[i], 0x3F);
+
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i],
+ (u8) (macbackup[i] & (~BIT(3))));
+ rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_mac_setting_calibration);
+
+void rtl8723_phy_path_a_standby(struct ieee80211_hw *hw)
+{
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
+ rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_standby);
+
+void rtl8723_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
+{
+ u32 mode;
+
+ mode = pi_mode ? 0x01000100 : 0x01000000;
+ rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
+ rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_pi_mode_switch);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h
new file mode 100644
index 000000000000..83b891a9adb8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h
@@ -0,0 +1,89 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __PHY_COMMON__
+#define __PHY_COMMON__
+
+#define RT_CANNOT_IO(hw) false
+
+enum swchnlcmd_id {
+ CMDID_END,
+ CMDID_SET_TXPOWEROWER_LEVEL,
+ CMDID_BBREGWRITE10,
+ CMDID_WRITEPORT_ULONG,
+ CMDID_WRITEPORT_USHORT,
+ CMDID_WRITEPORT_UCHAR,
+ CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+ enum swchnlcmd_id cmdid;
+ u32 para1;
+ u32 para2;
+ u32 msdelay;
+};
+
+u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+ u32 bitmask, u32 data);
+u32 rtl8723_phy_calculate_bit_shift(u32 bitmask);
+u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset);
+void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 offset, u32 data);
+long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx);
+void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw);
+bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+ u32 cmdtableidx,
+ u32 cmdtablesz,
+ enum swchnlcmd_id cmdid,
+ u32 para1, u32 para2,
+ u32 msdelay);
+void rtl8723_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok,
+ long result[][8],
+ u8 final_candidate,
+ bool btxonly);
+void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg,
+ u32 *addabackup, u32 registernum);
+void rtl8723_phy_save_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup);
+void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw,
+ u32 *addareg, u32 *addabackup,
+ u32 registernum);
+void rtl8723_phy_reload_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup);
+void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
+ bool is_patha_on, bool is2t);
+void rtl8723_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup);
+void rtl8723_phy_path_a_standby(struct ieee80211_hw *hw);
+void rtl8723_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 4933f02ce1d5..0398d3ea15b0 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -410,7 +410,7 @@ static void rtl_usb_init_sw(struct ieee80211_hw *hw)
mac->current_ampdu_factor = 3;
/* QOS */
- rtlusb->acm_method = eAcmWay2_SW;
+ rtlusb->acm_method = EACMWAY2_SW;
/* IRQ */
/* HIMR - turn all on */
@@ -994,7 +994,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
seq_number += 1;
seq_number <<= 4;
}
- rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb,
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, NULL, info, sta, skb,
hw_queue, &tcb_desc);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
if (qc)
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 8c647391bedf..6965afdf572a 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -41,6 +41,38 @@
#include <linux/completion.h>
#include "debug.h"
+#define MASKBYTE0 0xff
+#define MASKBYTE1 0xff00
+#define MASKBYTE2 0xff0000
+#define MASKBYTE3 0xff000000
+#define MASKHWORD 0xffff0000
+#define MASKLWORD 0x0000ffff
+#define MASKDWORD 0xffffffff
+#define MASK12BITS 0xfff
+#define MASKH4BITS 0xf0000000
+#define MASKOFDM_D 0xffc00000
+#define MASKCCK 0x3f3f3f3f
+
+#define MASK4BITS 0x0f
+#define MASK20BITS 0xfffff
+#define RFREG_OFFSET_MASK 0xfffff
+
#define RF_CHANGE_BY_INIT 0
#define RF_CHANGE_BY_IPS BIT(28)
#define RF_CHANGE_BY_PS BIT(29)
@@ -49,6 +81,7 @@
#define IQK_ADDA_REG_NUM 16
#define IQK_MAC_REG_NUM 4
+#define IQK_THRESHOLD 8
#define MAX_KEY_LEN 61
#define KEY_BUF_SIZE 5
@@ -86,7 +119,18 @@
#define MAC80211_4ADDR_LEN 30
#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */
+#define CHANNEL_MAX_NUMBER_2G 14
+#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to
+ *"phy_GetChnlGroup8812A" and
+ * "Hal_ReadTxPowerInfo8812A"
+ */
+#define CHANNEL_MAX_NUMBER_5G_80M 7
#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */
#define MAX_PG_GROUP 13
#define CHANNEL_GROUP_MAX_2G 3
#define CHANNEL_GROUP_IDX_5GL 3
@@ -96,6 +140,7 @@
#define CHANNEL_MAX_NUMBER_2G 14
#define AVG_THERMAL_NUM 8
#define AVG_THERMAL_NUM_88E 4
+#define AVG_THERMAL_NUM_8723BE 4
#define MAX_TID_COUNT 9
/* for early mode */
@@ -107,6 +152,24 @@
#define MAX_CHNL_GROUP_24G 6
#define MAX_CHNL_GROUP_5G 14
+#define TX_PWR_BY_RATE_NUM_BAND 2
+#define TX_PWR_BY_RATE_NUM_RF 4
+#define TX_PWR_BY_RATE_NUM_SECTION 12
+#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6
+#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5
+
+#define RTL8192EE_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
+
+#define DEL_SW_IDX_SZ 30
+#define BAND_NUM 3
+
+enum rf_tx_num {
+ RF_1TX = 0,
+ RF_2TX,
+ RF_MAX_TX_NUM,
+ RF_TX_NUM_NONIMPLEMENT,
+};
+
struct txpower_info_2g {
u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
@@ -115,6 +178,8 @@ struct txpower_info_2g {
u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
};
struct txpower_info_5g {
@@ -123,6 +188,17 @@ struct txpower_info_5g {
u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
+
+enum rate_section {
+ CCK = 0,
+ OFDM,
+ HT_MCS0_MCS7,
+ HT_MCS8_MCS15,
+ VHT_1SSMCS0_1SSMCS9,
+ VHT_2SSMCS0_2SSMCS9,
};
enum intf_type {
@@ -158,7 +234,10 @@ enum hardware_type {
HARDWARE_TYPE_RTL8192DU,
HARDWARE_TYPE_RTL8723AE,
HARDWARE_TYPE_RTL8723U,
+ HARDWARE_TYPE_RTL8723BE,
HARDWARE_TYPE_RTL8188EE,
+ HARDWARE_TYPE_RTL8821AE,
+ HARDWARE_TYPE_RTL8812AE,
/* keep it last */
HARDWARE_TYPE_NUM
@@ -195,8 +274,16 @@ enum hardware_type {
_pdesc->rxmcs == DESC92_RATE5_5M || \
_pdesc->rxmcs == DESC92_RATE11M)
+#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs) \
+ ((rxmcs) == DESC92_RATE1M || \
+ (rxmcs) == DESC92_RATE2M || \
+ (rxmcs) == DESC92_RATE5_5M || \
+ (rxmcs) == DESC92_RATE11M)
+
enum scan_operation_backup_opt {
SCAN_OPT_BACKUP = 0,
+ SCAN_OPT_BACKUP_BAND0 = 0,
+ SCAN_OPT_BACKUP_BAND1,
SCAN_OPT_RESTORE,
SCAN_OPT_MAX
};
@@ -231,7 +318,9 @@ struct bb_reg_def {
enum io_type {
IO_CMD_PAUSE_DM_BY_SCAN = 0,
- IO_CMD_RESUME_DM_BY_SCAN = 1,
+ IO_CMD_PAUSE_BAND0_DM_BY_SCAN = 0,
+ IO_CMD_PAUSE_BAND1_DM_BY_SCAN = 1,
+ IO_CMD_RESUME_DM_BY_SCAN = 2,
};
enum hw_variables {
@@ -298,6 +387,7 @@ enum hw_variables {
HW_VAR_SET_RPWM,
HW_VAR_H2C_FW_PWRMODE,
HW_VAR_H2C_FW_JOINBSSRPT,
+ HW_VAR_H2C_FW_MEDIASTATUSRPT,
HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
HW_VAR_FW_PSMODE_STATUS,
HW_VAR_RESUME_CLK_ON,
@@ -330,6 +420,8 @@ enum hw_variables {
HAL_DEF_WOWLAN,
HW_VAR_MRC,
+ HW_VAR_KEEP_ALIVE,
+ HW_VAR_NAV_UPPER,
HW_VAR_MGT_FILTER,
HW_VAR_CTRL_FILTER,
@@ -348,34 +440,34 @@ enum rt_oem_id {
RT_CID_8187_HW_LED = 3,
RT_CID_8187_NETGEAR = 4,
RT_CID_WHQL = 5,
- RT_CID_819x_CAMEO = 6,
- RT_CID_819x_RUNTOP = 7,
- RT_CID_819x_Senao = 8,
+ RT_CID_819X_CAMEO = 6,
+ RT_CID_819X_RUNTOP = 7,
+ RT_CID_819X_SENAO = 8,
RT_CID_TOSHIBA = 9,
- RT_CID_819x_Netcore = 10,
- RT_CID_Nettronix = 11,
+ RT_CID_819X_NETCORE = 10,
+ RT_CID_NETTRONIX = 11,
RT_CID_DLINK = 12,
RT_CID_PRONET = 13,
RT_CID_COREGA = 14,
- RT_CID_819x_ALPHA = 15,
- RT_CID_819x_Sitecom = 16,
+ RT_CID_819X_ALPHA = 15,
+ RT_CID_819X_SITECOM = 16,
RT_CID_CCX = 17,
- RT_CID_819x_Lenovo = 18,
- RT_CID_819x_QMI = 19,
- RT_CID_819x_Edimax_Belkin = 20,
- RT_CID_819x_Sercomm_Belkin = 21,
- RT_CID_819x_CAMEO1 = 22,
- RT_CID_819x_MSI = 23,
- RT_CID_819x_Acer = 24,
- RT_CID_819x_HP = 27,
- RT_CID_819x_CLEVO = 28,
- RT_CID_819x_Arcadyan_Belkin = 29,
- RT_CID_819x_SAMSUNG = 30,
- RT_CID_819x_WNC_COREGA = 31,
- RT_CID_819x_Foxcoon = 32,
- RT_CID_819x_DELL = 33,
- RT_CID_819x_PRONETS = 34,
- RT_CID_819x_Edimax_ASUS = 35,
+ RT_CID_819X_LENOVO = 18,
+ RT_CID_819X_QMI = 19,
+ RT_CID_819X_EDIMAX_BELKIN = 20,
+ RT_CID_819X_SERCOMM_BELKIN = 21,
+ RT_CID_819X_CAMEO1 = 22,
+ RT_CID_819X_MSI = 23,
+ RT_CID_819X_ACER = 24,
+ RT_CID_819X_HP = 27,
+ RT_CID_819X_CLEVO = 28,
+ RT_CID_819X_ARCADYAN_BELKIN = 29,
+ RT_CID_819X_SAMSUNG = 30,
+ RT_CID_819X_WNC_COREGA = 31,
+ RT_CID_819X_FOXCOON = 32,
+ RT_CID_819X_DELL = 33,
+ RT_CID_819X_PRONETS = 34,
+ RT_CID_819X_EDIMAX_ASUS = 35,
RT_CID_NETGEAR = 36,
RT_CID_PLANEX = 37,
RT_CID_CC_C = 38,
@@ -389,6 +481,7 @@ enum hw_descs {
HW_DESC_RXBUFF_ADDR,
HW_DESC_RXPKT_LEN,
HW_DESC_RXERO,
+ HW_DESC_RX_PREPARE,
};
enum prime_sc {
@@ -407,6 +500,7 @@ enum rf_type {
enum ht_channel_width {
HT_CHANNEL_WIDTH_20 = 0,
HT_CHANNEL_WIDTH_20_40 = 1,
+ HT_CHANNEL_WIDTH_80 = 2,
};
/* Ref: 802.11i sepc D10.0 7.3.2.25.1
@@ -471,6 +565,9 @@ enum rtl_var_map {
MAC_RCR_ACRC32,
MAC_RCR_ACF,
MAC_RCR_AAP,
+ MAC_HIMR,
+ MAC_HIMRE,
+ MAC_HSISR,
/*efuse map */
EFUSE_TEST,
@@ -608,7 +705,7 @@ enum rtl_led_pin {
enum acm_method {
eAcmWay0_SwAndHw = 0,
eAcmWay1_HW = 1,
- eAcmWay2_SW = 2,
+ EACMWAY2_SW = 2,
};
enum macphy_mode {
@@ -645,7 +742,9 @@ enum wireless_mode {
WIRELESS_MODE_G = 0x04,
WIRELESS_MODE_AUTO = 0x08,
WIRELESS_MODE_N_24G = 0x10,
- WIRELESS_MODE_N_5G = 0x20
+ WIRELESS_MODE_N_5G = 0x20,
+ WIRELESS_MODE_AC_5G = 0x40,
+ WIRELESS_MODE_AC_24G = 0x80
};
#define IS_WIRELESS_MODE_A(wirelessmode) \
@@ -669,6 +768,8 @@ enum ratr_table_mode {
RATR_INX_WIRELESS_B = 6,
RATR_INX_WIRELESS_MC = 7,
RATR_INX_WIRELESS_A = 8,
+ RATR_INX_WIRELESS_AC_5N = 8,
+ RATR_INX_WIRELESS_AC_24N = 9,
};
enum rtl_link_state {
@@ -803,8 +904,12 @@ struct wireless_stats {
long signal_strength;
u8 rx_rssi_percentage[4];
+ u8 rx_evm_dbm[4];
u8 rx_evm_percentage[2];
+ u16 rx_cfo_short[4];
+ u16 rx_cfo_tail[4];
+
struct rt_smooth_data ui_rssi;
struct rt_smooth_data ui_link_quality;
};
@@ -817,9 +922,9 @@ struct rate_adaptive {
u32 high_rssi_thresh_for_ra;
u32 high2low_rssi_thresh_for_ra;
u8 low2high_rssi_thresh_for_ra40m;
- u32 low_rssi_thresh_for_ra40M;
+ u32 low_rssi_thresh_for_ra40m;
u8 low2high_rssi_thresh_for_ra20m;
- u32 low_rssi_thresh_for_ra20M;
+ u32 low_rssi_thresh_for_ra20m;
u32 upper_rssi_threshold_ratr;
u32 middleupper_rssi_threshold_ratr;
u32 middle_rssi_threshold_ratr;
@@ -833,6 +938,10 @@ struct rate_adaptive {
u32 ping_rssi_thresh_for_ra;
u32 last_ratr;
u8 pre_ratr_state;
+ u8 ldpc_thres;
+ bool use_ldpc;
+ bool lower_rts_rate;
+ bool is_special_data;
};
struct regd_pair_mapping {
@@ -841,6 +950,16 @@ struct regd_pair_mapping {
u16 reg_2ghz_ctl;
};
+struct dynamic_primary_cca {
+ u8 pricca_flag;
+ u8 intf_flag;
+ u8 intf_type;
+ u8 dup_rts_flag;
+ u8 monitor_flag;
+ u8 ch_offset;
+ u8 mf_state;
+};
+
struct rtl_regulatory {
char alpha2[2];
u16 country_code;
@@ -976,16 +1095,29 @@ struct rtl_phy {
u32 iqk_bb_backup[10];
bool iqk_initialized;
+ bool rfpath_rx_enable[MAX_RF_PATH];
+ u8 reg_837;
/* Dual mac */
bool need_iqk;
struct iqk_matrix_regs iqk_matrix[IQK_MATRIX_SETTINGS_NUM];
bool rfpi_enable;
+ bool iqk_in_progress;
u8 pwrgroup_cnt;
u8 cck_high_power;
/* MAX_PG_GROUP groups of pwr diff by rates */
u32 mcs_offset[MAX_PG_GROUP][16];
+ u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_SECTION];
+ u8 txpwr_by_rate_base_24g[TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [MAX_BASE_NUM_IN_PHY_REG_PG_24G];
+ u8 txpwr_by_rate_base_5g[TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [MAX_BASE_NUM_IN_PHY_REG_PG_5G];
u8 default_initialgain[4];
/* the current Tx power level */
@@ -998,6 +1130,7 @@ struct rtl_phy {
bool apk_done;
u32 reg_rf3c[2]; /* pathA / pathB */
+ u32 backup_rf_0x1a;/*92ee*/
/* bfsync */
u8 framesync;
u32 framesync_c34;
@@ -1006,6 +1139,7 @@ struct rtl_phy {
struct phy_parameters hwparam_tables[MAX_TAB];
u16 rf_pathmap;
+ u8 hw_rof_enable; /*Enable GPIO[9] as WL RF HW PDn source*/
enum rt_polarity_ctl polarity_ctl;
};
@@ -1133,6 +1267,7 @@ struct rtl_mac {
u8 use_cts_protect;
u8 cur_40_prime_sc;
u8 cur_40_prime_sc_bk;
+ u8 cur_80_prime_sc;
u64 tsf;
u8 retry_short;
u8 retry_long;
@@ -1213,6 +1348,7 @@ struct rtl_hal {
bool being_init_adapter;
bool bbrf_ready;
bool mac_func_enable;
+ bool pre_edcca_enable;
struct bt_coexist_8723 hal_coex_8723;
enum intf_type interface;
@@ -1234,6 +1370,7 @@ struct rtl_hal {
/*Reserve page start offset except beacon in TxQ. */
u8 fw_rsvdpage_startoffset;
u8 h2c_txcmd_seq;
+ u8 current_ra_rate;
/* FW Cmd IO related */
u16 fwcmd_iomap;
@@ -1273,6 +1410,9 @@ struct rtl_hal {
bool disable_amsdu_8k;
bool master_of_dmsp;
bool slave_of_dmsp;
+
+ u16 rx_tag;/*for 92ee*/
+ u8 rts_en;
};
struct rtl_security {
@@ -1321,6 +1461,16 @@ struct fast_ant_training {
bool becomelinked;
};
+struct dm_phy_dbg_info {
+ char rx_snrdb[4];
+ u64 num_qry_phy_status;
+ u64 num_qry_phy_status_cck;
+ u64 num_qry_phy_status_ofdm;
+ u16 num_qry_beacon_pkt;
+ u16 num_non_be_pkt;
+ s32 rx_evm[4];
+};
+
struct rtl_dm {
/*PHY status for Dynamic Management */
long entry_min_undec_sm_pwdb;
@@ -1360,29 +1510,84 @@ struct rtl_dm {
u8 txpower_track_control;
bool interrupt_migration;
bool disable_tx_int;
- char ofdm_index[2];
+ char ofdm_index[MAX_RF_PATH];
+ u8 default_ofdm_index;
+ u8 default_cck_index;
char cck_index;
- char delta_power_index;
- char delta_power_index_last;
- char power_index_offset;
+ char delta_power_index[MAX_RF_PATH];
+ char delta_power_index_last[MAX_RF_PATH];
+ char power_index_offset[MAX_RF_PATH];
+ char absolute_ofdm_swing_idx[MAX_RF_PATH];
+ char remnant_ofdm_swing_idx[MAX_RF_PATH];
+ char remnant_cck_idx;
+ bool modify_txagc_flag_path_a;
+ bool modify_txagc_flag_path_b;
+
+ bool one_entry_only;
+ struct dm_phy_dbg_info dbginfo;
+
+ /* Dynamic ATC switch */
+ bool atc_status;
+ bool large_cfo_hit;
+ bool is_freeze;
+ int cfo_tail[2];
+ int cfo_ave_pre;
+ int crystal_cap;
+ u8 cfo_threshold;
+ u32 packet_count;
+ u32 packet_count_pre;
+ u8 tx_rate;
/*88e tx power tracking*/
- u8 swing_idx_ofdm[2];
+ u8 swing_idx_ofdm[MAX_RF_PATH];
u8 swing_idx_ofdm_cur;
- u8 swing_idx_ofdm_base;
+ u8 swing_idx_ofdm_base[MAX_RF_PATH];
bool swing_flag_ofdm;
u8 swing_idx_cck;
u8 swing_idx_cck_cur;
u8 swing_idx_cck_base;
bool swing_flag_cck;
+ char swing_diff_2g;
+ char swing_diff_5g;
+
+ u8 delta_swing_table_idx_24gccka_p[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24gccka_n[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24gcckb_p[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24gcckb_n[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24ga_p[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24ga_n[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24gb_p[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24gb_n[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_5ga_p[BAND_NUM][DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_5ga_n[BAND_NUM][DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_5gb_p[BAND_NUM][DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_5gb_n[BAND_NUM][DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24ga_p_8188e[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24ga_n_8188e[DEL_SW_IDX_SZ];
+
/* DMSP */
bool supp_phymode_switch;
+ /* DulMac */
struct fast_ant_training fat_table;
+
+ u8 resp_tx_path;
+ u8 path_sel;
+ u32 patha_sum;
+ u32 pathb_sum;
+ u32 patha_cnt;
+ u32 pathb_cnt;
+
+ u8 pre_channel;
+ u8 *p_channel;
+ u8 linked_interval;
+
+ u64 last_tx_ok_cnt;
+ u64 last_rx_ok_cnt;
};
-#define EFUSE_MAX_LOGICAL_SIZE 256
+#define EFUSE_MAX_LOGICAL_SIZE 512
struct rtl_efuse {
bool autoLoad_ok;
@@ -1422,12 +1627,9 @@ struct rtl_efuse {
u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
- u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
- u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
- u8 eprom_chnl_txpwr_ht40_2sdf[2][CHANNEL_GROUP_MAX];
- u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
- u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
- u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
+ u8 eeprom_chnlarea_txpwr_cck[MAX_RF_PATH][CHANNEL_GROUP_MAX_2G];
+ u8 eeprom_chnlarea_txpwr_ht40_1s[MAX_RF_PATH][CHANNEL_GROUP_MAX];
+ u8 eprom_chnl_txpwr_ht40_2sdf[MAX_RF_PATH][CHANNEL_GROUP_MAX];
u8 internal_pa_5g[2]; /* pathA / pathB */
u8 eeprom_c9;
@@ -1438,9 +1640,38 @@ struct rtl_efuse {
u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
- char txpwr_ht20diff[2][CHANNEL_MAX_NUMBER]; /*HT 20<->40 Pwr diff */
- /*For HT<->legacy pwr diff*/
- u8 txpwr_legacyhtdiff[2][CHANNEL_MAX_NUMBER];
+ u8 txpwrlevel_cck[MAX_RF_PATH][CHANNEL_MAX_NUMBER_2G];
+ /*For HT 40MHZ pwr */
+ u8 txpwrlevel_ht40_1s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ /*For HT 40MHZ pwr */
+ u8 txpwrlevel_ht40_2s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+
+ /*--------------------------------------------------------*
+ * 8192CE/8192SE/8192DE/8723AE use the following four arrays;
+ * other ICs (8188EE/8723BE/8192EE/8812AE, ...) define new arrays
+ * in the Windows code, but the Linux code uses the same arrays
+ * for all ICs.
+ *
+ * The correspondence between the two naming schemes is:
+ * txpwr_cckdiff[][] == CCK_24G_Diff[][]
+ * txpwr_ht20diff[][] == BW20_24G_Diff[][]
+ * txpwr_ht40diff[][] == BW40_24G_Diff[][]
+ * txpwr_legacyhtdiff[][] == OFDM_24G_Diff[][]
+ *
+ * Sizes of these arrays are decided by the larger ones.
+ */
+ char txpwr_cckdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ char txpwr_ht20diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ char txpwr_ht40diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ char txpwr_legacyhtdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+
+ u8 txpwr_5g_bw40base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ u8 txpwr_5g_bw80base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M];
+ char txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT];
+
u8 txpwr_safetyflag; /* Band edge enable flag */
u16 eeprom_txpowerdiff;
u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
@@ -1571,7 +1802,9 @@ struct rtl_stats {
bool rx_is40Mhzpacket;
u32 rx_pwdb_all;
u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
- s8 rx_mimo_sig_qual[2];
+ s8 rx_mimo_sig_qual[4];
+ u8 rx_pwr[4]; /* per-path's pwdb */
+ u8 rx_snr[4]; /* per-path's SNR */
bool packet_matchbssid;
bool is_cck;
bool is_ht;
@@ -1644,6 +1877,8 @@ struct rtl_tcb_desc {
bool btx_enable_sw_calc_duration;
};
+struct rtl92c_firmware_header;
+
struct rtl_hal_ops {
int (*init_sw_vars) (struct ieee80211_hw *hw);
void (*deinit_sw_vars) (struct ieee80211_hw *hw);
@@ -1673,9 +1908,17 @@ struct rtl_hal_ops {
void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
void (*update_rate_tbl) (struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u8 rssi_level);
+ void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc,
+ u8 *desc, u8 queue_index,
+ struct sk_buff *skb, dma_addr_t addr);
void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level);
+ u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw,
+ u8 queue_index);
+ void (*rx_check_dma_ok)(struct ieee80211_hw *hw, u8 *header_desc,
+ u8 queue_index);
void (*fill_tx_desc) (struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ u8 *pbd_desc_tx,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
@@ -1698,8 +1941,11 @@ struct rtl_hal_ops {
enum rf_pwrstate rfpwr_state);
void (*led_control) (struct ieee80211_hw *hw,
enum led_ctl_mode ledaction);
- void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+ void (*set_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
+ bool (*is_tx_desc_closed) (struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
void (*tx_polling) (struct ieee80211_hw *hw, u8 hw_queue);
void (*enable_hw_sec) (struct ieee80211_hw *hw);
void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
@@ -1738,6 +1984,10 @@ struct rtl_hal_ops {
void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
+ bool (*get_btc_status) (void);
+ bool (*is_fw_header) (struct rtl92c_firmware_header *hdr);
+ u32 (*rx_command_packet)(struct ieee80211_hw *hw,
+ struct rtl_stats status, struct sk_buff *skb);
};
struct rtl_intf_ops {
@@ -1847,6 +2097,8 @@ struct rtl_locks {
/*Easy concurrent*/
spinlock_t check_sendpkt_lock;
+
+ spinlock_t iqk_lock;
};
struct rtl_works {
@@ -1915,6 +2167,7 @@ struct ps_t {
u8 cur_ccasate;
u8 pre_rfstate;
u8 cur_rfstate;
+ u8 initialize;
long rssi_val_min;
};
@@ -1939,6 +2192,7 @@ struct dig_t {
u8 cursta_cstate;
u8 presta_cstate;
u8 curmultista_cstate;
+ u8 stop_dig;
char back_val;
char back_range_max;
char back_range_min;
@@ -1956,6 +2210,7 @@ struct dig_t {
u8 cur_ccasate;
u8 large_fa_hit;
u8 dig_dynamic_min;
+ u8 dig_dynamic_min_1;
u8 forbidden_igi;
u8 dig_state;
u8 dig_highpwrstate;
@@ -1972,6 +2227,7 @@ struct dig_t {
char backoffval_range_min;
u8 dig_min_0;
u8 dig_min_1;
+ u8 bt30_cur_igi;
bool media_connect_0;
bool media_connect_1;
@@ -1986,6 +2242,96 @@ struct rtl_global_var {
spinlock_t glb_list_lock;
};
+struct rtl_btc_info {
+ u8 bt_type;
+ u8 btcoexist;
+ u8 ant_num;
+};
+
+struct bt_coexist_info {
+ struct rtl_btc_ops *btc_ops;
+ struct rtl_btc_info btc_info;
+ /* EEPROM BT info. */
+ u8 eeprom_bt_coexist;
+ u8 eeprom_bt_type;
+ u8 eeprom_bt_ant_num;
+ u8 eeprom_bt_ant_isol;
+ u8 eeprom_bt_radio_shared;
+
+ u8 bt_coexistence;
+ u8 bt_ant_num;
+ u8 bt_coexist_type;
+ u8 bt_state;
+ u8 bt_cur_state; /* 0:on, 1:off */
+ u8 bt_ant_isolation; /* 0:good, 1:bad */
+ u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
+ u8 bt_service;
+ u8 bt_radio_shared_type;
+ u8 bt_rfreg_origin_1e;
+ u8 bt_rfreg_origin_1f;
+ u8 bt_rssi_state;
+ u32 ratio_tx;
+ u32 ratio_pri;
+ u32 bt_edca_ul;
+ u32 bt_edca_dl;
+
+ bool init_set;
+ bool bt_busy_traffic;
+ bool bt_traffic_mode_set;
+ bool bt_non_traffic_mode_set;
+
+ bool fw_coexist_all_off;
+ bool sw_coexist_all_off;
+ bool hw_coexist_all_off;
+ u32 cstate;
+ u32 previous_state;
+ u32 cstate_h;
+ u32 previous_state_h;
+
+ u8 bt_pre_rssi_state;
+ u8 bt_pre_rssi_state1;
+
+ u8 reg_bt_iso;
+ u8 reg_bt_sco;
+ bool balance_on;
+ u8 bt_active_zero_cnt;
+ bool cur_bt_disabled;
+ bool pre_bt_disabled;
+
+ u8 bt_profile_case;
+ u8 bt_profile_action;
+ bool bt_busy;
+ bool hold_for_bt_operation;
+ u8 lps_counter;
+};
+
+struct rtl_btc_ops {
+ void (*btc_init_variables) (struct rtl_priv *rtlpriv);
+ void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
+ void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
+ void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
+ void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype);
+ void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action);
+ void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv,
+ enum _RT_MEDIA_STATUS mstatus);
+ void (*btc_periodical) (struct rtl_priv *rtlpriv);
+ void (*btc_halt_notify) (void);
+ void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv,
+ u8 *tmp_buf, u8 length);
+ bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv);
+ bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv);
+ bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
+};
+
+struct proxim {
+ bool proxim_on;
+
+ void *proximity_priv;
+ int (*proxim_rx)(struct ieee80211_hw *hw, struct rtl_stats *status,
+ struct sk_buff *skb);
+ u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
+};
+
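The rtl_btc_ops and proxim structures added above are plain function-pointer hook tables: the BT-coexistence (or proximity) module fills in the callbacks it supports, and the core driver invokes them through the pointer kept in its private data, typically after checking that the hook is non-NULL. A minimal stand-alone sketch of that dispatch pattern, with made-up names rather than the driver's own:

#include <stdio.h>

/* Hypothetical hook table, loosely modelled on rtl_btc_ops above. */
struct demo_btc_ops {
        void (*init_variables)(void *priv);
        void (*scan_notify)(void *priv, int scantype);
};

static void demo_init_variables(void *priv)
{
        printf("btc: init variables for %p\n", priv);
}

static void demo_scan_notify(void *priv, int scantype)
{
        printf("btc: scan notify, type %d\n", scantype);
}

/* The coexistence module provides one instance of the table... */
static const struct demo_btc_ops demo_ops = {
        .init_variables = demo_init_variables,
        .scan_notify    = demo_scan_notify,
};

int main(void)
{
        /* ...and the core keeps only a pointer to it. */
        const struct demo_btc_ops *ops = &demo_ops;
        int priv_data = 0;

        if (ops->init_variables)
                ops->init_variables(&priv_data);
        if (ops->scan_notify)
                ops->scan_notify(&priv_data, 1);
        return 0;
}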
struct rtl_priv {
struct ieee80211_hw *hw;
struct completion firmware_loading_complete;
@@ -2008,6 +2354,7 @@ struct rtl_priv {
struct rtl_ps_ctl psc;
struct rate_adaptive ra;
+ struct dynamic_primary_cca primarycca;
struct wireless_stats stats;
struct rt_link_detect link_info;
struct false_alarm_statistics falsealm_cnt;
@@ -2048,6 +2395,20 @@ struct rtl_priv {
bool enter_ps; /* true when entering PS */
u8 rate_mask[5];
+ /* Intel Proximity: memory should be allocated
+ * by the Intel Proximity module and can only
+ * be used in Intel Proximity mode.
+ */
+ struct proxim proximity;
+
+ /* for BT coexist use */
+ struct bt_coexist_info btcoexist;
+
+ /* separates the 92ee from other ICs;
+ * the 92ee uses the new TRX flow.
+ */
+ bool use_new_trx_flow;
+
/*This must be the last item so
that it points to the data allocated
beyond this structure like:
@@ -2079,6 +2440,15 @@ enum bt_co_type {
BT_CSR_BC8 = 4,
BT_RTL8756 = 5,
BT_RTL8723A = 6,
+ BT_RTL8821A = 7,
+ BT_RTL8723B = 8,
+ BT_RTL8192E = 9,
+ BT_RTL8812A = 11,
+};
+
+enum bt_total_ant_num {
+ ANT_TOTAL_X2 = 0,
+ ANT_TOTAL_X1 = 1
};
enum bt_cur_state {
@@ -2104,62 +2474,6 @@ enum bt_radio_shared {
BT_RADIO_INDIVIDUAL = 1,
};
-struct bt_coexist_info {
-
- /* EEPROM BT info. */
- u8 eeprom_bt_coexist;
- u8 eeprom_bt_type;
- u8 eeprom_bt_ant_num;
- u8 eeprom_bt_ant_isol;
- u8 eeprom_bt_radio_shared;
-
- u8 bt_coexistence;
- u8 bt_ant_num;
- u8 bt_coexist_type;
- u8 bt_state;
- u8 bt_cur_state; /* 0:on, 1:off */
- u8 bt_ant_isolation; /* 0:good, 1:bad */
- u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
- u8 bt_service;
- u8 bt_radio_shared_type;
- u8 bt_rfreg_origin_1e;
- u8 bt_rfreg_origin_1f;
- u8 bt_rssi_state;
- u32 ratio_tx;
- u32 ratio_pri;
- u32 bt_edca_ul;
- u32 bt_edca_dl;
-
- bool init_set;
- bool bt_busy_traffic;
- bool bt_traffic_mode_set;
- bool bt_non_traffic_mode_set;
-
- bool fw_coexist_all_off;
- bool sw_coexist_all_off;
- bool hw_coexist_all_off;
- u32 cstate;
- u32 previous_state;
- u32 cstate_h;
- u32 previous_state_h;
-
- u8 bt_pre_rssi_state;
- u8 bt_pre_rssi_state1;
-
- u8 reg_bt_iso;
- u8 reg_bt_sco;
- bool balance_on;
- u8 bt_active_zero_cnt;
- bool cur_bt_disabled;
- bool pre_bt_disabled;
-
- u8 bt_profile_case;
- u8 bt_profile_action;
- bool bt_busy;
- bool hold_for_bt_operation;
- u8 lps_counter;
-};
-
/****************************************
mem access macro define start
diff --git a/drivers/net/wireless/ti/wilink_platform_data.c b/drivers/net/wireless/ti/wilink_platform_data.c
index 998e95895f9d..a92bd3e89796 100644
--- a/drivers/net/wireless/ti/wilink_platform_data.c
+++ b/drivers/net/wireless/ti/wilink_platform_data.c
@@ -23,17 +23,17 @@
#include <linux/err.h>
#include <linux/wl12xx.h>
-static struct wl12xx_platform_data *platform_data;
+static struct wl12xx_platform_data *wl12xx_platform_data;
int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
{
- if (platform_data)
+ if (wl12xx_platform_data)
return -EBUSY;
if (!data)
return -EINVAL;
- platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
- if (!platform_data)
+ wl12xx_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
+ if (!wl12xx_platform_data)
return -ENOMEM;
return 0;
@@ -41,9 +41,34 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
struct wl12xx_platform_data *wl12xx_get_platform_data(void)
{
- if (!platform_data)
+ if (!wl12xx_platform_data)
return ERR_PTR(-ENODEV);
- return platform_data;
+ return wl12xx_platform_data;
}
EXPORT_SYMBOL(wl12xx_get_platform_data);
+
+static struct wl1251_platform_data *wl1251_platform_data;
+
+int __init wl1251_set_platform_data(const struct wl1251_platform_data *data)
+{
+ if (wl1251_platform_data)
+ return -EBUSY;
+ if (!data)
+ return -EINVAL;
+
+ wl1251_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
+ if (!wl1251_platform_data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+struct wl1251_platform_data *wl1251_get_platform_data(void)
+{
+ if (!wl1251_platform_data)
+ return ERR_PTR(-ENODEV);
+
+ return wl1251_platform_data;
+}
+EXPORT_SYMBOL(wl1251_get_platform_data);
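The new wl1251 helpers mirror the wl12xx ones just above them: a board file registers a private copy of its platform data exactly once (kmemdup), a second registration is refused with -EBUSY, and a driver that asks before anything was registered gets ERR_PTR(-ENODEV). A self-contained user-space approximation of that set-once registry, using malloc/memcpy in place of kmemdup and a NULL return in place of the error pointer:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for struct wl1251_platform_data. */
struct demo_platform_data {
        int irq;
        int power_gpio;
        int use_eeprom;
};

static struct demo_platform_data *registered_data;

/* Register a private copy of the board data; only the first call wins. */
static int demo_set_platform_data(const struct demo_platform_data *data)
{
        if (registered_data)
                return -EBUSY;
        if (!data)
                return -EINVAL;

        registered_data = malloc(sizeof(*data));
        if (!registered_data)
                return -ENOMEM;
        memcpy(registered_data, data, sizeof(*data));
        return 0;
}

/* Drivers fetch the data later; NULL here plays the role of ERR_PTR(-ENODEV). */
static const struct demo_platform_data *demo_get_platform_data(void)
{
        return registered_data;
}

int main(void)
{
        struct demo_platform_data board = { .irq = 42, .power_gpio = 12, .use_eeprom = 1 };

        printf("first set:  %d\n", demo_set_platform_data(&board));
        printf("second set: %d\n", demo_set_platform_data(&board));    /* -EBUSY */
        printf("irq = %d\n", demo_get_platform_data()->irq);
        return 0;
}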
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index 223649bcaa5a..bf1fa18b9786 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -448,7 +448,7 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
* Note: This bug may be caused by the fw's DTIM handling.
*/
if (is_zero_ether_addr(wl->bssid))
- cmd->params.scan_options |= WL1251_SCAN_OPT_PRIORITY_HIGH;
+ cmd->params.scan_options |= cpu_to_le16(WL1251_SCAN_OPT_PRIORITY_HIGH);
cmd->params.num_channels = n_channels;
cmd->params.num_probe_requests = n_probes;
cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
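The one-line scan_options fix above only changes behaviour on big-endian hosts: scan_options is a little-endian field in the command the firmware parses, so the flag has to go through cpu_to_le16() before being OR-ed in, otherwise the bit lands in the wrong byte on the wire. A hedged, stand-alone illustration of the underlying rule (build the value in host order, convert once at the wire boundary; the flag value below is made up):

#include <stdint.h>
#include <stdio.h>

/* Store a 16-bit value into a buffer in little-endian byte order. */
static void put_le16(uint8_t *p, uint16_t v)
{
        p[0] = (uint8_t)(v & 0xff);
        p[1] = (uint8_t)(v >> 8);
}

static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
}

int main(void)
{
        uint8_t wire[2];
        uint16_t opts = 0;

        opts |= 0x1000;                 /* hypothetical "high priority" flag */
        put_le16(wire, opts);           /* byte order fixed only when serializing */

        printf("wire bytes: %02x %02x (decodes back to 0x%04x)\n",
               wire[0], wire[1], get_le16(wire));
        return 0;
}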
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index e2b3d9c541e8..b661f896e9fe 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -28,6 +28,7 @@
#include <linux/wl12xx.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
#include "wl1251.h"
@@ -182,8 +183,9 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
* callback in case it wants to do any additional setup,
* for example enabling clock buffer for the module.
*/
- if (wl->set_power)
- wl->set_power(true);
+ if (gpio_is_valid(wl->power_gpio))
+ gpio_set_value(wl->power_gpio, true);
+
ret = pm_runtime_get_sync(&func->dev);
if (ret < 0) {
@@ -203,8 +205,8 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
if (ret < 0)
goto out;
- if (wl->set_power)
- wl->set_power(false);
+ if (gpio_is_valid(wl->power_gpio))
+ gpio_set_value(wl->power_gpio, false);
}
out:
@@ -227,7 +229,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
struct wl1251 *wl;
struct ieee80211_hw *hw;
struct wl1251_sdio *wl_sdio;
- const struct wl12xx_platform_data *wl12xx_board_data;
+ const struct wl1251_platform_data *wl1251_board_data;
hw = wl1251_alloc_hw();
if (IS_ERR(hw))
@@ -254,11 +256,20 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl->if_priv = wl_sdio;
wl->if_ops = &wl1251_sdio_ops;
- wl12xx_board_data = wl12xx_get_platform_data();
- if (!IS_ERR(wl12xx_board_data)) {
- wl->set_power = wl12xx_board_data->set_power;
- wl->irq = wl12xx_board_data->irq;
- wl->use_eeprom = wl12xx_board_data->use_eeprom;
+ wl1251_board_data = wl1251_get_platform_data();
+ if (!IS_ERR(wl1251_board_data)) {
+ wl->power_gpio = wl1251_board_data->power_gpio;
+ wl->irq = wl1251_board_data->irq;
+ wl->use_eeprom = wl1251_board_data->use_eeprom;
+ }
+
+ if (gpio_is_valid(wl->power_gpio)) {
+ ret = devm_gpio_request(&func->dev, wl->power_gpio,
+ "wl1251 power");
+ if (ret) {
+ wl1251_error("Failed to request gpio: %d\n", ret);
+ goto disable;
+ }
}
if (wl->irq) {
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 1342f81e683d..b06d36d99362 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -26,6 +26,10 @@
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
#include "wl1251.h"
#include "reg.h"
@@ -221,8 +225,8 @@ static void wl1251_spi_disable_irq(struct wl1251 *wl)
static int wl1251_spi_set_power(struct wl1251 *wl, bool enable)
{
- if (wl->set_power)
- wl->set_power(enable);
+ if (gpio_is_valid(wl->power_gpio))
+ gpio_set_value(wl->power_gpio, enable);
return 0;
}
@@ -238,13 +242,13 @@ static const struct wl1251_if_operations wl1251_spi_ops = {
static int wl1251_spi_probe(struct spi_device *spi)
{
- struct wl12xx_platform_data *pdata;
+ struct wl1251_platform_data *pdata = dev_get_platdata(&spi->dev);
+ struct device_node *np = spi->dev.of_node;
struct ieee80211_hw *hw;
struct wl1251 *wl;
int ret;
- pdata = dev_get_platdata(&spi->dev);
- if (!pdata) {
+ if (!np && !pdata) {
wl1251_error("no platform data");
return -ENODEV;
}
@@ -271,22 +275,42 @@ static int wl1251_spi_probe(struct spi_device *spi)
goto out_free;
}
- wl->set_power = pdata->set_power;
- if (!wl->set_power) {
- wl1251_error("set power function missing in platform data");
- return -ENODEV;
+ if (np) {
+ wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
+ wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
+ } else if (pdata) {
+ wl->power_gpio = pdata->power_gpio;
+ wl->use_eeprom = pdata->use_eeprom;
+ }
+
+ if (wl->power_gpio == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_free;
+ }
+
+ if (gpio_is_valid(wl->power_gpio)) {
+ ret = devm_gpio_request_one(&spi->dev, wl->power_gpio,
+ GPIOF_OUT_INIT_LOW, "wl1251 power");
+ if (ret) {
+ wl1251_error("Failed to request gpio: %d\n", ret);
+ goto out_free;
+ }
+ } else {
+ wl1251_error("set power gpio missing in platform data");
+ ret = -ENODEV;
+ goto out_free;
}
wl->irq = spi->irq;
if (wl->irq < 0) {
wl1251_error("irq missing in platform data");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free;
}
- wl->use_eeprom = pdata->use_eeprom;
-
irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
- ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
+ ret = devm_request_irq(&spi->dev, wl->irq, wl1251_irq, 0,
+ DRIVER_NAME, wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
goto out_free;
@@ -294,16 +318,26 @@ static int wl1251_spi_probe(struct spi_device *spi)
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+ wl->vio = devm_regulator_get(&spi->dev, "vio");
+ if (IS_ERR(wl->vio)) {
+ ret = PTR_ERR(wl->vio);
+ wl1251_error("vio regulator missing: %d", ret);
+ goto out_free;
+ }
+
+ ret = regulator_enable(wl->vio);
+ if (ret)
+ goto out_free;
+
ret = wl1251_init_ieee80211(wl);
if (ret)
- goto out_irq;
+ goto disable_regulator;
return 0;
- out_irq:
- free_irq(wl->irq, wl);
-
- out_free:
+disable_regulator:
+ regulator_disable(wl->vio);
+out_free:
ieee80211_free_hw(hw);
return ret;
@@ -315,6 +349,7 @@ static int wl1251_spi_remove(struct spi_device *spi)
free_irq(wl->irq, wl);
wl1251_free_hw(wl);
+ regulator_disable(wl->vio);
return 0;
}
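The reworked probe leans on device-managed resources (devm_gpio_request_one(), devm_request_irq(), devm_regulator_get()), which the driver core releases automatically, and keeps one manual step, regulator_enable(), that both the new error label and wl1251_spi_remove() must undo explicitly. The goto-based unwinding used here is the standard kernel shape for probe error paths; a minimal stand-alone sketch of it, with stub resources:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for resources acquired in order during probe. */
static int request_irq_demo(void)        { return 0; }  /* 0 on success */
static int regulator_enable_demo(void)   { return 0; }
static void regulator_disable_demo(void) { puts("regulator disabled"); }
static int init_hw_demo(void)            { return -EIO; }       /* force a failure */

static int demo_probe(void)
{
        void *hw;
        int ret;

        hw = malloc(64);
        if (!hw)
                return -ENOMEM;

        ret = request_irq_demo();
        if (ret < 0)
                goto err_free;

        ret = regulator_enable_demo();
        if (ret < 0)
                goto err_free;

        ret = init_hw_demo();
        if (ret < 0)
                goto err_disable;       /* undo only what was actually done */

        return 0;

err_disable:
        regulator_disable_demo();
err_free:
        free(hw);
        return ret;
}

int main(void)
{
        printf("probe returned %d\n", demo_probe());
        return 0;
}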
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 235617a7716d..16dae5269175 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -276,10 +276,12 @@ struct wl1251 {
void *if_priv;
const struct wl1251_if_operations *if_ops;
- void (*set_power)(bool enable);
+ int power_gpio;
int irq;
bool use_eeprom;
+ struct regulator *vio;
+
spinlock_t wl_lock;
enum wl1251_state state;
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index be7129ba16ad..d50dfac91631 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1378,7 +1378,7 @@ static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
static int wl12xx_tx_delayed_compl(struct wl1271 *wl)
{
- if (wl->fw_status_1->tx_results_counter ==
+ if (wl->fw_status->tx_results_counter ==
(wl->tx_results_count & 0xff))
return 0;
@@ -1438,6 +1438,37 @@ out:
return ret;
}
+static void wl12xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ struct wl12xx_fw_status *int_fw_status = raw_fw_status;
+
+ fw_status->intr = le32_to_cpu(int_fw_status->intr);
+ fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+ fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+ fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+ fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+ fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+ fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+ fw_status->link_fast_bitmap =
+ le32_to_cpu(int_fw_status->link_fast_bitmap);
+ fw_status->total_released_blks =
+ le32_to_cpu(int_fw_status->total_released_blks);
+ fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+ fw_status->counters.tx_released_pkts =
+ int_fw_status->counters.tx_released_pkts;
+ fw_status->counters.tx_lnk_free_pkts =
+ int_fw_status->counters.tx_lnk_free_pkts;
+ fw_status->counters.tx_voice_released_blks =
+ int_fw_status->counters.tx_voice_released_blks;
+ fw_status->counters.tx_last_rate =
+ int_fw_status->counters.tx_last_rate;
+
+ fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+}
+
static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
@@ -1677,6 +1708,7 @@ static struct wlcore_ops wl12xx_ops = {
.tx_delayed_compl = wl12xx_tx_delayed_compl,
.hw_init = wl12xx_hw_init,
.init_vif = NULL,
+ .convert_fw_status = wl12xx_convert_fw_status,
.sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask,
.get_pg_ver = wl12xx_get_pg_ver,
.get_mac = wl12xx_get_mac,
@@ -1711,22 +1743,53 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
},
};
+static const struct ieee80211_iface_limit wl12xx_iface_limits[] = {
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_combination
+wl12xx_iface_combinations[] = {
+ {
+ .max_interfaces = 3,
+ .limits = wl12xx_iface_limits,
+ .n_limits = ARRAY_SIZE(wl12xx_iface_limits),
+ .num_different_channels = 1,
+ },
+};
+
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
+ BUILD_BUG_ON(WL12XX_MAX_LINKS > WLCORE_MAX_LINKS);
+ BUILD_BUG_ON(WL12XX_MAX_AP_STATIONS > WL12XX_MAX_LINKS);
+
wl->rtable = wl12xx_rtable;
wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
- wl->num_channels = 1;
+ wl->num_links = WL12XX_MAX_LINKS;
+ wl->max_ap_stations = WL12XX_MAX_AP_STATIONS;
+ wl->iface_combinations = wl12xx_iface_combinations;
+ wl->n_iface_combinations = ARRAY_SIZE(wl12xx_iface_combinations);
wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
+ wl->fw_status_len = sizeof(struct wl12xx_fw_status);
wl->fw_status_priv_len = 0;
wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
+ wl->ofdm_only_ap = true;
wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
wl12xx_conf_init(wl);
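The two BUILD_BUG_ON() lines in wl12xx_setup() turn the per-chip limits into a compile-time contract: WL12XX_MAX_LINKS may not exceed the wlcore-wide WLCORE_MAX_LINKS that shared arrays such as wl->links are sized for, and the AP station limit must fit within the link count. In plain C11 the same check is a _Static_assert; a small sketch with illustrative values:

#include <stdio.h>

#define WLCORE_MAX_LINKS_DEMO           16      /* size of the shared per-link arrays */
#define WL12XX_MAX_LINKS_DEMO           12      /* what this chip family supports */
#define WL12XX_MAX_AP_STATIONS_DEMO     8

/* Breaks the build instead of corrupting memory at run time. */
_Static_assert(WL12XX_MAX_LINKS_DEMO <= WLCORE_MAX_LINKS_DEMO,
               "chip link count exceeds wlcore array size");
_Static_assert(WL12XX_MAX_AP_STATIONS_DEMO <= WL12XX_MAX_LINKS_DEMO,
               "AP station limit exceeds link count");

int main(void)
{
        printf("links: %d of %d, AP stations: %d\n",
               WL12XX_MAX_LINKS_DEMO, WLCORE_MAX_LINKS_DEMO,
               WL12XX_MAX_AP_STATIONS_DEMO);
        return 0;
}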
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 9e5484a73667..75c92658bfea 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -65,6 +65,9 @@
#define WL12XX_RX_BA_MAX_SESSIONS 3
+#define WL12XX_MAX_AP_STATIONS 8
+#define WL12XX_MAX_LINKS 12
+
struct wl127x_rx_mem_pool_addr {
u32 addr;
u32 addr_extra;
@@ -79,4 +82,54 @@ struct wl12xx_priv {
struct wl127x_rx_mem_pool_addr *rx_mem_addr;
};
+struct wl12xx_fw_packet_counters {
+ /* Cumulative counter of released packets per AC */
+ u8 tx_released_pkts[NUM_TX_QUEUES];
+
+ /* Cumulative counter of freed packets per HLID */
+ u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
+
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ u8 padding[2];
+} __packed;
+
+/* FW status registers */
+struct wl12xx_fw_status {
+ __le32 intr;
+ u8 fw_rx_counter;
+ u8 drv_rx_counter;
+ u8 reserved;
+ u8 tx_results_counter;
+ __le32 rx_pkt_descs[WL12XX_NUM_RX_DESCRIPTORS];
+
+ __le32 fw_localtime;
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /*
+ * A bitmap (where each bit represents a single HLID) to indicate
+ * if the station is in Fast mode
+ */
+ __le32 link_fast_bitmap;
+
+ /* Cumulative counter of total released mem blocks since FW-reset */
+ __le32 total_released_blks;
+
+ /* Size (in Memory Blocks) of TX pool */
+ __le32 tx_total;
+
+ struct wl12xx_fw_packet_counters counters;
+
+ __le32 log_start_addr;
+} __packed;
+
#endif /* __WL12XX_PRIV_H__ */
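struct wl12xx_fw_status above mirrors the block of memory the firmware exposes, which is why it is __packed and uses explicit __le32 fields; wl12xx_convert_fw_status() in main.c then copies it field by field into the byte-order-neutral struct wl_fw_status that the core driver works with. A stand-alone sketch of that read-and-convert step for a couple of fields (struct and helper names are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Raw layout as the firmware writes it: packed, little-endian fields. */
struct demo_raw_status {
        uint8_t  fw_rx_counter;
        uint8_t  drv_rx_counter;
        uint32_t fw_localtime_le;               /* stored little-endian */
} __attribute__((packed));

/* Host-order view used by the rest of the driver. */
struct demo_status {
        uint8_t  fw_rx_counter;
        uint8_t  drv_rx_counter;
        uint32_t fw_localtime;
};

static uint32_t le32_to_host(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void demo_convert_status(const void *raw, struct demo_status *st)
{
        const struct demo_raw_status *r = raw;

        st->fw_rx_counter = r->fw_rx_counter;
        st->drv_rx_counter = r->drv_rx_counter;
        st->fw_localtime = le32_to_host((const uint8_t *)&r->fw_localtime_le);
}

int main(void)
{
        /* Two counters followed by 0x11223344 in little-endian byte order. */
        uint8_t wire[] = { 7, 3, 0x44, 0x33, 0x22, 0x11 };
        struct demo_raw_status raw;
        struct demo_status st;

        memcpy(&raw, wire, sizeof(raw));
        demo_convert_status(&raw, &st);
        printf("rx=%u drv=%u localtime=0x%08x\n",
               st.fw_rx_counter, st.drv_rx_counter, st.fw_localtime);
        return 0;
}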
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index ec37b16585df..de5b4fa5d166 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -648,7 +648,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
};
/* TODO: maybe move to a new header file? */
-#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-2.bin"
+#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-3.bin"
static int wl18xx_identify_chip(struct wl1271 *wl)
{
@@ -1133,6 +1133,39 @@ static int wl18xx_hw_init(struct wl1271 *wl)
return ret;
}
+static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ struct wl18xx_fw_status *int_fw_status = raw_fw_status;
+
+ fw_status->intr = le32_to_cpu(int_fw_status->intr);
+ fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+ fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+ fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+ fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+ fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+ fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+ fw_status->link_fast_bitmap =
+ le32_to_cpu(int_fw_status->link_fast_bitmap);
+ fw_status->total_released_blks =
+ le32_to_cpu(int_fw_status->total_released_blks);
+ fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+ fw_status->counters.tx_released_pkts =
+ int_fw_status->counters.tx_released_pkts;
+ fw_status->counters.tx_lnk_free_pkts =
+ int_fw_status->counters.tx_lnk_free_pkts;
+ fw_status->counters.tx_voice_released_blks =
+ int_fw_status->counters.tx_voice_released_blks;
+ fw_status->counters.tx_last_rate =
+ int_fw_status->counters.tx_last_rate;
+
+ fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+
+ fw_status->priv = &int_fw_status->priv;
+}
+
static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
struct sk_buff *skb)
@@ -1572,7 +1605,7 @@ static bool wl18xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
{
u8 thold;
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
/* suspended links are never high priority */
@@ -1594,7 +1627,7 @@ static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
{
u8 thold;
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
@@ -1632,6 +1665,7 @@ static struct wlcore_ops wl18xx_ops = {
.tx_immediate_compl = wl18xx_tx_immediate_completion,
.tx_delayed_compl = NULL,
.hw_init = wl18xx_hw_init,
+ .convert_fw_status = wl18xx_convert_fw_status,
.set_tx_desc_csum = wl18xx_set_tx_desc_csum,
.get_pg_ver = wl18xx_get_pg_ver,
.set_rx_csum = wl18xx_set_rx_csum,
@@ -1713,19 +1747,62 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
},
};
+static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_combination
+wl18xx_iface_combinations[] = {
+ {
+ .max_interfaces = 3,
+ .limits = wl18xx_iface_limits,
+ .n_limits = ARRAY_SIZE(wl18xx_iface_limits),
+ .num_different_channels = 2,
+ },
+ {
+ .max_interfaces = 2,
+ .limits = wl18xx_iface_ap_limits,
+ .n_limits = ARRAY_SIZE(wl18xx_iface_ap_limits),
+ .num_different_channels = 1,
+ }
+};
+
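Each ieee80211_iface_combination entry above describes one allowed mix of virtual interfaces: at most max_interfaces in total, per-type caps given by the limits array, all sharing num_different_channels channels. The wireless core enforces this when interfaces are added or change type; the counting itself is simple, roughly as in this hedged sketch (simplified structures, not the mac80211/cfg80211 implementation):

#include <stdbool.h>
#include <stdio.h>

enum demo_iftype { DEMO_STATION, DEMO_AP, DEMO_P2P_CLIENT, DEMO_NUM_TYPES };

struct demo_limit {
        int max;                        /* max interfaces matching this limit */
        unsigned int types;             /* bitmask over enum demo_iftype */
};

struct demo_combination {
        int max_interfaces;
        const struct demo_limit *limits;
        int n_limits;
};

/* Does the requested set of interface types fit one combination entry? */
static bool demo_fits(const struct demo_combination *c,
                      const enum demo_iftype *ifaces, int n)
{
        int used[8] = { 0 };            /* per-limit usage counters */
        int i, l;

        if (n > c->max_interfaces)
                return false;

        for (i = 0; i < n; i++) {
                for (l = 0; l < c->n_limits; l++) {
                        if (!(c->limits[l].types & (1u << ifaces[i])))
                                continue;
                        if (++used[l] > c->limits[l].max)
                                return false;
                        break;
                }
                if (l == c->n_limits)
                        return false;   /* interface type not allowed at all */
        }
        return true;
}

int main(void)
{
        static const struct demo_limit limits[] = {
                { .max = 3, .types = 1u << DEMO_STATION },
                { .max = 1, .types = (1u << DEMO_AP) | (1u << DEMO_P2P_CLIENT) },
        };
        static const struct demo_combination combo = {
                .max_interfaces = 3, .limits = limits, .n_limits = 2,
        };
        enum demo_iftype ok[]  = { DEMO_STATION, DEMO_STATION, DEMO_AP };
        enum demo_iftype bad[] = { DEMO_AP, DEMO_P2P_CLIENT };

        printf("sta+sta+ap: %s\n", demo_fits(&combo, ok, 3) ? "allowed" : "rejected");
        printf("ap+p2p-cli: %s\n", demo_fits(&combo, bad, 2) ? "allowed" : "rejected");
        return 0;
}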
static int wl18xx_setup(struct wl1271 *wl)
{
struct wl18xx_priv *priv = wl->priv;
int ret;
+ BUILD_BUG_ON(WL18XX_MAX_LINKS > WLCORE_MAX_LINKS);
+ BUILD_BUG_ON(WL18XX_MAX_AP_STATIONS > WL18XX_MAX_LINKS);
+
wl->rtable = wl18xx_rtable;
wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS;
- wl->num_channels = 2;
+ wl->num_links = WL18XX_MAX_LINKS;
+ wl->max_ap_stations = WL18XX_MAX_AP_STATIONS;
+ wl->iface_combinations = wl18xx_iface_combinations;
+ wl->n_iface_combinations = ARRAY_SIZE(wl18xx_iface_combinations);
wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
+ wl->fw_status_len = sizeof(struct wl18xx_fw_status);
wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv);
wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv);
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 57c694396647..be1ebd55ac88 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -32,7 +32,7 @@ static
void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
struct ieee80211_tx_rate *rate)
{
- u8 fw_rate = wl->fw_status_2->counters.tx_last_rate;
+ u8 fw_rate = wl->fw_status->counters.tx_last_rate;
if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
wl1271_error("last Tx rate invalid: %d", fw_rate);
@@ -139,7 +139,7 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
void wl18xx_tx_immediate_complete(struct wl1271 *wl)
{
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
struct wl18xx_priv *priv = wl->priv;
u8 i;
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 9204e07ee432..eb7cfe817010 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -26,10 +26,10 @@
/* minimum FW required for driver */
#define WL18XX_CHIP_VER 8
-#define WL18XX_IFTYPE_VER 5
+#define WL18XX_IFTYPE_VER 8
#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
-#define WL18XX_MINOR_VER 39
+#define WL18XX_MINOR_VER 13
#define WL18XX_CMD_MAX_SIZE 740
@@ -40,7 +40,10 @@
#define WL18XX_NUM_MAC_ADDRESSES 3
-#define WL18XX_RX_BA_MAX_SESSIONS 5
+#define WL18XX_RX_BA_MAX_SESSIONS 13
+
+#define WL18XX_MAX_AP_STATIONS 10
+#define WL18XX_MAX_LINKS 16
struct wl18xx_priv {
/* buffer for sending commands to FW */
@@ -109,6 +112,59 @@ struct wl18xx_fw_status_priv {
u8 padding[3];
};
+struct wl18xx_fw_packet_counters {
+ /* Cumulative counter of released packets per AC */
+ u8 tx_released_pkts[NUM_TX_QUEUES];
+
+ /* Cumulative counter of freed packets per HLID */
+ u8 tx_lnk_free_pkts[WL18XX_MAX_LINKS];
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
+
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ u8 padding[2];
+} __packed;
+
+/* FW status registers */
+struct wl18xx_fw_status {
+ __le32 intr;
+ u8 fw_rx_counter;
+ u8 drv_rx_counter;
+ u8 reserved;
+ u8 tx_results_counter;
+ __le32 rx_pkt_descs[WL18XX_NUM_RX_DESCRIPTORS];
+
+ __le32 fw_localtime;
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /*
+ * A bitmap (where each bit represents a single HLID) to indicate
+ * if the station is in Fast mode
+ */
+ __le32 link_fast_bitmap;
+
+ /* Cumulative counter of total released mem blocks since FW-reset */
+ __le32 total_released_blks;
+
+ /* Size (in Memory Blocks) of TX pool */
+ __le32 tx_total;
+
+ struct wl18xx_fw_packet_counters counters;
+
+ __le32 log_start_addr;
+
+ /* Private status to be used by the lower drivers */
+ struct wl18xx_fw_status_priv priv;
+} __packed;
+
#define WL18XX_PHY_VERSION_MAX_LEN 20
struct wl18xx_static_data_priv {
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index ec83675a2446..b924ceadc02c 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -358,7 +358,8 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct acx_beacon_filter_option *beacon_filter = NULL;
int ret = 0;
- wl1271_debug(DEBUG_ACX, "acx beacon filter opt");
+ wl1271_debug(DEBUG_ACX, "acx beacon filter opt enable=%d",
+ enable_filter);
if (enable_filter &&
wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
@@ -1591,7 +1592,8 @@ out:
return ret;
}
-int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 *addr)
{
struct wl1271_acx_inconnection_sta *acx = NULL;
int ret;
@@ -1603,6 +1605,7 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
return -ENOMEM;
memcpy(acx->addr, addr, ETH_ALEN);
+ acx->role_id = wlvif->role_id;
ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
acx, sizeof(*acx));
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 6dcfad9b0472..954d57ec98f4 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -824,7 +824,8 @@ struct wl1271_acx_inconnection_sta {
struct acx_header header;
u8 addr[ETH_ALEN];
- u8 padding1[2];
+ u8 role_id;
+ u8 padding;
} __packed;
/*
@@ -1118,7 +1119,8 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable);
int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 *addr);
int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
int wl12xx_acx_config_hangover(struct wl1271 *wl);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 9b2ecf52449f..40dc30f4faaa 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -60,8 +60,8 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
u16 status;
u16 poll_count = 0;
- if (WARN_ON(wl->state == WLCORE_STATE_RESTARTING &&
- id != CMD_STOP_FWLOGGER))
+ if (unlikely(wl->state == WLCORE_STATE_RESTARTING &&
+ id != CMD_STOP_FWLOGGER))
return -EIO;
cmd = buf;
@@ -312,8 +312,8 @@ static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid)
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
unsigned long flags;
- u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
- if (link >= WL12XX_MAX_LINKS)
+ u8 link = find_first_zero_bit(wl->links_map, wl->num_links);
+ if (link >= wl->num_links)
return -EBUSY;
wl->session_ids[link] = wlcore_get_new_session_id(wl, link);
@@ -324,9 +324,14 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
__set_bit(link, wlvif->links_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
- /* take the last "freed packets" value from the current FW status */
- wl->links[link].prev_freed_pkts =
- wl->fw_status_2->counters.tx_lnk_free_pkts[link];
+ /*
+ * Take the last "freed packets" value from the current FW status.
+ * On recovery we might not have fw_status yet, and
+ * tx_lnk_free_pkts will be NULL, so check for it.
+ */
+ if (wl->fw_status->counters.tx_lnk_free_pkts)
+ wl->links[link].prev_freed_pkts =
+ wl->fw_status->counters.tx_lnk_free_pkts[link];
wl->links[link].wlvif = wlvif;
/*
@@ -1527,6 +1532,7 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cmd->sp_len = sta->max_sp;
cmd->wmm = sta->wme ? 1 : 0;
cmd->session_id = wl->session_ids[hlid];
+ cmd->role_id = wlvif->role_id;
for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
if (sta->wme && (sta->uapsd_queues & BIT(i)))
@@ -1563,7 +1569,8 @@ out:
return ret;
}
-int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid)
{
struct wl12xx_cmd_remove_peer *cmd;
int ret;
@@ -1581,6 +1588,7 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
/* We never send a deauth, mac80211 is in charge of this */
cmd->reason_opcode = 0;
cmd->send_deauth_flag = 0;
+ cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 323d4a856e4b..b084830a61cf 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -88,7 +88,8 @@ int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta, u8 hlid);
-int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid);
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
enum ieee80211_band band);
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
@@ -206,7 +207,7 @@ enum cmd_templ {
#define WL1271_COMMAND_TIMEOUT 2000
#define WL1271_CMD_TEMPL_DFLT_SIZE 252
#define WL1271_CMD_TEMPL_MAX_SIZE 512
-#define WL1271_EVENT_TIMEOUT 1500
+#define WL1271_EVENT_TIMEOUT 5000
struct wl1271_cmd_header {
__le16 id;
@@ -594,6 +595,8 @@ struct wl12xx_cmd_add_peer {
u8 sp_len;
u8 wmm;
u8 session_id;
+ u8 role_id;
+ u8 padding[3];
} __packed;
struct wl12xx_cmd_remove_peer {
@@ -602,7 +605,7 @@ struct wl12xx_cmd_remove_peer {
u8 hlid;
u8 reason_opcode;
u8 send_deauth_flag;
- u8 padding1;
+ u8 role_id;
} __packed;
/*
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 8d3b34965db3..1f9a36031b06 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -67,7 +67,7 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
u8 hlid;
struct wl1271_link *lnk;
for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
- WL12XX_MAX_LINKS) {
+ wl->num_links) {
lnk = &wl->links[hlid];
if (!lnk->ba_bitmap)
continue;
@@ -172,7 +172,7 @@ static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
const u8 *addr;
int h;
- for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+ for_each_set_bit(h, &sta_bitmap, wl->num_links) {
bool found = false;
/* find the ap vif connected to this sta */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 51f8d634d32f..1555ff970050 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -106,6 +106,15 @@ wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
return 0;
}
+static inline void
+wlcore_hw_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ BUG_ON(!wl->ops->convert_fw_status);
+
+ wl->ops->convert_fw_status(wl, raw_fw_status, fw_status);
+}
+
static inline u32
wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 7699f9d07e26..199e94120864 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -287,8 +287,8 @@ static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
if (ret < 0)
return ret;
- /* enable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
+ /* disable beacon filtering until we get the first beacon */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
if (ret < 0)
return ret;
@@ -462,7 +462,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
* If the basic rates contain OFDM rates, use OFDM only
* rates for unicast TX as well. Else use all supported rates.
*/
- if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
+ if (wl->ofdm_only_ap && (wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
supported_rates = CONF_TX_OFDM_RATES;
else
supported_rates = CONF_TX_ENABLED_RATES;
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 07e3d6a049ad..0305729d0986 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -60,7 +60,9 @@ static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
{
int ret;
- if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
+ WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
+ addr != HW_ACCESS_ELP_CTRL_REG)))
return -EIO;
ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
@@ -76,7 +78,9 @@ static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
{
int ret;
- if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
+ WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
+ addr != HW_ACCESS_ELP_CTRL_REG)))
return -EIO;
ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
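The added WARN_ON() in both raw accessors encodes a bus rule: while the chip is in ELP (its low-power sleep state) the only register the host may touch is the ELP control register used to wake it, so any other access is refused with -EIO and loudly flagged. A minimal sketch of such a guarded accessor (flag and register values are placeholders):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_ELP_CTRL_REG       0x1fc   /* the one register allowed during sleep */

struct demo_chip {
        bool io_failed;         /* a previous bus transaction already failed */
        bool in_elp;            /* chip is in its low-power ELP state */
};

static int demo_raw_read(struct demo_chip *chip, int addr, unsigned int *val)
{
        if (chip->io_failed || (chip->in_elp && addr != DEMO_ELP_CTRL_REG)) {
                fprintf(stderr, "refusing read of 0x%x\n", addr);
                return -EIO;
        }
        *val = 0xdeadbeef;      /* pretend the bus returned something */
        return 0;
}

int main(void)
{
        struct demo_chip chip = { .io_failed = false, .in_elp = true };
        unsigned int val;

        printf("wakeup reg: %d\n", demo_raw_read(&chip, DEMO_ELP_CTRL_REG, &val));
        printf("other reg:  %d\n", demo_raw_read(&chip, 0x100, &val));
        return 0;
}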
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b46b3116cc55..ed88d3913483 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -345,24 +345,24 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
* Start high-level PS if the STA is asleep with enough blocks in FW.
* Make an exception if this is the only connected link. In this
* case FW-memory congestion is less of a problem.
- * Note that a single connected STA means 3 active links, since we must
- * account for the global and broadcast AP links. The "fw_ps" check
- * assures us the third link is a STA connected to the AP. Otherwise
- * the FW would not set the PSM bit.
+ * Note that a single connected STA means 2*ap_count + 1 active links,
+ * since we must account for the global and broadcast AP links
+ * for each AP. The "fw_ps" check assures us the other link is a STA
+ * connected to the AP. Otherwise the FW would not set the PSM bit.
*/
- else if (wl->active_link_count > 3 && fw_ps &&
+ else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
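To make the arithmetic in the updated comment concrete: with a single AP interface, 2*ap_count + 1 evaluates to 3, the value that used to be hard-coded, i.e. the AP's global link, its broadcast link and one connected station. With two concurrent AP interfaces the same lone station already accounts for five active links, which the fixed threshold of 3 would have misread as real FW-memory pressure and pushed into high-level PS too early.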
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
- struct wl_fw_status_2 *status)
+ struct wl_fw_status *status)
{
u32 cur_fw_ps_map;
u8 hlid;
- cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
+ cur_fw_ps_map = status->link_ps_bitmap;
if (wl->ap_fw_ps_map != cur_fw_ps_map) {
wl1271_debug(DEBUG_PSM,
"link ps prev 0x%x cur 0x%x changed 0x%x",
@@ -372,77 +372,73 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
wl->ap_fw_ps_map = cur_fw_ps_map;
}
- for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
wl->links[hlid].allocated_pkts);
}
-static int wlcore_fw_status(struct wl1271 *wl,
- struct wl_fw_status_1 *status_1,
- struct wl_fw_status_2 *status_2)
+static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
struct wl12xx_vif *wlvif;
struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
int i;
- size_t status_len;
int ret;
struct wl1271_link *lnk;
- status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
- sizeof(*status_2) + wl->fw_status_priv_len;
-
- ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
- status_len, false);
+ ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
+ wl->raw_fw_status,
+ wl->fw_status_len, false);
if (ret < 0)
return ret;
+ wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
+
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
- status_1->intr,
- status_1->fw_rx_counter,
- status_1->drv_rx_counter,
- status_1->tx_results_counter);
+ status->intr,
+ status->fw_rx_counter,
+ status->drv_rx_counter,
+ status->tx_results_counter);
for (i = 0; i < NUM_TX_QUEUES; i++) {
/* prevent wrap-around in freed-packets counter */
wl->tx_allocated_pkts[i] -=
- (status_2->counters.tx_released_pkts[i] -
+ (status->counters.tx_released_pkts[i] -
wl->tx_pkts_freed[i]) & 0xff;
- wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
+ wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
}
- for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
+ for_each_set_bit(i, wl->links_map, wl->num_links) {
u8 diff;
lnk = &wl->links[i];
/* prevent wrap-around in freed-packets counter */
- diff = (status_2->counters.tx_lnk_free_pkts[i] -
+ diff = (status->counters.tx_lnk_free_pkts[i] -
lnk->prev_freed_pkts) & 0xff;
if (diff == 0)
continue;
lnk->allocated_pkts -= diff;
- lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
+ lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
/* accumulate the prev_freed_pkts counter */
lnk->total_freed_pkts += diff;
}
/* prevent wrap-around in total blocks counter */
- if (likely(wl->tx_blocks_freed <=
- le32_to_cpu(status_2->total_released_blks)))
- freed_blocks = le32_to_cpu(status_2->total_released_blks) -
+ if (likely(wl->tx_blocks_freed <= status->total_released_blks))
+ freed_blocks = status->total_released_blks -
wl->tx_blocks_freed;
else
freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
- le32_to_cpu(status_2->total_released_blks);
+ status->total_released_blks;
- wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
+ wl->tx_blocks_freed = status->total_released_blks;
wl->tx_allocated_blocks -= freed_blocks;
@@ -458,7 +454,7 @@ static int wlcore_fw_status(struct wl1271 *wl,
cancel_delayed_work(&wl->tx_watchdog_work);
}
- avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
+ avail = status->tx_total - wl->tx_allocated_blocks;
/*
* The FW might change the total number of TX memblocks before
@@ -477,15 +473,15 @@ static int wlcore_fw_status(struct wl1271 *wl,
/* for AP update num of allocated TX blocks per link and ps status */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
- wl12xx_irq_update_links_status(wl, wlvif, status_2);
+ wl12xx_irq_update_links_status(wl, wlvif, status);
}
/* update the host-chipset time offset */
getnstimeofday(&ts);
wl->time_offset = (timespec_to_ns(&ts) >> 10) -
- (s64)le32_to_cpu(status_2->fw_localtime);
+ (s64)(status->fw_localtime);
- wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
+ wl->fw_fast_lnk_map = status->link_fast_bitmap;
return 0;
}
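Both "prevent wrap-around" comments in this function rely on the same trick: the firmware counters are free-running and narrower than the host arithmetic (the per-AC and per-link packet counters are 8 bits wide, the total released-blocks counter 32 bits), so the delta is taken in the counter's own width, and the 32-bit case is widened explicitly when the new value has lapped the old one. A stand-alone illustration:

#include <stdint.h>
#include <stdio.h>

/* Delta of a free-running 8-bit counter, safe across the 0xff -> 0x00 wrap. */
static uint8_t delta_u8(uint8_t newer, uint8_t older)
{
        return (uint8_t)(newer - older);        /* modulo-256 arithmetic */
}

/* Same idea for a 32-bit counter, widened to 64 bits as the driver does. */
static uint64_t delta_u32(uint32_t newer, uint32_t older)
{
        if (newer >= older)
                return newer - older;
        return 0x100000000ULL - older + newer;
}

int main(void)
{
        printf("8-bit:  %u packets freed\n", delta_u8(0x02, 0xfe));             /* 4 */
        printf("32-bit: %llu blocks freed\n",
               (unsigned long long)delta_u32(5, 0xfffffffe));                   /* 7 */
        return 0;
}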
@@ -549,13 +545,13 @@ static int wlcore_irq_locked(struct wl1271 *wl)
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_clear_bit();
- ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ ret = wlcore_fw_status(wl, wl->fw_status);
if (ret < 0)
goto out;
wlcore_hw_tx_immediate_compl(wl);
- intr = le32_to_cpu(wl->fw_status_1->intr);
+ intr = wl->fw_status->intr;
intr &= WLCORE_ALL_INTR_MASK;
if (!intr) {
done = true;
@@ -584,7 +580,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
- ret = wlcore_rx(wl, wl->fw_status_1);
+ ret = wlcore_rx(wl, wl->fw_status);
if (ret < 0)
goto out;
@@ -786,10 +782,11 @@ out:
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
- WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
-
/* Avoid a recursive recovery */
if (wl->state == WLCORE_STATE_ON) {
+ WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
+ &wl->flags));
+
wl->state = WLCORE_STATE_RESTARTING;
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
wl1271_ps_elp_wakeup(wl);
@@ -803,7 +800,7 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
size_t len;
/* Make sure we have enough room */
- len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
+ len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
/* Fill the FW log file, consumed by the sysfs fwlog entry */
memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
@@ -843,11 +840,11 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
wl12xx_cmd_stop_fwlog(wl);
/* Read the first memory block address */
- ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ ret = wlcore_fw_status(wl, wl->fw_status);
if (ret < 0)
goto out;
- addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
+ addr = wl->fw_status->log_start_addr;
if (!addr)
goto out;
@@ -990,23 +987,23 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
static int wl1271_setup(struct wl1271 *wl)
{
- wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
- sizeof(*wl->fw_status_2) +
- wl->fw_status_priv_len, GFP_KERNEL);
- if (!wl->fw_status_1)
- return -ENOMEM;
+ wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
+ if (!wl->raw_fw_status)
+ goto err;
- wl->fw_status_2 = (struct wl_fw_status_2 *)
- (((u8 *) wl->fw_status_1) +
- WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
+ wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
+ if (!wl->fw_status)
+ goto err;
wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
- if (!wl->tx_res_if) {
- kfree(wl->fw_status_1);
- return -ENOMEM;
- }
+ if (!wl->tx_res_if)
+ goto err;
return 0;
+err:
+ kfree(wl->fw_status);
+ kfree(wl->raw_fw_status);
+ return -ENOMEM;
}
static int wl12xx_set_power_on(struct wl1271 *wl)
@@ -1767,6 +1764,12 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
flush_work(&wl->tx_work);
flush_delayed_work(&wl->elp_work);
+ /*
+ * Cancel the watchdog even if above tx_flush failed. We will detect
+ * it on resume anyway.
+ */
+ cancel_delayed_work(&wl->tx_watchdog_work);
+
return 0;
}
@@ -1824,6 +1827,13 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
out:
wl->wow_enabled = false;
+
+ /*
+ * Set a flag to re-init the watchdog on the first Tx after resume.
+ * That way we avoid possible conditions where Tx-complete interrupts
+ * fail to arrive and we perform a spurious recovery.
+ */
+ set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
mutex_unlock(&wl->mutex);
return 0;
@@ -1914,6 +1924,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
memset(wl->links_map, 0, sizeof(wl->links_map));
memset(wl->roc_map, 0, sizeof(wl->roc_map));
memset(wl->session_ids, 0, sizeof(wl->session_ids));
+ memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
wl->active_sta_count = 0;
wl->active_link_count = 0;
@@ -1938,9 +1949,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
wl1271_debugfs_reset(wl);
- kfree(wl->fw_status_1);
- wl->fw_status_1 = NULL;
- wl->fw_status_2 = NULL;
+ kfree(wl->raw_fw_status);
+ wl->raw_fw_status = NULL;
+ kfree(wl->fw_status);
+ wl->fw_status = NULL;
kfree(wl->tx_res_if);
wl->tx_res_if = NULL;
kfree(wl->target_mem_map);
@@ -2571,10 +2583,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
ieee80211_scan_completed(wl->hw, true);
}
- if (wl->sched_vif == wlvif) {
- ieee80211_sched_scan_stopped(wl->hw);
+ if (wl->sched_vif == wlvif)
wl->sched_vif = NULL;
- }
if (wl->roc_vif == vif) {
wl->roc_vif = NULL;
@@ -2931,6 +2941,11 @@ static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
return ret;
+
+ /* disable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
}
if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
@@ -3463,6 +3478,10 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
key_idx);
+ /* we don't handle unsetting of default key */
+ if (key_idx == -1)
+ return;
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
@@ -3649,8 +3668,8 @@ out:
return ret;
}
-static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
@@ -3672,6 +3691,8 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
+
+ return 0;
}
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@@ -4298,6 +4319,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
}
+ if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
+ /* enable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
+ if (ret < 0)
+ goto out;
+ }
+
ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
@@ -4651,7 +4679,7 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
int ret;
- if (wl->active_sta_count >= AP_MAX_STATIONS) {
+ if (wl->active_sta_count >= wl->max_ap_stations) {
wl1271_warning("could not allocate HLID - too much stations");
return -EBUSY;
}
@@ -4754,7 +4782,7 @@ static int wl12xx_sta_remove(struct wl1271 *wl,
if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
return -EINVAL;
- ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
+ ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
if (ret < 0)
return ret;
@@ -5679,28 +5707,6 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
}
-static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
- {
- .max = 3,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT),
- },
-};
-
-static struct ieee80211_iface_combination
-wlcore_iface_combinations[] = {
- {
- .max_interfaces = 3,
- .limits = wlcore_iface_limits,
- .n_limits = ARRAY_SIZE(wlcore_iface_limits),
- },
-};
-
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
int i;
@@ -5733,7 +5739,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
- IEEE80211_HW_QUEUE_CONTROL;
+ IEEE80211_HW_QUEUE_CONTROL |
+ IEEE80211_HW_CHANCTX_STA_CSA;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -5821,10 +5828,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
/* allowed interface combinations */
- wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
- wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
- wl->hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(wlcore_iface_combinations);
+ wl->hw->wiphy->iface_combinations = wl->iface_combinations;
+ wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
SET_IEEE80211_DEV(wl->hw, wl->dev);
@@ -5844,8 +5849,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
int i, j, ret;
unsigned int order;
- BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
-
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
wl1271_error("could not alloc ieee80211_hw");
@@ -5867,8 +5870,12 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
wl->hw = hw;
+ /*
+ * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
+ * We don't allocate any additional resources here, so that's fine.
+ */
for (i = 0; i < NUM_TX_QUEUES; i++)
- for (j = 0; j < WL12XX_MAX_LINKS; j++)
+ for (j = 0; j < WLCORE_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
skb_queue_head_init(&wl->deferred_rx_queue);
@@ -6011,7 +6018,8 @@ int wlcore_free_hw(struct wl1271 *wl)
kfree(wl->nvs);
wl->nvs = NULL;
- kfree(wl->fw_status_1);
+ kfree(wl->raw_fw_status);
+ kfree(wl->fw_status);
kfree(wl->tx_res_if);
destroy_workqueue(wl->freezable_wq);
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 26bfc365ba70..b52516eed7b2 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -280,7 +280,11 @@ void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
- if (test_bit(hlid, &wl->ap_ps_map))
+ if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
+ return;
+
+ if (!test_bit(hlid, wlvif->ap.sta_hlid_map) ||
+ test_bit(hlid, &wl->ap_ps_map))
return;
wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 6791a1a6afba..e125974285cc 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -203,9 +203,9 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
return is_data;
}
-int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status)
{
- unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
+ unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
u32 buf_size;
u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
@@ -263,12 +263,12 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
wl->aggr_buf + pkt_offset,
pkt_len, rx_align,
&hlid) == 1) {
- if (hlid < WL12XX_MAX_LINKS)
+ if (hlid < wl->num_links)
__set_bit(hlid, active_hlids);
else
WARN(1,
- "hlid exceeded WL12XX_MAX_LINKS "
- "(%d)\n", hlid);
+ "hlid (%d) exceeded MAX_LINKS\n",
+ hlid);
}
wl->rx_counter++;
@@ -302,7 +302,7 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
{
int ret;
- if (wl->rx_filter_enabled[index] == enable) {
+ if (!!test_bit(index, wl->rx_filter_enabled) == enable) {
wl1271_warning("Request to enable an already "
"enabled rx filter %d", index);
return 0;
@@ -316,7 +316,10 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
return ret;
}
- wl->rx_filter_enabled[index] = enable;
+ if (enable)
+ __set_bit(index, wl->rx_filter_enabled);
+ else
+ __clear_bit(index, wl->rx_filter_enabled);
return 0;
}
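rx_filter_enabled changes here from an array of bools to a bitmap, so each filter's state is a single bit queried with test_bit() and flipped with __set_bit()/__clear_bit(). The same idea in portable C, with simplified helpers standing in for the kernel's bitops (sized for a handful of filters):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAX_RX_FILTERS     5
#define DEMO_BITS_PER_LONG      (8 * (int)sizeof(unsigned long))
#define DEMO_BITMAP_LONGS(n)    (((n) + DEMO_BITS_PER_LONG - 1) / DEMO_BITS_PER_LONG)

static bool demo_test_bit(int nr, const unsigned long *map)
{
        return (map[nr / DEMO_BITS_PER_LONG] >> (nr % DEMO_BITS_PER_LONG)) & 1UL;
}

static void demo_set_bit(int nr, unsigned long *map)
{
        map[nr / DEMO_BITS_PER_LONG] |= 1UL << (nr % DEMO_BITS_PER_LONG);
}

static void demo_clear_bit(int nr, unsigned long *map)
{
        map[nr / DEMO_BITS_PER_LONG] &= ~(1UL << (nr % DEMO_BITS_PER_LONG));
}

int main(void)
{
        unsigned long rx_filter_enabled[DEMO_BITMAP_LONGS(DEMO_MAX_RX_FILTERS)] = { 0 };
        int i;

        demo_set_bit(1, rx_filter_enabled);
        demo_set_bit(3, rx_filter_enabled);
        demo_clear_bit(1, rx_filter_enabled);

        for (i = 0; i < DEMO_MAX_RX_FILTERS; i++)
                printf("filter %d: %s\n", i,
                       demo_test_bit(i, rx_filter_enabled) ? "enabled" : "disabled");
        return 0;
}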
@@ -326,7 +329,7 @@ int wl1271_rx_filter_clear_all(struct wl1271 *wl)
int i, ret = 0;
for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
- if (!wl->rx_filter_enabled[i])
+ if (!test_bit(i, wl->rx_filter_enabled))
continue;
ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
if (ret)
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index 3363f60fb7da..a3b1618db27c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -142,7 +142,7 @@ struct wl1271_rx_descriptor {
u8 reserved;
} __packed;
-int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status);
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
int wl1271_rx_filter_enable(struct wl1271 *wl,
int index, bool enable,
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index b2c018dccf18..dbe826dd7c23 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -211,7 +211,7 @@ static int __must_check wl12xx_spi_raw_read(struct device *child, int addr,
u32 chunk_len;
while (len > 0) {
- chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);
+ chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len);
cmd = &wl->buffer_cmd;
busy_buf = wl->buffer_busyword;
@@ -285,7 +285,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
cmd = &commands[0];
i = 0;
while (len > 0) {
- chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);
+ chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len);
*cmd = 0;
*cmd |= WSPI_CMD_WRITE;
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index 8e583497940d..24dd288d6809 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -152,7 +152,7 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
}
/* Seeking is not supported - old logs are not kept. Disregard pos. */
- len = min(count, (size_t)wl->fwlog_size);
+ len = min_t(size_t, count, wl->fwlog_size);
wl->fwlog_size -= len;
memcpy(buffer, wl->fwlog, len);
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 87cd707affa2..40b43115f835 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -101,7 +101,7 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
* authentication response. this way it won't get de-authed by FW
* when transmitting too soon.
*/
- wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+ wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);
/*
* ROC for 1 second on the AP channel for completing the connection.
@@ -134,12 +134,12 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
* into high-level PS and clean out its TX queues.
* Make an exception if this is the only connected link. In this
* case FW-memory congestion is less of a problem.
- * Note that a single connected STA means 3 active links, since we must
- * account for the global and broadcast AP links. The "fw_ps" check
- * assures us the third link is a STA connected to the AP. Otherwise
- * the FW would not set the PSM bit.
+ * Note that a single connected STA means 2*ap_count + 1 active links,
+ * since we must account for the global and broadcast AP links
+ * for each AP. The "fw_ps" check assures us the other link is a STA
+ * connected to the AP. Otherwise the FW would not set the PSM bit.
*/
- if (wl->active_link_count > 3 && fw_ps &&
+ if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
@@ -234,8 +234,13 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl->tx_blocks_available -= total_blocks;
wl->tx_allocated_blocks += total_blocks;
- /* If the FW was empty before, arm the Tx watchdog */
- if (wl->tx_allocated_blocks == total_blocks)
+ /*
+ * If the FW was empty before, arm the Tx watchdog. Also do
+ * this on the first Tx after resume, as we always cancel the
+ * watchdog on suspend.
+ */
+ if (wl->tx_allocated_blocks == total_blocks ||
+ test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
wl12xx_rearm_tx_watchdog_locked(wl);
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -357,6 +362,10 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ieee80211_has_protected(frame_control))
tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
+ /* send EAPOL frames as voice */
+ if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
+ tx_attr |= TX_HW_ATTR_EAPOL_FRAME;
+
desc->tx_attr = cpu_to_le16(tx_attr);
wlcore_hw_set_tx_desc_csum(wl, desc, skb);
@@ -560,11 +569,11 @@ static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
int i, h, start_hlid;
/* start from the link after the last one */
- start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
+ start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;
/* dequeue according to AC, round robin on each link */
- for (i = 0; i < WL12XX_MAX_LINKS; i++) {
- h = (start_hlid + i) % WL12XX_MAX_LINKS;
+ for (i = 0; i < wl->num_links; i++) {
+ h = (start_hlid + i) % wl->num_links;
/* only consider connected stations */
if (!test_bit(h, wlvif->links_map))
@@ -688,8 +697,8 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
- wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
- WL12XX_MAX_LINKS;
+ wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
+ wl->num_links;
}
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -722,7 +731,7 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
timeout = wl->conf.rx_streaming.duration;
wl12xx_for_each_wlvif_sta(wl, wlvif) {
bool found = false;
- for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+ for_each_set_bit(hlid, active_hlids, wl->num_links) {
if (test_bit(hlid, wlvif->links_map)) {
found = true;
break;
@@ -759,7 +768,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
struct wl1271_tx_hw_descr *desc;
u32 buf_offset = 0, last_len = 0;
bool sent_packets = false;
- unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
+ unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
int ret = 0;
int bus_ret = 0;
u8 hlid;
@@ -1061,7 +1070,7 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
int i;
/* TX failure */
- for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+ for_each_set_bit(i, wlvif->links_map, wl->num_links) {
if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
/* this calls wl12xx_free_link */
@@ -1085,7 +1094,7 @@ void wl12xx_tx_reset(struct wl1271 *wl)
/* only reset the queues if something bad happened */
if (wl1271_tx_total_queue_count(wl) != 0) {
- for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ for (i = 0; i < wl->num_links; i++)
wl1271_tx_reset_link_queues(wl, i);
for (i = 0; i < NUM_TX_QUEUES; i++)
@@ -1178,7 +1187,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
WL1271_TX_FLUSH_TIMEOUT / 1000);
/* forcibly flush all Tx buffers on our queues */
- for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ for (i = 0; i < wl->num_links; i++)
wl1271_tx_reset_link_queues(wl, i);
out_wake:
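
The dequeue path above now wraps on the runtime wl->num_links instead of the old compile-time constant. A minimal user-space model of that round robin, with illustrative names rather than the driver's:

    #include <stdio.h>

    /* Walk every link once, starting just after the last serviced one. */
    static int next_busy_link(int last_tx_hlid, int num_links, const int *has_traffic)
    {
            int start = (last_tx_hlid + 1) % num_links;

            for (int i = 0; i < num_links; i++) {
                    int h = (start + i) % num_links;

                    if (has_traffic[h])
                            return h;
            }
            return -1;      /* nothing queued on any link */
    }

    int main(void)
    {
            int busy[6] = { 0, 0, 1, 0, 1, 0 };

            /* Last served link was 4, so link 2 is picked before 4 comes around. */
            printf("%d\n", next_busy_link(4, 6, busy));
            return 0;
    }
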
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 35489c300da1..79cb3ff8b71f 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -37,6 +37,7 @@
#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12)
#define TX_HW_ATTR_TX_DUMMY_REQ BIT(13)
#define TX_HW_ATTR_HOST_ENCRYPT BIT(14)
+#define TX_HW_ATTR_EAPOL_FRAME BIT(15)
#define TX_HW_ATTR_OFST_SAVE_RETRIES 0
#define TX_HW_ATTR_OFST_HEADER_PAD 1
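
For reference, a small sketch of how the attribute word is assembled; the port-control check mirrors the tx.c hunk above, while the helper and constants here are simplified stand-ins, not the driver's code:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)                  (1u << (n))
    #define ATTR_HOST_ENCRYPT       BIT(14) /* stand-in for TX_HW_ATTR_HOST_ENCRYPT */
    #define ATTR_EAPOL_FRAME        BIT(15) /* stand-in for TX_HW_ATTR_EAPOL_FRAME */

    /* Build a per-descriptor attribute word from two frame properties. */
    static uint16_t build_tx_attr(int host_encrypt, int is_port_ctrl_proto)
    {
            uint16_t attr = 0;

            if (host_encrypt)
                    attr |= ATTR_HOST_ENCRYPT;
            /* EAPOL (802.1X port-control) frames are tagged so the FW sends them as voice. */
            if (is_port_ctrl_proto)
                    attr |= ATTR_EAPOL_FRAME;
            return attr;
    }

    int main(void)
    {
            printf("0x%04x\n", build_tx_attr(1, 1));        /* 0xc000 */
            return 0;
    }
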
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 06efc12a39e5..95a54504f0cc 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -73,6 +73,8 @@ struct wlcore_ops {
void (*tx_immediate_compl)(struct wl1271 *wl);
int (*hw_init)(struct wl1271 *wl);
int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+ void (*convert_fw_status)(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status);
u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
struct wl12xx_vif *wlvif);
int (*get_pg_ver)(struct wl1271 *wl, s8 *ver);
@@ -220,7 +222,7 @@ struct wl1271 {
int channel;
u8 system_hlid;
- unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+ unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long rate_policies_map[
@@ -228,7 +230,7 @@ struct wl1271 {
unsigned long klv_templates_map[
BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
- u8 session_ids[WL12XX_MAX_LINKS];
+ u8 session_ids[WLCORE_MAX_LINKS];
struct list_head wlvif_list;
@@ -346,8 +348,8 @@ struct wl1271 {
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
- struct wl_fw_status_1 *fw_status_1;
- struct wl_fw_status_2 *fw_status_2;
+ void *raw_fw_status;
+ struct wl_fw_status *fw_status;
struct wl1271_tx_hw_res_if *tx_res_if;
/* Current chipset configuration */
@@ -376,7 +378,7 @@ struct wl1271 {
* AP-mode - links indexed by HLID. The global and broadcast links
* are always active.
*/
- struct wl1271_link links[WL12XX_MAX_LINKS];
+ struct wl1271_link links[WLCORE_MAX_LINKS];
/* number of currently active links */
int active_link_count;
@@ -405,6 +407,9 @@ struct wl1271 {
/* AP-mode - number of currently connected stations */
int active_sta_count;
+ /* Flag determining whether AP should broadcast OFDM-only rates */
+ bool ofdm_only_ap;
+
/* last wlvif we transmitted from */
struct wl12xx_vif *last_wlvif;
@@ -434,6 +439,10 @@ struct wl1271 {
u32 num_tx_desc;
/* number of RX descriptors the HW supports. */
u32 num_rx_desc;
+ /* number of links the HW supports */
+ u8 num_links;
+ /* max stations a single AP can support */
+ u8 max_ap_stations;
/* translate HW Tx rates to standard rate-indices */
const u8 **band_rate_to_idx;
@@ -448,10 +457,11 @@ struct wl1271 {
struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS];
/* size of the private FW status data */
+ size_t fw_status_len;
size_t fw_status_priv_len;
/* RX Data filter rule state - enabled/disabled */
- bool rx_filter_enabled[WL1271_MAX_RX_FILTERS];
+ unsigned long rx_filter_enabled[BITS_TO_LONGS(WL1271_MAX_RX_FILTERS)];
/* size of the private static data */
size_t static_data_priv_len;
@@ -476,8 +486,9 @@ struct wl1271 {
struct completion nvs_loading_complete;
- /* number of concurrent channels the HW supports */
- u32 num_channels;
+ /* interface combinations supported by the hw */
+ const struct ieee80211_iface_combination *iface_combinations;
+ u8 n_iface_combinations;
};
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
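
The rx_filter_enabled change above swaps a bool array for a bitmap, so filter state can be tested and flipped with the usual bitops. A stand-alone model of the same bookkeeping in plain C (not the kernel's set_bit/test_bit, and with an illustrative filter count):

    #include <limits.h>
    #include <stdio.h>

    #define MAX_RX_FILTERS   5      /* stands in for WL1271_MAX_RX_FILTERS */
    #define BITS_PER_LONG    (sizeof(unsigned long) * CHAR_BIT)
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static unsigned long rx_filter_enabled[BITS_TO_LONGS(MAX_RX_FILTERS)];

    static void filter_set(int idx)
    {
            rx_filter_enabled[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
    }

    static void filter_clear(int idx)
    {
            rx_filter_enabled[idx / BITS_PER_LONG] &= ~(1UL << (idx % BITS_PER_LONG));
    }

    static int filter_test(int idx)
    {
            return !!(rx_filter_enabled[idx / BITS_PER_LONG] & (1UL << (idx % BITS_PER_LONG)));
    }

    int main(void)
    {
            filter_set(3);
            printf("filter 3: %d, filter 1: %d\n", filter_test(3), filter_test(1));
            filter_clear(3);
            printf("filter 3 after clear: %d\n", filter_test(3));
            return 0;
    }
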
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index ce7261ce8b59..756e890bc5ee 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -58,10 +58,15 @@
#define WL1271_DEFAULT_DTIM_PERIOD 1
#define WL12XX_MAX_ROLES 4
-#define WL12XX_MAX_LINKS 12
#define WL12XX_INVALID_ROLE_ID 0xff
#define WL12XX_INVALID_LINK_ID 0xff
+/*
+ * Max number of links allowed across all HW types.
+ * This is NOT the actual max links supported by the current hw.
+ */
+#define WLCORE_MAX_LINKS 16
+
/* the driver supports the 2.4Ghz and 5Ghz bands */
#define WLCORE_NUM_BANDS 2
@@ -118,72 +123,58 @@ struct wl1271_chip {
#define NUM_TX_QUEUES 4
-#define AP_MAX_STATIONS 8
-
-struct wl_fw_packet_counters {
- /* Cumulative counter of released packets per AC */
- u8 tx_released_pkts[NUM_TX_QUEUES];
-
- /* Cumulative counter of freed packets per HLID */
- u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
-
- /* Cumulative counter of released Voice memory blocks */
- u8 tx_voice_released_blks;
-
- /* Tx rate of the last transmitted packet */
- u8 tx_last_rate;
-
- u8 padding[2];
-} __packed;
-
-/* FW status registers */
-struct wl_fw_status_1 {
- __le32 intr;
+struct wl_fw_status {
+ u32 intr;
u8 fw_rx_counter;
u8 drv_rx_counter;
- u8 reserved;
u8 tx_results_counter;
- __le32 rx_pkt_descs[0];
-} __packed;
-
-/*
- * Each HW arch has a different number of Rx descriptors.
- * The length of the status depends on it, since it holds an array
- * of descriptors.
- */
-#define WLCORE_FW_STATUS_1_LEN(num_rx_desc) \
- (sizeof(struct wl_fw_status_1) + \
- (sizeof(((struct wl_fw_status_1 *)0)->rx_pkt_descs[0])) * \
- num_rx_desc)
+ __le32 *rx_pkt_descs;
-struct wl_fw_status_2 {
- __le32 fw_localtime;
+ u32 fw_localtime;
/*
* A bitmap (where each bit represents a single HLID)
* to indicate if the station is in PS mode.
*/
- __le32 link_ps_bitmap;
+ u32 link_ps_bitmap;
/*
* A bitmap (where each bit represents a single HLID) to indicate
* if the station is in Fast mode
*/
- __le32 link_fast_bitmap;
+ u32 link_fast_bitmap;
/* Cumulative counter of total released mem blocks since FW-reset */
- __le32 total_released_blks;
+ u32 total_released_blks;
/* Size (in Memory Blocks) of TX pool */
- __le32 tx_total;
+ u32 tx_total;
+
+ struct {
+ /*
+ * Cumulative counter of released packets per AC
+ * (length of the array is NUM_TX_QUEUES)
+ */
+ u8 *tx_released_pkts;
- struct wl_fw_packet_counters counters;
+ /*
+ * Cumulative counter of freed packets per HLID
+ * (length of the array is wl->num_links)
+ */
+ u8 *tx_lnk_free_pkts;
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
- __le32 log_start_addr;
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+ } counters;
+
+ u32 log_start_addr;
/* Private status to be used by the lower drivers */
- u8 priv[0];
-} __packed;
+ void *priv;
+};
#define WL1271_MAX_CHANNELS 64
struct wl1271_scan {
@@ -240,6 +231,7 @@ enum wl12xx_flags {
WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
WL1271_FLAG_INTENDED_FW_RECOVERY,
WL1271_FLAG_IO_FAILED,
+ WL1271_FLAG_REINIT_TX_WDOG,
};
enum wl12xx_vif_flags {
@@ -368,7 +360,7 @@ struct wl12xx_vif {
/* HLIDs bitmap of associated stations */
unsigned long sta_hlid_map[BITS_TO_LONGS(
- WL12XX_MAX_LINKS)];
+ WLCORE_MAX_LINKS)];
/* recorded keys - set here before AP startup */
struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
@@ -385,7 +377,7 @@ struct wl12xx_vif {
/* counters of packets per AC, across all links in the vif */
int tx_queue_count[NUM_TX_QUEUES];
- unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+ unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 ssid_len;
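
wl_fw_status above is now a host-endian, pointer-based view that each chip family fills in through the new convert_fw_status op from its own packed wire layout. A rough sketch of what such a conversion looks like, using a made-up raw layout and only a subset of the fields (the real per-chip structs differ):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical packed layout as the FW writes it (little-endian on the wire). */
    struct raw_status {
            uint32_t intr;
            uint8_t  fw_rx_counter;
            uint8_t  drv_rx_counter;
            uint8_t  tx_results_counter;
            uint32_t fw_localtime;
    } __attribute__((packed));

    /* Host-endian, driver-internal view, like the struct in the hunk above. */
    struct fw_status {
            uint32_t intr;
            uint8_t  fw_rx_counter;
            uint8_t  drv_rx_counter;
            uint8_t  tx_results_counter;
            uint32_t fw_localtime;
    };

    /* Per-chip "convert_fw_status"-style hook: copy out and byte-swap. */
    static void convert_fw_status(const void *raw, struct fw_status *st)
    {
            struct raw_status r;

            memcpy(&r, raw, sizeof(r));     /* the raw buffer may be unaligned */
            st->intr               = le32toh(r.intr);
            st->fw_rx_counter      = r.fw_rx_counter;
            st->drv_rx_counter     = r.drv_rx_counter;
            st->tx_results_counter = r.tx_results_counter;
            st->fw_localtime       = le32toh(r.fw_localtime);
    }

    int main(void)
    {
            struct raw_status raw = { .intr = htole32(0x1234), .fw_localtime = htole32(99) };
            struct fw_status st;

            convert_fw_status(&raw, &st);
            printf("intr=0x%x localtime=%u\n", (unsigned)st.intr, (unsigned)st.fw_localtime);
            return 0;
    }
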
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index d24d4a958c67..d5c371d77ddf 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -42,8 +42,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <linux/etherdevice.h>
+#include <net/cfg80211.h>
#include <net/iw_handler.h>
@@ -1454,7 +1453,8 @@ static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
{
struct wl3501_card *this = netdev_priv(dev);
- wrqu->freq.m = ieee80211_dsss_chan_to_freq(this->chan) * 100000;
+ wrqu->freq.m = 100000 *
+ ieee80211_channel_to_frequency(this->chan, IEEE80211_BAND_2GHZ);
wrqu->freq.e = 1;
return 0;
}
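
The Wireless Extensions iw_freq pair above encodes the frequency as m x 10^e Hz. A quick stand-alone check of the arithmetic for 2.4 GHz channels; the channel-to-MHz mapping below is the standard 2407 + 5*chan formula that ieee80211_channel_to_frequency applies for this band, re-derived here only for illustration:

    #include <stdio.h>

    /* 2.4 GHz mapping: channel 14 is special, others are 2407 + 5 * chan (MHz). */
    static int chan_to_mhz_2ghz(int chan)
    {
            return chan == 14 ? 2484 : 2407 + chan * 5;
    }

    int main(void)
    {
            int chan = 6;
            long m = 100000L * chan_to_mhz_2ghz(chan);  /* mantissa, as in the hunk above */
            int e = 1;                                  /* exponent */

            /* m * 10^e Hz: 243700000 * 10 = 2.437 GHz for channel 6. */
            printf("chan %d -> %ld x 10^%d Hz\n", chan, m, e);
            return 0;
    }
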
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index d39c4178c33a..6f5c793a7855 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -18,7 +18,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
-#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
#include <net/iw_handler.h>
#include <linux/string.h>
#include <linux/if_arp.h>
@@ -914,11 +914,8 @@ static int zd1201_set_freq(struct net_device *dev,
if (freq->e == 0)
channel = freq->m;
- else {
- channel = ieee80211_freq_to_dsss_chan(freq->m);
- if (channel < 0)
- channel = 0;
- }
+ else
+ channel = ieee80211_frequency_to_channel(freq->m);
err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel);
if (err)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ae413a2cbee7..630a3fcf65bc 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -48,37 +48,19 @@
typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)
-/* For the head field in pending_tx_info: it is used to indicate
- * whether this tx info is the head of one or more coalesced requests.
- *
- * When head != INVALID_PENDING_RING_IDX, it means the start of a new
- * tx requests queue and the end of previous queue.
- *
- * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
- *
- * ...|0 I I I|5 I|9 I I I|...
- * -->|<-INUSE----------------
- *
- * After consuming the first slot(s) we have:
- *
- * ...|V V V V|5 I|9 I I I|...
- * -----FREE->|<-INUSE--------
- *
- * where V stands for "valid pending ring index". Any number other
- * than INVALID_PENDING_RING_IDX is OK. These entries are considered
- * free and can contain any number other than
- * INVALID_PENDING_RING_IDX. In practice we use 0.
- *
- * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the
- * above example) number is the index into pending_tx_info and
- * mmap_pages arrays.
- */
struct pending_tx_info {
- struct xen_netif_tx_request req; /* coalesced tx request */
- pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
- * if it is head of one or more tx
- * reqs
- */
+ struct xen_netif_tx_request req; /* tx request */
+ /* Callback data for released SKBs. The callback is always
+ * xenvif_zerocopy_callback, desc contains the pending_idx, which is
+ * also an index into the pending_tx_info array. It is initialized in
+ * xenvif_alloc and it never changes.
+ * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
+ * callback_struct in this array of struct pending_tx_info's; ctx then
+ * points to the next one, or is NULL if there are no more slots for this skb.
+ * ubuf_to_vif is a helper which finds the struct xenvif from a pointer
+ * to this field.
+ */
+ struct ubuf_info callback_struct;
};
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
@@ -99,7 +81,7 @@ struct xenvif_rx_meta {
#define MAX_BUFFER_OFFSET PAGE_SIZE
-#define MAX_PENDING_REQS 256
+#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
/* It's possible for an skb to have a maximal number of frags
* but still be less than MAX_BUFFER_OFFSET in size. Thus the
@@ -108,11 +90,25 @@ struct xenvif_rx_meta {
*/
#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+#define NETBACK_INVALID_HANDLE -1
+
+/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
+ * the maximum slots a valid packet can use. Now this value is defined
+ * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
+ * all backends.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
unsigned int handle;
+ /* Is this interface disabled? True when the backend discovers the
+ * frontend is rogue.
+ */
+ bool disabled;
+
/* Use NAPI for guest TX */
struct napi_struct napi;
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
@@ -126,13 +122,27 @@ struct xenvif {
pending_ring_idx_t pending_cons;
u16 pending_ring[MAX_PENDING_REQS];
struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
-
- /* Coalescing tx requests before copying makes number of grant
- * copy ops greater or equal to number of slots required. In
- * worst case a tx request consumes 2 gnttab_copy.
+ grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+
+ struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+ struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+ struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+ /* passed to gnttab_[un]map_refs with pages under (un)mapping */
+ struct page *pages_to_map[MAX_PENDING_REQS];
+ struct page *pages_to_unmap[MAX_PENDING_REQS];
+
+ /* This prevents zerocopy callbacks from racing over the dealloc_ring */
+ spinlock_t callback_lock;
+ /* This prevents the dealloc thread and the NAPI instance from racing over
+ * response creation and pending_ring in xenvif_idx_release. In xenvif_tx_err
+ * it only protects response creation
*/
- struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
-
+ spinlock_t response_lock;
+ pending_ring_idx_t dealloc_prod;
+ pending_ring_idx_t dealloc_cons;
+ u16 dealloc_ring[MAX_PENDING_REQS];
+ struct task_struct *dealloc_task;
+ wait_queue_head_t dealloc_wq;
/* Use kthread for guest RX */
struct task_struct *task;
@@ -144,6 +154,9 @@ struct xenvif {
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
RING_IDX rx_last_skb_slots;
+ bool rx_queue_purge;
+
+ struct timer_list wake_queue;
/* This array is allocated separately as it is large */
struct gnttab_copy *grant_copy_op;
@@ -175,6 +188,10 @@ struct xenvif {
/* Statistics */
unsigned long rx_gso_checksum_fixup;
+ unsigned long tx_zerocopy_sent;
+ unsigned long tx_zerocopy_success;
+ unsigned long tx_zerocopy_fail;
+ unsigned long tx_frag_overflow;
/* Miscellaneous private stuff. */
struct net_device *dev;
@@ -216,9 +233,11 @@ void xenvif_carrier_off(struct xenvif *vif);
int xenvif_tx_action(struct xenvif *vif, int budget);
-int xenvif_kthread(void *data);
+int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif *vif);
+int xenvif_dealloc_kthread(void *data);
+
/* Determine whether the needed number of slots (req) are available,
* and set req_event if not.
*/
@@ -226,6 +245,24 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
void xenvif_stop_queue(struct xenvif *vif);
+/* Callback from stack when TX packet can be released */
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+
+/* Unmap a pending page and release it back to the guest */
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+{
+ return MAX_PENDING_REQS -
+ vif->pending_prod + vif->pending_cons;
+}
+
+/* Callback from stack when TX packet can be released */
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+
extern bool separate_tx_rx_irq;
+extern unsigned int rx_drain_timeout_msecs;
+extern unsigned int rx_drain_timeout_jiffies;
+
#endif /* __XEN_NETBACK__COMMON_H__ */
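
The pending ring above relies on MAX_PENDING_REQS being a power of two (it now equals the TX ring size), so indices can be masked rather than taken modulo, and the free-running unsigned producer/consumer counters are allowed to wrap. A minimal model of both ideas, with illustrative counter values:

    #include <stdio.h>

    #define MAX_PENDING_REQS 256u   /* must be a power of two for the mask trick */

    /* Same shape as pending_index() in netback.c: wrap a counter into the ring. */
    static unsigned pending_index(unsigned i)
    {
            return i & (MAX_PENDING_REQS - 1);
    }

    /* Same shape as nr_pending_reqs(): slots currently handed out and not returned. */
    static unsigned nr_pending_reqs(unsigned pending_prod, unsigned pending_cons)
    {
            return MAX_PENDING_REQS - pending_prod + pending_cons;
    }

    int main(void)
    {
            /* pending_prod starts at MAX_PENDING_REQS; both counters may wrap,
             * and unsigned subtraction keeps the distance correct anyway. */
            unsigned prod = MAX_PENDING_REQS + 40u; /* 40 slots returned so far */
            unsigned cons = 50u;                    /* 50 slots handed out so far */

            printf("index of 300: %u\n", pending_index(300));       /* 44 */
            printf("pending: %u\n", nr_pending_reqs(prod, cons));   /* 10 */
            return 0;
    }
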
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 301cc037fda8..ef05c5c49d41 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -38,6 +38,7 @@
#include <xen/events.h>
#include <asm/xen/hypercall.h>
+#include <xen/balloon.h>
#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64
@@ -62,6 +63,15 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
struct xenvif *vif = container_of(napi, struct xenvif, napi);
int work_done;
+ /* This vif is rogue, we pretend there is nothing to do
+ * for this vif to deschedule it from NAPI. But this interface
+ * will be turned off in thread context later.
+ */
+ if (unlikely(vif->disabled)) {
+ napi_complete(napi);
+ return 0;
+ }
+
work_done = xenvif_tx_action(vif, budget);
if (work_done < budget) {
@@ -113,6 +123,18 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void xenvif_wake_queue(unsigned long data)
+{
+ struct xenvif *vif = (struct xenvif *)data;
+
+ if (netif_queue_stopped(vif->dev)) {
+ netdev_err(vif->dev, "draining TX queue\n");
+ vif->rx_queue_purge = true;
+ xenvif_kick_thread(vif);
+ netif_wake_queue(vif->dev);
+ }
+}
+
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
@@ -121,7 +143,9 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(skb->dev != dev);
/* Drop the packet if vif is not ready */
- if (vif->task == NULL || !xenvif_schedulable(vif))
+ if (vif->task == NULL ||
+ vif->dealloc_task == NULL ||
+ !xenvif_schedulable(vif))
goto drop;
/* At best we'll need one slot for the header and one for each
@@ -139,8 +163,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
* then turn off the queue to give the ring a chance to
* drain.
*/
- if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+ if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
+ vif->wake_queue.function = xenvif_wake_queue;
+ vif->wake_queue.data = (unsigned long)vif;
xenvif_stop_queue(vif);
+ mod_timer(&vif->wake_queue,
+ jiffies + rx_drain_timeout_jiffies);
+ }
skb_queue_tail(&vif->rx_queue, skb);
xenvif_kick_thread(vif);
@@ -233,6 +262,28 @@ static const struct xenvif_stat {
"rx_gso_checksum_fixup",
offsetof(struct xenvif, rx_gso_checksum_fixup)
},
+ /* If (sent != success + fail), there are probably packets never
+ * freed up properly!
+ */
+ {
+ "tx_zerocopy_sent",
+ offsetof(struct xenvif, tx_zerocopy_sent),
+ },
+ {
+ "tx_zerocopy_success",
+ offsetof(struct xenvif, tx_zerocopy_success),
+ },
+ {
+ "tx_zerocopy_fail",
+ offsetof(struct xenvif, tx_zerocopy_fail)
+ },
+ /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
+ * a guest with the same MAX_SKB_FRAGS
+ */
+ {
+ "tx_frag_overflow",
+ offsetof(struct xenvif, tx_frag_overflow)
+ },
};
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
@@ -321,11 +372,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->ip_csum = 1;
vif->dev = dev;
+ vif->disabled = false;
+
vif->credit_bytes = vif->remaining_credit = ~0UL;
vif->credit_usec = 0UL;
init_timer(&vif->credit_timeout);
vif->credit_window_start = get_jiffies_64();
+ init_timer(&vif->wake_queue);
+
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -342,8 +397,26 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->pending_prod = MAX_PENDING_REQS;
for (i = 0; i < MAX_PENDING_REQS; i++)
vif->pending_ring[i] = i;
- for (i = 0; i < MAX_PENDING_REQS; i++)
- vif->mmap_pages[i] = NULL;
+ spin_lock_init(&vif->callback_lock);
+ spin_lock_init(&vif->response_lock);
+ /* If ballooning is disabled, this will consume real memory, so you had
+ * better enable it. The long-term solution would be to use just a bunch
+ * of valid page descriptors, without depending on ballooning
+ */
+ err = alloc_xenballooned_pages(MAX_PENDING_REQS,
+ vif->mmap_pages,
+ false);
+ if (err) {
+ netdev_err(dev, "Could not reserve mmap_pages\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ for (i = 0; i < MAX_PENDING_REQS; i++) {
+ vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
+ { .callback = xenvif_zerocopy_callback,
+ .ctx = NULL,
+ .desc = i };
+ vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
+ }
/*
* Initialise a dummy MAC address. We choose the numerically
@@ -381,12 +454,14 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
BUG_ON(vif->tx_irq);
BUG_ON(vif->task);
+ BUG_ON(vif->dealloc_task);
err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
init_waitqueue_head(&vif->wq);
+ init_waitqueue_head(&vif->dealloc_wq);
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
@@ -420,8 +495,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
disable_irq(vif->rx_irq);
}
- task = kthread_create(xenvif_kthread,
- (void *)vif, "%s", vif->dev->name);
+ task = kthread_create(xenvif_kthread_guest_rx,
+ (void *)vif, "%s-guest-rx", vif->dev->name);
if (IS_ERR(task)) {
pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
err = PTR_ERR(task);
@@ -430,6 +505,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
vif->task = task;
+ task = kthread_create(xenvif_dealloc_kthread,
+ (void *)vif, "%s-dealloc", vif->dev->name);
+ if (IS_ERR(task)) {
+ pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+ err = PTR_ERR(task);
+ goto err_rx_unbind;
+ }
+
+ vif->dealloc_task = task;
+
rtnl_lock();
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
@@ -440,6 +525,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
rtnl_unlock();
wake_up_process(vif->task);
+ wake_up_process(vif->dealloc_task);
return 0;
@@ -473,10 +559,16 @@ void xenvif_disconnect(struct xenvif *vif)
xenvif_carrier_off(vif);
if (vif->task) {
+ del_timer_sync(&vif->wake_queue);
kthread_stop(vif->task);
vif->task = NULL;
}
+ if (vif->dealloc_task) {
+ kthread_stop(vif->dealloc_task);
+ vif->dealloc_task = NULL;
+ }
+
if (vif->tx_irq) {
if (vif->tx_irq == vif->rx_irq)
unbind_from_irqhandler(vif->tx_irq, vif);
@@ -492,6 +584,43 @@ void xenvif_disconnect(struct xenvif *vif)
void xenvif_free(struct xenvif *vif)
{
+ int i, unmap_timeout = 0;
+ /* Here we want to avoid timeout messages if an skb can be legitimately
+ * stuck somewhere else. Realistically this could be another vif's
+ * internal or QDisc queue. That other vif also has this
+ * rx_drain_timeout_msecs timeout, but the timer only ditches the
+ * internal queue. After that, the QDisc queue can put in the worst case
+ * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that other vif's
+ * internal queue, so we need several rounds of such timeouts until we
+ * can be sure that no other vif still holds skbs from us. We are
+ * not sending more skbs, so newly stuck packets are not interesting
+ * for us here.
+ */
+ unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
+ DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
+
+ for (i = 0; i < MAX_PENDING_REQS; ++i) {
+ if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
+ unmap_timeout++;
+ schedule_timeout(msecs_to_jiffies(1000));
+ if (unmap_timeout > worst_case_skb_lifetime &&
+ net_ratelimit())
+ netdev_err(vif->dev,
+ "Page still granted! Index: %x\n",
+ i);
+ /* If there are still unmapped pages, reset the loop to
+ * start checking again. We shouldn't exit here until
+ * dealloc thread and NAPI instance release all the
+ * pages. If a kernel bug causes the skbs to stall
+ * somewhere, the interface cannot be brought down
+ * properly.
+ */
+ i = -1;
+ }
+ }
+
+ free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
+
netif_napi_del(&vif->napi);
unregister_netdev(vif->dev);
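
To make the worst_case_skb_lifetime bound above concrete, here is the same arithmetic as a stand-alone program. The constants are the usual defaults of the era (10 s drain timeout, queue length 32, a 256-entry RX ring, MAX_SKB_FRAGS of 17); treat them as assumptions, not values read out of this tree.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned rx_drain_timeout_msecs = 10000; /* assumed module default */
            unsigned queue_length = 32;              /* assumed XENVIF_QUEUE_LENGTH */
            unsigned rx_ring_size = 256;             /* assumed XEN_NETIF_RX_RING_SIZE */
            unsigned max_skb_frags = 17;             /* assumed MAX_SKB_FRAGS */

            unsigned rounds = DIV_ROUND_UP(queue_length, rx_ring_size / max_skb_frags);
            unsigned worst_case_skb_lifetime = (rx_drain_timeout_msecs / 1000) * rounds;

            /* 256/17 = 15 skbs per round, ceil(32/15) = 3 rounds, 10 s each => 30 s. */
            printf("worst case skb lifetime: %u s\n", worst_case_skb_lifetime);
            return 0;
    }
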
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 438d0c09b7e6..76665405c5aa 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -37,6 +37,7 @@
#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
+#include <linux/highmem.h>
#include <net/tcp.h>
@@ -54,6 +55,13 @@
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);
+/* When the guest ring is filled up, the qdisc queues the packets for us, but we
+ * have to time them out, otherwise other guests' packets can get stuck there
+ */
+unsigned int rx_drain_timeout_msecs = 10000;
+module_param(rx_drain_timeout_msecs, uint, 0444);
+unsigned int rx_drain_timeout_jiffies;
+
/*
* This is the maximum slots a skb can have. If a guest sends a skb
* which exceeds this limit it is considered malicious.
@@ -62,24 +70,6 @@ module_param(separate_tx_rx_irq, bool, 0644);
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
-/*
- * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
- * the maximum slots a valid packet can use. Now this value is defined
- * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
- * all backend.
- */
-#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
-
-/*
- * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
- * one or more merged tx requests, otherwise it is the continuation of
- * previous tx request.
- */
-static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
-{
- return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
-}
-
static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
u8 status);
@@ -109,6 +99,21 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}
+#define callback_param(vif, pending_idx) \
+ (vif->pending_tx_info[pending_idx].callback_struct)
+
+/* Find the containing VIF's structure from a pointer into the pending_tx_info array
+ */
+static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+{
+ u16 pending_idx = ubuf->desc;
+ struct pending_tx_info *temp =
+ container_of(ubuf, struct pending_tx_info, callback_struct);
+ return container_of(temp - pending_idx,
+ struct xenvif,
+ pending_tx_info[0]);
+}
+
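
ubuf_to_vif above leans on the fact that callback_struct lives inside pending_tx_info[pending_idx]: stepping the element pointer back by pending_idx lands on element 0, and container_of of that recovers the outer xenvif. The same trick in stand-alone form, with generic names rather than the netback types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct slot {
            int req;
            int desc;       /* index of this slot, like ubuf_info.desc */
    };

    struct owner {
            int id;
            struct slot slots[8];
    };

    /* Recover the owner from a pointer to one of its embedded slots. */
    static struct owner *slot_to_owner(struct slot *s)
    {
            struct slot *first = s - s->desc;       /* walk back to slots[0] */

            return container_of(first, struct owner, slots);
    }

    int main(void)
    {
            struct owner o = { .id = 42 };

            for (int i = 0; i < 8; i++)
                    o.slots[i].desc = i;
            printf("owner id: %d\n", slot_to_owner(&o.slots[5])->id);
            return 0;
    }
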
/* This is a minimum size for the linear area to avoid lots of
* calls to __pskb_pull_tail() as we set up checksum offsets. The
* value 128 was chosen as it covers all IPv4 and most likely
@@ -131,12 +136,6 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
-{
- return MAX_PENDING_REQS -
- vif->pending_prod + vif->pending_cons;
-}
-
bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
RING_IDX prod, cons;
@@ -192,8 +191,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* into multiple copies tend to give large frags their
* own buffers as before.
*/
- if ((offset + size > MAX_BUFFER_OFFSET) &&
- (size <= MAX_BUFFER_OFFSET) && offset && !head)
+ BUG_ON(size > MAX_BUFFER_OFFSET);
+ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
return true;
return false;
@@ -235,7 +234,9 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct netrx_pending_operations *npo,
struct page *page, unsigned long size,
- unsigned long offset, int *head)
+ unsigned long offset, int *head,
+ struct xenvif *foreign_vif,
+ grant_ref_t foreign_gref)
{
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
@@ -277,8 +278,15 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
copy_gop->flags = GNTCOPY_dest_gref;
copy_gop->len = bytes;
- copy_gop->source.domid = DOMID_SELF;
- copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
+ if (foreign_vif) {
+ copy_gop->source.domid = foreign_vif->domid;
+ copy_gop->source.u.ref = foreign_gref;
+ copy_gop->flags |= GNTCOPY_source_gref;
+ } else {
+ copy_gop->source.domid = DOMID_SELF;
+ copy_gop->source.u.gmfn =
+ virt_to_mfn(page_address(page));
+ }
copy_gop->source.offset = offset;
copy_gop->dest.domid = vif->domid;
@@ -338,6 +346,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
int head = 1;
int old_meta_prod;
int gso_type;
+ struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+ grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
+ struct xenvif *foreign_vif = NULL;
old_meta_prod = npo->meta_prod;
@@ -375,6 +386,19 @@ static int xenvif_gop_skb(struct sk_buff *skb,
npo->copy_off = 0;
npo->copy_gref = req->gref;
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+ (ubuf->callback == &xenvif_zerocopy_callback)) {
+ int i = 0;
+ foreign_vif = ubuf_to_vif(ubuf);
+
+ do {
+ u16 pending_idx = ubuf->desc;
+ foreign_grefs[i++] =
+ foreign_vif->pending_tx_info[pending_idx].req.gref;
+ ubuf = (struct ubuf_info *) ubuf->ctx;
+ } while (ubuf);
+ }
+
data = skb->data;
while (data < skb_tail_pointer(skb)) {
unsigned int offset = offset_in_page(data);
@@ -384,7 +408,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
len = skb_tail_pointer(skb) - data;
xenvif_gop_frag_copy(vif, skb, npo,
- virt_to_page(data), len, offset, &head);
+ virt_to_page(data), len, offset, &head,
+ NULL,
+ 0);
data += len;
}
@@ -393,7 +419,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
- &head);
+ &head,
+ foreign_vif,
+ foreign_grefs[i]);
}
return npo->meta_prod - old_meta_prod;
@@ -451,10 +479,12 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
}
}
-struct skb_cb_overlay {
+struct xenvif_rx_cb {
int meta_slots_used;
};
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
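
XENVIF_RX_CB follows the common pattern of overlaying a small private struct on the fixed 48-byte skb->cb scratch area. A stand-alone sketch of that pattern with a compile-time guard that the overlay fits (sizes and names here are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct fake_skb {
            char cb[48];    /* models the sk_buff control buffer */
    };

    struct rx_cb {
            int meta_slots_used;
    };

    #define RX_CB(skb)      ((struct rx_cb *)(skb)->cb)

    /* Fail the build if the private state ever outgrows the scratch area. */
    _Static_assert(sizeof(struct rx_cb) <= sizeof(((struct fake_skb *)0)->cb),
                   "rx_cb too large for skb->cb");

    int main(void)
    {
            struct fake_skb skb;

            memset(&skb, 0, sizeof(skb));
            RX_CB(&skb)->meta_slots_used = 3;
            printf("%d\n", RX_CB(&skb)->meta_slots_used);
            return 0;
    }
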
void xenvif_kick_thread(struct xenvif *vif)
{
wake_up(&vif->wq);
@@ -470,7 +500,6 @@ static void xenvif_rx_action(struct xenvif *vif)
LIST_HEAD(notify);
int ret;
unsigned long offset;
- struct skb_cb_overlay *sco;
bool need_to_notify = false;
struct netrx_pending_operations npo = {
@@ -482,6 +511,8 @@ static void xenvif_rx_action(struct xenvif *vif)
while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
RING_IDX max_slots_needed;
+ RING_IDX old_req_cons;
+ RING_IDX ring_slots_used;
int i;
/* We need a cheap worst-case estimate for the number of
@@ -493,9 +524,28 @@ static void xenvif_rx_action(struct xenvif *vif)
PAGE_SIZE);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
unsigned int size;
+ unsigned int offset;
+
size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+ offset = skb_shinfo(skb)->frags[i].page_offset;
+
+ /* For a worst-case estimate we need to factor in
+ * the fragment page offset as this will affect the
+ * number of times xenvif_gop_frag_copy() will
+ * call start_new_rx_buffer().
+ */
+ max_slots_needed += DIV_ROUND_UP(offset + size,
+ PAGE_SIZE);
}
+
+ /* To avoid the estimate becoming too pessimal for some
+ * frontends that limit posted rx requests, cap the estimate
+ * at MAX_SKB_FRAGS.
+ */
+ if (max_slots_needed > MAX_SKB_FRAGS)
+ max_slots_needed = MAX_SKB_FRAGS;
+
+ /* We may need one more slot for GSO metadata */
if (skb_is_gso(skb) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
@@ -510,9 +560,11 @@ static void xenvif_rx_action(struct xenvif *vif)
} else
vif->rx_last_skb_slots = 0;
- sco = (struct skb_cb_overlay *)skb->cb;
- sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
- BUG_ON(sco->meta_slots_used > max_slots_needed);
+ old_req_cons = vif->rx.req_cons;
+ XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
+ ring_slots_used = vif->rx.req_cons - old_req_cons;
+
+ BUG_ON(ring_slots_used > max_slots_needed);
__skb_queue_tail(&rxq, skb);
}
@@ -526,7 +578,6 @@ static void xenvif_rx_action(struct xenvif *vif)
gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
- sco = (struct skb_cb_overlay *)skb->cb;
if ((1 << vif->meta[npo.meta_cons].gso_type) &
vif->gso_prefix_mask) {
@@ -537,19 +588,21 @@ static void xenvif_rx_action(struct xenvif *vif)
resp->offset = vif->meta[npo.meta_cons].gso_size;
resp->id = vif->meta[npo.meta_cons].id;
- resp->status = sco->meta_slots_used;
+ resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
npo.meta_cons++;
- sco->meta_slots_used--;
+ XENVIF_RX_CB(skb)->meta_slots_used--;
}
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
- status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
+ status = xenvif_check_gop(vif,
+ XENVIF_RX_CB(skb)->meta_slots_used,
+ &npo);
- if (sco->meta_slots_used == 1)
+ if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
flags = 0;
else
flags = XEN_NETRXF_more_data;
@@ -586,13 +639,13 @@ static void xenvif_rx_action(struct xenvif *vif)
xenvif_add_frag_responses(vif, status,
vif->meta + npo.meta_cons + 1,
- sco->meta_slots_used);
+ XENVIF_RX_CB(skb)->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
need_to_notify |= !!ret;
- npo.meta_cons += sco->meta_slots_used;
+ npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
dev_kfree_skb(skb);
}
@@ -642,9 +695,12 @@ static void xenvif_tx_err(struct xenvif *vif,
struct xen_netif_tx_request *txp, RING_IDX end)
{
RING_IDX cons = vif->tx.req_cons;
+ unsigned long flags;
do {
+ spin_lock_irqsave(&vif->response_lock, flags);
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+ spin_unlock_irqrestore(&vif->response_lock, flags);
if (cons == end)
break;
txp = RING_GET_REQUEST(&vif->tx, cons++);
@@ -655,7 +711,8 @@ static void xenvif_tx_err(struct xenvif *vif,
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
- xenvif_carrier_off(vif);
+ vif->disabled = true;
+ xenvif_kick_thread(vif);
}
static int xenvif_count_requests(struct xenvif *vif,
@@ -756,204 +813,220 @@ static int xenvif_count_requests(struct xenvif *vif,
return slots;
}
-static struct page *xenvif_alloc_page(struct xenvif *vif,
- u16 pending_idx)
+
+struct xenvif_tx_cb {
+ u16 pending_idx;
+};
+
+#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+
+static inline void xenvif_tx_create_map_op(struct xenvif *vif,
+ u16 pending_idx,
+ struct xen_netif_tx_request *txp,
+ struct gnttab_map_grant_ref *mop)
{
- struct page *page;
+ vif->pages_to_map[mop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
+ gnttab_set_map_op(mop, idx_to_kaddr(vif, pending_idx),
+ GNTMAP_host_map | GNTMAP_readonly,
+ txp->gref, vif->domid);
- page = alloc_page(GFP_ATOMIC|__GFP_COLD);
- if (!page)
+ memcpy(&vif->pending_tx_info[pending_idx].req, txp,
+ sizeof(*txp));
+}
+
+static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
+{
+ struct sk_buff *skb =
+ alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(skb == NULL))
return NULL;
- vif->mmap_pages[pending_idx] = page;
- return page;
+ /* Packets passed to netif_rx() must have some headroom. */
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+ /* Initialize it here to avoid later surprises */
+ skb_shinfo(skb)->destructor_arg = NULL;
+
+ return skb;
}
-static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
- struct sk_buff *skb,
- struct xen_netif_tx_request *txp,
- struct gnttab_copy *gop)
+static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct xen_netif_tx_request *txp,
+ struct gnttab_map_grant_ref *gop)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
- u16 pending_idx = *((u16 *)skb->data);
- u16 head_idx = 0;
- int slot, start;
- struct page *page;
- pending_ring_idx_t index, start_idx = 0;
- uint16_t dst_offset;
- unsigned int nr_slots;
- struct pending_tx_info *first = NULL;
+ u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ int start;
+ pending_ring_idx_t index;
+ unsigned int nr_slots, frag_overflow = 0;
/* At this point shinfo->nr_frags is in fact the number of
* slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
*/
+ if (shinfo->nr_frags > MAX_SKB_FRAGS) {
+ frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
+ BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+ shinfo->nr_frags = MAX_SKB_FRAGS;
+ }
nr_slots = shinfo->nr_frags;
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
- /* Coalesce tx requests, at this point the packet passed in
- * should be <= 64K. Any packets larger than 64K have been
- * handled in xenvif_count_requests().
- */
- for (shinfo->nr_frags = slot = start; slot < nr_slots;
- shinfo->nr_frags++) {
- struct pending_tx_info *pending_tx_info =
- vif->pending_tx_info;
+ for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
+ shinfo->nr_frags++, txp++, gop++) {
+ index = pending_index(vif->pending_cons++);
+ pending_idx = vif->pending_ring[index];
+ xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
+ frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+ }
- page = alloc_page(GFP_ATOMIC|__GFP_COLD);
- if (!page)
- goto err;
-
- dst_offset = 0;
- first = NULL;
- while (dst_offset < PAGE_SIZE && slot < nr_slots) {
- gop->flags = GNTCOPY_source_gref;
-
- gop->source.u.ref = txp->gref;
- gop->source.domid = vif->domid;
- gop->source.offset = txp->offset;
-
- gop->dest.domid = DOMID_SELF;
-
- gop->dest.offset = dst_offset;
- gop->dest.u.gmfn = virt_to_mfn(page_address(page));
-
- if (dst_offset + txp->size > PAGE_SIZE) {
- /* This page can only merge a portion
- * of tx request. Do not increment any
- * pointer / counter here. The txp
- * will be dealt with in future
- * rounds, eventually hitting the
- * `else` branch.
- */
- gop->len = PAGE_SIZE - dst_offset;
- txp->offset += gop->len;
- txp->size -= gop->len;
- dst_offset += gop->len; /* quit loop */
- } else {
- /* This tx request can be merged in the page */
- gop->len = txp->size;
- dst_offset += gop->len;
-
- index = pending_index(vif->pending_cons++);
-
- pending_idx = vif->pending_ring[index];
-
- memcpy(&pending_tx_info[pending_idx].req, txp,
- sizeof(*txp));
-
- /* Poison these fields, corresponding
- * fields for head tx req will be set
- * to correct values after the loop.
- */
- vif->mmap_pages[pending_idx] = (void *)(~0UL);
- pending_tx_info[pending_idx].head =
- INVALID_PENDING_RING_IDX;
-
- if (!first) {
- first = &pending_tx_info[pending_idx];
- start_idx = index;
- head_idx = pending_idx;
- }
-
- txp++;
- slot++;
- }
+ if (frag_overflow) {
+ struct sk_buff *nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Can't allocate the frag_list skb.\n");
+ return NULL;
+ }
- gop++;
+ shinfo = skb_shinfo(nskb);
+ frags = shinfo->frags;
+
+ for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+ shinfo->nr_frags++, txp++, gop++) {
+ index = pending_index(vif->pending_cons++);
+ pending_idx = vif->pending_ring[index];
+ xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
+ frag_set_pending_idx(&frags[shinfo->nr_frags],
+ pending_idx);
}
- first->req.offset = 0;
- first->req.size = dst_offset;
- first->head = start_idx;
- vif->mmap_pages[head_idx] = page;
- frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
+ skb_shinfo(skb)->frag_list = nskb;
}
- BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
-
return gop;
-err:
- /* Unwind, freeing all pages and sending error responses. */
- while (shinfo->nr_frags-- > start) {
- xenvif_idx_release(vif,
- frag_get_pending_idx(&frags[shinfo->nr_frags]),
- XEN_NETIF_RSP_ERROR);
+}
+
+static inline void xenvif_grant_handle_set(struct xenvif *vif,
+ u16 pending_idx,
+ grant_handle_t handle)
+{
+ if (unlikely(vif->grant_tx_handle[pending_idx] !=
+ NETBACK_INVALID_HANDLE)) {
+ netdev_err(vif->dev,
+ "Trying to overwrite active handle! pending_idx: %x\n",
+ pending_idx);
+ BUG();
}
- /* The head too, if necessary. */
- if (start)
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ vif->grant_tx_handle[pending_idx] = handle;
+}
- return NULL;
+static inline void xenvif_grant_handle_reset(struct xenvif *vif,
+ u16 pending_idx)
+{
+ if (unlikely(vif->grant_tx_handle[pending_idx] ==
+ NETBACK_INVALID_HANDLE)) {
+ netdev_err(vif->dev,
+ "Trying to unmap invalid handle! pending_idx: %x\n",
+ pending_idx);
+ BUG();
+ }
+ vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
static int xenvif_tx_check_gop(struct xenvif *vif,
struct sk_buff *skb,
- struct gnttab_copy **gopp)
+ struct gnttab_map_grant_ref **gopp_map,
+ struct gnttab_copy **gopp_copy)
{
- struct gnttab_copy *gop = *gopp;
- u16 pending_idx = *((u16 *)skb->data);
+ struct gnttab_map_grant_ref *gop_map = *gopp_map;
+ u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
struct skb_shared_info *shinfo = skb_shinfo(skb);
- struct pending_tx_info *tx_info;
int nr_frags = shinfo->nr_frags;
- int i, err, start;
- u16 peek; /* peek into next tx request */
+ int i, err;
+ struct sk_buff *first_skb = NULL;
/* Check status of header. */
- err = gop->status;
- if (unlikely(err))
+ err = (*gopp_copy)->status;
+ (*gopp_copy)++;
+ if (unlikely(err)) {
+ if (net_ratelimit())
+ netdev_dbg(vif->dev,
+ "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+ (*gopp_copy)->status,
+ pending_idx,
+ (*gopp_copy)->source.u.ref);
xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ }
- /* Skip first skb fragment if it is on same page as header fragment. */
- start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
-
- for (i = start; i < nr_frags; i++) {
+check_frags:
+ for (i = 0; i < nr_frags; i++, gop_map++) {
int j, newerr;
- pending_ring_idx_t head;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
- tx_info = &vif->pending_tx_info[pending_idx];
- head = tx_info->head;
/* Check error status: if okay then remember grant handle. */
- do {
- newerr = (++gop)->status;
- if (newerr)
- break;
- peek = vif->pending_ring[pending_index(++head)];
- } while (!pending_tx_is_head(vif, peek));
+ newerr = gop_map->status;
if (likely(!newerr)) {
+ xenvif_grant_handle_set(vif,
+ pending_idx,
+ gop_map->handle);
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xenvif_idx_release(vif, pending_idx,
- XEN_NETIF_RSP_OKAY);
+ xenvif_idx_unmap(vif, pending_idx);
continue;
}
/* Error on this fragment: respond to client with an error. */
+ if (net_ratelimit())
+ netdev_dbg(vif->dev,
+ "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
+ i,
+ gop_map->status,
+ pending_idx,
+ gop_map->ref);
xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
continue;
-
- /* First error: invalidate header and preceding fragments. */
- pending_idx = *((u16 *)skb->data);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
- for (j = start; j < i; j++) {
+ /* First error: invalidate preceding fragments. */
+ for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_release(vif, pending_idx,
- XEN_NETIF_RSP_OKAY);
+ xenvif_idx_unmap(vif, pending_idx);
}
/* Remember the error: invalidate all subsequent fragments. */
err = newerr;
}
- *gopp = gop + 1;
+ if (skb_has_frag_list(skb)) {
+ first_skb = skb;
+ skb = shinfo->frag_list;
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ goto check_frags;
+ }
+
+ /* There was a mapping error in the frag_list skb. We have to unmap
+ * the first skb's frags
+ */
+ if (first_skb && err) {
+ int j;
+ shinfo = skb_shinfo(first_skb);
+ for (j = 0; j < shinfo->nr_frags; j++) {
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+ xenvif_idx_unmap(vif, pending_idx);
+ }
+ }
+
+ *gopp_map = gop_map;
return err;
}
@@ -962,6 +1035,7 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i;
+ u16 prev_pending_idx = INVALID_PENDING_IDX;
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = shinfo->frags + i;
@@ -971,6 +1045,17 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
pending_idx = frag_get_pending_idx(frag);
+ /* If this is not the first frag, chain it to the previous */
+ if (prev_pending_idx == INVALID_PENDING_IDX)
+ skb_shinfo(skb)->destructor_arg =
+ &callback_param(vif, pending_idx);
+ else
+ callback_param(vif, prev_pending_idx).ctx =
+ &callback_param(vif, pending_idx);
+
+ callback_param(vif, pending_idx).ctx = NULL;
+ prev_pending_idx = pending_idx;
+
txp = &vif->pending_tx_info[pending_idx].req;
page = virt_to_page(idx_to_kaddr(vif, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
@@ -978,10 +1063,15 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
skb->data_len += txp->size;
skb->truesize += txp->size;
- /* Take an extra reference to offset xenvif_idx_release */
+ /* Take an extra reference to offset network stack's put_page */
get_page(vif->mmap_pages[pending_idx]);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
}
+ /* FIXME: __skb_fill_page_desc sets this to true because page->pfmemalloc
+ * overlaps with "index", and "mapping" is not set. I think mapping
+ * should be set. If delivered to the local stack, it would drop this
+ * skb in sk_filter unless the socket has the right to use it.
+ */
+ skb->pfmemalloc = false;
}
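
xenvif_fill_frags threads all of an skb's mapped slots into a singly linked list: destructor_arg points at the first slot's callback struct and each ctx points at the next, which is exactly the chain xenvif_zerocopy_callback later walks. A small stand-alone model of building and walking such a chain, using generic types rather than the kernel's ubuf_info:

    #include <stddef.h>
    #include <stdio.h>

    struct cb {                     /* models ubuf_info: desc + ctx link */
            int desc;
            struct cb *ctx;
    };

    int main(void)
    {
            struct cb slots[4];
            struct cb *head = NULL, *prev = NULL;
            int pending[3] = { 3, 2, 1 };   /* pretend frag -> slot mapping */

            /* Build the chain the way xenvif_fill_frags does: head first, then link ctx. */
            for (int i = 0; i < 3; i++) {
                    struct cb *cur = &slots[pending[i]];

                    cur->desc = pending[i];
                    cur->ctx = NULL;
                    if (!prev)
                            head = cur;     /* becomes destructor_arg */
                    else
                            prev->ctx = cur;        /* previous slot points at this one */
                    prev = cur;
            }

            /* Walk it the way the zerocopy callback does. */
            for (struct cb *c = head; c; c = c->ctx)
                    printf("slot %d\n", c->desc);
            return 0;
    }
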
static int xenvif_get_extras(struct xenvif *vif,
@@ -1099,18 +1189,18 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return false;
}
-static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
+static void xenvif_tx_build_gops(struct xenvif *vif,
+ int budget,
+ unsigned *copy_ops,
+ unsigned *map_ops)
{
- struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
+ struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
struct sk_buff *skb;
int ret;
- while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS) &&
- (skb_queue_len(&vif->tx_queue) < budget)) {
+ while (skb_queue_len(&vif->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
- struct page *page;
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
u16 pending_idx;
RING_IDX idx;
@@ -1126,7 +1216,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
vif->tx.sring->req_prod, vif->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
xenvif_fatal_tx_err(vif);
- continue;
+ break;
}
work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
@@ -1186,8 +1276,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
PKT_PROT_LEN : txreq.size;
- skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
netdev_dbg(vif->dev,
"Can't allocate a skb in start_xmit.\n");
@@ -1195,9 +1284,6 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
break;
}
- /* Packets passed to netif_rx() must have some headroom. */
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1209,42 +1295,36 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
}
}
- /* XXX could copy straight to head */
- page = xenvif_alloc_page(vif, pending_idx);
- if (!page) {
- kfree_skb(skb);
- xenvif_tx_err(vif, &txreq, idx);
- break;
- }
-
- gop->source.u.ref = txreq.gref;
- gop->source.domid = vif->domid;
- gop->source.offset = txreq.offset;
+ XENVIF_TX_CB(skb)->pending_idx = pending_idx;
- gop->dest.u.gmfn = virt_to_mfn(page_address(page));
- gop->dest.domid = DOMID_SELF;
- gop->dest.offset = txreq.offset;
+ __skb_put(skb, data_len);
+ vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
+ vif->tx_copy_ops[*copy_ops].source.domid = vif->domid;
+ vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
- gop->len = txreq.size;
- gop->flags = GNTCOPY_source_gref;
+ vif->tx_copy_ops[*copy_ops].dest.u.gmfn =
+ virt_to_mfn(skb->data);
+ vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
+ vif->tx_copy_ops[*copy_ops].dest.offset =
+ offset_in_page(skb->data);
- gop++;
+ vif->tx_copy_ops[*copy_ops].len = data_len;
+ vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
- memcpy(&vif->pending_tx_info[pending_idx].req,
- &txreq, sizeof(txreq));
- vif->pending_tx_info[pending_idx].head = index;
- *((u16 *)skb->data) = pending_idx;
-
- __skb_put(skb, data_len);
+ (*copy_ops)++;
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
+ xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop);
+ gop++;
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
+ memcpy(&vif->pending_tx_info[pending_idx].req, &txreq,
+ sizeof(txreq));
}
vif->pending_cons++;
@@ -1261,17 +1341,85 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
vif->tx.req_cons = idx;
- if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
+ if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) ||
+ (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops)))
break;
}
- return gop - vif->tx_copy_ops;
+ (*map_ops) = gop - vif->tx_map_ops;
+ return;
}
+/* Consolidate skb with a frag_list into a brand new one with local pages on
+ * frags. Returns 0, or -ENOMEM if new pages cannot be allocated.
+ */
+static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
+{
+ unsigned int offset = skb_headlen(skb);
+ skb_frag_t frags[MAX_SKB_FRAGS];
+ int i;
+ struct ubuf_info *uarg;
+ struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+
+ vif->tx_zerocopy_sent += 2;
+ vif->tx_frag_overflow++;
+
+ xenvif_fill_frags(vif, nskb);
+ /* Subtract frags size, we will correct it later */
+ skb->truesize -= skb->data_len;
+ skb->len += nskb->len;
+ skb->data_len += nskb->len;
+
+ /* create a brand new frags array and coalesce there */
+ for (i = 0; offset < skb->len; i++) {
+ struct page *page;
+ unsigned int len;
+
+ BUG_ON(i >= MAX_SKB_FRAGS);
+ page = alloc_page(GFP_ATOMIC|__GFP_COLD);
+ if (!page) {
+ int j;
+ skb->truesize += skb->data_len;
+ for (j = 0; j < i; j++)
+ put_page(frags[j].page.p);
+ return -ENOMEM;
+ }
+
+ if (offset + PAGE_SIZE < skb->len)
+ len = PAGE_SIZE;
+ else
+ len = skb->len - offset;
+ if (skb_copy_bits(skb, offset, page_address(page), len))
+ BUG();
+
+ offset += len;
+ frags[i].page.p = page;
+ frags[i].page_offset = 0;
+ skb_frag_size_set(&frags[i], len);
+ }
+ /* swap out with old one */
+ memcpy(skb_shinfo(skb)->frags,
+ frags,
+ i * sizeof(skb_frag_t));
+ skb_shinfo(skb)->nr_frags = i;
+ skb->truesize += i * PAGE_SIZE;
+
+ /* remove traces of mapped pages and frag_list */
+ skb_frag_list_init(skb);
+ uarg = skb_shinfo(skb)->destructor_arg;
+ uarg->callback(uarg, true);
+ skb_shinfo(skb)->destructor_arg = NULL;
+
+ skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ kfree_skb(nskb);
+
+ return 0;
+}
static int xenvif_tx_submit(struct xenvif *vif)
{
- struct gnttab_copy *gop = vif->tx_copy_ops;
+ struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops;
+ struct gnttab_copy *gop_copy = vif->tx_copy_ops;
struct sk_buff *skb;
int work_done = 0;
@@ -1280,21 +1428,18 @@ static int xenvif_tx_submit(struct xenvif *vif)
u16 pending_idx;
unsigned data_len;
- pending_idx = *((u16 *)skb->data);
+ pending_idx = XENVIF_TX_CB(skb)->pending_idx;
txp = &vif->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
- if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
- netdev_dbg(vif->dev, "netback grant failed.\n");
+ if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) {
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
continue;
}
data_len = skb->len;
- memcpy(skb->data,
- (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
- data_len);
+ callback_param(vif, pending_idx).ctx = NULL;
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
txp->offset += data_len;
@@ -1312,6 +1457,17 @@ static int xenvif_tx_submit(struct xenvif *vif)
xenvif_fill_frags(vif, skb);
+ if (unlikely(skb_has_frag_list(skb))) {
+ if (xenvif_handle_frag_list(vif, skb)) {
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Not enough memory to consolidate frag_list!\n");
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ kfree_skb(skb);
+ continue;
+ }
+ }
+
if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
int target = min_t(int, skb->len, PKT_PROT_LEN);
__pskb_pull_tail(skb, target - skb_headlen(skb));
@@ -1324,6 +1480,9 @@ static int xenvif_tx_submit(struct xenvif *vif)
if (checksum_setup(vif, skb)) {
netdev_dbg(vif->dev,
"Can't setup checksum in net_tx_action\n");
+ /* We have to set this flag to trigger the callback */
+ if (skb_shinfo(skb)->destructor_arg)
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
kfree_skb(skb);
continue;
}
@@ -1349,27 +1508,143 @@ static int xenvif_tx_submit(struct xenvif *vif)
work_done++;
+ /* Set this flag right before netif_receive_skb, otherwise
+ * someone might think this packet already left netback, and
+ * do a skb_copy_ubufs while we are still in control of the
+ * skb. E.g. the __pskb_pull_tail earlier can do such thing.
+ */
+ if (skb_shinfo(skb)->destructor_arg) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ vif->tx_zerocopy_sent++;
+ }
+
netif_receive_skb(skb);
}
return work_done;
}
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
+{
+ unsigned long flags;
+ pending_ring_idx_t index;
+ struct xenvif *vif = ubuf_to_vif(ubuf);
+
+ /* This is the only place where we grab this lock, to protect callbacks
+ * from each other.
+ */
+ spin_lock_irqsave(&vif->callback_lock, flags);
+ do {
+ u16 pending_idx = ubuf->desc;
+ ubuf = (struct ubuf_info *) ubuf->ctx;
+ BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
+ MAX_PENDING_REQS);
+ index = pending_index(vif->dealloc_prod);
+ vif->dealloc_ring[index] = pending_idx;
+ /* Sync with xenvif_tx_dealloc_action:
+ * insert idx then incr producer.
+ */
+ smp_wmb();
+ vif->dealloc_prod++;
+ } while (ubuf);
+ wake_up(&vif->dealloc_wq);
+ spin_unlock_irqrestore(&vif->callback_lock, flags);
+
+ if (likely(zerocopy_success))
+ vif->tx_zerocopy_success++;
+ else
+ vif->tx_zerocopy_fail++;
+}
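
The callback/dealloc pair above is a classic single-producer ring handoff: the callback writes the index, issues a write barrier, then bumps dealloc_prod; the dealloc thread reads dealloc_prod, issues a read barrier, then consumes entries. A user-space model of that ordering using C11 release/acquire atomics in place of smp_wmb/smp_rmb; a sketch of the idea, not the kernel code (the kernel serializes concurrent callbacks with callback_lock, so a single producer is assumed here).

    #include <stdatomic.h>
    #include <stdio.h>

    #define RING_SIZE 8u            /* power of two, like MAX_PENDING_REQS */

    static unsigned short ring[RING_SIZE];
    static atomic_uint prod;        /* bumped by the producer (zerocopy callback) */
    static unsigned cons;           /* owned by the single consumer (dealloc thread) */

    static void produce(unsigned short idx)
    {
            unsigned p = atomic_load_explicit(&prod, memory_order_relaxed);

            ring[p & (RING_SIZE - 1)] = idx;
            /* "insert idx then incr producer": release publishes the slot contents. */
            atomic_store_explicit(&prod, p + 1, memory_order_release);
    }

    static void consume_all(void)
    {
            /* Acquire pairs with the release above, like smp_rmb after reading prod. */
            unsigned p = atomic_load_explicit(&prod, memory_order_acquire);

            while (cons != p)
                    printf("dealloc idx %u\n", ring[cons++ & (RING_SIZE - 1)]);
    }

    int main(void)
    {
            produce(5);
            produce(9);
            consume_all();
            return 0;
    }
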
+
+static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
+{
+ struct gnttab_unmap_grant_ref *gop;
+ pending_ring_idx_t dc, dp;
+ u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
+ unsigned int i = 0;
+
+ dc = vif->dealloc_cons;
+ gop = vif->tx_unmap_ops;
+
+ /* Free up any grants we have finished using */
+ do {
+ dp = vif->dealloc_prod;
+
+ /* Ensure we see all indices enqueued by all
+ * xenvif_zerocopy_callback().
+ */
+ smp_rmb();
+
+ while (dc != dp) {
+ BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
+ pending_idx =
+ vif->dealloc_ring[pending_index(dc++)];
+
+ pending_idx_release[gop-vif->tx_unmap_ops] =
+ pending_idx;
+ vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
+ vif->mmap_pages[pending_idx];
+ gnttab_set_unmap_op(gop,
+ idx_to_kaddr(vif, pending_idx),
+ GNTMAP_host_map,
+ vif->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(vif, pending_idx);
+ ++gop;
+ }
+
+ } while (dp != vif->dealloc_prod);
+
+ vif->dealloc_cons = dc;
+
+ if (gop - vif->tx_unmap_ops > 0) {
+ int ret;
+ ret = gnttab_unmap_refs(vif->tx_unmap_ops,
+ NULL,
+ vif->pages_to_unmap,
+ gop - vif->tx_unmap_ops);
+ if (ret) {
+ netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+ gop - vif->tx_unmap_ops, ret);
+ for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
+ if (gop[i].status != GNTST_okay)
+ netdev_err(vif->dev,
+ " host_addr: %llx handle: %x status: %d\n",
+ gop[i].host_addr,
+ gop[i].handle,
+ gop[i].status);
+ }
+ BUG();
+ }
+ }
+
+ for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
+ xenvif_idx_release(vif, pending_idx_release[i],
+ XEN_NETIF_RSP_OKAY);
+}
+
+
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif *vif, int budget)
{
- unsigned nr_gops;
- int work_done;
+ unsigned nr_mops, nr_cops = 0;
+ int work_done, ret;
if (unlikely(!tx_work_todo(vif)))
return 0;
- nr_gops = xenvif_tx_build_gops(vif, budget);
+ xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops);
- if (nr_gops == 0)
+ if (nr_cops == 0)
return 0;
- gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
+ gnttab_batch_copy(vif->tx_copy_ops, nr_cops);
+ if (nr_mops != 0) {
+ ret = gnttab_map_refs(vif->tx_map_ops,
+ NULL,
+ vif->pages_to_map,
+ nr_mops);
+ BUG_ON(ret);
+ }
work_done = xenvif_tx_submit(vif);
@@ -1380,45 +1655,18 @@ static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
u8 status)
{
struct pending_tx_info *pending_tx_info;
- pending_ring_idx_t head;
- u16 peek; /* peek into next tx request */
-
- BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
-
- /* Already complete? */
- if (vif->mmap_pages[pending_idx] == NULL)
- return;
+ pending_ring_idx_t index;
+ unsigned long flags;
pending_tx_info = &vif->pending_tx_info[pending_idx];
-
- head = pending_tx_info->head;
-
- BUG_ON(!pending_tx_is_head(vif, head));
- BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
-
- do {
- pending_ring_idx_t index;
- pending_ring_idx_t idx = pending_index(head);
- u16 info_idx = vif->pending_ring[idx];
-
- pending_tx_info = &vif->pending_tx_info[info_idx];
- make_tx_response(vif, &pending_tx_info->req, status);
-
- /* Setting any number other than
- * INVALID_PENDING_RING_IDX indicates this slot is
- * starting a new packet / ending a previous packet.
- */
- pending_tx_info->head = 0;
-
- index = pending_index(vif->pending_prod++);
- vif->pending_ring[index] = vif->pending_ring[info_idx];
-
- peek = vif->pending_ring[pending_index(++head)];
-
- } while (!pending_tx_is_head(vif, peek));
-
- put_page(vif->mmap_pages[pending_idx]);
- vif->mmap_pages[pending_idx] = NULL;
+ spin_lock_irqsave(&vif->response_lock, flags);
+ make_tx_response(vif, &pending_tx_info->req, status);
+ index = pending_index(vif->pending_prod);
+ vif->pending_ring[index] = pending_idx;
+ /* TX shouldn't use the index before we give it back here */
+ mb();
+ vif->pending_prod++;
+ spin_unlock_irqrestore(&vif->response_lock, flags);
}
@@ -1466,23 +1714,54 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
return resp;
}
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
+{
+ int ret;
+ struct gnttab_unmap_grant_ref tx_unmap_op;
+
+ gnttab_set_unmap_op(&tx_unmap_op,
+ idx_to_kaddr(vif, pending_idx),
+ GNTMAP_host_map,
+ vif->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(vif, pending_idx);
+
+ ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
+ &vif->mmap_pages[pending_idx], 1);
+ if (ret) {
+ netdev_err(vif->dev,
+ "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+ ret,
+ pending_idx,
+ tx_unmap_op.host_addr,
+ tx_unmap_op.handle,
+ tx_unmap_op.status);
+ BUG();
+ }
+
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+}
+
static inline int rx_work_todo(struct xenvif *vif)
{
- return !skb_queue_empty(&vif->rx_queue) &&
- xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
+ return (!skb_queue_empty(&vif->rx_queue) &&
+ xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
+ vif->rx_queue_purge;
}
static inline int tx_work_todo(struct xenvif *vif)
{
- if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
- (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS))
+ if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
return 1;
return 0;
}
+static inline bool tx_dealloc_work_todo(struct xenvif *vif)
+{
+ return vif->dealloc_cons != vif->dealloc_prod;
+}
+
void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
if (vif->tx.sring)
@@ -1540,7 +1819,7 @@ static void xenvif_start_queue(struct xenvif *vif)
netif_wake_queue(vif->dev);
}
-int xenvif_kthread(void *data)
+int xenvif_kthread_guest_rx(void *data)
{
struct xenvif *vif = data;
struct sk_buff *skb;
@@ -1548,16 +1827,34 @@ int xenvif_kthread(void *data)
while (!kthread_should_stop()) {
wait_event_interruptible(vif->wq,
rx_work_todo(vif) ||
+ vif->disabled ||
kthread_should_stop());
+
+ /* This frontend has been found to be rogue, so disable it
+ * in kthread context. Currently this is only set when
+ * netback finds that the frontend sent a malformed packet,
+ * but we cannot disable the interface in softirq
+ * context, so we defer it here.
+ */
+ if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
+ xenvif_carrier_off(vif);
+
if (kthread_should_stop())
break;
+ if (vif->rx_queue_purge) {
+ skb_queue_purge(&vif->rx_queue);
+ vif->rx_queue_purge = false;
+ }
+
if (!skb_queue_empty(&vif->rx_queue))
xenvif_rx_action(vif);
if (skb_queue_empty(&vif->rx_queue) &&
- netif_queue_stopped(vif->dev))
+ netif_queue_stopped(vif->dev)) {
+ del_timer_sync(&vif->wake_queue);
xenvif_start_queue(vif);
+ }
cond_resched();
}
@@ -1569,6 +1866,28 @@ int xenvif_kthread(void *data)
return 0;
}
+int xenvif_dealloc_kthread(void *data)
+{
+ struct xenvif *vif = data;
+
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(vif->dealloc_wq,
+ tx_dealloc_work_todo(vif) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ xenvif_tx_dealloc_action(vif);
+ cond_resched();
+ }
+
+ /* Unmap anything remaining */
+ if (tx_dealloc_work_todo(vif))
+ xenvif_tx_dealloc_action(vif);
+
+ return 0;
+}
+
static int __init netback_init(void)
{
int rc = 0;
@@ -1586,6 +1905,8 @@ static int __init netback_init(void)
if (rc)
goto failed_init;
+ rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
+
return 0;
failed_init:
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e30d80033cbc..057b05700f8b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -658,7 +658,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
drop:
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1060,13 +1060,13 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_bh(&stats->syncp);
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
rx_packets = stats->rx_packets;
tx_packets = stats->tx_packets;
rx_bytes = stats->rx_bytes;
tx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets;
@@ -1282,16 +1282,10 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
np->rx_refill_timer.function = rx_refill_timeout;
err = -ENOMEM;
- np->stats = alloc_percpu(struct netfront_stats);
+ np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
if (np->stats == NULL)
goto exit;
- for_each_possible_cpu(i) {
- struct netfront_stats *xen_nf_stats;
- xen_nf_stats = per_cpu_ptr(np->stats, i);
- u64_stats_init(&xen_nf_stats->syncp);
- }
-
/* Initialise tx_skbs as a free chain containing every entry. */
np->tx_skb_freelist = 0;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index fe20e1cc0545..65d4ca19d132 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -26,6 +26,18 @@ config NFC_WILINK
Say Y here to compile support for Texas Instrument's NFC WiLink driver
into the kernel or say M to compile it as module.
+config NFC_TRF7970A
+ tristate "Texas Instruments TRF7970a NFC driver"
+ depends on SPI && NFC_DIGITAL
+ help
+ This option enables the NFC driver for Texas Instruments' TRF7970a
+ device. This device supports five different protocols: ISO14443A,
+ ISO14443B, FeLiCa, ISO15693 and ISO18000-3.
+
+ Say Y here to compile support for TRF7970a into the kernel or
+ say M to compile it as a module. The module will be called
+ trf7970a.ko.
+
config NFC_MEI_PHY
tristate "MEI bus NFC device support"
depends on INTEL_MEI && NFC_HCI
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 56ab822ba03d..ae42a3fa60c9 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o
obj-$(CONFIG_NFC_SIM) += nfcsim.o
obj-$(CONFIG_NFC_PORT100) += port100.o
obj-$(CONFIG_NFC_MRVL) += nfcmrvl/
+obj-$(CONFIG_NFC_TRF7970A) += trf7970a.o
ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index cf1a87bb74f8..d46a700a9637 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -55,26 +55,14 @@
NFC_PROTO_NFC_DEP_MASK)
static const struct usb_device_id pn533_table[] = {
- { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = PN533_VENDOR_ID,
- .idProduct = PN533_PRODUCT_ID,
- .driver_info = PN533_DEVICE_STD,
- },
- { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = SCM_VENDOR_ID,
- .idProduct = SCL3711_PRODUCT_ID,
- .driver_info = PN533_DEVICE_STD,
- },
- { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = SONY_VENDOR_ID,
- .idProduct = PASORI_PRODUCT_ID,
- .driver_info = PN533_DEVICE_PASORI,
- },
- { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = ACS_VENDOR_ID,
- .idProduct = ACR122U_PRODUCT_ID,
- .driver_info = PN533_DEVICE_ACR122U,
- },
+ { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_STD },
+ { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_STD },
+ { USB_DEVICE(SONY_VENDOR_ID, PASORI_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_PASORI },
+ { USB_DEVICE(ACS_VENDOR_ID, ACR122U_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_ACR122U },
{ }
};
MODULE_DEVICE_TABLE(usb, pn533_table);
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index d6185ff2f87b..f2acd85be86e 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -58,8 +58,19 @@ MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
#define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
+/*
+ * Exposed through the 4 most significant bits
+ * of the HCI SW_VERSION first byte, a.k.a.
+ * SW RomLib.
+ */
+#define PN544_HW_VARIANT_C2 0xa
+#define PN544_HW_VARIANT_C3 0xb
+
+#define PN544_FW_CMD_RESET 0x01
#define PN544_FW_CMD_WRITE 0x08
#define PN544_FW_CMD_CHECK 0x06
+#define PN544_FW_CMD_SECURE_WRITE 0x0C
+#define PN544_FW_CMD_SECURE_CHUNK_WRITE 0x0D
struct pn544_i2c_fw_frame_write {
u8 cmd;
@@ -88,13 +99,31 @@ struct pn544_i2c_fw_blob {
u8 data[];
};
+struct pn544_i2c_fw_secure_frame {
+ u8 cmd;
+ u16 be_datalen;
+ u8 data[];
+} __packed;
+
+struct pn544_i2c_fw_secure_blob {
+ u64 header;
+ u8 data[];
+};
+
#define PN544_FW_CMD_RESULT_TIMEOUT 0x01
#define PN544_FW_CMD_RESULT_BAD_CRC 0x02
#define PN544_FW_CMD_RESULT_ACCESS_DENIED 0x08
#define PN544_FW_CMD_RESULT_PROTOCOL_ERROR 0x0B
#define PN544_FW_CMD_RESULT_INVALID_PARAMETER 0x11
+#define PN544_FW_CMD_RESULT_UNSUPPORTED_COMMAND 0x13
#define PN544_FW_CMD_RESULT_INVALID_LENGTH 0x18
+#define PN544_FW_CMD_RESULT_CRYPTOGRAPHIC_ERROR 0x19
+#define PN544_FW_CMD_RESULT_VERSION_CONDITIONS_ERROR 0x1D
+#define PN544_FW_CMD_RESULT_MEMORY_ERROR 0x20
+#define PN544_FW_CMD_RESULT_CHUNK_OK 0x21
#define PN544_FW_CMD_RESULT_WRITE_FAILED 0x74
+#define PN544_FW_CMD_RESULT_COMMAND_REJECTED 0xE0
+#define PN544_FW_CMD_RESULT_CHUNK_ERROR 0xE6
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
@@ -104,11 +133,17 @@ struct pn544_i2c_fw_blob {
#define PN544_FW_I2C_WRITE_DATA_MAX_LEN MIN((PN544_FW_I2C_MAX_PAYLOAD -\
PN544_FW_I2C_WRITE_FRAME_HEADER_LEN),\
PN544_FW_WRITE_BUFFER_MAX_LEN)
+#define PN544_FW_SECURE_CHUNK_WRITE_HEADER_LEN 3
+#define PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN (PN544_FW_I2C_MAX_PAYLOAD -\
+ PN544_FW_SECURE_CHUNK_WRITE_HEADER_LEN)
+#define PN544_FW_SECURE_FRAME_HEADER_LEN 3
+#define PN544_FW_SECURE_BLOB_HEADER_LEN 8
#define FW_WORK_STATE_IDLE 1
#define FW_WORK_STATE_START 2
#define FW_WORK_STATE_WAIT_WRITE_ANSWER 3
#define FW_WORK_STATE_WAIT_CHECK_ANSWER 4
+#define FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER 5
struct pn544_i2c_phy {
struct i2c_client *i2c_dev;
@@ -119,6 +154,8 @@ struct pn544_i2c_phy {
unsigned int gpio_fw;
unsigned int en_polarity;
+ u8 hw_variant;
+
struct work_struct fw_work;
int fw_work_state;
char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
@@ -127,6 +164,8 @@ struct pn544_i2c_phy {
size_t fw_blob_size;
const u8 *fw_blob_data;
size_t fw_written;
+ size_t fw_size;
+
int fw_cmd_result;
int powered;
@@ -390,6 +429,8 @@ static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
switch (response.status) {
case 0:
return 0;
+ case PN544_FW_CMD_RESULT_CHUNK_OK:
+ return response.status;
case PN544_FW_CMD_RESULT_TIMEOUT:
return -ETIMEDOUT;
case PN544_FW_CMD_RESULT_BAD_CRC:
@@ -400,9 +441,20 @@ static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
return -EPROTO;
case PN544_FW_CMD_RESULT_INVALID_PARAMETER:
return -EINVAL;
+ case PN544_FW_CMD_RESULT_UNSUPPORTED_COMMAND:
+ return -ENOTSUPP;
case PN544_FW_CMD_RESULT_INVALID_LENGTH:
return -EBADMSG;
+ case PN544_FW_CMD_RESULT_CRYPTOGRAPHIC_ERROR:
+ return -ENOKEY;
+ case PN544_FW_CMD_RESULT_VERSION_CONDITIONS_ERROR:
+ return -EINVAL;
+ case PN544_FW_CMD_RESULT_MEMORY_ERROR:
+ return -ENOMEM;
+ case PN544_FW_CMD_RESULT_COMMAND_REJECTED:
+ return -EACCES;
case PN544_FW_CMD_RESULT_WRITE_FAILED:
+ case PN544_FW_CMD_RESULT_CHUNK_ERROR:
return -EIO;
default:
return -EIO;
@@ -469,7 +521,8 @@ static struct nfc_phy_ops i2c_phy_ops = {
.disable = pn544_hci_i2c_disable,
};
-static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
+static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name,
+ u8 hw_variant)
{
struct pn544_i2c_phy *phy = phy_id;
@@ -477,6 +530,7 @@ static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
strcpy(phy->firmware_name, firmware_name);
+ phy->hw_variant = hw_variant;
phy->fw_work_state = FW_WORK_STATE_START;
schedule_work(&phy->fw_work);
@@ -598,12 +652,93 @@ static int pn544_hci_i2c_fw_write_chunk(struct pn544_i2c_phy *phy)
return 0;
}
+static int pn544_hci_i2c_fw_secure_write_frame_cmd(struct pn544_i2c_phy *phy,
+ const u8 *data, u16 datalen)
+{
+ u8 buf[PN544_FW_I2C_MAX_PAYLOAD];
+ struct pn544_i2c_fw_secure_frame *chunk;
+ int chunklen;
+ int r;
+
+ if (datalen > PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN)
+ datalen = PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN;
+
+ chunk = (struct pn544_i2c_fw_secure_frame *) buf;
+
+ chunk->cmd = PN544_FW_CMD_SECURE_CHUNK_WRITE;
+
+ put_unaligned_be16(datalen, &chunk->be_datalen);
+
+ memcpy(chunk->data, data, datalen);
+
+ chunklen = sizeof(chunk->cmd) + sizeof(chunk->be_datalen) + datalen;
+
+ r = i2c_master_send(phy->i2c_dev, buf, chunklen);
+
+ if (r == chunklen)
+ return datalen;
+ else if (r < 0)
+ return r;
+ else
+ return -EIO;
+
+}
+
+static int pn544_hci_i2c_fw_secure_write_frame(struct pn544_i2c_phy *phy)
+{
+ struct pn544_i2c_fw_secure_frame *framep;
+ int r;
+
+ framep = (struct pn544_i2c_fw_secure_frame *) phy->fw_blob_data;
+ if (phy->fw_written == 0)
+ phy->fw_blob_size = get_unaligned_be16(&framep->be_datalen)
+ + PN544_FW_SECURE_FRAME_HEADER_LEN;
+
+ /* Only the secure write command can be chunked */
+ if (phy->fw_blob_size > PN544_FW_I2C_MAX_PAYLOAD &&
+ framep->cmd != PN544_FW_CMD_SECURE_WRITE)
+ return -EINVAL;
+
+ /* The firmware also has other commands; we just send them directly */
+ if (phy->fw_blob_size < PN544_FW_I2C_MAX_PAYLOAD) {
+ r = i2c_master_send(phy->i2c_dev,
+ (const char *) phy->fw_blob_data, phy->fw_blob_size);
+
+ if (r == phy->fw_blob_size)
+ goto exit;
+ else if (r < 0)
+ return r;
+ else
+ return -EIO;
+ }
+
+ r = pn544_hci_i2c_fw_secure_write_frame_cmd(phy,
+ phy->fw_blob_data + phy->fw_written,
+ phy->fw_blob_size - phy->fw_written);
+ if (r < 0)
+ return r;
+
+exit:
+ phy->fw_written += r;
+ phy->fw_work_state = FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER;
+
+ /* The SW reset command will not trigger any response from the PN544 */
+ if (framep->cmd == PN544_FW_CMD_RESET) {
+ pn544_hci_i2c_enable_mode(phy, PN544_FW_MODE);
+ phy->fw_cmd_result = 0;
+ schedule_work(&phy->fw_work);
+ }
+
+ return 0;
+}
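[Editor's note, not part of the patch] The secure path above treats the firmware image as a chain of blobs, each an 8-byte header (PN544_FW_SECURE_BLOB_HEADER_LEN) followed by one frame whose first three bytes are the command and a big-endian payload length (PN544_FW_SECURE_FRAME_HEADER_LEN); the work handler further down advances fw_blob_data and shrinks fw_size by exactly those amounts. A minimal userspace sketch of the same walk, assuming that layout; walk_secure_image() and get_be16() are made-up helpers, not driver code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SECURE_BLOB_HEADER_LEN  8  /* u64 header in front of each blob    */
#define SECURE_FRAME_HEADER_LEN 3  /* cmd byte + big-endian 16-bit length */

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* Print the command byte and total length of every frame in the image. */
static void walk_secure_image(const uint8_t *image, size_t size)
{
	while (size >= SECURE_BLOB_HEADER_LEN + SECURE_FRAME_HEADER_LEN) {
		const uint8_t *frame = image + SECURE_BLOB_HEADER_LEN;
		size_t frame_len = SECURE_FRAME_HEADER_LEN + get_be16(frame + 1);

		if (SECURE_BLOB_HEADER_LEN + frame_len > size)
			break;          /* truncated or corrupt image */

		printf("cmd 0x%02x, frame %zu bytes\n", frame[0], frame_len);

		image += SECURE_BLOB_HEADER_LEN + frame_len;
		size  -= SECURE_BLOB_HEADER_LEN + frame_len;
	}
}

int main(void)
{
	/* One fake blob: 8-byte header, then cmd 0x08 with a 2-byte payload. */
	static const uint8_t image[] = {
		0, 0, 0, 0, 0, 0, 0, 0,    /* blob header   */
		0x08, 0x00, 0x02,          /* cmd + be16(2) */
		0xaa, 0xbb,                /* payload       */
	};

	walk_secure_image(image, sizeof(image));
	return 0;
}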
+
static void pn544_hci_i2c_fw_work(struct work_struct *work)
{
struct pn544_i2c_phy *phy = container_of(work, struct pn544_i2c_phy,
fw_work);
int r;
struct pn544_i2c_fw_blob *blob;
+ struct pn544_i2c_fw_secure_blob *secure_blob;
switch (phy->fw_work_state) {
case FW_WORK_STATE_START:
@@ -614,13 +749,29 @@ static void pn544_hci_i2c_fw_work(struct work_struct *work)
if (r < 0)
goto exit_state_start;
- blob = (struct pn544_i2c_fw_blob *) phy->fw->data;
- phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
- phy->fw_blob_dest_addr = get_unaligned_be32(&blob->be_destaddr);
- phy->fw_blob_data = blob->data;
-
phy->fw_written = 0;
- r = pn544_hci_i2c_fw_write_chunk(phy);
+
+ switch (phy->hw_variant) {
+ case PN544_HW_VARIANT_C2:
+ blob = (struct pn544_i2c_fw_blob *) phy->fw->data;
+ phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
+ phy->fw_blob_dest_addr = get_unaligned_be32(
+ &blob->be_destaddr);
+ phy->fw_blob_data = blob->data;
+
+ r = pn544_hci_i2c_fw_write_chunk(phy);
+ break;
+ case PN544_HW_VARIANT_C3:
+ secure_blob = (struct pn544_i2c_fw_secure_blob *)
+ phy->fw->data;
+ phy->fw_blob_data = secure_blob->data;
+ phy->fw_size = phy->fw->size;
+ r = pn544_hci_i2c_fw_secure_write_frame(phy);
+ break;
+ default:
+ r = -ENOTSUPP;
+ break;
+ }
exit_state_start:
if (r < 0)
@@ -672,6 +823,35 @@ exit_state_wait_check_answer:
pn544_hci_i2c_fw_work_complete(phy, r);
break;
+ case FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER:
+ r = phy->fw_cmd_result;
+ if (r < 0)
+ goto exit_state_wait_secure_write_answer;
+
+ if (r == PN544_FW_CMD_RESULT_CHUNK_OK) {
+ r = pn544_hci_i2c_fw_secure_write_frame(phy);
+ goto exit_state_wait_secure_write_answer;
+ }
+
+ if (phy->fw_written == phy->fw_blob_size) {
+ secure_blob = (struct pn544_i2c_fw_secure_blob *)
+ (phy->fw_blob_data + phy->fw_blob_size);
+ phy->fw_size -= phy->fw_blob_size +
+ PN544_FW_SECURE_BLOB_HEADER_LEN;
+ if (phy->fw_size >= PN544_FW_SECURE_BLOB_HEADER_LEN
+ + PN544_FW_SECURE_FRAME_HEADER_LEN) {
+ phy->fw_blob_data = secure_blob->data;
+
+ phy->fw_written = 0;
+ r = pn544_hci_i2c_fw_secure_write_frame(phy);
+ }
+ }
+
+exit_state_wait_secure_write_answer:
+ if (r < 0 || phy->fw_size == 0)
+ pn544_hci_i2c_fw_work_complete(phy, r);
+ break;
+
default:
break;
}
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 3df4a109cfad..9c8051d20cea 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -786,7 +786,7 @@ static int pn544_hci_fw_download(struct nfc_hci_dev *hdev,
if (info->fw_download == NULL)
return -ENOTSUPP;
- return info->fw_download(info->phy_id, firmware_name);
+ return info->fw_download(info->phy_id, firmware_name, hdev->sw_romlib);
}
static int pn544_hci_discover_se(struct nfc_hci_dev *hdev)
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
index 491bf45da358..2aa9233e8086 100644
--- a/drivers/nfc/pn544/pn544.h
+++ b/drivers/nfc/pn544/pn544.h
@@ -25,7 +25,8 @@
#define PN544_HCI_MODE 0
#define PN544_FW_MODE 1
-typedef int (*fw_download_t)(void *context, const char *firmware_name);
+typedef int (*fw_download_t)(void *context, const char *firmware_name,
+ u8 hw_variant);
int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
int phy_headroom, int phy_tailroom, int phy_payload,
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index a8555f81cbba..b7a372af5eb7 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -27,7 +27,8 @@
#define PORT100_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
NFC_PROTO_MIFARE_MASK | \
NFC_PROTO_FELICA_MASK | \
- NFC_PROTO_NFC_DEP_MASK)
+ NFC_PROTO_NFC_DEP_MASK | \
+ NFC_PROTO_ISO14443_MASK)
#define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
NFC_DIGITAL_DRV_CAPS_TG_CRC)
@@ -139,6 +140,8 @@ static const struct port100_in_rf_setting in_rf_settings[] = {
.in_recv_set_number = 15,
.in_recv_comm_type = PORT100_COMM_TYPE_IN_106A,
},
+ /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */
+ [NFC_DIGITAL_RF_TECH_LAST] = { 0 },
};
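[Editor's note, not part of the patch] The [NFC_DIGITAL_RF_TECH_LAST] = { 0 } entry above (and the matching sentinels in the framing tables below) uses a C designated initializer purely to force the array to cover every value below the _LAST sentinel, so indexing with any technology or framing value stays in bounds without a separate check. A standalone illustration of the trick with made-up names:

#include <stdio.h>

/* Made-up stand-ins for the digital-layer enum and the settings struct. */
enum rf_tech { TECH_106A, TECH_212F, TECH_424F, RF_TECH_LAST };

struct rf_setting { int set_number; };

static const struct rf_setting settings[] = {
	[TECH_106A]    = { .set_number = 1 },
	[TECH_212F]    = { .set_number = 2 },
	/* Forces the array to span every value below RF_TECH_LAST. */
	[RF_TECH_LAST] = { 0 },
};

int main(void)
{
	/* Prints 4: RF_TECH_LAST + 1 entries, unhandled slots zero-filled. */
	printf("%zu entries\n", sizeof(settings) / sizeof(settings[0]));
	return 0;
}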
/**
@@ -174,6 +177,9 @@ static const struct port100_tg_rf_setting tg_rf_settings[] = {
.tg_set_number = 8,
.tg_comm_type = PORT100_COMM_TYPE_TG_424F,
},
+ /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */
+ [NFC_DIGITAL_RF_TECH_LAST] = { 0 },
+
};
#define PORT100_IN_PROT_INITIAL_GUARD_TIME 0x00
@@ -293,6 +299,10 @@ in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
{ PORT100_IN_PROT_CHECK_CRC, 0 },
{ PORT100_IN_PROT_END, 0 },
},
+ [NFC_DIGITAL_FRAMING_NFCA_T4T] = {
+ /* nfc_digital_framing_nfca_standard_with_crc_a */
+ { PORT100_IN_PROT_END, 0 },
+ },
[NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = {
/* nfc_digital_framing_nfca_standard */
{ PORT100_IN_PROT_END, 0 },
@@ -330,6 +340,10 @@ in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
[NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
{ PORT100_IN_PROT_END, 0 },
},
+ /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */
+ [NFC_DIGITAL_FRAMING_LAST] = {
+ { PORT100_IN_PROT_END, 0 },
+ },
};
static struct port100_protocol
@@ -371,6 +385,10 @@ tg_protocols[][PORT100_TG_MAX_NUM_PROTOCOLS + 1] = {
{ PORT100_TG_PROT_RF_OFF, 1 },
{ PORT100_TG_PROT_END, 0 },
},
+ /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */
+ [NFC_DIGITAL_FRAMING_LAST] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
};
struct port100 {
@@ -1356,10 +1374,7 @@ static struct nfc_digital_ops port100_digital_ops = {
};
static const struct usb_device_id port100_table[] = {
- { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = SONY_VENDOR_ID,
- .idProduct = RCS380_PRODUCT_ID,
- },
+ { USB_DEVICE(SONY_VENDOR_ID, RCS380_PRODUCT_ID), },
{ }
};
MODULE_DEVICE_TABLE(usb, port100_table);
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
new file mode 100644
index 000000000000..d9babe986473
--- /dev/null
+++ b/drivers/nfc/trf7970a.c
@@ -0,0 +1,1370 @@
+/*
+ * TI TRF7970a RFID/NFC Transceiver Driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Erick Macias <emacias@ti.com>
+ * Author: Felipe Balbi <balbi@ti.com>
+ * Author: Mark A. Greer <mgreer@animalcreek.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/nfc.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/digital.h>
+
+/* There are 3 ways the host can communicate with the trf7970a:
+ * parallel mode, SPI with Slave Select (SS) mode, and SPI without
+ * SS mode. The driver only supports the two SPI modes.
+ *
+ * The trf7970a is very timing sensitive and the VIN, EN2, and EN
+ * pins must be asserted in that order and with specific delays in between.
+ * The delays used in the driver were provided by TI and have been
+ * confirmed to work with this driver.
+ *
+ * Timeouts are implemented using the delayed workqueue kernel facility.
+ * Timeouts are required so things don't hang when there is no response
+ * from the trf7970a (or tag). Using this mechanism creates a race with
+ * interrupts, however. That is, an interrupt and a timeout could occur
+ * closely enough together that one is blocked by the mutex while the other
+ * executes. When the timeout handler executes first and blocks the
+ * interrupt handler, it will eventually set the state to IDLE so the
+ * interrupt handler will check the state and exit with no harm done.
+ * When the interrupt handler executes first and blocks the timeout handler,
+ * the cancel_delayed_work() call will know that it didn't cancel the
+ * work item (i.e., timeout) and will return zero. That return code is
+ * used by the timer handler to indicate that it should ignore the timeout
+ * once it's unblocked.
+ *
+ * Aborting an active command isn't as simple as it seems because the only
+ * way to abort a command that's already been sent to the tag is to turn
+ * off power to the tag. If we do that, though, we'd have to go through
+ * the entire anticollision procedure again but the digital layer doesn't
+ * support that. So, if an abort is received before trf7970a_in_send_cmd()
+ * has sent the command to the tag, it simply returns -ECANCELED. If the
+ * command has already been sent to the tag, then the driver continues
+ * normally and receives the response data (or error) but just before
+ * sending the data upstream, it frees the rx_skb and sends -ECANCELED
+ * upstream instead. If the command failed, that error will be sent
+ * upstream.
+ *
+ * When receiving data from a tag and the interrupt status register has
+ * only the SRX bit set, it means that all of the data has been received
+ * (once what's in the fifo has been read). However, depending on timing
+ * an interrupt status with only the SRX bit set may not be received. In
+ * those cases, the timeout mechanism is used to wait 5 ms in case more
+ * data arrives. After 5 ms, it is assumed that all of the data has been
+ * received and the accumulated rx data is sent upstream. The
+ * 'TRF7970A_ST_WAIT_FOR_RX_DATA_CONT' state is used for this purpose
+ * (i.e., it indicates that some data has been received but we're not sure
+ * if there is more coming so a timeout in this state means all data has
+ * been received and there isn't an error). The delay is 5 ms since delays
+ * over 2 ms have been observed during testing (a little extra just in case).
+ *
+ * Type 2 write and sector select commands respond with a 4-bit ACK or NACK.
+ * Having only 4 bits in the FIFO won't normally generate an interrupt so
+ * the driver enables the '4_bit_RX' bit of the Special Functions register 1
+ * to cause an interrupt in that case. Leaving that bit for a read command
+ * messes up the data returned so it is only enabled when the framing is
+ * 'NFC_DIGITAL_FRAMING_NFCA_T2T' and the command is not a read command.
+ * Unfortunately, that means that the driver has to peek into tx frames
+ * when the framing is 'NFC_DIGITAL_FRAMING_NFCA_T2T'. This is done by
+ * the trf7970a_per_cmd_config() routine.
+ *
+ * ISO/IEC 15693 frames specify whether to use single or double sub-carrier
+ * frequencies and whether to use low or high data rates in the flags byte
+ * of the frame. This means that the driver has to peek at all 15693 frames
+ * to determine what speed to set the communication to. In addition, write
+ * and lock commands use the OPTION flag to indicate that an EOF must be
+ * sent to the tag before it will send its response. So the driver has to
+ * examine all frames for that reason too.
+ *
+ * It is unclear how long to wait before sending the EOF. According to the
+ * Note under Table 1-1 in section 1.6 of
+ * http://www.ti.com/lit/ug/scbu011/scbu011.pdf, that wait should be at least
+ * 10 ms for TI Tag-it HF-I tags; however testing has shown that is not long
+ * enough. For this reason, the driver waits 20 ms which seems to work
+ * reliably.
+ */
+
+#define TRF7970A_SUPPORTED_PROTOCOLS \
+ (NFC_PROTO_MIFARE_MASK | NFC_PROTO_ISO14443_MASK | \
+ NFC_PROTO_ISO15693_MASK)
+
+/* TX data must be prefixed with a FIFO reset cmd, a cmd that depends
+ * on what the current framing is, the address of the TX length byte 1
+ * register (0x1d), and the 2 byte length of the data to be transmitted.
+ * That totals 5 bytes.
+ */
+#define TRF7970A_TX_SKB_HEADROOM 5
+
+#define TRF7970A_RX_SKB_ALLOC_SIZE 256
+
+#define TRF7970A_FIFO_SIZE 128
+
+/* TX length is 3 nibbles long ==> 4KB - 1 bytes max */
+#define TRF7970A_TX_MAX (4096 - 1)
+
+#define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT 5
+#define TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT 3
+#define TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF 20
+
+/* Quirks */
+/* Erratum: When reading IRQ Status register on trf7970a, we must issue a
+ * read continuous command for IRQ Status and Collision Position registers.
+ */
+#define TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA BIT(0)
+
+/* Direct commands */
+#define TRF7970A_CMD_IDLE 0x00
+#define TRF7970A_CMD_SOFT_INIT 0x03
+#define TRF7970A_CMD_RF_COLLISION 0x04
+#define TRF7970A_CMD_RF_COLLISION_RESPONSE_N 0x05
+#define TRF7970A_CMD_RF_COLLISION_RESPONSE_0 0x06
+#define TRF7970A_CMD_FIFO_RESET 0x0f
+#define TRF7970A_CMD_TRANSMIT_NO_CRC 0x10
+#define TRF7970A_CMD_TRANSMIT 0x11
+#define TRF7970A_CMD_DELAY_TRANSMIT_NO_CRC 0x12
+#define TRF7970A_CMD_DELAY_TRANSMIT 0x13
+#define TRF7970A_CMD_EOF 0x14
+#define TRF7970A_CMD_CLOSE_SLOT 0x15
+#define TRF7970A_CMD_BLOCK_RX 0x16
+#define TRF7970A_CMD_ENABLE_RX 0x17
+#define TRF7970A_CMD_TEST_EXT_RF 0x18
+#define TRF7970A_CMD_TEST_INT_RF 0x19
+#define TRF7970A_CMD_RX_GAIN_ADJUST 0x1a
+
+/* Bits determining whether it's a direct command or register R/W,
+ * whether to use a continuous SPI transaction or not, and the actual
+ * direct cmd opcode or register address.
+ */
+#define TRF7970A_CMD_BIT_CTRL BIT(7)
+#define TRF7970A_CMD_BIT_RW BIT(6)
+#define TRF7970A_CMD_BIT_CONTINUOUS BIT(5)
+#define TRF7970A_CMD_BIT_OPCODE(opcode) ((opcode) & 0x1f)
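[Editor's note, not part of the patch] The first byte of every SPI transfer is built from these bits: a direct command sets the control bit and carries the 5-bit opcode, while a register access carries the register address plus the read and/or continuous bits (see trf7970a_cmd() and trf7970a_read_cont() further down). A tiny standalone check of that composition, with the constants restated locally so it compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define CMD_BIT_CTRL        0x80    /* mirrors TRF7970A_CMD_BIT_CTRL       */
#define CMD_BIT_RW          0x40    /* mirrors TRF7970A_CMD_BIT_RW         */
#define CMD_BIT_CONTINUOUS  0x20    /* mirrors TRF7970A_CMD_BIT_CONTINUOUS */

int main(void)
{
	/* Direct command: control bit plus the 5-bit FIFO reset opcode (0x0f). */
	uint8_t fifo_reset = CMD_BIT_CTRL | (0x0f & 0x1f);
	/* Continuous register read starting at IRQ Status (address 0x0c). */
	uint8_t irq_read   = 0x0c | CMD_BIT_RW | CMD_BIT_CONTINUOUS;

	printf("0x%02x 0x%02x\n", fifo_reset, irq_read);   /* 0x8f 0x6c */
	return 0;
}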
+
+/* Registers addresses */
+#define TRF7970A_CHIP_STATUS_CTRL 0x00
+#define TRF7970A_ISO_CTRL 0x01
+#define TRF7970A_ISO14443B_TX_OPTIONS 0x02
+#define TRF7970A_ISO14443A_HIGH_BITRATE_OPTIONS 0x03
+#define TRF7970A_TX_TIMER_SETTING_H_BYTE 0x04
+#define TRF7970A_TX_TIMER_SETTING_L_BYTE 0x05
+#define TRF7970A_TX_PULSE_LENGTH_CTRL 0x06
+#define TRF7970A_RX_NO_RESPONSE_WAIT 0x07
+#define TRF7970A_RX_WAIT_TIME 0x08
+#define TRF7970A_MODULATOR_SYS_CLK_CTRL 0x09
+#define TRF7970A_RX_SPECIAL_SETTINGS 0x0a
+#define TRF7970A_REG_IO_CTRL 0x0b
+#define TRF7970A_IRQ_STATUS 0x0c
+#define TRF7970A_COLLISION_IRQ_MASK 0x0d
+#define TRF7970A_COLLISION_POSITION 0x0e
+#define TRF7970A_RSSI_OSC_STATUS 0x0f
+#define TRF7970A_SPECIAL_FCN_REG1 0x10
+#define TRF7970A_SPECIAL_FCN_REG2 0x11
+#define TRF7970A_RAM1 0x12
+#define TRF7970A_RAM2 0x13
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS 0x14
+#define TRF7970A_NFC_LOW_FIELD_LEVEL 0x16
+#define TRF7970A_NFCID1 0x17
+#define TRF7970A_NFC_TARGET_LEVEL 0x18
+#define TRF79070A_NFC_TARGET_PROTOCOL 0x19
+#define TRF7970A_TEST_REGISTER1 0x1a
+#define TRF7970A_TEST_REGISTER2 0x1b
+#define TRF7970A_FIFO_STATUS 0x1c
+#define TRF7970A_TX_LENGTH_BYTE1 0x1d
+#define TRF7970A_TX_LENGTH_BYTE2 0x1e
+#define TRF7970A_FIFO_IO_REGISTER 0x1f
+
+/* Chip Status Control Register Bits */
+#define TRF7970A_CHIP_STATUS_VRS5_3 BIT(0)
+#define TRF7970A_CHIP_STATUS_REC_ON BIT(1)
+#define TRF7970A_CHIP_STATUS_AGC_ON BIT(2)
+#define TRF7970A_CHIP_STATUS_PM_ON BIT(3)
+#define TRF7970A_CHIP_STATUS_RF_PWR BIT(4)
+#define TRF7970A_CHIP_STATUS_RF_ON BIT(5)
+#define TRF7970A_CHIP_STATUS_DIRECT BIT(6)
+#define TRF7970A_CHIP_STATUS_STBY BIT(7)
+
+/* ISO Control Register Bits */
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF4_662 0x00
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF256_662 0x01
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648 0x02
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF256_2648 0x03
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a 0x04
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF256_667 0x05
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669 0x06
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF256_2669 0x07
+#define TRF7970A_ISO_CTRL_14443A_106 0x08
+#define TRF7970A_ISO_CTRL_14443A_212 0x09
+#define TRF7970A_ISO_CTRL_14443A_424 0x0a
+#define TRF7970A_ISO_CTRL_14443A_848 0x0b
+#define TRF7970A_ISO_CTRL_14443B_106 0x0c
+#define TRF7970A_ISO_CTRL_14443B_212 0x0d
+#define TRF7970A_ISO_CTRL_14443B_424 0x0e
+#define TRF7970A_ISO_CTRL_14443B_848 0x0f
+#define TRF7970A_ISO_CTRL_FELICA_212 0x1a
+#define TRF7970A_ISO_CTRL_FELICA_424 0x1b
+#define TRF7970A_ISO_CTRL_RFID BIT(5)
+#define TRF7970A_ISO_CTRL_DIR_MODE BIT(6)
+#define TRF7970A_ISO_CTRL_RX_CRC_N BIT(7) /* true == No CRC */
+
+#define TRF7970A_ISO_CTRL_RFID_SPEED_MASK 0x1f
+
+/* Modulator and SYS_CLK Control Register Bits */
+#define TRF7970A_MODULATOR_DEPTH(n) ((n) & 0x7)
+#define TRF7970A_MODULATOR_DEPTH_ASK10 (TRF7970A_MODULATOR_DEPTH(0))
+#define TRF7970A_MODULATOR_DEPTH_OOK (TRF7970A_MODULATOR_DEPTH(1))
+#define TRF7970A_MODULATOR_DEPTH_ASK7 (TRF7970A_MODULATOR_DEPTH(2))
+#define TRF7970A_MODULATOR_DEPTH_ASK8_5 (TRF7970A_MODULATOR_DEPTH(3))
+#define TRF7970A_MODULATOR_DEPTH_ASK13 (TRF7970A_MODULATOR_DEPTH(4))
+#define TRF7970A_MODULATOR_DEPTH_ASK16 (TRF7970A_MODULATOR_DEPTH(5))
+#define TRF7970A_MODULATOR_DEPTH_ASK22 (TRF7970A_MODULATOR_DEPTH(6))
+#define TRF7970A_MODULATOR_DEPTH_ASK30 (TRF7970A_MODULATOR_DEPTH(7))
+#define TRF7970A_MODULATOR_EN_ANA BIT(3)
+#define TRF7970A_MODULATOR_CLK(n) (((n) & 0x3) << 4)
+#define TRF7970A_MODULATOR_CLK_DISABLED (TRF7970A_MODULATOR_CLK(0))
+#define TRF7970A_MODULATOR_CLK_3_6 (TRF7970A_MODULATOR_CLK(1))
+#define TRF7970A_MODULATOR_CLK_6_13 (TRF7970A_MODULATOR_CLK(2))
+#define TRF7970A_MODULATOR_CLK_13_27 (TRF7970A_MODULATOR_CLK(3))
+#define TRF7970A_MODULATOR_EN_OOK BIT(6)
+#define TRF7970A_MODULATOR_27MHZ BIT(7)
+
+/* IRQ Status Register Bits */
+#define TRF7970A_IRQ_STATUS_NORESP BIT(0) /* ISO15693 only */
+#define TRF7970A_IRQ_STATUS_COL BIT(1)
+#define TRF7970A_IRQ_STATUS_FRAMING_EOF_ERROR BIT(2)
+#define TRF7970A_IRQ_STATUS_PARITY_ERROR BIT(3)
+#define TRF7970A_IRQ_STATUS_CRC_ERROR BIT(4)
+#define TRF7970A_IRQ_STATUS_FIFO BIT(5)
+#define TRF7970A_IRQ_STATUS_SRX BIT(6)
+#define TRF7970A_IRQ_STATUS_TX BIT(7)
+
+#define TRF7970A_IRQ_STATUS_ERROR \
+ (TRF7970A_IRQ_STATUS_COL | \
+ TRF7970A_IRQ_STATUS_FRAMING_EOF_ERROR | \
+ TRF7970A_IRQ_STATUS_PARITY_ERROR | \
+ TRF7970A_IRQ_STATUS_CRC_ERROR)
+
+#define TRF7970A_SPECIAL_FCN_REG1_COL_7_6 BIT(0)
+#define TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL BIT(1)
+#define TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX BIT(2)
+#define TRF7970A_SPECIAL_FCN_REG1_SP_DIR_MODE BIT(3)
+#define TRF7970A_SPECIAL_FCN_REG1_NEXT_SLOT_37US BIT(4)
+#define TRF7970A_SPECIAL_FCN_REG1_PAR43 BIT(5)
+
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_124 (0x0 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_120 (0x1 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_112 (0x2 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96 (0x3 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_4 0x0
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_8 0x1
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_16 0x2
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32 0x3
+
+#define TRF7970A_FIFO_STATUS_OVERFLOW BIT(7)
+
+/* NFC (ISO/IEC 14443A) Type 2 Tag commands */
+#define NFC_T2T_CMD_READ 0x30
+
+/* ISO 15693 commands codes */
+#define ISO15693_CMD_INVENTORY 0x01
+#define ISO15693_CMD_READ_SINGLE_BLOCK 0x20
+#define ISO15693_CMD_WRITE_SINGLE_BLOCK 0x21
+#define ISO15693_CMD_LOCK_BLOCK 0x22
+#define ISO15693_CMD_READ_MULTIPLE_BLOCK 0x23
+#define ISO15693_CMD_WRITE_MULTIPLE_BLOCK 0x24
+#define ISO15693_CMD_SELECT 0x25
+#define ISO15693_CMD_RESET_TO_READY 0x26
+#define ISO15693_CMD_WRITE_AFI 0x27
+#define ISO15693_CMD_LOCK_AFI 0x28
+#define ISO15693_CMD_WRITE_DSFID 0x29
+#define ISO15693_CMD_LOCK_DSFID 0x2a
+#define ISO15693_CMD_GET_SYSTEM_INFO 0x2b
+#define ISO15693_CMD_GET_MULTIPLE_BLOCK_SECURITY_STATUS 0x2c
+
+/* ISO 15693 request and response flags */
+#define ISO15693_REQ_FLAG_SUB_CARRIER BIT(0)
+#define ISO15693_REQ_FLAG_DATA_RATE BIT(1)
+#define ISO15693_REQ_FLAG_INVENTORY BIT(2)
+#define ISO15693_REQ_FLAG_PROTOCOL_EXT BIT(3)
+#define ISO15693_REQ_FLAG_SELECT BIT(4)
+#define ISO15693_REQ_FLAG_AFI BIT(4)
+#define ISO15693_REQ_FLAG_ADDRESS BIT(5)
+#define ISO15693_REQ_FLAG_NB_SLOTS BIT(5)
+#define ISO15693_REQ_FLAG_OPTION BIT(6)
+
+#define ISO15693_REQ_FLAG_SPEED_MASK \
+ (ISO15693_REQ_FLAG_SUB_CARRIER | ISO15693_REQ_FLAG_DATA_RATE)
+
+enum trf7970a_state {
+ TRF7970A_ST_OFF,
+ TRF7970A_ST_IDLE,
+ TRF7970A_ST_IDLE_RX_BLOCKED,
+ TRF7970A_ST_WAIT_FOR_TX_FIFO,
+ TRF7970A_ST_WAIT_FOR_RX_DATA,
+ TRF7970A_ST_WAIT_FOR_RX_DATA_CONT,
+ TRF7970A_ST_WAIT_TO_ISSUE_EOF,
+ TRF7970A_ST_MAX
+};
+
+struct trf7970a {
+ enum trf7970a_state state;
+ struct device *dev;
+ struct spi_device *spi;
+ struct regulator *regulator;
+ struct nfc_digital_dev *ddev;
+ u32 quirks;
+ bool powering_up;
+ bool aborting;
+ struct sk_buff *tx_skb;
+ struct sk_buff *rx_skb;
+ nfc_digital_cmd_complete_t cb;
+ void *cb_arg;
+ u8 iso_ctrl;
+ u8 special_fcn_reg1;
+ int technology;
+ int framing;
+ u8 tx_cmd;
+ bool issue_eof;
+ int en2_gpio;
+ int en_gpio;
+ struct mutex lock;
+ unsigned int timeout;
+ bool ignore_timeout;
+ struct delayed_work timeout_work;
+};
+
+
+static int trf7970a_cmd(struct trf7970a *trf, u8 opcode)
+{
+ u8 cmd = TRF7970A_CMD_BIT_CTRL | TRF7970A_CMD_BIT_OPCODE(opcode);
+ int ret;
+
+ dev_dbg(trf->dev, "cmd: 0x%x\n", cmd);
+
+ ret = spi_write(trf->spi, &cmd, 1);
+ if (ret)
+ dev_err(trf->dev, "%s - cmd: 0x%x, ret: %d\n", __func__, cmd,
+ ret);
+ return ret;
+}
+
+static int trf7970a_read(struct trf7970a *trf, u8 reg, u8 *val)
+{
+ u8 addr = TRF7970A_CMD_BIT_RW | reg;
+ int ret;
+
+ ret = spi_write_then_read(trf->spi, &addr, 1, val, 1);
+ if (ret)
+ dev_err(trf->dev, "%s - addr: 0x%x, ret: %d\n", __func__, addr,
+ ret);
+
+ dev_dbg(trf->dev, "read(0x%x): 0x%x\n", addr, *val);
+
+ return ret;
+}
+
+static int trf7970a_read_cont(struct trf7970a *trf, u8 reg,
+ u8 *buf, size_t len)
+{
+ u8 addr = reg | TRF7970A_CMD_BIT_RW | TRF7970A_CMD_BIT_CONTINUOUS;
+ int ret;
+
+ dev_dbg(trf->dev, "read_cont(0x%x, %zd)\n", addr, len);
+
+ ret = spi_write_then_read(trf->spi, &addr, 1, buf, len);
+ if (ret)
+ dev_err(trf->dev, "%s - addr: 0x%x, ret: %d\n", __func__, addr,
+ ret);
+ return ret;
+}
+
+static int trf7970a_write(struct trf7970a *trf, u8 reg, u8 val)
+{
+ u8 buf[2] = { reg, val };
+ int ret;
+
+ dev_dbg(trf->dev, "write(0x%x): 0x%x\n", reg, val);
+
+ ret = spi_write(trf->spi, buf, 2);
+ if (ret)
+ dev_err(trf->dev, "%s - write: 0x%x 0x%x, ret: %d\n", __func__,
+ buf[0], buf[1], ret);
+
+ return ret;
+}
+
+static int trf7970a_read_irqstatus(struct trf7970a *trf, u8 *status)
+{
+ int ret;
+ u8 buf[2];
+ u8 addr;
+
+ addr = TRF7970A_IRQ_STATUS | TRF7970A_CMD_BIT_RW;
+
+ if (trf->quirks & TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA) {
+ addr |= TRF7970A_CMD_BIT_CONTINUOUS;
+ ret = spi_write_then_read(trf->spi, &addr, 1, buf, 2);
+ } else {
+ ret = spi_write_then_read(trf->spi, &addr, 1, buf, 1);
+ }
+
+ if (ret)
+ dev_err(trf->dev, "%s - irqstatus: Status read failed: %d\n",
+ __func__, ret);
+ else
+ *status = buf[0];
+
+ return ret;
+}
+
+static void trf7970a_send_upstream(struct trf7970a *trf)
+{
+ u8 rssi;
+
+ dev_kfree_skb_any(trf->tx_skb);
+ trf->tx_skb = NULL;
+
+ if (trf->rx_skb && !IS_ERR(trf->rx_skb) && !trf->aborting)
+ print_hex_dump_debug("trf7970a rx data: ", DUMP_PREFIX_NONE,
+ 16, 1, trf->rx_skb->data, trf->rx_skb->len,
+ false);
+
+ /* According to the manual it is "good form" to reset the fifo and
+ * read the RSSI levels & oscillator status register here. It doesn't
+ * explain why.
+ */
+ trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
+ trf7970a_read(trf, TRF7970A_RSSI_OSC_STATUS, &rssi);
+
+ trf->state = TRF7970A_ST_IDLE;
+
+ if (trf->aborting) {
+ dev_dbg(trf->dev, "Abort process complete\n");
+
+ if (!IS_ERR(trf->rx_skb)) {
+ kfree_skb(trf->rx_skb);
+ trf->rx_skb = ERR_PTR(-ECANCELED);
+ }
+
+ trf->aborting = false;
+ }
+
+ trf->cb(trf->ddev, trf->cb_arg, trf->rx_skb);
+
+ trf->rx_skb = NULL;
+}
+
+static void trf7970a_send_err_upstream(struct trf7970a *trf, int errno)
+{
+ dev_dbg(trf->dev, "Error - state: %d, errno: %d\n", trf->state, errno);
+
+ kfree_skb(trf->rx_skb);
+ trf->rx_skb = ERR_PTR(errno);
+
+ trf7970a_send_upstream(trf);
+}
+
+static int trf7970a_transmit(struct trf7970a *trf, struct sk_buff *skb,
+ unsigned int len)
+{
+ unsigned int timeout;
+ int ret;
+
+ print_hex_dump_debug("trf7970a tx data: ", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, len, false);
+
+ ret = spi_write(trf->spi, skb->data, len);
+ if (ret) {
+ dev_err(trf->dev, "%s - Can't send tx data: %d\n", __func__,
+ ret);
+ return ret;
+ }
+
+ skb_pull(skb, len);
+
+ if (skb->len > 0) {
+ trf->state = TRF7970A_ST_WAIT_FOR_TX_FIFO;
+ timeout = TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT;
+ } else {
+ if (trf->issue_eof) {
+ trf->state = TRF7970A_ST_WAIT_TO_ISSUE_EOF;
+ timeout = TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF;
+ } else {
+ trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA;
+ timeout = trf->timeout;
+ }
+ }
+
+ dev_dbg(trf->dev, "Setting timeout for %d ms, state: %d\n", timeout,
+ trf->state);
+
+ schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(timeout));
+
+ return 0;
+}
+
+static void trf7970a_fill_fifo(struct trf7970a *trf)
+{
+ struct sk_buff *skb = trf->tx_skb;
+ unsigned int len;
+ int ret;
+ u8 fifo_bytes;
+
+ ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes);
+ if (ret) {
+ trf7970a_send_err_upstream(trf, ret);
+ return;
+ }
+
+ dev_dbg(trf->dev, "Filling FIFO - fifo_bytes: 0x%x\n", fifo_bytes);
+
+ if (fifo_bytes & TRF7970A_FIFO_STATUS_OVERFLOW) {
+ dev_err(trf->dev, "%s - fifo overflow: 0x%x\n", __func__,
+ fifo_bytes);
+ trf7970a_send_err_upstream(trf, -EIO);
+ return;
+ }
+
+ /* Calculate how much more data can be written to the fifo */
+ len = TRF7970A_FIFO_SIZE - fifo_bytes;
+ len = min(skb->len, len);
+
+ ret = trf7970a_transmit(trf, skb, len);
+ if (ret)
+ trf7970a_send_err_upstream(trf, ret);
+}
+
+static void trf7970a_drain_fifo(struct trf7970a *trf, u8 status)
+{
+ struct sk_buff *skb = trf->rx_skb;
+ int ret;
+ u8 fifo_bytes;
+
+ if (status & TRF7970A_IRQ_STATUS_ERROR) {
+ trf7970a_send_err_upstream(trf, -EIO);
+ return;
+ }
+
+ ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes);
+ if (ret) {
+ trf7970a_send_err_upstream(trf, ret);
+ return;
+ }
+
+ dev_dbg(trf->dev, "Draining FIFO - fifo_bytes: 0x%x\n", fifo_bytes);
+
+ if (!fifo_bytes)
+ goto no_rx_data;
+
+ if (fifo_bytes & TRF7970A_FIFO_STATUS_OVERFLOW) {
+ dev_err(trf->dev, "%s - fifo overflow: 0x%x\n", __func__,
+ fifo_bytes);
+ trf7970a_send_err_upstream(trf, -EIO);
+ return;
+ }
+
+ if (fifo_bytes > skb_tailroom(skb)) {
+ skb = skb_copy_expand(skb, skb_headroom(skb),
+ max_t(int, fifo_bytes,
+ TRF7970A_RX_SKB_ALLOC_SIZE),
+ GFP_KERNEL);
+ if (!skb) {
+ trf7970a_send_err_upstream(trf, -ENOMEM);
+ return;
+ }
+
+ kfree_skb(trf->rx_skb);
+ trf->rx_skb = skb;
+ }
+
+ ret = trf7970a_read_cont(trf, TRF7970A_FIFO_IO_REGISTER,
+ skb_put(skb, fifo_bytes), fifo_bytes);
+ if (ret) {
+ trf7970a_send_err_upstream(trf, ret);
+ return;
+ }
+
+ /* If received Type 2 ACK/NACK, shift right 4 bits and pass up */
+ if ((trf->framing == NFC_DIGITAL_FRAMING_NFCA_T2T) && (skb->len == 1) &&
+ (trf->special_fcn_reg1 ==
+ TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX)) {
+ skb->data[0] >>= 4;
+ status = TRF7970A_IRQ_STATUS_SRX;
+ } else {
+ trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA_CONT;
+ }
+
+no_rx_data:
+ if (status == TRF7970A_IRQ_STATUS_SRX) { /* Receive complete */
+ trf7970a_send_upstream(trf);
+ return;
+ }
+
+ dev_dbg(trf->dev, "Setting timeout for %d ms\n",
+ TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT);
+
+ schedule_delayed_work(&trf->timeout_work,
+ msecs_to_jiffies(TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT));
+}
+
+static irqreturn_t trf7970a_irq(int irq, void *dev_id)
+{
+ struct trf7970a *trf = dev_id;
+ int ret;
+ u8 status;
+
+ mutex_lock(&trf->lock);
+
+ if (trf->state == TRF7970A_ST_OFF) {
+ mutex_unlock(&trf->lock);
+ return IRQ_NONE;
+ }
+
+ ret = trf7970a_read_irqstatus(trf, &status);
+ if (ret) {
+ mutex_unlock(&trf->lock);
+ return IRQ_NONE;
+ }
+
+ dev_dbg(trf->dev, "IRQ - state: %d, status: 0x%x\n", trf->state,
+ status);
+
+ if (!status) {
+ mutex_unlock(&trf->lock);
+ return IRQ_NONE;
+ }
+
+ switch (trf->state) {
+ case TRF7970A_ST_IDLE:
+ case TRF7970A_ST_IDLE_RX_BLOCKED:
+ /* If getting interrupts caused by RF noise, turn off the
+ * receiver to avoid unnecessary interrupts. It will be
+ * turned back on in trf7970a_in_send_cmd() when the next
+ * command is issued.
+ */
+ if (status & TRF7970A_IRQ_STATUS_ERROR) {
+ trf7970a_cmd(trf, TRF7970A_CMD_BLOCK_RX);
+ trf->state = TRF7970A_ST_IDLE_RX_BLOCKED;
+ }
+
+ trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
+ break;
+ case TRF7970A_ST_WAIT_FOR_TX_FIFO:
+ if (status & TRF7970A_IRQ_STATUS_TX) {
+ trf->ignore_timeout =
+ !cancel_delayed_work(&trf->timeout_work);
+ trf7970a_fill_fifo(trf);
+ } else {
+ trf7970a_send_err_upstream(trf, -EIO);
+ }
+ break;
+ case TRF7970A_ST_WAIT_FOR_RX_DATA:
+ case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
+ if (status & TRF7970A_IRQ_STATUS_SRX) {
+ trf->ignore_timeout =
+ !cancel_delayed_work(&trf->timeout_work);
+ trf7970a_drain_fifo(trf, status);
+ } else if (!(status & TRF7970A_IRQ_STATUS_TX)) {
+ trf7970a_send_err_upstream(trf, -EIO);
+ }
+ break;
+ case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
+ if (status != TRF7970A_IRQ_STATUS_TX)
+ trf7970a_send_err_upstream(trf, -EIO);
+ break;
+ default:
+ dev_err(trf->dev, "%s - Driver in invalid state: %d\n",
+ __func__, trf->state);
+ }
+
+ mutex_unlock(&trf->lock);
+ return IRQ_HANDLED;
+}
+
+static void trf7970a_issue_eof(struct trf7970a *trf)
+{
+ int ret;
+
+ dev_dbg(trf->dev, "Issuing EOF\n");
+
+ ret = trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
+ if (ret)
+ trf7970a_send_err_upstream(trf, ret);
+
+ ret = trf7970a_cmd(trf, TRF7970A_CMD_EOF);
+ if (ret)
+ trf7970a_send_err_upstream(trf, ret);
+
+ trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA;
+
+ dev_dbg(trf->dev, "Setting timeout for %d ms, state: %d\n",
+ trf->timeout, trf->state);
+
+ schedule_delayed_work(&trf->timeout_work,
+ msecs_to_jiffies(trf->timeout));
+}
+
+static void trf7970a_timeout_work_handler(struct work_struct *work)
+{
+ struct trf7970a *trf = container_of(work, struct trf7970a,
+ timeout_work.work);
+
+ dev_dbg(trf->dev, "Timeout - state: %d, ignore_timeout: %d\n",
+ trf->state, trf->ignore_timeout);
+
+ mutex_lock(&trf->lock);
+
+ if (trf->ignore_timeout)
+ trf->ignore_timeout = false;
+ else if (trf->state == TRF7970A_ST_WAIT_FOR_RX_DATA_CONT)
+ trf7970a_send_upstream(trf); /* No more rx data so send up */
+ else if (trf->state == TRF7970A_ST_WAIT_TO_ISSUE_EOF)
+ trf7970a_issue_eof(trf);
+ else
+ trf7970a_send_err_upstream(trf, -ETIMEDOUT);
+
+ mutex_unlock(&trf->lock);
+}
+
+static int trf7970a_init(struct trf7970a *trf)
+{
+ int ret;
+
+ dev_dbg(trf->dev, "Initializing device - state: %d\n", trf->state);
+
+ ret = trf7970a_cmd(trf, TRF7970A_CMD_SOFT_INIT);
+ if (ret)
+ goto err_out;
+
+ ret = trf7970a_cmd(trf, TRF7970A_CMD_IDLE);
+ if (ret)
+ goto err_out;
+
+ ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL,
+ TRF7970A_MODULATOR_DEPTH_OOK);
+ if (ret)
+ goto err_out;
+
+ ret = trf7970a_write(trf, TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS,
+ TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96 |
+ TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32);
+ if (ret)
+ goto err_out;
+
+ ret = trf7970a_write(trf, TRF7970A_SPECIAL_FCN_REG1, 0);
+ if (ret)
+ goto err_out;
+
+ trf->special_fcn_reg1 = 0;
+
+ ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
+ TRF7970A_CHIP_STATUS_RF_ON |
+ TRF7970A_CHIP_STATUS_VRS5_3);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ dev_dbg(trf->dev, "Couldn't init device: %d\n", ret);
+ return ret;
+}
+
+static void trf7970a_switch_rf_off(struct trf7970a *trf)
+{
+ dev_dbg(trf->dev, "Switching rf off\n");
+
+ gpio_set_value(trf->en_gpio, 0);
+ gpio_set_value(trf->en2_gpio, 0);
+
+ trf->aborting = false;
+ trf->state = TRF7970A_ST_OFF;
+}
+
+static int trf7970a_switch_rf_on(struct trf7970a *trf)
+{
+ unsigned long delay;
+ int ret;
+
+ dev_dbg(trf->dev, "Switching rf on\n");
+
+ if (trf->powering_up)
+ usleep_range(5000, 6000);
+
+ gpio_set_value(trf->en2_gpio, 1);
+ usleep_range(1000, 2000);
+ gpio_set_value(trf->en_gpio, 1);
+
+ /* The delay between enabling the trf7970a and issuing the first
+ * command is significantly longer the very first time after powering
+ * up. Make sure the longer delay is only done the first time.
+ */
+ if (trf->powering_up) {
+ delay = 20000;
+ trf->powering_up = false;
+ } else {
+ delay = 5000;
+ }
+
+ usleep_range(delay, delay + 1000);
+
+ ret = trf7970a_init(trf);
+ if (ret)
+ trf7970a_switch_rf_off(trf);
+ else
+ trf->state = TRF7970A_ST_IDLE;
+
+ return ret;
+}
+
+static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ int ret = 0;
+
+ dev_dbg(trf->dev, "Switching RF - state: %d, on: %d\n", trf->state, on);
+
+ mutex_lock(&trf->lock);
+
+ if (on) {
+ switch (trf->state) {
+ case TRF7970A_ST_OFF:
+ ret = trf7970a_switch_rf_on(trf);
+ break;
+ case TRF7970A_ST_IDLE:
+ case TRF7970A_ST_IDLE_RX_BLOCKED:
+ break;
+ default:
+ dev_err(trf->dev, "%s - Invalid request: %d %d\n",
+ __func__, trf->state, on);
+ trf7970a_switch_rf_off(trf);
+ }
+ } else {
+ switch (trf->state) {
+ case TRF7970A_ST_OFF:
+ break;
+ default:
+ dev_err(trf->dev, "%s - Invalid request: %d %d\n",
+ __func__, trf->state, on);
+ /* FALLTHROUGH */
+ case TRF7970A_ST_IDLE:
+ case TRF7970A_ST_IDLE_RX_BLOCKED:
+ trf7970a_switch_rf_off(trf);
+ }
+ }
+
+ mutex_unlock(&trf->lock);
+ return ret;
+}
+
+static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
+{
+ int ret = 0;
+
+ dev_dbg(trf->dev, "rf technology: %d\n", tech);
+
+ switch (tech) {
+ case NFC_DIGITAL_RF_TECH_106A:
+ trf->iso_ctrl = TRF7970A_ISO_CTRL_14443A_106;
+ break;
+ case NFC_DIGITAL_RF_TECH_ISO15693:
+ trf->iso_ctrl = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
+ break;
+ default:
+ dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech);
+ return -EINVAL;
+ }
+
+ trf->technology = tech;
+
+ return ret;
+}
+
+static int trf7970a_config_framing(struct trf7970a *trf, int framing)
+{
+ dev_dbg(trf->dev, "framing: %d\n", framing);
+
+ switch (framing) {
+ case NFC_DIGITAL_FRAMING_NFCA_SHORT:
+ case NFC_DIGITAL_FRAMING_NFCA_STANDARD:
+ trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC;
+ trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
+ break;
+ case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A:
+ case NFC_DIGITAL_FRAMING_NFCA_T4T:
+ case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY:
+ case NFC_DIGITAL_FRAMING_ISO15693_T5T:
+ trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
+ trf->iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
+ break;
+ case NFC_DIGITAL_FRAMING_NFCA_T2T:
+ trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
+ trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
+ break;
+ default:
+ dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing);
+ return -EINVAL;
+ }
+
+ trf->framing = framing;
+
+ return trf7970a_write(trf, TRF7970A_ISO_CTRL, trf->iso_ctrl);
+}
+
+static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type,
+ int param)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ int ret = 0;
+
+ dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param);
+
+ mutex_lock(&trf->lock);
+
+ if (trf->state == TRF7970A_ST_OFF) {
+ ret = trf7970a_switch_rf_on(trf);
+ if (ret)
+ goto err_out;
+ }
+
+ switch (type) {
+ case NFC_DIGITAL_CONFIG_RF_TECH:
+ ret = trf7970a_config_rf_tech(trf, param);
+ break;
+ case NFC_DIGITAL_CONFIG_FRAMING:
+ ret = trf7970a_config_framing(trf, param);
+ break;
+ default:
+ dev_dbg(trf->dev, "Unknown type: %d\n", type);
+ ret = -EINVAL;
+ }
+
+err_out:
+ mutex_unlock(&trf->lock);
+ return ret;
+}
+
+static int trf7970a_is_iso15693_write_or_lock(u8 cmd)
+{
+ switch (cmd) {
+ case ISO15693_CMD_WRITE_SINGLE_BLOCK:
+ case ISO15693_CMD_LOCK_BLOCK:
+ case ISO15693_CMD_WRITE_MULTIPLE_BLOCK:
+ case ISO15693_CMD_WRITE_AFI:
+ case ISO15693_CMD_LOCK_AFI:
+ case ISO15693_CMD_WRITE_DSFID:
+ case ISO15693_CMD_LOCK_DSFID:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int trf7970a_per_cmd_config(struct trf7970a *trf, struct sk_buff *skb)
+{
+ u8 *req = skb->data;
+ u8 special_fcn_reg1, iso_ctrl;
+ int ret;
+
+ trf->issue_eof = false;
+
+ /* When issuing a Type 2 read command, make sure the '4_bit_RX' bit in
+ * special functions register 1 is cleared; otherwise, it's a write or
+ * sector select command and '4_bit_RX' must be set.
+ *
+ * When issuing an ISO 15693 command, inspect the flags byte to see
+ * what speed to use. Also, remember if the OPTION flag is set on
+ * a Type 5 write or lock command so the driver will know that it
+ * has to send an EOF in order to get a response.
+ */
+ if ((trf->technology == NFC_DIGITAL_RF_TECH_106A) &&
+ (trf->framing == NFC_DIGITAL_FRAMING_NFCA_T2T)) {
+ if (req[0] == NFC_T2T_CMD_READ)
+ special_fcn_reg1 = 0;
+ else
+ special_fcn_reg1 = TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX;
+
+ if (special_fcn_reg1 != trf->special_fcn_reg1) {
+ ret = trf7970a_write(trf, TRF7970A_SPECIAL_FCN_REG1,
+ special_fcn_reg1);
+ if (ret)
+ return ret;
+
+ trf->special_fcn_reg1 = special_fcn_reg1;
+ }
+ } else if (trf->technology == NFC_DIGITAL_RF_TECH_ISO15693) {
+ iso_ctrl = trf->iso_ctrl & ~TRF7970A_ISO_CTRL_RFID_SPEED_MASK;
+
+ switch (req[0] & ISO15693_REQ_FLAG_SPEED_MASK) {
+ case 0x00:
+ iso_ctrl |= TRF7970A_ISO_CTRL_15693_SGL_1OF4_662;
+ break;
+ case ISO15693_REQ_FLAG_SUB_CARRIER:
+ iso_ctrl |= TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a;
+ break;
+ case ISO15693_REQ_FLAG_DATA_RATE:
+ iso_ctrl |= TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
+ break;
+ case (ISO15693_REQ_FLAG_SUB_CARRIER |
+ ISO15693_REQ_FLAG_DATA_RATE):
+ iso_ctrl |= TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669;
+ break;
+ }
+
+ if (iso_ctrl != trf->iso_ctrl) {
+ ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl);
+ if (ret)
+ return ret;
+
+ trf->iso_ctrl = iso_ctrl;
+ }
+
+ if ((trf->framing == NFC_DIGITAL_FRAMING_ISO15693_T5T) &&
+ trf7970a_is_iso15693_write_or_lock(req[1]) &&
+ (req[0] & ISO15693_REQ_FLAG_OPTION))
+ trf->issue_eof = true;
+ }
+
+ return 0;
+}
+
+static int trf7970a_in_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ char *prefix;
+ unsigned int len;
+ int ret;
+
+ dev_dbg(trf->dev, "New request - state: %d, timeout: %d ms, len: %d\n",
+ trf->state, timeout, skb->len);
+
+ if (skb->len > TRF7970A_TX_MAX)
+ return -EINVAL;
+
+ mutex_lock(&trf->lock);
+
+ if ((trf->state != TRF7970A_ST_IDLE) &&
+ (trf->state != TRF7970A_ST_IDLE_RX_BLOCKED)) {
+ dev_err(trf->dev, "%s - Bogus state: %d\n", __func__,
+ trf->state);
+ ret = -EIO;
+ goto out_err;
+ }
+
+ if (trf->aborting) {
+ dev_dbg(trf->dev, "Abort process complete\n");
+ trf->aborting = false;
+ ret = -ECANCELED;
+ goto out_err;
+ }
+
+ trf->rx_skb = nfc_alloc_recv_skb(TRF7970A_RX_SKB_ALLOC_SIZE,
+ GFP_KERNEL);
+ if (!trf->rx_skb) {
+ dev_dbg(trf->dev, "Can't alloc rx_skb\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ if (trf->state == TRF7970A_ST_IDLE_RX_BLOCKED) {
+ ret = trf7970a_cmd(trf, TRF7970A_CMD_ENABLE_RX);
+ if (ret)
+ goto out_err;
+
+ trf->state = TRF7970A_ST_IDLE;
+ }
+
+ ret = trf7970a_per_cmd_config(trf, skb);
+ if (ret)
+ goto out_err;
+
+ trf->ddev = ddev;
+ trf->tx_skb = skb;
+ trf->cb = cb;
+ trf->cb_arg = arg;
+ trf->timeout = timeout;
+ trf->ignore_timeout = false;
+
+ len = skb->len;
+ prefix = skb_push(skb, TRF7970A_TX_SKB_HEADROOM);
+
+ /* TX data must be prefixed with a FIFO reset cmd, a cmd that depends
+ * on what the current framing is, the address of the TX length byte 1
+ * register (0x1d), and the 2 byte length of the data to be transmitted.
+ */
+ prefix[0] = TRF7970A_CMD_BIT_CTRL |
+ TRF7970A_CMD_BIT_OPCODE(TRF7970A_CMD_FIFO_RESET);
+ prefix[1] = TRF7970A_CMD_BIT_CTRL |
+ TRF7970A_CMD_BIT_OPCODE(trf->tx_cmd);
+ prefix[2] = TRF7970A_CMD_BIT_CONTINUOUS | TRF7970A_TX_LENGTH_BYTE1;
+
+ if (trf->framing == NFC_DIGITAL_FRAMING_NFCA_SHORT) {
+ prefix[3] = 0x00;
+ prefix[4] = 0x0f; /* 7 bits */
+ } else {
+ prefix[3] = (len & 0xf00) >> 4;
+ prefix[3] |= ((len & 0xf0) >> 4);
+ prefix[4] = ((len & 0x0f) << 4);
+ }
+
+ len = min_t(int, skb->len, TRF7970A_FIFO_SIZE);
+
+ usleep_range(1000, 2000);
+
+ ret = trf7970a_transmit(trf, skb, len);
+ if (ret) {
+ kfree_skb(trf->rx_skb);
+ trf->rx_skb = NULL;
+ }
+
+out_err:
+ mutex_unlock(&trf->lock);
+ return ret;
+}
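[Editor's note, not part of the patch] The prefix[3]/prefix[4] packing above splits the 12-bit transmit length across the two TX length registers: byte 1 carries bits 11..4 and the upper nibble of byte 2 carries bits 3..0, while the lower nibble of byte 2 is reserved for partial-byte frames (the 0x0f used for the NFCA short frame above marks it as a 7-bit transmission). A standalone sanity check of that packing; pack_tx_length() is an illustrative helper, not driver code.

#include <assert.h>
#include <stdint.h>

/* Same nibble packing as the prefix[3]/prefix[4] assignments above. */
static void pack_tx_length(unsigned int len, uint8_t *byte1, uint8_t *byte2)
{
	*byte1 = ((len & 0xf00) >> 4) | ((len & 0x0f0) >> 4);
	*byte2 = (len & 0x00f) << 4;
}

int main(void)
{
	uint8_t b1, b2;

	pack_tx_length(0x123, &b1, &b2);   /* 291-byte frame */
	assert(b1 == 0x12 && b2 == 0x30);

	pack_tx_length(4095, &b1, &b2);    /* TRF7970A_TX_MAX */
	assert(b1 == 0xff && b2 == 0xf0);
	return 0;
}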
+
+static int trf7970a_tg_configure_hw(struct nfc_digital_dev *ddev,
+ int type, int param)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+ dev_dbg(trf->dev, "Unsupported interface\n");
+
+ return -EINVAL;
+}
+
+static int trf7970a_tg_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+ dev_dbg(trf->dev, "Unsupported interface\n");
+
+ return -EINVAL;
+}
+
+static int trf7970a_tg_listen(struct nfc_digital_dev *ddev,
+ u16 timeout, nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+ dev_dbg(trf->dev, "Unsupported interface\n");
+
+ return -EINVAL;
+}
+
+static int trf7970a_tg_listen_mdaa(struct nfc_digital_dev *ddev,
+ struct digital_tg_mdaa_params *mdaa_params,
+ u16 timeout, nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+ dev_dbg(trf->dev, "Unsupported interface\n");
+
+ return -EINVAL;
+}
+
+static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+ dev_dbg(trf->dev, "Abort process initiated\n");
+
+ mutex_lock(&trf->lock);
+ trf->aborting = true;
+ mutex_unlock(&trf->lock);
+}
+
+static struct nfc_digital_ops trf7970a_nfc_ops = {
+ .in_configure_hw = trf7970a_in_configure_hw,
+ .in_send_cmd = trf7970a_in_send_cmd,
+ .tg_configure_hw = trf7970a_tg_configure_hw,
+ .tg_send_cmd = trf7970a_tg_send_cmd,
+ .tg_listen = trf7970a_tg_listen,
+ .tg_listen_mdaa = trf7970a_tg_listen_mdaa,
+ .switch_rf = trf7970a_switch_rf,
+ .abort_cmd = trf7970a_abort_cmd,
+};
+
+static int trf7970a_probe(struct spi_device *spi)
+{
+ struct device_node *np = spi->dev.of_node;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct trf7970a *trf;
+ int ret;
+
+ if (!np) {
+ dev_err(&spi->dev, "No Device Tree entry\n");
+ return -EINVAL;
+ }
+
+ trf = devm_kzalloc(&spi->dev, sizeof(*trf), GFP_KERNEL);
+ if (!trf)
+ return -ENOMEM;
+
+ trf->state = TRF7970A_ST_OFF;
+ trf->dev = &spi->dev;
+ trf->spi = spi;
+ trf->quirks = id->driver_data;
+
+ spi->mode = SPI_MODE_1;
+ spi->bits_per_word = 8;
+
+ /* There are two enable pins - both must be present */
+ trf->en_gpio = of_get_named_gpio(np, "ti,enable-gpios", 0);
+ if (!gpio_is_valid(trf->en_gpio)) {
+ dev_err(trf->dev, "No EN GPIO property\n");
+ return trf->en_gpio;
+ }
+
+ ret = devm_gpio_request_one(trf->dev, trf->en_gpio,
+ GPIOF_DIR_OUT | GPIOF_INIT_LOW, "EN");
+ if (ret) {
+ dev_err(trf->dev, "Can't request EN GPIO: %d\n", ret);
+ return ret;
+ }
+
+ trf->en2_gpio = of_get_named_gpio(np, "ti,enable-gpios", 1);
+ if (!gpio_is_valid(trf->en2_gpio)) {
+ dev_err(trf->dev, "No EN2 GPIO property\n");
+ return trf->en2_gpio;
+ }
+
+ ret = devm_gpio_request_one(trf->dev, trf->en2_gpio,
+ GPIOF_DIR_OUT | GPIOF_INIT_LOW, "EN2");
+ if (ret) {
+ dev_err(trf->dev, "Can't request EN2 GPIO: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(trf->dev, spi->irq, NULL,
+ trf7970a_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "trf7970a", trf);
+ if (ret) {
+ dev_err(trf->dev, "Can't request IRQ#%d: %d\n", spi->irq, ret);
+ return ret;
+ }
+
+ mutex_init(&trf->lock);
+ INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
+
+ trf->regulator = devm_regulator_get(&spi->dev, "vin");
+ if (IS_ERR(trf->regulator)) {
+ ret = PTR_ERR(trf->regulator);
+ dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
+ goto err_destroy_lock;
+ }
+
+ ret = regulator_enable(trf->regulator);
+ if (ret) {
+ dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
+ goto err_destroy_lock;
+ }
+
+ trf->powering_up = true;
+
+ trf->ddev = nfc_digital_allocate_device(&trf7970a_nfc_ops,
+ TRF7970A_SUPPORTED_PROTOCOLS,
+ NFC_DIGITAL_DRV_CAPS_IN_CRC, TRF7970A_TX_SKB_HEADROOM,
+ 0);
+ if (!trf->ddev) {
+ dev_err(trf->dev, "Can't allocate NFC digital device\n");
+ ret = -ENOMEM;
+ goto err_disable_regulator;
+ }
+
+ nfc_digital_set_parent_dev(trf->ddev, trf->dev);
+ nfc_digital_set_drvdata(trf->ddev, trf);
+ spi_set_drvdata(spi, trf);
+
+ ret = nfc_digital_register_device(trf->ddev);
+ if (ret) {
+ dev_err(trf->dev, "Can't register NFC digital device: %d\n",
+ ret);
+ goto err_free_ddev;
+ }
+
+ return 0;
+
+err_free_ddev:
+ nfc_digital_free_device(trf->ddev);
+err_disable_regulator:
+ regulator_disable(trf->regulator);
+err_destroy_lock:
+ mutex_destroy(&trf->lock);
+ return ret;
+}
+
+static int trf7970a_remove(struct spi_device *spi)
+{
+ struct trf7970a *trf = spi_get_drvdata(spi);
+
+ mutex_lock(&trf->lock);
+
+ trf7970a_switch_rf_off(trf);
+ trf7970a_init(trf);
+
+ switch (trf->state) {
+ case TRF7970A_ST_WAIT_FOR_TX_FIFO:
+ case TRF7970A_ST_WAIT_FOR_RX_DATA:
+ case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
+ case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
+ trf7970a_send_err_upstream(trf, -ECANCELED);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&trf->lock);
+
+ nfc_digital_unregister_device(trf->ddev);
+ nfc_digital_free_device(trf->ddev);
+
+ regulator_disable(trf->regulator);
+
+ mutex_destroy(&trf->lock);
+
+ return 0;
+}
+
+static const struct spi_device_id trf7970a_id_table[] = {
+ { "trf7970a", TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, trf7970a_id_table);
+
+static struct spi_driver trf7970a_spi_driver = {
+ .probe = trf7970a_probe,
+ .remove = trf7970a_remove,
+ .id_table = trf7970a_id_table,
+ .driver = {
+ .name = "trf7970a",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_spi_driver(trf7970a_spi_driver);
+
+MODULE_AUTHOR("Mark A. Greer <mgreer@animalcreek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI trf7970a RFID/NFC Transceiver Driver");
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index c6973f101a3e..889005fa4d04 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -7,14 +7,6 @@ config OF
menu "Device Tree and Open Firmware support"
depends on OF
-config PROC_DEVICETREE
- bool "Support for device tree in /proc"
- depends on PROC_FS && !SPARC
- help
- This option adds a device-tree directory under /proc which contains
- an image of the device tree that the kernel copies from Open
- Firmware or other boot firmware. If unsure, say Y here.
-
config OF_SELFTEST
bool "Device Tree Runtime self tests"
depends on OF_IRQ
@@ -44,6 +36,10 @@ config OF_DYNAMIC
config OF_ADDRESS
def_bool y
depends on !SPARC
+ select OF_ADDRESS_PCI if PCI
+
+config OF_ADDRESS_PCI
+ bool
config OF_IRQ
def_bool y
@@ -75,4 +71,10 @@ config OF_MTD
depends on MTD
def_bool y
+config OF_RESERVED_MEM
+ depends on OF_EARLY_FLATTREE
+ bool
+ help
+ Helpers to allow for reservation of memory regions
+
endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index efd05102c405..ed9660adad77 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
obj-$(CONFIG_OF_MTD) += of_mtd.o
+obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 1a54f1ffaadb..cb4242a69cd5 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -91,7 +91,7 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
return IORESOURCE_MEM;
}
-#ifdef CONFIG_PCI
+#ifdef CONFIG_OF_ADDRESS_PCI
/*
* PCI bus specific translator
*/
@@ -166,7 +166,9 @@ static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
return of_bus_default_translate(addr + 1, offset, na - 1);
}
+#endif /* CONFIG_OF_ADDRESS_PCI */
+#ifdef CONFIG_PCI
const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
unsigned int *flags)
{
@@ -356,7 +358,7 @@ static unsigned int of_bus_isa_get_flags(const __be32 *addr)
*/
static struct of_bus of_busses[] = {
-#ifdef CONFIG_PCI
+#ifdef CONFIG_OF_ADDRESS_PCI
/* PCI */
{
.name = "pci",
@@ -367,7 +369,7 @@ static struct of_bus of_busses[] = {
.translate = of_bus_pci_translate,
.get_flags = of_bus_pci_get_flags,
},
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_OF_ADDRESS_PCI */
/* ISA */
{
.name = "isa",
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 89e888a78899..f72d19b7e5d2 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -21,8 +21,10 @@
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/proc_fs.h>
#include "of_private.h"
@@ -35,6 +37,12 @@ struct device_node *of_chosen;
struct device_node *of_aliases;
static struct device_node *of_stdout;
+static struct kset *of_kset;
+
+/*
+ * Used to protect the of_aliases; but also overloaded to hold off addition of
+ * nodes to sysfs
+ */
DEFINE_MUTEX(of_aliases_mutex);
/* use when traversing tree through the allnext, child, sibling,
@@ -92,14 +100,14 @@ int __weak of_node_to_nid(struct device_node *np)
struct device_node *of_node_get(struct device_node *node)
{
if (node)
- kref_get(&node->kref);
+ kobject_get(&node->kobj);
return node;
}
EXPORT_SYMBOL(of_node_get);
-static inline struct device_node *kref_to_device_node(struct kref *kref)
+static inline struct device_node *kobj_to_device_node(struct kobject *kobj)
{
- return container_of(kref, struct device_node, kref);
+ return container_of(kobj, struct device_node, kobj);
}
/**
@@ -109,16 +117,15 @@ static inline struct device_node *kref_to_device_node(struct kref *kref)
* In of_node_put() this function is passed to kref_put()
* as the destructor.
*/
-static void of_node_release(struct kref *kref)
+static void of_node_release(struct kobject *kobj)
{
- struct device_node *node = kref_to_device_node(kref);
+ struct device_node *node = kobj_to_device_node(kobj);
struct property *prop = node->properties;
/* We should never be releasing nodes that haven't been detached. */
if (!of_node_check_flag(node, OF_DETACHED)) {
pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name);
dump_stack();
- kref_init(&node->kref);
return;
}
@@ -151,11 +158,154 @@ static void of_node_release(struct kref *kref)
void of_node_put(struct device_node *node)
{
if (node)
- kref_put(&node->kref, of_node_release);
+ kobject_put(&node->kobj);
}
EXPORT_SYMBOL(of_node_put);
+#else
+static void of_node_release(struct kobject *kobj)
+{
+ /* Without CONFIG_OF_DYNAMIC, no nodes get freed */
+}
#endif /* CONFIG_OF_DYNAMIC */
+struct kobj_type of_node_ktype = {
+ .release = of_node_release,
+};
+
+static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct property *pp = container_of(bin_attr, struct property, attr);
+ return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
+}
+
+static const char *safe_name(struct kobject *kobj, const char *orig_name)
+{
+ const char *name = orig_name;
+ struct kernfs_node *kn;
+ int i = 0;
+
+ /* don't be a hero. After 16 tries give up */
+ while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) {
+ sysfs_put(kn);
+ if (name != orig_name)
+ kfree(name);
+ name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
+ }
+
+ if (name != orig_name)
+ pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
+ kobject_name(kobj), name);
+ return name;
+}
+
+static int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+ int rc;
+
+ /* Important: Don't leak passwords */
+ bool secure = strncmp(pp->name, "security-", 9) == 0;
+
+ sysfs_bin_attr_init(&pp->attr);
+ pp->attr.attr.name = safe_name(&np->kobj, pp->name);
+ pp->attr.attr.mode = secure ? S_IRUSR : S_IRUGO;
+ pp->attr.size = secure ? 0 : pp->length;
+ pp->attr.read = of_node_property_read;
+
+ rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
+ WARN(rc, "error adding attribute %s to node %s\n", pp->name, np->full_name);
+ return rc;
+}
+
+static int __of_node_add(struct device_node *np)
+{
+ const char *name;
+ struct property *pp;
+ int rc;
+
+ np->kobj.kset = of_kset;
+ if (!np->parent) {
+ /* Nodes without parents are new top level trees */
+ rc = kobject_add(&np->kobj, NULL, safe_name(&of_kset->kobj, "base"));
+ } else {
+ name = safe_name(&np->parent->kobj, kbasename(np->full_name));
+ if (!name || !name[0])
+ return -EINVAL;
+
+ rc = kobject_add(&np->kobj, &np->parent->kobj, "%s", name);
+ }
+ if (rc)
+ return rc;
+
+ for_each_property_of_node(np, pp)
+ __of_add_property_sysfs(np, pp);
+
+ return 0;
+}
+
+int of_node_add(struct device_node *np)
+{
+ int rc = 0;
+
+ BUG_ON(!of_node_is_initialized(np));
+
+ /*
+ * Grab the mutex here so that in a race condition between of_init() and
+ * of_node_add(), node addition will still be consistent.
+ */
+ mutex_lock(&of_aliases_mutex);
+ if (of_kset)
+ rc = __of_node_add(np);
+ else
+ /* This scenario may be perfectly valid, but report it anyway */
+ pr_info("of_node_add(%s) before of_init()\n", np->full_name);
+ mutex_unlock(&of_aliases_mutex);
+ return rc;
+}
+
+#if defined(CONFIG_OF_DYNAMIC)
+static void of_node_remove(struct device_node *np)
+{
+ struct property *pp;
+
+ BUG_ON(!of_node_is_initialized(np));
+
+ /* only remove properties if on sysfs */
+ if (of_node_is_attached(np)) {
+ for_each_property_of_node(np, pp)
+ sysfs_remove_bin_file(&np->kobj, &pp->attr);
+ kobject_del(&np->kobj);
+ }
+
+ /* finally remove the kobj_init ref */
+ of_node_put(np);
+}
+#endif
+
+static int __init of_init(void)
+{
+ struct device_node *np;
+
+ /* Create the kset, and register existing nodes */
+ mutex_lock(&of_aliases_mutex);
+ of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
+ if (!of_kset) {
+ mutex_unlock(&of_aliases_mutex);
+ return -ENOMEM;
+ }
+ for_each_of_allnodes(np)
+ __of_node_add(np);
+ mutex_unlock(&of_aliases_mutex);
+
+ /* Symlink in /proc as required by userspace ABI */
+ if (of_allnodes)
+ proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
+
+ return 0;
+}
+core_initcall(of_init);
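
An illustration of the resulting layout (node path hypothetical), assuming a node /soc/serial@12340000 with a "compatible" property:

	/sys/firmware/devicetree/base/soc/serial@12340000/compatible
	/proc/device-tree -> /sys/firmware/devicetree/base   (symlink created above)
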
+
static struct property *__of_find_property(const struct device_node *np,
const char *name, int *lenp)
{
@@ -904,6 +1054,38 @@ struct device_node *of_find_node_by_phandle(phandle handle)
EXPORT_SYMBOL(of_find_node_by_phandle);
/**
+ * of_property_count_elems_of_size - Count the number of elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @elem_size: size of the individual element
+ *
+ * Search for a property in a device node and count the number of elements of
+ * size elem_size in it. Returns number of elements on success, -EINVAL if the
+ * property does not exist or its length does not match a multiple of elem_size
+ * and -ENODATA if the property does not have a value.
+ */
+int of_property_count_elems_of_size(const struct device_node *np,
+ const char *propname, int elem_size)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+
+ if (prop->length % elem_size != 0) {
+ pr_err("size of %s in node %s is not a multiple of %d\n",
+ propname, np->full_name, elem_size);
+ return -EINVAL;
+ }
+
+ return prop->length / elem_size;
+}
+EXPORT_SYMBOL_GPL(of_property_count_elems_of_size);
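
A minimal usage sketch for the helper above; the property name is hypothetical:

	/* Count how many u32 entries a hypothetical "xyz-table" property holds. */
	static int example_count_entries(struct device_node *np)
	{
		/* Negative return is -EINVAL or -ENODATA, as documented above. */
		return of_property_count_elems_of_size(np, "xyz-table", sizeof(u32));
	}
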
+
+/**
* of_find_property_value_of_size
*
* @np: device node from which the property value is to be read.
@@ -1498,6 +1680,10 @@ static int of_property_notify(int action, struct device_node *np,
{
struct of_prop_reconfig pr;
+ /* only call notifiers if the node is attached */
+ if (!of_node_is_attached(np))
+ return 0;
+
pr.dn = np;
pr.prop = prop;
return of_reconfig_notify(action, &pr);
@@ -1511,11 +1697,31 @@ static int of_property_notify(int action, struct device_node *np,
#endif
/**
+ * __of_add_property - Add a property to a node without lock operations
+ */
+static int __of_add_property(struct device_node *np, struct property *prop)
+{
+ struct property **next;
+
+ prop->next = NULL;
+ next = &np->properties;
+ while (*next) {
+ if (strcmp(prop->name, (*next)->name) == 0)
+ /* duplicate ! don't insert it */
+ return -EEXIST;
+
+ next = &(*next)->next;
+ }
+ *next = prop;
+
+ return 0;
+}
+
+/**
* of_add_property - Add a property to a node
*/
int of_add_property(struct device_node *np, struct property *prop)
{
- struct property **next;
unsigned long flags;
int rc;
@@ -1523,27 +1729,16 @@ int of_add_property(struct device_node *np, struct property *prop)
if (rc)
return rc;
- prop->next = NULL;
raw_spin_lock_irqsave(&devtree_lock, flags);
- next = &np->properties;
- while (*next) {
- if (strcmp(prop->name, (*next)->name) == 0) {
- /* duplicate ! don't insert it */
- raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return -1;
- }
- next = &(*next)->next;
- }
- *next = prop;
+ rc = __of_add_property(np, prop);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ if (rc)
+ return rc;
-#ifdef CONFIG_PROC_DEVICETREE
- /* try to add to proc as well if it was initialized */
- if (np->pde)
- proc_device_tree_add_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
+ if (of_node_is_attached(np))
+ __of_add_property_sysfs(np, prop);
- return 0;
+ return rc;
}
/**
@@ -1583,11 +1778,11 @@ int of_remove_property(struct device_node *np, struct property *prop)
if (!found)
return -ENODEV;
-#ifdef CONFIG_PROC_DEVICETREE
- /* try to remove the proc node as well */
- if (np->pde)
- proc_device_tree_remove_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
+ /* at early boot, bail here and defer setup to of_init() */
+ if (!of_kset)
+ return 0;
+
+ sysfs_remove_bin_file(&np->kobj, &prop->attr);
return 0;
}
@@ -1633,16 +1828,17 @@ int of_update_property(struct device_node *np, struct property *newprop)
next = &(*next)->next;
}
raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ if (rc)
+ return rc;
+
+ /* Update the sysfs attribute */
+ if (oldprop)
+ sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
+ __of_add_property_sysfs(np, newprop);
if (!found)
return -ENODEV;
-#ifdef CONFIG_PROC_DEVICETREE
- /* try to add to proc as well if it was initialized */
- if (np->pde)
- proc_device_tree_update_prop(np->pde, newprop, oldprop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
return 0;
}
@@ -1677,22 +1873,6 @@ int of_reconfig_notify(unsigned long action, void *p)
return notifier_to_errno(rc);
}
-#ifdef CONFIG_PROC_DEVICETREE
-static void of_add_proc_dt_entry(struct device_node *dn)
-{
- struct proc_dir_entry *ent;
-
- ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde);
- if (ent)
- proc_device_tree_add_node(dn, ent);
-}
-#else
-static void of_add_proc_dt_entry(struct device_node *dn)
-{
- return;
-}
-#endif
-
/**
* of_attach_node - Plug a device node into the tree and global list.
*/
@@ -1710,24 +1890,13 @@ int of_attach_node(struct device_node *np)
np->allnext = of_allnodes;
np->parent->child = np;
of_allnodes = np;
+ of_node_clear_flag(np, OF_DETACHED);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
- of_add_proc_dt_entry(np);
+ of_node_add(np);
return 0;
}
-#ifdef CONFIG_PROC_DEVICETREE
-static void of_remove_proc_dt_entry(struct device_node *dn)
-{
- proc_remove(dn->pde);
-}
-#else
-static void of_remove_proc_dt_entry(struct device_node *dn)
-{
- return;
-}
-#endif
-
/**
* of_detach_node - "Unplug" a node from the device tree.
*
@@ -1783,7 +1952,7 @@ int of_detach_node(struct device_node *np)
of_node_set_flag(np, OF_DETACHED);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
- of_remove_proc_dt_entry(np);
+ of_node_remove(np);
return rc;
}
#endif /* defined(CONFIG_OF_DYNAMIC) */
@@ -1819,9 +1988,9 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
of_chosen = of_find_node_by_path("/chosen@0");
if (of_chosen) {
- const char *name;
-
- name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ const char *name = of_get_property(of_chosen, "stdout-path", NULL);
+ if (!name)
+ name = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (name)
of_stdout = of_find_node_by_path(name);
}
@@ -1982,3 +2151,154 @@ struct device_node *of_find_next_cache_node(const struct device_node *np)
return NULL;
}
+
+/**
+ * of_graph_parse_endpoint() - parse common endpoint node properties
+ * @node: pointer to endpoint device_node
+ * @endpoint: pointer to the OF endpoint data structure
+ *
+ * The caller should hold a reference to @node.
+ */
+int of_graph_parse_endpoint(const struct device_node *node,
+ struct of_endpoint *endpoint)
+{
+ struct device_node *port_node = of_get_parent(node);
+
+ WARN_ONCE(!port_node, "%s(): endpoint %s has no parent node\n",
+ __func__, node->full_name);
+
+ memset(endpoint, 0, sizeof(*endpoint));
+
+ endpoint->local_node = node;
+ /*
+ * It doesn't matter whether the two calls below succeed.
+ * If they don't then the default value 0 is used.
+ */
+ of_property_read_u32(port_node, "reg", &endpoint->port);
+ of_property_read_u32(node, "reg", &endpoint->id);
+
+ of_node_put(port_node);
+
+ return 0;
+}
+EXPORT_SYMBOL(of_graph_parse_endpoint);
+
+/**
+ * of_graph_get_next_endpoint() - get next endpoint node
+ * @parent: pointer to the parent device node
+ * @prev: previous endpoint node, or NULL to get first
+ *
+ * Return: An 'endpoint' node pointer with refcount incremented. Refcount
+ * of the passed @prev node is not decremented; the caller has to use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
+ struct device_node *prev)
+{
+ struct device_node *endpoint;
+ struct device_node *port;
+
+ if (!parent)
+ return NULL;
+
+ /*
+ * Start by locating the port node. If no previous endpoint is specified,
+ * search for the first port node; otherwise get the previous endpoint's
+ * parent port node.
+ */
+ if (!prev) {
+ struct device_node *node;
+
+ node = of_get_child_by_name(parent, "ports");
+ if (node)
+ parent = node;
+
+ port = of_get_child_by_name(parent, "port");
+ of_node_put(node);
+
+ if (!port) {
+ pr_err("%s(): no port node found in %s\n",
+ __func__, parent->full_name);
+ return NULL;
+ }
+ } else {
+ port = of_get_parent(prev);
+ if (WARN_ONCE(!port, "%s(): endpoint %s has no parent node\n",
+ __func__, prev->full_name))
+ return NULL;
+
+ /*
+ * Avoid dropping prev node refcount to 0 when getting the next
+ * child below.
+ */
+ of_node_get(prev);
+ }
+
+ while (1) {
+ /*
+ * Now that we have a port node, get the next endpoint by
+ * getting the next child. If the previous endpoint is NULL this
+ * will return the first child.
+ */
+ endpoint = of_get_next_child(port, prev);
+ if (endpoint) {
+ of_node_put(port);
+ return endpoint;
+ }
+
+ /* No more endpoints under this port, try the next one. */
+ prev = NULL;
+
+ do {
+ port = of_get_next_child(parent, port);
+ if (!port)
+ return NULL;
+ } while (of_node_cmp(port->name, "port"));
+ }
+}
+EXPORT_SYMBOL(of_graph_get_next_endpoint);
+
+/**
+ * of_graph_get_remote_port_parent() - get remote port's parent node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote device node associated with remote endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port_parent(
+ const struct device_node *node)
+{
+ struct device_node *np;
+ unsigned int depth;
+
+ /* Get remote endpoint node. */
+ np = of_parse_phandle(node, "remote-endpoint", 0);
+
+ /* Walk 3 levels up only if there is a 'ports' node. */
+ for (depth = 3; depth && np; depth--) {
+ np = of_get_next_parent(np);
+ if (depth == 2 && of_node_cmp(np->name, "ports"))
+ break;
+ }
+ return np;
+}
+EXPORT_SYMBOL(of_graph_get_remote_port_parent);
+
+/**
+ * of_graph_get_remote_port() - get remote port node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote port node associated with remote endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port(const struct device_node *node)
+{
+ struct device_node *np;
+
+ /* Get remote endpoint node. */
+ np = of_parse_phandle(node, "remote-endpoint", 0);
+ if (!np)
+ return NULL;
+ return of_get_next_parent(np);
+}
+EXPORT_SYMBOL(of_graph_get_remote_port);
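
A minimal caller sketch for the graph helpers above, assuming the caller already holds a reference on dev_node; the function name is hypothetical:

	static struct device_node *example_first_remote(struct device_node *dev_node)
	{
		struct of_endpoint ep;
		struct device_node *endpoint, *remote;

		endpoint = of_graph_get_next_endpoint(dev_node, NULL);
		if (!endpoint)
			return NULL;

		of_graph_parse_endpoint(endpoint, &ep);
		pr_debug("local port %u, endpoint %u\n", ep.port, ep.id);

		/* Node on the far side of the link; caller must of_node_put() it. */
		remote = of_graph_get_remote_port_parent(endpoint);
		of_node_put(endpoint);
		return remote;
	}
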
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 758b4f8b30b7..fa16a912a927 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -15,6 +15,8 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -202,6 +204,7 @@ static void * unflatten_dt_node(struct boot_param_header *blob,
__alignof__(struct device_node));
if (allnextpp) {
char *fn;
+ of_node_init(np);
np->full_name = fn = ((char *)np) + sizeof(*np);
if (new_format) {
/* rebuild full path for new format */
@@ -232,7 +235,6 @@ static void * unflatten_dt_node(struct boot_param_header *blob,
dad->next->sibling = np;
dad->next = np;
}
- kref_init(&np->kref);
}
/* process properties */
while (1) {
@@ -440,6 +442,129 @@ struct boot_param_header *initial_boot_params;
#ifdef CONFIG_OF_EARLY_FLATTREE
/**
+ * __reserved_mem_reserve_reg() - reserve all memory described in the 'reg' property
+ */
+static int __init __reserved_mem_reserve_reg(unsigned long node,
+ const char *uname)
+{
+ int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
+ phys_addr_t base, size;
+ unsigned long len;
+ __be32 *prop;
+ int nomap, first = 1;
+
+ prop = of_get_flat_dt_prop(node, "reg", &len);
+ if (!prop)
+ return -ENOENT;
+
+ if (len && len % t_len != 0) {
+ pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
+ uname);
+ return -EINVAL;
+ }
+
+ nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+
+ while (len >= t_len) {
+ base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ size = dt_mem_next_cell(dt_root_size_cells, &prop);
+
+ if (base && size &&
+ early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
+ pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
+ uname, &base, (unsigned long)size / SZ_1M);
+ else
+ pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n",
+ uname, &base, (unsigned long)size / SZ_1M);
+
+ len -= t_len;
+ if (first) {
+ fdt_reserved_mem_save_node(node, uname, base, size);
+ first = 0;
+ }
+ }
+ return 0;
+}
+
+/**
+ * __reserved_mem_check_root() - check if #size-cells, #address-cells provided
+ * in /reserved-memory match the values supported by the current implementation,
+ * and also check that the ranges property has been provided
+ */
+static int __reserved_mem_check_root(unsigned long node)
+{
+ __be32 *prop;
+
+ prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
+ if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
+ return -EINVAL;
+
+ prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
+ if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
+ return -EINVAL;
+
+ prop = of_get_flat_dt_prop(node, "ranges", NULL);
+ if (!prop)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * __fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
+ */
+static int __init __fdt_scan_reserved_mem(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ static int found;
+ const char *status;
+ int err;
+
+ if (!found && depth == 1 && strcmp(uname, "reserved-memory") == 0) {
+ if (__reserved_mem_check_root(node) != 0) {
+ pr_err("Reserved memory: unsupported node format, ignoring\n");
+ /* break scan */
+ return 1;
+ }
+ found = 1;
+ /* scan next node */
+ return 0;
+ } else if (!found) {
+ /* scan next node */
+ return 0;
+ } else if (found && depth < 2) {
+ /* scanning of /reserved-memory has finished */
+ return 1;
+ }
+
+ status = of_get_flat_dt_prop(node, "status", NULL);
+ if (status && strcmp(status, "okay") != 0 && strcmp(status, "ok") != 0)
+ return 0;
+
+ err = __reserved_mem_reserve_reg(node, uname);
+ if (err == -ENOENT && of_get_flat_dt_prop(node, "size", NULL))
+ fdt_reserved_mem_save_node(node, uname, 0, 0);
+
+ /* scan next node */
+ return 0;
+}
+
+/**
+ * early_init_fdt_scan_reserved_mem() - create reserved memory regions
+ *
+ * This function grabs memory from the early allocator for exclusive device use,
+ * as defined in the device tree structures. It should be called by arch-specific
+ * code once the early allocator (i.e. memblock) has been fully activated.
+ */
+void __init early_init_fdt_scan_reserved_mem(void)
+{
+ if (!initial_boot_params)
+ return;
+
+ of_scan_flat_dt(__fdt_scan_reserved_mem, NULL);
+ fdt_init_reserved_mem();
+}
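
A sketch of a matching /reserved-memory node and of the expected arch-side call; the DT values and the hook name are hypothetical:

	/*
	 * reserved-memory {
	 *	#address-cells = <1>;
	 *	#size-cells = <1>;
	 *	ranges;
	 *
	 *	framebuffer@78000000 {
	 *		reg = <0x78000000 0x800000>;
	 *		no-map;
	 *	};
	 * };
	 */
	void __init example_arch_reserve_memory(void)
	{
		/* memblock must already be populated at this point */
		early_init_fdt_scan_reserved_mem();
	}
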
+
+/**
* of_scan_flat_dt - scan flattened tree blob and call callback on each.
* @it: callback function
* @data: context data pointer
@@ -856,6 +981,16 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
+int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
+ phys_addr_t size, bool nomap)
+{
+ if (memblock_is_region_reserved(base, size))
+ return -EBUSY;
+ if (nomap)
+ return memblock_remove(base, size);
+ return memblock_reserve(base, size);
+}
+
/*
* called from unflatten_device_tree() to bootstrap devicetree itself
* Architectures can override this definition if memblock isn't used
@@ -864,6 +999,14 @@ void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
return __va(memblock_alloc(size, align));
}
+#else
+int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
+ phys_addr_t size, bool nomap)
+{
+ pr_err("Reserved memory not supported, ignoring range 0x%llx - 0x%llx%s\n",
+ base, size, nomap ? " (nomap)" : "");
+ return -ENOSYS;
+}
#endif
bool __init early_init_dt_scan(void *params)
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 5b3c24f3cde5..9a95831bd065 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -43,6 +43,23 @@ static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
}
}
+/* Extract the clause 22 phy ID from the compatible string of the form
+ * ethernet-phy-idAAAA.BBBB */
+static int of_get_phy_id(struct device_node *device, u32 *phy_id)
+{
+ struct property *prop;
+ const char *cp;
+ unsigned int upper, lower;
+
+ of_property_for_each_string(device, "compatible", prop, cp) {
+ if (sscanf(cp, "ethernet-phy-id%4x.%4x", &upper, &lower) == 2) {
+ *phy_id = ((upper & 0xFFFF) << 16) | (lower & 0xFFFF);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
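
A worked example for the parser above, using a hypothetical compatible value:

	/*
	 * compatible = "ethernet-phy-id0141.0e90";
	 *   -> upper = 0x0141, lower = 0x0e90
	 *   -> *phy_id = 0x01410e90, later handed to phy_device_create() below
	 */
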
+
static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child,
u32 addr)
{
@@ -50,11 +67,15 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
bool is_c45;
int rc;
u32 max_speed = 0;
+ u32 phy_id;
is_c45 = of_device_is_compatible(child,
"ethernet-phy-ieee802.3-c45");
- phy = get_phy_device(mdio, addr, is_c45);
+ if (!is_c45 && !of_get_phy_id(child, &phy_id))
+ phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
+ else
+ phy = get_phy_device(mdio, addr, is_c45);
if (!phy || IS_ERR(phy))
return 1;
diff --git a/drivers/of/of_mtd.c b/drivers/of/of_mtd.c
index a27ec94877e4..b7361ed70537 100644
--- a/drivers/of/of_mtd.c
+++ b/drivers/of/of_mtd.c
@@ -50,6 +50,40 @@ int of_get_nand_ecc_mode(struct device_node *np)
EXPORT_SYMBOL_GPL(of_get_nand_ecc_mode);
/**
+ * of_get_nand_ecc_step_size - Get ECC step size associated with
+ * the required ECC strength (see below).
+ * @np: Pointer to the given device_node
+ *
+ * return the ECC step size, or errno in error case.
+ */
+int of_get_nand_ecc_step_size(struct device_node *np)
+{
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
+ return ret ? ret : val;
+}
+EXPORT_SYMBOL_GPL(of_get_nand_ecc_step_size);
+
+/**
+ * of_get_nand_ecc_strength - Get required ECC strength over the
+ * corresponding step size as defined by 'nand-ecc-size'
+ * @np: Pointer to the given device_node
+ *
+ * return the ECC strength, or errno in error case.
+ */
+int of_get_nand_ecc_strength(struct device_node *np)
+{
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(np, "nand-ecc-strength", &val);
+ return ret ? ret : val;
+}
+EXPORT_SYMBOL_GPL(of_get_nand_ecc_strength);
+
+/**
 * of_get_nand_bus_width - Get nand bus width for given device_node
* @np: Pointer to the given device_node
*
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index a208a457558c..73e14184aafe 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -12,33 +12,12 @@
#include <linux/export.h>
/**
- * It maps 'enum phy_interface_t' found in include/linux/phy.h
- * into the device tree binding of 'phy-mode', so that Ethernet
- * device driver can get phy interface from device tree.
- */
-static const char *phy_modes[] = {
- [PHY_INTERFACE_MODE_NA] = "",
- [PHY_INTERFACE_MODE_MII] = "mii",
- [PHY_INTERFACE_MODE_GMII] = "gmii",
- [PHY_INTERFACE_MODE_SGMII] = "sgmii",
- [PHY_INTERFACE_MODE_TBI] = "tbi",
- [PHY_INTERFACE_MODE_REVMII] = "rev-mii",
- [PHY_INTERFACE_MODE_RMII] = "rmii",
- [PHY_INTERFACE_MODE_RGMII] = "rgmii",
- [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
- [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
- [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
- [PHY_INTERFACE_MODE_RTBI] = "rtbi",
- [PHY_INTERFACE_MODE_SMII] = "smii",
- [PHY_INTERFACE_MODE_XGMII] = "xgmii",
-};
-
-/**
* of_get_phy_mode - Get phy mode for given device_node
* @np: Pointer to the given device_node
*
- * The function gets phy interface string from property 'phy-mode',
- * and return its index in phy_modes table, or errno in error case.
+ * The function gets phy interface string from property 'phy-mode' or
+ * 'phy-connection-type', and return its index in phy_modes table, or errno in
+ * error case.
*/
int of_get_phy_mode(struct device_node *np)
{
@@ -47,10 +26,12 @@ int of_get_phy_mode(struct device_node *np)
err = of_property_read_string(np, "phy-mode", &pm);
if (err < 0)
+ err = of_property_read_string(np, "phy-connection-type", &pm);
+ if (err < 0)
return err;
- for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
- if (!strcasecmp(pm, phy_modes[i]))
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+ if (!strcasecmp(pm, phy_modes(i)))
return i;
return -ENODEV;
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
new file mode 100644
index 000000000000..daaaf935911d
--- /dev/null
+++ b/drivers/of/of_reserved_mem.c
@@ -0,0 +1,217 @@
+/*
+ * Device tree based initialization code for reserved memory.
+ *
+ * Copyright (c) 2013, The Linux Foundation. All Rights Reserved.
+ * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ * Author: Josh Cartwright <joshc@codeaurora.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version of the license.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
+#include <linux/of_reserved_mem.h>
+
+#define MAX_RESERVED_REGIONS 16
+static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
+static int reserved_mem_count;
+
+#if defined(CONFIG_HAVE_MEMBLOCK)
+#include <linux/memblock.h>
+int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
+ phys_addr_t *res_base)
+{
+ /*
+ * We use __memblock_alloc_base() because memblock_alloc_base()
+ * panic()s on allocation failure.
+ */
+ phys_addr_t base = __memblock_alloc_base(size, align, end);
+ if (!base)
+ return -ENOMEM;
+
+ /*
+ * Check if the allocated region fits into the start..end window
+ */
+ if (base < start) {
+ memblock_free(base, size);
+ return -ENOMEM;
+ }
+
+ *res_base = base;
+ if (nomap)
+ return memblock_remove(base, size);
+ return 0;
+}
+#else
+int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
+ phys_addr_t *res_base)
+{
+ pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n",
+ size, nomap ? " (nomap)" : "");
+ return -ENOSYS;
+}
+#endif
+
+/**
+ * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
+ */
+void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
+ phys_addr_t base, phys_addr_t size)
+{
+ struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
+
+ if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
+ pr_err("Reserved memory: not enough space for all defined regions.\n");
+ return;
+ }
+
+ rmem->fdt_node = node;
+ rmem->name = uname;
+ rmem->base = base;
+ rmem->size = size;
+
+ reserved_mem_count++;
+ return;
+}
+
+/**
+ * __reserved_mem_alloc_size() - allocate reserved memory described by 'size', 'align'
+ * and 'alloc-ranges' properties
+ */
+static int __init __reserved_mem_alloc_size(unsigned long node,
+ const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
+{
+ int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
+ phys_addr_t start = 0, end = 0;
+ phys_addr_t base = 0, align = 0, size;
+ unsigned long len;
+ __be32 *prop;
+ int nomap;
+ int ret;
+
+ prop = of_get_flat_dt_prop(node, "size", &len);
+ if (!prop)
+ return -EINVAL;
+
+ if (len != dt_root_size_cells * sizeof(__be32)) {
+ pr_err("Reserved memory: invalid size property in '%s' node.\n",
+ uname);
+ return -EINVAL;
+ }
+ size = dt_mem_next_cell(dt_root_size_cells, &prop);
+
+ nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+
+ prop = of_get_flat_dt_prop(node, "alignment", &len);
+ if (prop) {
+ if (len != dt_root_addr_cells * sizeof(__be32)) {
+ pr_err("Reserved memory: invalid alignment property in '%s' node.\n",
+ uname);
+ return -EINVAL;
+ }
+ align = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ }
+
+ prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
+ if (prop) {
+
+ if (len % t_len != 0) {
+ pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n",
+ uname);
+ return -EINVAL;
+ }
+
+ base = 0;
+
+ while (len > 0) {
+ start = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ end = start + dt_mem_next_cell(dt_root_size_cells,
+ &prop);
+
+ ret = early_init_dt_alloc_reserved_memory_arch(size,
+ align, start, end, nomap, &base);
+ if (ret == 0) {
+ pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
+ uname, &base,
+ (unsigned long)size / SZ_1M);
+ break;
+ }
+ len -= t_len;
+ }
+
+ } else {
+ ret = early_init_dt_alloc_reserved_memory_arch(size, align,
+ 0, 0, nomap, &base);
+ if (ret == 0)
+ pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
+ uname, &base, (unsigned long)size / SZ_1M);
+ }
+
+ if (base == 0) {
+ pr_info("Reserved memory: failed to allocate memory for node '%s'\n",
+ uname);
+ return -ENOMEM;
+ }
+
+ *res_base = base;
+ *res_size = size;
+
+ return 0;
+}
+
+static const struct of_device_id __rmem_of_table_sentinel
+ __used __section(__reservedmem_of_table_end);
+
+/**
+ * __reserved_mem_init_node() - call region specific reserved memory init code
+ */
+static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
+{
+ extern const struct of_device_id __reservedmem_of_table[];
+ const struct of_device_id *i;
+
+ for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
+ reservedmem_of_init_fn initfn = i->data;
+ const char *compat = i->compatible;
+
+ if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
+ continue;
+
+ if (initfn(rmem, rmem->fdt_node, rmem->name) == 0) {
+ pr_info("Reserved memory: initialized node %s, compatible id %s\n",
+ rmem->name, compat);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/**
+ * fdt_init_reserved_mem - allocate and init all saved reserved memory regions
+ */
+void __init fdt_init_reserved_mem(void)
+{
+ int i;
+ for (i = 0; i < reserved_mem_count; i++) {
+ struct reserved_mem *rmem = &reserved_mem[i];
+ unsigned long node = rmem->fdt_node;
+ int err = 0;
+
+ if (rmem->size == 0)
+ err = __reserved_mem_alloc_size(node, rmem->name,
+ &rmem->base, &rmem->size);
+ if (err == 0)
+ __reserved_mem_init_node(rmem);
+ }
+}
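
A sketch of a region-specific init hook matched by __reserved_mem_init_node() above; the compatible string is hypothetical and the registration macro is assumed to come from <linux/of_reserved_mem.h>:

	static int __init example_rmem_init(struct reserved_mem *rmem,
					    unsigned long node, const char *uname)
	{
		pr_info("example pool '%s': base %pa, size %pa\n",
			uname, &rmem->base, &rmem->size);
		return 0;
	}
	RESERVEDMEM_OF_DECLARE(example, "vendor,example-pool", example_rmem_init);
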
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 7b666736c168..36b4035881b0 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -176,11 +176,10 @@ static struct device_node * __init of_pdt_create_node(phandle node,
return NULL;
dp = prom_early_alloc(sizeof(*dp));
+ of_node_init(dp);
of_pdt_incr_unique_id(dp);
dp->parent = parent;
- kref_init(&dp->kref);
-
dp->name = of_pdt_get_one_property(node, "name");
dp->type = of_pdt_get_one_property(node, "device_type");
dp->phandle = node;
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index 6643d1920985..ae4450070503 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -30,6 +30,67 @@ static struct selftest_results {
} \
}
+static void __init of_selftest_dynamic(void)
+{
+ struct device_node *np;
+ struct property *prop;
+
+ np = of_find_node_by_path("/testcase-data");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ /* Array of 4 properties for the purpose of testing */
+ prop = kzalloc(sizeof(*prop) * 4, GFP_KERNEL);
+ if (!prop) {
+ selftest(0, "kzalloc() failed\n");
+ return;
+ }
+
+ /* Add a new property - should pass */
+ prop->name = "new-property";
+ prop->value = "new-property-data";
+ prop->length = strlen(prop->value);
+ selftest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
+
+ /* Try to add an existing property - should fail */
+ prop++;
+ prop->name = "new-property";
+ prop->value = "new-property-data-should-fail";
+ prop->length = strlen(prop->value);
+ selftest(of_add_property(np, prop) != 0,
+ "Adding an existing property should have failed\n");
+
+ /* Try to modify an existing property - should pass */
+ prop->value = "modify-property-data-should-pass";
+ prop->length = strlen(prop->value);
+ selftest(of_update_property(np, prop) == 0,
+ "Updating an existing property should have passed\n");
+
+ /* Try to modify non-existent property - should pass */
+ prop++;
+ prop->name = "modify-property";
+ prop->value = "modify-missing-property-data-should-pass";
+ prop->length = strlen(prop->value);
+ selftest(of_update_property(np, prop) == 0,
+ "Updating a missing property should have passed\n");
+
+ /* Remove property - should pass */
+ selftest(of_remove_property(np, prop) == 0,
+ "Removing a property should have passed\n");
+
+ /* Adding very large property - should pass */
+ prop++;
+ prop->name = "large-property-PAGE_SIZEx8";
+ prop->length = PAGE_SIZE * 8;
+ prop->value = kzalloc(prop->length, GFP_KERNEL);
+ selftest(prop->value != NULL, "Unable to allocate large buffer\n");
+ if (prop->value)
+ selftest(of_add_property(np, prop) == 0,
+ "Adding a large property should have passed\n");
+}
+
static void __init of_selftest_parse_phandle_with_args(void)
{
struct device_node *np;
@@ -378,6 +439,7 @@ static int __init of_selftest(void)
of_node_put(np);
pr_info("start of selftest - you will see error messages\n");
+ of_selftest_dynamic();
of_selftest_parse_phandle_with_args();
of_selftest_property_match_string();
of_selftest_parse_interrupts();
diff --git a/drivers/of/testcase-data/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi
index 0007d3cd7dc2..788a4c24b8f5 100644
--- a/drivers/of/testcase-data/tests-phandle.dtsi
+++ b/drivers/of/testcase-data/tests-phandle.dtsi
@@ -1,6 +1,9 @@
/ {
testcase-data {
+ security-password = "password";
+ duplicate-name = "duplicate";
+ duplicate-name { };
phandle-tests {
provider0: provider0 {
#phandle-cells = <0>;
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
index 76f1c9357f39..9559829fb234 100644
--- a/drivers/oprofile/nmi_timer_int.c
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -108,8 +108,8 @@ static void nmi_timer_shutdown(void)
struct perf_event *event;
int cpu;
- get_online_cpus();
- unregister_cpu_notifier(&nmi_timer_cpu_nb);
+ cpu_notifier_register_begin();
+ __unregister_cpu_notifier(&nmi_timer_cpu_nb);
for_each_possible_cpu(cpu) {
event = per_cpu(nmi_timer_events, cpu);
if (!event)
@@ -119,7 +119,7 @@ static void nmi_timer_shutdown(void)
perf_event_release_kernel(event);
}
- put_online_cpus();
+ cpu_notifier_register_done();
}
static int nmi_timer_setup(void)
@@ -132,20 +132,23 @@ static int nmi_timer_setup(void)
do_div(period, HZ);
nmi_timer_attr.sample_period = period;
- get_online_cpus();
- err = register_cpu_notifier(&nmi_timer_cpu_nb);
+ cpu_notifier_register_begin();
+ err = __register_cpu_notifier(&nmi_timer_cpu_nb);
if (err)
goto out;
+
/* can't attach events to offline cpus: */
for_each_online_cpu(cpu) {
err = nmi_timer_start_cpu(cpu);
- if (err)
- break;
+ if (err) {
+ cpu_notifier_register_done();
+ nmi_timer_shutdown();
+ return err;
+ }
}
- if (err)
- nmi_timer_shutdown();
+
out:
- put_online_cpus();
+ cpu_notifier_register_done();
return err;
}
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 6a83ee1e9178..3fa66244ce32 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -905,7 +905,8 @@ int parport_claim_or_block(struct pardevice *dev)
/* If dev->waiting is clear now, an interrupt
gave us the port and we would deadlock if we slept. */
if (dev->waiting) {
- interruptible_sleep_on (&dev->wait_q);
+ wait_event_interruptible(dev->wait_q,
+ !dev->waiting);
if (signal_pending (current)) {
return -EINTR;
}
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 17d2b07ee67c..e04fe2d9df3b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -33,21 +33,14 @@ obj-$(CONFIG_PCI_IOV) += iov.o
#
# Some architectures use the generic PCI setup functions
#
-obj-$(CONFIG_X86) += setup-bus.o
-obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
-obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
-obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
-obj-$(CONFIG_PARISC) += setup-bus.o
-obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
-obj-$(CONFIG_PPC) += setup-bus.o
-obj-$(CONFIG_FRV) += setup-bus.o
-obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
-obj-$(CONFIG_X86_VISWS) += setup-irq.o
-obj-$(CONFIG_MN10300) += setup-bus.o
-obj-$(CONFIG_MICROBLAZE) += setup-bus.o
-obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
-obj-$(CONFIG_SPARC_LEON) += setup-bus.o setup-irq.o
-obj-$(CONFIG_M68K) += setup-bus.o setup-irq.o
+obj-$(CONFIG_ALPHA) += setup-irq.o
+obj-$(CONFIG_ARM) += setup-irq.o
+obj-$(CONFIG_UNICORE32) += setup-irq.o
+obj-$(CONFIG_SUPERH) += setup-irq.o
+obj-$(CONFIG_MIPS) += setup-irq.o
+obj-$(CONFIG_TILE) += setup-irq.o
+obj-$(CONFIG_SPARC_LEON) += setup-irq.o
+obj-$(CONFIG_M68K) += setup-irq.o
#
# ACPI Related PCI FW Functions
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 38901665c770..fb8aed307c28 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -132,7 +132,7 @@ static void pci_clip_resource_to_region(struct pci_bus *bus,
static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
- resource_size_t min, unsigned int type_mask,
+ resource_size_t min, unsigned long type_mask,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
@@ -144,7 +144,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
struct resource *r, avail;
resource_size_t max;
- type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
+ type_mask |= IORESOURCE_TYPE_BITS;
pci_bus_for_each_resource(bus, r, i) {
if (!r)
@@ -200,7 +200,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
*/
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
- resource_size_t min, unsigned int type_mask,
+ resource_size_t min, unsigned long type_mask,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index 06ace6248c61..47aaf22d814e 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -32,11 +32,6 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
bridge->release_data = release_data;
}
-static bool resource_contains(struct resource *res1, struct resource *res2)
-{
- return res1->start <= res2->start && res1->end >= res2->end;
-}
-
void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
struct resource *res)
{
@@ -45,9 +40,6 @@ void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
resource_size_t offset = 0;
list_for_each_entry(window, &bridge->windows, list) {
- if (resource_type(res) != resource_type(window->res))
- continue;
-
if (resource_contains(window->res, res)) {
offset = window->offset;
break;
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 47d46c6d8468..a6f67ec8882f 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -27,7 +27,7 @@ config PCI_TEGRA
config PCI_RCAR_GEN2
bool "Renesas R-Car Gen2 Internal PCI controller"
- depends on ARM && (ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST)
+ depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
help
Say Y here if you want internal PCI support on R-Car Gen2 SoC.
There are 3 internal PCI controllers available with a single
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index e8663a8c3406..ee082509b0ba 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -424,20 +424,40 @@ static void imx6_pcie_reset_phy(struct pcie_port *pp)
static int imx6_pcie_link_up(struct pcie_port *pp)
{
- u32 rc, ltssm, rx_valid;
+ u32 rc, debug_r0, rx_valid;
+ int count = 5;
/*
- * Test if the PHY reports that the link is up and also that
- * the link training finished. It might happen that the PHY
- * reports the link is already up, but the link training bit
- * is still set, so make sure to check the training is done
- * as well here.
+ * Test if the PHY reports that the link is up and also that the LTSSM
+ * training finished. There are three possible states of the link when
+ * this code is called:
+ * 1) The link is DOWN (unlikely)
+ * The link didn't come up yet for some reason. This usually means
+ * we have a real problem somewhere. Reset the PHY and exit. This
+ * state calls for inspection of the DEBUG registers.
+ * 2) The link is UP, but still in LTSSM training
+ * Wait for the training to finish, which should take a very short
+ * time. If the training does not finish, we have a problem and we
+ * need to inspect the DEBUG registers. If the training does finish,
+ * the link is up and operating correctly.
+ * 3) The link is UP and no longer in LTSSM training
+ * The link is up and operating correctly.
*/
- rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
- if ((rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) &&
- !(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
- return 1;
-
+ while (1) {
+ rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
+ if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
+ break;
+ if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
+ return 1;
+ if (!count--)
+ break;
+ dev_dbg(pp->dev, "Link is up, but still in training\n");
+ /*
+ * Wait a little bit, then re-check if the link finished
+ * the training.
+ */
+ usleep_range(1000, 2000);
+ }
/*
* From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
* Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
@@ -446,15 +466,16 @@ static int imx6_pcie_link_up(struct pcie_port *pp)
* to gen2 is stuck
*/
pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
- ltssm = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;
+ debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);
if (rx_valid & 0x01)
return 0;
- if (ltssm != 0x0d)
+ if ((debug_r0 & 0x3f) != 0x0d)
return 0;
dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
+ dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);
imx6_pcie_reset_phy(pp);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 0e79665afd44..d3d1cfd51e09 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -101,7 +101,9 @@ struct mvebu_pcie {
struct mvebu_pcie_port *ports;
struct msi_chip *msi;
struct resource io;
+ char io_name[30];
struct resource realio;
+ char mem_name[30];
struct resource mem;
struct resource busn;
int nports;
@@ -672,10 +674,30 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct mvebu_pcie *pcie = sys_to_pcie(sys);
int i;
+ int domain = 0;
- if (resource_size(&pcie->realio) != 0)
+#ifdef CONFIG_PCI_DOMAINS
+ domain = sys->domain;
+#endif
+
+ snprintf(pcie->mem_name, sizeof(pcie->mem_name), "PCI MEM %04x",
+ domain);
+ pcie->mem.name = pcie->mem_name;
+
+ snprintf(pcie->io_name, sizeof(pcie->io_name), "PCI I/O %04x", domain);
+ pcie->realio.name = pcie->io_name;
+
+ if (request_resource(&iomem_resource, &pcie->mem))
+ return 0;
+
+ if (resource_size(&pcie->realio) != 0) {
+ if (request_resource(&ioport_resource, &pcie->realio)) {
+ release_resource(&pcie->mem);
+ return 0;
+ }
pci_add_resource_offset(&sys->resources, &pcie->realio,
sys->io_offset);
+ }
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
@@ -797,7 +819,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
for (i = 0; i < nranges; i++) {
u32 flags = of_read_number(range, 1);
- u32 slot = of_read_number(range, 2);
+ u32 slot = of_read_number(range + 1, 1);
u64 cpuaddr = of_read_number(range + na, pna);
unsigned long rtype;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index ceec147baec3..fd3e3ab56509 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
/* AHB-PCI Bridge PCI communication registers */
@@ -39,9 +40,26 @@
#define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20)
#define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24)
+#define RCAR_PCI_INT_SIGTABORT (1 << 0)
+#define RCAR_PCI_INT_SIGRETABORT (1 << 1)
+#define RCAR_PCI_INT_REMABORT (1 << 2)
+#define RCAR_PCI_INT_PERR (1 << 3)
+#define RCAR_PCI_INT_SIGSERR (1 << 4)
+#define RCAR_PCI_INT_RESERR (1 << 5)
+#define RCAR_PCI_INT_WIN1ERR (1 << 12)
+#define RCAR_PCI_INT_WIN2ERR (1 << 13)
#define RCAR_PCI_INT_A (1 << 16)
#define RCAR_PCI_INT_B (1 << 17)
#define RCAR_PCI_INT_PME (1 << 19)
+#define RCAR_PCI_INT_ALLERRORS (RCAR_PCI_INT_SIGTABORT | \
+ RCAR_PCI_INT_SIGRETABORT | \
+ RCAR_PCI_INT_SIGRETABORT | \
+ RCAR_PCI_INT_REMABORT | \
+ RCAR_PCI_INT_PERR | \
+ RCAR_PCI_INT_SIGSERR | \
+ RCAR_PCI_INT_RESERR | \
+ RCAR_PCI_INT_WIN1ERR | \
+ RCAR_PCI_INT_WIN2ERR)
#define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30)
#define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0)
@@ -74,9 +92,6 @@
#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
-/* Number of internal PCI controllers */
-#define RCAR_PCI_NR_CONTROLLERS 3
-
struct rcar_pci_priv {
struct device *dev;
void __iomem *reg;
@@ -84,6 +99,7 @@ struct rcar_pci_priv {
struct resource mem_res;
struct resource *cfg_res;
int irq;
+ unsigned long window_size;
};
/* PCI configuration space operations */
@@ -102,6 +118,10 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
if (slot > 2)
return NULL;
+ /* bridge logic only has registers to 0x40 */
+ if (slot == 0x0 && where >= 0x40)
+ return NULL;
+
val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG :
RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG;
@@ -156,7 +176,7 @@ static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn,
}
/* PCI interrupt mapping */
-static int __init rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct pci_sys_data *sys = dev->bus->sysdata;
struct rcar_pci_priv *priv = sys->private_data;
@@ -164,8 +184,48 @@ static int __init rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return priv->irq;
}
+#ifdef CONFIG_PCI_DEBUG
+/* if debug enabled, then attach an error handler irq to the bridge */
+
+static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
+{
+ struct rcar_pci_priv *priv = pw;
+ u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
+
+ if (status & RCAR_PCI_INT_ALLERRORS) {
+ dev_err(priv->dev, "error irq: status %08x\n", status);
+
+ /* clear the error(s) */
+ iowrite32(status & RCAR_PCI_INT_ALLERRORS,
+ priv->reg + RCAR_PCI_INT_STATUS_REG);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq,
+ IRQF_SHARED, "error irq", priv);
+ if (ret) {
+ dev_err(priv->dev, "cannot claim IRQ for error handling\n");
+ return;
+ }
+
+ val = ioread32(priv->reg + RCAR_PCI_INT_ENABLE_REG);
+ val |= RCAR_PCI_INT_ALLERRORS;
+ iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG);
+}
+#else
+static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
+#endif
+
/* PCI host controller setup */
-static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
+static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
{
struct rcar_pci_priv *priv = sys->private_data;
void __iomem *reg = priv->reg;
@@ -183,10 +243,31 @@ static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
iowrite32(val, reg + RCAR_USBCTR_REG);
udelay(4);
- /* De-assert reset and set PCIAHB window1 size to 1GB */
+ /* De-assert reset and reset PCIAHB window1 size */
val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK |
RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST);
- iowrite32(val | RCAR_USBCTR_PCIAHB_WIN1_1G, reg + RCAR_USBCTR_REG);
+
+ /* Setup PCIAHB window1 size */
+ switch (priv->window_size) {
+ case SZ_2G:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_2G;
+ break;
+ case SZ_1G:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_1G;
+ break;
+ case SZ_512M:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_512M;
+ break;
+ default:
+ pr_warn("unknown window size %ld - defaulting to 256M\n",
+ priv->window_size);
+ priv->window_size = SZ_256M;
+ /* fall-through */
+ case SZ_256M:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_256M;
+ break;
+ }
+ iowrite32(val, reg + RCAR_USBCTR_REG);
/* Configure AHB master and slave modes */
iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG);
@@ -197,7 +278,7 @@ static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
RCAR_PCI_ARBITER_PCIBP_MODE;
iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG);
- /* PCI-AHB mapping: 0x40000000-0x80000000 */
+ /* PCI-AHB mapping: 0x40000000 base */
iowrite32(0x40000000 | RCAR_PCIAHB_PREFETCH16,
reg + RCAR_PCIAHB_WIN1_CTR_REG);
@@ -224,10 +305,15 @@ static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME,
reg + RCAR_PCI_INT_ENABLE_REG);
+ if (priv->irq > 0)
+ rcar_pci_setup_errirq(priv);
+
/* Add PCI resources */
pci_add_resource(&sys->resources, &priv->io_res);
pci_add_resource(&sys->resources, &priv->mem_res);
+ /* Setup bus number based on platform device id */
+ sys->busnr = to_platform_device(priv->dev)->id;
return 1;
}
@@ -236,48 +322,13 @@ static struct pci_ops rcar_pci_ops = {
.write = rcar_pci_write_config,
};
-static struct hw_pci rcar_hw_pci __initdata = {
- .map_irq = rcar_pci_map_irq,
- .ops = &rcar_pci_ops,
- .setup = rcar_pci_setup,
-};
-
-static int rcar_pci_count __initdata;
-
-static int __init rcar_pci_add_controller(struct rcar_pci_priv *priv)
-{
- void **private_data;
- int count;
-
- if (rcar_hw_pci.nr_controllers < rcar_pci_count)
- goto add_priv;
-
- /* (Re)allocate private data pointer array if needed */
- count = rcar_pci_count + RCAR_PCI_NR_CONTROLLERS;
- private_data = kzalloc(count * sizeof(void *), GFP_KERNEL);
- if (!private_data)
- return -ENOMEM;
-
- rcar_pci_count = count;
- if (rcar_hw_pci.private_data) {
- memcpy(private_data, rcar_hw_pci.private_data,
- rcar_hw_pci.nr_controllers * sizeof(void *));
- kfree(rcar_hw_pci.private_data);
- }
-
- rcar_hw_pci.private_data = private_data;
-
-add_priv:
- /* Add private data pointer to the array */
- rcar_hw_pci.private_data[rcar_hw_pci.nr_controllers++] = priv;
- return 0;
-}
-
-static int __init rcar_pci_probe(struct platform_device *pdev)
+static int rcar_pci_probe(struct platform_device *pdev)
{
struct resource *cfg_res, *mem_res;
struct rcar_pci_priv *priv;
void __iomem *reg;
+ struct hw_pci hw;
+ void *hw_private[1];
cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(&pdev->dev, cfg_res);
@@ -308,31 +359,34 @@ static int __init rcar_pci_probe(struct platform_device *pdev)
priv->reg = reg;
priv->dev = &pdev->dev;
- return rcar_pci_add_controller(priv);
+ if (priv->irq < 0) {
+ dev_err(&pdev->dev, "no valid irq found\n");
+ return priv->irq;
+ }
+
+ priv->window_size = SZ_1G;
+
+ hw_private[0] = priv;
+ memset(&hw, 0, sizeof(hw));
+ hw.nr_controllers = ARRAY_SIZE(hw_private);
+ hw.private_data = hw_private;
+ hw.map_irq = rcar_pci_map_irq;
+ hw.ops = &rcar_pci_ops;
+ hw.setup = rcar_pci_setup;
+ pci_common_init_dev(&pdev->dev, &hw);
+ return 0;
}
static struct platform_driver rcar_pci_driver = {
.driver = {
.name = "pci-rcar-gen2",
+ .owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
},
+ .probe = rcar_pci_probe,
};
-static int __init rcar_pci_init(void)
-{
- int retval;
-
- retval = platform_driver_probe(&rcar_pci_driver, rcar_pci_probe);
- if (!retval)
- pci_common_init(&rcar_hw_pci);
-
- /* Private data pointer array is not needed any more */
- kfree(rcar_hw_pci.private_data);
- rcar_hw_pci.private_data = NULL;
-
- return retval;
-}
-
-subsys_initcall(rcar_pci_init);
+module_platform_driver(rcar_pci_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 17ce88f79d2b..509a29d84509 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -294,14 +294,12 @@ no_valid_irq:
static void clear_irq(unsigned int irq)
{
unsigned int pos, nvec;
- struct irq_desc *desc;
struct msi_desc *msi;
struct pcie_port *pp;
struct irq_data *data = irq_get_irq_data(irq);
/* get the port structure */
- desc = irq_to_desc(irq);
- msi = irq_desc_get_msi_desc(desc);
+ msi = irq_data_get_msi(data);
pp = sys_to_pcie(msi->dev->bus->sysdata);
if (!pp) {
BUG();
@@ -800,7 +798,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* setup RC BARs */
dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
- dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_1);
+ dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
/* setup interrupt pins */
dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index b6162be4df40..2b859249303b 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -93,7 +93,6 @@ struct acpiphp_slot {
struct list_head funcs; /* one slot may have different
objects (i.e. for each function) */
struct slot *slot;
- struct mutex crit_sect;
u8 device; /* pci device# */
u32 flags; /* see below */
@@ -117,20 +116,30 @@ struct acpiphp_func {
};
struct acpiphp_context {
- acpi_handle handle;
+ struct acpi_hotplug_context hp;
struct acpiphp_func func;
struct acpiphp_bridge *bridge;
unsigned int refcount;
};
+static inline struct acpiphp_context *to_acpiphp_context(struct acpi_hotplug_context *hp)
+{
+ return container_of(hp, struct acpiphp_context, hp);
+}
+
static inline struct acpiphp_context *func_to_context(struct acpiphp_func *func)
{
return container_of(func, struct acpiphp_context, func);
}
+static inline struct acpi_device *func_to_acpi_device(struct acpiphp_func *func)
+{
+ return func_to_context(func)->hp.self;
+}
+
static inline acpi_handle func_to_handle(struct acpiphp_func *func)
{
- return func_to_context(func)->handle;
+ return func_to_acpi_device(func)->handle;
}
/*
@@ -158,7 +167,6 @@ struct acpiphp_attention_info
#define FUNC_HAS_STA (0x00000001)
#define FUNC_HAS_EJ0 (0x00000002)
-#define FUNC_HAS_DCK (0x00000004)
/* function prototypes */
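The acpi_hotplug_context member embedded above is what ties an ACPI device object to its ACPIPHP data: the glue code installs &context->hp into adev->hp, and container_of() recovers the outer structure. A minimal sketch of that lookup, assuming the context was installed with acpi_set_hp_context() as in the glue code below (helper name is illustrative):

/* Illustrative only: map an ACPI device back to its ACPIPHP context. */
static struct acpiphp_context *example_lookup(struct acpi_device *adev)
{
	if (!adev->hp)		/* no hotplug context installed */
		return NULL;
	return container_of(adev->hp, struct acpiphp_context, hp);
}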
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 7c7a388c85ab..bccc27ee1030 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -58,71 +58,59 @@
static LIST_HEAD(bridge_list);
static DEFINE_MUTEX(bridge_mutex);
-static DEFINE_MUTEX(acpiphp_context_lock);
-static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type);
+static void acpiphp_post_dock_fixup(struct acpi_device *adev);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(struct pci_bus *bus);
-static void hotplug_event(acpi_handle handle, u32 type, void *data);
+static void hotplug_event(u32 type, struct acpiphp_context *context);
static void free_bridge(struct kref *kref);
-static void acpiphp_context_handler(acpi_handle handle, void *context)
-{
- /* Intentionally empty. */
-}
-
/**
* acpiphp_init_context - Create hotplug context and grab a reference to it.
- * @handle: ACPI object handle to create the context for.
+ * @adev: ACPI device object to create the context for.
*
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
*/
-static struct acpiphp_context *acpiphp_init_context(acpi_handle handle)
+static struct acpiphp_context *acpiphp_init_context(struct acpi_device *adev)
{
struct acpiphp_context *context;
- acpi_status status;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return NULL;
- context->handle = handle;
context->refcount = 1;
- status = acpi_attach_data(handle, acpiphp_context_handler, context);
- if (ACPI_FAILURE(status)) {
- kfree(context);
- return NULL;
- }
+ acpi_set_hp_context(adev, &context->hp, acpiphp_hotplug_notify, NULL,
+ acpiphp_post_dock_fixup);
return context;
}
/**
* acpiphp_get_context - Get hotplug context and grab a reference to it.
- * @handle: ACPI object handle to get the context for.
+ * @adev: ACPI device object to get the context for.
*
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
*/
-static struct acpiphp_context *acpiphp_get_context(acpi_handle handle)
+static struct acpiphp_context *acpiphp_get_context(struct acpi_device *adev)
{
- struct acpiphp_context *context = NULL;
- acpi_status status;
- void *data;
+ struct acpiphp_context *context;
- status = acpi_get_data(handle, acpiphp_context_handler, &data);
- if (ACPI_SUCCESS(status)) {
- context = data;
- context->refcount++;
- }
+ if (!adev->hp)
+ return NULL;
+
+ context = to_acpiphp_context(adev->hp);
+ context->refcount++;
return context;
}
/**
* acpiphp_put_context - Drop a reference to ACPI hotplug context.
- * @handle: ACPI object handle to put the context for.
+ * @context: ACPI hotplug context to drop a reference to.
*
* The context object is removed if there are no more references to it.
*
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
*/
static void acpiphp_put_context(struct acpiphp_context *context)
{
@@ -130,7 +118,7 @@ static void acpiphp_put_context(struct acpiphp_context *context)
return;
WARN_ON(context->bridge);
- acpi_detach_data(context->handle, acpiphp_context_handler);
+ context->hp.self->hp = NULL;
kfree(context);
}
@@ -144,6 +132,27 @@ static inline void put_bridge(struct acpiphp_bridge *bridge)
kref_put(&bridge->ref, free_bridge);
}
+static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
+{
+ struct acpiphp_context *context;
+
+ acpi_lock_hp_context();
+ context = acpiphp_get_context(adev);
+ if (!context || context->func.parent->is_going_away) {
+ acpi_unlock_hp_context();
+ return NULL;
+ }
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ acpi_unlock_hp_context();
+ return context;
+}
+
+static void acpiphp_let_context_go(struct acpiphp_context *context)
+{
+ put_bridge(context->func.parent);
+}
+
static void free_bridge(struct kref *kref)
{
struct acpiphp_context *context;
@@ -151,7 +160,7 @@ static void free_bridge(struct kref *kref)
struct acpiphp_slot *slot, *next;
struct acpiphp_func *func, *tmp;
- mutex_lock(&acpiphp_context_lock);
+ acpi_lock_hp_context();
bridge = container_of(kref, struct acpiphp_bridge, ref);
@@ -175,31 +184,32 @@ static void free_bridge(struct kref *kref)
pci_dev_put(bridge->pci_dev);
kfree(bridge);
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
}
-/*
- * the _DCK method can do funny things... and sometimes not
- * hah-hah funny.
+/**
+ * acpiphp_post_dock_fixup - Post-dock fixups for PCI devices.
+ * @adev: ACPI device object corresponding to a PCI device.
*
- * TBD - figure out a way to only call fixups for
- * systems that require them.
+ * TBD - figure out a way to only call fixups for systems that require them.
*/
-static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
+static void acpiphp_post_dock_fixup(struct acpi_device *adev)
{
- struct acpiphp_context *context = data;
- struct pci_bus *bus = context->func.slot->bus;
+ struct acpiphp_context *context = acpiphp_grab_context(adev);
+ struct pci_bus *bus;
u32 buses;
- if (!bus->self)
+ if (!context)
return;
+ bus = context->func.slot->bus;
+ if (!bus->self)
+ goto out;
+
/* fixup bad _DCK function that rewrites
* secondary bridge on slot
*/
- pci_read_config_dword(bus->self,
- PCI_PRIMARY_BUS,
- &buses);
+ pci_read_config_dword(bus->self, PCI_PRIMARY_BUS, &buses);
if (((buses >> 8) & 0xff) != bus->busn_res.start) {
buses = (buses & 0xff000000)
@@ -208,33 +218,11 @@ static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
| ((unsigned int)(bus->busn_res.end) << 16);
pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
}
-}
-
-static void dock_event(acpi_handle handle, u32 type, void *data)
-{
- struct acpiphp_context *context;
-
- mutex_lock(&acpiphp_context_lock);
- context = acpiphp_get_context(handle);
- if (!context || WARN_ON(context->handle != handle)
- || context->func.parent->is_going_away) {
- mutex_unlock(&acpiphp_context_lock);
- return;
- }
- get_bridge(context->func.parent);
- acpiphp_put_context(context);
- mutex_unlock(&acpiphp_context_lock);
-
- hotplug_event(handle, type, data);
- put_bridge(context->func.parent);
+ out:
+ acpiphp_let_context_go(context);
}
-static const struct acpi_dock_ops acpiphp_dock_ops = {
- .fixup = post_dock_fixups,
- .handler = dock_event,
-};
-
/* Check whether the PCI device is managed by native PCIe hotplug driver */
static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
{
@@ -264,26 +252,19 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
return true;
}
-static void acpiphp_dock_init(void *data)
-{
- struct acpiphp_context *context = data;
-
- get_bridge(context->func.parent);
-}
-
-static void acpiphp_dock_release(void *data)
-{
- struct acpiphp_context *context = data;
-
- put_bridge(context->func.parent);
-}
-
-/* callback routine to register each ACPI PCI slot object */
-static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
- void **rv)
+/**
+ * acpiphp_add_context - Add ACPIPHP context to an ACPI device object.
+ * @handle: ACPI handle of the object to add a context to.
+ * @lvl: Not used.
+ * @data: The object's parent ACPIPHP bridge.
+ * @rv: Not used.
+ */
+static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
+ void **rv)
{
struct acpiphp_bridge *bridge = data;
struct acpiphp_context *context;
+ struct acpi_device *adev;
struct acpiphp_slot *slot;
struct acpiphp_func *newfunc;
acpi_status status = AE_OK;
@@ -293,9 +274,6 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
struct pci_dev *pdev = bridge->pci_dev;
u32 val;
- if (pdev && device_is_managed_by_native_pciehp(pdev))
- return AE_OK;
-
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
@@ -303,31 +281,34 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
"can't evaluate _ADR (%#x)\n", status);
return AE_OK;
}
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
- mutex_lock(&acpiphp_context_lock);
- context = acpiphp_init_context(handle);
+ acpi_lock_hp_context();
+ context = acpiphp_init_context(adev);
if (!context) {
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
acpi_handle_err(handle, "No hotplug context\n");
return AE_NOT_EXIST;
}
newfunc = &context->func;
newfunc->function = function;
newfunc->parent = bridge;
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
- if (acpi_has_method(handle, "_EJ0"))
+ /*
+ * If this is a dock device, its _EJ0 should be executed by the dock
+ * notify handler after calling _DCK.
+ */
+ if (!is_dock_device(adev) && acpi_has_method(handle, "_EJ0"))
newfunc->flags = FUNC_HAS_EJ0;
if (acpi_has_method(handle, "_STA"))
newfunc->flags |= FUNC_HAS_STA;
- if (acpi_has_method(handle, "_DCK"))
- newfunc->flags |= FUNC_HAS_DCK;
-
/* search for objects that share the same slot */
list_for_each_entry(slot, &bridge->slots, node)
if (slot->device == device)
@@ -335,19 +316,26 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
if (!slot) {
- status = AE_NO_MEMORY;
- goto err;
+ acpi_lock_hp_context();
+ acpiphp_put_context(context);
+ acpi_unlock_hp_context();
+ return AE_NO_MEMORY;
}
slot->bus = bridge->pci_bus;
slot->device = device;
INIT_LIST_HEAD(&slot->funcs);
- mutex_init(&slot->crit_sect);
list_add_tail(&slot->node, &bridge->slots);
- /* Register slots for ejectable functions only. */
- if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) {
+ /*
+ * Expose slots to user space for functions that have _EJ0 or _RMV or
+ * are located in dock stations. Do not expose them for devices handled
+ * by the native PCIe hotplug (PCIeHP), because that code is supposed to
+ * expose slots to user space in those cases.
+ */
+ if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
+ && !(pdev && device_is_managed_by_native_pciehp(pdev))) {
unsigned long long sun;
int retval;
@@ -381,44 +369,16 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
&val, 60*1000))
slot->flags |= SLOT_ENABLED;
- if (is_dock_device(handle)) {
- /* we don't want to call this device's _EJ0
- * because we want the dock notify handler
- * to call it after it calls _DCK
- */
- newfunc->flags &= ~FUNC_HAS_EJ0;
- if (register_hotplug_dock_device(handle,
- &acpiphp_dock_ops, context,
- acpiphp_dock_init, acpiphp_dock_release))
- pr_debug("failed to register dock device\n");
- }
-
- /* install notify handler */
- if (!(newfunc->flags & FUNC_HAS_DCK)) {
- status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event,
- context);
- if (ACPI_FAILURE(status))
- acpi_handle_err(handle,
- "failed to install notify handler\n");
- }
-
return AE_OK;
-
- err:
- mutex_lock(&acpiphp_context_lock);
- acpiphp_put_context(context);
- mutex_unlock(&acpiphp_context_lock);
- return status;
}
-static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
+static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev)
{
struct acpiphp_context *context;
struct acpiphp_bridge *bridge = NULL;
- mutex_lock(&acpiphp_context_lock);
- context = acpiphp_get_context(handle);
+ acpi_lock_hp_context();
+ context = acpiphp_get_context(adev);
if (context) {
bridge = context->bridge;
if (bridge)
@@ -426,7 +386,7 @@ static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
acpiphp_put_context(context);
}
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
return bridge;
}
@@ -434,22 +394,15 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
{
struct acpiphp_slot *slot;
struct acpiphp_func *func;
- acpi_status status;
list_for_each_entry(slot, &bridge->slots, node) {
list_for_each_entry(func, &slot->funcs, sibling) {
- acpi_handle handle = func_to_handle(func);
-
- if (is_dock_device(handle))
- unregister_hotplug_dock_device(handle);
+ struct acpi_device *adev = func_to_acpi_device(func);
- if (!(func->flags & FUNC_HAS_DCK)) {
- status = acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event);
- if (ACPI_FAILURE(status))
- pr_err("failed to remove notify handler\n");
- }
+ acpi_lock_hp_context();
+ adev->hp->notify = NULL;
+ adev->hp->fixup = NULL;
+ acpi_unlock_hp_context();
}
slot->flags |= SLOT_IS_GOING_AWAY;
if (slot->slot)
@@ -460,9 +413,9 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
list_del(&bridge->list);
mutex_unlock(&bridge_mutex);
- mutex_lock(&acpiphp_context_lock);
+ acpi_lock_hp_context();
bridge->is_going_away = true;
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
}
/**
@@ -471,7 +424,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
*/
static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
{
- struct list_head *tmp;
+ struct pci_bus *tmp;
unsigned char max, n;
/*
@@ -484,41 +437,14 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
*/
max = bus->busn_res.start;
- list_for_each(tmp, &bus->children) {
- n = pci_bus_max_busnr(pci_bus_b(tmp));
+ list_for_each_entry(tmp, &bus->children, node) {
+ n = pci_bus_max_busnr(tmp);
if (n > max)
max = n;
}
return max;
}
-/**
- * acpiphp_bus_trim - Trim device objects in an ACPI namespace subtree.
- * @handle: ACPI device object handle to start from.
- */
-static void acpiphp_bus_trim(acpi_handle handle)
-{
- struct acpi_device *adev = NULL;
-
- acpi_bus_get_device(handle, &adev);
- if (adev)
- acpi_bus_trim(adev);
-}
-
-/**
- * acpiphp_bus_add - Scan ACPI namespace subtree.
- * @handle: ACPI object handle to start the scan from.
- */
-static void acpiphp_bus_add(acpi_handle handle)
-{
- struct acpi_device *adev = NULL;
-
- acpi_bus_scan(handle);
- acpi_bus_get_device(handle, &adev);
- if (acpi_device_enumerated(adev))
- acpi_device_set_power(adev, ACPI_STATE_D0);
-}
-
static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
{
struct acpiphp_func *func;
@@ -558,9 +484,13 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
{
struct acpiphp_func *func;
- list_for_each_entry(func, &slot->funcs, sibling)
- acpiphp_bus_add(func_to_handle(func));
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ struct acpi_device *adev = func_to_acpi_device(func);
+ acpi_bus_scan(adev->handle);
+ if (acpi_device_enumerated(adev))
+ acpi_device_set_power(adev, ACPI_STATE_D0);
+ }
return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
}
@@ -625,32 +555,15 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
}
}
-/* return first device in slot, acquiring a reference on it */
-static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot)
-{
- struct pci_bus *bus = slot->bus;
- struct pci_dev *dev;
- struct pci_dev *ret = NULL;
-
- down_read(&pci_bus_sem);
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (PCI_SLOT(dev->devfn) == slot->device) {
- ret = pci_dev_get(dev);
- break;
- }
- up_read(&pci_bus_sem);
-
- return ret;
-}
-
/**
* disable_slot - disable a slot
* @slot: ACPI PHP slot
*/
static void disable_slot(struct acpiphp_slot *slot)
{
+ struct pci_bus *bus = slot->bus;
+ struct pci_dev *dev, *prev;
struct acpiphp_func *func;
- struct pci_dev *pdev;
/*
* enable_slot() enumerates all functions in this device via
@@ -658,22 +571,18 @@ static void disable_slot(struct acpiphp_slot *slot)
* methods (_EJ0, etc.) or not. Therefore, we remove all functions
* here.
*/
- while ((pdev = dev_in_slot(slot))) {
- pci_stop_and_remove_bus_device(pdev);
- pci_dev_put(pdev);
- }
+ list_for_each_entry_safe_reverse(dev, prev, &bus->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) == slot->device)
+ pci_stop_and_remove_bus_device(dev);
list_for_each_entry(func, &slot->funcs, sibling)
- acpiphp_bus_trim(func_to_handle(func));
+ acpi_bus_trim(func_to_acpi_device(func));
slot->flags &= (~SLOT_ENABLED);
}
-static bool acpiphp_no_hotplug(acpi_handle handle)
+static bool acpiphp_no_hotplug(struct acpi_device *adev)
{
- struct acpi_device *adev = NULL;
-
- acpi_bus_get_device(handle, &adev);
return adev && adev->flags.no_hotplug;
}
@@ -682,7 +591,7 @@ static bool slot_no_hotplug(struct acpiphp_slot *slot)
struct acpiphp_func *func;
list_for_each_entry(func, &slot->funcs, sibling)
- if (acpiphp_no_hotplug(func_to_handle(func)))
+ if (acpiphp_no_hotplug(func_to_acpi_device(func)))
return true;
return false;
@@ -747,28 +656,25 @@ static inline bool device_status_valid(unsigned int sta)
*/
static void trim_stale_devices(struct pci_dev *dev)
{
- acpi_handle handle = ACPI_HANDLE(&dev->dev);
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
struct pci_bus *bus = dev->subordinate;
bool alive = false;
- if (handle) {
+ if (adev) {
acpi_status status;
unsigned long long sta;
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+ status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
- || acpiphp_no_hotplug(handle);
+ || acpiphp_no_hotplug(adev);
}
- if (!alive) {
- u32 v;
+ if (!alive)
+ alive = pci_device_is_present(dev);
- /* Check if the device responds. */
- alive = pci_bus_read_dev_vendor_id(dev->bus, dev->devfn, &v, 0);
- }
if (!alive) {
pci_stop_and_remove_bus_device(dev);
- if (handle)
- acpiphp_bus_trim(handle);
+ if (adev)
+ acpi_bus_trim(adev);
} else if (bus) {
struct pci_dev *child, *tmp;
@@ -800,7 +706,6 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
struct pci_bus *bus = slot->bus;
struct pci_dev *dev, *tmp;
- mutex_lock(&slot->crit_sect);
if (slot_no_hotplug(slot)) {
; /* do nothing */
} else if (device_status_valid(get_slot_status(slot))) {
@@ -815,7 +720,6 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
} else {
disable_slot(slot);
}
- mutex_unlock(&slot->crit_sect);
}
}
@@ -855,11 +759,11 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
* ACPI event handlers
*/
-void acpiphp_check_host_bridge(acpi_handle handle)
+void acpiphp_check_host_bridge(struct acpi_device *adev)
{
struct acpiphp_bridge *bridge;
- bridge = acpiphp_handle_to_bridge(handle);
+ bridge = acpiphp_dev_to_bridge(adev);
if (bridge) {
pci_lock_rescan_remove();
@@ -872,73 +776,52 @@ void acpiphp_check_host_bridge(acpi_handle handle)
static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot);
-static void hotplug_event(acpi_handle handle, u32 type, void *data)
+static void hotplug_event(u32 type, struct acpiphp_context *context)
{
- struct acpiphp_context *context = data;
+ acpi_handle handle = context->hp.self->handle;
struct acpiphp_func *func = &context->func;
+ struct acpiphp_slot *slot = func->slot;
struct acpiphp_bridge *bridge;
- char objname[64];
- struct acpi_buffer buffer = { .length = sizeof(objname),
- .pointer = objname };
- mutex_lock(&acpiphp_context_lock);
+ acpi_lock_hp_context();
bridge = context->bridge;
if (bridge)
get_bridge(bridge);
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
pci_lock_rescan_remove();
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* bus re-enumerate */
- pr_debug("%s: Bus check notify on %s\n", __func__, objname);
- pr_debug("%s: re-enumerating slots under %s\n",
- __func__, objname);
- if (bridge) {
+ acpi_handle_debug(handle, "Bus check in %s()\n", __func__);
+ if (bridge)
acpiphp_check_bridge(bridge);
- } else {
- struct acpiphp_slot *slot = func->slot;
-
- if (slot->flags & SLOT_IS_GOING_AWAY)
- break;
-
- mutex_lock(&slot->crit_sect);
+ else if (!(slot->flags & SLOT_IS_GOING_AWAY))
enable_slot(slot);
- mutex_unlock(&slot->crit_sect);
- }
+
break;
case ACPI_NOTIFY_DEVICE_CHECK:
/* device check */
- pr_debug("%s: Device check notify on %s\n", __func__, objname);
+ acpi_handle_debug(handle, "Device check in %s()\n", __func__);
if (bridge) {
acpiphp_check_bridge(bridge);
- } else {
- struct acpiphp_slot *slot = func->slot;
- int ret;
-
- if (slot->flags & SLOT_IS_GOING_AWAY)
- break;
-
+ } else if (!(slot->flags & SLOT_IS_GOING_AWAY)) {
/*
* Check if anything has changed in the slot and rescan
* from the parent if that's the case.
*/
- mutex_lock(&slot->crit_sect);
- ret = acpiphp_rescan_slot(slot);
- mutex_unlock(&slot->crit_sect);
- if (ret)
+ if (acpiphp_rescan_slot(slot))
acpiphp_check_bridge(func->parent);
}
break;
case ACPI_NOTIFY_EJECT_REQUEST:
/* request device eject */
- pr_debug("%s: Device eject notify on %s\n", __func__, objname);
- acpiphp_disable_and_eject_slot(func->slot);
+ acpi_handle_debug(handle, "Eject request in %s()\n", __func__);
+ acpiphp_disable_and_eject_slot(slot);
break;
}
@@ -947,106 +830,41 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
put_bridge(bridge);
}
-static void hotplug_event_work(void *data, u32 type)
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type)
{
- struct acpiphp_context *context = data;
- acpi_handle handle = context->handle;
-
- acpi_scan_lock_acquire();
+ struct acpiphp_context *context;
- hotplug_event(handle, type, context);
+ context = acpiphp_grab_context(adev);
+ if (!context)
+ return -ENODATA;
- acpi_scan_lock_release();
- acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
- put_bridge(context->func.parent);
+ hotplug_event(type, context);
+ acpiphp_let_context_go(context);
+ return 0;
}
/**
- * handle_hotplug_event - handle ACPI hotplug event
- * @handle: Notify()'ed acpi_handle
- * @type: Notify code
- * @data: pointer to acpiphp_context structure
+ * acpiphp_enumerate_slots - Enumerate PCI slots for a given bus.
+ * @bus: PCI bus to enumerate the slots for.
*
- * Handles ACPI event notification on slots.
- */
-static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
-{
- struct acpiphp_context *context;
- u32 ost_code = ACPI_OST_SC_SUCCESS;
- acpi_status status;
-
- switch (type) {
- case ACPI_NOTIFY_BUS_CHECK:
- case ACPI_NOTIFY_DEVICE_CHECK:
- break;
- case ACPI_NOTIFY_EJECT_REQUEST:
- ost_code = ACPI_OST_SC_EJECT_IN_PROGRESS;
- acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
- break;
-
- case ACPI_NOTIFY_DEVICE_WAKE:
- return;
-
- case ACPI_NOTIFY_FREQUENCY_MISMATCH:
- acpi_handle_err(handle, "Device cannot be configured due "
- "to a frequency mismatch\n");
- goto out;
-
- case ACPI_NOTIFY_BUS_MODE_MISMATCH:
- acpi_handle_err(handle, "Device cannot be configured due "
- "to a bus mode mismatch\n");
- goto out;
-
- case ACPI_NOTIFY_POWER_FAULT:
- acpi_handle_err(handle, "Device has suffered a power fault\n");
- goto out;
-
- default:
- acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
- ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
- goto out;
- }
-
- mutex_lock(&acpiphp_context_lock);
- context = acpiphp_get_context(handle);
- if (!context || WARN_ON(context->handle != handle)
- || context->func.parent->is_going_away)
- goto err_out;
-
- get_bridge(context->func.parent);
- acpiphp_put_context(context);
- status = acpi_hotplug_execute(hotplug_event_work, context, type);
- if (ACPI_SUCCESS(status)) {
- mutex_unlock(&acpiphp_context_lock);
- return;
- }
- put_bridge(context->func.parent);
-
- err_out:
- mutex_unlock(&acpiphp_context_lock);
- ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-
- out:
- acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
-}
-
-/*
- * Create hotplug slots for the PCI bus.
- * It should always return 0 to avoid skipping following notifiers.
+ * A "slot" is an object associated with a PCI device number. All functions
+ * (PCI devices) with the same bus and device number belong to the same slot.
*/
void acpiphp_enumerate_slots(struct pci_bus *bus)
{
struct acpiphp_bridge *bridge;
+ struct acpi_device *adev;
acpi_handle handle;
acpi_status status;
if (acpiphp_disabled)
return;
- handle = ACPI_HANDLE(bus->bridge);
- if (!handle)
+ adev = ACPI_COMPANION(bus->bridge);
+ if (!adev)
return;
+ handle = adev->handle;
bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
if (!bridge) {
acpi_handle_err(handle, "No memory for bridge object\n");
@@ -1074,10 +892,10 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
* parent is going to be handled by pciehp, in which case this
* bridge is not interesting to us either.
*/
- mutex_lock(&acpiphp_context_lock);
- context = acpiphp_get_context(handle);
+ acpi_lock_hp_context();
+ context = acpiphp_get_context(adev);
if (!context) {
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
put_device(&bus->dev);
pci_dev_put(bridge->pci_dev);
kfree(bridge);
@@ -1087,17 +905,17 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
context->bridge = bridge;
/* Get a reference to the parent bridge. */
get_bridge(context->func.parent);
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
}
- /* must be added to the list prior to calling register_slot */
+ /* Must be added to the list prior to calling acpiphp_add_context(). */
mutex_lock(&bridge_mutex);
list_add(&bridge->list, &bridge_list);
mutex_unlock(&bridge_mutex);
/* register all slot objects under this bridge */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
- register_slot, NULL, bridge, NULL);
+ acpiphp_add_context, NULL, bridge, NULL);
if (ACPI_FAILURE(status)) {
acpi_handle_err(handle, "failed to register slots\n");
cleanup_bridge(bridge);
@@ -1105,7 +923,10 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
}
}
-/* Destroy hotplug slots associated with the PCI bus */
+/**
+ * acpiphp_remove_slots - Remove slot objects associated with a given bus.
+ * @bus: PCI bus to remove the slot objects for.
+ */
void acpiphp_remove_slots(struct pci_bus *bus)
{
struct acpiphp_bridge *bridge;
@@ -1136,13 +957,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
if (slot->flags & SLOT_IS_GOING_AWAY)
return -ENODEV;
- mutex_lock(&slot->crit_sect);
/* configure all functions */
if (!(slot->flags & SLOT_ENABLED))
enable_slot(slot);
- mutex_unlock(&slot->crit_sect);
-
pci_unlock_rescan_remove();
return 0;
}
@@ -1158,8 +976,6 @@ static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
if (slot->flags & SLOT_IS_GOING_AWAY)
return -ENODEV;
- mutex_lock(&slot->crit_sect);
-
/* unconfigure all functions */
disable_slot(slot);
@@ -1173,7 +989,6 @@ static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
break;
}
- mutex_unlock(&slot->crit_sect);
return 0;
}
@@ -1181,9 +996,15 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot)
{
int ret;
+ /*
+ * Acquire acpi_scan_lock to ensure that the execution of _EJ0 in
+ * acpiphp_disable_and_eject_slot() will be synchronized properly.
+ */
+ acpi_scan_lock_acquire();
pci_lock_rescan_remove();
ret = acpiphp_disable_and_eject_slot(slot);
pci_unlock_rescan_remove();
+ acpi_scan_lock_release();
return ret;
}
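The disable_slot() rework above walks bus->devices with the _safe_reverse iterator because the loop body removes entries from the very list being traversed. A self-contained sketch of the same pattern, using a hypothetical item type:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
	int id;
};

/* The safe variant caches the neighbouring element before the body runs,
 * so list_del() inside the loop is allowed. */
static void drop_matching(struct list_head *head, int id)
{
	struct item *it, *tmp;

	list_for_each_entry_safe_reverse(it, tmp, head, node) {
		if (it->id == id) {
			list_del(&it->node);
			kfree(it);
		}
	}
}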
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 31273e155e6c..037e2612c5bd 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -920,12 +920,12 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
break;
}
- if (bus_cap & 20) {
+ if (bus_cap & 0x20) {
dbg("bus max supports 66MHz PCI-X\n");
bus->max_bus_speed = PCI_SPEED_66MHz_PCIX;
break;
}
- if (bus_cap & 10) {
+ if (bus_cap & 0x10) {
dbg("bus max supports 66MHz PCI\n");
bus->max_bus_speed = PCI_SPEED_66MHz;
break;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 88b37cad4b35..8a66866b8cf1 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -76,6 +76,7 @@ struct slot {
struct hotplug_slot *hotplug_slot;
struct delayed_work work; /* work for button event */
struct mutex lock;
+ struct mutex hotplug_lock;
struct workqueue_struct *wq;
};
@@ -109,6 +110,8 @@ struct controller {
#define INT_BUTTON_PRESS 7
#define INT_BUTTON_RELEASE 8
#define INT_BUTTON_CANCEL 9
+#define INT_LINK_UP 10
+#define INT_LINK_DOWN 11
#define STATIC_STATE 0
#define BLINKINGON_STATE 1
@@ -132,6 +135,7 @@ u8 pciehp_handle_attention_button(struct slot *p_slot);
u8 pciehp_handle_switch_change(struct slot *p_slot);
u8 pciehp_handle_presence_change(struct slot *p_slot);
u8 pciehp_handle_power_fault(struct slot *p_slot);
+void pciehp_handle_linkstate_change(struct slot *p_slot);
int pciehp_configure_device(struct slot *p_slot);
int pciehp_unconfigure_device(struct slot *p_slot);
void pciehp_queue_pushbutton_work(struct work_struct *work);
@@ -153,6 +157,7 @@ void pciehp_green_led_on(struct slot *slot);
void pciehp_green_led_off(struct slot *slot);
void pciehp_green_led_blink(struct slot *slot);
int pciehp_check_link_status(struct controller *ctrl);
+bool pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_reset_slot(struct slot *slot, int probe);
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index eddddd447d0d..20fea57d2149 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -112,6 +112,7 @@ static struct pcie_port_service_driver __initdata dummy_driver = {
static int __init select_detection_mode(void)
{
struct dummy_slot *slot, *tmp;
+
if (pcie_port_service_register(&dummy_driver))
return PCIEHP_DETECT_ACPI;
pcie_port_service_unregister(&dummy_driver);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 53b58debc288..0e0a2fff20a3 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -108,6 +108,7 @@ static int init_slot(struct controller *ctrl)
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
goto out;
+
ops->enable_slot = enable_slot;
ops->disable_slot = disable_slot;
ops->get_power_status = get_power_status;
@@ -283,8 +284,11 @@ static int pciehp_probe(struct pcie_device *dev)
slot = ctrl->slot;
pciehp_get_adapter_status(slot, &occupied);
pciehp_get_power_status(slot, &poweron);
- if (occupied && pciehp_force)
+ if (occupied && pciehp_force) {
+ mutex_lock(&slot->hotplug_lock);
pciehp_enable_slot(slot);
+ mutex_unlock(&slot->hotplug_lock);
+ }
/* If empty slot's power status is on, turn power off */
if (!occupied && poweron && POWER_CTRL(ctrl))
pciehp_power_off_slot(slot);
@@ -328,10 +332,12 @@ static int pciehp_resume (struct pcie_device *dev)
/* Check if slot is occupied */
pciehp_get_adapter_status(slot, &status);
+ mutex_lock(&slot->hotplug_lock);
if (status)
pciehp_enable_slot(slot);
else
pciehp_disable_slot(slot);
+ mutex_unlock(&slot->hotplug_lock);
return 0;
}
#endif /* PM */
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 50628487597d..c75e6a678dcc 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -150,6 +150,27 @@ u8 pciehp_handle_power_fault(struct slot *p_slot)
return 1;
}
+void pciehp_handle_linkstate_change(struct slot *p_slot)
+{
+ u32 event_type;
+ struct controller *ctrl = p_slot->ctrl;
+
+ /* Link Status Change */
+ ctrl_dbg(ctrl, "Data Link Layer State change\n");
+
+ if (pciehp_check_link_active(ctrl)) {
+ ctrl_info(ctrl, "slot(%s): Link Up event\n",
+ slot_name(p_slot));
+ event_type = INT_LINK_UP;
+ } else {
+ ctrl_info(ctrl, "slot(%s): Link Down event\n",
+ slot_name(p_slot));
+ event_type = INT_LINK_DOWN;
+ }
+
+ queue_interrupt_event(p_slot, event_type);
+}
+
/* The following routines constitute the bulk of the
hotplug controller logic
*/
@@ -212,7 +233,8 @@ static int board_added(struct slot *p_slot)
if (retval) {
ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
pci_domain_nr(parent), parent->number);
- goto err_exit;
+ if (retval != -EEXIST)
+ goto err_exit;
}
pciehp_green_led_on(p_slot);
@@ -255,6 +277,9 @@ static int remove_board(struct slot *p_slot)
struct power_work_info {
struct slot *p_slot;
struct work_struct work;
+ unsigned int req;
+#define DISABLE_REQ 0
+#define ENABLE_REQ 1
};
/**
@@ -269,30 +294,38 @@ static void pciehp_power_thread(struct work_struct *work)
struct power_work_info *info =
container_of(work, struct power_work_info, work);
struct slot *p_slot = info->p_slot;
+ int ret;
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
- case POWEROFF_STATE:
- mutex_unlock(&p_slot->lock);
+ switch (info->req) {
+ case DISABLE_REQ:
ctrl_dbg(p_slot->ctrl,
"Disabling domain:bus:device=%04x:%02x:00\n",
pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
p_slot->ctrl->pcie->port->subordinate->number);
+ mutex_lock(&p_slot->hotplug_lock);
pciehp_disable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
- break;
- case POWERON_STATE:
mutex_unlock(&p_slot->lock);
- if (pciehp_enable_slot(p_slot))
+ break;
+ case ENABLE_REQ:
+ ctrl_dbg(p_slot->ctrl,
+ "Enabling domain:bus:device=%04x:%02x:00\n",
+ pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
+ p_slot->ctrl->pcie->port->subordinate->number);
+ mutex_lock(&p_slot->hotplug_lock);
+ ret = pciehp_enable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
+ if (ret)
pciehp_green_led_off(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
+ mutex_unlock(&p_slot->lock);
break;
default:
break;
}
- mutex_unlock(&p_slot->lock);
kfree(info);
}
@@ -315,9 +348,11 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
switch (p_slot->state) {
case BLINKINGOFF_STATE:
p_slot->state = POWEROFF_STATE;
+ info->req = DISABLE_REQ;
break;
case BLINKINGON_STATE:
p_slot->state = POWERON_STATE;
+ info->req = ENABLE_REQ;
break;
default:
kfree(info);
@@ -364,11 +399,10 @@ static void handle_button_press_event(struct slot *p_slot)
*/
ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
- if (p_slot->state == BLINKINGOFF_STATE) {
+ if (p_slot->state == BLINKINGOFF_STATE)
pciehp_green_led_on(p_slot);
- } else {
+ else
pciehp_green_led_off(p_slot);
- }
pciehp_set_attention_status(p_slot, 0);
ctrl_info(ctrl, "PCI slot #%s - action canceled "
"due to button press\n", slot_name(p_slot));
@@ -407,14 +441,81 @@ static void handle_surprise_event(struct slot *p_slot)
INIT_WORK(&info->work, pciehp_power_thread);
pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus)
+ if (!getstatus) {
p_slot->state = POWEROFF_STATE;
- else
+ info->req = DISABLE_REQ;
+ } else {
p_slot->state = POWERON_STATE;
+ info->req = ENABLE_REQ;
+ }
queue_work(p_slot->wq, &info->work);
}
+/*
+ * Note: This function must be called with slot->lock held
+ */
+static void handle_link_event(struct slot *p_slot, u32 event)
+{
+ struct controller *ctrl = p_slot->ctrl;
+ struct power_work_info *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
+ __func__);
+ return;
+ }
+ info->p_slot = p_slot;
+ info->req = event == INT_LINK_UP ? ENABLE_REQ : DISABLE_REQ;
+ INIT_WORK(&info->work, pciehp_power_thread);
+
+ switch (p_slot->state) {
+ case BLINKINGON_STATE:
+ case BLINKINGOFF_STATE:
+ cancel_delayed_work(&p_slot->work);
+ /* Fall through */
+ case STATIC_STATE:
+ p_slot->state = event == INT_LINK_UP ?
+ POWERON_STATE : POWEROFF_STATE;
+ queue_work(p_slot->wq, &info->work);
+ break;
+ case POWERON_STATE:
+ if (event == INT_LINK_UP) {
+ ctrl_info(ctrl,
+ "Link Up event ignored on slot(%s): already powering on\n",
+ slot_name(p_slot));
+ kfree(info);
+ } else {
+ ctrl_info(ctrl,
+ "Link Down event queued on slot(%s): currently getting powered on\n",
+ slot_name(p_slot));
+ p_slot->state = POWEROFF_STATE;
+ queue_work(p_slot->wq, &info->work);
+ }
+ break;
+ case POWEROFF_STATE:
+ if (event == INT_LINK_UP) {
+ ctrl_info(ctrl,
+ "Link Up event queued on slot(%s): currently getting powered off\n",
+ slot_name(p_slot));
+ p_slot->state = POWERON_STATE;
+ queue_work(p_slot->wq, &info->work);
+ } else {
+ ctrl_info(ctrl,
+ "Link Down event ignored on slot(%s): already powering off\n",
+ slot_name(p_slot));
+ kfree(info);
+ }
+ break;
+ default:
+ ctrl_err(ctrl, "Not a valid state on slot(%s)\n",
+ slot_name(p_slot));
+ kfree(info);
+ break;
+ }
+}
+
static void interrupt_event_handler(struct work_struct *work)
{
struct event_info *info = container_of(work, struct event_info, work);
@@ -433,12 +534,23 @@ static void interrupt_event_handler(struct work_struct *work)
pciehp_green_led_off(p_slot);
break;
case INT_PRESENCE_ON:
- case INT_PRESENCE_OFF:
if (!HP_SUPR_RM(ctrl))
break;
+ ctrl_dbg(ctrl, "Surprise Insertion\n");
+ handle_surprise_event(p_slot);
+ break;
+ case INT_PRESENCE_OFF:
+ /*
+ * Regardless of surprise capability, we need to
+ * definitely remove a card that has been pulled out!
+ */
ctrl_dbg(ctrl, "Surprise Removal\n");
handle_surprise_event(p_slot);
break;
+ case INT_LINK_UP:
+ case INT_LINK_DOWN:
+ handle_link_event(p_slot, info->event_type);
+ break;
default:
break;
}
@@ -447,6 +559,9 @@ static void interrupt_event_handler(struct work_struct *work)
kfree(info);
}
+/*
+ * Note: This function must be called with slot->hotplug_lock held
+ */
int pciehp_enable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
@@ -479,13 +594,15 @@ int pciehp_enable_slot(struct slot *p_slot)
pciehp_get_latch_status(p_slot, &getstatus);
rc = board_added(p_slot);
- if (rc) {
+ if (rc)
pciehp_get_latch_status(p_slot, &getstatus);
- }
+
return rc;
}
-
+/*
+ * Note: This function must be called with slot->hotplug_lock held
+ */
int pciehp_disable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
@@ -494,24 +611,6 @@ int pciehp_disable_slot(struct slot *p_slot)
if (!p_slot->ctrl)
return 1;
- if (!HP_SUPR_RM(p_slot->ctrl)) {
- pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus) {
- ctrl_info(ctrl, "No adapter on slot(%s)\n",
- slot_name(p_slot));
- return -ENODEV;
- }
- }
-
- if (MRL_SENS(p_slot->ctrl)) {
- pciehp_get_latch_status(p_slot, &getstatus);
- if (getstatus) {
- ctrl_info(ctrl, "Latch open on slot(%s)\n",
- slot_name(p_slot));
- return -ENODEV;
- }
- }
-
if (POWER_CTRL(p_slot->ctrl)) {
pciehp_get_power_status(p_slot, &getstatus);
if (!getstatus) {
@@ -536,7 +635,9 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
case STATIC_STATE:
p_slot->state = POWERON_STATE;
mutex_unlock(&p_slot->lock);
+ mutex_lock(&p_slot->hotplug_lock);
retval = pciehp_enable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
break;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 14acfccb7670..d7d058fa19a4 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -206,7 +206,7 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
mutex_unlock(&ctrl->ctrl_lock);
}
-static bool check_link_active(struct controller *ctrl)
+bool pciehp_check_link_active(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 lnk_status;
@@ -225,12 +225,12 @@ static void __pcie_wait_link_active(struct controller *ctrl, bool active)
{
int timeout = 1000;
- if (check_link_active(ctrl) == active)
+ if (pciehp_check_link_active(ctrl) == active)
return;
while (timeout > 0) {
msleep(10);
timeout -= 10;
- if (check_link_active(ctrl) == active)
+ if (pciehp_check_link_active(ctrl) == active)
return;
}
ctrl_dbg(ctrl, "Data Link Layer Link Active not %s in 1000 msec\n",
@@ -242,11 +242,6 @@ static void pcie_wait_link_active(struct controller *ctrl)
__pcie_wait_link_active(ctrl, true);
}
-static void pcie_wait_link_not_active(struct controller *ctrl)
-{
- __pcie_wait_link_active(ctrl, false);
-}
-
static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
{
u32 l;
@@ -332,11 +327,6 @@ static int pciehp_link_enable(struct controller *ctrl)
return __pciehp_link_set(ctrl, true);
}
-static int pciehp_link_disable(struct controller *ctrl)
-{
- return __pciehp_link_set(ctrl, false);
-}
-
void pciehp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
@@ -508,14 +498,6 @@ void pciehp_power_off_slot(struct slot * slot)
{
struct controller *ctrl = slot->ctrl;
- /* Disable the link at first */
- pciehp_link_disable(ctrl);
- /* wait the link is down */
- if (ctrl->link_active_reporting)
- pcie_wait_link_not_active(ctrl);
- else
- msleep(1000);
-
pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
@@ -540,7 +522,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
- PCI_EXP_SLTSTA_CC);
+ PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
detected &= ~intr_loc;
intr_loc |= detected;
if (!intr_loc)
@@ -579,6 +561,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
ctrl->power_fault_detected = 1;
pciehp_handle_power_fault(slot);
}
+
+ if (intr_loc & PCI_EXP_SLTSTA_DLLSC)
+ pciehp_handle_linkstate_change(slot);
+
return IRQ_HANDLED;
}
@@ -596,9 +582,17 @@ void pcie_enable_notification(struct controller *ctrl)
* when it is cleared in the interrupt service routine, and
* next power fault detected interrupt was notified again.
*/
- cmd = PCI_EXP_SLTCTL_PDCE;
+
+ /*
+ * Always enable link events: thus link-up and link-down shall
+ * always be treated as hotplug and unplug respectively. Enable
+ * presence detect only if Attention Button is not present.
+ */
+ cmd = PCI_EXP_SLTCTL_DLLSCE;
if (ATTN_BUTTN(ctrl))
cmd |= PCI_EXP_SLTCTL_ABPE;
+ else
+ cmd |= PCI_EXP_SLTCTL_PDCE;
if (MRL_SENS(ctrl))
cmd |= PCI_EXP_SLTCTL_MRLSCE;
if (!pciehp_poll_mode)
@@ -606,7 +600,8 @@ void pcie_enable_notification(struct controller *ctrl)
mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
- PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
+ PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
+ PCI_EXP_SLTCTL_DLLSCE);
pcie_write_cmd(ctrl, cmd, mask);
}
@@ -624,33 +619,38 @@ static void pcie_disable_notification(struct controller *ctrl)
/*
* pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
- * bus reset of the bridge, but if the slot supports surprise removal we need
- * to disable presence detection around the bus reset and clear any spurious
+ * bus reset of the bridge, but at the same time we want to ensure that it is
+ * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
+ * disable link state notification and presence detection change notification
+ * momentarily, if we see that they could interfere. Also, clear any spurious
* events after.
*/
int pciehp_reset_slot(struct slot *slot, int probe)
{
struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 stat_mask = 0, ctrl_mask = 0;
if (probe)
return 0;
- if (HP_SUPR_RM(ctrl)) {
- pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_PDCE);
- if (pciehp_poll_mode)
- del_timer_sync(&ctrl->poll_timer);
+ if (!ATTN_BUTTN(ctrl)) {
+ ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
+ stat_mask |= PCI_EXP_SLTSTA_PDC;
}
+ ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
+ stat_mask |= PCI_EXP_SLTSTA_DLLSC;
+
+ pcie_write_cmd(ctrl, 0, ctrl_mask);
+ if (pciehp_poll_mode)
+ del_timer_sync(&ctrl->poll_timer);
pci_reset_bridge_secondary_bus(ctrl->pcie->port);
- if (HP_SUPR_RM(ctrl)) {
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
- PCI_EXP_SLTSTA_PDC);
- pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE);
- if (pciehp_poll_mode)
- int_poll_timeout(ctrl->poll_timer.data);
- }
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
+ pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask);
+ if (pciehp_poll_mode)
+ int_poll_timeout(ctrl->poll_timer.data);
return 0;
}
@@ -687,6 +687,7 @@ static int pcie_init_slot(struct controller *ctrl)
slot->ctrl = ctrl;
mutex_init(&slot->lock);
+ mutex_init(&slot->hotplug_lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
ctrl->slot = slot;
return 0;
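For reference, pciehp_check_link_active(), now exported above and used by pciehp_handle_linkstate_change(), amounts to reading the Data Link Layer Link Active bit from the PCIe Link Status register. A hedged sketch using the standard capability accessors (helper name is illustrative, not the driver's code):

/* Sketch: report whether the data link layer is up on this port. */
static bool example_link_active(struct pci_dev *pdev)
{
	u16 lnk_status;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
	return !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
}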
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index b07d7cc2d697..1b533060ce65 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -50,7 +50,7 @@ int pciehp_configure_device(struct slot *p_slot)
"at %04x:%02x:00, cannot hot-add\n", pci_name(dev),
pci_domain_nr(parent), parent->number);
pci_dev_put(dev);
- ret = -EINVAL;
+ ret = -EEXIST;
goto out;
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 9dce7c5e2a77..de7a74782f92 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -170,97 +170,6 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
pci_dev_put(dev);
}
-static int sriov_migration(struct pci_dev *dev)
-{
- u16 status;
- struct pci_sriov *iov = dev->sriov;
-
- if (!iov->num_VFs)
- return 0;
-
- if (!(iov->cap & PCI_SRIOV_CAP_VFM))
- return 0;
-
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
- if (!(status & PCI_SRIOV_STATUS_VFM))
- return 0;
-
- schedule_work(&iov->mtask);
-
- return 1;
-}
-
-static void sriov_migration_task(struct work_struct *work)
-{
- int i;
- u8 state;
- u16 status;
- struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);
-
- for (i = iov->initial_VFs; i < iov->num_VFs; i++) {
- state = readb(iov->mstate + i);
- if (state == PCI_SRIOV_VFM_MI) {
- writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
- state = readb(iov->mstate + i);
- if (state == PCI_SRIOV_VFM_AV)
- virtfn_add(iov->self, i, 1);
- } else if (state == PCI_SRIOV_VFM_MO) {
- virtfn_remove(iov->self, i, 1);
- writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
- state = readb(iov->mstate + i);
- if (state == PCI_SRIOV_VFM_AV)
- virtfn_add(iov->self, i, 0);
- }
- }
-
- pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
- status &= ~PCI_SRIOV_STATUS_VFM;
- pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
-}
-
-static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
-{
- int bir;
- u32 table;
- resource_size_t pa;
- struct pci_sriov *iov = dev->sriov;
-
- if (nr_virtfn <= iov->initial_VFs)
- return 0;
-
- pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
- bir = PCI_SRIOV_VFM_BIR(table);
- if (bir > PCI_STD_RESOURCE_END)
- return -EIO;
-
- table = PCI_SRIOV_VFM_OFFSET(table);
- if (table + nr_virtfn > pci_resource_len(dev, bir))
- return -EIO;
-
- pa = pci_resource_start(dev, bir) + table;
- iov->mstate = ioremap(pa, nr_virtfn);
- if (!iov->mstate)
- return -ENOMEM;
-
- INIT_WORK(&iov->mtask, sriov_migration_task);
-
- iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-
- return 0;
-}
-
-static void sriov_disable_migration(struct pci_dev *dev)
-{
- struct pci_sriov *iov = dev->sriov;
-
- iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-
- cancel_work_sync(&iov->mtask);
- iounmap(iov->mstate);
-}
-
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
int rc;
@@ -351,12 +260,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
goto failed;
}
- if (iov->cap & PCI_SRIOV_CAP_VFM) {
- rc = sriov_enable_migration(dev, nr_virtfn);
- if (rc)
- goto failed;
- }
-
kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
iov->num_VFs = nr_virtfn;
@@ -387,9 +290,6 @@ static void sriov_disable(struct pci_dev *dev)
if (!iov->num_VFs)
return;
- if (iov->cap & PCI_SRIOV_CAP_VFM)
- sriov_disable_migration(dev);
-
for (i = 0; i < iov->num_VFs; i++)
virtfn_remove(dev, i, 0);
@@ -688,25 +588,6 @@ void pci_disable_sriov(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_disable_sriov);
/**
- * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
- * @dev: the PCI device
- *
- * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
- *
- * Physical Function driver is responsible to register IRQ handler using
- * VF Migration Interrupt Message Number, and call this function when the
- * interrupt is generated by the hardware.
- */
-irqreturn_t pci_sriov_migration(struct pci_dev *dev)
-{
- if (!dev->is_physfn)
- return IRQ_NONE;
-
- return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
-}
-EXPORT_SYMBOL_GPL(pci_sriov_migration);
-
-/**
 * pci_num_vf - return number of VFs associated with a PF
* @dev: the PCI device
*
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 25f0bc659164..d911e0c1f359 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -616,15 +616,11 @@ static int pci_pm_prepare(struct device *dev)
int error = 0;
/*
- * PCI devices suspended at run time need to be resumed at this
- * point, because in general it is necessary to reconfigure them for
- * system suspend. Namely, if the device is supposed to wake up the
- * system from the sleep state, we may need to reconfigure it for this
- * purpose. In turn, if the device is not supposed to wake up the
- * system from the sleep state, we'll have to prevent it from signaling
- * wake-up.
+ * Devices having power.ignore_children set may still be necessary for
+ * suspending their children in the next phase of device suspend.
*/
- pm_runtime_resume(dev);
+ if (dev->power.ignore_children)
+ pm_runtime_resume(dev);
if (drv && drv->pm && drv->pm->prepare)
error = drv->pm->prepare(dev);
@@ -654,6 +650,16 @@ static int pci_pm_suspend(struct device *dev)
goto Fixup;
}
+ /*
+ * PCI devices suspended at run time need to be resumed at this point,
+ * because in general it is necessary to reconfigure them for system
+ * suspend. Namely, if the device is supposed to wake up the system
+ * from the sleep state, we may need to reconfigure it for this purpose.
+ * In turn, if the device is not supposed to wake up the system from the
+ * sleep state, we'll have to prevent it from signaling wake-up.
+ */
+ pm_runtime_resume(dev);
+
pci_dev->state_saved = false;
if (pm->suspend) {
pci_power_t prev = pci_dev->current_state;
@@ -808,6 +814,14 @@ static int pci_pm_freeze(struct device *dev)
return 0;
}
+ /*
+ * This used to be done in pci_pm_prepare() for all devices and some
+ * drivers may depend on it, so do it here. Ideally, runtime-suspended
+ * devices should not be touched during freeze/thaw transitions,
+ * however.
+ */
+ pm_runtime_resume(dev);
+
pci_dev->state_saved = false;
if (pm->freeze) {
int error;
@@ -915,6 +929,9 @@ static int pci_pm_poweroff(struct device *dev)
goto Fixup;
}
+ /* The reason to do that is the same as in pci_pm_suspend(). */
+ pm_runtime_resume(dev);
+
pci_dev->state_saved = false;
if (pm->poweroff) {
int error;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 276ef9c18802..4e0acefb7565 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -351,28 +351,17 @@ static struct device_attribute dev_rescan_attr = __ATTR(rescan,
(S_IWUSR|S_IWGRP),
NULL, dev_rescan_store);
-static void remove_callback(struct device *dev)
-{
- pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
-}
-
static ssize_t
-remove_store(struct device *dev, struct device_attribute *dummy,
+remove_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int ret = 0;
unsigned long val;
if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
- /* An attribute cannot be unregistered by one of its own methods,
- * so we have to use this roundabout approach.
- */
- if (val)
- ret = device_schedule_callback(dev, remove_callback);
- if (ret)
- count = ret;
+ if (val && device_remove_file_self(dev, attr))
+ pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
return count;
}
static struct device_attribute dev_remove_attr = __ATTR(remove,
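Editor's note: the hunk above swaps the deferred device_schedule_callback() dance for device_remove_file_self(), which lets a sysfs store() method unregister its own attribute synchronously and then do the removal work directly. A minimal, hedged sketch of the same idiom with a hypothetical "reset" attribute (nothing below beyond the pattern comes from this patch):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* Hypothetical attribute showing the self-removal idiom used above. */
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        unsigned long val;

        if (kstrtoul(buf, 0, &val) < 0)
                return -EINVAL;

        /*
         * device_remove_file_self() unregisters this very attribute and
         * returns true only for the caller that wins the race, so the
         * follow-up work below runs at most once.
         */
        if (val && device_remove_file_self(dev, attr))
                pr_info("%s: attribute removed, tearing down\n", dev_name(dev));

        return count;
}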
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index fdbc294821e6..7325d43bf030 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -108,12 +108,12 @@ static bool pcie_ari_disabled;
*/
unsigned char pci_bus_max_busnr(struct pci_bus* bus)
{
- struct list_head *tmp;
+ struct pci_bus *tmp;
unsigned char max, n;
max = bus->busn_res.end;
- list_for_each(tmp, &bus->children) {
- n = pci_bus_max_busnr(pci_bus_b(tmp));
+ list_for_each_entry(tmp, &bus->children, node) {
+ n = pci_bus_max_busnr(tmp);
if(n > max)
max = n;
}
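Editor's note: this hunk (and the later ones in search.c and yenta_socket.c) replaces open-coded list_for_each() plus the pci_bus_b() container lookup with list_for_each_entry(), which folds the container_of step into the iterator. A generic sketch of the idiom, unrelated to PCI and using a made-up struct for illustration:

#include <linux/list.h>

struct item {
        struct list_head node;
        int value;
};

/* Sum all items on a list; the iterator yields 'struct item *' directly. */
static int sum_items(struct list_head *head)
{
        struct item *it;
        int sum = 0;

        list_for_each_entry(it, head, node)
                sum += it->value;

        return sum;
}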
@@ -401,33 +401,40 @@ EXPORT_SYMBOL_GPL(pci_find_ht_capability);
* @res: child resource record for which parent is sought
*
* For given resource region of given device, return the resource
- * region of parent bus the given region is contained in or where
- * it should be allocated from.
+ * region of parent bus the given region is contained in.
*/
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
const struct pci_bus *bus = dev->bus;
+ struct resource *r;
int i;
- struct resource *best = NULL, *r;
pci_bus_for_each_resource(bus, r, i) {
if (!r)
continue;
- if (res->start && !(res->start >= r->start && res->end <= r->end))
- continue; /* Not contained */
- if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
- continue; /* Wrong type */
- if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
- return r; /* Exact match */
- /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
- if (r->flags & IORESOURCE_PREFETCH)
- continue;
- /* .. but we can put a prefetchable resource inside a non-prefetchable one */
- if (!best)
- best = r;
+ if (res->start && resource_contains(r, res)) {
+
+ /*
+ * If the window is prefetchable but the BAR is
+ * not, the allocator made a mistake.
+ */
+ if (r->flags & IORESOURCE_PREFETCH &&
+ !(res->flags & IORESOURCE_PREFETCH))
+ return NULL;
+
+ /*
+ * If we're below a transparent bridge, there may
+ * be both a positively-decoded aperture and a
+ * subtractively-decoded region that contain the BAR.
+ * We want the positively-decoded one, so this depends
+ * on pci_bus_for_each_resource() giving us those
+ * first.
+ */
+ return r;
+ }
}
- return best;
+ return NULL;
}
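Editor's note: the rewritten lookup relies on resource_contains() to decide whether a BAR fits inside a candidate bridge window. Roughly, that helper is a typed start/end containment check; the sketch below restates it (simplified — the real helper in <linux/ioport.h> may carry extra guards, such as rejecting IORESOURCE_UNSET resources in later kernels):

#include <linux/ioport.h>

/* Simplified restatement of the containment test used above. */
static bool window_contains_bar(const struct resource *win,
                                const struct resource *bar)
{
        if (resource_type(win) != resource_type(bar))   /* IO vs MEM must match */
                return false;

        return win->start <= bar->start && bar->end <= win->end;
}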
/**
@@ -1178,6 +1185,11 @@ int pci_load_and_free_saved_state(struct pci_dev *dev,
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
+int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
+{
+ return pci_enable_resources(dev, bars);
+}
+
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
@@ -1624,29 +1636,27 @@ static void pci_pme_list_scan(struct work_struct *work)
struct pci_pme_device *pme_dev, *n;
mutex_lock(&pci_pme_list_mutex);
- if (!list_empty(&pci_pme_list)) {
- list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
- if (pme_dev->dev->pme_poll) {
- struct pci_dev *bridge;
-
- bridge = pme_dev->dev->bus->self;
- /*
- * If bridge is in low power state, the
- * configuration space of subordinate devices
- * may be not accessible
- */
- if (bridge && bridge->current_state != PCI_D0)
- continue;
- pci_pme_wakeup(pme_dev->dev, NULL);
- } else {
- list_del(&pme_dev->list);
- kfree(pme_dev);
- }
+ list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
+ if (pme_dev->dev->pme_poll) {
+ struct pci_dev *bridge;
+
+ bridge = pme_dev->dev->bus->self;
+ /*
+ * If the bridge is in a low power state, the
+ * configuration space of subordinate devices
+ * may not be accessible
+ */
+ if (bridge && bridge->current_state != PCI_D0)
+ continue;
+ pci_pme_wakeup(pme_dev->dev, NULL);
+ } else {
+ list_del(&pme_dev->list);
+ kfree(pme_dev);
}
- if (!list_empty(&pci_pme_list))
- schedule_delayed_work(&pci_pme_work,
- msecs_to_jiffies(PME_TIMEOUT));
}
+ if (!list_empty(&pci_pme_list))
+ schedule_delayed_work(&pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
}
@@ -2193,21 +2203,18 @@ void pci_request_acs(void)
}
/**
- * pci_enable_acs - enable ACS if hardware support it
+ * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
*/
-void pci_enable_acs(struct pci_dev *dev)
+static int pci_std_enable_acs(struct pci_dev *dev)
{
int pos;
u16 cap;
u16 ctrl;
- if (!pci_acs_enable)
- return;
-
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
if (!pos)
- return;
+ return -ENODEV;
pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
@@ -2225,6 +2232,23 @@ void pci_enable_acs(struct pci_dev *dev)
ctrl |= (cap & PCI_ACS_UF);
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+
+ return 0;
+}
+
+/**
+ * pci_enable_acs - enable ACS if hardware supports it
+ * @dev: the PCI device
+ */
+void pci_enable_acs(struct pci_dev *dev)
+{
+ if (!pci_acs_enable)
+ return;
+
+ if (!pci_std_enable_acs(dev))
+ return;
+
+ pci_dev_specific_enable_acs(dev);
}
static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
@@ -4250,6 +4274,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
"Rounding up size of resource #%d to %#llx.\n",
i, (unsigned long long)size);
}
+ r->flags |= IORESOURCE_UNSET;
r->end = size - 1;
r->start = 0;
}
@@ -4263,6 +4288,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
r = &dev->resource[i];
if (!(r->flags & IORESOURCE_MEM))
continue;
+ r->flags |= IORESOURCE_UNSET;
r->end = resource_size(r) - 1;
r->start = 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4df38df224f4..6bd082299e31 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,8 +1,6 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
-#include <linux/workqueue.h>
-
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
@@ -240,8 +238,6 @@ struct pci_sriov {
struct pci_dev *dev; /* lowest numbered PF */
struct pci_dev *self; /* this PF */
struct mutex lock; /* lock for VF bus */
- struct work_struct mtask; /* VF Migration task */
- u8 __iomem *mstate; /* VF Migration State Array */
};
#ifdef CONFIG_PCI_ATS
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6e34498ec9f0..ef09f5f2fe6c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -252,6 +252,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
/* Address above 32-bit boundary; disable the BAR */
pci_write_config_dword(dev, pos, 0);
pci_write_config_dword(dev, pos + 4, 0);
+ res->flags |= IORESOURCE_UNSET;
region.start = 0;
region.end = sz64;
bar_disabled = true;
@@ -731,22 +732,6 @@ struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *de
return child;
}
-static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
-{
- struct pci_bus *parent = child->parent;
-
- /* Attempts to fix that up are really dangerous unless
- we're going to re-assign all bus numbers. */
- if (!pcibios_assign_all_busses())
- return;
-
- while (parent->parent && parent->busn_res.end < max) {
- parent->busn_res.end = max;
- pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
- parent = parent->parent;
- }
-}
-
/*
* If it's a bridge, configure it and scan the bus behind it.
* For CardBus bridges, we don't scan behind as the devices will
@@ -782,7 +767,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
/* Check if setup is sensible at all */
if (!pass &&
(primary != bus->number || secondary <= bus->number ||
- secondary > subordinate)) {
+ secondary > subordinate || subordinate > bus->busn_res.end)) {
dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
secondary, subordinate);
broken = 1;
@@ -805,11 +790,10 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
goto out;
/*
- * If we already got to this bus through a different bridge,
- * don't re-add it. This can happen with the i450NX chipset.
- *
- * However, we continue to descend down the hierarchy and
- * scan remaining child buses.
+ * The bus might already exist for two reasons: Either we are
+ * rescanning the bus or the bus is reachable through more than
+ * one bridge. The second case can happen with the i450NX
+ * chipset.
*/
child = pci_find_bus(pci_domain_nr(bus), secondary);
if (!child) {
@@ -822,17 +806,19 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
}
cmax = pci_scan_child_bus(child);
- if (cmax > max)
- max = cmax;
- if (child->busn_res.end > max)
- max = child->busn_res.end;
+ if (cmax > subordinate)
+ dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
+ subordinate, cmax);
+ /* subordinate should equal child->busn_res.end */
+ if (subordinate > max)
+ max = subordinate;
} else {
/*
* We need to assign a number to this bus which we always
* do in the second pass.
*/
if (!pass) {
- if (pcibios_assign_all_busses() || broken)
+ if (pcibios_assign_all_busses() || broken || is_cardbus)
/* Temporarily disable forwarding of the
configuration cycles on all bridges in
this bus segment to avoid possible
@@ -844,19 +830,25 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
goto out;
}
+ if (max >= bus->busn_res.end) {
+ dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n",
+ max, &bus->busn_res);
+ goto out;
+ }
+
/* Clear errors */
pci_write_config_word(dev, PCI_STATUS, 0xffff);
- /* Prevent assigning a bus number that already exists.
- * This can happen when a bridge is hot-plugged, so in
- * this case we only re-scan this bus. */
+ /* The bus will already exist if we are rescanning */
child = pci_find_bus(pci_domain_nr(bus), max+1);
if (!child) {
- child = pci_add_new_bus(bus, dev, ++max);
+ child = pci_add_new_bus(bus, dev, max+1);
if (!child)
goto out;
- pci_bus_insert_busn_res(child, max, 0xff);
+ pci_bus_insert_busn_res(child, max+1,
+ bus->busn_res.end);
}
+ max++;
buses = (buses & 0xff000000)
| ((unsigned int)(child->primary) << 0)
| ((unsigned int)(child->busn_res.start) << 8)
@@ -878,20 +870,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
if (!is_cardbus) {
child->bridge_ctl = bctl;
- /*
- * Adjust subordinate busnr in parent buses.
- * We do this before scanning for children because
- * some devices may not be detected if the bios
- * was lazy.
- */
- pci_fixup_parent_subordinate_busnr(child, max);
- /* Now we can scan all subordinate buses... */
max = pci_scan_child_bus(child);
- /*
- * now fix it up again since we have found
- * the real value of max.
- */
- pci_fixup_parent_subordinate_busnr(child, max);
} else {
/*
* For CardBus bridges, we leave 4 bus numbers
@@ -922,11 +901,15 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
}
}
max += i;
- pci_fixup_parent_subordinate_busnr(child, max);
}
/*
* Set the subordinate bus number to its real value.
*/
+ if (max > bus->busn_res.end) {
+ dev_warn(&dev->dev, "max busn %02x is outside %pR\n",
+ max, &bus->busn_res);
+ max = bus->busn_res.end;
+ }
pci_bus_update_busn_res_end(child, max);
pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
}
@@ -1125,10 +1108,10 @@ int pci_setup_device(struct pci_dev *dev)
pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
/*
- * Do the ugly legacy mode stuff here rather than broken chip
- * quirk code. Legacy mode ATA controllers have fixed
- * addresses. These are not always echoed in BAR0-3, and
- * BAR0-3 in a few cases contain junk!
+ * Do the ugly legacy mode stuff here rather than broken chip
+ * quirk code. Legacy mode ATA controllers have fixed
+ * addresses. These are not always echoed in BAR0-3, and
+ * BAR0-3 in a few cases contain junk!
*/
if (class == PCI_CLASS_STORAGE_IDE) {
u8 progif;
@@ -1139,11 +1122,15 @@ int pci_setup_device(struct pci_dev *dev)
res = &dev->resource[0];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
+ res);
region.start = 0x3F6;
region.end = 0x3F6;
res = &dev->resource[1];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
+ res);
}
if ((progif & 4) == 0) {
region.start = 0x170;
@@ -1151,11 +1138,15 @@ int pci_setup_device(struct pci_dev *dev)
res = &dev->resource[2];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
+ res);
region.start = 0x376;
region.end = 0x376;
res = &dev->resource[3];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
+ res);
}
}
break;
@@ -1835,7 +1826,7 @@ int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
res->flags |= IORESOURCE_PCI_FIXED;
}
- conflict = insert_resource_conflict(parent_res, res);
+ conflict = request_resource_conflict(parent_res, res);
if (conflict)
dev_printk(KERN_DEBUG, &b->dev,
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5cb726c193de..e7292065a1b1 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -296,6 +296,7 @@ static void quirk_s3_64M(struct pci_dev *dev)
struct resource *r = &dev->resource[0];
if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
+ r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0x3ffffff;
}
@@ -937,6 +938,8 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C
static void quirk_dunord(struct pci_dev *dev)
{
struct resource *r = &dev->resource [1];
+
+ r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0xffffff;
}
@@ -1740,6 +1743,7 @@ static void quirk_tc86c001_ide(struct pci_dev *dev)
struct resource *r = &dev->resource[0];
if (r->start & 0x8) {
+ r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0xf;
}
@@ -1769,6 +1773,7 @@ static void quirk_plx_pci9050(struct pci_dev *dev)
dev_info(&dev->dev,
"Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
bar);
+ r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0xff;
}
@@ -3423,6 +3428,61 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
#endif
}
+/*
+ * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * transactions and validate bus numbers in requests, but do not provide an
+ * actual PCIe ACS capability. This is the list of device IDs known to fall
+ * into that category as provided by Intel in Red Hat bugzilla 1037684.
+ */
+static const u16 pci_quirk_intel_pch_acs_ids[] = {
+ /* Ibexpeak PCH */
+ 0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
+ 0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
+ /* Cougarpoint PCH */
+ 0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
+ 0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
+ /* Pantherpoint PCH */
+ 0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
+ 0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
+ /* Lynxpoint-H PCH */
+ 0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
+ 0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
+ /* Lynxpoint-LP PCH */
+ 0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
+ 0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
+ /* Wildcat PCH */
+ 0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
+ 0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
+};
+
+static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
+{
+ int i;
+
+ /* Filter out a few obvious non-matches first */
+ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
+ if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
+ return true;
+
+ return false;
+}
+
+#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
+
+static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
+ INTEL_PCH_ACS_FLAGS : 0;
+
+ if (!pci_quirk_intel_pch_acs_match(dev))
+ return -ENOTTY;
+
+ return acs_flags & ~flags ? 0 : 1;
+}
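Editor's note: the expression "acs_flags & ~flags ? 0 : 1" at the end of the quirk answers "are all requested ACS controls provided?". A small restatement with worked examples (helper name and example values are illustrative only, not from the patch):

#include <linux/types.h>

/*
 * Report 1 only when every requested ACS capability bit is covered by the
 * quirk-provided set. With provided = INTEL_PCH_ACS_FLAGS:
 *   requested = PCI_ACS_SV | PCI_ACS_RR  -> (requested & ~provided) == 0 -> 1
 *   requested = PCI_ACS_SV | PCI_ACS_TB  -> PCI_ACS_TB is not covered    -> 0
 */
static int acs_flags_covered(u16 requested, u16 provided)
{
        return (requested & ~provided) ? 0 : 1;
}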
+
static const struct pci_dev_acs_enabled {
u16 vendor;
u16 device;
@@ -3434,6 +3494,7 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ 0 }
};
@@ -3461,3 +3522,132 @@ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
return -ENOTTY;
}
+
+/* Config space offset of Root Complex Base Address register */
+#define INTEL_LPC_RCBA_REG 0xf0
+/* 31:14 RCBA address */
+#define INTEL_LPC_RCBA_MASK 0xffffc000
+/* RCBA Enable */
+#define INTEL_LPC_RCBA_ENABLE (1 << 0)
+
+/* Backbone Scratch Pad Register */
+#define INTEL_BSPR_REG 0x1104
+/* Backbone Peer Non-Posted Disable */
+#define INTEL_BSPR_REG_BPNPD (1 << 8)
+/* Backbone Peer Posted Disable */
+#define INTEL_BSPR_REG_BPPD (1 << 9)
+
+/* Upstream Peer Decode Configuration Register */
+#define INTEL_UPDCR_REG 0x1114
+/* 5:0 Peer Decode Enable bits */
+#define INTEL_UPDCR_REG_MASK 0x3f
+
+static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
+{
+ u32 rcba, bspr, updcr;
+ void __iomem *rcba_mem;
+
+ /*
+ * Read the RCBA register from the LPC (D31:F0). PCH root ports
+ * are D28:F* and therefore get probed before LPC, thus we can't
+ * use pci_get_slot/pci_read_config_dword here.
+ */
+ pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
+ INTEL_LPC_RCBA_REG, &rcba);
+ if (!(rcba & INTEL_LPC_RCBA_ENABLE))
+ return -EINVAL;
+
+ rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
+ PAGE_ALIGN(INTEL_UPDCR_REG));
+ if (!rcba_mem)
+ return -ENOMEM;
+
+ /*
+ * The BSPR can disallow peer cycles, but it's set by soft strap and
+ * therefore read-only. If both posted and non-posted peer cycles are
+ * disallowed, we're ok. If either are allowed, then we need to use
+ * the UPDCR to disable peer decodes for each port. This provides the
+ * PCIe ACS equivalent of PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF
+ */
+ bspr = readl(rcba_mem + INTEL_BSPR_REG);
+ bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
+ if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
+ updcr = readl(rcba_mem + INTEL_UPDCR_REG);
+ if (updcr & INTEL_UPDCR_REG_MASK) {
+ dev_info(&dev->dev, "Disabling UPDCR peer decodes\n");
+ updcr &= ~INTEL_UPDCR_REG_MASK;
+ writel(updcr, rcba_mem + INTEL_UPDCR_REG);
+ }
+ }
+
+ iounmap(rcba_mem);
+ return 0;
+}
+
+/* Miscellaneous Port Configuration register */
+#define INTEL_MPC_REG 0xd8
+/* MPC: Invalid Receive Bus Number Check Enable */
+#define INTEL_MPC_REG_IRBNCE (1 << 26)
+
+static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
+{
+ u32 mpc;
+
+ /*
+ * When enabled, the IRBNCE bit of the MPC register enables the
+ * equivalent of PCI ACS Source Validation (PCI_ACS_SV), which
+ * ensures that requester IDs fall within the bus number range
+ * of the bridge. Enable if not already.
+ */
+ pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
+ if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
+ dev_info(&dev->dev, "Enabling MPC IRBNCE\n");
+ mpc |= INTEL_MPC_REG_IRBNCE;
+ pci_write_config_dword(dev, INTEL_MPC_REG, mpc);
+ }
+}
+
+static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
+{
+ if (!pci_quirk_intel_pch_acs_match(dev))
+ return -ENOTTY;
+
+ if (pci_quirk_enable_intel_lpc_acs(dev)) {
+ dev_warn(&dev->dev, "Failed to enable Intel PCH ACS quirk\n");
+ return 0;
+ }
+
+ pci_quirk_enable_intel_rp_mpc_acs(dev);
+
+ dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
+
+ dev_info(&dev->dev, "Intel PCH root port ACS workaround enabled\n");
+
+ return 0;
+}
+
+static const struct pci_dev_enable_acs {
+ u16 vendor;
+ u16 device;
+ int (*enable_acs)(struct pci_dev *dev);
+} pci_dev_enable_acs[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
+ { 0 }
+};
+
+void pci_dev_specific_enable_acs(struct pci_dev *dev)
+{
+ const struct pci_dev_enable_acs *i;
+ int ret;
+
+ for (i = pci_dev_enable_acs; i->enable_acs; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID)) {
+ ret = i->enable_acs(dev);
+ if (ret >= 0)
+ return;
+ }
+ }
+}
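Editor's note: one subtlety in the match loop above is that the table stores vendor/device as u16 while PCI_ANY_ID is (~0); without the (u16) casts, the truncated 0xffff table value would be promoted and compared against 0xffffffff and the wildcard would never match. A tiny hypothetical helper making the same comparison explicit:

#include <linux/pci.h>

/* Hypothetical helper: does a 16-bit table entry match a device ID? */
static bool pci_id_field_matches(u16 dev_id, u16 table_id)
{
        return table_id == (u16)PCI_ANY_ID || table_id == dev_id;
}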
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 5d595724e5f4..c1839450d4d6 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -197,8 +197,10 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
void pci_cleanup_rom(struct pci_dev *pdev)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
+
if (res->flags & IORESOURCE_ROM_COPY) {
kfree((void*)(unsigned long)res->start);
+ res->flags |= IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_ROM_COPY;
res->start = 0;
res->end = 0;
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 3ff2ac7c14e2..4a1b972efe7f 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -54,14 +54,14 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
{
- struct pci_bus* child;
- struct list_head *tmp;
+ struct pci_bus *child;
+ struct pci_bus *tmp;
if(bus->number == busnr)
return bus;
- list_for_each(tmp, &bus->children) {
- child = pci_do_find_bus(pci_bus_b(tmp), busnr);
+ list_for_each_entry(tmp, &bus->children, node) {
+ child = pci_do_find_bus(tmp, busnr);
if(child)
return child;
}
@@ -111,7 +111,7 @@ pci_find_next_bus(const struct pci_bus *from)
down_read(&pci_bus_sem);
n = from ? from->node.next : pci_root_buses.next;
if (n != &pci_root_buses)
- b = pci_bus_b(n);
+ b = list_entry(n, struct pci_bus, node);
up_read(&pci_bus_sem);
return b;
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 5c060b152ce6..7eed671d5586 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -44,6 +44,9 @@ void pci_update_resource(struct pci_dev *dev, int resno)
if (!res->flags)
return;
+ if (res->flags & IORESOURCE_UNSET)
+ return;
+
/*
* Ignore non-moveable resources. This might be legacy resources for
* which no functional BAR register exists or another important
@@ -101,11 +104,6 @@ void pci_update_resource(struct pci_dev *dev, int resno)
if (disable)
pci_write_config_word(dev, PCI_COMMAND, cmd);
-
- res->flags &= ~IORESOURCE_UNSET;
- dev_dbg(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
- resno, res, (unsigned long long)region.start,
- (unsigned long long)region.end);
}
int pci_claim_resource(struct pci_dev *dev, int resource)
@@ -113,18 +111,23 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
struct resource *res = &dev->resource[resource];
struct resource *root, *conflict;
+ if (res->flags & IORESOURCE_UNSET) {
+ dev_info(&dev->dev, "can't claim BAR %d %pR: no address assigned\n",
+ resource, res);
+ return -EINVAL;
+ }
+
root = pci_find_parent_resource(dev, res);
if (!root) {
- dev_info(&dev->dev, "no compatible bridge window for %pR\n",
- res);
+ dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
+ resource, res);
return -EINVAL;
}
conflict = request_resource_conflict(root, res);
if (conflict) {
- dev_info(&dev->dev,
- "address space collision: %pR conflicts with %s %pR\n",
- res, conflict->name, conflict);
+ dev_info(&dev->dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
+ resource, res, conflict->name, conflict);
return -EBUSY;
}
@@ -263,6 +266,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
resource_size_t align, size;
int ret;
+ res->flags |= IORESOURCE_UNSET;
align = pci_resource_alignment(dev, res);
if (!align) {
dev_info(&dev->dev, "BAR %d: can't assign %pR "
@@ -282,6 +286,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
ret = pci_revert_fw_address(res, dev, resno, size);
if (!ret) {
+ res->flags &= ~IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_STARTALIGN;
dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
if (resno < PCI_BRIDGE_RESOURCES)
@@ -297,6 +302,7 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
resource_size_t new_size;
int ret;
+ res->flags |= IORESOURCE_UNSET;
if (!res->parent) {
dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR "
"\n", resno, res);
@@ -307,6 +313,7 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
new_size = resource_size(res) + addsize;
ret = _pci_assign_resource(dev, resno, new_size, min_align);
if (!ret) {
+ res->flags &= ~IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_STARTALIGN;
dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res);
if (resno < PCI_BRIDGE_RESOURCES)
@@ -336,9 +343,15 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
(!(r->flags & IORESOURCE_ROM_ENABLE)))
continue;
+ if (r->flags & IORESOURCE_UNSET) {
+ dev_err(&dev->dev, "can't enable device: BAR %d %pR not assigned\n",
+ i, r);
+ return -EINVAL;
+ }
+
if (!r->parent) {
- dev_err(&dev->dev, "device not available "
- "(can't reserve %pR)\n", r);
+ dev_err(&dev->dev, "can't enable device: BAR %d %pR not claimed\n",
+ i, r);
return -EINVAL;
}
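Editor's note: together with the IORESOURCE_UNSET changes earlier in this series, a BAR now carries an explicit "no address assigned" state: pci_claim_resource() and pci_enable_resources() fail it with -EINVAL, and only a successful (re)assignment clears the flag. A condensed, purely illustrative sketch of that gate (hypothetical helper; real callers are the core resource-assignment paths rather than drivers):

#include <linux/ioport.h>
#include <linux/pci.h>

/* Illustrative only: assign a BAR firmware left unprogrammed, then enable. */
static int assign_and_enable(struct pci_dev *dev, int bar)
{
        int ret;

        if (dev->resource[bar].flags & IORESOURCE_UNSET) {
                ret = pci_assign_resource(dev, bar);    /* clears IORESOURCE_UNSET on success */
                if (ret)
                        return ret;
        }

        return pci_enable_device(dev);  /* pci_enable_resources() no longer rejects the BAR */
}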
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 7dd62fa9d0bd..396c200b9ddb 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -116,11 +116,11 @@ static void pci_slot_release(struct kobject *kobj)
}
static struct pci_slot_attribute pci_slot_attr_address =
- __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL);
+ __ATTR(address, S_IRUGO, address_read_file, NULL);
static struct pci_slot_attribute pci_slot_attr_max_speed =
- __ATTR(max_bus_speed, (S_IFREG | S_IRUGO), max_speed_read_file, NULL);
+ __ATTR(max_bus_speed, S_IRUGO, max_speed_read_file, NULL);
static struct pci_slot_attribute pci_slot_attr_cur_speed =
- __ATTR(cur_bus_speed, (S_IFREG | S_IRUGO), cur_speed_read_file, NULL);
+ __ATTR(cur_bus_speed, S_IRUGO, cur_speed_read_file, NULL);
static struct attribute *pci_slot_default_attrs[] = {
&pci_slot_attr_address.attr,
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index 6eecd7cddf57..54d3089d157b 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -125,9 +125,6 @@ sa1100_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
if (freqs->new < freqs->old)
sa1100_pcmcia_set_mecr(skt, freqs->new);
break;
- case CPUFREQ_RESUMECHANGE:
- sa1100_pcmcia_set_mecr(skt, freqs->new);
- break;
}
return 0;
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 8485761e76af..946f90ef6020 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1076,7 +1076,7 @@ static void yenta_config_init(struct yenta_socket *socket)
*/
static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
{
- struct list_head *tmp;
+ struct pci_bus *sibling;
unsigned char upper_limit;
/*
* We only check and fix the parent bridge: All systems which need
@@ -1095,18 +1095,18 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
/* stay within the limits of the bus range of the parent: */
upper_limit = bridge_to_fix->parent->busn_res.end;
- /* check the bus ranges of all silbling bridges to prevent overlap */
- list_for_each(tmp, &bridge_to_fix->parent->children) {
- struct pci_bus *silbling = pci_bus_b(tmp);
+ /* check the bus ranges of all sibling bridges to prevent overlap */
+ list_for_each_entry(sibling, &bridge_to_fix->parent->children,
+ node) {
/*
- * If the silbling has a higher secondary bus number
+ * If the sibling has a higher secondary bus number
* and it's secondary is equal or smaller than our
* current upper limit, set the new upper limit to
- * the bus number below the silbling's range:
+ * the bus number below the sibling's range:
*/
- if (silbling->busn_res.start > bridge_to_fix->busn_res.end
- && silbling->busn_res.start <= upper_limit)
- upper_limit = silbling->busn_res.start - 1;
+ if (sibling->busn_res.start > bridge_to_fix->busn_res.end
+ && sibling->busn_res.start <= upper_limit)
+ upper_limit = sibling->busn_res.start - 1;
}
/* Show that the wanted subordinate number is not possible: */
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index c7a551c2d5f1..3bb05f17b9b4 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -16,30 +16,56 @@ config GENERIC_PHY
framework should select this config.
config PHY_EXYNOS_MIPI_VIDEO
- depends on HAS_IOMEM
tristate "S5P/EXYNOS SoC series MIPI CSI-2/DSI PHY driver"
+ depends on HAS_IOMEM
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ select GENERIC_PHY
+ default y if ARCH_S5PV210 || ARCH_EXYNOS
help
Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
and EXYNOS SoCs.
config PHY_MVEBU_SATA
def_bool y
- depends on ARCH_KIRKWOOD || ARCH_DOVE
+ depends on ARCH_KIRKWOOD || ARCH_DOVE || MACH_DOVE || MACH_KIRKWOOD
depends on OF
select GENERIC_PHY
+config OMAP_CONTROL_PHY
+ tristate "OMAP CONTROL PHY Driver"
+ help
+ Enable this to add support for the PHY part present in the control
+ module. This driver has an API to power on the USB2 PHY and to write to
+ the mailbox. The mailbox is present only in OMAP4 and the register to
+ power on the USB2 PHY is present in OMAP4 and OMAP5. OMAP5 has an
+ additional register to power on USB3 PHY/SATA PHY/PCIE PHY
+ (PIPE3 PHY).
+
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
depends on ARCH_OMAP2PLUS
depends on USB_PHY
select GENERIC_PHY
- select OMAP_CONTROL_USB
+ select OMAP_CONTROL_PHY
+ depends on OMAP_OCP2SCP
help
Enable this to support the transceiver that is part of SOC. This
driver takes care of all the PHY functionality apart from comparator.
The USB OTG controller communicates with the comparator using this
driver.
+config TI_PIPE3
+ tristate "TI PIPE3 PHY Driver"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ select GENERIC_PHY
+ select OMAP_CONTROL_PHY
+ depends on OMAP_OCP2SCP
+ help
+ Enable this to support the PIPE3 PHY that is part of TI SOCs. This
+ driver takes care of all the PHY functionality apart from comparator.
+ This driver interacts with the "OMAP Control PHY Driver" to power
+ on/off the PHY.
+
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
@@ -54,6 +80,8 @@ config TWL4030_USB
config PHY_EXYNOS_DP_VIDEO
tristate "EXYNOS SoC series Display Port PHY driver"
depends on OF
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ default ARCH_EXYNOS
select GENERIC_PHY
help
Support for Display Port PHY found on Samsung EXYNOS SoCs.
@@ -65,4 +93,77 @@ config BCM_KONA_USB2_PHY
help
Enable this to support the Broadcom Kona USB 2.0 PHY.
+config PHY_EXYNOS5250_SATA
+ tristate "Exynos5250 Sata SerDes/PHY driver"
+ depends on SOC_EXYNOS5250
+ depends on HAS_IOMEM
+ depends on OF
+ select GENERIC_PHY
+ select I2C
+ select I2C_S3C2410
+ select MFD_SYSCON
+ help
+ Enable this to support the SATA SerDes/PHY found on Samsung's
+ Exynos5250-based SoCs. This SerDes/PHY supports SATA 1.5 Gb/s,
+ SATA 3.0 Gb/s, and SATA 6.0 Gb/s speeds. It supports one SATA host
+ port to accept one SATA device.
+
+config PHY_SUN4I_USB
+ tristate "Allwinner sunxi SoC USB PHY driver"
+ depends on ARCH_SUNXI && HAS_IOMEM && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the transceiver that is part of Allwinner
+ sunxi SoCs.
+
+ This driver controls the entire USB PHY block, both the USB OTG
+ parts, as well as the 2 regular USB 2 host PHYs.
+
+config PHY_SAMSUNG_USB2
+ tristate "Samsung USB 2.0 PHY driver"
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the Samsung USB 2.0 PHY driver for Samsung
+ SoCs. This driver provides the interface for the USB 2.0 PHY. Support
+ for particular SoCs has to be enabled in addition to this driver. The
+ number and type of supported PHYs depend on the SoC.
+
+config PHY_EXYNOS4210_USB2
+ bool "Support for Exynos 4210"
+ depends on PHY_SAMSUNG_USB2
+ depends on CPU_EXYNOS4210
+ help
+ Enable USB PHY support for Exynos 4210. This option requires that
+ Samsung USB 2.0 PHY driver is enabled and means that support for this
+ particular SoC is compiled in the driver. In case of Exynos 4210 four
+ phys are available - device, host, HSIC0 and HSIC1.
+
+config PHY_EXYNOS4X12_USB2
+ bool "Support for Exynos 4x12"
+ depends on PHY_SAMSUNG_USB2
+ depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+ help
+ Enable USB PHY support for Exynos 4x12. This option requires that
+ Samsung USB 2.0 PHY driver is enabled and means that support for this
+ particular SoC is compiled in the driver. In case of Exynos 4x12 four
+ phys are available - device, host, HSIC0 and HSIC1.
+
+config PHY_EXYNOS5250_USB2
+ bool "Support for Exynos 5250"
+ depends on PHY_SAMSUNG_USB2
+ depends on SOC_EXYNOS5250
+ help
+ Enable USB PHY support for Exynos 5250. This option requires that
+ Samsung USB 2.0 PHY driver is enabled and means that support for this
+ particular SoC is compiled in the driver. In case of Exynos 5250 four
+ phys are available - device, host, HSIC0 and HSIC1.
+
+config PHY_XGENE
+ tristate "APM X-Gene 15Gbps PHY support"
+ depends on HAS_IOMEM && OF && (ARM64 || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ This option enables support for APM X-Gene SoC multi-purpose PHY.
+
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index b57c25371cca..2faf78edc864 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -7,5 +7,14 @@ obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o
obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
+obj-$(CONFIG_OMAP_CONTROL_PHY) += phy-omap-control.o
obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o
+obj-$(CONFIG_TI_PIPE3) += phy-ti-pipe3.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
+obj-$(CONFIG_PHY_EXYNOS5250_SATA) += phy-exynos5250-sata.o
+obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
+obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-samsung-usb2.o
+obj-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o
+obj-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o
+obj-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o
+obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
diff --git a/drivers/phy/phy-bcm-kona-usb2.c b/drivers/phy/phy-bcm-kona-usb2.c
index efc5c1a13a5d..e94f5a6a5645 100644
--- a/drivers/phy/phy-bcm-kona-usb2.c
+++ b/drivers/phy/phy-bcm-kona-usb2.c
@@ -128,10 +128,8 @@ static int bcm_kona_usb2_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
- return 0;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id bcm_kona_usb2_dt_ids[] = {
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 6c738376daff..623b71c54b3e 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -274,8 +274,8 @@ int phy_power_off(struct phy *phy)
EXPORT_SYMBOL_GPL(phy_power_off);
/**
- * of_phy_get() - lookup and obtain a reference to a phy by phandle
- * @dev: device that requests this phy
+ * _of_phy_get() - lookup and obtain a reference to a phy by phandle
+ * @np: device_node for which to get the phy
* @index: the index of the phy
*
* Returns the phy associated with the given phandle value,
@@ -284,20 +284,17 @@ EXPORT_SYMBOL_GPL(phy_power_off);
* not yet loaded. This function uses of_xlate call back function provided
* while registering the phy_provider to find the phy instance.
*/
-static struct phy *of_phy_get(struct device *dev, int index)
+static struct phy *_of_phy_get(struct device_node *np, int index)
{
int ret;
struct phy_provider *phy_provider;
struct phy *phy = NULL;
struct of_phandle_args args;
- ret = of_parse_phandle_with_args(dev->of_node, "phys", "#phy-cells",
+ ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
index, &args);
- if (ret) {
- dev_dbg(dev, "failed to get phy in %s node\n",
- dev->of_node->full_name);
+ if (ret)
return ERR_PTR(-ENODEV);
- }
mutex_lock(&phy_provider_mutex);
phy_provider = of_phy_provider_lookup(args.np);
@@ -317,6 +314,36 @@ err0:
}
/**
+ * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
+ * @np: device_node for which to get the phy
+ * @con_id: name of the phy from device's point of view
+ *
+ * Returns the phy driver, after getting a refcount to it; or
+ * -ENODEV if there is no such phy. The caller is responsible for
+ * calling phy_put() to release that count.
+ */
+struct phy *of_phy_get(struct device_node *np, const char *con_id)
+{
+ struct phy *phy = NULL;
+ int index = 0;
+
+ if (con_id)
+ index = of_property_match_string(np, "phy-names", con_id);
+
+ phy = _of_phy_get(np, index);
+ if (IS_ERR(phy))
+ return phy;
+
+ if (!try_module_get(phy->ops->owner))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ get_device(&phy->dev);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(of_phy_get);
+
+/**
* phy_put() - release the PHY
* @phy: the phy returned by phy_get()
*
@@ -407,7 +434,7 @@ struct phy *phy_get(struct device *dev, const char *string)
if (dev->of_node) {
index = of_property_match_string(dev->of_node, "phy-names",
string);
- phy = of_phy_get(dev, index);
+ phy = _of_phy_get(dev->of_node, index);
} else {
phy = phy_lookup(dev, string);
}
@@ -499,6 +526,37 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(devm_phy_optional_get);
/**
+ * devm_of_phy_get() - lookup and obtain a reference to a phy.
+ * @dev: device that requests this phy
+ * @np: node containing the phy
+ * @con_id: name of the phy from device's point of view
+ *
+ * Gets the phy using of_phy_get(), and associates a device with it using
+ * devres. On driver detach, the release function is invoked on the
+ * devres data and the devres data is freed.
+ */
+struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
+ const char *con_id)
+{
+ struct phy **ptr, *phy;
+
+ ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ phy = of_phy_get(np, con_id);
+ if (!IS_ERR(phy)) {
+ *ptr = phy;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_get);
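Editor's note: the new of_phy_get()/devm_of_phy_get() entry points take a device_node explicitly, so a controller can resolve a PHY referenced from a node other than its own (a child port node, for instance). A hedged consumer sketch — the driver name, the "port" child node, and the "sata" phy-names entry are assumptions for the example, not something this patch defines:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

/*
 * Hypothetical consumer: the PHY reference lives in a child node, not in
 * the controller's own node, so plain phy_get() would not find it.
 */
static int my_ctrl_probe(struct platform_device *pdev)
{
        struct device_node *port;
        struct phy *phy;
        int ret;

        port = of_get_child_by_name(pdev->dev.of_node, "port");  /* assumed DT layout */
        if (!port)
                return -ENODEV;

        phy = devm_of_phy_get(&pdev->dev, port, "sata");  /* "sata": assumed phy-names entry */
        of_node_put(port);
        if (IS_ERR(phy))
                return PTR_ERR(phy);

        ret = phy_init(phy);
        if (ret)
                return ret;

        return phy_power_on(phy);
}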
+
+/**
* phy_create() - create a new phy
* @dev: device that is creating the new phy
* @ops: function pointers for performing phy operations
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c
new file mode 100644
index 000000000000..236a52ad94eb
--- /dev/null
+++ b/drivers/phy/phy-exynos4210-usb2.c
@@ -0,0 +1,261 @@
+/*
+ * Samsung SoC USB 1.1/2.0 PHY driver - Exynos 4210 support
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include "phy-samsung-usb2.h"
+
+/* Exynos USB PHY registers */
+
+/* PHY power control */
+#define EXYNOS_4210_UPHYPWR 0x0
+
+#define EXYNOS_4210_UPHYPWR_PHY0_SUSPEND BIT(0)
+#define EXYNOS_4210_UPHYPWR_PHY0_PWR BIT(3)
+#define EXYNOS_4210_UPHYPWR_PHY0_OTG_PWR BIT(4)
+#define EXYNOS_4210_UPHYPWR_PHY0_SLEEP BIT(5)
+#define EXYNOS_4210_UPHYPWR_PHY0 ( \
+ EXYNOS_4210_UPHYPWR_PHY0_SUSPEND | \
+ EXYNOS_4210_UPHYPWR_PHY0_PWR | \
+ EXYNOS_4210_UPHYPWR_PHY0_OTG_PWR | \
+ EXYNOS_4210_UPHYPWR_PHY0_SLEEP)
+
+#define EXYNOS_4210_UPHYPWR_PHY1_SUSPEND BIT(6)
+#define EXYNOS_4210_UPHYPWR_PHY1_PWR BIT(7)
+#define EXYNOS_4210_UPHYPWR_PHY1_SLEEP BIT(8)
+#define EXYNOS_4210_UPHYPWR_PHY1 ( \
+ EXYNOS_4210_UPHYPWR_PHY1_SUSPEND | \
+ EXYNOS_4210_UPHYPWR_PHY1_PWR | \
+ EXYNOS_4210_UPHYPWR_PHY1_SLEEP)
+
+#define EXYNOS_4210_UPHYPWR_HSIC0_SUSPEND BIT(9)
+#define EXYNOS_4210_UPHYPWR_HSIC0_SLEEP BIT(10)
+#define EXYNOS_4210_UPHYPWR_HSIC0 ( \
+ EXYNOS_4210_UPHYPWR_HSIC0_SUSPEND | \
+ EXYNOS_4210_UPHYPWR_HSIC0_SLEEP)
+
+#define EXYNOS_4210_UPHYPWR_HSIC1_SUSPEND BIT(11)
+#define EXYNOS_4210_UPHYPWR_HSIC1_SLEEP BIT(12)
+#define EXYNOS_4210_UPHYPWR_HSIC1 ( \
+ EXYNOS_4210_UPHYPWR_HSIC1_SUSPEND | \
+ EXYNOS_4210_UPHYPWR_HSIC1_SLEEP)
+
+/* PHY clock control */
+#define EXYNOS_4210_UPHYCLK 0x4
+
+#define EXYNOS_4210_UPHYCLK_PHYFSEL_MASK (0x3 << 0)
+#define EXYNOS_4210_UPHYCLK_PHYFSEL_OFFSET 0
+#define EXYNOS_4210_UPHYCLK_PHYFSEL_48MHZ (0x0 << 0)
+#define EXYNOS_4210_UPHYCLK_PHYFSEL_24MHZ (0x3 << 0)
+#define EXYNOS_4210_UPHYCLK_PHYFSEL_12MHZ (0x2 << 0)
+
+#define EXYNOS_4210_UPHYCLK_PHY0_ID_PULLUP BIT(2)
+#define EXYNOS_4210_UPHYCLK_PHY0_COMMON_ON BIT(4)
+#define EXYNOS_4210_UPHYCLK_PHY1_COMMON_ON BIT(7)
+
+/* PHY reset control */
+#define EXYNOS_4210_UPHYRST 0x8
+
+#define EXYNOS_4210_URSTCON_PHY0 BIT(0)
+#define EXYNOS_4210_URSTCON_OTG_HLINK BIT(1)
+#define EXYNOS_4210_URSTCON_OTG_PHYLINK BIT(2)
+#define EXYNOS_4210_URSTCON_PHY1_ALL BIT(3)
+#define EXYNOS_4210_URSTCON_PHY1_P0 BIT(4)
+#define EXYNOS_4210_URSTCON_PHY1_P1P2 BIT(5)
+#define EXYNOS_4210_URSTCON_HOST_LINK_ALL BIT(6)
+#define EXYNOS_4210_URSTCON_HOST_LINK_P0 BIT(7)
+#define EXYNOS_4210_URSTCON_HOST_LINK_P1 BIT(8)
+#define EXYNOS_4210_URSTCON_HOST_LINK_P2 BIT(9)
+
+/* Isolation, configured in the power management unit */
+#define EXYNOS_4210_USB_ISOL_DEVICE_OFFSET 0x704
+#define EXYNOS_4210_USB_ISOL_DEVICE BIT(0)
+#define EXYNOS_4210_USB_ISOL_HOST_OFFSET 0x708
+#define EXYNOS_4210_USB_ISOL_HOST BIT(0)
+
+/* USB PHY1 floating prevention */
+#define EXYNOS_4210_UPHY1CON 0x34
+#define EXYNOS_4210_UPHY1CON_FLOAT_PREVENTION 0x1
+
+/* Mode switching SUB Device <-> Host */
+#define EXYNOS_4210_MODE_SWITCH_OFFSET 0x21c
+#define EXYNOS_4210_MODE_SWITCH_MASK 1
+#define EXYNOS_4210_MODE_SWITCH_DEVICE 0
+#define EXYNOS_4210_MODE_SWITCH_HOST 1
+
+enum exynos4210_phy_id {
+ EXYNOS4210_DEVICE,
+ EXYNOS4210_HOST,
+ EXYNOS4210_HSIC0,
+ EXYNOS4210_HSIC1,
+ EXYNOS4210_NUM_PHYS,
+};
+
+/*
+ * exynos4210_rate_to_clk() converts the supplied clock rate to the value that
+ * can be written to the phy register.
+ */
+static int exynos4210_rate_to_clk(unsigned long rate, u32 *reg)
+{
+ switch (rate) {
+ case 12 * MHZ:
+ *reg = EXYNOS_4210_UPHYCLK_PHYFSEL_12MHZ;
+ break;
+ case 24 * MHZ:
+ *reg = EXYNOS_4210_UPHYCLK_PHYFSEL_24MHZ;
+ break;
+ case 48 * MHZ:
+ *reg = EXYNOS_4210_UPHYCLK_PHYFSEL_48MHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void exynos4210_isol(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 offset;
+ u32 mask;
+
+ switch (inst->cfg->id) {
+ case EXYNOS4210_DEVICE:
+ offset = EXYNOS_4210_USB_ISOL_DEVICE_OFFSET;
+ mask = EXYNOS_4210_USB_ISOL_DEVICE;
+ break;
+ case EXYNOS4210_HOST:
+ offset = EXYNOS_4210_USB_ISOL_HOST_OFFSET;
+ mask = EXYNOS_4210_USB_ISOL_HOST;
+ break;
+ default:
+ return;
+ };
+
+ regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
+}
+
+static void exynos4210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 rstbits = 0;
+ u32 phypwr = 0;
+ u32 rst;
+ u32 pwr;
+ u32 clk;
+
+ switch (inst->cfg->id) {
+ case EXYNOS4210_DEVICE:
+ phypwr = EXYNOS_4210_UPHYPWR_PHY0;
+ rstbits = EXYNOS_4210_URSTCON_PHY0;
+ break;
+ case EXYNOS4210_HOST:
+ phypwr = EXYNOS_4210_UPHYPWR_PHY1;
+ rstbits = EXYNOS_4210_URSTCON_PHY1_ALL |
+ EXYNOS_4210_URSTCON_PHY1_P0 |
+ EXYNOS_4210_URSTCON_PHY1_P1P2 |
+ EXYNOS_4210_URSTCON_HOST_LINK_ALL |
+ EXYNOS_4210_URSTCON_HOST_LINK_P0;
+ writel(on, drv->reg_phy + EXYNOS_4210_UPHY1CON);
+ break;
+ case EXYNOS4210_HSIC0:
+ phypwr = EXYNOS_4210_UPHYPWR_HSIC0;
+ rstbits = EXYNOS_4210_URSTCON_PHY1_P1P2 |
+ EXYNOS_4210_URSTCON_HOST_LINK_P1;
+ break;
+ case EXYNOS4210_HSIC1:
+ phypwr = EXYNOS_4210_UPHYPWR_HSIC1;
+ rstbits = EXYNOS_4210_URSTCON_PHY1_P1P2 |
+ EXYNOS_4210_URSTCON_HOST_LINK_P2;
+ break;
+ };
+
+ if (on) {
+ clk = readl(drv->reg_phy + EXYNOS_4210_UPHYCLK);
+ clk &= ~EXYNOS_4210_UPHYCLK_PHYFSEL_MASK;
+ clk |= drv->ref_reg_val << EXYNOS_4210_UPHYCLK_PHYFSEL_OFFSET;
+ writel(clk, drv->reg_phy + EXYNOS_4210_UPHYCLK);
+
+ pwr = readl(drv->reg_phy + EXYNOS_4210_UPHYPWR);
+ pwr &= ~phypwr;
+ writel(pwr, drv->reg_phy + EXYNOS_4210_UPHYPWR);
+
+ rst = readl(drv->reg_phy + EXYNOS_4210_UPHYRST);
+ rst |= rstbits;
+ writel(rst, drv->reg_phy + EXYNOS_4210_UPHYRST);
+ udelay(10);
+ rst &= ~rstbits;
+ writel(rst, drv->reg_phy + EXYNOS_4210_UPHYRST);
+ /* The following delay is necessary for the reset sequence to be
+ * completed */
+ udelay(80);
+ } else {
+ pwr = readl(drv->reg_phy + EXYNOS_4210_UPHYPWR);
+ pwr |= phypwr;
+ writel(pwr, drv->reg_phy + EXYNOS_4210_UPHYPWR);
+ }
+}
+
+static int exynos4210_power_on(struct samsung_usb2_phy_instance *inst)
+{
+ /* Order of initialisation is important - first power then isolation */
+ exynos4210_phy_pwr(inst, 1);
+ exynos4210_isol(inst, 0);
+
+ return 0;
+}
+
+static int exynos4210_power_off(struct samsung_usb2_phy_instance *inst)
+{
+ exynos4210_isol(inst, 1);
+ exynos4210_phy_pwr(inst, 0);
+
+ return 0;
+}
+
+
+static const struct samsung_usb2_common_phy exynos4210_phys[] = {
+ {
+ .label = "device",
+ .id = EXYNOS4210_DEVICE,
+ .power_on = exynos4210_power_on,
+ .power_off = exynos4210_power_off,
+ },
+ {
+ .label = "host",
+ .id = EXYNOS4210_HOST,
+ .power_on = exynos4210_power_on,
+ .power_off = exynos4210_power_off,
+ },
+ {
+ .label = "hsic0",
+ .id = EXYNOS4210_HSIC0,
+ .power_on = exynos4210_power_on,
+ .power_off = exynos4210_power_off,
+ },
+ {
+ .label = "hsic1",
+ .id = EXYNOS4210_HSIC1,
+ .power_on = exynos4210_power_on,
+ .power_off = exynos4210_power_off,
+ },
+ {},
+};
+
+const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = {
+ .has_mode_switch = 0,
+ .num_phys = EXYNOS4210_NUM_PHYS,
+ .phys = exynos4210_phys,
+ .rate_to_clk = exynos4210_rate_to_clk,
+};
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c
new file mode 100644
index 000000000000..d92a7cc5698a
--- /dev/null
+++ b/drivers/phy/phy-exynos4x12-usb2.c
@@ -0,0 +1,328 @@
+/*
+ * Samsung SoC USB 1.1/2.0 PHY driver - Exynos 4x12 support
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include "phy-samsung-usb2.h"
+
+/* Exynos USB PHY registers */
+
+/* PHY power control */
+#define EXYNOS_4x12_UPHYPWR 0x0
+
+#define EXYNOS_4x12_UPHYPWR_PHY0_SUSPEND BIT(0)
+#define EXYNOS_4x12_UPHYPWR_PHY0_PWR BIT(3)
+#define EXYNOS_4x12_UPHYPWR_PHY0_OTG_PWR BIT(4)
+#define EXYNOS_4x12_UPHYPWR_PHY0_SLEEP BIT(5)
+#define EXYNOS_4x12_UPHYPWR_PHY0 ( \
+ EXYNOS_4x12_UPHYPWR_PHY0_SUSPEND | \
+ EXYNOS_4x12_UPHYPWR_PHY0_PWR | \
+ EXYNOS_4x12_UPHYPWR_PHY0_OTG_PWR | \
+ EXYNOS_4x12_UPHYPWR_PHY0_SLEEP)
+
+#define EXYNOS_4x12_UPHYPWR_PHY1_SUSPEND BIT(6)
+#define EXYNOS_4x12_UPHYPWR_PHY1_PWR BIT(7)
+#define EXYNOS_4x12_UPHYPWR_PHY1_SLEEP BIT(8)
+#define EXYNOS_4x12_UPHYPWR_PHY1 ( \
+ EXYNOS_4x12_UPHYPWR_PHY1_SUSPEND | \
+ EXYNOS_4x12_UPHYPWR_PHY1_PWR | \
+ EXYNOS_4x12_UPHYPWR_PHY1_SLEEP)
+
+#define EXYNOS_4x12_UPHYPWR_HSIC0_SUSPEND BIT(9)
+#define EXYNOS_4x12_UPHYPWR_HSIC0_PWR BIT(10)
+#define EXYNOS_4x12_UPHYPWR_HSIC0_SLEEP BIT(11)
+#define EXYNOS_4x12_UPHYPWR_HSIC0 ( \
+ EXYNOS_4x12_UPHYPWR_HSIC0_SUSPEND | \
+ EXYNOS_4x12_UPHYPWR_HSIC0_PWR | \
+ EXYNOS_4x12_UPHYPWR_HSIC0_SLEEP)
+
+#define EXYNOS_4x12_UPHYPWR_HSIC1_SUSPEND BIT(12)
+#define EXYNOS_4x12_UPHYPWR_HSIC1_PWR BIT(13)
+#define EXYNOS_4x12_UPHYPWR_HSIC1_SLEEP BIT(14)
+#define EXYNOS_4x12_UPHYPWR_HSIC1 ( \
+ EXYNOS_4x12_UPHYPWR_HSIC1_SUSPEND | \
+ EXYNOS_4x12_UPHYPWR_HSIC1_PWR | \
+ EXYNOS_4x12_UPHYPWR_HSIC1_SLEEP)
+
+/* PHY clock control */
+#define EXYNOS_4x12_UPHYCLK 0x4
+
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_MASK (0x7 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_OFFSET 0
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_9MHZ6 (0x0 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_10MHZ (0x1 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_12MHZ (0x2 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_19MHZ2 (0x3 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_20MHZ (0x4 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_24MHZ (0x5 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_50MHZ (0x7 << 0)
+
+#define EXYNOS_4x12_UPHYCLK_PHY0_ID_PULLUP BIT(3)
+#define EXYNOS_4x12_UPHYCLK_PHY0_COMMON_ON BIT(4)
+#define EXYNOS_4x12_UPHYCLK_PHY1_COMMON_ON BIT(7)
+
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_MASK (0x7f << 10)
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_OFFSET 10
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_12MHZ (0x24 << 10)
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_15MHZ (0x1c << 10)
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_16MHZ (0x1a << 10)
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_19MHZ2 (0x15 << 10)
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_20MHZ (0x14 << 10)
+
+/* PHY reset control */
+#define EXYNOS_4x12_UPHYRST 0x8
+
+#define EXYNOS_4x12_URSTCON_PHY0 BIT(0)
+#define EXYNOS_4x12_URSTCON_OTG_HLINK BIT(1)
+#define EXYNOS_4x12_URSTCON_OTG_PHYLINK BIT(2)
+#define EXYNOS_4x12_URSTCON_HOST_PHY BIT(3)
+#define EXYNOS_4x12_URSTCON_PHY1 BIT(4)
+#define EXYNOS_4x12_URSTCON_HSIC0 BIT(5)
+#define EXYNOS_4x12_URSTCON_HSIC1 BIT(6)
+#define EXYNOS_4x12_URSTCON_HOST_LINK_ALL BIT(7)
+#define EXYNOS_4x12_URSTCON_HOST_LINK_P0 BIT(8)
+#define EXYNOS_4x12_URSTCON_HOST_LINK_P1 BIT(9)
+#define EXYNOS_4x12_URSTCON_HOST_LINK_P2 BIT(10)
+
+/* Isolation, configured in the power management unit */
+#define EXYNOS_4x12_USB_ISOL_OFFSET 0x704
+#define EXYNOS_4x12_USB_ISOL_OTG BIT(0)
+#define EXYNOS_4x12_USB_ISOL_HSIC0_OFFSET 0x708
+#define EXYNOS_4x12_USB_ISOL_HSIC0 BIT(0)
+#define EXYNOS_4x12_USB_ISOL_HSIC1_OFFSET 0x70c
+#define EXYNOS_4x12_USB_ISOL_HSIC1 BIT(0)
+
+/* Mode switching SUB Device <-> Host */
+#define EXYNOS_4x12_MODE_SWITCH_OFFSET 0x21c
+#define EXYNOS_4x12_MODE_SWITCH_MASK 1
+#define EXYNOS_4x12_MODE_SWITCH_DEVICE 0
+#define EXYNOS_4x12_MODE_SWITCH_HOST 1
+
+enum exynos4x12_phy_id {
+ EXYNOS4x12_DEVICE,
+ EXYNOS4x12_HOST,
+ EXYNOS4x12_HSIC0,
+ EXYNOS4x12_HSIC1,
+ EXYNOS4x12_NUM_PHYS,
+};
+
+/*
+ * exynos4x12_rate_to_clk() converts the supplied clock rate to the value that
+ * can be written to the phy register.
+ */
+static int exynos4x12_rate_to_clk(unsigned long rate, u32 *reg)
+{
+ /* EXYNOS_4x12_UPHYCLK_PHYFSEL_MASK */
+
+ switch (rate) {
+ case 9600 * KHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_9MHZ6;
+ break;
+ case 10 * MHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_10MHZ;
+ break;
+ case 12 * MHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_12MHZ;
+ break;
+ case 19200 * KHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_19MHZ2;
+ break;
+ case 20 * MHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_20MHZ;
+ break;
+ case 24 * MHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_24MHZ;
+ break;
+ case 50 * MHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_50MHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void exynos4x12_isol(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 offset;
+ u32 mask;
+
+ switch (inst->cfg->id) {
+ case EXYNOS4x12_DEVICE:
+ case EXYNOS4x12_HOST:
+ offset = EXYNOS_4x12_USB_ISOL_OFFSET;
+ mask = EXYNOS_4x12_USB_ISOL_OTG;
+ break;
+ case EXYNOS4x12_HSIC0:
+ offset = EXYNOS_4x12_USB_ISOL_HSIC0_OFFSET;
+ mask = EXYNOS_4x12_USB_ISOL_HSIC0;
+ break;
+ case EXYNOS4x12_HSIC1:
+ offset = EXYNOS_4x12_USB_ISOL_HSIC1_OFFSET;
+ mask = EXYNOS_4x12_USB_ISOL_HSIC1;
+ break;
+ default:
+ return;
+ };
+
+ regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
+}
+
+static void exynos4x12_setup_clk(struct samsung_usb2_phy_instance *inst)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 clk;
+
+ clk = readl(drv->reg_phy + EXYNOS_4x12_UPHYCLK);
+ clk &= ~EXYNOS_4x12_UPHYCLK_PHYFSEL_MASK;
+ clk |= drv->ref_reg_val << EXYNOS_4x12_UPHYCLK_PHYFSEL_OFFSET;
+ writel(clk, drv->reg_phy + EXYNOS_4x12_UPHYCLK);
+}
+
+static void exynos4x12_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 rstbits = 0;
+ u32 phypwr = 0;
+ u32 rst;
+ u32 pwr;
+ u32 mode = 0;
+ u32 switch_mode = 0;
+
+ switch (inst->cfg->id) {
+ case EXYNOS4x12_DEVICE:
+ phypwr = EXYNOS_4x12_UPHYPWR_PHY0;
+ rstbits = EXYNOS_4x12_URSTCON_PHY0;
+ mode = EXYNOS_4x12_MODE_SWITCH_DEVICE;
+ switch_mode = 1;
+ break;
+ case EXYNOS4x12_HOST:
+ phypwr = EXYNOS_4x12_UPHYPWR_PHY1;
+ rstbits = EXYNOS_4x12_URSTCON_HOST_PHY;
+ mode = EXYNOS_4x12_MODE_SWITCH_HOST;
+ switch_mode = 1;
+ break;
+ case EXYNOS4x12_HSIC0:
+ phypwr = EXYNOS_4x12_UPHYPWR_HSIC0;
+ rstbits = EXYNOS_4x12_URSTCON_HSIC1 |
+ EXYNOS_4x12_URSTCON_HOST_LINK_P0 |
+ EXYNOS_4x12_URSTCON_HOST_PHY;
+ break;
+ case EXYNOS4x12_HSIC1:
+ phypwr = EXYNOS_4x12_UPHYPWR_HSIC1;
+ rstbits = EXYNOS_4x12_URSTCON_HSIC1 |
+ EXYNOS_4x12_URSTCON_HOST_LINK_P1;
+ break;
+ };
+
+ if (on) {
+ if (switch_mode)
+ regmap_update_bits(drv->reg_sys,
+ EXYNOS_4x12_MODE_SWITCH_OFFSET,
+ EXYNOS_4x12_MODE_SWITCH_MASK, mode);
+
+ pwr = readl(drv->reg_phy + EXYNOS_4x12_UPHYPWR);
+ pwr &= ~phypwr;
+ writel(pwr, drv->reg_phy + EXYNOS_4x12_UPHYPWR);
+
+ rst = readl(drv->reg_phy + EXYNOS_4x12_UPHYRST);
+ rst |= rstbits;
+ writel(rst, drv->reg_phy + EXYNOS_4x12_UPHYRST);
+ udelay(10);
+ rst &= ~rstbits;
+ writel(rst, drv->reg_phy + EXYNOS_4x12_UPHYRST);
+ /* The following delay is necessary for the reset sequence to be
+ * completed */
+ udelay(80);
+ } else {
+ pwr = readl(drv->reg_phy + EXYNOS_4x12_UPHYPWR);
+ pwr |= phypwr;
+ writel(pwr, drv->reg_phy + EXYNOS_4x12_UPHYPWR);
+ }
+}
+
+static int exynos4x12_power_on(struct samsung_usb2_phy_instance *inst)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+
+ inst->enabled = 1;
+ exynos4x12_setup_clk(inst);
+ exynos4x12_phy_pwr(inst, 1);
+ exynos4x12_isol(inst, 0);
+
+ /* Power on the device, as it is necessary for HSIC to work */
+ if (inst->cfg->id == EXYNOS4x12_HSIC0) {
+ struct samsung_usb2_phy_instance *device =
+ &drv->instances[EXYNOS4x12_DEVICE];
+ exynos4x12_phy_pwr(device, 1);
+ exynos4x12_isol(device, 0);
+ }
+
+ return 0;
+}
+
+static int exynos4x12_power_off(struct samsung_usb2_phy_instance *inst)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ struct samsung_usb2_phy_instance *device =
+ &drv->instances[EXYNOS4x12_DEVICE];
+
+ inst->enabled = 0;
+ exynos4x12_isol(inst, 1);
+ exynos4x12_phy_pwr(inst, 0);
+
+ if (inst->cfg->id == EXYNOS4x12_HSIC0 && !device->enabled) {
+ exynos4x12_isol(device, 1);
+ exynos4x12_phy_pwr(device, 0);
+ }
+
+ return 0;
+}
+
+
+static const struct samsung_usb2_common_phy exynos4x12_phys[] = {
+ {
+ .label = "device",
+ .id = EXYNOS4x12_DEVICE,
+ .power_on = exynos4x12_power_on,
+ .power_off = exynos4x12_power_off,
+ },
+ {
+ .label = "host",
+ .id = EXYNOS4x12_HOST,
+ .power_on = exynos4x12_power_on,
+ .power_off = exynos4x12_power_off,
+ },
+ {
+ .label = "hsic0",
+ .id = EXYNOS4x12_HSIC0,
+ .power_on = exynos4x12_power_on,
+ .power_off = exynos4x12_power_off,
+ },
+ {
+ .label = "hsic1",
+ .id = EXYNOS4x12_HSIC1,
+ .power_on = exynos4x12_power_on,
+ .power_off = exynos4x12_power_off,
+ },
+ {},
+};
+
+const struct samsung_usb2_phy_config exynos4x12_usb2_phy_config = {
+ .has_mode_switch = 1,
+ .num_phys = EXYNOS4x12_NUM_PHYS,
+ .phys = exynos4x12_phys,
+ .rate_to_clk = exynos4x12_rate_to_clk,
+};
diff --git a/drivers/phy/phy-exynos5250-sata.c b/drivers/phy/phy-exynos5250-sata.c
new file mode 100644
index 000000000000..c9361b731fa6
--- /dev/null
+++ b/drivers/phy/phy-exynos5250-sata.c
@@ -0,0 +1,251 @@
+/*
+ * Samsung SATA SerDes(PHY) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Authors: Girish K S <ks.giri@samsung.com>
+ * Yuvaraj Kumar C D <yuvaraj.cd@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+
+#define SATAPHY_CONTROL_OFFSET 0x0724
+#define EXYNOS5_SATAPHY_PMU_ENABLE BIT(0)
+#define EXYNOS5_SATA_RESET 0x4
+#define RESET_GLOBAL_RST_N BIT(0)
+#define RESET_CMN_RST_N BIT(1)
+#define RESET_CMN_BLOCK_RST_N BIT(2)
+#define RESET_CMN_I2C_RST_N BIT(3)
+#define RESET_TX_RX_PIPE_RST_N BIT(4)
+#define RESET_TX_RX_BLOCK_RST_N BIT(5)
+#define RESET_TX_RX_I2C_RST_N (BIT(6) | BIT(7))
+#define LINK_RESET 0xf0000
+#define EXYNOS5_SATA_MODE0 0x10
+#define SATA_SPD_GEN3 BIT(1)
+#define EXYNOS5_SATA_CTRL0 0x14
+#define CTRL0_P0_PHY_CALIBRATED_SEL BIT(9)
+#define CTRL0_P0_PHY_CALIBRATED BIT(8)
+#define EXYNOS5_SATA_PHSATA_CTRLM 0xe0
+#define PHCTRLM_REF_RATE BIT(1)
+#define PHCTRLM_HIGH_SPEED BIT(0)
+#define EXYNOS5_SATA_PHSATA_STATM 0xf0
+#define PHSTATM_PLL_LOCKED BIT(0)
+
+#define PHY_PLL_TIMEOUT (usecs_to_jiffies(1000))
+
+struct exynos_sata_phy {
+ struct phy *phy;
+ struct clk *phyclk;
+ void __iomem *regs;
+ struct regmap *pmureg;
+ struct i2c_client *client;
+};
+
+static int wait_for_reg_status(void __iomem *base, u32 reg, u32 checkbit,
+ u32 status)
+{
+ unsigned long timeout = jiffies + PHY_PLL_TIMEOUT;
+
+ while (time_before(jiffies, timeout)) {
+ if ((readl(base + reg) & checkbit) == status)
+ return 0;
+ }
+
+ return -EFAULT;
+}
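Editorial aside (not part of the patch): wait_for_reg_status() above busy-polls the status register in a tight loop for up to PHY_PLL_TIMEOUT. A hedged sketch of the same timeout logic that hints the CPU between reads is shown below; cpu_relax() is assumed to be reachable through the existing includes, and returning -ETIMEDOUT instead of -EFAULT is purely an editorial choice.

	/* Illustrative variant only -- same logic, gentler polling. */
	static int wait_for_reg_status_relaxed(void __iomem *base, u32 reg,
					       u32 checkbit, u32 status)
	{
		unsigned long timeout = jiffies + PHY_PLL_TIMEOUT;

		while (time_before(jiffies, timeout)) {
			if ((readl(base + reg) & checkbit) == status)
				return 0;
			cpu_relax();	/* yield the pipeline between reads */
		}

		return -ETIMEDOUT;
	}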
+
+static int exynos_sata_phy_power_on(struct phy *phy)
+{
+ struct exynos_sata_phy *sata_phy = phy_get_drvdata(phy);
+
+ return regmap_update_bits(sata_phy->pmureg, SATAPHY_CONTROL_OFFSET,
+ EXYNOS5_SATAPHY_PMU_ENABLE, true);
+
+}
+
+static int exynos_sata_phy_power_off(struct phy *phy)
+{
+ struct exynos_sata_phy *sata_phy = phy_get_drvdata(phy);
+
+ return regmap_update_bits(sata_phy->pmureg, SATAPHY_CONTROL_OFFSET,
+ EXYNOS5_SATAPHY_PMU_ENABLE, false);
+
+}
+
+static int exynos_sata_phy_init(struct phy *phy)
+{
+ u32 val = 0;
+ int ret = 0;
+ u8 buf[] = { 0x3a, 0x0b };
+ struct exynos_sata_phy *sata_phy = phy_get_drvdata(phy);
+
+ ret = regmap_update_bits(sata_phy->pmureg, SATAPHY_CONTROL_OFFSET,
+ EXYNOS5_SATAPHY_PMU_ENABLE, true);
+ if (ret != 0)
+ dev_err(&sata_phy->phy->dev, "phy init failed\n");
+
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
+ val |= RESET_GLOBAL_RST_N | RESET_CMN_RST_N | RESET_CMN_BLOCK_RST_N
+ | RESET_CMN_I2C_RST_N | RESET_TX_RX_PIPE_RST_N
+ | RESET_TX_RX_BLOCK_RST_N | RESET_TX_RX_I2C_RST_N;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
+ val |= LINK_RESET;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
+ val |= RESET_CMN_RST_N;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
+ val &= ~PHCTRLM_REF_RATE;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
+
+ /* High speed enable for Gen3 */
+ val = readl(sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
+ val |= PHCTRLM_HIGH_SPEED;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_CTRL0);
+ val |= CTRL0_P0_PHY_CALIBRATED_SEL | CTRL0_P0_PHY_CALIBRATED;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_CTRL0);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_MODE0);
+ val |= SATA_SPD_GEN3;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_MODE0);
+
+ ret = i2c_master_send(sata_phy->client, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ /* release cmu reset */
+ val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
+ val &= ~RESET_CMN_RST_N;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
+ val |= RESET_CMN_RST_N;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ ret = wait_for_reg_status(sata_phy->regs,
+ EXYNOS5_SATA_PHSATA_STATM,
+ PHSTATM_PLL_LOCKED, 1);
+ if (ret < 0)
+ dev_err(&sata_phy->phy->dev,
+ "PHY PLL locking failed\n");
+ return ret;
+}
+
+static struct phy_ops exynos_sata_phy_ops = {
+ .init = exynos_sata_phy_init,
+ .power_on = exynos_sata_phy_power_on,
+ .power_off = exynos_sata_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int exynos_sata_phy_probe(struct platform_device *pdev)
+{
+ struct exynos_sata_phy *sata_phy;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct phy_provider *phy_provider;
+ struct device_node *node;
+ int ret = 0;
+
+ sata_phy = devm_kzalloc(dev, sizeof(*sata_phy), GFP_KERNEL);
+ if (!sata_phy)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ sata_phy->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(sata_phy->regs))
+ return PTR_ERR(sata_phy->regs);
+
+ sata_phy->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(sata_phy->pmureg)) {
+ dev_err(dev, "syscon regmap lookup failed.\n");
+ return PTR_ERR(sata_phy->pmureg);
+ }
+
+ node = of_parse_phandle(dev->of_node,
+ "samsung,exynos-sataphy-i2c-phandle", 0);
+ if (!node)
+ return -EINVAL;
+
+ sata_phy->client = of_find_i2c_device_by_node(node);
+ if (!sata_phy->client)
+ return -EPROBE_DEFER;
+
+ dev_set_drvdata(dev, sata_phy);
+
+ sata_phy->phyclk = devm_clk_get(dev, "sata_phyctrl");
+ if (IS_ERR(sata_phy->phyclk)) {
+ dev_err(dev, "failed to get clk for PHY\n");
+ return PTR_ERR(sata_phy->phyclk);
+ }
+
+ ret = clk_prepare_enable(sata_phy->phyclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable source clk\n");
+ return ret;
+ }
+
+ sata_phy->phy = devm_phy_create(dev, &exynos_sata_phy_ops, NULL);
+ if (IS_ERR(sata_phy->phy)) {
+ clk_disable_unprepare(sata_phy->phyclk);
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(sata_phy->phy);
+ }
+
+ phy_set_drvdata(sata_phy->phy, sata_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ clk_disable_unprepare(sata_phy->phyclk);
+ return PTR_ERR(phy_provider);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id exynos_sata_phy_of_match[] = {
+ { .compatible = "samsung,exynos5250-sata-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_sata_phy_of_match);
+
+static struct platform_driver exynos_sata_phy_driver = {
+ .probe = exynos_sata_phy_probe,
+ .driver = {
+ .of_match_table = exynos_sata_phy_of_match,
+ .name = "samsung,sata-phy",
+ .owner = THIS_MODULE,
+ }
+};
+module_platform_driver(exynos_sata_phy_driver);
+
+MODULE_DESCRIPTION("Samsung SerDes PHY driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
+MODULE_AUTHOR("Yuvaraj C D <yuvaraj.cd@samsung.com>");
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c
new file mode 100644
index 000000000000..94179afda951
--- /dev/null
+++ b/drivers/phy/phy-exynos5250-usb2.c
@@ -0,0 +1,404 @@
+/*
+ * Samsung SoC USB 1.1/2.0 PHY driver - Exynos 5250 support
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include "phy-samsung-usb2.h"
+
+/* Exynos USB PHY registers */
+#define EXYNOS_5250_REFCLKSEL_CRYSTAL 0x0
+#define EXYNOS_5250_REFCLKSEL_XO 0x1
+#define EXYNOS_5250_REFCLKSEL_CLKCORE 0x2
+
+#define EXYNOS_5250_FSEL_9MHZ6 0x0
+#define EXYNOS_5250_FSEL_10MHZ 0x1
+#define EXYNOS_5250_FSEL_12MHZ 0x2
+#define EXYNOS_5250_FSEL_19MHZ2 0x3
+#define EXYNOS_5250_FSEL_20MHZ 0x4
+#define EXYNOS_5250_FSEL_24MHZ 0x5
+#define EXYNOS_5250_FSEL_50MHZ 0x7
+
+/* Normal host */
+#define EXYNOS_5250_HOSTPHYCTRL0 0x0
+
+#define EXYNOS_5250_HOSTPHYCTRL0_PHYSWRSTALL BIT(31)
+#define EXYNOS_5250_HOSTPHYCTRL0_REFCLKSEL_SHIFT 19
+#define EXYNOS_5250_HOSTPHYCTRL0_REFCLKSEL_MASK \
+ (0x3 << EXYNOS_5250_HOSTPHYCTRL0_REFCLKSEL_SHIFT)
+#define EXYNOS_5250_HOSTPHYCTRL0_FSEL_SHIFT 16
+#define EXYNOS_5250_HOSTPHYCTRL0_FSEL_MASK \
+ (0x7 << EXYNOS_5250_HOSTPHYCTRL0_FSEL_SHIFT)
+#define EXYNOS_5250_HOSTPHYCTRL0_TESTBURNIN BIT(11)
+#define EXYNOS_5250_HOSTPHYCTRL0_RETENABLE BIT(10)
+#define EXYNOS_5250_HOSTPHYCTRL0_COMMON_ON_N BIT(9)
+#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_MASK (0x3 << 7)
+#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_DUAL (0x0 << 7)
+#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_ID0 (0x1 << 7)
+#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_ANALOGTEST (0x2 << 7)
+#define EXYNOS_5250_HOSTPHYCTRL0_SIDDQ BIT(6)
+#define EXYNOS_5250_HOSTPHYCTRL0_FORCESLEEP BIT(5)
+#define EXYNOS_5250_HOSTPHYCTRL0_FORCESUSPEND BIT(4)
+#define EXYNOS_5250_HOSTPHYCTRL0_WORDINTERFACE BIT(3)
+#define EXYNOS_5250_HOSTPHYCTRL0_UTMISWRST BIT(2)
+#define EXYNOS_5250_HOSTPHYCTRL0_LINKSWRST BIT(1)
+#define EXYNOS_5250_HOSTPHYCTRL0_PHYSWRST BIT(0)
+
+/* HSIC0 & HSIC1 */
+#define EXYNOS_5250_HSICPHYCTRL1 0x10
+#define EXYNOS_5250_HSICPHYCTRL2 0x20
+
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_MASK (0x3 << 23)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_DEFAULT (0x2 << 23)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_MASK (0x7f << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_12 (0x24 << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_15 (0x1c << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_16 (0x1a << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_19_2 (0x15 << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_20 (0x14 << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_SIDDQ BIT(6)
+#define EXYNOS_5250_HSICPHYCTRLX_FORCESLEEP BIT(5)
+#define EXYNOS_5250_HSICPHYCTRLX_FORCESUSPEND BIT(4)
+#define EXYNOS_5250_HSICPHYCTRLX_WORDINTERFACE BIT(3)
+#define EXYNOS_5250_HSICPHYCTRLX_UTMISWRST BIT(2)
+#define EXYNOS_5250_HSICPHYCTRLX_PHYSWRST BIT(0)
+
+/* EHCI control */
+#define EXYNOS_5250_HOSTEHCICTRL 0x30
+#define EXYNOS_5250_HOSTEHCICTRL_ENAINCRXALIGN BIT(29)
+#define EXYNOS_5250_HOSTEHCICTRL_ENAINCR4 BIT(28)
+#define EXYNOS_5250_HOSTEHCICTRL_ENAINCR8 BIT(27)
+#define EXYNOS_5250_HOSTEHCICTRL_ENAINCR16 BIT(26)
+#define EXYNOS_5250_HOSTEHCICTRL_AUTOPPDONOVRCUREN BIT(25)
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL0_SHIFT 19
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL0_MASK \
+ (0x3f << EXYNOS_5250_HOSTEHCICTRL_FLADJVAL0_SHIFT)
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL1_SHIFT 13
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL1_MASK \
+ (0x3f << EXYNOS_5250_HOSTEHCICTRL_FLADJVAL1_SHIFT)
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL2_SHIFT 7
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL2_MASK \
+ (0x3f << EXYNOS_5250_HOSTEHCICTRL_FLADJVAL2_SHIFT)
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVALHOST_SHIFT 1
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVALHOST_MASK \
+ (0x1 << EXYNOS_5250_HOSTEHCICTRL_FLADJVALHOST_SHIFT)
+#define EXYNOS_5250_HOSTEHCICTRL_SIMULATIONMODE BIT(0)
+
+/* OHCI control */
+#define EXYNOS_5250_HOSTOHCICTRL 0x34
+#define EXYNOS_5250_HOSTOHCICTRL_FRAMELENVAL_SHIFT 1
+#define EXYNOS_5250_HOSTOHCICTRL_FRAMELENVAL_MASK \
+ (0x3ff << EXYNOS_5250_HOSTOHCICTRL_FRAMELENVAL_SHIFT)
+#define EXYNOS_5250_HOSTOHCICTRL_FRAMELENVALEN BIT(0)
+
+/* USBOTG */
+#define EXYNOS_5250_USBOTGSYS 0x38
+#define EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET BIT(14)
+#define EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG BIT(13)
+#define EXYNOS_5250_USBOTGSYS_PHY_SW_RST BIT(12)
+#define EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT 9
+#define EXYNOS_5250_USBOTGSYS_REFCLKSEL_MASK \
+ (0x3 << EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT)
+#define EXYNOS_5250_USBOTGSYS_ID_PULLUP BIT(8)
+#define EXYNOS_5250_USBOTGSYS_COMMON_ON BIT(7)
+#define EXYNOS_5250_USBOTGSYS_FSEL_SHIFT 4
+#define EXYNOS_5250_USBOTGSYS_FSEL_MASK \
+ (0x3 << EXYNOS_5250_USBOTGSYS_FSEL_SHIFT)
+#define EXYNOS_5250_USBOTGSYS_FORCE_SLEEP BIT(3)
+#define EXYNOS_5250_USBOTGSYS_OTGDISABLE BIT(2)
+#define EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG BIT(1)
+#define EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND BIT(0)
+
+/* Isolation, configured in the power management unit */
+#define EXYNOS_5250_USB_ISOL_OTG_OFFSET 0x704
+#define EXYNOS_5250_USB_ISOL_OTG BIT(0)
+#define EXYNOS_5250_USB_ISOL_HOST_OFFSET 0x708
+#define EXYNOS_5250_USB_ISOL_HOST BIT(0)
+
+/* Mode switch register */
+#define EXYNOS_5250_MODE_SWITCH_OFFSET 0x230
+#define EXYNOS_5250_MODE_SWITCH_MASK 1
+#define EXYNOS_5250_MODE_SWITCH_DEVICE 0
+#define EXYNOS_5250_MODE_SWITCH_HOST 1
+
+enum exynos5250_phy_id {
+ EXYNOS5250_DEVICE,
+ EXYNOS5250_HOST,
+ EXYNOS5250_HSIC0,
+ EXYNOS5250_HSIC1,
+ EXYNOS5250_NUM_PHYS,
+};
+
+/*
+ * exynos5250_rate_to_clk() converts the supplied clock rate to the value that
+ * can be written to the phy register.
+ */
+static int exynos5250_rate_to_clk(unsigned long rate, u32 *reg)
+{
+ /* EXYNOS_5250_FSEL_MASK */
+
+ switch (rate) {
+ case 9600 * KHZ:
+ *reg = EXYNOS_5250_FSEL_9MHZ6;
+ break;
+ case 10 * MHZ:
+ *reg = EXYNOS_5250_FSEL_10MHZ;
+ break;
+ case 12 * MHZ:
+ *reg = EXYNOS_5250_FSEL_12MHZ;
+ break;
+ case 19200 * KHZ:
+ *reg = EXYNOS_5250_FSEL_19MHZ2;
+ break;
+ case 20 * MHZ:
+ *reg = EXYNOS_5250_FSEL_20MHZ;
+ break;
+ case 24 * MHZ:
+ *reg = EXYNOS_5250_FSEL_24MHZ;
+ break;
+ case 50 * MHZ:
+ *reg = EXYNOS_5250_FSEL_50MHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void exynos5250_isol(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 offset;
+ u32 mask;
+
+ switch (inst->cfg->id) {
+ case EXYNOS5250_DEVICE:
+ offset = EXYNOS_5250_USB_ISOL_OTG_OFFSET;
+ mask = EXYNOS_5250_USB_ISOL_OTG;
+ break;
+ case EXYNOS5250_HOST:
+ offset = EXYNOS_5250_USB_ISOL_HOST_OFFSET;
+ mask = EXYNOS_5250_USB_ISOL_HOST;
+ break;
+ default:
+ return;
+ }
+
+ regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
+}
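Editorial aside on exynos5250_isol() above (illustrative, not part of the patch): regmap_update_bits() read-modify-writes only the bits selected by the mask, with the final argument giving their new value. As used in this file, setting the single PMU bit releases isolation and clearing it isolates the PHY:

	/*
	 * As used here: bit set   -> PHY enabled (isolation released)
	 *               bit clear -> PHY isolated
	 */
	regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);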
+
+static int exynos5250_power_on(struct samsung_usb2_phy_instance *inst)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 ctrl0;
+ u32 otg;
+ u32 ehci;
+ u32 ohci;
+ u32 hsic;
+
+ switch (inst->cfg->id) {
+ case EXYNOS5250_DEVICE:
+ regmap_update_bits(drv->reg_sys,
+ EXYNOS_5250_MODE_SWITCH_OFFSET,
+ EXYNOS_5250_MODE_SWITCH_MASK,
+ EXYNOS_5250_MODE_SWITCH_DEVICE);
+
+ /* OTG configuration */
+ otg = readl(drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ /* The clock */
+ otg &= ~EXYNOS_5250_USBOTGSYS_FSEL_MASK;
+ otg |= drv->ref_reg_val << EXYNOS_5250_USBOTGSYS_FSEL_SHIFT;
+ /* Reset */
+ otg &= ~(EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND |
+ EXYNOS_5250_USBOTGSYS_FORCE_SLEEP |
+ EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG);
+ otg |= EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
+ EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET |
+ EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
+ EXYNOS_5250_USBOTGSYS_OTGDISABLE;
+ /* Ref clock */
+ otg &= ~EXYNOS_5250_USBOTGSYS_REFCLKSEL_MASK;
+ otg |= EXYNOS_5250_REFCLKSEL_CLKCORE <<
+ EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT;
+ writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ udelay(100);
+ otg &= ~(EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
+ EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
+ EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET |
+ EXYNOS_5250_USBOTGSYS_OTGDISABLE);
+ writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+
+
+ break;
+ case EXYNOS5250_HOST:
+ case EXYNOS5250_HSIC0:
+ case EXYNOS5250_HSIC1:
+ /* Host registers configuration */
+ ctrl0 = readl(drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
+ /* The clock */
+ ctrl0 &= ~EXYNOS_5250_HOSTPHYCTRL0_FSEL_MASK;
+ ctrl0 |= drv->ref_reg_val <<
+ EXYNOS_5250_HOSTPHYCTRL0_FSEL_SHIFT;
+
+ /* Reset */
+ ctrl0 &= ~(EXYNOS_5250_HOSTPHYCTRL0_PHYSWRST |
+ EXYNOS_5250_HOSTPHYCTRL0_PHYSWRSTALL |
+ EXYNOS_5250_HOSTPHYCTRL0_SIDDQ |
+ EXYNOS_5250_HOSTPHYCTRL0_FORCESUSPEND |
+ EXYNOS_5250_HOSTPHYCTRL0_FORCESLEEP);
+ ctrl0 |= EXYNOS_5250_HOSTPHYCTRL0_LINKSWRST |
+ EXYNOS_5250_HOSTPHYCTRL0_UTMISWRST |
+ EXYNOS_5250_HOSTPHYCTRL0_COMMON_ON_N;
+ writel(ctrl0, drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
+ udelay(10);
+ ctrl0 &= ~(EXYNOS_5250_HOSTPHYCTRL0_LINKSWRST |
+ EXYNOS_5250_HOSTPHYCTRL0_UTMISWRST);
+ writel(ctrl0, drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
+
+ /* OTG configuration */
+ otg = readl(drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ /* The clock */
+ otg &= ~EXYNOS_5250_USBOTGSYS_FSEL_MASK;
+ otg |= drv->ref_reg_val << EXYNOS_5250_USBOTGSYS_FSEL_SHIFT;
+ /* Reset */
+ otg &= ~(EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND |
+ EXYNOS_5250_USBOTGSYS_FORCE_SLEEP |
+ EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG);
+ otg |= EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
+ EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET |
+ EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
+ EXYNOS_5250_USBOTGSYS_OTGDISABLE;
+ /* Ref clock */
+ otg &= ~EXYNOS_5250_USBOTGSYS_REFCLKSEL_MASK;
+ otg |= EXYNOS_5250_REFCLKSEL_CLKCORE <<
+ EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT;
+ writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ udelay(10);
+ otg &= ~(EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
+ EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
+ EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET);
+
+ /* HSIC phy configuration */
+ hsic = (EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_12 |
+ EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_DEFAULT |
+ EXYNOS_5250_HSICPHYCTRLX_PHYSWRST);
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL1);
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL2);
+ udelay(10);
+ hsic &= ~EXYNOS_5250_HSICPHYCTRLX_PHYSWRST;
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL1);
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL2);
+ /*
+ * The following delay is necessary for the reset sequence to be
+ * completed.
+ */
+ udelay(80);
+
+ /* Enable EHCI DMA burst */
+ ehci = readl(drv->reg_phy + EXYNOS_5250_HOSTEHCICTRL);
+ ehci |= EXYNOS_5250_HOSTEHCICTRL_ENAINCRXALIGN |
+ EXYNOS_5250_HOSTEHCICTRL_ENAINCR4 |
+ EXYNOS_5250_HOSTEHCICTRL_ENAINCR8 |
+ EXYNOS_5250_HOSTEHCICTRL_ENAINCR16;
+ writel(ehci, drv->reg_phy + EXYNOS_5250_HOSTEHCICTRL);
+
+ /* OHCI settings */
+ ohci = readl(drv->reg_phy + EXYNOS_5250_HOSTOHCICTRL);
+ /* Following code is based on the old driver */
+ ohci |= 0x1 << 3;
+ writel(ohci, drv->reg_phy + EXYNOS_5250_HOSTOHCICTRL);
+
+ break;
+ }
+ inst->enabled = 1;
+ exynos5250_isol(inst, 0);
+
+ return 0;
+}
+
+static int exynos5250_power_off(struct samsung_usb2_phy_instance *inst)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 ctrl0;
+ u32 otg;
+ u32 hsic;
+
+ inst->enabled = 0;
+ exynos5250_isol(inst, 1);
+
+ switch (inst->cfg->id) {
+ case EXYNOS5250_DEVICE:
+ otg = readl(drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ otg |= (EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND |
+ EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG |
+ EXYNOS_5250_USBOTGSYS_FORCE_SLEEP);
+ writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ break;
+ case EXYNOS5250_HOST:
+ ctrl0 = readl(drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
+ ctrl0 |= (EXYNOS_5250_HOSTPHYCTRL0_SIDDQ |
+ EXYNOS_5250_HOSTPHYCTRL0_FORCESUSPEND |
+ EXYNOS_5250_HOSTPHYCTRL0_FORCESLEEP |
+ EXYNOS_5250_HOSTPHYCTRL0_PHYSWRST |
+ EXYNOS_5250_HOSTPHYCTRL0_PHYSWRSTALL);
+ writel(ctrl0, drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
+ break;
+ case EXYNOS5250_HSIC0:
+ case EXYNOS5250_HSIC1:
+ hsic = (EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_12 |
+ EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_DEFAULT |
+ EXYNOS_5250_HSICPHYCTRLX_SIDDQ |
+ EXYNOS_5250_HSICPHYCTRLX_FORCESLEEP |
+ EXYNOS_5250_HSICPHYCTRLX_FORCESUSPEND
+ );
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL1);
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL2);
+ break;
+ }
+
+ return 0;
+}
+
+
+static const struct samsung_usb2_common_phy exynos5250_phys[] = {
+ {
+ .label = "device",
+ .id = EXYNOS5250_DEVICE,
+ .power_on = exynos5250_power_on,
+ .power_off = exynos5250_power_off,
+ },
+ {
+ .label = "host",
+ .id = EXYNOS5250_HOST,
+ .power_on = exynos5250_power_on,
+ .power_off = exynos5250_power_off,
+ },
+ {
+ .label = "hsic0",
+ .id = EXYNOS5250_HSIC0,
+ .power_on = exynos5250_power_on,
+ .power_off = exynos5250_power_off,
+ },
+ {
+ .label = "hsic1",
+ .id = EXYNOS5250_HSIC1,
+ .power_on = exynos5250_power_on,
+ .power_off = exynos5250_power_off,
+ },
+ {},
+};
+
+const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = {
+ .has_mode_switch = 1,
+ .num_phys = EXYNOS5250_NUM_PHYS,
+ .phys = exynos5250_phys,
+ .rate_to_clk = exynos5250_rate_to_clk,
+};
diff --git a/drivers/usb/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c
index e7253182e47d..311b4f9a5132 100644
--- a/drivers/usb/phy/phy-omap-control.c
+++ b/drivers/phy/phy-omap-control.c
@@ -1,5 +1,5 @@
/*
- * omap-control-usb.c - The USB part of control module.
+ * omap-control-phy.c - The PHY part of control module.
*
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
* This program is free software; you can redistribute it and/or modify
@@ -24,36 +24,36 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/usb/omap_control_usb.h>
+#include <linux/phy/omap_control_phy.h>
/**
- * omap_control_usb_phy_power - power on/off the phy using control module reg
+ * omap_control_phy_power - power on/off the phy using control module reg
* @dev: the control module device
* @on: 0 or 1, based on powering on or off the PHY
*/
-void omap_control_usb_phy_power(struct device *dev, int on)
+void omap_control_phy_power(struct device *dev, int on)
{
u32 val;
unsigned long rate;
- struct omap_control_usb *control_usb;
+ struct omap_control_phy *control_phy;
if (IS_ERR(dev) || !dev) {
pr_err("%s: invalid device\n", __func__);
return;
}
- control_usb = dev_get_drvdata(dev);
- if (!control_usb) {
- dev_err(dev, "%s: invalid control usb device\n", __func__);
+ control_phy = dev_get_drvdata(dev);
+ if (!control_phy) {
+ dev_err(dev, "%s: invalid control phy device\n", __func__);
return;
}
- if (control_usb->type == OMAP_CTRL_TYPE_OTGHS)
+ if (control_phy->type == OMAP_CTRL_TYPE_OTGHS)
return;
- val = readl(control_usb->power);
+ val = readl(control_phy->power);
- switch (control_usb->type) {
+ switch (control_phy->type) {
case OMAP_CTRL_TYPE_USB2:
if (on)
val &= ~OMAP_CTRL_DEV_PHY_PD;
@@ -62,19 +62,20 @@ void omap_control_usb_phy_power(struct device *dev, int on)
break;
case OMAP_CTRL_TYPE_PIPE3:
- rate = clk_get_rate(control_usb->sys_clk);
+ rate = clk_get_rate(control_phy->sys_clk);
rate = rate/1000000;
if (on) {
- val &= ~(OMAP_CTRL_USB_PWRCTL_CLK_CMD_MASK |
- OMAP_CTRL_USB_PWRCTL_CLK_FREQ_MASK);
- val |= OMAP_CTRL_USB3_PHY_TX_RX_POWERON <<
- OMAP_CTRL_USB_PWRCTL_CLK_CMD_SHIFT;
- val |= rate << OMAP_CTRL_USB_PWRCTL_CLK_FREQ_SHIFT;
+ val &= ~(OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK |
+ OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK);
+ val |= OMAP_CTRL_PIPE3_PHY_TX_RX_POWERON <<
+ OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT;
+ val |= rate <<
+ OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT;
} else {
- val &= ~OMAP_CTRL_USB_PWRCTL_CLK_CMD_MASK;
- val |= OMAP_CTRL_USB3_PHY_TX_RX_POWEROFF <<
- OMAP_CTRL_USB_PWRCTL_CLK_CMD_SHIFT;
+ val &= ~OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK;
+ val |= OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF <<
+ OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT;
}
break;
@@ -100,66 +101,66 @@ void omap_control_usb_phy_power(struct device *dev, int on)
break;
default:
dev_err(dev, "%s: type %d not recognized\n",
- __func__, control_usb->type);
+ __func__, control_phy->type);
break;
}
- writel(val, control_usb->power);
+ writel(val, control_phy->power);
}
-EXPORT_SYMBOL_GPL(omap_control_usb_phy_power);
+EXPORT_SYMBOL_GPL(omap_control_phy_power);
/**
* omap_control_usb_host_mode - set AVALID, VBUSVALID and ID pin in grounded
- * @ctrl_usb: struct omap_control_usb *
+ * @ctrl_phy: struct omap_control_phy *
*
* Writes to the mailbox register to notify the usb core that a usb
* device has been connected.
*/
-static void omap_control_usb_host_mode(struct omap_control_usb *ctrl_usb)
+static void omap_control_usb_host_mode(struct omap_control_phy *ctrl_phy)
{
u32 val;
- val = readl(ctrl_usb->otghs_control);
+ val = readl(ctrl_phy->otghs_control);
val &= ~(OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_SESSEND);
val |= OMAP_CTRL_DEV_AVALID | OMAP_CTRL_DEV_VBUSVALID;
- writel(val, ctrl_usb->otghs_control);
+ writel(val, ctrl_phy->otghs_control);
}
/**
* omap_control_usb_device_mode - set AVALID, VBUSVALID and ID pin in high
* impedance
- * @ctrl_usb: struct omap_control_usb *
+ * @ctrl_phy: struct omap_control_phy *
*
* Writes to the mailbox register to notify the usb core that it has been
* connected to a usb host.
*/
-static void omap_control_usb_device_mode(struct omap_control_usb *ctrl_usb)
+static void omap_control_usb_device_mode(struct omap_control_phy *ctrl_phy)
{
u32 val;
- val = readl(ctrl_usb->otghs_control);
+ val = readl(ctrl_phy->otghs_control);
val &= ~OMAP_CTRL_DEV_SESSEND;
val |= OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_AVALID |
OMAP_CTRL_DEV_VBUSVALID;
- writel(val, ctrl_usb->otghs_control);
+ writel(val, ctrl_phy->otghs_control);
}
/**
* omap_control_usb_set_sessionend - Enable SESSIONEND and IDIG to high
* impedance
- * @ctrl_usb: struct omap_control_usb *
+ * @ctrl_phy: struct omap_control_phy *
*
* Writes to the mailbox register to notify the usb core it's now in
* disconnected state.
*/
-static void omap_control_usb_set_sessionend(struct omap_control_usb *ctrl_usb)
+static void omap_control_usb_set_sessionend(struct omap_control_phy *ctrl_phy)
{
u32 val;
- val = readl(ctrl_usb->otghs_control);
+ val = readl(ctrl_phy->otghs_control);
val &= ~(OMAP_CTRL_DEV_AVALID | OMAP_CTRL_DEV_VBUSVALID);
val |= OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_SESSEND;
- writel(val, ctrl_usb->otghs_control);
+ writel(val, ctrl_phy->otghs_control);
}
/**
@@ -174,30 +175,30 @@ static void omap_control_usb_set_sessionend(struct omap_control_usb *ctrl_usb)
void omap_control_usb_set_mode(struct device *dev,
enum omap_control_usb_mode mode)
{
- struct omap_control_usb *ctrl_usb;
+ struct omap_control_phy *ctrl_phy;
if (IS_ERR(dev) || !dev)
return;
- ctrl_usb = dev_get_drvdata(dev);
+ ctrl_phy = dev_get_drvdata(dev);
- if (!ctrl_usb) {
- dev_err(dev, "Invalid control usb device\n");
+ if (!ctrl_phy) {
+ dev_err(dev, "Invalid control phy device\n");
return;
}
- if (ctrl_usb->type != OMAP_CTRL_TYPE_OTGHS)
+ if (ctrl_phy->type != OMAP_CTRL_TYPE_OTGHS)
return;
switch (mode) {
case USB_MODE_HOST:
- omap_control_usb_host_mode(ctrl_usb);
+ omap_control_usb_host_mode(ctrl_phy);
break;
case USB_MODE_DEVICE:
- omap_control_usb_device_mode(ctrl_usb);
+ omap_control_usb_device_mode(ctrl_phy);
break;
case USB_MODE_DISCONNECT:
- omap_control_usb_set_sessionend(ctrl_usb);
+ omap_control_usb_set_sessionend(ctrl_phy);
break;
default:
dev_vdbg(dev, "invalid omap control usb mode\n");
@@ -207,13 +208,13 @@ EXPORT_SYMBOL_GPL(omap_control_usb_set_mode);
#ifdef CONFIG_OF
-static const enum omap_control_usb_type otghs_data = OMAP_CTRL_TYPE_OTGHS;
-static const enum omap_control_usb_type usb2_data = OMAP_CTRL_TYPE_USB2;
-static const enum omap_control_usb_type pipe3_data = OMAP_CTRL_TYPE_PIPE3;
-static const enum omap_control_usb_type dra7usb2_data = OMAP_CTRL_TYPE_DRA7USB2;
-static const enum omap_control_usb_type am437usb2_data = OMAP_CTRL_TYPE_AM437USB2;
+static const enum omap_control_phy_type otghs_data = OMAP_CTRL_TYPE_OTGHS;
+static const enum omap_control_phy_type usb2_data = OMAP_CTRL_TYPE_USB2;
+static const enum omap_control_phy_type pipe3_data = OMAP_CTRL_TYPE_PIPE3;
+static const enum omap_control_phy_type dra7usb2_data = OMAP_CTRL_TYPE_DRA7USB2;
+static const enum omap_control_phy_type am437usb2_data = OMAP_CTRL_TYPE_AM437USB2;
-static const struct of_device_id omap_control_usb_id_table[] = {
+static const struct of_device_id omap_control_phy_id_table[] = {
{
.compatible = "ti,control-phy-otghs",
.data = &otghs_data,
@@ -227,93 +228,93 @@ static const struct of_device_id omap_control_usb_id_table[] = {
.data = &pipe3_data,
},
{
- .compatible = "ti,control-phy-dra7usb2",
+ .compatible = "ti,control-phy-usb2-dra7",
.data = &dra7usb2_data,
},
{
- .compatible = "ti,control-phy-am437usb2",
+ .compatible = "ti,control-phy-usb2-am437",
.data = &am437usb2_data,
},
{},
};
-MODULE_DEVICE_TABLE(of, omap_control_usb_id_table);
+MODULE_DEVICE_TABLE(of, omap_control_phy_id_table);
#endif
-static int omap_control_usb_probe(struct platform_device *pdev)
+static int omap_control_phy_probe(struct platform_device *pdev)
{
struct resource *res;
const struct of_device_id *of_id;
- struct omap_control_usb *control_usb;
+ struct omap_control_phy *control_phy;
- of_id = of_match_device(of_match_ptr(omap_control_usb_id_table),
- &pdev->dev);
+ of_id = of_match_device(of_match_ptr(omap_control_phy_id_table),
+ &pdev->dev);
if (!of_id)
return -EINVAL;
- control_usb = devm_kzalloc(&pdev->dev, sizeof(*control_usb),
+ control_phy = devm_kzalloc(&pdev->dev, sizeof(*control_phy),
GFP_KERNEL);
- if (!control_usb) {
- dev_err(&pdev->dev, "unable to alloc memory for control usb\n");
+ if (!control_phy) {
+ dev_err(&pdev->dev, "unable to alloc memory for control phy\n");
return -ENOMEM;
}
- control_usb->dev = &pdev->dev;
- control_usb->type = *(enum omap_control_usb_type *)of_id->data;
+ control_phy->dev = &pdev->dev;
+ control_phy->type = *(enum omap_control_phy_type *)of_id->data;
- if (control_usb->type == OMAP_CTRL_TYPE_OTGHS) {
+ if (control_phy->type == OMAP_CTRL_TYPE_OTGHS) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"otghs_control");
- control_usb->otghs_control = devm_ioremap_resource(
+ control_phy->otghs_control = devm_ioremap_resource(
&pdev->dev, res);
- if (IS_ERR(control_usb->otghs_control))
- return PTR_ERR(control_usb->otghs_control);
+ if (IS_ERR(control_phy->otghs_control))
+ return PTR_ERR(control_phy->otghs_control);
} else {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"power");
- control_usb->power = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(control_usb->power)) {
+ control_phy->power = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(control_phy->power)) {
dev_err(&pdev->dev, "Couldn't get power register\n");
- return PTR_ERR(control_usb->power);
+ return PTR_ERR(control_phy->power);
}
}
- if (control_usb->type == OMAP_CTRL_TYPE_PIPE3) {
- control_usb->sys_clk = devm_clk_get(control_usb->dev,
+ if (control_phy->type == OMAP_CTRL_TYPE_PIPE3) {
+ control_phy->sys_clk = devm_clk_get(control_phy->dev,
"sys_clkin");
- if (IS_ERR(control_usb->sys_clk)) {
+ if (IS_ERR(control_phy->sys_clk)) {
pr_err("%s: unable to get sys_clkin\n", __func__);
return -EINVAL;
}
}
- dev_set_drvdata(control_usb->dev, control_usb);
+ dev_set_drvdata(control_phy->dev, control_phy);
return 0;
}
-static struct platform_driver omap_control_usb_driver = {
- .probe = omap_control_usb_probe,
+static struct platform_driver omap_control_phy_driver = {
+ .probe = omap_control_phy_probe,
.driver = {
- .name = "omap-control-usb",
+ .name = "omap-control-phy",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(omap_control_usb_id_table),
+ .of_match_table = of_match_ptr(omap_control_phy_id_table),
},
};
-static int __init omap_control_usb_init(void)
+static int __init omap_control_phy_init(void)
{
- return platform_driver_register(&omap_control_usb_driver);
+ return platform_driver_register(&omap_control_phy_driver);
}
-subsys_initcall(omap_control_usb_init);
+subsys_initcall(omap_control_phy_init);
-static void __exit omap_control_usb_exit(void)
+static void __exit omap_control_phy_exit(void)
{
- platform_driver_unregister(&omap_control_usb_driver);
+ platform_driver_unregister(&omap_control_phy_driver);
}
-module_exit(omap_control_usb_exit);
+module_exit(omap_control_phy_exit);
-MODULE_ALIAS("platform: omap_control_usb");
+MODULE_ALIAS("platform: omap_control_phy");
MODULE_AUTHOR("Texas Instruments Inc.");
-MODULE_DESCRIPTION("OMAP Control Module USB Driver");
+MODULE_DESCRIPTION("OMAP Control Module PHY Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 7699752fba11..a2205a841e5e 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -21,16 +21,19 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/io.h>
-#include <linux/usb/omap_usb.h>
+#include <linux/phy/omap_usb.h>
#include <linux/usb/phy_companion.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
-#include <linux/usb/omap_control_usb.h>
+#include <linux/phy/omap_control_phy.h>
#include <linux/phy/phy.h>
#include <linux/of_platform.h>
+#define USB2PHY_DISCON_BYP_LATCH (1 << 31)
+#define USB2PHY_ANA_CONFIG1 0x4c
+
/**
* omap_usb2_set_comparator - links the comparator present in the sytem with
* this phy
@@ -98,65 +101,116 @@ static int omap_usb_set_peripheral(struct usb_otg *otg,
return 0;
}
-static int omap_usb2_suspend(struct usb_phy *x, int suspend)
+static int omap_usb_power_off(struct phy *x)
{
- struct omap_usb *phy = phy_to_omapusb(x);
- int ret;
+ struct omap_usb *phy = phy_get_drvdata(x);
- if (suspend && !phy->is_suspended) {
- omap_control_usb_phy_power(phy->control_dev, 0);
- pm_runtime_put_sync(phy->dev);
- phy->is_suspended = 1;
- } else if (!suspend && phy->is_suspended) {
- ret = pm_runtime_get_sync(phy->dev);
- if (ret < 0) {
- dev_err(phy->dev, "get_sync failed with err %d\n", ret);
- return ret;
- }
- omap_control_usb_phy_power(phy->control_dev, 1);
- phy->is_suspended = 0;
- }
+ omap_control_phy_power(phy->control_dev, 0);
return 0;
}
-static int omap_usb_power_off(struct phy *x)
+static int omap_usb_power_on(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
- omap_control_usb_phy_power(phy->control_dev, 0);
+ omap_control_phy_power(phy->control_dev, 1);
return 0;
}
-static int omap_usb_power_on(struct phy *x)
+static int omap_usb_init(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
-
- omap_control_usb_phy_power(phy->control_dev, 1);
+ u32 val;
+
+ if (phy->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
+ /*
+ * Reduce the sensitivity of the internal PHY by enabling the
+ * DISCON_BYP_LATCH bit of the USB2PHY_ANA_CONFIG1 register. This
+ * resolves issues with certain devices which can otherwise be
+ * prone to false disconnects.
+ */
+ val = omap_usb_readl(phy->phy_base, USB2PHY_ANA_CONFIG1);
+ val |= USB2PHY_DISCON_BYP_LATCH;
+ omap_usb_writel(phy->phy_base, USB2PHY_ANA_CONFIG1, val);
+ }
return 0;
}
static struct phy_ops ops = {
+ .init = omap_usb_init,
.power_on = omap_usb_power_on,
.power_off = omap_usb_power_off,
.owner = THIS_MODULE,
};
+#ifdef CONFIG_OF
+static const struct usb_phy_data omap_usb2_data = {
+ .label = "omap_usb2",
+ .flags = OMAP_USB2_HAS_START_SRP | OMAP_USB2_HAS_SET_VBUS,
+};
+
+static const struct usb_phy_data omap5_usb2_data = {
+ .label = "omap5_usb2",
+ .flags = 0,
+};
+
+static const struct usb_phy_data dra7x_usb2_data = {
+ .label = "dra7x_usb2",
+ .flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT,
+};
+
+static const struct usb_phy_data am437x_usb2_data = {
+ .label = "am437x_usb2",
+ .flags = 0,
+};
+
+static const struct of_device_id omap_usb2_id_table[] = {
+ {
+ .compatible = "ti,omap-usb2",
+ .data = &omap_usb2_data,
+ },
+ {
+ .compatible = "ti,omap5-usb2",
+ .data = &omap5_usb2_data,
+ },
+ {
+ .compatible = "ti,dra7x-usb2",
+ .data = &dra7x_usb2_data,
+ },
+ {
+ .compatible = "ti,am437x-usb2",
+ .data = &am437x_usb2_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
+#endif
+
static int omap_usb2_probe(struct platform_device *pdev)
{
struct omap_usb *phy;
struct phy *generic_phy;
+ struct resource *res;
struct phy_provider *phy_provider;
struct usb_otg *otg;
struct device_node *node = pdev->dev.of_node;
struct device_node *control_node;
struct platform_device *control_pdev;
+ const struct of_device_id *of_id;
+ struct usb_phy_data *phy_data;
+
+ of_id = of_match_device(of_match_ptr(omap_usb2_id_table), &pdev->dev);
- if (!node)
+ if (!of_id)
return -EINVAL;
+ phy_data = (struct usb_phy_data *)of_id->data;
+
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy) {
dev_err(&pdev->dev, "unable to allocate memory for USB2 PHY\n");
@@ -172,11 +226,18 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy->dev = &pdev->dev;
phy->phy.dev = phy->dev;
- phy->phy.label = "omap-usb2";
- phy->phy.set_suspend = omap_usb2_suspend;
+ phy->phy.label = phy_data->label;
phy->phy.otg = otg;
phy->phy.type = USB_PHY_TYPE_USB2;
+ if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(phy->phy_base))
+ return PTR_ERR(phy->phy_base);
+ phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
+ }
+
control_node = of_parse_phandle(node, "ctrl-module", 0);
if (!control_node) {
dev_err(&pdev->dev, "Failed to get control device phandle\n");
@@ -190,14 +251,14 @@ static int omap_usb2_probe(struct platform_device *pdev)
}
phy->control_dev = &control_pdev->dev;
-
- phy->is_suspended = 1;
- omap_control_usb_phy_power(phy->control_dev, 0);
+ omap_control_phy_power(phy->control_dev, 0);
otg->set_host = omap_usb_set_host;
otg->set_peripheral = omap_usb_set_peripheral;
- otg->set_vbus = omap_usb_set_vbus;
- otg->start_srp = omap_usb_start_srp;
+ if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS)
+ otg->set_vbus = omap_usb_set_vbus;
+ if (phy_data->flags & OMAP_USB2_HAS_START_SRP)
+ otg->start_srp = omap_usb_start_srp;
otg->phy = &phy->phy;
platform_set_drvdata(pdev, phy);
@@ -297,14 +358,6 @@ static const struct dev_pm_ops omap_usb2_pm_ops = {
#define DEV_PM_OPS NULL
#endif
-#ifdef CONFIG_OF
-static const struct of_device_id omap_usb2_id_table[] = {
- { .compatible = "ti,omap-usb2" },
- {}
-};
-MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
-#endif
-
static struct platform_driver omap_usb2_driver = {
.probe = omap_usb2_probe,
.remove = omap_usb2_remove,
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c
new file mode 100644
index 000000000000..8a8c6bc8709a
--- /dev/null
+++ b/drivers/phy/phy-samsung-usb2.c
@@ -0,0 +1,228 @@
+/*
+ * Samsung SoC USB 1.1/2.0 PHY driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include "phy-samsung-usb2.h"
+
+static int samsung_usb2_phy_power_on(struct phy *phy)
+{
+ struct samsung_usb2_phy_instance *inst = phy_get_drvdata(phy);
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ int ret;
+
+ dev_dbg(drv->dev, "Request to power_on \"%s\" usb phy\n",
+ inst->cfg->label);
+ ret = clk_prepare_enable(drv->clk);
+ if (ret)
+ goto err_main_clk;
+ ret = clk_prepare_enable(drv->ref_clk);
+ if (ret)
+ goto err_instance_clk;
+ if (inst->cfg->power_on) {
+ spin_lock(&drv->lock);
+ ret = inst->cfg->power_on(inst);
+ spin_unlock(&drv->lock);
+ }
+
+ return 0;
+
+err_instance_clk:
+ clk_disable_unprepare(drv->clk);
+err_main_clk:
+ return ret;
+}
+
+static int samsung_usb2_phy_power_off(struct phy *phy)
+{
+ struct samsung_usb2_phy_instance *inst = phy_get_drvdata(phy);
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ int ret = 0;
+
+ dev_dbg(drv->dev, "Request to power_off \"%s\" usb phy\n",
+ inst->cfg->label);
+ if (inst->cfg->power_off) {
+ spin_lock(&drv->lock);
+ ret = inst->cfg->power_off(inst);
+ spin_unlock(&drv->lock);
+ }
+ clk_disable_unprepare(drv->ref_clk);
+ clk_disable_unprepare(drv->clk);
+ return ret;
+}
+
+static struct phy_ops samsung_usb2_phy_ops = {
+ .power_on = samsung_usb2_phy_power_on,
+ .power_off = samsung_usb2_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *samsung_usb2_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct samsung_usb2_phy_driver *drv;
+
+ drv = dev_get_drvdata(dev);
+ if (!drv)
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(args->args[0] >= drv->cfg->num_phys))
+ return ERR_PTR(-ENODEV);
+
+ return drv->instances[args->args[0]].phy;
+}
+
+static const struct of_device_id samsung_usb2_phy_of_match[] = {
+#ifdef CONFIG_PHY_EXYNOS4210_USB2
+ {
+ .compatible = "samsung,exynos4210-usb2-phy",
+ .data = &exynos4210_usb2_phy_config,
+ },
+#endif
+#ifdef CONFIG_PHY_EXYNOS4X12_USB2
+ {
+ .compatible = "samsung,exynos4x12-usb2-phy",
+ .data = &exynos4x12_usb2_phy_config,
+ },
+#endif
+#ifdef CONFIG_PHY_EXYNOS5250_USB2
+ {
+ .compatible = "samsung,exynos5250-usb2-phy",
+ .data = &exynos5250_usb2_phy_config,
+ },
+#endif
+ { },
+};
+
+static int samsung_usb2_phy_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct samsung_usb2_phy_config *cfg;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct resource *mem;
+ struct samsung_usb2_phy_driver *drv;
+ int i, ret;
+
+ if (!pdev->dev.of_node) {
+ dev_err(dev, "This driver is required to be instantiated from device tree\n");
+ return -EINVAL;
+ }
+
+ match = of_match_node(samsung_usb2_phy_of_match, pdev->dev.of_node);
+ if (!match) {
+ dev_err(dev, "of_match_node() failed\n");
+ return -EINVAL;
+ }
+ cfg = match->data;
+
+ drv = devm_kzalloc(dev, sizeof(struct samsung_usb2_phy_driver) +
+ cfg->num_phys * sizeof(struct samsung_usb2_phy_instance),
+ GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, drv);
+ spin_lock_init(&drv->lock);
+
+ drv->cfg = cfg;
+ drv->dev = dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->reg_phy = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(drv->reg_phy)) {
+ dev_err(dev, "Failed to map register memory (phy)\n");
+ return PTR_ERR(drv->reg_phy);
+ }
+
+ drv->reg_pmu = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "samsung,pmureg-phandle");
+ if (IS_ERR(drv->reg_pmu)) {
+ dev_err(dev, "Failed to map PMU registers (via syscon)\n");
+ return PTR_ERR(drv->reg_pmu);
+ }
+
+ if (drv->cfg->has_mode_switch) {
+ drv->reg_sys = syscon_regmap_lookup_by_phandle(
+ pdev->dev.of_node, "samsung,sysreg-phandle");
+ if (IS_ERR(drv->reg_sys)) {
+ dev_err(dev, "Failed to map system registers (via syscon)\n");
+ return PTR_ERR(drv->reg_sys);
+ }
+ }
+
+ drv->clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(drv->clk)) {
+ dev_err(dev, "Failed to get clock of phy controller\n");
+ return PTR_ERR(drv->clk);
+ }
+
+ drv->ref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(drv->ref_clk)) {
+ dev_err(dev, "Failed to get reference clock for the phy controller\n");
+ return PTR_ERR(drv->ref_clk);
+ }
+
+ drv->ref_rate = clk_get_rate(drv->ref_clk);
+ if (drv->cfg->rate_to_clk) {
+ ret = drv->cfg->rate_to_clk(drv->ref_rate, &drv->ref_reg_val);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < drv->cfg->num_phys; i++) {
+ char *label = drv->cfg->phys[i].label;
+ struct samsung_usb2_phy_instance *p = &drv->instances[i];
+
+ dev_dbg(dev, "Creating phy \"%s\"\n", label);
+ p->phy = devm_phy_create(dev, &samsung_usb2_phy_ops, NULL);
+ if (IS_ERR(p->phy)) {
+ dev_err(drv->dev, "Failed to create usb2_phy \"%s\"\n",
+ label);
+ return PTR_ERR(p->phy);
+ }
+
+ p->cfg = &drv->cfg->phys[i];
+ p->drv = drv;
+ phy_set_bus_width(p->phy, 8);
+ phy_set_drvdata(p->phy, p);
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev,
+ samsung_usb2_phy_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(drv->dev, "Failed to register phy provider\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ return 0;
+}
+
+static struct platform_driver samsung_usb2_phy_driver = {
+ .probe = samsung_usb2_phy_probe,
+ .driver = {
+ .of_match_table = samsung_usb2_phy_of_match,
+ .name = "samsung-usb2-phy",
+ .owner = THIS_MODULE,
+ }
+};
+
+module_platform_driver(samsung_usb2_phy_driver);
+MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC USB PHY driver");
+MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:samsung-usb2-phy");
diff --git a/drivers/phy/phy-samsung-usb2.h b/drivers/phy/phy-samsung-usb2.h
new file mode 100644
index 000000000000..45b3170652bd
--- /dev/null
+++ b/drivers/phy/phy-samsung-usb2.h
@@ -0,0 +1,67 @@
+/*
+ * Samsung SoC USB 1.1/2.0 PHY driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PHY_EXYNOS_USB2_H
+#define _PHY_EXYNOS_USB2_H
+
+#include <linux/clk.h>
+#include <linux/phy/phy.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#define KHZ 1000
+#define MHZ (KHZ * KHZ)
+
+struct samsung_usb2_phy_driver;
+struct samsung_usb2_phy_instance;
+struct samsung_usb2_phy_config;
+
+struct samsung_usb2_phy_instance {
+ const struct samsung_usb2_common_phy *cfg;
+ struct phy *phy;
+ struct samsung_usb2_phy_driver *drv;
+ bool enabled;
+};
+
+struct samsung_usb2_phy_driver {
+ const struct samsung_usb2_phy_config *cfg;
+ struct clk *clk;
+ struct clk *ref_clk;
+ unsigned long ref_rate;
+ u32 ref_reg_val;
+ struct device *dev;
+ void __iomem *reg_phy;
+ struct regmap *reg_pmu;
+ struct regmap *reg_sys;
+ spinlock_t lock;
+ struct samsung_usb2_phy_instance instances[0];
+};
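Editorial aside (not part of the patch): instances[0] is the pre-C99 zero-length-array idiom for a trailing flexible array. The common probe code in phy-samsung-usb2.c above sizes the whole object in a single allocation so the per-port instances sit directly behind the driver state; a restatement of that allocation using sizeof() on the fields rather than the type names is:

	/* one devm allocation covers the driver struct plus all instances */
	drv = devm_kzalloc(dev, sizeof(*drv) +
			   cfg->num_phys * sizeof(drv->instances[0]),
			   GFP_KERNEL);
	if (!drv)
		return -ENOMEM;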
+
+struct samsung_usb2_common_phy {
+ int (*power_on)(struct samsung_usb2_phy_instance *);
+ int (*power_off)(struct samsung_usb2_phy_instance *);
+ unsigned int id;
+ char *label;
+};
+
+
+struct samsung_usb2_phy_config {
+ const struct samsung_usb2_common_phy *phys;
+ int (*rate_to_clk)(unsigned long, u32 *);
+ unsigned int num_phys;
+ bool has_mode_switch;
+};
+
+extern const struct samsung_usb2_phy_config exynos4210_usb2_phy_config;
+extern const struct samsung_usb2_phy_config exynos4x12_usb2_phy_config;
+extern const struct samsung_usb2_phy_config exynos5250_usb2_phy_config;
+#endif
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
new file mode 100644
index 000000000000..e6e6c4ba7145
--- /dev/null
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -0,0 +1,331 @@
+/*
+ * Allwinner sun4i USB phy driver
+ *
+ * Copyright (C) 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ *
+ * Modelled after: Samsung S5P/EXYNOS SoC series MIPI CSIS/DSIM DPHY driver
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#define REG_ISCR 0x00
+#define REG_PHYCTL 0x04
+#define REG_PHYBIST 0x08
+#define REG_PHYTUNE 0x0c
+
+#define PHYCTL_DATA BIT(7)
+
+#define SUNXI_AHB_ICHR8_EN BIT(10)
+#define SUNXI_AHB_INCR4_BURST_EN BIT(9)
+#define SUNXI_AHB_INCRX_ALIGN_EN BIT(8)
+#define SUNXI_ULPI_BYPASS_EN BIT(0)
+
+/* Common Control Bits for Both PHYs */
+#define PHY_PLL_BW 0x03
+#define PHY_RES45_CAL_EN 0x0c
+
+/* Private Control Bits for Each PHY */
+#define PHY_TX_AMPLITUDE_TUNE 0x20
+#define PHY_TX_SLEWRATE_TUNE 0x22
+#define PHY_VBUSVALID_TH_SEL 0x25
+#define PHY_PULLUP_RES_SEL 0x27
+#define PHY_OTG_FUNC_EN 0x28
+#define PHY_VBUS_DET_EN 0x29
+#define PHY_DISCON_TH_SEL 0x2a
+
+#define MAX_PHYS 3
+
+struct sun4i_usb_phy_data {
+ struct clk *clk;
+ void __iomem *base;
+ struct mutex mutex;
+ int num_phys;
+ u32 disc_thresh;
+ struct sun4i_usb_phy {
+ struct phy *phy;
+ void __iomem *pmu;
+ struct regulator *vbus;
+ struct reset_control *reset;
+ int index;
+ } phys[MAX_PHYS];
+};
+
+#define to_sun4i_usb_phy_data(phy) \
+ container_of((phy), struct sun4i_usb_phy_data, phys[(phy)->index])
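Editorial aside (not part of the patch): to_sun4i_usb_phy_data() recovers the enclosing sun4i_usb_phy_data from a pointer to one element of its phys[] array by subtracting the offset of that particular element, phys[(phy)->index]. A minimal userspace C sketch with hypothetical names (outer/inner) and a fixed index shows the mechanics:

	#include <stddef.h>
	#include <stdio.h>

	struct outer {
		int num_phys;
		struct inner { int index; } phys[3];
	};

	int main(void)
	{
		struct outer o = { .num_phys = 3,
				   .phys = { { 0 }, { 1 }, { 2 } } };
		struct inner *p = &o.phys[2];

		/* Subtracting the offset of the *specific* element recovers
		 * the enclosing object, which is what the driver macro does
		 * with the element's stored index. */
		struct outer *back = (struct outer *)
			((char *)p - offsetof(struct outer, phys[2]));

		printf("num_phys = %d\n", back->num_phys);	/* prints 3 */
		return 0;
	}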
+
+static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
+ int len)
+{
+ struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
+ u32 temp, usbc_bit = BIT(phy->index * 2);
+ int i;
+
+ mutex_lock(&phy_data->mutex);
+
+ for (i = 0; i < len; i++) {
+ temp = readl(phy_data->base + REG_PHYCTL);
+
+ /* clear the address portion */
+ temp &= ~(0xff << 8);
+
+ /* set the address */
+ temp |= ((addr + i) << 8);
+ writel(temp, phy_data->base + REG_PHYCTL);
+
+ /* set the data bit and clear usbc bit */
+ temp = readb(phy_data->base + REG_PHYCTL);
+ if (data & 0x1)
+ temp |= PHYCTL_DATA;
+ else
+ temp &= ~PHYCTL_DATA;
+ temp &= ~usbc_bit;
+ writeb(temp, phy_data->base + REG_PHYCTL);
+
+ /* pulse usbc_bit */
+ temp = readb(phy_data->base + REG_PHYCTL);
+ temp |= usbc_bit;
+ writeb(temp, phy_data->base + REG_PHYCTL);
+
+ temp = readb(phy_data->base + REG_PHYCTL);
+ temp &= ~usbc_bit;
+ writeb(temp, phy_data->base + REG_PHYCTL);
+
+ data >>= 1;
+ }
+ mutex_unlock(&phy_data->mutex);
+}
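Editorial aside (not part of the patch), summarizing the bit-banged register protocol implemented above:

	/*
	 * For each of the 'len' bits the helper latches the bit address
	 * (addr + i) into bits 15:8 of REG_PHYCTL, drives the data bit
	 * through PHYCTL_DATA, and pulses this PHY's strobe bit
	 * (BIT(index * 2)); 'data' is consumed LSB first.  The mutex
	 * serializes the sequence because the control register is shared
	 * by all PHYs.
	 */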
+
+static void sun4i_usb_phy_passby(struct sun4i_usb_phy *phy, int enable)
+{
+ u32 bits, reg_value;
+
+ if (!phy->pmu)
+ return;
+
+ bits = SUNXI_AHB_ICHR8_EN | SUNXI_AHB_INCR4_BURST_EN |
+ SUNXI_AHB_INCRX_ALIGN_EN | SUNXI_ULPI_BYPASS_EN;
+
+ reg_value = readl(phy->pmu);
+
+ if (enable)
+ reg_value |= bits;
+ else
+ reg_value &= ~bits;
+
+ writel(reg_value, phy->pmu);
+}
+
+static int sun4i_usb_phy_init(struct phy *_phy)
+{
+ struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+ struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+ int ret;
+
+ ret = clk_prepare_enable(data->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(phy->reset);
+ if (ret) {
+ clk_disable_unprepare(data->clk);
+ return ret;
+ }
+
+ /* Adjust PHY's magnitude and rate */
+ sun4i_usb_phy_write(phy, PHY_TX_AMPLITUDE_TUNE, 0x14, 5);
+
+ /* Disconnect threshold adjustment */
+ sun4i_usb_phy_write(phy, PHY_DISCON_TH_SEL, data->disc_thresh, 2);
+
+ sun4i_usb_phy_passby(phy, 1);
+
+ return 0;
+}
+
+static int sun4i_usb_phy_exit(struct phy *_phy)
+{
+ struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+ struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+
+ sun4i_usb_phy_passby(phy, 0);
+ reset_control_assert(phy->reset);
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static int sun4i_usb_phy_power_on(struct phy *_phy)
+{
+ struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+ int ret = 0;
+
+ if (phy->vbus)
+ ret = regulator_enable(phy->vbus);
+
+ return ret;
+}
+
+static int sun4i_usb_phy_power_off(struct phy *_phy)
+{
+ struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+
+ if (phy->vbus)
+ regulator_disable(phy->vbus);
+
+ return 0;
+}
+
+static struct phy_ops sun4i_usb_phy_ops = {
+ .init = sun4i_usb_phy_init,
+ .exit = sun4i_usb_phy_exit,
+ .power_on = sun4i_usb_phy_power_on,
+ .power_off = sun4i_usb_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *sun4i_usb_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
+
+ if (WARN_ON(args->args[0] == 0 || args->args[0] >= data->num_phys))
+ return ERR_PTR(-ENODEV);
+
+ return data->phys[args->args[0]].phy;
+}
+
+static int sun4i_usb_phy_probe(struct platform_device *pdev)
+{
+ struct sun4i_usb_phy_data *data;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *pmu = NULL;
+ struct phy_provider *phy_provider;
+ struct reset_control *reset;
+ struct regulator *vbus;
+ struct resource *res;
+ struct phy *phy;
+ char name[16];
+ int i;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->mutex);
+
+ if (of_device_is_compatible(np, "allwinner,sun5i-a13-usb-phy"))
+ data->num_phys = 2;
+ else
+ data->num_phys = 3;
+
+ if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy"))
+ data->disc_thresh = 3;
+ else
+ data->disc_thresh = 2;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_ctrl");
+ data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->clk = devm_clk_get(dev, "usb_phy");
+ if (IS_ERR(data->clk)) {
+ dev_err(dev, "could not get usb_phy clock\n");
+ return PTR_ERR(data->clk);
+ }
+
+ /* Skip phy 0; it is the otg phy, which is not yet supported. */
+ for (i = 1; i < data->num_phys; i++) {
+ snprintf(name, sizeof(name), "usb%d_vbus", i);
+ vbus = devm_regulator_get_optional(dev, name);
+ if (IS_ERR(vbus)) {
+ if (PTR_ERR(vbus) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ vbus = NULL;
+ }
+
+ snprintf(name, sizeof(name), "usb%d_reset", i);
+ reset = devm_reset_control_get(dev, name);
+ if (IS_ERR(reset)) {
+ dev_err(dev, "failed to get reset %s\n", name);
+ return PTR_ERR(reset);
+ }
+
+ if (i) { /* No pmu for usbc0 */
+ snprintf(name, sizeof(name), "pmu%d", i);
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, name);
+ pmu = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pmu))
+ return PTR_ERR(pmu);
+ }
+
+ phy = devm_phy_create(dev, &sun4i_usb_phy_ops, NULL);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create PHY %d\n", i);
+ return PTR_ERR(phy);
+ }
+
+ data->phys[i].phy = phy;
+ data->phys[i].pmu = pmu;
+ data->phys[i].vbus = vbus;
+ data->phys[i].reset = reset;
+ data->phys[i].index = i;
+ phy_set_drvdata(phy, &data->phys[i]);
+ }
+
+ dev_set_drvdata(dev, data);
+ phy_provider = devm_of_phy_provider_register(dev, sun4i_usb_phy_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_usb_phy_of_match[] = {
+ { .compatible = "allwinner,sun4i-a10-usb-phy" },
+ { .compatible = "allwinner,sun5i-a13-usb-phy" },
+ { .compatible = "allwinner,sun7i-a20-usb-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match);
+
+static struct platform_driver sun4i_usb_phy_driver = {
+ .probe = sun4i_usb_phy_probe,
+ .driver = {
+ .of_match_table = sun4i_usb_phy_of_match,
+ .name = "sun4i-usb-phy",
+ .owner = THIS_MODULE,
+ }
+};
+module_platform_driver(sun4i_usb_phy_driver);
+
+MODULE_DESCRIPTION("Allwinner sun4i USB phy driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
new file mode 100644
index 000000000000..591367654613
--- /dev/null
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -0,0 +1,470 @@
+/*
+ * phy-ti-pipe3 - PIPE3 PHY driver.
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/phy/phy.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/phy/omap_control_phy.h>
+#include <linux/of_platform.h>
+
+#define PLL_STATUS 0x00000004
+#define PLL_GO 0x00000008
+#define PLL_CONFIGURATION1 0x0000000C
+#define PLL_CONFIGURATION2 0x00000010
+#define PLL_CONFIGURATION3 0x00000014
+#define PLL_CONFIGURATION4 0x00000020
+
+#define PLL_REGM_MASK 0x001FFE00
+#define PLL_REGM_SHIFT 0x9
+#define PLL_REGM_F_MASK 0x0003FFFF
+#define PLL_REGM_F_SHIFT 0x0
+#define PLL_REGN_MASK 0x000001FE
+#define PLL_REGN_SHIFT 0x1
+#define PLL_SELFREQDCO_MASK 0x0000000E
+#define PLL_SELFREQDCO_SHIFT 0x1
+#define PLL_SD_MASK 0x0003FC00
+#define PLL_SD_SHIFT 10
+#define SET_PLL_GO 0x1
+#define PLL_LDOPWDN BIT(15)
+#define PLL_TICOPWDN BIT(16)
+#define PLL_LOCK 0x2
+#define PLL_IDLE 0x1
+
+/*
+ * This is an empirical value that works; the actual value required for
+ * the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status to be correctly
+ * reflected in the PIPE3PHY_PLL_STATUS register still needs to be
+ * confirmed.
+ */
+#define PLL_IDLE_TIME 100 /* in milliseconds */
+#define PLL_LOCK_TIME 100 /* in milliseconds */
+
+struct pipe3_dpll_params {
+ u16 m;
+ u8 n;
+ u8 freq:3;
+ u8 sd;
+ u32 mf;
+};
+
+struct pipe3_dpll_map {
+ unsigned long rate;
+ struct pipe3_dpll_params params;
+};
+
+struct ti_pipe3 {
+ void __iomem *pll_ctrl_base;
+ struct device *dev;
+ struct device *control_dev;
+ struct clk *wkupclk;
+ struct clk *sys_clk;
+ struct clk *refclk;
+ struct pipe3_dpll_map *dpll_map;
+};
+
+static struct pipe3_dpll_map dpll_map_usb[] = {
+ {12000000, {1250, 5, 4, 20, 0} }, /* 12 MHz */
+ {16800000, {3125, 20, 4, 20, 0} }, /* 16.8 MHz */
+ {19200000, {1172, 8, 4, 20, 65537} }, /* 19.2 MHz */
+ {20000000, {1000, 7, 4, 10, 0} }, /* 20 MHz */
+ {26000000, {1250, 12, 4, 20, 0} }, /* 26 MHz */
+ {38400000, {3125, 47, 4, 20, 92843} }, /* 38.4 MHz */
+ { }, /* Terminator */
+};
+
+static struct pipe3_dpll_map dpll_map_sata[] = {
+ {12000000, {1000, 7, 4, 6, 0} }, /* 12 MHz */
+ {16800000, {714, 7, 4, 6, 0} }, /* 16.8 MHz */
+ {19200000, {625, 7, 4, 6, 0} }, /* 19.2 MHz */
+ {20000000, {600, 7, 4, 6, 0} }, /* 20 MHz */
+ {26000000, {461, 7, 4, 6, 0} }, /* 26 MHz */
+ {38400000, {312, 7, 4, 6, 0} }, /* 38.4 MHz */
+ { }, /* Terminator */
+};
+
+static inline u32 ti_pipe3_readl(void __iomem *addr, unsigned offset)
+{
+ return __raw_readl(addr + offset);
+}
+
+static inline void ti_pipe3_writel(void __iomem *addr, unsigned offset,
+ u32 data)
+{
+ __raw_writel(data, addr + offset);
+}
+
+static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
+{
+ unsigned long rate;
+ struct pipe3_dpll_map *dpll_map = phy->dpll_map;
+
+ rate = clk_get_rate(phy->sys_clk);
+
+ for (; dpll_map->rate; dpll_map++) {
+ if (rate == dpll_map->rate)
+ return &dpll_map->params;
+ }
+
+ dev_err(phy->dev, "No DPLL configuration for %lu Hz SYS CLK\n", rate);
+
+ return NULL;
+}
+
+static int ti_pipe3_power_off(struct phy *x)
+{
+ struct ti_pipe3 *phy = phy_get_drvdata(x);
+
+ omap_control_phy_power(phy->control_dev, 0);
+
+ return 0;
+}
+
+static int ti_pipe3_power_on(struct phy *x)
+{
+ struct ti_pipe3 *phy = phy_get_drvdata(x);
+
+ omap_control_phy_power(phy->control_dev, 1);
+
+ return 0;
+}
+
+static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy)
+{
+ u32 val;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(PLL_LOCK_TIME);
+ do {
+ cpu_relax();
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if (val & PLL_LOCK)
+ break;
+ } while (!time_after(jiffies, timeout));
+
+ if (!(val & PLL_LOCK)) {
+ dev_err(phy->dev, "DPLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int ti_pipe3_dpll_program(struct ti_pipe3 *phy)
+{
+ u32 val;
+ struct pipe3_dpll_params *dpll_params;
+
+ dpll_params = ti_pipe3_get_dpll_params(phy);
+ if (!dpll_params)
+ return -EINVAL;
+
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
+ val &= ~PLL_REGN_MASK;
+ val |= dpll_params->n << PLL_REGN_SHIFT;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
+
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ val &= ~PLL_SELFREQDCO_MASK;
+ val |= dpll_params->freq << PLL_SELFREQDCO_SHIFT;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
+ val &= ~PLL_REGM_MASK;
+ val |= dpll_params->m << PLL_REGM_SHIFT;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
+
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION4);
+ val &= ~PLL_REGM_F_MASK;
+ val |= dpll_params->mf << PLL_REGM_F_SHIFT;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION4, val);
+
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION3);
+ val &= ~PLL_SD_MASK;
+ val |= dpll_params->sd << PLL_SD_SHIFT;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION3, val);
+
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_GO, SET_PLL_GO);
+
+ return ti_pipe3_dpll_wait_lock(phy);
+}
+
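+/*
+ * PHY init: take the DPLL out of IDLE if it was idled, then program and
+ * lock it only when it is not already locked.
+ */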
+static int ti_pipe3_init(struct phy *x)
+{
+ struct ti_pipe3 *phy = phy_get_drvdata(x);
+ u32 val;
+ int ret = 0;
+
+ /* Bring it out of IDLE if it is IDLE */
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ if (val & PLL_IDLE) {
+ val &= ~PLL_IDLE;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+ ret = ti_pipe3_dpll_wait_lock(phy);
+ }
+
+ /* Program the DPLL only if not locked */
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if (!(val & PLL_LOCK))
+ if (ti_pipe3_dpll_program(phy))
+ return -EINVAL;
+
+ return ret;
+}
+
+static int ti_pipe3_exit(struct phy *x)
+{
+ struct ti_pipe3 *phy = phy_get_drvdata(x);
+ u32 val;
+ unsigned long timeout;
+
+ /* SATA DPLL can't be powered down due to Erratum i783 */
+ if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
+ return 0;
+
+ /* Put DPLL in IDLE mode */
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ val |= PLL_IDLE;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+
+ /* wait for LDO and Oscillator to power down */
+ timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
+ do {
+ cpu_relax();
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
+ break;
+ } while (!time_after(jiffies, timeout));
+
+ if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
+ dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
+ val);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static struct phy_ops ops = {
+ .init = ti_pipe3_init,
+ .exit = ti_pipe3_exit,
+ .power_on = ti_pipe3_power_on,
+ .power_off = ti_pipe3_power_off,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id ti_pipe3_id_table[];
+#endif
+
+static int ti_pipe3_probe(struct platform_device *pdev)
+{
+ struct ti_pipe3 *phy;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *control_node;
+ struct platform_device *control_pdev;
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(ti_pipe3_id_table), &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy) {
+ dev_err(&pdev->dev, "unable to alloc mem for TI PIPE3 PHY\n");
+ return -ENOMEM;
+ }
+
+ phy->dpll_map = (struct pipe3_dpll_map *)match->data;
+ if (!phy->dpll_map) {
+ dev_err(&pdev->dev, "no DPLL data\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll_ctrl");
+ phy->pll_ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(phy->pll_ctrl_base))
+ return PTR_ERR(phy->pll_ctrl_base);
+
+ phy->dev = &pdev->dev;
+
+ if (!of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
+
+ phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
+ if (IS_ERR(phy->wkupclk)) {
+ dev_err(&pdev->dev, "unable to get wkupclk\n");
+ return PTR_ERR(phy->wkupclk);
+ }
+
+ phy->refclk = devm_clk_get(phy->dev, "refclk");
+ if (IS_ERR(phy->refclk)) {
+ dev_err(&pdev->dev, "unable to get refclk\n");
+ return PTR_ERR(phy->refclk);
+ }
+ } else {
+ phy->wkupclk = ERR_PTR(-ENODEV);
+ phy->refclk = ERR_PTR(-ENODEV);
+ }
+
+ phy->sys_clk = devm_clk_get(phy->dev, "sysclk");
+ if (IS_ERR(phy->sys_clk)) {
+ dev_err(&pdev->dev, "unable to get sysclk\n");
+ return -EINVAL;
+ }
+
+ control_node = of_parse_phandle(node, "ctrl-module", 0);
+ if (!control_node) {
+ dev_err(&pdev->dev, "Failed to get control device phandle\n");
+ return -EINVAL;
+ }
+
+ control_pdev = of_find_device_by_node(control_node);
+ if (!control_pdev) {
+ dev_err(&pdev->dev, "Failed to get control device\n");
+ return -EINVAL;
+ }
+
+ phy->control_dev = &control_pdev->dev;
+
+ omap_control_phy_power(phy->control_dev, 0);
+
+ platform_set_drvdata(pdev, phy);
+ pm_runtime_enable(phy->dev);
+
+ generic_phy = devm_phy_create(phy->dev, &ops, NULL);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, phy);
+ phy_provider = devm_of_phy_provider_register(phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ pm_runtime_get(&pdev->dev);
+
+ return 0;
+}
+
+static int ti_pipe3_remove(struct platform_device *pdev)
+{
+ if (!pm_runtime_suspended(&pdev->dev))
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+
+static int ti_pipe3_runtime_suspend(struct device *dev)
+{
+ struct ti_pipe3 *phy = dev_get_drvdata(dev);
+
+ if (!IS_ERR(phy->wkupclk))
+ clk_disable_unprepare(phy->wkupclk);
+ if (!IS_ERR(phy->refclk))
+ clk_disable_unprepare(phy->refclk);
+
+ return 0;
+}
+
+static int ti_pipe3_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct ti_pipe3 *phy = dev_get_drvdata(dev);
+
+ if (!IS_ERR(phy->refclk)) {
+ ret = clk_prepare_enable(phy->refclk);
+ if (ret) {
+ dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
+ goto err1;
+ }
+ }
+
+ if (!IS_ERR(phy->wkupclk)) {
+ ret = clk_prepare_enable(phy->wkupclk);
+ if (ret) {
+ dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
+ goto err2;
+ }
+ }
+
+ return 0;
+
+err2:
+ if (!IS_ERR(phy->refclk))
+ clk_disable_unprepare(phy->refclk);
+
+err1:
+ return ret;
+}
+
+static const struct dev_pm_ops ti_pipe3_pm_ops = {
+ SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
+ ti_pipe3_runtime_resume, NULL)
+};
+
+#define DEV_PM_OPS (&ti_pipe3_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id ti_pipe3_id_table[] = {
+ {
+ .compatible = "ti,phy-usb3",
+ .data = dpll_map_usb,
+ },
+ {
+ .compatible = "ti,omap-usb3",
+ .data = dpll_map_usb,
+ },
+ {
+ .compatible = "ti,phy-pipe3-sata",
+ .data = dpll_map_sata,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ti_pipe3_id_table);
+#endif
+
+static struct platform_driver ti_pipe3_driver = {
+ .probe = ti_pipe3_probe,
+ .remove = ti_pipe3_remove,
+ .driver = {
+ .name = "ti-pipe3",
+ .owner = THIS_MODULE,
+ .pm = DEV_PM_OPS,
+ .of_match_table = of_match_ptr(ti_pipe3_id_table),
+ },
+};
+
+module_platform_driver(ti_pipe3_driver);
+
+MODULE_ALIAS("platform:ti_pipe3");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("TI PIPE3 phy driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index c3ace1db8136..2e0e9b3774c8 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -338,7 +338,7 @@ static void twl4030_usb_set_mode(struct twl4030_usb *twl, int mode)
dev_err(twl->dev, "unsupported T2 transceiver mode %d\n",
mode);
break;
- };
+ }
}
static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
@@ -661,7 +661,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct phy_init_data *init_data = NULL;
- twl = devm_kzalloc(&pdev->dev, sizeof *twl, GFP_KERNEL);
+ twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
return -ENOMEM;
@@ -676,7 +676,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
return -EINVAL;
}
- otg = devm_kzalloc(&pdev->dev, sizeof *otg, GFP_KERNEL);
+ otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
new file mode 100644
index 000000000000..4aa1ccd1511f
--- /dev/null
+++ b/drivers/phy/phy-xgene.c
@@ -0,0 +1,1750 @@
+/*
+ * AppliedMicro X-Gene Multi-purpose PHY driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Loc Ho <lho@apm.com>
+ * Tuan Phan <tphan@apm.com>
+ * Suman Tripathi <stripathi@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The APM X-Gene PHY consists of two PLL clock macros (CMU) and lanes.
+ * The first PLL clock macro is used for the internal reference clock. The
+ * second PLL clock macro is used to generate the clock for the PHY. This
+ * driver configures the first PLL CMU, the second PLL CMU, and programs
+ * the PHY to operate according to the mode of operation. The first PLL
+ * CMU is only required if the internal clock is enabled.
+ *
+ * Logical layout of the HW module units:
+ *
+ *     -----------------
+ *     | Internal      |         |------|
+ *     | Ref PLL CMU   |---------|      |     -------------    ---------
+ *     -----------------         | MUX  |-----|PHY PLL CMU|----| Serdes|
+ *                               |      |     |           |    ---------
+ *     External Clock -----------|      |     -------------
+ *                               |------|
+ *
+ * The Ref PLL CMU CSR (Configuration System Registers) is accessed
+ * indirectly from the SDS offset at 0x2000. It is only required for
+ * internal reference clock.
+ * The PHY PLL CMU CSR is accessed indirectly from the SDS offset at 0x0000.
+ * The Serdes CSR is accessed indirectly from the SDS offset at 0x0400.
+ *
+ * The Ref PLL CMU can be located within the same PHY IP or outside the
+ * PHY IP when the Ref PLL CMU is shared with another IP. For a PHY whose
+ * Ref PLL CMU is shared with another IP, it is located outside the PHY IP.
+ * This is the case for the PHY located at 0x1f23a000 (SATA Port 4/5). For
+ * such a PHY, another resource is required to locate the SDS/Ref PLL CMU
+ * module, and the clock for that IP must be enabled.
+ *
+ * Currently, this driver only supports Gen3 SATA mode with external clock.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/phy/phy.h>
+#include <linux/clk.h>
+
+/* Max 2 lanes per PHY unit */
+#define MAX_LANE 2
+
+/* Register offset inside the PHY */
+#define SERDES_PLL_INDIRECT_OFFSET 0x0000
+#define SERDES_PLL_REF_INDIRECT_OFFSET 0x2000
+#define SERDES_INDIRECT_OFFSET 0x0400
+#define SERDES_LANE_STRIDE 0x0200
+
+/* Some default Serdes parameters */
+#define DEFAULT_SATA_TXBOOST_GAIN { 0x1e, 0x1e, 0x1e }
+#define DEFAULT_SATA_TXEYEDIRECTION { 0x0, 0x0, 0x0 }
+#define DEFAULT_SATA_TXEYETUNING { 0xa, 0xa, 0xa }
+#define DEFAULT_SATA_SPD_SEL { 0x1, 0x3, 0x7 }
+#define DEFAULT_SATA_TXAMP { 0x8, 0x8, 0x8 }
+#define DEFAULT_SATA_TXCN1 { 0x2, 0x2, 0x2 }
+#define DEFAULT_SATA_TXCN2 { 0x0, 0x0, 0x0 }
+#define DEFAULT_SATA_TXCP1 { 0xa, 0xa, 0xa }
+
+#define SATA_SPD_SEL_GEN3 0x7
+#define SATA_SPD_SEL_GEN2 0x3
+#define SATA_SPD_SEL_GEN1 0x1
+
+#define SSC_DISABLE 0
+#define SSC_ENABLE 1
+
+#define FBDIV_VAL_50M 0x77
+#define REFDIV_VAL_50M 0x1
+#define FBDIV_VAL_100M 0x3B
+#define REFDIV_VAL_100M 0x0
+
+/* SATA Clock/Reset CSR */
+#define SATACLKENREG 0x00000000
+#define SATA0_CORE_CLKEN 0x00000002
+#define SATA1_CORE_CLKEN 0x00000004
+#define SATASRESETREG 0x00000004
+#define SATA_MEM_RESET_MASK 0x00000020
+#define SATA_MEM_RESET_RD(src) (((src) & 0x00000020) >> 5)
+#define SATA_SDS_RESET_MASK 0x00000004
+#define SATA_CSR_RESET_MASK 0x00000001
+#define SATA_CORE_RESET_MASK 0x00000002
+#define SATA_PMCLK_RESET_MASK 0x00000010
+#define SATA_PCLK_RESET_MASK 0x00000008
+
+/* SDS CSR used for PHY Indirect access */
+#define SATA_ENET_SDS_PCS_CTL0 0x00000000
+#define REGSPEC_CFG_I_TX_WORDMODE0_SET(dst, src) \
+ (((dst) & ~0x00070000) | (((u32) (src) << 16) & 0x00070000))
+#define REGSPEC_CFG_I_RX_WORDMODE0_SET(dst, src) \
+ (((dst) & ~0x00e00000) | (((u32) (src) << 21) & 0x00e00000))
+#define SATA_ENET_SDS_CTL0 0x0000000c
+#define REGSPEC_CFG_I_CUSTOMER_PIN_MODE0_SET(dst, src) \
+ (((dst) & ~0x00007fff) | (((u32) (src)) & 0x00007fff))
+#define SATA_ENET_SDS_CTL1 0x00000010
+#define CFG_I_SPD_SEL_CDR_OVR1_SET(dst, src) \
+ (((dst) & ~0x0000000f) | (((u32) (src)) & 0x0000000f))
+#define SATA_ENET_SDS_RST_CTL 0x00000024
+#define SATA_ENET_SDS_IND_CMD_REG 0x0000003c
+#define CFG_IND_WR_CMD_MASK 0x00000001
+#define CFG_IND_RD_CMD_MASK 0x00000002
+#define CFG_IND_CMD_DONE_MASK 0x00000004
+#define CFG_IND_ADDR_SET(dst, src) \
+ (((dst) & ~0x003ffff0) | (((u32) (src) << 4) & 0x003ffff0))
+#define SATA_ENET_SDS_IND_RDATA_REG 0x00000040
+#define SATA_ENET_SDS_IND_WDATA_REG 0x00000044
+#define SATA_ENET_CLK_MACRO_REG 0x0000004c
+#define I_RESET_B_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src)) & 0x00000001))
+#define I_PLL_FBDIV_SET(dst, src) \
+ (((dst) & ~0x001ff000) | (((u32) (src) << 12) & 0x001ff000))
+#define I_CUSTOMEROV_SET(dst, src) \
+ (((dst) & ~0x00000f80) | (((u32) (src) << 7) & 0x00000f80))
+#define O_PLL_LOCK_RD(src) (((src) & 0x40000000) >> 30)
+#define O_PLL_READY_RD(src) (((src) & 0x80000000) >> 31)
+
+/* PLL Clock Macro Unit (CMU) CSR accessing from SDS indirectly */
+#define CMU_REG0 0x00000
+#define CMU_REG0_PLL_REF_SEL_MASK 0x00002000
+#define CMU_REG0_PLL_REF_SEL_SET(dst, src) \
+ (((dst) & ~0x00002000) | (((u32) (src) << 13) & 0x00002000))
+#define CMU_REG0_PDOWN_MASK 0x00004000
+#define CMU_REG0_CAL_COUNT_RESOL_SET(dst, src) \
+ (((dst) & ~0x000000e0) | (((u32) (src) << 5) & 0x000000e0))
+#define CMU_REG1 0x00002
+#define CMU_REG1_PLL_CP_SET(dst, src) \
+ (((dst) & ~0x00003c00) | (((u32) (src) << 10) & 0x00003c00))
+#define CMU_REG1_PLL_MANUALCAL_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define CMU_REG1_PLL_CP_SEL_SET(dst, src) \
+ (((dst) & ~0x000003e0) | (((u32) (src) << 5) & 0x000003e0))
+#define CMU_REG1_REFCLK_CMOS_SEL_MASK 0x00000001
+#define CMU_REG1_REFCLK_CMOS_SEL_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
+#define CMU_REG2 0x00004
+#define CMU_REG2_PLL_REFDIV_SET(dst, src) \
+ (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
+#define CMU_REG2_PLL_LFRES_SET(dst, src) \
+ (((dst) & ~0x0000001e) | (((u32) (src) << 1) & 0x0000001e))
+#define CMU_REG2_PLL_FBDIV_SET(dst, src) \
+ (((dst) & ~0x00003fe0) | (((u32) (src) << 5) & 0x00003fe0))
+#define CMU_REG3 0x00006
+#define CMU_REG3_VCOVARSEL_SET(dst, src) \
+ (((dst) & ~0x0000000f) | (((u32) (src) << 0) & 0x0000000f))
+#define CMU_REG3_VCO_MOMSEL_INIT_SET(dst, src) \
+ (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
+#define CMU_REG3_VCO_MANMOMSEL_SET(dst, src) \
+ (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
+#define CMU_REG4 0x00008
+#define CMU_REG5 0x0000a
+#define CMU_REG5_PLL_LFSMCAP_SET(dst, src) \
+ (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
+#define CMU_REG5_PLL_LOCK_RESOLUTION_SET(dst, src) \
+ (((dst) & ~0x0000000e) | (((u32) (src) << 1) & 0x0000000e))
+#define CMU_REG5_PLL_LFCAP_SET(dst, src) \
+ (((dst) & ~0x00003000) | (((u32) (src) << 12) & 0x00003000))
+#define CMU_REG5_PLL_RESETB_MASK 0x00000001
+#define CMU_REG6 0x0000c
+#define CMU_REG6_PLL_VREGTRIM_SET(dst, src) \
+ (((dst) & ~0x00000600) | (((u32) (src) << 9) & 0x00000600))
+#define CMU_REG6_MAN_PVT_CAL_SET(dst, src) \
+ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
+#define CMU_REG7 0x0000e
+#define CMU_REG7_PLL_CALIB_DONE_RD(src) ((0x00004000 & (u32) (src)) >> 14)
+#define CMU_REG7_VCO_CAL_FAIL_RD(src) ((0x00000c00 & (u32) (src)) >> 10)
+#define CMU_REG8 0x00010
+#define CMU_REG9 0x00012
+#define CMU_REG9_WORD_LEN_8BIT 0x000
+#define CMU_REG9_WORD_LEN_10BIT 0x001
+#define CMU_REG9_WORD_LEN_16BIT 0x002
+#define CMU_REG9_WORD_LEN_20BIT 0x003
+#define CMU_REG9_WORD_LEN_32BIT 0x004
+#define CMU_REG9_WORD_LEN_40BIT 0x005
+#define CMU_REG9_WORD_LEN_64BIT 0x006
+#define CMU_REG9_WORD_LEN_66BIT 0x007
+#define CMU_REG9_TX_WORD_MODE_CH1_SET(dst, src) \
+ (((dst) & ~0x00000380) | (((u32) (src) << 7) & 0x00000380))
+#define CMU_REG9_TX_WORD_MODE_CH0_SET(dst, src) \
+ (((dst) & ~0x00000070) | (((u32) (src) << 4) & 0x00000070))
+#define CMU_REG9_PLL_POST_DIVBY2_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define CMU_REG9_VBG_BYPASSB_SET(dst, src) \
+ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
+#define CMU_REG9_IGEN_BYPASS_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define CMU_REG10 0x00014
+#define CMU_REG10_VREG_REFSEL_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
+#define CMU_REG11 0x00016
+#define CMU_REG12 0x00018
+#define CMU_REG12_STATE_DELAY9_SET(dst, src) \
+ (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0))
+#define CMU_REG13 0x0001a
+#define CMU_REG14 0x0001c
+#define CMU_REG15 0x0001e
+#define CMU_REG16 0x00020
+#define CMU_REG16_PVT_DN_MAN_ENA_MASK 0x00000001
+#define CMU_REG16_PVT_UP_MAN_ENA_MASK 0x00000002
+#define CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(dst, src) \
+ (((dst) & ~0x0000001c) | (((u32) (src) << 2) & 0x0000001c))
+#define CMU_REG16_CALIBRATION_DONE_OVERRIDE_SET(dst, src) \
+ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
+#define CMU_REG16_BYPASS_PLL_LOCK_SET(dst, src) \
+ (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020))
+#define CMU_REG17 0x00022
+#define CMU_REG17_PVT_CODE_R2A_SET(dst, src) \
+ (((dst) & ~0x00007f00) | (((u32) (src) << 8) & 0x00007f00))
+#define CMU_REG17_RESERVED_7_SET(dst, src) \
+ (((dst) & ~0x000000e0) | (((u32) (src) << 5) & 0x000000e0))
+#define CMU_REG17_PVT_TERM_MAN_ENA_MASK 0x00008000
+#define CMU_REG18 0x00024
+#define CMU_REG19 0x00026
+#define CMU_REG20 0x00028
+#define CMU_REG21 0x0002a
+#define CMU_REG22 0x0002c
+#define CMU_REG23 0x0002e
+#define CMU_REG24 0x00030
+#define CMU_REG25 0x00032
+#define CMU_REG26 0x00034
+#define CMU_REG26_FORCE_PLL_LOCK_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
+#define CMU_REG27 0x00036
+#define CMU_REG28 0x00038
+#define CMU_REG29 0x0003a
+#define CMU_REG30 0x0003c
+#define CMU_REG30_LOCK_COUNT_SET(dst, src) \
+ (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006))
+#define CMU_REG30_PCIE_MODE_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define CMU_REG31 0x0003e
+#define CMU_REG32 0x00040
+#define CMU_REG32_FORCE_VCOCAL_START_MASK 0x00004000
+#define CMU_REG32_PVT_CAL_WAIT_SEL_SET(dst, src) \
+ (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006))
+#define CMU_REG32_IREF_ADJ_SET(dst, src) \
+ (((dst) & ~0x00000180) | (((u32) (src) << 7) & 0x00000180))
+#define CMU_REG33 0x00042
+#define CMU_REG34 0x00044
+#define CMU_REG34_VCO_CAL_VTH_LO_MAX_SET(dst, src) \
+ (((dst) & ~0x0000000f) | (((u32) (src) << 0) & 0x0000000f))
+#define CMU_REG34_VCO_CAL_VTH_HI_MAX_SET(dst, src) \
+ (((dst) & ~0x00000f00) | (((u32) (src) << 8) & 0x00000f00))
+#define CMU_REG34_VCO_CAL_VTH_LO_MIN_SET(dst, src) \
+ (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0))
+#define CMU_REG34_VCO_CAL_VTH_HI_MIN_SET(dst, src) \
+ (((dst) & ~0x0000f000) | (((u32) (src) << 12) & 0x0000f000))
+#define CMU_REG35 0x00046
+#define CMU_REG35_PLL_SSC_MOD_SET(dst, src) \
+ (((dst) & ~0x0000fe00) | (((u32) (src) << 9) & 0x0000fe00))
+#define CMU_REG36 0x00048
+#define CMU_REG36_PLL_SSC_EN_SET(dst, src) \
+ (((dst) & ~0x00000010) | (((u32) (src) << 4) & 0x00000010))
+#define CMU_REG36_PLL_SSC_VSTEP_SET(dst, src) \
+ (((dst) & ~0x0000ffc0) | (((u32) (src) << 6) & 0x0000ffc0))
+#define CMU_REG36_PLL_SSC_DSMSEL_SET(dst, src) \
+ (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020))
+#define CMU_REG37 0x0004a
+#define CMU_REG38 0x0004c
+#define CMU_REG39 0x0004e
+
+/* PHY lane CSR accessing from SDS indirectly */
+#define RXTX_REG0 0x000
+#define RXTX_REG0_CTLE_EQ_HR_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG0_CTLE_EQ_QR_SET(dst, src) \
+ (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
+#define RXTX_REG0_CTLE_EQ_FR_SET(dst, src) \
+ (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
+#define RXTX_REG1 0x002
+#define RXTX_REG1_RXACVCM_SET(dst, src) \
+ (((dst) & ~0x0000f000) | (((u32) (src) << 12) & 0x0000f000))
+#define RXTX_REG1_CTLE_EQ_SET(dst, src) \
+ (((dst) & ~0x00000f80) | (((u32) (src) << 7) & 0x00000f80))
+#define RXTX_REG1_RXVREG1_SET(dst, src) \
+ (((dst) & ~0x00000060) | (((u32) (src) << 5) & 0x00000060))
+#define RXTX_REG1_RXIREF_ADJ_SET(dst, src) \
+ (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006))
+#define RXTX_REG2 0x004
+#define RXTX_REG2_VTT_ENA_SET(dst, src) \
+ (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
+#define RXTX_REG2_TX_FIFO_ENA_SET(dst, src) \
+ (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020))
+#define RXTX_REG2_VTT_SEL_SET(dst, src) \
+ (((dst) & ~0x000000c0) | (((u32) (src) << 6) & 0x000000c0))
+#define RXTX_REG4 0x008
+#define RXTX_REG4_TX_LOOPBACK_BUF_EN_MASK 0x00000040
+#define RXTX_REG4_TX_DATA_RATE_SET(dst, src) \
+ (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
+#define RXTX_REG4_TX_WORD_MODE_SET(dst, src) \
+ (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
+#define RXTX_REG5 0x00a
+#define RXTX_REG5_TX_CN1_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG5_TX_CP1_SET(dst, src) \
+ (((dst) & ~0x000007e0) | (((u32) (src) << 5) & 0x000007e0))
+#define RXTX_REG5_TX_CN2_SET(dst, src) \
+ (((dst) & ~0x0000001f) | (((u32) (src) << 0) & 0x0000001f))
+#define RXTX_REG6 0x00c
+#define RXTX_REG6_TXAMP_CNTL_SET(dst, src) \
+ (((dst) & ~0x00000780) | (((u32) (src) << 7) & 0x00000780))
+#define RXTX_REG6_TXAMP_ENA_SET(dst, src) \
+ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
+#define RXTX_REG6_RX_BIST_ERRCNT_RD_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
+#define RXTX_REG6_TX_IDLE_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define RXTX_REG6_RX_BIST_RESYNC_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define RXTX_REG7 0x00e
+#define RXTX_REG7_RESETB_RXD_MASK 0x00000100
+#define RXTX_REG7_RESETB_RXA_MASK 0x00000080
+#define RXTX_REG7_BIST_ENA_RX_SET(dst, src) \
+ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
+#define RXTX_REG7_RX_WORD_MODE_SET(dst, src) \
+ (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
+#define RXTX_REG8 0x010
+#define RXTX_REG8_CDR_LOOP_ENA_SET(dst, src) \
+ (((dst) & ~0x00004000) | (((u32) (src) << 14) & 0x00004000))
+#define RXTX_REG8_CDR_BYPASS_RXLOS_SET(dst, src) \
+ (((dst) & ~0x00000800) | (((u32) (src) << 11) & 0x00000800))
+#define RXTX_REG8_SSC_ENABLE_SET(dst, src) \
+ (((dst) & ~0x00000200) | (((u32) (src) << 9) & 0x00000200))
+#define RXTX_REG8_SD_VREF_SET(dst, src) \
+ (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0))
+#define RXTX_REG8_SD_DISABLE_SET(dst, src) \
+ (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
+#define RXTX_REG7 0x00e
+#define RXTX_REG7_RESETB_RXD_SET(dst, src) \
+ (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
+#define RXTX_REG7_RESETB_RXA_SET(dst, src) \
+ (((dst) & ~0x00000080) | (((u32) (src) << 7) & 0x00000080))
+#define RXTX_REG7_LOOP_BACK_ENA_CTLE_MASK 0x00004000
+#define RXTX_REG7_LOOP_BACK_ENA_CTLE_SET(dst, src) \
+ (((dst) & ~0x00004000) | (((u32) (src) << 14) & 0x00004000))
+#define RXTX_REG11 0x016
+#define RXTX_REG11_PHASE_ADJUST_LIMIT_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG12 0x018
+#define RXTX_REG12_LATCH_OFF_ENA_SET(dst, src) \
+ (((dst) & ~0x00002000) | (((u32) (src) << 13) & 0x00002000))
+#define RXTX_REG12_SUMOS_ENABLE_SET(dst, src) \
+ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
+#define RXTX_REG12_RX_DET_TERM_ENABLE_MASK 0x00000002
+#define RXTX_REG12_RX_DET_TERM_ENABLE_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define RXTX_REG13 0x01a
+#define RXTX_REG14 0x01c
+#define RXTX_REG14_CLTE_LATCAL_MAN_PROG_SET(dst, src) \
+ (((dst) & ~0x0000003f) | (((u32) (src) << 0) & 0x0000003f))
+#define RXTX_REG14_CTLE_LATCAL_MAN_ENA_SET(dst, src) \
+ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
+#define RXTX_REG26 0x034
+#define RXTX_REG26_PERIOD_ERROR_LATCH_SET(dst, src) \
+ (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
+#define RXTX_REG26_BLWC_ENA_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define RXTX_REG21 0x02a
+#define RXTX_REG21_DO_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
+#define RXTX_REG21_XO_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
+#define RXTX_REG21_LATCH_CAL_FAIL_ODD_RD(src) ((0x0000000f & (u32)(src)))
+#define RXTX_REG22 0x02c
+#define RXTX_REG22_SO_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
+#define RXTX_REG22_EO_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
+#define RXTX_REG22_LATCH_CAL_FAIL_EVEN_RD(src) ((0x0000000f & (u32)(src)))
+#define RXTX_REG23 0x02e
+#define RXTX_REG23_DE_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
+#define RXTX_REG23_XE_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
+#define RXTX_REG24 0x030
+#define RXTX_REG24_EE_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
+#define RXTX_REG24_SE_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
+#define RXTX_REG27 0x036
+#define RXTX_REG28 0x038
+#define RXTX_REG31 0x03e
+#define RXTX_REG38 0x04c
+#define RXTX_REG38_CUSTOMER_PINMODE_INV_SET(dst, src) \
+ (((dst) & 0x0000fffe) | (((u32) (src) << 1) & 0x0000fffe))
+#define RXTX_REG39 0x04e
+#define RXTX_REG40 0x050
+#define RXTX_REG41 0x052
+#define RXTX_REG42 0x054
+#define RXTX_REG43 0x056
+#define RXTX_REG44 0x058
+#define RXTX_REG45 0x05a
+#define RXTX_REG46 0x05c
+#define RXTX_REG47 0x05e
+#define RXTX_REG48 0x060
+#define RXTX_REG49 0x062
+#define RXTX_REG50 0x064
+#define RXTX_REG51 0x066
+#define RXTX_REG52 0x068
+#define RXTX_REG53 0x06a
+#define RXTX_REG54 0x06c
+#define RXTX_REG55 0x06e
+#define RXTX_REG61 0x07a
+#define RXTX_REG61_ISCAN_INBERT_SET(dst, src) \
+ (((dst) & ~0x00000010) | (((u32) (src) << 4) & 0x00000010))
+#define RXTX_REG61_LOADFREQ_SHIFT_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define RXTX_REG61_EYE_COUNT_WIDTH_SEL_SET(dst, src) \
+ (((dst) & ~0x000000c0) | (((u32) (src) << 6) & 0x000000c0))
+#define RXTX_REG61_SPD_SEL_CDR_SET(dst, src) \
+ (((dst) & ~0x00003c00) | (((u32) (src) << 10) & 0x00003c00))
+#define RXTX_REG62 0x07c
+#define RXTX_REG62_PERIOD_H1_QLATCH_SET(dst, src) \
+ (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
+#define RXTX_REG81 0x0a2
+#define RXTX_REG89_MU_TH7_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG89_MU_TH8_SET(dst, src) \
+ (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
+#define RXTX_REG89_MU_TH9_SET(dst, src) \
+ (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
+#define RXTX_REG96 0x0c0
+#define RXTX_REG96_MU_FREQ1_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG96_MU_FREQ2_SET(dst, src) \
+ (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
+#define RXTX_REG96_MU_FREQ3_SET(dst, src) \
+ (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
+#define RXTX_REG99 0x0c6
+#define RXTX_REG99_MU_PHASE1_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG99_MU_PHASE2_SET(dst, src) \
+ (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
+#define RXTX_REG99_MU_PHASE3_SET(dst, src) \
+ (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
+#define RXTX_REG102 0x0cc
+#define RXTX_REG102_FREQLOOP_LIMIT_SET(dst, src) \
+ (((dst) & ~0x00000060) | (((u32) (src) << 5) & 0x00000060))
+#define RXTX_REG114 0x0e4
+#define RXTX_REG121 0x0f2
+#define RXTX_REG121_SUMOS_CAL_CODE_RD(src) ((0x0000003e & (u32)(src)) >> 0x1)
+#define RXTX_REG125 0x0fa
+#define RXTX_REG125_PQ_REG_SET(dst, src) \
+ (((dst) & ~0x0000fe00) | (((u32) (src) << 9) & 0x0000fe00))
+#define RXTX_REG125_SIGN_PQ_SET(dst, src) \
+ (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
+#define RXTX_REG125_SIGN_PQ_2C_SET(dst, src) \
+ (((dst) & ~0x00000080) | (((u32) (src) << 7) & 0x00000080))
+#define RXTX_REG125_PHZ_MANUALCODE_SET(dst, src) \
+ (((dst) & ~0x0000007c) | (((u32) (src) << 2) & 0x0000007c))
+#define RXTX_REG125_PHZ_MANUAL_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define RXTX_REG127 0x0fe
+#define RXTX_REG127_FORCE_SUM_CAL_START_MASK 0x00000002
+#define RXTX_REG127_FORCE_LAT_CAL_START_MASK 0x00000004
+#define RXTX_REG127_FORCE_SUM_CAL_START_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define RXTX_REG127_FORCE_LAT_CAL_START_SET(dst, src) \
+ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
+#define RXTX_REG127_LATCH_MAN_CAL_ENA_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define RXTX_REG127_DO_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
+#define RXTX_REG127_XO_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
+#define RXTX_REG128 0x100
+#define RXTX_REG128_LATCH_CAL_WAIT_SEL_SET(dst, src) \
+ (((dst) & ~0x0000000c) | (((u32) (src) << 2) & 0x0000000c))
+#define RXTX_REG128_EO_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
+#define RXTX_REG128_SO_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
+#define RXTX_REG129 0x102
+#define RXTX_REG129_DE_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
+#define RXTX_REG129_XE_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
+#define RXTX_REG130 0x104
+#define RXTX_REG130_EE_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
+#define RXTX_REG130_SE_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
+#define RXTX_REG145 0x122
+#define RXTX_REG145_TX_IDLE_SATA_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
+#define RXTX_REG145_RXES_ENA_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define RXTX_REG145_RXDFE_CONFIG_SET(dst, src) \
+ (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
+#define RXTX_REG145_RXVWES_LATENA_SET(dst, src) \
+ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
+#define RXTX_REG147 0x126
+#define RXTX_REG148 0x128
+
+/* Clock macro type */
+enum cmu_type_t {
+ REF_CMU = 0, /* Clock macro is the internal reference clock */
+ PHY_CMU = 1, /* Clock macro is the PLL for the Serdes */
+};
+
+enum mux_type_t {
+ MUX_SELECT_ATA = 0, /* Switch the MUX to ATA */
+ MUX_SELECT_SGMMII = 0, /* Switch the MUX to SGMII */
+};
+
+enum clk_type_t {
+ CLK_EXT_DIFF = 0, /* External differential */
+ CLK_INT_DIFF = 1, /* Internal differential */
+ CLK_INT_SING = 2, /* Internal single ended */
+};
+
+enum phy_mode {
+ MODE_SATA = 0, /* List them for simple reference */
+ MODE_SGMII = 1,
+ MODE_PCIE = 2,
+ MODE_USB = 3,
+ MODE_XFI = 4,
+ MODE_MAX
+};
+
+struct xgene_sata_override_param {
+ u32 speed[MAX_LANE]; /* Index for override parameter per lane */
+ u32 txspeed[3]; /* Tx speed */
+ u32 txboostgain[MAX_LANE*3]; /* Tx freq boost and gain control */
+ u32 txeyetuning[MAX_LANE*3]; /* Tx eye tuning */
+ u32 txeyedirection[MAX_LANE*3]; /* Tx eye tuning direction */
+ u32 txamplitude[MAX_LANE*3]; /* Tx amplitude control */
+ u32 txprecursor_cn1[MAX_LANE*3]; /* Tx emphasis taps 1st pre-cursor */
+ u32 txprecursor_cn2[MAX_LANE*3]; /* Tx emphasis taps 2nd pre-cursor */
+ u32 txpostcursor_cp1[MAX_LANE*3]; /* Tx emphasis taps post-cursor */
+};
+
+struct xgene_phy_ctx {
+ struct device *dev;
+ struct phy *phy;
+ enum phy_mode mode; /* Mode of operation */
+ enum clk_type_t clk_type; /* Input clock selection */
+ void __iomem *sds_base; /* PHY CSR base addr */
+ struct clk *clk; /* Optional clock */
+
+ /* Override Serdes parameters */
+ struct xgene_sata_override_param sata_param;
+};
+
+/*
+ * For chips earlier than the A3 version, enable this flag.
+ * To enable it, pass the boot argument phy_xgene.preA3Chip=1
+ */
+static int preA3Chip;
+MODULE_PARM_DESC(preA3Chip, "Enable pre-A3 chip support (1=enable 0=disable)");
+module_param_named(preA3Chip, preA3Chip, int, 0444);
+
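+/*
+ * Indirect SDS register access: the target CSR address and a WR/RD command
+ * bit are written to the indirect command register, then the CMD_DONE bit
+ * is polled (for up to a second) to confirm completion.
+ */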
+static void sds_wr(void __iomem *csr_base, u32 indirect_cmd_reg,
+ u32 indirect_data_reg, u32 addr, u32 data)
+{
+ unsigned long deadline = jiffies + HZ;
+ u32 val;
+ u32 cmd;
+
+ cmd = CFG_IND_WR_CMD_MASK | CFG_IND_CMD_DONE_MASK;
+ cmd = CFG_IND_ADDR_SET(cmd, addr);
+ writel(data, csr_base + indirect_data_reg);
+ readl(csr_base + indirect_data_reg); /* Force a barrier */
+ writel(cmd, csr_base + indirect_cmd_reg);
+ readl(csr_base + indirect_cmd_reg); /* Force a barrier */
+ do {
+ val = readl(csr_base + indirect_cmd_reg);
+ } while (!(val & CFG_IND_CMD_DONE_MASK) &&
+ time_before(jiffies, deadline));
+ if (!(val & CFG_IND_CMD_DONE_MASK))
+ pr_err("SDS WR timeout at 0x%p offset 0x%08X value 0x%08X\n",
+ csr_base + indirect_cmd_reg, addr, data);
+}
+
+static void sds_rd(void __iomem *csr_base, u32 indirect_cmd_reg,
+ u32 indirect_data_reg, u32 addr, u32 *data)
+{
+ unsigned long deadline = jiffies + HZ;
+ u32 val;
+ u32 cmd;
+
+ cmd = CFG_IND_RD_CMD_MASK | CFG_IND_CMD_DONE_MASK;
+ cmd = CFG_IND_ADDR_SET(cmd, addr);
+ writel(cmd, csr_base + indirect_cmd_reg);
+ readl(csr_base + indirect_cmd_reg); /* Force a barrier */
+ do {
+ val = readl(csr_base + indirect_cmd_reg);
+ } while (!(val & CFG_IND_CMD_DONE_MASK) &&
+ time_before(jiffies, deadline));
+ *data = readl(csr_base + indirect_data_reg);
+ if (!(val & CFG_IND_CMD_DONE_MASK))
+ pr_err("SDS WR timeout at 0x%p offset 0x%08X value 0x%08X\n",
+ csr_base + indirect_cmd_reg, addr, *data);
+}
+
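+/*
+ * CMU register accessors: REF_CMU registers are reached through the 0x2000
+ * indirect offset and PHY_CMU registers through the 0x0000 offset. A write
+ * is read back so the debug trace can show the value that actually landed.
+ */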
+static void cmu_wr(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
+ u32 reg, u32 data)
+{
+ void __iomem *sds_base = ctx->sds_base;
+ u32 val;
+
+ if (cmu_type == REF_CMU)
+ reg += SERDES_PLL_REF_INDIRECT_OFFSET;
+ else
+ reg += SERDES_PLL_INDIRECT_OFFSET;
+ sds_wr(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_WDATA_REG, reg, data);
+ sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_RDATA_REG, reg, &val);
+ pr_debug("CMU WR addr 0x%X value 0x%08X <-> 0x%08X\n", reg, data, val);
+}
+
+static void cmu_rd(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
+ u32 reg, u32 *data)
+{
+ void __iomem *sds_base = ctx->sds_base;
+
+ if (cmu_type == REF_CMU)
+ reg += SERDES_PLL_REF_INDIRECT_OFFSET;
+ else
+ reg += SERDES_PLL_INDIRECT_OFFSET;
+ sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_RDATA_REG, reg, data);
+ pr_debug("CMU RD addr 0x%X value 0x%08X\n", reg, *data);
+}
+
+static void cmu_toggle1to0(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
+ u32 reg, u32 bits)
+{
+ u32 val;
+
+ cmu_rd(ctx, cmu_type, reg, &val);
+ val |= bits;
+ cmu_wr(ctx, cmu_type, reg, val);
+ cmu_rd(ctx, cmu_type, reg, &val);
+ val &= ~bits;
+ cmu_wr(ctx, cmu_type, reg, val);
+}
+
+static void cmu_clrbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
+ u32 reg, u32 bits)
+{
+ u32 val;
+
+ cmu_rd(ctx, cmu_type, reg, &val);
+ val &= ~bits;
+ cmu_wr(ctx, cmu_type, reg, val);
+}
+
+static void cmu_setbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
+ u32 reg, u32 bits)
+{
+ u32 val;
+
+ cmu_rd(ctx, cmu_type, reg, &val);
+ val |= bits;
+ cmu_wr(ctx, cmu_type, reg, val);
+}
+
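+/*
+ * Lane (RXTX) register accessors: lane registers start at the 0x0400
+ * indirect offset, with a 0x200 stride per lane.
+ */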
+static void serdes_wr(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 data)
+{
+ void __iomem *sds_base = ctx->sds_base;
+ u32 val;
+
+ reg += SERDES_INDIRECT_OFFSET;
+ reg += lane * SERDES_LANE_STRIDE;
+ sds_wr(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_WDATA_REG, reg, data);
+ sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_RDATA_REG, reg, &val);
+ pr_debug("SERDES WR addr 0x%X value 0x%08X <-> 0x%08X\n", reg, data,
+ val);
+}
+
+static void serdes_rd(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 *data)
+{
+ void __iomem *sds_base = ctx->sds_base;
+
+ reg += SERDES_INDIRECT_OFFSET;
+ reg += lane * SERDES_LANE_STRIDE;
+ sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_RDATA_REG, reg, data);
+ pr_debug("SERDES RD addr 0x%X value 0x%08X\n", reg, *data);
+}
+
+static void serdes_clrbits(struct xgene_phy_ctx *ctx, int lane, u32 reg,
+ u32 bits)
+{
+ u32 val;
+
+ serdes_rd(ctx, lane, reg, &val);
+ val &= ~bits;
+ serdes_wr(ctx, lane, reg, val);
+}
+
+static void serdes_setbits(struct xgene_phy_ctx *ctx, int lane, u32 reg,
+ u32 bits)
+{
+ u32 val;
+
+ serdes_rd(ctx, lane, reg, &val);
+ val |= bits;
+ serdes_wr(ctx, lane, reg, val);
+}
+
+static void xgene_phy_cfg_cmu_clk_type(struct xgene_phy_ctx *ctx,
+ enum cmu_type_t cmu_type,
+ enum clk_type_t clk_type)
+{
+ u32 val;
+
+ /* Set the reset sequence delay for TX ready assertion */
+ cmu_rd(ctx, cmu_type, CMU_REG12, &val);
+ val = CMU_REG12_STATE_DELAY9_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG12, val);
+ /* Set the programmable stage delays between various enable stages */
+ cmu_wr(ctx, cmu_type, CMU_REG13, 0x0222);
+ cmu_wr(ctx, cmu_type, CMU_REG14, 0x2225);
+
+ /* Configure clock type */
+ if (clk_type == CLK_EXT_DIFF) {
+ /* Select external clock mux */
+ cmu_rd(ctx, cmu_type, CMU_REG0, &val);
+ val = CMU_REG0_PLL_REF_SEL_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG0, val);
+ /* Select CMOS as reference clock */
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+ dev_dbg(ctx->dev, "Set external reference clock\n");
+ } else if (clk_type == CLK_INT_DIFF) {
+ /* Select internal clock mux */
+ cmu_rd(ctx, cmu_type, CMU_REG0, &val);
+ val = CMU_REG0_PLL_REF_SEL_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG0, val);
+ /* Select CMOS as reference clock */
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+ dev_dbg(ctx->dev, "Set internal reference clock\n");
+ } else if (clk_type == CLK_INT_SING) {
+ /*
+ * NOTE: This clock type is NOT supported for a controller
+ * whose internal clock is shared with the PCIe controller.
+ *
+ * Select internal clock mux
+ */
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+ /* Select CML as reference clock */
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+ dev_dbg(ctx->dev,
+ "Set internal single ended reference clock\n");
+ }
+}
+
+static void xgene_phy_sata_cfg_cmu_core(struct xgene_phy_ctx *ctx,
+ enum cmu_type_t cmu_type,
+ enum clk_type_t clk_type)
+{
+ u32 val;
+ int ref_100MHz;
+
+ if (cmu_type == REF_CMU) {
+ /* Set VCO calibration voltage threshold */
+ cmu_rd(ctx, cmu_type, CMU_REG34, &val);
+ val = CMU_REG34_VCO_CAL_VTH_LO_MAX_SET(val, 0x7);
+ val = CMU_REG34_VCO_CAL_VTH_HI_MAX_SET(val, 0xc);
+ val = CMU_REG34_VCO_CAL_VTH_LO_MIN_SET(val, 0x3);
+ val = CMU_REG34_VCO_CAL_VTH_HI_MIN_SET(val, 0x8);
+ cmu_wr(ctx, cmu_type, CMU_REG34, val);
+ }
+
+ /* Set the VCO calibration counter */
+ cmu_rd(ctx, cmu_type, CMU_REG0, &val);
+ if (cmu_type == REF_CMU || preA3Chip)
+ val = CMU_REG0_CAL_COUNT_RESOL_SET(val, 0x4);
+ else
+ val = CMU_REG0_CAL_COUNT_RESOL_SET(val, 0x7);
+ cmu_wr(ctx, cmu_type, CMU_REG0, val);
+
+ /* Configure PLL for calibration */
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_PLL_CP_SET(val, 0x1);
+ if (cmu_type == REF_CMU || preA3Chip)
+ val = CMU_REG1_PLL_CP_SEL_SET(val, 0x5);
+ else
+ val = CMU_REG1_PLL_CP_SEL_SET(val, 0x3);
+ if (cmu_type == REF_CMU)
+ val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x0);
+ else
+ val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+
+ if (cmu_type != REF_CMU)
+ cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
+
+ /* Configure the PLL for either 100MHz or 50MHz */
+ cmu_rd(ctx, cmu_type, CMU_REG2, &val);
+ if (cmu_type == REF_CMU) {
+ val = CMU_REG2_PLL_LFRES_SET(val, 0xa);
+ ref_100MHz = 1;
+ } else {
+ val = CMU_REG2_PLL_LFRES_SET(val, 0x3);
+ if (clk_type == CLK_EXT_DIFF)
+ ref_100MHz = 0;
+ else
+ ref_100MHz = 1;
+ }
+ if (ref_100MHz) {
+ val = CMU_REG2_PLL_FBDIV_SET(val, FBDIV_VAL_100M);
+ val = CMU_REG2_PLL_REFDIV_SET(val, REFDIV_VAL_100M);
+ } else {
+ val = CMU_REG2_PLL_FBDIV_SET(val, FBDIV_VAL_50M);
+ val = CMU_REG2_PLL_REFDIV_SET(val, REFDIV_VAL_50M);
+ }
+ cmu_wr(ctx, cmu_type, CMU_REG2, val);
+
+ /* Configure the VCO */
+ cmu_rd(ctx, cmu_type, CMU_REG3, &val);
+ if (cmu_type == REF_CMU) {
+ val = CMU_REG3_VCOVARSEL_SET(val, 0x3);
+ val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x10);
+ } else {
+ val = CMU_REG3_VCOVARSEL_SET(val, 0xF);
+ if (preA3Chip)
+ val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x15);
+ else
+ val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x1a);
+ val = CMU_REG3_VCO_MANMOMSEL_SET(val, 0x15);
+ }
+ cmu_wr(ctx, cmu_type, CMU_REG3, val);
+
+ /* Disable force PLL lock */
+ cmu_rd(ctx, cmu_type, CMU_REG26, &val);
+ val = CMU_REG26_FORCE_PLL_LOCK_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG26, val);
+
+ /* Setup PLL loop filter */
+ cmu_rd(ctx, cmu_type, CMU_REG5, &val);
+ val = CMU_REG5_PLL_LFSMCAP_SET(val, 0x3);
+ val = CMU_REG5_PLL_LFCAP_SET(val, 0x3);
+ if (cmu_type == REF_CMU || !preA3Chip)
+ val = CMU_REG5_PLL_LOCK_RESOLUTION_SET(val, 0x7);
+ else
+ val = CMU_REG5_PLL_LOCK_RESOLUTION_SET(val, 0x4);
+ cmu_wr(ctx, cmu_type, CMU_REG5, val);
+
+ /* Enable or disable manual calibration */
+ cmu_rd(ctx, cmu_type, CMU_REG6, &val);
+ val = CMU_REG6_PLL_VREGTRIM_SET(val, preA3Chip ? 0x0 : 0x2);
+ val = CMU_REG6_MAN_PVT_CAL_SET(val, preA3Chip ? 0x1 : 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG6, val);
+
+ /* Configure lane for 20-bits */
+ if (cmu_type == PHY_CMU) {
+ cmu_rd(ctx, cmu_type, CMU_REG9, &val);
+ val = CMU_REG9_TX_WORD_MODE_CH1_SET(val,
+ CMU_REG9_WORD_LEN_20BIT);
+ val = CMU_REG9_TX_WORD_MODE_CH0_SET(val,
+ CMU_REG9_WORD_LEN_20BIT);
+ val = CMU_REG9_PLL_POST_DIVBY2_SET(val, 0x1);
+ if (!preA3Chip) {
+ val = CMU_REG9_VBG_BYPASSB_SET(val, 0x0);
+ val = CMU_REG9_IGEN_BYPASS_SET(val , 0x0);
+ }
+ cmu_wr(ctx, cmu_type, CMU_REG9, val);
+
+ if (!preA3Chip) {
+ cmu_rd(ctx, cmu_type, CMU_REG10, &val);
+ val = CMU_REG10_VREG_REFSEL_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG10, val);
+ }
+ }
+
+ cmu_rd(ctx, cmu_type, CMU_REG16, &val);
+ val = CMU_REG16_CALIBRATION_DONE_OVERRIDE_SET(val, 0x1);
+ val = CMU_REG16_BYPASS_PLL_LOCK_SET(val, 0x1);
+ if (cmu_type == REF_CMU || preA3Chip)
+ val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x4);
+ else
+ val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x7);
+ cmu_wr(ctx, cmu_type, CMU_REG16, val);
+
+ /* Configure for SATA */
+ cmu_rd(ctx, cmu_type, CMU_REG30, &val);
+ val = CMU_REG30_PCIE_MODE_SET(val, 0x0);
+ val = CMU_REG30_LOCK_COUNT_SET(val, 0x3);
+ cmu_wr(ctx, cmu_type, CMU_REG30, val);
+
+ /* Disable state machine bypass */
+ cmu_wr(ctx, cmu_type, CMU_REG31, 0xF);
+
+ cmu_rd(ctx, cmu_type, CMU_REG32, &val);
+ val = CMU_REG32_PVT_CAL_WAIT_SEL_SET(val, 0x3);
+ if (cmu_type == REF_CMU || preA3Chip)
+ val = CMU_REG32_IREF_ADJ_SET(val, 0x3);
+ else
+ val = CMU_REG32_IREF_ADJ_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG32, val);
+
+ /* Set VCO calibration threshold */
+ if (cmu_type != REF_CMU && preA3Chip)
+ cmu_wr(ctx, cmu_type, CMU_REG34, 0x8d27);
+ else
+ cmu_wr(ctx, cmu_type, CMU_REG34, 0x873c);
+
+ /* Set CTLE Override and override waiting from state machine */
+ cmu_wr(ctx, cmu_type, CMU_REG37, 0xF00F);
+}
+
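+/*
+ * Enable spread-spectrum clocking: program the SSC modulation, vertical step
+ * and DSM selection, then reset the PLL and restart VCO calibration so the
+ * new settings take effect.
+ */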
+static void xgene_phy_ssc_enable(struct xgene_phy_ctx *ctx,
+ enum cmu_type_t cmu_type)
+{
+ u32 val;
+
+ /* Set SSC modulation value */
+ cmu_rd(ctx, cmu_type, CMU_REG35, &val);
+ val = CMU_REG35_PLL_SSC_MOD_SET(val, 98);
+ cmu_wr(ctx, cmu_type, CMU_REG35, val);
+
+ /* Enable SSC, set vertical step and DSM value */
+ cmu_rd(ctx, cmu_type, CMU_REG36, &val);
+ val = CMU_REG36_PLL_SSC_VSTEP_SET(val, 30);
+ val = CMU_REG36_PLL_SSC_EN_SET(val, 1);
+ val = CMU_REG36_PLL_SSC_DSMSEL_SET(val, 1);
+ cmu_wr(ctx, cmu_type, CMU_REG36, val);
+
+ /* Reset the PLL */
+ cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
+ cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
+
+ /* Force VCO calibration to restart */
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
+ CMU_REG32_FORCE_VCOCAL_START_MASK);
+}
+
+static void xgene_phy_sata_cfg_lanes(struct xgene_phy_ctx *ctx)
+{
+ u32 val;
+ u32 reg;
+ int i;
+ int lane;
+
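+ /*
+ * Override parameters are stored as three entries per lane (one per
+ * SATA generation), hence the "lane * 3 + speed" indexing below.
+ */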
+ for (lane = 0; lane < MAX_LANE; lane++) {
+ serdes_wr(ctx, lane, RXTX_REG147, 0x6);
+
+ /* Set boost control for quarter, half, and full rate */
+ serdes_rd(ctx, lane, RXTX_REG0, &val);
+ val = RXTX_REG0_CTLE_EQ_HR_SET(val, 0x10);
+ val = RXTX_REG0_CTLE_EQ_QR_SET(val, 0x10);
+ val = RXTX_REG0_CTLE_EQ_FR_SET(val, 0x10);
+ serdes_wr(ctx, lane, RXTX_REG0, val);
+
+ /* Set boost control value */
+ serdes_rd(ctx, lane, RXTX_REG1, &val);
+ val = RXTX_REG1_RXACVCM_SET(val, 0x7);
+ val = RXTX_REG1_CTLE_EQ_SET(val,
+ ctx->sata_param.txboostgain[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ serdes_wr(ctx, lane, RXTX_REG1, val);
+
+ /*
+ * Latch VTT value based on the termination to ground and
+ * enable TX FIFO
+ */
+ serdes_rd(ctx, lane, RXTX_REG2, &val);
+ val = RXTX_REG2_VTT_ENA_SET(val, 0x1);
+ val = RXTX_REG2_VTT_SEL_SET(val, 0x1);
+ val = RXTX_REG2_TX_FIFO_ENA_SET(val, 0x1);
+ serdes_wr(ctx, lane, RXTX_REG2, val);
+
+ /* Configure Tx for 20-bits */
+ serdes_rd(ctx, lane, RXTX_REG4, &val);
+ val = RXTX_REG4_TX_WORD_MODE_SET(val, CMU_REG9_WORD_LEN_20BIT);
+ serdes_wr(ctx, lane, RXTX_REG4, val);
+
+ if (!preA3Chip) {
+ serdes_rd(ctx, lane, RXTX_REG1, &val);
+ val = RXTX_REG1_RXVREG1_SET(val, 0x2);
+ val = RXTX_REG1_RXIREF_ADJ_SET(val, 0x2);
+ serdes_wr(ctx, lane, RXTX_REG1, val);
+ }
+
+ /* Set pre-emphasis first 1 and 2, and post-emphasis values */
+ serdes_rd(ctx, lane, RXTX_REG5, &val);
+ val = RXTX_REG5_TX_CN1_SET(val,
+ ctx->sata_param.txprecursor_cn1[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ val = RXTX_REG5_TX_CP1_SET(val,
+ ctx->sata_param.txpostcursor_cp1[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ val = RXTX_REG5_TX_CN2_SET(val,
+ ctx->sata_param.txprecursor_cn2[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ serdes_wr(ctx, lane, RXTX_REG5, val);
+
+ /* Set TX amplitude value */
+ serdes_rd(ctx, lane, RXTX_REG6, &val);
+ val = RXTX_REG6_TXAMP_CNTL_SET(val,
+ ctx->sata_param.txamplitude[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ val = RXTX_REG6_TXAMP_ENA_SET(val, 0x1);
+ val = RXTX_REG6_TX_IDLE_SET(val, 0x0);
+ val = RXTX_REG6_RX_BIST_RESYNC_SET(val, 0x0);
+ val = RXTX_REG6_RX_BIST_ERRCNT_RD_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG6, val);
+
+ /* Configure Rx for 20-bits */
+ serdes_rd(ctx, lane, RXTX_REG7, &val);
+ val = RXTX_REG7_BIST_ENA_RX_SET(val, 0x0);
+ val = RXTX_REG7_RX_WORD_MODE_SET(val, CMU_REG9_WORD_LEN_20BIT);
+ serdes_wr(ctx, lane, RXTX_REG7, val);
+
+ /* Set CDR and LOS values and enable Rx SSC */
+ serdes_rd(ctx, lane, RXTX_REG8, &val);
+ val = RXTX_REG8_CDR_LOOP_ENA_SET(val, 0x1);
+ val = RXTX_REG8_CDR_BYPASS_RXLOS_SET(val, 0x0);
+ val = RXTX_REG8_SSC_ENABLE_SET(val, 0x1);
+ val = RXTX_REG8_SD_DISABLE_SET(val, 0x0);
+ val = RXTX_REG8_SD_VREF_SET(val, 0x4);
+ serdes_wr(ctx, lane, RXTX_REG8, val);
+
+ /* Set phase adjust upper/lower limits */
+ serdes_rd(ctx, lane, RXTX_REG11, &val);
+ val = RXTX_REG11_PHASE_ADJUST_LIMIT_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG11, val);
+
+ /* Enable Latch Off; disable SUMOS and Tx termination */
+ serdes_rd(ctx, lane, RXTX_REG12, &val);
+ val = RXTX_REG12_LATCH_OFF_ENA_SET(val, 0x1);
+ val = RXTX_REG12_SUMOS_ENABLE_SET(val, 0x0);
+ val = RXTX_REG12_RX_DET_TERM_ENABLE_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG12, val);
+
+ /* Set period error latch to 512T and enable BWL */
+ serdes_rd(ctx, lane, RXTX_REG26, &val);
+ val = RXTX_REG26_PERIOD_ERROR_LATCH_SET(val, 0x0);
+ val = RXTX_REG26_BLWC_ENA_SET(val, 0x1);
+ serdes_wr(ctx, lane, RXTX_REG26, val);
+
+ serdes_wr(ctx, lane, RXTX_REG28, 0x0);
+
+ /* Set DFE loop preset value */
+ serdes_wr(ctx, lane, RXTX_REG31, 0x0);
+
+ /* Set Eye Monitor counter width to 12-bit */
+ serdes_rd(ctx, lane, RXTX_REG61, &val);
+ val = RXTX_REG61_ISCAN_INBERT_SET(val, 0x1);
+ val = RXTX_REG61_LOADFREQ_SHIFT_SET(val, 0x0);
+ val = RXTX_REG61_EYE_COUNT_WIDTH_SEL_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG61, val);
+
+ serdes_rd(ctx, lane, RXTX_REG62, &val);
+ val = RXTX_REG62_PERIOD_H1_QLATCH_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG62, val);
+
+ /* Set BW select tap X for DFE loop */
+ for (i = 0; i < 9; i++) {
+ reg = RXTX_REG81 + i * 2;
+ serdes_rd(ctx, lane, reg, &val);
+ val = RXTX_REG89_MU_TH7_SET(val, 0xe);
+ val = RXTX_REG89_MU_TH8_SET(val, 0xe);
+ val = RXTX_REG89_MU_TH9_SET(val, 0xe);
+ serdes_wr(ctx, lane, reg, val);
+ }
+
+ /* Set BW select tap X for frequency adjust loop */
+ for (i = 0; i < 3; i++) {
+ reg = RXTX_REG96 + i * 2;
+ serdes_rd(ctx, lane, reg, &val);
+ val = RXTX_REG96_MU_FREQ1_SET(val, 0x10);
+ val = RXTX_REG96_MU_FREQ2_SET(val, 0x10);
+ val = RXTX_REG96_MU_FREQ3_SET(val, 0x10);
+ serdes_wr(ctx, lane, reg, val);
+ }
+
+ /* Set BW select tap X for phase adjust loop */
+ for (i = 0; i < 3; i++) {
+ reg = RXTX_REG99 + i * 2;
+ serdes_rd(ctx, lane, reg, &val);
+ val = RXTX_REG99_MU_PHASE1_SET(val, 0x7);
+ val = RXTX_REG99_MU_PHASE2_SET(val, 0x7);
+ val = RXTX_REG99_MU_PHASE3_SET(val, 0x7);
+ serdes_wr(ctx, lane, reg, val);
+ }
+
+ serdes_rd(ctx, lane, RXTX_REG102, &val);
+ val = RXTX_REG102_FREQLOOP_LIMIT_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG102, val);
+
+ serdes_wr(ctx, lane, RXTX_REG114, 0xffe0);
+
+ serdes_rd(ctx, lane, RXTX_REG125, &val);
+ val = RXTX_REG125_SIGN_PQ_SET(val,
+ ctx->sata_param.txeyedirection[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ val = RXTX_REG125_PQ_REG_SET(val,
+ ctx->sata_param.txeyetuning[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ val = RXTX_REG125_PHZ_MANUAL_SET(val, 0x1);
+ serdes_wr(ctx, lane, RXTX_REG125, val);
+
+ serdes_rd(ctx, lane, RXTX_REG127, &val);
+ val = RXTX_REG127_LATCH_MAN_CAL_ENA_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG127, val);
+
+ serdes_rd(ctx, lane, RXTX_REG128, &val);
+ val = RXTX_REG128_LATCH_CAL_WAIT_SEL_SET(val, 0x3);
+ serdes_wr(ctx, lane, RXTX_REG128, val);
+
+ serdes_rd(ctx, lane, RXTX_REG145, &val);
+ val = RXTX_REG145_RXDFE_CONFIG_SET(val, 0x3);
+ val = RXTX_REG145_TX_IDLE_SATA_SET(val, 0x0);
+ if (preA3Chip) {
+ val = RXTX_REG145_RXES_ENA_SET(val, 0x1);
+ val = RXTX_REG145_RXVWES_LATENA_SET(val, 0x1);
+ } else {
+ val = RXTX_REG145_RXES_ENA_SET(val, 0x0);
+ val = RXTX_REG145_RXVWES_LATENA_SET(val, 0x0);
+ }
+ serdes_wr(ctx, lane, RXTX_REG145, val);
+
+ /*
+ * Set Rx LOS filter clock rate, sample rate, and threshold
+ * windows
+ */
+ for (i = 0; i < 4; i++) {
+ reg = RXTX_REG148 + i * 2;
+ serdes_wr(ctx, lane, reg, 0xFFFF);
+ }
+ }
+}
+
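+/*
+ * Release the PHY main reset, reset and auto-calibrate the PLL (with the
+ * additional manual termination/pull-up/pull-down calibration on pre-A3
+ * chips), then poll CMU_REG7 for calibration completion.
+ */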
+static int xgene_phy_cal_rdy_chk(struct xgene_phy_ctx *ctx,
+ enum cmu_type_t cmu_type,
+ enum clk_type_t clk_type)
+{
+ void __iomem *csr_serdes = ctx->sds_base;
+ int loop;
+ u32 val;
+
+ /* Release PHY main reset */
+ writel(0xdf, csr_serdes + SATA_ENET_SDS_RST_CTL);
+ readl(csr_serdes + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
+
+ if (cmu_type != REF_CMU) {
+ cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
+ /*
+ * As per PHY design spec, the PLL reset requires a minimum
+ * of 800us.
+ */
+ usleep_range(800, 1000);
+
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+ /*
+ * As per PHY design spec, the PLL auto calibration requires
+ * a minimum of 800us.
+ */
+ usleep_range(800, 1000);
+
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
+ CMU_REG32_FORCE_VCOCAL_START_MASK);
+ /*
+ * As per PHY design spec, the PLL requires a minimum of
+ * 800us to settle.
+ */
+ usleep_range(800, 1000);
+ }
+
+ if (!preA3Chip)
+ goto skip_manual_cal;
+
+ /*
+ * Configure the termination resistor calibration.
+ * The serial receive pins, RXP/RXN, have a termination resistor
+ * that is required to be calibrated.
+ */
+ cmu_rd(ctx, cmu_type, CMU_REG17, &val);
+ val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x12);
+ val = CMU_REG17_RESERVED_7_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG17, val);
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG17,
+ CMU_REG17_PVT_TERM_MAN_ENA_MASK);
+ /*
+ * The serial transmit pins, TXP/TXN, have Pull-UP and Pull-DOWN
+ * resistors that are required to be calibrated.
+ * Configure the pull DOWN calibration
+ */
+ cmu_rd(ctx, cmu_type, CMU_REG17, &val);
+ val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x29);
+ val = CMU_REG17_RESERVED_7_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG17, val);
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG16,
+ CMU_REG16_PVT_DN_MAN_ENA_MASK);
+ /* Configure the pull UP calibration */
+ cmu_rd(ctx, cmu_type, CMU_REG17, &val);
+ val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x28);
+ val = CMU_REG17_RESERVED_7_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG17, val);
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG16,
+ CMU_REG16_PVT_UP_MAN_ENA_MASK);
+
+skip_manual_cal:
+ /* Poll the PLL calibration completion status for at least 1 ms */
+ loop = 100;
+ do {
+ cmu_rd(ctx, cmu_type, CMU_REG7, &val);
+ if (CMU_REG7_PLL_CALIB_DONE_RD(val))
+ break;
+ /*
+ * As per PHY design spec, PLL calibration status requires
+ * a minimum of 10us to be updated.
+ */
+ usleep_range(10, 100);
+ } while (--loop > 0);
+
+ cmu_rd(ctx, cmu_type, CMU_REG7, &val);
+ dev_dbg(ctx->dev, "PLL calibration %s\n",
+ CMU_REG7_PLL_CALIB_DONE_RD(val) ? "done" : "failed");
+ if (CMU_REG7_VCO_CAL_FAIL_RD(val)) {
+ dev_err(ctx->dev,
+ "PLL calibration failed due to VCO failure\n");
+ return -1;
+ }
+ dev_dbg(ctx->dev, "PLL calibration successful\n");
+
+ cmu_rd(ctx, cmu_type, CMU_REG15, &val);
+ dev_dbg(ctx->dev, "PHY Tx is %sready\n", val & 0x300 ? "" : "not ");
+ return 0;
+}
+
+static void xgene_phy_pdwn_force_vco(struct xgene_phy_ctx *ctx,
+ enum cmu_type_t cmu_type,
+ enum clk_type_t clk_type)
+{
+ u32 val;
+
+ dev_dbg(ctx->dev, "Reset VCO and re-start again\n");
+ if (cmu_type == PHY_CMU) {
+ cmu_rd(ctx, cmu_type, CMU_REG16, &val);
+ val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x7);
+ cmu_wr(ctx, cmu_type, CMU_REG16, val);
+ }
+
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG0, CMU_REG0_PDOWN_MASK);
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
+ CMU_REG32_FORCE_VCOCAL_START_MASK);
+}
+
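+/*
+ * Bring up the PHY for SATA operation: reset the PHY, select the operating
+ * speed and customer pin mode, configure the CMU and lanes, then run the
+ * PLL calibration with retries.
+ */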
+static int xgene_phy_hw_init_sata(struct xgene_phy_ctx *ctx,
+ enum clk_type_t clk_type, int ssc_enable)
+{
+ void __iomem *sds_base = ctx->sds_base;
+ u32 val;
+ int i;
+
+ /* Configure the PHY for operation */
+ dev_dbg(ctx->dev, "Reset PHY\n");
+ /* Place PHY into reset */
+ writel(0x0, sds_base + SATA_ENET_SDS_RST_CTL);
+ val = readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
+ /* Release PHY lane from reset (active high) */
+ writel(0x20, sds_base + SATA_ENET_SDS_RST_CTL);
+ readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
+	/* Release all PHY modules out of reset except the PHY main reset */
+ writel(0xde, sds_base + SATA_ENET_SDS_RST_CTL);
+ readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
+
+ /* Set the operation speed */
+ val = readl(sds_base + SATA_ENET_SDS_CTL1);
+ val = CFG_I_SPD_SEL_CDR_OVR1_SET(val,
+ ctx->sata_param.txspeed[ctx->sata_param.speed[0]]);
+ writel(val, sds_base + SATA_ENET_SDS_CTL1);
+
+ dev_dbg(ctx->dev, "Set the customer pin mode to SATA\n");
+ val = readl(sds_base + SATA_ENET_SDS_CTL0);
+ val = REGSPEC_CFG_I_CUSTOMER_PIN_MODE0_SET(val, 0x4421);
+ writel(val, sds_base + SATA_ENET_SDS_CTL0);
+
+ /* Configure the clock macro unit (CMU) clock type */
+ xgene_phy_cfg_cmu_clk_type(ctx, PHY_CMU, clk_type);
+
+ /* Configure the clock macro */
+ xgene_phy_sata_cfg_cmu_core(ctx, PHY_CMU, clk_type);
+
+	/* Enable spread-spectrum clocking (SSC) if requested */
+ if (ssc_enable)
+ xgene_phy_ssc_enable(ctx, PHY_CMU);
+
+ /* Configure PHY lanes */
+ xgene_phy_sata_cfg_lanes(ctx);
+
+ /* Set Rx/Tx 20-bit */
+ val = readl(sds_base + SATA_ENET_SDS_PCS_CTL0);
+ val = REGSPEC_CFG_I_RX_WORDMODE0_SET(val, 0x3);
+ val = REGSPEC_CFG_I_TX_WORDMODE0_SET(val, 0x3);
+ writel(val, sds_base + SATA_ENET_SDS_PCS_CTL0);
+
+	/* Start PLL calibration, retrying on failure (up to ten attempts) */
+ i = 10;
+ do {
+ if (!xgene_phy_cal_rdy_chk(ctx, PHY_CMU, clk_type))
+ break;
+ /* If failed, toggle the VCO power signal and start again */
+ xgene_phy_pdwn_force_vco(ctx, PHY_CMU, clk_type);
+ } while (--i > 0);
+	/* Even on failure, allow execution to continue anyway */
+ if (i <= 0)
+ dev_err(ctx->dev, "PLL calibration failed\n");
+
+ return 0;
+}
+
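+/* Dispatch hardware initialization according to the configured pin mode */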
+static int xgene_phy_hw_initialize(struct xgene_phy_ctx *ctx,
+ enum clk_type_t clk_type,
+ int ssc_enable)
+{
+ int rc;
+
+ dev_dbg(ctx->dev, "PHY init clk type %d\n", clk_type);
+
+ if (ctx->mode == MODE_SATA) {
+ rc = xgene_phy_hw_init_sata(ctx, clk_type, ssc_enable);
+ if (rc)
+ return rc;
+ } else {
+		dev_err(ctx->dev, "Unsupported customer pin mode %d\n",
+ ctx->mode);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * Receiver Offset Calibration:
+ *
+ * Calibrate the receiver signal path offset in two steps - summer and
+ * latch calibrations.
+ */
+static void xgene_phy_force_lat_summer_cal(struct xgene_phy_ctx *ctx, int lane)
+{
+ int i;
+ struct {
+ u32 reg;
+ u32 val;
+ } serdes_reg[] = {
+ {RXTX_REG38, 0x0},
+ {RXTX_REG39, 0xff00},
+ {RXTX_REG40, 0xffff},
+ {RXTX_REG41, 0xffff},
+ {RXTX_REG42, 0xffff},
+ {RXTX_REG43, 0xffff},
+ {RXTX_REG44, 0xffff},
+ {RXTX_REG45, 0xffff},
+ {RXTX_REG46, 0xffff},
+ {RXTX_REG47, 0xfffc},
+ {RXTX_REG48, 0x0},
+ {RXTX_REG49, 0x0},
+ {RXTX_REG50, 0x0},
+ {RXTX_REG51, 0x0},
+ {RXTX_REG52, 0x0},
+ {RXTX_REG53, 0x0},
+ {RXTX_REG54, 0x0},
+ {RXTX_REG55, 0x0},
+ };
+
+ /* Start SUMMER calibration */
+ serdes_setbits(ctx, lane, RXTX_REG127,
+ RXTX_REG127_FORCE_SUM_CAL_START_MASK);
+ /*
+ * As per PHY design spec, the Summer calibration requires a minimum
+ * of 100us to complete.
+ */
+ usleep_range(100, 500);
+ serdes_clrbits(ctx, lane, RXTX_REG127,
+ RXTX_REG127_FORCE_SUM_CAL_START_MASK);
+ /*
+ * As per PHY design spec, the auto calibration requires a minimum
+ * of 100us to complete.
+ */
+ usleep_range(100, 500);
+
+ /* Start latch calibration */
+ serdes_setbits(ctx, lane, RXTX_REG127,
+ RXTX_REG127_FORCE_LAT_CAL_START_MASK);
+ /*
+ * As per PHY design spec, the latch calibration requires a minimum
+ * of 100us to complete.
+ */
+ usleep_range(100, 500);
+ serdes_clrbits(ctx, lane, RXTX_REG127,
+ RXTX_REG127_FORCE_LAT_CAL_START_MASK);
+
+ /* Configure the PHY lane for calibration */
+ serdes_wr(ctx, lane, RXTX_REG28, 0x7);
+ serdes_wr(ctx, lane, RXTX_REG31, 0x7e00);
+ serdes_clrbits(ctx, lane, RXTX_REG4,
+ RXTX_REG4_TX_LOOPBACK_BUF_EN_MASK);
+ serdes_clrbits(ctx, lane, RXTX_REG7,
+ RXTX_REG7_LOOP_BACK_ENA_CTLE_MASK);
+ for (i = 0; i < ARRAY_SIZE(serdes_reg); i++)
+ serdes_wr(ctx, lane, serdes_reg[i].reg,
+ serdes_reg[i].val);
+}
+
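+/* Toggle the digital Rx reset so the next calibration run starts clean */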
+static void xgene_phy_reset_rxd(struct xgene_phy_ctx *ctx, int lane)
+{
+ /* Reset digital Rx */
+ serdes_clrbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK);
+ /* As per PHY design spec, the reset requires a minimum of 100us. */
+ usleep_range(100, 150);
+ serdes_setbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK);
+}
+
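+/* Rounded integer average: bias the accumulator by half the sample count */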
+static int xgene_phy_get_avg(int accum, int samples)
+{
+ return (accum + (samples / 2)) / samples;
+}
+
+static void xgene_phy_gen_avg_val(struct xgene_phy_ctx *ctx, int lane)
+{
+ int max_loop = 10;
+ int avg_loop = 0;
+ int lat_do = 0, lat_xo = 0, lat_eo = 0, lat_so = 0;
+ int lat_de = 0, lat_xe = 0, lat_ee = 0, lat_se = 0;
+ int sum_cal = 0;
+ int lat_do_itr, lat_xo_itr, lat_eo_itr, lat_so_itr;
+ int lat_de_itr, lat_xe_itr, lat_ee_itr, lat_se_itr;
+ int sum_cal_itr;
+ int fail_even;
+ int fail_odd;
+ u32 val;
+
+ dev_dbg(ctx->dev, "Generating avg calibration value for lane %d\n",
+ lane);
+
+ /* Enable RX Hi-Z termination */
+ serdes_setbits(ctx, lane, RXTX_REG12,
+ RXTX_REG12_RX_DET_TERM_ENABLE_MASK);
+ /* Turn off DFE */
+ serdes_wr(ctx, lane, RXTX_REG28, 0x0000);
+ /* DFE Presets to zero */
+ serdes_wr(ctx, lane, RXTX_REG31, 0x0000);
+
+ /*
+ * Receiver Offset Calibration:
+	 * Calibrate the receiver signal path offset in two steps - summer
+	 * and latch calibration.
+	 * Run the Receiver Offset Calibration multiple times to determine
+	 * the average value to use.
+ */
+ while (avg_loop < max_loop) {
+ /* Start the calibration */
+ xgene_phy_force_lat_summer_cal(ctx, lane);
+
+ serdes_rd(ctx, lane, RXTX_REG21, &val);
+ lat_do_itr = RXTX_REG21_DO_LATCH_CALOUT_RD(val);
+ lat_xo_itr = RXTX_REG21_XO_LATCH_CALOUT_RD(val);
+ fail_odd = RXTX_REG21_LATCH_CAL_FAIL_ODD_RD(val);
+
+ serdes_rd(ctx, lane, RXTX_REG22, &val);
+ lat_eo_itr = RXTX_REG22_EO_LATCH_CALOUT_RD(val);
+ lat_so_itr = RXTX_REG22_SO_LATCH_CALOUT_RD(val);
+ fail_even = RXTX_REG22_LATCH_CAL_FAIL_EVEN_RD(val);
+
+ serdes_rd(ctx, lane, RXTX_REG23, &val);
+ lat_de_itr = RXTX_REG23_DE_LATCH_CALOUT_RD(val);
+ lat_xe_itr = RXTX_REG23_XE_LATCH_CALOUT_RD(val);
+
+ serdes_rd(ctx, lane, RXTX_REG24, &val);
+ lat_ee_itr = RXTX_REG24_EE_LATCH_CALOUT_RD(val);
+ lat_se_itr = RXTX_REG24_SE_LATCH_CALOUT_RD(val);
+
+ serdes_rd(ctx, lane, RXTX_REG121, &val);
+ sum_cal_itr = RXTX_REG121_SUMOS_CAL_CODE_RD(val);
+
+ /* Check for failure. If passed, sum them for averaging */
+ if ((fail_even == 0 || fail_even == 1) &&
+ (fail_odd == 0 || fail_odd == 1)) {
+ lat_do += lat_do_itr;
+ lat_xo += lat_xo_itr;
+ lat_eo += lat_eo_itr;
+ lat_so += lat_so_itr;
+ lat_de += lat_de_itr;
+ lat_xe += lat_xe_itr;
+ lat_ee += lat_ee_itr;
+ lat_se += lat_se_itr;
+ sum_cal += sum_cal_itr;
+
+ dev_dbg(ctx->dev, "Iteration %d:\n", avg_loop);
+ dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n",
+ lat_do_itr, lat_xo_itr, lat_eo_itr,
+ lat_so_itr);
+ dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n",
+ lat_de_itr, lat_xe_itr, lat_ee_itr,
+ lat_se_itr);
+ dev_dbg(ctx->dev, "SUM 0x%x\n", sum_cal_itr);
+ ++avg_loop;
+ } else {
+ dev_err(ctx->dev,
+				"Receiver calibration failed at loop %d\n",
+ avg_loop);
+ }
+ xgene_phy_reset_rxd(ctx, lane);
+ }
+
+ /* Update latch manual calibration with average value */
+ serdes_rd(ctx, lane, RXTX_REG127, &val);
+ val = RXTX_REG127_DO_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_do, max_loop));
+ val = RXTX_REG127_XO_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_xo, max_loop));
+ serdes_wr(ctx, lane, RXTX_REG127, val);
+
+ serdes_rd(ctx, lane, RXTX_REG128, &val);
+ val = RXTX_REG128_EO_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_eo, max_loop));
+ val = RXTX_REG128_SO_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_so, max_loop));
+ serdes_wr(ctx, lane, RXTX_REG128, val);
+
+ serdes_rd(ctx, lane, RXTX_REG129, &val);
+ val = RXTX_REG129_DE_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_de, max_loop));
+ val = RXTX_REG129_XE_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_xe, max_loop));
+ serdes_wr(ctx, lane, RXTX_REG129, val);
+
+ serdes_rd(ctx, lane, RXTX_REG130, &val);
+ val = RXTX_REG130_EE_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_ee, max_loop));
+ val = RXTX_REG130_SE_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_se, max_loop));
+ serdes_wr(ctx, lane, RXTX_REG130, val);
+
+ /* Update SUMMER calibration with average value */
+ serdes_rd(ctx, lane, RXTX_REG14, &val);
+ val = RXTX_REG14_CLTE_LATCAL_MAN_PROG_SET(val,
+ xgene_phy_get_avg(sum_cal, max_loop));
+ serdes_wr(ctx, lane, RXTX_REG14, val);
+
+ dev_dbg(ctx->dev, "Average Value:\n");
+ dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n",
+ xgene_phy_get_avg(lat_do, max_loop),
+ xgene_phy_get_avg(lat_xo, max_loop),
+ xgene_phy_get_avg(lat_eo, max_loop),
+ xgene_phy_get_avg(lat_so, max_loop));
+ dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n",
+ xgene_phy_get_avg(lat_de, max_loop),
+ xgene_phy_get_avg(lat_xe, max_loop),
+ xgene_phy_get_avg(lat_ee, max_loop),
+ xgene_phy_get_avg(lat_se, max_loop));
+ dev_dbg(ctx->dev, "SUM 0x%x\n",
+ xgene_phy_get_avg(sum_cal, max_loop));
+
+ serdes_rd(ctx, lane, RXTX_REG14, &val);
+ val = RXTX_REG14_CTLE_LATCAL_MAN_ENA_SET(val, 0x1);
+ serdes_wr(ctx, lane, RXTX_REG14, val);
+ dev_dbg(ctx->dev, "Enable Manual Summer calibration\n");
+
+ serdes_rd(ctx, lane, RXTX_REG127, &val);
+ val = RXTX_REG127_LATCH_MAN_CAL_ENA_SET(val, 0x1);
+ dev_dbg(ctx->dev, "Enable Manual Latch calibration\n");
+ serdes_wr(ctx, lane, RXTX_REG127, val);
+
+ /* Disable RX Hi-Z termination */
+ serdes_rd(ctx, lane, RXTX_REG12, &val);
+ val = RXTX_REG12_RX_DET_TERM_ENABLE_SET(val, 0);
+ serdes_wr(ctx, lane, RXTX_REG12, val);
+ /* Turn on DFE */
+ serdes_wr(ctx, lane, RXTX_REG28, 0x0007);
+ /* Set DFE preset */
+ serdes_wr(ctx, lane, RXTX_REG31, 0x7e00);
+}
+
+static int xgene_phy_hw_init(struct phy *phy)
+{
+ struct xgene_phy_ctx *ctx = phy_get_drvdata(phy);
+ int rc;
+ int i;
+
+ rc = xgene_phy_hw_initialize(ctx, CLK_EXT_DIFF, SSC_DISABLE);
+ if (rc) {
+ dev_err(ctx->dev, "PHY initialize failed %d\n", rc);
+ return rc;
+ }
+
+	/* Set up the clock properly after PHY configuration */
+ if (!IS_ERR(ctx->clk)) {
+		/* HW requires a toggle of the clock */
+ clk_prepare_enable(ctx->clk);
+ clk_disable_unprepare(ctx->clk);
+ clk_prepare_enable(ctx->clk);
+ }
+
+ /* Compute average value */
+ for (i = 0; i < MAX_LANE; i++)
+ xgene_phy_gen_avg_val(ctx, i);
+
+ dev_dbg(ctx->dev, "PHY initialized\n");
+ return 0;
+}
+
+static const struct phy_ops xgene_phy_ops = {
+ .init = xgene_phy_hw_init,
+ .owner = THIS_MODULE,
+};
+
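+/* Map the DT phandle argument (PHY operating mode) onto the PHY instance */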
+static struct phy *xgene_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct xgene_phy_ctx *ctx = dev_get_drvdata(dev);
+
+ if (args->args_count <= 0)
+ return ERR_PTR(-EINVAL);
+ if (args->args[0] < MODE_SATA || args->args[0] >= MODE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ ctx->mode = args->args[0];
+ return ctx->phy;
+}
+
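+/*
+ * Read an optional u32 array property and scale each entry by conv_factor;
+ * fall back to the supplied defaults when the property is absent.
+ */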
+static void xgene_phy_get_param(struct platform_device *pdev,
+ const char *name, u32 *buffer,
+ int count, u32 *default_val,
+ u32 conv_factor)
+{
+ int i;
+
+ if (!of_property_read_u32_array(pdev->dev.of_node, name, buffer,
+ count)) {
+ for (i = 0; i < count; i++)
+ buffer[i] /= conv_factor;
+ return;
+ }
+	/* Property does not exist; load the defaults */
+ for (i = 0; i < count; i++)
+ buffer[i] = default_val[i % 3];
+}
+
+static int xgene_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct xgene_phy_ctx *ctx;
+ struct resource *res;
+ int rc = 0;
+ u32 default_spd[] = DEFAULT_SATA_SPD_SEL;
+ u32 default_txboost_gain[] = DEFAULT_SATA_TXBOOST_GAIN;
+ u32 default_txeye_direction[] = DEFAULT_SATA_TXEYEDIRECTION;
+ u32 default_txeye_tuning[] = DEFAULT_SATA_TXEYETUNING;
+ u32 default_txamp[] = DEFAULT_SATA_TXAMP;
+ u32 default_txcn1[] = DEFAULT_SATA_TXCN1;
+ u32 default_txcn2[] = DEFAULT_SATA_TXCN2;
+ u32 default_txcp1[] = DEFAULT_SATA_TXCP1;
+ int i;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->sds_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->sds_base)) {
+ rc = PTR_ERR(ctx->sds_base);
+ goto error;
+ }
+
+ /* Retrieve optional clock */
+ ctx->clk = clk_get(&pdev->dev, NULL);
+
+	/* Load override parameters */
+ xgene_phy_get_param(pdev, "apm,tx-eye-tuning",
+ ctx->sata_param.txeyetuning, 6, default_txeye_tuning, 1);
+ xgene_phy_get_param(pdev, "apm,tx-eye-direction",
+ ctx->sata_param.txeyedirection, 6, default_txeye_direction, 1);
+ xgene_phy_get_param(pdev, "apm,tx-boost-gain",
+ ctx->sata_param.txboostgain, 6, default_txboost_gain, 1);
+ xgene_phy_get_param(pdev, "apm,tx-amplitude",
+ ctx->sata_param.txamplitude, 6, default_txamp, 13300);
+ xgene_phy_get_param(pdev, "apm,tx-pre-cursor1",
+ ctx->sata_param.txprecursor_cn1, 6, default_txcn1, 18200);
+ xgene_phy_get_param(pdev, "apm,tx-pre-cursor2",
+ ctx->sata_param.txprecursor_cn2, 6, default_txcn2, 18200);
+ xgene_phy_get_param(pdev, "apm,tx-post-cursor",
+ ctx->sata_param.txpostcursor_cp1, 6, default_txcp1, 18200);
+ xgene_phy_get_param(pdev, "apm,tx-speed",
+ ctx->sata_param.txspeed, 3, default_spd, 1);
+ for (i = 0; i < MAX_LANE; i++)
+ ctx->sata_param.speed[i] = 2; /* Default to Gen3 */
+
+ ctx->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ctx);
+
+ ctx->phy = devm_phy_create(ctx->dev, &xgene_phy_ops, NULL);
+ if (IS_ERR(ctx->phy)) {
+ dev_dbg(&pdev->dev, "Failed to create PHY\n");
+ rc = PTR_ERR(ctx->phy);
+ goto error;
+ }
+ phy_set_drvdata(ctx->phy, ctx);
+
+ phy_provider = devm_of_phy_provider_register(ctx->dev,
+ xgene_phy_xlate);
+ if (IS_ERR(phy_provider)) {
+ rc = PTR_ERR(phy_provider);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ return rc;
+}
+
+static const struct of_device_id xgene_phy_of_match[] = {
+ {.compatible = "apm,xgene-phy",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_phy_of_match);
+
+static struct platform_driver xgene_phy_driver = {
+ .probe = xgene_phy_probe,
+ .driver = {
+ .name = "xgene-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = xgene_phy_of_match,
+ },
+};
+module_platform_driver(xgene_phy_driver);
+
+MODULE_DESCRIPTION("APM X-Gene Multi-Purpose PHY driver");
+MODULE_AUTHOR("Loc Ho <lho@apm.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 1e4e69384baa..e49324032611 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -224,7 +224,7 @@ config PINCTRL_MSM
config PINCTRL_MSM8X74
tristate "Qualcomm 8x74 pin controller driver"
- depends on GPIOLIB && OF && OF_IRQ
+ depends on GPIOLIB && OF
select PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -235,6 +235,9 @@ config PINCTRL_NOMADIK
depends on ARCH_U8500 || ARCH_NOMADIK
select PINMUX
select PINCONF
+ select GPIOLIB
+ select OF_GPIO
+ select GPIOLIB_IRQCHIP
config PINCTRL_STN8815
bool "STN8815 pin controller driver"
@@ -321,6 +324,7 @@ config PINCTRL_U300
config PINCTRL_COH901
bool "ST-Ericsson U300 COH 901 335/571 GPIO"
depends on GPIOLIB && ARCH_U300 && PINCTRL_U300
+ select GPIOLIB_IRQCHIP
help
Say yes here to support GPIO interface on ST-Ericsson U300.
The names of the two IP block variants supported are
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 340fb4e6c600..eda13de2e7c0 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -186,7 +186,9 @@ int pinctrl_dt_to_map(struct pinctrl *p)
/* CONFIG_OF enabled, p->dev not instantiated from DT */
if (!np) {
- dev_dbg(p->dev, "no of_node; not parsing pinctrl DT\n");
+ if (of_have_populated_dt())
+ dev_dbg(p->dev,
+ "no of_node; not parsing pinctrl DT\n");
return 0;
}
diff --git a/drivers/pinctrl/mvebu/Kconfig b/drivers/pinctrl/mvebu/Kconfig
index 366fa541ee91..cc298fade93a 100644
--- a/drivers/pinctrl/mvebu/Kconfig
+++ b/drivers/pinctrl/mvebu/Kconfig
@@ -8,6 +8,7 @@ config PINCTRL_MVEBU
config PINCTRL_DOVE
bool
select PINCTRL_MVEBU
+ select MFD_SYSCON
config PINCTRL_KIRKWOOD
bool
@@ -17,6 +18,14 @@ config PINCTRL_ARMADA_370
bool
select PINCTRL_MVEBU
+config PINCTRL_ARMADA_375
+ bool
+ select PINCTRL_MVEBU
+
+config PINCTRL_ARMADA_38X
+ bool
+ select PINCTRL_MVEBU
+
config PINCTRL_ARMADA_XP
bool
select PINCTRL_MVEBU
diff --git a/drivers/pinctrl/mvebu/Makefile b/drivers/pinctrl/mvebu/Makefile
index 37c253297af0..bc1b9f14f539 100644
--- a/drivers/pinctrl/mvebu/Makefile
+++ b/drivers/pinctrl/mvebu/Makefile
@@ -2,4 +2,6 @@ obj-$(CONFIG_PINCTRL_MVEBU) += pinctrl-mvebu.o
obj-$(CONFIG_PINCTRL_DOVE) += pinctrl-dove.o
obj-$(CONFIG_PINCTRL_KIRKWOOD) += pinctrl-kirkwood.o
obj-$(CONFIG_PINCTRL_ARMADA_370) += pinctrl-armada-370.o
+obj-$(CONFIG_PINCTRL_ARMADA_375) += pinctrl-armada-375.o
+obj-$(CONFIG_PINCTRL_ARMADA_38X) += pinctrl-armada-38x.o
obj-$(CONFIG_PINCTRL_ARMADA_XP) += pinctrl-armada-xp.o
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index ae1f760cbdd2..670e5b01c678 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -23,6 +23,18 @@
#include "pinctrl-mvebu.h"
+static void __iomem *mpp_base;
+
+static int armada_370_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_370_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
MPP_MODE(0,
MPP_FUNCTION(0x0, "gpio", NULL),
@@ -373,7 +385,7 @@ static struct of_device_id armada_370_pinctrl_of_match[] = {
};
static struct mvebu_mpp_ctrl mv88f6710_mpp_controls[] = {
- MPP_REG_CTRL(0, 65),
+ MPP_FUNC_CTRL(0, 65, NULL, armada_370_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
@@ -385,6 +397,12 @@ static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
static int armada_370_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_370_pinctrl_info;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
soc->variant = 0; /* no variants for Armada 370 */
soc->controls = mv88f6710_mpp_controls;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
new file mode 100644
index 000000000000..db078fe7ace6
--- /dev/null
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
@@ -0,0 +1,459 @@
+/*
+ * Marvell Armada 375 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+static void __iomem *mpp_base;
+
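+/* Bind the generic MPP register accessors to this driver's ioremapped base */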
+static int armada_375_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_375_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
+static struct mvebu_mpp_mode mv88f6720_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad2"),
+ MPP_FUNCTION(0x2, "spi0", "cs1"),
+ MPP_FUNCTION(0x3, "spi1", "cs1"),
+ MPP_FUNCTION(0x5, "nand", "io2")),
+ MPP_MODE(1,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad3"),
+ MPP_FUNCTION(0x2, "spi0", "mosi"),
+ MPP_FUNCTION(0x3, "spi1", "mosi"),
+ MPP_FUNCTION(0x5, "nand", "io3")),
+ MPP_MODE(2,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad4"),
+ MPP_FUNCTION(0x2, "ptp", "eventreq"),
+ MPP_FUNCTION(0x3, "led", "c0"),
+ MPP_FUNCTION(0x4, "audio", "sdi"),
+ MPP_FUNCTION(0x5, "nand", "io4"),
+ MPP_FUNCTION(0x6, "spi1", "mosi")),
+ MPP_MODE(3,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad5"),
+ MPP_FUNCTION(0x2, "ptp", "triggen"),
+ MPP_FUNCTION(0x3, "led", "p3"),
+ MPP_FUNCTION(0x4, "audio", "mclk"),
+ MPP_FUNCTION(0x5, "nand", "io5"),
+ MPP_FUNCTION(0x6, "spi1", "miso")),
+ MPP_MODE(4,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad6"),
+ MPP_FUNCTION(0x2, "spi0", "miso"),
+ MPP_FUNCTION(0x3, "spi1", "miso"),
+ MPP_FUNCTION(0x5, "nand", "io6")),
+ MPP_MODE(5,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad7"),
+ MPP_FUNCTION(0x2, "spi0", "cs2"),
+ MPP_FUNCTION(0x3, "spi1", "cs2"),
+ MPP_FUNCTION(0x5, "nand", "io7"),
+ MPP_FUNCTION(0x6, "spi1", "miso")),
+ MPP_MODE(6,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad0"),
+ MPP_FUNCTION(0x3, "led", "p1"),
+ MPP_FUNCTION(0x4, "audio", "rclk"),
+ MPP_FUNCTION(0x5, "nand", "io0")),
+ MPP_MODE(7,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad1"),
+ MPP_FUNCTION(0x2, "ptp", "clk"),
+ MPP_FUNCTION(0x3, "led", "p2"),
+ MPP_FUNCTION(0x4, "audio", "extclk"),
+ MPP_FUNCTION(0x5, "nand", "io1")),
+ MPP_MODE(8,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "bootcs"),
+ MPP_FUNCTION(0x2, "spi0", "cs0"),
+ MPP_FUNCTION(0x3, "spi1", "cs0"),
+ MPP_FUNCTION(0x5, "nand", "ce")),
+ MPP_MODE(9,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "nf", "wen"),
+ MPP_FUNCTION(0x2, "spi0", "sck"),
+ MPP_FUNCTION(0x3, "spi1", "sck"),
+ MPP_FUNCTION(0x5, "nand", "we")),
+ MPP_MODE(10,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "nf", "ren"),
+ MPP_FUNCTION(0x2, "dram", "vttctrl"),
+ MPP_FUNCTION(0x3, "led", "c1"),
+ MPP_FUNCTION(0x5, "nand", "re"),
+ MPP_FUNCTION(0x6, "spi1", "sck")),
+ MPP_MODE(11,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "a0"),
+ MPP_FUNCTION(0x3, "led", "c2"),
+ MPP_FUNCTION(0x4, "audio", "sdo"),
+ MPP_FUNCTION(0x5, "nand", "cle")),
+ MPP_MODE(12,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "a1"),
+ MPP_FUNCTION(0x4, "audio", "bclk"),
+ MPP_FUNCTION(0x5, "nand", "ale")),
+ MPP_MODE(13,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "readyn"),
+ MPP_FUNCTION(0x2, "pcie0", "rstoutn"),
+ MPP_FUNCTION(0x3, "pcie1", "rstoutn"),
+ MPP_FUNCTION(0x5, "nand", "rb"),
+ MPP_FUNCTION(0x6, "spi1", "mosi")),
+ MPP_MODE(14,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "i2c0", "sda"),
+ MPP_FUNCTION(0x3, "uart1", "txd")),
+ MPP_MODE(15,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "i2c0", "sck"),
+ MPP_FUNCTION(0x3, "uart1", "rxd")),
+ MPP_MODE(16,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "uart0", "txd")),
+ MPP_MODE(17,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "uart0", "rxd")),
+ MPP_MODE(18,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "intn")),
+ MPP_MODE(19,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "rstn")),
+ MPP_MODE(20,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "pclk")),
+ MPP_MODE(21,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "fsync")),
+ MPP_MODE(22,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "drx")),
+ MPP_MODE(23,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "dtx")),
+ MPP_MODE(24,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p0"),
+ MPP_FUNCTION(0x2, "ge1", "rxd0"),
+ MPP_FUNCTION(0x3, "sd", "cmd"),
+ MPP_FUNCTION(0x4, "uart0", "rts"),
+ MPP_FUNCTION(0x5, "spi0", "cs0"),
+ MPP_FUNCTION(0x6, "dev", "cs1")),
+ MPP_MODE(25,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p2"),
+ MPP_FUNCTION(0x2, "ge1", "rxd1"),
+ MPP_FUNCTION(0x3, "sd", "d0"),
+ MPP_FUNCTION(0x4, "uart0", "cts"),
+ MPP_FUNCTION(0x5, "spi0", "mosi"),
+ MPP_FUNCTION(0x6, "dev", "cs2")),
+ MPP_MODE(26,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+ MPP_FUNCTION(0x2, "ge1", "rxd2"),
+ MPP_FUNCTION(0x3, "sd", "d2"),
+ MPP_FUNCTION(0x4, "uart1", "rts"),
+ MPP_FUNCTION(0x5, "spi0", "cs1"),
+ MPP_FUNCTION(0x6, "led", "c1")),
+ MPP_MODE(27,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+ MPP_FUNCTION(0x2, "ge1", "rxd3"),
+ MPP_FUNCTION(0x3, "sd", "d1"),
+ MPP_FUNCTION(0x4, "uart1", "cts"),
+ MPP_FUNCTION(0x5, "spi0", "miso"),
+ MPP_FUNCTION(0x6, "led", "c2")),
+ MPP_MODE(28,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p3"),
+ MPP_FUNCTION(0x2, "ge1", "txctl"),
+ MPP_FUNCTION(0x3, "sd", "clk"),
+ MPP_FUNCTION(0x5, "dram", "vttctrl")),
+ MPP_MODE(29,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+ MPP_FUNCTION(0x2, "ge1", "rxclk"),
+ MPP_FUNCTION(0x3, "sd", "d3"),
+ MPP_FUNCTION(0x5, "spi0", "sck"),
+ MPP_FUNCTION(0x6, "pcie0", "rstoutn")),
+ MPP_MODE(30,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "txd0"),
+ MPP_FUNCTION(0x3, "spi1", "cs0"),
+ MPP_FUNCTION(0x5, "led", "p3"),
+ MPP_FUNCTION(0x6, "ptp", "eventreq")),
+ MPP_MODE(31,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "txd1"),
+ MPP_FUNCTION(0x3, "spi1", "mosi"),
+ MPP_FUNCTION(0x5, "led", "p0")),
+ MPP_MODE(32,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "txd2"),
+ MPP_FUNCTION(0x3, "spi1", "sck"),
+ MPP_FUNCTION(0x4, "ptp", "triggen"),
+ MPP_FUNCTION(0x5, "led", "c0")),
+ MPP_MODE(33,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "txd3"),
+ MPP_FUNCTION(0x3, "spi1", "miso"),
+ MPP_FUNCTION(0x5, "led", "p2")),
+ MPP_MODE(34,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "txclkout"),
+ MPP_FUNCTION(0x3, "spi1", "sck"),
+ MPP_FUNCTION(0x5, "led", "c1")),
+ MPP_MODE(35,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "rxctl"),
+ MPP_FUNCTION(0x3, "spi1", "cs1"),
+ MPP_FUNCTION(0x4, "spi0", "cs2"),
+ MPP_FUNCTION(0x5, "led", "p1")),
+ MPP_MODE(36,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+ MPP_FUNCTION(0x5, "led", "c2")),
+ MPP_MODE(37,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+ MPP_FUNCTION(0x2, "tdm", "intn"),
+ MPP_FUNCTION(0x4, "ge", "mdc")),
+ MPP_MODE(38,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+ MPP_FUNCTION(0x4, "ge", "mdio")),
+ MPP_MODE(39,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "ref", "clkout"),
+ MPP_FUNCTION(0x5, "led", "p3")),
+ MPP_MODE(40,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "uart1", "txd"),
+ MPP_FUNCTION(0x5, "led", "p0")),
+ MPP_MODE(41,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "uart1", "rxd"),
+ MPP_FUNCTION(0x5, "led", "p1")),
+ MPP_MODE(42,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x3, "spi1", "cs2"),
+ MPP_FUNCTION(0x4, "led", "c0"),
+ MPP_FUNCTION(0x6, "ptp", "clk")),
+ MPP_MODE(43,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "sata0", "prsnt"),
+ MPP_FUNCTION(0x4, "dram", "vttctrl"),
+ MPP_FUNCTION(0x5, "led", "c1")),
+ MPP_MODE(44,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "sata0", "prsnt")),
+ MPP_MODE(45,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "spi0", "cs2"),
+ MPP_FUNCTION(0x4, "pcie0", "rstoutn"),
+ MPP_FUNCTION(0x5, "led", "c2"),
+ MPP_FUNCTION(0x6, "spi1", "cs2")),
+ MPP_MODE(46,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p0"),
+ MPP_FUNCTION(0x2, "ge0", "txd0"),
+ MPP_FUNCTION(0x3, "ge1", "txd0"),
+ MPP_FUNCTION(0x6, "dev", "wen1")),
+ MPP_MODE(47,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p1"),
+ MPP_FUNCTION(0x2, "ge0", "txd1"),
+ MPP_FUNCTION(0x3, "ge1", "txd1"),
+ MPP_FUNCTION(0x5, "ptp", "triggen"),
+ MPP_FUNCTION(0x6, "dev", "ale0")),
+ MPP_MODE(48,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p2"),
+ MPP_FUNCTION(0x2, "ge0", "txd2"),
+ MPP_FUNCTION(0x3, "ge1", "txd2"),
+ MPP_FUNCTION(0x6, "dev", "ale1")),
+ MPP_MODE(49,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p3"),
+ MPP_FUNCTION(0x2, "ge0", "txd3"),
+ MPP_FUNCTION(0x3, "ge1", "txd3"),
+ MPP_FUNCTION(0x6, "dev", "a2")),
+ MPP_MODE(50,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "c0"),
+ MPP_FUNCTION(0x2, "ge0", "rxd0"),
+ MPP_FUNCTION(0x3, "ge1", "rxd0"),
+ MPP_FUNCTION(0x5, "ptp", "eventreq"),
+ MPP_FUNCTION(0x6, "dev", "ad12")),
+ MPP_MODE(51,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "c1"),
+ MPP_FUNCTION(0x2, "ge0", "rxd1"),
+ MPP_FUNCTION(0x3, "ge1", "rxd1"),
+ MPP_FUNCTION(0x6, "dev", "ad8")),
+ MPP_MODE(52,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "c2"),
+ MPP_FUNCTION(0x2, "ge0", "rxd2"),
+ MPP_FUNCTION(0x3, "ge1", "rxd2"),
+ MPP_FUNCTION(0x5, "i2c0", "sda"),
+ MPP_FUNCTION(0x6, "dev", "ad9")),
+ MPP_MODE(53,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie1", "rstoutn"),
+ MPP_FUNCTION(0x2, "ge0", "rxd3"),
+ MPP_FUNCTION(0x3, "ge1", "rxd3"),
+ MPP_FUNCTION(0x5, "i2c0", "sck"),
+ MPP_FUNCTION(0x6, "dev", "ad10")),
+ MPP_MODE(54,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie0", "rstoutn"),
+ MPP_FUNCTION(0x2, "ge0", "rxctl"),
+ MPP_FUNCTION(0x3, "ge1", "rxctl"),
+ MPP_FUNCTION(0x6, "dev", "ad11")),
+ MPP_MODE(55,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge0", "rxclk"),
+ MPP_FUNCTION(0x3, "ge1", "rxclk"),
+ MPP_FUNCTION(0x6, "dev", "cs0")),
+ MPP_MODE(56,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge0", "txclkout"),
+ MPP_FUNCTION(0x3, "ge1", "txclkout"),
+ MPP_FUNCTION(0x6, "dev", "oe")),
+ MPP_MODE(57,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge0", "txctl"),
+ MPP_FUNCTION(0x3, "ge1", "txctl"),
+ MPP_FUNCTION(0x6, "dev", "wen0")),
+ MPP_MODE(58,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "led", "c0")),
+ MPP_MODE(59,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "led", "c1")),
+ MPP_MODE(60,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "uart1", "txd"),
+ MPP_FUNCTION(0x4, "led", "c2"),
+ MPP_FUNCTION(0x6, "dev", "ad13")),
+ MPP_MODE(61,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "i2c1", "sda"),
+ MPP_FUNCTION(0x2, "uart1", "rxd"),
+ MPP_FUNCTION(0x3, "spi1", "cs2"),
+ MPP_FUNCTION(0x4, "led", "p0"),
+ MPP_FUNCTION(0x6, "dev", "ad14")),
+ MPP_MODE(62,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "i2c1", "sck"),
+ MPP_FUNCTION(0x4, "led", "p1"),
+ MPP_FUNCTION(0x6, "dev", "ad15")),
+ MPP_MODE(63,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ptp", "triggen"),
+ MPP_FUNCTION(0x4, "led", "p2"),
+ MPP_FUNCTION(0x6, "dev", "burst")),
+ MPP_MODE(64,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "dram", "vttctrl"),
+ MPP_FUNCTION(0x4, "led", "p3")),
+ MPP_MODE(65,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "sata1", "prsnt")),
+ MPP_MODE(66,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ptp", "eventreq"),
+ MPP_FUNCTION(0x4, "spi1", "cs3"),
+ MPP_FUNCTION(0x5, "pcie0", "rstoutn"),
+ MPP_FUNCTION(0x6, "dev", "cs3")),
+};
+
+static struct mvebu_pinctrl_soc_info armada_375_pinctrl_info;
+
+static struct of_device_id armada_375_pinctrl_of_match[] = {
+ { .compatible = "marvell,mv88f6720-pinctrl" },
+ { },
+};
+
+static struct mvebu_mpp_ctrl mv88f6720_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 69, NULL, armada_375_mpp_ctrl),
+};
+
+static struct pinctrl_gpio_range mv88f6720_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 32),
+ MPP_GPIO_RANGE(2, 64, 64, 3),
+};
+
+static int armada_375_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = &armada_375_pinctrl_info;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
+
+ soc->variant = 0; /* no variants for Armada 375 */
+ soc->controls = mv88f6720_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(mv88f6720_mpp_controls);
+ soc->modes = mv88f6720_mpp_modes;
+ soc->nmodes = ARRAY_SIZE(mv88f6720_mpp_modes);
+ soc->gpioranges = mv88f6720_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(mv88f6720_mpp_gpio_ranges);
+
+ pdev->dev.platform_data = soc;
+
+ return mvebu_pinctrl_probe(pdev);
+}
+
+static int armada_375_pinctrl_remove(struct platform_device *pdev)
+{
+ return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver armada_375_pinctrl_driver = {
+ .driver = {
+ .name = "armada-375-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(armada_375_pinctrl_of_match),
+ },
+ .probe = armada_375_pinctrl_probe,
+ .remove = armada_375_pinctrl_remove,
+};
+
+module_platform_driver(armada_375_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 375 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
new file mode 100644
index 000000000000..1049f82fb62f
--- /dev/null
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
@@ -0,0 +1,462 @@
+/*
+ * Marvell Armada 380/385 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+static void __iomem *mpp_base;
+
+static int armada_38x_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_38x_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
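+/* Variants are bit flags so each pin function can list which SoCs support it */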
+enum {
+ V_88F6810 = BIT(0),
+ V_88F6820 = BIT(1),
+ V_88F6828 = BIT(2),
+ V_88F6810_PLUS = (V_88F6810 | V_88F6820 | V_88F6828),
+ V_88F6820_PLUS = (V_88F6820 | V_88F6828),
+};
+
+static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua0", "rxd", V_88F6810_PLUS)),
+ MPP_MODE(1,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua0", "txd", V_88F6810_PLUS)),
+ MPP_MODE(2,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c0", "sck", V_88F6810_PLUS)),
+ MPP_MODE(3,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c0", "sda", V_88F6810_PLUS)),
+ MPP_MODE(4,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge", "mdc", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ua1", "txd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "rts", V_88F6810_PLUS)),
+ MPP_MODE(5,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge", "mdio", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ua1", "rxd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "cts", V_88F6810_PLUS)),
+ MPP_MODE(6,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txclkout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge0", "crs", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs3", V_88F6810_PLUS)),
+ MPP_MODE(7,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txd0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad9", V_88F6810_PLUS)),
+ MPP_MODE(8,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txd1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad10", V_88F6810_PLUS)),
+ MPP_MODE(9,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txd2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad11", V_88F6810_PLUS)),
+ MPP_MODE(10,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txd3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad12", V_88F6810_PLUS)),
+ MPP_MODE(11,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txctl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad13", V_88F6810_PLUS)),
+ MPP_MODE(12,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxd0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "cs1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad14", V_88F6810_PLUS)),
+ MPP_MODE(13,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxd1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie0", "clkreq", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "clkreq", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "cs2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad15", V_88F6810_PLUS)),
+ MPP_MODE(14,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxd2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ptp", "clk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "vtt_ctrl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "cs3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "wen1", V_88F6810_PLUS)),
+ MPP_MODE(15,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxd3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge", "mdc slave", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "mosi", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "pcie1", "rstout", V_88F6820_PLUS)),
+ MPP_MODE(16,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxctl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge", "mdio slave", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "decc_err", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "miso", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "pcie0", "clkreq", V_88F6810_PLUS)),
+ MPP_MODE(17,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ptp", "clk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "rxd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sata1", "prsnt", V_88F6810_PLUS)),
+ MPP_MODE(18,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxerr", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ptp", "trig_gen", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "txd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "cs0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "pcie1", "rstout", V_88F6820_PLUS)),
+ MPP_MODE(19,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "col", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ptp", "event_req", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie0", "clkreq", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sata1", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "ua0", "cts", V_88F6810_PLUS)),
+ MPP_MODE(20,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ptp", "clk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "ua0", "rts", V_88F6810_PLUS)),
+ MPP_MODE(21,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxd0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "cmd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "bootcs", V_88F6810_PLUS)),
+ MPP_MODE(22,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "mosi", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad0", V_88F6810_PLUS)),
+ MPP_MODE(23,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad2", V_88F6810_PLUS)),
+ MPP_MODE(24,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "miso", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ua0", "cts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "rxd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d4", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ready", V_88F6810_PLUS)),
+ MPP_MODE(25,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ua0", "rts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "txd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d5", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs0", V_88F6810_PLUS)),
+ MPP_MODE(26,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "i2c1", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d6", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs1", V_88F6810_PLUS)),
+ MPP_MODE(27,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txclkout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "i2c1", "sda", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d7", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs2", V_88F6810_PLUS)),
+ MPP_MODE(28,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txd0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "clk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad5", V_88F6810_PLUS)),
+ MPP_MODE(29,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txd1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ale0", V_88F6810_PLUS)),
+ MPP_MODE(30,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txd2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "oen", V_88F6810_PLUS)),
+ MPP_MODE(31,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txd3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ale1", V_88F6810_PLUS)),
+ MPP_MODE(32,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txctl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "wen0", V_88F6810_PLUS)),
+ MPP_MODE(33,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "m", "decc_err", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad3", V_88F6810_PLUS)),
+ MPP_MODE(34,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad1", V_88F6810_PLUS)),
+ MPP_MODE(35,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ref", "clk_out1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "a1", V_88F6810_PLUS)),
+ MPP_MODE(36,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ptp", "trig_gen", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "a0", V_88F6810_PLUS)),
+ MPP_MODE(37,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ptp", "clk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad8", V_88F6810_PLUS)),
+ MPP_MODE(38,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ptp", "event_req", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxd1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ref", "clk_out0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad4", V_88F6810_PLUS)),
+ MPP_MODE(39,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c1", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxd2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "cts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "a2", V_88F6810_PLUS)),
+ MPP_MODE(40,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c1", "sda", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxd3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "rts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad6", V_88F6810_PLUS)),
+ MPP_MODE(41,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "rxd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxctl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "cts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "cs3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "burst/last", V_88F6810_PLUS)),
+ MPP_MODE(42,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "txd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "rts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad7", V_88F6810_PLUS)),
+ MPP_MODE(43,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "clkreq", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "m", "vtt_ctrl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "decc_err", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "clkout", V_88F6810_PLUS)),
+ MPP_MODE(44,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "sata1", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "sata2", "prsnt", V_88F6828),
+ MPP_VAR_FUNCTION(4, "sata3", "prsnt", V_88F6828),
+ MPP_VAR_FUNCTION(5, "pcie0", "rstout", V_88F6810_PLUS)),
+ MPP_MODE(45,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ref", "clk_out0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "pcie2", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "pcie3", "rstout", V_88F6810_PLUS)),
+ MPP_MODE(46,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ref", "clk_out1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "pcie2", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "pcie3", "rstout", V_88F6810_PLUS)),
+ MPP_MODE(47,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "sata1", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "sata2", "prsnt", V_88F6828),
+ MPP_VAR_FUNCTION(4, "spi1", "cs2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sata3", "prsnt", V_88F6828)),
+ MPP_MODE(48,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "m", "vtt_ctrl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm2c", "pclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "mclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d4", V_88F6810_PLUS)),
+ MPP_MODE(49,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata2", "prsnt", V_88F6828),
+ MPP_VAR_FUNCTION(2, "sata3", "prsnt", V_88F6828),
+ MPP_VAR_FUNCTION(3, "tdm2c", "fsync", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "lrclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d5", V_88F6810_PLUS)),
+ MPP_MODE(50,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm2c", "drx", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "extclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "cmd", V_88F6810_PLUS)),
+ MPP_MODE(51,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm2c", "dtx", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "sdo", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "m", "decc_err", V_88F6810_PLUS)),
+ MPP_MODE(52,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm2c", "intn", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "sdi", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d6", V_88F6810_PLUS)),
+ MPP_MODE(53,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata1", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm2c", "rstn", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "bclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d7", V_88F6810_PLUS)),
+ MPP_MODE(54,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "sata1", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d3", V_88F6810_PLUS)),
+ MPP_MODE(55,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "cts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge", "mdio", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "clkreq", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "cs1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d0", V_88F6810_PLUS)),
+ MPP_MODE(56,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "rts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge", "mdc", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "decc_err", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "mosi", V_88F6810_PLUS)),
+ MPP_MODE(57,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "clk", V_88F6810_PLUS)),
+ MPP_MODE(58,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie1", "clkreq", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(2, "i2c1", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie2", "clkreq", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "miso", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d1", V_88F6810_PLUS)),
+ MPP_MODE(59,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "i2c1", "sda", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "cs0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d2", V_88F6810_PLUS)),
+};
+
+static struct mvebu_pinctrl_soc_info armada_38x_pinctrl_info;
+
+static struct of_device_id armada_38x_pinctrl_of_match[] = {
+ {
+ .compatible = "marvell,mv88f6810-pinctrl",
+ .data = (void *) V_88F6810,
+ },
+ {
+ .compatible = "marvell,mv88f6820-pinctrl",
+ .data = (void *) V_88F6820,
+ },
+ {
+ .compatible = "marvell,mv88f6828-pinctrl",
+ .data = (void *) V_88F6828,
+ },
+ { },
+};
+
+static struct mvebu_mpp_ctrl armada_38x_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 59, NULL, armada_38x_mpp_ctrl),
+};
+
+static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 27),
+};
+
+static int armada_38x_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = &armada_38x_pinctrl_info;
+ const struct of_device_id *match =
+ of_match_device(armada_38x_pinctrl_of_match, &pdev->dev);
+ struct resource *res;
+
+ if (!match)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
+
+ soc->variant = (unsigned) match->data & 0xff;
+ soc->controls = armada_38x_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(armada_38x_mpp_controls);
+ soc->gpioranges = armada_38x_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(armada_38x_mpp_gpio_ranges);
+ soc->modes = armada_38x_mpp_modes;
+ soc->nmodes = armada_38x_mpp_controls[0].npins;
+
+ pdev->dev.platform_data = soc;
+
+ return mvebu_pinctrl_probe(pdev);
+}
+
+static int armada_38x_pinctrl_remove(struct platform_device *pdev)
+{
+ return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver armada_38x_pinctrl_driver = {
+ .driver = {
+ .name = "armada-38x-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(armada_38x_pinctrl_of_match),
+ },
+ .probe = armada_38x_pinctrl_probe,
+ .remove = armada_38x_pinctrl_remove,
+};
+
+module_platform_driver(armada_38x_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 38x pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index 843a51f9d129..de311129f7a0 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -33,6 +33,18 @@
#include "pinctrl-mvebu.h"
+static void __iomem *mpp_base;
+
+static int armada_xp_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_xp_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
enum armada_xp_variant {
V_MV78230 = BIT(0),
V_MV78260 = BIT(1),
@@ -366,7 +378,7 @@ static struct of_device_id armada_xp_pinctrl_of_match[] = {
};
static struct mvebu_mpp_ctrl mv78230_mpp_controls[] = {
- MPP_REG_CTRL(0, 48),
+ MPP_FUNC_CTRL(0, 48, NULL, armada_xp_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = {
@@ -375,7 +387,7 @@ static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = {
};
static struct mvebu_mpp_ctrl mv78260_mpp_controls[] = {
- MPP_REG_CTRL(0, 66),
+ MPP_FUNC_CTRL(0, 66, NULL, armada_xp_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = {
@@ -385,7 +397,7 @@ static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = {
};
static struct mvebu_mpp_ctrl mv78460_mpp_controls[] = {
- MPP_REG_CTRL(0, 66),
+ MPP_FUNC_CTRL(0, 66, NULL, armada_xp_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78460_mpp_gpio_ranges[] = {
@@ -399,10 +411,16 @@ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info;
const struct of_device_id *match =
of_match_device(armada_xp_pinctrl_of_match, &pdev->dev);
+ struct resource *res;
if (!match)
return -ENODEV;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
+
soc->variant = (unsigned) match->data & 0xff;
switch (soc->variant) {
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 47268393af34..3b022178a566 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -18,107 +18,122 @@
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
#include "pinctrl-mvebu.h"
-#define DOVE_SB_REGS_VIRT_BASE IOMEM(0xfde00000)
-#define DOVE_MPP_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE + 0xd0200)
-#define DOVE_PMU_MPP_GENERAL_CTRL (DOVE_MPP_VIRT_BASE + 0x10)
-#define DOVE_AU0_AC97_SEL BIT(16)
-#define DOVE_PMU_SIGNAL_SELECT_0 (DOVE_SB_REGS_VIRT_BASE + 0xd802C)
-#define DOVE_PMU_SIGNAL_SELECT_1 (DOVE_SB_REGS_VIRT_BASE + 0xd8030)
-#define DOVE_GLOBAL_CONFIG_1 (DOVE_SB_REGS_VIRT_BASE + 0xe802C)
-#define DOVE_GLOBAL_CONFIG_1 (DOVE_SB_REGS_VIRT_BASE + 0xe802C)
-#define DOVE_TWSI_ENABLE_OPTION1 BIT(7)
-#define DOVE_GLOBAL_CONFIG_2 (DOVE_SB_REGS_VIRT_BASE + 0xe8030)
-#define DOVE_TWSI_ENABLE_OPTION2 BIT(20)
-#define DOVE_TWSI_ENABLE_OPTION3 BIT(21)
-#define DOVE_TWSI_OPTION3_GPIO BIT(22)
-#define DOVE_SSP_CTRL_STATUS_1 (DOVE_SB_REGS_VIRT_BASE + 0xe8034)
-#define DOVE_SSP_ON_AU1 BIT(0)
-#define DOVE_MPP_GENERAL_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE + 0xe803c)
-#define DOVE_AU1_SPDIFO_GPIO_EN BIT(1)
-#define DOVE_NAND_GPIO_EN BIT(0)
-#define DOVE_GPIO_LO_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE + 0xd0400)
-#define DOVE_MPP_CTRL4_VIRT_BASE (DOVE_GPIO_LO_VIRT_BASE + 0x40)
-#define DOVE_SPI_GPIO_SEL BIT(5)
-#define DOVE_UART1_GPIO_SEL BIT(4)
-#define DOVE_AU1_GPIO_SEL BIT(3)
-#define DOVE_CAM_GPIO_SEL BIT(2)
-#define DOVE_SD1_GPIO_SEL BIT(1)
-#define DOVE_SD0_GPIO_SEL BIT(0)
-
-#define MPPS_PER_REG 8
-#define MPP_BITS 4
-#define MPP_MASK 0xf
+/* Internal registers can be configured at any 1 MiB aligned address */
+#define INT_REGS_MASK ~(SZ_1M - 1)
+#define MPP4_REGS_OFFS 0xd0440
+#define PMU_REGS_OFFS 0xd802c
+#define GC_REGS_OFFS 0xe802c
+
+/* MPP Base registers */
+#define PMU_MPP_GENERAL_CTRL 0x10
+#define AU0_AC97_SEL BIT(16)
+
+/* MPP Control 4 register */
+#define SPI_GPIO_SEL BIT(5)
+#define UART1_GPIO_SEL BIT(4)
+#define AU1_GPIO_SEL BIT(3)
+#define CAM_GPIO_SEL BIT(2)
+#define SD1_GPIO_SEL BIT(1)
+#define SD0_GPIO_SEL BIT(0)
+
+/* PMU Signal Select registers */
+#define PMU_SIGNAL_SELECT_0 0x00
+#define PMU_SIGNAL_SELECT_1 0x04
+
+/* Global Config regmap registers */
+#define GLOBAL_CONFIG_1 0x00
+#define TWSI_ENABLE_OPTION1 BIT(7)
+#define GLOBAL_CONFIG_2 0x04
+#define TWSI_ENABLE_OPTION2 BIT(20)
+#define TWSI_ENABLE_OPTION3 BIT(21)
+#define TWSI_OPTION3_GPIO BIT(22)
+#define SSP_CTRL_STATUS_1 0x08
+#define SSP_ON_AU1 BIT(0)
+#define MPP_GENERAL_CONFIG 0x10
+#define AU1_SPDIFO_GPIO_EN BIT(1)
+#define NAND_GPIO_EN BIT(0)
#define CONFIG_PMU BIT(4)
-static int dove_pmu_mpp_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static void __iomem *mpp_base;
+static void __iomem *mpp4_base;
+static void __iomem *pmu_base;
+static struct regmap *gconfmap;
+
+static int dove_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int dove_mpp_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned off = (ctrl->pid / MPPS_PER_REG) * MPP_BITS;
- unsigned shift = (ctrl->pid % MPPS_PER_REG) * MPP_BITS;
- unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
+static int dove_pmu_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
unsigned long func;
- if (pmu & (1 << ctrl->pid)) {
- func = readl(DOVE_PMU_SIGNAL_SELECT_0 + off);
- *config = (func >> shift) & MPP_MASK;
- *config |= CONFIG_PMU;
- } else {
- func = readl(DOVE_MPP_VIRT_BASE + off);
- *config = (func >> shift) & MPP_MASK;
- }
+ if ((pmu & BIT(pid)) == 0)
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+
+ func = readl(pmu_base + PMU_SIGNAL_SELECT_0 + off);
+ *config = (func >> shift) & MVEBU_MPP_MASK;
+ *config |= CONFIG_PMU;
+
return 0;
}
-static int dove_pmu_mpp_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_pmu_mpp_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned off = (ctrl->pid / MPPS_PER_REG) * MPP_BITS;
- unsigned shift = (ctrl->pid % MPPS_PER_REG) * MPP_BITS;
- unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
unsigned long func;
- if (config & CONFIG_PMU) {
- writel(pmu | (1 << ctrl->pid), DOVE_PMU_MPP_GENERAL_CTRL);
- func = readl(DOVE_PMU_SIGNAL_SELECT_0 + off);
- func &= ~(MPP_MASK << shift);
- func |= (config & MPP_MASK) << shift;
- writel(func, DOVE_PMU_SIGNAL_SELECT_0 + off);
- } else {
- writel(pmu & ~(1 << ctrl->pid), DOVE_PMU_MPP_GENERAL_CTRL);
- func = readl(DOVE_MPP_VIRT_BASE + off);
- func &= ~(MPP_MASK << shift);
- func |= (config & MPP_MASK) << shift;
- writel(func, DOVE_MPP_VIRT_BASE + off);
+ if ((config & CONFIG_PMU) == 0) {
+ writel(pmu & ~BIT(pid), mpp_base + PMU_MPP_GENERAL_CTRL);
+ return default_mpp_ctrl_set(mpp_base, pid, config);
}
+
+ writel(pmu | BIT(pid), mpp_base + PMU_MPP_GENERAL_CTRL);
+ func = readl(pmu_base + PMU_SIGNAL_SELECT_0 + off);
+ func &= ~(MVEBU_MPP_MASK << shift);
+ func |= (config & MVEBU_MPP_MASK) << shift;
+ writel(func, pmu_base + PMU_SIGNAL_SELECT_0 + off);
+
return 0;
}
-static int dove_mpp4_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static int dove_mpp4_ctrl_get(unsigned pid, unsigned long *config)
{
- unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
+ unsigned long mpp4 = readl(mpp4_base);
unsigned long mask;
- switch (ctrl->pid) {
+ switch (pid) {
case 24: /* mpp_camera */
- mask = DOVE_CAM_GPIO_SEL;
+ mask = CAM_GPIO_SEL;
break;
case 40: /* mpp_sdio0 */
- mask = DOVE_SD0_GPIO_SEL;
+ mask = SD0_GPIO_SEL;
break;
case 46: /* mpp_sdio1 */
- mask = DOVE_SD1_GPIO_SEL;
+ mask = SD1_GPIO_SEL;
break;
case 58: /* mpp_spi0 */
- mask = DOVE_SPI_GPIO_SEL;
+ mask = SPI_GPIO_SEL;
break;
case 62: /* mpp_uart1 */
- mask = DOVE_UART1_GPIO_SEL;
+ mask = UART1_GPIO_SEL;
break;
default:
return -EINVAL;
@@ -129,27 +144,26 @@ static int dove_mpp4_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
return 0;
}
-static int dove_mpp4_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_mpp4_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
+ unsigned long mpp4 = readl(mpp4_base);
unsigned long mask;
- switch (ctrl->pid) {
+ switch (pid) {
case 24: /* mpp_camera */
- mask = DOVE_CAM_GPIO_SEL;
+ mask = CAM_GPIO_SEL;
break;
case 40: /* mpp_sdio0 */
- mask = DOVE_SD0_GPIO_SEL;
+ mask = SD0_GPIO_SEL;
break;
case 46: /* mpp_sdio1 */
- mask = DOVE_SD1_GPIO_SEL;
+ mask = SD1_GPIO_SEL;
break;
case 58: /* mpp_spi0 */
- mask = DOVE_SPI_GPIO_SEL;
+ mask = SPI_GPIO_SEL;
break;
case 62: /* mpp_uart1 */
- mask = DOVE_UART1_GPIO_SEL;
+ mask = UART1_GPIO_SEL;
break;
default:
return -EINVAL;
@@ -159,74 +173,69 @@ static int dove_mpp4_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
if (config)
mpp4 |= mask;
- writel(mpp4, DOVE_MPP_CTRL4_VIRT_BASE);
+ writel(mpp4, mpp4_base);
return 0;
}
-static int dove_nand_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static int dove_nand_ctrl_get(unsigned pid, unsigned long *config)
{
- unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
+ unsigned int gmpp;
- *config = ((gmpp & DOVE_NAND_GPIO_EN) != 0);
+ regmap_read(gconfmap, MPP_GENERAL_CONFIG, &gmpp);
+ *config = ((gmpp & NAND_GPIO_EN) != 0);
return 0;
}
-static int dove_nand_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_nand_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
-
- gmpp &= ~DOVE_NAND_GPIO_EN;
- if (config)
- gmpp |= DOVE_NAND_GPIO_EN;
-
- writel(gmpp, DOVE_MPP_GENERAL_VIRT_BASE);
-
+ regmap_update_bits(gconfmap, MPP_GENERAL_CONFIG,
+ NAND_GPIO_EN,
+ (config) ? NAND_GPIO_EN : 0);
return 0;
}
-static int dove_audio0_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static int dove_audio0_ctrl_get(unsigned pid, unsigned long *config)
{
- unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
- *config = ((pmu & DOVE_AU0_AC97_SEL) != 0);
+ *config = ((pmu & AU0_AC97_SEL) != 0);
return 0;
}
-static int dove_audio0_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_audio0_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
- pmu &= ~DOVE_AU0_AC97_SEL;
+ pmu &= ~AU0_AC97_SEL;
if (config)
- pmu |= DOVE_AU0_AC97_SEL;
- writel(pmu, DOVE_PMU_MPP_GENERAL_CTRL);
+ pmu |= AU0_AC97_SEL;
+ writel(pmu, mpp_base + PMU_MPP_GENERAL_CTRL);
return 0;
}
-static int dove_audio1_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static int dove_audio1_ctrl_get(unsigned pid, unsigned long *config)
{
- unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
- unsigned long sspc1 = readl(DOVE_SSP_CTRL_STATUS_1);
- unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
- unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+ unsigned int mpp4 = readl(mpp4_base);
+ unsigned int sspc1;
+ unsigned int gmpp;
+ unsigned int gcfg2;
+
+ regmap_read(gconfmap, SSP_CTRL_STATUS_1, &sspc1);
+ regmap_read(gconfmap, MPP_GENERAL_CONFIG, &gmpp);
+ regmap_read(gconfmap, GLOBAL_CONFIG_2, &gcfg2);
*config = 0;
- if (mpp4 & DOVE_AU1_GPIO_SEL)
+ if (mpp4 & AU1_GPIO_SEL)
*config |= BIT(3);
- if (sspc1 & DOVE_SSP_ON_AU1)
+ if (sspc1 & SSP_ON_AU1)
*config |= BIT(2);
- if (gmpp & DOVE_AU1_SPDIFO_GPIO_EN)
+ if (gmpp & AU1_SPDIFO_GPIO_EN)
*config |= BIT(1);
- if (gcfg2 & DOVE_TWSI_OPTION3_GPIO)
+ if (gcfg2 & TWSI_OPTION3_GPIO)
*config |= BIT(0);
/* SSP/TWSI only if I2S1 not set */
@@ -238,35 +247,24 @@ static int dove_audio1_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
return 0;
}
-static int dove_audio1_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_audio1_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
- unsigned long sspc1 = readl(DOVE_SSP_CTRL_STATUS_1);
- unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
- unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+ unsigned int mpp4 = readl(mpp4_base);
- /*
- * clear all audio1 related bits before configure
- */
- gcfg2 &= ~DOVE_TWSI_OPTION3_GPIO;
- gmpp &= ~DOVE_AU1_SPDIFO_GPIO_EN;
- sspc1 &= ~DOVE_SSP_ON_AU1;
- mpp4 &= ~DOVE_AU1_GPIO_SEL;
-
- if (config & BIT(0))
- gcfg2 |= DOVE_TWSI_OPTION3_GPIO;
- if (config & BIT(1))
- gmpp |= DOVE_AU1_SPDIFO_GPIO_EN;
- if (config & BIT(2))
- sspc1 |= DOVE_SSP_ON_AU1;
+ mpp4 &= ~AU1_GPIO_SEL;
if (config & BIT(3))
- mpp4 |= DOVE_AU1_GPIO_SEL;
-
- writel(mpp4, DOVE_MPP_CTRL4_VIRT_BASE);
- writel(sspc1, DOVE_SSP_CTRL_STATUS_1);
- writel(gmpp, DOVE_MPP_GENERAL_VIRT_BASE);
- writel(gcfg2, DOVE_GLOBAL_CONFIG_2);
+ mpp4 |= AU1_GPIO_SEL;
+ writel(mpp4, mpp4_base);
+
+ regmap_update_bits(gconfmap, SSP_CTRL_STATUS_1,
+ SSP_ON_AU1,
+ (config & BIT(2)) ? SSP_ON_AU1 : 0);
+ regmap_update_bits(gconfmap, MPP_GENERAL_CONFIG,
+ AU1_SPDIFO_GPIO_EN,
+ (config & BIT(1)) ? AU1_SPDIFO_GPIO_EN : 0);
+ regmap_update_bits(gconfmap, GLOBAL_CONFIG_2,
+ TWSI_OPTION3_GPIO,
+ (config & BIT(0)) ? TWSI_OPTION3_GPIO : 0);
return 0;
}
@@ -276,11 +274,11 @@ static int dove_audio1_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
* break other functions. If you require all mpps as gpio
* enforce gpio setting by pinctrl mapping.
*/
-static int dove_audio1_ctrl_gpio_req(struct mvebu_mpp_ctrl *ctrl, u8 pid)
+static int dove_audio1_ctrl_gpio_req(unsigned pid)
{
unsigned long config;
- dove_audio1_ctrl_get(ctrl, &config);
+ dove_audio1_ctrl_get(pid, &config);
switch (config) {
case 0x02: /* i2s1 : gpio[56:57] */
@@ -303,76 +301,62 @@ static int dove_audio1_ctrl_gpio_req(struct mvebu_mpp_ctrl *ctrl, u8 pid)
}
/* mpp[52:57] has gpio pins capable of in and out */
-static int dove_audio1_ctrl_gpio_dir(struct mvebu_mpp_ctrl *ctrl, u8 pid,
- bool input)
+static int dove_audio1_ctrl_gpio_dir(unsigned pid, bool input)
{
if (pid < 52 || pid > 57)
return -ENOTSUPP;
return 0;
}
-static int dove_twsi_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static int dove_twsi_ctrl_get(unsigned pid, unsigned long *config)
{
- unsigned long gcfg1 = readl(DOVE_GLOBAL_CONFIG_1);
- unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+ unsigned int gcfg1;
+ unsigned int gcfg2;
+
+ regmap_read(gconfmap, GLOBAL_CONFIG_1, &gcfg1);
+ regmap_read(gconfmap, GLOBAL_CONFIG_2, &gcfg2);
*config = 0;
- if (gcfg1 & DOVE_TWSI_ENABLE_OPTION1)
+ if (gcfg1 & TWSI_ENABLE_OPTION1)
*config = 1;
- else if (gcfg2 & DOVE_TWSI_ENABLE_OPTION2)
+ else if (gcfg2 & TWSI_ENABLE_OPTION2)
*config = 2;
- else if (gcfg2 & DOVE_TWSI_ENABLE_OPTION3)
+ else if (gcfg2 & TWSI_ENABLE_OPTION3)
*config = 3;
return 0;
}
-static int dove_twsi_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_twsi_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned long gcfg1 = readl(DOVE_GLOBAL_CONFIG_1);
- unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
-
- gcfg1 &= ~DOVE_TWSI_ENABLE_OPTION1;
- gcfg2 &= ~(DOVE_TWSI_ENABLE_OPTION2 | DOVE_TWSI_ENABLE_OPTION3);
+ unsigned int gcfg1 = 0;
+ unsigned int gcfg2 = 0;
switch (config) {
case 1:
- gcfg1 |= DOVE_TWSI_ENABLE_OPTION1;
+ gcfg1 = TWSI_ENABLE_OPTION1;
break;
case 2:
- gcfg2 |= DOVE_TWSI_ENABLE_OPTION2;
+ gcfg2 = TWSI_ENABLE_OPTION2;
break;
case 3:
- gcfg2 |= DOVE_TWSI_ENABLE_OPTION3;
+ gcfg2 = TWSI_ENABLE_OPTION3;
break;
}
- writel(gcfg1, DOVE_GLOBAL_CONFIG_1);
- writel(gcfg2, DOVE_GLOBAL_CONFIG_2);
+ regmap_update_bits(gconfmap, GLOBAL_CONFIG_1,
+ TWSI_ENABLE_OPTION1,
+ gcfg1);
+ regmap_update_bits(gconfmap, GLOBAL_CONFIG_2,
+ TWSI_ENABLE_OPTION2 | TWSI_ENABLE_OPTION3,
+ gcfg2);
return 0;
}
static struct mvebu_mpp_ctrl dove_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 0, "mpp0", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(1, 1, "mpp1", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(2, 2, "mpp2", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(3, 3, "mpp3", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(4, 4, "mpp4", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(5, 5, "mpp5", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(6, 6, "mpp6", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(7, 7, "mpp7", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(8, 8, "mpp8", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(9, 9, "mpp9", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(10, 10, "mpp10", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(11, 11, "mpp11", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(12, 12, "mpp12", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(13, 13, "mpp13", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(14, 14, "mpp14", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(15, 15, "mpp15", dove_pmu_mpp_ctrl),
- MPP_REG_CTRL(16, 23),
+ MPP_FUNC_CTRL(0, 15, NULL, dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(16, 23, NULL, dove_mpp_ctrl),
MPP_FUNC_CTRL(24, 39, "mpp_camera", dove_mpp4_ctrl),
MPP_FUNC_CTRL(40, 45, "mpp_sdio0", dove_mpp4_ctrl),
MPP_FUNC_CTRL(46, 51, "mpp_sdio1", dove_mpp4_ctrl),
@@ -772,8 +756,17 @@ static struct of_device_id dove_pinctrl_of_match[] = {
{ }
};
+static struct regmap_config gc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 5,
+};
+
static int dove_pinctrl_probe(struct platform_device *pdev)
{
+ struct resource *res, *mpp_res;
+ struct resource fb_res;
const struct of_device_id *match =
of_match_device(dove_pinctrl_of_match, &pdev->dev);
pdev->dev.platform_data = (void *)match->data;
@@ -789,6 +782,59 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
}
clk_prepare_enable(clk);
+ mpp_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, mpp_res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
+
+ /* prepare fallback resource */
+ memcpy(&fb_res, mpp_res, sizeof(struct resource));
+ fb_res.start = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_warn(&pdev->dev, "falling back to hardcoded MPP4 resource\n");
+ adjust_resource(&fb_res,
+ (mpp_res->start & INT_REGS_MASK) + MPP4_REGS_OFFS, 0x4);
+ res = &fb_res;
+ }
+
+ mpp4_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp4_base))
+ return PTR_ERR(mpp4_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ dev_warn(&pdev->dev, "falling back to hardcoded PMU resource\n");
+ adjust_resource(&fb_res,
+ (mpp_res->start & INT_REGS_MASK) + PMU_REGS_OFFS, 0x8);
+ res = &fb_res;
+ }
+
+ pmu_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmu_base))
+ return PTR_ERR(pmu_base);
+
+ gconfmap = syscon_regmap_lookup_by_compatible("marvell,dove-global-config");
+ if (IS_ERR(gconfmap)) {
+ void __iomem *gc_base;
+
+ dev_warn(&pdev->dev, "falling back to hardcoded global registers\n");
+ adjust_resource(&fb_res,
+ (mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14);
+ gc_base = devm_ioremap_resource(&pdev->dev, &fb_res);
+ if (IS_ERR(gc_base))
+ return PTR_ERR(gc_base);
+ gconfmap = devm_regmap_init_mmio(&pdev->dev,
+ gc_base, &gc_regmap_config);
+ if (IS_ERR(gconfmap))
+ return PTR_ERR(gconfmap);
+ }
+
+ /* Warn on any missing DT resource */
+ if (fb_res.start)
+ dev_warn(&pdev->dev, FW_BUG "Missing pinctrl regs in DTB. Please update your firmware.\n");
+
return mvebu_pinctrl_probe(pdev);
}
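For illustration, the fallback computation above works out as follows for a hypothetical MPP resource at 0xf10d0200 (the address is an example only, not taken from the patch):

/*
 * mpp_res->start & INT_REGS_MASK = 0xf10d0200 & ~(SZ_1M - 1)
 *                                = 0xf1000000
 * + MPP4_REGS_OFFS (0xd0440)     = 0xf10d0440  (MPP control 4)
 * + PMU_REGS_OFFS  (0xd802c)     = 0xf10d802c  (PMU signal select)
 * + GC_REGS_OFFS   (0xe802c)     = 0xf10e802c  (global config)
 *
 * i.e. the 1 MiB alignment mask recovers the internal-register base
 * from whatever MPP address the DT provided, and the hardcoded
 * offsets rebuild the legacy register locations from it.
 */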
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index 6b504b5935a5..0d0211a1a0b0 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -21,6 +21,18 @@
#include "pinctrl-mvebu.h"
+static void __iomem *mpp_base;
+
+static int kirkwood_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int kirkwood_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
#define V(f6180, f6190, f6192, f6281, f6282, dx4122) \
((f6180 << 0) | (f6190 << 1) | (f6192 << 2) | \
(f6281 << 3) | (f6282 << 4) | (dx4122 << 5))
@@ -359,7 +371,7 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
};
static struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = {
- MPP_REG_CTRL(0, 29),
+ MPP_FUNC_CTRL(0, 29, NULL, kirkwood_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
@@ -367,7 +379,7 @@ static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
};
static struct mvebu_mpp_ctrl mv88f619x_mpp_controls[] = {
- MPP_REG_CTRL(0, 35),
+ MPP_FUNC_CTRL(0, 35, NULL, kirkwood_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = {
@@ -376,7 +388,7 @@ static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = {
};
static struct mvebu_mpp_ctrl mv88f628x_mpp_controls[] = {
- MPP_REG_CTRL(0, 49),
+ MPP_FUNC_CTRL(0, 49, NULL, kirkwood_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f628x_gpio_ranges[] = {
@@ -456,9 +468,16 @@ static struct of_device_id kirkwood_pinctrl_of_match[] = {
static int kirkwood_pinctrl_probe(struct platform_device *pdev)
{
+ struct resource *res;
const struct of_device_id *match =
of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
pdev->dev.platform_data = (void *)match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
+
return mvebu_pinctrl_probe(pdev);
}
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index 0fd1ad31fbf9..9908374f8f92 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -50,7 +50,6 @@ struct mvebu_pinctrl {
struct device *dev;
struct pinctrl_dev *pctldev;
struct pinctrl_desc desc;
- void __iomem *base;
struct mvebu_pinctrl_group *groups;
unsigned num_groups;
struct mvebu_pinctrl_function *functions;
@@ -138,43 +137,6 @@ static struct mvebu_pinctrl_function *mvebu_pinctrl_find_function_by_name(
return NULL;
}
-/*
- * Common mpp pin configuration registers on MVEBU are
- * registers of eight 4-bit values for each mpp setting.
- * Register offset and bit mask are calculated accordingly below.
- */
-static int mvebu_common_mpp_get(struct mvebu_pinctrl *pctl,
- struct mvebu_pinctrl_group *grp,
- unsigned long *config)
-{
- unsigned pin = grp->gid;
- unsigned off = (pin / MPPS_PER_REG) * MPP_BITS;
- unsigned shift = (pin % MPPS_PER_REG) * MPP_BITS;
-
- *config = readl(pctl->base + off);
- *config >>= shift;
- *config &= MPP_MASK;
-
- return 0;
-}
-
-static int mvebu_common_mpp_set(struct mvebu_pinctrl *pctl,
- struct mvebu_pinctrl_group *grp,
- unsigned long config)
-{
- unsigned pin = grp->gid;
- unsigned off = (pin / MPPS_PER_REG) * MPP_BITS;
- unsigned shift = (pin % MPPS_PER_REG) * MPP_BITS;
- unsigned long reg;
-
- reg = readl(pctl->base + off);
- reg &= ~(MPP_MASK << shift);
- reg |= (config << shift);
- writel(reg, pctl->base + off);
-
- return 0;
-}
-
static int mvebu_pinconf_group_get(struct pinctrl_dev *pctldev,
unsigned gid, unsigned long *config)
{
@@ -184,10 +146,7 @@ static int mvebu_pinconf_group_get(struct pinctrl_dev *pctldev,
if (!grp->ctrl)
return -EINVAL;
- if (grp->ctrl->mpp_get)
- return grp->ctrl->mpp_get(grp->ctrl, config);
-
- return mvebu_common_mpp_get(pctl, grp, config);
+ return grp->ctrl->mpp_get(grp->pins[0], config);
}
static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
@@ -202,11 +161,7 @@ static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
return -EINVAL;
for (i = 0; i < num_configs; i++) {
- if (grp->ctrl->mpp_set)
- ret = grp->ctrl->mpp_set(grp->ctrl, configs[i]);
- else
- ret = mvebu_common_mpp_set(pctl, grp, configs[i]);
-
+ ret = grp->ctrl->mpp_set(grp->pins[0], configs[i]);
if (ret)
return ret;
} /* for each config */
@@ -347,7 +302,7 @@ static int mvebu_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
return -EINVAL;
if (grp->ctrl->mpp_gpio_req)
- return grp->ctrl->mpp_gpio_req(grp->ctrl, offset);
+ return grp->ctrl->mpp_gpio_req(offset);
setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
if (!setting)
@@ -370,7 +325,7 @@ static int mvebu_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
return -EINVAL;
if (grp->ctrl->mpp_gpio_dir)
- return grp->ctrl->mpp_gpio_dir(grp->ctrl, offset, input);
+ return grp->ctrl->mpp_gpio_dir(offset, input);
setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
if (!setting)
@@ -593,11 +548,12 @@ static int mvebu_pinctrl_build_functions(struct platform_device *pdev,
int mvebu_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
- struct resource *res;
struct mvebu_pinctrl *pctl;
- void __iomem *base;
struct pinctrl_pin_desc *pdesc;
unsigned gid, n, k;
+ unsigned size, noname = 0;
+ char *noname_buf;
+ void *p;
int ret;
if (!soc || !soc->controls || !soc->modes) {
@@ -605,11 +561,6 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
pctl = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pinctrl),
GFP_KERNEL);
if (!pctl) {
@@ -623,7 +574,6 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pctl->desc.pmxops = &mvebu_pinmux_ops;
pctl->desc.confops = &mvebu_pinconf_ops;
pctl->variant = soc->variant;
- pctl->base = base;
pctl->dev = &pdev->dev;
platform_set_drvdata(pdev, pctl);
@@ -633,33 +583,23 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pctl->desc.npins = 0;
for (n = 0; n < soc->ncontrols; n++) {
struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
- char *names;
pctl->desc.npins += ctrl->npins;
- /* initial control pins */
+ /* initialize control's pins[] array */
for (k = 0; k < ctrl->npins; k++)
ctrl->pins[k] = ctrl->pid + k;
- /* special soc specific control */
- if (ctrl->mpp_get || ctrl->mpp_set) {
- if (!ctrl->name || !ctrl->mpp_get || !ctrl->mpp_set) {
- dev_err(&pdev->dev, "wrong soc control info\n");
- return -EINVAL;
- }
+ /*
+ * Controls with a NULL name are allowed; they are treated
+ * as a range of one-pin groups with generic mvebu register
+ * controls.
+ */
+ if (!ctrl->name) {
+ pctl->num_groups += ctrl->npins;
+ noname += ctrl->npins;
+ } else {
pctl->num_groups += 1;
- continue;
}
-
- /* generic mvebu register control */
- names = devm_kzalloc(&pdev->dev, ctrl->npins * 8, GFP_KERNEL);
- if (!names) {
- dev_err(&pdev->dev, "failed to alloc mpp names\n");
- return -ENOMEM;
- }
- for (k = 0; k < ctrl->npins; k++)
- sprintf(names + 8*k, "mpp%d", ctrl->pid+k);
- ctrl->name = names;
- pctl->num_groups += ctrl->npins;
}
pdesc = devm_kzalloc(&pdev->dev, pctl->desc.npins *
@@ -673,12 +613,17 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pdesc[n].number = n;
pctl->desc.pins = pdesc;
- pctl->groups = devm_kzalloc(&pdev->dev, pctl->num_groups *
- sizeof(struct mvebu_pinctrl_group), GFP_KERNEL);
- if (!pctl->groups) {
- dev_err(&pdev->dev, "failed to alloc pinctrl groups\n");
+ /*
+ * allocate groups and name buffers for unnamed groups.
+ */
+ size = pctl->num_groups * sizeof(*pctl->groups) + noname * 8;
+ p = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!p) {
+ dev_err(&pdev->dev, "failed to alloc group data\n");
return -ENOMEM;
}
+ pctl->groups = p;
+ noname_buf = p + pctl->num_groups * sizeof(*pctl->groups);
/* assign mpp controls to groups */
gid = 0;
@@ -690,17 +635,26 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pctl->groups[gid].pins = ctrl->pins;
pctl->groups[gid].npins = ctrl->npins;
- /* generic mvebu register control maps to a number of groups */
- if (!ctrl->mpp_get && !ctrl->mpp_set) {
+ /*
+ * We treat unnamed controls as a range of one-pin groups
+ * with generic mvebu register controls. Use one group for
+ * each pin in this range and assign a default group name.
+ */
+ if (!ctrl->name) {
+ pctl->groups[gid].name = noname_buf;
pctl->groups[gid].npins = 1;
+ sprintf(noname_buf, "mpp%d", ctrl->pid+0);
+ noname_buf += 8;
for (k = 1; k < ctrl->npins; k++) {
gid++;
pctl->groups[gid].gid = gid;
pctl->groups[gid].ctrl = ctrl;
- pctl->groups[gid].name = &ctrl->name[8*k];
+ pctl->groups[gid].name = noname_buf;
pctl->groups[gid].pins = &ctrl->pins[k];
pctl->groups[gid].npins = 1;
+ sprintf(noname_buf, "mpp%d", ctrl->pid+k);
+ noname_buf += 8;
}
}
gid++;
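A sketch of the layout produced by the combined allocation above (the counts are illustrative):

/*
 * p --> [ groups[0] | groups[1] | ... | "mpp0\0..." | "mpp1\0..." ]
 *                                       ^
 *       noname_buf = p + pctl->num_groups * sizeof(*pctl->groups)
 *
 * Each unnamed one-pin group consumes 8 bytes of name storage,
 * enough for "mpp" plus up to three digits of a u8 pid and the
 * terminating NUL, so the single devm_kzalloc() replaces the
 * previous per-control name allocation.
 */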
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.h b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
index 90bd3beee860..65a98e6f7265 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.h
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
@@ -28,20 +28,19 @@
* between two or more different settings, e.g. assign mpp pin 13 to
* uart1 or sata.
*
- * If optional mpp_get/_set functions are set these are used to get/set
- * a specific mode. Otherwise it is assumed that the mpp control is based
- * on 4-bit groups in subsequent registers. The optional mpp_gpio_req/_dir
- * functions can be used to allow pin settings with varying gpio pins.
+ * The mpp_get/_set functions are mandatory and are used to get/set a
+ * specific mode. The optional mpp_gpio_req/_dir functions can be used
+ * to allow pin settings with varying gpio pins.
*/
struct mvebu_mpp_ctrl {
const char *name;
u8 pid;
u8 npins;
unsigned *pins;
- int (*mpp_get)(struct mvebu_mpp_ctrl *ctrl, unsigned long *config);
- int (*mpp_set)(struct mvebu_mpp_ctrl *ctrl, unsigned long config);
- int (*mpp_gpio_req)(struct mvebu_mpp_ctrl *ctrl, u8 pid);
- int (*mpp_gpio_dir)(struct mvebu_mpp_ctrl *ctrl, u8 pid, bool input);
+ int (*mpp_get)(unsigned pid, unsigned long *config);
+ int (*mpp_set)(unsigned pid, unsigned long config);
+ int (*mpp_gpio_req)(unsigned pid);
+ int (*mpp_gpio_dir)(unsigned pid, bool input);
};
/**
@@ -114,18 +113,6 @@ struct mvebu_pinctrl_soc_info {
int ngpioranges;
};
-#define MPP_REG_CTRL(_idl, _idh) \
- { \
- .name = NULL, \
- .pid = _idl, \
- .npins = _idh - _idl + 1, \
- .pins = (unsigned[_idh - _idl + 1]) { }, \
- .mpp_get = NULL, \
- .mpp_set = NULL, \
- .mpp_gpio_req = NULL, \
- .mpp_gpio_dir = NULL, \
- }
-
#define MPP_FUNC_CTRL(_idl, _idh, _name, _func) \
{ \
.name = _name, \
@@ -186,6 +173,34 @@ struct mvebu_pinctrl_soc_info {
.npins = _npins, \
}
+#define MVEBU_MPPS_PER_REG 8
+#define MVEBU_MPP_BITS 4
+#define MVEBU_MPP_MASK 0xf
+
+static inline int default_mpp_ctrl_get(void __iomem *base, unsigned int pid,
+ unsigned long *config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+
+ *config = (readl(base + off) >> shift) & MVEBU_MPP_MASK;
+
+ return 0;
+}
+
+static inline int default_mpp_ctrl_set(void __iomem *base, unsigned int pid,
+ unsigned long config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned long reg;
+
+ reg = readl(base + off) & ~(MVEBU_MPP_MASK << shift);
+ writel(reg | (config << shift), base + off);
+
+ return 0;
+}
+
int mvebu_pinctrl_probe(struct platform_device *pdev);
int mvebu_pinctrl_remove(struct platform_device *pdev);
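A worked example of the offset/shift math in default_mpp_ctrl_get/_set, using MPP pin 13:

/*
 * off   = (13 / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS = 1 * 4 = 4
 * shift = (13 % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS = 5 * 4 = 20
 *
 * so the 4-bit function selector for MPP13 occupies bits [23:20] of
 * the 32-bit register at base + 4, and MVEBU_MPP_MASK (0xf) isolates
 * it after the shift.
 */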
diff --git a/drivers/pinctrl/pinctrl-adi2-bf54x.c b/drivers/pinctrl/pinctrl-adi2-bf54x.c
index ea9d9ab9cda1..008a29e92e56 100644
--- a/drivers/pinctrl/pinctrl-adi2-bf54x.c
+++ b/drivers/pinctrl/pinctrl-adi2-bf54x.c
@@ -309,39 +309,6 @@ static const unsigned keys_8x8_pins[] = {
GPIO_PE4, GPIO_PE5, GPIO_PE6, GPIO_PE7,
};
-static const struct adi_pin_group adi_pin_groups[] = {
- ADI_PIN_GROUP("uart0grp", uart0_pins),
- ADI_PIN_GROUP("uart1grp", uart1_pins),
- ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins),
- ADI_PIN_GROUP("uart2grp", uart2_pins),
- ADI_PIN_GROUP("uart3grp", uart3_pins),
- ADI_PIN_GROUP("uart3ctsrtsgrp", uart3_ctsrts_pins),
- ADI_PIN_GROUP("rsi0grp", rsi0_pins),
- ADI_PIN_GROUP("spi0grp", spi0_pins),
- ADI_PIN_GROUP("spi1grp", spi1_pins),
- ADI_PIN_GROUP("twi0grp", twi0_pins),
- ADI_PIN_GROUP("twi1grp", twi1_pins),
- ADI_PIN_GROUP("rotarygrp", rotary_pins),
- ADI_PIN_GROUP("can0grp", can0_pins),
- ADI_PIN_GROUP("can1grp", can1_pins),
- ADI_PIN_GROUP("smc0grp", smc0_pins),
- ADI_PIN_GROUP("sport0grp", sport0_pins),
- ADI_PIN_GROUP("sport1grp", sport1_pins),
- ADI_PIN_GROUP("sport2grp", sport2_pins),
- ADI_PIN_GROUP("sport3grp", sport3_pins),
- ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins),
- ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins),
- ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins),
- ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins),
- ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins),
- ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins),
- ADI_PIN_GROUP("atapigrp", atapi_pins),
- ADI_PIN_GROUP("atapialtergrp", atapi_alter_pins),
- ADI_PIN_GROUP("nfc0grp", nfc0_pins),
- ADI_PIN_GROUP("keys_4x4grp", keys_4x4_pins),
- ADI_PIN_GROUP("keys_8x8grp", keys_8x8_pins),
-};
-
static const unsigned short uart0_mux[] = {
P_UART0_TX, P_UART0_RX,
0
@@ -513,6 +480,39 @@ static const unsigned short keys_8x8_mux[] = {
0
};
+static const struct adi_pin_group adi_pin_groups[] = {
+ ADI_PIN_GROUP("uart0grp", uart0_pins, uart0_mux),
+ ADI_PIN_GROUP("uart1grp", uart1_pins, uart1_mux),
+ ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins, uart1_ctsrts_mux),
+ ADI_PIN_GROUP("uart2grp", uart2_pins, uart2_mux),
+ ADI_PIN_GROUP("uart3grp", uart3_pins, uart3_mux),
+ ADI_PIN_GROUP("uart3ctsrtsgrp", uart3_ctsrts_pins, uart3_ctsrts_mux),
+ ADI_PIN_GROUP("rsi0grp", rsi0_pins, rsi0_mux),
+ ADI_PIN_GROUP("spi0grp", spi0_pins, spi0_mux),
+ ADI_PIN_GROUP("spi1grp", spi1_pins, spi1_mux),
+ ADI_PIN_GROUP("twi0grp", twi0_pins, twi0_mux),
+ ADI_PIN_GROUP("twi1grp", twi1_pins, twi1_mux),
+ ADI_PIN_GROUP("rotarygrp", rotary_pins, rotary_mux),
+ ADI_PIN_GROUP("can0grp", can0_pins, can0_mux),
+ ADI_PIN_GROUP("can1grp", can1_pins, can1_mux),
+ ADI_PIN_GROUP("smc0grp", smc0_pins, smc0_mux),
+ ADI_PIN_GROUP("sport0grp", sport0_pins, sport0_mux),
+ ADI_PIN_GROUP("sport1grp", sport1_pins, sport1_mux),
+ ADI_PIN_GROUP("sport2grp", sport2_pins, sport2_mux),
+ ADI_PIN_GROUP("sport3grp", sport3_pins, sport3_mux),
+ ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins, ppi0_8b_mux),
+ ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins, ppi0_16b_mux),
+ ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins, ppi0_24b_mux),
+ ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins, ppi1_8b_mux),
+ ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins, ppi1_16b_mux),
+ ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins, ppi2_8b_mux),
+ ADI_PIN_GROUP("atapigrp", atapi_pins, atapi_mux),
+ ADI_PIN_GROUP("atapialtergrp", atapi_alter_pins, atapi_alter_mux),
+ ADI_PIN_GROUP("nfc0grp", nfc0_pins, nfc0_mux),
+ ADI_PIN_GROUP("keys_4x4grp", keys_4x4_pins, keys_4x4_mux),
+ ADI_PIN_GROUP("keys_8x8grp", keys_8x8_pins, keys_8x8_mux),
+};
+
static const char * const uart0grp[] = { "uart0grp" };
static const char * const uart1grp[] = { "uart1grp" };
static const char * const uart1ctsrtsgrp[] = { "uart1ctsrtsgrp" };
@@ -532,49 +532,45 @@ static const char * const sport0grp[] = { "sport0grp" };
static const char * const sport1grp[] = { "sport1grp" };
static const char * const sport2grp[] = { "sport2grp" };
static const char * const sport3grp[] = { "sport3grp" };
-static const char * const ppi0_8bgrp[] = { "ppi0_8bgrp" };
-static const char * const ppi0_16bgrp[] = { "ppi0_16bgrp" };
-static const char * const ppi0_24bgrp[] = { "ppi0_24bgrp" };
-static const char * const ppi1_8bgrp[] = { "ppi1_8bgrp" };
-static const char * const ppi1_16bgrp[] = { "ppi1_16bgrp" };
-static const char * const ppi2_8bgrp[] = { "ppi2_8bgrp" };
+static const char * const ppi0grp[] = { "ppi0_8bgrp",
+ "ppi0_16bgrp",
+ "ppi0_24bgrp" };
+static const char * const ppi1grp[] = { "ppi1_8bgrp",
+ "ppi1_16bgrp" };
+static const char * const ppi2grp[] = { "ppi2_8bgrp" };
static const char * const atapigrp[] = { "atapigrp" };
static const char * const atapialtergrp[] = { "atapialtergrp" };
static const char * const nfc0grp[] = { "nfc0grp" };
-static const char * const keys_4x4grp[] = { "keys_4x4grp" };
-static const char * const keys_8x8grp[] = { "keys_8x8grp" };
+static const char * const keysgrp[] = { "keys_4x4grp",
+ "keys_8x8grp" };
static const struct adi_pmx_func adi_pmx_functions[] = {
- ADI_PMX_FUNCTION("uart0", uart0grp, uart0_mux),
- ADI_PMX_FUNCTION("uart1", uart1grp, uart1_mux),
- ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp, uart1_ctsrts_mux),
- ADI_PMX_FUNCTION("uart2", uart2grp, uart2_mux),
- ADI_PMX_FUNCTION("uart3", uart3grp, uart3_mux),
- ADI_PMX_FUNCTION("uart3_ctsrts", uart3ctsrtsgrp, uart3_ctsrts_mux),
- ADI_PMX_FUNCTION("rsi0", rsi0grp, rsi0_mux),
- ADI_PMX_FUNCTION("spi0", spi0grp, spi0_mux),
- ADI_PMX_FUNCTION("spi1", spi1grp, spi1_mux),
- ADI_PMX_FUNCTION("twi0", twi0grp, twi0_mux),
- ADI_PMX_FUNCTION("twi1", twi1grp, twi1_mux),
- ADI_PMX_FUNCTION("rotary", rotarygrp, rotary_mux),
- ADI_PMX_FUNCTION("can0", can0grp, can0_mux),
- ADI_PMX_FUNCTION("can1", can1grp, can1_mux),
- ADI_PMX_FUNCTION("smc0", smc0grp, smc0_mux),
- ADI_PMX_FUNCTION("sport0", sport0grp, sport0_mux),
- ADI_PMX_FUNCTION("sport1", sport1grp, sport1_mux),
- ADI_PMX_FUNCTION("sport2", sport2grp, sport2_mux),
- ADI_PMX_FUNCTION("sport3", sport3grp, sport3_mux),
- ADI_PMX_FUNCTION("ppi0_8b", ppi0_8bgrp, ppi0_8b_mux),
- ADI_PMX_FUNCTION("ppi0_16b", ppi0_16bgrp, ppi0_16b_mux),
- ADI_PMX_FUNCTION("ppi0_24b", ppi0_24bgrp, ppi0_24b_mux),
- ADI_PMX_FUNCTION("ppi1_8b", ppi1_8bgrp, ppi1_8b_mux),
- ADI_PMX_FUNCTION("ppi1_16b", ppi1_16bgrp, ppi1_16b_mux),
- ADI_PMX_FUNCTION("ppi2_8b", ppi2_8bgrp, ppi2_8b_mux),
- ADI_PMX_FUNCTION("atapi", atapigrp, atapi_mux),
- ADI_PMX_FUNCTION("atapi_alter", atapialtergrp, atapi_alter_mux),
- ADI_PMX_FUNCTION("nfc0", nfc0grp, nfc0_mux),
- ADI_PMX_FUNCTION("keys_4x4", keys_4x4grp, keys_4x4_mux),
- ADI_PMX_FUNCTION("keys_8x8", keys_8x8grp, keys_8x8_mux),
+ ADI_PMX_FUNCTION("uart0", uart0grp),
+ ADI_PMX_FUNCTION("uart1", uart1grp),
+ ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp),
+ ADI_PMX_FUNCTION("uart2", uart2grp),
+ ADI_PMX_FUNCTION("uart3", uart3grp),
+ ADI_PMX_FUNCTION("uart3_ctsrts", uart3ctsrtsgrp),
+ ADI_PMX_FUNCTION("rsi0", rsi0grp),
+ ADI_PMX_FUNCTION("spi0", spi0grp),
+ ADI_PMX_FUNCTION("spi1", spi1grp),
+ ADI_PMX_FUNCTION("twi0", twi0grp),
+ ADI_PMX_FUNCTION("twi1", twi1grp),
+ ADI_PMX_FUNCTION("rotary", rotarygrp),
+ ADI_PMX_FUNCTION("can0", can0grp),
+ ADI_PMX_FUNCTION("can1", can1grp),
+ ADI_PMX_FUNCTION("smc0", smc0grp),
+ ADI_PMX_FUNCTION("sport0", sport0grp),
+ ADI_PMX_FUNCTION("sport1", sport1grp),
+ ADI_PMX_FUNCTION("sport2", sport2grp),
+ ADI_PMX_FUNCTION("sport3", sport3grp),
+ ADI_PMX_FUNCTION("ppi0", ppi0grp),
+ ADI_PMX_FUNCTION("ppi1", ppi1grp),
+ ADI_PMX_FUNCTION("ppi2", ppi2grp),
+ ADI_PMX_FUNCTION("atapi", atapigrp),
+ ADI_PMX_FUNCTION("atapi_alter", atapialtergrp),
+ ADI_PMX_FUNCTION("nfc0", nfc0grp),
+ ADI_PMX_FUNCTION("keys", keysgrp),
};
static const struct adi_pinctrl_soc_data adi_bf54x_soc = {
diff --git a/drivers/pinctrl/pinctrl-adi2-bf60x.c b/drivers/pinctrl/pinctrl-adi2-bf60x.c
index bf57aea2826c..4cb59fe9be70 100644
--- a/drivers/pinctrl/pinctrl-adi2-bf60x.c
+++ b/drivers/pinctrl/pinctrl-adi2-bf60x.c
@@ -259,37 +259,6 @@ static const unsigned lp3_pins[] = {
GPIO_PF12, GPIO_PF13, GPIO_PF14, GPIO_PF15,
};
-static const struct adi_pin_group adi_pin_groups[] = {
- ADI_PIN_GROUP("uart0grp", uart0_pins),
- ADI_PIN_GROUP("uart0ctsrtsgrp", uart0_ctsrts_pins),
- ADI_PIN_GROUP("uart1grp", uart1_pins),
- ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins),
- ADI_PIN_GROUP("rsi0grp", rsi0_pins),
- ADI_PIN_GROUP("eth0grp", eth0_pins),
- ADI_PIN_GROUP("eth1grp", eth1_pins),
- ADI_PIN_GROUP("spi0grp", spi0_pins),
- ADI_PIN_GROUP("spi1grp", spi1_pins),
- ADI_PIN_GROUP("twi0grp", twi0_pins),
- ADI_PIN_GROUP("twi1grp", twi1_pins),
- ADI_PIN_GROUP("rotarygrp", rotary_pins),
- ADI_PIN_GROUP("can0grp", can0_pins),
- ADI_PIN_GROUP("smc0grp", smc0_pins),
- ADI_PIN_GROUP("sport0grp", sport0_pins),
- ADI_PIN_GROUP("sport1grp", sport1_pins),
- ADI_PIN_GROUP("sport2grp", sport2_pins),
- ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins),
- ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins),
- ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins),
- ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins),
- ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins),
- ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins),
- ADI_PIN_GROUP("ppi2_16bgrp", ppi2_16b_pins),
- ADI_PIN_GROUP("lp0grp", lp0_pins),
- ADI_PIN_GROUP("lp1grp", lp1_pins),
- ADI_PIN_GROUP("lp2grp", lp2_pins),
- ADI_PIN_GROUP("lp3grp", lp3_pins),
-};
-
static const unsigned short uart0_mux[] = {
P_UART0_TX, P_UART0_RX,
0
@@ -446,6 +415,37 @@ static const unsigned short lp3_mux[] = {
0
};
+static const struct adi_pin_group adi_pin_groups[] = {
+ ADI_PIN_GROUP("uart0grp", uart0_pins, uart0_mux),
+ ADI_PIN_GROUP("uart0ctsrtsgrp", uart0_ctsrts_pins, uart0_ctsrts_mux),
+ ADI_PIN_GROUP("uart1grp", uart1_pins, uart1_mux),
+ ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins, uart1_ctsrts_mux),
+ ADI_PIN_GROUP("rsi0grp", rsi0_pins, rsi0_mux),
+ ADI_PIN_GROUP("eth0grp", eth0_pins, eth0_mux),
+ ADI_PIN_GROUP("eth1grp", eth1_pins, eth1_mux),
+ ADI_PIN_GROUP("spi0grp", spi0_pins, spi0_mux),
+ ADI_PIN_GROUP("spi1grp", spi1_pins, spi1_mux),
+ ADI_PIN_GROUP("twi0grp", twi0_pins, twi0_mux),
+ ADI_PIN_GROUP("twi1grp", twi1_pins, twi1_mux),
+ ADI_PIN_GROUP("rotarygrp", rotary_pins, rotary_mux),
+ ADI_PIN_GROUP("can0grp", can0_pins, can0_mux),
+ ADI_PIN_GROUP("smc0grp", smc0_pins, smc0_mux),
+ ADI_PIN_GROUP("sport0grp", sport0_pins, sport0_mux),
+ ADI_PIN_GROUP("sport1grp", sport1_pins, sport1_mux),
+ ADI_PIN_GROUP("sport2grp", sport2_pins, sport2_mux),
+ ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins, ppi0_8b_mux),
+ ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins, ppi0_16b_mux),
+ ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins, ppi0_24b_mux),
+ ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins, ppi1_8b_mux),
+ ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins, ppi1_16b_mux),
+ ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins, ppi2_8b_mux),
+ ADI_PIN_GROUP("ppi2_16bgrp", ppi2_16b_pins, ppi2_16b_mux),
+ ADI_PIN_GROUP("lp0grp", lp0_pins, lp0_mux),
+ ADI_PIN_GROUP("lp1grp", lp1_pins, lp1_mux),
+ ADI_PIN_GROUP("lp2grp", lp2_pins, lp2_mux),
+ ADI_PIN_GROUP("lp3grp", lp3_pins, lp3_mux),
+};
+
static const char * const uart0grp[] = { "uart0grp" };
static const char * const uart0ctsrtsgrp[] = { "uart0ctsrtsgrp" };
static const char * const uart1grp[] = { "uart1grp" };
@@ -463,47 +463,43 @@ static const char * const smc0grp[] = { "smc0grp" };
static const char * const sport0grp[] = { "sport0grp" };
static const char * const sport1grp[] = { "sport1grp" };
static const char * const sport2grp[] = { "sport2grp" };
-static const char * const ppi0_8bgrp[] = { "ppi0_8bgrp" };
-static const char * const ppi0_16bgrp[] = { "ppi0_16bgrp" };
-static const char * const ppi0_24bgrp[] = { "ppi0_24bgrp" };
-static const char * const ppi1_8bgrp[] = { "ppi1_8bgrp" };
-static const char * const ppi1_16bgrp[] = { "ppi1_16bgrp" };
-static const char * const ppi2_8bgrp[] = { "ppi2_8bgrp" };
-static const char * const ppi2_16bgrp[] = { "ppi2_16bgrp" };
+static const char * const ppi0grp[] = { "ppi0_8bgrp",
+ "ppi0_16bgrp",
+ "ppi0_24bgrp" };
+static const char * const ppi1grp[] = { "ppi1_8bgrp",
+ "ppi1_16bgrp" };
+static const char * const ppi2grp[] = { "ppi2_8bgrp",
+ "ppi2_16bgrp" };
static const char * const lp0grp[] = { "lp0grp" };
static const char * const lp1grp[] = { "lp1grp" };
static const char * const lp2grp[] = { "lp2grp" };
static const char * const lp3grp[] = { "lp3grp" };
static const struct adi_pmx_func adi_pmx_functions[] = {
- ADI_PMX_FUNCTION("uart0", uart0grp, uart0_mux),
- ADI_PMX_FUNCTION("uart0_ctsrts", uart0ctsrtsgrp, uart0_ctsrts_mux),
- ADI_PMX_FUNCTION("uart1", uart1grp, uart1_mux),
- ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp, uart1_ctsrts_mux),
- ADI_PMX_FUNCTION("rsi0", rsi0grp, rsi0_mux),
- ADI_PMX_FUNCTION("eth0", eth0grp, eth0_mux),
- ADI_PMX_FUNCTION("eth1", eth1grp, eth1_mux),
- ADI_PMX_FUNCTION("spi0", spi0grp, spi0_mux),
- ADI_PMX_FUNCTION("spi1", spi1grp, spi1_mux),
- ADI_PMX_FUNCTION("twi0", twi0grp, twi0_mux),
- ADI_PMX_FUNCTION("twi1", twi1grp, twi1_mux),
- ADI_PMX_FUNCTION("rotary", rotarygrp, rotary_mux),
- ADI_PMX_FUNCTION("can0", can0grp, can0_mux),
- ADI_PMX_FUNCTION("smc0", smc0grp, smc0_mux),
- ADI_PMX_FUNCTION("sport0", sport0grp, sport0_mux),
- ADI_PMX_FUNCTION("sport1", sport1grp, sport1_mux),
- ADI_PMX_FUNCTION("sport2", sport2grp, sport2_mux),
- ADI_PMX_FUNCTION("ppi0_8b", ppi0_8bgrp, ppi0_8b_mux),
- ADI_PMX_FUNCTION("ppi0_16b", ppi0_16bgrp, ppi0_16b_mux),
- ADI_PMX_FUNCTION("ppi0_24b", ppi0_24bgrp, ppi0_24b_mux),
- ADI_PMX_FUNCTION("ppi1_8b", ppi1_8bgrp, ppi1_8b_mux),
- ADI_PMX_FUNCTION("ppi1_16b", ppi1_16bgrp, ppi1_16b_mux),
- ADI_PMX_FUNCTION("ppi2_8b", ppi2_8bgrp, ppi2_8b_mux),
- ADI_PMX_FUNCTION("ppi2_16b", ppi2_16bgrp, ppi2_16b_mux),
- ADI_PMX_FUNCTION("lp0", lp0grp, lp0_mux),
- ADI_PMX_FUNCTION("lp1", lp1grp, lp1_mux),
- ADI_PMX_FUNCTION("lp2", lp2grp, lp2_mux),
- ADI_PMX_FUNCTION("lp3", lp3grp, lp3_mux),
+ ADI_PMX_FUNCTION("uart0", uart0grp),
+ ADI_PMX_FUNCTION("uart0_ctsrts", uart0ctsrtsgrp),
+ ADI_PMX_FUNCTION("uart1", uart1grp),
+ ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp),
+ ADI_PMX_FUNCTION("rsi0", rsi0grp),
+ ADI_PMX_FUNCTION("eth0", eth0grp),
+ ADI_PMX_FUNCTION("eth1", eth1grp),
+ ADI_PMX_FUNCTION("spi0", spi0grp),
+ ADI_PMX_FUNCTION("spi1", spi1grp),
+ ADI_PMX_FUNCTION("twi0", twi0grp),
+ ADI_PMX_FUNCTION("twi1", twi1grp),
+ ADI_PMX_FUNCTION("rotary", rotarygrp),
+ ADI_PMX_FUNCTION("can0", can0grp),
+ ADI_PMX_FUNCTION("smc0", smc0grp),
+ ADI_PMX_FUNCTION("sport0", sport0grp),
+ ADI_PMX_FUNCTION("sport1", sport1grp),
+ ADI_PMX_FUNCTION("sport2", sport2grp),
+ ADI_PMX_FUNCTION("ppi0", ppi0grp),
+ ADI_PMX_FUNCTION("ppi1", ppi1grp),
+ ADI_PMX_FUNCTION("ppi2", ppi2grp),
+ ADI_PMX_FUNCTION("lp0", lp0grp),
+ ADI_PMX_FUNCTION("lp1", lp1grp),
+ ADI_PMX_FUNCTION("lp2", lp2grp),
+ ADI_PMX_FUNCTION("lp3", lp3grp),
};
static const struct adi_pinctrl_soc_data adi_bf60x_soc = {
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index 7a39562c3e42..0cc0eec83396 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -89,6 +89,19 @@ struct gpio_port_saved {
u32 mux;
};
+/*
+ * struct gpio_pint_saved - PINT registers saved in PM operations
+ *
+ * @assign: ASSIGN register
+ * @edge_set: EDGE_SET register
+ * @invert_set: INVERT_SET register
+ */
+struct gpio_pint_saved {
+ u32 assign;
+ u32 edge_set;
+ u32 invert_set;
+};
+
/**
* struct gpio_pint - Pin interrupt controller device. Multiple ADI GPIO
* banks can be mapped into one Pin interrupt controller.
@@ -114,7 +127,7 @@ struct gpio_pint {
int irq;
struct irq_domain *domain[2];
struct gpio_pint_regs *regs;
- struct adi_pm_pint_save saved_data;
+ struct gpio_pint_saved saved_data;
int map_count;
spinlock_t lock;
@@ -160,7 +173,7 @@ struct adi_pinctrl {
struct gpio_port {
struct list_head node;
void __iomem *base;
- unsigned int irq_base;
+ int irq_base;
unsigned int width;
struct gpio_port_t *regs;
struct gpio_port_saved saved_data;
@@ -324,6 +337,7 @@ static unsigned int adi_gpio_irq_startup(struct irq_data *d)
if (!port) {
pr_err("GPIO IRQ %d :Not exist\n", d->irq);
+ /* FIXME: negative return code will be ignored */
return -ENODEV;
}
@@ -605,8 +619,8 @@ static struct pinctrl_ops adi_pctrl_ops = {
.get_group_pins = adi_get_group_pins,
};
-static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned func_id,
+ unsigned group_id)
{
struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
struct gpio_port *port;
@@ -614,7 +628,7 @@ static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
unsigned long flags;
unsigned short *mux, pin;
- mux = (unsigned short *)pinctrl->soc->functions[selector].mux;
+ mux = (unsigned short *)pinctrl->soc->groups[group_id].mux;
while (*mux) {
pin = P_IDENT(*mux);
@@ -628,7 +642,7 @@ static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
spin_lock_irqsave(&port->lock, flags);
portmux_setup(port, pin_to_offset(range, pin),
- P_FUNCT2MUX(*mux));
+ P_FUNCT2MUX(*mux));
port_setup(port, pin_to_offset(range, pin), false);
mux++;
@@ -638,8 +652,8 @@ static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
-static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned func_id,
+ unsigned group_id)
{
struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
struct gpio_port *port;
@@ -647,7 +661,7 @@ static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned selector,
unsigned long flags;
unsigned short *mux, pin;
- mux = (unsigned short *)pinctrl->soc->functions[selector].mux;
+ mux = (unsigned short *)pinctrl->soc->groups[group_id].mux;
while (*mux) {
pin = P_IDENT(*mux);
diff --git a/drivers/pinctrl/pinctrl-adi2.h b/drivers/pinctrl/pinctrl-adi2.h
index 1f06f8df1fa3..3ca29738213f 100644
--- a/drivers/pinctrl/pinctrl-adi2.h
+++ b/drivers/pinctrl/pinctrl-adi2.h
@@ -21,13 +21,15 @@ struct adi_pin_group {
const char *name;
const unsigned *pins;
const unsigned num;
+ const unsigned short *mux;
};
-#define ADI_PIN_GROUP(n, p) \
+#define ADI_PIN_GROUP(n, p, m) \
{ \
.name = n, \
.pins = p, \
.num = ARRAY_SIZE(p), \
+ .mux = m, \
}
/**
@@ -41,15 +43,13 @@ struct adi_pmx_func {
const char *name;
const char * const *groups;
const unsigned num_groups;
- const unsigned short *mux;
};
-#define ADI_PMX_FUNCTION(n, g, m) \
+#define ADI_PMX_FUNCTION(n, g) \
{ \
.name = n, \
.groups = g, \
.num_groups = ARRAY_SIZE(g), \
- .mux = m, \
}
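With the mux array moved from the function to the pin group, one function can now expose several bus-width groups (e.g. ppi0_8b/16b/24b) while each group carries its own mux list. A minimal sketch of a group/function pair under the new macros (the "foo" identifiers are placeholders; the P_UART0_* mux IDs are reused from the tables above):

static const unsigned foo_pins[] = { 1, 2 };
static const unsigned short foo_mux[] = { P_UART0_TX, P_UART0_RX, 0 };

static const struct adi_pin_group adi_pin_groups[] = {
	ADI_PIN_GROUP("foogrp", foo_pins, foo_mux),	/* mux now per group */
};

static const char * const foogrp[] = { "foogrp" };

static const struct adi_pmx_func adi_pmx_functions[] = {
	ADI_PMX_FUNCTION("foo", foogrp),		/* no mux argument */
};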
/**
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index d990e33d8aa7..5d24aaec5dbc 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1137,6 +1137,17 @@ static void at91_gpio_free(struct gpio_chip *chip, unsigned offset)
pinctrl_free_gpio(gpio);
}
+static int at91_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct at91_gpio_chip *at91_gpio = to_at91_gpio_chip(chip);
+ void __iomem *pio = at91_gpio->regbase;
+ unsigned mask = 1 << offset;
+ u32 osr;
+
+ osr = readl_relaxed(pio + PIO_OSR);
+ return !(osr & mask);
+}
+
static int at91_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct at91_gpio_chip *at91_gpio = to_at91_gpio_chip(chip);
@@ -1325,6 +1336,31 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
return 0;
}
+static unsigned int gpio_irq_startup(struct irq_data *d)
+{
+ struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
+ unsigned pin = d->hwirq;
+ int ret;
+
+ ret = gpio_lock_as_irq(&at91_gpio->chip, pin);
+ if (ret) {
+ dev_err(at91_gpio->chip.dev, "unable to lock pin %lu IRQ\n",
+ d->hwirq);
+ return ret;
+ }
+ gpio_irq_unmask(d);
+ return 0;
+}
+
+static void gpio_irq_shutdown(struct irq_data *d)
+{
+ struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
+ unsigned pin = d->hwirq;
+
+ gpio_irq_mask(d);
+ gpio_unlock_as_irq(&at91_gpio->chip, pin);
+}
+
#ifdef CONFIG_PM
static u32 wakeups[MAX_GPIO_BANKS];
@@ -1399,6 +1435,8 @@ void at91_pinctrl_gpio_resume(void)
static struct irq_chip gpio_irqchip = {
.name = "GPIO",
+ .irq_startup = gpio_irq_startup,
+ .irq_shutdown = gpio_irq_shutdown,
.irq_disable = gpio_irq_mask,
.irq_mask = gpio_irq_mask,
.irq_unmask = gpio_irq_unmask,
@@ -1543,6 +1581,7 @@ static int at91_gpio_of_irq_setup(struct device_node *node,
static struct gpio_chip at91_gpio_template = {
.request = at91_gpio_request,
.free = at91_gpio_free,
+ .get_direction = at91_gpio_get_direction,
.direction_input = at91_gpio_direction_input,
.get = at91_gpio_get,
.direction_output = at91_gpio_direction_output,
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 665b96bc0c3a..6e8301f77187 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -60,6 +60,10 @@
#define BYT_NGPIO_NCORE 28
#define BYT_NGPIO_SUS 44
+#define BYT_SCORE_ACPI_UID "1"
+#define BYT_NCORE_ACPI_UID "2"
+#define BYT_SUS_ACPI_UID "3"
+
/*
* Baytrail gpio controller consists of three separate sub-controllers called
* SCORE, NCORE and SUS. The sub-controllers are identified by their acpi UID.
@@ -102,17 +106,17 @@ static unsigned const sus_pins[BYT_NGPIO_SUS] = {
static struct pinctrl_gpio_range byt_ranges[] = {
{
- .name = "1", /* match with acpi _UID in probe */
+ .name = BYT_SCORE_ACPI_UID, /* match with acpi _UID in probe */
.npins = BYT_NGPIO_SCORE,
.pins = score_pins,
},
{
- .name = "2",
+ .name = BYT_NCORE_ACPI_UID,
.npins = BYT_NGPIO_NCORE,
.pins = ncore_pins,
},
{
- .name = "3",
+ .name = BYT_SUS_ACPI_UID,
.npins = BYT_NGPIO_SUS,
.pins = sus_pins,
},
@@ -145,9 +149,41 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset,
return vg->reg_base + reg_offset + reg;
}
+static bool is_special_pin(struct byt_gpio *vg, unsigned offset)
+{
+ /* SCORE pin 92-93 */
+ if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) &&
+ offset >= 92 && offset <= 93)
+ return true;
+
+ /* SUS pin 11-21 */
+ if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) &&
+ offset >= 11 && offset <= 21)
+ return true;
+
+ return false;
+}
+
static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
{
struct byt_gpio *vg = to_byt_gpio(chip);
+ void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
+ u32 value;
+ bool special;
+
+ /*
+ * In most cases, func pin mux 000 means GPIO function.
+ * But some pins use func pin mux 001 to select the GPIO
+ * function. Only allow the user to export a pin whose func
+ * pin mux has been preset to the GPIO function by BIOS/FW.
+ */
+ value = readl(reg) & BYT_PIN_MUX;
+ special = is_special_pin(vg, offset);
+ if ((special && value != 1) || (!special && value)) {
+ dev_err(&vg->pdev->dev,
+ "pin %u cannot be used as GPIO.\n", offset);
+ return -EINVAL;
+ }
pm_runtime_get(&vg->pdev->dev);
@@ -371,23 +407,23 @@ static void byt_irq_mask(struct irq_data *d)
{
}
-static unsigned int byt_irq_startup(struct irq_data *d)
+static int byt_irq_reqres(struct irq_data *d)
{
struct byt_gpio *vg = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&vg->chip, irqd_to_hwirq(d)))
+ if (gpio_lock_as_irq(&vg->chip, irqd_to_hwirq(d))) {
dev_err(vg->chip.dev,
"unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
- byt_irq_unmask(d);
+ return -EINVAL;
+ }
return 0;
}
-static void byt_irq_shutdown(struct irq_data *d)
+static void byt_irq_relres(struct irq_data *d)
{
struct byt_gpio *vg = irq_data_get_irq_chip_data(d);
- byt_irq_mask(d);
gpio_unlock_as_irq(&vg->chip, irqd_to_hwirq(d));
}
@@ -396,8 +432,8 @@ static struct irq_chip byt_irqchip = {
.irq_mask = byt_irq_mask,
.irq_unmask = byt_irq_unmask,
.irq_set_type = byt_irq_type,
- .irq_startup = byt_irq_startup,
- .irq_shutdown = byt_irq_shutdown,
+ .irq_request_resources = byt_irq_reqres,
+ .irq_release_resources = byt_irq_relres,
};
static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
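The request-time check introduced above reduces to the following cases (derived from byt_gpio_request() and is_special_pin() as shown; the value is the BYT_PIN_MUX field read from BYT_CONF0_REG):

/*
 * normal pin,  mux == 0  ->  request allowed
 * normal pin,  mux != 0  ->  -EINVAL (pin owned by another function)
 * special pin, mux == 1  ->  request allowed
 * special pin, mux != 1  ->  -EINVAL
 *
 * where "special" means SCORE pins 92-93 or SUS pins 11-21, so only
 * pins that the BIOS/FW already muxed to GPIO can be exported.
 */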
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 162ac0d73739..d182fdd2e715 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -8,17 +8,14 @@
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
#include <linux/module.h>
-#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
-#include <linux/irqdomain.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -61,9 +58,17 @@
#define U300_GPIO_PINS_PER_PORT 8
#define U300_GPIO_MAX (U300_GPIO_PINS_PER_PORT * U300_GPIO_NUM_PORTS)
+struct u300_gpio_port {
+ struct u300_gpio *gpio;
+ char name[8];
+ int irq;
+ int number;
+ u8 toggle_edge_mode;
+};
+
struct u300_gpio {
struct gpio_chip chip;
- struct list_head port_list;
+ struct u300_gpio_port ports[U300_GPIO_NUM_PORTS];
struct clk *clk;
void __iomem *base;
struct device *dev;
@@ -78,16 +83,6 @@ struct u300_gpio {
u32 iev;
};
-struct u300_gpio_port {
- struct list_head node;
- struct u300_gpio *gpio;
- char name[8];
- struct irq_domain *domain;
- int irq;
- int number;
- u8 toggle_edge_mode;
-};
-
/*
* Macro to expand to read a specific register found in the "gpio"
* struct. It requires the struct u300_gpio *gpio variable to exist in
@@ -308,39 +303,6 @@ static int u300_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
return 0;
}
-static int u300_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct u300_gpio *gpio = to_u300_gpio(chip);
- int portno = offset >> 3;
- struct u300_gpio_port *port = NULL;
- struct list_head *p;
- int retirq;
- bool found = false;
-
- list_for_each(p, &gpio->port_list) {
- port = list_entry(p, struct u300_gpio_port, node);
- if (port->number == portno) {
- found = true;
- break;
- }
- }
- if (!found) {
- dev_err(gpio->dev, "could not locate port for GPIO %d IRQ\n",
- offset);
- return -EINVAL;
- }
-
- /*
- * The local hwirqs on the port are the lower three bits, there
- * are exactly 8 IRQs per port since they are 8-bit
- */
- retirq = irq_find_mapping(port->domain, (offset & 0x7));
-
- dev_dbg(gpio->dev, "request IRQ for GPIO %d, return %d from port %d\n",
- offset, retirq, port->number);
- return retirq;
-}
-
/* Returning -EINVAL means "supported but not available" */
int u300_gpio_config_get(struct gpio_chip *chip,
unsigned offset,
@@ -461,7 +423,6 @@ static struct gpio_chip u300_gpio_chip = {
.set = u300_gpio_set,
.direction_input = u300_gpio_direction_input,
.direction_output = u300_gpio_direction_output,
- .to_irq = u300_gpio_to_irq,
};
static void u300_toggle_trigger(struct u300_gpio *gpio, unsigned offset)
@@ -485,9 +446,10 @@ static void u300_toggle_trigger(struct u300_gpio *gpio, unsigned offset)
static int u300_gpio_irq_type(struct irq_data *d, unsigned trigger)
{
- struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
- struct u300_gpio *gpio = port->gpio;
- int offset = (port->number << 3) + d->hwirq;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct u300_gpio *gpio = to_u300_gpio(chip);
+ struct u300_gpio_port *port = &gpio->ports[d->hwirq >> 3];
+ int offset = d->hwirq;
u32 val;
if ((trigger & IRQF_TRIGGER_RISING) &&
@@ -521,18 +483,15 @@ static int u300_gpio_irq_type(struct irq_data *d, unsigned trigger)
static void u300_gpio_irq_enable(struct irq_data *d)
{
- struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
- struct u300_gpio *gpio = port->gpio;
- int offset = (port->number << 3) + d->hwirq;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct u300_gpio *gpio = to_u300_gpio(chip);
+ struct u300_gpio_port *port = &gpio->ports[d->hwirq >> 3];
+ int offset = d->hwirq;
u32 val;
unsigned long flags;
dev_dbg(gpio->dev, "enable IRQ for hwirq %lu on port %s, offset %d\n",
d->hwirq, port->name, offset);
- if (gpio_lock_as_irq(&gpio->chip, d->hwirq))
- dev_err(gpio->dev,
- "unable to lock HW IRQ %lu for IRQ\n",
- d->hwirq);
local_irq_save(flags);
val = readl(U300_PIN_REG(offset, ien));
writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
@@ -541,9 +500,9 @@ static void u300_gpio_irq_enable(struct irq_data *d)
static void u300_gpio_irq_disable(struct irq_data *d)
{
- struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
- struct u300_gpio *gpio = port->gpio;
- int offset = (port->number << 3) + d->hwirq;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct u300_gpio *gpio = to_u300_gpio(chip);
+ int offset = d->hwirq;
u32 val;
unsigned long flags;
@@ -551,7 +510,6 @@ static void u300_gpio_irq_disable(struct irq_data *d)
val = readl(U300_PIN_REG(offset, ien));
writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
local_irq_restore(flags);
- gpio_unlock_as_irq(&gpio->chip, d->hwirq);
}
static struct irq_chip u300_gpio_irqchip = {
@@ -559,17 +517,19 @@ static struct irq_chip u300_gpio_irqchip = {
.irq_enable = u300_gpio_irq_enable,
.irq_disable = u300_gpio_irq_disable,
.irq_set_type = u300_gpio_irq_type,
-
};
static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct u300_gpio_port *port = irq_get_handler_data(irq);
- struct u300_gpio *gpio = port->gpio;
+ struct irq_chip *parent_chip = irq_get_chip(irq);
+ struct gpio_chip *chip = irq_get_handler_data(irq);
+ struct u300_gpio *gpio = to_u300_gpio(chip);
+ struct u300_gpio_port *port = &gpio->ports[irq - chip->base];
int pinoffset = port->number << 3; /* get the right stride */
unsigned long val;
- desc->irq_data.chip->irq_ack(&desc->irq_data);
+ chained_irq_enter(parent_chip, desc);
+
/* Read event register */
val = readl(U300_PIN_REG(pinoffset, iev));
/* Mask relevant bits */
@@ -582,8 +542,8 @@ static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
int irqoffset;
for_each_set_bit(irqoffset, &val, U300_GPIO_PINS_PER_PORT) {
- int pin_irq = irq_find_mapping(port->domain, irqoffset);
int offset = pinoffset + irqoffset;
+ int pin_irq = irq_find_mapping(chip->irqdomain, offset);
dev_dbg(gpio->dev, "GPIO IRQ %d on pin %d\n",
pin_irq, offset);
@@ -597,7 +557,7 @@ static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
}
}
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ chained_irq_exit(parent_chip, desc);
}
static void __init u300_gpio_init_pin(struct u300_gpio *gpio,
@@ -648,20 +608,6 @@ static void __init u300_gpio_init_coh901571(struct u300_gpio *gpio)
}
}
-static inline void u300_gpio_free_ports(struct u300_gpio *gpio)
-{
- struct u300_gpio_port *port;
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &gpio->port_list) {
- port = list_entry(p, struct u300_gpio_port, node);
- list_del(&port->node);
- if (port->domain)
- irq_domain_remove(port->domain);
- kfree(port);
- }
-}
-
/*
* Here we map a GPIO in the local gpio_chip pin space to a pin in
* the local pinctrl pin space. The pin controller used is
@@ -752,17 +698,28 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
gpio->base + U300_GPIO_CR);
u300_gpio_init_coh901571(gpio);
+#ifdef CONFIG_OF_GPIO
+ gpio->chip.of_node = pdev->dev.of_node;
+#endif
+ err = gpiochip_add(&gpio->chip);
+ if (err) {
+ dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
+ goto err_no_chip;
+ }
+
+ err = gpiochip_irqchip_add(&gpio->chip,
+ &u300_gpio_irqchip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_EDGE_FALLING);
+ if (err) {
+ dev_err(gpio->dev, "no GPIO irqchip\n");
+ goto err_no_irqchip;
+ }
+
/* Add each port with its IRQ separately */
- INIT_LIST_HEAD(&gpio->port_list);
for (portno = 0 ; portno < U300_GPIO_NUM_PORTS; portno++) {
- struct u300_gpio_port *port =
- kmalloc(sizeof(struct u300_gpio_port), GFP_KERNEL);
-
- if (!port) {
- dev_err(gpio->dev, "out of memory\n");
- err = -ENOMEM;
- goto err_no_port;
- }
+ struct u300_gpio_port *port = &gpio->ports[portno];
snprintf(port->name, 8, "gpio%d", portno);
port->number = portno;
@@ -770,50 +727,16 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
port->irq = platform_get_irq(pdev, portno);
- dev_dbg(gpio->dev, "register IRQ %d for port %s\n", port->irq,
- port->name);
-
- port->domain = irq_domain_add_linear(pdev->dev.of_node,
- U300_GPIO_PINS_PER_PORT,
- &irq_domain_simple_ops,
- port);
- if (!port->domain) {
- err = -ENOMEM;
- goto err_no_domain;
- }
-
- irq_set_chained_handler(port->irq, u300_gpio_irq_handler);
- irq_set_handler_data(port->irq, port);
-
- /* For each GPIO pin set the unique IRQ handler */
- for (i = 0; i < U300_GPIO_PINS_PER_PORT; i++) {
- int irqno = irq_create_mapping(port->domain, i);
-
- dev_dbg(gpio->dev, "GPIO%d on port %s gets IRQ %d\n",
- gpio->chip.base + (port->number << 3) + i,
- port->name, irqno);
- irq_set_chip_and_handler(irqno, &u300_gpio_irqchip,
- handle_simple_irq);
- set_irq_flags(irqno, IRQF_VALID);
- irq_set_chip_data(irqno, port);
- }
+ gpiochip_set_chained_irqchip(&gpio->chip,
+ &u300_gpio_irqchip,
+ port->irq,
+ u300_gpio_irq_handler);
/* Turns off irq force (test register) for this port */
writel(0x0, gpio->base + portno * gpio->stride + ifr);
-
- list_add_tail(&port->node, &gpio->port_list);
}
dev_dbg(gpio->dev, "initialized %d GPIO ports\n", portno);
-#ifdef CONFIG_OF_GPIO
- gpio->chip.of_node = pdev->dev.of_node;
-#endif
- err = gpiochip_add(&gpio->chip);
- if (err) {
- dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
- goto err_no_chip;
- }
-
/*
* Add pinctrl pin ranges, the pin controller must be registered
* at this point
@@ -832,12 +755,10 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
return 0;
err_no_range:
+err_no_irqchip:
if (gpiochip_remove(&gpio->chip))
dev_err(&pdev->dev, "failed to remove gpio chip\n");
err_no_chip:
-err_no_domain:
-err_no_port:
- u300_gpio_free_ports(gpio);
clk_disable_unprepare(gpio->clk);
dev_err(&pdev->dev, "module ERROR:%d\n", err);
return err;
@@ -856,7 +777,6 @@ static int __exit u300_gpio_remove(struct platform_device *pdev)
dev_err(gpio->dev, "unable to remove gpiochip: %d\n", err);
return err;
}
- u300_gpio_free_ports(gpio);
clk_disable_unprepare(gpio->clk);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c
index 155b1b3a0e7a..07c81306f2f3 100644
--- a/drivers/pinctrl/pinctrl-exynos.c
+++ b/drivers/pinctrl/pinctrl-exynos.c
@@ -1042,6 +1042,88 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
},
};
+/* pin banks of exynos5260 pin-controller 0 */
+static struct samsung_pin_bank exynos5260_pin_banks0[] = {
+ EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(4, 0x080, "gpb1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(5, 0x0a0, "gpb2", 0x14),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0c0, "gpb3", 0x18),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpb4", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpb5", 0x20),
+ EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd0", 0x24),
+ EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpd1", 0x28),
+ EXYNOS_PIN_BANK_EINTG(5, 0x160, "gpd2", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpe0", 0x30),
+ EXYNOS_PIN_BANK_EINTG(5, 0x1a0, "gpe1", 0x34),
+ EXYNOS_PIN_BANK_EINTG(4, 0x1c0, "gpf0", 0x38),
+ EXYNOS_PIN_BANK_EINTG(8, 0x1e0, "gpf1", 0x3c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x200, "gpk0", 0x40),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc00, "gpx0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc20, "gpx1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc40, "gpx2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gpx3", 0x0c),
+};
+
+/* pin banks of exynos5260 pin-controller 1 */
+static struct samsung_pin_bank exynos5260_pin_banks1[] = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpc0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpc1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpc3", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(4, 0x080, "gpc4", 0x10),
+};
+
+/* pin banks of exynos5260 pin-controller 2 */
+static struct samsung_pin_bank exynos5260_pin_banks2[] = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes
+ * three gpio/pin-mux/pinconfig controllers.
+ */
+struct samsung_pin_ctrl exynos5260_pin_ctrl[] = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos5260_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos5260_pin_banks0),
+ .geint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ .weint_con = EXYNOS_WKUP_ECON_OFFSET,
+ .weint_mask = EXYNOS_WKUP_EMASK_OFFSET,
+ .weint_pend = EXYNOS_WKUP_EPEND_OFFSET,
+ .svc = EXYNOS_SVC_OFFSET,
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .label = "exynos5260-gpio-ctrl0",
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos5260_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos5260_pin_banks1),
+ .geint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ .svc = EXYNOS_SVC_OFFSET,
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .label = "exynos5260-gpio-ctrl1",
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos5260_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos5260_pin_banks2),
+ .geint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ .svc = EXYNOS_SVC_OFFSET,
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .label = "exynos5260-gpio-ctrl2",
+ },
+};
+
/* pin banks of exynos5420 pin-controller 0 */
static struct samsung_pin_bank exynos5420_pin_banks0[] = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00),
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index 4779b8e0eee8..e118fb121e02 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -491,7 +491,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
pin->mux_mode |= IOMUXC_CONFIG_SION;
pin->config = config & ~IMX_PAD_SION;
- dev_dbg(info->dev, "%s: %d 0x%08lx", info->pins[i].name,
+ dev_dbg(info->dev, "%s: %d 0x%08lx", info->pins[pin_id].name,
pin->mux_mode, pin->config);
}
diff --git a/drivers/pinctrl/pinctrl-msm.c b/drivers/pinctrl/pinctrl-msm.c
index ef2bf3126da6..38d579b47f31 100644
--- a/drivers/pinctrl/pinctrl-msm.c
+++ b/drivers/pinctrl/pinctrl-msm.c
@@ -28,7 +28,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include "core.h"
@@ -50,7 +49,6 @@
* @enabled_irqs: Bitmap of currently enabled irqs.
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
- * @wake_irqs: Bitmap of irqs with requested as wakeup source.
 * @soc: Reference to soc_data of platform specific data.
* @regs: Base address for the TLMM register map.
*/
@@ -65,7 +63,6 @@ struct msm_pinctrl {
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
- DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs;
@@ -203,42 +200,29 @@ static const struct pinmux_ops msm_pinmux_ops = {
static int msm_config_reg(struct msm_pinctrl *pctrl,
const struct msm_pingroup *g,
unsigned param,
- s16 *reg,
unsigned *mask,
unsigned *bit)
{
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- *reg = g->ctl_reg;
- *bit = g->pull_bit;
- *mask = 3;
- break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- *reg = g->ctl_reg;
- *bit = g->pull_bit;
- *mask = 3;
- break;
case PIN_CONFIG_BIAS_PULL_UP:
- *reg = g->ctl_reg;
*bit = g->pull_bit;
*mask = 3;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- *reg = g->ctl_reg;
*bit = g->drv_bit;
*mask = 7;
break;
+ case PIN_CONFIG_OUTPUT:
+ *bit = g->oe_bit;
+ *mask = 1;
+ break;
default:
dev_err(pctrl->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
}
- if (*reg < 0) {
- dev_err(pctrl->dev, "Config param %04x not supported on group %s\n",
- param, g->name);
- return -ENOTSUPP;
- }
-
return 0;
}
@@ -261,8 +245,10 @@ static int msm_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
#define MSM_PULL_DOWN 1
#define MSM_PULL_UP 3
-static const unsigned msm_regval_to_drive[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
-static const unsigned msm_drive_to_regval[] = { -1, -1, 0, -1, 1, -1, 2, -1, 3, -1, 4, -1, 5, -1, 6, -1, 7 };
+static unsigned msm_regval_to_drive(u32 val)
+{
+ return (val + 1) * 2;
+}
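The table-based conversions are dropped above because the TLMM drive-strength field is linear: register values 0..7 correspond to 2..16 mA in 2 mA steps, and msm_config_group_set() later inverts this as (mA / 2) - 1 after rejecting odd or out-of-range values. A minimal standalone sketch of the round trip, written as plain userspace C with hypothetical helper names rather than the driver's own:

#include <stdio.h>

/* Forward mapping, as in msm_regval_to_drive(): regval 0..7 -> 2..16 mA. */
static unsigned int regval_to_ma(unsigned int val)
{
	return (val + 1) * 2;
}

/* Reverse mapping, as in msm_config_group_set(): mA -> regval, or -1. */
static int ma_to_regval(unsigned int ma)
{
	if (ma > 16 || ma < 2 || (ma % 2) != 0)
		return -1;		/* invalid drive strength */
	return (ma / 2) - 1;
}

int main(void)
{
	unsigned int val;

	for (val = 0; val < 8; val++)
		printf("regval %u -> %2u mA -> regval %d\n",
		       val, regval_to_ma(val), ma_to_regval(regval_to_ma(val)));
	return 0;
}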
static int msm_config_group_get(struct pinctrl_dev *pctldev,
unsigned int group,
@@ -274,17 +260,16 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
unsigned mask;
unsigned arg;
unsigned bit;
- s16 reg;
int ret;
u32 val;
g = &pctrl->soc->groups[group];
- ret = msm_config_reg(pctrl, g, param, &reg, &mask, &bit);
+ ret = msm_config_reg(pctrl, g, param, &mask, &bit);
if (ret < 0)
return ret;
- val = readl(pctrl->regs + reg);
+ val = readl(pctrl->regs + g->ctl_reg);
arg = (val >> bit) & mask;
/* Convert register value to pinconf value */
@@ -299,7 +284,15 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
arg = arg == MSM_PULL_UP;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- arg = msm_regval_to_drive[arg];
+ arg = msm_regval_to_drive(arg);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ /* Pin is not output */
+ if (!arg)
+ return -EINVAL;
+
+ val = readl(pctrl->regs + g->io_reg);
+ arg = !!(val & BIT(g->in_bit));
break;
default:
dev_err(pctrl->dev, "Unsupported config parameter: %x\n",
@@ -324,7 +317,6 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
unsigned mask;
unsigned arg;
unsigned bit;
- s16 reg;
int ret;
u32 val;
int i;
@@ -335,7 +327,7 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
- ret = msm_config_reg(pctrl, g, param, &reg, &mask, &bit);
+ ret = msm_config_reg(pctrl, g, param, &mask, &bit);
if (ret < 0)
return ret;
@@ -352,10 +344,24 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_DRIVE_STRENGTH:
/* Check for invalid values */
- if (arg >= ARRAY_SIZE(msm_drive_to_regval))
+ if (arg > 16 || arg < 2 || (arg % 2) != 0)
arg = -1;
else
- arg = msm_drive_to_regval[arg];
+ arg = (arg / 2) - 1;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ /* set output value */
+ spin_lock_irqsave(&pctrl->lock, flags);
+ val = readl(pctrl->regs + g->io_reg);
+ if (arg)
+ val |= BIT(g->out_bit);
+ else
+ val &= ~BIT(g->out_bit);
+ writel(val, pctrl->regs + g->io_reg);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ /* enable output */
+ arg = 1;
break;
default:
dev_err(pctrl->dev, "Unsupported config parameter: %x\n",
@@ -370,10 +376,10 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
}
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + reg);
+ val = readl(pctrl->regs + g->ctl_reg);
val &= ~(mask << bit);
val |= arg << bit;
- writel(val, pctrl->regs + reg);
+ writel(val, pctrl->regs + g->ctl_reg);
spin_unlock_irqrestore(&pctrl->lock, flags);
}
@@ -402,8 +408,6 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u32 val;
g = &pctrl->soc->groups[offset];
- if (WARN_ON(g->io_reg < 0))
- return -EINVAL;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -424,8 +428,6 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
u32 val;
g = &pctrl->soc->groups[offset];
- if (WARN_ON(g->io_reg < 0))
- return -EINVAL;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -452,8 +454,6 @@ static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
u32 val;
g = &pctrl->soc->groups[offset];
- if (WARN_ON(g->io_reg < 0))
- return -EINVAL;
val = readl(pctrl->regs + g->io_reg);
return !!(val & BIT(g->in_bit));
@@ -467,8 +467,6 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
u32 val;
g = &pctrl->soc->groups[offset];
- if (WARN_ON(g->io_reg < 0))
- return;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -534,7 +532,7 @@ static void msm_gpio_dbg_show_one(struct seq_file *s,
pull = (ctl_reg >> g->pull_bit) & 3;
seq_printf(s, " %-8s: %-3s %d", g->name, is_out ? "out" : "in", func);
- seq_printf(s, " %dmA", msm_regval_to_drive[drive]);
+ seq_printf(s, " %dmA", msm_regval_to_drive(drive));
seq_printf(s, " %s", pulls[pull]);
}
@@ -617,8 +615,6 @@ static void msm_gpio_irq_mask(struct irq_data *d)
pctrl = irq_data_get_irq_chip_data(d);
g = &pctrl->soc->groups[d->hwirq];
- if (WARN_ON(g->intr_cfg_reg < 0))
- return;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -640,8 +636,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
pctrl = irq_data_get_irq_chip_data(d);
g = &pctrl->soc->groups[d->hwirq];
- if (WARN_ON(g->intr_status_reg < 0))
- return;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -667,8 +661,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
pctrl = irq_data_get_irq_chip_data(d);
g = &pctrl->soc->groups[d->hwirq];
- if (WARN_ON(g->intr_status_reg < 0))
- return;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -693,8 +685,6 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pctrl = irq_data_get_irq_chip_data(d);
g = &pctrl->soc->groups[d->hwirq];
- if (WARN_ON(g->intr_cfg_reg < 0))
- return -EINVAL;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -783,45 +773,34 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct msm_pinctrl *pctrl;
unsigned long flags;
- unsigned ngpio;
pctrl = irq_data_get_irq_chip_data(d);
- ngpio = pctrl->chip.ngpio;
spin_lock_irqsave(&pctrl->lock, flags);
- if (on) {
- if (bitmap_empty(pctrl->wake_irqs, ngpio))
- enable_irq_wake(pctrl->irq);
- set_bit(d->hwirq, pctrl->wake_irqs);
- } else {
- clear_bit(d->hwirq, pctrl->wake_irqs);
- if (bitmap_empty(pctrl->wake_irqs, ngpio))
- disable_irq_wake(pctrl->irq);
- }
+ irq_set_irq_wake(pctrl->irq, on);
spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
-static unsigned int msm_gpio_irq_startup(struct irq_data *d)
+static int msm_gpio_irq_reqres(struct irq_data *d)
{
struct msm_pinctrl *pctrl = irq_data_get_irq_chip_data(d);
if (gpio_lock_as_irq(&pctrl->chip, d->hwirq)) {
dev_err(pctrl->dev, "unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
+ return -EINVAL;
}
- msm_gpio_irq_unmask(d);
return 0;
}
-static void msm_gpio_irq_shutdown(struct irq_data *d)
+static void msm_gpio_irq_relres(struct irq_data *d)
{
struct msm_pinctrl *pctrl = irq_data_get_irq_chip_data(d);
- msm_gpio_irq_mask(d);
gpio_unlock_as_irq(&pctrl->chip, d->hwirq);
}
@@ -832,8 +811,8 @@ static struct irq_chip msm_gpio_irq_chip = {
.irq_ack = msm_gpio_irq_ack,
.irq_set_type = msm_gpio_irq_set_type,
.irq_set_wake = msm_gpio_irq_set_wake,
- .irq_startup = msm_gpio_irq_startup,
- .irq_shutdown = msm_gpio_irq_shutdown,
+ .irq_request_resources = msm_gpio_irq_reqres,
+ .irq_release_resources = msm_gpio_irq_relres,
};
static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -869,6 +848,12 @@ static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
static int msm_gpio_init(struct msm_pinctrl *pctrl)
{
struct gpio_chip *chip;
@@ -876,10 +861,14 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
int ret;
int i;
int r;
+ unsigned ngpio = pctrl->soc->ngpios;
+
+ if (WARN_ON(ngpio > MAX_NR_GPIO))
+ return -EINVAL;
chip = &pctrl->chip;
chip->base = 0;
- chip->ngpio = pctrl->soc->ngpios;
+ chip->ngpio = ngpio;
chip->label = dev_name(pctrl->dev);
chip->dev = pctrl->dev;
chip->owner = THIS_MODULE;
@@ -907,6 +896,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
for (i = 0; i < chip->ngpio; i++) {
irq = irq_create_mapping(pctrl->domain, i);
+ irq_set_lockdep_class(irq, &gpio_lock_class);
irq_set_chip_and_handler(irq, &msm_gpio_irq_chip, handle_edge_irq);
irq_set_chip_data(irq, pctrl);
}
diff --git a/drivers/pinctrl/pinctrl-msm.h b/drivers/pinctrl/pinctrl-msm.h
index 206e782e2daa..8fbe9fb19f36 100644
--- a/drivers/pinctrl/pinctrl-msm.h
+++ b/drivers/pinctrl/pinctrl-msm.h
@@ -13,10 +13,7 @@
#ifndef __PINCTRL_MSM_H__
#define __PINCTRL_MSM_H__
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/machine.h>
+struct pinctrl_pin_desc;
/**
* struct msm_function - a pinmux function
diff --git a/drivers/pinctrl/pinctrl-msm8x74.c b/drivers/pinctrl/pinctrl-msm8x74.c
index f944bf2172ef..dde5529807aa 100644
--- a/drivers/pinctrl/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/pinctrl-msm8x74.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
#include "pinctrl-msm.h"
@@ -406,6 +405,7 @@ enum msm8x74_functions {
MSM_MUX_blsp_i2c6,
MSM_MUX_blsp_i2c11,
MSM_MUX_blsp_spi1,
+ MSM_MUX_blsp_spi8,
MSM_MUX_blsp_uart2,
MSM_MUX_blsp_uart8,
MSM_MUX_slimbus,
@@ -416,6 +416,9 @@ static const char * const blsp_i2c2_groups[] = { "gpio6", "gpio7" };
static const char * const blsp_i2c6_groups[] = { "gpio29", "gpio30" };
static const char * const blsp_i2c11_groups[] = { "gpio83", "gpio84" };
static const char * const blsp_spi1_groups[] = { "gpio0", "gpio1", "gpio2", "gpio3" };
+static const char * const blsp_spi8_groups[] = {
+ "gpio45", "gpio46", "gpio47", "gpio48"
+};
static const char * const blsp_uart2_groups[] = { "gpio4", "gpio5" };
static const char * const blsp_uart8_groups[] = { "gpio45", "gpio46" };
static const char * const slimbus_groups[] = { "gpio70", "gpio71" };
@@ -425,6 +428,7 @@ static const struct msm_function msm8x74_functions[] = {
FUNCTION(blsp_i2c6),
FUNCTION(blsp_i2c11),
FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi8),
FUNCTION(blsp_uart2),
FUNCTION(blsp_uart8),
FUNCTION(slimbus),
@@ -476,10 +480,10 @@ static const struct msm_pingroup msm8x74_groups[] = {
PINGROUP(42, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(43, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(44, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(45, NA, blsp_uart8, NA, NA, NA, NA, NA),
- PINGROUP(46, NA, blsp_uart8, NA, NA, NA, NA, NA),
- PINGROUP(47, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(48, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, blsp_spi8, blsp_uart8, NA, NA, NA, NA, NA),
+ PINGROUP(46, blsp_spi8, blsp_uart8, NA, NA, NA, NA, NA),
+ PINGROUP(47, blsp_spi8, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, blsp_spi8, NA, NA, NA, NA, NA, NA),
PINGROUP(49, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(50, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(51, NA, NA, NA, NA, NA, NA, NA),
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 53a11114927f..208341fd57d2 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -21,9 +21,6 @@
#include <linux/gpio.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/irqchip/chained_irq.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -246,28 +243,14 @@ enum nmk_gpio_slpm {
NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE,
};
-/*
- * Platform data to register a block: only the initial gpio/irq number.
- */
-struct nmk_gpio_platform_data {
- char *name;
- int first_gpio;
- int first_irq;
- int num_gpio;
- u32 (*get_secondary_status)(unsigned int bank);
- void (*set_ioforce)(bool enable);
- bool supports_sleepmode;
-};
-
struct nmk_gpio_chip {
struct gpio_chip chip;
- struct irq_domain *domain;
void __iomem *addr;
struct clk *clk;
unsigned int bank;
unsigned int parent_irq;
- int secondary_parent_irq;
- u32 (*get_secondary_status)(unsigned int bank);
+ int latent_parent_irq;
+ u32 (*get_latent_status)(unsigned int bank);
void (*set_ioforce)(bool enable);
spinlock_t lock;
bool sleepmode;
@@ -432,7 +415,7 @@ nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
u32 falling = nmk_chip->fimsc & BIT(offset);
u32 rising = nmk_chip->rimsc & BIT(offset);
int gpio = nmk_chip->chip.base + offset;
- int irq = irq_find_mapping(nmk_chip->domain, offset);
+ int irq = irq_find_mapping(nmk_chip->chip.irqdomain, offset);
struct irq_data *d = irq_get_irq_data(irq);
if (!rising && !falling)
@@ -660,11 +643,8 @@ static inline int nmk_gpio_get_bitmask(int gpio)
static void nmk_gpio_irq_ack(struct irq_data *d)
{
- struct nmk_gpio_chip *nmk_chip;
-
- nmk_chip = irq_data_get_irq_chip_data(d);
- if (!nmk_chip)
- return;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
clk_enable(nmk_chip->clk);
writel(nmk_gpio_get_bitmask(d->hwirq), nmk_chip->addr + NMK_GPIO_IC);
@@ -848,10 +828,6 @@ static unsigned int nmk_gpio_irq_startup(struct irq_data *d)
{
struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&nmk_chip->chip, d->hwirq))
- dev_err(nmk_chip->chip.dev,
- "unable to lock HW IRQ %lu for IRQ\n",
- d->hwirq);
clk_enable(nmk_chip->clk);
nmk_gpio_irq_unmask(d);
return 0;
@@ -863,7 +839,6 @@ static void nmk_gpio_irq_shutdown(struct irq_data *d)
nmk_gpio_irq_mask(d);
clk_disable(nmk_chip->clk);
- gpio_unlock_as_irq(&nmk_chip->chip, d->hwirq);
}
static struct irq_chip nmk_gpio_irq_chip = {
@@ -881,16 +856,15 @@ static struct irq_chip nmk_gpio_irq_chip = {
static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
u32 status)
{
- struct nmk_gpio_chip *nmk_chip;
struct irq_chip *host_chip = irq_get_chip(irq);
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
chained_irq_enter(host_chip, desc);
- nmk_chip = irq_get_handler_data(irq);
while (status) {
int bit = __ffs(status);
- generic_handle_irq(irq_find_mapping(nmk_chip->domain, bit));
+ generic_handle_irq(irq_find_mapping(chip->irqdomain, bit));
status &= ~BIT(bit);
}
@@ -899,9 +873,11 @@ static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
- struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq);
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
u32 status;
+ pr_err("PLONK IRQ %d\n", irq);
clk_enable(nmk_chip->clk);
status = readl(nmk_chip->addr + NMK_GPIO_IS);
clk_disable(nmk_chip->clk);
@@ -909,29 +885,16 @@ static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
__nmk_gpio_irq_handler(irq, desc, status);
}
-static void nmk_gpio_secondary_irq_handler(unsigned int irq,
+static void nmk_gpio_latent_irq_handler(unsigned int irq,
struct irq_desc *desc)
{
- struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq);
- u32 status = nmk_chip->get_secondary_status(nmk_chip->bank);
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
+ u32 status = nmk_chip->get_latent_status(nmk_chip->bank);
__nmk_gpio_irq_handler(irq, desc, status);
}
-static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
-{
- irq_set_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler);
- irq_set_handler_data(nmk_chip->parent_irq, nmk_chip);
-
- if (nmk_chip->secondary_parent_irq >= 0) {
- irq_set_chained_handler(nmk_chip->secondary_parent_irq,
- nmk_gpio_secondary_irq_handler);
- irq_set_handler_data(nmk_chip->secondary_parent_irq, nmk_chip);
- }
-
- return 0;
-}
-
/* I/O Functions */
static int nmk_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -1010,14 +973,6 @@ static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
return 0;
}
-static int nmk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct nmk_gpio_chip *nmk_chip =
- container_of(chip, struct nmk_gpio_chip, chip);
-
- return irq_create_mapping(nmk_chip->domain, offset);
-}
-
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
@@ -1116,7 +1071,6 @@ static struct gpio_chip nmk_gpio_template = {
.get = nmk_gpio_get_input,
.direction_output = nmk_gpio_make_output,
.set = nmk_gpio_set_output,
- .to_irq = nmk_gpio_to_irq,
.dbg_show = nmk_gpio_dbg_show,
.can_sleep = false,
};
@@ -1217,62 +1171,35 @@ void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up)
}
}
-static int nmk_gpio_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct nmk_gpio_chip *nmk_chip = d->host_data;
-
- if (!nmk_chip)
- return -EINVAL;
-
- irq_set_chip_and_handler(irq, &nmk_gpio_irq_chip, handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID);
- irq_set_chip_data(irq, nmk_chip);
- irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
-
- return 0;
-}
-
-static const struct irq_domain_ops nmk_gpio_irq_simple_ops = {
- .map = nmk_gpio_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
static int nmk_gpio_probe(struct platform_device *dev)
{
- struct nmk_gpio_platform_data *pdata;
struct device_node *np = dev->dev.of_node;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
struct resource *res;
struct clk *clk;
- int secondary_irq;
+ int latent_irq;
+ bool supports_sleepmode;
void __iomem *base;
int irq;
int ret;
- pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
if (of_get_property(np, "st,supports-sleepmode", NULL))
- pdata->supports_sleepmode = true;
+ supports_sleepmode = true;
+ else
+ supports_sleepmode = false;
if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
dev_err(&dev->dev, "gpio-bank property not found\n");
return -EINVAL;
}
- pdata->first_gpio = dev->id * NMK_GPIO_PER_CHIP;
- pdata->num_gpio = NMK_GPIO_PER_CHIP;
-
irq = platform_get_irq(dev, 0);
if (irq < 0)
return irq;
- secondary_irq = platform_get_irq(dev, 1);
- if (secondary_irq >= 0 && !pdata->get_secondary_status)
- return -EINVAL;
+ /* It's OK for this IRQ not to be present */
+ latent_irq = platform_get_irq(dev, 1);
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&dev->dev, res);
@@ -1297,16 +1224,14 @@ static int nmk_gpio_probe(struct platform_device *dev)
nmk_chip->addr = base;
nmk_chip->chip = nmk_gpio_template;
nmk_chip->parent_irq = irq;
- nmk_chip->secondary_parent_irq = secondary_irq;
- nmk_chip->get_secondary_status = pdata->get_secondary_status;
- nmk_chip->set_ioforce = pdata->set_ioforce;
- nmk_chip->sleepmode = pdata->supports_sleepmode;
+ nmk_chip->latent_parent_irq = latent_irq;
+ nmk_chip->sleepmode = supports_sleepmode;
spin_lock_init(&nmk_chip->lock);
chip = &nmk_chip->chip;
- chip->base = pdata->first_gpio;
- chip->ngpio = pdata->num_gpio;
- chip->label = pdata->name ?: dev_name(&dev->dev);
+ chip->base = dev->id * NMK_GPIO_PER_CHIP;
+ chip->ngpio = NMK_GPIO_PER_CHIP;
+ chip->label = dev_name(&dev->dev);
chip->dev = &dev->dev;
chip->owner = THIS_MODULE;
@@ -1325,17 +1250,31 @@ static int nmk_gpio_probe(struct platform_device *dev)
platform_set_drvdata(dev, nmk_chip);
- nmk_chip->domain = irq_domain_add_simple(np,
- NMK_GPIO_PER_CHIP, 0,
- &nmk_gpio_irq_simple_ops, nmk_chip);
- if (!nmk_chip->domain) {
- dev_err(&dev->dev, "failed to create irqdomain\n");
- /* Just do this, no matter if it fails */
+ /*
+ * Let the generic code handle this edge IRQ; the chained
+ * handler will perform the actual work of handling the parent
+ * interrupt.
+ */
+ ret = gpiochip_irqchip_add(&nmk_chip->chip,
+ &nmk_gpio_irq_chip,
+ 0,
+ handle_edge_irq,
+ IRQ_TYPE_EDGE_FALLING);
+ if (ret) {
+ dev_err(&dev->dev, "could not add irqchip\n");
ret = gpiochip_remove(&nmk_chip->chip);
- return -ENOSYS;
+ return -ENODEV;
}
-
- nmk_gpio_init_irq(nmk_chip);
+ /* Then register the chain on the parent IRQ */
+ gpiochip_set_chained_irqchip(&nmk_chip->chip,
+ &nmk_gpio_irq_chip,
+ nmk_chip->parent_irq,
+ nmk_gpio_irq_handler);
+ if (nmk_chip->latent_parent_irq > 0)
+ gpiochip_set_chained_irqchip(&nmk_chip->chip,
+ &nmk_gpio_irq_chip,
+ nmk_chip->latent_parent_irq,
+ nmk_gpio_latent_irq_handler);
dev_info(&dev->dev, "at address %p\n", nmk_chip->addr);
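Both this nomadik conversion and the coh901 one above rely on the same pair of gpiolib helpers: gpiochip_irqchip_add() registers the irqchip and creates the irqdomain that becomes gpio_chip.irqdomain, while gpiochip_set_chained_irqchip() installs the chained flow handler on a parent IRQ. A condensed sketch of that pattern with placeholder driver names (my_gpio, my_gpio_irqchip and friends are illustrative, and the hardware-specific callbacks are left as comments, so this is an outline rather than a working driver):

#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

struct my_gpio {
	struct gpio_chip chip;
	void __iomem *base;
};

/* The driver-specific .irq_mask/.irq_unmask/.irq_set_type ops go here. */
static struct irq_chip my_gpio_irqchip = {
	.name = "my-gpio",
};

static void my_gpio_chained_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *parent = irq_get_chip(irq);
	struct gpio_chip *chip = irq_get_handler_data(irq);

	chained_irq_enter(parent, desc);
	/*
	 * Read the bank status register here and dispatch each set bit with
	 * generic_handle_irq(irq_find_mapping(chip->irqdomain, bit)).
	 */
	dev_dbg(chip->dev, "bank interrupt\n");
	chained_irq_exit(parent, desc);
}

static int my_gpio_setup_irqs(struct my_gpio *mg, int parent_irq)
{
	int ret;

	/* Registers the irqchip and maps every GPIO line into chip.irqdomain. */
	ret = gpiochip_irqchip_add(&mg->chip, &my_gpio_irqchip, 0,
				   handle_edge_irq, IRQ_TYPE_EDGE_FALLING);
	if (ret)
		return ret;

	/* Route the bank's parent interrupt through the chained handler. */
	gpiochip_set_chained_irqchip(&mg->chip, &my_gpio_irqchip,
				     parent_irq, my_gpio_chained_handler);
	return 0;
}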
@@ -2035,27 +1974,29 @@ static const struct of_device_id nmk_pinctrl_match[] = {
{},
};
-static int nmk_pinctrl_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int nmk_pinctrl_suspend(struct device *dev)
{
struct nmk_pinctrl *npct;
- npct = platform_get_drvdata(pdev);
+ npct = dev_get_drvdata(dev);
if (!npct)
return -EINVAL;
return pinctrl_force_sleep(npct->pctl);
}
-static int nmk_pinctrl_resume(struct platform_device *pdev)
+static int nmk_pinctrl_resume(struct device *dev)
{
struct nmk_pinctrl *npct;
- npct = platform_get_drvdata(pdev);
+ npct = dev_get_drvdata(dev);
if (!npct)
return -EINVAL;
return pinctrl_force_default(npct->pctl);
}
+#endif
static int nmk_pinctrl_probe(struct platform_device *pdev)
{
@@ -2144,17 +2085,18 @@ static struct platform_driver nmk_gpio_driver = {
.probe = nmk_gpio_probe,
};
+static SIMPLE_DEV_PM_OPS(nmk_pinctrl_pm_ops,
+ nmk_pinctrl_suspend,
+ nmk_pinctrl_resume);
+
static struct platform_driver nmk_pinctrl_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "pinctrl-nomadik",
.of_match_table = nmk_pinctrl_match,
+ .pm = &nmk_pinctrl_pm_ops,
},
.probe = nmk_pinctrl_probe,
-#ifdef CONFIG_PM
- .suspend = nmk_pinctrl_suspend,
- .resume = nmk_pinctrl_resume,
-#endif
};
static int __init nmk_gpio_init(void)
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index 47ec2e8741e4..0324d4cb19b2 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -1120,6 +1120,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = (void *)exynos4x12_pin_ctrl },
{ .compatible = "samsung,exynos5250-pinctrl",
.data = (void *)exynos5250_pin_ctrl },
+ { .compatible = "samsung,exynos5260-pinctrl",
+ .data = (void *)exynos5260_pin_ctrl },
{ .compatible = "samsung,exynos5420-pinctrl",
.data = (void *)exynos5420_pin_ctrl },
{ .compatible = "samsung,s5pv210-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-samsung.h b/drivers/pinctrl/pinctrl-samsung.h
index 30622d9afa2e..bab9c2122556 100644
--- a/drivers/pinctrl/pinctrl-samsung.h
+++ b/drivers/pinctrl/pinctrl-samsung.h
@@ -254,6 +254,7 @@ struct samsung_pmx_func {
extern struct samsung_pin_ctrl exynos4210_pin_ctrl[];
extern struct samsung_pin_ctrl exynos4x12_pin_ctrl[];
extern struct samsung_pin_ctrl exynos5250_pin_ctrl[];
+extern struct samsung_pin_ctrl exynos5260_pin_ctrl[];
extern struct samsung_pin_ctrl exynos5420_pin_ctrl[];
extern struct samsung_pin_ctrl s3c64xx_pin_ctrl[];
extern struct samsung_pin_ctrl s3c2412_pin_ctrl[];
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index de6459628b4f..81075f2a1d3f 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -662,6 +662,7 @@ static int pcs_pinconf_get(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_DRIVE_STRENGTH:
case PIN_CONFIG_SLEW_RATE:
+ case PIN_CONFIG_LOW_POWER_MODE:
default:
*config = data;
break;
@@ -699,6 +700,7 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_INPUT_SCHMITT:
case PIN_CONFIG_DRIVE_STRENGTH:
case PIN_CONFIG_SLEW_RATE:
+ case PIN_CONFIG_LOW_POWER_MODE:
shift = ffs(func->conf[i].mask) - 1;
data &= ~func->conf[i].mask;
data |= (arg << shift) & func->conf[i].mask;
@@ -1101,6 +1103,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
{ "pinctrl-single,drive-strength", PIN_CONFIG_DRIVE_STRENGTH, },
{ "pinctrl-single,slew-rate", PIN_CONFIG_SLEW_RATE, },
{ "pinctrl-single,input-schmitt", PIN_CONFIG_INPUT_SCHMITT, },
+ { "pinctrl-single,low-power-mode", PIN_CONFIG_LOW_POWER_MODE, },
};
struct pcs_conf_type prop4[] = {
{ "pinctrl-single,bias-pullup", PIN_CONFIG_BIAS_PULL_UP, },
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 320c27363cc8..bd725b0a4341 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -13,7 +13,12 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
@@ -266,11 +271,59 @@ struct st_pctl_group {
struct st_pinconf *pin_conf;
};
+/*
+ * Edge triggers are not supported at the hardware level; they are emulated
+ * in software by exploiting the level trigger support in hardware.
+ * Software uses a virtual register (EDGE_CONF) for the edge trigger
+ * configuration of each gpio pin in a GPIO bank.
+ *
+ * Each bank has a 32-bit EDGE_CONF register which is divided into 8 fields
+ * of 4 bits, one 4-bit field per pin in the gpio bank.
+ *
+ * The bit allocation per pin is:
+ * Bits:  [0 - 3] | [4 - 7] | [8 - 11] | ... ... ... ... | [28 - 31]
+ *        ----------------------------------------------------------
+ *        | pin-0 |  pin-1  |  pin-2   | ... ... ... ... |  pin-7   |
+ *        ----------------------------------------------------------
+ *
+ * A pin can have one of the following values in its edge configuration field.
+ *
+ * ------- ----------------------------
+ * [0-3] - Description
+ * ------- ----------------------------
+ * 0000 - No edge IRQ.
+ * 0001 - Falling edge IRQ.
+ * 0010 - Rising edge IRQ.
+ * 0011 - Rising and Falling edge IRQ.
+ * ------- ----------------------------
+ */
+
+#define ST_IRQ_EDGE_CONF_BITS_PER_PIN 4
+#define ST_IRQ_EDGE_MASK 0xf
+#define ST_IRQ_EDGE_FALLING BIT(0)
+#define ST_IRQ_EDGE_RISING BIT(1)
+#define ST_IRQ_EDGE_BOTH (BIT(0) | BIT(1))
+
+#define ST_IRQ_RISING_EDGE_CONF(pin) \
+ (ST_IRQ_EDGE_RISING << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN))
+
+#define ST_IRQ_FALLING_EDGE_CONF(pin) \
+ (ST_IRQ_EDGE_FALLING << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN))
+
+#define ST_IRQ_BOTH_EDGE_CONF(pin) \
+ (ST_IRQ_EDGE_BOTH << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN))
+
+#define ST_IRQ_EDGE_CONF(conf, pin) \
+ (conf >> (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN) & ST_IRQ_EDGE_MASK)
+
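The macros above pack one 4-bit edge setting per pin into the 32-bit virtual EDGE_CONF word, and st_gpio_irq_set_type() below only touches the field of the requested pin. A small standalone round-trip sketch; the ST_IRQ_* definitions are copied from the hunk above, while BIT(), main() and the printed values are illustrative only:

#include <stdio.h>

#define BIT(n)				(1UL << (n))
#define ST_IRQ_EDGE_CONF_BITS_PER_PIN	4
#define ST_IRQ_EDGE_MASK		0xf
#define ST_IRQ_EDGE_FALLING		BIT(0)
#define ST_IRQ_EDGE_RISING		BIT(1)

#define ST_IRQ_RISING_EDGE_CONF(pin) \
	(ST_IRQ_EDGE_RISING << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN))

#define ST_IRQ_EDGE_CONF(conf, pin) \
	(conf >> (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN) & ST_IRQ_EDGE_MASK)

int main(void)
{
	unsigned long conf = 0;
	int pin = 5;

	/* Clear pin 5's 4-bit field, then mark it as rising-edge. */
	conf &= ~((unsigned long)ST_IRQ_EDGE_MASK <<
		  (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN));
	conf |= ST_IRQ_RISING_EDGE_CONF(pin);

	/* Prints: EDGE_CONF = 0x00200000, pin 5 field = 0x2 */
	printf("EDGE_CONF = 0x%08lx, pin %d field = 0x%lx\n",
	       conf, pin, ST_IRQ_EDGE_CONF(conf, pin));
	return 0;
}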
struct st_gpio_bank {
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range range;
void __iomem *base;
struct st_pio_control pc;
+ struct irq_domain *domain;
+ unsigned long irq_edge_conf;
+ spinlock_t lock;
};
struct st_pinctrl {
@@ -284,6 +337,7 @@ struct st_pinctrl {
int ngroups;
struct regmap *regmap;
const struct st_pctl_data *data;
+ void __iomem *irqmux_base;
};
/* SOC specific data */
@@ -330,12 +384,25 @@ static unsigned int stih416_delays[] = {0, 300, 500, 750, 1000, 1250, 1500,
static const struct st_pctl_data stih416_data = {
.rt_style = st_retime_style_dedicated,
.input_delays = stih416_delays,
- .ninput_delays = 14,
+ .ninput_delays = ARRAY_SIZE(stih416_delays),
.output_delays = stih416_delays,
- .noutput_delays = 14,
+ .noutput_delays = ARRAY_SIZE(stih416_delays),
.alt = 0, .oe = 40, .pu = 50, .od = 60, .rt = 100,
};
+static const struct st_pctl_data stih407_flashdata = {
+ .rt_style = st_retime_style_none,
+ .input_delays = stih416_delays,
+ .ninput_delays = ARRAY_SIZE(stih416_delays),
+ .output_delays = stih416_delays,
+ .noutput_delays = ARRAY_SIZE(stih416_delays),
+ .alt = 0,
+ .oe = -1, /* Not Available */
+ .pu = -1, /* Not Available */
+ .od = 60,
+ .rt = 100,
+};
+
/* Low level functions.. */
static inline int st_gpio_bank(int gpio)
{
@@ -356,25 +423,29 @@ static void st_pinconf_set_config(struct st_pio_control *pc,
unsigned int oe_value, pu_value, od_value;
unsigned long mask = BIT(pin);
- regmap_field_read(output_enable, &oe_value);
- regmap_field_read(pull_up, &pu_value);
- regmap_field_read(open_drain, &od_value);
-
- /* Clear old values */
- oe_value &= ~mask;
- pu_value &= ~mask;
- od_value &= ~mask;
-
- if (config & ST_PINCONF_OE)
- oe_value |= mask;
- if (config & ST_PINCONF_PU)
- pu_value |= mask;
- if (config & ST_PINCONF_OD)
- od_value |= mask;
-
- regmap_field_write(output_enable, oe_value);
- regmap_field_write(pull_up, pu_value);
- regmap_field_write(open_drain, od_value);
+ if (output_enable) {
+ regmap_field_read(output_enable, &oe_value);
+ oe_value &= ~mask;
+ if (config & ST_PINCONF_OE)
+ oe_value |= mask;
+ regmap_field_write(output_enable, oe_value);
+ }
+
+ if (pull_up) {
+ regmap_field_read(pull_up, &pu_value);
+ pu_value &= ~mask;
+ if (config & ST_PINCONF_PU)
+ pu_value |= mask;
+ regmap_field_write(pull_up, pu_value);
+ }
+
+ if (open_drain) {
+ regmap_field_read(open_drain, &od_value);
+ od_value &= ~mask;
+ if (config & ST_PINCONF_OD)
+ od_value |= mask;
+ regmap_field_write(open_drain, od_value);
+ }
}
static void st_pctl_set_function(struct st_pio_control *pc,
@@ -385,6 +456,9 @@ static void st_pctl_set_function(struct st_pio_control *pc,
int pin = st_gpio_pin(pin_id);
int offset = pin * 4;
+ if (!alt)
+ return;
+
regmap_field_read(alt, &val);
val &= ~(0xf << offset);
val |= function << offset;
@@ -522,17 +596,23 @@ static void st_pinconf_get_direction(struct st_pio_control *pc,
{
unsigned int oe_value, pu_value, od_value;
- regmap_field_read(pc->oe, &oe_value);
- regmap_field_read(pc->pu, &pu_value);
- regmap_field_read(pc->od, &od_value);
+ if (pc->oe) {
+ regmap_field_read(pc->oe, &oe_value);
+ if (oe_value & BIT(pin))
+ ST_PINCONF_PACK_OE(*config);
+ }
- if (oe_value & BIT(pin))
- ST_PINCONF_PACK_OE(*config);
- if (pu_value & BIT(pin))
- ST_PINCONF_PACK_PU(*config);
- if (od_value & BIT(pin))
- ST_PINCONF_PACK_OD(*config);
+ if (pc->pu) {
+ regmap_field_read(pc->pu, &pu_value);
+ if (pu_value & BIT(pin))
+ ST_PINCONF_PACK_PU(*config);
+ }
+ if (pc->od) {
+ regmap_field_read(pc->od, &od_value);
+ if (od_value & BIT(pin))
+ ST_PINCONF_PACK_OD(*config);
+ }
}
static int st_pinconf_get_retime_packed(struct st_pinctrl *info,
@@ -1051,8 +1131,21 @@ static int st_pctl_dt_setup_retime(struct st_pinctrl *info,
return -EINVAL;
}
-static int st_parse_syscfgs(struct st_pinctrl *info,
- int bank, struct device_node *np)
+
+static struct regmap_field *st_pc_get_value(struct device *dev,
+ struct regmap *regmap, int bank,
+ int data, int lsb, int msb)
+{
+ struct reg_field reg = REG_FIELD((data + bank) * 4, lsb, msb);
+
+ if (data < 0)
+ return NULL;
+
+ return devm_regmap_field_alloc(dev, regmap, reg);
+}
+
+static void st_parse_syscfgs(struct st_pinctrl *info, int bank,
+ struct device_node *np)
{
const struct st_pctl_data *data = info->data;
/**
@@ -1062,29 +1155,21 @@ static int st_parse_syscfgs(struct st_pinctrl *info,
*/
int lsb = (bank%4) * ST_GPIO_PINS_PER_BANK;
int msb = lsb + ST_GPIO_PINS_PER_BANK - 1;
- struct reg_field alt_reg = REG_FIELD((data->alt + bank) * 4, 0, 31);
- struct reg_field oe_reg = REG_FIELD((data->oe + bank/4) * 4, lsb, msb);
- struct reg_field pu_reg = REG_FIELD((data->pu + bank/4) * 4, lsb, msb);
- struct reg_field od_reg = REG_FIELD((data->od + bank/4) * 4, lsb, msb);
struct st_pio_control *pc = &info->banks[bank].pc;
struct device *dev = info->dev;
struct regmap *regmap = info->regmap;
- pc->alt = devm_regmap_field_alloc(dev, regmap, alt_reg);
- pc->oe = devm_regmap_field_alloc(dev, regmap, oe_reg);
- pc->pu = devm_regmap_field_alloc(dev, regmap, pu_reg);
- pc->od = devm_regmap_field_alloc(dev, regmap, od_reg);
-
- if (IS_ERR(pc->alt) || IS_ERR(pc->oe) ||
- IS_ERR(pc->pu) || IS_ERR(pc->od))
- return -EINVAL;
+ pc->alt = st_pc_get_value(dev, regmap, bank, data->alt, 0, 31);
+ pc->oe = st_pc_get_value(dev, regmap, bank/4, data->oe, lsb, msb);
+ pc->pu = st_pc_get_value(dev, regmap, bank/4, data->pu, lsb, msb);
+ pc->od = st_pc_get_value(dev, regmap, bank/4, data->od, lsb, msb);
/* retime available for all pins by default */
pc->rt_pin_mask = 0xff;
of_property_read_u32(np, "st,retime-pin-mask", &pc->rt_pin_mask);
st_pctl_dt_setup_retime(info, bank, pc);
- return 0;
+ return;
}
/*
@@ -1200,6 +1285,194 @@ static int st_pctl_parse_functions(struct device_node *np,
return 0;
}
+static int st_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct st_gpio_bank *bank = gpio_chip_to_bank(chip);
+ int irq = -ENXIO;
+
+ if (offset < chip->ngpio)
+ irq = irq_find_mapping(bank->domain, offset);
+
+ dev_info(chip->dev, "%s: request IRQ for GPIO %d, return %d\n",
+ chip->label, offset + chip->base, irq);
+ return irq;
+}
+
+static void st_gpio_irq_mask(struct irq_data *d)
+{
+ struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(d->hwirq), bank->base + REG_PIO_CLR_PMASK);
+}
+
+static void st_gpio_irq_unmask(struct irq_data *d)
+{
+ struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
+}
+
+static unsigned int st_gpio_irq_startup(struct irq_data *d)
+{
+ struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ if (gpio_lock_as_irq(&bank->gpio_chip, d->hwirq))
+ dev_err(bank->gpio_chip.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ d->hwirq);
+
+ st_gpio_irq_unmask(d);
+
+ return 0;
+}
+
+static void st_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ st_gpio_irq_mask(d);
+ gpio_unlock_as_irq(&bank->gpio_chip, d->hwirq);
+}
+
+static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
+{
+ struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ int comp, pin = d->hwirq;
+ u32 val;
+ u32 pin_edge_conf = 0;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ comp = 0;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ comp = 0;
+ pin_edge_conf = ST_IRQ_FALLING_EDGE_CONF(pin);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ comp = 1;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ comp = 1;
+ pin_edge_conf = ST_IRQ_RISING_EDGE_CONF(pin);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ comp = st_gpio_get(&bank->gpio_chip, pin);
+ pin_edge_conf = ST_IRQ_BOTH_EDGE_CONF(pin);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bank->lock, flags);
+ bank->irq_edge_conf &= ~(ST_IRQ_EDGE_MASK << (
+ pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN));
+ bank->irq_edge_conf |= pin_edge_conf;
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ val = readl(bank->base + REG_PIO_PCOMP);
+ val &= ~BIT(pin);
+ val |= (comp << pin);
+ writel(val, bank->base + REG_PIO_PCOMP);
+
+ return 0;
+}
+
+/*
+ * Edge triggers are not supported at the hardware level; they are emulated
+ * in software by exploiting the level trigger support in hardware.
+ *
+ * Steps for detecting a rising edge interrupt in software:
+ *
+ * Step 1: CONFIGURE pin to detect level LOW interrupts.
+ *
+ * Step 2: DETECT level LOW interrupt and in irqmux/gpio bank interrupt handler,
+ * if the value of pin is low, then CONFIGURE pin for level HIGH interrupt.
+ * IGNORE calling the actual interrupt handler for the pin at this stage.
+ *
+ * Step 3: DETECT level HIGH interrupt and in irqmux/gpio-bank interrupt handler
+ * if the value of pin is HIGH, CONFIGURE pin for level LOW interrupt and then
+ * DISPATCH the interrupt to the interrupt handler of the pin.
+ *
+ * step-1  ________        __________
+ *                 |       | step-3
+ *                 |       |
+ *         step-2  |_______|
+ *
+ * A falling edge is detected in the same way.
+ *
+ */
+static void __gpio_irq_handler(struct st_gpio_bank *bank)
+{
+ unsigned long port_in, port_mask, port_comp, active_irqs;
+ unsigned long bank_edge_mask, flags;
+ int n, val, ecfg;
+
+ spin_lock_irqsave(&bank->lock, flags);
+ bank_edge_mask = bank->irq_edge_conf;
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ for (;;) {
+ port_in = readl(bank->base + REG_PIO_PIN);
+ port_comp = readl(bank->base + REG_PIO_PCOMP);
+ port_mask = readl(bank->base + REG_PIO_PMASK);
+
+ active_irqs = (port_in ^ port_comp) & port_mask;
+
+ if (active_irqs == 0)
+ break;
+
+ for_each_set_bit(n, &active_irqs, BITS_PER_LONG) {
+ /* check if we are detecting fake edges ... */
+ ecfg = ST_IRQ_EDGE_CONF(bank_edge_mask, n);
+
+ if (ecfg) {
+ /* edge detection. */
+ val = st_gpio_get(&bank->gpio_chip, n);
+
+ writel(BIT(n),
+ val ? bank->base + REG_PIO_SET_PCOMP :
+ bank->base + REG_PIO_CLR_PCOMP);
+
+ if (ecfg != ST_IRQ_EDGE_BOTH &&
+ !((ecfg & ST_IRQ_EDGE_FALLING) ^ val))
+ continue;
+ }
+
+ generic_handle_irq(irq_find_mapping(bank->domain, n));
+ }
+ }
+}
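The loop above is the three-step emulation from the comment: each level interrupt re-arms PCOMP to the pin's current level, and the pin's handler is only dispatched when the observed level matches the requested edge direction. A condensed standalone model of that per-pin decision, with hypothetical names and no register access, kept separate from the driver code:

#include <stdio.h>

#define EDGE_FALLING	0x1
#define EDGE_RISING	0x2
#define EDGE_BOTH	0x3

/*
 * Models one level interrupt for a pin using emulated edges. 'level' is
 * the pin value read in the handler; '*comp' models the PCOMP bit and is
 * re-armed to the current level so the next change fires again. Returns
 * 1 when the pin's handler should be dispatched, 0 for the intermediate
 * arming interrupt.
 */
static int edge_emulation_step(int ecfg, int level, int *comp)
{
	*comp = level;

	if (ecfg == EDGE_BOTH)
		return 1;	/* every transition is dispatched */

	/* Rising completes when the pin is now high, falling when it is low. */
	return (ecfg & EDGE_FALLING) ? !level : level;
}

int main(void)
{
	int comp = 1;	/* rising edge starts out waiting for a low level */

	/* Pin observed low: only the arming step, nothing dispatched. */
	printf("low  -> dispatch=%d\n",
	       edge_emulation_step(EDGE_RISING, 0, &comp));
	/* Pin observed high: the rising edge is reported to the handler. */
	printf("high -> dispatch=%d\n",
	       edge_emulation_step(EDGE_RISING, 1, &comp));
	return 0;
}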
+
+static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ /* interrupt dedicated per bank */
+ struct irq_chip *chip = irq_get_chip(irq);
+ struct st_gpio_bank *bank = irq_get_handler_data(irq);
+
+ chained_irq_enter(chip, desc);
+ __gpio_irq_handler(bank);
+ chained_irq_exit(chip, desc);
+}
+
+static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_get_chip(irq);
+ struct st_pinctrl *info = irq_get_handler_data(irq);
+ unsigned long status;
+ int n;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl(info->irqmux_base);
+
+ for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
+ __gpio_irq_handler(&info->banks[n]);
+
+ chained_irq_exit(chip, desc);
+}
+
static struct gpio_chip st_gpio_template = {
.request = st_gpio_request,
.free = st_gpio_free,
@@ -1210,6 +1483,34 @@ static struct gpio_chip st_gpio_template = {
.ngpio = ST_GPIO_PINS_PER_BANK,
.of_gpio_n_cells = 1,
.of_xlate = st_gpio_xlate,
+ .to_irq = st_gpio_to_irq,
+};
+
+static struct irq_chip st_gpio_irqchip = {
+ .name = "GPIO",
+ .irq_mask = st_gpio_irq_mask,
+ .irq_unmask = st_gpio_irq_unmask,
+ .irq_set_type = st_gpio_irq_set_type,
+ .irq_startup = st_gpio_irq_startup,
+ .irq_shutdown = st_gpio_irq_shutdown,
+};
+
+static int st_gpio_irq_domain_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ struct st_gpio_bank *bank = h->host_data;
+
+ irq_set_chip(virq, &st_gpio_irqchip);
+ irq_set_handler(virq, handle_simple_irq);
+ set_irq_flags(virq, IRQF_VALID);
+ irq_set_chip_data(virq, bank);
+
+ return 0;
+}
+
+static struct irq_domain_ops st_gpio_irq_ops = {
+ .map = st_gpio_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
};
static int st_gpiolib_register_bank(struct st_pinctrl *info,
@@ -1219,8 +1520,8 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
struct pinctrl_gpio_range *range = &bank->range;
struct device *dev = info->dev;
int bank_num = of_alias_get_id(np, "gpio");
- struct resource res;
- int err;
+ struct resource res, irq_res;
+ int gpio_irq = 0, err, i;
if (of_address_to_resource(np, 0, &res))
return -ENODEV;
@@ -1233,6 +1534,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
bank->gpio_chip.base = bank_num * ST_GPIO_PINS_PER_BANK;
bank->gpio_chip.ngpio = ST_GPIO_PINS_PER_BANK;
bank->gpio_chip.of_node = np;
+ spin_lock_init(&bank->lock);
of_property_read_string(np, "st,bank-name", &range->name);
bank->gpio_chip.label = range->name;
@@ -1248,6 +1550,51 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
}
dev_info(dev, "%s bank added.\n", range->name);
+ /**
+ * A GPIO bank can have one of two possible types of
+ * interrupt wiring.
+ *
+ * The first type is via an irqmux: a single interrupt is shared by
+ * multiple gpio banks, which reduces the overall number of interrupts
+ * required. All of these banks belong to a single pincontroller.
+ *            _________
+ *           |         |----> [gpio-bank (n)    ]
+ *           |         |----> [gpio-bank (n + 1)]
+ * [irqN]----| irq-mux |----> [gpio-bank (n + 2)]
+ *           |         |----> [gpio-bank (... ) ]
+ *           |_________|----> [gpio-bank (n + 7)]
+ *
+ * The second type has a dedicated interrupt per gpio bank.
+ *
+ * [irqN]----> [gpio-bank (n)]
+ */
+
+ if (of_irq_to_resource(np, 0, &irq_res)) {
+ gpio_irq = irq_res.start;
+ irq_set_chained_handler(gpio_irq, st_gpio_irq_handler);
+ irq_set_handler_data(gpio_irq, bank);
+ }
+
+ if (info->irqmux_base > 0 || gpio_irq > 0) {
+ /* Setup IRQ domain */
+ bank->domain = irq_domain_add_linear(np,
+ ST_GPIO_PINS_PER_BANK,
+ &st_gpio_irq_ops, bank);
+ if (!bank->domain) {
+ dev_err(dev, "Failed to add irq domain for %s\n",
+ np->full_name);
+ } else {
+ for (i = 0; i < ST_GPIO_PINS_PER_BANK; i++) {
+ if (irq_create_mapping(bank->domain, i) < 0)
+ dev_err(dev,
+ "Failed to map IRQ %i\n", i);
+ }
+ }
+
+ } else {
+ dev_info(dev, "No IRQ support for %s bank\n", np->full_name);
+ }
+
return 0;
}
@@ -1264,6 +1611,10 @@ static struct of_device_id st_pctl_of_match[] = {
{ .compatible = "st,stih416-rear-pinctrl", .data = &stih416_data},
{ .compatible = "st,stih416-fvdp-fe-pinctrl", .data = &stih416_data},
{ .compatible = "st,stih416-fvdp-lite-pinctrl", .data = &stih416_data},
+ { .compatible = "st,stih407-sbc-pinctrl", .data = &stih416_data},
+ { .compatible = "st,stih407-front-pinctrl", .data = &stih416_data},
+ { .compatible = "st,stih407-rear-pinctrl", .data = &stih416_data},
+ { .compatible = "st,stih407-flash-pinctrl", .data = &stih407_flashdata},
{ /* sentinel */ }
};
@@ -1276,6 +1627,8 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
int grp_index = 0;
+ int irq = 0;
+ struct resource *res;
st_pctl_dt_child_count(info, np);
if (!info->nbanks) {
@@ -1306,6 +1659,21 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
}
info->data = of_match_node(st_pctl_of_match, np)->data;
+ irq = platform_get_irq(pdev, 0);
+
+ if (irq > 0) {
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "irqmux");
+ info->irqmux_base = devm_ioremap_resource(&pdev->dev, res);
+
+ if (IS_ERR(info->irqmux_base))
+ return PTR_ERR(info->irqmux_base);
+
+ irq_set_chained_handler(irq, st_gpio_irqmux_handler);
+ irq_set_handler_data(irq, info);
+
+ }
+
pctl_desc->npins = info->nbanks * ST_GPIO_PINS_PER_BANK;
pdesc = devm_kzalloc(&pdev->dev,
sizeof(*pdesc) * pctl_desc->npins, GFP_KERNEL);
diff --git a/drivers/pinctrl/pinctrl-sunxi-pins.h b/drivers/pinctrl/pinctrl-sunxi-pins.h
index 6fd8d4d95140..3d6066988a72 100644
--- a/drivers/pinctrl/pinctrl-sunxi-pins.h
+++ b/drivers/pinctrl/pinctrl-sunxi-pins.h
@@ -1932,27 +1932,27 @@ static const struct sunxi_desc_pin sun5i_a13_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF0,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* D1 */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* D1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF1,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* D0 */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* D0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF2,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* CLK */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* CLK */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF3,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* CMD */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* CMD */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF4,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* D3 */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* D3 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF5,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* D2 */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* D2 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PG0,
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index e767355ab0ad..65458096f41e 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -39,6 +39,7 @@ struct tegra_pmx {
struct pinctrl_dev *pctl;
const struct tegra_pinctrl_soc_data *soc;
+ const char **group_pins;
int nbanks;
void __iomem **regs;
@@ -620,6 +621,8 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
struct tegra_pmx *pmx;
struct resource *res;
int i;
+ const char **group_pins;
+ int fn, gn, gfn;
pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
if (!pmx) {
@@ -629,6 +632,41 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
pmx->dev = &pdev->dev;
pmx->soc = soc_data;
+ /*
+ * Each mux group will appear in 4 functions' list of groups.
+ * This over-allocates slightly, since not all groups are mux groups.
+ */
+ pmx->group_pins = devm_kzalloc(&pdev->dev,
+ soc_data->ngroups * 4 * sizeof(*pmx->group_pins),
+ GFP_KERNEL);
+ if (!pmx->group_pins)
+ return -ENOMEM;
+
+ group_pins = pmx->group_pins;
+ for (fn = 0; fn < soc_data->nfunctions; fn++) {
+ struct tegra_function *func = &soc_data->functions[fn];
+
+ func->groups = group_pins;
+
+ for (gn = 0; gn < soc_data->ngroups; gn++) {
+ const struct tegra_pingroup *g = &soc_data->groups[gn];
+
+ if (g->mux_reg == -1)
+ continue;
+
+ for (gfn = 0; gfn < 4; gfn++)
+ if (g->funcs[gfn] == fn)
+ break;
+ if (gfn == 4)
+ continue;
+
+ BUG_ON(group_pins - pmx->group_pins >=
+ soc_data->ngroups * 4);
+ *group_pins++ = g->name;
+ func->ngroups++;
+ }
+ }
+
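The allocation above is an upper bound rather than an exact count: a mux group lists at most four function indices in funcs[], so it can appear in at most four functions' group lists, and non-mux groups (mux_reg == -1) contribute nothing. A toy standalone demonstration of that counting argument using made-up data, not real Tegra tables:

#include <stdio.h>

#define NFUNCS	3
#define NGROUPS	4

struct toy_group {
	int mux_reg;
	int funcs[4];
};

static const struct toy_group groups[NGROUPS] = {
	{ .mux_reg = 0x3000, .funcs = { 0, 1, 1, 2 } },
	{ .mux_reg = 0x3004, .funcs = { 2, 2, 2, 2 } },
	{ .mux_reg = 0x3008, .funcs = { 0, 0, 1, 2 } },
	{ .mux_reg = -1 },		/* e.g. a drive group: never listed */
};

int main(void)
{
	int total = 0, fn, gn, gfn;

	for (fn = 0; fn < NFUNCS; fn++) {
		int ngroups = 0;

		for (gn = 0; gn < NGROUPS; gn++) {
			if (groups[gn].mux_reg == -1)
				continue;
			for (gfn = 0; gfn < 4; gfn++)
				if (groups[gn].funcs[gfn] == fn)
					break;
			if (gfn == 4)
				continue;
			ngroups++;	/* each group listed once per function */
			total++;
		}
		printf("function %d: %d group(s)\n", fn, ngroups);
	}
	printf("total entries %d <= upper bound %d\n", total, NGROUPS * 4);
	return 0;
}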
tegra_pinctrl_gpio_range.npins = pmx->soc->ngpios;
tegra_pinctrl_desc.name = dev_name(&pdev->dev);
tegra_pinctrl_desc.pins = pmx->soc->pins;
diff --git a/drivers/pinctrl/pinctrl-tegra.h b/drivers/pinctrl/pinctrl-tegra.h
index 817f7061dc4c..6053832d433e 100644
--- a/drivers/pinctrl/pinctrl-tegra.h
+++ b/drivers/pinctrl/pinctrl-tegra.h
@@ -72,7 +72,7 @@ enum tegra_pinconf_tristate {
*/
struct tegra_function {
const char *name;
- const char * const *groups;
+ const char **groups;
unsigned ngroups;
};
@@ -193,7 +193,7 @@ struct tegra_pinctrl_soc_data {
unsigned ngpios;
const struct pinctrl_pin_desc *pins;
unsigned npins;
- const struct tegra_function *functions;
+ struct tegra_function *functions;
unsigned nfunctions;
const struct tegra_pingroup *groups;
unsigned ngroups;
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
index 93c9e3899d5e..63fe7619d3ff 100644
--- a/drivers/pinctrl/pinctrl-tegra114.c
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -1,10 +1,8 @@
/*
- * Pinctrl data and driver for the NVIDIA Tegra114 pinmux
+ * Pinctrl data for the NVIDIA Tegra114 pinmux
*
* Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
- * Author: Pritesh Raithatha <praithatha@nvidia.com>
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -13,9 +11,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
@@ -203,8 +198,8 @@
#define TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 _GPIO(245)
/* All non-GPIO pins follow */
-#define NUM_GPIOS (TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 + 1)
-#define _PIN(offset) (NUM_GPIOS + (offset))
+#define NUM_GPIOS (TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
/* Non-GPIO pins */
#define TEGRA_PIN_CORE_PWR_REQ _PIN(0)
@@ -212,8 +207,11 @@
#define TEGRA_PIN_PWR_INT_N _PIN(2)
#define TEGRA_PIN_RESET_OUT_N _PIN(3)
#define TEGRA_PIN_OWR _PIN(4)
+#define TEGRA_PIN_JTAG_RTCK _PIN(5)
+#define TEGRA_PIN_CLK_32K_IN _PIN(6)
+#define TEGRA_PIN_GMI_CLK_LB _PIN(7)
-static const struct pinctrl_pin_desc tegra114_pins[] = {
+static const struct pinctrl_pin_desc tegra114_pins[] = {
PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PA0, "CLK_32K_OUT PA0"),
PINCTRL_PIN(TEGRA_PIN_UART3_CTS_N_PA1, "UART3_CTS_N PA1"),
PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PA2, "DAP2_FS PA2"),
@@ -385,9 +383,12 @@ static const struct pinctrl_pin_desc tegra114_pins[] = {
PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5, "SDMMC3_CLK_LB_IN PEE5"),
PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
- PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
PINCTRL_PIN(TEGRA_PIN_RESET_OUT_N, "RESET_OUT_N"),
+ PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
+ PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK, "JTAG_RTCK"),
+ PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
};
static const unsigned clk_32k_out_pa0_pins[] = {
@@ -1074,10 +1075,6 @@ static const unsigned cpu_pwr_req_pins[] = {
TEGRA_PIN_CPU_PWR_REQ,
};
-static const unsigned owr_pins[] = {
- TEGRA_PIN_OWR,
-};
-
static const unsigned pwr_int_n_pins[] = {
TEGRA_PIN_PWR_INT_N,
};
@@ -1086,6 +1083,22 @@ static const unsigned reset_out_n_pins[] = {
TEGRA_PIN_RESET_OUT_N,
};
+static const unsigned owr_pins[] = {
+ TEGRA_PIN_OWR,
+};
+
+static const unsigned jtag_rtck_pins[] = {
+ TEGRA_PIN_JTAG_RTCK,
+};
+
+static const unsigned clk_32k_in_pins[] = {
+ TEGRA_PIN_CLK_32K_IN,
+};
+
+static const unsigned gmi_clk_lb_pins[] = {
+ TEGRA_PIN_GMI_CLK_LB,
+};
+
static const unsigned drive_ao1_pins[] = {
TEGRA_PIN_KB_ROW0_PR0,
TEGRA_PIN_KB_ROW1_PR1,
@@ -1127,7 +1140,6 @@ static const unsigned drive_at1_pins[] = {
TEGRA_PIN_GMI_AD13_PH5,
TEGRA_PIN_GMI_AD14_PH6,
TEGRA_PIN_GMI_AD15_PH7,
-
TEGRA_PIN_GMI_IORDY_PI5,
TEGRA_PIN_GMI_CS7_N_PI6,
};
@@ -1141,15 +1153,12 @@ static const unsigned drive_at2_pins[] = {
TEGRA_PIN_GMI_AD5_PG5,
TEGRA_PIN_GMI_AD6_PG6,
TEGRA_PIN_GMI_AD7_PG7,
-
TEGRA_PIN_GMI_WR_N_PI0,
TEGRA_PIN_GMI_OE_N_PI1,
TEGRA_PIN_GMI_CS6_N_PI3,
TEGRA_PIN_GMI_RST_N_PI4,
TEGRA_PIN_GMI_WAIT_PI7,
-
TEGRA_PIN_GMI_DQS_P_PJ3,
-
TEGRA_PIN_GMI_ADV_N_PK0,
TEGRA_PIN_GMI_CLK_PK1,
TEGRA_PIN_GMI_CS4_N_PK2,
@@ -1342,14 +1351,37 @@ static const unsigned drive_uda_pins[] = {
};
static const unsigned drive_dev3_pins[] = {
- TEGRA_PIN_CLK3_OUT_PEE0,
- TEGRA_PIN_CLK3_REQ_PEE1,
+};
+
+static const unsigned drive_cec_pins[] = {
+};
+
+static const unsigned drive_at6_pins[] = {
+};
+
+static const unsigned drive_dap5_pins[] = {
+};
+
+static const unsigned drive_usb_vbus_en_pins[] = {
+};
+
+static const unsigned drive_ao3_pins[] = {
+};
+
+static const unsigned drive_hv0_pins[] = {
+};
+
+static const unsigned drive_sdio4_pins[] = {
+};
+
+static const unsigned drive_ao0_pins[] = {
};
enum tegra_mux {
TEGRA_MUX_BLINK,
TEGRA_MUX_CEC,
TEGRA_MUX_CLDVFS,
+ TEGRA_MUX_CLK,
TEGRA_MUX_CLK12,
TEGRA_MUX_CPU,
TEGRA_MUX_DAP,
@@ -1394,6 +1426,7 @@ enum tegra_mux {
TEGRA_MUX_RSVD2,
TEGRA_MUX_RSVD3,
TEGRA_MUX_RSVD4,
+ TEGRA_MUX_RTCK,
TEGRA_MUX_SDMMC1,
TEGRA_MUX_SDMMC2,
TEGRA_MUX_SDMMC3,
@@ -1425,944 +1458,16 @@ enum tegra_mux {
TEGRA_MUX_VI_ALT3,
};
-static const char * const blink_groups[] = {
- "clk_32k_out_pa0",
-};
-
-static const char * const cec_groups[] = {
- "hdmi_cec_pee3",
-};
-
-static const char * const cldvfs_groups[] = {
- "gmi_ad9_ph1",
- "gmi_ad10_ph2",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "dvfs_pwm_px0",
- "dvfs_clk_px2",
-};
-
-static const char * const clk12_groups[] = {
- "sdmmc1_wp_n_pv3",
- "sdmmc1_clk_pz0",
-};
-
-static const char * const cpu_groups[] = {
- "cpu_pwr_req",
-};
-
-static const char * const dap_groups[] = {
- "clk1_req_pee2",
- "clk2_req_pcc5",
-};
-
-static const char * const dap1_groups[] = {
- "clk1_req_pee2",
-};
-
-static const char * const dap2_groups[] = {
- "clk1_out_pw4",
- "gpio_x4_aud_px4",
-};
-
-static const char * const dev3_groups[] = {
- "clk3_req_pee1",
-};
-
-static const char * const displaya_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
- "uart3_rts_n_pc0",
- "pu3",
- "pu4",
- "pu5",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_col3_pq3",
- "sdmmc3_dat2_pb5",
-};
-
-static const char * const displaya_alt_groups[] = {
- "kb_row6_pr6",
-};
-
-static const char * const displayb_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const dtv_groups[] = {
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "dap4_fs_pp4",
- "dap4_dout_pp6",
- "gmi_wait_pi7",
- "gmi_ad8_ph0",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
-};
-
-static const char * const emc_dll_groups[] = {
- "kb_col0_pq0",
- "kb_col1_pq1",
-};
-
-static const char * const extperiph1_groups[] = {
- "clk1_out_pw4",
-};
-
-static const char * const extperiph2_groups[] = {
- "clk2_out_pw5",
-};
-
-static const char * const extperiph3_groups[] = {
- "clk3_out_pee0",
-};
-
-static const char * const gmi_groups[] = {
- "gmi_wp_n_pc7",
-
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_ad8_ph0",
- "gmi_ad9_ph1",
- "gmi_ad10_ph2",
- "gmi_ad11_ph3",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_wr_n_pi0",
- "gmi_oe_n_pi1",
- "gmi_cs6_n_pi3",
- "gmi_rst_n_pi4",
- "gmi_iordy_pi5",
- "gmi_cs7_n_pi6",
- "gmi_wait_pi7",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_dqs_p_pj3",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs4_n_pk2",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
-};
-
-static const char * const gmi_alt_groups[] = {
- "gmi_wp_n_pc7",
- "gmi_cs3_n_pk4",
- "gmi_a16_pj7",
-};
-
-static const char * const hda_groups[] = {
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
-};
-
-static const char * const hsi_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-};
-
-static const char * const i2c1_groups[] = {
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const i2c2_groups[] = {
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
-};
-
-static const char * const i2c3_groups[] = {
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
-};
-
-static const char * const i2c4_groups[] = {
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-};
-
-static const char * const i2cpwr_groups[] = {
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-};
-
-static const char * const i2s0_groups[] = {
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
-};
-
-static const char * const i2s1_groups[] = {
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
-};
-
-static const char * const i2s2_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
-};
-
-static const char * const i2s3_groups[] = {
- "dap4_fs_pp4",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_sclk_pp7",
-};
-
-static const char * const i2s4_groups[] = {
- "pcc1",
- "pbb0",
- "pbb7",
- "pcc2",
-};
-
-static const char * const irda_groups[] = {
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
-};
-
-static const char * const kbc_groups[] = {
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
-};
-
-static const char * const nand_groups[] = {
- "gmi_wp_n_pc7",
- "gmi_wait_pi7",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_cs4_n_pk2",
- "gmi_cs6_n_pi3",
- "gmi_cs7_n_pi6",
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_ad8_ph0",
- "gmi_ad9_ph1",
- "gmi_ad10_ph2",
- "gmi_ad11_ph3",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_wr_n_pi0",
- "gmi_oe_n_pi1",
- "gmi_dqs_p_pj3",
- "gmi_rst_n_pi4",
-};
-
-static const char * const nand_alt_groups[] = {
- "gmi_cs6_n_pi3",
- "gmi_cs7_n_pi6",
- "gmi_rst_n_pi4",
-};
-
-static const char * const owr_groups[] = {
- "pu0",
- "kb_col4_pq4",
- "owr",
- "sdmmc3_cd_n_pv2",
-};
-
-static const char * const pmi_groups[] = {
- "pwr_int_n",
-};
-
-static const char * const pwm0_groups[] = {
- "sdmmc1_dat2_py5",
- "uart3_rts_n_pc0",
- "pu3",
- "gmi_ad8_ph0",
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const pwm1_groups[] = {
- "sdmmc1_dat1_py6",
- "pu4",
- "gmi_ad9_ph1",
- "sdmmc3_dat2_pb5",
-};
-
-static const char * const pwm2_groups[] = {
- "pu5",
- "gmi_ad10_ph2",
- "kb_col3_pq3",
- "sdmmc3_dat1_pb6",
-};
-
-static const char * const pwm3_groups[] = {
- "pu6",
- "gmi_ad11_ph3",
- "sdmmc3_cmd_pa7",
-};
-
-static const char * const pwron_groups[] = {
- "core_pwr_req",
-};
-
-static const char * const reset_out_n_groups[] = {
- "reset_out_n",
-};
-
-static const char * const rsvd1_groups[] = {
- "pv1",
- "hdmi_int_pn7",
- "pu1",
- "pu2",
- "gmi_wp_n_pc7",
- "gmi_adv_n_pk0",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_wr_n_pi0",
- "gmi_oe_n_pi1",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x7_aud_px7",
-
- "reset_out_n",
-};
-
-static const char * const rsvd2_groups[] = {
- "pv0",
- "pv1",
- "sdmmc1_dat0_py7",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "dap4_fs_pp4",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_sclk_pp7",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "gmi_iordy_pi5",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat7_paa7",
- "pcc1",
- "pbb7",
- "pcc2",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "sys_clk_req_pz5",
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "owr",
- "spdif_out_pk5",
- "gpio_x1_aud_px1",
- "sdmmc3_clk_pa6",
- "sdmmc3_dat0_pb7",
- "gpio_w2_aud_pw2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "reset_out_n",
-};
-
-static const char * const rsvd3_groups[] = {
- "pv0",
- "pv1",
- "sdmmc1_clk_pz0",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "pu0",
- "pu1",
- "pu2",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "dap4_din_pp5",
- "dap4_sclk_pp7",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "pcc1",
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "pbb7",
- "pcc2",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "clk_32k_out_pa0",
- "sys_clk_req_pz5",
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "owr",
- "clk1_req_pee2",
- "clk1_out_pw4",
- "spdif_out_pk5",
- "spdif_in_pk6",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dvfs_pwm_px0",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
- "dvfs_clk_px2",
- "sdmmc3_clk_pa6",
- "sdmmc3_dat0_pb7",
- "hdmi_cec_pee3",
- "sdmmc3_cd_n_pv2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "reset_out_n",
-};
-
-static const char * const rsvd4_groups[] = {
- "pv0",
- "pv1",
- "sdmmc1_clk_pz0",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "pu0",
- "pu1",
- "pu2",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "dap4_fs_pp4",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_sclk_pp7",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_rst_n_pi4",
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
- "cam_mclk_pcc0",
- "pcc1",
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "pbb7",
- "pcc2",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_col2_pq2",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "clk_32k_out_pa0",
- "sys_clk_req_pz5",
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "owr",
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
- "clk1_req_pee2",
- "clk1_out_pw4",
- "spdif_in_pk6",
- "spdif_out_pk5",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dvfs_pwm_px0",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
- "dvfs_clk_px2",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
- "gpio_x7_aud_px7",
- "sdmmc3_cd_n_pv2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_in_pee5",
- "sdmmc3_clk_lb_out_pee4",
-};
-
-static const char * const sdmmc1_groups[] = {
-
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
- "uart3_cts_n_pa1",
- "kb_col5_pq5",
- "sdmmc1_wp_n_pv3",
-};
-
-static const char * const sdmmc2_groups[] = {
- "gmi_iordy_pi5",
- "gmi_clk_pk1",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_cs7_n_pi6",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_dqs_p_pj3",
-};
-
-static const char * const sdmmc3_groups[] = {
- "kb_col4_pq4",
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
- "hdmi_cec_pee3",
- "sdmmc3_cd_n_pv2",
- "sdmmc3_clk_lb_in_pee5",
- "sdmmc3_clk_lb_out_pee4",
-};
-
-static const char * const sdmmc4_groups[] = {
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
-};
-
-static const char * const soc_groups[] = {
- "gmi_cs1_n_pj2",
- "gmi_oe_n_pi1",
- "clk_32k_out_pa0",
- "hdmi_cec_pee3",
-};
-
-static const char * const spdif_groups[] = {
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "spdif_in_pk6",
- "spdif_out_pk5",
-};
-
-static const char * const spi1_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "gpio_x3_aud_px3",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
- "gpio_x7_aud_px7",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const spi2_groups[] = {
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
- "gpio_x7_aud_px7",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const spi3_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const spi4_groups[] = {
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "uart3_cts_n_pa1",
- "gmi_wait_pi7",
- "gmi_cs6_n_pi3",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_a19_pk7",
- "gmi_wr_n_pi0",
- "sdmmc1_wp_n_pv3",
-};
-
-static const char * const spi5_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
-};
-
-static const char * const spi6_groups[] = {
- "dvfs_pwm_px0",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
- "dvfs_clk_px2",
- "gpio_x6_aud_px6",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const sysclk_groups[] = {
- "sys_clk_req_pz5",
-};
-
-static const char * const trace_groups[] = {
- "gmi_iordy_pi5",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs2_n_pk3",
- "gmi_cs4_n_pk2",
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
- "gmi_dqs_p_pj3",
-};
-
-static const char * const uarta_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat1_pb6",
- "sdmmc1_wp_n_pv3",
-};
-
-static const char * const uartb_groups[] = {
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
-};
-
-static const char * const uartc_groups[] = {
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
-};
-
-static const char * const uartd_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
-};
-
-static const char * const ulpi_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
-};
-
-static const char * const usb_groups[] = {
- "pv0",
- "pu6",
- "gmi_cs0_n_pj0",
- "gmi_cs4_n_pk2",
- "gmi_ad11_ph3",
- "kb_col0_pq0",
- "spdif_in_pk6",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
-};
-
-static const char * const vgp1_groups[] = {
- "cam_i2c_scl_pbb1",
-};
-
-static const char * const vgp2_groups[] = {
- "cam_i2c_sda_pbb2",
-};
-
-static const char * const vgp3_groups[] = {
- "pbb3",
-};
-
-static const char * const vgp4_groups[] = {
- "pbb4",
-};
-
-static const char * const vgp5_groups[] = {
- "pbb5",
-};
-
-static const char * const vgp6_groups[] = {
- "pbb6",
-};
-
-static const char * const vi_groups[] = {
- "cam_mclk_pcc0",
- "pbb0",
-};
-
-static const char * const vi_alt1_groups[] = {
- "cam_mclk_pcc0",
- "pbb0",
-};
-
-static const char * const vi_alt3_groups[] = {
- "cam_mclk_pcc0",
- "pbb0",
-};
-
#define FUNCTION(fname) \
{ \
.name = #fname, \
- .groups = fname##_groups, \
- .ngroups = ARRAY_SIZE(fname##_groups), \
}
-static const struct tegra_function tegra114_functions[] = {
+static struct tegra_function tegra114_functions[] = {
FUNCTION(blink),
FUNCTION(cec),
FUNCTION(cldvfs),
+ FUNCTION(clk),
FUNCTION(clk12),
FUNCTION(cpu),
FUNCTION(dap),
@@ -2407,6 +1512,7 @@ static const struct tegra_function tegra114_functions[] = {
FUNCTION(rsvd2),
FUNCTION(rsvd3),
FUNCTION(rsvd4),
+ FUNCTION(rtck),
FUNCTION(sdmmc1),
FUNCTION(sdmmc2),
FUNCTION(sdmmc3),
@@ -2438,11 +1544,11 @@ static const struct tegra_function tegra114_functions[] = {
FUNCTION(vi_alt3),
};
-#define DRV_PINGROUP_REG_START 0x868 /* bank 0 */
-#define PINGROUP_REG_START 0x3000 /* bank 1 */
+#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
+#define PINGROUP_REG_A 0x3000 /* bank 1 */
-#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_START)
-#define PINGROUP_REG_N(r) -1
+#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
+#define PINGROUP_REG_N(r) -1
#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior, rcv_sel) \
{ \
@@ -2484,13 +1590,14 @@ static const struct tegra_function tegra114_functions[] = {
.drvtype_reg = -1, \
}
-#define DRV_PINGROUP_DVRTYPE_Y(r) ((r) - DRV_PINGROUP_REG_START)
-#define DRV_PINGROUP_DVRTYPE_N(r) -1
+#define DRV_PINGROUP_REG_Y(r) ((r) - DRV_PINGROUP_REG_A)
+#define DRV_PINGROUP_REG_N(r) -1
+
#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
- drvdn_b, drvdn_w, drvup_b, drvup_w, \
- slwr_b, slwr_w, slwf_b, slwf_w, \
- drvtype) \
+ drvdn_b, drvdn_w, drvup_b, drvup_w, \
+ slwr_b, slwr_w, slwf_b, slwf_w, \
+ drvtype) \
{ \
.name = "drive_" #pg_name, \
.pins = drive_##pg_name##_pins, \
@@ -2503,7 +1610,7 @@ static const struct tegra_function tegra114_functions[] = {
.lock_reg = -1, \
.ioreset_reg = -1, \
.rcv_sel_reg = -1, \
- .drv_reg = DRV_PINGROUP_DVRTYPE_Y(r), \
+ .drv_reg = DRV_PINGROUP_REG_Y(r), \
.drv_bank = 0, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
@@ -2516,14 +1623,13 @@ static const struct tegra_function tegra114_functions[] = {
.slwr_width = slwr_w, \
.slwf_bit = slwf_b, \
.slwf_width = slwf_w, \
- .drvtype_reg = DRV_PINGROUP_DVRTYPE_##drvtype(r), \
+ .drvtype_reg = DRV_PINGROUP_REG_##drvtype(r), \
.drvtype_bank = 0, \
.drvtype_bit = 6, \
}
static const struct tegra_pingroup tegra114_groups[] = {
/* pg_name, f0, f1, f2, f3, safe, r, od, ior, rcv_sel */
- /* FIXME: Fill in correct data in safe column */
PINGROUP(ulpi_data0_po1, SPI3, HSI, UARTA, ULPI, ULPI, 0x3000, N, N, N),
PINGROUP(ulpi_data1_po2, SPI3, HSI, UARTA, ULPI, ULPI, 0x3004, N, N, N),
PINGROUP(ulpi_data2_po3, SPI3, HSI, UARTA, ULPI, ULPI, 0x3008, N, N, N),
@@ -2635,6 +1741,7 @@ static const struct tegra_pingroup tegra114_groups[] = {
PINGROUP(pbb6, VGP6, DISPLAYA, DISPLAYB, RSVD4, RSVD4, 0x32a4, N, N, N),
PINGROUP(pbb7, I2S4, RSVD2, RSVD3, RSVD4, RSVD4, 0x32a8, N, N, N),
PINGROUP(pcc2, I2S4, RSVD2, RSVD3, RSVD4, RSVD4, 0x32ac, N, N, N),
+ PINGROUP(jtag_rtck, RTCK, RSVD2, RSVD3, RSVD4, RTCK, 0x32b0, N, N, N),
PINGROUP(pwr_i2c_scl_pz6, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x32b4, Y, N, N),
PINGROUP(pwr_i2c_sda_pz7, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x32b8, Y, N, N),
PINGROUP(kb_row0_pr0, KBC, RSVD2, RSVD3, RSVD4, RSVD4, 0x32bc, N, N, N),
@@ -2661,6 +1768,7 @@ static const struct tegra_pingroup tegra114_groups[] = {
PINGROUP(core_pwr_req, PWRON, RSVD2, RSVD3, RSVD4, RSVD4, 0x3324, N, N, N),
PINGROUP(cpu_pwr_req, CPU, RSVD2, RSVD3, RSVD4, RSVD4, 0x3328, N, N, N),
PINGROUP(pwr_int_n, PMI, RSVD2, RSVD3, RSVD4, RSVD4, 0x332c, N, N, N),
+ PINGROUP(clk_32k_in, CLK, RSVD2, RSVD3, RSVD4, CLK, 0x3330, N, N, N),
PINGROUP(owr, OWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x3334, N, N, Y),
PINGROUP(dap1_fs_pn0, I2S0, HDA, GMI, RSVD4, RSVD4, 0x3338, N, N, N),
PINGROUP(dap1_din_pn1, I2S0, HDA, GMI, RSVD4, RSVD4, 0x333c, N, N, N),
@@ -2697,38 +1805,48 @@ static const struct tegra_pingroup tegra114_groups[] = {
PINGROUP(usb_vbus_en1_pn5, USB, RSVD2, RSVD3, RSVD4, RSVD4, 0x33f8, Y, N, N),
PINGROUP(sdmmc3_clk_lb_in_pee5, SDMMC3, RSVD2, RSVD3, RSVD4, RSVD4, 0x33fc, N, N, N),
PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3, RSVD2, RSVD3, RSVD4, RSVD4, 0x3400, N, N, N),
+ PINGROUP(gmi_clk_lb, SDMMC2, NAND, GMI, RSVD4, GMI, 0x3404, N, N, N),
PINGROUP(reset_out_n, RSVD1, RSVD2, RSVD3, RESET_OUT_N, RSVD3, 0x3408, N, N, N),
/* pg_name, r, hsm_b, schmitt_b, lpmd_b, drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w, drvtype */
- DRV_PINGROUP(ao1, 0x868, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(ao2, 0x86c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(at1, 0x870, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
- DRV_PINGROUP(at2, 0x874, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
- DRV_PINGROUP(at3, 0x878, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
- DRV_PINGROUP(at4, 0x87c, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
- DRV_PINGROUP(at5, 0x880, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(cdev1, 0x884, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(cdev2, 0x888, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(dap1, 0x890, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(dap2, 0x894, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(dap3, 0x898, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(dap4, 0x89c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(dbg, 0x8a0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(sdio3, 0x8b0, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
- DRV_PINGROUP(spi, 0x8b4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(uaa, 0x8b8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(uab, 0x8bc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(uart2, 0x8c0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(uart3, 0x8c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(sdio1, 0x8ec, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
- DRV_PINGROUP(ddc, 0x8fc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(gma, 0x900, 2, 3, 4, 14, 5, 20, 5, 28, 2, 30, 2, Y),
- DRV_PINGROUP(gme, 0x910, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(gmf, 0x914, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(gmg, 0x918, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(gmh, 0x91c, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(owr, 0x920, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(uda, 0x924, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao1, 0x868, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao2, 0x86c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(at1, 0x870, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at2, 0x874, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at3, 0x878, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at4, 0x87c, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at5, 0x880, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(cdev1, 0x884, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(cdev2, 0x888, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap1, 0x890, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap2, 0x894, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap3, 0x898, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap4, 0x89c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dbg, 0x8a0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(sdio3, 0x8b0, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
+ DRV_PINGROUP(spi, 0x8b4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uaa, 0x8b8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uab, 0x8bc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uart2, 0x8c0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uart3, 0x8c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(sdio1, 0x8ec, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ddc, 0x8fc, 2, 3, -1, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gma, 0x900, 2, 3, -1, 14, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gme, 0x910, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmf, 0x914, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmg, 0x918, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmh, 0x91c, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(owr, 0x920, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uda, 0x924, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dev3, 0x92c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(cec, 0x938, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(at6, 0x994, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(dap5, 0x998, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(usb_vbus_en, 0x99c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao3, 0x9a0, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N),
+ DRV_PINGROUP(hv0, 0x9a4, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N),
+ DRV_PINGROUP(sdio4, 0x9a8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao0, 0x9ac, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
};
static const struct tegra_pinctrl_soc_data tegra114_pinctrl = {
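
Each DRV_PINGROUP row above records, for one drive-control register, the bit position and width of every tunable field (high-speed mode, schmitt, low-power mode, pull-down and pull-up drive strength, rising and falling slew), with -1 marking a field the register does not implement; the new -1 values in the lpmd column and in the ao3/hv0 drvup and slwf columns follow that convention. As a rough sketch of how such (bit, width) descriptors are typically consumed, using hypothetical helper names that are not part of the driver:

	#include <stdint.h>
	#include <stdio.h>

	/* Toy helpers: read or update one field of a 32-bit pinmux register
	 * given a (bit, width) pair from a drive-group row. */
	static uint32_t field_get(uint32_t reg, int bit, int width)
	{
		if (bit < 0)                 /* -1 in the table: field absent */
			return 0;
		return (reg >> bit) & ((1u << width) - 1);
	}

	static uint32_t field_set(uint32_t reg, int bit, int width, uint32_t val)
	{
		uint32_t mask;

		if (bit < 0)
			return reg;
		mask = ((1u << width) - 1) << bit;
		return (reg & ~mask) | ((val << bit) & mask);
	}

	int main(void)
	{
		uint32_t reg = 0;

		/* ao1 row: drvdn at bit 12, width 5; drvup at bit 20, width 5 */
		reg = field_set(reg, 12, 5, 0x1f);
		reg = field_set(reg, 20, 5, 0x0a);
		printf("reg = 0x%08x drvdn = %u drvup = %u\n",
		       reg, field_get(reg, 12, 5), field_get(reg, 20, 5));
		return 0;
	}
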
diff --git a/drivers/pinctrl/pinctrl-tegra124.c b/drivers/pinctrl/pinctrl-tegra124.c
index c20e0e1dda83..73773706755b 100644
--- a/drivers/pinctrl/pinctrl-tegra124.c
+++ b/drivers/pinctrl/pinctrl-tegra124.c
@@ -1,7 +1,7 @@
/*
* Pinctrl data for the NVIDIA Tegra124 pinmux
*
- * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -212,8 +212,8 @@
#define TEGRA_PIN_PFF2 _GPIO(250)
/* All non-GPIO pins follow */
-#define NUM_GPIOS (TEGRA_PIN_PFF2 + 1)
-#define _PIN(offset) (NUM_GPIOS + (offset))
+#define NUM_GPIOS (TEGRA_PIN_PFF2 + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
/* Non-GPIO pins */
#define TEGRA_PIN_CORE_PWR_REQ _PIN(0)
@@ -325,13 +325,13 @@ static const struct pinctrl_pin_desc tegra124_pins[] = {
PINCTRL_PIN(TEGRA_PIN_KB_ROW8_PS0, "KB_ROW8 PS0"),
PINCTRL_PIN(TEGRA_PIN_KB_ROW9_PS1, "KB_ROW9 PS1"),
PINCTRL_PIN(TEGRA_PIN_KB_ROW10_PS2, "KB_ROW10 PS2"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW11_PS3, "KB_ROW10 PS3"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW12_PS4, "KB_ROW10 PS4"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW13_PS5, "KB_ROW10 PS5"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW14_PS6, "KB_ROW10 PS6"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW15_PS7, "KB_ROW10 PS7"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW16_PT0, "KB_ROW10 PT0"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW17_PT1, "KB_ROW10 PT1"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW11_PS3, "KB_ROW11 PS3"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW12_PS4, "KB_ROW12 PS4"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW13_PS5, "KB_ROW13 PS5"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW14_PS6, "KB_ROW14 PS6"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW15_PS7, "KB_ROW15 PS7"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW16_PT0, "KB_ROW16 PT0"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW17_PT1, "KB_ROW17 PT1"),
PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SCL_PT5, "GEN2_I2C_SCL PT5"),
PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SDA_PT6, "GEN2_I2C_SDA PT6"),
PINCTRL_PIN(TEGRA_PIN_SDMMC4_CMD_PT7, "SDMMC4_CMD PT7"),
@@ -406,16 +406,16 @@ static const struct pinctrl_pin_desc tegra124_pins[] = {
PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PEE3, "HDMI_CEC PEE3"),
PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4, "SDMMC3_CLK_LB_OUT PEE4"),
PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5, "SDMMC3_CLK_LB_IN PEE5"),
+ PINCTRL_PIN(TEGRA_PIN_DP_HPD_PFF0, "DP_HPD PFF0"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN2_PFF1, "USB_VBUS_EN2 PFF1"),
+ PINCTRL_PIN(TEGRA_PIN_PFF2, "PFF2"),
PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
- PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
PINCTRL_PIN(TEGRA_PIN_RESET_OUT_N, "RESET_OUT_N"),
- PINCTRL_PIN(TEGRA_PIN_DP_HPD_PFF0, "DP_HPD PFF0"),
- PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN2_PFF1, "USB_VBUS_EN2 PFF1"),
- PINCTRL_PIN(TEGRA_PIN_PFF2, "PFF2"),
+ PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
- PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK, "JTAG_RTCK"),
};
@@ -1138,6 +1138,7 @@ static const unsigned sdmmc3_clk_lb_out_pee4_pins[] = {
static const unsigned sdmmc3_clk_lb_in_pee5_pins[] = {
TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
};
+
static const unsigned dp_hpd_pff0_pins[] = {
TEGRA_PIN_DP_HPD_PFF0,
};
@@ -1158,24 +1159,24 @@ static const unsigned cpu_pwr_req_pins[] = {
TEGRA_PIN_CPU_PWR_REQ,
};
-static const unsigned owr_pins[] = {
- TEGRA_PIN_OWR,
-};
-
static const unsigned pwr_int_n_pins[] = {
TEGRA_PIN_PWR_INT_N,
};
+static const unsigned gmi_clk_lb_pins[] = {
+ TEGRA_PIN_GMI_CLK_LB,
+};
+
static const unsigned reset_out_n_pins[] = {
TEGRA_PIN_RESET_OUT_N,
};
-static const unsigned clk_32k_in_pins[] = {
- TEGRA_PIN_CLK_32K_IN,
+static const unsigned owr_pins[] = {
+ TEGRA_PIN_OWR,
};
-static const unsigned gmi_clk_lb_pins[] = {
- TEGRA_PIN_GMI_CLK_LB,
+static const unsigned clk_32k_in_pins[] = {
+ TEGRA_PIN_CLK_32K_IN,
};
static const unsigned jtag_rtck_pins[] = {
@@ -1441,15 +1442,15 @@ static const unsigned drive_gpv_pins[] = {
TEGRA_PIN_PFF2,
};
-static const unsigned drive_cec_pins[] = {
- TEGRA_PIN_HDMI_CEC_PEE3,
-};
-
static const unsigned drive_dev3_pins[] = {
TEGRA_PIN_CLK3_OUT_PEE0,
TEGRA_PIN_CLK3_REQ_PEE1,
};
+static const unsigned drive_cec_pins[] = {
+ TEGRA_PIN_HDMI_CEC_PEE3,
+};
+
static const unsigned drive_at6_pins[] = {
TEGRA_PIN_PK1,
TEGRA_PIN_PK3,
@@ -1496,8 +1497,10 @@ static const unsigned drive_ao4_pins[] = {
enum tegra_mux {
TEGRA_MUX_BLINK,
+ TEGRA_MUX_CCLA,
TEGRA_MUX_CEC,
TEGRA_MUX_CLDVFS,
+ TEGRA_MUX_CLK,
TEGRA_MUX_CLK12,
TEGRA_MUX_CPU,
TEGRA_MUX_DAP,
@@ -1507,6 +1510,7 @@ enum tegra_mux {
TEGRA_MUX_DISPLAYA,
TEGRA_MUX_DISPLAYA_ALT,
TEGRA_MUX_DISPLAYB,
+ TEGRA_MUX_DP,
TEGRA_MUX_DTV,
TEGRA_MUX_EXTPERIPH1,
TEGRA_MUX_EXTPERIPH2,
@@ -1528,6 +1532,9 @@ enum tegra_mux {
TEGRA_MUX_IRDA,
TEGRA_MUX_KBC,
TEGRA_MUX_OWR,
+ TEGRA_MUX_PE,
+ TEGRA_MUX_PE0,
+ TEGRA_MUX_PE1,
TEGRA_MUX_PMI,
TEGRA_MUX_PWM0,
TEGRA_MUX_PWM1,
@@ -1539,6 +1546,8 @@ enum tegra_mux {
TEGRA_MUX_RSVD2,
TEGRA_MUX_RSVD3,
TEGRA_MUX_RSVD4,
+ TEGRA_MUX_RTCK,
+ TEGRA_MUX_SATA,
TEGRA_MUX_SDMMC1,
TEGRA_MUX_SDMMC2,
TEGRA_MUX_SDMMC3,
@@ -1551,6 +1560,8 @@ enum tegra_mux {
TEGRA_MUX_SPI4,
TEGRA_MUX_SPI5,
TEGRA_MUX_SPI6,
+ TEGRA_MUX_SYS,
+ TEGRA_MUX_TMDS,
TEGRA_MUX_TRACE,
TEGRA_MUX_UARTA,
TEGRA_MUX_UARTB,
@@ -1569,1134 +1580,19 @@ enum tegra_mux {
TEGRA_MUX_VI_ALT3,
TEGRA_MUX_VIMCLK2,
TEGRA_MUX_VIMCLK2_ALT,
- TEGRA_MUX_SATA,
- TEGRA_MUX_CCLA,
- TEGRA_MUX_PE0,
- TEGRA_MUX_PE,
- TEGRA_MUX_PE1,
- TEGRA_MUX_DP,
- TEGRA_MUX_RTCK,
- TEGRA_MUX_SYS,
- TEGRA_MUX_CLK,
- TEGRA_MUX_TMDS,
-};
-
-static const char * const blink_groups[] = {
- "clk_32k_out_pa0",
-};
-
-static const char * const cec_groups[] = {
- "hdmi_cec_pee3",
-};
-
-static const char * const cldvfs_groups[] = {
- "ph2",
- "ph3",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "dvfs_pwm_px0",
- "dvfs_clk_px2",
-};
-
-static const char * const clk12_groups[] = {
- "sdmmc1_wp_n_pv3",
- "sdmmc1_clk_pz0",
-};
-
-static const char * const cpu_groups[] = {
- "cpu_pwr_req",
-};
-
-static const char * const dap_groups[] = {
- "dap_mclk1_pee2",
- "clk2_req_pcc5",
-};
-
-static const char * const dap1_groups[] = {
- "dap_mclk1_pee2",
-};
-
-static const char * const dap2_groups[] = {
- "dap_mclk1_pw4",
- "gpio_x4_aud_px4",
-};
-
-static const char * const dev3_groups[] = {
- "clk3_req_pee1",
-};
-
-static const char * const displaya_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "ph1",
- "pi4",
- "pbb3",
- "pbb4",
- "pbb5",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_col3_pq3",
- "sdmmc3_dat2_pb5",
-};
-
-static const char * const displaya_alt_groups[] = {
- "kb_row6_pr6",
-};
-
-static const char * const displayb_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_sclk_pp3",
-
- "pu3",
- "pu4",
- "pu5",
-
- "pbb3",
- "pbb4",
- "pbb6",
-
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
-
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const dtv_groups[] = {
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "dap4_fs_pp4",
- "dap4_dout_pp6",
- "pi7",
- "ph0",
- "ph6",
- "ph7",
-};
-
-static const char * const extperiph1_groups[] = {
- "dap_mclk1_pw4",
-};
-
-static const char * const extperiph2_groups[] = {
- "clk2_out_pw5",
-};
-
-static const char * const extperiph3_groups[] = {
- "clk3_out_pee0",
-};
-
-static const char * const gmi_groups[] = {
- "uart2_cts_n_pj5",
- "uart2_rts_n_pj6",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
-
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
-
- "dap4_fs_pp4",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_sclk_pp7",
-
- "pc7",
-
- "pg0",
- "pg1",
- "pg2",
- "pg3",
- "pg4",
- "pg5",
- "pg6",
- "pg7",
-
- "ph0",
- "ph1",
- "ph2",
- "ph3",
- "ph4",
- "ph5",
- "ph6",
- "ph7",
-
- "pi0",
- "pi1",
- "pi2",
- "pi3",
- "pi4",
- "pi5",
- "pi6",
- "pi7",
-
- "pj0",
- "pj2",
-
- "pk0",
- "pk1",
- "pk2",
- "pk3",
- "pk4",
-
- "pj7",
- "pb0",
- "pb1",
- "pk7",
-
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
-
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "gmi_clk_lb",
-
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
-
- "dap2_fs_pa2",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_sclk_pa3",
-
- "dvfs_pwm_px0",
- "dvfs_clk_px2",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
-};
-
-static const char * const gmi_alt_groups[] = {
- "pc7",
- "pk4",
- "pj7",
-};
-
-static const char * const hda_groups[] = {
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
-};
-
-static const char * const hsi_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-};
-
-static const char * const i2c1_groups[] = {
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const i2c2_groups[] = {
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
-};
-
-static const char * const i2c3_groups[] = {
- "spdif_in_pk6",
- "spdif_out_pk5",
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
-};
-
-static const char * const i2c4_groups[] = {
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-};
-
-static const char * const i2cpwr_groups[] = {
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-};
-
-static const char * const i2s0_groups[] = {
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
-};
-
-static const char * const i2s1_groups[] = {
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
-};
-
-static const char * const i2s2_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
-};
-
-static const char * const i2s3_groups[] = {
- "dap4_fs_pp4",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_sclk_pp7",
-};
-
-static const char * const i2s4_groups[] = {
- "pcc1",
- "pbb6",
- "pbb7",
- "pcc2",
-};
-
-static const char * const irda_groups[] = {
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "kb_row11_ps3",
- "kb_row12_ps4",
-};
-
-static const char * const kbc_groups[] = {
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_row16_pt0",
- "kb_row17_pt1",
-
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
-};
-
-static const char * const owr_groups[] = {
- "pu0",
- "kb_col4_pq4",
- "owr",
- "sdmmc3_cd_n_pv2",
-};
-
-static const char * const pmi_groups[] = {
- "pwr_int_n",
-};
-
-static const char * const pwm0_groups[] = {
- "sdmmc1_dat2_py5",
- "uart3_rts_n_pc0",
- "pu3",
- "ph0",
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const pwm1_groups[] = {
- "sdmmc1_dat1_py6",
- "pu4",
- "ph1",
- "sdmmc3_dat2_pb5",
-};
-
-static const char * const pwm2_groups[] = {
- "pu5",
- "ph2",
- "kb_col3_pq3",
- "sdmmc3_dat1_pb6",
-};
-
-static const char * const pwm3_groups[] = {
- "pu6",
- "ph3",
- "sdmmc3_cmd_pa7",
-};
-
-static const char * const pwron_groups[] = {
- "core_pwr_req",
-};
-
-static const char * const reset_out_n_groups[] = {
- "reset_out_n",
-};
-
-static const char * const rsvd1_groups[] = {
- "pv0",
- "pv1",
-
- "hdmi_int_pn7",
- "pu1",
- "pu2",
- "pc7",
- "pi7",
- "pk0",
- "pj0",
- "pj2",
- "pk2",
- "pi3",
- "pi6",
-
- "pg0",
- "pg1",
- "pg2",
- "pg3",
- "pg4",
- "pg5",
- "pg6",
- "pg7",
-
- "pi0",
- "pi1",
-
- "gpio_x7_aud_px7",
-
- "reset_out_n",
-};
-
-static const char * const rsvd2_groups[] = {
- "pv0",
- "pv1",
-
- "sdmmc1_dat0_py7",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
-
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
-
- "clk2_out_pee0",
- "clk2_req_pee1",
- "pc7",
- "pi5",
- "pj0",
- "pj2",
-
- "pk4",
- "pk2",
- "pi3",
- "pi6",
- "pg0",
- "pg1",
- "pg5",
- "pg6",
- "pg7",
-
- "ph4",
- "ph5",
- "pj7",
- "pb0",
- "pb1",
- "pk7",
- "pi0",
- "pi1",
-
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat7_paa7",
- "pcc1",
- "pbb6",
- "pbb7",
- "pcc2",
- "jtag_rtck",
-
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
-
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
-
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "clk_32k_in",
- "owr",
-
- "spdif_in_pk6",
- "spdif_out_pk5",
- "gpio_x1_aud_px1",
-
- "sdmmc3_clk_pa6",
- "sdmmc3_dat0_pb7",
-
- "pex_l0_rst_n_pdd1",
- "pex_l0_clkreq_n_pdd2",
- "pex_wake_n_pdd3",
- "pex_l1_rst_n_pdd5",
- "pex_l1_clkreq_n_pdd6",
- "hdmi_cec_pee3",
-
- "gpio_w2_aud_pw2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "gmi_clk_lb",
- "reset_out_n",
- "kb_row16_pt0",
- "kb_row17_pt1",
- "dp_hpd_pff0",
- "usb_vbus_en2_pff1",
- "pff2",
-};
-
-static const char * const rsvd3_groups[] = {
- "dap3_sclk_pp3",
- "pv0",
- "pv1",
- "sdmmc1_clk_pz0",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
-
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-
- "pu6",
-
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
-
- "dap4_din_pp5",
- "dap4_sclk_pp7",
-
- "clk3_out_pee0",
- "clk3_req_pee1",
-
- "sdmmc4_dat5_paa5",
- "gpio_pcc1",
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "pbb5",
- "pbb7",
- "jtag_rtck",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row15_ps7",
-
- "clk_32k_out_pa0",
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "clk_32k_in",
- "owr",
-
- "dap_mclk1_pw4",
- "spdif_in_pk6",
- "spdif_out_pk5",
- "sdmmc3_clk_pa6",
- "sdmmc3_dat0_pb7",
-
- "pex_l0_rst_n_pdd1",
- "pex_l0_clkreq_n_pdd2",
- "pex_wake_n_pdd3",
- "pex_l1_rst_n_pdd5",
- "pex_l1_clkreq_n_pdd6",
- "hdmi_cec_pee3",
-
- "sdmmc3_cd_n_pv2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "reset_out_n",
- "kb_row16_pt0",
- "kb_row17_pt1",
- "dp_hpd_pff0",
- "usb_vbus_en2_pff1",
- "pff2",
-};
-
-static const char * const rsvd4_groups[] = {
- "dap3_dout_pp2",
- "pv0",
- "pv1",
- "sdmmc1_clk_pz0",
-
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
-
- "pu0",
- "pu1",
- "pu2",
-
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
-
- "dap4_fs_pp4",
- "dap4_dout_pp6",
- "dap4_din_pp5",
- "dap4_sclk_pp7",
-
- "clk3_out_pee0",
- "clk3_req_pee1",
-
- "pi5",
- "pk1",
- "pk2",
- "pg0",
- "pg1",
- "pg2",
- "pg3",
- "ph4",
- "ph5",
- "pb0",
- "pb1",
- "pk7",
- "pi0",
- "pi1",
- "pi2",
-
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
-
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
-
- "jtag_rtck",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
-
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col5_pq5",
-
- "clk_32k_out_pa0",
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "clk_32k_in",
- "owr",
-
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_sclk_pn3",
- "dap_mclk1_req_pee2",
- "dap_mclk1_pw5",
-
- "dap2_fs_pa2",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_sclk_pa3",
-
- "dvfs_pwm_px0",
- "dvfs_clk_px2",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
-
- "gpio_x5_aud_px5",
- "gpio_x7_aud_px7",
-
- "pex_l0_rst_n_pdd1",
- "pex_l0_clkreq_n_pdd2",
- "pex_wake_n_pdd3",
- "pex_l1_rst_n_pdd5",
- "pex_l1_clkreq_n_pdd6",
- "hdmi_cec_pee3",
-
- "sdmmc3_cd_n_pv2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "gmi_clk_lb",
-
- "dp_hpd_pff0",
- "usb_vbus_en2_pff1",
- "pff2",
-};
-
-static const char * const sdmmc1_groups[] = {
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
- "clk2_out_pw5",
- "clk2_req_pcc",
- "uart3_cts_n_pa1",
- "sdmmc1_wp_n_pv3",
-};
-
-static const char * const sdmmc2_groups[] = {
- "pi5",
- "pk1",
- "pk3",
- "pk4",
- "pi6",
- "ph4",
- "ph5",
- "ph6",
- "ph7",
- "pi2",
- "cam_mclk_pcc0",
- "pcc1",
- "pbb0",
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "pbb7",
- "pcc2",
- "gmi_clk_lb",
-};
-
-static const char * const sdmmc3_groups[] = {
- "pk0",
- "pcc2",
-
- "kb_col4_pq4",
- "kb_col5_pq5",
-
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
-
- "sdmmc3_cd_n_pv2",
- "sdmmc3_clk_lb_in_pee5",
- "sdmmc3_clk_lb_out_pee4",
-};
-
-static const char * const sdmmc4_groups[] = {
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
-};
-
-static const char * const soc_groups[] = {
- "pk0",
- "pj2",
- "kb_row15_ps7",
- "clk_32k_out_pa0",
-};
-
-static const char * const spdif_groups[] = {
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "spdif_in_pk6",
- "spdif_out_pk5",
-};
-
-static const char * const spi1_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "gpio_x3_aud_px3",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
- "gpio_x7_aud_px7",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const spi2_groups[] = {
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
- "gpio_x7_aud_px7",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const spi3_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const spi4_groups[] = {
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
-
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
-
- "pi3",
- "pg4",
- "pg5",
- "pg6",
- "pg7",
- "ph3",
- "pi4",
- "sdmmc1_wp_n_pv3",
-};
-
-static const char * const spi5_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
-};
-
-static const char * const spi6_groups[] = {
- "dvfs_pwm_px0",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
- "dvfs_clk_px2",
- "gpio_x6_aud_px6",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const trace_groups[] = {
- "pi2",
- "pi4",
- "pi7",
- "ph0",
- "ph6",
- "ph7",
- "pg2",
- "pg3",
- "pk1",
- "pk3",
-};
-
-static const char * const uarta_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
-
-
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
-
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
-
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_col3_pq3",
- "kb_col4_pq4",
-
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat1_pb6",
- "sdmmc1_wp_n_pv3",
-
-};
-
-static const char * const uartb_groups[] = {
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
-};
-
-static const char * const uartc_groups[] = {
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "kb_row16_pt0",
- "kn_row17_pt1",
-};
-
-static const char * const uartd_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "pj7",
- "pb0",
- "pb1",
- "pk7",
- "kb_col6_pq6",
- "kb_col7_pq7",
-};
-
-static const char * const ulpi_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
-};
-
-static const char * const usb_groups[] = {
- "pj0",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "usb_vbus_en2_pff1",
-};
-
-static const char * const vgp1_groups[] = {
- "cam_i2c_scl_pbb1",
-};
-
-static const char * const vgp2_groups[] = {
- "cam_i2c_sda_pbb2",
-};
-
-static const char * const vgp3_groups[] = {
- "pbb3",
-};
-
-static const char * const vgp4_groups[] = {
- "pbb4",
-};
-
-static const char * const vgp5_groups[] = {
- "pbb5",
-};
-
-static const char * const vgp6_groups[] = {
- "pbb0",
-};
-
-static const char * const vi_groups[] = {
- "cam_mclk_pcc0",
-};
-
-static const char * const vi_alt1_groups[] = {
- "cam_mclk_pcc0",
-};
-
-static const char * const vi_alt3_groups[] = {
- "cam_mclk_pcc0",
-};
-
-static const char * const vimclk2_groups[] = {
- "pbb0",
-};
-
-static const char * const vimclk2_alt_groups[] = {
- "pbb0",
-};
-
-static const char * const sata_groups[] = {
- "dap_mclk1_req_pee2",
- "dap1_dout_pn2",
- "pff2",
-};
-
-static const char * const ccla_groups[] = {
- "pk3",
-};
-
-static const char * const rtck_groups[] = {
- "jtag_rtck",
-};
-
-static const char * const sys_groups[] = {
- "kb_row3_pr3",
-};
-
-static const char * const pe0_groups[] = {
- "pex_l0_rst_n_pdd1",
- "pex_l0_clkreq_n_pdd2",
-};
-
-static const char * const pe_groups[] = {
- "pex_wake_n_pdd3",
-};
-
-static const char * const pe1_groups[] = {
- "pex_l1_rst_n_pdd5",
- "pex_l1_clkreq_n_pdd6",
-};
-
-static const char * const dp_groups[] = {
- "dp_hpd_pff0",
-};
-
-static const char * const clk_groups[] = {
- "clk_32k_in",
-};
-
-static const char * const tmds_groups[] = {
- "pg4",
- "ph1",
- "ph2",
};
#define FUNCTION(fname) \
{ \
.name = #fname, \
- .groups = fname##_groups, \
- .ngroups = ARRAY_SIZE(fname##_groups), \
}
-static const struct tegra_function tegra124_functions[] = {
+static struct tegra_function tegra124_functions[] = {
FUNCTION(blink),
+ FUNCTION(ccla),
FUNCTION(cec),
FUNCTION(cldvfs),
+ FUNCTION(clk),
FUNCTION(clk12),
FUNCTION(cpu),
FUNCTION(dap),
@@ -2706,6 +1602,7 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(displaya),
FUNCTION(displaya_alt),
FUNCTION(displayb),
+ FUNCTION(dp),
FUNCTION(dtv),
FUNCTION(extperiph1),
FUNCTION(extperiph2),
@@ -2727,6 +1624,9 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(irda),
FUNCTION(kbc),
FUNCTION(owr),
+ FUNCTION(pe),
+ FUNCTION(pe0),
+ FUNCTION(pe1),
FUNCTION(pmi),
FUNCTION(pwm0),
FUNCTION(pwm1),
@@ -2738,6 +1638,8 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(rsvd2),
FUNCTION(rsvd3),
FUNCTION(rsvd4),
+ FUNCTION(rtck),
+ FUNCTION(sata),
FUNCTION(sdmmc1),
FUNCTION(sdmmc2),
FUNCTION(sdmmc3),
@@ -2750,6 +1652,8 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(spi4),
FUNCTION(spi5),
FUNCTION(spi6),
+ FUNCTION(sys),
+ FUNCTION(tmds),
FUNCTION(trace),
FUNCTION(uarta),
FUNCTION(uartb),
@@ -2768,23 +1672,13 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(vi_alt3),
FUNCTION(vimclk2),
FUNCTION(vimclk2_alt),
- FUNCTION(sata),
- FUNCTION(ccla),
- FUNCTION(pe0),
- FUNCTION(pe),
- FUNCTION(pe1),
- FUNCTION(dp),
- FUNCTION(rtck),
- FUNCTION(sys),
- FUNCTION(clk),
- FUNCTION(tmds),
};
-#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
-#define PINGROUP_REG_A 0x3000 /* bank 1 */
+#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
+#define PINGROUP_REG_A 0x3000 /* bank 1 */
-#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
-#define PINGROUP_REG_N(r) -1
+#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
+#define PINGROUP_REG_N(r) -1
#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior, rcv_sel) \
{ \
@@ -2792,12 +1686,12 @@ static const struct tegra_function tegra124_functions[] = {
.pins = pg_name##_pins, \
.npins = ARRAY_SIZE(pg_name##_pins), \
.funcs = { \
- TEGRA_MUX_ ## f0, \
- TEGRA_MUX_ ## f1, \
- TEGRA_MUX_ ## f2, \
- TEGRA_MUX_ ## f3, \
+ TEGRA_MUX_##f0, \
+ TEGRA_MUX_##f1, \
+ TEGRA_MUX_##f2, \
+ TEGRA_MUX_##f3, \
}, \
- .func_safe = TEGRA_MUX_ ## f_safe, \
+ .func_safe = TEGRA_MUX_##f_safe, \
.mux_reg = PINGROUP_REG_Y(r), \
.mux_bank = 1, \
.mux_bit = 0, \
@@ -2826,8 +1720,9 @@ static const struct tegra_function tegra124_functions[] = {
.drvtype_reg = -1, \
}
-#define DRV_PINGROUP_DVRTYPE_Y(r) ((r) - DRV_PINGROUP_REG_A)
-#define DRV_PINGROUP_DVRTYPE_N(r) -1
+#define DRV_PINGROUP_REG_Y(r) ((r) - DRV_PINGROUP_REG_A)
+#define DRV_PINGROUP_REG_N(r) -1
+
#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
drvdn_b, drvdn_w, drvup_b, drvup_w, \
@@ -2845,7 +1740,7 @@ static const struct tegra_function tegra124_functions[] = {
.lock_reg = -1, \
.ioreset_reg = -1, \
.rcv_sel_reg = -1, \
- .drv_reg = DRV_PINGROUP_DVRTYPE_Y(r), \
+ .drv_reg = DRV_PINGROUP_REG_Y(r), \
.drv_bank = 0, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
@@ -2858,7 +1753,7 @@ static const struct tegra_function tegra124_functions[] = {
.slwr_width = slwr_w, \
.slwf_bit = slwf_b, \
.slwf_width = slwf_w, \
- .drvtype_reg = DRV_PINGROUP_DVRTYPE_##drvtype(r), \
+ .drvtype_reg = DRV_PINGROUP_REG_##drvtype(r), \
.drvtype_bank = 0, \
.drvtype_bit = 6, \
}
@@ -2909,8 +1804,8 @@ static const struct tegra_pingroup tegra124_groups[] = {
PINGROUP(pu4, PWM1, UARTA, GMI, DISPLAYB, PWM1, 0x3194, N, N, N),
PINGROUP(pu5, PWM2, UARTA, GMI, DISPLAYB, PWM2, 0x3198, N, N, N),
PINGROUP(pu6, PWM3, UARTA, RSVD3, GMI, RSVD3, 0x319c, N, N, N),
- PINGROUP(gen1_i2c_scl_pc4, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a0, Y, N, N),
- PINGROUP(gen1_i2c_sda_pc5, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a4, Y, N, N),
+ PINGROUP(gen1_i2c_sda_pc5, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a0, Y, N, N),
+ PINGROUP(gen1_i2c_scl_pc4, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a4, Y, N, N),
PINGROUP(dap4_fs_pp4, I2S3, GMI, DTV, RSVD4, I2S3, 0x31a8, N, N, N),
PINGROUP(dap4_din_pp5, I2S3, GMI, RSVD3, RSVD4, I2S3, 0x31ac, N, N, N),
PINGROUP(dap4_dout_pp6, I2S3, GMI, DTV, RSVD4, I2S3, 0x31b0, N, N, N),
@@ -2964,9 +1859,9 @@ static const struct tegra_pingroup tegra124_groups[] = {
PINGROUP(sdmmc4_dat4_paa4, SDMMC4, SPI3, GMI, RSVD4, SDMMC4, 0x3270, N, Y, N),
PINGROUP(sdmmc4_dat5_paa5, SDMMC4, SPI3, RSVD3, RSVD4, SDMMC4, 0x3274, N, Y, N),
PINGROUP(sdmmc4_dat6_paa6, SDMMC4, SPI3, GMI, RSVD4, SDMMC4, 0x3278, N, Y, N),
- PINGROUP(sdmmc4_dat7_paa7, SDMMC4, RSVD1, GMI, RSVD4, SDMMC4, 0x327c, N, Y, N),
+ PINGROUP(sdmmc4_dat7_paa7, SDMMC4, RSVD2, GMI, RSVD4, SDMMC4, 0x327c, N, Y, N),
PINGROUP(cam_mclk_pcc0, VI, VI_ALT1, VI_ALT3, SDMMC2, VI, 0x3284, N, N, N),
- PINGROUP(pcc1, I2S4, RSVD1, RSVD3, SDMMC2, I2S4, 0x3288, N, N, N),
+ PINGROUP(pcc1, I2S4, RSVD2, RSVD3, SDMMC2, I2S4, 0x3288, N, N, N),
PINGROUP(pbb0, VGP6, VIMCLK2, SDMMC2, VIMCLK2_ALT, VGP6, 0x328c, N, N, N),
PINGROUP(cam_i2c_scl_pbb1, VGP1, I2C3, RSVD3, SDMMC2, VGP1, 0x3290, Y, N, N),
PINGROUP(cam_i2c_sda_pbb2, VGP2, I2C3, RSVD3, SDMMC2, VGP2, 0x3294, Y, N, N),
@@ -3047,8 +1942,8 @@ static const struct tegra_pingroup tegra124_groups[] = {
PINGROUP(gpio_w3_aud_pw3, SPI6, SPI1, SPI2, I2C1, SPI1, 0x33f0, N, N, N),
PINGROUP(usb_vbus_en0_pn4, USB, RSVD2, RSVD3, RSVD4, USB, 0x33f4, Y, N, N),
PINGROUP(usb_vbus_en1_pn5, USB, RSVD2, RSVD3, RSVD4, USB, 0x33f8, Y, N, N),
- PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x33fc, N, N, N),
- PINGROUP(sdmmc3_clk_lb_in_pee5, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x3400, N, N, N),
+ PINGROUP(sdmmc3_clk_lb_in_pee5, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x33fc, N, N, N),
+ PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x3400, N, N, N),
PINGROUP(gmi_clk_lb, SDMMC2, RSVD2, GMI, RSVD4, SDMMC2, 0x3404, N, N, N),
PINGROUP(reset_out_n, RSVD1, RSVD2, RSVD3, RESET_OUT_N, RSVD1, 0x3408, N, N, N),
PINGROUP(kb_row16_pt0, KBC, RSVD2, RSVD3, UARTC, KBC, 0x340c, N, N, N),
diff --git a/drivers/pinctrl/pinctrl-tegra20.c b/drivers/pinctrl/pinctrl-tegra20.c
index fcfb7d012c5b..e0b504088387 100644
--- a/drivers/pinctrl/pinctrl-tegra20.c
+++ b/drivers/pinctrl/pinctrl-tegra20.c
@@ -1894,637 +1894,12 @@ enum tegra_mux {
TEGRA_MUX_XIO,
};
-static const char * const ahb_clk_groups[] = {
- "cdev2",
-};
-
-static const char * const apb_clk_groups[] = {
- "cdev2",
-};
-
-static const char * const audio_sync_groups[] = {
- "cdev1",
-};
-
-static const char * const crt_groups[] = {
- "crtp",
- "lm1",
-};
-
-static const char * const dap1_groups[] = {
- "dap1",
-};
-
-static const char * const dap2_groups[] = {
- "dap2",
-};
-
-static const char * const dap3_groups[] = {
- "dap3",
-};
-
-static const char * const dap4_groups[] = {
- "dap4",
-};
-
-static const char * const dap5_groups[] = {
- "gme",
-};
-
-static const char * const displaya_groups[] = {
- "lcsn",
- "ld0",
- "ld1",
- "ld10",
- "ld11",
- "ld12",
- "ld13",
- "ld14",
- "ld15",
- "ld16",
- "ld17",
- "ld2",
- "ld3",
- "ld4",
- "ld5",
- "ld6",
- "ld7",
- "ld8",
- "ld9",
- "ldc",
- "ldi",
- "lhp0",
- "lhp1",
- "lhp2",
- "lhs",
- "lm0",
- "lm1",
- "lpp",
- "lpw0",
- "lpw1",
- "lpw2",
- "lsc0",
- "lsc1",
- "lsck",
- "lsda",
- "lsdi",
- "lspi",
- "lvp0",
- "lvp1",
- "lvs",
-};
-
-static const char * const displayb_groups[] = {
- "lcsn",
- "ld0",
- "ld1",
- "ld10",
- "ld11",
- "ld12",
- "ld13",
- "ld14",
- "ld15",
- "ld16",
- "ld17",
- "ld2",
- "ld3",
- "ld4",
- "ld5",
- "ld6",
- "ld7",
- "ld8",
- "ld9",
- "ldc",
- "ldi",
- "lhp0",
- "lhp1",
- "lhp2",
- "lhs",
- "lm0",
- "lm1",
- "lpp",
- "lpw0",
- "lpw1",
- "lpw2",
- "lsc0",
- "lsc1",
- "lsck",
- "lsda",
- "lsdi",
- "lspi",
- "lvp0",
- "lvp1",
- "lvs",
-};
-
-static const char * const emc_test0_dll_groups[] = {
- "kbca",
-};
-
-static const char * const emc_test1_dll_groups[] = {
- "kbcc",
-};
-
-static const char * const gmi_groups[] = {
- "ata",
- "atb",
- "atc",
- "atd",
- "ate",
- "dap1",
- "dap2",
- "dap4",
- "gma",
- "gmb",
- "gmc",
- "gmd",
- "gme",
- "gpu",
- "irrx",
- "irtx",
- "pta",
- "spia",
- "spib",
- "spic",
- "spid",
- "spie",
- "uca",
- "ucb",
-};
-
-static const char * const gmi_int_groups[] = {
- "gmb",
-};
-
-static const char * const hdmi_groups[] = {
- "hdint",
- "lpw0",
- "lpw2",
- "lsc1",
- "lsck",
- "lsda",
- "lspi",
- "pta",
-};
-
-static const char * const i2cp_groups[] = {
- "i2cp",
-};
-
-static const char * const i2c1_groups[] = {
- "rm",
- "spdi",
- "spdo",
- "spig",
- "spih",
-};
-
-static const char * const i2c2_groups[] = {
- "ddc",
- "pta",
-};
-
-static const char * const i2c3_groups[] = {
- "dtf",
-};
-
-static const char * const ide_groups[] = {
- "ata",
- "atb",
- "atc",
- "atd",
- "ate",
- "gmb",
-};
-
-static const char * const irda_groups[] = {
- "uad",
-};
-
-static const char * const kbc_groups[] = {
- "kbca",
- "kbcb",
- "kbcc",
- "kbcd",
- "kbce",
- "kbcf",
-};
-
-static const char * const mio_groups[] = {
- "kbcb",
- "kbcd",
- "kbcf",
-};
-
-static const char * const mipi_hs_groups[] = {
- "uaa",
- "uab",
-};
-
-static const char * const nand_groups[] = {
- "ata",
- "atb",
- "atc",
- "atd",
- "ate",
- "gmb",
- "gmd",
- "kbca",
- "kbcb",
- "kbcc",
- "kbcd",
- "kbce",
- "kbcf",
-};
-
-static const char * const osc_groups[] = {
- "cdev1",
- "cdev2",
-};
-
-static const char * const owr_groups[] = {
- "kbce",
- "owc",
- "uac",
-};
-
-static const char * const pcie_groups[] = {
- "gpv",
- "slxa",
- "slxk",
-};
-
-static const char * const plla_out_groups[] = {
- "cdev1",
-};
-
-static const char * const pllc_out1_groups[] = {
- "csus",
-};
-
-static const char * const pllm_out1_groups[] = {
- "cdev1",
-};
-
-static const char * const pllp_out2_groups[] = {
- "csus",
-};
-
-static const char * const pllp_out3_groups[] = {
- "csus",
-};
-
-static const char * const pllp_out4_groups[] = {
- "cdev2",
-};
-
-static const char * const pwm_groups[] = {
- "gpu",
- "sdb",
- "sdc",
- "sdd",
- "ucb",
-};
-
-static const char * const pwr_intr_groups[] = {
- "pmc",
-};
-
-static const char * const pwr_on_groups[] = {
- "pmc",
-};
-
-static const char * const rsvd1_groups[] = {
- "dta",
- "dtb",
- "dtc",
- "dtd",
- "dte",
- "gmd",
- "gme",
-};
-
-static const char * const rsvd2_groups[] = {
- "crtp",
- "dap1",
- "dap3",
- "dap4",
- "ddc",
- "dtb",
- "dtc",
- "dte",
- "dtf",
- "gpu7",
- "gpv",
- "hdint",
- "i2cp",
- "owc",
- "rm",
- "sdio1",
- "spdi",
- "spdo",
- "uac",
- "uca",
- "uda",
-};
-
-static const char * const rsvd3_groups[] = {
- "crtp",
- "dap2",
- "dap3",
- "ddc",
- "gpu7",
- "gpv",
- "hdint",
- "i2cp",
- "ld17",
- "ldc",
- "ldi",
- "lhp0",
- "lhp1",
- "lhp2",
- "lm1",
- "lpp",
- "lpw1",
- "lvp0",
- "lvp1",
- "owc",
- "pmc",
- "rm",
- "uac",
-};
-
-static const char * const rsvd4_groups[] = {
- "ata",
- "ate",
- "crtp",
- "dap3",
- "dap4",
- "ddc",
- "dta",
- "dtc",
- "dtd",
- "dtf",
- "gpu",
- "gpu7",
- "gpv",
- "hdint",
- "i2cp",
- "kbce",
- "lcsn",
- "ld0",
- "ld1",
- "ld2",
- "ld3",
- "ld4",
- "ld5",
- "ld6",
- "ld7",
- "ld8",
- "ld9",
- "ld10",
- "ld11",
- "ld12",
- "ld13",
- "ld14",
- "ld15",
- "ld16",
- "ld17",
- "ldc",
- "ldi",
- "lhp0",
- "lhp1",
- "lhp2",
- "lhs",
- "lm0",
- "lpp",
- "lpw1",
- "lsc0",
- "lsdi",
- "lvp0",
- "lvp1",
- "lvs",
- "owc",
- "pmc",
- "pta",
- "rm",
- "spif",
- "uac",
- "uca",
- "ucb",
-};
-
-static const char * const rtck_groups[] = {
- "gpu7",
-};
-
-static const char * const sdio1_groups[] = {
- "sdio1",
-};
-
-static const char * const sdio2_groups[] = {
- "dap1",
- "dta",
- "dtd",
- "kbca",
- "kbcb",
- "kbcd",
- "spdi",
- "spdo",
-};
-
-static const char * const sdio3_groups[] = {
- "sdb",
- "sdc",
- "sdd",
- "slxa",
- "slxc",
- "slxd",
- "slxk",
-};
-
-static const char * const sdio4_groups[] = {
- "atb",
- "atc",
- "atd",
- "gma",
- "gme",
-};
-
-static const char * const sflash_groups[] = {
- "gmc",
- "gmd",
-};
-
-static const char * const spdif_groups[] = {
- "slxc",
- "slxd",
- "spdi",
- "spdo",
- "uad",
-};
-
-static const char * const spi1_groups[] = {
- "dtb",
- "dte",
- "spia",
- "spib",
- "spic",
- "spid",
- "spie",
- "spif",
- "uda",
-};
-
-static const char * const spi2_groups[] = {
- "sdb",
- "slxa",
- "slxc",
- "slxd",
- "slxk",
- "spia",
- "spib",
- "spic",
- "spid",
- "spie",
- "spif",
- "spig",
- "spih",
- "uab",
-};
-
-static const char * const spi2_alt_groups[] = {
- "spid",
- "spie",
- "spig",
- "spih",
-};
-
-static const char * const spi3_groups[] = {
- "gma",
- "lcsn",
- "lm0",
- "lpw0",
- "lpw2",
- "lsc1",
- "lsck",
- "lsda",
- "lsdi",
- "sdc",
- "sdd",
- "spia",
- "spib",
- "spic",
- "spif",
- "spig",
- "spih",
- "uaa",
-};
-
-static const char * const spi4_groups[] = {
- "gmc",
- "irrx",
- "irtx",
- "slxa",
- "slxc",
- "slxd",
- "slxk",
- "uad",
-};
-
-static const char * const trace_groups[] = {
- "kbcc",
- "kbcf",
-};
-
-static const char * const twc_groups[] = {
- "dap2",
- "sdc",
-};
-
-static const char * const uarta_groups[] = {
- "gpu",
- "irrx",
- "irtx",
- "sdb",
- "sdd",
- "sdio1",
- "uaa",
- "uab",
- "uad",
-};
-
-static const char * const uartb_groups[] = {
- "irrx",
- "irtx",
-};
-
-static const char * const uartc_groups[] = {
- "uca",
- "ucb",
-};
-
-static const char * const uartd_groups[] = {
- "gmc",
- "uda",
-};
-
-static const char * const uarte_groups[] = {
- "gma",
- "sdio1",
-};
-
-static const char * const ulpi_groups[] = {
- "uaa",
- "uab",
- "uda",
-};
-
-static const char * const vi_groups[] = {
- "dta",
- "dtb",
- "dtc",
- "dtd",
- "dte",
- "dtf",
-};
-
-static const char * const vi_sensor_clk_groups[] = {
- "csus",
-};
-
-static const char * const xio_groups[] = {
- "ld0",
- "ld1",
- "ld10",
- "ld11",
- "ld12",
- "ld13",
- "ld14",
- "ld15",
- "ld16",
- "ld2",
- "ld3",
- "ld4",
- "ld5",
- "ld6",
- "ld7",
- "ld8",
- "ld9",
- "lhs",
- "lsc0",
- "lspi",
- "lvs",
-};
-
#define FUNCTION(fname) \
{ \
.name = #fname, \
- .groups = fname##_groups, \
- .ngroups = ARRAY_SIZE(fname##_groups), \
}
-static const struct tegra_function tegra20_functions[] = {
+static struct tegra_function tegra20_functions[] = {
FUNCTION(ahb_clk),
FUNCTION(apb_clk),
FUNCTION(audio_sync),
@@ -2881,18 +2256,7 @@ static struct platform_driver tegra20_pinctrl_driver = {
.probe = tegra20_pinctrl_probe,
.remove = tegra_pinctrl_remove,
};
-
-static int __init tegra20_pinctrl_init(void)
-{
- return platform_driver_register(&tegra20_pinctrl_driver);
-}
-arch_initcall(tegra20_pinctrl_init);
-
-static void __exit tegra20_pinctrl_exit(void)
-{
- platform_driver_unregister(&tegra20_pinctrl_driver);
-}
-module_exit(tegra20_pinctrl_exit);
+module_platform_driver(tegra20_pinctrl_driver);
MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra20 pinctrl driver");
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
index 2300deba25bd..41d24f5c2854 100644
--- a/drivers/pinctrl/pinctrl-tegra30.c
+++ b/drivers/pinctrl/pinctrl-tegra30.c
@@ -25,7 +25,7 @@
* Most pins affected by the pinmux can also be GPIOs. Define these first.
* These must match how the GPIO driver names/numbers its pins.
*/
-#define _GPIO(offset) (offset)
+#define _GPIO(offset) (offset)
#define TEGRA_PIN_CLK_32K_OUT_PA0 _GPIO(0)
#define TEGRA_PIN_UART3_CTS_N_PA1 _GPIO(1)
@@ -277,8 +277,8 @@
#define TEGRA_PIN_PEE7 _GPIO(247)
/* All non-GPIO pins follow */
-#define NUM_GPIOS (TEGRA_PIN_PEE7 + 1)
-#define _PIN(offset) (NUM_GPIOS + (offset))
+#define NUM_GPIOS (TEGRA_PIN_PEE7 + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
/* Non-GPIO pins */
#define TEGRA_PIN_CLK_32K_IN _PIN(0)
@@ -2015,1253 +2015,13 @@ enum tegra_mux {
TEGRA_MUX_VI_ALT2,
TEGRA_MUX_VI_ALT3,
};
-static const char * const blink_groups[] = {
- "clk_32k_out_pa0",
-};
-
-static const char * const cec_groups[] = {
- "hdmi_cec_pee3",
- "owr",
-};
-
-static const char * const clk_12m_out_groups[] = {
- "pv3",
-};
-
-static const char * const clk_32k_in_groups[] = {
- "clk_32k_in",
-};
-
-static const char * const core_pwr_req_groups[] = {
- "core_pwr_req",
-};
-
-static const char * const cpu_pwr_req_groups[] = {
- "cpu_pwr_req",
-};
-
-static const char * const crt_groups[] = {
- "crt_hsync_pv6",
- "crt_vsync_pv7",
-};
-
-static const char * const dap_groups[] = {
- "clk1_req_pee2",
- "clk2_req_pcc5",
-};
-
-static const char * const ddr_groups[] = {
- "vi_d0_pt4",
- "vi_d1_pd5",
- "vi_d10_pt2",
- "vi_d11_pt3",
- "vi_d2_pl0",
- "vi_d3_pl1",
- "vi_d4_pl2",
- "vi_d5_pl3",
- "vi_d6_pl4",
- "vi_d7_pl5",
- "vi_d8_pl6",
- "vi_d9_pl7",
- "vi_hsync_pd7",
- "vi_vsync_pd6",
-};
-
-static const char * const dev3_groups[] = {
- "clk3_req_pee1",
-};
-
-static const char * const displaya_groups[] = {
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_fs_pp0",
- "dap3_sclk_pp3",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "lcd_cs0_n_pn4",
- "lcd_cs1_n_pw0",
- "lcd_d0_pe0",
- "lcd_d1_pe1",
- "lcd_d10_pf2",
- "lcd_d11_pf3",
- "lcd_d12_pf4",
- "lcd_d13_pf5",
- "lcd_d14_pf6",
- "lcd_d15_pf7",
- "lcd_d16_pm0",
- "lcd_d17_pm1",
- "lcd_d18_pm2",
- "lcd_d19_pm3",
- "lcd_d2_pe2",
- "lcd_d20_pm4",
- "lcd_d21_pm5",
- "lcd_d22_pm6",
- "lcd_d23_pm7",
- "lcd_d3_pe3",
- "lcd_d4_pe4",
- "lcd_d5_pe5",
- "lcd_d6_pe6",
- "lcd_d7_pe7",
- "lcd_d8_pf0",
- "lcd_d9_pf1",
- "lcd_dc0_pn6",
- "lcd_dc1_pd2",
- "lcd_de_pj1",
- "lcd_hsync_pj3",
- "lcd_m1_pw1",
- "lcd_pclk_pb3",
- "lcd_pwr0_pb2",
- "lcd_pwr1_pc1",
- "lcd_pwr2_pc6",
- "lcd_sck_pz4",
- "lcd_sdin_pz2",
- "lcd_sdout_pn5",
- "lcd_vsync_pj4",
- "lcd_wr_n_pz3",
-};
-
-static const char * const displayb_groups[] = {
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_fs_pp0",
- "dap3_sclk_pp3",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "lcd_cs0_n_pn4",
- "lcd_cs1_n_pw0",
- "lcd_d0_pe0",
- "lcd_d1_pe1",
- "lcd_d10_pf2",
- "lcd_d11_pf3",
- "lcd_d12_pf4",
- "lcd_d13_pf5",
- "lcd_d14_pf6",
- "lcd_d15_pf7",
- "lcd_d16_pm0",
- "lcd_d17_pm1",
- "lcd_d18_pm2",
- "lcd_d19_pm3",
- "lcd_d2_pe2",
- "lcd_d20_pm4",
- "lcd_d21_pm5",
- "lcd_d22_pm6",
- "lcd_d23_pm7",
- "lcd_d3_pe3",
- "lcd_d4_pe4",
- "lcd_d5_pe5",
- "lcd_d6_pe6",
- "lcd_d7_pe7",
- "lcd_d8_pf0",
- "lcd_d9_pf1",
- "lcd_dc0_pn6",
- "lcd_dc1_pd2",
- "lcd_de_pj1",
- "lcd_hsync_pj3",
- "lcd_m1_pw1",
- "lcd_pclk_pb3",
- "lcd_pwr0_pb2",
- "lcd_pwr1_pc1",
- "lcd_pwr2_pc6",
- "lcd_sck_pz4",
- "lcd_sdin_pz2",
- "lcd_sdout_pn5",
- "lcd_vsync_pj4",
- "lcd_wr_n_pz3",
-};
-
-static const char * const dtv_groups[] = {
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
-};
-
-static const char * const extperiph1_groups[] = {
- "clk1_out_pw4",
-};
-
-static const char * const extperiph2_groups[] = {
- "clk2_out_pw5",
-};
-
-static const char * const extperiph3_groups[] = {
- "clk3_out_pee0",
-};
-
-static const char * const gmi_groups[] = {
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_fs_pn0",
- "dap1_sclk_pn3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_fs_pp4",
- "dap4_sclk_pp7",
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad10_ph2",
- "gmi_ad11_ph3",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_ad8_ph0",
- "gmi_ad9_ph1",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_cs4_n_pk2",
- "gmi_cs6_n_pi3",
- "gmi_cs7_n_pi6",
- "gmi_dqs_pi2",
- "gmi_iordy_pi5",
- "gmi_oe_n_pi1",
- "gmi_rst_n_pi4",
- "gmi_wait_pi7",
- "gmi_wp_n_pc7",
- "gmi_wr_n_pi0",
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
- "spi1_cs0_n_px6",
- "spi1_mosi_px4",
- "spi1_sck_px5",
- "spi2_cs0_n_px3",
- "spi2_miso_px1",
- "spi2_mosi_px0",
- "spi2_sck_px2",
- "uart2_cts_n_pj5",
- "uart2_rts_n_pj6",
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "uart3_rxd_pw7",
- "uart3_txd_pw6",
-};
-
-static const char * const gmi_alt_groups[] = {
- "gmi_a16_pj7",
- "gmi_cs3_n_pk4",
- "gmi_cs7_n_pi6",
- "gmi_wp_n_pc7",
-};
-
-static const char * const hda_groups[] = {
- "clk1_req_pee2",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_fs_pn0",
- "dap1_sclk_pn3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "pex_l0_clkreq_n_pdd2",
- "pex_l0_prsnt_n_pdd0",
- "pex_l0_rst_n_pdd1",
- "pex_l1_clkreq_n_pdd6",
- "pex_l1_prsnt_n_pdd4",
- "pex_l1_rst_n_pdd5",
- "pex_l2_clkreq_n_pcc7",
- "pex_l2_prsnt_n_pdd7",
- "pex_l2_rst_n_pcc6",
- "pex_wake_n_pdd3",
- "spdif_in_pk6",
-};
-
-static const char * const hdcp_groups[] = {
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "lcd_pwr0_pb2",
- "lcd_pwr2_pc6",
- "lcd_sck_pz4",
- "lcd_sdout_pn5",
- "lcd_wr_n_pz3",
-};
-
-static const char * const hdmi_groups[] = {
- "hdmi_int_pn7",
-};
-
-static const char * const hsi_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-};
-
-static const char * const i2c1_groups[] = {
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "spdif_in_pk6",
- "spdif_out_pk5",
- "spi2_cs1_n_pw2",
- "spi2_cs2_n_pw3",
-};
-
-static const char * const i2c2_groups[] = {
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
-};
-
-static const char * const i2c3_groups[] = {
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat4_paa4",
-};
-
-static const char * const i2c4_groups[] = {
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-};
-
-static const char * const i2cpwr_groups[] = {
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-};
-
-static const char * const i2s0_groups[] = {
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_fs_pn0",
- "dap1_sclk_pn3",
-};
-
-static const char * const i2s1_groups[] = {
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
-};
-
-static const char * const i2s2_groups[] = {
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_fs_pp0",
- "dap3_sclk_pp3",
-};
-
-static const char * const i2s3_groups[] = {
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_fs_pp4",
- "dap4_sclk_pp7",
-};
-
-static const char * const i2s4_groups[] = {
- "pbb0",
- "pbb7",
- "pcc1",
- "pcc2",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
-};
-
-static const char * const invalid_groups[] = {
- "kb_row3_pr3",
- "sdmmc4_clk_pcc4",
-};
-
-static const char * const kbc_groups[] = {
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
-};
-
-static const char * const mio_groups[] = {
- "kb_col6_pq6",
- "kb_col7_pq7",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
-};
-
-static const char * const nand_groups[] = {
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad10_ph2",
- "gmi_ad11_ph3",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_ad8_ph0",
- "gmi_ad9_ph1",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_cs4_n_pk2",
- "gmi_cs6_n_pi3",
- "gmi_cs7_n_pi6",
- "gmi_dqs_pi2",
- "gmi_iordy_pi5",
- "gmi_oe_n_pi1",
- "gmi_rst_n_pi4",
- "gmi_wait_pi7",
- "gmi_wp_n_pc7",
- "gmi_wr_n_pi0",
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
-};
-
-static const char * const nand_alt_groups[] = {
- "gmi_cs6_n_pi3",
- "gmi_cs7_n_pi6",
- "gmi_rst_n_pi4",
-};
-
-static const char * const owr_groups[] = {
- "pu0",
- "pv2",
- "kb_row5_pr5",
- "owr",
-};
-
-static const char * const pcie_groups[] = {
- "pex_l0_clkreq_n_pdd2",
- "pex_l0_prsnt_n_pdd0",
- "pex_l0_rst_n_pdd1",
- "pex_l1_clkreq_n_pdd6",
- "pex_l1_prsnt_n_pdd4",
- "pex_l1_rst_n_pdd5",
- "pex_l2_clkreq_n_pcc7",
- "pex_l2_prsnt_n_pdd7",
- "pex_l2_rst_n_pcc6",
- "pex_wake_n_pdd3",
-};
-
-static const char * const pwm0_groups[] = {
- "gmi_ad8_ph0",
- "pu3",
- "sdmmc3_dat3_pb4",
- "sdmmc3_dat5_pd0",
- "uart3_rts_n_pc0",
-};
-
-static const char * const pwm1_groups[] = {
- "gmi_ad9_ph1",
- "pu4",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat4_pd1",
-};
-
-static const char * const pwm2_groups[] = {
- "gmi_ad10_ph2",
- "pu5",
- "sdmmc3_clk_pa6",
-};
-
-static const char * const pwm3_groups[] = {
- "gmi_ad11_ph3",
- "pu6",
- "sdmmc3_cmd_pa7",
-};
-
-static const char * const pwr_int_n_groups[] = {
- "pwr_int_n",
-};
-
-static const char * const rsvd1_groups[] = {
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_cs4_n_pk2",
- "gmi_dqs_pi2",
- "gmi_iordy_pi5",
- "gmi_oe_n_pi1",
- "gmi_wait_pi7",
- "gmi_wp_n_pc7",
- "gmi_wr_n_pi0",
- "pu1",
- "pu2",
- "pv0",
- "pv1",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
- "vi_pclk_pt0",
-};
-
-static const char * const rsvd2_groups[] = {
- "clk1_out_pw4",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "clk_32k_in",
- "clk_32k_out_pa0",
- "core_pwr_req",
- "cpu_pwr_req",
- "crt_hsync_pv6",
- "crt_vsync_pv7",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_fs_pp0",
- "dap3_sclk_pp3",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_fs_pp4",
- "dap4_sclk_pp7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "pbb0",
- "pbb7",
- "pcc1",
- "pcc2",
- "pv0",
- "pv1",
- "pv2",
- "pv3",
- "hdmi_cec_pee3",
- "hdmi_int_pn7",
- "jtag_rtck_pu7",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "pwr_int_n",
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat0_py7",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat3_py4",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc4_rst_n_pcc3",
- "spdif_out_pk5",
- "sys_clk_req_pz5",
- "uart3_cts_n_pa1",
- "uart3_rxd_pw7",
- "uart3_txd_pw6",
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "vi_d0_pt4",
- "vi_d10_pt2",
- "vi_d11_pt3",
- "vi_hsync_pd7",
- "vi_vsync_pd6",
-};
-
-static const char * const rsvd3_groups[] = {
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "clk1_out_pw4",
- "clk1_req_pee2",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "clk_32k_in",
- "clk_32k_out_pa0",
- "core_pwr_req",
- "cpu_pwr_req",
- "crt_hsync_pv6",
- "crt_vsync_pv7",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "pbb0",
- "pbb7",
- "pcc1",
- "pcc2",
- "pv0",
- "pv1",
- "pv2",
- "pv3",
- "hdmi_cec_pee3",
- "hdmi_int_pn7",
- "jtag_rtck_pu7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "lcd_d0_pe0",
- "lcd_d1_pe1",
- "lcd_d10_pf2",
- "lcd_d11_pf3",
- "lcd_d12_pf4",
- "lcd_d13_pf5",
- "lcd_d14_pf6",
- "lcd_d15_pf7",
- "lcd_d16_pm0",
- "lcd_d17_pm1",
- "lcd_d18_pm2",
- "lcd_d19_pm3",
- "lcd_d2_pe2",
- "lcd_d20_pm4",
- "lcd_d21_pm5",
- "lcd_d22_pm6",
- "lcd_d23_pm7",
- "lcd_d3_pe3",
- "lcd_d4_pe4",
- "lcd_d5_pe5",
- "lcd_d6_pe6",
- "lcd_d7_pe7",
- "lcd_d8_pf0",
- "lcd_d9_pf1",
- "lcd_dc0_pn6",
- "lcd_dc1_pd2",
- "lcd_de_pj1",
- "lcd_hsync_pj3",
- "lcd_m1_pw1",
- "lcd_pclk_pb3",
- "lcd_pwr1_pc1",
- "lcd_vsync_pj4",
- "owr",
- "pex_l0_clkreq_n_pdd2",
- "pex_l0_prsnt_n_pdd0",
- "pex_l0_rst_n_pdd1",
- "pex_l1_clkreq_n_pdd6",
- "pex_l1_prsnt_n_pdd4",
- "pex_l1_rst_n_pdd5",
- "pex_l2_clkreq_n_pcc7",
- "pex_l2_prsnt_n_pdd7",
- "pex_l2_rst_n_pcc6",
- "pex_wake_n_pdd3",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "pwr_int_n",
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc4_rst_n_pcc3",
- "sys_clk_req_pz5",
-};
-
-static const char * const rsvd4_groups[] = {
- "clk1_out_pw4",
- "clk1_req_pee2",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "clk_32k_in",
- "clk_32k_out_pa0",
- "core_pwr_req",
- "cpu_pwr_req",
- "crt_hsync_pv6",
- "crt_vsync_pv7",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_fs_pp4",
- "dap4_sclk_pp7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "gmi_a19_pk7",
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad10_ph2",
- "gmi_ad11_ph3",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_ad8_ph0",
- "gmi_ad9_ph1",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs2_n_pk3",
- "gmi_cs4_n_pk2",
- "gmi_dqs_pi2",
- "gmi_iordy_pi5",
- "gmi_oe_n_pi1",
- "gmi_rst_n_pi4",
- "gmi_wait_pi7",
- "gmi_wr_n_pi0",
- "pcc2",
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
- "pv0",
- "pv1",
- "pv2",
- "pv3",
- "hdmi_cec_pee3",
- "hdmi_int_pn7",
- "jtag_rtck_pu7",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row4_pr4",
- "lcd_cs0_n_pn4",
- "lcd_cs1_n_pw0",
- "lcd_d0_pe0",
- "lcd_d1_pe1",
- "lcd_d10_pf2",
- "lcd_d11_pf3",
- "lcd_d12_pf4",
- "lcd_d13_pf5",
- "lcd_d14_pf6",
- "lcd_d15_pf7",
- "lcd_d16_pm0",
- "lcd_d17_pm1",
- "lcd_d18_pm2",
- "lcd_d19_pm3",
- "lcd_d2_pe2",
- "lcd_d20_pm4",
- "lcd_d21_pm5",
- "lcd_d22_pm6",
- "lcd_d23_pm7",
- "lcd_d3_pe3",
- "lcd_d4_pe4",
- "lcd_d5_pe5",
- "lcd_d6_pe6",
- "lcd_d7_pe7",
- "lcd_d8_pf0",
- "lcd_d9_pf1",
- "lcd_dc0_pn6",
- "lcd_dc1_pd2",
- "lcd_de_pj1",
- "lcd_hsync_pj3",
- "lcd_m1_pw1",
- "lcd_pclk_pb3",
- "lcd_pwr1_pc1",
- "lcd_sdin_pz2",
- "lcd_vsync_pj4",
- "owr",
- "pex_l0_clkreq_n_pdd2",
- "pex_l0_prsnt_n_pdd0",
- "pex_l0_rst_n_pdd1",
- "pex_l1_clkreq_n_pdd6",
- "pex_l1_prsnt_n_pdd4",
- "pex_l1_rst_n_pdd5",
- "pex_l2_clkreq_n_pcc7",
- "pex_l2_prsnt_n_pdd7",
- "pex_l2_rst_n_pcc6",
- "pex_wake_n_pdd3",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "pwr_int_n",
- "spi1_miso_px7",
- "sys_clk_req_pz5",
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "uart3_rxd_pw7",
- "uart3_txd_pw6",
- "vi_d0_pt4",
- "vi_d1_pd5",
- "vi_d10_pt2",
- "vi_d11_pt3",
- "vi_d2_pl0",
- "vi_d3_pl1",
- "vi_d4_pl2",
- "vi_d5_pl3",
- "vi_d6_pl4",
- "vi_d7_pl5",
- "vi_d8_pl6",
- "vi_d9_pl7",
- "vi_hsync_pd7",
- "vi_pclk_pt0",
- "vi_vsync_pd6",
-};
-
-static const char * const rtck_groups[] = {
- "jtag_rtck_pu7",
-};
-
-static const char * const sata_groups[] = {
- "gmi_cs6_n_pi3",
-};
-
-static const char * const sdmmc1_groups[] = {
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat0_py7",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat3_py4",
-};
-
-static const char * const sdmmc2_groups[] = {
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_fs_pn0",
- "dap1_sclk_pn3",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "spdif_in_pk6",
- "spdif_out_pk5",
- "vi_d1_pd5",
- "vi_d2_pl0",
- "vi_d3_pl1",
- "vi_d4_pl2",
- "vi_d5_pl3",
- "vi_d6_pl4",
- "vi_d7_pl5",
- "vi_d8_pl6",
- "vi_d9_pl7",
- "vi_pclk_pt0",
-};
-
-static const char * const sdmmc3_groups[] = {
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
- "sdmmc3_dat4_pd1",
- "sdmmc3_dat5_pd0",
- "sdmmc3_dat6_pd3",
- "sdmmc3_dat7_pd4",
-};
-
-static const char * const sdmmc4_groups[] = {
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "cam_mclk_pcc0",
- "pbb0",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "pbb7",
- "pcc1",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
- "sdmmc4_rst_n_pcc3",
-};
-
-static const char * const spdif_groups[] = {
- "sdmmc3_dat6_pd3",
- "sdmmc3_dat7_pd4",
- "spdif_in_pk6",
- "spdif_out_pk5",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
-};
-
-static const char * const spi1_groups[] = {
- "spi1_cs0_n_px6",
- "spi1_miso_px7",
- "spi1_mosi_px4",
- "spi1_sck_px5",
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
-};
-
-static const char * const spi2_groups[] = {
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat4_pd1",
- "sdmmc3_dat5_pd0",
- "sdmmc3_dat6_pd3",
- "sdmmc3_dat7_pd4",
- "spi1_cs0_n_px6",
- "spi1_mosi_px4",
- "spi1_sck_px5",
- "spi2_cs0_n_px3",
- "spi2_cs1_n_pw2",
- "spi2_cs2_n_pw3",
- "spi2_miso_px1",
- "spi2_mosi_px0",
- "spi2_sck_px2",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-};
-
-static const char * const spi2_alt_groups[] = {
- "spi1_cs0_n_px6",
- "spi1_miso_px7",
- "spi1_mosi_px4",
- "spi1_sck_px5",
- "spi2_cs1_n_pw2",
- "spi2_cs2_n_pw3",
-};
-
-static const char * const spi3_groups[] = {
- "sdmmc3_clk_pa6",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "spi1_miso_px7",
- "spi2_cs0_n_px3",
- "spi2_cs1_n_pw2",
- "spi2_cs2_n_pw3",
- "spi2_miso_px1",
- "spi2_mosi_px0",
- "spi2_sck_px2",
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
-};
-
-static const char * const spi4_groups[] = {
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
- "sdmmc3_dat4_pd1",
- "sdmmc3_dat5_pd0",
- "sdmmc3_dat6_pd3",
- "sdmmc3_dat7_pd4",
- "uart2_cts_n_pj5",
- "uart2_rts_n_pj6",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
-};
-
-static const char * const spi5_groups[] = {
- "lcd_cs0_n_pn4",
- "lcd_cs1_n_pw0",
- "lcd_pwr0_pb2",
- "lcd_pwr2_pc6",
- "lcd_sck_pz4",
- "lcd_sdin_pz2",
- "lcd_sdout_pn5",
- "lcd_wr_n_pz3",
-};
-
-static const char * const spi6_groups[] = {
- "spi2_cs0_n_px3",
- "spi2_miso_px1",
- "spi2_mosi_px0",
- "spi2_sck_px2",
-};
-
-static const char * const sysclk_groups[] = {
- "sys_clk_req_pz5",
-};
-
-static const char * const test_groups[] = {
- "kb_col0_pq0",
- "kb_col1_pq1",
-};
-
-static const char * const trace_groups[] = {
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "kb_row4_pr4",
- "kb_row5_pr5",
-};
-
-static const char * const uarta_groups[] = {
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat0_py7",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat3_py4",
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "uart2_cts_n_pj5",
- "uart2_rts_n_pj6",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-};
-
-static const char * const uartb_groups[] = {
- "uart2_cts_n_pj5",
- "uart2_rts_n_pj6",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
-};
-
-static const char * const uartc_groups[] = {
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "uart3_rxd_pw7",
- "uart3_txd_pw6",
-};
-
-static const char * const uartd_groups[] = {
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
-};
-
-static const char * const uarte_groups[] = {
- "sdmmc1_dat0_py7",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat3_py4",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
-};
-
-static const char * const ulpi_groups[] = {
- "ulpi_clk_py0",
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
-};
-
-static const char * const vgp1_groups[] = {
- "cam_i2c_scl_pbb1",
-};
-
-static const char * const vgp2_groups[] = {
- "cam_i2c_sda_pbb2",
-};
-
-static const char * const vgp3_groups[] = {
- "pbb3",
- "sdmmc4_dat5_paa5",
-};
-
-static const char * const vgp4_groups[] = {
- "pbb4",
- "sdmmc4_dat6_paa6",
-};
-
-static const char * const vgp5_groups[] = {
- "pbb5",
- "sdmmc4_dat7_paa7",
-};
-
-static const char * const vgp6_groups[] = {
- "pbb6",
- "sdmmc4_rst_n_pcc3",
-};
-
-static const char * const vi_groups[] = {
- "cam_mclk_pcc0",
- "vi_d0_pt4",
- "vi_d1_pd5",
- "vi_d10_pt2",
- "vi_d11_pt3",
- "vi_d2_pl0",
- "vi_d3_pl1",
- "vi_d4_pl2",
- "vi_d5_pl3",
- "vi_d6_pl4",
- "vi_d7_pl5",
- "vi_d8_pl6",
- "vi_d9_pl7",
- "vi_hsync_pd7",
- "vi_mclk_pt1",
- "vi_pclk_pt0",
- "vi_vsync_pd6",
-};
-
-static const char * const vi_alt1_groups[] = {
- "cam_mclk_pcc0",
- "vi_mclk_pt1",
-};
-
-static const char * const vi_alt2_groups[] = {
- "vi_mclk_pt1",
-};
-
-static const char * const vi_alt3_groups[] = {
- "cam_mclk_pcc0",
- "vi_mclk_pt1",
-};
#define FUNCTION(fname) \
{ \
.name = #fname, \
- .groups = fname##_groups, \
- .ngroups = ARRAY_SIZE(fname##_groups), \
}
-static const struct tegra_function tegra30_functions[] = {
+static struct tegra_function tegra30_functions[] = {
FUNCTION(blink),
FUNCTION(cec),
FUNCTION(clk_12m_out),
@@ -3345,11 +2105,11 @@ static const struct tegra_function tegra30_functions[] = {
FUNCTION(vi_alt3),
};
-#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
-#define PINGROUP_REG_A 0x3000 /* bank 1 */
+#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
+#define PINGROUP_REG_A 0x3000 /* bank 1 */
-#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
-#define PINGROUP_REG_N(r) -1
+#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
+#define PINGROUP_REG_N(r) -1
#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior) \
{ \
@@ -3357,12 +2117,12 @@ static const struct tegra_function tegra30_functions[] = {
.pins = pg_name##_pins, \
.npins = ARRAY_SIZE(pg_name##_pins), \
.funcs = { \
- TEGRA_MUX_ ## f0, \
- TEGRA_MUX_ ## f1, \
- TEGRA_MUX_ ## f2, \
- TEGRA_MUX_ ## f3, \
+ TEGRA_MUX_##f0, \
+ TEGRA_MUX_##f1, \
+ TEGRA_MUX_##f2, \
+ TEGRA_MUX_##f3, \
}, \
- .func_safe = TEGRA_MUX_ ## f_safe, \
+ .func_safe = TEGRA_MUX_##f_safe, \
.mux_reg = PINGROUP_REG_Y(r), \
.mux_bank = 1, \
.mux_bit = 0, \
@@ -3389,6 +2149,9 @@ static const struct tegra_function tegra30_functions[] = {
.drvtype_reg = -1, \
}
+#define DRV_PINGROUP_REG_Y(r) ((r) - DRV_PINGROUP_REG_A)
+#define DRV_PINGROUP_REG_N(r) -1
+
#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
drvdn_b, drvdn_w, drvup_b, drvup_w, \
slwr_b, slwr_w, slwf_b, slwf_w) \
@@ -3404,7 +2167,7 @@ static const struct tegra_function tegra30_functions[] = {
.lock_reg = -1, \
.ioreset_reg = -1, \
.rcv_sel_reg = -1, \
- .drv_reg = ((r) - DRV_PINGROUP_REG_A), \
+ .drv_reg = DRV_PINGROUP_REG_Y(r), \
.drv_bank = 0, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
@@ -3422,7 +2185,6 @@ static const struct tegra_function tegra30_functions[] = {
static const struct tegra_pingroup tegra30_groups[] = {
/* pg_name, f0, f1, f2, f3, safe, r, od, ior */
- /* FIXME: Fill in correct data in safe column */
PINGROUP(clk_32k_out_pa0, BLINK, RSVD2, RSVD3, RSVD4, RSVD4, 0x331c, N, N),
PINGROUP(uart3_cts_n_pa1, UARTC, RSVD2, GMI, RSVD4, RSVD4, 0x317c, N, N),
PINGROUP(dap2_fs_pa2, I2S1, HDA, RSVD3, GMI, RSVD3, 0x3358, N, N),
@@ -3735,6 +2497,7 @@ static struct of_device_id tegra30_pinctrl_of_match[] = {
{ .compatible = "nvidia,tegra30-pinmux", },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra30_pinctrl_of_match);
static struct platform_driver tegra30_pinctrl_driver = {
.driver = {
@@ -3745,20 +2508,8 @@ static struct platform_driver tegra30_pinctrl_driver = {
.probe = tegra30_pinctrl_probe,
.remove = tegra_pinctrl_remove,
};
-
-static int __init tegra30_pinctrl_init(void)
-{
- return platform_driver_register(&tegra30_pinctrl_driver);
-}
-arch_initcall(tegra30_pinctrl_init);
-
-static void __exit tegra30_pinctrl_exit(void)
-{
- platform_driver_unregister(&tegra30_pinctrl_driver);
-}
-module_exit(tegra30_pinctrl_exit);
+module_platform_driver(tegra30_pinctrl_driver);
MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra30 pinctrl driver");
MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, tegra30_pinctrl_of_match);
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index c381ae63c508..48093719167a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -2260,6 +2260,42 @@ static const unsigned int msiof0_tx_pins[] = {
static const unsigned int msiof0_tx_mux[] = {
MSIOF0_TXD_MARK,
};
+
+static const unsigned int msiof0_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof0_clk_b_mux[] = {
+ MSIOF0_SCK_B_MARK,
+};
+static const unsigned int msiof0_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 12),
+};
+static const unsigned int msiof0_ss1_b_mux[] = {
+ MSIOF0_SS1_B_MARK,
+};
+static const unsigned int msiof0_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof0_ss2_b_mux[] = {
+ MSIOF0_SS2_B_MARK,
+};
+static const unsigned int msiof0_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 29),
+};
+static const unsigned int msiof0_rx_b_mux[] = {
+ MSIOF0_RXD_B_MARK,
+};
+static const unsigned int msiof0_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 28),
+};
+static const unsigned int msiof0_tx_b_mux[] = {
+ MSIOF0_TXD_B_MARK,
+};
/* - MSIOF1 ----------------------------------------------------------------- */
static const unsigned int msiof1_clk_pins[] = {
/* SCK */
@@ -2303,6 +2339,42 @@ static const unsigned int msiof1_tx_pins[] = {
static const unsigned int msiof1_tx_mux[] = {
MSIOF1_TXD_MARK,
};
+
+static const unsigned int msiof1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 16),
+};
+static const unsigned int msiof1_clk_b_mux[] = {
+ MSIOF1_SCK_B_MARK,
+};
+static const unsigned int msiof1_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof1_ss1_b_mux[] = {
+ MSIOF1_SS1_B_MARK,
+};
+static const unsigned int msiof1_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 19),
+};
+static const unsigned int msiof1_ss2_b_mux[] = {
+ MSIOF1_SS2_B_MARK,
+};
+static const unsigned int msiof1_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int msiof1_rx_b_mux[] = {
+ MSIOF1_RXD_B_MARK,
+};
+static const unsigned int msiof1_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 20),
+};
+static const unsigned int msiof1_tx_b_mux[] = {
+ MSIOF1_TXD_B_MARK,
+};
/* - MSIOF2 ----------------------------------------------------------------- */
static const unsigned int msiof2_clk_pins[] = {
/* SCK */
@@ -2389,6 +2461,58 @@ static const unsigned int msiof3_tx_pins[] = {
static const unsigned int msiof3_tx_mux[] = {
MSIOF3_TXD_MARK,
};
+
+static const unsigned int msiof3_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof3_clk_b_mux[] = {
+ MSIOF3_SCK_B_MARK,
+};
+static const unsigned int msiof3_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_sync_b_mux[] = {
+ MSIOF3_SYNC_B_MARK,
+};
+static const unsigned int msiof3_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_rx_b_mux[] = {
+ MSIOF3_RXD_B_MARK,
+};
+static const unsigned int msiof3_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_tx_b_mux[] = {
+ MSIOF3_TXD_B_MARK,
+};
+/* - QSPI ------------------------------------------------------------------- */
+static const unsigned int qspi_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int qspi_ctrl_mux[] = {
+ SPCLK_MARK, SSL_MARK,
+};
+static const unsigned int qspi_data2_pins[] = {
+ /* MOSI_IO0, MISO_IO1 */
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int qspi_data2_mux[] = {
+ MOSI_IO0_MARK, MISO_IO1_MARK,
+};
+static const unsigned int qspi_data4_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int qspi_data4_mux[] = {
+ MOSI_IO0_MARK, MISO_IO1_MARK, IO2_MARK, IO3_MARK,
+};
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -3231,6 +3355,13 @@ static const unsigned int usb0_pins[] = {
static const unsigned int usb0_mux[] = {
USB0_PWEN_MARK, USB0_OVC_VBUS_MARK,
};
+static const unsigned int usb0_ovc_vbus_pins[] = {
+ /* OVC/VBUS */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int usb0_ovc_vbus_mux[] = {
+ USB0_OVC_VBUS_MARK,
+};
/* - USB1 ------------------------------------------------------------------- */
static const unsigned int usb1_pins[] = {
/* PWEN, OVC */
@@ -3653,12 +3784,22 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(msiof0_ss2),
SH_PFC_PIN_GROUP(msiof0_rx),
SH_PFC_PIN_GROUP(msiof0_tx),
+ SH_PFC_PIN_GROUP(msiof0_clk_b),
+ SH_PFC_PIN_GROUP(msiof0_ss1_b),
+ SH_PFC_PIN_GROUP(msiof0_ss2_b),
+ SH_PFC_PIN_GROUP(msiof0_rx_b),
+ SH_PFC_PIN_GROUP(msiof0_tx_b),
SH_PFC_PIN_GROUP(msiof1_clk),
SH_PFC_PIN_GROUP(msiof1_sync),
SH_PFC_PIN_GROUP(msiof1_ss1),
SH_PFC_PIN_GROUP(msiof1_ss2),
SH_PFC_PIN_GROUP(msiof1_rx),
SH_PFC_PIN_GROUP(msiof1_tx),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_rx_b),
+ SH_PFC_PIN_GROUP(msiof1_tx_b),
SH_PFC_PIN_GROUP(msiof2_clk),
SH_PFC_PIN_GROUP(msiof2_sync),
SH_PFC_PIN_GROUP(msiof2_ss1),
@@ -3671,6 +3812,13 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(msiof3_ss2),
SH_PFC_PIN_GROUP(msiof3_rx),
SH_PFC_PIN_GROUP(msiof3_tx),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_rx_b),
+ SH_PFC_PIN_GROUP(msiof3_tx_b),
+ SH_PFC_PIN_GROUP(qspi_ctrl),
+ SH_PFC_PIN_GROUP(qspi_data2),
+ SH_PFC_PIN_GROUP(qspi_data4),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -3789,6 +3937,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(tpu0_to2),
SH_PFC_PIN_GROUP(tpu0_to3),
SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb0_ovc_vbus),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb2),
VIN_DATA_PIN_GROUP(vin0_data, 24),
@@ -3941,6 +4090,11 @@ static const char * const msiof0_groups[] = {
"msiof0_ss2",
"msiof0_rx",
"msiof0_tx",
+ "msiof0_clk_b",
+ "msiof0_ss1_b",
+ "msiof0_ss2_b",
+ "msiof0_rx_b",
+ "msiof0_tx_b",
};
static const char * const msiof1_groups[] = {
@@ -3950,6 +4104,11 @@ static const char * const msiof1_groups[] = {
"msiof1_ss2",
"msiof1_rx",
"msiof1_tx",
+ "msiof1_clk_b",
+ "msiof1_ss1_b",
+ "msiof1_ss2_b",
+ "msiof1_rx_b",
+ "msiof1_tx_b",
};
static const char * const msiof2_groups[] = {
@@ -3968,6 +4127,16 @@ static const char * const msiof3_groups[] = {
"msiof3_ss2",
"msiof3_rx",
"msiof3_tx",
+ "msiof3_clk_b",
+ "msiof3_sync_b",
+ "msiof3_rx_b",
+ "msiof3_tx_b",
+};
+
+static const char * const qspi_groups[] = {
+ "qspi_ctrl",
+ "qspi_data2",
+ "qspi_data4",
};
static const char * const scif0_groups[] = {
@@ -4134,6 +4303,7 @@ static const char * const tpu0_groups[] = {
static const char * const usb0_groups[] = {
"usb0",
+ "usb0_ovc_vbus",
};
static const char * const usb1_groups[] = {
@@ -4213,6 +4383,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(qspi),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 567d6918d50b..5186d70c49d4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -1945,6 +1945,50 @@ static const unsigned int i2c4_c_pins[] = {
static const unsigned int i2c4_c_mux[] = {
SCL4_C_MARK, SDA4_C_MARK,
};
+/* - I2C7 ------------------------------------------------------------------- */
+static const unsigned int i2c7_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int i2c7_mux[] = {
+ SCL7_MARK, SDA7_MARK,
+};
+static const unsigned int i2c7_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+};
+static const unsigned int i2c7_b_mux[] = {
+ SCL7_B_MARK, SDA7_B_MARK,
+};
+static const unsigned int i2c7_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int i2c7_c_mux[] = {
+ SCL7_C_MARK, SDA7_C_MARK,
+};
+/* - I2C8 ------------------------------------------------------------------- */
+static const unsigned int i2c8_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+};
+static const unsigned int i2c8_mux[] = {
+ SCL8_MARK, SDA8_MARK,
+};
+static const unsigned int i2c8_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
+};
+static const unsigned int i2c8_b_mux[] = {
+ SCL8_B_MARK, SDA8_B_MARK,
+};
+static const unsigned int i2c8_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(6, 22), RCAR_GP_PIN(6, 23),
+};
+static const unsigned int i2c8_c_mux[] = {
+ SCL8_C_MARK, SDA8_C_MARK,
+};
/* - INTC ------------------------------------------------------------------- */
static const unsigned int intc_irq0_pins[] = {
/* IRQ */
@@ -2051,6 +2095,92 @@ static const unsigned int msiof0_tx_pins[] = {
static const unsigned int msiof0_tx_mux[] = {
MSIOF0_TXD_MARK,
};
+
+static const unsigned int msiof0_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 16),
+};
+static const unsigned int msiof0_clk_b_mux[] = {
+ MSIOF0_SCK_B_MARK,
+};
+static const unsigned int msiof0_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 17),
+};
+static const unsigned int msiof0_sync_b_mux[] = {
+ MSIOF0_SYNC_B_MARK,
+};
+static const unsigned int msiof0_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof0_ss1_b_mux[] = {
+ MSIOF0_SS1_B_MARK,
+};
+static const unsigned int msiof0_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 19),
+};
+static const unsigned int msiof0_ss2_b_mux[] = {
+ MSIOF0_SS2_B_MARK,
+};
+static const unsigned int msiof0_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 21),
+};
+static const unsigned int msiof0_rx_b_mux[] = {
+ MSIOF0_RXD_B_MARK,
+};
+static const unsigned int msiof0_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 20),
+};
+static const unsigned int msiof0_tx_b_mux[] = {
+ MSIOF0_TXD_B_MARK,
+};
+
+static const unsigned int msiof0_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 26),
+};
+static const unsigned int msiof0_clk_c_mux[] = {
+ MSIOF0_SCK_C_MARK,
+};
+static const unsigned int msiof0_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 25),
+};
+static const unsigned int msiof0_sync_c_mux[] = {
+ MSIOF0_SYNC_C_MARK,
+};
+static const unsigned int msiof0_ss1_c_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 27),
+};
+static const unsigned int msiof0_ss1_c_mux[] = {
+ MSIOF0_SS1_C_MARK,
+};
+static const unsigned int msiof0_ss2_c_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 28),
+};
+static const unsigned int msiof0_ss2_c_mux[] = {
+ MSIOF0_SS2_C_MARK,
+};
+static const unsigned int msiof0_rx_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 29),
+};
+static const unsigned int msiof0_rx_c_mux[] = {
+ MSIOF0_RXD_C_MARK,
+};
+static const unsigned int msiof0_tx_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 30),
+};
+static const unsigned int msiof0_tx_c_mux[] = {
+ MSIOF0_TXD_C_MARK,
+};
/* - MSIOF1 ----------------------------------------------------------------- */
static const unsigned int msiof1_clk_pins[] = {
/* SCK */
@@ -2094,6 +2224,143 @@ static const unsigned int msiof1_tx_pins[] = {
static const unsigned int msiof1_tx_mux[] = {
MSIOF1_TXD_MARK,
};
+
+static const unsigned int msiof1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 29),
+};
+static const unsigned int msiof1_clk_b_mux[] = {
+ MSIOF1_SCK_B_MARK,
+};
+static const unsigned int msiof1_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 30),
+};
+static const unsigned int msiof1_sync_b_mux[] = {
+ MSIOF1_SYNC_B_MARK,
+};
+static const unsigned int msiof1_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(2, 31),
+};
+static const unsigned int msiof1_ss1_b_mux[] = {
+ MSIOF1_SS1_B_MARK,
+};
+static const unsigned int msiof1_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(7, 16),
+};
+static const unsigned int msiof1_ss2_b_mux[] = {
+ MSIOF1_SS2_B_MARK,
+};
+static const unsigned int msiof1_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(7, 18),
+};
+static const unsigned int msiof1_rx_b_mux[] = {
+ MSIOF1_RXD_B_MARK,
+};
+static const unsigned int msiof1_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(7, 17),
+};
+static const unsigned int msiof1_tx_b_mux[] = {
+ MSIOF1_TXD_B_MARK,
+};
+
+static const unsigned int msiof1_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int msiof1_clk_c_mux[] = {
+ MSIOF1_SCK_C_MARK,
+};
+static const unsigned int msiof1_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int msiof1_sync_c_mux[] = {
+ MSIOF1_SYNC_C_MARK,
+};
+static const unsigned int msiof1_rx_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 18),
+};
+static const unsigned int msiof1_rx_c_mux[] = {
+ MSIOF1_RXD_C_MARK,
+};
+static const unsigned int msiof1_tx_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 17),
+};
+static const unsigned int msiof1_tx_c_mux[] = {
+ MSIOF1_TXD_C_MARK,
+};
+
+static const unsigned int msiof1_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 28),
+};
+static const unsigned int msiof1_clk_d_mux[] = {
+ MSIOF1_SCK_D_MARK,
+};
+static const unsigned int msiof1_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 30),
+};
+static const unsigned int msiof1_sync_d_mux[] = {
+ MSIOF1_SYNC_D_MARK,
+};
+static const unsigned int msiof1_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 29),
+};
+static const unsigned int msiof1_ss1_d_mux[] = {
+ MSIOF1_SS1_D_MARK,
+};
+static const unsigned int msiof1_rx_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 27),
+};
+static const unsigned int msiof1_rx_d_mux[] = {
+ MSIOF1_RXD_D_MARK,
+};
+static const unsigned int msiof1_tx_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 26),
+};
+static const unsigned int msiof1_tx_d_mux[] = {
+ MSIOF1_TXD_D_MARK,
+};
+
+static const unsigned int msiof1_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int msiof1_clk_e_mux[] = {
+ MSIOF1_SCK_E_MARK,
+};
+static const unsigned int msiof1_sync_e_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int msiof1_sync_e_mux[] = {
+ MSIOF1_SYNC_E_MARK,
+};
+static const unsigned int msiof1_rx_e_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 17),
+};
+static const unsigned int msiof1_rx_e_mux[] = {
+ MSIOF1_RXD_E_MARK,
+};
+static const unsigned int msiof1_tx_e_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int msiof1_tx_e_mux[] = {
+ MSIOF1_TXD_E_MARK,
+};
/* - MSIOF2 ----------------------------------------------------------------- */
static const unsigned int msiof2_clk_pins[] = {
/* SCK */
@@ -2137,6 +2404,197 @@ static const unsigned int msiof2_tx_pins[] = {
static const unsigned int msiof2_tx_mux[] = {
MSIOF2_TXD_MARK,
};
+
+static const unsigned int msiof2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 0),
+};
+static const unsigned int msiof2_clk_b_mux[] = {
+ MSIOF2_SCK_B_MARK,
+};
+static const unsigned int msiof2_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(3, 1),
+};
+static const unsigned int msiof2_sync_b_mux[] = {
+ MSIOF2_SYNC_B_MARK,
+};
+static const unsigned int msiof2_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(3, 8),
+};
+static const unsigned int msiof2_ss1_b_mux[] = {
+ MSIOF2_SS1_B_MARK,
+};
+static const unsigned int msiof2_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(3, 9),
+};
+static const unsigned int msiof2_ss2_b_mux[] = {
+ MSIOF2_SS2_B_MARK,
+};
+static const unsigned int msiof2_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(3, 17),
+};
+static const unsigned int msiof2_rx_b_mux[] = {
+ MSIOF2_RXD_B_MARK,
+};
+static const unsigned int msiof2_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(3, 16),
+};
+static const unsigned int msiof2_tx_b_mux[] = {
+ MSIOF2_TXD_B_MARK,
+};
+
+static const unsigned int msiof2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int msiof2_clk_c_mux[] = {
+ MSIOF2_SCK_C_MARK,
+};
+static const unsigned int msiof2_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int msiof2_sync_c_mux[] = {
+ MSIOF2_SYNC_C_MARK,
+};
+static const unsigned int msiof2_rx_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int msiof2_rx_c_mux[] = {
+ MSIOF2_RXD_C_MARK,
+};
+static const unsigned int msiof2_tx_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int msiof2_tx_c_mux[] = {
+ MSIOF2_TXD_C_MARK,
+};
+
+static const unsigned int msiof2_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int msiof2_clk_d_mux[] = {
+ MSIOF2_SCK_D_MARK,
+};
+static const unsigned int msiof2_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int msiof2_sync_d_mux[] = {
+ MSIOF2_SYNC_D_MARK,
+};
+static const unsigned int msiof2_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(2, 17),
+};
+static const unsigned int msiof2_ss1_d_mux[] = {
+ MSIOF2_SS1_D_MARK,
+};
+static const unsigned int msiof2_ss2_d_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(2, 19),
+};
+static const unsigned int msiof2_ss2_d_mux[] = {
+ MSIOF2_SS2_D_MARK,
+};
+static const unsigned int msiof2_rx_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 18),
+};
+static const unsigned int msiof2_rx_d_mux[] = {
+ MSIOF2_RXD_D_MARK,
+};
+static const unsigned int msiof2_tx_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int msiof2_tx_d_mux[] = {
+ MSIOF2_TXD_D_MARK,
+};
+
+static const unsigned int msiof2_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(7, 15),
+};
+static const unsigned int msiof2_clk_e_mux[] = {
+ MSIOF2_SCK_E_MARK,
+};
+static const unsigned int msiof2_sync_e_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(7, 16),
+};
+static const unsigned int msiof2_sync_e_mux[] = {
+ MSIOF2_SYNC_E_MARK,
+};
+static const unsigned int msiof2_rx_e_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(7, 14),
+};
+static const unsigned int msiof2_rx_e_mux[] = {
+ MSIOF2_RXD_E_MARK,
+};
+static const unsigned int msiof2_tx_e_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(7, 13),
+};
+static const unsigned int msiof2_tx_e_mux[] = {
+ MSIOF2_TXD_E_MARK,
+};
+/* - QSPI ------------------------------------------------------------------- */
+static const unsigned int qspi_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int qspi_ctrl_mux[] = {
+ SPCLK_MARK, SSL_MARK,
+};
+static const unsigned int qspi_data2_pins[] = {
+ /* MOSI_IO0, MISO_IO1 */
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int qspi_data2_mux[] = {
+ MOSI_IO0_MARK, MISO_IO1_MARK,
+};
+static const unsigned int qspi_data4_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int qspi_data4_mux[] = {
+ MOSI_IO0_MARK, MISO_IO1_MARK, IO2_MARK, IO3_MARK,
+};
+
+static const unsigned int qspi_ctrl_b_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 5),
+};
+static const unsigned int qspi_ctrl_b_mux[] = {
+ SPCLK_B_MARK, SSL_B_MARK,
+};
+static const unsigned int qspi_data2_b_pins[] = {
+ /* MOSI_IO0, MISO_IO1 */
+ RCAR_GP_PIN(6, 1), RCAR_GP_PIN(6, 2),
+};
+static const unsigned int qspi_data2_b_mux[] = {
+ MOSI_IO0_B_MARK, MISO_IO1_B_MARK,
+};
+static const unsigned int qspi_data4_b_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(6, 1), RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 3),
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int qspi_data4_b_mux[] = {
+	MOSI_IO0_B_MARK, MISO_IO1_B_MARK,
+	IO2_B_MARK, IO3_B_MARK,
+};
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -3125,6 +3583,12 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(i2c4),
SH_PFC_PIN_GROUP(i2c4_b),
SH_PFC_PIN_GROUP(i2c4_c),
+ SH_PFC_PIN_GROUP(i2c7),
+ SH_PFC_PIN_GROUP(i2c7_b),
+ SH_PFC_PIN_GROUP(i2c7_c),
+ SH_PFC_PIN_GROUP(i2c8),
+ SH_PFC_PIN_GROUP(i2c8_b),
+ SH_PFC_PIN_GROUP(i2c8_c),
SH_PFC_PIN_GROUP(intc_irq0),
SH_PFC_PIN_GROUP(intc_irq1),
SH_PFC_PIN_GROUP(intc_irq2),
@@ -3139,18 +3603,75 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(msiof0_ss2),
SH_PFC_PIN_GROUP(msiof0_rx),
SH_PFC_PIN_GROUP(msiof0_tx),
+ SH_PFC_PIN_GROUP(msiof0_clk_b),
+ SH_PFC_PIN_GROUP(msiof0_sync_b),
+ SH_PFC_PIN_GROUP(msiof0_ss1_b),
+ SH_PFC_PIN_GROUP(msiof0_ss2_b),
+ SH_PFC_PIN_GROUP(msiof0_rx_b),
+ SH_PFC_PIN_GROUP(msiof0_tx_b),
+ SH_PFC_PIN_GROUP(msiof0_clk_c),
+ SH_PFC_PIN_GROUP(msiof0_sync_c),
+ SH_PFC_PIN_GROUP(msiof0_ss1_c),
+ SH_PFC_PIN_GROUP(msiof0_ss2_c),
+ SH_PFC_PIN_GROUP(msiof0_rx_c),
+ SH_PFC_PIN_GROUP(msiof0_tx_c),
SH_PFC_PIN_GROUP(msiof1_clk),
SH_PFC_PIN_GROUP(msiof1_sync),
SH_PFC_PIN_GROUP(msiof1_ss1),
SH_PFC_PIN_GROUP(msiof1_ss2),
SH_PFC_PIN_GROUP(msiof1_rx),
SH_PFC_PIN_GROUP(msiof1_tx),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_sync_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_rx_b),
+ SH_PFC_PIN_GROUP(msiof1_tx_b),
+ SH_PFC_PIN_GROUP(msiof1_clk_c),
+ SH_PFC_PIN_GROUP(msiof1_sync_c),
+ SH_PFC_PIN_GROUP(msiof1_rx_c),
+ SH_PFC_PIN_GROUP(msiof1_tx_c),
+ SH_PFC_PIN_GROUP(msiof1_clk_d),
+ SH_PFC_PIN_GROUP(msiof1_sync_d),
+ SH_PFC_PIN_GROUP(msiof1_ss1_d),
+ SH_PFC_PIN_GROUP(msiof1_rx_d),
+ SH_PFC_PIN_GROUP(msiof1_tx_d),
+ SH_PFC_PIN_GROUP(msiof1_clk_e),
+ SH_PFC_PIN_GROUP(msiof1_sync_e),
+ SH_PFC_PIN_GROUP(msiof1_rx_e),
+ SH_PFC_PIN_GROUP(msiof1_tx_e),
SH_PFC_PIN_GROUP(msiof2_clk),
SH_PFC_PIN_GROUP(msiof2_sync),
SH_PFC_PIN_GROUP(msiof2_ss1),
SH_PFC_PIN_GROUP(msiof2_ss2),
SH_PFC_PIN_GROUP(msiof2_rx),
SH_PFC_PIN_GROUP(msiof2_tx),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_rx_b),
+ SH_PFC_PIN_GROUP(msiof2_tx_b),
+ SH_PFC_PIN_GROUP(msiof2_clk_c),
+ SH_PFC_PIN_GROUP(msiof2_sync_c),
+ SH_PFC_PIN_GROUP(msiof2_rx_c),
+ SH_PFC_PIN_GROUP(msiof2_tx_c),
+ SH_PFC_PIN_GROUP(msiof2_clk_d),
+ SH_PFC_PIN_GROUP(msiof2_sync_d),
+ SH_PFC_PIN_GROUP(msiof2_ss1_d),
+ SH_PFC_PIN_GROUP(msiof2_ss2_d),
+ SH_PFC_PIN_GROUP(msiof2_rx_d),
+ SH_PFC_PIN_GROUP(msiof2_tx_d),
+ SH_PFC_PIN_GROUP(msiof2_clk_e),
+ SH_PFC_PIN_GROUP(msiof2_sync_e),
+ SH_PFC_PIN_GROUP(msiof2_rx_e),
+ SH_PFC_PIN_GROUP(msiof2_tx_e),
+ SH_PFC_PIN_GROUP(qspi_ctrl),
+ SH_PFC_PIN_GROUP(qspi_data2),
+ SH_PFC_PIN_GROUP(qspi_data4),
+ SH_PFC_PIN_GROUP(qspi_ctrl_b),
+ SH_PFC_PIN_GROUP(qspi_data2_b),
+ SH_PFC_PIN_GROUP(qspi_data4_b),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_data_b),
SH_PFC_PIN_GROUP(scif0_data_c),
@@ -3337,6 +3858,18 @@ static const char * const i2c4_groups[] = {
"i2c4_c",
};
+static const char * const i2c7_groups[] = {
+ "i2c7",
+ "i2c7_b",
+ "i2c7_c",
+};
+
+static const char * const i2c8_groups[] = {
+ "i2c8",
+ "i2c8_b",
+ "i2c8_c",
+};
+
static const char * const intc_groups[] = {
"intc_irq0",
"intc_irq1",
@@ -3358,6 +3891,18 @@ static const char * const msiof0_groups[] = {
"msiof0_ss2",
"msiof0_rx",
"msiof0_tx",
+ "msiof0_clk_b",
+ "msiof0_sync_b",
+ "msiof0_ss1_b",
+ "msiof0_ss2_b",
+ "msiof0_rx_b",
+ "msiof0_tx_b",
+ "msiof0_clk_c",
+ "msiof0_sync_c",
+ "msiof0_ss1_c",
+ "msiof0_ss2_c",
+ "msiof0_rx_c",
+ "msiof0_tx_c",
};
static const char * const msiof1_groups[] = {
@@ -3367,6 +3912,25 @@ static const char * const msiof1_groups[] = {
"msiof1_ss2",
"msiof1_rx",
"msiof1_tx",
+ "msiof1_clk_b",
+ "msiof1_sync_b",
+ "msiof1_ss1_b",
+ "msiof1_ss2_b",
+ "msiof1_rx_b",
+ "msiof1_tx_b",
+ "msiof1_clk_c",
+ "msiof1_sync_c",
+ "msiof1_rx_c",
+ "msiof1_tx_c",
+ "msiof1_clk_d",
+ "msiof1_sync_d",
+ "msiof1_ss1_d",
+ "msiof1_rx_d",
+ "msiof1_tx_d",
+ "msiof1_clk_e",
+ "msiof1_sync_e",
+ "msiof1_rx_e",
+ "msiof1_tx_e",
};
static const char * const msiof2_groups[] = {
@@ -3376,6 +3940,35 @@ static const char * const msiof2_groups[] = {
"msiof2_ss2",
"msiof2_rx",
"msiof2_tx",
+ "msiof2_clk_b",
+ "msiof2_sync_b",
+ "msiof2_ss1_b",
+ "msiof2_ss2_b",
+ "msiof2_rx_b",
+ "msiof2_tx_b",
+ "msiof2_clk_c",
+ "msiof2_sync_c",
+ "msiof2_rx_c",
+ "msiof2_tx_c",
+ "msiof2_clk_d",
+ "msiof2_sync_d",
+ "msiof2_ss1_d",
+ "msiof2_ss2_d",
+ "msiof2_rx_d",
+ "msiof2_tx_d",
+ "msiof2_clk_e",
+ "msiof2_sync_e",
+ "msiof2_rx_e",
+ "msiof2_tx_e",
+};
+
+static const char * const qspi_groups[] = {
+ "qspi_ctrl",
+ "qspi_data2",
+ "qspi_data4",
+ "qspi_ctrl_b",
+ "qspi_data2_b",
+ "qspi_data4_b",
};
static const char * const scif0_groups[] = {
@@ -3568,11 +4161,14 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c3),
SH_PFC_FUNCTION(i2c4),
+ SH_PFC_FUNCTION(i2c7),
+ SH_PFC_FUNCTION(i2c8),
SH_PFC_FUNCTION(intc),
SH_PFC_FUNCTION(mmc),
SH_PFC_FUNCTION(msiof0),
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(qspi),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c
index 2b9f32065920..c4dd3d5cf9c3 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas6.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c
@@ -1,7 +1,8 @@
/*
* pinctrl pads, groups, functions for CSR SiRFatlasVI
*
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
*
* Licensed under GPLv2 or later.
*/
@@ -529,6 +530,40 @@ static const struct sirfsoc_padmux usp0_padmux = {
static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
+static const struct sirfsoc_muxmask usp0_only_utfs_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22),
+ },
+};
+
+static const struct sirfsoc_padmux usp0_only_utfs_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp0_only_utfs_muxmask),
+ .muxmask = usp0_only_utfs_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(1) | BIT(2) | BIT(6),
+ .funcval = 0,
+};
+
+static const unsigned usp0_only_utfs_pins[] = { 51, 52, 53, 54 };
+
+static const struct sirfsoc_muxmask usp0_only_urfs_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(19) | BIT(20) | BIT(21) | BIT(23),
+ },
+};
+
+static const struct sirfsoc_padmux usp0_only_urfs_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp0_only_urfs_muxmask),
+ .muxmask = usp0_only_urfs_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(1) | BIT(2) | BIT(9),
+ .funcval = 0,
+};
+
+static const unsigned usp0_only_urfs_pins[] = { 51, 52, 53, 55 };
+
static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
{
.group = 1,
@@ -905,6 +940,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
usp0_uart_nostreamctrl_pins),
+ SIRFSOC_PIN_GROUP("usp0_only_utfs_grp", usp0_only_utfs_pins),
+ SIRFSOC_PIN_GROUP("usp0_only_urfs_grp", usp0_only_urfs_pins),
SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
SIRFSOC_PIN_GROUP("usp1_uart_nostreamctrl_grp",
usp1_uart_nostreamctrl_pins),
@@ -953,6 +990,9 @@ static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
static const char * const usp0_uart_nostreamctrl_grp[] = {
"usp0_uart_nostreamctrl_grp" };
static const char * const usp0grp[] = { "usp0grp" };
+static const char * const usp0_only_utfs_grp[] = { "usp0_only_utfs_grp" };
+static const char * const usp0_only_urfs_grp[] = { "usp0_only_urfs_grp" };
+
static const char * const usp1grp[] = { "usp1grp" };
static const char * const usp1_uart_nostreamctrl_grp[] = {
"usp1_uart_nostreamctrl_grp" };
@@ -1003,6 +1043,10 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
usp0_uart_nostreamctrl_grp,
usp0_uart_nostreamctrl_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0_only_utfs", usp0_only_utfs_grp,
+ usp0_only_utfs_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0_only_urfs", usp0_only_urfs_grp,
+ usp0_only_urfs_padmux),
SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
SIRFSOC_PMX_FUNCTION("usp1_uart_nostreamctrl",
usp1_uart_nostreamctrl_grp,
diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c
index dde0285544d6..8aa76f0776d7 100644
--- a/drivers/pinctrl/sirf/pinctrl-prima2.c
+++ b/drivers/pinctrl/sirf/pinctrl-prima2.c
@@ -1,7 +1,8 @@
/*
* pinctrl pads, groups, functions for CSR SiRFprimaII
*
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
*
* Licensed under GPLv2 or later.
*/
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 617a4916b50f..76502aab2cb1 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -1,7 +1,8 @@
/*
* pinmux driver for CSR SiRFprimaII
*
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
*
* Licensed under GPLv2 or later.
*/
@@ -594,23 +595,23 @@ static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type)
return 0;
}
-static unsigned int sirfsoc_gpio_irq_startup(struct irq_data *d)
+static int sirfsoc_gpio_irq_reqres(struct irq_data *d)
{
struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE))
+ if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE)) {
dev_err(bank->chip.gc.dev,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
- sirfsoc_gpio_irq_unmask(d);
+ return -EINVAL;
+ }
return 0;
}
-static void sirfsoc_gpio_irq_shutdown(struct irq_data *d)
+static void sirfsoc_gpio_irq_relres(struct irq_data *d)
{
struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- sirfsoc_gpio_irq_mask(d);
gpio_unlock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE);
}
@@ -620,8 +621,8 @@ static struct irq_chip sirfsoc_irq_chip = {
.irq_mask = sirfsoc_gpio_irq_mask,
.irq_unmask = sirfsoc_gpio_irq_unmask,
.irq_set_type = sirfsoc_gpio_irq_type,
- .irq_startup = sirfsoc_gpio_irq_startup,
- .irq_shutdown = sirfsoc_gpio_irq_shutdown,
+ .irq_request_resources = sirfsoc_gpio_irq_reqres,
+ .irq_release_resources = sirfsoc_gpio_irq_relres,
};
static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
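The hunk above stops piggybacking the GPIO-to-IRQ locking on .irq_startup/.irq_shutdown and moves it to the dedicated .irq_request_resources/.irq_release_resources callbacks, so requesting the interrupt no longer unmasks it as a side effect. Below is a minimal sketch of that pattern for a hypothetical GPIO bank driver; struct my_gpio_bank and the mask/unmask helpers are assumed to exist and are not part of this patch.

/*
 * Sketch only: mirrors the request/release split shown above for a
 * made-up driver. struct my_gpio_bank, my_gpio_irq_mask() and
 * my_gpio_irq_unmask() are assumed, not taken from this patch.
 */
#include <linux/gpio.h>
#include <linux/irq.h>

static int my_gpio_irq_request_resources(struct irq_data *d)
{
	struct my_gpio_bank *bank = irq_data_get_irq_chip_data(d);

	/* Reserve the line for IRQ use; do not unmask it here. */
	if (gpio_lock_as_irq(&bank->chip, d->hwirq)) {
		dev_err(bank->chip.dev, "unable to lock HW IRQ %lu for IRQ\n",
			d->hwirq);
		return -EINVAL;
	}
	return 0;
}

static void my_gpio_irq_release_resources(struct irq_data *d)
{
	struct my_gpio_bank *bank = irq_data_get_irq_chip_data(d);

	/* Masking is done through .irq_mask; only drop the reservation. */
	gpio_unlock_as_irq(&bank->chip, d->hwirq);
}

static struct irq_chip my_gpio_irq_chip = {
	.name			= "my-gpio",
	.irq_mask		= my_gpio_irq_mask,
	.irq_unmask		= my_gpio_irq_unmask,
	.irq_request_resources	= my_gpio_irq_request_resources,
	.irq_release_resources	= my_gpio_irq_release_resources,
};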
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5ae65c11d544..27df2c533b09 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -27,8 +27,6 @@ config ACER_WMI
depends on ACPI_WMI
select INPUT_SPARSEKMAP
# Acer WMI depends on ACPI_VIDEO when ACPI is enabled
- # but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select VIDEO_OUTPUT_CONTROL if ACPI
select ACPI_VIDEO if ACPI
---help---
This is a driver for newer Acer (and Wistron) laptops. It adds
@@ -55,6 +53,18 @@ config ACERHDF
If you have an Acer Aspire One netbook, say Y or M
here.
+config ALIENWARE_WMI
+ tristate "Alienware Special feature control"
+ depends on ACPI
+ depends on LEDS_CLASS
+ depends on NEW_LEDS
+ depends on ACPI_WMI
+ ---help---
+ This is a driver for controlling Alienware BIOS-driven
+ features. It exposes an interface for controlling the AlienFX
+ zones on Alienware machines, such as the X51 and X51-R2, that
+ do not have a dedicated AlienFX USB MCU.
+
config ASUS_LAPTOP
tristate "Asus Laptop Extras"
depends on ACPI
@@ -198,7 +208,7 @@ config HP_ACCEL
be called hp_accel.
config HP_WIRELESS
- tristate "HP WIRELESS"
+ tristate "HP wireless button"
depends on ACPI
depends on INPUT
help
@@ -819,12 +829,4 @@ config PVPANIC
a paravirtualized device provided by QEMU; it lets a virtual machine
(guest) communicate panic events to the host.
-config INTEL_BAYTRAIL_MBI
- tristate
- depends on PCI
- ---help---
- Needed on Baytrail platforms for access to the IOSF Sideband Mailbox
- Interface. This is a requirement for systems that need to configure
- the PUNIT for power management features such as RAPL.
-
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 9b87cfc42b84..1a2eafc9d48e 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -55,4 +55,4 @@ obj-$(CONFIG_INTEL_RST) += intel-rst.o
obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
obj-$(CONFIG_PVPANIC) += pvpanic.o
-obj-$(CONFIG_INTEL_BAYTRAIL_MBI) += intel_baytrail.o
+obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
new file mode 100644
index 000000000000..541f9514f76f
--- /dev/null
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -0,0 +1,565 @@
+/*
+ * Alienware AlienFX control
+ *
+ * Copyright (C) 2014 Dell Inc <mario_limonciello@dell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/leds.h>
+
+#define LEGACY_CONTROL_GUID "A90597CE-A997-11DA-B012-B622A1EF5492"
+#define LEGACY_POWER_CONTROL_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
+#define WMAX_CONTROL_GUID "A70591CE-A997-11DA-B012-B622A1EF5492"
+
+#define WMAX_METHOD_HDMI_SOURCE 0x1
+#define WMAX_METHOD_HDMI_STATUS 0x2
+#define WMAX_METHOD_BRIGHTNESS 0x3
+#define WMAX_METHOD_ZONE_CONTROL 0x4
+
+MODULE_AUTHOR("Mario Limonciello <mario_limonciello@dell.com>");
+MODULE_DESCRIPTION("Alienware special feature control");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("wmi:" LEGACY_CONTROL_GUID);
+MODULE_ALIAS("wmi:" WMAX_CONTROL_GUID);
+
+enum INTERFACE_FLAGS {
+ LEGACY,
+ WMAX,
+};
+
+enum LEGACY_CONTROL_STATES {
+ LEGACY_RUNNING = 1,
+ LEGACY_BOOTING = 0,
+ LEGACY_SUSPEND = 3,
+};
+
+enum WMAX_CONTROL_STATES {
+ WMAX_RUNNING = 0xFF,
+ WMAX_BOOTING = 0,
+ WMAX_SUSPEND = 3,
+};
+
+struct quirk_entry {
+ u8 num_zones;
+};
+
+static struct quirk_entry *quirks;
+
+static struct quirk_entry quirk_unknown = {
+ .num_zones = 2,
+};
+
+static struct quirk_entry quirk_x51_family = {
+ .num_zones = 3,
+};
+
+static int dmi_matched(const struct dmi_system_id *dmi)
+{
+ quirks = dmi->driver_data;
+ return 1;
+}
+
+static struct dmi_system_id alienware_quirks[] = {
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
+ },
+ .driver_data = &quirk_x51_family,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R2"),
+ },
+ .driver_data = &quirk_x51_family,
+ },
+ {}
+};
+
+struct color_platform {
+ u8 blue;
+ u8 green;
+ u8 red;
+} __packed;
+
+struct platform_zone {
+ u8 location;
+ struct device_attribute *attr;
+ struct color_platform colors;
+};
+
+struct wmax_brightness_args {
+ u32 led_mask;
+ u32 percentage;
+};
+
+struct hdmi_args {
+ u8 arg;
+};
+
+struct legacy_led_args {
+ struct color_platform colors;
+ u8 brightness;
+ u8 state;
+} __packed;
+
+struct wmax_led_args {
+ u32 led_mask;
+ struct color_platform colors;
+ u8 state;
+} __packed;
+
+static struct platform_device *platform_device;
+static struct device_attribute *zone_dev_attrs;
+static struct attribute **zone_attrs;
+static struct platform_zone *zone_data;
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "alienware-wmi",
+ .owner = THIS_MODULE,
+ }
+};
+
+static struct attribute_group zone_attribute_group = {
+ .name = "rgb_zones",
+};
+
+static u8 interface;
+static u8 lighting_control_state;
+static u8 global_brightness;
+
+/*
+ * Helpers used for zone control
+ */
+static int parse_rgb(const char *buf, struct platform_zone *zone)
+{
+ unsigned long rgb;
+ int ret;
+ union color_union {
+ struct color_platform cp;
+ int package;
+ } repackager;
+
+ ret = kstrtoul(buf, 16, &rgb);
+ if (ret)
+ return ret;
+
+ /* RGB triplet notation is 24-bit hexadecimal */
+ if (rgb > 0xFFFFFF)
+ return -EINVAL;
+
+ repackager.package = rgb & 0x0f0f0f0f;
+ pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
+ repackager.cp.red, repackager.cp.green, repackager.cp.blue);
+ zone->colors = repackager.cp;
+ return 0;
+}
+
+static struct platform_zone *match_zone(struct device_attribute *attr)
+{
+ int i;
+ for (i = 0; i < quirks->num_zones; i++) {
+ if ((struct device_attribute *)zone_data[i].attr == attr) {
+ pr_debug("alienware-wmi: matched zone location: %d\n",
+ zone_data[i].location);
+ return &zone_data[i];
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Individual RGB zone control
+ */
+static int alienware_update_led(struct platform_zone *zone)
+{
+ int method_id;
+ acpi_status status;
+ char *guid;
+ struct acpi_buffer input;
+ struct legacy_led_args legacy_args;
+ struct wmax_led_args wmax_args;
+ if (interface == WMAX) {
+ wmax_args.led_mask = 1 << zone->location;
+ wmax_args.colors = zone->colors;
+ wmax_args.state = lighting_control_state;
+ guid = WMAX_CONTROL_GUID;
+ method_id = WMAX_METHOD_ZONE_CONTROL;
+
+ input.length = (acpi_size) sizeof(wmax_args);
+ input.pointer = &wmax_args;
+ } else {
+ legacy_args.colors = zone->colors;
+ legacy_args.brightness = global_brightness;
+ legacy_args.state = 0;
+ if (lighting_control_state == LEGACY_BOOTING ||
+ lighting_control_state == LEGACY_SUSPEND) {
+ guid = LEGACY_POWER_CONTROL_GUID;
+ legacy_args.state = lighting_control_state;
+ } else
+ guid = LEGACY_CONTROL_GUID;
+ method_id = zone->location + 1;
+
+ input.length = (acpi_size) sizeof(legacy_args);
+ input.pointer = &legacy_args;
+ }
+ pr_debug("alienware-wmi: guid %s method %d\n", guid, method_id);
+
+ status = wmi_evaluate_method(guid, 1, method_id, &input, NULL);
+ if (ACPI_FAILURE(status))
+ pr_err("alienware-wmi: zone set failure: %u\n", status);
+ return ACPI_FAILURE(status);
+}
+
+static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_zone *target_zone;
+ target_zone = match_zone(attr);
+ if (target_zone == NULL)
+ return sprintf(buf, "red: -1, green: -1, blue: -1\n");
+ return sprintf(buf, "red: %d, green: %d, blue: %d\n",
+ target_zone->colors.red,
+ target_zone->colors.green, target_zone->colors.blue);
+
+}
+
+static ssize_t zone_set(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_zone *target_zone;
+ int ret;
+ target_zone = match_zone(attr);
+ if (target_zone == NULL) {
+ pr_err("alienware-wmi: invalid target zone\n");
+ return -EINVAL;
+ }
+ ret = parse_rgb(buf, target_zone);
+ if (ret)
+ return ret;
+ ret = alienware_update_led(target_zone);
+ return ret ? ret : count;
+}
+
+/*
+ * LED Brightness (Global)
+ */
+static int wmax_brightness(int brightness)
+{
+ acpi_status status;
+ struct acpi_buffer input;
+ struct wmax_brightness_args args = {
+ .led_mask = 0xFF,
+ .percentage = brightness,
+ };
+ input.length = (acpi_size) sizeof(args);
+ input.pointer = &args;
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+ WMAX_METHOD_BRIGHTNESS, &input, NULL);
+ if (ACPI_FAILURE(status))
+ pr_err("alienware-wmi: brightness set failure: %u\n", status);
+ return ACPI_FAILURE(status);
+}
+
+static void global_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int ret;
+ global_brightness = brightness;
+ if (interface == WMAX)
+ ret = wmax_brightness(brightness);
+ else
+ ret = alienware_update_led(&zone_data[0]);
+ if (ret)
+ pr_err("LED brightness update failed\n");
+}
+
+static enum led_brightness global_led_get(struct led_classdev *led_cdev)
+{
+ return global_brightness;
+}
+
+static struct led_classdev global_led = {
+ .brightness_set = global_led_set,
+ .brightness_get = global_led_get,
+ .name = "alienware::global_brightness",
+};
+
+/*
+ * Lighting control state device attribute (Global)
+ */
+static ssize_t show_control_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (lighting_control_state == LEGACY_BOOTING)
+ return scnprintf(buf, PAGE_SIZE, "[booting] running suspend\n");
+ else if (lighting_control_state == LEGACY_SUSPEND)
+ return scnprintf(buf, PAGE_SIZE, "booting running [suspend]\n");
+ return scnprintf(buf, PAGE_SIZE, "booting [running] suspend\n");
+}
+
+static ssize_t store_control_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ if (strcmp(buf, "booting\n") == 0)
+ val = LEGACY_BOOTING;
+ else if (strcmp(buf, "suspend\n") == 0)
+ val = LEGACY_SUSPEND;
+ else if (interface == LEGACY)
+ val = LEGACY_RUNNING;
+ else
+ val = WMAX_RUNNING;
+ lighting_control_state = val;
+ pr_debug("alienware-wmi: updated control state to %d\n",
+ lighting_control_state);
+ return count;
+}
+
+static DEVICE_ATTR(lighting_control_state, 0644, show_control_state,
+ store_control_state);
+
+static int alienware_zone_init(struct platform_device *dev)
+{
+ int i;
+ char buffer[10];
+ char *name;
+
+ if (interface == WMAX) {
+ global_led.max_brightness = 100;
+ lighting_control_state = WMAX_RUNNING;
+ } else if (interface == LEGACY) {
+ global_led.max_brightness = 0x0F;
+ lighting_control_state = LEGACY_RUNNING;
+ }
+ global_brightness = global_led.max_brightness;
+
+ /*
+ * - zone_dev_attrs: num_zones + 1 entries, one attribute per zone
+ * plus a NULL terminator
+ * - zone_attrs: num_zones + 2 entries, everything in zone_dev_attrs
+ * plus the lighting control attribute and a NULL terminator
+ * - zone_data: num_zones entries, one per distinct zone
+ */
+ zone_dev_attrs =
+ kzalloc(sizeof(struct device_attribute) * (quirks->num_zones + 1),
+ GFP_KERNEL);
+ if (!zone_dev_attrs)
+ return -ENOMEM;
+
+ zone_attrs =
+ kzalloc(sizeof(struct attribute *) * (quirks->num_zones + 2),
+ GFP_KERNEL);
+ if (!zone_attrs)
+ return -ENOMEM;
+
+ zone_data =
+ kzalloc(sizeof(struct platform_zone) * (quirks->num_zones),
+ GFP_KERNEL);
+ if (!zone_data)
+ return -ENOMEM;
+
+ for (i = 0; i < quirks->num_zones; i++) {
+ sprintf(buffer, "zone%02X", i);
+ name = kstrdup(buffer, GFP_KERNEL);
+ if (name == NULL)
+ return -ENOMEM;
+ sysfs_attr_init(&zone_dev_attrs[i].attr);
+ zone_dev_attrs[i].attr.name = name;
+ zone_dev_attrs[i].attr.mode = 0644;
+ zone_dev_attrs[i].show = zone_show;
+ zone_dev_attrs[i].store = zone_set;
+ zone_data[i].location = i;
+ zone_attrs[i] = &zone_dev_attrs[i].attr;
+ zone_data[i].attr = &zone_dev_attrs[i];
+ }
+ zone_attrs[quirks->num_zones] = &dev_attr_lighting_control_state.attr;
+ zone_attribute_group.attrs = zone_attrs;
+
+ led_classdev_register(&dev->dev, &global_led);
+
+ return sysfs_create_group(&dev->dev.kobj, &zone_attribute_group);
+}
+
+static void alienware_zone_exit(struct platform_device *dev)
+{
+ sysfs_remove_group(&dev->dev.kobj, &zone_attribute_group);
+ led_classdev_unregister(&global_led);
+ if (zone_dev_attrs) {
+ int i;
+ for (i = 0; i < quirks->num_zones; i++)
+ kfree(zone_dev_attrs[i].attr.name);
+ }
+ kfree(zone_dev_attrs);
+ kfree(zone_data);
+ kfree(zone_attrs);
+}
+
+/*
+ * The HDMI mux sysfs node indicates the status of the HDMI input mux.
+ * It can toggle between standard system GPU output and HDMI input.
+ */
+static ssize_t show_hdmi(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ acpi_status status;
+ struct acpi_buffer input;
+ union acpi_object *obj;
+ u32 tmp = 0;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct hdmi_args in_args = {
+ .arg = 0,
+ };
+ input.length = (acpi_size) sizeof(in_args);
+ input.pointer = &in_args;
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+ WMAX_METHOD_HDMI_STATUS, &input, &output);
+
+ if (ACPI_SUCCESS(status)) {
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ tmp = (u32) obj->integer.value;
+ if (tmp == 1)
+ return scnprintf(buf, PAGE_SIZE,
+ "[input] gpu unknown\n");
+ else if (tmp == 2)
+ return scnprintf(buf, PAGE_SIZE,
+ "input [gpu] unknown\n");
+ }
+ pr_err("alienware-wmi: unknown HDMI status: %d\n", status);
+ return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n");
+}
+
+static ssize_t toggle_hdmi(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_buffer input;
+ acpi_status status;
+ struct hdmi_args args;
+ if (strcmp(buf, "gpu\n") == 0)
+ args.arg = 1;
+ else if (strcmp(buf, "input\n") == 0)
+ args.arg = 2;
+ else
+ args.arg = 3;
+ pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
+ input.length = (acpi_size) sizeof(args);
+ input.pointer = &args;
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+ WMAX_METHOD_HDMI_SOURCE, &input, NULL);
+ if (ACPI_FAILURE(status))
+ pr_err("alienware-wmi: HDMI toggle failed: results: %u\n",
+ status);
+ return count;
+}
+
+static DEVICE_ATTR(hdmi, S_IRUGO | S_IWUSR, show_hdmi, toggle_hdmi);
+
+static void remove_hdmi(struct platform_device *device)
+{
+ device_remove_file(&device->dev, &dev_attr_hdmi);
+}
+
+static int create_hdmi(void)
+{
+ int ret;
+
+ ret = device_create_file(&platform_device->dev, &dev_attr_hdmi);
+ if (ret)
+ goto error_create_hdmi;
+ return 0;
+
+error_create_hdmi:
+ remove_hdmi(platform_device);
+ return ret;
+}
+
+static int __init alienware_wmi_init(void)
+{
+ int ret;
+
+ if (wmi_has_guid(LEGACY_CONTROL_GUID))
+ interface = LEGACY;
+ else if (wmi_has_guid(WMAX_CONTROL_GUID))
+ interface = WMAX;
+ else {
+ pr_warn("alienware-wmi: No known WMI GUID found\n");
+ return -ENODEV;
+ }
+
+ dmi_check_system(alienware_quirks);
+ if (quirks == NULL)
+ quirks = &quirk_unknown;
+
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ goto fail_platform_driver;
+ platform_device = platform_device_alloc("alienware-wmi", -1);
+ if (!platform_device) {
+ ret = -ENOMEM;
+ goto fail_platform_device1;
+ }
+ ret = platform_device_add(platform_device);
+ if (ret)
+ goto fail_platform_device2;
+
+ if (interface == WMAX) {
+ ret = create_hdmi();
+ if (ret)
+ goto fail_prep_hdmi;
+ }
+
+ ret = alienware_zone_init(platform_device);
+ if (ret)
+ goto fail_prep_zones;
+
+ return 0;
+
+fail_prep_zones:
+ alienware_zone_exit(platform_device);
+fail_prep_hdmi:
+ platform_device_del(platform_device);
+fail_platform_device2:
+ platform_device_put(platform_device);
+fail_platform_device1:
+ platform_driver_unregister(&platform_driver);
+fail_platform_driver:
+ return ret;
+}
+
+module_init(alienware_wmi_init);
+
+static void __exit alienware_wmi_exit(void)
+{
+ if (platform_device) {
+ alienware_zone_exit(platform_device);
+ remove_hdmi(platform_device);
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&platform_driver);
+ }
+}
+
+module_exit(alienware_wmi_exit);
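The new alienware-wmi driver exposes its controls through sysfs: per-zone RGB files (zone00, zone01, ...) and lighting_control_state inside the rgb_zones attribute group, a LED class device named alienware::global_brightness, and, on WMAX machines, an hdmi attribute. The short user-space sketch below drives two of those files; the /sys paths are assumptions derived from the platform device and attribute names above, not something spelled out in the patch.

/* Sketch: set zone 0 to red and read back the HDMI mux status.
 * The paths are assumptions based on the device/attribute names above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define ZONE0 "/sys/devices/platform/alienware-wmi/rgb_zones/zone00"
#define HDMI  "/sys/devices/platform/alienware-wmi/hdmi"

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char buf[64];
	int fd, n;

	/* 24-bit hex RGB triplet, the format accepted by parse_rgb() above */
	if (write_str(ZONE0, "ff0000\n"))
		perror("zone00");

	fd = open(HDMI, O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("hdmi: %s", buf); /* e.g. "input [gpu] unknown" */
		}
		close(fd);
	}
	return 0;
}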
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index be02bcc346d3..e6f336270c21 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -66,7 +66,6 @@
#include <linux/backlight.h>
#include <linux/input.h>
#include <linux/kfifo.h>
-#include <linux/video_output.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index 570926c10014..c3784baceae3 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -71,6 +71,44 @@ static unsigned short keymap_Lifebook_Tseries[KEYMAP_LEN] __initdata = {
KEY_LEFTALT
};
+static unsigned short keymap_Lifebook_T901[KEYMAP_LEN] __initdata = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_SCROLLDOWN,
+ KEY_SCROLLUP,
+ KEY_CYCLEWINDOWS,
+ KEY_LEFTCTRL,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_LEFTMETA
+};
+
+static unsigned short keymap_Lifebook_T902[KEYMAP_LEN] __initdata = {
+ KEY_RESERVED,
+ KEY_VOLUMEDOWN,
+ KEY_VOLUMEUP,
+ KEY_CYCLEWINDOWS,
+ KEY_PROG1,
+ KEY_PROG2,
+ KEY_LEFTMETA,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+};
+
static unsigned short keymap_Lifebook_U810[KEYMAP_LEN] __initdata = {
KEY_RESERVED,
KEY_RESERVED,
@@ -302,6 +340,33 @@ static int fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
static const struct dmi_system_id dmi_ids[] __initconst = {
{
.callback = fujitsu_dmi_lifebook,
+ .ident = "Fujitsu Lifebook T901",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook T901")
+ },
+ .driver_data = keymap_Lifebook_T901
+ },
+ {
+ .callback = fujitsu_dmi_lifebook,
+ .ident = "Fujitsu Lifebook T901",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T901")
+ },
+ .driver_data = keymap_Lifebook_T901
+ },
+ {
+ .callback = fujitsu_dmi_lifebook,
+ .ident = "Fujitsu Lifebook T902",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T902")
+ },
+ .driver_data = keymap_Lifebook_T902
+ },
+ {
+ .callback = fujitsu_dmi_lifebook,
.ident = "Fujitsu Siemens P/T Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/platform/x86/intel_baytrail.c b/drivers/platform/x86/intel_baytrail.c
deleted file mode 100644
index f96626b17260..000000000000
--- a/drivers/platform/x86/intel_baytrail.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Baytrail IOSF-SB MailBox Interface Driver
- * Copyright (c) 2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
- * The IOSF-SB is a fabric bus available on Atom based SOC's that uses a
- * mailbox interface (MBI) to communicate with mutiple devices. This
- * driver implements BayTrail-specific access to this interface.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-
-#include "intel_baytrail.h"
-
-static DEFINE_SPINLOCK(iosf_mbi_lock);
-
-static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
-{
- return (op << 24) | (port << 16) | (offset << 8) | BT_MBI_ENABLE;
-}
-
-static struct pci_dev *mbi_pdev; /* one mbi device */
-
-/* Hold lock before calling */
-static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
-{
- int result;
-
- if (!mbi_pdev)
- return -ENODEV;
-
- if (mcrx) {
- result = pci_write_config_dword(mbi_pdev,
- BT_MBI_MCRX_OFFSET, mcrx);
- if (result < 0)
- goto iosf_mbi_read_err;
- }
-
- result = pci_write_config_dword(mbi_pdev,
- BT_MBI_MCR_OFFSET, mcr);
- if (result < 0)
- goto iosf_mbi_read_err;
-
- result = pci_read_config_dword(mbi_pdev,
- BT_MBI_MDR_OFFSET, mdr);
- if (result < 0)
- goto iosf_mbi_read_err;
-
- return 0;
-
-iosf_mbi_read_err:
- dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
- result);
- return result;
-}
-
-/* Hold lock before calling */
-static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
-{
- int result;
-
- if (!mbi_pdev)
- return -ENODEV;
-
- result = pci_write_config_dword(mbi_pdev,
- BT_MBI_MDR_OFFSET, mdr);
- if (result < 0)
- goto iosf_mbi_write_err;
-
- if (mcrx) {
- result = pci_write_config_dword(mbi_pdev,
- BT_MBI_MCRX_OFFSET, mcrx);
- if (result < 0)
- goto iosf_mbi_write_err;
- }
-
- result = pci_write_config_dword(mbi_pdev,
- BT_MBI_MCR_OFFSET, mcr);
- if (result < 0)
- goto iosf_mbi_write_err;
-
- return 0;
-
-iosf_mbi_write_err:
- dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
- result);
- return result;
-}
-
-int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
-{
- u32 mcr, mcrx;
- unsigned long flags;
- int ret;
-
- /*Access to the GFX unit is handled by GPU code */
- BUG_ON(port == BT_MBI_UNIT_GFX);
-
- mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
- mcrx = offset & BT_MBI_MASK_HI;
-
- spin_lock_irqsave(&iosf_mbi_lock, flags);
- ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
- spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(bt_mbi_read);
-
-int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
-{
- u32 mcr, mcrx;
- unsigned long flags;
- int ret;
-
- /*Access to the GFX unit is handled by GPU code */
- BUG_ON(port == BT_MBI_UNIT_GFX);
-
- mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
- mcrx = offset & BT_MBI_MASK_HI;
-
- spin_lock_irqsave(&iosf_mbi_lock, flags);
- ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
- spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(bt_mbi_write);
-
-int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
-{
- u32 mcr, mcrx;
- u32 value;
- unsigned long flags;
- int ret;
-
- /*Access to the GFX unit is handled by GPU code */
- BUG_ON(port == BT_MBI_UNIT_GFX);
-
- mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
- mcrx = offset & BT_MBI_MASK_HI;
-
- spin_lock_irqsave(&iosf_mbi_lock, flags);
-
- /* Read current mdr value */
- ret = iosf_mbi_pci_read_mdr(mcrx, mcr & BT_MBI_RD_MASK, &value);
- if (ret < 0) {
- spin_unlock_irqrestore(&iosf_mbi_lock, flags);
- return ret;
- }
-
- /* Apply mask */
- value &= ~mask;
- mdr &= mask;
- value |= mdr;
-
- /* Write back */
- ret = iosf_mbi_pci_write_mdr(mcrx, mcr | BT_MBI_WR_MASK, value);
-
- spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(bt_mbi_modify);
-
-static int iosf_mbi_probe(struct pci_dev *pdev,
- const struct pci_device_id *unused)
-{
- int ret;
-
- ret = pci_enable_device(pdev);
- if (ret < 0) {
- dev_err(&pdev->dev, "error: could not enable device\n");
- return ret;
- }
-
- mbi_pdev = pci_dev_get(pdev);
- return 0;
-}
-
-static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
-
-static struct pci_driver iosf_mbi_pci_driver = {
- .name = "iosf_mbi_pci",
- .probe = iosf_mbi_probe,
- .id_table = iosf_mbi_pci_ids,
-};
-
-static int __init bt_mbi_init(void)
-{
- return pci_register_driver(&iosf_mbi_pci_driver);
-}
-
-static void __exit bt_mbi_exit(void)
-{
- pci_unregister_driver(&iosf_mbi_pci_driver);
- if (mbi_pdev) {
- pci_dev_put(mbi_pdev);
- mbi_pdev = NULL;
- }
-}
-
-module_init(bt_mbi_init);
-module_exit(bt_mbi_exit);
-
-MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
-MODULE_DESCRIPTION("BayTrail Mailbox Interface accessor");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_baytrail.h b/drivers/platform/x86/intel_baytrail.h
deleted file mode 100644
index 8bcc311262e9..000000000000
--- a/drivers/platform/x86/intel_baytrail.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * intel_baytrail.h: MailBox access support for Intel BayTrail platforms
- */
-
-#ifndef INTEL_BAYTRAIL_MBI_SYMS_H
-#define INTEL_BAYTRAIL_MBI_SYMS_H
-
-#define BT_MBI_MCR_OFFSET 0xD0
-#define BT_MBI_MDR_OFFSET 0xD4
-#define BT_MBI_MCRX_OFFSET 0xD8
-
-#define BT_MBI_RD_MASK 0xFEFFFFFF
-#define BT_MBI_WR_MASK 0X01000000
-
-#define BT_MBI_MASK_HI 0xFFFFFF00
-#define BT_MBI_MASK_LO 0x000000FF
-#define BT_MBI_ENABLE 0xF0
-
-/* BT-SB unit access methods */
-#define BT_MBI_UNIT_AUNIT 0x00
-#define BT_MBI_UNIT_SMC 0x01
-#define BT_MBI_UNIT_CPU 0x02
-#define BT_MBI_UNIT_BUNIT 0x03
-#define BT_MBI_UNIT_PMC 0x04
-#define BT_MBI_UNIT_GFX 0x06
-#define BT_MBI_UNIT_SMI 0x0C
-#define BT_MBI_UNIT_USB 0x43
-#define BT_MBI_UNIT_SATA 0xA3
-#define BT_MBI_UNIT_PCIE 0xA6
-
-/* Read/write opcodes */
-#define BT_MBI_AUNIT_READ 0x10
-#define BT_MBI_AUNIT_WRITE 0x11
-#define BT_MBI_SMC_READ 0x10
-#define BT_MBI_SMC_WRITE 0x11
-#define BT_MBI_CPU_READ 0x10
-#define BT_MBI_CPU_WRITE 0x11
-#define BT_MBI_BUNIT_READ 0x10
-#define BT_MBI_BUNIT_WRITE 0x11
-#define BT_MBI_PMC_READ 0x06
-#define BT_MBI_PMC_WRITE 0x07
-#define BT_MBI_GFX_READ 0x00
-#define BT_MBI_GFX_WRITE 0x01
-#define BT_MBI_SMIO_READ 0x06
-#define BT_MBI_SMIO_WRITE 0x07
-#define BT_MBI_USB_READ 0x06
-#define BT_MBI_USB_WRITE 0x07
-#define BT_MBI_SATA_READ 0x00
-#define BT_MBI_SATA_WRITE 0x01
-#define BT_MBI_PCIE_READ 0x00
-#define BT_MBI_PCIE_WRITE 0x01
-
-/**
- * bt_mbi_read() - MailBox Interface read command
- * @port: port indicating subunit being accessed
- * @opcode: port specific read or write opcode
- * @offset: register address offset
- * @mdr: register data to be read
- *
- * Locking is handled by spinlock - cannot sleep.
- * Return: Nonzero on error
- */
-int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr);
-
-/**
- * bt_mbi_write() - MailBox unmasked write command
- * @port: port indicating subunit being accessed
- * @opcode: port specific read or write opcode
- * @offset: register address offset
- * @mdr: register data to be written
- *
- * Locking is handled by spinlock - cannot sleep.
- * Return: Nonzero on error
- */
-int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
-
-/**
- * bt_mbi_modify() - MailBox masked write command
- * @port: port indicating subunit being accessed
- * @opcode: port specific read or write opcode
- * @offset: register address offset
- * @mdr: register data being modified
- * @mask: mask indicating bits in mdr to be modified
- *
- * Locking is handled by spinlock - cannot sleep.
- * Return: Nonzero on error
- */
-int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
-
-#endif /* INTEL_BAYTRAIL_MBI_SYMS_H */
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 609d38779b26..3f870972247c 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -449,6 +449,7 @@ static struct attribute_group pcc_attr_group = {
/* hotkey input device driver */
+static int sleep_keydown_seen;
static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
{
struct input_dev *hotk_input_dev = pcc->input_dev;
@@ -462,6 +463,16 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
"error getting hotkey status\n"));
return;
}
+
+ /* hack: some firmware sends no key down for sleep / hibernate */
+ if ((result & 0xf) == 0x7 || (result & 0xf) == 0xa) {
+ if (result & 0x80)
+ sleep_keydown_seen = 1;
+ if (!sleep_keydown_seen)
+ sparse_keymap_report_event(hotk_input_dev,
+ result & 0xf, 0x80, false);
+ }
+
if (!sparse_keymap_report_event(hotk_input_dev,
result & 0xf, result & 0x80, false))
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 8f8551a63cc0..9c5a07417b2b 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -76,8 +76,6 @@ do { \
pr_warn(fmt, ##__VA_ARGS__); \
} while (0)
-#define SONY_LAPTOP_DRIVER_VERSION "0.6"
-
#define SONY_NC_CLASS "sony-nc"
#define SONY_NC_HID "SNY5001"
#define SONY_NC_DRIVER_NAME "Sony Notebook Control Driver"
@@ -89,7 +87,6 @@ do { \
MODULE_AUTHOR("Stelian Pop, Mattia Dongili");
MODULE_DESCRIPTION("Sony laptop extras driver (SPIC and SNC ACPI device)");
MODULE_LICENSE("GPL");
-MODULE_VERSION(SONY_LAPTOP_DRIVER_VERSION);
static int debug;
module_param(debug, int, 0);
@@ -129,7 +126,8 @@ static int kbd_backlight = -1;
module_param(kbd_backlight, int, 0444);
MODULE_PARM_DESC(kbd_backlight,
"set this to 0 to disable keyboard backlight, "
- "1 to enable it (default: no change from current value)");
+ "1 to enable it with automatic control and 2 to have it always "
+ "on (default: no change from current value)");
static int kbd_backlight_timeout = -1;
module_param(kbd_backlight_timeout, int, 0444);
@@ -152,7 +150,8 @@ static void sony_nc_battery_care_cleanup(struct platform_device *pd);
static int sony_nc_thermal_setup(struct platform_device *pd);
static void sony_nc_thermal_cleanup(struct platform_device *pd);
-static int sony_nc_lid_resume_setup(struct platform_device *pd);
+static int sony_nc_lid_resume_setup(struct platform_device *pd,
+ unsigned int handle);
static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
static int sony_nc_gfx_switch_setup(struct platform_device *pd,
@@ -163,6 +162,21 @@ static int __sony_nc_gfx_switch_status_get(void);
static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
+static int sony_nc_lowbatt_setup(struct platform_device *pd);
+static void sony_nc_lowbatt_cleanup(struct platform_device *pd);
+
+static int sony_nc_fanspeed_setup(struct platform_device *pd);
+static void sony_nc_fanspeed_cleanup(struct platform_device *pd);
+
+static int sony_nc_usb_charge_setup(struct platform_device *pd);
+static void sony_nc_usb_charge_cleanup(struct platform_device *pd);
+
+static int sony_nc_panelid_setup(struct platform_device *pd);
+static void sony_nc_panelid_cleanup(struct platform_device *pd);
+
+static int sony_nc_smart_conn_setup(struct platform_device *pd);
+static void sony_nc_smart_conn_cleanup(struct platform_device *pd);
+
static int sony_nc_touchpad_setup(struct platform_device *pd,
unsigned int handle);
static void sony_nc_touchpad_cleanup(struct platform_device *pd);
@@ -1122,6 +1136,8 @@ static struct sony_nc_event sony_100_events[] = {
{ 0x25, SONYPI_EVENT_ANYBUTTON_RELEASED },
{ 0xa6, SONYPI_EVENT_HELP_PRESSED },
{ 0x26, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0xa8, SONYPI_EVENT_FNKEY_1 },
+ { 0x28, SONYPI_EVENT_ANYBUTTON_RELEASED },
{ 0, 0 },
};
@@ -1339,7 +1355,8 @@ static void sony_nc_function_setup(struct acpi_device *device,
result);
break;
case 0x0119:
- result = sony_nc_lid_resume_setup(pf_device);
+ case 0x015D:
+ result = sony_nc_lid_resume_setup(pf_device, handle);
if (result)
pr_err("couldn't set up lid resume function (%d)\n",
result);
@@ -1381,6 +1398,36 @@ static void sony_nc_function_setup(struct acpi_device *device,
pr_err("couldn't set up keyboard backlight function (%d)\n",
result);
break;
+ case 0x0121:
+ result = sony_nc_lowbatt_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up low battery function (%d)\n",
+ result);
+ break;
+ case 0x0149:
+ result = sony_nc_fanspeed_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up fan speed function (%d)\n",
+ result);
+ break;
+ case 0x0155:
+ result = sony_nc_usb_charge_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up USB charge support (%d)\n",
+ result);
+ break;
+ case 0x011D:
+ result = sony_nc_panelid_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up panel ID function (%d)\n",
+ result);
+ break;
+ case 0x0168:
+ result = sony_nc_smart_conn_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up smart connect support (%d)\n",
+ result);
+ break;
default:
continue;
}
@@ -1420,6 +1467,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
sony_nc_battery_care_cleanup(pd);
break;
case 0x0119:
+ case 0x015D:
sony_nc_lid_resume_cleanup(pd);
break;
case 0x0122:
@@ -1444,6 +1492,21 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
case 0x0163:
sony_nc_kbd_backlight_cleanup(pd, handle);
break;
+ case 0x0121:
+ sony_nc_lowbatt_cleanup(pd);
+ break;
+ case 0x0149:
+ sony_nc_fanspeed_cleanup(pd);
+ break;
+ case 0x0155:
+ sony_nc_usb_charge_cleanup(pd);
+ break;
+ case 0x011D:
+ sony_nc_panelid_cleanup(pd);
+ break;
+ case 0x0168:
+ sony_nc_smart_conn_cleanup(pd);
+ break;
default:
continue;
}
@@ -1719,7 +1782,7 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
{
int result;
- if (value > 1)
+ if (value > 2)
return -EINVAL;
if (sony_call_snc_handle(kbdbl_ctl->handle,
@@ -1727,8 +1790,10 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
return -EIO;
/* Try to turn the light on/off immediately */
- sony_call_snc_handle(kbdbl_ctl->handle,
- (value << 0x10) | (kbdbl_ctl->base + 0x100), &result);
+ if (value != 1)
+ sony_call_snc_handle(kbdbl_ctl->handle,
+ (value << 0x0f) | (kbdbl_ctl->base + 0x100),
+ &result);
kbdbl_ctl->mode = value;
@@ -2221,9 +2286,14 @@ static void sony_nc_thermal_resume(void)
#endif
/* resume on LID open */
+#define LID_RESUME_S5 0
+#define LID_RESUME_S4 1
+#define LID_RESUME_S3 2
+#define LID_RESUME_MAX 3
struct snc_lid_resume_control {
- struct device_attribute attrs[3];
+ struct device_attribute attrs[LID_RESUME_MAX];
unsigned int status;
+ int handle;
};
static struct snc_lid_resume_control *lid_ctl;
@@ -2231,8 +2301,9 @@ static ssize_t sony_nc_lid_resume_store(struct device *dev,
struct device_attribute *attr,
const char *buffer, size_t count)
{
- unsigned int result, pos;
+ unsigned int result;
unsigned long value;
+ unsigned int pos = LID_RESUME_S5;
if (count > 31)
return -EINVAL;
@@ -2245,21 +2316,21 @@ static ssize_t sony_nc_lid_resume_store(struct device *dev,
* +--------------+
* 2 1 0
*/
- if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
- pos = 2;
- else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
- pos = 1;
- else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
- pos = 0;
- else
- return -EINVAL;
+ while (pos < LID_RESUME_MAX) {
+ if (&lid_ctl->attrs[pos].attr == &attr->attr)
+ break;
+ pos++;
+ }
+ if (pos == LID_RESUME_MAX)
+ return -EINVAL;
if (value)
value = lid_ctl->status | (1 << pos);
else
value = lid_ctl->status & ~(1 << pos);
- if (sony_call_snc_handle(0x0119, value << 0x10 | 0x0100, &result))
+ if (sony_call_snc_handle(lid_ctl->handle, value << 0x10 | 0x0100,
+ &result))
return -EIO;
lid_ctl->status = value;
@@ -2268,29 +2339,27 @@ static ssize_t sony_nc_lid_resume_store(struct device *dev,
}
static ssize_t sony_nc_lid_resume_show(struct device *dev,
- struct device_attribute *attr, char *buffer)
+ struct device_attribute *attr,
+ char *buffer)
{
- unsigned int pos;
+ unsigned int pos = LID_RESUME_S5;
- if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
- pos = 2;
- else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
- pos = 1;
- else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
- pos = 0;
- else
- return -EINVAL;
-
- return snprintf(buffer, PAGE_SIZE, "%d\n",
- (lid_ctl->status >> pos) & 0x01);
+ while (pos < LID_RESUME_MAX) {
+ if (&lid_ctl->attrs[pos].attr == &attr->attr)
+ return snprintf(buffer, PAGE_SIZE, "%d\n",
+ (lid_ctl->status >> pos) & 0x01);
+ pos++;
+ }
+ return -EINVAL;
}
-static int sony_nc_lid_resume_setup(struct platform_device *pd)
+static int sony_nc_lid_resume_setup(struct platform_device *pd,
+ unsigned int handle)
{
unsigned int result;
int i;
- if (sony_call_snc_handle(0x0119, 0x0000, &result))
+ if (sony_call_snc_handle(handle, 0x0000, &result))
return -EIO;
lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
@@ -2298,26 +2367,29 @@ static int sony_nc_lid_resume_setup(struct platform_device *pd)
return -ENOMEM;
lid_ctl->status = result & 0x7;
+ lid_ctl->handle = handle;
sysfs_attr_init(&lid_ctl->attrs[0].attr);
- lid_ctl->attrs[0].attr.name = "lid_resume_S3";
- lid_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
- lid_ctl->attrs[0].show = sony_nc_lid_resume_show;
- lid_ctl->attrs[0].store = sony_nc_lid_resume_store;
-
- sysfs_attr_init(&lid_ctl->attrs[1].attr);
- lid_ctl->attrs[1].attr.name = "lid_resume_S4";
- lid_ctl->attrs[1].attr.mode = S_IRUGO | S_IWUSR;
- lid_ctl->attrs[1].show = sony_nc_lid_resume_show;
- lid_ctl->attrs[1].store = sony_nc_lid_resume_store;
-
- sysfs_attr_init(&lid_ctl->attrs[2].attr);
- lid_ctl->attrs[2].attr.name = "lid_resume_S5";
- lid_ctl->attrs[2].attr.mode = S_IRUGO | S_IWUSR;
- lid_ctl->attrs[2].show = sony_nc_lid_resume_show;
- lid_ctl->attrs[2].store = sony_nc_lid_resume_store;
-
- for (i = 0; i < 3; i++) {
+ lid_ctl->attrs[LID_RESUME_S5].attr.name = "lid_resume_S5";
+ lid_ctl->attrs[LID_RESUME_S5].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[LID_RESUME_S5].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[LID_RESUME_S5].store = sony_nc_lid_resume_store;
+
+ if (handle == 0x0119) {
+ sysfs_attr_init(&lid_ctl->attrs[LID_RESUME_S4].attr);
+ lid_ctl->attrs[LID_RESUME_S4].attr.name = "lid_resume_S4";
+ lid_ctl->attrs[LID_RESUME_S4].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[LID_RESUME_S4].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[LID_RESUME_S4].store = sony_nc_lid_resume_store;
+
+ sysfs_attr_init(&lid_ctl->attrs[LID_RESUME_S3].attr);
+ lid_ctl->attrs[LID_RESUME_S3].attr.name = "lid_resume_S3";
+ lid_ctl->attrs[LID_RESUME_S3].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[LID_RESUME_S3].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[LID_RESUME_S3].store = sony_nc_lid_resume_store;
+ }
+ for (i = 0; i < LID_RESUME_MAX &&
+ lid_ctl->attrs[i].attr.name; i++) {
result = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
if (result)
goto liderror;
@@ -2340,8 +2412,12 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
int i;
if (lid_ctl) {
- for (i = 0; i < 3; i++)
+ for (i = 0; i < LID_RESUME_MAX; i++) {
+ if (!lid_ctl->attrs[i].attr.name)
+ break;
+
device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+ }
kfree(lid_ctl);
lid_ctl = NULL;
@@ -2524,6 +2600,355 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
}
}
+/* low battery function */
+static struct device_attribute *lowbatt_handle;
+
+static ssize_t sony_nc_lowbatt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0121, value << 8, &result))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_lowbatt_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0121, 0x0200, &result))
+ return -EIO;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", result & 1);
+}
+
+static int sony_nc_lowbatt_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ lowbatt_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!lowbatt_handle)
+ return -ENOMEM;
+
+ sysfs_attr_init(&lowbatt_handle->attr);
+ lowbatt_handle->attr.name = "lowbatt_hibernate";
+ lowbatt_handle->attr.mode = S_IRUGO | S_IWUSR;
+ lowbatt_handle->show = sony_nc_lowbatt_show;
+ lowbatt_handle->store = sony_nc_lowbatt_store;
+
+ result = device_create_file(&pd->dev, lowbatt_handle);
+ if (result) {
+ kfree(lowbatt_handle);
+ lowbatt_handle = NULL;
+ return result;
+ }
+
+ return 0;
+}
+
+static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
+{
+ if (lowbatt_handle) {
+ device_remove_file(&pd->dev, lowbatt_handle);
+ kfree(lowbatt_handle);
+ lowbatt_handle = NULL;
+ }
+}
+
+/* fan speed function */
+static struct device_attribute *fan_handle, *hsf_handle;
+
+static ssize_t sony_nc_hsfan_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0149, value << 0x10 | 0x0200, &result))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_hsfan_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0149, 0x0100, &result))
+ return -EIO;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+}
+
+static ssize_t sony_nc_fanspeed_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0149, 0x0300, &result))
+ return -EIO;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0xff);
+}
+
+static int sony_nc_fanspeed_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ fan_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!fan_handle)
+ return -ENOMEM;
+
+ hsf_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!hsf_handle) {
+ result = -ENOMEM;
+ goto out_hsf_handle_alloc;
+ }
+
+ sysfs_attr_init(&fan_handle->attr);
+ fan_handle->attr.name = "fanspeed";
+ fan_handle->attr.mode = S_IRUGO;
+ fan_handle->show = sony_nc_fanspeed_show;
+ fan_handle->store = NULL;
+
+ sysfs_attr_init(&hsf_handle->attr);
+ hsf_handle->attr.name = "fan_forced";
+ hsf_handle->attr.mode = S_IRUGO | S_IWUSR;
+ hsf_handle->show = sony_nc_hsfan_show;
+ hsf_handle->store = sony_nc_hsfan_store;
+
+ result = device_create_file(&pd->dev, fan_handle);
+ if (result)
+ goto out_fan_handle;
+
+ result = device_create_file(&pd->dev, hsf_handle);
+ if (result)
+ goto out_hsf_handle;
+
+ return 0;
+
+out_hsf_handle:
+ device_remove_file(&pd->dev, fan_handle);
+
+out_fan_handle:
+ kfree(hsf_handle);
+ hsf_handle = NULL;
+
+out_hsf_handle_alloc:
+ kfree(fan_handle);
+ fan_handle = NULL;
+ return result;
+}
+
+static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
+{
+ if (fan_handle) {
+ device_remove_file(&pd->dev, fan_handle);
+ kfree(fan_handle);
+ fan_handle = NULL;
+ }
+ if (hsf_handle) {
+ device_remove_file(&pd->dev, hsf_handle);
+ kfree(hsf_handle);
+ hsf_handle = NULL;
+ }
+}
+
+/* USB charge function */
+static struct device_attribute *uc_handle;
+
+static ssize_t sony_nc_usb_charge_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0155, value << 0x10 | 0x0100, &result))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_usb_charge_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0155, 0x0000, &result))
+ return -EIO;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+}
+
+static int sony_nc_usb_charge_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0155, 0x0000, &result) || !(result & 0x01)) {
+ /* some models advertise the handle but have no implementation
+ * for it
+ */
+ pr_info("No USB Charge capability found\n");
+ return 0;
+ }
+
+ uc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!uc_handle)
+ return -ENOMEM;
+
+ sysfs_attr_init(&uc_handle->attr);
+ uc_handle->attr.name = "usb_charge";
+ uc_handle->attr.mode = S_IRUGO | S_IWUSR;
+ uc_handle->show = sony_nc_usb_charge_show;
+ uc_handle->store = sony_nc_usb_charge_store;
+
+ result = device_create_file(&pd->dev, uc_handle);
+ if (result) {
+ kfree(uc_handle);
+ uc_handle = NULL;
+ return result;
+ }
+
+ return 0;
+}
+
+static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
+{
+ if (uc_handle) {
+ device_remove_file(&pd->dev, uc_handle);
+ kfree(uc_handle);
+ uc_handle = NULL;
+ }
+}
+
+/* Panel ID function */
+static struct device_attribute *panel_handle;
+
+static ssize_t sony_nc_panelid_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x011D, 0x0000, &result))
+ return -EIO;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", result);
+}
+
+static int sony_nc_panelid_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ panel_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!panel_handle)
+ return -ENOMEM;
+
+ sysfs_attr_init(&panel_handle->attr);
+ panel_handle->attr.name = "panel_id";
+ panel_handle->attr.mode = S_IRUGO;
+ panel_handle->show = sony_nc_panelid_show;
+ panel_handle->store = NULL;
+
+ result = device_create_file(&pd->dev, panel_handle);
+ if (result) {
+ kfree(panel_handle);
+ panel_handle = NULL;
+ return result;
+ }
+
+ return 0;
+}
+
+static void sony_nc_panelid_cleanup(struct platform_device *pd)
+{
+ if (panel_handle) {
+ device_remove_file(&pd->dev, panel_handle);
+ kfree(panel_handle);
+ panel_handle = NULL;
+ }
+}
+
+/* smart connect function */
+static struct device_attribute *sc_handle;
+
+static ssize_t sony_nc_smart_conn_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0168, value << 0x10, &result))
+ return -EIO;
+
+ return count;
+}
+
+static int sony_nc_smart_conn_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ sc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!sc_handle)
+ return -ENOMEM;
+
+ sysfs_attr_init(&sc_handle->attr);
+ sc_handle->attr.name = "smart_connect";
+ sc_handle->attr.mode = S_IWUSR;
+ sc_handle->show = NULL;
+ sc_handle->store = sony_nc_smart_conn_store;
+
+ result = device_create_file(&pd->dev, sc_handle);
+ if (result) {
+ kfree(sc_handle);
+ sc_handle = NULL;
+ return result;
+ }
+
+ return 0;
+}
+
+static void sony_nc_smart_conn_cleanup(struct platform_device *pd)
+{
+ if (sc_handle) {
+ device_remove_file(&pd->dev, sc_handle);
+ kfree(sc_handle);
+ sc_handle = NULL;
+ }
+}
+
/* Touchpad enable/disable */
struct touchpad_control {
struct device_attribute attr;
@@ -2726,8 +3151,6 @@ static int sony_nc_add(struct acpi_device *device)
int result = 0;
struct sony_nc_value *item;
- pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
-
sony_nc_acpi_device = device;
strcpy(acpi_device_class(device), "sony/hotkey");
@@ -2821,6 +3244,7 @@ static int sony_nc_add(struct acpi_device *device)
}
}
+ pr_info("SNC setup done.\n");
return 0;
out_sysfs:
@@ -4259,8 +4683,6 @@ static int sony_pic_add(struct acpi_device *device)
struct sony_pic_ioport *io, *tmp_io;
struct sony_pic_irq *irq, *tmp_irq;
- pr_info("%s v%s\n", SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
-
spic_dev.acpi_dev = device;
strcpy(acpi_device_class(device), "sony/hotkey");
sony_pic_detect_device_type(&spic_dev);
@@ -4360,6 +4782,7 @@ static int sony_pic_add(struct acpi_device *device)
if (result)
goto err_remove_pf;
+ pr_info("SPIC setup done.\n");
return 0;
err_remove_pf:
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index defb6afc1409..15e61c16736e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2321,53 +2321,55 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
}
}
-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
- struct tp_nvram_state *newn,
- const u32 event_mask)
-{
-
#define TPACPI_COMPARE_KEY(__scancode, __member) \
- do { \
- if ((event_mask & (1 << __scancode)) && \
- oldn->__member != newn->__member) \
- tpacpi_hotkey_send_key(__scancode); \
- } while (0)
+do { \
+ if ((event_mask & (1 << __scancode)) && \
+ oldn->__member != newn->__member) \
+ tpacpi_hotkey_send_key(__scancode); \
+} while (0)
#define TPACPI_MAY_SEND_KEY(__scancode) \
- do { \
- if (event_mask & (1 << __scancode)) \
- tpacpi_hotkey_send_key(__scancode); \
- } while (0)
+do { \
+ if (event_mask & (1 << __scancode)) \
+ tpacpi_hotkey_send_key(__scancode); \
+} while (0)
- void issue_volchange(const unsigned int oldvol,
- const unsigned int newvol)
- {
- unsigned int i = oldvol;
+static void issue_volchange(const unsigned int oldvol,
+ const unsigned int newvol,
+ const u32 event_mask)
+{
+ unsigned int i = oldvol;
- while (i > newvol) {
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
- i--;
- }
- while (i < newvol) {
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
- i++;
- }
+ while (i > newvol) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+ i--;
+ }
+ while (i < newvol) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+ i++;
}
+}
- void issue_brightnesschange(const unsigned int oldbrt,
- const unsigned int newbrt)
- {
- unsigned int i = oldbrt;
+static void issue_brightnesschange(const unsigned int oldbrt,
+ const unsigned int newbrt,
+ const u32 event_mask)
+{
+ unsigned int i = oldbrt;
- while (i > newbrt) {
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
- i--;
- }
- while (i < newbrt) {
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
- i++;
- }
+ while (i > newbrt) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+ i--;
}
+ while (i < newbrt) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+ i++;
+ }
+}
+
+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+ struct tp_nvram_state *newn,
+ const u32 event_mask)
+{
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
@@ -2402,7 +2404,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
oldn->volume_level != newn->volume_level) {
/* recently muted, or repeated mute keypress, or
* multiple presses ending in mute */
- issue_volchange(oldn->volume_level, newn->volume_level);
+ issue_volchange(oldn->volume_level, newn->volume_level,
+ event_mask);
TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
}
} else {
@@ -2412,7 +2415,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
}
if (oldn->volume_level != newn->volume_level) {
- issue_volchange(oldn->volume_level, newn->volume_level);
+ issue_volchange(oldn->volume_level, newn->volume_level,
+ event_mask);
} else if (oldn->volume_toggle != newn->volume_toggle) {
/* repeated vol up/down keypress at end of scale ? */
if (newn->volume_level == 0)
@@ -2425,7 +2429,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
/* handle brightness */
if (oldn->brightness_level != newn->brightness_level) {
issue_brightnesschange(oldn->brightness_level,
- newn->brightness_level);
+ newn->brightness_level, event_mask);
} else if (oldn->brightness_toggle != newn->brightness_toggle) {
/* repeated key presses that didn't change state */
if (newn->brightness_level == 0)
@@ -3437,6 +3441,106 @@ err_exit:
return (res < 0)? res : 1;
}
+/* The ThinkPad X1 Carbon supports 5 modes: Home mode, Web browser
+ * mode, Web conference mode, Function mode and Lay-flat mode.
+ * We currently support Home mode and Function mode only.
+ *
+ * Support for the remaining modes may be added in the future.
+ */
+enum ADAPTIVE_KEY_MODE {
+ HOME_MODE,
+ WEB_BROWSER_MODE,
+ WEB_CONFERENCE_MODE,
+ FUNCTION_MODE,
+ LAYFLAT_MODE
+};
+
+const int adaptive_keyboard_modes[] = {
+ HOME_MODE,
+/* WEB_BROWSER_MODE = 2,
+ WEB_CONFERENCE_MODE = 3, */
+ FUNCTION_MODE
+};
+
+#define DFR_CHANGE_ROW 0x101
+#define DFR_SHOW_QUICKVIEW_ROW 0x102
+
+/* Pressing and holding the Fn key switches to Function mode; releasing
+ * it restores the previous mode.
+ */
+static bool adaptive_keyboard_mode_is_saved;
+static int adaptive_keyboard_prev_mode;
+
+static int adaptive_keyboard_get_next_mode(int mode)
+{
+ size_t i;
+ size_t max_mode = ARRAY_SIZE(adaptive_keyboard_modes) - 1;
+
+ for (i = 0; i <= max_mode; i++) {
+ if (adaptive_keyboard_modes[i] == mode)
+ break;
+ }
+
+ if (i >= max_mode)
+ i = 0;
+ else
+ i++;
+
+ return adaptive_keyboard_modes[i];
+}
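For illustration (not part of this patch), with only HOME_MODE and FUNCTION_MODE listed in adaptive_keyboard_modes[] the helper above simply alternates between the two and falls back to HOME_MODE for any mode it does not recognise:

	adaptive_keyboard_get_next_mode(HOME_MODE);         /* returns FUNCTION_MODE */
	adaptive_keyboard_get_next_mode(FUNCTION_MODE);     /* wraps back to HOME_MODE */
	adaptive_keyboard_get_next_mode(WEB_BROWSER_MODE);  /* unknown, returns HOME_MODE */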
+
+static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
+{
+ u32 current_mode = 0;
+ int new_mode = 0;
+
+ switch (scancode) {
+ case DFR_CHANGE_ROW:
+ if (adaptive_keyboard_mode_is_saved) {
+ new_mode = adaptive_keyboard_prev_mode;
+ adaptive_keyboard_mode_is_saved = false;
+ } else {
+ if (!acpi_evalf(
+ hkey_handle, &current_mode,
+ "GTRW", "dd", 0)) {
+ pr_err("Cannot read adaptive keyboard mode\n");
+ return false;
+ } else {
+ new_mode = adaptive_keyboard_get_next_mode(
+ current_mode);
+ }
+ }
+
+ if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd", new_mode)) {
+ pr_err("Cannot set adaptive keyboard mode\n");
+ return false;
+ }
+
+ return true;
+
+ case DFR_SHOW_QUICKVIEW_ROW:
+ if (!acpi_evalf(hkey_handle,
+ &adaptive_keyboard_prev_mode,
+ "GTRW", "dd", 0)) {
+ pr_err("Cannot read adaptive keyboard mode\n");
+ return false;
+ } else {
+ adaptive_keyboard_mode_is_saved = true;
+
+ if (!acpi_evalf(hkey_handle,
+ NULL, "STRW", "vd", FUNCTION_MODE)) {
+ pr_err("Cannot set adaptive keyboard mode\n");
+ return false;
+ }
+ }
+ return true;
+
+ default:
+ return false;
+ }
+}
+
static bool hotkey_notify_hotkey(const u32 hkey,
bool *send_acpi_ev,
bool *ignore_acpi_ev)
@@ -3456,6 +3560,8 @@ static bool hotkey_notify_hotkey(const u32 hkey,
*ignore_acpi_ev = true;
}
return true;
+ } else {
+ return adaptive_keyboard_hotkey_notify_hotkey(scancode);
}
return false;
}
@@ -3728,13 +3834,28 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
static void hotkey_suspend(void)
{
+ int hkeyv;
+
/* Do these on suspend, we get the events on early resume! */
hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
hotkey_autosleep_ack = 0;
+
+ /* save previous mode of adaptive keyboard of X1 Carbon */
+ if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
+ if ((hkeyv >> 8) == 2) {
+ if (!acpi_evalf(hkey_handle,
+ &adaptive_keyboard_prev_mode,
+ "GTRW", "dd", 0)) {
+ pr_err("Cannot read adaptive keyboard mode.\n");
+ }
+ }
+ }
}
static void hotkey_resume(void)
{
+ int hkeyv;
+
tpacpi_disable_brightness_delay();
if (hotkey_status_set(true) < 0 ||
@@ -3747,6 +3868,18 @@ static void hotkey_resume(void)
hotkey_wakeup_reason_notify_change();
hotkey_wakeup_hotunplug_complete_notify_change();
hotkey_poll_setup_safe(false);
+
+ /* restore previous mode of adaptive keyboard of X1 Carbon */
+ if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
+ if ((hkeyv >> 8) == 2) {
+ if (!acpi_evalf(hkey_handle,
+ NULL,
+ "STRW", "vd",
+ adaptive_keyboard_prev_mode)) {
+ pr_err("Cannot set adaptive keyboard mode.\n");
+ }
+ }
+ }
}
/* procfs -------------------------------------------------------------- */
@@ -6776,8 +6909,9 @@ static int __init volume_create_alsa_mixer(void)
struct snd_kcontrol *ctl_mute;
int rc;
- rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE,
- sizeof(struct tpacpi_alsa_data), &card);
+ rc = snd_card_new(&tpacpi_pdev->dev,
+ alsa_index, alsa_id, THIS_MODULE,
+ sizeof(struct tpacpi_alsa_data), &card);
if (rc < 0 || !card) {
pr_err("Failed to create ALSA card structures: %d\n", rc);
return 1;
@@ -6828,7 +6962,6 @@ static int __init volume_create_alsa_mixer(void)
}
data->ctl_mute_id = &ctl_mute->id;
- snd_card_set_dev(card, &tpacpi_pdev->dev);
rc = snd_card_register(card);
if (rc < 0) {
pr_err("Failed to register ALSA card: %d\n", rc);
@@ -8447,9 +8580,21 @@ static void mute_led_exit(void)
tpacpi_led_set(i, false);
}
+static void mute_led_resume(void)
+{
+ int i;
+
+ for (i = 0; i < TPACPI_LED_MAX; i++) {
+ struct tp_led_table *t = &led_tables[i];
+ if (t->state >= 0)
+ mute_led_on_off(t, t->state);
+ }
+}
+
static struct ibm_struct mute_led_driver_data = {
.name = "mute_led",
.exit = mute_led_exit,
+ .resume = mute_led_resume,
};
/****************************************************************************
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 90dd7645a9e5..46473ca7566b 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -5,6 +5,7 @@
* Copyright (C) 2002-2004 John Belmonte
* Copyright (C) 2008 Philip Langdale
* Copyright (C) 2010 Pierre Ducroquet
+ * Copyright (C) 2014 Azael Avalos
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,7 +38,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define TOSHIBA_ACPI_VERSION "0.19"
+#define TOSHIBA_ACPI_VERSION "0.20"
#define PROC_INTERFACE_VERSION 1
#include <linux/kernel.h>
@@ -77,6 +78,9 @@ MODULE_LICENSE("GPL");
* However the ACPI methods seem to be incomplete in some areas (for
* example they allow setting, but not reading, the LCD brightness value),
* so this is still useful.
+ *
+ * SCI stands for "System Configuration Interface", whose aim is to
+ * conceal hardware differences between different models.
*/
#define HCI_WORDS 6
@@ -84,12 +88,23 @@ MODULE_LICENSE("GPL");
/* operations */
#define HCI_SET 0xff00
#define HCI_GET 0xfe00
+#define SCI_OPEN 0xf100
+#define SCI_CLOSE 0xf200
+#define SCI_GET 0xf300
+#define SCI_SET 0xf400
/* return codes */
#define HCI_SUCCESS 0x0000
#define HCI_FAILURE 0x1000
#define HCI_NOT_SUPPORTED 0x8000
#define HCI_EMPTY 0x8c00
+#define HCI_DATA_NOT_AVAILABLE 0x8d20
+#define HCI_NOT_INITIALIZED 0x8d50
+#define SCI_OPEN_CLOSE_OK 0x0044
+#define SCI_ALREADY_OPEN 0x8100
+#define SCI_NOT_OPENED 0x8200
+#define SCI_INPUT_DATA_ERROR 0x8300
+#define SCI_NOT_PRESENT 0x8600
/* registers */
#define HCI_FAN 0x0004
@@ -99,13 +114,22 @@ MODULE_LICENSE("GPL");
#define HCI_HOTKEY_EVENT 0x001e
#define HCI_LCD_BRIGHTNESS 0x002a
#define HCI_WIRELESS 0x0056
+#define HCI_ACCELEROMETER 0x006d
+#define HCI_KBD_ILLUMINATION 0x0095
+#define HCI_ECO_MODE 0x0097
+#define HCI_ACCELEROMETER2 0x00a6
+#define SCI_ILLUMINATION 0x014e
+#define SCI_KBD_ILLUM_STATUS 0x015c
+#define SCI_TOUCHPAD 0x050e
/* field definitions */
+#define HCI_ACCEL_MASK 0x7fff
#define HCI_HOTKEY_DISABLE 0x0b
#define HCI_HOTKEY_ENABLE 0x09
#define HCI_LCD_BRIGHTNESS_BITS 3
#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
#define HCI_LCD_BRIGHTNESS_LEVELS (1 << HCI_LCD_BRIGHTNESS_BITS)
+#define HCI_MISC_SHIFT 0x10
#define HCI_VIDEO_OUT_LCD 0x1
#define HCI_VIDEO_OUT_CRT 0x2
#define HCI_VIDEO_OUT_TV 0x4
@@ -113,6 +137,8 @@ MODULE_LICENSE("GPL");
#define HCI_WIRELESS_BT_PRESENT 0x0f
#define HCI_WIRELESS_BT_ATTACH 0x40
#define HCI_WIRELESS_BT_POWER 0x80
+#define SCI_KBD_MODE_FNZ 0x1
+#define SCI_KBD_MODE_AUTO 0x2
struct toshiba_acpi_dev {
struct acpi_device *acpi_dev;
@@ -122,10 +148,14 @@ struct toshiba_acpi_dev {
struct work_struct hotkey_work;
struct backlight_device *backlight_dev;
struct led_classdev led_dev;
+ struct led_classdev kbd_led;
+ struct led_classdev eco_led;
int force_fan;
int last_key_event;
int key_event_valid;
+ int kbd_mode;
+ int kbd_time;
unsigned int illumination_supported:1;
unsigned int video_supported:1;
@@ -134,6 +164,12 @@ struct toshiba_acpi_dev {
unsigned int ntfy_supported:1;
unsigned int info_supported:1;
unsigned int tr_backlight_supported:1;
+ unsigned int kbd_illum_supported:1;
+ unsigned int kbd_led_registered:1;
+ unsigned int touchpad_supported:1;
+ unsigned int eco_supported:1;
+ unsigned int accelerometer_supported:1;
+ unsigned int sysfs_created:1;
struct mutex mutex;
};
@@ -280,21 +316,94 @@ static acpi_status hci_read2(struct toshiba_acpi_dev *dev, u32 reg,
return status;
}
+/* Common SCI tasks */
+
+static int sci_open(struct toshiba_acpi_dev *dev)
+{
+ u32 in[HCI_WORDS] = { SCI_OPEN, 0, 0, 0, 0, 0 };
+ u32 out[HCI_WORDS];
+ acpi_status status;
+
+ status = hci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+ pr_err("ACPI call to open SCI failed\n");
+ return 0;
+ }
+
+ if (out[0] == SCI_OPEN_CLOSE_OK) {
+ return 1;
+ } else if (out[0] == SCI_ALREADY_OPEN) {
+ pr_info("Toshiba SCI already opened\n");
+ return 1;
+ } else if (out[0] == SCI_NOT_PRESENT) {
+ pr_info("Toshiba SCI is not present\n");
+ }
+
+ return 0;
+}
+
+static void sci_close(struct toshiba_acpi_dev *dev)
+{
+ u32 in[HCI_WORDS] = { SCI_CLOSE, 0, 0, 0, 0, 0 };
+ u32 out[HCI_WORDS];
+ acpi_status status;
+
+ status = hci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+ pr_err("ACPI call to close SCI failed\n");
+ return;
+ }
+
+ if (out[0] == SCI_OPEN_CLOSE_OK)
+ return;
+ else if (out[0] == SCI_NOT_OPENED)
+ pr_info("Toshiba SCI not opened\n");
+ else if (out[0] == SCI_NOT_PRESENT)
+ pr_info("Toshiba SCI is not present\n");
+}
+
+static acpi_status sci_read(struct toshiba_acpi_dev *dev, u32 reg,
+ u32 *out1, u32 *result)
+{
+ u32 in[HCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 };
+ u32 out[HCI_WORDS];
+ acpi_status status = hci_raw(dev, in, out);
+ *out1 = out[2];
+ *result = (ACPI_SUCCESS(status)) ? out[0] : HCI_FAILURE;
+ return status;
+}
+
+static acpi_status sci_write(struct toshiba_acpi_dev *dev, u32 reg,
+ u32 in1, u32 *result)
+{
+ u32 in[HCI_WORDS] = { SCI_SET, reg, in1, 0, 0, 0 };
+ u32 out[HCI_WORDS];
+ acpi_status status = hci_raw(dev, in, out);
+ *result = (ACPI_SUCCESS(status)) ? out[0] : HCI_FAILURE;
+ return status;
+}
+
/* Illumination support */
static int toshiba_illumination_available(struct toshiba_acpi_dev *dev)
{
- u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
+ u32 in[HCI_WORDS] = { SCI_GET, SCI_ILLUMINATION, 0, 0, 0, 0 };
u32 out[HCI_WORDS];
acpi_status status;
- in[0] = 0xf100;
+ if (!sci_open(dev))
+ return 0;
+
status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status)) {
+ sci_close(dev);
+ if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+ pr_err("ACPI call to query Illumination support failed\n");
+ return 0;
+ } else if (out[0] == HCI_NOT_SUPPORTED || out[1] != 1) {
pr_info("Illumination device not available\n");
return 0;
}
- in[0] = 0xf400;
- status = hci_raw(dev, in, out);
+
return 1;
}
@@ -303,82 +412,270 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
{
struct toshiba_acpi_dev *dev = container_of(cdev,
struct toshiba_acpi_dev, led_dev);
- u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
- u32 out[HCI_WORDS];
+ u32 state, result;
acpi_status status;
/* First request : initialize communication. */
- in[0] = 0xf100;
- status = hci_raw(dev, in, out);
+ if (!sci_open(dev))
+ return;
+
+ /* Switch the illumination on/off */
+ state = brightness ? 1 : 0;
+ status = sci_write(dev, SCI_ILLUMINATION, state, &result);
+ sci_close(dev);
if (ACPI_FAILURE(status)) {
- pr_info("Illumination device not available\n");
+ pr_err("ACPI call for illumination failed\n");
+ return;
+ } else if (result == HCI_NOT_SUPPORTED) {
+ pr_info("Illumination not supported\n");
return;
}
+}
- if (brightness) {
- /* Switch the illumination on */
- in[0] = 0xf400;
- in[1] = 0x14e;
- in[2] = 1;
- status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status)) {
- pr_info("ACPI call for illumination failed\n");
- return;
- }
- } else {
- /* Switch the illumination off */
- in[0] = 0xf400;
- in[1] = 0x14e;
- in[2] = 0;
- status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status)) {
- pr_info("ACPI call for illumination failed.\n");
- return;
- }
+static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
+{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, led_dev);
+ u32 state, result;
+ acpi_status status;
+
+ /* First request : initialize communication. */
+ if (!sci_open(dev))
+ return LED_OFF;
+
+ /* Check the illumination */
+ status = sci_read(dev, SCI_ILLUMINATION, &state, &result);
+ sci_close(dev);
+ if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+ pr_err("ACPI call for illumination failed\n");
+ return LED_OFF;
+ } else if (result == HCI_NOT_SUPPORTED) {
+ pr_info("Illumination not supported\n");
+ return LED_OFF;
}
- /* Last request : close communication. */
- in[0] = 0xf200;
- in[1] = 0;
- in[2] = 0;
- hci_raw(dev, in, out);
+ return state ? LED_FULL : LED_OFF;
}
-static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
+/* KBD Illumination */
+static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time)
+{
+ u32 result;
+ acpi_status status;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ status = sci_write(dev, SCI_KBD_ILLUM_STATUS, time, &result);
+ sci_close(dev);
+ if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+ pr_err("ACPI call to set KBD backlight status failed\n");
+ return -EIO;
+ } else if (result == HCI_NOT_SUPPORTED) {
+ pr_info("Keyboard backlight status not supported\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int toshiba_kbd_illum_status_get(struct toshiba_acpi_dev *dev, u32 *time)
+{
+ u32 result;
+ acpi_status status;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ status = sci_read(dev, SCI_KBD_ILLUM_STATUS, time, &result);
+ sci_close(dev);
+ if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+ pr_err("ACPI call to get KBD backlight status failed\n");
+ return -EIO;
+ } else if (result == HCI_NOT_SUPPORTED) {
+ pr_info("Keyboard backlight status not supported\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static enum led_brightness toshiba_kbd_backlight_get(struct led_classdev *cdev)
{
struct toshiba_acpi_dev *dev = container_of(cdev,
- struct toshiba_acpi_dev, led_dev);
- u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
+ struct toshiba_acpi_dev, kbd_led);
+ u32 state, result;
+ acpi_status status;
+
+ /* Check the keyboard backlight state */
+ status = hci_read1(dev, HCI_KBD_ILLUMINATION, &state, &result);
+ if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+ pr_err("ACPI call to get the keyboard backlight failed\n");
+ return LED_OFF;
+ } else if (result == HCI_NOT_SUPPORTED) {
+ pr_info("Keyboard backlight not supported\n");
+ return LED_OFF;
+ }
+
+ return state ? LED_FULL : LED_OFF;
+}
+
+static void toshiba_kbd_backlight_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, kbd_led);
+ u32 state, result;
+ acpi_status status;
+
+ /* Set the keyboard backlight state */
+ state = brightness ? 1 : 0;
+ status = hci_write1(dev, HCI_KBD_ILLUMINATION, state, &result);
+ if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+ pr_err("ACPI call to set KBD Illumination mode failed\n");
+ return;
+ } else if (result == HCI_NOT_SUPPORTED) {
+ pr_info("Keyboard backlight not supported\n");
+ return;
+ }
+}
+
+/* TouchPad support */
+static int toshiba_touchpad_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+ u32 result;
+ acpi_status status;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ status = sci_write(dev, SCI_TOUCHPAD, state, &result);
+ sci_close(dev);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to set the touchpad failed\n");
+ return -EIO;
+ } else if (result == HCI_NOT_SUPPORTED) {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+ u32 result;
+ acpi_status status;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ status = sci_read(dev, SCI_TOUCHPAD, state, &result);
+ sci_close(dev);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to query the touchpad failed\n");
+ return -EIO;
+ } else if (result == HCI_NOT_SUPPORTED) {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* Eco Mode support */
+static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
+{
+ acpi_status status;
+ u32 in[HCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
+ u32 out[HCI_WORDS];
+
+ status = hci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+ pr_info("ACPI call to get ECO led failed\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static enum led_brightness toshiba_eco_mode_get_status(struct led_classdev *cdev)
+{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, eco_led);
+ u32 in[HCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
u32 out[HCI_WORDS];
acpi_status status;
- enum led_brightness result;
- /* First request : initialize communication. */
- in[0] = 0xf100;
status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status)) {
- pr_info("Illumination device not available\n");
+ if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+ pr_err("ACPI call to get ECO led failed\n");
return LED_OFF;
}
- /* Check the illumination */
- in[0] = 0xf300;
- in[1] = 0x14e;
+ return out[2] ? LED_FULL : LED_OFF;
+}
+
+static void toshiba_eco_mode_set_status(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, eco_led);
+ u32 in[HCI_WORDS] = { HCI_SET, HCI_ECO_MODE, 0, 1, 0, 0 };
+ u32 out[HCI_WORDS];
+ acpi_status status;
+
+ /* Switch the Eco Mode led on/off */
+ in[2] = (brightness) ? 1 : 0;
status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status)) {
- pr_info("ACPI call for illumination failed.\n");
- return LED_OFF;
+ if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+ pr_err("ACPI call to set ECO led failed\n");
+ return;
}
+}
- result = out[2] ? LED_FULL : LED_OFF;
+/* Accelerometer support */
+static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev)
+{
+ u32 in[HCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER2, 0, 0, 0, 0 };
+ u32 out[HCI_WORDS];
+ acpi_status status;
+
+ /* Check if the accelerometer call exists; this call also
+ * serves as initialization.
+ */
+ status = hci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+ pr_err("ACPI call to query the accelerometer failed\n");
+ return -EIO;
+ } else if (out[0] == HCI_DATA_NOT_AVAILABLE ||
+ out[0] == HCI_NOT_INITIALIZED) {
+ pr_err("Accelerometer not initialized\n");
+ return -EIO;
+ } else if (out[0] == HCI_NOT_SUPPORTED) {
+ pr_info("Accelerometer not supported\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
+ u32 *xy, u32 *z)
+{
+ u32 in[HCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER, 0, 1, 0, 0 };
+ u32 out[HCI_WORDS];
+ acpi_status status;
+
+ /* Check the Accelerometer status */
+ status = hci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+ pr_err("ACPI call to query the accelerometer failed\n");
+ return -EIO;
+ }
- /* Last request : close communication. */
- in[0] = 0xf200;
- in[1] = 0;
- in[2] = 0;
- hci_raw(dev, in, out);
+ *xy = out[2];
+ *z = out[4];
- return result;
+ return 0;
}
/* Bluetooth rfkill handlers */
@@ -904,6 +1201,177 @@ static const struct backlight_ops toshiba_backlight_data = {
.update_status = set_lcd_status,
};
+/*
+ * Sysfs files
+ */
+
+static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int mode = -1;
+ int time = -1;
+
+ if (sscanf(buf, "%i", &mode) != 1 || (mode != 2 && mode != 1))
+ return -EINVAL;
+
+ /* Set the Keyboard Backlight Mode where:
+ * Mode - Auto (2) | FN-Z (1)
+ * Auto - KBD backlight turns off automatically in given time
+ * FN-Z - KBD backlight "toggles" when hotkey pressed
+ */
+ if (mode != -1 && toshiba->kbd_mode != mode) {
+ time = toshiba->kbd_time << HCI_MISC_SHIFT;
+ time = time + toshiba->kbd_mode;
+ if (toshiba_kbd_illum_status_set(toshiba, time) < 0)
+ return -EIO;
+ toshiba->kbd_mode = mode;
+ }
+
+ return count;
+}
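As the show handlers below read it back, the SCI_KBD_ILLUM_STATUS word packs the timeout above HCI_MISC_SHIFT and the mode in the low three bits. A minimal sketch of that packing, using a hypothetical helper that is not part of this patch:

	/* Hypothetical helper, for illustration only */
	static u32 toshiba_kbd_pack_status(u32 timeout_sec, u32 mode)
	{
		/* timeout in the upper half, mode (FN-Z = 1, Auto = 2) in the low bits */
		return (timeout_sec << HCI_MISC_SHIFT) | (mode & 0x07);
	}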
+
+static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 time;
+
+ if (toshiba_kbd_illum_status_get(toshiba, &time) < 0)
+ return -EIO;
+
+ return sprintf(buf, "%i\n", time & 0x07);
+}
+
+static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int time = -1;
+
+ if (sscanf(buf, "%i", &time) != 1 || time < 0 || time > 60)
+ return -EINVAL;
+
+ /* Set the Keyboard Backlight Timeout: 0-60 seconds */
+ if (time != -1 && toshiba->kbd_time != time) {
+ time = time << HCI_MISC_SHIFT;
+ time = (toshiba->kbd_mode == SCI_KBD_MODE_AUTO) ?
+ time + 1 : time + 2;
+ if (toshiba_kbd_illum_status_set(toshiba, time) < 0)
+ return -EIO;
+ toshiba->kbd_time = time >> HCI_MISC_SHIFT;
+ }
+
+ return count;
+}
+
+static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 time;
+
+ if (toshiba_kbd_illum_status_get(toshiba, &time) < 0)
+ return -EIO;
+
+ return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT);
+}
+
+static ssize_t toshiba_touchpad_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int state;
+
+ /* Set the TouchPad on/off, 0 - Disable | 1 - Enable */
+ if (sscanf(buf, "%i", &state) == 1 && (state == 0 || state == 1)) {
+ if (toshiba_touchpad_set(toshiba, state) < 0)
+ return -EIO;
+ }
+
+ return count;
+}
+
+static ssize_t toshiba_touchpad_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 state;
+ int ret;
+
+ ret = toshiba_touchpad_get(toshiba, &state);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%i\n", state);
+}
+
+static ssize_t toshiba_position_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 xyval, zval, tmp;
+ u16 x, y, z;
+ int ret;
+
+ xyval = zval = 0;
+ ret = toshiba_accelerometer_get(toshiba, &xyval, &zval);
+ if (ret < 0)
+ return ret;
+
+ x = xyval & HCI_ACCEL_MASK;
+ tmp = xyval >> HCI_MISC_SHIFT;
+ y = tmp & HCI_ACCEL_MASK;
+ z = zval & HCI_ACCEL_MASK;
+
+ return sprintf(buf, "%d %d %d\n", x, y, z);
+}
+
+static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR,
+ toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store);
+static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR,
+ toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store);
+static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR,
+ toshiba_touchpad_show, toshiba_touchpad_store);
+static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL);
+
+static struct attribute *toshiba_attributes[] = {
+ &dev_attr_kbd_backlight_mode.attr,
+ &dev_attr_kbd_backlight_timeout.attr,
+ &dev_attr_touchpad.attr,
+ &dev_attr_position.attr,
+ NULL,
+};
+
+static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct toshiba_acpi_dev *drv = dev_get_drvdata(dev);
+ bool exists = true;
+
+ if (attr == &dev_attr_kbd_backlight_mode.attr)
+ exists = (drv->kbd_illum_supported) ? true : false;
+ else if (attr == &dev_attr_kbd_backlight_timeout.attr)
+ exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false;
+ else if (attr == &dev_attr_touchpad.attr)
+ exists = (drv->touchpad_supported) ? true : false;
+ else if (attr == &dev_attr_position.attr)
+ exists = (drv->accelerometer_supported) ? true : false;
+
+ return exists ? attr->mode : 0;
+}
+
+static struct attribute_group toshiba_attr_group = {
+ .is_visible = toshiba_sysfs_is_visible,
+ .attrs = toshiba_attributes,
+};
+
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
{
@@ -1106,6 +1574,10 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
remove_toshiba_proc_entries(dev);
+ if (dev->sysfs_created)
+ sysfs_remove_group(&dev->acpi_dev->dev.kobj,
+ &toshiba_attr_group);
+
if (dev->ntfy_supported) {
i8042_remove_filter(toshiba_acpi_i8042_filter);
cancel_work_sync(&dev->hotkey_work);
@@ -1127,6 +1599,12 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
if (dev->illumination_supported)
led_classdev_unregister(&dev->led_dev);
+ if (dev->kbd_led_registered)
+ led_classdev_unregister(&dev->kbd_led);
+
+ if (dev->eco_supported)
+ led_classdev_unregister(&dev->eco_led);
+
if (toshiba_acpi)
toshiba_acpi = NULL;
@@ -1172,6 +1650,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
dev->acpi_dev = acpi_dev;
dev->method_hci = hci_method;
acpi_dev->driver_data = dev;
+ dev_set_drvdata(&acpi_dev->dev, dev);
if (toshiba_acpi_setup_keyboard(dev))
pr_info("Unable to activate hotkeys\n");
@@ -1212,6 +1691,40 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
dev->illumination_supported = 1;
}
+ if (toshiba_eco_mode_available(dev)) {
+ dev->eco_led.name = "toshiba::eco_mode";
+ dev->eco_led.max_brightness = 1;
+ dev->eco_led.brightness_set = toshiba_eco_mode_set_status;
+ dev->eco_led.brightness_get = toshiba_eco_mode_get_status;
+ if (!led_classdev_register(&dev->acpi_dev->dev, &dev->eco_led))
+ dev->eco_supported = 1;
+ }
+
+ ret = toshiba_kbd_illum_status_get(dev, &dummy);
+ if (!ret) {
+ dev->kbd_time = dummy >> HCI_MISC_SHIFT;
+ dev->kbd_mode = dummy & 0x07;
+ }
+ dev->kbd_illum_supported = !ret;
+ /*
+ * Only register the LED if KBD illumination is supported
+ * and the keyboard backlight operation mode is set to FN-Z
+ */
+ if (dev->kbd_illum_supported && dev->kbd_mode == SCI_KBD_MODE_FNZ) {
+ dev->kbd_led.name = "toshiba::kbd_backlight";
+ dev->kbd_led.max_brightness = 1;
+ dev->kbd_led.brightness_set = toshiba_kbd_backlight_set;
+ dev->kbd_led.brightness_get = toshiba_kbd_backlight_get;
+ if (!led_classdev_register(&dev->acpi_dev->dev, &dev->kbd_led))
+ dev->kbd_led_registered = 1;
+ }
+
+ ret = toshiba_touchpad_get(dev, &dummy);
+ dev->touchpad_supported = !ret;
+
+ ret = toshiba_accelerometer_supported(dev);
+ dev->accelerometer_supported = !ret;
+
/* Determine whether or not BIOS supports fan and video interfaces */
ret = get_video_status(dev, &dummy);
@@ -1220,6 +1733,14 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
ret = get_fan_status(dev, &dummy);
dev->fan_supported = !ret;
+ ret = sysfs_create_group(&dev->acpi_dev->dev.kobj,
+ &toshiba_attr_group);
+ if (ret) {
+ dev->sysfs_created = 0;
+ goto error;
+ }
+ dev->sysfs_created = !ret;
+
create_toshiba_proc_entries(dev);
toshiba_acpi = dev;
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 769d265b221b..deb7f4bcdb7b 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -21,7 +21,7 @@
#include "pnpbios.h"
-static struct {
+__visible struct {
u16 offset;
u16 segment;
} pnp_bios_callpoint;
@@ -41,6 +41,7 @@ asmlinkage void pnp_bios_callfunc(void);
__asm__(".text \n"
__ALIGN_STR "\n"
+ ".globl pnp_bios_callfunc\n"
"pnp_bios_callfunc:\n"
" pushl %edx \n"
" pushl %ecx \n"
@@ -66,9 +67,9 @@ static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
* after PnP BIOS oopses.
*/
-u32 pnp_bios_fault_esp;
-u32 pnp_bios_fault_eip;
-u32 pnp_bios_is_utter_crap = 0;
+__visible u32 pnp_bios_fault_esp;
+__visible u32 pnp_bios_fault_eip;
+__visible u32 pnp_bios_is_utter_crap = 0;
static spinlock_t pnp_bios_lock;
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index bacddd102ae9..01712cbfd92e 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -385,7 +385,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
* device is active because it itself may be in use */
if (!dev->active) {
if (request_irq(*irq, pnp_test_handler,
- IRQF_DISABLED | IRQF_PROBE_SHARED, "pnp", NULL))
+ IRQF_PROBE_SHARED, "pnp", NULL))
return 0;
free_irq(*irq, NULL);
}
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 6d452a78b19c..fa0e4e057b99 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -22,7 +22,7 @@ config POWER_RESET_GPIO
config POWER_RESET_MSM
bool "Qualcomm MSM power-off driver"
- depends on POWER_RESET && ARCH_MSM
+ depends on POWER_RESET && ARCH_QCOM
help
Power off and restart support for Qualcomm boards.
diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c
index 37f56f7ee926..a75db7f8a92f 100644
--- a/drivers/power/reset/qnap-poweroff.c
+++ b/drivers/power/reset/qnap-poweroff.c
@@ -1,5 +1,5 @@
/*
- * QNAP Turbo NAS Board power off
+ * QNAP Turbo NAS Board power off. Can also be used on Synology devices.
*
* Copyright (C) 2012 Andrew Lunn <andrew@lunn.ch>
*
@@ -25,17 +25,43 @@
#define UART1_REG(x) (base + ((UART_##x) << 2))
+struct power_off_cfg {
+ u32 baud;
+ char cmd;
+};
+
+static const struct power_off_cfg qnap_power_off_cfg = {
+ .baud = 19200,
+ .cmd = 'A',
+};
+
+static const struct power_off_cfg synology_power_off_cfg = {
+ .baud = 9600,
+ .cmd = '1',
+};
+
+static const struct of_device_id qnap_power_off_of_match_table[] = {
+ { .compatible = "qnap,power-off",
+ .data = &qnap_power_off_cfg,
+ },
+ { .compatible = "synology,power-off",
+ .data = &synology_power_off_cfg,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qnap_power_off_of_match_table);
+
static void __iomem *base;
static unsigned long tclk;
+static const struct power_off_cfg *cfg;
static void qnap_power_off(void)
{
- /* 19200 baud divisor */
- const unsigned divisor = ((tclk + (8 * 19200)) / (16 * 19200));
+ const unsigned divisor = ((tclk + (8 * cfg->baud)) / (16 * cfg->baud));
pr_err("%s: triggering power-off...\n", __func__);
- /* hijack UART1 and reset into sane state (19200,8n1) */
+ /* hijack UART1 and reset into sane state */
writel(0x83, UART1_REG(LCR));
writel(divisor & 0xff, UART1_REG(DLL));
writel((divisor >> 8) & 0xff, UART1_REG(DLM));
@@ -44,16 +70,21 @@ static void qnap_power_off(void)
writel(0x00, UART1_REG(FCR));
writel(0x00, UART1_REG(MCR));
- /* send the power-off command 'A' to PIC */
- writel('A', UART1_REG(TX));
+ /* send the power-off command to PIC */
+ writel(cfg->cmd, UART1_REG(TX));
}
static int qnap_power_off_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct resource *res;
struct clk *clk;
char symname[KSYM_NAME_LEN];
+ const struct of_device_id *match =
+ of_match_node(qnap_power_off_of_match_table, np);
+ cfg = match->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Missing resource");
@@ -94,12 +125,6 @@ static int qnap_power_off_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id qnap_power_off_of_match_table[] = {
- { .compatible = "qnap,power-off", },
- {}
-};
-MODULE_DEVICE_TABLE(of, qnap_power_off_of_match_table);
-
static struct platform_driver qnap_power_off_driver = {
.probe = qnap_power_off_probe,
.remove = qnap_power_off_remove,
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 3c6768378a94..d9a0770b6c73 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -834,7 +834,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
}
static const struct x86_cpu_id energy_unit_quirk_ids[] = {
- { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
+ { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */
{}
};
@@ -947,11 +947,11 @@ static void package_power_limit_irq_restore(int package_id)
}
static const struct x86_cpu_id rapl_ids[] = {
- { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
- { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
- { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
- { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
- { X86_VENDOR_INTEL, 6, 0x45},/* HSW */
+ { X86_VENDOR_INTEL, 6, 0x2a},/* Sandy Bridge */
+ { X86_VENDOR_INTEL, 6, 0x2d},/* Sandy Bridge EP */
+ { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */
+ { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */
+ { X86_VENDOR_INTEL, 6, 0x45},/* Haswell */
/* TODO: Add more CPU IDs after testing */
{}
};
@@ -1147,6 +1147,11 @@ static int rapl_check_domain(int cpu, int domain)
if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
return -ENODEV;
+ /* PP1/uncore/graphics domain may not be active at the time of
+ * driver loading. So skip further checks.
+ */
+ if (domain == RAPL_DOMAIN_PP1)
+ return 0;
/* energy counters roll slowly on some domains */
while (++retry < 10) {
usleep_range(10000, 15000);
@@ -1369,6 +1374,9 @@ static int __init rapl_init(void)
return -ENODEV;
}
+
+ cpu_notifier_register_begin();
+
/* prevent CPU hotplug during detection */
get_online_cpus();
ret = rapl_detect_topology();
@@ -1380,20 +1388,23 @@ static int __init rapl_init(void)
ret = -ENODEV;
goto done;
}
- register_hotcpu_notifier(&rapl_cpu_notifier);
+ __register_hotcpu_notifier(&rapl_cpu_notifier);
done:
put_online_cpus();
+ cpu_notifier_register_done();
return ret;
}
static void __exit rapl_exit(void)
{
+ cpu_notifier_register_begin();
get_online_cpus();
- unregister_hotcpu_notifier(&rapl_cpu_notifier);
+ __unregister_hotcpu_notifier(&rapl_cpu_notifier);
rapl_unregister_powercap();
rapl_cleanup_data();
put_online_cpus();
+ cpu_notifier_register_done();
}
module_init(rapl_init);
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index fb7300837fee..bc1e5139ba29 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -699,8 +699,6 @@ int ps3_vuart_read_async(struct ps3_system_bus_device *dev, unsigned int bytes)
BUG_ON(!bytes);
- PREPARE_WORK(&priv->rx_list.work.work, ps3_vuart_work);
-
spin_lock_irqsave(&priv->rx_list.lock, flags);
if (priv->rx_list.bytes_held >= bytes) {
dev_dbg(&dev->core, "%s:%d: schedule_work %xh bytes\n",
@@ -1052,7 +1050,7 @@ static int ps3_vuart_probe(struct ps3_system_bus_device *dev)
INIT_LIST_HEAD(&priv->rx_list.head);
spin_lock_init(&priv->rx_list.lock);
- INIT_WORK(&priv->rx_list.work.work, NULL);
+ INIT_WORK(&priv->rx_list.work.work, ps3_vuart_work);
priv->rx_list.work.trigger = 0;
priv->rx_list.work.dev = dev;
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 5a7910e61e17..6963bdf54175 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -7,6 +7,7 @@ menu "PTP clock support"
config PTP_1588_CLOCK
tristate "PTP clock support"
select PPS
+ select NET_PTP_CLASSIFY
help
The IEEE 1588 standard defines a method to precisely
synchronize distributed clocks over Ethernet networks. The
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 34a0c607318e..419056d7887e 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -25,6 +25,96 @@
#include "ptp_private.h"
+static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct ptp_clock_request rq;
+ int err = 0;
+
+ memset(&rq, 0, sizeof(rq));
+
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ rq.type = PTP_CLK_REQ_EXTTS;
+ rq.extts.index = chan;
+ err = ops->enable(ops, &rq, 0);
+ break;
+ case PTP_PF_PEROUT:
+ rq.type = PTP_CLK_REQ_PEROUT;
+ rq.perout.index = chan;
+ err = ops->enable(ops, &rq, 0);
+ break;
+ case PTP_PF_PHYSYNC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct ptp_clock_info *info = ptp->info;
+ struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
+ unsigned int i;
+
+ /* Check to see if any other pin previously had this function. */
+ for (i = 0; i < info->n_pins; i++) {
+ if (info->pin_config[i].func == func &&
+ info->pin_config[i].chan == chan) {
+ pin1 = &info->pin_config[i];
+ break;
+ }
+ }
+ if (pin1 && i == pin)
+ return 0;
+
+ /* Check the desired function and channel. */
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ if (chan >= info->n_ext_ts)
+ return -EINVAL;
+ break;
+ case PTP_PF_PEROUT:
+ if (chan >= info->n_per_out)
+ return -EINVAL;
+ break;
+ case PTP_PF_PHYSYNC:
+ pr_err("sorry, cannot reassign the calibration pin\n");
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+
+ if (pin2->func == PTP_PF_PHYSYNC) {
+ pr_err("sorry, cannot reprogram the calibration pin\n");
+ return -EINVAL;
+ }
+
+ if (info->verify(info, pin, func, chan)) {
+ pr_err("driver cannot use function %u on pin %u\n", func, chan);
+ return -EOPNOTSUPP;
+ }
+
+ /* Disable whatever function was previously assigned. */
+ if (pin1) {
+ ptp_disable_pinfunc(info, func, chan);
+ pin1->func = PTP_PF_NONE;
+ pin1->chan = 0;
+ }
+ ptp_disable_pinfunc(info, pin2->func, pin2->chan);
+ pin2->func = func;
+ pin2->chan = chan;
+
+ return 0;
+}
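A PTP hardware clock driver opts into this pin machinery by filling a ptp_pin_desc table and a verify() callback in its ptp_clock_info. A rough sketch, with hypothetical names (my_pins, my_verify, my_caps) that are not part of this patch:

	static struct ptp_pin_desc my_pins[2] = {
		{ .name = "SDP0", .index = 0, .func = PTP_PF_NONE, .chan = 0 },
		{ .name = "SDP1", .index = 1, .func = PTP_PF_NONE, .chan = 0 },
	};

	static int my_verify(struct ptp_clock_info *info, unsigned int pin,
			     enum ptp_pin_function func, unsigned int chan)
	{
		/* e.g. only pin 0 can drive a periodic output on this hardware */
		if (func == PTP_PF_PEROUT && pin != 0)
			return -1;
		return 0;
	}

	static struct ptp_clock_info my_caps = {
		/* ... adjfreq, adjtime, gettime, enable, ... */
		.n_pins		= 2,
		.pin_config	= my_pins,
		.verify		= my_verify,
	};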
+
int ptp_open(struct posix_clock *pc, fmode_t fmode)
{
return 0;
@@ -35,12 +125,13 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
struct ptp_clock_caps caps;
struct ptp_clock_request req;
struct ptp_sys_offset *sysoff = NULL;
+ struct ptp_pin_desc pd;
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct ptp_clock_info *ops = ptp->info;
struct ptp_clock_time *pct;
struct timespec ts;
int enable, err = 0;
- unsigned int i;
+ unsigned int i, pin_index;
switch (cmd) {
@@ -51,6 +142,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
caps.n_ext_ts = ptp->info->n_ext_ts;
caps.n_per_out = ptp->info->n_per_out;
caps.pps = ptp->info->pps;
+ caps.n_pins = ptp->info->n_pins;
if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
err = -EFAULT;
break;
@@ -126,6 +218,40 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EFAULT;
break;
+ case PTP_PIN_GETFUNC:
+ if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
+ err = -EFAULT;
+ break;
+ }
+ pin_index = pd.index;
+ if (pin_index >= ops->n_pins) {
+ err = -EINVAL;
+ break;
+ }
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ pd = ops->pin_config[pin_index];
+ mutex_unlock(&ptp->pincfg_mux);
+ if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
+ err = -EFAULT;
+ break;
+
+ case PTP_PIN_SETFUNC:
+ if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
+ err = -EFAULT;
+ break;
+ }
+ pin_index = pd.index;
+ if (pin_index >= ops->n_pins) {
+ err = -EINVAL;
+ break;
+ }
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
+ mutex_unlock(&ptp->pincfg_mux);
+ break;
+
default:
err = -ENOTTY;
break;
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index a8319b266643..e25d2bc898e5 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -169,6 +169,7 @@ static void delete_ptp_clock(struct posix_clock *pc)
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
mutex_destroy(&ptp->tsevq_mux);
+ mutex_destroy(&ptp->pincfg_mux);
ida_simple_remove(&ptp_clocks_map, ptp->index);
kfree(ptp);
}
@@ -203,6 +204,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
ptp->index = index;
spin_lock_init(&ptp->tsevq.lock);
mutex_init(&ptp->tsevq_mux);
+ mutex_init(&ptp->pincfg_mux);
init_waitqueue_head(&ptp->tsev_wq);
/* Create a new device in our class. */
@@ -249,6 +251,7 @@ no_sysfs:
device_destroy(ptp_class, ptp->devid);
no_device:
mutex_destroy(&ptp->tsevq_mux);
+ mutex_destroy(&ptp->pincfg_mux);
no_slot:
kfree(ptp);
no_memory:
@@ -305,6 +308,26 @@ int ptp_clock_index(struct ptp_clock *ptp)
}
EXPORT_SYMBOL(ptp_clock_index);
+int ptp_find_pin(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct ptp_pin_desc *pin = NULL;
+ int i;
+
+ mutex_lock(&ptp->pincfg_mux);
+ for (i = 0; i < ptp->info->n_pins; i++) {
+ if (ptp->info->pin_config[i].func == func &&
+ ptp->info->pin_config[i].chan == chan) {
+ pin = &ptp->info->pin_config[i];
+ break;
+ }
+ }
+ mutex_unlock(&ptp->pincfg_mux);
+
+ return pin ? i : -1;
+}
+EXPORT_SYMBOL(ptp_find_pin);
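Drivers are expected to call ptp_find_pin() from their ->enable() handler to translate a requested function/channel pair into the pin that must be programmed. A hedged usage sketch (names are illustrative only):

	int pin = ptp_find_pin(my_ptp_clock, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;	/* no pin currently assigned to this channel */
	/* ... program hardware pin 'pin' for external timestamping ... */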
+
/* module operations */
static void __exit ptp_exit(void)
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index 4a08727fcaf3..604d340f2095 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -244,6 +244,7 @@ static struct ptp_clock_info ptp_ixp_caps = {
.name = "IXP46X timer",
.max_adj = 66666655,
.n_ext_ts = N_EXT_TS,
+ .n_pins = 0,
.pps = 0,
.adjfreq = ptp_ixp_adjfreq,
.adjtime = ptp_ixp_adjtime,
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 71a2559278d7..90a106308c4f 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -514,6 +514,7 @@ static struct ptp_clock_info ptp_pch_caps = {
.name = "PCH timer",
.max_adj = 50000000,
.n_ext_ts = N_EXT_TS,
+ .n_pins = 0,
.pps = 0,
.adjfreq = ptp_pch_adjfreq,
.adjtime = ptp_pch_adjtime,
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index df03f2e30ad9..9c5d41421b65 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -48,8 +48,12 @@ struct ptp_clock {
long dialed_frequency; /* remembers the frequency adjustment */
struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
struct mutex tsevq_mux; /* one process at a time reading the fifo */
+ struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
wait_queue_head_t tsev_wq;
int defunct; /* tells readers to go away when clock is being removed */
+ struct device_attribute *pin_dev_attr;
+ struct attribute **pin_attr;
+ struct attribute_group pin_attr_group;
};
/*
@@ -69,6 +73,10 @@ static inline int queue_cnt(struct timestamp_event_queue *q)
* see ptp_chardev.c
*/
+/* caller must hold pincfg_mux */
+int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan);
+
long ptp_ioctl(struct posix_clock *pc,
unsigned int cmd, unsigned long arg);
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 13ec5311746a..302e626fe6b0 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/capability.h>
+#include <linux/slab.h>
#include "ptp_private.h"
@@ -42,6 +43,7 @@ PTP_SHOW_INT(max_adjustment, max_adj);
PTP_SHOW_INT(n_alarms, n_alarm);
PTP_SHOW_INT(n_external_timestamps, n_ext_ts);
PTP_SHOW_INT(n_periodic_outputs, n_per_out);
+PTP_SHOW_INT(n_programmable_pins, n_pins);
PTP_SHOW_INT(pps_available, pps);
static struct attribute *ptp_attrs[] = {
@@ -50,6 +52,7 @@ static struct attribute *ptp_attrs[] = {
&dev_attr_n_alarms.attr,
&dev_attr_n_external_timestamps.attr,
&dev_attr_n_periodic_outputs.attr,
+ &dev_attr_n_programmable_pins.attr,
&dev_attr_pps_available.attr,
NULL,
};
@@ -175,6 +178,63 @@ out:
return err;
}
+static int ptp_pin_name2index(struct ptp_clock *ptp, const char *name)
+{
+ int i;
+ for (i = 0; i < ptp->info->n_pins; i++) {
+ if (!strcmp(ptp->info->pin_config[i].name, name))
+ return i;
+ }
+ return -1;
+}
+
+static ssize_t ptp_pin_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ unsigned int func, chan;
+ int index;
+
+ index = ptp_pin_name2index(ptp, attr->attr.name);
+ if (index < 0)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+
+ func = ptp->info->pin_config[index].func;
+ chan = ptp->info->pin_config[index].chan;
+
+ mutex_unlock(&ptp->pincfg_mux);
+
+ return snprintf(page, PAGE_SIZE, "%u %u\n", func, chan);
+}
+
+static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ unsigned int func, chan;
+ int cnt, err, index;
+
+ cnt = sscanf(buf, "%u %u", &func, &chan);
+ if (cnt != 2)
+ return -EINVAL;
+
+ index = ptp_pin_name2index(ptp, attr->attr.name);
+ if (index < 0)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ err = ptp_set_pinfunc(ptp, index, func, chan);
+ mutex_unlock(&ptp->pincfg_mux);
+ if (err)
+ return err;
+
+ return count;
+}
+
static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
static DEVICE_ATTR(period, 0220, NULL, period_store);
@@ -195,9 +255,56 @@ int ptp_cleanup_sysfs(struct ptp_clock *ptp)
if (info->pps)
device_remove_file(dev, &dev_attr_pps_enable);
+ if (info->n_pins) {
+ sysfs_remove_group(&dev->kobj, &ptp->pin_attr_group);
+ kfree(ptp->pin_attr);
+ kfree(ptp->pin_dev_attr);
+ }
return 0;
}
+static int ptp_populate_pins(struct ptp_clock *ptp)
+{
+ struct device *dev = ptp->dev;
+ struct ptp_clock_info *info = ptp->info;
+ int err = -ENOMEM, i, n_pins = info->n_pins;
+
+ ptp->pin_dev_attr = kzalloc(n_pins * sizeof(*ptp->pin_dev_attr),
+ GFP_KERNEL);
+ if (!ptp->pin_dev_attr)
+ goto no_dev_attr;
+
+ ptp->pin_attr = kzalloc((1 + n_pins) * sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!ptp->pin_attr)
+ goto no_pin_attr;
+
+ for (i = 0; i < n_pins; i++) {
+ struct device_attribute *da = &ptp->pin_dev_attr[i];
+ sysfs_attr_init(&da->attr);
+ da->attr.name = info->pin_config[i].name;
+ da->attr.mode = 0644;
+ da->show = ptp_pin_show;
+ da->store = ptp_pin_store;
+ ptp->pin_attr[i] = &da->attr;
+ }
+
+ ptp->pin_attr_group.name = "pins";
+ ptp->pin_attr_group.attrs = ptp->pin_attr;
+
+ err = sysfs_create_group(&dev->kobj, &ptp->pin_attr_group);
+ if (err)
+ goto no_group;
+ return 0;
+
+no_group:
+ kfree(ptp->pin_attr);
+no_pin_attr:
+ kfree(ptp->pin_dev_attr);
+no_dev_attr:
+ return err;
+}
+
int ptp_populate_sysfs(struct ptp_clock *ptp)
{
struct device *dev = ptp->dev;
@@ -222,7 +329,15 @@ int ptp_populate_sysfs(struct ptp_clock *ptp)
if (err)
goto out4;
}
+ if (info->n_pins) {
+ err = ptp_populate_pins(ptp);
+ if (err)
+ goto out5;
+ }
return 0;
+out5:
+ if (info->pps)
+ device_remove_file(dev, &dev_attr_pps_enable);
out4:
if (info->n_per_out)
device_remove_file(dev, &dev_attr_period);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 22f2f2857b82..5b34ff29ea38 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -71,6 +71,15 @@ config PWM_BFIN
To compile this driver as a module, choose M here: the module
will be called pwm-bfin.
+config PWM_CLPS711X
+ tristate "CLPS711X PWM support"
+ depends on ARCH_CLPS711X || COMPILE_TEST
+ help
+ Generic PWM framework driver for Cirrus Logic CLPS711X.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-clps711x.
+
config PWM_EP93XX
tristate "Cirrus Logic EP93xx PWM support"
depends on ARCH_EP93XX
@@ -80,6 +89,16 @@ config PWM_EP93XX
To compile this driver as a module, choose M here: the module
will be called pwm-ep93xx.
+config PWM_FSL_FTM
+ tristate "Freescale FlexTimer Module (FTM) PWM support"
+ depends on OF
+ help
+ Generic FTM PWM framework driver for Freescale VF610 and
+ Layerscape LS-1 SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-fsl-ftm.
+
config PWM_IMX
tristate "i.MX PWM support"
depends on ARCH_MXC
@@ -119,6 +138,16 @@ config PWM_LPC32XX
To compile this driver as a module, choose M here: the module
will be called pwm-lpc32xx.
+config PWM_LPSS
+ tristate "Intel LPSS PWM support"
+ depends on ACPI
+ help
+ Generic PWM framework driver for Intel Low Power Subsystem PWM
+ controller.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-lpss.
+
config PWM_MXS
tristate "Freescale MXS PWM support"
depends on ARCH_MXS && OF
@@ -160,6 +189,7 @@ config PWM_PXA
config PWM_RENESAS_TPU
tristate "Renesas TPU PWM support"
depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on HAS_IOMEM
help
This driver exposes the Timer Pulse Unit (TPU) PWM controller found
in Renesas chips through the PWM API.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index d8906ec69976..e57d2c38a794 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -4,11 +4,14 @@ obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
+obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
+obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
obj-$(CONFIG_PWM_IMX) += pwm-imx.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
+obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o
obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o
obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index bf4144a14661..0adc952cc4ef 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -32,6 +32,7 @@
/* Bit field in CMR */
#define PWM_CMR_CPOL (1 << 9)
#define PWM_CMR_UPD_CDTY (1 << 10)
+#define PWM_CMR_CPRE_MSK 0xF
/* The following registers for PWM v1 */
#define PWMV1_CDTY 0x04
@@ -104,6 +105,7 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long clk_rate, prd, dty;
unsigned long long div;
unsigned int pres = 0;
+ u32 val;
int ret;
if (test_bit(PWMF_ENABLED, &pwm->flags) && (period_ns != pwm->period)) {
@@ -131,7 +133,7 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
prd = div;
div *= duty_ns;
do_div(div, period_ns);
- dty = div;
+ dty = prd - div;
ret = clk_enable(atmel_pwm->clk);
if (ret) {
@@ -139,7 +141,10 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
- atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, pres);
+ /* It is necessary to preserve CPOL, inside CMR */
+ val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+ val = (val & ~PWM_CMR_CPRE_MSK) | (pres & PWM_CMR_CPRE_MSK);
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
atmel_pwm->config(chip, pwm, dty, prd);
clk_disable(atmel_pwm->clk);
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
new file mode 100644
index 000000000000..fafb6a0111b0
--- /dev/null
+++ b/drivers/pwm/pwm-clps711x.c
@@ -0,0 +1,176 @@
+/*
+ * Cirrus Logic CLPS711X PWM driver
+ *
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+struct clps711x_chip {
+ struct pwm_chip chip;
+ void __iomem *pmpcon;
+ struct clk *clk;
+ spinlock_t lock;
+};
+
+static inline struct clps711x_chip *to_clps711x_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct clps711x_chip, chip);
+}
+
+static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v)
+{
+ /* PWM0 - bits 4..7, PWM1 - bits 8..11 */
+ u32 shift = (n + 1) * 4;
+ unsigned long flags;
+ u32 tmp;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ tmp = readl(priv->pmpcon);
+ tmp &= ~(0xf << shift);
+ tmp |= v << shift;
+ writel(tmp, priv->pmpcon);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static unsigned int clps711x_get_duty(struct pwm_device *pwm, unsigned int v)
+{
+ /* Duty cycle 0..15 max */
+ return DIV_ROUND_CLOSEST(v * 0xf, pwm_get_period(pwm));
+}
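As a quick worked example of the mapping above (not part of the driver): assuming a 4 kHz input clock, the period stored at request time is 250000 ns, so a 50% duty request of 125000 ns maps to DIV_ROUND_CLOSEST(125000 * 0xf, 250000) == 8 on the 0..15 hardware scale.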
+
+static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct clps711x_chip *priv = to_clps711x_chip(chip);
+ unsigned int freq = clk_get_rate(priv->clk);
+
+ if (!freq)
+ return -EINVAL;
+
+ /* Store constant period value */
+ pwm_set_period(pwm, DIV_ROUND_CLOSEST(NSEC_PER_SEC, freq));
+
+ return 0;
+}
+
+static int clps711x_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct clps711x_chip *priv = to_clps711x_chip(chip);
+ unsigned int duty;
+
+ if (period_ns != pwm_get_period(pwm))
+ return -EINVAL;
+
+ duty = clps711x_get_duty(pwm, duty_ns);
+ clps711x_pwm_update_val(priv, pwm->hwpwm, duty);
+
+ return 0;
+}
+
+static int clps711x_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct clps711x_chip *priv = to_clps711x_chip(chip);
+ unsigned int duty;
+
+ duty = clps711x_get_duty(pwm, pwm_get_duty_cycle(pwm));
+ clps711x_pwm_update_val(priv, pwm->hwpwm, duty);
+
+ return 0;
+}
+
+static void clps711x_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct clps711x_chip *priv = to_clps711x_chip(chip);
+
+ clps711x_pwm_update_val(priv, pwm->hwpwm, 0);
+}
+
+static const struct pwm_ops clps711x_pwm_ops = {
+ .request = clps711x_pwm_request,
+ .config = clps711x_pwm_config,
+ .enable = clps711x_pwm_enable,
+ .disable = clps711x_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static struct pwm_device *clps711x_pwm_xlate(struct pwm_chip *chip,
+ const struct of_phandle_args *args)
+{
+ if (args->args[0] >= chip->npwm)
+ return ERR_PTR(-EINVAL);
+
+ return pwm_request_from_chip(chip, args->args[0], NULL);
+}
+
+static int clps711x_pwm_probe(struct platform_device *pdev)
+{
+ struct clps711x_chip *priv;
+ struct resource *res;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->pmpcon = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->pmpcon))
+ return PTR_ERR(priv->pmpcon);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->chip.ops = &clps711x_pwm_ops;
+ priv->chip.dev = &pdev->dev;
+ priv->chip.base = -1;
+ priv->chip.npwm = 2;
+ priv->chip.of_xlate = clps711x_pwm_xlate;
+ priv->chip.of_pwm_n_cells = 1;
+
+ spin_lock_init(&priv->lock);
+
+ platform_set_drvdata(pdev, priv);
+
+ return pwmchip_add(&priv->chip);
+}
+
+static int clps711x_pwm_remove(struct platform_device *pdev)
+{
+ struct clps711x_chip *priv = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&priv->chip);
+}
+
+static const struct of_device_id __maybe_unused clps711x_pwm_dt_ids[] = {
+ { .compatible = "cirrus,clps711x-pwm", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clps711x_pwm_dt_ids);
+
+static struct platform_driver clps711x_pwm_driver = {
+ .driver = {
+ .name = "clps711x-pwm",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(clps711x_pwm_dt_ids),
+ },
+ .probe = clps711x_pwm_probe,
+ .remove = clps711x_pwm_remove,
+};
+module_platform_driver(clps711x_pwm_driver);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("Cirrus Logic CLPS711X PWM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
new file mode 100644
index 000000000000..420169e96b5f
--- /dev/null
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -0,0 +1,495 @@
+/*
+ * Freescale FlexTimer Module (FTM) PWM Driver
+ *
+ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#define FTM_SC 0x00
+#define FTM_SC_CLK_MASK 0x3
+#define FTM_SC_CLK_SHIFT 3
+#define FTM_SC_CLK(c) (((c) + 1) << FTM_SC_CLK_SHIFT)
+#define FTM_SC_PS_MASK 0x7
+#define FTM_SC_PS_SHIFT 0
+
+#define FTM_CNT 0x04
+#define FTM_MOD 0x08
+
+#define FTM_CSC_BASE 0x0C
+#define FTM_CSC_MSB BIT(5)
+#define FTM_CSC_MSA BIT(4)
+#define FTM_CSC_ELSB BIT(3)
+#define FTM_CSC_ELSA BIT(2)
+#define FTM_CSC(_channel) (FTM_CSC_BASE + ((_channel) * 8))
+
+#define FTM_CV_BASE 0x10
+#define FTM_CV(_channel) (FTM_CV_BASE + ((_channel) * 8))
+
+#define FTM_CNTIN 0x4C
+#define FTM_STATUS 0x50
+
+#define FTM_MODE 0x54
+#define FTM_MODE_FTMEN BIT(0)
+#define FTM_MODE_INIT BIT(2)
+#define FTM_MODE_PWMSYNC BIT(3)
+
+#define FTM_SYNC 0x58
+#define FTM_OUTINIT 0x5C
+#define FTM_OUTMASK 0x60
+#define FTM_COMBINE 0x64
+#define FTM_DEADTIME 0x68
+#define FTM_EXTTRIG 0x6C
+#define FTM_POL 0x70
+#define FTM_FMS 0x74
+#define FTM_FILTER 0x78
+#define FTM_FLTCTRL 0x7C
+#define FTM_QDCTRL 0x80
+#define FTM_CONF 0x84
+#define FTM_FLTPOL 0x88
+#define FTM_SYNCONF 0x8C
+#define FTM_INVCTRL 0x90
+#define FTM_SWOCTRL 0x94
+#define FTM_PWMLOAD 0x98
+
+enum fsl_pwm_clk {
+ FSL_PWM_CLK_SYS,
+ FSL_PWM_CLK_FIX,
+ FSL_PWM_CLK_EXT,
+ FSL_PWM_CLK_CNTEN,
+ FSL_PWM_CLK_MAX
+};
+
+struct fsl_pwm_chip {
+ struct pwm_chip chip;
+
+ struct mutex lock;
+
+ unsigned int use_count;
+ unsigned int cnt_select;
+ unsigned int clk_ps;
+
+ void __iomem *base;
+
+ int period_ns;
+
+ struct clk *clk[FSL_PWM_CLK_MAX];
+};
+
+static inline struct fsl_pwm_chip *to_fsl_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct fsl_pwm_chip, chip);
+}
+
+static int fsl_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
+
+ return clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
+}
+
+static void fsl_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
+
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
+}
+
+static int fsl_pwm_calculate_default_ps(struct fsl_pwm_chip *fpc,
+ enum fsl_pwm_clk index)
+{
+ unsigned long sys_rate, cnt_rate;
+ unsigned long long ratio;
+
+ sys_rate = clk_get_rate(fpc->clk[FSL_PWM_CLK_SYS]);
+ if (!sys_rate)
+ return -EINVAL;
+
+ cnt_rate = clk_get_rate(fpc->clk[fpc->cnt_select]);
+ if (!cnt_rate)
+ return -EINVAL;
+
+ switch (index) {
+ case FSL_PWM_CLK_SYS:
+ fpc->clk_ps = 1;
+ break;
+ case FSL_PWM_CLK_FIX:
+ ratio = 2 * cnt_rate - 1;
+ do_div(ratio, sys_rate);
+ fpc->clk_ps = ratio;
+ break;
+ case FSL_PWM_CLK_EXT:
+ ratio = 4 * cnt_rate - 1;
+ do_div(ratio, sys_rate);
+ fpc->clk_ps = ratio;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned long fsl_pwm_calculate_cycles(struct fsl_pwm_chip *fpc,
+ unsigned long period_ns)
+{
+ unsigned long long c, c0;
+
+ c = clk_get_rate(fpc->clk[fpc->cnt_select]);
+ c = c * period_ns;
+ do_div(c, 1000000000UL);
+
+ do {
+ c0 = c;
+ do_div(c0, (1 << fpc->clk_ps));
+ if (c0 <= 0xFFFF)
+ return (unsigned long)c0;
+ } while (++fpc->clk_ps < 8);
+
+ return 0;
+}
+
+static unsigned long fsl_pwm_calculate_period_cycles(struct fsl_pwm_chip *fpc,
+ unsigned long period_ns,
+ enum fsl_pwm_clk index)
+{
+ int ret;
+
+ ret = fsl_pwm_calculate_default_ps(fpc, index);
+ if (ret) {
+ dev_err(fpc->chip.dev,
+ "failed to calculate default prescaler: %d\n",
+ ret);
+ return 0;
+ }
+
+ return fsl_pwm_calculate_cycles(fpc, period_ns);
+}
+
+static unsigned long fsl_pwm_calculate_period(struct fsl_pwm_chip *fpc,
+ unsigned long period_ns)
+{
+ enum fsl_pwm_clk m0, m1;
+ unsigned long fix_rate, ext_rate, cycles;
+
+ cycles = fsl_pwm_calculate_period_cycles(fpc, period_ns,
+ FSL_PWM_CLK_SYS);
+ if (cycles) {
+ fpc->cnt_select = FSL_PWM_CLK_SYS;
+ return cycles;
+ }
+
+ fix_rate = clk_get_rate(fpc->clk[FSL_PWM_CLK_FIX]);
+ ext_rate = clk_get_rate(fpc->clk[FSL_PWM_CLK_EXT]);
+
+ if (fix_rate > ext_rate) {
+ m0 = FSL_PWM_CLK_FIX;
+ m1 = FSL_PWM_CLK_EXT;
+ } else {
+ m0 = FSL_PWM_CLK_EXT;
+ m1 = FSL_PWM_CLK_FIX;
+ }
+
+ cycles = fsl_pwm_calculate_period_cycles(fpc, period_ns, m0);
+ if (cycles) {
+ fpc->cnt_select = m0;
+ return cycles;
+ }
+
+ fpc->cnt_select = m1;
+
+ return fsl_pwm_calculate_period_cycles(fpc, period_ns, m1);
+}
+
+static unsigned long fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc,
+ unsigned long period_ns,
+ unsigned long duty_ns)
+{
+ unsigned long long val, duty;
+
+ val = readl(fpc->base + FTM_MOD);
+ duty = duty_ns * (val + 1);
+ do_div(duty, period_ns);
+
+ return (unsigned long)duty;
+}
+
+static int fsl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
+ u32 val, period, duty;
+
+ mutex_lock(&fpc->lock);
+
+ /*
+ * The Freescale FTM controller supports only a single period for
+ * all PWM channels, therefore incompatible changes need to be
+ * refused.
+ */
+ if (fpc->period_ns && fpc->period_ns != period_ns) {
+ dev_err(fpc->chip.dev,
+ "conflicting period requested for PWM %u\n",
+ pwm->hwpwm);
+ mutex_unlock(&fpc->lock);
+ return -EBUSY;
+ }
+
+ if (!fpc->period_ns && duty_ns) {
+ period = fsl_pwm_calculate_period(fpc, period_ns);
+ if (!period) {
+ dev_err(fpc->chip.dev, "failed to calculate period\n");
+ mutex_unlock(&fpc->lock);
+ return -EINVAL;
+ }
+
+ val = readl(fpc->base + FTM_SC);
+ val &= ~(FTM_SC_PS_MASK << FTM_SC_PS_SHIFT);
+ val |= fpc->clk_ps;
+ writel(val, fpc->base + FTM_SC);
+ writel(period - 1, fpc->base + FTM_MOD);
+
+ fpc->period_ns = period_ns;
+ }
+
+ mutex_unlock(&fpc->lock);
+
+ duty = fsl_pwm_calculate_duty(fpc, period_ns, duty_ns);
+
+ writel(FTM_CSC_MSB | FTM_CSC_ELSB, fpc->base + FTM_CSC(pwm->hwpwm));
+ writel(duty, fpc->base + FTM_CV(pwm->hwpwm));
+
+ return 0;
+}
+
+static int fsl_pwm_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
+ u32 val;
+
+ val = readl(fpc->base + FTM_POL);
+
+ if (polarity == PWM_POLARITY_INVERSED)
+ val |= BIT(pwm->hwpwm);
+ else
+ val &= ~BIT(pwm->hwpwm);
+
+ writel(val, fpc->base + FTM_POL);
+
+ return 0;
+}
+
+static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
+{
+ u32 val;
+ int ret;
+
+ if (fpc->use_count != 0)
+ return 0;
+
+ /* select counter clock source */
+ val = readl(fpc->base + FTM_SC);
+ val &= ~(FTM_SC_CLK_MASK << FTM_SC_CLK_SHIFT);
+ val |= FTM_SC_CLK(fpc->cnt_select);
+ writel(val, fpc->base + FTM_SC);
+
+ ret = clk_prepare_enable(fpc->clk[fpc->cnt_select]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ if (ret) {
+ clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
+ return ret;
+ }
+
+ fpc->use_count++;
+
+ return 0;
+}
+
+static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
+ u32 val;
+ int ret;
+
+ mutex_lock(&fpc->lock);
+ val = readl(fpc->base + FTM_OUTMASK);
+ val &= ~BIT(pwm->hwpwm);
+ writel(val, fpc->base + FTM_OUTMASK);
+
+ ret = fsl_counter_clock_enable(fpc);
+ mutex_unlock(&fpc->lock);
+
+ return ret;
+}
+
+static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc)
+{
+ u32 val;
+
+ /*
+ * already disabled, do nothing
+ */
+ if (fpc->use_count == 0)
+ return;
+
+ /* there are still users, so can't disable yet */
+ if (--fpc->use_count > 0)
+ return;
+
+ /* no users left, disable PWM counter clock */
+ val = readl(fpc->base + FTM_SC);
+ val &= ~(FTM_SC_CLK_MASK << FTM_SC_CLK_SHIFT);
+ writel(val, fpc->base + FTM_SC);
+
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
+}
+
+static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
+ u32 val;
+
+ mutex_lock(&fpc->lock);
+ val = readl(fpc->base + FTM_OUTMASK);
+ val |= BIT(pwm->hwpwm);
+ writel(val, fpc->base + FTM_OUTMASK);
+
+ fsl_counter_clock_disable(fpc);
+
+ val = readl(fpc->base + FTM_OUTMASK);
+
+ if ((val & 0xFF) == 0xFF)
+ fpc->period_ns = 0;
+
+ mutex_unlock(&fpc->lock);
+}
+
+static const struct pwm_ops fsl_pwm_ops = {
+ .request = fsl_pwm_request,
+ .free = fsl_pwm_free,
+ .config = fsl_pwm_config,
+ .set_polarity = fsl_pwm_set_polarity,
+ .enable = fsl_pwm_enable,
+ .disable = fsl_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int fsl_pwm_init(struct fsl_pwm_chip *fpc)
+{
+ int ret;
+
+ ret = clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
+ if (ret)
+ return ret;
+
+ writel(0x00, fpc->base + FTM_CNTIN);
+ writel(0x00, fpc->base + FTM_OUTINIT);
+ writel(0xFF, fpc->base + FTM_OUTMASK);
+
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
+
+ return 0;
+}
+
+static int fsl_pwm_probe(struct platform_device *pdev)
+{
+ struct fsl_pwm_chip *fpc;
+ struct resource *res;
+ int ret;
+
+ fpc = devm_kzalloc(&pdev->dev, sizeof(*fpc), GFP_KERNEL);
+ if (!fpc)
+ return -ENOMEM;
+
+ mutex_init(&fpc->lock);
+
+ fpc->chip.dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fpc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fpc->base))
+ return PTR_ERR(fpc->base);
+
+ fpc->clk[FSL_PWM_CLK_SYS] = devm_clk_get(&pdev->dev, "ftm_sys");
+ if (IS_ERR(fpc->clk[FSL_PWM_CLK_SYS])) {
+ dev_err(&pdev->dev, "failed to get \"ftm_sys\" clock\n");
+ return PTR_ERR(fpc->clk[FSL_PWM_CLK_SYS]);
+ }
+
+ fpc->clk[FSL_PWM_CLK_FIX] = devm_clk_get(fpc->chip.dev, "ftm_fix");
+ if (IS_ERR(fpc->clk[FSL_PWM_CLK_FIX]))
+ return PTR_ERR(fpc->clk[FSL_PWM_CLK_FIX]);
+
+ fpc->clk[FSL_PWM_CLK_EXT] = devm_clk_get(fpc->chip.dev, "ftm_ext");
+ if (IS_ERR(fpc->clk[FSL_PWM_CLK_EXT]))
+ return PTR_ERR(fpc->clk[FSL_PWM_CLK_EXT]);
+
+ fpc->clk[FSL_PWM_CLK_CNTEN] =
+ devm_clk_get(fpc->chip.dev, "ftm_cnt_clk_en");
+ if (IS_ERR(fpc->clk[FSL_PWM_CLK_CNTEN]))
+ return PTR_ERR(fpc->clk[FSL_PWM_CLK_CNTEN]);
+
+ fpc->chip.ops = &fsl_pwm_ops;
+ fpc->chip.of_xlate = of_pwm_xlate_with_flags;
+ fpc->chip.of_pwm_n_cells = 3;
+ fpc->chip.base = -1;
+ fpc->chip.npwm = 8;
+
+ ret = pwmchip_add(&fpc->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, fpc);
+
+ return fsl_pwm_init(fpc);
+}
+
+static int fsl_pwm_remove(struct platform_device *pdev)
+{
+ struct fsl_pwm_chip *fpc = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&fpc->chip);
+}
+
+static const struct of_device_id fsl_pwm_dt_ids[] = {
+ { .compatible = "fsl,vf610-ftm-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_pwm_dt_ids);
+
+static struct platform_driver fsl_pwm_driver = {
+ .driver = {
+ .name = "fsl-ftm-pwm",
+ .of_match_table = fsl_pwm_dt_ids,
+ },
+ .probe = fsl_pwm_probe,
+ .remove = fsl_pwm_remove,
+};
+module_platform_driver(fsl_pwm_driver);
+
+MODULE_DESCRIPTION("Freescale FlexTimer Module PWM Driver");
+MODULE_AUTHOR("Xiubo Li <Li.Xiubo@freescale.com>");
+MODULE_ALIAS("platform:fsl-ftm-pwm");
+MODULE_LICENSE("GPL");
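
The core of the FTM driver is turning a requested period into a 16-bit MOD value plus a power-of-two prescaler: the period is converted to counter ticks of the selected clock, and the 3-bit prescaler is raised until the tick count fits. A rough standalone model of fsl_pwm_calculate_cycles(), skipping the default-prescaler estimate derived from the clock ratios and using invented rates:

#include <stdio.h>

static unsigned long calculate_cycles(unsigned long long clk_rate,
				      unsigned long period_ns,
				      unsigned int *clk_ps)
{
	unsigned long long c = clk_rate * period_ns / 1000000000ULL;

	do {
		unsigned long long c0 = c >> *clk_ps;	/* divide by 2^clk_ps */

		if (c0 <= 0xFFFF)	/* must fit the 16-bit MOD register */
			return (unsigned long)c0;
	} while (++(*clk_ps) < 8);

	return 0;	/* period not reachable with this clock source */
}

int main(void)
{
	unsigned int clk_ps = 0;	/* start from the smallest prescaler */
	unsigned long cycles = calculate_cycles(66000000ULL /* assumed rate */,
						20000000UL /* 20 ms period */,
						&clk_ps);

	printf("cycles=%lu prescaler=2^%u\n", cycles, clk_ps);
	return 0;
}
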
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
new file mode 100644
index 000000000000..449e372050a0
--- /dev/null
+++ b/drivers/pwm/pwm-lpss.c
@@ -0,0 +1,183 @@
+/*
+ * Intel Low Power Subsystem PWM controller driver
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Author: Chew Kean Ho <kean.ho.chew@intel.com>
+ * Author: Chang Rebecca Swee Fun <rebecca.swee.fun.chang@intel.com>
+ * Author: Chew Chiau Ee <chiau.ee.chew@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/platform_device.h>
+
+#define PWM 0x00000000
+#define PWM_ENABLE BIT(31)
+#define PWM_SW_UPDATE BIT(30)
+#define PWM_BASE_UNIT_SHIFT 8
+#define PWM_BASE_UNIT_MASK 0x00ffff00
+#define PWM_ON_TIME_DIV_MASK 0x000000ff
+#define PWM_DIVISION_CORRECTION 0x2
+#define PWM_LIMIT (0x8000 + PWM_DIVISION_CORRECTION)
+#define NSECS_PER_SEC 1000000000UL
+
+struct pwm_lpss_chip {
+ struct pwm_chip chip;
+ void __iomem *regs;
+ struct clk *clk;
+};
+
+static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct pwm_lpss_chip, chip);
+}
+
+static int pwm_lpss_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct pwm_lpss_chip *lpwm = to_lpwm(chip);
+ u8 on_time_div;
+ unsigned long c;
+ unsigned long long base_unit, freq = NSECS_PER_SEC;
+ u32 ctrl;
+
+ do_div(freq, period_ns);
+
+ /* The equation is: base_unit = ((freq / c) * 65536) + correction */
+ base_unit = freq * 65536;
+
+ c = clk_get_rate(lpwm->clk);
+ if (!c)
+ return -EINVAL;
+
+ do_div(base_unit, c);
+ base_unit += PWM_DIVISION_CORRECTION;
+ if (base_unit > PWM_LIMIT)
+ return -EINVAL;
+
+ if (duty_ns <= 0)
+ duty_ns = 1;
+ on_time_div = 255 - (255 * duty_ns / period_ns);
+
+ ctrl = readl(lpwm->regs + PWM);
+ ctrl &= ~(PWM_BASE_UNIT_MASK | PWM_ON_TIME_DIV_MASK);
+ ctrl |= (u16) base_unit << PWM_BASE_UNIT_SHIFT;
+ ctrl |= on_time_div;
+ /* request PWM to update on next cycle */
+ ctrl |= PWM_SW_UPDATE;
+ writel(ctrl, lpwm->regs + PWM);
+
+ return 0;
+}
+
+static int pwm_lpss_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pwm_lpss_chip *lpwm = to_lpwm(chip);
+ u32 ctrl;
+ int ret;
+
+ ret = clk_prepare_enable(lpwm->clk);
+ if (ret)
+ return ret;
+
+ ctrl = readl(lpwm->regs + PWM);
+ writel(ctrl | PWM_ENABLE, lpwm->regs + PWM);
+
+ return 0;
+}
+
+static void pwm_lpss_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pwm_lpss_chip *lpwm = to_lpwm(chip);
+ u32 ctrl;
+
+ ctrl = readl(lpwm->regs + PWM);
+ writel(ctrl & ~PWM_ENABLE, lpwm->regs + PWM);
+
+ clk_disable_unprepare(lpwm->clk);
+}
+
+static const struct pwm_ops pwm_lpss_ops = {
+ .config = pwm_lpss_config,
+ .enable = pwm_lpss_enable,
+ .disable = pwm_lpss_disable,
+ .owner = THIS_MODULE,
+};
+
+static const struct acpi_device_id pwm_lpss_acpi_match[] = {
+ { "80860F09", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match);
+
+static int pwm_lpss_probe(struct platform_device *pdev)
+{
+ struct pwm_lpss_chip *lpwm;
+ struct resource *r;
+ int ret;
+
+ lpwm = devm_kzalloc(&pdev->dev, sizeof(*lpwm), GFP_KERNEL);
+ if (!lpwm)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ lpwm->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(lpwm->regs))
+ return PTR_ERR(lpwm->regs);
+
+ lpwm->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(lpwm->clk)) {
+ dev_err(&pdev->dev, "failed to get PWM clock\n");
+ return PTR_ERR(lpwm->clk);
+ }
+
+ lpwm->chip.dev = &pdev->dev;
+ lpwm->chip.ops = &pwm_lpss_ops;
+ lpwm->chip.base = -1;
+ lpwm->chip.npwm = 1;
+
+ ret = pwmchip_add(&lpwm->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, lpwm);
+ return 0;
+}
+
+static int pwm_lpss_remove(struct platform_device *pdev)
+{
+ struct pwm_lpss_chip *lpwm = platform_get_drvdata(pdev);
+ u32 ctrl;
+
+ ctrl = readl(lpwm->regs + PWM);
+ writel(ctrl & ~PWM_ENABLE, lpwm->regs + PWM);
+
+ return pwmchip_remove(&lpwm->chip);
+}
+
+static struct platform_driver pwm_lpss_driver = {
+ .driver = {
+ .name = "pwm-lpss",
+ .acpi_match_table = pwm_lpss_acpi_match,
+ },
+ .probe = pwm_lpss_probe,
+ .remove = pwm_lpss_remove,
+};
+module_platform_driver(pwm_lpss_driver);
+
+MODULE_DESCRIPTION("PWM driver for Intel LPSS");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:pwm-lpss");
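
The LPSS control word packs a base_unit field (output frequency relative to the input clock, in 1/65536 steps plus a fixed correction) and an inverted 8-bit on-time divisor into one register, then sets PWM_SW_UPDATE so the hardware latches both on the next cycle. A self-contained sketch of that calculation, assuming an arbitrary input clock rate:

#include <stdint.h>
#include <stdio.h>

#define PWM_SW_UPDATE		(1u << 30)
#define PWM_BASE_UNIT_SHIFT	8
#define PWM_DIVISION_CORRECTION	0x2
#define PWM_LIMIT		(0x8000 + PWM_DIVISION_CORRECTION)

int main(void)
{
	unsigned long clk_rate = 25000000;	/* assumed LPSS PWM clock */
	int period_ns = 100000;			/* 10 kHz */
	int duty_ns = 25000;			/* 25 % duty */
	unsigned long long freq = 1000000000ULL / period_ns;
	unsigned long long base_unit = freq * 65536 / clk_rate
				       + PWM_DIVISION_CORRECTION;
	unsigned int on_time_div, ctrl = 0;

	if (base_unit > PWM_LIMIT)
		return 1;			/* period too long for this clock */

	on_time_div = 255 - (255LL * duty_ns / period_ns);	/* inverted duty */
	ctrl |= (uint16_t)base_unit << PWM_BASE_UNIT_SHIFT;
	ctrl |= on_time_div;
	ctrl |= PWM_SW_UPDATE;			/* latch on the next cycle */

	printf("base_unit=%llu on_time_div=%u ctrl=0x%08x\n",
	       base_unit, on_time_div, ctrl);
	return 0;
}
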
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index 8d995731cef8..cd356d870244 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -127,12 +127,12 @@ static struct pwm_ops pxa_pwm_ops = {
#ifdef CONFIG_OF
/*
- * Device tree users must create one device instance for each pwm channel.
+ * Device tree users must create one device instance for each PWM channel.
* Hence we dispense with the HAS_SECONDARY_PWM and "tell" the original driver
* code that this is a single channel pxa25x-pwm. Currently all devices are
* supported identically.
*/
-static struct of_device_id pwm_of_match[] = {
+static const struct of_device_id pwm_of_match[] = {
{ .compatible = "marvell,pxa250-pwm", .data = &pwm_id_table[0]},
{ .compatible = "marvell,pxa270-pwm", .data = &pwm_id_table[0]},
{ .compatible = "marvell,pxa168-pwm", .data = &pwm_id_table[0]},
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index b59639e0c029..d66529a995a1 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -598,9 +598,8 @@ static int pwm_samsung_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops pwm_samsung_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pwm_samsung_suspend, pwm_samsung_resume)
-};
+static SIMPLE_DEV_PM_OPS(pwm_samsung_pm_ops, pwm_samsung_suspend,
+ pwm_samsung_resume);
static struct platform_driver pwm_samsung_driver = {
.driver = {
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index ff7cbf2d28e3..1753dc693c15 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -2256,6 +2256,7 @@ static int tsi721_setup_mport(struct tsi721_device *priv)
mport->phy_type = RIO_PHY_SERIAL;
mport->priv = (void *)priv;
mport->phys_efptr = 0x100;
+ mport->dev.parent = &pdev->dev;
priv->mport = mport;
INIT_LIST_HEAD(&mport->dbells);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 7061ac0ad428..0305675270ee 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -644,6 +644,9 @@ enum tsi721_smsg_int_flag {
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+#define TSI721_BDMA_BD_RING_SZ 128
+#define TSI721_BDMA_MAX_BCOUNT (TSI721_DMAD_BCOUNT1 + 1)
+
struct tsi721_tx_desc {
struct dma_async_tx_descriptor txd;
struct tsi721_dma_desc *hw_desc;
@@ -652,6 +655,7 @@ struct tsi721_tx_desc {
u64 rio_addr;
/* upper 2-bits of 66-bit RIO address */
u8 rio_addr_u;
+ u32 bcount;
bool interrupt;
struct list_head desc_node;
struct list_head tx_list;
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 91245f5dbe81..9b60b1f3261c 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -304,35 +304,17 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
}
static int
-tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
- struct tsi721_tx_desc *desc, struct scatterlist *sg,
+tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct scatterlist *sg,
enum dma_rtype rtype, u32 sys_size)
{
struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
u64 rio_addr;
- if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
- dev_err(bdma_chan->dchan.device->dev,
- "SG element is too large\n");
- return -EINVAL;
- }
-
- dev_dbg(bdma_chan->dchan.device->dev,
- "desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
- (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
- sg_dma_len(sg));
-
- dev_dbg(bdma_chan->dchan.device->dev,
- "bd_ptr = %p did=%d raddr=0x%llx\n",
- bd_ptr, desc->destid, desc->rio_addr);
-
/* Initialize DMA descriptor */
bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
(rtype << 19) | desc->destid);
- if (desc->interrupt)
- bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
- (sys_size << 26) | sg_dma_len(sg));
+ (sys_size << 26));
rio_addr = (desc->rio_addr >> 2) |
((u64)(desc->rio_addr_u & 0x3) << 62);
bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
@@ -346,6 +328,20 @@ tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
return 0;
}
+static int
+tsi721_desc_fill_end(struct tsi721_tx_desc *desc)
+{
+ struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+
+ /* Update DMA descriptor */
+ if (desc->interrupt)
+ bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
+ bd_ptr->bcount |= cpu_to_le32(desc->bcount & TSI721_DMAD_BCOUNT1);
+
+ return 0;
+}
+
+
static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
struct tsi721_tx_desc *desc)
{
@@ -674,6 +670,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
unsigned int i;
u32 sys_size = dma_to_mport(dchan->device)->sys_size;
enum dma_rtype rtype;
+ dma_addr_t next_addr = -1;
if (!sgl || !sg_len) {
dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
@@ -704,36 +701,84 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
for_each_sg(sgl, sg, sg_len, i) {
int err;
- dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
+ if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
+ dev_err(dchan->device->dev,
+ "%s: SG entry %d is too large\n", __func__, i);
+ goto err_desc_put;
+ }
+
+ /*
+ * If this sg entry forms a contiguous block with the previous one,
+ * try to merge it into the existing DMA descriptor.
+ */
+ if (desc) {
+ if (next_addr == sg_dma_address(sg) &&
+ desc->bcount + sg_dma_len(sg) <=
+ TSI721_BDMA_MAX_BCOUNT) {
+ /* Adjust byte count of the descriptor */
+ desc->bcount += sg_dma_len(sg);
+ goto entry_done;
+ }
+
+ /*
+ * Finalize this descriptor using the
+ * total byte count.
+ */
+ tsi721_desc_fill_end(desc);
+ dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
+ __func__, desc->bcount);
+ }
+
+ /*
+ * Obtain and initialize a new descriptor
+ */
desc = tsi721_desc_get(bdma_chan);
if (!desc) {
dev_err(dchan->device->dev,
- "Not enough descriptors available\n");
- goto err_desc_get;
+ "%s: Failed to get new descriptor for SG %d\n",
+ __func__, i);
+ goto err_desc_put;
}
- if (sg_is_last(sg))
- desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
- else
- desc->interrupt = false;
-
desc->destid = rext->destid;
desc->rio_addr = rio_addr;
desc->rio_addr_u = 0;
+ desc->bcount = sg_dma_len(sg);
+
+ dev_dbg(dchan->device->dev,
+ "sg%d desc: 0x%llx, addr: 0x%llx len: %d\n",
+ i, (u64)desc->txd.phys,
+ (unsigned long long)sg_dma_address(sg),
+ sg_dma_len(sg));
+
+ dev_dbg(dchan->device->dev,
+ "bd_ptr = %p did=%d raddr=0x%llx\n",
+ desc->hw_desc, desc->destid, desc->rio_addr);
- err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
+ err = tsi721_desc_fill_init(desc, sg, rtype, sys_size);
if (err) {
dev_err(dchan->device->dev,
"Failed to build desc: %d\n", err);
- goto err_desc_get;
+ goto err_desc_put;
}
- rio_addr += sg_dma_len(sg);
+ next_addr = sg_dma_address(sg);
if (!first)
first = desc;
else
list_add_tail(&desc->desc_node, &first->tx_list);
+
+entry_done:
+ if (sg_is_last(sg)) {
+ desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+ tsi721_desc_fill_end(desc);
+ dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
+ __func__, desc->bcount);
+ } else {
+ rio_addr += sg_dma_len(sg);
+ next_addr += sg_dma_len(sg);
+ }
}
first->txd.cookie = -EBUSY;
@@ -741,7 +786,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
return &first->txd;
-err_desc_get:
+err_desc_put:
tsi721_desc_put(bdma_chan, first);
return NULL;
}
@@ -792,7 +837,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
if (i == TSI721_DMACH_MAINT)
continue;
- bdma_chan->bd_num = 64;
+ bdma_chan->bd_num = TSI721_BDMA_BD_RING_SZ;
bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
bdma_chan->dchan.device = &mport->dma;
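
The rewritten prep loop above no longer emits one hardware descriptor per SG entry: physically contiguous entries are folded into the current descriptor as long as the running byte count stays within what the BCOUNT1 field can hold, and only then is a new descriptor started. A toy userspace model of that merge decision, with fabricated SG entries and an arbitrary stand-in for TSI721_BDMA_MAX_BCOUNT:

#include <stdio.h>

#define MAX_BCOUNT	0x10000		/* stand-in; real limit is TSI721_DMAD_BCOUNT1 + 1 */

struct sg_entry {
	unsigned long long addr;
	unsigned int len;
};

int main(void)
{
	struct sg_entry sgl[] = {
		{ 0x1000, 0x1000 },
		{ 0x2000, 0x1000 },	/* contiguous with the previous entry */
		{ 0x8000, 0x2000 },	/* gap: forces a new descriptor */
	};
	unsigned int nents = sizeof(sgl) / sizeof(sgl[0]);
	unsigned long long next_addr = -1ULL;
	unsigned int bcount = 0, descriptors = 0;

	for (unsigned int i = 0; i < nents; i++) {
		if (descriptors && next_addr == sgl[i].addr &&
		    bcount + sgl[i].len <= MAX_BCOUNT) {
			bcount += sgl[i].len;	/* merge into current descriptor */
		} else {
			descriptors++;		/* finalize previous, start new */
			bcount = sgl[i].len;
		}
		next_addr = sgl[i].addr + sgl[i].len;
	}

	printf("%u SG entries -> %u descriptor(s), last bcount=0x%x\n",
	       nents, descriptors, bcount);
	return 0;
}
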
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c
index c9ae692d3451..f301f059bb85 100644
--- a/drivers/rapidio/rio-driver.c
+++ b/drivers/rapidio/rio-driver.c
@@ -167,7 +167,6 @@ void rio_unregister_driver(struct rio_driver *rdrv)
void rio_attach_device(struct rio_dev *rdev)
{
rdev->dev.bus = &rio_bus_type;
- rdev->dev.parent = &rio_bus;
}
EXPORT_SYMBOL_GPL(rio_attach_device);
@@ -216,9 +215,12 @@ static int rio_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-struct device rio_bus = {
- .init_name = "rapidio",
+struct class rio_mport_class = {
+ .name = "rapidio_port",
+ .owner = THIS_MODULE,
+ .dev_groups = rio_mport_groups,
};
+EXPORT_SYMBOL_GPL(rio_mport_class);
struct bus_type rio_bus_type = {
.name = "rapidio",
@@ -233,14 +235,20 @@ struct bus_type rio_bus_type = {
/**
* rio_bus_init - Register the RapidIO bus with the device model
*
- * Registers the RIO bus device and RIO bus type with the Linux
+ * Registers the RIO mport device class and RIO bus type with the Linux
* device model.
*/
static int __init rio_bus_init(void)
{
- if (device_register(&rio_bus) < 0)
- printk("RIO: failed to register RIO bus device\n");
- return bus_register(&rio_bus_type);
+ int ret;
+
+ ret = class_register(&rio_mport_class);
+ if (!ret) {
+ ret = bus_register(&rio_bus_type);
+ if (ret)
+ class_unregister(&rio_mport_class);
+ }
+ return ret;
}
postcore_initcall(rio_bus_init);
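
The reworked rio_bus_init() follows the usual register-then-roll-back pattern: the mport class is registered first, the bus type second, and the class is unregistered again if bus registration fails, instead of only printing a message as before. A sketch of the shape of that pattern, with stand-in functions rather than the kernel API:

#include <stdio.h>

static int class_register_stub(void)	{ return 0; }
static void class_unregister_stub(void)	{ puts("class rolled back"); }
static int bus_register_stub(void)	{ return -1; }	/* force the error path */

static int bus_init(void)
{
	int ret = class_register_stub();

	if (!ret) {
		ret = bus_register_stub();
		if (ret)
			class_unregister_stub();	/* undo the first step */
	}
	return ret;
}

int main(void)
{
	printf("bus_init() returned %d\n", bus_init());
	return 0;
}
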
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index d3a6539a77cc..47a1b2ea76c4 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -461,6 +461,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
rdev->comp_tag & RIO_CTAG_UDEVID);
}
+ rdev->dev.parent = &port->dev;
rio_attach_device(rdev);
device_initialize(&rdev->dev);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index e0221c6d0cc2..cdb005c0094d 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -341,3 +341,43 @@ const struct attribute_group *rio_bus_groups[] = {
&rio_bus_group,
NULL,
};
+
+static ssize_t
+port_destid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rio_mport *mport = to_rio_mport(dev);
+
+ if (mport)
+ return sprintf(buf, "0x%04x\n", mport->host_deviceid);
+ else
+ return -ENODEV;
+}
+static DEVICE_ATTR_RO(port_destid);
+
+static ssize_t sys_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rio_mport *mport = to_rio_mport(dev);
+
+ if (mport)
+ return sprintf(buf, "%u\n", mport->sys_size);
+ else
+ return -ENODEV;
+}
+static DEVICE_ATTR_RO(sys_size);
+
+static struct attribute *rio_mport_attrs[] = {
+ &dev_attr_port_destid.attr,
+ &dev_attr_sys_size.attr,
+ NULL,
+};
+
+static const struct attribute_group rio_mport_group = {
+ .attrs = rio_mport_attrs,
+};
+
+const struct attribute_group *rio_mport_groups[] = {
+ &rio_mport_group,
+ NULL,
+};
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 2e8a20cac588..a54ba0494dd3 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1884,6 +1884,7 @@ static int rio_get_hdid(int index)
int rio_register_mport(struct rio_mport *port)
{
struct rio_scan_node *scan = NULL;
+ int res = 0;
if (next_portid >= RIO_MAX_MPORTS) {
pr_err("RIO: reached specified max number of mports\n");
@@ -1894,6 +1895,16 @@ int rio_register_mport(struct rio_mport *port)
port->host_deviceid = rio_get_hdid(port->id);
port->nscan = NULL;
+ dev_set_name(&port->dev, "rapidio%d", port->id);
+ port->dev.class = &rio_mport_class;
+
+ res = device_register(&port->dev);
+ if (res)
+ dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n",
+ port->id, res);
+ else
+ dev_dbg(&port->dev, "RIO: mport%d registered\n", port->id);
+
mutex_lock(&rio_mport_list_lock);
list_add_tail(&port->node, &rio_mports);
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 5f99d22ad0b0..2d0550e08ea2 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -50,6 +50,7 @@ extern int rio_mport_scan(int mport_id);
/* Structures internal to the RIO core code */
extern const struct attribute_group *rio_dev_groups[];
extern const struct attribute_group *rio_bus_groups[];
+extern const struct attribute_group *rio_mport_groups[];
#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index d333f7eac106..7a721d67e6ac 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -310,10 +310,8 @@ static int pm800_regulator_probe(struct platform_device *pdev)
pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
GFP_KERNEL);
- if (!pm800_data) {
- dev_err(&pdev->dev, "Failed to allocate pm800_regualtors");
+ if (!pm800_data)
return -ENOMEM;
- }
pm800_data->map = chip->subchip->regmap_power;
pm800_data->chip = chip;
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index f704d83c93c4..337634ad0562 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -2,7 +2,7 @@
* Regulators driver for Marvell 88PM8607
*
* Copyright (C) 2009 Marvell International Ltd.
- * Haojian Zhuang <haojian.zhuang@marvell.com>
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -78,7 +78,7 @@ static const unsigned int BUCK2_suspend_table[] = {
};
static const unsigned int BUCK3_table[] = {
- 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
@@ -89,7 +89,7 @@ static const unsigned int BUCK3_table[] = {
};
static const unsigned int BUCK3_suspend_table[] = {
- 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
@@ -322,7 +322,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
nproot = of_node_get(pdev->dev.parent->of_node);
if (!nproot)
return -ENODEV;
- nproot = of_find_node_by_name(nproot, "regulators");
+ nproot = of_get_child_by_name(nproot, "regulators");
if (!nproot) {
dev_err(&pdev->dev, "failed to find regulators node\n");
return -ENODEV;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 6a7932822e37..903eb37f047a 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -139,6 +139,14 @@ config REGULATOR_AS3722
AS3722 PMIC. This will enable support for all the software
controllable DCDC/LDO regulators.
+config REGULATOR_BCM590XX
+ tristate "Broadcom BCM590xx PMU Regulators"
+ depends on MFD_BCM590XX
+ help
+ This driver provides support for the voltage regulators on the
+ BCM590xx PMUs. This will enable support for the
+ software-controllable LDO/switching regulators.
+
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
@@ -384,6 +392,15 @@ config REGULATOR_PALMAS
on the muxing. This is handled automatically in the driver by
reading the mux info from OTP.
+config REGULATOR_PBIAS
+ tristate "PBIAS OMAP regulator driver"
+ depends on (ARCH_OMAP || COMPILE_TEST) && MFD_SYSCON
+ help
+ Say y here to support the pbias regulator for MMC1/SD card I/O
+ on OMAP SoCs.
+ This driver provides support for OMAP pbias-modelled
+ regulators.
+
config REGULATOR_PCAP
tristate "Motorola PCAP2 regulator driver"
depends on EZX_PCAP
@@ -399,12 +416,12 @@ config REGULATOR_PCF50633
on PCF50633
config REGULATOR_PFUZE100
- tristate "Freescale PFUZE100 regulator driver"
+ tristate "Freescale PFUZE100/PFUZE200 regulator driver"
depends on I2C
select REGMAP_I2C
help
- Say y here to support the regulators found on the Freescale PFUZE100
- PMIC.
+ Say y here to support the regulators found on the Freescale
+ PFUZE100/PFUZE200 PMIC.
config REGULATOR_RC5T583
tristate "RICOH RC5T583 Power regulators"
@@ -416,13 +433,21 @@ config REGULATOR_RC5T583
through regulator interface. The device supports multiple DCDC/LDO
outputs which can be controlled by i2c communication.
+config REGULATOR_S2MPA01
+ tristate "Samsung S2MPA01 voltage regulator"
+ depends on MFD_SEC_CORE
+ help
+ This driver controls Samsung S2MPA01 voltage output regulator
+ via I2C bus. S2MPA01 has 10 Bucks and 26 LDO outputs.
+
config REGULATOR_S2MPS11
- tristate "Samsung S2MPS11 voltage regulator"
+ tristate "Samsung S2MPS11/S2MPS14 voltage regulator"
depends on MFD_SEC_CORE
help
- This driver supports a Samsung S2MPS11 voltage output regulator
- via I2C bus. S2MPS11 is comprised of high efficient Buck converters
- including Dual-Phase Buck converter, Buck-Boost converter, various LDOs.
+ This driver supports a Samsung S2MPS11/S2MPS14 voltage output
+ regulator via I2C bus. The chip comprises highly efficient Buck
+ converters, including a Dual-Phase Buck converter, a Buck-Boost
+ converter and various LDOs.
config REGULATOR_S5M8767
tristate "Samsung S5M8767A voltage regulator"
@@ -432,6 +457,12 @@ config REGULATOR_S5M8767
via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
supports DVS mode with 8bits of output voltage control.
+config REGULATOR_ST_PWM
+ tristate "STMicroelectronics PWM voltage regulator"
+ depends on ARCH_STI
+ help
+ This driver supports ST's PWM controlled voltage regulators.
+
config REGULATOR_TI_ABB
tristate "TI Adaptive Body Bias on-chip LDO"
depends on ARCH_OMAP
@@ -513,6 +544,15 @@ config REGULATOR_TPS65217
voltage regulators. It supports software based voltage control
for different voltage domains
+config REGULATOR_TPS65218
+ tristate "TI TPS65218 Power regulators"
+ depends on MFD_TPS65218 && OF
+ help
+ This driver supports TPS65218 voltage regulator chips. The TPS65218
+ provides six step-down converters and one general-purpose LDO
+ voltage regulator. It supports software-based voltage control
+ for different voltage domains.
+
config REGULATOR_TPS6524X
tristate "TI TPS6524X Power regulators"
depends on SPI
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 979f9ddcf259..12ef277a48b4 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
+obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
@@ -54,11 +55,14 @@ obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
+obj-$(CONFIG_REGULATOR_PBIAS) += pbias-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
+obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_ST_PWM) += st-pwm.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
@@ -67,6 +71,7 @@ obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65090) += tps65090-regulator.o
obj-$(CONFIG_REGULATOR_TPS65217) += tps65217-regulator.o
+obj-$(CONFIG_REGULATOR_TPS65218) += tps65218-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index f70a9bfa5ff2..c873ee0082cf 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -99,6 +99,7 @@ static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
static struct regulator_ops aat2870_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = aat2870_ldo_set_voltage_sel,
.get_voltage_sel = aat2870_ldo_get_voltage_sel,
.enable = aat2870_ldo_enable,
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 084cc0819a52..b92d7dd01a18 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -62,7 +62,6 @@
#define ACT8865_VOLTAGE_NUM 64
struct act8865 {
- struct regulator_dev *rdev[ACT8865_REG_NUM];
struct regmap *regmap;
};
@@ -213,7 +212,7 @@ static int act8865_pdata_from_dt(struct device *dev,
struct device_node *np;
struct act8865_regulator_data *regulator;
- np = of_find_node_by_name(dev->of_node, "regulators");
+ np = of_get_child_by_name(dev->of_node, "regulators");
if (!np) {
dev_err(dev, "missing 'regulators' subnode in DT\n");
return -EINVAL;
@@ -221,17 +220,15 @@ static int act8865_pdata_from_dt(struct device *dev,
matched = of_regulator_match(dev, np,
act8865_matches, ARRAY_SIZE(act8865_matches));
+ of_node_put(np);
if (matched <= 0)
return matched;
pdata->regulators = devm_kzalloc(dev,
sizeof(struct act8865_regulator_data) *
ARRAY_SIZE(act8865_matches), GFP_KERNEL);
- if (!pdata->regulators) {
- dev_err(dev, "%s: failed to allocate act8865 registor\n",
- __func__);
+ if (!pdata->regulators)
return -ENOMEM;
- }
pdata->num_regulators = matched;
regulator = pdata->regulators;
@@ -258,7 +255,7 @@ static inline int act8865_pdata_from_dt(struct device *dev,
static int act8865_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
- struct regulator_dev **rdev;
+ struct regulator_dev *rdev;
struct device *dev = &client->dev;
struct act8865_platform_data *pdata = dev_get_platdata(dev);
struct regulator_config config = { };
@@ -292,8 +289,6 @@ static int act8865_pmic_probe(struct i2c_client *client,
if (!act8865)
return -ENOMEM;
- rdev = act8865->rdev;
-
act8865->regmap = devm_regmap_init_i2c(client, &act8865_regmap_config);
if (IS_ERR(act8865->regmap)) {
error = PTR_ERR(act8865->regmap);
@@ -313,12 +308,12 @@ static int act8865_pmic_probe(struct i2c_client *client,
config.driver_data = act8865;
config.regmap = act8865->regmap;
- rdev[i] = devm_regulator_register(&client->dev,
- &act8865_reg[i], &config);
- if (IS_ERR(rdev[i])) {
+ rdev = devm_regulator_register(&client->dev, &act8865_reg[i],
+ &config);
+ if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n",
act8865_reg[id].name);
- return PTR_ERR(rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 862e63e451d0..7c397bb81e01 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -34,6 +34,9 @@
#define LDO_RAMP_UP_UNIT_IN_CYCLES 64 /* 64 cycles per step */
#define LDO_RAMP_UP_FREQ_IN_MHZ 24 /* cycle based on 24M OSC */
+#define LDO_POWER_GATE 0x00
+#define LDO_FET_FULL_ON 0x1f
+
struct anatop_regulator {
const char *name;
u32 control_reg;
@@ -48,19 +51,10 @@ struct anatop_regulator {
int max_voltage;
struct regulator_desc rdesc;
struct regulator_init_data *initdata;
+ bool bypass;
+ int sel;
};
-static int anatop_regmap_set_voltage_sel(struct regulator_dev *reg,
- unsigned selector)
-{
- struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
-
- if (!anatop_reg->control_reg)
- return -ENOTSUPP;
-
- return regulator_set_voltage_sel_regmap(reg, selector);
-}
-
static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
unsigned int old_sel,
unsigned int new_sel)
@@ -87,22 +81,99 @@ static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
return ret;
}
-static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg)
+static int anatop_regmap_enable(struct regulator_dev *reg)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+ int sel;
- if (!anatop_reg->control_reg)
- return -ENOTSUPP;
+ sel = anatop_reg->bypass ? LDO_FET_FULL_ON : anatop_reg->sel;
+ return regulator_set_voltage_sel_regmap(reg, sel);
+}
+
+static int anatop_regmap_disable(struct regulator_dev *reg)
+{
+ return regulator_set_voltage_sel_regmap(reg, LDO_POWER_GATE);
+}
+
+static int anatop_regmap_is_enabled(struct regulator_dev *reg)
+{
+ return regulator_get_voltage_sel_regmap(reg) != LDO_POWER_GATE;
+}
+
+static int anatop_regmap_core_set_voltage_sel(struct regulator_dev *reg,
+ unsigned selector)
+{
+ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+ int ret;
+
+ if (anatop_reg->bypass || !anatop_regmap_is_enabled(reg)) {
+ anatop_reg->sel = selector;
+ return 0;
+ }
+
+ ret = regulator_set_voltage_sel_regmap(reg, selector);
+ if (!ret)
+ anatop_reg->sel = selector;
+ return ret;
+}
+
+static int anatop_regmap_core_get_voltage_sel(struct regulator_dev *reg)
+{
+ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+
+ if (anatop_reg->bypass || !anatop_regmap_is_enabled(reg))
+ return anatop_reg->sel;
return regulator_get_voltage_sel_regmap(reg);
}
+static int anatop_regmap_get_bypass(struct regulator_dev *reg, bool *enable)
+{
+ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+ int sel;
+
+ sel = regulator_get_voltage_sel_regmap(reg);
+ if (sel == LDO_FET_FULL_ON)
+ WARN_ON(!anatop_reg->bypass);
+ else if (sel != LDO_POWER_GATE)
+ WARN_ON(anatop_reg->bypass);
+
+ *enable = anatop_reg->bypass;
+ return 0;
+}
+
+static int anatop_regmap_set_bypass(struct regulator_dev *reg, bool enable)
+{
+ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+ int sel;
+
+ if (enable == anatop_reg->bypass)
+ return 0;
+
+ sel = enable ? LDO_FET_FULL_ON : anatop_reg->sel;
+ anatop_reg->bypass = enable;
+
+ return regulator_set_voltage_sel_regmap(reg, sel);
+}
+
static struct regulator_ops anatop_rops = {
- .set_voltage_sel = anatop_regmap_set_voltage_sel,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+};
+
+static struct regulator_ops anatop_core_rops = {
+ .enable = anatop_regmap_enable,
+ .disable = anatop_regmap_disable,
+ .is_enabled = anatop_regmap_is_enabled,
+ .set_voltage_sel = anatop_regmap_core_set_voltage_sel,
.set_voltage_time_sel = anatop_regmap_set_voltage_time_sel,
- .get_voltage_sel = anatop_regmap_get_voltage_sel,
+ .get_voltage_sel = anatop_regmap_core_get_voltage_sel,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
+ .get_bypass = anatop_regmap_get_bypass,
+ .set_bypass = anatop_regmap_set_bypass,
};
static int anatop_regulator_probe(struct platform_device *pdev)
@@ -116,6 +187,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
struct regulator_init_data *initdata;
struct regulator_config config = { };
int ret = 0;
+ u32 val;
initdata = of_get_regulator_init_data(dev, np);
sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL);
@@ -125,7 +197,6 @@ static int anatop_regulator_probe(struct platform_device *pdev)
sreg->name = of_get_property(np, "regulator-name", NULL);
rdesc = &sreg->rdesc;
rdesc->name = sreg->name;
- rdesc->ops = &anatop_rops;
rdesc->type = REGULATOR_VOLTAGE;
rdesc->owner = THIS_MODULE;
@@ -197,6 +268,25 @@ static int anatop_regulator_probe(struct platform_device *pdev)
config.of_node = pdev->dev.of_node;
config.regmap = sreg->anatop;
+ /* Only core regulators have the ramp up delay configuration. */
+ if (sreg->control_reg && sreg->delay_bit_width) {
+ rdesc->ops = &anatop_core_rops;
+
+ ret = regmap_read(config.regmap, rdesc->vsel_reg, &val);
+ if (ret) {
+ dev_err(dev, "failed to read initial state\n");
+ return ret;
+ }
+
+ sreg->sel = (val & rdesc->vsel_mask) >> sreg->vol_bit_shift;
+ if (sreg->sel == LDO_FET_FULL_ON) {
+ sreg->sel = 0;
+ sreg->bypass = true;
+ }
+ } else {
+ rdesc->ops = &anatop_rops;
+ }
+
/* register regulator */
rdev = devm_regulator_register(dev, rdesc, &config);
if (IS_ERR(rdev)) {
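
The core-regulator ops added above keep a software copy of the selector because the same register field doubles as the enable control: LDO_POWER_GATE means off and LDO_FET_FULL_ON means bypass, so a voltage set while the LDO is gated or bypassed is only cached and gets written back when the regulator is enabled again. A compact standalone model of that behaviour (the register is a plain variable here):

#include <stdio.h>

#define LDO_POWER_GATE	0x00
#define LDO_FET_FULL_ON	0x1f

static unsigned int vsel_reg = LDO_POWER_GATE;	/* simulated ANATOP vsel field */

struct anatop_model {
	int bypass;
	unsigned int sel;	/* cached selector */
};

static void core_set_voltage_sel(struct anatop_model *r, unsigned int selector)
{
	r->sel = selector;
	if (!r->bypass && vsel_reg != LDO_POWER_GATE)
		vsel_reg = selector;	/* regulator is driving: write through */
}

static void core_enable(struct anatop_model *r)
{
	vsel_reg = r->bypass ? LDO_FET_FULL_ON : r->sel;
}

int main(void)
{
	struct anatop_model ldo = { .bypass = 0, .sel = 0 };

	core_set_voltage_sel(&ldo, 0x10);	/* gated: only cached */
	printf("gated:   reg=0x%02x cached=0x%02x\n", vsel_reg, ldo.sel);
	core_enable(&ldo);
	printf("enabled: reg=0x%02x\n", vsel_reg);
	return 0;
}
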
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index 4f6c2055f6b2..b1033d30b504 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -153,11 +153,9 @@ static const struct regulator_desc arizona_ldo1 = {
.vsel_reg = ARIZONA_LDO1_CONTROL_1,
.vsel_mask = ARIZONA_LDO1_VSEL_MASK,
- .bypass_reg = ARIZONA_LDO1_CONTROL_1,
- .bypass_mask = ARIZONA_LDO1_BYPASS,
.min_uV = 900000,
- .uV_step = 50000,
- .n_voltages = 7,
+ .uV_step = 25000,
+ .n_voltages = 13,
.enable_time = 500,
.owner = THIS_MODULE,
@@ -189,10 +187,8 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
int ret;
ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL);
- if (ldo1 == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!ldo1)
return -ENOMEM;
- }
ldo1->arizona = arizona;
@@ -203,6 +199,7 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
*/
switch (arizona->type) {
case WM5102:
+ case WM8997:
desc = &arizona_ldo1_hc;
ldo1->init_data = arizona_ldo1_dvfs;
break;
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index 034ece707083..6fdd9bf6927f 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -204,10 +204,8 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
int ret;
micsupp = devm_kzalloc(&pdev->dev, sizeof(*micsupp), GFP_KERNEL);
- if (micsupp == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!micsupp)
return -ENOMEM;
- }
micsupp->arizona = arizona;
INIT_WORK(&micsupp->check_cp_work, arizona_micsupp_check_cp);
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index c77a58478cca..b47283f91e2d 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -191,7 +191,7 @@ static int as3711_regulator_parse_dt(struct device *dev,
{
struct as3711_regulator_pdata *pdata = dev_get_platdata(dev);
struct device_node *regulators =
- of_find_node_by_name(dev->parent->of_node, "regulators");
+ of_get_child_by_name(dev->parent->of_node, "regulators");
struct of_regulator_match *match;
int ret, i;
@@ -221,7 +221,6 @@ static int as3711_regulator_probe(struct platform_device *pdev)
{
struct as3711_regulator_pdata *pdata = dev_get_platdata(&pdev->dev);
struct as3711 *as3711 = dev_get_drvdata(pdev->dev.parent);
- struct regulator_init_data *reg_data;
struct regulator_config config = {.dev = &pdev->dev,};
struct as3711_regulator *reg = NULL;
struct as3711_regulator *regs;
@@ -246,22 +245,14 @@ static int as3711_regulator_probe(struct platform_device *pdev)
regs = devm_kzalloc(&pdev->dev, AS3711_REGULATOR_NUM *
sizeof(struct as3711_regulator), GFP_KERNEL);
- if (!regs) {
- dev_err(&pdev->dev, "Memory allocation failed exiting..\n");
+ if (!regs)
return -ENOMEM;
- }
for (id = 0, ri = as3711_reg_info; id < AS3711_REGULATOR_NUM; ++id, ri++) {
- reg_data = pdata->init_data[id];
-
- /* No need to register if there is no regulator data */
- if (!reg_data)
- continue;
-
reg = &regs[id];
reg->reg_info = ri;
- config.init_data = reg_data;
+ config.init_data = pdata->init_data[id];
config.driver_data = reg;
config.regmap = as3711->regmap;
config.of_node = of_node[id];
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 8b17d786cb71..85585219ce82 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -719,6 +719,7 @@ static int as3722_get_regulator_dt_data(struct platform_device *pdev,
ret = of_regulator_match(&pdev->dev, np, as3722_regulator_matches,
ARRAY_SIZE(as3722_regulator_matches));
+ of_node_put(np);
if (ret < 0) {
dev_err(&pdev->dev, "Parsing of regulator node failed: %d\n",
ret);
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
new file mode 100644
index 000000000000..c3750c5b382b
--- /dev/null
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -0,0 +1,404 @@
+/*
+ * Broadcom BCM590xx regulator driver
+ *
+ * Copyright 2014 Linaro Limited
+ * Author: Matt Porter <mporter@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/bcm590xx.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+/* Register defs */
+#define BCM590XX_RFLDOPMCTRL1 0x60
+#define BCM590XX_IOSR1PMCTRL1 0x7a
+#define BCM590XX_IOSR2PMCTRL1 0x7c
+#define BCM590XX_CSRPMCTRL1 0x7e
+#define BCM590XX_SDSR1PMCTRL1 0x82
+#define BCM590XX_SDSR2PMCTRL1 0x86
+#define BCM590XX_MSRPMCTRL1 0x8a
+#define BCM590XX_VSRPMCTRL1 0x8e
+#define BCM590XX_REG_ENABLE BIT(7)
+
+#define BCM590XX_RFLDOCTRL 0x96
+#define BCM590XX_CSRVOUT1 0xc0
+#define BCM590XX_LDO_VSEL_MASK GENMASK(5, 3)
+#define BCM590XX_SR_VSEL_MASK GENMASK(5, 0)
+
+/* LDO regulator IDs */
+#define BCM590XX_REG_RFLDO 0
+#define BCM590XX_REG_CAMLDO1 1
+#define BCM590XX_REG_CAMLDO2 2
+#define BCM590XX_REG_SIMLDO1 3
+#define BCM590XX_REG_SIMLDO2 4
+#define BCM590XX_REG_SDLDO 5
+#define BCM590XX_REG_SDXLDO 6
+#define BCM590XX_REG_MMCLDO1 7
+#define BCM590XX_REG_MMCLDO2 8
+#define BCM590XX_REG_AUDLDO 9
+#define BCM590XX_REG_MICLDO 10
+#define BCM590XX_REG_USBLDO 11
+#define BCM590XX_REG_VIBLDO 12
+
+/* DCDC regulator IDs */
+#define BCM590XX_REG_CSR 13
+#define BCM590XX_REG_IOSR1 14
+#define BCM590XX_REG_IOSR2 15
+#define BCM590XX_REG_MSR 16
+#define BCM590XX_REG_SDSR1 17
+#define BCM590XX_REG_SDSR2 18
+#define BCM590XX_REG_VSR 19
+
+#define BCM590XX_NUM_REGS 20
+
+#define BCM590XX_REG_IS_LDO(n) (n < BCM590XX_REG_CSR)
+
+struct bcm590xx_board {
+ struct regulator_init_data *bcm590xx_pmu_init_data[BCM590XX_NUM_REGS];
+};
+
+/* LDO group A: supported voltages in microvolts */
+static const unsigned int ldo_a_table[] = {
+ 1200000, 1800000, 2500000, 2700000, 2800000,
+ 2900000, 3000000, 3300000,
+};
+
+/* LDO group C: supported voltages in microvolts */
+static const unsigned int ldo_c_table[] = {
+ 3100000, 1800000, 2500000, 2700000, 2800000,
+ 2900000, 3000000, 3300000,
+};
+
+/* DCDC group CSR: supported voltages in microvolts */
+static const struct regulator_linear_range dcdc_csr_ranges[] = {
+ REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
+ REGULATOR_LINEAR_RANGE(1360000, 51, 55, 20000),
+ REGULATOR_LINEAR_RANGE(900000, 56, 63, 0),
+};
+
+/* DCDC group IOSR1: supported voltages in microvolts */
+static const struct regulator_linear_range dcdc_iosr1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(860000, 2, 51, 10000),
+ REGULATOR_LINEAR_RANGE(1500000, 52, 52, 0),
+ REGULATOR_LINEAR_RANGE(1800000, 53, 53, 0),
+ REGULATOR_LINEAR_RANGE(900000, 54, 63, 0),
+};
+
+/* DCDC group SDSR1: supported voltages in microvolts */
+static const struct regulator_linear_range dcdc_sdsr1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
+ REGULATOR_LINEAR_RANGE(1340000, 51, 51, 0),
+ REGULATOR_LINEAR_RANGE(900000, 52, 63, 0),
+};
+
+struct bcm590xx_info {
+ const char *name;
+ const char *vin_name;
+ u8 n_voltages;
+ const unsigned int *volt_table;
+ u8 n_linear_ranges;
+ const struct regulator_linear_range *linear_ranges;
+};
+
+#define BCM590XX_REG_TABLE(_name, _table) \
+ { \
+ .name = #_name, \
+ .n_voltages = ARRAY_SIZE(_table), \
+ .volt_table = _table, \
+ }
+
+#define BCM590XX_REG_RANGES(_name, _ranges) \
+ { \
+ .name = #_name, \
+ .n_voltages = 64, \
+ .n_linear_ranges = ARRAY_SIZE(_ranges), \
+ .linear_ranges = _ranges, \
+ }
+
+static struct bcm590xx_info bcm590xx_regs[] = {
+ BCM590XX_REG_TABLE(rfldo, ldo_a_table),
+ BCM590XX_REG_TABLE(camldo1, ldo_c_table),
+ BCM590XX_REG_TABLE(camldo2, ldo_c_table),
+ BCM590XX_REG_TABLE(simldo1, ldo_a_table),
+ BCM590XX_REG_TABLE(simldo2, ldo_a_table),
+ BCM590XX_REG_TABLE(sdldo, ldo_c_table),
+ BCM590XX_REG_TABLE(sdxldo, ldo_a_table),
+ BCM590XX_REG_TABLE(mmcldo1, ldo_a_table),
+ BCM590XX_REG_TABLE(mmcldo2, ldo_a_table),
+ BCM590XX_REG_TABLE(audldo, ldo_a_table),
+ BCM590XX_REG_TABLE(micldo, ldo_a_table),
+ BCM590XX_REG_TABLE(usbldo, ldo_a_table),
+ BCM590XX_REG_TABLE(vibldo, ldo_c_table),
+ BCM590XX_REG_RANGES(csr, dcdc_csr_ranges),
+ BCM590XX_REG_RANGES(iosr1, dcdc_iosr1_ranges),
+ BCM590XX_REG_RANGES(iosr2, dcdc_iosr1_ranges),
+ BCM590XX_REG_RANGES(msr, dcdc_iosr1_ranges),
+ BCM590XX_REG_RANGES(sdsr1, dcdc_sdsr1_ranges),
+ BCM590XX_REG_RANGES(sdsr2, dcdc_iosr1_ranges),
+ BCM590XX_REG_RANGES(vsr, dcdc_iosr1_ranges),
+};
+
+struct bcm590xx_reg {
+ struct regulator_desc *desc;
+ struct bcm590xx *mfd;
+ struct bcm590xx_info **info;
+};
+
+static int bcm590xx_get_vsel_register(int id)
+{
+ if (BCM590XX_REG_IS_LDO(id))
+ return BCM590XX_RFLDOCTRL + id;
+ else
+ return BCM590XX_CSRVOUT1 + (id - BCM590XX_REG_CSR) * 3;
+}
+
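+/*
+ * LDO enable bits live in per-LDO PMCTRL1 registers spaced two registers
+ * apart; the DCDC enable registers are mapped individually in the switch below.
+ */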
+static int bcm590xx_get_enable_register(int id)
+{
+ int reg = 0;
+
+ if (BCM590XX_REG_IS_LDO(id))
+ reg = BCM590XX_RFLDOPMCTRL1 + id * 2;
+ else
+ switch (id) {
+ case BCM590XX_REG_CSR:
+ reg = BCM590XX_CSRPMCTRL1;
+ break;
+ case BCM590XX_REG_IOSR1:
+ reg = BCM590XX_IOSR1PMCTRL1;
+ break;
+ case BCM590XX_REG_IOSR2:
+ reg = BCM590XX_IOSR2PMCTRL1;
+ break;
+ case BCM590XX_REG_MSR:
+ reg = BCM590XX_MSRPMCTRL1;
+ break;
+ case BCM590XX_REG_SDSR1:
+ reg = BCM590XX_SDSR1PMCTRL1;
+ break;
+ case BCM590XX_REG_SDSR2:
+ reg = BCM590XX_SDSR2PMCTRL1;
+ break;
+ }
+
+ return reg;
+}
+
+static struct regulator_ops bcm590xx_ops_ldo = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+};
+
+static struct regulator_ops bcm590xx_ops_dcdc = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+#define BCM590XX_MATCH(_name, _id) \
+ { \
+ .name = #_name, \
+ .driver_data = (void *)&bcm590xx_regs[BCM590XX_REG_##_id], \
+ }
+
+static struct of_regulator_match bcm590xx_matches[] = {
+ BCM590XX_MATCH(rfldo, RFLDO),
+ BCM590XX_MATCH(camldo1, CAMLDO1),
+ BCM590XX_MATCH(camldo2, CAMLDO2),
+ BCM590XX_MATCH(simldo1, SIMLDO1),
+ BCM590XX_MATCH(simldo2, SIMLDO2),
+ BCM590XX_MATCH(sdldo, SDLDO),
+ BCM590XX_MATCH(sdxldo, SDXLDO),
+ BCM590XX_MATCH(mmcldo1, MMCLDO1),
+ BCM590XX_MATCH(mmcldo2, MMCLDO2),
+ BCM590XX_MATCH(audldo, AUDLDO),
+ BCM590XX_MATCH(micldo, MICLDO),
+ BCM590XX_MATCH(usbldo, USBLDO),
+ BCM590XX_MATCH(vibldo, VIBLDO),
+ BCM590XX_MATCH(csr, CSR),
+ BCM590XX_MATCH(iosr1, IOSR1),
+ BCM590XX_MATCH(iosr2, IOSR2),
+ BCM590XX_MATCH(msr, MSR),
+ BCM590XX_MATCH(sdsr1, SDSR1),
+ BCM590XX_MATCH(sdsr2, SDSR2),
+ BCM590XX_MATCH(vsr, VSR),
+};
+
+static struct bcm590xx_board *bcm590xx_parse_dt_reg_data(
+ struct platform_device *pdev,
+ struct of_regulator_match **bcm590xx_reg_matches)
+{
+ struct bcm590xx_board *data;
+ struct device_node *np = pdev->dev.parent->of_node;
+ struct device_node *regulators;
+ struct of_regulator_match *matches = bcm590xx_matches;
+ int count = ARRAY_SIZE(bcm590xx_matches);
+ int idx = 0;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "of node not found\n");
+ return NULL;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to allocate regulator board data\n");
+ return NULL;
+ }
+
+ np = of_node_get(np);
+ regulators = of_get_child_by_name(np, "regulators");
+ if (!regulators) {
+ dev_warn(&pdev->dev, "regulator node not found\n");
+ return NULL;
+ }
+
+ ret = of_regulator_match(&pdev->dev, regulators, matches, count);
+ of_node_put(regulators);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
+ ret);
+ return NULL;
+ }
+
+ *bcm590xx_reg_matches = matches;
+
+ for (idx = 0; idx < count; idx++) {
+ if (!matches[idx].init_data || !matches[idx].of_node)
+ continue;
+
+ data->bcm590xx_pmu_init_data[idx] = matches[idx].init_data;
+ }
+
+ return data;
+}
+
+static int bcm590xx_probe(struct platform_device *pdev)
+{
+ struct bcm590xx *bcm590xx = dev_get_drvdata(pdev->dev.parent);
+ struct bcm590xx_board *pmu_data = NULL;
+ struct bcm590xx_reg *pmu;
+ struct regulator_config config = { };
+ struct bcm590xx_info *info;
+ struct regulator_init_data *reg_data;
+ struct regulator_dev *rdev;
+ struct of_regulator_match *bcm590xx_reg_matches = NULL;
+ int i;
+
+ pmu_data = bcm590xx_parse_dt_reg_data(pdev,
+ &bcm590xx_reg_matches);
+
+ pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu) {
+ dev_err(&pdev->dev, "Memory allocation failed for pmu\n");
+ return -ENOMEM;
+ }
+
+ pmu->mfd = bcm590xx;
+
+ platform_set_drvdata(pdev, pmu);
+
+ pmu->desc = devm_kzalloc(&pdev->dev, BCM590XX_NUM_REGS *
+ sizeof(struct regulator_desc), GFP_KERNEL);
+ if (!pmu->desc) {
+ dev_err(&pdev->dev, "Memory alloc fails for desc\n");
+ return -ENOMEM;
+ }
+
+ pmu->info = devm_kzalloc(&pdev->dev, BCM590XX_NUM_REGS *
+ sizeof(struct bcm590xx_info *), GFP_KERNEL);
+ if (!pmu->info) {
+ dev_err(&pdev->dev, "Memory alloc fails for info\n");
+ return -ENOMEM;
+ }
+
+ info = bcm590xx_regs;
+
+ for (i = 0; i < BCM590XX_NUM_REGS; i++, info++) {
+ if (pmu_data)
+ reg_data = pmu_data->bcm590xx_pmu_init_data[i];
+ else
+ reg_data = NULL;
+
+ /* Register the regulators */
+ pmu->info[i] = info;
+
+ pmu->desc[i].name = info->name;
+ pmu->desc[i].supply_name = info->vin_name;
+ pmu->desc[i].id = i;
+ pmu->desc[i].volt_table = info->volt_table;
+ pmu->desc[i].n_voltages = info->n_voltages;
+ pmu->desc[i].linear_ranges = info->linear_ranges;
+ pmu->desc[i].n_linear_ranges = info->n_linear_ranges;
+
+ if (BCM590XX_REG_IS_LDO(i)) {
+ pmu->desc[i].ops = &bcm590xx_ops_ldo;
+ pmu->desc[i].vsel_mask = BCM590XX_LDO_VSEL_MASK;
+ } else {
+ pmu->desc[i].ops = &bcm590xx_ops_dcdc;
+ pmu->desc[i].vsel_mask = BCM590XX_SR_VSEL_MASK;
+ }
+
+ pmu->desc[i].vsel_reg = bcm590xx_get_vsel_register(i);
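+ /* Enable bits are inverted: writing the enable_mask bits disables the regulator */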
+ pmu->desc[i].enable_is_inverted = true;
+ pmu->desc[i].enable_mask = BCM590XX_REG_ENABLE;
+ pmu->desc[i].enable_reg = bcm590xx_get_enable_register(i);
+ pmu->desc[i].type = REGULATOR_VOLTAGE;
+ pmu->desc[i].owner = THIS_MODULE;
+
+ config.dev = bcm590xx->dev;
+ config.init_data = reg_data;
+ config.driver_data = pmu;
+ config.regmap = bcm590xx->regmap;
+
+ if (bcm590xx_reg_matches)
+ config.of_node = bcm590xx_reg_matches[i].of_node;
+
+ rdev = devm_regulator_register(&pdev->dev, &pmu->desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(bcm590xx->dev,
+ "failed to register %s regulator\n",
+ info->name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver bcm590xx_regulator_driver = {
+ .driver = {
+ .name = "bcm590xx-vregs",
+ .owner = THIS_MODULE,
+ },
+ .probe = bcm590xx_probe,
+};
+module_platform_driver(bcm590xx_regulator_driver);
+
+MODULE_AUTHOR("Matt Porter <mporter@linaro.org>");
+MODULE_DESCRIPTION("BCM590xx voltage regulator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bcm590xx-vregs");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index afca1bc24f26..9a09f3cdbabb 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2138,7 +2138,7 @@ EXPORT_SYMBOL_GPL(regulator_is_enabled);
* @regulator: regulator source
*
* Returns positive if the regulator driver backing the source/client
- * can change its voltage, false otherwise. Usefull for detecting fixed
+ * can change its voltage, false otherwise. Useful for detecting fixed
* or dummy regulators and disabling voltage change logic in the client
* driver.
*/
@@ -2399,6 +2399,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
struct regulator_dev *rdev = regulator->rdev;
int ret = 0;
int old_min_uV, old_max_uV;
+ int current_uV;
mutex_lock(&rdev->mutex);
@@ -2409,6 +2410,19 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
goto out;
+ /* If we're trying to set a range that overlaps the current voltage,
+ * return successfully even though the regulator does not support
+ * changing the voltage.
+ */
+ if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ current_uV = _regulator_get_voltage(rdev);
+ if (min_uV <= current_uV && current_uV <= max_uV) {
+ regulator->min_uV = min_uV;
+ regulator->max_uV = max_uV;
+ goto out;
+ }
+ }
+
/* sanity check */
if (!rdev->desc->ops->set_voltage &&
!rdev->desc->ops->set_voltage_sel) {
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 3adeaeffc485..fdb6ea8ae7e6 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -240,6 +240,31 @@ static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev,
return ret;
}
+static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_sel,
+ unsigned int new_sel)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int id = rdev_get_id(rdev);
+ int ret = 0;
+
+ /* The DVC controlled LDOs and DCDCs ramp with 6.25mV/µs after enabling
+ * the activate bit.
+ */
+ switch (id) {
+ case DA9052_ID_BUCK1:
+ case DA9052_ID_BUCK2:
+ case DA9052_ID_BUCK3:
+ case DA9052_ID_LDO2:
+ case DA9052_ID_LDO3:
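+ /* ramp delay in microseconds: voltage delta (uV) / 6250 (uV/us) */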
+ ret = (new_sel - old_sel) * info->step_uV / 6250;
+ break;
+ }
+
+ return ret;
+}
+
static struct regulator_ops da9052_dcdc_ops = {
.get_current_limit = da9052_dcdc_get_current_limit,
.set_current_limit = da9052_dcdc_set_current_limit,
@@ -248,6 +273,7 @@ static struct regulator_ops da9052_dcdc_ops = {
.map_voltage = da9052_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = da9052_regulator_set_voltage_sel,
+ .set_voltage_time_sel = da9052_regulator_set_voltage_time_sel,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -258,6 +284,7 @@ static struct regulator_ops da9052_ldo_ops = {
.map_voltage = da9052_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = da9052_regulator_set_voltage_sel,
+ .set_voltage_time_sel = da9052_regulator_set_voltage_time_sel,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -401,7 +428,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
if (!nproot)
return -ENODEV;
- nproot = of_find_node_by_name(nproot, "regulators");
+ nproot = of_get_child_by_name(nproot, "regulators");
if (!nproot)
return -ENODEV;
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index b14ebdad5dd2..9516317e1a9f 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -19,6 +19,8 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/mfd/da9055/core.h>
#include <linux/mfd/da9055/reg.h>
@@ -446,6 +448,9 @@ static int da9055_gpio_init(struct da9055_regulator *regulator,
struct da9055_regulator_info *info = regulator->info;
int ret = 0;
+ if (!pdata)
+ return 0;
+
if (pdata->gpio_ren && pdata->gpio_ren[id]) {
char name[18];
int gpio_mux = pdata->gpio_ren[id];
@@ -530,6 +535,59 @@ static inline struct da9055_regulator_info *find_regulator_info(int id)
return NULL;
}
+#ifdef CONFIG_OF
+static struct of_regulator_match da9055_reg_matches[] = {
+ { .name = "BUCK1", },
+ { .name = "BUCK2", },
+ { .name = "LDO1", },
+ { .name = "LDO2", },
+ { .name = "LDO3", },
+ { .name = "LDO4", },
+ { .name = "LDO5", },
+ { .name = "LDO6", },
+};
+
+static int da9055_regulator_dt_init(struct platform_device *pdev,
+ struct da9055_regulator *regulator,
+ struct regulator_config *config,
+ int regid)
+{
+ struct device_node *nproot, *np;
+ int ret;
+
+ nproot = of_node_get(pdev->dev.parent->of_node);
+ if (!nproot)
+ return -ENODEV;
+
+ np = of_get_child_by_name(nproot, "regulators");
+ if (!np)
+ return -ENODEV;
+
+ ret = of_regulator_match(&pdev->dev, np, &da9055_reg_matches[regid], 1);
+ of_node_put(nproot);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error matching regulator: %d\n", ret);
+ return ret;
+ }
+
+ config->init_data = da9055_reg_matches[regid].init_data;
+ config->of_node = da9055_reg_matches[regid].of_node;
+
+ if (!config->of_node)
+ return -ENODEV;
+
+ return 0;
+}
+#else
+static inline int da9055_regulator_dt_init(struct platform_device *pdev,
+ struct da9055_regulator *regulator,
+ struct regulator_config *config,
+ int regid)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_OF */
+
static int da9055_regulator_probe(struct platform_device *pdev)
{
struct regulator_config config = { };
@@ -538,9 +596,6 @@ static int da9055_regulator_probe(struct platform_device *pdev)
struct da9055_pdata *pdata = dev_get_platdata(da9055->dev);
int ret, irq;
- if (pdata == NULL || pdata->regulators[pdev->id] == NULL)
- return -ENODEV;
-
regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9055_regulator),
GFP_KERNEL);
if (!regulator)
@@ -557,8 +612,14 @@ static int da9055_regulator_probe(struct platform_device *pdev)
config.driver_data = regulator;
config.regmap = da9055->regmap;
- if (pdata && pdata->regulators)
+ if (pdata && pdata->regulators) {
config.init_data = pdata->regulators[pdev->id];
+ } else {
+ ret = da9055_regulator_dt_init(pdev, regulator, &config,
+ pdev->id);
+ if (ret < 0)
+ return ret;
+ }
ret = da9055_gpio_init(regulator, &config, pdata, pdev->id);
if (ret < 0)
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 91e99a2c8dc1..7c9461d13313 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -365,7 +365,7 @@ static int da9063_set_suspend_voltage(struct regulator_dev *rdev, int uV)
sel = regulator_map_voltage_linear(rdev, uV, uV);
if (sel < 0)
- return -EINVAL;
+ return sel;
sel <<= ffs(rdev->desc->vsel_mask) - 1;
@@ -666,7 +666,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
struct device_node *node;
int i, n, num;
- node = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ node = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!node) {
dev_err(&pdev->dev, "Regulators device node not found\n");
return ERR_PTR(-ENODEV);
@@ -674,6 +674,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
num = of_regulator_match(&pdev->dev, node, da9063_matches,
ARRAY_SIZE(da9063_matches));
+ of_node_put(node);
if (num < 0) {
dev_err(&pdev->dev, "Failed to match regulators\n");
return ERR_PTR(-EINVAL);
@@ -710,7 +711,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
struct platform_device *pdev,
struct of_regulator_match **da9063_reg_matches)
{
- da9063_reg_matches = NULL;
+ *da9063_reg_matches = NULL;
return ERR_PTR(-ENODEV);
}
#endif
@@ -756,7 +757,7 @@ static int da9063_regulator_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"Error while reading BUCKs configuration\n");
- return -EIO;
+ return ret;
}
bcores_merged = val & DA9063_BCORE_MERGE;
bmem_bio_merged = val & DA9063_BUCK_MERGE;
@@ -775,10 +776,8 @@ static int da9063_regulator_probe(struct platform_device *pdev)
size = sizeof(struct da9063_regulators) +
n_regulators * sizeof(struct da9063_regulator);
regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!regulators) {
- dev_err(&pdev->dev, "No memory for regulators\n");
+ if (!regulators)
return -ENOMEM;
- }
regulators->n_regulators = n_regulators;
platform_set_drvdata(pdev, regulators);
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 6f5ecbe1132e..7a320dd11c46 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -134,11 +134,8 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
int error;
chip = devm_kzalloc(&i2c->dev, sizeof(struct da9210), GFP_KERNEL);
- if (NULL == chip) {
- dev_err(&i2c->dev,
- "Cannot kzalloc memory for regulator structure\n");
+ if (!chip)
return -ENOMEM;
- }
chip->regmap = devm_regmap_init_i2c(i2c, &da9210_regmap_config);
if (IS_ERR(chip->regmap)) {
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 846acf240e48..617c1adca816 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -263,6 +263,8 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .fixed_uV = 1800000,
+ .n_voltages = 1,
},
.exclude_from_power_state = true,
},
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
index ce89f7848a57..2d16b9f16de7 100644
--- a/drivers/regulator/dbx500-prcmu.c
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -78,6 +78,7 @@ static struct ux500_regulator_debug {
void ux500_regulator_suspend_debug(void)
{
int i;
+
for (i = 0; i < rdebug.num_regulators; i++)
rdebug.state_before_suspend[i] =
rdebug.regulator_array[i].is_enabled;
@@ -86,6 +87,7 @@ void ux500_regulator_suspend_debug(void)
void ux500_regulator_resume_debug(void)
{
int i;
+
for (i = 0; i < rdebug.num_regulators; i++)
rdebug.state_after_suspend[i] =
rdebug.regulator_array[i].is_enabled;
@@ -127,9 +129,9 @@ static int ux500_regulator_status_print(struct seq_file *s, void *p)
int i;
/* print dump header */
- err = seq_printf(s, "ux500-regulator status:\n");
+ err = seq_puts(s, "ux500-regulator status:\n");
if (err < 0)
- dev_err(dev, "seq_printf overflow\n");
+ dev_err(dev, "seq_puts overflow\n");
err = seq_printf(s, "%31s : %8s : %8s\n", "current",
"before", "after");
@@ -202,18 +204,12 @@ ux500_regulator_debug_init(struct platform_device *pdev,
rdebug.num_regulators = num_regulators;
rdebug.state_before_suspend = kzalloc(num_regulators, GFP_KERNEL);
- if (!rdebug.state_before_suspend) {
- dev_err(&pdev->dev,
- "could not allocate memory for saving state\n");
+ if (!rdebug.state_before_suspend)
goto exit_destroy_power_state;
- }
rdebug.state_after_suspend = kzalloc(num_regulators, GFP_KERNEL);
- if (!rdebug.state_after_suspend) {
- dev_err(&pdev->dev,
- "could not allocate memory for saving state\n");
+ if (!rdebug.state_after_suspend)
goto exit_free;
- }
dbx500_regulator_testcase(regulator_info, num_regulators);
return 0;
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index df9f42524abb..2436db9e2ca3 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -25,7 +25,11 @@
struct regulator_dev *dummy_regulator_rdev;
-static struct regulator_init_data dummy_initdata;
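+/* The dummy regulator cannot be controlled, so flag it as always-on */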
+static struct regulator_init_data dummy_initdata = {
+ .constraints = {
+ .always_on = 1,
+ },
+};
static struct regulator_ops dummy_ops;
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 7ca3d9e3b0fe..714fd9a89aa1 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -90,11 +90,11 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV)
return 0;
ret = regulator_map_voltage_linear(rdev, uV, uV);
if (ret < 0)
- return -EINVAL;
+ return ret;
ret = regmap_update_bits(di->regmap, di->sleep_reg,
VSEL_NSEL_MASK, ret);
if (ret < 0)
- return -EINVAL;
+ return ret;
/* Cache the sleep voltage setting.
* Might not be the real voltage which is rounded */
di->sleep_vol_cache = uV;
@@ -244,10 +244,9 @@ static int fan53555_regulator_probe(struct i2c_client *client,
di = devm_kzalloc(&client->dev, sizeof(struct fan53555_device_info),
GFP_KERNEL);
- if (!di) {
- dev_err(&client->dev, "Failed to allocate device info data!\n");
+ if (!di)
return -ENOMEM;
- }
+
di->regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config);
if (IS_ERR(di->regmap)) {
dev_err(&client->dev, "Failed to allocate regmap!\n");
@@ -260,14 +259,14 @@ static int fan53555_regulator_probe(struct i2c_client *client,
ret = regmap_read(di->regmap, FAN53555_ID1, &val);
if (ret < 0) {
dev_err(&client->dev, "Failed to get chip ID!\n");
- return -ENODEV;
+ return ret;
}
di->chip_id = val & DIE_ID;
/* Get chip revision */
ret = regmap_read(di->regmap, FAN53555_ID2, &val);
if (ret < 0) {
dev_err(&client->dev, "Failed to get chip Rev!\n");
- return -ENODEV;
+ return ret;
}
di->chip_rev = val & DIE_REV;
dev_info(&client->dev, "FAN53555 Option[%d] Rev[%d] Detected!\n",
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 5ea64b94341c..c61f7e97e4f8 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -130,17 +130,15 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct fixed_voltage_data),
GFP_KERNEL);
- if (drvdata == NULL) {
- dev_err(&pdev->dev, "Failed to allocate device data\n");
- ret = -ENOMEM;
- goto err;
- }
+ if (!drvdata)
+ return -ENOMEM;
- drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
+ drvdata->desc.name = devm_kstrdup(&pdev->dev,
+ config->supply_name,
+ GFP_KERNEL);
if (drvdata->desc.name == NULL) {
dev_err(&pdev->dev, "Failed to allocate supply name\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
drvdata->desc.type = REGULATOR_VOLTAGE;
drvdata->desc.owner = THIS_MODULE;
@@ -149,13 +147,13 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->desc.enable_time = config->startup_delay;
if (config->input_supply) {
- drvdata->desc.supply_name = kstrdup(config->input_supply,
- GFP_KERNEL);
+ drvdata->desc.supply_name = devm_kstrdup(&pdev->dev,
+ config->input_supply,
+ GFP_KERNEL);
if (!drvdata->desc.supply_name) {
dev_err(&pdev->dev,
"Failed to allocate input supply\n");
- ret = -ENOMEM;
- goto err_name;
+ return -ENOMEM;
}
}
@@ -186,11 +184,12 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
cfg.driver_data = drvdata;
cfg.of_node = pdev->dev.of_node;
- drvdata->dev = regulator_register(&drvdata->desc, &cfg);
+ drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc,
+ &cfg);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
- goto err_input;
+ return ret;
}
platform_set_drvdata(pdev, drvdata);
@@ -199,24 +198,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->desc.fixed_uV);
return 0;
-
-err_input:
- kfree(drvdata->desc.supply_name);
-err_name:
- kfree(drvdata->desc.name);
-err:
- return ret;
-}
-
-static int reg_fixed_voltage_remove(struct platform_device *pdev)
-{
- struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
-
- regulator_unregister(drvdata->dev);
- kfree(drvdata->desc.supply_name);
- kfree(drvdata->desc.name);
-
- return 0;
}
#if defined(CONFIG_OF)
@@ -229,7 +210,6 @@ MODULE_DEVICE_TABLE(of, fixed_of_match);
static struct platform_driver regulator_fixed_voltage_driver = {
.probe = reg_fixed_voltage_probe,
- .remove = reg_fixed_voltage_remove,
.driver = {
.name = "reg-fixed-voltage",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index c0a1d00b78c9..989b23b377c0 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -136,7 +136,6 @@ static struct gpio_regulator_config *
of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
{
struct gpio_regulator_config *config;
- struct property *prop;
const char *regtype;
int proplen, gpio, i;
int ret;
@@ -172,22 +171,35 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
if (!config->gpios)
return ERR_PTR(-ENOMEM);
+ proplen = of_property_count_u32_elems(np, "gpios-states");
+ /* optional property */
+ if (proplen < 0)
+ proplen = 0;
+
+ if (proplen > 0 && proplen != config->nr_gpios) {
+ dev_warn(dev, "gpios <-> gpios-states mismatch\n");
+ proplen = 0;
+ }
+
for (i = 0; i < config->nr_gpios; i++) {
gpio = of_get_named_gpio(np, "gpios", i);
if (gpio < 0)
break;
config->gpios[i].gpio = gpio;
+ if (proplen > 0) {
+ of_property_read_u32_index(np, "gpios-states", i, &ret);
+ if (ret)
+ config->gpios[i].flags = GPIOF_OUT_INIT_HIGH;
+ }
}
/* Fetch states. */
- prop = of_find_property(np, "states", NULL);
- if (!prop) {
+ proplen = of_property_count_u32_elems(np, "states");
+ if (proplen < 0) {
dev_err(dev, "No 'states' property found\n");
return ERR_PTR(-EINVAL);
}
- proplen = prop->length / sizeof(int);
-
config->states = devm_kzalloc(dev,
sizeof(struct gpio_regulator_state)
* (proplen / 2),
@@ -196,10 +208,10 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
return ERR_PTR(-ENOMEM);
for (i = 0; i < proplen / 2; i++) {
- config->states[i].value =
- be32_to_cpup((int *)prop->value + (i * 2));
- config->states[i].gpios =
- be32_to_cpup((int *)prop->value + (i * 2 + 1));
+ of_property_read_u32_index(np, "states", i * 2,
+ &config->states[i].value);
+ of_property_read_u32_index(np, "states", i * 2 + 1,
+ &config->states[i].gpios);
}
config->nr_states = i;
@@ -239,10 +251,8 @@ static int gpio_regulator_probe(struct platform_device *pdev)
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data),
GFP_KERNEL);
- if (drvdata == NULL) {
- dev_err(&pdev->dev, "Failed to allocate device data\n");
+ if (drvdata == NULL)
return -ENOMEM;
- }
drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
if (drvdata->desc.name == NULL) {
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index e221a271ba56..cbc39096c78d 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -37,10 +37,17 @@ int regulator_is_enabled_regmap(struct regulator_dev *rdev)
if (ret != 0)
return ret;
- if (rdev->desc->enable_is_inverted)
- return (val & rdev->desc->enable_mask) == 0;
- else
- return (val & rdev->desc->enable_mask) != 0;
+ val &= rdev->desc->enable_mask;
+
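+ /* Honour enable_val when the descriptor defines one; otherwise any set bit means enabled */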
+ if (rdev->desc->enable_is_inverted) {
+ if (rdev->desc->enable_val)
+ return val != rdev->desc->enable_val;
+ return val == 0;
+ } else {
+ if (rdev->desc->enable_val)
+ return val == rdev->desc->enable_val;
+ return val != 0;
+ }
}
EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap);
@@ -57,10 +64,13 @@ int regulator_enable_regmap(struct regulator_dev *rdev)
{
unsigned int val;
- if (rdev->desc->enable_is_inverted)
- val = 0;
- else
- val = rdev->desc->enable_mask;
+ if (rdev->desc->enable_is_inverted) {
+ val = rdev->desc->disable_val;
+ } else {
+ val = rdev->desc->enable_val;
+ if (!val)
+ val = rdev->desc->enable_mask;
+ }
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val);
@@ -80,10 +90,13 @@ int regulator_disable_regmap(struct regulator_dev *rdev)
{
unsigned int val;
- if (rdev->desc->enable_is_inverted)
- val = rdev->desc->enable_mask;
- else
- val = 0;
+ if (rdev->desc->enable_is_inverted) {
+ val = rdev->desc->enable_val;
+ if (!val)
+ val = rdev->desc->enable_mask;
+ } else {
+ val = rdev->desc->disable_val;
+ }
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val);
@@ -419,10 +432,13 @@ int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable)
{
unsigned int val;
- if (enable)
- val = rdev->desc->bypass_mask;
- else
- val = 0;
+ if (enable) {
+ val = rdev->desc->bypass_val_on;
+ if (!val)
+ val = rdev->desc->bypass_mask;
+ } else {
+ val = rdev->desc->bypass_val_off;
+ }
return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg,
rdev->desc->bypass_mask, val);
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 3b1102b75071..66fd2330dca0 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -327,7 +327,7 @@ static int lp3971_i2c_read(struct i2c_client *i2c, char reg, int count,
return -EIO;
ret = i2c_smbus_read_byte_data(i2c, reg);
if (ret < 0)
- return -EIO;
+ return ret;
*dest = ret;
return 0;
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 2e4734ff79fc..2e022aabd951 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -211,7 +211,7 @@ static int lp872x_get_timestep_usec(struct lp872x *lp)
ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val);
if (ret)
- return -EINVAL;
+ return ret;
val = (val & mask) >> shift;
if (val >= size)
@@ -229,7 +229,7 @@ static int lp872x_regulator_enable_time(struct regulator_dev *rdev)
u8 addr, val;
if (time_step_us < 0)
- return -EINVAL;
+ return time_step_us;
switch (rid) {
case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index e0619526708c..ed60baaeceec 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -1,7 +1,7 @@
/*
* max14577.c - Regulator driver for the Maxim 14577
*
- * Copyright (C) 2013 Samsung Electronics
+ * Copyright (C) 2013,2014 Samsung Electronics
* Krzysztof Kozlowski <k.kozlowski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -22,12 +22,6 @@
#include <linux/mfd/max14577-private.h>
#include <linux/regulator/of_regulator.h>
-struct max14577_regulator {
- struct device *dev;
- struct max14577 *max14577;
- struct regulator_dev **regulators;
-};
-
static int max14577_reg_is_enabled(struct regulator_dev *rdev)
{
int rid = rdev_get_id(rdev);
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index e242dd316d36..d23d0577754b 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -46,8 +46,6 @@ struct max1586_data {
unsigned int v3_curr_sel;
unsigned int v6_curr_sel;
-
- struct regulator_dev *rdev[0];
};
/*
@@ -162,14 +160,12 @@ static struct regulator_desc max1586_reg[] = {
static int max1586_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
- struct regulator_dev **rdev;
struct max1586_platform_data *pdata = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct max1586_data *max1586;
int i, id;
- max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data) +
- sizeof(struct regulator_dev *) * (MAX1586_V6 + 1),
+ max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data),
GFP_KERNEL);
if (!max1586)
return -ENOMEM;
@@ -186,8 +182,9 @@ static int max1586_pmic_probe(struct i2c_client *client,
max1586->v3_curr_sel = 24; /* 1.3V */
max1586->v6_curr_sel = 0;
- rdev = max1586->rdev;
for (i = 0; i < pdata->num_subdevs && i <= MAX1586_V6; i++) {
+ struct regulator_dev *rdev;
+
id = pdata->subdevs[i].id;
if (!pdata->subdevs[i].platform_data)
continue;
@@ -207,12 +204,12 @@ static int max1586_pmic_probe(struct i2c_client *client,
config.init_data = pdata->subdevs[i].platform_data;
config.driver_data = max1586;
- rdev[i] = devm_regulator_register(&client->dev,
+ rdev = devm_regulator_register(&client->dev,
&max1586_reg[id], &config);
- if (IS_ERR(rdev[i])) {
+ if (IS_ERR(rdev)) {
dev_err(&client->dev, "failed to register %s\n",
max1586_reg[id].name);
- return PTR_ERR(rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index ae001ccf26f4..ef1af2debbd2 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -65,7 +65,6 @@ enum max77686_ramp_rate {
};
struct max77686_data {
- struct regulator_dev *rdev[MAX77686_REGULATORS];
unsigned int opmode[MAX77686_REGULATORS];
};
@@ -400,7 +399,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
unsigned int i;
pmic_np = iodev->dev->of_node;
- regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");
+ regulators_np = of_get_child_by_name(pmic_np, "voltage-regulators");
if (!regulators_np) {
dev_err(&pdev->dev, "could not find regulators sub-node\n");
return -EINVAL;
@@ -410,8 +409,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
pdata->num_regulators, GFP_KERNEL);
if (!rdata) {
- dev_err(&pdev->dev,
- "could not allocate memory for regulator data\n");
+ of_node_put(regulators_np);
return -ENOMEM;
}
@@ -425,6 +423,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
}
pdata->regulators = rdata;
+ of_node_put(regulators_np);
return 0;
}
@@ -474,16 +473,18 @@ static int max77686_pmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, max77686);
for (i = 0; i < MAX77686_REGULATORS; i++) {
+ struct regulator_dev *rdev;
+
config.init_data = pdata->regulators[i].initdata;
config.of_node = pdata->regulators[i].of_node;
max77686->opmode[i] = regulators[i].enable_mask;
- max77686->rdev[i] = devm_regulator_register(&pdev->dev,
+ rdev = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
- if (IS_ERR(max77686->rdev[i])) {
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"regulator init failed for %d\n", i);
- return PTR_ERR(max77686->rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index 5fb899f461d0..653a58b49cdf 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -34,13 +34,6 @@
#define CHGIN_ILIM_STEP_20mA 20000
-struct max77693_pmic_dev {
- struct device *dev;
- struct max77693_dev *iodev;
- int num_regulators;
- struct regulator_dev **rdev;
-};
-
/* CHARGER regulator ops */
/* CHARGER regulator uses two bits for enabling */
static int max77693_chg_is_enabled(struct regulator_dev *rdev)
@@ -170,19 +163,22 @@ static int max77693_pmic_dt_parse_rdata(struct device *dev,
struct max77693_regulator_data *tmp;
int i, matched = 0;
- np = of_find_node_by_name(dev->parent->of_node, "regulators");
+ np = of_get_child_by_name(dev->parent->of_node, "regulators");
if (!np)
return -EINVAL;
rmatch = devm_kzalloc(dev,
sizeof(*rmatch) * ARRAY_SIZE(regulators), GFP_KERNEL);
- if (!rmatch)
+ if (!rmatch) {
+ of_node_put(np);
return -ENOMEM;
+ }
for (i = 0; i < ARRAY_SIZE(regulators); i++)
rmatch[i].name = regulators[i].name;
matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(regulators));
+ of_node_put(np);
if (matched <= 0)
return matched;
*rdata = devm_kzalloc(dev, sizeof(**rdata) * matched, GFP_KERNEL);
@@ -229,7 +225,6 @@ static int max77693_pmic_init_rdata(struct device *dev,
static int max77693_pmic_probe(struct platform_device *pdev)
{
struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct max77693_pmic_dev *max77693_pmic;
struct max77693_regulator_data *rdata = NULL;
int num_rdata, i;
struct regulator_config config;
@@ -240,39 +235,22 @@ static int max77693_pmic_probe(struct platform_device *pdev)
return -ENODEV;
}
- max77693_pmic = devm_kzalloc(&pdev->dev,
- sizeof(struct max77693_pmic_dev),
- GFP_KERNEL);
- if (!max77693_pmic)
- return -ENOMEM;
-
- max77693_pmic->rdev = devm_kzalloc(&pdev->dev,
- sizeof(struct regulator_dev *) * num_rdata,
- GFP_KERNEL);
- if (!max77693_pmic->rdev)
- return -ENOMEM;
-
- max77693_pmic->dev = &pdev->dev;
- max77693_pmic->iodev = iodev;
- max77693_pmic->num_regulators = num_rdata;
-
config.dev = &pdev->dev;
config.regmap = iodev->regmap;
- config.driver_data = max77693_pmic;
- platform_set_drvdata(pdev, max77693_pmic);
- for (i = 0; i < max77693_pmic->num_regulators; i++) {
+ for (i = 0; i < num_rdata; i++) {
int id = rdata[i].id;
+ struct regulator_dev *rdev;
config.init_data = rdata[i].initdata;
config.of_node = rdata[i].of_node;
- max77693_pmic->rdev[i] = devm_regulator_register(&pdev->dev,
+ rdev = devm_regulator_register(&pdev->dev,
&regulators[id], &config);
- if (IS_ERR(max77693_pmic->rdev[i])) {
- dev_err(max77693_pmic->dev,
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
"Failed to initialize regulator-%d\n", id);
- return PTR_ERR(max77693_pmic->rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 7f049c92ee52..3172da847d24 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -49,7 +49,6 @@
#define MAX8649_RAMP_DOWN (1 << 1)
struct max8649_regulator_info {
- struct regulator_dev *regulator;
struct device *dev;
struct regmap *regmap;
@@ -154,6 +153,7 @@ static int max8649_regulator_probe(struct i2c_client *client,
{
struct max8649_platform_data *pdata = dev_get_platdata(&client->dev);
struct max8649_regulator_info *info = NULL;
+ struct regulator_dev *regulator;
struct regulator_config config = { };
unsigned int val;
unsigned char data;
@@ -234,12 +234,12 @@ static int max8649_regulator_probe(struct i2c_client *client,
config.driver_data = info;
config.regmap = info->regmap;
- info->regulator = devm_regulator_register(&client->dev, &dcdc_desc,
+ regulator = devm_regulator_register(&client->dev, &dcdc_desc,
&config);
- if (IS_ERR(info->regulator)) {
+ if (IS_ERR(regulator)) {
dev_err(info->dev, "failed to register regulator %s\n",
dcdc_desc.name);
- return PTR_ERR(info->regulator);
+ return PTR_ERR(regulator);
}
return 0;
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 8d94d3d7f97f..2fc411188794 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -81,16 +81,17 @@ enum {
struct max8660 {
struct i2c_client *client;
u8 shadow_regs[MAX8660_N_REGS]; /* as chip is write only */
- struct regulator_dev *rdev[];
};
static int max8660_write(struct max8660 *max8660, u8 reg, u8 mask, u8 val)
{
- static const u8 max8660_addresses[MAX8660_N_REGS] =
- { 0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80 };
+ static const u8 max8660_addresses[MAX8660_N_REGS] = {
+ 0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80
+ };
int ret;
u8 reg_val = (max8660->shadow_regs[reg] & mask) | val;
+
dev_vdbg(&max8660->client->dev, "Writing reg %02x with %02x\n",
max8660_addresses[reg], reg_val);
@@ -112,6 +113,7 @@ static int max8660_dcdc_is_enabled(struct regulator_dev *rdev)
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 val = max8660->shadow_regs[MAX8660_OVER1];
u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
+
return !!(val & mask);
}
@@ -119,6 +121,7 @@ static int max8660_dcdc_enable(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 bit = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
+
return max8660_write(max8660, MAX8660_OVER1, 0xff, bit);
}
@@ -126,15 +129,16 @@ static int max8660_dcdc_disable(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? ~1 : ~4;
+
return max8660_write(max8660, MAX8660_OVER1, mask, 0);
}
static int max8660_dcdc_get_voltage_sel(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
-
u8 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
u8 selector = max8660->shadow_regs[reg];
+
return selector;
}
@@ -207,6 +211,7 @@ static int max8660_ldo67_is_enabled(struct regulator_dev *rdev)
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 val = max8660->shadow_regs[MAX8660_OVER2];
u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
+
return !!(val & mask);
}
@@ -214,6 +219,7 @@ static int max8660_ldo67_enable(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 bit = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
+
return max8660_write(max8660, MAX8660_OVER2, 0xff, bit);
}
@@ -221,15 +227,16 @@ static int max8660_ldo67_disable(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? ~2 : ~4;
+
return max8660_write(max8660, MAX8660_OVER2, mask, 0);
}
static int max8660_ldo67_get_voltage_sel(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
-
u8 shift = (rdev_get_id(rdev) == MAX8660_V6) ? 0 : 4;
u8 selector = (max8660->shadow_regs[MAX8660_L12VCR] >> shift) & 0xf;
+
return selector;
}
@@ -330,7 +337,7 @@ static int max8660_pdata_from_dt(struct device *dev,
struct max8660_subdev_data *sub;
struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)];
- np = of_find_node_by_name(dev->of_node, "regulators");
+ np = of_get_child_by_name(dev->of_node, "regulators");
if (!np) {
dev_err(dev, "missing 'regulators' subnode in DT\n");
return -EINVAL;
@@ -340,6 +347,7 @@ static int max8660_pdata_from_dt(struct device *dev,
rmatch[i].name = max8660_reg[i].name;
matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(rmatch));
+ of_node_put(np);
if (matched <= 0)
return matched;
@@ -373,7 +381,6 @@ static inline int max8660_pdata_from_dt(struct device *dev,
static int max8660_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
- struct regulator_dev **rdev;
struct device *dev = &client->dev;
struct max8660_platform_data *pdata = dev_get_platdata(dev);
struct regulator_config config = { };
@@ -406,14 +413,11 @@ static int max8660_probe(struct i2c_client *client,
return -EINVAL;
}
- max8660 = devm_kzalloc(dev, sizeof(struct max8660) +
- sizeof(struct regulator_dev *) * MAX8660_V_END,
- GFP_KERNEL);
+ max8660 = devm_kzalloc(dev, sizeof(struct max8660), GFP_KERNEL);
if (!max8660)
return -ENOMEM;
max8660->client = client;
- rdev = max8660->rdev;
if (pdata->en34_is_high) {
/* Simulate always on */
@@ -481,6 +485,7 @@ static int max8660_probe(struct i2c_client *client,
/* Finally register devices */
for (i = 0; i < pdata->num_subdevs; i++) {
+ struct regulator_dev *rdev;
id = pdata->subdevs[i].id;
@@ -489,13 +494,13 @@ static int max8660_probe(struct i2c_client *client,
config.of_node = of_node[i];
config.driver_data = max8660;
- rdev[i] = devm_regulator_register(&client->dev,
+ rdev = devm_regulator_register(&client->dev,
&max8660_reg[id], &config);
- if (IS_ERR(rdev[i])) {
- ret = PTR_ERR(rdev[i]);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
dev_err(&client->dev, "failed to register %s\n",
max8660_reg[id].name);
- return PTR_ERR(rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index 0c5fe6c6ac26..9623e9e290bf 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -34,7 +34,6 @@
struct max8907_regulator {
struct regulator_desc desc[MAX8907_NUM_REGULATORS];
- struct regulator_dev *rdev[MAX8907_NUM_REGULATORS];
};
#define REG_MBATT() \
@@ -231,7 +230,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)
if (!np)
return 0;
- regulators = of_find_node_by_name(np, "regulators");
+ regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
dev_err(&pdev->dev, "regulators node not found\n");
return -EINVAL;
@@ -292,10 +291,9 @@ static int max8907_regulator_probe(struct platform_device *pdev)
return ret;
pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
- if (!pmic) {
- dev_err(&pdev->dev, "Failed to alloc pmic\n");
+ if (!pmic)
return -ENOMEM;
- }
+
platform_set_drvdata(pdev, pmic);
memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc));
@@ -311,6 +309,8 @@ static int max8907_regulator_probe(struct platform_device *pdev)
}
for (i = 0; i < MAX8907_NUM_REGULATORS; i++) {
+ struct regulator_dev *rdev;
+
config.dev = pdev->dev.parent;
if (pdata)
idata = pdata->init_data[i];
@@ -350,13 +350,13 @@ static int max8907_regulator_probe(struct platform_device *pdev)
pmic->desc[i].ops = &max8907_out5v_hwctl_ops;
}
- pmic->rdev[i] = devm_regulator_register(&pdev->dev,
+ rdev = devm_regulator_register(&pdev->dev,
&pmic->desc[i], &config);
- if (IS_ERR(pmic->rdev[i])) {
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"failed to register %s regulator\n",
pmic->desc[i].name);
- return PTR_ERR(pmic->rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 759510789e71..dad2bcd14e96 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -36,9 +36,7 @@
struct max8925_regulator_info {
struct regulator_desc desc;
- struct regulator_dev *regulator;
struct i2c_client *i2c;
- struct max8925_chip *chip;
int vol_reg;
int enable_reg;
@@ -251,10 +249,11 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
{
struct device_node *nproot, *np;
int rcount;
+
nproot = of_node_get(pdev->dev.parent->of_node);
if (!nproot)
return -ENODEV;
- np = of_find_node_by_name(nproot, "regulators");
+ np = of_get_child_by_name(nproot, "regulators");
if (!np) {
dev_err(&pdev->dev, "failed to find regulators node\n");
return -ENODEV;
@@ -264,7 +263,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
&max8925_regulator_matches[ridx], 1);
of_node_put(np);
if (rcount < 0)
- return -ENODEV;
+ return rcount;
config->init_data = max8925_regulator_matches[ridx].init_data;
config->of_node = max8925_regulator_matches[ridx].of_node;
@@ -303,7 +302,6 @@ static int max8925_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
ri->i2c = chip->i2c;
- ri->chip = chip;
config.dev = &pdev->dev;
config.driver_data = ri;
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 788e5ae2af1b..d920f5a32ec8 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -48,9 +48,7 @@ enum {
struct max8952_data {
struct i2c_client *client;
- struct device *dev;
struct max8952_platform_data *pdata;
- struct regulator_dev *rdev;
bool vid0;
bool vid1;
@@ -59,6 +57,7 @@ struct max8952_data {
static int max8952_read_reg(struct max8952_data *max8952, u8 reg)
{
int ret = i2c_smbus_read_byte_data(max8952->client, reg);
+
if (ret > 0)
ret &= 0xff;
@@ -144,10 +143,8 @@ static struct max8952_platform_data *max8952_parse_dt(struct device *dev)
int i;
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- dev_err(dev, "Failed to allocate platform data\n");
+ if (!pd)
return NULL;
- }
pd->gpio_vid0 = of_get_named_gpio(np, "max8952,vid-gpios", 0);
pd->gpio_vid1 = of_get_named_gpio(np, "max8952,vid-gpios", 1);
@@ -199,6 +196,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
struct max8952_platform_data *pdata = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct max8952_data *max8952;
+ struct regulator_dev *rdev;
int ret = 0, err = 0;
@@ -219,10 +217,9 @@ static int max8952_pmic_probe(struct i2c_client *client,
return -ENOMEM;
max8952->client = client;
- max8952->dev = &client->dev;
max8952->pdata = pdata;
- config.dev = max8952->dev;
+ config.dev = &client->dev;
config.init_data = pdata->reg_data;
config.driver_data = max8952;
config.of_node = client->dev.of_node;
@@ -231,11 +228,11 @@ static int max8952_pmic_probe(struct i2c_client *client,
if (pdata->reg_data->constraints.boot_on)
config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
- max8952->rdev = regulator_register(&regulator, &config);
+ rdev = devm_regulator_register(&client->dev, &regulator, &config);
- if (IS_ERR(max8952->rdev)) {
- ret = PTR_ERR(max8952->rdev);
- dev_err(max8952->dev, "regulator init failed (%d)\n", ret);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&client->dev, "regulator init failed (%d)\n", ret);
return ret;
}
@@ -263,7 +260,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
err = 3;
if (err) {
- dev_warn(max8952->dev, "VID0/1 gpio invalid: "
+ dev_warn(&client->dev, "VID0/1 gpio invalid: "
"DVS not available.\n");
max8952->vid0 = 0;
max8952->vid1 = 0;
@@ -274,7 +271,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
/* Disable Pulldown of EN only */
max8952_write_reg(max8952, MAX8952_REG_CONTROL, 0x60);
- dev_err(max8952->dev, "DVS modes disabled because VID0 and VID1"
+ dev_err(&client->dev, "DVS modes disabled because VID0 and VID1"
" do not have proper controls.\n");
} else {
/*
@@ -321,9 +318,6 @@ static int max8952_pmic_remove(struct i2c_client *client)
{
struct max8952_data *max8952 = i2c_get_clientdata(client);
struct max8952_platform_data *pdata = max8952->pdata;
- struct regulator_dev *rdev = max8952->rdev;
-
- regulator_unregister(rdev);
gpio_free(pdata->gpio_vid0);
gpio_free(pdata->gpio_vid1);
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 892aa1e5b96c..dbedf1768db0 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -93,7 +93,6 @@
struct max8973_chip {
struct device *dev;
struct regulator_desc desc;
- struct regulator_dev *rdev;
struct regmap *regmap;
bool enable_external_control;
int dvs_gpio;
@@ -379,10 +378,8 @@ static int max8973_probe(struct i2c_client *client,
}
max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL);
- if (!max) {
- dev_err(&client->dev, "Memory allocation for max failed\n");
+ if (!max)
return -ENOMEM;
- }
max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config);
if (IS_ERR(max->regmap)) {
@@ -474,7 +471,6 @@ static int max8973_probe(struct i2c_client *client,
return ret;
}
- max->rdev = rdev;
return 0;
}
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 2d618fc9c1af..90b4c530dee5 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -38,7 +38,6 @@ struct max8997_data {
struct device *dev;
struct max8997_dev *iodev;
int num_regulators;
- struct regulator_dev **rdev;
int ramp_delay; /* in mV/us */
bool buck1_gpiodvs;
@@ -924,7 +923,7 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
return -ENODEV;
}
- regulators_np = of_find_node_by_name(pmic_np, "regulators");
+ regulators_np = of_get_child_by_name(pmic_np, "regulators");
if (!regulators_np) {
dev_err(&pdev->dev, "could not find regulators sub-node\n");
return -EINVAL;
@@ -937,7 +936,6 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
pdata->num_regulators, GFP_KERNEL);
if (!rdata) {
of_node_put(regulators_np);
- dev_err(&pdev->dev, "could not allocate memory for regulator data\n");
return -ENOMEM;
}
@@ -1030,10 +1028,10 @@ static int max8997_pmic_probe(struct platform_device *pdev)
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = iodev->pdata;
struct regulator_config config = { };
- struct regulator_dev **rdev;
+ struct regulator_dev *rdev;
struct max8997_data *max8997;
struct i2c_client *i2c;
- int i, ret, size, nr_dvs;
+ int i, ret, nr_dvs;
u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0;
if (!pdata) {
@@ -1052,12 +1050,6 @@ static int max8997_pmic_probe(struct platform_device *pdev)
if (!max8997)
return -ENOMEM;
- size = sizeof(struct regulator_dev *) * pdata->num_regulators;
- max8997->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!max8997->rdev)
- return -ENOMEM;
-
- rdev = max8997->rdev;
max8997->dev = &pdev->dev;
max8997->iodev = iodev;
max8997->num_regulators = pdata->num_regulators;
@@ -1205,12 +1197,12 @@ static int max8997_pmic_probe(struct platform_device *pdev)
config.driver_data = max8997;
config.of_node = pdata->regulators[i].reg_node;
- rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
- &config);
- if (IS_ERR(rdev[i])) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[id],
+ &config);
+ if (IS_ERR(rdev)) {
dev_err(max8997->dev, "regulator init failed for %d\n",
id);
- return PTR_ERR(rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index ae3f0656feb0..961091b46557 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -40,7 +40,6 @@ struct max8998_data {
struct device *dev;
struct max8998_dev *iodev;
int num_regulators;
- struct regulator_dev **rdev;
u8 buck1_vol[4]; /* voltages for selection */
u8 buck2_vol[2];
unsigned int buck1_idx; /* index to last changed voltage */
@@ -674,8 +673,10 @@ static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev,
rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
pdata->num_regulators, GFP_KERNEL);
- if (!rdata)
+ if (!rdata) {
+ of_node_put(regulators_np);
return -ENOMEM;
+ }
pdata->regulators = rdata;
for (i = 0; i < ARRAY_SIZE(regulators); ++i) {
@@ -692,6 +693,9 @@ static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev,
}
pdata->num_regulators = rdata - pdata->regulators;
+ of_node_put(reg_np);
+ of_node_put(regulators_np);
+
ret = max8998_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
if (ret)
return -EINVAL;
@@ -741,10 +745,10 @@ static int max8998_pmic_probe(struct platform_device *pdev)
struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max8998_platform_data *pdata = iodev->pdata;
struct regulator_config config = { };
- struct regulator_dev **rdev;
+ struct regulator_dev *rdev;
struct max8998_data *max8998;
struct i2c_client *i2c;
- int i, ret, size;
+ int i, ret;
unsigned int v;
if (!pdata) {
@@ -763,12 +767,6 @@ static int max8998_pmic_probe(struct platform_device *pdev)
if (!max8998)
return -ENOMEM;
- size = sizeof(struct regulator_dev *) * pdata->num_regulators;
- max8998->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!max8998->rdev)
- return -ENOMEM;
-
- rdev = max8998->rdev;
max8998->dev = &pdev->dev;
max8998->iodev = iodev;
max8998->num_regulators = pdata->num_regulators;
@@ -872,13 +870,12 @@ static int max8998_pmic_probe(struct platform_device *pdev)
config.init_data = pdata->regulators[i].initdata;
config.driver_data = max8998;
- rdev[i] = devm_regulator_register(&pdev->dev,
- &regulators[index], &config);
- if (IS_ERR(rdev[i])) {
- ret = PTR_ERR(rdev[i]);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[index],
+ &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
dev_err(max8998->dev, "regulator %s init failed (%d)\n",
regulators[index].name, ret);
- rdev[i] = NULL;
return ret;
}
}
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index da4859282302..05b971726ffa 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -167,8 +167,10 @@ int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
struct device_node *parent;
int num;
- of_node_get(pdev->dev.parent->of_node);
- parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!pdev->dev.parent->of_node)
+ return -ENODEV;
+
+ parent = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!parent)
return -ENODEV;
@@ -187,8 +189,10 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
struct device_node *parent, *child;
int i, parsed = 0;
- of_node_get(pdev->dev.parent->of_node);
- parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!pdev->dev.parent->of_node)
+ return NULL;
+
+ parent = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!parent)
return NULL;
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
new file mode 100644
index 000000000000..ded3b3574209
--- /dev/null
+++ b/drivers/regulator/pbias-regulator.c
@@ -0,0 +1,255 @@
+/*
+ * pbias-regulator.c
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Balaji T K <balajitk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+struct pbias_reg_info {
+ u32 enable;
+ u32 enable_mask;
+ u32 vmode;
+ unsigned int enable_time;
+ char *name;
+};
+
+struct pbias_regulator_data {
+ struct regulator_desc desc;
+ void __iomem *pbias_addr;
+ unsigned int pbias_reg;
+ struct regulator_dev *dev;
+ struct regmap *syscon;
+ const struct pbias_reg_info *info;
+ int voltage;
+};
+
+static int pbias_regulator_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct pbias_regulator_data *data = rdev_get_drvdata(dev);
+ const struct pbias_reg_info *info = data->info;
+ int ret, vmode;
+
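+ /* Only two I/O voltages are supported: 1.8V (vmode clear) and 3.0V (vmode set) */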
+ if (min_uV <= 1800000)
+ vmode = 0;
+ else
+ vmode = info->vmode;
+
+ ret = regmap_update_bits(data->syscon, data->pbias_reg,
+ info->vmode, vmode);
+
+ return ret;
+}
+
+static int pbias_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
+ const struct pbias_reg_info *info = data->info;
+ int value, voltage;
+
+ regmap_read(data->syscon, data->pbias_reg, &value);
+ value &= info->vmode;
+
+ voltage = value ? 3000000 : 1800000;
+
+ return voltage;
+}
+
+static int pbias_regulator_enable(struct regulator_dev *rdev)
+{
+ struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
+ const struct pbias_reg_info *info = data->info;
+ int ret;
+
+ ret = regmap_update_bits(data->syscon, data->pbias_reg,
+ info->enable_mask, info->enable);
+
+ return ret;
+}
+
+static int pbias_regulator_disable(struct regulator_dev *rdev)
+{
+ struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
+ const struct pbias_reg_info *info = data->info;
+ int ret;
+
+ ret = regmap_update_bits(data->syscon, data->pbias_reg,
+ info->enable_mask, 0);
+ return ret;
+}
+
+static int pbias_regulator_is_enable(struct regulator_dev *rdev)
+{
+ struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
+ const struct pbias_reg_info *info = data->info;
+ int value;
+
+ regmap_read(data->syscon, data->pbias_reg, &value);
+
+ return (value & info->enable_mask) == info->enable_mask;
+}
+
+static struct regulator_ops pbias_regulator_voltage_ops = {
+ .set_voltage = pbias_regulator_set_voltage,
+ .get_voltage = pbias_regulator_get_voltage,
+ .enable = pbias_regulator_enable,
+ .disable = pbias_regulator_disable,
+ .is_enabled = pbias_regulator_is_enable,
+};
+
+static const struct pbias_reg_info pbias_mmc_omap2430 = {
+ .enable = BIT(1),
+ .enable_mask = BIT(1),
+ .vmode = BIT(0),
+ .enable_time = 100,
+ .name = "pbias_mmc_omap2430"
+};
+
+static const struct pbias_reg_info pbias_sim_omap3 = {
+ .enable = BIT(9),
+ .enable_mask = BIT(9),
+ .vmode = BIT(8),
+ .enable_time = 100,
+ .name = "pbias_sim_omap3"
+};
+
+static const struct pbias_reg_info pbias_mmc_omap4 = {
+ .enable = BIT(26) | BIT(22),
+ .enable_mask = BIT(26) | BIT(25) | BIT(22),
+ .vmode = BIT(21),
+ .enable_time = 100,
+ .name = "pbias_mmc_omap4"
+};
+
+static const struct pbias_reg_info pbias_mmc_omap5 = {
+ .enable = BIT(27) | BIT(26),
+ .enable_mask = BIT(27) | BIT(25) | BIT(26),
+ .vmode = BIT(21),
+ .enable_time = 100,
+ .name = "pbias_mmc_omap5"
+};
+
+static struct of_regulator_match pbias_matches[] = {
+ { .name = "pbias_mmc_omap2430", .driver_data = (void *)&pbias_mmc_omap2430},
+ { .name = "pbias_sim_omap3", .driver_data = (void *)&pbias_sim_omap3},
+ { .name = "pbias_mmc_omap4", .driver_data = (void *)&pbias_mmc_omap4},
+ { .name = "pbias_mmc_omap5", .driver_data = (void *)&pbias_mmc_omap5},
+};
+#define PBIAS_NUM_REGS ARRAY_SIZE(pbias_matches)
+
+static const struct of_device_id pbias_of_match[] = {
+ { .compatible = "ti,pbias-omap", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pbias_of_match);
+
+static int pbias_regulator_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct pbias_regulator_data *drvdata;
+ struct resource *res;
+ struct regulator_config cfg = { };
+ struct regmap *syscon;
+ const struct pbias_reg_info *info;
+ int ret = 0;
+ int count, idx, data_idx = 0;
+
+ count = of_regulator_match(&pdev->dev, np, pbias_matches,
+ PBIAS_NUM_REGS);
+ if (count < 0)
+ return count;
+
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(struct pbias_regulator_data)
+ * count, GFP_KERNEL);
+ if (drvdata == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate device data\n");
+ return -ENOMEM;
+ }
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
+ if (IS_ERR(syscon))
+ return PTR_ERR(syscon);
+
+ cfg.dev = &pdev->dev;
+
+ for (idx = 0; idx < PBIAS_NUM_REGS && data_idx < count; idx++) {
+ if (!pbias_matches[idx].init_data ||
+ !pbias_matches[idx].of_node)
+ continue;
+
+ info = pbias_matches[idx].driver_data;
+ if (!info)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ drvdata[data_idx].pbias_reg = res->start;
+ drvdata[data_idx].syscon = syscon;
+ drvdata[data_idx].info = info;
+ drvdata[data_idx].desc.name = info->name;
+ drvdata[data_idx].desc.owner = THIS_MODULE;
+ drvdata[data_idx].desc.type = REGULATOR_VOLTAGE;
+ drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops;
+ drvdata[data_idx].desc.n_voltages = 2;
+ drvdata[data_idx].desc.enable_time = info->enable_time;
+
+ cfg.init_data = pbias_matches[idx].init_data;
+ cfg.driver_data = &drvdata[data_idx];
+ cfg.of_node = pbias_matches[idx].of_node;
+
+ drvdata[data_idx].dev = devm_regulator_register(&pdev->dev,
+ &drvdata[data_idx].desc, &cfg);
+ if (IS_ERR(drvdata[data_idx].dev)) {
+ ret = PTR_ERR(drvdata[data_idx].dev);
+ dev_err(&pdev->dev,
+ "Failed to register regulator: %d\n", ret);
+ goto err_regulator;
+ }
+ data_idx++;
+ }
+
+ platform_set_drvdata(pdev, drvdata);
+
+err_regulator:
+ return ret;
+}
+
+static struct platform_driver pbias_regulator_driver = {
+ .probe = pbias_regulator_probe,
+ .driver = {
+ .name = "pbias-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pbias_of_match),
+ },
+};
+
+module_platform_driver(pbias_regulator_driver);
+
+MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
+MODULE_DESCRIPTION("pbias voltage regulator");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pbias-regulator");
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index ab174f20ca11..67e678c4301c 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -56,6 +56,8 @@
#define PFUZE100_VGEN5VOL 0x70
#define PFUZE100_VGEN6VOL 0x71
+enum chips { PFUZE100, PFUZE200 };
+
struct pfuze_regulator {
struct regulator_desc desc;
unsigned char stby_reg;
@@ -63,6 +65,7 @@ struct pfuze_regulator {
};
struct pfuze_chip {
+ int chip_id;
struct regmap *regmap;
struct device *dev;
struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR];
@@ -78,21 +81,23 @@ static const int pfuze100_vsnvs[] = {
};
static const struct i2c_device_id pfuze_device_id[] = {
- {.name = "pfuze100"},
- {},
+ {.name = "pfuze100", .driver_data = PFUZE100},
+ {.name = "pfuze200", .driver_data = PFUZE200},
+ { }
};
MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
static const struct of_device_id pfuze_dt_ids[] = {
- { .compatible = "fsl,pfuze100" },
- {},
+ { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
+ { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
+ { }
};
MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
struct pfuze_chip *pfuze100 = rdev_get_drvdata(rdev);
- int id = rdev->desc->id;
+ int id = rdev_get_id(rdev);
unsigned int ramp_bits;
int ret;
@@ -139,14 +144,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
};
-#define PFUZE100_FIXED_REG(_name, base, voltage) \
- [PFUZE100_ ## _name] = { \
+#define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \
+ [_chip ## _ ## _name] = { \
.desc = { \
.name = #_name, \
.n_voltages = 1, \
.ops = &pfuze100_fixed_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
- .id = PFUZE100_ ## _name, \
+ .id = _chip ## _ ## _name, \
.owner = THIS_MODULE, \
.min_uV = (voltage), \
.enable_reg = (base), \
@@ -154,14 +159,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
}, \
}
-#define PFUZE100_SW_REG(_name, base, min, max, step) \
- [PFUZE100_ ## _name] = { \
+#define PFUZE100_SW_REG(_chip, _name, base, min, max, step) \
+ [_chip ## _ ## _name] = { \
.desc = { \
.name = #_name,\
.n_voltages = ((max) - (min)) / (step) + 1, \
.ops = &pfuze100_sw_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
- .id = PFUZE100_ ## _name, \
+ .id = _chip ## _ ## _name, \
.owner = THIS_MODULE, \
.min_uV = (min), \
.uV_step = (step), \
@@ -172,14 +177,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
.stby_mask = 0x3f, \
}
-#define PFUZE100_SWB_REG(_name, base, mask, voltages) \
- [PFUZE100_ ## _name] = { \
+#define PFUZE100_SWB_REG(_chip, _name, base, mask, voltages) \
+ [_chip ## _ ## _name] = { \
.desc = { \
.name = #_name, \
.n_voltages = ARRAY_SIZE(voltages), \
.ops = &pfuze100_swb_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
- .id = PFUZE100_ ## _name, \
+ .id = _chip ## _ ## _name, \
.owner = THIS_MODULE, \
.volt_table = voltages, \
.vsel_reg = (base), \
@@ -187,14 +192,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
}, \
}
-#define PFUZE100_VGEN_REG(_name, base, min, max, step) \
- [PFUZE100_ ## _name] = { \
+#define PFUZE100_VGEN_REG(_chip, _name, base, min, max, step) \
+ [_chip ## _ ## _name] = { \
.desc = { \
.name = #_name, \
.n_voltages = ((max) - (min)) / (step) + 1, \
.ops = &pfuze100_ldo_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
- .id = PFUZE100_ ## _name, \
+ .id = _chip ## _ ## _name, \
.owner = THIS_MODULE, \
.min_uV = (min), \
.uV_step = (step), \
@@ -207,25 +212,45 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
.stby_mask = 0x20, \
}
+/* PFUZE100 */
static struct pfuze_regulator pfuze100_regulators[] = {
- PFUZE100_SW_REG(SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
- PFUZE100_SW_REG(SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000),
- PFUZE100_SW_REG(SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
- PFUZE100_SW_REG(SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
- PFUZE100_SW_REG(SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
- PFUZE100_SW_REG(SW4, PFUZE100_SW4VOL, 400000, 1975000, 25000),
- PFUZE100_SWB_REG(SWBST, PFUZE100_SWBSTCON1, 0x3 , pfuze100_swbst),
- PFUZE100_SWB_REG(VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
- PFUZE100_FIXED_REG(VREFDDR, PFUZE100_VREFDDRCON, 750000),
- PFUZE100_VGEN_REG(VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
- PFUZE100_VGEN_REG(VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
- PFUZE100_VGEN_REG(VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
- PFUZE100_VGEN_REG(VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
- PFUZE100_VGEN_REG(VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
- PFUZE100_VGEN_REG(VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+ PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
+ PFUZE100_SW_REG(PFUZE100, SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000),
+ PFUZE100_SW_REG(PFUZE100, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(PFUZE100, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(PFUZE100, SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(PFUZE100, SW4, PFUZE100_SW4VOL, 400000, 1975000, 25000),
+ PFUZE100_SWB_REG(PFUZE100, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
+ PFUZE100_SWB_REG(PFUZE100, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+ PFUZE100_FIXED_REG(PFUZE100, VREFDDR, PFUZE100_VREFDDRCON, 750000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+};
+
+static struct pfuze_regulator pfuze200_regulators[] = {
+ PFUZE100_SW_REG(PFUZE200, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
+ PFUZE100_SW_REG(PFUZE200, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(PFUZE200, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(PFUZE200, SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
+ PFUZE100_SWB_REG(PFUZE200, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
+ PFUZE100_SWB_REG(PFUZE200, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+ PFUZE100_FIXED_REG(PFUZE200, VREFDDR, PFUZE100_VREFDDRCON, 750000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
};
+static struct pfuze_regulator *pfuze_regulators;
+
#ifdef CONFIG_OF
+/* PFUZE100 */
static struct of_regulator_match pfuze100_matches[] = {
{ .name = "sw1ab", },
{ .name = "sw1c", },
@@ -244,24 +269,56 @@ static struct of_regulator_match pfuze100_matches[] = {
{ .name = "vgen6", },
};
+/* PFUZE200 */
+static struct of_regulator_match pfuze200_matches[] = {
+ { .name = "sw1ab", },
+ { .name = "sw2", },
+ { .name = "sw3a", },
+ { .name = "sw3b", },
+ { .name = "swbst", },
+ { .name = "vsnvs", },
+ { .name = "vrefddr", },
+ { .name = "vgen1", },
+ { .name = "vgen2", },
+ { .name = "vgen3", },
+ { .name = "vgen4", },
+ { .name = "vgen5", },
+ { .name = "vgen6", },
+};
+
+static struct of_regulator_match *pfuze_matches;
+
static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
{
struct device *dev = chip->dev;
struct device_node *np, *parent;
int ret;
- np = of_node_get(dev->parent->of_node);
+ np = of_node_get(dev->of_node);
if (!np)
- return 0;
+ return -EINVAL;
- parent = of_find_node_by_name(np, "regulators");
+ parent = of_get_child_by_name(np, "regulators");
if (!parent) {
dev_err(dev, "regulators node not found\n");
return -EINVAL;
}
- ret = of_regulator_match(dev, parent, pfuze100_matches,
- ARRAY_SIZE(pfuze100_matches));
+ switch (chip->chip_id) {
+ case PFUZE200:
+ pfuze_matches = pfuze200_matches;
+ ret = of_regulator_match(dev, parent, pfuze200_matches,
+ ARRAY_SIZE(pfuze200_matches));
+ break;
+
+ case PFUZE100:
+ default:
+ pfuze_matches = pfuze100_matches;
+ ret = of_regulator_match(dev, parent, pfuze100_matches,
+ ARRAY_SIZE(pfuze100_matches));
+ break;
+ }
of_node_put(parent);
if (ret < 0) {
@@ -275,12 +332,12 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
static inline struct regulator_init_data *match_init_data(int index)
{
- return pfuze100_matches[index].init_data;
+ return pfuze_matches[index].init_data;
}
static inline struct device_node *match_of_node(int index)
{
- return pfuze100_matches[index].of_node;
+ return pfuze_matches[index].of_node;
}
#else
static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
@@ -308,16 +365,14 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
if (ret)
return ret;
- switch (value & 0x0f) {
- /*
- * Freescale misprogrammed 1-3% of parts prior to week 8 of 2013
- * as ID=8
- */
- case 0x8:
+ if (((value & 0x0f) == 0x8) && (pfuze_chip->chip_id == PFUZE100)) {
+ /*
+ * Freescale misprogrammed 1-3% of parts prior to week 8 of 2013
+ * as ID=8 in PFUZE100
+ */
dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
- case 0x0:
- break;
- default:
+ } else if ((value & 0x0f) != pfuze_chip->chip_id) {
+ /* device ID does not match the configured chip type */
dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
return -ENODEV;
}
@@ -353,17 +408,31 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
dev_get_platdata(&client->dev);
struct regulator_config config = { };
int i, ret;
+ const struct of_device_id *match;
+ u32 regulator_num;
+ u32 sw_check_start, sw_check_end;
pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
GFP_KERNEL);
if (!pfuze_chip)
return -ENOMEM;
- i2c_set_clientdata(client, pfuze_chip);
-
- memcpy(pfuze_chip->regulator_descs, pfuze100_regulators,
- sizeof(pfuze_chip->regulator_descs));
+ if (client->dev.of_node) {
+ match = of_match_device(of_match_ptr(pfuze_dt_ids),
+ &client->dev);
+ if (!match) {
+ dev_err(&client->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ pfuze_chip->chip_id = (int)(long)match->data;
+ } else if (id) {
+ pfuze_chip->chip_id = id->driver_data;
+ } else {
+ dev_err(&client->dev, "No dts match or id table match found\n");
+ return -ENODEV;
+ }
+ i2c_set_clientdata(client, pfuze_chip);
pfuze_chip->dev = &client->dev;
pfuze_chip->regmap = devm_regmap_init_i2c(client, &pfuze_regmap_config);
@@ -380,11 +449,34 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
return ret;
}
+ /* use the right regulator table after identifying the device */
+ switch (pfuze_chip->chip_id) {
+ case PFUZE200:
+ pfuze_regulators = pfuze200_regulators;
+ regulator_num = ARRAY_SIZE(pfuze200_regulators);
+ sw_check_start = PFUZE200_SW2;
+ sw_check_end = PFUZE200_SW3B;
+ break;
+
+ case PFUZE100:
+ default:
+ pfuze_regulators = pfuze100_regulators;
+ regulator_num = ARRAY_SIZE(pfuze100_regulators);
+ sw_check_start = PFUZE100_SW2;
+ sw_check_end = PFUZE100_SW4;
+ break;
+ }
+ dev_info(&client->dev, "pfuze%s found.\n",
+ (pfuze_chip->chip_id == PFUZE100) ? "100" : "200");
+
+ memcpy(pfuze_chip->regulator_descs, pfuze_regulators,
+ sizeof(pfuze_chip->regulator_descs));
+
ret = pfuze_parse_regulators_dt(pfuze_chip);
if (ret)
return ret;
- for (i = 0; i < PFUZE100_MAX_REGULATOR; i++) {
+ for (i = 0; i < regulator_num; i++) {
struct regulator_init_data *init_data;
struct regulator_desc *desc;
int val;
@@ -397,7 +489,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
init_data = match_init_data(i);
/* SW2~SW4 high bit check and modify the voltage value table */
- if (i > PFUZE100_SW1C && i < PFUZE100_SWBST) {
+ if (i >= sw_check_start && i <= sw_check_end) {
regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
if (val & 0x40) {
desc->min_uV = 800000;
@@ -415,7 +507,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
devm_regulator_register(&client->dev, desc, &config);
if (IS_ERR(pfuze_chip->regulators[i])) {
dev_err(&client->dev, "register regulator%s failed\n",
- pfuze100_regulators[i].desc.name);
+ pfuze_regulators[i].desc.name);
return PTR_ERR(pfuze_chip->regulators[i]);
}
}
@@ -435,6 +527,6 @@ static struct i2c_driver pfuze_driver = {
module_i2c_driver(pfuze_driver);
MODULE_AUTHOR("Robin Gong <b38343@freescale.com>");
-MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100 PMIC");
+MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/PFUZE200 PMIC");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("i2c:pfuze100-regulator");
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index b58affb33143..4c414ae109ae 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -119,7 +119,6 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev);
- struct regulator_init_data *reg_data;
struct regulator_config config = { };
struct rc5t583_regulator *reg = NULL;
struct rc5t583_regulator *regs;
@@ -135,19 +134,11 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
regs = devm_kzalloc(&pdev->dev, RC5T583_REGULATOR_MAX *
sizeof(struct rc5t583_regulator), GFP_KERNEL);
- if (!regs) {
- dev_err(&pdev->dev, "Memory allocation failed exiting..\n");
+ if (!regs)
return -ENOMEM;
- }
for (id = 0; id < RC5T583_REGULATOR_MAX; ++id) {
- reg_data = pdata->reg_init_data[id];
-
- /* No need to register if there is no regulator data */
- if (!reg_data)
- continue;
-
reg = &regs[id];
ri = &rc5t583_reg_info[id];
reg->reg_info = ri;
@@ -169,7 +160,7 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
skip_ext_pwr_config:
config.dev = &pdev->dev;
- config.init_data = reg_data;
+ config.init_data = pdata->reg_init_data[id];
config.driver_data = reg;
config.regmap = rc5t583->regmap;
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
new file mode 100644
index 000000000000..f19a30f0fb42
--- /dev/null
+++ b/drivers/regulator/s2mpa01.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/s2mpa01.h>
+
+#define S2MPA01_REGULATOR_CNT ARRAY_SIZE(regulators)
+
+struct s2mpa01_info {
+ int ramp_delay24;
+ int ramp_delay3;
+ int ramp_delay5;
+ int ramp_delay16;
+ int ramp_delay7;
+ int ramp_delay8910;
+};
+
+static int get_ramp_delay(int ramp_delay)
+{
+ unsigned char cnt = 0;
+
+ ramp_delay /= 6250;
+
+ while (true) {
+ ramp_delay = ramp_delay >> 1;
+ if (ramp_delay == 0)
+ break;
+ cnt++;
+ }
+
+ if (cnt > 3)
+ cnt = 3;
+
+ return cnt;
+}
+
+static int s2mpa01_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector)
+{
+ struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev);
+ unsigned int ramp_delay = 0;
+ int old_volt, new_volt;
+
+ switch (rdev->desc->id) {
+ case S2MPA01_BUCK2:
+ case S2MPA01_BUCK4:
+ ramp_delay = s2mpa01->ramp_delay24;
+ break;
+ case S2MPA01_BUCK3:
+ ramp_delay = s2mpa01->ramp_delay3;
+ break;
+ case S2MPA01_BUCK5:
+ ramp_delay = s2mpa01->ramp_delay5;
+ break;
+ case S2MPA01_BUCK1:
+ case S2MPA01_BUCK6:
+ ramp_delay = s2mpa01->ramp_delay16;
+ break;
+ case S2MPA01_BUCK7:
+ ramp_delay = s2mpa01->ramp_delay7;
+ break;
+ case S2MPA01_BUCK8:
+ case S2MPA01_BUCK9:
+ case S2MPA01_BUCK10:
+ ramp_delay = s2mpa01->ramp_delay8910;
+ break;
+ }
+
+ if (ramp_delay == 0)
+ ramp_delay = rdev->desc->ramp_delay;
+
+ old_volt = rdev->desc->min_uV + (rdev->desc->uV_step * old_selector);
+ new_volt = rdev->desc->min_uV + (rdev->desc->uV_step * new_selector);
+
+ return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
+}
+
+static int s2mpa01_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev);
+ unsigned int ramp_val, ramp_shift, ramp_reg = S2MPA01_REG_RAMP2;
+ unsigned int ramp_enable = 1, enable_shift = 0;
+ int ret;
+
+ switch (rdev->desc->id) {
+ case S2MPA01_BUCK1:
+ enable_shift = S2MPA01_BUCK1_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ if (ramp_delay > s2mpa01->ramp_delay16)
+ s2mpa01->ramp_delay16 = ramp_delay;
+ else
+ ramp_delay = s2mpa01->ramp_delay16;
+
+ ramp_shift = S2MPA01_BUCK16_RAMP_SHIFT;
+ ramp_reg = S2MPA01_REG_RAMP1;
+ break;
+ case S2MPA01_BUCK2:
+ enable_shift = S2MPA01_BUCK2_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ if (ramp_delay > s2mpa01->ramp_delay24)
+ s2mpa01->ramp_delay24 = ramp_delay;
+ else
+ ramp_delay = s2mpa01->ramp_delay24;
+
+ ramp_shift = S2MPA01_BUCK24_RAMP_SHIFT;
+ ramp_reg = S2MPA01_REG_RAMP1;
+ break;
+ case S2MPA01_BUCK3:
+ enable_shift = S2MPA01_BUCK3_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ s2mpa01->ramp_delay3 = ramp_delay;
+ ramp_shift = S2MPA01_BUCK3_RAMP_SHIFT;
+ ramp_reg = S2MPA01_REG_RAMP1;
+ break;
+ case S2MPA01_BUCK4:
+ enable_shift = S2MPA01_BUCK4_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ if (ramp_delay > s2mpa01->ramp_delay24)
+ s2mpa01->ramp_delay24 = ramp_delay;
+ else
+ ramp_delay = s2mpa01->ramp_delay24;
+
+ ramp_shift = S2MPA01_BUCK24_RAMP_SHIFT;
+ ramp_reg = S2MPA01_REG_RAMP1;
+ break;
+ case S2MPA01_BUCK5:
+ s2mpa01->ramp_delay5 = ramp_delay;
+ ramp_shift = S2MPA01_BUCK5_RAMP_SHIFT;
+ break;
+ case S2MPA01_BUCK6:
+ if (ramp_delay > s2mpa01->ramp_delay16)
+ s2mpa01->ramp_delay16 = ramp_delay;
+ else
+ ramp_delay = s2mpa01->ramp_delay16;
+
+ ramp_shift = S2MPA01_BUCK16_RAMP_SHIFT;
+ break;
+ case S2MPA01_BUCK7:
+ s2mpa01->ramp_delay7 = ramp_delay;
+ ramp_shift = S2MPA01_BUCK7_RAMP_SHIFT;
+ break;
+ case S2MPA01_BUCK8:
+ case S2MPA01_BUCK9:
+ case S2MPA01_BUCK10:
+ if (ramp_delay > s2mpa01->ramp_delay8910)
+ s2mpa01->ramp_delay8910 = ramp_delay;
+ else
+ ramp_delay = s2mpa01->ramp_delay8910;
+
+ ramp_shift = S2MPA01_BUCK8910_RAMP_SHIFT;
+ break;
+ default:
+ return 0;
+ }
+
+ if (!ramp_enable)
+ goto ramp_disable;
+
+ ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
+ 1 << enable_shift, 1 << enable_shift);
+ if (ret) {
+ dev_err(&rdev->dev, "failed to enable ramp rate\n");
+ return ret;
+ }
+
+ ramp_val = get_ramp_delay(ramp_delay);
+
+ return regmap_update_bits(rdev->regmap, ramp_reg, 0x3 << ramp_shift,
+ ramp_val << ramp_shift);
+
+ramp_disable:
+ return regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
+ 1 << enable_shift, 0);
+}
+
+static struct regulator_ops s2mpa01_ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops s2mpa01_buck_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = s2mpa01_regulator_set_voltage_time_sel,
+ .set_ramp_delay = s2mpa01_set_ramp_delay,
+};
+
+#define regulator_desc_ldo1(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPA01_LDO##num, \
+ .ops = &s2mpa01_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_LDO_MIN, \
+ .uV_step = S2MPA01_LDO_STEP1, \
+ .n_voltages = S2MPA01_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPA01_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPA01_LDO_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+#define regulator_desc_ldo2(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPA01_LDO##num, \
+ .ops = &s2mpa01_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_LDO_MIN, \
+ .uV_step = S2MPA01_LDO_STEP2, \
+ .n_voltages = S2MPA01_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPA01_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPA01_LDO_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck1_4(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPA01_BUCK##num, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN1, \
+ .uV_step = S2MPA01_BUCK_STEP1, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B1CTRL2 + (num - 1) * 2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B1CTRL1 + (num - 1) * 2, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck5 { \
+ .name = "BUCK5", \
+ .id = S2MPA01_BUCK5, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN2, \
+ .uV_step = S2MPA01_BUCK_STEP1, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B5CTRL2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B5CTRL1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck6_7(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPA01_BUCK##num, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN1, \
+ .uV_step = S2MPA01_BUCK_STEP1, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B6CTRL2 + (num - 6) * 2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B6CTRL1 + (num - 6) * 2, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck8 { \
+ .name = "BUCK8", \
+ .id = S2MPA01_BUCK8, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN2, \
+ .uV_step = S2MPA01_BUCK_STEP2, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B8CTRL2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B8CTRL1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck9 { \
+ .name = "BUCK9", \
+ .id = S2MPA01_BUCK9, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN4, \
+ .uV_step = S2MPA01_BUCK_STEP2, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B9CTRL2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B9CTRL1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck10 { \
+ .name = "BUCK10", \
+ .id = S2MPA01_BUCK10, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN3, \
+ .uV_step = S2MPA01_BUCK_STEP2, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B10CTRL2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B10CTRL1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+static struct regulator_desc regulators[] = {
+ regulator_desc_ldo2(1),
+ regulator_desc_ldo1(2),
+ regulator_desc_ldo1(3),
+ regulator_desc_ldo1(4),
+ regulator_desc_ldo1(5),
+ regulator_desc_ldo2(6),
+ regulator_desc_ldo1(7),
+ regulator_desc_ldo1(8),
+ regulator_desc_ldo1(9),
+ regulator_desc_ldo1(10),
+ regulator_desc_ldo2(11),
+ regulator_desc_ldo1(12),
+ regulator_desc_ldo1(13),
+ regulator_desc_ldo1(14),
+ regulator_desc_ldo1(15),
+ regulator_desc_ldo1(16),
+ regulator_desc_ldo1(17),
+ regulator_desc_ldo1(18),
+ regulator_desc_ldo1(19),
+ regulator_desc_ldo1(20),
+ regulator_desc_ldo1(21),
+ regulator_desc_ldo2(22),
+ regulator_desc_ldo2(23),
+ regulator_desc_ldo1(24),
+ regulator_desc_ldo1(25),
+ regulator_desc_ldo1(26),
+ regulator_desc_buck1_4(1),
+ regulator_desc_buck1_4(2),
+ regulator_desc_buck1_4(3),
+ regulator_desc_buck1_4(4),
+ regulator_desc_buck5,
+ regulator_desc_buck6_7(6),
+ regulator_desc_buck6_7(7),
+ regulator_desc_buck8,
+ regulator_desc_buck9,
+ regulator_desc_buck10,
+};
+
+static int s2mpa01_pmic_probe(struct platform_device *pdev)
+{
+ struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX];
+ struct device_node *reg_np = NULL;
+ struct regulator_config config = { };
+ struct s2mpa01_info *s2mpa01;
+ int i;
+
+ s2mpa01 = devm_kzalloc(&pdev->dev, sizeof(*s2mpa01), GFP_KERNEL);
+ if (!s2mpa01)
+ return -ENOMEM;
+
+ for (i = 0; i < S2MPA01_REGULATOR_CNT; i++)
+ rdata[i].name = regulators[i].name;
+
+ if (iodev->dev->of_node) {
+ reg_np = of_get_child_by_name(iodev->dev->of_node,
+ "regulators");
+ if (!reg_np) {
+ dev_err(&pdev->dev,
+ "could not find regulators sub-node\n");
+ return -EINVAL;
+ }
+
+ of_regulator_match(&pdev->dev, reg_np, rdata,
+ S2MPA01_REGULATOR_MAX);
+ of_node_put(reg_np);
+ }
+
+ platform_set_drvdata(pdev, s2mpa01);
+
+ config.dev = &pdev->dev;
+ config.regmap = iodev->regmap_pmic;
+ config.driver_data = s2mpa01;
+
+ for (i = 0; i < S2MPA01_REGULATOR_MAX; i++) {
+ struct regulator_dev *rdev;
+ if (pdata)
+ config.init_data = pdata->regulators[i].initdata;
+ else
+ config.init_data = rdata[i].init_data;
+
+ if (reg_np)
+ config.of_node = rdata[i].of_node;
+
+ rdev = devm_regulator_register(&pdev->dev,
+ &regulators[i], &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "regulator init failed for %d\n",
+ i);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id s2mpa01_pmic_id[] = {
+ { "s2mpa01-pmic", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(platform, s2mpa01_pmic_id);
+
+static struct platform_driver s2mpa01_pmic_driver = {
+ .driver = {
+ .name = "s2mpa01-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = s2mpa01_pmic_probe,
+ .id_table = s2mpa01_pmic_id,
+};
+
+module_platform_driver(s2mpa01_pmic_driver);
+
+/* Module information */
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>");
+MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index cd0b9e35a56d..e713c162fbd4 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -1,13 +1,18 @@
/*
* s2mps11.c
*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
+ * Copyright (c) 2012-2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*
*/
@@ -24,18 +29,21 @@
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s2mps11.h>
-
-#define S2MPS11_REGULATOR_CNT ARRAY_SIZE(regulators)
+#include <linux/mfd/samsung/s2mps14.h>
struct s2mps11_info {
- struct regulator_dev *rdev[S2MPS11_REGULATOR_MAX];
-
+ unsigned int rdev_num;
int ramp_delay2;
int ramp_delay34;
int ramp_delay5;
int ramp_delay16;
int ramp_delay7810;
int ramp_delay9;
+ /*
+ * One bit per S2MPS14 regulator, recording whether suspend mode
+ * was enabled.
+ */
+ unsigned int s2mps14_suspend_state:30;
};
static int get_ramp_delay(int ramp_delay)
@@ -65,7 +73,7 @@ static int s2mps11_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int ramp_delay = 0;
int old_volt, new_volt;
- switch (rdev->desc->id) {
+ switch (rdev_get_id(rdev)) {
case S2MPS11_BUCK2:
ramp_delay = s2mps11->ramp_delay2;
break;
@@ -105,7 +113,7 @@ static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
unsigned int ramp_enable = 1, enable_shift = 0;
int ret;
- switch (rdev->desc->id) {
+ switch (rdev_get_id(rdev)) {
case S2MPS11_BUCK1:
if (ramp_delay > s2mps11->ramp_delay16)
s2mps11->ramp_delay16 = ramp_delay;
@@ -194,13 +202,11 @@ static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
if (!ramp_enable)
goto ramp_disable;
- if (enable_shift) {
- ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
- 1 << enable_shift, 1 << enable_shift);
- if (ret) {
- dev_err(&rdev->dev, "failed to enable ramp rate\n");
- return ret;
- }
+ ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
+ 1 << enable_shift, 1 << enable_shift);
+ if (ret) {
+ dev_err(&rdev->dev, "failed to enable ramp rate\n");
+ return ret;
}
ramp_val = get_ramp_delay(ramp_delay);
@@ -236,7 +242,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.set_ramp_delay = s2mps11_set_ramp_delay,
};
-#define regulator_desc_ldo1(num) { \
+#define regulator_desc_s2mps11_ldo1(num) { \
.name = "LDO"#num, \
.id = S2MPS11_LDO##num, \
.ops = &s2mps11_ldo_ops, \
@@ -250,7 +256,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_reg = S2MPS11_REG_L1CTRL + num - 1, \
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_ldo2(num) { \
+#define regulator_desc_s2mps11_ldo2(num) { \
.name = "LDO"#num, \
.id = S2MPS11_LDO##num, \
.ops = &s2mps11_ldo_ops, \
@@ -265,7 +271,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_buck1_4(num) { \
+#define regulator_desc_s2mps11_buck1_4(num) { \
.name = "BUCK"#num, \
.id = S2MPS11_BUCK##num, \
.ops = &s2mps11_buck_ops, \
@@ -281,7 +287,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_buck5 { \
+#define regulator_desc_s2mps11_buck5 { \
.name = "BUCK5", \
.id = S2MPS11_BUCK5, \
.ops = &s2mps11_buck_ops, \
@@ -297,7 +303,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_buck6_8(num) { \
+#define regulator_desc_s2mps11_buck6_8(num) { \
.name = "BUCK"#num, \
.id = S2MPS11_BUCK##num, \
.ops = &s2mps11_buck_ops, \
@@ -313,7 +319,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_buck9 { \
+#define regulator_desc_s2mps11_buck9 { \
.name = "BUCK9", \
.id = S2MPS11_BUCK9, \
.ops = &s2mps11_buck_ops, \
@@ -329,7 +335,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_buck10 { \
+#define regulator_desc_s2mps11_buck10 { \
.name = "BUCK10", \
.id = S2MPS11_BUCK10, \
.ops = &s2mps11_buck_ops, \
@@ -345,72 +351,252 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-static struct regulator_desc regulators[] = {
- regulator_desc_ldo2(1),
- regulator_desc_ldo1(2),
- regulator_desc_ldo1(3),
- regulator_desc_ldo1(4),
- regulator_desc_ldo1(5),
- regulator_desc_ldo2(6),
- regulator_desc_ldo1(7),
- regulator_desc_ldo1(8),
- regulator_desc_ldo1(9),
- regulator_desc_ldo1(10),
- regulator_desc_ldo2(11),
- regulator_desc_ldo1(12),
- regulator_desc_ldo1(13),
- regulator_desc_ldo1(14),
- regulator_desc_ldo1(15),
- regulator_desc_ldo1(16),
- regulator_desc_ldo1(17),
- regulator_desc_ldo1(18),
- regulator_desc_ldo1(19),
- regulator_desc_ldo1(20),
- regulator_desc_ldo1(21),
- regulator_desc_ldo2(22),
- regulator_desc_ldo2(23),
- regulator_desc_ldo1(24),
- regulator_desc_ldo1(25),
- regulator_desc_ldo1(26),
- regulator_desc_ldo2(27),
- regulator_desc_ldo1(28),
- regulator_desc_ldo1(29),
- regulator_desc_ldo1(30),
- regulator_desc_ldo1(31),
- regulator_desc_ldo1(32),
- regulator_desc_ldo1(33),
- regulator_desc_ldo1(34),
- regulator_desc_ldo1(35),
- regulator_desc_ldo1(36),
- regulator_desc_ldo1(37),
- regulator_desc_ldo1(38),
- regulator_desc_buck1_4(1),
- regulator_desc_buck1_4(2),
- regulator_desc_buck1_4(3),
- regulator_desc_buck1_4(4),
- regulator_desc_buck5,
- regulator_desc_buck6_8(6),
- regulator_desc_buck6_8(7),
- regulator_desc_buck6_8(8),
- regulator_desc_buck9,
- regulator_desc_buck10,
+static const struct regulator_desc s2mps11_regulators[] = {
+ regulator_desc_s2mps11_ldo2(1),
+ regulator_desc_s2mps11_ldo1(2),
+ regulator_desc_s2mps11_ldo1(3),
+ regulator_desc_s2mps11_ldo1(4),
+ regulator_desc_s2mps11_ldo1(5),
+ regulator_desc_s2mps11_ldo2(6),
+ regulator_desc_s2mps11_ldo1(7),
+ regulator_desc_s2mps11_ldo1(8),
+ regulator_desc_s2mps11_ldo1(9),
+ regulator_desc_s2mps11_ldo1(10),
+ regulator_desc_s2mps11_ldo2(11),
+ regulator_desc_s2mps11_ldo1(12),
+ regulator_desc_s2mps11_ldo1(13),
+ regulator_desc_s2mps11_ldo1(14),
+ regulator_desc_s2mps11_ldo1(15),
+ regulator_desc_s2mps11_ldo1(16),
+ regulator_desc_s2mps11_ldo1(17),
+ regulator_desc_s2mps11_ldo1(18),
+ regulator_desc_s2mps11_ldo1(19),
+ regulator_desc_s2mps11_ldo1(20),
+ regulator_desc_s2mps11_ldo1(21),
+ regulator_desc_s2mps11_ldo2(22),
+ regulator_desc_s2mps11_ldo2(23),
+ regulator_desc_s2mps11_ldo1(24),
+ regulator_desc_s2mps11_ldo1(25),
+ regulator_desc_s2mps11_ldo1(26),
+ regulator_desc_s2mps11_ldo2(27),
+ regulator_desc_s2mps11_ldo1(28),
+ regulator_desc_s2mps11_ldo1(29),
+ regulator_desc_s2mps11_ldo1(30),
+ regulator_desc_s2mps11_ldo1(31),
+ regulator_desc_s2mps11_ldo1(32),
+ regulator_desc_s2mps11_ldo1(33),
+ regulator_desc_s2mps11_ldo1(34),
+ regulator_desc_s2mps11_ldo1(35),
+ regulator_desc_s2mps11_ldo1(36),
+ regulator_desc_s2mps11_ldo1(37),
+ regulator_desc_s2mps11_ldo1(38),
+ regulator_desc_s2mps11_buck1_4(1),
+ regulator_desc_s2mps11_buck1_4(2),
+ regulator_desc_s2mps11_buck1_4(3),
+ regulator_desc_s2mps11_buck1_4(4),
+ regulator_desc_s2mps11_buck5,
+ regulator_desc_s2mps11_buck6_8(6),
+ regulator_desc_s2mps11_buck6_8(7),
+ regulator_desc_s2mps11_buck6_8(8),
+ regulator_desc_s2mps11_buck9,
+ regulator_desc_s2mps11_buck10,
+};
+
+static int s2mps14_regulator_enable(struct regulator_dev *rdev)
+{
+ struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+ unsigned int val;
+
+ if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+ val = S2MPS14_ENABLE_SUSPEND;
+ else
+ val = rdev->desc->enable_mask;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, val);
+}
+
+static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
+{
+ int ret;
+ unsigned int val;
+ struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+
+ /* LDO3 should always be on and does not support suspend mode */
+ if (rdev_get_id(rdev) == S2MPS14_LDO3)
+ return 0;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
+ if (ret < 0)
+ return ret;
+
+ s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev));
+ /*
+ * Don't enable suspend mode if the regulator is already disabled,
+ * because that would briefly turn the regulator on after resuming.
+ * However, we still want to set the suspend_state bit for this
+ * regulator in case it gets enabled before the system suspends.
+ */
+ if (!(val & rdev->desc->enable_mask))
+ return 0;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, S2MPS14_ENABLE_SUSPEND);
+}
+
+static struct regulator_ops s2mps14_reg_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = s2mps14_regulator_enable,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_suspend_disable = s2mps14_regulator_set_suspend_disable,
+};
+
+#define regulator_desc_s2mps14_ldo1(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPS14_LDO##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS14_LDO_MIN_800MV, \
+ .uV_step = S2MPS14_LDO_STEP_25MV, \
+ .n_voltages = S2MPS14_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPS14_LDO_VSEL_MASK, \
+ .enable_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+#define regulator_desc_s2mps14_ldo2(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPS14_LDO##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS14_LDO_MIN_1800MV, \
+ .uV_step = S2MPS14_LDO_STEP_25MV, \
+ .n_voltages = S2MPS14_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPS14_LDO_VSEL_MASK, \
+ .enable_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+#define regulator_desc_s2mps14_ldo3(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPS14_LDO##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS14_LDO_MIN_800MV, \
+ .uV_step = S2MPS14_LDO_STEP_12_5MV, \
+ .n_voltages = S2MPS14_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPS14_LDO_VSEL_MASK, \
+ .enable_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+#define regulator_desc_s2mps14_buck1235(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS14_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS14_BUCK1235_MIN_600MV, \
+ .uV_step = S2MPS14_BUCK1235_STEP_6_25MV, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .linear_min_sel = S2MPS14_BUCK1235_START_SEL, \
+ .ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS14_REG_B1CTRL1 + (num - 1) * 2, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+#define regulator_desc_s2mps14_buck4(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS14_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS14_BUCK4_MIN_1400MV, \
+ .uV_step = S2MPS14_BUCK4_STEP_12_5MV, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .linear_min_sel = S2MPS14_BUCK4_START_SEL, \
+ .ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS14_REG_B1CTRL1 + (num - 1) * 2, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+
+static const struct regulator_desc s2mps14_regulators[] = {
+ regulator_desc_s2mps14_ldo3(1),
+ regulator_desc_s2mps14_ldo3(2),
+ regulator_desc_s2mps14_ldo1(3),
+ regulator_desc_s2mps14_ldo1(4),
+ regulator_desc_s2mps14_ldo3(5),
+ regulator_desc_s2mps14_ldo3(6),
+ regulator_desc_s2mps14_ldo1(7),
+ regulator_desc_s2mps14_ldo2(8),
+ regulator_desc_s2mps14_ldo3(9),
+ regulator_desc_s2mps14_ldo3(10),
+ regulator_desc_s2mps14_ldo1(11),
+ regulator_desc_s2mps14_ldo2(12),
+ regulator_desc_s2mps14_ldo2(13),
+ regulator_desc_s2mps14_ldo2(14),
+ regulator_desc_s2mps14_ldo2(15),
+ regulator_desc_s2mps14_ldo2(16),
+ regulator_desc_s2mps14_ldo2(17),
+ regulator_desc_s2mps14_ldo2(18),
+ regulator_desc_s2mps14_ldo1(19),
+ regulator_desc_s2mps14_ldo1(20),
+ regulator_desc_s2mps14_ldo1(21),
+ regulator_desc_s2mps14_ldo3(22),
+ regulator_desc_s2mps14_ldo1(23),
+ regulator_desc_s2mps14_ldo2(24),
+ regulator_desc_s2mps14_ldo2(25),
+ regulator_desc_s2mps14_buck1235(1),
+ regulator_desc_s2mps14_buck1235(2),
+ regulator_desc_s2mps14_buck1235(3),
+ regulator_desc_s2mps14_buck4(4),
+ regulator_desc_s2mps14_buck1235(5),
};
static int s2mps11_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
- struct of_regulator_match rdata[S2MPS11_REGULATOR_MAX];
+ struct sec_platform_data *pdata = iodev->pdata;
+ struct of_regulator_match *rdata = NULL;
struct device_node *reg_np = NULL;
struct regulator_config config = { };
struct s2mps11_info *s2mps11;
- int i, ret;
+ int i, ret = 0;
+ const struct regulator_desc *regulators;
+ enum sec_device_type dev_type;
s2mps11 = devm_kzalloc(&pdev->dev, sizeof(struct s2mps11_info),
GFP_KERNEL);
if (!s2mps11)
return -ENOMEM;
+ dev_type = platform_get_device_id(pdev)->driver_data;
+ switch (dev_type) {
+ case S2MPS11X:
+ s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
+ regulators = s2mps11_regulators;
+ break;
+ case S2MPS14X:
+ s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
+ regulators = s2mps14_regulators;
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device type: %u\n", dev_type);
+ return -EINVAL;
+ }
+
if (!iodev->dev->of_node) {
if (pdata) {
goto common_reg;
@@ -421,16 +607,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < S2MPS11_REGULATOR_CNT; i++)
+ rdata = kzalloc(sizeof(*rdata) * s2mps11->rdev_num, GFP_KERNEL);
+ if (!rdata)
+ return -ENOMEM;
+
+ for (i = 0; i < s2mps11->rdev_num; i++)
rdata[i].name = regulators[i].name;
- reg_np = of_find_node_by_name(iodev->dev->of_node, "regulators");
+ reg_np = of_get_child_by_name(iodev->dev->of_node, "regulators");
if (!reg_np) {
dev_err(&pdev->dev, "could not find regulators sub-node\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- of_regulator_match(&pdev->dev, reg_np, rdata, S2MPS11_REGULATOR_MAX);
+ of_regulator_match(&pdev->dev, reg_np, rdata, s2mps11->rdev_num);
+ of_node_put(reg_np);
common_reg:
platform_set_drvdata(pdev, s2mps11);
@@ -438,7 +630,9 @@ common_reg:
config.dev = &pdev->dev;
config.regmap = iodev->regmap_pmic;
config.driver_data = s2mps11;
- for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
+ for (i = 0; i < s2mps11->rdev_num; i++) {
+ struct regulator_dev *regulator;
+
if (!reg_np) {
config.init_data = pdata->regulators[i].initdata;
config.of_node = pdata->regulators[i].reg_node;
@@ -447,21 +641,25 @@ common_reg:
config.of_node = rdata[i].of_node;
}
- s2mps11->rdev[i] = devm_regulator_register(&pdev->dev,
+ regulator = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
- if (IS_ERR(s2mps11->rdev[i])) {
- ret = PTR_ERR(s2mps11->rdev[i]);
+ if (IS_ERR(regulator)) {
+ ret = PTR_ERR(regulator);
dev_err(&pdev->dev, "regulator init failed for %d\n",
i);
- return ret;
+ goto out;
}
}
- return 0;
+out:
+ kfree(rdata);
+
+ return ret;
}
static const struct platform_device_id s2mps11_pmic_id[] = {
- { "s2mps11-pmic", 0},
+ { "s2mps11-pmic", S2MPS11X},
+ { "s2mps14-pmic", S2MPS14X},
{ },
};
MODULE_DEVICE_TABLE(platform, s2mps11_pmic_id);
@@ -489,5 +687,5 @@ module_exit(s2mps11_pmic_exit);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPS11 Regulator Driver");
+MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index d958dfa05125..92f19a005dc3 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -11,11 +11,8 @@
*
*/
-#include <linux/bug.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
@@ -170,12 +167,11 @@ static unsigned int s5m8767_opmode_reg[][4] = {
{0x0, 0x3, 0x1, 0x1}, /* BUCK9 */
};
-static int s5m8767_get_register(struct regulator_dev *rdev, int *reg,
- int *enable_ctrl)
+static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
+ int *reg, int *enable_ctrl)
{
- int i, reg_id = rdev_get_id(rdev);
+ int i;
unsigned int mode;
- struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
switch (reg_id) {
case S5M8767_LDO1 ... S5M8767_LDO2:
@@ -214,53 +210,6 @@ static int s5m8767_get_register(struct regulator_dev *rdev, int *reg,
return 0;
}
-static int s5m8767_reg_is_enabled(struct regulator_dev *rdev)
-{
- struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- int ret, reg;
- int enable_ctrl;
- unsigned int val;
-
- ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
- if (ret == -EINVAL)
- return 1;
- else if (ret)
- return ret;
-
- ret = regmap_read(s5m8767->iodev->regmap_pmic, reg, &val);
- if (ret)
- return ret;
-
- return (val & S5M8767_ENCTRL_MASK) == enable_ctrl;
-}
-
-static int s5m8767_reg_enable(struct regulator_dev *rdev)
-{
- struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- int ret, reg;
- int enable_ctrl;
-
- ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
- if (ret)
- return ret;
-
- return regmap_update_bits(s5m8767->iodev->regmap_pmic, reg,
- S5M8767_ENCTRL_MASK, enable_ctrl);
-}
-
-static int s5m8767_reg_disable(struct regulator_dev *rdev)
-{
- struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- int ret, reg, enable_ctrl;
-
- ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
- if (ret)
- return ret;
-
- return regmap_update_bits(s5m8767->iodev->regmap_pmic, reg,
- S5M8767_ENCTRL_MASK, ~S5M8767_ENCTRL_MASK);
-}
-
static int s5m8767_get_vsel_reg(int reg_id, struct s5m8767_info *s5m8767)
{
int reg;
@@ -410,9 +359,9 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
static struct regulator_ops s5m8767_ops = {
.list_voltage = regulator_list_voltage_linear,
- .is_enabled = s5m8767_reg_is_enabled,
- .enable = s5m8767_reg_enable,
- .disable = s5m8767_reg_disable,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = s5m8767_set_voltage_sel,
.set_voltage_time_sel = s5m8767_set_voltage_time_sel,
@@ -420,9 +369,9 @@ static struct regulator_ops s5m8767_ops = {
static struct regulator_ops s5m8767_buck78_ops = {
.list_voltage = regulator_list_voltage_linear,
- .is_enabled = s5m8767_reg_is_enabled,
- .enable = s5m8767_reg_enable,
- .disable = s5m8767_reg_disable,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
@@ -483,6 +432,66 @@ static struct regulator_desc regulators[] = {
s5m8767_regulator_desc(BUCK9),
};
+/*
+ * Enable GPIO control over BUCK9 in regulator_config for that regulator.
+ */
+static void s5m8767_regulator_config_ext_control(struct s5m8767_info *s5m8767,
+ struct sec_regulator_data *rdata,
+ struct regulator_config *config)
+{
+ int i, mode = 0;
+
+ if (rdata->id != S5M8767_BUCK9)
+ return;
+
+ /* Check if opmode for regulator matches S5M8767_ENCTRL_USE_GPIO */
+ for (i = 0; i < s5m8767->num_regulators; i++) {
+ const struct sec_opmode_data *opmode = &s5m8767->opmode[i];
+ if (opmode->id == rdata->id) {
+ mode = s5m8767_opmode_reg[rdata->id][opmode->mode];
+ break;
+ }
+ }
+ if (mode != S5M8767_ENCTRL_USE_GPIO) {
+ dev_warn(s5m8767->dev,
+ "ext-control for %s: mismatched op_mode (%x), ignoring\n",
+ rdata->reg_node->name, mode);
+ return;
+ }
+
+ if (!gpio_is_valid(rdata->ext_control_gpio)) {
+ dev_warn(s5m8767->dev,
+ "ext-control for %s: GPIO not valid, ignoring\n",
+ rdata->reg_node->name);
+ return;
+ }
+
+ config->ena_gpio = rdata->ext_control_gpio;
+ config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
+}
+
+/*
+ * Turn on GPIO control over BUCK9.
+ */
+static int s5m8767_enable_ext_control(struct s5m8767_info *s5m8767,
+ struct regulator_dev *rdev)
+{
+ int id = rdev_get_id(rdev);
+ int ret, reg, enable_ctrl;
+
+ if (id != S5M8767_BUCK9)
+ return -EINVAL;
+
+ ret = s5m8767_get_register(s5m8767, id, &reg, &enable_ctrl);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ reg, S5M8767_ENCTRL_MASK,
+ S5M8767_ENCTRL_USE_GPIO << S5M8767_ENCTRL_SHIFT);
+}
+
+
#ifdef CONFIG_OF
static int s5m8767_pmic_dt_parse_dvs_gpio(struct sec_pmic_dev *iodev,
struct sec_platform_data *pdata,
@@ -520,6 +529,16 @@ static int s5m8767_pmic_dt_parse_ds_gpio(struct sec_pmic_dev *iodev,
return 0;
}
+static void s5m8767_pmic_dt_parse_ext_control_gpio(struct sec_pmic_dev *iodev,
+ struct sec_regulator_data *rdata,
+ struct device_node *reg_np)
+{
+ rdata->ext_control_gpio = of_get_named_gpio(reg_np,
+ "s5m8767,pmic-ext-control-gpios", 0);
+ if (!gpio_is_valid(rdata->ext_control_gpio))
+ rdata->ext_control_gpio = 0;
+}
+
static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
struct sec_platform_data *pdata)
{
@@ -546,19 +565,13 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
pdata->num_regulators, GFP_KERNEL);
- if (!rdata) {
- dev_err(iodev->dev,
- "could not allocate memory for regulator data\n");
+ if (!rdata)
return -ENOMEM;
- }
rmode = devm_kzalloc(&pdev->dev, sizeof(*rmode) *
pdata->num_regulators, GFP_KERNEL);
- if (!rmode) {
- dev_err(iodev->dev,
- "could not allocate memory for regulator mode\n");
+ if (!rmode)
return -ENOMEM;
- }
pdata->regulators = rdata;
pdata->opmode = rmode;
@@ -574,6 +587,8 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
continue;
}
+ s5m8767_pmic_dt_parse_ext_control_gpio(iodev, rdata, reg_np);
+
rdata->id = i;
rdata->initdata = of_get_regulator_init_data(
&pdev->dev, reg_np);
@@ -922,6 +937,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
for (i = 0; i < pdata->num_regulators; i++) {
const struct sec_voltage_desc *desc;
int id = pdata->regulators[i].id;
+ int enable_reg, enable_val;
desc = reg_voltage_map[id];
if (desc) {
@@ -935,6 +951,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
regulators[id].vsel_mask = 0x3f;
else
regulators[id].vsel_mask = 0xff;
+
+ s5m8767_get_register(s5m8767, id, &enable_reg,
+ &enable_val);
+ regulators[id].enable_reg = enable_reg;
+ regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
+ regulators[id].enable_val = enable_val;
}
config.dev = s5m8767->dev;
@@ -942,6 +964,10 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
config.driver_data = s5m8767;
config.regmap = iodev->regmap_pmic;
config.of_node = pdata->regulators[i].reg_node;
+ config.ena_gpio = config.ena_gpio_flags = 0;
+ if (pdata->regulators[i].ext_control_gpio)
+ s5m8767_regulator_config_ext_control(s5m8767,
+ &pdata->regulators[i], &config);
rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
&config);
@@ -951,6 +977,16 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
id);
return ret;
}
+
+ if (pdata->regulators[i].ext_control_gpio) {
+ ret = s5m8767_enable_ext_control(s5m8767, rdev[i]);
+ if (ret < 0) {
+ dev_err(s5m8767->dev,
+ "failed to enable gpio control over %s: %d\n",
+ rdev[i]->desc->name, ret);
+ return ret;
+ }
+ }
}
return 0;
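
The s5m8767 ext-control support above hands BUCK9's on/off switching to a GPIO by filling in the ena_gpio fields of regulator_config before registration, after which the core toggles the GPIO rather than the enable register. A trimmed sketch of that wiring (helper name assumed):

    #include <linux/gpio.h>
    #include <linux/regulator/driver.h>

    /* Illustrative only: let the regulator core drive an enable GPIO. */
    static void example_use_ena_gpio(struct regulator_config *cfg, int gpio)
    {
            if (!gpio_is_valid(gpio))
                    return;

            cfg->ena_gpio = gpio;
            cfg->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
    }
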
diff --git a/drivers/regulator/st-pwm.c b/drivers/regulator/st-pwm.c
new file mode 100644
index 000000000000..e367af1c5f9d
--- /dev/null
+++ b/drivers/regulator/st-pwm.c
@@ -0,0 +1,190 @@
+/*
+ * Regulator driver for ST's PWM Regulators
+ *
+ * Copyright (C) 2014 - STMicroelectronics Inc.
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pwm.h>
+
+#define ST_PWM_REG_PERIOD 8448
+
+struct st_pwm_regulator_pdata {
+ const struct regulator_desc *desc;
+ struct st_pwm_voltages *duty_cycle_table;
+};
+
+struct st_pwm_regulator_data {
+ const struct st_pwm_regulator_pdata *pdata;
+ struct pwm_device *pwm;
+ bool enabled;
+ int state;
+};
+
+struct st_pwm_voltages {
+ unsigned int uV;
+ unsigned int dutycycle;
+};
+
+static int st_pwm_regulator_get_voltage_sel(struct regulator_dev *dev)
+{
+ struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+
+ return drvdata->state;
+}
+
+static int st_pwm_regulator_set_voltage_sel(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+ int dutycycle;
+ int ret;
+
+ dutycycle = (ST_PWM_REG_PERIOD / 100) *
+ drvdata->pdata->duty_cycle_table[selector].dutycycle;
+
+ ret = pwm_config(drvdata->pwm, dutycycle, ST_PWM_REG_PERIOD);
+ if (ret) {
+ dev_err(&dev->dev, "Failed to configure PWM\n");
+ return ret;
+ }
+
+ drvdata->state = selector;
+
+ if (!drvdata->enabled) {
+ ret = pwm_enable(drvdata->pwm);
+ if (ret) {
+ dev_err(&dev->dev, "Failed to enable PWM\n");
+ return ret;
+ }
+ drvdata->enabled = true;
+ }
+
+ return 0;
+}
+
+static int st_pwm_regulator_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+
+ if (selector >= dev->desc->n_voltages)
+ return -EINVAL;
+
+ return drvdata->pdata->duty_cycle_table[selector].uV;
+}
+
+static struct regulator_ops st_pwm_regulator_voltage_ops = {
+ .set_voltage_sel = st_pwm_regulator_set_voltage_sel,
+ .get_voltage_sel = st_pwm_regulator_get_voltage_sel,
+ .list_voltage = st_pwm_regulator_list_voltage,
+ .map_voltage = regulator_map_voltage_iterate,
+};
+
+static struct st_pwm_voltages b2105_duty_cycle_table[] = {
+ { .uV = 1114000, .dutycycle = 0, },
+ { .uV = 1095000, .dutycycle = 10, },
+ { .uV = 1076000, .dutycycle = 20, },
+ { .uV = 1056000, .dutycycle = 30, },
+ { .uV = 1036000, .dutycycle = 40, },
+ { .uV = 1016000, .dutycycle = 50, },
+ /* WARNING: Values above 50% duty-cycle cause boot failures. */
+};
+
+static const struct regulator_desc b2105_desc = {
+ .name = "b2105-pwm-regulator",
+ .ops = &st_pwm_regulator_voltage_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(b2105_duty_cycle_table),
+ .supply_name = "pwm",
+};
+
+static const struct st_pwm_regulator_pdata b2105_info = {
+ .desc = &b2105_desc,
+ .duty_cycle_table = b2105_duty_cycle_table,
+};
+
+static struct of_device_id st_pwm_of_match[] = {
+ { .compatible = "st,b2105-pwm-regulator", .data = &b2105_info, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, st_pwm_of_match);
+
+static int st_pwm_regulator_probe(struct platform_device *pdev)
+{
+ struct st_pwm_regulator_data *drvdata;
+ struct regulator_dev *regulator;
+ struct regulator_config config = { };
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_match;
+
+ if (!np) {
+ dev_err(&pdev->dev, "Device Tree node missing\n");
+ return -EINVAL;
+ }
+
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ of_match = of_match_device(st_pwm_of_match, &pdev->dev);
+ if (!of_match) {
+ dev_err(&pdev->dev, "failed to match of device\n");
+ return -ENODEV;
+ }
+ drvdata->pdata = of_match->data;
+
+ config.init_data = of_get_regulator_init_data(&pdev->dev, np);
+ if (!config.init_data)
+ return -ENOMEM;
+
+ config.of_node = np;
+ config.dev = &pdev->dev;
+ config.driver_data = drvdata;
+
+ drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
+ if (IS_ERR(drvdata->pwm)) {
+ dev_err(&pdev->dev, "Failed to get PWM\n");
+ return PTR_ERR(drvdata->pwm);
+ }
+
+ regulator = devm_regulator_register(&pdev->dev,
+ drvdata->pdata->desc, &config);
+ if (IS_ERR(regulator)) {
+ dev_err(&pdev->dev, "Failed to register regulator %s\n",
+ drvdata->pdata->desc->name);
+ return PTR_ERR(regulator);
+ }
+
+ return 0;
+}
+
+static struct platform_driver st_pwm_regulator_driver = {
+ .driver = {
+ .name = "st-pwm-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(st_pwm_of_match),
+ },
+ .probe = st_pwm_regulator_probe,
+};
+
+module_platform_driver(st_pwm_regulator_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>");
+MODULE_DESCRIPTION("ST PWM Regulator Driver");
+MODULE_ALIAS("platform:st_pwm-regulator");
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index b187b6bba7ad..a2dabb575b97 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -54,8 +54,8 @@ struct ti_abb_info {
/**
* struct ti_abb_reg - Register description for ABB block
- * @setup_reg: setup register offset from base
- * @control_reg: control register offset from base
+ * @setup_off: setup register offset from base
+ * @control_off: control register offset from base
* @sr2_wtcnt_value_mask: setup register- sr2_wtcnt_value mask
* @fbb_sel_mask: setup register- FBB sel mask
* @rbb_sel_mask: setup register- RBB sel mask
@@ -64,8 +64,8 @@ struct ti_abb_info {
* @opp_sel_mask: control register - mask for mode to operate
*/
struct ti_abb_reg {
- u32 setup_reg;
- u32 control_reg;
+ u32 setup_off;
+ u32 control_off;
/* Setup register fields */
u32 sr2_wtcnt_value_mask;
@@ -83,6 +83,8 @@ struct ti_abb_reg {
* @rdesc: regulator descriptor
* @clk: clock(usually sysclk) supplying ABB block
* @base: base address of ABB block
+ * @setup_reg: setup register of ABB block
+ * @control_reg: control register of ABB block
* @int_base: interrupt register base address
* @efuse_base: (optional) efuse base address for ABB modes
* @ldo_base: (optional) LDOVBB vset override base address
@@ -99,6 +101,8 @@ struct ti_abb {
struct regulator_desc rdesc;
struct clk *clk;
void __iomem *base;
+ void __iomem *setup_reg;
+ void __iomem *control_reg;
void __iomem *int_base;
void __iomem *efuse_base;
void __iomem *ldo_base;
@@ -118,20 +122,18 @@ struct ti_abb {
* ti_abb_rmw() - handy wrapper to set specific register bits
* @mask: mask for register field
* @value: value shifted to mask location and written
- * @offset: offset of register
- * @base: base address
+ * @reg: register address
*
* Return: final register value (may be unused)
*/
-static inline u32 ti_abb_rmw(u32 mask, u32 value, u32 offset,
- void __iomem *base)
+static inline u32 ti_abb_rmw(u32 mask, u32 value, void __iomem *reg)
{
u32 val;
- val = readl(base + offset);
+ val = readl(reg);
val &= ~mask;
val |= (value << __ffs(mask)) & mask;
- writel(val, base + offset);
+ writel(val, reg);
return val;
}
@@ -263,21 +265,19 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
if (ret)
goto out;
- ti_abb_rmw(regs->fbb_sel_mask | regs->rbb_sel_mask, 0, regs->setup_reg,
- abb->base);
+ ti_abb_rmw(regs->fbb_sel_mask | regs->rbb_sel_mask, 0, abb->setup_reg);
switch (info->opp_sel) {
case TI_ABB_SLOW_OPP:
- ti_abb_rmw(regs->rbb_sel_mask, 1, regs->setup_reg, abb->base);
+ ti_abb_rmw(regs->rbb_sel_mask, 1, abb->setup_reg);
break;
case TI_ABB_FAST_OPP:
- ti_abb_rmw(regs->fbb_sel_mask, 1, regs->setup_reg, abb->base);
+ ti_abb_rmw(regs->fbb_sel_mask, 1, abb->setup_reg);
break;
}
/* program next state of ABB ldo */
- ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg,
- abb->base);
+ ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, abb->control_reg);
/*
* program LDO VBB vset override if needed for !bypass mode
@@ -288,7 +288,7 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
ti_abb_program_ldovbb(dev, abb, info);
/* Initiate ABB ldo change */
- ti_abb_rmw(regs->opp_change_mask, 1, regs->control_reg, abb->base);
+ ti_abb_rmw(regs->opp_change_mask, 1, abb->control_reg);
/* Wait for ABB LDO to complete transition to new Bias setting */
ret = ti_abb_wait_txdone(dev, abb);
@@ -490,8 +490,7 @@ static int ti_abb_init_timings(struct device *dev, struct ti_abb *abb)
dev_dbg(dev, "%s: Clk_rate=%ld, sr2_cnt=0x%08x\n", __func__,
clk_get_rate(abb->clk), sr2_wt_cnt_val);
- ti_abb_rmw(regs->sr2_wtcnt_value_mask, sr2_wt_cnt_val, regs->setup_reg,
- abb->base);
+ ti_abb_rmw(regs->sr2_wtcnt_value_mask, sr2_wt_cnt_val, abb->setup_reg);
return 0;
}
@@ -508,32 +507,24 @@ static int ti_abb_init_table(struct device *dev, struct ti_abb *abb,
struct regulator_init_data *rinit_data)
{
struct ti_abb_info *info;
- const struct property *prop;
- const __be32 *abb_info;
const u32 num_values = 6;
char *pname = "ti,abb_info";
- u32 num_entries, i;
+ u32 i;
unsigned int *volt_table;
- int min_uV = INT_MAX, max_uV = 0;
+ int num_entries, min_uV = INT_MAX, max_uV = 0;
struct regulation_constraints *c = &rinit_data->constraints;
- prop = of_find_property(dev->of_node, pname, NULL);
- if (!prop) {
- dev_err(dev, "No '%s' property?\n", pname);
- return -ENODEV;
- }
-
- if (!prop->value) {
- dev_err(dev, "Empty '%s' property?\n", pname);
- return -ENODATA;
- }
-
/*
* Each abb_info is a set of n-tuple, where n is num_values, consisting
* of voltage and a set of detection logic for ABB information for that
* voltage to apply.
*/
- num_entries = prop->length / sizeof(u32);
+ num_entries = of_property_count_u32_elems(dev->of_node, pname);
+ if (num_entries < 0) {
+ dev_err(dev, "No '%s' property?\n", pname);
+ return num_entries;
+ }
+
if (!num_entries || (num_entries % num_values)) {
dev_err(dev, "All '%s' list entries need %d vals\n", pname,
num_values);
@@ -542,38 +533,38 @@ static int ti_abb_init_table(struct device *dev, struct ti_abb *abb,
num_entries /= num_values;
info = devm_kzalloc(dev, sizeof(*info) * num_entries, GFP_KERNEL);
- if (!info) {
- dev_err(dev, "Can't allocate info table for '%s' property\n",
- pname);
+ if (!info)
return -ENOMEM;
- }
+
abb->info = info;
volt_table = devm_kzalloc(dev, sizeof(unsigned int) * num_entries,
GFP_KERNEL);
- if (!volt_table) {
- dev_err(dev, "Can't allocate voltage table for '%s' property\n",
- pname);
+ if (!volt_table)
return -ENOMEM;
- }
abb->rdesc.n_voltages = num_entries;
abb->rdesc.volt_table = volt_table;
/* We do not know where the OPP voltage is at the moment */
abb->current_info_idx = -EINVAL;
- abb_info = prop->value;
for (i = 0; i < num_entries; i++, info++, volt_table++) {
u32 efuse_offset, rbb_mask, fbb_mask, vset_mask;
u32 efuse_val;
/* NOTE: num_values should equal to entries picked up here */
- *volt_table = be32_to_cpup(abb_info++);
- info->opp_sel = be32_to_cpup(abb_info++);
- efuse_offset = be32_to_cpup(abb_info++);
- rbb_mask = be32_to_cpup(abb_info++);
- fbb_mask = be32_to_cpup(abb_info++);
- vset_mask = be32_to_cpup(abb_info++);
+ of_property_read_u32_index(dev->of_node, pname, i * num_values,
+ volt_table);
+ of_property_read_u32_index(dev->of_node, pname,
+ i * num_values + 1, &info->opp_sel);
+ of_property_read_u32_index(dev->of_node, pname,
+ i * num_values + 2, &efuse_offset);
+ of_property_read_u32_index(dev->of_node, pname,
+ i * num_values + 3, &rbb_mask);
+ of_property_read_u32_index(dev->of_node, pname,
+ i * num_values + 4, &fbb_mask);
+ of_property_read_u32_index(dev->of_node, pname,
+ i * num_values + 5, &vset_mask);
dev_dbg(dev,
"[%d]v=%d ABB=%d ef=0x%x rbb=0x%x fbb=0x%x vset=0x%x\n",
@@ -648,8 +639,8 @@ static struct regulator_ops ti_abb_reg_ops = {
/* Default ABB block offsets, IF this changes in future, create new one */
static const struct ti_abb_reg abb_regs_v1 = {
/* WARNING: registers are wrongly documented in TRM */
- .setup_reg = 0x04,
- .control_reg = 0x00,
+ .setup_off = 0x04,
+ .control_off = 0x00,
.sr2_wtcnt_value_mask = (0xff << 8),
.fbb_sel_mask = (0x01 << 2),
@@ -661,8 +652,8 @@ static const struct ti_abb_reg abb_regs_v1 = {
};
static const struct ti_abb_reg abb_regs_v2 = {
- .setup_reg = 0x00,
- .control_reg = 0x04,
+ .setup_off = 0x00,
+ .control_off = 0x04,
.sr2_wtcnt_value_mask = (0xff << 8),
.fbb_sel_mask = (0x01 << 2),
@@ -673,9 +664,20 @@ static const struct ti_abb_reg abb_regs_v2 = {
.opp_sel_mask = (0x03 << 0),
};
+static const struct ti_abb_reg abb_regs_generic = {
+ .sr2_wtcnt_value_mask = (0xff << 8),
+ .fbb_sel_mask = (0x01 << 2),
+ .rbb_sel_mask = (0x01 << 1),
+ .sr2_en_mask = (0x01 << 0),
+
+ .opp_change_mask = (0x01 << 2),
+ .opp_sel_mask = (0x03 << 0),
+};
+
static const struct of_device_id ti_abb_of_match[] = {
{.compatible = "ti,abb-v1", .data = &abb_regs_v1},
{.compatible = "ti,abb-v2", .data = &abb_regs_v2},
+ {.compatible = "ti,abb-v3", .data = &abb_regs_generic},
{ },
};
@@ -722,11 +724,29 @@ static int ti_abb_probe(struct platform_device *pdev)
abb->regs = match->data;
/* Map ABB resources */
- pname = "base-address";
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
- abb->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(abb->base))
- return PTR_ERR(abb->base);
+ if (abb->regs->setup_off || abb->regs->control_off) {
+ pname = "base-address";
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+ abb->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(abb->base))
+ return PTR_ERR(abb->base);
+
+ abb->setup_reg = abb->base + abb->regs->setup_off;
+ abb->control_reg = abb->base + abb->regs->control_off;
+
+ } else {
+ pname = "control-address";
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+ abb->control_reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(abb->control_reg))
+ return PTR_ERR(abb->control_reg);
+
+ pname = "setup-address";
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+ abb->setup_reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(abb->setup_reg))
+ return PTR_ERR(abb->setup_reg);
+ }
pname = "int-address";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
@@ -860,7 +880,7 @@ skip_opt:
platform_set_drvdata(pdev, rdev);
/* Enable the ldo if not already done by bootloader */
- ti_abb_rmw(abb->regs->sr2_en_mask, 1, abb->regs->setup_reg, abb->base);
+ ti_abb_rmw(abb->regs->sr2_en_mask, 1, abb->setup_reg);
return 0;
}
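
The refactor above narrows ti_abb_rmw() to take a register address directly while keeping its mask/shift semantics. A standalone model of that logic is sketched below, assuming __builtin_ctz() in place of the kernel's __ffs() and a plain variable in place of the memory-mapped register.

/*
 * Standalone model of ti_abb_rmw(): update only the bits selected by
 * @mask, placing @value at the mask's lowest set bit.  A plain variable
 * stands in for the MMIO register and __builtin_ctz() stands in for the
 * kernel's __ffs(); both substitutions are assumptions made so the sketch
 * compiles on its own.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t rmw(uint32_t mask, uint32_t value, uint32_t *reg)
{
	uint32_t val = *reg;

	val &= ~mask;
	val |= (value << __builtin_ctz(mask)) & mask;
	*reg = val;
	return val;
}

int main(void)
{
	uint32_t setup = 0;

	/* field layouts as in the ti_abb_reg tables above */
	rmw(0x03 << 0, 2, &setup);	/* program a 2-bit OPP select field */
	rmw(0x01 << 2, 1, &setup);	/* set the FBB select bit */
	printf("setup = 0x%08x\n", setup);	/* prints 0x00000006 */
	return 0;
}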
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index b3764f594ee9..f31f22e3e1bd 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -227,10 +227,8 @@ static struct tps51632_regulator_platform_data *
struct device_node *np = dev->of_node;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "Memory alloc failed for platform data\n");
+ if (!pdata)
return NULL;
- }
pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node);
if (!pdata->reg_init_data) {
@@ -299,10 +297,8 @@ static int tps51632_probe(struct i2c_client *client,
}
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
- if (!tps) {
- dev_err(&client->dev, "Memory allocation failed\n");
+ if (!tps)
return -ENOMEM;
- }
tps->dev = &client->dev;
tps->desc.name = client->name;
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index c3fa15a299b1..a1672044e519 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -299,10 +299,8 @@ static struct tps62360_regulator_platform_data *
struct device_node *np = dev->of_node;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "Memory alloc failed for platform data\n");
+ if (!pdata)
return NULL;
- }
pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node);
if (!pdata->reg_init_data) {
@@ -377,11 +375,8 @@ static int tps62360_probe(struct i2c_client *client,
}
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
- if (!tps) {
- dev_err(&client->dev, "%s(): Memory allocation failed\n",
- __func__);
+ if (!tps)
return -ENOMEM;
- }
tps->en_discharge = pdata->en_discharge;
tps->en_internal_pulldn = pdata->en_internal_pulldn;
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 162a0fae20b3..98e66ce26723 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -359,7 +359,6 @@ static struct regulator_ops tps6507x_pmic_ops = {
.map_voltage = regulator_map_voltage_ascend,
};
-#ifdef CONFIG_OF
static struct of_regulator_match tps6507x_matches[] = {
{ .name = "VDCDC1"},
{ .name = "VDCDC2"},
@@ -381,12 +380,10 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board),
GFP_KERNEL);
- if (!tps_board) {
- dev_err(&pdev->dev, "Failure to alloc pdata for regulators.\n");
+ if (!tps_board)
return NULL;
- }
- regulators = of_find_node_by_name(np, "regulators");
+ regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
dev_err(&pdev->dev, "regulator node not found\n");
return NULL;
@@ -396,6 +393,7 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
matches = tps6507x_matches;
ret = of_regulator_match(&pdev->dev, regulators, matches, count);
+ of_node_put(regulators);
if (ret < 0) {
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
ret);
@@ -406,10 +404,8 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
reg_data = devm_kzalloc(&pdev->dev, (sizeof(struct regulator_init_data)
* TPS6507X_NUM_REGULATOR), GFP_KERNEL);
- if (!reg_data) {
- dev_err(&pdev->dev, "Failure to alloc init data for regulators.\n");
+ if (!reg_data)
return NULL;
- }
tps_board->tps6507x_pmic_init_data = reg_data;
@@ -424,15 +420,7 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
return tps_board;
}
-#else
-static inline struct tps6507x_board *tps6507x_parse_dt_reg_data(
- struct platform_device *pdev,
- struct of_regulator_match **tps6507x_reg_matches)
-{
- *tps6507x_reg_matches = NULL;
- return NULL;
-}
-#endif
+
static int tps6507x_pmic_probe(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
@@ -453,9 +441,10 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
*/
tps_board = dev_get_platdata(tps6507x_dev->dev);
- if (!tps_board && tps6507x_dev->dev->of_node)
+ if (IS_ENABLED(CONFIG_OF) && !tps_board &&
+ tps6507x_dev->dev->of_node)
tps_board = tps6507x_parse_dt_reg_data(pdev,
- &tps6507x_reg_matches);
+ &tps6507x_reg_matches);
if (!tps_board)
return -EINVAL;
@@ -481,7 +470,7 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
tps->info[i] = info;
if (init_data->driver_data) {
struct tps6507x_reg_platform_data *data =
- init_data->driver_data;
+ init_data->driver_data;
tps->info[i]->defdcdc_default = data->defdcdc_default;
}
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index 676f75548f00..2e92ef68574d 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -168,17 +168,13 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
tps65090_pdata = devm_kzalloc(&pdev->dev, sizeof(*tps65090_pdata),
GFP_KERNEL);
- if (!tps65090_pdata) {
- dev_err(&pdev->dev, "Memory alloc for tps65090_pdata failed\n");
+ if (!tps65090_pdata)
return ERR_PTR(-ENOMEM);
- }
reg_pdata = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX *
sizeof(*reg_pdata), GFP_KERNEL);
- if (!reg_pdata) {
- dev_err(&pdev->dev, "Memory alloc for reg_pdata failed\n");
+ if (!reg_pdata)
return ERR_PTR(-ENOMEM);
- }
regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
@@ -188,6 +184,7 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
ret = of_regulator_match(&pdev->dev, regulators, tps65090_matches,
ARRAY_SIZE(tps65090_matches));
+ of_node_put(regulators);
if (ret < 0) {
dev_err(&pdev->dev,
"Error parsing regulator init data: %d\n", ret);
@@ -252,10 +249,8 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
pmic = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX * sizeof(*pmic),
GFP_KERNEL);
- if (!pmic) {
- dev_err(&pdev->dev, "mem alloc for pmic failed\n");
+ if (!pmic)
return -ENOMEM;
- }
for (num = 0; num < TPS65090_REGULATOR_MAX; num++) {
tps_pdata = tps65090_pdata->reg_pdata[num];
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 9ea1bf26bd13..10b78d2b766a 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -187,7 +187,7 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
struct device_node *regs;
int i, count;
- regs = of_find_node_by_name(node, "regulators");
+ regs = of_get_child_by_name(node, "regulators");
if (!regs)
return NULL;
@@ -202,7 +202,7 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
return NULL;
for (i = 0; i < count; i++) {
- if (!reg_matches[i].init_data || !reg_matches[i].of_node)
+ if (!reg_matches[i].of_node)
continue;
pdata->tps65217_init_data[i] = reg_matches[i].init_data;
@@ -222,7 +222,6 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
{
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65217_board *pdata = dev_get_platdata(tps->dev);
- struct regulator_init_data *reg_data;
struct regulator_dev *rdev;
struct regulator_config config = { };
int i;
@@ -243,19 +242,9 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tps);
for (i = 0; i < TPS65217_NUM_REGULATOR; i++) {
-
- reg_data = pdata->tps65217_init_data[i];
-
- /*
- * Regulator API handles empty constraints but not NULL
- * constraints
- */
- if (!reg_data)
- continue;
-
/* Register the regulators */
config.dev = tps->dev;
- config.init_data = reg_data;
+ config.init_data = pdata->tps65217_init_data[i];
config.driver_data = tps;
config.regmap = tps->regmap;
if (tps->dev->of_node)
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
new file mode 100644
index 000000000000..cec72fa71d1d
--- /dev/null
+++ b/drivers/regulator/tps65218-regulator.c
@@ -0,0 +1,285 @@
+/*
+ * tps65218-regulator.c
+ *
+ * Regulator driver for TPS65218 PMIC
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps65218.h>
+
+static unsigned int tps65218_ramp_delay = 4000;
+
+enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4, DCDC5, DCDC6, LDO1 };
+
+#define TPS65218_REGULATOR(_name, _id, _ops, _n, _vr, _vm, _er, _em, _t, \
+ _lr, _nlr) \
+ { \
+ .name = _name, \
+ .id = _id, \
+ .ops = &_ops, \
+ .n_voltages = _n, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = _vr, \
+ .vsel_mask = _vm, \
+ .enable_reg = _er, \
+ .enable_mask = _em, \
+ .volt_table = _t, \
+ .linear_ranges = _lr, \
+ .n_linear_ranges = _nlr, \
+ } \
+
+#define TPS65218_INFO(_id, _nm, _min, _max) \
+ { \
+ .id = _id, \
+ .name = _nm, \
+ .min_uV = _min, \
+ .max_uV = _max, \
+ }
+
+static const struct regulator_linear_range dcdc1_dcdc2_ranges[] = {
+ REGULATOR_LINEAR_RANGE(850000, 0x0, 0x32, 10000),
+ REGULATOR_LINEAR_RANGE(1375000, 0x33, 0x3f, 25000),
+};
+
+static const struct regulator_linear_range ldo1_dcdc3_ranges[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0x0, 0x1a, 25000),
+ REGULATOR_LINEAR_RANGE(1600000, 0x1b, 0x3f, 50000),
+};
+
+static const struct regulator_linear_range dcdc4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1175000, 0x0, 0xf, 25000),
+ REGULATOR_LINEAR_RANGE(1550000, 0x10, 0x34, 50000),
+};
+
+static struct tps_info tps65218_pmic_regs[] = {
+ TPS65218_INFO(0, "DCDC1", 850000, 167500),
+ TPS65218_INFO(1, "DCDC2", 850000, 1675000),
+ TPS65218_INFO(2, "DCDC3", 900000, 3400000),
+ TPS65218_INFO(3, "DCDC4", 1175000, 3400000),
+ TPS65218_INFO(4, "DCDC5", 1000000, 1000000),
+ TPS65218_INFO(5, "DCDC6", 1800000, 1800000),
+ TPS65218_INFO(6, "LDO1", 900000, 3400000),
+};
+
+#define TPS65218_OF_MATCH(comp, label) \
+ { \
+ .compatible = comp, \
+ .data = &label, \
+ }
+
+static const struct of_device_id tps65218_of_match[] = {
+ TPS65218_OF_MATCH("ti,tps65218-dcdc1", tps65218_pmic_regs[DCDC1]),
+ TPS65218_OF_MATCH("ti,tps65218-dcdc2", tps65218_pmic_regs[DCDC2]),
+ TPS65218_OF_MATCH("ti,tps65218-dcdc3", tps65218_pmic_regs[DCDC3]),
+ TPS65218_OF_MATCH("ti,tps65218-dcdc4", tps65218_pmic_regs[DCDC4]),
+ TPS65218_OF_MATCH("ti,tps65218-dcdc5", tps65218_pmic_regs[DCDC5]),
+ TPS65218_OF_MATCH("ti,tps65218-dcdc6", tps65218_pmic_regs[DCDC6]),
+ TPS65218_OF_MATCH("ti,tps65218-ldo1", tps65218_pmic_regs[LDO1]),
+ { }
+};
+MODULE_DEVICE_TABLE(of, tps65218_of_match);
+
+static int tps65218_pmic_set_voltage_sel(struct regulator_dev *dev,
+ unsigned selector)
+{
+ int ret;
+ struct tps65218 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+ /* Set the voltage based on the vsel value; write protect level is 2 */
+ ret = tps65218_set_bits(tps, dev->desc->vsel_reg, dev->desc->vsel_mask,
+ selector, TPS65218_PROTECT_L1);
+
+ /* Set GO bit for DCDC1/2 to initiate voltage transition */
+ switch (rid) {
+ case TPS65218_DCDC_1:
+ case TPS65218_DCDC_2:
+ ret = tps65218_set_bits(tps, TPS65218_REG_CONTRL_SLEW_RATE,
+ TPS65218_SLEW_RATE_GO,
+ TPS65218_SLEW_RATE_GO,
+ TPS65218_PROTECT_L1);
+ break;
+ }
+
+ return ret;
+}
+
+static int tps65218_pmic_enable(struct regulator_dev *dev)
+{
+ struct tps65218 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+ if (rid < TPS65218_DCDC_1 || rid > TPS65218_LDO_1)
+ return -EINVAL;
+
+ /* Enable the regulator; password protection is level 1 */
+ return tps65218_set_bits(tps, dev->desc->enable_reg,
+ dev->desc->enable_mask, dev->desc->enable_mask,
+ TPS65218_PROTECT_L1);
+}
+
+static int tps65218_pmic_disable(struct regulator_dev *dev)
+{
+ struct tps65218 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+ if (rid < TPS65218_DCDC_1 || rid > TPS65218_LDO_1)
+ return -EINVAL;
+
+ /* Disable the regulator; password protection is level 1 */
+ return tps65218_clear_bits(tps, dev->desc->enable_reg,
+ dev->desc->enable_mask, TPS65218_PROTECT_L1);
+}
+
+static int tps65218_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector, unsigned int new_selector)
+{
+ int old_uv, new_uv;
+
+ old_uv = regulator_list_voltage_linear_range(rdev, old_selector);
+ if (old_uv < 0)
+ return old_uv;
+
+ new_uv = regulator_list_voltage_linear_range(rdev, new_selector);
+ if (new_uv < 0)
+ return new_uv;
+
+ return DIV_ROUND_UP(abs(old_uv - new_uv), tps65218_ramp_delay);
+}
+
+/* Operations permitted on DCDC1, DCDC2 */
+static struct regulator_ops tps65218_dcdc12_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = tps65218_pmic_enable,
+ .disable = tps65218_pmic_disable,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = tps65218_pmic_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_time_sel = tps65218_set_voltage_time_sel,
+};
+
+/* Operations permitted on DCDC3, DCDC4 and LDO1 */
+static struct regulator_ops tps65218_ldo1_dcdc34_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = tps65218_pmic_enable,
+ .disable = tps65218_pmic_disable,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = tps65218_pmic_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+/* Operations permitted on DCDC5, DCDC6 */
+static struct regulator_ops tps65218_dcdc56_pmic_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = tps65218_pmic_enable,
+ .disable = tps65218_pmic_disable,
+};
+
+static const struct regulator_desc regulators[] = {
+ TPS65218_REGULATOR("DCDC1", TPS65218_DCDC_1, tps65218_dcdc12_ops, 64,
+ TPS65218_REG_CONTROL_DCDC1,
+ TPS65218_CONTROL_DCDC1_MASK,
+ TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC1_EN, NULL,
+ dcdc1_dcdc2_ranges, 2),
+ TPS65218_REGULATOR("DCDC2", TPS65218_DCDC_2, tps65218_dcdc12_ops, 64,
+ TPS65218_REG_CONTROL_DCDC2,
+ TPS65218_CONTROL_DCDC2_MASK,
+ TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC2_EN, NULL,
+ dcdc1_dcdc2_ranges, 2),
+ TPS65218_REGULATOR("DCDC3", TPS65218_DCDC_3, tps65218_ldo1_dcdc34_ops,
+ 64, TPS65218_REG_CONTROL_DCDC3,
+ TPS65218_CONTROL_DCDC3_MASK, TPS65218_REG_ENABLE1,
+ TPS65218_ENABLE1_DC3_EN, NULL,
+ ldo1_dcdc3_ranges, 2),
+ TPS65218_REGULATOR("DCDC4", TPS65218_DCDC_4, tps65218_ldo1_dcdc34_ops,
+ 53, TPS65218_REG_CONTROL_DCDC4,
+ TPS65218_CONTROL_DCDC4_MASK,
+ TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC4_EN, NULL,
+ dcdc4_ranges, 2),
+ TPS65218_REGULATOR("DCDC5", TPS65218_DCDC_5, tps65218_dcdc56_pmic_ops,
+ 1, -1, -1, TPS65218_REG_ENABLE1,
+ TPS65218_ENABLE1_DC5_EN, NULL, NULL, 0),
+ TPS65218_REGULATOR("DCDC6", TPS65218_DCDC_6, tps65218_dcdc56_pmic_ops,
+ 1, -1, -1, TPS65218_REG_ENABLE1,
+ TPS65218_ENABLE1_DC6_EN, NULL, NULL, 0),
+ TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, tps65218_ldo1_dcdc34_ops, 64,
+ TPS65218_REG_CONTROL_DCDC4,
+ TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_LDO1_EN, NULL, ldo1_dcdc3_ranges,
+ 2),
+};
+
+static int tps65218_regulator_probe(struct platform_device *pdev)
+{
+ struct tps65218 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_init_data *init_data;
+ const struct tps_info *template;
+ struct regulator_dev *rdev;
+ const struct of_device_id *match;
+ struct regulator_config config = { };
+ int id;
+
+ match = of_match_device(tps65218_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ template = match->data;
+ id = template->id;
+ init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+
+ platform_set_drvdata(pdev, tps);
+
+ tps->info[id] = &tps65218_pmic_regs[id];
+ config.dev = &pdev->dev;
+ config.init_data = init_data;
+ config.driver_data = tps;
+ config.regmap = tps->regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config);
+ if (IS_ERR(rdev)) {
+ dev_err(tps->dev, "failed to register %s regulator\n",
+ pdev->name);
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver tps65218_regulator_driver = {
+ .driver = {
+ .name = "tps65218-pmic",
+ .owner = THIS_MODULE,
+ .of_match_table = tps65218_of_match,
+ },
+ .probe = tps65218_regulator_probe,
+};
+
+module_platform_driver(tps65218_regulator_driver);
+
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("TPS65218 voltage regulator driver");
+MODULE_ALIAS("platform:tps65218-pmic");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 9f6bfda711b7..5b494db9f95c 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -593,10 +593,9 @@ static int pmic_probe(struct spi_device *spi)
}
hw = devm_kzalloc(&spi->dev, sizeof(struct tps6524x), GFP_KERNEL);
- if (!hw) {
- dev_err(dev, "cannot allocate regulator private data\n");
+ if (!hw)
return -ENOMEM;
- }
+
spi_set_drvdata(spi, hw);
memset(hw, 0, sizeof(struct tps6524x));
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 0485d47f0d8a..32f38a63d944 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -363,10 +363,8 @@ static struct tps6586x_platform_data *tps6586x_parse_regulator_dt(
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "Memory alloction failed\n");
+ if (!pdata)
return NULL;
- }
for (i = 0; i < num; i++) {
int id;
@@ -398,7 +396,7 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
{
struct tps6586x_regulator *ri = NULL;
struct regulator_config config = { };
- struct regulator_dev **rdev;
+ struct regulator_dev *rdev;
struct regulator_init_data *reg_data;
struct tps6586x_platform_data *pdata;
struct of_regulator_match *tps6586x_reg_matches = NULL;
@@ -418,13 +416,6 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
return -ENODEV;
}
- rdev = devm_kzalloc(&pdev->dev, TPS6586X_ID_MAX_REGULATOR *
- sizeof(*rdev), GFP_KERNEL);
- if (!rdev) {
- dev_err(&pdev->dev, "Mmemory alloc failed\n");
- return -ENOMEM;
- }
-
version = tps6586x_get_version(pdev->dev.parent);
for (id = 0; id < TPS6586X_ID_MAX_REGULATOR; ++id) {
@@ -451,12 +442,11 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
if (tps6586x_reg_matches)
config.of_node = tps6586x_reg_matches[id].of_node;
- rdev[id] = devm_regulator_register(&pdev->dev, &ri->desc,
- &config);
- if (IS_ERR(rdev[id])) {
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
- return PTR_ERR(rdev[id]);
+ return PTR_ERR(rdev);
}
if (reg_data) {
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index f50dd847eebc..fa7db8847578 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -1011,11 +1011,8 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
pmic_plat_data = devm_kzalloc(&pdev->dev, sizeof(*pmic_plat_data),
GFP_KERNEL);
-
- if (!pmic_plat_data) {
- dev_err(&pdev->dev, "Failure to alloc pdata for regulators.\n");
+ if (!pmic_plat_data)
return NULL;
- }
np = of_node_get(pdev->dev.parent->of_node);
regulators = of_get_child_by_name(np, "regulators");
@@ -1098,10 +1095,8 @@ static int tps65910_probe(struct platform_device *pdev)
}
pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
- if (!pmic) {
- dev_err(&pdev->dev, "Memory allocation failed for pmic\n");
+ if (!pmic)
return -ENOMEM;
- }
pmic->mfd = tps65910;
platform_set_drvdata(pdev, pmic);
@@ -1130,24 +1125,18 @@ static int tps65910_probe(struct platform_device *pdev)
pmic->desc = devm_kzalloc(&pdev->dev, pmic->num_regulators *
sizeof(struct regulator_desc), GFP_KERNEL);
- if (!pmic->desc) {
- dev_err(&pdev->dev, "Memory alloc fails for desc\n");
+ if (!pmic->desc)
return -ENOMEM;
- }
pmic->info = devm_kzalloc(&pdev->dev, pmic->num_regulators *
sizeof(struct tps_info *), GFP_KERNEL);
- if (!pmic->info) {
- dev_err(&pdev->dev, "Memory alloc fails for info\n");
+ if (!pmic->info)
return -ENOMEM;
- }
pmic->rdev = devm_kzalloc(&pdev->dev, pmic->num_regulators *
sizeof(struct regulator_dev *), GFP_KERNEL);
- if (!pmic->rdev) {
- dev_err(&pdev->dev, "Memory alloc fails for rdev\n");
+ if (!pmic->rdev)
return -ENOMEM;
- }
for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS;
i++, info++) {
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
index 71f457a42623..26aa6d9c308f 100644
--- a/drivers/regulator/tps80031-regulator.c
+++ b/drivers/regulator/tps80031-regulator.c
@@ -115,7 +115,7 @@ static int tps80031_reg_is_enabled(struct regulator_dev *rdev)
ri->rinfo->state_reg, ret);
return ret;
}
- return ((reg_val & TPS80031_STATE_MASK) == TPS80031_STATE_ON);
+ return (reg_val & TPS80031_STATE_MASK) == TPS80031_STATE_ON;
}
static int tps80031_reg_enable(struct regulator_dev *rdev)
@@ -693,10 +693,8 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
pmic = devm_kzalloc(&pdev->dev,
TPS80031_REGULATOR_MAX * sizeof(*pmic), GFP_KERNEL);
- if (!pmic) {
- dev_err(&pdev->dev, "mem alloc for pmic failed\n");
+ if (!pmic)
return -ENOMEM;
- }
for (num = 0; num < TPS80031_REGULATOR_MAX; ++num) {
tps_pdata = pdata->regulator_pdata[num];
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 04cf9c16ef23..0d88a82ab2a2 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -469,10 +469,8 @@ static int wm831x_buckv_probe(struct platform_device *pdev)
dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
GFP_KERNEL);
- if (dcdc == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!dcdc)
return -ENOMEM;
- }
dcdc->wm831x = wm831x;
@@ -622,10 +620,8 @@ static int wm831x_buckp_probe(struct platform_device *pdev)
dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
GFP_KERNEL);
- if (dcdc == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!dcdc)
return -ENOMEM;
- }
dcdc->wm831x = wm831x;
@@ -752,10 +748,8 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
return -ENODEV;
dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc), GFP_KERNEL);
- if (dcdc == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!dcdc)
return -ENOMEM;
- }
dcdc->wm831x = wm831x;
@@ -842,10 +836,8 @@ static int wm831x_epe_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probing EPE%d\n", id + 1);
dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc), GFP_KERNEL);
- if (dcdc == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!dcdc)
return -ENOMEM;
- }
dcdc->wm831x = wm831x;
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 0339b886df5d..72e385e76a9d 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -165,10 +165,8 @@ static int wm831x_isink_probe(struct platform_device *pdev)
isink = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_isink),
GFP_KERNEL);
- if (isink == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!isink)
return -ENOMEM;
- }
isink->wm831x = wm831x;
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 46d6700467b5..eca0eeb78acd 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -235,10 +235,8 @@ static int wm831x_gp_ldo_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
- if (ldo == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!ldo)
return -ENOMEM;
- }
ldo->wm831x = wm831x;
@@ -447,10 +445,8 @@ static int wm831x_aldo_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
- if (ldo == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!ldo)
return -ENOMEM;
- }
ldo->wm831x = wm831x;
@@ -594,10 +590,8 @@ static int wm831x_alive_ldo_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
- if (ldo == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!ldo)
return -ENOMEM;
- }
ldo->wm831x = wm831x;
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index de7b9c73e3fa..7ec7c390eeda 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -361,7 +361,7 @@ static int wm8350_dcdc_set_suspend_voltage(struct regulator_dev *rdev, int uV)
sel = regulator_map_voltage_linear(rdev, uV, uV);
if (sel < 0)
- return -EINVAL;
+ return sel;
/* all DCDCs have same mV bits */
val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
@@ -574,7 +574,7 @@ static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
sel = regulator_map_voltage_linear_range(rdev, uV, uV);
if (sel < 0)
- return -EINVAL;
+ return sel;
/* all LDOs have same mV bits */
val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 71c5911f2e71..c24346db8a71 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -134,10 +134,8 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm8994_ldo), GFP_KERNEL);
- if (ldo == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!ldo)
return -ENOMEM;
- }
ldo->wm8994 = wm8994;
ldo->supply = wm8994_ldo_consumer[id];
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index c9d04f797862..0615f50a14cd 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -11,3 +11,5 @@ menuconfig RESET_CONTROLLER
via GPIOs or SoC-internal reset controller modules.
If unsure, say no.
+
+source "drivers/reset/sti/Kconfig"
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index cc29832c9638..4f60caf750ce 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_RESET_CONTROLLER) += core.o
obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
+obj-$(CONFIG_ARCH_STI) += sti/
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index d1b6089a0ef8..baeaf82d40d9 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -43,7 +43,7 @@ struct reset_control {
* This simple translation function should be used for reset controllers
* with 1:1 mapping, where reset lines can be indexed by number without gaps.
*/
-int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
+static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells))
@@ -54,7 +54,6 @@ int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
return reset_spec->args[0];
}
-EXPORT_SYMBOL_GPL(of_reset_simple_xlate);
/**
* reset_controller_register - register a reset controller device
@@ -127,15 +126,16 @@ int reset_control_deassert(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_deassert);
/**
- * reset_control_get - Lookup and obtain a reference to a reset controller.
- * @dev: device to be reset by the controller
+ * of_reset_control_get - Lookup and obtain a reference to a reset controller.
+ * @node: device to be reset by the controller
* @id: reset line name
*
* Returns a struct reset_control or IS_ERR() condition containing errno.
*
* Use of id names is optional.
*/
-struct reset_control *reset_control_get(struct device *dev, const char *id)
+struct reset_control *of_reset_control_get(struct device_node *node,
+ const char *id)
{
struct reset_control *rstc = ERR_PTR(-EPROBE_DEFER);
struct reset_controller_dev *r, *rcdev;
@@ -144,13 +144,10 @@ struct reset_control *reset_control_get(struct device *dev, const char *id)
int rstc_id;
int ret;
- if (!dev)
- return ERR_PTR(-EINVAL);
-
if (id)
- index = of_property_match_string(dev->of_node,
+ index = of_property_match_string(node,
"reset-names", id);
- ret = of_parse_phandle_with_args(dev->of_node, "resets", "#reset-cells",
+ ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
index, &args);
if (ret)
return ERR_PTR(ret);
@@ -167,7 +164,7 @@ struct reset_control *reset_control_get(struct device *dev, const char *id)
if (!rcdev) {
mutex_unlock(&reset_controller_list_mutex);
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-EPROBE_DEFER);
}
rstc_id = rcdev->of_xlate(rcdev, &args);
@@ -185,12 +182,35 @@ struct reset_control *reset_control_get(struct device *dev, const char *id)
return ERR_PTR(-ENOMEM);
}
- rstc->dev = dev;
rstc->rcdev = rcdev;
rstc->id = rstc_id;
return rstc;
}
+EXPORT_SYMBOL_GPL(of_reset_control_get);
+
+/**
+ * reset_control_get - Lookup and obtain a reference to a reset controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+struct reset_control *reset_control_get(struct device *dev, const char *id)
+{
+ struct reset_control *rstc;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ rstc = of_reset_control_get(dev->of_node, id);
+ if (!IS_ERR(rstc))
+ rstc->dev = dev;
+
+ return rstc;
+}
EXPORT_SYMBOL_GPL(reset_control_get);
/**
@@ -243,33 +263,6 @@ struct reset_control *devm_reset_control_get(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(devm_reset_control_get);
-static int devm_reset_control_match(struct device *dev, void *res, void *data)
-{
- struct reset_control **rstc = res;
- if (WARN_ON(!rstc || !*rstc))
- return 0;
- return *rstc == data;
-}
-
-/**
- * devm_reset_control_put - resource managed reset_control_put()
- * @rstc: reset controller to free
- *
- * Deallocate a reset control allocated withd devm_reset_control_get().
- * This function will not need to be called normally, as devres will take
- * care of freeing the resource.
- */
-void devm_reset_control_put(struct reset_control *rstc)
-{
- int ret;
-
- ret = devres_release(rstc->dev, devm_reset_control_release,
- devm_reset_control_match, rstc);
- if (ret)
- WARN_ON(ret);
-}
-EXPORT_SYMBOL_GPL(devm_reset_control_put);
-
/**
* device_reset - find reset controller associated with the device
* and perform reset
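
The core.c change above splits the DT lookup into of_reset_control_get() and keeps reset_control_get() as a thin wrapper over it. A consumer-side sketch follows, assuming the matching declaration lands in linux/reset.h as part of this series; the "softreset" line name and the child-node walk are invented purely for illustration.

/*
 * Sketch of the consumer-facing effect of the core.c change: device
 * drivers keep calling reset_control_get(dev, ...), which now wraps
 * of_reset_control_get(dev->of_node, ...), while code that only holds a
 * device_node (e.g. iterating child nodes) can use the new entry point
 * directly.  Names here are illustrative, not from a specific driver.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/reset.h>

static int example_reset_children(struct device *dev)
{
	struct device_node *child;

	for_each_child_of_node(dev->of_node, child) {
		struct reset_control *rstc;

		rstc = of_reset_control_get(child, "softreset");
		if (IS_ERR(rstc)) {
			of_node_put(child);
			return PTR_ERR(rstc);
		}

		reset_control_assert(rstc);
		reset_control_deassert(rstc);
		reset_control_put(rstc);
	}

	return 0;
}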
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig
new file mode 100644
index 000000000000..88d2d0316613
--- /dev/null
+++ b/drivers/reset/sti/Kconfig
@@ -0,0 +1,15 @@
+if ARCH_STI
+
+config STI_RESET_SYSCFG
+ bool
+ select RESET_CONTROLLER
+
+config STIH415_RESET
+ bool
+ select STI_RESET_SYSCFG
+
+config STIH416_RESET
+ bool
+ select STI_RESET_SYSCFG
+
+endif
diff --git a/drivers/reset/sti/Makefile b/drivers/reset/sti/Makefile
new file mode 100644
index 000000000000..be1c97647871
--- /dev/null
+++ b/drivers/reset/sti/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_STI_RESET_SYSCFG) += reset-syscfg.o
+
+obj-$(CONFIG_STIH415_RESET) += reset-stih415.o
+obj-$(CONFIG_STIH416_RESET) += reset-stih416.o
diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c
new file mode 100644
index 000000000000..e6f6c41abe12
--- /dev/null
+++ b/drivers/reset/sti/reset-stih415.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2013 STMicroelectronics (R&D) Limited
+ * Author: Stephen Gallimore <stephen.gallimore@st.com>
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/reset-controller/stih415-resets.h>
+
+#include "reset-syscfg.h"
+
+/*
+ * STiH415 Peripheral powerdown definitions.
+ */
+static const char stih415_front[] = "st,stih415-front-syscfg";
+static const char stih415_rear[] = "st,stih415-rear-syscfg";
+static const char stih415_sbc[] = "st,stih415-sbc-syscfg";
+static const char stih415_lpm[] = "st,stih415-lpm-syscfg";
+
+#define STIH415_PDN_FRONT(_bit) \
+ _SYSCFG_RST_CH(stih415_front, SYSCFG_114, _bit, SYSSTAT_187, _bit)
+
+#define STIH415_PDN_REAR(_cntl, _stat) \
+ _SYSCFG_RST_CH(stih415_rear, SYSCFG_336, _cntl, SYSSTAT_384, _stat)
+
+#define STIH415_SRST_REAR(_reg, _bit) \
+ _SYSCFG_RST_CH_NO_ACK(stih415_rear, _reg, _bit)
+
+#define STIH415_SRST_SBC(_reg, _bit) \
+ _SYSCFG_RST_CH_NO_ACK(stih415_sbc, _reg, _bit)
+
+#define STIH415_SRST_FRONT(_reg, _bit) \
+ _SYSCFG_RST_CH_NO_ACK(stih415_front, _reg, _bit)
+
+#define STIH415_SRST_LPM(_reg, _bit) \
+ _SYSCFG_RST_CH_NO_ACK(stih415_lpm, _reg, _bit)
+
+#define SYSCFG_114 0x38 /* Powerdown request EMI/NAND/Keyscan */
+#define SYSSTAT_187 0x15c /* Powerdown status EMI/NAND/Keyscan */
+
+#define SYSCFG_336 0x90 /* Powerdown request USB/SATA/PCIe */
+#define SYSSTAT_384 0x150 /* Powerdown status USB/SATA/PCIe */
+
+#define SYSCFG_376 0x130 /* Reset generator 0 control 0 */
+#define SYSCFG_166 0x108 /* Softreset Ethernet 0 */
+#define SYSCFG_31 0x7c /* Softreset Ethernet 1 */
+#define LPM_SYSCFG_1 0x4 /* Softreset IRB */
+
+static const struct syscfg_reset_channel_data stih415_powerdowns[] = {
+ [STIH415_EMISS_POWERDOWN] = STIH415_PDN_FRONT(0),
+ [STIH415_NAND_POWERDOWN] = STIH415_PDN_FRONT(1),
+ [STIH415_KEYSCAN_POWERDOWN] = STIH415_PDN_FRONT(2),
+ [STIH415_USB0_POWERDOWN] = STIH415_PDN_REAR(0, 0),
+ [STIH415_USB1_POWERDOWN] = STIH415_PDN_REAR(1, 1),
+ [STIH415_USB2_POWERDOWN] = STIH415_PDN_REAR(2, 2),
+ [STIH415_SATA0_POWERDOWN] = STIH415_PDN_REAR(3, 3),
+ [STIH415_SATA1_POWERDOWN] = STIH415_PDN_REAR(4, 4),
+ [STIH415_PCIE_POWERDOWN] = STIH415_PDN_REAR(5, 8),
+};
+
+static const struct syscfg_reset_channel_data stih415_softresets[] = {
+ [STIH415_ETH0_SOFTRESET] = STIH415_SRST_FRONT(SYSCFG_166, 0),
+ [STIH415_ETH1_SOFTRESET] = STIH415_SRST_SBC(SYSCFG_31, 0),
+ [STIH415_IRB_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 6),
+ [STIH415_USB0_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 9),
+ [STIH415_USB1_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 10),
+ [STIH415_USB2_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 11),
+};
+
+static struct syscfg_reset_controller_data stih415_powerdown_controller = {
+ .wait_for_ack = true,
+ .nr_channels = ARRAY_SIZE(stih415_powerdowns),
+ .channels = stih415_powerdowns,
+};
+
+static struct syscfg_reset_controller_data stih415_softreset_controller = {
+ .wait_for_ack = false,
+ .active_low = true,
+ .nr_channels = ARRAY_SIZE(stih415_softresets),
+ .channels = stih415_softresets,
+};
+
+static struct of_device_id stih415_reset_match[] = {
+ { .compatible = "st,stih415-powerdown",
+ .data = &stih415_powerdown_controller, },
+ { .compatible = "st,stih415-softreset",
+ .data = &stih415_softreset_controller, },
+ {},
+};
+
+static struct platform_driver stih415_reset_driver = {
+ .probe = syscfg_reset_probe,
+ .driver = {
+ .name = "reset-stih415",
+ .owner = THIS_MODULE,
+ .of_match_table = stih415_reset_match,
+ },
+};
+
+static int __init stih415_reset_init(void)
+{
+ return platform_driver_register(&stih415_reset_driver);
+}
+arch_initcall(stih415_reset_init);
diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c
new file mode 100644
index 000000000000..fe3bf02bdc8c
--- /dev/null
+++ b/drivers/reset/sti/reset-stih416.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2013 STMicroelectronics (R&D) Limited
+ * Author: Stephen Gallimore <stephen.gallimore@st.com>
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/reset-controller/stih416-resets.h>
+
+#include "reset-syscfg.h"
+
+/*
+ * STiH416 Peripheral powerdown definitions.
+ */
+static const char stih416_front[] = "st,stih416-front-syscfg";
+static const char stih416_rear[] = "st,stih416-rear-syscfg";
+static const char stih416_sbc[] = "st,stih416-sbc-syscfg";
+static const char stih416_lpm[] = "st,stih416-lpm-syscfg";
+static const char stih416_cpu[] = "st,stih416-cpu-syscfg";
+
+#define STIH416_PDN_FRONT(_bit) \
+ _SYSCFG_RST_CH(stih416_front, SYSCFG_1500, _bit, SYSSTAT_1578, _bit)
+
+#define STIH416_PDN_REAR(_cntl, _stat) \
+ _SYSCFG_RST_CH(stih416_rear, SYSCFG_2525, _cntl, SYSSTAT_2583, _stat)
+
+#define SYSCFG_1500 0x7d0 /* Powerdown request EMI/NAND/Keyscan */
+#define SYSSTAT_1578 0x908 /* Powerdown status EMI/NAND/Keyscan */
+
+#define SYSCFG_2525 0x834 /* Powerdown request USB/SATA/PCIe */
+#define SYSSTAT_2583 0x91c /* Powerdown status USB/SATA/PCIe */
+
+#define SYSCFG_2552 0x8A0 /* Reset Generator control 0 */
+#define SYSCFG_1539 0x86c /* Softreset Ethernet 0 */
+#define SYSCFG_510 0x7f8 /* Softreset Ethernet 1 */
+#define LPM_SYSCFG_1 0x4 /* Softreset IRB */
+#define SYSCFG_2553 0x8a4 /* Softreset SATA0/1, PCIE0/1 */
+#define SYSCFG_7563 0x8cc /* MPE softresets 0 */
+#define SYSCFG_7564 0x8d0 /* MPE softresets 1 */
+
+#define STIH416_SRST_CPU(_reg, _bit) \
+ _SYSCFG_RST_CH_NO_ACK(stih416_cpu, _reg, _bit)
+
+#define STIH416_SRST_FRONT(_reg, _bit) \
+ _SYSCFG_RST_CH_NO_ACK(stih416_front, _reg, _bit)
+
+#define STIH416_SRST_REAR(_reg, _bit) \
+ _SYSCFG_RST_CH_NO_ACK(stih416_rear, _reg, _bit)
+
+#define STIH416_SRST_LPM(_reg, _bit) \
+ _SYSCFG_RST_CH_NO_ACK(stih416_lpm, _reg, _bit)
+
+#define STIH416_SRST_SBC(_reg, _bit) \
+ _SYSCFG_RST_CH_NO_ACK(stih416_sbc, _reg, _bit)
+
+static const struct syscfg_reset_channel_data stih416_powerdowns[] = {
+ [STIH416_EMISS_POWERDOWN] = STIH416_PDN_FRONT(0),
+ [STIH416_NAND_POWERDOWN] = STIH416_PDN_FRONT(1),
+ [STIH416_KEYSCAN_POWERDOWN] = STIH416_PDN_FRONT(2),
+ [STIH416_USB0_POWERDOWN] = STIH416_PDN_REAR(0, 0),
+ [STIH416_USB1_POWERDOWN] = STIH416_PDN_REAR(1, 1),
+ [STIH416_USB2_POWERDOWN] = STIH416_PDN_REAR(2, 2),
+ [STIH416_USB3_POWERDOWN] = STIH416_PDN_REAR(6, 5),
+ [STIH416_SATA0_POWERDOWN] = STIH416_PDN_REAR(3, 3),
+ [STIH416_SATA1_POWERDOWN] = STIH416_PDN_REAR(4, 4),
+ [STIH416_PCIE0_POWERDOWN] = STIH416_PDN_REAR(7, 9),
+ [STIH416_PCIE1_POWERDOWN] = STIH416_PDN_REAR(5, 8),
+};
+
+static const struct syscfg_reset_channel_data stih416_softresets[] = {
+ [STIH416_ETH0_SOFTRESET] = STIH416_SRST_FRONT(SYSCFG_1539, 0),
+ [STIH416_ETH1_SOFTRESET] = STIH416_SRST_SBC(SYSCFG_510, 0),
+ [STIH416_IRB_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 6),
+ [STIH416_USB0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 9),
+ [STIH416_USB1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 10),
+ [STIH416_USB2_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 11),
+ [STIH416_USB3_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 28),
+ [STIH416_SATA0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 7),
+ [STIH416_SATA1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 3),
+ [STIH416_PCIE0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 15),
+ [STIH416_PCIE1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 2),
+ [STIH416_AUD_DAC_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 14),
+ [STIH416_HDTVOUT_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 5),
+ [STIH416_VTAC_M_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 25),
+ [STIH416_VTAC_A_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 26),
+ [STIH416_SYNC_HD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 5),
+ [STIH416_SYNC_SD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 6),
+ [STIH416_BLITTER_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 10),
+ [STIH416_GPU_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 11),
+ [STIH416_VTAC_M_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 18),
+ [STIH416_VTAC_A_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 19),
+ [STIH416_VTG_AUX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 21),
+ [STIH416_JPEG_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 23),
+ [STIH416_HVA_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 2),
+ [STIH416_COMPO_M_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 3),
+ [STIH416_COMPO_A_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 4),
+ [STIH416_VP8_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 10),
+ [STIH416_VTG_MAIN_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 16),
+};
+
+static struct syscfg_reset_controller_data stih416_powerdown_controller = {
+ .wait_for_ack = true,
+ .nr_channels = ARRAY_SIZE(stih416_powerdowns),
+ .channels = stih416_powerdowns,
+};
+
+static struct syscfg_reset_controller_data stih416_softreset_controller = {
+ .wait_for_ack = false,
+ .active_low = true,
+ .nr_channels = ARRAY_SIZE(stih416_softresets),
+ .channels = stih416_softresets,
+};
+
+static struct of_device_id stih416_reset_match[] = {
+ { .compatible = "st,stih416-powerdown",
+ .data = &stih416_powerdown_controller, },
+ { .compatible = "st,stih416-softreset",
+ .data = &stih416_softreset_controller, },
+ {},
+};
+
+static struct platform_driver stih416_reset_driver = {
+ .probe = syscfg_reset_probe,
+ .driver = {
+ .name = "reset-stih416",
+ .owner = THIS_MODULE,
+ .of_match_table = stih416_reset_match,
+ },
+};
+
+static int __init stih416_reset_init(void)
+{
+ return platform_driver_register(&stih416_reset_driver);
+}
+arch_initcall(stih416_reset_init);
diff --git a/drivers/reset/sti/reset-syscfg.c b/drivers/reset/sti/reset-syscfg.c
new file mode 100644
index 000000000000..a145cc066d4a
--- /dev/null
+++ b/drivers/reset/sti/reset-syscfg.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2013 STMicroelectronics Limited
+ * Author: Stephen Gallimore <stephen.gallimore@st.com>
+ *
+ * Inspired by mach-imx/src.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "reset-syscfg.h"
+
+/**
+ * Reset channel regmap configuration
+ *
+ * @reset: regmap field for the channel's reset bit.
+ * @ack: regmap field for the channel's ack bit (optional).
+ */
+struct syscfg_reset_channel {
+ struct regmap_field *reset;
+ struct regmap_field *ack;
+};
+
+/**
+ * A reset controller which groups together a set of related reset bits, which
+ * may be located in different system configuration registers.
+ *
+ * @rst: base reset controller structure.
+ * @active_low: Are the resets in this controller active low, i.e. clearing
+ * the reset bit puts the hardware into reset.
+ * @channels: An array of reset channels for this controller.
+ */
+struct syscfg_reset_controller {
+ struct reset_controller_dev rst;
+ bool active_low;
+ struct syscfg_reset_channel *channels;
+};
+
+#define to_syscfg_reset_controller(_rst) \
+ container_of(_rst, struct syscfg_reset_controller, rst)
+
+static int syscfg_reset_program_hw(struct reset_controller_dev *rcdev,
+ unsigned long idx, int assert)
+{
+ struct syscfg_reset_controller *rst = to_syscfg_reset_controller(rcdev);
+ const struct syscfg_reset_channel *ch;
+ u32 ctrl_val = rst->active_low ? !assert : !!assert;
+ int err;
+
+ if (idx >= rcdev->nr_resets)
+ return -EINVAL;
+
+ ch = &rst->channels[idx];
+
+ err = regmap_field_write(ch->reset, ctrl_val);
+ if (err)
+ return err;
+
+ if (ch->ack) {
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ u32 ack_val;
+
+ while (true) {
+ err = regmap_field_read(ch->ack, &ack_val);
+ if (err)
+ return err;
+
+ if (ack_val == ctrl_val)
+ break;
+
+ if (time_after(jiffies, timeout))
+ return -ETIME;
+
+ cpu_relax();
+ }
+ }
+
+ return 0;
+}
+
+static int syscfg_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ return syscfg_reset_program_hw(rcdev, idx, true);
+}
+
+static int syscfg_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ return syscfg_reset_program_hw(rcdev, idx, false);
+}
+
+static int syscfg_reset_dev(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ int err = syscfg_reset_assert(rcdev, idx);
+ if (err)
+ return err;
+
+ return syscfg_reset_deassert(rcdev, idx);
+}
+
+static struct reset_control_ops syscfg_reset_ops = {
+ .reset = syscfg_reset_dev,
+ .assert = syscfg_reset_assert,
+ .deassert = syscfg_reset_deassert,
+};
+
+static int syscfg_reset_controller_register(struct device *dev,
+ const struct syscfg_reset_controller_data *data)
+{
+ struct syscfg_reset_controller *rc;
+ size_t size;
+ int i, err;
+
+ rc = devm_kzalloc(dev, sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ size = sizeof(struct syscfg_reset_channel) * data->nr_channels;
+
+ rc->channels = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!rc->channels)
+ return -ENOMEM;
+
+ rc->rst.ops = &syscfg_reset_ops;
+ rc->rst.of_node = dev->of_node;
+ rc->rst.nr_resets = data->nr_channels;
+ rc->active_low = data->active_low;
+
+ for (i = 0; i < data->nr_channels; i++) {
+ struct regmap *map;
+ struct regmap_field *f;
+ const char *compatible = data->channels[i].compatible;
+
+ map = syscon_regmap_lookup_by_compatible(compatible);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ f = devm_regmap_field_alloc(dev, map, data->channels[i].reset);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ rc->channels[i].reset = f;
+
+ if (!data->wait_for_ack)
+ continue;
+
+ f = devm_regmap_field_alloc(dev, map, data->channels[i].ack);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ rc->channels[i].ack = f;
+ }
+
+ err = reset_controller_register(&rc->rst);
+ if (!err)
+ dev_info(dev, "registered\n");
+
+ return err;
+}
+
+int syscfg_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = pdev ? &pdev->dev : NULL;
+ const struct of_device_id *match;
+
+ if (!dev || !dev->driver)
+ return -ENODEV;
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!match || !match->data)
+ return -EINVAL;
+
+ return syscfg_reset_controller_register(dev, match->data);
+}
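
The assert/deassert/reset callbacks above are only reached through the generic
reset framework, so peripheral drivers never touch the syscfg registers
directly. A minimal consumer sketch, assuming a hypothetical device whose
"reset-names" property contains a "softreset" entry:

	#include <linux/reset.h>

	static int foo_power_up(struct device *dev)
	{
		struct reset_control *rstc;

		/* Resolved via the consumer's "resets"/"reset-names" properties. */
		rstc = devm_reset_control_get(dev, "softreset");
		if (IS_ERR(rstc))
			return PTR_ERR(rstc);

		/* Ends up in syscfg_reset_deassert() for these controllers. */
		return reset_control_deassert(rstc);
	}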
diff --git a/drivers/reset/sti/reset-syscfg.h b/drivers/reset/sti/reset-syscfg.h
new file mode 100644
index 000000000000..2cc2283bac40
--- /dev/null
+++ b/drivers/reset/sti/reset-syscfg.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2013 STMicroelectronics (R&D) Limited
+ * Author: Stephen Gallimore <stephen.gallimore@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __STI_RESET_SYSCFG_H
+#define __STI_RESET_SYSCFG_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+/**
+ * Reset channel description for a system configuration register based
+ * reset controller.
+ *
+ * @compatible: Compatible string of the syscon regmap containing this
+ * channel's control and ack (status) bits.
+ * @reset: Regmap field description of the channel's reset bit.
+ * @ack: Regmap field description of the channel's acknowledge bit.
+ */
+struct syscfg_reset_channel_data {
+ const char *compatible;
+ struct reg_field reset;
+ struct reg_field ack;
+};
+
+#define _SYSCFG_RST_CH(_c, _rr, _rb, _ar, _ab) \
+ { .compatible = _c, \
+ .reset = REG_FIELD(_rr, _rb, _rb), \
+ .ack = REG_FIELD(_ar, _ab, _ab), }
+
+#define _SYSCFG_RST_CH_NO_ACK(_c, _rr, _rb) \
+ { .compatible = _c, \
+ .reset = REG_FIELD(_rr, _rb, _rb), }
+
+/**
+ * Description of a system configuration register based reset controller.
+ *
+ * @wait_for_ack: The controller will wait for reset assert and de-assert to
+ * be "ack'd" in a channel's ack field.
+ * @active_low: Are the resets in this controller active low, i.e. clearing
+ * the reset bit puts the hardware into reset.
+ * @nr_channels: The number of reset channels in this controller.
+ * @channels: An array of reset channel descriptions.
+ */
+struct syscfg_reset_controller_data {
+ bool wait_for_ack;
+ bool active_low;
+ int nr_channels;
+ const struct syscfg_reset_channel_data *channels;
+};
+
+/**
+ * syscfg_reset_probe(): platform device probe function used by syscfg
+ * reset controller drivers. This registers a reset
+ * controller configured by the OF match data for
+ * the compatible device, which should be of type
+ * "struct syscfg_reset_controller_data".
+ *
+ * @pdev: platform device
+ */
+int syscfg_reset_probe(struct platform_device *pdev);
+
+#endif /* __STI_RESET_SYSCFG_H */
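
The header keeps SoC-specific files purely declarative, in the same way as the
stih416 tables earlier in this series. A minimal sketch for a hypothetical SoC
(the compatible string, register offset and bit number are made up for
illustration):

	static const struct syscfg_reset_channel_data foo_softresets[] = {
		/* bit 3 of register 0x44 inside the "st,foo-syscfg" syscon */
		[0] = _SYSCFG_RST_CH_NO_ACK("st,foo-syscfg", 0x44, 3),
	};

	static struct syscfg_reset_controller_data foo_softreset_controller = {
		.wait_for_ack	= false,
		.active_low	= true,
		.nr_channels	= ARRAY_SIZE(foo_softresets),
		.channels	= foo_softresets,
	};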
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index db933decc39c..2e565f8e5165 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -573,6 +573,18 @@ config RTC_DRV_DS1305
This driver can also be built as a module. If so, the module
will be called rtc-ds1305.
+config RTC_DRV_DS1347
+ tristate "Dallas/Maxim DS1347"
+ help
+ If you say yes here you get support for the
+ Dallas/Maxim DS1347 chips.
+
+ This driver only supports the RTC feature, and not other chip
+ features such as alarms.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-ds1347.
+
config RTC_DRV_DS1390
tristate "Dallas/Maxim DS1390/93/94"
help
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index b427bf7dd20d..40a09915c8f6 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o
obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o
obj-$(CONFIG_RTC_DRV_DS1305) += rtc-ds1305.o
obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
+obj-$(CONFIG_RTC_DRV_DS1347) += rtc-ds1347.o
obj-$(CONFIG_RTC_DRV_DS1374) += rtc-ds1374.o
obj-$(CONFIG_RTC_DRV_DS1390) += rtc-ds1390.o
obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 544be722937c..c2eff6082363 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -584,6 +584,9 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
void rtc_update_irq(struct rtc_device *rtc,
unsigned long num, unsigned long events)
{
+ if (unlikely(IS_ERR_OR_NULL(rtc)))
+ return;
+
pm_stay_awake(rtc->dev.parent);
schedule_work(&rtc->irqwork);
}
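
With this check in place a driver's interrupt handler no longer has to guard
against a not-yet-registered rtc_device itself, which is exactly what the
ds1511 and ds1553 hunks below rely on. A sketch of the resulting handler shape
(the foo_* names are illustrative):

	static irqreturn_t foo_rtc_interrupt(int irq, void *dev_id)
	{
		struct foo_rtc *priv = dev_id;

		/* Safe even if priv->rtc has not been registered yet. */
		rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);

		return IRQ_HANDLED;
	}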
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c
index 4af016985890..9f38eda69154 100644
--- a/drivers/rtc/rtc-as3722.c
+++ b/drivers/rtc/rtc-as3722.c
@@ -242,9 +242,8 @@ static int as3722_rtc_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops as3722_rtc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(as3722_rtc_suspend, as3722_rtc_resume)
-};
+static SIMPLE_DEV_PM_OPS(as3722_rtc_pm_ops, as3722_rtc_suspend,
+ as3722_rtc_resume);
static struct platform_driver as3722_rtc_driver = {
.probe = as3722_rtc_probe,
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index 3161ab5263ed..aee3387fb099 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -204,10 +204,8 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
rtc = devm_kzalloc(&pdev->dev, sizeof(struct rtc_at32ap700x),
GFP_KERNEL);
- if (!rtc) {
- dev_dbg(&pdev->dev, "out of memory\n");
+ if (!rtc)
return -ENOMEM;
- }
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 309b8b342d9c..596374304532 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -24,7 +24,7 @@
#include <mach/at91_rtt.h>
#include <mach/cpu.h>
-
+#include <mach/hardware.h>
/*
* This driver uses two configurable hardware resources that live in the
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index cae212f30d65..0963c9309c74 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -837,7 +837,7 @@ static void __exit cmos_do_remove(struct device *dev)
cmos->dev = NULL;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int cmos_suspend(struct device *dev)
{
@@ -935,8 +935,6 @@ static int cmos_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
-
#else
static inline int cmos_poweroff(struct device *dev)
@@ -946,6 +944,8 @@ static inline int cmos_poweroff(struct device *dev)
#endif
+static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
+
/*----------------------------------------------------------------*/
/* On non-x86 systems, a "CMOS" RTC lives most naturally on platform_bus.
@@ -1088,11 +1088,9 @@ static struct pnp_driver cmos_pnp_driver = {
/* flag ensures resume() gets called, and stops syslog spam */
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
-#ifdef CONFIG_PM_SLEEP
.driver = {
.pm = &cmos_pm_ops,
},
-#endif
};
#endif /* CONFIG_PNP */
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 73f157519dff..869cae273799 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -43,8 +43,6 @@
struct coh901331_port {
struct rtc_device *rtc;
struct clk *clk;
- u32 phybase;
- u32 physize;
void __iomem *virtbase;
int irq;
#ifdef CONFIG_PM_SLEEP
@@ -173,19 +171,9 @@ static int __init coh901331_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
- rtap->phybase = res->start;
- rtap->physize = resource_size(res);
-
- if (devm_request_mem_region(&pdev->dev, rtap->phybase, rtap->physize,
- "rtc-coh901331") == NULL)
- return -EBUSY;
-
- rtap->virtbase = devm_ioremap(&pdev->dev, rtap->phybase, rtap->physize);
- if (!rtap->virtbase)
- return -ENOMEM;
+ rtap->virtbase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rtap->virtbase))
+ return PTR_ERR(rtap->virtbase);
rtap->irq = platform_get_irq(pdev, 0);
if (devm_request_irq(&pdev->dev, rtap->irq, coh901331_interrupt, 0,
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 4385ca4503da..a1cbf64242a5 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -26,7 +26,6 @@
struct da9052_rtc {
struct rtc_device *rtc;
struct da9052 *da9052;
- int irq;
};
static int da9052_rtc_enable_alarm(struct da9052 *da9052, bool enable)
@@ -240,8 +239,7 @@ static int da9052_rtc_probe(struct platform_device *pdev)
rtc->da9052 = dev_get_drvdata(pdev->dev.parent);
platform_set_drvdata(pdev, rtc);
- rtc->irq = DA9052_IRQ_ALARM;
- ret = da9052_request_irq(rtc->da9052, rtc->irq, "ALM",
+ ret = da9052_request_irq(rtc->da9052, DA9052_IRQ_ALARM, "ALM",
da9052_rtc_irq, rtc);
if (ret != 0) {
rtc_err(rtc->da9052, "irq registration failed: %d\n", ret);
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
index 48cb2ac3bd3e..a825491331c8 100644
--- a/drivers/rtc/rtc-da9055.c
+++ b/drivers/rtc/rtc-da9055.c
@@ -302,7 +302,9 @@ static int da9055_rtc_probe(struct platform_device *pdev)
}
alm_irq = platform_get_irq_byname(pdev, "ALM");
- alm_irq = regmap_irq_get_virq(rtc->da9055->irq_data, alm_irq);
+ if (alm_irq < 0)
+ return alm_irq;
+
ret = devm_request_threaded_irq(&pdev->dev, alm_irq, NULL,
da9055_rtc_alm_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 24677ef8c39a..c0a3b59f65a2 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -119,8 +119,6 @@ static DEFINE_SPINLOCK(davinci_rtc_lock);
struct davinci_rtc {
struct rtc_device *rtc;
void __iomem *base;
- resource_size_t pbase;
- size_t base_size;
int irq;
};
@@ -482,14 +480,12 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct davinci_rtc *davinci_rtc;
- struct resource *res, *mem;
+ struct resource *res;
int ret = 0;
davinci_rtc = devm_kzalloc(&pdev->dev, sizeof(struct davinci_rtc), GFP_KERNEL);
- if (!davinci_rtc) {
- dev_dbg(dev, "could not allocate memory for private data\n");
+ if (!davinci_rtc)
return -ENOMEM;
- }
davinci_rtc->irq = platform_get_irq(pdev, 0);
if (davinci_rtc->irq < 0) {
@@ -498,28 +494,9 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no mem resource\n");
- return -EINVAL;
- }
-
- davinci_rtc->pbase = res->start;
- davinci_rtc->base_size = resource_size(res);
-
- mem = devm_request_mem_region(dev, davinci_rtc->pbase,
- davinci_rtc->base_size, pdev->name);
- if (!mem) {
- dev_err(dev, "RTC registers at %08x are not free\n",
- davinci_rtc->pbase);
- return -EBUSY;
- }
-
- davinci_rtc->base = devm_ioremap(dev, davinci_rtc->pbase,
- davinci_rtc->base_size);
- if (!davinci_rtc->base) {
- dev_err(dev, "unable to ioremap MEM resource\n");
- return -ENOMEM;
- }
+ davinci_rtc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(davinci_rtc->base))
+ return PTR_ERR(davinci_rtc->base);
platform_set_drvdata(pdev, davinci_rtc);
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 2dd586a19b59..129add77065d 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -756,19 +756,17 @@ static int ds1305_probe(struct spi_device *spi)
status = devm_request_irq(&spi->dev, spi->irq, ds1305_irq,
0, dev_name(&ds1305->rtc->dev), ds1305);
if (status < 0) {
- dev_dbg(&spi->dev, "request_irq %d --> %d\n",
+ dev_err(&spi->dev, "request_irq %d --> %d\n",
spi->irq, status);
- return status;
+ } else {
+ device_set_wakeup_capable(&spi->dev, 1);
}
-
- device_set_wakeup_capable(&spi->dev, 1);
}
/* export NVRAM */
status = sysfs_create_bin_file(&spi->dev.kobj, &nvram);
if (status < 0) {
- dev_dbg(&spi->dev, "register nvram --> %d\n", status);
- return status;
+ dev_err(&spi->dev, "register nvram --> %d\n", status);
}
return 0;
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 4e75345a559a..f03d5ba96db1 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -154,6 +154,7 @@ static const struct chip_desc chips[last_ds_type] = {
.alarm = 1,
},
[mcp7941x] = {
+ .alarm = 1,
/* this is battery backed SRAM */
.nvram_offset = 0x20,
.nvram_size = 0x40,
@@ -606,6 +607,178 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
/*----------------------------------------------------------------------*/
+/*
+ * Alarm support for mcp7941x devices.
+ */
+
+#define MCP7941X_REG_CONTROL 0x07
+# define MCP7941X_BIT_ALM0_EN 0x10
+# define MCP7941X_BIT_ALM1_EN 0x20
+#define MCP7941X_REG_ALARM0_BASE 0x0a
+#define MCP7941X_REG_ALARM0_CTRL 0x0d
+#define MCP7941X_REG_ALARM1_BASE 0x11
+#define MCP7941X_REG_ALARM1_CTRL 0x14
+# define MCP7941X_BIT_ALMX_IF (1 << 3)
+# define MCP7941X_BIT_ALMX_C0 (1 << 4)
+# define MCP7941X_BIT_ALMX_C1 (1 << 5)
+# define MCP7941X_BIT_ALMX_C2 (1 << 6)
+# define MCP7941X_BIT_ALMX_POL (1 << 7)
+# define MCP7941X_MSK_ALMX_MATCH (MCP7941X_BIT_ALMX_C0 | \
+ MCP7941X_BIT_ALMX_C1 | \
+ MCP7941X_BIT_ALMX_C2)
+
+static void mcp7941x_work(struct work_struct *work)
+{
+ struct ds1307 *ds1307 = container_of(work, struct ds1307, work);
+ struct i2c_client *client = ds1307->client;
+ int reg, ret;
+
+ mutex_lock(&ds1307->rtc->ops_lock);
+
+ /* Check and clear alarm 0 interrupt flag. */
+ reg = i2c_smbus_read_byte_data(client, MCP7941X_REG_ALARM0_CTRL);
+ if (reg < 0)
+ goto out;
+ if (!(reg & MCP7941X_BIT_ALMX_IF))
+ goto out;
+ reg &= ~MCP7941X_BIT_ALMX_IF;
+ ret = i2c_smbus_write_byte_data(client, MCP7941X_REG_ALARM0_CTRL, reg);
+ if (ret < 0)
+ goto out;
+
+ /* Disable alarm 0. */
+ reg = i2c_smbus_read_byte_data(client, MCP7941X_REG_CONTROL);
+ if (reg < 0)
+ goto out;
+ reg &= ~MCP7941X_BIT_ALM0_EN;
+ ret = i2c_smbus_write_byte_data(client, MCP7941X_REG_CONTROL, reg);
+ if (ret < 0)
+ goto out;
+
+ rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
+
+out:
+ if (test_bit(HAS_ALARM, &ds1307->flags))
+ enable_irq(client->irq);
+ mutex_unlock(&ds1307->rtc->ops_lock);
+}
+
+static int mcp7941x_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ u8 *regs = ds1307->regs;
+ int ret;
+
+ if (!test_bit(HAS_ALARM, &ds1307->flags))
+ return -EINVAL;
+
+ /* Read control and alarm 0 registers. */
+ ret = ds1307->read_block_data(client, MCP7941X_REG_CONTROL, 10, regs);
+ if (ret < 0)
+ return ret;
+
+ t->enabled = !!(regs[0] & MCP7941X_BIT_ALM0_EN);
+
+ /* Report alarm 0 time assuming 24-hour and day-of-month modes. */
+ t->time.tm_sec = bcd2bin(ds1307->regs[3] & 0x7f);
+ t->time.tm_min = bcd2bin(ds1307->regs[4] & 0x7f);
+ t->time.tm_hour = bcd2bin(ds1307->regs[5] & 0x3f);
+ t->time.tm_wday = bcd2bin(ds1307->regs[6] & 0x7) - 1;
+ t->time.tm_mday = bcd2bin(ds1307->regs[7] & 0x3f);
+ t->time.tm_mon = bcd2bin(ds1307->regs[8] & 0x1f) - 1;
+ t->time.tm_year = -1;
+ t->time.tm_yday = -1;
+ t->time.tm_isdst = -1;
+
+ dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
+ "enabled=%d polarity=%d irq=%d match=%d\n", __func__,
+ t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
+ t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled,
+ !!(ds1307->regs[6] & MCP7941X_BIT_ALMX_POL),
+ !!(ds1307->regs[6] & MCP7941X_BIT_ALMX_IF),
+ (ds1307->regs[6] & MCP7941X_MSK_ALMX_MATCH) >> 4);
+
+ return 0;
+}
+
+static int mcp7941x_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ unsigned char *regs = ds1307->regs;
+ int ret;
+
+ if (!test_bit(HAS_ALARM, &ds1307->flags))
+ return -EINVAL;
+
+ dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
+ "enabled=%d pending=%d\n", __func__,
+ t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
+ t->time.tm_wday, t->time.tm_mday, t->time.tm_mon,
+ t->enabled, t->pending);
+
+ /* Read control and alarm 0 registers. */
+ ret = ds1307->read_block_data(client, MCP7941X_REG_CONTROL, 10, regs);
+ if (ret < 0)
+ return ret;
+
+ /* Set alarm 0, using 24-hour and day-of-month modes. */
+ regs[3] = bin2bcd(t->time.tm_sec);
+ regs[4] = bin2bcd(t->time.tm_min);
+ regs[5] = bin2bcd(t->time.tm_hour);
+ regs[6] = bin2bcd(t->time.tm_wday + 1);
+ regs[7] = bin2bcd(t->time.tm_mday);
+ regs[8] = bin2bcd(t->time.tm_mon + 1);
+
+ /* Clear the alarm 0 interrupt flag. */
+ regs[6] &= ~MCP7941X_BIT_ALMX_IF;
+ /* Set alarm match: second, minute, hour, day, date, month. */
+ regs[6] |= MCP7941X_MSK_ALMX_MATCH;
+
+ if (t->enabled)
+ regs[0] |= MCP7941X_BIT_ALM0_EN;
+ else
+ regs[0] &= ~MCP7941X_BIT_ALM0_EN;
+
+ ret = ds1307->write_block_data(client, MCP7941X_REG_CONTROL, 10, regs);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int mcp7941x_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ int reg;
+
+ if (!test_bit(HAS_ALARM, &ds1307->flags))
+ return -EINVAL;
+
+ reg = i2c_smbus_read_byte_data(client, MCP7941X_REG_CONTROL);
+ if (reg < 0)
+ return reg;
+
+ if (enabled)
+ reg |= MCP7941X_BIT_ALM0_EN;
+ else
+ reg &= ~MCP7941X_BIT_ALM0_EN;
+
+ return i2c_smbus_write_byte_data(client, MCP7941X_REG_CONTROL, reg);
+}
+
+static const struct rtc_class_ops mcp7941x_rtc_ops = {
+ .read_time = ds1307_get_time,
+ .set_time = ds1307_set_time,
+ .read_alarm = mcp7941x_read_alarm,
+ .set_alarm = mcp7941x_set_alarm,
+ .alarm_irq_enable = mcp7941x_alarm_irq_enable,
+};
+
+/*----------------------------------------------------------------------*/
+
static ssize_t
ds1307_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
@@ -678,6 +851,7 @@ static int ds1307_probe(struct i2c_client *client,
[ds_1339] = DS1339_BIT_BBSQI,
[ds_3231] = DS3231_BIT_BBSQW,
};
+ const struct rtc_class_ops *rtc_ops = &ds13xx_rtc_ops;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)
&& !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
@@ -816,6 +990,13 @@ static int ds1307_probe(struct i2c_client *client,
case ds_1388:
ds1307->offset = 1; /* Seconds starts at 1 */
break;
+ case mcp7941x:
+ rtc_ops = &mcp7941x_rtc_ops;
+ if (ds1307->client->irq > 0 && chip->alarm) {
+ INIT_WORK(&ds1307->work, mcp7941x_work);
+ want_irq = true;
+ }
+ break;
default:
break;
}
@@ -927,55 +1108,61 @@ read_rtc:
bin2bcd(tmp));
}
+ device_set_wakeup_capable(&client->dev, want_irq);
ds1307->rtc = devm_rtc_device_register(&client->dev, client->name,
- &ds13xx_rtc_ops, THIS_MODULE);
+ rtc_ops, THIS_MODULE);
if (IS_ERR(ds1307->rtc)) {
- err = PTR_ERR(ds1307->rtc);
- dev_err(&client->dev,
- "unable to register the class device\n");
- goto exit;
+ return PTR_ERR(ds1307->rtc);
}
if (want_irq) {
err = request_irq(client->irq, ds1307_irq, IRQF_SHARED,
ds1307->rtc->name, client);
if (err) {
- dev_err(&client->dev,
- "unable to request IRQ!\n");
- goto exit;
- }
+ client->irq = 0;
+ dev_err(&client->dev, "unable to request IRQ!\n");
+ } else {
- device_set_wakeup_capable(&client->dev, 1);
- set_bit(HAS_ALARM, &ds1307->flags);
- dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
+ set_bit(HAS_ALARM, &ds1307->flags);
+ dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
+ }
}
if (chip->nvram_size) {
+
ds1307->nvram = devm_kzalloc(&client->dev,
sizeof(struct bin_attribute),
GFP_KERNEL);
if (!ds1307->nvram) {
- err = -ENOMEM;
- goto err_irq;
+ dev_err(&client->dev, "cannot allocate memory for nvram sysfs\n");
+ } else {
+
+ ds1307->nvram->attr.name = "nvram";
+ ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR;
+
+ sysfs_bin_attr_init(ds1307->nvram);
+
+ ds1307->nvram->read = ds1307_nvram_read;
+ ds1307->nvram->write = ds1307_nvram_write;
+ ds1307->nvram->size = chip->nvram_size;
+ ds1307->nvram_offset = chip->nvram_offset;
+
+ err = sysfs_create_bin_file(&client->dev.kobj,
+ ds1307->nvram);
+ if (err) {
+ dev_err(&client->dev,
+ "unable to create sysfs file: %s\n",
+ ds1307->nvram->attr.name);
+ } else {
+ set_bit(HAS_NVRAM, &ds1307->flags);
+ dev_info(&client->dev, "%zu bytes nvram\n",
+ ds1307->nvram->size);
+ }
}
- ds1307->nvram->attr.name = "nvram";
- ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR;
- sysfs_bin_attr_init(ds1307->nvram);
- ds1307->nvram->read = ds1307_nvram_read;
- ds1307->nvram->write = ds1307_nvram_write;
- ds1307->nvram->size = chip->nvram_size;
- ds1307->nvram_offset = chip->nvram_offset;
- err = sysfs_create_bin_file(&client->dev.kobj, ds1307->nvram);
- if (err)
- goto err_irq;
- set_bit(HAS_NVRAM, &ds1307->flags);
- dev_info(&client->dev, "%zu bytes nvram\n", ds1307->nvram->size);
}
return 0;
-err_irq:
- free_irq(client->irq, client);
exit:
return err;
}
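
The new mcp7941x alarm callbacks are reached through the standard RTC
character-device interface, so they can be exercised from user space without
any driver-specific tooling. A small sketch, assuming the chip shows up as
/dev/rtc0 (hour rollover is ignored for brevity):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/rtc.h>

	int set_alarm_in_one_minute(void)
	{
		struct rtc_wkalrm alm = { .enabled = 1 };
		int fd, ret;

		fd = open("/dev/rtc0", O_RDONLY);
		if (fd < 0)
			return -1;

		ret = ioctl(fd, RTC_RD_TIME, &alm.time);
		if (!ret) {
			alm.time.tm_min = (alm.time.tm_min + 1) % 60;
			/* Dispatched to mcp7941x_set_alarm() by the rtc core. */
			ret = ioctl(fd, RTC_WKALM_SET, &alm);
		}

		close(fd);
		return ret;
	}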
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
new file mode 100644
index 000000000000..c82b4c050326
--- /dev/null
+++ b/drivers/rtc/rtc-ds1347.c
@@ -0,0 +1,166 @@
+/* rtc-ds1347.c
+ *
+ * Driver for Dallas Semiconductor DS1347 Low Current, SPI Compatible
+ * Real Time Clock
+ *
+ * Author : Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/spi/spi.h>
+#include <linux/bcd.h>
+
+/* Registers in ds1347 rtc */
+
+#define DS1347_SECONDS_REG 0x01
+#define DS1347_MINUTES_REG 0x03
+#define DS1347_HOURS_REG 0x05
+#define DS1347_DATE_REG 0x07
+#define DS1347_MONTH_REG 0x09
+#define DS1347_DAY_REG 0x0B
+#define DS1347_YEAR_REG 0x0D
+#define DS1347_CONTROL_REG 0x0F
+#define DS1347_STATUS_REG 0x17
+#define DS1347_CLOCK_BURST 0x3F
+
+static int ds1347_read_reg(struct device *dev, unsigned char address,
+ unsigned char *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ *data = address | 0x80;
+
+ return spi_write_then_read(spi, data, 1, data, 1);
+}
+
+static int ds1347_write_reg(struct device *dev, unsigned char address,
+ unsigned char data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char buf[2];
+
+ buf[0] = address & 0x7F;
+ buf[1] = data;
+
+ return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
+
+static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ int err;
+ unsigned char buf[8];
+
+ buf[0] = DS1347_CLOCK_BURST | 0x80;
+
+ err = spi_write_then_read(spi, buf, 1, buf, 8);
+ if (err)
+ return err;
+
+ dt->tm_sec = bcd2bin(buf[0]);
+ dt->tm_min = bcd2bin(buf[1]);
+ dt->tm_hour = bcd2bin(buf[2] & 0x3F);
+ dt->tm_mday = bcd2bin(buf[3]);
+ dt->tm_mon = bcd2bin(buf[4]) - 1;
+ dt->tm_wday = bcd2bin(buf[5]) - 1;
+ dt->tm_year = bcd2bin(buf[6]) + 100;
+
+ return rtc_valid_tm(dt);
+}
+
+static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char buf[9];
+
+ buf[0] = DS1347_CLOCK_BURST & 0x7F;
+ buf[1] = bin2bcd(dt->tm_sec);
+ buf[2] = bin2bcd(dt->tm_min);
+ buf[3] = (bin2bcd(dt->tm_hour) & 0x3F);
+ buf[4] = bin2bcd(dt->tm_mday);
+ buf[5] = bin2bcd(dt->tm_mon + 1);
+ buf[6] = bin2bcd(dt->tm_wday + 1);
+
+ /* tm_year counts years since 1900, but the RTC only stores the
+ last two digits (00 to 99), so reduce it modulo 100 */
+ dt->tm_year = dt->tm_year % 100;
+
+ buf[7] = bin2bcd(dt->tm_year);
+ buf[8] = bin2bcd(0x00);
+
+ /* write the rtc settings */
+ return spi_write_then_read(spi, buf, 9, NULL, 0);
+}
+
+static const struct rtc_class_ops ds1347_rtc_ops = {
+ .read_time = ds1347_read_time,
+ .set_time = ds1347_set_time,
+};
+
+static int ds1347_probe(struct spi_device *spi)
+{
+ struct rtc_device *rtc;
+ unsigned char data;
+ int res;
+
+ /* Set up SPI for the DS1347: mode 3, 8 bits per word */
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+ /* RTC Settings */
+ res = ds1347_read_reg(&spi->dev, DS1347_SECONDS_REG, &data);
+ if (res)
+ return res;
+
+ /* Disable the write protect of rtc */
+ ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
+ data = data & ~(1<<7);
+ ds1347_write_reg(&spi->dev, DS1347_CONTROL_REG, data);
+
+ /* Enable the oscillator, disable the oscillator stop flag
+ and the glitch filter to reduce current consumption */
+ ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
+ data = data & 0x1B;
+ ds1347_write_reg(&spi->dev, DS1347_STATUS_REG, data);
+
+ /* display the settings */
+ ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
+ dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data);
+
+ ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
+ dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data);
+
+ rtc = devm_rtc_device_register(&spi->dev, "ds1347",
+ &ds1347_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ spi_set_drvdata(spi, rtc);
+
+ return 0;
+}
+
+static struct spi_driver ds1347_driver = {
+ .driver = {
+ .name = "ds1347",
+ .owner = THIS_MODULE,
+ },
+ .probe = ds1347_probe,
+};
+
+module_spi_driver(ds1347_driver);
+
+MODULE_DESCRIPTION("DS1347 SPI RTC DRIVER");
+MODULE_AUTHOR("Raghavendra C Ganiga <ravi23ganiga@gmail.com>");
+MODULE_LICENSE("GPL v2");
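
Since the driver carries no device-tree match table, it binds purely by SPI
modalias. A board would therefore describe the chip roughly as follows (bus
number, chip select and clock rate are placeholders):

	static struct spi_board_info foo_rtc_board_info __initdata = {
		.modalias	= "ds1347",	/* matches ds1347_driver.driver.name */
		.max_speed_hz	= 1 * 1000 * 1000,
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_3,	/* probe() re-asserts this anyway */
	};

	/* registered once from the board init code */
	spi_register_board_info(&foo_rtc_board_info, 1);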
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
index be9d8c0a7e3a..e67bfcb3a1aa 100644
--- a/drivers/rtc/rtc-ds1390.c
+++ b/drivers/rtc/rtc-ds1390.c
@@ -132,10 +132,9 @@ static int ds1390_probe(struct spi_device *spi)
spi_setup(spi);
chip = devm_kzalloc(&spi->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip) {
- dev_err(&spi->dev, "unable to allocate device memory\n");
+ if (!chip)
return -ENOMEM;
- }
+
spi_set_drvdata(spi, chip);
res = ds1390_get_reg(&spi->dev, DS1390_REG_SECONDS, &tmp);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index bc7b4fcf603c..b13d1399b81a 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -371,8 +371,7 @@ ds1511_interrupt(int irq, void *dev_id)
events |= RTC_UF;
else
events |= RTC_AF;
- if (likely(pdata->rtc))
- rtc_update_irq(pdata->rtc, 1, events);
+ rtc_update_irq(pdata->rtc, 1, events);
}
spin_unlock(&pdata->lock);
return events ? IRQ_HANDLED : IRQ_NONE;
@@ -473,7 +472,6 @@ static struct bin_attribute ds1511_nvram_attr = {
static int ds1511_rtc_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
struct resource *res;
struct rtc_plat_data *pdata;
int ret = 0;
@@ -512,6 +510,12 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
spin_lock_init(&pdata->lock);
platform_set_drvdata(pdev, pdata);
+
+ pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &ds1511_rtc_ops, THIS_MODULE);
+ if (IS_ERR(pdata->rtc))
+ return PTR_ERR(pdata->rtc);
+
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
@@ -526,15 +530,12 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
}
}
- rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &ds1511_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
- pdata->rtc = rtc;
-
ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
+ if (ret)
+ dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n",
+ ds1511_nvram_attr.attr.name);
- return ret;
+ return 0;
}
static int ds1511_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index fd31571941f5..ab56893aac73 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -206,8 +206,7 @@ static irqreturn_t ds1553_rtc_interrupt(int irq, void *dev_id)
events |= RTC_UF;
else
events |= RTC_AF;
- if (likely(pdata->rtc))
- rtc_update_irq(pdata->rtc, 1, events);
+ rtc_update_irq(pdata->rtc, 1, events);
}
spin_unlock(&pdata->lock);
return events ? IRQ_HANDLED : IRQ_NONE;
@@ -278,7 +277,6 @@ static struct bin_attribute ds1553_nvram_attr = {
static int ds1553_rtc_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
struct resource *res;
unsigned int cen, sec;
struct rtc_plat_data *pdata;
@@ -311,6 +309,12 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
spin_lock_init(&pdata->lock);
pdata->last_jiffies = jiffies;
platform_set_drvdata(pdev, pdata);
+
+ pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &ds1553_rtc_ops, THIS_MODULE);
+ if (IS_ERR(pdata->rtc))
+ return PTR_ERR(pdata->rtc);
+
if (pdata->irq > 0) {
writeb(0, ioaddr + RTC_INTERRUPTS);
if (devm_request_irq(&pdev->dev, pdata->irq,
@@ -321,15 +325,12 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
}
}
- rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &ds1553_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
- pdata->rtc = rtc;
-
ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
+ if (ret)
+ dev_err(&pdev->dev, "unable to create sysfs file: %s\n",
+ ds1553_nvram_attr.attr.name);
- return ret;
+ return 0;
}
static int ds1553_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 18e2d8471472..a4888dbca2e1 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -177,8 +177,9 @@ static int ds1672_probe(struct i2c_client *client,
/* read control register */
err = ds1672_get_control(client, &control);
- if (err)
- goto exit_devreg;
+ if (err) {
+ dev_warn(&client->dev, "Unable to read the control register\n");
+ }
if (control & DS1672_REG_CONTROL_EOSC)
dev_warn(&client->dev, "Oscillator not enabled. "
@@ -187,12 +188,10 @@ static int ds1672_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = device_create_file(&client->dev, &dev_attr_control);
if (err)
- goto exit_devreg;
+ dev_err(&client->dev, "Unable to create sysfs entry: %s\n",
+ dev_attr_control.attr.name);
return 0;
-
- exit_devreg:
- return err;
}
static struct i2c_device_id ds1672_id[] = {
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 5a1f3b2a8f1e..942103dac30f 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -204,8 +204,11 @@ static int ds1742_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc);
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
+ if (ret)
+ dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n",
+ pdata->nvram_attr.attr.name);
- return ret;
+ return 0;
}
static int ds1742_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index b83bb5a527f8..adaf06c41479 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -57,6 +57,7 @@ struct ds3232 {
* in the remove function.
*/
struct mutex mutex;
+ bool suspended;
int exiting;
};
@@ -345,7 +346,15 @@ static irqreturn_t ds3232_irq(int irq, void *dev_id)
struct ds3232 *ds3232 = i2c_get_clientdata(client);
disable_irq_nosync(irq);
- schedule_work(&ds3232->work);
+
+ /*
+ * If the RTC is a wakeup source, this IRQ can fire during system
+ * resume before the I2C bus has been resumed, so don't schedule
+ * the work here; ds3232_resume() will schedule it instead.
+ */
+ if (!ds3232->suspended)
+ schedule_work(&ds3232->work);
+
return IRQ_HANDLED;
}
@@ -363,22 +372,26 @@ static void ds3232_work(struct work_struct *work)
if (stat & DS3232_REG_SR_A1F) {
control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
- if (control < 0)
- goto out;
- /* disable alarm1 interrupt */
- control &= ~(DS3232_REG_CR_A1IE);
- i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
-
- /* clear the alarm pend flag */
- stat &= ~DS3232_REG_SR_A1F;
- i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
-
- rtc_update_irq(ds3232->rtc, 1, RTC_AF | RTC_IRQF);
+ if (control < 0) {
+ pr_warn("Read DS3232 Control Register error."
+ "Disable IRQ%d.\n", client->irq);
+ } else {
+ /* disable alarm1 interrupt */
+ control &= ~(DS3232_REG_CR_A1IE);
+ i2c_smbus_write_byte_data(client, DS3232_REG_CR,
+ control);
+
+ /* clear the alarm pend flag */
+ stat &= ~DS3232_REG_SR_A1F;
+ i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
+
+ rtc_update_irq(ds3232->rtc, 1, RTC_AF | RTC_IRQF);
+
+ if (!ds3232->exiting)
+ enable_irq(client->irq);
+ }
}
-out:
- if (!ds3232->exiting)
- enable_irq(client->irq);
unlock:
mutex_unlock(&ds3232->mutex);
}
@@ -411,23 +424,17 @@ static int ds3232_probe(struct i2c_client *client,
if (ret)
return ret;
- ds3232->rtc = devm_rtc_device_register(&client->dev, client->name,
- &ds3232_rtc_ops, THIS_MODULE);
- if (IS_ERR(ds3232->rtc)) {
- dev_err(&client->dev, "unable to register the class device\n");
- return PTR_ERR(ds3232->rtc);
- }
-
- if (client->irq >= 0) {
- ret = devm_request_irq(&client->dev, client->irq, ds3232_irq, 0,
- "ds3232", client);
+ if (client->irq > 0) {
+ ret = devm_request_irq(&client->dev, client->irq, ds3232_irq,
+ IRQF_SHARED, "ds3232", client);
if (ret) {
dev_err(&client->dev, "unable to request IRQ\n");
- return ret;
}
+ device_init_wakeup(&client->dev, 1);
}
-
- return 0;
+ ds3232->rtc = devm_rtc_device_register(&client->dev, client->name,
+ &ds3232_rtc_ops, THIS_MODULE);
+ return PTR_ERR_OR_ZERO(ds3232->rtc);
}
static int ds3232_remove(struct i2c_client *client)
@@ -446,6 +453,42 @@ static int ds3232_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int ds3232_suspend(struct device *dev)
+{
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_can_wakeup(dev)) {
+ ds3232->suspended = true;
+ irq_set_irq_wake(client->irq, 1);
+ }
+
+ return 0;
+}
+
+static int ds3232_resume(struct device *dev)
+{
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (ds3232->suspended) {
+ ds3232->suspended = false;
+
+ /* Clear the hardware alarm pend flag */
+ schedule_work(&ds3232->work);
+
+ irq_set_irq_wake(client->irq, 0);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops ds3232_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ds3232_suspend, ds3232_resume)
+};
+
static const struct i2c_device_id ds3232_id[] = {
{ "ds3232", 0 },
{ }
@@ -456,6 +499,7 @@ static struct i2c_driver ds3232_driver = {
.driver = {
.name = "rtc-ds3232",
.owner = THIS_MODULE,
+ .pm = &ds3232_pm_ops,
},
.probe = ds3232_probe,
.remove = ds3232_remove,
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index abd7f9091f34..cd741c77e085 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -401,7 +401,9 @@ static int __init dryice_rtc_probe(struct platform_device *pdev)
imxdi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(imxdi->clk))
return PTR_ERR(imxdi->clk);
- clk_prepare_enable(imxdi->clk);
+ rc = clk_prepare_enable(imxdi->clk);
+ if (rc)
+ return rc;
/*
* Initialize dryice hardware
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
index 7854a656628f..41bd76aaff76 100644
--- a/drivers/rtc/rtc-isl12057.c
+++ b/drivers/rtc/rtc-isl12057.c
@@ -26,7 +26,6 @@
#include <linux/rtc.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
-#include <linux/rtc.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
@@ -275,10 +274,7 @@ static int isl12057_probe(struct i2c_client *client,
dev_set_drvdata(dev, data);
rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
-
- return 0;
+ return PTR_ERR_OR_ZERO(rtc);
}
#ifdef CONFIG_OF
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 1b126d2513de..08f5160fb6d4 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -38,7 +38,6 @@
#define JZ_RTC_CTRL_ENABLE BIT(0)
struct jz4740_rtc {
- struct resource *mem;
void __iomem *base;
struct rtc_device *rtc;
@@ -216,6 +215,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
int ret;
struct jz4740_rtc *rtc;
uint32_t scratchpad;
+ struct resource *mem;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
@@ -227,25 +227,10 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
return -ENOENT;
}
- rtc->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!rtc->mem) {
- dev_err(&pdev->dev, "Failed to get platform mmio memory\n");
- return -ENOENT;
- }
-
- rtc->mem = devm_request_mem_region(&pdev->dev, rtc->mem->start,
- resource_size(rtc->mem), pdev->name);
- if (!rtc->mem) {
- dev_err(&pdev->dev, "Failed to request mmio memory region\n");
- return -EBUSY;
- }
-
- rtc->base = devm_ioremap_nocache(&pdev->dev, rtc->mem->start,
- resource_size(rtc->mem));
- if (!rtc->base) {
- dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
- return -EBUSY;
- }
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rtc->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rtc->base))
+ return PTR_ERR(rtc->base);
spin_lock_init(&rtc->lock);
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index bfdbcb82d069..f130c08c98f8 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -211,10 +211,9 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
}
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
- if (unlikely(!rtc)) {
- dev_err(&pdev->dev, "Can't allocate memory\n");
+ if (unlikely(!rtc))
return -ENOMEM;
- }
+
rtc->irq = rtcirq;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index 77ea9896b5ba..0765606a2d14 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -23,6 +23,8 @@
#define MC13XXX_RTCDAY 22
#define MC13XXX_RTCDAYA 23
+#define SEC_PER_DAY (24 * 60 * 60)
+
struct mc13xxx_rtc {
struct rtc_device *rtc;
struct mc13xxx *mc13xxx;
@@ -42,15 +44,15 @@ static int mc13xxx_rtc_irq_enable_unlocked(struct device *dev,
return func(priv->mc13xxx, irq);
}
-static int mc13xxx_rtc_irq_enable(struct device *dev,
- unsigned int enabled, int irq)
+static int mc13xxx_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
{
struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
int ret;
mc13xxx_lock(priv->mc13xxx);
- ret = mc13xxx_rtc_irq_enable_unlocked(dev, enabled, irq);
+ ret = mc13xxx_rtc_irq_enable_unlocked(dev, enabled, MC13XXX_IRQ_TODA);
mc13xxx_unlock(priv->mc13xxx);
@@ -61,44 +63,27 @@ static int mc13xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
unsigned int seconds, days1, days2;
- unsigned long s1970;
- int ret;
-
- mc13xxx_lock(priv->mc13xxx);
-
- if (!priv->valid) {
- ret = -ENODATA;
- goto out;
- }
- ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days1);
- if (unlikely(ret))
- goto out;
-
- ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTOD, &seconds);
- if (unlikely(ret))
- goto out;
-
- ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days2);
-out:
- mc13xxx_unlock(priv->mc13xxx);
+ if (!priv->valid)
+ return -ENODATA;
- if (ret)
- return ret;
+ do {
+ int ret;
- if (days2 == days1 + 1) {
- if (seconds >= 86400 / 2)
- days2 = days1;
- else
- days1 = days2;
- }
+ ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days1);
+ if (ret)
+ return ret;
- if (days1 != days2)
- return -EIO;
+ ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTOD, &seconds);
+ if (ret)
+ return ret;
- s1970 = days1 * 86400 + seconds;
+ ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days2);
+ if (ret)
+ return ret;
+ } while (days1 != days2);
- rtc_time_to_tm(s1970, tm);
+ rtc_time_to_tm(days1 * SEC_PER_DAY + seconds, tm);
return rtc_valid_tm(tm);
}
@@ -110,8 +95,8 @@ static int mc13xxx_rtc_set_mmss(struct device *dev, unsigned long secs)
unsigned int alarmseconds;
int ret;
- seconds = secs % 86400;
- days = secs / 86400;
+ seconds = secs % SEC_PER_DAY;
+ days = secs / SEC_PER_DAY;
mc13xxx_lock(priv->mc13xxx);
@@ -123,7 +108,7 @@ static int mc13xxx_rtc_set_mmss(struct device *dev, unsigned long secs)
if (unlikely(ret))
goto out;
- if (alarmseconds < 86400) {
+ if (alarmseconds < SEC_PER_DAY) {
ret = mc13xxx_reg_write(priv->mc13xxx,
MC13XXX_RTCTODA, 0x1ffff);
if (unlikely(ret))
@@ -147,18 +132,21 @@ static int mc13xxx_rtc_set_mmss(struct device *dev, unsigned long secs)
goto out;
/* restore alarm */
- if (alarmseconds < 86400) {
+ if (alarmseconds < SEC_PER_DAY) {
ret = mc13xxx_reg_write(priv->mc13xxx,
MC13XXX_RTCTODA, alarmseconds);
if (unlikely(ret))
goto out;
}
- ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_RTCRST);
- if (unlikely(ret))
- goto out;
+ if (!priv->valid) {
+ ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_RTCRST);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13xxx_irq_unmask(priv->mc13xxx, MC13XXX_IRQ_RTCRST);
+ }
- ret = mc13xxx_irq_unmask(priv->mc13xxx, MC13XXX_IRQ_RTCRST);
out:
priv->valid = !ret;
@@ -180,7 +168,7 @@ static int mc13xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTODA, &seconds);
if (unlikely(ret))
goto out;
- if (seconds >= 86400) {
+ if (seconds >= SEC_PER_DAY) {
ret = -ENODATA;
goto out;
}
@@ -201,7 +189,7 @@ out:
alarm->enabled = enabled;
alarm->pending = pending;
- s1970 = days * 86400 + seconds;
+ s1970 = days * SEC_PER_DAY + seconds;
rtc_time_to_tm(s1970, &alarm->time);
dev_dbg(dev, "%s: %lu\n", __func__, s1970);
@@ -239,8 +227,8 @@ static int mc13xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
if (unlikely(ret))
goto out;
- seconds = s1970 % 86400;
- days = s1970 / 86400;
+ seconds = s1970 % SEC_PER_DAY;
+ days = s1970 / SEC_PER_DAY;
ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCDAYA, days);
if (unlikely(ret))
@@ -259,8 +247,6 @@ static irqreturn_t mc13xxx_rtc_alarm_handler(int irq, void *dev)
struct mc13xxx_rtc *priv = dev;
struct mc13xxx *mc13xxx = priv->mc13xxx;
- dev_dbg(&priv->rtc->dev, "Alarm\n");
-
rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);
mc13xxx_irq_ack(mc13xxx, irq);
@@ -273,8 +259,6 @@ static irqreturn_t mc13xxx_rtc_update_handler(int irq, void *dev)
struct mc13xxx_rtc *priv = dev;
struct mc13xxx *mc13xxx = priv->mc13xxx;
- dev_dbg(&priv->rtc->dev, "1HZ\n");
-
rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF);
mc13xxx_irq_ack(mc13xxx, irq);
@@ -282,12 +266,6 @@ static irqreturn_t mc13xxx_rtc_update_handler(int irq, void *dev)
return IRQ_HANDLED;
}
-static int mc13xxx_rtc_alarm_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- return mc13xxx_rtc_irq_enable(dev, enabled, MC13XXX_IRQ_TODA);
-}
-
static const struct rtc_class_ops mc13xxx_rtc_ops = {
.read_time = mc13xxx_rtc_read_time,
.set_mmss = mc13xxx_rtc_set_mmss,
@@ -301,7 +279,6 @@ static irqreturn_t mc13xxx_rtc_reset_handler(int irq, void *dev)
struct mc13xxx_rtc *priv = dev;
struct mc13xxx *mc13xxx = priv->mc13xxx;
- dev_dbg(&priv->rtc->dev, "RTCRST\n");
priv->valid = 0;
mc13xxx_irq_mask(mc13xxx, irq);
@@ -314,7 +291,6 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
int ret;
struct mc13xxx_rtc *priv;
struct mc13xxx *mc13xxx;
- int rtcrst_pending;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -322,60 +298,47 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
mc13xxx = dev_get_drvdata(pdev->dev.parent);
priv->mc13xxx = mc13xxx;
+ priv->valid = 1;
platform_set_drvdata(pdev, priv);
mc13xxx_lock(mc13xxx);
+ mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_RTCRST);
+
ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_RTCRST,
mc13xxx_rtc_reset_handler, DRIVER_NAME, priv);
if (ret)
- goto err_reset_irq_request;
-
- ret = mc13xxx_irq_status(mc13xxx, MC13XXX_IRQ_RTCRST,
- NULL, &rtcrst_pending);
- if (ret)
- goto err_reset_irq_status;
-
- priv->valid = !rtcrst_pending;
+ goto err_irq_request;
- ret = mc13xxx_irq_request_nounmask(mc13xxx, MC13XXX_IRQ_1HZ,
+ ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_1HZ,
mc13xxx_rtc_update_handler, DRIVER_NAME, priv);
if (ret)
- goto err_update_irq_request;
+ goto err_irq_request;
ret = mc13xxx_irq_request_nounmask(mc13xxx, MC13XXX_IRQ_TODA,
mc13xxx_rtc_alarm_handler, DRIVER_NAME, priv);
if (ret)
- goto err_alarm_irq_request;
+ goto err_irq_request;
mc13xxx_unlock(mc13xxx);
priv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &mc13xxx_rtc_ops, THIS_MODULE);
- if (IS_ERR(priv->rtc)) {
- ret = PTR_ERR(priv->rtc);
+ &mc13xxx_rtc_ops, THIS_MODULE);
- mc13xxx_lock(mc13xxx);
-
- mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv);
-err_alarm_irq_request:
-
- mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_1HZ, priv);
-err_update_irq_request:
-
-err_reset_irq_status:
+ return 0;
- mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv);
-err_reset_irq_request:
+err_irq_request:
+ mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv);
+ mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_1HZ, priv);
+ mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv);
- mc13xxx_unlock(mc13xxx);
- }
+ mc13xxx_unlock(mc13xxx);
return ret;
}
-static int __exit mc13xxx_rtc_remove(struct platform_device *pdev)
+static int mc13xxx_rtc_remove(struct platform_device *pdev)
{
struct mc13xxx_rtc *priv = platform_get_drvdata(pdev);
@@ -404,7 +367,7 @@ MODULE_DEVICE_TABLE(platform, mc13xxx_rtc_idtable);
static struct platform_driver mc13xxx_rtc_driver = {
.id_table = mc13xxx_rtc_idtable,
- .remove = __exit_p(mc13xxx_rtc_remove),
+ .remove = mc13xxx_rtc_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
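
The reworked mc13xxx_rtc_read_time() above is an instance of the usual
split-counter idiom: when the day and time-of-day counters live in separate
registers, the high part is re-read until two reads agree, so a midnight
rollover between the accesses cannot yield a time that is off by a day. In
generic form (read_reg() and the register names stand in for
mc13xxx_reg_read() and the RTCDAY/RTCTOD registers):

	static u32 read_reg(unsigned int reg);	/* placeholder accessor */

	static unsigned long rtc_counters_to_secs(void)
	{
		u32 days1, days2, seconds;

		do {
			days1	= read_reg(RTC_DAY);
			seconds	= read_reg(RTC_TIME_OF_DAY);
			days2	= read_reg(RTC_DAY);
		} while (days1 != days2);	/* retry if the day rolled over */

		return days1 * SEC_PER_DAY + seconds;
	}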
diff --git a/drivers/rtc/rtc-moxart.c b/drivers/rtc/rtc-moxart.c
index c29dee0946e6..c31846238871 100644
--- a/drivers/rtc/rtc-moxart.c
+++ b/drivers/rtc/rtc-moxart.c
@@ -247,10 +247,8 @@ static int moxart_rtc_probe(struct platform_device *pdev)
int ret = 0;
moxart_rtc = devm_kzalloc(&pdev->dev, sizeof(*moxart_rtc), GFP_KERNEL);
- if (!moxart_rtc) {
- dev_err(&pdev->dev, "devm_kzalloc failed\n");
+ if (!moxart_rtc)
return -ENOMEM;
- }
moxart_rtc->gpio_data = of_get_named_gpio(pdev->dev.of_node,
"gpio-rtc-data", 0);
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index d536c5962c99..d15a999363fc 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -222,6 +222,7 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
struct resource *res;
struct rtc_plat_data *pdata;
u32 rtc_time;
+ u32 rtc_date;
int ret = 0;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
@@ -257,6 +258,17 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
}
}
+ /*
+ * A date after January 19th, 2038 does not fit in a 32-bit time_t
+ * and will confuse the kernel and userspace. Reset to a sane date
+ * (January 1st, 2013) if the year is 2038 or later.
+ */
+ rtc_date = readl(pdata->ioaddr + RTC_DATE_REG_OFFS);
+ if (bcd2bin((rtc_date >> RTC_YEAR_OFFS) & 0xff) >= 38) {
+ dev_info(&pdev->dev, "invalid RTC date, resetting to January 1st, 2013\n");
+ writel(0x130101, pdata->ioaddr + RTC_DATE_REG_OFFS);
+ }
+
pdata->irq = platform_get_irq(pdev, 0);
platform_set_drvdata(pdev, pdata);
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index 248653c74b80..a53da0958e95 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -229,10 +229,9 @@ static int __init nuc900_rtc_probe(struct platform_device *pdev)
nuc900_rtc = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_rtc),
GFP_KERNEL);
- if (!nuc900_rtc) {
- dev_err(&pdev->dev, "kzalloc nuc900_rtc failed\n");
+ if (!nuc900_rtc)
return -ENOMEM;
- }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(nuc900_rtc->rtc_reg))
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index fffb7d3449d7..c360d62fb3f6 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -348,9 +348,8 @@ static int palmas_rtc_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops palmas_rtc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(palmas_rtc_suspend, palmas_rtc_resume)
-};
+static SIMPLE_DEV_PM_OPS(palmas_rtc_pm_ops, palmas_rtc_suspend,
+ palmas_rtc_resume);
#ifdef CONFIG_OF
static struct of_device_id of_palmas_rtc_match[] = {
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 03f8f75d5af2..197699f358c7 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -9,18 +9,16 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-
+#include <linux/of.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtc.h>
+#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/mfd/pm8xxx/core.h>
-#include <linux/mfd/pm8xxx/rtc.h>
-
-
/* RTC Register offsets from RTC CTRL REG */
#define PM8XXX_ALARM_CTRL_OFFSET 0x01
#define PM8XXX_RTC_WRITE_OFFSET 0x02
@@ -37,6 +35,8 @@
/**
* struct pm8xxx_rtc - rtc driver internal structure
* @rtc: rtc device for this driver.
+ * @regmap: regmap used to access RTC registers
+ * @allow_set_time: indicates whether writing to the RTC is allowed
* @rtc_alarm_irq: rtc alarm irq number.
* @rtc_base: address of rtc control register.
* @rtc_read_base: base address of read registers.
@@ -48,55 +48,19 @@
*/
struct pm8xxx_rtc {
struct rtc_device *rtc;
+ struct regmap *regmap;
+ bool allow_set_time;
int rtc_alarm_irq;
int rtc_base;
int rtc_read_base;
int rtc_write_base;
int alarm_rw_base;
- u8 ctrl_reg;
+ u8 ctrl_reg;
struct device *rtc_dev;
spinlock_t ctrl_reg_lock;
};
/*
- * The RTC registers need to be read/written one byte at a time. This is a
- * hardware limitation.
- */
-static int pm8xxx_read_wrapper(struct pm8xxx_rtc *rtc_dd, u8 *rtc_val,
- int base, int count)
-{
- int i, rc;
- struct device *parent = rtc_dd->rtc_dev->parent;
-
- for (i = 0; i < count; i++) {
- rc = pm8xxx_readb(parent, base + i, &rtc_val[i]);
- if (rc < 0) {
- dev_err(rtc_dd->rtc_dev, "PMIC read failed\n");
- return rc;
- }
- }
-
- return 0;
-}
-
-static int pm8xxx_write_wrapper(struct pm8xxx_rtc *rtc_dd, u8 *rtc_val,
- int base, int count)
-{
- int i, rc;
- struct device *parent = rtc_dd->rtc_dev->parent;
-
- for (i = 0; i < count; i++) {
- rc = pm8xxx_writeb(parent, base + i, rtc_val[i]);
- if (rc < 0) {
- dev_err(rtc_dd->rtc_dev, "PMIC write failed\n");
- return rc;
- }
- }
-
- return 0;
-}
-
-/*
* Steps to write the RTC registers.
* 1. Disable alarm if enabled.
* 2. Write 0x00 to LSB.
@@ -107,9 +71,12 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
int rc, i;
unsigned long secs, irq_flags;
- u8 value[NUM_8_BIT_RTC_REGS], reg = 0, alarm_enabled = 0, ctrl_reg;
+ u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0, ctrl_reg;
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+ if (!rtc_dd->allow_set_time)
+ return -EACCES;
+
rtc_tm_to_time(tm, &secs);
for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) {
@@ -125,47 +92,43 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
if (ctrl_reg & PM8xxx_RTC_ALARM_ENABLE) {
alarm_enabled = 1;
ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
- rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base,
- 1);
- if (rc < 0) {
- dev_err(dev, "Write to RTC control register "
- "failed\n");
+ rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+ if (rc) {
+ dev_err(dev, "Write to RTC control register failed\n");
goto rtc_rw_fail;
}
rtc_dd->ctrl_reg = ctrl_reg;
- } else
+ } else {
spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+ }
/* Write 0 to Byte[0] */
- reg = 0;
- rc = pm8xxx_write_wrapper(rtc_dd, &reg, rtc_dd->rtc_write_base, 1);
- if (rc < 0) {
+ rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, 0);
+ if (rc) {
dev_err(dev, "Write to RTC write data register failed\n");
goto rtc_rw_fail;
}
/* Write Byte[1], Byte[2], Byte[3] */
- rc = pm8xxx_write_wrapper(rtc_dd, value + 1,
- rtc_dd->rtc_write_base + 1, 3);
- if (rc < 0) {
+ rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->rtc_write_base + 1,
+ &value[1], sizeof(value) - 1);
+ if (rc) {
dev_err(dev, "Write to RTC write data register failed\n");
goto rtc_rw_fail;
}
/* Write Byte[0] */
- rc = pm8xxx_write_wrapper(rtc_dd, value, rtc_dd->rtc_write_base, 1);
- if (rc < 0) {
+ rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, value[0]);
+ if (rc) {
dev_err(dev, "Write to RTC write data register failed\n");
goto rtc_rw_fail;
}
if (alarm_enabled) {
ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
- rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base,
- 1);
- if (rc < 0) {
- dev_err(dev, "Write to RTC control register "
- "failed\n");
+ rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+ if (rc) {
+ dev_err(dev, "Write to RTC control register failed\n");
goto rtc_rw_fail;
}
rtc_dd->ctrl_reg = ctrl_reg;
@@ -181,13 +144,14 @@ rtc_rw_fail:
static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
int rc;
- u8 value[NUM_8_BIT_RTC_REGS], reg;
+ u8 value[NUM_8_BIT_RTC_REGS];
unsigned long secs;
+ unsigned int reg;
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
- rc = pm8xxx_read_wrapper(rtc_dd, value, rtc_dd->rtc_read_base,
- NUM_8_BIT_RTC_REGS);
- if (rc < 0) {
+ rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base,
+ value, sizeof(value));
+ if (rc) {
dev_err(dev, "RTC read data register failed\n");
return rc;
}
@@ -196,16 +160,16 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
* Read the LSB again and check if there has been a carry over.
* If there is, redo the read operation.
*/
- rc = pm8xxx_read_wrapper(rtc_dd, &reg, rtc_dd->rtc_read_base, 1);
+ rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_read_base, &reg);
if (rc < 0) {
dev_err(dev, "RTC read data register failed\n");
return rc;
}
if (unlikely(reg < value[0])) {
- rc = pm8xxx_read_wrapper(rtc_dd, value,
- rtc_dd->rtc_read_base, NUM_8_BIT_RTC_REGS);
- if (rc < 0) {
+ rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base,
+ value, sizeof(value));
+ if (rc) {
dev_err(dev, "RTC read data register failed\n");
return rc;
}
@@ -222,8 +186,8 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
}
dev_dbg(dev, "secs = %lu, h:m:s == %d:%d:%d, d/m/y = %d/%d/%d\n",
- secs, tm->tm_hour, tm->tm_min, tm->tm_sec,
- tm->tm_mday, tm->tm_mon, tm->tm_year);
+ secs, tm->tm_hour, tm->tm_min, tm->tm_sec,
+ tm->tm_mday, tm->tm_mon, tm->tm_year);
return 0;
}
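
The read path above pulls the four counter bytes in one bulk read and then re-reads the LSB; if the fresh LSB compares lower than the one captured first, the counter carried into the upper bytes mid-read, so the value is read again. A minimal user-space model of that check follows; hw_counter, read_byte() and read_counter() are invented for the sketch, and it loops until the read is consistent where the driver retries only once.

/* Sketch of the LSB re-read / carry-over check against a fake counter. */
#include <stdint.h>
#include <stdio.h>

static uint32_t hw_counter = 0x000000ff;	/* about to carry into byte 1 */

/* Reading a byte "takes time": the counter ticks after every access. */
static uint8_t read_byte(unsigned int idx)
{
	uint8_t b = (hw_counter >> (8 * idx)) & 0xff;

	hw_counter++;
	return b;
}

static uint32_t read_counter(void)
{
	uint8_t value[4], lsb;
	int i;

	do {
		for (i = 0; i < 4; i++)
			value[i] = read_byte(i);
		lsb = read_byte(0);
		/* A smaller LSB means byte 0 wrapped mid-read: try again. */
	} while (lsb < value[0]);

	return value[0] | value[1] << 8 | value[2] << 16 |
	       (uint32_t)value[3] << 24;
}

int main(void)
{
	printf("counter = %lu\n", (unsigned long)read_counter());
	return 0;
}
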
@@ -244,19 +208,22 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
- rc = pm8xxx_write_wrapper(rtc_dd, value, rtc_dd->alarm_rw_base,
- NUM_8_BIT_RTC_REGS);
- if (rc < 0) {
+ rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->alarm_rw_base, value,
+ sizeof(value));
+ if (rc) {
dev_err(dev, "Write to RTC ALARM register failed\n");
goto rtc_rw_fail;
}
ctrl_reg = rtc_dd->ctrl_reg;
- ctrl_reg = alarm->enabled ? (ctrl_reg | PM8xxx_RTC_ALARM_ENABLE) :
- (ctrl_reg & ~PM8xxx_RTC_ALARM_ENABLE);
- rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
- if (rc < 0) {
+ if (alarm->enabled)
+ ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
+ else
+ ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+
+ rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+ if (rc) {
dev_err(dev, "Write to RTC control register failed\n");
goto rtc_rw_fail;
}
@@ -264,9 +231,9 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
rtc_dd->ctrl_reg = ctrl_reg;
dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
- alarm->time.tm_hour, alarm->time.tm_min,
- alarm->time.tm_sec, alarm->time.tm_mday,
- alarm->time.tm_mon, alarm->time.tm_year);
+ alarm->time.tm_hour, alarm->time.tm_min,
+ alarm->time.tm_sec, alarm->time.tm_mday,
+ alarm->time.tm_mon, alarm->time.tm_year);
rtc_rw_fail:
spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
return rc;
@@ -279,9 +246,9 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
unsigned long secs;
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
- rc = pm8xxx_read_wrapper(rtc_dd, value, rtc_dd->alarm_rw_base,
- NUM_8_BIT_RTC_REGS);
- if (rc < 0) {
+ rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->alarm_rw_base, value,
+ sizeof(value));
+ if (rc) {
dev_err(dev, "RTC alarm time read failed\n");
return rc;
}
@@ -297,9 +264,9 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
}
dev_dbg(dev, "Alarm set for - h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
- alarm->time.tm_hour, alarm->time.tm_min,
- alarm->time.tm_sec, alarm->time.tm_mday,
- alarm->time.tm_mon, alarm->time.tm_year);
+ alarm->time.tm_hour, alarm->time.tm_min,
+ alarm->time.tm_sec, alarm->time.tm_mday,
+ alarm->time.tm_mon, alarm->time.tm_year);
return 0;
}
@@ -312,12 +279,16 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
u8 ctrl_reg;
spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+
ctrl_reg = rtc_dd->ctrl_reg;
- ctrl_reg = (enable) ? (ctrl_reg | PM8xxx_RTC_ALARM_ENABLE) :
- (ctrl_reg & ~PM8xxx_RTC_ALARM_ENABLE);
- rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
- if (rc < 0) {
+ if (enable)
+ ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
+ else
+ ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+
+ rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+ if (rc) {
dev_err(dev, "Write to RTC control register failed\n");
goto rtc_rw_fail;
}
@@ -329,8 +300,9 @@ rtc_rw_fail:
return rc;
}
-static struct rtc_class_ops pm8xxx_rtc_ops = {
+static const struct rtc_class_ops pm8xxx_rtc_ops = {
.read_time = pm8xxx_rtc_read_time,
+ .set_time = pm8xxx_rtc_set_time,
.set_alarm = pm8xxx_rtc_set_alarm,
.read_alarm = pm8xxx_rtc_read_alarm,
.alarm_irq_enable = pm8xxx_rtc_alarm_irq_enable,
@@ -339,7 +311,7 @@ static struct rtc_class_ops pm8xxx_rtc_ops = {
static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
{
struct pm8xxx_rtc *rtc_dd = dev_id;
- u8 ctrl_reg;
+ unsigned int ctrl_reg;
int rc;
unsigned long irq_flags;
@@ -351,11 +323,11 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
ctrl_reg = rtc_dd->ctrl_reg;
ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
- rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
- if (rc < 0) {
+ rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+ if (rc) {
spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
- dev_err(rtc_dd->rtc_dev, "Write to RTC control register "
- "failed\n");
+ dev_err(rtc_dd->rtc_dev,
+ "Write to RTC control register failed\n");
goto rtc_alarm_handled;
}
@@ -363,61 +335,71 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
/* Clear RTC alarm register */
- rc = pm8xxx_read_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base +
- PM8XXX_ALARM_CTRL_OFFSET, 1);
- if (rc < 0) {
- dev_err(rtc_dd->rtc_dev, "RTC Alarm control register read "
- "failed\n");
+ rc = regmap_read(rtc_dd->regmap,
+ rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
+ &ctrl_reg);
+ if (rc) {
+ dev_err(rtc_dd->rtc_dev,
+ "RTC Alarm control register read failed\n");
goto rtc_alarm_handled;
}
ctrl_reg &= ~PM8xxx_RTC_ALARM_CLEAR;
- rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base +
- PM8XXX_ALARM_CTRL_OFFSET, 1);
- if (rc < 0)
- dev_err(rtc_dd->rtc_dev, "Write to RTC Alarm control register"
- " failed\n");
+ rc = regmap_write(rtc_dd->regmap,
+ rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
+ ctrl_reg);
+ if (rc)
+ dev_err(rtc_dd->rtc_dev,
+ "Write to RTC Alarm control register failed\n");
rtc_alarm_handled:
return IRQ_HANDLED;
}
+/*
+ * Hardcoded RTC bases until IORESOURCE_REG mapping is figured out
+ */
+static const struct of_device_id pm8xxx_id_table[] = {
+ { .compatible = "qcom,pm8921-rtc", .data = (void *) 0x11D },
+ { .compatible = "qcom,pm8058-rtc", .data = (void *) 0x1E8 },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
+
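
As a stop-gap, the register base is carried in the .data pointer of the match table and recovered with an integer cast in probe (rtc_dd->rtc_base = (long) match->data further down). The small user-space sketch below shows that round trip with a local id_entry struct standing in for of_device_id; it is not the kernel definition.

/* Stashing a small constant in a match-table data pointer, illustrative only. */
#include <stdio.h>
#include <string.h>

struct id_entry {
	const char *compatible;
	const void *data;
};

static const struct id_entry id_table[] = {
	{ .compatible = "qcom,pm8921-rtc", .data = (void *)0x11DUL },
	{ .compatible = "qcom,pm8058-rtc", .data = (void *)0x1E8UL },
	{ NULL, NULL },
};

static const struct id_entry *match(const char *compat)
{
	const struct id_entry *e;

	for (e = id_table; e->compatible; e++)
		if (!strcmp(e->compatible, compat))
			return e;
	return NULL;
}

int main(void)
{
	const struct id_entry *m = match("qcom,pm8058-rtc");

	if (m)
		printf("rtc_base = 0x%lx\n", (long)m->data);
	return 0;
}
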
static int pm8xxx_rtc_probe(struct platform_device *pdev)
{
int rc;
- u8 ctrl_reg;
- bool rtc_write_enable = false;
+ unsigned int ctrl_reg;
struct pm8xxx_rtc *rtc_dd;
- struct resource *rtc_resource;
- const struct pm8xxx_rtc_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ const struct of_device_id *match;
- if (pdata != NULL)
- rtc_write_enable = pdata->rtc_write_enable;
+ match = of_match_node(pm8xxx_id_table, pdev->dev.of_node);
+ if (!match)
+ return -ENXIO;
rtc_dd = devm_kzalloc(&pdev->dev, sizeof(*rtc_dd), GFP_KERNEL);
- if (rtc_dd == NULL) {
- dev_err(&pdev->dev, "Unable to allocate memory!\n");
+ if (rtc_dd == NULL)
return -ENOMEM;
- }
/* Initialise spinlock to protect RTC control register */
spin_lock_init(&rtc_dd->ctrl_reg_lock);
+ rtc_dd->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!rtc_dd->regmap) {
+ dev_err(&pdev->dev, "Parent regmap unavailable.\n");
+ return -ENXIO;
+ }
+
rtc_dd->rtc_alarm_irq = platform_get_irq(pdev, 0);
if (rtc_dd->rtc_alarm_irq < 0) {
dev_err(&pdev->dev, "Alarm IRQ resource absent!\n");
return -ENXIO;
}
- rtc_resource = platform_get_resource_byname(pdev, IORESOURCE_IO,
- "pmic_rtc_base");
- if (!(rtc_resource && rtc_resource->start)) {
- dev_err(&pdev->dev, "RTC IO resource absent!\n");
- return -ENXIO;
- }
+ rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node,
+ "allow-set-time");
- rtc_dd->rtc_base = rtc_resource->start;
+ rtc_dd->rtc_base = (long) match->data;
/* Setup RTC register addresses */
rtc_dd->rtc_write_base = rtc_dd->rtc_base + PM8XXX_RTC_WRITE_OFFSET;
@@ -427,64 +409,52 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
rtc_dd->rtc_dev = &pdev->dev;
/* Check if the RTC is on, else turn it on */
- rc = pm8xxx_read_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
- if (rc < 0) {
+ rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_base, &ctrl_reg);
+ if (rc) {
dev_err(&pdev->dev, "RTC control register read failed!\n");
return rc;
}
if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
ctrl_reg |= PM8xxx_RTC_ENABLE;
- rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base,
- 1);
- if (rc < 0) {
- dev_err(&pdev->dev, "Write to RTC control register "
- "failed\n");
+ rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Write to RTC control register failed\n");
return rc;
}
}
rtc_dd->ctrl_reg = ctrl_reg;
- if (rtc_write_enable == true)
- pm8xxx_rtc_ops.set_time = pm8xxx_rtc_set_time;
platform_set_drvdata(pdev, rtc_dd);
+ device_init_wakeup(&pdev->dev, 1);
+
/* Register the RTC device */
rtc_dd->rtc = devm_rtc_device_register(&pdev->dev, "pm8xxx_rtc",
- &pm8xxx_rtc_ops, THIS_MODULE);
+ &pm8xxx_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc_dd->rtc)) {
dev_err(&pdev->dev, "%s: RTC registration failed (%ld)\n",
- __func__, PTR_ERR(rtc_dd->rtc));
+ __func__, PTR_ERR(rtc_dd->rtc));
return PTR_ERR(rtc_dd->rtc);
}
/* Request the alarm IRQ */
- rc = request_any_context_irq(rtc_dd->rtc_alarm_irq,
- pm8xxx_alarm_trigger, IRQF_TRIGGER_RISING,
- "pm8xxx_rtc_alarm", rtc_dd);
+ rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->rtc_alarm_irq,
+ pm8xxx_alarm_trigger,
+ IRQF_TRIGGER_RISING,
+ "pm8xxx_rtc_alarm", rtc_dd);
if (rc < 0) {
dev_err(&pdev->dev, "Request IRQ failed (%d)\n", rc);
return rc;
}
- device_init_wakeup(&pdev->dev, 1);
-
dev_dbg(&pdev->dev, "Probe success !!\n");
return 0;
}
-static int pm8xxx_rtc_remove(struct platform_device *pdev)
-{
- struct pm8xxx_rtc *rtc_dd = platform_get_drvdata(pdev);
-
- device_init_wakeup(&pdev->dev, 0);
- free_irq(rtc_dd->rtc_alarm_irq, rtc_dd);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int pm8xxx_rtc_resume(struct device *dev)
{
@@ -507,15 +477,17 @@ static int pm8xxx_rtc_suspend(struct device *dev)
}
#endif
-static SIMPLE_DEV_PM_OPS(pm8xxx_rtc_pm_ops, pm8xxx_rtc_suspend, pm8xxx_rtc_resume);
+static SIMPLE_DEV_PM_OPS(pm8xxx_rtc_pm_ops,
+ pm8xxx_rtc_suspend,
+ pm8xxx_rtc_resume);
static struct platform_driver pm8xxx_rtc_driver = {
.probe = pm8xxx_rtc_probe,
- .remove = pm8xxx_rtc_remove,
.driver = {
- .name = PM8XXX_RTC_DEV_NAME,
- .owner = THIS_MODULE,
- .pm = &pm8xxx_rtc_pm_ops,
+ .name = "rtc-pm8xxx",
+ .owner = THIS_MODULE,
+ .pm = &pm8xxx_rtc_pm_ops,
+ .of_match_table = pm8xxx_id_table,
},
};
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index a355f2b82bb8..cccbf9d89729 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -32,7 +32,6 @@
#include <mach/hardware.h>
-#define TIMER_FREQ CLOCK_TICK_RATE
#define RTC_DEF_DIVIDER (32768 - 1)
#define RTC_DEF_TRIM 0
#define MAXFREQ_PERIODIC 1000
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 1a779a67ff66..e9ac5a43be1a 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -395,6 +395,12 @@ static int rv3029c2_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_EMUL))
return -ENODEV;
+ rc = rv3029c2_i2c_get_sr(client, buf);
+ if (rc < 0) {
+ dev_err(&client->dev, "reading status failed\n");
+ return rc;
+ }
+
rtc = devm_rtc_device_register(&client->dev, client->name,
&rv3029c2_rtc_ops, THIS_MODULE);
@@ -403,12 +409,6 @@ static int rv3029c2_probe(struct i2c_client *client,
i2c_set_clientdata(client, rtc);
- rc = rv3029c2_i2c_get_sr(client, buf);
- if (rc < 0) {
- dev_err(&client->dev, "reading status failed\n");
- return rc;
- }
-
return 0;
}
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 8fa23eabcb68..e6298e02b400 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -551,7 +551,6 @@ static int rx8025_probe(struct i2c_client *client,
rx8025 = devm_kzalloc(&client->dev, sizeof(*rx8025), GFP_KERNEL);
if (!rx8025) {
- dev_err(&adapter->dev, "failed to alloc memory\n");
err = -ENOMEM;
goto errout;
}
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index c4cde9c08f1f..4958a363b2c7 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -48,8 +48,8 @@ struct s3c_rtc_drv_data {
static struct clk *rtc_clk;
static void __iomem *s3c_rtc_base;
-static int s3c_rtc_alarmno = NO_IRQ;
-static int s3c_rtc_tickno = NO_IRQ;
+static int s3c_rtc_alarmno;
+static int s3c_rtc_tickno;
static enum s3c_cpu_type s3c_rtc_cpu_type;
static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index 3eb3642ae299..76e38007ba90 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -264,12 +264,8 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
rtcdrv = devm_kzalloc(&pdev->dev,
sizeof(struct sirfsoc_rtc_drv), GFP_KERNEL);
- if (rtcdrv == NULL) {
- dev_err(&pdev->dev,
- "%s: can't alloc mem for drv struct\n",
- pdev->name);
+ if (rtcdrv == NULL)
return -ENOMEM;
- }
err = of_property_read_u32(np, "reg", &rtcdrv->rtc_base);
if (err) {
@@ -335,39 +331,29 @@ static int sirfsoc_rtc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int sirfsoc_rtc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
+ struct sirfsoc_rtc_drv *rtcdrv = dev_get_drvdata(dev);
rtcdrv->overflow_rtc =
sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE);
rtcdrv->saved_counter =
sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
rtcdrv->saved_overflow_rtc = rtcdrv->overflow_rtc;
- if (device_may_wakeup(&pdev->dev) && !enable_irq_wake(rtcdrv->irq))
+ if (device_may_wakeup(dev) && !enable_irq_wake(rtcdrv->irq))
rtcdrv->irq_wake = 1;
return 0;
}
-static int sirfsoc_rtc_freeze(struct device *dev)
-{
- sirfsoc_rtc_suspend(dev);
-
- return 0;
-}
-
-static int sirfsoc_rtc_thaw(struct device *dev)
+static int sirfsoc_rtc_resume(struct device *dev)
{
u32 tmp;
- struct sirfsoc_rtc_drv *rtcdrv;
- rtcdrv = dev_get_drvdata(dev);
+ struct sirfsoc_rtc_drv *rtcdrv = dev_get_drvdata(dev);
/*
- * if resume from snapshot and the rtc power is losed,
+ * if resume from snapshot and the rtc power is lost,
	 * restore the rtc settings
*/
if (SIRFSOC_RTC_CLK != sirfsoc_rtc_iobrg_readl(
@@ -407,57 +393,23 @@ static int sirfsoc_rtc_thaw(struct device *dev)
sirfsoc_rtc_iobrg_writel(rtcdrv->overflow_rtc,
rtcdrv->rtc_base + RTC_SW_VALUE);
- return 0;
-}
-
-static int sirfsoc_rtc_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
- sirfsoc_rtc_thaw(dev);
- if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) {
+ if (device_may_wakeup(dev) && rtcdrv->irq_wake) {
disable_irq_wake(rtcdrv->irq);
rtcdrv->irq_wake = 0;
}
return 0;
}
-
-static int sirfsoc_rtc_restore(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
-
- if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) {
- disable_irq_wake(rtcdrv->irq);
- rtcdrv->irq_wake = 0;
- }
- return 0;
-}
-
-#else
-#define sirfsoc_rtc_suspend NULL
-#define sirfsoc_rtc_resume NULL
-#define sirfsoc_rtc_freeze NULL
-#define sirfsoc_rtc_thaw NULL
-#define sirfsoc_rtc_restore NULL
#endif
-static const struct dev_pm_ops sirfsoc_rtc_pm_ops = {
- .suspend = sirfsoc_rtc_suspend,
- .resume = sirfsoc_rtc_resume,
- .freeze = sirfsoc_rtc_freeze,
- .thaw = sirfsoc_rtc_thaw,
- .restore = sirfsoc_rtc_restore,
-};
+static SIMPLE_DEV_PM_OPS(sirfsoc_rtc_pm_ops,
+ sirfsoc_rtc_suspend, sirfsoc_rtc_resume);
static struct platform_driver sirfsoc_rtc_driver = {
.driver = {
.name = "sirfsoc-rtc",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &sirfsoc_rtc_pm_ops,
-#endif
.of_match_table = sirfsoc_rtc_of_match,
},
.probe = sirfsoc_rtc_probe,
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index c492cf0ab8cd..d2cdb9823a15 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -365,10 +365,8 @@ static int spear_rtc_probe(struct platform_device *pdev)
}
config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
- if (!config) {
- dev_err(&pdev->dev, "out of memory\n");
+ if (!config)
return -ENOMEM;
- }
/* alarm irqs */
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index a176ba614683..35ed49ea1f81 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -214,8 +214,7 @@ static irqreturn_t stk17ta8_rtc_interrupt(int irq, void *dev_id)
events |= RTC_UF;
else
events |= RTC_AF;
- if (likely(pdata->rtc))
- rtc_update_irq(pdata->rtc, 1, events);
+ rtc_update_irq(pdata->rtc, 1, events);
}
spin_unlock(&pdata->lock);
return events ? IRQ_HANDLED : IRQ_NONE;
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
index 68a35284e5ad..b6f21f73d508 100644
--- a/drivers/rtc/rtc-sunxi.c
+++ b/drivers/rtc/rtc-sunxi.c
@@ -428,7 +428,7 @@ static const struct rtc_class_ops sunxi_rtc_ops = {
};
static const struct of_device_id sunxi_rtc_dt_ids[] = {
- { .compatible = "allwinner,sun4i-rtc", .data = &data_year_param[0] },
+ { .compatible = "allwinner,sun4i-a10-rtc", .data = &data_year_param[0] },
{ .compatible = "allwinner,sun7i-a20-rtc", .data = &data_year_param[1] },
{ /* sentinel */ },
};
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 7746e65b93f2..6599c20bc454 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -104,20 +104,17 @@ static int test_probe(struct platform_device *plat_dev)
rtc = devm_rtc_device_register(&plat_dev->dev, "test",
&test_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
- err = PTR_ERR(rtc);
- return err;
+ return PTR_ERR(rtc);
}
err = device_create_file(&plat_dev->dev, &dev_attr_irq);
if (err)
- goto err;
+ dev_err(&plat_dev->dev, "Unable to create sysfs entry: %s\n",
+ dev_attr_irq.attr.name);
platform_set_drvdata(plat_dev, rtc);
return 0;
-
-err:
- return err;
}
static int test_remove(struct platform_device *plat_dev)
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 4f87234e0dee..2e678c681b13 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -176,8 +176,8 @@ static irqreturn_t tx4939_rtc_interrupt(int irq, void *dev_id)
tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_NOP);
}
spin_unlock(&pdata->lock);
- if (likely(pdata->rtc))
- rtc_update_irq(pdata->rtc, 1, events);
+ rtc_update_irq(pdata->rtc, 1, events);
+
return IRQ_HANDLED;
}
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index df2ef3eba7cd..051da968da6d 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -79,7 +79,6 @@
struct vt8500_rtc {
void __iomem *regbase;
- struct resource *res;
int irq_alarm;
struct rtc_device *rtc;
spinlock_t lock; /* Protects this structure */
@@ -209,6 +208,7 @@ static const struct rtc_class_ops vt8500_rtc_ops = {
static int vt8500_rtc_probe(struct platform_device *pdev)
{
struct vt8500_rtc *vt8500_rtc;
+ struct resource *res;
int ret;
vt8500_rtc = devm_kzalloc(&pdev->dev,
@@ -219,34 +219,16 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
spin_lock_init(&vt8500_rtc->lock);
platform_set_drvdata(pdev, vt8500_rtc);
- vt8500_rtc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!vt8500_rtc->res) {
- dev_err(&pdev->dev, "No I/O memory resource defined\n");
- return -ENXIO;
- }
-
vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0);
if (vt8500_rtc->irq_alarm < 0) {
dev_err(&pdev->dev, "No alarm IRQ resource defined\n");
return vt8500_rtc->irq_alarm;
}
- vt8500_rtc->res = devm_request_mem_region(&pdev->dev,
- vt8500_rtc->res->start,
- resource_size(vt8500_rtc->res),
- "vt8500-rtc");
- if (vt8500_rtc->res == NULL) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- return -EBUSY;
- }
-
- vt8500_rtc->regbase = devm_ioremap(&pdev->dev, vt8500_rtc->res->start,
- resource_size(vt8500_rtc->res));
- if (!vt8500_rtc->regbase) {
- dev_err(&pdev->dev, "Unable to map RTC I/O memory\n");
- ret = -EBUSY;
- goto err_return;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vt8500_rtc->regbase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(vt8500_rtc->regbase))
+ return PTR_ERR(vt8500_rtc->regbase);
/* Enable RTC and set it to 24-hour mode */
writel(VT8500_RTC_CR_ENABLE,
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 365dc6505148..b1de58e0b3d0 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -660,7 +660,7 @@ static int x1205_probe(struct i2c_client *client,
err = x1205_sysfs_register(&client->dev);
if (err)
- return err;
+ dev_err(&client->dev, "Unable to create sysfs entries\n");
return 0;
}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 9cbc567698ce..c062f1620c58 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -646,7 +646,7 @@ dasd_diag_init(void)
ASCEBC(dasd_diag_discipline.ebcname, 4);
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
- register_external_interrupt(0x2603, dasd_ext_handler);
+ register_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
dasd_diag_discipline_pointer = &dasd_diag_discipline;
return 0;
}
@@ -654,7 +654,7 @@ dasd_diag_init(void)
static void __exit
dasd_diag_cleanup(void)
{
- unregister_external_interrupt(0x2603, dasd_ext_handler);
+ unregister_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
dasd_diag_discipline_pointer = NULL;
}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index ebf41e228e55..ee0e85abe1fd 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -304,12 +304,6 @@ dcssblk_load_segment(char *name, struct segment_info **seg_info)
return rc;
}
-static void dcssblk_unregister_callback(struct device *dev)
-{
- device_unregister(dev);
- put_device(dev);
-}
-
/*
* device attribute for switching shared/nonshared (exclusive)
* operation (show + store)
@@ -397,7 +391,13 @@ removeseg:
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
- rc = device_schedule_callback(dev, dcssblk_unregister_callback);
+ up_write(&dcssblk_devices_sem);
+
+ if (device_remove_file_self(dev, attr)) {
+ device_unregister(dev);
+ put_device(dev);
+ }
+ return rc;
out:
up_write(&dcssblk_devices_sem);
return rc;
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index eb5d22795c47..5af7f0bd6125 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -922,7 +922,7 @@ static int __init con3215_init(void)
raw3215_freelist = req;
}
- cdev = ccw_device_probe_console();
+ cdev = ccw_device_create_console(&raw3215_ccw_driver);
if (IS_ERR(cdev))
return -ENODEV;
@@ -932,6 +932,12 @@ static int __init con3215_init(void)
cdev->handler = raw3215_irq;
raw->flags |= RAW3215_FIXED;
+ if (ccw_device_enable_console(cdev)) {
+ ccw_device_destroy_console(cdev);
+ raw3215_free_info(raw);
+ raw3215[0] = NULL;
+ return -ENODEV;
+ }
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 699fd3e363df..75ffe9980c3e 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -7,6 +7,7 @@
* Copyright IBM Corp. 2003, 2009
*/
+#include <linux/module.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -30,6 +31,9 @@
static struct raw3270_fn con3270_fn;
+static bool auto_update = 1;
+module_param(auto_update, bool, 0);
+
/*
* Main 3270 console view data structure.
*/
@@ -204,6 +208,8 @@ con3270_update(struct con3270 *cp)
struct string *s, *n;
int rc;
+ if (!auto_update && !raw3270_view_active(&cp->view))
+ return;
if (cp->view.dev)
raw3270_activate_view(&cp->view);
@@ -529,6 +535,7 @@ con3270_flush(void)
if (!cp->view.dev)
return;
raw3270_pm_unfreeze(&cp->view);
+ raw3270_activate_view(&cp->view);
spin_lock_irqsave(&cp->view.lock, flags);
con3270_wait_write(cp);
cp->nr_up = 0;
@@ -576,7 +583,6 @@ static struct console con3270 = {
static int __init
con3270_init(void)
{
- struct ccw_device *cdev;
struct raw3270 *rp;
void *cbuf;
int i;
@@ -591,10 +597,7 @@ con3270_init(void)
cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
}
- cdev = ccw_device_probe_console();
- if (IS_ERR(cdev))
- return -ENODEV;
- rp = raw3270_setup_console(cdev);
+ rp = raw3270_setup_console();
if (IS_ERR(rp))
return PTR_ERR(rp);
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 2cdec21e8924..15b3459f8656 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -276,6 +276,15 @@ __raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
}
int
+raw3270_view_active(struct raw3270_view *view)
+{
+ struct raw3270 *rp = view->dev;
+
+ return rp && rp->view == view &&
+ !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+}
+
+int
raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
{
unsigned long flags;
@@ -623,6 +632,8 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
raw3270_size_device_done(rp);
} else
raw3270_writesf_readpart(rp);
+ memset(&rp->init_reset, 0, sizeof(rp->init_reset));
+ memset(&rp->init_data, 0, sizeof(rp->init_data));
}
static int
@@ -630,9 +641,10 @@ __raw3270_reset_device(struct raw3270 *rp)
{
int rc;
+ /* Check if reset is already pending */
+ if (rp->init_reset.view)
+ return -EBUSY;
/* Store reset data stream to init_data/init_reset */
- memset(&rp->init_reset, 0, sizeof(rp->init_reset));
- memset(&rp->init_data, 0, sizeof(rp->init_data));
rp->init_data[0] = TW_KR;
rp->init_reset.ccw.cmd_code = TC_EWRITEA;
rp->init_reset.ccw.flags = CCW_FLAG_SLI;
@@ -776,22 +788,37 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
}
#ifdef CONFIG_TN3270_CONSOLE
+/* Tentative definition - see below for actual definition. */
+static struct ccw_driver raw3270_ccw_driver;
+
/*
* Setup 3270 device configured as console.
*/
-struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
+struct raw3270 __init *raw3270_setup_console(void)
{
+ struct ccw_device *cdev;
unsigned long flags;
struct raw3270 *rp;
char *ascebc;
int rc;
+ cdev = ccw_device_create_console(&raw3270_ccw_driver);
+ if (IS_ERR(cdev))
+ return ERR_CAST(cdev);
+
rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
ascebc = kzalloc(256, GFP_KERNEL);
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc)
return ERR_PTR(rc);
set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
+
+ rc = ccw_device_enable_console(cdev);
+ if (rc) {
+ ccw_device_destroy_console(cdev);
+ return ERR_PTR(rc);
+ }
+
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
do {
__raw3270_reset_device(rp);
@@ -826,7 +853,7 @@ raw3270_create_device(struct ccw_device *cdev)
char *ascebc;
int rc;
- rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+ rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
if (!rp)
return ERR_PTR(-ENOMEM);
ascebc = kmalloc(256, GFP_KERNEL);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 7b73ff8c1bd7..e1e41c2861fb 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -173,6 +173,7 @@ int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *);
int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
int raw3270_reset(struct raw3270_view *);
struct raw3270_view *raw3270_view(struct raw3270_view *);
+int raw3270_view_active(struct raw3270_view *);
/* Reference count inliner for view structures. */
static inline void
@@ -190,7 +191,7 @@ raw3270_put_view(struct raw3270_view *view)
wake_up(&raw3270_wait_queue);
}
-struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
+struct raw3270 *raw3270_setup_console(void);
void raw3270_wait_cons_dev(struct raw3270 *);
/* Notifier for device addition/removal */
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 1fe264379e0d..1990285296c6 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -91,6 +91,9 @@ static struct sclp_req sclp_suspend_req;
/* Timer for request retries. */
static struct timer_list sclp_request_timer;
+/* Timer for queued requests. */
+static struct timer_list sclp_queue_timer;
+
/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
sclp_init_state_uninitialized,
@@ -215,6 +218,76 @@ sclp_request_timeout(unsigned long data)
sclp_process_queue();
}
+/*
+ * Returns the expire value in jiffies of the next pending request timeout,
+ * if any. Needs to be called with sclp_lock.
+ */
+static unsigned long __sclp_req_queue_find_next_timeout(void)
+{
+ unsigned long expires_next = 0;
+ struct sclp_req *req;
+
+ list_for_each_entry(req, &sclp_req_queue, list) {
+ if (!req->queue_expires)
+ continue;
+ if (!expires_next ||
+ (time_before(req->queue_expires, expires_next)))
+ expires_next = req->queue_expires;
+ }
+ return expires_next;
+}
+
+/*
+ * Returns expired request, if any, and removes it from the list.
+ */
+static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
+{
+ unsigned long flags, now;
+ struct sclp_req *req;
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ now = jiffies;
+ /* Don't need list_for_each_safe because we break out after list_del */
+ list_for_each_entry(req, &sclp_req_queue, list) {
+ if (!req->queue_expires)
+ continue;
+ if (time_before_eq(req->queue_expires, now)) {
+ if (req->status == SCLP_REQ_QUEUED) {
+ req->status = SCLP_REQ_QUEUED_TIMEOUT;
+ list_del(&req->list);
+ goto out;
+ }
+ }
+ }
+ req = NULL;
+out:
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return req;
+}
+
+/*
+ * Timeout handler for queued requests. Removes request from list and
+ * invokes callback. This timer can be set per request in situations where
+ * waiting too long would be harmful to the system, e.g. during SE reboot.
+ */
+static void sclp_req_queue_timeout(unsigned long data)
+{
+ unsigned long flags, expires_next;
+ struct sclp_req *req;
+
+ do {
+ req = __sclp_req_queue_remove_expired_req();
+ if (req && req->callback)
+ req->callback(req, req->callback_data);
+ } while (req);
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ expires_next = __sclp_req_queue_find_next_timeout();
+ if (expires_next)
+ mod_timer(&sclp_queue_timer, expires_next);
+ spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
/* Try to start a request. Return zero if the request was successfully
* started or if it will be started at a later time. Return non-zero otherwise.
* Called while sclp_lock is locked. */
@@ -317,6 +390,13 @@ sclp_add_request(struct sclp_req *req)
req->start_count = 0;
list_add_tail(&req->list, &sclp_req_queue);
rc = 0;
+ if (req->queue_timeout) {
+ req->queue_expires = jiffies + req->queue_timeout * HZ;
+ if (!timer_pending(&sclp_queue_timer) ||
+ time_after(sclp_queue_timer.expires, req->queue_expires))
+ mod_timer(&sclp_queue_timer, req->queue_expires);
+ } else
+ req->queue_expires = 0;
/* Start if request is first in list */
if (sclp_running_state == sclp_running_state_idle &&
req->list.prev == &sclp_req_queue) {
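
The block above adds per-request deadlines serviced by a single timer: sclp_add_request() arms sclp_queue_timer for the earliest queue_expires, and the handler pops every expired request, runs its callback, and re-arms for the next pending deadline. The sketch below models that bookkeeping with an array instead of the kernel list and plain integer comparisons instead of time_before()/time_before_eq() (which the real code uses to stay correct across jiffies wraparound); reqs[], next_timeout() and queue_timeout() are names invented here.

/* One timer servicing many per-request deadlines, illustrative only. */
#include <stdio.h>

struct req {
	unsigned long queue_expires;	/* 0 == no timeout requested */
	int done;
};

static struct req reqs[] = {
	{ .queue_expires = 30 }, { .queue_expires = 0 }, { .queue_expires = 10 },
};
#define NREQS (sizeof(reqs) / sizeof(reqs[0]))

/* Earliest pending deadline, or 0 if none. */
static unsigned long next_timeout(void)
{
	unsigned long expires_next = 0;
	unsigned int i;

	for (i = 0; i < NREQS; i++) {
		if (reqs[i].done || !reqs[i].queue_expires)
			continue;
		if (!expires_next || reqs[i].queue_expires < expires_next)
			expires_next = reqs[i].queue_expires;
	}
	return expires_next;
}

/* Complete everything that expired by "now", then re-arm if needed. */
static void queue_timeout(unsigned long now)
{
	unsigned long next;
	unsigned int i;

	for (i = 0; i < NREQS; i++) {
		if (!reqs[i].done && reqs[i].queue_expires &&
		    reqs[i].queue_expires <= now) {
			reqs[i].done = 1;	/* callback would run here */
			printf("req %u timed out at %lu\n", i, now);
		}
	}
	next = next_timeout();
	if (next)
		printf("re-arm timer for %lu\n", next);
}

int main(void)
{
	printf("first timer set for %lu\n", next_timeout());
	queue_timeout(10);
	queue_timeout(30);
	return 0;
}
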
@@ -892,7 +972,7 @@ sclp_check_interface(void)
spin_lock_irqsave(&sclp_lock, flags);
/* Prepare init mask command */
- rc = register_external_interrupt(0x2401, sclp_check_handler);
+ rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
if (rc) {
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
@@ -925,7 +1005,7 @@ sclp_check_interface(void)
} else
rc = -EBUSY;
}
- unregister_external_interrupt(0x2401, sclp_check_handler);
+ unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
@@ -1113,6 +1193,8 @@ sclp_init(void)
INIT_LIST_HEAD(&sclp_reg_list);
list_add(&sclp_state_change_event.list, &sclp_reg_list);
init_timer(&sclp_request_timer);
+ init_timer(&sclp_queue_timer);
+ sclp_queue_timer.function = sclp_req_queue_timeout;
/* Check interface */
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_check_interface();
@@ -1124,7 +1206,7 @@ sclp_init(void)
if (rc)
goto fail_init_state_uninitialized;
/* Register interrupt handler */
- rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
+ rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
if (rc)
goto fail_unregister_reboot_notifier;
sclp_init_state = sclp_init_state_initialized;
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index fea76aed9eea..a68b5ec7d042 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -133,6 +133,11 @@ struct sclp_req {
/* Callback that is called after reaching final status. */
void (*callback)(struct sclp_req *, void *data);
void *callback_data;
+ int queue_timeout; /* request queue timeout (sec), set by
+ caller of sclp_add_request(), if
+ needed */
+ /* Internal fields */
+ unsigned long queue_expires; /* request queue timeout (jiffies) */
};
#define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */
@@ -140,6 +145,9 @@ struct sclp_req {
#define SCLP_REQ_RUNNING 0x02 /* request is currently running */
#define SCLP_REQ_DONE 0x03 /* request is completed successfully */
#define SCLP_REQ_FAILED 0x05 /* request is finally failed */
+#define SCLP_REQ_QUEUED_TIMEOUT 0x06 /* request on queue timed out */
+
+#define SCLP_QUEUE_INTERVAL 5 /* timeout interval for request queue */
/* function pointers that a high level driver has to use for registration */
/* of some routines it wants to be called from the low level driver */
@@ -173,6 +181,7 @@ int sclp_deactivate(void);
int sclp_reactivate(void);
int sclp_service_call(sclp_cmdw_t command, void *sccb);
int sclp_sync_request(sclp_cmdw_t command, void *sccb);
+int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout);
int sclp_sdias_init(void);
void sclp_sdias_exit(void);
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 49af8eeb90ea..6e8f90f84e49 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -37,6 +37,11 @@ static void sclp_sync_callback(struct sclp_req *req, void *data)
int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
+ return sclp_sync_request_timeout(cmd, sccb, 0);
+}
+
+int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
+{
struct completion completion;
struct sclp_req *request;
int rc;
@@ -44,6 +49,8 @@ int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request)
return -ENOMEM;
+ if (timeout)
+ request->queue_timeout = timeout;
request->command = cmd;
request->sccb = sccb;
request->status = SCLP_REQ_FILLED;
@@ -110,7 +117,8 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info)
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
- rc = sclp_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
+ rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
+ SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
@@ -144,7 +152,7 @@ static int do_cpu_configure(sclp_cmdw_t cmd)
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
- rc = sclp_sync_request(cmd, sccb);
+ rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
switch (sccb->header.response_code) {
@@ -214,7 +222,7 @@ static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->rn = rn;
- rc = sclp_sync_request(cmd, sccb);
+ rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
switch (sccb->header.response_code) {
@@ -269,7 +277,8 @@ static int sclp_attach_storage(u8 id)
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
- rc = sclp_sync_request(0x00080001 | id << 8, sccb);
+ rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
+ SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
switch (sccb->header.response_code) {
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 82f2c389b4d1..14196ea0fdf3 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -20,7 +20,9 @@ struct read_info_sccb {
struct sccb_header header; /* 0-7 */
u16 rnmax; /* 8-9 */
u8 rnsize; /* 10 */
- u8 _reserved0[24 - 11]; /* 11-15 */
+ u8 _reserved0[16 - 11]; /* 11-15 */
+ u16 ncpurl; /* 16-17 */
+ u8 _reserved7[24 - 18]; /* 18-23 */
u8 loadparm[8]; /* 24-31 */
u8 _reserved1[48 - 32]; /* 32-47 */
u64 facilities; /* 48-55 */
@@ -32,13 +34,16 @@ struct read_info_sccb {
u8 _reserved4[100 - 92]; /* 92-99 */
u32 rnsize2; /* 100-103 */
u64 rnmax2; /* 104-111 */
- u8 _reserved5[4096 - 112]; /* 112-4095 */
+ u8 _reserved5[120 - 112]; /* 112-119 */
+ u16 hcpua; /* 120-121 */
+ u8 _reserved6[4096 - 122]; /* 122-4095 */
} __packed __aligned(PAGE_SIZE);
static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
static unsigned int sclp_con_has_vt220 __initdata;
static unsigned int sclp_con_has_linemode __initdata;
static unsigned long sclp_hsa_size;
+static unsigned int sclp_max_cpu;
static struct sclp_ipl_info sclp_ipl_info;
u64 sclp_facilities;
@@ -102,6 +107,15 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp_rzm <<= 20;
+ if (!sccb->hcpua) {
+ if (MACHINE_IS_VM)
+ sclp_max_cpu = 64;
+ else
+ sclp_max_cpu = sccb->ncpurl;
+ } else {
+ sclp_max_cpu = sccb->hcpua + 1;
+ }
+
/* Save IPL information */
sclp_ipl_info.is_valid = 1;
if (sccb->flags & 0x2)
@@ -129,6 +143,11 @@ unsigned long long sclp_get_rzm(void)
return sclp_rzm;
}
+unsigned int sclp_get_max_cpu(void)
+{
+ return sclp_max_cpu;
+}
+
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. The sclp_facilities_detect() function retrieves
@@ -184,9 +203,9 @@ static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
sccb_init_eq_size(sccb);
if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
return -EIO;
- if (sccb->evbuf.blk_cnt != 0)
- return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
- return 0;
+ if (sccb->evbuf.blk_cnt == 0)
+ return 0;
+ return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
@@ -195,6 +214,8 @@ static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
sccb->length = PAGE_SIZE;
if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
return -EIO;
+ if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0)
+ return 0;
return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 981a99fd8d42..3478e19ae194 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -78,7 +78,8 @@ tape_std_assign(struct tape_device *device)
rc = tape_do_io_interruptible(device, request);
- del_timer(&timeout);
+ del_timer_sync(&timeout);
+ destroy_timer_on_stack(&timeout);
if (rc != 0) {
DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index f055df0b167f..445564c790f6 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -186,55 +186,71 @@ void airq_iv_release(struct airq_iv *iv)
EXPORT_SYMBOL(airq_iv_release);
/**
- * airq_iv_alloc_bit - allocate an irq bit from an interrupt vector
+ * airq_iv_alloc - allocate irq bits from an interrupt vector
* @iv: pointer to an interrupt vector structure
+ * @num: number of consecutive irq bits to allocate
*
- * Returns the bit number of the allocated irq, or -1UL if no bit
- * is available or the AIRQ_IV_ALLOC flag has not been specified
+ * Returns the bit number of the first irq in the allocated block of irqs,
+ * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
+ * specified
*/
-unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
{
- unsigned long bit;
+ unsigned long bit, i;
- if (!iv->avail)
+ if (!iv->avail || num == 0)
return -1UL;
spin_lock(&iv->lock);
bit = find_first_bit_inv(iv->avail, iv->bits);
- if (bit < iv->bits) {
- clear_bit_inv(bit, iv->avail);
- if (bit >= iv->end)
- iv->end = bit + 1;
- } else
+ while (bit + num <= iv->bits) {
+ for (i = 1; i < num; i++)
+ if (!test_bit_inv(bit + i, iv->avail))
+ break;
+ if (i >= num) {
+ /* Found a suitable block of irqs */
+ for (i = 0; i < num; i++)
+ clear_bit_inv(bit + i, iv->avail);
+ if (bit + num >= iv->end)
+ iv->end = bit + num + 1;
+ break;
+ }
+ bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
+ }
+ if (bit + num > iv->bits)
bit = -1UL;
spin_unlock(&iv->lock);
return bit;
}
-EXPORT_SYMBOL(airq_iv_alloc_bit);
+EXPORT_SYMBOL(airq_iv_alloc);
/**
- * airq_iv_free_bit - free an irq bit of an interrupt vector
+ * airq_iv_free - free irq bits of an interrupt vector
* @iv: pointer to interrupt vector structure
- * @bit: number of the irq bit to free
+ * @bit: number of the first irq bit to free
+ * @num: number of consecutive irq bits to free
*/
-void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
{
- if (!iv->avail)
+ unsigned long i;
+
+ if (!iv->avail || num == 0)
return;
spin_lock(&iv->lock);
- /* Clear (possibly left over) interrupt bit */
- clear_bit_inv(bit, iv->vector);
- /* Make the bit position available again */
- set_bit_inv(bit, iv->avail);
- if (bit == iv->end - 1) {
+ for (i = 0; i < num; i++) {
+ /* Clear (possibly left over) interrupt bit */
+ clear_bit_inv(bit + i, iv->vector);
+ /* Make the bit positions available again */
+ set_bit_inv(bit + i, iv->avail);
+ }
+ if (bit + num >= iv->end) {
/* Find new end of bit-field */
- while (--iv->end > 0)
- if (!test_bit_inv(iv->end - 1, iv->avail))
- break;
+ while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
+ iv->end--;
}
spin_unlock(&iv->lock);
}
-EXPORT_SYMBOL(airq_iv_free_bit);
+EXPORT_SYMBOL(airq_iv_free);
/**
* airq_iv_scan - scan interrupt vector for non-zero bits
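
airq_iv_alloc() above generalizes the old single-bit allocator into a first-fit search for num consecutive free bits, and airq_iv_free() marks the freed range available and then shrinks the tracked end of the bit-field while its tail is free. The user-space sketch below shows the same first-fit/shrink logic over an ordinary byte-per-bit array; iv_alloc(), iv_free(), avail[] and iv_end are local stand-ins for the inverted bit-field and its find_*_bit_inv() helpers, and the end bookkeeping is simplified rather than copied verbatim.

/* First-fit allocation of "num" consecutive bits, illustrative only. */
#include <stdio.h>
#include <string.h>

#define BITS 32

static char avail[BITS];	/* 1 == bit is free */
static unsigned int iv_end;	/* one past the highest bit handed out */

static long iv_alloc(unsigned long num)
{
	unsigned long bit, i;

	if (!num)
		return -1;
	for (bit = 0; bit + num <= BITS; bit++) {
		for (i = 0; i < num; i++)
			if (!avail[bit + i])
				break;
		if (i < num) {
			bit += i;	/* resume the search past the used bit */
			continue;
		}
		for (i = 0; i < num; i++)
			avail[bit + i] = 0;
		if (bit + num > iv_end)
			iv_end = bit + num;
		return bit;
	}
	return -1;
}

static void iv_free(unsigned long bit, unsigned long num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		avail[bit + i] = 1;
	/* Shrink the tracked end while the tail of the field is free. */
	while (iv_end > 0 && avail[iv_end - 1])
		iv_end--;
}

int main(void)
{
	long a, b;

	memset(avail, 1, sizeof(avail));
	a = iv_alloc(4);
	b = iv_alloc(2);
	printf("a=%ld b=%ld end=%u\n", a, b, iv_end);
	iv_free(b, 2);
	printf("after free: end=%u\n", iv_end);
	return 0;
}
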
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index fd3367a1dc7a..dfd7bc681c25 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -168,14 +168,12 @@ static ssize_t ccwgroup_online_show(struct device *dev,
* Provide an 'ungroup' attribute so the user can remove group devices no
 * longer needed or accidentally created. Saves memory :)
*/
-static void ccwgroup_ungroup_callback(struct device *dev)
+static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-
mutex_lock(&gdev->reg_mutex);
if (device_is_registered(&gdev->dev)) {
__ccwgroup_remove_symlinks(gdev);
- device_unregister(dev);
+ device_unregister(&gdev->dev);
__ccwgroup_remove_cdev_refs(gdev);
}
mutex_unlock(&gdev->reg_mutex);
@@ -195,10 +193,9 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev,
rc = -EINVAL;
goto out;
}
- /* Note that we cannot unregister the device from one of its
- * attribute methods, so we have to use this roundabout approach.
- */
- rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
+
+ if (device_remove_file_self(dev, attr))
+ ccwgroup_ungroup(gdev);
out:
if (rc) {
if (rc != -EAGAIN)
@@ -224,6 +221,14 @@ static const struct attribute_group *ccwgroup_attr_groups[] = {
NULL,
};
+static void ccwgroup_ungroup_workfn(struct work_struct *work)
+{
+ struct ccwgroup_device *gdev =
+ container_of(work, struct ccwgroup_device, ungroup_work);
+
+ ccwgroup_ungroup(gdev);
+}
+
static void ccwgroup_release(struct device *dev)
{
kfree(to_ccwgroupdev(dev));
@@ -323,6 +328,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
atomic_set(&gdev->onoff, 0);
mutex_init(&gdev->reg_mutex);
mutex_lock(&gdev->reg_mutex);
+ INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn);
gdev->count = num_devices;
gdev->dev.bus = &ccwgroup_bus_type;
gdev->dev.parent = parent;
@@ -404,10 +410,10 @@ EXPORT_SYMBOL(ccwgroup_create_dev);
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
- struct device *dev = data;
+ struct ccwgroup_device *gdev = to_ccwgroupdev(data);
if (action == BUS_NOTIFY_UNBIND_DRIVER)
- device_schedule_callback(dev, ccwgroup_ungroup_callback);
+ schedule_work(&gdev->ungroup_work);
return NOTIFY_OK;
}
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 7b29d0be0ca3..1d3661af7bd8 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -173,8 +173,7 @@ static struct css_driver chsc_subchannel_driver = {
static int __init chsc_init_dbfs(void)
{
- chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
- 16 * sizeof(long));
+ chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
if (!chsc_debug_msg_id)
goto out;
debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 8ee88c4ebd83..9e058c4657a3 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -18,6 +18,7 @@
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -28,7 +29,7 @@
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
#include <asm/fcx.h>
#include <asm/nmi.h>
#include <asm/crw.h>
@@ -54,7 +55,7 @@ debug_info_t *cio_debug_crw_id;
*/
static int __init cio_debug_init(void)
{
- cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
+ cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long));
if (!cio_debug_msg_id)
goto out_unregister;
debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
@@ -64,7 +65,7 @@ static int __init cio_debug_init(void)
goto out_unregister;
debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
debug_set_level(cio_debug_trace_id, 2);
- cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
+ cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long));
if (!cio_debug_crw_id)
goto out_unregister;
debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
@@ -584,8 +585,6 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
-static struct irq_desc *irq_desc_io;
-
static struct irqaction io_interrupt = {
.name = "IO",
.handler = do_cio_interrupt,
@@ -596,7 +595,6 @@ void __init init_cio_interrupts(void)
irq_set_chip_and_handler(IO_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
setup_irq(IO_INTERRUPT, &io_interrupt);
- irq_desc_io = irq_to_desc(IO_INTERRUPT);
}
#ifdef CONFIG_CCW_CONSOLE
@@ -623,7 +621,7 @@ void cio_tsch(struct subchannel *sch)
local_bh_disable();
irq_enter();
}
- kstat_incr_irqs_this_cpu(IO_INTERRUPT, irq_desc_io);
+ kstat_incr_irq_this_cpu(IO_INTERRUPT);
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e9d783563cbb..d8d9b5b5cc56 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1571,12 +1571,27 @@ out:
return rc;
}
+static void ccw_device_set_int_class(struct ccw_device *cdev)
+{
+ struct ccw_driver *cdrv = cdev->drv;
+
+ /* Note: we interpret class 0 in this context as an uninitialized
+ * field since it translates to a non-I/O interrupt class. */
+ if (cdrv->int_class != 0)
+ cdev->private->int_class = cdrv->int_class;
+ else
+ cdev->private->int_class = IRQIO_CIO;
+}
+
#ifdef CONFIG_CCW_CONSOLE
-static int ccw_device_console_enable(struct ccw_device *cdev,
- struct subchannel *sch)
+int __init ccw_device_enable_console(struct ccw_device *cdev)
{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
int rc;
+ if (!cdev->drv || !cdev->handler)
+ return -EINVAL;
+
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch);
if (rc)
@@ -1609,12 +1624,11 @@ out_unlock:
return rc;
}
-struct ccw_device *ccw_device_probe_console(void)
+struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
struct io_subchannel_private *io_priv;
struct ccw_device *cdev;
struct subchannel *sch;
- int ret;
sch = cio_probe_console();
if (IS_ERR(sch))
@@ -1631,18 +1645,23 @@ struct ccw_device *ccw_device_probe_console(void)
kfree(io_priv);
return cdev;
}
+ cdev->drv = drv;
set_io_private(sch, io_priv);
- ret = ccw_device_console_enable(cdev, sch);
- if (ret) {
- set_io_private(sch, NULL);
- put_device(&sch->dev);
- put_device(&cdev->dev);
- kfree(io_priv);
- return ERR_PTR(ret);
- }
+ ccw_device_set_int_class(cdev);
return cdev;
}
+void __init ccw_device_destroy_console(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct io_subchannel_private *io_priv = to_io_private(sch);
+
+ set_io_private(sch, NULL);
+ put_device(&sch->dev);
+ put_device(&cdev->dev);
+ kfree(io_priv);
+}
+
/**
* ccw_device_wait_idle() - busy wait for device to become idle
* @cdev: ccw device
@@ -1726,15 +1745,8 @@ ccw_device_probe (struct device *dev)
int ret;
cdev->drv = cdrv; /* to let the driver call _set_online */
- /* Note: we interpret class 0 in this context as an uninitialized
- * field since it translates to a non-I/O interrupt class. */
- if (cdrv->int_class != 0)
- cdev->private->int_class = cdrv->int_class;
- else
- cdev->private->int_class = IRQIO_CIO;
-
+ ccw_device_set_int_class(cdev);
ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
-
if (ret) {
cdev->drv = NULL;
cdev->private->int_class = IRQIO_CIO;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 4b824b15194f..5222ebe15705 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -626,8 +626,8 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
return -ENOMEM;
if (copy_from_user(ep11_dev_list.targets,
- (struct ep11_target_dev *)xcrb->targets,
- xcrb->targets_num *
+ (struct ep11_target_dev __force __user *)
+ xcrb->targets, xcrb->targets_num *
sizeof(struct ep11_target_dev)))
return -EFAULT;
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 0bc91e46395a..46b324ce6c7a 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -315,6 +315,10 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
char *function_code;
+ if (CEIL4(xcRB->request_control_blk_length) <
+ xcRB->request_control_blk_length)
+ return -EINVAL; /* overflow after alignment*/
+
/* length checks */
ap_msg->length = sizeof(struct type6_hdr) +
CEIL4(xcRB->request_control_blk_length) +
@@ -333,6 +337,10 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
return -EINVAL;
}
+ if (CEIL4(xcRB->reply_control_blk_length) <
+ xcRB->reply_control_blk_length)
+ return -EINVAL; /* overflow after alignment*/
+
replylen = sizeof(struct type86_fmt2_msg) +
CEIL4(xcRB->reply_control_blk_length) +
xcRB->reply_data_length;
@@ -415,12 +423,18 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
unsigned int dom_val; /* domain id */
} __packed * payload_hdr;
+ if (CEIL4(xcRB->req_len) < xcRB->req_len)
+ return -EINVAL; /* overflow after alignment*/
+
/* length checks */
ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len;
if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
(sizeof(struct type6_hdr)))
return -EINVAL;
+ if (CEIL4(xcRB->resp_len) < xcRB->resp_len)
+ return -EINVAL; /* overflow after alignment*/
+
if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE -
(sizeof(struct type86_fmt2_msg)))
return -EINVAL;
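
Each of the new checks guards the same idiom: a user-supplied length is rounded up to a multiple of four with CEIL4() before being summed into a message size, and if the rounded value compares smaller than the input the rounding wrapped the field, so the request is rejected before any size arithmetic uses it. A stand-alone restatement follows, assuming a 32-bit length field and re-declaring CEIL4() locally (the driver's own definition lives outside this hunk).

/* The "overflow after alignment" check, shown on its own. */
#include <stdint.h>
#include <stdio.h>

#define CEIL4(x)	(((x) + 3) & ~3u)

static int check_len(uint32_t len)
{
	/* Rounding up can only wrap past zero, so a smaller result means
	 * adding up to 3 overflowed the 32-bit field. */
	if (CEIL4(len) < len)
		return -1;	/* would be -EINVAL in the driver */
	return 0;
}

int main(void)
{
	printf("%u -> %d\n", 100u, check_len(100u));
	printf("%u -> %d\n", 0xfffffffeu, check_len(0xfffffffeu));
	return 0;
}
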
@@ -432,7 +446,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
/* Import CPRB data from the ioctl input parameter */
if (copy_from_user(&(msg->cprbx.cprb_len),
- (char *)xcRB->req, xcRB->req_len)) {
+ (char __force __user *)xcRB->req, xcRB->req_len)) {
return -EFAULT;
}
@@ -645,7 +659,7 @@ static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
return -EINVAL;
/* Copy response CPRB to user */
- if (copy_to_user((char *)xcRB->resp,
+ if (copy_to_user((char __force __user *)xcRB->resp,
data + msg->fmt2.offset1, msg->fmt2.count1))
return -EFAULT;
xcRB->resp_len = msg->fmt2.count1;
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 1abd0db29915..a1349653c6d9 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -477,7 +477,7 @@ static int __init kvm_devices_init(void)
INIT_WORK(&hotplug_work, hotplug_devices);
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
- register_external_interrupt(0x2603, kvm_extint_handler);
+ register_external_irq(EXT_IRQ_CP_SERVICE, kvm_extint_handler);
scan_devices();
return 0;
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 0fc584832001..1e1fc671f89a 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -1,7 +1,7 @@
/*
* ccw based virtio transport
*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2012, 2014
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -32,6 +32,8 @@
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
+#include <asm/isc.h>
+#include <asm/airq.h>
/*
* virtio related functions
@@ -58,6 +60,9 @@ struct virtio_ccw_device {
unsigned long indicators;
unsigned long indicators2;
struct vq_config_block *config_block;
+ bool is_thinint;
+ bool going_away;
+ void *airq_info;
};
struct vq_info_block {
@@ -72,15 +77,38 @@ struct virtio_feature_desc {
__u8 index;
} __packed;
+struct virtio_thinint_area {
+ unsigned long summary_indicator;
+ unsigned long indicator;
+ u64 bit_nr;
+ u8 isc;
+} __packed;
+
struct virtio_ccw_vq_info {
struct virtqueue *vq;
int num;
void *queue;
struct vq_info_block *info_block;
+ int bit_nr;
struct list_head node;
long cookie;
};
+#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */
+
+#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
+#define MAX_AIRQ_AREAS 20
+
+static int virtio_ccw_use_airq = 1;
+
+struct airq_info {
+ rwlock_t lock;
+ u8 summary_indicator;
+ struct airq_struct airq;
+ struct airq_iv *aiv;
+};
+static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
@@ -91,6 +119,7 @@ struct virtio_ccw_vq_info {
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_SET_IND_ADAPTER 0x73
#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
@@ -102,6 +131,7 @@ struct virtio_ccw_vq_info {
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
+#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -109,6 +139,125 @@ static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
return container_of(vdev, struct virtio_ccw_device, vdev);
}
+static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
+{
+ unsigned long i, flags;
+
+ write_lock_irqsave(&info->lock, flags);
+ for (i = 0; i < airq_iv_end(info->aiv); i++) {
+ if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
+ airq_iv_free_bit(info->aiv, i);
+ airq_iv_set_ptr(info->aiv, i, 0);
+ break;
+ }
+ }
+ write_unlock_irqrestore(&info->lock, flags);
+}
+
+static void virtio_airq_handler(struct airq_struct *airq)
+{
+ struct airq_info *info = container_of(airq, struct airq_info, airq);
+ unsigned long ai;
+
+ inc_irq_stat(IRQIO_VAI);
+ read_lock(&info->lock);
+ /* Walk through indicators field, summary indicator active. */
+ for (ai = 0;;) {
+ ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
+ if (ai == -1UL)
+ break;
+ vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
+ }
+ info->summary_indicator = 0;
+ smp_wmb();
+ /* Walk through indicators field, summary indicator not active. */
+ for (ai = 0;;) {
+ ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
+ if (ai == -1UL)
+ break;
+ vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
+ }
+ read_unlock(&info->lock);
+}
+
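
virtio_airq_handler() above scans the indicator bits twice: once while the summary indicator is still set, then again after clearing it, so an indicator that was raised between the first scan and the clear is not left waiting for a summary interrupt that will never arrive. That is one common reading of the pattern; the single-threaded model below only illustrates the control flow, with summary_indicator, indicator[] and handle() standing in for the adapter-written memory and vring_interrupt().

/* Two-pass indicator scan around clearing the summary, illustrative only. */
#include <stdio.h>

#define NVQ 4

static int summary_indicator;
static int indicator[NVQ];

static void handle(int i)
{
	indicator[i] = 0;
	printf("handled vq %d\n", i);
}

static void scan_indicators(void)
{
	int i;

	for (i = 0; i < NVQ; i++)
		if (indicator[i])
			handle(i);
}

static void airq_handler(void)
{
	/* Pass 1: summary still set, so no new summary interrupt is raised. */
	scan_indicators();
	summary_indicator = 0;
	/* The real handler issues smp_wmb() here to order the clear. */
	/* Pass 2: pick up anything set between pass 1 and the clear. */
	scan_indicators();
}

int main(void)
{
	summary_indicator = 1;
	indicator[1] = indicator[3] = 1;
	airq_handler();
	return 0;
}
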
+static struct airq_info *new_airq_info(void)
+{
+ struct airq_info *info;
+ int rc;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+ rwlock_init(&info->lock);
+ info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
+ if (!info->aiv) {
+ kfree(info);
+ return NULL;
+ }
+ info->airq.handler = virtio_airq_handler;
+ info->airq.lsi_ptr = &info->summary_indicator;
+ info->airq.lsi_mask = 0xff;
+ info->airq.isc = VIRTIO_AIRQ_ISC;
+ rc = register_adapter_interrupt(&info->airq);
+ if (rc) {
+ airq_iv_release(info->aiv);
+ kfree(info);
+ return NULL;
+ }
+ return info;
+}
+
+static void destroy_airq_info(struct airq_info *info)
+{
+ if (!info)
+ return;
+
+ unregister_adapter_interrupt(&info->airq);
+ airq_iv_release(info->aiv);
+ kfree(info);
+}
+
+static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+ u64 *first, void **airq_info)
+{
+ int i, j;
+ struct airq_info *info;
+ unsigned long indicator_addr = 0;
+ unsigned long bit, flags;
+
+ for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
+ if (!airq_areas[i])
+ airq_areas[i] = new_airq_info();
+ info = airq_areas[i];
+ if (!info)
+ return 0;
+ write_lock_irqsave(&info->lock, flags);
+ bit = airq_iv_alloc(info->aiv, nvqs);
+ if (bit == -1UL) {
+ /* Not enough vacancies. */
+ write_unlock_irqrestore(&info->lock, flags);
+ continue;
+ }
+ *first = bit;
+ *airq_info = info;
+ indicator_addr = (unsigned long)info->aiv->vector;
+ for (j = 0; j < nvqs; j++) {
+ airq_iv_set_ptr(info->aiv, bit + j,
+ (unsigned long)vqs[j]);
+ }
+ write_unlock_irqrestore(&info->lock, flags);
+ }
+ return indicator_addr;
+}
+
+static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
+{
+ struct virtio_ccw_vq_info *info;
+
+ list_for_each_entry(info, &vcdev->virtqueues, node)
+ drop_airq_indicator(info->vq, vcdev->airq_info);
+}
+
static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
unsigned long flags;
@@ -145,6 +294,51 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
return ret ? ret : vcdev->err;
}
+static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
+ struct ccw1 *ccw)
+{
+ int ret;
+ unsigned long *indicatorp = NULL;
+ struct virtio_thinint_area *thinint_area = NULL;
+ struct airq_info *airq_info = vcdev->airq_info;
+
+ if (vcdev->is_thinint) {
+ thinint_area = kzalloc(sizeof(*thinint_area),
+ GFP_DMA | GFP_KERNEL);
+ if (!thinint_area)
+ return;
+ thinint_area->summary_indicator =
+ (unsigned long) &airq_info->summary_indicator;
+ thinint_area->isc = VIRTIO_AIRQ_ISC;
+ ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
+ ccw->count = sizeof(*thinint_area);
+ ccw->cda = (__u32)(unsigned long) thinint_area;
+ } else {
+ indicatorp = kmalloc(sizeof(&vcdev->indicators),
+ GFP_DMA | GFP_KERNEL);
+ if (!indicatorp)
+ return;
+ *indicatorp = 0;
+ ccw->cmd_code = CCW_CMD_SET_IND;
+ ccw->count = sizeof(vcdev->indicators);
+ ccw->cda = (__u32)(unsigned long) indicatorp;
+ }
+ /* Deregister indicators from host. */
+ vcdev->indicators = 0;
+ ccw->flags = 0;
+ ret = ccw_io_helper(vcdev, ccw,
+ vcdev->is_thinint ?
+ VIRTIO_CCW_DOING_SET_IND_ADAPTER :
+ VIRTIO_CCW_DOING_SET_IND);
+ if (ret && (ret != -ENODEV))
+ dev_info(&vcdev->cdev->dev,
+ "Failed to deregister indicators (%d)\n", ret);
+ else if (vcdev->is_thinint)
+ virtio_ccw_drop_indicators(vcdev);
+ kfree(indicatorp);
+ kfree(thinint_area);
+}
+
static inline long do_kvm_notify(struct subchannel_id schid,
unsigned long queue_index,
long cookie)
@@ -232,11 +426,13 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
struct ccw1 *ccw;
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
if (!ccw)
return;
+ virtio_ccw_drop_indicator(vcdev, ccw);
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
virtio_ccw_del_vq(vq, ccw);
@@ -326,6 +522,54 @@ out_err:
return ERR_PTR(err);
}
+static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
+ struct virtqueue *vqs[], int nvqs,
+ struct ccw1 *ccw)
+{
+ int ret;
+ struct virtio_thinint_area *thinint_area = NULL;
+ struct airq_info *info;
+
+ thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
+ if (!thinint_area) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Try to get an indicator. */
+ thinint_area->indicator = get_airq_indicator(vqs, nvqs,
+ &thinint_area->bit_nr,
+ &vcdev->airq_info);
+ if (!thinint_area->indicator) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ info = vcdev->airq_info;
+ thinint_area->summary_indicator =
+ (unsigned long) &info->summary_indicator;
+ thinint_area->isc = VIRTIO_AIRQ_ISC;
+ ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = sizeof(*thinint_area);
+ ccw->cda = (__u32)(unsigned long)thinint_area;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
+ if (ret) {
+ if (ret == -EOPNOTSUPP) {
+ /*
+ * The host does not support adapter interrupts
+ * for virtio-ccw, stop trying.
+ */
+ virtio_ccw_use_airq = 0;
+ pr_info("Adapter interrupts unsupported on host\n");
+ } else
+ dev_warn(&vcdev->cdev->dev,
+ "enabling adapter interrupts = %d\n", ret);
+ virtio_ccw_drop_indicators(vcdev);
+ }
+out:
+ kfree(thinint_area);
+ return ret;
+}
+
static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
@@ -355,15 +599,23 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
if (!indicatorp)
goto out;
*indicatorp = (unsigned long) &vcdev->indicators;
- /* Register queue indicators with host. */
- vcdev->indicators = 0;
- ccw->cmd_code = CCW_CMD_SET_IND;
- ccw->flags = 0;
- ccw->count = sizeof(vcdev->indicators);
- ccw->cda = (__u32)(unsigned long) indicatorp;
- ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
- if (ret)
- goto out;
+ if (vcdev->is_thinint) {
+ ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
+ if (ret)
+ /* no error, just fall back to legacy interrupts */
+ vcdev->is_thinint = 0;
+ }
+ if (!vcdev->is_thinint) {
+ /* Register queue indicators with host. */
+ vcdev->indicators = 0;
+ ccw->cmd_code = CCW_CMD_SET_IND;
+ ccw->flags = 0;
+ ccw->count = sizeof(vcdev->indicators);
+ ccw->cda = (__u32)(unsigned long) indicatorp;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
+ if (ret)
+ goto out;
+ }
/* Register indicators2 with host for config changes */
*indicatorp = (unsigned long) &vcdev->indicators2;
vcdev->indicators2 = 0;
@@ -636,6 +888,8 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
struct virtqueue *vq;
struct virtio_driver *drv;
+ if (!vcdev)
+ return;
/* Check if it's a notification from the host. */
if ((intparm == 0) &&
(scsw_stctl(&irb->scsw) ==
@@ -663,6 +917,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
case VIRTIO_CCW_DOING_SET_CONF_IND:
case VIRTIO_CCW_DOING_RESET:
case VIRTIO_CCW_DOING_READ_VQ_CONF:
+ case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
vcdev->curr_io &= ~activity;
wake_up(&vcdev->wait_q);
break;
@@ -734,23 +989,46 @@ static int virtio_ccw_probe(struct ccw_device *cdev)
return 0;
}
+static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
+{
+ unsigned long flags;
+ struct virtio_ccw_device *vcdev;
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ vcdev = dev_get_drvdata(&cdev->dev);
+ if (!vcdev || vcdev->going_away) {
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ return NULL;
+ }
+ vcdev->going_away = true;
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ return vcdev;
+}
+
static void virtio_ccw_remove(struct ccw_device *cdev)
{
- struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+ unsigned long flags;
+ struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
- if (cdev->online) {
+ if (vcdev && cdev->online)
unregister_virtio_device(&vcdev->vdev);
- dev_set_drvdata(&cdev->dev, NULL);
- }
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
cdev->handler = NULL;
}
static int virtio_ccw_offline(struct ccw_device *cdev)
{
- struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+ unsigned long flags;
+ struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
- unregister_virtio_device(&vcdev->vdev);
- dev_set_drvdata(&cdev->dev, NULL);
+ if (vcdev) {
+ unregister_virtio_device(&vcdev->vdev);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ }
return 0;
}
@@ -759,6 +1037,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
{
int ret;
struct virtio_ccw_device *vcdev;
+ unsigned long flags;
vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
if (!vcdev) {
@@ -778,6 +1057,8 @@ static int virtio_ccw_online(struct ccw_device *cdev)
goto out_free;
}
+ vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
+
vcdev->vdev.dev.parent = &cdev->dev;
vcdev->vdev.dev.release = virtio_ccw_release_dev;
vcdev->vdev.config = &virtio_ccw_config_ops;
@@ -786,7 +1067,9 @@ static int virtio_ccw_online(struct ccw_device *cdev)
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, vcdev);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
vcdev->vdev.id.vendor = cdev->id.cu_type;
vcdev->vdev.id.device = cdev->id.cu_model;
ret = register_virtio_device(&vcdev->vdev);
@@ -797,7 +1080,9 @@ static int virtio_ccw_online(struct ccw_device *cdev)
}
return 0;
out_put:
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
put_device(&vcdev->vdev.dev);
return ret;
out_free:
@@ -935,6 +1220,10 @@ module_init(virtio_ccw_init);
static void __exit virtio_ccw_exit(void)
{
+ int i;
+
ccw_driver_unregister(&virtio_ccw_driver);
+ for (i = 0; i < MAX_AIRQ_AREAS; i++)
+ destroy_airq_info(airq_areas[i]);
}
module_exit(virtio_ccw_exit);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index f404f55b3191..c461f2aac610 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -899,6 +899,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
add_timer(&timer);
wait_event(reply->wait_q, reply->received);
del_timer_sync(&timer);
+ destroy_timer_on_stack(&timer);
LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
rc = reply->rc;
lcs_put_reply(reply);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index a0de045eb227..5333b2c018e7 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -854,8 +854,11 @@ static inline int qeth_get_micros(void)
static inline int qeth_get_ip_version(struct sk_buff *skb)
{
- struct ethhdr *ehdr = (struct ethhdr *)skb->data;
- switch (ehdr->h_proto) {
+ __be16 *p = &((struct ethhdr *)skb->data)->h_proto;
+
+ if (*p == ETH_P_8021Q)
+ p += 2;
+ switch (*p) {
case ETH_P_IPV6:
return 6;
case ETH_P_IP:
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 795ed61a5496..22470a3b182f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -33,8 +33,8 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
/* N P A M L V H */
[QETH_DBF_SETUP] = {"qeth_setup",
8, 1, 8, 5, &debug_hex_ascii_view, NULL},
- [QETH_DBF_MSG] = {"qeth_msg",
- 8, 1, 128, 3, &debug_sprintf_view, NULL},
+ [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
+ &debug_sprintf_view, NULL},
[QETH_DBF_CTRL] = {"qeth_control",
8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
@@ -4610,8 +4610,8 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_query_oat_command);
-int qeth_query_card_info_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
+static int qeth_query_card_info_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd;
struct qeth_query_card_info *card_info;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 908d82529ee9..8dea3f12ccc1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -241,7 +241,7 @@ static inline int qeth_l2_get_cast_type(struct qeth_card *card,
}
static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type)
+ struct sk_buff *skb, int cast_type)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
@@ -762,7 +762,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
elements_needed++;
skb_reset_mac_header(new_skb);
- qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
+ qeth_l2_fill_header(card, hdr, new_skb, cast_type);
hdr->hdr.l2.pkt_length = new_skb->len;
memcpy(((char *)hdr) + sizeof(struct qeth_hdr),
skb_mac_header(new_skb), ETH_HLEN);
@@ -775,7 +775,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr = (struct qeth_hdr *)skb_push(new_skb,
sizeof(struct qeth_hdr));
skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
- qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
+ qeth_l2_fill_header(card, hdr, new_skb, cast_type);
}
}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index c8bd092fc945..02832d64d918 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,6 +263,9 @@ config SCSI_SCAN_ASYNC
You can override this choice by specifying "scsi_mod.scan=sync"
or async on the kernel's command line.
+ Note that this setting also affects whether resuming from
+ system suspend will be performed asynchronously.
+
menu "SCSI Transports"
depends on SCSI
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 1e9d6ad9302b..bcd223868227 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -584,7 +584,7 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
NCR5380_setup(instance);
for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
- if ((mask & possible) && (request_irq(i, &probe_intr, IRQF_DISABLED, "NCR-probe", NULL) == 0))
+ if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
trying_irqs |= mask;
timeout = jiffies + (250 * HZ / 1000);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 9323d058706b..eaaf8705a5f4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 30200
+# define AAC_DRIVER_BUILD 30300
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index dada38aeacc0..5c6a8703f535 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -480,7 +480,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
{
- u32 var;
+ u32 var = 0;
if (!(dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
@@ -500,13 +500,14 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
if (bled && (bled != -ETIMEDOUT))
return -EINVAL;
}
- if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
+ if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
rx_writel(dev, MUnit.reserved2, 3);
msleep(5000); /* Delay 5 seconds */
var = 0x00000001;
}
- if (var != 0x00000001)
+ if (bled && (var != 0x00000001))
return -EINVAL;
+ ssleep(5);
if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
return -ENODEV;
if (startup_timeout < 300)
@@ -646,7 +647,7 @@ int _aac_rx_init(struct aac_dev *dev)
dev->sync_mode = 0; /* sync. mode not supported */
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+ IRQF_SHARED, "aacraid", dev) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 2244f315f33b..e66477c98240 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -387,8 +387,7 @@ int aac_sa_init(struct aac_dev *dev)
goto error_irq;
dev->sync_mode = 0; /* sync. mode not supported */
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED|IRQF_DISABLED,
- "aacraid", (void *)dev ) < 0) {
+ IRQF_SHARED, "aacraid", (void *)dev) < 0) {
printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
name, instance);
goto error_iounmap;
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 7e17107643d4..9c65aed26212 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -647,7 +647,7 @@ int aac_src_init(struct aac_dev *dev)
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+ IRQF_SHARED, "aacraid", dev) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
@@ -804,7 +804,7 @@ int aac_srcv_init(struct aac_dev *dev)
goto error_iounmap;
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+ IRQF_SHARED, "aacraid", dev) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 3f7b6fee0a74..e86eb6a921fc 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -857,7 +857,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
SETPORT(SIMODE0, 0);
SETPORT(SIMODE1, 0);
- if( request_irq(shpnt->irq, swintr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
+ if (request_irq(shpnt->irq, swintr, IRQF_SHARED, "aha152x", shpnt)) {
printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq);
goto out_host_put;
}
@@ -891,7 +891,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
SETPORT(SSTAT0, 0x7f);
SETPORT(SSTAT1, 0xef);
- if ( request_irq(shpnt->irq, intr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
+ if (request_irq(shpnt->irq, intr, IRQF_SHARED, "aha152x", shpnt)) {
printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq);
goto out_host_put;
}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
index 9df9e2ce3538..8373447bd7d3 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
@@ -209,7 +209,6 @@ struct instruction {
#define AIC_OP_JC16 0x9105
#define AIC_OP_JNC16 0x9205
#define AIC_OP_CALL16 0x9305
-#define AIC_OP_CALL16 0x9305
/* Page extension is low three bits of second opcode byte. */
#define AIC_OP_JMPF 0xA005
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4f6a30b8e5f9..652b41b4ddbd 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2500,16 +2500,15 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
- dma_addr_t dma_coherent_handle;
+
/*
********************************************************************
** here we need to tell iop 331 our freeccb.HighPart
** if freeccb.HighPart is not zero
********************************************************************
*/
- dma_coherent_handle = acb->dma_coherent_handle;
- cdb_phyaddr = (uint32_t)(dma_coherent_handle);
- cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
+ cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
+ cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
/*
***********************************************************************
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 09ba1869d366..059ff477a398 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2971,7 +2971,7 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
ec->irqaddr = ashost->fast + INT_REG;
ec->irqmask = 0x0a;
- ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost);
+ ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost);
if (ret) {
printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
host->host_no, ashost->scsi.irq, ret);
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index b679778376c5..f8e060900052 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -262,7 +262,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
goto out_unmap;
}
- ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
+ ret = request_irq(host->irq, cumanascsi_intr, 0,
"CumanaSCSI-1", host);
if (ret) {
printk("scsi%d: IRQ%d not free: %d\n",
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 58915f29055b..abc66f5263ec 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -431,7 +431,7 @@ static int cumanascsi2_probe(struct expansion_card *ec,
goto out_free;
ret = request_irq(ec->irq, cumanascsi_2_intr,
- IRQF_DISABLED, "cumanascsi2", info);
+ 0, "cumanascsi2", info);
if (ret) {
printk("scsi%d: IRQ%d not free: %d\n",
host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index abc9593615e9..5e1b73e1b743 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -358,7 +358,7 @@ static int powertecscsi_probe(struct expansion_card *ec,
goto out_free;
ret = request_irq(ec->irq, powertecscsi_intr,
- IRQF_DISABLED, "powertec", info);
+ 0, "powertec", info);
if (ret) {
printk("scsi%d: IRQ%d not free: %d\n",
host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a3e6c8a3ff0f..296c936cc03c 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -90,6 +90,7 @@
#include <linux/init.h>
#include <linux/nvram.h>
#include <linux/bitops.h>
+#include <linux/wait.h>
#include <asm/setup.h>
#include <asm/atarihw.h>
@@ -549,8 +550,10 @@ static void falcon_get_lock(void)
local_irq_save(flags);
- while (!in_irq() && falcon_got_lock && stdma_others_waiting())
- sleep_on(&falcon_fairness_wait);
+ wait_event_cmd(falcon_fairness_wait,
+ in_interrupt() || !falcon_got_lock || !stdma_others_waiting(),
+ local_irq_restore(flags),
+ local_irq_save(flags));
while (!falcon_got_lock) {
if (in_irq())
@@ -562,7 +565,10 @@ static void falcon_get_lock(void)
falcon_trying_lock = 0;
wake_up(&falcon_try_wait);
} else {
- sleep_on(&falcon_try_wait);
+ wait_event_cmd(falcon_try_wait,
+ falcon_got_lock && !falcon_trying_lock,
+ local_irq_restore(flags),
+ local_irq_save(flags));
}
}
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 2e28f6c419fe..1bfb0bd01198 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -98,6 +98,14 @@ struct be_mcc_obj {
struct be_queue_info cq;
};
+struct beiscsi_mcc_tag_state {
+#define MCC_TAG_STATE_COMPLETED 0x00
+#define MCC_TAG_STATE_RUNNING 0x01
+#define MCC_TAG_STATE_TIMEOUT 0x02
+ uint8_t tag_state;
+ struct be_dma_mem tag_mem_state;
+};
+
struct be_ctrl_info {
u8 __iomem *csr;
u8 __iomem *db; /* Door Bell */
@@ -122,6 +130,8 @@ struct be_ctrl_info {
unsigned short mcc_alloc_index;
unsigned short mcc_free_index;
unsigned int mcc_tag_available;
+
+ struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1];
};
#include "be_cmds.h"
@@ -129,6 +139,7 @@ struct be_ctrl_info {
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
#define mcc_timeout 120000 /* 12s timeout */
+#define BEISCSI_LOGOUT_SYNC_DELAY 250
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 3338391b64de..1432ed5e9fc6 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -138,7 +138,7 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
* @phba: Driver private structure
* @tag: Tag for the MBX Command
* @wrb: the WRB used for the MBX Command
- * @cmd_hdr: IOCTL Hdr for the MBX Cmd
+ * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
*
* Waits for MBX completion with the passed TAG.
*
@@ -148,21 +148,26 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
**/
int beiscsi_mccq_compl(struct beiscsi_hba *phba,
uint32_t tag, struct be_mcc_wrb **wrb,
- void *cmd_hdr)
+ struct be_dma_mem *mbx_cmd_mem)
{
int rc = 0;
uint32_t mcc_tag_response;
uint16_t status = 0, addl_status = 0, wrb_num = 0;
struct be_mcc_wrb *temp_wrb;
- struct be_cmd_req_hdr *ioctl_hdr;
- struct be_cmd_resp_hdr *ioctl_resp_hdr;
+ struct be_cmd_req_hdr *mbx_hdr;
+ struct be_cmd_resp_hdr *mbx_resp_hdr;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
if (beiscsi_error(phba)) {
free_mcc_tag(&phba->ctrl, tag);
- return -EIO;
+ return -EPERM;
}
+ /* Set MBX Tag state to Active */
+ spin_lock(&phba->ctrl.mbox_lock);
+ phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
+ spin_unlock(&phba->ctrl.mbox_lock);
+
/* wait for the mccq completion */
rc = wait_event_interruptible_timeout(
phba->ctrl.mcc_wait[tag],
@@ -171,56 +176,71 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
BEISCSI_HOST_MBX_TIMEOUT));
if (rc <= 0) {
+ struct be_dma_mem *tag_mem;
+ /* Set MBX Tag state to timeout */
+ spin_lock(&phba->ctrl.mbox_lock);
+ phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
+ spin_unlock(&phba->ctrl.mbox_lock);
+
+ /* Store resource addr to be freed later */
+ tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
+ if (mbx_cmd_mem) {
+ tag_mem->size = mbx_cmd_mem->size;
+ tag_mem->va = mbx_cmd_mem->va;
+ tag_mem->dma = mbx_cmd_mem->dma;
+ } else
+ tag_mem->size = 0;
+
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
"BC_%d : MBX Cmd Completion timed out\n");
- rc = -EBUSY;
-
- /* decrement the mccq used count */
- atomic_dec(&phba->ctrl.mcc_obj.q.used);
-
- goto release_mcc_tag;
- } else
+ return -EBUSY;
+ } else {
rc = 0;
+ /* Set MBX Tag state to completed */
+ spin_lock(&phba->ctrl.mbox_lock);
+ phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+ spin_unlock(&phba->ctrl.mbox_lock);
+ }
mcc_tag_response = phba->ctrl.mcc_numtag[tag];
status = (mcc_tag_response & CQE_STATUS_MASK);
addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
CQE_STATUS_ADDL_SHIFT);
- if (cmd_hdr) {
- ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr;
+ if (mbx_cmd_mem) {
+ mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
} else {
wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
CQE_STATUS_WRB_SHIFT;
temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
- ioctl_hdr = embedded_payload(temp_wrb);
+ mbx_hdr = embedded_payload(temp_wrb);
if (wrb)
*wrb = temp_wrb;
}
if (status || addl_status) {
- beiscsi_log(phba, KERN_ERR,
+ beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
"BC_%d : MBX Cmd Failed for "
"Subsys : %d Opcode : %d with "
"Status : %d and Extd_Status : %d\n",
- ioctl_hdr->subsystem,
- ioctl_hdr->opcode,
+ mbx_hdr->subsystem,
+ mbx_hdr->opcode,
status, addl_status);
if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
- ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
+ mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
"BC_%d : Insufficent Buffer Error "
"Resp_Len : %d Actual_Resp_Len : %d\n",
- ioctl_resp_hdr->response_length,
- ioctl_resp_hdr->actual_resp_len);
+ mbx_resp_hdr->response_length,
+ mbx_resp_hdr->actual_resp_len);
rc = -EAGAIN;
goto release_mcc_tag;
@@ -319,6 +339,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl)
{
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
u16 compl_status, extd_status;
unsigned short tag;
@@ -338,7 +359,32 @@ int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
- wake_up_interruptible(&ctrl->mcc_wait[tag]);
+
+ if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
+ wake_up_interruptible(&ctrl->mcc_wait[tag]);
+ } else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
+ struct be_dma_mem *tag_mem;
+ tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Completion for timeout Command "
+ "from FW\n");
+ /* Check if memory needs to be freed */
+ if (tag_mem->size)
+ pci_free_consistent(ctrl->pdev, tag_mem->size,
+ tag_mem->va, tag_mem->dma);
+
+ /* Change tag state */
+ spin_lock(&phba->ctrl.mbox_lock);
+ ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+ spin_unlock(&phba->ctrl.mbox_lock);
+
+ /* Free MCC Tag */
+ free_mcc_tag(ctrl, tag);
+ }
+
return 0;
}
@@ -354,8 +400,23 @@ static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
return NULL;
}
-static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
+/**
+ * be2iscsi_fail_session(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ *
+ * Depending on adapter state appropriate error flag is passed.
+ **/
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ uint32_t iscsi_err_flag;
+
+ if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
+ iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
+ else
+ iscsi_err_flag = ISCSI_ERR_CONN_FAILED;
+
iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}
@@ -386,18 +447,6 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
}
}
-static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
- u16 num_popped)
-{
- u32 val = 0;
- val |= qid & DB_CQ_RING_ID_MASK;
- if (arm)
- val |= 1 << DB_CQ_REARM_SHIFT;
- val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
- iowrite32(val, phba->db_va + DB_CQ_OFFSET);
-}
-
-
int beiscsi_process_mcc(struct beiscsi_hba *phba)
{
struct be_mcc_compl *compl;
@@ -428,7 +477,7 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
}
if (num)
- beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
+ hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);
spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
return status;
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 627ebbe0172c..7cf7f99ee442 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -103,7 +103,7 @@ struct be_mcc_compl {
/********** MCC door bell ************/
#define DB_MCCQ_OFFSET 0x140
-#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+#define DB_MCCQ_RING_ID_MASK 0xFFFF /* bits 0 - 15 */
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT 16 /* bits 16 - 29 */
@@ -709,7 +709,8 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
int beiscsi_mccq_compl(struct beiscsi_hba *phba,
- uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
+ uint32_t tag, struct be_mcc_wrb **wrb,
+ struct be_dma_mem *mbx_cmd_mem);
/*ISCSI Functuions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
@@ -1017,8 +1018,8 @@ struct be_mcc_wrb_context {
int *users_final_status;
} __packed;
-#define DB_DEF_PDU_RING_ID_MASK 0x3FF /* bits 0 - 9 */
-#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 0 - 9 */
+#define DB_DEF_PDU_RING_ID_MASK 0x3FFF /* bits 0 - 13 */
+#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 16 - 29 */
#define DB_DEF_PDU_REARM_SHIFT 14
#define DB_DEF_PDU_EVENT_SHIFT 15
#define DB_DEF_PDU_CQPROC_SHIFT 16
@@ -1317,4 +1318,5 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
u8 subsystem, u8 opcode, int cmd_len);
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session);
#endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 889066d9d6fb..a3df43324c98 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -793,7 +793,7 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
break;
case BE2ISCSI_LINK_SPEED_100MBPS:
- ihost->port_speed = BE2ISCSI_LINK_SPEED_100MBPS;
+ ihost->port_speed = ISCSI_PORT_SPEED_100MBPS;
break;
case BE2ISCSI_LINK_SPEED_1GBPS:
ihost->port_speed = ISCSI_PORT_SPEED_1GBPS;
@@ -1153,16 +1153,18 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
return -EAGAIN;
}
- ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+ ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BS_%d : mgmt_open_connection Failed");
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
+ if (ret != -EBUSY)
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+
beiscsi_free_ep(beiscsi_ep);
- return -EBUSY;
+ return ret;
}
ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
@@ -1359,6 +1361,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
beiscsi_mccq_compl(phba, tag, NULL, NULL);
beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
free_ep:
+ msleep(BEISCSI_LOGOUT_SYNC_DELAY);
beiscsi_free_ep(beiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 5642a9b250c2..0d822297aa80 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -228,24 +228,25 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
struct invalidate_command_table *inv_tbl;
struct be_dma_mem nonemb_cmd;
unsigned int cid, tag, num_invalidate;
+ int rc;
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (!aborted_task || !aborted_task->sc) {
/* we raced */
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return SUCCESS;
}
aborted_io_task = aborted_task->dd_data;
if (!aborted_io_task->scsi_cmnd) {
/* raced or invalid command */
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return SUCCESS;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/* Invalidate WRB Posted for this Task */
AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
aborted_io_task->pwrb_handle->pwrb,
@@ -285,9 +286,11 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return FAILED;
}
- beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
+ rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ if (rc != -EBUSY)
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+
return iscsi_eh_abort(sc);
}
@@ -303,13 +306,14 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
struct invalidate_command_table *inv_tbl;
struct be_dma_mem nonemb_cmd;
unsigned int cid, tag, i, num_invalidate;
+ int rc;
/* invalidate iocbs */
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return FAILED;
}
conn = session->leadconn;
@@ -338,7 +342,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
num_invalidate++;
inv_tbl++;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
inv_tbl = phba->inv_tbl;
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -363,9 +367,10 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
return FAILED;
}
- beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
+ rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ if (rc != -EBUSY)
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
return iscsi_eh_device_reset(sc);
}
@@ -674,8 +679,19 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
}
pci_set_master(pcidev);
- if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
- ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
+ ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
+ pci_disable_device(pcidev);
+ return ret;
+ } else {
+ ret = pci_set_consistent_dma_mask(pcidev,
+ DMA_BIT_MASK(32));
+ }
+ } else {
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
if (ret) {
dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
pci_disable_device(pcidev);
@@ -804,14 +820,23 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba,
unsigned char rearm, unsigned char event)
{
u32 val = 0;
- val |= id & DB_EQ_RING_ID_MASK;
+
if (rearm)
val |= 1 << DB_EQ_REARM_SHIFT;
if (clr_interrupt)
val |= 1 << DB_EQ_CLR_SHIFT;
if (event)
val |= 1 << DB_EQ_EVNT_SHIFT;
+
val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
+ /* Setting lower order EQ_ID Bits */
+ val |= (id & DB_EQ_RING_ID_LOW_MASK);
+
+ /* Setting Higher order EQ_ID Bits */
+ val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
+ DB_EQ_RING_ID_HIGH_MASK)
+ << DB_EQ_HIGH_SET_SHIFT);
+
iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}
@@ -873,7 +898,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
struct be_queue_info *cq;
unsigned int num_eq_processed;
struct be_eq_obj *pbe_eq;
- unsigned long flags;
pbe_eq = dev_id;
eq = &pbe_eq->q;
@@ -882,31 +906,15 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
phba = pbe_eq->phba;
num_eq_processed = 0;
- if (blk_iopoll_enabled) {
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
- blk_iopoll_sched(&pbe_eq->iopoll);
-
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- num_eq_processed++;
- }
- } else {
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- num_eq_processed++;
- }
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+ blk_iopoll_sched(&pbe_eq->iopoll);
- if (pbe_eq->todo_cq)
- queue_work(phba->wq, &pbe_eq->work_cqs);
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ num_eq_processed++;
}
if (num_eq_processed)
@@ -927,7 +935,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)
struct hwi_context_memory *phwi_context;
struct be_eq_entry *eqe = NULL;
struct be_queue_info *eq;
- struct be_queue_info *cq;
struct be_queue_info *mcc;
unsigned long flags, index;
unsigned int num_mcceq_processed, num_ioeq_processed;
@@ -953,72 +960,40 @@ static irqreturn_t be_isr(int irq, void *dev_id)
num_ioeq_processed = 0;
num_mcceq_processed = 0;
- if (blk_iopoll_enabled) {
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- if (((eqe->dw[offsetof(struct amap_eq_entry,
- resource_id) / 32] &
- EQE_RESID_MASK) >> 16) == mcc->id) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_mcc_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- num_mcceq_processed++;
- } else {
- if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
- blk_iopoll_sched(&pbe_eq->iopoll);
- num_ioeq_processed++;
- }
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- }
- if (num_ioeq_processed || num_mcceq_processed) {
- if (pbe_eq->todo_mcc_cq)
- queue_work(phba->wq, &pbe_eq->work_cqs);
-
- if ((num_mcceq_processed) && (!num_ioeq_processed))
- hwi_ring_eq_db(phba, eq->id, 0,
- (num_ioeq_processed +
- num_mcceq_processed) , 1, 1);
- else
- hwi_ring_eq_db(phba, eq->id, 0,
- (num_ioeq_processed +
- num_mcceq_processed), 0, 1);
-
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
- } else {
- cq = &phwi_context->be_cq[0];
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
-
- if (((eqe->dw[offsetof(struct amap_eq_entry,
- resource_id) / 32] &
- EQE_RESID_MASK) >> 16) != cq->id) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_mcc_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- } else {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- }
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ if (((eqe->dw[offsetof(struct amap_eq_entry,
+ resource_id) / 32] &
+ EQE_RESID_MASK) >> 16) == mcc->id) {
+ spin_lock_irqsave(&phba->isr_lock, flags);
+ pbe_eq->todo_mcc_cq = true;
+ spin_unlock_irqrestore(&phba->isr_lock, flags);
+ num_mcceq_processed++;
+ } else {
+ if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+ blk_iopoll_sched(&pbe_eq->iopoll);
num_ioeq_processed++;
}
- if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ }
+ if (num_ioeq_processed || num_mcceq_processed) {
+ if (pbe_eq->todo_mcc_cq)
queue_work(phba->wq, &pbe_eq->work_cqs);
- if (num_ioeq_processed) {
+ if ((num_mcceq_processed) && (!num_ioeq_processed))
hwi_ring_eq_db(phba, eq->id, 0,
- num_ioeq_processed, 1, 1);
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
- }
+ (num_ioeq_processed +
+ num_mcceq_processed) , 1, 1);
+ else
+ hwi_ring_eq_db(phba, eq->id, 0,
+ (num_ioeq_processed +
+ num_mcceq_processed), 0, 1);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
}
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
@@ -1093,15 +1068,25 @@ free_msix_irqs:
return ret;
}
-static void hwi_ring_cq_db(struct beiscsi_hba *phba,
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
unsigned int id, unsigned int num_processed,
unsigned char rearm, unsigned char event)
{
u32 val = 0;
- val |= id & DB_CQ_RING_ID_MASK;
+
if (rearm)
val |= 1 << DB_CQ_REARM_SHIFT;
+
val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
+
+ /* Setting lower order CQ_ID Bits */
+ val |= (id & DB_CQ_RING_ID_LOW_MASK);
+
+ /* Setting Higher order CQ_ID Bits */
+ val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
+ DB_CQ_RING_ID_HIGH_MASK)
+ << DB_CQ_HIGH_SET_SHIFT);
+
iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}
@@ -1150,9 +1135,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
return 1;
}
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->back_lock);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->back_lock);
return 0;
}
@@ -1342,8 +1327,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
resid = csol_cqe->res_cnt;
if (!task->sc) {
- if (io_task->scsi_cmnd)
+ if (io_task->scsi_cmnd) {
scsi_dma_unmap(io_task->scsi_cmnd);
+ io_task->scsi_cmnd = NULL;
+ }
return;
}
@@ -1380,6 +1367,7 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
conn->rxdata_octets += resid;
unmap:
scsi_dma_unmap(io_task->scsi_cmnd);
+ io_task->scsi_cmnd = NULL;
iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
@@ -1568,7 +1556,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
pwrb = pwrb_handle->pwrb;
type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->back_lock);
switch (type) {
case HWH_TYPE_IO:
case HWH_TYPE_IO_RD:
@@ -1607,7 +1595,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
break;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->back_lock);
}
static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
@@ -4360,12 +4348,16 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
goto boot_freemem;
}
- ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+ ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BM_%d : beiscsi_get_session_info Failed");
- goto boot_freemem;
+
+ if (ret != -EBUSY)
+ goto boot_freemem;
+ else
+ return ret;
}
session_resp = nonemb_cmd.va ;
@@ -4625,6 +4617,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
spin_unlock(&phba->io_sgl_lock);
io_task->psgl_handle = NULL;
}
+
+ if (io_task->scsi_cmnd) {
+ scsi_dma_unmap(io_task->scsi_cmnd);
+ io_task->scsi_cmnd = NULL;
+ }
} else {
if (!beiscsi_conn->login_in_progress)
beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
@@ -4646,9 +4643,9 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
* login/startup related tasks.
*/
beiscsi_conn->login_in_progress = 0;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->back_lock);
beiscsi_cleanup_task(task);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->back_lock);
pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
@@ -5216,11 +5213,10 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
}
pci_disable_msix(phba->pcidev);
- if (blk_iopoll_enabled)
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_disable(&pbe_eq->iopoll);
- }
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_disable(&pbe_eq->iopoll);
+ }
if (unload_state == BEISCSI_CLEAN_UNLOAD) {
destroy_workqueue(phba->wq);
@@ -5273,6 +5269,8 @@ static void beiscsi_shutdown(struct pci_dev *pcidev)
return;
}
+ phba->state = BE_ADAPTER_STATE_SHUTDOWN;
+ iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
pci_disable_device(pcidev);
}
@@ -5429,32 +5427,18 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- if (blk_iopoll_enabled) {
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
- be_iopoll);
- blk_iopoll_enable(&pbe_eq->iopoll);
- }
-
- i = (phba->msix_enabled) ? i : 0;
- /* Work item for MCC handling */
+ for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
- } else {
- if (phba->msix_enabled) {
- for (i = 0; i <= phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
- } else {
- pbe_eq = &phwi_context->be_eq[0];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
+ blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+ be_iopoll);
+ blk_iopoll_enable(&pbe_eq->iopoll);
}
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
ret = beiscsi_init_irqs(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5594,6 +5578,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_tag[i] = i + 1;
phba->ctrl.mcc_numtag[i + 1] = 0;
phba->ctrl.mcc_tag_available++;
+ memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
+ sizeof(struct beiscsi_mcc_tag_state));
}
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
@@ -5614,32 +5600,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- if (blk_iopoll_enabled) {
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
- be_iopoll);
- blk_iopoll_enable(&pbe_eq->iopoll);
- }
-
- i = (phba->msix_enabled) ? i : 0;
- /* Work item for MCC handling */
+ for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
- } else {
- if (phba->msix_enabled) {
- for (i = 0; i <= phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
- } else {
- pbe_eq = &phwi_context->be_eq[0];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
+ blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+ be_iopoll);
+ blk_iopoll_enable(&pbe_eq->iopoll);
}
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
ret = beiscsi_init_irqs(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5668,11 +5640,10 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
free_blkenbld:
destroy_workqueue(phba->wq);
- if (blk_iopoll_enabled)
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_disable(&pbe_eq->iopoll);
- }
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_disable(&pbe_eq->iopoll);
+ }
free_twq:
beiscsi_clean_port(phba);
beiscsi_free_mem(phba);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 31fa27b4a9b2..9380b55bdeaf 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "10.0.659.0"
+#define BUILD_STR "10.2.125.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -97,9 +97,14 @@
#define INVALID_SESS_HANDLE 0xFFFFFFFF
+/**
+ * Adapter States
+ **/
#define BE_ADAPTER_LINK_UP 0x001
#define BE_ADAPTER_LINK_DOWN 0x002
#define BE_ADAPTER_PCI_ERR 0x004
+#define BE_ADAPTER_STATE_SHUTDOWN 0x008
+
#define BEISCSI_CLEAN_UNLOAD 0x01
#define BEISCSI_EEH_UNLOAD 0x02
@@ -135,11 +140,15 @@
#define DB_RXULP0_OFFSET 0xA0
/********* Event Q door bell *************/
#define DB_EQ_OFFSET DB_CQ_OFFSET
-#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
+#define DB_EQ_RING_ID_LOW_MASK 0x1FF /* bits 0 - 8 */
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
/* Must be 1 */
#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
+/* Higher Order EQ_ID bit */
+#define DB_EQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */
+#define DB_EQ_HIGH_SET_SHIFT 11
+#define DB_EQ_HIGH_FEILD_SHIFT 9
/* Number of event entries processed */
#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
@@ -147,7 +156,12 @@
/********* Compl Q door bell *************/
#define DB_CQ_OFFSET 0x120
-#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+#define DB_CQ_RING_ID_LOW_MASK 0x3FF /* bits 0 - 9 */
+/* Higher Order CQ_ID bit */
+#define DB_CQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */
+#define DB_CQ_HIGH_SET_SHIFT 11
+#define DB_CQ_HIGH_FEILD_SHIFT 10
+
/* Number of event entries processed */
#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
@@ -821,6 +835,9 @@ void beiscsi_process_all_cqs(struct work_struct *work);
void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
struct iscsi_task *task);
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
+ unsigned int id, unsigned int num_processed,
+ unsigned char rearm, unsigned char event);
static inline bool beiscsi_error(struct beiscsi_hba *phba)
{
return phba->ue_detected || phba->fw_timeout;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index b2fcac78feaa..088bdf752cfa 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -828,22 +828,25 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
- rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
+ rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd);
+
+ if (resp_buf)
+ memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
+
if (rc) {
- /* Check if the IOCTL needs to be re-issued */
+ /* Check if the MBX Cmd needs to be re-issued */
if (rc == -EAGAIN)
return rc;
- beiscsi_log(phba, KERN_ERR,
+ beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
- goto free_cmd;
+ if (rc != -EBUSY)
+ goto free_cmd;
+ else
+ return rc;
}
-
- if (resp_buf)
- memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
-
free_cmd:
pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
nonemb_cmd->va, nonemb_cmd->dma);
@@ -1348,7 +1351,6 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
{
int rc;
unsigned int tag;
- struct be_mcc_wrb *wrb = NULL;
tag = be_cmd_set_vlan(phba, vlan_tag);
if (!tag) {
@@ -1358,7 +1360,7 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
return -EBUSY;
}
- rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 65180e15de6e..315d6d6dcfc8 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3878,7 +3878,7 @@ bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
bfa_trc(sfp, sfp->data_valid);
if (sfp->data_valid) {
u32 size = sizeof(struct sfp_mem_s);
- u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
+ u8 *des = (u8 *) &(sfp->sfpmem);
memcpy(des, sfp->dbuf_kva, size);
}
/*
@@ -6851,7 +6851,7 @@ static u32
bfa_flash_status_read(void __iomem *pci_bar)
{
union bfa_flash_dev_status_reg_u dev_status;
- u32 status;
+ int status;
u32 ret_status;
int i;
@@ -6899,7 +6899,7 @@ static u32
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
char *buf)
{
- u32 status;
+ int status;
/*
* len must be mutiple of 4 and not exceeding fifo size
@@ -7006,7 +7006,7 @@ bfa_flash_sem_get(void __iomem *bar)
while (!bfa_raw_sem_get(bar)) {
if (--n <= 0)
return BFA_STATUS_BADFLASH;
- udelay(10000);
+ mdelay(10);
}
return BFA_STATUS_OK;
}
@@ -7021,7 +7021,8 @@ bfa_status_t
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
u32 len)
{
- u32 n, status;
+ u32 n;
+ int status;
u32 off, l, s, residue, fifo_sz;
residue = len;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 157f6044a9bb..8994fb857ee9 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2304,8 +2304,10 @@ bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
spin_lock_irqsave(&bfad->bfad_lock, flags);
- if (bfa_fcport_is_dport(&bfad->bfa))
+ if (bfa_fcport_is_dport(&bfad->bfa)) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return BFA_STATUS_DPORT_ERR;
+ }
if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 9967f9c14851..f067332bf763 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -73,9 +73,14 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
break;
- case BFI_IOIM_STS_ABORTED:
case BFI_IOIM_STS_TIMEDOUT:
+ host_status = DID_TIME_OUT;
+ cmnd->result = ScsiResult(host_status, 0);
+ break;
case BFI_IOIM_STS_PATHTOV:
+ host_status = DID_TRANSPORT_DISRUPTED;
+ cmnd->result = ScsiResult(host_status, 0);
+ break;
default:
host_status = DID_ERROR;
cmnd->result = ScsiResult(host_status, 0);
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 1ebf3fb683e6..6a976657b475 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "2.4.1"
+#define BNX2FC_VERSION "2.4.2"
#define PFX "bnx2fc: "
@@ -367,6 +367,7 @@ struct bnx2fc_rport {
atomic_t num_active_ios;
u32 flush_in_prog;
unsigned long timestamp;
+ unsigned long retry_delay_timestamp;
struct list_head free_task_list;
struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
struct list_head active_cmd_queue;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 9b948505d118..1d41f4b9114f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Sep 17, 2013"
+#define DRV_MODULE_RELDATE "Dec 11, 2013"
static char version[] =
@@ -850,6 +850,9 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
__bnx2fc_destroy(interface);
}
mutex_unlock(&bnx2fc_dev_lock);
+
+ /* Ensure ALL destroy work has been completed before return */
+ flush_workqueue(bnx2fc_wq);
return;
default:
@@ -2389,6 +2392,9 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
__bnx2fc_destroy(interface);
mutex_unlock(&bnx2fc_dev_lock);
+ /* Ensure ALL destroy work has been completed before return */
+ flush_workqueue(bnx2fc_wq);
+
bnx2fc_ulp_stop(hba);
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
@@ -2586,12 +2592,16 @@ static int __init bnx2fc_mod_init(void)
spin_lock_init(&p->fp_work_lock);
}
+ cpu_notifier_register_begin();
+
for_each_online_cpu(cpu) {
bnx2fc_percpu_thread_create(cpu);
}
/* Initialize per CPU interrupt thread */
- register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+ __register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ cpu_notifier_register_done();
cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
@@ -2656,13 +2666,17 @@ static void __exit bnx2fc_mod_exit(void)
if (l2_thread)
kthread_stop(l2_thread);
- unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+ cpu_notifier_register_begin();
/* Destroy per cpu threads */
for_each_online_cpu(cpu) {
bnx2fc_percpu_thread_destroy(cpu);
}
+ __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ cpu_notifier_register_done();
+
destroy_workqueue(bnx2fc_wq);
/*
* detach from scsi transport
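The bnx2fc init/exit hunks above (and the matching bnx2i and fcoe hunks later in this diff) convert to the cpu_notifier_register_begin()/cpu_notifier_register_done() protocol, so walking the online CPUs and registering the hotplug notifier cannot race with a CPU coming or going in between. A condensed sketch of that pattern; the notifier block and the per-CPU helper are hypothetical stand-ins:

#include <linux/cpu.h>
#include <linux/init.h>

/* Hypothetical notifier and per-CPU helper; the locking protocol is
 * the only point of this sketch. */
static struct notifier_block example_cpu_notifier;

static void example_percpu_thread_create(int cpu)
{
	/* ... create and bind a worker thread for @cpu ... */
}

static int __init example_mod_init(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		example_percpu_thread_create(cpu);

	/* __register_hotcpu_notifier() is the variant meant to be called
	 * inside the begin/done pair, which already holds the lock. */
	__register_hotcpu_notifier(&example_cpu_notifier);

	cpu_notifier_register_done();
	return 0;
}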
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e9279a8c1e1c..32a5e0a2a669 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1871,7 +1871,15 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
rc = SCSI_MLQUEUE_TARGET_BUSY;
goto exit_qcmd;
}
-
+ if (tgt->retry_delay_timestamp) {
+ if (time_after(jiffies, tgt->retry_delay_timestamp)) {
+ tgt->retry_delay_timestamp = 0;
+ } else {
+ /* If retry_delay timer is active, flow off the ML */
+ rc = SCSI_MLQUEUE_TARGET_BUSY;
+ goto exit_qcmd;
+ }
+ }
io_req = bnx2fc_cmd_alloc(tgt);
if (!io_req) {
rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -1961,6 +1969,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
" fcp_resid = 0x%x\n",
io_req->cdb_status, io_req->fcp_resid);
sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+
+ if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
+ io_req->cdb_status == SAM_STAT_BUSY) {
+ /* Arm the retry window: jiffies + retry_delay_timer * 100ms
+ * for this rport/tgt */
+ tgt->retry_delay_timestamp = jiffies +
+ fcp_rsp->retry_delay_timer * HZ / 10;
+ }
+
}
if (io_req->fcp_resid)
scsi_set_resid(sc_cmd, io_req->fcp_resid);
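The bnx2fc_io.c hunks above add a jiffies-based back-off: a completion carrying TASK SET FULL or BUSY arms tgt->retry_delay_timestamp, and queuecommand returns SCSI_MLQUEUE_TARGET_BUSY until that window expires (the bnx2fc_tgt.c hunk below zeroes the field at init). A hedged sketch of the same idea with hypothetical names:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Hypothetical target; only the back-off field matters here. */
struct example_tgt {
	unsigned long retry_delay_timestamp;	/* 0 means "not armed" */
};

/* Called from queuecommand: true means "flow off, report TARGET_BUSY". */
static bool example_tgt_flow_off(struct example_tgt *tgt)
{
	if (!tgt->retry_delay_timestamp)
		return false;
	if (time_after(jiffies, tgt->retry_delay_timestamp)) {
		tgt->retry_delay_timestamp = 0;	/* window expired */
		return false;
	}
	return true;
}

/* Called on a TASK SET FULL / BUSY completion; retry_delay_timer is in
 * units of 100 ms, hence the HZ / 10 scaling used by the hunk above. */
static void example_tgt_arm_backoff(struct example_tgt *tgt, u16 retry_delay_timer)
{
	tgt->retry_delay_timestamp = jiffies + retry_delay_timer * HZ / 10;
}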
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index d9bae5672273..6870cf6781d9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -386,6 +386,7 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt->rq_prod_idx = 0x8000;
tgt->rq_cons_idx = 0;
atomic_set(&tgt->num_active_ios, 0);
+ tgt->retry_delay_timestamp = 0;
if (rdata->flags & FC_RP_FLAGS_RETRY &&
rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index b87a1933f880..b5ffd280a1ae 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1361,7 +1361,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
u32 datalen = 0;
resp_cqe = (struct bnx2i_cmd_response *)cqe;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->back_lock);
task = iscsi_itt_to_task(conn,
resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
if (!task)
@@ -1432,7 +1432,7 @@ done:
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
conn->data, datalen);
fail:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->back_lock);
return 0;
}
@@ -1457,7 +1457,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session,
int pad_len;
login = (struct bnx2i_login_response *) cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
if (!task)
@@ -1500,7 +1500,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session,
bnx2i_conn->gen_pdu.resp_buf,
bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
done:
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return 0;
}
@@ -1525,7 +1525,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session,
int pad_len;
text = (struct bnx2i_text_response *) cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
if (!task)
goto done;
@@ -1561,7 +1561,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session,
bnx2i_conn->gen_pdu.resp_wr_ptr -
bnx2i_conn->gen_pdu.resp_buf);
done:
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return 0;
}
@@ -1584,7 +1584,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session,
struct iscsi_tm_rsp *resp_hdr;
tmf_cqe = (struct bnx2i_tmf_response *)cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
if (!task)
@@ -1600,7 +1600,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session,
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
done:
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return 0;
}
@@ -1623,7 +1623,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session,
struct iscsi_logout_rsp *resp_hdr;
logout = (struct bnx2i_logout_response *) cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
if (!task)
@@ -1647,7 +1647,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session,
bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD;
done:
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return 0;
}
@@ -1668,12 +1668,12 @@ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
struct iscsi_task *task;
nop_in = (struct bnx2i_nop_in_msg *)cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
if (task)
__iscsi_put_task(task);
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
}
/**
@@ -1712,7 +1712,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
nop_in = (struct bnx2i_nop_in_msg *)cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
memset(hdr, 0, sizeof(struct iscsi_hdr));
hdr->opcode = nop_in->op_code;
@@ -1738,7 +1738,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
}
done:
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return tgt_async_nop;
}
@@ -1771,7 +1771,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,
return;
}
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
resp_hdr->opcode = async_cqe->op_code;
@@ -1790,7 +1790,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,
__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
(struct iscsi_hdr *)resp_hdr, NULL, 0);
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
}
@@ -1817,7 +1817,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session,
} else
bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
memset(hdr, 0, sizeof(struct iscsi_hdr));
hdr->opcode = reject->op_code;
@@ -1828,7 +1828,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session,
hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
reject->data_length);
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
}
/**
@@ -1848,13 +1848,13 @@ static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
struct iscsi_task *task;
cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
if (!task)
printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
complete(&bnx2i_conn->cmd_cleanup_cmpl);
}
@@ -1921,11 +1921,11 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
int rc = 0;
int cpu;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
if (!task || !task->sc) {
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return -EINVAL;
}
sc = task->sc;
@@ -1935,7 +1935,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
else
cpu = sc->request->cpu;
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
p = &per_cpu(bnx2i_percpu, cpu);
spin_lock(&p->p_work_lock);
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 34c294b42c84..80c03b452d61 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -537,11 +537,15 @@ static int __init bnx2i_mod_init(void)
p->iothread = NULL;
}
+ cpu_notifier_register_begin();
+
for_each_online_cpu(cpu)
bnx2i_percpu_thread_create(cpu);
/* Initialize per CPU interrupt thread */
- register_hotcpu_notifier(&bnx2i_cpu_notifier);
+ __register_hotcpu_notifier(&bnx2i_cpu_notifier);
+
+ cpu_notifier_register_done();
return 0;
@@ -581,11 +585,15 @@ static void __exit bnx2i_mod_exit(void)
}
mutex_unlock(&bnx2i_dev_lock);
- unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu)
bnx2i_percpu_thread_destroy(cpu);
+ __unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
+
+ cpu_notifier_register_done();
+
iscsi_unregister_transport(&bnx2i_iscsi_transport);
cnic_unregister_driver(CNIC_ULP_ISCSI);
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index c8b0aff5bbd4..166543f7ef55 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1170,10 +1170,10 @@ static void bnx2i_cleanup_task(struct iscsi_task *task)
if (task->state == ISCSI_TASK_ABRT_TMF) {
bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->back_lock);
wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
- spin_lock_bh(&conn->session->lock);
+ spin_lock_bh(&conn->session->back_lock);
}
bnx2i_iscsi_unmap_sg_list(task->dd_data);
}
@@ -2060,7 +2060,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
goto out;
if (session) {
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
if (session->state == ISCSI_STATE_LOGGING_OUT) {
if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
@@ -2076,7 +2076,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
} else
close = 1;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
}
bnx2i_ep->state = EP_STATE_DISCONN_START;
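The bnx2i hunks above follow libiscsi's split of the old session->lock into frwd_lock (submission/teardown side) and back_lock (completion side): the CQE handlers now take back_lock, while the endpoint disconnect path takes frwd_lock. A minimal sketch of the completion-side usage, with hypothetical names:

#include <linux/spinlock.h>

/* Hypothetical session mirroring the split-lock layout. */
struct example_session {
	spinlock_t frwd_lock;	/* submission / teardown path */
	spinlock_t back_lock;	/* completion path */
};

/* Completion-side handler: only the back lock is needed to look up and
 * complete the task, so submitters on other CPUs are not serialized. */
static void example_process_cqe(struct example_session *session)
{
	spin_lock_bh(&session->back_lock);
	/* ... iscsi_itt_to_task() + __iscsi_complete_pdu() style work ... */
	spin_unlock_bh(&session->back_lock);
}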
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a9f84238a53..e8ee5e5fe0ef 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -175,52 +175,6 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
sizeof(struct fw_ofld_tx_data_wr));
}
-
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
- (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
- (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct cxgbi_device *cdev,
- struct l2t_entry *l2t)
-{
- struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
- unsigned int ntuple = 0;
- u32 viid;
-
- switch (lldi->filt_mode) {
-
- /* default filter mode */
- case HW_TPL_FR_MT_PR_IV_P_FC:
- if (l2t->vlan == VLAN_NONE)
- ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
- else {
- ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
- ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- }
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- case HW_TPL_FR_MT_PR_OV_P_FC: {
- viid = cxgb4_port_viid(l2t->neigh->dev);
-
- ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
- ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
- ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- }
- default:
- break;
- }
- return ntuple;
-}
-
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *e)
{
@@ -248,8 +202,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
struct cpl_act_open_req *req =
(struct cpl_act_open_req *)skb->head;
- req = (struct cpl_act_open_req *)skb->head;
-
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
qid_atid));
@@ -258,7 +210,9 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
req->local_ip = csk->saddr.sin_addr.s_addr;
req->peer_ip = csk->daddr.sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+ req->params = cpu_to_be32(cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t));
opt2 |= 1 << 22;
req->opt2 = cpu_to_be32(opt2);
@@ -271,8 +225,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
struct cpl_t5_act_open_req *req =
(struct cpl_t5_act_open_req *)skb->head;
- req = (struct cpl_t5_act_open_req *)skb->head;
-
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
qid_atid));
@@ -281,7 +233,10 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
req->local_ip = csk->saddr.sin_addr.s_addr;
req->peer_ip = csk->daddr.sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+ req->params = cpu_to_be64(V_FILTER_TUPLE(
+ cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
opt2 |= 1 << 31;
req->opt2 = cpu_to_be32(opt2);
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index d01f01604140..eb29fe7eaf49 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -277,7 +277,7 @@ found:
/* With interrupts enabled, it will sometimes hang when doing heavy
* reads. So better not enable them until I figure it out. */
if (instance->irq != SCSI_IRQ_NONE)
- if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED,
+ if (request_irq(instance->irq, dtc_intr, 0,
"dtc", instance)) {
printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
instance->irq = SCSI_IRQ_NONE;
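This dtc.c hunk, like the eata, eata_pio, esas2r, g_NCR5380 and gdth hunks that follow, drops IRQF_DISABLED from request_irq(): the flag has long been a no-op since handlers always run with interrupts disabled, so only flags that still matter (such as IRQF_SHARED) are kept. A small illustrative registration, with hypothetical names:

#include <linux/interrupt.h>

static irqreturn_t example_intr(int irq, void *dev_id)
{
	/* ... service the controller ... */
	return IRQ_HANDLED;
}

static int example_claim_irq(unsigned int irq, void *dev)
{
	/* IRQF_DISABLED is gone; pass 0, or keep only flags that still
	 * mean something, such as IRQF_SHARED for shared lines. */
	return request_irq(irq, example_intr, IRQF_SHARED, "example", dev);
}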
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 94de88955a99..ebf57364df91 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1221,7 +1221,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
/* Board detected, allocate its IRQ */
if (request_irq(irq, do_interrupt_handler,
- IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
+ (subversion == ESA) ? IRQF_SHARED : 0,
driver_name, (void *)&sha[j])) {
printk("%s: unable to allocate IRQ %u, detaching.\n", name,
irq);
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 1663173cdb91..8319d2b417b8 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -687,7 +687,7 @@ static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev
return 0;
if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? */
- if (!request_irq(gc->IRQ, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", sh)) {
+ if (!request_irq(gc->IRQ, do_eata_pio_int_handler, 0, "EATA-PIO", sh)) {
reg_IRQ[gc->IRQ]++;
if (!gc->IRQ_TR)
reg_IRQL[gc->IRQ] = 1; /* IRQ is edge triggered */
@@ -921,7 +921,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
for (i = 0; i < MAXIRQ; i++)
if (reg_IRQ[i])
- request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL);
+ request_irq(i, do_eata_pio_int_handler, 0, "EATA-PIO", NULL);
HBA_ptr = first_HBA;
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index b9750e296d71..6776931e25d4 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -231,7 +231,7 @@ use_legacy_interrupts:
static void esas2r_claim_interrupts(struct esas2r_adapter *a)
{
- unsigned long flags = IRQF_DISABLED;
+ unsigned long flags = 0;
if (a->intr_mode == INTR_MODE_LEGACY)
flags |= IRQF_SHARED;
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
index 9bf285df58dd..a82030aa8577 100644
--- a/drivers/scsi/esas2r/esas2r_log.c
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -165,13 +165,9 @@ static int esas2r_log_master(const long level,
/*
* Put a line break at the end of the formatted string so that
- * we don't wind up with run-on messages. only append if there
- * is enough space in the buffer.
+ * we don't wind up with run-on messages.
*/
- if (strlen(event_buffer) < buflen)
- strcat(buffer, "\n");
-
- printk(event_buffer);
+ printk("%s\n", event_buffer);
spin_unlock_irqrestore(&event_buffer_lock, flags);
}
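The esas2r_log.c hunk above replaces printk(event_buffer) with printk("%s\n", event_buffer); passing log data as the format string becomes a format-string bug as soon as the message contains a '%'. A tiny sketch of the safe form, with a hypothetical wrapper name:

#include <linux/printk.h>

/* Hypothetical logger wrapper: the buffer is data, never the format. */
static void example_log_line(const char *event_buffer)
{
	printk(KERN_INFO "%s\n", event_buffer);
}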
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f3170008ae71..d5e105b173f0 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2633,14 +2633,18 @@ static int __init fcoe_init(void)
skb_queue_head_init(&p->fcoe_rx_list);
}
+ cpu_notifier_register_begin();
+
for_each_online_cpu(cpu)
fcoe_percpu_thread_create(cpu);
/* Initialize per CPU interrupt thread */
- rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
+ rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);
if (rc)
goto out_free;
+ cpu_notifier_register_done();
+
/* Setup link change notification */
fcoe_dev_setup();
@@ -2655,6 +2659,9 @@ out_free:
for_each_online_cpu(cpu) {
fcoe_percpu_thread_destroy(cpu);
}
+
+ cpu_notifier_register_done();
+
mutex_unlock(&fcoe_config_mutex);
destroy_workqueue(fcoe_wq);
return rc;
@@ -2687,11 +2694,15 @@ static void __exit fcoe_exit(void)
}
rtnl_unlock();
- unregister_hotcpu_notifier(&fcoe_cpu_notifier);
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu)
fcoe_percpu_thread_destroy(cpu);
+ __unregister_hotcpu_notifier(&fcoe_cpu_notifier);
+
+ cpu_notifier_register_done();
+
mutex_unlock(&fcoe_config_mutex);
/*
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 5cec6c60ca22..7176365e916b 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -461,7 +461,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
if (instance->irq != SCSI_IRQ_NONE)
if (request_irq(instance->irq, generic_NCR5380_intr,
- IRQF_DISABLED, "NCR5380", instance)) {
+ 0, "NCR5380", instance)) {
printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
instance->irq = SCSI_IRQ_NONE;
}
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index ce5ef0190bad..0f1ae13ce7c7 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4711,7 +4711,7 @@ static int __init gdth_isa_probe_one(u32 isa_bios)
printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
isa_bios, ha->irq, ha->drq);
- error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+ error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
if (error) {
printk("GDT-ISA: Unable to allocate IRQ\n");
goto out_host_put;
@@ -4843,7 +4843,7 @@ static int __init gdth_eisa_probe_one(u16 eisa_slot)
printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
eisa_slot >> 12, ha->irq);
- error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+ error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
if (error) {
printk("GDT-EISA: Unable to allocate IRQ\n");
goto out_host_put;
@@ -4979,7 +4979,7 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
ha->irq);
error = request_irq(ha->irq, gdth_interrupt,
- IRQF_DISABLED|IRQF_SHARED, "gdth", ha);
+ IRQF_SHARED, "gdth", ha);
if (error) {
printk("GDT-PCI: Unable to allocate IRQ\n");
goto out_host_put;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f28ea070d3df..3cbb57a8b846 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -398,7 +398,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->ordered_tag = sht->ordered_tag;
shost->no_write_same = sht->no_write_same;
- if (shost_eh_deadline == -1)
+ if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
shost->eh_deadline = -1;
else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
shost_printk(KERN_WARNING, shost,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 868318a7067c..8cf4a0c69baf 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,6 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
- * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -47,13 +47,13 @@
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
-#include <linux/kthread.h>
#include <linux/jiffies.h>
+#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "3.4.0-1"
+#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -118,6 +118,11 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
+ {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -163,6 +168,11 @@ static struct board_type products[] = {
{0x21C7103C, "Smart Array", &SA5_access},
{0x21C8103C, "Smart Array", &SA5_access},
{0x21C9103C, "Smart Array", &SA5_access},
+ {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
+ {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
+ {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
+ {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
+ {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
@@ -182,8 +192,9 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
- void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+ void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
int cmd_type);
+#define VPD_PAGE (1 << 8)
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
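The prototype change above widens fill_cmd()'s page_code to u16 and introduces VPD_PAGE (1 << 8), so later hunks can request VPD inquiries as VPD_PAGE | 0xC1 or VPD_PAGE | 0x83 in a single argument. A hedged illustration of how such a combined value could be decoded into an INQUIRY CDB; this is not the driver's actual fill_cmd, and the macro and function names are made up:

#include <linux/types.h>

#define EXAMPLE_VPD_PAGE (1 << 8)

/* Illustrative decode: bit 8 selects an EVPD inquiry, the low byte
 * carries the VPD page number. */
static void example_build_inquiry_cdb(u8 *cdb, u16 page_code, u8 bufsize)
{
	cdb[0] = 0x12;				/* INQUIRY */
	if (page_code & EXAMPLE_VPD_PAGE) {
		cdb[1] = 0x01;			/* EVPD */
		cdb[2] = page_code & 0xff;	/* VPD page */
	}
	cdb[4] = bufsize;			/* allocation length */
}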
@@ -204,7 +215,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
- int nsgs, int *bucket_map);
+ int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
@@ -216,8 +227,14 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
+static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
+static void hpsa_drain_accel_commands(struct ctlr_info *h);
+static void hpsa_flush_cache(struct ctlr_info *h);
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
@@ -280,6 +297,55 @@ static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
return 1;
}
+static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int status, len;
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ char tmpbuf[10];
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+ strncpy(tmpbuf, buf, len);
+ tmpbuf[len] = '\0';
+ if (sscanf(tmpbuf, "%d", &status) != 1)
+ return -EINVAL;
+ h = shost_to_hba(shost);
+ h->acciopath_status = !!status;
+ dev_warn(&h->pdev->dev,
+ "hpsa: HP SSD Smart Path %s via sysfs update.\n",
+ h->acciopath_status ? "enabled" : "disabled");
+ return count;
+}
+
+static ssize_t host_store_raid_offload_debug(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int debug_level, len;
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ char tmpbuf[10];
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+ strncpy(tmpbuf, buf, len);
+ tmpbuf[len] = '\0';
+ if (sscanf(tmpbuf, "%d", &debug_level) != 1)
+ return -EINVAL;
+ if (debug_level < 0)
+ debug_level = 0;
+ h = shost_to_hba(shost);
+ h->raid_offload_debug = debug_level;
+ dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
+ h->raid_offload_debug);
+ return count;
+}
+
static ssize_t host_store_rescan(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -327,6 +393,17 @@ static ssize_t host_show_transport_mode(struct device *dev,
"performant" : "simple");
}
+static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ return snprintf(buf, 30, "HP SSD Smart Path %s\n",
+ (h->acciopath_status == 1) ? "enabled" : "disabled");
+}
+
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
0x324a103C, /* Smart Array P712m */
@@ -416,6 +493,13 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
"1(ADM)", "UNKNOWN"
};
+#define HPSA_RAID_0 0
+#define HPSA_RAID_4 1
+#define HPSA_RAID_1 2 /* also used for RAID 10 */
+#define HPSA_RAID_5 3 /* also used for RAID 50 */
+#define HPSA_RAID_51 4
+#define HPSA_RAID_6 5 /* also used for RAID 60 */
+#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
static ssize_t raid_level_show(struct device *dev,
@@ -504,10 +588,39 @@ static ssize_t unique_id_show(struct device *dev,
sn[12], sn[13], sn[14], sn[15]);
}
+static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ int offload_enabled;
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+ offload_enabled = hdev->offload_enabled;
+ spin_unlock_irqrestore(&h->lock, flags);
+ return snprintf(buf, 20, "%d\n", offload_enabled);
+}
+
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
+ host_show_hp_ssd_smart_path_enabled, NULL);
+static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
+ host_show_hp_ssd_smart_path_status,
+ host_store_hp_ssd_smart_path_status);
+static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
+ host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
@@ -521,6 +634,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
&dev_attr_lunid,
&dev_attr_unique_id,
+ &dev_attr_hp_ssd_smart_path_enabled,
NULL,
};
@@ -530,6 +644,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {
&dev_attr_commands_outstanding,
&dev_attr_transport_mode,
&dev_attr_resettable,
+ &dev_attr_hp_ssd_smart_path_status,
+ &dev_attr_raid_offload_debug,
NULL,
};
@@ -570,6 +686,9 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
struct reply_pool *rq = &h->reply_queue[q];
unsigned long flags;
+ if (h->transMethod & CFGTBL_Trans_io_accel1)
+ return h->access.command_completed(h, q);
+
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
return h->access.command_completed(h, q);
@@ -590,6 +709,32 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
return a;
}
+/*
+ * There are some special bits in the bus address of the
+ * command that we have to set for the controller to know
+ * how to process the command:
+ *
+ * Normal performant mode:
+ * bit 0: 1 means performant mode, 0 means simple mode.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 0)
+ *
+ * ioaccel1 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 110)
+ * (command type is needed because ioaccel1 mode
+ * commands are submitted through the same register as normal
+ * mode commands, so this is how the controller knows whether
+ * the command is normal mode or ioaccel1 mode.)
+ *
+ * ioaccel2 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-4 = block fetch table entry (note extra bit)
+ * bits 4-6 = not needed, because ioaccel2 mode has
+ * a separate special register for submitting commands.
+ */
+
/* set_performant_mode: Modify the tag for cciss performant
* set bit 0 for pull model, bits 3-1 for block fetch
* register number
@@ -598,12 +743,47 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
- if (likely(h->msix_vector))
+ if (likely(h->msix_vector > 0))
c->Header.ReplyQueue =
raw_smp_processor_id() % h->nreply_queues;
}
}
+static void set_ioaccel1_performant_mode(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+
+ /* Tell the controller to post the reply to the queue for this
+ * processor. This seems to give the best I/O throughput.
+ */
+ cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
+ /* Set the bits in the address sent down to include:
+ * - performant mode bit (bit 0)
+ * - pull count (bits 1-3)
+ * - command type (bits 4-6)
+ */
+ c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
+ IOACCEL1_BUSADDR_CMDTYPE;
+}
+
+static void set_ioaccel2_performant_mode(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+
+ /* Tell the controller to post the reply to the queue for this
+ * processor. This seems to give the best I/O throughput.
+ */
+ cp->reply_queue = smp_processor_id() % h->nreply_queues;
+ /* Set the bits in the address sent down to include:
+ * - performant mode bit not used in ioaccel mode 2
+ * - pull count (bits 0-3)
+ * - command type isn't needed for ioaccel2
+ */
+ c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
+}
+
static int is_firmware_flash_cmd(u8 *cdb)
{
return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
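The comment block added above describes how the low bits of busaddr select the submission mode. A worked example for the performant-mode case, using a made-up pull count; the helper name is hypothetical:

#include <linux/types.h>

/* Worked example of the encoding described above: with a pull count of
 * 3, performant mode yields a low nibble of 0x7 -- bit 0 = performant,
 * bits 1-3 = block-fetch (pull) count. */
static u32 example_performant_busaddr(u32 busaddr, u8 pull_count)
{
	return busaddr | 1 | (pull_count << 1);	/* 1 | (3 << 1) == 0x7 */
}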
@@ -638,7 +818,16 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
{
unsigned long flags;
- set_performant_mode(h, c);
+ switch (c->cmd_type) {
+ case CMD_IOACCEL1:
+ set_ioaccel1_performant_mode(h, c);
+ break;
+ case CMD_IOACCEL2:
+ set_ioaccel2_performant_mode(h, c);
+ break;
+ default:
+ set_performant_mode(h, c);
+ }
dial_down_lockup_detection_during_fw_flash(h, c);
spin_lock_irqsave(&h->lock, flags);
addQ(&h->reqQ, c);
@@ -782,6 +971,14 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
/* Raid level changed. */
h->dev[entry]->raid_level = new_entry->raid_level;
+
+ /* Raid offload parameters changed. */
+ h->dev[entry]->offload_config = new_entry->offload_config;
+ h->dev[entry]->offload_enabled = new_entry->offload_enabled;
+ h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+ h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
+ h->dev[entry]->raid_map = new_entry->raid_map;
+
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
new_entry->target, new_entry->lun);
@@ -902,6 +1099,10 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
*/
if (dev1->raid_level != dev2->raid_level)
return 1;
+ if (dev1->offload_config != dev2->offload_config)
+ return 1;
+ if (dev1->offload_enabled != dev2->offload_enabled)
+ return 1;
return 0;
}
@@ -932,6 +1133,9 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
return DEVICE_UPDATED;
return DEVICE_SAME;
} else {
+ /* Keep offline devices offline */
+ if (needle->volume_offline)
+ return DEVICE_NOT_FOUND;
return DEVICE_CHANGED;
}
}
@@ -940,6 +1144,110 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
return DEVICE_NOT_FOUND;
}
+static void hpsa_monitor_offline_device(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ struct offline_device_entry *device;
+ unsigned long flags;
+
+ /* Check to see if device is already on the list */
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_for_each_entry(device, &h->offline_device_list, offline_list) {
+ if (memcmp(device->scsi3addr, scsi3addr,
+ sizeof(device->scsi3addr)) == 0) {
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+
+ /* Device is not on the list, add it. */
+ device = kmalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+ return;
+ }
+ memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_add_tail(&device->offline_list, &h->offline_device_list);
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+}
+
+/* Print a message explaining various offline volume states */
+static void hpsa_show_volume_status(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *sd)
+{
+ if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ switch (sd->volume_offline) {
+ case HPSA_LV_OK:
+ break;
+ case HPSA_LV_UNDERGOING_ERASE:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_RPI:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_RPI:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_ENCRYPTED_NO_KEY:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_ENCRYPTION:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_ENCRYPTION:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ }
+}
+
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
struct hpsa_scsi_dev_t *sd[], int nsds)
{
@@ -1004,6 +1312,20 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
for (i = 0; i < nsds; i++) {
if (!sd[i]) /* if already added above. */
continue;
+
+ /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
+ * as the SCSI mid-layer does not handle such devices well.
+ * It relentlessly loops sending TUR at 3Hz, then READ(10)
+ * at 160Hz, and prevents the system from coming up.
+ */
+ if (sd[i]->volume_offline) {
+ hpsa_show_volume_status(h, sd[i]);
+ dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
+ h->scsi_host->host_no,
+ sd[i]->bus, sd[i]->target, sd[i]->lun);
+ continue;
+ }
+
device_change = hpsa_scsi_find_entry(sd[i], h->dev,
h->ndevices, &entry);
if (device_change == DEVICE_NOT_FOUND) {
@@ -1022,6 +1344,17 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
}
spin_unlock_irqrestore(&h->devlock, flags);
+ /* Monitor devices which are in one of several NOT READY states to be
+ * brought online later. This must be done without holding h->devlock,
+ * so don't touch h->dev[]
+ */
+ for (i = 0; i < nsds; i++) {
+ if (!sd[i]) /* if already added above. */
+ continue;
+ if (sd[i]->volume_offline)
+ hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
+ }
+
/* Don't notify scsi mid layer of any changes the first time through
* (or if there are no changes) scsi_scan_host will do it later the
* first time through.
@@ -1187,11 +1520,163 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}
+
+/* Decode the various types of errors on ioaccel2 path.
+ * Return 1 for any error that should generate a RAID path retry.
+ * Return 0 for errors that don't require a RAID path retry.
+ */
+static int handle_ioaccel_mode2_error(struct ctlr_info *h,
+ struct CommandList *c,
+ struct scsi_cmnd *cmd,
+ struct io_accel2_cmd *c2)
+{
+ int data_len;
+ int retry = 0;
+
+ switch (c2->error_data.serv_response) {
+ case IOACCEL2_SERV_RESPONSE_COMPLETE:
+ switch (c2->error_data.status) {
+ case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with check condition.\n",
+ "HP SSD Smart Path");
+ if (c2->error_data.data_present !=
+ IOACCEL2_SENSE_DATA_PRESENT)
+ break;
+ /* copy the sense data */
+ data_len = c2->error_data.sense_data_len;
+ if (data_len > SCSI_SENSE_BUFFERSIZE)
+ data_len = SCSI_SENSE_BUFFERSIZE;
+ if (data_len > sizeof(c2->error_data.sense_data_buff))
+ data_len =
+ sizeof(c2->error_data.sense_data_buff);
+ memcpy(cmd->sense_buffer,
+ c2->error_data.sense_data_buff, data_len);
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with BUSY status.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with reservation conflict.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
+ /* Make scsi midlayer do unlimited retries */
+ cmd->result = DID_IMM_RETRY << 16;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with aborted status.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ default:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with unrecognized status: 0x%02x\n",
+ "HP SSD Smart Path", c2->error_data.status);
+ retry = 1;
+ break;
+ }
+ break;
+ case IOACCEL2_SERV_RESPONSE_FAILURE:
+ /* don't expect to get here. */
+ dev_warn(&h->pdev->dev,
+ "unexpected delivery or target failure, status = 0x%02x\n",
+ c2->error_data.status);
+ retry = 1;
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
+ dev_warn(&h->pdev->dev, "task management function rejected.\n");
+ retry = 1;
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
+ dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
+ break;
+ default:
+ dev_warn(&h->pdev->dev,
+ "%s: Unrecognized server response: 0x%02x\n",
+ "HP SSD Smart Path",
+ c2->error_data.serv_response);
+ retry = 1;
+ break;
+ }
+
+ return retry; /* retry on raid path? */
+}
+
+static void process_ioaccel2_completion(struct ctlr_info *h,
+ struct CommandList *c, struct scsi_cmnd *cmd,
+ struct hpsa_scsi_dev_t *dev)
+{
+ struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
+ int raid_retry = 0;
+
+ /* check for good status */
+ if (likely(c2->error_data.serv_response == 0 &&
+ c2->error_data.status == 0)) {
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+ return;
+ }
+
+ /* Any RAID offload error results in retry which will use
+ * the normal I/O path so the controller can handle whatever's
+ * wrong.
+ */
+ if (is_logical_dev_addr_mode(dev->scsi3addr) &&
+ c2->error_data.serv_response ==
+ IOACCEL2_SERV_RESPONSE_FAILURE) {
+ if (c2->error_data.status ==
+ IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
+ dev_warn(&h->pdev->dev,
+ "%s: Path is unavailable, retrying on standard path.\n",
+ "HP SSD Smart Path");
+ else
+ dev_warn(&h->pdev->dev,
+ "%s: Error 0x%02x, retrying on standard path.\n",
+ "HP SSD Smart Path", c2->error_data.status);
+
+ dev->offload_enabled = 0;
+ h->drv_req_rescan = 1; /* schedule controller for a rescan */
+ cmd->result = DID_SOFT_ERROR << 16;
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+ return;
+ }
+ raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
+ /* If error found, disable Smart Path, schedule a rescan,
+ * and force a retry on the standard path.
+ */
+ if (raid_retry) {
+ dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
+ "HP SSD Smart Path");
+ dev->offload_enabled = 0; /* Disable Smart Path */
+ h->drv_req_rescan = 1; /* schedule controller rescan */
+ cmd->result = DID_SOFT_ERROR << 16;
+ }
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+}
+
static void complete_scsi_command(struct CommandList *cp)
{
struct scsi_cmnd *cmd;
struct ctlr_info *h;
struct ErrorInfo *ei;
+ struct hpsa_scsi_dev_t *dev;
unsigned char sense_key;
unsigned char asc; /* additional sense code */
@@ -1201,13 +1686,19 @@ static void complete_scsi_command(struct CommandList *cp)
ei = cp->err_info;
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
h = cp->h;
+ dev = cmd->device->hostdata;
scsi_dma_unmap(cmd); /* undo the DMA mappings */
- if (cp->Header.SGTotal > h->max_cmd_sg_entries)
+ if ((cp->cmd_type == CMD_SCSI) &&
+ (cp->Header.SGTotal > h->max_cmd_sg_entries))
hpsa_unmap_sg_chain_block(h, cp);
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+
+ if (cp->cmd_type == CMD_IOACCEL2)
+ return process_ioaccel2_completion(h, cp, cmd, dev);
+
cmd->result |= ei->ScsiStatus;
/* copy the sense data whether we need to or not. */
@@ -1227,6 +1718,32 @@ static void complete_scsi_command(struct CommandList *cp)
return;
}
+ /* For I/O accelerator commands, copy over some fields to the normal
+ * CISS header used below for error handling.
+ */
+ if (cp->cmd_type == CMD_IOACCEL1) {
+ struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
+ cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
+ cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
+ cp->Header.Tag.lower = c->Tag.lower;
+ cp->Header.Tag.upper = c->Tag.upper;
+ memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
+ memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
+
+ /* Any RAID offload error results in retry which will use
+ * the normal I/O path so the controller can handle whatever's
+ * wrong.
+ */
+ if (is_logical_dev_addr_mode(dev->scsi3addr)) {
+ if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
+ dev->offload_enabled = 0;
+ cmd->result = DID_SOFT_ERROR << 16;
+ cmd_free(h, cp);
+ cmd->scsi_done(cmd);
+ return;
+ }
+ }
+
/* an error has occurred */
switch (ei->CommandStatus) {
@@ -1389,6 +1906,14 @@ static void complete_scsi_command(struct CommandList *cp)
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "Command unabortable\n");
break;
+ case CMD_IOACCEL_DISABLED:
+ /* This only handles the direct pass-through case since RAID
+ * offload is handled above. Just attempt a retry.
+ */
+ cmd->result = DID_SOFT_ERROR << 16;
+ dev_warn(&h->pdev->dev,
+ "cp %p had HP SSD Smart Path error\n", cp);
+ break;
default:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1438,6 +1963,7 @@ static int hpsa_map_one(struct pci_dev *pdev,
cp->SG[0].Addr.upper =
(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Len = buflen;
+ cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
return 0;
@@ -1490,17 +2016,37 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
-static void hpsa_scsi_interpret_error(struct CommandList *cp)
+static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
+ struct CommandList *c)
{
- struct ErrorInfo *ei;
+ const u8 *cdb = c->Request.CDB;
+ const u8 *lun = c->Header.LUN.LunAddrBytes;
+
+ dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
+ " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ txt, lun[0], lun[1], lun[2], lun[3],
+ lun[4], lun[5], lun[6], lun[7],
+ cdb[0], cdb[1], cdb[2], cdb[3],
+ cdb[4], cdb[5], cdb[6], cdb[7],
+ cdb[8], cdb[9], cdb[10], cdb[11],
+ cdb[12], cdb[13], cdb[14], cdb[15]);
+}
+
+static void hpsa_scsi_interpret_error(struct ctlr_info *h,
+ struct CommandList *cp)
+{
+ const struct ErrorInfo *ei = cp->err_info;
struct device *d = &cp->h->pdev->dev;
+ const u8 *sd = ei->SenseInfo;
- ei = cp->err_info;
switch (ei->CommandStatus) {
case CMD_TARGET_STATUS:
- dev_warn(d, "cmd %p has completed with errors\n", cp);
- dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
- ei->ScsiStatus);
+ hpsa_print_cmd(h, "SCSI status", cp);
+ if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
+ dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
+ sd[2] & 0x0f, sd[12], sd[13]);
+ else
+ dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
if (ei->ScsiStatus == 0)
dev_warn(d, "SCSI status is abnormally zero. "
"(probably indicates selection timeout "
@@ -1508,54 +2054,51 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)
"firmware bug, circa July, 2001.)\n");
break;
case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
- dev_info(d, "UNDERRUN\n");
break;
case CMD_DATA_OVERRUN:
- dev_warn(d, "cp %p has completed with data overrun\n", cp);
+ hpsa_print_cmd(h, "overrun condition", cp);
break;
case CMD_INVALID: {
/* controller unfortunately reports SCSI passthru's
* to non-existent targets as invalid commands.
*/
- dev_warn(d, "cp %p is reported invalid (probably means "
- "target device no longer present)\n", cp);
- /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
- print_cmd(cp); */
+ hpsa_print_cmd(h, "invalid command", cp);
+ dev_warn(d, "probably means device no longer present\n");
}
break;
case CMD_PROTOCOL_ERR:
- dev_warn(d, "cp %p has protocol error \n", cp);
+ hpsa_print_cmd(h, "protocol error", cp);
break;
case CMD_HARDWARE_ERR:
- /* cmd->result = DID_ERROR << 16; */
- dev_warn(d, "cp %p had hardware error\n", cp);
+ hpsa_print_cmd(h, "hardware error", cp);
break;
case CMD_CONNECTION_LOST:
- dev_warn(d, "cp %p had connection lost\n", cp);
+ hpsa_print_cmd(h, "connection lost", cp);
break;
case CMD_ABORTED:
- dev_warn(d, "cp %p was aborted\n", cp);
+ hpsa_print_cmd(h, "aborted", cp);
break;
case CMD_ABORT_FAILED:
- dev_warn(d, "cp %p reports abort failed\n", cp);
+ hpsa_print_cmd(h, "abort failed", cp);
break;
case CMD_UNSOLICITED_ABORT:
- dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
+ hpsa_print_cmd(h, "unsolicited abort", cp);
break;
case CMD_TIMEOUT:
- dev_warn(d, "cp %p timed out\n", cp);
+ hpsa_print_cmd(h, "timed out", cp);
break;
case CMD_UNABORTABLE:
- dev_warn(d, "Command unabortable\n");
+ hpsa_print_cmd(h, "unabortable", cp);
break;
default:
- dev_warn(d, "cp %p returned unknown status %x\n", cp,
+ hpsa_print_cmd(h, "unknown status", cp);
+ dev_warn(d, "Unknown command status %x\n",
ei->CommandStatus);
}
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
- unsigned char page, unsigned char *buf,
+ u16 page, unsigned char *buf,
unsigned char bufsize)
{
int rc = IO_OK;
@@ -1577,7 +2120,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
- hpsa_scsi_interpret_error(c);
+ hpsa_scsi_interpret_error(h, c);
rc = -1;
}
out:
@@ -1585,7 +2128,39 @@ out:
return rc;
}
-static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
+static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
+ unsigned char *scsi3addr, unsigned char page,
+ struct bmic_controller_parameters *buf, size_t bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+
+ if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+out:
+ cmd_special_free(h, c);
+ return rc;
+}
+
+static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+ u8 reset_type)
{
int rc = IO_OK;
struct CommandList *c;
@@ -1599,14 +2174,15 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
}
/* fill_cmd can't fail here, no data buffer to map. */
- (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
- NULL, 0, 0, scsi3addr, TYPE_MSG);
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+ scsi3addr, TYPE_MSG);
+ c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
ei = c->err_info;
if (ei->CommandStatus != 0) {
- hpsa_scsi_interpret_error(c);
+ hpsa_scsi_interpret_error(h, c);
rc = -1;
}
cmd_special_free(h, c);
@@ -1623,7 +2199,7 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return;
- rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
if (rc == 0)
*raid_level = buf[8];
if (*raid_level > RAID_UNKNOWN)
@@ -1632,6 +2208,204 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
return;
}
+#define HPSA_MAP_DEBUG
+#ifdef HPSA_MAP_DEBUG
+static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
+ struct raid_map_data *map_buff)
+{
+ struct raid_map_disk_data *dd = &map_buff->data[0];
+ int map, row, col;
+ u16 map_cnt, row_cnt, disks_per_row;
+
+ if (rc != 0)
+ return;
+
+ /* Show details only if debugging has been activated. */
+ if (h->raid_offload_debug < 2)
+ return;
+
+ dev_info(&h->pdev->dev, "structure_size = %u\n",
+ le32_to_cpu(map_buff->structure_size));
+ dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
+ le32_to_cpu(map_buff->volume_blk_size));
+ dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
+ le64_to_cpu(map_buff->volume_blk_cnt));
+ dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
+ map_buff->phys_blk_shift);
+ dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
+ map_buff->parity_rotation_shift);
+ dev_info(&h->pdev->dev, "strip_size = %u\n",
+ le16_to_cpu(map_buff->strip_size));
+ dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
+ le64_to_cpu(map_buff->disk_starting_blk));
+ dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
+ le64_to_cpu(map_buff->disk_blk_cnt));
+ dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
+ le16_to_cpu(map_buff->data_disks_per_row));
+ dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
+ le16_to_cpu(map_buff->metadata_disks_per_row));
+ dev_info(&h->pdev->dev, "row_cnt = %u\n",
+ le16_to_cpu(map_buff->row_cnt));
+ dev_info(&h->pdev->dev, "layout_map_count = %u\n",
+ le16_to_cpu(map_buff->layout_map_count));
+ dev_info(&h->pdev->dev, "flags = %u\n",
+ le16_to_cpu(map_buff->flags));
+ if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
+ dev_info(&h->pdev->dev, "encrypytion = ON\n");
+ else
+ dev_info(&h->pdev->dev, "encrypytion = OFF\n");
+ dev_info(&h->pdev->dev, "dekindex = %u\n",
+ le16_to_cpu(map_buff->dekindex));
+
+ map_cnt = le16_to_cpu(map_buff->layout_map_count);
+ for (map = 0; map < map_cnt; map++) {
+ dev_info(&h->pdev->dev, "Map%u:\n", map);
+ row_cnt = le16_to_cpu(map_buff->row_cnt);
+ for (row = 0; row < row_cnt; row++) {
+ dev_info(&h->pdev->dev, " Row%u:\n", row);
+ disks_per_row =
+ le16_to_cpu(map_buff->data_disks_per_row);
+ for (col = 0; col < disks_per_row; col++, dd++)
+ dev_info(&h->pdev->dev,
+ " D%02u: h=0x%04x xor=%u,%u\n",
+ col, dd->ioaccel_handle,
+ dd->xor_mult[0], dd->xor_mult[1]);
+ disks_per_row =
+ le16_to_cpu(map_buff->metadata_disks_per_row);
+ for (col = 0; col < disks_per_row; col++, dd++)
+ dev_info(&h->pdev->dev,
+ " M%02u: h=0x%04x xor=%u,%u\n",
+ col, dd->ioaccel_handle,
+ dd->xor_mult[0], dd->xor_mult[1]);
+ }
+ }
+}
+#else
+static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
+ __attribute__((unused)) int rc,
+ __attribute__((unused)) struct raid_map_data *map_buff)
+{
+}
+#endif
+
+static int hpsa_get_raid_map(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
+{
+ int rc = 0;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+ if (c == NULL) {
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+ if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
+ sizeof(this_device->raid_map), 0,
+ scsi3addr, TYPE_CMD)) {
+ dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
+ cmd_special_free(h, c);
+ return -ENOMEM;
+ }
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ cmd_special_free(h, c);
+ return -1;
+ }
+ cmd_special_free(h, c);
+
+ /* @todo in the future, dynamically allocate RAID map memory */
+ if (le32_to_cpu(this_device->raid_map.structure_size) >
+ sizeof(this_device->raid_map)) {
+ dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
+ rc = -1;
+ }
+ hpsa_debug_map_buff(h, rc, &this_device->raid_map);
+ return rc;
+}
+
+static int hpsa_vpd_page_supported(struct ctlr_info *h,
+ unsigned char scsi3addr[], u8 page)
+{
+ int rc;
+ int i;
+ int pages;
+ unsigned char *buf, bufsize;
+
+ buf = kzalloc(256, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ /* Get the size of the page list first */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+ buf, HPSA_VPD_HEADER_SZ);
+ if (rc != 0)
+ goto exit_unsupported;
+ pages = buf[3];
+ if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
+ bufsize = pages + HPSA_VPD_HEADER_SZ;
+ else
+ bufsize = 255;
+
+ /* Get the whole VPD page list */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+ buf, bufsize);
+ if (rc != 0)
+ goto exit_unsupported;
+
+ pages = buf[3];
+ for (i = 1; i <= pages; i++)
+ if (buf[3 + i] == page)
+ goto exit_supported;
+exit_unsupported:
+ kfree(buf);
+ return 0;
+exit_supported:
+ kfree(buf);
+ return 1;
+}
+
+static void hpsa_get_ioaccel_status(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
+{
+ int rc;
+ unsigned char *buf;
+ u8 ioaccel_status;
+
+ this_device->offload_config = 0;
+ this_device->offload_enabled = 0;
+
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return;
+ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
+ goto out;
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
+ if (rc != 0)
+ goto out;
+
+#define IOACCEL_STATUS_BYTE 4
+#define OFFLOAD_CONFIGURED_BIT 0x01
+#define OFFLOAD_ENABLED_BIT 0x02
+ ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+ this_device->offload_config =
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+ if (this_device->offload_config) {
+ this_device->offload_enabled =
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+ if (hpsa_get_raid_map(h, scsi3addr, this_device))
+ this_device->offload_enabled = 0;
+ }
+out:
+ kfree(buf);
+ return;
+}
+
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
unsigned char *device_id, int buflen)
@@ -1644,7 +2418,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return -1;
- rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
if (rc == 0)
memcpy(device_id, &buf[8], buflen);
kfree(buf);
@@ -1678,8 +2452,16 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
ei = c->err_info;
if (ei->CommandStatus != 0 &&
ei->CommandStatus != CMD_DATA_UNDERRUN) {
- hpsa_scsi_interpret_error(c);
+ hpsa_scsi_interpret_error(h, c);
rc = -1;
+ } else {
+ if (buf->extended_response_flag != extended_response) {
+ dev_err(&h->pdev->dev,
+ "report luns requested format %u, got %u\n",
+ extended_response,
+ buf->extended_response_flag);
+ rc = -1;
+ }
}
out:
cmd_special_free(h, c);
@@ -1707,6 +2489,117 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
device->lun = lun;
}
+/* Use VPD inquiry to get details of volume status */
+static int hpsa_get_volume_status(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ int rc;
+ int status;
+ int size;
+ unsigned char *buf;
+
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+
+ /* Does controller have VPD for logical volume status? */
+ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
+ dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
+ goto exit_failed;
+ }
+
+ /* Get the size of the VPD return buffer */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+ buf, HPSA_VPD_HEADER_SZ);
+ if (rc != 0) {
+ dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+ goto exit_failed;
+ }
+ size = buf[3];
+
+ /* Now get the whole VPD buffer */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+ buf, size + HPSA_VPD_HEADER_SZ);
+ if (rc != 0) {
+ dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+ goto exit_failed;
+ }
+ status = buf[4]; /* status byte */
+
+ kfree(buf);
+ return status;
+exit_failed:
+ kfree(buf);
+ return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+}
+
+/* Determine offline status of a volume.
+ * Return either:
+ * 0 (not offline)
+ * -1 (offline for unknown reasons)
+ * # (integer code indicating one of several NOT READY states
+ * describing why a volume is to be kept offline)
+ */
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ struct CommandList *c;
+ unsigned char *sense, sense_key, asc, ascq;
+ int ldstat = 0;
+ u16 cmd_status;
+ u8 scsi_status;
+#define ASC_LUN_NOT_READY 0x04
+#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
+#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
+
+ c = cmd_alloc(h);
+ if (!c)
+ return 0;
+ (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ sense = c->err_info->SenseInfo;
+ sense_key = sense[2];
+ asc = sense[12];
+ ascq = sense[13];
+ cmd_status = c->err_info->CommandStatus;
+ scsi_status = c->err_info->ScsiStatus;
+ cmd_free(h, c);
+ /* Is the volume 'not ready'? */
+ if (cmd_status != CMD_TARGET_STATUS ||
+ scsi_status != SAM_STAT_CHECK_CONDITION ||
+ sense_key != NOT_READY ||
+ asc != ASC_LUN_NOT_READY) {
+ return 0;
+ }
+
+ /* Determine the reason for not ready state */
+ ldstat = hpsa_get_volume_status(h, scsi3addr);
+
+ /* Keep volume offline in certain cases: */
+ switch (ldstat) {
+ case HPSA_LV_UNDERGOING_ERASE:
+ case HPSA_LV_UNDERGOING_RPI:
+ case HPSA_LV_PENDING_RPI:
+ case HPSA_LV_ENCRYPTED_NO_KEY:
+ case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+ case HPSA_LV_UNDERGOING_ENCRYPTION:
+ case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+ case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+ return ldstat;
+ case HPSA_VPD_LV_STATUS_UNSUPPORTED:
+ /* If VPD status page isn't available,
+ * use ASC/ASCQ to determine state
+ */
+ if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
+ (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
+ return ldstat;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int hpsa_update_device_info(struct ctlr_info *h,
unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
unsigned char *is_OBDR_device)
@@ -1745,10 +2638,18 @@ static int hpsa_update_device_info(struct ctlr_info *h,
sizeof(this_device->device_id));
if (this_device->devtype == TYPE_DISK &&
- is_logical_dev_addr_mode(scsi3addr))
+ is_logical_dev_addr_mode(scsi3addr)) {
hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
- else
+ if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
+ hpsa_get_ioaccel_status(h, scsi3addr, this_device);
+ this_device->volume_offline =
+ hpsa_volume_offline(h, scsi3addr);
+ } else {
this_device->raid_level = RAID_UNKNOWN;
+ this_device->offload_config = 0;
+ this_device->offload_enabled = 0;
+ this_device->volume_offline = 0;
+ }
if (is_OBDR_device) {
/* See if this is a One-Button-Disaster-Recovery device
@@ -1878,6 +2779,105 @@ static int add_ext_target_dev(struct ctlr_info *h,
}
/*
+ * Get address of physical disk used for an ioaccel2 mode command:
+ * 1. Extract ioaccel2 handle from the command.
+ * 2. Find a matching ioaccel2 handle from list of physical disks.
+ * 3. Return:
+ * 1 and set scsi3addr to address of matching physical
+ * 0 if no matching physical disk was found.
+ */
+static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
+ struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
+{
+ struct ReportExtendedLUNdata *physicals = NULL;
+ int responsesize = 24; /* size of physical extended response */
+ int extended = 2; /* flag forces reporting 'other dev info'. */
+ int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
+ u32 nphysicals = 0; /* number of reported physical devs */
+ int found = 0; /* found match (1) or not (0) */
+ u32 find; /* handle we need to match */
+ int i;
+ struct scsi_cmnd *scmd; /* scsi command within request being aborted */
+ struct hpsa_scsi_dev_t *d; /* device of request being aborted */
+ struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
+ u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+ u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+
+ if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
+ return 0; /* no match */
+
+ /* point to the ioaccel2 device handle */
+ c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
+ if (c2a == NULL)
+ return 0; /* no match */
+
+ scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
+ if (scmd == NULL)
+ return 0; /* no match */
+
+ d = scmd->device->hostdata;
+ if (d == NULL)
+ return 0; /* no match */
+
+ it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
+ scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
+ find = c2a->scsi_nexus;
+
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ __func__, scsi_nexus,
+ d->device_id[0], d->device_id[1], d->device_id[2],
+ d->device_id[3], d->device_id[4], d->device_id[5],
+ d->device_id[6], d->device_id[7], d->device_id[8],
+ d->device_id[9], d->device_id[10], d->device_id[11],
+ d->device_id[12], d->device_id[13], d->device_id[14],
+ d->device_id[15]);
+
+ /* Get the list of physical devices */
+ physicals = kzalloc(reportsize, GFP_KERNEL);
+ if (physicals == NULL)
+ return 0; /* no match */
+ if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
+ reportsize, extended)) {
+ dev_err(&h->pdev->dev,
+ "Can't lookup %s device handle: report physical LUNs failed.\n",
+ "HP SSD Smart Path");
+ kfree(physicals);
+ return 0;
+ }
+ nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
+ responsesize;
+
+
+ /* find ioaccel2 handle in list of physicals: */
+ for (i = 0; i < nphysicals; i++) {
+ /* handle is in bytes 20-23 of each 24-byte lun entry */
+ if (memcmp(&((struct ReportExtendedLUNdata *)
+ physicals)->LUN[i][20], &find, 4) != 0) {
+ continue; /* didn't match */
+ }
+ found = 1;
+ memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
+ physicals)->LUN[i][0], 8);
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ __func__, find,
+ ((struct ReportExtendedLUNdata *)
+ physicals)->LUN[i][20],
+ scsi3addr[0], scsi3addr[1], scsi3addr[2],
+ scsi3addr[3], scsi3addr[4], scsi3addr[5],
+ scsi3addr[6], scsi3addr[7]);
+ break; /* found it */
+ }
+
+ kfree(physicals);
+ if (found)
+ return 1;
+ else
+ return 0;
+
+}
+/*
* Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
* logdev. The number of luns in physdev and logdev are returned in
* *nphysicals and *nlogicals, respectively.
@@ -1885,14 +2885,26 @@ static int add_ext_target_dev(struct ctlr_info *h,
*/
static int hpsa_gather_lun_info(struct ctlr_info *h,
int reportlunsize,
- struct ReportLUNdata *physdev, u32 *nphysicals,
+ struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
struct ReportLUNdata *logdev, u32 *nlogicals)
{
- if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
+ int physical_entry_size = 8;
+
+ *physical_mode = 0;
+
+ /* For I/O accelerator mode we need to read physical device handles */
+ if (h->transMethod & CFGTBL_Trans_io_accel1 ||
+ h->transMethod & CFGTBL_Trans_io_accel2) {
+ *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
+ physical_entry_size = 24;
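+ /* Extended entries are 24 bytes each; the 4-byte ioaccel handle occupies bytes 20-23 of an entry. */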
+ }
+ if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
+ *physical_mode)) {
dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
return -1;
}
- *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
+ *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
+ physical_entry_size;
if (*nphysicals > HPSA_MAX_PHYS_LUN) {
dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
@@ -1923,7 +2935,8 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
}
u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
- int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
+ int nphysicals, int nlogicals,
+ struct ReportExtendedLUNdata *physdev_list,
struct ReportLUNdata *logdev_list)
{
/* Helper function, figure out where the LUN ID info is coming from
@@ -1947,6 +2960,24 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
return NULL;
}
+static int hpsa_hba_mode_enabled(struct ctlr_info *h)
+{
+ int rc;
+ struct bmic_controller_parameters *ctlr_params;
+ ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
+ GFP_KERNEL);
+
+ if (!ctlr_params)
+ return 0;
+ rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
+ sizeof(struct bmic_controller_parameters));
+ if (rc != 0) {
+ kfree(ctlr_params);
+ return 0;
+ }
+ rc = (ctlr_params->nvram_flags & (1 << 3)) ? 1 : 0;
+ kfree(ctlr_params);
+ return rc;
+}
+
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
/* the idea here is we could get notified
@@ -1959,16 +2990,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
* tell which devices we already know about, vs. new
* devices, vs. disappearing devices.
*/
- struct ReportLUNdata *physdev_list = NULL;
+ struct ReportExtendedLUNdata *physdev_list = NULL;
struct ReportLUNdata *logdev_list = NULL;
u32 nphysicals = 0;
u32 nlogicals = 0;
+ int physical_mode = 0;
u32 ndev_allocated = 0;
struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
int ncurrent = 0;
- int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
+ int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
int i, n_ext_target_devs, ndevs_to_allocate;
int raid_ctlr_position;
+ u8 rescan_hba_mode;
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -1982,8 +3015,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
}
memset(lunzerobits, 0, sizeof(lunzerobits));
- if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
- logdev_list, &nlogicals))
+ rescan_hba_mode = hpsa_hba_mode_enabled(h);
+
+ if (!h->hba_mode_enabled && rescan_hba_mode)
+ dev_warn(&h->pdev->dev, "HBA mode enabled\n");
+ else if (h->hba_mode_enabled && !rescan_hba_mode)
+ dev_warn(&h->pdev->dev, "HBA mode disabled\n");
+
+ h->hba_mode_enabled = rescan_hba_mode;
+
+ if (hpsa_gather_lun_info(h, reportlunsize,
+ (struct ReportLUNdata *) physdev_list, &nphysicals,
+ &physical_mode, logdev_list, &nlogicals))
goto out;
/* We might see up to the maximum number of logical and physical disks
@@ -2064,9 +3107,28 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
ncurrent++;
break;
case TYPE_DISK:
- if (i < nphysicals)
+ if (h->hba_mode_enabled) {
+ /* never use raid mapper in HBA mode */
+ this_device->offload_enabled = 0;
+ ncurrent++;
break;
- ncurrent++;
+ } else if (h->acciopath_status) {
+ if (i >= nphysicals) {
+ ncurrent++;
+ break;
+ }
+ } else {
+ if (i < nphysicals)
+ break;
+ ncurrent++;
+ break;
+ }
+ if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
+ memcpy(&this_device->ioaccel_handle,
+ &lunaddrbytes[20],
+ sizeof(this_device->ioaccel_handle));
+ ncurrent++;
+ }
break;
case TYPE_TAPE:
case TYPE_MEDIUM_CHANGER:
@@ -2136,7 +3198,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
curr_sg->Len = len;
- curr_sg->Ext = 0; /* we are not chaining */
+ curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
curr_sg++;
}
@@ -2160,6 +3222,726 @@ sglist_finished:
return 0;
}
+#define IO_ACCEL_INELIGIBLE (1)
+static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
+{
+ int is_write = 0;
+ u32 block;
+ u32 block_cnt;
+
+ /* Perform some CDB fixups if needed using 10 byte reads/writes only */
+ switch (cdb[0]) {
+ case WRITE_6:
+ case WRITE_12:
+ is_write = 1;
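+ /* fall through */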
+ case READ_6:
+ case READ_12:
+ if (*cdb_len == 6) {
+ block = (((u32) cdb[2]) << 8) | cdb[3];
+ block_cnt = cdb[4];
+ } else {
+ BUG_ON(*cdb_len != 12);
+ block = (((u32) cdb[2]) << 24) |
+ (((u32) cdb[3]) << 16) |
+ (((u32) cdb[4]) << 8) |
+ cdb[5];
+ block_cnt =
+ (((u32) cdb[6]) << 24) |
+ (((u32) cdb[7]) << 16) |
+ (((u32) cdb[8]) << 8) |
+ cdb[9];
+ }
+ if (block_cnt > 0xffff)
+ return IO_ACCEL_INELIGIBLE;
+
+ cdb[0] = is_write ? WRITE_10 : READ_10;
+ cdb[1] = 0;
+ cdb[2] = (u8) (block >> 24);
+ cdb[3] = (u8) (block >> 16);
+ cdb[4] = (u8) (block >> 8);
+ cdb[5] = (u8) (block);
+ cdb[6] = 0;
+ cdb[7] = (u8) (block_cnt >> 8);
+ cdb[8] = (u8) (block_cnt);
+ cdb[9] = 0;
+ *cdb_len = 10;
+ break;
+ }
+ return 0;
+}
+
+static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+ unsigned int len;
+ unsigned int total_len = 0;
+ struct scatterlist *sg;
+ u64 addr64;
+ int use_sg, i;
+ struct SGDescriptor *curr_sg;
+ u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
+
+ /* TODO: implement chaining support */
+ if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+ return IO_ACCEL_INELIGIBLE;
+
+ BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
+
+ if (fixup_ioaccel_cdb(cdb, &cdb_len))
+ return IO_ACCEL_INELIGIBLE;
+
+ c->cmd_type = CMD_IOACCEL1;
+
+ /* Adjust the DMA address to point to the accelerated command buffer */
+ c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
+ (c->cmdindex * sizeof(*cp));
+ BUG_ON(c->busaddr & 0x0000007F);
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg < 0)
+ return use_sg;
+
+ if (use_sg) {
+ curr_sg = cp->SG;
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ addr64 = (u64) sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ total_len += len;
+ curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
+ curr_sg->Addr.upper =
+ (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
+ curr_sg->Len = len;
+
+ if (i == (scsi_sg_count(cmd) - 1))
+ curr_sg->Ext = HPSA_SG_LAST;
+ else
+ curr_sg->Ext = 0; /* we are not chaining */
+ curr_sg++;
+ }
+
+ switch (cmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ control |= IOACCEL1_CONTROL_DATA_OUT;
+ break;
+ case DMA_FROM_DEVICE:
+ control |= IOACCEL1_CONTROL_DATA_IN;
+ break;
+ case DMA_NONE:
+ control |= IOACCEL1_CONTROL_NODATAXFER;
+ break;
+ default:
+ dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+ cmd->sc_data_direction);
+ BUG();
+ break;
+ }
+ } else {
+ control |= IOACCEL1_CONTROL_NODATAXFER;
+ }
+
+ c->Header.SGList = use_sg;
+ /* Fill out the command structure to submit */
+ cp->dev_handle = ioaccel_handle & 0xFFFF;
+ cp->transfer_len = total_len;
+ cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
+ (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
+ cp->control = control;
+ memcpy(cp->CDB, cdb, cdb_len);
+ memcpy(cp->CISS_LUN, scsi3addr, 8);
+ /* Tag was already set at init time. */
+ enqueue_cmd_and_start_io(h, c);
+ return 0;
+}
+
+/*
+ * Queue a command directly to a device behind the controller using the
+ * I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+
+ return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
+ cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
+}
+
+/*
+ * Set encryption parameters for the ioaccel2 request
+ */
+static void set_encrypt_ioaccel2(struct ctlr_info *h,
+ struct CommandList *c, struct io_accel2_cmd *cp)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+ struct raid_map_data *map = &dev->raid_map;
+ u64 first_block;
+
+ BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+ /* Are we doing encryption on this device */
+ if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
+ return;
+ /* Set the data encryption key index. */
+ cp->dekindex = map->dekindex;
+
+ /* Set the encryption enable flag, encoded into direction field. */
+ cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
+
+ /* Set encryption tweak values based on logical block address
+ * If block size is 512, tweak value is LBA.
+ * For other block sizes, tweak is (LBA * block size) / 512.
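+ * (e.g. a volume with 4096-byte blocks maps LBA 10 to tweak (10 * 4096) / 512 = 80)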
+ */
+ switch (cmd->cmnd[0]) {
+ /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
+ case WRITE_6:
+ case READ_6:
+ if (map->volume_blk_size == 512) {
+ cp->tweak_lower =
+ (((u32) cmd->cmnd[2]) << 8) |
+ cmd->cmnd[3];
+ cp->tweak_upper = 0;
+ } else {
+ first_block =
+ (((u64) cmd->cmnd[2]) << 8) |
+ cmd->cmnd[3];
+ first_block = (first_block * map->volume_blk_size)/512;
+ cp->tweak_lower = (u32)first_block;
+ cp->tweak_upper = (u32)(first_block >> 32);
+ }
+ break;
+ case WRITE_10:
+ case READ_10:
+ if (map->volume_blk_size == 512) {
+ cp->tweak_lower =
+ (((u32) cmd->cmnd[2]) << 24) |
+ (((u32) cmd->cmnd[3]) << 16) |
+ (((u32) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ cp->tweak_upper = 0;
+ } else {
+ first_block =
+ (((u64) cmd->cmnd[2]) << 24) |
+ (((u64) cmd->cmnd[3]) << 16) |
+ (((u64) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ first_block = (first_block * map->volume_blk_size)/512;
+ cp->tweak_lower = (u32)first_block;
+ cp->tweak_upper = (u32)(first_block >> 32);
+ }
+ break;
+ /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
+ case WRITE_12:
+ case READ_12:
+ if (map->volume_blk_size == 512) {
+ cp->tweak_lower =
+ (((u32) cmd->cmnd[2]) << 24) |
+ (((u32) cmd->cmnd[3]) << 16) |
+ (((u32) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ cp->tweak_upper = 0;
+ } else {
+ first_block =
+ (((u64) cmd->cmnd[2]) << 24) |
+ (((u64) cmd->cmnd[3]) << 16) |
+ (((u64) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ first_block = (first_block * map->volume_blk_size)/512;
+ cp->tweak_lower = (u32)first_block;
+ cp->tweak_upper = (u32)(first_block >> 32);
+ }
+ break;
+ case WRITE_16:
+ case READ_16:
+ if (map->volume_blk_size == 512) {
+ cp->tweak_lower =
+ (((u32) cmd->cmnd[6]) << 24) |
+ (((u32) cmd->cmnd[7]) << 16) |
+ (((u32) cmd->cmnd[8]) << 8) |
+ cmd->cmnd[9];
+ cp->tweak_upper =
+ (((u32) cmd->cmnd[2]) << 24) |
+ (((u32) cmd->cmnd[3]) << 16) |
+ (((u32) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ } else {
+ first_block =
+ (((u64) cmd->cmnd[2]) << 56) |
+ (((u64) cmd->cmnd[3]) << 48) |
+ (((u64) cmd->cmnd[4]) << 40) |
+ (((u64) cmd->cmnd[5]) << 32) |
+ (((u64) cmd->cmnd[6]) << 24) |
+ (((u64) cmd->cmnd[7]) << 16) |
+ (((u64) cmd->cmnd[8]) << 8) |
+ cmd->cmnd[9];
+ first_block = (first_block * map->volume_blk_size)/512;
+ cp->tweak_lower = (u32)first_block;
+ cp->tweak_upper = (u32)(first_block >> 32);
+ }
+ break;
+ default:
+ dev_err(&h->pdev->dev,
+ "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
+ __func__);
+ BUG();
+ break;
+ }
+}
+
+static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+ struct ioaccel2_sg_element *curr_sg;
+ int use_sg, i;
+ struct scatterlist *sg;
+ u64 addr64;
+ u32 len;
+ u32 total_len = 0;
+
+ if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+ return IO_ACCEL_INELIGIBLE;
+
+ if (fixup_ioaccel_cdb(cdb, &cdb_len))
+ return IO_ACCEL_INELIGIBLE;
+ c->cmd_type = CMD_IOACCEL2;
+ /* Adjust the DMA address to point to the accelerated command buffer */
+ c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
+ (c->cmdindex * sizeof(*cp));
+ BUG_ON(c->busaddr & 0x0000007F);
+
+ memset(cp, 0, sizeof(*cp));
+ cp->IU_type = IOACCEL2_IU_TYPE;
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg < 0)
+ return use_sg;
+
+ if (use_sg) {
+ BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
+ curr_sg = cp->sg;
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ addr64 = (u64) sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ total_len += len;
+ curr_sg->address = cpu_to_le64(addr64);
+ curr_sg->length = cpu_to_le32(len);
+ curr_sg->reserved[0] = 0;
+ curr_sg->reserved[1] = 0;
+ curr_sg->reserved[2] = 0;
+ curr_sg->chain_indicator = 0;
+ curr_sg++;
+ }
+
+ switch (cmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_DATA_OUT;
+ break;
+ case DMA_FROM_DEVICE:
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_DATA_IN;
+ break;
+ case DMA_NONE:
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_NO_DATA;
+ break;
+ default:
+ dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+ cmd->sc_data_direction);
+ BUG();
+ break;
+ }
+ } else {
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_NO_DATA;
+ }
+
+ /* Set encryption parameters, if necessary */
+ set_encrypt_ioaccel2(h, c, cp);
+
+ cp->scsi_nexus = ioaccel_handle;
+ cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
+ DIRECT_LOOKUP_BIT;
+ memcpy(cp->cdb, cdb, sizeof(cp->cdb));
+
+ /* fill in sg elements */
+ cp->sg_count = (u8) use_sg;
+
+ cp->data_len = cpu_to_le32(total_len);
+ cp->err_ptr = cpu_to_le64(c->busaddr +
+ offsetof(struct io_accel2_cmd, error_data));
+ cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
+
+ enqueue_cmd_and_start_io(h, c);
+ return 0;
+}
+
+/*
+ * Queue a command to the correct I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr)
+{
+ if (h->transMethod & CFGTBL_Trans_io_accel1)
+ return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
+ cdb, cdb_len, scsi3addr);
+ else
+ return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
+ cdb, cdb_len, scsi3addr);
+}
+
+static void raid_map_helper(struct raid_map_data *map,
+ int offload_to_mirror, u32 *map_index, u32 *current_group)
+{
+ if (offload_to_mirror == 0) {
+ /* use physical disk in the first mirrored group. */
+ *map_index %= map->data_disks_per_row;
+ return;
+ }
+ do {
+ /* determine mirror group that *map_index indicates */
+ *current_group = *map_index / map->data_disks_per_row;
+ if (offload_to_mirror == *current_group)
+ continue;
+ if (*current_group < (map->layout_map_count - 1)) {
+ /* select map index from next group */
+ *map_index += map->data_disks_per_row;
+ (*current_group)++;
+ } else {
+ /* select map index from first group */
+ *map_index %= map->data_disks_per_row;
+ *current_group = 0;
+ }
+ } while (offload_to_mirror != *current_group);
+}
+
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+ struct raid_map_data *map = &dev->raid_map;
+ struct raid_map_disk_data *dd = &map->data[0];
+ int is_write = 0;
+ u32 map_index;
+ u64 first_block, last_block;
+ u32 block_cnt;
+ u32 blocks_per_row;
+ u64 first_row, last_row;
+ u32 first_row_offset, last_row_offset;
+ u32 first_column, last_column;
+ u64 r0_first_row, r0_last_row;
+ u32 r5or6_blocks_per_row;
+ u64 r5or6_first_row, r5or6_last_row;
+ u32 r5or6_first_row_offset, r5or6_last_row_offset;
+ u32 r5or6_first_column, r5or6_last_column;
+ u32 total_disks_per_row;
+ u32 stripesize;
+ u32 first_group, last_group, current_group;
+ u32 map_row;
+ u32 disk_handle;
+ u64 disk_block;
+ u32 disk_block_cnt;
+ u8 cdb[16];
+ u8 cdb_len;
+#if BITS_PER_LONG == 32
+ u64 tmpdiv;
+#endif
+ int offload_to_mirror;
+
+ BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+ /* check for valid opcode, get LBA and block count */
+ switch (cmd->cmnd[0]) {
+ case WRITE_6:
+ is_write = 1;
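+ /* fall through */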
+ case READ_6:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 8) |
+ cmd->cmnd[3];
+ block_cnt = cmd->cmnd[4];
+ break;
+ case WRITE_10:
+ is_write = 1;
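+ /* fall through */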
+ case READ_10:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 24) |
+ (((u64) cmd->cmnd[3]) << 16) |
+ (((u64) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ block_cnt =
+ (((u32) cmd->cmnd[7]) << 8) |
+ cmd->cmnd[8];
+ break;
+ case WRITE_12:
+ is_write = 1;
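+ /* fall through */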
+ case READ_12:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 24) |
+ (((u64) cmd->cmnd[3]) << 16) |
+ (((u64) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ block_cnt =
+ (((u32) cmd->cmnd[6]) << 24) |
+ (((u32) cmd->cmnd[7]) << 16) |
+ (((u32) cmd->cmnd[8]) << 8) |
+ cmd->cmnd[9];
+ break;
+ case WRITE_16:
+ is_write = 1;
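+ /* fall through */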
+ case READ_16:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 56) |
+ (((u64) cmd->cmnd[3]) << 48) |
+ (((u64) cmd->cmnd[4]) << 40) |
+ (((u64) cmd->cmnd[5]) << 32) |
+ (((u64) cmd->cmnd[6]) << 24) |
+ (((u64) cmd->cmnd[7]) << 16) |
+ (((u64) cmd->cmnd[8]) << 8) |
+ cmd->cmnd[9];
+ block_cnt =
+ (((u32) cmd->cmnd[10]) << 24) |
+ (((u32) cmd->cmnd[11]) << 16) |
+ (((u32) cmd->cmnd[12]) << 8) |
+ cmd->cmnd[13];
+ break;
+ default:
+ return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
+ }
+ BUG_ON(block_cnt == 0);
+ last_block = first_block + block_cnt - 1;
+
+ /* check for write to non-RAID-0 */
+ if (is_write && dev->raid_level != 0)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* check for invalid block or wraparound */
+ if (last_block >= map->volume_blk_cnt || last_block < first_block)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* calculate stripe information for the request */
+ blocks_per_row = map->data_disks_per_row * map->strip_size;
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ (void) do_div(tmpdiv, blocks_per_row);
+ first_row = tmpdiv;
+ tmpdiv = last_block;
+ (void) do_div(tmpdiv, blocks_per_row);
+ last_row = tmpdiv;
+ first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+ last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+ tmpdiv = first_row_offset;
+ (void) do_div(tmpdiv, map->strip_size);
+ first_column = tmpdiv;
+ tmpdiv = last_row_offset;
+ (void) do_div(tmpdiv, map->strip_size);
+ last_column = tmpdiv;
+#else
+ first_row = first_block / blocks_per_row;
+ last_row = last_block / blocks_per_row;
+ first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+ last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+ first_column = first_row_offset / map->strip_size;
+ last_column = last_row_offset / map->strip_size;
+#endif
+
+ /* if this isn't a single row/column then give to the controller */
+ if ((first_row != last_row) || (first_column != last_column))
+ return IO_ACCEL_INELIGIBLE;
+
+ /* proceeding with driver mapping */
+ total_disks_per_row = map->data_disks_per_row +
+ map->metadata_disks_per_row;
+ map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+ map->row_cnt;
+ map_index = (map_row * total_disks_per_row) + first_column;
+
+ switch (dev->raid_level) {
+ case HPSA_RAID_0:
+ break; /* nothing special to do */
+ case HPSA_RAID_1:
+ /* Handles load balance across RAID 1 members.
+ * (2-drive R1 and R10 with even # of drives.)
+ * Appropriate for SSDs, not optimal for HDDs
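+ * Each request flips dev->offload_to_mirror, so I/O alternates
+ * between the two mirror groups.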
+ */
+ BUG_ON(map->layout_map_count != 2);
+ if (dev->offload_to_mirror)
+ map_index += map->data_disks_per_row;
+ dev->offload_to_mirror = !dev->offload_to_mirror;
+ break;
+ case HPSA_RAID_ADM:
+ /* Handles N-way mirrors (R1-ADM)
+ * and R10 with # of drives divisible by 3.
+ */
+ BUG_ON(map->layout_map_count != 3);
+
+ offload_to_mirror = dev->offload_to_mirror;
+ raid_map_helper(map, offload_to_mirror,
+ &map_index, &current_group);
+ /* set mirror group to use next time */
+ offload_to_mirror =
+ (offload_to_mirror >= map->layout_map_count - 1)
+ ? 0 : offload_to_mirror + 1;
+ /* FIXME: remove after debug/dev */
+ BUG_ON(offload_to_mirror >= map->layout_map_count);
+ dev_warn(&h->pdev->dev,
+ "DEBUG: Using physical disk map index %d from mirror group %d\n",
+ map_index, offload_to_mirror);
+ dev->offload_to_mirror = offload_to_mirror;
+ /* Avoid direct use of dev->offload_to_mirror within this
+ * function since multiple threads might simultaneously
+ * increment it beyond the range of map->layout_map_count - 1.
+ */
+ break;
+ case HPSA_RAID_5:
+ case HPSA_RAID_6:
+ if (map->layout_map_count <= 1)
+ break;
+
+ /* Verify first and last block are in same RAID group */
+ r5or6_blocks_per_row =
+ map->strip_size * map->data_disks_per_row;
+ BUG_ON(r5or6_blocks_per_row == 0);
+ stripesize = r5or6_blocks_per_row * map->layout_map_count;
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ first_group = do_div(tmpdiv, stripesize);
+ tmpdiv = first_group;
+ (void) do_div(tmpdiv, r5or6_blocks_per_row);
+ first_group = tmpdiv;
+ tmpdiv = last_block;
+ last_group = do_div(tmpdiv, stripesize);
+ tmpdiv = last_group;
+ (void) do_div(tmpdiv, r5or6_blocks_per_row);
+ last_group = tmpdiv;
+#else
+ first_group = (first_block % stripesize) / r5or6_blocks_per_row;
+ last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+#endif
+ if (first_group != last_group)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* Verify request is in a single row of RAID 5/6 */
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ (void) do_div(tmpdiv, stripesize);
+ first_row = r5or6_first_row = r0_first_row = tmpdiv;
+ tmpdiv = last_block;
+ (void) do_div(tmpdiv, stripesize);
+ r5or6_last_row = r0_last_row = tmpdiv;
+#else
+ first_row = r5or6_first_row = r0_first_row =
+ first_block / stripesize;
+ r5or6_last_row = r0_last_row = last_block / stripesize;
+#endif
+ if (r5or6_first_row != r5or6_last_row)
+ return IO_ACCEL_INELIGIBLE;
+
+
+ /* Verify request is in a single column */
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ first_row_offset = do_div(tmpdiv, stripesize);
+ tmpdiv = first_row_offset;
+ first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
+ r5or6_first_row_offset = first_row_offset;
+ tmpdiv = last_block;
+ r5or6_last_row_offset = do_div(tmpdiv, stripesize);
+ tmpdiv = r5or6_last_row_offset;
+ r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
+ tmpdiv = r5or6_first_row_offset;
+ (void) do_div(tmpdiv, map->strip_size);
+ first_column = r5or6_first_column = tmpdiv;
+ tmpdiv = r5or6_last_row_offset;
+ (void) do_div(tmpdiv, map->strip_size);
+ r5or6_last_column = tmpdiv;
+#else
+ first_row_offset = r5or6_first_row_offset =
+ (u32)((first_block % stripesize) %
+ r5or6_blocks_per_row);
+
+ r5or6_last_row_offset =
+ (u32)((last_block % stripesize) %
+ r5or6_blocks_per_row);
+
+ first_column = r5or6_first_column =
+ r5or6_first_row_offset / map->strip_size;
+ r5or6_last_column =
+ r5or6_last_row_offset / map->strip_size;
+#endif
+ if (r5or6_first_column != r5or6_last_column)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* Request is eligible */
+ map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+ map->row_cnt;
+
+ map_index = (first_group *
+ (map->row_cnt * total_disks_per_row)) +
+ (map_row * total_disks_per_row) + first_column;
+ break;
+ default:
+ return IO_ACCEL_INELIGIBLE;
+ }
+
+ disk_handle = dd[map_index].ioaccel_handle;
+ disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
+ (first_row_offset - (first_column * map->strip_size));
+ disk_block_cnt = block_cnt;
+
+ /* handle differing logical/physical block sizes */
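+ /* e.g. phys_blk_shift == 3 multiplies both the starting block and the block count by 8 */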
+ if (map->phys_blk_shift) {
+ disk_block <<= map->phys_blk_shift;
+ disk_block_cnt <<= map->phys_blk_shift;
+ }
+ BUG_ON(disk_block_cnt > 0xffff);
+
+ /* build the new CDB for the physical disk I/O */
+ if (disk_block > 0xffffffff) {
+ cdb[0] = is_write ? WRITE_16 : READ_16;
+ cdb[1] = 0;
+ cdb[2] = (u8) (disk_block >> 56);
+ cdb[3] = (u8) (disk_block >> 48);
+ cdb[4] = (u8) (disk_block >> 40);
+ cdb[5] = (u8) (disk_block >> 32);
+ cdb[6] = (u8) (disk_block >> 24);
+ cdb[7] = (u8) (disk_block >> 16);
+ cdb[8] = (u8) (disk_block >> 8);
+ cdb[9] = (u8) (disk_block);
+ cdb[10] = (u8) (disk_block_cnt >> 24);
+ cdb[11] = (u8) (disk_block_cnt >> 16);
+ cdb[12] = (u8) (disk_block_cnt >> 8);
+ cdb[13] = (u8) (disk_block_cnt);
+ cdb[14] = 0;
+ cdb[15] = 0;
+ cdb_len = 16;
+ } else {
+ cdb[0] = is_write ? WRITE_10 : READ_10;
+ cdb[1] = 0;
+ cdb[2] = (u8) (disk_block >> 24);
+ cdb[3] = (u8) (disk_block >> 16);
+ cdb[4] = (u8) (disk_block >> 8);
+ cdb[5] = (u8) (disk_block);
+ cdb[6] = 0;
+ cdb[7] = (u8) (disk_block_cnt >> 8);
+ cdb[8] = (u8) (disk_block_cnt);
+ cdb[9] = 0;
+ cdb_len = 10;
+ }
+ return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
+ dev->scsi3addr);
+}
static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
@@ -2169,6 +3951,7 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
unsigned char scsi3addr[8];
struct CommandList *c;
unsigned long flags;
+ int rc = 0;
/* Get the ptr to our adapter structure out of cmd->host. */
h = sdev_to_hba(cmd->device);
@@ -2203,6 +3986,32 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
+
+ /* Call alternate submit routine for I/O accelerated commands.
+ * Retries always go down the normal I/O path.
+ */
+ if (likely(cmd->retries == 0 &&
+ cmd->request->cmd_type == REQ_TYPE_FS &&
+ h->acciopath_status)) {
+ if (dev->offload_enabled) {
+ rc = hpsa_scsi_ioaccel_raid_map(h, c);
+ if (rc == 0)
+ return 0; /* Sent on ioaccel path */
+ if (rc < 0) { /* scsi_dma_map failed. */
+ cmd_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ } else if (dev->ioaccel_handle) {
+ rc = hpsa_scsi_ioaccel_direct_map(h, c);
+ if (rc == 0)
+ return 0; /* Sent on direct map path */
+ if (rc < 0) { /* scsi_dma_map failed. */
+ cmd_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ }
+ }
+
c->Header.ReplyQueue = 0; /* unused in simple mode */
memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
@@ -2262,11 +4071,38 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
+static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
+{
+ unsigned long flags;
+
+ /*
+ * Don't let rescans be initiated on a controller known
+ * to be locked up. If the controller locks up *during*
+ * a rescan, that thread is probably hosed, but at least
+ * we can prevent new rescan threads from piling up on a
+ * locked up controller.
+ */
+ spin_lock_irqsave(&h->lock, flags);
+ if (unlikely(h->lockup_detected)) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ spin_lock_irqsave(&h->scan_lock, flags);
+ h->scan_finished = 1;
+ wake_up_all(&h->scan_wait_queue);
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+ return 1;
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
+ return 0;
+}
+
static void hpsa_scan_start(struct Scsi_Host *sh)
{
struct ctlr_info *h = shost_to_hba(sh);
unsigned long flags;
+ if (do_not_scan_if_controller_locked_up(h))
+ return;
+
/* wait until any scan already in progress is finished. */
while (1) {
spin_lock_irqsave(&h->scan_lock, flags);
@@ -2283,6 +4119,9 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
h->scan_finished = 0; /* mark scan as in progress */
spin_unlock_irqrestore(&h->scan_lock, flags);
+ if (do_not_scan_if_controller_locked_up(h))
+ return;
+
hpsa_update_scsi_devices(h, h->scsi_host->host_no);
spin_lock_irqsave(&h->scan_lock, flags);
@@ -2346,7 +4185,10 @@ static int hpsa_register_scsi(struct ctlr_info *h)
sh->max_lun = HPSA_MAX_LUN;
sh->max_id = HPSA_MAX_LUN;
sh->can_queue = h->nr_cmds;
- sh->cmd_per_lun = h->nr_cmds;
+ if (h->hba_mode_enabled)
+ sh->cmd_per_lun = 7;
+ else
+ sh->cmd_per_lun = h->nr_cmds;
sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
@@ -2372,7 +4214,7 @@ static int hpsa_register_scsi(struct ctlr_info *h)
static int wait_for_device_to_become_ready(struct ctlr_info *h,
unsigned char lunaddr[])
{
- int rc = 0;
+ int rc;
int count = 0;
int waittime = 1; /* seconds */
struct CommandList *c;
@@ -2392,6 +4234,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
*/
msleep(1000 * waittime);
count++;
+ rc = 0; /* Device ready. */
/* Increase wait time with each try, up to a point. */
if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
@@ -2448,7 +4291,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
/* send a reset to the SCSI LUN which the command was sent to */
- rc = hpsa_send_reset(h, dev->scsi3addr);
+ rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
return SUCCESS;
@@ -2471,12 +4314,36 @@ static void swizzle_abort_tag(u8 *tag)
tag[7] = original_tag[4];
}
+static void hpsa_get_tag(struct ctlr_info *h,
+ struct CommandList *c, u32 *taglower, u32 *tagupper)
+{
+ if (c->cmd_type == CMD_IOACCEL1) {
+ struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
+ &h->ioaccel_cmd_pool[c->cmdindex];
+ *tagupper = cm1->Tag.upper;
+ *taglower = cm1->Tag.lower;
+ return;
+ }
+ if (c->cmd_type == CMD_IOACCEL2) {
+ struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
+ &h->ioaccel2_cmd_pool[c->cmdindex];
+ /* upper tag not used in ioaccel2 mode */
+ memset(tagupper, 0, sizeof(*tagupper));
+ *taglower = cm2->Tag;
+ return;
+ }
+ *tagupper = c->Header.Tag.upper;
+ *taglower = c->Header.Tag.lower;
+}
+
+
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
struct CommandList *abort, int swizzle)
{
int rc = IO_OK;
struct CommandList *c;
struct ErrorInfo *ei;
+ u32 tagupper, taglower;
c = cmd_special_alloc(h);
if (c == NULL) { /* trouble... */
@@ -2490,8 +4357,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
if (swizzle)
swizzle_abort_tag(&c->Request.CDB[4]);
hpsa_scsi_do_simple_cmd_core(h, c);
+ hpsa_get_tag(h, abort, &taglower, &tagupper);
dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
- __func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
+ __func__, tagupper, taglower);
/* no unmap needed here because no data xfer. */
ei = c->err_info;
@@ -2503,15 +4371,14 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
break;
default:
dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
- __func__, abort->Header.Tag.upper,
- abort->Header.Tag.lower);
- hpsa_scsi_interpret_error(c);
+ __func__, tagupper, taglower);
+ hpsa_scsi_interpret_error(h, c);
rc = -1;
break;
}
cmd_special_free(h, c);
- dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
- abort->Header.Tag.upper, abort->Header.Tag.lower);
+ dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
+ __func__, tagupper, taglower);
return rc;
}
@@ -2565,6 +4432,83 @@ static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
return NULL;
}
+/* ioaccel2 path firmware cannot handle abort task requests.
+ * Change abort requests to physical target reset, and send to the
+ * address of the physical disk used for the ioaccel 2 command.
+ * Return 0 on success (IO_OK)
+ * -1 on failure
+ */
+
+static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct CommandList *abort)
+{
+ int rc = IO_OK;
+ struct scsi_cmnd *scmd; /* scsi command within request being aborted */
+ struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
+ unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
+ unsigned char *psa = &phys_scsi3addr[0];
+
+ /* Get a pointer to the hpsa logical device. */
+ scmd = (struct scsi_cmnd *) abort->scsi_cmd;
+ dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
+ if (dev == NULL) {
+ dev_warn(&h->pdev->dev,
+ "Cannot abort: no device pointer for command.\n");
+ return -1; /* not abortable */
+ }
+
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
+ scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
+ scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+
+ if (!dev->offload_enabled) {
+ dev_warn(&h->pdev->dev,
+ "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
+ return -1; /* not abortable */
+ }
+
+ /* Incoming scsi3addr is logical addr. We need physical disk addr. */
+ if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
+ dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
+ return -1; /* not abortable */
+ }
+
+ /* send the reset */
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+ rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
+ if (rc != 0) {
+ dev_warn(&h->pdev->dev,
+ "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+ return rc; /* failed to reset */
+ }
+
+ /* wait for device to recover */
+ if (wait_for_device_to_become_ready(h, psa) != 0) {
+ dev_warn(&h->pdev->dev,
+ "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+ return -1; /* failed to recover */
+ }
+
+ /* device recovered */
+ dev_info(&h->pdev->dev,
+ "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+
+ return rc; /* success */
+}
+
/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
* tell which kind we're dealing with, so we send the abort both ways. There
* shouldn't be any collisions between swizzled and unswizzled tags due to the
@@ -2578,6 +4522,14 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,
struct CommandList *c;
int rc = 0, rc2 = 0;
+ /* ioaccel2 mode commands should be aborted via the
+ * accelerated path, since RAID path is unaware of these commands,
+ * but underlying firmware can't handle abort TMF.
+ * Change abort to physical device reset.
+ */
+ if (abort->cmd_type == CMD_IOACCEL2)
+ return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
+
/* we do not expect to find the swizzled tag in our queue, but
* check anyway just to be sure the assumptions which make this
* the case haven't become wrong.
@@ -2616,6 +4568,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
char msg[256]; /* For debug messaging. */
int ml = 0;
+ u32 tagupper, taglower;
/* Find the controller of the command to be aborted */
h = sdev_to_hba(sc->device);
@@ -2648,9 +4601,8 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
msg);
return FAILED;
}
-
- ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
- abort->Header.Tag.upper, abort->Header.Tag.lower);
+ hpsa_get_tag(h, abort, &taglower, &tagupper);
+ ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
as = (struct scsi_cmnd *) abort->scsi_cmd;
if (as != NULL)
ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
@@ -2776,6 +4728,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
return NULL;
memset(c, 0, sizeof(*c));
+ c->cmd_type = CMD_SCSI;
c->cmdindex = -1;
c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
@@ -3038,7 +4991,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[0].Addr.lower = temp64.val32.lower;
c->SG[0].Addr.upper = temp64.val32.upper;
c->SG[0].Len = iocommand.buf_size;
- c->SG[0].Ext = 0; /* we are not chaining*/
+ c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
}
hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
if (iocommand.buf_size > 0)
@@ -3168,8 +5121,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[i].Addr.lower = temp64.val32.lower;
c->SG[i].Addr.upper = temp64.val32.upper;
c->SG[i].Len = buff_size[i];
- /* we are not chaining */
- c->SG[i].Ext = 0;
+ c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
}
}
hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
@@ -3304,7 +5256,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
}
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
- void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+ void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
int cmd_type)
{
int pci_dir = XFER_NONE;
@@ -3327,9 +5279,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
switch (cmd) {
case HPSA_INQUIRY:
/* are we trying to read a vital product page */
- if (page_code != 0) {
+ if (page_code & VPD_PAGE) {
c->Request.CDB[1] = 0x01;
- c->Request.CDB[2] = page_code;
+ c->Request.CDB[2] = (page_code & 0xff);
}
c->Request.CDBLen = 6;
c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -3369,6 +5321,28 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.Type.Direction = XFER_NONE;
c->Request.Timeout = 0;
break;
+ case HPSA_GET_RAID_MAP:
+ c->Request.CDBLen = 12;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_READ;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = HPSA_CISS_READ;
+ c->Request.CDB[1] = cmd;
+ c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+ c->Request.CDB[7] = (size >> 16) & 0xFF;
+ c->Request.CDB[8] = (size >> 8) & 0xFF;
+ c->Request.CDB[9] = size & 0xFF;
+ break;
+ case BMIC_SENSE_CONTROLLER_PARAMETERS:
+ c->Request.CDBLen = 10;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_READ;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = BMIC_READ;
+ c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
+ c->Request.CDB[7] = (size >> 16) & 0xFF;
+ c->Request.CDB[8] = (size >> 8) & 0xFF;
+ break;
default:
dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
BUG();
@@ -3562,7 +5536,8 @@ static inline void finish_cmd(struct CommandList *c)
spin_unlock_irqrestore(&h->lock, flags);
dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
- if (likely(c->cmd_type == CMD_SCSI))
+ if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
+ || c->cmd_type == CMD_IOACCEL2))
complete_scsi_command(c);
else if (c->cmd_type == CMD_IOCTL_PEND)
complete(c->waiting);
@@ -4169,21 +6144,24 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
goto default_int_mode;
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
dev_info(&h->pdev->dev, "MSIX\n");
+ h->msix_vector = MAX_REPLY_QUEUES;
err = pci_enable_msix(h->pdev, hpsa_msix_entries,
- MAX_REPLY_QUEUES);
- if (!err) {
- for (i = 0; i < MAX_REPLY_QUEUES; i++)
- h->intr[i] = hpsa_msix_entries[i].vector;
- h->msix_vector = 1;
- return;
- }
+ h->msix_vector);
if (err > 0) {
dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
"available\n", err);
- goto default_int_mode;
+ h->msix_vector = err;
+ err = pci_enable_msix(h->pdev, hpsa_msix_entries,
+ h->msix_vector);
+ }
+ if (!err) {
+ for (i = 0; i < h->msix_vector; i++)
+ h->intr[i] = hpsa_msix_entries[i].vector;
+ return;
} else {
dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
err);
+ h->msix_vector = 0;
goto default_int_mode;
}
}
@@ -4336,6 +6314,7 @@ static void hpsa_find_board_params(struct ctlr_info *h)
hpsa_get_max_perf_mode_cmds(h);
h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+ h->fw_support = readl(&(h->cfgtable->misc_fw_support));
/*
	 * Limit in-command s/g elements to 32 to save dma'able memory.
	 * However, spec says if 0, use 31
@@ -4352,6 +6331,10 @@ static void hpsa_find_board_params(struct ctlr_info *h)
/* Find out what task management functions are supported and cache */
h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
+ if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
+ dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
+ if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+ dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
}
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
@@ -4390,6 +6373,23 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
+static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
+{
+ int i;
+ u32 doorbell_value;
+ unsigned long flags;
+ /* wait until the clear_event_notify bit 6 is cleared by controller. */
+ for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+ spin_lock_irqsave(&h->lock, flags);
+ doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
+ break;
+ /* delay and try again */
+ msleep(20);
+ }
+}
+
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
int i;
@@ -4420,18 +6420,20 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h)
return -ENOTSUPP;
h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+
/* Update the field, and then ring the doorbell */
writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+ writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
print_cfg_table(&h->pdev->dev, h->cfgtable);
- if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
- dev_warn(&h->pdev->dev,
- "unable to get board into simple mode\n");
- return -ENODEV;
- }
+ if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
+ goto error;
h->transMethod = CFGTBL_Trans_Simple;
return 0;
+error:
+ dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
+ return -ENODEV;
}
static int hpsa_pci_init(struct ctlr_info *h)
@@ -4577,11 +6579,19 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct CommandList),
h->cmd_pool, h->cmd_pool_dhandle);
+ if (h->ioaccel2_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
if (h->errinfo_pool)
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
+ if (h->ioaccel_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct io_accel1_cmd),
+ h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
}
static int hpsa_request_irq(struct ctlr_info *h,
@@ -4597,15 +6607,15 @@ static int hpsa_request_irq(struct ctlr_info *h,
for (i = 0; i < MAX_REPLY_QUEUES; i++)
h->q[i] = (u8) i;
- if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
+ if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
/* If performant mode and MSI-X, use multiple reply queues */
- for (i = 0; i < MAX_REPLY_QUEUES; i++)
+ for (i = 0; i < h->msix_vector; i++)
rc = request_irq(h->intr[i], msixhandler,
0, h->devname,
&h->q[i]);
} else {
/* Use single reply pool */
- if (h->msix_vector || h->msi_vector) {
+ if (h->msix_vector > 0 || h->msi_vector) {
rc = request_irq(h->intr[h->intr_mode],
msixhandler, 0, h->devname,
&h->q[h->intr_mode]);
@@ -4658,7 +6668,7 @@ static void free_irqs(struct ctlr_info *h)
return;
}
- for (i = 0; i < MAX_REPLY_QUEUES; i++)
+ for (i = 0; i < h->msix_vector; i++)
free_irq(h->intr[i], &h->q[i]);
}
@@ -4681,6 +6691,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
hpsa_free_irqs_and_disable_msix(h);
hpsa_free_sg_chain_blocks(h);
hpsa_free_cmd_pool(h);
+ kfree(h->ioaccel1_blockFetchTable);
kfree(h->blockFetchTable);
pci_free_consistent(h->pdev, h->reply_pool_size,
h->reply_pool, h->reply_pool_dhandle);
@@ -4760,6 +6771,92 @@ static void detect_controller_lockup(struct ctlr_info *h)
h->last_heartbeat_timestamp = now;
}
+static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+{
+ int i;
+ char *event_type;
+
+ /* Clear the driver-requested rescan flag */
+ h->drv_req_rescan = 0;
+
+ /* Ask the controller to clear the events we're handling. */
+ if ((h->transMethod & (CFGTBL_Trans_io_accel1
+ | CFGTBL_Trans_io_accel2)) &&
+ (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
+ h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
+
+ if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
+ event_type = "state change";
+ if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
+ event_type = "configuration change";
+ /* Stop sending new RAID offload reqs via the IO accelerator */
+ scsi_block_requests(h->scsi_host);
+ for (i = 0; i < h->ndevices; i++)
+ h->dev[i]->offload_enabled = 0;
+ hpsa_drain_accel_commands(h);
+ /* Set 'accelerator path config change' bit */
+ dev_warn(&h->pdev->dev,
+ "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
+ h->events, event_type);
+ writel(h->events, &(h->cfgtable->clear_event_notify));
+ /* Set the "clear event notify field update" bit 6 */
+ writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+ /* Wait until ctlr clears 'clear event notify field', bit 6 */
+ hpsa_wait_for_clear_event_notify_ack(h);
+ scsi_unblock_requests(h->scsi_host);
+ } else {
+ /* Acknowledge controller notification events. */
+ writel(h->events, &(h->cfgtable->clear_event_notify));
+ writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+ hpsa_wait_for_clear_event_notify_ack(h);
+#if 0
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ hpsa_wait_for_mode_change_ack(h);
+#endif
+ }
+ return;
+}
+
+/* Check a register on the controller to see if there are configuration
+ * changes (added/changed/removed logical drives, etc.) which mean that
+ * we should rescan the controller for devices.
+ * Also check flag for driver-initiated rescan.
+ */
+static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
+{
+ if (h->drv_req_rescan)
+ return 1;
+
+ if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
+ return 0;
+
+ h->events = readl(&(h->cfgtable->event_notify));
+ return h->events & RESCAN_REQUIRED_EVENT_BITS;
+}
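For illustration, a minimal standalone sketch (not part of the patch) of the rescan decision above; it mirrors the event bit values this diff adds to hpsa.h, declared unsigned here so the high-bit shifts stay well defined, and the sample event word is made up:

#include <stdio.h>
#include <stdint.h>

#define CTLR_STATE_CHANGE_EVENT				(1u << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1u << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1u << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1u << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1u << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1u << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1u << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
	(CTLR_STATE_CHANGE_EVENT | \
	 CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
	 CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
	 CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
	 CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
	 CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
	 CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)

int main(void)
{
	/* pretend this was read from the event_notify config table field */
	uint32_t events = CTLR_ENCLOSURE_HOT_PLUG_EVENT | (1u << 8);

	printf("events=0x%08x, rescan %s\n", (unsigned int)events,
	       (events & RESCAN_REQUIRED_EVENT_BITS) ? "required" : "not required");
	return 0;
}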
+
+/*
+ * Check if any of the offline devices have become ready
+ */
+static int hpsa_offline_devices_ready(struct ctlr_info *h)
+{
+ unsigned long flags;
+ struct offline_device_entry *d;
+ struct list_head *this, *tmp;
+
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_for_each_safe(this, tmp, &h->offline_device_list) {
+ d = list_entry(this, struct offline_device_entry,
+ offline_list);
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+ if (!hpsa_volume_offline(h, d->scsi3addr))
+ return 1;
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ }
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+ return 0;
+}
+
+
static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
unsigned long flags;
@@ -4768,6 +6865,15 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
detect_controller_lockup(h);
if (h->lockup_detected)
return;
+
+ if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
+ scsi_host_get(h->scsi_host);
+ h->drv_req_rescan = 0;
+ hpsa_ack_ctlr_events(h);
+ hpsa_scan_start(h->scsi_host);
+ scsi_host_put(h->scsi_host);
+ }
+
spin_lock_irqsave(&h->lock, flags);
if (h->remove_in_progress) {
spin_unlock_irqrestore(&h->lock, flags);
@@ -4807,7 +6913,7 @@ reinit_after_soft_reset:
* the 5 lower bits of the address are used by the hardware and by
* the driver. See comments in hpsa.h for more info.
*/
-#define COMMANDLIST_ALIGNMENT 32
+#define COMMANDLIST_ALIGNMENT 128
BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
@@ -4817,7 +6923,9 @@ reinit_after_soft_reset:
h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
INIT_LIST_HEAD(&h->cmpQ);
INIT_LIST_HEAD(&h->reqQ);
+ INIT_LIST_HEAD(&h->offline_device_list);
spin_lock_init(&h->lock);
+ spin_lock_init(&h->offline_device_lock);
spin_lock_init(&h->scan_lock);
spin_lock_init(&h->passthru_count_lock);
rc = hpsa_pci_init(h);
@@ -4859,6 +6967,7 @@ reinit_after_soft_reset:
pci_set_drvdata(pdev, h);
h->ndevices = 0;
+ h->hba_mode_enabled = 0;
h->scsi_host = NULL;
spin_lock_init(&h->devlock);
hpsa_put_ctlr_into_performant_mode(h);
@@ -4918,6 +7027,11 @@ reinit_after_soft_reset:
goto reinit_after_soft_reset;
}
+ /* Enable Accelerated IO path at driver layer */
+ h->acciopath_status = 1;
+
+ h->drv_req_rescan = 0;
+
/* Turn the interrupts on so we can service requests */
h->access.set_intr_mask(h, HPSA_INTR_ON);
@@ -5034,6 +7148,8 @@ static void hpsa_remove_one(struct pci_dev *pdev)
h->reply_pool, h->reply_pool_dhandle);
kfree(h->cmd_pool_bits);
kfree(h->blockFetchTable);
+ kfree(h->ioaccel1_blockFetchTable);
+ kfree(h->ioaccel2_blockFetchTable);
kfree(h->hba_inquiry_data);
pci_disable_device(pdev);
pci_release_regions(pdev);
@@ -5074,20 +7190,17 @@ static struct pci_driver hpsa_pci_driver = {
* bits of the command address.
*/
static void calc_bucket_map(int bucket[], int num_buckets,
- int nsgs, int *bucket_map)
+ int nsgs, int min_blocks, int *bucket_map)
{
int i, j, b, size;
- /* even a command with 0 SGs requires 4 blocks */
-#define MINIMUM_TRANSFER_BLOCKS 4
-#define NUM_BUCKETS 8
/* Note, bucket_map must have nsgs+1 entries. */
for (i = 0; i <= nsgs; i++) {
/* Compute size of a command with i SG entries */
- size = i + MINIMUM_TRANSFER_BLOCKS;
+ size = i + min_blocks;
b = num_buckets; /* Assume the biggest bucket */
/* Find the bucket that is just big enough */
- for (j = 0; j < 8; j++) {
+ for (j = 0; j < num_buckets; j++) {
if (bucket[j] >= size) {
b = j;
break;
@@ -5098,10 +7211,16 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
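As a worked example of the table this builds, here is a small standalone program (not part of the patch) that reproduces the loop above with the performant-mode bft[] values; the fallback to the last bucket stands in for the driver's guarantee that bft[7] always fits:

#include <stdio.h>

/* Same idea as calc_bucket_map(): a command with i SG entries needs
 * i + min_blocks blocks, and we pick the first bucket big enough.
 */
static void demo_bucket_map(const int bucket[], int num_buckets,
			    int nsgs, int min_blocks, int *bucket_map)
{
	int i, j, b, size;

	for (i = 0; i <= nsgs; i++) {
		size = i + min_blocks;
		b = num_buckets - 1;	/* the last bucket always fits here */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		bucket_map[i] = b;
	}
}

int main(void)
{
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 32 + 4};	/* SG_ENTRIES_IN_CMD is 32 */
	int map[32 + 1];
	int i;

	demo_bucket_map(bft, 8, 32, 4, map);
	for (i = 0; i <= 32; i += 8)
		printf("%2d SG entries -> bucket %d (%2d blocks)\n",
		       i, map[i], bft[map[i]]);
	return 0;
}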
-static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
+static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
int i;
unsigned long register_value;
+ unsigned long transMethod = CFGTBL_Trans_Performant |
+ (trans_support & CFGTBL_Trans_use_short_tags) |
+ CFGTBL_Trans_enable_directed_msix |
+ (trans_support & (CFGTBL_Trans_io_accel1 |
+ CFGTBL_Trans_io_accel2));
+ struct access_method access = SA5_performant_access;
/* This is a bit complicated. There are 8 registers on
* the controller which we write to in order to tell it 8 different
@@ -5121,6 +7240,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
* sizes for small commands, and fewer sizes for larger commands.
*/
int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
+#define MIN_IOACCEL2_BFT_ENTRY 5
+#define HPSA_IOACCEL2_HEADER_SZ 4
+ int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19,
+ HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
+ BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
+ BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
+ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
+ 16 * MIN_IOACCEL2_BFT_ENTRY);
+ BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
/* 5 = 1 s/g entry or 4k
* 6 = 2 s/g entry or 8k
@@ -5133,7 +7262,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
bft[7] = SG_ENTRIES_IN_CMD + 4;
calc_bucket_map(bft, ARRAY_SIZE(bft),
- SG_ENTRIES_IN_CMD, h->blockFetchTable);
+ SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
for (i = 0; i < 8; i++)
writel(bft[i], &h->transtable->BlockFetch[i]);
@@ -5150,9 +7279,22 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
&h->transtable->RepQAddr[i].lower);
}
- writel(CFGTBL_Trans_Performant | use_short_tags |
- CFGTBL_Trans_enable_directed_msix,
- &(h->cfgtable->HostWrite.TransportRequest));
+ writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
+ writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
+ /*
+ * Enable outbound interrupt coalescing in accelerator mode.
+ */
+ if (trans_support & CFGTBL_Trans_io_accel1) {
+ access = SA5_ioaccel_mode1_access;
+ writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+ writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+ } else {
+ if (trans_support & CFGTBL_Trans_io_accel2) {
+ access = SA5_ioaccel_mode2_access;
+ writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+ writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+ }
+ }
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
register_value = readl(&(h->cfgtable->TransportActive));
@@ -5162,23 +7304,186 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
return;
}
/* Change the access methods to the performant access methods */
- h->access = SA5_performant_access;
- h->transMethod = CFGTBL_Trans_Performant;
+ h->access = access;
+ h->transMethod = transMethod;
+
+ if (!((trans_support & CFGTBL_Trans_io_accel1) ||
+ (trans_support & CFGTBL_Trans_io_accel2)))
+ return;
+
+ if (trans_support & CFGTBL_Trans_io_accel1) {
+ /* Set up I/O accelerator mode */
+ for (i = 0; i < h->nreply_queues; i++) {
+ writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
+ h->reply_queue[i].current_entry =
+ readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
+ }
+ bft[7] = h->ioaccel_maxsg + 8;
+ calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
+ h->ioaccel1_blockFetchTable);
+
+ /* initialize all reply queue entries to unused */
+ memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
+ h->reply_pool_size);
+
+ /* set all the constant fields in the accelerator command
+ * frames once at init time to save CPU cycles later.
+ */
+ for (i = 0; i < h->nr_cmds; i++) {
+ struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
+
+ cp->function = IOACCEL1_FUNCTION_SCSIIO;
+ cp->err_info = (u32) (h->errinfo_pool_dhandle +
+ (i * sizeof(struct ErrorInfo)));
+ cp->err_info_len = sizeof(struct ErrorInfo);
+ cp->sgl_offset = IOACCEL1_SGLOFFSET;
+ cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
+ cp->timeout_sec = 0;
+ cp->ReplyQueue = 0;
+ cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
+ DIRECT_LOOKUP_BIT;
+ cp->Tag.upper = 0;
+ cp->host_addr.lower =
+ (u32) (h->ioaccel_cmd_pool_dhandle +
+ (i * sizeof(struct io_accel1_cmd)));
+ cp->host_addr.upper = 0;
+ }
+ } else if (trans_support & CFGTBL_Trans_io_accel2) {
+ u64 cfg_offset, cfg_base_addr_index;
+ u32 bft2_offset, cfg_base_addr;
+ int rc;
+
+ rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
+ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
+ bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
+ calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
+ 4, h->ioaccel2_blockFetchTable);
+ bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
+ BUILD_BUG_ON(offsetof(struct CfgTable,
+ io_accel_request_size_offset) != 0xb8);
+ h->ioaccel2_bft2_regs =
+ remap_pci_mem(pci_resource_start(h->pdev,
+ cfg_base_addr_index) +
+ cfg_offset + bft2_offset,
+ ARRAY_SIZE(bft2) *
+ sizeof(*h->ioaccel2_bft2_regs));
+ for (i = 0; i < ARRAY_SIZE(bft2); i++)
+ writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
+ }
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ hpsa_wait_for_mode_change_ack(h);
+}
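The Tag.lower setup in the ioaccel1 init loop above uses the driver's direct-lookup encoding; a standalone sketch (not part of the patch), reusing the DIRECT_LOOKUP_* values from hpsa_cmd.h, shows how a command index round-trips through a tag and why the low bits of the command address must stay clear:

#include <stdio.h>
#include <stdint.h>

#define DIRECT_LOOKUP_SHIFT	5
#define DIRECT_LOOKUP_BIT	0x10

static uint32_t encode_tag(uint32_t cmd_index)
{
	return (cmd_index << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
}

static uint32_t decode_tag(uint32_t tag)
{
	return tag >> DIRECT_LOOKUP_SHIFT;
}

int main(void)
{
	uint32_t i;

	for (i = 0; i < 4; i++) {
		uint32_t tag = encode_tag(i);

		printf("cmd %u -> tag 0x%02x -> cmd %u\n",
		       (unsigned int)i, (unsigned int)tag,
		       (unsigned int)decode_tag(tag));
	}
	return 0;
}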
+
+static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
+{
+ h->ioaccel_maxsg =
+ readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+ if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
+ h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
+
+ /* Command structures must be aligned on a 128-byte boundary
+ * because the 7 lower bits of the address are used by the
+ * hardware.
+ */
+#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
+ BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
+ IOACCEL1_COMMANDLIST_ALIGNMENT);
+ h->ioaccel_cmd_pool =
+ pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+ &(h->ioaccel_cmd_pool_dhandle));
+
+ h->ioaccel1_blockFetchTable =
+ kmalloc(((h->ioaccel_maxsg + 1) *
+ sizeof(u32)), GFP_KERNEL);
+
+ if ((h->ioaccel_cmd_pool == NULL) ||
+ (h->ioaccel1_blockFetchTable == NULL))
+ goto clean_up;
+
+ memset(h->ioaccel_cmd_pool, 0,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
+ return 0;
+
+clean_up:
+ if (h->ioaccel_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+ h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
+ kfree(h->ioaccel1_blockFetchTable);
+ return 1;
+}
+
+static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
+{
+ /* Allocate ioaccel2 mode command blocks and block fetch table */
+
+ h->ioaccel_maxsg =
+ readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+ if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
+ h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
+
+#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
+ BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
+ IOACCEL2_COMMANDLIST_ALIGNMENT);
+ h->ioaccel2_cmd_pool =
+ pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ &(h->ioaccel2_cmd_pool_dhandle));
+
+ h->ioaccel2_blockFetchTable =
+ kmalloc(((h->ioaccel_maxsg + 1) *
+ sizeof(u32)), GFP_KERNEL);
+
+ if ((h->ioaccel2_cmd_pool == NULL) ||
+ (h->ioaccel2_blockFetchTable == NULL))
+ goto clean_up;
+
+ memset(h->ioaccel2_cmd_pool, 0,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
+ return 0;
+
+clean_up:
+ if (h->ioaccel2_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
+ kfree(h->ioaccel2_blockFetchTable);
+ return 1;
}
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
+ unsigned long transMethod = CFGTBL_Trans_Performant |
+ CFGTBL_Trans_use_short_tags;
int i;
if (hpsa_simple_mode)
return;
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & PERFORMANT_MODE))
return;
+ /* Check for I/O accelerator mode support. Note that trans_support
+ * must be read from the config table before these bits are tested.
+ */
+ if (trans_support & CFGTBL_Trans_io_accel1) {
+ transMethod |= CFGTBL_Trans_io_accel1 |
+ CFGTBL_Trans_enable_directed_msix;
+ if (hpsa_alloc_ioaccel_cmd_and_bft(h))
+ goto clean_up;
+ } else {
+ if (trans_support & CFGTBL_Trans_io_accel2) {
+ transMethod |= CFGTBL_Trans_io_accel2 |
+ CFGTBL_Trans_enable_directed_msix;
+ if (ioaccel2_alloc_cmds_and_bft(h))
+ goto clean_up;
+ }
+ }
+
+ /* TODO: check that the h->nreply_queues computation below is correct */
- h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
+ h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
hpsa_get_max_perf_mode_cmds(h);
/* Performant mode ring buffer and supporting data structures */
h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
@@ -5200,9 +7505,7 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
|| (h->blockFetchTable == NULL))
goto clean_up;
- hpsa_enter_performant_mode(h,
- trans_support & CFGTBL_Trans_use_short_tags);
-
+ hpsa_enter_performant_mode(h, trans_support);
return;
clean_up:
@@ -5212,6 +7515,31 @@ clean_up:
kfree(h->blockFetchTable);
}
+static int is_accelerated_cmd(struct CommandList *c)
+{
+ return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
+}
+
+static void hpsa_drain_accel_commands(struct ctlr_info *h)
+{
+ struct CommandList *c = NULL;
+ unsigned long flags;
+ int accel_cmds_out;
+
+ do { /* wait for all outstanding commands to drain out */
+ accel_cmds_out = 0;
+ spin_lock_irqsave(&h->lock, flags);
+ list_for_each_entry(c, &h->cmpQ, list)
+ accel_cmds_out += is_accelerated_cmd(c);
+ list_for_each_entry(c, &h->reqQ, list)
+ accel_cmds_out += is_accelerated_cmd(c);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (accel_cmds_out <= 0)
+ break;
+ msleep(100);
+ } while (1);
+}
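A minimal userspace sketch (not part of the patch) of the same drain pattern: poll an outstanding-command count and sleep 100 ms between checks until it reaches zero; the atomic counter here stands in for the queue walk done under h->lock:

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_int accel_cmds_out = 3;

static void drain_accel_commands(void)
{
	const struct timespec delay = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	for (;;) {
		int outstanding = atomic_load(&accel_cmds_out);

		if (outstanding <= 0)
			break;
		printf("%d accelerated commands still outstanding\n", outstanding);
		nanosleep(&delay, NULL);
		/* pretend one completion arrived while we slept */
		atomic_fetch_sub(&accel_cmds_out, 1);
	}
}

int main(void)
{
	drain_accel_commands();
	printf("drained\n");
	return 0;
}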
+
/*
* This is it. Register the PCI driver information for the cards we control
* the OS will call our registered routines when it finds one of our cards.
@@ -5226,5 +7554,83 @@ static void __exit hpsa_cleanup(void)
pci_unregister_driver(&hpsa_pci_driver);
}
+static void __attribute__((unused)) verify_offsets(void)
+{
+#define VERIFY_OFFSET(member, offset) \
+ BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
+
+ VERIFY_OFFSET(structure_size, 0);
+ VERIFY_OFFSET(volume_blk_size, 4);
+ VERIFY_OFFSET(volume_blk_cnt, 8);
+ VERIFY_OFFSET(phys_blk_shift, 16);
+ VERIFY_OFFSET(parity_rotation_shift, 17);
+ VERIFY_OFFSET(strip_size, 18);
+ VERIFY_OFFSET(disk_starting_blk, 20);
+ VERIFY_OFFSET(disk_blk_cnt, 28);
+ VERIFY_OFFSET(data_disks_per_row, 36);
+ VERIFY_OFFSET(metadata_disks_per_row, 38);
+ VERIFY_OFFSET(row_cnt, 40);
+ VERIFY_OFFSET(layout_map_count, 42);
+ VERIFY_OFFSET(flags, 44);
+ VERIFY_OFFSET(dekindex, 46);
+ /* VERIFY_OFFSET(reserved, 48) */
+ VERIFY_OFFSET(data, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
+
+ VERIFY_OFFSET(IU_type, 0);
+ VERIFY_OFFSET(direction, 1);
+ VERIFY_OFFSET(reply_queue, 2);
+ /* VERIFY_OFFSET(reserved1, 3); */
+ VERIFY_OFFSET(scsi_nexus, 4);
+ VERIFY_OFFSET(Tag, 8);
+ VERIFY_OFFSET(cdb, 16);
+ VERIFY_OFFSET(cciss_lun, 32);
+ VERIFY_OFFSET(data_len, 40);
+ VERIFY_OFFSET(cmd_priority_task_attr, 44);
+ VERIFY_OFFSET(sg_count, 45);
+ /* VERIFY_OFFSET(reserved3 */
+ VERIFY_OFFSET(err_ptr, 48);
+ VERIFY_OFFSET(err_len, 56);
+ /* VERIFY_OFFSET(reserved4 */
+ VERIFY_OFFSET(sg, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+ BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
+
+ VERIFY_OFFSET(dev_handle, 0x00);
+ VERIFY_OFFSET(reserved1, 0x02);
+ VERIFY_OFFSET(function, 0x03);
+ VERIFY_OFFSET(reserved2, 0x04);
+ VERIFY_OFFSET(err_info, 0x0C);
+ VERIFY_OFFSET(reserved3, 0x10);
+ VERIFY_OFFSET(err_info_len, 0x12);
+ VERIFY_OFFSET(reserved4, 0x13);
+ VERIFY_OFFSET(sgl_offset, 0x14);
+ VERIFY_OFFSET(reserved5, 0x15);
+ VERIFY_OFFSET(transfer_len, 0x1C);
+ VERIFY_OFFSET(reserved6, 0x20);
+ VERIFY_OFFSET(io_flags, 0x24);
+ VERIFY_OFFSET(reserved7, 0x26);
+ VERIFY_OFFSET(LUN, 0x34);
+ VERIFY_OFFSET(control, 0x3C);
+ VERIFY_OFFSET(CDB, 0x40);
+ VERIFY_OFFSET(reserved8, 0x50);
+ VERIFY_OFFSET(host_context_flags, 0x60);
+ VERIFY_OFFSET(timeout_sec, 0x62);
+ VERIFY_OFFSET(ReplyQueue, 0x64);
+ VERIFY_OFFSET(reserved9, 0x65);
+ VERIFY_OFFSET(Tag, 0x68);
+ VERIFY_OFFSET(host_addr, 0x70);
+ VERIFY_OFFSET(CISS_LUN, 0x78);
+ VERIFY_OFFSET(SG, 0x78 + 8);
+#undef VERIFY_OFFSET
+}
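The BUILD_BUG_ON() checks above have a simple userspace analogue; this standalone sketch (not part of the patch) uses C11 _Static_assert on a cut-down stand-in struct, whose layout is illustrative only and not the real io_accel1_cmd:

#include <stddef.h>
#include <stdint.h>

/* A stand-in layout for illustration only; offsets differ from the
 * real firmware structures.
 */
struct demo_cmd {
	uint16_t dev_handle;	/* 0x00 - 0x01 */
	uint8_t  reserved1;	/* 0x02 */
	uint8_t  function;	/* 0x03 */
	uint32_t err_info;	/* 0x04 - 0x07 */
};

#define VERIFY_OFFSET(member, offset) \
	_Static_assert(offsetof(struct demo_cmd, member) == (offset), \
		       "unexpected offset for " #member)

VERIFY_OFFSET(dev_handle, 0x00);
VERIFY_OFFSET(reserved1, 0x02);
VERIFY_OFFSET(function, 0x03);
VERIFY_OFFSET(err_info, 0x04);

#undef VERIFY_OFFSET

int main(void)
{
	return 0;
}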
+
module_init(hpsa_init);
module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 01c328349c83..44235a27e1b6 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,6 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
- * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -46,6 +46,15 @@ struct hpsa_scsi_dev_t {
unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
unsigned char model[16]; /* bytes 16-31 of inquiry data */
unsigned char raid_level; /* from inquiry page 0xC1 */
+ unsigned char volume_offline; /* discovered via TUR or VPD */
+ u32 ioaccel_handle;
+ int offload_config; /* I/O accel RAID offload configured */
+ int offload_enabled; /* I/O accel RAID offload enabled */
+ int offload_to_mirror; /* Send next I/O accelerator RAID
+ * offload request to mirror drive
+ */
+ struct raid_map_data raid_map; /* I/O accelerator RAID map */
+
};
struct reply_pool {
@@ -55,6 +64,46 @@ struct reply_pool {
u32 current_entry;
};
+#pragma pack(1)
+struct bmic_controller_parameters {
+ u8 led_flags;
+ u8 enable_command_list_verification;
+ u8 backed_out_write_drives;
+ u16 stripes_for_parity;
+ u8 parity_distribution_mode_flags;
+ u16 max_driver_requests;
+ u16 elevator_trend_count;
+ u8 disable_elevator;
+ u8 force_scan_complete;
+ u8 scsi_transfer_mode;
+ u8 force_narrow;
+ u8 rebuild_priority;
+ u8 expand_priority;
+ u8 host_sdb_asic_fix;
+ u8 pdpi_burst_from_host_disabled;
+ char software_name[64];
+ char hardware_name[32];
+ u8 bridge_revision;
+ u8 snapshot_priority;
+ u32 os_specific;
+ u8 post_prompt_timeout;
+ u8 automatic_drive_slamming;
+ u8 reserved1;
+ u8 nvram_flags;
+ u8 cache_nvram_flags;
+ u8 drive_config_flags;
+ u16 reserved2;
+ u8 temp_warning_level;
+ u8 temp_shutdown_level;
+ u8 temp_condition_reset;
+ u8 max_coalesce_commands;
+ u32 max_coalesce_delay;
+ u8 orca_password[4];
+ u8 access_id[16];
+ u8 reserved[356];
+};
+#pragma pack()
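A short standalone demo (not part of the patch) of what the #pragma pack(1) wrapper does to a layout like the one above, using an illustrative subset of fields and assuming a compiler that honors #pragma pack:

#include <stdio.h>
#include <stdint.h>

#pragma pack(1)
struct packed_params {
	uint8_t  led_flags;
	uint16_t stripes_for_parity;
	uint8_t  rebuild_priority;
};
#pragma pack()

struct natural_params {
	uint8_t  led_flags;
	uint16_t stripes_for_parity;
	uint8_t  rebuild_priority;
};

int main(void)
{
	printf("packed:  %zu bytes\n", sizeof(struct packed_params));	/* 4 */
	printf("natural: %zu bytes\n", sizeof(struct natural_params));	/* typically 6 */
	return 0;
}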
+
struct ctlr_info {
int ctlr;
char devname[8];
@@ -80,6 +129,7 @@ struct ctlr_info {
unsigned int msi_vector;
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access;
+ char hba_mode_enabled;
/* queue and queue Info */
struct list_head reqQ;
@@ -95,6 +145,10 @@ struct ctlr_info {
/* pointers to command and error info pool */
struct CommandList *cmd_pool;
dma_addr_t cmd_pool_dhandle;
+ struct io_accel1_cmd *ioaccel_cmd_pool;
+ dma_addr_t ioaccel_cmd_pool_dhandle;
+ struct io_accel2_cmd *ioaccel2_cmd_pool;
+ dma_addr_t ioaccel2_cmd_pool_dhandle;
struct ErrorInfo *errinfo_pool;
dma_addr_t errinfo_pool_dhandle;
unsigned long *cmd_pool_bits;
@@ -128,7 +182,14 @@ struct ctlr_info {
u8 nreply_queues;
dma_addr_t reply_pool_dhandle;
u32 *blockFetchTable;
+ u32 *ioaccel1_blockFetchTable;
+ u32 *ioaccel2_blockFetchTable;
+ u32 *ioaccel2_bft2_regs;
unsigned char *hba_inquiry_data;
+ u32 driver_support;
+ u32 fw_support;
+ int ioaccel_support;
+ int ioaccel_maxsg;
u64 last_intr_timestamp;
u32 last_heartbeat;
u64 last_heartbeat_timestamp;
@@ -161,7 +222,35 @@ struct ctlr_info {
#define HPSATMF_LOG_QRY_TASK (1 << 23)
#define HPSATMF_LOG_QRY_TSET (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC (1 << 25)
+ u32 events;
+#define CTLR_STATE_CHANGE_EVENT (1 << 0)
+#define CTLR_ENCLOSURE_HOT_PLUG_EVENT (1 << 1)
+#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV (1 << 4)
+#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV (1 << 5)
+#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL (1 << 6)
+#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED (1 << 30)
+#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31)
+
+#define RESCAN_REQUIRED_EVENT_BITS \
+ (CTLR_STATE_CHANGE_EVENT | \
+ CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
+ CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
+ CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
+ CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
+ CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
+ CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
+ spinlock_t offline_device_lock;
+ struct list_head offline_device_list;
+ int acciopath_status;
+ int drv_req_rescan; /* flag for driver to request rescan event */
+ int raid_offload_debug;
};
+
+struct offline_device_entry {
+ unsigned char scsi3addr[8];
+ struct list_head offline_list;
+};
+
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
@@ -242,6 +331,14 @@ struct ctlr_info {
#define HPSA_INTR_ON 1
#define HPSA_INTR_OFF 0
+
+/*
+ * Inbound Post Queue offsets for IO Accelerator Mode 2
+ */
+#define IOACCEL2_INBOUND_POSTQ_32 0x48
+#define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0
+#define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4
+
/*
Send the command to the hardware
*/
@@ -254,6 +351,18 @@ static void SA5_submit_command(struct ctlr_info *h,
(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}
+static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
+ c->Header.Tag.lower);
+ if (c->cmd_type == CMD_IOACCEL2)
+ writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
+ else
+ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+}
+
/*
* This card is the opposite of the other cards.
* 0 turns interrupts on...
@@ -387,6 +496,50 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}
+#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100
+
+static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
+{
+ unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
+
+ return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
+ true : false;
+}
+
+#define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0
+#define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8
+#define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC
+#define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL
+
+static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
+{
+ u64 register_value;
+ struct reply_pool *rq = &h->reply_queue[q];
+ unsigned long flags;
+
+ BUG_ON(q >= h->nreply_queues);
+
+ register_value = rq->head[rq->current_entry];
+ if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
+ rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
+ if (++rq->current_entry == rq->size)
+ rq->current_entry = 0;
+ /*
+ * @todo
+ *
+ * Don't really need to write the new index after each command,
+ * but with current driver design this is easiest.
+ */
+ wmb();
+ writel((q << 24) | rq->current_entry, h->vaddr +
+ IOACCEL_MODE1_CONSUMER_INDEX);
+ spin_lock_irqsave(&h->lock, flags);
+ h->commands_outstanding--;
+ spin_unlock_irqrestore(&h->lock, flags);
+ }
+ return (unsigned long) register_value;
+}
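For reference, a standalone userspace model (not part of the patch) of the mode-1 reply ring walked above: slots start at the unused sentinel, the consumer clears each slot it takes, wraps its index, and the consumer-index register write is simulated with printf:

#include <stdio.h>
#include <stdint.h>

#define REPLY_UNUSED	0xFFFFFFFFFFFFFFFFULL
#define RING_SIZE	8

static uint64_t ring[RING_SIZE];
static unsigned int current_entry;

static uint64_t consume_one(unsigned int q)
{
	uint64_t v = ring[current_entry];

	if (v == REPLY_UNUSED)
		return REPLY_UNUSED;	/* nothing new from the "hardware" */
	ring[current_entry] = REPLY_UNUSED;
	if (++current_entry == RING_SIZE)
		current_entry = 0;
	/* stands in for the (q << 24) | index CONSUMER_INDEX write */
	printf("consumer index <- queue %u, entry %u\n", q, current_entry);
	return v;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < RING_SIZE; i++)
		ring[i] = REPLY_UNUSED;
	ring[0] = 0x1234;	/* one completion posted by the "hardware" */

	printf("got 0x%llx\n", (unsigned long long)consume_one(0));
	printf("got 0x%llx\n", (unsigned long long)consume_one(0));
	return 0;
}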
+
static struct access_method SA5_access = {
SA5_submit_command,
SA5_intr_mask,
@@ -395,6 +548,22 @@ static struct access_method SA5_access = {
SA5_completed,
};
+static struct access_method SA5_ioaccel_mode1_access = {
+ SA5_submit_command,
+ SA5_performant_intr_mask,
+ SA5_fifo_full,
+ SA5_ioaccel_mode1_intr_pending,
+ SA5_ioaccel_mode1_completed,
+};
+
+static struct access_method SA5_ioaccel_mode2_access = {
+ SA5_submit_command_ioaccel2,
+ SA5_performant_intr_mask,
+ SA5_fifo_full,
+ SA5_performant_intr_pending,
+ SA5_performant_completed,
+};
+
static struct access_method SA5_performant_access = {
SA5_submit_command,
SA5_performant_intr_mask,
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index bfc8c4ea66f8..b5cc7052339f 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,6 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
- * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@
#define SENSEINFOBYTES 32 /* may vary between hbas */
#define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */
#define HPSA_SG_CHAIN 0x80000000
+#define HPSA_SG_LAST 0x40000000
#define MAXREPLYQS 256
/* Command Status value */
@@ -41,6 +42,8 @@
#define CMD_UNSOLICITED_ABORT 0x000A
#define CMD_TIMEOUT 0x000B
#define CMD_UNABORTABLE 0x000C
+#define CMD_IOACCEL_DISABLED 0x000E
+
/* Unit Attentions ASC's as defined for the MSA2012sa */
#define POWER_OR_RESET 0x29
@@ -79,8 +82,9 @@
#define ATTR_ACA 0x07
/* cdb type */
-#define TYPE_CMD 0x00
-#define TYPE_MSG 0x01
+#define TYPE_CMD 0x00
+#define TYPE_MSG 0x01
+#define TYPE_IOACCEL2_CMD 0x81 /* 0x81 is not used by hardware */
/* Message Types */
#define HPSA_TASK_MANAGEMENT 0x00
@@ -125,9 +129,12 @@
#define CFGTBL_AccCmds 0x00000001l
#define DOORBELL_CTLR_RESET 0x00000004l
#define DOORBELL_CTLR_RESET2 0x00000020l
+#define DOORBELL_CLEAR_EVENTS 0x00000040l
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_io_accel1 0x00000080l
+#define CFGTBL_Trans_io_accel2 0x00000100l
#define CFGTBL_Trans_use_short_tags 0x20000000l
#define CFGTBL_Trans_enable_directed_msix (1 << 30)
@@ -135,6 +142,28 @@
#define CFGTBL_BusType_Ultra3 0x00000002l
#define CFGTBL_BusType_Fibre1G 0x00000100l
#define CFGTBL_BusType_Fibre2G 0x00000200l
+
+/* VPD Inquiry types */
+#define HPSA_VPD_SUPPORTED_PAGES 0x00
+#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1
+#define HPSA_VPD_LV_IOACCEL_STATUS 0xC2
+#define HPSA_VPD_LV_STATUS 0xC3
+#define HPSA_VPD_HEADER_SZ 4
+
+/* Logical volume states */
+#define HPSA_VPD_LV_STATUS_UNSUPPORTED -1
+#define HPSA_LV_OK 0x0
+#define HPSA_LV_UNDERGOING_ERASE 0x0F
+#define HPSA_LV_UNDERGOING_RPI 0x12
+#define HPSA_LV_PENDING_RPI 0x13
+#define HPSA_LV_ENCRYPTED_NO_KEY 0x14
+#define HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15
+#define HPSA_LV_UNDERGOING_ENCRYPTION 0x16
+#define HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17
+#define HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18
+#define HPSA_LV_PENDING_ENCRYPTION 0x19
+#define HPSA_LV_PENDING_ENCRYPTION_REKEYING 0x1A
+
struct vals32 {
u32 lower;
u32 upper;
@@ -162,9 +191,50 @@ struct InquiryData {
#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
+#define HPSA_REPORT_PHYS_EXTENDED 0x02
+#define HPSA_CISS_READ 0xc0 /* CISS Read */
+#define HPSA_GET_RAID_MAP 0xc8 /* CISS Get RAID Layout Map */
+
+#define RAID_MAP_MAX_ENTRIES 256
+
+struct raid_map_disk_data {
+ u32 ioaccel_handle; /**< Handle to access this disk via the
+ * I/O accelerator */
+ u8 xor_mult[2]; /**< XOR multipliers for this position,
+ * valid for data disks only */
+ u8 reserved[2];
+};
+
+struct raid_map_data {
+ u32 structure_size; /* Size of entire structure in bytes */
+ u32 volume_blk_size; /* bytes / block in the volume */
+ u64 volume_blk_cnt; /* logical blocks on the volume */
+ u8 phys_blk_shift; /* Shift factor to convert between
+ * units of logical blocks and physical
+ * disk blocks */
+ u8 parity_rotation_shift; /* Shift factor to convert between units
+ * of logical stripes and physical
+ * stripes */
+ u16 strip_size; /* blocks used on each disk / stripe */
+ u64 disk_starting_blk; /* First disk block used in volume */
+ u64 disk_blk_cnt; /* disk blocks used by volume / disk */
+ u16 data_disks_per_row; /* data disk entries / row in the map */
+ u16 metadata_disks_per_row; /* mirror/parity disk entries / row
+ * in the map */
+ u16 row_cnt; /* rows in each layout map */
+ u16 layout_map_count; /* layout maps (1 map per mirror/parity
+ * group) */
+ u16 flags; /* Bit 0 set if encryption enabled */
+#define RAID_MAP_FLAG_ENCRYPT_ON 0x01
+ u16 dekindex; /* Data encryption key index. */
+ u8 reserved[16];
+ struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
+};
+
struct ReportLUNdata {
u8 LUNListLength[4];
- u32 reserved;
+ u8 extended_response_flag;
+ u8 reserved[3];
u8 LUN[HPSA_MAX_LUN][8];
};
@@ -187,6 +257,7 @@ struct SenseSubsystem_info {
#define BMIC_CACHE_FLUSH 0xc2
#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
#define BMIC_FLASH_FIRMWARE 0xF7
+#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
/* Command List Structure */
union SCSI3Addr {
@@ -283,6 +354,8 @@ struct ErrorInfo {
/* Command types */
#define CMD_IOCTL_PEND 0x01
#define CMD_SCSI 0x03
+#define CMD_IOACCEL1 0x04
+#define CMD_IOACCEL2 0x05
#define DIRECT_LOOKUP_SHIFT 5
#define DIRECT_LOOKUP_BIT 0x10
@@ -314,7 +387,6 @@ struct CommandList {
int cmd_type;
long cmdindex;
struct list_head list;
- struct request *rq;
struct completion *waiting;
void *scsi_cmd;
@@ -327,16 +399,183 @@ struct CommandList {
*/
#define IS_32_BIT ((8 - sizeof(long))/4)
#define IS_64_BIT (!IS_32_BIT)
-#define PAD_32 (4)
-#define PAD_64 (4)
+#define PAD_32 (40)
+#define PAD_64 (12)
#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
u8 pad[COMMANDLIST_PAD];
};
+/* Max S/G elements in I/O accelerator command */
+#define IOACCEL1_MAXSGENTRIES 24
+#define IOACCEL2_MAXSGENTRIES 28
+
+/*
+ * Structure for I/O accelerator (mode 1) commands.
+ * Note that this structure must be 128-byte aligned in size.
+ */
+struct io_accel1_cmd {
+ u16 dev_handle; /* 0x00 - 0x01 */
+ u8 reserved1; /* 0x02 */
+ u8 function; /* 0x03 */
+ u8 reserved2[8]; /* 0x04 - 0x0B */
+ u32 err_info; /* 0x0C - 0x0F */
+ u8 reserved3[2]; /* 0x10 - 0x11 */
+ u8 err_info_len; /* 0x12 */
+ u8 reserved4; /* 0x13 */
+ u8 sgl_offset; /* 0x14 */
+ u8 reserved5[7]; /* 0x15 - 0x1B */
+ u32 transfer_len; /* 0x1C - 0x1F */
+ u8 reserved6[4]; /* 0x20 - 0x23 */
+ u16 io_flags; /* 0x24 - 0x25 */
+ u8 reserved7[14]; /* 0x26 - 0x33 */
+ u8 LUN[8]; /* 0x34 - 0x3B */
+ u32 control; /* 0x3C - 0x3F */
+ u8 CDB[16]; /* 0x40 - 0x4F */
+ u8 reserved8[16]; /* 0x50 - 0x5F */
+ u16 host_context_flags; /* 0x60 - 0x61 */
+ u16 timeout_sec; /* 0x62 - 0x63 */
+ u8 ReplyQueue; /* 0x64 */
+ u8 reserved9[3]; /* 0x65 - 0x67 */
+ struct vals32 Tag; /* 0x68 - 0x6F */
+ struct vals32 host_addr; /* 0x70 - 0x77 */
+ u8 CISS_LUN[8]; /* 0x78 - 0x7F */
+ struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
+#define IOACCEL1_PAD_64 0
+#define IOACCEL1_PAD_32 0
+#define IOACCEL1_PAD (IS_32_BIT * IOACCEL1_PAD_32 + \
+ IS_64_BIT * IOACCEL1_PAD_64)
+ u8 pad[IOACCEL1_PAD];
+};
+
+#define IOACCEL1_FUNCTION_SCSIIO 0x00
+#define IOACCEL1_SGLOFFSET 32
+
+#define IOACCEL1_IOFLAGS_IO_REQ 0x4000
+#define IOACCEL1_IOFLAGS_CDBLEN_MASK 0x001F
+#define IOACCEL1_IOFLAGS_CDBLEN_MAX 16
+
+#define IOACCEL1_CONTROL_NODATAXFER 0x00000000
+#define IOACCEL1_CONTROL_DATA_OUT 0x01000000
+#define IOACCEL1_CONTROL_DATA_IN 0x02000000
+#define IOACCEL1_CONTROL_TASKPRIO_MASK 0x00007800
+#define IOACCEL1_CONTROL_TASKPRIO_SHIFT 11
+#define IOACCEL1_CONTROL_SIMPLEQUEUE 0x00000000
+#define IOACCEL1_CONTROL_HEADOFQUEUE 0x00000100
+#define IOACCEL1_CONTROL_ORDEREDQUEUE 0x00000200
+#define IOACCEL1_CONTROL_ACA 0x00000400
+
+#define IOACCEL1_HCFLAGS_CISS_FORMAT 0x0013
+
+#define IOACCEL1_BUSADDR_CMDTYPE 0x00000060
+
+struct ioaccel2_sg_element {
+ u64 address;
+ u32 length;
+ u8 reserved[3];
+ u8 chain_indicator;
+#define IOACCEL2_CHAIN 0x80
+};
+
+/*
+ * SCSI Response Format structure for IO Accelerator Mode 2
+ */
+struct io_accel2_scsi_response {
+ u8 IU_type;
+#define IOACCEL2_IU_TYPE_SRF 0x60
+ u8 reserved1[3];
+ u8 req_id[4]; /* request identifier */
+ u8 reserved2[4];
+ u8 serv_response; /* service response */
+#define IOACCEL2_SERV_RESPONSE_COMPLETE 0x000
+#define IOACCEL2_SERV_RESPONSE_FAILURE 0x001
+#define IOACCEL2_SERV_RESPONSE_TMF_COMPLETE 0x002
+#define IOACCEL2_SERV_RESPONSE_TMF_SUCCESS 0x003
+#define IOACCEL2_SERV_RESPONSE_TMF_REJECTED 0x004
+#define IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN 0x005
+ u8 status; /* status */
+#define IOACCEL2_STATUS_SR_TASK_COMP_GOOD 0x00
+#define IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND 0x02
+#define IOACCEL2_STATUS_SR_TASK_COMP_BUSY 0x08
+#define IOACCEL2_STATUS_SR_TASK_COMP_RES_CON 0x18
+#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL 0x28
+#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED 0x40
+#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED 0x0E
+ u8 data_present; /* low 2 bits */
+#define IOACCEL2_NO_DATAPRESENT 0x000
+#define IOACCEL2_RESPONSE_DATAPRESENT 0x001
+#define IOACCEL2_SENSE_DATA_PRESENT 0x002
+#define IOACCEL2_RESERVED 0x003
+ u8 sense_data_len; /* sense/response data length */
+ u8 resid_cnt[4]; /* residual count */
+ u8 sense_data_buff[32]; /* sense/response data buffer */
+};
+
+#define IOACCEL2_64_PAD 76
+#define IOACCEL2_32_PAD 76
+#define IOACCEL2_PAD (IS_32_BIT * IOACCEL2_32_PAD + \
+ IS_64_BIT * IOACCEL2_64_PAD)
+/*
+ * Structure for I/O accelerator (mode 2 or m2) commands.
+ * Note that this structure must be 128-byte aligned in size.
+ */
+struct io_accel2_cmd {
+ u8 IU_type; /* IU Type */
+ u8 direction; /* direction, memtype, and encryption */
+#define IOACCEL2_DIRECTION_MASK 0x03 /* bits 0,1: direction */
+#define IOACCEL2_DIRECTION_MEMTYPE_MASK 0x04 /* bit 2: memtype source/dest */
+ /* 0b=PCIe, 1b=DDR */
+#define IOACCEL2_DIRECTION_ENCRYPT_MASK 0x08 /* bit 3: encryption flag */
+ /* 0=off, 1=on */
+ u8 reply_queue; /* Reply Queue ID */
+ u8 reserved1; /* Reserved */
+ u32 scsi_nexus; /* Device Handle */
+ u32 Tag; /* cciss tag, lower 4 bytes only */
+ u32 tweak_lower; /* Encryption tweak, lower 4 bytes */
+ u8 cdb[16]; /* SCSI Command Descriptor Block */
+ u8 cciss_lun[8]; /* 8 byte SCSI address */
+ u32 data_len; /* Total bytes to transfer */
+ u8 cmd_priority_task_attr; /* priority and task attrs */
+#define IOACCEL2_PRIORITY_MASK 0x78
+#define IOACCEL2_ATTR_MASK 0x07
+ u8 sg_count; /* Number of sg elements */
+ u16 dekindex; /* Data encryption key index */
+ u64 err_ptr; /* Error Pointer */
+ u32 err_len; /* Error Length*/
+ u32 tweak_upper; /* Encryption tweak, upper 4 bytes */
+ struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
+ struct io_accel2_scsi_response error_data;
+ u8 pad[IOACCEL2_PAD];
+};
+
+/*
+ * defines for Mode 2 command struct
+ * FIXME: this can't be all I need mfm
+ */
+#define IOACCEL2_IU_TYPE 0x40
+#define IOACCEL2_IU_TMF_TYPE 0x41
+#define IOACCEL2_DIR_NO_DATA 0x00
+#define IOACCEL2_DIR_DATA_IN 0x01
+#define IOACCEL2_DIR_DATA_OUT 0x02
+/*
+ * SCSI Task Management Request format for Accelerator Mode 2
+ */
+struct hpsa_tmf_struct {
+ u8 iu_type; /* Information Unit Type */
+ u8 reply_queue; /* Reply Queue ID */
+ u8 tmf; /* Task Management Function */
+ u8 reserved1; /* byte 3 Reserved */
+ u32 it_nexus; /* SCSI I-T Nexus */
+ u8 lun_id[8]; /* LUN ID for TMF request */
+ struct vals32 Tag; /* cciss tag associated w/ request */
+ struct vals32 abort_tag;/* cciss tag of SCSI cmd or task to abort */
+ u64 error_ptr; /* Error Pointer */
+ u32 error_len; /* Error Length */
+};
+
/* Configuration Table Structure */
struct HostWrite {
u32 TransportRequest;
- u32 Reserved;
+ u32 command_pool_addr_hi;
u32 CoalIntDelay;
u32 CoalIntCount;
};
@@ -344,6 +583,9 @@ struct HostWrite {
#define SIMPLE_MODE 0x02
#define PERFORMANT_MODE 0x04
#define MEMQ_MODE 0x08
+#define IOACCEL_MODE_1 0x80
+
+#define DRIVER_SUPPORT_UA_ENABLE 0x00000001
struct CfgTable {
u8 Signature[4];
@@ -373,8 +615,18 @@ struct CfgTable {
u32 misc_fw_support; /* offset 0x78 */
#define MISC_FW_DOORBELL_RESET (0x02)
#define MISC_FW_DOORBELL_RESET2 (0x010)
+#define MISC_FW_RAID_OFFLOAD_BASIC (0x020)
+#define MISC_FW_EVENT_NOTIFY (0x080)
u8 driver_version[32];
-
+ u32 max_cached_write_size;
+ u8 driver_scratchpad[16];
+ u32 max_error_info_length;
+ u32 io_accel_max_embedded_sg_count;
+ u32 io_accel_request_size_offset;
+ u32 event_notify;
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
+ u32 clear_event_notify;
};
#define NUM_BLOCKFETCH_ENTRIES 8
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index bf9eca845166..56f8a861ed72 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -589,7 +589,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
}
err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
- IRQF_DISABLED, "ibmvstgt", target);
+ 0, "ibmvstgt", target);
if (err)
goto req_irq_failed;
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index bf028218ac36..b1c4d831137d 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -2015,7 +2015,7 @@ static int __init in2000_detect(struct scsi_host_template * tpnt)
write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */
write1_io(0, IO_INTR_MASK); /* allow all ints */
x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
- if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) {
+ if (request_irq(x, in2000_intr, 0, "in2000", instance)) {
printk("in2000_detect: Unable to allocate IRQ.\n");
detect_count--;
continue;
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 280d5af113d1..e5dae7b54d9a 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2931,7 +2931,7 @@ static int initio_probe_one(struct pci_dev *pdev,
shost->base = host->addr;
shost->sg_tablesize = TOTAL_SG_ENTRY;
- error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost);
+ error = request_irq(pdev->irq, i91u_intr, IRQF_SHARED, "i91u", shost);
if (error < 0) {
printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
goto out_free_scbs;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3f5b56a99892..924b0ba74dfe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1143,6 +1143,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
res->add_to_ml = 0;
res->del_from_ml = 0;
res->resetting_device = 0;
+ res->reset_occurred = 0;
res->sdev = NULL;
res->sata_port = NULL;
@@ -2367,6 +2368,42 @@ static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
}
/**
+ * ipr_log_sis64_device_error - Log a sis64 device error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_21_error *error;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ error = &hostrcb->hcam.u.error64.u.type_21_error;
+
+ ipr_err("-----Failing Device Information-----\n");
+ ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
+ be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
+ be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
+ ipr_err("Device Resource Path: %s\n",
+ __ipr_format_res_path(error->res_path,
+ buffer, sizeof(buffer)));
+ error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
+ error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
+ ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
+ ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
+ ipr_err("SCSI Sense Data:\n");
+ ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
+ ipr_err("SCSI Command Descriptor Block: \n");
+ ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
+
+ ipr_err("Additional IOA Data:\n");
+ ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
+}
+
+/**
* ipr_get_error - Find the specified IOASC in the ipr_error_table.
* @ioasc: IOASC
*
@@ -2467,6 +2504,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
case IPR_HOST_RCB_OVERLAY_ID_20:
ipr_log_fabric_error(ioa_cfg, hostrcb);
break;
+ case IPR_HOST_RCB_OVERLAY_ID_21:
+ ipr_log_sis64_device_error(ioa_cfg, hostrcb);
+ break;
case IPR_HOST_RCB_OVERLAY_ID_23:
ipr_log_sis64_config_error(ioa_cfg, hostrcb);
break;
@@ -3630,16 +3670,14 @@ static ssize_t ipr_store_iopoll_weight(struct device *dev,
return strlen(buf);
}
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
for (i = 1; i < ioa_cfg->hrrq_num; i++)
blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
}
spin_lock_irqsave(shost->host_lock, lock_flags);
ioa_cfg->iopoll_weight = user_iopoll_weight;
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
for (i = 1; i < ioa_cfg->hrrq_num; i++) {
blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -5015,6 +5053,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
} else
rc = ipr_device_reset(ioa_cfg, res);
res->resetting_device = 0;
+ res->reset_occurred = 1;
LEAVE;
return rc ? FAILED : SUCCESS;
@@ -5484,8 +5523,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
return IRQ_NONE;
}
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
hrrq->toggle_bit) {
if (!blk_iopoll_sched_prep(&hrrq->iopoll))
@@ -6183,8 +6221,10 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
- if (ipr_is_gscsi(res))
+ if (ipr_is_gscsi(res) && res->reset_occurred) {
+ res->reset_occurred = 0;
ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
+ }
ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
}
@@ -8641,6 +8681,25 @@ static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called to tell us that the MMIO
+ * access to the IOA has been restored
+ */
+static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ unsigned long flags = 0;
+ struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ if (!ioa_cfg->probe_done)
+ pci_save_state(pdev);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
* ipr_pci_frozen - Called when slot has experienced a PCI bus error.
* @pdev: PCI device struct
*
@@ -8654,7 +8713,8 @@ static void ipr_pci_frozen(struct pci_dev *pdev)
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
+ if (ioa_cfg->probe_done)
+ _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -8672,11 +8732,14 @@ static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- if (ioa_cfg->needs_warm_reset)
- ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
- else
- _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
- IPR_SHUTDOWN_NONE);
+ if (ioa_cfg->probe_done) {
+ if (ioa_cfg->needs_warm_reset)
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ else
+ _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
+ IPR_SHUTDOWN_NONE);
+ } else
+ wake_up_all(&ioa_cfg->eeh_wait_q);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -8695,17 +8758,20 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
- ioa_cfg->sdt_state = ABORT_DUMP;
- ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
- ioa_cfg->in_ioa_bringdown = 1;
- for (i = 0; i < ioa_cfg->hrrq_num; i++) {
- spin_lock(&ioa_cfg->hrrq[i]._lock);
- ioa_cfg->hrrq[i].allow_cmds = 0;
- spin_unlock(&ioa_cfg->hrrq[i]._lock);
- }
- wmb();
- ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ if (ioa_cfg->probe_done) {
+ if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
+ ioa_cfg->sdt_state = ABORT_DUMP;
+ ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
+ ioa_cfg->in_ioa_bringdown = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ } else
+ wake_up_all(&ioa_cfg->eeh_wait_q);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -8725,7 +8791,7 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
switch (state) {
case pci_channel_io_frozen:
ipr_pci_frozen(pdev);
- return PCI_ERS_RESULT_NEED_RESET;
+ return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_perm_failure:
ipr_pci_perm_failure(pdev);
return PCI_ERS_RESULT_DISCONNECT;
@@ -8755,6 +8821,7 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
+ ioa_cfg->probe_done = 1;
if (ioa_cfg->needs_hard_reset) {
ioa_cfg->needs_hard_reset = 0;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -9030,16 +9097,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
if (!ioa_cfg->vpd_cbs)
goto out_free_res_entries;
- for (i = 0; i < ioa_cfg->hrrq_num; i++) {
- INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
- INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
- spin_lock_init(&ioa_cfg->hrrq[i]._lock);
- if (i == 0)
- ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
- else
- ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
- }
-
if (ipr_alloc_cmd_blks(ioa_cfg))
goto out_free_vpd_cbs;
@@ -9140,6 +9197,48 @@ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
}
/**
+ * ipr_init_regs - Initialize IOA registers
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
+{
+ const struct ipr_interrupt_offsets *p;
+ struct ipr_interrupts *t;
+ void __iomem *base;
+
+ p = &ioa_cfg->chip_cfg->regs;
+ t = &ioa_cfg->regs;
+ base = ioa_cfg->hdw_dma_regs;
+
+ t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
+ t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
+ t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
+ t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
+ t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
+ t->clr_interrupt_reg = base + p->clr_interrupt_reg;
+ t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
+ t->sense_interrupt_reg = base + p->sense_interrupt_reg;
+ t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
+ t->ioarrin_reg = base + p->ioarrin_reg;
+ t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
+ t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
+ t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
+ t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
+ t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
+ t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
+
+ if (ioa_cfg->sis64) {
+ t->init_feedback_reg = base + p->init_feedback_reg;
+ t->dump_addr_reg = base + p->dump_addr_reg;
+ t->dump_data_reg = base + p->dump_data_reg;
+ t->endian_swap_reg = base + p->endian_swap_reg;
+ }
+}
+
+/**
* ipr_init_ioa_cfg - Initialize IOA config struct
* @ioa_cfg: ioa config struct
* @host: scsi host struct
@@ -9151,9 +9250,7 @@ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
struct Scsi_Host *host, struct pci_dev *pdev)
{
- const struct ipr_interrupt_offsets *p;
- struct ipr_interrupts *t;
- void __iomem *base;
+ int i;
ioa_cfg->host = host;
ioa_cfg->pdev = pdev;
@@ -9173,6 +9270,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
init_waitqueue_head(&ioa_cfg->reset_wait_q);
init_waitqueue_head(&ioa_cfg->msi_wait_q);
+ init_waitqueue_head(&ioa_cfg->eeh_wait_q);
ioa_cfg->sdt_state = INACTIVE;
ipr_initialize_bus_attr(ioa_cfg);
@@ -9183,44 +9281,33 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
+ ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
+ + ((sizeof(struct ipr_config_table_entry64)
+ * ioa_cfg->max_devs_supported)));
} else {
host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
+ ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
+ + ((sizeof(struct ipr_config_table_entry)
+ * ioa_cfg->max_devs_supported)));
}
+
host->max_channel = IPR_MAX_BUS_TO_SCAN;
host->unique_id = host->host_no;
host->max_cmd_len = IPR_MAX_CDB_LEN;
host->can_queue = ioa_cfg->max_cmds;
pci_set_drvdata(pdev, ioa_cfg);
- p = &ioa_cfg->chip_cfg->regs;
- t = &ioa_cfg->regs;
- base = ioa_cfg->hdw_dma_regs;
-
- t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
- t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
- t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
- t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
- t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
- t->clr_interrupt_reg = base + p->clr_interrupt_reg;
- t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
- t->sense_interrupt_reg = base + p->sense_interrupt_reg;
- t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
- t->ioarrin_reg = base + p->ioarrin_reg;
- t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
- t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
- t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
- t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
- t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
- t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
-
- if (ioa_cfg->sis64) {
- t->init_feedback_reg = base + p->init_feedback_reg;
- t->dump_addr_reg = base + p->dump_addr_reg;
- t->dump_data_reg = base + p->dump_data_reg;
- t->endian_swap_reg = base + p->endian_swap_reg;
+ for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+ spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+ if (i == 0)
+ ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+ else
+ ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
}
}
@@ -9243,54 +9330,63 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
return NULL;
}
+/**
+ * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
+ * during probe time
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * None
+ **/
+static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct pci_dev *pdev = ioa_cfg->pdev;
+
+ if (pci_channel_offline(pdev)) {
+ wait_event_timeout(ioa_cfg->eeh_wait_q,
+ !pci_channel_offline(pdev),
+ IPR_PCI_ERROR_RECOVERY_TIMEOUT);
+ pci_restore_state(pdev);
+ }
+}
+
static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
{
struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
- int i, err, vectors;
+ int i, vectors;
for (i = 0; i < ARRAY_SIZE(entries); ++i)
entries[i].entry = i;
- vectors = ipr_number_of_msix;
-
- while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
- vectors = err;
-
- if (err < 0) {
- pci_disable_msix(ioa_cfg->pdev);
- return err;
+ vectors = pci_enable_msix_range(ioa_cfg->pdev,
+ entries, 1, ipr_number_of_msix);
+ if (vectors < 0) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ return vectors;
}
- if (!err) {
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = entries[i].vector;
- ioa_cfg->nvectors = vectors;
- }
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = entries[i].vector;
+ ioa_cfg->nvectors = vectors;
- return err;
+ return 0;
}
static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
{
- int i, err, vectors;
-
- vectors = ipr_number_of_msix;
+ int i, vectors;
- while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
- vectors = err;
-
- if (err < 0) {
- pci_disable_msi(ioa_cfg->pdev);
- return err;
+ vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
+ if (vectors < 0) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ return vectors;
}
- if (!err) {
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
- ioa_cfg->nvectors = vectors;
- }
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+ ioa_cfg->nvectors = vectors;
- return err;
+ return 0;
}
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
@@ -9355,7 +9451,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
* ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
* @pdev: PCI device struct
*
- * Description: The return value from pci_enable_msi() can not always be
+ * Description: The return value from pci_enable_msi_range() can not always be
* trusted. This routine sets up and initiates a test interrupt to determine
* if the interrupt is received via the ipr_test_intr() service routine.
* If the tests fails, the driver will fall back to LSI.
@@ -9434,19 +9530,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ENTER;
- if ((rc = pci_enable_device(pdev))) {
- dev_err(&pdev->dev, "Cannot enable adapter\n");
- goto out;
- }
-
dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
-
host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
if (!host) {
dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
rc = -ENOMEM;
- goto out_disable;
+ goto out;
}
ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
@@ -9476,6 +9566,8 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg->revid = pdev->revision;
+ ipr_init_ioa_cfg(ioa_cfg, host, pdev);
+
ipr_regs_pci = pci_resource_start(pdev, 0);
rc = pci_request_regions(pdev, IPR_NAME);
@@ -9485,22 +9577,35 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
goto out_scsi_host_put;
}
+ rc = pci_enable_device(pdev);
+
+ if (rc || pci_channel_offline(pdev)) {
+ if (pci_channel_offline(pdev)) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ rc = pci_enable_device(pdev);
+ }
+
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable adapter\n");
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ goto out_release_regions;
+ }
+ }
+
ipr_regs = pci_ioremap_bar(pdev, 0);
if (!ipr_regs) {
dev_err(&pdev->dev,
"Couldn't map memory range of registers\n");
rc = -ENOMEM;
- goto out_release_regions;
+ goto out_disable;
}
ioa_cfg->hdw_dma_regs = ipr_regs;
ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
- ipr_init_ioa_cfg(ioa_cfg, host, pdev);
-
- pci_set_master(pdev);
+ ipr_init_regs(ioa_cfg);
if (ioa_cfg->sis64) {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -9508,7 +9613,6 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}
-
} else
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -9522,10 +9626,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
if (rc != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev, "Write of cache line size failed\n");
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
rc = -EIO;
goto cleanup_nomem;
}
+ /* Issue MMIO read to ensure card is not in EEH */
+ interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+
if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
dev_err(&pdev->dev, "The max number of MSIX is %d\n",
IPR_MAX_MSIX_VECTORS);
@@ -9544,10 +9653,22 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
dev_info(&pdev->dev, "Cannot enable MSI.\n");
}
+ pci_set_master(pdev);
+
+ if (pci_channel_offline(pdev)) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ pci_set_master(pdev);
+ if (pci_channel_offline(pdev)) {
+ rc = -EIO;
+ goto out_msi_disable;
+ }
+ }
+
if (ioa_cfg->intr_flag == IPR_USE_MSI ||
ioa_cfg->intr_flag == IPR_USE_MSIX) {
rc = ipr_test_msi(ioa_cfg, pdev);
if (rc == -EOPNOTSUPP) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
if (ioa_cfg->intr_flag == IPR_USE_MSI) {
ioa_cfg->intr_flag &= ~IPR_USE_MSI;
pci_disable_msi(pdev);
@@ -9577,30 +9698,12 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
(unsigned int)num_online_cpus(),
(unsigned int)IPR_MAX_HRRQ_NUM);
- /* Save away PCI config space for use following IOA reset */
- rc = pci_save_state(pdev);
-
- if (rc != PCIBIOS_SUCCESSFUL) {
- dev_err(&pdev->dev, "Failed to save PCI config space\n");
- rc = -EIO;
- goto out_msi_disable;
- }
-
if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
goto out_msi_disable;
if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
goto out_msi_disable;
- if (ioa_cfg->sis64)
- ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
- + ((sizeof(struct ipr_config_table_entry64)
- * ioa_cfg->max_devs_supported)));
- else
- ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
- + ((sizeof(struct ipr_config_table_entry)
- * ioa_cfg->max_devs_supported)));
-
rc = ipr_alloc_mem(ioa_cfg);
if (rc < 0) {
dev_err(&pdev->dev,
@@ -9608,6 +9711,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
goto out_msi_disable;
}
+ /* Save away PCI config space for use following IOA reset */
+ rc = pci_save_state(pdev);
+
+ if (rc != PCIBIOS_SUCCESSFUL) {
+ dev_err(&pdev->dev, "Failed to save PCI config space\n");
+ rc = -EIO;
+ goto cleanup_nolog;
+ }
+
/*
* If HRRQ updated interrupt is not masked, or reset alert is set,
* the card is in an unknown state and needs a hard reset
@@ -9664,18 +9776,19 @@ out:
cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
if (ioa_cfg->intr_flag == IPR_USE_MSI)
pci_disable_msi(pdev);
else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
pci_disable_msix(pdev);
cleanup_nomem:
iounmap(ipr_regs);
+out_disable:
+ pci_disable_device(pdev);
out_release_regions:
pci_release_regions(pdev);
out_scsi_host_put:
scsi_host_put(host);
-out_disable:
- pci_disable_device(pdev);
goto out;
}
@@ -9859,8 +9972,7 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
ioa_cfg->host->max_channel = IPR_VSET_BUS;
ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
for (i = 1; i < ioa_cfg->hrrq_num; i++) {
blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -9889,8 +10001,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
ioa_cfg->iopoll_weight = 0;
for (i = 1; i < ioa_cfg->hrrq_num; i++)
blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
@@ -9994,6 +10105,8 @@ static struct pci_device_id ipr_pci_table[] = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
@@ -10005,12 +10118,19 @@ static struct pci_device_id ipr_pci_table[] = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
static const struct pci_error_handlers ipr_err_handler = {
.error_detected = ipr_pci_error_detected,
+ .mmio_enabled = ipr_pci_mmio_enabled,
.slot_reset = ipr_pci_slot_reset,
};
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 9ce38a22647e..31ed126f7143 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -101,12 +101,16 @@
#define IPR_SUBS_DEV_ID_57D7 0x03FF
#define IPR_SUBS_DEV_ID_57D8 0x03FE
#define IPR_SUBS_DEV_ID_57D9 0x046D
+#define IPR_SUBS_DEV_ID_57DA 0x04CA
#define IPR_SUBS_DEV_ID_57EB 0x0474
#define IPR_SUBS_DEV_ID_57EC 0x0475
#define IPR_SUBS_DEV_ID_57ED 0x0499
#define IPR_SUBS_DEV_ID_57EE 0x049A
#define IPR_SUBS_DEV_ID_57EF 0x049B
#define IPR_SUBS_DEV_ID_57F0 0x049C
+#define IPR_SUBS_DEV_ID_2CCA 0x04C7
+#define IPR_SUBS_DEV_ID_2CD2 0x04C8
+#define IPR_SUBS_DEV_ID_2CCD 0x04C9
#define IPR_NAME "ipr"
/*
@@ -230,6 +234,7 @@
#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ)
#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
+#define IPR_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
#define IPR_PCI_RESET_TIMEOUT (HZ / 2)
#define IPR_SIS32_DUMP_TIMEOUT (15 * HZ)
#define IPR_SIS64_DUMP_TIMEOUT (40 * HZ)
@@ -897,6 +902,18 @@ struct ipr_hostrcb_type_01_error {
__be32 ioa_data[236];
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb_type_21_error {
+ __be32 wwn[4];
+ u8 res_path[8];
+ u8 primary_problem_desc[32];
+ u8 second_problem_desc[32];
+ __be32 sense_data[8];
+ __be32 cdb[4];
+ __be32 residual_trans_length;
+ __be32 length_of_error;
+ __be32 ioa_data[236];
+}__attribute__((packed, aligned (4)));
+
struct ipr_hostrcb_type_02_error {
struct ipr_vpd ioa_vpd;
struct ipr_vpd cfc_vpd;
@@ -1126,6 +1143,7 @@ struct ipr_hostrcb64_error {
struct ipr_hostrcb_type_ff_error type_ff_error;
struct ipr_hostrcb_type_12_error type_12_error;
struct ipr_hostrcb_type_17_error type_17_error;
+ struct ipr_hostrcb_type_21_error type_21_error;
struct ipr_hostrcb_type_23_error type_23_error;
struct ipr_hostrcb_type_24_error type_24_error;
struct ipr_hostrcb_type_30_error type_30_error;
@@ -1169,6 +1187,7 @@ struct ipr_hcam {
#define IPR_HOST_RCB_OVERLAY_ID_16 0x16
#define IPR_HOST_RCB_OVERLAY_ID_17 0x17
#define IPR_HOST_RCB_OVERLAY_ID_20 0x20
+#define IPR_HOST_RCB_OVERLAY_ID_21 0x21
#define IPR_HOST_RCB_OVERLAY_ID_23 0x23
#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
@@ -1252,6 +1271,7 @@ struct ipr_resource_entry {
u8 add_to_ml:1;
u8 del_from_ml:1;
u8 resetting_device:1;
+ u8 reset_occurred:1;
u32 bus; /* AKA channel */
u32 target; /* AKA id */
@@ -1441,6 +1461,7 @@ struct ipr_ioa_cfg {
u8 dump_timeout:1;
u8 cfg_locked:1;
u8 clear_isr:1;
+ u8 probe_done:1;
u8 revid;
@@ -1519,6 +1540,7 @@ struct ipr_ioa_cfg {
wait_queue_head_t reset_wait_q;
wait_queue_head_t msi_wait_q;
+ wait_queue_head_t eeh_wait_q;
struct ipr_dump *dump;
enum ipr_sdt_state sdt_state;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index d25d0d859f05..695b34e9154e 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -66,7 +66,7 @@
#include "probe_roms.h"
#define MAJ 1
-#define MIN 1
+#define MIN 2
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD)
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 99d2930b18c8..56e38096f0c4 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2723,13 +2723,9 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_
memcpy(resp->ending_fis, fis, sizeof(*fis));
ts->buf_valid_size = sizeof(*resp);
- /* If the device fault bit is set in the status register, then
- * set the sense data and return.
- */
- if (fis->status & ATA_DF)
+ /* If an error is flagged let libata decode the fis */
+ if (ac_err_mask(fis->status))
ts->stat = SAS_PROTO_RESPONSE;
- else if (fis->status & ATA_ERR)
- ts->stat = SAM_STAT_CHECK_CONDITION;
else
ts->stat = SAM_STAT_GOOD;
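Note on the isci hunk above: the separate ATA_DF/ATA_ERR handling is dropped, and any error flagged in the D2H FIS status now comes back as SAS_PROTO_RESPONSE so libata decodes the FIS itself. The check it relies on, ac_err_mask(), returns a non-zero AC_ERR_* mask for both state-machine and device errors; a paraphrase of that condition, written against the libata status bits:

#include <linux/ata.h>

/* Paraphrase of the condition the new code keys off: BSY/DRQ left set is a
 * host state-machine error, ERR/DF is a device error. */
static bool fis_status_flags_error(u8 status)
{
	if (status & (ATA_BUSY | ATA_DRQ))
		return true;
	if (status & (ATA_ERR | ATA_DF))
		return true;
	return false;
}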
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 14c1c8f6a95e..680bf6f0ce76 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -490,5 +490,6 @@ void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset)
iscsi_boot_remove_kobj(boot_kobj);
kset_unregister(boot_kset->kset);
+ kfree(boot_kset);
}
EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index add6d1566ec8..bfb6d07d87f0 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -593,9 +593,9 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
tcp_sw_conn->sock = NULL;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
sockfd_put(sock);
}
@@ -663,10 +663,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
if (err)
goto free_socket;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/* setup Socket parameters */
sk = sock->sk;
@@ -726,14 +726,14 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
switch(param) {
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_CONN_ADDRESS:
- spin_lock_bh(&conn->session->lock);
+ spin_lock_bh(&conn->session->frwd_lock);
if (!tcp_sw_conn || !tcp_sw_conn->sock) {
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
return -ENOTCONN;
}
rc = kernel_getpeername(tcp_sw_conn->sock,
(struct sockaddr *)&addr, &len);
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
if (rc)
return rc;
@@ -759,23 +759,26 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
switch (param) {
case ISCSI_HOST_PARAM_IPADDRESS:
- spin_lock_bh(&session->lock);
+ if (!session)
+ return -ENOTCONN;
+
+ spin_lock_bh(&session->frwd_lock);
conn = session->leadconn;
if (!conn) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return -ENOTCONN;
}
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
if (!tcp_sw_conn->sock) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return -ENOTCONN;
}
rc = kernel_getsockname(tcp_sw_conn->sock,
(struct sockaddr *)&addr, &len);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
if (rc)
return rc;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 40462415291e..26dc005bb0f0 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -110,16 +110,8 @@ static void __iscsi_update_cmdsn(struct iscsi_session *session,
session->exp_cmdsn = exp_cmdsn;
if (max_cmdsn != session->max_cmdsn &&
- !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
+ !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))
session->max_cmdsn = max_cmdsn;
- /*
- * if the window closed with IO queued, then kick the
- * xmit thread
- */
- if (!list_empty(&session->leadconn->cmdqueue) ||
- !list_empty(&session->leadconn->mgmtqueue))
- iscsi_conn_queue_work(session->leadconn);
- }
}
void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
@@ -395,6 +387,10 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
if (rc)
return rc;
}
+
+ if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
+ task->protected = true;
+
if (sc->sc_data_direction == DMA_TO_DEVICE) {
unsigned out_len = scsi_out(sc)->length;
struct iscsi_r2t_info *r2t = &task->unsol_r2t;
@@ -481,7 +477,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
* iscsi_free_task - free a task
* @task: iscsi cmd task
*
- * Must be called with session lock.
+ * Must be called with session back_lock.
* This function returns the scsi command to scsi-ml or cleans
* up mgmt tasks then returns the task to the pool.
*/
@@ -535,9 +531,10 @@ void iscsi_put_task(struct iscsi_task *task)
{
struct iscsi_session *session = task->conn->session;
- spin_lock_bh(&session->lock);
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&session->back_lock);
__iscsi_put_task(task);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->back_lock);
}
EXPORT_SYMBOL_GPL(iscsi_put_task);
@@ -546,7 +543,7 @@ EXPORT_SYMBOL_GPL(iscsi_put_task);
* @task: iscsi cmd task
* @state: state to complete task with
*
- * Must be called with session lock.
+ * Must be called with session back_lock.
*/
static void iscsi_complete_task(struct iscsi_task *task, int state)
{
@@ -585,7 +582,7 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
* This is used when drivers do not need or cannot perform
* lower level pdu processing.
*
- * Called with session lock
+ * Called with session back_lock
*/
void iscsi_complete_scsi_task(struct iscsi_task *task,
uint32_t exp_cmdsn, uint32_t max_cmdsn)
@@ -602,7 +599,7 @@ EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
/*
- * session lock must be held and if not called for a task that is
+ * session back_lock must be held and if not called for a task that is
* still pending or from the xmit thread, then xmit thread must
* be suspended.
*/
@@ -642,7 +639,10 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
scsi_in(sc)->resid = scsi_in(sc)->length;
}
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&conn->session->back_lock);
iscsi_complete_task(task, state);
+ spin_unlock_bh(&conn->session->back_lock);
}
static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -780,7 +780,10 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
return task;
free_task:
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&session->back_lock);
__iscsi_put_task(task);
+ spin_unlock_bh(&session->back_lock);
return NULL;
}
@@ -791,10 +794,10 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
struct iscsi_session *session = conn->session;
int err = 0;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
err = -EPERM;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return err;
}
EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
@@ -823,6 +826,33 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
sc->result = (DID_OK << 16) | rhdr->cmd_status;
+ if (task->protected) {
+ sector_t sector;
+ u8 ascq;
+
+ /**
+ * Transports that didn't implement check_protection
+ * callback but still published T10-PI support to scsi-mid
+ * deserve this BUG_ON.
+ **/
+ BUG_ON(!session->tt->check_protection);
+
+ ascq = session->tt->check_protection(task, &sector);
+ if (ascq) {
+ sc->result = DRIVER_SENSE << 24 |
+ SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(1, sc->sense_buffer,
+ ILLEGAL_REQUEST, 0x10, ascq);
+ sc->sense_buffer[7] = 0xc; /* Additional sense length */
+ sc->sense_buffer[8] = 0; /* Information desc type */
+ sc->sense_buffer[9] = 0xa; /* Additional desc length */
+ sc->sense_buffer[10] = 0x80; /* Validity bit */
+
+ put_unaligned_be64(sector, &sc->sense_buffer[12]);
+ goto out;
+ }
+ }
+
if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
sc->result = DID_ERROR << 16;
goto out;
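Note on the protection-check hunk above: when the transport's check_protection() callback reports a T10-PI failure, libiscsi builds descriptor-format sense data — sense key ILLEGAL REQUEST, ASC 0x10, the ASCQ supplied by the transport, plus an information descriptor carrying the failing sector. A standalone sketch of that construction, assuming the caller already has the scsi_cmnd, ASCQ and sector in hand; the helper name is hypothetical, the library calls are the ones used in the diff:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <asm/unaligned.h>

/* Hypothetical helper: report a T10-PI error at 'sector' via descriptor
 * format sense data, mirroring the libiscsi code above. */
static void fill_pi_sense(struct scsi_cmnd *sc, u8 ascq, sector_t sector)
{
	sc->result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
	scsi_build_sense_buffer(1, sc->sense_buffer, ILLEGAL_REQUEST, 0x10, ascq);
	sc->sense_buffer[7] = 0xc;	/* additional sense length */
	sc->sense_buffer[8] = 0;	/* information descriptor type */
	sc->sense_buffer[9] = 0xa;	/* additional descriptor length */
	sc->sense_buffer[10] = 0x80;	/* VALID bit */
	put_unaligned_be64(sector, &sc->sense_buffer[12]);
}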
@@ -1013,13 +1043,13 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
iscsi_conn_printk(KERN_ERR, conn,
"pdu (op 0x%x itt 0x%x) rejected "
"due to DataDigest error.\n",
- rejected_pdu.itt, opcode);
+ opcode, rejected_pdu.itt);
break;
case ISCSI_REASON_IMM_CMD_REJECT:
iscsi_conn_printk(KERN_ERR, conn,
"pdu (op 0x%x itt 0x%x) rejected. Too many "
"immediate commands.\n",
- rejected_pdu.itt, opcode);
+ opcode, rejected_pdu.itt);
/*
* We only send one TMF at a time so if the target could not
* handle it, then it should get fixed (RFC mandates that
@@ -1031,14 +1061,19 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (opcode != ISCSI_OP_NOOP_OUT)
return 0;
- if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG))
+ if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
/*
* nop-out in response to target's nop-out rejected.
* Just resend.
*/
+ /* In RX path we are under back lock */
+ spin_unlock(&conn->session->back_lock);
+ spin_lock(&conn->session->frwd_lock);
iscsi_send_nopout(conn,
(struct iscsi_nopin*)&rejected_pdu);
- else {
+ spin_unlock(&conn->session->frwd_lock);
+ spin_lock(&conn->session->back_lock);
+ } else {
struct iscsi_task *task;
/*
* Our nop as ping got dropped. We know the target
@@ -1059,8 +1094,8 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
default:
iscsi_conn_printk(KERN_ERR, conn,
"pdu (op 0x%x itt 0x%x) rejected. Reason "
- "code 0x%x\n", rejected_pdu.itt,
- rejected_pdu.opcode, reject->reason);
+ "code 0x%x\n", rejected_pdu.opcode,
+ rejected_pdu.itt, reject->reason);
break;
}
return rc;
@@ -1074,7 +1109,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
* This should be used for mgmt tasks like login and nops, or if
* the LDD's itt space does not include the session age.
*
- * The session lock must be held.
+ * The session back_lock must be held.
*/
struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
{
@@ -1103,7 +1138,7 @@ EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
* @datalen: len of data buffer
*
* Completes pdu processing by freeing any resources allocated at
- * queuecommand or send generic. session lock must be held and verify
+ * queuecommand or send generic. session back_lock must be held and verify
* itt must have been called.
*/
int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -1140,7 +1175,12 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
break;
+ /* In RX path we are under back lock */
+ spin_unlock(&session->back_lock);
+ spin_lock(&session->frwd_lock);
iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
+ spin_unlock(&session->frwd_lock);
+ spin_lock(&session->back_lock);
break;
case ISCSI_OP_REJECT:
rc = iscsi_handle_reject(conn, hdr, data, datalen);
@@ -1247,9 +1287,9 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
{
int rc;
- spin_lock(&conn->session->lock);
+ spin_lock(&conn->session->back_lock);
rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
- spin_unlock(&conn->session->lock);
+ spin_unlock(&conn->session->back_lock);
return rc;
}
EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
@@ -1293,7 +1333,7 @@ EXPORT_SYMBOL_GPL(iscsi_verify_itt);
*
* This should be used for cmd tasks.
*
- * The session lock must be held.
+ * The session back_lock must be held.
*/
struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
{
@@ -1323,15 +1363,15 @@ void iscsi_session_failure(struct iscsi_session *session,
struct iscsi_conn *conn;
struct device *dev;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
conn = session->leadconn;
if (session->state == ISCSI_STATE_TERMINATE || !conn) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return;
}
dev = get_device(&conn->cls_conn->dev);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
if (!dev)
return;
/*
@@ -1351,15 +1391,15 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
{
struct iscsi_session *session = conn->session;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (session->state == ISCSI_STATE_FAILED) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return;
}
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
@@ -1393,15 +1433,18 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
return -ENODATA;
__iscsi_get_task(task);
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
- spin_lock_bh(&conn->session->lock);
+ spin_lock_bh(&conn->session->frwd_lock);
if (!rc) {
/* done with this task */
task->last_xfer = jiffies;
conn->task = NULL;
}
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(task);
+ spin_unlock_bh(&conn->session->back_lock);
return rc;
}
@@ -1410,7 +1453,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
* @task: task to requeue
*
* LLDs that need to run a task from the session workqueue should call
- * this. The session lock must be held. This should only be called
+ * this. The session frwd_lock must be held. This should only be called
* by software drivers.
*/
void iscsi_requeue_task(struct iscsi_task *task)
@@ -1441,10 +1484,10 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
struct iscsi_task *task;
int rc = 0;
- spin_lock_bh(&conn->session->lock);
+ spin_lock_bh(&conn->session->frwd_lock);
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
}
@@ -1465,7 +1508,10 @@ check_mgmt:
struct iscsi_task, running);
list_del_init(&conn->task->running);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(conn->task);
+ spin_unlock_bh(&conn->session->back_lock);
conn->task = NULL;
continue;
}
@@ -1527,11 +1573,11 @@ check_mgmt:
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
done:
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
return rc;
}
@@ -1567,6 +1613,7 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
task->have_checked_conn = false;
task->last_timeout = jiffies;
task->last_xfer = jiffies;
+ task->protected = false;
INIT_LIST_HEAD(&task->running);
return task;
}
@@ -1600,7 +1647,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
reason = iscsi_session_chkready(cls_session);
if (reason) {
@@ -1686,13 +1733,13 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
}
session->queued_cmdsn++;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return 0;
prepd_reject:
iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
reject:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
sc->cmnd[0], reason);
return SCSI_MLQUEUE_TARGET_BUSY;
@@ -1700,7 +1747,7 @@ reject:
prepd_fault:
iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
fault:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
sc->cmnd[0], reason);
if (!scsi_bidi_cmnd(sc))
@@ -1748,14 +1795,14 @@ static void iscsi_tmf_timedout(unsigned long data)
struct iscsi_conn *conn = (struct iscsi_conn *)data;
struct iscsi_session *session = conn->session;
- spin_lock(&session->lock);
+ spin_lock(&session->frwd_lock);
if (conn->tmf_state == TMF_QUEUED) {
conn->tmf_state = TMF_TIMEDOUT;
ISCSI_DBG_EH(session, "tmf timedout\n");
/* unblock eh_abort() */
wake_up(&conn->ehwait);
}
- spin_unlock(&session->lock);
+ spin_unlock(&session->frwd_lock);
}
static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
@@ -1768,10 +1815,10 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
NULL, 0);
if (!task) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
return -EPERM;
}
conn->tmfcmd_pdus_cnt++;
@@ -1781,7 +1828,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
add_timer(&conn->tmf_timer);
ISCSI_DBG_EH(session, "tmf set timeout\n");
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
/*
@@ -1800,7 +1847,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
del_timer_sync(&conn->tmf_timer);
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
/* if the session drops it will clean up the task */
if (age != session->age ||
session->state != ISCSI_STATE_LOGGED_IN)
@@ -1837,7 +1884,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
* iscsi_suspend_queue - suspend iscsi_queuecommand
* @conn: iscsi conn to stop queueing IO on
*
- * This grabs the session lock to make sure no one is in
+ * This grabs the session frwd_lock to make sure no one is in
* xmit_task/queuecommand, and then sets suspend to prevent
* new commands from being queued. This only needs to be called
* by offload drivers that need to sync a path like ep disconnect
@@ -1846,9 +1893,9 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
*/
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
- spin_lock_bh(&conn->session->lock);
+ spin_lock_bh(&conn->session->frwd_lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
@@ -1907,7 +1954,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
- spin_lock(&session->lock);
+ spin_lock(&session->frwd_lock);
task = (struct iscsi_task *)sc->SCp.ptr;
if (!task) {
/*
@@ -2021,7 +2068,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
done:
if (task)
task->last_timeout = jiffies;
- spin_unlock(&session->lock);
+ spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
"timer reset" : "nh");
return rc;
@@ -2033,7 +2080,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
struct iscsi_session *session = conn->session;
unsigned long recv_timeout, next_timeout = 0, last_recv;
- spin_lock(&session->lock);
+ spin_lock(&session->frwd_lock);
if (session->state != ISCSI_STATE_LOGGED_IN)
goto done;
@@ -2050,7 +2097,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
"last ping %lu, now %lu\n",
conn->ping_timeout, conn->recv_timeout,
last_recv, conn->last_ping, jiffies);
- spin_unlock(&session->lock);
+ spin_unlock(&session->frwd_lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return;
}
@@ -2066,7 +2113,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
mod_timer(&conn->transport_timer, next_timeout);
done:
- spin_unlock(&session->lock);
+ spin_unlock(&session->frwd_lock);
}
static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
@@ -2096,7 +2143,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
/*
* if session was ISCSI_STATE_IN_RECOVERY then we may not have
* got the command.
@@ -2104,7 +2151,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
if (!sc->SCp.ptr) {
ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
"it completed.\n");
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
}
@@ -2115,7 +2162,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
*/
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
sc->SCp.phase != session->age) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
ISCSI_DBG_EH(session, "failing abort due to dropped "
"session.\n");
@@ -2156,7 +2203,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
switch (conn->tmf_state) {
case TMF_SUCCESS:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/*
* stop tx side incase the target had sent a abort rsp but
* the initiator was still writing out data.
@@ -2167,15 +2214,15 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
* good and have never sent us a successful tmf response
* then sent more data for the cmd.
*/
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
fail_scsi_task(task, DID_ABORT);
conn->tmf_state = TMF_INITIAL;
memset(hdr, 0, sizeof(*hdr));
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
goto success_unlocked;
case TMF_TIMEDOUT:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto failed_unlocked;
case TMF_NOT_FOUND:
@@ -2194,7 +2241,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
}
success:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
success_unlocked:
ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
sc, task->itt);
@@ -2202,7 +2249,7 @@ success_unlocked:
return SUCCESS;
failed:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
failed_unlocked:
ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
task ? task->itt : 0);
@@ -2235,7 +2282,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
/*
* Just check if we are not logged in. We cannot check for
* the phase because the reset could come from a ioctl.
@@ -2262,7 +2309,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
case TMF_SUCCESS:
break;
case TMF_TIMEDOUT:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto done;
default:
@@ -2271,21 +2318,21 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
}
rc = SUCCESS;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_suspend_tx(conn);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
memset(hdr, 0, sizeof(*hdr));
fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
goto done;
unlock:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
done:
ISCSI_DBG_EH(session, "dev reset result = %s\n",
rc == SUCCESS ? "SUCCESS" : "FAILED");
@@ -2298,13 +2345,13 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
session->state = ISCSI_STATE_RECOVERY_FAILED;
if (session->leadconn)
wake_up(&session->leadconn->ehwait);
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
@@ -2326,19 +2373,19 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
conn = session->leadconn;
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (session->state == ISCSI_STATE_TERMINATE) {
failed:
ISCSI_DBG_EH(session,
"failing session reset: Could not log back into "
"%s, %s [age %d]\n", session->targetname,
conn->persistent_address, session->age);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return FAILED;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
/*
* we drop the lock here but the leadconn cannot be destoyed while
@@ -2355,14 +2402,14 @@ failed:
flush_signals(current);
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (session->state == ISCSI_STATE_LOGGED_IN) {
ISCSI_DBG_EH(session,
"session reset succeeded for %s,%s\n",
session->targetname, conn->persistent_address);
} else
goto failed;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
}
@@ -2398,7 +2445,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
session->targetname);
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
/*
* Just check if we are not logged in. We cannot check for
* the phase because the reset could come from a ioctl.
@@ -2425,7 +2472,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
case TMF_SUCCESS:
break;
case TMF_TIMEDOUT:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto done;
default:
@@ -2434,21 +2481,21 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
}
rc = SUCCESS;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_suspend_tx(conn);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
memset(hdr, 0, sizeof(*hdr));
fail_scsi_tasks(conn, -1, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
goto done;
unlock:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
done:
ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
rc == SUCCESS ? "SUCCESS" : "FAILED");
@@ -2746,8 +2793,10 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
session->max_r2t = 1;
session->tt = iscsit;
session->dd_data = cls_session->dd_data + sizeof(*session);
+
mutex_init(&session->eh_mutex);
- spin_lock_init(&session->lock);
+ spin_lock_init(&session->frwd_lock);
+ spin_lock_init(&session->back_lock);
/* initialize SCSI PDU commands pool */
if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
@@ -2861,14 +2910,14 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_task used for the login/text sequences */
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (!kfifo_out(&session->cmdpool.queue,
(void*)&conn->login_task,
sizeof(void*))) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
goto login_task_alloc_fail;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
data = (char *) __get_free_pages(GFP_KERNEL,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
@@ -2905,7 +2954,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
del_timer_sync(&conn->transport_timer);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) {
/*
@@ -2914,7 +2963,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
session->state = ISCSI_STATE_TERMINATE;
wake_up(&conn->ehwait);
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/*
* Block until all in-progress commands for this connection
@@ -2941,16 +2990,19 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
/* flush queued up work because we free the connection below */
iscsi_suspend_tx(conn);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
kfree(conn->persistent_address);
kfree(conn->local_ipaddr);
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&session->back_lock);
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
+ spin_unlock_bh(&session->back_lock);
if (session->leadconn == conn)
session->leadconn = NULL;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_destroy_conn(cls_conn);
}
@@ -2987,7 +3039,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
conn->ping_timeout = 5;
}
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_STARTED;
session->state = ISCSI_STATE_LOGGED_IN;
session->queued_cmdsn = session->cmdsn;
@@ -3016,7 +3068,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
default:
break;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_unblock_session(session->cls_session);
wake_up(&conn->ehwait);
@@ -3055,9 +3107,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
int old_stop_stage;
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (conn->stop_stage == STOP_CONN_TERM) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return;
}
@@ -3074,14 +3126,14 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
old_stop_stage = conn->stop_stage;
conn->stop_stage = flag;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
del_timer_sync(&conn->transport_timer);
iscsi_suspend_tx(conn);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_STOPPED;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/*
* for connection level recovery we should not calculate
@@ -3102,11 +3154,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
/*
* flush queues.
*/
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
fail_mgmt_tasks(session, conn);
memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
}
@@ -3133,10 +3185,10 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (is_leading)
session->leadconn = conn;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/*
* Unblock xmitworker(), Login Phase will pass through.
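Note on the libiscsi conversion above: the old single session->lock is split into frwd_lock for the queueing/TX/error-handling path and back_lock for the completion (RX) path. Completion helpers such as __iscsi_put_task() and __iscsi_complete_pdu() now run under back_lock, and RX-side code that has to transmit something swaps locks first, as the nop-out resend hunks show. A minimal sketch of that lock-swap pattern, assuming softirq context (hence plain spin_lock/spin_unlock); the function itself is illustrative:

/* Illustrative only: transmit a PDU from the RX path, which is entered
 * holding session->back_lock. */
static void rx_path_resend_nopout(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
	struct iscsi_session *session = conn->session;

	spin_unlock(&session->back_lock);	/* leave the completion lock */
	spin_lock(&session->frwd_lock);		/* take the queueing lock */
	iscsi_send_nopout(conn, (struct iscsi_nopin *)hdr);
	spin_unlock(&session->frwd_lock);
	spin_lock(&session->back_lock);		/* restore the caller's state */
}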
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 1d58d5336018..60cb6dc3c6f0 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -446,7 +446,7 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
* iscsi_tcp_cleanup_task - free tcp_task resources
* @task: iscsi task
*
- * must be called with session lock
+ * must be called with session back_lock
*/
void iscsi_tcp_cleanup_task(struct iscsi_task *task)
{
@@ -457,6 +457,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
if (!task->sc)
return;
+ spin_lock_bh(&tcp_task->queue2pool);
/* flush task's r2t queues */
while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
@@ -470,6 +471,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
sizeof(void*));
tcp_task->r2t = NULL;
}
+ spin_unlock_bh(&tcp_task->queue2pool);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
@@ -529,6 +531,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
struct iscsi_r2t_info *r2t;
int r2tsn = be32_to_cpu(rhdr->r2tsn);
+ u32 data_length;
+ u32 data_offset;
int rc;
if (tcp_conn->in.datalen) {
@@ -554,40 +558,41 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
return 0;
}
- rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
- if (!rc) {
- iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
- "Target has sent more R2Ts than it "
- "negotiated for or driver has leaked.\n");
- return ISCSI_ERR_PROTO;
- }
-
- r2t->exp_statsn = rhdr->statsn;
- r2t->data_length = be32_to_cpu(rhdr->data_length);
- if (r2t->data_length == 0) {
+ data_length = be32_to_cpu(rhdr->data_length);
+ if (data_length == 0) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with zero data len\n");
- kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
- sizeof(void*));
return ISCSI_ERR_DATALEN;
}
- if (r2t->data_length > session->max_burst)
+ if (data_length > session->max_burst)
ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
"burst %u. Attempting to execute request.\n",
- r2t->data_length, session->max_burst);
+ data_length, session->max_burst);
- r2t->data_offset = be32_to_cpu(rhdr->data_offset);
- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
+ data_offset = be32_to_cpu(rhdr->data_offset);
+ if (data_offset + data_length > scsi_out(task->sc)->length) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with data len %u at offset %u "
- "and total length %d\n", r2t->data_length,
- r2t->data_offset, scsi_out(task->sc)->length);
- kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
- sizeof(void*));
+ "and total length %d\n", data_length,
+ data_offset, scsi_out(task->sc)->length);
return ISCSI_ERR_DATALEN;
}
+ spin_lock(&tcp_task->pool2queue);
+ rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *));
+ if (!rc) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
+ "Target has sent more R2Ts than it "
+ "negotiated for or driver has leaked.\n");
+ spin_unlock(&tcp_task->pool2queue);
+ return ISCSI_ERR_PROTO;
+ }
+
+ r2t->exp_statsn = rhdr->statsn;
+ r2t->data_length = data_length;
+ r2t->data_offset = data_offset;
+
r2t->ttt = rhdr->ttt; /* no flip */
r2t->datasn = 0;
r2t->sent = 0;
@@ -595,6 +600,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
tcp_task->exp_datasn = r2tsn + 1;
kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
conn->r2t_pdus_cnt++;
+ spin_unlock(&tcp_task->pool2queue);
iscsi_requeue_task(task);
return 0;
@@ -667,14 +673,14 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
switch(opcode) {
case ISCSI_OP_SCSI_DATA_IN:
- spin_lock(&conn->session->lock);
+ spin_lock(&conn->session->back_lock);
task = iscsi_itt_to_ctask(conn, hdr->itt);
if (!task)
rc = ISCSI_ERR_BAD_ITT;
else
rc = iscsi_tcp_data_in(conn, task);
if (rc) {
- spin_unlock(&conn->session->lock);
+ spin_unlock(&conn->session->back_lock);
break;
}
@@ -707,11 +713,11 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
tcp_conn->in.datalen,
iscsi_tcp_process_data_in,
rx_hash);
- spin_unlock(&conn->session->lock);
+ spin_unlock(&conn->session->back_lock);
return rc;
}
rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
- spin_unlock(&conn->session->lock);
+ spin_unlock(&conn->session->back_lock);
break;
case ISCSI_OP_SCSI_CMD_RSP:
if (tcp_conn->in.datalen) {
@@ -721,18 +727,20 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
break;
case ISCSI_OP_R2T:
- spin_lock(&conn->session->lock);
+ spin_lock(&conn->session->back_lock);
task = iscsi_itt_to_ctask(conn, hdr->itt);
+ spin_unlock(&conn->session->back_lock);
if (!task)
rc = ISCSI_ERR_BAD_ITT;
else if (ahslen)
rc = ISCSI_ERR_AHSLEN;
else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
task->last_xfer = jiffies;
+ spin_lock(&conn->session->frwd_lock);
rc = iscsi_tcp_r2t_rsp(conn, task);
+ spin_unlock(&conn->session->frwd_lock);
} else
rc = ISCSI_ERR_PROTO;
- spin_unlock(&conn->session->lock);
break;
case ISCSI_OP_LOGIN_RSP:
case ISCSI_OP_TEXT_RSP:
@@ -980,14 +988,13 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);
static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
{
- struct iscsi_session *session = task->conn->session;
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_r2t_info *r2t = NULL;
if (iscsi_task_has_unsol_data(task))
r2t = &task->unsol_r2t;
else {
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&tcp_task->queue2pool);
if (tcp_task->r2t) {
r2t = tcp_task->r2t;
/* Continue with this R2T? */
@@ -1009,7 +1016,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
else
r2t = tcp_task->r2t;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&tcp_task->queue2pool);
}
return r2t;
@@ -1139,6 +1146,8 @@ int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
iscsi_pool_free(&tcp_task->r2tpool);
goto r2t_alloc_fail;
}
+ spin_lock_init(&tcp_task->pool2queue);
+ spin_lock_init(&tcp_task->queue2pool);
}
return 0;
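Note on the libiscsi_tcp rework above: the R2T response path now validates the header (non-zero data length, offset plus length within the command) before pulling an r2t descriptor from the pool, and the pool/queue transfers are serialized by the new per-task pool2queue/queue2pool spinlocks instead of the session lock. A compressed sketch of that ordering, with error reporting and the remaining r2t fields trimmed for brevity; the structure fields match the diff, the function is illustrative:

/* Illustrative ordering: validate the R2T first, then take a descriptor
 * from the pool under the producer-side lock. */
static int queue_r2t(struct iscsi_tcp_task *tcp_task, struct iscsi_r2t_rsp *rhdr,
		     u32 cmd_len)
{
	struct iscsi_r2t_info *r2t;
	u32 len = be32_to_cpu(rhdr->data_length);
	u32 off = be32_to_cpu(rhdr->data_offset);

	if (!len || off + len > cmd_len)
		return -EPROTO;		/* reject before touching the pool */

	spin_lock(&tcp_task->pool2queue);
	if (!kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *))) {
		spin_unlock(&tcp_task->pool2queue);
		return -EPROTO;		/* more R2Ts than negotiated */
	}
	r2t->data_length = len;
	r2t->data_offset = off;
	kfifo_in(&tcp_task->r2tqueue, (void *)&r2t, sizeof(void *));
	spin_unlock(&tcp_task->pool2queue);
	return 0;
}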
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index d2895836f9fa..766098af4eb7 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -700,46 +700,26 @@ void sas_probe_sata(struct asd_sas_port *port)
}
-static bool sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
+static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
{
struct domain_device *dev, *n;
- bool retry = false;
list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
- int rc;
-
if (!dev_is_sata(dev))
continue;
sas_ata_wait_eh(dev);
- rc = dev->sata_dev.pm_result;
- if (rc == -EAGAIN)
- retry = true;
- else if (rc) {
- /* since we don't have a
- * ->port_{suspend|resume} routine in our
- * ata_port ops, and no entanglements with
- * acpi, suspend should just be mechanical trip
- * through eh, catch cases where these
- * assumptions are invalidated
- */
- WARN_ONCE(1, "failed %s %s error: %d\n", func,
- dev_name(&dev->rphy->dev), rc);
- }
/* if libata failed to power manage the device, tear it down */
if (ata_dev_disabled(sas_to_ata_dev(dev)))
sas_fail_probe(dev, func, -ENODEV);
}
-
- return retry;
}
void sas_suspend_sata(struct asd_sas_port *port)
{
struct domain_device *dev;
- retry:
mutex_lock(&port->ha->disco_mutex);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
struct sata_device *sata;
@@ -751,20 +731,17 @@ void sas_suspend_sata(struct asd_sas_port *port)
if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
continue;
- sata->pm_result = -EIO;
- ata_sas_port_async_suspend(sata->ap, &sata->pm_result);
+ ata_sas_port_suspend(sata->ap);
}
mutex_unlock(&port->ha->disco_mutex);
- if (sas_ata_flush_pm_eh(port, __func__))
- goto retry;
+ sas_ata_flush_pm_eh(port, __func__);
}
void sas_resume_sata(struct asd_sas_port *port)
{
struct domain_device *dev;
- retry:
mutex_lock(&port->ha->disco_mutex);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
struct sata_device *sata;
@@ -776,13 +753,11 @@ void sas_resume_sata(struct asd_sas_port *port)
if (sata->ap->pm_mesg.event == PM_EVENT_ON)
continue;
- sata->pm_result = -EIO;
- ata_sas_port_async_resume(sata->ap, &sata->pm_result);
+ ata_sas_port_resume(sata->ap);
}
mutex_unlock(&port->ha->disco_mutex);
- if (sas_ata_flush_pm_eh(port, __func__))
- goto retry;
+ sas_ata_flush_pm_eh(port, __func__);
}
/**
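Note on the libsas change above: with sata_dev.pm_result and ata_sas_port_async_{suspend,resume}() gone, libsas calls the synchronous ata_sas_port_suspend()/ata_sas_port_resume() helpers and flushes EH once, so the retry loop around sas_ata_flush_pm_eh() is no longer needed. A condensed view of the resulting resume flow, with the PM-message short-circuit check dropped for brevity; this restates the code above rather than adding behaviour:

/* Condensed from the diff: synchronous resume of each SATA member, then a
 * single EH flush that tears down anything libata disabled. */
static void resume_port_sata_devs(struct asd_sas_port *port)
{
	struct domain_device *dev;

	mutex_lock(&port->ha->disco_mutex);
	list_for_each_entry(dev, &port->dev_list, dev_list_node) {
		if (!dev_is_sata(dev))
			continue;
		ata_sas_port_resume(dev->sata_dev.ap);
	}
	mutex_unlock(&port->ha->disco_mutex);

	sas_ata_flush_pm_eh(port, __func__);
}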
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index da3aee17faa5..25d0f127424d 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -862,7 +862,7 @@ out:
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
- scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd);
+ scmd_dbg(cmd, "command %p timed out\n", cmd);
return BLK_EH_NOT_HANDLED;
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4e1b75ca7451..94a3cafe7197 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -73,8 +73,6 @@ struct lpfc_sli2_slim;
*/
/* 1 Second */
#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
-/* 5 minutes */
-#define QUEUE_RAMP_UP_INTERVAL (msecs_to_jiffies(1000 * 300))
/* Number of exchanges reserved for discovery to complete */
#define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -722,6 +720,20 @@ struct lpfc_hba {
uint32_t cfg_hba_queue_depth;
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
+ uint32_t cfg_fof;
+ uint32_t cfg_EnableXLane;
+ uint8_t cfg_oas_tgt_wwpn[8];
+ uint8_t cfg_oas_vpt_wwpn[8];
+ uint32_t cfg_oas_lun_state;
+#define OAS_LUN_ENABLE 1
+#define OAS_LUN_DISABLE 0
+ uint32_t cfg_oas_lun_status;
+#define OAS_LUN_STATUS_EXISTS 0x01
+ uint32_t cfg_oas_flags;
+#define OAS_FIND_ANY_VPORT 0x01
+#define OAS_FIND_ANY_TARGET 0x02
+#define OAS_LUN_VALID 0x04
+ uint32_t cfg_XLanePriority;
uint32_t cfg_enable_bg;
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
@@ -730,6 +742,7 @@ struct lpfc_hba {
uint32_t cfg_request_firmware_upgrade;
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
+ uint32_t cfg_rrq_xri_bitmap_sz;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
@@ -835,6 +848,7 @@ struct lpfc_hba {
mempool_t *mbox_mem_pool;
mempool_t *nlp_mem_pool;
mempool_t *rrq_pool;
+ mempool_t *active_rrq_pool;
struct fc_host_statistics link_stats;
enum intr_type_t intr_type;
@@ -869,7 +883,6 @@ struct lpfc_hba {
atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
unsigned long last_ramp_down_time;
- unsigned long last_ramp_up_time;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *hba_debugfs_root;
atomic_t debugfs_vport_count;
@@ -971,6 +984,9 @@ struct lpfc_hba {
atomic_t sdev_cnt;
uint8_t fips_spec_rev;
uint8_t fips_level;
+ spinlock_t devicelock; /* lock for luns list */
+ mempool_t *device_data_mem_pool;
+ struct list_head luns;
};
static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 00656fc92b93..8d5b6ceec9c9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -529,6 +529,27 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
+ * (OAS) is supported.
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli4_hba.pc_sli4_params.oas_supported);
+}
+
+/**
* lpfc_link_state_store - Transition the link_state on an HBA port
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -2041,9 +2062,53 @@ static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
lpfc_sriov_hw_max_virtfn_show, NULL);
static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
+static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
+ NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+#define WWN_SZ 8
+/**
+ * lpfc_wwn_set - Convert string to the 8 byte WWN value.
+ * @buf: WWN string.
+ * @cnt: Length of string.
+ * @wwn: Array to receive converted wwn value.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain a valid wwn
+ * 0 success
+ **/
+static size_t
+lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
+{
+ unsigned int i, j;
+
+ /* Count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+ if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
+ ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+ return -EINVAL;
+
+ memset(wwn, 0, WWN_SZ);
+
+ /* Validate and store the new name */
+ for (i = 0, j = 0; i < 16; i++) {
+ if ((*buf >= 'a') && (*buf <= 'f'))
+ j = ((j << 4) | ((*buf++ - 'a') + 10));
+ else if ((*buf >= 'A') && (*buf <= 'F'))
+ j = ((j << 4) | ((*buf++ - 'A') + 10));
+ else if ((*buf >= '0') && (*buf <= '9'))
+ j = ((j << 4) | (*buf++ - '0'));
+ else
+ return -EINVAL;
+ if (i % 2) {
+ wwn[i/2] = j & 0xff;
+ j = 0;
+ }
+ }
+ return 0;
+}
/**
* lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
* @dev: class device that is converted into a Scsi_host.
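Note on the new lpfc_wwn_set() helper above: it centralizes the hex-string-to-WWN conversion shared by the soft_wwpn/soft_wwnn attributes and the new OAS (xlane) target/vport attributes — 16 hex digits, optionally prefixed with "x" or "0x", a trailing newline tolerated, filled into an 8-byte array most-significant byte first. A hypothetical usage sketch; the sample WWPN value is made up for illustration:

/* Hypothetical caller: parse a sysfs-style WWPN string into a 64-bit value. */
static int parse_example_wwpn(u64 *out)
{
	u8 wwpn[WWN_SZ];

	/* 18 characters plus the trailing newline a sysfs write usually has. */
	if (lpfc_wwn_set("0x10000000c9abcdef\n", 19, wwpn))
		return -EINVAL;		/* bad length or non-hex digit */

	*out = wwn_to_u64(wwpn);	/* yields 0x10000000c9abcdef */
	return 0;
}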
@@ -2132,9 +2197,9 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
- int stat1=0, stat2=0;
- unsigned int i, j, cnt=count;
- u8 wwpn[8];
+ int stat1 = 0, stat2 = 0;
+ unsigned int cnt = count;
+ u8 wwpn[WWN_SZ];
int rc;
if (!phba->cfg_enable_hba_reset)
@@ -2149,29 +2214,19 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
if (buf[cnt-1] == '\n')
cnt--;
- if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
- ((cnt == 17) && (*buf++ != 'x')) ||
- ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+ if (!phba->soft_wwn_enable)
return -EINVAL;
+ /* lock setting wwpn, wwnn down */
phba->soft_wwn_enable = 0;
- memset(wwpn, 0, sizeof(wwpn));
-
- /* Validate and store the new name */
- for (i=0, j=0; i < 16; i++) {
- int value;
-
- value = hex_to_bin(*buf++);
- if (value >= 0)
- j = (j << 4) | value;
- else
- return -EINVAL;
- if (i % 2) {
- wwpn[i/2] = j & 0xff;
- j = 0;
- }
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (!rc) {
+ /* not able to set wwpn, unlock it */
+ phba->soft_wwn_enable = 1;
+ return rc;
}
+
phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
fc_host_port_name(shost) = phba->cfg_soft_wwpn;
if (phba->cfg_soft_wwnn)
@@ -2198,7 +2253,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
"reinit adapter - %d\n", stat2);
return (stat1 || stat2) ? -EIO : count;
}
-static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
/**
@@ -2235,39 +2290,25 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- unsigned int i, j, cnt=count;
- u8 wwnn[8];
+ unsigned int cnt = count;
+ u8 wwnn[WWN_SZ];
+ int rc;
/* count may include a LF at end of string */
if (buf[cnt-1] == '\n')
cnt--;
- if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
- ((cnt == 17) && (*buf++ != 'x')) ||
- ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+ if (!phba->soft_wwn_enable)
return -EINVAL;
- /*
- * Allow wwnn to be set many times, as long as the enable is set.
- * However, once the wwpn is set, everything locks.
- */
-
- memset(wwnn, 0, sizeof(wwnn));
-
- /* Validate and store the new name */
- for (i=0, j=0; i < 16; i++) {
- int value;
-
- value = hex_to_bin(*buf++);
- if (value >= 0)
- j = (j << 4) | value;
- else
- return -EINVAL;
- if (i % 2) {
- wwnn[i/2] = j & 0xff;
- j = 0;
- }
+ rc = lpfc_wwn_set(buf, cnt, wwnn);
+ if (!rc) {
+ /* Allow wwnn to be set many times, as long as the enable
+ * is set. However, once the wwpn is set, everything locks.
+ */
+ return rc;
}
+
phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
dev_printk(KERN_NOTICE, &phba->pcidev->dev,
@@ -2276,9 +2317,438 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
+/**
+ * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for
+ * Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * value of count
+ **/
+static ssize_t
+lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ wwn_to_u64(phba->cfg_oas_tgt_wwpn));
+}
+
+/**
+ * lpfc_oas_tgt_store - Store wwpn of target whose luns maybe enabled for
+ * Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid, invalid wwpn byte invalid
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ unsigned int cnt = count;
+ uint8_t wwpn[WWN_SZ];
+ int rc;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (rc)
+ return rc;
+
+ memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ if (wwn_to_u64(wwpn) == 0)
+ phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
+ else
+ phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
+ phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+ phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
+ lpfc_oas_tgt_show, lpfc_oas_tgt_store);
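+/*
+ * Example usage for the attribute above (hostN and the wwpn value are
+ * illustrative only):
+ *   echo 0x21000024ff454321 > /sys/class/scsi_host/host0/lpfc_xlane_tgt
+ *   cat /sys/class/scsi_host/host0/lpfc_xlane_tgt
+ */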
+
+/**
+ * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ wwn_to_u64(phba->cfg_oas_vpt_wwpn));
+}
+
+/**
+ * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or a wwpn byte is invalid
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ unsigned int cnt = count;
+ uint8_t wwpn[WWN_SZ];
+ int rc;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (rc)
+ return rc;
+
+ memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ if (wwn_to_u64(wwpn) == 0)
+ phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
+ else
+ phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
+ phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+ phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
+ lpfc_oas_vpt_show, lpfc_oas_vpt_store);
+
+/**
+ * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
+ * that will be applied to luns for Optimized
+ * Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+}
+
+/**
+ * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
+ * that will be applied to luns for Optimized
+ * Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL the buffer does not contain a valid 0 or 1 value
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ int val = 0;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ phba->cfg_oas_lun_state = val;
+
+ return strlen(buf);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
+ lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
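+/*
+ * Example usage (hostN is illustrative): write 1 to request OAS enable, or 0
+ * to request OAS disable, for luns subsequently written to lpfc_xlane_lun:
+ *   echo 1 > /sys/class/scsi_host/host0/lpfc_xlane_lun_state
+ */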
+
+/**
+ * lpfc_oas_lun_status_show - Return the status of the Optimized Access
+ * Storage (OAS) lun returned by the
+ * lpfc_oas_lun_show function.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
+ return -EFAULT;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
+ lpfc_oas_lun_status_show, NULL);
+
+
+/**
+ * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
+ * (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: vport wwpn by reference.
+ * @tgt_wwpn: target wwpn by reference.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the lun.
+ *
+ * Returns:
+ * SUCCESS : 0
+ * -EPERM OAS is not enabled or not supported by this port.
+ * -ENOMEM failed to enable the lun for OAS operations.
+ *
+ */
+static size_t
+lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state)
+{
+
+ int rc = 0;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (oas_state) {
+ if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn, lun))
+ rc = -ENOMEM;
+ } else {
+ lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn, lun);
+ }
+ return rc;
+
+}
+
+/**
+ * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
+ * Access Storage (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport associated with the returned lun
+ * @tgt_wwpn: wwpn of the target associated with the returned lun
+ * @lun_status: status of the returned lun
+ *
+ * Returns the first or next lun enabled for OAS operations for the vport/target
+ * specified. If a lun is found, its vport wwpn, target wwpn and status are
+ * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
+ *
+ * Return:
+ * lun that is OAS enabled for the vport/target
+ * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
+ */
+static uint64_t
+lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint32_t *lun_status)
+{
+ uint64_t found_lun;
+
+ if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
+ return NOT_OAS_ENABLED_LUN;
+ if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
+ phba->sli4_hba.oas_next_vpt_wwpn,
+ (struct lpfc_name *)
+ phba->sli4_hba.oas_next_tgt_wwpn,
+ &phba->sli4_hba.oas_next_lun,
+ (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn,
+ &found_lun, lun_status))
+ return found_lun;
+ else
+ return NOT_OAS_ENABLED_LUN;
+}
+
+/**
+ * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: vport wwpn by reference.
+ * @tgt_wwpn: target wwpn by reference.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the oas_lun.
+ *
+ * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
+ * a lun for OAS operations.
+ *
+ * Return:
+ * SUCCESS: 0
+ * -ENOMEM: failed to enable a lun for OAS operations
+ * -EPERM: OAS is not enabled
+ */
+static ssize_t
+lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint64_t lun,
+ uint32_t oas_state)
+{
+
+ int rc;
+
+ rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
+ oas_state);
+ return rc;
+}
+
+/**
+ * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * This routine returns a lun enabled for OAS each time the function
+ * is called.
+ *
+ * Returns:
+ * SUCCESS: size of formatted string.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ uint64_t oas_lun;
+ int len = 0;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+ if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
+ return -EFAULT;
+
+ if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+ if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
+ return -EFAULT;
+
+ oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
+ phba->cfg_oas_tgt_wwpn,
+ &phba->cfg_oas_lun_status);
+ if (oas_lun != NOT_OAS_ENABLED_LUN)
+ phba->cfg_oas_flags |= OAS_LUN_VALID;
+
+ len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+
+ return len;
+}
+
+/**
+ * lpfc_oas_lun_store - Set the OAS state for a lun
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: size of the data buffer.
+ *
+ * This function sets the OAS state for a lun. Before this function is called,
+ * the vport wwpn, target wwpn, and oas state need to be set.
+ *
+ * Returns:
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ * -EINVAL: the buffer does not contain a valid lun value.
+ * value of count on success.
+ **/
+static ssize_t
+lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ uint64_t scsi_lun;
+ ssize_t rc;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+ return -EFAULT;
+
+ if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+ return -EFAULT;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
+ return -EINVAL;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3372 Try to set vport 0x%llx target 0x%llx lun:%lld "
+ "with oas set to %d\n",
+ wwn_to_u64(phba->cfg_oas_vpt_wwpn),
+ wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
+ phba->cfg_oas_lun_state);
+
+ rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
+ phba->cfg_oas_tgt_wwpn, scsi_lun,
+ phba->cfg_oas_lun_state);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
+ lpfc_oas_lun_show, lpfc_oas_lun_store);
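+/*
+ * Putting the OAS attributes together, an illustrative enable sequence
+ * (hostN, the wwpn values and the lun are examples only) might be:
+ *   echo 0x200000109b34c201 > /sys/class/scsi_host/host0/lpfc_xlane_vpt
+ *   echo 0x21000024ff454321 > /sys/class/scsi_host/host0/lpfc_xlane_tgt
+ *   echo 1 > /sys/class/scsi_host/host0/lpfc_xlane_lun_state
+ *   echo 0x0 > /sys/class/scsi_host/host0/lpfc_xlane_lun
+ */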
static int lpfc_poll = 0;
module_param(lpfc_poll, int, S_IRUGO);
@@ -3818,7 +4288,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_vector_map_info *cpup;
- int idx, len = 0;
+ int len = 0;
if ((phba->sli_rev != LPFC_SLI_REV4) ||
(phba->intr_type != MSIX))
@@ -3846,23 +4316,39 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
break;
}
- cpup = phba->sli4_hba.cpu_map;
- for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+ while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+ cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
+
+ /* margin should fit in this and the truncated message */
if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
len += snprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d\n",
- idx, cpup->channel_id, cpup->phys_id,
+ phba->sli4_hba.curr_disp_cpu,
+ cpup->channel_id, cpup->phys_id,
cpup->core_id);
else
len += snprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d IRQ %d\n",
- idx, cpup->channel_id, cpup->phys_id,
+ phba->sli4_hba.curr_disp_cpu,
+ cpup->channel_id, cpup->phys_id,
cpup->core_id, cpup->irq);
- cpup++;
+ phba->sli4_hba.curr_disp_cpu++;
+
+ /* display max number of CPUs keeping some margin */
+ if (phba->sli4_hba.curr_disp_cpu <
+ phba->sli4_hba.num_present_cpu &&
+ (len >= (PAGE_SIZE - 64))) {
+ len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+ break;
+ }
}
+
+ if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+ phba->sli4_hba.curr_disp_cpu = 0;
+
return len;
}
@@ -4157,6 +4643,21 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
/*
+# lpfc_EnableXLane: Enable Express Lane Feature
+# 0x0 Express Lane Feature disabled
+# 0x1 Express Lane Feature enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
+
+/*
+# lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
+# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
+# Value range is [0x0,0x7f]. Default value is 0
+*/
+LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
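+/*
+# Example (module load time; the values shown are illustrative):
+#   modprobe lpfc lpfc_EnableXLane=1 lpfc_XLanePriority=0x20
+*/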
+
+/*
# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
# 0 = BlockGuard disabled (default)
# 1 = BlockGuard enabled
@@ -4317,6 +4818,13 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_soft_wwn_enable,
&dev_attr_lpfc_enable_hba_reset,
&dev_attr_lpfc_enable_hba_heartbeat,
+ &dev_attr_lpfc_EnableXLane,
+ &dev_attr_lpfc_XLanePriority,
+ &dev_attr_lpfc_xlane_lun,
+ &dev_attr_lpfc_xlane_tgt,
+ &dev_attr_lpfc_xlane_vpt,
+ &dev_attr_lpfc_xlane_lun_state,
+ &dev_attr_lpfc_xlane_lun_status,
&dev_attr_lpfc_sg_seg_cnt,
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
@@ -4335,6 +4843,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_dss,
&dev_attr_lpfc_sriov_hw_max_virtfn,
&dev_attr_protocol,
+ &dev_attr_lpfc_xlane_supported,
NULL,
};
@@ -5296,11 +5805,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+ lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ phba->cfg_EnableXLane = 0;
+ lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
+ memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
+ memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
+ phba->cfg_oas_lun_state = 0;
+ phba->cfg_oas_lun_status = 0;
+ phba->cfg_oas_flags = 0;
lpfc_enable_bg_init(phba, lpfc_enable_bg);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_poll = 0;
else
- phba->cfg_poll = lpfc_poll;
+ phba->cfg_poll = lpfc_poll;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 82134d20e2d8..ca2f4ea7cdef 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -4153,6 +4153,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
switch (opcode) {
case FCOE_OPCODE_READ_FCF:
+ case FCOE_OPCODE_GET_DPORT_RESULTS:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2957 Handled SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
@@ -4161,6 +4162,8 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
nemb_mse, dmabuf);
break;
case FCOE_OPCODE_ADD_FCF:
+ case FCOE_OPCODE_SET_DPORT_MODE:
+ case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2958 Handled SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 67f7d0a160d1..a94d4c9dfaa5 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -231,6 +231,8 @@ struct lpfc_sli_config_emb0_subsys {
#define SLI_CONFIG_SUBSYS_FCOE 0x0C
#define FCOE_OPCODE_READ_FCF 0x08
#define FCOE_OPCODE_ADD_FCF 0x09
+#define FCOE_OPCODE_SET_DPORT_MODE 0x27
+#define FCOE_OPCODE_GET_DPORT_RESULTS 0x28
};
struct lpfc_sli_config_emb1_subsys {
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index cda076a84239..adda0bf7a244 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -187,6 +187,11 @@ void lpfc_offline_prep(struct lpfc_hba *, int);
void lpfc_offline(struct lpfc_hba *);
void lpfc_reset_hba(struct lpfc_hba *);
+int lpfc_fof_queue_create(struct lpfc_hba *);
+int lpfc_fof_queue_setup(struct lpfc_hba *);
+int lpfc_fof_queue_destroy(struct lpfc_hba *);
+irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
+
int lpfc_sli_setup(struct lpfc_hba *);
int lpfc_sli_queue_setup(struct lpfc_hba *);
@@ -242,6 +247,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
+int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
void lpfc_mem_free(struct lpfc_hba *);
void lpfc_mem_free_all(struct lpfc_hba *);
void lpfc_stop_vport_timers(struct lpfc_vport *);
@@ -399,7 +405,6 @@ void lpfc_fabric_block_timeout(unsigned long);
void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
void lpfc_rampdown_queue_depth(struct lpfc_hba *);
void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
-void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
void lpfc_scsi_dev_block(struct lpfc_hba *);
void
@@ -471,3 +476,20 @@ void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
void lpfc_sli4_offline_eratt(struct lpfc_hba *);
+
+struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
+ struct lpfc_name *,
+ struct lpfc_name *,
+ uint64_t, bool);
+void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
+struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
+ struct list_head *list,
+ struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t *, uint32_t *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index b800cc952ca6..828c08e9389e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2280,6 +2280,104 @@ proc_cq:
}
}
+ if (phba->cfg_fof) {
+ /* FOF EQ */
+ qp = phba->sli4_hba.fof_eq;
+ if (!qp)
+ goto out;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\nFOF EQ info: "
+ "EQ-STAT[max:x%x noE:x%x "
+ "bs:x%x proc:x%llx]\n",
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "EQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ /* Reset max counter */
+ qp->EQ_max_eqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ if (phba->cfg_EnableXLane) {
+
+ /* OAS CQ */
+ qp = phba->sli4_hba.oas_cq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tOAS CQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocEQID[%02d]: "
+ "CQ STAT[max:x%x relw:x%x "
+ "xabt:x%x wq:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tCQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ /* OAS WQ */
+ qp = phba->sli4_hba.oas_wq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tOAS WQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocCQID[%02d]: "
+ "WQ-STAT[oflow:x%x posted:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tWQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+ }
+out:
spin_unlock_irq(&phba->hbalock);
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
@@ -3927,6 +4025,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
char name[64];
uint32_t num, i;
+ bool pport_setup = false;
if (!lpfc_debugfs_enable)
return;
@@ -3947,6 +4046,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
/* Setup funcX directory for specific HBA PCI function */
snprintf(name, sizeof(name), "fn%d", phba->brd_no);
if (!phba->hba_debugfs_root) {
+ pport_setup = true;
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
if (!phba->hba_debugfs_root) {
@@ -4239,6 +4339,14 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
/*
+ * The following section is for additional directories/files for the
+ * physical port.
+ */
+
+ if (!pport_setup)
+ goto debug_failed;
+
+ /*
* iDiag debugfs root entry points for SLI4 device only
*/
if (phba->sli_rev < LPFC_SLI_REV4)
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index e409ba5f728c..1a6fe524940d 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -116,7 +116,7 @@ struct lpfc_nodelist {
atomic_t cmd_pending;
uint32_t cmd_qdepth;
unsigned long last_change_time;
- struct lpfc_node_rrqs active_rrqs;
+ unsigned long *active_rrqs_xri_bitmap;
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
};
struct lpfc_node_rrq {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 110445f0c58d..624fe0b3cc0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1516,7 +1516,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
uint32_t rc, keepDID = 0;
int put_node;
int put_rport;
- struct lpfc_node_rrqs rrq;
+ unsigned long *active_rrqs_xri_bitmap = NULL;
/* Fabric nodes can have the same WWPN so we don't bother searching
* by WWPN. Just return the ndlp that was given to us.
@@ -1534,7 +1534,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
return ndlp;
- memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
+ GFP_KERNEL);
+ if (active_rrqs_xri_bitmap)
+ memset(active_rrqs_xri_bitmap, 0,
+ phba->cfg_rrq_xri_bitmap_sz);
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
@@ -1543,41 +1549,58 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
if (!new_ndlp) {
rc = memcmp(&ndlp->nlp_portname, name,
sizeof(struct lpfc_name));
- if (!rc)
+ if (!rc) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
- if (!new_ndlp)
+ if (!new_ndlp) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
rc = memcmp(&ndlp->nlp_portname, name,
sizeof(struct lpfc_name));
- if (!rc)
+ if (!rc) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
new_ndlp = lpfc_enable_node(vport, new_ndlp,
NLP_STE_UNUSED_NODE);
- if (!new_ndlp)
+ if (!new_ndlp) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
keepDID = new_ndlp->nlp_DID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&rrq.xri_bitmap,
- &new_ndlp->active_rrqs.xri_bitmap,
- sizeof(new_ndlp->active_rrqs.xri_bitmap));
+ if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap,
+ new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
} else {
keepDID = new_ndlp->nlp_DID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&rrq.xri_bitmap,
- &new_ndlp->active_rrqs.xri_bitmap,
- sizeof(new_ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap,
+ new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
}
lpfc_unreg_rpi(vport, new_ndlp);
new_ndlp->nlp_DID = ndlp->nlp_DID;
new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(new_ndlp->active_rrqs.xri_bitmap,
- &ndlp->active_rrqs.xri_bitmap,
- sizeof(ndlp->active_rrqs.xri_bitmap));
+ memcpy(new_ndlp->active_rrqs_xri_bitmap,
+ ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -1619,10 +1642,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did on the nodelist */
ndlp->nlp_DID = keepDID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&ndlp->active_rrqs.xri_bitmap,
- &rrq.xri_bitmap,
- sizeof(ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(ndlp->active_rrqs_xri_bitmap,
+ active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
lpfc_drop_node(vport, ndlp);
}
else {
@@ -1634,10 +1658,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did */
ndlp->nlp_DID = keepDID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&ndlp->active_rrqs.xri_bitmap,
- &rrq.xri_bitmap,
- sizeof(ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(ndlp->active_rrqs_xri_bitmap,
+ active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
/* Since we are swapping the ndlp passed in with the new one
* and the did has already been swapped, copy over state.
@@ -1668,6 +1693,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
put_device(&rport->dev);
}
}
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return new_ndlp;
}
@@ -2772,6 +2801,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/
+
lpfc_nlp_put(ndlp);
return 0;
}
@@ -6193,11 +6223,11 @@ lpfc_els_timeout(unsigned long ptr)
spin_lock_irqsave(&vport->work_port_lock, iflag);
tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
- if (!tmo_posted)
+ if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
vport->work_port_events |= WORKER_ELS_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
- if (!tmo_posted)
+ if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
lpfc_worker_wake_up(phba);
return;
}
@@ -6223,19 +6253,26 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
uint32_t els_command = 0;
uint32_t timeout;
uint32_t remote_ID = 0xffffffff;
- LIST_HEAD(txcmplq_completions);
LIST_HEAD(abort_list);
timeout = (uint32_t)(phba->fc_ratov << 1);
pring = &phba->sli.ring[LPFC_ELS_RING];
-
+ if ((phba->pport->load_flag & FC_UNLOADING))
+ return;
spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txcmplq, &txcmplq_completions);
- spin_unlock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ if ((phba->pport->load_flag & FC_UNLOADING)) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
- list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
cmd = &piocb->iocb;
if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
@@ -6274,11 +6311,12 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
}
list_add_tail(&piocb->dlist, &abort_list);
}
- spin_lock_irq(&phba->hbalock);
- list_splice(&txcmplq_completions, &pring->txcmplq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+ cmd = &piocb->iocb;
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0127 ELS timeout Data: x%x x%x x%x "
"x%x\n", els_command,
@@ -6290,8 +6328,9 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
}
if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
- mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * timeout));
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ mod_timer(&vport->els_tmofunc,
+ jiffies + msecs_to_jiffies(1000 * timeout));
}
/**
@@ -6317,15 +6356,50 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
void
lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
- LIST_HEAD(completions);
+ LIST_HEAD(abort_list);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
lpfc_fabric_abort_vport(vport);
+ /*
+ * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
+ * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
+ * ultimately grabs the ring_lock, the driver must splice the list into
+ * a working list and release the locks before calling the abort.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+ continue;
+
+ if (piocb->vport != vport)
+ continue;
+ list_add_tail(&piocb->dlist, &abort_list);
+ }
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ /* Abort each iocb on the aborted list and remove the dlist links. */
+ list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&piocb->dlist);
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ spin_unlock_irq(&phba->hbalock);
+ }
+ if (!list_empty(&abort_list))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "3387 abort list for txq not empty\n");
+ INIT_LIST_HEAD(&abort_list);
spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
cmd = &piocb->iocb;
@@ -6343,24 +6417,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (piocb->vport != vport)
continue;
- list_move_tail(&piocb->list, &completions);
- }
-
- list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
- continue;
- }
-
- if (piocb->vport != vport)
- continue;
-
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ list_del_init(&piocb->list);
+ list_add_tail(&piocb->list, &abort_list);
}
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ lpfc_sli_cancel_iocbs(phba, &abort_list,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
return;
}
@@ -6385,35 +6451,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
void
lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
{
- LIST_HEAD(completions);
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
- struct lpfc_iocbq *tmp_iocb, *piocb;
- IOCB_t *cmd = NULL;
-
- lpfc_fabric_abort_hba(phba);
- spin_lock_irq(&phba->hbalock);
- list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
- cmd = &piocb->iocb;
- if (piocb->iocb_flag & LPFC_IO_LIBDFC)
- continue;
- /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
- if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
- cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
- cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- cmd->ulpCommand == CMD_ABORT_XRI_CN)
- continue;
- list_move_tail(&piocb->list, &completions);
- }
- list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->iocb_flag & LPFC_IO_LIBDFC)
- continue;
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
- }
- spin_unlock_irq(&phba->hbalock);
-
- /* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ struct lpfc_vport *vport;
+ list_for_each_entry(vport, &phba->port_list, listentry)
+ lpfc_els_flush_cmd(vport);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 883ea2d9f237..59b51c529ba0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -674,8 +674,6 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_fdmi_timeout_handler(vport);
if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
lpfc_ramp_down_queue_handler(phba);
- if (work_port_events & WORKER_RAMP_UP_QUEUE)
- lpfc_ramp_up_queue_handler(phba);
if (work_port_events & WORKER_DELAYED_DISC_TMO)
lpfc_delayed_disc_timeout_handler(vport);
}
@@ -2545,8 +2543,11 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (!new_fcf_record) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2766 Mailbox command READ_FCF_RECORD "
- "failed to retrieve a FCF record.\n");
- goto error_out;
+ "failed to retrieve a FCF record. "
+ "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
+ phba->fcf.fcf_flag);
+ lpfc_unregister_fcf_rescan(phba);
+ goto out;
}
/* Get the needed parameters from FCF record */
@@ -3973,7 +3974,10 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
vport->fc_map_cnt += count;
break;
case NLP_STE_NPR_NODE:
- vport->fc_npr_cnt += count;
+ if (vport->fc_npr_cnt == 0 && count == -1)
+ vport->fc_npr_cnt = 0;
+ else
+ vport->fc_npr_cnt += count;
break;
}
spin_unlock_irq(shost->host_lock);
@@ -4180,6 +4184,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_hba *phba = vport->phba;
uint32_t did;
unsigned long flags;
+ unsigned long *active_rrqs_xri_bitmap = NULL;
if (!ndlp)
return NULL;
@@ -4208,12 +4213,17 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Keep the original DID */
did = ndlp->nlp_DID;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
/* re-initialize ndlp except of ndlp linked list pointer */
memset((((char *)ndlp) + sizeof (struct list_head)), 0,
sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
lpfc_initialize_node(vport, ndlp, did);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
+
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
if (vport->phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
@@ -4799,9 +4809,10 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
((uint32_t) ndlp->nlp_rpi & 0xff));
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0929 FIND node DID "
- "Data: x%p x%x x%x x%x\n",
+ "Data: x%p x%x x%x x%x %p\n",
ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag, data1);
+ ndlp->nlp_flag, data1,
+ ndlp->active_rrqs_xri_bitmap);
return ndlp;
}
}
@@ -5618,8 +5629,13 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ ndlp->active_rrqs_xri_bitmap =
+ mempool_alloc(vport->phba->active_rrq_pool,
+ GFP_KERNEL);
+ }
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
@@ -5664,6 +5680,9 @@ lpfc_nlp_release(struct kref *kref)
/* free ndlp memory for final ndlp release */
if (NLP_CHK_FREE_REQ(ndlp)) {
kfree(ndlp->lat_data);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mempool_free(ndlp->active_rrqs_xri_bitmap,
+ ndlp->phba->active_rrq_pool);
mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
}
}
@@ -6170,10 +6189,6 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
memcpy(&conn_entry->conn_rec, &conn_rec[i],
sizeof(struct lpfc_fcf_conn_rec));
- conn_entry->conn_rec.vlan_tag =
- conn_entry->conn_rec.vlan_tag;
- conn_entry->conn_rec.flags =
- conn_entry->conn_rec.flags;
list_add_tail(&conn_entry->list,
&phba->fcf_conn_rec_list);
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 6f927d30ca69..3d9438ce59ab 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -45,6 +45,7 @@
#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
#define LPFC_FCP_NEXT_RING 3
+#define LPFC_FCP_OAS_RING 3
#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 5464b116d328..fd79f7de7666 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2616,6 +2616,9 @@ struct lpfc_sli4_parameters {
#define cfg_phwq_SHIFT 15
#define cfg_phwq_MASK 0x00000001
#define cfg_phwq_WORD word12
+#define cfg_oas_SHIFT 25
+#define cfg_oas_MASK 0x00000001
+#define cfg_oas_WORD word12
#define cfg_loopbk_scope_SHIFT 28
#define cfg_loopbk_scope_MASK 0x0000000f
#define cfg_loopbk_scope_WORD word12
@@ -3322,6 +3325,9 @@ struct wqe_common {
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
#define wqe_ebde_cnt_WORD word10
+#define wqe_oas_SHIFT 6
+#define wqe_oas_MASK 0x00000001
+#define wqe_oas_WORD word10
#define wqe_lenloc_SHIFT 7
#define wqe_lenloc_MASK 0x00000003
#define wqe_lenloc_WORD word10
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 68c94cc85c35..635eeb3d6987 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -80,6 +80,7 @@ static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
+static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1005,9 +1006,14 @@ lpfc_rrq_timeout(unsigned long ptr)
phba = (struct lpfc_hba *)ptr;
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
- phba->hba_flag |= HBA_RRQ_ACTIVE;
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
+ else
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
- lpfc_worker_wake_up(phba);
+
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_worker_wake_up(phba);
}
/**
@@ -1468,7 +1474,8 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
* for handling possible port resource change.
**/
static int
-lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
+lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
+ bool en_rn_msg)
{
int rc;
uint32_t intr_mode;
@@ -1480,9 +1487,10 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
rc = lpfc_sli4_pdev_status_reg_wait(phba);
if (!rc) {
/* need reset: attempt for port recovery */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2887 Reset Needed: Attempting Port "
- "Recovery...\n");
+ if (en_rn_msg)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2887 Reset Needed: Attempting Port "
+ "Recovery...\n");
lpfc_offline_prep(phba, mbx_action);
lpfc_offline(phba);
/* release interrupt for possible resource change */
@@ -1522,6 +1530,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
uint32_t reg_err1, reg_err2;
uint32_t uerrlo_reg, uemasklo_reg;
uint32_t pci_rd_rc1, pci_rd_rc2;
+ bool en_rn_msg = true;
int rc;
/* If the pci channel is offline, ignore possible errors, since
@@ -1572,10 +1581,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
break;
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
- reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+ reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3143 Port Down: Firmware Restarted\n");
- else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ "3143 Port Down: Firmware Update "
+ "Detected\n");
+ en_rn_msg = false;
+ } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3144 Port Down: Debug Dump\n");
@@ -1585,7 +1596,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
"3145 Port Down: Provisioning\n");
/* Check port status register for function reset */
- rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
+ rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
+ en_rn_msg);
if (rc == 0) {
/* don't report event on forced debug dump */
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
@@ -4856,6 +4868,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
struct lpfc_mqe *mqe;
int longs;
+ int fof_vectors = 0;
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
@@ -5061,6 +5074,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = lpfc_sli4_read_config(phba);
if (unlikely(rc))
goto out_free_bsmbx;
+ rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
/* IF Type 0 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
@@ -5118,6 +5134,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
}
mempool_free(mboxq, phba->mbox_mem_pool);
+
+ /* Verify OAS is supported */
+ lpfc_sli4_oas_verify(phba);
+ if (phba->cfg_fof)
+ fof_vectors = 1;
+
/* Verify all the SLI4 queues */
rc = lpfc_sli4_queue_verify(phba);
if (rc)
@@ -5159,7 +5181,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->sli4_hba.fcp_eq_hdl =
kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
- phba->cfg_fcp_io_channel), GFP_KERNEL);
+ (fof_vectors + phba->cfg_fcp_io_channel)),
+ GFP_KERNEL);
if (!phba->sli4_hba.fcp_eq_hdl) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for "
@@ -5169,7 +5192,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
- phba->cfg_fcp_io_channel), GFP_KERNEL);
+ (fof_vectors +
+ phba->cfg_fcp_io_channel)), GFP_KERNEL);
if (!phba->sli4_hba.msix_entries) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2573 Failed allocate memory for msi-x "
@@ -5267,6 +5291,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
kfree(phba->sli4_hba.cpu_map);
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.num_online_cpu = 0;
+ phba->sli4_hba.curr_disp_cpu = 0;
/* Free memory allocated for msi-x interrupt vector entries */
kfree(phba->sli4_hba.msix_entries);
@@ -5390,6 +5415,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Initialize FCF connection rec list */
INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+ /* Initialize OAS configuration list */
+ spin_lock_init(&phba->devicelock);
+ INIT_LIST_HEAD(&phba->luns);
+
return 0;
}
@@ -6816,6 +6845,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
int cfg_fcp_io_channel;
uint32_t cpu;
uint32_t i = 0;
+ int fof_vectors = phba->cfg_fof ? 1 : 0;
/*
* Sanity check for configured queue parameters against the run-time
@@ -6832,6 +6862,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
}
phba->sli4_hba.num_online_cpu = i;
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+ phba->sli4_hba.curr_disp_cpu = 0;
if (i < cfg_fcp_io_channel) {
lpfc_printf_log(phba,
@@ -6842,7 +6873,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
cfg_fcp_io_channel = i;
}
- if (cfg_fcp_io_channel >
+ if (cfg_fcp_io_channel + fof_vectors >
phba->sli4_hba.max_cfg_param.max_eq) {
if (phba->sli4_hba.max_cfg_param.max_eq <
LPFC_FCP_IO_CHAN_MIN) {
@@ -6859,7 +6890,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
"available EQs: from %d to %d\n",
cfg_fcp_io_channel,
phba->sli4_hba.max_cfg_param.max_eq);
- cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
+ cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
+ fof_vectors;
}
/* The actual number of FCP event queues adopted */
@@ -7070,6 +7102,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
phba->sli4_hba.dat_rq = qdesc;
+ /* Create the Queues needed for Flash Optimized Fabric operations */
+ if (phba->cfg_fof)
+ lpfc_fof_queue_create(phba);
return 0;
out_error:
@@ -7094,6 +7129,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
int idx;
+ if (phba->cfg_fof)
+ lpfc_fof_queue_destroy(phba);
+
if (phba->sli4_hba.hba_eq != NULL) {
/* Release HBA event queue */
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
@@ -7478,8 +7516,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.hdr_rq->queue_id,
phba->sli4_hba.dat_rq->queue_id,
phba->sli4_hba.els_cq->queue_id);
+
+ if (phba->cfg_fof) {
+ rc = lpfc_fof_queue_setup(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0549 Failed setup of FOF Queues: "
+ "rc = 0x%x\n", rc);
+ goto out_destroy_els_rq;
+ }
+ }
return 0;
+out_destroy_els_rq:
+ lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
out_destroy_els_wq:
lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
@@ -7518,6 +7568,9 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
int fcp_qidx;
+ /* Unset the queues created for Flash Optimized Fabric operations */
+ if (phba->cfg_fof)
+ lpfc_fof_queue_destroy(phba);
/* Unset mailbox command work queue */
lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
/* Unset ELS work queue */
@@ -8635,6 +8688,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Configure MSI-X capability structure */
vectors = phba->cfg_fcp_io_channel;
+ if (phba->cfg_fof) {
+ phba->sli4_hba.msix_entries[index].entry = index;
+ vectors++;
+ }
enable_msix_vectors:
rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
vectors);
@@ -8664,7 +8721,15 @@ enable_msix_vectors:
phba->sli4_hba.fcp_eq_hdl[index].idx = index;
phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
- rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+ if (phba->cfg_fof && (index == (vectors - 1)))
+ rc = request_irq(
+ phba->sli4_hba.msix_entries[index].vector,
+ &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
+ (char *)&phba->sli4_hba.handler_name[index],
+ &phba->sli4_hba.fcp_eq_hdl[index]);
+ else
+ rc = request_irq(
+ phba->sli4_hba.msix_entries[index].vector,
&lpfc_sli4_hba_intr_handler, IRQF_SHARED,
(char *)&phba->sli4_hba.handler_name[index],
&phba->sli4_hba.fcp_eq_hdl[index]);
@@ -8676,6 +8741,9 @@ enable_msix_vectors:
}
}
+ if (phba->cfg_fof)
+ vectors--;
+
if (vectors != phba->cfg_fcp_io_channel) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3238 Reducing IO channels to match number of "
@@ -8721,7 +8789,10 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
free_irq(phba->sli4_hba.msix_entries[index].vector,
&phba->sli4_hba.fcp_eq_hdl[index]);
}
-
+ if (phba->cfg_fof) {
+ free_irq(phba->sli4_hba.msix_entries[index].vector,
+ &phba->sli4_hba.fcp_eq_hdl[index]);
+ }
/* Disable MSI-X */
pci_disable_msix(phba->pcidev);
@@ -8771,6 +8842,10 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
}
+ if (phba->cfg_fof) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ }
return 0;
}
@@ -8853,6 +8928,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
fcp_eq_in_use, 1);
}
+ if (phba->cfg_fof) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+ fcp_eq_in_use, 1);
+ }
}
}
return intr_mode;
@@ -9163,6 +9244,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+ sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
@@ -10796,6 +10878,169 @@ lpfc_io_resume(struct pci_dev *pdev)
return;
}
+/**
+ * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks to see if OAS is supported for this adapter. If
+ * supported, the Flash Optimized Fabric (fof) configuration flag is set.
+ * Otherwise, the Express Lane configuration flag is cleared and the pool
+ * created for OAS device data is destroyed.
+ *
+ **/
+static void
+lpfc_sli4_oas_verify(struct lpfc_hba *phba)
+{
+
+ if (!phba->cfg_EnableXLane)
+ return;
+
+ if (phba->sli4_hba.pc_sli4_params.oas_supported) {
+ phba->cfg_fof = 1;
+ } else {
+ phba->cfg_EnableXLane = 0;
+ if (phba->device_data_mem_pool)
+ mempool_destroy(phba->device_data_mem_pool);
+ phba->device_data_mem_pool = NULL;
+ }
+
+ return;
+}
+
+/**
+ * lpfc_fof_queue_setup - Set up all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the fof queues for the FC HBA
+ * operation.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ **/
+int
+lpfc_fof_queue_setup(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ int rc;
+
+ rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
+ if (rc)
+ return -ENOMEM;
+
+ if (phba->cfg_EnableXLane) {
+
+ rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
+ phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
+ if (rc)
+ goto out_oas_cq;
+
+ rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
+ phba->sli4_hba.oas_cq, LPFC_FCP);
+ if (rc)
+ goto out_oas_wq;
+
+ phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
+ phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
+ }
+
+ return 0;
+
+out_oas_wq:
+ if (phba->cfg_EnableXLane)
+ lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
+out_oas_cq:
+ lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
+ return rc;
+
+}
+
+/**
+ * lpfc_fof_queue_create - Create all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the fof queues for the FC HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as place holder.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_fof_queue_create(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *qdesc;
+
+ /* Create FOF EQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ phba->sli4_hba.eq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.fof_eq = qdesc;
+
+ if (phba->cfg_EnableXLane) {
+
+ /* Create OAS CQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.oas_cq = qdesc;
+
+ /* Create OAS WQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.oas_wq = qdesc;
+
+ }
+ return 0;
+
+out_error:
+ lpfc_fof_queue_destroy(phba);
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_fof_queue_destroy - Destroy all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 fof queues associated with
+ * the FC HBA operation.
+ *
+ * Return codes
+ * 0 - successful
+ **/
+int
+lpfc_fof_queue_destroy(struct lpfc_hba *phba)
+{
+ /* Release FOF Event queue */
+ if (phba->sli4_hba.fof_eq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
+ phba->sli4_hba.fof_eq = NULL;
+ }
+
+ /* Release OAS Completion queue */
+ if (phba->sli4_hba.oas_cq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
+ phba->sli4_hba.oas_cq = NULL;
+ }
+
+ /* Release OAS Work queue */
+ if (phba->sli4_hba.oas_wq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
+ phba->sli4_hba.oas_wq = NULL;
+ }
+ return 0;
+}
+
static struct pci_device_id lpfc_id_table[] = {
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
PCI_ANY_ID, PCI_ANY_ID, },
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 812d0cd7c86d..ed419aad2b1f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -38,10 +38,29 @@
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
+#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
+int
+lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
+ size_t bytes;
+ int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+ if (max_xri <= 0)
+ return -ENOMEM;
+ bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
+ sizeof(unsigned long);
+ phba->cfg_rrq_xri_bitmap_sz = bytes;
+ phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+ bytes);
+ if (!phba->active_rrq_pool)
+ return -ENOMEM;
+ else
+ return 0;
+}
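+/*
+ * Sizing example (illustrative): with max_xri = 1024 and BITS_PER_LONG = 64,
+ * bytes = ((64 - 1 + 1024) / 64) * sizeof(unsigned long) = 16 * 8 = 128,
+ * i.e. each pool element is a 128-byte bitmap holding one bit per XRI.
+ */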
/**
* lpfc_mem_alloc - create and allocate all PCI and memory pools
@@ -146,6 +165,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
phba->lpfc_drb_pool = NULL;
}
+ if (phba->cfg_EnableXLane) {
+ phba->device_data_mem_pool = mempool_create_kmalloc_pool(
+ LPFC_DEVICE_DATA_POOL_SIZE,
+ sizeof(struct lpfc_device_data));
+ if (!phba->device_data_mem_pool)
+ goto fail_free_hrb_pool;
+ } else {
+ phba->device_data_mem_pool = NULL;
+ }
+
return 0;
fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
@@ -188,6 +217,7 @@ lpfc_mem_free(struct lpfc_hba *phba)
{
int i;
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ struct lpfc_device_data *device_data;
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
@@ -209,6 +239,10 @@ lpfc_mem_free(struct lpfc_hba *phba)
/* Free NLP memory pool */
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
+ mempool_destroy(phba->active_rrq_pool);
+ phba->active_rrq_pool = NULL;
+ }
/* Free mbox memory pool */
mempool_destroy(phba->mbox_mem_pool);
@@ -227,6 +261,19 @@ lpfc_mem_free(struct lpfc_hba *phba)
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
phba->lpfc_scsi_dma_buf_pool = NULL;
+ /* Free Device Data memory pool */
+ if (phba->device_data_mem_pool) {
+ /* Ensure all objects have been returned to the pool */
+ while (!list_empty(&phba->luns)) {
+ device_data = list_first_entry(&phba->luns,
+ struct lpfc_device_data,
+ listentry);
+ list_del(&device_data->listentry);
+ mempool_free(device_data, phba->device_data_mem_pool);
+ }
+ mempool_destroy(phba->device_data_mem_pool);
+ }
+ phba->device_data_mem_pool = NULL;
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index abc361259d6d..c342f6afd747 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -203,8 +203,6 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
- LIST_HEAD(completions);
- LIST_HEAD(txcmplq_completions);
LIST_HEAD(abort_list);
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
@@ -216,32 +214,27 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
-
+ /* Clean up all fabric IOs first.*/
lpfc_fabric_abort_nport(ndlp);
- /* First check the txq */
+ /*
+ * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
+ * of all ELS IOs that need an ABTS. The IOs need to stay on the
+ * txcmplq so that the abort operation completes them successfully.
+ */
spin_lock_irq(&phba->hbalock);
- list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
- /* Check to see if iocb matches the nport we are looking for */
- if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
- /* It matches, so deque and call compl with anp error */
- list_move_tail(&iocb->list, &completions);
- }
- }
-
- /* Next check the txcmplq */
- list_splice_init(&pring->txcmplq, &txcmplq_completions);
- spin_unlock_irq(&phba->hbalock);
-
- list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
- /* Check to see if iocb matches the nport we are looking for */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ /* Add to abort_list on NDLP match. */
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
list_add_tail(&iocb->dlist, &abort_list);
}
- spin_lock_irq(&phba->hbalock);
- list_splice(&txcmplq_completions, &pring->txcmplq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
+ /* Abort the targeted IOs and remove them from the abort list. */
list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
spin_lock_irq(&phba->hbalock);
list_del_init(&iocb->dlist);
@@ -249,9 +242,28 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&phba->hbalock);
}
+ INIT_LIST_HEAD(&abort_list);
+
+ /* Now process the txq */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ /* Check to see if iocb matches the nport we are looking for */
+ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
+ list_del_init(&iocb->list);
+ list_add_tail(&iocb->list, &abort_list);
+ }
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ lpfc_sli_cancel_iocbs(phba, &abort_list,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b2ede05a5f0a..462453ee0bda 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -68,6 +68,17 @@ struct scsi_dif_tuple {
__be32 ref_tag; /* Target LBA or indirect LBA */
};
+static struct lpfc_rport_data *
+lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
+
+ if (vport->phba->cfg_EnableXLane)
+ return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
+ else
+ return (struct lpfc_rport_data *)sdev->hostdata;
+}
+
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
@@ -304,9 +315,27 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
unsigned long new_queue_depth, old_queue_depth;
old_queue_depth = sdev->queue_depth;
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+
+ switch (reason) {
+ case SCSI_QDEPTH_DEFAULT:
+ /* change request from sysfs, fall through */
+ case SCSI_QDEPTH_RAMP_UP:
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+ break;
+ case SCSI_QDEPTH_QFULL:
+ if (scsi_track_queue_full(sdev, qdepth) == 0)
+ return sdev->queue_depth;
+
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "0711 detected queue full - lun queue "
+ "depth adjusted to %d.\n", sdev->queue_depth);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
new_queue_depth = sdev->queue_depth;
- rdata = sdev->hostdata;
+ rdata = lpfc_rport_data_from_scsi_device(sdev);
if (rdata)
lpfc_send_sdev_queuedepth_change_event(phba, vport,
rdata->pnode, sdev->lun,
@@ -377,50 +406,6 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
}
/**
- * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
- * @phba: The Hba for which this call is being executed.
- *
- * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine
- * post at most 1 event every 5 minute after last_ramp_up_time or
- * last_rsrc_error_time. This routine wakes up worker thread of @phba
- * to process WORKER_RAM_DOWN_EVENT event.
- *
- * This routine should be called with no lock held.
- **/
-static inline void
-lpfc_rampup_queue_depth(struct lpfc_vport *vport,
- uint32_t queue_depth)
-{
- unsigned long flags;
- struct lpfc_hba *phba = vport->phba;
- uint32_t evt_posted;
- atomic_inc(&phba->num_cmd_success);
-
- if (vport->cfg_lun_queue_depth <= queue_depth)
- return;
- spin_lock_irqsave(&phba->hbalock, flags);
- if (time_before(jiffies,
- phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
- time_before(jiffies,
- phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return;
- }
- phba->last_ramp_up_time = jiffies;
- spin_unlock_irqrestore(&phba->hbalock, flags);
-
- spin_lock_irqsave(&phba->pport->work_port_lock, flags);
- evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
- if (!evt_posted)
- phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
- spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
-
- if (!evt_posted)
- lpfc_worker_wake_up(phba);
- return;
-}
-
-/**
* lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
* @phba: The Hba for which this call is being executed.
*
@@ -472,41 +457,6 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
}
/**
- * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
- * @phba: The Hba for which this call is being executed.
- *
- * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
- * thread.This routine increases queue depth for all scsi device on each vport
- * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
- * num_cmd_success to zero.
- **/
-void
-lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
-{
- struct lpfc_vport **vports;
- struct Scsi_Host *shost;
- struct scsi_device *sdev;
- int i;
-
- vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL)
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- shost_for_each_device(sdev, shost) {
- if (vports[i]->cfg_lun_queue_depth <=
- sdev->queue_depth)
- continue;
- lpfc_change_queue_depth(sdev,
- sdev->queue_depth+1,
- SCSI_QDEPTH_RAMP_UP);
- }
- }
- lpfc_destroy_vport_work_array(phba, vports);
- atomic_set(&phba->num_rsrc_err, 0);
- atomic_set(&phba->num_cmd_success, 0);
-}
-
-/**
* lpfc_scsi_dev_block - set all scsi hosts to block state
* @phba: Pointer to HBA context object.
*
@@ -1502,7 +1452,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
/* Next check if we need to match the remote NPortID or WWPN */
- rdata = sc->device->hostdata;
+ rdata = lpfc_rport_data_from_scsi_device(sc->device);
if (rdata && rdata->pnode) {
ndlp = rdata->pnode;
@@ -3507,6 +3457,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+
+ /*
+ * If the OAS driver feature is enabled and the lun is enabled for
+ * OAS, set the oas iocb related flags.
+ */
+ if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *)
+ scsi_cmnd->device->hostdata)->oas_enabled)
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
return 0;
}
@@ -4021,7 +3979,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_nodelist *pnode = rdata->pnode;
struct scsi_cmnd *cmd;
int result;
- struct scsi_device *tmp_sdev;
int depth;
unsigned long flags;
struct lpfc_fast_path_event *fast_path_evt;
@@ -4266,32 +4223,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
return;
}
- if (!result)
- lpfc_rampup_queue_depth(vport, queue_depth);
-
- /*
- * Check for queue full. If the lun is reporting queue full, then
- * back off the lun queue depth to prevent target overloads.
- */
- if (result == SAM_STAT_TASK_SET_FULL && pnode &&
- NLP_CHK_NODE_ACT(pnode)) {
- shost_for_each_device(tmp_sdev, shost) {
- if (tmp_sdev->id != scsi_id)
- continue;
- depth = scsi_track_queue_full(tmp_sdev,
- tmp_sdev->queue_depth-1);
- if (depth <= 0)
- continue;
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
- "0711 detected queue full - lun queue "
- "depth adjusted to %d.\n", depth);
- lpfc_send_sdev_queuedepth_change_event(phba, vport,
- pnode,
- tmp_sdev->lun,
- depth+1, depth);
- }
- }
-
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_cmd->pCmd = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -4492,6 +4423,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
}
piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
+ piocb->ulpPU = 0;
+ piocb->un.fcpi.fcpi_parm = 0;
/* ulpTimeout is only one byte */
if (lpfc_cmd->timeout > 0xff) {
@@ -4691,12 +4624,13 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
struct lpfc_scsi_buf *lpfc_cmd;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
err = fc_remote_port_chkready(rport);
if (err) {
cmnd->result = err;
@@ -4782,6 +4716,24 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err) {
atomic_dec(&ndlp->cmd_pending);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "3376 FCP could not issue IOCB err %x"
+ "FCP cmd x%x <%d/%d> "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x x%x\n",
+ err, cmnd->cmnd[0],
+ cmnd->device ? cmnd->device->id : 0xffff,
+ cmnd->device ? cmnd->device->lun : 0xffff,
+ vport->fc_myDID, ndlp->nlp_DID,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
+ lpfc_cmd->cur_iocbq.iocb.ulpContext,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
+ lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
+ (uint32_t)
+ (cmnd->request->timeout / 1000));
+
+
goto out_host_busy_free_buf;
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -5161,10 +5113,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned long later;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0797 Tgt Map rport failure: rdata x%p\n", rdata);
@@ -5182,7 +5135,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
return SUCCESS;
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
- rdata = cmnd->device->hostdata;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata)
return FAILED;
pnode = rdata->pnode;
@@ -5254,13 +5207,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0798 Device Reset rport failure: rdata x%p\n", rdata);
@@ -5323,13 +5277,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0799 Target Reset rport failure: rdata x%p\n", rdata);
@@ -5529,11 +5484,45 @@ lpfc_slave_alloc(struct scsi_device *sdev)
uint32_t num_to_alloc = 0;
int num_allocated = 0;
uint32_t sdev_cnt;
+ struct lpfc_device_data *device_data;
+ unsigned long flags;
+ struct lpfc_name target_wwpn;
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
- sdev->hostdata = rport->dd_data;
+ if (phba->cfg_EnableXLane) {
+
+ /*
+ * Check to see if the device data structure for the lun
+ * exists. If not, create one.
+ */
+
+ u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
+ spin_lock_irqsave(&phba->devicelock, flags);
+ device_data = __lpfc_get_device_data(phba,
+ &phba->luns,
+ &vport->fc_portname,
+ &target_wwpn,
+ sdev->lun);
+ if (!device_data) {
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ device_data = lpfc_create_device_data(phba,
+ &vport->fc_portname,
+ &target_wwpn,
+ sdev->lun, true);
+ if (!device_data)
+ return -ENOMEM;
+ spin_lock_irqsave(&phba->devicelock, flags);
+ list_add_tail(&device_data->listentry, &phba->luns);
+ }
+ device_data->rport_data = rport->dd_data;
+ device_data->available = true;
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ sdev->hostdata = device_data;
+ } else {
+ sdev->hostdata = rport->dd_data;
+ }
sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
/*
@@ -5623,11 +5612,344 @@ lpfc_slave_destroy(struct scsi_device *sdev)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
+ unsigned long flags;
+ struct lpfc_device_data *device_data = sdev->hostdata;
+
atomic_dec(&phba->sdev_cnt);
+ if ((phba->cfg_EnableXLane) && (device_data)) {
+ spin_lock_irqsave(&phba->devicelock, flags);
+ device_data->available = false;
+ if (!device_data->oas_enabled)
+ lpfc_delete_device_data(phba, device_data);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ }
sdev->hostdata = NULL;
return;
}
+/**
+ * lpfc_create_device_data - creates and initializes device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ * @atomic_create: Flag to indicate if memory should be allocated using the
+ * GFP_ATOMIC flag or not.
+ *
+ * This routine creates a device data structure which will contain identifying
+ * information for the device (host wwpn, target wwpn, lun), state of OAS,
+ * whether or not the corresponding lun is available to the system,
+ * and pointer to the rport data.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun,
+ bool atomic_create)
+{
+
+ struct lpfc_device_data *lun_info;
+ int memory_flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !(phba->cfg_EnableXLane))
+ return NULL;
+
+ /* Attempt to create the device data to contain lun info */
+
+ if (atomic_create)
+ memory_flags = GFP_ATOMIC;
+ else
+ memory_flags = GFP_KERNEL;
+ lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
+ if (!lun_info)
+ return NULL;
+ INIT_LIST_HEAD(&lun_info->listentry);
+ lun_info->rport_data = NULL;
+ memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name));
+ lun_info->device_id.lun = lun;
+ lun_info->oas_enabled = false;
+ lun_info->available = false;
+ return lun_info;
+}
+
+/**
+ * lpfc_delete_device_data - frees a device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @lun_info: Pointer to device data structure to free.
+ *
+ * This routine frees the previously allocated device data structure passed.
+ *
+ **/
+void
+lpfc_delete_device_data(struct lpfc_hba *phba,
+ struct lpfc_device_data *lun_info)
+{
+
+ if (unlikely(!phba) || !lun_info ||
+ !(phba->cfg_EnableXLane))
+ return;
+
+ if (!list_empty(&lun_info->listentry))
+ list_del(&lun_info->listentry);
+ mempool_free(lun_info, phba->device_data_mem_pool);
+ return;
+}
+
+/**
+ * __lpfc_get_device_data - returns the device data for the specified lun
+ * @phba: Pointer to host bus adapter structure.
+ * @list: Pointer to the list to search.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ *
+ * This routine searches the list passed for the specified lun's device data.
+ * This function does not hold locks, it is the responsibility of the caller
+ * to ensure the proper lock is held before calling the function.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
+ struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+
+ if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_EnableXLane)
+ return NULL;
+
+ /* Search the list for device data matching this lun. */
+
+ list_for_each_entry(lun_info, list, listentry) {
+ if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name)) == 0) &&
+ (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name)) == 0) &&
+ (lun_info->device_id.lun == lun))
+ return lun_info;
+ }
+
+ return NULL;
+}
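Because __lpfc_get_device_data() takes no locks itself, every caller in this patch brackets it with phba->devicelock. A caller-side sketch of that contract (hypothetical helper, mirroring what lpfc_enable_oas_lun() below does):

	static void demo_mark_lun_oas(struct lpfc_hba *phba,
				      struct lpfc_name *vport_wwpn,
				      struct lpfc_name *target_wwpn, uint64_t lun)
	{
		struct lpfc_device_data *lun_info;
		unsigned long flags;

		spin_lock_irqsave(&phba->devicelock, flags);
		lun_info = __lpfc_get_device_data(phba, &phba->luns,
						  vport_wwpn, target_wwpn, lun);
		if (lun_info)
			lun_info->oas_enabled = true;	/* mutate only under the lock */
		spin_unlock_irqrestore(&phba->devicelock, flags);
	}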
+
+/**
+ * lpfc_find_next_oas_lun - searches for the next oas lun
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @starting_lun: Pointer to the lun to start searching for
+ * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
+ * @found_target_wwpn: Pointer to the found lun's target wwpn information
+ * @found_lun: Pointer to the found lun.
+ * @found_lun_status: Pointer to status of the found lun.
+ *
+ * This routine searches the luns list for the specified lun
+ * or the first lun for the vport/target. If the vport wwpn contains
+ * a zero value then a specific vport is not specified. In this case
+ * any vport which contains the lun will be considered a match. If the
+ * target wwpn contains a zero value then a specific target is not specified.
+ * In this case any target which contains the lun will be considered a
+ * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
+ * are returned. The function will also return the next lun if available.
+ * If the next lun is not found, starting_lun parameter will be set to
+ * NO_MORE_OAS_LUN.
+ *
+ * Return codes:
+ * false - no matching lun found (or invalid parameters)
+ * true - a matching lun was found
+ **/
+bool
+lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t *starting_lun,
+ struct lpfc_name *found_vport_wwpn,
+ struct lpfc_name *found_target_wwpn,
+ uint64_t *found_lun,
+ uint32_t *found_lun_status)
+{
+
+ unsigned long flags;
+ struct lpfc_device_data *lun_info;
+ struct lpfc_device_id *device_id;
+ uint64_t lun;
+ bool found = false;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !starting_lun || !found_vport_wwpn ||
+ !found_target_wwpn || !found_lun || !found_lun_status ||
+ (*starting_lun == NO_MORE_OAS_LUN) ||
+ !phba->cfg_EnableXLane)
+ return false;
+
+ lun = *starting_lun;
+ *found_lun = NO_MORE_OAS_LUN;
+ *starting_lun = NO_MORE_OAS_LUN;
+
+ /* Search for the lun or the lun closest in value */
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+ list_for_each_entry(lun_info, &phba->luns, listentry) {
+ if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
+ (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name)) == 0)) &&
+ ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
+ (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name)) == 0)) &&
+ (lun_info->oas_enabled)) {
+ device_id = &lun_info->device_id;
+ if ((!found) &&
+ ((lun == FIND_FIRST_OAS_LUN) ||
+ (device_id->lun == lun))) {
+ *found_lun = device_id->lun;
+ memcpy(found_vport_wwpn,
+ &device_id->vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(found_target_wwpn,
+ &device_id->target_wwpn,
+ sizeof(struct lpfc_name));
+ if (lun_info->available)
+ *found_lun_status =
+ OAS_LUN_STATUS_EXISTS;
+ else
+ *found_lun_status = 0;
+ if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
+ memset(vport_wwpn, 0x0,
+ sizeof(struct lpfc_name));
+ if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
+ memset(target_wwpn, 0x0,
+ sizeof(struct lpfc_name));
+ found = true;
+ } else if (found) {
+ *starting_lun = device_id->lun;
+ memcpy(vport_wwpn, &device_id->vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(target_wwpn, &device_id->target_wwpn,
+ sizeof(struct lpfc_name));
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return found;
+}
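The starting_lun in/out parameter acts as a cursor: callers begin at FIND_FIRST_OAS_LUN and stop once it comes back as NO_MORE_OAS_LUN. An illustrative enumeration loop built on that contract (caller-side sketch, not part of the patch):

	static void demo_walk_oas_luns(struct lpfc_hba *phba)
	{
		struct lpfc_name vport_wwpn, target_wwpn, found_vport, found_target;
		uint64_t lun = FIND_FIRST_OAS_LUN, found_lun;
		uint32_t found_status;

		memset(&vport_wwpn, 0, sizeof(vport_wwpn));	/* zero wwpn = any vport */
		memset(&target_wwpn, 0, sizeof(target_wwpn));	/* zero wwpn = any target */

		while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn, &lun,
					      &found_vport, &found_target,
					      &found_lun, &found_status)) {
			/* consume found_lun / found_vport / found_target / found_status */
			if (lun == NO_MORE_OAS_LUN)
				break;	/* cursor exhausted: no further OAS luns */
		}
	}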
+
+/**
+ * lpfc_enable_oas_lun - enables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine enables a lun for OAS operations. The routine does so by
+ * doing the following:
+ *
+ * 1) Checks to see if the device data for the lun has been created.
+ * 2) If found, sets the OAS enabled flag if not set and returns.
+ * 3) Otherwise, creates a device data structure.
+ * 4) If successfully created, indicates the device data is for an OAS lun,
+ * indicates the lun is not available and add to the list of luns.
+ *
+ * Return codes:
+ * false - Error
+ * true - Success
+ **/
+bool
+lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+ unsigned long flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_EnableXLane)
+ return false;
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+
+ /* Check to see if the device data for the lun has been created */
+ lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
+ target_wwpn, lun);
+ if (lun_info) {
+ if (!lun_info->oas_enabled)
+ lun_info->oas_enabled = true;
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+
+ /* Create a lun info structure and add it to the list of luns */
+ lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
+ false);
+ if (lun_info) {
+ lun_info->oas_enabled = true;
+ lun_info->available = false;
+ list_add_tail(&lun_info->listentry, &phba->luns);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return false;
+}
+
+/**
+ * lpfc_disable_oas_lun - disables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine disables a lun for OAS operations. The routine does so by
+ * doing the following:
+ *
+ * 1) Checks to see if the device data for the lun is created.
+ * 2) If present, clears the flag indicating this lun is for OAS.
+ * 3) If the lun is not available to the system, the device data is
+ * freed.
+ *
+ * Return codes:
+ * false - Error
+ * true - Success
+ **/
+bool
+lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+ unsigned long flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_EnableXLane)
+ return false;
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+
+ /* Look up the device data for the lun. */
+ lun_info = __lpfc_get_device_data(phba,
+ &phba->luns, vport_wwpn,
+ target_wwpn, lun);
+ if (lun_info) {
+ lun_info->oas_enabled = false;
+ if (!lun_info->available)
+ lpfc_delete_device_data(phba, lun_info);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return false;
+}
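For completeness, a hypothetical management-path wrapper showing how the enable/disable pair above would typically be driven (the real callers live in the sysfs/bsg code and are not part of this hunk):

	static bool lpfc_oas_set_lun_state(struct lpfc_hba *phba,
					   struct lpfc_name *vport_wwpn,
					   struct lpfc_name *target_wwpn,
					   uint64_t lun, bool enable)
	{
		if (enable)
			return lpfc_enable_oas_lun(phba, vport_wwpn, target_wwpn, lun);
		return lpfc_disable_oas_lun(phba, vport_wwpn, target_wwpn, lun);
	}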
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 852ff7def493..0120bfccf50b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -41,6 +41,20 @@ struct lpfc_rport_data {
struct lpfc_nodelist *pnode; /* Pointer to the node structure. */
};
+struct lpfc_device_id {
+ struct lpfc_name vport_wwpn;
+ struct lpfc_name target_wwpn;
+ uint64_t lun;
+};
+
+struct lpfc_device_data {
+ struct list_head listentry;
+ struct lpfc_rport_data *rport_data;
+ struct lpfc_device_id device_id;
+ bool oas_enabled;
+ bool available;
+};
+
struct fcp_rsp {
uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */
uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */
@@ -166,3 +180,7 @@ struct lpfc_scsi_buf {
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
#define MDAC_DIRECT_CMD 0x22
+
+#define FIND_FIRST_OAS_LUN 0
+#define NO_MORE_OAS_LUN -1
+#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
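Since the lun is carried as a uint64_t, the -1 sentinel above wraps to all ones, so NO_MORE_OAS_LUN and NOT_OAS_ENABLED_LUN compare equal to the same value. A small illustrative helper (hypothetical, not part of the patch):

	static inline bool lpfc_lun_is_oas_sentinel(uint64_t lun)
	{
		/* -1 compared as uint64_t becomes 0xFFFFFFFFFFFFFFFF, so one
		 * comparison covers both sentinel names.
		 */
		return lun == (uint64_t)NO_MORE_OAS_LUN;
	}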
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8f580fda443f..6bb51f8e3c1b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -635,7 +635,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
if (!ndlp)
goto out;
- if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
+ if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
@@ -678,7 +678,8 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
next_time = rrq->rrq_stop_time;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
- if (!list_empty(&phba->active_rrq_list))
+ if ((!list_empty(&phba->active_rrq_list)) &&
+ (!(phba->pport->load_flag & FC_UNLOADING)))
mod_timer(&phba->rrq_tmr, next_time);
list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
list_del(&rrq->list);
@@ -792,7 +793,9 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
list_del(&rrq->list);
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
}
- if (!list_empty(&phba->active_rrq_list))
+ if ((!list_empty(&phba->active_rrq_list)) &&
+ (!(phba->pport->load_flag & FC_UNLOADING)))
+
mod_timer(&phba->rrq_tmr, next_time);
}
@@ -813,7 +816,9 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
{
if (!ndlp)
return 0;
- if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+ if (!ndlp->active_rrqs_xri_bitmap)
+ return 0;
+ if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
return 1;
else
return 0;
@@ -863,7 +868,10 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
- if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+ if (!ndlp->active_rrqs_xri_bitmap)
+ goto out;
+
+ if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
goto out;
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -1318,7 +1326,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
+ (!(piocb->vport->load_flag & FC_UNLOADING))) {
if (!piocb->vport)
BUG();
else
@@ -4971,12 +4980,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
LPFC_QUEUE_REARM);
} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
}
+
+ if (phba->cfg_EnableXLane)
+ lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
+
if (phba->sli4_hba.hba_eq) {
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
fcp_eqidx++)
lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
LPFC_QUEUE_REARM);
}
+
+ if (phba->cfg_fof)
+ lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
}
/**
@@ -8032,7 +8048,8 @@ lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
struct lpfc_vector_map_info *cpup;
int chann, cpu;
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
+ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
+ && phba->cfg_fcp_io_channel > 1) {
cpu = smp_processor_id();
if (cpu < phba->sli4_hba.num_present_cpu) {
cpup = phba->sli4_hba.cpu_map;
@@ -8250,6 +8267,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8271,6 +8296,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_FCP_ICMND64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8291,6 +8324,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
iocbq->iocb.ulpFCP2Rcvy);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_GEN_REQUEST64_CR:
/* For this command calculate the xmit length of the
@@ -8523,6 +8564,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
{
struct lpfc_sglq *sglq;
union lpfc_wqe wqe;
+ struct lpfc_queue *wq;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
if (piocb->sli4_xritag == NO_XRI) {
@@ -8575,11 +8617,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_ERROR;
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
- (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return IOCB_ERROR;
- if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
- &wqe))
+ (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+ LPFC_IO_OAS))) {
+ wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
+ } else {
+ wq = phba->sli4_hba.oas_wq;
+ }
+ if (lpfc_sli4_wq_put(wq, &wqe))
return IOCB_ERROR;
} else {
if (unlikely(!phba->sli4_hba.els_wq))
@@ -8669,12 +8714,20 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
if (phba->sli_rev == LPFC_SLI_REV4) {
if (piocb->iocb_flag & LPFC_IO_FCP) {
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return IOCB_ERROR;
- idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
- piocb->fcp_wqidx = idx;
- ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
-
+ if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+ LPFC_IO_OAS))) {
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return IOCB_ERROR;
+ idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ piocb->fcp_wqidx = idx;
+ ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+ } else {
+ if (unlikely(!phba->sli4_hba.oas_wq))
+ return IOCB_ERROR;
+ idx = 0;
+ piocb->fcp_wqidx = 0;
+ ring_number = LPFC_FCP_OAS_RING;
+ }
pring = &phba->sli.ring[ring_number];
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
@@ -12132,6 +12185,175 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
+
+/**
+ * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
+ * entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the Flash Optimized Fabric
+ * event queue. It checks the MajorCode and MinorCode to determine whether the
+ * entry reports a completion event on a completion queue; if not, an error is
+ * logged and the routine returns. Otherwise, it looks up the corresponding
+ * completion queue, processes all the entries on it, rearms the completion
+ * queue, and then returns.
+ **/
+static void
+lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+ struct lpfc_queue *cq;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ uint16_t cqid;
+ int ecount = 0;
+
+ if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9147 Not a valid completion "
+ "event: majorcode=x%x, minorcode=x%x\n",
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
+ return;
+ }
+
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+ /* Next check for OAS */
+ cq = phba->sli4_hba.oas_cq;
+ if (unlikely(!cq)) {
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9148 OAS completion queue "
+ "does not exist\n");
+ return;
+ }
+
+ if (unlikely(cqid != cq->queue_id)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9149 Miss-matched fast-path compl "
+ "queue id: eqcqid=%d, fcpcqid=%d\n",
+ cqid, cq->queue_id);
+ return;
+ }
+
+ /* Process all the entries to the OAS CQ */
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+ if (!(++ecount % cq->entry_repost))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+
+ /* Track the max number of CQEs processed in 1 EQ */
+ if (ecount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ecount;
+
+ /* Catch the no cq entry condition */
+ if (unlikely(ecount == 0))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9153 No entry from fast-path completion "
+ "queue fcpcqid=%d\n", cq->queue_id);
+
+ /* In any case, flush and re-arm the CQ */
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+ /* wake up worker thread if there are works to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
+ * IOCB ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The Flash Optimized Fabric ring events are handled
+ * in interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that
+ * the EQ and CQ are mapped one-to-one, so the EQ index is equal to the
+ * CQ index.
+ *
+ * This function returns IRQ_HANDLED when the interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_queue *eq;
+ struct lpfc_eqe *eqe;
+ unsigned long iflag;
+ int ecount = 0;
+ uint32_t eqidx;
+
+ /* Get the driver's phba structure from the dev_id */
+ fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+ phba = fcp_eq_hdl->phba;
+ eqidx = fcp_eq_hdl->idx;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ eq = phba->sli4_hba.fof_eq;
+ if (unlikely(!eq))
+ return IRQ_NONE;
+
+ /* Check device state for handling interrupt */
+ if (unlikely(lpfc_intr_state_check(phba))) {
+ eq->EQ_badstate++;
+ /* Check again for link_state with lock held */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->link_state < LPFC_LINK_DOWN)
+ /* Flush, clear interrupt, and rearm the EQ */
+ lpfc_sli4_eq_flush(phba, eq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Process all the event on FCP fast-path EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(eq))) {
+ lpfc_sli4_fof_handle_eqe(phba, eqe);
+ if (!(++ecount % eq->entry_repost))
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+ eq->EQ_processed++;
+ }
+
+ /* Track the max number of EQEs processed in 1 intr */
+ if (ecount > eq->EQ_max_eqe)
+ eq->EQ_max_eqe = ecount;
+
+
+ if (unlikely(ecount == 0)) {
+ eq->EQ_no_entry++;
+
+ if (phba->intr_type == MSIX)
+ /* MSI-X: the EQ interrupt is not shared, so warn on an empty EQ */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "9145 MSI-X interrupt with no EQE\n");
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9146 ISR interrupt with no EQE\n");
+ /* INTx/MSI: the interrupt line may be shared, so report none handled */
+ return IRQ_NONE;
+ }
+ }
+ /* Always clear and re-arm the fast-path EQ */
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+ return IRQ_HANDLED;
+}
+
/**
* lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
* @irq: Interrupt number.
@@ -12287,6 +12509,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
hba_handled |= true;
}
+ if (phba->cfg_fof) {
+ hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
+ &phba->sli4_hba.fcp_eq_hdl[0]);
+ if (hba_irq_rc == IRQ_HANDLED)
+ hba_handled |= true;
+ }
+
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
@@ -16544,7 +16773,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
- struct lpfc_iocbq *piocbq = 0;
+ struct lpfc_iocbq *piocbq = NULL;
unsigned long iflags = 0;
char *fail_msg = NULL;
struct lpfc_sglq *sglq;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 6b0f2478706e..6f04080f4ea8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -78,6 +78,8 @@ struct lpfc_iocbq {
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14
+#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
+
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */
struct lpfc_vport *vport;/* virtual port pointer */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 298c8cd1a89d..9b8cda866176 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -39,6 +39,10 @@
#define LPFC_FCP_IO_CHAN_MIN 1
#define LPFC_FCP_IO_CHAN_MAX 16
+/* Number of channels used for Flash Optimized Fabric (FOF) operations */
+
+#define LPFC_FOF_IO_CHAN_NUM 1
+
/*
* Provide the default FCF Record attributes used by the driver
* when nonFIP mode is configured and there is no other default
@@ -399,6 +403,7 @@ struct lpfc_pc_sli4_params {
uint32_t if_page_sz;
uint32_t rq_db_window;
uint32_t loopbk_scope;
+ uint32_t oas_supported;
uint32_t eq_pages_max;
uint32_t eqe_size;
uint32_t cq_pages_max;
@@ -439,6 +444,8 @@ struct lpfc_sli4_lnk_info {
uint8_t lnk_no;
};
+#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \
+ LPFC_FOF_IO_CHAN_NUM)
#define LPFC_SLI4_HANDLER_NAME_SZ 16
/* Used for IRQ vector to CPU mapping */
@@ -507,7 +514,7 @@ struct lpfc_sli4_hba {
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
struct msix_entry *msix_entries;
- uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
+ uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
/* Pointers to the constructed SLI4 queues */
@@ -527,6 +534,17 @@ struct lpfc_sli4_hba {
uint32_t ulp0_mode; /* ULP0 protocol mode */
uint32_t ulp1_mode; /* ULP1 protocol mode */
+ struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
+
+ /* Optimized Access Storage specific queues/structures */
+
+ struct lpfc_queue *oas_cq; /* OAS completion queue */
+ struct lpfc_queue *oas_wq; /* OAS Work queue */
+ struct lpfc_sli_ring *oas_ring;
+ uint64_t oas_next_lun;
+ uint8_t oas_next_tgt_wwpn[8];
+ uint8_t oas_next_vpt_wwpn[8];
+
/* Setup information for various queue parameters */
int eq_esize;
int eq_ecount;
@@ -589,6 +607,7 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_online_cpu;
uint16_t num_present_cpu;
+ uint16_t curr_disp_cpu;
};
enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e3094c4e143b..e32cbec70324 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.43"
+#define LPFC_DRIVER_VERSION "8.3.45"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 816db12ef5d5..b7770516f4c2 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -531,13 +531,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
int target = 0;
int ldrv_num = 0; /* logical drive number */
-
- /*
- * filter the internal and ioctl commands
- */
- if((cmd->cmnd[0] == MEGA_INTERNAL_CMD))
- return (scb_t *)cmd->host_scribble;
-
/*
* We know what channels our logical drives are on - mega_find_card()
*/
@@ -1439,19 +1432,22 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
cmdid = completed[i];
- if( cmdid == CMDID_INT_CMDS ) { /* internal command */
+ /*
+ * Only free SCBs for the commands coming down from the
+ * mid-layer, not for those issued internally.
+ *
+ * For an internal command, restore the status returned by the
+ * firmware so that the user can interpret it.
+ */
+ if (cmdid == CMDID_INT_CMDS) {
scb = &adapter->int_scb;
- cmd = scb->cmd;
- mbox = (mbox_t *)scb->raw_mbox;
- /*
- * Internal command interface do not fire the extended
- * passthru or 64-bit passthru
- */
- pthru = scb->pthru;
+ list_del_init(&scb->list);
+ scb->state = SCB_FREE;
- }
- else {
+ adapter->int_status = status;
+ complete(&adapter->int_waitq);
+ } else {
scb = &adapter->scb_list[cmdid];
/*
@@ -1640,25 +1636,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
cmd->result |= (DID_BAD_TARGET << 16)|status;
}
- /*
- * Only free SCBs for the commands coming down from the
- * mid-layer, not for which were issued internally
- *
- * For internal command, restore the status returned by the
- * firmware so that user can interpret it.
- */
- if( cmdid == CMDID_INT_CMDS ) { /* internal command */
- cmd->result = status;
-
- /*
- * Remove the internal command from the pending list
- */
- list_del_init(&scb->list);
- scb->state = SCB_FREE;
- }
- else {
- mega_free_scb(adapter, scb);
- }
+ mega_free_scb(adapter, scb);
/* Add Scsi_Command to end of completed queue */
list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
@@ -4133,23 +4111,15 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
* The last argument is the address of the passthru structure if the command
* to be fired is a passthru command
*
- * lockscope specifies whether the caller has already acquired the lock. Of
- * course, the caller must know which lock we are talking about.
- *
* Note: parameter 'pthru' is null for non-passthru commands.
*/
static int
mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
{
- Scsi_Cmnd *scmd;
- struct scsi_device *sdev;
+ unsigned long flags;
scb_t *scb;
int rval;
- scmd = scsi_allocate_command(GFP_KERNEL);
- if (!scmd)
- return -ENOMEM;
-
/*
* The internal commands share one command id and hence are
* serialized. This is so because we want to reserve maximum number of
@@ -4160,73 +4130,45 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
scb = &adapter->int_scb;
memset(scb, 0, sizeof(scb_t));
- sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
- scmd->device = sdev;
-
- memset(adapter->int_cdb, 0, sizeof(adapter->int_cdb));
- scmd->cmnd = adapter->int_cdb;
- scmd->device->host = adapter->host;
- scmd->host_scribble = (void *)scb;
- scmd->cmnd[0] = MEGA_INTERNAL_CMD;
-
- scb->state |= SCB_ACTIVE;
- scb->cmd = scmd;
+ scb->idx = CMDID_INT_CMDS;
+ scb->state |= SCB_ACTIVE | SCB_PENDQ;
memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
/*
* Is it a passthru command
*/
- if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
-
+ if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
scb->pthru = pthru;
- }
-
- scb->idx = CMDID_INT_CMDS;
- megaraid_queue_lck(scmd, mega_internal_done);
+ spin_lock_irqsave(&adapter->lock, flags);
+ list_add_tail(&scb->list, &adapter->pending_list);
+ /*
+ * Check if the HBA is in a quiescent state, e.g., during a
+ * delete logical drive operation. If it is, don't run
+ * the pending_list.
+ */
+ if (atomic_read(&adapter->quiescent) == 0)
+ mega_runpendq(adapter);
+ spin_unlock_irqrestore(&adapter->lock, flags);
wait_for_completion(&adapter->int_waitq);
- rval = scmd->result;
- mc->status = scmd->result;
- kfree(sdev);
+ mc->status = rval = adapter->int_status;
/*
* Print a debug message for all failed commands. Applications can use
* this information.
*/
- if( scmd->result && trace_level ) {
+ if (rval && trace_level) {
printk("megaraid: cmd [%x, %x, %x] status:[%x]\n",
- mc->cmd, mc->opcode, mc->subopcode, scmd->result);
+ mc->cmd, mc->opcode, mc->subopcode, rval);
}
mutex_unlock(&adapter->int_mtx);
-
- scsi_free_command(GFP_KERNEL, scmd);
-
return rval;
}
-
-/**
- * mega_internal_done()
- * @scmd - internal scsi command
- *
- * Callback routine for internal commands.
- */
-static void
-mega_internal_done(Scsi_Cmnd *scmd)
-{
- adapter_t *adapter;
-
- adapter = (adapter_t *)scmd->device->host->hostdata;
-
- complete(&adapter->int_waitq);
-
-}
-
-
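With mega_internal_done() gone, the internal-command path above relies on a plain completion handshake: the submitter queues the shared int_scb and sleeps, and mega_cmd_done() stores the firmware status and signals the waiter. A generic, hypothetical sketch of that handshake (field names mirror the patch; serialization via int_mtx and init_completion() at probe time are assumed):

	#include <linux/completion.h>

	struct demo_adapter {
		struct completion int_waitq;	/* init_completion() at probe time */
		int int_status;
	};

	static int demo_issue_internal(struct demo_adapter *a)
	{
		/* ... queue the internal SCB on the pending list here ... */
		wait_for_completion(&a->int_waitq);	/* sleep until the ISR signals */
		return a->int_status;			/* status saved by the ISR path */
	}

	static void demo_cmd_done(struct demo_adapter *a, int status)
	{
		a->int_status = status;
		complete(&a->int_waitq);		/* wake the submitter */
	}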
static struct scsi_host_template megaraid_template = {
.module = THIS_MODULE,
.name = "MegaRAID",
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 4d0ce4e78dfd..508d65e5a518 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -853,10 +853,10 @@ typedef struct {
u8 sglen; /* f/w supported scatter-gather list length */
- unsigned char int_cdb[MAX_COMMAND_SIZE];
scb_t int_scb;
struct mutex int_mtx; /* To synchronize the internal
commands */
+ int int_status; /* status of internal cmd */
struct completion int_waitq; /* wait queue for internal
cmds */
@@ -1004,7 +1004,6 @@ static int mega_del_logdrv(adapter_t *, int);
static int mega_do_del_logdrv(adapter_t *, int);
static void mega_get_max_sgl(adapter_t *);
static int mega_internal_command(adapter_t *, megacmd_t *, mega_passthru *);
-static void mega_internal_done(Scsi_Cmnd *);
static int mega_support_cluster(adapter_t *);
#endif
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index dfffd0f37916..a70692779a16 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -486,6 +486,8 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
pthru32->dataxferaddr = kioc->buf_paddr;
if (kioc->data_dir & UIOC_WR) {
+ if (pthru32->dataxferlen > kioc->xferlen)
+ return -EINVAL;
if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
pthru32->dataxferlen)) {
return (-EFAULT);
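The added check guards copy_from_user() with the size of the kernel bounce buffer: pthru32->dataxferlen comes from userspace and must not exceed kioc->xferlen. A helper expressing the same pattern (hypothetical name, not part of the driver):

	#include <linux/uaccess.h>

	/* Copy a user buffer only if its claimed length fits the destination. */
	static int bounded_copy_from_user(void *dst, size_t dst_len,
					  const void __user *src, u32 claimed_len)
	{
		if (claimed_len > dst_len)
			return -EINVAL;		/* reject user-controlled overrun */
		if (copy_from_user(dst, src, claimed_len))
			return -EFAULT;
		return 0;
	}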
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 34452ea386ac..32166c2c7854 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.700.06.00-rc1"
-#define MEGASAS_RELDATE "Aug. 31, 2013"
-#define MEGASAS_EXT_VERSION "Sat. Aug. 31 17:00:00 PDT 2013"
+#define MEGASAS_VERSION "06.803.01.00-rc1"
+#define MEGASAS_RELDATE "Mar. 10, 2014"
+#define MEGASAS_EXT_VERSION "Mon. Mar. 10 17:00:00 PDT 2014"
/*
* Device IDs
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073
#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071
#define PCI_DEVICE_ID_LSI_FUSION 0x005b
+#define PCI_DEVICE_ID_LSI_PLASMA 0x002f
#define PCI_DEVICE_ID_LSI_INVADER 0x005d
#define PCI_DEVICE_ID_LSI_FURY 0x005f
@@ -559,7 +560,8 @@ struct megasas_ctrl_info {
u8 PCIE:1;
u8 iSCSI:1;
u8 SAS_3G:1;
- u8 reserved_0:4;
+ u8 SRIOV:1;
+ u8 reserved_0:3;
u8 reserved_1[6];
u8 port_count;
u64 port_addr[8];
@@ -839,7 +841,12 @@ struct megasas_ctrl_info {
struct { /*7A4h */
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:11;
+ u32 reserved:5;
+ u32 activePassive:2;
+ u32 supportConfigAutoBalance:1;
+ u32 mpio:1;
+ u32 supportDataLDonSSCArray:1;
+ u32 supportPointInTimeProgress:1;
u32 supportUnevenSpans:1;
u32 dedicatedHotSparesLimited:1;
u32 headlessMode:1;
@@ -886,7 +893,12 @@ struct megasas_ctrl_info {
u32 supportUnevenSpans:1;
- u32 reserved:11;
+ u32 supportPointInTimeProgress:1;
+ u32 supportDataLDonSSCArray:1;
+ u32 mpio:1;
+ u32 supportConfigAutoBalance:1;
+ u32 activePassive:2;
+ u32 reserved:5;
#endif
} adapterOperations2;
@@ -914,8 +926,14 @@ struct megasas_ctrl_info {
} cluster;
char clusterId[16]; /*7D4h */
+ struct {
+ u8 maxVFsSupported; /*0x7E4*/
+ u8 numVFsEnabled; /*0x7E5*/
+ u8 requestorId; /*0x7E6 0:PF, 1:VF1, 2:VF2*/
+ u8 reserved; /*0x7E7*/
+ } iov;
- u8 pad[0x800-0x7E4]; /*7E4 */
+ u8 pad[0x800-0x7E8]; /*0x7E8 pad to 2k */
} __packed;
/*
@@ -986,7 +1004,9 @@ struct megasas_ctrl_info {
#define MFI_OB_INTR_STATUS_MASK 0x00000002
#define MFI_POLL_TIMEOUT_SECS 60
-
+#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ)
+#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30)
+#define MEGASAS_ROUTINE_WAIT_TIME_VF 300
#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004)
@@ -1347,9 +1367,15 @@ struct megasas_cmd;
union megasas_evt_class_locale {
struct {
+#ifndef __BIG_ENDIAN_BITFIELD
u16 locale;
u8 reserved;
s8 class;
+#else
+ s8 class;
+ u8 reserved;
+ u16 locale;
+#endif
} __attribute__ ((packed)) members;
u32 word;
@@ -1523,6 +1549,12 @@ struct megasas_instance {
dma_addr_t producer_h;
u32 *consumer;
dma_addr_t consumer_h;
+ struct MR_LD_VF_AFFILIATION *vf_affiliation;
+ dma_addr_t vf_affiliation_h;
+ struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111;
+ dma_addr_t vf_affiliation_111_h;
+ struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
+ dma_addr_t hb_host_mem_h;
u32 *reply_queue;
dma_addr_t reply_queue_h;
@@ -1598,10 +1630,73 @@ struct megasas_instance {
unsigned long bar;
long reset_flags;
struct mutex reset_mutex;
+ struct timer_list sriov_heartbeat_timer;
+ char skip_heartbeat_timer_del;
+ u8 requestorId;
+ u64 initiator_sas_address;
+ u64 ld_sas_address[64];
+ char PlasmaFW111;
+ char mpio;
int throttlequeuedepth;
u8 mask_interrupts;
u8 is_imr;
};
+struct MR_LD_VF_MAP {
+ u32 size;
+ union MR_LD_REF ref;
+ u8 ldVfCount;
+ u8 reserved[6];
+ u8 policy[1];
+};
+
+struct MR_LD_VF_AFFILIATION {
+ u32 size;
+ u8 ldCount;
+ u8 vfCount;
+ u8 thisVf;
+ u8 reserved[9];
+ struct MR_LD_VF_MAP map[1];
+};
+
+/* Plasma 1.11 FW backward compatibility structures */
+#define IOV_111_OFFSET 0x7CE
+#define MAX_VIRTUAL_FUNCTIONS 8
+
+struct IOV_111 {
+ u8 maxVFsSupported;
+ u8 numVFsEnabled;
+ u8 requestorId;
+ u8 reserved[5];
+};
+
+struct MR_LD_VF_MAP_111 {
+ u8 targetId;
+ u8 reserved[3];
+ u8 policy[MAX_VIRTUAL_FUNCTIONS];
+};
+
+struct MR_LD_VF_AFFILIATION_111 {
+ u8 vdCount;
+ u8 vfCount;
+ u8 thisVf;
+ u8 reserved[5];
+ struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES];
+};
+
+struct MR_CTRL_HB_HOST_MEM {
+ struct {
+ u32 fwCounter; /* Firmware heart beat counter */
+ struct {
+ u32 debugmode:1; /* 1=Firmware is in debug mode.
+ Heart beat will not be updated. */
+ u32 reserved:31;
+ } debug;
+ u32 reserved_fw[6];
+ u32 driverCounter; /* Driver heart beat counter. 0x20 */
+ u32 reserved_driver[7];
+ } HB;
+ u8 pad[0x400-0x40];
+};
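A plausible reading of the shared heartbeat page above, offered only as an illustration (the actual VF heartbeat handler is not part of this hunk): the VF driver bumps driverCounter on its MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF timer and verifies that the firmware keeps advancing fwCounter unless debugmode is set.

	static bool demo_fw_heartbeat_ok(struct MR_CTRL_HB_HOST_MEM *mem,
					 u32 *last_fw_counter)
	{
		u32 fw_now = mem->HB.fwCounter;
		bool alive = mem->HB.debug.debugmode || fw_now != *last_fw_counter;

		mem->HB.driverCounter++;	/* advertise that the driver is alive */
		*last_fw_counter = fw_now;
		return alive;
	}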
enum {
MEGASAS_HBA_OPERATIONAL = 0,
@@ -1609,6 +1704,7 @@ enum {
MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
MEGASAS_ADPRESET_SM_OPERATIONAL = 3,
MEGASAS_HW_CRITICAL_ERROR = 4,
+ MEGASAS_ADPRESET_SM_POLLING = 5,
MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
};
@@ -1728,7 +1824,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3b7ad10497fe..d84d02c2aad9 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : 06.700.06.00-rc1
+ * Version : 06.803.01.00-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -75,6 +75,10 @@ static unsigned int msix_vectors;
module_param(msix_vectors, int, S_IRUGO);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
+static int allow_vf_ioctls;
+module_param(allow_vf_ioctls, int, S_IRUGO);
+MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
+
static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, S_IRUGO);
MODULE_PARM_DESC(throttlequeuedepth,
@@ -122,6 +126,8 @@ static struct pci_device_id megasas_pci_table[] = {
/* xscale IOP */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
/* Fusion */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
+ /* Plasma */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
/* Invader */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
@@ -132,7 +138,7 @@ static struct pci_device_id megasas_pci_table[] = {
MODULE_DEVICE_TABLE(pci, megasas_pci_table);
static int megasas_mgmt_majorno;
-static struct megasas_mgmt_info megasas_mgmt_info;
+struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);
@@ -171,10 +177,15 @@ megasas_get_map_info(struct megasas_instance *instance);
int
megasas_sync_map_info(struct megasas_instance *instance);
int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds);
void megasas_reset_reply_desc(struct megasas_instance *instance);
-int megasas_reset_fusion(struct Scsi_Host *shost);
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);
void megasas_fusion_ocr_wq(struct work_struct *work);
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial);
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd);
void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -224,6 +235,7 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
cmd->scmd = NULL;
cmd->frame_count = 0;
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
(reset_devices))
@@ -877,6 +889,7 @@ extern struct megasas_instance_template megasas_instance_template_fusion;
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
+ int seconds;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
@@ -891,13 +904,18 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
/*
* Wait for cmd_status to change
*/
- return wait_and_poll(instance, cmd);
+ if (instance->requestorId)
+ seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
+ else
+ seconds = MFI_POLL_TIMEOUT_SECS;
+ return wait_and_poll(instance, cmd, seconds);
}
/**
* megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
* @instance: Adapter soft state
* @cmd: Command to be issued
+ * @timeout: Timeout in seconds
*
* This function waits on an event for the command to be returned from ISR.
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
@@ -905,13 +923,20 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
*/
static int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
- struct megasas_cmd *cmd)
+ struct megasas_cmd *cmd, int timeout)
{
+ int ret = 0;
cmd->cmd_status = ENODATA;
instance->instancet->issue_dcmd(instance, cmd);
-
- wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);
+ if (timeout) {
+ ret = wait_event_timeout(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret)
+ return 1;
+ } else
+ wait_event(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
return 0;
}
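
The timeout argument gives blocked DCMDs a bounded wait: zero keeps the old indefinite wait_event(), while a non-zero value makes the call return 1 when the firmware never completes the frame within that many seconds and 0 on normal completion. A minimal standalone sketch of that return contract, with a plain deadline loop standing in for wait_event_timeout() (all names below are illustrative, not the driver's):

#include <stdio.h>
#include <time.h>

#define PENDING (-1)                      /* stands in for ENODATA */

/* 0 == completed, 1 == timed out; timeout_secs == 0 means wait forever. */
static int issue_blocked_sketch(volatile int *cmd_status, int timeout_secs)
{
        time_t deadline = time(NULL) + timeout_secs;

        while (*cmd_status == PENDING) {
                if (timeout_secs && time(NULL) >= deadline)
                        return 1;         /* caller treats this as a firmware timeout */
        }
        return 0;
}

int main(void)
{
        int status = PENDING;             /* the "firmware" never completes the frame */

        printf("blocked cmd result: %d\n", issue_blocked_sketch(&status, 1));
        return 0;
}
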
@@ -920,18 +945,20 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
* megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
* @instance: Adapter soft state
* @cmd_to_abort: Previously issued cmd to be aborted
+ * @timeout: Timeout in seconds
*
- * MFI firmware can abort previously issued AEN comamnd (automatic event
+ * MFI firmware can abort previously issued AEN command (automatic event
* notification). The megasas_issue_blocked_abort_cmd() issues such abort
* cmd and waits for return status.
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
*/
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
- struct megasas_cmd *cmd_to_abort)
+ struct megasas_cmd *cmd_to_abort, int timeout)
{
struct megasas_cmd *cmd;
struct megasas_abort_frame *abort_fr;
+ int ret = 0;
cmd = megasas_get_cmd(instance);
@@ -957,10 +984,18 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
instance->instancet->issue_dcmd(instance, cmd);
- /*
- * Wait for this cmd to complete
- */
- wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF);
+ if (timeout) {
+ ret = wait_event_timeout(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret) {
+ dev_err(&instance->pdev->dev, "Command timed out"
+ " from %s\n", __func__);
+ return 1;
+ }
+ } else
+ wait_event(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
+
cmd->sync_cmd = 0;
megasas_return_cmd(instance, cmd);
@@ -1514,9 +1549,23 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
spin_lock_irqsave(&instance->hba_lock, flags);
+ /* Check for an mpio path and adjust behavior */
+ if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ if (megasas_check_mpio_paths(instance, scmd) ==
+ (DID_RESET << 16)) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ } else {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ scmd->result = DID_NO_CONNECT << 16;
+ done(scmd);
+ return 0;
+ }
+ }
+
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- scmd->result = DID_ERROR << 16;
+ scmd->result = DID_NO_CONNECT << 16;
done(scmd);
return 0;
}
@@ -1641,9 +1690,14 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+ /* Flush */
+ readl(&instance->reg_set->doorbell);
+ if (instance->mpio && instance->requestorId)
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
} else {
writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
}
@@ -1730,6 +1784,25 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
megasas_check_and_restore_queue_depth(instance);
}
+/**
+ * megasas_start_timer - Initializes a timer object
+ * @instance: Adapter soft state
+ * @timer: timer object to be initialized
+ * @fn: timer function
+ * @interval: time interval between timer function call
+ *
+ */
+void megasas_start_timer(struct megasas_instance *instance,
+ struct timer_list *timer,
+ void *fn, unsigned long interval)
+{
+ init_timer(timer);
+ timer->expires = jiffies + interval;
+ timer->data = (unsigned long)instance;
+ timer->function = fn;
+ add_timer(timer);
+}
+
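
A usage sketch for this helper, drawn from the SR-IOV call sites this patch adds in megasas_init_fw(), megasas_resume() and megasas_reset_fusion():

/* Arm the VF heartbeat once the firmware has acknowledged the shared buffer. */
if (!megasas_sriov_start_heartbeat(instance, 1))
        megasas_start_timer(instance, &instance->sriov_heartbeat_timer,
                            megasas_sriov_heartbeat_handler,
                            MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
else
        instance->skip_heartbeat_timer_del = 1;
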
static void
megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
@@ -1752,6 +1825,295 @@ void megasas_do_ocr(struct megasas_instance *instance)
process_fw_state_change_wq(&instance->work_init);
}
+/* This function will get the current SR-IOV LD/VF affiliation */
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
+ struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
+ struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
+ dma_addr_t new_affiliation_h;
+ dma_addr_t new_affiliation_111_h;
+ int ld, retval = 0;
+ u8 thisVf;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_get_ld_vf_"
+ "affiliation: Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (!instance->vf_affiliation && !instance->vf_affiliation_111) {
+ printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ if (initial)
+ if (instance->PlasmaFW111)
+ memset(instance->vf_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ else
+ memset(instance->vf_affiliation, 0,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ else {
+ if (instance->PlasmaFW111)
+ new_affiliation_111 =
+ pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h);
+ else
+ new_affiliation =
+ pci_alloc_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h);
+ if (!new_affiliation && !new_affiliation_111) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+ "memory for new affiliation for scsi%d.\n",
+ instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+ if (instance->PlasmaFW111)
+ memset(new_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ else
+ memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ if (instance->PlasmaFW111) {
+ dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
+ } else {
+ dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
+ }
+
+ if (initial) {
+ if (instance->PlasmaFW111)
+ dcmd->sgl.sge32[0].phys_addr =
+ instance->vf_affiliation_111_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr =
+ instance->vf_affiliation_h;
+ } else {
+ if (instance->PlasmaFW111)
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
+ }
+ if (instance->PlasmaFW111)
+ dcmd->sgl.sge32[0].length =
+ sizeof(struct MR_LD_VF_AFFILIATION_111);
+ else
+ dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+ "scsi%d\n", instance->host->host_no);
+
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+ " failed with status 0x%x for scsi%d.\n",
+ dcmd->cmd_status, instance->host->host_no);
+ retval = 1; /* Do a scan if we couldn't get affiliation */
+ goto out;
+ }
+
+ if (!initial) {
+ if (instance->PlasmaFW111) {
+ if (!new_affiliation_111->vdCount) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new "
+ "LD/VF affiliation for passive path "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ thisVf = new_affiliation_111->thisVf;
+ for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
+ if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "Got new LD/VF affiliation "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ memcpy(instance->vf_affiliation_111,
+ new_affiliation_111,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ retval = 1;
+ goto out;
+ }
+ } else {
+ if (!new_affiliation->ldCount) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new "
+ "LD/VF affiliation for passive "
+ "path for scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ newmap = new_affiliation->map;
+ savedmap = instance->vf_affiliation->map;
+ thisVf = new_affiliation->thisVf;
+ for (ld = 0 ; ld < new_affiliation->ldCount; ld++) {
+ if (savedmap->policy[thisVf] !=
+ newmap->policy[thisVf]) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "Got new LD/VF affiliation "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ memcpy(instance->vf_affiliation,
+ new_affiliation,
+ new_affiliation->size);
+ retval = 1;
+ goto out;
+ }
+ savedmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)savedmap +
+ savedmap->size);
+ newmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)newmap +
+ newmap->size);
+ }
+ }
+ }
+out:
+ if (new_affiliation_111)
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ new_affiliation_111,
+ new_affiliation_111_h);
+ else if (new_affiliation)
+ pci_free_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ new_affiliation, new_affiliation_h);
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
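
In the non-Plasma comparison above, savedmap and newmap advance through variable-length MR_LD_VF_MAP records using each record's own size field rather than a fixed stride. A standalone sketch of that walking pattern (the record layout here is simplified and purely illustrative):

#include <stdio.h>

/* Simplified stand-in for MR_LD_VF_MAP: each record carries its own size. */
struct vf_map_rec {
        unsigned int size;                /* total bytes occupied by this record */
        unsigned char policy[4];          /* payload; real records vary in length */
};

static const struct vf_map_rec *next_rec(const struct vf_map_rec *r)
{
        return (const struct vf_map_rec *)((const unsigned char *)r + r->size);
}

int main(void)
{
        struct vf_map_rec recs[2] = {
                { .size = sizeof(struct vf_map_rec), .policy = { 1, 2, 3, 4 } },
                { .size = sizeof(struct vf_map_rec), .policy = { 9, 2, 3, 4 } },
        };
        const struct vf_map_rec *p = recs;
        const unsigned char *end = (const unsigned char *)&recs[2];

        while ((const unsigned char *)p < end) {
                printf("policy[0] = %u\n", p->policy[0]);
                p = next_rec(p);
        }
        return 0;
}
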
+/* This function will tell FW to start the SR-IOV heartbeat */
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ int retval = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: "
+ "Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (initial) {
+ instance->hb_host_mem =
+ pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_CTRL_HB_HOST_MEM),
+ &instance->hb_host_mem_h);
+ if (!instance->hb_host_mem) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
+ " memory for heartbeat host memory for "
+ "scsi%d.\n", instance->host->host_no);
+ retval = -ENOMEM;
+ goto out;
+ }
+ memset(instance->hb_host_mem, 0,
+ sizeof(struct MR_CTRL_HB_HOST_MEM));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC;
+ dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h;
+ dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
+ instance->host->host_no);
+
+ if (!megasas_issue_polled(instance, cmd)) {
+ retval = 0;
+ } else {
+ printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+ "_MEM_ALLOC DCMD timed out for scsi%d\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+ "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n",
+ dcmd->cmd_status,
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+
+out:
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+/* Handler for SR-IOV heartbeat */
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
+{
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+
+ if (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ mod_timer(&instance->sriov_heartbeat_timer,
+ jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ } else {
+ printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never "
+ "completed for scsi%d\n", instance->host->host_no);
+ schedule_work(&instance->work_init);
+ }
+}
+
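
The handler is a counter handshake over the shared memory set up by megasas_sriov_start_heartbeat(): firmware advances fwCounter, the driver acknowledges by copying it into driverCounter, and an unchanged pair at the next timer tick means the firmware missed a beat and an OCR is scheduled. A standalone sketch of that decision (struct and field names are illustrative):

#include <stdio.h>

struct hb_mem {
        unsigned int fwCounter;           /* advanced by firmware */
        unsigned int driverCounter;       /* acknowledged by the driver */
};

/* 1 == beat seen, re-arm the timer; 0 == firmware stalled, schedule recovery. */
static int heartbeat_tick(struct hb_mem *hb)
{
        if (hb->fwCounter != hb->driverCounter) {
                hb->driverCounter = hb->fwCounter;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct hb_mem hb = { .fwCounter = 1, .driverCounter = 0 };

        printf("tick 1: %s\n", heartbeat_tick(&hb) ? "re-arm" : "recover");
        /* firmware did not advance fwCounter before the next tick */
        printf("tick 2: %s\n", heartbeat_tick(&hb) ? "re-arm" : "recover");
        return 0;
}
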
/**
* megasas_wait_for_outstanding - Wait for all outstanding cmds
* @instance: Adapter soft state
@@ -2014,9 +2376,10 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
* First wait for all commands to complete
*/
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
- ret = megasas_reset_fusion(scmd->device->host);
+ ret = megasas_reset_fusion(scmd->device->host, 1);
else
ret = megasas_generic_reset(scmd);
@@ -2731,6 +3094,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FURY)) {
@@ -2755,6 +3120,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FURY)) {
@@ -2780,6 +3147,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
(instance->pdev->device
== PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device
+ == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device
== PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device
== PCI_DEVICE_ID_LSI_FURY)) {
@@ -2788,6 +3157,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
if ((instance->pdev->device ==
PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FURY)) {
@@ -3014,6 +3385,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0;
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
(reset_devices))
@@ -3620,6 +3992,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
struct megasas_ctrl_info *ctrl_info;
unsigned long bar_list;
int i, loop, fw_msix_count = 0;
+ struct IOV_111 *iovPtr;
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
@@ -3642,6 +4015,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
instance->instancet = &megasas_instance_template_fusion;
@@ -3696,7 +4070,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
scratch_pad_2 = readl
(&instance->reg_set->outbound_scratch_pad_2);
/* Check max MSI-X vectors */
- if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) {
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) {
instance->msix_vectors = (scratch_pad_2
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
fw_msix_count = instance->msix_vectors;
@@ -3763,7 +4138,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
memset(instance->pd_list, 0 ,
(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
- megasas_get_pd_list(instance);
+ if (megasas_get_pd_list(instance) < 0) {
+ printk(KERN_ERR "megasas: failed to get PD list\n");
+ goto fail_init_adapter;
+ }
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
if (megasas_ld_list_query(instance,
@@ -3807,6 +4185,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
/* adapterOperations2 are converted into CPU arch*/
le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
+ instance->mpio = ctrl_info->adapterOperations2.mpio;
instance->UnevenSpanSupport =
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
@@ -3819,6 +4198,20 @@ static int megasas_init_fw(struct megasas_instance *instance)
fusion->fast_path_io = 0;
}
+ if (ctrl_info->host_interface.SRIOV) {
+ if (!ctrl_info->adapterOperations2.activePassive)
+ instance->PlasmaFW111 = 1;
+
+ if (!instance->PlasmaFW111)
+ instance->requestorId =
+ ctrl_info->iov.requestorId;
+ else {
+ iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
+ instance->requestorId = iovPtr->requestorId;
+ }
+ printk(KERN_WARNING "megaraid_sas: I am VF "
+ "requestorId %d\n", instance->requestorId);
+ }
}
instance->max_sectors_per_req = instance->max_num_sge *
PAGE_SIZE / 512;
@@ -3851,6 +4244,17 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
+ /* Launch SR-IOV heartbeat timer */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 1))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
return 0;
fail_init_adapter:
@@ -3933,16 +4337,19 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
- megasas_issue_blocked_cmd(instance, cmd);
-
- /*
- * Copy the data back into callers buffer
- */
- eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
- eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
- eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
- eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
- eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+ dev_err(&instance->pdev->dev, "Command timed out"
+ " from %s\n", __func__);
+ else {
+ /*
+ * Copy the data back into callers buffer
+ */
+ eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
+ eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
+ eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
+ eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
+ eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+ }
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
@@ -4018,7 +4425,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
instance->aen_cmd->abort_aen = 1;
ret_val = megasas_issue_blocked_abort_cmd(instance,
instance->
- aen_cmd);
+ aen_cmd, 30);
if (ret_val) {
printk(KERN_DEBUG "megasas: Failed to abort "
@@ -4160,6 +4567,7 @@ static int megasas_io_attach(struct megasas_instance *instance)
/* Fusion only supports host reset */
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
host->hostt->eh_device_reset_handler = NULL;
@@ -4197,6 +4605,19 @@ megasas_set_dma_mask(struct pci_dev *pdev)
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
goto fail_set_dma_mask;
}
+ /*
+ * Ensure that all data structures are allocated in 32-bit
+ * memory.
+ */
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ /* Try 32bit DMA mask and 32 bit Consistent dma mask */
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ dev_info(&pdev->dev, "set 32bit DMA mask "
+ "and 32 bit consistent mask\n");
+ else
+ goto fail_set_dma_mask;
+ }
return 0;
@@ -4212,7 +4633,7 @@ fail_set_dma_mask:
static int megasas_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int rval, pos, i, j;
+ int rval, pos, i, j, cpu;
struct Scsi_Host *host;
struct megasas_instance *instance;
u16 control = 0;
@@ -4272,6 +4693,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
{
@@ -4368,6 +4790,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->UnevenSpanSupport = 0;
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
@@ -4380,12 +4803,33 @@ static int megasas_probe_one(struct pci_dev *pdev,
if (megasas_init_fw(instance))
goto fail_init_mfi;
+ if (instance->requestorId) {
+ if (instance->PlasmaFW111) {
+ instance->vf_affiliation_111 =
+ pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &instance->vf_affiliation_111_h);
+ if (!instance->vf_affiliation_111)
+ printk(KERN_WARNING "megasas: Can't allocate "
+ "memory for VF affiliation buffer\n");
+ } else {
+ instance->vf_affiliation =
+ pci_alloc_consistent(pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &instance->vf_affiliation_h);
+ if (!instance->vf_affiliation)
+ printk(KERN_WARNING "megasas: Can't allocate "
+ "memory for VF affiliation buffer\n");
+ }
+ }
+
retry_irq_register:
/*
* Register IRQ
*/
if (instance->msix_vectors) {
- for (i = 0 ; i < instance->msix_vectors; i++) {
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
if (request_irq(instance->msixentry[i].vector,
@@ -4394,14 +4838,22 @@ retry_irq_register:
&instance->irq_context[i])) {
printk(KERN_DEBUG "megasas: Failed to "
"register IRQ for vector %d.\n", i);
- for (j = 0 ; j < i ; j++)
+ for (j = 0; j < i; j++) {
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
free_irq(
instance->msixentry[j].vector,
&instance->irq_context[j]);
+ }
/* Retry irq register for IO_APIC */
instance->msix_vectors = 0;
goto retry_irq_register;
}
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev, "Error setting "
+ "affinity hint for cpu %d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
}
} else {
instance->irq_context[0].instance = instance;
@@ -4455,13 +4907,17 @@ retry_irq_register:
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
- for (i = 0 ; i < instance->msix_vectors; i++)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
+ }
else
free_irq(instance->pdev->irq, &instance->irq_context[0]);
fail_irq:
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
megasas_release_fusion(instance);
@@ -4522,7 +4978,9 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
- megasas_issue_blocked_cmd(instance, cmd);
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+ dev_err(&instance->pdev->dev, "Command timed out"
+ " from %s\n", __func__);
megasas_return_cmd(instance, cmd);
@@ -4549,10 +5007,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
return;
if (instance->aen_cmd)
- megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
+ megasas_issue_blocked_abort_cmd(instance,
+ instance->aen_cmd, 30);
if (instance->map_update_cmd)
megasas_issue_blocked_abort_cmd(instance,
- instance->map_update_cmd);
+ instance->map_update_cmd, 30);
dcmd = &cmd->frame->dcmd;
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4566,7 +5025,9 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dcmd->data_xfer_len = 0;
dcmd->opcode = cpu_to_le32(opcode);
- megasas_issue_blocked_cmd(instance, cmd);
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+ dev_err(&instance->pdev->dev, "Command timed out"
+ " from %s\n", __func__);
megasas_return_cmd(instance, cmd);
@@ -4590,6 +5051,10 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
host = instance->host;
instance->unload = 1;
+ /* Shutdown SR-IOV heartbeat timer */
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
@@ -4606,9 +5071,12 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
- for (i = 0 ; i < instance->msix_vectors; i++)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
+ }
else
free_irq(instance->pdev->irq, &instance->irq_context[0]);
if (instance->msix_vectors)
@@ -4629,7 +5097,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
static int
megasas_resume(struct pci_dev *pdev)
{
- int rval, i, j;
+ int rval, i, j, cpu;
struct Scsi_Host *host;
struct megasas_instance *instance;
@@ -4673,6 +5141,7 @@ megasas_resume(struct pci_dev *pdev)
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
{
@@ -4701,6 +5170,7 @@ megasas_resume(struct pci_dev *pdev)
* Register IRQ
*/
if (instance->msix_vectors) {
+ cpu = cpumask_first(cpu_online_mask);
for (i = 0 ; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
@@ -4710,12 +5180,21 @@ megasas_resume(struct pci_dev *pdev)
&instance->irq_context[i])) {
printk(KERN_DEBUG "megasas: Failed to "
"register IRQ for vector %d.\n", i);
- for (j = 0 ; j < i ; j++)
+ for (j = 0; j < i; j++) {
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
free_irq(
instance->msixentry[j].vector,
&instance->irq_context[j]);
+ }
goto fail_irq;
}
+
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev, "Error setting "
+ "affinity hint for cpu %d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
}
} else {
instance->irq_context[0].instance = instance;
@@ -4728,6 +5207,17 @@ megasas_resume(struct pci_dev *pdev)
}
}
+ /* Re-launch SR-IOV heartbeat timer */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 0))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
instance->instancet->enable_intr(instance);
instance->unload = 0;
@@ -4782,6 +5272,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
host = instance->host;
fusion = instance->ctrl_context;
+ /* Shutdown SR-IOV heartbeat timer */
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
scsi_remove_host(instance->host);
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4793,6 +5287,9 @@ static void megasas_detach_one(struct pci_dev *pdev)
instance->ev = NULL;
}
+ /* cancel all wait events */
+ wake_up_all(&instance->int_cmd_wait_q);
+
tasklet_kill(&instance->isr_tasklet);
/*
@@ -4811,9 +5308,12 @@ static void megasas_detach_one(struct pci_dev *pdev)
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
- for (i = 0 ; i < instance->msix_vectors; i++)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
+ }
else
free_irq(instance->pdev->irq, &instance->irq_context[0]);
if (instance->msix_vectors)
@@ -4821,6 +5321,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
megasas_release_fusion(instance);
@@ -4847,6 +5348,24 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail, instance->evt_detail_h);
+
+ if (instance->vf_affiliation)
+ pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ instance->vf_affiliation,
+ instance->vf_affiliation_h);
+
+ if (instance->vf_affiliation_111)
+ pci_free_consistent(pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ instance->vf_affiliation_111,
+ instance->vf_affiliation_111_h);
+
+ if (instance->hb_host_mem)
+ pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+ instance->hb_host_mem,
+ instance->hb_host_mem_h);
+
scsi_host_put(host);
pci_disable_device(pdev);
@@ -4868,9 +5387,12 @@ static void megasas_shutdown(struct pci_dev *pdev)
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
- for (i = 0 ; i < instance->msix_vectors; i++)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
+ }
else
free_irq(instance->pdev->irq, &instance->irq_context[0]);
if (instance->msix_vectors)
@@ -5045,7 +5567,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* cmd to the SCSI mid-layer
*/
cmd->sync_cmd = 1;
- megasas_issue_blocked_cmd(instance, cmd);
+ megasas_issue_blocked_cmd(instance, cmd, 0);
cmd->sync_cmd = 0;
/*
@@ -5132,6 +5654,16 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
+ /* Adjust ioctl wait time for VF mode */
+ if (instance->requestorId)
+ wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+ /* Block ioctls in VF mode */
+ if (instance->requestorId && !allow_vf_ioctls) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
printk(KERN_ERR "Controller in crit error\n");
error = -ENODEV;
@@ -5441,7 +5973,7 @@ megasas_aen_polling(struct work_struct *work)
u16 pd_index = 0;
u16 ld_index = 0;
int i, j, doscan = 0;
- u32 seq_num;
+ u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
int error;
if (!instance) {
@@ -5449,6 +5981,23 @@ megasas_aen_polling(struct work_struct *work)
kfree(ev);
return;
}
+
+ /* Adjust event workqueue thread wait time for VF mode */
+ if (instance->requestorId)
+ wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+ /* Don't run the event workqueue thread if OCR is running */
+ for (i = 0; i < wait_time; i++) {
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
+ break;
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: %s waiting for "
+ "controller reset to finish for scsi%d\n",
+ __func__, instance->host->host_no);
+ }
+ msleep(1000);
+ }
+
instance->ev = NULL;
host = instance->host;
if (instance->evt_detail) {
@@ -5515,65 +6064,64 @@ megasas_aen_polling(struct work_struct *work)
case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host,
- MEGASAS_MAX_PD_CHANNELS + i,
- j,
- 0);
-
- if (instance->ld_ids[ld_index] != 0xff) {
- if (sdev1) {
- scsi_device_put(sdev1);
- }
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (sdev1)
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
}
}
- }
+ doscan = 0;
}
- doscan = 0;
break;
case MR_EVT_LD_CREATED:
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host,
- MEGASAS_MAX_PD_CHANNELS + i,
- j, 0);
-
- if (instance->ld_ids[ld_index] !=
- 0xff) {
- if (!sdev1) {
- scsi_add_device(host,
- MEGASAS_MAX_PD_CHANNELS + i,
- j, 0);
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
}
- }
- if (sdev1) {
- scsi_device_put(sdev1);
+ if (sdev1)
+ scsi_device_put(sdev1);
}
}
+ doscan = 0;
}
- doscan = 0;
break;
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
@@ -5591,50 +6139,55 @@ megasas_aen_polling(struct work_struct *work)
}
if (doscan) {
- printk(KERN_INFO "scanning ...\n");
- megasas_get_pd_list(instance);
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
- pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
- sdev1 = scsi_device_lookup(host, i, j, 0);
- if (instance->pd_list[pd_index].driveState ==
- MR_PD_STATE_SYSTEM) {
- if (!sdev1) {
- scsi_add_device(host, i, j, 0);
- }
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
+ printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n",
+ instance->host->host_no);
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+ sdev1 = scsi_device_lookup(host, i, j, 0);
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ if (!sdev1) {
+ scsi_add_device(host, i, j, 0);
+ }
+ if (sdev1)
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
}
}
}
}
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
- sdev1 = scsi_device_lookup(host,
- MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- if (instance->ld_ids[ld_index] != 0xff) {
- if (!sdev1) {
- scsi_add_device(host,
- MEGASAS_MAX_PD_CHANNELS + i,
- j, 0);
+ sdev1 = scsi_device_lookup(host,
+ MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ else
+ scsi_device_put(sdev1);
} else {
- scsi_device_put(sdev1);
- }
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
}
}
}
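
On the VF side, the rewritten AEN handling above rescans logical drives only when megasas_get_ld_vf_affiliation() reports a change, while a physical function (requestorId == 0) always rescans. A condensed sketch of that gate, with the redundant inner requestorId test dropped and rescan_logical_drives() as a hypothetical placeholder for the LD lookup/add/remove loops:

/* Hedged sketch of the VF rescan gate used in megasas_aen_polling(). */
if (!instance->requestorId ||
    megasas_get_ld_vf_affiliation(instance, 0))
        rescan_logical_drives(instance);   /* hypothetical helper */
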
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e24b6eb645b5..081bfff12d00 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -143,12 +143,12 @@ u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
{
- return map->raidMap.ldSpanMap[ld].ldRaid.targetId;
+ return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
{
- return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]);
+ return map->raidMap.ldTgtIdToLd[ldTgtId];
}
static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
@@ -975,7 +975,10 @@ MR_BuildRaidContext(struct megasas_instance *instance,
regSize += stripSize;
}
- pRAID_Context->timeoutValue = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
+ pRAID_Context->timeoutValue =
+ cpu_to_le16(raid->fpIoTimeoutForLd ?
+ raid->fpIoTimeoutForLd :
+ map->raidMap.fpPdIoTimeoutSec);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
pRAID_Context->regLockFlags = (isRead) ?
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f6555921fd7a..22600419ae9f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -62,7 +62,8 @@ megasas_complete_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, u8 alt_status);
int megasas_is_ldio(struct scsi_cmnd *cmd);
int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds);
void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
@@ -81,6 +82,13 @@ int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
void megaraid_sas_kill_hba(struct megasas_instance *instance);
extern u32 megasas_dbg_lvl;
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+ int initial);
+void megasas_start_timer(struct megasas_instance *instance,
+ struct timer_list *timer,
+ void *fn, unsigned long interval);
+extern struct megasas_mgmt_info megasas_mgmt_info;
extern int resetwaittime;
/**
@@ -549,12 +557,13 @@ fail_req_desc:
* For polling, MFI requires the cmd_status to be set to 0xFF before posting.
*/
int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd)
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds)
{
int i;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
- u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
+ u32 msecs = seconds * 1000;
/*
* Wait for cmd_status to change
@@ -585,7 +594,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
struct megasas_cmd *cmd;
u8 ret;
struct fusion_context *fusion;
- union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
int i;
struct megasas_header *frame_hdr;
@@ -644,18 +653,18 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
- init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle);
+ init_frame->queue_info_new_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(ioc_init_handle));
+ init_frame->queue_info_new_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(ioc_init_handle));
init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
- req_desc =
- (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
-
- req_desc->Words = 0;
- req_desc->MFAIo.RequestFlags =
+ req_desc.Words = 0;
+ req_desc.MFAIo.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- cpu_to_le32s((u32 *)&req_desc->MFAIo);
- req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr);
+ cpu_to_le32s((u32 *)&req_desc.MFAIo);
+ req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr);
/*
* disable the intr before firing the init frame
@@ -669,10 +678,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
break;
}
- instance->instancet->fire_cmd(instance, req_desc->u.low,
- req_desc->u.high, instance->reg_set);
+ instance->instancet->fire_cmd(instance, req_desc.u.low,
+ req_desc.u.high, instance->reg_set);
- wait_and_poll(instance, cmd);
+ wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
frame_hdr = &cmd->frame->hdr;
if (frame_hdr->cmd_status != 0) {
@@ -723,7 +732,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
if (!fusion) {
megasas_return_cmd(instance, cmd);
- return 1;
+ return -ENXIO;
}
dcmd = &cmd->frame->dcmd;
@@ -1604,13 +1613,15 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
- io_request->IoFlags |=
- MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ io_request->IoFlags |= cpu_to_le16(
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
cmd->request_desc->SCSIIO.DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
/*
* If the command is for the tape device, set the
* FP timeout to the os layer timeout value.
@@ -1770,7 +1781,8 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
if (index >= instance->max_fw_cmds) {
printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
- "descriptor\n", index);
+ "descriptor for scsi%d\n", index,
+ instance->host->host_no);
return NULL;
}
fusion = instance->ctrl_context;
@@ -2038,8 +2050,11 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
/* If we didn't complete any commands, check for FW fault */
fw_state = instance->instancet->read_fw_status_reg(
instance->reg_set) & MFI_STATE_MASK;
- if (fw_state == MFI_STATE_FAULT)
+ if (fw_state == MFI_STATE_FAULT) {
+ printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt "
+ "for scsi%d\n", instance->host->host_no);
schedule_work(&instance->work_init);
+ }
}
return IRQ_HANDLED;
@@ -2210,9 +2225,10 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
}
/* This function waits for outstanding commands on fusion to complete */
-int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
+int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+ int iotimeout, int *convert)
{
- int i, outstanding, retval = 0;
+ int i, outstanding, retval = 0, hb_seconds_missed = 0;
u32 fw_state;
for (i = 0; i < resetwaittime; i++) {
@@ -2221,18 +2237,49 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
instance->reg_set) & MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT) {
printk(KERN_WARNING "megasas: Found FW in FAULT state,"
- " will reset adapter.\n");
+ " will reset adapter scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ /* If SR-IOV VF mode & heartbeat timeout, don't wait */
+ if (instance->requestorId && !iotimeout) {
retval = 1;
goto out;
}
+ /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
+ if (instance->requestorId && iotimeout) {
+ if (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ hb_seconds_missed = 0;
+ } else {
+ hb_seconds_missed++;
+ if (hb_seconds_missed ==
+ (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
+ printk(KERN_WARNING "megasas: SR-IOV:"
+ " Heartbeat never completed "
+ " while polling during I/O "
+ " timeout handling for "
+ "scsi%d.\n",
+ instance->host->host_no);
+ *convert = 1;
+ retval = 1;
+ goto out;
+ }
+ }
+ }
+
outstanding = atomic_read(&instance->fw_outstanding);
if (!outstanding)
goto out;
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
- "commands to complete\n", i, outstanding);
+ "commands to complete for scsi%d\n", i,
+ outstanding, instance->host->host_no);
megasas_complete_cmd_dpc_fusion(
(unsigned long)instance);
}
@@ -2241,7 +2288,8 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
if (atomic_read(&instance->fw_outstanding)) {
printk("megaraid_sas: pending commands remain after waiting, "
- "will reset adapter.\n");
+ "will reset adapter scsi%d.\n",
+ instance->host->host_no);
retval = 1;
}
out:
@@ -2263,10 +2311,34 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
reply_desc->Words = ULLONG_MAX;
}
+/* Check for a second path that is currently UP */
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ int i, j, retval = (DID_RESET << 16);
+
+ if (instance->mpio && instance->requestorId) {
+ for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
+ for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
+ if (megasas_mgmt_info.instance[i] &&
+ (megasas_mgmt_info.instance[i] != instance) &&
+ megasas_mgmt_info.instance[i]->mpio &&
+ megasas_mgmt_info.instance[i]->requestorId
+ &&
+ (megasas_mgmt_info.instance[i]->ld_ids[j]
+ == scmd->device->id)) {
+ retval = (DID_NO_CONNECT << 16);
+ goto out;
+ }
+ }
+out:
+ return retval;
+}
+
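
megasas_check_mpio_paths() answers one question for a VF: does another registered MPIO-capable VF currently expose the same logical drive? If so, the command is completed with DID_NO_CONNECT so the multipath layer fails over; otherwise DID_RESET asks the midlayer to retry on this path. A standalone sketch of that scan (the adapter table below is simplified and illustrative):

#include <stdio.h>

#define MAX_ADAPTERS 4
#define MAX_LDS      8

struct adapter {
        int present;
        int mpio;                          /* controller advertises MPIO */
        int requestor_id;                  /* non-zero means SR-IOV VF */
        int ld_ids[MAX_LDS];
};

/* Returns 1 when some *other* MPIO-capable VF also exposes target_id. */
static int other_path_up(const struct adapter *tbl, int self, int target_id)
{
        for (int i = 0; i < MAX_ADAPTERS; i++) {
                if (!tbl[i].present || i == self)
                        continue;
                if (!tbl[i].mpio || !tbl[i].requestor_id)
                        continue;
                for (int j = 0; j < MAX_LDS; j++)
                        if (tbl[i].ld_ids[j] == target_id)
                                return 1;
        }
        return 0;
}

int main(void)
{
        struct adapter tbl[MAX_ADAPTERS] = {
                [0] = { .present = 1, .mpio = 1, .requestor_id = 1,
                        .ld_ids = { 3, 5, -1, -1, -1, -1, -1, -1 } },
                [1] = { .present = 1, .mpio = 1, .requestor_id = 2,
                        .ld_ids = { 5, -1, -1, -1, -1, -1, -1, -1 } },
        };

        /* Adapter 0 asks whether LD 5 is reachable through another path. */
        printf("%s\n", other_path_up(tbl, 0, 5) ? "DID_NO_CONNECT (fail over)"
                                                : "DID_RESET (retry here)");
        return 0;
}
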
/* Core fusion reset function */
-int megasas_reset_fusion(struct Scsi_Host *shost)
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
{
- int retval = SUCCESS, i, j, retry = 0;
+ int retval = SUCCESS, i, j, retry = 0, convert = 0;
struct megasas_instance *instance;
struct megasas_cmd_fusion *cmd_fusion;
struct fusion_context *fusion;
@@ -2277,28 +2349,39 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
+ mutex_lock(&instance->reset_mutex);
+
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
- "returning FAILED.\n");
+ "returning FAILED for scsi%d.\n",
+ instance->host->host_no);
return FAILED;
}
- mutex_lock(&instance->reset_mutex);
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
instance->instancet->disable_intr(instance);
msleep(1000);
/* First try waiting for commands to complete */
- if (megasas_wait_for_outstanding_fusion(instance)) {
+ if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
+ &convert)) {
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
printk(KERN_WARNING "megaraid_sas: resetting fusion "
- "adapter.\n");
+ "adapter scsi%d.\n", instance->host->host_no);
+ if (convert)
+ iotimeout = 0;
+
/* Now return commands back to the OS */
for (i = 0 ; i < instance->max_fw_cmds; i++) {
cmd_fusion = fusion->cmd_list[i];
if (cmd_fusion->scmd) {
scsi_dma_unmap(cmd_fusion->scmd);
- cmd_fusion->scmd->result = (DID_RESET << 16);
+ cmd_fusion->scmd->result =
+ megasas_check_mpio_paths(instance,
+ cmd_fusion->scmd);
cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
megasas_return_cmd_fusion(instance, cmd_fusion);
atomic_dec(&instance->fw_outstanding);
@@ -2313,13 +2396,67 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
(abs_state == MFI_STATE_FAULT && !reset_adapter)) {
/* Reset not supported, kill adapter */
printk(KERN_WARNING "megaraid_sas: Reset not supported"
- ", killing adapter.\n");
+ ", killing adapter scsi%d.\n",
+ instance->host->host_no);
megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
retval = FAILED;
goto out;
}
+ /* Let SR-IOV VF & PF sync up if there was a HB failure */
+ if (instance->requestorId && !iotimeout) {
+ msleep(MEGASAS_OCR_SETTLE_TIME_VF);
+ /* Look for a late HB update after VF settle time */
+ if (abs_state == MFI_STATE_OPERATIONAL &&
+ (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter)) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "Late FW heartbeat update for "
+ "scsi%d.\n",
+ instance->host->host_no);
+ } else {
+ /* In VF mode, first poll for FW ready */
+ for (i = 0;
+ i < (MEGASAS_RESET_WAIT_TIME * 1000);
+ i += 20) {
+ status_reg =
+ instance->instancet->
+ read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg &
+ MFI_STATE_MASK;
+ if (abs_state == MFI_STATE_READY) {
+ printk(KERN_WARNING "megasas"
+ ": SR-IOV: FW was found"
+ "to be in ready state "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ break;
+ }
+ msleep(20);
+ }
+ if (abs_state != MFI_STATE_READY) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "FW not in ready state after %d"
+ " seconds for scsi%d, status_reg = "
+ "0x%x.\n",
+ MEGASAS_RESET_WAIT_TIME,
+ instance->host->host_no,
+ status_reg);
+ megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ instance->adprecovery =
+ MEGASAS_HW_CRITICAL_ERROR;
+ retval = FAILED;
+ goto out;
+ }
+ }
+ }
+
/* Now try to reset the chip */
for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
writel(MPI2_WRSEQ_FLUSH_KEY_VALUE,
@@ -2346,7 +2483,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
readl(&instance->reg_set->fusion_host_diag);
if (retry++ == 100) {
printk(KERN_WARNING "megaraid_sas: "
- "Host diag unlock failed!\n");
+ "Host diag unlock failed! "
+ "for scsi%d\n",
+ instance->host->host_no);
break;
}
}
@@ -2368,7 +2507,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
if (retry++ == 1000) {
printk(KERN_WARNING "megaraid_sas: "
"Diag reset adapter never "
- "cleared!\n");
+ "cleared for scsi%d!\n",
+ instance->host->host_no);
break;
}
}
@@ -2390,29 +2530,29 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
if (abs_state <= MFI_STATE_FW_INIT) {
printk(KERN_WARNING "megaraid_sas: firmware "
"state < MFI_STATE_FW_INIT, state = "
- "0x%x\n", abs_state);
+ "0x%x for scsi%d\n", abs_state,
+ instance->host->host_no);
continue;
}
/* Wait for FW to become ready */
if (megasas_transition_to_ready(instance, 1)) {
printk(KERN_WARNING "megaraid_sas: Failed to "
- "transition controller to ready.\n");
+ "transition controller to ready "
+ "for scsi%d.\n",
+ instance->host->host_no);
continue;
}
megasas_reset_reply_desc(instance);
if (megasas_ioc_init_fusion(instance)) {
printk(KERN_WARNING "megaraid_sas: "
- "megasas_ioc_init_fusion() failed!\n");
+ "megasas_ioc_init_fusion() failed!"
+ " for scsi%d\n",
+ instance->host->host_no);
continue;
}
- clear_bit(MEGASAS_FUSION_IN_RESET,
- &instance->reset_flags);
- instance->instancet->enable_intr(instance);
- instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
-
/* Re-fire management commands */
for (j = 0 ; j < instance->max_fw_cmds; j++) {
cmd_fusion = fusion->cmd_list[j];
@@ -2422,7 +2562,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
instance->
cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->frame->dcmd.opcode ==
- MR_DCMD_LD_MAP_GET_INFO) {
+ cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
megasas_return_cmd(instance,
cmd_mfi);
megasas_return_cmd_fusion(
@@ -2433,11 +2573,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
instance,
cmd_mfi->context.smid
-1);
- if (!req_desc)
+ if (!req_desc) {
printk(KERN_WARNING
"req_desc NULL"
- "\n");
- else {
+ " for scsi%d\n",
+ instance->host->host_no);
+ /* Return leaked MPT
+ frame */
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ } else {
instance->instancet->
fire_cmd(instance,
req_desc->
@@ -2451,6 +2595,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
}
}
+ clear_bit(MEGASAS_FUSION_IN_RESET,
+ &instance->reset_flags);
+ instance->instancet->enable_intr(instance);
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+
/* Reset load balance info */
memset(fusion->load_balance_info, 0,
sizeof(struct LD_LOAD_BALANCE_INFO)
@@ -2459,18 +2608,39 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
if (!megasas_get_map_info(instance))
megasas_sync_map_info(instance);
+ /* Restart SR-IOV heartbeat */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 0))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
/* Adapter reset completed successfully */
printk(KERN_WARNING "megaraid_sas: Reset "
- "successful.\n");
+ "successful for scsi%d.\n",
+ instance->host->host_no);
retval = SUCCESS;
goto out;
}
/* Reset failed, kill the adapter */
printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
- "adapter.\n");
+ "adapter scsi%d.\n", instance->host->host_no);
megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
retval = FAILED;
} else {
+ /* For VF: Restart HB timer if we didn't OCR */
+ if (instance->requestorId) {
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ }
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
instance->instancet->enable_intr(instance);
instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
@@ -2487,7 +2657,7 @@ void megasas_fusion_ocr_wq(struct work_struct *work)
struct megasas_instance *instance =
container_of(work, struct megasas_instance, work_init);
- megasas_reset_fusion(instance->host);
+ megasas_reset_fusion(instance->host, 0);
}
struct megasas_instance_template megasas_instance_template_fusion = {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 35a51397b364..e76af5459a09 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -485,6 +485,9 @@ struct MPI2_IOC_INIT_REQUEST {
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
struct MR_DEV_HANDLE_INFO {
u16 curDevHdl;
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 62f1a6031765..0d78a4d5576c 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -453,7 +453,7 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
if (instance->irq != SCSI_IRQ_NONE)
- if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED,
+ if (request_irq(instance->irq, pas16_intr, 0,
"pas16", instance)) {
printk("scsi%d : IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index a04b4ff8c7f6..28b4e8139153 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -323,24 +323,17 @@ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
int offset;
char *str = buf;
int start = 0;
-#define IB_MEMMAP(c) \
- (*(u32 *)((u8 *)pm8001_ha-> \
- memoryMap.region[IB].virt_ptr + \
+#define IB_MEMMAP(c) \
+ (*(u32 *)((u8 *)pm8001_ha-> \
+ memoryMap.region[IB].virt_ptr + \
pm8001_ha->evtlog_ib_offset + (c)))
for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
- if (pm8001_ha->chip_id != chip_8001)
- str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
- else
- str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
+ str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
start = start + 4;
}
pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
- if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
- && (pm8001_ha->chip_id != chip_8001))
- pm8001_ha->evtlog_ib_offset = 0;
- if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
- && (pm8001_ha->chip_id == chip_8001))
+ if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
pm8001_ha->evtlog_ib_offset = 0;
return str - buf;
@@ -363,24 +356,17 @@ static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
int offset;
char *str = buf;
int start = 0;
-#define OB_MEMMAP(c) \
- (*(u32 *)((u8 *)pm8001_ha-> \
- memoryMap.region[OB].virt_ptr + \
+#define OB_MEMMAP(c) \
+ (*(u32 *)((u8 *)pm8001_ha-> \
+ memoryMap.region[OB].virt_ptr + \
pm8001_ha->evtlog_ob_offset + (c)))
for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
- if (pm8001_ha->chip_id != chip_8001)
- str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
- else
- str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
+ str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
start = start + 4;
}
pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
- if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
- && (pm8001_ha->chip_id != chip_8001))
- pm8001_ha->evtlog_ob_offset = 0;
- if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
- && (pm8001_ha->chip_id == chip_8001))
+ if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
pm8001_ha->evtlog_ob_offset = 0;
return str - buf;
@@ -466,7 +452,7 @@ static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
- u32 count;
+ ssize_t count;
count = pm80xx_get_fatal_dump(cdev, attr, buf);
return count;
@@ -484,7 +470,7 @@ static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
- u32 count;
+ ssize_t count;
count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);
return count;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 0a1296a87d66..a97be015e52e 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -644,7 +644,7 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
/* 8081 controllers need BAR shift to access MPI space
* as this is shared with BIOS data */
- if (deviceid == 0x8081) {
+ if (deviceid == 0x8081 || deviceid == 0x0042) {
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
@@ -673,7 +673,7 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
update_outbnd_queue_table(pm8001_ha, i);
/* 8081 controller donot require these operations */
- if (deviceid != 0x8081) {
+ if (deviceid != 0x8081 && deviceid != 0x0042) {
mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
/* 7->130ms, 34->500ms, 119->1.5s */
mpi_set_open_retry_interval_reg(pm8001_ha, 119);
@@ -701,7 +701,7 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
u32 gst_len_mpistate;
u16 deviceid;
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
- if (deviceid == 0x8081) {
+ if (deviceid == 0x8081 || deviceid == 0x0042) {
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
@@ -2502,11 +2502,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*in order to force CPU ordering*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2522,11 +2518,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2550,11 +2542,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2617,11 +2605,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2641,11 +2625,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2674,20 +2654,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else if (t->uldd_task) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
- } else if (!t->uldd_task) {
+ } else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
}
}
@@ -2796,11 +2765,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2909,20 +2874,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else if (t->uldd_task) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
- } else if (!t->uldd_task) {
+ } else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
}
}
@@ -4467,23 +4421,11 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
" stat 0x%x but aborted by upper layer "
"\n", task, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
- } else if (task->uldd_task) {
- spin_unlock_irqrestore(&task->task_state_lock,
- flags);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- task->task_done(task);
- spin_lock_irq(&pm8001_ha->lock);
- return 0;
- } else if (!task->uldd_task) {
+ } else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- task->task_done(task);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, task,
+ ccb, tag);
return 0;
}
}
@@ -5020,7 +4962,7 @@ pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
/* check max is 1 Mbytes */
if ((length > 0x100000) || (gsm_dump_offset & 3) ||
((gsm_dump_offset + length) > 0x1000000))
- return 1;
+ return -EINVAL;
if (pm8001_ha->chip_id == chip_8001)
bar = 2;
@@ -5048,12 +4990,12 @@ pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
gsm_base = GSM_BASE;
if (-1 == pm8001_bar4_shift(pm8001_ha,
(gsm_base + shift_value)))
- return 1;
+ return -EIO;
} else {
gsm_base = 0;
if (-1 == pm80xx_bar4_shift(pm8001_ha,
(gsm_base + shift_value)))
- return 1;
+ return -EIO;
}
gsm_dump_offset = (gsm_dump_offset + offset) &
0xFFFF0000;
@@ -5072,13 +5014,8 @@ pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
direct_data += sprintf(direct_data, "%08x ", value);
}
/* Shift back to BAR4 original address */
- if (pm8001_ha->chip_id == chip_8001) {
- if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
- return 1;
- } else {
- if (-1 == pm80xx_bar4_shift(pm8001_ha, 0))
- return 1;
- }
+ if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
+ return -EIO;
pm8001_ha->fatal_forensic_shift_offset += 1024;
if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 73a120d81b4d..c4f31b21feb8 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -625,7 +625,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->nvmd_completion = &completion;
if (pm8001_ha->chip_id == chip_8001) {
- if (deviceid == 0x8081) {
+ if (deviceid == 0x8081 || deviceid == 0x0042) {
payload.minor_function = 4;
payload.length = 4096;
} else {
@@ -646,6 +646,9 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
if (deviceid == 0x8081)
pm8001_ha->sas_addr[j] =
payload.func_specific[0x704 + i];
+ else if (deviceid == 0x0042)
+ pm8001_ha->sas_addr[j] =
+ payload.func_specific[0x010 + i];
} else
pm8001_ha->sas_addr[j] =
payload.func_specific[0x804 + i];
@@ -713,11 +716,9 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
number_of_intr = 1;
- flag |= IRQF_DISABLED;
} else {
number_of_intr = PM8001_MAX_MSIX_VEC;
flag &= ~IRQF_SHARED;
- flag |= IRQF_DISABLED;
}
max_entry = sizeof(pm8001_ha->msix_entries) /
@@ -1072,10 +1073,7 @@ err_out_enable:
*/
static struct pci_device_id pm8001_pci_table[] = {
{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
- {
- PCI_DEVICE(0x117c, 0x0042),
- .driver_data = chip_8001
- },
+ { PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
/* Support for SPC/SPCv/SPCve controllers */
{ PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
{ PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index f50ac44b950e..8a44bc92bc78 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -434,6 +434,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
ccb->n_elem = n_elem;
ccb->ccb_tag = tag;
ccb->task = t;
+ ccb->device = pm8001_dev;
switch (t->task_proto) {
case SAS_PROTOCOL_SMP:
rc = pm8001_task_prep_smp(pm8001_ha, ccb);
@@ -865,13 +866,11 @@ ex_err:
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
unsigned long flags = 0;
- u32 tag;
struct pm8001_hba_info *pm8001_ha;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
spin_lock_irqsave(&pm8001_ha->lock, flags);
- pm8001_tag_alloc(pm8001_ha, &tag);
if (pm8001_dev) {
u32 device_id = pm8001_dev->device_id;
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 6c5fd5ee22d3..1ee06f21803b 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -708,5 +708,17 @@ ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
/* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[];
+static inline void
+pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
+ struct sas_task *task, struct pm8001_ccb_info *ccb,
+ u32 ccb_idx)
+{
+ pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
+ smp_mb(); /*in order to force CPU ordering*/
+ spin_unlock(&pm8001_ha->lock);
+ task->task_done(task);
+ spin_lock(&pm8001_ha->lock);
+}
+
#endif
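The pm8001_ccb_task_free_done() inline added above is what enables the long runs of deletions in pm8001_hwi.c and pm80xx_hwi.c: every SATA completion/event error path used to open-code the same five lines, and the duplicated t->uldd_task / !t->uldd_task branches collapse into a single else once both sides call the helper. The before/after at a typical call site (sketch, not a hunk from the patch):

	/* before: repeated verbatim at each error path */
	pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	mb();				/* force CPU ordering */
	spin_unlock_irq(&pm8001_ha->lock);
	t->task_done(t);
	spin_lock_irq(&pm8001_ha->lock);

	/* after: free + barrier + task_done() outside the HBA lock, in one call */
	pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);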
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index c950dc5c9943..d70587f96184 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -91,7 +91,6 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
- u32 status = 1;
u32 accum_len , reg_val, index, *temp;
unsigned long start;
u8 *direct_data;
@@ -111,13 +110,10 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
direct_data = (u8 *)fatal_error_data;
pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
- pm8001_ha->forensic_info.data_buf.direct_offset = 0;
pm8001_ha->forensic_info.data_buf.read_len = 0;
pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
- }
- if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
/* start to get data */
/* Program the MEMBASE II Shifting Register with 0x00.*/
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
@@ -126,6 +122,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
pm8001_ha->forensic_fatal_step = 0;
pm8001_ha->fatal_bar_loc = 0;
}
+
/* Read until accum_len is retrived */
accum_len = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
@@ -135,7 +132,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("Possible PCI issue 0x%x not expected\n",
accum_len));
- return status;
+ return -EIO;
}
if (accum_len == 0 || accum_len >= 0x100000) {
pm8001_ha->forensic_info.data_buf.direct_data +=
@@ -178,7 +175,6 @@ moreData:
pm8001_ha->forensic_fatal_step = 1;
pm8001_ha->fatal_forensic_shift_offset = 0;
pm8001_ha->forensic_last_offset = 0;
- status = 0;
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -194,7 +190,6 @@ moreData:
forensic_info.data_buf.direct_data,
"%08x ", *(temp + index));
}
- status = 0;
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -214,7 +209,6 @@ moreData:
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
pm8001_ha->fatal_forensic_shift_offset);
pm8001_ha->fatal_bar_loc = 0;
- status = 0;
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
@@ -239,7 +233,7 @@ moreData:
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
" = 0x%x\n", reg_val));
- return -1;
+ return -EIO;
}
/* Read the next 64K of the debug data. */
@@ -259,7 +253,6 @@ moreData:
pm8001_ha->forensic_info.data_buf.direct_len = 0;
pm8001_ha->forensic_info.data_buf.direct_offset = 0;
pm8001_ha->forensic_info.data_buf.read_len = 0;
- status = 0;
}
}
@@ -2175,11 +2168,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*in order to force CPU ordering*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2195,11 +2184,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2221,11 +2206,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2288,11 +2269,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2312,11 +2289,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2345,20 +2318,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else if (t->uldd_task) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
- } else if (!t->uldd_task) {
+ } else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
}
}
@@ -2470,11 +2432,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2596,20 +2554,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else if (t->uldd_task) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
- } else if (!t->uldd_task) {
+ } else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
}
}
@@ -4304,23 +4251,11 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
"\n", task, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
return 0;
- } else if (task->uldd_task) {
- spin_unlock_irqrestore(&task->task_state_lock,
- flags);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- task->task_done(task);
- spin_lock_irq(&pm8001_ha->lock);
- return 0;
- } else if (!task->uldd_task) {
+ } else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- task->task_done(task);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, task,
+ ccb, tag);
return 0;
}
}
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 97dabd39b092..158020522dfb 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -379,14 +379,7 @@
#define DEBUG_PRINT_NVRAM 0
#define DEBUG_QLA1280 0
-/*
- * The SGI VISWS is broken and doesn't support MMIO ;-(
- */
-#ifdef CONFIG_X86_VISWS
-#define MEMORY_MAPPED_IO 0
-#else
#define MEMORY_MAPPED_IO 1
-#endif
#include "qla1280.h"
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index ff0fc7c7812f..44def6bb4bb0 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o qla_mr.o qla_nx2.o qla_target.o
+ qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4a0d7c92181f..07befcf365b8 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -147,6 +147,92 @@ static struct bin_attribute sysfs_fw_dump_attr = {
};
static ssize_t
+qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->fw_dump_template || !ha->fw_dump_template_len)
+ return 0;
+
+ ql_dbg(ql_dbg_user, vha, 0x70e2,
+ "chunk <- off=%llx count=%zx\n", off, count);
+ return memory_read_from_buffer(buf, count, &off,
+ ha->fw_dump_template, ha->fw_dump_template_len);
+}
+
+static ssize_t
+qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t size;
+
+ if (off == 0) {
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+
+ ha->fw_dump = NULL;
+ ha->fw_dump_len = 0;
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ size = qla27xx_fwdt_template_size(buf);
+ ql_dbg(ql_dbg_user, vha, 0x70d1,
+ "-> allocating fwdt (%x bytes)...\n", size);
+ ha->fw_dump_template = vmalloc(size);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x70d2,
+ "Failed allocate fwdt (%x bytes).\n", size);
+ return -ENOMEM;
+ }
+ ha->fw_dump_template_len = size;
+ }
+
+ if (off + count > ha->fw_dump_template_len) {
+ count = ha->fw_dump_template_len - off;
+ ql_dbg(ql_dbg_user, vha, 0x70d3,
+ "chunk -> truncating to %zx bytes.\n", count);
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x70d4,
+ "chunk -> off=%llx count=%zx\n", off, count);
+ memcpy(ha->fw_dump_template + off, buf, count);
+
+ if (off + count == ha->fw_dump_template_len) {
+ size = qla27xx_fwdt_calculate_dump_size(vha);
+ ql_dbg(ql_dbg_user, vha, 0x70d5,
+ "-> allocating fwdump (%x bytes)...\n", size);
+ ha->fw_dump = vmalloc(size);
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0x70d6,
+ "Failed allocate fwdump (%x bytes).\n", size);
+ return -ENOMEM;
+ }
+ ha->fw_dump_len = size;
+ }
+
+ return count;
+}
+static struct bin_attribute sysfs_fw_dump_template_attr = {
+ .attr = {
+ .name = "fw_dump_template",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = qla2x00_sysfs_read_fw_dump_template,
+ .write = qla2x00_sysfs_write_fw_dump_template,
+};
+
+static ssize_t
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -241,12 +327,17 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ ssize_t rval = 0;
if (ha->optrom_state != QLA_SREADING)
return 0;
- return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
- ha->optrom_region_size);
+ mutex_lock(&ha->optrom_mutex);
+ rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
+ ha->optrom_region_size);
+ mutex_unlock(&ha->optrom_mutex);
+
+ return rval;
}
static ssize_t
@@ -265,7 +356,9 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
if (off + count > ha->optrom_region_size)
count = ha->optrom_region_size - off;
+ mutex_lock(&ha->optrom_mutex);
memcpy(&ha->optrom_buffer[off], buf, count);
+ mutex_unlock(&ha->optrom_mutex);
return count;
}
@@ -288,10 +381,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
-
uint32_t start = 0;
uint32_t size = ha->optrom_size;
int val, valid;
+ ssize_t rval = count;
if (off)
return -EINVAL;
@@ -304,12 +397,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
if (start > ha->optrom_size)
return -EINVAL;
+ mutex_lock(&ha->optrom_mutex);
switch (val) {
case 0:
if (ha->optrom_state != QLA_SREADING &&
- ha->optrom_state != QLA_SWRITING)
- return -EINVAL;
-
+ ha->optrom_state != QLA_SWRITING) {
+ rval = -EINVAL;
+ goto out;
+ }
ha->optrom_state = QLA_SWAITING;
ql_dbg(ql_dbg_user, vha, 0x7061,
@@ -320,8 +415,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_buffer = NULL;
break;
case 1:
- if (ha->optrom_state != QLA_SWAITING)
- return -EINVAL;
+ if (ha->optrom_state != QLA_SWAITING) {
+ rval = -EINVAL;
+ goto out;
+ }
ha->optrom_region_start = start;
ha->optrom_region_size = start + size > ha->optrom_size ?
@@ -335,13 +432,15 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
"(%x).\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto out;
}
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7063,
"HBA not online, failing NVRAM update.\n");
- return -EAGAIN;
+ rval = -EAGAIN;
+ goto out;
}
ql_dbg(ql_dbg_user, vha, 0x7064,
@@ -353,8 +452,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_region_start, ha->optrom_region_size);
break;
case 2:
- if (ha->optrom_state != QLA_SWAITING)
- return -EINVAL;
+ if (ha->optrom_state != QLA_SWAITING) {
+ rval = -EINVAL;
+ goto out;
+ }
/*
* We need to be more restrictive on which FLASH regions are
@@ -388,7 +489,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
if (!valid) {
ql_log(ql_log_warn, vha, 0x7065,
"Invalid start region 0x%x/0x%x.\n", start, size);
- return -EINVAL;
+ rval = -EINVAL;
+ goto out;
}
ha->optrom_region_start = start;
@@ -403,7 +505,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
"(%x)\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto out;
}
ql_dbg(ql_dbg_user, vha, 0x7067,
@@ -413,13 +516,16 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
break;
case 3:
- if (ha->optrom_state != QLA_SWRITING)
- return -EINVAL;
+ if (ha->optrom_state != QLA_SWRITING) {
+ rval = -EINVAL;
+ goto out;
+ }
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7068,
"HBA not online, failing flash update.\n");
- return -EAGAIN;
+ rval = -EAGAIN;
+ goto out;
}
ql_dbg(ql_dbg_user, vha, 0x7069,
@@ -430,9 +536,12 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_region_start, ha->optrom_region_size);
break;
default:
- return -EINVAL;
+ rval = -EINVAL;
}
- return count;
+
+out:
+ mutex_unlock(&ha->optrom_mutex);
+ return rval;
}
static struct bin_attribute sysfs_optrom_ctl_attr = {
@@ -822,6 +931,7 @@ static struct sysfs_entry {
int is4GBp_only;
} bin_file_entries[] = {
{ "fw_dump", &sysfs_fw_dump_attr, },
+ { "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 },
{ "nvram", &sysfs_nvram_attr, },
{ "optrom", &sysfs_optrom_attr, },
{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
@@ -847,6 +957,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
+ if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+ continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
iter->attr);
@@ -1187,7 +1299,7 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
return scnprintf(buf, PAGE_SIZE, "\n");
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1391,6 +1503,37 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%d\n", size);
}
+static ssize_t
+qla2x00_allow_cna_fw_dump_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_P3P_TYPE(vha->hw))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+ else
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ vha->hw->allow_cna_fw_dump ? "true" : "false");
+}
+
+static ssize_t
+qla2x00_allow_cna_fw_dump_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (!IS_P3P_TYPE(vha->hw))
+ return -EINVAL;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ vha->hw->allow_cna_fw_dump = val != 0;
+
+ return strlen(buf);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1432,6 +1575,9 @@ static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
+static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
+ qla2x00_allow_cna_fw_dump_show,
+ qla2x00_allow_cna_fw_dump_store);
struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version,
@@ -1464,6 +1610,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_diag_requests,
&dev_attr_diag_megabytes,
&dev_attr_fw_dump_size,
+ &dev_attr_allow_cna_fw_dump,
NULL,
};
@@ -1509,6 +1656,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
case PORT_SPEED_16GB:
speed = FC_PORTSPEED_16GBIT;
break;
+ case PORT_SPEED_32GB:
+ speed = FC_PORTSPEED_32GBIT;
+ break;
}
fc_host_speed(shost) = speed;
}
@@ -2160,6 +2310,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
else if (IS_QLAFX00(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+ else if (IS_QLA27XX(ha))
+ speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
+ FC_PORTSPEED_8GBIT;
else
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(vha->host) = speed;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index f15d03e6b7ee..71ff340f6de4 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1437,9 +1437,12 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
if (ha->flags.nic_core_reset_hdlr_active)
return -EBUSY;
+ mutex_lock(&ha->optrom_mutex);
rval = qla2x00_optrom_setup(bsg_job, vha, 0);
- if (rval)
+ if (rval) {
+ mutex_unlock(&ha->optrom_mutex);
return rval;
+ }
ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
@@ -1453,6 +1456,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
+ mutex_unlock(&ha->optrom_mutex);
bsg_job->job_done(bsg_job);
return rval;
}
@@ -1465,9 +1469,12 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
struct qla_hw_data *ha = vha->hw;
int rval = 0;
+ mutex_lock(&ha->optrom_mutex);
rval = qla2x00_optrom_setup(bsg_job, vha, 1);
- if (rval)
+ if (rval) {
+ mutex_unlock(&ha->optrom_mutex);
return rval;
+ }
/* Set the isp82xx_no_md_cap not to capture minidump */
ha->flags.isp82xx_no_md_cap = 1;
@@ -1483,6 +1490,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
+ mutex_unlock(&ha->optrom_mutex);
bsg_job->job_done(bsg_job);
return rval;
}
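Together, the qla_attr.c and qla_bsg.c hunks above put every option-ROM state transition -- the sysfs optrom read/write and optrom_ctl files as well as the bsg read/update jobs -- under the new ha->optrom_mutex, with the early returns in the ctl handler rewritten as goto out so the mutex is released on every exit path. Reduced to its locking skeleton (a sketch that assumes the mutex is initialized during adapter setup, which is outside this section):

	ssize_t rval = count;

	mutex_lock(&ha->optrom_mutex);
	switch (val) {
	case 1:
		if (ha->optrom_state != QLA_SWAITING) {
			rval = -EINVAL;
			goto out;	/* still drops the mutex below */
		}
		/* ... allocate optrom_buffer, kick off the read ... */
		break;
	/* ... cases 0, 2 and 3 follow the same pattern ... */
	default:
		rval = -EINVAL;
	}
out:
	mutex_unlock(&ha->optrom_mutex);
	return rval;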
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f6103f553bb1..97255f7c3975 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,13 +11,15 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x015b | 0x4b,0xba,0xfa |
- * | | | 0x0x015a |
- * | Mailbox commands | 0x1187 | 0x111a-0x111b |
- * | | | 0x1155-0x1158 |
- * | | | 0x1018-0x1019 |
+ * | Module Init and Probe | 0x017d | 0x004b,0x0141 |
+ * | | | 0x0144,0x0146 |
+ * | | | 0x015b-0x0160 |
+ * | | | 0x016e-0x0170 |
+ * | Mailbox commands | 0x1187 | 0x1018-0x1019 |
+ * | | | 0x10ca |
* | | | 0x1115-0x1116 |
- * | | | 0x10ca |
+ * | | | 0x111a-0x111b |
+ * | | | 0x1155-0x1158 |
* | Device Discovery | 0x2095 | 0x2020-0x2022, |
* | | | 0x2011-0x2012, |
* | | | 0x2016 |
@@ -32,18 +34,17 @@
* | | | 0x5047,0x5052 |
* | | | 0x5084,0x5075 |
* | | | 0x503d,0x5044 |
+ * | | | 0x507b |
* | Timer Routines | 0x6012 | |
- * | User Space Interactions | 0x70e1 | 0x7018,0x702e, |
- * | | | 0x7020,0x7024, |
- * | | | 0x7039,0x7045, |
- * | | | 0x7073-0x7075, |
- * | | | 0x707b,0x708c, |
- * | | | 0x70a5,0x70a6, |
- * | | | 0x70a8,0x70ab, |
- * | | | 0x70ad-0x70ae, |
- * | | | 0x70d1-0x70db, |
- * | | | 0x7047,0x703b |
- * | | | 0x70de-0x70df, |
+ * | User Space Interactions | 0x70e2 | 0x7018,0x702e |
+ * | | | 0x7020,0x7024 |
+ * | | | 0x7039,0x7045 |
+ * | | | 0x7073-0x7075 |
+ * | | | 0x70a5-0x70a6 |
+ * | | | 0x70a8,0x70ab |
+ * | | | 0x70ad-0x70ae |
+ * | | | 0x70d7-0x70db |
+ * | | | 0x70de-0x70df |
* | Task Management | 0x803d | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
@@ -59,7 +60,11 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc00c | |
- * | Misc | 0xd010 | |
+ * | Misc | 0xd2ff | 0xd017-0xd019 |
+ * | | | 0xd020 |
+ * | | | 0xd02e-0xd0ff |
+ * | | | 0xd101-0xd1fe |
+ * | | | 0xd212-0xd2fe |
* | Target Mode | 0xe070 | 0xe021 |
* | Target Mode Management | 0xf072 | 0xf002-0xf003 |
* | | | 0xf046-0xf049 |
@@ -104,7 +109,87 @@ qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
return ptr + (rsp->length * sizeof(response_t));
}
-static int
+int
+qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
+ uint32_t ram_dwords, void **nxt)
+{
+ int rval;
+ uint32_t cnt, stat, timer, dwords, idx;
+ uint16_t mb0, mb1;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ dma_addr_t dump_dma = ha->gid_list_dma;
+ uint32_t *dump = (uint32_t *)ha->gid_list;
+
+ rval = QLA_SUCCESS;
+ mb0 = 0;
+
+ WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ dwords = qla2x00_gid_list_size(ha) / 4;
+ for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
+ cnt += dwords, addr += dwords) {
+ if (cnt + dwords > ram_dwords)
+ dwords = ram_dwords - cnt;
+
+ WRT_REG_WORD(&reg->mailbox1, LSW(addr));
+ WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+
+ WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
+ WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+
+ WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
+ WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+
+ WRT_REG_WORD(&reg->mailbox9, 0);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
+ ha->flags.mbox_int = 0;
+ for (timer = 6000000; timer; timer--) {
+ /* Check for pending interrupts. */
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (stat & HSRX_RISC_INT) {
+ stat &= 0xff;
+
+ if (stat == 0x1 || stat == 0x2 ||
+ stat == 0x10 || stat == 0x11) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb0 = RD_REG_WORD(&reg->mailbox0);
+ mb1 = RD_REG_WORD(&reg->mailbox1);
+
+ WRT_REG_DWORD(&reg->hccr,
+ HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ break;
+ }
+
+ /* Clear this intr; it wasn't a mailbox intr */
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ }
+ udelay(5);
+ }
+ ha->flags.mbox_int = 1;
+
+ if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ rval = mb0 & MBS_MASK;
+ for (idx = 0; idx < dwords; idx++)
+ ram[cnt + idx] = IS_QLA27XX(ha) ?
+ le32_to_cpu(dump[idx]) : swab32(dump[idx]);
+ } else {
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
+ return rval;
+}
+
+int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
uint32_t ram_dwords, void **nxt)
{
@@ -139,6 +224,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ ha->flags.mbox_int = 0;
for (timer = 6000000; timer; timer--) {
/* Check for pending interrupts. */
stat = RD_REG_DWORD(&reg->host_status);
@@ -164,11 +250,13 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
}
udelay(5);
}
+ ha->flags.mbox_int = 1;
if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
rval = mb0 & MBS_MASK;
for (idx = 0; idx < dwords; idx++)
- ram[cnt + idx] = swab32(dump[idx]);
+ ram[cnt + idx] = IS_QLA27XX(ha) ?
+ le32_to_cpu(dump[idx]) : swab32(dump[idx]);
} else {
rval = QLA_FUNCTION_FAILED;
}
@@ -208,7 +296,7 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
return buf;
}
-static inline int
+int
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
{
int rval = QLA_SUCCESS;
@@ -227,7 +315,7 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
return rval;
}
-static int
+int
qla24xx_soft_reset(struct qla_hw_data *ha)
{
int rval = QLA_SUCCESS;
@@ -537,7 +625,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
struct qla2xxx_mq_chain *mq = ptr;
device_reg_t __iomem *reg;
- if (!ha->mqenable || IS_QLA83XX(ha))
+ if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
return ptr;
mq = ptr;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 35e20b4f8b6c..cc961040f8b1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,3 +348,10 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
#define ql_dbg_tgt 0x00004000 /* Target mode */
#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
+
+extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+ uint32_t, void **);
+extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+ uint32_t, void **);
+extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *);
+extern int qla24xx_soft_reset(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 266724b6b899..6a106136716c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -654,7 +654,7 @@ typedef union {
struct device_reg_25xxmq isp25mq;
struct device_reg_82xx isp82;
struct device_reg_fx00 ispfx00;
-} device_reg_t;
+} __iomem device_reg_t;
#define ISP_REQ_Q_IN(ha, reg) \
(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
@@ -808,7 +808,7 @@ struct mbx_cmd_32 {
Notification */
#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */
-
+#define MBA_FW_INIT_INPROGRESS 0x8500 /* Firmware boot in progress */
/* 83XX FCoE specific */
#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
@@ -938,6 +938,7 @@ struct mbx_cmd_32 {
*/
#define MBC_WRITE_SERDES 0x3 /* Write serdes word. */
#define MBC_READ_SERDES 0x4 /* Read serdes word. */
+#define MBC_LOAD_DUMP_MPI_RAM 0x5 /* Load/Dump MPI RAM. */
#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */
#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */
#define MBC_PORT_PARAMS 0x1A /* Port iDMA Parameters. */
@@ -1197,30 +1198,6 @@ typedef struct {
uint8_t reserved_3[26];
} init_cb_t;
-
-struct init_cb_fx {
- uint16_t version;
- uint16_t reserved_1[13];
- __le16 request_q_outpointer;
- __le16 response_q_inpointer;
- uint16_t reserved_2[2];
- __le16 response_q_length;
- __le16 request_q_length;
- uint16_t reserved_3[2];
- __le32 request_q_address[2];
- __le32 response_q_address[2];
- uint16_t reserved_4[4];
- uint8_t response_q_msivec;
- uint8_t reserved_5[19];
- uint16_t interrupt_delay_timer;
- uint16_t reserved_6;
- uint32_t fwoptions1;
- uint32_t fwoptions2;
- uint32_t fwoptions3;
- uint8_t reserved_7[24];
-};
-
-
/*
* Get Link Status mailbox command return buffer.
*/
@@ -2172,6 +2149,7 @@ struct ct_fdmi_hba_attributes {
#define FDMI_PORT_SPEED_4GB 0x8
#define FDMI_PORT_SPEED_8GB 0x10
#define FDMI_PORT_SPEED_16GB 0x20
+#define FDMI_PORT_SPEED_32GB 0x40
#define FDMI_PORT_SPEED_UNKNOWN 0x8000
struct ct_fdmi_port_attr {
@@ -2680,7 +2658,7 @@ struct bidi_statistics {
#define QLA_MQ_SIZE 32
#define QLA_MAX_QUEUES 256
#define ISP_QUE_REG(ha, id) \
- ((ha->mqenable || IS_QLA83XX(ha)) ? \
+ ((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
((void __iomem *)ha->iobase))
#define QLA_REQ_QUE_ID(tag) \
@@ -2818,7 +2796,6 @@ struct qla_hw_data {
uint32_t fac_supported :1;
uint32_t chip_reset_done :1;
- uint32_t port0 :1;
uint32_t running_gold_fw :1;
uint32_t eeh_busy :1;
uint32_t cpu_affinity_enabled :1;
@@ -2849,7 +2826,7 @@ struct qla_hw_data {
spinlock_t hardware_lock ____cacheline_aligned;
int bars;
int mem_only;
- device_reg_t __iomem *iobase; /* Base I/O address */
+ device_reg_t *iobase; /* Base I/O address */
resource_size_t pio_address;
#define MIN_IOBASE_LEN 0x100
@@ -2868,8 +2845,8 @@ struct qla_hw_data {
uint32_t rsp_que_off;
/* Multi queue data structs */
- device_reg_t __iomem *mqiobase;
- device_reg_t __iomem *msixbase;
+ device_reg_t *mqiobase;
+ device_reg_t *msixbase;
uint16_t msix_count;
uint8_t mqenable;
struct req_que **req_q_map;
@@ -2905,6 +2882,7 @@ struct qla_hw_data {
#define PORT_SPEED_4GB 0x03
#define PORT_SPEED_8GB 0x04
#define PORT_SPEED_16GB 0x05
+#define PORT_SPEED_32GB 0x06
#define PORT_SPEED_10GB 0x13
uint16_t link_data_rate; /* F/W operating speed */
@@ -2928,6 +2906,7 @@ struct qla_hw_data {
#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031
#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
+#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071
uint32_t device_type;
#define DT_ISP2100 BIT_0
#define DT_ISP2200 BIT_1
@@ -2948,7 +2927,8 @@ struct qla_hw_data {
#define DT_ISP8031 BIT_16
#define DT_ISPFX00 BIT_17
#define DT_ISP8044 BIT_18
-#define DT_ISP_LAST (DT_ISP8044 << 1)
+#define DT_ISP2071 BIT_19
+#define DT_ISP_LAST (DT_ISP2071 << 1)
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
@@ -2978,6 +2958,7 @@ struct qla_hw_data {
#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
+#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2986,6 +2967,7 @@ struct qla_hw_data {
#define IS_QLA25XX(ha) (IS_QLA2532(ha))
#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
#define IS_QLA84XX(ha) (IS_QLA8432(ha))
+#define IS_QLA27XX(ha) (IS_QLA2071(ha))
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -2994,11 +2976,13 @@ struct qla_hw_data {
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
- IS_QLA8044(ha))
+ IS_QLA8044(ha) || IS_QLA27XX(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
-#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
-#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
+#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
@@ -3008,7 +2992,8 @@ struct qla_hw_data {
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
-#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha))
+#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
/* Bit 21 of fw_attributes decides the MCTP capabilities */
#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
@@ -3133,6 +3118,9 @@ struct qla_hw_data {
uint16_t fw_xcb_count;
uint16_t fw_iocb_count;
+ uint32_t fw_shared_ram_start;
+ uint32_t fw_shared_ram_end;
+
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
uint16_t fw_seriallink_options24[4];
@@ -3141,6 +3129,9 @@ struct qla_hw_data {
uint32_t mpi_capabilities;
uint8_t phy_version[3];
+ /* Firmware dump template */
+ void *fw_dump_template;
+ uint32_t fw_dump_template_len;
/* Firmware dump information. */
struct qla2xxx_fw_dump *fw_dump;
uint32_t fw_dump_len;
@@ -3183,6 +3174,7 @@ struct qla_hw_data {
#define QLA_SWRITING 2
uint32_t optrom_region_start;
uint32_t optrom_region_size;
+ struct mutex optrom_mutex;
/* PCI expansion ROM image information. */
#define ROM_CODE_TYPE_BIOS 0
@@ -3309,6 +3301,7 @@ struct qla_hw_data {
struct mr_data_fx00 mr;
struct qlt_hw_data tgt;
+ int allow_cna_fw_dump;
};
/*
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 792a29294b62..32ab80957688 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -114,7 +114,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
goto out;
if (!ha->fce)
goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 610d3aa905a0..3a7353eaccbd 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1378,6 +1378,10 @@ struct qla_flt_header {
#define FLT_REG_NVRAM_0 0x15
#define FLT_REG_VPD_1 0x16
#define FLT_REG_NVRAM_1 0x17
+#define FLT_REG_VPD_2 0xD4
+#define FLT_REG_NVRAM_2 0xD5
+#define FLT_REG_VPD_3 0xD6
+#define FLT_REG_NVRAM_3 0xD7
#define FLT_REG_FDT 0x1a
#define FLT_REG_FLT 0x1c
#define FLT_REG_HW_EVENT_0 0x1d
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1f426628a0a5..e665e8109933 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -330,6 +330,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
dma_addr_t);
extern int qla24xx_abort_command(srb_t *);
+extern int qla24xx_async_abort_command(srb_t *);
extern int
qla24xx_abort_target(struct fc_port *, unsigned int, int);
extern int
@@ -511,6 +512,16 @@ extern void qla2300_fw_dump(scsi_qla_host_t *, int);
extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla8044_fw_dump(scsi_qla_host_t *, int);
+
+extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
+extern int qla27xx_fwdt_template_valid(void *);
+extern ulong qla27xx_fwdt_template_size(void *);
+extern const void *qla27xx_fwdt_template_default(void);
+extern ulong qla27xx_fwdt_template_default_size(void);
+
extern void qla2x00_dump_regs(scsi_qla_host_t *);
extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
@@ -594,7 +605,6 @@ extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
extern irqreturn_t qlafx00_intr_handler(int, void *);
extern void qlafx00_enable_intrs(struct qla_hw_data *);
extern void qlafx00_disable_intrs(struct qla_hw_data *);
-extern int qlafx00_abort_command(srb_t *);
extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
extern int qlafx00_start_scsi(srb_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index cd47f1b32d9a..e377f9d2f92a 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1532,6 +1532,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
if (IS_CNA_CAPABLE(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_10GB);
+ else if (IS_QLA27XX(ha))
+ eiter->a.sup_speed = __constant_cpu_to_be32(
+ FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB);
else if (IS_QLA25XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
@@ -1580,6 +1584,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
break;
+ case PORT_SPEED_32GB:
+ eiter->a.cur_speed =
+ __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB);
+ break;
default:
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
@@ -1889,6 +1897,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
case BIT_10:
list[i].fp_speed = PORT_SPEED_16GB;
break;
+ case BIT_8:
+ list[i].fp_speed = PORT_SPEED_32GB;
+ break;
}
ql_dbg(ql_dbg_disc, vha, 0x205b,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e7e5f4facf7f..38aeb54cd9d8 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -271,56 +271,46 @@ done:
}
static void
-qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
+qla2x00_tmf_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)ptr;
- struct srb_iocb *iocb = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
- uint32_t flags;
- uint16_t lun;
- int rval;
-
- if (!test_bit(UNLOADING, &vha->dpc_flags)) {
- flags = iocb->u.tmf.flags;
- lun = (uint16_t)iocb->u.tmf.lun;
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *tmf = &sp->u.iocb_cmd;
- /* Issue Marker IOCB */
- rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
- vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
- flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+ tmf->u.tmf.comp_status = CS_TIMEOUT;
+ complete(&tmf->u.tmf.comp);
+}
- if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
- ql_dbg(ql_dbg_taskm, vha, 0x8030,
- "TM IOCB failed (%x).\n", rval);
- }
- }
- sp->free(sp->fcport->vha, sp);
+static void
+qla2x00_tmf_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *tmf = &sp->u.iocb_cmd;
+ complete(&tmf->u.tmf.comp);
}
int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
uint32_t tag)
{
struct scsi_qla_host *vha = fcport->vha;
+ struct srb_iocb *tm_iocb;
srb_t *sp;
- struct srb_iocb *tcf;
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
- rval = QLA_FUNCTION_FAILED;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ tm_iocb = &sp->u.iocb_cmd;
sp->type = SRB_TM_CMD;
sp->name = "tmf";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- tcf = &sp->u.iocb_cmd;
- tcf->u.tmf.flags = tm_flags;
- tcf->u.tmf.lun = lun;
- tcf->u.tmf.data = tag;
- tcf->timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_tm_cmd_done;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+ tm_iocb->u.tmf.flags = flags;
+ tm_iocb->u.tmf.lun = lun;
+ tm_iocb->u.tmf.data = tag;
+ sp->done = qla2x00_tmf_sp_done;
+ tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+ init_completion(&tm_iocb->u.tmf.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -330,14 +320,121 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
"Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ wait_for_completion(&tm_iocb->u.tmf.comp);
+
+ rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+ if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8030,
+ "TM IOCB failed (%x).\n", rval);
+ }
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
+ flags = tm_iocb->u.tmf.flags;
+ lun = (uint16_t)tm_iocb->u.tmf.lun;
+
+ /* Issue Marker IOCB */
+ qla2x00_marker(vha, vha->hw->req_q_map[0],
+ vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+ flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+ }
+
+done_free_sp:
+ sp->free(vha, sp);
+done:
return rval;
+}
+
+static void
+qla24xx_abort_iocb_timeout(void *data)
+{
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+ abt->u.abt.comp_status = CS_TIMEOUT;
+ complete(&abt->u.abt.comp);
+}
+
+static void
+qla24xx_abort_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+ complete(&abt->u.abt.comp);
+}
+
+static int
+qla24xx_async_abort_cmd(srb_t *cmd_sp)
+{
+ scsi_qla_host_t *vha = cmd_sp->fcport->vha;
+ fc_port_t *fcport = cmd_sp->fcport;
+ struct srb_iocb *abt_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ abt_iocb = &sp->u.iocb_cmd;
+ sp->type = SRB_ABT_CMD;
+ sp->name = "abort";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+ abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+ sp->done = qla24xx_abort_sp_done;
+ abt_iocb->timeout = qla24xx_abort_iocb_timeout;
+ init_completion(&abt_iocb->u.abt.comp);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_async, vha, 0x507c,
+ "Abort command issued - hdl=%x, target_id=%x\n",
+ cmd_sp->handle, fcport->tgt_id);
+
+ wait_for_completion(&abt_iocb->u.abt.comp);
+
+ rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
done_free_sp:
- sp->free(fcport->vha, sp);
+ sp->free(vha, sp);
done:
return rval;
}
+int
+qla24xx_async_abort_command(srb_t *sp)
+{
+ unsigned long flags = 0;
+
+ uint32_t handle;
+ fc_port_t *fcport = sp->fcport;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = vha->req;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
+ if (req->outstanding_cmds[handle] == sp)
+ break;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (handle == req->num_outstanding_cmds) {
+ /* Command not found. */
+ return QLA_FUNCTION_FAILED;
+ }
+ if (sp->type == SRB_FXIOCB_DCMD)
+ return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+ FXDISC_ABORT_IOCTL);
+
+ return qla24xx_async_abort_cmd(sp);
+}
+
void
qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
@@ -1379,7 +1476,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
}
ha->fw_dumped = 0;
- fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+ dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+ req_q_size = rsp_q_size = 0;
+
+ if (IS_QLA27XX(ha))
+ goto try_fce;
+
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
fixed_size = sizeof(struct qla2100_fw_dump);
} else if (IS_QLA23XX(ha)) {
@@ -1395,6 +1497,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
else
fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
if (ha->mqenable) {
@@ -1412,9 +1515,16 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
if (ha->tgt.atio_ring)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Allocate memory for Fibre Channel Event Buffer. */
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
goto try_eft;
+try_fce:
+ if (ha->fce)
+ dma_free_coherent(&ha->pdev->dev,
+ FCE_SIZE, ha->fce, ha->fce_dma);
+
+ /* Allocate memory for Fibre Channel Event Buffer. */
tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
GFP_KERNEL);
if (!tc) {
@@ -1442,7 +1552,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
+
try_eft:
+ if (ha->eft)
+ dma_free_coherent(&ha->pdev->dev,
+ EFT_SIZE, ha->eft, ha->eft_dma);
+
/* Allocate memory for Extended Trace Buffer. */
tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
GFP_KERNEL);
@@ -1469,15 +1584,28 @@ try_eft:
ha->eft_dma = tc_dma;
ha->eft = tc;
}
+
cont_alloc:
+ if (IS_QLA27XX(ha)) {
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x00ba,
+ "Failed missing fwdump template\n");
+ return;
+ }
+ dump_size = qla27xx_fwdt_calculate_dump_size(vha);
+ ql_dbg(ql_dbg_init, vha, 0x00fa,
+ "-> allocating fwdump (%x bytes)...\n", dump_size);
+ goto allocate;
+ }
+
req_q_size = req->length * sizeof(request_t);
rsp_q_size = rsp->length * sizeof(response_t);
-
dump_size = offsetof(struct qla2xxx_fw_dump, isp);
dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
ha->chain_offset = dump_size;
dump_size += mq_size + fce_size;
+allocate:
ha->fw_dump = vmalloc(dump_size);
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0x00c4,
@@ -1499,10 +1627,13 @@ cont_alloc:
}
return;
}
+ ha->fw_dump_len = dump_size;
ql_dbg(ql_dbg_init, vha, 0x00c5,
"Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
- ha->fw_dump_len = dump_size;
+ if (IS_QLA27XX(ha))
+ return;
+
ha->fw_dump->signature[0] = 'Q';
ha->fw_dump->signature[1] = 'L';
ha->fw_dump->signature[2] = 'G';
@@ -1718,9 +1849,6 @@ enable_82xx_npiv:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
- if (IS_QLA83XX(ha))
- goto skip_fac_check;
-
if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
uint32_t size;
@@ -1733,8 +1861,8 @@ enable_82xx_npiv:
"Unsupported FAC firmware (%d.%02d.%02d).\n",
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version);
-skip_fac_check:
- if (IS_QLA83XX(ha)) {
+
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ha->flags.fac_supported = 0;
rval = QLA_SUCCESS;
}
@@ -1933,7 +2061,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
- if (ha->mqenable || IS_QLA83XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
icb->rid = __constant_cpu_to_le16(rid);
if (ha->flags.msix_enabled) {
@@ -4792,13 +4920,14 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
nv = ha->nvram;
/* Determine NVRAM starting address. */
- if (ha->flags.port0) {
+ if (ha->port_no == 0) {
ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
ha->vpd_base = FA_NVRAM_VPD0_ADDR;
} else {
ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
ha->vpd_base = FA_NVRAM_VPD1_ADDR;
}
+
ha->nvram_size = sizeof(struct nvram_24xx);
ha->vpd_size = FA_NVRAM_VPD_SIZE;
@@ -4842,7 +4971,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
nv->exchange_count = __constant_cpu_to_le16(0);
nv->hard_address = __constant_cpu_to_le16(124);
nv->port_name[0] = 0x21;
- nv->port_name[1] = 0x00 + ha->port_no;
+ nv->port_name[1] = 0x00 + ha->port_no + 1;
nv->port_name[2] = 0x00;
nv->port_name[3] = 0xe0;
nv->port_name[4] = 0x8b;
@@ -5117,6 +5246,99 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
segments--;
}
+ if (!IS_QLA27XX(ha))
+ return rval;
+
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ ql_dbg(ql_dbg_init, vha, 0x0161,
+ "Loading fwdump template from %x\n", faddr);
+ qla24xx_read_flash_data(vha, dcode, faddr, 7);
+ risc_size = be32_to_cpu(dcode[2]);
+ ql_dbg(ql_dbg_init, vha, 0x0162,
+ "-> array size %x dwords\n", risc_size);
+ if (risc_size == 0 || risc_size == ~0)
+ goto default_template;
+
+ dlen = (risc_size - 8) * sizeof(*dcode);
+ ql_dbg(ql_dbg_init, vha, 0x0163,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x0164,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto default_template;
+ }
+
+ faddr += 7;
+ risc_size -= 8;
+ dcode = ha->fw_dump_template;
+ qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = le32_to_cpu(dcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(dcode)) {
+ ql_log(ql_log_warn, vha, 0x0165,
+ "Failed fwdump template validate\n");
+ goto default_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(dcode);
+ ql_dbg(ql_dbg_init, vha, 0x0166,
+ "-> template size %x bytes\n", dlen);
+ if (dlen > risc_size * sizeof(*dcode)) {
+ ql_log(ql_log_warn, vha, 0x0167,
+ "Failed fwdump template exceeds array by %x bytes\n",
+ (uint32_t)(dlen - risc_size * sizeof(*dcode)));
+ goto default_template;
+ }
+ ha->fw_dump_template_len = dlen;
+ return rval;
+
+default_template:
+ ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ dlen = qla27xx_fwdt_template_default_size();
+ ql_dbg(ql_dbg_init, vha, 0x0169,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x016a,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto failed_template;
+ }
+
+ dcode = ha->fw_dump_template;
+ risc_size = dlen / sizeof(*dcode);
+ memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = be32_to_cpu(dcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+ ql_log(ql_log_warn, vha, 0x016b,
+ "Failed fwdump template validate\n");
+ goto failed_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+ ql_dbg(ql_dbg_init, vha, 0x016c,
+ "-> template size %x bytes\n", dlen);
+ ha->fw_dump_template_len = dlen;
+ return rval;
+
+failed_template:
+ ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
return rval;
}
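
Editor's note: the flash-based loader above (and its blob-based counterpart further down) reads a dword count from a big-endian template header, rejects the all-zero/all-ones cases, and only then allocates and byte-swaps the array. A standalone sketch of that header-read step, assuming a hypothetical four-byte header fragment:

```c
#include <stdint.h>
#include <stdio.h>

/* Decode a big-endian 32-bit word from a raw byte buffer. */
static uint32_t get_be32(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
        /* Hypothetical header bytes as they would sit in flash. */
        const uint8_t header[4] = { 0x00, 0x00, 0x00, 0x2a };
        uint32_t array_size = get_be32(header);

        if (array_size == 0 || array_size == 0xffffffffu) {
                puts("no template present, fall back to the default");
                return 1;
        }
        printf("template array size: %u dwords\n", (unsigned)array_size);
        return 0;
}
```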
@@ -5231,7 +5453,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
uint32_t risc_size;
uint32_t i;
struct fw_blob *blob;
- uint32_t *fwcode, fwclen;
+ const uint32_t *fwcode;
+ uint32_t fwclen;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
@@ -5263,7 +5486,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ql_log(ql_log_fatal, vha, 0x0093,
"Unable to verify integrity of firmware image (%Zd).\n",
blob->fw->size);
- goto fail_fw_integrity;
+ return QLA_FUNCTION_FAILED;
}
for (i = 0; i < 4; i++)
dcode[i] = be32_to_cpu(fwcode[i + 4]);
@@ -5277,7 +5500,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ql_log(ql_log_fatal, vha, 0x0095,
"Firmware data: %08x %08x %08x %08x.\n",
dcode[0], dcode[1], dcode[2], dcode[3]);
- goto fail_fw_integrity;
+ return QLA_FUNCTION_FAILED;
}
while (segments && rval == QLA_SUCCESS) {
@@ -5291,8 +5514,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ql_log(ql_log_fatal, vha, 0x0096,
"Unable to verify integrity of firmware image "
"(%Zd).\n", blob->fw->size);
-
- goto fail_fw_integrity;
+ return QLA_FUNCTION_FAILED;
}
fragment = 0;
@@ -5326,10 +5548,100 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Next segment. */
segments--;
}
+
+ if (!IS_QLA27XX(ha))
+ return rval;
+
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ ql_dbg(ql_dbg_init, vha, 0x171,
+ "Loading fwdump template from %x\n",
+ (uint32_t)((void *)fwcode - (void *)blob->fw->data));
+ risc_size = be32_to_cpu(fwcode[2]);
+ ql_dbg(ql_dbg_init, vha, 0x172,
+ "-> array size %x dwords\n", risc_size);
+ if (risc_size == 0 || risc_size == ~0)
+ goto default_template;
+
+ dlen = (risc_size - 8) * sizeof(*fwcode);
+ ql_dbg(ql_dbg_init, vha, 0x0173,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x0174,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto default_template;
+ }
+
+ fwcode += 7;
+ risc_size -= 8;
+ dcode = ha->fw_dump_template;
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = le32_to_cpu(fwcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(dcode)) {
+ ql_log(ql_log_warn, vha, 0x0175,
+ "Failed fwdump template validate\n");
+ goto default_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(dcode);
+ ql_dbg(ql_dbg_init, vha, 0x0176,
+ "-> template size %x bytes\n", dlen);
+ if (dlen > risc_size * sizeof(*fwcode)) {
+ ql_log(ql_log_warn, vha, 0x0177,
+ "Failed fwdump template exceeds array by %x bytes\n",
+ (uint32_t)(dlen - risc_size * sizeof(*fwcode)));
+ goto default_template;
+ }
+ ha->fw_dump_template_len = dlen;
return rval;
-fail_fw_integrity:
- return QLA_FUNCTION_FAILED;
+default_template:
+ ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ dlen = qla27xx_fwdt_template_default_size();
+ ql_dbg(ql_dbg_init, vha, 0x0179,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x017a,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto failed_template;
+ }
+
+ dcode = ha->fw_dump_template;
+ risc_size = dlen / sizeof(*fwcode);
+ fwcode = qla27xx_fwdt_template_default();
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = be32_to_cpu(fwcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+ ql_log(ql_log_warn, vha, 0x017b,
+ "Failed fwdump template validate\n");
+ goto failed_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+ ql_dbg(ql_dbg_init, vha, 0x017c,
+ "-> template size %x bytes\n", dlen);
+ ha->fw_dump_template_len = dlen;
+ return rval;
+
+failed_template:
+ ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+ return rval;
}
int
@@ -5605,7 +5917,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
nv->exchange_count = __constant_cpu_to_le16(0);
nv->port_name[0] = 0x21;
- nv->port_name[1] = 0x00 + ha->port_no;
+ nv->port_name[1] = 0x00 + ha->port_no + 1;
nv->port_name[2] = 0x00;
nv->port_name[3] = 0xe0;
nv->port_name[4] = 0x8b;
@@ -5639,7 +5951,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->enode_mac[2] = 0xDD;
nv->enode_mac[3] = 0x04;
nv->enode_mac[4] = 0x05;
- nv->enode_mac[5] = 0x06 + ha->port_no;
+ nv->enode_mac[5] = 0x06 + ha->port_no + 1;
rval = 1;
}
@@ -5677,7 +5989,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
icb->enode_mac[2] = 0xDD;
icb->enode_mac[3] = 0x04;
icb->enode_mac[4] = 0x05;
- icb->enode_mac[5] = 0x06 + ha->port_no;
+ icb->enode_mac[5] = 0x06 + ha->port_no + 1;
}
/* Use extended-initialization control block. */
@@ -5780,7 +6092,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
ha->login_retry_count = ql2xloginretrycount;
/* if not running MSI-X we need handshaking on interrupts */
- if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha))
+ if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
/* Enable ZIO. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 46b9307e8be4..e607568bce49 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -488,7 +488,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
req->ring_ptr++;
/* Set chip new ring index. */
- if (ha->mqenable || IS_QLA83XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
WRT_REG_DWORD(req->req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
} else if (IS_QLAFX00(ha)) {
@@ -524,7 +524,6 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
{
mrk_entry_t *mrk;
struct mrk_entry_24xx *mrk24 = NULL;
- struct mrk_entry_fx00 *mrkfx = NULL;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
@@ -541,15 +540,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk->entry_type = MARKER_TYPE;
mrk->modifier = type;
if (type != MK_SYNC_ALL) {
- if (IS_QLAFX00(ha)) {
- mrkfx = (struct mrk_entry_fx00 *) mrk;
- mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
- mrkfx->handle_hi = 0;
- mrkfx->tgt_id = cpu_to_le16(loop_id);
- mrkfx->lun[1] = LSB(lun);
- mrkfx->lun[2] = MSB(lun);
- host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
- } else if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
mrk24->lun[1] = LSB(lun);
@@ -1823,7 +1814,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; req->num_outstanding_cmds; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
if (handle == req->num_outstanding_cmds)
handle = 1;
@@ -1848,7 +1839,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt) {
- if (ha->mqenable || IS_QLA83XX(ha))
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2594,6 +2585,29 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
+void
+qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
+{
+ struct srb_iocb *aio = &sp->u.iocb_cmd;
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct req_que *req = vha->req;
+
+ memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
+ abt_iocb->entry_type = ABORT_IOCB_TYPE;
+ abt_iocb->entry_count = 1;
+ abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ abt_iocb->handle_to_abort =
+ cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+ abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ abt_iocb->vp_index = vha->vp_idx;
+ abt_iocb->req_que_no = cpu_to_le16(req->id);
+ /* Send the command to the firmware */
+ wmb();
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -2647,7 +2661,9 @@ qla2x00_start_sp(srb_t *sp)
qlafx00_fxdisc_iocb(sp, pkt);
break;
case SRB_ABT_CMD:
- qlafx00_abort_iocb(sp, pkt);
+ IS_QLAFX00(ha) ?
+ qlafx00_abort_iocb(sp, pkt) :
+ qla24xx_abort_iocb(sp, pkt);
break;
default:
break;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0a1dcb43d18b..95314ef2e505 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -356,15 +356,16 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
- static const char * const link_speeds[] = {
- "1", "2", "?", "4", "8", "16", "10"
+ static const char *const link_speeds[] = {
+ "1", "2", "?", "4", "8", "16", "32", "10"
};
+#define QLA_LAST_SPEED 7
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return link_speeds[0];
else if (speed == 0x13)
- return link_speeds[6];
- else if (speed < 6)
+ return link_speeds[QLA_LAST_SPEED];
+ else if (speed < QLA_LAST_SPEED)
return link_speeds[speed];
else
return link_speeds[LS_UNKNOWN];
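
Editor's note: the reworked lookup above adds the 32 Gb entry and replaces the magic index 6 with QLA_LAST_SPEED; any unrecognized code falls back to the "?" slot. A standalone sketch of the same table-plus-sentinel idiom, with index values taken from the hunk (LS_UNKNOWN is assumed to index the "?" entry, as the fallback return implies):

```c
#include <stdio.h>
#include <stdint.h>

#define LS_UNKNOWN      2   /* index of the "?" entry */
#define QLA_LAST_SPEED  7   /* index of the "10" (10 Gb) entry */

static const char *const link_speeds[] = {
        "1", "2", "?", "4", "8", "16", "32", "10"
};

static const char *speed_str(uint16_t speed)
{
        if (speed == 0x13)                  /* firmware code for 10 Gb */
                return link_speeds[QLA_LAST_SPEED];
        if (speed < QLA_LAST_SPEED)         /* direct table index */
                return link_speeds[speed];
        return link_speeds[LS_UNKNOWN];     /* anything else is unknown */
}

int main(void)
{
        printf("%s %s %s\n", speed_str(5), speed_str(0x13), speed_str(9));
        /* prints: 16 10 ? */
        return 0;
}
```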
@@ -649,7 +650,7 @@ skip_rio:
break;
case MBA_SYSTEM_ERR: /* System Error */
- mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
+ mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
RD_REG_WORD(&reg24->mailbox7) : 0;
ql_log(ql_log_warn, vha, 0x5003,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
@@ -666,7 +667,7 @@ skip_rio:
vha->device_flags |= DFLG_DEV_FAILED;
} else {
/* Check to see if MPI timeout occurred */
- if ((mbx & MBX_3) && (ha->flags.port0))
+ if ((mbx & MBX_3) && (ha->port_no == 0))
set_bit(MPI_RESET_NEEDED,
&vha->dpc_flags);
@@ -1497,8 +1498,7 @@ logio_done:
}
static void
-qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
- struct tsk_mgmt_entry *tsk)
+qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
const char func[] = "TMF-IOCB";
const char *type;
@@ -1506,7 +1506,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
srb_t *sp;
struct srb_iocb *iocb;
struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
- int error = 1;
sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
if (!sp)
@@ -1515,37 +1514,35 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
iocb = &sp->u.iocb_cmd;
type = sp->name;
fcport = sp->fcport;
+ iocb->u.tmf.data = QLA_SUCCESS;
if (sts->entry_status) {
ql_log(ql_log_warn, fcport->vha, 0x5038,
"Async-%s error - hdl=%x entry-status(%x).\n",
type, sp->handle, sts->entry_status);
+ iocb->u.tmf.data = QLA_FUNCTION_FAILED;
} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
ql_log(ql_log_warn, fcport->vha, 0x5039,
"Async-%s error - hdl=%x completion status(%x).\n",
type, sp->handle, sts->comp_status);
- } else if (!(le16_to_cpu(sts->scsi_status) &
+ iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+ } else if ((le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
- ql_log(ql_log_warn, fcport->vha, 0x503a,
- "Async-%s error - hdl=%x no response info(%x).\n",
- type, sp->handle, sts->scsi_status);
- } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
- ql_log(ql_log_warn, fcport->vha, 0x503b,
- "Async-%s error - hdl=%x not enough response(%d).\n",
- type, sp->handle, sts->rsp_data_len);
- } else if (sts->data[3]) {
- ql_log(ql_log_warn, fcport->vha, 0x503c,
- "Async-%s error - hdl=%x response(%x).\n",
- type, sp->handle, sts->data[3]);
- } else {
- error = 0;
+ if (le32_to_cpu(sts->rsp_data_len) < 4) {
+ ql_log(ql_log_warn, fcport->vha, 0x503b,
+ "Async-%s error - hdl=%x not enough response(%d).\n",
+ type, sp->handle, sts->rsp_data_len);
+ } else if (sts->data[3]) {
+ ql_log(ql_log_warn, fcport->vha, 0x503c,
+ "Async-%s error - hdl=%x response(%x).\n",
+ type, sp->handle, sts->data[3]);
+ iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+ }
}
- if (error) {
- iocb->u.tmf.data = error;
+ if (iocb->u.tmf.data != QLA_SUCCESS)
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
(uint8_t *)sts, sizeof(*sts));
- }
sp->done(vha, sp, 0);
}
@@ -2025,6 +2022,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
+ /* Task Management completion. */
+ if (sp->type == SRB_TM_CMD) {
+ qla24xx_tm_iocb_entry(vha, req, pkt);
+ return;
+ }
+
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
qla2x00_process_completed_request(vha, req, handle);
@@ -2425,6 +2428,23 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
}
}
+static void
+qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct abort_entry_24xx *pkt)
+{
+ const char func[] = "ABT_IOCB";
+ srb_t *sp;
+ struct srb_iocb *abt;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ abt = &sp->u.iocb_cmd;
+ abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+ sp->done(vha, sp, 0);
+}
+
/**
* qla24xx_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2474,10 +2494,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla24xx_logio_entry(vha, rsp->req,
(struct logio_entry_24xx *)pkt);
break;
- case TSK_MGMT_IOCB_TYPE:
- qla24xx_tm_iocb_entry(vha, rsp->req,
- (struct tsk_mgmt_entry *)pkt);
- break;
case CT_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
break;
@@ -2497,6 +2513,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
* from falling into default case
*/
break;
+ case ABORT_IOCB_TYPE:
+ qla24xx_abort_iocb_entry(vha, rsp->req,
+ (struct abort_entry_24xx *)pkt);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -2525,7 +2545,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
return;
rval = QLA_SUCCESS;
@@ -2979,7 +3000,7 @@ msix_register_fail:
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (IS_QLA83XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
if (ha->msixbase && ha->mqiobase &&
(ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
ha->mqenable = 1;
@@ -3003,12 +3024,13 @@ int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
int ret = QLA_FUNCTION_FAILED;
- device_reg_t __iomem *reg = ha->iobase;
+ device_reg_t *reg = ha->iobase;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
- !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
+ !IS_QLA27XX(ha))
goto skip_msi;
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -3043,7 +3065,8 @@ skip_msix:
"Falling back-to MSI mode -%d.\n", ret);
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
- !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha))
+ !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
+ !IS_QLA27XX(ha))
goto skip_msi;
ret = pci_enable_msi(ha->pdev);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b94511ae0051..2528709c4add 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -35,7 +35,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
int rval;
unsigned long flags = 0;
- device_reg_t __iomem *reg;
+ device_reg_t *reg;
uint8_t abort_active;
uint8_t io_lock_on;
uint16_t command = 0;
@@ -468,7 +468,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mcp->mb[1] = MSW(risc_addr);
mcp->mb[2] = LSW(risc_addr);
mcp->mb[3] = 0;
- if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha)) {
struct nvram_81xx *nv = ha->nvram;
mcp->mb[4] = (nv->enhanced_features &
EXTENDED_BB_CREDITS);
@@ -539,6 +540,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
if (IS_FWI2_CAPABLE(ha))
mcp->in_mb |= MBX_17|MBX_16|MBX_15;
+ if (IS_QLA27XX(ha))
+ mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18;
mcp->flags = 0;
mcp->tov = MBX_TOV_SECONDS;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -574,6 +577,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
"%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
__func__, mcp->mb[17], mcp->mb[16]);
}
+ if (IS_QLA27XX(ha)) {
+ ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
+ ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
+ }
failed:
if (rval != QLA_SUCCESS) {
@@ -1225,7 +1232,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
}
/* 1 and 2 should normally be captured. */
mcp->in_mb = MBX_2|MBX_1|MBX_0;
- if (IS_QLA83XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* mb3 is additional info about the installed SFP. */
mcp->in_mb |= MBX_3;
mcp->buf_size = size;
@@ -2349,7 +2356,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
+ if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
mcp->in_mb |= MBX_12;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -2590,6 +2597,9 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
+ if (ql2xasynctmfenable)
+ return qla24xx_async_abort_command(sp);
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -3032,7 +3042,7 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
"Entered %s.\n", __func__);
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
- !IS_QLA83XX(vha->hw))
+ !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -3662,7 +3672,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
mcp->mb[12] = req->qos;
mcp->mb[11] = req->vp_idx;
mcp->mb[13] = req->rid;
- if (IS_QLA83XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
mcp->mb[15] = 0;
mcp->mb[4] = req->id;
@@ -3676,9 +3686,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
mcp->flags = MBX_DMA_OUT;
mcp->tov = MBX_TOV_SECONDS * 2;
- if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
mcp->in_mb |= MBX_1;
- if (IS_QLA83XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
mcp->out_mb |= MBX_15;
/* debug q create issue in SR-IOV */
mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
@@ -3687,7 +3697,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
WRT_REG_DWORD(req->req_q_in, 0);
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
WRT_REG_DWORD(req->req_q_out, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3725,7 +3735,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mcp->mb[5] = rsp->length;
mcp->mb[14] = rsp->msix->entry;
mcp->mb[13] = rsp->rid;
- if (IS_QLA83XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
mcp->mb[15] = 0;
mcp->mb[4] = rsp->id;
@@ -3742,7 +3752,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
if (IS_QLA81XX(ha)) {
mcp->out_mb |= MBX_12|MBX_11|MBX_10;
mcp->in_mb |= MBX_1;
- } else if (IS_QLA83XX(ha)) {
+ } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
mcp->in_mb |= MBX_1;
/* debug q create issue in SR-IOV */
@@ -3809,7 +3819,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
"Entered %s.\n", __func__);
- if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+ !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -3840,7 +3851,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+ !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
@@ -3874,7 +3886,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+ !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
@@ -4545,7 +4558,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mcp->mb[1] = 0;
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
- if (IS_QLA83XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -4574,7 +4587,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
"Entered %s.\n", __func__);
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
+ !IS_QLA27XX(ha))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_PORT_CONFIG;
mcp->out_mb = MBX_0;
@@ -5070,7 +5084,7 @@ qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
@@ -5145,7 +5159,7 @@ qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
struct qla_hw_data *ha = vha->hw;
unsigned long retry_max_time = jiffies + (2 * HZ);
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index a72df701fb38..f0a852257f99 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -630,7 +630,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
struct req_que *req = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
- device_reg_t __iomem *reg;
+ device_reg_t *reg;
uint32_t cnt;
req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
@@ -754,7 +754,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
struct rsp_que *rsp = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
- device_reg_t __iomem *reg;
+ device_reg_t *reg;
rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
if (rsp == NULL) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index ba6f8b139c98..0aaf6a9c87d3 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -40,7 +40,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
{
int rval;
unsigned long flags = 0;
- device_reg_t __iomem *reg;
+ device_reg_t *reg;
uint8_t abort_active;
uint8_t io_lock_on;
uint16_t command = 0;
@@ -631,20 +631,6 @@ qlafx00_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
- struct init_cb_fx *icb;
- struct req_que *req = ha->req_q_map[0];
- struct rsp_que *rsp = ha->rsp_q_map[0];
-
- /* Setup ring parameters in initialization control block. */
- icb = (struct init_cb_fx *)ha->init_cb;
- icb->request_q_outpointer = __constant_cpu_to_le16(0);
- icb->response_q_inpointer = __constant_cpu_to_le16(0);
- icb->request_q_length = cpu_to_le16(req->length);
- icb->response_q_length = cpu_to_le16(rsp->length);
- icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
- icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
- icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
- icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
WRT_REG_DWORD(&reg->req_q_in, 0);
WRT_REG_DWORD(&reg->req_q_out, 0);
@@ -699,78 +685,16 @@ qlafx00_disable_intrs(struct qla_hw_data *ha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-static void
-qlafx00_tmf_iocb_timeout(void *data)
-{
- srb_t *sp = (srb_t *)data;
- struct srb_iocb *tmf = &sp->u.iocb_cmd;
-
- tmf->u.tmf.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
- complete(&tmf->u.tmf.comp);
-}
-
-static void
-qlafx00_tmf_sp_done(void *data, void *ptr, int res)
-{
- srb_t *sp = (srb_t *)ptr;
- struct srb_iocb *tmf = &sp->u.iocb_cmd;
-
- complete(&tmf->u.tmf.comp);
-}
-
-static int
-qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
- uint32_t lun, uint32_t tag)
-{
- scsi_qla_host_t *vha = fcport->vha;
- struct srb_iocb *tm_iocb;
- srb_t *sp;
- int rval = QLA_FUNCTION_FAILED;
-
- sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
- if (!sp)
- goto done;
-
- tm_iocb = &sp->u.iocb_cmd;
- sp->type = SRB_TM_CMD;
- sp->name = "tmf";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
- tm_iocb->u.tmf.flags = flags;
- tm_iocb->u.tmf.lun = lun;
- tm_iocb->u.tmf.data = tag;
- sp->done = qlafx00_tmf_sp_done;
- tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
- init_completion(&tm_iocb->u.tmf.comp);
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
- ql_dbg(ql_dbg_async, vha, 0x507b,
- "Task management command issued target_id=%x\n",
- fcport->tgt_id);
-
- wait_for_completion(&tm_iocb->u.tmf.comp);
-
- rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
-
-done_free_sp:
- sp->free(vha, sp);
-done:
- return rval;
-}
-
int
qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
{
- return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+ return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
}
int
qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
{
- return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+ return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
}
int
@@ -997,6 +921,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
break;
default:
+ if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
+ break;
+
/* If fw is apparently not ready. In order to continue,
* we might need to issue Mbox cmd, but the problem is
* that the DoorBell vector values that come with the
@@ -2014,7 +1941,8 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
} else if (fx_type == FXDISC_ABORT_IOCTL)
fdisc->u.fxiocb.result =
- (fdisc->u.fxiocb.result == cpu_to_le32(0x68)) ?
+ (fdisc->u.fxiocb.result ==
+ cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);
rval = le32_to_cpu(fdisc->u.fxiocb.result);
@@ -2034,94 +1962,6 @@ done:
return rval;
}
-static void
-qlafx00_abort_iocb_timeout(void *data)
-{
- srb_t *sp = (srb_t *)data;
- struct srb_iocb *abt = &sp->u.iocb_cmd;
-
- abt->u.abt.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
- complete(&abt->u.abt.comp);
-}
-
-static void
-qlafx00_abort_sp_done(void *data, void *ptr, int res)
-{
- srb_t *sp = (srb_t *)ptr;
- struct srb_iocb *abt = &sp->u.iocb_cmd;
-
- complete(&abt->u.abt.comp);
-}
-
-static int
-qlafx00_async_abt_cmd(srb_t *cmd_sp)
-{
- scsi_qla_host_t *vha = cmd_sp->fcport->vha;
- fc_port_t *fcport = cmd_sp->fcport;
- struct srb_iocb *abt_iocb;
- srb_t *sp;
- int rval = QLA_FUNCTION_FAILED;
-
- sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
- if (!sp)
- goto done;
-
- abt_iocb = &sp->u.iocb_cmd;
- sp->type = SRB_ABT_CMD;
- sp->name = "abort";
- qla2x00_init_timer(sp, FXDISC_TIMEOUT);
- abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
- sp->done = qlafx00_abort_sp_done;
- abt_iocb->timeout = qlafx00_abort_iocb_timeout;
- init_completion(&abt_iocb->u.abt.comp);
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
- ql_dbg(ql_dbg_async, vha, 0x507c,
- "Abort command issued - hdl=%x, target_id=%x\n",
- cmd_sp->handle, fcport->tgt_id);
-
- wait_for_completion(&abt_iocb->u.abt.comp);
-
- rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
-
-done_free_sp:
- sp->free(vha, sp);
-done:
- return rval;
-}
-
-int
-qlafx00_abort_command(srb_t *sp)
-{
- unsigned long flags = 0;
-
- uint32_t handle;
- fc_port_t *fcport = sp->fcport;
- struct scsi_qla_host *vha = fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
- if (req->outstanding_cmds[handle] == sp)
- break;
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
- /* Command not found. */
- return QLA_FUNCTION_FAILED;
- }
- if (sp->type == SRB_FXIOCB_DCMD)
- return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
- FXDISC_ABORT_IOCTL);
-
- return qlafx00_async_abt_cmd(sp);
-}
-
/*
* qlafx00_initialize_adapter
* Initialize board.
@@ -2150,7 +1990,6 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
vha->device_flags = DFLG_NO_CABLE;
vha->dpc_flags = 0;
vha->flags.management_server_logged_in = 0;
- vha->marker_needed = 0;
ha->isp_abort_cnt = 0;
ha->beacon_blink_led = 0;
@@ -2354,8 +2193,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
fstatus.ioctl_flags = pkt->fw_iotcl_flags;
fstatus.ioctl_data = pkt->dataword_r;
fstatus.adapid = pkt->adapid;
- fstatus.adapid_hi = pkt->adapid_hi;
- fstatus.reserved_2 = pkt->reserved_1;
+ fstatus.reserved_2 = pkt->dataword_r_extra;
fstatus.res_count = pkt->residuallen;
fstatus.status = pkt->status;
fstatus.seq_number = pkt->seq_no;
@@ -2804,7 +2642,7 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
const char func[] = "ERROR-IOCB";
- uint16_t que = MSW(pkt->handle);
+ uint16_t que = 0;
struct req_que *req = NULL;
int res = DID_ERROR << 16;
@@ -2833,16 +2671,22 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
{
struct sts_entry_fx00 *pkt;
response_t *lptr;
+ uint16_t lreq_q_in = 0;
+ uint16_t lreq_q_out = 0;
- while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
- RESPONSE_PROCESSED) {
+ lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
+ lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out);
+
+ while (lreq_q_in != lreq_q_out) {
lptr = rsp->ring_ptr;
memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
sizeof(rsp->rsp_pkt));
pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
rsp->ring_index++;
+ lreq_q_out++;
if (rsp->ring_index == rsp->length) {
+ lreq_q_out = 0;
rsp->ring_index = 0;
rsp->ring_ptr = rsp->ring;
} else {
@@ -2854,7 +2698,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
qlafx00_error_entry(vha, rsp,
(struct sts_entry_fx00 *)pkt, pkt->entry_status,
pkt->entry_type);
- goto next_iter;
continue;
}
@@ -2888,10 +2731,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
pkt->entry_type, pkt->entry_status);
break;
}
-next_iter:
- WRT_REG_DWORD((void __iomem *)&lptr->signature,
- RESPONSE_PROCESSED);
- wmb();
}
/* Adjust ring index */
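
Editor's note: the rewritten loop above consumes entries by advancing a local copy of the out pointer toward the in pointer, wrapping both it and the ring index at the queue length, instead of polling each entry's signature word. A minimal sketch of that in/out ring walk (ring length and contents are made up):

```c
#include <stdio.h>

#define RING_LEN 4

int main(void)
{
        int ring[RING_LEN] = { 10, 11, 12, 13 };
        unsigned in = 2, out = 3;   /* producer has wrapped past the consumer */
        unsigned index = out;       /* index and out track together */

        while (out != in) {
                printf("consume entry %d at slot %u\n", ring[index], index);
                index++;
                out++;
                if (index == RING_LEN) {
                        index = 0;
                        out = 0;
                }
        }
        return 0;
}
```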
@@ -2926,9 +2765,9 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break;
case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
- ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+ ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
+ ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
+ ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
ql_dbg(ql_dbg_async, vha, 0x5077,
"Asynchronous port Update received "
"aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
@@ -2985,7 +2824,7 @@ static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
uint16_t cnt;
- uint16_t __iomem *wptr;
+ uint32_t __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
@@ -2995,10 +2834,10 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out32[0] = mb0;
- wptr = (uint16_t __iomem *)&reg->mailbox17;
+ wptr = (uint32_t __iomem *)&reg->mailbox17;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
wptr++;
}
}
@@ -3025,6 +2864,7 @@ qlafx00_intr_handler(int irq, void *dev_id)
struct rsp_que *rsp;
unsigned long flags;
uint32_t clr_intr = 0;
+ uint32_t intr_stat = 0;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -3046,34 +2886,26 @@ qlafx00_intr_handler(int irq, void *dev_id)
stat = QLAFX00_RD_INTR_REG(ha);
if (qla2x00_check_reg_for_disconnect(vha, stat))
break;
- if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
+ intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
+ if (!intr_stat)
break;
- switch (stat & QLAFX00_HST_INT_STS_BITS) {
- case QLAFX00_INTR_MB_CMPLT:
- case QLAFX00_INTR_MB_RSP_CMPLT:
- case QLAFX00_INTR_MB_ASYNC_CMPLT:
- case QLAFX00_INTR_ALL_CMPLT:
+ if (stat & QLAFX00_INTR_MB_CMPLT) {
mb[0] = RD_REG_WORD(&reg->mailbox16);
qlafx00_mbx_completion(vha, mb[0]);
status |= MBX_INTERRUPT;
clr_intr |= QLAFX00_INTR_MB_CMPLT;
- break;
- case QLAFX00_INTR_ASYNC_CMPLT:
- case QLAFX00_INTR_RSP_ASYNC_CMPLT:
+ }
+ if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
qlafx00_async_event(vha);
clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
- break;
- case QLAFX00_INTR_RSP_CMPLT:
+ }
+ if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
qlafx00_process_response_queue(vha, rsp);
clr_intr |= QLAFX00_INTR_RSP_CMPLT;
- break;
- default:
- ql_dbg(ql_dbg_async, vha, 0x507a,
- "Unrecognized interrupt type (%d).\n", stat);
- break;
}
+
QLAFX00_CLR_INTR_REG(ha, clr_intr);
QLAFX00_RD_INTR_REG(ha);
}
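
Editor's note: the handler above now tests each status bit on its own instead of switching on the combined value, so a single register read that reports, say, a mailbox completion and a response-queue completion services both and builds one acknowledge mask. A user-space sketch of that dispatch style (bit values mirror the defines in this patch; the handlers are stand-ins):

```c
#include <stdio.h>
#include <stdint.h>

#define INTR_MB_CMPLT    0x1
#define INTR_RSP_CMPLT   0x2
#define INTR_ASYNC_CMPLT 0x4
#define INTR_STS_BITS    (INTR_MB_CMPLT | INTR_RSP_CMPLT | INTR_ASYNC_CMPLT)

static uint32_t service(uint32_t stat)
{
        uint32_t clr = 0;

        if (stat & INTR_MB_CMPLT) {
                printf("mailbox completion\n");
                clr |= INTR_MB_CMPLT;
        }
        if (stat & INTR_ASYNC_CMPLT) {
                printf("async event\n");
                clr |= INTR_ASYNC_CMPLT;
        }
        if (stat & INTR_RSP_CMPLT) {
                printf("response queue\n");
                clr |= INTR_RSP_CMPLT;
        }
        return clr;     /* bits to acknowledge in one register write */
}

int main(void)
{
        uint32_t stat = 0x7 & INTR_STS_BITS;   /* all three pending at once */

        printf("ack mask 0x%x\n", (unsigned)service(stat));
        return 0;
}
```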
@@ -3223,17 +3055,6 @@ qlafx00_start_scsi(srb_t *sp)
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
- /* Forcing marker needed for now */
- vha->marker_needed = 0;
-
- /* Send marker if required */
- if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
- QLA_SUCCESS)
- return QLA_FUNCTION_FAILED;
- vha->marker_needed = 0;
- }
-
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3284,7 +3105,9 @@ qlafx00_start_scsi(srb_t *sp)
memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
- lcmd_pkt.handle_hi = 0;
+ lcmd_pkt.reserved_0 = 0;
+ lcmd_pkt.port_path_ctrl = 0;
+ lcmd_pkt.reserved_1 = 0;
lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
@@ -3364,8 +3187,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
tm_iocb.entry_count = 1;
tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
- tm_iocb.handle_hi = 0;
- tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
+ tm_iocb.reserved_0 = 0;
tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 6cd7072cc0ff..e529dfaeb854 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -22,13 +22,16 @@ struct cmd_type_7_fx00 {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint32_t handle_hi;
+ uint8_t reserved_0;
+ uint8_t port_path_ctrl;
+ uint16_t reserved_1;
__le16 tgt_idx; /* Target Idx. */
uint16_t timeout; /* Command timeout. */
__le16 dseg_count; /* Data segment count. */
- uint16_t scsi_rsp_dsd_len;
+ uint8_t scsi_rsp_dsd_len;
+ uint8_t reserved_2;
struct scsi_lun lun; /* LUN (LE). */
@@ -47,30 +50,6 @@ struct cmd_type_7_fx00 {
uint32_t dseg_0_len; /* Data segment 0 length. */
};
-/*
- * ISP queue - marker entry structure definition.
- */
-struct mrk_entry_fx00 {
- uint8_t entry_type; /* Entry type. */
- uint8_t entry_count; /* Entry count. */
- uint8_t handle_count; /* Handle count. */
- uint8_t entry_status; /* Entry Status. */
-
- uint32_t handle; /* System handle. */
- uint32_t handle_hi; /* System handle. */
-
- uint16_t tgt_id; /* Target ID. */
-
- uint8_t modifier; /* Modifier (7-0). */
- uint8_t reserved_1;
-
- uint8_t reserved_2[5];
-
- uint8_t lun[8]; /* FCP LUN (BE). */
- uint8_t reserved_3[36];
-};
-
-
#define STATUS_TYPE_FX00 0x01 /* Status entry. */
struct sts_entry_fx00 {
uint8_t entry_type; /* Entry type. */
@@ -79,7 +58,7 @@ struct sts_entry_fx00 {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint32_t handle_hi; /* System handle. */
+ uint32_t reserved_3; /* System handle. */
__le16 comp_status; /* Completion status. */
uint16_t reserved_0; /* OX_ID used by the firmware. */
@@ -102,7 +81,7 @@ struct sts_entry_fx00 {
struct multi_sts_entry_fx00 {
uint8_t entry_type; /* Entry type. */
- uint8_t sys_define; /* System defined. */
+ uint8_t entry_count; /* Entry count. */
uint8_t handle_count;
uint8_t entry_status;
@@ -118,15 +97,13 @@ struct tsk_mgmt_entry_fx00 {
__le32 handle; /* System handle. */
- uint32_t handle_hi; /* System handle. */
+ uint32_t reserved_0;
__le16 tgt_id; /* Target Idx. */
uint16_t reserved_1;
-
- uint16_t delay; /* Activity delay in seconds. */
-
- __le16 timeout; /* Command timeout. */
+ uint16_t reserved_3;
+ uint16_t reserved_4;
struct scsi_lun lun; /* LUN (LE). */
@@ -144,13 +121,13 @@ struct abort_iocb_entry_fx00 {
uint8_t entry_status; /* Entry Status. */
__le32 handle; /* System handle. */
- __le32 handle_hi; /* System handle. */
+ __le32 reserved_0;
__le16 tgt_id_sts; /* Completion status. */
__le16 options;
__le32 abort_handle; /* System handle. */
- __le32 abort_handle_hi; /* System handle. */
+ __le32 reserved_2;
__le16 req_que_no;
uint8_t reserved_1[38];
@@ -171,8 +148,7 @@ struct ioctl_iocb_entry_fx00 {
__le32 dataword_r; /* Data word returned */
uint32_t adapid; /* Adapter ID */
- uint32_t adapid_hi; /* Adapter ID high */
- uint32_t reserved_1;
+ uint32_t dataword_r_extra;
__le32 seq_no;
uint8_t reserved_2[20];
@@ -360,11 +336,7 @@ struct config_info_data {
#define QLAFX00_INTR_MB_CMPLT 0x1
#define QLAFX00_INTR_RSP_CMPLT 0x2
-#define QLAFX00_INTR_MB_RSP_CMPLT 0x3
#define QLAFX00_INTR_ASYNC_CMPLT 0x4
-#define QLAFX00_INTR_MB_ASYNC_CMPLT 0x5
-#define QLAFX00_INTR_RSP_ASYNC_CMPLT 0x6
-#define QLAFX00_INTR_ALL_CMPLT 0x7
#define QLAFX00_MBA_SYSTEM_ERR 0x8002
#define QLAFX00_MBA_TEMP_OVER 0x8005
@@ -548,4 +520,7 @@ struct mr_data_fx00 {
/* Max conncurrent IOs that can be queued */
#define QLAFX00_MAX_CANQUEUE 1024
+/* IOCTL IOCB abort success */
+#define QLAFX00_IOCTL_ICOB_ABORT_SUCCESS 0x68
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 1e6ba4a369e2..5511e24b1f11 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1664,10 +1664,10 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
/* Mapping of IO base pointer */
if (IS_QLA8044(ha)) {
ha->iobase =
- (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase);
+ (device_reg_t *)((uint8_t *)ha->nx_pcibase);
} else if (IS_QLA82XX(ha)) {
ha->iobase =
- (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
+ (device_reg_t *)((uint8_t *)ha->nx_pcibase +
0xbc000 + (ha->pdev->devfn << 11));
}
@@ -4502,3 +4502,20 @@ exit:
qla82xx_idc_unlock(ha);
return rval;
}
+
+void
+qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->allow_cna_fw_dump)
+ return;
+
+ scsi_block_requests(vha->host);
+ ha->flags.isp82xx_no_md_cap = 1;
+ qla82xx_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla82xx_idc_unlock(ha);
+ qla2x00_wait_for_chip_reset(vha);
+ scsi_unblock_requests(vha->host);
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index f60989d729a8..86cf10815db0 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1578,8 +1578,8 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha)
do {
if (time_after_eq(jiffies, dev_init_timeout)) {
ql_log(ql_log_info, vha, 0xb0c4,
- "%s: Non Reset owner DEV INIT "
- "TIMEOUT!\n", __func__);
+ "%s: Non Reset owner: Reset Ack Timeout!\n",
+ __func__);
break;
}
@@ -2014,8 +2014,6 @@ qla8044_watchdog(struct scsi_qla_host *vha)
/* don't poll if reset is going on or FW hang in quiescent state */
if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
@@ -3715,3 +3713,19 @@ exit_isp_reset:
return rval;
}
+void
+qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->allow_cna_fw_dump)
+ return;
+
+ scsi_block_requests(vha->host);
+ ha->flags.isp82xx_no_md_cap = 1;
+ qla8044_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla8044_idc_unlock(ha);
+ qla2x00_wait_for_chip_reset(vha);
+ scsi_unblock_requests(vha->host);
+}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 89a53002b585..19e99cc33724 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -120,15 +120,17 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
- " Enable T10-CRC-DIF "
- " Default is 0 - No DIF Support. 1 - Enable it"
- ", 2 - Enable DIF for all types, except Type 0.");
+ " Enable T10-CRC-DIF:\n"
+ " Default is 2.\n"
+ " 0 -- No DIF Support\n"
+ " 1 -- Enable DIF for all types\n"
+ " 2 -- Enable DIF for all types, except Type 0.\n");
int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
" Enable T10-CRC-DIF Error isolation by HBA:\n"
- " Default is 1.\n"
+ " Default is 2.\n"
" 0 -- Error isolation disabled\n"
" 1 -- Error isolation enabled only for DIX Type 0\n"
" 2 -- Error isolation enabled for all Types\n");
@@ -1975,7 +1977,7 @@ static struct isp_operations qla82xx_isp_ops = {
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = qla24xx_read_nvram_data,
.write_nvram = qla24xx_write_nvram_data,
- .fw_dump = qla24xx_fw_dump,
+ .fw_dump = qla82xx_fw_dump,
.beacon_on = qla82xx_beacon_on,
.beacon_off = qla82xx_beacon_off,
.beacon_blink = NULL,
@@ -2013,11 +2015,11 @@ static struct isp_operations qla8044_isp_ops = {
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = NULL,
.write_nvram = NULL,
- .fw_dump = qla24xx_fw_dump,
+ .fw_dump = qla8044_fw_dump,
.beacon_on = qla82xx_beacon_on,
.beacon_off = qla82xx_beacon_off,
.beacon_blink = NULL,
- .read_optrom = qla82xx_read_optrom_data,
+ .read_optrom = qla8044_read_optrom_data,
.write_optrom = qla8044_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
@@ -2078,7 +2080,7 @@ static struct isp_operations qlafx00_isp_ops = {
.intr_handler = qlafx00_intr_handler,
.enable_intrs = qlafx00_enable_intrs,
.disable_intrs = qlafx00_disable_intrs,
- .abort_command = qlafx00_abort_command,
+ .abort_command = qla24xx_async_abort_command,
.target_reset = qlafx00_abort_target,
.lun_reset = qlafx00_lun_reset,
.fabric_login = NULL,
@@ -2102,6 +2104,44 @@ static struct isp_operations qlafx00_isp_ops = {
.initialize_adapter = qlafx00_initialize_adapter,
};
+static struct isp_operations qla27xx_isp_ops = {
+ .pci_config = qla25xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla81xx_update_fw_options,
+ .load_risc = qla81xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = NULL,
+ .write_nvram = NULL,
+ .fw_dump = qla27xx_fwdump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla83xx_beacon_blink,
+ .read_optrom = qla25xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla83xx_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
static inline void
qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
@@ -2223,21 +2263,29 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
case PCI_DEVICE_ID_QLOGIC_ISPF001:
ha->device_type |= DT_ISPFX00;
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2071:
+ ha->device_type |= DT_ISP2071;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
}
if (IS_QLA82XX(ha))
- ha->port_no = !(ha->portnum & 1);
- else
+ ha->port_no = ha->portnum & 1;
+ else {
/* Get adapter physical port no from interrupt pin register. */
pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+ if (IS_QLA27XX(ha))
+ ha->port_no--;
+ else
+ ha->port_no = !(ha->port_no & 1);
+ }
- if (ha->port_no & 1)
- ha->flags.port0 = 1;
- else
- ha->flags.port0 = 0;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
"device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
- ha->device_type, ha->flags.port0, ha->fw_srisc_address);
+ ha->device_type, ha->port_no, ha->fw_srisc_address);
}
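
Editor's note: with the port0 flag gone, the code above derives a zero-based port number directly — ISP27XX takes the PCI interrupt pin minus one, older ISPs keep the inverted odd/even mapping, and ISP82XX uses the low bit of the function number. A small sketch of the first two mappings (values are illustrative only):

```c
#include <stdio.h>

/* ISP27XX scheme: interrupt pin 1..4 maps directly to port 0..3. */
static unsigned port_no_27xx(unsigned intr_pin)
{
        return intr_pin - 1;
}

/* Legacy scheme: an odd interrupt pin means port 0, an even pin port 1. */
static unsigned port_no_legacy(unsigned intr_pin)
{
        return !(intr_pin & 1);
}

int main(void)
{
        unsigned pin;

        for (pin = 1; pin <= 4; pin++)
                printf("pin %u -> 27xx port %u, legacy port %u\n",
                       pin, port_no_27xx(pin), port_no_legacy(pin));
        return 0;
}
```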
static void
@@ -2297,7 +2345,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2334,13 +2383,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ha->hardware_lock);
spin_lock_init(&ha->vport_slock);
mutex_init(&ha->selflogin_lock);
+ mutex_init(&ha->optrom_mutex);
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
/* Set EEH reset type to fundamental if required by hba */
if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
- IS_QLA83XX(ha))
+ IS_QLA83XX(ha) || IS_QLA27XX(ha))
pdev->needs_freset = 1;
ha->prev_topology = 0;
@@ -2488,7 +2538,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
req_length = REQUEST_ENTRY_CNT_FX00;
rsp_length = RESPONSE_ENTRY_CNT_FX00;
- ha->init_cb_size = sizeof(struct init_cb_fx);
ha->isp_ops = &qlafx00_isp_ops;
ha->port_down_retry_count = 30; /* default value */
ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
@@ -2497,6 +2546,22 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mr.fw_hbt_en = 1;
ha->mr.host_info_resend = false;
ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
+ } else if (IS_QLA27XX(ha)) {
+ ha->portnum = PCI_FUNC(ha->pdev->devfn);
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_83XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla27xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
}
ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -2536,7 +2601,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flags.enable_64bit_addressing ? "enable" :
"disable");
ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
- if (!ret) {
+ if (ret) {
ql_log_pci(ql_log_fatal, pdev, 0x0031,
"Failed to allocate memory for adapter, aborting.\n");
@@ -2561,10 +2626,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
- if (IS_QLAFX00(ha))
- host->can_queue = QLAFX00_MAX_CANQUEUE;
- else
- host->can_queue = req->length + 128;
if (IS_QLA2XXX_MIDTYPE(ha))
base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
else
@@ -2587,11 +2648,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!IS_QLA82XX(ha))
host->sg_tablesize = QLA_SG_ALL;
}
- ql_dbg(ql_dbg_init, base_vha, 0x0032,
- "can_queue=%d, req=%p, "
- "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
- host->can_queue, base_vha->req,
- base_vha->mgmt_svr_loop_id, host->sg_tablesize);
host->max_id = ha->max_fibre_devices;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
@@ -2646,7 +2702,7 @@ que_init:
req->req_q_out = &ha->iobase->isp24.req_q_out;
rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
- if (ha->mqenable || IS_QLA83XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -2707,6 +2763,16 @@ que_init:
goto probe_failed;
}
+ if (IS_QLAFX00(ha))
+ host->can_queue = QLAFX00_MAX_CANQUEUE;
+ else
+ host->can_queue = req->num_outstanding_cmds - 10;
+
+ ql_dbg(ql_dbg_init, base_vha, 0x0032,
+ "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
+ host->can_queue, base_vha->req,
+ base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+
if (ha->mqenable) {
if (qla25xx_setup_mode(base_vha)) {
ql_log(ql_log_warn, base_vha, 0x00ec,
@@ -2887,9 +2953,9 @@ probe_hw_failed:
iospace_config_failed:
if (IS_P3P_TYPE(ha)) {
if (!ha->nx_pcibase)
- iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ iounmap((device_reg_t *)ha->nx_pcibase);
if (!ql2xdbwr)
- iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ iounmap((device_reg_t *)ha->nxdb_wr_ptr);
} else {
if (ha->iobase)
iounmap(ha->iobase);
@@ -3020,9 +3086,9 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
{
if (IS_QLA82XX(ha)) {
- iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ iounmap((device_reg_t *)ha->nx_pcibase);
if (!ql2xdbwr)
- iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ iounmap((device_reg_t *)ha->nxdb_wr_ptr);
} else {
if (ha->iobase)
iounmap(ha->iobase);
@@ -3033,7 +3099,7 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
if (ha->mqiobase)
iounmap(ha->mqiobase);
- if (IS_QLA83XX(ha) && ha->msixbase)
+ if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
iounmap(ha->msixbase);
}
}
@@ -3447,7 +3513,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->npiv_info = NULL;
/* Get consistent memory allocated for EX-INIT-CB. */
- if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) {
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
@@ -3478,10 +3544,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
else {
qla2x00_set_reserved_loop_ids(ha);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
- "loop_id_map=%p. \n", ha->loop_id_map);
+ "loop_id_map=%p.\n", ha->loop_id_map);
}
- return 1;
+ return 0;
fail_async_pd:
dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
@@ -3562,22 +3628,28 @@ static void
qla2x00_free_fw_dump(struct qla_hw_data *ha)
{
if (ha->fce)
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
- ha->fce_dma);
+ dma_free_coherent(&ha->pdev->dev,
+ FCE_SIZE, ha->fce, ha->fce_dma);
- if (ha->fw_dump) {
- if (ha->eft)
- dma_free_coherent(&ha->pdev->dev,
- ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
+ if (ha->eft)
+ dma_free_coherent(&ha->pdev->dev,
+ EFT_SIZE, ha->eft, ha->eft_dma);
+
+ if (ha->fw_dump)
vfree(ha->fw_dump);
- }
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+
ha->fce = NULL;
ha->fce_dma = 0;
ha->eft = NULL;
ha->eft_dma = 0;
- ha->fw_dump = NULL;
ha->fw_dumped = 0;
ha->fw_dump_reading = 0;
+ ha->fw_dump = NULL;
+ ha->fw_dump_len = 0;
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
}
/*
@@ -5242,7 +5314,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Firmware interface routines. */
-#define FW_BLOBS 10
+#define FW_BLOBS 11
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
@@ -5253,6 +5325,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_ISP82XX 7
#define FW_ISP2031 8
#define FW_ISP8031 9
+#define FW_ISP2071 10
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -5264,6 +5337,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_FILE_ISP82XX "ql8200_fw.bin"
#define FW_FILE_ISP2031 "ql2600_fw.bin"
#define FW_FILE_ISP8031 "ql8300_fw.bin"
+#define FW_FILE_ISP2071 "ql2700_fw.bin"
+
static DEFINE_MUTEX(qla_fw_lock);
@@ -5278,6 +5353,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP82XX, },
{ .name = FW_FILE_ISP2031, },
{ .name = FW_FILE_ISP8031, },
+ { .name = FW_FILE_ISP2071, },
};
struct fw_blob *
@@ -5306,6 +5382,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
blob = &qla_fw_blobs[FW_ISP2031];
} else if (IS_QLA8031(ha)) {
blob = &qla_fw_blobs[FW_ISP8031];
+ } else if (IS_QLA2071(ha)) {
+ blob = &qla_fw_blobs[FW_ISP2071];
} else {
return NULL;
}
@@ -5635,6 +5713,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
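
For context on the qla_os.c hunks above: ISP2071 support is wired in by bumping FW_BLOBS, defining a new FW_ISP2071 index plus FW_FILE_ISP2071 name, appending one slot to qla_fw_blobs[], and adding the PCI ID so the request_firmware lookup can key on adapter type. The following trimmed, standalone sketch only illustrates that index-keyed table pattern; FW_BLOBS and the index values are shrunk for the example and are not the driver's real values, though the firmware file names are taken from the patch.

/* Standalone sketch (not driver code) of the FW_ISP*-indexed blob table. */
#include <stdio.h>

#define FW_BLOBS	3
#define FW_ISP21XX	0
#define FW_ISP22XX	1
#define FW_ISP2071	2	/* new index, appended alongside the new name */

static const char *fw_names[FW_BLOBS] = {
	[FW_ISP21XX] = "ql2100_fw.bin",
	[FW_ISP22XX] = "ql2200_fw.bin",
	[FW_ISP2071] = "ql2700_fw.bin",	/* slot added together with the index */
};

int main(void)
{
	/* Lookup mirrors qla2x00_request_firmware(): pick the blob by index. */
	printf("ISP2071 firmware image: %s\n", fw_names[FW_ISP2071]);
	return 0;
}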
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bd56cde795fc..f28123e8ed65 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -568,7 +568,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
else if (IS_P3P_TYPE(ha)) {
*start = FA_FLASH_LAYOUT_ADDR_82;
goto end;
- } else if (IS_QLA83XX(ha)) {
+ } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
*start = FA_FLASH_LAYOUT_ADDR_83;
goto end;
}
@@ -682,7 +682,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
/* Assign FCP prio region since older adapters may not have FLT, or
FCP prio region in its FLT.
*/
- ha->flt_region_fcp_prio = ha->flags.port0 ?
+ ha->flt_region_fcp_prio = (ha->port_no == 0) ?
fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
ha->flt_region_flt = flt_addr;
@@ -743,47 +743,71 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_vpd_nvram = start;
if (IS_P3P_TYPE(ha))
break;
- if (ha->flags.port0)
+ if (ha->port_no == 0)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_1:
if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
break;
- if (!ha->flags.port0)
+ if (ha->port_no == 1)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_VPD_2:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 2)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_VPD_3:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 3)
ha->flt_region_vpd = start;
break;
case FLT_REG_NVRAM_0:
if (IS_QLA8031(ha))
break;
- if (ha->flags.port0)
+ if (ha->port_no == 0)
ha->flt_region_nvram = start;
break;
case FLT_REG_NVRAM_1:
if (IS_QLA8031(ha))
break;
- if (!ha->flags.port0)
+ if (ha->port_no == 1)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_NVRAM_2:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 2)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_NVRAM_3:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 3)
ha->flt_region_nvram = start;
break;
case FLT_REG_FDT:
ha->flt_region_fdt = start;
break;
case FLT_REG_NPIV_CONF_0:
- if (ha->flags.port0)
+ if (ha->port_no == 0)
ha->flt_region_npiv_conf = start;
break;
case FLT_REG_NPIV_CONF_1:
- if (!ha->flags.port0)
+ if (ha->port_no == 1)
ha->flt_region_npiv_conf = start;
break;
case FLT_REG_GOLD_FW:
ha->flt_region_gold_fw = start;
break;
case FLT_REG_FCP_PRIO_0:
- if (ha->flags.port0)
+ if (ha->port_no == 0)
ha->flt_region_fcp_prio = start;
break;
case FLT_REG_FCP_PRIO_1:
- if (!ha->flags.port0)
+ if (ha->port_no == 1)
ha->flt_region_fcp_prio = start;
break;
case FLT_REG_BOOT_CODE_82XX:
@@ -813,13 +837,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_FCOE_NVRAM_0:
if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
break;
- if (ha->flags.port0)
+ if (ha->port_no == 0)
ha->flt_region_nvram = start;
break;
case FLT_REG_FCOE_NVRAM_1:
if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
break;
- if (!ha->flags.port0)
+ if (ha->port_no == 1)
ha->flt_region_nvram = start;
break;
}
@@ -832,12 +856,12 @@ no_flash_data:
ha->flt_region_fw = def_fw[def];
ha->flt_region_boot = def_boot[def];
ha->flt_region_vpd_nvram = def_vpd_nvram[def];
- ha->flt_region_vpd = ha->flags.port0 ?
+ ha->flt_region_vpd = (ha->port_no == 0) ?
def_vpd0[def] : def_vpd1[def];
- ha->flt_region_nvram = ha->flags.port0 ?
+ ha->flt_region_nvram = (ha->port_no == 0) ?
def_nvram0[def] : def_nvram1[def];
ha->flt_region_fdt = def_fdt[def];
- ha->flt_region_npiv_conf = ha->flags.port0 ?
+ ha->flt_region_npiv_conf = (ha->port_no == 0) ?
def_npiv_conf0[def] : def_npiv_conf1[def];
done:
ql_dbg(ql_dbg_init, vha, 0x004a,
@@ -989,7 +1013,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
- !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
return QLA_SUCCESS;
ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -1192,7 +1216,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
struct qla_hw_data *ha = vha->hw;
/* Prepare burst-capable write on supported ISPs. */
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+ if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha)) &&
!(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
@@ -1675,7 +1700,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
if (!IS_QLA83XX(ha))
goto out;
- if (ha->flags.port0)
+ if (ha->port_no == 0)
led_select_value = QLA83XX_LED_PORT0;
else
led_select_value = QLA83XX_LED_PORT1;
@@ -2332,7 +2357,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
*/
rest_addr = 0xffff;
sec_mask = 0x10000;
- break;
+ break;
}
/*
* ST m29w010b part - 16kb sector size
@@ -2558,7 +2583,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
uint32_t faddr, left, burst;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha))
goto try_fast;
if (offset & 0xfff)
goto slow_read;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
new file mode 100644
index 000000000000..a804e9b744bb
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -0,0 +1,909 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_tmpl.h"
+
+/* note default template is in big endian */
+static const uint32_t ql27xx_fwdt_default_template[] = {
+ 0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
+ 0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x04010000, 0x14000000, 0x00000000,
+ 0x02000000, 0x44000000, 0x09010000, 0x10000000,
+ 0x00000000, 0x02000000, 0x01010000, 0x1c000000,
+ 0x00000000, 0x02000000, 0x00600000, 0x00000000,
+ 0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
+ 0x02000000, 0x00600000, 0x00000000, 0xcc000000,
+ 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
+ 0x10600000, 0x00000000, 0xd4000000, 0x01010000,
+ 0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
+ 0x00000060, 0xf0000000, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00700000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10700000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x40700000, 0x041000c0,
+ 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
+ 0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
+ 0x18000000, 0x00000000, 0x02000000, 0x007c0000,
+ 0x040300c4, 0x00010000, 0x18000000, 0x00000000,
+ 0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
+ 0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
+ 0x00000000, 0xc0000000, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x007c0000, 0x04200000,
+ 0x0b010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x0c000000, 0x00000000, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x0a000000, 0x04200080, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00300000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10300000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x20300000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
+ 0x00000000, 0x02000000, 0x06010000, 0x1c000000,
+ 0x00000000, 0x02000000, 0x01000000, 0x00000200,
+ 0xff230200, 0x06010000, 0x1c000000, 0x00000000,
+ 0x02000000, 0x02000000, 0x00001000, 0x00000000,
+ 0x07010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x00000000, 0x01000000, 0x07010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00000000, 0x02000000,
+ 0x07010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x00000000, 0x03000000, 0x0d010000, 0x14000000,
+ 0x00000000, 0x02000000, 0x00000000, 0xff000000,
+ 0x10000000, 0x00000000, 0x00000080,
+};
+
+static inline void __iomem *
+qla27xx_isp_reg(struct scsi_qla_host *vha)
+{
+ return &vha->hw->iobase->isp24;
+}
+
+static inline void
+qla27xx_insert16(uint16_t value, void *buf, ulong *len)
+{
+ if (buf) {
+ buf += *len;
+ *(__le16 *)buf = cpu_to_le16(value);
+ }
+ *len += sizeof(value);
+}
+
+static inline void
+qla27xx_insert32(uint32_t value, void *buf, ulong *len)
+{
+ if (buf) {
+ buf += *len;
+ *(__le32 *)buf = cpu_to_le32(value);
+ }
+ *len += sizeof(value);
+}
+
+static inline void
+qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
+{
+ ulong cnt = size;
+
+ if (buf && mem) {
+ buf += *len;
+ while (cnt >= sizeof(uint32_t)) {
+ *(__le32 *)buf = cpu_to_le32p(mem);
+ buf += sizeof(uint32_t);
+ mem += sizeof(uint32_t);
+ cnt -= sizeof(uint32_t);
+ }
+ if (cnt)
+ memcpy(buf, mem, cnt);
+ }
+ *len += size;
+}
+
+static inline void
+qla27xx_read8(void *window, void *buf, ulong *len)
+{
+ uint8_t value = ~0;
+
+ if (buf) {
+ value = RD_REG_BYTE((__iomem void *)window);
+ ql_dbg(ql_dbg_misc, NULL, 0xd011,
+ "%s: -> %x\n", __func__, value);
+ }
+ qla27xx_insert32(value, buf, len);
+}
+
+static inline void
+qla27xx_read16(void *window, void *buf, ulong *len)
+{
+ uint16_t value = ~0;
+
+ if (buf) {
+ value = RD_REG_WORD((__iomem void *)window);
+ ql_dbg(ql_dbg_misc, NULL, 0xd012,
+ "%s: -> %x\n", __func__, value);
+ }
+ qla27xx_insert32(value, buf, len);
+}
+
+static inline void
+qla27xx_read32(void *window, void *buf, ulong *len)
+{
+ uint32_t value = ~0;
+
+ if (buf) {
+ value = RD_REG_DWORD((__iomem void *)window);
+ ql_dbg(ql_dbg_misc, NULL, 0xd013,
+ "%s: -> %x\n", __func__, value);
+ }
+ qla27xx_insert32(value, buf, len);
+}
+
+static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
+{
+ return
+ (width == 1) ? qla27xx_read8 :
+ (width == 2) ? qla27xx_read16 :
+ qla27xx_read32;
+}
+
+static inline void
+qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
+ uint offset, void *buf, ulong *len)
+{
+ void *window = (void *)reg + offset;
+
+ if (buf) {
+ ql_dbg(ql_dbg_misc, NULL, 0xd014,
+ "%s: @%x\n", __func__, offset);
+ }
+ qla27xx_insert32(offset, buf, len);
+ qla27xx_read32(window, buf, len);
+}
+
+static inline void
+qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
+ uint offset, uint32_t data, void *buf)
+{
+ __iomem void *window = reg + offset;
+
+ if (buf) {
+ ql_dbg(ql_dbg_misc, NULL, 0xd015,
+ "%s: @%x <- %x\n", __func__, offset, data);
+ WRT_REG_DWORD(window, data);
+ }
+}
+
+static inline void
+qla27xx_read_window(__iomem struct device_reg_24xx *reg,
+ uint32_t base, uint offset, uint count, uint width, void *buf,
+ ulong *len)
+{
+ void *window = (void *)reg + offset;
+ void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
+
+ if (buf) {
+ ql_dbg(ql_dbg_misc, NULL, 0xd016,
+ "%s: base=%x offset=%x count=%x width=%x\n",
+ __func__, base, offset, count, width);
+ }
+ qla27xx_write_reg(reg, IOBASE_ADDR, base, buf);
+ while (count--) {
+ qla27xx_insert32(base, buf, len);
+ readn(window, buf, len);
+ window += width;
+ base += width;
+ }
+}
+
+static inline void
+qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
+{
+ if (buf)
+ ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
+}
+
+static int
+qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd100,
+ "%s: nop [%lx]\n", __func__, *len);
+ qla27xx_skip_entry(ent, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd1ff,
+ "%s: end [%lx]\n", __func__, *len);
+ qla27xx_skip_entry(ent, buf);
+
+ /* terminate */
+ return true;
+}
+
+static int
+qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd200,
+ "%s: rdio t1 [%lx]\n", __func__, *len);
+ qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
+ ent->t256.reg_count, ent->t256.reg_width, buf, len);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd201,
+ "%s: wrio t1 [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
+ qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd202,
+ "%s: rdio t2 [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
+ qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
+ ent->t258.reg_count, ent->t258.reg_width, buf, len);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd203,
+ "%s: wrio t2 [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
+ qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
+ qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd204,
+ "%s: rdpci [%lx]\n", __func__, *len);
+ qla27xx_read_reg(reg, ent->t260.pci_addr, buf, len);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd205,
+ "%s: wrpci [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, ent->t261.pci_addr, ent->t261.write_data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong dwords;
+ ulong start;
+ ulong end;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd206,
+ "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
+ start = ent->t262.start_addr;
+ end = ent->t262.end_addr;
+
+ if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
+ ;
+ } else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
+ end = vha->hw->fw_memory_size;
+ if (buf)
+ ent->t262.end_addr = end;
+ } else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
+ start = vha->hw->fw_shared_ram_start;
+ end = vha->hw->fw_shared_ram_end;
+ if (buf) {
+ ent->t262.start_addr = start;
+ ent->t262.end_addr = end;
+ }
+ } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
+ ql_dbg(ql_dbg_misc, vha, 0xd021,
+ "%s: unsupported ddr ram\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd022,
+ "%s: unknown area %u\n", __func__, ent->t262.ram_area);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+
+ if (end < start) {
+ ql_dbg(ql_dbg_misc, vha, 0xd023,
+ "%s: bad range (start=%x end=%x)\n", __func__,
+ ent->t262.start_addr, ent->t262.end_addr);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+
+ dwords = end - start + 1;
+ if (buf) {
+ ql_dbg(ql_dbg_misc, vha, 0xd024,
+ "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
+ buf += *len;
+ qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+ }
+ *len += dwords * sizeof(uint32_t);
+done:
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ uint count = 0;
+ uint i;
+ uint length;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd207,
+ "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
+ if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+ for (i = 0; i < vha->hw->max_req_queues; i++) {
+ struct req_que *req = vha->hw->req_q_map[i];
+ if (req || !buf) {
+ length = req ?
+ req->length : REQUEST_ENTRY_CNT_24XX;
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(length, buf, len);
+ qla27xx_insertbuf(req ? req->ring : NULL,
+ length * sizeof(*req->ring), buf, len);
+ count++;
+ }
+ }
+ } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+ for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+ if (rsp || !buf) {
+ length = rsp ?
+ rsp->length : RESPONSE_ENTRY_CNT_MQ;
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(length, buf, len);
+ qla27xx_insertbuf(rsp ? rsp->ring : NULL,
+ length * sizeof(*rsp->ring), buf, len);
+ count++;
+ }
+ }
+ } else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
+ ql_dbg(ql_dbg_misc, vha, 0xd025,
+ "%s: unsupported atio queue\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd026,
+ "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+
+ if (buf)
+ ent->t263.num_queues = count;
+done:
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd208,
+ "%s: getfce [%lx]\n", __func__, *len);
+ if (vha->hw->fce) {
+ if (buf) {
+ ent->t264.fce_trace_size = FCE_SIZE;
+ ent->t264.write_pointer = vha->hw->fce_wr;
+ ent->t264.base_pointer = vha->hw->fce_dma;
+ ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
+ ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
+ ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
+ ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
+ ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
+ ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
+ }
+ qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd027,
+ "%s: missing fce\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd209,
+ "%s: pause risc [%lx]\n", __func__, *len);
+ if (buf)
+ qla24xx_pause_risc(reg);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd20a,
+ "%s: reset risc [%lx]\n", __func__, *len);
+ if (buf)
+ qla24xx_soft_reset(vha->hw);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd20b,
+ "%s: dis intr [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd20c,
+ "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
+ if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
+ if (vha->hw->eft) {
+ if (buf) {
+ ent->t268.buf_size = EFT_SIZE;
+ ent->t268.start_addr = vha->hw->eft_dma;
+ }
+ qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing eft\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ } else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) {
+ ql_dbg(ql_dbg_misc, vha, 0xd029,
+ "%s: unsupported exchange offload buffer\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ } else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) {
+ ql_dbg(ql_dbg_misc, vha, 0xd02a,
+ "%s: unsupported extended login buffer\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd02b,
+ "%s: unknown buf %x\n", __func__, ent->t268.buf_type);
+ qla27xx_skip_entry(ent, buf);
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd20d,
+ "%s: scratch [%lx]\n", __func__, *len);
+ qla27xx_insert32(0xaaaaaaaa, buf, len);
+ qla27xx_insert32(0xbbbbbbbb, buf, len);
+ qla27xx_insert32(0xcccccccc, buf, len);
+ qla27xx_insert32(0xdddddddd, buf, len);
+ qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
+ if (buf)
+ ent->t269.scratch_size = 5 * sizeof(uint32_t);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ void *window = (void *)reg + 0xc4;
+ ulong dwords = ent->t270.count;
+ ulong addr = ent->t270.addr;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd20e,
+ "%s: rdremreg [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+ while (dwords--) {
+ qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
+ qla27xx_read_reg(reg, 0xc4, buf, len);
+ qla27xx_insert32(addr, buf, len);
+ qla27xx_read32(window, buf, len);
+ addr++;
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ ulong addr = ent->t271.addr;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd20f,
+ "%s: wrremreg [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+ qla27xx_read_reg(reg, 0xc4, buf, len);
+ qla27xx_insert32(addr, buf, len);
+ qla27xx_write_reg(reg, 0xc0, addr, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong dwords = ent->t272.count;
+ ulong start = ent->t272.addr;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd210,
+ "%s: rdremram [%lx]\n", __func__, *len);
+ if (buf) {
+ ql_dbg(ql_dbg_misc, vha, 0xd02c,
+ "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
+ buf += *len;
+ qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
+ }
+ *len += dwords * sizeof(uint32_t);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong dwords = ent->t273.count;
+ ulong addr = ent->t273.addr;
+ uint32_t value;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd211,
+ "%s: pcicfg [%lx]\n", __func__, *len);
+ while (dwords--) {
+ value = ~0;
+ if (pci_read_config_dword(vha->hw->pdev, addr, &value))
+ ql_dbg(ql_dbg_misc, vha, 0xd02d,
+ "%s: failed pcicfg read at %lx\n", __func__, addr);
+ qla27xx_insert32(addr, buf, len);
+ qla27xx_insert32(value, buf, len);
+ addr += 4;
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd2ff,
+ "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
+ qla27xx_skip_entry(ent, buf);
+
+ return false;
+}
+
+struct qla27xx_fwdt_entry_call {
+ int type;
+ int (*call)(
+ struct scsi_qla_host *,
+ struct qla27xx_fwdt_entry *,
+ void *,
+ ulong *);
+};
+
+static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
+ { ENTRY_TYPE_NOP , qla27xx_fwdt_entry_t0 } ,
+ { ENTRY_TYPE_TMP_END , qla27xx_fwdt_entry_t255 } ,
+ { ENTRY_TYPE_RD_IOB_T1 , qla27xx_fwdt_entry_t256 } ,
+ { ENTRY_TYPE_WR_IOB_T1 , qla27xx_fwdt_entry_t257 } ,
+ { ENTRY_TYPE_RD_IOB_T2 , qla27xx_fwdt_entry_t258 } ,
+ { ENTRY_TYPE_WR_IOB_T2 , qla27xx_fwdt_entry_t259 } ,
+ { ENTRY_TYPE_RD_PCI , qla27xx_fwdt_entry_t260 } ,
+ { ENTRY_TYPE_WR_PCI , qla27xx_fwdt_entry_t261 } ,
+ { ENTRY_TYPE_RD_RAM , qla27xx_fwdt_entry_t262 } ,
+ { ENTRY_TYPE_GET_QUEUE , qla27xx_fwdt_entry_t263 } ,
+ { ENTRY_TYPE_GET_FCE , qla27xx_fwdt_entry_t264 } ,
+ { ENTRY_TYPE_PSE_RISC , qla27xx_fwdt_entry_t265 } ,
+ { ENTRY_TYPE_RST_RISC , qla27xx_fwdt_entry_t266 } ,
+ { ENTRY_TYPE_DIS_INTR , qla27xx_fwdt_entry_t267 } ,
+ { ENTRY_TYPE_GET_HBUF , qla27xx_fwdt_entry_t268 } ,
+ { ENTRY_TYPE_SCRATCH , qla27xx_fwdt_entry_t269 } ,
+ { ENTRY_TYPE_RDREMREG , qla27xx_fwdt_entry_t270 } ,
+ { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } ,
+ { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
+ { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
+ { -1 , qla27xx_fwdt_entry_other }
+};
+
+static inline int (*qla27xx_find_entry(int type))
+ (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
+{
+ struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
+
+ while (list->type != -1 && list->type != type)
+ list++;
+
+ return list->call;
+}
+
+static inline void *
+qla27xx_next_entry(void *p)
+{
+ struct qla27xx_fwdt_entry *ent = p;
+
+ return p + ent->hdr.entry_size;
+}
+
+static void
+qla27xx_walk_template(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
+{
+ struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
+ ulong count = tmp->entry_count;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd01a,
+ "%s: entry count %lx\n", __func__, count);
+ while (count--) {
+ if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
+ break;
+ ent = qla27xx_next_entry(ent);
+ }
+ ql_dbg(ql_dbg_misc, vha, 0xd01b,
+ "%s: len=%lx\n", __func__, *len);
+}
+
+static void
+qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
+{
+ tmp->capture_timestamp = jiffies;
+}
+
+static void
+qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
+{
+ uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
+ int rval = 0;
+
+ rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
+ v+0, v+1, v+2, v+3, v+4, v+5);
+
+ tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
+ tmp->driver_info[1] = v[5] << 8 | v[4];
+ tmp->driver_info[2] = 0x12345678;
+}
+
+static void
+qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
+ struct scsi_qla_host *vha)
+{
+ tmp->firmware_version[0] = vha->hw->fw_major_version;
+ tmp->firmware_version[1] = vha->hw->fw_minor_version;
+ tmp->firmware_version[2] = vha->hw->fw_subminor_version;
+ tmp->firmware_version[3] =
+ vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
+ tmp->firmware_version[4] =
+ vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
+}
+
+static void
+ql27xx_edit_template(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_template *tmp)
+{
+ qla27xx_time_stamp(tmp);
+ qla27xx_driver_info(tmp);
+ qla27xx_firmware_info(tmp, vha);
+}
+
+static inline uint32_t
+qla27xx_template_checksum(void *p, ulong size)
+{
+ uint32_t *buf = p;
+ uint64_t sum = 0;
+
+ size /= sizeof(*buf);
+
+ while (size--)
+ sum += *buf++;
+
+ sum = (sum & 0xffffffff) + (sum >> 32);
+
+ return ~sum;
+}
+
+static inline int
+qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
+{
+ return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
+}
+
+static inline int
+qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
+{
+ return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
+}
+
+static void
+qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
+{
+ struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+ ulong len;
+
+ if (qla27xx_fwdt_template_valid(tmp)) {
+ len = tmp->template_size;
+ tmp = memcpy(vha->hw->fw_dump, tmp, len);
+ ql27xx_edit_template(vha, tmp);
+ qla27xx_walk_template(vha, tmp, tmp, &len);
+ vha->hw->fw_dump_len = len;
+ vha->hw->fw_dumped = 1;
+ }
+}
+
+ulong
+qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
+{
+ struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+ ulong len = 0;
+
+ if (qla27xx_fwdt_template_valid(tmp)) {
+ len = tmp->template_size;
+ qla27xx_walk_template(vha, tmp, NULL, &len);
+ }
+
+ return len;
+}
+
+ulong
+qla27xx_fwdt_template_size(void *p)
+{
+ struct qla27xx_fwdt_template *tmp = p;
+
+ return tmp->template_size;
+}
+
+ulong
+qla27xx_fwdt_template_default_size(void)
+{
+ return sizeof(ql27xx_fwdt_default_template);
+}
+
+const void *
+qla27xx_fwdt_template_default(void)
+{
+ return ql27xx_fwdt_default_template;
+}
+
+int
+qla27xx_fwdt_template_valid(void *p)
+{
+ struct qla27xx_fwdt_template *tmp = p;
+
+ if (!qla27xx_verify_template_header(tmp)) {
+ ql_log(ql_log_warn, NULL, 0xd01c,
+ "%s: template type %x\n", __func__, tmp->template_type);
+ return false;
+ }
+
+ if (!qla27xx_verify_template_checksum(tmp)) {
+ ql_log(ql_log_warn, NULL, 0xd01d,
+ "%s: failed template checksum\n", __func__);
+ return false;
+ }
+
+ return true;
+}
+
+void
+qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ ulong flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+
+ if (!vha->hw->fw_dump)
+ ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
+ else if (!vha->hw->fw_dump_template)
+ ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
+ else
+ qla27xx_execute_fwdt_template(vha);
+
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
new file mode 100644
index 000000000000..c9d2fff4d964
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -0,0 +1,205 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA_DMP27_H__
+#define __QLA_DMP27_H__
+
+#define IOBASE_ADDR offsetof(struct device_reg_24xx, iobase_addr)
+
+struct __packed qla27xx_fwdt_template {
+ uint32_t template_type;
+ uint32_t entry_offset;
+ uint32_t template_size;
+ uint32_t reserved_1;
+
+ uint32_t entry_count;
+ uint32_t template_version;
+ uint32_t capture_timestamp;
+ uint32_t template_checksum;
+
+ uint32_t reserved_2;
+ uint32_t driver_info[3];
+
+ uint32_t saved_state[16];
+
+ uint32_t reserved_3[8];
+ uint32_t firmware_version[5];
+};
+
+#define TEMPLATE_TYPE_FWDUMP 99
+
+#define ENTRY_TYPE_NOP 0
+#define ENTRY_TYPE_TMP_END 255
+#define ENTRY_TYPE_RD_IOB_T1 256
+#define ENTRY_TYPE_WR_IOB_T1 257
+#define ENTRY_TYPE_RD_IOB_T2 258
+#define ENTRY_TYPE_WR_IOB_T2 259
+#define ENTRY_TYPE_RD_PCI 260
+#define ENTRY_TYPE_WR_PCI 261
+#define ENTRY_TYPE_RD_RAM 262
+#define ENTRY_TYPE_GET_QUEUE 263
+#define ENTRY_TYPE_GET_FCE 264
+#define ENTRY_TYPE_PSE_RISC 265
+#define ENTRY_TYPE_RST_RISC 266
+#define ENTRY_TYPE_DIS_INTR 267
+#define ENTRY_TYPE_GET_HBUF 268
+#define ENTRY_TYPE_SCRATCH 269
+#define ENTRY_TYPE_RDREMREG 270
+#define ENTRY_TYPE_WRREMREG 271
+#define ENTRY_TYPE_RDREMRAM 272
+#define ENTRY_TYPE_PCICFG 273
+
+#define CAPTURE_FLAG_PHYS_ONLY BIT_0
+#define CAPTURE_FLAG_PHYS_VIRT BIT_1
+
+#define DRIVER_FLAG_SKIP_ENTRY BIT_7
+
+struct __packed qla27xx_fwdt_entry {
+ struct __packed {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t reserved_1;
+
+ uint8_t capture_flags;
+ uint8_t reserved_2[2];
+ uint8_t driver_flags;
+ } hdr;
+ union __packed {
+ struct __packed {
+ } t0;
+
+ struct __packed {
+ } t255;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint8_t reg_width;
+ uint16_t reg_count;
+ uint8_t pci_offset;
+ } t256;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint32_t write_data;
+ uint8_t pci_offset;
+ uint8_t reserved[3];
+ } t257;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint8_t reg_width;
+ uint16_t reg_count;
+ uint8_t pci_offset;
+ uint8_t banksel_offset;
+ uint8_t reserved[3];
+ uint32_t bank;
+ } t258;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint32_t write_data;
+ uint8_t reserved[2];
+ uint8_t pci_offset;
+ uint8_t banksel_offset;
+ uint32_t bank;
+ } t259;
+
+ struct __packed {
+ uint8_t pci_addr;
+ uint8_t reserved[3];
+ } t260;
+
+ struct __packed {
+ uint8_t pci_addr;
+ uint8_t reserved[3];
+ uint32_t write_data;
+ } t261;
+
+ struct __packed {
+ uint8_t ram_area;
+ uint8_t reserved[3];
+ uint32_t start_addr;
+ uint32_t end_addr;
+ } t262;
+
+ struct __packed {
+ uint32_t num_queues;
+ uint8_t queue_type;
+ uint8_t reserved[3];
+ } t263;
+
+ struct __packed {
+ uint32_t fce_trace_size;
+ uint64_t write_pointer;
+ uint64_t base_pointer;
+ uint32_t fce_enable_mb0;
+ uint32_t fce_enable_mb2;
+ uint32_t fce_enable_mb3;
+ uint32_t fce_enable_mb4;
+ uint32_t fce_enable_mb5;
+ uint32_t fce_enable_mb6;
+ } t264;
+
+ struct __packed {
+ } t265;
+
+ struct __packed {
+ } t266;
+
+ struct __packed {
+ uint8_t pci_offset;
+ uint8_t reserved[3];
+ uint32_t data;
+ } t267;
+
+ struct __packed {
+ uint8_t buf_type;
+ uint8_t reserved[3];
+ uint32_t buf_size;
+ uint64_t start_addr;
+ } t268;
+
+ struct __packed {
+ uint32_t scratch_size;
+ } t269;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t count;
+ } t270;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t data;
+ } t271;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t count;
+ } t272;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t count;
+ } t273;
+ };
+};
+
+#define T262_RAM_AREA_CRITICAL_RAM 1
+#define T262_RAM_AREA_EXTERNAL_RAM 2
+#define T262_RAM_AREA_SHARED_RAM 3
+#define T262_RAM_AREA_DDR_RAM 4
+
+#define T263_QUEUE_TYPE_REQ 1
+#define T263_QUEUE_TYPE_RSP 2
+#define T263_QUEUE_TYPE_ATIO 3
+
+#define T268_BUF_TYPE_EXTD_TRACE 1
+#define T268_BUF_TYPE_EXCH_BUFOFF 2
+#define T268_BUF_TYPE_EXTD_LOGIN 3
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 31d19535b015..e36b94712544 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.06.00.12-k"
+#define QLA2XXX_VERSION "8.07.00.02-k"
#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 6
+#define QLA_DRIVER_MINOR_VER 7
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 919284834ad7..2eba35365920 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -1304,12 +1304,24 @@ static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
static int qla4_83xx_restart(struct scsi_qla_host *ha)
{
int ret_val = QLA_SUCCESS;
+ uint32_t idc_ctrl;
qla4_83xx_process_stop_seq(ha);
- /* Collect minidump*/
- if (!test_and_clear_bit(AF_83XX_NO_FW_DUMP, &ha->flags))
+ /*
+ * Collect minidump.
+ * If IDC_CTRL BIT1 is set, clear it before going to the INIT state
+ * and do not collect a minidump.
+ */
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+ qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
+ (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+ ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
+ __func__);
+ } else {
qla4_8xxx_get_minidump(ha);
+ }
qla4_83xx_process_init_seq(ha);
@@ -1664,3 +1676,23 @@ void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
__qla4_83xx_disable_pause(ha);
ha->isp_ops->idc_unlock(ha);
}
+
+/**
+ * qla4_83xx_is_detached - Check if we are marked invisible.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4_83xx_is_detached(struct scsi_qla_host *ha)
+{
+ uint32_t drv_active;
+
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+ if (test_bit(AF_INIT_DONE, &ha->flags) &&
+ !(drv_active & (1 << ha->func_num))) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
+ __func__, drv_active));
+ return QLA_SUCCESS;
+ }
+
+ return QLA_ERROR;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
index 04a0027dbca0..9f92cbf96477 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.c
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -517,7 +517,7 @@ static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
(ha->idc_extend_tmo * HZ))) {
ha->notify_idc_comp = 0;
ha->notify_link_up_comp = 0;
- ql4_printk(KERN_WARNING, ha, "%s: IDC Complete notification not received",
+ ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received",
__func__);
status = QLA_ERROR;
goto exit_wait;
@@ -538,7 +538,7 @@ static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
if (!wait_for_completion_timeout(&ha->link_up_comp,
(IDC_COMP_TOV * HZ))) {
ha->notify_link_up_comp = 0;
- ql4_printk(KERN_WARNING, ha, "%s: LINK UP notification not received",
+ ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received",
__func__);
status = QLA_ERROR;
goto exit_wait;
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index aa67bb9a4426..73a502288bde 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -194,7 +194,7 @@
#define ADAPTER_INIT_TOV 30
#define ADAPTER_RESET_TOV 180
#define EXTEND_CMD_TOV 60
-#define WAIT_CMD_TOV 30
+#define WAIT_CMD_TOV 5
#define EH_WAIT_CMD_TOV 120
#define FIRMWARE_UP_TOV 60
#define RESET_FIRMWARE_TOV 30
@@ -297,6 +297,8 @@ struct ddb_entry {
/* Driver Re-login */
unsigned long flags; /* DDB Flags */
+#define DDB_CONN_CLOSE_FAILURE 0 /* 0x00000001 */
+
uint16_t default_relogin_timeout; /* Max time to wait for
* relogin to complete */
atomic_t retry_relogin_timer; /* Min Time between relogins
@@ -580,7 +582,6 @@ struct scsi_qla_host {
#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
-#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */
#define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */
#define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */
@@ -595,10 +596,10 @@ struct scsi_qla_host {
#define DPC_AEN 9 /* 0x00000200 */
#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
#define DPC_LINK_CHANGED 18 /* 0x00040000 */
-#define DPC_RESET_ACTIVE 20 /* 0x00040000 */
-#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/
-#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/
-#define DPC_POST_IDC_ACK 23 /* 0x00200000 */
+#define DPC_RESET_ACTIVE 20 /* 0x00100000 */
+#define DPC_HA_UNRECOVERABLE 21 /* 0x00200000 ISP-82xx only*/
+#define DPC_HA_NEED_QUIESCENT 22 /* 0x00400000 ISP-82xx only*/
+#define DPC_POST_IDC_ACK 23 /* 0x00800000 */
#define DPC_RESTORE_ACB 24 /* 0x01000000 */
struct Scsi_Host *host; /* pointer to host data */
@@ -768,6 +769,7 @@ struct scsi_qla_host {
uint32_t fw_dump_capture_mask;
void *fw_dump_tmplt_hdr;
uint32_t fw_dump_tmplt_size;
+ uint32_t fw_dump_skip_size;
struct completion mbx_intr_comp;
@@ -910,7 +912,8 @@ static inline int is_qla80XX(struct scsi_qla_host *ha)
static inline int is_aer_supported(struct scsi_qla_host *ha)
{
return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) ||
- (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324));
+ (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324) ||
+ (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042));
}
static inline int adapter_up(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 8d4092b33c07..209853ce0bbc 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -390,6 +390,7 @@ struct qla_flt_region {
#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
#define MBOX_CMD_CONN_OPEN 0x0074
#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
+#define DDB_NOT_LOGGED_IN 0x09
#define LOGOUT_OPTION_CLOSE_SESSION 0x0002
#define LOGOUT_OPTION_RELOGIN 0x0004
#define LOGOUT_OPTION_FREE_DDB 0x0008
@@ -505,9 +506,9 @@ struct qla_flt_region {
#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028
#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029
#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED 0x802A
-#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B
-#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
-#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
+#define MBOX_ASTS_IPV6_LINK_MTU_CHANGE 0x802B
+#define MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED 0x802C
+#define MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED 0x802D
#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
#define MBOX_ASTS_INITIALIZATION_FAILED 0x8031
#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036
@@ -528,14 +529,14 @@ struct qla_flt_region {
#define ACB_CONFIG_DISABLE 0x00
#define ACB_CONFIG_SET 0x01
-/* ACB State Defines */
-#define ACB_STATE_UNCONFIGURED 0x00
-#define ACB_STATE_INVALID 0x01
-#define ACB_STATE_ACQUIRING 0x02
-#define ACB_STATE_TENTATIVE 0x03
-#define ACB_STATE_DEPRICATED 0x04
-#define ACB_STATE_VALID 0x05
-#define ACB_STATE_DISABLING 0x06
+/* ACB/IP Address State Defines */
+#define IP_ADDRSTATE_UNCONFIGURED 0
+#define IP_ADDRSTATE_INVALID 1
+#define IP_ADDRSTATE_ACQUIRING 2
+#define IP_ADDRSTATE_TENTATIVE 3
+#define IP_ADDRSTATE_DEPRICATED 4
+#define IP_ADDRSTATE_PREFERRED 5
+#define IP_ADDRSTATE_DISABLING 6
/* FLASH offsets */
#define FLASH_SEGMENT_IFCB 0x04000000
@@ -698,14 +699,6 @@ struct addr_ctrl_blk {
uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
uint8_t ipv6_addr0_state; /* 223 */
uint8_t ipv6_addr1_state; /* 224 */
-#define IP_ADDRSTATE_UNCONFIGURED 0
-#define IP_ADDRSTATE_INVALID 1
-#define IP_ADDRSTATE_ACQUIRING 2
-#define IP_ADDRSTATE_TENTATIVE 3
-#define IP_ADDRSTATE_DEPRICATED 4
-#define IP_ADDRSTATE_PREFERRED 5
-#define IP_ADDRSTATE_DISABLING 6
-
uint8_t ipv6_dflt_rtr_state; /* 225 */
#define IPV6_RTRSTATE_UNKNOWN 0
#define IPV6_RTRSTATE_MANUAL 1
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index d67c50e0b896..b1a19cd8d5b2 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -279,6 +279,8 @@ int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha,
uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
+int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha);
+int qla4_83xx_is_detached(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 7456eeb2e58a..28fbece7e08f 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -959,13 +959,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
qla4xxx_build_ddb_list(ha, is_reset);
set_bit(AF_ONLINE, &ha->flags);
-exit_init_hba:
- if (is_qla80XX(ha) && (status == QLA_ERROR)) {
- /* Since interrupts are registered in start_firmware for
- * 80XX, release them here if initialize_adapter fails */
- qla4xxx_free_irqs(ha);
- }
+exit_init_hba:
DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
return status;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index a3c8bc7706c2..b1925d195f41 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -385,9 +385,9 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
cls_conn = ddb_entry->conn;
conn = cls_conn->dd_data;
- spin_lock(&conn->session->lock);
+ spin_lock(&conn->session->back_lock);
task = iscsi_itt_to_task(conn, itt);
- spin_unlock(&conn->session->lock);
+ spin_unlock(&conn->session->back_lock);
if (task == NULL) {
ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
@@ -635,6 +635,18 @@ static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha,
}
}
+static void qla4xxx_default_router_changed(struct scsi_qla_host *ha,
+ uint32_t *mbox_sts)
+{
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0],
+ &mbox_sts[2], sizeof(uint32_t));
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1],
+ &mbox_sts[3], sizeof(uint32_t));
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2],
+ &mbox_sts[4], sizeof(uint32_t));
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3],
+ &mbox_sts[5], sizeof(uint32_t));
+}
/**
* qla4xxx_isr_decode_mailbox - decodes mailbox status
@@ -781,27 +793,44 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
mbox_sts[3]);
/* mbox_sts[2] = Old ACB state
* mbox_sts[3] = new ACB state */
- if ((mbox_sts[3] == ACB_STATE_VALID) &&
- ((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
- (mbox_sts[2] == ACB_STATE_ACQUIRING))) {
+ if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) &&
+ ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) ||
+ (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) {
set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
- } else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
- (mbox_sts[2] == ACB_STATE_VALID)) {
+ } else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) &&
+ (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) {
if (is_qla80XX(ha))
set_bit(DPC_RESET_HA_FW_CONTEXT,
&ha->dpc_flags);
else
set_bit(DPC_RESET_HA, &ha->dpc_flags);
- } else if (mbox_sts[3] == ACB_STATE_DISABLING) {
+ } else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
ha->host_no, __func__);
- } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) {
+ } else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) {
complete(&ha->disable_acb_comp);
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
ha->host_no, __func__);
}
break;
+ case MBOX_ASTS_IPV6_LINK_MTU_CHANGE:
+ case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED:
+ case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED:
+ /* No action */
+ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n",
+ ha->host_no, mbox_status));
+ break;
+
+ case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, IPv6 ERROR, "
+ "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3}=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ break;
+
case MBOX_ASTS_MAC_ADDRESS_CHANGED:
case MBOX_ASTS_DNS:
/* No action */
@@ -939,6 +968,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
ha->host_no, mbox_sts[0]));
+ qla4xxx_default_router_changed(ha, mbox_sts);
break;
case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
@@ -1022,7 +1052,8 @@ void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
uint32_t intr_status)
{
/* Process response queue interrupt. */
- if (intr_status & HSRX_RISC_IOCB_INT)
+ if ((intr_status & HSRX_RISC_IOCB_INT) &&
+ test_bit(AF_INIT_DONE, &ha->flags))
qla4xxx_process_response_queue(ha);
/* Process mailbox/asynch event interrupt.*/
@@ -1399,6 +1430,7 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
{
struct scsi_qla_host *ha = dev_id;
unsigned long flags;
+ int intr_status;
uint32_t ival = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1412,8 +1444,15 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
qla4xxx_process_response_queue(ha);
writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
} else {
- qla4xxx_process_response_queue(ha);
- writel(0, &ha->qla4_82xx_reg->host_int);
+ intr_status = readl(&ha->qla4_82xx_reg->host_status);
+ if (intr_status & HSRX_RISC_IOCB_INT) {
+ qla4xxx_process_response_queue(ha);
+ writel(0, &ha->qla4_82xx_reg->host_int);
+ } else {
+ ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n",
+ __func__);
+ goto exit_msix_rsp_q;
+ }
}
ha->isr_count++;
exit_msix_rsp_q:
@@ -1488,6 +1527,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
int qla4xxx_request_irqs(struct scsi_qla_host *ha)
{
int ret;
+ int rval = QLA_ERROR;
if (is_qla40XX(ha))
goto try_intx;
@@ -1568,9 +1608,10 @@ irq_attached:
set_bit(AF_IRQ_ATTACHED, &ha->flags);
ha->host->irq = ha->pdev->irq;
ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
- __func__, ha->pdev->irq);
+ __func__, ha->pdev->irq);
+ rval = QLA_SUCCESS;
irq_not_attached:
- return ret;
+ return rval;
}
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 9ae8ca3b69f9..0a6b782d6fdb 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -212,9 +212,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
ha->host_no, __func__));
goto mbox_exit;
}
- DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...,"
- " Scheduling Adapter Reset\n", ha->host_no,
- mbx_cmd[0]));
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n",
+ ha->host_no, mbx_cmd[0]);
ha->mailbox_timeout_count++;
mbx_sts[0] = (-1);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -251,15 +250,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
break;
case MBOX_STS_BUSY:
- DEBUG2( printk("scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
- ha->host_no, __func__, mbx_cmd[0]));
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
+ ha->host_no, __func__, mbx_cmd[0]);
ha->mailbox_timeout_count++;
break;
default:
- DEBUG2(printk("scsi%ld: %s: **** FAILED, cmd = %08X, "
- "sts = %08X ****\n", ha->host_no, __func__,
- mbx_cmd[0], mbx_sts[0]));
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n",
+ ha->host_no, __func__, mbx_cmd[0], mbx_sts[0],
+ mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4],
+ mbx_sts[5], mbx_sts[6], mbx_sts[7]);
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -383,7 +383,6 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
mbox_cmd[2] = LSDW(init_fw_cb_dma);
mbox_cmd[3] = MSDW(init_fw_cb_dma);
mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
- mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN;
if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
QLA_SUCCESS) {
@@ -648,9 +647,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
goto exit_init_fw_cb;
}
- /* Initialize request and response queues. */
- qla4xxx_init_rings(ha);
-
/* Fill in the request and response queue information. */
init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
@@ -1002,6 +998,10 @@ int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
"%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
"failed sts %04X %04X", __func__,
mbox_sts[0], mbox_sts[1]));
+ if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) &&
+ (mbox_sts[1] == DDB_NOT_LOGGED_IN)) {
+ set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
+ }
}
return status;
@@ -1918,6 +1918,7 @@ int qla4xxx_disable_acb(struct scsi_qla_host *ha)
mbox_sts[0], mbox_sts[1], mbox_sts[2]));
} else {
if (is_qla8042(ha) &&
+ test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) &&
(mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
/*
* Disable ACB mailbox command takes time to complete
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index d001202d3565..63328c812b70 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2383,6 +2383,11 @@ static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
"scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
ha->host_no, index, entry_hdr->entry_type,
entry_hdr->d_ctrl.entry_capture_mask));
+ /* If the driver encounters a new entry type that it cannot process,
+ * it should just skip the entry and adjust the total buffer size by
+ * subtracting the skipped bytes from it.
+ */
+ ha->fw_dump_skip_size += entry_hdr->entry_capture_size;
}
/* ISP83xx functions to process new minidump entries... */
@@ -2590,6 +2595,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
uint64_t now;
uint32_t timestamp;
+ ha->fw_dump_skip_size = 0;
if (!ha->fw_dump) {
ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
__func__, ha->host_no);
@@ -2761,7 +2767,7 @@ skip_nxt_entry:
entry_hdr->entry_size);
}
- if (data_collected != ha->fw_dump_size) {
+ if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) {
ql4_printk(KERN_INFO, ha,
"Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
data_collected, ha->fw_dump_size);
@@ -2820,63 +2826,35 @@ void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)
int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
{
int rval = QLA_ERROR;
- int i, timeout;
- uint32_t old_count, count, idc_ctrl;
- int need_reset = 0, peg_stuck = 1;
+ int i;
+ uint32_t old_count, count;
+ int need_reset = 0;
need_reset = ha->isp_ops->need_reset(ha);
- old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
-
- for (i = 0; i < 10; i++) {
- timeout = msleep_interruptible(200);
- if (timeout) {
- qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
- QLA8XXX_DEV_FAILED);
- return rval;
- }
-
- count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
- if (count != old_count)
- peg_stuck = 0;
- }
if (need_reset) {
/* We are trying to perform a recovery here. */
- if (peg_stuck)
+ if (test_bit(AF_FW_RECOVERY, &ha->flags))
ha->isp_ops->rom_lock_recovery(ha);
- goto dev_initialize;
} else {
- /* Start of day for this ha context. */
- if (peg_stuck) {
- /* Either we are the first or recovery in progress. */
- ha->isp_ops->rom_lock_recovery(ha);
- goto dev_initialize;
- } else {
- /* Firmware already running. */
- rval = QLA_SUCCESS;
- goto dev_ready;
+ old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+ for (i = 0; i < 10; i++) {
+ msleep(200);
+ count = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_PEG_ALIVE_COUNTER);
+ if (count != old_count) {
+ rval = QLA_SUCCESS;
+ goto dev_ready;
+ }
}
+ ha->isp_ops->rom_lock_recovery(ha);
}
-dev_initialize:
/* set to DEV_INITIALIZING */
ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
QLA8XXX_DEV_INITIALIZING);
- /*
- * For ISP8324 and ISP8042, if IDC_CTRL GRACEFUL_RESET_BIT1 is set,
- * reset it after device goes to INIT state.
- */
- if (is_qla8032(ha) || is_qla8042(ha)) {
- idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
- if (idc_ctrl & GRACEFUL_RESET_BIT1) {
- qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
- (idc_ctrl & ~GRACEFUL_RESET_BIT1));
- set_bit(AF_83XX_NO_FW_DUMP, &ha->flags);
- }
- }
-
ha->isp_ops->idc_unlock(ha);
if (is_qla8022(ha))
@@ -3209,6 +3187,10 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
retval = qla4_8xxx_device_state_handler(ha);
+ /* Initialize request and response queues. */
+ if (retval == QLA_SUCCESS)
+ qla4xxx_init_rings(ha);
+
if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
retval = qla4xxx_request_irqs(ha);
@@ -3836,3 +3818,24 @@ qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
msix_out:
return ret;
}
+
+int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha)
+{
+ int status = QLA_SUCCESS;
+
+ /* Don't retry adapter initialization if IRQ allocation failed */
+ if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+ ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n",
+ __func__);
+ status = QLA_ERROR;
+ goto exit_init_adapter_failure;
+ }
+
+ /* Since interrupts are registered in start_firmware for
+ * 8xxx, release them here if initialize_adapter fails
+ * so that adapter initialization can be retried */
+ qla4xxx_free_irqs(ha);
+
+exit_init_adapter_failure:
+ return status;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c21adc338cf1..459b9f7186fd 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1670,16 +1670,13 @@ qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
if (!shost) {
ret = -ENXIO;
- printk(KERN_ERR "%s: shost is NULL\n",
- __func__);
+ pr_err("%s: shost is NULL\n", __func__);
return ERR_PTR(ret);
}
ha = iscsi_host_priv(shost);
-
ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
if (!ep) {
ret = -ENOMEM;
@@ -1699,6 +1696,9 @@ qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
(char *)&addr6->sin6_addr));
+ } else {
+ ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
+ __func__);
}
qla_ep->host = shost;
@@ -1712,9 +1712,9 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
struct scsi_qla_host *ha;
int ret = 0;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
qla_ep = ep->dd_data;
ha = to_qla_host(qla_ep->host);
+ DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));
if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
ret = 1;
@@ -1724,7 +1724,13 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
{
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ struct qla_endpoint *qla_ep;
+ struct scsi_qla_host *ha;
+
+ qla_ep = ep->dd_data;
+ ha = to_qla_host(qla_ep->host);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
iscsi_destroy_endpoint(ep);
}
@@ -1734,8 +1740,11 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
{
struct qla_endpoint *qla_ep = ep->dd_data;
struct sockaddr *dst_addr;
+ struct scsi_qla_host *ha;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ ha = to_qla_host(qla_ep->host);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@@ -1766,13 +1775,13 @@ static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
int ret;
dma_addr_t iscsi_stats_dma;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
-
cls_sess = iscsi_conn_to_session(cls_conn);
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
/* Allocate memory */
ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
@@ -2100,7 +2109,8 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
else
init_fw_cb->ipv6_tcp_opts &=
- cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE);
+ cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE &
+ 0xFFFF);
break;
case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
if (iface_param->iface_num & 0x1)
@@ -2297,7 +2307,8 @@ static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
else
init_fw_cb->ipv4_tcp_opts &=
- cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE);
+ cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE &
+ 0xFFFF);
break;
case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
if (iface_param->iface_num & 0x1)
@@ -3045,7 +3056,6 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
struct sockaddr *dst_addr;
int ret;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
if (!ep) {
printk(KERN_ERR "qla4xxx: missing ep.\n");
return NULL;
@@ -3054,6 +3064,8 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
qla_ep = ep->dd_data;
dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
ha = to_qla_host(qla_ep->host);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
ret = qla4xxx_get_ddb_index(ha, &ddb_index);
if (ret == QLA_ERROR)
@@ -3074,6 +3086,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
ddb_entry->sess = cls_sess;
ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
ddb_entry->ddb_change = qla4xxx_ddb_change;
+ clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
ha->tot_ddbs++;
@@ -3092,10 +3105,11 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
uint32_t ddb_state;
int ret;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
@@ -3123,7 +3137,8 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
destroy_session:
qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
-
+ if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
+ clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
spin_lock_irqsave(&ha->hardware_lock, flags);
qla4xxx_free_ddb(ha, ddb_entry);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3141,17 +3156,23 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
struct iscsi_cls_conn *cls_conn;
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
conn_idx);
- if (!cls_conn)
+ if (!cls_conn) {
+ pr_info("%s: Can not create connection for conn_idx = %u\n",
+ __func__, conn_idx);
return NULL;
+ }
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->conn = cls_conn;
+ ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
+ conn_idx));
return cls_conn;
}
@@ -3162,8 +3183,16 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_conn *conn;
struct qla_conn *qla_conn;
struct iscsi_endpoint *ep;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct iscsi_session *sess;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+ cls_session->sid, cls_conn->cid));
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
@@ -3186,10 +3215,11 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
int ret = 0;
int status = QLA_SUCCESS;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+ cls_sess->sid, cls_conn->cid));
/* Check if we have matching FW DDB, if yes then do not
* login to this target. This could cause target to logout previous
@@ -3263,10 +3293,11 @@ static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
struct ddb_entry *ddb_entry;
int options;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
+ cls_conn->cid));
options = LOGOUT_OPTION_CLOSE_SESSION;
if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
@@ -4372,6 +4403,11 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
uint32_t dev_state;
uint32_t idc_ctrl;
+ if (is_qla8032(ha) &&
+ (qla4_83xx_is_detached(ha) == QLA_SUCCESS))
+ WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n",
+ __func__, ha->func_num);
+
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
@@ -4554,11 +4590,19 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
uint32_t index = 0;
unsigned long flags;
struct scsi_cmnd *cmd;
+ unsigned long wtime;
+ uint32_t wtmo;
+
+ if (is_qla40XX(ha))
+ wtmo = WAIT_CMD_TOV;
+ else
+ wtmo = ha->nx_reset_timeout / 2;
- unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
+ wtime = jiffies + (wtmo * HZ);
- DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
- "complete\n", WAIT_CMD_TOV));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Wait up to %u seconds for cmds to complete\n",
+ wtmo));
while (!time_after_eq(jiffies, wtime)) {
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -4861,11 +4905,11 @@ chip_reset:
qla4xxx_cmd_wait(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
- qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s - Performing chip reset..\n",
ha->host_no, __func__));
status = ha->isp_ops->reset_chip(ha);
+ qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
}
/* Flush any pending ddb changed AENs */
@@ -4881,8 +4925,21 @@ recover_ha_init_adapter:
ssleep(6);
/* NOTE: AF_ONLINE flag set upon successful completion of
- * qla4xxx_initialize_adapter */
+ * qla4xxx_initialize_adapter */
status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
+ if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+ status = qla4_8xxx_check_init_adapter_retry(ha);
+ if (status == QLA_ERROR) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n",
+ ha->host_no, __func__);
+ qla4xxx_dead_adapter_cleanup(ha);
+ clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ goto exit_recover;
+ }
+ }
}
/* Retry failed adapter initialization, if necessary
@@ -5228,9 +5285,9 @@ static void qla4xxx_do_dpc(struct work_struct *work)
container_of(work, struct scsi_qla_host, dpc_work);
int status = QLA_ERROR;
- DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
- "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
- ha->host_no, __func__, ha->flags, ha->dpc_flags))
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n",
+ ha->host_no, __func__, ha->flags, ha->dpc_flags));
/* Initialization not yet finished. Don't do anything yet. */
if (!test_bit(AF_INIT_DONE, &ha->flags))
@@ -8681,11 +8738,8 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
/* Dont retry adapter initialization if IRQ allocation failed */
- if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
- ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
- __func__);
+ if (is_qla80XX(ha) && (status == QLA_ERROR))
goto skip_retry_init;
- }
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
@@ -8709,6 +8763,10 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
continue;
status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
+ if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+ if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
+ goto skip_retry_init;
+ }
}
skip_retry_init:
@@ -8857,10 +8915,56 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
}
}
+static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ unsigned long wtime;
+ uint32_t ddb_state;
+ int options;
+ int status;
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
+ goto clear_ddb;
+ }
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto clear_ddb;
+ }
+
+ wtime = jiffies + (HZ * LOGOUT_TOV);
+ do {
+ status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (status == QLA_ERROR)
+ goto free_ddb;
+
+ if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ goto free_ddb;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while ((time_after(wtime, jiffies)));
+
+free_ddb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+clear_ddb:
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+}
+
static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
{
struct ddb_entry *ddb_entry;
- int options;
int idx;
for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
@@ -8869,13 +8973,7 @@ static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
if ((ddb_entry != NULL) &&
(ddb_entry->ddb_type == FLASH_DDB)) {
- options = LOGOUT_OPTION_CLOSE_SESSION;
- if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
- == QLA_ERROR)
- ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
- __func__);
-
- qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+ qla4xxx_destroy_ddb(ha, ddb_entry);
/*
* we have decremented the reference count of the driver
* when we setup the session to have the driver unload
@@ -9136,14 +9234,15 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
int ret = SUCCESS;
int wait = 0;
- ql4_printk(KERN_INFO, ha,
- "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
- ha->host_no, id, lun, cmd);
+ ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Abort command issued cmd=%p, cdb=0x%x\n",
+ ha->host_no, id, lun, cmd, cmd->cmnd[0]);
spin_lock_irqsave(&ha->hardware_lock, flags);
srb = (struct srb *) CMD_SP(cmd);
if (!srb) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Specified command has already completed.\n",
+ ha->host_no, id, lun);
return SUCCESS;
}
kref_get(&srb->srb_ref);
@@ -9560,28 +9659,36 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
}
fn = PCI_FUNC(ha->pdev->devfn);
- while (fn > 0) {
- fn--;
- ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
- "func %x\n", ha->host_no, __func__, fn);
- /* Get the pci device given the domain, bus,
- * slot/function number */
- other_pdev =
- pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
- ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
- fn));
-
- if (!other_pdev)
- continue;
+ if (is_qla8022(ha)) {
+ while (fn > 0) {
+ fn--;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n",
+ ha->host_no, __func__, fn);
+ /* Get the pci device given the domain, bus,
+ * slot/function number */
+ other_pdev = pci_get_domain_bus_and_slot(
+ pci_domain_nr(ha->pdev->bus),
+ ha->pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+ fn));
+
+ if (!other_pdev)
+ continue;
- if (atomic_read(&other_pdev->enable_cnt)) {
- ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
- "func in enabled state%x\n", ha->host_no,
- __func__, fn);
+ if (atomic_read(&other_pdev->enable_cnt)) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n",
+ ha->host_no, __func__, fn);
+ pci_dev_put(other_pdev);
+ break;
+ }
pci_dev_put(other_pdev);
- break;
}
- pci_dev_put(other_pdev);
+ } else {
+ /* this case is meant for ISP83xx/ISP84xx only */
+ if (qla4_83xx_can_perform_reset(ha)) {
+ /* reset fn as iSCSI is going to perform the reset */
+ fn = 0;
+ }
}
/* The first function on the card, the reset owner will
@@ -9615,6 +9722,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
if (rval != QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
"FAILED\n", ha->host_no, __func__);
+ qla4xxx_free_irqs(ha);
ha->isp_ops->idc_lock(ha);
qla4_8xxx_clear_drv_active(ha);
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
@@ -9642,6 +9750,8 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
if (rval == QLA_SUCCESS)
ha->isp_ops->enable_intrs(ha);
+ else
+ qla4xxx_free_irqs(ha);
ha->isp_ops->idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 9b2946658683..c6ba0a6b8458 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.04.00-k3"
+#define QLA4XXX_DRIVER_VERSION "5.04.00-k4"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d8afec8317cf..88d46fe6bf98 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -91,6 +91,15 @@ EXPORT_SYMBOL(scsi_logging_level);
ASYNC_DOMAIN(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);
+/*
+ * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
+ * asynchronous system resume operations. It is marked 'exclusive' to avoid
+ * being included in the async_synchronize_full() that is invoked by
+ * dpm_resume().
+ */
+ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
+EXPORT_SYMBOL(scsi_sd_pm_domain);
+
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
* encouraged once assigned by ANSI/INCITS T10
@@ -161,47 +170,20 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
static DEFINE_MUTEX(host_cmd_pool_mutex);
/**
- * scsi_pool_alloc_command - internal function to get a fully allocated command
- * @pool: slab pool to allocate the command from
- * @gfp_mask: mask for the allocation
- *
- * Returns a fully allocated command (with the allied sense buffer) or
- * NULL on failure
- */
-static struct scsi_cmnd *
-scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
-{
- struct scsi_cmnd *cmd;
-
- cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
- if (!cmd)
- return NULL;
-
- cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
- gfp_mask | pool->gfp_mask);
- if (!cmd->sense_buffer) {
- kmem_cache_free(pool->cmd_slab, cmd);
- return NULL;
- }
-
- return cmd;
-}
-
-/**
- * scsi_pool_free_command - internal function to release a command
- * @pool: slab pool to allocate the command from
+ * scsi_host_free_command - internal function to release a command
+ * @shost: host to free the command for
* @cmd: command to release
*
* the command must previously have been allocated by
- * scsi_pool_alloc_command.
+ * scsi_host_alloc_command.
*/
static void
-scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
- struct scsi_cmnd *cmd)
+scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
+ struct scsi_host_cmd_pool *pool = shost->cmd_pool;
+
if (cmd->prot_sdb)
kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
-
kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
kmem_cache_free(pool->cmd_slab, cmd);
}
@@ -217,22 +199,32 @@ scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
static struct scsi_cmnd *
scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
+ struct scsi_host_cmd_pool *pool = shost->cmd_pool;
struct scsi_cmnd *cmd;
- cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+ cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
if (!cmd)
- return NULL;
+ goto fail;
+
+ cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
+ gfp_mask | pool->gfp_mask);
+ if (!cmd->sense_buffer)
+ goto fail_free_cmd;
if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
-
- if (!cmd->prot_sdb) {
- scsi_pool_free_command(shost->cmd_pool, cmd);
- return NULL;
- }
+ if (!cmd->prot_sdb)
+ goto fail_free_sense;
}
return cmd;
+
+fail_free_sense:
+ kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
+fail_free_cmd:
+ kmem_cache_free(pool->cmd_slab, cmd);
+fail:
+ return NULL;
}
/**
@@ -284,27 +276,19 @@ EXPORT_SYMBOL_GPL(__scsi_get_command);
*/
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
- struct scsi_cmnd *cmd;
+ struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
+ unsigned long flags;
- /* Bail if we can't get a reference to the device */
- if (!get_device(&dev->sdev_gendev))
+ if (unlikely(cmd == NULL))
return NULL;
- cmd = __scsi_get_command(dev->host, gfp_mask);
-
- if (likely(cmd != NULL)) {
- unsigned long flags;
-
- cmd->device = dev;
- INIT_LIST_HEAD(&cmd->list);
- INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
- spin_lock_irqsave(&dev->list_lock, flags);
- list_add_tail(&cmd->list, &dev->cmd_list);
- spin_unlock_irqrestore(&dev->list_lock, flags);
- cmd->jiffies_at_alloc = jiffies;
- } else
- put_device(&dev->sdev_gendev);
-
+ cmd->device = dev;
+ INIT_LIST_HEAD(&cmd->list);
+ INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
+ spin_lock_irqsave(&dev->list_lock, flags);
+ list_add_tail(&cmd->list, &dev->cmd_list);
+ spin_unlock_irqrestore(&dev->list_lock, flags);
+ cmd->jiffies_at_alloc = jiffies;
return cmd;
}
EXPORT_SYMBOL(scsi_get_command);
@@ -313,25 +297,22 @@ EXPORT_SYMBOL(scsi_get_command);
* __scsi_put_command - Free a struct scsi_cmnd
* @shost: dev->host
* @cmd: Command to free
- * @dev: parent scsi device
*/
-void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
- struct device *dev)
+void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
unsigned long flags;
- /* changing locks here, don't need to restore the irq state */
- spin_lock_irqsave(&shost->free_list_lock, flags);
if (unlikely(list_empty(&shost->free_list))) {
- list_add(&cmd->list, &shost->free_list);
- cmd = NULL;
+ spin_lock_irqsave(&shost->free_list_lock, flags);
+ if (list_empty(&shost->free_list)) {
+ list_add(&cmd->list, &shost->free_list);
+ cmd = NULL;
+ }
+ spin_unlock_irqrestore(&shost->free_list_lock, flags);
}
- spin_unlock_irqrestore(&shost->free_list_lock, flags);
if (likely(cmd != NULL))
- scsi_pool_free_command(shost->cmd_pool, cmd);
-
- put_device(dev);
+ scsi_host_free_command(shost, cmd);
}
EXPORT_SYMBOL(__scsi_put_command);
@@ -345,7 +326,6 @@ EXPORT_SYMBOL(__scsi_put_command);
*/
void scsi_put_command(struct scsi_cmnd *cmd)
{
- struct scsi_device *sdev = cmd->device;
unsigned long flags;
/* serious error if the command hasn't come from a device list */
@@ -356,50 +336,107 @@ void scsi_put_command(struct scsi_cmnd *cmd)
cancel_delayed_work(&cmd->abort_work);
- __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
+ __scsi_put_command(cmd->device->host, cmd);
}
EXPORT_SYMBOL(scsi_put_command);
-static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask)
+static struct scsi_host_cmd_pool *
+scsi_find_host_cmd_pool(struct Scsi_Host *shost)
+{
+ if (shost->hostt->cmd_size)
+ return shost->hostt->cmd_pool;
+ if (shost->unchecked_isa_dma)
+ return &scsi_cmd_dma_pool;
+ return &scsi_cmd_pool;
+}
+
+static void
+scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
+{
+ kfree(pool->sense_name);
+ kfree(pool->cmd_name);
+ kfree(pool);
+}
+
+static struct scsi_host_cmd_pool *
+scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
+{
+ struct scsi_host_template *hostt = shost->hostt;
+ struct scsi_host_cmd_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name);
+ pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name);
+ if (!pool->cmd_name || !pool->sense_name) {
+ scsi_free_host_cmd_pool(pool);
+ return NULL;
+ }
+
+ pool->slab_flags = SLAB_HWCACHE_ALIGN;
+ if (shost->unchecked_isa_dma) {
+ pool->slab_flags |= SLAB_CACHE_DMA;
+ pool->gfp_mask = __GFP_DMA;
+ }
+ return pool;
+}
+
+static struct scsi_host_cmd_pool *
+scsi_get_host_cmd_pool(struct Scsi_Host *shost)
{
+ struct scsi_host_template *hostt = shost->hostt;
struct scsi_host_cmd_pool *retval = NULL, *pool;
+ size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
+
/*
* Select a command slab for this host and create it if not
* yet existent.
*/
mutex_lock(&host_cmd_pool_mutex);
- pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
- &scsi_cmd_pool;
+ pool = scsi_find_host_cmd_pool(shost);
+ if (!pool) {
+ pool = scsi_alloc_host_cmd_pool(shost);
+ if (!pool)
+ goto out;
+ }
+
if (!pool->users) {
- pool->cmd_slab = kmem_cache_create(pool->cmd_name,
- sizeof(struct scsi_cmnd), 0,
+ pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
pool->slab_flags, NULL);
if (!pool->cmd_slab)
- goto fail;
+ goto out_free_pool;
pool->sense_slab = kmem_cache_create(pool->sense_name,
SCSI_SENSE_BUFFERSIZE, 0,
pool->slab_flags, NULL);
- if (!pool->sense_slab) {
- kmem_cache_destroy(pool->cmd_slab);
- goto fail;
- }
+ if (!pool->sense_slab)
+ goto out_free_slab;
}
pool->users++;
retval = pool;
- fail:
+out:
mutex_unlock(&host_cmd_pool_mutex);
return retval;
+
+out_free_slab:
+ kmem_cache_destroy(pool->cmd_slab);
+out_free_pool:
+ if (hostt->cmd_size)
+ scsi_free_host_cmd_pool(pool);
+ goto out;
}
-static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
+static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
{
+ struct scsi_host_template *hostt = shost->hostt;
struct scsi_host_cmd_pool *pool;
mutex_lock(&host_cmd_pool_mutex);
- pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
- &scsi_cmd_pool;
+ pool = scsi_find_host_cmd_pool(shost);
+
/*
* This may happen if a driver has a mismatched get and put
* of the command pool; the driver should be implicated in
@@ -410,67 +447,13 @@ static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
if (!--pool->users) {
kmem_cache_destroy(pool->cmd_slab);
kmem_cache_destroy(pool->sense_slab);
+ if (hostt->cmd_size)
+ scsi_free_host_cmd_pool(pool);
}
mutex_unlock(&host_cmd_pool_mutex);
}
/**
- * scsi_allocate_command - get a fully allocated SCSI command
- * @gfp_mask: allocation mask
- *
- * This function is for use outside of the normal host based pools.
- * It allocates the relevant command and takes an additional reference
- * on the pool it used. This function *must* be paired with
- * scsi_free_command which also has the identical mask, otherwise the
- * free pool counts will eventually go wrong and you'll trigger a bug.
- *
- * This function should *only* be used by drivers that need a static
- * command allocation at start of day for internal functions.
- */
-struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask)
-{
- struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
-
- if (!pool)
- return NULL;
-
- return scsi_pool_alloc_command(pool, gfp_mask);
-}
-EXPORT_SYMBOL(scsi_allocate_command);
-
-/**
- * scsi_free_command - free a command allocated by scsi_allocate_command
- * @gfp_mask: mask used in the original allocation
- * @cmd: command to free
- *
- * Note: using the original allocation mask is vital because that's
- * what determines which command pool we use to free the command. Any
- * mismatch will cause the system to BUG eventually.
- */
-void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd)
-{
- struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
-
- /*
- * this could trigger if the mask to scsi_allocate_command
- * doesn't match this mask. Otherwise we're guaranteed that this
- * succeeds because scsi_allocate_command must have taken a reference
- * on the pool
- */
- BUG_ON(!pool);
-
- scsi_pool_free_command(pool, cmd);
- /*
- * scsi_put_host_cmd_pool is called twice; once to release the
- * reference we took above, and once to release the reference
- * originally taken by scsi_allocate_command
- */
- scsi_put_host_cmd_pool(gfp_mask);
- scsi_put_host_cmd_pool(gfp_mask);
-}
-EXPORT_SYMBOL(scsi_free_command);
-
-/**
* scsi_setup_command_freelist - Setup the command freelist for a scsi host.
* @shost: host to allocate the freelist for.
*
@@ -482,14 +465,13 @@ EXPORT_SYMBOL(scsi_free_command);
*/
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
- struct scsi_cmnd *cmd;
const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
+ struct scsi_cmnd *cmd;
spin_lock_init(&shost->free_list_lock);
INIT_LIST_HEAD(&shost->free_list);
- shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask);
-
+ shost->cmd_pool = scsi_get_host_cmd_pool(shost);
if (!shost->cmd_pool)
return -ENOMEM;
@@ -498,7 +480,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
*/
cmd = scsi_host_alloc_command(shost, gfp_mask);
if (!cmd) {
- scsi_put_host_cmd_pool(gfp_mask);
+ scsi_put_host_cmd_pool(shost);
shost->cmd_pool = NULL;
return -ENOMEM;
}
@@ -524,10 +506,10 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
list_del_init(&cmd->list);
- scsi_pool_free_command(shost->cmd_pool, cmd);
+ scsi_host_free_command(shost, cmd);
}
shost->cmd_pool = NULL;
- scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL);
+ scsi_put_host_cmd_pool(shost);
}
#ifdef CONFIG_SCSI_LOGGING
@@ -954,7 +936,7 @@ EXPORT_SYMBOL(scsi_track_queue_full);
* This is an internal helper function. You probably want to use
* scsi_get_vpd_page instead.
*
- * Returns 0 on success or a negative error number.
+ * Returns size of the vpd page on success or a negative error number.
*/
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
u8 page, unsigned len)
@@ -962,6 +944,9 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
int result;
unsigned char cmd[16];
+ if (len < 4)
+ return -EINVAL;
+
cmd[0] = INQUIRY;
cmd[1] = 1; /* EVPD */
cmd[2] = page;
@@ -976,13 +961,13 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
len, NULL, 30 * HZ, 3, NULL);
if (result)
- return result;
+ return -EIO;
/* Sanity check that we got the page back that we asked for */
if (buffer[1] != page)
return -EIO;
- return 0;
+ return get_unaligned_be16(&buffer[2]) + 4;
}
/**
@@ -1009,18 +994,18 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
/* Ask for all the pages supported by this device */
result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
- if (result)
+ if (result < 4)
goto fail;
/* If the user actually wanted this page, we can skip the rest */
if (page == 0)
return 0;
- for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
- if (buf[i + 4] == page)
+ for (i = 4; i < min(result, buf_len); i++)
+ if (buf[i] == page)
goto found;
- if (i < buf[3] && i >= buf_len - 4)
+ if (i < result && i >= buf_len)
/* ran off the end of the buffer, give us benefit of doubt */
goto found;
/* The device claims it doesn't support the requested page */
@@ -1028,7 +1013,7 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
found:
result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
- if (result)
+ if (result < 0)
goto fail;
return 0;
@@ -1039,6 +1024,93 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
/**
+ * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
+ * @sdev: The device to ask
+ *
+ * Attach the 'Device Identification' VPD page (0x83) and the
+ * 'Unit Serial Number' VPD page (0x80) to a SCSI device
+ * structure. This information can be used to identify the device
+ * uniquely.
+ */
+void scsi_attach_vpd(struct scsi_device *sdev)
+{
+ int result, i;
+ int vpd_len = SCSI_VPD_PG_LEN;
+ int pg80_supported = 0;
+ int pg83_supported = 0;
+ unsigned char *vpd_buf;
+
+ if (sdev->skip_vpd_pages)
+ return;
+retry_pg0:
+ vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+ if (!vpd_buf)
+ return;
+
+ /* Ask for all the pages supported by this device */
+ result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
+ if (result < 0) {
+ kfree(vpd_buf);
+ return;
+ }
+ if (result > vpd_len) {
+ vpd_len = result;
+ kfree(vpd_buf);
+ goto retry_pg0;
+ }
+
+ for (i = 4; i < result; i++) {
+ if (vpd_buf[i] == 0x80)
+ pg80_supported = 1;
+ if (vpd_buf[i] == 0x83)
+ pg83_supported = 1;
+ }
+ kfree(vpd_buf);
+ vpd_len = SCSI_VPD_PG_LEN;
+
+ if (pg80_supported) {
+retry_pg80:
+ vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+ if (!vpd_buf)
+ return;
+
+ result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
+ if (result < 0) {
+ kfree(vpd_buf);
+ return;
+ }
+ if (result > vpd_len) {
+ vpd_len = result;
+ kfree(vpd_buf);
+ goto retry_pg80;
+ }
+ sdev->vpd_pg80_len = result;
+ sdev->vpd_pg80 = vpd_buf;
+ vpd_len = SCSI_VPD_PG_LEN;
+ }
+
+ if (pg83_supported) {
+retry_pg83:
+ vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+ if (!vpd_buf)
+ return;
+
+ result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
+ if (result < 0) {
+ kfree(vpd_buf);
+ return;
+ }
+ if (result > vpd_len) {
+ vpd_len = result;
+ kfree(vpd_buf);
+ goto retry_pg83;
+ }
+ sdev->vpd_pg83_len = result;
+ sdev->vpd_pg83 = vpd_buf;
+ }
+}
+
+/**
* scsi_report_opcode - Find out if a given command opcode is supported
* @sdev: scsi device to query
* @buffer: scratch buffer (must be at least 20 bytes long)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2decc6417518..f3e9cc038d1d 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -64,6 +64,7 @@ static const char * scsi_debug_version_date = "20100324";
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
+#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
@@ -195,6 +196,7 @@ static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
static bool scsi_debug_removable = DEF_REMOVABLE;
+static bool scsi_debug_clustering;
static int scsi_debug_cmnd_count = 0;
@@ -1780,7 +1782,6 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
be32_to_cpu(sdt->ref_tag) != ei_lba) {
pr_err("%s: REF check failed on sector %lu\n",
__func__, (unsigned long)sector);
- dif_errors++;
return 0x03;
}
return 0;
@@ -1789,23 +1790,27 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
unsigned int sectors, bool read)
{
- unsigned int i, resid;
- struct scatterlist *psgl;
+ size_t resid;
void *paddr;
const void *dif_store_end = dif_storep + sdebug_store_sectors;
+ struct sg_mapping_iter miter;
/* Bytes of protection data to copy into sgl */
resid = sectors * sizeof(*dif_storep);
- scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
- int len = min(psgl->length, resid);
+ sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
+ scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
+ (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
+
+ while (sg_miter_next(&miter) && resid > 0) {
+ size_t len = min(miter.length, resid);
void *start = dif_store(sector);
- int rest = 0;
+ size_t rest = 0;
if (dif_store_end < start + len)
rest = start + len - dif_store_end;
- paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
+ paddr = miter.addr;
if (read)
memcpy(paddr, start, len - rest);
@@ -1821,8 +1826,8 @@ static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
sector += len / sizeof(*dif_storep);
resid -= len;
- kunmap_atomic(paddr);
}
+ sg_miter_stop(&miter);
}
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
@@ -1832,7 +1837,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
struct sd_dif_tuple *sdt;
sector_t sector;
- for (i = 0; i < sectors; i++) {
+ for (i = 0; i < sectors; i++, ei_lba++) {
int ret;
sector = start_sec + i;
@@ -1846,8 +1851,6 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
dif_errors++;
return ret;
}
-
- ei_lba++;
}
dif_copy_prot(SCpnt, start_sec, sectors, true);
@@ -1886,17 +1889,19 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
return check_condition_result;
}
+ read_lock_irqsave(&atomic_rw, iflags);
+
/* DIX + T10 DIF */
if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
if (prot_ret) {
+ read_unlock_irqrestore(&atomic_rw, iflags);
mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
return illegal_condition_result;
}
}
- read_lock_irqsave(&atomic_rw, iflags);
ret = do_device_access(SCpnt, devip, lba, num, 0);
read_unlock_irqrestore(&atomic_rw, iflags);
if (ret == -1)
@@ -1931,55 +1936,62 @@ void dump_sector(unsigned char *buf, int len)
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
- int i, j, ret;
+ int ret;
struct sd_dif_tuple *sdt;
- struct scatterlist *dsgl;
- struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
- void *daddr, *paddr;
+ void *daddr;
sector_t sector = start_sec;
int ppage_offset;
+ int dpage_offset;
+ struct sg_mapping_iter diter;
+ struct sg_mapping_iter piter;
BUG_ON(scsi_sg_count(SCpnt) == 0);
BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
- ppage_offset = 0;
-
- /* For each data page */
- scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
- daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
- paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
-
- /* For each sector-sized chunk in data page */
- for (j = 0; j < dsgl->length; j += scsi_debug_sector_size) {
+ sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
+ scsi_prot_sg_count(SCpnt),
+ SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+ sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+ SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+
+ /* For each protection page */
+ while (sg_miter_next(&piter)) {
+ dpage_offset = 0;
+ if (WARN_ON(!sg_miter_next(&diter))) {
+ ret = 0x01;
+ goto out;
+ }
+ for (ppage_offset = 0; ppage_offset < piter.length;
+ ppage_offset += sizeof(struct sd_dif_tuple)) {
/* If we're at the end of the current
- * protection page advance to the next one
+ * data page advance to the next one
*/
- if (ppage_offset >= psgl->length) {
- kunmap_atomic(paddr);
- psgl = sg_next(psgl);
- BUG_ON(psgl == NULL);
- paddr = kmap_atomic(sg_page(psgl))
- + psgl->offset;
- ppage_offset = 0;
+ if (dpage_offset >= diter.length) {
+ if (WARN_ON(!sg_miter_next(&diter))) {
+ ret = 0x01;
+ goto out;
+ }
+ dpage_offset = 0;
}
- sdt = paddr + ppage_offset;
+ sdt = piter.addr + ppage_offset;
+ daddr = diter.addr + dpage_offset;
- ret = dif_verify(sdt, daddr + j, sector, ei_lba);
+ ret = dif_verify(sdt, daddr, sector, ei_lba);
if (ret) {
- dump_sector(daddr + j, scsi_debug_sector_size);
+ dump_sector(daddr, scsi_debug_sector_size);
goto out;
}
sector++;
ei_lba++;
- ppage_offset += sizeof(struct sd_dif_tuple);
+ dpage_offset += scsi_debug_sector_size;
}
-
- kunmap_atomic(paddr);
- kunmap_atomic(daddr);
+ diter.consumed = dpage_offset;
+ sg_miter_stop(&diter);
}
+ sg_miter_stop(&piter);
dif_copy_prot(SCpnt, start_sec, sectors, false);
dix_writes++;
@@ -1988,8 +2000,8 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
out:
dif_errors++;
- kunmap_atomic(paddr);
- kunmap_atomic(daddr);
+ sg_miter_stop(&diter);
+ sg_miter_stop(&piter);
return ret;
}
@@ -2089,17 +2101,19 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
if (ret)
return ret;
+ write_lock_irqsave(&atomic_rw, iflags);
+
/* DIX + T10 DIF */
if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
if (prot_ret) {
+ write_unlock_irqrestore(&atomic_rw, iflags);
mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
return illegal_condition_result;
}
}
- write_lock_irqsave(&atomic_rw, iflags);
ret = do_device_access(SCpnt, devip, lba, num, 1);
if (scsi_debug_lbp())
map_region(lba, num);
@@ -2178,6 +2192,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
struct unmap_block_desc *desc;
unsigned int i, payload_len, descriptors;
int ret;
+ unsigned long iflags;
ret = check_readiness(scmd, 1, devip);
if (ret)
@@ -2199,6 +2214,8 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
desc = (void *)&buf[8];
+ write_lock_irqsave(&atomic_rw, iflags);
+
for (i = 0 ; i < descriptors ; i++) {
unsigned long long lba = get_unaligned_be64(&desc[i].lba);
unsigned int num = get_unaligned_be32(&desc[i].blocks);
@@ -2213,6 +2230,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
ret = 0;
out:
+ write_unlock_irqrestore(&atomic_rw, iflags);
kfree(buf);
return ret;
@@ -2313,36 +2331,37 @@ static int resp_report_luns(struct scsi_cmnd * scp,
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
unsigned int num, struct sdebug_dev_info *devip)
{
- int i, j, ret = -1;
+ int j;
unsigned char *kaddr, *buf;
unsigned int offset;
- struct scatterlist *sg;
struct scsi_data_buffer *sdb = scsi_in(scp);
+ struct sg_mapping_iter miter;
/* better not to use temporary buffer. */
buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
- if (!buf)
- return ret;
+ if (!buf) {
+ mk_sense_buffer(devip, NOT_READY,
+ LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+ return check_condition_result;
+ }
scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
offset = 0;
- for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
- kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
- if (!kaddr)
- goto out;
+ sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
+ SG_MITER_ATOMIC | SG_MITER_TO_SG);
- for (j = 0; j < sg->length; j++)
- *(kaddr + sg->offset + j) ^= *(buf + offset + j);
+ while (sg_miter_next(&miter)) {
+ kaddr = miter.addr;
+ for (j = 0; j < miter.length; j++)
+ *(kaddr + j) ^= *(buf + offset + j);
- offset += sg->length;
- kunmap_atomic(kaddr);
+ offset += miter.length;
}
- ret = 0;
-out:
+ sg_miter_stop(&miter);
kfree(buf);
- return ret;
+ return 0;
}
/* When timer goes off this function is called. */
@@ -2744,6 +2763,7 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
*/
module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
+module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
@@ -2787,6 +2807,7 @@ MODULE_VERSION(SCSI_DEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
@@ -3248,7 +3269,7 @@ static struct attribute *sdebug_drv_attrs[] = {
};
ATTRIBUTE_GROUPS(sdebug_drv);
-struct device *pseudo_primary;
+static struct device *pseudo_primary;
static int __init scsi_debug_init(void)
{
@@ -3934,6 +3955,8 @@ static int sdebug_driver_probe(struct device * dev)
sdbg_host = to_sdebug_host(dev);
sdebug_driver_template.can_queue = scsi_debug_max_queue;
+ if (scsi_debug_clustering)
+ sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
if (NULL == hpnt) {
printk(KERN_ERR "%s: scsi_register failed\n", __func__);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 78b004da2885..771c16bfdbac 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2288,6 +2288,11 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
if (scsi_autopm_get_host(shost) < 0)
return FAILED;
+ if (!get_device(&dev->sdev_gendev)) {
+ rtn = FAILED;
+ goto out_put_autopm_host;
+ }
+
scmd = scsi_get_command(dev, GFP_KERNEL);
blk_rq_init(NULL, &req);
scmd->request = &req;
@@ -2345,6 +2350,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
scsi_run_host_queues(shost);
scsi_next_command(scmd);
+out_put_autopm_host:
scsi_autopm_put_host(shost);
return rtn;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62ec84b42e31..65a123d9c676 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -75,28 +75,6 @@ struct kmem_cache *scsi_sdb_cache;
*/
#define SCSI_QUEUE_DELAY 3
-/*
- * Function: scsi_unprep_request()
- *
- * Purpose: Remove all preparation done for a request, including its
- * associated scsi_cmnd, so that it can be requeued.
- *
- * Arguments: req - request to unprepare
- *
- * Lock status: Assumed that no locks are held upon entry.
- *
- * Returns: Nothing.
- */
-static void scsi_unprep_request(struct request *req)
-{
- struct scsi_cmnd *cmd = req->special;
-
- blk_unprep_request(req);
- req->special = NULL;
-
- scsi_put_command(cmd);
-}
-
/**
* __scsi_queue_insert - private queue insertion
* @cmd: The SCSI command being requeued
@@ -206,7 +184,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
*/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
- unsigned char *sense, int timeout, int retries, int flags,
+ unsigned char *sense, int timeout, int retries, u64 flags,
int *resid)
{
struct request *req;
@@ -257,7 +235,7 @@ EXPORT_SYMBOL(scsi_execute);
int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
struct scsi_sense_hdr *sshdr, int timeout, int retries,
- int *resid, int flags)
+ int *resid, u64 flags)
{
char *sense = NULL;
int result;
@@ -385,29 +363,12 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost)
return 0;
}
-/*
- * Function: scsi_run_queue()
- *
- * Purpose: Select a proper request queue to serve next
- *
- * Arguments: q - last request's queue
- *
- * Returns: Nothing
- *
- * Notes: The previous command was completely finished, start
- * a new one if possible.
- */
-static void scsi_run_queue(struct request_queue *q)
+static void scsi_starved_list_run(struct Scsi_Host *shost)
{
- struct scsi_device *sdev = q->queuedata;
- struct Scsi_Host *shost;
LIST_HEAD(starved_list);
+ struct scsi_device *sdev;
unsigned long flags;
- shost = sdev->host;
- if (scsi_target(sdev)->single_lun)
- scsi_single_lun_run(sdev);
-
spin_lock_irqsave(shost->host_lock, flags);
list_splice_init(&shost->starved_list, &starved_list);
@@ -459,6 +420,28 @@ static void scsi_run_queue(struct request_queue *q)
/* put any unprocessed entries back */
list_splice(&starved_list, &shost->starved_list);
spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Function: scsi_run_queue()
+ *
+ * Purpose: Select a proper request queue to serve next
+ *
+ * Arguments: q - last request's queue
+ *
+ * Returns: Nothing
+ *
+ * Notes: The previous command was completely finished, start
+ * a new one if possible.
+ */
+static void scsi_run_queue(struct request_queue *q)
+{
+ struct scsi_device *sdev = q->queuedata;
+
+ if (scsi_target(sdev)->single_lun)
+ scsi_single_lun_run(sdev);
+ if (!list_empty(&sdev->host->starved_list))
+ scsi_starved_list_run(sdev->host);
blk_run_queue(q);
}
@@ -497,16 +480,10 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
struct request *req = cmd->request;
unsigned long flags;
- /*
- * We need to hold a reference on the device to avoid the queue being
- * killed after the unlock and before scsi_run_queue is invoked which
- * may happen because scsi_unprep_request() puts the command which
- * releases its reference on the device.
- */
- get_device(&sdev->sdev_gendev);
-
spin_lock_irqsave(q->queue_lock, flags);
- scsi_unprep_request(req);
+ blk_unprep_request(req);
+ req->special = NULL;
+ scsi_put_command(cmd);
blk_requeue_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
@@ -520,13 +497,9 @@ void scsi_next_command(struct scsi_cmnd *cmd)
struct scsi_device *sdev = cmd->device;
struct request_queue *q = sdev->request_queue;
- /* need to hold a reference on the device before we let go of the cmd */
- get_device(&sdev->sdev_gendev);
-
scsi_put_command(cmd);
scsi_run_queue(q);
- /* ok to remove device now */
put_device(&sdev->sdev_gendev);
}
@@ -788,6 +761,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
char *description = NULL;
+ unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
if (result) {
sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -989,6 +963,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
action = ACTION_FAIL;
}
+ if (action != ACTION_FAIL &&
+ time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+ action = ACTION_FAIL;
+ description = "Command timed out";
+ }
+
switch (action) {
case ACTION_FAIL:
/* Give up and fail the remainder of the request */
@@ -1111,6 +1091,7 @@ err_exit:
scsi_release_buffers(cmd);
cmd->request->special = NULL;
scsi_put_command(cmd);
+ put_device(&cmd->device->sdev_gendev);
return error;
}
EXPORT_SYMBOL(scsi_init_io);
@@ -1121,9 +1102,15 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
struct scsi_cmnd *cmd;
if (!req->special) {
+ /* Bail if we can't get a reference to the device */
+ if (!get_device(&sdev->sdev_gendev))
+ return NULL;
+
cmd = scsi_get_command(sdev, GFP_ATOMIC);
- if (unlikely(!cmd))
+ if (unlikely(!cmd)) {
+ put_device(&sdev->sdev_gendev);
return NULL;
+ }
req->special = cmd;
} else {
cmd = req->special;
@@ -1286,6 +1273,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
struct scsi_cmnd *cmd = req->special;
scsi_release_buffers(cmd);
scsi_put_command(cmd);
+ put_device(&cmd->device->sdev_gendev);
req->special = NULL;
}
break;
@@ -1543,16 +1531,14 @@ static void scsi_softirq_done(struct request *rq)
* Lock status: IO request lock assumed to be held when called.
*/
static void scsi_request_fn(struct request_queue *q)
+ __releases(q->queue_lock)
+ __acquires(q->queue_lock)
{
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
struct scsi_cmnd *cmd;
struct request *req;
- if(!get_device(&sdev->sdev_gendev))
- /* We must be tearing the block queue down already */
- return;
-
/*
* To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests.
@@ -1641,7 +1627,7 @@ static void scsi_request_fn(struct request_queue *q)
goto out_delay;
}
- goto out;
+ return;
not_ready:
spin_unlock_irq(shost->host_lock);
@@ -1660,12 +1646,6 @@ static void scsi_request_fn(struct request_queue *q)
out_delay:
if (sdev->device_busy == 0)
blk_delay_queue(q, SCSI_QUEUE_DELAY);
-out:
- /* must be careful here...if we trigger the ->remove() function
- * we cannot be holding the q lock */
- spin_unlock_irq(q->queue_lock);
- put_device(&sdev->sdev_gendev);
- spin_lock_irq(q->queue_lock);
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 001e9ceda4c3..7454498c4091 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -18,35 +18,77 @@
#ifdef CONFIG_PM_SLEEP
-static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *))
+static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
{
+ return pm && pm->suspend ? pm->suspend(dev) : 0;
+}
+
+static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->freeze ? pm->freeze(dev) : 0;
+}
+
+static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+}
+
+static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->resume ? pm->resume(dev) : 0;
+}
+
+static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->thaw ? pm->thaw(dev) : 0;
+}
+
+static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->restore ? pm->restore(dev) : 0;
+}
+
+static int scsi_dev_type_suspend(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err;
+ /* flush pending in-flight resume operations, suspend is synchronous */
+ async_synchronize_full_domain(&scsi_sd_pm_domain);
+
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
- if (cb) {
- err = cb(dev);
- if (err)
- scsi_device_resume(to_scsi_device(dev));
- }
+ err = cb(dev, pm);
+ if (err)
+ scsi_device_resume(to_scsi_device(dev));
}
dev_dbg(dev, "scsi suspend: %d\n", err);
return err;
}
-static int scsi_dev_type_resume(struct device *dev, int (*cb)(struct device *))
+static int scsi_dev_type_resume(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err = 0;
- if (cb)
- err = cb(dev);
+ err = cb(dev, pm);
scsi_device_resume(to_scsi_device(dev));
dev_dbg(dev, "scsi resume: %d\n", err);
+
+ if (err == 0) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
return err;
}
static int
-scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
+scsi_bus_suspend_common(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
{
int err = 0;
@@ -66,20 +108,54 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
return err;
}
-static int
-scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *))
+static void async_sdev_resume(void *dev, async_cookie_t cookie)
{
- int err = 0;
+ scsi_dev_type_resume(dev, do_scsi_resume);
+}
- if (scsi_is_sdev_device(dev))
- err = scsi_dev_type_resume(dev, cb);
+static void async_sdev_thaw(void *dev, async_cookie_t cookie)
+{
+ scsi_dev_type_resume(dev, do_scsi_thaw);
+}
- if (err == 0) {
+static void async_sdev_restore(void *dev, async_cookie_t cookie)
+{
+ scsi_dev_type_resume(dev, do_scsi_restore);
+}
+
+static int scsi_bus_resume_common(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+ async_func_t fn;
+
+ if (!scsi_is_sdev_device(dev))
+ fn = NULL;
+ else if (cb == do_scsi_resume)
+ fn = async_sdev_resume;
+ else if (cb == do_scsi_thaw)
+ fn = async_sdev_thaw;
+ else if (cb == do_scsi_restore)
+ fn = async_sdev_restore;
+ else
+ fn = NULL;
+
+ if (fn) {
+ async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
+
+ /*
+		 * If a user has disabled async probing, a likely reason
+		 * is a storage enclosure that does not inject
+ * staggered spin-ups. For safety, make resume
+ * synchronous as well in that case.
+ */
+ if (strncmp(scsi_scan_type, "async", 5) != 0)
+ async_synchronize_full_domain(&scsi_sd_pm_domain);
+ } else {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
- return err;
+ return 0;
}
static int scsi_bus_prepare(struct device *dev)
@@ -97,38 +173,32 @@ static int scsi_bus_prepare(struct device *dev)
static int scsi_bus_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_suspend_common(dev, pm ? pm->suspend : NULL);
+ return scsi_bus_suspend_common(dev, do_scsi_suspend);
}
static int scsi_bus_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_resume_common(dev, pm ? pm->resume : NULL);
+ return scsi_bus_resume_common(dev, do_scsi_resume);
}
static int scsi_bus_freeze(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_suspend_common(dev, pm ? pm->freeze : NULL);
+ return scsi_bus_suspend_common(dev, do_scsi_freeze);
}
static int scsi_bus_thaw(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_resume_common(dev, pm ? pm->thaw : NULL);
+ return scsi_bus_resume_common(dev, do_scsi_thaw);
}
static int scsi_bus_poweroff(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_suspend_common(dev, pm ? pm->poweroff : NULL);
+ return scsi_bus_suspend_common(dev, do_scsi_poweroff);
}
static int scsi_bus_restore(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- return scsi_bus_resume_common(dev, pm ? pm->restore : NULL);
+ return scsi_bus_resume_common(dev, do_scsi_restore);
}
#else /* CONFIG_PM_SLEEP */
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f079a598bed4..48e5b657e79f 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -112,6 +112,7 @@ extern void scsi_exit_procfs(void);
#endif /* CONFIG_PROC_FS */
/* scsi_scan.c */
+extern char scsi_scan_type[];
extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, unsigned int, int);
@@ -166,6 +167,7 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
#endif /* CONFIG_PM_RUNTIME */
+extern struct async_domain scsi_sd_pm_domain;
extern struct async_domain scsi_sd_probe_domain;
/*
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 307a81137607..e02b3aab56ce 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns,
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif
-static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
+char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
MODULE_PARM_DESC(scan, "sync, async or none");
@@ -320,6 +320,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
struct Scsi_Host *shost = dev_to_shost(dev->parent);
unsigned long flags;
+ starget->state = STARGET_DEL;
transport_destroy_device(dev);
spin_lock_irqsave(shost->host_lock, flags);
if (shost->hostt->target_destroy)
@@ -371,6 +372,37 @@ static struct scsi_target *__scsi_find_target(struct device *parent,
}
/**
+ * scsi_target_reap_ref_release - remove target from visibility
+ * @kref: the reap_ref in the target being released
+ *
+ * Called on last put of reap_ref, which is the indication that no device
+ * under this target is visible anymore, so render the target invisible in
+ * sysfs. Note: we have to be in user context here because the target reaps
+ * should be done in places where the scsi device visibility is being removed.
+ */
+static void scsi_target_reap_ref_release(struct kref *kref)
+{
+ struct scsi_target *starget
+ = container_of(kref, struct scsi_target, reap_ref);
+
+ /*
+ * if we get here and the target is still in the CREATED state that
+ * means it was allocated but never made visible (because a scan
+ * turned up no LUNs), so don't call device_del() on it.
+ */
+ if (starget->state != STARGET_CREATED) {
+ transport_remove_device(&starget->dev);
+ device_del(&starget->dev);
+ }
+ scsi_target_destroy(starget);
+}
+
+static void scsi_target_reap_ref_put(struct scsi_target *starget)
+{
+ kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
+}
+
+/**
* scsi_alloc_target - allocate a new or find an existing target
* @parent: parent of the target (need not be a scsi host)
* @channel: target channel number (zero if no channels)
@@ -392,7 +424,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
+ shost->transportt->target_size;
struct scsi_target *starget;
struct scsi_target *found_target;
- int error;
+ int error, ref_got;
starget = kzalloc(size, GFP_KERNEL);
if (!starget) {
@@ -401,7 +433,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
}
dev = &starget->dev;
device_initialize(dev);
- starget->reap_ref = 1;
+ kref_init(&starget->reap_ref);
dev->parent = get_device(parent);
dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
dev->bus = &scsi_bus_type;
@@ -441,29 +473,36 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
return starget;
found:
- found_target->reap_ref++;
+ /*
+ * release routine already fired if kref is zero, so if we can still
+ * take the reference, the target must be alive. If we can't, it must
+ * be dying and we need to wait for a new target
+ */
+ ref_got = kref_get_unless_zero(&found_target->reap_ref);
+
spin_unlock_irqrestore(shost->host_lock, flags);
- if (found_target->state != STARGET_DEL) {
+ if (ref_got) {
put_device(dev);
return found_target;
}
- /* Unfortunately, we found a dying target; need to
- * wait until it's dead before we can get a new one */
+ /*
+ * Unfortunately, we found a dying target; need to wait until it's
+ * dead before we can get a new one. There is an anomaly here. We
+ * *should* call scsi_target_reap() to balance the kref_get() of the
+	 * reap_ref above. However, since the target is being released, it's
+ * already invisible and the reap_ref is irrelevant. If we call
+ * scsi_target_reap() we might spuriously do another device_del() on
+ * an already invisible target.
+ */
put_device(&found_target->dev);
- flush_scheduled_work();
+ /*
+ * length of time is irrelevant here, we just want to yield the CPU
+ * for a tick to avoid busy waiting for the target to die.
+ */
+ msleep(1);
goto retry;
}
-static void scsi_target_reap_usercontext(struct work_struct *work)
-{
- struct scsi_target *starget =
- container_of(work, struct scsi_target, ew.work);
-
- transport_remove_device(&starget->dev);
- device_del(&starget->dev);
- scsi_target_destroy(starget);
-}
-
/**
* scsi_target_reap - check to see if target is in use and destroy if not
* @starget: target to be checked
@@ -474,28 +513,13 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
*/
void scsi_target_reap(struct scsi_target *starget)
{
- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
- unsigned long flags;
- enum scsi_target_state state;
- int empty = 0;
-
- spin_lock_irqsave(shost->host_lock, flags);
- state = starget->state;
- if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
- empty = 1;
- starget->state = STARGET_DEL;
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
-
- if (!empty)
- return;
-
- BUG_ON(state == STARGET_DEL);
- if (state == STARGET_CREATED)
- scsi_target_destroy(starget);
- else
- execute_in_process_context(scsi_target_reap_usercontext,
- &starget->ew);
+ /*
+	 * serious problem if this triggers: STARGET_DEL is only set if
+	 * the reap_ref drops to zero, so we're trying to do another final put
+ * on an already released kref
+ */
+ BUG_ON(starget->state == STARGET_DEL);
+ scsi_target_reap_ref_put(starget);
}
/**
@@ -946,6 +970,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
}
+ if (sdev->scsi_level >= SCSI_3)
+ scsi_attach_vpd(sdev);
+
sdev->max_queue_depth = sdev->queue_depth;
/*
@@ -1532,6 +1559,10 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
}
mutex_unlock(&shost->scan_mutex);
scsi_autopm_put_target(starget);
+ /*
+ * paired with scsi_alloc_target(). Target will be destroyed unless
+ * scsi_probe_and_add_lun made an underlying device visible
+ */
scsi_target_reap(starget);
put_device(&starget->dev);
@@ -1612,8 +1643,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
out_reap:
scsi_autopm_put_target(starget);
- /* now determine if the target has any children at all
- * and if not, nuke it */
+ /*
+ * paired with scsi_alloc_target(): determine if the target has
+ * any children at all and if not, nuke it
+ */
scsi_target_reap(starget);
put_device(&starget->dev);
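An aside, not part of the patch: a minimal userspace sketch of the take-a-reference-unless-it-is-already-zero pattern that the reap_ref changes above rely on, using C11 atomics in place of the kernel's struct kref; all names here (target, try_get, put, release) are invented for illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct target {
	atomic_int refcount;		/* plays the role of reap_ref */
};

/* last put: tear the object down, like scsi_target_reap_ref_release() */
static void release(struct target *t)
{
	printf("releasing target\n");
	free(t);
}

/* take a reference only if the count is still non-zero,
 * mirroring what kref_get_unless_zero() does */
static bool try_get(struct target *t)
{
	int old = atomic_load(&t->refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&t->refcount, &old, old + 1))
			return true;	/* still alive, reference taken */
	return false;			/* release already fired; caller must retry */
}

static void put(struct target *t)
{
	if (atomic_fetch_sub(&t->refcount, 1) == 1)
		release(t);
}

int main(void)
{
	struct target *t = malloc(sizeof(*t));

	if (!t)
		return 1;
	atomic_init(&t->refcount, 1);	/* like kref_init() */
	if (try_get(t))			/* found an existing, live target */
		put(t);			/* balance the extra reference */
	put(t);				/* final put triggers release() */
	return 0;
}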
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 9117d0bf408e..074e8cc30955 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -300,7 +300,9 @@ store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
int ret = -EINVAL;
unsigned long deadline, flags;
- if (shost->transportt && shost->transportt->eh_strategy_handler)
+ if (shost->transportt &&
+ (shost->transportt->eh_strategy_handler ||
+ !shost->hostt->eh_host_reset_handler))
return ret;
if (!strncmp(buf, "off", strlen("off")))
@@ -383,17 +385,14 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
struct scsi_device *sdev;
struct device *parent;
- struct scsi_target *starget;
struct list_head *this, *tmp;
unsigned long flags;
sdev = container_of(work, struct scsi_device, ew.work);
parent = sdev->sdev_gendev.parent;
- starget = to_scsi_target(parent);
spin_lock_irqsave(sdev->host->host_lock, flags);
- starget->reap_ref++;
list_del(&sdev->siblings);
list_del(&sdev->same_target_siblings);
list_del(&sdev->starved_entry);
@@ -413,8 +412,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
/* NULL queue means the device can't be used */
sdev->request_queue = NULL;
- scsi_target_reap(scsi_target(sdev));
-
+ kfree(sdev->vpd_pg83);
+ kfree(sdev->vpd_pg80);
kfree(sdev->inquiry);
kfree(sdev);
@@ -579,7 +578,6 @@ static int scsi_sdev_check_buf_bit(const char *buf)
* Create the actual show/store functions and data structures.
*/
sdev_rd_attr (device_blocked, "%d\n");
-sdev_rd_attr (queue_depth, "%d\n");
sdev_rd_attr (device_busy, "%d\n");
sdev_rd_attr (type, "%d\n");
sdev_rd_attr (scsi_level, "%d\n");
@@ -649,23 +647,12 @@ store_rescan_field (struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
-static void sdev_store_delete_callback(struct device *dev)
-{
- scsi_remove_device(to_scsi_device(dev));
-}
-
static ssize_t
sdev_store_delete(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int rc;
-
- /* An attribute cannot be unregistered by one of its own methods,
- * so we have to use this roundabout approach.
- */
- rc = device_schedule_callback(dev, sdev_store_delete_callback);
- if (rc)
- count = rc;
+ if (device_remove_file_self(dev, attr))
+ scsi_remove_device(to_scsi_device(dev));
return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
@@ -723,10 +710,64 @@ show_queue_type_field(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%s\n", name);
}
-static DEVICE_ATTR(queue_type, S_IRUGO, show_queue_type_field, NULL);
+static ssize_t
+store_queue_type_field(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_host_template *sht = sdev->host->hostt;
+ int tag_type = 0, retval;
+ int prev_tag_type = scsi_get_tag_type(sdev);
+
+ if (!sdev->tagged_supported || !sht->change_queue_type)
+ return -EINVAL;
+
+ if (strncmp(buf, "ordered", 7) == 0)
+ tag_type = MSG_ORDERED_TAG;
+ else if (strncmp(buf, "simple", 6) == 0)
+ tag_type = MSG_SIMPLE_TAG;
+ else if (strncmp(buf, "none", 4) != 0)
+ return -EINVAL;
+
+ if (tag_type == prev_tag_type)
+ return count;
+
+ retval = sht->change_queue_type(sdev, tag_type);
+ if (retval < 0)
+ return retval;
+
+ return count;
+}
+
+static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
+ store_queue_type_field);
+
+#define sdev_vpd_pg_attr(_page) \
+static ssize_t \
+show_vpd_##_page(struct file *filp, struct kobject *kobj, \
+ struct bin_attribute *bin_attr, \
+ char *buf, loff_t off, size_t count) \
+{ \
+ struct device *dev = container_of(kobj, struct device, kobj); \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+ if (!sdev->vpd_##_page) \
+ return -EINVAL; \
+ return memory_read_from_buffer(buf, count, &off, \
+ sdev->vpd_##_page, \
+ sdev->vpd_##_page##_len); \
+} \
+static struct bin_attribute dev_attr_vpd_##_page = { \
+ .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \
+ .size = 0, \
+ .read = show_vpd_##_page, \
+};
+
+sdev_vpd_pg_attr(pg83);
+sdev_vpd_pg_attr(pg80);
static ssize_t
-show_iostat_counterbits(struct device *dev, struct device_attribute *attr, char *buf)
+show_iostat_counterbits(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
}
@@ -797,46 +838,9 @@ DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)
DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
-/* Default template for device attributes. May NOT be modified */
-static struct attribute *scsi_sdev_attrs[] = {
- &dev_attr_device_blocked.attr,
- &dev_attr_type.attr,
- &dev_attr_scsi_level.attr,
- &dev_attr_device_busy.attr,
- &dev_attr_vendor.attr,
- &dev_attr_model.attr,
- &dev_attr_rev.attr,
- &dev_attr_rescan.attr,
- &dev_attr_delete.attr,
- &dev_attr_state.attr,
- &dev_attr_timeout.attr,
- &dev_attr_eh_timeout.attr,
- &dev_attr_iocounterbits.attr,
- &dev_attr_iorequest_cnt.attr,
- &dev_attr_iodone_cnt.attr,
- &dev_attr_ioerr_cnt.attr,
- &dev_attr_modalias.attr,
- REF_EVT(media_change),
- REF_EVT(inquiry_change_reported),
- REF_EVT(capacity_change_reported),
- REF_EVT(soft_threshold_reached),
- REF_EVT(mode_parameter_change_reported),
- REF_EVT(lun_change_reported),
- NULL
-};
-
-static struct attribute_group scsi_sdev_attr_group = {
- .attrs = scsi_sdev_attrs,
-};
-
-static const struct attribute_group *scsi_sdev_attr_groups[] = {
- &scsi_sdev_attr_group,
- NULL
-};
-
static ssize_t
-sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int depth, retval;
struct scsi_device *sdev = to_scsi_device(dev);
@@ -859,10 +863,10 @@ sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
return count;
}
+sdev_show_function(queue_depth, "%d\n");
-static struct device_attribute sdev_attr_queue_depth_rw =
- __ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
- sdev_store_queue_depth_rw);
+static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
+ sdev_store_queue_depth);
static ssize_t
sdev_show_queue_ramp_up_period(struct device *dev,
@@ -890,40 +894,79 @@ sdev_store_queue_ramp_up_period(struct device *dev,
return period;
}
-static struct device_attribute sdev_attr_queue_ramp_up_period =
- __ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
- sdev_show_queue_ramp_up_period,
- sdev_store_queue_ramp_up_period);
+static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
+ sdev_show_queue_ramp_up_period,
+ sdev_store_queue_ramp_up_period);
-static ssize_t
-sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
{
+ struct device *dev = container_of(kobj, struct device, kobj);
struct scsi_device *sdev = to_scsi_device(dev);
- struct scsi_host_template *sht = sdev->host->hostt;
- int tag_type = 0, retval;
- int prev_tag_type = scsi_get_tag_type(sdev);
- if (!sdev->tagged_supported || !sht->change_queue_type)
- return -EINVAL;
- if (strncmp(buf, "ordered", 7) == 0)
- tag_type = MSG_ORDERED_TAG;
- else if (strncmp(buf, "simple", 6) == 0)
- tag_type = MSG_SIMPLE_TAG;
- else if (strncmp(buf, "none", 4) != 0)
- return -EINVAL;
+ if (attr == &dev_attr_queue_depth.attr &&
+ !sdev->host->hostt->change_queue_depth)
+ return S_IRUGO;
- if (tag_type == prev_tag_type)
- return count;
+ if (attr == &dev_attr_queue_ramp_up_period.attr &&
+ !sdev->host->hostt->change_queue_depth)
+ return 0;
- retval = sht->change_queue_type(sdev, tag_type);
- if (retval < 0)
- return retval;
+ if (attr == &dev_attr_queue_type.attr &&
+ !sdev->host->hostt->change_queue_type)
+ return S_IRUGO;
- return count;
+ return attr->mode;
}
+/* Default template for device attributes. May NOT be modified */
+static struct attribute *scsi_sdev_attrs[] = {
+ &dev_attr_device_blocked.attr,
+ &dev_attr_type.attr,
+ &dev_attr_scsi_level.attr,
+ &dev_attr_device_busy.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_model.attr,
+ &dev_attr_rev.attr,
+ &dev_attr_rescan.attr,
+ &dev_attr_delete.attr,
+ &dev_attr_state.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_eh_timeout.attr,
+ &dev_attr_iocounterbits.attr,
+ &dev_attr_iorequest_cnt.attr,
+ &dev_attr_iodone_cnt.attr,
+ &dev_attr_ioerr_cnt.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_queue_depth.attr,
+ &dev_attr_queue_type.attr,
+ &dev_attr_queue_ramp_up_period.attr,
+ REF_EVT(media_change),
+ REF_EVT(inquiry_change_reported),
+ REF_EVT(capacity_change_reported),
+ REF_EVT(soft_threshold_reached),
+ REF_EVT(mode_parameter_change_reported),
+ REF_EVT(lun_change_reported),
+ NULL
+};
+
+static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+ &dev_attr_vpd_pg83,
+ &dev_attr_vpd_pg80,
+ NULL
+};
+static struct attribute_group scsi_sdev_attr_group = {
+ .attrs = scsi_sdev_attrs,
+ .bin_attrs = scsi_sdev_bin_attrs,
+ .is_visible = scsi_sdev_attr_is_visible,
+};
+
+static const struct attribute_group *scsi_sdev_attr_groups[] = {
+ &scsi_sdev_attr_group,
+ NULL
+};
+
static int scsi_target_add(struct scsi_target *starget)
{
int error;
@@ -946,10 +989,6 @@ static int scsi_target_add(struct scsi_target *starget)
return 0;
}
-static struct device_attribute sdev_attr_queue_type_rw =
- __ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
- sdev_store_queue_type_rw);
-
/**
* scsi_sysfs_add_sdev - add scsi device to sysfs
* @sdev: scsi_device to add
@@ -1003,25 +1042,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
transport_add_device(&sdev->sdev_gendev);
sdev->is_visible = 1;
- /* create queue files, which may be writable, depending on the host */
- if (sdev->host->hostt->change_queue_depth) {
- error = device_create_file(&sdev->sdev_gendev,
- &sdev_attr_queue_depth_rw);
- error = device_create_file(&sdev->sdev_gendev,
- &sdev_attr_queue_ramp_up_period);
- }
- else
- error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
- if (error)
- return error;
-
- if (sdev->host->hostt->change_queue_type)
- error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
- else
- error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
- if (error)
- return error;
-
error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
if (error)
@@ -1071,6 +1091,13 @@ void __scsi_remove_device(struct scsi_device *sdev)
sdev->host->hostt->slave_destroy(sdev);
transport_destroy_device(dev);
+ /*
+ * Paired with the kref_get() in scsi_sysfs_initialize(). We have
+	 * removed sysfs visibility from the device, so make the target
+ * invisible if this was the last device underneath it.
+ */
+ scsi_target_reap(scsi_target(sdev));
+
put_device(dev);
}
@@ -1133,7 +1160,7 @@ void scsi_remove_target(struct device *dev)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
/* assuming new targets arrive at the end */
- starget->reap_ref++;
+ kref_get(&starget->reap_ref);
spin_unlock_irqrestore(shost->host_lock, flags);
if (last)
scsi_target_reap(last);
@@ -1217,6 +1244,12 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * device can now only be removed via __scsi_remove_device() so hold
+ * the target. Target will be held in CREATED state until something
+ * beneath it becomes visible (in which case it moves to RUNNING)
+ */
+ kref_get(&starget->reap_ref);
}
int scsi_is_sdev_device(const struct device *dev)
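An aside, not part of the patch: the vpd_pg80/vpd_pg83 show routines added above hand the work to memory_read_from_buffer(), which clamps the read to the cached VPD buffer. A rough userspace approximation of that clamping (read_from_buffer and its arguments are invented names, not the kernel helper itself):

#include <stddef.h>
#include <string.h>

/*
 * Copy at most 'count' bytes from 'from' (of total size 'available'),
 * starting at *ppos, into 'to'; advance *ppos and return the number of
 * bytes copied, or 0 once the end of the buffer is reached.
 */
static long read_from_buffer(void *to, size_t count, long long *ppos,
			     const void *from, size_t available)
{
	long long pos = *ppos;

	if (pos < 0)
		return -1;				/* invalid offset */
	if ((size_t)pos >= available)
		return 0;				/* past the end: EOF */
	if (count > available - (size_t)pos)
		count = available - (size_t)pos;	/* clamp to what is left */

	memcpy(to, (const char *)from + pos, count);
	*ppos = pos + (long long)count;
	return (long)count;
}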
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 84a1fdf67864..e51add05fb8d 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -155,7 +155,8 @@ void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
__blk_put_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
- __scsi_put_command(shost, cmd, &shost->shost_gendev);
+ __scsi_put_command(shost, cmd);
+ put_device(&shost->shost_gendev);
}
EXPORT_SYMBOL_GPL(scsi_host_put_command);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 4628fd5e0688..f80908f74ca9 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -261,6 +261,7 @@ static const struct {
{ FC_PORTSPEED_10GBIT, "10 Gbit" },
{ FC_PORTSPEED_8GBIT, "8 Gbit" },
{ FC_PORTSPEED_16GBIT, "16 Gbit" },
+ { FC_PORTSPEED_32GBIT, "32 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fd8ffe6bcfdd..0102a2d70dd8 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1225,7 +1225,7 @@ struct bus_type iscsi_flashnode_bus = {
* Adds a sysfs entry for the flashnode session attributes
*
* Returns:
- * pointer to allocated flashnode sess on sucess
+ * pointer to allocated flashnode sess on success
* %NULL on failure
*/
struct iscsi_bus_flash_session *
@@ -1423,7 +1423,7 @@ static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data)
}
/**
- * iscsi_destroy_flashnode_sess - destory flashnode session entry
+ * iscsi_destroy_flashnode_sess - destroy flashnode session entry
* @fnode_sess: pointer to flashnode session entry to be destroyed
*
* Deletes the flashnode session entry and all children flashnode connection
@@ -1453,7 +1453,7 @@ static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data)
}
/**
- * iscsi_destroy_all_flashnode - destory all flashnode session entries
+ * iscsi_destroy_all_flashnode - destroy all flashnode session entries
* @shost: pointer to host data
*
* Destroys all the flashnode session entries and all corresponding children
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index d47ffc8d3e43..13e898332e45 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -810,6 +810,7 @@ EXPORT_SYMBOL_GPL(srp_remove_host);
/**
* srp_stop_rport_timers - stop the transport layer recovery timers
+ * @rport: SRP remote port for which to stop the timers.
*
* Must be called after srp_remove_host() and scsi_remove_host(). The caller
* must hold a reference on the rport (rport->dev) and on the SCSI host
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 470954aba728..efcbcd182863 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1463,8 +1463,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
sd_print_sense_hdr(sdkp, &sshdr);
/* we need to evaluate the error return */
if (scsi_sense_valid(&sshdr) &&
- /* 0x3a is medium not present */
- sshdr.asc == 0x3a)
+ (sshdr.asc == 0x3a || /* medium not present */
+ sshdr.asc == 0x20)) /* invalid command */
/* this is no error here */
return 0;
@@ -2281,7 +2281,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
set_disk_ro(sdkp->disk, 0);
if (sdp->skip_ms_page_3f) {
- sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
+ sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
return;
}
@@ -2313,7 +2313,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
}
if (!scsi_status_is_good(res)) {
- sd_printk(KERN_WARNING, sdkp,
+ sd_first_printk(KERN_WARNING, sdkp,
"Test WP failed, assume Write Enabled\n");
} else {
sdkp->write_prot = ((data.device_specific & 0x80) != 0);
@@ -2381,7 +2381,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
if (!data.header_length) {
modepage = 6;
first_len = 0;
- sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
+ sd_first_printk(KERN_ERR, sdkp,
+ "Missing header in MODE_SENSE response\n");
}
/* that went OK, now ask for the proper length */
@@ -2394,7 +2395,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
if (len < 3)
goto bad_sense;
else if (len > SD_BUF_SIZE) {
- sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
+ sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
"data from %d to %d bytes\n", len, SD_BUF_SIZE);
len = SD_BUF_SIZE;
}
@@ -2417,8 +2418,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
/* We're interested only in the first 3 bytes.
*/
if (len - offset <= 2) {
- sd_printk(KERN_ERR, sdkp, "Incomplete "
- "mode parameter data\n");
+ sd_first_printk(KERN_ERR, sdkp,
+ "Incomplete mode parameter "
+ "data\n");
goto defaults;
} else {
modepage = page_code;
@@ -2432,14 +2434,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
else if (!spf && len - offset > 1)
offset += 2 + buffer[offset+1];
else {
- sd_printk(KERN_ERR, sdkp, "Incomplete "
- "mode parameter data\n");
+ sd_first_printk(KERN_ERR, sdkp,
+ "Incomplete mode "
+ "parameter data\n");
goto defaults;
}
}
}
- sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+ sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
goto defaults;
Page_found:
@@ -2453,7 +2456,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
- sd_printk(KERN_NOTICE, sdkp,
+ sd_first_printk(KERN_NOTICE, sdkp,
"Uses READ/WRITE(6), disabling FUA\n");
sdkp->DPOFUA = 0;
}
@@ -2475,16 +2478,19 @@ bad_sense:
sshdr.sense_key == ILLEGAL_REQUEST &&
sshdr.asc == 0x24 && sshdr.ascq == 0x0)
/* Invalid field in CDB */
- sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
+ sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
else
- sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
+ sd_first_printk(KERN_ERR, sdkp,
+ "Asking for cache data failed\n");
defaults:
if (sdp->wce_default_on) {
- sd_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n");
+ sd_first_printk(KERN_NOTICE, sdkp,
+ "Assuming drive cache: write back\n");
sdkp->WCE = 1;
} else {
- sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
+ sd_first_printk(KERN_ERR, sdkp,
+ "Assuming drive cache: write through\n");
sdkp->WCE = 0;
}
sdkp->RCD = 0;
@@ -2513,7 +2519,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
if (!scsi_status_is_good(res) || !data.header_length ||
data.length < 6) {
- sd_printk(KERN_WARNING, sdkp,
+ sd_first_printk(KERN_WARNING, sdkp,
"getting Control mode page failed, assume no ATO\n");
if (scsi_sense_valid(&sshdr))
@@ -2525,7 +2531,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
offset = data.header_length + data.block_descriptor_length;
if ((buffer[offset] & 0x3f) != 0x0a) {
- sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
+ sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
return;
}
@@ -3020,6 +3026,7 @@ static int sd_remove(struct device *dev)
devt = disk_devt(sdkp->disk);
scsi_autopm_get_device(sdkp->device);
+ async_synchronize_full_domain(&scsi_sd_pm_domain);
async_synchronize_full_domain(&scsi_sd_probe_domain);
blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 26895ff247c5..620871efbf0a 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -104,6 +104,12 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
(sdsk)->disk->disk_name, ##a) : \
sdev_printk(prefix, (sdsk)->device, fmt, ##a)
+#define sd_first_printk(prefix, sdsk, fmt, a...) \
+ do { \
+		if ((sdsk)->first_scan)					\
+ sd_printk(prefix, sdsk, fmt, ##a); \
+ } while (0)
+
static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
{
switch (scmd->cmnd[0]) {
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index eba183c428cf..80bfece1a2de 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/enclosure.h>
+#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -448,27 +449,18 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
static void ses_match_to_enclosure(struct enclosure_device *edev,
struct scsi_device *sdev)
{
- unsigned char *buf;
unsigned char *desc;
- unsigned int vpd_len;
struct efd efd = {
.addr = 0,
};
- buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
- if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE))
- goto free;
-
ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
- vpd_len = ((buf[2] << 8) | buf[3]) + 4;
- kfree(buf);
- buf = kmalloc(vpd_len, GFP_KERNEL);
- if (!buf ||scsi_get_vpd_page(sdev, 0x83, buf, vpd_len))
- goto free;
+ if (!sdev->vpd_pg83_len)
+ return;
- desc = buf + 4;
- while (desc < buf + vpd_len) {
+ desc = sdev->vpd_pg83 + 4;
+ while (desc < sdev->vpd_pg83 + sdev->vpd_pg83_len) {
enum scsi_protocol proto = desc[0] >> 4;
u8 code_set = desc[0] & 0x0f;
u8 piv = desc[1] & 0x80;
@@ -478,25 +470,15 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
if (piv && code_set == 1 && assoc == 1
&& proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8)
- efd.addr = (u64)desc[4] << 56 |
- (u64)desc[5] << 48 |
- (u64)desc[6] << 40 |
- (u64)desc[7] << 32 |
- (u64)desc[8] << 24 |
- (u64)desc[9] << 16 |
- (u64)desc[10] << 8 |
- (u64)desc[11];
+ efd.addr = get_unaligned_be64(&desc[4]);
desc += len + 4;
}
- if (!efd.addr)
- goto free;
+ if (efd.addr) {
+ efd.dev = &sdev->sdev_gendev;
- efd.dev = &sdev->sdev_gendev;
-
- enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
- free:
- kfree(buf);
+ enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
+ }
}
static int ses_intf_add(struct device *cdev,
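An aside, not part of the patch: the shift chain removed above is just a big-endian 64-bit load from a possibly unaligned byte buffer. A portable sketch of that helper using only standard C (the function name is invented; the kernel uses get_unaligned_be64() from <asm/unaligned.h>):

#include <stdint.h>
#include <stddef.h>

/* Assemble a 64-bit value from 8 big-endian bytes, one byte at a time,
 * so neither alignment nor host endianness matters. */
static uint64_t be64_from_bytes(const unsigned char *p)
{
	uint64_t v = 0;
	size_t i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}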
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a1d6986261a3..afc834e172c6 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2198,12 +2198,19 @@ static int st_set_options(struct scsi_tape *STp, long options)
struct st_modedef *STm;
char *name = tape_name(STp);
struct cdev *cd0, *cd1;
+ struct device *d0, *d1;
STm = &(STp->modes[STp->current_mode]);
if (!STm->defined) {
- cd0 = STm->cdevs[0]; cd1 = STm->cdevs[1];
+ cd0 = STm->cdevs[0];
+ cd1 = STm->cdevs[1];
+ d0 = STm->devs[0];
+ d1 = STm->devs[1];
memcpy(STm, &(STp->modes[0]), sizeof(struct st_modedef));
- STm->cdevs[0] = cd0; STm->cdevs[1] = cd1;
+ STm->cdevs[0] = cd0;
+ STm->cdevs[1] = cd1;
+ STm->devs[0] = d0;
+ STm->devs[1] = d1;
modes_defined = 1;
DEBC(printk(ST_DEB_MSG
"%s: Initialized mode %d definition from mode 0\n",
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index f1e4b4148c75..a4abce9d526e 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -259,7 +259,7 @@ found:
instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
if (instance->irq != SCSI_IRQ_NONE)
- if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128",
+ if (request_irq(instance->irq, t128_intr, 0, "t128",
instance)) {
printk("scsi%d : IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 9c216e563568..5a03bb3bcfef 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -873,7 +873,7 @@ static int port_detect \
/* Board detected, allocate its IRQ */
if (request_irq(irq, do_interrupt_handler,
- IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
+ (subversion == ESA) ? IRQF_SHARED : 0,
driver_name, (void *) &sha[j])) {
printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
goto freelock;
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index b9755ec0e812..c88e1468aad7 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's para-virtualized SCSI HBA.
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -32,6 +32,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
#include "vmw_pvscsi.h"
@@ -44,7 +45,7 @@ MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
-#define PVSCSI_DEFAULT_QUEUE_DEPTH 64
+#define PVSCSI_DEFAULT_QUEUE_DEPTH 254
#define SGL_SIZE PAGE_SIZE
struct pvscsi_sg_list {
@@ -62,6 +63,7 @@ struct pvscsi_ctx {
dma_addr_t dataPA;
dma_addr_t sensePA;
dma_addr_t sglPA;
+ struct completion *abort_cmp;
};
struct pvscsi_adapter {
@@ -71,6 +73,7 @@ struct pvscsi_adapter {
bool use_msi;
bool use_msix;
bool use_msg;
+ bool use_req_threshold;
spinlock_t hw_lock;
@@ -102,18 +105,22 @@ struct pvscsi_adapter {
/* Command line parameters */
-static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+static int pvscsi_ring_pages;
static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
static bool pvscsi_disable_msi;
static bool pvscsi_disable_msix;
static bool pvscsi_use_msg = true;
+static bool pvscsi_use_req_threshold = true;
#define PVSCSI_RW (S_IRUSR | S_IWUSR)
module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
- __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING)
+ "[up to 16 targets],"
+ __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
+ "[for 16+ targets])");
module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
@@ -121,7 +128,7 @@ MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
- __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")");
+ __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");
module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
@@ -132,6 +139,10 @@ MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
+module_param_named(use_req_threshold, pvscsi_use_req_threshold,
+ bool, PVSCSI_RW);
+MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)");
+
static const struct pci_device_id pvscsi_pci_tbl[] = {
{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
{ 0 }
@@ -177,6 +188,7 @@ static void pvscsi_release_context(struct pvscsi_adapter *adapter,
struct pvscsi_ctx *ctx)
{
ctx->cmd = NULL;
+ ctx->abort_cmp = NULL;
list_add(&ctx->list, &adapter->cmd_pool);
}
@@ -280,10 +292,15 @@ static int scsi_is_rw(unsigned char op)
static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
unsigned char op)
{
- if (scsi_is_rw(op))
- pvscsi_kick_rw_io(adapter);
- else
+ if (scsi_is_rw(op)) {
+ struct PVSCSIRingsState *s = adapter->rings_state;
+
+ if (!adapter->use_req_threshold ||
+ s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
+ pvscsi_kick_rw_io(adapter);
+ } else {
pvscsi_process_request_ring(adapter);
+ }
}
static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
@@ -487,6 +504,35 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
}
}
+static int pvscsi_change_queue_depth(struct scsi_device *sdev,
+ int qdepth,
+ int reason)
+{
+ int max_depth;
+ struct Scsi_Host *shost = sdev->host;
+
+ if (reason != SCSI_QDEPTH_DEFAULT)
+ /*
+ * We support only changing default.
+ */
+ return -EOPNOTSUPP;
+
+ max_depth = shost->can_queue;
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+
+ if (sdev->inquiry_len > 7)
+ sdev_printk(KERN_INFO, sdev,
+ "qdepth(%d), tagged(%d), simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
+ sdev->queue_depth, sdev->tagged_supported,
+ sdev->simple_tags, sdev->ordered_tags,
+ sdev->scsi_level, (sdev->inquiry[7] & 2) >> 1);
+ return sdev->queue_depth;
+}
+
/*
* Pull a completion descriptor off and pass the completion back
* to the SCSI mid layer.
@@ -496,15 +542,27 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
{
struct pvscsi_ctx *ctx;
struct scsi_cmnd *cmd;
+ struct completion *abort_cmp;
u32 btstat = e->hostStatus;
u32 sdstat = e->scsiStatus;
ctx = pvscsi_get_context(adapter, e->context);
cmd = ctx->cmd;
+ abort_cmp = ctx->abort_cmp;
pvscsi_unmap_buffers(adapter, ctx);
pvscsi_release_context(adapter, ctx);
- cmd->result = 0;
+ if (abort_cmp) {
+ /*
+ * The command was requested to be aborted. Just signal that
+ * the request completed and swallow the actual cmd completion
+ * here. The abort handler will post a completion for this
+ * command indicating that it got successfully aborted.
+ */
+ complete(abort_cmp);
+ return;
+ }
+ cmd->result = 0;
if (sdstat != SAM_STAT_GOOD &&
(btstat == BTSTAT_SUCCESS ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
@@ -726,6 +784,8 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
struct pvscsi_ctx *ctx;
unsigned long flags;
+ int result = SUCCESS;
+ DECLARE_COMPLETION_ONSTACK(abort_cmp);
scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
adapter->host->host_no, cmd);
@@ -748,13 +808,40 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
goto out;
}
+ /*
+ * Mark that the command has been requested to be aborted and issue
+ * the abort.
+ */
+ ctx->abort_cmp = &abort_cmp;
+
pvscsi_abort_cmd(adapter, ctx);
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+ /* Wait for 2 secs for the completion. */
+ wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
+ spin_lock_irqsave(&adapter->hw_lock, flags);
- pvscsi_process_completion_ring(adapter);
+ if (!completion_done(&abort_cmp)) {
+ /*
+ * Failed to abort the command, unmark the fact that it
+ * was requested to be aborted.
+ */
+ ctx->abort_cmp = NULL;
+ result = FAILED;
+ scmd_printk(KERN_DEBUG, cmd,
+ "Failed to get completion for aborted cmd %p\n",
+ cmd);
+ goto out;
+ }
+
+ /*
+ * Successfully aborted the command.
+ */
+ cmd->result = (DID_ABORT << 16);
+ cmd->scsi_done(cmd);
out:
spin_unlock_irqrestore(&adapter->hw_lock, flags);
- return SUCCESS;
+ return result;
}
/*
@@ -911,6 +998,7 @@ static struct scsi_host_template pvscsi_template = {
.dma_boundary = UINT_MAX,
.max_sectors = 0xffff,
.use_clustering = ENABLE_CLUSTERING,
+ .change_queue_depth = pvscsi_change_queue_depth,
.eh_abort_handler = pvscsi_abort,
.eh_device_reset_handler = pvscsi_device_reset,
.eh_bus_reset_handler = pvscsi_bus_reset,
@@ -1034,6 +1122,34 @@ static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
return 1;
}
+static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
+ bool enable)
+{
+ u32 val;
+
+ if (!pvscsi_use_req_threshold)
+ return false;
+
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
+ PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
+ val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS);
+ if (val == -1) {
+ printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n");
+ return false;
+ } else {
+ struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 };
+ cmd_msg.enable = enable;
+ printk(KERN_INFO
+ "vmw_pvscsi: %sabling reqCallThreshold\n",
+ enable ? "en" : "dis");
+ pvscsi_write_cmd_desc(adapter,
+ PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
+ &cmd_msg, sizeof(cmd_msg));
+ return pvscsi_reg_read(adapter,
+ PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0;
+ }
+}
+
static irqreturn_t pvscsi_isr(int irq, void *devp)
{
struct pvscsi_adapter *adapter = devp;
@@ -1236,11 +1352,12 @@ exit:
static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pvscsi_adapter *adapter;
- struct Scsi_Host *host;
- struct device *dev;
+ struct pvscsi_adapter adapter_temp;
+ struct Scsi_Host *host = NULL;
unsigned int i;
unsigned long flags = 0;
int error;
+ u32 max_id;
error = -ENODEV;
@@ -1258,34 +1375,19 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_disable_device;
}
- pvscsi_template.can_queue =
- min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
- PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
- pvscsi_template.cmd_per_lun =
- min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
- host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
- if (!host) {
- printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
- goto out_disable_device;
- }
-
- adapter = shost_priv(host);
+ /*
+ * Let's use a temp pvscsi_adapter struct until we find the number of
+	 * targets on the adapter; after that we will switch to the real
+ * allocated struct.
+ */
+ adapter = &adapter_temp;
memset(adapter, 0, sizeof(*adapter));
adapter->dev = pdev;
- adapter->host = host;
-
- spin_lock_init(&adapter->hw_lock);
-
- host->max_channel = 0;
- host->max_id = 16;
- host->max_lun = 1;
- host->max_cmd_len = 16;
-
adapter->rev = pdev->revision;
if (pci_request_regions(pdev, "vmw_pvscsi")) {
printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
- goto out_free_host;
+ goto out_disable_device;
}
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1301,7 +1403,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (i == DEVICE_COUNT_RESOURCE) {
printk(KERN_ERR
"vmw_pvscsi: adapter has no suitable MMIO region\n");
- goto out_release_resources;
+ goto out_release_resources_and_disable;
}
adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
@@ -1310,10 +1412,60 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(KERN_ERR
"vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
i, PVSCSI_MEM_SPACE_SIZE);
- goto out_release_resources;
+ goto out_release_resources_and_disable;
}
pci_set_master(pdev);
+
+ /*
+ * Ask the device for max number of targets before deciding the
+ * default pvscsi_ring_pages value.
+ */
+ max_id = pvscsi_get_max_targets(adapter);
+ printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);
+
+ if (pvscsi_ring_pages == 0)
+ /*
+		 * Set the right default value: up to 16 targets it is 8 pages,
+		 * above that it is the maximum.
+ */
+ pvscsi_ring_pages = (max_id > 16) ?
+ PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
+ PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+ printk(KERN_INFO
+ "vmw_pvscsi: setting ring_pages to %d\n",
+ pvscsi_ring_pages);
+
+ pvscsi_template.can_queue =
+ min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
+ PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+ pvscsi_template.cmd_per_lun =
+ min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
+ host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
+ if (!host) {
+ printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
+ goto out_release_resources_and_disable;
+ }
+
+ /*
+ * Let's use the real pvscsi_adapter struct here onwards.
+ */
+ adapter = shost_priv(host);
+ memset(adapter, 0, sizeof(*adapter));
+ adapter->dev = pdev;
+ adapter->host = host;
+ /*
+ * Copy back what we already have to the allocated adapter struct.
+ */
+ adapter->rev = adapter_temp.rev;
+ adapter->mmioBase = adapter_temp.mmioBase;
+
+ spin_lock_init(&adapter->hw_lock);
+ host->max_channel = 0;
+ host->max_lun = 1;
+ host->max_cmd_len = 16;
+ host->max_id = max_id;
+
pci_set_drvdata(pdev, host);
ll_adapter_reset(adapter);
@@ -1327,13 +1479,6 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/*
- * Ask the device for max number of targets.
- */
- host->max_id = pvscsi_get_max_targets(adapter);
- dev = pvscsi_dev(adapter);
- dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
-
- /*
* From this point on we should reset the adapter if anything goes
* wrong.
*/
@@ -1373,6 +1518,10 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
flags = IRQF_SHARED;
}
+ adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
+ printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
+ adapter->use_req_threshold ? "en" : "dis");
+
error = request_irq(adapter->irq, pvscsi_isr, flags,
"vmw_pvscsi", adapter);
if (error) {
@@ -1402,12 +1551,15 @@ out_reset_adapter:
ll_adapter_reset(adapter);
out_release_resources:
pvscsi_release_resources(adapter);
-out_free_host:
scsi_host_put(host);
out_disable_device:
pci_disable_device(pdev);
return error;
+
+out_release_resources_and_disable:
+ pvscsi_release_resources(adapter);
+ goto out_disable_device;
}
static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
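An aside, not part of the patch: the coalescing test added to pvscsi_kick_io() compares reqProdIdx - reqConsIdx against reqCallThreshold; because the ring indices are u32, the subtraction stays correct even after the producer index wraps. A tiny sketch of that check (should_kick is an invented name):

#include <stdint.h>
#include <stdbool.h>

/*
 * True when at least 'threshold' requests are outstanding on the ring.
 * Unsigned subtraction is modulo 2^32, so the result is still the number
 * of outstanding entries after the producer index wraps around.
 */
static bool should_kick(uint32_t prod, uint32_t cons, uint32_t threshold)
{
	return prod - cons >= threshold;
}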
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 3546e8662e30..ce4588851274 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -1,7 +1,7 @@
/*
* VMware PVSCSI header file
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -26,7 +26,7 @@
#include <linux/types.h>
-#define PVSCSI_DRIVER_VERSION_STRING "1.0.2.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING "1.0.5.0-k"
#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
@@ -117,8 +117,9 @@ enum PVSCSICommands {
PVSCSI_CMD_CONFIG = 7,
PVSCSI_CMD_SETUP_MSG_RING = 8,
PVSCSI_CMD_DEVICE_UNPLUG = 9,
+ PVSCSI_CMD_SETUP_REQCALLTHRESHOLD = 10,
- PVSCSI_CMD_LAST = 10 /* has to be last */
+ PVSCSI_CMD_LAST = 11 /* has to be last */
};
/*
@@ -141,6 +142,14 @@ struct PVSCSICmdDescConfigCmd {
u32 _pad;
} __packed;
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_REQCALLTHRESHOLD --
+ */
+
+struct PVSCSICmdDescSetupReqCall {
+ u32 enable;
+} __packed;
+
enum PVSCSIConfigPageType {
PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
PVSCSI_CONFIG_PAGE_PHY = 0x1959,
@@ -261,7 +270,9 @@ struct PVSCSIRingsState {
u32 cmpConsIdx;
u32 cmpNumEntriesLog2;
- u8 _pad[104];
+ u32 reqCallThreshold;
+
+ u8 _pad[100];
u32 msgProdIdx;
u32 msgConsIdx;
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index f9a6e4b0affe..32674236fec7 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1252,7 +1252,7 @@ static int wd7000_init(Adapter * host)
return 0;
- if (request_irq(host->irq, wd7000_intr, IRQF_DISABLED, "wd7000", host)) {
+ if (request_irq(host->irq, wd7000_intr, 0, "wd7000", host)) {
printk("wd7000_init: can't get IRQ %d.\n", host->irq);
return (0);
}
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index 1ebe67cd1833..7442bc130055 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -36,9 +36,47 @@ static void sh_clk_write(int value, struct clk *clk)
iowrite32(value, clk->mapped_reg);
}
+static unsigned int r8(const void __iomem *addr)
+{
+ return ioread8(addr);
+}
+
+static unsigned int r16(const void __iomem *addr)
+{
+ return ioread16(addr);
+}
+
+static unsigned int r32(const void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
static int sh_clk_mstp_enable(struct clk *clk)
{
sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
+ if (clk->status_reg) {
+ unsigned int (*read)(const void __iomem *addr);
+ int i;
+ void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
+ (phys_addr_t)clk->enable_reg + clk->mapped_reg;
+
+ if (clk->flags & CLK_ENABLE_REG_8BIT)
+ read = r8;
+ else if (clk->flags & CLK_ENABLE_REG_16BIT)
+ read = r16;
+ else
+ read = r32;
+
+ for (i = 1000;
+ (read(mapped_status) & (1 << clk->enable_bit)) && i;
+ i--)
+ cpu_relax();
+ if (!i) {
+ pr_err("cpg: failed to enable %p[%d]\n",
+ clk->enable_reg, clk->enable_bit);
+ return -ETIMEDOUT;
+ }
+ }
return 0;
}
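An aside, not part of the patch: the status-register wait added to sh_clk_mstp_enable() above follows the usual bounded-poll idiom, retry a fixed number of times and give up with -ETIMEDOUT if the bit never clears. A self-contained sketch of the idiom (poll_until_clear and its parameters are invented names):

#include <errno.h>

/*
 * Poll read_status() until 'bit' reads back as clear or the retry budget
 * runs out. Returns 0 on success, -ETIMEDOUT if the bit stayed set.
 */
static int poll_until_clear(unsigned int (*read_status)(void),
			    unsigned int bit, int retries)
{
	while (retries-- > 0) {
		if (!(read_status() & bit))
			return 0;
		/* a real driver would cpu_relax() or delay here between reads */
	}
	return -ETIMEDOUT;
}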
diff --git a/drivers/sh/intc/Kconfig b/drivers/sh/intc/Kconfig
index a305731742a9..f7d90617c9d9 100644
--- a/drivers/sh/intc/Kconfig
+++ b/drivers/sh/intc/Kconfig
@@ -6,7 +6,7 @@ comment "Interrupt controller options"
config INTC_USERIMASK
bool "Userspace interrupt masking support"
- depends on ARCH_SHMOBILE || (SUPERH && CPU_SH4A)
+ depends on ARCH_SHMOBILE || (SUPERH && CPU_SH4A) || COMPILE_TEST
help
This enables support for hardware-assisted userspace hardirq
masking.
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 581ee2a8856b..60f2b41c7310 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -150,7 +150,7 @@ config SPI_BUTTERFLY
config SPI_CLPS711X
tristate "CLPS711X host SPI controller"
- depends on ARCH_CLPS711X
+ depends on ARCH_CLPS711X || COMPILE_TEST
help
This enables dedicated general purpose SPI/Microwire1-compatible
master mode interface (SSI1) for CLPS711X-based CPUs.
@@ -212,7 +212,6 @@ config SPI_IMX
tristate "Freescale i.MX SPI controllers"
depends on ARCH_MXC || COMPILE_TEST
select SPI_BITBANG
- default m if IMX_HAVE_PLATFORM_SPI_IMX
help
This enables using the Freescale i.MX SPI controllers in master
mode.
@@ -270,6 +269,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select SPI_BITBANG
+ select REGMAP_MMIO
depends on SOC_VF610 || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
@@ -307,7 +307,7 @@ config SPI_OMAP_UWIRE
config SPI_OMAP24XX
tristate "McSPI driver for OMAP"
- depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SH
+ depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
SPI master controller for OMAP24XX and later Multichannel SPI
@@ -381,6 +381,19 @@ config SPI_RSPI
help
SPI driver for Renesas RSPI and QSPI blocks.
+config SPI_QUP
+ tristate "Qualcomm SPI controller with QUP interface"
+ depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ help
+ Qualcomm Universal Peripheral (QUP) core is an AHB slave that
+ provides a common data path (an output FIFO and an input FIFO)
+ for serial peripheral interface (SPI) mini-core. SPI in master
+ mode supports up to 50MHz, up to four chip selects, programmable
+ data path from 4 bits to 32 bits and numerous protocol variants.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi_qup.
+
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
depends on ARCH_S3C24XX
@@ -416,7 +429,6 @@ config SPI_SH_MSIOF
tristate "SuperH MSIOF SPI controller"
depends on HAVE_CLK
depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
- select SPI_BITBANG
help
SPI driver for SuperH and SH Mobile MSIOF blocks.
@@ -446,6 +458,19 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
+config SPI_SUN4I
+ tristate "Allwinner A10 SoCs SPI controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ SPI driver for Allwinner sun4i, sun5i and sun7i SoCs
+
+config SPI_SUN6I
+ tristate "Allwinner A31 SPI controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ This enables using the SPI controller on the Allwinner A31 SoCs.
+
config SPI_MXS
tristate "Freescale MXS SPI controller"
depends on ARCH_MXS
@@ -478,13 +503,6 @@ config SPI_TEGRA20_SLINK
help
SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
-config SPI_TI_SSP
- tristate "TI Sequencer Serial Port - SPI Support"
- depends on MFD_TI_SSP
- help
- This selects an SPI master implementation using a TI sequencer
- serial port.
-
config SPI_TOPCLIFF_PCH
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
depends on PCI
@@ -520,6 +538,19 @@ config SPI_XILINX
Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
+config SPI_XTENSA_XTFPGA
+ tristate "Xtensa SPI controller for xtfpga"
+ depends on (XTENSA && XTENSA_PLATFORM_XTFPGA) || COMPILE_TEST
+ select SPI_BITBANG
+ help
+ SPI driver for xtfpga SPI master controller.
+
+ This simple SPI master controller is built into xtfpga bitstreams
+	  and is used to control the daughterboard audio codec. It always
+	  transfers 16-bit words in SPI mode 0, automatically asserting CS on
+	  transfer start and deasserting on end.
+
+
config SPI_NUC900
tristate "Nuvoton NUC900 series SPI"
depends on ARCH_W90X900
@@ -546,7 +577,7 @@ config SPI_DW_MID_DMA
config SPI_DW_MMIO
tristate "Memory-mapped io interface driver for DW SPI core"
- depends on SPI_DESIGNWARE && HAVE_CLK
+ depends on SPI_DESIGNWARE
#
# There are lots of SPI device types, with sensors and memory
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 95af48d2d360..bd792669e563 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -59,6 +59,7 @@ spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o
spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
spi-s3c24xx-hw-y := spi-s3c24xx.o
@@ -70,12 +71,14 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
+obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
+obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
-obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o
obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
+obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 5d7deaf62867..5b5709a5c957 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -13,7 +13,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -200,7 +199,6 @@ static irqreturn_t altera_spi_irq(int irq, void *dev)
static int altera_spi_probe(struct platform_device *pdev)
{
- struct altera_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct altera_spi *hw;
struct spi_master *master;
struct resource *res;
@@ -214,6 +212,8 @@ static int altera_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->num_chipselect = 16;
master->mode_bits = SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->dev.of_node = pdev->dev.of_node;
hw = spi_master_get_devdata(master);
platform_set_drvdata(pdev, hw);
@@ -245,9 +245,6 @@ static int altera_spi_probe(struct platform_device *pdev)
if (err)
goto exit;
}
- /* find platform data */
- if (!platp)
- hw->bitbang.master->dev.of_node = pdev->dev.of_node;
/* register our spi controller */
err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index c3b2fb9b6713..3898b0b9ee77 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 5d7b07f08326..8005f9869481 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -9,7 +9,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -26,6 +25,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
/* SPI register offsets */
#define SPI_CR 0x0000
@@ -993,13 +993,6 @@ static int atmel_spi_setup(struct spi_device *spi)
as = spi_master_get_devdata(spi->master);
- if (spi->chip_select > spi->master->num_chipselect) {
- dev_dbg(&spi->dev,
- "setup: invalid chipselect %u (%u defined)\n",
- spi->chip_select, spi->master->num_chipselect);
- return -EINVAL;
- }
-
/* see notes above re chipselect */
if (!atmel_spi_is_v2(as)
&& spi->chip_select == 0
@@ -1087,14 +1080,6 @@ static int atmel_spi_one_transfer(struct spi_master *master,
}
}
- if (xfer->bits_per_word > 8) {
- if (xfer->len % 2) {
- dev_dbg(&spi->dev,
- "buffer len should be 16 bits aligned\n");
- return -EINVAL;
- }
- }
-
/*
* DMA map early, for performance (empties dcache ASAP) and
* better fault reporting.
@@ -1221,9 +1206,6 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
dev_dbg(&spi->dev, "new message %p submitted for %s\n",
msg, dev_name(&spi->dev));
- if (unlikely(list_empty(&msg->transfers)))
- return -EINVAL;
-
atmel_spi_lock(as);
cs_activate(as, spi);
@@ -1244,10 +1226,10 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
dev_dbg(&spi->dev,
- " xfer %p: len %u tx %p/%08x rx %p/%08x\n",
+ " xfer %p: len %u tx %p/%pad rx %p/%pad\n",
xfer, xfer->len,
- xfer->tx_buf, xfer->tx_dma,
- xfer->rx_buf, xfer->rx_dma);
+ xfer->tx_buf, &xfer->tx_dma,
+ xfer->rx_buf, &xfer->rx_dma);
}
msg_done:
@@ -1303,6 +1285,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct atmel_spi *as;
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(&pdev->dev);
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENXIO;
@@ -1465,6 +1450,9 @@ static int atmel_spi_suspend(struct device *dev)
}
clk_disable_unprepare(as->clk);
+
+ pinctrl_pm_select_sleep_state(dev);
+
return 0;
}
@@ -1474,6 +1462,8 @@ static int atmel_spi_resume(struct device *dev)
struct atmel_spi *as = spi_master_get_devdata(master);
int ret;
+ pinctrl_pm_select_default_state(dev);
+
clk_prepare_enable(as->clk);
/* Start the queue running */
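
Note: the atmel hunks above follow a recurring PM pattern -- park the pins via pinctrl while the controller sleeps and restore them before touching the hardware again. A minimal sketch of that pattern, with illustrative foo_* names and the clock/queue handling elided:

#include <linux/device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>

static int foo_spi_suspend(struct device *dev)
{
        /* ... stop the queue, disable the controller clock ... */
        pinctrl_pm_select_sleep_state(dev);     /* park the pins */
        return 0;
}

static int foo_spi_resume(struct device *dev)
{
        pinctrl_pm_select_default_state(dev);   /* restore pins first */
        /* ... re-enable the clock, restart the queue ... */
        return 0;
}

static SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);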
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index c4141c92bcff..67375a11d4bd 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -55,8 +55,6 @@ struct au1550_spi {
volatile psc_spi_t __iomem *regs;
int irq;
- unsigned freq_max;
- unsigned freq_min;
unsigned len;
unsigned tx_count;
@@ -248,11 +246,8 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
hz = t->speed_hz;
}
- if (hz > spi->max_speed_hz || hz > hw->freq_max || hz < hw->freq_min) {
- dev_err(&spi->dev, "setupxfer: clock rate=%d out of range\n",
- hz);
+ if (!hz)
return -EINVAL;
- }
au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
@@ -287,23 +282,6 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
return 0;
}
-static int au1550_spi_setup(struct spi_device *spi)
-{
- struct au1550_spi *hw = spi_master_get_devdata(spi->master);
-
- if (spi->max_speed_hz == 0)
- spi->max_speed_hz = hw->freq_max;
- if (spi->max_speed_hz > hw->freq_max
- || spi->max_speed_hz < hw->freq_min)
- return -EINVAL;
- /*
- * NOTE: cannot change speed and other hw settings immediately,
- * otherwise sharing of spi bus is not possible,
- * so do not call setupxfer(spi, NULL) here
- */
- return 0;
-}
-
/*
* for DMA SPI transfers, we have to set up the RX channel; otherwise there is
* no reliable way to recognize that the SPI transfer is done
@@ -838,7 +816,6 @@ static int au1550_spi_probe(struct platform_device *pdev)
hw->bitbang.master = hw->master;
hw->bitbang.setup_transfer = au1550_spi_setupxfer;
hw->bitbang.chipselect = au1550_spi_chipsel;
- hw->bitbang.master->setup = au1550_spi_setup;
hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
if (hw->usedma) {
@@ -909,8 +886,9 @@ static int au1550_spi_probe(struct platform_device *pdev)
{
int min_div = (2 << 0) * (2 * (4 + 1));
int max_div = (2 << 3) * (2 * (63 + 1));
- hw->freq_max = hw->pdata->mainclk_hz / min_div;
- hw->freq_min = hw->pdata->mainclk_hz / (max_div + 1) + 1;
+ master->max_speed_hz = hw->pdata->mainclk_hz / min_div;
+ master->min_speed_hz =
+ hw->pdata->mainclk_hz / (max_div + 1) + 1;
}
au1550_spi_setup_psc_as_spi(hw);
@@ -999,6 +977,15 @@ static int __init au1550_spi_init(void)
* create memory device with 8 bits dev_devwidth
* needed for proper byte ordering to spi fifo
*/
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ case ALCHEMY_CPU_AU1300:
+ break;
+ default:
+ return -ENODEV;
+ }
+
if (usedma) {
ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
if (!ddma_memid)
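
Note: the au1550 hunks drop the driver-private freq_min/freq_max fields because the SPI core can validate per-transfer speeds once the limits are published on the master. A sketch of that idea (divider constants taken from the hunk above, helper name illustrative):

#include <linux/spi/spi.h>

static void foo_spi_publish_speed_range(struct spi_master *master,
                                        unsigned long mainclk_hz)
{
        const int min_div = (2 << 0) * (2 * (4 + 1));   /* smallest divider, fastest clock */
        const int max_div = (2 << 3) * (2 * (63 + 1));  /* largest divider, slowest clock */

        master->max_speed_hz = mainclk_hz / min_div;
        master->min_speed_hz = mainclk_hz / (max_div + 1) + 1;
}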
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 8a89dd1f2654..69167456ec1e 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -315,7 +315,6 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
master->mode_bits = BCM2835_SPI_MODE_BITS;
master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->bus_num = -1;
master->num_chipselect = 3;
master->transfer_one_message = bcm2835_spi_transfer_one;
master->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index b528f9fc8bc0..5a211e98383b 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -180,7 +180,7 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
while (pending > 0) {
int curr_step = min_t(int, step_size, pending);
- init_completion(&bs->done);
+ reinit_completion(&bs->done);
if (tx) {
memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
tx += curr_step;
@@ -369,6 +369,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
mutex_init(&bs->bus_mutex);
+ init_completion(&bs->done);
master->bus_num = HSSPI_BUS_NUM;
master->num_chipselect = 8;
@@ -453,9 +454,8 @@ static int bcm63xx_hsspi_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops bcm63xx_hsspi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(bcm63xx_hsspi_suspend, bcm63xx_hsspi_resume)
-};
+static SIMPLE_DEV_PM_OPS(bcm63xx_hsspi_pm_ops, bcm63xx_hsspi_suspend,
+ bcm63xx_hsspi_resume);
static struct platform_driver bcm63xx_hsspi_driver = {
.driver = {
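
Note: both bcm63xx drivers switch from calling init_completion() before every transfer to a single init_completion() at probe time plus reinit_completion() per transfer; re-initialising a completion that the IRQ handler may still be signalling rewrites its internal wait queue. A sketch of the resulting split (foo_* names are placeholders):

#include <linux/completion.h>

struct foo_spi {
        struct completion done;         /* completed from the IRQ handler */
};

static void foo_spi_probe_setup(struct foo_spi *bs)
{
        init_completion(&bs->done);     /* once, when the device is set up */
}

static void foo_spi_start_xfer(struct foo_spi *bs)
{
        reinit_completion(&bs->done);   /* re-arm before each transfer */
        /* ... start the hardware, then wait_for_completion(&bs->done) ... */
}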
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 77286aef2adf..0250fa721cea 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -20,7 +20,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -35,8 +34,6 @@
#include <bcm63xx_dev_spi.h>
-#define PFX KBUILD_MODNAME
-
#define BCM63XX_SPI_MAX_PREPEND 15
struct bcm63xx_spi {
@@ -169,7 +166,7 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
transfer_list);
}
- init_completion(&bs->done);
+ reinit_completion(&bs->done);
/* Fill in the Message control register */
msg_ctl = (len << SPI_BYTE_CNT_SHIFT);
@@ -353,6 +350,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
}
bs = spi_master_get_devdata(master);
+ init_completion(&bs->done);
platform_set_drvdata(pdev, master);
bs->pdev = pdev;
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 38941e5920b5..f515c5e9db57 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -8,7 +8,6 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/spi/spi-bfin-v3.c b/drivers/spi/spi-bfin-v3.c
index 8f8598834b30..4089d0e0d84e 100644
--- a/drivers/spi/spi-bfin-v3.c
+++ b/drivers/spi/spi-bfin-v3.c
@@ -822,7 +822,8 @@ static int bfin_spi_probe(struct platform_device *pdev)
master->cleanup = bfin_spi_cleanup;
master->setup = bfin_spi_setup;
master->transfer_one_message = bfin_spi_transfer_one_message;
- master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
drv_data = spi_master_get_devdata(master);
drv_data->master = master;
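
Note: the bfin-v3 change is behaviour-neutral -- SPI_BPW_MASK(n) expands to BIT(n - 1), so the macro form advertises exactly the same word sizes as the hand-written BIT() arithmetic. A sketch with an illustrative helper name:

#include <linux/spi/spi.h>

static void foo_spi_advertise_word_sizes(struct spi_master *master)
{
        /* 8-, 16- and 32-bit words; identical to
         * BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1) */
        master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
                                     SPI_BPW_MASK(8);
}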
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index f0f195af75d4..55e57c3eb9bd 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -350,7 +350,6 @@ static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data)
static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
{
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
- struct spi_transfer *last_transfer;
unsigned long flags;
struct spi_message *msg;
@@ -362,9 +361,6 @@ static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
queue_work(drv_data->workqueue, &drv_data->pump_messages);
spin_unlock_irqrestore(&drv_data->lock, flags);
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer, transfer_list);
-
msg->state = NULL;
if (!drv_data->cs_change)
@@ -1030,10 +1026,6 @@ static int bfin_spi_setup(struct spi_device *spi)
}
/* translate common spi framework into our register */
- if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
- dev_err(&spi->dev, "unsupported spi modes detected\n");
- goto error;
- }
if (spi->mode & SPI_CPOL)
chip->ctl_reg |= BIT_CTL_CPOL;
if (spi->mode & SPI_CPHA)
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index bd222f6b677d..dc7d2c2d643e 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -16,7 +16,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
@@ -467,11 +466,9 @@ EXPORT_SYMBOL_GPL(spi_bitbang_start);
/**
* spi_bitbang_stop - stops the task providing spi communication
*/
-int spi_bitbang_stop(struct spi_bitbang *bitbang)
+void spi_bitbang_stop(struct spi_bitbang *bitbang)
{
spi_unregister_master(bitbang->master);
-
- return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_stop);
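
Note: since spi_bitbang_stop() can no longer fail, callers stop propagating a status from it; the spi-butterfly and spi-gpio hunks further down make exactly that change. A sketch of the caller side (foo_* names and storing the bitbang struct as drvdata are illustrative):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>

static int foo_spi_remove(struct platform_device *pdev)
{
        struct spi_bitbang *bitbang = platform_get_drvdata(pdev);

        spi_bitbang_stop(bitbang);      /* unregisters child devices too */
        spi_master_put(bitbang->master);
        return 0;
}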
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 8081f96bd1d5..ee4f91ccd8fd 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -309,7 +309,6 @@ done:
static void butterfly_detach(struct parport *p)
{
struct butterfly *pp;
- int status;
/* FIXME this global is ugly ... but, how to quickly get from
* the parport to the "struct butterfly" associated with it?
@@ -321,7 +320,7 @@ static void butterfly_detach(struct parport *p)
butterfly = NULL;
/* stop() unregisters child devices too */
- status = spi_bitbang_stop(&pp->bitbang);
+ spi_bitbang_stop(&pp->bitbang);
/* turn off VCC */
parport_write_data(pp->port, 0);
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 374ba4a48a9e..4cd62f636547 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -11,158 +11,125 @@
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/clps711x.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/spi-clps711x.h>
-#include <mach/hardware.h>
-
#define DRIVER_NAME "spi-clps711x"
-struct spi_clps711x_data {
- struct completion done;
+#define SYNCIO_FRMLEN(x) ((x) << 8)
+#define SYNCIO_TXFRMEN (1 << 14)
+struct spi_clps711x_data {
+ void __iomem *syncio;
+ struct regmap *syscon;
+ struct regmap *syscon1;
struct clk *spi_clk;
- u32 max_speed_hz;
u8 *tx_buf;
u8 *rx_buf;
- int count;
+ unsigned int bpw;
int len;
-
- int chipselect[0];
};
static int spi_clps711x_setup(struct spi_device *spi)
{
- struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
-
/* We expect that the SPI device is not selected */
- gpio_direction_output(hw->chipselect[spi->chip_select],
- !(spi->mode & SPI_CS_HIGH));
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
return 0;
}
-static void spi_clps711x_setup_mode(struct spi_device *spi)
-{
- /* Setup edge for transfer */
- if (spi->mode & SPI_CPHA)
- clps_writew(clps_readw(SYSCON3) | SYSCON3_ADCCKNSEN, SYSCON3);
- else
- clps_writew(clps_readw(SYSCON3) & ~SYSCON3_ADCCKNSEN, SYSCON3);
-}
-
-static int spi_clps711x_setup_xfer(struct spi_device *spi,
- struct spi_transfer *xfer)
+static void spi_clps711x_setup_xfer(struct spi_device *spi,
+ struct spi_transfer *xfer)
{
- u32 speed = xfer->speed_hz ? : spi->max_speed_hz;
- u8 bpw = xfer->bits_per_word;
- struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
-
- if (bpw != 8) {
- dev_err(&spi->dev, "Unsupported master bus width %i\n", bpw);
- return -EINVAL;
- }
+ struct spi_master *master = spi->master;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
/* Setup SPI frequency divider */
- if (!speed || (speed >= hw->max_speed_hz))
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(3), SYSCON1);
- else if (speed >= (hw->max_speed_hz / 2))
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(2), SYSCON1);
- else if (speed >= (hw->max_speed_hz / 8))
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(1), SYSCON1);
+ if (xfer->speed_hz >= master->max_speed_hz)
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(3));
+ else if (xfer->speed_hz >= (master->max_speed_hz / 2))
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(2));
+ else if (xfer->speed_hz >= (master->max_speed_hz / 8))
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(1));
else
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(0), SYSCON1);
-
- return 0;
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(0));
}
-static int spi_clps711x_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static int spi_clps711x_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
struct spi_clps711x_data *hw = spi_master_get_devdata(master);
- struct spi_transfer *xfer;
- int status = 0, cs = hw->chipselect[msg->spi->chip_select];
- u32 data;
-
- spi_clps711x_setup_mode(msg->spi);
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (spi_clps711x_setup_xfer(msg->spi, xfer)) {
- status = -EINVAL;
- goto out_xfr;
- }
+ struct spi_device *spi = msg->spi;
- gpio_set_value(cs, !!(msg->spi->mode & SPI_CS_HIGH));
-
- reinit_completion(&hw->done);
-
- hw->count = 0;
- hw->len = xfer->len;
- hw->tx_buf = (u8 *)xfer->tx_buf;
- hw->rx_buf = (u8 *)xfer->rx_buf;
-
- /* Initiate transfer */
- data = hw->tx_buf ? hw->tx_buf[hw->count] : 0;
- clps_writel(data | SYNCIO_FRMLEN(8) | SYNCIO_TXFRMEN, SYNCIO);
-
- wait_for_completion(&hw->done);
+ /* Setup mode for transfer */
+ return regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCKNSEN,
+ (spi->mode & SPI_CPHA) ?
+ SYSCON3_ADCCKNSEN : 0);
+}
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
+static int spi_clps711x_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
- if (xfer->cs_change ||
- list_is_last(&xfer->transfer_list, &msg->transfers))
- gpio_set_value(cs, !(msg->spi->mode & SPI_CS_HIGH));
+ spi_clps711x_setup_xfer(spi, xfer);
- msg->actual_length += xfer->len;
- }
+ hw->len = xfer->len;
+ hw->bpw = xfer->bits_per_word;
+ hw->tx_buf = (u8 *)xfer->tx_buf;
+ hw->rx_buf = (u8 *)xfer->rx_buf;
-out_xfr:
- msg->status = status;
- spi_finalize_current_message(master);
+ /* Initiate transfer */
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN, hw->syncio);
- return 0;
+ return 1;
}
static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
{
- struct spi_clps711x_data *hw = (struct spi_clps711x_data *)dev_id;
- u32 data;
+ struct spi_master *master = dev_id;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
/* Handle RX */
- data = clps_readb(SYNCIO);
+ data = readb(hw->syncio);
if (hw->rx_buf)
- hw->rx_buf[hw->count] = (u8)data;
-
- hw->count++;
+ *hw->rx_buf++ = data;
/* Handle TX */
- if (hw->count < hw->len) {
- data = hw->tx_buf ? hw->tx_buf[hw->count] : 0;
- clps_writel(data | SYNCIO_FRMLEN(8) | SYNCIO_TXFRMEN, SYNCIO);
+ if (--hw->len > 0) {
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN,
+ hw->syncio);
} else
- complete(&hw->done);
+ spi_finalize_current_transfer(master);
return IRQ_HANDLED;
}
static int spi_clps711x_probe(struct platform_device *pdev)
{
- int i, ret;
- struct spi_master *master;
struct spi_clps711x_data *hw;
struct spi_clps711x_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct spi_master *master;
+ struct resource *res;
+ int i, irq, ret;
if (!pdata) {
dev_err(&pdev->dev, "No platform data supplied\n");
@@ -174,33 +141,37 @@ static int spi_clps711x_probe(struct platform_device *pdev)
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev,
- sizeof(struct spi_clps711x_data) +
- sizeof(int) * pdata->num_chipselect);
- if (!master) {
- dev_err(&pdev->dev, "SPI allocating memory error\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*hw));
+ if (!master)
return -ENOMEM;
+
+ master->cs_gpios = devm_kzalloc(&pdev->dev, sizeof(int) *
+ pdata->num_chipselect, GFP_KERNEL);
+ if (!master->cs_gpios) {
+ ret = -ENOMEM;
+ goto err_out;
}
master->bus_num = pdev->id;
master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
master->num_chipselect = pdata->num_chipselect;
master->setup = spi_clps711x_setup;
- master->transfer_one_message = spi_clps711x_transfer_one_message;
+ master->prepare_message = spi_clps711x_prepare_message;
+ master->transfer_one = spi_clps711x_transfer_one;
hw = spi_master_get_devdata(master);
for (i = 0; i < master->num_chipselect; i++) {
- hw->chipselect[i] = pdata->chipselect[i];
- if (!gpio_is_valid(hw->chipselect[i])) {
- dev_err(&pdev->dev, "Invalid CS GPIO %i\n", i);
- ret = -EINVAL;
- goto err_out;
- }
- if (devm_gpio_request(&pdev->dev, hw->chipselect[i], NULL)) {
+ master->cs_gpios[i] = pdata->chipselect[i];
+ ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
+ DRIVER_NAME);
+ if (ret) {
dev_err(&pdev->dev, "Can't get CS GPIO %i\n", i);
- ret = -EINVAL;
goto err_out;
}
}
@@ -211,29 +182,45 @@ static int spi_clps711x_probe(struct platform_device *pdev)
ret = PTR_ERR(hw->spi_clk);
goto err_out;
}
- hw->max_speed_hz = clk_get_rate(hw->spi_clk);
+ master->max_speed_hz = clk_get_rate(hw->spi_clk);
- init_completion(&hw->done);
platform_set_drvdata(pdev, master);
+ hw->syscon = syscon_regmap_lookup_by_pdevname("syscon.3");
+ if (IS_ERR(hw->syscon)) {
+ ret = PTR_ERR(hw->syscon);
+ goto err_out;
+ }
+
+ hw->syscon1 = syscon_regmap_lookup_by_pdevname("syscon.1");
+ if (IS_ERR(hw->syscon1)) {
+ ret = PTR_ERR(hw->syscon1);
+ goto err_out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->syncio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->syncio)) {
+ ret = PTR_ERR(hw->syncio);
+ goto err_out;
+ }
+
/* Disable extended mode due to hardware problems */
- clps_writew(clps_readw(SYSCON3) & ~SYSCON3_ADCCON, SYSCON3);
+ regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCON, 0);
/* Clear possible pending interrupt */
- clps_readl(SYNCIO);
+ readl(hw->syncio);
- ret = devm_request_irq(&pdev->dev, IRQ_SSEOTI, spi_clps711x_isr, 0,
- dev_name(&pdev->dev), hw);
- if (ret) {
- dev_err(&pdev->dev, "Can't request IRQ\n");
+ ret = devm_request_irq(&pdev->dev, irq, spi_clps711x_isr, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
goto err_out;
- }
ret = devm_spi_register_master(&pdev->dev, master);
if (!ret) {
dev_info(&pdev->dev,
"SPI bus driver initialized. Master clock %u Hz\n",
- hw->max_speed_hz);
+ master->max_speed_hz);
return 0;
}
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 28ae470397a9..e2fa628e55e7 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -77,8 +77,6 @@ struct mcfqspi {
struct mcfqspi_cs_control *cs_control;
wait_queue_head_t waitq;
-
- struct device *dev;
};
static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
@@ -135,13 +133,13 @@ static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select,
static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi)
{
- return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ?
+ return (mcfqspi->cs_control->setup) ?
mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0;
}
static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi)
{
- if (mcfqspi->cs_control && mcfqspi->cs_control->teardown)
+ if (mcfqspi->cs_control->teardown)
mcfqspi->cs_control->teardown(mcfqspi->cs_control);
}
@@ -300,68 +298,45 @@ static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
}
}
-static int mcfqspi_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static void mcfqspi_set_cs(struct spi_device *spi, bool enable)
{
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- struct spi_device *spi = msg->spi;
- struct spi_transfer *t;
- int status = 0;
-
- list_for_each_entry(t, &msg->transfers, transfer_list) {
- bool cs_high = spi->mode & SPI_CS_HIGH;
- u16 qmr = MCFQSPI_QMR_MSTR;
-
- qmr |= t->bits_per_word << 10;
- if (spi->mode & SPI_CPHA)
- qmr |= MCFQSPI_QMR_CPHA;
- if (spi->mode & SPI_CPOL)
- qmr |= MCFQSPI_QMR_CPOL;
- if (t->speed_hz)
- qmr |= mcfqspi_qmr_baud(t->speed_hz);
- else
- qmr |= mcfqspi_qmr_baud(spi->max_speed_hz);
- mcfqspi_wr_qmr(mcfqspi, qmr);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(spi->master);
+ bool cs_high = spi->mode & SPI_CS_HIGH;
+ if (enable)
mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
+ else
+ mcfqspi_cs_deselect(mcfqspi, spi->chip_select, cs_high);
+}
- mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
- if (t->bits_per_word == 8)
- mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf,
- t->rx_buf);
- else
- mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
- t->rx_buf);
- mcfqspi_wr_qir(mcfqspi, 0);
-
- if (t->delay_usecs)
- udelay(t->delay_usecs);
- if (t->cs_change) {
- if (!list_is_last(&t->transfer_list, &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
- cs_high);
- } else {
- if (list_is_last(&t->transfer_list, &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
- cs_high);
- }
- msg->actual_length += t->len;
- }
- msg->status = status;
- spi_finalize_current_message(master);
-
- return status;
+static int mcfqspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ u16 qmr = MCFQSPI_QMR_MSTR;
+
+ qmr |= t->bits_per_word << 10;
+ if (spi->mode & SPI_CPHA)
+ qmr |= MCFQSPI_QMR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ qmr |= MCFQSPI_QMR_CPOL;
+ qmr |= mcfqspi_qmr_baud(t->speed_hz);
+ mcfqspi_wr_qmr(mcfqspi, qmr);
+
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
+ if (t->bits_per_word == 8)
+ mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf, t->rx_buf);
+ else
+ mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
+ t->rx_buf);
+ mcfqspi_wr_qir(mcfqspi, 0);
+ return 0;
}
static int mcfqspi_setup(struct spi_device *spi)
{
- if (spi->chip_select >= spi->master->num_chipselect) {
- dev_dbg(&spi->dev, "%d chip select is out of range\n",
- spi->chip_select);
- return -EINVAL;
- }
-
mcfqspi_cs_deselect(spi_master_get_devdata(spi->master),
spi->chip_select, spi->mode & SPI_CS_HIGH);
@@ -388,6 +363,11 @@ static int mcfqspi_probe(struct platform_device *pdev)
return -ENOENT;
}
+ if (!pdata->cs_control) {
+ dev_dbg(&pdev->dev, "pdata->cs_control is NULL\n");
+ return -EINVAL;
+ }
+
master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi));
if (master == NULL) {
dev_dbg(&pdev->dev, "spi_alloc_master failed\n");
@@ -436,12 +416,12 @@ static int mcfqspi_probe(struct platform_device *pdev)
}
init_waitqueue_head(&mcfqspi->waitq);
- mcfqspi->dev = &pdev->dev;
master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
master->setup = mcfqspi_setup;
- master->transfer_one_message = mcfqspi_transfer_one_message;
+ master->set_cs = mcfqspi_set_cs;
+ master->transfer_one = mcfqspi_transfer_one;
master->auto_runtime_pm = true;
platform_set_drvdata(pdev, master);
@@ -451,7 +431,7 @@ static int mcfqspi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "spi_register_master failed\n");
goto fail2;
}
- pm_runtime_enable(mcfqspi->dev);
+ pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
@@ -473,9 +453,8 @@ static int mcfqspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pm_runtime_disable(mcfqspi->dev);
+ pm_runtime_disable(&pdev->dev);
/* disable the hardware (set the baud rate to 0) */
mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
@@ -490,8 +469,11 @@ static int mcfqspi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ int ret;
- spi_master_suspend(master);
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
clk_disable(mcfqspi->clk);
@@ -503,11 +485,9 @@ static int mcfqspi_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- spi_master_resume(master);
-
clk_enable(mcfqspi->clk);
- return 0;
+ return spi_master_resume(master);
}
#endif
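
Note: the coldfire-qspi conversion splits the old transfer_one_message() loop into the core-driven set_cs() and transfer_one() callbacks, so the per-message bookkeeping (walking the transfer list, cs_change, delay_usecs, actual_length) moves into the SPI core. A sketch of the two hooks (register programming elided, foo_* names illustrative):

#include <linux/spi/spi.h>

static void foo_set_cs(struct spi_device *spi, bool enable)
{
        /* called by the core around each transfer to (de)assert the
         * chip select for spi->chip_select */
}

static int foo_transfer_one(struct spi_master *master,
                            struct spi_device *spi,
                            struct spi_transfer *t)
{
        /* ... configure mode/speed from t and shift t->len bytes ... */
        return 0;       /* finished synchronously */
}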
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 5e7389faa2a0..50f750989258 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -802,8 +802,7 @@ static int spi_davinci_get_pdata(struct platform_device *pdev,
pdata = &dspi->pdata;
pdata->version = SPI_VERSION_1;
- match = of_match_device(of_match_ptr(davinci_spi_of_match),
- &pdev->dev);
+ match = of_match_device(davinci_spi_of_match, &pdev->dev);
if (!match)
return -ENODEV;
@@ -824,7 +823,6 @@ static int spi_davinci_get_pdata(struct platform_device *pdev,
return 0;
}
#else
-#define davinci_spi_of_match NULL
static struct davinci_spi_platform_data
*spi_davinci_get_pdata(struct platform_device *pdev,
struct davinci_spi *dspi)
@@ -864,10 +862,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
dspi = spi_master_get_devdata(master);
- if (dspi == NULL) {
- ret = -ENOENT;
- goto free_master;
- }
if (dev_get_platdata(&pdev->dev)) {
pdata = dev_get_platdata(&pdev->dev);
@@ -908,10 +902,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_master;
dspi->bitbang.master = master;
- if (dspi->bitbang.master == NULL) {
- ret = -ENODEV;
- goto free_master;
- }
dspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dspi->clk)) {
@@ -1040,7 +1030,7 @@ static struct platform_driver davinci_spi_driver = {
.driver = {
.name = "spi_davinci",
.owner = THIS_MODULE,
- .of_match_table = davinci_spi_of_match,
+ .of_match_table = of_match_ptr(davinci_spi_of_match),
},
.probe = davinci_spi_probe,
.remove = davinci_spi_remove,
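
Note: the davinci change adopts the of_match_ptr() idiom -- the macro evaluates to the table when CONFIG_OF is set and to NULL otherwise, so the "#define davinci_spi_of_match NULL" stub can go away while of_match_device() keeps seeing the real table. A sketch (compatible string and driver name are placeholders):

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id foo_spi_of_match[] = {
        { .compatible = "vendor,foo-spi" },     /* illustrative */
        { },
};

static struct platform_driver foo_spi_driver = {
        .driver = {
                .name           = "foo-spi",
                /* NULL when built without CONFIG_OF */
                .of_match_table = of_match_ptr(foo_spi_of_match),
        },
};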
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 9af56cdf1540..1492f5ee9aaa 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -66,7 +66,7 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
if (ret)
return ret;
- dws->bus_num = 0;
+ dws->bus_num = pdev->id;
dws->num_cs = 4;
dws->max_freq = clk_get_rate(dwsmmio->clk);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index bf98d63d92b3..712ac5629cd4 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -276,8 +276,7 @@ static void giveback(struct dw_spi *dws)
queue_work(dws->workqueue, &dws->pump_messages);
spin_unlock_irqrestore(&dws->lock, flags);
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer,
+ last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
transfer_list);
if (!last_transfer->cs_change && dws->cs_control)
@@ -439,12 +438,6 @@ static void pump_transfers(unsigned long data)
if (transfer->speed_hz != speed) {
speed = transfer->speed_hz;
- if (speed > dws->max_freq) {
- printk(KERN_ERR "MRST SPI0: unsupported"
- "freq: %dHz\n", speed);
- message->status = -EIO;
- goto early_exit;
- }
/* clk_div doesn't support odd number */
clk_div = dws->max_freq / speed;
@@ -671,12 +664,6 @@ static int dw_spi_setup(struct spi_device *spi)
return 0;
}
-static void dw_spi_cleanup(struct spi_device *spi)
-{
- struct chip_data *chip = spi_get_ctldata(spi);
- kfree(chip);
-}
-
static int init_queue(struct dw_spi *dws)
{
INIT_LIST_HEAD(&dws->queue);
@@ -806,9 +793,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->bus_num = dws->bus_num;
master->num_chipselect = dws->num_cs;
- master->cleanup = dw_spi_cleanup;
master->setup = dw_spi_setup;
master->transfer = dw_spi_transfer;
+ master->max_speed_hz = dws->max_freq;
/* Basic HW init */
spi_hw_init(dws);
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
index d4d3cc534792..be44a3eeb5e8 100644
--- a/drivers/spi/spi-efm32.c
+++ b/drivers/spi/spi-efm32.c
@@ -198,7 +198,7 @@ static int efm32_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
efm32_spi_filltx(ddata);
- init_completion(&ddata->done);
+ reinit_completion(&ddata->done);
efm32_spi_write32(ddata, REG_IF_TXBL | REG_IF_RXDATAV, REG_IEN);
@@ -287,17 +287,17 @@ static u32 efm32_spi_get_configured_location(struct efm32_spi_ddata *ddata)
return (reg & REG_ROUTE_LOCATION__MASK) >> __ffs(REG_ROUTE_LOCATION__MASK);
}
-static int efm32_spi_probe_dt(struct platform_device *pdev,
+static void efm32_spi_probe_dt(struct platform_device *pdev,
struct spi_master *master, struct efm32_spi_ddata *ddata)
{
struct device_node *np = pdev->dev.of_node;
u32 location;
int ret;
- if (!np)
- return 1;
-
- ret = of_property_read_u32(np, "location", &location);
+ ret = of_property_read_u32(np, "efm32,location", &location);
+ if (ret)
+ /* fall back to old and (wrongly) generic property "location" */
+ ret = of_property_read_u32(np, "location", &location);
if (!ret) {
dev_dbg(&pdev->dev, "using location %u\n", location);
} else {
@@ -308,11 +308,6 @@ static int efm32_spi_probe_dt(struct platform_device *pdev,
}
ddata->pdata.location = location;
-
- /* spi core takes care about the bus number using an alias */
- master->bus_num = -1;
-
- return 0;
}
static int efm32_spi_probe(struct platform_device *pdev)
@@ -322,9 +317,14 @@ static int efm32_spi_probe(struct platform_device *pdev)
int ret;
struct spi_master *master;
struct device_node *np = pdev->dev.of_node;
- unsigned int num_cs, i;
+ int num_cs, i;
+
+ if (!np)
+ return -EINVAL;
num_cs = of_gpio_named_count(np, "cs-gpios");
+ if (num_cs < 0)
+ return num_cs;
master = spi_alloc_master(&pdev->dev,
sizeof(*ddata) + num_cs * sizeof(unsigned));
@@ -349,6 +349,7 @@ static int efm32_spi_probe(struct platform_device *pdev)
ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs;
spin_lock_init(&ddata->lock);
+ init_completion(&ddata->done);
ddata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ddata->clk)) {
@@ -415,23 +416,7 @@ static int efm32_spi_probe(struct platform_device *pdev)
goto err;
}
- ret = efm32_spi_probe_dt(pdev, master, ddata);
- if (ret > 0) {
- /* not created by device tree */
- const struct efm32_spi_pdata *pdata =
- dev_get_platdata(&pdev->dev);
-
- if (pdata)
- ddata->pdata = *pdata;
- else
- ddata->pdata.location =
- efm32_spi_get_configured_location(ddata);
-
- master->bus_num = pdev->id;
-
- } else if (ret < 0) {
- goto err_disable_clk;
- }
+ efm32_spi_probe_dt(pdev, master, ddata);
efm32_spi_write32(ddata, 0, REG_IEN);
efm32_spi_write32(ddata, REG_ROUTE_TXPEN | REG_ROUTE_RXPEN |
@@ -487,6 +472,9 @@ static int efm32_spi_remove(struct platform_device *pdev)
static const struct of_device_id efm32_spi_dt_ids[] = {
{
+ .compatible = "energymicro,efm32-spi",
+ }, {
+ /* doesn't follow the "vendor,device" scheme, don't use */
.compatible = "efm32,spi",
}, {
/* sentinel */
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 1bfaed6e4073..2f675d32df0e 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -73,8 +73,6 @@
* @clk: clock for the controller
* @regs_base: pointer to ioremap()'d registers
* @sspdr_phys: physical address of the SSPDR register
- * @min_rate: minimum clock rate (in Hz) supported by the controller
- * @max_rate: maximum clock rate (in Hz) supported by the controller
* @wait: wait here until given transfer is completed
* @current_msg: message that is currently processed (or %NULL if none)
* @tx: current byte in transfer to transmit
@@ -95,8 +93,6 @@ struct ep93xx_spi {
struct clk *clk;
void __iomem *regs_base;
unsigned long sspdr_phys;
- unsigned long min_rate;
- unsigned long max_rate;
struct completion wait;
struct spi_message *current_msg;
size_t tx;
@@ -199,9 +195,9 @@ static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
* @div_scr: pointer to return the scr divider
*/
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
- unsigned long rate,
- u8 *div_cpsr, u8 *div_scr)
+ u32 rate, u8 *div_cpsr, u8 *div_scr)
{
+ struct spi_master *master = platform_get_drvdata(espi->pdev);
unsigned long spi_clk_rate = clk_get_rate(espi->clk);
int cpsr, scr;
@@ -210,7 +206,7 @@ static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
* controller. Note that minimum value is already checked in
* ep93xx_spi_transfer_one_message().
*/
- rate = clamp(rate, espi->min_rate, espi->max_rate);
+ rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
/*
* Calculate divisors so that we can get speed according the
@@ -735,13 +731,6 @@ static int ep93xx_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct ep93xx_spi *espi = spi_master_get_devdata(master);
- struct spi_transfer *t;
-
- /* first validate each transfer */
- list_for_each_entry(t, &msg->transfers, transfer_list) {
- if (t->speed_hz < espi->min_rate)
- return -EINVAL;
- }
msg->state = NULL;
msg->status = 0;
@@ -917,8 +906,8 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
* Calculate maximum and minimum supported clock rates
* for the controller.
*/
- espi->max_rate = clk_get_rate(espi->clk) / 2;
- espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
+ master->max_speed_hz = clk_get_rate(espi->clk) / 2;
+ master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
espi->pdev = pdev;
espi->sspdr_phys = res->start + SSPDR;
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index dd5bd468e962..09965f069a1c 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -312,9 +312,6 @@ static int falcon_sflash_setup(struct spi_device *spi)
unsigned int i;
unsigned long flags;
- if (spi->chip_select > 0)
- return -ENODEV;
-
spin_lock_irqsave(&ebu_lock, flags);
if (spi->max_speed_hz >= CLOCK_100M) {
@@ -422,9 +419,7 @@ static int falcon_sflash_probe(struct platform_device *pdev)
priv->master = master;
master->mode_bits = SPI_MODE_3;
- master->num_chipselect = 1;
master->flags = SPI_MASTER_HALF_DUPLEX;
- master->bus_num = -1;
master->setup = falcon_sflash_setup;
master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
master->transfer_one_message = falcon_sflash_xfer_one;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index a25392065d9b..d565eeee3bd8 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -108,11 +109,11 @@ struct fsl_dspi {
struct spi_bitbang bitbang;
struct platform_device *pdev;
- void __iomem *base;
+ struct regmap *regmap;
int irq;
- struct clk *clk;
+ struct clk *clk;
- struct spi_transfer *cur_transfer;
+ struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
size_t len;
void *tx;
@@ -123,24 +124,17 @@ struct fsl_dspi {
u8 cs;
u16 void_write_data;
- wait_queue_head_t waitq;
- u32 waitflags;
+ wait_queue_head_t waitq;
+ u32 waitflags;
};
static inline int is_double_byte_mode(struct fsl_dspi *dspi)
{
- return ((readl(dspi->base + SPI_CTAR(dspi->cs)) & SPI_FRAME_BITS_MASK)
- == SPI_FRAME_BITS(8)) ? 0 : 1;
-}
+ unsigned int val;
-static void set_bit_mode(struct fsl_dspi *dspi, unsigned char bits)
-{
- u32 temp;
+ regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val);
- temp = readl(dspi->base + SPI_CTAR(dspi->cs));
- temp &= ~SPI_FRAME_BITS_MASK;
- temp |= SPI_FRAME_BITS(bits);
- writel(temp, dspi->base + SPI_CTAR(dspi->cs));
+ return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
}
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
@@ -188,7 +182,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
*/
if (tx_word && (dspi->len == 1)) {
dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
- set_bit_mode(dspi, 8);
+ regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
tx_word = 0;
}
@@ -238,7 +233,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
dspi_pushr |= SPI_PUSHR_CTCNT; /* clear counter */
}
- writel(dspi_pushr, dspi->base + SPI_PUSHR);
+ regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
+
tx_count++;
}
@@ -253,17 +249,23 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
while ((dspi->rx < dspi->rx_end)
&& (rx_count < DSPI_FIFO_SIZE)) {
if (rx_word) {
+ unsigned int val;
+
if ((dspi->rx_end - dspi->rx) == 1)
break;
- d = SPI_POPR_RXDATA(readl(dspi->base + SPI_POPR));
+ regmap_read(dspi->regmap, SPI_POPR, &val);
+ d = SPI_POPR_RXDATA(val);
if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
*(u16 *)dspi->rx = d;
dspi->rx += 2;
} else {
- d = SPI_POPR_RXDATA(readl(dspi->base + SPI_POPR));
+ unsigned int val;
+
+ regmap_read(dspi->regmap, SPI_POPR, &val);
+ d = SPI_POPR_RXDATA(val);
if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
*(u8 *)dspi->rx = d;
dspi->rx++;
@@ -295,13 +297,13 @@ static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
if (!dspi->tx)
dspi->dataflags |= TRAN_STATE_TX_VOID;
- writel(dspi->cur_chip->mcr_val, dspi->base + SPI_MCR);
- writel(dspi->cur_chip->ctar_val, dspi->base + SPI_CTAR(dspi->cs));
- writel(SPI_RSER_EOQFE, dspi->base + SPI_RSER);
+ regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
+ regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val);
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
if (t->speed_hz)
- writel(dspi->cur_chip->ctar_val,
- dspi->base + SPI_CTAR(dspi->cs));
+ regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
+ dspi->cur_chip->ctar_val);
dspi_transfer_write(dspi);
@@ -315,7 +317,9 @@ static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
static void dspi_chipselect(struct spi_device *spi, int value)
{
struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
- u32 pushr = readl(dspi->base + SPI_PUSHR);
+ unsigned int pushr;
+
+ regmap_read(dspi->regmap, SPI_PUSHR, &pushr);
switch (value) {
case BITBANG_CS_ACTIVE:
@@ -326,7 +330,7 @@ static void dspi_chipselect(struct spi_device *spi, int value)
break;
}
- writel(pushr, dspi->base + SPI_PUSHR);
+ regmap_write(dspi->regmap, SPI_PUSHR, pushr);
}
static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
@@ -338,7 +342,8 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (chip == NULL) {
- chip = kcalloc(1, sizeof(struct chip_data), GFP_KERNEL);
+ chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
+ GFP_KERNEL);
if (!chip)
return -ENOMEM;
}
@@ -349,7 +354,6 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
fmsz = spi->bits_per_word - 1;
} else {
pr_err("Invalid wordsize\n");
- kfree(chip);
return -ENODEV;
}
@@ -382,13 +386,15 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
- writel(SPI_SR_EOQF, dspi->base + SPI_SR);
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
dspi_transfer_read(dspi);
if (!dspi->len) {
if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
- set_bit_mode(dspi, 16);
+ regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
+
dspi->waitflags = 1;
wake_up_interruptible(&dspi->waitq);
} else {
@@ -430,8 +436,13 @@ static int dspi_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static const struct dev_pm_ops dspi_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(dspi_suspend, dspi_resume)
+static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+
+static struct regmap_config dspi_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x88,
};
static int dspi_probe(struct platform_device *pdev)
@@ -440,6 +451,7 @@ static int dspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct fsl_dspi *dspi;
struct resource *res;
+ void __iomem *base;
int ret = 0, cs_num, bus_num;
master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
@@ -474,12 +486,24 @@ static int dspi_probe(struct platform_device *pdev)
master->bus_num = bus_num;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dspi->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dspi->base)) {
- ret = PTR_ERR(dspi->base);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
goto out_master_put;
}
+ dspi_regmap_config.lock_arg = dspi;
+ dspi_regmap_config.val_format_endian =
+ of_property_read_bool(np, "big-endian")
+ ? REGMAP_ENDIAN_BIG : REGMAP_ENDIAN_DEFAULT;
+ dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base,
+ &dspi_regmap_config);
+ if (IS_ERR(dspi->regmap)) {
+ dev_err(&pdev->dev, "failed to init regmap: %ld\n",
+ PTR_ERR(dspi->regmap));
+ return PTR_ERR(dspi->regmap);
+ }
+
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq < 0) {
dev_err(&pdev->dev, "can't get platform irq\n");
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 428dc7a6b62e..e767f5831b9c 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -219,13 +219,8 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
unsigned int len = t->len;
- u8 bits_per_word;
int ret;
- bits_per_word = spi->bits_per_word;
- if (t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
mpc8xxx_spi->len = t->len;
len = roundup(len, 4) / 4;
@@ -446,7 +441,8 @@ static void fsl_espi_do_one_msg(struct spi_message *m)
m->actual_length = espi_trans.actual_length;
m->status = espi_trans.status;
- m->complete(m->context);
+ if (m->complete)
+ m->complete(m->context);
}
static int fsl_espi_setup(struct spi_device *spi)
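
Note: several drivers in this series (fsl-espi here, the mpc512x/mpc52xx drivers below) stop assuming spi_message.complete is always set, matching the core's behaviour of treating it as optional. A sketch of the guarded completion call (helper name illustrative):

#include <linux/spi/spi.h>

static void foo_finish_message(struct spi_message *m, int status)
{
        m->status = status;
        if (m->complete)                /* completion callback is optional */
                m->complete(m->context);
}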
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 0b75f26158ab..e5d45fca3551 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -200,7 +200,7 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
const void *prop;
int ret = -ENOMEM;
- pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(&ofdev->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
@@ -215,15 +215,13 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
pdata->sysclk = get_brgfreq();
if (pdata->sysclk == -1) {
pdata->sysclk = fsl_get_sys_freq();
- if (pdata->sysclk == -1) {
- ret = -ENODEV;
- goto err;
- }
+ if (pdata->sysclk == -1)
+ return -ENODEV;
}
#else
ret = of_property_read_u32(np, "clock-frequency", &pdata->sysclk);
if (ret)
- goto err;
+ return ret;
#endif
prop = of_get_property(np, "mode", NULL);
@@ -237,8 +235,4 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
pdata->flags = SPI_CPM_MODE | SPI_CPM1;
return 0;
-
-err:
- kfree(pinfo);
- return ret;
}
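
Note: the fsl-lib change ties the allocation to the device via devm_kzalloc(), so the error paths need no kfree() and the "err:" label disappears. A sketch of the pattern (struct foo_info and the helper are placeholders):

#include <linux/device.h>
#include <linux/slab.h>

struct foo_info { int dummy; };

static int foo_probe_alloc(struct device *dev, struct foo_info **out)
{
        struct foo_info *info;

        info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;         /* nothing to unwind */

        *out = info;
        return 0;
}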
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 119f7af94537..b3e7775034db 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -239,12 +239,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
if (!bits_per_word)
bits_per_word = spi->bits_per_word;
- /* Make sure its a bit width we support [4..16, 32] */
- if ((bits_per_word < 4)
- || ((bits_per_word > 16) && (bits_per_word != 32))
- || (bits_per_word > mpc8xxx_spi->max_bits_per_word))
- return -EINVAL;
-
if (!hz)
hz = spi->max_speed_hz;
@@ -362,18 +356,28 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
static void fsl_spi_do_one_msg(struct spi_message *m)
{
struct spi_device *spi = m->spi;
- struct spi_transfer *t;
+ struct spi_transfer *t, *first;
unsigned int cs_change;
const int nsecs = 50;
int status;
- cs_change = 1;
- status = 0;
+ /* Don't allow changes if CS is active */
+ first = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->bits_per_word || t->speed_hz) {
- /* Don't allow changes if CS is active */
+ if ((first->bits_per_word != t->bits_per_word) ||
+ (first->speed_hz != t->speed_hz)) {
status = -EINVAL;
+ dev_err(&spi->dev,
+ "bits_per_word/speed_hz should be same for the same SPI transfer\n");
+ return;
+ }
+ }
+ cs_change = 1;
+ status = -EINVAL;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
if (cs_change)
status = fsl_spi_setup_transfer(spi, t);
if (status < 0)
@@ -404,7 +408,8 @@ static void fsl_spi_do_one_msg(struct spi_message *m)
}
m->status = status;
- m->complete(m->context);
+ if (m->complete)
+ m->complete(m->context);
if (status || !cs_change) {
ndelay(nsecs);
@@ -641,6 +646,10 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
if (mpc8xxx_spi->type == TYPE_GRLIB)
fsl_spi_grlib_probe(dev);
+ master->bits_per_word_mask =
+ (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
+ SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
+
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
mpc8xxx_spi->set_shifts = fsl_spi_qe_cpu_set_shifts;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 7beeb29472ac..09823076df88 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -19,7 +19,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/of.h>
@@ -250,7 +249,7 @@ static int spi_gpio_setup(struct spi_device *spi)
/*
* ... otherwise, take it from spi->controller_data
*/
- cs = (unsigned int) spi->controller_data;
+ cs = (unsigned int)(uintptr_t) spi->controller_data;
}
if (!spi->controller_state) {
@@ -503,13 +502,12 @@ static int spi_gpio_remove(struct platform_device *pdev)
{
struct spi_gpio *spi_gpio;
struct spi_gpio_platform_data *pdata;
- int status;
spi_gpio = platform_get_drvdata(pdev);
pdata = dev_get_platdata(&pdev->dev);
/* stop() unregisters child devices too */
- status = spi_bitbang_stop(&spi_gpio->bitbang);
+ spi_bitbang_stop(&spi_gpio->bitbang);
if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
gpio_free(SPI_MISO_GPIO);
@@ -518,7 +516,7 @@ static int spi_gpio_remove(struct platform_device *pdev)
gpio_free(SPI_SCK_GPIO);
spi_master_put(spi_gpio->bitbang.master);
- return status;
+ return 0;
}
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 47f15d97e7fa..5daff2054ae4 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -741,7 +740,7 @@ static int spi_imx_transfer(struct spi_device *spi,
spi_imx->count = transfer->len;
spi_imx->txfifo = 0;
- init_completion(&spi_imx->xfer_done);
+ reinit_completion(&spi_imx->xfer_done);
spi_imx_push(spi_imx);
@@ -880,12 +879,12 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->irq = platform_get_irq(pdev, 0);
if (spi_imx->irq < 0) {
- ret = -EINVAL;
+ ret = spi_imx->irq;
goto out_master_put;
}
ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0,
- DRIVER_NAME, spi_imx);
+ dev_name(&pdev->dev), spi_imx);
if (ret) {
dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
goto out_master_put;
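
Note: spi-imx (and spi-mxs above) now propagate the negative errno from platform_get_irq() instead of flattening it to -EINVAL, which keeps values like -EPROBE_DEFER intact. A sketch of the probe fragment (foo_probe is illustrative):

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;     /* pass -EPROBE_DEFER / -ENXIO up as-is */

        /* ... request the irq, register the master ... */
        return 0;
}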
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 5032141eeeec..577d23a12763 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
@@ -301,7 +300,8 @@ static int mpc512x_psc_spi_msg_xfer(struct spi_master *master,
}
m->status = status;
- m->complete(m->context);
+ if (m->complete)
+ m->complete(m->context);
if (status || !cs_change)
mpc512x_psc_spi_deactivate_cs(spi);
@@ -466,10 +466,8 @@ static void mpc512x_spi_cs_control(struct spi_device *spi, bool onoff)
gpio_set_value(spi->cs_gpio, onoff);
}
-/* bus_num is used only for the case dev->platform_data == NULL */
static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
- u32 size, unsigned int irq,
- s16 bus_num)
+ u32 size, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc512x_psc_spi *mps;
@@ -488,7 +486,6 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
if (pdata == NULL) {
mps->cs_control = mpc512x_spi_cs_control;
- master->bus_num = bus_num;
} else {
mps->cs_control = pdata->cs_control;
master->bus_num = pdata->bus_num;
@@ -574,7 +571,6 @@ static int mpc512x_psc_spi_of_probe(struct platform_device *op)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
- s16 id = -1;
regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
if (!regaddr_p) {
@@ -583,16 +579,8 @@ static int mpc512x_psc_spi_of_probe(struct platform_device *op)
}
regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
- /* get PSC id (0..11, used by port_config) */
- id = of_alias_get_id(op->dev.of_node, "spi");
- if (id < 0) {
- dev_err(&op->dev, "no alias id for %s\n",
- op->dev.of_node->full_name);
- return id;
- }
-
return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
- irq_of_parse_and_map(op->dev.of_node, 0), id);
+ irq_of_parse_and_map(op->dev.of_node, 0));
}
static int mpc512x_psc_spi_of_remove(struct platform_device *op)
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 00ba910ab302..de532aa11d34 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -248,7 +247,8 @@ static void mpc52xx_psc_spi_work(struct work_struct *work)
}
m->status = status;
- m->complete(m->context);
+ if (m->complete)
+ m->complete(m->context);
if (status || !cs_change)
mpc52xx_psc_spi_deactivate_cs(spi);
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 7c675fe83101..b07db4b62d80 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
@@ -235,7 +234,8 @@ static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
dev_err(&ms->master->dev, "mode fault\n");
mpc52xx_spi_chipsel(ms, 0);
ms->message->status = -EIO;
- ms->message->complete(ms->message->context);
+ if (ms->message->complete)
+ ms->message->complete(ms->message->context);
ms->state = mpc52xx_spi_fsmstate_idle;
return FSM_CONTINUE;
}
@@ -289,7 +289,8 @@ mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
ms->msg_count++;
mpc52xx_spi_chipsel(ms, 0);
ms->message->status = 0;
- ms->message->complete(ms->message->context);
+ if (ms->message->complete)
+ ms->message->complete(ms->message->context);
ms->state = mpc52xx_spi_fsmstate_idle;
return FSM_CONTINUE;
}
@@ -357,20 +358,6 @@ static void mpc52xx_spi_wq(struct work_struct *work)
* spi_master ops
*/
-static int mpc52xx_spi_setup(struct spi_device *spi)
-{
- if (spi->bits_per_word % 8)
- return -EINVAL;
-
- if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST))
- return -EINVAL;
-
- if (spi->chip_select >= spi->master->num_chipselect)
- return -EINVAL;
-
- return 0;
-}
-
static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
{
struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
@@ -433,9 +420,9 @@ static int mpc52xx_spi_probe(struct platform_device *op)
goto err_alloc;
}
- master->setup = mpc52xx_spi_setup;
master->transfer = mpc52xx_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = op->dev.of_node;
platform_set_drvdata(op, master);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 79e5aa2250c8..2884f0c2f5f0 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -29,7 +29,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -371,7 +370,7 @@ static int mxs_spi_transfer_one(struct spi_master *master,
{
struct mxs_spi *spi = spi_master_get_devdata(master);
struct mxs_ssp *ssp = &spi->ssp;
- struct spi_transfer *t, *tmp_t;
+ struct spi_transfer *t;
unsigned int flag;
int status = 0;
@@ -381,7 +380,7 @@ static int mxs_spi_transfer_one(struct spi_master *master,
writel(mxs_spi_cs_to_reg(m->spi->chip_select),
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
- list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
status = mxs_spi_setup_transfer(m->spi, t);
if (status)
@@ -473,7 +472,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_err = platform_get_irq(pdev, 0);
if (irq_err < 0)
- return -EINVAL;
+ return irq_err;
base = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(base))
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index bae97ffec4b9..16e30de650b0 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -9,7 +9,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
@@ -38,7 +37,9 @@
/* usi register bit */
#define ENINT (0x01 << 17)
#define ENFLG (0x01 << 16)
+#define SLEEP (0x0f << 12)
#define TXNUM (0x03 << 8)
+#define TXBITLEN (0x1f << 3)
#define TXNEG (0x01 << 2)
#define RXNEG (0x01 << 1)
#define LSB (0x01 << 10)
@@ -58,11 +59,8 @@ struct nuc900_spi {
unsigned char *rx;
struct clk *clk;
struct spi_master *master;
- struct spi_device *curdev;
- struct device *dev;
struct nuc900_spi_info *pdata;
spinlock_t lock;
- struct resource *res;
};
static inline struct nuc900_spi *to_hw(struct spi_device *sdev)
@@ -119,19 +117,16 @@ static void nuc900_spi_chipsel(struct spi_device *spi, int value)
}
}
-static void nuc900_spi_setup_txnum(struct nuc900_spi *hw,
- unsigned int txnum)
+static void nuc900_spi_setup_txnum(struct nuc900_spi *hw, unsigned int txnum)
{
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~TXNUM;
- if (!txnum)
- val &= ~TXNUM;
- else
+ if (txnum)
val |= txnum << 0x08;
__raw_writel(val, hw->regs + USI_CNT);
@@ -148,7 +143,7 @@ static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw,
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~TXBITLEN;
val |= (txbitlen << 0x03);
@@ -287,12 +282,11 @@ static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep)
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~SLEEP;
if (sleep)
val |= (sleep << 12);
- else
- val &= ~(0x0f << 12);
+
__raw_writel(val, hw->regs + USI_CNT);
spin_unlock_irqrestore(&hw->lock, flags);
@@ -338,6 +332,7 @@ static int nuc900_spi_probe(struct platform_device *pdev)
{
struct nuc900_spi *hw;
struct spi_master *master;
+ struct resource *res;
int err = 0;
master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
@@ -349,7 +344,6 @@ static int nuc900_spi_probe(struct platform_device *pdev)
hw = spi_master_get_devdata(master);
hw->master = master;
hw->pdata = dev_get_platdata(&pdev->dev);
- hw->dev = &pdev->dev;
if (hw->pdata == NULL) {
dev_err(&pdev->dev, "No platform data supplied\n");
@@ -369,8 +363,8 @@ static int nuc900_spi_probe(struct platform_device *pdev)
hw->bitbang.chipselect = nuc900_spi_chipsel;
hw->bitbang.txrx_bufs = nuc900_spi_txrx;
- hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hw->regs = devm_ioremap_resource(&pdev->dev, hw->res);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hw->regs)) {
err = PTR_ERR(hw->regs);
goto err_pdata;
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index f7c896e2981e..8998d11c7238 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -15,7 +15,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -267,8 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
/* setup the state for the bitbang driver */
hw->bitbang.master = master;
- if (!hw->bitbang.master)
- return err;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
hw->bitbang.chipselect = tiny_spi_chipselect;
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index 67249a48b391..c5e2f718eebd 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -11,7 +11,6 @@
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -33,13 +32,6 @@ struct octeon_spi {
u64 cs_enax;
};
-struct octeon_spi_setup {
- u32 max_speed_hz;
- u8 chip_select;
- u8 mode;
- u8 bits_per_word;
-};
-
static void octeon_spi_wait_ready(struct octeon_spi *p)
{
union cvmx_mpi_sts mpi_sts;
@@ -57,6 +49,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
struct spi_transfer *xfer,
bool last_xfer)
{
+ struct spi_device *spi = msg->spi;
union cvmx_mpi_cfg mpi_cfg;
union cvmx_mpi_tx mpi_tx;
unsigned int clkdiv;
@@ -68,18 +61,11 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
int len;
int i;
- struct octeon_spi_setup *msg_setup = spi_get_ctldata(msg->spi);
-
- speed_hz = msg_setup->max_speed_hz;
- mode = msg_setup->mode;
+ mode = spi->mode;
cpha = mode & SPI_CPHA;
cpol = mode & SPI_CPOL;
- if (xfer->speed_hz)
- speed_hz = xfer->speed_hz;
-
- if (speed_hz > OCTEON_SPI_MAX_CLOCK_HZ)
- speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+ speed_hz = xfer->speed_hz ? : spi->max_speed_hz;
clkdiv = octeon_get_io_clock_rate() / (2 * speed_hz);
@@ -93,8 +79,8 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
mpi_cfg.s.cslate = cpha ? 1 : 0;
mpi_cfg.s.enable = 1;
- if (msg_setup->chip_select < 4)
- p->cs_enax |= 1ull << (12 + msg_setup->chip_select);
+ if (spi->chip_select < 4)
+ p->cs_enax |= 1ull << (12 + spi->chip_select);
mpi_cfg.u64 |= p->cs_enax;
if (mpi_cfg.u64 != p->last_cfg) {
@@ -114,7 +100,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
cvmx_write_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i), d);
}
mpi_tx.u64 = 0;
- mpi_tx.s.csid = msg_setup->chip_select;
+ mpi_tx.s.csid = spi->chip_select;
mpi_tx.s.leavecs = 1;
mpi_tx.s.txnum = tx_buf ? OCTEON_SPI_MAX_BYTES : 0;
mpi_tx.s.totnum = OCTEON_SPI_MAX_BYTES;
@@ -139,7 +125,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
}
mpi_tx.u64 = 0;
- mpi_tx.s.csid = msg_setup->chip_select;
+ mpi_tx.s.csid = spi->chip_select;
if (last_xfer)
mpi_tx.s.leavecs = xfer->cs_change;
else
@@ -169,17 +155,9 @@ static int octeon_spi_transfer_one_message(struct spi_master *master,
int status = 0;
struct spi_transfer *xfer;
- /*
- * We better have set the configuration via a call to .setup
- * before we get here.
- */
- if (spi_get_ctldata(msg->spi) == NULL) {
- status = -EINVAL;
- goto err;
- }
-
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- bool last_xfer = &xfer->transfer_list == msg->transfers.prev;
+ bool last_xfer = list_is_last(&xfer->transfer_list,
+ &msg->transfers);
int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
if (r < 0) {
status = r;
@@ -194,41 +172,6 @@ err:
return status;
}
-static struct octeon_spi_setup *octeon_spi_new_setup(struct spi_device *spi)
-{
- struct octeon_spi_setup *setup = kzalloc(sizeof(*setup), GFP_KERNEL);
- if (!setup)
- return NULL;
-
- setup->max_speed_hz = spi->max_speed_hz;
- setup->chip_select = spi->chip_select;
- setup->mode = spi->mode;
- setup->bits_per_word = spi->bits_per_word;
- return setup;
-}
-
-static int octeon_spi_setup(struct spi_device *spi)
-{
- struct octeon_spi_setup *new_setup;
- struct octeon_spi_setup *old_setup = spi_get_ctldata(spi);
-
- new_setup = octeon_spi_new_setup(spi);
- if (!new_setup)
- return -ENOMEM;
-
- spi_set_ctldata(spi, new_setup);
- kfree(old_setup);
-
- return 0;
-}
-
-static void octeon_spi_cleanup(struct spi_device *spi)
-{
- struct octeon_spi_setup *old_setup = spi_get_ctldata(spi);
- spi_set_ctldata(spi, NULL);
- kfree(old_setup);
-}
-
static int octeon_spi_probe(struct platform_device *pdev)
{
struct resource *res_mem;
@@ -257,8 +200,6 @@ static int octeon_spi_probe(struct platform_device *pdev)
p->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
resource_size(res_mem));
- /* Dynamic bus numbering */
- master->bus_num = -1;
master->num_chipselect = 4;
master->mode_bits = SPI_CPHA |
SPI_CPOL |
@@ -266,10 +207,9 @@ static int octeon_spi_probe(struct platform_device *pdev)
SPI_LSB_FIRST |
SPI_3WIRE;
- master->setup = octeon_spi_setup;
- master->cleanup = octeon_spi_cleanup;
master->transfer_one_message = octeon_spi_transfer_one_message;
master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
master->dev.of_node = pdev->dev.of_node;
err = devm_spi_register_master(&pdev->dev, master);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 0d32054bfc0d..e7ffcded4e14 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -83,15 +83,11 @@
#define SPI_SHUTDOWN 1
struct omap1_spi100k {
- struct spi_master *master;
struct clk *ick;
struct clk *fck;
/* Virtual base address of the controller */
void __iomem *base;
-
- /* State of the SPI */
- unsigned int state;
};
struct omap1_spi100k_cs {
@@ -99,13 +95,6 @@ struct omap1_spi100k_cs {
int word_len;
};
-#define MOD_REG_BIT(val, mask, set) do { \
- if (set) \
- val |= mask; \
- else \
- val &= ~mask; \
-} while (0)
-
static void spi100k_enable_clock(struct spi_master *master)
{
unsigned int val;
@@ -139,7 +128,7 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
}
spi100k_enable_clock(master);
- writew( data , spi100k->base + SPI_TX_MSB);
+ writew(data , spi100k->base + SPI_TX_MSB);
writew(SPI_CTRL_SEN(0) |
SPI_CTRL_WORD_SIZE(len) |
@@ -147,7 +136,8 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
spi100k->base + SPI_CTRL);
/* Wait for bit ack send change */
- while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE);
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE)
+ ;
udelay(1000);
spi100k_disable_clock(master);
@@ -155,7 +145,7 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
static int spi100k_read_data(struct spi_master *master, int len)
{
- int dataH,dataL;
+ int dataH, dataL;
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
/* Always do at least 16 bits */
@@ -168,7 +158,8 @@ static int spi100k_read_data(struct spi_master *master, int len)
SPI_CTRL_RD,
spi100k->base + SPI_CTRL);
- while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD);
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD)
+ ;
udelay(1000);
dataL = readw(spi100k->base + SPI_RX_LSB);
@@ -204,12 +195,10 @@ static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable)
static unsigned
omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct omap1_spi100k *spi100k;
struct omap1_spi100k_cs *cs = spi->controller_state;
unsigned int count, c;
int word_len;
- spi100k = spi_master_get_devdata(spi->master);
count = xfer->len;
c = count;
word_len = cs->word_len;
@@ -221,12 +210,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=1;
+ c -= 1;
if (xfer->tx_buf != NULL)
spi100k_write_data(spi->master, word_len, *tx++);
if (xfer->rx_buf != NULL)
*rx++ = spi100k_read_data(spi->master, word_len);
- } while(c);
+ } while (c);
} else if (word_len <= 16) {
u16 *rx;
const u16 *tx;
@@ -234,12 +223,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=2;
+ c -= 2;
if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master,word_len, *tx++);
+ spi100k_write_data(spi->master, word_len, *tx++);
if (xfer->rx_buf != NULL)
- *rx++ = spi100k_read_data(spi->master,word_len);
- } while(c);
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
} else if (word_len <= 32) {
u32 *rx;
const u32 *tx;
@@ -247,12 +236,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=4;
+ c -= 4;
if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master,word_len, *tx);
+ spi100k_write_data(spi->master, word_len, *tx);
if (xfer->rx_buf != NULL)
- *rx = spi100k_read_data(spi->master,word_len);
- } while(c);
+ *rx = spi100k_read_data(spi->master, word_len);
+ } while (c);
}
return count - c;
}
@@ -294,7 +283,7 @@ static int omap1_spi100k_setup(struct spi_device *spi)
spi100k = spi_master_get_devdata(spi->master);
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = spi100k->base + spi->chip_select * 0x14;
@@ -411,14 +400,14 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
if (!pdev->id)
return -EINVAL;
- master = spi_alloc_master(&pdev->dev, sizeof *spi100k);
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi100k));
if (master == NULL) {
dev_dbg(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
}
if (pdev->id != -1)
- master->bus_num = pdev->id;
+ master->bus_num = pdev->id;
master->setup = omap1_spi100k_setup;
master->transfer_one_message = omap1_spi100k_transfer_one_message;
@@ -434,7 +423,6 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
spi100k = spi_master_get_devdata(master);
- spi100k->master = master;
/*
* The memory region base address is taken as the platform_data.
@@ -461,8 +449,6 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
if (status < 0)
goto err;
- spi100k->state = SPI_RUNNING;
-
return status;
err:
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 9313fd3b413d..be2a2e108e2f 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -99,7 +99,6 @@ struct uwire_spi {
};
struct uwire_state {
- unsigned bits_per_word;
unsigned div1_idx;
};
@@ -210,9 +209,8 @@ static void uwire_chipselect(struct spi_device *spi, int value)
static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
{
- struct uwire_state *ust = spi->controller_state;
unsigned len = t->len;
- unsigned bits = ust->bits_per_word;
+ unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned bytes;
u16 val, w;
int status = 0;
@@ -220,10 +218,6 @@ static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
if (!t->tx_buf && !t->rx_buf)
return 0;
- /* Microwire doesn't read and write concurrently */
- if (t->tx_buf && t->rx_buf)
- return -EPERM;
-
w = spi->chip_select << 10;
w |= CS_CMD;
@@ -322,7 +316,6 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
struct uwire_state *ust = spi->controller_state;
struct uwire_spi *uwire;
unsigned flags = 0;
- unsigned bits;
unsigned hz;
unsigned long rate;
int div1_idx;
@@ -332,23 +325,6 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
uwire = spi_master_get_devdata(spi->master);
- if (spi->chip_select > 3) {
- pr_debug("%s: cs%d?\n", dev_name(&spi->dev), spi->chip_select);
- status = -ENODEV;
- goto done;
- }
-
- bits = spi->bits_per_word;
- if (t != NULL && t->bits_per_word)
- bits = t->bits_per_word;
-
- if (bits > 16) {
- pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits);
- status = -ENODEV;
- goto done;
- }
- ust->bits_per_word = bits;
-
/* mode 0..3, clock inverted separately;
* standard nCS signaling;
* don't treat DI=high as "not ready"
@@ -502,6 +478,7 @@ static int uwire_probe(struct platform_device *pdev)
status = PTR_ERR(uwire->ck);
dev_dbg(&pdev->dev, "no functional clock?\n");
spi_master_put(master);
+ iounmap(uwire_base);
return status;
}
clk_enable(uwire->ck);
@@ -515,7 +492,7 @@ static int uwire_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
master->flags = SPI_MASTER_HALF_DUPLEX;
master->bus_num = 2; /* "official" */
@@ -539,14 +516,13 @@ static int uwire_probe(struct platform_device *pdev)
static int uwire_remove(struct platform_device *pdev)
{
struct uwire_spi *uwire = platform_get_drvdata(pdev);
- int status;
// FIXME remove all child devices, somewhere ...
- status = spi_bitbang_stop(&uwire->bitbang);
+ spi_bitbang_stop(&uwire->bitbang);
uwire_off(uwire);
iounmap(uwire_base);
- return status;
+ return 0;
}
/* work with hotplug and coldplug */
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index a72127f08e39..4dc77df38864 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -22,7 +22,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -45,6 +44,7 @@
#include <linux/platform_data/spi-omap2-mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ 48000000
+#define OMAP2_MCSPI_MAX_DIVIDER 4096
#define OMAP2_MCSPI_MAX_FIFODEPTH 64
#define OMAP2_MCSPI_MAX_FIFOWCNT 0xFFFF
#define SPI_AUTOSUSPEND_TIMEOUT 2000
@@ -89,6 +89,7 @@
#define OMAP2_MCSPI_CHCONF_FORCE BIT(20)
#define OMAP2_MCSPI_CHCONF_FFET BIT(27)
#define OMAP2_MCSPI_CHCONF_FFER BIT(28)
+#define OMAP2_MCSPI_CHCONF_CLKG BIT(29)
#define OMAP2_MCSPI_CHSTAT_RXS BIT(0)
#define OMAP2_MCSPI_CHSTAT_TXS BIT(1)
@@ -96,6 +97,7 @@
#define OMAP2_MCSPI_CHSTAT_TXFFE BIT(3)
#define OMAP2_MCSPI_CHCTRL_EN BIT(0)
+#define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK (0xff << 8)
#define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0)
@@ -149,7 +151,7 @@ struct omap2_mcspi_cs {
int word_len;
struct list_head node;
/* Context save and restore shadow register */
- u32 chconf0;
+ u32 chconf0, chctrl0;
};
static inline void mcspi_write_reg(struct spi_master *master,
@@ -230,10 +232,16 @@ static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
u32 l;
- l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
+ l = cs->chctrl0;
+ if (enable)
+ l |= OMAP2_MCSPI_CHCTRL_EN;
+ else
+ l &= ~OMAP2_MCSPI_CHCTRL_EN;
+ cs->chctrl0 = l;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
/* Flash post-writes */
mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}
@@ -840,7 +848,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
struct spi_master *spi_cntrl;
- u32 l = 0, div = 0;
+ u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
u8 word_len = spi->bits_per_word;
u32 speed_hz = spi->max_speed_hz;
@@ -856,7 +864,17 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
speed_hz = t->speed_hz;
speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
- div = omap2_mcspi_calc_divisor(speed_hz);
+ if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+ clkd = omap2_mcspi_calc_divisor(speed_hz);
+ speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
+ clkg = 0;
+ } else {
+ div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+ speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
+ clkd = (div - 1) & 0xf;
+ extclk = (div - 1) >> 4;
+ clkg = OMAP2_MCSPI_CHCONF_CLKG;
+ }
l = mcspi_cached_chconf0(spi);
@@ -885,7 +903,16 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
/* set clock divisor */
l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
- l |= div << 2;
+ l |= clkd << 2;
+
+ /* set clock granularity */
+ l &= ~OMAP2_MCSPI_CHCONF_CLKG;
+ l |= clkg;
+ if (clkg) {
+ cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
+ cs->chctrl0 |= extclk << 8;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
+ }
/* set SPI mode 0..3 */
if (spi->mode & SPI_CPOL)
@@ -900,7 +927,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
mcspi_write_chconf0(spi, l);
dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
- OMAP2_MCSPI_MAX_FREQ >> div,
+ speed_hz,
(spi->mode & SPI_CPHA) ? "trailing" : "leading",
(spi->mode & SPI_CPOL) ? "inverted" : "normal");
@@ -972,6 +999,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
cs->base = mcspi->base + spi->chip_select * 0x14;
cs->phys = mcspi->phys + spi->chip_select * 0x14;
cs->chconf0 = 0;
+ cs->chctrl0 = 0;
spi->controller_state = cs;
/* Link this to context save list */
list_add_tail(&cs->node, &ctx->cs);
@@ -1057,12 +1085,15 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
status = -EINVAL;
break;
}
- if (par_override || t->speed_hz || t->bits_per_word) {
+ if (par_override ||
+ (t->speed_hz != spi->max_speed_hz) ||
+ (t->bits_per_word != spi->bits_per_word)) {
par_override = 1;
status = omap2_mcspi_setup_transfer(spi, t);
if (status < 0)
break;
- if (!t->speed_hz && !t->bits_per_word)
+ if (t->speed_hz == spi->max_speed_hz &&
+ t->bits_per_word == spi->bits_per_word)
par_override = 0;
}
if (cd && cd->cs_per_word) {
@@ -1176,16 +1207,12 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
m->actual_length = 0;
m->status = 0;
- /* reject invalid messages and transfers */
- if (list_empty(&m->transfers))
- return -EINVAL;
list_for_each_entry(t, &m->transfers, transfer_list) {
const void *tx_buf = t->tx_buf;
void *rx_buf = t->rx_buf;
unsigned len = t->len;
- if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
- || (len && !(rx_buf || tx_buf))) {
+ if ((len && !(rx_buf || tx_buf))) {
dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
t->speed_hz,
len,
@@ -1194,12 +1221,6 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
t->bits_per_word);
return -EINVAL;
}
- if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
- dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
- t->speed_hz,
- OMAP2_MCSPI_MAX_FREQ >> 15);
- return -EINVAL;
- }
if (m->is_dma_mapped || len < DMA_MIN_BYTES)
continue;
@@ -1311,6 +1332,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->transfer_one_message = omap2_mcspi_transfer_one_message;
master->cleanup = omap2_mcspi_cleanup;
master->dev.of_node = node;
+ master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+ master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
platform_set_drvdata(pdev, master);
@@ -1356,12 +1379,13 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&mcspi->ctx.cs);
- mcspi->dma_channels = kcalloc(master->num_chipselect,
- sizeof(struct omap2_mcspi_dma),
- GFP_KERNEL);
-
- if (mcspi->dma_channels == NULL)
+ mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
+ sizeof(struct omap2_mcspi_dma),
+ GFP_KERNEL);
+ if (mcspi->dma_channels == NULL) {
+ status = -ENOMEM;
goto free_master;
+ }
for (i = 0; i < master->num_chipselect; i++) {
char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
@@ -1403,7 +1427,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
}
if (status < 0)
- goto dma_chnl_free;
+ goto free_master;
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
@@ -1421,8 +1445,6 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
disable_pm:
pm_runtime_disable(&pdev->dev);
-dma_chnl_free:
- kfree(mcspi->dma_channels);
free_master:
spi_master_put(master);
return status;
@@ -1430,19 +1452,12 @@ free_master:
static int omap2_mcspi_remove(struct platform_device *pdev)
{
- struct spi_master *master;
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *dma_channels;
-
- master = platform_get_drvdata(pdev);
- mcspi = spi_master_get_devdata(master);
- dma_channels = mcspi->dma_channels;
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
pm_runtime_put_sync(mcspi->dev);
pm_runtime_disable(&pdev->dev);
- kfree(dma_channels);
-
return 0;
}
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 7f2121fe2622..d018a4aac3a1 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -9,7 +9,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -43,8 +42,6 @@
struct orion_spi {
struct spi_master *master;
void __iomem *base;
- unsigned int max_speed;
- unsigned int min_speed;
struct clk *clk;
};
@@ -75,23 +72,6 @@ orion_spi_clrbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
writel(val, reg_addr);
}
-static int orion_spi_set_transfer_size(struct orion_spi *orion_spi, int size)
-{
- if (size == 16) {
- orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
- ORION_SPI_IF_8_16_BIT_MODE);
- } else if (size == 8) {
- orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
- ORION_SPI_IF_8_16_BIT_MODE);
- } else {
- pr_debug("Bad bits per word value %d (only 8 or 16 are allowed).\n",
- size);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
{
u32 tclk_hz;
@@ -170,7 +150,14 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if (rc)
return rc;
- return orion_spi_set_transfer_size(orion_spi, bits_per_word);
+ if (bits_per_word == 16)
+ orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+ else
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+
+ return 0;
}
static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable)
@@ -260,11 +247,9 @@ orion_spi_write_read_16bit(struct spi_device *spi,
static unsigned int
orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct orion_spi *orion_spi;
unsigned int count;
int word_len;
- orion_spi = spi_master_get_devdata(spi->master);
word_len = spi->bits_per_word;
count = xfer->len;
@@ -310,27 +295,6 @@ static int orion_spi_transfer_one_message(struct spi_master *master,
goto msg_done;
list_for_each_entry(t, &m->transfers, transfer_list) {
- /* make sure buffer length is even when working in 16
- * bit mode*/
- if ((t->bits_per_word == 16) && (t->len & 1)) {
- dev_err(&spi->dev,
- "message rejected : "
- "odd data length %d while in 16 bit mode\n",
- t->len);
- status = -EIO;
- goto msg_done;
- }
-
- if (t->speed_hz && t->speed_hz < orion_spi->min_speed) {
- dev_err(&spi->dev,
- "message rejected : "
- "device min speed (%d Hz) exceeds "
- "required transfer speed (%d Hz)\n",
- orion_spi->min_speed, t->speed_hz);
- status = -EIO;
- goto msg_done;
- }
-
if (par_override || t->speed_hz || t->bits_per_word) {
par_override = 1;
status = orion_spi_setup_transfer(spi, t);
@@ -375,28 +339,6 @@ static int orion_spi_reset(struct orion_spi *orion_spi)
return 0;
}
-static int orion_spi_setup(struct spi_device *spi)
-{
- struct orion_spi *orion_spi;
-
- orion_spi = spi_master_get_devdata(spi->master);
-
- if ((spi->max_speed_hz == 0)
- || (spi->max_speed_hz > orion_spi->max_speed))
- spi->max_speed_hz = orion_spi->max_speed;
-
- if (spi->max_speed_hz < orion_spi->min_speed) {
- dev_err(&spi->dev, "setup: requested speed too low %d Hz\n",
- spi->max_speed_hz);
- return -EINVAL;
- }
-
- /*
- * baudrate & width will be set orion_spi_setup_transfer
- */
- return 0;
-}
-
static int orion_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -425,9 +367,9 @@ static int orion_spi_probe(struct platform_device *pdev)
/* we support only mode 0, and no options */
master->mode_bits = SPI_CPHA | SPI_CPOL;
- master->setup = orion_spi_setup;
master->transfer_one_message = orion_spi_transfer_one_message;
master->num_chipselect = ORION_NUM_CHIPSELECTS;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
platform_set_drvdata(pdev, master);
@@ -443,8 +385,8 @@ static int orion_spi_probe(struct platform_device *pdev)
clk_prepare(spi->clk);
clk_enable(spi->clk);
tclk_hz = clk_get_rate(spi->clk);
- spi->max_speed = DIV_ROUND_UP(tclk_hz, 4);
- spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);
+ master->max_speed_hz = DIV_ROUND_UP(tclk_hz, 4);
+ master->min_speed_hz = DIV_ROUND_UP(tclk_hz, 30);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi->base = devm_ioremap_resource(&pdev->dev, r);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 2789b452e711..51d99779682f 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -459,9 +459,8 @@ static void giveback(struct pl022 *pl022)
struct spi_transfer *last_transfer;
pl022->next_msg_cs_active = false;
- last_transfer = list_entry(pl022->cur_msg->transfers.prev,
- struct spi_transfer,
- transfer_list);
+ last_transfer = list_last_entry(&pl022->cur_msg->transfers,
+ struct spi_transfer, transfer_list);
/* Delay if requested before any change in chip select */
if (last_transfer->delay_usecs)
@@ -2109,8 +2108,6 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
pl022->chipselects = devm_kzalloc(dev, num_cs * sizeof(int),
GFP_KERNEL);
- pinctrl_pm_select_default_state(dev);
-
/*
* Bus Number Which has been Assigned to this SSP controller
* on this board
@@ -2183,13 +2180,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_no_clk;
}
- status = clk_prepare(pl022->clk);
- if (status) {
- dev_err(&adev->dev, "could not prepare SSP/SPI bus clock\n");
- goto err_clk_prep;
- }
-
- status = clk_enable(pl022->clk);
+ status = clk_prepare_enable(pl022->clk);
if (status) {
dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
goto err_no_clk_en;
@@ -2250,10 +2241,8 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
if (platform_info->enable_dma)
pl022_dma_remove(pl022);
err_no_irq:
- clk_disable(pl022->clk);
+ clk_disable_unprepare(pl022->clk);
err_no_clk_en:
- clk_unprepare(pl022->clk);
- err_clk_prep:
err_no_clk:
err_no_ioremap:
amba_release_regions(adev);
@@ -2281,42 +2270,13 @@ pl022_remove(struct amba_device *adev)
if (pl022->master_info->enable_dma)
pl022_dma_remove(pl022);
- clk_disable(pl022->clk);
- clk_unprepare(pl022->clk);
+ clk_disable_unprepare(pl022->clk);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
return 0;
}
-#if defined(CONFIG_SUSPEND) || defined(CONFIG_PM_RUNTIME)
-/*
- * These two functions are used from both suspend/resume and
- * the runtime counterparts to handle external resources like
- * clocks, pins and regulators when going to sleep.
- */
-static void pl022_suspend_resources(struct pl022 *pl022, bool runtime)
-{
- clk_disable(pl022->clk);
-
- if (runtime)
- pinctrl_pm_select_idle_state(&pl022->adev->dev);
- else
- pinctrl_pm_select_sleep_state(&pl022->adev->dev);
-}
-
-static void pl022_resume_resources(struct pl022 *pl022, bool runtime)
-{
- /* First go to the default state */
- pinctrl_pm_select_default_state(&pl022->adev->dev);
- if (!runtime)
- /* Then let's idle the pins until the next transfer happens */
- pinctrl_pm_select_idle_state(&pl022->adev->dev);
-
- clk_enable(pl022->clk);
-}
-#endif
-
-#ifdef CONFIG_SUSPEND
+#ifdef CONFIG_PM_SLEEP
static int pl022_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
@@ -2328,8 +2288,13 @@ static int pl022_suspend(struct device *dev)
return ret;
}
- pm_runtime_get_sync(dev);
- pl022_suspend_resources(pl022, false);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret) {
+ spi_master_resume(pl022->master);
+ return ret;
+ }
+
+ pinctrl_pm_select_sleep_state(dev);
dev_dbg(dev, "suspended\n");
return 0;
@@ -2340,8 +2305,9 @@ static int pl022_resume(struct device *dev)
struct pl022 *pl022 = dev_get_drvdata(dev);
int ret;
- pl022_resume_resources(pl022, false);
- pm_runtime_put(dev);
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ dev_err(dev, "problem resuming\n");
/* Start the queue running */
ret = spi_master_resume(pl022->master);
@@ -2352,14 +2318,16 @@ static int pl022_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM */
+#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int pl022_runtime_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_suspend_resources(pl022, true);
+ clk_disable_unprepare(pl022->clk);
+ pinctrl_pm_select_idle_state(dev);
+
return 0;
}
@@ -2367,14 +2335,16 @@ static int pl022_runtime_resume(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_resume_resources(pl022, true);
+ pinctrl_pm_select_default_state(dev);
+ clk_prepare_enable(pl022->clk);
+
return 0;
}
#endif
static const struct dev_pm_ops pl022_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
- SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
+ SET_PM_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
};
static struct vendor_data vendor_arm = {
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 5ee56726f8d0..80b8408ac3e3 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -24,7 +24,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 3c0b55125f1e..713af4806f26 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -9,7 +9,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
index 2916efc7cfe5..e8a26f25d5c0 100644
--- a/drivers/spi/spi-pxa2xx-pxadma.c
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -18,7 +18,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index c702fc536a77..41185d0557fa 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -362,8 +362,7 @@ static void giveback(struct driver_data *drv_data)
drv_data->cur_msg = NULL;
drv_data->cur_transfer = NULL;
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer,
+ last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
transfer_list);
/* Delay if requested before any change in chip select */
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
new file mode 100644
index 000000000000..b032e8885e24
--- /dev/null
+++ b/drivers/spi/spi-qup.c
@@ -0,0 +1,779 @@
+/*
+ * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#define QUP_CONFIG 0x0000
+#define QUP_STATE 0x0004
+#define QUP_IO_M_MODES 0x0008
+#define QUP_SW_RESET 0x000c
+#define QUP_OPERATIONAL 0x0018
+#define QUP_ERROR_FLAGS 0x001c
+#define QUP_ERROR_FLAGS_EN 0x0020
+#define QUP_OPERATIONAL_MASK 0x0028
+#define QUP_HW_VERSION 0x0030
+#define QUP_MX_OUTPUT_CNT 0x0100
+#define QUP_OUTPUT_FIFO 0x0110
+#define QUP_MX_WRITE_CNT 0x0150
+#define QUP_MX_INPUT_CNT 0x0200
+#define QUP_MX_READ_CNT 0x0208
+#define QUP_INPUT_FIFO 0x0218
+
+#define SPI_CONFIG 0x0300
+#define SPI_IO_CONTROL 0x0304
+#define SPI_ERROR_FLAGS 0x0308
+#define SPI_ERROR_FLAGS_EN 0x030c
+
+/* QUP_CONFIG fields */
+#define QUP_CONFIG_SPI_MODE (1 << 8)
+#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
+#define QUP_CONFIG_NO_INPUT BIT(7)
+#define QUP_CONFIG_NO_OUTPUT BIT(6)
+#define QUP_CONFIG_N 0x001f
+
+/* QUP_STATE fields */
+#define QUP_STATE_VALID BIT(2)
+#define QUP_STATE_RESET 0
+#define QUP_STATE_RUN 1
+#define QUP_STATE_PAUSE 3
+#define QUP_STATE_MASK 3
+#define QUP_STATE_CLEAR 2
+
+#define QUP_HW_VERSION_2_1_1 0x20010001
+
+/* QUP_IO_M_MODES fields */
+#define QUP_IO_M_PACK_EN BIT(15)
+#define QUP_IO_M_UNPACK_EN BIT(14)
+#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
+#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
+#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
+#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
+
+#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
+#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
+#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
+#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)
+
+#define QUP_IO_M_MODE_FIFO 0
+#define QUP_IO_M_MODE_BLOCK 1
+#define QUP_IO_M_MODE_DMOV 2
+#define QUP_IO_M_MODE_BAM 3
+
+/* QUP_OPERATIONAL fields */
+#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
+#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
+#define QUP_OP_IN_SERVICE_FLAG BIT(9)
+#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
+#define QUP_OP_IN_FIFO_FULL BIT(7)
+#define QUP_OP_OUT_FIFO_FULL BIT(6)
+#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
+#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)
+
+/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
+#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
+#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
+#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
+#define QUP_ERROR_INPUT_OVER_RUN BIT(2)
+
+/* SPI_CONFIG fields */
+#define SPI_CONFIG_HS_MODE BIT(10)
+#define SPI_CONFIG_INPUT_FIRST BIT(9)
+#define SPI_CONFIG_LOOPBACK BIT(8)
+
+/* SPI_IO_CONTROL fields */
+#define SPI_IO_C_FORCE_CS BIT(11)
+#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
+#define SPI_IO_C_MX_CS_MODE BIT(8)
+#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
+#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
+#define SPI_IO_C_CS_SELECT_MASK 0x000c
+#define SPI_IO_C_TRISTATE_CS BIT(1)
+#define SPI_IO_C_NO_TRI_STATE BIT(0)
+
+/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
+#define SPI_ERROR_CLK_OVER_RUN BIT(1)
+#define SPI_ERROR_CLK_UNDER_RUN BIT(0)
+
+#define SPI_NUM_CHIPSELECTS 4
+
+/* high speed mode is when the bus rate is greater than 26 MHz */
+#define SPI_HS_MIN_RATE 26000000
+#define SPI_MAX_RATE 50000000
+
+#define SPI_DELAY_THRESHOLD 1
+#define SPI_DELAY_RETRY 10
+
+struct spi_qup {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *cclk; /* core clock */
+ struct clk *iclk; /* interface clock */
+ int irq;
+ spinlock_t lock;
+
+ int in_fifo_sz;
+ int out_fifo_sz;
+ int in_blk_sz;
+ int out_blk_sz;
+
+ struct spi_transfer *xfer;
+ struct completion done;
+ int error;
+ int w_size; /* bytes per SPI word */
+ int tx_bytes;
+ int rx_bytes;
+};
+
+
+static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
+{
+ u32 opstate = readl_relaxed(controller->base + QUP_STATE);
+
+ return opstate & QUP_STATE_VALID;
+}
+
+static int spi_qup_set_state(struct spi_qup *controller, u32 state)
+{
+ unsigned long loop;
+ u32 cur_state;
+
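+ /* wait for the state machine to report a valid state, giving up after SPI_DELAY_RETRY polls */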
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ if (loop)
+ dev_dbg(controller->dev, "invalid state for %ld us, state %d\n",
+ loop, state);
+
+ cur_state = readl_relaxed(controller->base + QUP_STATE);
+ /*
+ * Per spec: for PAUSE_STATE to RESET_STATE, two writes
+ * of (b10) are required
+ */
+ if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
+ (state == QUP_STATE_RESET)) {
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ } else {
+ cur_state &= ~QUP_STATE_MASK;
+ cur_state |= state;
+ writel_relaxed(cur_state, controller->base + QUP_STATE);
+ }
+
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static void spi_qup_fifo_read(struct spi_qup *controller,
+ struct spi_transfer *xfer)
+{
+ u8 *rx_buf = xfer->rx_buf;
+ u32 word, state;
+ int idx, shift, w_size;
+
+ w_size = controller->w_size;
+
+ while (controller->rx_bytes < xfer->len) {
+
+ state = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
+ break;
+
+ word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
+
+ if (!rx_buf) {
+ controller->rx_bytes += w_size;
+ continue;
+ }
+
+ for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
+ /*
+ * The data format depends on bytes per SPI word:
+ * 4 bytes: 0x12345678
+ * 2 bytes: 0x00001234
+ * 1 byte : 0x00000012
+ */
+ shift = BITS_PER_BYTE;
+ shift *= (w_size - idx - 1);
+ rx_buf[controller->rx_bytes] = word >> shift;
+ }
+ }
+}
+
+static void spi_qup_fifo_write(struct spi_qup *controller,
+ struct spi_transfer *xfer)
+{
+ const u8 *tx_buf = xfer->tx_buf;
+ u32 word, state, data;
+ int idx, w_size;
+
+ w_size = controller->w_size;
+
+ while (controller->tx_bytes < xfer->len) {
+
+ state = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (state & QUP_OP_OUT_FIFO_FULL)
+ break;
+
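+ /* pack the next w_size bytes MSB-first into the top of the 32-bit FIFO word */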
+ word = 0;
+ for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
+
+ if (!tx_buf) {
+ controller->tx_bytes += w_size;
+ break;
+ }
+
+ data = tx_buf[controller->tx_bytes];
+ word |= data << (BITS_PER_BYTE * (3 - idx));
+ }
+
+ writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
+ }
+}
+
+static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
+{
+ struct spi_qup *controller = dev_id;
+ struct spi_transfer *xfer;
+ u32 opflags, qup_err, spi_err;
+ unsigned long flags;
+ int error = 0;
+
+ spin_lock_irqsave(&controller->lock, flags);
+ xfer = controller->xfer;
+ controller->xfer = NULL;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
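+ /* read the latched error and operational flags, then write the same values back to acknowledge them */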
+ qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
+ spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
+ opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
+
+ writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
+ writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
+ writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
+
+ if (!xfer) {
+ dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
+ qup_err, spi_err, opflags);
+ return IRQ_HANDLED;
+ }
+
+ if (qup_err) {
+ if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
+ dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
+ dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
+ dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
+ dev_warn(controller->dev, "INPUT_OVER_RUN\n");
+
+ error = -EIO;
+ }
+
+ if (spi_err) {
+ if (spi_err & SPI_ERROR_CLK_OVER_RUN)
+ dev_warn(controller->dev, "CLK_OVER_RUN\n");
+ if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
+ dev_warn(controller->dev, "CLK_UNDER_RUN\n");
+
+ error = -EIO;
+ }
+
+ if (opflags & QUP_OP_IN_SERVICE_FLAG)
+ spi_qup_fifo_read(controller, xfer);
+
+ if (opflags & QUP_OP_OUT_SERVICE_FLAG)
+ spi_qup_fifo_write(controller, xfer);
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->error = error;
+ controller->xfer = xfer;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (controller->rx_bytes == xfer->len || error)
+ complete(&controller->done);
+
+ return IRQ_HANDLED;
+}
+
+
+/* set clock freq ... bits per word */
+static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(spi->master);
+ u32 config, iomode, mode;
+ int ret, n_words, w_size;
+
+ if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
+ dev_err(controller->dev, "transfer too large for loopback: %d > %d\n",
+ xfer->len, controller->in_fifo_sz);
+ return -EIO;
+ }
+
+ ret = clk_set_rate(controller->cclk, xfer->speed_hz);
+ if (ret) {
+ dev_err(controller->dev, "fail to set frequency %d",
+ xfer->speed_hz);
+ return -EIO;
+ }
+
+ if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
+ dev_err(controller->dev, "cannot set RESET state\n");
+ return -EIO;
+ }
+
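+ /* round the word size up to a 1, 2 or 4 byte container */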
+ w_size = 4;
+ if (xfer->bits_per_word <= 8)
+ w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ w_size = 2;
+
+ n_words = xfer->len / w_size;
+ controller->w_size = w_size;
+
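+ /* use FIFO mode when the whole transfer fits in the FIFO, otherwise fall back to BLOCK mode */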
+ if (n_words <= controller->in_fifo_sz) {
+ mode = QUP_IO_M_MODE_FIFO;
+ writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
+ /* must be zero for FIFO */
+ writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
+ } else {
+ mode = QUP_IO_M_MODE_BLOCK;
+ writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
+ /* must be zero for BLOCK and BAM */
+ writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+ }
+
+ iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
+ /* Set input and output transfer mode */
+ iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
+ iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+ iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
+ iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
+
+ writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
+
+ config = readl_relaxed(controller->base + SPI_CONFIG);
+
+ if (spi->mode & SPI_LOOP)
+ config |= SPI_CONFIG_LOOPBACK;
+ else
+ config &= ~SPI_CONFIG_LOOPBACK;
+
+ if (spi->mode & SPI_CPHA)
+ config &= ~SPI_CONFIG_INPUT_FIRST;
+ else
+ config |= SPI_CONFIG_INPUT_FIRST;
+
+ /*
+ * HS_MODE improves signal stability at high SPI clock rates,
+ * but is invalid in loopback mode.
+ */
+ if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
+ config |= SPI_CONFIG_HS_MODE;
+ else
+ config &= ~SPI_CONFIG_HS_MODE;
+
+ writel_relaxed(config, controller->base + SPI_CONFIG);
+
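+ /* program the word size (the N field holds bits_per_word - 1) and switch the QUP into SPI mode */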
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
+ config |= xfer->bits_per_word - 1;
+ config |= QUP_CONFIG_SPI_MODE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+
+ writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
+ return 0;
+}
+
+static void spi_qup_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_qup *controller = spi_master_get_devdata(spi->master);
+
+ u32 iocontol, mask;
+
+ iocontol = readl_relaxed(controller->base + SPI_IO_CONTROL);
+
+ /* Disable auto CS toggle and use manual */
+ iocontol &= ~SPI_IO_C_MX_CS_MODE;
+ iocontol |= SPI_IO_C_FORCE_CS;
+
+ iocontol &= ~SPI_IO_C_CS_SELECT_MASK;
+ iocontol |= SPI_IO_C_CS_SELECT(spi->chip_select);
+
+ mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
+
+ if (enable)
+ iocontol |= mask;
+ else
+ iocontol &= ~mask;
+
+ writel_relaxed(iocontol, controller->base + SPI_IO_CONTROL);
+}
+
+static int spi_qup_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ unsigned long timeout, flags;
+ int ret = -EIO;
+
+ ret = spi_qup_io_config(spi, xfer);
+ if (ret)
+ return ret;
+
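+ /* allow roughly 100x the nominal transfer time (xfer->len bytes at speed_hz) before giving up */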
+ timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
+ timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
+ timeout = 100 * msecs_to_jiffies(timeout);
+
+ reinit_completion(&controller->done);
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = xfer;
+ controller->error = 0;
+ controller->rx_bytes = 0;
+ controller->tx_bytes = 0;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
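+ /* go RUN then PAUSE so the output FIFO can be primed, then start the transfer with a final RUN */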
+ if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+ dev_warn(controller->dev, "cannot set RUN state\n");
+ goto exit;
+ }
+
+ if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
+ dev_warn(controller->dev, "cannot set PAUSE state\n");
+ goto exit;
+ }
+
+ spi_qup_fifo_write(controller, xfer);
+
+ if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+ dev_warn(controller->dev, "cannot set EXECUTE state\n");
+ goto exit;
+ }
+
+ if (!wait_for_completion_timeout(&controller->done, timeout))
+ ret = -ETIMEDOUT;
+exit:
+ spi_qup_set_state(controller, QUP_STATE_RESET);
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = NULL;
+ if (!ret)
+ ret = controller->error;
+ spin_unlock_irqrestore(&controller->lock, flags);
+ return ret;
+}
+
+static int spi_qup_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct clk *iclk, *cclk;
+ struct spi_qup *controller;
+ struct resource *res;
+ struct device *dev;
+ void __iomem *base;
+ u32 data, max_freq, iomode;
+ int ret, irq, size;
+
+ dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ cclk = devm_clk_get(dev, "core");
+ if (IS_ERR(cclk))
+ return PTR_ERR(cclk);
+
+ iclk = devm_clk_get(dev, "iface");
+ if (IS_ERR(iclk))
+ return PTR_ERR(iclk);
+
+ /* This is an optional parameter */
+ if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
+ max_freq = SPI_MAX_RATE;
+
+ if (!max_freq || max_freq > SPI_MAX_RATE) {
+ dev_err(dev, "invalid clock frequency %d\n", max_freq);
+ return -ENXIO;
+ }
+
+ ret = clk_prepare_enable(cclk);
+ if (ret) {
+ dev_err(dev, "cannot enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(iclk);
+ if (ret) {
+ clk_disable_unprepare(cclk);
+ dev_err(dev, "cannot enable iface clock\n");
+ return ret;
+ }
+
+ data = readl_relaxed(base + QUP_HW_VERSION);
+
+ if (data < QUP_HW_VERSION_2_1_1) {
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ dev_err(dev, "v.%08x is not supported\n", data);
+ return -ENXIO;
+ }
+
+ master = spi_alloc_master(dev, sizeof(struct spi_qup));
+ if (!master) {
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ dev_err(dev, "cannot allocate master\n");
+ return -ENOMEM;
+ }
+
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ master->num_chipselect = SPI_NUM_CHIPSELECTS;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->max_speed_hz = max_freq;
+ master->set_cs = spi_qup_set_cs;
+ master->transfer_one = spi_qup_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ controller = spi_master_get_devdata(master);
+
+ controller->dev = dev;
+ controller->base = base;
+ controller->iclk = iclk;
+ controller->cclk = cclk;
+ controller->irq = irq;
+
+ spin_lock_init(&controller->lock);
+ init_completion(&controller->done);
+
+ iomode = readl_relaxed(base + QUP_IO_M_MODES);
+
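+ /* decode block and FIFO sizes from IO_M_MODES; a zero block-size field means the 4-byte minimum */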
+ size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->out_blk_sz = size * 16;
+ else
+ controller->out_blk_sz = 4;
+
+ size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->in_blk_sz = size * 16;
+ else
+ controller->in_blk_sz = 4;
+
+ size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
+ controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
+
+ size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
+ controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
+
+ dev_info(dev, "v.%08x IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
+ data, controller->in_blk_sz, controller->in_fifo_sz,
+ controller->out_blk_sz, controller->out_fifo_sz);
+
+ writel_relaxed(1, base + QUP_SW_RESET);
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret) {
+ dev_err(dev, "cannot set RESET state\n");
+ goto error;
+ }
+
+ writel_relaxed(0, base + QUP_OPERATIONAL);
+ writel_relaxed(0, base + QUP_IO_M_MODES);
+ writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
+ writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
+ base + SPI_ERROR_FLAGS_EN);
+
+ writel_relaxed(0, base + SPI_CONFIG);
+ writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
+
+ ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
+ IRQF_TRIGGER_HIGH, pdev->name, controller);
+ if (ret)
+ goto error;
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto error;
+
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ return 0;
+
+error:
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ spi_master_put(master);
+ return ret;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int spi_qup_pm_suspend_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+
+ /* Enable clock auto-gating */
+ config = readl(controller->base + QUP_CONFIG);
+ config |= QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+ return 0;
+}
+
+static int spi_qup_pm_resume_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+
+ /* Disable clock auto-gating */
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+ return 0;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_qup_suspend(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ return 0;
+}
+
+static int spi_qup_resume(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(controller->iclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(controller->cclk);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int spi_qup_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = dev_get_drvdata(&pdev->dev);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id spi_qup_dt_match[] = {
+ { .compatible = "qcom,spi-qup-v2.1.1", },
+ { .compatible = "qcom,spi-qup-v2.2.1", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
+
+static const struct dev_pm_ops spi_qup_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
+ SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
+ spi_qup_pm_resume_runtime,
+ NULL)
+};
+
+static struct platform_driver spi_qup_driver = {
+ .driver = {
+ .name = "spi_qup",
+ .owner = THIS_MODULE,
+ .pm = &spi_qup_dev_pm_ops,
+ .of_match_table = spi_qup_dt_match,
+ },
+ .probe = spi_qup_probe,
+ .remove = spi_qup_remove,
+};
+module_platform_driver(spi_qup_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spi_qup");
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 28987d9fcfe5..1fb0ad213324 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1,7 +1,8 @@
/*
* SH RSPI driver
*
- * Copyright (C) 2012 Renesas Solutions Corp.
+ * Copyright (C) 2012, 2013 Renesas Solutions Corp.
+ * Copyright (C) 2014 Glider bvba
*
* Based on spi-sh.c:
* Copyright (C) 2011 Renesas Solutions Corp.
@@ -25,14 +26,14 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>
@@ -49,7 +50,7 @@
#define RSPI_SPCKD 0x0c /* Clock Delay Register */
#define RSPI_SSLND 0x0d /* Slave Select Negation Delay Register */
#define RSPI_SPND 0x0e /* Next-Access Delay Register */
-#define RSPI_SPCR2 0x0f /* Control Register 2 */
+#define RSPI_SPCR2 0x0f /* Control Register 2 (SH only) */
#define RSPI_SPCMD0 0x10 /* Command Register 0 */
#define RSPI_SPCMD1 0x12 /* Command Register 1 */
#define RSPI_SPCMD2 0x14 /* Command Register 2 */
@@ -58,16 +59,23 @@
#define RSPI_SPCMD5 0x1a /* Command Register 5 */
#define RSPI_SPCMD6 0x1c /* Command Register 6 */
#define RSPI_SPCMD7 0x1e /* Command Register 7 */
+#define RSPI_SPCMD(i) (RSPI_SPCMD0 + (i) * 2)
+#define RSPI_NUM_SPCMD 8
+#define RSPI_RZ_NUM_SPCMD 4
+#define QSPI_NUM_SPCMD 4
+
+/* RSPI on RZ only */
#define RSPI_SPBFCR 0x20 /* Buffer Control Register */
#define RSPI_SPBFDR 0x22 /* Buffer Data Count Setting Register */
-/*qspi only */
+/* QSPI only */
#define QSPI_SPBFCR 0x18 /* Buffer Control Register */
#define QSPI_SPBDCR 0x1a /* Buffer Data Count Register */
#define QSPI_SPBMUL0 0x1c /* Transfer Data Length Multiplier Setting Register 0 */
#define QSPI_SPBMUL1 0x20 /* Transfer Data Length Multiplier Setting Register 1 */
#define QSPI_SPBMUL2 0x24 /* Transfer Data Length Multiplier Setting Register 2 */
#define QSPI_SPBMUL3 0x28 /* Transfer Data Length Multiplier Setting Register 3 */
+#define QSPI_SPBMUL(i) (QSPI_SPBMUL0 + (i) * 4)
/* SPCR - Control Register */
#define SPCR_SPRIE 0x80 /* Receive Interrupt Enable */
@@ -104,7 +112,7 @@
#define SPSR_PERF 0x08 /* Parity Error Flag */
#define SPSR_MODF 0x04 /* Mode Fault Error Flag */
#define SPSR_IDLNF 0x02 /* RSPI Idle Flag */
-#define SPSR_OVRF 0x01 /* Overrun Error Flag */
+#define SPSR_OVRF 0x01 /* Overrun Error Flag (RSPI only) */
/* SPSCR - Sequence Control Register */
#define SPSCR_SPSLN_MASK 0x07 /* Sequence Length Specification */
@@ -121,13 +129,13 @@
#define SPDCR_SPLWORD SPDCR_SPLW1
#define SPDCR_SPLBYTE SPDCR_SPLW0
#define SPDCR_SPLW 0x20 /* Access Width Specification (SH) */
-#define SPDCR_SPRDTD 0x10 /* Receive Transmit Data Select */
+#define SPDCR_SPRDTD 0x10 /* Receive Transmit Data Select (SH) */
#define SPDCR_SLSEL1 0x08
#define SPDCR_SLSEL0 0x04
-#define SPDCR_SLSEL_MASK 0x0c /* SSL1 Output Select */
+#define SPDCR_SLSEL_MASK 0x0c /* SSL1 Output Select (SH) */
#define SPDCR_SPFC1 0x02
#define SPDCR_SPFC0 0x01
-#define SPDCR_SPFC_MASK 0x03 /* Frame Count Setting (1-4) */
+#define SPDCR_SPFC_MASK 0x03 /* Frame Count Setting (1-4) (SH) */
/* SPCKD - Clock Delay Register */
#define SPCKD_SCKDL_MASK 0x07 /* Clock Delay Setting (1-8) */
@@ -151,7 +159,7 @@
#define SPCMD_LSBF 0x1000 /* LSB First */
#define SPCMD_SPB_MASK 0x0f00 /* Data Length Setting */
#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
-#define SPCMD_SPB_8BIT 0x0000 /* qspi only */
+#define SPCMD_SPB_8BIT 0x0000 /* QSPI only */
#define SPCMD_SPB_16BIT 0x0100
#define SPCMD_SPB_20BIT 0x0000
#define SPCMD_SPB_24BIT 0x0100
@@ -170,8 +178,8 @@
#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
/* SPBFCR - Buffer Control Register */
-#define SPBFCR_TXRST 0x80 /* Transmit Buffer Data Reset (qspi only) */
-#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset (qspi only) */
+#define SPBFCR_TXRST 0x80 /* Transmit Buffer Data Reset */
+#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */
#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
@@ -181,22 +189,21 @@ struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
struct spi_master *master;
- struct list_head queue;
- struct work_struct ws;
wait_queue_head_t wait;
- spinlock_t lock;
struct clk *clk;
- u8 spsr;
u16 spcmd;
+ u8 spsr;
+ u8 sppcr;
+ int rx_irq, tx_irq;
const struct spi_ops *ops;
/* for dmaengine */
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
- int irq;
unsigned dma_width_16bit:1;
unsigned dma_callbacked:1;
+ unsigned byte_access:1;
};
static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
@@ -224,34 +231,47 @@ static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
return ioread16(rspi->addr + offset);
}
+static void rspi_write_data(const struct rspi_data *rspi, u16 data)
+{
+ if (rspi->byte_access)
+ rspi_write8(rspi, data, RSPI_SPDR);
+ else /* 16 bit */
+ rspi_write16(rspi, data, RSPI_SPDR);
+}
+
+static u16 rspi_read_data(const struct rspi_data *rspi)
+{
+ if (rspi->byte_access)
+ return rspi_read8(rspi, RSPI_SPDR);
+ else /* 16 bit */
+ return rspi_read16(rspi, RSPI_SPDR);
+}
+
/* optional functions */
struct spi_ops {
- int (*set_config_register)(const struct rspi_data *rspi,
- int access_size);
- int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t);
- int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t);
-
+ int (*set_config_register)(struct rspi_data *rspi, int access_size);
+ int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer);
+ u16 mode_bits;
};
/*
- * functions for RSPI
+ * functions for RSPI on legacy SH
*/
-static int rspi_set_config_register(const struct rspi_data *rspi,
- int access_size)
+static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
int spbr;
- /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
- rspi_write8(rspi, 0x00, RSPI_SPPCR);
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
- /* Sets number of frames to be used: 1 frame */
- rspi_write8(rspi, 0x00, RSPI_SPDCR);
+ /* Disable dummy transmission, set 16-bit word access, 1 frame */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 0;
/* Sets RSPCK, SSL, next-access delay value */
rspi_write8(rspi, 0x00, RSPI_SPCKD);
@@ -262,8 +282,41 @@ static int rspi_set_config_register(const struct rspi_data *rspi,
rspi_write8(rspi, 0x00, RSPI_SPCR2);
/* Sets SPCMD */
- rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | rspi->spcmd,
- RSPI_SPCMD0);
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+/*
+ * functions for RSPI on RZ
+ */
+static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ int spbr;
+
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
+ rspi->byte_access = 1;
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Sets SPCMD */
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
/* Sets RSPI mode */
rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
@@ -274,21 +327,20 @@ static int rspi_set_config_register(const struct rspi_data *rspi,
/*
* functions for QSPI
*/
-static int qspi_set_config_register(const struct rspi_data *rspi,
- int access_size)
+static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
- u16 spcmd;
int spbr;
- /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
- rspi_write8(rspi, 0x00, RSPI_SPPCR);
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz);
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
- /* Sets number of frames to be used: 1 frame */
- rspi_write8(rspi, 0x00, RSPI_SPDCR);
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 1;
/* Sets RSPCK, SSL, next-access delay value */
rspi_write8(rspi, 0x00, RSPI_SPCKD);
@@ -297,13 +349,13 @@ static int qspi_set_config_register(const struct rspi_data *rspi,
/* Data Length Setting */
if (access_size == 8)
- spcmd = SPCMD_SPB_8BIT;
+ rspi->spcmd |= SPCMD_SPB_8BIT;
else if (access_size == 16)
- spcmd = SPCMD_SPB_16BIT;
+ rspi->spcmd |= SPCMD_SPB_16BIT;
else
- spcmd = SPCMD_SPB_32BIT;
+ rspi->spcmd |= SPCMD_SPB_32BIT;
- spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | rspi->spcmd | SPCMD_SPNDEN;
+ rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;
/* Resets transfer data length */
rspi_write32(rspi, 0, QSPI_SPBMUL0);
@@ -314,9 +366,9 @@ static int qspi_set_config_register(const struct rspi_data *rspi,
rspi_write8(rspi, 0x00, QSPI_SPBFCR);
/* Sets SPCMD */
- rspi_write16(rspi, spcmd, RSPI_SPCMD0);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
- /* Enables SPI function in a master mode */
+ /* Enables SPI function in master mode */
rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
return 0;
@@ -340,6 +392,9 @@ static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
int ret;
rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
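+ /* Skip enabling the interrupt if the status flag is already set */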
+ if (rspi->spsr & wait_mask)
+ return 0;
+
rspi_enable_irq(rspi, enable_bit);
ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
if (ret == 0 && !(rspi->spsr & wait_mask))
@@ -348,78 +403,39 @@ static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
return 0;
}
-static void rspi_assert_ssl(const struct rspi_data *rspi)
-{
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
-}
-
-static void rspi_negate_ssl(const struct rspi_data *rspi)
+static int rspi_data_out(struct rspi_data *rspi, u8 data)
{
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
+ dev_err(&rspi->master->dev, "transmit timeout\n");
+ return -ETIMEDOUT;
+ }
+ rspi_write_data(rspi, data);
+ return 0;
}
-static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static int rspi_data_in(struct rspi_data *rspi)
{
- int remain = t->len;
- const u8 *data = t->tx_buf;
- while (remain > 0) {
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
- RSPI_SPCR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
+ u8 data;
- rspi_write16(rspi, *data, RSPI_SPDR);
- data++;
- remain--;
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
+ dev_err(&rspi->master->dev, "receive timeout\n");
+ return -ETIMEDOUT;
}
-
- /* Waiting for the last transmission */
- rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
-
- return 0;
+ data = rspi_read_data(rspi);
+ return data;
}
-static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static int rspi_data_out_in(struct rspi_data *rspi, u8 data)
{
- int remain = t->len;
- const u8 *data = t->tx_buf;
-
- rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR);
- rspi_write8(rspi, 0x00, QSPI_SPBFCR);
-
- while (remain > 0) {
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
- rspi_write8(rspi, *data++, RSPI_SPDR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: receive timeout\n", __func__);
- return -ETIMEDOUT;
- }
- rspi_read8(rspi, RSPI_SPDR);
-
- remain--;
- }
+ int ret;
- /* Waiting for the last transmission */
- rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+ ret = rspi_data_out(rspi, data);
+ if (ret < 0)
+ return ret;
- return 0;
+ return rspi_data_in(rspi);
}
-#define send_pio(spi, mesg, t) spi->ops->send_pio(spi, mesg, t)
-
static void rspi_dma_complete(void *arg)
{
struct rspi_data *rspi = arg;
@@ -471,7 +487,7 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
struct scatterlist sg;
const void *buf = NULL;
struct dma_async_tx_descriptor *desc;
- unsigned len;
+ unsigned int len;
int ret = 0;
if (rspi->dma_width_16bit) {
@@ -509,7 +525,7 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
* DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
* called. So, this driver disables the IRQ while DMA transfer.
*/
- disable_irq(rspi->irq);
+ disable_irq(rspi->tx_irq);
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
rspi_enable_irq(rspi, SPCR_SPTIE);
@@ -528,7 +544,7 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
ret = -ETIMEDOUT;
rspi_disable_irq(rspi, SPCR_SPTIE);
- enable_irq(rspi->irq);
+ enable_irq(rspi->tx_irq);
end:
rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
@@ -545,46 +561,17 @@ static void rspi_receive_init(const struct rspi_data *rspi)
spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
- rspi_read16(rspi, RSPI_SPDR); /* dummy read */
+ rspi_read_data(rspi); /* dummy read */
if (spsr & SPSR_OVRF)
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
RSPI_SPSR);
}
-static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static void rspi_rz_receive_init(const struct rspi_data *rspi)
{
- int remain = t->len;
- u8 *data;
-
rspi_receive_init(rspi);
-
- data = t->rx_buf;
- while (remain > 0) {
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
- RSPI_SPCR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* dummy write for generate clock */
- rspi_write16(rspi, DUMMY_DATA, RSPI_SPDR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: receive timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* SPDR allows 16 or 32-bit access only */
- *data = (u8)rspi_read16(rspi, RSPI_SPDR);
-
- data++;
- remain--;
- }
-
- return 0;
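+ /* Reset the RX/TX FIFOs, then release the reset */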
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
+ rspi_write8(rspi, 0, RSPI_SPBFCR);
}
static void qspi_receive_init(const struct rspi_data *rspi)
@@ -593,51 +580,17 @@ static void qspi_receive_init(const struct rspi_data *rspi)
spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
- rspi_read8(rspi, RSPI_SPDR); /* dummy read */
+ rspi_read_data(rspi); /* dummy read */
rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
- rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+ rspi_write8(rspi, 0, QSPI_SPBFCR);
}
-static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
-{
- int remain = t->len;
- u8 *data;
-
- qspi_receive_init(rspi);
-
- data = t->rx_buf;
- while (remain > 0) {
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* dummy write for generate clock */
- rspi_write8(rspi, DUMMY_DATA, RSPI_SPDR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: receive timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* SPDR allows 8, 16 or 32-bit access */
- *data++ = rspi_read8(rspi, RSPI_SPDR);
- remain--;
- }
-
- return 0;
-}
-
-#define receive_pio(spi, mesg, t) spi->ops->receive_pio(spi, mesg, t)
-
static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
struct scatterlist sg, sg_dummy;
void *dummy = NULL, *rx_buf = NULL;
struct dma_async_tx_descriptor *desc, *desc_dummy;
- unsigned len;
+ unsigned int len;
int ret = 0;
if (rspi->dma_width_16bit) {
@@ -695,7 +648,9 @@ static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
* DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
* called. So, this driver disables the IRQ while DMA transfer.
*/
- disable_irq(rspi->irq);
+ disable_irq(rspi->tx_irq);
+ if (rspi->rx_irq != rspi->tx_irq)
+ disable_irq(rspi->rx_irq);
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
@@ -718,7 +673,9 @@ static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
ret = -ETIMEDOUT;
rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
- enable_irq(rspi->irq);
+ enable_irq(rspi->tx_irq);
+ if (rspi->rx_irq != rspi->tx_irq)
+ enable_irq(rspi->rx_irq);
end:
rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
@@ -746,56 +703,175 @@ static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t)
return 0;
}
-static void rspi_work(struct work_struct *work)
+static int rspi_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
{
- struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
- struct spi_message *mesg;
- struct spi_transfer *t;
- unsigned long flags;
- int ret;
+ int remain = xfer->len, ret;
+ const u8 *tx_buf = xfer->tx_buf;
+ u8 *rx_buf = xfer->rx_buf;
+ u8 spcr, data;
- while (1) {
- spin_lock_irqsave(&rspi->lock, flags);
- if (list_empty(&rspi->queue)) {
- spin_unlock_irqrestore(&rspi->lock, flags);
- break;
- }
- mesg = list_entry(rspi->queue.next, struct spi_message, queue);
- list_del_init(&mesg->queue);
- spin_unlock_irqrestore(&rspi->lock, flags);
-
- rspi_assert_ssl(rspi);
-
- list_for_each_entry(t, &mesg->transfers, transfer_list) {
- if (t->tx_buf) {
- if (rspi_is_dma(rspi, t))
- ret = rspi_send_dma(rspi, t);
- else
- ret = send_pio(rspi, mesg, t);
- if (ret < 0)
- goto error;
- }
- if (t->rx_buf) {
- if (rspi_is_dma(rspi, t))
- ret = rspi_receive_dma(rspi, t);
- else
- ret = receive_pio(rspi, mesg, t);
- if (ret < 0)
- goto error;
- }
- mesg->actual_length += t->len;
+ rspi_receive_init(rspi);
+
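+ /* Full-duplex (TXMD cleared) when receiving, transmit-only mode otherwise */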
+ spcr = rspi_read8(rspi, RSPI_SPCR);
+ if (rx_buf)
+ spcr &= ~SPCR_TXMD;
+ else
+ spcr |= SPCR_TXMD;
+ rspi_write8(rspi, spcr, RSPI_SPCR);
+
+ while (remain > 0) {
+ data = tx_buf ? *tx_buf++ : DUMMY_DATA;
+ ret = rspi_data_out(rspi, data);
+ if (ret < 0)
+ return ret;
+ if (rx_buf) {
+ ret = rspi_data_in(rspi);
+ if (ret < 0)
+ return ret;
+ *rx_buf++ = ret;
}
- rspi_negate_ssl(rspi);
+ remain--;
+ }
+
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+
+ return 0;
+}
+
+static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (!rspi_is_dma(rspi, xfer))
+ return rspi_transfer_out_in(rspi, xfer);
+
+ if (xfer->tx_buf) {
+ ret = rspi_send_dma(rspi, xfer);
+ if (ret < 0)
+ return ret;
+ }
+ if (xfer->rx_buf)
+ return rspi_receive_dma(rspi, xfer);
+
+ return 0;
+}
+
+static int rspi_rz_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ int remain = xfer->len, ret;
+ const u8 *tx_buf = xfer->tx_buf;
+ u8 *rx_buf = xfer->rx_buf;
+ u8 data;
+
+ rspi_rz_receive_init(rspi);
+
+ while (remain > 0) {
+ data = tx_buf ? *tx_buf++ : DUMMY_DATA;
+ ret = rspi_data_out_in(rspi, data);
+ if (ret < 0)
+ return ret;
+ if (rx_buf)
+ *rx_buf++ = ret;
+ remain--;
+ }
+
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+
+ return 0;
+}
+
+static int rspi_rz_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ return rspi_rz_transfer_out_in(rspi, xfer);
+}
+
+static int qspi_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ int remain = xfer->len, ret;
+ const u8 *tx_buf = xfer->tx_buf;
+ u8 *rx_buf = xfer->rx_buf;
+ u8 data;
- mesg->status = 0;
- mesg->complete(mesg->context);
+ qspi_receive_init(rspi);
+
+ while (remain > 0) {
+ data = tx_buf ? *tx_buf++ : DUMMY_DATA;
+ ret = rspi_data_out_in(rspi, data);
+ if (ret < 0)
+ return ret;
+ if (rx_buf)
+ *rx_buf++ = ret;
+ remain--;
+ }
+
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+
+ return 0;
+}
+
+static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
+{
+ const u8 *buf = xfer->tx_buf;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < xfer->len; i++) {
+ ret = rspi_data_out(rspi, *buf++);
+ if (ret < 0)
+ return ret;
}
- return;
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
-error:
- mesg->status = ret;
- mesg->complete(mesg->context);
+ return 0;
+}
+
+static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
+{
+ u8 *buf = xfer->rx_buf;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < xfer->len; i++) {
+ ret = rspi_data_in(rspi);
+ if (ret < 0)
+ return ret;
+ *buf++ = ret;
+ }
+
+ return 0;
+}
+
+static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ if (spi->mode & SPI_LOOP) {
+ return qspi_transfer_out_in(rspi, xfer);
+ } else if (xfer->tx_buf && xfer->tx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Write */
+ return qspi_transfer_out(rspi, xfer);
+ } else if (xfer->rx_buf && xfer->rx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Read */
+ return qspi_transfer_in(rspi, xfer);
+ } else {
+ /* Single SPI Transfer */
+ return qspi_transfer_out_in(rspi, xfer);
+ }
}
static int rspi_setup(struct spi_device *spi)
@@ -810,32 +886,115 @@ static int rspi_setup(struct spi_device *spi)
if (spi->mode & SPI_CPHA)
rspi->spcmd |= SPCMD_CPHA;
+ /* CMOS output mode and MOSI signal from previous transfer */
+ rspi->sppcr = 0;
+ if (spi->mode & SPI_LOOP)
+ rspi->sppcr |= SPPCR_SPLP;
+
set_config_register(rspi, 8);
return 0;
}
-static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
+static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(spi->master);
- unsigned long flags;
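+ /* Map tx/rx_nbits to the SPCMD SPIMOD field; reads additionally set SPCMD_SPRW */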
+ if (xfer->tx_buf)
+ switch (xfer->tx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL;
+ default:
+ return 0;
+ }
+ if (xfer->rx_buf)
+ switch (xfer->rx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
- mesg->actual_length = 0;
- mesg->status = -EINPROGRESS;
+static int qspi_setup_sequencer(struct rspi_data *rspi,
+ const struct spi_message *msg)
+{
+ const struct spi_transfer *xfer;
+ unsigned int i = 0, len = 0;
+ u16 current_mode = 0xffff, mode;
+
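+ /* Merge consecutive transfers that use the same mode into one sequencer entry */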
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ mode = qspi_transfer_mode(xfer);
+ if (mode == current_mode) {
+ len += xfer->len;
+ continue;
+ }
+
+ /* Transfer mode change */
+ if (i) {
+ /* Set transfer data length of previous transfer */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ }
- spin_lock_irqsave(&rspi->lock, flags);
- list_add_tail(&mesg->queue, &rspi->queue);
- schedule_work(&rspi->ws);
- spin_unlock_irqrestore(&rspi->lock, flags);
+ if (i >= QSPI_NUM_SPCMD) {
+ dev_err(&msg->spi->dev,
+ "Too many different transfer modes");
+ return -EINVAL;
+ }
+
+ /* Program transfer mode for this transfer */
+ rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
+ current_mode = mode;
+ len = xfer->len;
+ i++;
+ }
+ if (i) {
+ /* Set final transfer data length and sequence length */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ rspi_write8(rspi, i - 1, RSPI_SPSCR);
+ }
return 0;
}
-static void rspi_cleanup(struct spi_device *spi)
+static int rspi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (msg->spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
+ /* Setup sequencer for messages with multiple transfer modes */
+ ret = qspi_setup_sequencer(rspi, msg);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable SPI function in master mode */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
+ return 0;
}
-static irqreturn_t rspi_irq(int irq, void *_sr)
+static int rspi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ /* Disable SPI function */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
+
+ /* Reset sequencer for Single SPI Transfers */
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+ rspi_write8(rspi, 0, RSPI_SPSCR);
+ return 0;
+}
+
+static irqreturn_t rspi_irq_mux(int irq, void *_sr)
{
struct rspi_data *rspi = _sr;
u8 spsr;
@@ -857,6 +1016,36 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
return ret;
}
+static irqreturn_t rspi_irq_rx(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF) {
+ rspi_disable_irq(rspi, SPCR_SPRIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return 0;
+}
+
+static irqreturn_t rspi_irq_tx(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPTEF) {
+ rspi_disable_irq(rspi, SPCR_SPTIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return 0;
+}
+
static int rspi_request_dma(struct rspi_data *rspi,
struct platform_device *pdev)
{
@@ -923,34 +1112,89 @@ static int rspi_remove(struct platform_device *pdev)
struct rspi_data *rspi = platform_get_drvdata(pdev);
rspi_release_dma(rspi);
- clk_disable(rspi->clk);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
+static const struct spi_ops rspi_ops = {
+ .set_config_register = rspi_set_config_register,
+ .transfer_one = rspi_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+};
+
+static const struct spi_ops rspi_rz_ops = {
+ .set_config_register = rspi_rz_set_config_register,
+ .transfer_one = rspi_rz_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+};
+
+static const struct spi_ops qspi_ops = {
+ .set_config_register = qspi_set_config_register,
+ .transfer_one = qspi_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
+ SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_RX_DUAL | SPI_RX_QUAD,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id rspi_of_match[] = {
+ /* RSPI on legacy SH */
+ { .compatible = "renesas,rspi", .data = &rspi_ops },
+ /* RSPI on RZ/A1H */
+ { .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
+ /* QSPI on R-Car Gen2 */
+ { .compatible = "renesas,qspi", .data = &qspi_ops },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rspi_of_match);
+
+static int rspi_parse_dt(struct device *dev, struct spi_master *master)
+{
+ u32 num_cs;
+ int error;
+
+ /* Parse DT properties */
+ error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
+ if (error) {
+ dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
+ return error;
+ }
+
+ master->num_chipselect = num_cs;
+ return 0;
+}
+#else
+#define rspi_of_match NULL
+static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static int rspi_request_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, const char *suffix,
+ void *dev_id)
+{
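+ /* Build a devm-allocated "<device>:<suffix>" name (+2 for ':' and NUL) so each IRQ is distinguishable */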
+ const char *base = dev_name(dev);
+ size_t len = strlen(base) + strlen(suffix) + 2;
+ char *name = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, len, "%s:%s", base, suffix);
+ return devm_request_irq(dev, irq, handler, 0, name, dev_id);
+}
+
static int rspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
struct rspi_data *rspi;
- int ret, irq;
- char clk_name[16];
- const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
+ int ret;
+ const struct of_device_id *of_id;
+ const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
- const struct platform_device_id *id_entry = pdev->id_entry;
-
- ops = (struct spi_ops *)id_entry->driver_data;
- /* ops parameter check */
- if (!ops->set_config_register) {
- dev_err(&pdev->dev, "there is no set_config_register\n");
- return -ENODEV;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq error\n");
- return -ENODEV;
- }
master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
if (master == NULL) {
@@ -958,6 +1202,28 @@ static int rspi_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ of_id = of_match_device(rspi_of_match, &pdev->dev);
+ if (of_id) {
+ ops = of_id->data;
+ ret = rspi_parse_dt(&pdev->dev, master);
+ if (ret)
+ goto error1;
+ } else {
+ ops = (struct spi_ops *)pdev->id_entry->driver_data;
+ rspi_pd = dev_get_platdata(&pdev->dev);
+ if (rspi_pd && rspi_pd->num_chipselect)
+ master->num_chipselect = rspi_pd->num_chipselect;
+ else
+ master->num_chipselect = 2; /* default */
+ }
+
+ /* ops parameter check */
+ if (!ops->set_config_register) {
+ dev_err(&pdev->dev, "there is no set_config_register\n");
+ ret = -ENODEV;
+ goto error1;
+ }
+
rspi = spi_master_get_devdata(master);
platform_set_drvdata(pdev, rspi);
rspi->ops = ops;
@@ -970,39 +1236,61 @@ static int rspi_probe(struct platform_device *pdev)
goto error1;
}
- snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
- rspi->clk = devm_clk_get(&pdev->dev, clk_name);
+ rspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rspi->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(rspi->clk);
goto error1;
}
- clk_enable(rspi->clk);
- INIT_LIST_HEAD(&rspi->queue);
- spin_lock_init(&rspi->lock);
- INIT_WORK(&rspi->ws, rspi_work);
- init_waitqueue_head(&rspi->wait);
+ pm_runtime_enable(&pdev->dev);
- if (rspi_pd && rspi_pd->num_chipselect)
- master->num_chipselect = rspi_pd->num_chipselect;
- else
- master->num_chipselect = 2; /* default */
+ init_waitqueue_head(&rspi->wait);
master->bus_num = pdev->id;
master->setup = rspi_setup;
- master->transfer = rspi_transfer;
- master->cleanup = rspi_cleanup;
- master->mode_bits = SPI_CPHA | SPI_CPOL;
+ master->auto_runtime_pm = true;
+ master->transfer_one = ops->transfer_one;
+ master->prepare_message = rspi_prepare_message;
+ master->unprepare_message = rspi_unprepare_message;
+ master->mode_bits = ops->mode_bits;
+ master->dev.of_node = pdev->dev.of_node;
+
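+ /* Prefer dedicated "rx"/"tx" interrupts; fall back to a single muxed interrupt */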
+ ret = platform_get_irq_byname(pdev, "rx");
+ if (ret < 0) {
+ ret = platform_get_irq_byname(pdev, "mux");
+ if (ret < 0)
+ ret = platform_get_irq(pdev, 0);
+ if (ret >= 0)
+ rspi->rx_irq = rspi->tx_irq = ret;
+ } else {
+ rspi->rx_irq = ret;
+ ret = platform_get_irq_byname(pdev, "tx");
+ if (ret >= 0)
+ rspi->tx_irq = ret;
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "platform_get_irq error\n");
+ goto error2;
+ }
- ret = devm_request_irq(&pdev->dev, irq, rspi_irq, 0,
- dev_name(&pdev->dev), rspi);
+ if (rspi->rx_irq == rspi->tx_irq) {
+ /* Single multiplexed interrupt */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
+ "mux", rspi);
+ } else {
+ /* Multi-interrupt mode, only SPRI and SPTI are used */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
+ "rx", rspi);
+ if (!ret)
+ ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
+ rspi_irq_tx, "tx", rspi);
+ }
if (ret < 0) {
dev_err(&pdev->dev, "request_irq error\n");
goto error2;
}
- rspi->irq = irq;
ret = rspi_request_dma(rspi, pdev);
if (ret < 0) {
dev_err(&pdev->dev, "rspi_request_dma failed.\n");
@@ -1022,27 +1310,16 @@ static int rspi_probe(struct platform_device *pdev)
error3:
rspi_release_dma(rspi);
error2:
- clk_disable(rspi->clk);
+ pm_runtime_disable(&pdev->dev);
error1:
spi_master_put(master);
return ret;
}
-static struct spi_ops rspi_ops = {
- .set_config_register = rspi_set_config_register,
- .send_pio = rspi_send_pio,
- .receive_pio = rspi_receive_pio,
-};
-
-static struct spi_ops qspi_ops = {
- .set_config_register = qspi_set_config_register,
- .send_pio = qspi_send_pio,
- .receive_pio = qspi_receive_pio,
-};
-
static struct platform_device_id spi_driver_ids[] = {
{ "rspi", (kernel_ulong_t)&rspi_ops },
+ { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
{ "qspi", (kernel_ulong_t)&qspi_ops },
{},
};
@@ -1056,6 +1333,7 @@ static struct platform_driver rspi_driver = {
.driver = {
.name = "renesas_spi",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(rspi_of_match),
},
};
module_platform_driver(rspi_driver);
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 746424aa5353..bed23384dfab 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -9,7 +9,6 @@
*
*/
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
@@ -123,25 +122,15 @@ static int s3c24xx_spi_update_state(struct spi_device *spi,
{
struct s3c24xx_spi *hw = to_hw(spi);
struct s3c24xx_spi_devstate *cs = spi->controller_state;
- unsigned int bpw;
unsigned int hz;
unsigned int div;
unsigned long clk;
- bpw = t ? t->bits_per_word : spi->bits_per_word;
hz = t ? t->speed_hz : spi->max_speed_hz;
- if (!bpw)
- bpw = 8;
-
if (!hz)
hz = spi->max_speed_hz;
- if (bpw != 8) {
- dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw);
- return -EINVAL;
- }
-
if (spi->mode != cs->mode) {
u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
@@ -544,6 +533,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = pdata->bus_num;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
/* setup the state for the bitbang driver */
@@ -643,6 +633,11 @@ static int s3c24xx_spi_remove(struct platform_device *dev)
static int s3c24xx_spi_suspend(struct device *dev)
{
struct s3c24xx_spi *hw = dev_get_drvdata(dev);
+ int ret;
+
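+ /* Quiesce the message queue before the hardware is powered down */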
+ ret = spi_master_suspend(hw->master);
+ if (ret)
+ return ret;
if (hw->pdata && hw->pdata->gpio_setup)
hw->pdata->gpio_setup(hw->pdata, 0);
@@ -656,7 +651,7 @@ static int s3c24xx_spi_resume(struct device *dev)
struct s3c24xx_spi *hw = dev_get_drvdata(dev);
s3c24xx_spi_initialsetup(hw);
- return 0;
+ return spi_master_resume(hw->master);
}
static const struct dev_pm_ops s3c24xx_spi_pmops = {
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index ae907dde1371..f19cd97855e8 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -34,10 +34,6 @@
#include <linux/platform_data/spi-s3c64xx.h>
-#ifdef CONFIG_S3C_DMA
-#include <mach/dma.h>
-#endif
-
#define MAX_SPI_PORTS 3
#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
@@ -200,9 +196,6 @@ struct s3c64xx_spi_driver_data {
unsigned cur_speed;
struct s3c64xx_spi_dma_data rx_dma;
struct s3c64xx_spi_dma_data tx_dma;
-#ifdef CONFIG_S3C_DMA
- struct samsung_dma_ops *ops;
-#endif
struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
bool cs_gpio;
@@ -284,104 +277,8 @@ static void s3c64xx_spi_dmacb(void *data)
spin_unlock_irqrestore(&sdd->lock, flags);
}
-#ifdef CONFIG_S3C_DMA
-/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */
-
-static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
- .name = "samsung-spi-dma",
-};
-
-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
- unsigned len, dma_addr_t buf)
-{
- struct s3c64xx_spi_driver_data *sdd;
- struct samsung_dma_prep info;
- struct samsung_dma_config config;
-
- if (dma->direction == DMA_DEV_TO_MEM) {
- sdd = container_of((void *)dma,
- struct s3c64xx_spi_driver_data, rx_dma);
- config.direction = sdd->rx_dma.direction;
- config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
- config.width = sdd->cur_bpw / 8;
- sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
- } else {
- sdd = container_of((void *)dma,
- struct s3c64xx_spi_driver_data, tx_dma);
- config.direction = sdd->tx_dma.direction;
- config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
- config.width = sdd->cur_bpw / 8;
- sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
- }
-
- info.cap = DMA_SLAVE;
- info.len = len;
- info.fp = s3c64xx_spi_dmacb;
- info.fp_param = dma;
- info.direction = dma->direction;
- info.buf = buf;
-
- sdd->ops->prepare((enum dma_ch)dma->ch, &info);
- sdd->ops->trigger((enum dma_ch)dma->ch);
-}
-
-static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
-{
- struct samsung_dma_req req;
- struct device *dev = &sdd->pdev->dev;
-
- sdd->ops = samsung_dma_get_ops();
-
- req.cap = DMA_SLAVE;
- req.client = &s3c64xx_spi_dma_client;
-
- sdd->rx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
- sdd->rx_dma.dmach, &req, dev, "rx");
- sdd->tx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
- sdd->tx_dma.dmach, &req, dev, "tx");
-
- return 1;
-}
-
-static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
- /*
- * If DMA resource was not available during
- * probe, no need to continue with dma requests
- * else Acquire DMA channels
- */
- while (!is_polling(sdd) && !acquire_dma(sdd))
- usleep_range(10000, 11000);
-
- return 0;
-}
-
-static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
- /* Free DMA channels */
- if (!is_polling(sdd)) {
- sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
- &s3c64xx_spi_dma_client);
- sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
- &s3c64xx_spi_dma_client);
- }
-
- return 0;
-}
-
-static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
- struct s3c64xx_spi_dma_data *dma)
-{
- sdd->ops->stop((enum dma_ch)dma->ch);
-}
-#else
-
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
- unsigned len, dma_addr_t buf)
+ struct sg_table *sgt)
{
struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config;
@@ -407,8 +304,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
dmaengine_slave_config(dma->ch, &config);
}
- desc = dmaengine_prep_slave_single(dma->ch, buf, len,
- dma->direction, DMA_PREP_INTERRUPT);
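+ /* The SPI core has already mapped this transfer into a scatterlist (see can_dma) */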
+ desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+ dma->direction, DMA_PREP_INTERRUPT);
desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma;
@@ -437,6 +334,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
ret = -EBUSY;
goto out;
}
+ spi->dma_rx = sdd->rx_dma.ch;
sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
(void *)sdd->tx_dma.dmach, dev, "tx");
@@ -445,6 +343,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
ret = -EBUSY;
goto out_rx;
}
+ spi->dma_tx = sdd->tx_dma.ch;
}
ret = pm_runtime_get_sync(&sdd->pdev->dev);
@@ -477,12 +376,14 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
return 0;
}
-static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
- struct s3c64xx_spi_dma_data *dma)
+static bool s3c64xx_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
{
- dmaengine_terminate_all(dma->ch);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
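+ /* Only transfers larger than the FIFO are worth handing to the DMA engine */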
+ return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
}
-#endif
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi,
@@ -515,7 +416,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
- prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+ prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
} else {
switch (sdd->cur_bpw) {
case 32:
@@ -547,7 +448,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
- prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+ prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
}
}
@@ -555,23 +456,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}
-static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
- struct spi_device *spi)
-{
- if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
- if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
- /* Deselect the last toggled device */
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio,
- spi->mode & SPI_CS_HIGH ? 0 : 1);
- }
- sdd->tgl_spi = NULL;
- }
-
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
-}
-
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
int timeout_ms)
{
@@ -593,112 +477,111 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
return RX_FIFO_LVL(status, sdd);
}
-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
- struct spi_transfer *xfer, int dma_mode)
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
{
void __iomem *regs = sdd->regs;
unsigned long val;
+ u32 status;
int ms;
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
ms += 10; /* some tolerance */
- if (dma_mode) {
- val = msecs_to_jiffies(ms) + 10;
- val = wait_for_completion_timeout(&sdd->xfer_completion, val);
- } else {
- u32 status;
- val = msecs_to_loops(ms);
- do {
+ val = msecs_to_jiffies(ms) + 10;
+ val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+ /*
+ * If the previous transfer completed within the timeout, proceed;
+ * otherwise return -EIO.
+ * DmaTx returns after simply writing the data into the FIFO,
+ * without waiting for the actual transmission on the bus to finish.
+ * DmaRx returns only after the DMA has read the data from the FIFO,
+ * which requires the bus transmission to finish, so we don't worry
+ * if the transfer involved Rx (with or without Tx).
+ */
+ if (val && !xfer->rx_buf) {
+ val = msecs_to_loops(10);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ while ((TX_FIFO_LVL(status, sdd)
+ || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+ && --val) {
+ cpu_relax();
status = readl(regs + S3C64XX_SPI_STATUS);
- } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+ }
+
}
- if (dma_mode) {
- u32 status;
-
- /*
- * If the previous xfer was completed within timeout, then
- * proceed further else return -EIO.
- * DmaTx returns after simply writing data in the FIFO,
- * w/o waiting for real transmission on the bus to finish.
- * DmaRx returns only after Dma read data from FIFO which
- * needs bus transmission to finish, so we don't worry if
- * Xfer involved Rx(with or without Tx).
- */
- if (val && !xfer->rx_buf) {
- val = msecs_to_loops(10);
- status = readl(regs + S3C64XX_SPI_STATUS);
- while ((TX_FIFO_LVL(status, sdd)
- || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
- && --val) {
- cpu_relax();
- status = readl(regs + S3C64XX_SPI_STATUS);
- }
+ /* If timed out while checking rx/tx status return error */
+ if (!val)
+ return -EIO;
- }
+ return 0;
+}
- /* If timed out while checking rx/tx status return error */
- if (!val)
- return -EIO;
- } else {
- int loops;
- u32 cpy_len;
- u8 *buf;
-
- /* If it was only Tx */
- if (!xfer->rx_buf) {
- sdd->state &= ~TXBUSY;
- return 0;
- }
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long val;
+ u32 status;
+ int loops;
+ u32 cpy_len;
+ u8 *buf;
+ int ms;
- /*
- * If the receive length is bigger than the controller fifo
- * size, calculate the loops and read the fifo as many times.
- * loops = length / max fifo size (calculated by using the
- * fifo mask).
- * For any size less than the fifo size the below code is
- * executed atleast once.
- */
- loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
- buf = xfer->rx_buf;
- do {
- /* wait for data to be received in the fifo */
- cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
- (loops ? ms : 0));
+ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+ ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ ms += 10; /* some tolerance */
- switch (sdd->cur_bpw) {
- case 32:
- ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len / 4);
- break;
- case 16:
- ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len / 2);
- break;
- default:
- ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len);
- break;
- }
+ val = msecs_to_loops(ms);
+ do {
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
- buf = buf + cpy_len;
- } while (loops--);
- sdd->state &= ~RXBUSY;
+
+ /* If it was only Tx */
+ if (!xfer->rx_buf) {
+ sdd->state &= ~TXBUSY;
+ return 0;
}
- return 0;
-}
+ /*
+ * If the receive length is bigger than the controller fifo
+ * size, calculate the loops and read the fifo as many times.
+ * loops = length / max fifo size (calculated by using the
+ * fifo mask).
+ * For any size less than the fifo size the below code is
+ * executed at least once.
+ */
+ loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+ buf = xfer->rx_buf;
+ do {
+ /* wait for data to be received in the fifo */
+ cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+ (loops ? ms : 0));
+
+ switch (sdd->cur_bpw) {
+ case 32:
+ ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 4);
+ break;
+ case 16:
+ ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 2);
+ break;
+ default:
+ ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len);
+ break;
+ }
-static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
- struct spi_device *spi)
-{
- if (sdd->tgl_spi == spi)
- sdd->tgl_spi = NULL;
+ buf = buf + cpy_len;
+ } while (loops--);
+ sdd->state &= ~RXBUSY;
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+ return 0;
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -774,81 +657,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
-static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
- struct spi_message *msg)
-{
- struct device *dev = &sdd->pdev->dev;
- struct spi_transfer *xfer;
-
- if (is_polling(sdd) || msg->is_dma_mapped)
- return 0;
-
- /* First mark all xfer unmapped */
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- xfer->rx_dma = XFER_DMAADDR_INVALID;
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- }
-
- /* Map until end or first fail */
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
- if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
- continue;
-
- if (xfer->tx_buf != NULL) {
- xfer->tx_dma = dma_map_single(dev,
- (void *)xfer->tx_buf, xfer->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, xfer->tx_dma)) {
- dev_err(dev, "dma_map_single Tx failed\n");
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- return -ENOMEM;
- }
- }
-
- if (xfer->rx_buf != NULL) {
- xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
- xfer->len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, xfer->rx_dma)) {
- dev_err(dev, "dma_map_single Rx failed\n");
- dma_unmap_single(dev, xfer->tx_dma,
- xfer->len, DMA_TO_DEVICE);
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- xfer->rx_dma = XFER_DMAADDR_INVALID;
- return -ENOMEM;
- }
- }
- }
-
- return 0;
-}
-
-static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
- struct spi_message *msg)
-{
- struct device *dev = &sdd->pdev->dev;
- struct spi_transfer *xfer;
-
- if (is_polling(sdd) || msg->is_dma_mapped)
- return;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
- if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
- continue;
-
- if (xfer->rx_buf != NULL
- && xfer->rx_dma != XFER_DMAADDR_INVALID)
- dma_unmap_single(dev, xfer->rx_dma,
- xfer->len, DMA_FROM_DEVICE);
-
- if (xfer->tx_buf != NULL
- && xfer->tx_dma != XFER_DMAADDR_INVALID)
- dma_unmap_single(dev, xfer->tx_dma,
- xfer->len, DMA_TO_DEVICE);
- }
-}
-
static int s3c64xx_spi_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -866,13 +674,6 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master,
s3c64xx_spi_config(sdd);
}
- /* Map all the transfers if needed */
- if (s3c64xx_spi_map_mssg(sdd, msg)) {
- dev_err(&spi->dev,
- "Xfer: Unable to map message buffers!\n");
- return -ENOMEM;
- }
-
/* Configure feedback delay */
writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
@@ -896,13 +697,6 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
bpw = xfer->bits_per_word;
speed = xfer->speed_hz ? : spi->max_speed_hz;
- if (xfer->len % (bpw / 8)) {
- dev_err(&spi->dev,
- "Xfer length(%u) not a multiple of word size(%u)\n",
- xfer->len, bpw / 8);
- return -EIO;
- }
-
if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
sdd->cur_bpw = bpw;
sdd->cur_speed = speed;
@@ -929,7 +723,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
spin_unlock_irqrestore(&sdd->lock, flags);
- status = wait_for_xfer(sdd, xfer, use_dma);
+ if (use_dma)
+ status = wait_for_dma(sdd, xfer);
+ else
+ status = wait_for_pio(sdd, xfer);
if (status) {
dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
@@ -941,10 +738,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
if (use_dma) {
if (xfer->tx_buf != NULL
&& (sdd->state & TXBUSY))
- s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
+ dmaengine_terminate_all(sdd->tx_dma.ch);
if (xfer->rx_buf != NULL
&& (sdd->state & RXBUSY))
- s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
+ dmaengine_terminate_all(sdd->rx_dma.ch);
}
} else {
flush_fifo(sdd);
@@ -953,16 +750,6 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
return status;
}
-static int s3c64xx_spi_unprepare_message(struct spi_master *master,
- struct spi_message *msg)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
-
- s3c64xx_spi_unmap_mssg(sdd, msg);
-
- return 0;
-}
-
static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
struct spi_device *spi)
{
@@ -1092,14 +879,12 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
pm_runtime_put(&sdd->pdev->dev);
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- disable_cs(sdd, spi);
return 0;
setup_exit:
pm_runtime_put(&sdd->pdev->dev);
/* setup() returns with device de-selected */
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- disable_cs(sdd, spi);
gpio_free(cs->line);
spi_set_ctldata(spi, NULL);
@@ -1338,7 +1123,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
- master->unprepare_message = s3c64xx_spi_unprepare_message;
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
@@ -1347,6 +1131,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->auto_runtime_pm = true;
+ if (!is_polling(sdd))
+ master->can_dma = s3c64xx_spi_can_dma;
sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(sdd->regs)) {
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 121c2e1dea36..237f2e7a7179 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -183,17 +183,9 @@ static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
static int sc18is602_check_transfer(struct spi_device *spi,
struct spi_transfer *t, int tlen)
{
- uint32_t hz;
-
if (t && t->len + tlen > SC18IS602_BUFSIZ)
return -EINVAL;
- hz = spi->max_speed_hz;
- if (t && t->speed_hz)
- hz = t->speed_hz;
- if (hz == 0)
- return -EINVAL;
-
return 0;
}
@@ -205,22 +197,15 @@ static int sc18is602_transfer_one(struct spi_master *master,
struct spi_transfer *t;
int status = 0;
- /* SC18IS602 does not support CS2 */
- if (hw->id == sc18is602 && spi->chip_select == 2) {
- status = -ENXIO;
- goto error;
- }
-
hw->tlen = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
- u32 hz = t->speed_hz ? : spi->max_speed_hz;
bool do_transfer;
status = sc18is602_check_transfer(spi, t, hw->tlen);
if (status < 0)
break;
- status = sc18is602_setup_transfer(hw, hz, spi->mode);
+ status = sc18is602_setup_transfer(hw, t->speed_hz, spi->mode);
if (status < 0)
break;
@@ -238,7 +223,6 @@ static int sc18is602_transfer_one(struct spi_master *master,
if (t->delay_usecs)
udelay(t->delay_usecs);
}
-error:
m->status = status;
spi_finalize_current_message(master);
@@ -247,10 +231,13 @@ error:
static int sc18is602_setup(struct spi_device *spi)
{
- if (spi->mode & ~(SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST))
- return -EINVAL;
+ struct sc18is602 *hw = spi_master_get_devdata(spi->master);
- return sc18is602_check_transfer(spi, NULL, 0);
+ /* SC18IS602 does not support CS2 */
+ if (hw->id == sc18is602 && spi->chip_select == 2)
+ return -ENXIO;
+
+ return 0;
}
static int sc18is602_probe(struct i2c_client *client,
@@ -309,6 +296,8 @@ static int sc18is602_probe(struct i2c_client *client,
master->setup = sc18is602_setup;
master->transfer_one_message = sc18is602_transfer_one;
master->dev.of_node = np;
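+ /* Attainable rates are the bridge clock divided by 4 (max) down to 128 (min) */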
+ master->min_speed_hz = hw->freq / 128;
+ master->max_speed_hz = hw->freq / 4;
error = devm_spi_register_master(dev, master);
if (error)
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 82d2f922ffa0..9009456bdf4d 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -46,8 +46,6 @@
/* SPSR */
#define RXFL (1 << 2)
-#define hspi2info(h) (h->dev->platform_data)
-
struct hspi_priv {
void __iomem *addr;
struct spi_master *master;
@@ -113,14 +111,9 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
{
struct spi_device *spi = msg->spi;
struct device *dev = hspi->dev;
- u32 target_rate;
u32 spcr, idiv_clk;
u32 rate, best_rate, min, tmp;
- target_rate = t ? t->speed_hz : 0;
- if (!target_rate)
- target_rate = spi->max_speed_hz;
-
/*
* find best IDIV/CLKCx settings
*/
@@ -140,7 +133,7 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
rate /= (((idiv_clk & 0x1F) + 1) * 2);
/* save best settings */
- tmp = abs(target_rate - rate);
+ tmp = abs(t->speed_hz - rate);
if (tmp < min) {
min = tmp;
spcr = idiv_clk;
@@ -153,7 +146,7 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
if (spi->mode & SPI_CPOL)
spcr |= 1 << 6;
- dev_dbg(dev, "speed %d/%d\n", target_rate, best_rate);
+ dev_dbg(dev, "speed %d/%d\n", t->speed_hz, best_rate);
hspi_write(hspi, SPCR, spcr);
hspi_write(hspi, SPSR, 0x0);
@@ -230,29 +223,6 @@ static int hspi_transfer_one_message(struct spi_master *master,
return ret;
}
-static int hspi_setup(struct spi_device *spi)
-{
- struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
- struct device *dev = hspi->dev;
-
- if (8 != spi->bits_per_word) {
- dev_err(dev, "bits_per_word should be 8\n");
- return -EIO;
- }
-
- dev_dbg(dev, "%s setup\n", spi->modalias);
-
- return 0;
-}
-
-static void hspi_cleanup(struct spi_device *spi)
-{
- struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
- struct device *dev = hspi->dev;
-
- dev_dbg(dev, "%s cleanup\n", spi->modalias);
-}
-
static int hspi_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -298,22 +268,23 @@ static int hspi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- master->num_chipselect = 1;
master->bus_num = pdev->id;
- master->setup = hspi_setup;
- master->cleanup = hspi_cleanup;
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
master->transfer_one_message = hspi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+
ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
- goto error1;
+ goto error2;
}
return 0;
+ error2:
+ pm_runtime_disable(&pdev->dev);
error1:
clk_put(clk);
error0:
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 81cc02f5f9b0..e850d03e7190 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -15,59 +15,108 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/sh_msiof.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
#include <asm/unaligned.h>
+
+struct sh_msiof_chipdata {
+ u16 tx_fifo_size;
+ u16 rx_fifo_size;
+ u16 master_flags;
+};
+
struct sh_msiof_spi_priv {
- struct spi_bitbang bitbang; /* must be first for spi_bitbang.c */
void __iomem *mapbase;
struct clk *clk;
struct platform_device *pdev;
+ const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct completion done;
- unsigned long flags;
int tx_fifo_size;
int rx_fifo_size;
};
-#define TMDR1 0x00
-#define TMDR2 0x04
-#define TMDR3 0x08
-#define RMDR1 0x10
-#define RMDR2 0x14
-#define RMDR3 0x18
-#define TSCR 0x20
-#define RSCR 0x22
-#define CTR 0x28
-#define FCTR 0x30
-#define STR 0x40
-#define IER 0x44
-#define TDR1 0x48
-#define TDR2 0x4c
-#define TFDR 0x50
-#define RDR1 0x58
-#define RDR2 0x5c
-#define RFDR 0x60
-
-#define CTR_TSCKE (1 << 15)
-#define CTR_TFSE (1 << 14)
-#define CTR_TXE (1 << 9)
-#define CTR_RXE (1 << 8)
-
-#define STR_TEOF (1 << 23)
-#define STR_REOF (1 << 7)
+#define TMDR1 0x00 /* Transmit Mode Register 1 */
+#define TMDR2 0x04 /* Transmit Mode Register 2 */
+#define TMDR3 0x08 /* Transmit Mode Register 3 */
+#define RMDR1 0x10 /* Receive Mode Register 1 */
+#define RMDR2 0x14 /* Receive Mode Register 2 */
+#define RMDR3 0x18 /* Receive Mode Register 3 */
+#define TSCR 0x20 /* Transmit Clock Select Register */
+#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define CTR 0x28 /* Control Register */
+#define FCTR 0x30 /* FIFO Control Register */
+#define STR 0x40 /* Status Register */
+#define IER 0x44 /* Interrupt Enable Register */
+#define TDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define TFDR 0x50 /* Transmit FIFO Data Register */
+#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define RFDR 0x60 /* Receive FIFO Data Register */
+
+/* TMDR1 and RMDR1 */
+#define MDR1_TRMD 0x80000000 /* Transfer Mode (1 = Master mode) */
+#define MDR1_SYNCMD_MASK 0x30000000 /* SYNC Mode */
+#define MDR1_SYNCMD_SPI 0x20000000 /* Level mode/SPI */
+#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
+#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_SHIFT 2
+#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
+/* TMDR1 */
+#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
+
+/* TMDR2 and RMDR2 */
+#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
+#define MDR2_GRPMASK1 0x00000001 /* Group Output Mask 1 (SH, A1) */
+
+/* TSCR and RSCR */
+#define SCR_BRPS_MASK 0x1f00 /* Prescaler Setting (1-32) */
+#define SCR_BRPS(i) (((i) - 1) << 8)
+#define SCR_BRDV_MASK 0x0007 /* Baud Rate Generator's Division Ratio */
+#define SCR_BRDV_DIV_2 0x0000
+#define SCR_BRDV_DIV_4 0x0001
+#define SCR_BRDV_DIV_8 0x0002
+#define SCR_BRDV_DIV_16 0x0003
+#define SCR_BRDV_DIV_32 0x0004
+#define SCR_BRDV_DIV_1 0x0007
+
+/* CTR */
+#define CTR_TSCKIZ_MASK 0xc0000000 /* Transmit Clock I/O Polarity Select */
+#define CTR_TSCKIZ_SCK 0x80000000 /* Disable SCK when TX disabled */
+#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define CTR_RSCKIZ_MASK 0x30000000 /* Receive Clock Polarity Select */
+#define CTR_RSCKIZ_SCK 0x20000000 /* Must match CTR_TSCKIZ_SCK */
+#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define CTR_TXDIZ_MASK 0x00c00000 /* Pin Output When TX is Disabled */
+#define CTR_TXDIZ_LOW 0x00000000 /* 0 */
+#define CTR_TXDIZ_HIGH 0x00400000 /* 1 */
+#define CTR_TXDIZ_HIZ 0x00800000 /* High-impedance */
+#define CTR_TSCKE 0x00008000 /* Transmit Serial Clock Output Enable */
+#define CTR_TFSE 0x00004000 /* Transmit Frame Sync Signal Output Enable */
+#define CTR_TXE 0x00000200 /* Transmit Enable */
+#define CTR_RXE 0x00000100 /* Receive Enable */
+
+/* STR and IER */
+#define STR_TEOF 0x00800000 /* Frame Transmission End */
+#define STR_REOF 0x00000080 /* Frame Reception End */
+
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
@@ -131,22 +180,21 @@ static struct {
unsigned short div;
unsigned short scr;
} const sh_msiof_spi_clk_table[] = {
- { 1, 0x0007 },
- { 2, 0x0000 },
- { 4, 0x0001 },
- { 8, 0x0002 },
- { 16, 0x0003 },
- { 32, 0x0004 },
- { 64, 0x1f00 },
- { 128, 0x1f01 },
- { 256, 0x1f02 },
- { 512, 0x1f03 },
- { 1024, 0x1f04 },
+ { 1, SCR_BRPS( 1) | SCR_BRDV_DIV_1 },
+ { 2, SCR_BRPS( 1) | SCR_BRDV_DIV_2 },
+ { 4, SCR_BRPS( 1) | SCR_BRDV_DIV_4 },
+ { 8, SCR_BRPS( 1) | SCR_BRDV_DIV_8 },
+ { 16, SCR_BRPS( 1) | SCR_BRDV_DIV_16 },
+ { 32, SCR_BRPS( 1) | SCR_BRDV_DIV_32 },
+ { 64, SCR_BRPS(32) | SCR_BRDV_DIV_2 },
+ { 128, SCR_BRPS(32) | SCR_BRDV_DIV_4 },
+ { 256, SCR_BRPS(32) | SCR_BRDV_DIV_8 },
+ { 512, SCR_BRPS(32) | SCR_BRDV_DIV_16 },
+ { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 },
};
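
Editor's note: the rewritten table composes each divisor from a BRPS prescaler (1-32, bits 8-12) and a BRDV power-of-two code, so the overall division ratio is prescaler times BRDV divisor. A quick host-side check, reusing only the macro values defined above, that the symbolic entries are bit-identical to the old literals they replace:

#include <assert.h>
#include <stdio.h>

#define SCR_BRPS(i)		(((i) - 1) << 8)
#define SCR_BRDV_DIV_2		0x0000
#define SCR_BRDV_DIV_16		0x0003
#define SCR_BRDV_DIV_32		0x0004
#define SCR_BRDV_DIV_1		0x0007

int main(void)
{
	/* old literal entries, copied from the removed lines above */
	assert((SCR_BRPS(1)  | SCR_BRDV_DIV_1)  == 0x0007);	/* divide by 1 */
	assert((SCR_BRPS(32) | SCR_BRDV_DIV_2)  == 0x1f00);	/* divide by 64 */
	assert((SCR_BRPS(32) | SCR_BRDV_DIV_16) == 0x1f03);	/* divide by 512 */
	assert((SCR_BRPS(32) | SCR_BRDV_DIV_32) == 0x1f04);	/* divide by 1024 */
	printf("symbolic table entries match the old magic numbers\n");
	return 0;
}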
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
- unsigned long parent_rate,
- unsigned long spi_hz)
+ unsigned long parent_rate, u32 spi_hz)
{
unsigned long div = 1024;
size_t k;
@@ -164,7 +212,8 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1);
sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr);
- sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
+ if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
+ sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
}
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
@@ -183,21 +232,25 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
*/
sh_msiof_write(p, FCTR, 0);
- tmp = 0;
- tmp |= !cs_high << 25;
- tmp |= lsb_first << 24;
- sh_msiof_write(p, TMDR1, 0xe0000005 | tmp);
- sh_msiof_write(p, RMDR1, 0x20000005 | tmp);
+ tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
+ tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
+ tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+ sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+ if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
+ /* These bits are reserved if RX needs TX */
+ tmp &= ~0x0000ffff;
+ }
+ sh_msiof_write(p, RMDR1, tmp);
- tmp = 0xa0000000;
- tmp |= cpol << 30; /* TSCKIZ */
- tmp |= cpol << 28; /* RSCKIZ */
+ tmp = 0;
+ tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
+ tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
edge = cpol ^ !cpha;
- tmp |= edge << 27; /* TEDG */
- tmp |= edge << 26; /* REDG */
- tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */
+ tmp |= edge << CTR_TEDG_SHIFT;
+ tmp |= edge << CTR_REDG_SHIFT;
+ tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
sh_msiof_write(p, CTR, tmp);
}
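
Editor's note: the same de-magicking happens in sh_msiof_spi_set_pin_regs(): the old 0xe0000005/0x20000005 seeds for TMDR1/RMDR1 and the 0xa0000000 seed for CTR are rebuilt from the named bits introduced earlier in this hunk. A short host-side check (macro values copied from the definitions above) that the symbolic form is equivalent:

#include <assert.h>

#define MDR1_TRMD	0x80000000
#define MDR1_SYNCMD_SPI	0x20000000
#define MDR1_FLD_SHIFT	2
#define MDR1_XXSTP	0x00000001
#define TMDR1_PCON	0x40000000
#define CTR_TSCKIZ_SCK	0x80000000
#define CTR_RSCKIZ_SCK	0x20000000

int main(void)
{
	unsigned int tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;

	assert((tmp | MDR1_TRMD | TMDR1_PCON) == 0xe0000005);	/* old TMDR1 seed */
	assert(tmp == 0x20000005);				/* old RMDR1 seed */
	assert((CTR_TSCKIZ_SCK | CTR_RSCKIZ_SCK) == 0xa0000000);	/* old CTR seed */
	return 0;
}

Both the old and the new code then OR the cs_high/lsb_first/cpol-dependent bits on top of these seeds, so comparing the seeds is the meaningful equivalence check.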
@@ -205,12 +258,12 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
u32 bits, u32 words)
{
- u32 dr2 = ((bits - 1) << 24) | ((words - 1) << 16);
+ u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
- if (tx_buf)
+ if (tx_buf || (p->chipdata->master_flags & SPI_MASTER_MUST_TX))
sh_msiof_write(p, TMDR2, dr2);
else
- sh_msiof_write(p, TMDR2, dr2 | 1);
+ sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
if (rx_buf)
sh_msiof_write(p, RMDR2, dr2);
@@ -363,77 +416,45 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
}
-static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t)
+static int sh_msiof_spi_setup(struct spi_device *spi)
{
- int bits;
-
- bits = t ? t->bits_per_word : 0;
- if (!bits)
- bits = spi->bits_per_word;
- return bits;
-}
-
-static unsigned long sh_msiof_spi_hz(struct spi_device *spi,
- struct spi_transfer *t)
-{
- unsigned long hz;
-
- hz = t ? t->speed_hz : 0;
- if (!hz)
- hz = spi->max_speed_hz;
- return hz;
-}
+ struct device_node *np = spi->master->dev.of_node;
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
-static int sh_msiof_spi_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- int bits;
+ if (!np) {
+ /*
+ * Use spi->controller_data for CS (same strategy as spi_gpio),
+ * if any; otherwise let HW control CS
+ */
+ spi->cs_gpio = (uintptr_t)spi->controller_data;
+ }
- /* noting to check hz values against since parent clock is disabled */
+ /* Configure pins before deasserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST),
+ !!(spi->mode & SPI_CS_HIGH));
- bits = sh_msiof_spi_bits(spi, t);
- if (bits < 8)
- return -EINVAL;
- if (bits > 32)
- return -EINVAL;
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
- return spi_bitbang_setup_transfer(spi, t);
+ return 0;
}
-static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on)
+static int sh_msiof_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
- int value;
-
- /* chip select is active low unless SPI_CS_HIGH is set */
- if (spi->mode & SPI_CS_HIGH)
- value = (is_on == BITBANG_CS_ACTIVE) ? 1 : 0;
- else
- value = (is_on == BITBANG_CS_ACTIVE) ? 0 : 1;
-
- if (is_on == BITBANG_CS_ACTIVE) {
- if (!test_and_set_bit(0, &p->flags)) {
- pm_runtime_get_sync(&p->pdev->dev);
- clk_enable(p->clk);
- }
-
- /* Configure pins before asserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
- !!(spi->mode & SPI_CPHA),
- !!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
- }
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ const struct spi_device *spi = msg->spi;
- /* use spi->controller data for CS (same strategy as spi_gpio) */
- gpio_set_value((uintptr_t)spi->controller_data, value);
-
- if (is_on == BITBANG_CS_INACTIVE) {
- if (test_and_clear_bit(0, &p->flags)) {
- clk_disable(p->clk);
- pm_runtime_put(&p->pdev->dev);
- }
- }
+ /* Configure pins before asserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST),
+ !!(spi->mode & SPI_CS_HIGH));
+ return 0;
}
static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
@@ -487,7 +508,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
/* clear status bits */
sh_msiof_reset_str(p);
- /* shut down frame, tx/tx and clock signals */
+ /* shut down frame, rx/tx and clock signals */
ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
if (rx_buf)
@@ -505,9 +526,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
return ret;
}
-static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+static int sh_msiof_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
int bits;
@@ -517,7 +540,7 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
int n;
bool swab;
- bits = sh_msiof_spi_bits(spi, t);
+ bits = t->bits_per_word;
if (bits <= 8 && t->len > 15 && !(t->len & 3)) {
bits = 32;
@@ -567,8 +590,7 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
}
/* setup clocks (clock already enabled in chipselect()) */
- sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk),
- sh_msiof_spi_hz(spi, t));
+ sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
/* transfer in fifo sized chunks */
words = t->len / bytes_per_word;
@@ -588,22 +610,36 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
words -= n;
}
- return bytes_done;
-}
-
-static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
- u32 word, u8 bits)
-{
- BUG(); /* unused but needed by bitbang code */
return 0;
}
+static const struct sh_msiof_chipdata sh_data = {
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .master_flags = 0,
+};
+
+static const struct sh_msiof_chipdata r8a779x_data = {
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 256,
+ .master_flags = SPI_MASTER_MUST_TX,
+};
+
+static const struct of_device_id sh_msiof_match[] = {
+ { .compatible = "renesas,sh-msiof", .data = &sh_data },
+ { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_msiof_match);
+
#ifdef CONFIG_OF
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
struct sh_msiof_spi_info *info;
struct device_node *np = dev->of_node;
- u32 num_cs = 0;
+ u32 num_cs = 1;
info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
if (!info) {
@@ -633,6 +669,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
{
struct resource *r;
struct spi_master *master;
+ const struct of_device_id *of_id;
struct sh_msiof_spi_priv *p;
int i;
int ret;
@@ -646,10 +683,15 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
p = spi_master_get_devdata(master);
platform_set_drvdata(pdev, p);
- if (pdev->dev.of_node)
+
+ of_id = of_match_device(sh_msiof_match, &pdev->dev);
+ if (of_id) {
+ p->chipdata = of_id->data;
p->info = sh_msiof_spi_parse_dt(&pdev->dev);
- else
+ } else {
+ p->chipdata = (const void *)pdev->id_entry->driver_data;
p->info = dev_get_platdata(&pdev->dev);
+ }
if (!p->info) {
dev_err(&pdev->dev, "failed to obtain device info\n");
@@ -687,49 +729,40 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
goto err1;
}
- ret = clk_prepare(p->clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "unable to prepare clock\n");
- goto err1;
- }
-
p->pdev = pdev;
pm_runtime_enable(&pdev->dev);
- /* The standard version of MSIOF use 64 word FIFOs */
- p->tx_fifo_size = 64;
- p->rx_fifo_size = 64;
-
/* Platform data may override FIFO sizes */
+ p->tx_fifo_size = p->chipdata->tx_fifo_size;
+ p->rx_fifo_size = p->chipdata->rx_fifo_size;
if (p->info->tx_fifo_override)
p->tx_fifo_size = p->info->tx_fifo_override;
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
- /* init master and bitbang code */
+ /* init master code */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
- master->flags = 0;
+ master->flags = p->chipdata->master_flags;
master->bus_num = pdev->id;
+ master->dev.of_node = pdev->dev.of_node;
master->num_chipselect = p->info->num_chipselect;
- master->setup = spi_bitbang_setup;
- master->cleanup = spi_bitbang_cleanup;
-
- p->bitbang.master = master;
- p->bitbang.chipselect = sh_msiof_spi_chipselect;
- p->bitbang.setup_transfer = sh_msiof_spi_setup_transfer;
- p->bitbang.txrx_bufs = sh_msiof_spi_txrx;
- p->bitbang.txrx_word[SPI_MODE_0] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_1] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_2] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_3] = sh_msiof_spi_txrx_word;
-
- ret = spi_bitbang_start(&p->bitbang);
- if (ret == 0)
- return 0;
+ master->setup = sh_msiof_spi_setup;
+ master->prepare_message = sh_msiof_prepare_message;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ master->auto_runtime_pm = true;
+ master->transfer_one = sh_msiof_transfer_one;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master error.\n");
+ goto err2;
+ }
+ return 0;
+
+ err2:
pm_runtime_disable(&pdev->dev);
- clk_unprepare(p->clk);
err1:
spi_master_put(master);
return ret;
@@ -737,30 +770,22 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
static int sh_msiof_spi_remove(struct platform_device *pdev)
{
- struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
- int ret;
-
- ret = spi_bitbang_stop(&p->bitbang);
- if (!ret) {
- pm_runtime_disable(&pdev->dev);
- clk_unprepare(p->clk);
- spi_master_put(p->bitbang.master);
- }
- return ret;
+ pm_runtime_disable(&pdev->dev);
+ return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id sh_msiof_match[] = {
- { .compatible = "renesas,sh-msiof", },
- { .compatible = "renesas,sh-mobile-msiof", },
+static struct platform_device_id spi_driver_ids[] = {
+ { "spi_sh_msiof", (kernel_ulong_t)&sh_data },
+ { "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data },
+ { "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data },
{},
};
-MODULE_DEVICE_TABLE(of, sh_msiof_match);
-#endif
+MODULE_DEVICE_TABLE(platform, spi_driver_ids);
static struct platform_driver sh_msiof_spi_drv = {
.probe = sh_msiof_spi_probe,
.remove = sh_msiof_spi_remove,
+ .id_table = spi_driver_ids,
.driver = {
.name = "spi_sh_msiof",
.owner = THIS_MODULE,
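
Editor's note: with the bitbang layer gone, the per-SoC differences (FIFO sizes and SPI_MASTER_MUST_TX) become plain data. DT probes reach them through the .data pointer of the matched of_device_id, legacy board-file probes through the driver_data of the new platform_device_id table. A minimal sketch of that dual lookup, using hypothetical my_* names that mirror the sh_msiof tables above:

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct my_chipdata {
	u16 tx_fifo_size;
	u16 rx_fifo_size;
	u16 master_flags;
};

/* assumed to be defined elsewhere, like sh_msiof_match above */
extern const struct of_device_id my_of_match[];

static const struct my_chipdata *my_get_chipdata(struct platform_device *pdev)
{
	const struct of_device_id *of_id = of_match_device(my_of_match, &pdev->dev);

	if (of_id)					/* DT probe: use the match entry's .data */
		return of_id->data;

	/* legacy probe: platform_device_id.driver_data carries the same pointer */
	return (const void *)pdev->id_entry->driver_data;
}

Keeping both tables pointed at the same chipdata objects is what lets the rest of probe stay identical for DT and non-DT systems.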
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 38eb24df796c..8b44b71f5024 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -14,7 +14,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -109,7 +108,7 @@ static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
{
struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
- if (sp->info && sp->info->chip_select)
+ if (sp->info->chip_select)
(sp->info->chip_select)(sp->info, dev->chip_select, value);
}
@@ -131,6 +130,11 @@ static int sh_sci_spi_probe(struct platform_device *dev)
platform_set_drvdata(dev, sp);
sp->info = dev_get_platdata(&dev->dev);
+ if (!sp->info) {
+ dev_err(&dev->dev, "platform data is missing\n");
+ ret = -ENOENT;
+ goto err1;
+ }
/* setup spi bitbang adaptor */
sp->bitbang.master = master;
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index f6f2c7010177..03edf5ed0e9f 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -322,7 +322,8 @@ static void spi_sh_work(struct work_struct *work)
spin_lock_irqsave(&ss->lock, flags);
mesg->status = 0;
- mesg->complete(mesg->context);
+ if (mesg->complete)
+ mesg->complete(mesg->context);
}
clear_fifo(ss);
@@ -340,7 +341,8 @@ static void spi_sh_work(struct work_struct *work)
error:
mesg->status = ret;
- mesg->complete(mesg->context);
+ if (mesg->complete)
+ mesg->complete(mesg->context);
spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
SPI_SH_CR1);
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index e430689c3837..1a77ad52812f 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -22,7 +22,6 @@
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
-#include <linux/sirfsoc_dma.h>
#define DRIVER_NAME "sirfsoc_spi"
@@ -132,6 +131,8 @@
#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
+#define SIRFSOC_MAX_CMD_BYTES 4
+
struct sirfsoc_spi {
struct spi_bitbang bitbang;
struct completion rx_done;
@@ -162,6 +163,12 @@ struct sirfsoc_spi {
void *dummypage;
int word_width; /* in bytes */
+ /*
+ * if the tx length is no more than 4 bytes and there is no rx
+ * buffer, use command mode
+ */
+ bool tx_by_cmd;
+
int chipselect[0];
};
@@ -260,6 +267,12 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
writel(spi_stat, sspi->base + SIRFSOC_SPI_INT_STATUS);
+ if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
+ complete(&sspi->tx_done);
+ writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
+ return IRQ_HANDLED;
+ }
+
/* Error Conditions */
if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
spi_stat & SIRFSOC_SPI_TX_UFLOW) {
@@ -310,6 +323,34 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
+ /*
+ * fill tx_buf into command register and wait for its completion
+ */
+ if (sspi->tx_by_cmd) {
+ u32 cmd;
+ memcpy(&cmd, sspi->tx, t->len);
+
+ if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
+ cmd = cpu_to_be32(cmd) >>
+ ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
+ if (sspi->word_width == 2 && t->len == 4 &&
+ (!(spi->mode & SPI_LSB_FIRST)))
+ cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
+
+ writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
+ writel(SIRFSOC_SPI_FRM_END_INT_EN,
+ sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_CMD_TX_EN,
+ sspi->base + SIRFSOC_SPI_TX_RX_EN);
+
+ if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
+ dev_err(&spi->dev, "transfer timeout\n");
+ return 0;
+ }
+
+ return t->len;
+ }
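
Editor's note: the command path packs up to SIRFSOC_MAX_CMD_BYTES of TX data into one 32-bit write to the CMD register; the shifting above byte-swaps and right-justifies the buffer so the first TX byte lands in the most significant used byte. Host-side arithmetic only, for the 8-bit MSB-first case, with htonl() standing in for cpu_to_be32() on a little-endian machine:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	const uint8_t tx[2] = { 0xab, 0xcd };	/* a 2-byte command, 8 bits per word */
	size_t len = sizeof(tx);
	uint32_t cmd = 0;

	memcpy(&cmd, tx, len);			/* little-endian host: cmd = 0x0000cdab */
	cmd = htonl(cmd) >> ((4 - len) * 8);	/* byte-swap, then right-justify */

	/* the first tx byte ends up in the more significant byte of the result */
	assert(cmd == 0xabcd);
	return 0;
}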
+
if (sspi->left_tx_word == 1) {
writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
SIRFSOC_SPI_ENA_AUTO_CLR,
@@ -459,11 +500,6 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
sspi->rx_word = spi_sirfsoc_rx_word_u8;
sspi->tx_word = spi_sirfsoc_tx_word_u8;
- txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_BYTE;
- rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_BYTE;
- sspi->word_width = 1;
break;
case 12:
case 16:
@@ -471,26 +507,22 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
sspi->rx_word = spi_sirfsoc_rx_word_u16;
sspi->tx_word = spi_sirfsoc_tx_word_u16;
- txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_WORD;
- rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_WORD;
- sspi->word_width = 2;
break;
case 32:
regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
sspi->rx_word = spi_sirfsoc_rx_word_u32;
sspi->tx_word = spi_sirfsoc_tx_word_u32;
- txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_DWORD;
- rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_DWORD;
- sspi->word_width = 4;
break;
default:
BUG();
}
+ sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
+ txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ sspi->word_width;
+ rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ sspi->word_width;
+
if (!(spi->mode & SPI_CS_HIGH))
regval |= SIRFSOC_SPI_CS_IDLE_STAT;
if (!(spi->mode & SPI_LSB_FIRST))
@@ -519,6 +551,14 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
+ if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
+ regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
+ SIRFSOC_SPI_CMD_MODE);
+ sspi->tx_by_cmd = true;
+ } else {
+ regval &= ~SIRFSOC_SPI_CMD_MODE;
+ sspi->tx_by_cmd = false;
+ }
writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
if (IS_DMA_VALID(t)) {
@@ -548,8 +588,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
struct spi_master *master;
struct resource *mem_res;
int num_cs, cs_gpio, irq;
- u32 rx_dma_ch, tx_dma_ch;
- dma_cap_mask_t dma_cap_mask;
int i;
int ret;
@@ -560,20 +598,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
goto err_cs;
}
- ret = of_property_read_u32(pdev->dev.of_node,
- "sirf,spi-dma-rx-channel", &rx_dma_ch);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to get rx dma channel\n");
- goto err_cs;
- }
-
- ret = of_property_read_u32(pdev->dev.of_node,
- "sirf,spi-dma-tx-channel", &tx_dma_ch);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to get tx dma channel\n");
- goto err_cs;
- }
-
master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs);
if (!master) {
dev_err(&pdev->dev, "Unable to allocate SPI master\n");
@@ -637,18 +661,13 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
/* request DMA channels */
- dma_cap_zero(dma_cap_mask);
- dma_cap_set(DMA_INTERLEAVE, dma_cap_mask);
-
- sspi->rx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
- (void *)rx_dma_ch);
+ sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
if (!sspi->rx_chan) {
dev_err(&pdev->dev, "can not allocate rx dma channel\n");
ret = -ENODEV;
goto free_master;
}
- sspi->tx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
- (void *)tx_dma_ch);
+ sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
if (!sspi->tx_chan) {
dev_err(&pdev->dev, "can not allocate tx dma channel\n");
ret = -ENODEV;
@@ -724,11 +743,16 @@ static int spi_sirfsoc_remove(struct platform_device *pdev)
return 0;
}
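
Editor's note: the DMA hunk above replaces the SiRF-specific filter (channel numbers read from "sirf,spi-dma-rx-channel"/"sirf,spi-dma-tx-channel") with dma_request_slave_channel(), which resolves the channels named "rx" and "tx" through the generic dmas/dma-names device-tree binding. A condensed sketch of that request/release pairing, with hypothetical my_* naming and the rest of the setup omitted:

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int my_request_dma(struct device *dev,
			  struct dma_chan **rx, struct dma_chan **tx)
{
	*rx = dma_request_slave_channel(dev, "rx");	/* looked up via dmas/dma-names */
	if (!*rx)
		return -ENODEV;

	*tx = dma_request_slave_channel(dev, "tx");
	if (!*tx) {
		dma_release_channel(*rx);		/* keep the unwind symmetric */
		return -ENODEV;
	}
	return 0;
}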
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int spi_sirfsoc_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
clk_disable(sspi->clk);
return 0;
@@ -745,15 +769,13 @@ static int spi_sirfsoc_resume(struct device *dev)
writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- return 0;
+ return spi_master_resume(master);
}
-
-static const struct dev_pm_ops spi_sirfsoc_pm_ops = {
- .suspend = spi_sirfsoc_suspend,
- .resume = spi_sirfsoc_resume,
-};
#endif
+static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
+ spi_sirfsoc_resume);
+
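
Editor's note: SIMPLE_DEV_PM_OPS always emits the dev_pm_ops object and only fills in the callbacks when CONFIG_PM_SLEEP is enabled, which is why the #ifdef around the .pm assignment further down can be dropped while the callbacks themselves stay under CONFIG_PM_SLEEP. The suspend path also now parks the SPI core's queue first via spi_master_suspend(). A minimal shape of that pattern, with hypothetical my_* names and the clock handling reduced to comments:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>

#ifdef CONFIG_PM_SLEEP
static int my_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret = spi_master_suspend(master);	/* quiesce the message queue first */

	if (ret)
		return ret;
	/* ...then gate clocks, save state... */
	return 0;
}

static int my_resume(struct device *dev)
{
	/* ...ungate clocks, restore registers... */
	return spi_master_resume(dev_get_drvdata(dev));
}
#endif

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);
/* .driver.pm = &my_pm_ops can now be referenced without an #ifdef */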
static const struct of_device_id spi_sirfsoc_of_match[] = {
{ .compatible = "sirf,prima2-spi", },
{ .compatible = "sirf,marco-spi", },
@@ -765,9 +787,7 @@ static struct platform_driver spi_sirfsoc_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &spi_sirfsoc_pm_ops,
-#endif
.of_match_table = spi_sirfsoc_of_match,
},
.probe = spi_sirfsoc_probe,
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
new file mode 100644
index 000000000000..d266a8702067
--- /dev/null
+++ b/drivers/spi/spi-sun4i.c
@@ -0,0 +1,478 @@
+/*
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+
+#include <linux/spi/spi.h>
+
+#define SUN4I_FIFO_DEPTH 64
+
+#define SUN4I_RXDATA_REG 0x00
+
+#define SUN4I_TXDATA_REG 0x04
+
+#define SUN4I_CTL_REG 0x08
+#define SUN4I_CTL_ENABLE BIT(0)
+#define SUN4I_CTL_MASTER BIT(1)
+#define SUN4I_CTL_CPHA BIT(2)
+#define SUN4I_CTL_CPOL BIT(3)
+#define SUN4I_CTL_CS_ACTIVE_LOW BIT(4)
+#define SUN4I_CTL_LMTF BIT(6)
+#define SUN4I_CTL_TF_RST BIT(8)
+#define SUN4I_CTL_RF_RST BIT(9)
+#define SUN4I_CTL_XCH BIT(10)
+#define SUN4I_CTL_CS_MASK 0x3000
+#define SUN4I_CTL_CS(cs) (((cs) << 12) & SUN4I_CTL_CS_MASK)
+#define SUN4I_CTL_DHB BIT(15)
+#define SUN4I_CTL_CS_MANUAL BIT(16)
+#define SUN4I_CTL_CS_LEVEL BIT(17)
+#define SUN4I_CTL_TP BIT(18)
+
+#define SUN4I_INT_CTL_REG 0x0c
+#define SUN4I_INT_CTL_TC BIT(16)
+
+#define SUN4I_INT_STA_REG 0x10
+
+#define SUN4I_DMA_CTL_REG 0x14
+
+#define SUN4I_WAIT_REG 0x18
+
+#define SUN4I_CLK_CTL_REG 0x1c
+#define SUN4I_CLK_CTL_CDR2_MASK 0xff
+#define SUN4I_CLK_CTL_CDR2(div) ((div) & SUN4I_CLK_CTL_CDR2_MASK)
+#define SUN4I_CLK_CTL_CDR1_MASK 0xf
+#define SUN4I_CLK_CTL_CDR1(div) (((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN4I_CLK_CTL_DRS BIT(12)
+
+#define SUN4I_BURST_CNT_REG 0x20
+#define SUN4I_BURST_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN4I_XMIT_CNT_REG 0x24
+#define SUN4I_XMIT_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN4I_FIFO_STA_REG 0x28
+#define SUN4I_FIFO_STA_RF_CNT_MASK 0x7f
+#define SUN4I_FIFO_STA_RF_CNT_BITS 0
+#define SUN4I_FIFO_STA_TF_CNT_MASK 0x7f
+#define SUN4I_FIFO_STA_TF_CNT_BITS 16
+
+struct sun4i_spi {
+ struct spi_master *master;
+ void __iomem *base_addr;
+ struct clk *hclk;
+ struct clk *mclk;
+
+ struct completion done;
+
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+};
+
+static inline u32 sun4i_spi_read(struct sun4i_spi *sspi, u32 reg)
+{
+ return readl(sspi->base_addr + reg);
+}
+
+static inline void sun4i_spi_write(struct sun4i_spi *sspi, u32 reg, u32 value)
+{
+ writel(value, sspi->base_addr + reg);
+}
+
+static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
+{
+ u32 reg, cnt;
+ u8 byte;
+
+ /* See how much data is available */
+ reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG);
+ reg &= SUN4I_FIFO_STA_RF_CNT_MASK;
+ cnt = reg >> SUN4I_FIFO_STA_RF_CNT_BITS;
+
+ if (len > cnt)
+ len = cnt;
+
+ while (len--) {
+ byte = readb(sspi->base_addr + SUN4I_RXDATA_REG);
+ if (sspi->rx_buf)
+ *sspi->rx_buf++ = byte;
+ }
+}
+
+static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len)
+{
+ u8 byte;
+
+ if (len > sspi->len)
+ len = sspi->len;
+
+ while (len--) {
+ byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
+ writeb(byte, sspi->base_addr + SUN4I_TXDATA_REG);
+ sspi->len--;
+ }
+}
+
+static void sun4i_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct sun4i_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+
+ reg &= ~SUN4I_CTL_CS_MASK;
+ reg |= SUN4I_CTL_CS(spi->chip_select);
+
+ if (enable)
+ reg |= SUN4I_CTL_CS_LEVEL;
+ else
+ reg &= ~SUN4I_CTL_CS_LEVEL;
+
+ /*
+ * Even though this looks irrelevant since we are supposed to
+ * be controlling the chip select manually, this bit also
+ * controls the levels of the chip select for inactive
+ * devices.
+ *
+ * If we don't set it, the chip select level will go low by
+ * default when the device is idle, which is not really
+ * expected in the common case where the chip select is active
+ * low.
+ */
+ if (spi->mode & SPI_CS_HIGH)
+ reg &= ~SUN4I_CTL_CS_ACTIVE_LOW;
+ else
+ reg |= SUN4I_CTL_CS_ACTIVE_LOW;
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
+}
+
+static int sun4i_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ unsigned int mclk_rate, div, timeout;
+ unsigned int tx_len = 0;
+ int ret = 0;
+ u32 reg;
+
+ /* We don't support transfers larger than the FIFO */
+ if (tfr->len > SUN4I_FIFO_DEPTH)
+ return -EINVAL;
+
+ reinit_completion(&sspi->done);
+ sspi->tx_buf = tfr->tx_buf;
+ sspi->rx_buf = tfr->rx_buf;
+ sspi->len = tfr->len;
+
+ /* Clear pending interrupts */
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, ~0);
+
+
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+
+ /* Reset FIFOs */
+ sun4i_spi_write(sspi, SUN4I_CTL_REG,
+ reg | SUN4I_CTL_RF_RST | SUN4I_CTL_TF_RST);
+
+ /*
+ * Setup the transfer control register: Chip Select,
+ * polarities, etc.
+ */
+ if (spi->mode & SPI_CPOL)
+ reg |= SUN4I_CTL_CPOL;
+ else
+ reg &= ~SUN4I_CTL_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= SUN4I_CTL_CPHA;
+ else
+ reg &= ~SUN4I_CTL_CPHA;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= SUN4I_CTL_LMTF;
+ else
+ reg &= ~SUN4I_CTL_LMTF;
+
+
+ /*
+ * If it's a TX only transfer, we don't want to fill the RX
+ * FIFO with bogus data
+ */
+ if (sspi->rx_buf)
+ reg &= ~SUN4I_CTL_DHB;
+ else
+ reg |= SUN4I_CTL_DHB;
+
+ /* We want to control the chip select manually */
+ reg |= SUN4I_CTL_CS_MANUAL;
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
+
+ /* Ensure that we have a parent clock fast enough */
+ mclk_rate = clk_get_rate(sspi->mclk);
+ if (mclk_rate < (2 * spi->max_speed_hz)) {
+ clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
+ mclk_rate = clk_get_rate(sspi->mclk);
+ }
+
+ /*
+ * Setup clock divider.
+ *
+ * We have two choices there. Either we can use the clock
+ * divide rate 1, which is calculated with this formula:
+ * SPI_CLK = MOD_CLK / (2 ^ (cdr + 1))
+ * Or we can use CDR2, which is calculated with the formula:
+ * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+ * Whether we use the former or the latter is set through the
+ * DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
+ */
+ div = mclk_rate / (2 * spi->max_speed_hz);
+ if (div <= (SUN4I_CLK_CTL_CDR2_MASK + 1)) {
+ if (div > 0)
+ div--;
+
+ reg = SUN4I_CLK_CTL_CDR2(div) | SUN4I_CLK_CTL_DRS;
+ } else {
+ div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
+ reg = SUN4I_CLK_CTL_CDR1(div);
+ }
+
+ sun4i_spi_write(sspi, SUN4I_CLK_CTL_REG, reg);
+
+ /* Setup the transfer now... */
+ if (sspi->tx_buf)
+ tx_len = tfr->len;
+
+ /* Setup the counters */
+ sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
+ sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
+
+ /* Fill the TX FIFO */
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+ /* Enable the interrupts */
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
+
+ /* Start the transfer */
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+
+ timeout = wait_for_completion_timeout(&sspi->done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+out:
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0);
+
+ return ret;
+}
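
Editor's note: the divider selection just above prefers CDR2 (linear: SPI_CLK = mclk / (2 * (cdr2 + 1))) and falls back to CDR1 (power-of-two: SPI_CLK = mclk / 2^(cdr1 + 1), per the comment) once the required ratio no longer fits the 8-bit CDR2 field. Host-side arithmetic only, mirroring that selection for two sample rates; the 96 MHz module clock is purely illustrative:

#include <stdio.h>

/* floor(log2(x)); same result as the kernel's ilog2() for x > 0 */
static unsigned int ilog2u(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

/* mirrors the divider selection above, without the register layout */
static void pick_divider(unsigned int mclk, unsigned int speed)
{
	unsigned int div = mclk / (2 * speed);

	if (div <= 256) {	/* SUN4I_CLK_CTL_CDR2_MASK + 1 */
		if (div > 0)
			div--;
		printf("CDR2 = %u -> %u Hz\n", div, mclk / (2 * (div + 1)));
	} else {
		div = ilog2u(mclk) - ilog2u(speed);
		printf("CDR1 = %u -> %u Hz\n", div, mclk >> (div + 1));
	}
}

int main(void)
{
	pick_divider(96000000, 4000000);	/* CDR2 = 11, exactly 4 MHz */
	pick_divider(96000000, 100000);		/* CDR1 = 10, 46875 Hz */
	return 0;
}

The CDR1 path trades accuracy for range: it can only hit power-of-two fractions of the module clock.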
+
+static irqreturn_t sun4i_spi_handler(int irq, void *dev_id)
+{
+ struct sun4i_spi *sspi = dev_id;
+ u32 status = sun4i_spi_read(sspi, SUN4I_INT_STA_REG);
+
+ /* Transfer complete */
+ if (status & SUN4I_INT_CTL_TC) {
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TC);
+ complete(&sspi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sun4i_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sspi->hclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable AHB clock\n");
+ goto out;
+ }
+
+ ret = clk_prepare_enable(sspi->mclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable module clock\n");
+ goto err;
+ }
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG,
+ SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(sspi->hclk);
+out:
+ return ret;
+}
+
+static int sun4i_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(sspi->mclk);
+ clk_disable_unprepare(sspi->hclk);
+
+ return 0;
+}
+
+static int sun4i_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sun4i_spi *sspi;
+ struct resource *res;
+ int ret = 0, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sspi->base_addr)) {
+ ret = PTR_ERR(sspi->base_addr);
+ goto err_free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No spi IRQ specified\n");
+ ret = -ENXIO;
+ goto err_free_master;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sun4i_spi_handler,
+ 0, "sun4i-spi", sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_free_master;
+ }
+
+ sspi->master = master;
+ master->set_cs = sun4i_spi_set_cs;
+ master->transfer_one = sun4i_spi_transfer_one;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sspi->hclk)) {
+ dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+ ret = PTR_ERR(sspi->hclk);
+ goto err_free_master;
+ }
+
+ sspi->mclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(sspi->mclk)) {
+ dev_err(&pdev->dev, "Unable to acquire module clock\n");
+ ret = PTR_ERR(sspi->mclk);
+ goto err_free_master;
+ }
+
+ init_completion(&sspi->done);
+
+ /*
+ * This wake-up/shutdown pattern is used so that the device gets
+ * woken up even if runtime PM is disabled
+ */
+ ret = sun4i_spi_runtime_resume(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't resume the device\n");
+ goto err_free_master;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ goto err_pm_disable;
+ }
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ sun4i_spi_runtime_suspend(&pdev->dev);
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
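
Editor's note: both new Allwinner drivers bring the controller up by calling their runtime-resume handler directly, then mark the device active before enabling runtime PM, so the hardware works even when CONFIG_PM is off; the error path after that point mirrors it by disabling runtime PM and calling the runtime-suspend handler by hand. A condensed sketch of that bring-up and unwind (hypothetical my_* names, probe body trimmed to this pattern):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

int my_runtime_resume(struct device *dev);	/* clock/register bring-up, as above */
int my_runtime_suspend(struct device *dev);

static int my_probe(struct platform_device *pdev, struct spi_master *master)
{
	int ret;

	ret = my_runtime_resume(&pdev->dev);	/* manual power-up: no CONFIG_PM needed */
	if (ret)
		goto err_free_master;

	pm_runtime_set_active(&pdev->dev);	/* tell the PM core it is already powered */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_idle(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		goto err_pm_disable;
	return 0;

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	my_runtime_suspend(&pdev->dev);		/* mirror of the manual power-up */
err_free_master:
	spi_master_put(master);
	return ret;
}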
+
+static int sun4i_spi_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_spi_match[] = {
+ { .compatible = "allwinner,sun4i-a10-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun4i_spi_match);
+
+static const struct dev_pm_ops sun4i_spi_pm_ops = {
+ .runtime_resume = sun4i_spi_runtime_resume,
+ .runtime_suspend = sun4i_spi_runtime_suspend,
+};
+
+static struct platform_driver sun4i_spi_driver = {
+ .probe = sun4i_spi_probe,
+ .remove = sun4i_spi_remove,
+ .driver = {
+ .name = "sun4i-spi",
+ .owner = THIS_MODULE,
+ .of_match_table = sun4i_spi_match,
+ .pm = &sun4i_spi_pm_ops,
+ },
+};
+module_platform_driver(sun4i_spi_driver);
+
+MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A1X/A20 SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
new file mode 100644
index 000000000000..b3e3498a7e6f
--- /dev/null
+++ b/drivers/spi/spi-sun6i.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/workqueue.h>
+
+#include <linux/spi/spi.h>
+
+#define SUN6I_FIFO_DEPTH 128
+
+#define SUN6I_GBL_CTL_REG 0x04
+#define SUN6I_GBL_CTL_BUS_ENABLE BIT(0)
+#define SUN6I_GBL_CTL_MASTER BIT(1)
+#define SUN6I_GBL_CTL_TP BIT(7)
+#define SUN6I_GBL_CTL_RST BIT(31)
+
+#define SUN6I_TFR_CTL_REG 0x08
+#define SUN6I_TFR_CTL_CPHA BIT(0)
+#define SUN6I_TFR_CTL_CPOL BIT(1)
+#define SUN6I_TFR_CTL_SPOL BIT(2)
+#define SUN6I_TFR_CTL_CS_MASK 0x30
+#define SUN6I_TFR_CTL_CS(cs) (((cs) << 4) & SUN6I_TFR_CTL_CS_MASK)
+#define SUN6I_TFR_CTL_CS_MANUAL BIT(6)
+#define SUN6I_TFR_CTL_CS_LEVEL BIT(7)
+#define SUN6I_TFR_CTL_DHB BIT(8)
+#define SUN6I_TFR_CTL_FBS BIT(12)
+#define SUN6I_TFR_CTL_XCH BIT(31)
+
+#define SUN6I_INT_CTL_REG 0x10
+#define SUN6I_INT_CTL_RF_OVF BIT(8)
+#define SUN6I_INT_CTL_TC BIT(12)
+
+#define SUN6I_INT_STA_REG 0x14
+
+#define SUN6I_FIFO_CTL_REG 0x18
+#define SUN6I_FIFO_CTL_RF_RST BIT(15)
+#define SUN6I_FIFO_CTL_TF_RST BIT(31)
+
+#define SUN6I_FIFO_STA_REG 0x1c
+#define SUN6I_FIFO_STA_RF_CNT_MASK 0x7f
+#define SUN6I_FIFO_STA_RF_CNT_BITS 0
+#define SUN6I_FIFO_STA_TF_CNT_MASK 0x7f
+#define SUN6I_FIFO_STA_TF_CNT_BITS 16
+
+#define SUN6I_CLK_CTL_REG 0x24
+#define SUN6I_CLK_CTL_CDR2_MASK 0xff
+#define SUN6I_CLK_CTL_CDR2(div) (((div) & SUN6I_CLK_CTL_CDR2_MASK) << 0)
+#define SUN6I_CLK_CTL_CDR1_MASK 0xf
+#define SUN6I_CLK_CTL_CDR1(div) (((div) & SUN6I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN6I_CLK_CTL_DRS BIT(12)
+
+#define SUN6I_BURST_CNT_REG 0x30
+#define SUN6I_BURST_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_XMIT_CNT_REG 0x34
+#define SUN6I_XMIT_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_BURST_CTL_CNT_REG 0x38
+#define SUN6I_BURST_CTL_CNT_STC(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_TXDATA_REG 0x200
+#define SUN6I_RXDATA_REG 0x300
+
+struct sun6i_spi {
+ struct spi_master *master;
+ void __iomem *base_addr;
+ struct clk *hclk;
+ struct clk *mclk;
+ struct reset_control *rstc;
+
+ struct completion done;
+
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+};
+
+static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg)
+{
+ return readl(sspi->base_addr + reg);
+}
+
+static inline void sun6i_spi_write(struct sun6i_spi *sspi, u32 reg, u32 value)
+{
+ writel(value, sspi->base_addr + reg);
+}
+
+static inline void sun6i_spi_drain_fifo(struct sun6i_spi *sspi, int len)
+{
+ u32 reg, cnt;
+ u8 byte;
+
+ /* See how much data is available */
+ reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);
+ reg &= SUN6I_FIFO_STA_RF_CNT_MASK;
+ cnt = reg >> SUN6I_FIFO_STA_RF_CNT_BITS;
+
+ if (len > cnt)
+ len = cnt;
+
+ while (len--) {
+ byte = readb(sspi->base_addr + SUN6I_RXDATA_REG);
+ if (sspi->rx_buf)
+ *sspi->rx_buf++ = byte;
+ }
+}
+
+static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi, int len)
+{
+ u8 byte;
+
+ if (len > sspi->len)
+ len = sspi->len;
+
+ while (len--) {
+ byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
+ writeb(byte, sspi->base_addr + SUN6I_TXDATA_REG);
+ sspi->len--;
+ }
+}
+
+static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ reg &= ~SUN6I_TFR_CTL_CS_MASK;
+ reg |= SUN6I_TFR_CTL_CS(spi->chip_select);
+
+ if (enable)
+ reg |= SUN6I_TFR_CTL_CS_LEVEL;
+ else
+ reg &= ~SUN6I_TFR_CTL_CS_LEVEL;
+
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
+}
+
+
+static int sun6i_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ unsigned int mclk_rate, div, timeout;
+ unsigned int tx_len = 0;
+ int ret = 0;
+ u32 reg;
+
+ /* We don't support transfers larger than the FIFO */
+ if (tfr->len > SUN6I_FIFO_DEPTH)
+ return -EINVAL;
+
+ reinit_completion(&sspi->done);
+ sspi->tx_buf = tfr->tx_buf;
+ sspi->rx_buf = tfr->rx_buf;
+ sspi->len = tfr->len;
+
+ /* Clear pending interrupts */
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, ~0);
+
+ /* Reset FIFO */
+ sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG,
+ SUN6I_FIFO_CTL_RF_RST | SUN6I_FIFO_CTL_TF_RST);
+
+ /*
+ * Setup the transfer control register: Chip Select,
+ * polarities, etc.
+ */
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+
+ if (spi->mode & SPI_CPOL)
+ reg |= SUN6I_TFR_CTL_CPOL;
+ else
+ reg &= ~SUN6I_TFR_CTL_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= SUN6I_TFR_CTL_CPHA;
+ else
+ reg &= ~SUN6I_TFR_CTL_CPHA;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= SUN6I_TFR_CTL_FBS;
+ else
+ reg &= ~SUN6I_TFR_CTL_FBS;
+
+ /*
+ * If it's a TX only transfer, we don't want to fill the RX
+ * FIFO with bogus data
+ */
+ if (sspi->rx_buf)
+ reg &= ~SUN6I_TFR_CTL_DHB;
+ else
+ reg |= SUN6I_TFR_CTL_DHB;
+
+ /* We want to control the chip select manually */
+ reg |= SUN6I_TFR_CTL_CS_MANUAL;
+
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
+
+ /* Ensure that we have a parent clock fast enough */
+ mclk_rate = clk_get_rate(sspi->mclk);
+ if (mclk_rate < (2 * spi->max_speed_hz)) {
+ clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
+ mclk_rate = clk_get_rate(sspi->mclk);
+ }
+
+ /*
+ * Setup clock divider.
+ *
+ * We have two choices there. Either we can use the clock
+ * divide rate 1, which is calculated with this formula:
+ * SPI_CLK = MOD_CLK / (2 ^ cdr)
+ * Or we can use CDR2, which is calculated with the formula:
+ * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+ * Whether we use the former or the latter is set through the
+ * DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
+ */
+ div = mclk_rate / (2 * spi->max_speed_hz);
+ if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
+ if (div > 0)
+ div--;
+
+ reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
+ } else {
+ div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
+ reg = SUN6I_CLK_CTL_CDR1(div);
+ }
+
+ sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
+
+ /* Setup the transfer now... */
+ if (sspi->tx_buf)
+ tx_len = tfr->len;
+
+ /* Setup the counters */
+ sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, SUN6I_BURST_CNT(tfr->len));
+ sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, SUN6I_XMIT_CNT(tx_len));
+ sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG,
+ SUN6I_BURST_CTL_CNT_STC(tx_len));
+
+ /* Fill the TX FIFO */
+ sun6i_spi_fill_fifo(sspi, SUN6I_FIFO_DEPTH);
+
+ /* Enable the interrupts */
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, SUN6I_INT_CTL_TC);
+
+ /* Start the transfer */
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+
+ timeout = wait_for_completion_timeout(&sspi->done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ sun6i_spi_drain_fifo(sspi, SUN6I_FIFO_DEPTH);
+
+out:
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
+
+ return ret;
+}
+
+static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
+{
+ struct sun6i_spi *sspi = dev_id;
+ u32 status = sun6i_spi_read(sspi, SUN6I_INT_STA_REG);
+
+ /* Transfer complete */
+ if (status & SUN6I_INT_CTL_TC) {
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
+ complete(&sspi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sun6i_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sspi->hclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable AHB clock\n");
+ goto out;
+ }
+
+ ret = clk_prepare_enable(sspi->mclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable module clock\n");
+ goto err;
+ }
+
+ ret = reset_control_deassert(sspi->rstc);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert the device from reset\n");
+ goto err2;
+ }
+
+ sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
+ SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
+
+ return 0;
+
+err2:
+ clk_disable_unprepare(sspi->mclk);
+err:
+ clk_disable_unprepare(sspi->hclk);
+out:
+ return ret;
+}
+
+static int sun6i_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+
+ reset_control_assert(sspi->rstc);
+ clk_disable_unprepare(sspi->mclk);
+ clk_disable_unprepare(sspi->hclk);
+
+ return 0;
+}
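
Editor's note: compared with sun4i, sun6i adds a reset line: runtime resume deasserts it only after both clocks are running, and runtime suspend asserts it again before the clocks are cut, so the block is never out of reset without a clock. A minimal sketch of that ordering (hypothetical my_* names; the driver obtains rstc with devm_reset_control_get()):

#include <linux/clk.h>
#include <linux/reset.h>

struct my_priv {
	struct clk *hclk, *mclk;
	struct reset_control *rstc;
};

static int my_runtime_resume(struct my_priv *p)
{
	int ret = clk_prepare_enable(p->hclk);		/* bus clock first */

	if (ret)
		return ret;
	ret = clk_prepare_enable(p->mclk);		/* then the module clock */
	if (ret)
		goto err_hclk;
	ret = reset_control_deassert(p->rstc);		/* leave reset last */
	if (ret)
		goto err_mclk;
	return 0;

err_mclk:
	clk_disable_unprepare(p->mclk);
err_hclk:
	clk_disable_unprepare(p->hclk);
	return ret;
}

static void my_runtime_suspend(struct my_priv *p)
{
	reset_control_assert(p->rstc);			/* back into reset while still clocked */
	clk_disable_unprepare(p->mclk);
	clk_disable_unprepare(p->hclk);
}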
+
+static int sun6i_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sun6i_spi *sspi;
+ struct resource *res;
+ int ret = 0, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sspi->base_addr)) {
+ ret = PTR_ERR(sspi->base_addr);
+ goto err_free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No spi IRQ specified\n");
+ ret = -ENXIO;
+ goto err_free_master;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler,
+ 0, "sun6i-spi", sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_free_master;
+ }
+
+ sspi->master = master;
+ master->set_cs = sun6i_spi_set_cs;
+ master->transfer_one = sun6i_spi_transfer_one;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sspi->hclk)) {
+ dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+ ret = PTR_ERR(sspi->hclk);
+ goto err_free_master;
+ }
+
+ sspi->mclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(sspi->mclk)) {
+ dev_err(&pdev->dev, "Unable to acquire module clock\n");
+ ret = PTR_ERR(sspi->mclk);
+ goto err_free_master;
+ }
+
+ init_completion(&sspi->done);
+
+ sspi->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(sspi->rstc)) {
+ dev_err(&pdev->dev, "Couldn't get reset controller\n");
+ ret = PTR_ERR(sspi->rstc);
+ goto err_free_master;
+ }
+
+ /*
+ * This wake-up/shutdown pattern is used so that the device gets
+ * woken up even if runtime PM is disabled
+ */
+ ret = sun6i_spi_runtime_resume(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't resume the device\n");
+ goto err_free_master;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ goto err_pm_disable;
+ }
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ sun6i_spi_runtime_suspend(&pdev->dev);
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int sun6i_spi_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_spi_match[] = {
+ { .compatible = "allwinner,sun6i-a31-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun6i_spi_match);
+
+static const struct dev_pm_ops sun6i_spi_pm_ops = {
+ .runtime_resume = sun6i_spi_runtime_resume,
+ .runtime_suspend = sun6i_spi_runtime_suspend,
+};
+
+static struct platform_driver sun6i_spi_driver = {
+ .probe = sun6i_spi_probe,
+ .remove = sun6i_spi_remove,
+ .driver = {
+ .name = "sun6i-spi",
+ .owner = THIS_MODULE,
+ .of_match_table = sun6i_spi_match,
+ .pm = &sun6i_spi_pm_ops,
+ },
+};
+module_platform_driver(sun6i_spi_driver);
+
+MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 413c71843492..400649595505 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -23,7 +23,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -172,7 +171,6 @@ struct tegra_spi_data {
void __iomem *base;
phys_addr_t phys;
unsigned irq;
- u32 spi_max_frequency;
u32 cur_speed;
struct spi_device *cur_spi;
@@ -761,11 +759,6 @@ static int tegra_spi_setup(struct spi_device *spi)
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
- BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
-
- /* Set speed to the spi max fequency if spi device has not set */
- spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
-
ret = pm_runtime_get_sync(tspi->dev);
if (ret < 0) {
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
@@ -853,8 +846,8 @@ complete_xfer:
SPI_COMMAND1);
tegra_spi_transfer_delay(xfer->delay_usecs);
goto exit;
- } else if (msg->transfers.prev == &xfer->transfer_list) {
- /* This is the last transfer in message */
+ } else if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
if (xfer->cs_change)
tspi->cs_control = spi;
else {
@@ -1019,16 +1012,6 @@ static irqreturn_t tegra_spi_isr(int irq, void *context_data)
return IRQ_WAKE_THREAD;
}
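
Editor's note: here and in the two tegra20 drivers that follow, the private spi_max_frequency field, the *_parse_dt() helpers and the per-device `spi->max_speed_hz ? : ...` fallbacks are dropped. The controller-wide "spi-max-frequency" property now goes straight into master->max_speed_hz, and the drivers rely on the SPI core to apply that limit to devices that do not set a speed of their own (an assumption about the core of this kernel generation, not something visible in this diff). The probe-side part boils down to:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* sketch: read the controller limit straight into the core's field */
static void my_read_max_speed(struct platform_device *pdev,
			      struct spi_master *master)
{
	if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
				 &master->max_speed_hz))
		master->max_speed_hz = 25000000;	/* 25 MHz default, as in the hunks below */
}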
-static void tegra_spi_parse_dt(struct platform_device *pdev,
- struct tegra_spi_data *tspi)
-{
- struct device_node *np = pdev->dev.of_node;
-
- if (of_property_read_u32(np, "spi-max-frequency",
- &tspi->spi_max_frequency))
- tspi->spi_max_frequency = 25000000; /* 25MHz */
-}
-
static struct of_device_id tegra_spi_of_match[] = {
{ .compatible = "nvidia,tegra114-spi", },
{}
@@ -1050,15 +1033,15 @@ static int tegra_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
tspi = spi_master_get_devdata(master);
- /* Parse DT */
- tegra_spi_parse_dt(pdev, tspi);
+ if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tegra_spi_setup;
master->transfer_one_message = tegra_spi_transfer_one_message;
master->num_chipselect = MAX_CHIP_SELECT;
- master->bus_num = -1;
master->auto_runtime_pm = true;
tspi->master = master;
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 08794977f21a..47869ea636e1 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -22,7 +22,6 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -121,7 +120,6 @@ struct tegra_sflash_data {
struct reset_control *rst;
void __iomem *base;
unsigned irq;
- u32 spi_max_frequency;
u32 cur_speed;
struct spi_device *cur_spi;
@@ -315,15 +313,6 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
return tegra_sflash_start_cpu_based_transfer(tsd, t);
}
-static int tegra_sflash_setup(struct spi_device *spi)
-{
- struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
-
- /* Set speed to the spi max fequency if spi device has not set */
- spi->max_speed_hz = spi->max_speed_hz ? : tsd->spi_max_frequency;
- return 0;
-}
-
static int tegra_sflash_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -430,15 +419,6 @@ static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
return handle_cpu_based_xfer(tsd);
}
-static void tegra_sflash_parse_dt(struct tegra_sflash_data *tsd)
-{
- struct device_node *np = tsd->dev->of_node;
-
- if (of_property_read_u32(np, "spi-max-frequency",
- &tsd->spi_max_frequency))
- tsd->spi_max_frequency = 25000000; /* 25MHz */
-}
-
static struct of_device_id tegra_sflash_of_match[] = {
{ .compatible = "nvidia,tegra20-sflash", },
{}
@@ -467,11 +447,9 @@ static int tegra_sflash_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA;
- master->setup = tegra_sflash_setup;
master->transfer_one_message = tegra_sflash_transfer_one_message;
master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
- master->bus_num = -1;
platform_set_drvdata(pdev, master);
tsd = spi_master_get_devdata(master);
@@ -479,7 +457,9 @@ static int tegra_sflash_probe(struct platform_device *pdev)
tsd->dev = &pdev->dev;
spin_lock_init(&tsd->lock);
- tegra_sflash_parse_dt(tsd);
+ if (of_property_read_u32(tsd->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tsd->base = devm_ioremap_resource(&pdev->dev, r);
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index be3a069879c3..e3c1b93e45d1 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -23,7 +23,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -171,7 +170,6 @@ struct tegra_slink_data {
void __iomem *base;
phys_addr_t phys;
unsigned irq;
- u32 spi_max_frequency;
u32 cur_speed;
struct spi_device *cur_spi;
@@ -761,10 +759,6 @@ static int tegra_slink_setup(struct spi_device *spi)
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
- BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
-
- /* Set speed to the spi max fequency if spi device has not set */
- spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
ret = pm_runtime_get_sync(tspi->dev);
if (ret < 0) {
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
@@ -999,15 +993,6 @@ static irqreturn_t tegra_slink_isr(int irq, void *context_data)
return IRQ_WAKE_THREAD;
}
-static void tegra_slink_parse_dt(struct tegra_slink_data *tspi)
-{
- struct device_node *np = tspi->dev->of_node;
-
- if (of_property_read_u32(np, "spi-max-frequency",
- &tspi->spi_max_frequency))
- tspi->spi_max_frequency = 25000000; /* 25MHz */
-}
-
static const struct tegra_slink_chip_data tegra30_spi_cdata = {
.cs_hold_time = true,
};
@@ -1053,7 +1038,6 @@ static int tegra_slink_probe(struct platform_device *pdev)
master->unprepare_message = tegra_slink_unprepare_message;
master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
- master->bus_num = -1;
platform_set_drvdata(pdev, master);
tspi = spi_master_get_devdata(master);
@@ -1062,7 +1046,9 @@ static int tegra_slink_probe(struct platform_device *pdev)
tspi->chip_data = cdata;
spin_lock_init(&tspi->lock);
- tegra_slink_parse_dt(tspi);
+ if (of_property_read_u32(tspi->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 3d09265b5133..6c211d1910b0 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -429,13 +429,13 @@ static int ti_qspi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
- master->bus_num = -1;
master->flags = SPI_MASTER_HALF_DUPLEX;
master->setup = ti_qspi_setup;
master->auto_runtime_pm = true;
master->transfer_one_message = ti_qspi_start_transfer_one;
master->dev.of_node = pdev->dev.of_node;
- master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
if (!of_property_read_u32(np, "num-cs", &num_cs))
master->num_chipselect = num_cs;
@@ -461,7 +461,6 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (res_mmap == NULL) {
dev_err(&pdev->dev,
"memory mapped resource not required\n");
- return -ENODEV;
}
}
diff --git a/drivers/spi/spi-ti-ssp.c b/drivers/spi/spi-ti-ssp.c
deleted file mode 100644
index 7d20e121e4c1..000000000000
--- a/drivers/spi/spi-ti-ssp.c
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Sequencer Serial Port (SSP) based SPI master driver
- *
- * Copyright (C) 2010 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/mfd/ti_ssp.h>
-
-#define MODE_BITS (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH)
-
-struct ti_ssp_spi {
- struct spi_master *master;
- struct device *dev;
- spinlock_t lock;
- struct list_head msg_queue;
- struct completion complete;
- bool shutdown;
- struct workqueue_struct *workqueue;
- struct work_struct work;
- u8 mode, bpw;
- int cs_active;
- u32 pc_en, pc_dis, pc_wr, pc_rd;
- void (*select)(int cs);
-};
-
-static u32 ti_ssp_spi_rx(struct ti_ssp_spi *hw)
-{
- u32 ret;
-
- ti_ssp_run(hw->dev, hw->pc_rd, 0, &ret);
- return ret;
-}
-
-static void ti_ssp_spi_tx(struct ti_ssp_spi *hw, u32 data)
-{
- ti_ssp_run(hw->dev, hw->pc_wr, data << (32 - hw->bpw), NULL);
-}
-
-static int ti_ssp_spi_txrx(struct ti_ssp_spi *hw, struct spi_message *msg,
- struct spi_transfer *t)
-{
- int count;
-
- if (hw->bpw <= 8) {
- u8 *rx = t->rx_buf;
- const u8 *tx = t->tx_buf;
-
- for (count = 0; count < t->len; count += 1) {
- if (t->tx_buf)
- ti_ssp_spi_tx(hw, *tx++);
- if (t->rx_buf)
- *rx++ = ti_ssp_spi_rx(hw);
- }
- } else if (hw->bpw <= 16) {
- u16 *rx = t->rx_buf;
- const u16 *tx = t->tx_buf;
-
- for (count = 0; count < t->len; count += 2) {
- if (t->tx_buf)
- ti_ssp_spi_tx(hw, *tx++);
- if (t->rx_buf)
- *rx++ = ti_ssp_spi_rx(hw);
- }
- } else {
- u32 *rx = t->rx_buf;
- const u32 *tx = t->tx_buf;
-
- for (count = 0; count < t->len; count += 4) {
- if (t->tx_buf)
- ti_ssp_spi_tx(hw, *tx++);
- if (t->rx_buf)
- *rx++ = ti_ssp_spi_rx(hw);
- }
- }
-
- msg->actual_length += count; /* bytes transferred */
-
- dev_dbg(&msg->spi->dev, "xfer %s%s, %d bytes, %d bpw, count %d%s\n",
- t->tx_buf ? "tx" : "", t->rx_buf ? "rx" : "", t->len,
- hw->bpw, count, (count < t->len) ? " (under)" : "");
-
- return (count < t->len) ? -EIO : 0; /* left over data */
-}
-
-static void ti_ssp_spi_chip_select(struct ti_ssp_spi *hw, int cs_active)
-{
- cs_active = !!cs_active;
- if (cs_active == hw->cs_active)
- return;
- ti_ssp_run(hw->dev, cs_active ? hw->pc_en : hw->pc_dis, 0, NULL);
- hw->cs_active = cs_active;
-}
-
-#define __SHIFT_OUT(bits) (SSP_OPCODE_SHIFT | SSP_OUT_MODE | \
- cs_en | clk | SSP_COUNT((bits) * 2 - 1))
-#define __SHIFT_IN(bits) (SSP_OPCODE_SHIFT | SSP_IN_MODE | \
- cs_en | clk | SSP_COUNT((bits) * 2 - 1))
-
-static int ti_ssp_spi_setup_transfer(struct ti_ssp_spi *hw, u8 bpw, u8 mode)
-{
- int error, idx = 0;
- u32 seqram[16];
- u32 cs_en, cs_dis, clk;
- u32 topbits, botbits;
-
- mode &= MODE_BITS;
- if (mode == hw->mode && bpw == hw->bpw)
- return 0;
-
- cs_en = (mode & SPI_CS_HIGH) ? SSP_CS_HIGH : SSP_CS_LOW;
- cs_dis = (mode & SPI_CS_HIGH) ? SSP_CS_LOW : SSP_CS_HIGH;
- clk = (mode & SPI_CPOL) ? SSP_CLK_HIGH : SSP_CLK_LOW;
-
- /* Construct instructions */
-
- /* Disable Chip Select */
- hw->pc_dis = idx;
- seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_dis | clk;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_dis | clk;
-
- /* Enable Chip Select */
- hw->pc_en = idx;
- seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_en | clk;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
-
- /* Reads and writes need to be split for bpw > 16 */
- topbits = (bpw > 16) ? 16 : bpw;
- botbits = bpw - topbits;
-
- /* Write */
- hw->pc_wr = idx;
- seqram[idx++] = __SHIFT_OUT(topbits) | SSP_ADDR_REG;
- if (botbits)
- seqram[idx++] = __SHIFT_OUT(botbits) | SSP_DATA_REG;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
-
- /* Read */
- hw->pc_rd = idx;
- if (botbits)
- seqram[idx++] = __SHIFT_IN(botbits) | SSP_ADDR_REG;
- seqram[idx++] = __SHIFT_IN(topbits) | SSP_DATA_REG;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
-
- error = ti_ssp_load(hw->dev, 0, seqram, idx);
- if (error < 0)
- return error;
-
- error = ti_ssp_set_mode(hw->dev, ((mode & SPI_CPHA) ?
- 0 : SSP_EARLY_DIN));
- if (error < 0)
- return error;
-
- hw->bpw = bpw;
- hw->mode = mode;
-
- return error;
-}
-
-static void ti_ssp_spi_work(struct work_struct *work)
-{
- struct ti_ssp_spi *hw = container_of(work, struct ti_ssp_spi, work);
-
- spin_lock(&hw->lock);
-
- while (!list_empty(&hw->msg_queue)) {
- struct spi_message *m;
- struct spi_device *spi;
- struct spi_transfer *t = NULL;
- int status = 0;
-
- m = container_of(hw->msg_queue.next, struct spi_message,
- queue);
-
- list_del_init(&m->queue);
-
- spin_unlock(&hw->lock);
-
- spi = m->spi;
-
- if (hw->select)
- hw->select(spi->chip_select);
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- int bpw = spi->bits_per_word;
- int xfer_status;
-
- if (t->bits_per_word)
- bpw = t->bits_per_word;
-
- if (ti_ssp_spi_setup_transfer(hw, bpw, spi->mode) < 0)
- break;
-
- ti_ssp_spi_chip_select(hw, 1);
-
- xfer_status = ti_ssp_spi_txrx(hw, m, t);
- if (xfer_status < 0)
- status = xfer_status;
-
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- if (t->cs_change)
- ti_ssp_spi_chip_select(hw, 0);
- }
-
- ti_ssp_spi_chip_select(hw, 0);
- m->status = status;
- m->complete(m->context);
-
- spin_lock(&hw->lock);
- }
-
- if (hw->shutdown)
- complete(&hw->complete);
-
- spin_unlock(&hw->lock);
-}
-
-static int ti_ssp_spi_transfer(struct spi_device *spi, struct spi_message *m)
-{
- struct ti_ssp_spi *hw;
- struct spi_transfer *t;
- int error = 0;
-
- m->actual_length = 0;
- m->status = -EINPROGRESS;
-
- hw = spi_master_get_devdata(spi->master);
-
- if (list_empty(&m->transfers) || !m->complete)
- return -EINVAL;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->len && !(t->rx_buf || t->tx_buf)) {
- dev_err(&spi->dev, "invalid xfer, no buffer\n");
- return -EINVAL;
- }
-
- if (t->len && t->rx_buf && t->tx_buf) {
- dev_err(&spi->dev, "invalid xfer, full duplex\n");
- return -EINVAL;
- }
- }
-
- spin_lock(&hw->lock);
- if (hw->shutdown) {
- error = -ESHUTDOWN;
- goto error_unlock;
- }
- list_add_tail(&m->queue, &hw->msg_queue);
- queue_work(hw->workqueue, &hw->work);
-error_unlock:
- spin_unlock(&hw->lock);
- return error;
-}
-
-static int ti_ssp_spi_probe(struct platform_device *pdev)
-{
- const struct ti_ssp_spi_data *pdata;
- struct ti_ssp_spi *hw;
- struct spi_master *master;
- struct device *dev = &pdev->dev;
- int error = 0;
-
- pdata = dev_get_platdata(dev);
- if (!pdata) {
- dev_err(dev, "platform data not found\n");
- return -EINVAL;
- }
-
- master = spi_alloc_master(dev, sizeof(struct ti_ssp_spi));
- if (!master) {
- dev_err(dev, "cannot allocate SPI master\n");
- return -ENOMEM;
- }
-
- hw = spi_master_get_devdata(master);
- platform_set_drvdata(pdev, hw);
-
- hw->master = master;
- hw->dev = dev;
- hw->select = pdata->select;
-
- spin_lock_init(&hw->lock);
- init_completion(&hw->complete);
- INIT_LIST_HEAD(&hw->msg_queue);
- INIT_WORK(&hw->work, ti_ssp_spi_work);
-
- hw->workqueue = create_singlethread_workqueue(dev_name(dev));
- if (!hw->workqueue) {
- error = -ENOMEM;
- dev_err(dev, "work queue creation failed\n");
- goto error_wq;
- }
-
- error = ti_ssp_set_iosel(hw->dev, pdata->iosel);
- if (error < 0) {
- dev_err(dev, "io setup failed\n");
- goto error_iosel;
- }
-
- master->bus_num = pdev->id;
- master->num_chipselect = pdata->num_cs;
- master->mode_bits = MODE_BITS;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->flags = SPI_MASTER_HALF_DUPLEX;
- master->transfer = ti_ssp_spi_transfer;
-
- error = spi_register_master(master);
- if (error) {
- dev_err(dev, "master registration failed\n");
- goto error_reg;
- }
-
- return 0;
-
-error_reg:
-error_iosel:
- destroy_workqueue(hw->workqueue);
-error_wq:
- spi_master_put(master);
- return error;
-}
-
-static int ti_ssp_spi_remove(struct platform_device *pdev)
-{
- struct ti_ssp_spi *hw = platform_get_drvdata(pdev);
- int error;
-
- hw->shutdown = 1;
- while (!list_empty(&hw->msg_queue)) {
- error = wait_for_completion_interruptible(&hw->complete);
- if (error < 0) {
- hw->shutdown = 0;
- return error;
- }
- }
- destroy_workqueue(hw->workqueue);
- spi_unregister_master(hw->master);
-
- return 0;
-}
-
-static struct platform_driver ti_ssp_spi_driver = {
- .probe = ti_ssp_spi_probe,
- .remove = ti_ssp_spi_remove,
- .driver = {
- .name = "ti-ssp-spi",
- .owner = THIS_MODULE,
- },
-};
-module_platform_driver(ti_ssp_spi_driver);
-
-MODULE_DESCRIPTION("SSP SPI Master");
-MODULE_AUTHOR("Cyril Chemparathy");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ti-ssp-spi");
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 88eb57e858b3..f406b30af961 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -332,7 +332,7 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
data->transfer_active = false;
wake_up(&data->wait);
} else {
- dev_err(&data->master->dev,
+ dev_vdbg(&data->master->dev,
"%s : Transfer is not completed",
__func__);
}
@@ -464,20 +464,6 @@ static void pch_spi_reset(struct spi_master *master)
pch_spi_writereg(master, PCH_SRST, 0x0);
}
-static int pch_spi_setup(struct spi_device *pspi)
-{
- /* Check baud rate setting */
- /* if baud rate of chip is greater than
- max we can support,return error */
- if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE)
- pspi->max_speed_hz = PCH_MAX_BAUDRATE;
-
- dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__,
- (pspi->mode) & (SPI_CPOL | SPI_CPHA));
-
- return 0;
-}
-
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
@@ -486,23 +472,6 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
int retval;
unsigned long flags;
- /* validate spi message and baud rate */
- if (unlikely(list_empty(&pmsg->transfers) == 1)) {
- dev_err(&pspi->dev, "%s list empty\n", __func__);
- retval = -EINVAL;
- goto err_out;
- }
-
- if (unlikely(pspi->max_speed_hz == 0)) {
- dev_err(&pspi->dev, "%s pch_spi_transfer maxspeed=%d\n",
- __func__, pspi->max_speed_hz);
- retval = -EINVAL;
- goto err_out;
- }
-
- dev_dbg(&pspi->dev,
- "%s Transfer List not empty. Transfer Speed is set.\n", __func__);
-
spin_lock_irqsave(&data->lock, flags);
/* validate Tx/Rx buffers and Transfer length */
list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
@@ -523,10 +492,6 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
dev_dbg(&pspi->dev,
"%s Tx/Rx buffer valid. Transfer length valid\n",
__func__);
-
- /* if baud rate has been specified validate the same */
- if (transfer->speed_hz > PCH_MAX_BAUDRATE)
- transfer->speed_hz = PCH_MAX_BAUDRATE;
}
spin_unlock_irqrestore(&data->lock, flags);
@@ -1151,8 +1116,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
dma->nent = num;
dma->desc_tx = desc_tx;
- dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
- "0x2 to SSNXCR\n", __func__);
+ dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
spin_lock_irqsave(&data->lock, flags);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
@@ -1418,10 +1382,10 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
/* initialize members of SPI master */
master->num_chipselect = PCH_MAX_CS;
- master->setup = pch_spi_setup;
master->transfer = pch_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->max_speed_hz = PCH_MAX_BAUDRATE;
data->board_dat = board_dat;
data->plat_dev = plat_dev;
@@ -1605,8 +1569,7 @@ static struct platform_driver pch_spi_pd_driver = {
.resume = pch_spi_pd_resume
};
-static int pch_spi_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pch_spi_board_data *board_dat;
struct platform_device *pd_dev = NULL;
@@ -1676,6 +1639,8 @@ static int pch_spi_probe(struct pci_dev *pdev,
return 0;
err_platform_device:
+ while (--i >= 0)
+ platform_device_unregister(pd_dev_save->pd_save[i]);
pci_disable_device(pdev);
pci_enable_device:
pci_release_regions(pdev);
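The pch_spi_probe() error path above now unregisters the platform devices that were already created before the failure. A generic sketch of that unwind idiom, with hypothetical names rather than the driver's own helpers:

	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int register_children(struct platform_device **pd, int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			pd[i] = platform_device_register_simple("child", i,
								NULL, 0);
			if (IS_ERR(pd[i])) {
				ret = PTR_ERR(pd[i]);
				goto unwind;
			}
		}
		return 0;

	unwind:
		/* Tear down only what was registered, in reverse order. */
		while (--i >= 0)
			platform_device_unregister(pd[i]);
		return ret;
	}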
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 6191ced514b2..5f183baa91a9 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -80,7 +80,6 @@ struct txx9spi {
void __iomem *membase;
int baseclk;
struct clk *clk;
- u32 max_speed_hz, min_speed_hz;
int last_chipselect;
int last_chipselect_val;
};
@@ -117,9 +116,7 @@ static int txx9spi_setup(struct spi_device *spi)
{
struct txx9spi *c = spi_master_get_devdata(spi->master);
- if (!spi->max_speed_hz
- || spi->max_speed_hz > c->max_speed_hz
- || spi->max_speed_hz < c->min_speed_hz)
+ if (!spi->max_speed_hz)
return -EINVAL;
if (gpio_direction_output(spi->chip_select,
@@ -265,7 +262,8 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
exit:
m->status = status;
- m->complete(m->context);
+ if (m->complete)
+ m->complete(m->context);
/* normally deactivate chipselect ... unless no error and
* cs_change has hinted that the next message will probably
@@ -309,15 +307,8 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
/* check each transfer's parameters */
list_for_each_entry(t, &m->transfers, transfer_list) {
- u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
- u8 bits_per_word = t->bits_per_word;
-
if (!t->tx_buf && !t->rx_buf && t->len)
return -EINVAL;
- if (t->len & ((bits_per_word >> 3) - 1))
- return -EINVAL;
- if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz)
- return -EINVAL;
}
spin_lock_irqsave(&c->lock, flags);
@@ -360,17 +351,12 @@ static int txx9spi_probe(struct platform_device *dev)
goto exit;
}
c->baseclk = clk_get_rate(c->clk);
- c->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1);
- c->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1);
+ master->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1);
+ master->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1);
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- goto exit_busy;
- if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res),
- "spi_txx9"))
- goto exit_busy;
- c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res));
- if (!c->membase)
+ c->membase = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(c->membase))
goto exit_busy;
/* enter config mode */
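The txx9 hunk replaces the request_mem_region()/devm_ioremap() pair with devm_ioremap_resource(), which also validates the resource and returns an ERR_PTR() value rather than NULL on failure. The driver keeps its existing exit_busy label, so it still reports -EBUSY; the more common fragment, shown here only as a sketch, propagates the encoded error instead:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* -EBUSY, -EINVAL, -ENOMEM, ... */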
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 24c40b13dab1..bb478dccf1d8 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -74,15 +73,13 @@ static void spi_xcomm_chipselect(struct spi_xcomm *spi_xcomm,
static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm,
struct spi_device *spi, struct spi_transfer *t, unsigned int *settings)
{
- unsigned int speed;
-
if (t->len > 62)
return -EINVAL;
- speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+ if (t->speed_hz != spi_xcomm->current_speed) {
+ unsigned int divider;
- if (speed != spi_xcomm->current_speed) {
- unsigned int divider = DIV_ROUND_UP(SPI_XCOMM_CLOCK, speed);
+ divider = DIV_ROUND_UP(SPI_XCOMM_CLOCK, t->speed_hz);
if (divider >= 64)
*settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_64;
else if (divider >= 16)
@@ -90,7 +87,7 @@ static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm,
else
*settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_4;
- spi_xcomm->current_speed = speed;
+ spi_xcomm->current_speed = t->speed_hz;
}
if (spi->mode & SPI_CPOL)
@@ -148,8 +145,6 @@ static int spi_xcomm_transfer_one(struct spi_master *master,
int status = 0;
bool is_last;
- is_first = true;
-
spi_xcomm_chipselect(spi_xcomm, spi, true);
list_for_each_entry(t, &msg->transfers, transfer_list) {
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 6d4ce4615163..a3b0b9944bf0 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -88,10 +87,10 @@ struct xilinx_spi {
const u8 *tx_ptr; /* pointer in the Tx buffer */
int remaining_bytes; /* the number of bytes left to transfer */
u8 bits_per_word;
- unsigned int (*read_fn) (void __iomem *);
- void (*write_fn) (u32, void __iomem *);
- void (*tx_fn) (struct xilinx_spi *);
- void (*rx_fn) (struct xilinx_spi *);
+ unsigned int (*read_fn)(void __iomem *);
+ void (*write_fn)(u32, void __iomem *);
+ void (*tx_fn)(struct xilinx_spi *);
+ void (*rx_fn)(struct xilinx_spi *);
};
static void xspi_write32(u32 val, void __iomem *addr)
@@ -209,26 +208,11 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
}
/* spi_bitbang requires custom setup_transfer() to be defined if there is a
- * custom txrx_bufs(). We have nothing to setup here as the SPI IP block
- * supports 8 or 16 bits per word which cannot be changed in software.
- * SPI clock can't be changed in software either.
- * Check for correct bits per word. Chip select delay calculations could be
- * added here as soon as bitbang_work() can be made aware of the delay value.
+ * custom txrx_bufs().
*/
static int xilinx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
- u8 bits_per_word;
-
- bits_per_word = (t && t->bits_per_word)
- ? t->bits_per_word : spi->bits_per_word;
- if (bits_per_word != xspi->bits_per_word) {
- dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, bits_per_word);
- return -EINVAL;
- }
-
return 0;
}
@@ -407,6 +391,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
xspi->write_fn = xspi_write32_be;
}
+ master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
xspi->bits_per_word = bits_per_word;
if (xspi->bits_per_word == 8) {
xspi->tx_fn = xspi_tx8;
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
new file mode 100644
index 000000000000..41e158187f9d
--- /dev/null
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -0,0 +1,170 @@
+/*
+ * Xtensa xtfpga SPI controller driver
+ *
+ * Copyright (c) 2014 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#define XTFPGA_SPI_NAME "xtfpga_spi"
+
+#define XTFPGA_SPI_START 0x0
+#define XTFPGA_SPI_BUSY 0x4
+#define XTFPGA_SPI_DATA 0x8
+
+#define BUSY_WAIT_US 100
+
+struct xtfpga_spi {
+ struct spi_bitbang bitbang;
+ void __iomem *regs;
+ u32 data;
+ unsigned data_sz;
+};
+
+static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
+ unsigned addr, u32 val)
+{
+ iowrite32(val, spi->regs + addr);
+}
+
+static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
+ unsigned addr)
+{
+ return ioread32(spi->regs + addr);
+}
+
+static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
+{
+ unsigned i;
+ for (i = 0; xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY) &&
+ i < BUSY_WAIT_US; ++i)
+ udelay(1);
+ WARN_ON_ONCE(i == BUSY_WAIT_US);
+}
+
+static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
+ u32 v, u8 bits)
+{
+ struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+
+ xspi->data = (xspi->data << bits) | (v & GENMASK(bits - 1, 0));
+ xspi->data_sz += bits;
+ if (xspi->data_sz >= 16) {
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_DATA,
+ xspi->data >> (xspi->data_sz - 16));
+ xspi->data_sz -= 16;
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 1);
+ xtfpga_spi_wait_busy(xspi);
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
+ }
+
+ return 0;
+}
+
+static void xtfpga_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+
+ WARN_ON(xspi->data_sz != 0);
+ xspi->data_sz = 0;
+}
+
+static int xtfpga_spi_probe(struct platform_device *pdev)
+{
+ struct xtfpga_spi *xspi;
+ struct resource *mem;
+ int ret;
+ struct spi_master *master;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
+ if (!master)
+ return -ENOMEM;
+
+ master->flags = SPI_MASTER_NO_RX;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->bus_num = pdev->dev.id;
+ master->dev.of_node = pdev->dev.of_node;
+
+ xspi = spi_master_get_devdata(master);
+ xspi->bitbang.master = master;
+ xspi->bitbang.chipselect = xtfpga_spi_chipselect;
+ xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ xspi->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(xspi->regs)) {
+ ret = PTR_ERR(xspi->regs);
+ goto err;
+ }
+
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
+ usleep_range(1000, 2000);
+ if (xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY)) {
+ dev_err(&pdev->dev, "Device stuck in busy state\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ret = spi_bitbang_start(&xspi->bitbang);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_bitbang_start failed\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, master);
+ return 0;
+err:
+ spi_master_put(master);
+ return ret;
+}
+
+static int xtfpga_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct xtfpga_spi *xspi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&xspi->bitbang);
+ spi_master_put(master);
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:" XTFPGA_SPI_NAME);
+
+#ifdef CONFIG_OF
+static const struct of_device_id xtfpga_spi_of_match[] = {
+ { .compatible = "cdns,xtfpga-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xtfpga_spi_of_match);
+#endif
+
+static struct platform_driver xtfpga_spi_driver = {
+ .probe = xtfpga_spi_probe,
+ .remove = xtfpga_spi_remove,
+ .driver = {
+ .name = XTFPGA_SPI_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(xtfpga_spi_of_match),
+ },
+};
+module_platform_driver(xtfpga_spi_driver);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_DESCRIPTION("xtensa xtfpga SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index d0b28bba38be..4eb9bf02996c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -24,6 +24,8 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -255,13 +257,12 @@ EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
- struct spi_device *spi = to_spi_device(dev);
int ret;
- acpi_dev_pm_attach(&spi->dev, true);
- ret = sdrv->probe(spi);
+ acpi_dev_pm_attach(dev, true);
+ ret = sdrv->probe(to_spi_device(dev));
if (ret)
- acpi_dev_pm_detach(&spi->dev, true);
+ acpi_dev_pm_detach(dev, true);
return ret;
}
@@ -269,11 +270,10 @@ static int spi_drv_probe(struct device *dev)
static int spi_drv_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
- struct spi_device *spi = to_spi_device(dev);
int ret;
- ret = sdrv->remove(spi);
- acpi_dev_pm_detach(&spi->dev, true);
+ ret = sdrv->remove(to_spi_device(dev));
+ acpi_dev_pm_detach(dev, true);
return ret;
}
@@ -580,6 +580,169 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
spi->master->set_cs(spi, !enable);
}
+static int spi_map_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ const bool vmalloced_buf = is_vmalloc_addr(buf);
+ const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+ const int sgs = DIV_ROUND_UP(len, desc_len);
+ struct page *vm_page;
+ void *sg_buf;
+ size_t min;
+ int i, ret;
+
+ ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < sgs; i++) {
+ min = min_t(size_t, len, desc_len);
+
+ if (vmalloced_buf) {
+ vm_page = vmalloc_to_page(buf);
+ if (!vm_page) {
+ sg_free_table(sgt);
+ return -ENOMEM;
+ }
+ sg_buf = page_address(vm_page) +
+ ((size_t)buf & ~PAGE_MASK);
+ } else {
+ sg_buf = buf;
+ }
+
+ sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+ buf += min;
+ len -= min;
+ }
+
+ ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+ if (ret < 0) {
+ sg_free_table(sgt);
+ return ret;
+ }
+
+ sgt->nents = ret;
+
+ return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ if (sgt->orig_nents) {
+ dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+ sg_free_table(sgt);
+ }
+}
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct device *tx_dev, *rx_dev;
+ struct spi_transfer *xfer;
+ void *tmp;
+ unsigned int max_tx, max_rx;
+ int ret;
+
+ if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+ max_tx = 0;
+ max_rx = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if ((master->flags & SPI_MASTER_MUST_TX) &&
+ !xfer->tx_buf)
+ max_tx = max(xfer->len, max_tx);
+ if ((master->flags & SPI_MASTER_MUST_RX) &&
+ !xfer->rx_buf)
+ max_rx = max(xfer->len, max_rx);
+ }
+
+ if (max_tx) {
+ tmp = krealloc(master->dummy_tx, max_tx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_tx = tmp;
+ memset(tmp, 0, max_tx);
+ }
+
+ if (max_rx) {
+ tmp = krealloc(master->dummy_rx, max_rx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_rx = tmp;
+ }
+
+ if (max_tx || max_rx) {
+ list_for_each_entry(xfer, &msg->transfers,
+ transfer_list) {
+ if (!xfer->tx_buf)
+ xfer->tx_buf = master->dummy_tx;
+ if (!xfer->rx_buf)
+ xfer->rx_buf = master->dummy_rx;
+ }
+ }
+ }
+
+ if (!master->can_dma)
+ return 0;
+
+ tx_dev = &master->dma_tx->dev->device;
+ rx_dev = &master->dma_rx->dev->device;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ if (xfer->tx_buf != NULL) {
+ ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+ (void *)xfer->tx_buf, xfer->len,
+ DMA_TO_DEVICE);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (xfer->rx_buf != NULL) {
+ ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE);
+ if (ret != 0) {
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE);
+ return ret;
+ }
+ }
+ }
+
+ master->cur_msg_mapped = true;
+
+ return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ struct device *tx_dev, *rx_dev;
+
+ if (!master->cur_msg_mapped || !master->can_dma)
+ return 0;
+
+ tx_dev = &master->dma_tx->dev->device;
+ rx_dev = &master->dma_rx->dev->device;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
@@ -591,9 +754,9 @@ static int spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct spi_transfer *xfer;
- bool cur_cs = true;
bool keep_cs = false;
int ret = 0;
+ int ms = 1;
spi_set_cs(msg->spi, true);
@@ -611,7 +774,16 @@ static int spi_transfer_one_message(struct spi_master *master,
if (ret > 0) {
ret = 0;
- wait_for_completion(&master->xfer_completion);
+ ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+ ms += 10; /* some tolerance */
+
+ ms = wait_for_completion_timeout(&master->xfer_completion,
+ msecs_to_jiffies(ms));
+ }
+
+ if (ms == 0) {
+ dev_err(&msg->spi->dev, "SPI transfer timed out\n");
+ msg->status = -ETIMEDOUT;
}
trace_spi_transfer_stop(msg, xfer);
@@ -627,8 +799,9 @@ static int spi_transfer_one_message(struct spi_master *master,
&msg->transfers)) {
keep_cs = true;
} else {
- cur_cs = !cur_cs;
- spi_set_cs(msg->spi, cur_cs);
+ spi_set_cs(msg->spi, false);
+ udelay(10);
+ spi_set_cs(msg->spi, true);
}
}
@@ -686,6 +859,10 @@ static void spi_pump_messages(struct kthread_work *work)
}
master->busy = false;
spin_unlock_irqrestore(&master->queue_lock, flags);
+ kfree(master->dummy_rx);
+ master->dummy_rx = NULL;
+ kfree(master->dummy_tx);
+ master->dummy_tx = NULL;
if (master->unprepare_transfer_hardware &&
master->unprepare_transfer_hardware(master))
dev_err(&master->dev,
@@ -752,6 +929,13 @@ static void spi_pump_messages(struct kthread_work *work)
master->cur_msg_prepared = true;
}
+ ret = spi_map_msg(master, master->cur_msg);
+ if (ret) {
+ master->cur_msg->status = ret;
+ spi_finalize_current_message(master);
+ return;
+ }
+
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
@@ -839,6 +1023,8 @@ void spi_finalize_current_message(struct spi_master *master)
queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
+ spi_unmap_msg(master, mesg);
+
if (master->cur_msg_prepared && master->unprepare_message) {
ret = master->unprepare_message(master, mesg);
if (ret) {
@@ -892,7 +1078,7 @@ static int spi_stop_queue(struct spi_master *master)
*/
while ((!list_empty(&master->queue) || master->busy) && limit--) {
spin_unlock_irqrestore(&master->queue_lock, flags);
- msleep(10);
+ usleep_range(10000, 11000);
spin_lock_irqsave(&master->queue_lock, flags);
}
@@ -1372,6 +1558,8 @@ int spi_register_master(struct spi_master *master)
mutex_init(&master->bus_lock_mutex);
master->bus_lock_flag = 0;
init_completion(&master->xfer_completion);
+ if (!master->max_dma_len)
+ master->max_dma_len = INT_MAX;
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.
@@ -1597,6 +1785,9 @@ int spi_setup(struct spi_device *spi)
if (!spi->bits_per_word)
spi->bits_per_word = 8;
+ if (!spi->max_speed_hz)
+ spi->max_speed_hz = spi->master->max_speed_hz;
+
if (spi->master->setup)
status = spi->master->setup(spi);
@@ -1617,11 +1808,10 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
struct spi_transfer *xfer;
+ int w_size;
if (list_empty(&message->transfers))
return -EINVAL;
- if (!message->complete)
- return -EINVAL;
/* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
@@ -1652,12 +1842,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
message->frame_length += xfer->len;
if (!xfer->bits_per_word)
xfer->bits_per_word = spi->bits_per_word;
- if (!xfer->speed_hz) {
+
+ if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
- if (master->max_speed_hz &&
- xfer->speed_hz > master->max_speed_hz)
- xfer->speed_hz = master->max_speed_hz;
- }
+
+ if (master->max_speed_hz &&
+ xfer->speed_hz > master->max_speed_hz)
+ xfer->speed_hz = master->max_speed_hz;
if (master->bits_per_word_mask) {
/* Only 32 bits fit in the mask */
@@ -1668,12 +1859,24 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return -EINVAL;
}
+ /*
+ * SPI transfer length should be a multiple of the SPI word size,
+ * where the SPI word size is a power-of-two number of bytes
+ */
+ if (xfer->bits_per_word <= 8)
+ w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ w_size = 2;
+ else
+ w_size = 4;
+
+ /* No partial transfers accepted */
+ if (xfer->len % w_size)
+ return -EINVAL;
+
if (xfer->speed_hz && master->min_speed_hz &&
xfer->speed_hz < master->min_speed_hz)
return -EINVAL;
- if (xfer->speed_hz && master->max_speed_hz &&
- xfer->speed_hz > master->max_speed_hz)
- return -EINVAL;
if (xfer->tx_buf && !xfer->tx_nbits)
xfer->tx_nbits = SPI_NBITS_SINGLE;
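The spi.c hunk above bounds each transfer with wait_for_completion_timeout(), budgeting the nominal wire time plus 10 ms of slack. A quick worked example of the formula (my arithmetic, not from the patch):

	/* 4096 bytes at 1 MHz:
	 *   ms  = 4096 * 8 * 1000 / 1000000 = 32
	 *   ms += 10;                          -> 42 ms budget
	 * wait_for_completion_timeout() returns the remaining time and 0
	 * only on timeout, so a completed transfer leaves ms > 0 and the
	 * -ETIMEDOUT branch is skipped.
	 */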
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d7c6e36021e8..e3bc23bb5883 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -73,7 +73,8 @@ static DECLARE_BITMAP(minors, N_SPI_MINORS);
*/
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
- | SPI_NO_CS | SPI_READY)
+ | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
+ | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)
struct spidev_data {
dev_t devt;
@@ -265,6 +266,8 @@ static int spidev_message(struct spidev_data *spidev,
buf += k_tmp->len;
k_tmp->cs_change = !!u_tmp->cs_change;
+ k_tmp->tx_nbits = u_tmp->tx_nbits;
+ k_tmp->rx_nbits = u_tmp->rx_nbits;
k_tmp->bits_per_word = u_tmp->bits_per_word;
k_tmp->delay_usecs = u_tmp->delay_usecs;
k_tmp->speed_hz = u_tmp->speed_hz;
@@ -359,6 +362,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = __put_user(spi->mode & SPI_MODE_MASK,
(__u8 __user *)arg);
break;
+ case SPI_IOC_RD_MODE32:
+ retval = __put_user(spi->mode & SPI_MODE_MASK,
+ (__u32 __user *)arg);
+ break;
case SPI_IOC_RD_LSB_FIRST:
retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
(__u8 __user *)arg);
@@ -372,9 +379,13 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* write requests */
case SPI_IOC_WR_MODE:
- retval = __get_user(tmp, (u8 __user *)arg);
+ case SPI_IOC_WR_MODE32:
+ if (cmd == SPI_IOC_WR_MODE)
+ retval = __get_user(tmp, (u8 __user *)arg);
+ else
+ retval = __get_user(tmp, (u32 __user *)arg);
if (retval == 0) {
- u8 save = spi->mode;
+ u32 save = spi->mode;
if (tmp & ~SPI_MODE_MASK) {
retval = -EINVAL;
@@ -382,18 +393,18 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
tmp |= spi->mode & ~SPI_MODE_MASK;
- spi->mode = (u8)tmp;
+ spi->mode = (u16)tmp;
retval = spi_setup(spi);
if (retval < 0)
spi->mode = save;
else
- dev_dbg(&spi->dev, "spi mode %02x\n", tmp);
+ dev_dbg(&spi->dev, "spi mode %x\n", tmp);
}
break;
case SPI_IOC_WR_LSB_FIRST:
retval = __get_user(tmp, (__u8 __user *)arg);
if (retval == 0) {
- u8 save = spi->mode;
+ u32 save = spi->mode;
if (tmp)
spi->mode |= SPI_LSB_FIRST;
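With the spidev changes above, userspace can read and write the full 32-bit mode word, including the dual/quad flags. A hedged userspace sketch (the device path and flags are examples; it assumes a linux/spi/spidev.h that exports SPI_IOC_{RD,WR}_MODE32 and SPI_RX_QUAD):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/spi/spidev.h>

	int main(void)
	{
		uint32_t mode = SPI_MODE_0 | SPI_RX_QUAD;
		int fd = open("/dev/spidev0.0", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, SPI_IOC_WR_MODE32, &mode) < 0)
			perror("SPI_IOC_WR_MODE32");
		if (ioctl(fd, SPI_IOC_RD_MODE32, &mode) < 0)
			perror("SPI_IOC_RD_MODE32");
		printf("mode is now 0x%x\n", mode);
		close(fd);
		return 0;
	}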
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
new file mode 100644
index 000000000000..bf1295e19f89
--- /dev/null
+++ b/drivers/spmi/Kconfig
@@ -0,0 +1,27 @@
+#
+# SPMI driver configuration
+#
+menuconfig SPMI
+ tristate "SPMI support"
+ help
+ SPMI (System Power Management Interface) is a two-wire
+ serial interface between baseband and application processors
+ and Power Management Integrated Circuits (PMICs).
+
+if SPMI
+
+config SPMI_MSM_PMIC_ARB
+ tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)"
+ depends on ARM
+ depends on IRQ_DOMAIN
+ depends on ARCH_QCOM || COMPILE_TEST
+ default ARCH_QCOM
+ help
+ If you say yes to this option, support will be included for the
+ built-in SPMI PMIC Arbiter interface on Qualcomm MSM family
+ processors.
+
+ This is required for communicating with Qualcomm PMICs and
+ other devices that have the SPMI interface.
+
+endif
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
new file mode 100644
index 000000000000..fc75104a5aab
--- /dev/null
+++ b/drivers/spmi/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for kernel SPMI framework.
+#
+obj-$(CONFIG_SPMI) += spmi.o
+
+obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
new file mode 100644
index 000000000000..246e03a18c94
--- /dev/null
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -0,0 +1,778 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+
+/* PMIC Arbiter configuration registers */
+#define PMIC_ARB_VERSION 0x0000
+#define PMIC_ARB_INT_EN 0x0004
+
+/* PMIC Arbiter channel registers */
+#define PMIC_ARB_CMD(N) (0x0800 + (0x80 * (N)))
+#define PMIC_ARB_CONFIG(N) (0x0804 + (0x80 * (N)))
+#define PMIC_ARB_STATUS(N) (0x0808 + (0x80 * (N)))
+#define PMIC_ARB_WDATA0(N) (0x0810 + (0x80 * (N)))
+#define PMIC_ARB_WDATA1(N) (0x0814 + (0x80 * (N)))
+#define PMIC_ARB_RDATA0(N) (0x0818 + (0x80 * (N)))
+#define PMIC_ARB_RDATA1(N) (0x081C + (0x80 * (N)))
+
+/* Interrupt Controller */
+#define SPMI_PIC_OWNER_ACC_STATUS(M, N) (0x0000 + ((32 * (M)) + (4 * (N))))
+#define SPMI_PIC_ACC_ENABLE(N) (0x0200 + (4 * (N)))
+#define SPMI_PIC_IRQ_STATUS(N) (0x0600 + (4 * (N)))
+#define SPMI_PIC_IRQ_CLEAR(N) (0x0A00 + (4 * (N)))
+
+/* Mapping Table */
+#define SPMI_MAPPING_TABLE_REG(N) (0x0B00 + (4 * (N)))
+#define SPMI_MAPPING_BIT_INDEX(X) (((X) >> 18) & 0xF)
+#define SPMI_MAPPING_BIT_IS_0_FLAG(X) (((X) >> 17) & 0x1)
+#define SPMI_MAPPING_BIT_IS_0_RESULT(X) (((X) >> 9) & 0xFF)
+#define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1)
+#define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF)
+
+#define SPMI_MAPPING_TABLE_LEN 255
+#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */
+
+/* Ownership Table */
+#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N)))
+#define SPMI_OWNERSHIP_PERIPH2OWNER(X) ((X) & 0x7)
+
+/* Channel Status fields */
+enum pmic_arb_chnl_status {
+ PMIC_ARB_STATUS_DONE = (1 << 0),
+ PMIC_ARB_STATUS_FAILURE = (1 << 1),
+ PMIC_ARB_STATUS_DENIED = (1 << 2),
+ PMIC_ARB_STATUS_DROPPED = (1 << 3),
+};
+
+/* Command register fields */
+#define PMIC_ARB_CMD_MAX_BYTE_COUNT 8
+
+/* Command Opcodes */
+enum pmic_arb_cmd_op_code {
+ PMIC_ARB_OP_EXT_WRITEL = 0,
+ PMIC_ARB_OP_EXT_READL = 1,
+ PMIC_ARB_OP_EXT_WRITE = 2,
+ PMIC_ARB_OP_RESET = 3,
+ PMIC_ARB_OP_SLEEP = 4,
+ PMIC_ARB_OP_SHUTDOWN = 5,
+ PMIC_ARB_OP_WAKEUP = 6,
+ PMIC_ARB_OP_AUTHENTICATE = 7,
+ PMIC_ARB_OP_MSTR_READ = 8,
+ PMIC_ARB_OP_MSTR_WRITE = 9,
+ PMIC_ARB_OP_EXT_READ = 13,
+ PMIC_ARB_OP_WRITE = 14,
+ PMIC_ARB_OP_READ = 15,
+ PMIC_ARB_OP_ZERO_WRITE = 16,
+};
+
+/* Maximum number of supported PMIC peripherals */
+#define PMIC_ARB_MAX_PERIPHS 256
+#define PMIC_ARB_PERIPH_ID_VALID (1 << 15)
+#define PMIC_ARB_TIMEOUT_US 100
+#define PMIC_ARB_MAX_TRANS_BYTES (8)
+
+#define PMIC_ARB_APID_MASK 0xFF
+#define PMIC_ARB_PPID_MASK 0xFFF
+
+/* interrupt enable bit */
+#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
+
+/**
+ * spmi_pmic_arb_dev - SPMI PMIC Arbiter object
+ *
+ * @base: address of the PMIC Arbiter core registers.
+ * @intr: address of the SPMI interrupt control registers.
+ * @cnfg: address of the PMIC Arbiter configuration registers.
+ * @lock: lock to synchronize accesses.
+ * @channel: which channel to use for accesses.
+ * @irq: PMIC ARB interrupt.
+ * @ee: the current Execution Environment
+ * @min_apid: minimum APID (used for bounding IRQ search)
+ * @max_apid: maximum APID
+ * @mapping_table: in-memory copy of PPID -> APID mapping table.
+ * @domain: irq domain object for PMIC IRQ domain
+ * @spmic: SPMI controller object
+ * @apid_to_ppid: cached mapping from APID to PPID
+ */
+struct spmi_pmic_arb_dev {
+ void __iomem *base;
+ void __iomem *intr;
+ void __iomem *cnfg;
+ raw_spinlock_t lock;
+ u8 channel;
+ int irq;
+ u8 ee;
+ u8 min_apid;
+ u8 max_apid;
+ u32 mapping_table[SPMI_MAPPING_TABLE_LEN];
+ struct irq_domain *domain;
+ struct spmi_controller *spmic;
+ u16 apid_to_ppid[256];
+};
+
+static inline u32 pmic_arb_base_read(struct spmi_pmic_arb_dev *dev, u32 offset)
+{
+ return readl_relaxed(dev->base + offset);
+}
+
+static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
+ u32 offset, u32 val)
+{
+ writel_relaxed(val, dev->base + offset);
+}
+
+/**
+ * pa_read_data: reads a pmic-arb register and copies 1..4 bytes to buf
+ * @bc: byte count -1. range: 0..3
+ * @reg: register's address
+ * @buf: output parameter, length must be bc + 1
+ */
+static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
+{
+ u32 data = pmic_arb_base_read(dev, reg);
+ memcpy(buf, &data, (bc & 3) + 1);
+}
+
+/**
+ * pa_write_data: writes 1..4 bytes from buf to a pmic-arb register
+ * @bc: byte-count -1. range: 0..3.
+ * @reg: register's address.
+ * @buf: buffer to write. length must be bc + 1.
+ */
+static void
+pa_write_data(struct spmi_pmic_arb_dev *dev, const u8 *buf, u32 reg, u8 bc)
+{
+ u32 data = 0;
+ memcpy(&data, buf, (bc & 3) + 1);
+ pmic_arb_base_write(dev, reg, data);
+}
+
+static int pmic_arb_wait_for_done(struct spmi_controller *ctrl)
+{
+ struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
+ u32 status = 0;
+ u32 timeout = PMIC_ARB_TIMEOUT_US;
+ u32 offset = PMIC_ARB_STATUS(dev->channel);
+
+ while (timeout--) {
+ status = pmic_arb_base_read(dev, offset);
+
+ if (status & PMIC_ARB_STATUS_DONE) {
+ if (status & PMIC_ARB_STATUS_DENIED) {
+ dev_err(&ctrl->dev,
+ "%s: transaction denied (0x%x)\n",
+ __func__, status);
+ return -EPERM;
+ }
+
+ if (status & PMIC_ARB_STATUS_FAILURE) {
+ dev_err(&ctrl->dev,
+ "%s: transaction failed (0x%x)\n",
+ __func__, status);
+ return -EIO;
+ }
+
+ if (status & PMIC_ARB_STATUS_DROPPED) {
+ dev_err(&ctrl->dev,
+ "%s: transaction dropped (0x%x)\n",
+ __func__, status);
+ return -EIO;
+ }
+
+ return 0;
+ }
+ udelay(1);
+ }
+
+ dev_err(&ctrl->dev,
+ "%s: timeout, status 0x%x\n",
+ __func__, status);
+ return -ETIMEDOUT;
+}
+
+/* Non-data command */
+static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
+{
+ struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ u32 cmd;
+ int rc;
+
+ /* Check for valid non-data command */
+ if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
+ return -EINVAL;
+
+ cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
+
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
+ rc = pmic_arb_wait_for_done(ctrl);
+ raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+
+ return rc;
+}
+
+static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ u8 bc = len - 1;
+ u32 cmd;
+ int rc;
+
+ if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
+ dev_err(&ctrl->dev,
+ "pmic-arb supports 1..%d bytes per trans, but %d requested",
+ PMIC_ARB_MAX_TRANS_BYTES, len);
+ return -EINVAL;
+ }
+
+ /* Check the opcode */
+ if (opc >= 0x60 && opc <= 0x7F)
+ opc = PMIC_ARB_OP_READ;
+ else if (opc >= 0x20 && opc <= 0x2F)
+ opc = PMIC_ARB_OP_EXT_READ;
+ else if (opc >= 0x38 && opc <= 0x3F)
+ opc = PMIC_ARB_OP_EXT_READL;
+ else
+ return -EINVAL;
+
+ cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
+
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
+ rc = pmic_arb_wait_for_done(ctrl);
+ if (rc)
+ goto done;
+
+ pa_read_data(pmic_arb, buf, PMIC_ARB_RDATA0(pmic_arb->channel),
+ min_t(u8, bc, 3));
+
+ if (bc > 3)
+ pa_read_data(pmic_arb, buf + 4,
+ PMIC_ARB_RDATA1(pmic_arb->channel), bc - 4);
+
+done:
+ raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+ return rc;
+}
+
+static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, const u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ u8 bc = len - 1;
+ u32 cmd;
+ int rc;
+
+ if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
+ dev_err(&ctrl->dev,
+ "pmic-arb supports 1..%d bytes per trans, but:%d requested",
+ PMIC_ARB_MAX_TRANS_BYTES, len);
+ return -EINVAL;
+ }
+
+ /* Check the opcode */
+ if (opc >= 0x40 && opc <= 0x5F)
+ opc = PMIC_ARB_OP_WRITE;
+ else if (opc >= 0x00 && opc <= 0x0F)
+ opc = PMIC_ARB_OP_EXT_WRITE;
+ else if (opc >= 0x30 && opc <= 0x37)
+ opc = PMIC_ARB_OP_EXT_WRITEL;
+ else if (opc >= 0x80 && opc <= 0xFF)
+ opc = PMIC_ARB_OP_ZERO_WRITE;
+ else
+ return -EINVAL;
+
+ cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
+
+ /* Write data to FIFOs */
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ pa_write_data(pmic_arb, buf, PMIC_ARB_WDATA0(pmic_arb->channel)
+ , min_t(u8, bc, 3));
+ if (bc > 3)
+ pa_write_data(pmic_arb, buf + 4,
+ PMIC_ARB_WDATA1(pmic_arb->channel), bc - 4);
+
+ /* Start the transaction */
+ pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
+ rc = pmic_arb_wait_for_done(ctrl);
+ raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+
+ return rc;
+}
+
+enum qpnpint_regs {
+ QPNPINT_REG_RT_STS = 0x10,
+ QPNPINT_REG_SET_TYPE = 0x11,
+ QPNPINT_REG_POLARITY_HIGH = 0x12,
+ QPNPINT_REG_POLARITY_LOW = 0x13,
+ QPNPINT_REG_LATCHED_CLR = 0x14,
+ QPNPINT_REG_EN_SET = 0x15,
+ QPNPINT_REG_EN_CLR = 0x16,
+ QPNPINT_REG_LATCHED_STS = 0x18,
+};
+
+struct spmi_pmic_arb_qpnpint_type {
+ u8 type; /* 1 -> edge */
+ u8 polarity_high;
+ u8 polarity_low;
+} __packed;
+
+/* Simplified accessor functions for irqchip callbacks */
+static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
+ size_t len)
+{
+ struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
+ u8 sid = d->hwirq >> 24;
+ u8 per = d->hwirq >> 16;
+
+ if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+ (per << 8) + reg, buf, len))
+ dev_err_ratelimited(&pa->spmic->dev,
+ "failed irqchip transaction on %x\n",
+ d->irq);
+}
+
+static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
+{
+ struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
+ u8 sid = d->hwirq >> 24;
+ u8 per = d->hwirq >> 16;
+
+ if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
+ (per << 8) + reg, buf, len))
+ dev_err_ratelimited(&pa->spmic->dev,
+ "failed irqchip transaction on %x\n",
+ d->irq);
+}
+
+static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
+{
+ unsigned int irq;
+ u32 status;
+ int id;
+
+ status = readl_relaxed(pa->intr + SPMI_PIC_IRQ_STATUS(apid));
+ while (status) {
+ id = ffs(status) - 1;
+ status &= ~(1 << id);
+ irq = irq_find_mapping(pa->domain,
+ pa->apid_to_ppid[apid] << 16
+ | id << 8
+ | apid);
+ generic_handle_irq(irq);
+ }
+}
+
+static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct spmi_pmic_arb_dev *pa = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+ void __iomem *intr = pa->intr;
+ int first = pa->min_apid >> 5;
+ int last = pa->max_apid >> 5;
+ u32 status;
+ int i, id;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = first; i <= last; ++i) {
+ status = readl_relaxed(intr +
+ SPMI_PIC_OWNER_ACC_STATUS(pa->ee, i));
+ while (status) {
+ id = ffs(status) - 1;
+ status &= ~(1 << id);
+ periph_interrupt(pa, id + i * 32);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void qpnpint_irq_ack(struct irq_data *d)
+{
+ struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
+ u8 irq = d->hwirq >> 8;
+ u8 apid = d->hwirq;
+ unsigned long flags;
+ u8 data;
+
+ raw_spin_lock_irqsave(&pa->lock, flags);
+ writel_relaxed(1 << irq, pa->intr + SPMI_PIC_IRQ_CLEAR(apid));
+ raw_spin_unlock_irqrestore(&pa->lock, flags);
+
+ data = 1 << irq;
+ qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
+}
+
+static void qpnpint_irq_mask(struct irq_data *d)
+{
+ struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
+ u8 irq = d->hwirq >> 8;
+ u8 apid = d->hwirq;
+ unsigned long flags;
+ u32 status;
+ u8 data;
+
+ raw_spin_lock_irqsave(&pa->lock, flags);
+ status = readl_relaxed(pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ if (status & SPMI_PIC_ACC_ENABLE_BIT) {
+ status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
+ writel_relaxed(status, pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ }
+ raw_spin_unlock_irqrestore(&pa->lock, flags);
+
+ data = 1 << irq;
+ qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
+}
+
+static void qpnpint_irq_unmask(struct irq_data *d)
+{
+ struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
+ u8 irq = d->hwirq >> 8;
+ u8 apid = d->hwirq;
+ unsigned long flags;
+ u32 status;
+ u8 data;
+
+ raw_spin_lock_irqsave(&pa->lock, flags);
+ status = readl_relaxed(pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
+ writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT,
+ pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ }
+ raw_spin_unlock_irqrestore(&pa->lock, flags);
+
+ data = 1 << irq;
+ qpnpint_spmi_write(d, QPNPINT_REG_EN_SET, &data, 1);
+}
+
+static void qpnpint_irq_enable(struct irq_data *d)
+{
+ u8 irq = d->hwirq >> 8;
+ u8 data;
+
+ qpnpint_irq_unmask(d);
+
+ data = 1 << irq;
+ qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
+}
+
+static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct spmi_pmic_arb_qpnpint_type type;
+ u8 irq = d->hwirq >> 8;
+
+ qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+
+ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ type.type |= 1 << irq;
+ if (flow_type & IRQF_TRIGGER_RISING)
+ type.polarity_high |= 1 << irq;
+ if (flow_type & IRQF_TRIGGER_FALLING)
+ type.polarity_low |= 1 << irq;
+ } else {
+ if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
+ (flow_type & (IRQF_TRIGGER_LOW)))
+ return -EINVAL;
+
+ type.type &= ~(1 << irq); /* level trig */
+ if (flow_type & IRQF_TRIGGER_HIGH)
+ type.polarity_high |= 1 << irq;
+ else
+ type.polarity_low |= 1 << irq;
+ }
+
+ qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+ return 0;
+}
+
+static struct irq_chip pmic_arb_irqchip = {
+ .name = "pmic_arb",
+ .irq_enable = qpnpint_irq_enable,
+ .irq_ack = qpnpint_irq_ack,
+ .irq_mask = qpnpint_irq_mask,
+ .irq_unmask = qpnpint_irq_unmask,
+ .irq_set_type = qpnpint_irq_set_type,
+ .flags = IRQCHIP_MASK_ON_SUSPEND
+ | IRQCHIP_SKIP_SET_WAKE,
+};
+
+struct spmi_pmic_arb_irq_spec {
+ unsigned slave:4;
+ unsigned per:8;
+ unsigned irq:3;
+};
+
+static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
+ struct spmi_pmic_arb_irq_spec *spec,
+ u8 *apid)
+{
+ u16 ppid = spec->slave << 8 | spec->per;
+ u32 *mapping_table = pa->mapping_table;
+ int index = 0, i;
+ u32 data;
+
+ for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
+ data = mapping_table[index];
+
+ if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
+ if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
+ index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+ } else {
+ *apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+ return 0;
+ }
+ } else {
+ if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
+ index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+ } else {
+ *apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+ return 0;
+ }
+ }
+ }
+
+ return -ENODEV;
+}
+
+static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ struct spmi_pmic_arb_dev *pa = d->host_data;
+ struct spmi_pmic_arb_irq_spec spec;
+ int err;
+ u8 apid;
+
+ dev_dbg(&pa->spmic->dev,
+ "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
+ intspec[0], intspec[1], intspec[2]);
+
+ if (d->of_node != controller)
+ return -EINVAL;
+ if (intsize != 4)
+ return -EINVAL;
+ if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
+ return -EINVAL;
+
+ spec.slave = intspec[0];
+ spec.per = intspec[1];
+ spec.irq = intspec[2];
+
+ err = search_mapping_table(pa, &spec, &apid);
+ if (err)
+ return err;
+
+ pa->apid_to_ppid[apid] = spec.slave << 8 | spec.per;
+
+ /* Keep track of {max,min}_apid for bounding search during interrupt */
+ if (apid > pa->max_apid)
+ pa->max_apid = apid;
+ if (apid < pa->min_apid)
+ pa->min_apid = apid;
+
+ *out_hwirq = spec.slave << 24
+ | spec.per << 16
+ | spec.irq << 8
+ | apid;
+ *out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
+
+ dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
+
+ return 0;
+}
+
+static int qpnpint_irq_domain_map(struct irq_domain *d,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct spmi_pmic_arb_dev *pa = d->host_data;
+
+ dev_dbg(&pa->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);
+
+ irq_set_chip_and_handler(virq, &pmic_arb_irqchip, handle_level_irq);
+ irq_set_chip_data(virq, d->host_data);
+ irq_set_noprobe(virq);
+ return 0;
+}
+
+static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
+ .map = qpnpint_irq_domain_map,
+ .xlate = qpnpint_irq_domain_dt_translate,
+};
+
+static int spmi_pmic_arb_probe(struct platform_device *pdev)
+{
+ struct spmi_pmic_arb_dev *pa;
+ struct spmi_controller *ctrl;
+ struct resource *res;
+ u32 channel, ee;
+ int err, i;
+
+ ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
+ if (!ctrl)
+ return -ENOMEM;
+
+ pa = spmi_controller_get_drvdata(ctrl);
+ pa->spmic = ctrl;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+ pa->base = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pa->base)) {
+ err = PTR_ERR(pa->base);
+ goto err_put_ctrl;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
+ pa->intr = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pa->intr)) {
+ err = PTR_ERR(pa->intr);
+ goto err_put_ctrl;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
+ pa->cnfg = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pa->cnfg)) {
+ err = PTR_ERR(pa->cnfg);
+ goto err_put_ctrl;
+ }
+
+ pa->irq = platform_get_irq_byname(pdev, "periph_irq");
+ if (pa->irq < 0) {
+ err = pa->irq;
+ goto err_put_ctrl;
+ }
+
+ err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel);
+ if (err) {
+ dev_err(&pdev->dev, "channel unspecified.\n");
+ goto err_put_ctrl;
+ }
+
+ if (channel > 5) {
+ dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
+ channel);
+ err = -EINVAL;
+ goto err_put_ctrl;
+ }
+
+ pa->channel = channel;
+
+ err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee);
+ if (err) {
+ dev_err(&pdev->dev, "EE unspecified.\n");
+ goto err_put_ctrl;
+ }
+
+ if (ee > 5) {
+ dev_err(&pdev->dev, "invalid EE (%u) specified\n", ee);
+ err = -EINVAL;
+ goto err_put_ctrl;
+ }
+
+ pa->ee = ee;
+
+ for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i)
+ pa->mapping_table[i] = readl_relaxed(
+ pa->cnfg + SPMI_MAPPING_TABLE_REG(i));
+
+ /* Initialize max_apid/min_apid to the opposite bounds; during
+ * irq domain translation we are sure to update these */
+ pa->max_apid = 0;
+ pa->min_apid = PMIC_ARB_MAX_PERIPHS - 1;
+
+ platform_set_drvdata(pdev, ctrl);
+ raw_spin_lock_init(&pa->lock);
+
+ ctrl->cmd = pmic_arb_cmd;
+ ctrl->read_cmd = pmic_arb_read_cmd;
+ ctrl->write_cmd = pmic_arb_write_cmd;
+
+ dev_dbg(&pdev->dev, "adding irq domain\n");
+ pa->domain = irq_domain_add_tree(pdev->dev.of_node,
+ &pmic_arb_irq_domain_ops, pa);
+ if (!pa->domain) {
+ dev_err(&pdev->dev, "unable to create irq_domain\n");
+ err = -ENOMEM;
+ goto err_put_ctrl;
+ }
+
+ irq_set_handler_data(pa->irq, pa);
+ irq_set_chained_handler(pa->irq, pmic_arb_chained_irq);
+
+ err = spmi_controller_add(ctrl);
+ if (err)
+ goto err_domain_remove;
+
+ dev_dbg(&ctrl->dev, "PMIC Arb Version 0x%x\n",
+ pmic_arb_base_read(pa, PMIC_ARB_VERSION));
+
+ return 0;
+
+err_domain_remove:
+ irq_set_chained_handler(pa->irq, NULL);
+ irq_set_handler_data(pa->irq, NULL);
+ irq_domain_remove(pa->domain);
+err_put_ctrl:
+ spmi_controller_put(ctrl);
+ return err;
+}
+
+static int spmi_pmic_arb_remove(struct platform_device *pdev)
+{
+ struct spmi_controller *ctrl = platform_get_drvdata(pdev);
+ struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl);
+ spmi_controller_remove(ctrl);
+ irq_set_chained_handler(pa->irq, NULL);
+ irq_set_handler_data(pa->irq, NULL);
+ irq_domain_remove(pa->domain);
+ spmi_controller_put(ctrl);
+ return 0;
+}
+
+static const struct of_device_id spmi_pmic_arb_match_table[] = {
+ { .compatible = "qcom,spmi-pmic-arb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, spmi_pmic_arb_match_table);
+
+static struct platform_driver spmi_pmic_arb_driver = {
+ .probe = spmi_pmic_arb_probe,
+ .remove = spmi_pmic_arb_remove,
+ .driver = {
+ .name = "spmi_pmic_arb",
+ .owner = THIS_MODULE,
+ .of_match_table = spmi_pmic_arb_match_table,
+ },
+};
+module_platform_driver(spmi_pmic_arb_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spmi_pmic_arb");
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
new file mode 100644
index 000000000000..3b5780710d50
--- /dev/null
+++ b/drivers/spmi/spmi.c
@@ -0,0 +1,574 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spmi.h>
+#include <linux/pm_runtime.h>
+
+#include <dt-bindings/spmi/spmi.h>
+
+static DEFINE_IDA(ctrl_ida);
+
+static void spmi_dev_release(struct device *dev)
+{
+ struct spmi_device *sdev = to_spmi_device(dev);
+ kfree(sdev);
+}
+
+static const struct device_type spmi_dev_type = {
+ .release = spmi_dev_release,
+};
+
+static void spmi_ctrl_release(struct device *dev)
+{
+ struct spmi_controller *ctrl = to_spmi_controller(dev);
+ ida_simple_remove(&ctrl_ida, ctrl->nr);
+ kfree(ctrl);
+}
+
+static const struct device_type spmi_ctrl_type = {
+ .release = spmi_ctrl_release,
+};
+
+static int spmi_device_match(struct device *dev, struct device_driver *drv)
+{
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (drv->name)
+ return strncmp(dev_name(dev), drv->name,
+ SPMI_NAME_SIZE) == 0;
+
+ return 0;
+}
+
+/**
+ * spmi_device_add() - add a device previously constructed via spmi_device_alloc()
+ * @sdev: spmi_device to be added
+ */
+int spmi_device_add(struct spmi_device *sdev)
+{
+ struct spmi_controller *ctrl = sdev->ctrl;
+ int err;
+
+ dev_set_name(&sdev->dev, "%d-%02x", ctrl->nr, sdev->usid);
+
+ err = device_add(&sdev->dev);
+ if (err < 0) {
+ dev_err(&sdev->dev, "Can't add %s, status %d\n",
+ dev_name(&sdev->dev), err);
+ goto err_device_add;
+ }
+
+ dev_dbg(&sdev->dev, "device %s registered\n", dev_name(&sdev->dev));
+
+err_device_add:
+ return err;
+}
+EXPORT_SYMBOL_GPL(spmi_device_add);
+
+/**
+ * spmi_device_remove(): remove an SPMI device
+ * @sdev: spmi_device to be removed
+ */
+void spmi_device_remove(struct spmi_device *sdev)
+{
+ device_unregister(&sdev->dev);
+}
+EXPORT_SYMBOL_GPL(spmi_device_remove);
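
For orientation, a brief sketch of how the two helpers above pair up when a controller hand-registers a child device. The usid value is invented, and spmi_device_alloc()/spmi_device_put() are defined further down in this file:

#include <linux/spmi.h>

static int example_add_child(struct spmi_controller *ctrl)
{
	struct spmi_device *sdev;
	int err;

	sdev = spmi_device_alloc(ctrl);
	if (!sdev)
		return -ENOMEM;

	sdev->usid = 0x01;		/* illustrative slave id */

	err = spmi_device_add(sdev);
	if (err)
		spmi_device_put(sdev);	/* drop the initialized reference */
	return err;
}
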
+
+static inline int
+spmi_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid)
+{
+ if (!ctrl || !ctrl->cmd || ctrl->dev.type != &spmi_ctrl_type)
+ return -EINVAL;
+
+ return ctrl->cmd(ctrl, opcode, sid);
+}
+
+static inline int spmi_read_cmd(struct spmi_controller *ctrl, u8 opcode,
+ u8 sid, u16 addr, u8 *buf, size_t len)
+{
+ if (!ctrl || !ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type)
+ return -EINVAL;
+
+ return ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len);
+}
+
+static inline int spmi_write_cmd(struct spmi_controller *ctrl, u8 opcode,
+ u8 sid, u16 addr, const u8 *buf, size_t len)
+{
+ if (!ctrl || !ctrl->write_cmd || ctrl->dev.type != &spmi_ctrl_type)
+ return -EINVAL;
+
+ return ctrl->write_cmd(ctrl, opcode, sid, addr, buf, len);
+}
+
+/**
+ * spmi_register_read() - register read
+ * @sdev: SPMI device.
+ * @addr: slave register address (5-bit address).
+ * @buf: buffer to be populated with data from the Slave.
+ *
+ * Reads 1 byte of data from a Slave device register.
+ */
+int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf)
+{
+ /* 5-bit register address */
+ if (addr > 0x1F)
+ return -EINVAL;
+
+ return spmi_read_cmd(sdev->ctrl, SPMI_CMD_READ, sdev->usid, addr,
+ buf, 1);
+}
+EXPORT_SYMBOL_GPL(spmi_register_read);
+
+/**
+ * spmi_ext_register_read() - extended register read
+ * @sdev: SPMI device.
+ * @addr: slave register address (8-bit address).
+ * @buf: buffer to be populated with data from the Slave.
+ * @len: the requested number of bytes to read (up to 16 bytes).
+ *
+ * Reads up to 16 bytes of data from the extended register space on a
+ * Slave device.
+ */
+int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
+ size_t len)
+{
+ /* 8-bit register address, up to 16 bytes */
+ if (len == 0 || len > 16)
+ return -EINVAL;
+
+ return spmi_read_cmd(sdev->ctrl, SPMI_CMD_EXT_READ, sdev->usid, addr,
+ buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_read);
+
+/**
+ * spmi_ext_register_readl() - extended register read long
+ * @sdev: SPMI device.
+ * @addr: slave register address (16-bit address).
+ * @buf: buffer to be populated with data from the Slave.
+ * @len: the requested number of bytes to read (up to 8 bytes).
+ *
+ * Reads up to 8 bytes of data from the extended register space on a
+ * Slave device using 16-bit address.
+ */
+int spmi_ext_register_readl(struct spmi_device *sdev, u16 addr, u8 *buf,
+ size_t len)
+{
+ /* 16-bit register address, up to 8 bytes */
+ if (len == 0 || len > 8)
+ return -EINVAL;
+
+ return spmi_read_cmd(sdev->ctrl, SPMI_CMD_EXT_READL, sdev->usid, addr,
+ buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_readl);
+
+/**
+ * spmi_register_write() - register write
+ * @sdev: SPMI device
+ * @addr: slave register address (5-bit address).
+ * @data: buffer containing the data to be transferred to the Slave.
+ *
+ * Writes 1 byte of data to a Slave device register.
+ */
+int spmi_register_write(struct spmi_device *sdev, u8 addr, u8 data)
+{
+ /* 5-bit register address */
+ if (addr > 0x1F)
+ return -EINVAL;
+
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_WRITE, sdev->usid, addr,
+ &data, 1);
+}
+EXPORT_SYMBOL_GPL(spmi_register_write);
+
+/**
+ * spmi_register_zero_write() - register zero write
+ * @sdev: SPMI device.
+ * @data: the data to be written to register 0 (7-bits).
+ *
+ * Writes data to register 0 of the Slave device.
+ */
+int spmi_register_zero_write(struct spmi_device *sdev, u8 data)
+{
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_ZERO_WRITE, sdev->usid, 0,
+ &data, 1);
+}
+EXPORT_SYMBOL_GPL(spmi_register_zero_write);
+
+/**
+ * spmi_ext_register_write() - extended register write
+ * @sdev: SPMI device.
+ * @addr: slave register address (8-bit address).
+ * @buf: buffer containing the data to be transferred to the Slave.
+ * @len: the requested number of bytes to write (up to 16 bytes).
+ *
+ * Writes up to 16 bytes of data to the extended register space of a
+ * Slave device.
+ */
+int spmi_ext_register_write(struct spmi_device *sdev, u8 addr, const u8 *buf,
+ size_t len)
+{
+ /* 8-bit register address, up to 16 bytes */
+ if (len == 0 || len > 16)
+ return -EINVAL;
+
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_EXT_WRITE, sdev->usid, addr,
+ buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_write);
+
+/**
+ * spmi_ext_register_writel() - extended register write long
+ * @sdev: SPMI device.
+ * @addr: slave register address (16-bit address).
+ * @buf: buffer containing the data to be transferred to the Slave.
+ * @len: the requested number of bytes to write (up to 8 bytes).
+ *
+ * Writes up to 8 bytes of data to the extended register space of a
+ * Slave device using 16-bit address.
+ */
+int spmi_ext_register_writel(struct spmi_device *sdev, u16 addr, const u8 *buf,
+ size_t len)
+{
+ /* 4-bit Slave Identifier, 16-bit register address, up to 8 bytes */
+ if (len == 0 || len > 8)
+ return -EINVAL;
+
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_EXT_WRITEL, sdev->usid,
+ addr, buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_writel);
+
+/**
+ * spmi_command_reset() - sends RESET command to the specified slave
+ * @sdev: SPMI device.
+ *
+ * The Reset command initializes the Slave and forces all registers to
+ * their reset values. The Slave shall enter the STARTUP state after
+ * receiving a Reset command.
+ */
+int spmi_command_reset(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_RESET, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_reset);
+
+/**
+ * spmi_command_sleep() - sends SLEEP command to the specified SPMI device
+ * @sdev: SPMI device.
+ *
+ * The Sleep command causes the Slave to enter the user defined SLEEP state.
+ */
+int spmi_command_sleep(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_SLEEP, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_sleep);
+
+/**
+ * spmi_command_wakeup() - sends WAKEUP command to the specified SPMI device
+ * @sdev: SPMI device.
+ *
+ * The Wakeup command causes the Slave to move from the SLEEP state to
+ * the ACTIVE state.
+ */
+int spmi_command_wakeup(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_WAKEUP, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_wakeup);
+
+/**
+ * spmi_command_shutdown() - sends SHUTDOWN command to the specified SPMI device
+ * @sdev: SPMI device.
+ *
+ * The Shutdown command causes the Slave to enter the SHUTDOWN state.
+ */
+int spmi_command_shutdown(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_SHUTDOWN, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_shutdown);
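
Taken together, the register accessors and command helpers above make up the per-device API. A short usage sketch from a client driver's point of view; the helper name and register addresses are invented for illustration:

#include <linux/spmi.h>

static int example_configure(struct spmi_device *sdev)
{
	u8 status;
	int err;

	/* one-byte extended read using a 16-bit address */
	err = spmi_ext_register_readl(sdev, 0x0808, &status, 1);
	if (err)
		return err;

	/* single-byte write to a 5-bit base-register address */
	err = spmi_register_write(sdev, 0x05, status | 0x01);
	if (err)
		return err;

	/* put the slave into its SLEEP state when done */
	return spmi_command_sleep(sdev);
}
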
+
+static int spmi_drv_probe(struct device *dev)
+{
+ const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
+ struct spmi_device *sdev = to_spmi_device(dev);
+ int err;
+
+ /* Ensure the slave is in ACTIVE state */
+ err = spmi_command_wakeup(sdev);
+ if (err)
+ goto fail_wakeup;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ err = sdrv->probe(sdev);
+ if (err)
+ goto fail_probe;
+
+ return 0;
+
+fail_probe:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+fail_wakeup:
+ return err;
+}
+
+static int spmi_drv_remove(struct device *dev)
+{
+ const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
+
+ pm_runtime_get_sync(dev);
+ sdrv->remove(to_spmi_device(dev));
+ pm_runtime_put_noidle(dev);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+ return 0;
+}
+
+static struct bus_type spmi_bus_type = {
+ .name = "spmi",
+ .match = spmi_device_match,
+ .probe = spmi_drv_probe,
+ .remove = spmi_drv_remove,
+};
+
+/**
+ * spmi_device_alloc() - Allocate a new SPMI device
+ * @ctrl: associated controller
+ *
+ * Caller is responsible for either calling spmi_device_add() to add the
+ * newly allocated device, or calling spmi_device_put() to discard it.
+ */
+struct spmi_device *spmi_device_alloc(struct spmi_controller *ctrl)
+{
+ struct spmi_device *sdev;
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return NULL;
+
+ sdev->ctrl = ctrl;
+ device_initialize(&sdev->dev);
+ sdev->dev.parent = &ctrl->dev;
+ sdev->dev.bus = &spmi_bus_type;
+ sdev->dev.type = &spmi_dev_type;
+ return sdev;
+}
+EXPORT_SYMBOL_GPL(spmi_device_alloc);
+
+/**
+ * spmi_controller_alloc() - Allocate a new SPMI controller
+ * @parent: parent device
+ * @size: size of private data
+ *
+ * Caller is responsible for either calling spmi_controller_add() to add the
+ * newly allocated controller, or calling spmi_controller_put() to discard it.
+ * The allocated private data region may be accessed via
+ * spmi_controller_get_drvdata()
+ */
+struct spmi_controller *spmi_controller_alloc(struct device *parent,
+ size_t size)
+{
+ struct spmi_controller *ctrl;
+ int id;
+
+ if (WARN_ON(!parent))
+ return NULL;
+
+ ctrl = kzalloc(sizeof(*ctrl) + size, GFP_KERNEL);
+ if (!ctrl)
+ return NULL;
+
+ device_initialize(&ctrl->dev);
+ ctrl->dev.type = &spmi_ctrl_type;
+ ctrl->dev.bus = &spmi_bus_type;
+ ctrl->dev.parent = parent;
+ ctrl->dev.of_node = parent->of_node;
+ spmi_controller_set_drvdata(ctrl, &ctrl[1]);
+
+ id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(parent,
+ "unable to allocate SPMI controller identifier.\n");
+ spmi_controller_put(ctrl);
+ return NULL;
+ }
+
+ ctrl->nr = id;
+ dev_set_name(&ctrl->dev, "spmi-%d", id);
+
+ dev_dbg(&ctrl->dev, "allocated controller 0x%p id %d\n", ctrl, id);
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(spmi_controller_alloc);
+
+static void of_spmi_register_devices(struct spmi_controller *ctrl)
+{
+ struct device_node *node;
+ int err;
+
+ if (!ctrl->dev.of_node)
+ return;
+
+ for_each_available_child_of_node(ctrl->dev.of_node, node) {
+ struct spmi_device *sdev;
+ u32 reg[2];
+
+ dev_dbg(&ctrl->dev, "adding child %s\n", node->full_name);
+
+ err = of_property_read_u32_array(node, "reg", reg, 2);
+ if (err) {
+ dev_err(&ctrl->dev,
+ "node %s err (%d) does not have 'reg' property\n",
+ node->full_name, err);
+ continue;
+ }
+
+ if (reg[1] != SPMI_USID) {
+ dev_err(&ctrl->dev,
+ "node %s contains unsupported 'reg' entry\n",
+ node->full_name);
+ continue;
+ }
+
+ if (reg[0] >= SPMI_MAX_SLAVE_ID) {
+ dev_err(&ctrl->dev,
+ "invalid usid on node %s\n",
+ node->full_name);
+ continue;
+ }
+
+ dev_dbg(&ctrl->dev, "read usid %02x\n", reg[0]);
+
+ sdev = spmi_device_alloc(ctrl);
+ if (!sdev)
+ continue;
+
+ sdev->dev.of_node = node;
+ sdev->usid = (u8) reg[0];
+
+ err = spmi_device_add(sdev);
+ if (err) {
+ dev_err(&sdev->dev,
+ "failure adding device. status %d\n", err);
+ spmi_device_put(sdev);
+ }
+ }
+}
+
+/**
+ * spmi_controller_add() - Add an SPMI controller
+ * @ctrl: controller to be registered.
+ *
+ * Register a controller previously allocated via spmi_controller_alloc() with
+ * the SPMI core.
+ */
+int spmi_controller_add(struct spmi_controller *ctrl)
+{
+ int ret;
+
+ /* Can't register until after driver model init */
+ if (WARN_ON(!spmi_bus_type.p))
+ return -EAGAIN;
+
+ ret = device_add(&ctrl->dev);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_OF))
+ of_spmi_register_devices(ctrl);
+
+ dev_dbg(&ctrl->dev, "spmi-%d registered: dev:%p\n",
+ ctrl->nr, &ctrl->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spmi_controller_add);
+
+/* Remove a device associated with a controller */
+static int spmi_ctrl_remove_device(struct device *dev, void *data)
+{
+ struct spmi_device *spmidev = to_spmi_device(dev);
+ if (dev->type == &spmi_dev_type)
+ spmi_device_remove(spmidev);
+ return 0;
+}
+
+/**
+ * spmi_controller_remove(): remove an SPMI controller
+ * @ctrl: controller to remove
+ *
+ * Remove an SPMI controller. Caller is responsible for calling
+ * spmi_controller_put() to discard the allocated controller.
+ */
+void spmi_controller_remove(struct spmi_controller *ctrl)
+{
+ int dummy;
+
+ if (!ctrl)
+ return;
+
+ dummy = device_for_each_child(&ctrl->dev, NULL,
+ spmi_ctrl_remove_device);
+ device_del(&ctrl->dev);
+}
+EXPORT_SYMBOL_GPL(spmi_controller_remove);
+
+/**
+ * spmi_driver_register() - Register client driver with SPMI core
+ * @sdrv: client driver to be associated with client-device.
+ *
+ * This API will register the client driver with the SPMI framework.
+ * It is typically called from the driver's module-init function.
+ */
+int spmi_driver_register(struct spmi_driver *sdrv)
+{
+ sdrv->driver.bus = &spmi_bus_type;
+ return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(spmi_driver_register);
+
+static void __exit spmi_exit(void)
+{
+ bus_unregister(&spmi_bus_type);
+}
+module_exit(spmi_exit);
+
+static int __init spmi_init(void)
+{
+ return bus_register(&spmi_bus_type);
+}
+postcore_initcall(spmi_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SPMI module");
+MODULE_ALIAS("platform:spmi");
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 99375f0a9440..ea5efb426f75 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -34,8 +34,6 @@ source "drivers/staging/winbond/Kconfig"
source "drivers/staging/wlan-ng/Kconfig"
-source "drivers/staging/echo/Kconfig"
-
source "drivers/staging/comedi/Kconfig"
source "drivers/staging/olpc_dcon/Kconfig"
@@ -52,6 +50,8 @@ source "drivers/staging/rtl8712/Kconfig"
source "drivers/staging/rtl8188eu/Kconfig"
+source "drivers/staging/rtl8723au/Kconfig"
+
source "drivers/staging/rtl8821ae/Kconfig"
source "drivers/staging/rts5139/Kconfig"
@@ -82,8 +82,6 @@ source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
-source "drivers/staging/sm7xxfb/Kconfig"
-
source "drivers/staging/crystalhd/Kconfig"
source "drivers/staging/cxt1e1/Kconfig"
@@ -128,8 +126,6 @@ source "drivers/staging/imx-drm/Kconfig"
source "drivers/staging/dgrp/Kconfig"
-source "drivers/staging/sb105x/Kconfig"
-
source "drivers/staging/fwserial/Kconfig"
source "drivers/staging/goldfish/Kconfig"
@@ -146,4 +142,10 @@ source "drivers/staging/dgnc/Kconfig"
source "drivers/staging/dgap/Kconfig"
+source "drivers/staging/gs_fpgaboot/Kconfig"
+
+source "drivers/staging/nokia_h4p/Kconfig"
+
+source "drivers/staging/unisys/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ddc3c4a5d39d..86e020c2ad0d 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_USBIP_CORE) += usbip/
obj-$(CONFIG_W35UND) += winbond/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
-obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
obj-$(CONFIG_PANEL) += panel/
@@ -18,6 +17,7 @@ obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
+obj-$(CONFIG_R8723AU) += rtl8723au/
obj-$(CONFIG_R8821AE) += rtl8821ae/
obj-$(CONFIG_RTS5139) += rts5139/
obj-$(CONFIG_RTS5208) += rts5208/
@@ -35,7 +35,6 @@ obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
-obj-$(CONFIG_FB_SM7XX) += sm7xxfb/
obj-$(CONFIG_CRYSTALHD) += crystalhd/
obj-$(CONFIG_CXT1E1) += cxt1e1/
obj-$(CONFIG_FB_XGI) += xgifb/
@@ -57,7 +56,6 @@ obj-$(CONFIG_NET_VENDOR_SILICOM) += silicom/
obj-$(CONFIG_CED1401) += ced1401/
obj-$(CONFIG_DRM_IMX) += imx-drm/
obj-$(CONFIG_DGRP) += dgrp/
-obj-$(CONFIG_SB105X) += sb105x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_LUSTRE_FS) += lustre/
@@ -65,3 +63,6 @@ obj-$(CONFIG_XILLYBUS) += xillybus/
obj-$(CONFIG_DGNC) += dgnc/
obj-$(CONFIG_DGAP) += dgap/
obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/
+obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
+obj-$(CONFIG_BT_NOKIA_H4P) += nokia_h4p/
+obj-$(CONFIG_UNISYSSPAR) += unisys/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index b91c758883bf..99e484f845f2 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -2,7 +2,6 @@ menu "Android"
config ANDROID
bool "Android Drivers"
- default N
---help---
Enable support for various drivers needed on the Android platform
@@ -20,6 +19,19 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_IPC_32BIT
+ bool
+ depends on !64BIT && ANDROID_BINDER_IPC
+ default y
+ ---help---
+ The Binder API has been changed to support both 32-bit and 64-bit
+ applications in a mixed environment.
+
+ Enable this to support an old 32-bit Android user-space (v4.4 and
+ earlier).
+
+ Note that enabling this will break newer Android user-space.
+
config ASHMEM
bool "Enable the Anonymous Shared Memory Subsystem"
default n
@@ -60,7 +72,6 @@ config ANDROID_TIMED_GPIO
config ANDROID_LOW_MEMORY_KILLER
bool "Android Low Memory Killer"
- default N
---help---
Registers processes to be killed when memory is low
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
index 4fd32f337f9c..495b20cf3bf6 100644
--- a/drivers/staging/android/android_alarm.h
+++ b/drivers/staging/android/android_alarm.h
@@ -16,50 +16,10 @@
#ifndef _LINUX_ANDROID_ALARM_H
#define _LINUX_ANDROID_ALARM_H
-#include <linux/ioctl.h>
-#include <linux/time.h>
#include <linux/compat.h>
+#include <linux/ioctl.h>
-enum android_alarm_type {
- /* return code bit numbers or set alarm arg */
- ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME,
-
- ANDROID_ALARM_TYPE_COUNT,
-
- /* return code bit numbers */
- /* ANDROID_ALARM_TIME_CHANGE = 16 */
-};
-
-enum android_alarm_return_flags {
- ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
- ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
-};
-
-/* Disable alarm */
-#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
-
-/* Ack last alarm and wait for next */
-#define ANDROID_ALARM_WAIT _IO('a', 1)
-
-#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
-/* Set alarm */
-#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
-#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
-#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
-#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
-#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
-
+#include "uapi/android_alarm.h"
#ifdef CONFIG_COMPAT
#define ANDROID_ALARM_SET_COMPAT(type) ALARM_IOW(2, type, \
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
index 8dc0f0d3adf3..5abcfd7aa706 100644
--- a/drivers/staging/android/ashmem.h
+++ b/drivers/staging/android/ashmem.h
@@ -16,35 +16,7 @@
#include <linux/ioctl.h>
#include <linux/compat.h>
-#define ASHMEM_NAME_LEN 256
-
-#define ASHMEM_NAME_DEF "dev/ashmem"
-
-/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
-#define ASHMEM_NOT_PURGED 0
-#define ASHMEM_WAS_PURGED 1
-
-/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
-#define ASHMEM_IS_UNPINNED 0
-#define ASHMEM_IS_PINNED 1
-
-struct ashmem_pin {
- __u32 offset; /* offset into region, in bytes, page-aligned */
- __u32 len; /* length forward from offset, in bytes, page-aligned */
-};
-
-#define __ASHMEMIOC 0x77
-
-#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
-#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
-#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
-#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
-#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
-#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
-#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
-#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
-#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
-#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+#include "uapi/ashmem.h"
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 1432d956769c..cfe4bc8f05cb 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -228,8 +228,8 @@ struct binder_node {
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
- void __user *ptr;
- void __user *cookie;
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
unsigned has_strong_ref:1;
unsigned pending_strong_ref:1;
unsigned has_weak_ref:1;
@@ -242,7 +242,7 @@ struct binder_node {
struct binder_ref_death {
struct binder_work work;
- void __user *cookie;
+ binder_uintptr_t cookie;
};
struct binder_ref {
@@ -515,14 +515,14 @@ static void binder_insert_allocated_buffer(struct binder_proc *proc,
}
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
- void __user *user_ptr)
+ uintptr_t user_ptr)
{
struct rb_node *n = proc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
struct binder_buffer *kern_ptr;
- kern_ptr = user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data);
+ kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
+ - offsetof(struct binder_buffer, data));
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -856,7 +856,7 @@ static void binder_free_buf(struct binder_proc *proc,
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
- void __user *ptr)
+ binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
@@ -875,8 +875,8 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
- void __user *ptr,
- void __user *cookie)
+ binder_uintptr_t ptr,
+ binder_uintptr_t cookie)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
@@ -908,9 +908,9 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p created\n",
+ "%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
- node->ptr, node->cookie);
+ (u64)node->ptr, (u64)node->cookie);
return node;
}
@@ -1226,9 +1226,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
- size_t *failed_at)
+ binder_size_t *failed_at)
{
- size_t *offp, *off_end;
+ binder_size_t *offp, *off_end;
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -1239,7 +1239,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
- offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+ offp = (binder_size_t *)(buffer->data +
+ ALIGN(buffer->data_size, sizeof(void *)));
if (failed_at)
off_end = failed_at;
else
@@ -1249,8 +1250,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
if (*offp > buffer->data_size - sizeof(*fp) ||
buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
- pr_err("transaction release %d bad offset %zd, size %zd\n",
- debug_id, *offp, buffer->data_size);
+ pr_err("transaction release %d bad offset %lld, size %zd\n",
+ debug_id, (u64)*offp, buffer->data_size);
continue;
}
fp = (struct flat_binder_object *)(buffer->data + *offp);
@@ -1259,13 +1260,13 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_WEAK_BINDER: {
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
- pr_err("transaction release %d bad node %p\n",
- debug_id, fp->binder);
+ pr_err("transaction release %d bad node %016llx\n",
+ debug_id, (u64)fp->binder);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%p\n",
- node->debug_id, node->ptr);
+ " node %d u%016llx\n",
+ node->debug_id, (u64)node->ptr);
binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
} break;
case BINDER_TYPE_HANDLE:
@@ -1303,7 +1304,7 @@ static void binder_transaction(struct binder_proc *proc,
{
struct binder_transaction *t;
struct binder_work *tcomplete;
- size_t *offp, *off_end;
+ binder_size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -1432,18 +1433,20 @@ static void binder_transaction(struct binder_proc *proc,
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
- tr->data.ptr.buffer, tr->data.ptr.offsets,
- tr->data_size, tr->offsets_size);
+ (u64)tr->data.ptr.buffer,
+ (u64)tr->data.ptr.offsets,
+ (u64)tr->data_size, (u64)tr->offsets_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
- tr->data.ptr.buffer, tr->data.ptr.offsets,
- tr->data_size, tr->offsets_size);
+ (u64)tr->data.ptr.buffer,
+ (u64)tr->data.ptr.offsets,
+ (u64)tr->data_size, (u64)tr->offsets_size);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
@@ -1472,23 +1475,26 @@ static void binder_transaction(struct binder_proc *proc,
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
- offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+ offp = (binder_size_t *)(t->buffer->data +
+ ALIGN(tr->data_size, sizeof(void *)));
- if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+ if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+ tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
- if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+ if (copy_from_user(offp, (const void __user *)(uintptr_t)
+ tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
- if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
- binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n",
- proc->pid, thread->pid, tr->offsets_size);
+ if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
+ binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
+ proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
@@ -1498,8 +1504,8 @@ static void binder_transaction(struct binder_proc *proc,
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
- binder_user_error("%d:%d got transaction with invalid offset, %zd\n",
- proc->pid, thread->pid, *offp);
+ binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
+ proc->pid, thread->pid, (u64)*offp);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
@@ -1519,10 +1525,10 @@ static void binder_transaction(struct binder_proc *proc,
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
- binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
- fp->binder, node->debug_id,
- fp->cookie, node->cookie);
+ (u64)fp->binder, node->debug_id,
+ (u64)fp->cookie, (u64)node->cookie);
goto err_binder_get_ref_for_node_failed;
}
ref = binder_get_ref_for_node(target_proc, node);
@@ -1540,9 +1546,9 @@ static void binder_transaction(struct binder_proc *proc,
trace_binder_transaction_node_to_ref(t, node, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%p -> ref %d desc %d\n",
- node->debug_id, node->ptr, ref->debug_id,
- ref->desc);
+ " node %d u%016llx -> ref %d desc %d\n",
+ node->debug_id, (u64)node->ptr,
+ ref->debug_id, ref->desc);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
@@ -1564,9 +1570,9 @@ static void binder_transaction(struct binder_proc *proc,
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
trace_binder_transaction_ref_to_node(t, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> node %d u%p\n",
+ " ref %d desc %d -> node %d u%016llx\n",
ref->debug_id, ref->desc, ref->node->debug_id,
- ref->node->ptr);
+ (u64)ref->node->ptr);
} else {
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);
@@ -1682,9 +1688,9 @@ err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %zd-%zd\n",
+ "%d:%d transaction failed %d, size %lld-%lld\n",
proc->pid, thread->pid, return_error,
- tr->data_size, tr->offsets_size);
+ (u64)tr->data_size, (u64)tr->offsets_size);
{
struct binder_transaction_log_entry *fe;
@@ -1702,9 +1708,11 @@ err_no_context_mgr_node:
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
- void __user *buffer, size_t size, size_t *consumed)
+ binder_uintptr_t binder_buffer, size_t size,
+ binder_size_t *consumed)
{
uint32_t cmd;
+ void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -1773,33 +1781,33 @@ static int binder_thread_write(struct binder_proc *proc,
}
case BC_INCREFS_DONE:
case BC_ACQUIRE_DONE: {
- void __user *node_ptr;
- void __user *cookie;
+ binder_uintptr_t node_ptr;
+ binder_uintptr_t cookie;
struct binder_node *node;
- if (get_user(node_ptr, (void * __user *)ptr))
+ if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
- if (get_user(cookie, (void * __user *)ptr))
+ ptr += sizeof(binder_uintptr_t);
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
node = binder_get_node(proc, node_ptr);
if (node == NULL) {
- binder_user_error("%d:%d %s u%p no match\n",
+ binder_user_error("%d:%d %s u%016llx no match\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" :
"BC_ACQUIRE_DONE",
- node_ptr);
+ (u64)node_ptr);
break;
}
if (cookie != node->cookie) {
- binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
- node_ptr, node->debug_id,
- cookie, node->cookie);
+ (u64)node_ptr, node->debug_id,
+ (u64)cookie, (u64)node->cookie);
break;
}
if (cmd == BC_ACQUIRE_DONE) {
@@ -1835,27 +1843,28 @@ static int binder_thread_write(struct binder_proc *proc,
return -EINVAL;
case BC_FREE_BUFFER: {
- void __user *data_ptr;
+ binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
- if (get_user(data_ptr, (void * __user *)ptr))
+ if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
buffer = binder_buffer_lookup(proc, data_ptr);
if (buffer == NULL) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n",
- proc->pid, thread->pid, data_ptr);
+ binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ proc->pid, thread->pid, (u64)data_ptr);
break;
}
if (!buffer->allow_user_free) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n",
- proc->pid, thread->pid, data_ptr);
+ binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
+ proc->pid, thread->pid, (u64)data_ptr);
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
- "%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
- proc->pid, thread->pid, data_ptr, buffer->debug_id,
+ "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
+ proc->pid, thread->pid, (u64)data_ptr,
+ buffer->debug_id,
buffer->transaction ? "active" : "finished");
if (buffer->transaction) {
@@ -1925,16 +1934,16 @@ static int binder_thread_write(struct binder_proc *proc,
case BC_REQUEST_DEATH_NOTIFICATION:
case BC_CLEAR_DEATH_NOTIFICATION: {
uint32_t target;
- void __user *cookie;
+ binder_uintptr_t cookie;
struct binder_ref *ref;
struct binder_ref_death *death;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (get_user(cookie, (void __user * __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
ref = binder_get_ref(proc, target);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
@@ -1947,12 +1956,12 @@ static int binder_thread_write(struct binder_proc *proc,
}
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
- "%d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+ "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
proc->pid, thread->pid,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
- cookie, ref->debug_id, ref->desc,
+ (u64)cookie, ref->debug_id, ref->desc,
ref->strong, ref->weak, ref->node->debug_id);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
@@ -1990,9 +1999,10 @@ static int binder_thread_write(struct binder_proc *proc,
}
death = ref->death;
if (death->cookie != cookie) {
- binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
- death->cookie, cookie);
+ (u64)death->cookie,
+ (u64)cookie);
break;
}
ref->death = NULL;
@@ -2012,9 +2022,9 @@ static int binder_thread_write(struct binder_proc *proc,
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
- void __user *cookie;
+ binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
- if (get_user(cookie, (void __user * __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(void *);
@@ -2026,11 +2036,12 @@ static int binder_thread_write(struct binder_proc *proc,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
- "%d:%d BC_DEAD_BINDER_DONE %p found %p\n",
- proc->pid, thread->pid, cookie, death);
+ "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+ proc->pid, thread->pid, (u64)cookie,
+ death);
if (death == NULL) {
- binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n",
- proc->pid, thread->pid, cookie);
+ binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
+ proc->pid, thread->pid, (u64)cookie);
break;
}
@@ -2082,9 +2093,10 @@ static int binder_has_thread_work(struct binder_thread *thread)
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
- void __user *buffer, size_t size,
- size_t *consumed, int non_block)
+ binder_uintptr_t binder_buffer, size_t size,
+ binder_size_t *consumed, int non_block)
{
+ void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -2229,32 +2241,40 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(node->ptr, (void * __user *)ptr))
+ if (put_user(node->ptr,
+ (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
- if (put_user(node->cookie, (void * __user *)ptr))
+ ptr += sizeof(binder_uintptr_t);
+ if (put_user(node->cookie,
+ (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%p c%p\n",
- proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
+ "%d:%d %s %d u%016llx c%016llx\n",
+ proc->pid, thread->pid, cmd_name,
+ node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
} else {
list_del_init(&w->entry);
if (!weak && !strong) {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p deleted\n",
- proc->pid, thread->pid, node->debug_id,
- node->ptr, node->cookie);
+ "%d:%d node %d u%016llx c%016llx deleted\n",
+ proc->pid, thread->pid,
+ node->debug_id,
+ (u64)node->ptr,
+ (u64)node->cookie);
rb_erase(&node->rb_node, &proc->nodes);
kfree(node);
binder_stats_deleted(BINDER_STAT_NODE);
} else {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p state unchanged\n",
- proc->pid, thread->pid, node->debug_id, node->ptr,
- node->cookie);
+ "%d:%d node %d u%016llx c%016llx state unchanged\n",
+ proc->pid, thread->pid,
+ node->debug_id,
+ (u64)node->ptr,
+ (u64)node->cookie);
}
}
} break;
@@ -2272,17 +2292,18 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(death->cookie, (void * __user *)ptr))
+ if (put_user(death->cookie,
+ (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
- "%d:%d %s %p\n",
+ "%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- death->cookie);
+ (u64)death->cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
list_del(&w->entry);
@@ -2312,8 +2333,8 @@ retry:
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
- tr.target.ptr = NULL;
- tr.cookie = NULL;
+ tr.target.ptr = 0;
+ tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
@@ -2330,8 +2351,9 @@ retry:
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (void *)t->buffer->data +
- proc->user_buffer_offset;
+ tr.data.ptr.buffer = (binder_uintptr_t)(
+ (uintptr_t)t->buffer->data +
+ proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
@@ -2346,14 +2368,14 @@ retry:
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n",
+ "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
t->debug_id, t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
- tr.data.ptr.buffer, tr.data.ptr.offsets);
+ (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
@@ -2423,8 +2445,8 @@ static void binder_release_work(struct list_head *list)
death = container_of(w, struct binder_ref_death, work);
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
- "undelivered death notification, %p\n",
- death->cookie);
+ "undelivered death notification, %016llx\n",
+ (u64)death->cookie);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
} break;
@@ -2580,12 +2602,16 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d write %zd at %016lx, read %zd at %016lx\n",
- proc->pid, thread->pid, bwr.write_size,
- bwr.write_buffer, bwr.read_size, bwr.read_buffer);
+ "%d:%d write %lld at %016llx, read %lld at %016llx\n",
+ proc->pid, thread->pid,
+ (u64)bwr.write_size, (u64)bwr.write_buffer,
+ (u64)bwr.read_size, (u64)bwr.read_buffer);
if (bwr.write_size > 0) {
- ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+ ret = binder_thread_write(proc, thread,
+ bwr.write_buffer,
+ bwr.write_size,
+ &bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
@@ -2595,7 +2621,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
if (bwr.read_size > 0) {
- ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+ ret = binder_thread_read(proc, thread, bwr.read_buffer,
+ bwr.read_size,
+ &bwr.read_consumed,
+ filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
@@ -2606,9 +2635,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d wrote %zd of %zd, read return %zd of %zd\n",
- proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
- bwr.read_consumed, bwr.read_size);
+ "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
+ proc->pid, thread->pid,
+ (u64)bwr.write_consumed, (u64)bwr.write_size,
+ (u64)bwr.read_consumed, (u64)bwr.read_size);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
@@ -2637,7 +2667,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
} else
binder_context_mgr_uid = current->cred->euid;
- binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+ binder_context_mgr_node = binder_new_node(proc, 0, 0);
if (binder_context_mgr_node == NULL) {
ret = -ENOMEM;
goto err;
@@ -3132,8 +3162,9 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
break;
case BINDER_WORK_NODE:
node = container_of(w, struct binder_node, work);
- seq_printf(m, "%snode work %d: u%p c%p\n",
- prefix, node->debug_id, node->ptr, node->cookie);
+ seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+ prefix, node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
break;
case BINDER_WORK_DEAD_BINDER:
seq_printf(m, "%shas dead binder\n", prefix);
@@ -3193,8 +3224,8 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
- node->debug_id, node->ptr, node->cookie,
+ seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+ node->debug_id, (u64)node->ptr, (u64)node->cookie,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
node->internal_strong_refs, count);
@@ -3496,6 +3527,7 @@ static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
+ .compat_ioctl = binder_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
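
The recurring change in binder.c above is easier to see in isolation: user-space addresses now cross the ioctl boundary as fixed-width binder_uintptr_t values instead of raw pointers, and are only cast back to __user pointers at the point of access, so the layout is identical for 32-bit and 64-bit user space. A minimal sketch of that pattern; the typedef is repeated here purely for illustration (the real definition lives in the binder UAPI header) and the helper name is hypothetical:

#include <linux/types.h>
#include <linux/uaccess.h>

typedef u64 binder_uintptr_t;	/* illustration only; see the binder UAPI header */

/* Copy len bytes from a user address carried in the fixed-width ioctl ABI. */
static int example_copy_from_abi(void *dst, binder_uintptr_t uaddr, size_t len)
{
	const void __user *uptr = (const void __user *)(uintptr_t)uaddr;

	return copy_from_user(dst, uptr, len) ? -EFAULT : 0;
}
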
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
index cbe345168067..eb0834656dfe 100644
--- a/drivers/staging/android/binder.h
+++ b/drivers/staging/android/binder.h
@@ -20,311 +20,11 @@
#ifndef _LINUX_BINDER_H
#define _LINUX_BINDER_H
-#include <linux/ioctl.h>
+#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
+#define BINDER_IPC_32BIT 1
+#endif
-#define B_PACK_CHARS(c1, c2, c3, c4) \
- ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
-#define B_TYPE_LARGE 0x85
-
-enum {
- BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
- BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
- BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
- BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
- BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
-};
-
-enum {
- FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
- FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
-};
-
-/*
- * This is the flattened representation of a Binder object for transfer
- * between processes. The 'offsets' supplied as part of a binder transaction
- * contains offsets into the data where these structures occur. The Binder
- * driver takes care of re-writing the structure type and data as it moves
- * between processes.
- */
-struct flat_binder_object {
- /* 8 bytes for large_flat_header. */
- __u32 type;
- __u32 flags;
-
- /* 8 bytes of data. */
- union {
- void __user *binder; /* local object */
- __u32 handle; /* remote object */
- };
-
- /* extra data associated with local object */
- void __user *cookie;
-};
-
-/*
- * On 64-bit platforms where user code may run in 32-bits the driver must
- * translate the buffer (and local binder) addresses appropriately.
- */
-
-struct binder_write_read {
- size_t write_size; /* bytes to write */
- size_t write_consumed; /* bytes consumed by driver */
- unsigned long write_buffer;
- size_t read_size; /* bytes to read */
- size_t read_consumed; /* bytes consumed by driver */
- unsigned long read_buffer;
-};
-
-/* Use with BINDER_VERSION, driver fills in fields. */
-struct binder_version {
- /* driver protocol version -- increment with incompatible change */
- __s32 protocol_version;
-};
-
-/* This is the current protocol version. */
-#define BINDER_CURRENT_PROTOCOL_VERSION 7
-
-#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
-#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
-#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
-#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
-#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
-#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
-
-/*
- * NOTE: Two special error codes you should check for when calling
- * in to the driver are:
- *
- * EINTR -- The operation has been interupted. This should be
- * handled by retrying the ioctl() until a different error code
- * is returned.
- *
- * ECONNREFUSED -- The driver is no longer accepting operations
- * from your process. That is, the process is being destroyed.
- * You should handle this by exiting from your process. Note
- * that once this error code is returned, all further calls to
- * the driver from any thread will return this same code.
- */
-
-enum transaction_flags {
- TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
- TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
- TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
- TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
-};
-
-struct binder_transaction_data {
- /* The first two are only used for bcTRANSACTION and brTRANSACTION,
- * identifying the target and contents of the transaction.
- */
- union {
- __u32 handle; /* target descriptor of command transaction */
- void *ptr; /* target descriptor of return transaction */
- } target;
- void *cookie; /* target object cookie */
- __u32 code; /* transaction command */
-
- /* General information about the transaction. */
- __u32 flags;
- pid_t sender_pid;
- uid_t sender_euid;
- size_t data_size; /* number of bytes of data */
- size_t offsets_size; /* number of bytes of offsets */
-
- /* If this transaction is inline, the data immediately
- * follows here; otherwise, it ends with a pointer to
- * the data buffer.
- */
- union {
- struct {
- /* transaction data */
- const void __user *buffer;
- /* offsets from buffer to flat_binder_object structs */
- const void __user *offsets;
- } ptr;
- __u8 buf[8];
- } data;
-};
-
-struct binder_ptr_cookie {
- void *ptr;
- void *cookie;
-};
-
-struct binder_pri_desc {
- __s32 priority;
- __u32 desc;
-};
-
-struct binder_pri_ptr_cookie {
- __s32 priority;
- void *ptr;
- void *cookie;
-};
-
-enum binder_driver_return_protocol {
- BR_ERROR = _IOR('r', 0, __s32),
- /*
- * int: error code
- */
-
- BR_OK = _IO('r', 1),
- /* No parameters! */
-
- BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
- BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
- /*
- * binder_transaction_data: the received command.
- */
-
- BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
- /*
- * not currently supported
- * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
- * Else the remote object has acquired a primary reference.
- */
-
- BR_DEAD_REPLY = _IO('r', 5),
- /*
- * The target of the last transaction (either a bcTRANSACTION or
- * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
- */
-
- BR_TRANSACTION_COMPLETE = _IO('r', 6),
- /*
- * No parameters... always refers to the last transaction requested
- * (including replies). Note that this will be sent even for
- * asynchronous transactions.
- */
-
- BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
- BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
- BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
- BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
- /*
- * not currently supported
- * int: priority
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BR_NOOP = _IO('r', 12),
- /*
- * No parameters. Do nothing and examine the next command. It exists
- * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
- */
-
- BR_SPAWN_LOOPER = _IO('r', 13),
- /*
- * No parameters. The driver has determined that a process has no
- * threads waiting to service incoming transactions. When a process
- * receives this command, it must spawn a new service thread and
- * register it via bcENTER_LOOPER.
- */
-
- BR_FINISHED = _IO('r', 14),
- /*
- * not currently supported
- * stop threadpool thread
- */
-
- BR_DEAD_BINDER = _IOR('r', 15, void *),
- /*
- * void *: cookie
- */
- BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
- /*
- * void *: cookie
- */
-
- BR_FAILED_REPLY = _IO('r', 17),
- /*
- * The the last transaction (either a bcTRANSACTION or
- * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
- */
-};
-
-enum binder_driver_command_protocol {
- BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
- BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
- /*
- * binder_transaction_data: the sent command.
- */
-
- BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
- /*
- * not currently supported
- * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
- * Else you have acquired a primary reference on the object.
- */
-
- BC_FREE_BUFFER = _IOW('c', 3, void *),
- /*
- * void *: ptr to transaction data received on a read
- */
-
- BC_INCREFS = _IOW('c', 4, __u32),
- BC_ACQUIRE = _IOW('c', 5, __u32),
- BC_RELEASE = _IOW('c', 6, __u32),
- BC_DECREFS = _IOW('c', 7, __u32),
- /*
- * int: descriptor
- */
-
- BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
- BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
- /*
- * not currently supported
- * int: priority
- * int: descriptor
- */
-
- BC_REGISTER_LOOPER = _IO('c', 11),
- /*
- * No parameters.
- * Register a spawned looper thread with the device.
- */
-
- BC_ENTER_LOOPER = _IO('c', 12),
- BC_EXIT_LOOPER = _IO('c', 13),
- /*
- * No parameters.
- * These two commands are sent as an application-level thread
- * enters and exits the binder loop, respectively. They are
- * used so the binder can have an accurate count of the number
- * of looping threads it has available.
- */
-
- BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie
- */
-
- BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie
- */
-
- BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
- /*
- * void *: cookie
- */
-};
+#include "uapi/binder.h"
#endif /* _LINUX_BINDER_H */
diff --git a/drivers/staging/android/binder_trace.h b/drivers/staging/android/binder_trace.h
index 82a567c2af67..7f20f3dc8369 100644
--- a/drivers/staging/android/binder_trace.h
+++ b/drivers/staging/android/binder_trace.h
@@ -152,7 +152,7 @@ TRACE_EVENT(binder_transaction_node_to_ref,
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, node_debug_id)
- __field(void __user *, node_ptr)
+ __field(binder_uintptr_t, node_ptr)
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
),
@@ -163,8 +163,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
__entry->ref_debug_id = ref->debug_id;
__entry->ref_desc = ref->desc;
),
- TP_printk("transaction=%d node=%d src_ptr=0x%p ==> dest_ref=%d dest_desc=%d",
- __entry->debug_id, __entry->node_debug_id, __entry->node_ptr,
+ TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
+ __entry->debug_id, __entry->node_debug_id,
+ (u64)__entry->node_ptr,
__entry->ref_debug_id, __entry->ref_desc)
);
@@ -177,7 +178,7 @@ TRACE_EVENT(binder_transaction_ref_to_node,
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
__field(int, node_debug_id)
- __field(void __user *, node_ptr)
+ __field(binder_uintptr_t, node_ptr)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
@@ -186,9 +187,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
__entry->node_debug_id = ref->node->debug_id;
__entry->node_ptr = ref->node->ptr;
),
- TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%p",
+ TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
- __entry->ref_debug_id, __entry->ref_desc, __entry->node_ptr)
+ __entry->ref_debug_id, __entry->ref_desc,
+ (u64)__entry->node_ptr)
);
TRACE_EVENT(binder_transaction_ref_to_ref,
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 574066ff73f8..3d5bf1472236 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -16,6 +16,7 @@
*/
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
@@ -55,10 +56,12 @@ struct ion_device {
struct mutex buffer_lock;
struct rw_semaphore lock;
struct plist_head heaps;
- long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
- unsigned long arg);
+ long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
struct rb_root clients;
struct dentry *debug_root;
+ struct dentry *heaps_debug_root;
+ struct dentry *clients_debug_root;
};
/**
@@ -69,6 +72,8 @@ struct ion_device {
* @idr: an idr space for allocating handle ids
* @lock: lock protecting the tree of handles
* @name: used for debugging
+ * @display_name: used for debugging (unique version of @name)
+ * @display_serial: used for debugging (to make display_name unique)
* @task: used for debugging
*
* A client represents a list of buffers this client may access.
@@ -82,6 +87,8 @@ struct ion_client {
struct idr idr;
struct mutex lock;
const char *name;
+ char *display_name;
+ int display_serial;
struct task_struct *task;
pid_t pid;
struct dentry *debug_root;
@@ -208,7 +215,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
if (IS_ERR(table)) {
heap->ops->free(buffer);
kfree(buffer);
- return ERR_PTR(PTR_ERR(table));
+ return ERR_CAST(table);
}
buffer->sg_table = table;
if (ion_buffer_fault_user_mappings(buffer)) {
@@ -429,7 +436,7 @@ static bool ion_handle_validate(struct ion_client *client,
struct ion_handle *handle)
{
WARN_ON(!mutex_is_locked(&client->lock));
- return (idr_find(&client->idr, handle->id) == handle);
+ return idr_find(&client->idr, handle->id) == handle;
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
@@ -501,7 +508,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
return ERR_PTR(-ENODEV);
if (IS_ERR(buffer))
- return ERR_PTR(PTR_ERR(buffer));
+ return ERR_CAST(buffer);
handle = ion_handle_create(client, buffer);
@@ -708,6 +715,21 @@ static const struct file_operations debug_client_fops = {
.release = single_release,
};
+static int ion_get_client_serial(const struct rb_root *root,
+ const unsigned char *name)
+{
+ int serial = -1;
+ struct rb_node *node;
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ struct ion_client *client = rb_entry(node, struct ion_client,
+ node);
+ if (strcmp(client->name, name))
+ continue;
+ serial = max(serial, client->display_serial);
+ }
+ return serial + 1;
+}
+
struct ion_client *ion_client_create(struct ion_device *dev,
const char *name)
{
@@ -716,9 +738,13 @@ struct ion_client *ion_client_create(struct ion_device *dev,
struct rb_node **p;
struct rb_node *parent = NULL;
struct ion_client *entry;
- char debug_name[64];
pid_t pid;
+ if (!name) {
+ pr_err("%s: Name cannot be null\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
get_task_struct(current->group_leader);
task_lock(current->group_leader);
pid = task_pid_nr(current->group_leader);
@@ -733,21 +759,27 @@ struct ion_client *ion_client_create(struct ion_device *dev,
task_unlock(current->group_leader);
client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
- if (!client) {
- if (task)
- put_task_struct(current->group_leader);
- return ERR_PTR(-ENOMEM);
- }
+ if (!client)
+ goto err_put_task_struct;
client->dev = dev;
client->handles = RB_ROOT;
idr_init(&client->idr);
mutex_init(&client->lock);
- client->name = name;
client->task = task;
client->pid = pid;
+ client->name = kstrdup(name, GFP_KERNEL);
+ if (!client->name)
+ goto err_free_client;
down_write(&dev->lock);
+ client->display_serial = ion_get_client_serial(&dev->clients, name);
+ client->display_name = kasprintf(
+ GFP_KERNEL, "%s-%d", name, client->display_serial);
+ if (!client->display_name) {
+ up_write(&dev->lock);
+ goto err_free_client_name;
+ }
p = &dev->clients.rb_node;
while (*p) {
parent = *p;
@@ -761,13 +793,28 @@ struct ion_client *ion_client_create(struct ion_device *dev,
rb_link_node(&client->node, parent, p);
rb_insert_color(&client->node, &dev->clients);
- snprintf(debug_name, 64, "%u", client->pid);
- client->debug_root = debugfs_create_file(debug_name, 0664,
- dev->debug_root, client,
- &debug_client_fops);
+ client->debug_root = debugfs_create_file(client->display_name, 0664,
+ dev->clients_debug_root,
+ client, &debug_client_fops);
+ if (!client->debug_root) {
+ char buf[256], *path;
+ path = dentry_path(dev->clients_debug_root, buf, 256);
+ pr_err("Failed to create client debugfs at %s/%s\n",
+ path, client->display_name);
+ }
+
up_write(&dev->lock);
return client;
+
+err_free_client_name:
+ kfree(client->name);
+err_free_client:
+ kfree(client);
+err_put_task_struct:
+ if (task)
+ put_task_struct(current->group_leader);
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
@@ -792,6 +839,8 @@ void ion_client_destroy(struct ion_client *client)
debugfs_remove_recursive(client->debug_root);
up_write(&dev->lock);
+ kfree(client->display_name);
+ kfree(client->name);
kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
@@ -954,8 +1003,8 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
int ret = 0;
if (!buffer->heap->ops->map_user) {
- pr_err("%s: this heap does not define a method for mapping "
- "to userspace\n", __func__);
+ pr_err("%s: this heap does not define a method for mapping to userspace\n",
+ __func__);
return -EINVAL;
}
@@ -1017,9 +1066,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
mutex_lock(&buffer->lock);
vaddr = ion_buffer_kmap_get(buffer);
mutex_unlock(&buffer->lock);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
- return 0;
+ return PTR_ERR_OR_ZERO(vaddr);
}
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
@@ -1100,7 +1147,7 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf))
- return ERR_PTR(PTR_ERR(dmabuf));
+ return ERR_CAST(dmabuf);
/* if this memory came from ion */
if (dmabuf->ops != &dma_buf_ops) {
@@ -1293,9 +1340,11 @@ static int ion_open(struct inode *inode, struct file *file)
struct miscdevice *miscdev = file->private_data;
struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
struct ion_client *client;
+ char debug_name[64];
pr_debug("%s: %d\n", __func__, __LINE__);
- client = ion_client_create(dev, "user");
+ snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
+ client = ion_client_create(dev, debug_name);
if (IS_ERR(client))
return PTR_ERR(client);
file->private_data = client;
@@ -1338,7 +1387,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
size_t total_orphaned_size = 0;
seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
- seq_printf(s, "----------------------------------------------------\n");
+ seq_puts(s, "----------------------------------------------------\n");
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
@@ -1357,9 +1406,8 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
client->pid, size);
}
}
- seq_printf(s, "----------------------------------------------------\n");
- seq_printf(s, "orphaned allocations (info is from last known client):"
- "\n");
+ seq_puts(s, "----------------------------------------------------\n");
+ seq_puts(s, "orphaned allocations (info is from last known client):\n");
mutex_lock(&dev->buffer_lock);
for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
@@ -1376,14 +1424,14 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
}
}
mutex_unlock(&dev->buffer_lock);
- seq_printf(s, "----------------------------------------------------\n");
+ seq_puts(s, "----------------------------------------------------\n");
seq_printf(s, "%16.s %16zu\n", "total orphaned",
total_orphaned_size);
seq_printf(s, "%16.s %16zu\n", "total ", total_size);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
seq_printf(s, "%16.s %16zu\n", "deferred free",
heap->free_list_size);
- seq_printf(s, "----------------------------------------------------\n");
+ seq_puts(s, "----------------------------------------------------\n");
if (heap->debug_show)
heap->debug_show(heap, s, unused);
@@ -1443,6 +1491,8 @@ DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
+ struct dentry *debug_file;
+
if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
!heap->ops->unmap_dma)
pr_err("%s: can not add heap with invalid ops struct.\n",
@@ -1451,21 +1501,40 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_init_deferred_free(heap);
+ if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+ ion_heap_init_shrinker(heap);
+
heap->dev = dev;
down_write(&dev->lock);
/* use negative heap->id to reverse the priority -- when traversing
the list later attempt higher id numbers first */
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
- debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
- &debug_heap_fops);
+ debug_file = debugfs_create_file(heap->name, 0664,
+ dev->heaps_debug_root, heap,
+ &debug_heap_fops);
+
+ if (!debug_file) {
+ char buf[256], *path;
+ path = dentry_path(dev->heaps_debug_root, buf, 256);
+ pr_err("Failed to create heap debugfs at %s/%s\n",
+ path, heap->name);
+ }
+
#ifdef DEBUG_HEAP_SHRINKER
if (heap->shrinker.shrink) {
char debug_name[64];
snprintf(debug_name, 64, "%s_shrink", heap->name);
- debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
- &debug_shrink_fops);
+ debug_file = debugfs_create_file(
+ debug_name, 0644, dev->heaps_debug_root, heap,
+ &debug_shrink_fops);
+ if (!debug_file) {
+ char buf[256], *path;
+ path = dentry_path(dev->heaps_debug_root, buf, 256);
+ pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
+ path, debug_name);
+ }
}
#endif
up_write(&dev->lock);
@@ -1494,8 +1563,21 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
}
idev->debug_root = debugfs_create_dir("ion", NULL);
- if (!idev->debug_root)
- pr_err("ion: failed to create debug files.\n");
+ if (!idev->debug_root) {
+ pr_err("ion: failed to create debugfs root directory.\n");
+ goto debugfs_done;
+ }
+ idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
+ if (!idev->heaps_debug_root) {
+ pr_err("ion: failed to create debugfs heaps directory.\n");
+ goto debugfs_done;
+ }
+ idev->clients_debug_root = debugfs_create_dir("clients",
+ idev->debug_root);
+ if (!idev->clients_debug_root)
+ pr_err("ion: failed to create debugfs clients directory.\n");
+
+debugfs_done:
idev->custom_ioctl = custom_ioctl;
idev->buffers = RB_ROOT;
@@ -1509,6 +1591,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
void ion_device_destroy(struct ion_device *dev)
{
misc_deregister(&dev->dev);
+ debugfs_remove_recursive(dev->debug_root);
/* XXX need to free the heaps and clients ? */
kfree(dev);
}
@@ -1527,8 +1610,7 @@ void __init ion_reserve(struct ion_platform_data *data)
data->heaps[i].align,
MEMBLOCK_ALLOC_ANYWHERE);
if (!paddr) {
- pr_err("%s: error allocating memblock for "
- "heap %d\n",
+ pr_err("%s: error allocating memblock for heap %d\n",
__func__, i);
continue;
}
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index f0f98897e4b9..ce68ecfed31f 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -135,7 +135,7 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
struct device *dev = cma_heap->dev;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+ dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
&info->handle);
*addr = info->handle;
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 01cdc8aee898..3a45e79fe444 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -25,13 +25,13 @@
#include "ion.h"
#include "ion_priv.h"
-struct ion_device *idev;
-struct ion_heap **heaps;
+static struct ion_device *idev;
+static struct ion_heap **heaps;
-void *carveout_ptr;
-void *chunk_ptr;
+static void *carveout_ptr;
+static void *chunk_ptr;
-struct ion_platform_heap dummy_heaps[] = {
+static struct ion_platform_heap dummy_heaps[] = {
{
.id = ION_HEAP_TYPE_SYSTEM,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -58,7 +58,7 @@ struct ion_platform_heap dummy_heaps[] = {
},
};
-struct ion_platform_data dummy_ion_pdata = {
+static struct ion_platform_data dummy_ion_pdata = {
.nr = ARRAY_SIZE(dummy_heaps),
.heaps = dummy_heaps,
};
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 37e64d51394c..bdc6a28ba8c9 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -178,7 +178,8 @@ size_t ion_heap_freelist_size(struct ion_heap *heap)
return size;
}
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
+ bool skip_pools)
{
struct ion_buffer *buffer;
size_t total_drained = 0;
@@ -197,6 +198,8 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
list);
list_del(&buffer->list);
heap->free_list_size -= buffer->size;
+ if (skip_pools)
+ buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
total_drained += buffer->size;
spin_unlock(&heap->free_lock);
ion_buffer_destroy(buffer);
@@ -207,6 +210,16 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
return total_drained;
}
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+ return _ion_heap_freelist_drain(heap, size, false);
+}
+
+size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
+{
+ return _ion_heap_freelist_drain(heap, size, true);
+}
+
static int ion_heap_deferred_free(void *data)
{
struct ion_heap *heap = data;
@@ -246,12 +259,62 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
if (IS_ERR(heap->task)) {
pr_err("%s: creating thread for deferred free failed\n",
__func__);
- return PTR_RET(heap->task);
+ return PTR_ERR_OR_ZERO(heap->task);
}
sched_setscheduler(heap->task, SCHED_IDLE, &param);
return 0;
}
+static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+ shrinker);
+ int total = 0;
+
+ total = ion_heap_freelist_size(heap) / PAGE_SIZE;
+ if (heap->ops->shrink)
+ total += heap->ops->shrink(heap, sc->gfp_mask, 0);
+ return total;
+}
+
+static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+ shrinker);
+ int freed = 0;
+ int to_scan = sc->nr_to_scan;
+
+ if (to_scan == 0)
+ return 0;
+
+ /*
+ * shrink the free list first, no point in zeroing the memory if we're
+ * just going to reclaim it. Also, skip any possible page pooling.
+ */
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
+ PAGE_SIZE;
+
+ to_scan -= freed;
+ if (to_scan <= 0)
+ return freed;
+
+ if (heap->ops->shrink)
+ freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
+ return freed;
+}
+
+void ion_heap_init_shrinker(struct ion_heap *heap)
+{
+ heap->shrinker.count_objects = ion_heap_shrink_count;
+ heap->shrinker.scan_objects = ion_heap_shrink_scan;
+ heap->shrinker.seeks = DEFAULT_SEEKS;
+ heap->shrinker.batch = 0;
+ register_shrinker(&heap->shrinker);
+}
+
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_heap *heap = NULL;
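
With ion_heap_init_shrinker() the shrinker plumbing becomes shared code, and ion_device_add_heap() (see the ion.c hunk above) registers it for any heap that sets ION_HEAP_FLAG_DEFER_FREE or provides a shrink op. Below is a minimal sketch, not part of this patch, of how a heap would opt in under the new scheme; the my_heap_* helpers are hypothetical placeholders.

static int my_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
			  int nr_to_scan)
{
	/* nr_to_scan == 0 is a query: report how many pages could be freed */
	if (nr_to_scan == 0)
		return my_heap_pool_count(heap);

	/* otherwise free up to nr_to_scan pages and return how many went */
	return my_heap_pool_drain(heap, gfp_mask, nr_to_scan);
}

static struct ion_heap_ops my_heap_ops = {
	.allocate  = my_heap_allocate,
	.free      = my_heap_free,
	.map_dma   = my_heap_map_dma,
	.unmap_dma = my_heap_unmap_dma,
	.shrink    = my_heap_shrink, /* enough for ion_heap_init_shrinker() */
};
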
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index fa693c23681a..ecb5fc34ec5c 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -130,8 +130,7 @@ static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int nr_to_scan)
{
- int nr_freed = 0;
- int i;
+ int freed;
bool high;
high = !!(gfp_mask & __GFP_HIGHMEM);
@@ -139,7 +138,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
if (nr_to_scan == 0)
return ion_page_pool_total(pool, high);
- for (i = 0; i < nr_to_scan; i++) {
+ for (freed = 0; freed < nr_to_scan; freed++) {
struct page *page;
mutex_lock(&pool->mutex);
@@ -153,10 +152,9 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
}
mutex_unlock(&pool->mutex);
ion_page_pool_free_pages(pool, page);
- nr_freed += (1 << pool->order);
}
- return nr_freed;
+ return freed;
}
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index fc2e4fccf69d..1eba3f2076a9 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -38,6 +38,7 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
* @dev: back pointer to the ion_device
* @heap: back pointer to the heap the buffer came from
* @flags: buffer specific flags
+ * @private_flags: internal buffer specific flags
* @size: size of the buffer
* @priv_virt: private data to the buffer representable as
* a void *
@@ -66,6 +67,7 @@ struct ion_buffer {
struct ion_device *dev;
struct ion_heap *heap;
unsigned long flags;
+ unsigned long private_flags;
size_t size;
union {
void *priv_virt;
@@ -98,22 +100,27 @@ void ion_buffer_destroy(struct ion_buffer *buffer);
* @map_user map memory to userspace
*
* allocate, phys, and map_user return 0 on success, -errno on error.
- * map_dma and map_kernel return pointer on success, ERR_PTR on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being free'd must be truly free'd back to the
+ * system, not put in a page pool or otherwise cached.
*/
struct ion_heap_ops {
- int (*allocate) (struct ion_heap *heap,
- struct ion_buffer *buffer, unsigned long len,
- unsigned long align, unsigned long flags);
- void (*free) (struct ion_buffer *buffer);
- int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len);
- struct sg_table *(*map_dma) (struct ion_heap *heap,
- struct ion_buffer *buffer);
- void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
- void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
- void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
- int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma);
+ int (*allocate)(struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long align, unsigned long flags);
+ void (*free)(struct ion_buffer *buffer);
+ int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len);
+ struct sg_table * (*map_dma)(struct ion_heap *heap,
+ struct ion_buffer *buffer);
+ void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
+ void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+ void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+ int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+ int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};
/**
@@ -122,6 +129,17 @@ struct ion_heap_ops {
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
/**
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+
+/**
* struct ion_heap - represents a heap in the system
* @node: rb node to put the heap on the device's tree of heaps
* @dev: back pointer to the ion_device
@@ -132,10 +150,7 @@ struct ion_heap_ops {
* allocating. These are specified by platform data and
* MUST be unique
* @name: used for debugging
- * @shrinker: a shrinker for the heap, if the heap caches system
- * memory, it must define a shrinker to return it on low
- * memory conditions, this includes system memory cached
- * in the deferred free lists for heaps that support it
+ * @shrinker: a shrinker for the heap
* @free_list: free list head if deferred free is used
* @free_list_size size of the deferred free list in bytes
* @lock: protects the free list
@@ -219,6 +234,16 @@ int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
/**
+ * ion_heap_init_shrinker
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
+/**
* ion_heap_init_deferred_free -- initialize deferred free functionality
* @heap: the heap
*
@@ -250,6 +275,29 @@ void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
/**
+ * ion_heap_freelist_shrink - drain the deferred free
+ * list, skipping any heap-specific
+ * pooling or caching mechanisms
+ *
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely free'd back to the system. If you're free'ing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
+ */
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+ size_t size);
+
+/**
* ion_heap_freelist_size - returns the size of the freelist in bytes
* @heap: the heap
*/
@@ -305,13 +353,8 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
* @low_count: number of lowmem items in the pool
* @high_items: list of highmem items
* @low_items: list of lowmem items
- * @shrinker: a shrinker for the items
* @mutex: lock protecting this struct and especially the count
* item list
- * @alloc: function to be used to allocate pageory when the pool
- * is empty
- * @free: function to be used to free pageory back to the system
- * when the shrinker fires
* @gfp_mask: gfp_mask to use from alloc
* @order: order of pages in the pool
* @list: plist node for list of pools
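
The ION_PRIV_FLAG_SHRINKER_FREE contract documented above only holds if a heap's free op actually checks the flag. A hedged sketch follows, with hypothetical my_heap_* helpers; the ion_system_heap.c hunk below is the in-tree example.

static void my_heap_free(struct ion_buffer *buffer)
{
	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
		/* shrinker path: bypass the page pool, return pages now */
		my_heap_release_pages(buffer);
		return;
	}

	/* normal path: recycle the buffer's pages into the heap's pool */
	my_heap_pool_put(buffer);
}
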
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 9849f3963e75..c92363356ae1 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -90,7 +90,7 @@ static void free_buffer_page(struct ion_system_heap *heap,
{
bool cached = ion_buffer_cached(buffer);
- if (!cached) {
+ if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
struct ion_page_pool *pool = heap->pools[order_to_index(order)];
ion_page_pool_free(pool, page);
} else {
@@ -209,7 +209,7 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
/* uncached pages come from the page pools, zero them before returning
for security purposes (other allocations are zerod at alloc time */
- if (!cached)
+ if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
ion_heap_buffer_zero(buffer);
for_each_sg(table->sgl, sg, table->nents, i)
@@ -231,75 +231,34 @@ static void ion_system_heap_unmap_dma(struct ion_heap *heap,
return;
}
-static struct ion_heap_ops system_heap_ops = {
- .allocate = ion_system_heap_allocate,
- .free = ion_system_heap_free,
- .map_dma = ion_system_heap_map_dma,
- .unmap_dma = ion_system_heap_unmap_dma,
- .map_kernel = ion_heap_map_kernel,
- .unmap_kernel = ion_heap_unmap_kernel,
- .map_user = ion_heap_map_user,
-};
-
-static unsigned long ion_system_heap_shrink_count(struct shrinker *shrinker,
- struct shrink_control *sc)
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+ int nr_to_scan)
{
- struct ion_heap *heap = container_of(shrinker, struct ion_heap,
- shrinker);
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
+ struct ion_system_heap *sys_heap;
int nr_total = 0;
int i;
- /* total number of items is whatever the page pools are holding
- plus whatever's in the freelist */
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->pools[i];
- nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
- }
- nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
- return nr_total;
-
-}
-
-static unsigned long ion_system_heap_shrink_scan(struct shrinker *shrinker,
- struct shrink_control *sc)
-{
-
- struct ion_heap *heap = container_of(shrinker, struct ion_heap,
- shrinker);
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- int nr_freed = 0;
- int i;
-
- if (sc->nr_to_scan == 0)
- goto end;
-
- /* shrink the free list first, no point in zeroing the memory if
- we're just going to reclaim it */
- nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
- PAGE_SIZE;
-
- if (nr_freed >= sc->nr_to_scan)
- goto end;
+ sys_heap = container_of(heap, struct ion_system_heap, heap);
for (i = 0; i < num_orders; i++) {
struct ion_page_pool *pool = sys_heap->pools[i];
-
- nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
- sc->nr_to_scan);
- if (nr_freed >= sc->nr_to_scan)
- break;
+ nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
}
-end:
- return nr_freed;
-
+ return nr_total;
}
+static struct ion_heap_ops system_heap_ops = {
+ .allocate = ion_system_heap_allocate,
+ .free = ion_system_heap_free,
+ .map_dma = ion_system_heap_map_dma,
+ .unmap_dma = ion_system_heap_unmap_dma,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+ .shrink = ion_system_heap_shrink,
+};
+
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
void *unused)
{
@@ -347,11 +306,6 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
heap->pools[i] = pool;
}
- heap->heap.shrinker.scan_objects = ion_system_heap_shrink_scan;
- heap->heap.shrinker.count_objects = ion_system_heap_shrink_count;
- heap->heap.shrinker.seeks = DEFAULT_SEEKS;
- heap->heap.shrinker.batch = 0;
- register_shrinker(&heap->heap.shrinker);
heap->heap.debug_show = ion_system_heap_debug_show;
return &heap->heap;
err_create_pool:
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
index 3474c65f87fa..11c7cceb3c7d 100644
--- a/drivers/staging/android/ion/tegra/tegra_ion.c
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -32,13 +32,13 @@ static int tegra_ion_probe(struct platform_device *pdev)
num_heaps = pdata->nr;
- heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+ heaps = devm_kzalloc(&pdev->dev,
+ sizeof(struct ion_heap *) * pdata->nr,
+ GFP_KERNEL);
idev = ion_device_create(NULL);
- if (IS_ERR_OR_NULL(idev)) {
- kfree(heaps);
+ if (IS_ERR_OR_NULL(idev))
return PTR_ERR(idev);
- }
/* create the heaps as specified in the board file */
for (i = 0; i < num_heaps; i++) {
@@ -58,7 +58,6 @@ err:
if (heaps[i])
ion_heap_destroy(heaps[i]);
}
- kfree(heaps);
return err;
}
@@ -70,7 +69,6 @@ static int tegra_ion_remove(struct platform_device *pdev)
ion_device_destroy(idev);
for (i = 0; i < num_heaps; i++)
ion_heap_destroy(heaps[i]);
- kfree(heaps);
return 0;
}
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 6f094b37f1f1..b545d3d1da3e 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -88,7 +88,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
int other_file = global_page_state(NR_FILE_PAGES) -
- global_page_state(NR_SHMEM);
+ global_page_state(NR_SHMEM) -
+ total_swapcache_pages();
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
@@ -159,8 +160,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
selected->pid, selected->comm,
selected_oom_score_adj, selected_tasksize);
lowmem_deathpending_timeout = jiffies + HZ;
- send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
+ send_sig(SIGKILL, selected, 0);
rem += selected_tasksize;
}
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 5aaf71d6974b..1a50669ec8a9 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -18,10 +18,9 @@
#define _LINUX_SW_SYNC_H
#include <linux/types.h>
-
-#ifdef __KERNEL__
-
+#include <linux/kconfig.h>
#include "sync.h"
+#include "uapi/sw_sync.h"
struct sw_sync_timeline {
struct sync_timeline obj;
@@ -57,19 +56,4 @@ static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
}
#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
-#endif /* __KERNEL __ */
-
-struct sw_sync_create_fence_data {
- __u32 value;
- char name[32];
- __s32 fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC 'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
- struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
-
#endif /* _LINUX_SW_SYNC_H */
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index 62e2255b1c1e..eaf57cccf626 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -14,14 +14,14 @@
#define _LINUX_SYNC_H
#include <linux/types.h>
-#ifdef __KERNEL__
-
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include "uapi/sync.h"
+
struct sync_timeline;
struct sync_pt;
struct sync_fence;
@@ -53,7 +53,7 @@ struct sync_timeline_ops {
const char *driver_name;
/* required */
- struct sync_pt *(*dup)(struct sync_pt *pt);
+ struct sync_pt * (*dup)(struct sync_pt *pt);
/* required */
int (*has_signaled)(struct sync_pt *pt);
@@ -341,86 +341,4 @@ int sync_fence_cancel_async(struct sync_fence *fence,
*/
int sync_fence_wait(struct sync_fence *fence, long timeout);
-#endif /* __KERNEL__ */
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2: file descriptor of second fence
- * @name: name of new fence
- * @fence: returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
- __s32 fd2; /* fd of second fence */
- char name[32]; /* name of new fence */
- __s32 fence; /* fd on newly created fence */
-};
-
-/**
- * struct sync_pt_info - detailed sync_pt information
- * @len: length of sync_pt_info including any driver_data
- * @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implementing the parent
- * @status: status of the sync_pt 0:active 1:signaled <0:error
- * @timestamp_ns: timestamp of status change in nanoseconds
- * @driver_data: any driver dependent data
- */
-struct sync_pt_info {
- __u32 len;
- char obj_name[32];
- char driver_name[32];
- __s32 status;
- __u64 timestamp_ns;
-
- __u8 driver_data[0];
-};
-
-/**
- * struct sync_fence_info_data - data returned from fence info ioctl
- * @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_fence_data returned to userspace
- * including pt_info.
- * @name: name of fence
- * @status: status of fence. 1: signaled 0:active <0:error
- * @pt_info: a sync_pt_info struct for every sync_pt in the fence
- */
-struct sync_fence_info_data {
- __u32 len;
- char name[32];
- __s32 status;
-
- __u8 pt_info[0];
-};
-
-#define SYNC_IOC_MAGIC '>'
-
-/**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * pass timeout in milliseconds. Waits indefinitely timeout < 0.
- */
-#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data. Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
- struct sync_fence_info_data)
-
#endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index e81451425c01..0c7fdc83b336 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -90,8 +90,9 @@ static int timed_gpio_probe(struct platform_device *pdev)
if (!pdata)
return -EBUSY;
- gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios,
- GFP_KERNEL);
+ gpio_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct timed_gpio_data) * pdata->num_gpios,
+ GFP_KERNEL);
if (!gpio_data)
return -ENOMEM;
@@ -131,7 +132,6 @@ err_out:
timed_output_dev_unregister(&gpio_data[i].dev);
gpio_free(gpio_data[i].gpio);
}
- kfree(gpio_data);
return ret;
}
@@ -147,8 +147,6 @@ static int timed_gpio_remove(struct platform_device *pdev)
gpio_free(gpio_data[i].gpio);
}
- kfree(gpio_data);
-
return 0;
}
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
index 905c7cc9588e..13d2ca51cbe8 100644
--- a/drivers/staging/android/timed_output.h
+++ b/drivers/staging/android/timed_output.h
@@ -20,10 +20,10 @@ struct timed_output_dev {
const char *name;
/* enable the output and set the timer */
- void (*enable)(struct timed_output_dev *sdev, int timeout);
+ void (*enable)(struct timed_output_dev *sdev, int timeout);
/* returns the current number of milliseconds remaining on the timer */
- int (*get_time)(struct timed_output_dev *sdev);
+ int (*get_time)(struct timed_output_dev *sdev);
/* private data */
struct device *dev;
diff --git a/drivers/staging/android/uapi/android_alarm.h b/drivers/staging/android/uapi/android_alarm.h
new file mode 100644
index 000000000000..aa013f6f5f3a
--- /dev/null
+++ b/drivers/staging/android/uapi/android_alarm.h
@@ -0,0 +1,62 @@
+/* drivers/staging/android/uapi/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ANDROID_ALARM_H
+#define _UAPI_LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+ /* return code bit numbers or set alarm arg */
+ ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME,
+
+ ANDROID_ALARM_TYPE_COUNT,
+
+ /* return code bit numbers */
+ /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+enum android_alarm_return_flags {
+ ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+ ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT _IO('a', 1)
+
+#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
+
+#endif
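
For reference, a minimal userspace sketch of the alarm interface above: arm an RTC wakeup alarm ten seconds out and block until it fires. The /dev/alarm node name and the local copy of this header are assumptions, and error handling is reduced to perror().

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include "android_alarm.h"	/* assumed local copy of the uapi header */

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/alarm", O_RDWR);

	if (fd < 0) {
		perror("open /dev/alarm");
		return 1;
	}
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 10;	/* absolute expiry, ten seconds from now */
	if (ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP), &ts) < 0)
		perror("ANDROID_ALARM_SET");
	else if (ioctl(fd, ANDROID_ALARM_WAIT) < 0)	/* blocks until it fires */
		perror("ANDROID_ALARM_WAIT");
	close(fd);
	return 0;
}
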
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
new file mode 100644
index 000000000000..ba4743c71d6b
--- /dev/null
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -0,0 +1,47 @@
+/*
+ * drivers/staging/android/uapi/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed. It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _UAPI_LINUX_ASHMEM_H
+#define _UAPI_LINUX_ASHMEM_H
+
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN 256
+
+#define ASHMEM_NAME_DEF "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED 0
+#define ASHMEM_WAS_PURGED 1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED 0
+#define ASHMEM_IS_PINNED 1
+
+struct ashmem_pin {
+ __u32 offset; /* offset into region, in bytes, page-aligned */
+ __u32 len; /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC 0x77
+
+#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+
+#endif /* _UAPI_LINUX_ASHMEM_H */
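
A userspace sketch of the typical ashmem sequence (set name, set size, then mmap). The /dev/ashmem node and the local header copy are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ashmem.h"	/* assumed local copy of the uapi header */

int main(void)
{
	char name[ASHMEM_NAME_LEN] = "example-region";
	size_t size = 4096;
	char *p;
	int fd = open("/dev/ashmem", O_RDWR);

	if (fd < 0) {
		perror("open /dev/ashmem");
		return 1;
	}
	ioctl(fd, ASHMEM_SET_NAME, name);	/* optional, must precede mmap */
	ioctl(fd, ASHMEM_SET_SIZE, size);	/* required before mmap */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	strcpy(p, "hello from ashmem");
	munmap(p, size);
	close(fd);
	return 0;
}
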
diff --git a/drivers/staging/android/uapi/binder.h b/drivers/staging/android/uapi/binder.h
new file mode 100644
index 000000000000..904adb7600cf
--- /dev/null
+++ b/drivers/staging/android/uapi/binder.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_H
+#define _UAPI_LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+#ifdef BINDER_IPC_32BIT
+typedef __u32 binder_size_t;
+typedef __u32 binder_uintptr_t;
+#else
+typedef __u64 binder_size_t;
+typedef __u64 binder_uintptr_t;
+#endif
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes. The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur. The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ __u32 type;
+ __u32 flags;
+
+ /* 8 bytes of data. */
+ union {
+ binder_uintptr_t binder; /* local object */
+ __u32 handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ binder_uintptr_t cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bits the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+ binder_size_t write_size; /* bytes to write */
+ binder_size_t write_consumed; /* bytes consumed by driver */
+ binder_uintptr_t write_buffer;
+ binder_size_t read_size; /* bytes to read */
+ binder_size_t read_consumed; /* bytes consumed by driver */
+ binder_uintptr_t read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ __s32 protocol_version;
+};
+
+/* This is the current protocol version. */
+#ifdef BINDER_IPC_32BIT
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+#else
+#define BINDER_CURRENT_PROTOCOL_VERSION 8
+#endif
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
+#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted. This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
+ */
+ union {
+ /* target descriptor of command transaction */
+ __u32 handle;
+ /* target descriptor of return transaction */
+ binder_uintptr_t ptr;
+ } target;
+ binder_uintptr_t cookie; /* target object cookie */
+ __u32 code; /* transaction command */
+
+ /* General information about the transaction. */
+ __u32 flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ binder_size_t data_size; /* number of bytes of data */
+ binder_size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ binder_uintptr_t buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ binder_uintptr_t offsets;
+ } ptr;
+ __u8 buf[8];
+ } data;
+};
+
+struct binder_ptr_cookie {
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+};
+
+struct binder_handle_cookie {
+ __u32 handle;
+ binder_uintptr_t cookie;
+} __attribute__((packed));
+
+struct binder_pri_desc {
+ __s32 priority;
+ __u32 desc;
+};
+
+struct binder_pri_ptr_cookie {
+ __s32 priority;
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+};
+
+enum binder_driver_return_protocol {
+ BR_ERROR = _IOR('r', 0, __s32),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incoming transactions. When a process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
+ */
+};
+
+enum binder_driver_command_protocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
+ */
+
+ BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
+ /*
+ * not currently supported
+ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+ * Else you have acquired a primary reference on the object.
+ */
+
+ BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
+ /*
+ * void *: ptr to transaction data received on a read
+ */
+
+ BC_INCREFS = _IOW('c', 4, __u32),
+ BC_ACQUIRE = _IOW('c', 5, __u32),
+ BC_RELEASE = _IOW('c', 6, __u32),
+ BC_DECREFS = _IOW('c', 7, __u32),
+ /*
+ * int: descriptor
+ */
+
+ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+ /*
+ * not currently supported
+ * int: priority
+ * int: descriptor
+ */
+
+ BC_REGISTER_LOOPER = _IO('c', 11),
+ /*
+ * No parameters.
+ * Register a spawned looper thread with the device.
+ */
+
+ BC_ENTER_LOOPER = _IO('c', 12),
+ BC_EXIT_LOOPER = _IO('c', 13),
+ /*
+ * No parameters.
+ * These two commands are sent as an application-level thread
+ * enters and exits the binder loop, respectively. They are
+ * used so the binder can have an accurate count of the number
+ * of looping threads it has available.
+ */
+
+ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
+ struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
+ struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+};
+
+#endif /* _UAPI_LINUX_BINDER_H */
+
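
A userspace sketch that opens the driver, checks the protocol version, and retries on EINTR as the comment block above recommends. The /dev/binder node and the local header copy are assumptions.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "binder.h"	/* assumed local copy of the uapi header */

int main(void)
{
	struct binder_version vers;
	int ret;
	int fd = open("/dev/binder", O_RDWR);

	if (fd < 0) {
		perror("open /dev/binder");
		return 1;
	}
	do {	/* retry on EINTR, per the note above */
		ret = ioctl(fd, BINDER_VERSION, &vers);
	} while (ret < 0 && errno == EINTR);

	if (ret < 0)
		perror("BINDER_VERSION");
	else if (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
		fprintf(stderr, "protocol mismatch: kernel %d, header %d\n",
			vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
	close(fd);
	return 0;
}
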
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
new file mode 100644
index 000000000000..9b5d4869505c
--- /dev/null
+++ b/drivers/staging/android/uapi/sw_sync.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
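
A userspace sketch of the sw_sync flow: create a fence that signals at timeline value 1, then advance the timeline so it fires. The /dev/sw_sync node and the local header copy are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "sw_sync.h"	/* assumed local copy of the uapi header */

int main(void)
{
	struct sw_sync_create_fence_data data;
	__u32 inc = 1;
	int fd = open("/dev/sw_sync", O_RDWR);

	if (fd < 0) {
		perror("open /dev/sw_sync");
		return 1;
	}
	memset(&data, 0, sizeof(data));
	data.value = 1;	/* fence signals when the timeline reaches 1 */
	strncpy(data.name, "example-fence", sizeof(data.name) - 1);
	if (ioctl(fd, SW_SYNC_IOC_CREATE_FENCE, &data) < 0) {
		perror("SW_SYNC_IOC_CREATE_FENCE");
		close(fd);
		return 1;
	}
	if (ioctl(fd, SW_SYNC_IOC_INC, &inc) < 0)	/* step the timeline */
		perror("SW_SYNC_IOC_INC");
	close(data.fence);	/* fd of the new fence, filled in by the driver */
	close(fd);
	return 0;
}
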
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
new file mode 100644
index 000000000000..e964c751f6b8
--- /dev/null
+++ b/drivers/staging/android/uapi/sync.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2: file descriptor of second fence
+ * @name: name of new fence
+ * @fence: returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+ __s32 fd2; /* fd of second fence */
+ char name[32]; /* name of new fence */
+ __s32 fence; /* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len: length of sync_pt_info including any driver_data
+ * @obj_name: name of parent sync_timeline
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns: timestamp of status change in nanoseconds
+ * @driver_data: any driver dependent data
+ */
+struct sync_pt_info {
+ __u32 len;
+ char obj_name[32];
+ char driver_name[32];
+ __s32 status;
+ __u64 timestamp_ns;
+
+ __u8 driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len:	ioctl caller writes the size of the buffer it is passing in.
+ * ioctl returns length of sync_fence_data returned to userspace
+ * including pt_info.
+ * @name: name of fence
+ * @status: status of fence. 1: signaled 0:active <0:error
+ * @pt_info: a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+ __u32 len;
+ char name[32];
+ __s32 status;
+
+ __u8 pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC '>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * Pass timeout in milliseconds. Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data. Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len. On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
+ struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
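
A userspace sketch of the fence-info flow described above: wait on a fence fd, then walk the returned sync_pt_info records using each entry's len field. The fence fd is assumed to come from elsewhere (e.g. sw_sync or a display driver), and the header is a local copy.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "sync.h"	/* assumed local copy of the uapi header */

/* Wait up to timeout_ms on fence_fd, then dump per-pt status. */
static int dump_fence(int fence_fd, int timeout_ms)
{
	struct sync_fence_info_data *info;
	struct sync_pt_info *pt;
	__s32 to = timeout_ms;
	__u32 len = 4096;	/* room for the header plus pt_info records */
	__u8 *end;

	if (ioctl(fence_fd, SYNC_IOC_WAIT, &to) < 0)
		perror("SYNC_IOC_WAIT");

	info = calloc(1, len);
	if (!info)
		return -1;
	info->len = len;	/* caller passes in the buffer size */
	if (ioctl(fence_fd, SYNC_IOC_FENCE_INFO, info) < 0) {
		perror("SYNC_IOC_FENCE_INFO");
		free(info);
		return -1;
	}
	printf("fence %s status %d\n", info->name, info->status);

	pt = (struct sync_pt_info *)info->pt_info;
	end = (__u8 *)info + info->len;	/* len now covers all pt_info */
	while ((__u8 *)pt < end) {
		printf("  pt %s/%s status %d\n",
		       pt->obj_name, pt->driver_name, pt->status);
		pt = (struct sync_pt_info *)((__u8 *)pt + pt->len);
	}
	free(info);
	return 0;
}
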
diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h
index f0d6f0c38207..1b2d9f3bd55a 100644
--- a/drivers/staging/bcm/Adapter.h
+++ b/drivers/staging/bcm/Adapter.h
@@ -37,8 +37,10 @@ struct bcm_link_request {
union u_ip_address {
struct {
- ULONG ulIpv4Addr[MAX_IP_RANGE_LENGTH]; /* Source Ip Address Range */
- ULONG ulIpv4Mask[MAX_IP_RANGE_LENGTH]; /* Source Ip Mask Address Range */
+ /* Source Ip Address Range */
+ ULONG ulIpv4Addr[MAX_IP_RANGE_LENGTH];
+ /* Source Ip Mask Address Range */
+ ULONG ulIpv4Mask[MAX_IP_RANGE_LENGTH];
};
struct {
ULONG ulIpv6Addr[MAX_IP_RANGE_LENGTH * 4]; /* Source Ip Address Range */
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index f1b6de0293c8..ae7490b4d70e 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -150,1903 +150,2392 @@ static ssize_t bcm_char_read(struct file *filp, char __user *buf, size_t size,
return PktLen;
}
-static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
+static int bcm_char_ioctl_reg_read_private(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
{
- struct bcm_tarang_data *pTarang = filp->private_data;
- void __user *argp = (void __user *)arg;
- struct bcm_mini_adapter *Adapter = pTarang->Adapter;
- INT Status = STATUS_FAILURE;
- int timeout = 0;
+ struct bcm_rdm_buffer sRdmBuffer = {0};
struct bcm_ioctl_buffer IoBuffer;
+ PCHAR temp_buff;
+ INT Status = STATUS_FAILURE;
+ UINT Bufflen;
+ u16 temp_value;
int bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX",
- cmd, arg);
-
- if (_IOC_TYPE(cmd) != BCM_IOCTL)
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
- if (_IOC_DIR(cmd) & _IOC_READ)
- Status = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
- else if (_IOC_DIR(cmd) & _IOC_WRITE)
- Status = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
- else if (_IOC_NONE == (_IOC_DIR(cmd) & _IOC_NONE))
- Status = STATUS_SUCCESS;
- if (Status)
- return -EFAULT;
+ if (IoBuffer.InputLength > sizeof(sRdmBuffer))
+ return -EINVAL;
- if (Adapter->device_removed)
+ if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
return -EFAULT;
- if (false == Adapter->fw_download_done) {
- switch (cmd) {
- case IOCTL_MAC_ADDR_REQ:
- case IOCTL_LINK_REQ:
- case IOCTL_CM_REQUEST:
- case IOCTL_SS_INFO_REQ:
- case IOCTL_SEND_CONTROL_MESSAGE:
- case IOCTL_IDLE_REQ:
- case IOCTL_BCM_GPIO_SET_REQUEST:
- case IOCTL_BCM_GPIO_STATUS_REQUEST:
- return -EACCES;
- default:
- break;
- }
+ if (IoBuffer.OutputLength > USHRT_MAX ||
+ IoBuffer.OutputLength == 0) {
+ return -EINVAL;
}
- Status = vendorextnIoctl(Adapter, cmd, arg);
- if (Status != CONTINUE_COMMON_PATH)
- return Status;
+ Bufflen = IoBuffer.OutputLength;
+ temp_value = 4 - (Bufflen % 4);
+ Bufflen += temp_value % 4;
- switch (cmd) {
- /* Rdms for Swin Idle... */
- case IOCTL_BCM_REGISTER_READ_PRIVATE: {
- struct bcm_rdm_buffer sRdmBuffer = {0};
- PCHAR temp_buff;
- UINT Bufflen;
- u16 temp_value;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (IoBuffer.InputLength > sizeof(sRdmBuffer))
- return -EINVAL;
+ temp_buff = kmalloc(Bufflen, GFP_KERNEL);
+ if (!temp_buff)
+ return -ENOMEM;
- if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ bytes = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
+ (PUINT)temp_buff, Bufflen);
+ if (bytes > 0) {
+ Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
+ kfree(temp_buff);
return -EFAULT;
-
- if (IoBuffer.OutputLength > USHRT_MAX ||
- IoBuffer.OutputLength == 0) {
- return -EINVAL;
}
+ } else {
+ Status = bytes;
+ }
- Bufflen = IoBuffer.OutputLength;
- temp_value = 4 - (Bufflen % 4);
- Bufflen += temp_value % 4;
+ kfree(temp_buff);
+ return Status;
+}
- temp_buff = kmalloc(Bufflen, GFP_KERNEL);
- if (!temp_buff)
- return -ENOMEM;
+static int bcm_char_ioctl_reg_write_private(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_wrm_buffer sWrmBuffer = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ UINT uiTempVar = 0;
+ INT Status;
- bytes = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
- (PUINT)temp_buff, Bufflen);
- if (bytes > 0) {
- Status = STATUS_SUCCESS;
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
- kfree(temp_buff);
- return -EFAULT;
- }
- } else {
- Status = bytes;
- }
+ /* Copy Ioctl Buffer structure */
- kfree(temp_buff);
- break;
- }
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- case IOCTL_BCM_REGISTER_WRITE_PRIVATE: {
- struct bcm_wrm_buffer sWrmBuffer = {0};
- UINT uiTempVar = 0;
- /* Copy Ioctl Buffer structure */
+ if (IoBuffer.InputLength > sizeof(sWrmBuffer))
+ return -EINVAL;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ /* Get WrmBuffer structure */
+ if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
- if (IoBuffer.InputLength > sizeof(sWrmBuffer))
- return -EINVAL;
+ uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
+ if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
+ ((uiTempVar == EEPROM_REJECT_REG_1) ||
+ (uiTempVar == EEPROM_REJECT_REG_2) ||
+ (uiTempVar == EEPROM_REJECT_REG_3) ||
+ (uiTempVar == EEPROM_REJECT_REG_4))) {
- /* Get WrmBuffer structure */
- if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "EEPROM Access Denied, not in VSG Mode\n");
+ return -EFAULT;
+ }
- uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
- if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
- ((uiTempVar == EEPROM_REJECT_REG_1) ||
- (uiTempVar == EEPROM_REJECT_REG_2) ||
- (uiTempVar == EEPROM_REJECT_REG_3) ||
- (uiTempVar == EEPROM_REJECT_REG_4))) {
+ Status = wrmalt(Adapter, (UINT)sWrmBuffer.Register,
+ (PUINT)sWrmBuffer.Data, sizeof(ULONG));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "EEPROM Access Denied, not in VSG Mode\n");
- return -EFAULT;
- }
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Done\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Failed\n");
+ Status = -EFAULT;
+ }
+ return Status;
+}
- Status = wrmalt(Adapter, (UINT)sWrmBuffer.Register,
- (PUINT)sWrmBuffer.Data, sizeof(ULONG));
+static int bcm_char_ioctl_eeprom_reg_read(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_rdm_buffer sRdmBuffer = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ PCHAR temp_buff = NULL;
+ UINT uiTempVar = 0;
+ INT Status;
+ int bytes;
- if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Done\n");
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Failed\n");
- Status = -EFAULT;
- }
- break;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device in Idle Mode, Blocking Rdms\n");
+ return -EACCES;
}
- case IOCTL_BCM_REGISTER_READ:
- case IOCTL_BCM_EEPROM_REGISTER_READ: {
- struct bcm_rdm_buffer sRdmBuffer = {0};
- PCHAR temp_buff = NULL;
- UINT uiTempVar = 0;
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Device in Idle Mode, Blocking Rdms\n");
- return -EACCES;
- }
+ if (IoBuffer.InputLength > sizeof(sRdmBuffer))
+ return -EINVAL;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
- if (IoBuffer.InputLength > sizeof(sRdmBuffer))
- return -EINVAL;
+ if (IoBuffer.OutputLength > USHRT_MAX ||
+ IoBuffer.OutputLength == 0) {
+ return -EINVAL;
+ }
- if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
+ if (!temp_buff)
+ return STATUS_FAILURE;
- if (IoBuffer.OutputLength > USHRT_MAX ||
- IoBuffer.OutputLength == 0) {
- return -EINVAL;
- }
+ if ((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
+ ((ULONG)sRdmBuffer.Register & 0x3)) {
- temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
- if (!temp_buff)
- return STATUS_FAILURE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM Done On invalid Address : %x Access Denied.\n",
+ (int)sRdmBuffer.Register);
- if ((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
- ((ULONG)sRdmBuffer.Register & 0x3)) {
+ kfree(temp_buff);
+ return -EINVAL;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "RDM Done On invalid Address : %x Access Denied.\n",
- (int)sRdmBuffer.Register);
+ uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
+ bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register,
+ (PUINT)temp_buff, IoBuffer.OutputLength);
+ if (bytes > 0) {
+ Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
kfree(temp_buff);
- return -EINVAL;
+ return -EFAULT;
}
+ } else {
+ Status = bytes;
+ }
- uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
- bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register,
- (PUINT)temp_buff, IoBuffer.OutputLength);
+ kfree(temp_buff);
+ return Status;
+}
- if (bytes > 0) {
- Status = STATUS_SUCCESS;
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
- kfree(temp_buff);
- return -EFAULT;
- }
- } else {
- Status = bytes;
- }
+static int bcm_char_ioctl_eeprom_reg_write(void __user *argp,
+ struct bcm_mini_adapter *Adapter, UINT cmd)
+{
+ struct bcm_wrm_buffer sWrmBuffer = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ UINT uiTempVar = 0;
+ INT Status;
- kfree(temp_buff);
- break;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device in Idle Mode, Blocking Wrms\n");
+ return -EACCES;
}
- case IOCTL_BCM_REGISTER_WRITE:
- case IOCTL_BCM_EEPROM_REGISTER_WRITE: {
- struct bcm_wrm_buffer sWrmBuffer = {0};
- UINT uiTempVar = 0;
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Device in Idle Mode, Blocking Wrms\n");
- return -EACCES;
- }
+ if (IoBuffer.InputLength > sizeof(sWrmBuffer))
+ return -EINVAL;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ /* Get WrmBuffer structure */
+ if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
- if (IoBuffer.InputLength > sizeof(sWrmBuffer))
- return -EINVAL;
+ if ((((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
+ ((ULONG)sWrmBuffer.Register & 0x3)) {
- /* Get WrmBuffer structure */
- if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM Done On invalid Address : %x Access Denied.\n",
+ (int)sWrmBuffer.Register);
+ return -EINVAL;
+ }
- if ((((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
- ((ULONG)sWrmBuffer.Register & 0x3)) {
+ uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
+ if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
+ ((uiTempVar == EEPROM_REJECT_REG_1) ||
+ (uiTempVar == EEPROM_REJECT_REG_2) ||
+ (uiTempVar == EEPROM_REJECT_REG_3) ||
+ (uiTempVar == EEPROM_REJECT_REG_4)) &&
+ (cmd == IOCTL_BCM_REGISTER_WRITE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "WRM Done On invalid Address : %x Access Denied.\n",
- (int)sWrmBuffer.Register);
- return -EINVAL;
- }
-
- uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
- if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
- ((uiTempVar == EEPROM_REJECT_REG_1) ||
- (uiTempVar == EEPROM_REJECT_REG_2) ||
- (uiTempVar == EEPROM_REJECT_REG_3) ||
- (uiTempVar == EEPROM_REJECT_REG_4)) &&
- (cmd == IOCTL_BCM_REGISTER_WRITE)) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "EEPROM Access Denied, not in VSG Mode\n");
- return -EFAULT;
- }
+ "EEPROM Access Denied, not in VSG Mode\n");
+ return -EFAULT;
+ }
- Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register,
- (PUINT)sWrmBuffer.Data,
- sWrmBuffer.Length);
+ Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register,
+ (PUINT)sWrmBuffer.Data,
+ sWrmBuffer.Length);
- if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG,
- DBG_LVL_ALL, "WRM Done\n");
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Failed\n");
- Status = -EFAULT;
- }
- break;
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Done\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Failed\n");
+ Status = -EFAULT;
}
- case IOCTL_BCM_GPIO_SET_REQUEST: {
- UCHAR ucResetValue[4];
- UINT value = 0;
- UINT uiBit = 0;
- UINT uiOperation = 0;
- struct bcm_gpio_info gpio_info = {0};
-
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ return Status;
+}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "GPIO Can't be set/clear in Low power Mode");
- return -EACCES;
- }
+static int bcm_char_ioctl_gpio_set_request(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_gpio_info gpio_info = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ UCHAR ucResetValue[4];
+ UINT value = 0;
+ UINT uiBit = 0;
+ UINT uiOperation = 0;
+ INT Status;
+ int bytes;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- if (IoBuffer.InputLength > sizeof(gpio_info))
- return -EINVAL;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "GPIO Can't be set/clear in Low power Mode");
+ return -EACCES;
+ }
- if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- uiBit = gpio_info.uiGpioNumber;
- uiOperation = gpio_info.uiGpioValue;
- value = (1<<uiBit);
+ if (IoBuffer.InputLength > sizeof(gpio_info))
+ return -EINVAL;
- if (IsReqGpioIsLedInNVM(Adapter, value) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!",
- value);
- Status = -EINVAL;
- break;
- }
+ if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
- /* Set - setting 1 */
- if (uiOperation) {
- /* Set the gpio output register */
- Status = wrmaltWithLock(Adapter,
- BCM_GPIO_OUTPUT_SET_REG,
- (PUINT)(&value), sizeof(UINT));
+ uiBit = gpio_info.uiGpioNumber;
+ uiOperation = gpio_info.uiGpioValue;
+ value = (1<<uiBit);
- if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Set the GPIO bit\n");
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Failed to set the %dth GPIO\n",
- uiBit);
- break;
- }
- } else {
- /* Set the gpio output register */
- Status = wrmaltWithLock(Adapter,
- BCM_GPIO_OUTPUT_CLR_REG,
- (PUINT)(&value), sizeof(UINT));
+ if (IsReqGpioIsLedInNVM(Adapter, value) == false) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!",
+ value);
+ return -EINVAL;
+ }
- if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Set the GPIO bit\n");
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Failed to clear the %dth GPIO\n",
- uiBit);
- break;
- }
- }
+ /* Set - setting 1 */
+ if (uiOperation) {
+ /* Set the gpio output register */
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_SET_REG,
+ (PUINT)(&value), sizeof(UINT));
- bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER,
- (PUINT)ucResetValue, sizeof(UINT));
- if (bytes < 0) {
- Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "GPIO_MODE_REGISTER read failed");
- break;
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Set the GPIO bit\n");
} else {
- Status = STATUS_SUCCESS;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Failed to set the %dth GPIO\n",
+ uiBit);
+ return Status;
}
-
- /* Set the gpio mode register to output */
- *(UINT *)ucResetValue |= (1<<uiBit);
- Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER,
- (PUINT)ucResetValue, sizeof(UINT));
+ } else {
+ /* Set the gpio output register */
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_CLR_REG,
+ (PUINT)(&value), sizeof(UINT));
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Set the GPIO to output Mode\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Set the GPIO bit\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Failed to put GPIO in Output Mode\n");
- break;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Failed to clear the %dth GPIO\n",
+ uiBit);
+ return Status;
}
}
- break;
- case BCM_LED_THREAD_STATE_CHANGE_REQ: {
- struct bcm_user_thread_req threadReq = {0};
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER,
+ (PUINT)ucResetValue, sizeof(UINT));
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "User made LED thread InActive");
+ "GPIO_MODE_REGISTER read failed");
+ return Status;
+ } else {
+ Status = STATUS_SUCCESS;
+ }
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ /* Set the gpio mode register to output */
+ *(UINT *)ucResetValue |= (1<<uiBit);
+ Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER,
+ (PUINT)ucResetValue, sizeof(UINT));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "GPIO Can't be set/clear in Low power Mode");
- Status = -EACCES;
- break;
- }
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Set the GPIO to output Mode\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Failed to put GPIO in Output Mode\n");
+ }
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ return Status;
+}
- if (IoBuffer.InputLength > sizeof(threadReq))
- return -EINVAL;
+static int bcm_char_ioctl_led_thread_state_change_req(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_user_thread_req threadReq = {0};
+ struct bcm_ioctl_buffer IoBuffer;
- if (copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "User made LED thread InActive");
- /* if LED thread is running(Actively or Inactively) set it state to make inactive */
- if (Adapter->LEDInfo.led_thread_running) {
- if (threadReq.ThreadState == LED_THREAD_ACTIVATION_REQ) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Activating thread req");
- Adapter->DriverState = LED_THREAD_ACTIVE;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "DeActivating Thread req.....");
- Adapter->DriverState = LED_THREAD_INACTIVE;
- }
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- /* signal thread. */
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "GPIO Can't be set/clear in Low power Mode");
+ return -EACCES;
}
- break;
-
- case IOCTL_BCM_GPIO_STATUS_REQUEST: {
- ULONG uiBit = 0;
- UCHAR ucRead[4];
- struct bcm_gpio_info gpio_info = {0};
-
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE))
- return -EACCES;
-
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
- if (IoBuffer.InputLength > sizeof(gpio_info))
- return -EINVAL;
-
- if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- uiBit = gpio_info.uiGpioNumber;
+ if (IoBuffer.InputLength > sizeof(threadReq))
+ return -EINVAL;
- /* Set the gpio output register */
- bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
- (PUINT)ucRead, sizeof(UINT));
+ if (copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
- if (bytes < 0) {
- Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "RDM Failed\n");
- return Status;
+ /* if the LED thread is running (actively or inactively),
+ * set its state according to the request
+ */
+ if (Adapter->LEDInfo.led_thread_running) {
+ if (threadReq.ThreadState == LED_THREAD_ACTIVATION_REQ) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Activating thread req");
+ Adapter->DriverState = LED_THREAD_ACTIVE;
} else {
- Status = STATUS_SUCCESS;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "DeActivating Thread req.....");
+ Adapter->DriverState = LED_THREAD_INACTIVE;
}
+
+ /* signal thread. */
+ wake_up(&Adapter->LEDInfo.notify_led_event);
}
- break;
+ return STATUS_SUCCESS;
+}
- case IOCTL_BCM_GPIO_MULTI_REQUEST: {
- UCHAR ucResetValue[4];
- struct bcm_gpio_multi_info gpio_multi_info[MAX_IDX];
- struct bcm_gpio_multi_info *pgpio_multi_info = (struct bcm_gpio_multi_info *)gpio_multi_info;
+static int bcm_char_ioctl_gpio_status_request(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_gpio_info gpio_info = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ ULONG uiBit = 0;
+ UCHAR ucRead[4];
+ INT Status;
+ int bytes;
- memset(pgpio_multi_info, 0, MAX_IDX * sizeof(struct bcm_gpio_multi_info));
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE))
+ return -EACCES;
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE))
- return -EINVAL;
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ if (IoBuffer.InputLength > sizeof(gpio_info))
+ return -EINVAL;
- if (IoBuffer.InputLength > sizeof(gpio_multi_info))
- return -EINVAL;
+ if (copy_from_user(&gpio_info, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
- if (copy_from_user(&gpio_multi_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ uiBit = gpio_info.uiGpioNumber;
- if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
- pgpio_multi_info[WIMAX_IDX].uiGPIOMask,
- Adapter->gpioBitMap);
- Status = -EINVAL;
- break;
- }
+ /* Set the gpio output register */
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
+ (PUINT)ucRead, sizeof(UINT));
- /* Set the gpio output register */
- if ((pgpio_multi_info[WIMAX_IDX].uiGPIOMask) &
- (pgpio_multi_info[WIMAX_IDX].uiGPIOCommand)) {
- /* Set 1's in GPIO OUTPUT REGISTER */
- *(UINT *)ucResetValue = pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
- pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
- pgpio_multi_info[WIMAX_IDX].uiGPIOValue;
+ if (bytes < 0) {
+ Status = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM Failed\n");
+ return Status;
+ } else {
+ Status = STATUS_SUCCESS;
+ }
+ return Status;
+}
+
+static int bcm_char_ioctl_gpio_multi_request(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_gpio_multi_info gpio_multi_info[MAX_IDX];
+ struct bcm_gpio_multi_info *pgpio_multi_info =
+ (struct bcm_gpio_multi_info *)gpio_multi_info;
+ struct bcm_ioctl_buffer IoBuffer;
+ UCHAR ucResetValue[4];
+ INT Status = STATUS_FAILURE;
+ int bytes;
- if (*(UINT *) ucResetValue)
- Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG,
- (PUINT)ucResetValue, sizeof(ULONG));
+ memset(pgpio_multi_info, 0, MAX_IDX * sizeof(struct bcm_gpio_multi_info));
- if (Status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
- return Status;
- }
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE))
+ return -EINVAL;
- /* Clear to 0's in GPIO OUTPUT REGISTER */
- *(UINT *)ucResetValue = (pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
- pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
- (~(pgpio_multi_info[WIMAX_IDX].uiGPIOValue)));
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (*(UINT *) ucResetValue)
- Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)ucResetValue, sizeof(ULONG));
+ if (IoBuffer.InputLength > sizeof(gpio_multi_info))
+ return -EINVAL;
+ if (IoBuffer.OutputLength > sizeof(gpio_multi_info))
+ IoBuffer.OutputLength = sizeof(gpio_multi_info);
- if (Status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
- return Status;
- }
- }
+ if (copy_from_user(&gpio_multi_info, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
- if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) {
- bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ if (IsReqGpioIsLedInNVM(Adapter,
+ pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == false) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
+ pgpio_multi_info[WIMAX_IDX].uiGPIOMask,
+ Adapter->gpioBitMap);
+ return -EINVAL;
+ }
- if (bytes < 0) {
- Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "RDM to GPIO_PIN_STATE_REGISTER Failed.");
- return Status;
- } else {
- Status = STATUS_SUCCESS;
- }
+ /* Set the gpio output register */
+ if ((pgpio_multi_info[WIMAX_IDX].uiGPIOMask) &
+ (pgpio_multi_info[WIMAX_IDX].uiGPIOCommand)) {
+ /* Set 1's in GPIO OUTPUT REGISTER */
+ *(UINT *)ucResetValue = pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOValue;
- pgpio_multi_info[WIMAX_IDX].uiGPIOValue = (*(UINT *)ucResetValue &
- pgpio_multi_info[WIMAX_IDX].uiGPIOMask);
- }
+ if (*(UINT *) ucResetValue)
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_SET_REG,
+ (PUINT)ucResetValue, sizeof(ULONG));
- Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_info, IoBuffer.OutputLength);
- if (Status) {
+ if (Status != STATUS_SUCCESS) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Failed while copying Content to IOBufer for user space err:%d", Status);
- return -EFAULT;
+ "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
+ return Status;
}
- }
- break;
-
- case IOCTL_BCM_GPIO_MODE_REQUEST: {
- UCHAR ucResetValue[4];
- struct bcm_gpio_multi_mode gpio_multi_mode[MAX_IDX];
- struct bcm_gpio_multi_mode *pgpio_multi_mode = (struct bcm_gpio_multi_mode *)gpio_multi_mode;
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE))
- return -EINVAL;
-
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ /* Clear to 0's in GPIO OUTPUT REGISTER */
+ *(UINT *)ucResetValue = (pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
+ (~(pgpio_multi_info[WIMAX_IDX].uiGPIOValue)));
- if (IoBuffer.InputLength > sizeof(gpio_multi_mode))
- return -EINVAL;
+ if (*(UINT *) ucResetValue)
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_CLR_REG, (PUINT)ucResetValue,
+ sizeof(ULONG));
- if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ if (Status != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
+ return Status;
+ }
+ }
- bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) {
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
+ (PUINT)ucResetValue, sizeof(UINT));
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read of GPIO_MODE_REGISTER failed");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM to GPIO_PIN_STATE_REGISTER Failed.");
return Status;
} else {
Status = STATUS_SUCCESS;
}
- /* Validating the request */
- if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap);
- Status = -EINVAL;
- break;
- }
+ pgpio_multi_info[WIMAX_IDX].uiGPIOValue = (*(UINT *)ucResetValue &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOMask);
+ }
- if (pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) {
- /* write all OUT's (1's) */
- *(UINT *) ucResetValue |= (pgpio_multi_mode[WIMAX_IDX].uiGPIOMode &
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
+ Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_info,
+ IoBuffer.OutputLength);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Failed while copying Content to IOBufer for user space err:%d",
+ Status);
+ return -EFAULT;
+ }
+ return Status;
+}
- /* write all IN's (0's) */
- *(UINT *) ucResetValue &= ~((~pgpio_multi_mode[WIMAX_IDX].uiGPIOMode) &
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
+static int bcm_char_ioctl_gpio_mode_request(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_gpio_multi_mode gpio_multi_mode[MAX_IDX];
+ struct bcm_gpio_multi_mode *pgpio_multi_mode =
+ (struct bcm_gpio_multi_mode *)gpio_multi_mode;
+ struct bcm_ioctl_buffer IoBuffer;
+ UCHAR ucResetValue[4];
+ INT Status;
+ int bytes;
- /* Currently implemented return the modes of all GPIO's
- * else needs to bit AND with mask
- */
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE))
+ return -EINVAL;
- Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(ULONG));
- if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "WRM to GPIO_MODE_REGISTER Done");
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "WRM to GPIO_MODE_REGISTER Failed");
- Status = -EFAULT;
- break;
- }
- } else {
-/* if uiGPIOMask is 0 then return mode register configuration */
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
- }
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_mode, IoBuffer.OutputLength);
- if (Status) {
+ if (IoBuffer.InputLength > sizeof(gpio_multi_mode))
+ return -EINVAL;
+ if (IoBuffer.OutputLength > sizeof(gpio_multi_mode))
+ IoBuffer.OutputLength = sizeof(gpio_multi_mode);
+
+ if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
+
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER,
+ (PUINT)ucResetValue, sizeof(UINT));
+
+ if (bytes < 0) {
+ Status = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Read of GPIO_MODE_REGISTER failed");
+ return Status;
+ } else {
+ Status = STATUS_SUCCESS;
+ }
+
+ /* Validating the request */
+ if (IsReqGpioIsLedInNVM(Adapter,
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) == false) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMask,
+ Adapter->gpioBitMap);
+ return -EINVAL;
+ }
+
+ if (pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) {
+ /* write all OUT's (1's) */
+ *(UINT *) ucResetValue |= (pgpio_multi_mode[WIMAX_IDX].uiGPIOMode &
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
+
+ /* write all IN's (0's) */
+ *(UINT *) ucResetValue &= ~((~pgpio_multi_mode[WIMAX_IDX].uiGPIOMode) &
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
+
+ /* As currently implemented, this returns the modes of all GPIOs;
+ * otherwise it would need to be bitwise-ANDed with the mask
+ */
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
+
+ Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER,
+ (PUINT)ucResetValue, sizeof(ULONG));
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "WRM to GPIO_MODE_REGISTER Done");
+ } else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Failed while copying Content to IOBufer for user space err:%d", Status);
+ "WRM to GPIO_MODE_REGISTER Failed");
return -EFAULT;
}
+ } else {
+ /* if uiGPIOMask is 0 then return mode register configuration */
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
}
- break;
- case IOCTL_MAC_ADDR_REQ:
- case IOCTL_LINK_REQ:
- case IOCTL_CM_REQUEST:
- case IOCTL_SS_INFO_REQ:
- case IOCTL_SEND_CONTROL_MESSAGE:
- case IOCTL_IDLE_REQ: {
- PVOID pvBuffer = NULL;
+ Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_mode,
+ IoBuffer.OutputLength);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Failed while copying Content to IOBufer for user space err:%d",
+ Status);
+ return -EFAULT;
+ }
+ return Status;
+}
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+static int bcm_char_ioctl_misc_request(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ PVOID pvBuffer = NULL;
+ INT Status;
- if (IoBuffer.InputLength < sizeof(struct bcm_link_request))
- return -EINVAL;
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE)
- return -EINVAL;
+ if (IoBuffer.InputLength < sizeof(struct bcm_link_request))
+ return -EINVAL;
- pvBuffer = memdup_user(IoBuffer.InputBuffer,
- IoBuffer.InputLength);
- if (IS_ERR(pvBuffer))
- return PTR_ERR(pvBuffer);
-
- down(&Adapter->LowPowerModeSync);
- Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
- !Adapter->bPreparingForLowPowerMode,
- (1 * HZ));
- if (Status == -ERESTARTSYS)
- goto cntrlEnd;
-
- if (Adapter->bPreparingForLowPowerMode) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Preparing Idle Mode is still True - Hence Rejecting control message\n");
- Status = STATUS_FAILURE;
- goto cntrlEnd;
- }
- Status = CopyBufferToControlPacket(Adapter, (PVOID)pvBuffer);
+ if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE)
+ return -EINVAL;
-cntrlEnd:
- up(&Adapter->LowPowerModeSync);
- kfree(pvBuffer);
- break;
+ pvBuffer = memdup_user(IoBuffer.InputBuffer,
+ IoBuffer.InputLength);
+ if (IS_ERR(pvBuffer))
+ return PTR_ERR(pvBuffer);
+
+ down(&Adapter->LowPowerModeSync);
+ Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
+ !Adapter->bPreparingForLowPowerMode,
+ (1 * HZ));
+ if (Status == -ERESTARTSYS)
+ goto cntrlEnd;
+
+ if (Adapter->bPreparingForLowPowerMode) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Preparing Idle Mode is still True - Hence Rejecting control message\n");
+ Status = STATUS_FAILURE;
+ goto cntrlEnd;
}
+ Status = CopyBufferToControlPacket(Adapter, (PVOID)pvBuffer);
- case IOCTL_BCM_BUFFER_DOWNLOAD_START: {
- if (down_trylock(&Adapter->NVMRdmWrmLock)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
- return -EACCES;
- }
+cntrlEnd:
+ up(&Adapter->LowPowerModeSync);
+ kfree(pvBuffer);
+ return Status;
+}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Starting the firmware download PID =0x%x!!!!\n", current->pid);
-
- if (down_trylock(&Adapter->fw_download_sema))
- return -EBUSY;
-
- Adapter->bBinDownloaded = false;
- Adapter->fw_download_process_pid = current->pid;
- Adapter->bCfgDownloaded = false;
- Adapter->fw_download_done = false;
- netif_carrier_off(Adapter->dev);
- netif_stop_queue(Adapter->dev);
- Status = reset_card_proc(Adapter);
- if (Status) {
- pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- return Status;
- }
- mdelay(10);
+static int bcm_char_ioctl_buffer_download_start(
+ struct bcm_mini_adapter *Adapter)
+{
+ INT Status;
+ if (down_trylock(&Adapter->NVMRdmWrmLock)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
+ return -EACCES;
+ }
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Starting the firmware download PID =0x%x!!!!\n", current->pid);
+
+ if (down_trylock(&Adapter->fw_download_sema))
+ return -EBUSY;
+
+ Adapter->bBinDownloaded = false;
+ Adapter->fw_download_process_pid = current->pid;
+ Adapter->bCfgDownloaded = false;
+ Adapter->fw_download_done = false;
+ netif_carrier_off(Adapter->dev);
+ netif_stop_queue(Adapter->dev);
+ Status = reset_card_proc(Adapter);
+ if (Status) {
+ pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
+ up(&Adapter->fw_download_sema);
up(&Adapter->NVMRdmWrmLock);
return Status;
}
+ mdelay(10);
- case IOCTL_BCM_BUFFER_DOWNLOAD: {
- struct bcm_firmware_info *psFwInfo = NULL;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
+}
- if (!down_trylock(&Adapter->fw_download_sema)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Invalid way to download buffer. Use Start and then call this!!!\n");
- up(&Adapter->fw_download_sema);
- Status = -EINVAL;
- return Status;
- }
+static int bcm_char_ioctl_buffer_download(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_firmware_info *psFwInfo = NULL;
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
- up(&Adapter->fw_download_sema);
- return -EFAULT;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Starting the firmware download PID =0x%x!!!!\n", current->pid);
+ if (!down_trylock(&Adapter->fw_download_sema)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Length for FW DLD is : %lx\n", IoBuffer.InputLength);
-
- if (IoBuffer.InputLength > sizeof(struct bcm_firmware_info)) {
- up(&Adapter->fw_download_sema);
- return -EINVAL;
- }
-
- psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
- if (!psFwInfo) {
- up(&Adapter->fw_download_sema);
- return -ENOMEM;
- }
-
- if (copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
- up(&Adapter->fw_download_sema);
- kfree(psFwInfo);
- return -EFAULT;
- }
-
- if (!psFwInfo->pvMappedFirmwareAddress ||
- (psFwInfo->u32FirmwareLength == 0)) {
+ "Invalid way to download buffer. Use Start and then call this!!!\n");
+ up(&Adapter->fw_download_sema);
+ return -EINVAL;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n",
- psFwInfo->u32FirmwareLength);
- up(&Adapter->fw_download_sema);
- kfree(psFwInfo);
- Status = -EINVAL;
- return Status;
- }
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
+ up(&Adapter->fw_download_sema);
+ return -EFAULT;
+ }
- Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Length for FW DLD is : %lx\n", IoBuffer.InputLength);
- if (Status != STATUS_SUCCESS) {
- if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n");
- else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n");
+ if (IoBuffer.InputLength > sizeof(struct bcm_firmware_info)) {
+ up(&Adapter->fw_download_sema);
+ return -EINVAL;
+ }
- /* up(&Adapter->fw_download_sema); */
+ psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
+ if (!psFwInfo) {
+ up(&Adapter->fw_download_sema);
+ return -ENOMEM;
+ }
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = DRIVER_INIT;
- Adapter->LEDInfo.bLedInitDone = false;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
- }
+ if (copy_from_user(psFwInfo, IoBuffer.InputBuffer,
+ IoBuffer.InputLength)) {
+ up(&Adapter->fw_download_sema);
+ kfree(psFwInfo);
+ return -EFAULT;
+ }
- if (Status != STATUS_SUCCESS)
- up(&Adapter->fw_download_sema);
+ if (!psFwInfo->pvMappedFirmwareAddress ||
+ (psFwInfo->u32FirmwareLength == 0)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "IOCTL: Firmware File Uploaded\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Something else is wrong %lu\n",
+ psFwInfo->u32FirmwareLength);
+ up(&Adapter->fw_download_sema);
kfree(psFwInfo);
+ Status = -EINVAL;
return Status;
}
- case IOCTL_BCM_BUFFER_DOWNLOAD_STOP: {
- if (!down_trylock(&Adapter->fw_download_sema)) {
- up(&Adapter->fw_download_sema);
- return -EINVAL;
- }
+ Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
- if (down_trylock(&Adapter->NVMRdmWrmLock)) {
+ if (Status != STATUS_SUCCESS) {
+ if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "FW download blocked as EEPROM Read/Write is in progress\n");
- up(&Adapter->fw_download_sema);
- return -EACCES;
- }
-
- Adapter->bBinDownloaded = TRUE;
- Adapter->bCfgDownloaded = TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->downloadDDR = 0;
-
- /* setting the Mips to Run */
- Status = run_card_proc(Adapter);
+ "IOCTL: Configuration File Upload Failed\n");
+ else
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "IOCTL: Firmware File Upload Failed\n");
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n");
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- return Status;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "Firm Download Over...\n");
- }
+ /* up(&Adapter->fw_download_sema); */
- mdelay(10);
-
- /* Wait for MailBox Interrupt */
- if (StartInterruptUrb((struct bcm_interface_adapter *)Adapter->pvInterfaceAdapter))
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
-
- timeout = 5*HZ;
- Adapter->waiting_to_fw_download_done = false;
- wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
- Adapter->waiting_to_fw_download_done, timeout);
- Adapter->fw_download_process_pid = INVALID_PID;
- Adapter->fw_download_done = TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->PrevNumRecvDescs = 0;
- atomic_set(&Adapter->cntrlpktCnt, 0);
- Adapter->LinkUpStatus = 0;
- Adapter->LinkStatus = 0;
-
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = FW_DOWNLOAD_DONE;
+ if (Adapter->LEDInfo.led_thread_running &
+ BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = DRIVER_INIT;
+ Adapter->LEDInfo.bLedInitDone = false;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
+ }
+
+ if (Status != STATUS_SUCCESS)
+ up(&Adapter->fw_download_sema);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL: Firmware File Uploaded\n");
+ kfree(psFwInfo);
+ return Status;
+}
- if (!timeout)
- Status = -ENODEV;
+static int bcm_char_ioctl_buffer_download_stop(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ INT Status;
+ int timeout = 0;
+ if (!down_trylock(&Adapter->fw_download_sema)) {
up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- return Status;
+ return -EINVAL;
}
- case IOCTL_BE_BUCKET_SIZE:
- Status = 0;
- if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
- Status = -EFAULT;
- break;
+ if (down_trylock(&Adapter->NVMRdmWrmLock)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "FW download blocked as EEPROM Read/Write is in progress\n");
+ up(&Adapter->fw_download_sema);
+ return -EACCES;
+ }
- case IOCTL_RTPS_BUCKET_SIZE:
- Status = 0;
- if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
- Status = -EFAULT;
- break;
+ Adapter->bBinDownloaded = TRUE;
+ Adapter->bCfgDownloaded = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->downloadDDR = 0;
- case IOCTL_CHIP_RESET: {
- INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
- if (NVMAccess) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
- return -EACCES;
- }
+ /* setting the Mips to Run */
+ Status = run_card_proc(Adapter);
- down(&Adapter->RxAppControlQueuelock);
- Status = reset_card_proc(Adapter);
- flushAllAppQ();
- up(&Adapter->RxAppControlQueuelock);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Firm Download Failed\n");
+ up(&Adapter->fw_download_sema);
up(&Adapter->NVMRdmWrmLock);
- ResetCounters(Adapter);
- break;
+ return Status;
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Firm Download Over...\n");
}
- case IOCTL_QOS_THRESHOLD: {
- USHORT uiLoopIndex;
+ mdelay(10);
- Status = 0;
- for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
- if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
- (unsigned long __user *)arg)) {
- Status = -EFAULT;
- break;
- }
- }
- break;
+ /* Wait for MailBox Interrupt */
+ if (StartInterruptUrb((struct bcm_interface_adapter *)Adapter->pvInterfaceAdapter))
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Unable to send interrupt...\n");
+
+ timeout = 5*HZ;
+ Adapter->waiting_to_fw_download_done = false;
+ wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
+ Adapter->waiting_to_fw_download_done, timeout);
+ Adapter->fw_download_process_pid = INVALID_PID;
+ Adapter->fw_download_done = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->PrevNumRecvDescs = 0;
+ atomic_set(&Adapter->cntrlpktCnt, 0);
+ Adapter->LinkUpStatus = 0;
+ Adapter->LinkStatus = 0;
+
+ if (Adapter->LEDInfo.led_thread_running &
+ BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = FW_DOWNLOAD_DONE;
+ wake_up(&Adapter->LEDInfo.notify_led_event);
}
- case IOCTL_DUMP_PACKET_INFO:
- DumpPackInfo(Adapter);
- DumpPhsRules(&Adapter->stBCMPhsContext);
- Status = STATUS_SUCCESS;
- break;
+ if (!timeout)
+ Status = -ENODEV;
- case IOCTL_GET_PACK_INFO:
- if (copy_to_user(argp, &Adapter->PackInfo, sizeof(struct bcm_packet_info)*NO_OF_QUEUES))
- return -EFAULT;
- Status = STATUS_SUCCESS;
- break;
+ up(&Adapter->fw_download_sema);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
+}
- case IOCTL_BCM_SWITCH_TRANSFER_MODE: {
- UINT uiData = 0;
- if (copy_from_user(&uiData, argp, sizeof(UINT)))
- return -EFAULT;
+static int bcm_char_ioctl_chip_reset(struct bcm_mini_adapter *Adapter)
+{
+ INT Status;
+ INT NVMAccess;
- if (uiData) {
- /* Allow All Packets */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n");
- Adapter->TransferMode = ETH_PACKET_TUNNELING_MODE;
- } else {
- /* Allow IP only Packets */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: IP_PACKET_ONLY_MODE\n");
- Adapter->TransferMode = IP_PACKET_ONLY_MODE;
- }
- Status = STATUS_SUCCESS;
- break;
+ NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
+ if (NVMAccess) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
+ return -EACCES;
}
- case IOCTL_BCM_GET_DRIVER_VERSION: {
- ulong len;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ down(&Adapter->RxAppControlQueuelock);
+ Status = reset_card_proc(Adapter);
+ flushAllAppQ();
+ up(&Adapter->RxAppControlQueuelock);
+ up(&Adapter->NVMRdmWrmLock);
+ ResetCounters(Adapter);
+ return Status;
+}
- len = min_t(ulong, IoBuffer.OutputLength, strlen(DRV_VERSION) + 1);
+static int bcm_char_ioctl_qos_threshold(ULONG arg,
+ struct bcm_mini_adapter *Adapter)
+{
+ USHORT uiLoopIndex;
- if (copy_to_user(IoBuffer.OutputBuffer, DRV_VERSION, len))
+ for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
+ if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
+ (unsigned long __user *)arg)) {
return -EFAULT;
- Status = STATUS_SUCCESS;
- break;
+ }
}
+ return 0;
+}
- case IOCTL_BCM_GET_CURRENT_STATUS: {
- struct bcm_link_state link_state;
+static int bcm_char_ioctl_switch_transfer_mode(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ UINT uiData = 0;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user failed..\n");
- return -EFAULT;
- }
+ if (copy_from_user(&uiData, argp, sizeof(UINT)))
+ return -EFAULT;
- if (IoBuffer.OutputLength != sizeof(link_state)) {
- Status = -EINVAL;
- break;
- }
+ if (uiData) {
+ /* Allow All Packets */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n");
+ Adapter->TransferMode = ETH_PACKET_TUNNELING_MODE;
+ } else {
+ /* Allow IP only Packets */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_SWITCH_TRANSFER_MODE: IP_PACKET_ONLY_MODE\n");
+ Adapter->TransferMode = IP_PACKET_ONLY_MODE;
+ }
+ return STATUS_SUCCESS;
+}
- memset(&link_state, 0, sizeof(link_state));
- link_state.bIdleMode = Adapter->IdleMode;
- link_state.bShutdownMode = Adapter->bShutStatus;
- link_state.ucLinkStatus = Adapter->LinkStatus;
+static int bcm_char_ioctl_get_driver_version(void __user *argp)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ ulong len;
- if (copy_to_user(IoBuffer.OutputBuffer, &link_state, min_t(size_t, sizeof(link_state), IoBuffer.OutputLength))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
- return -EFAULT;
- }
- Status = STATUS_SUCCESS;
- break;
- }
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- case IOCTL_BCM_SET_MAC_TRACING: {
- UINT tracing_flag;
+ len = min_t(ulong, IoBuffer.OutputLength, strlen(DRV_VERSION) + 1);
- /* copy ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ if (copy_to_user(IoBuffer.OutputBuffer, DRV_VERSION, len))
+ return -EFAULT;
- if (copy_from_user(&tracing_flag, IoBuffer.InputBuffer, sizeof(UINT)))
- return -EFAULT;
+ return STATUS_SUCCESS;
+}
- if (tracing_flag)
- Adapter->pTarangs->MacTracingEnabled = TRUE;
- else
- Adapter->pTarangs->MacTracingEnabled = false;
- break;
- }
+static int bcm_char_ioctl_get_current_status(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_link_state link_state;
+ struct bcm_ioctl_buffer IoBuffer;
- case IOCTL_BCM_GET_DSX_INDICATION: {
- ULONG ulSFId = 0;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "copy_from_user failed..\n");
+ return -EFAULT;
+ }
- if (IoBuffer.OutputLength < sizeof(struct bcm_add_indication_alt)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Mismatch req: %lx needed is =0x%zx!!!",
- IoBuffer.OutputLength, sizeof(struct bcm_add_indication_alt));
- return -EINVAL;
- }
+ if (IoBuffer.OutputLength != sizeof(link_state))
+ return -EINVAL;
- if (copy_from_user(&ulSFId, IoBuffer.InputBuffer, sizeof(ulSFId)))
- return -EFAULT;
+ memset(&link_state, 0, sizeof(link_state));
+ link_state.bIdleMode = Adapter->IdleMode;
+ link_state.bShutdownMode = Adapter->bShutStatus;
+ link_state.ucLinkStatus = Adapter->LinkStatus;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Get DSX Data SF ID is =%lx\n", ulSFId);
- get_dsx_sf_data_to_application(Adapter, ulSFId, IoBuffer.OutputBuffer);
- Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, &link_state, min_t(size_t,
+ sizeof(link_state), IoBuffer.OutputLength))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy_to_user Failed..\n");
+ return -EFAULT;
}
- break;
+ return STATUS_SUCCESS;
+}
- case IOCTL_BCM_GET_HOST_MIBS: {
- PVOID temp_buff;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+static int bcm_char_ioctl_set_mac_tracing(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ UINT tracing_flag;
- if (IoBuffer.OutputLength != sizeof(struct bcm_host_stats_mibs)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Length Check failed %lu %zd\n",
- IoBuffer.OutputLength, sizeof(struct bcm_host_stats_mibs));
- return -EINVAL;
- }
+ /* copy ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- /* FIXME: HOST_STATS are too big for kmalloc (122048)! */
- temp_buff = kzalloc(sizeof(struct bcm_host_stats_mibs), GFP_KERNEL);
- if (!temp_buff)
- return STATUS_FAILURE;
+ if (copy_from_user(&tracing_flag, IoBuffer.InputBuffer, sizeof(UINT)))
+ return -EFAULT;
- Status = ProcessGetHostMibs(Adapter, temp_buff);
- GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
+ if (tracing_flag)
+ Adapter->pTarangs->MacTracingEnabled = TRUE;
+ else
+ Adapter->pTarangs->MacTracingEnabled = false;
- if (Status != STATUS_FAILURE)
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(struct bcm_host_stats_mibs))) {
- kfree(temp_buff);
- return -EFAULT;
- }
+ return STATUS_SUCCESS;
+}
- kfree(temp_buff);
- break;
+static int bcm_char_ioctl_get_dsx_indication(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ ULONG ulSFId = 0;
+
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
+
+ if (IoBuffer.OutputLength < sizeof(struct bcm_add_indication_alt)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Mismatch req: %lx needed is =0x%zx!!!",
+ IoBuffer.OutputLength,
+ sizeof(struct bcm_add_indication_alt));
+ return -EINVAL;
}
- case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE:
- if ((false == Adapter->bTriedToWakeUpFromlowPowerMode) && (TRUE == Adapter->IdleMode)) {
- Adapter->usIdleModePattern = ABORT_IDLE_MODE;
- Adapter->bWakeUpDevice = TRUE;
- wake_up(&Adapter->process_rx_cntrlpkt);
- }
+ if (copy_from_user(&ulSFId, IoBuffer.InputBuffer, sizeof(ulSFId)))
+ return -EFAULT;
- Status = STATUS_SUCCESS;
- break;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Get DSX Data SF ID is =%lx\n", ulSFId);
+ get_dsx_sf_data_to_application(Adapter, ulSFId, IoBuffer.OutputBuffer);
+ return STATUS_SUCCESS;
+}
- case IOCTL_BCM_BULK_WRM: {
- struct bcm_bulk_wrm_buffer *pBulkBuffer;
- UINT uiTempVar = 0;
- PCHAR pvBuffer = NULL;
+static int bcm_char_ioctl_get_host_mibs(void __user *argp,
+ struct bcm_mini_adapter *Adapter, struct bcm_tarang_data *pTarang)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_FAILURE;
+ PVOID temp_buff;
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle/Shutdown Mode, Blocking Wrms\n");
- Status = -EACCES;
- break;
- }
+ if (IoBuffer.OutputLength != sizeof(struct bcm_host_stats_mibs)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Length Check failed %lu %zd\n", IoBuffer.OutputLength,
+ sizeof(struct bcm_host_stats_mibs));
+ return -EINVAL;
+ }
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ /* FIXME: HOST_STATS are too big for kmalloc (122048)! */
+ temp_buff = kzalloc(sizeof(struct bcm_host_stats_mibs), GFP_KERNEL);
+ if (!temp_buff)
+ return STATUS_FAILURE;
+
+ Status = ProcessGetHostMibs(Adapter, temp_buff);
+ GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
+
+ if (Status != STATUS_FAILURE) {
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff,
+ sizeof(struct bcm_host_stats_mibs))) {
+ kfree(temp_buff);
return -EFAULT;
+ }
+ }
- if (IoBuffer.InputLength < sizeof(ULONG) * 2)
- return -EINVAL;
+ kfree(temp_buff);
+ return Status;
+}
- pvBuffer = memdup_user(IoBuffer.InputBuffer,
- IoBuffer.InputLength);
- if (IS_ERR(pvBuffer))
- return PTR_ERR(pvBuffer);
+static int bcm_char_ioctl_bulk_wrm(void __user *argp,
+ struct bcm_mini_adapter *Adapter, UINT cmd)
+{
+ struct bcm_bulk_wrm_buffer *pBulkBuffer;
+ struct bcm_ioctl_buffer IoBuffer;
+ UINT uiTempVar = 0;
+ INT Status = STATUS_FAILURE;
+ PCHAR pvBuffer = NULL;
- pBulkBuffer = (struct bcm_bulk_wrm_buffer *)pvBuffer;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- if (((ULONG)pBulkBuffer->Register & 0x0F000000) != 0x0F000000 ||
- ((ULONG)pBulkBuffer->Register & 0x3)) {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n", (int)pBulkBuffer->Register);
- kfree(pvBuffer);
- Status = -EINVAL;
- break;
- }
+ BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device in Idle/Shutdown Mode, Blocking Wrms\n");
+ return -EACCES;
+ }
- uiTempVar = pBulkBuffer->Register & EEPROM_REJECT_MASK;
- if (!((Adapter->pstargetparams->m_u32Customize)&VSG_MODE) &&
- ((uiTempVar == EEPROM_REJECT_REG_1) ||
- (uiTempVar == EEPROM_REJECT_REG_2) ||
- (uiTempVar == EEPROM_REJECT_REG_3) ||
- (uiTempVar == EEPROM_REJECT_REG_4)) &&
- (cmd == IOCTL_BCM_REGISTER_WRITE)) {
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- kfree(pvBuffer);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
- Status = -EFAULT;
- break;
- }
+ if (IoBuffer.InputLength < sizeof(ULONG) * 2)
+ return -EINVAL;
- if (pBulkBuffer->SwapEndian == false)
- Status = wrmWithLock(Adapter, (UINT)pBulkBuffer->Register, (PCHAR)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
- else
- Status = wrmaltWithLock(Adapter, (UINT)pBulkBuffer->Register, (PUINT)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
+ pvBuffer = memdup_user(IoBuffer.InputBuffer,
+ IoBuffer.InputLength);
+ if (IS_ERR(pvBuffer))
+ return PTR_ERR(pvBuffer);
- if (Status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n");
+ pBulkBuffer = (struct bcm_bulk_wrm_buffer *)pvBuffer;
+ if (((ULONG)pBulkBuffer->Register & 0x0F000000) != 0x0F000000 ||
+ ((ULONG)pBulkBuffer->Register & 0x3)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM Done On invalid Address : %x Access Denied.\n",
+ (int)pBulkBuffer->Register);
kfree(pvBuffer);
- break;
+ return -EINVAL;
}
- case IOCTL_BCM_GET_NVM_SIZE:
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ uiTempVar = pBulkBuffer->Register & EEPROM_REJECT_MASK;
+ if (!((Adapter->pstargetparams->m_u32Customize)&VSG_MODE) &&
+ ((uiTempVar == EEPROM_REJECT_REG_1) ||
+ (uiTempVar == EEPROM_REJECT_REG_2) ||
+ (uiTempVar == EEPROM_REJECT_REG_3) ||
+ (uiTempVar == EEPROM_REJECT_REG_4)) &&
+ (cmd == IOCTL_BCM_REGISTER_WRITE)) {
+
+ kfree(pvBuffer);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "EEPROM Access Denied, not in VSG Mode\n");
+ return -EFAULT;
+ }
+
+ if (pBulkBuffer->SwapEndian == false)
+ Status = wrmWithLock(Adapter, (UINT)pBulkBuffer->Register,
+ (PCHAR)pBulkBuffer->Values,
+ IoBuffer.InputLength - 2*sizeof(ULONG));
+ else
+ Status = wrmaltWithLock(Adapter, (UINT)pBulkBuffer->Register,
+ (PUINT)pBulkBuffer->Values,
+ IoBuffer.InputLength - 2*sizeof(ULONG));
+
+ if (Status != STATUS_SUCCESS)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n");
+
+ kfree(pvBuffer);
+ return Status;
+}
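
Two gates protect the bulk-WRM path above: the target register must sit in the 0x0Fxxxxxx window and be 4-byte aligned, and a handful of EEPROM-related registers are rejected unless the device is in VSG mode. The following standalone sketch reproduces only the address gate; the mask values mirror the diff, everything else (names, signature) is illustrative.

#include <stdbool.h>

/* Mirror of the address checks in bcm_char_ioctl_bulk_wrm (masks from the diff). */
static bool wrm_address_allowed(unsigned long reg)
{
	/* Must live in the 0x0F000000 window... */
	if ((reg & 0x0F000000) != 0x0F000000)
		return false;
	/* ...and be 32-bit aligned. */
	if (reg & 0x3)
		return false;
	return true;
}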
+
+static int bcm_char_ioctl_get_nvm_size(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
+
+ if (Adapter->eNVMType == NVM_EEPROM || Adapter->eNVMType == NVM_FLASH) {
+ if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiNVMDSDSize,
+ sizeof(UINT)))
return -EFAULT;
+ }
- if (Adapter->eNVMType == NVM_EEPROM || Adapter->eNVMType == NVM_FLASH) {
- if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiNVMDSDSize, sizeof(UINT)))
- return -EFAULT;
- }
+ return STATUS_SUCCESS;
+}
- Status = STATUS_SUCCESS;
- break;
+static int bcm_char_ioctl_cal_init(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ UINT uiSectorSize = 0;
+ INT Status = STATUS_FAILURE;
- case IOCTL_BCM_CAL_INIT: {
- UINT uiSectorSize = 0;
- if (Adapter->eNVMType == NVM_FLASH) {
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ if (Adapter->eNVMType == NVM_FLASH) {
+ if (copy_from_user(&IoBuffer, argp,
+ sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (copy_from_user(&uiSectorSize, IoBuffer.InputBuffer, sizeof(UINT)))
- return -EFAULT;
+ if (copy_from_user(&uiSectorSize, IoBuffer.InputBuffer,
+ sizeof(UINT)))
+ return -EFAULT;
- if ((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE)) {
- if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize,
- sizeof(UINT)))
+ if ((uiSectorSize < MIN_SECTOR_SIZE) ||
+ (uiSectorSize > MAX_SECTOR_SIZE)) {
+ if (copy_to_user(IoBuffer.OutputBuffer,
+ &Adapter->uiSectorSize, sizeof(UINT)))
+ return -EFAULT;
+ } else {
+ if (IsFlash2x(Adapter)) {
+ if (copy_to_user(IoBuffer.OutputBuffer,
+ &Adapter->uiSectorSize, sizeof(UINT)))
return -EFAULT;
} else {
- if (IsFlash2x(Adapter)) {
- if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize, sizeof(UINT)))
- return -EFAULT;
- } else {
- if ((TRUE == Adapter->bShutStatus) || (TRUE == Adapter->IdleMode)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device is in Idle/Shutdown Mode\n");
- return -EACCES;
- }
-
- Adapter->uiSectorSize = uiSectorSize;
- BcmUpdateSectorSize(Adapter, Adapter->uiSectorSize);
+ if ((TRUE == Adapter->bShutStatus) ||
+ (TRUE == Adapter->IdleMode)) {
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_PRINTK, 0, 0,
+ "Device is in Idle/Shutdown Mode\n");
+ return -EACCES;
}
+
+ Adapter->uiSectorSize = uiSectorSize;
+ BcmUpdateSectorSize(Adapter,
+ Adapter->uiSectorSize);
}
- Status = STATUS_SUCCESS;
- } else {
- Status = STATUS_FAILURE;
}
+ Status = STATUS_SUCCESS;
+ } else {
+ Status = STATUS_FAILURE;
}
- break;
+ return Status;
+}
- case IOCTL_BCM_SET_DEBUG:
+static int bcm_char_ioctl_set_debug(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
#ifdef DEBUG
- {
- struct bcm_user_debug_state sUserDebugState;
+ struct bcm_ioctl_buffer IoBuffer;
+ struct bcm_user_debug_state sUserDebugState;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "In SET_DEBUG ioctl\n");
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "In SET_DEBUG ioctl\n");
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer, sizeof(struct bcm_user_debug_state)))
- return -EFAULT;
+ if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer,
+ sizeof(struct bcm_user_debug_state)))
+ return -EFAULT;
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
- sUserDebugState.OnOff, sUserDebugState.Type);
- /* sUserDebugState.Subtype <<= 1; */
- sUserDebugState.Subtype = 1 << sUserDebugState.Subtype;
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "actual Subtype=0x%x\n", sUserDebugState.Subtype);
-
- /* Update new 'DebugState' in the Adapter */
- Adapter->stDebugState.type |= sUserDebugState.Type;
- /* Subtype: A bitmap of 32 bits for Subtype per Type.
- * Valid indexes in 'subtype' array: 1,2,4,8
- * corresponding to valid Type values. Hence we can use the 'Type' field
- * as the index value, ignoring the array entries 0,3,5,6,7 !
- */
- if (sUserDebugState.OnOff)
- Adapter->stDebugState.subtype[sUserDebugState.Type] |= sUserDebugState.Subtype;
- else
- Adapter->stDebugState.subtype[sUserDebugState.Type] &= ~sUserDebugState.Subtype;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
+ sUserDebugState.OnOff, sUserDebugState.Type);
+ /* sUserDebugState.Subtype <<= 1; */
+ sUserDebugState.Subtype = 1 << sUserDebugState.Subtype;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "actual Subtype=0x%x\n", sUserDebugState.Subtype);
+
+ /* Update new 'DebugState' in the Adapter */
+ Adapter->stDebugState.type |= sUserDebugState.Type;
+ /* Subtype: A bitmap of 32 bits for Subtype per Type.
+ * Valid indexes in 'subtype' array: 1,2,4,8
+ * corresponding to valid Type values. Hence we can use the 'Type' field
+ * as the index value, ignoring the array entries 0,3,5,6,7 !
+ */
+ if (sUserDebugState.OnOff)
+ Adapter->stDebugState.subtype[sUserDebugState.Type] |=
+ sUserDebugState.Subtype;
+ else
+ Adapter->stDebugState.subtype[sUserDebugState.Type] &=
+ ~sUserDebugState.Subtype;
+
+ BCM_SHOW_DEBUG_BITMAP(Adapter);
+#endif
+ return STATUS_SUCCESS;
+}
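
The SET_DEBUG handler treats the user's Subtype as a bit position and the Type value as an index into per-type subtype bitmaps, setting or clearing that bit depending on OnOff. The sketch below reproduces just that bit manipulation; the array size and field names are assumptions for illustration, not the driver's real layout.

/* Illustrative only: sizes and names are assumed, not the driver's layout. */
struct debug_state_sketch {
	unsigned int type;		/* OR of enabled debug types */
	unsigned int subtype[16];	/* one 32-bit bitmap per type value */
};

static void set_debug_sketch(struct debug_state_sketch *st, unsigned int type,
			     unsigned int subtype_idx, int on)
{
	/* Subtype arrives as a bit position; caller must pass a valid type index. */
	unsigned int bit = 1u << subtype_idx;

	st->type |= type;
	if (on)
		st->subtype[type] |= bit;	/* enable that subtype */
	else
		st->subtype[type] &= ~bit;	/* disable it */
}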
+
+static int bcm_char_ioctl_nvm_rw(void __user *argp,
+ struct bcm_mini_adapter *Adapter, UINT cmd)
+{
+ struct bcm_nvm_readwrite stNVMReadWrite;
+ struct timeval tv0, tv1;
+ struct bcm_ioctl_buffer IoBuffer;
+ PUCHAR pReadData = NULL;
+ ULONG ulDSDMagicNumInUsrBuff = 0;
+ INT Status = STATUS_FAILURE;
- BCM_SHOW_DEBUG_BITMAP(Adapter);
+ memset(&tv0, 0, sizeof(struct timeval));
+ memset(&tv1, 0, sizeof(struct timeval));
+ if ((Adapter->eNVMType == NVM_FLASH) &&
+ (Adapter->uiFlashLayoutMajorVersion == 0)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
+ return -EFAULT;
}
-#endif
- break;
- case IOCTL_BCM_NVM_READ:
- case IOCTL_BCM_NVM_WRITE: {
- struct bcm_nvm_readwrite stNVMReadWrite;
- PUCHAR pReadData = NULL;
- ULONG ulDSDMagicNumInUsrBuff = 0;
- struct timeval tv0, tv1;
- memset(&tv0, 0, sizeof(struct timeval));
- memset(&tv1, 0, sizeof(struct timeval));
- if ((Adapter->eNVMType == NVM_FLASH) && (Adapter->uiFlashLayoutMajorVersion == 0)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
- return -EFAULT;
+ if (IsFlash2x(Adapter)) {
+ if ((Adapter->eActiveDSD != DSD0) &&
+ (Adapter->eActiveDSD != DSD1) &&
+ (Adapter->eActiveDSD != DSD2)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "No DSD is active..hence NVM Command is blocked");
+ return STATUS_FAILURE;
}
+ }
- if (IsFlash2x(Adapter)) {
- if ((Adapter->eActiveDSD != DSD0) &&
- (Adapter->eActiveDSD != DSD1) &&
- (Adapter->eActiveDSD != DSD2)) {
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No DSD is active..hence NVM Command is blocked");
- return STATUS_FAILURE;
- }
- }
+ if (copy_from_user(&stNVMReadWrite,
+ (IOCTL_BCM_NVM_READ == cmd) ?
+ IoBuffer.OutputBuffer : IoBuffer.InputBuffer,
+ sizeof(struct bcm_nvm_readwrite)))
+ return -EFAULT;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ /*
+ * Deny the access if the offset crosses the cal area limit.
+ */
+ if (stNVMReadWrite.uiNumBytes > Adapter->uiNVMDSDSize)
+ return STATUS_FAILURE;
- if (copy_from_user(&stNVMReadWrite,
- (IOCTL_BCM_NVM_READ == cmd) ? IoBuffer.OutputBuffer : IoBuffer.InputBuffer,
- sizeof(struct bcm_nvm_readwrite)))
- return -EFAULT;
+ if (stNVMReadWrite.uiOffset >
+ Adapter->uiNVMDSDSize - stNVMReadWrite.uiNumBytes)
+ return STATUS_FAILURE;
- /*
- * Deny the access if the offset crosses the cal area limit.
- */
- if (stNVMReadWrite.uiNumBytes > Adapter->uiNVMDSDSize)
- return STATUS_FAILURE;
+ pReadData = memdup_user(stNVMReadWrite.pBuffer,
+ stNVMReadWrite.uiNumBytes);
+ if (IS_ERR(pReadData))
+ return PTR_ERR(pReadData);
- if (stNVMReadWrite.uiOffset > Adapter->uiNVMDSDSize - stNVMReadWrite.uiNumBytes) {
- /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */
- return STATUS_FAILURE;
+ do_gettimeofday(&tv0);
+ if (IOCTL_BCM_NVM_READ == cmd) {
+ down(&Adapter->NVMRdmWrmLock);
+
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadData);
+ return -EACCES;
}
- pReadData = memdup_user(stNVMReadWrite.pBuffer,
- stNVMReadWrite.uiNumBytes);
- if (IS_ERR(pReadData))
- return PTR_ERR(pReadData);
+ Status = BeceemNVMRead(Adapter, (PUINT)pReadData,
+ stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes);
+ up(&Adapter->NVMRdmWrmLock);
- do_gettimeofday(&tv0);
- if (IOCTL_BCM_NVM_READ == cmd) {
- down(&Adapter->NVMRdmWrmLock);
+ if (Status != STATUS_SUCCESS) {
+ kfree(pReadData);
+ return Status;
+ }
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ if (copy_to_user(stNVMReadWrite.pBuffer, pReadData,
+ stNVMReadWrite.uiNumBytes)) {
+ kfree(pReadData);
+ return -EFAULT;
+ }
+ } else {
+ down(&Adapter->NVMRdmWrmLock);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return -EACCES;
- }
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- Status = BeceemNVMRead(Adapter, (PUINT)pReadData, stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes);
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadData);
+ return -EACCES;
+ }
- if (Status != STATUS_SUCCESS) {
- kfree(pReadData);
- return Status;
- }
-
- if (copy_to_user(stNVMReadWrite.pBuffer, pReadData, stNVMReadWrite.uiNumBytes)) {
- kfree(pReadData);
- return -EFAULT;
- }
- } else {
- down(&Adapter->NVMRdmWrmLock);
+ Adapter->bHeaderChangeAllowed = TRUE;
+ if (IsFlash2x(Adapter)) {
+ /*
+ * New requirement:
+ * A DSD section update is allowed in two cases:
+ * 1. The DSD signature is present in the DSD header,
+ * meaning the dongle is OK and the update is useful.
+ * 2. If point 1 fails, the user buffer must carry the
+ * DSD signature. This ensures that if the dongle is
+ * corrupted, the user space program first rewrites the
+ * DSD header with a valid DSD signature, so that this
+ * and further writes are worthwhile.
+ *
+ * This restriction assumes that if the DSD signature is
+ * corrupted, the DSD data is not considered valid.
+ */
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ Status = BcmFlash2xCorruptSig(Adapter,
+ Adapter->eActiveDSD);
+ if (Status != STATUS_SUCCESS) {
+ if (((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) !=
+ Adapter->uiNVMDSDSize) ||
+ (stNVMReadWrite.uiNumBytes < SIGNATURE_SIZE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return -EACCES;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "DSD Sig is present neither in Flash nor User provided Input..");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadData);
+ return Status;
+ }
- Adapter->bHeaderChangeAllowed = TRUE;
- if (IsFlash2x(Adapter)) {
- /*
- * New Requirement:-
- * DSD section updation will be allowed in two case:-
- * 1. if DSD sig is present in DSD header means dongle is ok and updation is fruitfull
- * 2. if point 1 failes then user buff should have DSD sig. this point ensures that if dongle is
- * corrupted then user space program first modify the DSD header with valid DSD sig so
- * that this as well as further write may be worthwhile.
- *
- * This restriction has been put assuming that if DSD sig is corrupted, DSD
- * data won't be considered valid.
- */
-
- Status = BcmFlash2xCorruptSig(Adapter, Adapter->eActiveDSD);
- if (Status != STATUS_SUCCESS) {
- if (((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) != Adapter->uiNVMDSDSize) ||
- (stNVMReadWrite.uiNumBytes < SIGNATURE_SIZE)) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DSD Sig is present neither in Flash nor User provided Input..");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return Status;
- }
-
- ulDSDMagicNumInUsrBuff = ntohl(*(PUINT)(pReadData + stNVMReadWrite.uiNumBytes - SIGNATURE_SIZE));
- if (ulDSDMagicNumInUsrBuff != DSD_IMAGE_MAGIC_NUMBER) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DSD Sig is present neither in Flash nor User provided Input..");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return Status;
- }
+ ulDSDMagicNumInUsrBuff = ntohl(*(PUINT)(pReadData + stNVMReadWrite.uiNumBytes - SIGNATURE_SIZE));
+ if (ulDSDMagicNumInUsrBuff !=
+ DSD_IMAGE_MAGIC_NUMBER) {
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "DSD Sig is present neither in Flash nor User provided Input..");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadData);
+ return Status;
}
}
+ }
- Status = BeceemNVMWrite(Adapter, (PUINT)pReadData, stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes, stNVMReadWrite.bVerify);
- if (IsFlash2x(Adapter))
- BcmFlash2xWriteSig(Adapter, Adapter->eActiveDSD);
+ Status = BeceemNVMWrite(Adapter, (PUINT)pReadData,
+ stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes,
+ stNVMReadWrite.bVerify);
+ if (IsFlash2x(Adapter))
+ BcmFlash2xWriteSig(Adapter, Adapter->eActiveDSD);
- Adapter->bHeaderChangeAllowed = false;
+ Adapter->bHeaderChangeAllowed = false;
- up(&Adapter->NVMRdmWrmLock);
+ up(&Adapter->NVMRdmWrmLock);
- if (Status != STATUS_SUCCESS) {
- kfree(pReadData);
- return Status;
- }
+ if (Status != STATUS_SUCCESS) {
+ kfree(pReadData);
+ return Status;
}
+ }
+
+ do_gettimeofday(&tv1);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ " time taken by Write/read :%ld msec\n",
+ (tv1.tv_sec - tv0.tv_sec)*1000 + (tv1.tv_usec - tv0.tv_usec)/1000);
+
+ kfree(pReadData);
+ return STATUS_SUCCESS;
+}
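
Note how the NVM read/write handler bounds-checks the request in two steps: first that the byte count fits in the DSD area, then that the offset does not exceed uiNVMDSDSize - uiNumBytes. Splitting it this way avoids the wrap-around a naive offset + count > size comparison could hit. A tiny standalone version of the same check:

#include <stdbool.h>
#include <stddef.h>

/* true if [offset, offset + nbytes) fits inside an area of dsd_size bytes */
static bool nvm_range_ok(size_t offset, size_t nbytes, size_t dsd_size)
{
	if (nbytes > dsd_size)
		return false;
	/* dsd_size - nbytes cannot underflow after the check above */
	return offset <= dsd_size - nbytes;
}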
- do_gettimeofday(&tv1);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " timetaken by Write/read :%ld msec\n", (tv1.tv_sec - tv0.tv_sec)*1000 + (tv1.tv_usec - tv0.tv_usec)/1000);
+static int bcm_char_ioctl_flash2x_section_read(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_flash2x_readwrite sFlash2xRead = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ PUCHAR pReadBuff = NULL;
+ UINT NOB = 0;
+ UINT BuffSize = 0;
+ UINT ReadBytes = 0;
+ UINT ReadOffset = 0;
+ INT Status = STATUS_FAILURE;
+ void __user *OutPutBuff;
- kfree(pReadData);
- return STATUS_SUCCESS;
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash Does not have 2.x map");
+ return -EINVAL;
}
- case IOCTL_BCM_FLASH2X_SECTION_READ: {
- struct bcm_flash2x_readwrite sFlash2xRead = {0};
- PUCHAR pReadBuff = NULL;
- UINT NOB = 0;
- UINT BuffSize = 0;
- UINT ReadBytes = 0;
- UINT ReadOffset = 0;
- void __user *OutPutBuff;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called");
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (IsFlash2x(Adapter) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
- return -EINVAL;
- }
+ /* Reading FLASH 2.x READ structure */
+ if (copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer,
+ sizeof(struct bcm_flash2x_readwrite)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called");
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.Section :%x", sFlash2xRead.Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.offset :%x", sFlash2xRead.offset);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.numOfBytes :%x", sFlash2xRead.numOfBytes);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.bVerify :%x\n", sFlash2xRead.bVerify);
- /* Reading FLASH 2.x READ structure */
- if (copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer, sizeof(struct bcm_flash2x_readwrite)))
- return -EFAULT;
+ /* This was internal to the driver for raw read;
+ * it has now been exposed to the user space app.
+ */
+ if (validateFlash2xReadWrite(Adapter, &sFlash2xRead) == false)
+ return STATUS_FAILURE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.Section :%x", sFlash2xRead.Section);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.offset :%x", sFlash2xRead.offset);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.numOfBytes :%x", sFlash2xRead.numOfBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.bVerify :%x\n", sFlash2xRead.bVerify);
+ NOB = sFlash2xRead.numOfBytes;
+ if (NOB > Adapter->uiSectorSize)
+ BuffSize = Adapter->uiSectorSize;
+ else
+ BuffSize = NOB;
- /* This was internal to driver for raw read. now it has ben exposed to user space app. */
- if (validateFlash2xReadWrite(Adapter, &sFlash2xRead) == false)
- return STATUS_FAILURE;
+ ReadOffset = sFlash2xRead.offset;
+ OutPutBuff = IoBuffer.OutputBuffer;
+ pReadBuff = kzalloc(BuffSize, GFP_KERNEL);
+
+ if (pReadBuff == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Memory allocation failed for Flash 2.x Read Structure");
+ return -ENOMEM;
+ }
+ down(&Adapter->NVMRdmWrmLock);
+
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return -EACCES;
+ }
- NOB = sFlash2xRead.numOfBytes;
+ while (NOB) {
if (NOB > Adapter->uiSectorSize)
- BuffSize = Adapter->uiSectorSize;
+ ReadBytes = Adapter->uiSectorSize;
else
- BuffSize = NOB;
-
- ReadOffset = sFlash2xRead.offset;
- OutPutBuff = IoBuffer.OutputBuffer;
- pReadBuff = (PCHAR)kzalloc(BuffSize , GFP_KERNEL);
+ ReadBytes = NOB;
- if (pReadBuff == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for Flash 2.x Read Structure");
- return -ENOMEM;
+ /* Reading the data from Flash 2.x */
+ Status = BcmFlash2xBulkRead(Adapter, (PUINT)pReadBuff,
+ sFlash2xRead.Section, ReadOffset, ReadBytes);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Flash 2x read err with Status :%d",
+ Status);
+ break;
}
- down(&Adapter->NVMRdmWrmLock);
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, pReadBuff, ReadBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Copy to user failed with status :%d", Status);
up(&Adapter->NVMRdmWrmLock);
kfree(pReadBuff);
- return -EACCES;
+ return -EFAULT;
}
+ NOB = NOB - ReadBytes;
+ if (NOB) {
+ ReadOffset = ReadOffset + ReadBytes;
+ OutPutBuff = OutPutBuff + ReadBytes;
+ }
+ }
- while (NOB) {
- if (NOB > Adapter->uiSectorSize)
- ReadBytes = Adapter->uiSectorSize;
- else
- ReadBytes = NOB;
-
- /* Reading the data from Flash 2.x */
- Status = BcmFlash2xBulkRead(Adapter, (PUINT)pReadBuff, sFlash2xRead.Section, ReadOffset, ReadBytes);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Flash 2x read err with Status :%d", Status);
- break;
- }
-
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pReadBuff, ReadBytes);
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return Status;
+}
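
The section-read handler streams the flash contents through a single sector-sized bounce buffer: each pass reads min(remaining, sector size) bytes, copies them to the user buffer and advances both offsets. Here is a compact sketch of that loop in plain C, with the flash read replaced by an assumed callback standing in for BcmFlash2xBulkRead() and memcpy standing in for copy_to_user().

#include <stddef.h>
#include <string.h>

/* Assumed callback standing in for the driver's flash read routine. */
typedef int (*read_fn)(void *dst, size_t off, size_t len);

static int chunked_read_sketch(char *out, size_t off, size_t nob,
			       size_t sector, char *bounce, read_fn rd)
{
	while (nob) {
		size_t chunk = nob > sector ? sector : nob;

		if (rd(bounce, off, chunk))	/* BcmFlash2xBulkRead in the driver */
			return -1;
		memcpy(out, bounce, chunk);	/* copy_to_user in the driver */

		nob -= chunk;
		off += chunk;
		out += chunk;
	}
	return 0;
}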
- Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy to use failed with status :%d", Status);
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadBuff);
- return -EFAULT;
- }
- NOB = NOB - ReadBytes;
- if (NOB) {
- ReadOffset = ReadOffset + ReadBytes;
- OutPutBuff = OutPutBuff + ReadBytes;
- }
- }
+static int bcm_char_ioctl_flash2x_section_write(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_flash2x_readwrite sFlash2xWrite = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ PUCHAR pWriteBuff;
+ void __user *InputAddr;
+ UINT NOB = 0;
+ UINT BuffSize = 0;
+ UINT WriteOffset = 0;
+ UINT WriteBytes = 0;
+ INT Status = STATUS_FAILURE;
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadBuff);
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash Does not have 2.x map");
+ return -EINVAL;
}
- break;
- case IOCTL_BCM_FLASH2X_SECTION_WRITE: {
- struct bcm_flash2x_readwrite sFlash2xWrite = {0};
- PUCHAR pWriteBuff;
- void __user *InputAddr;
- UINT NOB = 0;
- UINT BuffSize = 0;
- UINT WriteOffset = 0;
- UINT WriteBytes = 0;
+ /* First make this False so that we can enable the Sector
+ * Permission Check in BeceemFlashBulkWrite
+ */
+ Adapter->bAllDSDWriteAllow = false;
- if (IsFlash2x(Adapter) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
- return -EINVAL;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
- /* First make this False so that we can enable the Sector Permission Check in BeceemFlashBulkWrite */
- Adapter->bAllDSDWriteAllow = false;
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
+ /* Reading FLASH 2.x READ structure */
+ if (copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer,
+ sizeof(struct bcm_flash2x_readwrite)))
+ return -EFAULT;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xWrite.Section :%x", sFlash2xWrite.Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xWrite.offset :%d", sFlash2xWrite.offset);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xWrite.numOfBytes :%x", sFlash2xWrite.numOfBytes);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xWrite.bVerify :%x\n", sFlash2xWrite.bVerify);
- /* Reading FLASH 2.x READ structure */
- if (copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer, sizeof(struct bcm_flash2x_readwrite)))
- return -EFAULT;
+ if ((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1)
+ && (sFlash2xWrite.Section != VSA2)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Only VSA write is allowed");
+ return -EINVAL;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.Section :%x", sFlash2xWrite.Section);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.offset :%d", sFlash2xWrite.offset);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.numOfBytes :%x", sFlash2xWrite.numOfBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.bVerify :%x\n", sFlash2xWrite.bVerify);
+ if (validateFlash2xReadWrite(Adapter, &sFlash2xWrite) == false)
+ return STATUS_FAILURE;
- if ((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1) && (sFlash2xWrite.Section != VSA2)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Only VSA write is allowed");
- return -EINVAL;
- }
+ InputAddr = sFlash2xWrite.pDataBuff;
+ WriteOffset = sFlash2xWrite.offset;
+ NOB = sFlash2xWrite.numOfBytes;
- if (validateFlash2xReadWrite(Adapter, &sFlash2xWrite) == false)
- return STATUS_FAILURE;
+ if (NOB > Adapter->uiSectorSize)
+ BuffSize = Adapter->uiSectorSize;
+ else
+ BuffSize = NOB;
- InputAddr = sFlash2xWrite.pDataBuff;
- WriteOffset = sFlash2xWrite.offset;
- NOB = sFlash2xWrite.numOfBytes;
+ pWriteBuff = kmalloc(BuffSize, GFP_KERNEL);
- if (NOB > Adapter->uiSectorSize)
- BuffSize = Adapter->uiSectorSize;
- else
- BuffSize = NOB;
+ if (pWriteBuff == NULL)
+ return -ENOMEM;
- pWriteBuff = kmalloc(BuffSize, GFP_KERNEL);
+ /* Size the first chunk so the next write starts sector-aligned. */
+ WriteBytes = Adapter->uiSectorSize;
+ if (WriteOffset % Adapter->uiSectorSize)
+ WriteBytes = Adapter->uiSectorSize - (WriteOffset % Adapter->uiSectorSize);
- if (pWriteBuff == NULL)
- return -ENOMEM;
+ if (NOB < WriteBytes)
+ WriteBytes = NOB;
- /* extracting the remainder of the given offset. */
- WriteBytes = Adapter->uiSectorSize;
- if (WriteOffset % Adapter->uiSectorSize)
- WriteBytes = Adapter->uiSectorSize - (WriteOffset % Adapter->uiSectorSize);
+ down(&Adapter->NVMRdmWrmLock);
- if (NOB < WriteBytes)
- WriteBytes = NOB;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- down(&Adapter->NVMRdmWrmLock);
-
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pWriteBuff);
+ return -EACCES;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ BcmFlash2xCorruptSig(Adapter, sFlash2xWrite.Section);
+ do {
+ Status = copy_from_user(pWriteBuff, InputAddr, WriteBytes);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy from user failed with status :%d", Status);
up(&Adapter->NVMRdmWrmLock);
kfree(pWriteBuff);
- return -EACCES;
+ return -EFAULT;
}
+ BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL, pWriteBuff, WriteBytes);
- BcmFlash2xCorruptSig(Adapter, sFlash2xWrite.Section);
- do {
- Status = copy_from_user(pWriteBuff, InputAddr, WriteBytes);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to user failed with status :%d", Status);
- up(&Adapter->NVMRdmWrmLock);
- kfree(pWriteBuff);
- return -EFAULT;
- }
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pWriteBuff, WriteBytes);
+ /* Writing the data from Flash 2.x */
+ Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pWriteBuff,
+ sFlash2xWrite.Section, WriteOffset, WriteBytes,
+ sFlash2xWrite.bVerify);
- /* Writing the data from Flash 2.x */
- Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pWriteBuff, sFlash2xWrite.Section, WriteOffset, WriteBytes, sFlash2xWrite.bVerify);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash 2x write err with Status :%d", Status);
+ break;
+ }
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash 2x read err with Status :%d", Status);
- break;
- }
+ NOB = NOB - WriteBytes;
+ if (NOB) {
+ WriteOffset = WriteOffset + WriteBytes;
+ InputAddr = InputAddr + WriteBytes;
+ if (NOB > Adapter->uiSectorSize)
+ WriteBytes = Adapter->uiSectorSize;
+ else
+ WriteBytes = NOB;
+ }
+ } while (NOB > 0);
- NOB = NOB - WriteBytes;
- if (NOB) {
- WriteOffset = WriteOffset + WriteBytes;
- InputAddr = InputAddr + WriteBytes;
- if (NOB > Adapter->uiSectorSize)
- WriteBytes = Adapter->uiSectorSize;
- else
- WriteBytes = NOB;
- }
- } while (NOB > 0);
+ BcmFlash2xWriteSig(Adapter, sFlash2xWrite.Section);
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pWriteBuff);
+ return Status;
+}
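
On the write side the first chunk is shortened so that it ends on a sector boundary, after which whole sectors are written, with the corrupt-signature and rewrite-signature calls bracketing the whole operation. The first-chunk computation in isolation, as a sketch under those assumptions:

#include <stddef.h>

/* Bytes to write in the first pass so the next pass starts sector-aligned. */
static size_t first_chunk(size_t offset, size_t nob, size_t sector)
{
	size_t chunk = sector;

	if (offset % sector)
		chunk = sector - (offset % sector);
	if (nob < chunk)
		chunk = nob;
	return chunk;
}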
+
+static int bcm_char_ioctl_flash2x_section_bitmap(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_flash2x_bitmap *psFlash2xBitMap;
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_FAILURE;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called");
+
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
+
+ if (IoBuffer.OutputLength != sizeof(struct bcm_flash2x_bitmap))
+ return -EINVAL;
+
+ psFlash2xBitMap = kzalloc(sizeof(struct bcm_flash2x_bitmap), GFP_KERNEL);
+ if (psFlash2xBitMap == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Memory is not available");
+ return -ENOMEM;
+ }
+
+ /* Reading the Flash Section Bit map */
+ down(&Adapter->NVMRdmWrmLock);
+
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- BcmFlash2xWriteSig(Adapter, sFlash2xWrite.Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
- kfree(pWriteBuff);
+ kfree(psFlash2xBitMap);
+ return -EACCES;
}
- break;
- case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP: {
- struct bcm_flash2x_bitmap *psFlash2xBitMap;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called");
+ BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
+ up(&Adapter->NVMRdmWrmLock);
+ if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap,
+ sizeof(struct bcm_flash2x_bitmap))) {
+ kfree(psFlash2xBitMap);
+ return -EFAULT;
+ }
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ kfree(psFlash2xBitMap);
+ return Status;
+}
- if (IoBuffer.OutputLength != sizeof(struct bcm_flash2x_bitmap))
- return -EINVAL;
+static int bcm_char_ioctl_set_active_section(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ enum bcm_flash2x_section_val eFlash2xSectionVal = 0;
+ INT Status = STATUS_FAILURE;
+ struct bcm_ioctl_buffer IoBuffer;
- psFlash2xBitMap = kzalloc(sizeof(struct bcm_flash2x_bitmap), GFP_KERNEL);
- if (psFlash2xBitMap == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory is not available");
- return -ENOMEM;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_SET_ACTIVE_SECTION Called");
- /* Reading the Flash Sectio Bit map */
- down(&Adapter->NVMRdmWrmLock);
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash Does not have 2.x map");
+ return -EINVAL;
+ }
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of IOCTL BUFFER failed");
+ return -EFAULT;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- kfree(psFlash2xBitMap);
- return -EACCES;
- }
+ Status = copy_from_user(&eFlash2xSectionVal,
+ IoBuffer.InputBuffer, sizeof(INT));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of flash section val failed");
+ return -EFAULT;
+ }
+
+ down(&Adapter->NVMRdmWrmLock);
+
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
- if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(struct bcm_flash2x_bitmap))) {
- kfree(psFlash2xBitMap);
- return -EFAULT;
- }
+ return -EACCES;
+ }
- kfree(psFlash2xBitMap);
+ Status = BcmSetActiveSection(Adapter, eFlash2xSectionVal);
+ if (Status)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Failed to make its priority highest. Status %d",
+ Status);
+
+ up(&Adapter->NVMRdmWrmLock);
+
+ return Status;
+}
+
+static int bcm_char_ioctl_copy_section(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_flash2x_copy_section sCopySectStrut = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_SUCCESS;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_COPY_SECTION Called");
+
+ Adapter->bAllDSDWriteAllow = false;
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash Does not have 2.x map");
+ return -EINVAL;
}
- break;
- case IOCTL_BCM_SET_ACTIVE_SECTION: {
- enum bcm_flash2x_section_val eFlash2xSectionVal = 0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SET_ACTIVE_SECTION Called");
+ Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of IOCTL BUFFER failed Status :%d", Status);
+ return -EFAULT;
+ }
- if (IsFlash2x(Adapter) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
- return -EINVAL;
- }
+ Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer,
+ sizeof(struct bcm_flash2x_copy_section));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of Copy_Section_Struct failed with Status :%d",
+ Status);
+ return -EFAULT;
+ }
- Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return -EFAULT;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Source Section :%x", sCopySectStrut.SrcSection);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Destination Section :%x", sCopySectStrut.DstSection);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "offset :%x", sCopySectStrut.offset);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "NOB :%x", sCopySectStrut.numOfBytes);
- Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return -EFAULT;
- }
+ if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == false) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Source Section<%x> does not exist in Flash ",
+ sCopySectStrut.SrcSection);
+ return -EINVAL;
+ }
- down(&Adapter->NVMRdmWrmLock);
+ if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == false) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Destination Section<%x> does not exist in Flash ",
+ sCopySectStrut.DstSection);
+ return -EINVAL;
+ }
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ if (sCopySectStrut.SrcSection == sCopySectStrut.DstSection) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Source and Destination section should be different");
+ return -EINVAL;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- return -EACCES;
- }
+ down(&Adapter->NVMRdmWrmLock);
- Status = BcmSetActiveSection(Adapter, eFlash2xSectionVal);
- if (Status)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Failed to make it's priority Highest. Status %d", Status);
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
+ return -EACCES;
}
- break;
- case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION: {
- /* Right Now we are taking care of only DSD */
- Adapter->bAllDSDWriteAllow = false;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
- Status = STATUS_SUCCESS;
+ if (sCopySectStrut.SrcSection == ISO_IMAGE1 ||
+ sCopySectStrut.SrcSection == ISO_IMAGE2) {
+ if (IsNonCDLessDevice(Adapter)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device is Non-CDLess hence won't have ISO !!");
+ Status = -EINVAL;
+ } else if (sCopySectStrut.numOfBytes == 0) {
+ Status = BcmCopyISO(Adapter, sCopySectStrut);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Partial Copy of ISO section is not Allowed..");
+ Status = STATUS_FAILURE;
+ }
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
}
- break;
- case IOCTL_BCM_COPY_SECTION: {
- struct bcm_flash2x_copy_section sCopySectStrut = {0};
- Status = STATUS_SUCCESS;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_COPY_SECTION Called");
+ Status = BcmCopySection(Adapter, sCopySectStrut.SrcSection,
+ sCopySectStrut.DstSection,
+ sCopySectStrut.offset,
+ sCopySectStrut.numOfBytes);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
+}
- Adapter->bAllDSDWriteAllow = false;
- if (IsFlash2x(Adapter) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
+static int bcm_char_ioctl_get_flash_cs_info(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_SUCCESS;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ " IOCTL_BCM_GET_FLASH_CS_INFO Called");
+
+ Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of IOCTL BUFFER failed");
+ return -EFAULT;
+ }
+
+ if (Adapter->eNVMType != NVM_FLASH) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Connected device does not have flash");
+ return -EINVAL;
+ }
+
+ if (IsFlash2x(Adapter) == TRUE) {
+ if (IoBuffer.OutputLength < sizeof(struct bcm_flash2x_cs_info))
return -EINVAL;
- }
- Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed Status :%d", Status);
+ if (copy_to_user(IoBuffer.OutputBuffer,
+ Adapter->psFlash2xCSInfo,
+ sizeof(struct bcm_flash2x_cs_info)))
return -EFAULT;
- }
+ } else {
+ if (IoBuffer.OutputLength < sizeof(struct bcm_flash_cs_info))
+ return -EINVAL;
- Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer, sizeof(struct bcm_flash2x_copy_section));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of Copy_Section_Struct failed with Status :%d", Status);
+ if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo,
+ sizeof(struct bcm_flash_cs_info)))
return -EFAULT;
- }
+ }
+ return Status;
+}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source SEction :%x", sCopySectStrut.SrcSection);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Destination SEction :%x", sCopySectStrut.DstSection);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "offset :%x", sCopySectStrut.offset);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "NOB :%x", sCopySectStrut.numOfBytes);
+static int bcm_char_ioctl_select_dsd(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_FAILURE;
+ UINT SectOfset = 0;
+ enum bcm_flash2x_section_val eFlash2xSectionVal;
- if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source Section<%x> does not exist in Flash ", sCopySectStrut.SrcSection);
- return -EINVAL;
- }
+ eFlash2xSectionVal = NO_SECTION_VAL;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_SELECT_DSD Called");
- if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destinatio Section<%x> does not exist in Flash ", sCopySectStrut.DstSection);
- return -EINVAL;
- }
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash Does not have 2.x map");
+ return -EINVAL;
+ }
- if (sCopySectStrut.SrcSection == sCopySectStrut.DstSection) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source and Destination section should be different");
- return -EINVAL;
- }
+ Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of IOCTL BUFFER failed");
+ return -EFAULT;
+ }
+ Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer,
+ sizeof(INT));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of flash section val failed");
+ return -EFAULT;
+ }
- down(&Adapter->NVMRdmWrmLock);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Read Section :%d", eFlash2xSectionVal);
+ if ((eFlash2xSectionVal != DSD0) &&
+ (eFlash2xSectionVal != DSD1) &&
+ (eFlash2xSectionVal != DSD2)) {
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Passed section<%x> is not DSD section",
+ eFlash2xSectionVal);
+ return STATUS_FAILURE;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- return -EACCES;
- }
+ SectOfset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
+ if (SectOfset == INVALID_OFFSET) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Provided Section val <%d> does not exist in Flash 2.x",
+ eFlash2xSectionVal);
+ return -EINVAL;
+ }
- if (sCopySectStrut.SrcSection == ISO_IMAGE1 || sCopySectStrut.SrcSection == ISO_IMAGE2) {
- if (IsNonCDLessDevice(Adapter)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device is Non-CDLess hence won't have ISO !!");
- Status = -EINVAL;
- } else if (sCopySectStrut.numOfBytes == 0) {
- Status = BcmCopyISO(Adapter, sCopySectStrut);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Partial Copy of ISO section is not Allowed..");
- Status = STATUS_FAILURE;
- }
- up(&Adapter->NVMRdmWrmLock);
- return Status;
- }
+ Adapter->bAllDSDWriteAllow = TRUE;
+ Adapter->ulFlashCalStart = SectOfset;
+ Adapter->eActiveDSD = eFlash2xSectionVal;
- Status = BcmCopySection(Adapter, sCopySectStrut.SrcSection,
- sCopySectStrut.DstSection, sCopySectStrut.offset, sCopySectStrut.numOfBytes);
- up(&Adapter->NVMRdmWrmLock);
+ return STATUS_SUCCESS;
+}
+
+static int bcm_char_ioctl_nvm_raw_read(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_nvm_readwrite stNVMRead;
+ struct bcm_ioctl_buffer IoBuffer;
+ unsigned int NOB;
+ INT BuffSize;
+ INT ReadOffset = 0;
+ UINT ReadBytes = 0;
+ PUCHAR pReadBuff;
+ void __user *OutPutBuff;
+ INT Status = STATUS_FAILURE;
+
+ if (Adapter->eNVMType != NVM_FLASH) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "NVM TYPE is not Flash");
+ return -EINVAL;
}
- break;
- case IOCTL_BCM_GET_FLASH_CS_INFO: {
- Status = STATUS_SUCCESS;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " IOCTL_BCM_GET_FLASH_CS_INFO Called");
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "copy_from_user 1 failed\n");
+ return -EFAULT;
+ }
- Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return -EFAULT;
- }
+ if (copy_from_user(&stNVMRead, IoBuffer.OutputBuffer,
+ sizeof(struct bcm_nvm_readwrite)))
+ return -EFAULT;
- if (Adapter->eNVMType != NVM_FLASH) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Connected device does not have flash");
- Status = -EINVAL;
- break;
- }
+ NOB = stNVMRead.uiNumBytes;
+ /* In Raw-Read max Buff size : 64MB */
- if (IsFlash2x(Adapter) == TRUE) {
- if (IoBuffer.OutputLength < sizeof(struct bcm_flash2x_cs_info))
- return -EINVAL;
+ if (NOB > DEFAULT_BUFF_SIZE)
+ BuffSize = DEFAULT_BUFF_SIZE;
+ else
+ BuffSize = NOB;
- if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(struct bcm_flash2x_cs_info)))
- return -EFAULT;
- } else {
- if (IoBuffer.OutputLength < sizeof(struct bcm_flash_cs_info))
- return -EINVAL;
+ ReadOffset = stNVMRead.uiOffset;
+ OutPutBuff = stNVMRead.pBuffer;
- if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(struct bcm_flash_cs_info)))
- return -EFAULT;
- }
+ pReadBuff = kzalloc(BuffSize, GFP_KERNEL);
+ if (pReadBuff == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Memory allocation failed for Flash 2.x Read Structure");
+ return -ENOMEM;
}
- break;
+ down(&Adapter->NVMRdmWrmLock);
- case IOCTL_BCM_SELECT_DSD: {
- UINT SectOfset = 0;
- enum bcm_flash2x_section_val eFlash2xSectionVal;
- eFlash2xSectionVal = NO_SECTION_VAL;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SELECT_DSD Called");
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- if (IsFlash2x(Adapter) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
- return -EINVAL;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ kfree(pReadBuff);
+ up(&Adapter->NVMRdmWrmLock);
+ return -EACCES;
+ }
+
+ Adapter->bFlashRawRead = TRUE;
+
+ while (NOB) {
+ if (NOB > DEFAULT_BUFF_SIZE)
+ ReadBytes = DEFAULT_BUFF_SIZE;
+ else
+ ReadBytes = NOB;
- Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ /* Reading the data from Flash 2.x */
+ Status = BeceemNVMRead(Adapter, (PUINT)pReadBuff,
+ ReadOffset, ReadBytes);
if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash 2x read err with Status :%d", Status);
+ break;
}
- Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
+
+ BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, pReadBuff, ReadBytes);
+
+ Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy to user failed with status :%d", Status);
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
return -EFAULT;
}
+ NOB = NOB - ReadBytes;
+ if (NOB) {
+ ReadOffset = ReadOffset + ReadBytes;
+ OutPutBuff = OutPutBuff + ReadBytes;
+ }
+ }
+ Adapter->bFlashRawRead = false;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return Status;
+}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Read Section :%d", eFlash2xSectionVal);
- if ((eFlash2xSectionVal != DSD0) &&
- (eFlash2xSectionVal != DSD1) &&
- (eFlash2xSectionVal != DSD2)) {
+static int bcm_char_ioctl_cntrlmsg_mask(void __user *argp,
+ struct bcm_mini_adapter *Adapter, struct bcm_tarang_data *pTarang)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_FAILURE;
+ ULONG RxCntrlMsgBitMask = 0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Passed section<%x> is not DSD section", eFlash2xSectionVal);
- return STATUS_FAILURE;
- }
+ /* Copy Ioctl Buffer structure */
+ Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "copy of Ioctl buffer from user space failed");
+ return -EFAULT;
+ }
- SectOfset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
- if (SectOfset == INVALID_OFFSET) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section val <%d> does not exist in Flash 2.x", eFlash2xSectionVal);
- return -EINVAL;
- }
+ if (IoBuffer.InputLength != sizeof(unsigned long))
+ return -EINVAL;
- Adapter->bAllDSDWriteAllow = TRUE;
- Adapter->ulFlashCalStart = SectOfset;
- Adapter->eActiveDSD = eFlash2xSectionVal;
+ Status = copy_from_user(&RxCntrlMsgBitMask,
+ IoBuffer.InputBuffer, IoBuffer.InputLength);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "copy of control bit mask from user space failed");
+ return -EFAULT;
}
- Status = STATUS_SUCCESS;
- break;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\n Got user defined cntrl msg bit mask :%lx",
+ RxCntrlMsgBitMask);
+ pTarang->RxCntrlMsgBitMask = RxCntrlMsgBitMask;
- case IOCTL_BCM_NVM_RAW_READ: {
- struct bcm_nvm_readwrite stNVMRead;
- INT NOB;
- INT BuffSize;
- INT ReadOffset = 0;
- UINT ReadBytes = 0;
- PUCHAR pReadBuff;
- void __user *OutPutBuff;
+ return Status;
+}
- if (Adapter->eNVMType != NVM_FLASH) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "NVM TYPE is not Flash");
- return -EINVAL;
- }
+static int bcm_char_ioctl_get_device_driver_info(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_driver_info DevInfo;
+ struct bcm_ioctl_buffer IoBuffer;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user 1 failed\n");
- return -EFAULT;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
- if (copy_from_user(&stNVMRead, IoBuffer.OutputBuffer, sizeof(struct bcm_nvm_readwrite)))
- return -EFAULT;
+ memset(&DevInfo, 0, sizeof(DevInfo));
+ DevInfo.MaxRDMBufferSize = BUFFER_4K;
+ DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
+ DevInfo.u32RxAlignmentCorrection = 0;
+ DevInfo.u32NVMType = Adapter->eNVMType;
+ DevInfo.u32InterfaceType = BCM_USB;
- NOB = stNVMRead.uiNumBytes;
- /* In Raw-Read max Buff size : 64MB */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (NOB > DEFAULT_BUFF_SIZE)
- BuffSize = DEFAULT_BUFF_SIZE;
- else
- BuffSize = NOB;
+ if (IoBuffer.OutputLength < sizeof(DevInfo))
+ return -EINVAL;
- ReadOffset = stNVMRead.uiOffset;
- OutPutBuff = stNVMRead.pBuffer;
+ if (copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo)))
+ return -EFAULT;
- pReadBuff = kzalloc(BuffSize , GFP_KERNEL);
- if (pReadBuff == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for Flash 2.x Read Structure");
- Status = -ENOMEM;
- break;
- }
- down(&Adapter->NVMRdmWrmLock);
+ return STATUS_SUCCESS;
+}
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+static int bcm_char_ioctl_time_since_net_entry(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_time_elapsed stTimeElapsedSinceNetEntry = {0};
+ struct bcm_ioctl_buffer IoBuffer;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- kfree(pReadBuff);
- up(&Adapter->NVMRdmWrmLock);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_TIME_SINCE_NET_ENTRY called");
+
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
+
+ if (IoBuffer.OutputLength < sizeof(struct bcm_time_elapsed))
+ return -EINVAL;
+
+ stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry =
+ get_seconds() - Adapter->liTimeSinceLastNetEntry;
+
+ if (copy_to_user(IoBuffer.OutputBuffer, &stTimeElapsedSinceNetEntry,
+ sizeof(struct bcm_time_elapsed)))
+ return -EFAULT;
+
+ return STATUS_SUCCESS;
+}
+
+static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
+{
+ struct bcm_tarang_data *pTarang = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ struct bcm_mini_adapter *Adapter = pTarang->Adapter;
+ INT Status = STATUS_FAILURE;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX",
+ cmd, arg);
+
+ if (_IOC_TYPE(cmd) != BCM_IOCTL)
+ return -EFAULT;
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ Status = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ Status = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
+ else if (_IOC_NONE == (_IOC_DIR(cmd) & _IOC_NONE))
+ Status = STATUS_SUCCESS;
+
+ if (Status)
+ return -EFAULT;
+
+ if (Adapter->device_removed)
+ return -EFAULT;
+
+ if (false == Adapter->fw_download_done) {
+ switch (cmd) {
+ case IOCTL_MAC_ADDR_REQ:
+ case IOCTL_LINK_REQ:
+ case IOCTL_CM_REQUEST:
+ case IOCTL_SS_INFO_REQ:
+ case IOCTL_SEND_CONTROL_MESSAGE:
+ case IOCTL_IDLE_REQ:
+ case IOCTL_BCM_GPIO_SET_REQUEST:
+ case IOCTL_BCM_GPIO_STATUS_REQUEST:
return -EACCES;
+ default:
+ break;
}
+ }
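
Before dispatching, bcm_char_ioctl() checks that the command carries the driver's magic number, probes the user pointer for the access implied by the command's direction, bails out if the device has been removed, and refuses a handful of link-level commands until firmware download has finished. The standalone rendering below is a simplified sketch of that gatekeeping; the enum, struct and helper names here are stand-ins for _IOC_TYPE/_IOC_DIR, access_ok and the driver's flags, not kernel APIs.

#include <stdbool.h>

enum dir_sketch { DIR_NONE, DIR_READ, DIR_WRITE };

struct cmd_sketch {
	unsigned int magic;	/* _IOC_TYPE(cmd) */
	enum dir_sketch dir;	/* _IOC_DIR(cmd) */
	bool needs_firmware;	/* e.g. link/control-message commands */
};

static int predispatch_sketch(const struct cmd_sketch *c, unsigned int drv_magic,
			      bool user_ptr_ok, bool fw_loaded, bool removed)
{
	if (c->magic != drv_magic)		/* wrong ioctl family */
		return -1;
	if (c->dir != DIR_NONE && !user_ptr_ok)	/* access_ok() on argp */
		return -1;
	if (removed)				/* device unplugged */
		return -1;
	if (!fw_loaded && c->needs_firmware)	/* blocked until download done */
		return -2;			/* -EACCES in the driver */
	return 0;				/* safe to dispatch */
}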
- Adapter->bFlashRawRead = TRUE;
+ Status = vendorextnIoctl(Adapter, cmd, arg);
+ if (Status != CONTINUE_COMMON_PATH)
+ return Status;
- while (NOB) {
- if (NOB > DEFAULT_BUFF_SIZE)
- ReadBytes = DEFAULT_BUFF_SIZE;
- else
- ReadBytes = NOB;
+ switch (cmd) {
+ /* Rdms for Swin Idle... */
+ case IOCTL_BCM_REGISTER_READ_PRIVATE:
+ Status = bcm_char_ioctl_reg_read_private(argp, Adapter);
+ return Status;
- /* Reading the data from Flash 2.x */
- Status = BeceemNVMRead(Adapter, (PUINT)pReadBuff, ReadOffset, ReadBytes);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash 2x read err with Status :%d", Status);
- break;
- }
+ case IOCTL_BCM_REGISTER_WRITE_PRIVATE:
+ Status = bcm_char_ioctl_reg_write_private(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_REGISTER_READ:
+ case IOCTL_BCM_EEPROM_REGISTER_READ:
+ Status = bcm_char_ioctl_eeprom_reg_read(argp, Adapter);
+ return Status;
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pReadBuff, ReadBytes);
+ case IOCTL_BCM_REGISTER_WRITE:
+ case IOCTL_BCM_EEPROM_REGISTER_WRITE:
+ Status = bcm_char_ioctl_eeprom_reg_write(argp, Adapter, cmd);
+ return Status;
- Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to use failed with status :%d", Status);
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadBuff);
- return -EFAULT;
- }
- NOB = NOB - ReadBytes;
- if (NOB) {
- ReadOffset = ReadOffset + ReadBytes;
- OutPutBuff = OutPutBuff + ReadBytes;
- }
- }
- Adapter->bFlashRawRead = false;
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadBuff);
+ case IOCTL_BCM_GPIO_SET_REQUEST:
+ Status = bcm_char_ioctl_gpio_set_request(argp, Adapter);
+ return Status;
+
+ case BCM_LED_THREAD_STATE_CHANGE_REQ:
+ Status = bcm_char_ioctl_led_thread_state_change_req(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GPIO_STATUS_REQUEST:
+ Status = bcm_char_ioctl_gpio_status_request(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GPIO_MULTI_REQUEST:
+ Status = bcm_char_ioctl_gpio_multi_request(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GPIO_MODE_REQUEST:
+ Status = bcm_char_ioctl_gpio_mode_request(argp, Adapter);
+ return Status;
+
+ case IOCTL_MAC_ADDR_REQ:
+ case IOCTL_LINK_REQ:
+ case IOCTL_CM_REQUEST:
+ case IOCTL_SS_INFO_REQ:
+ case IOCTL_SEND_CONTROL_MESSAGE:
+ case IOCTL_IDLE_REQ:
+ Status = bcm_char_ioctl_misc_request(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_BUFFER_DOWNLOAD_START:
+ Status = bcm_char_ioctl_buffer_download_start(Adapter);
+ return Status;
+
+ case IOCTL_BCM_BUFFER_DOWNLOAD:
+ Status = bcm_char_ioctl_buffer_download(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_BUFFER_DOWNLOAD_STOP:
+ Status = bcm_char_ioctl_buffer_download_stop(argp, Adapter);
+ return Status;
+
+ case IOCTL_BE_BUCKET_SIZE:
+ Status = 0;
+ if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
+ Status = -EFAULT;
+ break;
+
+ case IOCTL_RTPS_BUCKET_SIZE:
+ Status = 0;
+ if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
+ Status = -EFAULT;
break;
- }
- case IOCTL_BCM_CNTRLMSG_MASK: {
- ULONG RxCntrlMsgBitMask = 0;
+ case IOCTL_CHIP_RESET:
+ Status = bcm_char_ioctl_chip_reset(Adapter);
+ return Status;
- /* Copy Ioctl Buffer structure */
- Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of Ioctl buffer is failed from user space");
- return -EFAULT;
- }
+ case IOCTL_QOS_THRESHOLD:
+ Status = bcm_char_ioctl_qos_threshold(arg, Adapter);
+ return Status;
- if (IoBuffer.InputLength != sizeof(unsigned long)) {
- Status = -EINVAL;
- break;
- }
+ case IOCTL_DUMP_PACKET_INFO:
+ DumpPackInfo(Adapter);
+ DumpPhsRules(&Adapter->stBCMPhsContext);
+ Status = STATUS_SUCCESS;
+ break;
- Status = copy_from_user(&RxCntrlMsgBitMask, IoBuffer.InputBuffer, IoBuffer.InputLength);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of control bit mask failed from user space");
+ case IOCTL_GET_PACK_INFO:
+ if (copy_to_user(argp, &Adapter->PackInfo,
+ sizeof(struct bcm_packet_info)*NO_OF_QUEUES))
return -EFAULT;
+ Status = STATUS_SUCCESS;
+ break;
+
+ case IOCTL_BCM_SWITCH_TRANSFER_MODE:
+ Status = bcm_char_ioctl_switch_transfer_mode(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GET_DRIVER_VERSION:
+ Status = bcm_char_ioctl_get_driver_version(argp);
+ return Status;
+
+ case IOCTL_BCM_GET_CURRENT_STATUS:
+ Status = bcm_char_ioctl_get_current_status(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_SET_MAC_TRACING:
+ Status = bcm_char_ioctl_set_mac_tracing(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GET_DSX_INDICATION:
+ Status = bcm_char_ioctl_get_dsx_indication(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GET_HOST_MIBS:
+ Status = bcm_char_ioctl_get_host_mibs(argp, Adapter, pTarang);
+ return Status;
+
+ case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE:
+ if ((false == Adapter->bTriedToWakeUpFromlowPowerMode) &&
+ (TRUE == Adapter->IdleMode)) {
+ Adapter->usIdleModePattern = ABORT_IDLE_MODE;
+ Adapter->bWakeUpDevice = TRUE;
+ wake_up(&Adapter->process_rx_cntrlpkt);
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\n Got user defined cntrl msg bit mask :%lx", RxCntrlMsgBitMask);
- pTarang->RxCntrlMsgBitMask = RxCntrlMsgBitMask;
- }
- break;
- case IOCTL_BCM_GET_DEVICE_DRIVER_INFO: {
- struct bcm_driver_info DevInfo;
+ Status = STATUS_SUCCESS;
+ break;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
+ case IOCTL_BCM_BULK_WRM:
+ Status = bcm_char_ioctl_bulk_wrm(argp, Adapter, cmd);
+ return Status;
- memset(&DevInfo, 0, sizeof(DevInfo));
- DevInfo.MaxRDMBufferSize = BUFFER_4K;
- DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
- DevInfo.u32RxAlignmentCorrection = 0;
- DevInfo.u32NVMType = Adapter->eNVMType;
- DevInfo.u32InterfaceType = BCM_USB;
+ case IOCTL_BCM_GET_NVM_SIZE:
+ Status = bcm_char_ioctl_get_nvm_size(argp, Adapter);
+ return Status;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ case IOCTL_BCM_CAL_INIT:
+ Status = bcm_char_ioctl_cal_init(argp, Adapter);
+ return Status;
- if (IoBuffer.OutputLength < sizeof(DevInfo))
- return -EINVAL;
+ case IOCTL_BCM_SET_DEBUG:
+ Status = bcm_char_ioctl_set_debug(argp, Adapter);
+ return Status;
- if (copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo)))
- return -EFAULT;
- }
- break;
+ case IOCTL_BCM_NVM_READ:
+ case IOCTL_BCM_NVM_WRITE:
+ Status = bcm_char_ioctl_nvm_rw(argp, Adapter, cmd);
+ return Status;
- case IOCTL_BCM_TIME_SINCE_NET_ENTRY: {
- struct bcm_time_elapsed stTimeElapsedSinceNetEntry = {0};
+ case IOCTL_BCM_FLASH2X_SECTION_READ:
+ Status = bcm_char_ioctl_flash2x_section_read(argp, Adapter);
+ return Status;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_TIME_SINCE_NET_ENTRY called");
+ case IOCTL_BCM_FLASH2X_SECTION_WRITE:
+ Status = bcm_char_ioctl_flash2x_section_write(argp, Adapter);
+ return Status;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP:
+ Status = bcm_char_ioctl_flash2x_section_bitmap(argp, Adapter);
+ return Status;
- if (IoBuffer.OutputLength < sizeof(struct bcm_time_elapsed))
- return -EINVAL;
+ case IOCTL_BCM_SET_ACTIVE_SECTION:
+ Status = bcm_char_ioctl_set_active_section(argp, Adapter);
+ return Status;
- stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = get_seconds() - Adapter->liTimeSinceLastNetEntry;
+ case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION:
+ /* Right Now we are taking care of only DSD */
+ Adapter->bAllDSDWriteAllow = false;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
+ Status = STATUS_SUCCESS;
+ break;
- if (copy_to_user(IoBuffer.OutputBuffer, &stTimeElapsedSinceNetEntry, sizeof(struct bcm_time_elapsed)))
- return -EFAULT;
- }
- break;
+ case IOCTL_BCM_COPY_SECTION:
+ Status = bcm_char_ioctl_copy_section(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GET_FLASH_CS_INFO:
+ Status = bcm_char_ioctl_get_flash_cs_info(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_SELECT_DSD:
+ Status = bcm_char_ioctl_select_dsd(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_NVM_RAW_READ:
+ Status = bcm_char_ioctl_nvm_raw_read(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_CNTRLMSG_MASK:
+ Status = bcm_char_ioctl_cntrlmsg_mask(argp, Adapter, pTarang);
+ return Status;
+
+ case IOCTL_BCM_GET_DEVICE_DRIVER_INFO:
+ Status = bcm_char_ioctl_get_device_driver_info(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_TIME_SINCE_NET_ENTRY:
+ Status = bcm_char_ioctl_time_since_net_entry(argp, Adapter);
+ return Status;
case IOCTL_CLOSE_NOTIFICATION:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_CLOSE_NOTIFICATION");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "IOCTL_CLOSE_NOTIFICATION");
break;
default:
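The new bcm_char_ioctl() entry path above front-loads its argument validation: _IOC_TYPE() checks the ioctl magic, _IOC_DIR() tells whether the argument buffer is read from or written to, and access_ok() verifies the user pointer for _IOC_SIZE(cmd) bytes before any handler runs. The sketch below restates that pattern on its own; it is illustrative only, assumes the three-argument access_ok(type, addr, size) of this kernel generation (newer kernels drop the VERIFY_* argument), and borrows the BCM_IOCTL magic from the driver's ioctl header.

#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

/* Hypothetical helper, not part of the driver. */
static int example_check_ioctl_arg(unsigned int cmd, void __user *argp)
{
	if (_IOC_TYPE(cmd) != BCM_IOCTL)	/* wrong magic number */
		return -EFAULT;

	/*
	 * _IOC_READ means the ioctl copies data back to user space, so
	 * the buffer must be writable; _IOC_WRITE means we only read
	 * from it.  _IOC_NONE needs no buffer check at all.
	 */
	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (!access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd)))
			return -EFAULT;
	} else if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (!access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return 0;
}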
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
index cc91b5e934aa..632f81a7c63f 100644
--- a/drivers/staging/bcm/CmHost.c
+++ b/drivers/staging/bcm/CmHost.c
@@ -975,8 +975,8 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: "
- "%pM", psfCSType->cCPacketClassificationRule.
+ DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: %pM",
+ psfCSType->cCPacketClassificationRule.
u8EthernetSourceMACAddress);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ",
@@ -1092,18 +1092,16 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: "
- "0x%*ph ", 4, psfCSType->
- cCPacketClassificationRule.
+ DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%*ph ",
+ 4, psfCSType->cCPacketClassificationRule.
u8ProtocolSourcePortRange);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: "
- "0x%*ph ", 4, psfCSType->
- cCPacketClassificationRule.
+ DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%*ph ",
+ 4, psfCSType->cCPacketClassificationRule.
u8ProtocolDestPortRange);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
@@ -1118,8 +1116,8 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: "
- "%pM", psfCSType->cCPacketClassificationRule.
+ DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: %pM",
+ psfCSType->cCPacketClassificationRule.
u8EthernetSourceMACAddress);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ", psfCSType->cCPacketClassificationRule.u8EthertypeLength);
@@ -1637,6 +1635,8 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
struct bcm_add_indication_alt *pstAddIndication = NULL;
struct bcm_change_indication *pstChangeIndication = NULL;
struct bcm_leader *pLeader = NULL;
+ INT uiSearchRuleIndex = 0;
+ ULONG ulSFID;
/*
* Otherwise the message contains a target address from where we need to
@@ -1660,7 +1660,6 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n", pstAddIndication->u16TID);
switch (pstAddIndication->u8Type) {
case DSA_REQ:
- {
pLeader->PLength = sizeof(struct bcm_add_indication_alt);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength);
@@ -1671,22 +1670,16 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
kfree(pstAddIndication);
- }
- break;
+ break;
case DSA_RSP:
- {
pLeader->PLength = sizeof(struct bcm_add_indication_alt);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
pLeader->PLength);
*((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
= *pstAddIndication;
((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
-
- } /* no break here..we should go down. */
+ /* FALLTHROUGH */
case DSA_ACK:
- {
- UINT uiSearchRuleIndex = 0;
-
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
ntohs(pstAddIndication->u16VCID));
uiSearchRuleIndex = SearchFreeSfid(Adapter);
@@ -1694,7 +1687,7 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
uiSearchRuleIndex);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Direction:0x%X ",
pstAddIndication->u8Direction);
- if ((uiSearchRuleIndex < NO_OF_QUEUES)) {
+ if (uiSearchRuleIndex < NO_OF_QUEUES) {
Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
pstAddIndication->u8Direction;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
@@ -1769,10 +1762,8 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
kfree(pstAddIndication);
return false;
}
- }
- break;
+ break;
case DSC_REQ:
- {
pLeader->PLength = sizeof(struct bcm_change_indication);
pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
@@ -1782,26 +1773,21 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
kfree(pstAddIndication);
- }
- break;
+ break;
case DSC_RSP:
- {
pLeader->PLength = sizeof(struct bcm_change_indication);
pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
*((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
- }
+ /* FALLTHROUGH */
case DSC_ACK:
- {
- UINT uiSearchRuleIndex = 0;
-
pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
uiSearchRuleIndex = SearchSfid(Adapter, ntohl(pstChangeIndication->sfActiveSet.u32SFID));
if (uiSearchRuleIndex > NO_OF_QUEUES-1)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
- if ((uiSearchRuleIndex < NO_OF_QUEUES)) {
+ if (uiSearchRuleIndex < NO_OF_QUEUES) {
Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
if (pstChangeIndication->sfActiveSet.bValid == TRUE)
Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
@@ -1849,13 +1835,8 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
kfree(pstAddIndication);
return false;
}
- }
- break;
+ break;
case DSD_REQ:
- {
- UINT uiSearchRuleIndex;
- ULONG ulSFID;
-
pLeader->PLength = sizeof(struct bcm_del_indication);
*((struct bcm_del_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((struct bcm_del_indication *)pstAddIndication);
@@ -1872,12 +1853,10 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
((struct bcm_del_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
- }
+ /* FALLTHROUGH */
case DSD_RSP:
- {
/* Do nothing as SF has already got Deleted */
- }
- break;
+ break;
case DSD_ACK:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
break;
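The DSA_RSP and DSC_RSP cases above now prepare the ACK payload and then deliberately fall through into the matching *_ACK case, with an explicit /* FALLTHROUGH */ marker replacing the old "no break here" comment. A small stand-alone illustration of the same control flow (plain C, hypothetical names, not driver code); later kernels offer a fallthrough pseudo-keyword for this, but this tree relies on the comment convention:

#include <stdio.h>

enum msg { RSP, ACK };

static void handle(enum msg m)
{
	switch (m) {
	case RSP:
		printf("prepare ack\n");
		/* FALLTHROUGH */
	case ACK:
		printf("record service flow\n");
		break;
	}
}

int main(void)
{
	handle(RSP);	/* prints both lines */
	handle(ACK);	/* prints only the second line */
	return 0;
}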
diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c
index ed285b2d892d..f1d7cb82fd7e 100644
--- a/drivers/staging/bcm/DDRInit.c
+++ b/drivers/staging/bcm/DDRInit.c
@@ -3,7 +3,7 @@
#define DDR_DUMP_INTERNAL_DEVICE_MEMORY 0xBFC02B00
-#define MIPS_CLOCK_REG 0x0f000820
+#define MIPS_CLOCK_REG 0x0f000820
/* DDR INIT-133Mhz */
#define T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 12 /* index for 0x0F007000 */
@@ -818,13 +818,13 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
if ((Adapter->chip_id != BCS220_2) &&
(Adapter->chip_id != BCS220_2BC) &&
(Adapter->chip_id != BCS220_3)) {
- retval = rdmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue |= 0x44;
- retval = wrmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
+ retval = wrmalt(Adapter, (UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
@@ -871,7 +871,7 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
case 0xbece0121:
case 0xbece0130:
case 0xbece0300:
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "DDR Setting: %x\n", Adapter->DDRSetting);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "DDR Setting: %x\n", Adapter->DDRSetting);
switch (Adapter->DDRSetting) {
case DDR_80_MHZ:
psDDRSetting = asT3_DDRSetting80MHz;
@@ -933,7 +933,7 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
}
value = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Register Count is =%lu\n", RegCount);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Register Count is =%lu\n", RegCount);
while (RegCount && !retval) {
if (uiClockSetting && psDDRSetting->ulRegAddress == MIPS_CLOCK_REG)
value = uiClockSetting;
@@ -990,12 +990,12 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
* we will change this when we will have internal PMU.
*/
if (Adapter->PmuMode == HYBRID_MODE_7C) {
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
@@ -1006,12 +1006,12 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
@@ -1024,12 +1024,12 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
}
} else if (Adapter->PmuMode == HYBRID_MODE_6) {
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
@@ -1040,12 +1040,12 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
@@ -1094,8 +1094,8 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
break;
- default:
- return -EINVAL;
+ default:
+ return -EINVAL;
}
break;
@@ -1215,7 +1215,7 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
ul_ddr_setting_load_addr += sizeof(ULONG);
if (!retval) {
if (bOverrideSelfRefresh && (psDDRSetting->ulRegAddress == 0x0F007018)) {
- value = (psDDRSetting->ulRegValue |(1<<8));
+ value = (psDDRSetting->ulRegValue | (1<<8));
if (STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr,
&value, sizeof(value))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index 94f32728f7c8..7c04c73e3bc8 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -1,5 +1,5 @@
#include "headers.h"
-
+#include <linux/usb/ch9.h>
static struct usb_device_id InterfaceUsbtable[] = {
{ USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) },
{ USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3B) },
@@ -30,19 +30,22 @@ static void InterfaceAdapterFree(struct bcm_interface_adapter *psIntfAdapter)
int i = 0;
/* Wake up the wait_queue... */
- if (psIntfAdapter->psAdapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ if (psIntfAdapter->psAdapter->LEDInfo.led_thread_running &
+ BCM_LED_THREAD_RUNNING_ACTIVELY) {
psIntfAdapter->psAdapter->DriverState = DRIVER_HALT;
wake_up(&psIntfAdapter->psAdapter->LEDInfo.notify_led_event);
}
reset_card_proc(psIntfAdapter->psAdapter);
/*
- * worst case time taken by the RDM/WRM will be 5 sec. will check after every 100 ms
- * to accertain the device is not being accessed. After this No RDM/WRM should be made.
+ * Worst case time taken by the RDM/WRM will be 5 sec. Will check after
+ * every 100 ms to ascertain that the device is not being accessed. After
+ * this no RDM/WRM should be made.
*/
while (psIntfAdapter->psAdapter->DeviceAccess) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Device is being accessed.\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT,
+ DRV_ENTRY, DBG_LVL_ALL,
+ "Device is being accessed.\n");
msleep(100);
}
/* Free interrupt URB */
@@ -71,6 +74,7 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter
{
u32 ulReg;
int bytes;
+ struct bcm_interface_adapter *interfaceAdapter;
/* Program EP2 MAX_PKT_SIZE */
ulReg = ntohl(EP2_MPS_REG);
@@ -80,7 +84,9 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter
ulReg = ntohl(EP2_CFG_REG);
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x132, 4, TRUE);
- if (((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter))->bHighSpeedDevice == TRUE) {
+ interfaceAdapter =
+ (struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter);
+ if (interfaceAdapter->bHighSpeedDevice) {
ulReg = ntohl(EP2_CFG_INT);
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE);
} else {
@@ -98,8 +104,8 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter
/* Program TX EP as interrupt(Alternate Setting) */
bytes = rdmalt(Adapter, 0x0F0110F8, &ulReg, sizeof(u32));
if (bytes < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "reading of Tx EP failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL, "reading of Tx EP failed\n");
return;
}
ulReg |= 0x6;
@@ -150,7 +156,8 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
struct net_device *ndev;
/* Reserve one extra queue for the bit-bucket */
- ndev = alloc_etherdev_mq(sizeof(struct bcm_mini_adapter), NO_OF_QUEUES+1);
+ ndev = alloc_etherdev_mq(sizeof(struct bcm_mini_adapter),
+ NO_OF_QUEUES + 1);
if (ndev == NULL) {
dev_err(&udev->dev, DRV_NAME ": no memory for device\n");
return -ENOMEM;
@@ -169,13 +176,14 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
/*
* Technically, one can start using BCM_DEBUG_PRINT after this point.
- * However, realize that by default the Type/Subtype bitmaps are all zero now;
- * so no prints will actually appear until the TestApp turns on debug paths via
- * the ioctl(); so practically speaking, in early init, no logging happens.
+ * However, realize that by default the Type/Subtype bitmaps are all
+ * zero now; so no prints will actually appear until the TestApp turns
+ * on debug paths via the ioctl(); so practically speaking, in early
+ * init, no logging happens.
*
- * A solution (used below): we explicitly set the bitmaps to 1 for Type=DBG_TYPE_INITEXIT
- * and ALL subtype's of the same. Now all bcm debug statements get logged, enabling debug
- * during early init.
+ * A solution (used below): we explicitly set the bitmaps to 1 for
+ * Type=DBG_TYPE_INITEXIT and ALL subtype's of the same. Now all bcm
+ * debug statements get logged, enabling debug during early init.
* Further, we turn this OFF once init_module() completes.
*/
@@ -191,7 +199,7 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
/* Allocate interface adapter structure */
psIntfAdapter = kzalloc(sizeof(struct bcm_interface_adapter),
- GFP_KERNEL);
+ GFP_KERNEL);
if (psIntfAdapter == NULL) {
AdapterFree(psAdapter);
return -ENOMEM;
@@ -205,7 +213,7 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
usb_set_intfdata(intf, psIntfAdapter);
BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "psIntfAdapter 0x%p\n", psIntfAdapter);
+ "psIntfAdapter 0x%p\n", psIntfAdapter);
retval = InterfaceAdapterInit(psIntfAdapter);
if (retval) {
/* If the Firmware/Cfg File is not present
@@ -213,12 +221,13 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
* download the files.
*/
if (-ENOENT == retval) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "File Not Found. Use app to download.\n");
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "File Not Found. Use app to download.\n");
return STATUS_SUCCESS;
}
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "InterfaceAdapterInit failed.\n");
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL, "InterfaceAdapterInit failed.\n");
usb_set_intfdata(intf, NULL);
udev = interface_to_usbdev(intf);
usb_put_dev(udev);
@@ -228,7 +237,10 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
if (psAdapter->chip_id > T3) {
uint32_t uiNackZeroLengthInt = 4;
- retval = wrmalt(psAdapter, DISABLE_USB_ZERO_LEN_INT, &uiNackZeroLengthInt, sizeof(uiNackZeroLengthInt));
+ retval =
+ wrmalt(psAdapter, DISABLE_USB_ZERO_LEN_INT,
+ &uiNackZeroLengthInt,
+ sizeof(uiNackZeroLengthInt));
if (retval)
return retval;
}
@@ -242,9 +254,11 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
intf->needs_remote_wakeup = 1;
usb_enable_autosuspend(udev);
device_init_wakeup(&intf->dev, 1);
- INIT_WORK(&psIntfAdapter->usbSuspendWork, putUsbSuspend);
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Enabling USB Auto-Suspend\n");
+ INIT_WORK(&psIntfAdapter->usbSuspendWork,
+ putUsbSuspend);
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Enabling USB Auto-Suspend\n");
#endif
} else {
intf->needs_remote_wakeup = 0;
@@ -271,7 +285,7 @@ static void usbbcm_disconnect(struct usb_interface *intf)
if (psAdapter->bDoSuspend)
intf->needs_remote_wakeup = 0;
- psAdapter->device_removed = TRUE ;
+ psAdapter->device_removed = TRUE;
usb_set_intfdata(intf, NULL);
InterfaceAdapterFree(psIntfAdapter);
usb_put_dev(udev);
@@ -285,8 +299,10 @@ static int AllocUsbCb(struct bcm_interface_adapter *psIntfAdapter)
psIntfAdapter->asUsbTcb[i].urb = usb_alloc_urb(0, GFP_KERNEL);
if (psIntfAdapter->asUsbTcb[i].urb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Tx urb for index %d\n", i);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_PRINTK, 0, 0,
+ "Can't allocate Tx urb for index %d\n",
+ i);
return -ENOMEM;
}
}
@@ -295,19 +311,25 @@ static int AllocUsbCb(struct bcm_interface_adapter *psIntfAdapter)
psIntfAdapter->asUsbRcb[i].urb = usb_alloc_urb(0, GFP_KERNEL);
if (psIntfAdapter->asUsbRcb[i].urb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Rx urb for index %d\n", i);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_PRINTK, 0, 0,
+ "Can't allocate Rx urb for index %d\n",
+ i);
return -ENOMEM;
}
- psIntfAdapter->asUsbRcb[i].urb->transfer_buffer = kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL);
+ psIntfAdapter->asUsbRcb[i].urb->transfer_buffer =
+ kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL);
if (psIntfAdapter->asUsbRcb[i].urb->transfer_buffer == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Rx buffer for index %d\n", i);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_PRINTK, 0, 0,
+ "Can't allocate Rx buffer for index %d\n",
+ i);
return -ENOMEM;
}
- psIntfAdapter->asUsbRcb[i].urb->transfer_buffer_length = MAX_DATA_BUFFER_SIZE;
+ psIntfAdapter->asUsbRcb[i].urb->transfer_buffer_length =
+ MAX_DATA_BUFFER_SIZE;
}
return 0;
}
@@ -322,24 +344,29 @@ static int device_run(struct bcm_interface_adapter *psIntfAdapter)
pr_err(DRV_NAME "InitCardAndDownloadFirmware failed.\n");
return status;
}
- if (TRUE == psIntfAdapter->psAdapter->fw_download_done) {
+ if (psIntfAdapter->psAdapter->fw_download_done) {
if (StartInterruptUrb(psIntfAdapter)) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Cannot send interrupt in URB\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Cannot send interrupt in URB\n");
}
/*
- * now register the cntrl interface.
- * after downloading the f/w waiting for 5 sec to get the mailbox interrupt.
+ * Now register the cntrl interface. After downloading the f/w,
+ * wait for 5 sec to get the mailbox interrupt.
*/
psIntfAdapter->psAdapter->waiting_to_fw_download_done = false;
- value = wait_event_timeout(psIntfAdapter->psAdapter->ioctl_fw_dnld_wait_queue,
- psIntfAdapter->psAdapter->waiting_to_fw_download_done, 5*HZ);
+ value = wait_event_timeout(
+ psIntfAdapter->psAdapter->ioctl_fw_dnld_wait_queue,
+ psIntfAdapter->psAdapter->waiting_to_fw_download_done,
+ 5 * HZ);
if (value == 0)
pr_err(DRV_NAME ": Timeout waiting for mailbox interrupt.\n");
- if (register_control_device_interface(psIntfAdapter->psAdapter) < 0) {
+ if (register_control_device_interface(
+ psIntfAdapter->psAdapter) < 0) {
pr_err(DRV_NAME ": Register Control Device failed.\n");
return -EIO;
}
@@ -347,81 +374,6 @@ static int device_run(struct bcm_interface_adapter *psIntfAdapter)
return 0;
}
-
-static inline int bcm_usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
-{
- return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
-}
-
-static inline int bcm_usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
-{
- return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
-}
-
-static inline int bcm_usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
-}
-
-static inline int bcm_usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
-}
-
-static inline int bcm_usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_BULK);
-}
-
-static inline int bcm_usb_endpoint_xfer_control(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_CONTROL);
-}
-
-static inline int bcm_usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_INT);
-}
-
-static inline int bcm_usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_ISOC);
-}
-
-static inline int bcm_usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_bulk(epd) && bcm_usb_endpoint_dir_in(epd);
-}
-
-static inline int bcm_usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_bulk(epd) && bcm_usb_endpoint_dir_out(epd);
-}
-
-static inline int bcm_usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_int(epd) && bcm_usb_endpoint_dir_in(epd);
-}
-
-static inline int bcm_usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_int(epd) && bcm_usb_endpoint_dir_out(epd);
-}
-
-static inline int bcm_usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_isoc(epd) && bcm_usb_endpoint_dir_in(epd);
-}
-
-static inline int bcm_usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_isoc(epd) && bcm_usb_endpoint_dir_out(epd);
-}
-
static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
{
struct usb_host_interface *iface_desc;
@@ -429,23 +381,27 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
size_t buffer_size;
unsigned long value;
int retval = 0;
- int usedIntOutForBulkTransfer = 0 ;
+ int usedIntOutForBulkTransfer = 0;
bool bBcm16 = false;
UINT uiData = 0;
int bytes;
/* Store the usb dev into interface adapter */
- psIntfAdapter->udev = usb_get_dev(interface_to_usbdev(psIntfAdapter->interface));
+ psIntfAdapter->udev =
+ usb_get_dev(interface_to_usbdev(psIntfAdapter->interface));
- psIntfAdapter->bHighSpeedDevice = (psIntfAdapter->udev->speed == USB_SPEED_HIGH);
+ psIntfAdapter->bHighSpeedDevice =
+ (psIntfAdapter->udev->speed == USB_SPEED_HIGH);
psIntfAdapter->psAdapter->interface_rdm = BcmRDM;
psIntfAdapter->psAdapter->interface_wrm = BcmWRM;
bytes = rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG,
- (u32 *)&(psIntfAdapter->psAdapter->chip_id), sizeof(u32));
+ (u32 *) &(psIntfAdapter->psAdapter->chip_id),
+ sizeof(u32));
if (bytes < 0) {
retval = bytes;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "CHIP ID Read Failed\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
+ "CHIP ID Read Failed\n");
return retval;
}
@@ -453,81 +409,119 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
psIntfAdapter->psAdapter->chip_id &= ~0xF0;
dev_info(&psIntfAdapter->udev->dev, "RDM Chip ID 0x%lx\n",
- psIntfAdapter->psAdapter->chip_id);
+ psIntfAdapter->psAdapter->chip_id);
iface_desc = psIntfAdapter->interface->cur_altsetting;
if (psIntfAdapter->psAdapter->chip_id == T3B) {
- /* T3B device will have EEPROM, check if EEPROM is proper and BCM16 can be done or not. */
+ /* T3B device will have EEPROM, check if EEPROM is proper and
+ * BCM16 can be done or not. */
BeceemEEPROMBulkRead(psIntfAdapter->psAdapter, &uiData, 0x0, 4);
if (uiData == BECM)
bBcm16 = TRUE;
- dev_info(&psIntfAdapter->udev->dev, "number of alternate setting %d\n",
- psIntfAdapter->interface->num_altsetting);
+ dev_info(&psIntfAdapter->udev->dev,
+ "number of alternate setting %d\n",
+ psIntfAdapter->interface->num_altsetting);
if (bBcm16 == TRUE) {
- /* selecting alternate setting one as a default setting for High Speed modem. */
+ /* selecting alternate setting one as a default setting
+ * for High Speed modem. */
if (psIntfAdapter->bHighSpeedDevice)
- retval = usb_set_interface(psIntfAdapter->udev, DEFAULT_SETTING_0, ALTERNATE_SETTING_1);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "BCM16 is applicable on this dongle\n");
- if (retval || (psIntfAdapter->bHighSpeedDevice == false)) {
- usedIntOutForBulkTransfer = EP2 ;
+ retval = usb_set_interface(psIntfAdapter->udev,
+ DEFAULT_SETTING_0,
+ ALTERNATE_SETTING_1);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "BCM16 is applicable on this dongle\n");
+ if (retval || !psIntfAdapter->bHighSpeedDevice) {
+ usedIntOutForBulkTransfer = EP2;
endpoint = &iface_desc->endpoint[EP2].desc;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Interface altsetting failed or modem is configured to Full Speed, hence will work on default setting 0\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Interface altsetting failed or modem is configured to Full Speed, hence will work on default setting 0\n");
/*
- * If Modem is high speed device EP2 should be INT OUT End point
- * If Mode is FS then EP2 should be bulk end point
+ * If Modem is high speed device EP2 should be
+ * INT OUT End point
+ *
+ * If Mode is FS then EP2 should be bulk end
+ * point
*/
- if (((psIntfAdapter->bHighSpeedDevice == TRUE) && (bcm_usb_endpoint_is_int_out(endpoint) == false))
- || ((psIntfAdapter->bHighSpeedDevice == false) && (bcm_usb_endpoint_is_bulk_out(endpoint) == false))) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Configuring the EEPROM\n");
+ if ((psIntfAdapter->bHighSpeedDevice &&
+ !usb_endpoint_is_int_out(endpoint)) ||
+ (!psIntfAdapter->bHighSpeedDevice &&
+ !usb_endpoint_is_bulk_out(endpoint))) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT,
+ DRV_ENTRY, DBG_LVL_ALL,
+ "Configuring the EEPROM\n");
/* change the EP2, EP4 to INT OUT end point */
- ConfigureEndPointTypesThroughEEPROM(psIntfAdapter->psAdapter);
+ ConfigureEndPointTypesThroughEEPROM(
+ psIntfAdapter->psAdapter);
/*
- * It resets the device and if any thing gets changed
- * in USB descriptor it will show fail and re-enumerate
- * the device
+ * It resets the device and if any thing
+ * gets changed in USB descriptor it
+ * will show fail and re-enumerate the
+ * device
*/
- retval = usb_reset_device(psIntfAdapter->udev);
+ retval = usb_reset_device(
+ psIntfAdapter->udev);
if (retval) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "reset failed. Re-enumerating the device.\n");
- return retval ;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT,
+ DRV_ENTRY,
+ DBG_LVL_ALL,
+ "reset failed. Re-enumerating the device.\n");
+ return retval;
}
}
- if ((psIntfAdapter->bHighSpeedDevice == false) && bcm_usb_endpoint_is_bulk_out(endpoint)) {
+ if (!psIntfAdapter->bHighSpeedDevice &&
+ usb_endpoint_is_bulk_out(endpoint)) {
/* Once BULK is selected in FS mode. Revert it back to INT. Else USB_IF will fail. */
UINT _uiData = ntohl(EP2_CFG_INT);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Reverting Bulk to INT as it is in Full Speed mode.\n");
- BeceemEEPROMBulkWrite(psIntfAdapter->psAdapter, (PUCHAR)&_uiData, 0x136, 4, TRUE);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT,
+ DRV_ENTRY, DBG_LVL_ALL,
+ "Reverting Bulk to INT as it is in Full Speed mode.\n");
+ BeceemEEPROMBulkWrite(
+ psIntfAdapter->psAdapter,
+ (PUCHAR)&_uiData,
+ 0x136, 4, TRUE);
}
} else {
- usedIntOutForBulkTransfer = EP4 ;
+ usedIntOutForBulkTransfer = EP4;
endpoint = &iface_desc->endpoint[EP4].desc;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Choosing AltSetting as a default setting.\n");
- if (bcm_usb_endpoint_is_int_out(endpoint) == false) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Dongle does not have BCM16 Fix.\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Choosing AltSetting as a default setting.\n");
+ if (!usb_endpoint_is_int_out(endpoint)) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT,
+ DRV_ENTRY, DBG_LVL_ALL,
+ "Dongle does not have BCM16 Fix.\n");
/* change the EP2, EP4 to INT OUT end point and use EP4 in altsetting */
- ConfigureEndPointTypesThroughEEPROM(psIntfAdapter->psAdapter);
+ ConfigureEndPointTypesThroughEEPROM(
+ psIntfAdapter->psAdapter);
/*
- * It resets the device and if any thing gets changed in
- * USB descriptor it will show fail and re-enumerate the
+ * It resets the device and if any thing
+ * gets changed in USB descriptor it
+ * will show fail and re-enumerate the
* device
*/
- retval = usb_reset_device(psIntfAdapter->udev);
+ retval = usb_reset_device(
+ psIntfAdapter->udev);
if (retval) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "reset failed. Re-enumerating the device.\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT,
+ DRV_ENTRY,
+ DBG_LVL_ALL,
+ "reset failed. Re-enumerating the device.\n");
return retval;
}
@@ -541,49 +535,69 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
for (value = 0; value < iface_desc->desc.bNumEndpoints; ++value) {
endpoint = &iface_desc->endpoint[value].desc;
- if (!psIntfAdapter->sBulkIn.bulk_in_endpointAddr && bcm_usb_endpoint_is_bulk_in(endpoint)) {
+ if (!psIntfAdapter->sBulkIn.bulk_in_endpointAddr &&
+ usb_endpoint_is_bulk_in(endpoint)) {
buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
psIntfAdapter->sBulkIn.bulk_in_size = buffer_size;
- psIntfAdapter->sBulkIn.bulk_in_endpointAddr = endpoint->bEndpointAddress;
- psIntfAdapter->sBulkIn.bulk_in_pipe =
- usb_rcvbulkpipe(psIntfAdapter->udev,
- psIntfAdapter->sBulkIn.bulk_in_endpointAddr);
+ psIntfAdapter->sBulkIn.bulk_in_endpointAddr =
+ endpoint->bEndpointAddress;
+ psIntfAdapter->sBulkIn.bulk_in_pipe = usb_rcvbulkpipe(
+ psIntfAdapter->udev,
+ psIntfAdapter->sBulkIn.bulk_in_endpointAddr);
}
- if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr && bcm_usb_endpoint_is_bulk_out(endpoint)) {
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr = endpoint->bEndpointAddress;
- psIntfAdapter->sBulkOut.bulk_out_pipe =
- usb_sndbulkpipe(psIntfAdapter->udev,
+ if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
+ usb_endpoint_is_bulk_out(endpoint)) {
+ psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
+ endpoint->bEndpointAddress;
+ psIntfAdapter->sBulkOut.bulk_out_pipe = usb_sndbulkpipe(
+ psIntfAdapter->udev,
psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
}
- if (!psIntfAdapter->sIntrIn.int_in_endpointAddr && bcm_usb_endpoint_is_int_in(endpoint)) {
+ if (!psIntfAdapter->sIntrIn.int_in_endpointAddr &&
+ usb_endpoint_is_int_in(endpoint)) {
buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
psIntfAdapter->sIntrIn.int_in_size = buffer_size;
- psIntfAdapter->sIntrIn.int_in_endpointAddr = endpoint->bEndpointAddress;
- psIntfAdapter->sIntrIn.int_in_interval = endpoint->bInterval;
+ psIntfAdapter->sIntrIn.int_in_endpointAddr =
+ endpoint->bEndpointAddress;
+ psIntfAdapter->sIntrIn.int_in_interval =
+ endpoint->bInterval;
psIntfAdapter->sIntrIn.int_in_buffer =
- kmalloc(buffer_size, GFP_KERNEL);
+ kmalloc(buffer_size, GFP_KERNEL);
if (!psIntfAdapter->sIntrIn.int_in_buffer)
return -EINVAL;
}
- if (!psIntfAdapter->sIntrOut.int_out_endpointAddr && bcm_usb_endpoint_is_int_out(endpoint)) {
+ if (!psIntfAdapter->sIntrOut.int_out_endpointAddr &&
+ usb_endpoint_is_int_out(endpoint)) {
if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
- (psIntfAdapter->psAdapter->chip_id == T3B) && (value == usedIntOutForBulkTransfer)) {
+ (psIntfAdapter->psAdapter->chip_id == T3B) &&
+ (value == usedIntOutForBulkTransfer)) {
/* use first intout end point as a bulk out end point */
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sBulkOut.bulk_out_size = buffer_size;
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr = endpoint->bEndpointAddress;
- psIntfAdapter->sBulkOut.bulk_out_pipe = usb_sndintpipe(psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
- psIntfAdapter->sBulkOut.int_out_interval = endpoint->bInterval;
+ buffer_size =
+ le16_to_cpu(endpoint->wMaxPacketSize);
+ psIntfAdapter->sBulkOut.bulk_out_size =
+ buffer_size;
+ psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
+ endpoint->bEndpointAddress;
+ psIntfAdapter->sBulkOut.bulk_out_pipe =
+ usb_sndintpipe(psIntfAdapter->udev,
+ psIntfAdapter->sBulkOut
+ .bulk_out_endpointAddr);
+ psIntfAdapter->sBulkOut.int_out_interval =
+ endpoint->bInterval;
} else if (value == EP6) {
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sIntrOut.int_out_size = buffer_size;
- psIntfAdapter->sIntrOut.int_out_endpointAddr = endpoint->bEndpointAddress;
- psIntfAdapter->sIntrOut.int_out_interval = endpoint->bInterval;
- psIntfAdapter->sIntrOut.int_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
+ buffer_size =
+ le16_to_cpu(endpoint->wMaxPacketSize);
+ psIntfAdapter->sIntrOut.int_out_size =
+ buffer_size;
+ psIntfAdapter->sIntrOut.int_out_endpointAddr =
+ endpoint->bEndpointAddress;
+ psIntfAdapter->sIntrOut.int_out_interval =
+ endpoint->bInterval;
+ psIntfAdapter->sIntrOut.int_out_buffer =
+ kmalloc(buffer_size, GFP_KERNEL);
if (!psIntfAdapter->sIntrOut.int_out_buffer)
return -EINVAL;
}
@@ -594,14 +608,14 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
psIntfAdapter->psAdapter->bcm_file_download = InterfaceFileDownload;
psIntfAdapter->psAdapter->bcm_file_readback_from_chip =
- InterfaceFileReadbackFromChip;
+ InterfaceFileReadbackFromChip;
psIntfAdapter->psAdapter->interface_transmit = InterfaceTransmitPacket;
retval = CreateInterruptUrb(psIntfAdapter);
if (retval) {
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Cannot create interrupt urb\n");
+ "Cannot create interrupt urb\n");
return retval;
}
@@ -618,17 +632,21 @@ static int InterfaceSuspend(struct usb_interface *intf, pm_message_t message)
psIntfAdapter->bSuspended = TRUE;
- if (TRUE == psIntfAdapter->bPreparingForBusSuspend) {
+ if (psIntfAdapter->bPreparingForBusSuspend) {
psIntfAdapter->bPreparingForBusSuspend = false;
if (psIntfAdapter->psAdapter->LinkStatus == LINKUP_DONE) {
- psIntfAdapter->psAdapter->IdleMode = TRUE ;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Host Entered in PMU Idle Mode.\n");
+ psIntfAdapter->psAdapter->IdleMode = TRUE;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Host Entered in PMU Idle Mode.\n");
} else {
psIntfAdapter->psAdapter->bShutStatus = TRUE;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Host Entered in PMU Shutdown Mode.\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Host Entered in PMU Shutdown Mode.\n");
}
}
psIntfAdapter->psAdapter->bPreparingForLowPowerMode = false;
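With the driver-local bcm_usb_endpoint_*() wrappers removed above, InterfaceAdapterInit() now uses the generic endpoint helpers declared in <linux/usb/ch9.h> (included explicitly at the top of the file). A hedged sketch of how those helpers classify an altsetting's endpoints; the function name and prints are illustrative, not the driver's:

#include <linux/usb.h>
#include <linux/usb/ch9.h>

static void classify_endpoints(const struct usb_host_interface *alt)
{
	const struct usb_endpoint_descriptor *epd;
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		epd = &alt->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(epd))
			pr_info("bulk-in  ep 0x%02x\n", epd->bEndpointAddress);
		else if (usb_endpoint_is_bulk_out(epd))
			pr_info("bulk-out ep 0x%02x\n", epd->bEndpointAddress);
		else if (usb_endpoint_is_int_in(epd))
			pr_info("int-in   ep 0x%02x, interval %u\n",
				epd->bEndpointAddress, epd->bInterval);
		else if (usb_endpoint_is_int_out(epd))
			pr_info("int-out  ep 0x%02x\n", epd->bEndpointAddress);
	}
}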
diff --git a/drivers/staging/bcm/InterfaceIsr.c b/drivers/staging/bcm/InterfaceIsr.c
index 7b39f4f5f1ab..b9f8a7aa24fe 100644
--- a/drivers/staging/bcm/InterfaceIsr.c
+++ b/drivers/staging/bcm/InterfaceIsr.c
@@ -4,108 +4,126 @@
static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/)
{
int status = urb->status;
- struct bcm_interface_adapter *psIntfAdapter = (struct bcm_interface_adapter *)urb->context;
- struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter ;
+ struct bcm_interface_adapter *psIntfAdapter =
+ (struct bcm_interface_adapter *)urb->context;
+ struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter;
if (netif_msg_intr(Adapter))
pr_info(PFX "%s: interrupt status %d\n",
- Adapter->dev->name, status);
+ Adapter->dev->name, status);
- if(Adapter->device_removed == TRUE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Device has Got Removed.");
- return ;
+ if (Adapter->device_removed) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL, "Device has Got Removed.");
+ return;
}
- if(((Adapter->bPreparingForLowPowerMode == TRUE) && (Adapter->bDoSuspend == TRUE)) ||
- psIntfAdapter->bSuspended ||
- psIntfAdapter->bPreparingForBusSuspend)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Interrupt call back is called while suspending the device");
- return ;
+ if ((Adapter->bPreparingForLowPowerMode && Adapter->bDoSuspend) ||
+ psIntfAdapter->bSuspended ||
+ psIntfAdapter->bPreparingForBusSuspend) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL,
+ "Interrupt call back is called while suspending the device");
+ return;
}
- //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "interrupt urb status %d", status);
switch (status) {
- /* success */
- case STATUS_SUCCESS:
- if ( urb->actual_length )
- {
-
- if(psIntfAdapter->ulInterruptData[1] & 0xFF)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL, "Got USIM interrupt");
+ /* success */
+ case STATUS_SUCCESS:
+ if (urb->actual_length) {
+
+ if (psIntfAdapter->ulInterruptData[1] & 0xFF) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ INTF_INIT, DBG_LVL_ALL,
+ "Got USIM interrupt");
}
- if(psIntfAdapter->ulInterruptData[1] & 0xFF00)
- {
+ if (psIntfAdapter->ulInterruptData[1] & 0xFF00) {
atomic_set(&Adapter->CurrNumFreeTxDesc,
- (psIntfAdapter->ulInterruptData[1] & 0xFF00) >> 8);
- atomic_set (&Adapter->uiMBupdate, TRUE);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL, "TX mailbox contains %d",
+ (psIntfAdapter->ulInterruptData[1] &
+ 0xFF00) >> 8);
+ atomic_set(&Adapter->uiMBupdate, TRUE);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ INTF_INIT, DBG_LVL_ALL,
+ "TX mailbox contains %d",
atomic_read(&Adapter->CurrNumFreeTxDesc));
}
- if(psIntfAdapter->ulInterruptData[1] >> 16)
- {
- Adapter->CurrNumRecvDescs=
+ if (psIntfAdapter->ulInterruptData[1] >> 16) {
+ Adapter->CurrNumRecvDescs =
(psIntfAdapter->ulInterruptData[1] >> 16);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"RX mailbox contains %d",
- Adapter->CurrNumRecvDescs);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ INTF_INIT, DBG_LVL_ALL,
+ "RX mailbox contains %d",
+ Adapter->CurrNumRecvDescs);
InterfaceRx(psIntfAdapter);
}
- if(Adapter->fw_download_done &&
+ if (Adapter->fw_download_done &&
!Adapter->downloadDDR &&
- atomic_read(&Adapter->CurrNumFreeTxDesc))
- {
- psIntfAdapter->psAdapter->downloadDDR +=1;
+ atomic_read(&Adapter->CurrNumFreeTxDesc)) {
+
+ psIntfAdapter->psAdapter->downloadDDR += 1;
wake_up(&Adapter->tx_packet_wait_queue);
}
- if(false == Adapter->waiting_to_fw_download_done)
- {
+ if (!Adapter->waiting_to_fw_download_done) {
Adapter->waiting_to_fw_download_done = TRUE;
wake_up(&Adapter->ioctl_fw_dnld_wait_queue);
}
- if(!atomic_read(&Adapter->TxPktAvail))
- {
+ if (!atomic_read(&Adapter->TxPktAvail)) {
atomic_set(&Adapter->TxPktAvail, 1);
wake_up(&Adapter->tx_packet_wait_queue);
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Firing interrupt in URB");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL, "Firing interrupt in URB");
}
break;
- case -ENOENT :
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"URB has got disconnected ....");
- return ;
- }
- case -EINPROGRESS:
- {
- //This situation may happened when URBunlink is used. for detail check usb_unlink_urb documentation.
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Impossibe condition has occurred... something very bad is going on");
- break ;
- //return;
- }
- case -EPIPE:
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Interrupt IN endPoint has got halted/stalled...need to clear this");
- Adapter->bEndPointHalted = TRUE ;
- wake_up(&Adapter->tx_packet_wait_queue);
- urb->status = STATUS_SUCCESS ;
- return;
- }
- /* software-driven interface shutdown */
- case -ECONNRESET: //URB got unlinked.
- case -ESHUTDOWN: // hardware gone. this is the serious problem.
- //Occurs only when something happens with the host controller device
- case -ENODEV : //Device got removed
- case -EINVAL : //Some thing very bad happened with the URB. No description is available.
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"interrupt urb error %d", status);
- urb->status = STATUS_SUCCESS ;
- break ;
- //return;
- default:
- //This is required to check what is the defaults conditions when it occurs..
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"GOT DEFAULT INTERRUPT URB STATUS :%d..Please Analyze it...", status);
+ case -ENOENT:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL, "URB has got disconnected....");
+ return;
+ case -EINPROGRESS:
+ /*
+ * This situation may happen when URB unlink is used; for
+ * details check the usb_unlink_urb documentation.
+ */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL,
+ "Impossibe condition has occurred... something very bad is going on");
+ break;
+ /* return; */
+ case -EPIPE:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL,
+ "Interrupt IN endPoint has got halted/stalled...need to clear this");
+ Adapter->bEndPointHalted = TRUE;
+ wake_up(&Adapter->tx_packet_wait_queue);
+ urb->status = STATUS_SUCCESS;
+ return;
+ /* software-driven interface shutdown */
+ case -ECONNRESET: /* URB got unlinked */
+ case -ESHUTDOWN: /* hardware gone; this is a serious problem */
+ /*
+ * Occurs only when something happens with the
+ * host controller device
+ */
+ case -ENODEV: /* Device got removed */
+ case -EINVAL:
+ /*
+ * Something very bad happened with the URB. No
+ * description is available.
+ */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL, "interrupt urb error %d", status);
+ urb->status = STATUS_SUCCESS;
+ break;
+ /* return; */
+ default:
+ /*
+ * This is required to check what the default conditions
+ * are when this occurs.
+ */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
+ "GOT DEFAULT INTERRUPT URB STATUS :%d..Please Analyze it...",
+ status);
break;
}
@@ -117,28 +135,30 @@ static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/)
int CreateInterruptUrb(struct bcm_interface_adapter *psIntfAdapter)
{
psIntfAdapter->psInterruptUrb = usb_alloc_urb(0, GFP_KERNEL);
- if (!psIntfAdapter->psInterruptUrb)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Cannot allocate interrupt urb");
+ if (!psIntfAdapter->psInterruptUrb) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS,
+ INTF_INIT, DBG_LVL_ALL,
+ "Cannot allocate interrupt urb");
return -ENOMEM;
}
psIntfAdapter->psInterruptUrb->transfer_buffer =
- psIntfAdapter->ulInterruptData;
+ psIntfAdapter->ulInterruptData;
psIntfAdapter->psInterruptUrb->transfer_buffer_length =
- sizeof(psIntfAdapter->ulInterruptData);
+ sizeof(psIntfAdapter->ulInterruptData);
psIntfAdapter->sIntrIn.int_in_pipe = usb_rcvintpipe(psIntfAdapter->udev,
- psIntfAdapter->sIntrIn.int_in_endpointAddr);
+ psIntfAdapter->sIntrIn.int_in_endpointAddr);
usb_fill_int_urb(psIntfAdapter->psInterruptUrb, psIntfAdapter->udev,
- psIntfAdapter->sIntrIn.int_in_pipe,
- psIntfAdapter->psInterruptUrb->transfer_buffer,
- psIntfAdapter->psInterruptUrb->transfer_buffer_length,
- read_int_callback, psIntfAdapter,
- psIntfAdapter->sIntrIn.int_in_interval);
-
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Interrupt Interval: %d\n",
- psIntfAdapter->sIntrIn.int_in_interval);
+ psIntfAdapter->sIntrIn.int_in_pipe,
+ psIntfAdapter->psInterruptUrb->transfer_buffer,
+ psIntfAdapter->psInterruptUrb->transfer_buffer_length,
+ read_int_callback, psIntfAdapter,
+ psIntfAdapter->sIntrIn.int_in_interval);
+
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL, "Interrupt Interval: %d\n",
+ psIntfAdapter->sIntrIn.int_in_interval);
return 0;
}
@@ -147,19 +167,20 @@ INT StartInterruptUrb(struct bcm_interface_adapter *psIntfAdapter)
{
INT status = 0;
- if( false == psIntfAdapter->psAdapter->device_removed &&
- false == psIntfAdapter->psAdapter->bEndPointHalted &&
- false == psIntfAdapter->bSuspended &&
- false == psIntfAdapter->bPreparingForBusSuspend &&
- false == psIntfAdapter->psAdapter->StopAllXaction)
- {
- status = usb_submit_urb(psIntfAdapter->psInterruptUrb, GFP_ATOMIC);
- if (status)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Cannot send int urb %d\n", status);
- if(status == -EPIPE)
- {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE ;
+ if (!(psIntfAdapter->psAdapter->device_removed ||
+ psIntfAdapter->psAdapter->bEndPointHalted ||
+ psIntfAdapter->bSuspended ||
+ psIntfAdapter->bPreparingForBusSuspend ||
+ psIntfAdapter->psAdapter->StopAllXaction)) {
+ status =
+ usb_submit_urb(psIntfAdapter->psInterruptUrb, GFP_ATOMIC);
+ if (status) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,
+ "Cannot send inturb %d\n", status);
+ if (status == -EPIPE) {
+ psIntfAdapter->psAdapter->bEndPointHalted =
+ TRUE;
wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
}
}
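CreateInterruptUrb() and StartInterruptUrb() above follow the standard interrupt-URB recipe: derive the IN pipe from the endpoint address, fill the URB with usb_fill_int_urb(), then submit it. A compact sketch of that sequence under the same assumptions (the buffer, endpoint address and completion handler are placeholders supplied by the caller):

#include <linux/usb.h>

/* Illustrative helper, not driver code. */
static int setup_and_submit_int_urb(struct usb_device *udev, struct urb *urb,
				    void *buf, int len, u8 ep_addr,
				    int interval, usb_complete_t complete_fn,
				    void *context)
{
	unsigned int pipe = usb_rcvintpipe(udev, ep_addr);

	usb_fill_int_urb(urb, udev, pipe, buf, len,
			 complete_fn, context, interval);

	/*
	 * GFP_ATOMIC mirrors StartInterruptUrb(), which may run in a
	 * context where sleeping is not allowed.
	 */
	return usb_submit_urb(urb, GFP_ATOMIC);
}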
diff --git a/drivers/staging/bcm/Kconfig b/drivers/staging/bcm/Kconfig
index 83c9752504d4..8acf4b24a7c9 100644
--- a/drivers/staging/bcm/Kconfig
+++ b/drivers/staging/bcm/Kconfig
@@ -1,7 +1,6 @@
config BCM_WIMAX
tristate "Beceem BCS200/BCS220-3 and BCSM250 wimax support"
depends on USB && NET
- default N
help
This is an experimental driver for the Beceem WIMAX chipset used
by Sprint 4G.
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 0727599bf5fa..4f315835ddfc 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -222,10 +222,7 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
//Checking classifier validity
if (!pstClassifierRule->bUsed || pstClassifierRule->ucDirection == DOWNLINK_DIR)
- {
- bClassificationSucceed = false;
break;
- }
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "is IPv6 check!");
if (pstClassifierRule->bIpv6Protocol)
@@ -233,51 +230,47 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
//**************Checking IP header parameter**************************//
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to match Source IP Address");
- if (false == (bClassificationSucceed =
- MatchSrcIpAddress(pstClassifierRule, iphd->saddr)))
+ if (!MatchSrcIpAddress(pstClassifierRule, iphd->saddr))
break;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source IP Address Matched");
- if (false == (bClassificationSucceed =
- MatchDestIpAddress(pstClassifierRule, iphd->daddr)))
+ if (!MatchDestIpAddress(pstClassifierRule, iphd->daddr))
break;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination IP Address Matched");
- if (false == (bClassificationSucceed =
- MatchTos(pstClassifierRule, iphd->tos)))
- {
+ if (!MatchTos(pstClassifierRule, iphd->tos)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Match failed\n");
break;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Matched");
- if (false == (bClassificationSucceed =
- MatchProtocol(pstClassifierRule, iphd->protocol)))
+ if (!MatchProtocol(pstClassifierRule, iphd->protocol))
break;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Matched");
//if protocol is not TCP or UDP then no need of comparing source port and destination port
- if (iphd->protocol != TCP && iphd->protocol != UDP)
+ if (iphd->protocol != TCP && iphd->protocol != UDP) {
+ bClassificationSucceed = TRUE;
break;
+ }
//******************Checking Transport Layer Header field if present *****************//
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source Port %04x",
(iphd->protocol == UDP) ? xprt_hdr->uhdr.source : xprt_hdr->thdr.source);
- if (false == (bClassificationSucceed =
- MatchSrcPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP) ?
- xprt_hdr->uhdr.source : xprt_hdr->thdr.source))))
+ if (!MatchSrcPort(pstClassifierRule,
+ ntohs((iphd->protocol == UDP) ?
+ xprt_hdr->uhdr.source : xprt_hdr->thdr.source)))
break;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Port Matched");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Port %04x",
(iphd->protocol == UDP) ? xprt_hdr->uhdr.dest :
xprt_hdr->thdr.dest);
- if (false == (bClassificationSucceed =
- MatchDestPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP) ?
- xprt_hdr->uhdr.dest : xprt_hdr->thdr.dest))))
+ if (!MatchDestPort(pstClassifierRule,
+ ntohs((iphd->protocol == UDP) ?
+ xprt_hdr->uhdr.dest : xprt_hdr->thdr.dest)))
break;
+ bClassificationSucceed = TRUE;
} while (0);
if (TRUE == bClassificationSucceed)
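The Qos.c hunk above stops assigning bClassificationSucceed inside each condition; every matcher is now tested with a plain "if (!Match...()) break;", and the flag is set to TRUE exactly once, after the last check passes, just before leaving the do { } while (0) block. A standalone sketch of that control-flow shape (the match_* helpers and values are stand-ins, not the driver's classifier functions):

#include <stdbool.h>
#include <stdio.h>

static bool match_src(int v)   { return v == 1; }
static bool match_dst(int v)   { return v == 2; }
static bool match_proto(int v) { return v == 6; }

static bool classify(int src, int dst, int proto)
{
	bool matched = false;

	do {
		if (!match_src(src))
			break;          /* early exit, flag stays false */
		if (!match_dst(dst))
			break;
		if (!match_proto(proto))
			break;
		matched = true;         /* set once, only if every check passed */
	} while (0);

	return matched;
}

int main(void)
{
	/* Prints "1 0": the first packet matches all rules, the second fails on dst. */
	printf("%d %d\n", classify(1, 2, 6), classify(1, 9, 6));
	return 0;
}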
diff --git a/drivers/staging/bcm/Typedefs.h b/drivers/staging/bcm/Typedefs.h
index 832adcfd1e3a..90b3b25dd606 100644
--- a/drivers/staging/bcm/Typedefs.h
+++ b/drivers/staging/bcm/Typedefs.h
@@ -25,16 +25,16 @@ typedef unsigned int B_UINT32;
typedef unsigned long ULONG;
typedef unsigned long DWORD;
-typedef char* PCHAR;
-typedef short* PSHORT;
-typedef int* PINT;
-typedef long* PLONG;
-typedef void* PVOID;
-
-typedef unsigned char* PUCHAR;
-typedef unsigned short* PUSHORT;
-typedef unsigned int* PUINT;
-typedef unsigned long* PULONG;
+typedef char *PCHAR;
+typedef short *PSHORT;
+typedef int *PINT;
+typedef long *PLONG;
+typedef void *PVOID;
+
+typedef unsigned char *PUCHAR;
+typedef unsigned short *PUSHORT;
+typedef unsigned int *PUINT;
+typedef unsigned long *PULONG;
typedef unsigned long long ULONG64;
typedef unsigned long long LARGE_INTEGER;
typedef unsigned int UINT32;
@@ -43,5 +43,5 @@ typedef unsigned int UINT32;
#endif
-#endif //__TYPEDEFS_H__
+#endif /* __TYPEDEFS_H__ */
diff --git a/drivers/staging/bcm/headers.h b/drivers/staging/bcm/headers.h
index 7fd21c6923cb..6f3270cc4176 100644
--- a/drivers/staging/bcm/headers.h
+++ b/drivers/staging/bcm/headers.h
@@ -34,7 +34,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/usb.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/ip.h>
#include "Typedefs.h"
diff --git a/drivers/staging/bcm/hostmibs.c b/drivers/staging/bcm/hostmibs.c
index f55300db1d48..39ace5510c43 100644
--- a/drivers/staging/bcm/hostmibs.c
+++ b/drivers/staging/bcm/hostmibs.c
@@ -27,9 +27,9 @@ INT ProcessGetHostMibs(struct bcm_mini_adapter *Adapter, struct bcm_host_stats_m
/* Copy the classifier Table */
for (nClassifierIndex = 0; nClassifierIndex < MAX_CLASSIFIERS; nClassifierIndex++) {
if (Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
- memcpy((PVOID) & pstHostMibs->
+ memcpy((PVOID) &pstHostMibs->
astClassifierTable[nClassifierIndex],
- (PVOID) & Adapter->
+ (PVOID) &Adapter->
astClassifierTable[nClassifierIndex],
sizeof(struct bcm_mibs_classifier_rule));
}
@@ -37,8 +37,8 @@ INT ProcessGetHostMibs(struct bcm_mini_adapter *Adapter, struct bcm_host_stats_m
/* Copy the SF Table */
for (nSfIndex = 0; nSfIndex < NO_OF_QUEUES; nSfIndex++) {
if (Adapter->PackInfo[nSfIndex].bValid) {
- memcpy((PVOID) & pstHostMibs->astSFtable[nSfIndex],
- (PVOID) & Adapter->PackInfo[nSfIndex],
+ memcpy((PVOID) &pstHostMibs->astSFtable[nSfIndex],
+ (PVOID) &Adapter->PackInfo[nSfIndex],
sizeof(struct bcm_mibs_table));
} else {
/* If index in not valid,
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index fca164f51f4b..63be3be62ebd 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -122,7 +122,7 @@ static UCHAR ReadEEPROMStatusRegister(struct bcm_mini_adapter *Adapter)
* OSAL_STATUS_CODE:
*/
-int ReadBeceemEEPROMBulk(struct bcm_mini_adapter *Adapter,
+static int ReadBeceemEEPROMBulk(struct bcm_mini_adapter *Adapter,
DWORD dwAddress,
DWORD *pdwData,
DWORD dwNumWords)
@@ -2698,7 +2698,7 @@ int BcmGetSectionValStartOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash
* On Failure -returns STATUS_FAILURE
*/
-int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
+static int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
{
int SectEndOffset = 0;
@@ -2980,7 +2980,7 @@ static int BcmGetActiveISO(struct bcm_mini_adapter *Adapter)
*
*/
-B_UINT8 IsOffsetWritable(struct bcm_mini_adapter *Adapter, unsigned int uiOffset)
+static B_UINT8 IsOffsetWritable(struct bcm_mini_adapter *Adapter, unsigned int uiOffset)
{
unsigned int uiSectorNum = 0;
unsigned int uiWordOfSectorPermission = 0;
@@ -3374,7 +3374,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
case DSD2:
if (ReadDSDSignature(Adapter, eFlash2xSectVal) == DSD_IMAGE_MAGIC_NUMBER) {
HighestPriDSD = getHighestPriDSD(Adapter);
- if ((HighestPriDSD == eFlash2xSectVal)) {
+ if (HighestPriDSD == eFlash2xSectVal) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given DSD<%x> already has highest priority", eFlash2xSectVal);
Status = STATUS_SUCCESS;
break;
@@ -3402,7 +3402,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
HighestPriDSD = getHighestPriDSD(Adapter);
- if ((HighestPriDSD == eFlash2xSectVal)) {
+ if (HighestPriDSD == eFlash2xSectVal) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Made the DSD: %x highest by reducing priority of other\n", eFlash2xSectVal);
Status = STATUS_SUCCESS;
break;
@@ -3421,7 +3421,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
}
HighestPriDSD = getHighestPriDSD(Adapter);
- if ((HighestPriDSD == eFlash2xSectVal)) {
+ if (HighestPriDSD == eFlash2xSectVal) {
Status = STATUS_SUCCESS;
break;
}
@@ -4074,7 +4074,7 @@ int BcmCopySection(struct bcm_mini_adapter *Adapter,
* Faillure :- Return negative error code
*/
-int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned int uiOffset)
+static int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned int uiOffset)
{
unsigned int offsetToProtect = 0, HeaderSizeToProtect = 0;
bool bHasHeader = false;
@@ -4213,7 +4213,7 @@ static int BcmDoChipSelect(struct bcm_mini_adapter *Adapter, unsigned int offset
return STATUS_SUCCESS;
}
-int ReadDSDSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
+static int ReadDSDSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
{
unsigned int uiDSDsig = 0;
/* unsigned int sigoffsetInMap = 0;
@@ -4238,7 +4238,7 @@ int ReadDSDSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_
return uiDSDsig;
}
-int ReadDSDPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
+static int ReadDSDPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
{
/* unsigned int priOffsetInMap = 0 ; */
unsigned int uiDSDPri = STATUS_FAILURE;
@@ -4261,7 +4261,7 @@ int ReadDSDPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_v
return uiDSDPri;
}
-enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter)
+static enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter)
{
int DSDHighestPri = STATUS_FAILURE;
int DsdPri = 0;
@@ -4293,7 +4293,7 @@ enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter)
return HighestPriDSD;
}
-int ReadISOSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
+static int ReadISOSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
{
unsigned int uiISOsig = 0;
/* unsigned int sigoffsetInMap = 0;
@@ -4316,7 +4316,7 @@ int ReadISOSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_
return uiISOsig;
}
-int ReadISOPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
+static int ReadISOPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
{
unsigned int ISOPri = STATUS_FAILURE;
if (IsSectionWritable(Adapter, iso)) {
@@ -4335,7 +4335,7 @@ int ReadISOPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_v
return ISOPri;
}
-enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter)
+static enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter)
{
int ISOHighestPri = STATUS_FAILURE;
int ISOPri = 0;
@@ -4359,7 +4359,7 @@ enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter)
return HighestPriISO;
}
-int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter,
+static int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter,
PUINT pBuff,
enum bcm_flash2x_section_val eFlash2xSectionVal,
unsigned int uiOffset,
@@ -4472,7 +4472,7 @@ bool IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_se
return SectionPresent;
}
-int IsSectionWritable(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val Section)
+static int IsSectionWritable(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val Section)
{
int offset = STATUS_FAILURE;
int Status = false;
diff --git a/drivers/staging/ced1401/ced_ioc.c b/drivers/staging/ced1401/ced_ioc.c
index bf532b1bd345..ebbc5090f219 100644
--- a/drivers/staging/ced1401/ced_ioc.c
+++ b/drivers/staging/ced1401/ced_ioc.c
@@ -38,8 +38,8 @@
****************************************************************************/
static void FlushOutBuff(DEVICE_EXTENSION *pdx)
{
- dev_dbg(&pdx->interface->dev, "%s currentState=%d", __func__,
- pdx->sCurrentState);
+ dev_dbg(&pdx->interface->dev, "%s: currentState=%d\n",
+ __func__, pdx->sCurrentState);
if (pdx->sCurrentState == U14ERR_TIME) /* Do nothing if hardware in trouble */
return;
/* Kill off any pending I/O */
@@ -59,8 +59,8 @@ static void FlushOutBuff(DEVICE_EXTENSION *pdx)
****************************************************************************/
static void FlushInBuff(DEVICE_EXTENSION *pdx)
{
- dev_dbg(&pdx->interface->dev, "%s currentState=%d", __func__,
- pdx->sCurrentState);
+ dev_dbg(&pdx->interface->dev, "%s: currentState=%d\n",
+ __func__, pdx->sCurrentState);
if (pdx->sCurrentState == U14ERR_TIME) /* Do nothing if hardware in trouble */
return;
/* Kill off any pending I/O */
@@ -118,8 +118,8 @@ int SendString(DEVICE_EXTENSION *pdx, const char __user *pData,
mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
if (n > 0) { /* do nothing if nowt to do! */
- dev_dbg(&pdx->interface->dev, "%s n=%d>%s<", __func__, n,
- buffer);
+ dev_dbg(&pdx->interface->dev, "%s: n=%d>%s<\n",
+ __func__, n, buffer);
iReturn = PutChars(pdx, buffer, n);
}
@@ -139,7 +139,7 @@ int SendChar(DEVICE_EXTENSION *pdx, char c)
int iReturn;
mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
iReturn = PutChars(pdx, &c, 1);
- dev_dbg(&pdx->interface->dev, "SendChar >%c< (0x%02x)", c, c);
+ dev_dbg(&pdx->interface->dev, "SendChar >%c< (0x%02x)\n", c, c);
Allowi(pdx); /* Make sure char reads are running */
mutex_unlock(&pdx->io_mutex);
return iReturn;
@@ -174,7 +174,7 @@ int SendChar(DEVICE_EXTENSION *pdx, char c)
int Get1401State(DEVICE_EXTENSION *pdx, __u32 *state, __u32 *error)
{
int nGot;
- dev_dbg(&pdx->interface->dev, "Get1401State() entry");
+ dev_dbg(&pdx->interface->dev, "%s: entry\n", __func__);
*state = 0xFFFFFFFF; /* Start off with invalid state */
nGot = usb_control_msg(pdx->udev, usb_rcvctrlpipe(pdx->udev, 0),
@@ -182,15 +182,15 @@ int Get1401State(DEVICE_EXTENSION *pdx, __u32 *state, __u32 *error)
pdx->statBuf, sizeof(pdx->statBuf), HZ);
if (nGot != sizeof(pdx->statBuf)) {
dev_err(&pdx->interface->dev,
- "Get1401State() FAILED, return code %d", nGot);
+ "%s: FAILED, return code %d\n", __func__, nGot);
pdx->sCurrentState = U14ERR_TIME; /* Indicate that things are very wrong indeed */
*state = 0; /* Force status values to a known state */
*error = 0;
} else {
int nDevice;
dev_dbg(&pdx->interface->dev,
- "Get1401State() Success, state: 0x%x, 0x%x",
- pdx->statBuf[0], pdx->statBuf[1]);
+ "%s: Success, state: 0x%x, 0x%x\n",
+ __func__, pdx->statBuf[0], pdx->statBuf[1]);
*state = pdx->statBuf[0]; /* Return the state values to the calling code */
*error = pdx->statBuf[1];
@@ -220,8 +220,8 @@ int Get1401State(DEVICE_EXTENSION *pdx, __u32 *state, __u32 *error)
****************************************************************************/
int ReadWrite_Cancel(DEVICE_EXTENSION *pdx)
{
- dev_dbg(&pdx->interface->dev, "ReadWrite_Cancel entry %d",
- pdx->bStagedUrbPending);
+ dev_dbg(&pdx->interface->dev, "%s: entry %d\n",
+ __func__, pdx->bStagedUrbPending);
#ifdef NOT_WRITTEN_YET
int ntStatus = STATUS_SUCCESS;
bool bResult = false;
@@ -231,7 +231,7 @@ int ReadWrite_Cancel(DEVICE_EXTENSION *pdx)
if (pdx->bStagedUrbPending) { /* anything to be cancelled? May need more... */
dev_info(&pdx->interface - dev,
- "ReadWrite_Cancel about to cancel Urb");
+ "ReadWrite_Cancel about to cancel Urb\n");
/* Clear the staging done flag */
/* KeClearEvent(&pdx->StagingDoneEvent); */
USB_ASSERT(pdx->pStagedIrp != NULL);
@@ -244,14 +244,14 @@ int ReadWrite_Cancel(DEVICE_EXTENSION *pdx)
LARGE_INTEGER timeout;
timeout.QuadPart = -10000000; /* Use a timeout of 1 second */
dev_info(&pdx->interface - dev,
- "ReadWrite_Cancel about to wait till done");
+ "%s: about to wait till done\n", __func__);
ntStatus =
KeWaitForSingleObject(&pdx->StagingDoneEvent,
Executive, KernelMode, FALSE,
&timeout);
} else {
dev_info(&pdx->interface - dev,
- "ReadWrite_Cancel, cancellation failed");
+ "%s: cancellation failed\n", __func__);
ntStatus = U14ERR_FAIL;
}
USB_KdPrint(DBGLVL_DEFAULT,
@@ -260,7 +260,7 @@ int ReadWrite_Cancel(DEVICE_EXTENSION *pdx)
} else
spin_unlock_irq(&pdx->stagedLock);
- dev_info(&pdx->interface - dev, "ReadWrite_Cancel done");
+ dev_info(&pdx->interface - dev, "%s: done\n", __func__);
return ntStatus;
#else
return U14ERR_NOERROR;
@@ -304,7 +304,7 @@ static int InSelfTest(DEVICE_EXTENSION *pdx, unsigned int *pState)
bool Is1401(DEVICE_EXTENSION *pdx)
{
int iReturn;
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
ced_draw_down(pdx); /* wait for, then kill outstanding Urbs */
FlushInBuff(pdx); /* Clear out input buffer & pipe */
@@ -368,7 +368,7 @@ bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset)
(pdx->sCurrentState >= U14ERR_STD)); /* No 1401 errors stored */
dev_dbg(&pdx->interface->dev,
- "%s DMAFlag:%d, state:%d, force:%d, testBuff:%d, short:%d",
+ "%s: DMAFlag:%d, state:%d, force:%d, testBuff:%d, short:%d\n",
__func__, pdx->dwDMAFlag, pdx->sCurrentState, pdx->bForceReset,
bTestBuff, bShortTest);
@@ -376,13 +376,13 @@ bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset)
(pdx->dwNumInput || pdx->dwNumOutput)) { /* ...characters were in the buffer? */
bShortTest = false; /* Then do the full test */
dev_dbg(&pdx->interface->dev,
- "%s will reset as buffers not empty", __func__);
+ "%s: will reset as buffers not empty\n", __func__);
}
if (bShortTest || !bCanReset) { /* Still OK to try the short test? */
/* Always test if no reset - we want state update */
unsigned int state, error;
- dev_dbg(&pdx->interface->dev, "%s->Get1401State", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: Get1401State\n", __func__);
if (Get1401State(pdx, &state, &error) == U14ERR_NOERROR) { /* Check on the 1401 state */
if ((state & 0xFF) == 0) /* If call worked, check the status value */
bRet = true; /* If that was zero, all is OK, no reset needed */
@@ -390,7 +390,7 @@ bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset)
}
if (!bRet && bCanReset) { /* If all not OK, then */
- dev_info(&pdx->interface->dev, "%s->Is1401 %d %d %d %d",
+ dev_info(&pdx->interface->dev, "%s: Is1401 %d %d %d %d\n",
__func__, bShortTest, pdx->sCurrentState, bTestBuff,
pdx->bForceReset);
bRet = Is1401(pdx); /* do full test */
@@ -407,7 +407,8 @@ bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset)
int Reset1401(DEVICE_EXTENSION *pdx)
{
mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
- dev_dbg(&pdx->interface->dev, "ABout to call QuickCheck");
+ dev_dbg(&pdx->interface->dev, "%s: About to call QuickCheck\n",
+ __func__);
QuickCheck(pdx, true, true); /* Check 1401, reset if not OK */
mutex_unlock(&pdx->io_mutex);
return U14ERR_NOERROR;
@@ -423,7 +424,7 @@ int GetChar(DEVICE_EXTENSION *pdx)
int iReturn = U14ERR_NOIN; /* assume we will get nothing */
mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
- dev_dbg(&pdx->interface->dev, "GetChar");
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
Allowi(pdx); /* Make sure char reads are running */
SendChars(pdx); /* and send any buffered chars */
@@ -497,8 +498,8 @@ int GetString(DEVICE_EXTENSION *pdx, char __user *pUser, int n)
pdx->dwNumInput -= nGot;
spin_unlock_irq(&pdx->charInLock);
- dev_dbg(&pdx->interface->dev,
- "GetString read %d characters >%s<", nGot, buffer);
+ dev_dbg(&pdx->interface->dev, "%s: read %d characters >%s<\n",
+ __func__, nGot, buffer);
if (copy_to_user(pUser, buffer, nCopyToUser))
iReturn = -EFAULT;
else
@@ -555,7 +556,7 @@ int LineCount(DEVICE_EXTENSION *pdx)
}
spin_unlock_irq(&pdx->charInLock);
- dev_dbg(&pdx->interface->dev, "LineCount returned %d", iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: returned %d\n", __func__, iReturn);
mutex_unlock(&pdx->io_mutex); /* Protect disconnect from new i/o */
return iReturn;
}
@@ -571,7 +572,7 @@ int GetOutBufSpace(DEVICE_EXTENSION *pdx)
mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
SendChars(pdx); /* send any buffered chars */
iReturn = (int)(OUTBUF_SZ - pdx->dwNumOutput); /* no lock needed for single read */
- dev_dbg(&pdx->interface->dev, "OutBufSpace %d", iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: %d\n", __func__, iReturn);
mutex_unlock(&pdx->io_mutex); /* Protect disconnect from new i/o */
return iReturn;
}
@@ -589,7 +590,7 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
if ((nArea < 0) || (nArea >= MAX_TRANSAREAS)) {
iReturn = U14ERR_BADAREA;
- dev_err(&pdx->interface->dev, "%s Attempt to clear area %d",
+ dev_err(&pdx->interface->dev, "%s: Attempt to clear area %d\n",
__func__, nArea);
} else {
TRANSAREA *pTA = &pdx->rTransDef[nArea]; /* to save typing */
@@ -602,14 +603,14 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
int nPages = 0; /* and number of pages */
int np;
- dev_dbg(&pdx->interface->dev, "%s area %d", __func__,
- nArea);
+ dev_dbg(&pdx->interface->dev, "%s: area %d\n",
+ __func__, nArea);
spin_lock_irq(&pdx->stagedLock);
if ((pdx->StagedId == nArea)
&& (pdx->dwDMAFlag > MODE_CHAR)) {
iReturn = U14ERR_UNLOCKFAIL; /* cannot delete as in use */
dev_err(&pdx->interface->dev,
- "%s call on area %d while active",
+ "%s: call on area %d while active\n",
__func__, nArea);
} else {
pPages = pTA->pPages; /* save page address list */
@@ -633,7 +634,7 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
/* Now we must undo the pinning down of the pages. We will assume the worst and mark */
/* all the pages as dirty. Don't be tempted to move this up above as you must not be */
/* holding a spin lock to do this stuff as it is not atomic. */
- dev_dbg(&pdx->interface->dev, "%s nPages=%d",
+ dev_dbg(&pdx->interface->dev, "%s: nPages=%d\n",
__func__, nPages);
for (np = 0; np < nPages; ++np) {
@@ -645,7 +646,7 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
kfree(pPages);
dev_dbg(&pdx->interface->dev,
- "%s kfree(pPages) done", __func__);
+ "%s: kfree(pPages) done\n", __func__);
}
}
}
@@ -687,12 +688,12 @@ static int SetArea(DEVICE_EXTENSION *pdx, int nArea, char __user *puBuf,
iReturn = U14ERR_NOMEMORY;
goto error;
}
- dev_dbg(&pdx->interface->dev, "%s %p, length=%06x, circular %d",
+ dev_dbg(&pdx->interface->dev, "%s: %p, length=%06x, circular %d\n",
__func__, puBuf, dwLength, bCircular);
/* To pin down user pages we must first acquire the mapping semaphore. */
nPages = get_user_pages_fast(ulStart, len, 1, pPages);
- dev_dbg(&pdx->interface->dev, "%s nPages = %d", __func__, nPages);
+ dev_dbg(&pdx->interface->dev, "%s: nPages = %d\n", __func__, nPages);
if (nPages > 0) { /* if we succeeded */
/* If you are tempted to use page_address (form LDD3), forget it. You MUST use */
@@ -735,17 +736,17 @@ error:
** unset it. Unsetting will fail if the area is booked, and a transfer to that
** area is in progress. Otherwise, we will release the area and re-assign it.
****************************************************************************/
-int SetTransfer(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD)
+int SetTransfer(DEVICE_EXTENSION *pdx, struct transfer_area_desc __user *pTD)
{
int iReturn;
- TRANSFERDESC td;
+ struct transfer_area_desc td;
if (copy_from_user(&td, pTD, sizeof(td)))
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s area:%d, size:%08x", __func__,
- td.wAreaNum, td.dwLength);
+ dev_dbg(&pdx->interface->dev, "%s: area:%d, size:%08x\n",
+ __func__, td.wAreaNum, td.dwLength);
/* The strange cast is done so that we don't get warnings in 32-bit linux about the size of the */
/* pointer. The pointer is always passed as a 64-bit object so that we don't have problems using */
/* a 32-bit program on a 64-bit system. unsigned long is 64-bits on a 64-bit system. */
@@ -778,10 +779,10 @@ int UnsetTransfer(DEVICE_EXTENSION *pdx, int nArea)
** pretend that whatever the user asked for was achieved, so we return 1 if
** try to create one, and 0 if they ask to remove (assuming all else was OK).
****************************************************************************/
-int SetEvent(DEVICE_EXTENSION *pdx, TRANSFEREVENT __user *pTE)
+int SetEvent(DEVICE_EXTENSION *pdx, struct transfer_event __user *pTE)
{
int iReturn = U14ERR_NOERROR;
- TRANSFEREVENT te;
+ struct transfer_event te;
/* get a local copy of the data */
if (copy_from_user(&te, pTE, sizeof(te)))
@@ -922,7 +923,7 @@ int GetTransfer(DEVICE_EXTENSION *pdx, TGET_TX_BLOCK __user *pTX)
****************************************************************************/
int KillIO1401(DEVICE_EXTENSION *pdx)
{
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
mutex_lock(&pdx->io_mutex);
FlushOutBuff(pdx);
FlushInBuff(pdx);
@@ -938,7 +939,7 @@ int KillIO1401(DEVICE_EXTENSION *pdx)
int BlkTransState(DEVICE_EXTENSION *pdx)
{
int iReturn = pdx->dwDMAFlag != MODE_CHAR;
- dev_dbg(&pdx->interface->dev, "%s = %d", __func__, iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: %d\n", __func__, iReturn);
return iReturn;
}
@@ -956,7 +957,7 @@ int StateOf1401(DEVICE_EXTENSION *pdx)
iReturn = pdx->sCurrentState;
mutex_unlock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s = %d", __func__, iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: %d\n", __func__, iReturn);
return iReturn;
}
@@ -971,7 +972,7 @@ int StartSelfTest(DEVICE_EXTENSION *pdx)
{
int nGot;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
ced_draw_down(pdx); /* wait for, then kill outstanding Urbs */
FlushInBuff(pdx); /* Clear out input buffer & pipe */
@@ -987,7 +988,7 @@ int StartSelfTest(DEVICE_EXTENSION *pdx)
mutex_unlock(&pdx->io_mutex);
if (nGot < 0)
- dev_err(&pdx->interface->dev, "%s err=%d", __func__, nGot);
+ dev_err(&pdx->interface->dev, "%s: err=%d\n", __func__, nGot);
return nGot < 0 ? U14ERR_FAIL : U14ERR_NOERROR;
}
@@ -1005,7 +1006,7 @@ int CheckSelfTest(DEVICE_EXTENSION *pdx, TGET_SELFTEST __user *pGST)
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
iReturn = Get1401State(pdx, &state, &error);
if (iReturn == U14ERR_NOERROR) /* Only accept zero if it happens twice */
iReturn = Get1401State(pdx, &state, &error);
@@ -1013,8 +1014,8 @@ int CheckSelfTest(DEVICE_EXTENSION *pdx, TGET_SELFTEST __user *pGST)
if (iReturn != U14ERR_NOERROR) { /* Self-test can cause comms errors */
/* so we assume still testing */
dev_err(&pdx->interface->dev,
- "%s Get1401State=%d, assuming still testing", __func__,
- iReturn);
+ "%s: Get1401State=%d, assuming still testing\n",
+ __func__, iReturn);
state = 0x80; /* Force still-testing, no error */
error = 0;
iReturn = U14ERR_NOERROR;
@@ -1022,7 +1023,7 @@ int CheckSelfTest(DEVICE_EXTENSION *pdx, TGET_SELFTEST __user *pGST)
if ((state == -1) && (error == -1)) { /* If Get1401State had problems */
dev_err(&pdx->interface->dev,
- "%s Get1401State failed, assuming still testing",
+ "%s: Get1401State failed, assuming still testing\n",
__func__);
state = 0x80; /* Force still-testing, no error */
error = 0;
@@ -1033,21 +1034,21 @@ int CheckSelfTest(DEVICE_EXTENSION *pdx, TGET_SELFTEST __user *pGST)
gst.code = (state & 0x00FF0000) >> 16; /* read the error code */
gst.x = error & 0x0000FFFF; /* Error data X */
gst.y = (error & 0xFFFF0000) >> 16; /* and data Y */
- dev_dbg(&pdx->interface->dev, "Self-test error code %d",
- gst.code);
+ dev_dbg(&pdx->interface->dev,
+ "Self-test error code %d\n", gst.code);
} else { /* No error, check for timeout */
unsigned long ulNow = jiffies; /* get current time */
if (time_after(ulNow, pdx->ulSelfTestTime)) {
gst.code = -2; /* Flag the timeout */
dev_dbg(&pdx->interface->dev,
- "Self-test timed-out");
+ "Self-test timed-out\n");
} else
dev_dbg(&pdx->interface->dev,
- "Self-test on-going");
+ "Self-test on-going\n");
}
} else {
gst.code = -1; /* Flag the test is done */
- dev_dbg(&pdx->interface->dev, "Self-test done");
+ dev_dbg(&pdx->interface->dev, "Self-test done\n");
}
if (gst.code < 0) { /* If we have a problem or finished */
@@ -1074,7 +1075,7 @@ int TypeOf1401(DEVICE_EXTENSION *pdx)
{
int iReturn = TYPEUNKNOWN;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
switch (pdx->s1401Type) {
case TYPE1401:
@@ -1092,7 +1093,7 @@ int TypeOf1401(DEVICE_EXTENSION *pdx)
else /* for up-coming 1401 designs */
iReturn = TYPEUNKNOWN; /* Don't know or not there */
}
- dev_dbg(&pdx->interface->dev, "%s %d", __func__, iReturn);
+ dev_dbg(&pdx->interface->dev, "%s %d\n", __func__, iReturn);
mutex_unlock(&pdx->io_mutex);
return iReturn;
@@ -1107,7 +1108,7 @@ int TransferFlags(DEVICE_EXTENSION *pdx)
{
int iReturn = U14TF_MULTIA | U14TF_DIAG | /* we always have multiple DMA area */
U14TF_NOTIFY | U14TF_CIRCTH; /* diagnostics, notify and circular */
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
mutex_lock(&pdx->io_mutex);
if (pdx->bIsUSB2) /* Set flag for USB2 if appropriate */
iReturn |= U14TF_USB2;
@@ -1125,15 +1126,15 @@ static int DbgCmd1401(DEVICE_EXTENSION *pdx, unsigned char cmd,
unsigned int data)
{
int iReturn;
- dev_dbg(&pdx->interface->dev, "%s entry", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: entry\n", __func__);
iReturn = usb_control_msg(pdx->udev, usb_sndctrlpipe(pdx->udev, 0), cmd,
(H_TO_D | VENDOR | DEVREQ),
(unsigned short)data,
(unsigned short)(data >> 16), NULL, 0, HZ);
/* allow 1 second timeout */
if (iReturn < 0)
- dev_err(&pdx->interface->dev, "%s fail code=%d", __func__,
- iReturn);
+ dev_err(&pdx->interface->dev, "%s: fail code=%d\n",
+ __func__, iReturn);
return iReturn;
}
@@ -1152,7 +1153,7 @@ int DbgPeek(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s @ %08x", __func__, db.iAddr);
+ dev_dbg(&pdx->interface->dev, "%s: @ %08x\n", __func__, db.iAddr);
iReturn = DbgCmd1401(pdx, DB_SETADD, db.iAddr);
if (iReturn == U14ERR_NOERROR)
@@ -1181,7 +1182,7 @@ int DbgPoke(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s @ %08x", __func__, db.iAddr);
+ dev_dbg(&pdx->interface->dev, "%s: @ %08x\n", __func__, db.iAddr);
iReturn = DbgCmd1401(pdx, DB_SETADD, db.iAddr);
if (iReturn == U14ERR_NOERROR)
@@ -1210,7 +1211,7 @@ int DbgRampData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s @ %08x", __func__, db.iAddr);
+ dev_dbg(&pdx->interface->dev, "%s: @ %08x\n", __func__, db.iAddr);
iReturn = DbgCmd1401(pdx, DB_SETADD, db.iAddr);
if (iReturn == U14ERR_NOERROR)
@@ -1242,7 +1243,7 @@ int DbgRampAddr(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
iReturn = DbgCmd1401(pdx, DB_SETDEF, db.iDefault);
if (iReturn == U14ERR_NOERROR)
@@ -1270,7 +1271,7 @@ int DbgGetData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
memset(&db, 0, sizeof(db)); /* fill returned block with 0s */
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
/* Read back the last peeked value from the 1401. */
iReturn = usb_control_msg(pdx->udev, usb_rcvctrlpipe(pdx->udev, 0),
@@ -1282,8 +1283,8 @@ int DbgGetData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
else
iReturn = U14ERR_NOERROR;
} else
- dev_err(&pdx->interface->dev, "%s failed, code %d", __func__,
- iReturn);
+ dev_err(&pdx->interface->dev, "%s: failed, code %d\n",
+ __func__, iReturn);
mutex_unlock(&pdx->io_mutex);
@@ -1302,7 +1303,7 @@ int DbgStopLoop(DEVICE_EXTENSION *pdx)
unsigned int uState, uErr;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
iReturn = Get1401State(pdx, &uState, &uErr);
mutex_unlock(&pdx->io_mutex);
@@ -1317,18 +1318,18 @@ int DbgStopLoop(DEVICE_EXTENSION *pdx)
** booked and a transfer to that area is in progress. Otherwise, we will
** release the area and re-assign it.
****************************************************************************/
-int SetCircular(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD)
+int SetCircular(DEVICE_EXTENSION *pdx, struct transfer_area_desc __user *pTD)
{
int iReturn;
bool bToHost;
- TRANSFERDESC td;
+ struct transfer_area_desc td;
if (copy_from_user(&td, pTD, sizeof(td)))
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s area:%d, size:%08x", __func__,
- td.wAreaNum, td.dwLength);
+ dev_dbg(&pdx->interface->dev, "%s: area:%d, size:%08x\n",
+ __func__, td.wAreaNum, td.dwLength);
bToHost = td.eSize != 0; /* this is used as the tohost flag */
/* The strange cast is done so that we don't get warnings in 32-bit linux about the size of the */
@@ -1353,7 +1354,7 @@ int GetCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
unsigned int nArea;
TCIRCBLOCK cb;
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
if (copy_from_user(&cb, pCB, sizeof(cb)))
return -EFAULT;
@@ -1374,7 +1375,7 @@ int GetCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
cb.dwOffset = pArea->aBlocks[0].dwOffset;
cb.dwSize = pArea->aBlocks[0].dwSize;
dev_dbg(&pdx->interface->dev,
- "%s return block 0: %d bytes at %d",
+ "%s: return block 0: %d bytes at %d\n",
__func__, cb.dwSize, cb.dwOffset);
}
} else
@@ -1402,7 +1403,7 @@ int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
unsigned int nArea, uStart, uSize;
TCIRCBLOCK cb;
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
if (copy_from_user(&cb, pCB, sizeof(cb)))
return -EFAULT;
@@ -1437,7 +1438,7 @@ int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
}
dev_dbg(&pdx->interface->dev,
- "%s free %d bytes at %d, return %d bytes at %d, wait=%d",
+ "%s: free %d bytes at %d, return %d bytes at %d, wait=%d\n",
__func__, uSize, uStart,
pArea->aBlocks[0].dwSize,
pArea->aBlocks[0].dwOffset,
@@ -1453,13 +1454,13 @@ int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
bWaiting = pdx->bXFerWaiting;
if (bWaiting && pdx->bStagedUrbPending) {
dev_err(&pdx->interface->dev,
- "%s ERROR: waiting xfer and staged Urb pending!",
+ "%s: ERROR: waiting xfer and staged Urb pending!\n",
__func__);
bWaiting = false;
}
} else {
dev_err(&pdx->interface->dev,
- "%s ERROR: freeing %d bytes at %d, block 0 is %d bytes at %d",
+ "%s: ERROR: freeing %d bytes at %d, block 0 is %d bytes at %d\n",
__func__, uSize, uStart,
pArea->aBlocks[0].dwSize,
pArea->aBlocks[0].dwOffset);
@@ -1475,7 +1476,7 @@ int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
pdx->rDMAInfo.dwSize);
if (RWMStat != U14ERR_NOERROR)
dev_err(&pdx->interface->dev,
- "%s rw setup failed %d",
+ "%s: rw setup failed %d\n",
__func__, RWMStat);
}
} else
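Nearly every ced_ioc.c change above applies the same logging convention: print the function name via __func__ rather than hard-coding it, separate it from the message with a colon, and end each format string with \n. A minimal kernel-style sketch of that convention (the helper name and arguments are invented, not part of the driver):

#include <linux/device.h>

/* Debug/error output formatted the way the patch does: "func: message\n". */
static void my_report_state(struct device *dev, int state)
{
	dev_dbg(dev, "%s: currentState=%d\n", __func__, state);

	if (state < 0)
		dev_err(dev, "%s: FAILED, return code %d\n", __func__, state);
}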
diff --git a/drivers/staging/ced1401/ced_ioctl.h b/drivers/staging/ced1401/ced_ioctl.h
index aa68878bd251..4b6c9dedb21e 100644
--- a/drivers/staging/ced1401/ced_ioctl.h
+++ b/drivers/staging/ced1401/ced_ioctl.h
@@ -26,24 +26,21 @@
** TypeDefs
*****************************************************************************/
-typedef unsigned short TBLOCKENTRY; /* index the blk transfer table 0-7 */
-
-typedef struct TransferDesc {
+struct transfer_area_desc {
long long lpvBuff; /* address of transfer area (for 64 or 32 bit) */
unsigned int dwLength; /* length of the area */
- TBLOCKENTRY wAreaNum; /* number of transfer area to set up */
+ unsigned short wAreaNum; /* number of transfer area to set up */
short eSize; /* element size - is tohost flag for circular */
-} TRANSFERDESC;
+};
-typedef TRANSFERDESC *LPTRANSFERDESC;
-typedef struct TransferEvent {
+struct transfer_event {
unsigned int dwStart; /* offset into the area */
unsigned int dwLength; /* length of the region */
unsigned short wAreaNum; /* the area number */
unsigned short wFlags; /* bit 0 set for toHost */
int iSetEvent; /* could be dummy in LINUX */
-} TRANSFEREVENT;
+};
#define MAX_TRANSFER_SIZE 0x4000 /* Maximum data bytes per IRP */
#define MAX_AREA_LENGTH 0x100000 /* Maximum size of transfer area */
@@ -85,12 +82,6 @@ typedef struct TCSBlock {
*/
#define CED_MAGIC_IOC 0xce
-/* NBNB: READ and WRITE are from the point of view of the device, not user. */
-typedef struct ced_ioc_string {
- int nChars;
- char buffer[256];
-} CED_IOC_STRING;
-
#define IOCTL_CED_SENDSTRING(n) _IOC(_IOC_WRITE, CED_MAGIC_IOC, 2, n)
#define IOCTL_CED_RESET1401 _IO(CED_MAGIC_IOC, 3)
@@ -100,9 +91,9 @@ typedef struct ced_ioc_string {
#define IOCTL_CED_LINECOUNT _IO(CED_MAGIC_IOC, 7)
#define IOCTL_CED_GETSTRING(nMax) _IOC(_IOC_READ, CED_MAGIC_IOC, 8, nMax)
-#define IOCTL_CED_SETTRANSFER _IOW(CED_MAGIC_IOC, 11, TRANSFERDESC)
+#define IOCTL_CED_SETTRANSFER _IOW(CED_MAGIC_IOC, 11, struct transfer_area_desc)
#define IOCTL_CED_UNSETTRANSFER _IO(CED_MAGIC_IOC, 12)
-#define IOCTL_CED_SETEVENT _IOW(CED_MAGIC_IOC, 13, TRANSFEREVENT)
+#define IOCTL_CED_SETEVENT _IOW(CED_MAGIC_IOC, 13, struct transfer_event)
#define IOCTL_CED_GETOUTBUFSPACE _IO(CED_MAGIC_IOC, 14)
#define IOCTL_CED_GETBASEADDRESS _IO(CED_MAGIC_IOC, 15)
#define IOCTL_CED_GETDRIVERREVISION _IO(CED_MAGIC_IOC, 16)
@@ -126,7 +117,7 @@ typedef struct ced_ioc_string {
#define IOCTL_CED_DBGGETDATA _IOR(CED_MAGIC_IOC, 39, TDBGBLOCK)
#define IOCTL_CED_DBGSTOPLOOP _IO(CED_MAGIC_IOC, 40)
#define IOCTL_CED_FULLRESET _IO(CED_MAGIC_IOC, 41)
-#define IOCTL_CED_SETCIRCULAR _IOW(CED_MAGIC_IOC, 42, TRANSFERDESC)
+#define IOCTL_CED_SETCIRCULAR _IOW(CED_MAGIC_IOC, 42, struct transfer_area_desc)
#define IOCTL_CED_GETCIRCBLOCK _IOWR(CED_MAGIC_IOC, 43, TCIRCBLOCK)
#define IOCTL_CED_FREECIRCBLOCK _IOWR(CED_MAGIC_IOC, 44, TCIRCBLOCK)
#define IOCTL_CED_WAITEVENT _IO(CED_MAGIC_IOC, 45)
@@ -198,7 +189,7 @@ inline int CED_GetDriverRevision(int fh)
return ioctl(fh, IOCTL_CED_GETDRIVERREVISION);
}
-inline int CED_SetTransfer(int fh, TRANSFERDESC *pTD)
+inline int CED_SetTransfer(int fh, struct transfer_area_desc *pTD)
{
return ioctl(fh, IOCTL_CED_SETTRANSFER, pTD);
}
@@ -208,7 +199,7 @@ inline int CED_UnsetTransfer(int fh, int nArea)
return ioctl(fh, IOCTL_CED_UNSETTRANSFER, nArea);
}
-inline int CED_SetEvent(int fh, TRANSFEREVENT *pTE)
+inline int CED_SetEvent(int fh, struct transfer_event *pTE)
{
return ioctl(fh, IOCTL_CED_SETEVENT, pTE);
}
@@ -299,7 +290,7 @@ inline int CED_FullReset(int fh)
return ioctl(fh, IOCTL_CED_FULLRESET);
}
-inline int CED_SetCircular(int fh, TRANSFERDESC *pTD)
+inline int CED_SetCircular(int fh, struct transfer_area_desc *pTD)
{
return ioctl(fh, IOCTL_CED_SETCIRCULAR, pTD);
}
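The ced_ioctl.h hunk drops the TRANSFERDESC/TRANSFEREVENT typedefs in favour of plain struct transfer_area_desc and struct transfer_event, and those struct names now appear directly in the _IOW() request definitions and the inline wrappers. A self-contained userspace sketch of the same pattern; the magic number, request number, struct layout and device path below are invented for illustration only:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

/* Plain struct, no typedef: _IOW() sizes the request against this type. */
struct my_area_desc {
	long long buff;         /* user buffer address, 64-bit safe */
	unsigned int length;    /* length of the area */
	unsigned short area;    /* which transfer area to set up */
	short esize;            /* element size / direction flag */
};

#define MY_MAGIC        0xce
#define MY_SETTRANSFER  _IOW(MY_MAGIC, 11, struct my_area_desc)

int main(void)
{
	struct my_area_desc td = { .buff = 0, .length = 0x1000, .area = 0 };
	int fh = open("/dev/my1401", O_RDWR);   /* hypothetical device node */

	if (fh < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fh, MY_SETTRANSFER, &td) < 0)
		perror("ioctl");
	close(fh);
	return 0;
}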
diff --git a/drivers/staging/ced1401/usb1401.c b/drivers/staging/ced1401/usb1401.c
index efc310ca789e..284abc08922c 100644
--- a/drivers/staging/ced1401/usb1401.c
+++ b/drivers/staging/ced1401/usb1401.c
@@ -166,7 +166,7 @@ static int ced_open(struct inode *inode, struct file *file)
goto exit;
}
- dev_dbg(&interface->dev, "%s got pdx", __func__);
+ dev_dbg(&interface->dev, "%s: got pdx\n", __func__);
/* increment our usage count for the device */
kref_get(&pdx->kref);
@@ -184,7 +184,7 @@ static int ced_open(struct inode *inode, struct file *file)
goto exit;
}
} else { /* uncomment this block if you want exclusive open */
- dev_err(&interface->dev, "%s fail: already open", __func__);
+ dev_err(&interface->dev, "%s: fail: already open\n", __func__);
retval = -EBUSY;
pdx->open_count--;
mutex_unlock(&pdx->io_mutex);
@@ -207,7 +207,7 @@ static int ced_release(struct inode *inode, struct file *file)
if (pdx == NULL)
return -ENODEV;
- dev_dbg(&pdx->interface->dev, "%s called", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: called\n", __func__);
mutex_lock(&pdx->io_mutex);
if (!--pdx->open_count && pdx->interface) /* Allow autosuspend */
usb_autopm_put_interface(pdx->interface);
@@ -224,12 +224,12 @@ static int ced_flush(struct file *file, fl_owner_t id)
if (pdx == NULL)
return -ENODEV;
- dev_dbg(&pdx->interface->dev, "%s char in pend=%d", __func__,
- pdx->bReadCharsPending);
+ dev_dbg(&pdx->interface->dev, "%s: char in pend=%d\n",
+ __func__, pdx->bReadCharsPending);
/* wait for io to stop */
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s got io_mutex", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: got io_mutex\n", __func__);
ced_draw_down(pdx);
/* read out errors, leave subsequent opens a clean slate */
@@ -239,7 +239,7 @@ static int ced_flush(struct file *file, fl_owner_t id)
spin_unlock_irq(&pdx->err_lock);
mutex_unlock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s exit reached", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: exit reached\n", __func__);
return res;
}
@@ -270,7 +270,7 @@ static void ced_writechar_callback(struct urb *pUrb)
(pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
|| pUrb->status == -ESHUTDOWN)) {
dev_err(&pdx->interface->dev,
- "%s - nonzero write bulk status received: %d",
+ "%s: nonzero write bulk status received: %d\n",
__func__, pUrb->status);
}
@@ -287,10 +287,10 @@ static void ced_writechar_callback(struct urb *pUrb)
pdx->bSendCharsPending = false; /* Allow other threads again */
spin_unlock(&pdx->charOutLock); /* already at irq level */
dev_dbg(&pdx->interface->dev,
- "%s - char out done, 0 chars sent", __func__);
+ "%s: char out done, 0 chars sent\n", __func__);
} else {
dev_dbg(&pdx->interface->dev,
- "%s - char out done, %d chars sent", __func__, nGot);
+ "%s: char out done, %d chars sent\n", __func__, nGot);
spin_lock(&pdx->charOutLock); /* already at irq level */
pdx->dwNumOutput -= nGot; /* Now adjust the char send buffer */
pdx->dwOutBuffGet += nGot; /* to match what we did */
@@ -315,15 +315,15 @@ static void ced_writechar_callback(struct urb *pUrb)
URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(pdx->pUrbCharOut, &pdx->submitted); /* in case we need to kill it */
iReturn = usb_submit_urb(pdx->pUrbCharOut, GFP_ATOMIC);
- dev_dbg(&pdx->interface->dev, "%s n=%d>%s<", __func__,
- dwCount, pDat);
+ dev_dbg(&pdx->interface->dev, "%s: n=%d>%s<\n",
+ __func__, dwCount, pDat);
spin_lock(&pdx->charOutLock); /* grab lock for errors */
if (iReturn) {
pdx->bPipeError[nPipe] = 1; /* Flag an error to be handled later */
pdx->bSendCharsPending = false; /* Allow other threads again */
usb_unanchor_urb(pdx->pUrbCharOut);
dev_err(&pdx->interface->dev,
- "%s usb_submit_urb() returned %d",
+ "%s: usb_submit_urb() returned %d\n",
__func__, iReturn);
}
} else
@@ -350,8 +350,8 @@ int SendChars(DEVICE_EXTENSION *pdx)
pdx->bSendCharsPending = true; /* Set flag to lock out other threads */
dev_dbg(&pdx->interface->dev,
- "Send %d chars to 1401, EP0 flag %d\n", dwCount,
- pdx->nPipes == 3);
+ "Send %d chars to 1401, EP0 flag %d\n",
+ dwCount, pdx->nPipes == 3);
/* If we have only 3 end points we must send the characters to the 1401 using EP0. */
if (pdx->nPipes == 3) {
/* For EP0 character transmissions to the 1401, we have to hang about until they */
@@ -375,11 +375,11 @@ int SendChars(DEVICE_EXTENSION *pdx)
if (nSent <= 0) {
iReturn = nSent ? nSent : -ETIMEDOUT; /* if 0 chars says we timed out */
dev_err(&pdx->interface->dev,
- "Send %d chars by EP0 failed: %d",
+ "Send %d chars by EP0 failed: %d\n",
n, iReturn);
} else {
dev_dbg(&pdx->interface->dev,
- "Sent %d chars by EP0", n);
+ "Sent %d chars by EP0\n", n);
count -= nSent;
index += nSent;
}
@@ -416,9 +416,9 @@ int SendChars(DEVICE_EXTENSION *pdx)
}
} else if (pdx->bSendCharsPending && (pdx->dwNumOutput > 0))
dev_dbg(&pdx->interface->dev,
- "SendChars bSendCharsPending:true");
+ "%s: bSendCharsPending:true\n", __func__);
- dev_dbg(&pdx->interface->dev, "SendChars exit code: %d", iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: exit code: %d\n", __func__, iReturn);
spin_unlock_irq(&pdx->charOutLock); /* Now let go of the spinlock */
return iReturn;
}
@@ -446,7 +446,7 @@ static void CopyUserSpace(DEVICE_EXTENSION *pdx, int n)
pdx->StagedDone + pdx->StagedOffset + pArea->dwBaseOffset;
char *pCoherBuf = pdx->pCoherStagedIO; /* coherent buffer */
if (!pArea->bUsed) {
- dev_err(&pdx->interface->dev, "%s area %d unused",
+ dev_err(&pdx->interface->dev, "%s: area %d unused\n",
__func__, nArea);
return;
}
@@ -474,21 +474,21 @@ static void CopyUserSpace(DEVICE_EXTENSION *pdx, int n)
n -= uiXfer;
} else {
dev_err(&pdx->interface->dev,
- "%s did not map page %d",
+ "%s: did not map page %d\n",
__func__, nPage);
return;
}
} else {
dev_err(&pdx->interface->dev,
- "%s exceeded pages %d", __func__,
- nPage);
+ "%s: exceeded pages %d\n",
+ __func__, nPage);
return;
}
}
} else
- dev_err(&pdx->interface->dev, "%s bad area %d", __func__,
- nArea);
+ dev_err(&pdx->interface->dev, "%s: bad area %d\n",
+ __func__, nArea);
}
/* Forward declarations for stuff used circularly */
@@ -513,11 +513,11 @@ static void staged_callback(struct urb *pUrb)
(pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
|| pUrb->status == -ESHUTDOWN)) {
dev_err(&pdx->interface->dev,
- "%s - nonzero write bulk status received: %d",
+ "%s: nonzero write bulk status received: %d\n",
__func__, pUrb->status);
} else
dev_info(&pdx->interface->dev,
- "%s - staged xfer cancelled", __func__);
+ "%s: staged xfer cancelled\n", __func__);
spin_lock(&pdx->err_lock);
pdx->errors = pUrb->status;
@@ -525,26 +525,26 @@ static void staged_callback(struct urb *pUrb)
nGot = 0; /* and tidy up again if so */
bCancel = true;
} else {
- dev_dbg(&pdx->interface->dev, "%s %d chars xferred", __func__,
- nGot);
+ dev_dbg(&pdx->interface->dev, "%s: %d chars xferred\n",
+ __func__, nGot);
if (pdx->StagedRead) /* if reading, save to user space */
CopyUserSpace(pdx, nGot); /* copy from buffer to user */
if (nGot == 0)
- dev_dbg(&pdx->interface->dev, "%s ZLP", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: ZLP\n", __func__);
}
/* Update the transfer length based on the TransferBufferLength value in the URB */
pdx->StagedDone += nGot;
- dev_dbg(&pdx->interface->dev, "%s, done %d bytes of %d", __func__,
- pdx->StagedDone, pdx->StagedLength);
+ dev_dbg(&pdx->interface->dev, "%s: done %d bytes of %d\n",
+ __func__, pdx->StagedDone, pdx->StagedLength);
if ((pdx->StagedDone == pdx->StagedLength) || /* If no more to do */
(bCancel)) { /* or this IRP was cancelled */
TRANSAREA *pArea = &pdx->rTransDef[pdx->StagedId]; /* Transfer area info */
dev_dbg(&pdx->interface->dev,
- "%s transfer done, bytes %d, cancel %d", __func__,
- pdx->StagedDone, bCancel);
+ "%s: transfer done, bytes %d, cancel %d\n",
+ __func__, pdx->StagedDone, bCancel);
/* Here is where we sort out what to do with this transfer if using a circular buffer. We have */
/* a completed transfer that can be assumed to fit into the transfer area. We should be able to */
@@ -559,7 +559,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[1].dwSize +=
pdx->StagedLength;
dev_dbg(&pdx->interface->dev,
- "RWM_Complete, circ block 1 now %d bytes at %d",
+ "RWM_Complete, circ block 1 now %d bytes at %d\n",
pArea->aBlocks[1].dwSize,
pArea->aBlocks[1].dwOffset);
} else {
@@ -569,7 +569,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[1].dwSize =
pdx->StagedLength;
dev_err(&pdx->interface->dev,
- "%s ERROR, circ block 1 re-started %d bytes at %d",
+ "%s: ERROR, circ block 1 re-started %d bytes at %d\n",
__func__,
pArea->aBlocks[1].dwSize,
pArea->aBlocks[1].dwOffset);
@@ -582,7 +582,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[0].dwSize)) {
pArea->aBlocks[0].dwSize += pdx->StagedLength; /* Just add this transfer in */
dev_dbg(&pdx->interface->dev,
- "RWM_Complete, circ block 0 now %d bytes at %d",
+ "RWM_Complete, circ block 0 now %d bytes at %d\n",
pArea->aBlocks[0].
dwSize,
pArea->aBlocks[0].
@@ -593,7 +593,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[1].dwSize =
pdx->StagedLength;
dev_dbg(&pdx->interface->dev,
- "RWM_Complete, circ block 1 started %d bytes at %d",
+ "RWM_Complete, circ block 1 started %d bytes at %d\n",
pArea->aBlocks[1].
dwSize,
pArea->aBlocks[1].
@@ -605,7 +605,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[0].dwSize =
pdx->StagedLength;
dev_dbg(&pdx->interface->dev,
- "RWM_Complete, circ block 0 started %d bytes at %d",
+ "RWM_Complete, circ block 0 started %d bytes at %d\n",
pArea->aBlocks[0].dwSize,
pArea->aBlocks[0].dwOffset);
}
@@ -614,7 +614,7 @@ static void staged_callback(struct urb *pUrb)
if (!bCancel) { /* Don't generate an event if cancelled */
dev_dbg(&pdx->interface->dev,
- "RWM_Complete, bCircular %d, bToHost %d, eStart %d, eSize %d",
+ "RWM_Complete, bCircular %d, bToHost %d, eStart %d, eSize %d\n",
pArea->bCircular, pArea->bEventToHost,
pArea->dwEventSt, pArea->dwEventSz);
if ((pArea->dwEventSz) && /* Set a user-mode event... */
@@ -641,7 +641,7 @@ static void staged_callback(struct urb *pUrb)
if (iWakeUp) {
dev_dbg(&pdx->interface->dev,
- "About to set event to notify app");
+ "About to set event to notify app\n");
wake_up_interruptible(&pArea->wqEvent); /* wake up waiting processes */
++pArea->iWakeUp; /* increment wakeup count */
}
@@ -655,7 +655,7 @@ static void staged_callback(struct urb *pUrb)
if (pdx->bXFerWaiting) { /* Got a block xfer waiting? */
int iReturn;
dev_info(&pdx->interface->dev,
- "*** RWM_Complete *** pending transfer will now be set up!!!");
+ "*** RWM_Complete *** pending transfer will now be set up!!!\n");
iReturn =
ReadWriteMem(pdx, !pdx->rDMAInfo.bOutWard,
pdx->rDMAInfo.wIdent,
@@ -664,7 +664,7 @@ static void staged_callback(struct urb *pUrb)
if (iReturn)
dev_err(&pdx->interface->dev,
- "RWM_Complete rw setup failed %d",
+ "RWM_Complete rw setup failed %d\n",
iReturn);
}
}
@@ -685,7 +685,7 @@ static void staged_callback(struct urb *pUrb)
/* not be upset by char input during DMA... sigh. Needs sorting out. */
if (bRestartCharInput) /* may be out of date, but... */
Allowi(pdx); /* ...Allowi tests a lock too. */
- dev_dbg(&pdx->interface->dev, "%s done", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: done\n", __func__);
}
/****************************************************************************
@@ -707,7 +707,7 @@ static int StageChunk(DEVICE_EXTENSION *pdx)
return U14ERR_FAIL;
if (!CanAcceptIoRequests(pdx)) { /* got sudden remove? */
- dev_info(&pdx->interface->dev, "%s sudden remove, giving up",
+ dev_info(&pdx->interface->dev, "%s: sudden remove, giving up\n",
__func__);
return U14ERR_FAIL; /* could do with a better error */
}
@@ -731,11 +731,11 @@ static int StageChunk(DEVICE_EXTENSION *pdx)
if (iReturn) {
usb_unanchor_urb(pdx->pStagedUrb); /* kill it */
pdx->bPipeError[nPipe] = 1; /* Flag an error to be handled later */
- dev_err(&pdx->interface->dev, "%s submit urb failed, code %d",
+ dev_err(&pdx->interface->dev, "%s: submit urb failed, code %d\n",
__func__, iReturn);
} else
pdx->bStagedUrbPending = true; /* Set the flag for staged URB pending */
- dev_dbg(&pdx->interface->dev, "%s done so far:%d, this size:%d",
+ dev_dbg(&pdx->interface->dev, "%s: done so far:%d, this size:%d\n",
__func__, pdx->StagedDone, ChunkSize);
return iReturn;
@@ -764,28 +764,28 @@ int ReadWriteMem(DEVICE_EXTENSION *pdx, bool Read, unsigned short wIdent,
TRANSAREA *pArea = &pdx->rTransDef[wIdent]; /* Transfer area info */
if (!CanAcceptIoRequests(pdx)) { /* Are we in a state to accept new requests? */
- dev_err(&pdx->interface->dev, "%s can't accept requests",
+ dev_err(&pdx->interface->dev, "%s: can't accept requests\n",
__func__);
return U14ERR_FAIL;
}
dev_dbg(&pdx->interface->dev,
- "%s xfer %d bytes to %s, offset %d, area %d", __func__, dwLen,
- Read ? "host" : "1401", dwOffs, wIdent);
+ "%s: xfer %d bytes to %s, offset %d, area %d\n",
+ __func__, dwLen, Read ? "host" : "1401", dwOffs, wIdent);
/* Amazingly, we can get an escape sequence back before the current staged Urb is done, so we */
/* have to check for this situation and, if so, wait until all is OK. */
if (pdx->bStagedUrbPending) {
pdx->bXFerWaiting = true; /* Flag we are waiting */
dev_info(&pdx->interface->dev,
- "%s xfer is waiting, as previous staged pending",
+ "%s: xfer is waiting, as previous staged pending\n",
__func__);
return U14ERR_NOERROR;
}
if (dwLen == 0) { /* allow 0-len read or write; just return success */
dev_dbg(&pdx->interface->dev,
- "%s OK; zero-len read/write request", __func__);
+ "%s: OK; zero-len read/write request\n", __func__);
return U14ERR_NOERROR;
}
@@ -795,7 +795,7 @@ int ReadWriteMem(DEVICE_EXTENSION *pdx, bool Read, unsigned short wIdent,
bool bWait = false; /* Flag for transfer having to wait */
dev_dbg(&pdx->interface->dev,
- "Circular buffers are %d at %d and %d at %d",
+ "Circular buffers are %d at %d and %d at %d\n",
pArea->aBlocks[0].dwSize, pArea->aBlocks[0].dwOffset,
pArea->aBlocks[1].dwSize, pArea->aBlocks[1].dwOffset);
if (pArea->aBlocks[1].dwSize > 0) { /* Using the second block already? */
@@ -819,14 +819,14 @@ int ReadWriteMem(DEVICE_EXTENSION *pdx, bool Read, unsigned short wIdent,
if (bWait) { /* This transfer will have to wait? */
pdx->bXFerWaiting = true; /* Flag we are waiting */
dev_dbg(&pdx->interface->dev,
- "%s xfer waiting for circular buffer space",
+ "%s: xfer waiting for circular buffer space\n",
__func__);
return U14ERR_NOERROR;
}
dev_dbg(&pdx->interface->dev,
- "%s circular xfer, %d bytes starting at %d", __func__,
- dwLen, dwOffs);
+ "%s: circular xfer, %d bytes starting at %d\n",
+ __func__, dwLen, dwOffs);
}
/* Save the parameters for the read\write transfer */
pdx->StagedRead = Read; /* Save the parameters for this read */
@@ -948,7 +948,7 @@ static bool ReadDMAInfo(volatile DMADESC *pDmaDesc, DEVICE_EXTENSION *pdx,
unsigned char ucData;
unsigned int dDone = 0; /* We haven't parsed anything so far */
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
if (ReadChar(&ucData, pBuf, &dDone, dwCount)) {
unsigned char ucTransCode = (ucData & 0x0F); /* get code for transfer type */
@@ -960,8 +960,8 @@ static bool ReadDMAInfo(volatile DMADESC *pDmaDesc, DEVICE_EXTENSION *pdx,
pDmaDesc->dwSize = 0; /* initialise other bits */
pDmaDesc->dwOffset = 0;
- dev_dbg(&pdx->interface->dev, "%s type: %d ident: %d", __func__,
- pDmaDesc->wTransType, pDmaDesc->wIdent);
+ dev_dbg(&pdx->interface->dev, "%s: type: %d ident: %d\n",
+ __func__, pDmaDesc->wTransType, pDmaDesc->wIdent);
pDmaDesc->bOutWard = (ucTransCode != TM_EXTTOHOST); /* set transfer direction */
@@ -976,7 +976,7 @@ static bool ReadDMAInfo(volatile DMADESC *pDmaDesc, DEVICE_EXTENSION *pdx,
&dDone, dwCount);
if (bResult) {
dev_dbg(&pdx->interface->dev,
- "%s xfer offset & size %d %d",
+ "%s: xfer offset & size %d %d\n",
__func__, pDmaDesc->dwOffset,
pDmaDesc->dwSize);
@@ -989,7 +989,7 @@ static bool ReadDMAInfo(volatile DMADESC *pDmaDesc, DEVICE_EXTENSION *pdx,
dwLength))) {
bResult = false; /* bad parameter(s) */
dev_dbg(&pdx->interface->dev,
- "%s bad param - id %d, bUsed %d, offset %d, size %d, area length %d",
+ "%s: bad param - id %d, bUsed %d, offset %d, size %d, area length %d\n",
__func__, wIdent,
pdx->rTransDef[wIdent].
bUsed,
@@ -1008,7 +1008,7 @@ static bool ReadDMAInfo(volatile DMADESC *pDmaDesc, DEVICE_EXTENSION *pdx,
bResult = false;
if (!bResult) /* now check parameters for validity */
- dev_err(&pdx->interface->dev, "%s error reading Esc sequence",
+ dev_err(&pdx->interface->dev, "%s: error reading Esc sequence\n",
__func__);
return bResult;
@@ -1045,15 +1045,15 @@ static int Handle1401Esc(DEVICE_EXTENSION *pdx, char *pCh,
unsigned short wTransType = pdx->rDMAInfo.wTransType; /* check transfer type */
dev_dbg(&pdx->interface->dev,
- "%s xfer to %s, offset %d, length %d", __func__,
+ "%s: xfer to %s, offset %d, length %d\n",
+ __func__,
pdx->rDMAInfo.bOutWard ? "1401" : "host",
pdx->rDMAInfo.dwOffset, pdx->rDMAInfo.dwSize);
if (pdx->bXFerWaiting) { /* Check here for badly out of kilter... */
/* This can never happen, really */
dev_err(&pdx->interface->dev,
- "ERROR: DMA setup while transfer still waiting");
- spin_unlock(&pdx->stagedLock);
+ "ERROR: DMA setup while transfer still waiting\n");
} else {
if ((wTransType == TM_EXTTOHOST)
|| (wTransType == TM_EXTTO1401)) {
@@ -1066,21 +1066,21 @@ static int Handle1401Esc(DEVICE_EXTENSION *pdx, char *pCh,
pdx->rDMAInfo.dwSize);
if (iReturn != U14ERR_NOERROR)
dev_err(&pdx->interface->dev,
- "%s ReadWriteMem() failed %d",
+ "%s: ReadWriteMem() failed %d\n",
__func__, iReturn);
} else /* This covers non-linear transfer setup */
dev_err(&pdx->interface->dev,
- "%s Unknown block xfer type %d",
+ "%s: Unknown block xfer type %d\n",
__func__, wTransType);
}
} else /* Failed to read parameters */
- dev_err(&pdx->interface->dev, "%s ReadDMAInfo() fail",
+ dev_err(&pdx->interface->dev, "%s: ReadDMAInfo() fail\n",
__func__);
spin_unlock(&pdx->stagedLock); /* OK here */
}
- dev_dbg(&pdx->interface->dev, "%s returns %d", __func__, iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: returns %d\n", __func__, iReturn);
return iReturn;
}
@@ -1100,11 +1100,11 @@ static void ced_readchar_callback(struct urb *pUrb)
(pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
|| pUrb->status == -ESHUTDOWN)) {
dev_err(&pdx->interface->dev,
- "%s - nonzero write bulk status received: %d",
+ "%s: nonzero write bulk status received: %d\n",
__func__, pUrb->status);
} else
dev_dbg(&pdx->interface->dev,
- "%s - 0 chars pUrb->status=%d (shutdown?)",
+ "%s: 0 chars pUrb->status=%d (shutdown?)\n",
__func__, pUrb->status);
spin_lock(&pdx->err_lock);
@@ -1125,7 +1125,7 @@ static void ced_readchar_callback(struct urb *pUrb)
if (nGot < INBUF_SZ) {
pdx->pCoherCharIn[nGot] = 0; /* tidy the string */
dev_dbg(&pdx->interface->dev,
- "%s got %d chars >%s<",
+ "%s: got %d chars >%s<\n",
__func__, nGot,
pdx->pCoherCharIn);
}
@@ -1140,7 +1140,7 @@ static void ced_readchar_callback(struct urb *pUrb)
if ((pdx->dwNumInput + nGot) <= INBUF_SZ)
pdx->dwNumInput += nGot; /* Adjust the buffer count accordingly */
} else
- dev_dbg(&pdx->interface->dev, "%s read ZLP",
+ dev_dbg(&pdx->interface->dev, "%s: read ZLP\n",
__func__);
}
}
@@ -1178,7 +1178,7 @@ int Allowi(DEVICE_EXTENSION *pdx)
unsigned int nMax = INBUF_SZ - pdx->dwNumInput; /* max we could read */
int nPipe = pdx->nPipes == 4 ? 1 : 0; /* The pipe number to use */
- dev_dbg(&pdx->interface->dev, "%s %d chars in input buffer",
+ dev_dbg(&pdx->interface->dev, "%s: %d chars in input buffer\n",
__func__, pdx->dwNumInput);
usb_fill_int_urb(pdx->pUrbCharIn, pdx->udev,
@@ -1192,7 +1192,8 @@ int Allowi(DEVICE_EXTENSION *pdx)
usb_unanchor_urb(pdx->pUrbCharIn); /* remove from list of active Urbs */
pdx->bPipeError[nPipe] = 1; /* Flag an error to be handled later */
dev_err(&pdx->interface->dev,
- "%s submit urb failed: %d", __func__, iReturn);
+ "%s: submit urb failed: %d\n",
+ __func__, iReturn);
} else
pdx->bReadCharsPending = true; /* Flag that we are active here */
}
@@ -1250,13 +1251,13 @@ static long ced_ioctl(struct file *file, unsigned int cmd, unsigned long ulArg)
return GetString(pdx, (char __user *)ulArg, _IOC_SIZE(cmd));
case _IOC_NR(IOCTL_CED_SETTRANSFER):
- return SetTransfer(pdx, (TRANSFERDESC __user *) ulArg);
+ return SetTransfer(pdx, (struct transfer_area_desc __user *) ulArg);
case _IOC_NR(IOCTL_CED_UNSETTRANSFER):
return UnsetTransfer(pdx, (int)ulArg);
case _IOC_NR(IOCTL_CED_SETEVENT):
- return SetEvent(pdx, (TRANSFEREVENT __user *) ulArg);
+ return SetEvent(pdx, (struct transfer_event __user *) ulArg);
case _IOC_NR(IOCTL_CED_GETOUTBUFSPACE):
return GetOutBufSpace(pdx);
@@ -1315,7 +1316,7 @@ static long ced_ioctl(struct file *file, unsigned int cmd, unsigned long ulArg)
break;
case _IOC_NR(IOCTL_CED_SETCIRCULAR):
- return SetCircular(pdx, (TRANSFERDESC __user *) ulArg);
+ return SetCircular(pdx, (struct transfer_area_desc __user *) ulArg);
case _IOC_NR(IOCTL_CED_GETCIRCBLOCK):
return GetCircBlock(pdx, (TCIRCBLOCK __user *) ulArg);
@@ -1397,7 +1398,7 @@ static int ced_probe(struct usb_interface *interface,
else if ((i >= 1) && (i <= 23))
pdx->s1401Type = i + 2;
else {
- dev_err(&interface->dev, "%s Unknown device. bcdDevice = %d",
+ dev_err(&interface->dev, "%s: Unknown device. bcdDevice = %d\n",
__func__, bcdDevice);
goto error;
}
@@ -1405,7 +1406,7 @@ static int ced_probe(struct usb_interface *interface,
/* we know that we are dealing with a 1401 device. */
iface_desc = interface->cur_altsetting;
pdx->nPipes = iface_desc->desc.bNumEndpoints;
- dev_info(&interface->dev, "1401Type=%d with %d End Points",
+ dev_info(&interface->dev, "1401Type=%d with %d End Points\n",
pdx->s1401Type, pdx->nPipes);
if ((pdx->nPipes < 3) || (pdx->nPipes > 4))
goto error;
@@ -1415,7 +1416,7 @@ static int ced_probe(struct usb_interface *interface,
pdx->pUrbCharIn = usb_alloc_urb(0, GFP_KERNEL); /* character input URB */
pdx->pStagedUrb = usb_alloc_urb(0, GFP_KERNEL); /* block transfer URB */
if (!pdx->pUrbCharOut || !pdx->pUrbCharIn || !pdx->pStagedUrb) {
- dev_err(&interface->dev, "%s URB alloc failed", __func__);
+ dev_err(&interface->dev, "%s: URB alloc failed\n", __func__);
goto error;
}
@@ -1429,7 +1430,7 @@ static int ced_probe(struct usb_interface *interface,
usb_alloc_coherent(pdx->udev, INBUF_SZ, GFP_KERNEL,
&pdx->pUrbCharIn->transfer_dma);
if (!pdx->pCoherCharOut || !pdx->pCoherCharIn || !pdx->pCoherStagedIO) {
- dev_err(&interface->dev, "%s Coherent buffer alloc failed",
+ dev_err(&interface->dev, "%s: Coherent buffer alloc failed\n",
__func__);
goto error;
}
@@ -1437,19 +1438,19 @@ static int ced_probe(struct usb_interface *interface,
for (i = 0; i < pdx->nPipes; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
pdx->epAddr[i] = endpoint->bEndpointAddress;
- dev_info(&interface->dev, "Pipe %d, ep address %02x", i,
- pdx->epAddr[i]);
+ dev_info(&interface->dev, "Pipe %d, ep address %02x\n",
+ i, pdx->epAddr[i]);
if (((pdx->nPipes == 3) && (i == 0)) || /* if char input end point */
((pdx->nPipes == 4) && (i == 1))) {
pdx->bInterval = endpoint->bInterval; /* save the endpoint interrupt interval */
- dev_info(&interface->dev, "Pipe %d, bInterval = %d", i,
- pdx->bInterval);
+ dev_info(&interface->dev, "Pipe %d, bInterval = %d\n",
+ i, pdx->bInterval);
}
/* Detect USB2 by checking last ep size (64 if USB1) */
if (i == pdx->nPipes - 1) { /* if this is the last ep (bulk) */
pdx->bIsUSB2 =
le16_to_cpu(endpoint->wMaxPacketSize) > 64;
- dev_info(&pdx->interface->dev, "USB%d",
+ dev_info(&pdx->interface->dev, "USB%d\n",
pdx->bIsUSB2 + 1);
}
}
@@ -1462,14 +1463,14 @@ static int ced_probe(struct usb_interface *interface,
if (retval) {
/* something prevented us from registering this driver */
dev_err(&interface->dev,
- "Not able to get a minor for this device.\n");
+ "Not able to get a minor for this device\n");
usb_set_intfdata(interface, NULL);
goto error;
}
/* let the user know what node this device is now attached to */
dev_info(&interface->dev,
- "USB CEDUSB device now attached to cedusb #%d",
+ "USB CEDUSB device now attached to cedusb #%d\n",
interface->minor);
return 0;
@@ -1493,7 +1494,7 @@ static void ced_disconnect(struct usb_interface *interface)
for (i = 0; i < MAX_TRANSAREAS; ++i) {
int iErr = ClearArea(pdx, i); /* ...release any used memory */
if (iErr == U14ERR_UNLOCKFAIL)
- dev_err(&pdx->interface->dev, "%s Area %d was in used",
+			dev_err(&pdx->interface->dev, "%s: Area %d was in use\n",
__func__, i);
}
pdx->interface = NULL; /* ...we kill off link to interface */
@@ -1503,7 +1504,7 @@ static void ced_disconnect(struct usb_interface *interface)
kref_put(&pdx->kref, ced_delete); /* decrement our usage count */
- dev_info(&interface->dev, "USB cedusb #%d now disconnected", minor);
+ dev_info(&interface->dev, "USB cedusb #%d now disconnected\n", minor);
}
/* Wait for all the urbs we know of to be done with, then kill off any that */
@@ -1513,13 +1514,13 @@ static void ced_disconnect(struct usb_interface *interface)
void ced_draw_down(DEVICE_EXTENSION *pdx)
{
int time;
- dev_dbg(&pdx->interface->dev, "%s called", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: called\n", __func__);
pdx->bInDrawDown = true;
time = usb_wait_anchor_empty_timeout(&pdx->submitted, 3000);
if (!time) { /* if we timed out we kill the urbs */
usb_kill_anchored_urbs(&pdx->submitted);
- dev_err(&pdx->interface->dev, "%s timed out", __func__);
+ dev_err(&pdx->interface->dev, "%s: timed out\n", __func__);
}
pdx->bInDrawDown = false;
}
@@ -1531,7 +1532,7 @@ static int ced_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
ced_draw_down(pdx);
- dev_dbg(&pdx->interface->dev, "%s called", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: called\n", __func__);
return 0;
}
@@ -1540,14 +1541,14 @@ static int ced_resume(struct usb_interface *intf)
DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
if (!pdx)
return 0;
- dev_dbg(&pdx->interface->dev, "%s called", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: called\n", __func__);
return 0;
}
static int ced_pre_reset(struct usb_interface *intf)
{
DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
mutex_lock(&pdx->io_mutex);
ced_draw_down(pdx);
return 0;
@@ -1556,7 +1557,7 @@ static int ced_pre_reset(struct usb_interface *intf)
static int ced_post_reset(struct usb_interface *intf)
{
DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
/* we are sure no URBs are active - no locking needed */
pdx->errors = -EPIPE;
diff --git a/drivers/staging/ced1401/usb1401.h b/drivers/staging/ced1401/usb1401.h
index f031e3a2c7cf..ea0fe6398a01 100644
--- a/drivers/staging/ced1401/usb1401.h
+++ b/drivers/staging/ced1401/usb1401.h
@@ -218,9 +218,9 @@ extern bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset);
extern int Reset1401(DEVICE_EXTENSION *pdx);
extern int GetChar(DEVICE_EXTENSION *pdx);
extern int GetString(DEVICE_EXTENSION *pdx, char __user *pUser, int n);
-extern int SetTransfer(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD);
+extern int SetTransfer(DEVICE_EXTENSION *pdx, struct transfer_area_desc __user *pTD);
extern int UnsetTransfer(DEVICE_EXTENSION *pdx, int nArea);
-extern int SetEvent(DEVICE_EXTENSION *pdx, TRANSFEREVENT __user *pTE);
+extern int SetEvent(DEVICE_EXTENSION *pdx, struct transfer_event __user *pTE);
extern int Stat1401(DEVICE_EXTENSION *pdx);
extern int LineCount(DEVICE_EXTENSION *pdx);
extern int GetOutBufSpace(DEVICE_EXTENSION *pdx);
@@ -238,7 +238,7 @@ extern int DbgRampData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB);
extern int DbgRampAddr(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB);
extern int DbgGetData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB);
extern int DbgStopLoop(DEVICE_EXTENSION *pdx);
-extern int SetCircular(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD);
+extern int SetCircular(DEVICE_EXTENSION *pdx, struct transfer_area_desc __user *pTD);
extern int GetCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB);
extern int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB);
extern int WaitEvent(DEVICE_EXTENSION *pdx, int nArea, int msTimeOut);
diff --git a/drivers/staging/ced1401/use14_ioc.h b/drivers/staging/ced1401/use14_ioc.h
index 97d7913840dc..42d2e4e6e9a9 100644
--- a/drivers/staging/ced1401/use14_ioc.h
+++ b/drivers/staging/ced1401/use14_ioc.h
@@ -276,15 +276,14 @@ typedef struct paramBlk {
typedef PARAMBLK* PPARAMBLK;
-typedef struct TransferDesc /* Structure and type for SetTransArea */
+struct transfer_area_desc /* Structure and type for SetTransArea */
{
unsigned short wArea; /* number of transfer area to set up */
void FAR *lpvBuff; /* address of transfer area */
unsigned int dwLength; /* length of area to set up */
short eSize; /* size to move (for swapping on MAC) */
-} TRANSFERDESC;
+};
-typedef TRANSFERDESC FAR *LPTRANSFERDESC;
/* This is the structure used to set up a transfer area */
typedef struct VXTransferDesc /* use1401.c and use1432x.x use only */
diff --git a/drivers/staging/ced1401/userspace/use1401.c b/drivers/staging/ced1401/userspace/use1401.c
index c9bc2ebfef1a..7b8a2227fe5b 100644
--- a/drivers/staging/ced1401/userspace/use1401.c
+++ b/drivers/staging/ced1401/userspace/use1401.c
@@ -2231,7 +2231,7 @@ U14API(short) U14UnSetTransfer(short hand, unsigned short wArea)
U14API(short) U14SetTransArea(short hand, unsigned short wArea, void *pvBuff,
unsigned int dwLength, short eSz)
{
- TRANSFERDESC td;
+ struct transfer_area_desc td;
short sErr = CheckHandle(hand);
if (sErr != U14ERR_NOERROR)
return sErr;
@@ -2292,7 +2292,7 @@ U14API(short) U14SetTransArea(short hand, unsigned short wArea, void *pvBuff,
td.eSize = 0; // Dummy element size
if (DeviceIoControl(aHand1401[hand],(unsigned int)U14_SETTRANSFER,
- &td,sizeof(TRANSFERDESC),
+ &td,sizeof(struct transfer_area_desc),
&rWork,sizeof(PARAMBLK),&dwBytes,NULL))
{
if (dwBytes >= sizeof(PARAMBLK)) // maybe error from driver?
@@ -2496,14 +2496,14 @@ U14API(short) U14SetCircular(short hand, unsigned short wArea, BOOL bToHost,
{
PARAMBLK rWork;
unsigned int dwBytes;
- TRANSFERDESC txDesc;
+ struct transfer_area_desc txDesc;
txDesc.wArea = wArea; /* Pure NT - put data into struct */
txDesc.lpvBuff = pvBuff;
txDesc.dwLength = dwLength;
txDesc.eSize = (short)bToHost; /* Use this for direction flag */
if (DeviceIoControl(aHand1401[hand],(unsigned int)U14_SETCIRCULAR,
- &txDesc, sizeof(TRANSFERDESC),
+ &txDesc, sizeof(struct transfer_area_desc),
&rWork, sizeof(PARAMBLK),&dwBytes,NULL))
{
if (dwBytes >= sizeof(PARAMBLK)) /* error from driver? */
@@ -2528,7 +2528,7 @@ U14API(short) U14SetCircular(short hand, unsigned short wArea, BOOL bToHost,
#ifdef LINUX
else
{
- TRANSFERDESC td;
+ struct transfer_area_desc td;
td.lpvBuff = (long long)((unsigned long)pvBuff);
td.wAreaNum = wArea;
td.dwLength = dwLength;
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 89e25b4203ad..703c5d40ae5c 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -177,6 +177,7 @@ config COMEDI_PCL730
config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_FC
---help---
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
@@ -188,6 +189,7 @@ config COMEDI_PCL812
config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_FC
---help---
Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -197,6 +199,7 @@ config COMEDI_PCL816
config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_FC
---help---
Enable support for Advantech PCL-818 ISA cards
PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
@@ -257,6 +260,14 @@ config COMEDI_RTI802
To compile this driver as a module, choose M here: the module will be
called rti802.
+config COMEDI_DAC02
+ tristate "Keithley Metrabyte DAC02 compatible ISA card support"
+ ---help---
+ Enable support for Keithley Metrabyte DAC02 compatible ISA cards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called dac02.
+
config COMEDI_DAS16M1
tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
select COMEDI_8255
@@ -559,15 +570,6 @@ config COMEDI_MULTIQ3
To compile this driver as a module, choose M here: the module will be
called multiq3.
-config COMEDI_POC
- tristate "Generic driver for very simple devices"
- ---help---
- Enable generic support for very simple / POC (Piece of Crap) boards,
- Keithley Metrabyte DAC-02 (dac02).
-
- To compile this driver as a module, choose M here: the module will be
- called poc.
-
config COMEDI_S526
tristate "Sensoray s526 support"
---help---
@@ -753,6 +755,7 @@ config COMEDI_ADL_PCI9118
config COMEDI_ADV_PCI1710
tristate "Advantech PCI-171x, PCI-1720 and PCI-1731 support"
+ select COMEDI_FC
---help---
Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
PCI-1713, PCI-1720 and PCI-1731
@@ -856,6 +859,7 @@ config COMEDI_DAS08_PCI
config COMEDI_DT3000
tristate "Data Translation DT3000 series support"
+ select COMEDI_FC
---help---
Enable support for Data Translation DT3000 series
DT3001, DT3001-PGL, DT3002, DT3003, DT3003-PGL, DT3004, DT3005 and
@@ -1101,6 +1105,7 @@ config COMEDI_S626
config COMEDI_MITE
depends on HAS_DMA
+ select COMEDI_FC
tristate
config COMEDI_NI_TIOCMD
@@ -1179,6 +1184,7 @@ config COMEDI_NI_MIO_CS
config COMEDI_QUATECH_DAQP_CS
tristate "Quatech DAQP PCMCIA data capture card support"
+ select COMEDI_FC
---help---
Enable support for the Quatech DAQP PCMCIA data capture cards
DAQP-208 and DAQP-308
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index c22c617b0da1..ea6dc36d753b 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -297,7 +297,7 @@ static ssize_t max_read_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%i\n", size);
+ return snprintf(buf, PAGE_SIZE, "%u\n", size);
}
static ssize_t max_read_buffer_kb_store(struct device *csdev,
@@ -353,7 +353,7 @@ static ssize_t read_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%i\n", size);
+ return snprintf(buf, PAGE_SIZE, "%u\n", size);
}
static ssize_t read_buffer_kb_store(struct device *csdev,
@@ -410,7 +410,7 @@ static ssize_t max_write_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%i\n", size);
+ return snprintf(buf, PAGE_SIZE, "%u\n", size);
}
static ssize_t max_write_buffer_kb_store(struct device *csdev,
@@ -466,7 +466,7 @@ static ssize_t write_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%i\n", size);
+ return snprintf(buf, PAGE_SIZE, "%u\n", size);
}
static ssize_t write_buffer_kb_store(struct device *csdev,
@@ -1194,6 +1194,11 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
switch (insn->insn) {
case INSN_READ:
ret = s->insn_read(dev, s, insn, data);
+ if (ret == -ETIMEDOUT) {
+ dev_dbg(dev->class_dev,
+ "subdevice %d read instruction timed out\n",
+ s->index);
+ }
break;
case INSN_WRITE:
maxdata = s->maxdata_list
@@ -1207,8 +1212,14 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
break;
}
}
- if (ret == 0)
+ if (ret == 0) {
ret = s->insn_write(dev, s, insn, data);
+ if (ret == -ETIMEDOUT) {
+ dev_dbg(dev->class_dev,
+ "subdevice %d write instruction timed out\n",
+ s->index);
+ }
+ }
break;
case INSN_BITS:
if (insn->n != 2) {
@@ -1405,41 +1416,94 @@ error:
return ret;
}
-static int do_cmd_ioctl(struct comedi_device *dev,
- struct comedi_cmd __user *arg, void *file)
+static int __comedi_get_user_cmd(struct comedi_device *dev,
+ struct comedi_cmd __user *arg,
+ struct comedi_cmd *cmd)
{
- struct comedi_cmd cmd;
struct comedi_subdevice *s;
- struct comedi_async *async;
- int ret = 0;
- unsigned int __user *user_chanlist;
- if (copy_from_user(&cmd, arg, sizeof(cmd))) {
+ if (copy_from_user(cmd, arg, sizeof(*cmd))) {
dev_dbg(dev->class_dev, "bad cmd address\n");
return -EFAULT;
}
- /* save user's chanlist pointer so it can be restored later */
- user_chanlist = (unsigned int __user *)cmd.chanlist;
- if (cmd.subdev >= dev->n_subdevices) {
- dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd.subdev);
+ if (cmd->subdev >= dev->n_subdevices) {
+ dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd->subdev);
return -ENODEV;
}
- s = &dev->subdevices[cmd.subdev];
- async = s->async;
+ s = &dev->subdevices[cmd->subdev];
if (s->type == COMEDI_SUBD_UNUSED) {
- dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd.subdev);
+ dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd->subdev);
return -EIO;
}
if (!s->do_cmd || !s->do_cmdtest || !s->async) {
dev_dbg(dev->class_dev,
- "subdevice %i does not support commands\n", cmd.subdev);
+ "subdevice %d does not support commands\n", cmd->subdev);
return -EIO;
}
+ /* make sure channel/gain list isn't too long */
+ if (cmd->chanlist_len > s->len_chanlist) {
+ dev_dbg(dev->class_dev, "channel/gain list too long %d > %d\n",
+ cmd->chanlist_len, s->len_chanlist);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __comedi_get_user_chanlist(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int __user *user_chanlist,
+ struct comedi_cmd *cmd)
+{
+ unsigned int *chanlist;
+ int ret;
+
+ /* user_chanlist could be NULL for do_cmdtest ioctls */
+ if (!user_chanlist)
+ return 0;
+
+ chanlist = memdup_user(user_chanlist,
+ cmd->chanlist_len * sizeof(unsigned int));
+ if (IS_ERR(chanlist))
+ return PTR_ERR(chanlist);
+
+ /* make sure each element in channel/gain list is valid */
+ ret = comedi_check_chanlist(s, cmd->chanlist_len, chanlist);
+ if (ret < 0) {
+ kfree(chanlist);
+ return ret;
+ }
+
+ cmd->chanlist = chanlist;
+
+ return 0;
+}
+
+static int do_cmd_ioctl(struct comedi_device *dev,
+ struct comedi_cmd __user *arg, void *file)
+{
+ struct comedi_cmd cmd;
+ struct comedi_subdevice *s;
+ struct comedi_async *async;
+ unsigned int __user *user_chanlist;
+ int ret;
+
+ /* get the user's cmd and do some simple validation */
+ ret = __comedi_get_user_cmd(dev, arg, &cmd);
+ if (ret)
+ return ret;
+
+ /* save user's chanlist pointer so it can be restored later */
+ user_chanlist = (unsigned int __user *)cmd.chanlist;
+
+ s = &dev->subdevices[cmd.subdev];
+ async = s->async;
+
/* are we locked? (ioctl lock) */
if (s->lock && s->lock != file) {
dev_dbg(dev->class_dev, "subdevice locked\n");
@@ -1452,13 +1516,6 @@ static int do_cmd_ioctl(struct comedi_device *dev,
return -EBUSY;
}
- /* make sure channel/gain list isn't too long */
- if (cmd.chanlist_len > s->len_chanlist) {
- dev_dbg(dev->class_dev, "channel/gain list too long %u > %d\n",
- cmd.chanlist_len, s->len_chanlist);
- return -EINVAL;
- }
-
/* make sure channel/gain list isn't too short */
if (cmd.chanlist_len < 1) {
dev_dbg(dev->class_dev, "channel/gain list too short %u < 1\n",
@@ -1468,25 +1525,11 @@ static int do_cmd_ioctl(struct comedi_device *dev,
async->cmd = cmd;
async->cmd.data = NULL;
- /* load channel/gain list */
- async->cmd.chanlist = memdup_user(user_chanlist,
- async->cmd.chanlist_len * sizeof(int));
- if (IS_ERR(async->cmd.chanlist)) {
- ret = PTR_ERR(async->cmd.chanlist);
- async->cmd.chanlist = NULL;
- dev_dbg(dev->class_dev, "memdup_user failed with code %d\n",
- ret);
- goto cleanup;
- }
- /* make sure each element in channel/gain list is valid */
- ret = comedi_check_chanlist(s,
- async->cmd.chanlist_len,
- async->cmd.chanlist);
- if (ret < 0) {
- dev_dbg(dev->class_dev, "bad chanlist\n");
+ /* load channel/gain list */
+ ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &async->cmd);
+ if (ret)
goto cleanup;
- }
ret = s->do_cmdtest(dev, s, &async->cmd);
@@ -1554,63 +1597,24 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
{
struct comedi_cmd cmd;
struct comedi_subdevice *s;
- int ret = 0;
unsigned int *chanlist = NULL;
unsigned int __user *user_chanlist;
+ int ret;
+
+ /* get the user's cmd and do some simple validation */
+ ret = __comedi_get_user_cmd(dev, arg, &cmd);
+ if (ret)
+ return ret;
- if (copy_from_user(&cmd, arg, sizeof(cmd))) {
- dev_dbg(dev->class_dev, "bad cmd address\n");
- return -EFAULT;
- }
/* save user's chanlist pointer so it can be restored later */
user_chanlist = (unsigned int __user *)cmd.chanlist;
- if (cmd.subdev >= dev->n_subdevices) {
- dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd.subdev);
- return -ENODEV;
- }
-
s = &dev->subdevices[cmd.subdev];
- if (s->type == COMEDI_SUBD_UNUSED) {
- dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd.subdev);
- return -EIO;
- }
-
- if (!s->do_cmd || !s->do_cmdtest) {
- dev_dbg(dev->class_dev,
- "subdevice %i does not support commands\n", cmd.subdev);
- return -EIO;
- }
-
- /* make sure channel/gain list isn't too long */
- if (cmd.chanlist_len > s->len_chanlist) {
- dev_dbg(dev->class_dev, "channel/gain list too long %d > %d\n",
- cmd.chanlist_len, s->len_chanlist);
- ret = -EINVAL;
- goto cleanup;
- }
/* load channel/gain list */
- if (cmd.chanlist) {
- chanlist = memdup_user(user_chanlist,
- cmd.chanlist_len * sizeof(int));
- if (IS_ERR(chanlist)) {
- ret = PTR_ERR(chanlist);
- chanlist = NULL;
- dev_dbg(dev->class_dev,
- "memdup_user exited with code %d", ret);
- goto cleanup;
- }
-
- /* make sure each element in channel/gain list is valid */
- ret = comedi_check_chanlist(s, cmd.chanlist_len, chanlist);
- if (ret < 0) {
- dev_dbg(dev->class_dev, "bad chanlist\n");
- goto cleanup;
- }
-
- cmd.chanlist = chanlist;
- }
+ ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd);
+ if (ret)
+ return ret;
ret = s->do_cmdtest(dev, s, &cmd);
@@ -1620,9 +1624,8 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
if (copy_to_user(arg, &cmd, sizeof(cmd))) {
dev_dbg(dev->class_dev, "bad cmd address\n");
ret = -EFAULT;
- goto cleanup;
}
-cleanup:
+
kfree(chanlist);
return ret;
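
The two new helpers centralize the checks that both the COMEDI_CMD and COMEDI_CMDTEST paths need: the subdevice must exist and support commands, chanlist_len may not exceed the subdevice's len_chanlist, and every chanlist entry must pass comedi_check_chanlist(). For reference, a rough userspace sketch (using comedilib; the device path, subdevice number and timing values are arbitrary assumptions, not part of this patch) of the kind of request that ends up in do_cmdtest_ioctl():

#include <comedilib.h>
#include <string.h>

static int try_cmdtest(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");
	unsigned int chanlist[2];
	comedi_cmd cmd;
	int ret;

	if (!dev)
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.subdev = 0;				/* checked against dev->n_subdevices */
	cmd.start_src = TRIG_NOW;
	cmd.scan_begin_src = TRIG_TIMER;
	cmd.scan_begin_arg = 1000000;		/* 1 ms per scan */
	cmd.convert_src = TRIG_TIMER;
	cmd.convert_arg = 10000;
	cmd.scan_end_src = TRIG_COUNT;
	cmd.scan_end_arg = 2;
	cmd.stop_src = TRIG_COUNT;
	cmd.stop_arg = 100;

	chanlist[0] = CR_PACK(0, 0, AREF_GROUND);	/* validated by comedi_check_chanlist() */
	chanlist[1] = CR_PACK(1, 0, AREF_GROUND);
	cmd.chanlist = chanlist;		/* copied with memdup_user() in the kernel */
	cmd.chanlist_len = 2;			/* must not exceed s->len_chanlist */

	ret = comedi_command_test(dev, &cmd);	/* exercises do_cmdtest_ioctl() */
	comedi_close(dev);
	return ret;
}
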
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index f82bd4256d51..d46123a97582 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -61,31 +61,31 @@ struct comedi_subdevice {
unsigned int *chanlist; /* driver-owned chanlist (not used) */
- int (*insn_read) (struct comedi_device *, struct comedi_subdevice *,
+ int (*insn_read)(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *);
+ int (*insn_write)(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *);
- int (*insn_write) (struct comedi_device *, struct comedi_subdevice *,
+ int (*insn_bits)(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *);
+ int (*insn_config)(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *);
- int (*insn_bits) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*insn_config) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
- int (*do_cmd) (struct comedi_device *, struct comedi_subdevice *);
- int (*do_cmdtest) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_cmd *);
- int (*poll) (struct comedi_device *, struct comedi_subdevice *);
- int (*cancel) (struct comedi_device *, struct comedi_subdevice *);
+
+ int (*do_cmd)(struct comedi_device *, struct comedi_subdevice *);
+ int (*do_cmdtest)(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_cmd *);
+ int (*poll)(struct comedi_device *, struct comedi_subdevice *);
+ int (*cancel)(struct comedi_device *, struct comedi_subdevice *);
/* int (*do_lock)(struct comedi_device *, struct comedi_subdevice *); */
/* int (*do_unlock)(struct comedi_device *, \
struct comedi_subdevice *); */
/* called when the buffer changes */
- int (*buf_change) (struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned long new_size);
+ int (*buf_change)(struct comedi_device *dev,
+ struct comedi_subdevice *s, unsigned long new_size);
- void (*munge) (struct comedi_device *dev, struct comedi_subdevice *s,
- void *data, unsigned int num_bytes,
- unsigned int start_chan_index);
+ void (*munge)(struct comedi_device *dev, struct comedi_subdevice *s,
+ void *data, unsigned int num_bytes,
+ unsigned int start_chan_index);
enum dma_data_direction async_dma_dir;
unsigned int state;
@@ -146,8 +146,8 @@ struct comedi_async {
unsigned int cb_mask;
- int (*inttrig) (struct comedi_device *dev, struct comedi_subdevice *s,
- unsigned int x);
+ int (*inttrig)(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned int x);
};
struct comedi_driver {
@@ -155,9 +155,9 @@ struct comedi_driver {
const char *driver_name;
struct module *module;
- int (*attach) (struct comedi_device *, struct comedi_devconfig *);
- void (*detach) (struct comedi_device *);
- int (*auto_attach) (struct comedi_device *, unsigned long);
+ int (*attach)(struct comedi_device *, struct comedi_devconfig *);
+ void (*detach)(struct comedi_device *);
+ int (*auto_attach)(struct comedi_device *, unsigned long);
/* number of elements in board_name and board_id arrays */
unsigned int num_names;
@@ -202,8 +202,8 @@ struct comedi_device {
struct fasync_struct *async_queue;
- int (*open) (struct comedi_device *dev);
- void (*close) (struct comedi_device *dev);
+ int (*open)(struct comedi_device *dev);
+ void (*close)(struct comedi_device *dev);
};
static inline const void *comedi_board(const struct comedi_device *dev)
@@ -353,6 +353,14 @@ void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
/* drivers.c - general comedi driver functions */
+#define COMEDI_TIMEOUT_MS 1000
+
+int comedi_timeout(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *,
+ int (*cb)(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned long context),
+ unsigned long context);
+
int comedi_dio_insn_config(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *data,
unsigned int mask);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 5b15033a94bf..ab0e8ed47291 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -155,6 +155,36 @@ int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
}
/**
+ * comedi_timeout() - busy-wait for a driver condition to occur.
+ * @dev: comedi_device struct
+ * @s: comedi_subdevice struct
+ * @insn: comedi_insn struct
+ * @cb: callback to check for the condition
+ * @context: private context from the driver
+ */
+int comedi_timeout(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ int (*cb)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context),
+ unsigned long context)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(COMEDI_TIMEOUT_MS);
+ int ret;
+
+ while (time_before(jiffies, timeout)) {
+ ret = cb(dev, s, insn, context);
+ if (ret != -EBUSY)
+ return ret; /* success (0) or non EBUSY errno */
+ cpu_relax();
+ }
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(comedi_timeout);
+
+/**
* comedi_dio_insn_config() - boilerplate (*insn_config) for DIO subdevices.
* @dev: comedi_device struct
* @s: comedi_subdevice struct
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 8a57c3c1ade0..46a385c29ba8 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -56,6 +56,7 @@ Configuration Options: not applicable, uses PCI auto config
#include "../comedidev.h"
#include "8255.h"
+#include "mite.h"
enum pci_8255_boardid {
BOARD_ADLINK_PCI7224,
@@ -79,6 +80,7 @@ struct pci_8255_boardinfo {
const char *name;
int dio_badr;
int n_8255;
+ unsigned int has_mite:1;
};
static const struct pci_8255_boardinfo pci_8255_boards[] = {
@@ -126,36 +128,43 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
.name = "ni_pci-dio-96",
.dio_badr = 1,
.n_8255 = 4,
+ .has_mite = 1,
},
[BOARD_NI_PCIDIO96B] = {
.name = "ni_pci-dio-96b",
.dio_badr = 1,
.n_8255 = 4,
+ .has_mite = 1,
},
[BOARD_NI_PXI6508] = {
.name = "ni_pxi-6508",
.dio_badr = 1,
.n_8255 = 4,
+ .has_mite = 1,
},
[BOARD_NI_PCI6503] = {
.name = "ni_pci-6503",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
[BOARD_NI_PCI6503B] = {
.name = "ni_pci-6503b",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
[BOARD_NI_PCI6503X] = {
.name = "ni_pci-6503x",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
[BOARD_NI_PXI_6503] = {
.name = "ni_pxi-6503",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
};
@@ -163,6 +172,25 @@ struct pci_8255_private {
void __iomem *mmio_base;
};
+static int pci_8255_mite_init(struct pci_dev *pcidev)
+{
+ void __iomem *mite_base;
+ u32 main_phys_addr;
+
+ /* ioremap the MITE registers (BAR 0) temporarily */
+ mite_base = pci_ioremap_bar(pcidev, 0);
+ if (!mite_base)
+ return -ENOMEM;
+
+ /* set data window to main registers (BAR 1) */
+ main_phys_addr = pci_resource_start(pcidev, 1);
+ writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
+
+ /* finished with MITE registers */
+ iounmap(mite_base);
+ return 0;
+}
+
static int pci_8255_mmio(int dir, int port, int data, unsigned long iobase)
{
void __iomem *mmio_base = (void __iomem *)iobase;
@@ -201,6 +229,12 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
+ if (board->has_mite) {
+ ret = pci_8255_mite_init(pcidev);
+ if (ret)
+ return ret;
+ }
+
is_mmio = (pci_resource_flags(pcidev, board->dio_badr) &
IORESOURCE_MEM) != 0;
if (is_mmio) {
@@ -235,9 +269,6 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
return ret;
}
- dev_info(dev->class_dev, "%s attached (%d digital i/o channels)\n",
- dev->board_name, board->n_8255 * 24);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 2706f583d8f0..0757a82ddcfa 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_COMEDI_PCL818) += pcl818.o
obj-$(CONFIG_COMEDI_PCM3724) += pcm3724.o
obj-$(CONFIG_COMEDI_RTI800) += rti800.o
obj-$(CONFIG_COMEDI_RTI802) += rti802.o
+obj-$(CONFIG_COMEDI_DAC02) += dac02.o
obj-$(CONFIG_COMEDI_DAS16M1) += das16m1.o
obj-$(CONFIG_COMEDI_DAS08_ISA) += das08_isa.o
obj-$(CONFIG_COMEDI_DAS16) += das16.o
@@ -53,7 +54,6 @@ obj-$(CONFIG_COMEDI_PCMDA12) += pcmda12.o
obj-$(CONFIG_COMEDI_PCMMIO) += pcmmio.o
obj-$(CONFIG_COMEDI_PCMUIO) += pcmuio.o
obj-$(CONFIG_COMEDI_MULTIQ3) += multiq3.o
-obj-$(CONFIG_COMEDI_POC) += poc.o
obj-$(CONFIG_COMEDI_S526) += s526.o
# Comedi PCI drivers
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
index 1128c22e7517..28450f65a134 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
@@ -1,46 +1,24 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +19(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-035 | Compiler : GCC |
- | Module name : hwdrv_apci035.c | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-035 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +19(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ */
/* Card Specific information */
#define APCI035_ADDRESS_RANGE 255
@@ -106,99 +84,66 @@ static struct comedi_lrange range_apci035_ai = {
}
};
-static int i_WatchdogNbr = 0;
-static int i_Temp = 0;
+static int i_WatchdogNbr;
+static int i_Temp;
static int i_Flag = 1;
+
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_ConfigTimerWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures The Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 0 Configure As Timer |
-| 1 Configure As Watchdog |
-| data[1] : Watchdog number
-| data[2] : Time base Unit |
-| data[3] : Reload Value |
-| data[4] : External Trigger |
-| 1:Enable
-| 0:Disable
-| data[5] :External Trigger Level
-| 00 Trigger Disabled
-| 01 Trigger Enabled (Low level)
-| 10 Trigger Enabled (High Level)
-| 11 Trigger Enabled (High/Low level)
-| data[6] : External Gate |
-| 1:Enable
-| 0:Disable
-| data[7] : External Gate level
-| 00 Gate Disabled
-| 01 Gate Enabled (Low level)
-| 10 Gate Enabled (High Level)
-| data[8] :Warning Relay
-| 1: ENABLE
-| 0: DISABLE
-| data[9] :Warning Delay available
-| data[10] :Warning Relay Time unit
-| data[11] :Warning Relay Time Reload value
-| data[12] :Reset Relay
-| 1 : ENABLE
-| 0 : DISABLE
-| data[13] :Interrupt
-| 1 : ENABLE
-| 0 : DISABLE
-|
-|
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_ConfigTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures The Timer, Counter or Watchdog
+ *
+ * data[0] 0 = Configure As Timer, 1 = Configure As Watchdog
+ * data[1] Watchdog number
+ * data[2] Time base Unit
+ * data[3] Reload Value
+ * data[4] External Trigger, 1 = Enable, 0 = Disable
+ * data[5] External Trigger Level
+ * 00 = Trigger Disabled
+ * 01 = Trigger Enabled (Low level)
+ * 10 = Trigger Enabled (High Level)
+ * 11 = Trigger Enabled (High/Low level)
+ * data[6] External Gate, 1 = Enable, 0 = Disable
+ * data[7] External Gate level
+ * 00 = Gate Disabled
+ * 01 = Gate Enabled (Low level)
+ * 10 = Gate Enabled (High Level)
+ * data[8] Warning Relay, 1 = Enable, 0 = Disable
+ * data[9] Warning Delay available
+ * data[10] Warning Relay Time unit
+ * data[11] Warning Relay Time Reload value
+ * data[12] Reset Relay, 1 = Enable, 0 = Disable
+ * data[13] Interrupt, 1 = Enable, 0 = Disable
+ */
+static int apci035_timer_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- unsigned int ui_Status = 0;
- unsigned int ui_Command = 0;
- unsigned int ui_Mode = 0;
+ unsigned int ui_Status;
+ unsigned int ui_Command;
+ unsigned int ui_Mode;
i_Temp = 0;
devpriv->tsk_Current = current;
devpriv->b_TimerSelectMode = data[0];
i_WatchdogNbr = data[1];
- if (data[0] == 0) {
+ if (data[0] == 0)
ui_Mode = 2;
- } else {
+ else
ui_Mode = 0;
- }
-/* ui_Command = inl(devpriv->iobase+((i_WatchdogNbr-1)*32)+12); */
+
ui_Command = 0;
-/* ui_Command = ui_Command & 0xFFFFF9FEUL; */
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- ui_Command = 0;
+
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-/************************/
-/* Set the reload value */
-/************************/
+
+ /* Set the reload value */
outl(data[3], devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 4);
-/*********************/
-/* Set the time unit */
-/*********************/
+
+ /* Set the time unit */
outl(data[2], devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 8);
if (data[0] == ADDIDATA_TIMER) {
- /******************************/
/* Set the mode : */
/* - Disable the hardware */
/* - Disable the counter mode */
@@ -206,101 +151,82 @@ static int i_APCI035_ConfigTimerWatchdog(struct comedi_device *dev,
/* - Disable the reset */
/* - Enable the timer mode */
/* - Set the timer mode */
- /******************************/
ui_Command =
(ui_Command & 0xFFF719E2UL) | ui_Mode << 13UL | 0x10UL;
- } /* if (data[0] == ADDIDATA_TIMER) */
- else {
- if (data[0] == ADDIDATA_WATCHDOG) {
+ } else if (data[0] == ADDIDATA_WATCHDOG) {
- /******************************/
- /* Set the mode : */
- /* - Disable the hardware */
- /* - Disable the counter mode */
- /* - Disable the warning */
- /* - Disable the reset */
- /* - Disable the timer mode */
- /******************************/
+ /* Set the mode : */
+ /* - Disable the hardware */
+ /* - Disable the counter mode */
+ /* - Disable the warning */
+ /* - Disable the reset */
+ /* - Disable the timer mode */
- ui_Command = ui_Command & 0xFFF819E2UL;
+ ui_Command = ui_Command & 0xFFF819E2UL;
- } else {
- printk("\n The parameter for Timer/watchdog selection is in error\n");
- return -EINVAL;
- }
+ } else {
+ dev_err(dev->class_dev, "The parameter for Timer/watchdog selection is in error\n");
+ return -EINVAL;
}
+
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- ui_Command = 0;
+
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-/********************************/
-/* Disable the hardware trigger */
-/********************************/
+
+ /* Disable the hardware trigger */
ui_Command = ui_Command & 0xFFFFF89FUL;
if (data[4] == ADDIDATA_ENABLE) {
- /**********************************/
+
/* Set the hardware trigger level */
- /**********************************/
ui_Command = ui_Command | (data[5] << 5);
}
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- ui_Command = 0;
+
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-/*****************************/
-/* Disable the hardware gate */
-/*****************************/
+
+ /* Disable the hardware gate */
ui_Command = ui_Command & 0xFFFFF87FUL;
if (data[6] == ADDIDATA_ENABLE) {
-/*******************************/
-/* Set the hardware gate level */
-/*******************************/
+
+ /* Set the hardware gate level */
ui_Command = ui_Command | (data[7] << 7);
}
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- ui_Command = 0;
+
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-/*******************************/
-/* Disable the hardware output */
-/*******************************/
+
+ /* Disable the hardware output */
ui_Command = ui_Command & 0xFFFFF9FBUL;
-/*********************************/
-/* Set the hardware output level */
-/*********************************/
+
+ /* Set the hardware output level */
ui_Command = ui_Command | (data[8] << 2);
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
if (data[9] == ADDIDATA_ENABLE) {
- /************************/
+
/* Set the reload value */
- /************************/
outl(data[11],
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 24);
- /**********************/
+
/* Set the time unite */
- /**********************/
outl(data[10],
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 28);
}
- ui_Command = 0;
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- /*******************************/
+
/* Disable the hardware output */
- /*******************************/
ui_Command = ui_Command & 0xFFFFF9F7UL;
- /*********************************/
+
/* Set the hardware output level */
- /*********************************/
ui_Command = ui_Command | (data[12] << 3);
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- /*************************************/
- /** Enable the watchdog interrupt **/
- /*************************************/
- ui_Command = 0;
+
+ /* Enable the watchdog interrupt */
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-/*******************************/
-/* Set the interrupt selection */
-/*******************************/
+
+ /* Set the interrupt selection */
ui_Status = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 16);
ui_Command = (ui_Command & 0xFFFFF9FDUL) | (data[13] << 1);
@@ -310,82 +236,63 @@ static int i_APCI035_ConfigTimerWatchdog(struct comedi_device *dev,
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_StartStopWriteTimerWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Start / Stop The Selected Timer , or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 0 - Stop Selected Timer/Watchdog |
-| 1 - Start Selected Timer/Watchdog |
-| 2 - Trigger Selected Timer/Watchdog |
-| 3 - Stop All Timer/Watchdog |
-| 4 - Start All Timer/Watchdog |
-| 5 - Trigger All Timer/Watchdog |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_StartStopWriteTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Start / Stop The Selected Timer or Watchdog
+ *
+ * data[0]
+ * 0 - Stop Selected Timer/Watchdog
+ *	1 - Start Selected Timer/Watchdog
+ * 2 - Trigger Selected Timer/Watchdog
+ * 3 - Stop All Timer/Watchdog
+ * 4 - Start All Timer/Watchdog
+ * 5 - Trigger All Timer/Watchdog
+ */
+static int apci035_timer_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- unsigned int ui_Command = 0;
- int i_Count = 0;
+ unsigned int ui_Command;
+ int i_Count;
if (data[0] == 1) {
ui_Command =
inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- /**********************/
+
/* Start the hardware */
- /**********************/
ui_Command = (ui_Command & 0xFFFFF9FFUL) | 0x1UL;
outl(ui_Command,
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- } /* if (data[0]==1) */
+ }
if (data[0] == 2) {
ui_Command =
inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- /***************************/
+
/* Set the trigger command */
- /***************************/
ui_Command = (ui_Command & 0xFFFFF9FFUL) | 0x200UL;
outl(ui_Command,
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
}
- if (data[0] == 0) /* Stop The Watchdog */
- {
+ if (data[0] == 0) {
/* Stop The Watchdog */
ui_Command = 0;
-/*
-* ui_Command = inl(devpriv->iobase+((i_WatchdogNbr-1)*32)+12);
-* ui_Command = ui_Command & 0xFFFFF9FEUL;
-*/
+ /*
+ * ui_Command = inl(devpriv->iobase+((i_WatchdogNbr-1)*32)+12);
+ * ui_Command = ui_Command & 0xFFFFF9FEUL;
+ */
outl(ui_Command,
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- } /* if (data[1]==0) */
- if (data[0] == 3) /* stop all Watchdogs */
- {
+ }
+ if (data[0] == 3) {
+ /* stop all Watchdogs */
ui_Command = 0;
for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
+ if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
ui_Command = 0x2UL;
- } else {
+ else
ui_Command = 0x10UL;
- }
+
i_WatchdogNbr = i_Count;
outl(ui_Command,
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) +
@@ -393,30 +300,29 @@ static int i_APCI035_StartStopWriteTimerWatchdog(struct comedi_device *dev,
}
}
- if (data[0] == 4) /* start all Watchdogs */
- {
+ if (data[0] == 4) {
+ /* start all Watchdogs */
ui_Command = 0;
for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
+ if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
ui_Command = 0x1UL;
- } else {
+ else
ui_Command = 0x8UL;
- }
+
i_WatchdogNbr = i_Count;
outl(ui_Command,
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) +
0);
}
}
- if (data[0] == 5) /* trigger all Watchdogs */
- {
+ if (data[0] == 5) {
+ /* trigger all Watchdogs */
ui_Command = 0;
for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
+ if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
ui_Command = 0x4UL;
- } else {
+ else
ui_Command = 0x20UL;
- }
i_WatchdogNbr = i_Count;
outl(ui_Command,
@@ -429,109 +335,61 @@ static int i_APCI035_StartStopWriteTimerWatchdog(struct comedi_device *dev,
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_ReadTimerWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read The Selected Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : data[0] : software trigger status
-| data[1] : hardware trigger status
-| data[2] : Software clear status
-| data[3] : Overflow status
-| data[4] : Timer actual value
-|
-
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_ReadTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Read The Selected Timer, Counter or Watchdog
+ *
+ * data[0] software trigger status
+ * data[1] hardware trigger status
+ * data[2] Software clear status
+ * data[3] Overflow status
+ * data[4] Timer actual value
+ */
+static int apci035_timer_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- unsigned int ui_Status = 0; /* Status register */
+ unsigned int ui_Status; /* Status register */
i_WatchdogNbr = insn->unused[0];
- /******************/
/* Get the status */
- /******************/
-
ui_Status = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 16);
- /***********************************/
/* Get the software trigger status */
- /***********************************/
-
data[0] = ((ui_Status >> 1) & 1);
- /***********************************/
+
/* Get the hardware trigger status */
- /***********************************/
data[1] = ((ui_Status >> 2) & 1);
- /*********************************/
+
/* Get the software clear status */
- /*********************************/
data[2] = ((ui_Status >> 3) & 1);
- /***************************/
+
/* Get the overflow status */
- /***************************/
data[3] = ((ui_Status >> 0) & 1);
- if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
+ if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER)
data[4] = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 0);
- } /* if (devpriv->b_TimerSelectMode==ADDIDATA_TIMER) */
-
return insn->n;
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_ConfigAnalogInput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures The Analog Input Subdevice |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s : Subdevice Pointer |
-| struct comedi_insn *insn : Insn Structure Pointer |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| data[0] : Warning delay value
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_ConfigAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures The Analog Input Subdevice
+ *
+ * data[0] Warning delay value
+ */
+static int apci035_ai_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
devpriv->tsk_Current = current;
outl(0x200 | 0, devpriv->iobase + 128 + 0x4);
outl(0, devpriv->iobase + 128 + 0);
-/********************************/
-/* Initialise the warning value */
-/********************************/
+
+ /* Initialise the warning value */
outl(0x300 | 0, devpriv->iobase + 128 + 0x4);
outl((data[0] << 8), devpriv->iobase + 128 + 0);
outl(0x200000UL, devpriv->iobase + 128 + 12);
@@ -540,145 +398,88 @@ static int i_APCI035_ConfigAnalogInput(struct comedi_device *dev,
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_ReadAnalogInput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read value of the selected channel |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int ui_NoOfChannels : No Of Channels To read |
-| unsigned int *data : Data Pointer to read status |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-| data[0] : Digital Value Of Input |
-| |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_ReadAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Read value of the selected channel
+ *
+ * data[0] Digital Value Of Input
+ */
+static int apci035_ai_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- unsigned int ui_CommandRegister = 0;
+ unsigned int ui_CommandRegister;
-/******************/
-/* Set the start */
-/******************/
+ /* Set the start */
ui_CommandRegister = 0x80000;
- /******************************/
+
/* Write the command register */
- /******************************/
outl(ui_CommandRegister, devpriv->iobase + 128 + 8);
-/***************************************/
-/* Read the digital value of the input */
-/***************************************/
+ /* Read the digital value of the input */
data[0] = inl(devpriv->iobase + 128 + 28);
return insn->n;
}
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_Reset(struct comedi_device *dev) |
-| |
-+----------------------------------------------------------------------------+
-| Task :Resets the registers of the card |
-+----------------------------------------------------------------------------+
-| Input Parameters : |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_Reset(struct comedi_device *dev)
+static int apci035_reset(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
- int i_Count = 0;
+ int i_Count;
for (i_Count = 1; i_Count <= 4; i_Count++) {
i_WatchdogNbr = i_Count;
- outl(0x0, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 0); /* stop all timers */
+
+ /* stop all timers */
+ outl(0x0, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 0);
}
outl(0x0, devpriv->iobase + 128 + 12); /* Disable the warning delay */
return 0;
}
-/*
-+----------------------------------------------------------------------------+
-| Function Name : static void v_APCI035_Interrupt |
-| (int irq , void *d) |
-+----------------------------------------------------------------------------+
-| Task : Interrupt processing Routine |
-+----------------------------------------------------------------------------+
-| Input Parameters : int irq : irq number |
-| void *d : void pointer |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static void v_APCI035_Interrupt(int irq, void *d)
+static void apci035_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
- unsigned int ui_StatusRegister1 = 0;
- unsigned int ui_StatusRegister2 = 0;
- unsigned int ui_ReadCommand = 0;
- unsigned int ui_ChannelNumber = 0;
- unsigned int ui_DigitalTemperature = 0;
+ unsigned int ui_StatusRegister1;
+ unsigned int ui_StatusRegister2;
+ unsigned int ui_ReadCommand;
+ unsigned int ui_ChannelNumber;
+ unsigned int ui_DigitalTemperature;
if (i_Temp == 1) {
i_WatchdogNbr = i_Flag;
i_Flag = i_Flag + 1;
}
- /**************************************/
+
/* Read the interrupt status register of temperature Warning */
- /**************************************/
ui_StatusRegister1 = inl(devpriv->iobase + 128 + 16);
- /**************************************/
- /* Read the interrupt status register for Watchdog/timer */
- /**************************************/
+ /* Read the interrupt status register for Watchdog/timer */
ui_StatusRegister2 =
inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 20);
- if ((((ui_StatusRegister1) & 0x8) == 0x8)) /* Test if warning relay interrupt */
- {
- /**********************************/
+ /* Test if warning relay interrupt */
+ if ((ui_StatusRegister1 & 0x8) == 0x8) {
+
/* Disable the temperature warning */
- /**********************************/
ui_ReadCommand = inl(devpriv->iobase + 128 + 12);
ui_ReadCommand = ui_ReadCommand & 0xFFDF0000UL;
outl(ui_ReadCommand, devpriv->iobase + 128 + 12);
- /***************************/
+
/* Read the channel number */
- /***************************/
ui_ChannelNumber = inl(devpriv->iobase + 128 + 60);
- /**************************************/
+
/* Read the digital temperature value */
- /**************************************/
ui_DigitalTemperature = inl(devpriv->iobase + 128 + 60);
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- } /* if (((ui_StatusRegister1 & 0x8) == 0x8)) */
- else {
- if ((ui_StatusRegister2 & 0x1) == 0x1) {
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- }
- } /* else if (((ui_StatusRegister1 & 0x8) == 0x8)) */
+ /* send signal to the sample */
+ send_sig(SIGIO, devpriv->tsk_Current, 0);
+
+ } else if ((ui_StatusRegister2 & 0x1) == 0x1) {
+ /* send signal to the sample */
+ send_sig(SIGIO, devpriv->tsk_Current, 0);
+ }
return;
}
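
The interrupt handler above notifies user space with send_sig(SIGIO, devpriv->tsk_Current, 0). How the receiving task gets registered with the driver is outside this hunk, so the following hypothetical sketch only shows the user-space side of catching that SIGIO.

/* Hypothetical user-space sketch: wait for the SIGIO that apci035_interrupt()
 * delivers on a temperature-warning or watchdog event. Registration of the
 * receiving task with the driver is not shown here.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_event;

static void sigio_handler(int sig)
{
	(void)sig;
	got_event = 1;		/* async-signal-safe: just record the event */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = sigio_handler;
	sigaction(SIGIO, &sa, NULL);

	while (!got_event)
		pause();	/* sleep until the board raises an event */

	printf("APCI-035 event signalled\n");
	return 0;
}
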
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
index 054910511e9e..a633957890d7 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
@@ -1,48 +1,25 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +19(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-1500 | Compiler : GCC |
- | Module name : hwdrv_apci1500.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-1500 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
-
-/********* Definitions for APCI-1500 card *****/
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +49(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ */
/* Card Specific information */
#define APCI1500_ADDRESS_RANGE 4
@@ -141,99 +118,45 @@ enum {
APCI1500_RW_PORT_B_PATTERN_MASK
};
-static int i_TimerCounter1Init = 0;
-static int i_TimerCounter2Init = 0;
-static int i_WatchdogCounter3Init = 0;
-static int i_Event1Status = 0, i_Event2Status = 0;
-static int i_TimerCounterWatchdogInterrupt = 0;
-static int i_Logic = 0, i_CounterLogic = 0;
-static int i_InterruptMask = 0;
-static int i_InputChannel = 0;
-static int i_TimerCounter1Enabled = 0, i_TimerCounter2Enabled = 0,
- i_WatchdogCounter3Enabled = 0;
+static int i_TimerCounter1Init;
+static int i_TimerCounter2Init;
+static int i_WatchdogCounter3Init;
+static int i_Event1Status, i_Event2Status;
+static int i_TimerCounterWatchdogInterrupt;
+static int i_Logic, i_CounterLogic;
+static int i_InterruptMask;
+static int i_InputChannel;
+static int i_TimerCounter1Enabled, i_TimerCounter2Enabled,
+ i_WatchdogCounter3Enabled;
/*
- +----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ConfigDigitalInputEvent |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : An event can be generated for each port. |
-| The first event is related to the first 8 channels |
-| (port 1) and the second to the following 6 channels |
-| (port 2). An interrupt is generated when one or both |
-| events have occurred |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] :Number of the input port on |
-| which the event will take place |
-| (1 or 2)
-| data[1] : The event logic for port 1 has |
-| three possibilities |
-| :0 APCI1500_AND :This logic |
-| links |
-| the inputs |
-| with an AND |
-| logic. |
-| 1 APCI1500_OR :This logic |
-| links |
-| the inputs |
-| with a |
-| OR logic. |
-| 2 APCI1500_OR_PRIORITY |
-| :This logic |
-| links |
-| the inputs |
-| with a |
-| priority |
-| OR logic. |
-| Input 1 |
-| has the |
-| highest |
-| priority |
-| level and |
-| input 8 |
-| the smallest|
-| For the second port the user has|
-| 1 possibility: |
-| APCI1500_OR :This logic |
-| links |
-| the inputs |
-| with a |
-| polarity |
-| OR logic |
-| data[2] : These 8-character word for port1|
-| and 6-character word for port 2 |
-| give the mask of the event. |
-| Each place gives the state |
-| of the input channels and can |
-| have one of these six characters|
-| |
-| 0 : This input must be on 0 |
-| 1 : This input must be on 1 |
-| 2 : This input reacts to |
-| a falling edge |
-| 3 : This input reacts to a |
-| rising edge |
-| 4 : This input reacts to both edges |
-|
-| 5 : This input is not |
-| used for event |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * An event can be generated for each port. The first event is related to the
+ * first 8 channels (port 1) and the second to the following 6 channels
+ * (port 2). An interrupt is generated when one or both events have occurred.
+ *
+ * data[0] Number of the input port on which the event will take place (1 or 2)
+ * data[1] The event logic for port 1 has three possibilities:
+ * APCI1500_AND This logic links the inputs with an AND logic.
+ * APCI1500_OR This logic links the inputs with an OR logic.
+ * APCI1500_OR_PRIORITY This logic links the inputs with a priority OR
+ * logic. Input 1 has the highest priority level
+ * and input 8 the smallest.
+ * For the second port the user has 1 possibility:
+ * APCI1500_OR This logic links the inputs with a polarity OR logic
+ * data[2] The 8-character word for port 1 and the 6-character word for port 2
+ * give the mask of the event. Each place gives the state of the input
+ * channels and can have one of these six characters
+ * 0 This input must be on 0
+ * 1 This input must be on 1
+ * 2 This input reacts to a falling edge
+ * 3 This input reacts to a rising edge
+ * 4 This input reacts to both edges
+ * 5 This input is not used for event
+ */
+static int apci1500_di_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_PatternPolarity = 0, i_PatternTransition = 0, i_PatternMask = 0;
@@ -241,14 +164,10 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
int i_PatternTransitionCount = 0, i_RegValue;
int i;
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Disables the main interrupt on the board */
- /**********************************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if (data[0] == 1) {
@@ -259,7 +178,8 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
i_MaxChannel = 6;
} /* if(data[0]==2) */
else {
- printk("\nThe specified port event does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified port event does not exist\n");
return -EINVAL;
} /* else if(data[0]==2) */
} /* else if (data[0] == 1) */
@@ -274,7 +194,8 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
data[1] = APCI1500_OR_PRIORITY;
break;
default:
- printk("\nThe specified interrupt logic does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified interrupt logic does not exist\n");
return -EINVAL;
} /* switch(data[1]); */
@@ -318,37 +239,30 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
case 5:
break;
default:
- printk("\nThe option indicated in the event mask does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The option indicated in the event mask does not exist\n");
return -EINVAL;
} /* switch(i_EventMask) */
} /* for (i_Count = i_MaxChannel; i_Count >0;i_Count --) */
if (data[0] == 1) {
- /****************************/
/* Test the interrupt logic */
- /****************************/
if (data[1] == APCI1500_AND ||
data[1] == APCI1500_OR ||
data[1] == APCI1500_OR_PRIORITY) {
- /**************************************/
/* Tests if a transition was declared */
/* for a OR PRIORITY logic */
- /**************************************/
if (data[1] == APCI1500_OR_PRIORITY
&& i_PatternTransition != 0) {
- /********************************************/
- /* Transition error on an OR PRIORITY logic */
- /********************************************/
- printk("\nTransition error on an OR PRIORITY logic\n");
+ dev_warn(dev->hw_dev,
+ "Transition error on an OR PRIORITY logic\n");
return -EINVAL;
} /* if (data[1]== APCI1500_OR_PRIORITY && i_PatternTransition != 0) */
- /*************************************/
/* Tests if more than one transition */
/* was declared for an AND logic */
- /*************************************/
if (data[1] == APCI1500_AND) {
for (i_Count = 0; i_Count < 8; i_Count++) {
@@ -360,29 +274,21 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
} /* for (i_Count = 0; i_Count < 8; i_Count++) */
if (i_PatternTransitionCount > 1) {
- /****************************************/
- /* Transition error on an AND logic */
- /****************************************/
- printk("\n Transition error on an AND logic\n");
+ dev_warn(dev->hw_dev,
+ "Transition error on an AND logic\n");
return -EINVAL;
} /* if (i_PatternTransitionCount > 1) */
} /* if (data[1]== APCI1500_AND) */
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port A */
- /******************/
outb(0xF0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Selects the polarity register of port 1 */
- /**********************************************/
outb(APCI1500_RW_PORT_A_PATTERN_POLARITY,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -390,20 +296,16 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*********************************************/
/* Selects the pattern mask register of */
/* port 1 */
- /*********************************************/
outb(APCI1500_RW_PORT_A_PATTERN_MASK,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
outb(i_PatternMask,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************************/
/* Selects the pattern transition register */
/* of port 1 */
- /********************************************/
outb(APCI1500_RW_PORT_A_PATTERN_TRANSITION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -411,10 +313,8 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************/
/* Selects the mode specification mask */
/* register of port 1 */
- /******************************************/
outb(APCI1500_RW_PORT_A_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -422,17 +322,13 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************/
/* Selects the mode specification mask */
/* register of port 1 */
- /******************************************/
outb(APCI1500_RW_PORT_A_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************/
/* Port A new mode */
- /**********************/
i_RegValue = (i_RegValue & 0xF9) | data[1] | 0x9;
outb(i_RegValue,
@@ -441,53 +337,40 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
i_Event1Status = 1;
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port A */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
} /* if(data[1]==APCI1500_AND||data[1]==APCI1500_OR||data[1]==APCI1500_OR_PRIORITY) */
else {
- printk("\nThe choice for interrupt logic does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The choice for interrupt logic does not exist\n");
return -EINVAL;
} /* else }// if(data[1]==APCI1500_AND||data[1]==APCI1500_OR||data[1]==APCI1500_OR_PRIORITY) */
} /* if (data[0]== 1) */
- /************************************/
/* Test if event setting for port 2 */
- /************************************/
if (data[0] == 2) {
- /************************/
/* Test the event logic */
- /************************/
if (data[1] == APCI1500_OR) {
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port B */
- /******************/
outb(0x74,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************/
/* Selects the mode specification mask */
/* register of port B */
- /****************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -495,10 +378,8 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************/
/* Selects the mode specification mask */
/* register of port B */
- /******************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -507,37 +388,29 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************/
/* Selects error channels 1 and 2 */
- /**********************************/
i_PatternMask = (i_PatternMask | 0xC0);
i_PatternPolarity = (i_PatternPolarity | 0xC0);
i_PatternTransition = (i_PatternTransition | 0xC0);
- /**********************************************/
/* Selects the polarity register of port 2 */
- /**********************************************/
outb(APCI1500_RW_PORT_B_PATTERN_POLARITY,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
outb(i_PatternPolarity,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Selects the pattern transition register */
/* of port 2 */
- /**********************************************/
outb(APCI1500_RW_PORT_B_PATTERN_TRANSITION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
outb(i_PatternTransition,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Selects the pattern Mask register */
/* of port 2 */
- /**********************************************/
outb(APCI1500_RW_PORT_B_PATTERN_MASK,
devpriv->iobase +
@@ -546,20 +419,16 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************/
/* Selects the mode specification mask */
/* register of port 2 */
- /******************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************/
/* Selects the mode specification mask */
/* register of port 2 */
- /******************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -569,23 +438,20 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
APCI1500_Z8536_CONTROL_REGISTER);
i_Event2Status = 1;
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port B */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
} /* if (data[1] == APCI1500_OR) */
else {
- printk("\nThe choice for interrupt logic does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The choice for interrupt logic does not exist\n");
return -EINVAL;
} /* elseif (data[1] == APCI1500_OR) */
} /* if(data[0]==2) */
@@ -594,31 +460,15 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
}
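
To make the data[] layout documented at the top of apci1500_di_config() concrete, here is a hypothetical comedilib sketch (not part of the patch) that configures an OR event on port 1 through a raw INSN_CONFIG instruction. The subdevice index is a placeholder, and the logic code 1 is taken from the removed comment's mapping (0 = APCI1500_AND, 1 = APCI1500_OR, 2 = APCI1500_OR_PRIORITY).

/* Hypothetical user-space sketch: configure a port 1 event using the data[]
 * layout documented above. The subdevice index and the mask word are
 * illustrative placeholders.
 */
#include <string.h>
#include <comedilib.h>

static int config_port1_or_event(comedi_t *dev, unsigned int subdev)
{
	comedi_insn insn;
	lsampl_t data[3];

	data[0] = 1;		/* port 1 */
	data[1] = 1;		/* event logic: APCI1500_OR per the mapping above */
	data[2] = 55555513;	/* 8-digit mask word, each digit 0-5 as documented */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.n = 3;
	insn.data = data;
	insn.subdev = subdev;
	insn.chanspec = 0;

	return comedi_do_insn(dev, &insn);
}
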
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_StartStopInputEvent |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Allows or disallows a port event |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int ui_Channel : Channel number to read |
-| unsigned int *data : Data Pointer to read status |
-| data[0] :0 Start input event
-| 1 Stop input event
-| data[1] :No of port (1 or 2)
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_StartStopInputEvent(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Allows or disallows a port event
+ *
+ * data[0] 0 = Start input event, 1 = Stop input event
+ * data[1] Number of port (1 or 2)
+ */
+static int apci1500_di_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_Event1InterruptStatus = 0, i_Event2InterruptStatus =
@@ -626,48 +476,30 @@ static int i_APCI1500_StartStopInputEvent(struct comedi_device *dev,
switch (data[0]) {
case START:
- /*************************/
/* Tests the port number */
- /*************************/
if (data[1] == 1 || data[1] == 2) {
- /***************************/
/* Test if port 1 selected */
- /***************************/
if (data[1] == 1) {
- /*****************************/
/* Test if event initialised */
- /*****************************/
if (i_Event1Status == 1) {
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port A */
- /******************/
outb(0xF0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the command and status register of */
/* port 1 */
- /***************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************/
/* Allows the pattern interrupt */
- /*************************************/
outb(0xC0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port A */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -680,185 +512,142 @@ static int i_APCI1500_StartStopInputEvent(struct comedi_device *dev,
APCI1500_Z8536_CONTROL_REGISTER);
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Authorizes the main interrupt on the board */
- /**********************************************/
outb(0xD0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
} /* if(i_Event1Status==1) */
else {
- printk("\nEvent 1 not initialised\n");
+ dev_warn(dev->hw_dev,
+ "Event 1 not initialised\n");
return -EINVAL;
} /* else if(i_Event1Status==1) */
} /* if (data[1]==1) */
if (data[1] == 2) {
if (i_Event2Status == 1) {
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port B */
- /******************/
outb(0x74,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the command and status register of */
/* port 2 */
- /***************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************/
/* Allows the pattern interrupt */
- /*************************************/
outb(0xC0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port B */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Authorizes the main interrupt on the board */
- /**********************************************/
outb(0xD0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
i_Event2InterruptStatus = 1;
} /* if(i_Event2Status==1) */
else {
- printk("\nEvent 2 not initialised\n");
+ dev_warn(dev->hw_dev,
+ "Event 2 not initialised\n");
return -EINVAL;
} /* else if(i_Event2Status==1) */
} /* if(data[1]==2) */
} /* if (data[1] == 1 || data[0] == 2) */
else {
- printk("\nThe port parameter is in error\n");
+ dev_warn(dev->hw_dev,
+ "The port parameter is in error\n");
return -EINVAL;
} /* else if (data[1] == 1 || data[0] == 2) */
break;
case STOP:
- /*************************/
/* Tests the port number */
- /*************************/
if (data[1] == 1 || data[1] == 2) {
- /***************************/
/* Test if port 1 selected */
- /***************************/
if (data[1] == 1) {
- /*****************************/
/* Test if event initialised */
- /*****************************/
if (i_Event1Status == 1) {
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port A */
- /******************/
outb(0xF0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the command and status register of */
/* port 1 */
- /***************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************/
/* Inhibits the pattern interrupt */
- /*************************************/
outb(0xE0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port A */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
i_Event1InterruptStatus = 0;
} /* if(i_Event1Status==1) */
else {
- printk("\nEvent 1 not initialised\n");
+ dev_warn(dev->hw_dev,
+ "Event 1 not initialised\n");
return -EINVAL;
} /* else if(i_Event1Status==1) */
} /* if (data[1]==1) */
if (data[1] == 2) {
- /*****************************/
/* Test if event initialised */
- /*****************************/
if (i_Event2Status == 1) {
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port B */
- /******************/
outb(0x74,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the command and status register of */
/* port 2 */
- /***************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************/
/* Inhibits the pattern interrupt */
- /*************************************/
outb(0xE0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port B */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
i_Event2InterruptStatus = 0;
} /* if(i_Event2Status==1) */
else {
- printk("\nEvent 2 not initialised\n");
+ dev_warn(dev->hw_dev,
+ "Event 2 not initialised\n");
return -EINVAL;
} /* else if(i_Event2Status==1) */
} /* if(data[1]==2) */
} /* if (data[1] == 1 || data[1] == 2) */
else {
- printk("\nThe port parameter is in error\n");
+ dev_warn(dev->hw_dev,
+ "The port parameter is in error\n");
return -EINVAL;
} /* else if (data[1] == 1 || data[1] == 2) */
break;
default:
- printk("\nThe option of START/STOP logic does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The option of START/STOP logic does not exist\n");
return -EINVAL;
} /* switch(data[0]) */
@@ -866,35 +655,17 @@ static int i_APCI1500_StartStopInputEvent(struct comedi_device *dev,
}
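
Using the data[] layout documented above for apci1500_di_write(), starting the port 1 event from user space would follow the same comedi_do_insn() pattern as the configuration sketch earlier, assuming this function is wired up as the subdevice's insn_write handler (the hook-up is outside this hunk).

/* Hypothetical user-space sketch: start the previously configured port 1
 * event. data[0] = 0 means "start" and data[1] selects the port, per the
 * comment above; the subdevice index is a placeholder.
 */
#include <string.h>
#include <comedilib.h>

static int start_port1_event(comedi_t *dev, unsigned int subdev)
{
	comedi_insn insn;
	lsampl_t data[2] = { 0, 1 };	/* start input event on port 1 */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_WRITE;
	insn.n = 2;
	insn.data = data;
	insn.subdev = subdev;

	return comedi_do_insn(dev, &insn);
}
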
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_Initialisation |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Return the status of the digital input |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int ui_Channel : Channel number to read |
-| unsigned int *data : Data Pointer to read status |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_Initialisation(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Return the status of the digital input
+ */
+static int apci1500_di_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_DummyRead = 0;
- /******************/
/* Software reset */
- /******************/
i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -902,16 +673,12 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
outb(1, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the master configuration control register */
- /*****************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0xF4, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the mode specification register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -943,9 +710,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
/* Deletes the register */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the mode specification register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -975,9 +740,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
/* Deletes the register */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the data path polarity register of port C */
- /*****************************************************/
outb(APCI1500_RW_PORT_C_DATA_PCITCH_POLARITY,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* High level of port C means 1 */
@@ -992,9 +755,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes it */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 1 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -1004,9 +765,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates the interrupt management of timer 1 */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 2 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -1016,9 +775,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates Timer 2 interrupt management: */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 3 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -1028,9 +785,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates interrupt management of timer 3: */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes all interrupts */
@@ -1051,37 +806,15 @@ static int apci1500_di_insn_bits(struct comedi_device *dev,
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ConfigDigitalOutputErrorInterrupt
-| (struct comedi_device *dev,struct comedi_subdevice *s struct comedi_insn
-| *insn,unsigned int *data) |
-| |
-+----------------------------------------------------------------------------+
-| Task : Configures the digital output memory and the digital
-| output error interrupt |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| data[0] :1:Memory on |
-| 0:Memory off |
-| data[1] :1 Enable the voltage error interrupt
-| :0 Disable the voltage error interrupt |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ConfigDigitalOutputErrorInterrupt(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the digital output memory and the digital output error interrupt
+ *
+ * data[1] 1 = Enable the voltage error interrupt
+ * 0 = Disable the voltage error interrupt
+ */
+static int apci1500_do_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
@@ -1090,31 +823,15 @@ static int i_APCI1500_ConfigDigitalOutputErrorInterrupt(struct comedi_device *de
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_WriteDigitalOutput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Writes port value To the selected port |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int ui_NoOfChannels : No Of Channels To Write |
-| unsigned int *data : Data Pointer to read status |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_WriteDigitalOutput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Writes port value to the selected port
+ */
+static int apci1500_do_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- static unsigned int ui_Temp = 0;
+ static unsigned int ui_Temp;
unsigned int ui_Temp1;
unsigned int ui_NoOfChannel = CR_CHAN(insn->chanspec); /* get the channel */
@@ -1165,7 +882,9 @@ static int i_APCI1500_WriteDigitalOutput(struct comedi_device *dev,
APCI1500_DIGITAL_OP);
} /* if(data[1]==1) */
else {
- printk("\nSpecified channel not supported\n");
+ dev_warn(dev->hw_dev,
+ "Specified channel not supported\n");
+ return -EINVAL;
} /* else if(data[1]==1) */
} /* elseif(data[1]==0) */
} /* if(data[3]==0) */
@@ -1242,12 +961,15 @@ static int i_APCI1500_WriteDigitalOutput(struct comedi_device *dev,
APCI1500_DIGITAL_OP);
} /* if(data[1]==1) */
else {
- printk("\nSpecified channel not supported\n");
+ dev_warn(dev->hw_dev,
+ "Specified channel not supported\n");
+ return -EINVAL;
} /* else if(data[1]==1) */
} /* elseif(data[1]==0) */
} /* if(data[3]==1); */
else {
- printk("\nSpecified functionality does not exist\n");
+ dev_warn(dev->hw_dev,
+ "Specified functionality does not exist\n");
return -EINVAL;
} /* if else data[3]==1) */
} /* if else data[3]==0) */
@@ -1256,57 +978,23 @@ static int i_APCI1500_WriteDigitalOutput(struct comedi_device *dev,
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ConfigCounterTimerWatchdog(comedi_device
-| *dev,struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data)|
-| |
-+----------------------------------------------------------------------------+
-| Task : Configures The Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer to read status data[0] : 2 APCI1500_1_8_KHZ
-| 1 APCI1500_3_6_KHZ |
-| 0 APCI1500_115_KHZ
-| data[1] : 0 Counter1/Timer1
-| 1 Counter2/Timer2
-| 2 Counter3/Watchdog
-| data[2] : 0 Counter
-| 1 Timer/Watchdog
-| data[3] : This parameter has |
-| two meanings. |
-| - If the counter/timer |
-| is used as a counter |
-| the limit value of |
-| the counter is given |
-| |
-| - If the counter/timer |
-| is used as a timer, |
-| the divider factor |
-| for the output is |
-| given.
-| data[4] : 0 APCI1500_CONTINUOUS
-| 1 APCI1500_SINGLE
-| data[5] : 0 Software Trigger
-| 1 Hardware Trigger
-|
-| data[6] :0 Software gate
-| 1 Hardware gate
-| data[7] :0 Interrupt Disable
-| 1 Interrupt Enable
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the selected timer, counter or watchdog
+ *
+ * data[0] 0 = APCI1500_115_KHZ, 1 = APCI1500_3_6_KHZ, 2 = APCI1500_1_8_KHZ
+ * data[1] 0 = Counter1/Timer1, 1 = Counter2/Timer2, 2 = Counter3/Watchdog
+ * data[2] 0 = Counter, 1 = Timer/Watchdog
+ * data[3] This parameter has two meanings. If the counter/timer is used as
+ * a counter, the limit value of the counter is given. If the counter/timer
+ * is used as a timer, the divider factor for the output is given.
+ * data[4] 0 = APCI1500_CONTINUOUS, 1 = APCI1500_SINGLE
+ * data[5] 0 = Software Trigger, 1 = Hardware Trigger
+ * data[6] 0 = Software gate, 1 = Hardware gate
+ * data[7] 0 = Interrupt Disable, 1 = Interrupt Enable
+ */
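
As a worked example of the data[] layout just documented, a hypothetical caller could program timer 1 as a continuous, software-triggered timer with its interrupt enabled roughly as follows; the subdevice index and the reload value of 2000 are placeholders, not values taken from the patch.

/* Hypothetical user-space sketch: configure Counter1/Timer1 as a continuous
 * timer with its interrupt enabled, following the data[] layout above.
 */
#include <string.h>
#include <comedilib.h>

static int setup_timer1(comedi_t *dev, unsigned int subdev)
{
	comedi_insn insn;
	lsampl_t data[8];

	data[0] = 0;	/* input clock: APCI1500_115_KHZ */
	data[1] = 0;	/* select Counter1/Timer1 */
	data[2] = 1;	/* used as a timer, not a counter */
	data[3] = 2000;	/* divider factor for the output (placeholder) */
	data[4] = 0;	/* APCI1500_CONTINUOUS */
	data[5] = 0;	/* software trigger */
	data[6] = 0;	/* software gate */
	data[7] = 1;	/* enable the timer interrupt */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.n = 8;
	insn.data = data;
	insn.subdev = subdev;

	return comedi_do_insn(dev, &insn);
}
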
+static int apci1500_timer_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_TimerCounterMode, i_MasterConfiguration;
@@ -1319,7 +1007,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
} /* if(data[0]==0||data[0]==1||data[0]==2) */
else {
if (data[0] != 3) {
- printk("\nThe option for input clock selection does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The option for input clock selection does not exist\n");
return -EINVAL;
} /* if(data[0]!=3) */
} /* elseif(data[0]==0||data[0]==1||data[0]==2) */
@@ -1335,7 +1024,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[2] = APCI1500_TIMER;
break;
default:
- printk("\nThis choice is not a timer nor a counter\n");
+ dev_warn(dev->hw_dev,
+ "This choice is not a timer nor a counter\n");
return -EINVAL;
} /* switch(data[2]) */
@@ -1348,139 +1038,110 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[4] = APCI1500_SINGLE;
break;
default:
- printk("\nThis option for single/continuous mode does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This option for single/continuous mode does not exist\n");
return -EINVAL;
} /* switch(data[4]) */
i_TimerCounterMode = data[2] | data[4] | 7;
- /*************************/
/* Test the reload value */
- /*************************/
if ((data[3] >= 0) && (data[3] <= 65535)) {
if (data[7] == APCI1500_ENABLE
|| data[7] == APCI1500_DISABLE) {
- /************************************************/
/* Selects the mode register of timer/counter 1 */
- /************************************************/
outb(APCI1500_RW_CPT_TMR1_MODE_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************/
/* Writes the new mode */
- /***********************/
outb(i_TimerCounterMode,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of timer/counter 1 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR1_TIME_CST_LOW,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************/
/* Writes the low value */
- /*************************/
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of timer/counter 1 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR1_TIME_CST_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**************************/
/* Writes the high value */
- /**************************/
data[3] = data[3] >> 8;
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************/
/* Reads the register */
- /**********************/
i_MasterConfiguration =
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************************************/
/* Enables timer/counter 1 and triggers timer/counter 1 */
- /********************************************************/
i_MasterConfiguration =
i_MasterConfiguration | 0x40;
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************/
/* Writes the new configuration */
- /********************************/
outb(i_MasterConfiguration,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************/
/* Selects the commands register of */
/* timer/counter 1 */
- /****************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************/
/* Disable timer/counter 1 */
- /***************************/
outb(0x0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************/
/* Selects the commands register of */
/* timer/counter 1 */
- /****************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************/
/* Trigger timer/counter 1 */
- /***************************/
outb(0x2,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
} /* if(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
else {
- printk("\nError in selection of interrupt enable or disable\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of interrupt enable or disable\n");
return -EINVAL;
} /* elseif(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
} /* if ((data[3]>= 0) && (data[3] <= 65535)) */
else {
- printk("\nError in selection of reload value\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of reload value\n");
return -EINVAL;
} /* else if ((data[3]>= 0) && (data[3] <= 65535)) */
i_TimerCounterWatchdogInterrupt = data[7];
@@ -1496,7 +1157,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[2] = APCI1500_TIMER;
break;
default:
- printk("\nThis choice is not a timer nor a counter\n");
+ dev_warn(dev->hw_dev,
+ "This choice is not a timer nor a counter\n");
return -EINVAL;
} /* switch(data[2]) */
@@ -1509,7 +1171,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[4] = APCI1500_SINGLE;
break;
default:
- printk("\nThis option for single/continuous mode does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This option for single/continuous mode does not exist\n");
return -EINVAL;
} /* switch(data[4]) */
@@ -1522,7 +1185,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[5] = APCI1500_HARDWARE_TRIGGER;
break;
default:
- printk("\nThis choice for software or hardware trigger does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This choice for software or hardware trigger does not exist\n");
return -EINVAL;
} /* switch(data[5]) */
@@ -1535,140 +1199,111 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[6] = APCI1500_HARDWARE_GATE;
break;
default:
- printk("\nThis choice for software or hardware gate does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This choice for software or hardware gate does not exist\n");
return -EINVAL;
} /* switch(data[6]) */
i_TimerCounterMode = data[2] | data[4] | data[5] | data[6] | 7;
- /*************************/
/* Test the reload value */
- /*************************/
if ((data[3] >= 0) && (data[3] <= 65535)) {
if (data[7] == APCI1500_ENABLE
|| data[7] == APCI1500_DISABLE) {
- /************************************************/
/* Selects the mode register of timer/counter 2 */
- /************************************************/
outb(APCI1500_RW_CPT_TMR2_MODE_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************/
/* Writes the new mode */
- /***********************/
outb(i_TimerCounterMode,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of timer/counter 2 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR2_TIME_CST_LOW,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************/
/* Writes the low value */
- /*************************/
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of timer/counter 2 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR2_TIME_CST_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**************************/
/* Writes the high value */
- /**************************/
data[3] = data[3] >> 8;
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************/
/* Reads the register */
- /**********************/
i_MasterConfiguration =
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************************************/
/* Enables timer/counter 2 and triggers timer/counter 2 */
- /********************************************************/
i_MasterConfiguration =
i_MasterConfiguration | 0x20;
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************/
/* Writes the new configuration */
- /********************************/
outb(i_MasterConfiguration,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************/
/* Selects the commands register of */
/* timer/counter 2 */
- /****************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************/
/* Disable timer/counter 2 */
- /***************************/
outb(0x0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************/
/* Selects the commands register of */
/* timer/counter 2 */
- /****************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************/
/* Trigger timer/counter 1 */
- /***************************/
outb(0x2,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
} /* if(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
else {
- printk("\nError in selection of interrupt enable or disable\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of interrupt enable or disable\n");
return -EINVAL;
} /* elseif(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
} /* if ((data[3]>= 0) && (data[3] <= 65535)) */
else {
- printk("\nError in selection of reload value\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of reload value\n");
return -EINVAL;
} /* else if ((data[3]>= 0) && (data[3] <= 65535)) */
i_TimerCounterWatchdogInterrupt = data[7];
@@ -1684,7 +1319,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[2] = APCI1500_WATCHDOG;
break;
default:
- printk("\nThis choice is not a watchdog nor a counter\n");
+ dev_warn(dev->hw_dev,
+ "This choice is not a watchdog nor a counter\n");
return -EINVAL;
} /* switch(data[2]) */
@@ -1697,7 +1333,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[4] = APCI1500_SINGLE;
break;
default:
- printk("\nThis option for single/continuous mode does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This option for single/continuous mode does not exist\n");
return -EINVAL;
} /* switch(data[4]) */
@@ -1710,146 +1347,109 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[6] = APCI1500_HARDWARE_GATE;
break;
default:
- printk("\nThis choice for software or hardware gate does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This choice for software or hardware gate does not exist\n");
return -EINVAL;
} /* switch(data[6]) */
- /*****************************/
/* Test if used for watchdog */
- /*****************************/
if (data[2] == APCI1500_WATCHDOG) {
- /*****************************/
/* - Enables the output line */
/* - Enables retrigger */
/* - Pulses output */
- /*****************************/
i_TimerCounterMode = data[2] | data[4] | 0x54;
} /* if (data[2] == APCI1500_WATCHDOG) */
else {
i_TimerCounterMode = data[2] | data[4] | data[6] | 7;
} /* elseif (data[2] == APCI1500_WATCHDOG) */
- /*************************/
/* Test the reload value */
- /*************************/
if ((data[3] >= 0) && (data[3] <= 65535)) {
if (data[7] == APCI1500_ENABLE
|| data[7] == APCI1500_DISABLE) {
- /************************************************/
/* Selects the mode register of watchdog/counter 3 */
- /************************************************/
outb(APCI1500_RW_CPT_TMR3_MODE_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************/
/* Writes the new mode */
- /***********************/
outb(i_TimerCounterMode,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of watchdog/counter 3 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR3_TIME_CST_LOW,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************/
/* Writes the low value */
- /*************************/
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of watchdog/counter 3 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR3_TIME_CST_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**************************/
/* Writes the high value */
- /**************************/
data[3] = data[3] >> 8;
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************/
/* Reads the register */
- /**********************/
i_MasterConfiguration =
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************************************/
/* Enables watchdog/counter 3 and triggers watchdog/counter 3 */
- /********************************************************/
i_MasterConfiguration =
i_MasterConfiguration | 0x10;
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************/
/* Writes the new configuration */
- /********************************/
outb(i_MasterConfiguration,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************/
/* Test if COUNTER */
- /********************/
if (data[2] == APCI1500_COUNTER) {
- /*************************************/
/* Selects the command register of */
/* watchdog/counter 3 */
- /*************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************************/
/* Disable the watchdog/counter 3 and starts it */
- /*************************************************/
outb(0x0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************/
/* Selects the command register of */
/* watchdog/counter 3 */
- /*************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************************/
/* Trigger the watchdog/counter 3 and starts it */
- /*************************************************/
outb(0x2,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -1858,12 +1458,14 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
} /* if(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
else {
- printk("\nError in selection of interrupt enable or disable\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of interrupt enable or disable\n");
return -EINVAL;
} /* elseif(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
} /* if ((data[3]>= 0) && (data[3] <= 65535)) */
else {
- printk("\nError in selection of reload value\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of reload value\n");
return -EINVAL;
} /* else if ((data[3]>= 0) && (data[3] <= 65535)) */
i_TimerCounterWatchdogInterrupt = data[7];
@@ -1871,44 +1473,25 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
break;
default:
- printk("\nThe specified counter\timer option does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified counter/timer option does not exist\n");
+ return -EINVAL;
} /* switch(data[1]) */
i_CounterLogic = data[2];
return insn->n;
}
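The hunks above all drive the Z8536 through a single control port: one outb() selects a register index, and the next inb()/outb() on the same port reads or writes that register (the master-configuration read-modify-write that ORs in bit 0x20 is a typical case). The following standalone sketch models only that access discipline; the fake register file, the ctrl_outb()/ctrl_inb() helpers and the assumed index 0x01 for the master configuration register are illustration only, not part of the driver.

#include <stdio.h>

static unsigned char z8536_regs[256];	/* fake register file */
static int selected = -1;		/* register index latched by the last select */

static void ctrl_outb(unsigned char val)
{
	if (selected < 0) {
		selected = val;			/* first write selects a register */
	} else {
		z8536_regs[selected] = val;	/* second write stores the data */
		selected = -1;
	}
}

static unsigned char ctrl_inb(void)
{
	unsigned char val = z8536_regs[selected];	/* read the selected register */

	selected = -1;
	return val;
}

int main(void)
{
	unsigned char cfg;

	/* read-modify-write of the master configuration register, as done
	 * above to enable and trigger timer/counter 2 (bit 0x20) */
	ctrl_outb(0x01);	/* assumed index of the master configuration register */
	cfg = ctrl_inb();
	cfg |= 0x20;
	ctrl_outb(0x01);
	ctrl_outb(cfg);

	printf("master configuration is now 0x%02x\n", z8536_regs[0x01]);
	return 0;
}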
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_StartStopTriggerTimerCounterWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s,
-| struct comedi_insn *insn,unsigned int *data); |
-+----------------------------------------------------------------------------+
-| Task : Start / Stop or trigger the timer counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer to read status |
-| data[0] : 0 Counter1/Timer1
-| 1 Counter2/Timer2
-| 2 Counter3/Watchdog
-| data[1] : 0 start
-| 1 stop
-| 2 Trigger
-| data[2] : 0 Counter
-| 1 Timer/Watchdog
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Start, stop, or trigger the timer/counter or watchdog
+ *
+ * data[0] 0 = Counter1/Timer1, 1 = Counter2/Timer2, 2 = Counter3/Watchdog
+ * data[1] 0 = Start, 1 = Stop, 2 = Trigger
+ * data[2] 0 = Counter, 1 = Timer/Watchdog
+ */
+static int apci1500_timer_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_CommandAndStatusValue;
@@ -1924,13 +1507,9 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
else {
i_CommandAndStatusValue = 0xE4; /* disable the interrupt */
} /* elseif(i_TimerCounterWatchdogInterrupt==1) */
- /**************************/
/* Starts timer/counter 1 */
- /**************************/
i_TimerCounter1Enabled = 1;
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -1939,20 +1518,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter1Init==1) */
else {
- printk("\nCounter/Timer1 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Counter/Timer1 not configured\n");
return -EINVAL;
}
break;
case STOP:
- /**************************/
/* Stop timer/counter 1 */
- /**************************/
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -1965,23 +1541,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
case TRIGGER:
if (i_TimerCounter1Init == 1) {
if (i_TimerCounter1Enabled == 1) {
- /************************/
/* Set Trigger and gate */
- /************************/
i_CommandAndStatusValue = 0x6;
} /* if( i_TimerCounter1Enabled==1) */
else {
- /***************/
/* Set Trigger */
- /***************/
i_CommandAndStatusValue = 0x2;
} /* elseif(i_TimerCounter1Enabled==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -1990,13 +1560,15 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter1Init==1) */
else {
- printk("\nCounter/Timer1 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Counter/Timer1 not configured\n");
return -EINVAL;
}
break;
default:
- printk("\nThe specified option for start/stop/trigger does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified option for start/stop/trigger does not exist\n");
return -EINVAL;
} /* switch(data[1]) */
break;
@@ -2011,13 +1583,9 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
else {
i_CommandAndStatusValue = 0xE4; /* disable the interrupt */
} /* elseif(i_TimerCounterWatchdogInterrupt==1) */
- /**************************/
/* Starts timer/counter 2 */
- /**************************/
i_TimerCounter2Enabled = 1;
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2026,20 +1594,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter2Init==1) */
else {
- printk("\nCounter/Timer2 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Counter/Timer2 not configured\n");
return -EINVAL;
}
break;
case STOP:
- /**************************/
/* Stop timer/counter 2 */
- /**************************/
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2051,23 +1616,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
case TRIGGER:
if (i_TimerCounter2Init == 1) {
if (i_TimerCounter2Enabled == 1) {
- /************************/
/* Set Trigger and gate */
- /************************/
i_CommandAndStatusValue = 0x6;
} /* if( i_TimerCounter2Enabled==1) */
else {
- /***************/
/* Set Trigger */
- /***************/
i_CommandAndStatusValue = 0x2;
} /* elseif(i_TimerCounter2Enabled==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2076,12 +1635,14 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter2Init==1) */
else {
- printk("\nCounter/Timer2 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Counter/Timer2 not configured\n");
return -EINVAL;
}
break;
default:
- printk("\nThe specified option for start/stop/trigger does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified option for start/stop/trigger does not exist\n");
return -EINVAL;
} /* switch(data[1]) */
break;
@@ -2096,13 +1657,9 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
else {
i_CommandAndStatusValue = 0xE4; /* disable the interrupt */
} /* elseif(i_TimerCounterWatchdogInterrupt==1) */
- /**************************/
/* Starts Watchdog/counter 3 */
- /**************************/
i_WatchdogCounter3Enabled = 1;
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2112,20 +1669,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
} /* if( i_WatchdogCounter3init==1) */
else {
- printk("\nWatchdog/Counter3 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Watchdog/Counter3 not configured\n");
return -EINVAL;
}
break;
case STOP:
- /**************************/
/* Stop Watchdog/counter 3 */
- /**************************/
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2140,23 +1694,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
case 0: /* triggering counter 3 */
if (i_WatchdogCounter3Init == 1) {
if (i_WatchdogCounter3Enabled == 1) {
- /************************/
/* Set Trigger and gate */
- /************************/
i_CommandAndStatusValue = 0x6;
} /* if( i_WatchdogCounter3Enabled==1) */
else {
- /***************/
/* Set Trigger */
- /***************/
i_CommandAndStatusValue = 0x2;
} /* elseif(i_WatchdogCounter3Enabled==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2165,7 +1713,8 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_WatchdogCounter3Init==1) */
else {
- printk("\nCounter3 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Counter3 not configured\n");
return -EINVAL;
}
break;
@@ -2173,9 +1722,7 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
/* triggering Watchdog 3 */
if (i_WatchdogCounter3Init == 1) {
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2184,55 +1731,40 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_WatchdogCounter3Init==1) */
else {
- printk("\nWatchdog 3 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Watchdog 3 not configured\n");
return -EINVAL;
}
break;
default:
- printk("\nWrong choice of watchdog/counter3\n");
+ dev_warn(dev->hw_dev,
+ "Wrong choice of watchdog/counter3\n");
return -EINVAL;
} /* switch(data[2]) */
break;
default:
- printk("\nThe specified option for start/stop/trigger does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified option for start/stop/trigger does not exist\n");
return -EINVAL;
} /* switch(data[1]) */
break;
default:
- printk("\nThe specified choice for counter/watchdog/timer does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified choice for counter/watchdog/timer does not exist\n");
return -EINVAL;
} /* switch(data[0]) */
return insn->n;
}
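The new block comment above spells out the data[] convention for apci1500_timer_write(). As a rough illustration of how a user-space caller might exercise it through comedilib, here is a hypothetical sketch: the device path /dev/comedi0, the subdevice index 2, and the assumption that the addi-data glue routes this handler through INSN_WRITE are all guesses, not facts taken from this patch.

#include <stdio.h>
#include <string.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev;
	lsampl_t data[3];
	comedi_insn insn;

	dev = comedi_open("/dev/comedi0");	/* device path is an assumption */
	if (!dev) {
		comedi_perror("comedi_open");
		return 1;
	}

	/* data[] layout documented above the handler:
	 * data[0]: 1 = Counter2/Timer2, data[1]: 2 = Trigger, data[2]: 0 = Counter */
	data[0] = 1;
	data[1] = 2;
	data[2] = 0;

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_WRITE;		/* assumed instruction type for this handler */
	insn.n = 3;
	insn.data = data;
	insn.subdev = 2;		/* assumed timer subdevice index */
	insn.chanspec = 0;

	if (comedi_do_insn(dev, &insn) < 0)
		comedi_perror("comedi_do_insn");

	comedi_close(dev);
	return 0;
}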
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ReadCounterTimerWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s,struct comedi_insn *insn,
-| unsigned int *data); |
-+----------------------------------------------------------------------------+
-| Task : Read The Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer to read status |
-| data[0] : 0 Counter1/Timer1
-| 1 Counter2/Timer2
-| 2 Counter3/Watchdog
-|
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Read the selected timer/counter or watchdog value
+ *
+ * data[0] 0 = Counter1/Timer1, 1 = Counter2/Timer2, 2 = Counter3/Watchdog
+ */
+static int apci1500_timer_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_CommandAndStatusValue;
@@ -2242,23 +1774,17 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
/* Read counter/timer1 */
if (i_TimerCounter1Init == 1) {
if (i_TimerCounter1Enabled == 1) {
- /************************/
/* Set RCC and gate */
- /************************/
i_CommandAndStatusValue = 0xC;
} /* if( i_TimerCounter1Init==1) */
else {
- /***************/
/* Set RCC */
- /***************/
i_CommandAndStatusValue = 0x8;
} /* elseif(i_TimerCounter1Init==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2266,9 +1792,7 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************/
/* Selects the counter register (high) */
- /***************************************/
outb(APCI1500_R_CPT_TMR1_VALUE_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2285,7 +1809,8 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter1Init==1) */
else {
- printk("\nTimer/Counter1 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Timer/Counter1 not configured\n");
return -EINVAL;
} /* elseif( i_TimerCounter1Init==1) */
break;
@@ -2293,23 +1818,17 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
/* Read counter/timer2 */
if (i_TimerCounter2Init == 1) {
if (i_TimerCounter2Enabled == 1) {
- /************************/
/* Set RCC and gate */
- /************************/
i_CommandAndStatusValue = 0xC;
} /* if( i_TimerCounter2Init==1) */
else {
- /***************/
/* Set RCC */
- /***************/
i_CommandAndStatusValue = 0x8;
} /* elseif(i_TimerCounter2Init==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2317,9 +1836,7 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************/
/* Selects the counter register (high) */
- /***************************************/
outb(APCI1500_R_CPT_TMR2_VALUE_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2336,7 +1853,8 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter2Init==1) */
else {
- printk("\nTimer/Counter2 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Timer/Counter2 not configured\n");
return -EINVAL;
} /* elseif( i_TimerCounter2Init==1) */
break;
@@ -2344,23 +1862,17 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
/* Read counter/watchdog2 */
if (i_WatchdogCounter3Init == 1) {
if (i_WatchdogCounter3Enabled == 1) {
- /************************/
/* Set RCC and gate */
- /************************/
i_CommandAndStatusValue = 0xC;
} /* if( i_TimerCounter2Init==1) */
else {
- /***************/
/* Set RCC */
- /***************/
i_CommandAndStatusValue = 0x8;
} /* elseif(i_WatchdogCounter3Init==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2368,9 +1880,7 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************/
/* Selects the counter register (high) */
- /***************************************/
outb(APCI1500_R_CPT_TMR3_VALUE_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2387,12 +1897,14 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_WatchdogCounter3Init==1) */
else {
- printk("\nWatchdogCounter3 not configured\n");
+ dev_warn(dev->hw_dev,
+ "WatchdogCounter3 not configured\n");
return -EINVAL;
} /* elseif( i_WatchdogCounter3Init==1) */
break;
default:
- printk("\nThe choice of timer/counter/watchdog does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The choice of timer/counter/watchdog does not exist\n");
return -EINVAL;
} /* switch(data[0]) */
@@ -2400,31 +1912,15 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
}
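For reference, the read branches above first write a command byte (0xC to set RCC plus gate when the channel is enabled, 0x8 for RCC alone) and then fetch the frozen count one byte at a time; the elided context combines the two bytes roughly as shown below. A minimal sketch with faked inb() results, kept only to make the arithmetic explicit:

#include <stdio.h>

int main(void)
{
	int enabled = 1;
	unsigned int cmd = enabled ? 0xC : 0x8;	/* RCC + gate vs. RCC only, as above */
	unsigned int high = 0x12;	/* pretend inb(...VALUE_HIGH) returned this */
	unsigned int low = 0x34;	/* pretend inb(...VALUE_LOW) returned this */
	unsigned int count = (high << 8) | low;

	printf("command byte 0x%X, counter value 0x%04X (%u)\n", cmd, count, count);
	return 0;
}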
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ReadInterruptMask |
-| (struct comedi_device *dev,struct comedi_subdevice *s,struct comedi_insn *insn,
-| unsigned int *data); |
-+----------------------------------------------------------------------------+
-| Task : Read the interrupt mask |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer to read status |
-
-
-+----------------------------------------------------------------------------+
-| Output Parameters : -- data[0]:The interrupt mask value data[1]:Channel no
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ReadInterruptMask(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Read the interrupt mask
+ *
+ * data[0] The interrupt mask value
+ * data[1] Channel Number
+ */
+static int apci1500_timer_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
data[0] = i_InterruptMask;
data[1] = i_InputChannel;
@@ -2433,31 +1929,12 @@ static int i_APCI1500_ReadInterruptMask(struct comedi_device *dev,
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ConfigureInterrupt |
-| (struct comedi_device *dev,struct comedi_subdevice *s,struct comedi_insn *insn,
-| unsigned int *data); |
-+----------------------------------------------------------------------------+
-| Task : Configures the interrupt registers |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer |
-|
-
-+----------------------------------------------------------------------------+
-| Output Parameters : --
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ConfigureInterrupt(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the interrupt registers
+ */
+static int apci1500_do_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ui_Status;
@@ -2474,140 +1951,101 @@ static int i_APCI1500_ConfigureInterrupt(struct comedi_device *dev,
i_Constant = 0x00;
} /* if{data[0]==0) */
else {
- printk("\nThe parameter passed to driver is in error for enabling the voltage interrupt\n");
+ dev_warn(dev->hw_dev,
+ "The parameter passed to driver is in error for enabling the voltage interrupt\n");
return -EINVAL;
} /* else if(data[0]==0) */
} /* elseif(data[0]==1) */
- /*****************************************************/
/* Selects the mode specification register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*********************************************/
/* Writes the new configuration (APCI1500_OR) */
- /*********************************************/
i_RegValue = (i_RegValue & 0xF9) | APCI1500_OR;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************/
/* Authorises the interrupt on the board */
- /*****************************************/
outb(0xC0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the pattern polarity register of port B */
- /***************************************************/
outb(APCI1500_RW_PORT_B_PATTERN_POLARITY,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(i_Constant, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the pattern transition register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_PATTERN_TRANSITION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(i_Constant, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************************/
/* Selects the pattern mask register of port B */
- /***********************************************/
outb(APCI1500_RW_PORT_B_PATTERN_MASK,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(i_Constant, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of port A */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of port B */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of timer 1 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 1 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of timer 2 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 2 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of timer 3 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 3 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Authorizes the main interrupt on the board */
- /**********************************************/
outb(0xD0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***************************/
/* Enables the PCI interrupt */
- /*****************************/
outl(0x3000, devpriv->i_IobaseAmcc + 0x38);
ui_Status = inl(devpriv->i_IobaseAmcc + 0x10);
ui_Status = inl(devpriv->i_IobaseAmcc + 0x38);
@@ -2616,24 +2054,7 @@ static int i_APCI1500_ConfigureInterrupt(struct comedi_device *dev,
return insn->n;
}
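Both this function and the interrupt handler below acknowledge a Z8536 channel by rewriting its command-and-status register as (value & 0x0F) | 0x20. A tiny sketch of just that bit manipulation; reading 0x20 as the chip's "clear IP and IUS" command comes from the Z8536 datasheet rather than from anything in this patch.

#include <stdio.h>

static unsigned char ack_value(unsigned char reg)
{
	/* keep the low status nibble, replace the command field with 0x20 */
	return (reg & 0x0F) | 0x20;
}

int main(void)
{
	unsigned char before = 0x6C;	/* example value with IP and IUS pending */

	printf("0x%02X -> 0x%02X\n", before, ack_value(before));
	return 0;
}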
-/*
-+----------------------------------------------------------------------------+
-| Function Name : static void v_APCI1500_Interrupt |
-| (int irq , void *d) |
-+----------------------------------------------------------------------------+
-| Task : Interrupt handler |
-+----------------------------------------------------------------------------+
-| Input Parameters : int irq : irq number |
-| void *d : void pointer |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static void v_APCI1500_Interrupt(int irq, void *d)
+static void apci1500_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
@@ -2642,44 +2063,28 @@ static void v_APCI1500_Interrupt(int irq, void *d)
int i_RegValue = 0;
i_InterruptMask = 0;
- /***********************************/
/* Read the board interrupt status */
- /***********************************/
ui_InterruptStatus = inl(devpriv->i_IobaseAmcc + 0x38);
- /***************************************/
/* Test if board generated a interrupt */
- /***************************************/
if ((ui_InterruptStatus & 0x800000) == 0x800000) {
- /************************/
/* Disable all Interrupt */
- /************************/
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
/* outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,devpriv->iobase+APCI1500_Z8536_CONTROL_REGISTER); */
- /**********************************************/
/* Disables the main interrupt on the board */
- /**********************************************/
/* outb(0x00,devpriv->iobase+APCI1500_Z8536_CONTROL_REGISTER); */
- /*****************************************************/
/* Selects the command and status register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if ((i_RegValue & 0x60) == 0x60) {
- /*****************************************************/
/* Selects the command and status register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of port A */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue,
devpriv->iobase +
@@ -2693,9 +2098,7 @@ static void v_APCI1500_Interrupt(int irq, void *d)
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the interrupt vector register of port A */
- /***************************************************/
outb(APCI1500_RW_PORT_A_INTERRUPT_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2711,45 +2114,32 @@ static void v_APCI1500_Interrupt(int irq, void *d)
} /* elseif(i_Logic==APCI1500_OR_PRIORITY) */
} /* if ((i_RegValue & 0x60) == 0x60) */
- /*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if ((i_RegValue & 0x60) == 0x60) {
- /*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of port B */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- printk("\n\n\n");
- /****************/
/* Reads port B */
- /****************/
i_RegValue =
inb((unsigned int) devpriv->iobase +
APCI1500_Z8536_PORT_B);
i_RegValue = i_RegValue & 0xC0;
- /**************************************/
/* Tests if this is an external error */
- /**************************************/
if (i_RegValue) {
/* Disable the interrupt */
- /*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outl(0x0, devpriv->i_IobaseAmcc + 0x38);
if (i_RegValue & 0x80) {
@@ -2767,46 +2157,34 @@ static void v_APCI1500_Interrupt(int irq, void *d)
} /* if (i_RegValue) */
} /* if ((i_RegValue & 0x60) == 0x60) */
- /*****************************************************/
/* Selects the command and status register of timer 1 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if ((i_RegValue & 0x60) == 0x60) {
- /*****************************************************/
/* Selects the command and status register of timer 1 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 1 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
i_InterruptMask = i_InterruptMask | 4;
} /* if ((i_RegValue & 0x60) == 0x60) */
- /*****************************************************/
/* Selects the command and status register of timer 2 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if ((i_RegValue & 0x60) == 0x60) {
- /*****************************************************/
/* Selects the command and status register of timer 2 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 2 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue,
devpriv->iobase +
@@ -2814,23 +2192,17 @@ static void v_APCI1500_Interrupt(int irq, void *d)
i_InterruptMask = i_InterruptMask | 8;
} /* if ((i_RegValue & 0x60) == 0x60) */
- /*****************************************************/
/* Selects the command and status register of timer 3 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if ((i_RegValue & 0x60) == 0x60) {
- /*****************************************************/
/* Selects the command and status register of timer 3 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 3 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue,
devpriv->iobase +
@@ -2844,42 +2216,23 @@ static void v_APCI1500_Interrupt(int irq, void *d)
} /* if ((i_RegValue & 0x60) == 0x60) */
send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- /***********************/
/* Enable all Interrupts */
- /***********************/
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Authorizes the main interrupt on the board */
- /**********************************************/
outb(0xD0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
} /* if ((ui_InterruptStatus & 0x800000) == 0x800000) */
else {
- printk("\nInterrupt from unknown source\n");
+ dev_warn(dev->hw_dev,
+ "Interrupt from unknown source\n");
} /* else if ((ui_InterruptStatus & 0x800000) == 0x800000) */
return;
}
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_Reset(struct comedi_device *dev) | |
-+----------------------------------------------------------------------------+
-| Task :resets all the registers |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_Reset(struct comedi_device *dev)
+static int apci1500_reset(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
int i_DummyRead = 0;
@@ -2898,9 +2251,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
i_TimerCounter2Enabled = 0;
i_WatchdogCounter3Enabled = 0;
- /******************/
/* Software reset */
- /******************/
i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -2908,16 +2259,12 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
outb(1, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the master configuration control register */
- /*****************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0xF4, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the mode specification register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -2949,9 +2296,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
/* Deletes the register */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the mode specification register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -2981,9 +2326,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
/* Deletes the register */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the data path polarity register of port C */
- /*****************************************************/
outb(APCI1500_RW_PORT_C_DATA_PCITCH_POLARITY,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* High level of port C means 1 */
@@ -2998,9 +2341,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes it */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 1 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -3010,9 +2351,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates the interrupt management of timer 1 */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 2 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -3022,9 +2361,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates Timer 2 interrupt management: */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 3 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -3034,71 +2371,43 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates interrupt management of timer 3: */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes all interrupts */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* reset all the digital outputs */
outw(0x0, devpriv->i_IobaseAddon + APCI1500_DIGITAL_OP);
-/*******************************/
/* Disable the board interrupt */
-/*******************************/
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/*****************************************************/
/* Selects the command and status register of timer 1 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/*****************************************************/
/* Selects the command and status register of timer 2 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/*****************************************************/
/* Selects the command and status register of timer 3*/
-/*****************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
return 0;
}
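The tail of apci1500_reset() repeats the same two-step sequence for each port and timer channel: select its command-and-status register, then write 0x00 to deactivate its interrupts. Purely as an illustration of that pattern (not a change being proposed here), the repetition could be expressed table-driven; outb() is stubbed so the sketch runs without hardware, the register index values are placeholders, and only the macro names in the comments are the driver's.

#include <stdio.h>

#define Z8536_CONTROL_OFFSET 0x00	/* placeholder for APCI1500_Z8536_CONTROL_REGISTER */

static void outb(unsigned char val, unsigned long port)
{
	printf("outb(0x%02X, 0x%lX)\n", val, port);
}

int main(void)
{
	/* placeholder indices; the real APCI1500_RW_* values live in the header */
	static const unsigned char cmd_status_regs[] = {
		0x01,	/* PORT_A_COMMAND_AND_STATUS */
		0x02,	/* PORT_B_COMMAND_AND_STATUS */
		0x03,	/* CPT_TMR1_CMD_STATUS */
		0x04,	/* CPT_TMR2_CMD_STATUS */
		0x05,	/* CPT_TMR3_CMD_STATUS */
	};
	unsigned long iobase = 0x1000;	/* fake base address */
	unsigned int i;

	for (i = 0; i < sizeof(cmd_status_regs) / sizeof(cmd_status_regs[0]); i++) {
		outb(cmd_status_regs[i], iobase + Z8536_CONTROL_OFFSET);
		outb(0x00, iobase + Z8536_CONTROL_OFFSET);
	}
	return 0;
}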
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
index 84668544f52d..9c86b02eb6da 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
@@ -1,74 +1,32 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +19(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-1564 | Compiler : GCC |
- | Module name : hwdrv_apci1564.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-1564 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
-
-/********* Definitions for APCI-1564 card *****/
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +19(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ */
#define APCI1564_ADDRESS_RANGE 128
-/* DIGITAL INPUT-OUTPUT DEFINE */
-/* Input defines */
-#define APCI1564_DIGITAL_IP 0x04
-#define APCI1564_DIGITAL_IP_INTERRUPT_MODE1 4
-#define APCI1564_DIGITAL_IP_INTERRUPT_MODE2 8
-#define APCI1564_DIGITAL_IP_IRQ 16
-
-/* Output defines */
-#define APCI1564_DIGITAL_OP 0x18
-#define APCI1564_DIGITAL_OP_RW 0
-#define APCI1564_DIGITAL_OP_INTERRUPT 4
-#define APCI1564_DIGITAL_OP_IRQ 12
-
/* Digital Input IRQ Function Selection */
#define ADDIDATA_OR 0
#define ADDIDATA_AND 1
-/* Digital Input Interrupt Status */
-#define APCI1564_DIGITAL_IP_INTERRUPT_STATUS 12
-
-/* Digital Output Interrupt Status */
-#define APCI1564_DIGITAL_OP_INTERRUPT_STATUS 8
-
/* Digital Input Interrupt Enable Disable. */
#define APCI1564_DIGITAL_IP_INTERRUPT_ENABLE 0x4
#define APCI1564_DIGITAL_IP_INTERRUPT_DISABLE 0xfffffffb
@@ -80,98 +38,91 @@ This program is distributed in the hope that it will be useful, but WITHOUT ANY
#define APCI1564_DIGITAL_OP_CC_INTERRUPT_DISABLE 0xfffffffd
/* TIMER COUNTER WATCHDOG DEFINES */
-
#define ADDIDATA_TIMER 0
#define ADDIDATA_COUNTER 1
#define ADDIDATA_WATCHDOG 2
-#define APCI1564_DIGITAL_OP_WATCHDOG 0x28
-#define APCI1564_TIMER 0x48
-#define APCI1564_COUNTER1 0x0
-#define APCI1564_COUNTER2 0x20
-#define APCI1564_COUNTER3 0x40
-#define APCI1564_COUNTER4 0x60
-#define APCI1564_TCW_SYNC_ENABLEDISABLE 0
-#define APCI1564_TCW_RELOAD_VALUE 4
-#define APCI1564_TCW_TIMEBASE 8
-#define APCI1564_TCW_PROG 12
-#define APCI1564_TCW_TRIG_STATUS 16
-#define APCI1564_TCW_IRQ 20
-#define APCI1564_TCW_WARN_TIMEVAL 24
-#define APCI1564_TCW_WARN_TIMEBASE 28
+#define APCI1564_COUNTER1 0
+#define APCI1564_COUNTER2 1
+#define APCI1564_COUNTER3 2
+#define APCI1564_COUNTER4 3
+
+/*
+ * devpriv->i_IobaseAmcc Register Map
+ */
+#define APCI1564_DI_REG 0x04
+#define APCI1564_DI_INT_MODE1_REG 0x08
+#define APCI1564_DI_INT_MODE2_REG 0x0c
+#define APCI1564_DI_INT_STATUS_REG 0x10
+#define APCI1564_DI_IRQ_REG 0x14
+#define APCI1564_DO_REG 0x18
+#define APCI1564_DO_INT_CTRL_REG 0x1c
+#define APCI1564_DO_INT_STATUS_REG 0x20
+#define APCI1564_DO_IRQ_REG 0x24
+#define APCI1564_WDOG_REG 0x28
+#define APCI1564_WDOG_RELOAD_REG 0x2c
+#define APCI1564_WDOG_TIMEBASE_REG 0x30
+#define APCI1564_WDOG_CTRL_REG 0x34
+#define APCI1564_WDOG_STATUS_REG 0x38
+#define APCI1564_WDOG_IRQ_REG 0x3c
+#define APCI1564_WDOG_WARN_TIMEVAL_REG 0x40
+#define APCI1564_WDOG_WARN_TIMEBASE_REG 0x44
+#define APCI1564_TIMER_REG 0x48
+#define APCI1564_TIMER_RELOAD_REG 0x4c
+#define APCI1564_TIMER_TIMEBASE_REG 0x50
+#define APCI1564_TIMER_CTRL_REG 0x54
+#define APCI1564_TIMER_STATUS_REG 0x58
+#define APCI1564_TIMER_IRQ_REG 0x5c
+#define APCI1564_TIMER_WARN_TIMEVAL_REG 0x60
+#define APCI1564_TIMER_WARN_TIMEBASE_REG 0x64
+
+/*
+ * devpriv->iobase Register Map
+ */
+#define APCI1564_TCW_REG(x) (0x00 + ((x) * 0x20))
+#define APCI1564_TCW_RELOAD_REG(x) (0x04 + ((x) * 0x20))
+#define APCI1564_TCW_TIMEBASE_REG(x) (0x08 + ((x) * 0x20))
+#define APCI1564_TCW_CTRL_REG(x) (0x0c + ((x) * 0x20))
+#define APCI1564_TCW_STATUS_REG(x) (0x10 + ((x) * 0x20))
+#define APCI1564_TCW_IRQ_REG(x) (0x14 + ((x) * 0x20))
+#define APCI1564_TCW_WARN_TIMEVAL_REG(x) (0x18 + ((x) * 0x20))
+#define APCI1564_TCW_WARN_TIMEBASE_REG(x) (0x1c + ((x) * 0x20))
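The per-channel macros above replace the old flat APCI1564_TCW_* offsets: each of the four timer/counter/watchdog blocks is 0x20 bytes wide, so a register's address is its offset within the block plus 0x20 times the counter index. A quick standalone check of that arithmetic (the defines are copied from this hunk; main() only prints the results):

#include <stdio.h>

#define APCI1564_COUNTER1 0
#define APCI1564_COUNTER2 1
#define APCI1564_COUNTER3 2
#define APCI1564_COUNTER4 3

#define APCI1564_TCW_REG(x)		(0x00 + ((x) * 0x20))
#define APCI1564_TCW_RELOAD_REG(x)	(0x04 + ((x) * 0x20))
#define APCI1564_TCW_CTRL_REG(x)	(0x0c + ((x) * 0x20))
#define APCI1564_TCW_STATUS_REG(x)	(0x10 + ((x) * 0x20))

int main(void)
{
	printf("counter 1 ctrl at 0x%02x\n", APCI1564_TCW_CTRL_REG(APCI1564_COUNTER1));
	printf("counter 3 ctrl at 0x%02x\n", APCI1564_TCW_CTRL_REG(APCI1564_COUNTER3));
	printf("counter 4 reload at 0x%02x\n", APCI1564_TCW_RELOAD_REG(APCI1564_COUNTER4));
	return 0;
}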
/* Global variables */
-static unsigned int ui_InterruptStatus_1564 = 0;
+static unsigned int ui_InterruptStatus_1564;
static unsigned int ui_InterruptData, ui_Type;
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_ConfigDigitalInput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures the digital input Subdevice |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 1 Enable Digital Input Interrupt |
-| 0 Disable Digital Input Interrupt |
-| data[1] : 0 ADDIDATA Interrupt OR LOGIC |
-| : 1 ADDIDATA Interrupt AND LOGIC |
-| data[2] : Interrupt mask for the mode 1 |
-| data[3] : Interrupt mask for the mode 2 |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1564_ConfigDigitalInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the digital input subdevice
+ *
+ * data[0] 1 = Enable interrupt, 0 = Disable interrupt
+ * data[1] 0 = ADDIDATA Interrupt OR LOGIC, 1 = ADDIDATA Interrupt AND LOGIC
+ * data[2] Interrupt mask for the mode 1
+ * data[3] Interrupt mask for the mode 2
+ */
+static int apci1564_di_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
devpriv->tsk_Current = current;
- /*******************************/
+
/* Set the digital input logic */
- /*******************************/
if (data[0] == ADDIDATA_ENABLE) {
data[2] = data[2] << 4;
data[3] = data[3] << 4;
- outl(data[2],
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_INTERRUPT_MODE1);
- outl(data[3],
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_INTERRUPT_MODE2);
- if (data[1] == ADDIDATA_OR) {
- outl(0x4,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
- } /* if (data[1] == ADDIDATA_OR) */
- else {
- outl(0x6,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
- } /* else if (data[1] == ADDIDATA_OR) */
- } /* if (data[0] == ADDIDATA_ENABLE) */
- else {
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_INTERRUPT_MODE1);
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_INTERRUPT_MODE2);
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
- } /* else if (data[0] == ADDIDATA_ENABLE) */
+ outl(data[2], devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE1_REG);
+ outl(data[3], devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE2_REG);
+ if (data[1] == ADDIDATA_OR)
+ outl(0x4, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ else
+ outl(0x6, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ } else {
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE1_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE2_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ }
return insn->n;
}
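For context, a hedged user-space sketch of exercising this config instruction through comedilib; the device node, the DI subdevice index and the mask values are assumptions, not taken from the patch:

/* Illustration only: enable the DI interrupt with OR logic via INSN_CONFIG. */
#include <string.h>
#include <comedilib.h>

static int apci1564_di_irq_enable_example(const char *node)
{
	comedi_t *it = comedi_open(node);	/* e.g. "/dev/comedi0" (assumed) */
	lsampl_t data[4] = { 1, 0, 0x3, 0x0 };	/* enable, OR logic, mode1/mode2 masks */
	comedi_insn insn;

	if (!it)
		return -1;

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.n = 4;
	insn.data = data;
	insn.subdev = 0;	/* assumed DI subdevice index */

	comedi_do_insn(it, &insn);
	comedi_close(it);
	return 0;
}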
@@ -183,40 +134,21 @@ static int apci1564_di_insn_bits(struct comedi_device *dev,
{
struct addi_private *devpriv = dev->private;
- data[1] = inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP);
+ data[1] = inl(devpriv->i_IobaseAmcc + APCI1564_DI_REG);
return insn->n;
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_ConfigDigitalOutput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures The Digital Output Subdevice. |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[1] : 1 Enable VCC Interrupt |
-| 0 Disable VCC Interrupt |
-| data[2] : 1 Enable CC Interrupt |
-| 0 Disable CC Interrupt |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1564_ConfigDigitalOutput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the digital output subdevice.
+ *
+ * data[0] 1 = Enable, 0 = Disable the output memory (must be 0 or 1)
+ * data[1] 0 = Disable VCC Interrupt, 1 = Enable VCC Interrupt
+ * data[2] 0 = Disable CC Interrupt, 1 = Enable CC Interrupt
+ */
+static int apci1564_do_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ul_Command = 0;
@@ -225,31 +157,25 @@ static int i_APCI1564_ConfigDigitalOutput(struct comedi_device *dev,
comedi_error(dev,
"Not a valid Data !!! ,Data should be 1 or 0\n");
return -EINVAL;
- } /* if ((data[0]!=0) && (data[0]!=1)) */
- if (data[0]) {
+ }
+
+ if (data[0])
devpriv->b_OutputMemoryStatus = ADDIDATA_ENABLE;
- } /* if (data[0]) */
- else {
+ else
devpriv->b_OutputMemoryStatus = ADDIDATA_DISABLE;
- } /* else if (data[0]) */
- if (data[1] == ADDIDATA_ENABLE) {
+
+ if (data[1] == ADDIDATA_ENABLE)
ul_Command = ul_Command | 0x1;
- } /* if (data[1] == ADDIDATA_ENABLE) */
- else {
+ else
ul_Command = ul_Command & 0xFFFFFFFE;
- } /* else if (data[1] == ADDIDATA_ENABLE) */
- if (data[2] == ADDIDATA_ENABLE) {
+
+ if (data[2] == ADDIDATA_ENABLE)
ul_Command = ul_Command | 0x2;
- } /* if (data[2] == ADDIDATA_ENABLE) */
- else {
+ else
ul_Command = ul_Command & 0xFFFFFFFD;
- } /* else if (data[2] == ADDIDATA_ENABLE) */
- outl(ul_Command,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_INTERRUPT);
- ui_InterruptData =
- inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_INTERRUPT);
+
+ outl(ul_Command, devpriv->i_IobaseAmcc + APCI1564_DO_INT_CTRL_REG);
+ ui_InterruptData = inl(devpriv->i_IobaseAmcc + APCI1564_DO_INT_CTRL_REG);
devpriv->tsk_Current = current;
return insn->n;
}
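The digital output configuration follows the same INSN_CONFIG pattern, only the data layout differs. A minimal sketch of the array (values illustrative; the comedilib types and the subdevice index are again assumptions):

/* Illustration only: data layout consumed by apci1564_do_config(). */
lsampl_t do_cfg[3] = {
	1,	/* data[0]: enable the output memory (must be 0 or 1) */
	1,	/* data[1]: enable the VCC interrupt */
	0,	/* data[2]: leave the CC interrupt disabled */
};
/* Wrap it in a comedi_insn with insn = INSN_CONFIG and n = 3, as in the DI sketch above. */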
@@ -261,12 +187,10 @@ static int apci1564_do_insn_bits(struct comedi_device *dev,
{
struct addi_private *devpriv = dev->private;
- s->state = inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_RW);
+ s->state = inl(devpriv->i_IobaseAmcc + APCI1564_DO_REG);
if (comedi_dio_update_state(s, data))
- outl(s->state, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_RW);
+ outl(s->state, devpriv->i_IobaseAmcc + APCI1564_DO_REG);
data[1] = s->state;
@@ -274,39 +198,20 @@ static int apci1564_do_insn_bits(struct comedi_device *dev,
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_ConfigTimerCounterWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures The Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 0 Configure As Timer |
-| 1 Configure As Counter |
-| 2 Configure As Watchdog |
-| data[1] : 1 Enable Interrupt |
-| 0 Disable Interrupt |
-| data[2] : Time Unit |
-| data[3] : Reload Value |
-| data[4] : Timer Mode |
-| data[5] : Timer Counter Watchdog Number|
- data[6] : Counter Direction
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1564_ConfigTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the timer, counter or watchdog
+ *
+ * data[0] Configure as: 0 = Timer, 1 = Counter, 2 = Watchdog
+ * data[1] 1 = Enable Interrupt, 0 = Disable Interrupt
+ * data[2] Time Unit
+ * data[3] Reload Value
+ * data[4] Timer Mode
+ * data[5] Timer Counter Watchdog Number
+ * data[6] Counter Direction
+ */
+static int apci1564_timer_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
@@ -316,89 +221,59 @@ static int i_APCI1564_ConfigTimerCounterWatchdog(struct comedi_device *dev,
devpriv->b_TimerSelectMode = ADDIDATA_WATCHDOG;
/* Disable the watchdog */
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_PROG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_WDOG_CTRL_REG);
/* Loading the Reload value */
- outl(data[3],
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_RELOAD_VALUE);
- } /* if (data[0]==ADDIDATA_WATCHDOG) */
- else if (data[0] == ADDIDATA_TIMER) {
+ outl(data[3], devpriv->i_IobaseAmcc + APCI1564_WDOG_RELOAD_REG);
+ } else if (data[0] == ADDIDATA_TIMER) {
/* First Stop The Timer */
- ul_Command1 =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
+ ul_Command1 = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER + APCI1564_TCW_PROG); /* Stop The Timer */
+ /* Stop The Timer */
+ outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
devpriv->b_TimerSelectMode = ADDIDATA_TIMER;
if (data[1] == 1) {
- outl(0x02, devpriv->i_IobaseAmcc + APCI1564_TIMER + APCI1564_TCW_PROG); /* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_IRQ);
+ /* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
+ outl(0x02, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DO_IRQ_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_WDOG_IRQ_REG);
outl(0x0,
- devpriv->i_IobaseAmcc +
- APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_IRQ);
+ devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER1));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER1 +
- APCI1564_TCW_IRQ);
+ devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER2));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER2 +
- APCI1564_TCW_IRQ);
+ devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER3));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER3 +
- APCI1564_TCW_IRQ);
- outl(0x0,
- devpriv->iobase + APCI1564_COUNTER4 +
- APCI1564_TCW_IRQ);
- } /* if (data[1]==1) */
- else {
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER + APCI1564_TCW_PROG); /* disable Timer interrupt */
- } /* else if (data[1]==1) */
+ devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER4));
+ } else {
+ /* disable Timer interrupt */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ }
/* Loading Timebase */
-
- outl(data[2],
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_TIMEBASE);
+ outl(data[2], devpriv->i_IobaseAmcc + APCI1564_TIMER_TIMEBASE_REG);
/* Loading the Reload value */
- outl(data[3],
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_RELOAD_VALUE);
+ outl(data[3], devpriv->i_IobaseAmcc + APCI1564_TIMER_RELOAD_REG);
- ul_Command1 =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
- ul_Command1 =
- (ul_Command1 & 0xFFF719E2UL) | 2UL << 13UL | 0x10UL;
- outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER + APCI1564_TCW_PROG); /* mode 2 */
- } /* else if (data[0]==ADDIDATA_TIMER) */
- else if (data[0] == ADDIDATA_COUNTER) {
+ ul_Command1 = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ ul_Command1 = (ul_Command1 & 0xFFF719E2UL) | 2UL << 13UL | 0x10UL;
+ /* mode 2 */
+ outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ } else if (data[0] == ADDIDATA_COUNTER) {
devpriv->b_TimerSelectMode = ADDIDATA_COUNTER;
devpriv->b_ModeSelectRegister = data[5];
/* First Stop The Counter */
- ul_Command1 =
- inl(devpriv->iobase + ((data[5] - 1) * 0x20) +
- APCI1564_TCW_PROG);
+ ul_Command1 = inl(devpriv->iobase + APCI1564_TCW_CTRL_REG(data[5] - 1));
ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1, devpriv->iobase + ((data[5] - 1) * 0x20) + APCI1564_TCW_PROG); /* Stop The Timer */
+ /* Stop The Timer */
+ outl(ul_Command1, devpriv->iobase + APCI1564_TCW_CTRL_REG(data[5] - 1));
- /************************/
/* Set the reload value */
- /************************/
- outl(data[3],
- devpriv->iobase + ((data[5] - 1) * 0x20) +
- APCI1564_TCW_RELOAD_VALUE);
+ outl(data[3], devpriv->iobase + APCI1564_TCW_RELOAD_REG(data[5] - 1));
- /******************************/
/* Set the mode : */
/* - Disable the hardware */
/* - Disable the counter mode */
@@ -406,65 +281,36 @@ static int i_APCI1564_ConfigTimerCounterWatchdog(struct comedi_device *dev,
/* - Disable the reset */
/* - Disable the timer mode */
/* - Enable the counter mode */
- /******************************/
+
ul_Command1 =
(ul_Command1 & 0xFFFC19E2UL) | 0x80000UL |
(unsigned int) ((unsigned int) data[4] << 16UL);
- outl(ul_Command1,
- devpriv->iobase + ((data[5] - 1) * 0x20) +
- APCI1564_TCW_PROG);
+ outl(ul_Command1, devpriv->iobase + APCI1564_TCW_CTRL_REG(data[5] - 1));
/* Enable or Disable Interrupt */
ul_Command1 = (ul_Command1 & 0xFFFFF9FD) | (data[1] << 1);
- outl(ul_Command1,
- devpriv->iobase + ((data[5] - 1) * 0x20) +
- APCI1564_TCW_PROG);
+ outl(ul_Command1, devpriv->iobase + APCI1564_TCW_CTRL_REG(data[5] - 1));
- /*****************************/
/* Set the Up/Down selection */
- /*****************************/
ul_Command1 = (ul_Command1 & 0xFFFBF9FFUL) | (data[6] << 18);
- outl(ul_Command1,
- devpriv->iobase + ((data[5] - 1) * 0x20) +
- APCI1564_TCW_PROG);
- } /* else if (data[0]==ADDIDATA_COUNTER) */
- else {
- printk(" Invalid subdevice.");
- } /* else if (data[0]==ADDIDATA_WATCHDOG) */
+ outl(ul_Command1, devpriv->iobase + APCI1564_TCW_CTRL_REG(data[5] - 1));
+ } else {
+ dev_err(dev->class_dev, "Invalid subdevice.\n");
+ }
return insn->n;
}
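A hedged sketch of the data layout this function expects when setting up the on-board timer; the unit, reload and mode values below are purely illustrative, and the constants are the driver's own:

/* Illustration only: INSN_CONFIG data consumed by apci1564_timer_config(). */
lsampl_t timer_cfg[7] = {
	ADDIDATA_TIMER,	/* data[0]: configure the subdevice as a timer */
	1,		/* data[1]: enable the timer interrupt */
	2,		/* data[2]: time unit (illustrative) */
	1000,		/* data[3]: reload value (illustrative) */
	0,		/* data[4]: timer mode (illustrative) */
	0,		/* data[5]: timer/counter/watchdog number (only used for counters) */
	0,		/* data[6]: counter direction (only used for counters) */
};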
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_StartStopWriteTimerCounterWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Start / Stop The Selected Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 0 Timer |
-| 1 Counter |
-| 2 Watchdog | | data[1] : 1 Start |
-| 0 Stop |
-| 2 Trigger |
-| Clear (Only Counter) |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1564_StartStopWriteTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Start / stop the selected timer, counter or watchdog
+ *
+ * data[0] Configure as: 0 = Timer, 1 = Counter, 2 = Watchdog
+ * data[1] 0 = Stop, 1 = Start, 2 = Trigger Clear (Only Counter)
+ */
+static int apci1564_timer_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
@@ -472,203 +318,122 @@ static int i_APCI1564_StartStopWriteTimerCounterWatchdog(struct comedi_device *d
if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
switch (data[1]) {
case 0: /* stop the watchdog */
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP_WATCHDOG + APCI1564_TCW_PROG); /* disable the watchdog */
+ /* disable the watchdog */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_WDOG_CTRL_REG);
break;
case 1: /* start the watchdog */
- outl(0x0001,
- devpriv->i_IobaseAmcc +
- APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_PROG);
+ outl(0x0001, devpriv->i_IobaseAmcc + APCI1564_WDOG_CTRL_REG);
break;
case 2: /* Software trigger */
- outl(0x0201,
- devpriv->i_IobaseAmcc +
- APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_PROG);
+ outl(0x0201, devpriv->i_IobaseAmcc + APCI1564_WDOG_CTRL_REG);
break;
default:
- printk("\nSpecified functionality does not exist\n");
+ dev_err(dev->class_dev, "Specified functionality does not exist.\n");
return -EINVAL;
- } /* switch (data[1]) */
- } /* if (devpriv->b_TimerSelectMode==ADDIDATA_WATCHDOG) */
+ }
+ }
if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
if (data[1] == 1) {
- ul_Command1 =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
+ ul_Command1 = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
/* Enable the Timer */
- outl(ul_Command1,
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
- } /* if (data[1]==1) */
- else if (data[1] == 0) {
+ outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ } else if (data[1] == 0) {
/* Stop The Timer */
- ul_Command1 =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
+ ul_Command1 = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1,
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
- } /* else if(data[1]==0) */
- } /* if (devpriv->b_TimerSelectMode==ADDIDATA_TIMER) */
+ outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ }
+ }
if (devpriv->b_TimerSelectMode == ADDIDATA_COUNTER) {
ul_Command1 =
- inl(devpriv->iobase + ((devpriv->b_ModeSelectRegister -
- 1) * 0x20) + APCI1564_TCW_PROG);
+ inl(devpriv->iobase +
+ APCI1564_TCW_CTRL_REG(devpriv->b_ModeSelectRegister - 1));
if (data[1] == 1) {
/* Start the Counter subdevice */
ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
- } /* if (data[1] == 1) */
- else if (data[1] == 0) {
+ } else if (data[1] == 0) {
/* Stops the Counter subdevice */
ul_Command1 = 0;
- } /* else if (data[1] == 0) */
- else if (data[1] == 2) {
+ } else if (data[1] == 2) {
/* Clears the Counter subdevice */
ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x400;
- } /* else if (data[1] == 3) */
+ }
outl(ul_Command1,
- devpriv->iobase + ((devpriv->b_ModeSelectRegister -
- 1) * 0x20) + APCI1564_TCW_PROG);
- } /* if (devpriv->b_TimerSelectMode==ADDIDATA_COUNTER) */
+ devpriv->iobase +
+ APCI1564_TCW_CTRL_REG(devpriv->b_ModeSelectRegister - 1));
+ }
return insn->n;
}
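Correspondingly, a small sketch of the write-side requests; data[0] selects the subdevice type and data[1] the action, per the comment above (values illustrative):

/* Illustration only: start/stop/clear requests handled by apci1564_timer_write(). */
lsampl_t start_timer[2]   = { 0, 1 };	/* data[0]: timer,   data[1]: start */
lsampl_t stop_timer[2]    = { 0, 0 };	/* data[0]: timer,   data[1]: stop */
lsampl_t clear_counter[2] = { 1, 2 };	/* data[0]: counter, data[1]: trigger clear */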
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_ReadTimerCounterWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read The Selected Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1564_ReadTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Read the selected timer, counter or watchdog
+ */
+static int apci1564_timer_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
/* Stores the status of the Watchdog */
- data[0] =
- inl(devpriv->i_IobaseAmcc +
- APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_TRIG_STATUS) & 0x1;
- data[1] =
- inl(devpriv->i_IobaseAmcc +
- APCI1564_DIGITAL_OP_WATCHDOG);
- } /* if (devpriv->b_TimerSelectMode==ADDIDATA_WATCHDOG) */
- else if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
+ data[0] = inl(devpriv->i_IobaseAmcc + APCI1564_WDOG_STATUS_REG) & 0x1;
+ data[1] = inl(devpriv->i_IobaseAmcc + APCI1564_WDOG_REG);
+ } else if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
/* Stores the status of the Timer */
- data[0] =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_TRIG_STATUS) & 0x1;
+ data[0] = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_STATUS_REG) & 0x1;
/* Stores the Actual value of the Timer */
- data[1] = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER);
- } /* else if (devpriv->b_TimerSelectMode==ADDIDATA_TIMER) */
- else if (devpriv->b_TimerSelectMode == ADDIDATA_COUNTER) {
+ data[1] = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_REG);
+ } else if (devpriv->b_TimerSelectMode == ADDIDATA_COUNTER) {
/* Read the Counter Actual Value. */
data[0] =
- inl(devpriv->iobase + ((devpriv->b_ModeSelectRegister -
- 1) * 0x20) +
- APCI1564_TCW_SYNC_ENABLEDISABLE);
+ inl(devpriv->iobase +
+ APCI1564_TCW_REG(devpriv->b_ModeSelectRegister - 1));
ul_Command1 =
- inl(devpriv->iobase + ((devpriv->b_ModeSelectRegister -
- 1) * 0x20) + APCI1564_TCW_TRIG_STATUS);
+ inl(devpriv->iobase +
+ APCI1564_TCW_STATUS_REG(devpriv->b_ModeSelectRegister - 1));
- /***********************************/
/* Get the software trigger status */
- /***********************************/
data[1] = (unsigned char) ((ul_Command1 >> 1) & 1);
- /***********************************/
/* Get the hardware trigger status */
- /***********************************/
data[2] = (unsigned char) ((ul_Command1 >> 2) & 1);
- /*********************************/
/* Get the software clear status */
- /*********************************/
data[3] = (unsigned char) ((ul_Command1 >> 3) & 1);
- /***************************/
/* Get the overflow status */
- /***************************/
data[4] = (unsigned char) ((ul_Command1 >> 0) & 1);
- } /* else if (devpriv->b_TimerSelectMode==ADDIDATA_COUNTER) */
- else if ((devpriv->b_TimerSelectMode != ADDIDATA_TIMER)
+ } else if ((devpriv->b_TimerSelectMode != ADDIDATA_TIMER)
&& (devpriv->b_TimerSelectMode != ADDIDATA_WATCHDOG)
&& (devpriv->b_TimerSelectMode != ADDIDATA_COUNTER)) {
- printk("\n Invalid Subdevice !!!\n");
- } /* else if ((devpriv->b_TimerSelectMode!=ADDIDATA_TIMER) && (devpriv->b_TimerSelectMode!=ADDIDATA_WATCHDOG)&& (devpriv->b_TimerSelectMode!=ADDIDATA_COUNTER)) */
+ dev_err(dev->class_dev, "Invalid Subdevice!\n");
+ }
return insn->n;
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_ReadInterruptStatus |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task :Reads the interrupt status register |
-+----------------------------------------------------------------------------+
-| Input Parameters : |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : |
-| |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1564_ReadInterruptStatus(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Reads the interrupt status register
+ */
+static int apci1564_do_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
*data = ui_Type;
return insn->n;
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : static void v_APCI1564_Interrupt |
-| (int irq , void *d) |
-+----------------------------------------------------------------------------+
-| Task : Interrupt handler for the interruptible digital inputs |
-+----------------------------------------------------------------------------+
-| Input Parameters : int irq : irq number |
-| void *d : void pointer |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static void v_APCI1564_Interrupt(int irq, void *d)
+ * Interrupt handler for the interruptible digital inputs
+ */
+static void apci1564_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
@@ -677,79 +442,63 @@ static void v_APCI1564_Interrupt(int irq, void *d)
unsigned int ui_C1, ui_C2, ui_C3, ui_C4;
unsigned int ul_Command2 = 0;
- ui_DI = inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ) & 0x01;
- ui_DO = inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_IRQ) & 0x01;
- ui_Timer =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_IRQ) & 0x01;
- ui_C1 = inl(devpriv->iobase + APCI1564_COUNTER1 +
- APCI1564_TCW_IRQ) & 0x1;
- ui_C2 = inl(devpriv->iobase + APCI1564_COUNTER2 +
- APCI1564_TCW_IRQ) & 0x1;
- ui_C3 = inl(devpriv->iobase + APCI1564_COUNTER3 +
- APCI1564_TCW_IRQ) & 0x1;
- ui_C4 = inl(devpriv->iobase + APCI1564_COUNTER4 +
- APCI1564_TCW_IRQ) & 0x1;
+ ui_DI = inl(devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG) & 0x01;
+ ui_DO = inl(devpriv->i_IobaseAmcc + APCI1564_DO_IRQ_REG) & 0x01;
+ ui_Timer = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_IRQ_REG) & 0x01;
+ ui_C1 =
+ inl(devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER1)) & 0x1;
+ ui_C2 =
+ inl(devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER2)) & 0x1;
+ ui_C3 =
+ inl(devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER3)) & 0x1;
+ ui_C4 =
+ inl(devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER4)) & 0x1;
if (ui_DI == 0 && ui_DO == 0 && ui_Timer == 0 && ui_C1 == 0
&& ui_C2 == 0 && ui_C3 == 0 && ui_C4 == 0) {
- printk("\nInterrupt from unknown source\n");
- } /* if(ui_DI==0 && ui_DO==0 && ui_Timer==0 && ui_C1==0 && ui_C2==0 && ui_C3==0 && ui_C4==0) */
+ dev_err(dev->class_dev, "Interrupt from unknown source.\n");
+ }
if (ui_DI == 1) {
- ui_DI = inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
+ ui_DI = inl(devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
ui_InterruptStatus_1564 =
- inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_INTERRUPT_STATUS);
+ inl(devpriv->i_IobaseAmcc + APCI1564_DI_INT_STATUS_REG);
ui_InterruptStatus_1564 = ui_InterruptStatus_1564 & 0X000FFFF0;
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- outl(ui_DI, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP + APCI1564_DIGITAL_IP_IRQ); /* enable the interrupt */
+ /* send signal to the sample */
+ send_sig(SIGIO, devpriv->tsk_Current, 0);
+ /* enable the interrupt */
+ outl(ui_DI, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
return;
}
if (ui_DO == 1) {
- /* Check for Digital Output interrupt Type - 1: Vcc interrupt 2: CC interrupt. */
- ui_Type =
- inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_INTERRUPT_STATUS) & 0x3;
+ /* Check for Digital Output interrupt Type */
+ /* 1: VCC interrupt */
+ /* 2: CC interrupt */
+ ui_Type = inl(devpriv->i_IobaseAmcc + APCI1564_DO_INT_STATUS_REG) & 0x3;
/* Disable the Interrupt */
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_INTERRUPT);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DO_INT_CTRL_REG);
/* Sends signal to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
-
- } /* if (ui_DO) */
+ }
if (ui_Timer == 1) {
devpriv->b_TimerSelectMode = ADDIDATA_TIMER;
if (devpriv->b_TimerSelectMode) {
/* Disable Timer Interrupt */
- ul_Command2 =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
+ ul_Command2 = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
/* Send a signal from kernel to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
/* Enable Timer Interrupt */
- outl(ul_Command2,
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
+ outl(ul_Command2, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
}
- }/* if (ui_Timer == 1) */
-
+ }
if (ui_C1 == 1) {
devpriv->b_TimerSelectMode = ADDIDATA_COUNTER;
@@ -757,21 +506,18 @@ static void v_APCI1564_Interrupt(int irq, void *d)
/* Disable Counter Interrupt */
ul_Command2 =
- inl(devpriv->iobase + APCI1564_COUNTER1 +
- APCI1564_TCW_PROG);
+ inl(devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER1));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER1 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER1));
/* Send a signal from kernel to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
/* Enable Counter Interrupt */
outl(ul_Command2,
- devpriv->iobase + APCI1564_COUNTER1 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER1));
}
- } /* if (ui_C1 == 1) */
+ }
if (ui_C2 == 1) {
devpriv->b_TimerSelectMode = ADDIDATA_COUNTER;
@@ -779,21 +525,18 @@ static void v_APCI1564_Interrupt(int irq, void *d)
/* Disable Counter Interrupt */
ul_Command2 =
- inl(devpriv->iobase + APCI1564_COUNTER2 +
- APCI1564_TCW_PROG);
+ inl(devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER2));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER2 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER2));
/* Send a signal from kernel to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
/* Enable Counter Interrupt */
outl(ul_Command2,
- devpriv->iobase + APCI1564_COUNTER2 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER2));
}
- } /* if ((ui_C2 == 1) */
+ }
if (ui_C3 == 1) {
devpriv->b_TimerSelectMode = ADDIDATA_COUNTER;
@@ -801,21 +544,18 @@ static void v_APCI1564_Interrupt(int irq, void *d)
/* Disable Counter Interrupt */
ul_Command2 =
- inl(devpriv->iobase + APCI1564_COUNTER3 +
- APCI1564_TCW_PROG);
+ inl(devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER3));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER3 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER3));
/* Send a signal from kernel to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
/* Enable Counter Interrupt */
outl(ul_Command2,
- devpriv->iobase + APCI1564_COUNTER3 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER3));
}
- } /* if ((ui_C3 == 1) */
+ }
if (ui_C4 == 1) {
devpriv->b_TimerSelectMode = ADDIDATA_COUNTER;
@@ -823,60 +563,45 @@ static void v_APCI1564_Interrupt(int irq, void *d)
/* Disable Counter Interrupt */
ul_Command2 =
- inl(devpriv->iobase + APCI1564_COUNTER4 +
- APCI1564_TCW_PROG);
+ inl(devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER4));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER4 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER4));
/* Send a signal from kernel to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
/* Enable Counter Interrupt */
outl(ul_Command2,
- devpriv->iobase + APCI1564_COUNTER4 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER4));
}
- } /* if (ui_C4 == 1) */
+ }
return;
}
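Because the handler notifies user space with send_sig(SIGIO, ...), an application would typically install a SIGIO handler before arming the interrupts; a minimal, purely illustrative sketch:

/* Illustration only: catching the SIGIO raised by apci1564_interrupt(). */
#include <signal.h>

static volatile sig_atomic_t apci1564_irq_seen;

static void apci1564_sigio_handler(int sig)
{
	(void)sig;
	apci1564_irq_seen = 1;	/* poll the driver's status from the main loop */
}

/* during setup, before enabling the interrupts: */
/*	signal(SIGIO, apci1564_sigio_handler); */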
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_Reset(struct comedi_device *dev) | |
-+----------------------------------------------------------------------------+
-| Task :resets all the registers |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : |
-| |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1564_Reset(struct comedi_device *dev)
+static int apci1564_reset(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP_IRQ); /* disable the interrupts */
- inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP_INTERRUPT_STATUS); /* Reset the interrupt status register */
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP_INTERRUPT_MODE1); /* Disable the and/or interrupt */
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP_INTERRUPT_MODE2);
+ /* disable the interrupts */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ /* Reset the interrupt status register */
+ inl(devpriv->i_IobaseAmcc + APCI1564_DI_INT_STATUS_REG);
+ /* Disable the and/or interrupt */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE1_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE2_REG);
devpriv->b_DigitalOutputRegister = 0;
ui_Type = 0;
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP); /* Resets the output channels */
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP_INTERRUPT); /* Disables the interrupt. */
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_RELOAD_VALUE);
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER);
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER + APCI1564_TCW_PROG);
-
- outl(0x0, devpriv->iobase + APCI1564_COUNTER1 + APCI1564_TCW_PROG);
- outl(0x0, devpriv->iobase + APCI1564_COUNTER2 + APCI1564_TCW_PROG);
- outl(0x0, devpriv->iobase + APCI1564_COUNTER3 + APCI1564_TCW_PROG);
- outl(0x0, devpriv->iobase + APCI1564_COUNTER4 + APCI1564_TCW_PROG);
+ /* Resets the output channels */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DO_REG);
+ /* Disables the interrupt. */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DO_INT_CTRL_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_WDOG_RELOAD_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+
+ outl(0x0, devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER1));
+ outl(0x0, devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER2));
+ outl(0x0, devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER3));
+ outl(0x0, devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER4));
return 0;
}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
index bd05857b82f2..70e8f426285c 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
@@ -248,10 +248,10 @@ static const struct comedi_lrange range_apci3120_ao = {
+----------------------------------------------------------------------------+
*/
-static int i_APCI3120_InsnConfigAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3120_ai_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
const struct addi_board *this_board = comedi_board(dev);
struct addi_private *devpriv = dev->private;
@@ -304,11 +304,11 @@ static int i_APCI3120_InsnConfigAnalogInput(struct comedi_device *dev,
* If the last argument of the function "check" is 1, it only checks
* whether the channel list is ok or not.
*/
-static int i_APCI3120_SetupChannelList(struct comedi_device *dev,
- struct comedi_subdevice *s,
- int n_chan,
- unsigned int *chanlist,
- char check)
+static int apci3120_setup_chan_list(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ int n_chan,
+ unsigned int *chanlist,
+ char check)
{
struct addi_private *devpriv = dev->private;
unsigned int i; /* , differencial=0, bipolar=0; */
@@ -358,10 +358,10 @@ static int i_APCI3120_SetupChannelList(struct comedi_device *dev,
* as configured; if no conversion time is set, the default
* conversion time of 10 microseconds is used.
*/
-static int i_APCI3120_InsnReadAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3120_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
const struct addi_board *this_board = comedi_board(dev);
struct addi_private *devpriv = dev->private;
@@ -417,10 +417,7 @@ static int i_APCI3120_InsnReadAnalogInput(struct comedi_device *dev,
inw(devpriv->iobase + APCI3120_RESET_FIFO);
/* Initialize the sequence array */
-
- /* if (!i_APCI3120_SetupChannelList(dev,s,1,chanlist,0)) return -EINVAL; */
-
- if (!i_APCI3120_SetupChannelList(dev, s, 1,
+ if (!apci3120_setup_chan_list(dev, s, 1,
&insn->chanspec, 0))
return -EINVAL;
@@ -512,7 +509,7 @@ static int i_APCI3120_InsnReadAnalogInput(struct comedi_device *dev,
outw(devpriv->us_OutputRegister,
devpriv->iobase + APCI3120_WR_ADDRESS);
- if (!i_APCI3120_SetupChannelList(dev, s,
+ if (!apci3120_setup_chan_list(dev, s,
devpriv->ui_AiNbrofChannels,
devpriv->ui_AiChannelList, 0))
return -EINVAL;
@@ -606,7 +603,7 @@ static int i_APCI3120_InsnReadAnalogInput(struct comedi_device *dev,
}
-static int i_APCI3120_Reset(struct comedi_device *dev)
+static int apci3120_reset(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
unsigned int i;
@@ -663,7 +660,7 @@ static int i_APCI3120_Reset(struct comedi_device *dev)
return 0;
}
-static int i_APCI3120_ExttrigEnable(struct comedi_device *dev)
+static int apci3120_exttrig_enable(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
@@ -672,7 +669,7 @@ static int i_APCI3120_ExttrigEnable(struct comedi_device *dev)
return 0;
}
-static int i_APCI3120_ExttrigDisable(struct comedi_device *dev)
+static int apci3120_exttrig_disable(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
@@ -681,8 +678,8 @@ static int i_APCI3120_ExttrigDisable(struct comedi_device *dev)
return 0;
}
-static int i_APCI3120_StopCyclicAcquisition(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int apci3120_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct addi_private *devpriv = dev->private;
@@ -705,7 +702,7 @@ static int i_APCI3120_StopCyclicAcquisition(struct comedi_device *dev,
* devpriv->i_IobaseAmcc+AMCC_OP_REG_MCSR); stop DMA */
/* Disable ext trigger */
- i_APCI3120_ExttrigDisable(dev);
+ apci3120_exttrig_disable(dev);
devpriv->us_OutputRegister = 0;
/* stop counters */
@@ -729,13 +726,13 @@ static int i_APCI3120_StopCyclicAcquisition(struct comedi_device *dev,
devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
devpriv->b_InterruptMode = APCI3120_EOC_MODE;
devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- i_APCI3120_Reset(dev);
+ apci3120_reset(dev);
return 0;
}
-static int i_APCI3120_CommandTestAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
+static int apci3120_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
const struct addi_board *this_board = comedi_board(dev);
int err = 0;
@@ -818,9 +815,9 @@ static int i_APCI3120_CommandTestAnalogInput(struct comedi_device *dev,
* If DMA is configured does DMA initialization otherwise does the
* acquisition with EOS interrupt.
*/
-static int i_APCI3120_CyclicAnalogInput(int mode,
- struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int apci3120_cyclic_ai(int mode,
+ struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
const struct addi_board *this_board = comedi_board(dev);
struct addi_private *devpriv = dev->private;
@@ -904,7 +901,7 @@ static int i_APCI3120_CyclicAnalogInput(int mode,
/**********************************/
/* Initializes the sequence array */
/**********************************/
- if (!i_APCI3120_SetupChannelList(dev, s, devpriv->ui_AiNbrofChannels,
+ if (!apci3120_setup_chan_list(dev, s, devpriv->ui_AiNbrofChannels,
devpriv->pui_AiChannelList, 0))
return -EINVAL;
@@ -957,7 +954,7 @@ static int i_APCI3120_CyclicAnalogInput(int mode,
/*** EL241003 End ******************************************************************************/
if (devpriv->b_ExttrigEnable == APCI3120_ENABLE)
- i_APCI3120_ExttrigEnable(dev); /* activate EXT trigger */
+ apci3120_exttrig_enable(dev); /* activate EXT trigger */
switch (mode) {
case 1:
/* init timer0 in mode 2 */
@@ -1333,8 +1330,8 @@ static int i_APCI3120_CyclicAnalogInput(int mode,
* Does asynchronous acquisition.
* Determines the mode 1 or 2.
*/
-static int i_APCI3120_CommandAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int apci3120_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct addi_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
@@ -1371,7 +1368,7 @@ static int i_APCI3120_CommandAnalogInput(struct comedi_device *dev,
devpriv->ui_AiTimer0 = cmd->convert_arg; /* timer constant in nanoseconds */
/* return this_board->ai_cmd(1,dev,s); */
- return i_APCI3120_CyclicAnalogInput(1, dev, s);
+ return apci3120_cyclic_ai(1, dev, s);
}
}
@@ -1381,7 +1378,7 @@ static int i_APCI3120_CommandAnalogInput(struct comedi_device *dev,
devpriv->ui_AiTimer1 = cmd->scan_begin_arg;
devpriv->ui_AiTimer0 = cmd->convert_arg; /* variable changed timer2 to timer0 */
/* return this_board->ai_cmd(2,dev,s); */
- return i_APCI3120_CyclicAnalogInput(2, dev, s);
+ return apci3120_cyclic_ai(2, dev, s);
}
return -1;
}
@@ -1410,7 +1407,7 @@ static void v_APCI3120_InterruptDmaMoveBlock16bit(struct comedi_device *dev,
* For continuous DMA it reinitializes the DMA operation.
* For single mode DMA it stop the acquisition.
*/
-static void v_APCI3120_InterruptDma(int irq, void *d)
+static void apci3120_interrupt_dma(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
@@ -1429,7 +1426,7 @@ static void v_APCI3120_InterruptDma(int irq, void *d)
}
if (samplesinbuf & 1) {
comedi_error(dev, "Odd count of bytes in DMA ring!");
- i_APCI3120_StopCyclicAcquisition(dev, s);
+ apci3120_cancel(dev, s);
devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
return;
@@ -1500,7 +1497,7 @@ static void v_APCI3120_InterruptDma(int irq, void *d)
if (!devpriv->b_AiContinuous)
if (devpriv->ui_AiActualScan >= devpriv->ui_AiNbrofScans) {
/* all data sampled */
- i_APCI3120_StopCyclicAcquisition(dev, s);
+ apci3120_cancel(dev, s);
devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
s->async->events |= COMEDI_CB_EOA;
comedi_event(dev, s);
@@ -1565,7 +1562,7 @@ static void v_APCI3120_InterruptDma(int irq, void *d)
* This function handles EOS interrupt.
* This function copies the acquired data(from FIFO) to Comedi buffer.
*/
-static int i_APCI3120_InterruptHandleEos(struct comedi_device *dev)
+static int apci3120_interrupt_handle_eos(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
@@ -1574,8 +1571,6 @@ static int i_APCI3120_InterruptHandleEos(struct comedi_device *dev)
n_chan = devpriv->ui_AiNbrofChannels;
- s->async->events = 0;
-
for (i = 0; i < n_chan; i++)
err &= comedi_buf_put(s->async, inw(dev->iobase + 0));
@@ -1589,7 +1584,7 @@ static int i_APCI3120_InterruptHandleEos(struct comedi_device *dev)
return 0;
}
-static void v_APCI3120_Interrupt(int irq, void *d)
+static void apci3120_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
@@ -1615,7 +1610,7 @@ static void v_APCI3120_Interrupt(int irq, void *d)
if (devpriv->b_ExttrigEnable == APCI3120_ENABLE) {
/* Disable ext trigger */
- i_APCI3120_ExttrigDisable(dev);
+ apci3120_exttrig_disable(dev);
devpriv->b_ExttrigEnable = APCI3120_DISABLE;
}
/* clear the timer 2 interrupt */
@@ -1655,7 +1650,7 @@ static void v_APCI3120_Interrupt(int irq, void *d)
if (devpriv->b_AiCyclicAcquisition == APCI3120_ENABLE) {
ui_Check = 0;
- i_APCI3120_InterruptHandleEos(dev);
+ apci3120_interrupt_handle_eos(dev);
devpriv->ui_AiActualScan++;
devpriv->b_ModeSelectRegister =
devpriv->
@@ -1711,7 +1706,7 @@ static void v_APCI3120_Interrupt(int irq, void *d)
dev->iobase + APCI3120_WR_ADDRESS);
/* stop timer 0 and timer 1 */
- i_APCI3120_StopCyclicAcquisition(dev, s);
+ apci3120_cancel(dev, s);
devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
/* UPDATE-0.7.57->0.7.68comedi_done(dev,s); */
@@ -1765,7 +1760,8 @@ static void v_APCI3120_Interrupt(int irq, void *d)
/* Clears the timer status register */
/************************************/
inw(dev->iobase + APCI3120_TIMER_STATUS_REGISTER);
- v_APCI3120_InterruptDma(irq, d); /* do some data transfer */
+ /* do some data transfer */
+ apci3120_interrupt_dma(irq, d);
} else {
/* Stops the Timer */
outw(devpriv->
@@ -1787,7 +1783,7 @@ static void v_APCI3120_Interrupt(int irq, void *d)
* data[1] = Timer constant
* data[2] = Timer2 interrupt (1)enable or(0) disable
*/
-static int i_APCI3120_InsnConfigTimer(struct comedi_device *dev,
+static int apci3120_config_insn_timer(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
@@ -1932,7 +1928,7 @@ static int i_APCI3120_InsnConfigTimer(struct comedi_device *dev,
* = 1 Timer
* = 2 Watchdog
*/
-static int i_APCI3120_InsnWriteTimer(struct comedi_device *dev,
+static int apci3120_write_insn_timer(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
@@ -2104,7 +2100,7 @@ static int i_APCI3120_InsnWriteTimer(struct comedi_device *dev,
* for watchdog: data[0] = 0 (still running)
* = 1 (run down)
*/
-static int i_APCI3120_InsnReadTimer(struct comedi_device *dev,
+static int apci3120_read_insn_timer(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
@@ -2189,10 +2185,10 @@ static int apci3120_do_insn_bits(struct comedi_device *dev,
return insn->n;
}
-static int i_APCI3120_InsnWriteAnalogOutput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3120_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ui_Range, ui_Channel;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index 8c85a09d1c66..0536d8373861 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -1268,7 +1268,7 @@ static int i_APCI3200_ReadCJCCalGain(struct comedi_device *dev,
return 0;
}
-static int i_APCI3200_Reset(struct comedi_device *dev)
+static int apci3200_reset(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
int i_Temp;
@@ -1322,10 +1322,10 @@ static int i_APCI3200_Reset(struct comedi_device *dev)
* data[7] : Channel current source from eeprom
* data[8] : Channel gain factor from eeprom
*/
-static int i_APCI3200_ReadAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3200_ai_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
unsigned int ui_DummyValue = 0;
int i_ConvertCJCCalibration;
@@ -1336,7 +1336,7 @@ static int i_APCI3200_ReadAnalogInput(struct comedi_device *dev,
if (s_BoardInfos[dev->minor].i_Initialised == 0)
/* END JK 06.07.04: Management of several boards */
{
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return -EINVAL;
} /* if(i_Initialised==0); */
@@ -1586,7 +1586,7 @@ static int i_APCI3200_ReadAnalogInput(struct comedi_device *dev,
break;
default:
printk("\nThe parameters passed are in error\n");
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return -EINVAL;
} /* switch(insn->unused[0]) */
@@ -1626,10 +1626,10 @@ static int i_APCI3200_ReadAnalogInput(struct comedi_device *dev,
* = 2 RTD 3 wire connection
* = 3 RTD 4 wire connection
*/
-static int i_APCI3200_ConfigAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3200_ai_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ul_Config = 0, ul_Temp = 0;
@@ -1970,7 +1970,7 @@ static int i_APCI3200_ConfigAnalogInput(struct comedi_device *dev,
} /* switch(data[11]) */
} /* elseif(data[12]==0 || data[12]==1) */
if (i_err) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return -EINVAL;
}
/* if(i_ScanType!=1) */
@@ -2079,7 +2079,7 @@ static int i_APCI3200_ConfigAnalogInput(struct comedi_device *dev,
/* END JK 06.07.04: Management of several boards */
insn->unused[0] = 0;
- i_APCI3200_ReadAnalogInput(dev, s, insn, &ui_Dummy);
+ apci3200_ai_read(dev, s, insn, &ui_Dummy);
}
return insn->n;
@@ -2095,10 +2095,10 @@ static int i_APCI3200_ConfigAnalogInput(struct comedi_device *dev,
* data[1] : calibration offset
* data[2] : calibration gain
*/
-static int i_APCI3200_InsnBits_AnalogInput_Test(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3200_ai_bits_test(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ui_Configuration = 0;
@@ -2107,12 +2107,12 @@ static int i_APCI3200_InsnBits_AnalogInput_Test(struct comedi_device *dev,
/* if(i_Initialised==0) */
if (s_BoardInfos[dev->minor].i_Initialised == 0) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return -EINVAL;
} /* if(i_Initialised==0); */
if (data[0] != 0 && data[0] != 1) {
printk("\nError in selection of functionality\n");
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return -EINVAL;
} /* if(data[0]!=0 && data[0]!=1) */
@@ -2202,18 +2202,18 @@ static int i_APCI3200_InsnBits_AnalogInput_Test(struct comedi_device *dev,
return insn->n;
}
-static int i_APCI3200_InsnWriteReleaseAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3200_ai_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return insn->n;
}
-static int i_APCI3200_CommandTestAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
+static int apci3200_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
int err = 0;
@@ -2241,7 +2241,7 @@ static int i_APCI3200_CommandTestAnalogInput(struct comedi_device *dev,
err |= -EINVAL;
if (err) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return 1;
}
@@ -2267,7 +2267,7 @@ static int i_APCI3200_CommandTestAnalogInput(struct comedi_device *dev,
}
if (err) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return 2;
}
/* i_FirstChannel=cmd->chanlist[0]; */
@@ -2308,7 +2308,7 @@ static int i_APCI3200_CommandTestAnalogInput(struct comedi_device *dev,
printk("\nThe Delay time value is in error\n");
}
if (err) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return 3;
}
fpu_begin();
@@ -2366,15 +2366,15 @@ static int i_APCI3200_CommandTestAnalogInput(struct comedi_device *dev,
} /* else if(cmd->scan_begin_src==TRIG_FOLLOW) */
if (err) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return 4;
}
return 0;
}
-static int i_APCI3200_StopCyclicAcquisition(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int apci3200_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct addi_private *devpriv = dev->private;
unsigned int ui_Configuration = 0;
@@ -2410,8 +2410,8 @@ static int i_APCI3200_StopCyclicAcquisition(struct comedi_device *dev,
* Does asynchronous acquisition
* Determines the mode 1 or 2.
*/
-static int i_APCI3200_CommandAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int apci3200_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct addi_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
@@ -2619,7 +2619,6 @@ static int i_APCI3200_InterruptHandleEos(struct comedi_device *dev)
/* BEGIN JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
/* This value is not used */
/* ui_ChannelNumber = inl(devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 24); */
- s->async->events = 0;
/* END JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
/*************************************/
@@ -2730,7 +2729,7 @@ static int i_APCI3200_InterruptHandleEos(struct comedi_device *dev)
return 0;
}
-static void v_APCI3200_Interrupt(int irq, void *d)
+static void apci3200_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
index ebc1534a8df8..20e89b0bdc4d 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
@@ -16,10 +16,10 @@
* data[2] : Time Unit
* data[3] : Reload Value
*/
-static int i_APCI3501_ConfigTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3501_config_insn_timer(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci3501_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
@@ -86,10 +86,10 @@ static int i_APCI3501_ConfigTimerCounterWatchdog(struct comedi_device *dev,
* 0 Stop
* 2 Trigger
*/
-static int i_APCI3501_StartStopWriteTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3501_write_insn_timer(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci3501_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
@@ -153,10 +153,10 @@ static int i_APCI3501_StartStopWriteTimerCounterWatchdog(struct comedi_device *d
* 2 Watchdog
* data[1] : Timer Counter Watchdog Number
*/
-static int i_APCI3501_ReadTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3501_read_insn_timer(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci3501_private *devpriv = dev->private;
diff --git a/drivers/staging/comedi/drivers/addi_apci_035.c b/drivers/staging/comedi/drivers/addi_apci_035.c
index ccd49211ea17..4da9db35b8e2 100644
--- a/drivers/staging/comedi/drivers/addi_apci_035.c
+++ b/drivers/staging/comedi/drivers/addi_apci_035.c
@@ -27,13 +27,13 @@ static const struct addi_board apci035_boardtypes[] = {
.i_Timer = 1,
.ui_MinAcquisitiontimeNs = 10000,
.ui_MinDelaytimeNs = 100000,
- .interrupt = v_APCI035_Interrupt,
- .reset = i_APCI035_Reset,
- .ai_config = i_APCI035_ConfigAnalogInput,
- .ai_read = i_APCI035_ReadAnalogInput,
- .timer_config = i_APCI035_ConfigTimerWatchdog,
- .timer_write = i_APCI035_StartStopWriteTimerWatchdog,
- .timer_read = i_APCI035_ReadTimerWatchdog,
+ .interrupt = apci035_interrupt,
+ .reset = apci035_reset,
+ .ai_config = apci035_ai_config,
+ .ai_read = apci035_ai_read,
+ .timer_config = apci035_timer_config,
+ .timer_write = apci035_timer_write,
+ .timer_read = apci035_timer_read,
},
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index 74f7ace8adbc..bd8e08ca14c0 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -20,19 +20,19 @@ static const struct addi_board apci1500_boardtypes[] = {
.i_NbrDoChannel = 16,
.i_DoMaxdata = 0xffff,
.i_Timer = 1,
- .interrupt = v_APCI1500_Interrupt,
- .reset = i_APCI1500_Reset,
- .di_config = i_APCI1500_ConfigDigitalInputEvent,
- .di_read = i_APCI1500_Initialisation,
- .di_write = i_APCI1500_StartStopInputEvent,
+ .interrupt = apci1500_interrupt,
+ .reset = apci1500_reset,
+ .di_config = apci1500_di_config,
+ .di_read = apci1500_di_read,
+ .di_write = apci1500_di_write,
.di_bits = apci1500_di_insn_bits,
- .do_config = i_APCI1500_ConfigDigitalOutputErrorInterrupt,
- .do_write = i_APCI1500_WriteDigitalOutput,
- .do_bits = i_APCI1500_ConfigureInterrupt,
- .timer_config = i_APCI1500_ConfigCounterTimerWatchdog,
- .timer_write = i_APCI1500_StartStopTriggerTimerCounterWatchdog,
- .timer_read = i_APCI1500_ReadInterruptMask,
- .timer_bits = i_APCI1500_ReadCounterTimerWatchdog,
+ .do_config = apci1500_do_config,
+ .do_write = apci1500_do_write,
+ .do_bits = apci1500_do_bits,
+ .timer_config = apci1500_timer_config,
+ .timer_write = apci1500_timer_write,
+ .timer_read = apci1500_timer_read,
+ .timer_bits = apci1500_timer_bits,
},
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 6248284caaf5..27aa9ae1bdd9 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -21,16 +21,16 @@ static const struct addi_board apci1564_boardtypes[] = {
.i_NbrDoChannel = 32,
.i_DoMaxdata = 0xffffffff,
.i_Timer = 1,
- .interrupt = v_APCI1564_Interrupt,
- .reset = i_APCI1564_Reset,
- .di_config = i_APCI1564_ConfigDigitalInput,
+ .interrupt = apci1564_interrupt,
+ .reset = apci1564_reset,
+ .di_config = apci1564_di_config,
.di_bits = apci1564_di_insn_bits,
- .do_config = i_APCI1564_ConfigDigitalOutput,
+ .do_config = apci1564_do_config,
.do_bits = apci1564_do_insn_bits,
- .do_read = i_APCI1564_ReadInterruptStatus,
- .timer_config = i_APCI1564_ConfigTimerCounterWatchdog,
- .timer_write = i_APCI1564_StartStopWriteTimerCounterWatchdog,
- .timer_read = i_APCI1564_ReadTimerCounterWatchdog,
+ .do_read = apci1564_do_read,
+ .timer_config = apci1564_timer_config,
+ .timer_write = apci1564_timer_write,
+ .timer_read = apci1564_timer_read,
},
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index 1e6fa56c516e..57ee6e5c7635 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -26,7 +26,7 @@ static const struct addi_board apci3120_boardtypes[] = {
.i_NbrDiChannel = 4,
.i_NbrDoChannel = 4,
.i_DoMaxdata = 0x0f,
- .interrupt = v_APCI3120_Interrupt,
+ .interrupt = apci3120_interrupt,
},
[BOARD_APCI3001] = {
.pc_DriverName = "apci3001",
@@ -37,7 +37,7 @@ static const struct addi_board apci3120_boardtypes[] = {
.i_NbrDiChannel = 4,
.i_NbrDoChannel = 4,
.i_DoMaxdata = 0x0f,
- .interrupt = v_APCI3120_Interrupt,
+ .interrupt = apci3120_interrupt,
},
};
@@ -136,11 +136,11 @@ static int apci3120_auto_attach(struct comedi_device *dev,
s->len_chanlist = this_board->i_AiChannelList;
s->range_table = &range_apci3120_ai;
- s->insn_config = i_APCI3120_InsnConfigAnalogInput;
- s->insn_read = i_APCI3120_InsnReadAnalogInput;
- s->do_cmdtest = i_APCI3120_CommandTestAnalogInput;
- s->do_cmd = i_APCI3120_CommandAnalogInput;
- s->cancel = i_APCI3120_StopCyclicAcquisition;
+ s->insn_config = apci3120_ai_insn_config;
+ s->insn_read = apci3120_ai_insn_read;
+ s->do_cmdtest = apci3120_ai_cmdtest;
+ s->do_cmd = apci3120_ai_cmd;
+ s->cancel = apci3120_cancel;
/* Allocate and Initialise AO Subdevice Structures */
s = &dev->subdevices[1];
@@ -151,7 +151,7 @@ static int apci3120_auto_attach(struct comedi_device *dev,
s->maxdata = this_board->i_AoMaxdata;
s->len_chanlist = this_board->i_NbrAoChannel;
s->range_table = &range_apci3120_ao;
- s->insn_write = i_APCI3120_InsnWriteAnalogOutput;
+ s->insn_write = apci3120_ao_insn_write;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -186,11 +186,11 @@ static int apci3120_auto_attach(struct comedi_device *dev,
s->len_chanlist = 1;
s->range_table = &range_digital;
- s->insn_write = i_APCI3120_InsnWriteTimer;
- s->insn_read = i_APCI3120_InsnReadTimer;
- s->insn_config = i_APCI3120_InsnConfigTimer;
+ s->insn_write = apci3120_write_insn_timer;
+ s->insn_read = apci3120_read_insn_timer;
+ s->insn_config = apci3120_config_insn_timer;
- i_APCI3120_Reset(dev);
+ apci3120_reset(dev);
return 0;
}
@@ -200,7 +200,7 @@ static void apci3120_detach(struct comedi_device *dev)
if (devpriv) {
if (dev->iobase)
- i_APCI3120_Reset(dev);
+ apci3120_reset(dev);
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv->ul_DmaBufferVirtual[0]) {
diff --git a/drivers/staging/comedi/drivers/addi_apci_3200.c b/drivers/staging/comedi/drivers/addi_apci_3200.c
index 9868a5369aff..f0f891a482a3 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3200.c
@@ -43,15 +43,15 @@ static const struct addi_board apci3200_boardtypes[] = {
.i_NbrDoChannel = 4,
.ui_MinAcquisitiontimeNs = 10000,
.ui_MinDelaytimeNs = 100000,
- .interrupt = v_APCI3200_Interrupt,
- .reset = i_APCI3200_Reset,
- .ai_config = i_APCI3200_ConfigAnalogInput,
- .ai_read = i_APCI3200_ReadAnalogInput,
- .ai_write = i_APCI3200_InsnWriteReleaseAnalogInput,
- .ai_bits = i_APCI3200_InsnBits_AnalogInput_Test,
- .ai_cmdtest = i_APCI3200_CommandTestAnalogInput,
- .ai_cmd = i_APCI3200_CommandAnalogInput,
- .ai_cancel = i_APCI3200_StopCyclicAcquisition,
+ .interrupt = apci3200_interrupt,
+ .reset = apci3200_reset,
+ .ai_config = apci3200_ai_config,
+ .ai_read = apci3200_ai_read,
+ .ai_write = apci3200_ai_write,
+ .ai_bits = apci3200_ai_bits_test,
+ .ai_cmdtest = apci3200_ai_cmdtest,
+ .ai_cmd = apci3200_ai_cmd,
+ .ai_cancel = apci3200_cancel,
.di_bits = apci3200_di_insn_bits,
.do_bits = apci3200_do_insn_bits,
},
@@ -68,15 +68,15 @@ static const struct addi_board apci3200_boardtypes[] = {
.i_NbrDoChannel = 4,
.ui_MinAcquisitiontimeNs = 10000,
.ui_MinDelaytimeNs = 100000,
- .interrupt = v_APCI3200_Interrupt,
- .reset = i_APCI3200_Reset,
- .ai_config = i_APCI3200_ConfigAnalogInput,
- .ai_read = i_APCI3200_ReadAnalogInput,
- .ai_write = i_APCI3200_InsnWriteReleaseAnalogInput,
- .ai_bits = i_APCI3200_InsnBits_AnalogInput_Test,
- .ai_cmdtest = i_APCI3200_CommandTestAnalogInput,
- .ai_cmd = i_APCI3200_CommandAnalogInput,
- .ai_cancel = i_APCI3200_StopCyclicAcquisition,
+ .interrupt = apci3200_interrupt,
+ .reset = apci3200_reset,
+ .ai_config = apci3200_ai_config,
+ .ai_read = apci3200_ai_read,
+ .ai_write = apci3200_ai_write,
+ .ai_bits = apci3200_ai_bits_test,
+ .ai_cmdtest = apci3200_ai_cmdtest,
+ .ai_cmd = apci3200_ai_cmd,
+ .ai_cancel = apci3200_cancel,
.di_bits = apci3200_di_insn_bits,
.do_bits = apci3200_do_insn_bits,
},
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index 4cb69ec54c9b..49bf1fb840f6 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -390,9 +390,9 @@ static int apci3501_auto_attach(struct comedi_device *dev,
s->maxdata = 0;
s->len_chanlist = 1;
s->range_table = &range_digital;
- s->insn_write = i_APCI3501_StartStopWriteTimerCounterWatchdog;
- s->insn_read = i_APCI3501_ReadTimerCounterWatchdog;
- s->insn_config = i_APCI3501_ConfigTimerCounterWatchdog;
+ s->insn_write = apci3501_write_insn_timer;
+ s->insn_read = apci3501_read_insn_timer;
+ s->insn_config = apci3501_config_insn_timer;
/* Initialize the eeprom subdevice */
s = &dev->subdevices[4];
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index ceadf8eff47f..6dc11c407f57 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -434,13 +434,26 @@ static int apci3xxx_ai_setup(struct comedi_device *dev, unsigned int chanspec)
return 0;
}
+static int apci3xxx_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct apci3xxx_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readl(devpriv->mmio + 20);
+ if (status & 0x1)
+ return 0;
+ return -EBUSY;
+}
+
static int apci3xxx_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct apci3xxx_private *devpriv = dev->private;
- unsigned int val;
int ret;
int i;
@@ -453,10 +466,9 @@ static int apci3xxx_ai_insn_read(struct comedi_device *dev,
writel(0x80000, devpriv->mmio + 8);
/* Wait the EOS */
- do {
- val = readl(devpriv->mmio + 20);
- val &= 0x1;
- } while (!val);
+ ret = comedi_timeout(dev, s, insn, apci3xxx_ai_eoc, 0);
+ if (ret)
+ return ret;
/* Read the analog value */
data[i] = readl(devpriv->mmio + 28);
@@ -622,6 +634,20 @@ static int apci3xxx_ai_cancel(struct comedi_device *dev,
return 0;
}
+static int apci3xxx_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct apci3xxx_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readl(devpriv->mmio + 96);
+ if (status & 0x100)
+ return 0;
+ return -EBUSY;
+}
+
static int apci3xxx_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -630,7 +656,7 @@ static int apci3xxx_ao_insn_write(struct comedi_device *dev,
struct apci3xxx_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int status;
+ int ret;
int i;
for (i = 0; i < insn->n; i++) {
@@ -641,9 +667,9 @@ static int apci3xxx_ao_insn_write(struct comedi_device *dev,
writel((data[i] << 8) | chan, devpriv->mmio + 100);
/* Wait the end of transfer */
- do {
- status = readl(devpriv->mmio + 96);
- } while ((status & 0x100) != 0x100);
+ ret = comedi_timeout(dev, s, insn, apci3xxx_ao_eoc, 0);
+ if (ret)
+ return ret;
}
return insn->n;
@@ -680,7 +706,7 @@ static int apci3xxx_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
+ unsigned int mask = 0;
int ret;
/*
@@ -688,12 +714,13 @@ static int apci3xxx_dio_insn_config(struct comedi_device *dev,
* Port 1 (channels 8-15) are always outputs
* Port 2 (channels 16-23) are programmable i/o
*/
- if (chan < 16) {
- if (data[0] != INSN_CONFIG_DIO_QUERY)
+ if (data[0] != INSN_CONFIG_DIO_QUERY) {
+ /* ports 0 and 1 have fixed directions; reject other instructions */
+ if (chan < 16)
return -EINVAL;
- } else {
- /* changing any channel in port 2 changes the entire port */
- mask = 0xff0000;
+ else
+ /* changing any channel in port 2 changes the entire port */
+ mask = 0xff0000;
}
ret = comedi_dio_insn_config(dev, s, insn, data, mask);
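
The addi_apci_3xxx hunks above (and several drivers that follow, e.g. adl_pci6208, adl_pci9111, adl_pci9118, adq12b, adv_pci1710) all replace open-coded busy-wait loops with the comedi core's comedi_timeout() helper plus a small "end of conversion" callback. A minimal sketch of that pattern is below, assuming the helper behaviour these hunks rely on: the callback samples a status register and returns 0 when the hardware is ready or -EBUSY to keep polling, and comedi_timeout() turns a never-ready condition into an error return. The MYDEV_* registers and mydev_* functions are invented for illustration only.

#include "../comedidev.h"

/* hypothetical register offsets and bits -- not from any real board */
#define MYDEV_START_REG		0x00
#define MYDEV_DATA_REG		0x02
#define MYDEV_STATUS_REG	0x04
#define MYDEV_STATUS_EOC	(1 << 0)

static int mydev_ai_eoc(struct comedi_device *dev,
			struct comedi_subdevice *s,
			struct comedi_insn *insn,
			unsigned long context)
{
	unsigned int status;

	status = inb(dev->iobase + MYDEV_STATUS_REG);
	if (status & MYDEV_STATUS_EOC)
		return 0;	/* conversion finished */
	return -EBUSY;		/* keep polling */
}

static int mydev_ai_insn_read(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn,
			      unsigned int *data)
{
	int ret;
	int i;

	for (i = 0; i < insn->n; i++) {
		outb(0, dev->iobase + MYDEV_START_REG);	/* start conversion */

		/* wait for the conversion to finish (bounded, not forever) */
		ret = comedi_timeout(dev, s, insn, mydev_ai_eoc, 0);
		if (ret)
			return ret;

		data[i] = inb(dev->iobase + MYDEV_DATA_REG) & s->maxdata;
	}

	return insn->n;
}

Compared with the removed loops, the per-driver timeout constants disappear and the error path becomes uniform, which is why most of the conversions below also delete a *_TIMEOUT define.
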
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index 5c1413abc52d..921f6942dfce 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -73,19 +73,17 @@ struct pci6208_private {
unsigned int ao_readback[PCI6208_MAX_AO_CHANNELS];
};
-static int pci6208_ao_wait_for_data_send(struct comedi_device *dev,
- unsigned int timeout)
+static int pci6208_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
unsigned int status;
- while (timeout--) {
- status = inw(dev->iobase + PCI6208_AO_STATUS);
- if ((status & PCI6208_AO_STATUS_DATA_SEND) == 0)
- return 0;
- udelay(1);
- }
-
- return -ETIME;
+ status = inw(dev->iobase + PCI6208_AO_STATUS);
+ if ((status & PCI6208_AO_STATUS_DATA_SEND) == 0)
+ return 0;
+ return -EBUSY;
}
static int pci6208_ao_insn_write(struct comedi_device *dev,
@@ -102,8 +100,8 @@ static int pci6208_ao_insn_write(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
val = data[i];
- /* D/A transfer rate is 2.2us, wait up to 10us */
- ret = pci6208_ao_wait_for_data_send(dev, 10);
+ /* D/A transfer rate is 2.2us */
+ ret = comedi_timeout(dev, s, insn, pci6208_ao_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index 6f622b4de698..5e3cc77a8a0c 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -239,9 +239,6 @@ static int adl_pci7x3x_auto_attach(struct comedi_device *dev,
}
}
- dev_info(dev->class_dev, "%s attached (%d inputs/%d outputs)\n",
- dev->board_name, board->di_nchan, board->do_nchan);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index 363f2e42a27f..a29ceacb966d 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -84,7 +84,6 @@ TODO:
#define PCI9111_RANGE_SETTING_DELAY 10
#define PCI9111_AI_INSTANT_READ_UDELAY_US 2
-#define PCI9111_AI_INSTANT_READ_TIMEOUT 100
/*
* IO address map and bit defines
@@ -490,29 +489,18 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
dev->iobase + PCI9111_AI_RANGE_STAT_REG);
/* Set counter */
-
- switch (async_cmd->stop_src) {
- case TRIG_COUNT:
+ if (async_cmd->stop_src == TRIG_COUNT) {
dev_private->stop_counter =
async_cmd->stop_arg * async_cmd->chanlist_len;
dev_private->stop_is_none = 0;
- break;
-
- case TRIG_NONE:
+ } else { /* TRIG_NONE */
dev_private->stop_counter = 0;
dev_private->stop_is_none = 1;
- break;
-
- default:
- comedi_error(dev, "Invalid stop trigger");
- return -1;
}
/* Set timer pacer */
-
dev_private->scan_delay = 0;
- switch (async_cmd->convert_src) {
- case TRIG_TIMER:
+ if (async_cmd->convert_src == TRIG_TIMER) {
pci9111_trigger_source_set(dev, software);
pci9111_timer_set(dev);
pci9111_fifo_reset(dev);
@@ -528,11 +516,7 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
(async_cmd->convert_arg *
async_cmd->chanlist_len)) - 1;
}
-
- break;
-
- case TRIG_EXT:
-
+ } else { /* TRIG_EXT */
pci9111_trigger_source_set(dev, external);
pci9111_fifo_reset(dev);
pci9111_interrupt_source_set(dev, irq_on_fifo_half_full,
@@ -540,11 +524,6 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
plx9050_interrupt_control(dev_private->lcr_io_base, true, true,
false, true, true);
- break;
-
- default:
- comedi_error(dev, "Invalid convert trigger");
- return -1;
}
dev_private->stop_counter *= (1 + dev_private->scan_delay);
@@ -613,9 +592,8 @@ static irqreturn_t pci9111_interrupt(int irq, void *p_device)
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
comedi_error(dev, PCI9111_DRIVER_NAME " fifo overflow");
outb(0, dev->iobase + PCI9111_INT_CLR_REG);
- pci9111_ai_cancel(dev, s);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -693,20 +671,31 @@ static irqreturn_t pci9111_interrupt(int irq, void *p_device)
}
}
- if ((dev_private->stop_counter == 0) && (!dev_private->stop_is_none)) {
+ if (dev_private->stop_counter == 0 && !dev_private->stop_is_none)
async->events |= COMEDI_CB_EOA;
- pci9111_ai_cancel(dev, s);
- }
outb(0, dev->iobase + PCI9111_INT_CLR_REG);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
+static int pci9111_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + PCI9111_AI_RANGE_STAT_REG);
+ if (status & PCI9111_AI_STAT_FF_EF)
+ return 0;
+ return -EBUSY;
+}
+
static int pci9111_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -717,7 +706,7 @@ static int pci9111_ai_insn_read(struct comedi_device *dev,
unsigned int invert = (maxdata + 1) >> 1;
unsigned int shift = (maxdata == 0xffff) ? 0 : 4;
unsigned int status;
- int timeout;
+ int ret;
int i;
outb(chan, dev->iobase + PCI9111_AI_CHANNEL_REG);
@@ -734,22 +723,12 @@ static int pci9111_ai_insn_read(struct comedi_device *dev,
/* Generate a software trigger */
outb(0, dev->iobase + PCI9111_SOFT_TRIG_REG);
- timeout = PCI9111_AI_INSTANT_READ_TIMEOUT;
-
- while (timeout--) {
- status = inb(dev->iobase + PCI9111_AI_RANGE_STAT_REG);
- /* '1' means FIFO is not empty */
- if (status & PCI9111_AI_STAT_FF_EF)
- goto conversion_done;
+ ret = comedi_timeout(dev, s, insn, pci9111_ai_eoc, 0);
+ if (ret) {
+ pci9111_fifo_reset(dev);
+ return ret;
}
- comedi_error(dev, "A/D read timeout");
- data[i] = 0;
- pci9111_fifo_reset(dev);
- return -ETIME;
-
-conversion_done:
-
data[i] = inw(dev->iobase + PCI9111_AI_FIFO_REG);
data[i] = ((data[i] >> shift) & maxdata) ^ invert;
}
@@ -906,8 +885,6 @@ static int pci9111_auto_attach(struct comedi_device *dev,
s->range_table = &range_digital;
s->insn_bits = pci9111_do_insn_bits;
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index 4bdd9720e9eb..3cfa1756fa6a 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -571,12 +571,26 @@ static int setup_channel_list(struct comedi_device *dev,
return 1; /* we can serve this with scan logic */
}
+static int pci9118_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inl(dev->iobase + PCI9118_ADSTAT);
+ if (status & AdStatus_ADrdy)
+ return 0;
+ return -EBUSY;
+}
+
static int pci9118_insn_read_ai(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct pci9118_private *devpriv = dev->private;
- int n, timeout;
+ int ret;
+ int n;
devpriv->AdControlReg = AdControl_Int & 0xff;
devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg;
@@ -597,19 +611,13 @@ static int pci9118_insn_read_ai(struct comedi_device *dev,
for (n = 0; n < insn->n; n++) {
outw(0, dev->iobase + PCI9118_SOFTTRG); /* start conversion */
udelay(2);
- timeout = 100;
- while (timeout--) {
- if (inl(dev->iobase + PCI9118_ADSTAT) & AdStatus_ADrdy)
- goto conv_finish;
- udelay(1);
- }
- comedi_error(dev, "A/D insn timeout");
- data[n] = 0;
- outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
- return -ETIME;
+ ret = comedi_timeout(dev, s, insn, pci9118_ai_eoc, 0);
+ if (ret) {
+ outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
+ return ret;
+ }
-conv_finish:
if (devpriv->ai16bits) {
data[n] =
(inl(dev->iobase +
@@ -911,8 +919,7 @@ static char pci9118_decode_error_status(struct comedi_device *dev,
}
if (m & devpriv->ai_maskharderr) {
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- pci9118_ai_cancel(dev, s);
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return 1;
}
@@ -948,8 +955,6 @@ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
struct pci9118_private *devpriv = dev->private;
unsigned short sampl;
- s->async->events = 0;
-
if (int_adstat & devpriv->ai_maskerr)
if (pci9118_decode_error_status(dev, s, int_adstat))
return;
@@ -965,8 +970,7 @@ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
sampl & 0x000f,
devpriv->chanlist[s->async->cur_chan]);
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- pci9118_ai_cancel(dev, s);
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
}
@@ -977,16 +981,14 @@ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
/* one scan done */
s->async->cur_chan %= devpriv->ai_n_scanlen;
devpriv->ai_act_scan++;
- if (!(devpriv->ai_neverending))
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- /* all data sampled */
- pci9118_ai_cancel(dev, s);
+ if (!devpriv->ai_neverending) {
+ /* all data sampled? */
+ if (devpriv->ai_act_scan >= devpriv->ai_scans)
s->async->events |= COMEDI_CB_EOA;
- }
+ }
}
- if (s->async->events)
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
@@ -1001,16 +1003,14 @@ static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
if (int_amcc & MASTER_ABORT_INT) {
comedi_error(dev, "AMCC IRQ - MASTER DMA ABORT!");
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- pci9118_ai_cancel(dev, s);
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
if (int_amcc & TARGET_ABORT_INT) {
comedi_error(dev, "AMCC IRQ - TARGET DMA ABORT!");
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- pci9118_ai_cancel(dev, s);
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
if (int_adstat & devpriv->ai_maskerr)
@@ -1048,12 +1048,11 @@ static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
m = m - sampls; /* m= how many samples was transferred */
}
- if (!devpriv->ai_neverending)
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- /* all data sampled */
- pci9118_ai_cancel(dev, s);
+ if (!devpriv->ai_neverending) {
+ /* all data sampled? */
+ if (devpriv->ai_act_scan >= devpriv->ai_scans)
s->async->events |= COMEDI_CB_EOA;
- }
+ }
if (devpriv->dma_doublebuf) { /* switch dma buffers */
devpriv->dma_actbuf = 1 - devpriv->dma_actbuf;
@@ -1066,7 +1065,7 @@ static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
interrupt_pci9118_ai_mode4_switch(dev);
}
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
static irqreturn_t interrupt_pci9118(int irq, void *d)
@@ -1936,28 +1935,6 @@ static struct pci_dev *pci9118_find_pci(struct comedi_device *dev,
return NULL;
}
-static void pci9118_report_attach(struct comedi_device *dev, unsigned int irq)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct pci9118_private *devpriv = dev->private;
- char irqbuf[30];
- char muxbuf[30];
-
- if (irq)
- snprintf(irqbuf, sizeof(irqbuf), "irq %u%s", irq,
- (dev->irq ? "" : " UNAVAILABLE"));
- else
- snprintf(irqbuf, sizeof(irqbuf), "irq DISABLED");
- if (devpriv->usemux)
- snprintf(muxbuf, sizeof(muxbuf), "ext mux %u chans",
- devpriv->usemux);
- else
- snprintf(muxbuf, sizeof(muxbuf), "no ext mux");
- dev_info(dev->class_dev, "%s (pci %s, %s, %sbus master, %s) attached\n",
- dev->board_name, pci_name(pcidev), irqbuf,
- (devpriv->master ? "" : "no "), muxbuf);
-}
-
static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
int master, int ext_mux, int softsshdelay,
int hw_err_mask)
@@ -2113,7 +2090,6 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
break;
}
- pci9118_report_attach(dev, dev->irq);
return 0;
}
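
The adl_pci9111 and adl_pci9118 interrupt-handler changes above (and similar ones in adv_pci1710 and amplc_pci224 below) swap comedi_event() plus an explicit *_ai_cancel() call for a single cfc_handle_events() call. As far as these hunks show, the comedi_fc helper performs the cancel itself when the event mask carries an end-of-acquisition or error flag, so the sketch below only sets flags and delegates. The MYDEV_* names are made up; this illustrates the calling convention, not any particular driver.

#include <linux/interrupt.h>

#include "../comedidev.h"
#include "comedi_fc.h"

/* hypothetical register offsets and bits */
#define MYDEV_DATA_REG		0x00
#define MYDEV_STATUS_REG	0x04
#define MYDEV_STATUS_OVERRUN	(1 << 0)
#define MYDEV_STATUS_EOS	(1 << 1)

static irqreturn_t mydev_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;
	unsigned int status;

	status = inb(dev->iobase + MYDEV_STATUS_REG);

	if (status & MYDEV_STATUS_OVERRUN) {
		/* fatal hardware error: flag it and end the acquisition */
		s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
	} else if (status & MYDEV_STATUS_EOS) {
		comedi_buf_put(s->async, inw(dev->iobase + MYDEV_DATA_REG));
		s->async->events |= COMEDI_CB_BLOCK;
	}

	/*
	 * cfc_handle_events() runs s->cancel() itself when an EOA or error
	 * flag is set, then signals user space -- hence the removal of the
	 * explicit *_ai_cancel()/comedi_event() pairs in the hunks above.
	 */
	cfc_handle_events(dev, s);

	return IRQ_HANDLED;
}
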
diff --git a/drivers/staging/comedi/drivers/adq12b.c b/drivers/staging/comedi/drivers/adq12b.c
index 3190ef7d285e..b4ea37704eaf 100644
--- a/drivers/staging/comedi/drivers/adq12b.c
+++ b/drivers/staging/comedi/drivers/adq12b.c
@@ -94,8 +94,6 @@ If you do not specify any options, they will default to
/* mask of the bit at STINR to check end of conversion */
#define ADQ12B_EOC 0x20
-#define TIMEOUT 20
-
/* available ranges through the PGA gains */
static const struct comedi_lrange range_adq12b_ai_bipolar = {
4, {
@@ -122,19 +120,28 @@ struct adq12b_private {
int last_range;
};
-/*
- * "instructions" read/write data in "one-shot" or "software-triggered"
- * mode.
- */
+static int adq12b_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned char status;
+
+ status = inb(dev->iobase + ADQ12B_STINR);
+ if (status & ADQ12B_EOC)
+ return 0;
+ return -EBUSY;
+}
static int adq12b_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
struct adq12b_private *devpriv = dev->private;
- int n, i;
+ int n;
int range, channel;
unsigned char hi, lo, status;
+ int ret;
/* change channel and range only if it is different from the previous */
range = CR_RANGE(insn->chanspec);
@@ -151,13 +158,9 @@ static int adq12b_ai_rinsn(struct comedi_device *dev,
for (n = 0; n < insn->n; n++) {
/* wait for end of conversion */
- i = 0;
- do {
- /* udelay(1); */
- status = inb(dev->iobase + ADQ12B_STINR);
- status = status & ADQ12B_EOC;
- } while (status == 0 && ++i < TIMEOUT);
- /* } while (++i < 10); */
+ ret = comedi_timeout(dev, s, insn, adq12b_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
hi = inb(dev->iobase + ADQ12B_ADHIG);
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index d9ad2c0fdda2..28ec48548317 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -429,15 +429,26 @@ static void setup_channel_list(struct comedi_device *dev,
outw(devpriv->ai_et_MuxVal, dev->iobase + PCI171x_MUX);
}
-/*
-==============================================================================
-*/
+static int pci171x_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + PCI171x_STATUS);
+ if ((status & Status_FE) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int pci171x_insn_read_ai(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
- int n, timeout;
+ int ret;
+ int n;
#ifdef PCI171x_PARANOIDCHECK
const struct boardtype *this_board = comedi_board(dev);
unsigned int idata;
@@ -453,19 +464,14 @@ static int pci171x_insn_read_ai(struct comedi_device *dev,
for (n = 0; n < insn->n; n++) {
outw(0, dev->iobase + PCI171x_SOFTTRG); /* start conversion */
- /* udelay(1); */
- timeout = 100;
- while (timeout--) {
- if (!(inw(dev->iobase + PCI171x_STATUS) & Status_FE))
- goto conv_finish;
+
+ ret = comedi_timeout(dev, s, insn, pci171x_ai_eoc, 0);
+ if (ret) {
+ outb(0, dev->iobase + PCI171x_CLRFIFO);
+ outb(0, dev->iobase + PCI171x_CLRINT);
+ return ret;
}
- comedi_error(dev, "A/D insn timeout");
- outb(0, dev->iobase + PCI171x_CLRFIFO);
- outb(0, dev->iobase + PCI171x_CLRINT);
- data[n] = 0;
- return -ETIME;
-conv_finish:
#ifdef PCI171x_PARANOIDCHECK
idata = inw(dev->iobase + PCI171x_AD_DATA);
if (this_board->cardtype != TYPE_PCI1713)
@@ -753,17 +759,15 @@ static void interrupt_pci1710_every_sample(void *d)
m = inw(dev->iobase + PCI171x_STATUS);
if (m & Status_FE) {
dev_dbg(dev->class_dev, "A/D FIFO empty (%4x)\n", m);
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
if (m & Status_FF) {
dev_dbg(dev->class_dev,
"A/D FIFO Full status (Fatal Error!) (%4x)\n", m);
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
@@ -782,10 +786,9 @@ static void interrupt_pci1710_every_sample(void *d)
act_chanlist[s->
async->cur_chan] & 0xf000) >>
12);
- pci171x_ai_cancel(dev, s);
s->async->events |=
COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
comedi_buf_put(s->async, sampl & 0x0fff);
@@ -804,9 +807,8 @@ static void interrupt_pci1710_every_sample(void *d)
if ((!devpriv->neverending_ai) &&
(devpriv->ai_act_scan >= devpriv->ai_scans)) {
/* all data sampled */
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
}
@@ -814,7 +816,7 @@ static void interrupt_pci1710_every_sample(void *d)
outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
/*
@@ -842,10 +844,9 @@ static int move_block_from_fifo(struct comedi_device *dev,
(devpriv->act_chanlist[j] & 0xf000) >> 12,
i, j, devpriv->ai_act_scan, n, turn,
sampl);
- pci171x_ai_cancel(dev, s);
s->async->events |=
COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return 1;
}
comedi_buf_put(s->async, sampl & 0x0fff);
@@ -877,17 +878,15 @@ static void interrupt_pci1710_half_fifo(void *d)
m = inw(dev->iobase + PCI171x_STATUS);
if (!(m & Status_FH)) {
dev_dbg(dev->class_dev, "A/D FIFO not half full! (%4x)\n", m);
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
if (m & Status_FF) {
dev_dbg(dev->class_dev,
"A/D FIFO Full status (Fatal Error!) (%4x)\n", m);
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
@@ -907,14 +906,13 @@ static void interrupt_pci1710_half_fifo(void *d)
if (!devpriv->neverending_ai)
if (devpriv->ai_act_scan >= devpriv->ai_scans) { /* all data
sampled */
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
/*
@@ -1354,9 +1352,6 @@ static int pci1710_auto_attach(struct comedi_device *dev,
subdev++;
}
- dev_info(dev->class_dev, "%s attached, irq %sabled\n",
- dev->board_name, dev->irq ? "en" : "dis");
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index 72394267ddfe..07b107d1ab33 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -280,8 +280,6 @@ static int pci1723_auto_attach(struct comedi_device *dev,
pci1723_reset(dev);
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index d4bd61d84daf..2d966a87f2e8 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -1130,10 +1130,12 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
for (i = 0; i < MAX_DIO_SUBDEVG; i++)
for (j = 0; j < this_board->sdio[i].regs; j++) {
s = &dev->subdevices[subdev];
- subdev_8255_init(dev, s, NULL,
- dev->iobase +
- this_board->sdio[i].addr +
- SIZE_8255 * j);
+ ret = subdev_8255_init(dev, s, NULL,
+ dev->iobase +
+ this_board->sdio[i].addr +
+ SIZE_8255 * j);
+ if (ret)
+ return ret;
subdev++;
}
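
The adv_pci_dio hunk above starts checking the value returned by subdev_8255_init(); the amplc_pc236 and amplc_pci230 changes further down do the same, and several attach routines are also switched from returning 1 to returning 0 on success. A compact sketch of the resulting attach shape follows, under the assumption that subdev_8255_init() returns 0 or a negative errno; MYDEV_8255_BASE is an invented offset and the bus/I-O region setup is omitted.

#include "../comedidev.h"
#include "8255.h"

#define MYDEV_8255_BASE		0x00	/* invented offset */

static int mydev_auto_attach(struct comedi_device *dev,
			     unsigned long context)
{
	struct comedi_subdevice *s;
	int ret;

	/* (bus enable / request of dev->iobase omitted for brevity) */

	ret = comedi_alloc_subdevices(dev, 1);
	if (ret)
		return ret;

	/* digital i/o subdevice (8255) */
	s = &dev->subdevices[0];
	ret = subdev_8255_init(dev, s, NULL, dev->iobase + MYDEV_8255_BASE);
	if (ret)
		return ret;	/* propagate, don't silently ignore */

	return 0;	/* comedi expects 0, not 1, on success */
}
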
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
index 68c3fb3226ca..324746b14931 100644
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ b/drivers/staging/comedi/drivers/aio_aio12_8.c
@@ -101,14 +101,27 @@ struct aio12_8_private {
unsigned int ao_readback[4];
};
+static int aio_aio12_8_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + AIO12_8_STATUS_REG);
+ if (status & AIO12_8_STATUS_ADC_EOC)
+ return 0;
+ return -EBUSY;
+}
+
static int aio_aio12_8_ai_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int val;
unsigned char control;
+ int ret;
int n;
/*
@@ -122,20 +135,13 @@ static int aio_aio12_8_ai_read(struct comedi_device *dev,
inb(dev->iobase + AIO12_8_STATUS_REG);
for (n = 0; n < insn->n; n++) {
- int timeout = 5;
-
/* Setup and start conversion */
outb(control, dev->iobase + AIO12_8_ADC_REG);
/* Wait for conversion to complete */
- do {
- val = inb(dev->iobase + AIO12_8_STATUS_REG);
- timeout--;
- if (timeout == 0) {
- dev_err(dev->class_dev, "ADC timeout\n");
- return -ETIMEDOUT;
- }
- } while (!(val & AIO12_8_STATUS_ADC_EOC));
+ ret = comedi_timeout(dev, s, insn, aio_aio12_8_ai_eoc, 0);
+ if (ret)
+ return ret;
data[n] = inw(dev->iobase + AIO12_8_ADC_REG) & s->maxdata;
}
@@ -247,9 +253,6 @@ static int aio_aio12_8_attach(struct comedi_device *dev,
/* 8254 counter/timer subdevice */
s->type = COMEDI_SUBD_UNUSED;
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/aio_iiro_16.c b/drivers/staging/comedi/drivers/aio_iiro_16.c
index 22b3dda135ff..781104aa533e 100644
--- a/drivers/staging/comedi/drivers/aio_iiro_16.c
+++ b/drivers/staging/comedi/drivers/aio_iiro_16.c
@@ -98,7 +98,7 @@ static int aio_iiro_16_attach(struct comedi_device *dev,
s->range_table = &range_digital;
s->insn_bits = aio_iiro_16_dio_insn_bits_read;
- return 1;
+ return 0;
}
static struct comedi_driver aio_iiro_16_driver = {
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index 9c9559ffc643..818a0d7e3d58 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -1208,7 +1208,7 @@ int amplc_dio200_common_attach(struct comedi_device *dev, unsigned int irq,
"warning! irq %u unavailable!\n", irq);
}
}
- dev_info(dev->class_dev, "attached\n");
+
return 0;
}
EXPORT_SYMBOL_GPL(amplc_dio200_common_attach);
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index 31734e1142fd..b21d7b48f1da 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -368,32 +368,6 @@ static irqreturn_t pc236_interrupt(int irq, void *d)
return IRQ_RETVAL(handled);
}
-static void pc236_report_attach(struct comedi_device *dev, unsigned int irq)
-{
- const struct pc236_board *thisboard = comedi_board(dev);
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- char tmpbuf[60];
- int tmplen;
-
- if (is_isa_board(thisboard))
- tmplen = scnprintf(tmpbuf, sizeof(tmpbuf),
- "(base %#lx) ", dev->iobase);
- else if (is_pci_board(thisboard))
- tmplen = scnprintf(tmpbuf, sizeof(tmpbuf),
- "(pci %s) ", pci_name(pcidev));
- else
- tmplen = 0;
- if (irq)
- tmplen += scnprintf(&tmpbuf[tmplen], sizeof(tmpbuf) - tmplen,
- "(irq %u%s) ", irq,
- (dev->irq ? "" : " UNAVAILABLE"));
- else
- tmplen += scnprintf(&tmpbuf[tmplen], sizeof(tmpbuf) - tmplen,
- "(no irq) ");
- dev_info(dev->class_dev, "%s %sattached\n",
- dev->board_name, tmpbuf);
-}
-
static int pc236_common_attach(struct comedi_device *dev, unsigned long iobase,
unsigned int irq, unsigned long req_irq_flags)
{
@@ -411,10 +385,9 @@ static int pc236_common_attach(struct comedi_device *dev, unsigned long iobase,
s = &dev->subdevices[0];
/* digital i/o subdevice (8255) */
ret = subdev_8255_init(dev, s, NULL, iobase);
- if (ret < 0) {
- dev_err(dev->class_dev, "error! out of memory!\n");
+ if (ret)
return ret;
- }
+
s = &dev->subdevices[1];
dev->read_subdev = s;
s->type = COMEDI_SUBD_UNUSED;
@@ -434,8 +407,8 @@ static int pc236_common_attach(struct comedi_device *dev, unsigned long iobase,
s->cancel = pc236_intr_cancel;
}
}
- pc236_report_attach(dev, irq);
- return 1;
+
+ return 0;
}
static int pc236_pci_common_attach(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index 5b4b5ab34e2e..7c10d28d2784 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -94,8 +94,6 @@ static int pc263_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* read initial relay state */
s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
- dev_info(dev->class_dev, "%s (base %#lx) attached\n", dev->board_name,
- dev->iobase);
return 0;
}
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index ae4048a624fa..29e01e280039 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -533,9 +533,8 @@ static void pci224_ao_start(struct comedi_device *dev,
set_bit(AO_CMD_STARTED, &devpriv->state);
if (!devpriv->ao_stop_continuous && devpriv->ao_stop_count == 0) {
/* An empty acquisition! */
- pci224_ao_stop(dev, s);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
} else {
/* Enable interrupts. */
spin_lock_irqsave(&devpriv->ao_spinlock, flags);
@@ -585,9 +584,8 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
room = PCI224_FIFO_ROOM_EMPTY;
if (!devpriv->ao_stop_continuous && devpriv->ao_stop_count == 0) {
/* FIFO empty at end of counted acquisition. */
- pci224_ao_stop(dev, s);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
break;
@@ -605,9 +603,8 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
/* FIFO is less than half-full. */
if (num_scans == 0) {
/* Nothing left to put in the FIFO. */
- pci224_ao_stop(dev, s);
- s->async->events |= COMEDI_CB_OVERFLOW;
dev_err(dev->class_dev, "AO buffer underrun\n");
+ s->async->events |= COMEDI_CB_OVERFLOW;
}
}
/* Determine how many new scans can be put in the FIFO. */
@@ -670,9 +667,8 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
PCI224_DACCON_TRIG_MASK);
outw(devpriv->daccon, dev->iobase + PCI224_DACCON);
}
- if (s->async->events)
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
/*
@@ -1237,20 +1233,6 @@ static struct pci_dev *pci224_find_pci_dev(struct comedi_device *dev,
return NULL;
}
-static void pci224_report_attach(struct comedi_device *dev, unsigned int irq)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- char tmpbuf[30];
-
- if (irq)
- snprintf(tmpbuf, sizeof(tmpbuf), "irq %u%s", irq,
- (dev->irq ? "" : " UNAVAILABLE"));
- else
- snprintf(tmpbuf, sizeof(tmpbuf), "no irq");
- dev_info(dev->class_dev, "%s (pci %s) (%s) attached\n",
- dev->board_name, pci_name(pcidev), tmpbuf);
-}
-
/*
* Common part of attach and auto_attach.
*/
@@ -1399,8 +1381,7 @@ static int pci224_attach_common(struct comedi_device *dev,
}
}
- pci224_report_attach(dev, irq);
- return 1;
+ return 0;
}
static int pci224_attach(struct comedi_device *dev, struct comedi_devconfig *it)
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index c08dfbbe4062..99e60835dc4a 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -799,19 +799,29 @@ static void pci230_cancel_ct(struct comedi_device *dev, unsigned int ct)
/* Counter ct, 8254 mode 1, initial count not written. */
}
-/*
- * COMEDI_SUBD_AI instruction;
- */
+static int pci230_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + PCI230_ADCCON);
+ if ((status & PCI230_ADC_FIFO_EMPTY) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int pci230_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
struct pci230_private *devpriv = dev->private;
- unsigned int n, i;
+ unsigned int n;
unsigned int chan, range, aref;
unsigned int gainshift;
- unsigned int status;
unsigned short adccon, adcen;
+ int ret;
/* Unpack channel and range. */
chan = CR_CHAN(insn->chanspec);
@@ -883,18 +893,10 @@ static int pci230_ai_rinsn(struct comedi_device *dev,
i8254_set_mode(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, 2,
I8254_MODE1);
-#define TIMEOUT 100
/* wait for conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- status = inw(dev->iobase + PCI230_ADCCON);
- if (!(status & PCI230_ADC_FIFO_EMPTY))
- break;
- udelay(1);
- }
- if (i == TIMEOUT) {
- dev_err(dev->class_dev, "timeout\n");
- return -ETIMEDOUT;
- }
+ ret = comedi_timeout(dev, s, insn, pci230_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
data[n] = pci230_ai_read(dev);
@@ -2762,14 +2764,14 @@ static int pci230_attach_common(struct comedi_device *dev,
/* digital i/o subdevice */
if (thisboard->have_dio) {
rc = subdev_8255_init(dev, s, NULL,
- (devpriv->iobase1 + PCI230_PPI_X_BASE));
- if (rc < 0)
+ devpriv->iobase1 + PCI230_PPI_X_BASE);
+ if (rc)
return rc;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
- dev_info(dev->class_dev, "attached\n");
- return 1;
+
+ return 0;
}
static int pci230_attach(struct comedi_device *dev, struct comedi_devconfig *it)
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index be28e6cbea20..93ed03ee416a 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -84,8 +84,6 @@ static int pci263_auto_attach(struct comedi_device *dev,
/* read initial relay state */
s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
- dev_info(dev->class_dev, "%s (pci %s) attached\n", dev->board_name,
- pci_name(pci_dev));
return 0;
}
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index 5034f663eec9..e03dd6e71415 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -1,34 +1,33 @@
/*
- comedi/drivers/c6xdigio.c
-
- Hardware driver for Mechatronic Systems Inc. C6x_DIGIO DSP daughter card.
- (http://robot0.ge.uiuc.edu/~spong/mecha/)
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999 Dan Block
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * c6xdigio.c
+ * Hardware driver for Mechatronic Systems Inc. C6x_DIGIO DSP daughter card.
+ * http://web.archive.org/web/%2A/http://robot0.ge.uiuc.edu/~spong/mecha/
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1999 Dan Block
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
-/*
-Driver: c6xdigio
-Description: Mechatronic Systems Inc. C6x_DIGIO DSP daughter card
-Author: Dan Block
-Status: unknown
-Devices: [Mechatronic Systems Inc.] C6x_DIGIO DSP daughter card (c6xdigio)
-Updated: Sun Nov 20 20:18:34 EST 2005
-
-This driver will not work with a 2.4 kernel.
-http://robot0.ge.uiuc.edu/~spong/mecha/
-*/
+/*
+ * Driver: c6xdigio
+ * Description: Mechatronic Systems Inc. C6x_DIGIO DSP daughter card
+ * Author: Dan Block
+ * Status: unknown
+ * Devices: (Mechatronic Systems Inc.) C6x_DIGIO DSP daughter card [c6xdigio]
+ * Updated: Sun Nov 20 20:18:34 EST 2005
+ *
+ * Configuration Options:
+ * [0] - base address
+ */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -43,309 +42,194 @@ http://robot0.ge.uiuc.edu/~spong/mecha/
#include "../comedidev.h"
-static u8 ReadByteFromHwPort(unsigned long addr)
-{
- u8 result = inb(addr);
- return result;
-}
-
-static void WriteByteToHwPort(unsigned long addr, u8 val)
-{
- outb_p(val, addr);
-}
-
-#define C6XDIGIO_SIZE 3
-
/*
- * port offsets
+ * Register I/O map
*/
-#define C6XDIGIO_PARALLEL_DATA 0
-#define C6XDIGIO_PARALLEL_STATUS 1
-#define C6XDIGIO_PARALLEL_CONTROL 2
-struct pwmbitstype {
- unsigned sb0:2;
- unsigned sb1:2;
- unsigned sb2:2;
- unsigned sb3:2;
- unsigned sb4:2;
-};
-union pwmcmdtype {
- unsigned cmd; /* assuming here that int is 32bit */
- struct pwmbitstype bits;
-};
-struct encbitstype {
- unsigned sb0:3;
- unsigned sb1:3;
- unsigned sb2:3;
- unsigned sb3:3;
- unsigned sb4:3;
- unsigned sb5:3;
- unsigned sb6:3;
- unsigned sb7:3;
-};
-union encvaluetype {
- unsigned value;
- struct encbitstype bits;
-};
+#define C6XDIGIO_DATA_REG 0x00
+#define C6XDIGIO_DATA_CHAN(x) (((x) + 1) << 4)
+#define C6XDIGIO_DATA_PWM (1 << 5)
+#define C6XDIGIO_DATA_ENCODER (1 << 6)
+#define C6XDIGIO_STATUS_REG 0x01
+#define C6XDIGIO_CTRL_REG 0x02
#define C6XDIGIO_TIME_OUT 20
-static void C6X_pwmInit(unsigned long baseAddr)
+static int c6xdigio_chk_status(struct comedi_device *dev, unsigned long context)
{
+ unsigned int status;
int timeout = 0;
- WriteByteToHwPort(baseAddr, 0x70);
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0)
- && (timeout < C6XDIGIO_TIME_OUT)) {
+ do {
+ status = inb(dev->iobase + C6XDIGIO_STATUS_REG);
+ if ((status & 0x80) != context)
+ return 0;
timeout++;
- }
+ } while (timeout < C6XDIGIO_TIME_OUT);
- WriteByteToHwPort(baseAddr, 0x74);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
+ return -EBUSY;
+}
- WriteByteToHwPort(baseAddr, 0x70);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x0)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
+static int c6xdigio_write_data(struct comedi_device *dev,
+ unsigned int val, unsigned int status)
+{
+ outb_p(val, dev->iobase + C6XDIGIO_DATA_REG);
+ return c6xdigio_chk_status(dev, status);
+}
- WriteByteToHwPort(baseAddr, 0x0);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
+static int c6xdigio_get_encoder_bits(struct comedi_device *dev,
+ unsigned int *bits,
+ unsigned int cmd,
+ unsigned int status)
+{
+ unsigned int val;
+
+ val = inb(dev->iobase + C6XDIGIO_STATUS_REG);
+ val >>= 3;
+ val &= 0x07;
+
+ *bits = val;
+ return c6xdigio_write_data(dev, cmd, status);
}
-static void C6X_pwmOutput(unsigned long baseAddr, unsigned channel, int value)
+static void c6xdigio_pwm_write(struct comedi_device *dev,
+ unsigned int chan, unsigned int val)
{
- unsigned ppcmd;
- union pwmcmdtype pwm;
- int timeout = 0;
- unsigned tmp;
-
- pwm.cmd = value;
- if (pwm.cmd > 498)
- pwm.cmd = 498;
- if (pwm.cmd < 2)
- pwm.cmd = 2;
-
- if (channel == 0) {
- ppcmd = 0x28;
- } else { /* if channel == 1 */
- ppcmd = 0x30;
- } /* endif */
-
- WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb0);
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ unsigned int cmd = C6XDIGIO_DATA_PWM | C6XDIGIO_DATA_CHAN(chan);
+ unsigned int bits;
+
+ if (val > 498)
+ val = 498;
+ if (val < 2)
+ val = 2;
+
+ bits = (val >> 0) & 0x03;
+ c6xdigio_write_data(dev, cmd | bits | (0 << 2), 0x00);
+ bits = (val >> 2) & 0x03;
+ c6xdigio_write_data(dev, cmd | bits | (1 << 2), 0x80);
+ bits = (val >> 4) & 0x03;
+ c6xdigio_write_data(dev, cmd | bits | (0 << 2), 0x00);
+ bits = (val >> 6) & 0x03;
+ c6xdigio_write_data(dev, cmd | bits | (1 << 2), 0x80);
+ bits = (val >> 8) & 0x03;
+ c6xdigio_write_data(dev, cmd | bits | (0 << 2), 0x00);
+
+ c6xdigio_write_data(dev, 0x00, 0x80);
+}
- WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb1 + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+static int c6xdigio_encoder_read(struct comedi_device *dev,
+ unsigned int chan)
+{
+ unsigned int cmd = C6XDIGIO_DATA_ENCODER | C6XDIGIO_DATA_CHAN(chan);
+ unsigned int val = 0;
+ unsigned int bits;
- WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb2);
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_write_data(dev, cmd, 0x00);
- WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb3 + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
+ val |= (bits << 0);
- WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb4);
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
+ val |= (bits << 3);
- WriteByteToHwPort(baseAddr, 0x0);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
+ val |= (bits << 6);
-}
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
+ val |= (bits << 9);
-static int C6X_encInput(unsigned long baseAddr, unsigned channel)
-{
- unsigned ppcmd;
- union encvaluetype enc;
- int timeout = 0;
- int tmp;
-
- enc.value = 0;
- if (channel == 0)
- ppcmd = 0x48;
- else
- ppcmd = 0x50;
-
- WriteByteToHwPort(baseAddr, ppcmd);
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
+ val |= (bits << 12);
- enc.bits.sb0 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb1 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb2 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb3 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb4 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb5 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb6 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb7 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
+ val |= (bits << 15);
- WriteByteToHwPort(baseAddr, 0x0);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
+ val |= (bits << 18);
+
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
+ val |= (bits << 21);
+
+ c6xdigio_write_data(dev, 0x00, 0x80);
- return enc.value ^ 0x800000;
+ return val;
}
-static void C6X_encResetAll(unsigned long baseAddr)
+static int c6xdigio_pwm_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned timeout = 0;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val = (s->state >> (16 * chan)) & 0xffff;
+ int i;
- WriteByteToHwPort(baseAddr, 0x68);
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
- WriteByteToHwPort(baseAddr, 0x6C);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
- WriteByteToHwPort(baseAddr, 0x68);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x0)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
- WriteByteToHwPort(baseAddr, 0x0);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+ c6xdigio_pwm_write(dev, chan, val);
}
+
+ /*
+ * There are only 2 PWM channels and they have a maxdata of 500.
+ * Instead of allocating private data to save the values for
+ * readback, this driver just packs the values for the two
+ * channels into s->state.
+ */
+ s->state &= ~(0xffff << (16 * chan));
+ s->state |= (val << (16 * chan));
+
+ return insn->n;
}
-static int c6xdigio_pwmo_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int c6xdigio_pwm_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
int i;
- int chan = CR_CHAN(insn->chanspec);
- for (i = 0; i < insn->n; i++) {
- C6X_pwmOutput(dev->iobase, chan, data[i]);
- /* devpriv->ao_readback[chan] = data[i]; */
- }
- return i;
+ val = (s->state >> (16 * chan)) & 0xffff;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = val;
+
+ return insn->n;
}
-static int c6xdigio_ei_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int c6xdigio_encoder_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- int chan = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+ int i;
- for (n = 0; n < insn->n; n++)
- data[n] = (C6X_encInput(dev->iobase, chan) & 0xffffff);
+ for (i = 0; i < insn->n; i++) {
+ val = c6xdigio_encoder_read(dev, chan);
+
+ /* munge two's complement value to offset binary */
+ data[i] = comedi_offset_munge(s, val);
+ }
- return n;
+ return insn->n;
}
-static void board_init(struct comedi_device *dev)
+static void c6xdigio_init(struct comedi_device *dev)
{
- C6X_pwmInit(dev->iobase);
- C6X_encResetAll(dev->iobase);
+ /* Initialize the PWM */
+ c6xdigio_write_data(dev, 0x70, 0x00);
+ c6xdigio_write_data(dev, 0x74, 0x80);
+ c6xdigio_write_data(dev, 0x70, 0x00);
+ c6xdigio_write_data(dev, 0x00, 0x80);
+
+ /* Reset the encoders */
+ c6xdigio_write_data(dev, 0x68, 0x00);
+ c6xdigio_write_data(dev, 0x6c, 0x80);
+ c6xdigio_write_data(dev, 0x68, 0x00);
+ c6xdigio_write_data(dev, 0x00, 0x80);
}
static const struct pnp_device_id c6xdigio_pnp_tbl[] = {
@@ -367,7 +251,7 @@ static int c6xdigio_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- ret = comedi_request_region(dev, it->options[0], C6XDIGIO_SIZE);
+ ret = comedi_request_region(dev, it->options[0], 0x03);
if (ret)
return ret;
@@ -380,27 +264,26 @@ static int c6xdigio_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
/* pwm output subdevice */
- s->type = COMEDI_SUBD_AO; /* Not sure what to put here */
- s->subdev_flags = SDF_WRITEABLE;
- s->n_chan = 2;
- /* s->trig[0] = c6xdigio_pwmo; */
- s->insn_write = c6xdigio_pwmo_insn_write;
- s->maxdata = 500;
- s->range_table = &range_bipolar10; /* A suitable lie */
+ s->type = COMEDI_SUBD_PWM;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 2;
+ s->maxdata = 500;
+ s->range_table = &range_unknown;
+ s->insn_write = c6xdigio_pwm_insn_write;
+ s->insn_read = c6xdigio_pwm_insn_read;
s = &dev->subdevices[1];
/* encoder (counter) subdevice */
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
- s->n_chan = 2;
- /* s->trig[0] = c6xdigio_ei; */
- s->insn_read = c6xdigio_ei_insn_read;
- s->maxdata = 0xffffff;
- s->range_table = &range_unknown;
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
+ s->n_chan = 2;
+ s->maxdata = 0xffffff;
+ s->range_table = &range_unknown;
+ s->insn_read = c6xdigio_encoder_insn_read;
/* I will call this init anyway but more than likely the DSP board */
/* will not be connected when device driver is loaded. */
- board_init(dev);
+ c6xdigio_init(dev);
return 0;
}
@@ -420,5 +303,5 @@ static struct comedi_driver c6xdigio_driver = {
module_comedi_driver(c6xdigio_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for the C6x_DIGIO DSP daughter card");
MODULE_LICENSE("GPL");
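
One detail of the rewritten c6xdigio PWM subdevice worth spelling out: instead of allocating private data for readback, the two 16-bit PWM values are packed into s->state, channel 0 in bits 0-15 and channel 1 in bits 16-31, so updating one channel must clear its own field (with the complemented mask) before OR-ing in the new value. The helpers below are illustrative only and not part of the driver.

#include "../comedidev.h"

/* illustrative only: pack/unpack the per-channel PWM readback values */
static void pwm_state_set(struct comedi_subdevice *s,
			  unsigned int chan, unsigned int val)
{
	s->state &= ~(0xffff << (16 * chan));	/* clear this channel's field */
	s->state |= (val & 0xffff) << (16 * chan);
}

static unsigned int pwm_state_get(struct comedi_subdevice *s,
				  unsigned int chan)
{
	return (s->state >> (16 * chan)) & 0xffff;
}
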
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 64d5f291553f..645fcb0cf17d 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -95,10 +95,17 @@ static const struct comedi_lrange das16cs_ai_range = {
}
};
-static irqreturn_t das16cs_interrupt(int irq, void *d)
+static int das16cs_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- /* struct comedi_device *dev = d; */
- return IRQ_HANDLED;
+ unsigned int status;
+
+ status = inw(dev->iobase + DAS16CS_MISC1);
+ if (status & 0x0080)
+ return 0;
+ return -EBUSY;
}
static int das16cs_ai_rinsn(struct comedi_device *dev,
@@ -109,8 +116,8 @@ static int das16cs_ai_rinsn(struct comedi_device *dev,
int chan = CR_CHAN(insn->chanspec);
int range = CR_RANGE(insn->chanspec);
int aref = CR_AREF(insn->chanspec);
+ int ret;
int i;
- int to;
outw(chan, dev->iobase + DAS16CS_DIO_MUX);
@@ -138,132 +145,16 @@ static int das16cs_ai_rinsn(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
outw(0, dev->iobase + DAS16CS_ADC_DATA);
-#define TIMEOUT 1000
- for (to = 0; to < TIMEOUT; to++) {
- if (inw(dev->iobase + DAS16CS_MISC1) & 0x0080)
- break;
- }
- if (to == TIMEOUT) {
- dev_dbg(dev->class_dev, "cb_das16_cs: ai timeout\n");
- return -ETIME;
- }
+ ret = comedi_timeout(dev, s, insn, das16cs_ai_eoc, 0);
+ if (ret)
+ return ret;
+
data[i] = inw(dev->iobase + DAS16CS_ADC_DATA);
}
return i;
}
-static int das16cs_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- return -EINVAL;
-}
-
-static int das16cs_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
- int tmp;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_EXT);
- err |= cfc_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT);
- err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
- err |= cfc_check_trigger_is_unique(cmd->convert_src);
- err |= cfc_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
-
-#define MAX_SPEED 10000 /* in nanoseconds */
-#define MIN_SPEED 1000000000 /* in nanoseconds */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
- MAX_SPEED);
- err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg,
- MIN_SPEED);
- } else {
- /* external trigger */
- /* should be level/edge, hi/lo specification here */
- /* should specify multiple external triggers */
- err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 9);
- }
- if (cmd->convert_src == TRIG_TIMER) {
- err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
- MAX_SPEED);
- err |= cfc_check_trigger_arg_max(&cmd->convert_arg,
- MIN_SPEED);
- } else {
- /* external trigger */
- /* see above */
- err |= cfc_check_trigger_arg_max(&cmd->convert_arg, 9);
- }
-
- err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= cfc_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff);
- else /* TRIG_NONE */
- err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- unsigned int div1 = 0, div2 = 0;
-
- tmp = cmd->scan_begin_arg;
- i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
- &div1, &div2,
- &cmd->scan_begin_arg, cmd->flags);
- if (tmp != cmd->scan_begin_arg)
- err++;
- }
- if (cmd->convert_src == TRIG_TIMER) {
- unsigned int div1 = 0, div2 = 0;
-
- tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
- &div1, &div2,
- &cmd->scan_begin_arg, cmd->flags);
- if (tmp != cmd->convert_arg)
- err++;
- if (cmd->scan_begin_src == TRIG_TIMER &&
- cmd->scan_begin_arg <
- cmd->convert_arg * cmd->scan_end_arg) {
- cmd->scan_begin_arg =
- cmd->convert_arg * cmd->scan_end_arg;
- err++;
- }
- }
-
- if (err)
- return 4;
-
- return 0;
-}
-
static int das16cs_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -401,10 +292,6 @@ static int das16cs_auto_attach(struct comedi_device *dev,
dev->iobase = link->resource[0]->start;
link->priv = dev;
- ret = pcmcia_request_irq(link, das16cs_interrupt);
- if (ret)
- return ret;
- dev->irq = link->irq;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -415,7 +302,6 @@ static int das16cs_auto_attach(struct comedi_device *dev,
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* analog input subdevice */
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
@@ -424,8 +310,6 @@ static int das16cs_auto_attach(struct comedi_device *dev,
s->range_table = &das16cs_ai_range;
s->len_chanlist = 16;
s->insn_read = das16cs_ai_rinsn;
- s->do_cmd = das16cs_ai_cmd;
- s->do_cmdtest = das16cs_ai_cmdtest;
s = &dev->subdevices[1];
/* analog output subdevice */
@@ -451,10 +335,6 @@ static int das16cs_auto_attach(struct comedi_device *dev,
s->insn_bits = das16cs_dio_insn_bits;
s->insn_config = das16cs_dio_insn_config;
- dev_info(dev->class_dev, "%s: %s, I/O base=0x%04lx, irq=%u\n",
- dev->driver->driver_name, dev->board_name,
- dev->iobase, dev->irq);
-
return 0;
}
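The cb_das16_cs hunks above, like most of the driver changes that follow, replace an open-coded busy-wait loop with comedi_timeout() and a small per-driver end-of-conversion callback. The sketch below shows the bare pattern as it appears in this series; it assumes the comedi core headers (it will not build outside the kernel tree), and the example_* functions and EXAMPLE_* registers are placeholder names, not defines from any of these drivers.

/*
 * Minimal sketch of the comedi_timeout() conversion pattern used in
 * this series.  EXAMPLE_STATUS_REG, EXAMPLE_EOC_BIT, EXAMPLE_TRIGGER_REG
 * and EXAMPLE_DATA_REG are placeholders for a driver's own registers.
 */
static int example_ai_eoc(struct comedi_device *dev,
			  struct comedi_subdevice *s,
			  struct comedi_insn *insn,
			  unsigned long context)
{
	unsigned int status;

	status = inb(dev->iobase + EXAMPLE_STATUS_REG);
	if (status & EXAMPLE_EOC_BIT)
		return 0;	/* conversion finished */
	return -EBUSY;		/* still busy, the core keeps polling */
}

static int example_ai_insn_read(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned int *data)
{
	int ret;
	int i;

	for (i = 0; i < insn->n; i++) {
		/* trigger the conversion, then wait for it to complete */
		outb(0, dev->iobase + EXAMPLE_TRIGGER_REG);

		ret = comedi_timeout(dev, s, insn, example_ai_eoc, 0);
		if (ret)
			return ret;	/* core reports the timeout error */

		data[i] = inw(dev->iobase + EXAMPLE_DATA_REG);
	}

	return insn->n;
}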
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 9819be092f8d..83a265f3408c 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -376,6 +376,20 @@ static inline unsigned int cal_enable_bits(struct comedi_device *dev)
return CAL_EN_BIT | CAL_SRC_BITS(devpriv->calibration_source);
}
+static int cb_pcidas_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct cb_pcidas_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = inw(devpriv->control_status + ADCMUX_CONT);
+ if (status & EOC)
+ return 0;
+ return -EBUSY;
+}
+
static int cb_pcidas_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -385,7 +399,8 @@ static int cb_pcidas_ai_rinsn(struct comedi_device *dev,
unsigned int range = CR_RANGE(insn->chanspec);
unsigned int aref = CR_AREF(insn->chanspec);
unsigned int bits;
- int n, i;
+ int ret;
+ int n;
/* enable calibration input if appropriate */
if (insn->chanspec & CR_ALT_SOURCE) {
@@ -415,13 +430,9 @@ static int cb_pcidas_ai_rinsn(struct comedi_device *dev,
outw(0, devpriv->adc_fifo + ADCDATA);
/* wait for conversion to end */
- /* return -ETIMEDOUT if there is a timeout */
- for (i = 0; i < 10000; i++) {
- if (inw(devpriv->control_status + ADCMUX_CONT) & EOC)
- break;
- }
- if (i == 10000)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, cb_pcidas_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
data[n] = inw(devpriv->adc_fifo + ADCDATA);
@@ -1006,9 +1017,9 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
/* set start trigger and burst mode */
bits = 0;
- if (cmd->start_src == TRIG_NOW)
+ if (cmd->start_src == TRIG_NOW) {
bits |= SW_TRIGGER;
- else if (cmd->start_src == TRIG_EXT) {
+ } else { /* TRIG_EXT */
bits |= EXT_TRIGGER | TGEN | XTRCL;
if (thisboard->is_1602) {
if (cmd->start_arg & CR_INVERT)
@@ -1016,9 +1027,6 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
if (cmd->start_arg & CR_EDGE)
bits |= TGSEL;
}
- } else {
- comedi_error(dev, "bug!");
- return -1;
}
if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1)
bits |= BURSTE;
@@ -1269,8 +1277,6 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
unsigned int num_points;
unsigned long flags;
- async->events = 0;
-
if (status & DAEMI) {
/* clear dac empty interrupt latch */
spin_lock_irqsave(&dev->spinlock, flags);
@@ -1282,7 +1288,6 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
(cmd->stop_src == TRIG_COUNT
&& devpriv->ao_count)) {
comedi_error(dev, "dac fifo underflow");
- cb_pcidas_ao_cancel(dev, s);
async->events |= COMEDI_CB_ERROR;
}
async->events |= COMEDI_CB_EOA;
@@ -1312,7 +1317,7 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
spin_unlock_irqrestore(&dev->spinlock, flags);
}
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
@@ -1332,7 +1337,6 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
return IRQ_NONE;
async = s->async;
- async->events = 0;
s5933_status = inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR);
@@ -1364,10 +1368,8 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
cfc_write_array_to_buffer(s, devpriv->ai_buffer,
num_samples * sizeof(short));
devpriv->count -= num_samples;
- if (async->cmd.stop_src == TRIG_COUNT && devpriv->count == 0) {
+ if (async->cmd.stop_src == TRIG_COUNT && devpriv->count == 0)
async->events |= COMEDI_CB_EOA;
- cb_pcidas_cancel(dev, s);
- }
/* clear half-full interrupt latch */
spin_lock_irqsave(&dev->spinlock, flags);
outw(devpriv->adc_fifo_bits | INT,
@@ -1384,7 +1386,6 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
if (async->cmd.stop_src == TRIG_COUNT &&
--devpriv->count == 0) {
/* end of acquisition */
- cb_pcidas_cancel(dev, s);
async->events |= COMEDI_CB_EOA;
break;
}
@@ -1411,11 +1412,10 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
outw(devpriv->adc_fifo_bits | LADFUL,
devpriv->control_status + INT_ADCFIFO);
spin_unlock_irqrestore(&dev->spinlock, flags);
- cb_pcidas_cancel(dev, s);
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
}
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -1576,9 +1576,6 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
outl(devpriv->s5933_intcsr_bits | INTCSR_INBOX_INTR_STATUS,
devpriv->s5933_config + AMCC_OP_REG_INTCSR);
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 4fff1738e3f8..f9afcbe1da54 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1664,15 +1664,36 @@ static void i2c_write(struct comedi_device *dev, unsigned int address,
i2c_stop(dev);
}
+static int cb_pcidas64_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ const struct pcidas64_board *thisboard = comedi_board(dev);
+ struct pcidas64_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readw(devpriv->main_iobase + HW_STATUS_REG);
+ if (thisboard->layout == LAYOUT_4020) {
+ status = readw(devpriv->main_iobase + ADC_WRITE_PNTR_REG);
+ if (status)
+ return 0;
+ } else {
+ if (pipe_full_bits(status))
+ return 0;
+ }
+ return -EBUSY;
+}
+
static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
const struct pcidas64_board *thisboard = comedi_board(dev);
struct pcidas64_private *devpriv = dev->private;
- unsigned int bits = 0, n, i;
+ unsigned int bits = 0, n;
unsigned int channel, range, aref;
unsigned long flags;
- static const int timeout = 100;
+ int ret;
channel = CR_CHAN(insn->chanspec);
range = CR_RANGE(insn->chanspec);
@@ -1770,23 +1791,10 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
devpriv->main_iobase + ADC_CONVERT_REG);
/* wait for data */
- for (i = 0; i < timeout; i++) {
- bits = readw(devpriv->main_iobase + HW_STATUS_REG);
- if (thisboard->layout == LAYOUT_4020) {
- if (readw(devpriv->main_iobase +
- ADC_WRITE_PNTR_REG))
- break;
- } else {
- if (pipe_full_bits(bits))
- break;
- }
- udelay(1);
- }
- if (i == timeout) {
- comedi_error(dev, " analog input read insn timed out");
- dev_info(dev->class_dev, "status 0x%x\n", bits);
- return -ETIME;
- }
+ ret = comedi_timeout(dev, s, insn, cb_pcidas64_ai_eoc, 0);
+ if (ret)
+ return ret;
+
if (thisboard->layout == LAYOUT_4020)
data[n] = readl(devpriv->dio_counter_iobase +
ADC_FIFO_REG) & 0xffff;
@@ -3816,16 +3824,19 @@ static int setup_subdevices(struct comedi_device *dev)
if (thisboard->has_8255) {
if (thisboard->layout == LAYOUT_4020) {
dio_8255_iobase = devpriv->main_iobase + I8255_4020_REG;
- subdev_8255_init(dev, s, dio_callback_4020,
- (unsigned long)dio_8255_iobase);
+ ret = subdev_8255_init(dev, s, dio_callback_4020,
+ (unsigned long)dio_8255_iobase);
} else {
dio_8255_iobase =
devpriv->dio_counter_iobase + DIO_8255_OFFSET;
- subdev_8255_init(dev, s, dio_callback,
- (unsigned long)dio_8255_iobase);
+ ret = subdev_8255_init(dev, s, dio_callback,
+ (unsigned long)dio_8255_iobase);
}
- } else
+ if (ret)
+ return ret;
+ } else {
s->type = COMEDI_SUBD_UNUSED;
+ }
/* 8 channel dio for 60xx */
s = &dev->subdevices[5];
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index 8cca0518cfda..901dc5d1bb72 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -388,8 +388,6 @@ static int cb_pcidda_auto_attach(struct comedi_device *dev,
for (i = 0; i < thisboard->ao_chans; i++)
cb_pcidda_calibrate(dev, i, devpriv->ao_range[i]);
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 57295d189ff6..d3141c865fe7 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -85,21 +85,31 @@ struct cb_pcimdas_private {
unsigned int ao_readback[2];
};
-/*
- * "instructions" read/write data in "one-shot" or "software-triggered"
- * mode.
- */
+static int cb_pcimdas_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct cb_pcimdas_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = inb(devpriv->BADR3 + 2);
+ if ((status & 0x80) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int cb_pcimdas_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct cb_pcimdas_private *devpriv = dev->private;
- int n, i;
+ int n;
unsigned int d;
- unsigned int busy;
int chan = CR_CHAN(insn->chanspec);
unsigned short chanlims;
int maxchans;
+ int ret;
/* only support sw initiated reads from a single channel */
@@ -133,17 +143,10 @@ static int cb_pcimdas_ai_rinsn(struct comedi_device *dev,
/* trigger conversion */
outw(0, dev->iobase + 0);
-#define TIMEOUT 1000 /* typically takes 5 loops on a lightly loaded Pentium 100MHz, */
- /* this is likely to be 100 loops on a 2GHz machine, so set 1000 as the limit. */
-
/* wait for conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- busy = inb(devpriv->BADR3 + 2) & 0x80;
- if (!busy)
- break;
- }
- if (i == TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, cb_pcimdas_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
data[n] = inw(dev->iobase + 0);
@@ -247,9 +250,9 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[2];
/* digital i/o subdevice */
- subdev_8255_init(dev, s, NULL, iobase_8255);
-
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
+ ret = subdev_8255_init(dev, s, NULL, iobase_8255);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index 43a86630a66e..4a2b200de01b 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -187,9 +187,7 @@ static int cb_pcimdda_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
- return 1;
+ return 0;
}
static struct comedi_driver cb_pcimdda_driver = {
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 51a59e5b8ec5..8450c99af8b0 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -211,7 +211,7 @@ static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
return -EINVAL;
}
- snprintf(file, sizeof(file), "/dev/comedi%u", minor);
+ snprintf(file, sizeof(file), "/dev/comedi%d", minor);
file[sizeof(file) - 1] = 0;
d = comedi_open(file);
@@ -254,6 +254,7 @@ static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devs) {
dev_err(dev->class_dev,
"Could not allocate memory. Out of memory?\n");
+ kfree(bdev);
return -ENOMEM;
}
devpriv->devs = devs;
@@ -263,7 +264,7 @@ static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
char buf[20];
int left =
MAX_BOARD_NAME - strlen(devpriv->name) - 1;
- snprintf(buf, sizeof(buf), "%d:%d ",
+ snprintf(buf, sizeof(buf), "%u:%u ",
bdev->minor, bdev->subdev);
buf[sizeof(buf) - 1] = 0;
strncat(devpriv->name, buf, left);
diff --git a/drivers/staging/comedi/drivers/comedi_fc.c b/drivers/staging/comedi/drivers/comedi_fc.c
index 26d9dbcf8bd4..9d9b1469e89a 100644
--- a/drivers/staging/comedi/drivers/comedi_fc.c
+++ b/drivers/staging/comedi/drivers/comedi_fc.c
@@ -1,34 +1,53 @@
/*
- comedi/drivers/comedi_fc.c
-
- This is a place for code driver writers wish to share between
- two or more drivers. fc is short
- for frank-common.
-
- Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- Copyright (C) 2002 Frank Mori Hess
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi_fc.c
+ * This is a place for code driver writers wish to share between
+ * two or more drivers. fc is short for frank-common.
+ *
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2002 Frank Mori Hess
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#include <linux/module.h>
#include "../comedidev.h"
#include "comedi_fc.h"
-static void increment_scan_progress(struct comedi_subdevice *subd,
- unsigned int num_bytes)
+unsigned int cfc_bytes_per_scan(struct comedi_subdevice *s)
+{
+ unsigned int chanlist_len = s->async->cmd.chanlist_len;
+ unsigned int num_samples;
+ unsigned int bits_per_sample;
+
+ switch (s->type) {
+ case COMEDI_SUBD_DI:
+ case COMEDI_SUBD_DO:
+ case COMEDI_SUBD_DIO:
+ bits_per_sample = 8 * bytes_per_sample(s);
+ num_samples = (chanlist_len + bits_per_sample - 1) /
+ bits_per_sample;
+ break;
+ default:
+ num_samples = chanlist_len;
+ break;
+ }
+ return num_samples * bytes_per_sample(s);
+}
+EXPORT_SYMBOL_GPL(cfc_bytes_per_scan);
+
+void cfc_inc_scan_progress(struct comedi_subdevice *s, unsigned int num_bytes)
{
- struct comedi_async *async = subd->async;
- unsigned int scan_length = cfc_bytes_per_scan(subd);
+ struct comedi_async *async = s->async;
+ unsigned int scan_length = cfc_bytes_per_scan(s);
async->scan_progress += num_bytes;
if (async->scan_progress >= scan_length) {
@@ -36,12 +55,13 @@ static void increment_scan_progress(struct comedi_subdevice *subd,
async->events |= COMEDI_CB_EOS;
}
}
+EXPORT_SYMBOL_GPL(cfc_inc_scan_progress);
/* Writes an array of data points to comedi's buffer */
-unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *subd,
+unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *s,
void *data, unsigned int num_bytes)
{
- struct comedi_async *async = subd->async;
+ struct comedi_async *async = s->async;
unsigned int retval;
if (num_bytes == 0)
@@ -49,24 +69,24 @@ unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *subd,
retval = comedi_buf_write_alloc(async, num_bytes);
if (retval != num_bytes) {
- dev_warn(subd->device->class_dev, "comedi: buffer overrun\n");
+ dev_warn(s->device->class_dev, "buffer overrun\n");
async->events |= COMEDI_CB_OVERFLOW;
return 0;
}
comedi_buf_memcpy_to(async, 0, data, num_bytes);
comedi_buf_write_free(async, num_bytes);
- increment_scan_progress(subd, num_bytes);
+ cfc_inc_scan_progress(s, num_bytes);
async->events |= COMEDI_CB_BLOCK;
return num_bytes;
}
EXPORT_SYMBOL_GPL(cfc_write_array_to_buffer);
-unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *subd,
+unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *s,
void *data, unsigned int num_bytes)
{
- struct comedi_async *async = subd->async;
+ struct comedi_async *async = s->async;
if (num_bytes == 0)
return 0;
@@ -74,7 +94,7 @@ unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *subd,
num_bytes = comedi_buf_read_alloc(async, num_bytes);
comedi_buf_memcpy_from(async, 0, data, num_bytes);
comedi_buf_read_free(async, num_bytes);
- increment_scan_progress(subd, num_bytes);
+ cfc_inc_scan_progress(s, num_bytes);
async->events |= COMEDI_CB_BLOCK;
return num_bytes;
@@ -82,34 +102,33 @@ unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *subd,
EXPORT_SYMBOL_GPL(cfc_read_array_from_buffer);
unsigned int cfc_handle_events(struct comedi_device *dev,
- struct comedi_subdevice *subd)
+ struct comedi_subdevice *s)
{
- unsigned int events = subd->async->events;
+ unsigned int events = s->async->events;
if (events == 0)
return events;
if (events & (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW))
- subd->cancel(dev, subd);
+ s->cancel(dev, s);
- comedi_event(dev, subd);
+ comedi_event(dev, s);
return events;
}
EXPORT_SYMBOL_GPL(cfc_handle_events);
-MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
-MODULE_DESCRIPTION("Shared functions for Comedi low-level drivers");
-MODULE_LICENSE("GPL");
-
static int __init comedi_fc_init_module(void)
{
return 0;
}
+module_init(comedi_fc_init_module);
static void __exit comedi_fc_cleanup_module(void)
{
}
-
-module_init(comedi_fc_init_module);
module_exit(comedi_fc_cleanup_module);
+
+MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
+MODULE_DESCRIPTION("Shared functions for Comedi low-level drivers");
+MODULE_LICENSE("GPL");
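The newly exported cfc_bytes_per_scan() (moved out of comedi_fc.h above) packs digital subdevice scans one bit per channel, rounding the channel list up to whole samples, while every other subdevice type uses one sample per channel. A small self-contained illustration of the same arithmetic, written as ordinary user-space C with the sample size passed in explicitly (the kernel helper derives it from the subdevice flags):

#include <stdio.h>

/* Same rounding as cfc_bytes_per_scan(): pack one bit per DIO channel. */
static unsigned int bytes_per_scan(unsigned int chanlist_len,
				   unsigned int bytes_per_sample,
				   int is_dio)
{
	unsigned int bits_per_sample = 8 * bytes_per_sample;
	unsigned int num_samples;

	if (is_dio)
		num_samples = (chanlist_len + bits_per_sample - 1) /
			      bits_per_sample;
	else
		num_samples = chanlist_len;

	return num_samples * bytes_per_sample;
}

int main(void)
{
	/* 24 DIO channels with 16-bit samples pack into 2 samples = 4 bytes */
	printf("%u\n", bytes_per_scan(24, 2, 1));	/* prints 4 */
	/* 24 analog channels always need 24 samples = 48 bytes */
	printf("%u\n", bytes_per_scan(24, 2, 0));	/* prints 48 */
	return 0;
}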
diff --git a/drivers/staging/comedi/drivers/comedi_fc.h b/drivers/staging/comedi/drivers/comedi_fc.h
index 8558b07f8df3..541b9371d3da 100644
--- a/drivers/staging/comedi/drivers/comedi_fc.h
+++ b/drivers/staging/comedi/drivers/comedi_fc.h
@@ -1,72 +1,52 @@
/*
- comedi_fc.h
-
- This is a place for code driver writers wish to share between
- two or more drivers. These functions are meant to be used only
- by drivers, they are NOT part of the kcomedilib API!
-
- Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- Copyright (C) 2002 Frank Mori Hess
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi_fc.h
+ * This is a place for code driver writers wish to share between
+ * two or more drivers. These functions are meant to be used only
+ * by drivers, they are NOT part of the kcomedilib API!
+ *
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2002 Frank Mori Hess
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _COMEDI_FC_H
#define _COMEDI_FC_H
#include "../comedidev.h"
+unsigned int cfc_bytes_per_scan(struct comedi_subdevice *);
+void cfc_inc_scan_progress(struct comedi_subdevice *, unsigned int num_bytes);
+
/* Writes an array of data points to comedi's buffer */
-extern unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *subd,
- void *data,
- unsigned int num_bytes);
+unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *,
+ void *data, unsigned int num_bytes);
-static inline unsigned int cfc_write_to_buffer(struct comedi_subdevice *subd,
+static inline unsigned int cfc_write_to_buffer(struct comedi_subdevice *s,
unsigned short data)
{
- return cfc_write_array_to_buffer(subd, &data, sizeof(data));
+ return cfc_write_array_to_buffer(s, &data, sizeof(data));
};
-static inline unsigned int cfc_write_long_to_buffer(struct comedi_subdevice
- *subd, unsigned int data)
+static inline unsigned int cfc_write_long_to_buffer(struct comedi_subdevice *s,
+ unsigned int data)
{
- return cfc_write_array_to_buffer(subd, &data, sizeof(data));
+ return cfc_write_array_to_buffer(s, &data, sizeof(data));
};
-extern unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *subd,
- void *data,
- unsigned int num_bytes);
+unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *,
+ void *data, unsigned int num_bytes);
-extern unsigned int cfc_handle_events(struct comedi_device *dev,
- struct comedi_subdevice *subd);
-
-static inline unsigned int cfc_bytes_per_scan(struct comedi_subdevice *subd)
-{
- int num_samples;
- int bits_per_sample;
-
- switch (subd->type) {
- case COMEDI_SUBD_DI:
- case COMEDI_SUBD_DO:
- case COMEDI_SUBD_DIO:
- bits_per_sample = 8 * bytes_per_sample(subd);
- num_samples = (subd->async->cmd.chanlist_len +
- bits_per_sample - 1) / bits_per_sample;
- break;
- default:
- num_samples = subd->async->cmd.chanlist_len;
- break;
- }
- return num_samples * bytes_per_sample(subd);
-}
+unsigned int cfc_handle_events(struct comedi_device *,
+ struct comedi_subdevice *);
/**
* cfc_check_trigger_src() - trivially validate a comedi_cmd trigger source
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index d539eaf53b63..cd9562556d2c 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -185,7 +185,6 @@ static void waveform_ai_interrupt(unsigned long arg)
(devpriv->usec_remainder + elapsed_time) / devpriv->scan_period;
devpriv->usec_remainder =
(devpriv->usec_remainder + elapsed_time) % devpriv->scan_period;
- async->events = 0;
if (cmd->stop_src == TRIG_COUNT) {
unsigned int remaining = cmd->stop_arg - devpriv->ai_count;
@@ -318,12 +317,8 @@ static int waveform_ai_cmd(struct comedi_device *dev,
if (cmd->convert_src == TRIG_NOW)
devpriv->convert_period = 0;
- else if (cmd->convert_src == TRIG_TIMER)
+ else /* TRIG_TIMER */
devpriv->convert_period = cmd->convert_arg / nano_per_micro;
- else {
- comedi_error(dev, "bug setting conversion period");
- return -1;
- }
do_gettimeofday(&devpriv->last);
devpriv->usec_current = devpriv->last.tv_usec % devpriv->usec_period;
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 323a7f39cd97..0a9c32e9db4a 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -92,8 +92,6 @@ static int contec_auto_attach(struct comedi_device *dev,
s->range_table = &range_digital;
s->insn_bits = contec_do_insn_bits;
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/dac02.c b/drivers/staging/comedi/drivers/dac02.c
new file mode 100644
index 000000000000..df46e0a5bade
--- /dev/null
+++ b/drivers/staging/comedi/drivers/dac02.c
@@ -0,0 +1,172 @@
+/*
+ * dac02.c
+ * Comedi driver for DAC02 compatible boards
+ * Copyright (C) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * Based on the poc driver
+ * Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2001 David A. Schleef <ds@schleef.org>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Driver: dac02
+ * Description: Comedi driver for DAC02 compatible boards
+ * Devices: (Keithley Metrabyte) DAC-02 [dac02]
+ * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
+ * Updated: Tue, 11 Mar 2014 11:27:19 -0700
+ * Status: unknown
+ *
+ * Configuration options:
+ * [0] - I/O port base
+ */
+
+#include <linux/module.h>
+
+#include "../comedidev.h"
+
+/*
+ * The output range is selected by jumpering pins on the I/O connector.
+ *
+ * Range Chan # Jumper pins Output
+ * ------------- ------ ------------- -----------------
+ * 0 to 5V 0 21 to 22 24
+ * 1 15 to 16 18
+ * 0 to 10V 0 20 to 22 24
+ * 1 14 to 16 18
+ * +/-5V 0 21 to 22 23
+ * 1 15 to 16 17
+ * +/-10V 0 20 to 22 23
+ * 1 14 to 16 17
+ * 4 to 20mA 0 21 to 22 25
+ * 1 15 to 16 19
+ * AC reference 0 In on pin 22 24 (2-quadrant)
+ * In on pin 22 23 (4-quadrant)
+ * 1 In on pin 16 18 (2-quadrant)
+ * In on pin 16 17 (4-quadrant)
+ */
+static const struct comedi_lrange das02_ao_ranges = {
+ 6, {
+ UNI_RANGE(5),
+ UNI_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ RANGE_mA(4, 20),
+ RANGE_ext(0, 1)
+ }
+};
+
+struct dac02_private {
+ unsigned int ao_readback[2];
+};
+
+/*
+ * Register I/O map
+ */
+#define DAC02_AO_LSB(x) (0x00 + ((x) * 2))
+#define DAC02_AO_MSB(x) (0x01 + ((x) * 2))
+
+static int dac02_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct dac02_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val;
+ int i;
+
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+
+ devpriv->ao_readback[chan] = val;
+
+ /*
+ * Unipolar outputs are true binary encoding.
+ * Bipolar outputs are complementary offset binary
+ * (that is, 0 = +full scale, maxdata = -full scale).
+ */
+ if (comedi_range_is_bipolar(s, range))
+ val = s->maxdata - val;
+
+ /*
+ * DACs are double-buffered.
+ * Write LSB then MSB to latch output.
+ */
+ outb((val << 4) & 0xf0, dev->iobase + DAC02_AO_LSB(chan));
+ outb((val >> 4) & 0xff, dev->iobase + DAC02_AO_MSB(chan));
+ }
+
+ return insn->n;
+}
+
+static int dac02_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct dac02_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
+
+ return insn->n;
+}
+
+static int dac02_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+{
+ struct dac02_private *devpriv;
+ struct comedi_subdevice *s;
+ int ret;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
+
+ ret = comedi_request_region(dev, it->options[0], 0x08);
+ if (ret)
+ return ret;
+
+ ret = comedi_alloc_subdevices(dev, 1);
+ if (ret)
+ return ret;
+
+ /* Analog Output subdevice */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 2;
+ s->maxdata = 0x0fff;
+ s->range_table = &das02_ao_ranges;
+ s->insn_write = dac02_ao_insn_write;
+ s->insn_read = dac02_ao_insn_read;
+
+ return 0;
+}
+
+static struct comedi_driver dac02_driver = {
+ .driver_name = "dac02",
+ .module = THIS_MODULE,
+ .attach = dac02_attach,
+ .detach = comedi_legacy_detach,
+};
+module_comedi_driver(dac02_driver);
+
+MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_DESCRIPTION("Comedi driver for DAC02 compatible boards");
+MODULE_LICENSE("GPL");
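The write path of the new dac02 driver above converts bipolar outputs to complementary offset binary (0 = +full scale, maxdata = -full scale) and then splits the 12-bit value across the two left-justified DAC registers. A stand-alone sketch of that conversion and split, in plain user-space C with the register writes replaced by printf() so it can be compiled and run on its own:

#include <stdio.h>

#define MAXDATA		0x0fff	/* 12-bit DAC, as in the dac02 driver */

int main(void)
{
	unsigned int val = 0x0800;	/* requested mid-scale code */
	int bipolar = 1;		/* jumpered for a bipolar range */

	/* Bipolar ranges use complementary offset binary. */
	if (bipolar)
		val = MAXDATA - val;	/* 0x0800 -> 0x07ff */

	/* LSB register takes the low 4 bits left-justified, MSB the rest. */
	printf("LSB register <- 0x%02x\n", (val << 4) & 0xf0);	/* 0xf0 */
	printf("MSB register <- 0x%02x\n", (val >> 4) & 0xff);	/* 0x7f */
	return 0;
}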
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index ce153fcb8b2a..a8f6036ad82b 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -333,14 +333,28 @@ static void setup_sampling(struct comedi_device *dev, int chan, int gain)
writeAcqScanListEntry(dev, word3);
}
+static int daqboard2000_ai_status(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct daqboard2000_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readw(devpriv->daq + acqControl);
+ if (status & context)
+ return 0;
+ return -EBUSY;
+}
+
static int daqboard2000_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct daqboard2000_private *devpriv = dev->private;
- unsigned int val;
- int gain, chan, timeout;
+ int gain, chan;
+ int ret;
int i;
writew(DAQBOARD2000_AcqResetScanListFifo |
@@ -367,25 +381,24 @@ static int daqboard2000_ai_insn_read(struct comedi_device *dev,
/* Enable reading from the scanlist FIFO */
writew(DAQBOARD2000_SeqStartScanList,
devpriv->daq + acqControl);
- for (timeout = 0; timeout < 20; timeout++) {
- val = readw(devpriv->daq + acqControl);
- if (val & DAQBOARD2000_AcqConfigPipeFull)
- break;
- /* udelay(2); */
- }
+
+ ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
+ DAQBOARD2000_AcqConfigPipeFull);
+ if (ret)
+ return ret;
+
writew(DAQBOARD2000_AdcPacerEnable, devpriv->daq + acqControl);
- for (timeout = 0; timeout < 20; timeout++) {
- val = readw(devpriv->daq + acqControl);
- if (val & DAQBOARD2000_AcqLogicScanning)
- break;
- /* udelay(2); */
- }
- for (timeout = 0; timeout < 20; timeout++) {
- val = readw(devpriv->daq + acqControl);
- if (val & DAQBOARD2000_AcqResultsFIFOHasValidData)
- break;
- /* udelay(2); */
- }
+
+ ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
+ DAQBOARD2000_AcqLogicScanning);
+ if (ret)
+ return ret;
+
+ ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
+ DAQBOARD2000_AcqResultsFIFOHasValidData);
+ if (ret)
+ return ret;
+
data[i] = readw(devpriv->daq + acqResultsFIFO);
writew(DAQBOARD2000_AdcPacerDisable, devpriv->daq + acqControl);
writew(DAQBOARD2000_SeqStopScanList, devpriv->daq + acqControl);
@@ -409,6 +422,21 @@ static int daqboard2000_ao_insn_read(struct comedi_device *dev,
return i;
}
+static int daqboard2000_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct daqboard2000_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int status;
+
+ status = readw(devpriv->daq + dacControl);
+ if ((status & ((chan + 1) * 0x0010)) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int daqboard2000_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -416,8 +444,7 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
{
struct daqboard2000_private *devpriv = dev->private;
int chan = CR_CHAN(insn->chanspec);
- unsigned int val;
- int timeout;
+ int ret;
int i;
for (i = 0; i < insn->n; i++) {
@@ -431,12 +458,11 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
udelay(1000);
#endif
writew(data[i], devpriv->daq + dacSetting(chan));
- for (timeout = 0; timeout < 20; timeout++) {
- val = readw(devpriv->daq + dacControl);
- if ((val & ((chan + 1) * 0x0010)) == 0)
- break;
- /* udelay(2); */
- }
+
+ ret = comedi_timeout(dev, s, insn, daqboard2000_ao_eoc, 0);
+ if (ret)
+ return ret;
+
devpriv->ao_readback[chan] = data[i];
#if 0
/*
@@ -737,9 +763,6 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
if (result)
return result;
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index e5c0ee9a09c2..c5e352fb5555 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -201,17 +201,29 @@ static const int *const das08_gainlists[] = {
das08_pgm_gainlist,
};
-#define TIMEOUT 100000
+static int das08_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + DAS08_STATUS);
+ if ((status & DAS08_EOC) == 0)
+ return 0;
+ return -EBUSY;
+}
static int das08_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
const struct das08_board_struct *thisboard = comedi_board(dev);
struct das08_private_struct *devpriv = dev->private;
- int i, n;
+ int n;
int chan;
int range;
int lsb, msb;
+ int ret;
chan = CR_CHAN(insn->chanspec);
range = CR_RANGE(insn->chanspec);
@@ -244,14 +256,10 @@ static int das08_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
/* trigger conversion */
outb_p(0, dev->iobase + DAS08_TRIG_12BIT);
- for (i = 0; i < TIMEOUT; i++) {
- if (!(inb(dev->iobase + DAS08_STATUS) & DAS08_EOC))
- break;
- }
- if (i == TIMEOUT) {
- dev_err(dev->class_dev, "timeout\n");
- return -ETIME;
- }
+ ret = comedi_timeout(dev, s, insn, das08_ai_eoc, 0);
+ if (ret)
+ return ret;
+
msb = inb(dev->iobase + DAS08_MSB);
lsb = inb(dev->iobase + DAS08_LSB);
if (thisboard->ai_encoding == das08_encode12) {
@@ -529,9 +537,10 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
s = &dev->subdevices[4];
/* 8255 */
if (thisboard->i8255_offset != 0) {
- subdev_8255_init(dev, s, NULL, (unsigned long)(dev->iobase +
- thisboard->
- i8255_offset));
+ ret = subdev_8255_init(dev, s, NULL,
+ dev->iobase + thisboard->i8255_offset);
+ if (ret)
+ return ret;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index a8446ca04110..6a7d652ff564 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -856,18 +856,17 @@ static void das16_ai_munge(struct comedi_device *dev,
}
}
-static int das16_ai_wait_for_conv(struct comedi_device *dev,
- unsigned int timeout)
+static int das16_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
unsigned int status;
- int i;
- for (i = 0; i < timeout; i++) {
- status = inb(dev->iobase + DAS16_STATUS_REG);
- if (!(status & DAS16_STATUS_BUSY))
- return 0;
- }
- return -ETIME;
+ status = inb(dev->iobase + DAS16_STATUS_REG);
+ if ((status & DAS16_STATUS_BUSY) == 0)
+ return 0;
+ return -EBUSY;
}
static int das16_ai_insn_read(struct comedi_device *dev,
@@ -897,7 +896,7 @@ static int das16_ai_insn_read(struct comedi_device *dev,
/* trigger conversion */
outb_p(0, dev->iobase + DAS16_TRIG_REG);
- ret = das16_ai_wait_for_conv(dev, 1000);
+ ret = comedi_timeout(dev, s, insn, das16_ai_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index fee5facff8dd..779225831dc0 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -331,14 +331,27 @@ static int das16m1_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
+static int das16m1_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + DAS16M1_CS);
+ if (status & IRQDATA)
+ return 0;
+ return -EBUSY;
+}
+
static int das16m1_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct das16m1_private_struct *devpriv = dev->private;
- int i, n;
+ int ret;
+ int n;
int byte;
- const int timeout = 1000;
/* disable interrupts and internal pacer */
devpriv->control_state &= ~INTE & ~PACER_MASK;
@@ -356,14 +369,10 @@ static int das16m1_ai_rinsn(struct comedi_device *dev,
/* trigger conversion */
outb(0, dev->iobase);
- for (i = 0; i < timeout; i++) {
- if (inb(dev->iobase + DAS16M1_CS) & IRQDATA)
- break;
- }
- if (i == timeout) {
- comedi_error(dev, "timeout");
- return -ETIME;
- }
+ ret = comedi_timeout(dev, s, insn, das16m1_ai_eoc, 0);
+ if (ret)
+ return ret;
+
data[n] = munge_sample(inw(dev->iobase));
}
@@ -407,7 +416,6 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
s = dev->read_subdev;
async = s->async;
- async->events = 0;
cmd = &async->cmd;
/* figure out how many samples are in fifo */
@@ -440,8 +448,8 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
devpriv->adc_count += num_samples;
if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->adc_count >= cmd->stop_arg * cmd->chanlist_len) { /* end of acquisition */
- das16m1_cancel(dev, s);
+ if (devpriv->adc_count >= cmd->stop_arg * cmd->chanlist_len) {
+ /* end of acquisition */
async->events |= COMEDI_CB_EOA;
}
}
@@ -449,13 +457,11 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
/* this probably won't catch overruns since the card doesn't generate
* overrun interrupts, but we might as well try */
if (status & OVRUN) {
- das16m1_cancel(dev, s);
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_error(dev, "fifo overflow");
}
- comedi_event(dev, s);
-
+ cfc_handle_events(dev, s);
}
static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s)
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 320d95a5f47b..8e975d6b06db 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -646,7 +646,6 @@ static void das1800_ai_handler(struct comedi_device *dev)
struct comedi_cmd *cmd = &async->cmd;
unsigned int status = inb(dev->iobase + DAS1800_STATUS);
- async->events = 0;
/* select adc for base address + 0 */
outb(ADC, dev->iobase + DAS1800_SELECT);
/* dma buffer full */
@@ -665,9 +664,8 @@ static void das1800_ai_handler(struct comedi_device *dev)
/* clear OVF interrupt bit */
outb(CLEAR_INTR_MASK & ~OVF, dev->iobase + DAS1800_STATUS);
comedi_error(dev, "DAS1800 FIFO overflow");
- das1800_cancel(dev, s);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
/* stop taking data if appropriate */
@@ -680,16 +678,12 @@ static void das1800_ai_handler(struct comedi_device *dev)
das1800_flush_dma(dev, s);
else
das1800_handle_fifo_not_empty(dev, s);
- das1800_cancel(dev, s); /* disable hardware conversions */
async->events |= COMEDI_CB_EOA;
} else if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0) { /* stop_src TRIG_COUNT */
- das1800_cancel(dev, s); /* disable hardware conversions */
async->events |= COMEDI_CB_EOA;
}
- comedi_event(dev, s);
-
- return;
+ cfc_handle_events(dev, s);
}
static int das1800_ai_poll(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index 43027ee500e3..e0cfb6cb547b 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -1,309 +1,532 @@
/*
- Some comments on the code..
-
- - it shouldn't be necessary to use outb_p().
-
- - ignoreirq creates a race condition. It needs to be fixed.
-
+ * das6402.c
+ * Comedi driver for DAS6402 compatible boards
+ * Copyright(c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * Rewrite of an experimental driver by:
+ * Copyright (C) 1999 Oystein Svendsen <svendsen@pvv.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
/*
- comedi/drivers/das6402.c
- An experimental driver for Computerboards' DAS6402 I/O card
+ * Driver: das6402
+ * Description: Keithley Metrabyte DAS6402 (& compatibles)
+ * Devices: (Keithley Metrabyte) DAS6402-12 (das6402-12)
+ * (Keithley Metrabyte) DAS6402-16 (das6402-16)
+ * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
+ * Updated: Fri, 14 Mar 2014 10:18:43 -0700
+ * Status: unknown
+ *
+ * Configuration Options:
+ * [0] - I/O base address
+ * [1] - IRQ (optional, needed for async command support)
+ */
- Copyright (C) 1999 Oystein Svendsen <svendsen@pvv.org>
+#include <linux/module.h>
+#include <linux/interrupt.h>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+#include "../comedidev.h"
+#include "8253.h"
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- */
/*
-Driver: das6402
-Description: Keithley Metrabyte DAS6402 (& compatibles)
-Author: Oystein Svendsen <svendsen@pvv.org>
-Status: bitrotten
-Devices: [Keithley Metrabyte] DAS6402 (das6402)
+ * Register I/O map
+ */
+#define DAS6402_AI_DATA_REG 0x00
+#define DAS6402_AI_MUX_REG 0x02
+#define DAS6402_AI_MUX_LO(x) (((x) & 0x3f) << 0)
+#define DAS6402_AI_MUX_HI(x) (((x) & 0x3f) << 8)
+#define DAS6402_DI_DO_REG 0x03
+#define DAS6402_AO_DATA_REG(x) (0x04 + ((x) * 2))
+#define DAS6402_AO_LSB_REG(x) (0x04 + ((x) * 2))
+#define DAS6402_AO_MSB_REG(x) (0x05 + ((x) * 2))
+#define DAS6402_STATUS_REG 0x08
+#define DAS6402_STATUS_FFNE (1 << 0)
+#define DAS6402_STATUS_FHALF (1 << 1)
+#define DAS6402_STATUS_FFULL (1 << 2)
+#define DAS6402_STATUS_XINT (1 << 3)
+#define DAS6402_STATUS_INT (1 << 4)
+#define DAS6402_STATUS_XTRIG (1 << 5)
+#define DAS6402_STATUS_INDGT (1 << 6)
+#define DAS6402_STATUS_10MHZ (1 << 7)
+#define DAS6402_STATUS_W_CLRINT (1 << 0)
+#define DAS6402_STATUS_W_CLRXTR (1 << 1)
+#define DAS6402_STATUS_W_CLRXIN (1 << 2)
+#define DAS6402_STATUS_W_EXTEND (1 << 4)
+#define DAS6402_STATUS_W_ARMED (1 << 5)
+#define DAS6402_STATUS_W_POSTMODE (1 << 6)
+#define DAS6402_STATUS_W_10MHZ (1 << 7)
+#define DAS6402_CTRL_REG 0x09
+#define DAS6402_CTRL_SOFT_TRIG (0 << 0)
+#define DAS6402_CTRL_EXT_FALL_TRIG (1 << 0)
+#define DAS6402_CTRL_EXT_RISE_TRIG (2 << 0)
+#define DAS6402_CTRL_PACER_TRIG (3 << 0)
+#define DAS6402_CTRL_BURSTEN (1 << 2)
+#define DAS6402_CTRL_XINTE (1 << 3)
+#define DAS6402_CTRL_IRQ(x) ((x) << 4)
+#define DAS6402_CTRL_INTE (1 << 7)
+#define DAS6402_TRIG_REG 0x0a
+#define DAS6402_TRIG_TGEN (1 << 0)
+#define DAS6402_TRIG_TGSEL (1 << 1)
+#define DAS6402_TRIG_TGPOL (1 << 2)
+#define DAS6402_TRIG_PRETRIG (1 << 3)
+#define DAS6402_AO_RANGE(_chan, _range) ((_range) << ((_chan) ? 6 : 4))
+#define DAS6402_AO_RANGE_MASK(_chan) (3 << ((_chan) ? 6 : 4))
+#define DAS6402_MODE_REG 0x0b
+#define DAS6402_MODE_RANGE(x) ((x) << 0)
+#define DAS6402_MODE_POLLED (0 << 2)
+#define DAS6402_MODE_FIFONEPTY (1 << 2)
+#define DAS6402_MODE_FIFOHFULL (2 << 2)
+#define DAS6402_MODE_EOB (3 << 2)
+#define DAS6402_MODE_ENHANCED (1 << 4)
+#define DAS6402_MODE_SE (1 << 5)
+#define DAS6402_MODE_UNI (1 << 6)
+#define DAS6402_MODE_DMA1 (0 << 7)
+#define DAS6402_MODE_DMA3 (1 << 7)
+#define DAS6402_TIMER_BASE 0x0c
+
+static const struct comedi_lrange das6402_ai_ranges = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
+};
-This driver has suffered bitrot.
-*/
+/*
+ * Analog output ranges are programmable on the DAS6402/12.
+ * For the DAS6402/16 the range bits have no function, the
+ * DAC ranges are selected by switches on the board.
+ */
+static const struct comedi_lrange das6402_ao_ranges = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
+};
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include "../comedidev.h"
+struct das6402_boardinfo {
+ const char *name;
+ unsigned int maxdata;
+};
-#define DAS6402_SIZE 16
-
-#define N_WORDS (3000*64)
-
-#define STOP 0
-#define START 1
-
-#define SCANL 0x3f00
-#define BYTE unsigned char
-#define WORD unsigned short
-
-/*----- register 8 ----*/
-#define CLRINT 0x01
-#define CLRXTR 0x02
-#define CLRXIN 0x04
-#define EXTEND 0x10
-#define ARMED 0x20 /* enable conting of post sample conv */
-#define POSTMODE 0x40
-#define MHZ 0x80 /* 10 MHz clock */
-/*---------------------*/
-
-/*----- register 9 ----*/
-#define IRQ (0x04 << 4) /* these two are */
-#define IRQV 10 /* dependent on each other */
-
-#define CONVSRC 0x03 /* trig src is Intarnal pacer */
-#define BURSTEN 0x04 /* enable burst */
-#define XINTE 0x08 /* use external int. trig */
-#define INTE 0x80 /* enable analog interrupts */
-/*---------------------*/
-
-/*----- register 10 ---*/
-#define TGEN 0x01 /* Use pin DI1 for externl trigging? */
-#define TGSEL 0x02 /* Use edge triggering */
-#define TGPOL 0x04 /* active edge is falling */
-#define PRETRIG 0x08 /* pretrig */
-/*---------------------*/
-
-/*----- register 11 ---*/
-#define EOB 0x0c
-#define FIFOHFULL 0x08
-#define GAIN 0x01
-#define FIFONEPTY 0x04
-#define MODE 0x10
-#define SEM 0x20
-#define BIP 0x40
-/*---------------------*/
-
-#define M0 0x00
-#define M2 0x04
-
-#define C0 0x00
-#define C1 0x40
-#define C2 0x80
-#define RWLH 0x30
+struct das6402_boardinfo das6402_boards[] = {
+ {
+ .name = "das6402-12",
+ .maxdata = 0x0fff,
+ }, {
+ .name = "das6402-16",
+ .maxdata = 0xffff,
+ },
+};
struct das6402_private {
- int ai_bytes_to_read;
+ unsigned int irq;
+
+ unsigned int count;
+ unsigned int divider1;
+ unsigned int divider2;
- int das6402_ignoreirq;
+ unsigned int ao_range;
+ unsigned int ao_readback[2];
};
-static void das6402_ai_fifo_dregs(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static void das6402_set_mode(struct comedi_device *dev,
+ unsigned int mode)
{
- while (1) {
- if (!(inb(dev->iobase + 8) & 0x01))
- return;
- comedi_buf_put(s->async, inw(dev->iobase));
- }
+ outb(DAS6402_MODE_ENHANCED | mode, dev->iobase + DAS6402_MODE_REG);
}
-static void das6402_setcounter(struct comedi_device *dev)
+static void das6402_set_extended(struct comedi_device *dev,
+ unsigned int val)
{
- BYTE p;
- unsigned short ctrlwrd;
-
- /* set up counter0 first, mode 0 */
- p = M0 | C0 | RWLH;
- outb_p(p, dev->iobase + 15);
- ctrlwrd = 2000;
- p = (BYTE) (0xff & ctrlwrd);
- outb_p(p, dev->iobase + 12);
- p = (BYTE) (0xff & (ctrlwrd >> 8));
- outb_p(p, dev->iobase + 12);
-
- /* set up counter1, mode 2 */
- p = M2 | C1 | RWLH;
- outb_p(p, dev->iobase + 15);
- ctrlwrd = 10;
- p = (BYTE) (0xff & ctrlwrd);
- outb_p(p, dev->iobase + 13);
- p = (BYTE) (0xff & (ctrlwrd >> 8));
- outb_p(p, dev->iobase + 13);
-
- /* set up counter1, mode 2 */
- p = M2 | C2 | RWLH;
- outb_p(p, dev->iobase + 15);
- ctrlwrd = 1000;
- p = (BYTE) (0xff & ctrlwrd);
- outb_p(p, dev->iobase + 14);
- p = (BYTE) (0xff & (ctrlwrd >> 8));
- outb_p(p, dev->iobase + 14);
+ outb(DAS6402_STATUS_W_EXTEND, dev->iobase + DAS6402_STATUS_REG);
+ outb(DAS6402_STATUS_W_EXTEND | val, dev->iobase + DAS6402_STATUS_REG);
+ outb(val, dev->iobase + DAS6402_STATUS_REG);
}
-static irqreturn_t intr_handler(int irq, void *d)
+static void das6402_clear_all_interrupts(struct comedi_device *dev)
+{
+ outb(DAS6402_STATUS_W_CLRINT |
+ DAS6402_STATUS_W_CLRXTR |
+ DAS6402_STATUS_W_CLRXIN, dev->iobase + DAS6402_STATUS_REG);
+}
+
+static void das6402_ai_clear_eoc(struct comedi_device *dev)
+{
+ outb(DAS6402_STATUS_W_CLRINT, dev->iobase + DAS6402_STATUS_REG);
+}
+
+static void das6402_enable_counter(struct comedi_device *dev, bool load)
{
- struct comedi_device *dev = d;
struct das6402_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ unsigned long timer_iobase = dev->iobase + DAS6402_TIMER_BASE;
- if (!dev->attached || devpriv->das6402_ignoreirq) {
- dev_warn(dev->class_dev, "BUG: spurious interrupt\n");
- return IRQ_HANDLED;
- }
+ if (load) {
+ i8254_set_mode(timer_iobase, 0, 0, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_iobase, 0, 1, I8254_MODE2 | I8254_BINARY);
+ i8254_set_mode(timer_iobase, 0, 2, I8254_MODE2 | I8254_BINARY);
- das6402_ai_fifo_dregs(dev, s);
+ i8254_write(timer_iobase, 0, 0, devpriv->count);
+ i8254_write(timer_iobase, 0, 1, devpriv->divider1);
+ i8254_write(timer_iobase, 0, 2, devpriv->divider2);
- if (s->async->buf_write_count >= devpriv->ai_bytes_to_read) {
- outw_p(SCANL, dev->iobase + 2); /* clears the fifo */
- outb(0x07, dev->iobase + 8); /* clears all flip-flops */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ } else {
+ i8254_set_mode(timer_iobase, 0, 0, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_iobase, 0, 1, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_iobase, 0, 2, I8254_MODE0 | I8254_BINARY);
}
+}
- outb(0x01, dev->iobase + 8); /* clear only the interrupt flip-flop */
+static irqreturn_t das6402_interrupt(int irq, void *d)
+{
+ struct comedi_device *dev = d;
+
+ das6402_clear_all_interrupts(dev);
- comedi_event(dev, s);
return IRQ_HANDLED;
}
-#if 0
-static void das6402_ai_fifo_read(struct comedi_device *dev, short *data, int n)
+static int das6402_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- int i;
+ return -EINVAL;
+}
- for (i = 0; i < n; i++)
- data[i] = inw(dev->iobase);
+static int das6402_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ return -EINVAL;
}
-#endif
static int das6402_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
+ outb(DAS6402_CTRL_SOFT_TRIG, dev->iobase + DAS6402_CTRL_REG);
+
+ return 0;
+}
+
+static void das6402_ai_soft_trig(struct comedi_device *dev)
+{
+ outw(0, dev->iobase + DAS6402_AI_DATA_REG);
+}
+
+static int das6402_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + DAS6402_STATUS_REG);
+ if (status & DAS6402_STATUS_FFNE)
+ return 0;
+ return -EBUSY;
+}
+
+static int das6402_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int aref = CR_AREF(insn->chanspec);
+ unsigned int val;
+ int ret;
+ int i;
+
+ val = DAS6402_MODE_RANGE(range) | DAS6402_MODE_POLLED;
+ if (aref == AREF_DIFF) {
+ if (chan > s->n_chan / 2)
+ return -EINVAL;
+ } else {
+ val |= DAS6402_MODE_SE;
+ }
+ if (comedi_range_is_unipolar(s, range))
+ val |= DAS6402_MODE_UNI;
+
+ /* enable software conversion trigger */
+ outb(DAS6402_CTRL_SOFT_TRIG, dev->iobase + DAS6402_CTRL_REG);
+
+ das6402_set_mode(dev, val);
+
+ /* load the mux for single channel conversion */
+ outw(DAS6402_AI_MUX_HI(chan) | DAS6402_AI_MUX_LO(chan),
+ dev->iobase + DAS6402_AI_MUX_REG);
+
+ for (i = 0; i < insn->n; i++) {
+ das6402_ai_clear_eoc(dev);
+ das6402_ai_soft_trig(dev);
+
+ ret = comedi_timeout(dev, s, insn, das6402_ai_eoc, 0);
+ if (ret)
+ break;
+
+ val = inw(dev->iobase + DAS6402_AI_DATA_REG);
+
+ if (s->maxdata == 0x0fff)
+ val >>= 4;
+
+ data[i] = val;
+ }
+
+ das6402_ai_clear_eoc(dev);
+
+ return insn->n;
+}
+
+static int das6402_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
struct das6402_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val;
+ int i;
+
+ /* set the range for this channel */
+ val = devpriv->ao_range;
+ val &= ~DAS6402_AO_RANGE_MASK(chan);
+ val |= DAS6402_AO_RANGE(chan, range);
+ if (val != devpriv->ao_range) {
+ devpriv->ao_range = val;
+ outb(val, dev->iobase + DAS6402_TRIG_REG);
+ }
/*
- * This function should reset the board from whatever condition it
- * is in (i.e., acquiring data), to a non-active state.
+ * The DAS6402/16 has a jumper to select either individual
+ * update (UPDATE) or simultaneous updating (XFER) of both
+ * DAC's. In UPDATE mode, when the MSB is written, that DAC
+ * is updated. In XFER mode, after both DAC's are loaded,
+ * a read cycle of any DAC register will update both DAC's
+ * simultaneously.
+ *
+ * If you have XFER mode enabled a (*insn_read) will need
+ * to be performed in order to update the DAC's with the
+ * last value written.
*/
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+
+ devpriv->ao_readback[chan] = val;
+
+ if (s->maxdata == 0x0fff) {
+ /*
+ * DAS6402/12 has the two 8-bit DAC registers, left
+ * justified (the 4 LSB bits are don't care). Data
+ * can be written as one word.
+ */
+ val <<= 4;
+ outw(val, dev->iobase + DAS6402_AO_DATA_REG(chan));
+ } else {
+ /*
+ * DAS6402/16 uses both 8-bit DAC registers and needs
+ * to be written LSB then MSB.
+ */
+ outb(val & 0xff,
+ dev->iobase + DAS6402_AO_LSB_REG(chan));
+ outb((val >> 8) & 0xff,
+ dev->iobase + DAS6402_AO_MSB_REG(chan));
+ }
+ }
- devpriv->das6402_ignoreirq = 1;
- dev_dbg(dev->class_dev, "Stopping acquisition\n");
- devpriv->das6402_ignoreirq = 1;
- outb_p(0x02, dev->iobase + 10); /* disable external trigging */
- outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */
- outb_p(0, dev->iobase + 9); /* disables interrupts */
-
- outw_p(SCANL, dev->iobase + 2);
-
- return 0;
+ return insn->n;
}
-#ifdef unused
-static int das6402_ai_mode2(struct comedi_device *dev,
- struct comedi_subdevice *s, comedi_trig *it)
+static int das6402_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct das6402_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
- devpriv->das6402_ignoreirq = 1;
- dev_dbg(dev->class_dev, "Starting acquisition\n");
- outb_p(0x03, dev->iobase + 10); /* enable external trigging */
- outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */
- outb_p(IRQ | CONVSRC | BURSTEN | INTE, dev->iobase + 9);
+ /*
+ * If XFER mode is enabled, reading any DAC register
+ * will update both DAC's simultaneously.
+ */
+ inw(dev->iobase + DAS6402_AO_LSB_REG(chan));
- devpriv->ai_bytes_to_read = it->n * sizeof(short);
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
- /* um... ignoreirq is a nasty race condition */
- devpriv->das6402_ignoreirq = 0;
+ return insn->n;
+}
- outw_p(SCANL, dev->iobase + 2);
+static int das6402_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = inb(dev->iobase + DAS6402_DI_DO_REG);
- return 0;
+ return insn->n;
}
-#endif
-static int board_init(struct comedi_device *dev)
+static int das6402_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct das6402_private *devpriv = dev->private;
- BYTE b;
+ if (comedi_dio_update_state(s, data))
+ outb(s->state, dev->iobase + DAS6402_DI_DO_REG);
- devpriv->das6402_ignoreirq = 1;
+ data[1] = s->state;
- outb(0x07, dev->iobase + 8);
+ return insn->n;
+}
- /* register 11 */
- outb_p(MODE, dev->iobase + 11);
- b = BIP | SEM | MODE | GAIN | FIFOHFULL;
- outb_p(b, dev->iobase + 11);
+static void das6402_reset(struct comedi_device *dev)
+{
+ struct das6402_private *devpriv = dev->private;
- /* register 8 */
- outb_p(EXTEND, dev->iobase + 8);
- b = EXTEND | MHZ;
- outb_p(b, dev->iobase + 8);
- b = MHZ | CLRINT | CLRXTR | CLRXIN;
- outb_p(b, dev->iobase + 8);
+ /* enable "Enhanced" mode */
+ outb(DAS6402_MODE_ENHANCED, dev->iobase + DAS6402_MODE_REG);
- /* register 9 */
- b = IRQ | CONVSRC | BURSTEN | INTE;
- outb_p(b, dev->iobase + 9);
+ /* enable 10MHz pacer clock */
+ das6402_set_extended(dev, DAS6402_STATUS_W_10MHZ);
- /* register 10 */
- b = TGSEL | TGEN;
- outb_p(b, dev->iobase + 10);
+ /* enable software conversion trigger */
+ outb(DAS6402_CTRL_SOFT_TRIG, dev->iobase + DAS6402_CTRL_REG);
- b = 0x07;
- outb_p(b, dev->iobase + 8);
+ /* default ADC to single-ended unipolar 10V inputs */
+ das6402_set_mode(dev, DAS6402_MODE_RANGE(0) |
+ DAS6402_MODE_POLLED |
+ DAS6402_MODE_SE |
+ DAS6402_MODE_UNI);
- das6402_setcounter(dev);
+ /* default mux for single channel conversion (channel 0) */
+ outw(DAS6402_AI_MUX_HI(0) | DAS6402_AI_MUX_LO(0),
+ dev->iobase + DAS6402_AI_MUX_REG);
- outw_p(SCANL, dev->iobase + 2); /* reset card fifo */
+ /* set both DAC's for unipolar 5V output range */
+ devpriv->ao_range = DAS6402_AO_RANGE(0, 2) | DAS6402_AO_RANGE(1, 2);
+ outb(devpriv->ao_range, dev->iobase + DAS6402_TRIG_REG);
- devpriv->das6402_ignoreirq = 0;
+ /* set both DAC's to 0V */
+ outw(0, dev->iobase + DAS6402_AO_DATA_REG(0));
+ outw(0, dev->iobase + DAS6402_AO_DATA_REG(1));
+ inw(dev->iobase + DAS6402_AO_LSB_REG(0));
- return 0;
+ das6402_enable_counter(dev, false);
+
+ /* set all digital outputs low */
+ outb(0, dev->iobase + DAS6402_DI_DO_REG);
+
+ das6402_clear_all_interrupts(dev);
}
static int das6402_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
+ const struct das6402_boardinfo *board = comedi_board(dev);
struct das6402_private *devpriv;
- unsigned int irq;
- int ret;
struct comedi_subdevice *s;
-
- ret = comedi_request_region(dev, it->options[0], DAS6402_SIZE);
- if (ret)
- return ret;
-
- irq = it->options[0];
- dev_dbg(dev->class_dev, "( irq = %u )\n", irq);
- ret = request_irq(irq, intr_handler, 0, "das6402", dev);
- if (ret < 0)
- return ret;
-
- dev->irq = irq;
+ int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- ret = comedi_alloc_subdevices(dev, 1);
+ ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
- /* ai subdevice */
+ das6402_reset(dev);
+
+ /* IRQs 2,3,5,6,7, 10,11,15 are valid for "enhanced" mode */
+ if ((1 << it->options[1]) & 0x8cec) {
+ ret = request_irq(it->options[1], das6402_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0) {
+ dev->irq = it->options[1];
+
+ switch (dev->irq) {
+ case 10:
+ devpriv->irq = 4;
+ break;
+ case 11:
+ devpriv->irq = 1;
+ break;
+ case 15:
+ devpriv->irq = 6;
+ break;
+ default:
+ devpriv->irq = dev->irq;
+ break;
+ }
+ }
+ }
+
+ ret = comedi_alloc_subdevices(dev, 4);
+ if (ret)
+ return ret;
+
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 8;
- /* s->trig[2]=das6402_ai_mode2; */
- s->cancel = das6402_ai_cancel;
- s->maxdata = (1 << 12) - 1;
- s->len_chanlist = 16; /* ? */
- s->range_table = &range_unknown;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
+ s->n_chan = 64;
+ s->maxdata = board->maxdata;
+ s->range_table = &das6402_ai_ranges;
+ s->insn_read = das6402_ai_insn_read;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmdtest = das6402_ai_cmdtest;
+ s->do_cmd = das6402_ai_cmd;
+ s->cancel = das6402_ai_cancel;
+ }
- board_init(dev);
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 2;
+ s->maxdata = board->maxdata;
+ s->range_table = &das6402_ao_ranges;
+ s->insn_write = das6402_ao_insn_write;
+ s->insn_read = das6402_ao_insn_read;
+
+ /* Digital Input subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das6402_di_insn_bits;
+
+ /* Digital Output subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das6402_do_insn_bits;
return 0;
}
@@ -313,9 +536,12 @@ static struct comedi_driver das6402_driver = {
.module = THIS_MODULE,
.attach = das6402_attach,
.detach = comedi_legacy_detach,
+ .board_name = &das6402_boards[0].name,
+ .num_names = ARRAY_SIZE(das6402_boards),
+ .offset = sizeof(struct das6402_boardinfo),
};
module_comedi_driver(das6402_driver)
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_DESCRIPTION("Comedi driver for DAS6402 compatible boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index 5af0a5764a8c..3e408370dcf3 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -420,17 +420,12 @@ static int das800_ai_do_cmd(struct comedi_device *dev,
gain &= 0xf;
outb(gain, dev->iobase + DAS800_GAIN);
- switch (async->cmd.stop_src) {
- case TRIG_COUNT:
+ if (async->cmd.stop_src == TRIG_COUNT) {
devpriv->count = async->cmd.stop_arg * async->cmd.chanlist_len;
devpriv->forever = false;
- break;
- case TRIG_NONE:
+ } else { /* TRIG_NONE */
devpriv->forever = true;
devpriv->count = 0;
- break;
- default:
- break;
}
/* enable auto channel scan, send interrupts on end of conversion
@@ -440,26 +435,19 @@ static int das800_ai_do_cmd(struct comedi_device *dev,
conv_bits |= EACS | IEOC;
if (async->cmd.start_src == TRIG_EXT)
conv_bits |= DTEN;
- switch (async->cmd.convert_src) {
- case TRIG_TIMER:
+ if (async->cmd.convert_src == TRIG_TIMER) {
conv_bits |= CASC | ITE;
/* set conversion frequency */
if (das800_set_frequency(dev) < 0) {
comedi_error(dev, "Error setting up counters");
return -1;
}
- break;
- case TRIG_EXT:
- break;
- default:
- break;
}
spin_lock_irqsave(&dev->spinlock, irq_flags);
das800_ind_write(dev, conv_bits, CONV_CONTROL);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- async->events = 0;
das800_enable(dev);
return 0;
}
@@ -532,10 +520,8 @@ static irqreturn_t das800_interrupt(int irq, void *d)
if (fifo_overflow) {
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- das800_cancel(dev, s);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
- async->events = 0;
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -551,20 +537,21 @@ static irqreturn_t das800_interrupt(int irq, void *d)
das800_disable(dev);
async->events |= COMEDI_CB_EOA;
}
- comedi_event(dev, s);
- async->events = 0;
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
-static int das800_wait_for_conv(struct comedi_device *dev, int timeout)
+static int das800_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- int i;
+ unsigned int status;
- for (i = 0; i < timeout; i++) {
- if (!(inb(dev->iobase + DAS800_STATUS) & BUSY))
- return 0;
- }
- return -ETIME;
+ status = inb(dev->iobase + DAS800_STATUS);
+ if ((status & BUSY) == 0)
+ return 0;
+ return -EBUSY;
}
static int das800_ai_insn_read(struct comedi_device *dev,
@@ -599,7 +586,7 @@ static int das800_ai_insn_read(struct comedi_device *dev,
/* trigger conversion */
outb_p(0, dev->iobase + DAS800_MSB);
- ret = das800_wait_for_conv(dev, 1000);
+ ret = comedi_timeout(dev, s, insn, das800_ai_eoc, 0);
if (ret)
return ret;
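
The das800 change above, like the dmm32at, dt2811, dt2814, dt2815, dt282x and
dyna_pci10xx changes that follow, replaces an open-coded busy-wait loop with
the comedi_timeout() helper and a small per-driver completion callback. For
readers unfamiliar with the helper, this is the general shape of the pattern
as a fragment-level sketch; MYDEV_STATUS_REG and MYDEV_BUSY are placeholder
names, not registers of any board in this series:

/* callback: return 0 when the hardware is ready, -EBUSY to keep polling */
static int mydev_ai_eoc(struct comedi_device *dev,
			struct comedi_subdevice *s,
			struct comedi_insn *insn,
			unsigned long context)
{
	unsigned int status;

	status = inb(dev->iobase + MYDEV_STATUS_REG);
	if ((status & MYDEV_BUSY) == 0)
		return 0;
	return -EBUSY;
}

	/* caller side: comedi_timeout() polls the callback and fails the
	 * instruction with a timeout error, so the driver no longer needs
	 * its own loop counter or udelay() calls. */
	ret = comedi_timeout(dev, s, insn, mydev_ai_eoc, 0);
	if (ret)
		return ret;
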
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index 78a19629ff56..c8a36eb5f015 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -164,16 +164,29 @@ struct dmm32at_private {
};
+static int dmm32at_ai_status(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned char status;
+
+ status = inb(dev->iobase + context);
+ if ((status & DMM32AT_STATUS) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int dmm32at_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- int n, i;
+ int n;
unsigned int d;
- unsigned char status;
unsigned short msb, lsb;
unsigned char chan;
int range;
+ int ret;
/* get the channel and range number */
@@ -190,26 +203,20 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev,
outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AICONF);
/* wait for circuit to settle */
- for (i = 0; i < 40000; i++) {
- status = inb(dev->iobase + DMM32AT_AIRBACK);
- if ((status & DMM32AT_STATUS) == 0)
- break;
- }
- if (i == 40000)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, dmm32at_ai_status, DMM32AT_AIRBACK);
+ if (ret)
+ return ret;
/* convert n samples */
for (n = 0; n < insn->n; n++) {
/* trigger conversion */
outb(0xff, dev->iobase + DMM32AT_CONV);
+
/* wait for conversion to end */
- for (i = 0; i < 40000; i++) {
- status = inb(dev->iobase + DMM32AT_AISTAT);
- if ((status & DMM32AT_STATUS) == 0)
- break;
- }
- if (i == 40000)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, dmm32at_ai_status,
+ DMM32AT_AISTAT);
+ if (ret)
+ return ret;
/* read data */
lsb = inb(dev->iobase + DMM32AT_AILSB);
@@ -403,8 +410,9 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct dmm32at_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- int i, range;
- unsigned char chanlo, chanhi, status;
+ int range;
+ unsigned char chanlo, chanhi;
+ int ret;
if (!cmd->chanlist)
return -EINVAL;
@@ -439,14 +447,13 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
* isr */
}
- /* wait for circuit to settle */
- for (i = 0; i < 40000; i++) {
- status = inb(dev->iobase + DMM32AT_AIRBACK);
- if ((status & DMM32AT_STATUS) == 0)
- break;
- }
- if (i == 40000)
- return -ETIMEDOUT;
+ /*
+ * wait for circuit to settle
+ * we don't have the 'insn' here but it's not needed
+ */
+ ret = comedi_timeout(dev, s, NULL, dmm32at_ai_status, DMM32AT_AIRBACK);
+ if (ret)
+ return ret;
if (devpriv->ai_scans_left > 1) {
/* start the clock and enable the interrupts */
@@ -525,6 +532,19 @@ static irqreturn_t dmm32at_isr(int irq, void *d)
return IRQ_HANDLED;
}
+static int dmm32at_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned char status;
+
+ status = inb(dev->iobase + DMM32AT_DACSTAT);
+ if ((status & DMM32AT_DACBUSY) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int dmm32at_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -533,6 +553,7 @@ static int dmm32at_ao_winsn(struct comedi_device *dev,
int i;
int chan = CR_CHAN(insn->chanspec);
unsigned char hi, lo, status;
+ int ret;
/* Writing a list of values to an AO channel is probably not
* very useful, but that's how the interface is defined. */
@@ -549,13 +570,9 @@ static int dmm32at_ao_winsn(struct comedi_device *dev,
outb(hi, dev->iobase + DMM32AT_DACMSB);
/* wait for circuit to settle */
- for (i = 0; i < 40000; i++) {
- status = inb(dev->iobase + DMM32AT_DACSTAT);
- if ((status & DMM32AT_DACBUSY) == 0)
- break;
- }
- if (i == 40000)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, dmm32at_ao_eoc, 0);
+ if (ret)
+ return ret;
/* dummy read to update trigger the output */
status = inb(dev->iobase + DMM32AT_DACMSB);
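
One detail of the dmm32at conversion above: the final 'context' argument of
comedi_timeout() is passed through to the callback, so a single callback can
serve several wait sites. Here it carries the offset of the status register to
poll (DMM32AT_AIRBACK while the circuit settles, DMM32AT_AISTAT while a
conversion completes); dt2815 further down passes the status value it expects
to read back, and dt282x passes which status condition to wait for. The two
dmm32at call sites differ only in that argument:

	/* wait for the analog front end to settle */
	ret = comedi_timeout(dev, s, insn, dmm32at_ai_status, DMM32AT_AIRBACK);
	if (ret)
		return ret;

	/* wait for the conversion to finish */
	ret = comedi_timeout(dev, s, insn, dmm32at_ai_status, DMM32AT_AISTAT);
	if (ret)
		return ret;
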
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 4271903facd7..ba7c2ba618e6 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -224,23 +224,32 @@ static const struct comedi_lrange *dac_range_types[] = {
&range_unipolar5
};
-#define DT2811_TIMEOUT 5
+static int dt2811_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + DT2811_ADCSR);
+ if ((status & DT2811_ADBUSY) == 0)
+ return 0;
+ return -EBUSY;
+}
static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int chan = CR_CHAN(insn->chanspec);
- int timeout = DT2811_TIMEOUT;
+ int ret;
int i;
for (i = 0; i < insn->n; i++) {
outb(chan, dev->iobase + DT2811_ADGCR);
- while (timeout
- && inb(dev->iobase + DT2811_ADCSR) & DT2811_ADBUSY)
- timeout--;
- if (!timeout)
- return -ETIME;
+ ret = comedi_timeout(dev, s, insn, dt2811_ai_eoc, 0);
+ if (ret)
+ return ret;
data[i] = inb(dev->iobase + DT2811_ADDATLO);
data[i] |= inb(dev->iobase + DT2811_ADDATHI) << 8;
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index abad6e49c1c1..3794b7e52091 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -66,26 +66,35 @@ struct dt2814_private {
#define DT2814_TIMEOUT 10
#define DT2814_MAX_SPEED 100000 /* Arbitrary 10 khz limit */
+static int dt2814_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + DT2814_CSR);
+ if (status & DT2814_FINISH)
+ return 0;
+ return -EBUSY;
+}
+
static int dt2814_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- int n, i, hi, lo;
+ int n, hi, lo;
int chan;
- int status = 0;
+ int ret;
for (n = 0; n < insn->n; n++) {
chan = CR_CHAN(insn->chanspec);
outb(chan, dev->iobase + DT2814_CSR);
- for (i = 0; i < DT2814_TIMEOUT; i++) {
- status = inb(dev->iobase + DT2814_CSR);
- udelay(10);
- if (status & DT2814_FINISH)
- break;
- }
- if (i >= DT2814_TIMEOUT)
- return -ETIMEDOUT;
+
+ ret = comedi_timeout(dev, s, insn, dt2814_ai_eoc, 0);
+ if (ret)
+ return ret;
hi = inb(dev->iobase + DT2814_DATA);
lo = inb(dev->iobase + DT2814_DATA);
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index ee24717821e1..b9ac4ed8babb 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -67,15 +67,17 @@ struct dt2815_private {
unsigned int ao_readback[8];
};
-static int dt2815_wait_for_status(struct comedi_device *dev, int status)
+static int dt2815_ao_status(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- int i;
+ unsigned int status;
- for (i = 0; i < 100; i++) {
- if (inb(dev->iobase + DT2815_STATUS) == status)
- break;
- }
- return status;
+ status = inb(dev->iobase + DT2815_STATUS);
+ if (status == context)
+ return 0;
+ return -EBUSY;
}
static int dt2815_ao_insn_read(struct comedi_device *dev,
@@ -98,30 +100,23 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct dt2815_private *devpriv = dev->private;
int i;
int chan = CR_CHAN(insn->chanspec);
- unsigned int status;
unsigned int lo, hi;
+ int ret;
for (i = 0; i < insn->n; i++) {
lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01;
hi = (data[i] & 0xff0) >> 4;
- status = dt2815_wait_for_status(dev, 0x00);
- if (status != 0) {
- dev_dbg(dev->class_dev,
- "failed to write low byte on %d reason %x\n",
- chan, status);
- return -EBUSY;
- }
+ ret = comedi_timeout(dev, s, insn, dt2815_ao_status, 0x00);
+ if (ret)
+ return ret;
outb(lo, dev->iobase + DT2815_DATA);
- status = dt2815_wait_for_status(dev, 0x10);
- if (status != 0x10) {
- dev_dbg(dev->class_dev,
- "failed to write high byte on %d reason %x\n",
- chan, status);
- return -EBUSY;
- }
+ ret = comedi_timeout(dev, s, insn, dt2815_ao_status, 0x10);
+ if (ret)
+ return ret;
+
devpriv->ao_readback[chan] = data[i];
}
return i;
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 895f73a19023..16cc100531e5 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -63,7 +63,6 @@ Notes:
#include "comedi_fc.h"
-#define DT2821_TIMEOUT 100 /* 500 us */
#define DT2821_SIZE 0x10
/*
@@ -248,27 +247,6 @@ struct dt282x_private {
* Some useless abstractions
*/
#define chan_to_DAC(a) ((a)&1)
-#define mux_busy() (inw(dev->iobase+DT2821_ADCSR)&DT2821_MUXBUSY)
-#define ad_done() (inw(dev->iobase+DT2821_ADCSR)&DT2821_ADDONE)
-
-/*
- * danger! macro abuse... a is the expression to wait on, and b is
- * the statement(s) to execute if it doesn't happen.
- */
-#define wait_for(a, b) \
- do { \
- int _i; \
- for (_i = 0; _i < DT2821_TIMEOUT; _i++) { \
- if (a) { \
- _i = 0; \
- break; \
- } \
- udelay(5); \
- } \
- if (_i) { \
- b \
- } \
- } while (0)
static int prep_ai_dma(struct comedi_device *dev, int chan, int size);
static int prep_ao_dma(struct comedi_device *dev, int chan, int size);
@@ -328,7 +306,6 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev)
size = cfc_read_array_from_buffer(s, ptr, devpriv->dma_maxsize);
if (size == 0) {
dev_err(dev->class_dev, "AO underrun\n");
- dt282x_ao_cancel(dev, s);
s->async->events |= COMEDI_CB_OVERFLOW;
return;
}
@@ -363,7 +340,7 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev)
dt282x_munge(dev, ptr, size);
ret = cfc_write_array_to_buffer(s, ptr, size);
if (ret != size) {
- dt282x_ai_cancel(dev, s);
+ s->async->events |= COMEDI_CB_OVERFLOW;
return;
}
devpriv->nread -= size / 2;
@@ -373,7 +350,6 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev)
devpriv->nread = 0;
}
if (!devpriv->nread) {
- dt282x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
return;
}
@@ -471,15 +447,13 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
if (adcsr & DT2821_ADERR) {
if (devpriv->nread != 0) {
comedi_error(dev, "A/D error");
- dt282x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_ERROR;
}
handled = 1;
}
if (dacsr & DT2821_DAERR) {
comedi_error(dev, "D/A error");
- dt282x_ao_cancel(dev, s_ao);
- s->async->events |= COMEDI_CB_ERROR;
+ s_ao->async->events |= COMEDI_CB_ERROR;
handled = 1;
}
#if 0
@@ -508,7 +482,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
handled = 1;
}
#endif
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
+ cfc_handle_events(dev, s_ao);
return IRQ_RETVAL(handled);
}
@@ -530,6 +505,29 @@ static void dt282x_load_changain(struct comedi_device *dev, int n,
outw(n - 1, dev->iobase + DT2821_CHANCSR);
}
+static int dt282x_ai_timeout(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + DT2821_ADCSR);
+ switch (context) {
+ case DT2821_MUXBUSY:
+ if ((status & DT2821_MUXBUSY) == 0)
+ return 0;
+ break;
+ case DT2821_ADDONE:
+ if (status & DT2821_ADDONE)
+ return 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return -EBUSY;
+}
+
/*
* Performs a single A/D conversion.
* - Put channel/gain into channel-gain list
@@ -542,6 +540,7 @@ static int dt282x_ai_insn_read(struct comedi_device *dev,
{
const struct dt282x_board *board = comedi_board(dev);
struct dt282x_private *devpriv = dev->private;
+ int ret;
int i;
/* XXX should we really be enabling the ad clock here? */
@@ -551,13 +550,18 @@ static int dt282x_ai_insn_read(struct comedi_device *dev,
dt282x_load_changain(dev, 1, &insn->chanspec);
outw(devpriv->supcsr | DT2821_PRLD, dev->iobase + DT2821_SUPCSR);
- wait_for(!mux_busy(), comedi_error(dev, "timeout\n"); return -ETIME;);
+ ret = comedi_timeout(dev, s, insn, dt282x_ai_timeout, DT2821_MUXBUSY);
+ if (ret)
+ return ret;
for (i = 0; i < insn->n; i++) {
outw(devpriv->supcsr | DT2821_STRIG,
dev->iobase + DT2821_SUPCSR);
- wait_for(ad_done(), comedi_error(dev, "timeout\n");
- return -ETIME;);
+
+ ret = comedi_timeout(dev, s, insn, dt282x_ai_timeout,
+ DT2821_ADDONE);
+ if (ret)
+ return ret;
data[i] =
inw(dev->iobase +
@@ -646,6 +650,7 @@ static int dt282x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
struct dt282x_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
int timer;
+ int ret;
if (devpriv->usedma == 0) {
comedi_error(dev,
@@ -691,7 +696,9 @@ static int dt282x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
outw(devpriv->adcsr, dev->iobase + DT2821_ADCSR);
outw(devpriv->supcsr | DT2821_PRLD, dev->iobase + DT2821_SUPCSR);
- wait_for(!mux_busy(), comedi_error(dev, "timeout\n"); return -ETIME;);
+ ret = comedi_timeout(dev, s, NULL, dt282x_ai_timeout, DT2821_MUXBUSY);
+ if (ret)
+ return ret;
if (cmd->scan_begin_src == TRIG_FOLLOW) {
outw(devpriv->supcsr | DT2821_STRIG,
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index f52a4476cb73..436e451cadf5 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -369,12 +369,10 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
debug_n_ints++;
- if (debug_n_ints >= 10) {
- dt3k_ai_cancel(dev, s);
+ if (debug_n_ints >= 10)
s->async->events |= COMEDI_CB_EOA;
- }
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -767,8 +765,6 @@ static int dt3000_auto_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_PROC;
#endif
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
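
The dt3000 hunk above, and the das800 and dt282x hunks earlier, make the same
change in their interrupt handlers: the explicit calls to the cancel routine
and to comedi_event() are dropped in favour of setting the async event flags
and calling cfc_handle_events(), which is expected to cancel the acquisition
itself when an end-of-acquisition or error event is flagged. Roughly, the
handler tail changes from the first form to the second (a schematic sketch,
not a literal quote of any one driver):

	/* old style: cancel by hand, then signal the core */
	mydev_ai_cancel(dev, s);
	s->async->events |= COMEDI_CB_EOA;
	comedi_event(dev, s);
	s->async->events = 0;

	/* new style: only flag the events; cfc_handle_events() runs the
	 * subdevice cancel handler for EOA/error events and notifies the
	 * core */
	s->async->events |= COMEDI_CB_EOA;
	cfc_handle_events(dev, s);
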
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index f224825830ba..e5593f8c7406 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -57,18 +57,27 @@ struct dyna_pci10xx_private {
unsigned long BADR3;
};
-/******************************************************************************/
-/************************** READ WRITE FUNCTIONS ******************************/
-/******************************************************************************/
+static int dyna_pci10xx_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw_p(dev->iobase);
+ if (status & (1 << 15))
+ return 0;
+ return -EBUSY;
+}
-/* analog input callback */
static int dyna_pci10xx_insn_read_ai(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct dyna_pci10xx_private *devpriv = dev->private;
- int n, counter;
+ int n;
u16 d = 0;
+ int ret = 0;
unsigned int chan, range;
/* get the channel number and range */
@@ -82,18 +91,13 @@ static int dyna_pci10xx_insn_read_ai(struct comedi_device *dev,
smp_mb();
outw_p(0x0000 + range + chan, dev->iobase + 2);
udelay(10);
+
+ ret = comedi_timeout(dev, s, insn, dyna_pci10xx_ai_eoc, 0);
+ if (ret)
+ break;
+
/* read data */
- for (counter = 0; counter < READ_TIMEOUT; counter++) {
- d = inw_p(dev->iobase);
-
- /* check if read is successful if the EOC bit is set */
- if (d & (1 << 15))
- goto conv_finish;
- }
- data[n] = 0;
- dev_dbg(dev->class_dev, "timeout reading analog input\n");
- continue;
-conv_finish:
+ d = inw_p(dev->iobase);
/* mask the first 4 bits - EOC bits */
d &= 0x0FFF;
data[n] = d;
@@ -101,7 +105,7 @@ conv_finish:
mutex_unlock(&devpriv->mutex);
/* return the number of samples read/written */
- return n;
+ return ret ? ret : n;
}
/* analog output callback */
@@ -232,8 +236,6 @@ static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
s->state = 0;
s->insn_bits = dyna_pci10xx_do_insn_bits;
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index a99ddf00ddc4..4e410f3b0e24 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -1,27 +1,49 @@
/*
- comedi/drivers/fl512.c
- Anders Gnistrup <ex18@kalman.iau.dtu.dk>
-*/
+ * fl512.c
+ * Anders Gnistrup <ex18@kalman.iau.dtu.dk>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-Driver: fl512
-Description: unknown
-Author: Anders Gnistrup <ex18@kalman.iau.dtu.dk>
-Devices: [unknown] FL512 (fl512)
-Status: unknown
-
-Digital I/O is not supported.
-
-Configuration options:
- [0] - I/O port base address
-*/
+ * Driver: fl512
+ * Description: unknown
+ * Author: Anders Gnistrup <ex18@kalman.iau.dtu.dk>
+ * Devices: [unknown] FL512 (fl512)
+ * Status: unknown
+ *
+ * Digital I/O is not supported.
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ */
#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#define FL512_SIZE 16 /* the size of the used memory */
+/*
+ * Register I/O map
+ */
+#define FL512_AI_LSB_REG 0x02
+#define FL512_AI_MSB_REG 0x03
+#define FL512_AI_MUX_REG 0x02
+#define FL512_AI_START_CONV_REG 0x03
+#define FL512_AO_DATA_REG(x) (0x04 + ((x) * 2))
+#define FL512_AO_TRIG_REG(x) (0x04 + ((x) * 2))
+
struct fl512_private {
unsigned short ao_readback[2];
};
@@ -38,72 +60,69 @@ static const struct comedi_lrange range_fl512 = {
}
};
-/*
- * fl512_ai_insn : this is the analog input function
- */
-static int fl512_ai_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
+static int fl512_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- unsigned int lo_byte, hi_byte;
- char chan = CR_CHAN(insn->chanspec);
- unsigned long iobase = dev->iobase;
-
- for (n = 0; n < insn->n; n++) { /* sample n times on selected channel */
- /* XXX probably can move next step out of for() loop -- will
- * make AI a little bit faster. */
- outb(chan, iobase + 2); /* select chan */
- outb(0, iobase + 3); /* start conversion */
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+ int i;
+
+ outb(chan, dev->iobase + FL512_AI_MUX_REG);
+
+ for (i = 0; i < insn->n; i++) {
+ outb(0, dev->iobase + FL512_AI_START_CONV_REG);
+
/* XXX should test "done" flag instead of delay */
- udelay(30); /* sleep 30 usec */
- lo_byte = inb(iobase + 2); /* low 8 byte */
- hi_byte = inb(iobase + 3) & 0xf; /* high 4 bit and mask */
- data[n] = lo_byte + (hi_byte << 8);
+ udelay(30);
+
+ val = inb(dev->iobase + FL512_AI_LSB_REG);
+ val |= (inb(dev->iobase + FL512_AI_MSB_REG) << 8);
+ val &= s->maxdata;
+
+ data[i] = val;
}
- return n;
+
+ return insn->n;
}
-/*
- * fl512_ao_insn : used to write to a DA port n times
- */
-static int fl512_ao_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
+static int fl512_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct fl512_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec); /* get chan to write */
- unsigned long iobase = dev->iobase; /* get base address */
-
- for (n = 0; n < insn->n; n++) { /* write n data set */
- /* write low byte */
- outb(data[n] & 0x0ff, iobase + 4 + 2 * chan);
- /* write high byte */
- outb((data[n] & 0xf00) >> 8, iobase + 4 + 2 * chan);
- inb(iobase + 4 + 2 * chan); /* trig */
-
- devpriv->ao_readback[chan] = data[n];
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val = devpriv->ao_readback[chan];
+ int i;
+
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+
+ /* write LSB, MSB then trigger conversion */
+ outb(val & 0x0ff, dev->iobase + FL512_AO_DATA_REG(chan));
+ outb((val >> 8) & 0xf, dev->iobase + FL512_AO_DATA_REG(chan));
+ inb(dev->iobase + FL512_AO_TRIG_REG(chan));
}
- return n;
+ devpriv->ao_readback[chan] = val;
+
+ return insn->n;
}
-/*
- * fl512_ao_insn_readback : used to read previous values written to
- * DA port
- */
-static int fl512_ao_insn_readback(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int fl512_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct fl512_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
- for (n = 0; n < insn->n; n++)
- data[n] = devpriv->ao_readback[chan];
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
- return n;
+ return insn->n;
}
static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
@@ -112,7 +131,7 @@ static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
struct comedi_subdevice *s;
int ret;
- ret = comedi_request_region(dev, it->options[0], FL512_SIZE);
+ ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
@@ -124,42 +143,26 @@ static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- /*
- * this if the definitions of the supdevices, 2 have been defined
- */
- /* Analog indput */
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- /* define subdevice as Analog In */
- s->type = COMEDI_SUBD_AI;
- /* you can read it from userspace */
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- /* Number of Analog input channels */
- s->n_chan = 16;
- /* accept only 12 bits of data */
- s->maxdata = 0x0fff;
- /* device use one of the ranges */
- s->range_table = &range_fl512;
- /* function to call when read AD */
- s->insn_read = fl512_ai_insn;
-
- /* Analog output */
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ s->n_chan = 16;
+ s->maxdata = 0x0fff;
+ s->range_table = &range_fl512;
+ s->insn_read = fl512_ai_insn_read;
+
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- /* define subdevice as Analog OUT */
- s->type = COMEDI_SUBD_AO;
- /* you can write it from userspace */
- s->subdev_flags = SDF_WRITABLE;
- /* Number of Analog output channels */
- s->n_chan = 2;
- /* accept only 12 bits of data */
- s->maxdata = 0x0fff;
- /* device use one of the ranges */
- s->range_table = &range_fl512;
- /* function to call when write DA */
- s->insn_write = fl512_ao_insn;
- /* function to call when reading DA */
- s->insn_read = fl512_ao_insn_readback;
-
- return 1;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 2;
+ s->maxdata = 0x0fff;
+ s->range_table = &range_fl512;
+ s->insn_write = fl512_ao_insn_write;
+ s->insn_read = fl512_ao_insn_read;
+
+ return 0;
}
static struct comedi_driver fl512_driver = {
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index de60a2871d70..08d7655e24e7 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -1,24 +1,24 @@
/*
- comedi/drivers/gsc_hpdi.c
- This is a driver for the General Standards Corporation High
- Speed Parallel Digital Interface rs485 boards.
-
- Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- Copyright (C) 2003 Coherent Imaging Systems
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * gsc_hpdi.c
+ * Comedi driver for the General Standards Corporation
+ * High Speed Parallel Digital Interface rs485 boards.
+ *
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2003 Coherent Imaging Systems
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Driver: gsc_hpdi
@@ -40,8 +40,6 @@
* support could be added to this driver.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -52,149 +50,105 @@
#include "plx9080.h"
#include "comedi_fc.h"
-static void abort_dma(struct comedi_device *dev, unsigned int channel);
-static int hpdi_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
-static int hpdi_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd);
-static int hpdi_cancel(struct comedi_device *dev, struct comedi_subdevice *s);
-static irqreturn_t handle_interrupt(int irq, void *d);
-static int dio_config_block_size(struct comedi_device *dev, unsigned int *data);
-
-#define TIMER_BASE 50 /* 20MHz master clock */
-#define DMA_BUFFER_SIZE 0x10000
-#define NUM_DMA_BUFFERS 4
-#define NUM_DMA_DESCRIPTORS 256
-
-enum hpdi_registers {
- FIRMWARE_REV_REG = 0x0,
- BOARD_CONTROL_REG = 0x4,
- BOARD_STATUS_REG = 0x8,
- TX_PROG_ALMOST_REG = 0xc,
- RX_PROG_ALMOST_REG = 0x10,
- FEATURES_REG = 0x14,
- FIFO_REG = 0x18,
- TX_STATUS_COUNT_REG = 0x1c,
- TX_LINE_VALID_COUNT_REG = 0x20,
- TX_LINE_INVALID_COUNT_REG = 0x24,
- RX_STATUS_COUNT_REG = 0x28,
- RX_LINE_COUNT_REG = 0x2c,
- INTERRUPT_CONTROL_REG = 0x30,
- INTERRUPT_STATUS_REG = 0x34,
- TX_CLOCK_DIVIDER_REG = 0x38,
- TX_FIFO_SIZE_REG = 0x40,
- RX_FIFO_SIZE_REG = 0x44,
- TX_FIFO_WORDS_REG = 0x48,
- RX_FIFO_WORDS_REG = 0x4c,
- INTERRUPT_EDGE_LEVEL_REG = 0x50,
- INTERRUPT_POLARITY_REG = 0x54,
-};
-
-/* bit definitions */
-
-enum firmware_revision_bits {
- FEATURES_REG_PRESENT_BIT = 0x8000,
-};
-
-enum board_control_bits {
- BOARD_RESET_BIT = 0x1, /* wait 10usec before accessing fifos */
- TX_FIFO_RESET_BIT = 0x2,
- RX_FIFO_RESET_BIT = 0x4,
- TX_ENABLE_BIT = 0x10,
- RX_ENABLE_BIT = 0x20,
- DEMAND_DMA_DIRECTION_TX_BIT = 0x40,
- /* for ch 0, ch 1 can only transmit (when present) */
- LINE_VALID_ON_STATUS_VALID_BIT = 0x80,
- START_TX_BIT = 0x10,
- CABLE_THROTTLE_ENABLE_BIT = 0x20,
- TEST_MODE_ENABLE_BIT = 0x80000000,
-};
-
-enum board_status_bits {
- COMMAND_LINE_STATUS_MASK = 0x7f,
- TX_IN_PROGRESS_BIT = 0x80,
- TX_NOT_EMPTY_BIT = 0x100,
- TX_NOT_ALMOST_EMPTY_BIT = 0x200,
- TX_NOT_ALMOST_FULL_BIT = 0x400,
- TX_NOT_FULL_BIT = 0x800,
- RX_NOT_EMPTY_BIT = 0x1000,
- RX_NOT_ALMOST_EMPTY_BIT = 0x2000,
- RX_NOT_ALMOST_FULL_BIT = 0x4000,
- RX_NOT_FULL_BIT = 0x8000,
- BOARD_JUMPER0_INSTALLED_BIT = 0x10000,
- BOARD_JUMPER1_INSTALLED_BIT = 0x20000,
- TX_OVERRUN_BIT = 0x200000,
- RX_UNDERRUN_BIT = 0x400000,
- RX_OVERRUN_BIT = 0x800000,
-};
-
-static uint32_t almost_full_bits(unsigned int num_words)
-{
- /* XXX need to add or subtract one? */
- return (num_words << 16) & 0xff0000;
-}
-
-static uint32_t almost_empty_bits(unsigned int num_words)
-{
- return num_words & 0xffff;
-}
-
-enum features_bits {
- FIFO_SIZE_PRESENT_BIT = 0x1,
- FIFO_WORDS_PRESENT_BIT = 0x2,
- LEVEL_EDGE_INTERRUPTS_PRESENT_BIT = 0x4,
- GPIO_SUPPORTED_BIT = 0x8,
- PLX_DMA_CH1_SUPPORTED_BIT = 0x10,
- OVERRUN_UNDERRUN_SUPPORTED_BIT = 0x20,
-};
-
-enum interrupt_sources {
- FRAME_VALID_START_INTR = 0,
- FRAME_VALID_END_INTR = 1,
- TX_FIFO_EMPTY_INTR = 8,
- TX_FIFO_ALMOST_EMPTY_INTR = 9,
- TX_FIFO_ALMOST_FULL_INTR = 10,
- TX_FIFO_FULL_INTR = 11,
- RX_EMPTY_INTR = 12,
- RX_ALMOST_EMPTY_INTR = 13,
- RX_ALMOST_FULL_INTR = 14,
- RX_FULL_INTR = 15,
-};
-
-static uint32_t intr_bit(int interrupt_source)
-{
- return 0x1 << interrupt_source;
-}
-
-static unsigned int fifo_size(uint32_t fifo_size_bits)
-{
- return fifo_size_bits & 0xfffff;
-}
+/*
+ * PCI BAR2 Register map (devpriv->mmio)
+ */
+#define FIRMWARE_REV_REG 0x00
+#define FEATURES_REG_PRESENT_BIT (1 << 15)
+#define BOARD_CONTROL_REG 0x04
+#define BOARD_RESET_BIT (1 << 0)
+#define TX_FIFO_RESET_BIT (1 << 1)
+#define RX_FIFO_RESET_BIT (1 << 2)
+#define TX_ENABLE_BIT (1 << 4)
+#define RX_ENABLE_BIT (1 << 5)
+#define DEMAND_DMA_DIRECTION_TX_BIT (1 << 6) /* ch 0 only */
+#define LINE_VALID_ON_STATUS_VALID_BIT (1 << 7)
+#define START_TX_BIT (1 << 8)
+#define CABLE_THROTTLE_ENABLE_BIT (1 << 9)
+#define TEST_MODE_ENABLE_BIT (1 << 31)
+#define BOARD_STATUS_REG 0x08
+#define COMMAND_LINE_STATUS_MASK (0x7f << 0)
+#define TX_IN_PROGRESS_BIT (1 << 7)
+#define TX_NOT_EMPTY_BIT (1 << 8)
+#define TX_NOT_ALMOST_EMPTY_BIT (1 << 9)
+#define TX_NOT_ALMOST_FULL_BIT (1 << 10)
+#define TX_NOT_FULL_BIT (1 << 11)
+#define RX_NOT_EMPTY_BIT (1 << 12)
+#define RX_NOT_ALMOST_EMPTY_BIT (1 << 13)
+#define RX_NOT_ALMOST_FULL_BIT (1 << 14)
+#define RX_NOT_FULL_BIT (1 << 15)
+#define BOARD_JUMPER0_INSTALLED_BIT (1 << 16)
+#define BOARD_JUMPER1_INSTALLED_BIT (1 << 17)
+#define TX_OVERRUN_BIT (1 << 21)
+#define RX_UNDERRUN_BIT (1 << 22)
+#define RX_OVERRUN_BIT (1 << 23)
+#define TX_PROG_ALMOST_REG 0x0c
+#define RX_PROG_ALMOST_REG 0x10
+#define ALMOST_EMPTY_BITS(x) (((x) & 0xffff) << 0)
+#define ALMOST_FULL_BITS(x) (((x) & 0xff) << 16)
+#define FEATURES_REG 0x14
+#define FIFO_SIZE_PRESENT_BIT (1 << 0)
+#define FIFO_WORDS_PRESENT_BIT (1 << 1)
+#define LEVEL_EDGE_INTERRUPTS_PRESENT_BIT (1 << 2)
+#define GPIO_SUPPORTED_BIT (1 << 3)
+#define PLX_DMA_CH1_SUPPORTED_BIT (1 << 4)
+#define OVERRUN_UNDERRUN_SUPPORTED_BIT (1 << 5)
+#define FIFO_REG 0x18
+#define TX_STATUS_COUNT_REG 0x1c
+#define TX_LINE_VALID_COUNT_REG 0x20
+#define TX_LINE_INVALID_COUNT_REG 0x24
+#define RX_STATUS_COUNT_REG 0x28
+#define RX_LINE_COUNT_REG 0x2c
+#define INTERRUPT_CONTROL_REG 0x30
+#define FRAME_VALID_START_INTR (1 << 0)
+#define FRAME_VALID_END_INTR (1 << 1)
+#define TX_FIFO_EMPTY_INTR (1 << 8)
+#define TX_FIFO_ALMOST_EMPTY_INTR (1 << 9)
+#define TX_FIFO_ALMOST_FULL_INTR (1 << 10)
+#define TX_FIFO_FULL_INTR (1 << 11)
+#define RX_EMPTY_INTR (1 << 12)
+#define RX_ALMOST_EMPTY_INTR (1 << 13)
+#define RX_ALMOST_FULL_INTR (1 << 14)
+#define RX_FULL_INTR (1 << 15)
+#define INTERRUPT_STATUS_REG 0x34
+#define TX_CLOCK_DIVIDER_REG 0x38
+#define TX_FIFO_SIZE_REG 0x40
+#define RX_FIFO_SIZE_REG 0x44
+#define FIFO_SIZE_MASK (0xfffff << 0)
+#define TX_FIFO_WORDS_REG 0x48
+#define RX_FIFO_WORDS_REG 0x4c
+#define INTERRUPT_EDGE_LEVEL_REG 0x50
+#define INTERRUPT_POLARITY_REG 0x54
+
+#define TIMER_BASE 50 /* 20MHz master clock */
+#define DMA_BUFFER_SIZE 0x10000
+#define NUM_DMA_BUFFERS 4
+#define NUM_DMA_DESCRIPTORS 256
struct hpdi_board {
- const char *name; /* board name */
- int device_id; /* pci device id */
- int subdevice_id; /* pci subdevice id */
+ const char *name;
+ int device_id;
+ int subdevice_id;
};
static const struct hpdi_board hpdi_boards[] = {
{
- .name = "pci-hpdi32",
- .device_id = PCI_DEVICE_ID_PLX_9080,
- .subdevice_id = 0x2400,
+ .name = "pci-hpdi32",
+ .device_id = PCI_DEVICE_ID_PLX_9080,
+ .subdevice_id = 0x2400,
},
#if 0
{
- .name = "pxi-hpdi32",
- .device_id = 0x9656,
- .subdevice_id = 0x2705,
+ .name = "pxi-hpdi32",
+ .device_id = 0x9656,
+ .subdevice_id = 0x2705,
},
#endif
};
struct hpdi_private {
- /* base addresses (ioremapped) */
- void __iomem *plx9080_iobase;
- void __iomem *hpdi_iobase;
+ void __iomem *plx9080_mmio;
+ void __iomem *mmio;
uint32_t *dio_buffer[NUM_DMA_BUFFERS]; /* dma buffers */
/* physical addresses of dma buffers */
dma_addr_t dio_buffer_phys_addr[NUM_DMA_BUFFERS];
@@ -207,26 +161,334 @@ struct hpdi_private {
/* pointer to start of buffers indexed by descriptor */
uint32_t *desc_dio_buffer[NUM_DMA_DESCRIPTORS];
/* index of the dma descriptor that is currently being used */
- volatile unsigned int dma_desc_index;
+ unsigned int dma_desc_index;
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
- volatile unsigned long dio_count;
- /* software copies of values written to hpdi registers */
- volatile uint32_t bits[24];
+ unsigned long dio_count;
/* number of bytes at which to generate COMEDI_CB_BLOCK events */
- volatile unsigned int block_size;
+ unsigned int block_size;
};
-static int dio_config_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static void gsc_hpdi_drain_dma(struct comedi_device *dev, unsigned int channel)
+{
+ struct hpdi_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int idx;
+ unsigned int start;
+ unsigned int desc;
+ unsigned int size;
+ unsigned int next;
+
+ if (channel)
+ next = readl(devpriv->plx9080_mmio + PLX_DMA1_PCI_ADDRESS_REG);
+ else
+ next = readl(devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
+
+ idx = devpriv->dma_desc_index;
+ start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr);
+ /* loop until we have read all the full buffers */
+ for (desc = 0; (next < start || next >= start + devpriv->block_size) &&
+ desc < devpriv->num_dma_descriptors; desc++) {
+ /* transfer data from dma buffer to comedi buffer */
+ size = devpriv->block_size / sizeof(uint32_t);
+ if (cmd->stop_src == TRIG_COUNT) {
+ if (size > devpriv->dio_count)
+ size = devpriv->dio_count;
+ devpriv->dio_count -= size;
+ }
+ cfc_write_array_to_buffer(s, devpriv->desc_dio_buffer[idx],
+ size * sizeof(uint32_t));
+ idx++;
+ idx %= devpriv->num_dma_descriptors;
+ start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr);
+
+ devpriv->dma_desc_index = idx;
+ }
+ /* XXX check for buffer overrun somehow */
+}
+
+static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
+{
+ struct comedi_device *dev = d;
+ struct hpdi_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ uint32_t hpdi_intr_status, hpdi_board_status;
+ uint32_t plx_status;
+ uint32_t plx_bits;
+ uint8_t dma0_status, dma1_status;
+ unsigned long flags;
+
+ if (!dev->attached)
+ return IRQ_NONE;
+
+ plx_status = readl(devpriv->plx9080_mmio + PLX_INTRCS_REG);
+ if ((plx_status & (ICS_DMA0_A | ICS_DMA1_A | ICS_LIA)) == 0)
+ return IRQ_NONE;
+
+ hpdi_intr_status = readl(devpriv->mmio + INTERRUPT_STATUS_REG);
+ hpdi_board_status = readl(devpriv->mmio + BOARD_STATUS_REG);
+
+ if (hpdi_intr_status)
+ writel(hpdi_intr_status, devpriv->mmio + INTERRUPT_STATUS_REG);
+
+ /* spin lock makes sure no one else changes plx dma control reg */
+ spin_lock_irqsave(&dev->spinlock, flags);
+ dma0_status = readb(devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+ if (plx_status & ICS_DMA0_A) { /* dma chan 0 interrupt */
+ writeb((dma0_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
+ devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+
+ if (dma0_status & PLX_DMA_EN_BIT)
+ gsc_hpdi_drain_dma(dev, 0);
+ }
+ spin_unlock_irqrestore(&dev->spinlock, flags);
+
+ /* spin lock makes sure no one else changes plx dma control reg */
+ spin_lock_irqsave(&dev->spinlock, flags);
+ dma1_status = readb(devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
+ if (plx_status & ICS_DMA1_A) { /* XXX *//* dma chan 1 interrupt */
+ writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
+ devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
+ }
+ spin_unlock_irqrestore(&dev->spinlock, flags);
+
+ /* clear possible plx9080 interrupt sources */
+ if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */
+ plx_bits = readl(devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
+ writel(plx_bits, devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
+ }
+
+ if (hpdi_board_status & RX_OVERRUN_BIT) {
+ dev_err(dev->class_dev, "rx fifo overrun\n");
+ async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ }
+
+ if (hpdi_board_status & RX_UNDERRUN_BIT) {
+ dev_err(dev->class_dev, "rx fifo underrun\n");
+ async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ }
+
+ if (devpriv->dio_count == 0)
+ async->events |= COMEDI_CB_EOA;
+
+ cfc_handle_events(dev, s);
+
+ return IRQ_HANDLED;
+}
+
+static void gsc_hpdi_abort_dma(struct comedi_device *dev, unsigned int channel)
+{
+ struct hpdi_private *devpriv = dev->private;
+ unsigned long flags;
+
+ /* spinlock for plx dma control/status reg */
+ spin_lock_irqsave(&dev->spinlock, flags);
+
+ plx9080_abort_dma(devpriv->plx9080_mmio, channel);
+
+ spin_unlock_irqrestore(&dev->spinlock, flags);
+}
+
+static int gsc_hpdi_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct hpdi_private *devpriv = dev->private;
+
+ writel(0, devpriv->mmio + BOARD_CONTROL_REG);
+ writel(0, devpriv->mmio + INTERRUPT_CONTROL_REG);
+
+ gsc_hpdi_abort_dma(dev, 0);
+
+ return 0;
+}
+
+static int gsc_hpdi_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct hpdi_private *devpriv = dev->private;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned long flags;
+ uint32_t bits;
+
+ if (s->io_bits)
+ return -EINVAL;
+
+ writel(RX_FIFO_RESET_BIT, devpriv->mmio + BOARD_CONTROL_REG);
+
+ gsc_hpdi_abort_dma(dev, 0);
+
+ devpriv->dma_desc_index = 0;
+
+ /*
+ * These registers are supposedly unused during chained dma,
+ * but I have found that leftover values from the last operation
+ * occasionally cause problems with the transfer of the first
+ * dma block. Initializing them to zero seems to fix the problem.
+ */
+ writel(0, devpriv->plx9080_mmio + PLX_DMA0_TRANSFER_SIZE_REG);
+ writel(0, devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
+ writel(0, devpriv->plx9080_mmio + PLX_DMA0_LOCAL_ADDRESS_REG);
+
+ /* give location of first dma descriptor */
+ bits = devpriv->dma_desc_phys_addr | PLX_DESC_IN_PCI_BIT |
+ PLX_INTR_TERM_COUNT | PLX_XFER_LOCAL_TO_PCI;
+ writel(bits, devpriv->plx9080_mmio + PLX_DMA0_DESCRIPTOR_REG);
+
+ /* enable dma transfer */
+ spin_lock_irqsave(&dev->spinlock, flags);
+ writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT | PLX_CLEAR_DMA_INTR_BIT,
+ devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
+
+ if (cmd->stop_src == TRIG_COUNT)
+ devpriv->dio_count = cmd->stop_arg;
+ else
+ devpriv->dio_count = 1;
+
+ /* clear over/under run status flags */
+ writel(RX_UNDERRUN_BIT | RX_OVERRUN_BIT,
+ devpriv->mmio + BOARD_STATUS_REG);
+
+ /* enable interrupts */
+ writel(RX_FULL_INTR, devpriv->mmio + INTERRUPT_CONTROL_REG);
+
+ writel(RX_ENABLE_BIT, devpriv->mmio + BOARD_CONTROL_REG);
+
+ return 0;
+}
+
+static int gsc_hpdi_cmd_test(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ int err = 0;
+ int i;
+
+ if (s->io_bits)
+ return -EINVAL;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+
+ err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+ /* Step 2b : and mutually compatible */
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ if (!cmd->chanlist_len || !cmd->chanlist) {
+ cmd->chanlist_len = 32;
+ err |= -EINVAL;
+ }
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+
+ if (cmd->stop_src == TRIG_COUNT)
+ err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
+ else /* TRIG_NONE */
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* step 4: fix up any arguments */
+
+ if (err)
+ return 4;
+
+ /* step 5: complain about special chanlist considerations */
+
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ if (CR_CHAN(cmd->chanlist[i]) != i) {
+ /* XXX could support 8 or 16 channels */
+ dev_err(dev->class_dev,
+ "chanlist must be ch 0 to 31 in order");
+ err |= -EINVAL;
+ break;
+ }
+ }
+
+ if (err)
+ return 5;
+
+ return 0;
+}
+
+/* setup dma descriptors so a link completes every 'len' bytes */
+static int gsc_hpdi_setup_dma_descriptors(struct comedi_device *dev,
+ unsigned int len)
+{
+ struct hpdi_private *devpriv = dev->private;
+ dma_addr_t phys_addr = devpriv->dma_desc_phys_addr;
+ uint32_t next_bits = PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
+ PLX_XFER_LOCAL_TO_PCI;
+ unsigned int offset = 0;
+ unsigned int idx = 0;
+ unsigned int i;
+
+ if (len > DMA_BUFFER_SIZE)
+ len = DMA_BUFFER_SIZE;
+ len -= len % sizeof(uint32_t);
+ if (len == 0)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_DMA_DESCRIPTORS && idx < NUM_DMA_BUFFERS; i++) {
+ devpriv->dma_desc[i].pci_start_addr =
+ cpu_to_le32(devpriv->dio_buffer_phys_addr[idx] + offset);
+ devpriv->dma_desc[i].local_start_addr = cpu_to_le32(FIFO_REG);
+ devpriv->dma_desc[i].transfer_size = cpu_to_le32(len);
+ devpriv->dma_desc[i].next = cpu_to_le32((phys_addr +
+ (i + 1) * sizeof(devpriv->dma_desc[0])) | next_bits);
+
+ devpriv->desc_dio_buffer[i] = devpriv->dio_buffer[idx] +
+ (offset / sizeof(uint32_t));
+
+ offset += len;
+ if (len + offset > DMA_BUFFER_SIZE) {
+ offset = 0;
+ idx++;
+ }
+ }
+ devpriv->num_dma_descriptors = i;
+ /* fix last descriptor to point back to first */
+ devpriv->dma_desc[i - 1].next = cpu_to_le32(phys_addr | next_bits);
+
+ devpriv->block_size = len;
+
+ return len;
+}
+
+static int gsc_hpdi_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int ret;
switch (data[0]) {
case INSN_CONFIG_BLOCK_SIZE:
- return dio_config_block_size(dev, data);
+ ret = gsc_hpdi_setup_dma_descriptors(dev, data[1]);
+ if (ret)
+ return ret;
+
+ data[1] = ret;
+ break;
default:
ret = comedi_dio_insn_config(dev, s, insn, data, 0xffffffff);
if (ret)
@@ -237,31 +499,53 @@ static int dio_config_insn(struct comedi_device *dev,
return insn->n;
}
-static void disable_plx_interrupts(struct comedi_device *dev)
+static int gsc_hpdi_init(struct comedi_device *dev)
{
struct hpdi_private *devpriv = dev->private;
+ uint32_t plx_intcsr_bits;
+
+ /* wait 10usec after reset before accessing fifos */
+ writel(BOARD_RESET_BIT, devpriv->mmio + BOARD_CONTROL_REG);
+ udelay(10);
+
+ writel(ALMOST_EMPTY_BITS(32) | ALMOST_FULL_BITS(32),
+ devpriv->mmio + RX_PROG_ALMOST_REG);
+ writel(ALMOST_EMPTY_BITS(32) | ALMOST_FULL_BITS(32),
+ devpriv->mmio + TX_PROG_ALMOST_REG);
- writel(0, devpriv->plx9080_iobase + PLX_INTRCS_REG);
+ devpriv->tx_fifo_size = readl(devpriv->mmio + TX_FIFO_SIZE_REG) &
+ FIFO_SIZE_MASK;
+ devpriv->rx_fifo_size = readl(devpriv->mmio + RX_FIFO_SIZE_REG) &
+ FIFO_SIZE_MASK;
+
+ writel(0, devpriv->mmio + INTERRUPT_CONTROL_REG);
+
+ /* enable interrupts */
+ plx_intcsr_bits =
+ ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
+ ICS_DMA0_E;
+ writel(plx_intcsr_bits, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+
+ return 0;
}
-/* initialize plx9080 chip */
-static void init_plx9080(struct comedi_device *dev)
+static void gsc_hpdi_init_plx9080(struct comedi_device *dev)
{
struct hpdi_private *devpriv = dev->private;
uint32_t bits;
- void __iomem *plx_iobase = devpriv->plx9080_iobase;
+ void __iomem *plx_iobase = devpriv->plx9080_mmio;
#ifdef __BIG_ENDIAN
bits = BIGEND_DMA0 | BIGEND_DMA1;
#else
bits = 0;
#endif
- writel(bits, devpriv->plx9080_iobase + PLX_BIGEND_REG);
+ writel(bits, devpriv->plx9080_mmio + PLX_BIGEND_REG);
- disable_plx_interrupts(dev);
+ writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
- abort_dma(dev, 0);
- abort_dma(dev, 1);
+ gsc_hpdi_abort_dma(dev, 0);
+ gsc_hpdi_abort_dma(dev, 1);
/* configure dma0 mode */
bits = 0;
@@ -285,117 +569,7 @@ static void init_plx9080(struct comedi_device *dev)
writel(bits, plx_iobase + PLX_DMA0_MODE_REG);
}
-/* Allocate and initialize the subdevice structures.
- */
-static int setup_subdevices(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* analog input subdevice */
- dev->read_subdev = s;
-/* dev->write_subdev = s; */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags =
- SDF_READABLE | SDF_WRITEABLE | SDF_LSAMPL | SDF_CMD_READ;
- s->n_chan = 32;
- s->len_chanlist = 32;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_config = dio_config_insn;
- s->do_cmd = hpdi_cmd;
- s->do_cmdtest = hpdi_cmd_test;
- s->cancel = hpdi_cancel;
-
- return 0;
-}
-
-static int init_hpdi(struct comedi_device *dev)
-{
- struct hpdi_private *devpriv = dev->private;
- uint32_t plx_intcsr_bits;
-
- writel(BOARD_RESET_BIT, devpriv->hpdi_iobase + BOARD_CONTROL_REG);
- udelay(10);
-
- writel(almost_empty_bits(32) | almost_full_bits(32),
- devpriv->hpdi_iobase + RX_PROG_ALMOST_REG);
- writel(almost_empty_bits(32) | almost_full_bits(32),
- devpriv->hpdi_iobase + TX_PROG_ALMOST_REG);
-
- devpriv->tx_fifo_size = fifo_size(readl(devpriv->hpdi_iobase +
- TX_FIFO_SIZE_REG));
- devpriv->rx_fifo_size = fifo_size(readl(devpriv->hpdi_iobase +
- RX_FIFO_SIZE_REG));
-
- writel(0, devpriv->hpdi_iobase + INTERRUPT_CONTROL_REG);
-
- /* enable interrupts */
- plx_intcsr_bits =
- ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
- ICS_DMA0_E;
- writel(plx_intcsr_bits, devpriv->plx9080_iobase + PLX_INTRCS_REG);
-
- return 0;
-}
-
-/* setup dma descriptors so a link completes every 'transfer_size' bytes */
-static int setup_dma_descriptors(struct comedi_device *dev,
- unsigned int transfer_size)
-{
- struct hpdi_private *devpriv = dev->private;
- unsigned int buffer_index, buffer_offset;
- uint32_t next_bits = PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
- PLX_XFER_LOCAL_TO_PCI;
- unsigned int i;
-
- if (transfer_size > DMA_BUFFER_SIZE)
- transfer_size = DMA_BUFFER_SIZE;
- transfer_size -= transfer_size % sizeof(uint32_t);
- if (transfer_size == 0)
- return -1;
-
- buffer_offset = 0;
- buffer_index = 0;
- for (i = 0; i < NUM_DMA_DESCRIPTORS &&
- buffer_index < NUM_DMA_BUFFERS; i++) {
- devpriv->dma_desc[i].pci_start_addr =
- cpu_to_le32(devpriv->dio_buffer_phys_addr[buffer_index] +
- buffer_offset);
- devpriv->dma_desc[i].local_start_addr = cpu_to_le32(FIFO_REG);
- devpriv->dma_desc[i].transfer_size =
- cpu_to_le32(transfer_size);
- devpriv->dma_desc[i].next =
- cpu_to_le32((devpriv->dma_desc_phys_addr + (i +
- 1) *
- sizeof(devpriv->dma_desc[0])) | next_bits);
-
- devpriv->desc_dio_buffer[i] =
- devpriv->dio_buffer[buffer_index] +
- (buffer_offset / sizeof(uint32_t));
-
- buffer_offset += transfer_size;
- if (transfer_size + buffer_offset > DMA_BUFFER_SIZE) {
- buffer_offset = 0;
- buffer_index++;
- }
- }
- devpriv->num_dma_descriptors = i;
- /* fix last descriptor to point back to first */
- devpriv->dma_desc[i - 1].next =
- cpu_to_le32(devpriv->dma_desc_phys_addr | next_bits);
-
- devpriv->block_size = transfer_size;
-
- return transfer_size;
-}
-
-static const struct hpdi_board *hpdi_find_board(struct pci_dev *pcidev)
+static const struct hpdi_board *gsc_hpdi_find_board(struct pci_dev *pcidev)
{
unsigned int i;
@@ -406,16 +580,17 @@ static const struct hpdi_board *hpdi_find_board(struct pci_dev *pcidev)
return NULL;
}
-static int hpdi_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
+static int gsc_hpdi_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct hpdi_board *thisboard;
struct hpdi_private *devpriv;
+ struct comedi_subdevice *s;
int i;
int retval;
- thisboard = hpdi_find_board(pcidev);
+ thisboard = gsc_hpdi_find_board(pcidev);
if (!thisboard) {
dev_err(dev->class_dev, "gsc_hpdi: pci %s not supported\n",
pci_name(pcidev));
@@ -433,17 +608,17 @@ static int hpdi_auto_attach(struct comedi_device *dev,
return retval;
pci_set_master(pcidev);
- devpriv->plx9080_iobase = pci_ioremap_bar(pcidev, 0);
- devpriv->hpdi_iobase = pci_ioremap_bar(pcidev, 2);
- if (!devpriv->plx9080_iobase || !devpriv->hpdi_iobase) {
+ devpriv->plx9080_mmio = pci_ioremap_bar(pcidev, 0);
+ devpriv->mmio = pci_ioremap_bar(pcidev, 2);
+ if (!devpriv->plx9080_mmio || !devpriv->mmio) {
dev_warn(dev->class_dev, "failed to remap io memory\n");
return -ENOMEM;
}
- init_plx9080(dev);
+ gsc_hpdi_init_plx9080(dev);
/* get irq */
- if (request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
+ if (request_irq(pcidev->irq, gsc_hpdi_interrupt, IRQF_SHARED,
dev->board_name, dev)) {
dev_warn(dev->class_dev,
"unable to allocate irq %u\n", pcidev->irq);
@@ -470,18 +645,33 @@ static int hpdi_auto_attach(struct comedi_device *dev,
return -EIO;
}
- retval = setup_dma_descriptors(dev, 0x1000);
+ retval = gsc_hpdi_setup_dma_descriptors(dev, 0x1000);
if (retval < 0)
return retval;
- retval = setup_subdevices(dev);
- if (retval < 0)
+ retval = comedi_alloc_subdevices(dev, 1);
+ if (retval)
return retval;
- return init_hpdi(dev);
+ /* Digital I/O subdevice */
+ s = &dev->subdevices[0];
+ dev->read_subdev = s;
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITEABLE | SDF_LSAMPL |
+ SDF_CMD_READ;
+ s->n_chan = 32;
+ s->len_chanlist = 32;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_config = gsc_hpdi_dio_insn_config;
+ s->do_cmd = gsc_hpdi_cmd;
+ s->do_cmdtest = gsc_hpdi_cmd_test;
+ s->cancel = gsc_hpdi_cancel;
+
+ return gsc_hpdi_init(dev);
}
-static void hpdi_detach(struct comedi_device *dev)
+static void gsc_hpdi_detach(struct comedi_device *dev)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct hpdi_private *devpriv = dev->private;
@@ -490,12 +680,12 @@ static void hpdi_detach(struct comedi_device *dev)
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv) {
- if (devpriv->plx9080_iobase) {
- disable_plx_interrupts(dev);
- iounmap(devpriv->plx9080_iobase);
+ if (devpriv->plx9080_mmio) {
+ writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+ iounmap(devpriv->plx9080_mmio);
}
- if (devpriv->hpdi_iobase)
- iounmap(devpriv->hpdi_iobase);
+ if (devpriv->mmio)
+ iounmap(devpriv->mmio);
/* free pci dma buffers */
for (i = 0; i < NUM_DMA_BUFFERS; i++) {
if (devpriv->dio_buffer[i])
@@ -516,318 +706,11 @@ static void hpdi_detach(struct comedi_device *dev)
comedi_pci_disable(dev);
}
-static int dio_config_block_size(struct comedi_device *dev, unsigned int *data)
-{
- unsigned int requested_block_size;
- int retval;
-
- requested_block_size = data[1];
-
- retval = setup_dma_descriptors(dev, requested_block_size);
- if (retval < 0)
- return retval;
-
- data[1] = retval;
-
- return 2;
-}
-
-static int di_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
- int i;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= cfc_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- if (!cmd->chanlist_len) {
- cmd->chanlist_len = 32;
- err |= -EINVAL;
- }
- err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
-
- switch (cmd->stop_src) {
- case TRIG_COUNT:
- err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
- break;
- case TRIG_NONE:
- err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
- break;
- default:
- break;
- }
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (err)
- return 4;
-
- if (!cmd->chanlist)
- return 0;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- if (CR_CHAN(cmd->chanlist[i]) != i) {
- /* XXX could support 8 or 16 channels */
- comedi_error(dev,
- "chanlist must be ch 0 to 31 in order");
- err++;
- break;
- }
- }
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int hpdi_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- if (s->io_bits)
- return -EINVAL;
- else
- return di_cmd_test(dev, s, cmd);
-}
-
-static inline void hpdi_writel(struct comedi_device *dev, uint32_t bits,
- unsigned int offset)
-{
- struct hpdi_private *devpriv = dev->private;
-
- writel(bits | devpriv->bits[offset / sizeof(uint32_t)],
- devpriv->hpdi_iobase + offset);
-}
-
-static int di_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct hpdi_private *devpriv = dev->private;
- uint32_t bits;
- unsigned long flags;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
-
- hpdi_writel(dev, RX_FIFO_RESET_BIT, BOARD_CONTROL_REG);
-
- abort_dma(dev, 0);
-
- devpriv->dma_desc_index = 0;
-
- /* These register are supposedly unused during chained dma,
- * but I have found that left over values from last operation
- * occasionally cause problems with transfer of first dma
- * block. Initializing them to zero seems to fix the problem. */
- writel(0, devpriv->plx9080_iobase + PLX_DMA0_TRANSFER_SIZE_REG);
- writel(0, devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
- writel(0, devpriv->plx9080_iobase + PLX_DMA0_LOCAL_ADDRESS_REG);
- /* give location of first dma descriptor */
- bits =
- devpriv->dma_desc_phys_addr | PLX_DESC_IN_PCI_BIT |
- PLX_INTR_TERM_COUNT | PLX_XFER_LOCAL_TO_PCI;
- writel(bits, devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
-
- /* spinlock for plx dma control/status reg */
- spin_lock_irqsave(&dev->spinlock, flags);
- /* enable dma transfer */
- writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->dio_count = cmd->stop_arg;
- else
- devpriv->dio_count = 1;
-
- /* clear over/under run status flags */
- writel(RX_UNDERRUN_BIT | RX_OVERRUN_BIT,
- devpriv->hpdi_iobase + BOARD_STATUS_REG);
- /* enable interrupts */
- writel(intr_bit(RX_FULL_INTR),
- devpriv->hpdi_iobase + INTERRUPT_CONTROL_REG);
-
- hpdi_writel(dev, RX_ENABLE_BIT, BOARD_CONTROL_REG);
-
- return 0;
-}
-
-static int hpdi_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- if (s->io_bits)
- return -EINVAL;
- else
- return di_cmd(dev, s);
-}
-
-static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
-{
- struct hpdi_private *devpriv = dev->private;
- struct comedi_async *async = dev->read_subdev->async;
- uint32_t next_transfer_addr;
- int j;
- int num_samples = 0;
- void __iomem *pci_addr_reg;
-
- if (channel)
- pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG;
- else
- pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
-
- /* loop until we have read all the full buffers */
- j = 0;
- for (next_transfer_addr = readl(pci_addr_reg);
- (next_transfer_addr <
- le32_to_cpu(devpriv->dma_desc[devpriv->dma_desc_index].
- pci_start_addr)
- || next_transfer_addr >=
- le32_to_cpu(devpriv->dma_desc[devpriv->dma_desc_index].
- pci_start_addr) + devpriv->block_size)
- && j < devpriv->num_dma_descriptors; j++) {
- /* transfer data from dma buffer to comedi buffer */
- num_samples = devpriv->block_size / sizeof(uint32_t);
- if (async->cmd.stop_src == TRIG_COUNT) {
- if (num_samples > devpriv->dio_count)
- num_samples = devpriv->dio_count;
- devpriv->dio_count -= num_samples;
- }
- cfc_write_array_to_buffer(dev->read_subdev,
- devpriv->desc_dio_buffer[devpriv->
- dma_desc_index],
- num_samples * sizeof(uint32_t));
- devpriv->dma_desc_index++;
- devpriv->dma_desc_index %= devpriv->num_dma_descriptors;
- }
- /* XXX check for buffer overrun somehow */
-}
-
-static irqreturn_t handle_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct hpdi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- uint32_t hpdi_intr_status, hpdi_board_status;
- uint32_t plx_status;
- uint32_t plx_bits;
- uint8_t dma0_status, dma1_status;
- unsigned long flags;
-
- if (!dev->attached)
- return IRQ_NONE;
-
- plx_status = readl(devpriv->plx9080_iobase + PLX_INTRCS_REG);
- if ((plx_status & (ICS_DMA0_A | ICS_DMA1_A | ICS_LIA)) == 0)
- return IRQ_NONE;
-
- hpdi_intr_status = readl(devpriv->hpdi_iobase + INTERRUPT_STATUS_REG);
- hpdi_board_status = readl(devpriv->hpdi_iobase + BOARD_STATUS_REG);
-
- async->events = 0;
-
- if (hpdi_intr_status) {
- writel(hpdi_intr_status,
- devpriv->hpdi_iobase + INTERRUPT_STATUS_REG);
- }
- /* spin lock makes sure no one else changes plx dma control reg */
- spin_lock_irqsave(&dev->spinlock, flags);
- dma0_status = readb(devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- if (plx_status & ICS_DMA0_A) { /* dma chan 0 interrupt */
- writeb((dma0_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
-
- if (dma0_status & PLX_DMA_EN_BIT)
- drain_dma_buffers(dev, 0);
- }
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* spin lock makes sure no one else changes plx dma control reg */
- spin_lock_irqsave(&dev->spinlock, flags);
- dma1_status = readb(devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- if (plx_status & ICS_DMA1_A) { /* XXX *//* dma chan 1 interrupt */
- writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- }
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* clear possible plx9080 interrupt sources */
- if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */
- plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- }
-
- if (hpdi_board_status & RX_OVERRUN_BIT) {
- comedi_error(dev, "rx fifo overrun");
- async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- }
-
- if (hpdi_board_status & RX_UNDERRUN_BIT) {
- comedi_error(dev, "rx fifo underrun");
- async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- }
-
- if (devpriv->dio_count == 0)
- async->events |= COMEDI_CB_EOA;
-
- cfc_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static void abort_dma(struct comedi_device *dev, unsigned int channel)
-{
- struct hpdi_private *devpriv = dev->private;
- unsigned long flags;
-
- /* spinlock for plx dma control/status reg */
- spin_lock_irqsave(&dev->spinlock, flags);
-
- plx9080_abort_dma(devpriv->plx9080_iobase, channel);
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
-}
-
-static int hpdi_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct hpdi_private *devpriv = dev->private;
-
- hpdi_writel(dev, 0, BOARD_CONTROL_REG);
-
- writel(0, devpriv->hpdi_iobase + INTERRUPT_CONTROL_REG);
-
- abort_dma(dev, 0);
-
- return 0;
-}
-
static struct comedi_driver gsc_hpdi_driver = {
.driver_name = "gsc_hpdi",
.module = THIS_MODULE,
- .auto_attach = hpdi_auto_attach,
- .detach = hpdi_detach,
+ .auto_attach = gsc_hpdi_auto_attach,
+ .detach = gsc_hpdi_detach,
};
static int gsc_hpdi_pci_probe(struct pci_dev *dev,
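For reference, the descriptor chaining that the renamed gsc_hpdi_setup_dma_descriptors() inherits from the removed setup_dma_descriptors() above boils down to: carve the DMA buffer into block-sized slices, point each descriptor at one slice and at the following descriptor, then patch the last descriptor to point back at the first so the PLX engine runs the chain as a ring. A minimal user-space model of that linkage, with illustrative structure names and a single buffer for brevity (the driver cycles through several buffers and also ORs PLX control bits into the next pointer):

#include <stdint.h>
#include <stdio.h>

#define NUM_DESC  8
#define BUF_SIZE  0x10000          /* one DMA buffer, in bytes */

struct desc {
	uint32_t pci_start_addr;   /* host-memory slice this descriptor fills */
	uint32_t transfer_size;    /* bytes moved before chaining on */
	uint32_t next;             /* bus address of the next descriptor */
};

int main(void)
{
	struct desc ring[NUM_DESC];
	uint32_t buf_addr = 0x10000000;   /* pretend bus address of the buffer */
	uint32_t ring_addr = 0x20000000;  /* pretend bus address of ring[0] */
	uint32_t block = 0x1000;          /* like the driver's 0x1000 default */
	unsigned int i, offset = 0;

	for (i = 0; i < NUM_DESC; i++) {
		ring[i].pci_start_addr = buf_addr + offset;
		ring[i].transfer_size = block;
		ring[i].next = (uint32_t)(ring_addr + (i + 1) * sizeof(ring[0]));
		offset = (offset + block) % BUF_SIZE;
	}
	/* close the loop: the last descriptor chains back to the first */
	ring[NUM_DESC - 1].next = ring_addr;

	for (i = 0; i < NUM_DESC; i++)
		printf("desc %u: pci 0x%08x next 0x%08x\n", i,
		       (unsigned int)ring[i].pci_start_addr,
		       (unsigned int)ring[i].next);
	return 0;
}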
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index 80539b2bea1a..0b8b2162b76b 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -171,12 +171,27 @@ static void setup_channel_list(struct comedi_device *dev,
}
}
+static int icp_multi_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct icp_multi_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readw(devpriv->io_addr + ICP_MULTI_ADC_CSR);
+ if ((status & ADC_BSY) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int icp_multi_insn_read_ai(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct icp_multi_private *devpriv = dev->private;
- int n, timeout;
+ int ret = 0;
+ int n;
/* Disable A/D conversion ready interrupt */
devpriv->IntEnable &= ~ADC_READY;
@@ -199,33 +214,10 @@ static int icp_multi_insn_read_ai(struct comedi_device *dev,
udelay(1);
/* Wait for conversion to complete, or get fed up waiting */
- timeout = 100;
- while (timeout--) {
- if (!(readw(devpriv->io_addr +
- ICP_MULTI_ADC_CSR) & ADC_BSY))
- goto conv_finish;
-
- udelay(1);
- }
-
- /* If we reach here, a timeout has occurred */
- comedi_error(dev, "A/D insn timeout");
-
- /* Disable interrupt */
- devpriv->IntEnable &= ~ADC_READY;
- writew(devpriv->IntEnable, devpriv->io_addr + ICP_MULTI_INT_EN);
-
- /* Clear interrupt status */
- devpriv->IntStatus |= ADC_READY;
- writew(devpriv->IntStatus,
- devpriv->io_addr + ICP_MULTI_INT_STAT);
+ ret = comedi_timeout(dev, s, insn, icp_multi_ai_eoc, 0);
+ if (ret)
+ break;
- /* Clear data received */
- data[n] = 0;
-
- return -ETIME;
-
-conv_finish:
data[n] =
(readw(devpriv->io_addr + ICP_MULTI_AI) >> 4) & 0x0fff;
}
@@ -238,7 +230,21 @@ conv_finish:
devpriv->IntStatus |= ADC_READY;
writew(devpriv->IntStatus, devpriv->io_addr + ICP_MULTI_INT_STAT);
- return n;
+ return ret ? ret : n;
+}
+
+static int icp_multi_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct icp_multi_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readw(devpriv->io_addr + ICP_MULTI_DAC_CSR);
+ if ((status & DAC_BSY) == 0)
+ return 0;
+ return -EBUSY;
}
static int icp_multi_insn_write_ao(struct comedi_device *dev,
@@ -246,7 +252,8 @@ static int icp_multi_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct icp_multi_private *devpriv = dev->private;
- int n, chan, range, timeout;
+ int n, chan, range;
+ int ret;
/* Disable D/A conversion ready interrupt */
devpriv->IntEnable &= ~DAC_READY;
@@ -274,33 +281,24 @@ static int icp_multi_insn_write_ao(struct comedi_device *dev,
for (n = 0; n < insn->n; n++) {
/* Wait for analogue output data register to be
* ready for new data, or get fed up waiting */
- timeout = 100;
- while (timeout--) {
- if (!(readw(devpriv->io_addr +
- ICP_MULTI_DAC_CSR) & DAC_BSY))
- goto dac_ready;
-
- udelay(1);
+ ret = comedi_timeout(dev, s, insn, icp_multi_ao_eoc, 0);
+ if (ret) {
+ /* Disable interrupt */
+ devpriv->IntEnable &= ~DAC_READY;
+ writew(devpriv->IntEnable,
+ devpriv->io_addr + ICP_MULTI_INT_EN);
+
+ /* Clear interrupt status */
+ devpriv->IntStatus |= DAC_READY;
+ writew(devpriv->IntStatus,
+ devpriv->io_addr + ICP_MULTI_INT_STAT);
+
+ /* Clear data received */
+ devpriv->ao_data[chan] = 0;
+
+ return ret;
}
- /* If we reach here, a timeout has occurred */
- comedi_error(dev, "D/A insn timeout");
-
- /* Disable interrupt */
- devpriv->IntEnable &= ~DAC_READY;
- writew(devpriv->IntEnable, devpriv->io_addr + ICP_MULTI_INT_EN);
-
- /* Clear interrupt status */
- devpriv->IntStatus |= DAC_READY;
- writew(devpriv->IntStatus,
- devpriv->io_addr + ICP_MULTI_INT_STAT);
-
- /* Clear data received */
- devpriv->ao_data[chan] = 0;
-
- return -ETIME;
-
-dac_ready:
/* Write data to analogue output data register */
writew(data[n], devpriv->io_addr + ICP_MULTI_AO);
@@ -565,9 +563,6 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
devpriv->valid = 1;
- dev_info(dev->class_dev, "%s attached, irq %sabled\n",
- dev->board_name, dev->irq ? "en" : "dis");
-
return 0;
}
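The icp_multi change above is the first of several identical conversions in this series (ii_pci20kc, me_daq and mf6x4 follow the same shape): the open-coded "timeout--; udelay(1);" loop is replaced by a small end-of-conversion callback that returns -EBUSY while the hardware is still busy, and the comedi core's comedi_timeout() helper does the polling and turns an expired wait into an error. A stripped-down sketch of the pattern; the FOO_* register names and the driver itself are hypothetical, only the callback signature and the comedi_timeout() call follow the diff:

#define FOO_STATUS_REG  0x08        /* hypothetical status register */
#define FOO_DATA_REG    0x0c        /* hypothetical data register */
#define FOO_ADC_BUSY    (1 << 0)    /* hypothetical busy bit */

/* returns 0 when the conversion is done, -EBUSY to keep polling */
static int foo_ai_eoc(struct comedi_device *dev,
		      struct comedi_subdevice *s,
		      struct comedi_insn *insn,
		      unsigned long context)
{
	unsigned int status = inl(dev->iobase + FOO_STATUS_REG);

	if ((status & FOO_ADC_BUSY) == 0)
		return 0;
	return -EBUSY;
}

static int foo_ai_insn_read(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn,
			    unsigned int *data)
{
	int ret, i;

	for (i = 0; i < insn->n; i++) {
		/* ... start the conversion here ... */

		/* poll until the callback stops returning -EBUSY */
		ret = comedi_timeout(dev, s, insn, foo_ai_eoc, 0);
		if (ret)
			return ret;

		data[i] = inl(dev->iobase + FOO_DATA_REG);
	}

	return insn->n;
}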
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index 8577778441fa..3558ab3b6e1f 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -190,20 +190,18 @@ static int ii20k_ao_insn_write(struct comedi_device *dev,
return insn->n;
}
-static int ii20k_ai_wait_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- int timeout)
+static int ii20k_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
void __iomem *iobase = ii20k_module_iobase(dev, s);
unsigned char status;
- do {
- status = readb(iobase + II20K_AI_STATUS_REG);
- if ((status & II20K_AI_STATUS_INT) == 0)
- return 0;
- } while (timeout--);
-
- return -ETIME;
+ status = readb(iobase + II20K_AI_STATUS_REG);
+ if ((status & II20K_AI_STATUS_INT) == 0)
+ return 0;
+ return -EBUSY;
}
static void ii20k_ai_setup(struct comedi_device *dev,
@@ -263,7 +261,7 @@ static int ii20k_ai_insn_read(struct comedi_device *dev,
/* generate a software start convert signal */
readb(iobase + II20K_AI_PACER_RESET_REG);
- ret = ii20k_ai_wait_eoc(dev, s, 100);
+ ret = comedi_timeout(dev, s, insn, ii20k_ai_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 6100c12c164f..a8db9d86aadc 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -51,23 +51,55 @@
#include "jr3_pci.h"
#define PCI_VENDOR_ID_JR3 0x1762
-#define PCI_DEVICE_ID_JR3_1_CHANNEL 0x3111
-#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW 0x1111
-#define PCI_DEVICE_ID_JR3_2_CHANNEL 0x3112
-#define PCI_DEVICE_ID_JR3_3_CHANNEL 0x3113
-#define PCI_DEVICE_ID_JR3_4_CHANNEL 0x3114
-struct jr3_pci_dev_private {
- struct jr3_t __iomem *iobase;
- int n_channels;
- struct timer_list timer;
+enum jr3_pci_boardid {
+ BOARD_JR3_1,
+ BOARD_JR3_2,
+ BOARD_JR3_3,
+ BOARD_JR3_4,
};
-struct poll_delay_t {
+struct jr3_pci_board {
+ const char *name;
+ int n_subdevs;
+};
+
+static const struct jr3_pci_board jr3_pci_boards[] = {
+ [BOARD_JR3_1] = {
+ .name = "jr3_pci_1",
+ .n_subdevs = 1,
+ },
+ [BOARD_JR3_2] = {
+ .name = "jr3_pci_2",
+ .n_subdevs = 2,
+ },
+ [BOARD_JR3_3] = {
+ .name = "jr3_pci_3",
+ .n_subdevs = 3,
+ },
+ [BOARD_JR3_4] = {
+ .name = "jr3_pci_4",
+ .n_subdevs = 4,
+ },
+};
+
+struct jr3_pci_transform {
+ struct {
+ u16 link_type;
+ s16 link_amount;
+ } link[8];
+};
+
+struct jr3_pci_poll_delay {
int min;
int max;
};
+struct jr3_pci_dev_private {
+ struct jr3_t __iomem *iobase;
+ struct timer_list timer;
+};
+
struct jr3_pci_subdev_private {
struct jr3_channel __iomem *channel;
unsigned long next_time_min;
@@ -79,7 +111,6 @@ struct jr3_pci_subdev_private {
state_jr3_init_use_offset_complete,
state_jr3_done
} state;
- int channel_no;
int serial_no;
int model_no;
struct {
@@ -92,9 +123,9 @@ struct jr3_pci_subdev_private {
int retries;
};
-static struct poll_delay_t poll_delay_min_max(int min, int max)
+static struct jr3_pci_poll_delay poll_delay_min_max(int min, int max)
{
- struct poll_delay_t result;
+ struct jr3_pci_poll_delay result;
result.min = min;
result.max = max;
@@ -106,15 +137,8 @@ static int is_complete(struct jr3_channel __iomem *channel)
return get_s16(&channel->command_word0) == 0;
}
-struct transform_t {
- struct {
- u16 link_type;
- s16 link_amount;
- } link[8];
-};
-
static void set_transforms(struct jr3_channel __iomem *channel,
- struct transform_t transf, short num)
+ struct jr3_pci_transform transf, short num)
{
int i;
@@ -194,110 +218,99 @@ static struct six_axis_t get_max_full_scales(struct jr3_channel __iomem
return result;
}
+static unsigned int jr3_pci_ai_read_chan(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chan)
+{
+ struct jr3_pci_subdev_private *spriv = s->private;
+ unsigned int val = 0;
+
+ if (spriv->state != state_jr3_done)
+ return 0;
+
+ if (chan < 56) {
+ unsigned int axis = chan % 8;
+ unsigned filter = chan / 8;
+
+ switch (axis) {
+ case 0:
+ val = get_s16(&spriv->channel->filter[filter].fx);
+ break;
+ case 1:
+ val = get_s16(&spriv->channel->filter[filter].fy);
+ break;
+ case 2:
+ val = get_s16(&spriv->channel->filter[filter].fz);
+ break;
+ case 3:
+ val = get_s16(&spriv->channel->filter[filter].mx);
+ break;
+ case 4:
+ val = get_s16(&spriv->channel->filter[filter].my);
+ break;
+ case 5:
+ val = get_s16(&spriv->channel->filter[filter].mz);
+ break;
+ case 6:
+ val = get_s16(&spriv->channel->filter[filter].v1);
+ break;
+ case 7:
+ val = get_s16(&spriv->channel->filter[filter].v2);
+ break;
+ }
+ val += 0x4000;
+ } else if (chan == 56) {
+ val = get_u16(&spriv->channel->model_no);
+ } else if (chan == 57) {
+ val = get_u16(&spriv->channel->serial_no);
+ }
+
+ return val;
+}
+
static int jr3_pci_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int result;
- struct jr3_pci_subdev_private *p;
- int channel;
-
- p = s->private;
- channel = CR_CHAN(insn->chanspec);
- if (p == NULL || channel > 57) {
- result = -EINVAL;
- } else {
- int i;
-
- result = insn->n;
- if (p->state != state_jr3_done ||
- (get_u16(&p->channel->errors) & (watch_dog | watch_dog2 |
- sensor_change))) {
- /* No sensor or sensor changed */
- if (p->state == state_jr3_done) {
- /* Restart polling */
- p->state = state_jr3_poll;
- }
- result = -EAGAIN;
- }
- for (i = 0; i < insn->n; i++) {
- if (channel < 56) {
- int axis, filter;
-
- axis = channel % 8;
- filter = channel / 8;
- if (p->state != state_jr3_done) {
- data[i] = 0;
- } else {
- int F = 0;
- switch (axis) {
- case 0:
- F = get_s16(&p->channel->
- filter[filter].fx);
- break;
- case 1:
- F = get_s16(&p->channel->
- filter[filter].fy);
- break;
- case 2:
- F = get_s16(&p->channel->
- filter[filter].fz);
- break;
- case 3:
- F = get_s16(&p->channel->
- filter[filter].mx);
- break;
- case 4:
- F = get_s16(&p->channel->
- filter[filter].my);
- break;
- case 5:
- F = get_s16(&p->channel->
- filter[filter].mz);
- break;
- case 6:
- F = get_s16(&p->channel->
- filter[filter].v1);
- break;
- case 7:
- F = get_s16(&p->channel->
- filter[filter].v2);
- break;
- }
- data[i] = F + 0x4000;
- }
- } else if (channel == 56) {
- if (p->state != state_jr3_done)
- data[i] = 0;
- else
- data[i] =
- get_u16(&p->channel->model_no);
- } else if (channel == 57) {
- if (p->state != state_jr3_done)
- data[i] = 0;
- else
- data[i] =
- get_u16(&p->channel->serial_no);
- }
+ struct jr3_pci_subdev_private *spriv = s->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ u16 errors;
+ int i;
+
+ if (!spriv)
+ return -EINVAL;
+
+ errors = get_u16(&spriv->channel->errors);
+ if (spriv->state != state_jr3_done ||
+ (errors & (watch_dog | watch_dog2 | sensor_change))) {
+ /* No sensor or sensor changed */
+ if (spriv->state == state_jr3_done) {
+ /* Restart polling */
+ spriv->state = state_jr3_poll;
}
+ return -EAGAIN;
}
- return result;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = jr3_pci_ai_read_chan(dev, s, chan);
+
+ return insn->n;
}
static int jr3_pci_open(struct comedi_device *dev)
{
+ struct jr3_pci_subdev_private *spriv;
+ struct comedi_subdevice *s;
int i;
- struct jr3_pci_dev_private *devpriv = dev->private;
dev_dbg(dev->class_dev, "jr3_pci_open\n");
- for (i = 0; i < devpriv->n_channels; i++) {
- struct jr3_pci_subdev_private *p;
-
- p = dev->subdevices[i].private;
- if (p) {
- dev_dbg(dev->class_dev, "serial: %p %d (%d)\n", p,
- p->serial_no, p->channel_no);
- }
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ spriv = s->private;
+ if (spriv)
+ dev_dbg(dev->class_dev, "serial: %p %d (%d)\n",
+ spriv, spriv->serial_no, s->index);
}
return 0;
}
@@ -326,271 +339,262 @@ static int read_idm_word(const u8 *data, size_t size, int *pos,
return result;
}
-static int jr3_download_firmware(struct comedi_device *dev,
- const u8 *data, size_t size,
- unsigned long context)
+static int jr3_check_firmware(struct comedi_device *dev,
+ const u8 *data, size_t size)
{
+ int more = 1;
+ int pos = 0;
+
/*
* IDM file format is:
* { count, address, data <count> } *
* ffff
*/
- int result, more, pos, OK;
-
- result = 0;
- more = 1;
- pos = 0;
- OK = 0;
while (more) {
- unsigned int count, addr;
+ unsigned int count = 0;
+ unsigned int addr = 0;
more = more && read_idm_word(data, size, &pos, &count);
- if (more && count == 0xffff) {
- OK = 1;
- break;
- }
+ if (more && count == 0xffff)
+ return 0;
+
more = more && read_idm_word(data, size, &pos, &addr);
while (more && count > 0) {
- unsigned int dummy;
+ unsigned int dummy = 0;
+
more = more && read_idm_word(data, size, &pos, &dummy);
count--;
}
}
- if (!OK) {
- result = -ENODATA;
- } else {
- int i;
- struct jr3_pci_dev_private *p = dev->private;
+ return -ENODATA;
+}
+
+static void jr3_write_firmware(struct comedi_device *dev,
+ int subdev, const u8 *data, size_t size)
+{
+ struct jr3_pci_dev_private *devpriv = dev->private;
+ struct jr3_t __iomem *iobase = devpriv->iobase;
+ u32 __iomem *lo;
+ u32 __iomem *hi;
+ int more = 1;
+ int pos = 0;
+
+ while (more) {
+ unsigned int count = 0;
+ unsigned int addr = 0;
+
+ more = more && read_idm_word(data, size, &pos, &count);
+ if (more && count == 0xffff)
+ return;
+
+ more = more && read_idm_word(data, size, &pos, &addr);
+
+ dev_dbg(dev->class_dev, "Loading#%d %4.4x bytes at %4.4x\n",
+ subdev, count, addr);
+
+ while (more && count > 0) {
+ if (addr & 0x4000) {
+ /* 16 bit data, never seen in real life!! */
+ unsigned int data1 = 0;
- for (i = 0; i < p->n_channels; i++) {
- struct jr3_pci_subdev_private *sp;
+ more = more &&
+ read_idm_word(data, size, &pos, &data1);
+ count--;
+ /* jr3[addr + 0x20000 * pnum] = data1; */
+ } else {
+ /* Download 24 bit program */
+ unsigned int data1 = 0;
+ unsigned int data2 = 0;
+
+ lo = &iobase->channel[subdev].program_lo[addr];
+ hi = &iobase->channel[subdev].program_hi[addr];
- sp = dev->subdevices[i].private;
- more = 1;
- pos = 0;
- while (more) {
- unsigned int count, addr;
more = more &&
- read_idm_word(data, size, &pos, &count);
- if (more && count == 0xffff)
- break;
+ read_idm_word(data, size, &pos, &data1);
more = more &&
- read_idm_word(data, size, &pos, &addr);
- dev_dbg(dev->class_dev,
- "Loading#%d %4.4x bytes at %4.4x\n",
- i, count, addr);
- while (more && count > 0) {
- if (addr & 0x4000) {
- /* 16 bit data, never seen
- * in real life!! */
- unsigned int data1;
-
- more = more &&
- read_idm_word(data,
- size, &pos,
- &data1);
- count--;
- /* jr3[addr + 0x20000 * pnum] =
- data1; */
- } else {
- /* Download 24 bit program */
- unsigned int data1, data2;
-
- more = more &&
- read_idm_word(data,
- size, &pos,
- &data1);
- more = more &&
- read_idm_word(data, size,
- &pos,
- &data2);
- count -= 2;
- if (more) {
- set_u16(&p->
- iobase->channel
- [i].program_low
- [addr], data1);
- udelay(1);
- set_u16(&p->
- iobase->channel
- [i].program_high
- [addr], data2);
- udelay(1);
- }
- }
- addr++;
+ read_idm_word(data, size, &pos, &data2);
+ count -= 2;
+ if (more) {
+ set_u16(lo, data1);
+ udelay(1);
+ set_u16(hi, data2);
+ udelay(1);
}
}
+ addr++;
}
}
- return result;
}
-static struct poll_delay_t jr3_pci_poll_subdevice(struct comedi_subdevice *s)
+static int jr3_download_firmware(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context)
{
- struct poll_delay_t result = poll_delay_min_max(1000, 2000);
- struct jr3_pci_subdev_private *p = s->private;
- int i;
+ int subdev;
+ int ret;
- if (p) {
- struct jr3_channel __iomem *channel = p->channel;
- int errors = get_u16(&channel->errors);
-
- if (errors != p->errors)
- p->errors = errors;
-
- if (errors & (watch_dog | watch_dog2 | sensor_change))
- /* Sensor communication lost, force poll mode */
- p->state = state_jr3_poll;
-
- switch (p->state) {
- case state_jr3_poll: {
- u16 model_no = get_u16(&channel->model_no);
- u16 serial_no = get_u16(&channel->serial_no);
- if ((errors & (watch_dog | watch_dog2)) ||
- model_no == 0 || serial_no == 0) {
- /*
- * Still no sensor, keep on polling.
- * Since it takes up to 10 seconds
- * for offsets to stabilize, polling
- * each second should suffice.
- */
- result = poll_delay_min_max(1000, 2000);
- } else {
- p->retries = 0;
- p->state =
- state_jr3_init_wait_for_offset;
- result = poll_delay_min_max(1000, 2000);
- }
- }
- break;
- case state_jr3_init_wait_for_offset:
- p->retries++;
- if (p->retries < 10) {
- /* Wait for offeset to stabilize
- * (< 10 s according to manual) */
- result = poll_delay_min_max(1000, 2000);
- } else {
- struct transform_t transf;
+ /* verify IDM file format */
+ ret = jr3_check_firmware(dev, data, size);
+ if (ret)
+ return ret;
- p->model_no = get_u16(&channel->model_no);
- p->serial_no = get_u16(&channel->serial_no);
+ /* write firmware to each subdevice */
+ for (subdev = 0; subdev < dev->n_subdevices; subdev++)
+ jr3_write_firmware(dev, subdev, data, size);
- /* Transformation all zeros */
- for (i = 0; i < ARRAY_SIZE(transf.link); i++) {
- transf.link[i].link_type =
- (enum link_types)0;
- transf.link[i].link_amount = 0;
- }
+ return 0;
+}
- set_transforms(channel, transf, 0);
- use_transform(channel, 0);
- p->state = state_jr3_init_transform_complete;
- /* Allow 20 ms for completion */
- result = poll_delay_min_max(20, 100);
- }
- break;
- case state_jr3_init_transform_complete:
- if (!is_complete(channel)) {
- result = poll_delay_min_max(20, 100);
- } else {
- /* Set full scale */
- struct six_axis_t min_full_scale;
- struct six_axis_t max_full_scale;
-
- min_full_scale = get_min_full_scales(channel);
- max_full_scale = get_max_full_scales(channel);
- set_full_scales(channel, max_full_scale);
-
- p->state =
- state_jr3_init_set_full_scale_complete;
- /* Allow 20 ms for completion */
- result = poll_delay_min_max(20, 100);
- }
- break;
- case state_jr3_init_set_full_scale_complete:
- if (!is_complete(channel)) {
- result = poll_delay_min_max(20, 100);
- } else {
- struct force_array __iomem *full_scale;
-
- /* Use ranges in kN or we will
- * overflow around 2000N! */
- full_scale = &channel->full_scale;
- p->range[0].range.min =
- -get_s16(&full_scale->fx) * 1000;
- p->range[0].range.max =
- get_s16(&full_scale->fx) * 1000;
- p->range[1].range.min =
- -get_s16(&full_scale->fy) * 1000;
- p->range[1].range.max =
- get_s16(&full_scale->fy) * 1000;
- p->range[2].range.min =
- -get_s16(&full_scale->fz) * 1000;
- p->range[2].range.max =
- get_s16(&full_scale->fz) * 1000;
- p->range[3].range.min =
- -get_s16(&full_scale->mx) * 100;
- p->range[3].range.max =
- get_s16(&full_scale->mx) * 100;
- p->range[4].range.min =
- -get_s16(&full_scale->my) * 100;
- p->range[4].range.max =
- get_s16(&full_scale->my) * 100;
- p->range[5].range.min =
- -get_s16(&full_scale->mz) * 100;
- p->range[5].range.max =
- get_s16(&full_scale->mz) * 100; /* ?? */
- p->range[6].range.min =
- -get_s16(&full_scale->v1) * 100;/* ?? */
- p->range[6].range.max =
- get_s16(&full_scale->v1) * 100; /* ?? */
- p->range[7].range.min =
- -get_s16(&full_scale->v2) * 100;/* ?? */
- p->range[7].range.max =
- get_s16(&full_scale->v2) * 100; /* ?? */
- p->range[8].range.min = 0;
- p->range[8].range.max = 65535;
-
- use_offset(channel, 0);
- p->state = state_jr3_init_use_offset_complete;
- /* Allow 40 ms for completion */
- result = poll_delay_min_max(40, 100);
- }
- break;
- case state_jr3_init_use_offset_complete:
- if (!is_complete(channel)) {
- result = poll_delay_min_max(20, 100);
- } else {
- set_s16(&channel->offsets.fx, 0);
- set_s16(&channel->offsets.fy, 0);
- set_s16(&channel->offsets.fz, 0);
- set_s16(&channel->offsets.mx, 0);
- set_s16(&channel->offsets.my, 0);
- set_s16(&channel->offsets.mz, 0);
+static struct jr3_pci_poll_delay jr3_pci_poll_subdevice(struct comedi_subdevice *s)
+{
+ struct jr3_pci_subdev_private *spriv = s->private;
+ struct jr3_pci_poll_delay result = poll_delay_min_max(1000, 2000);
+ struct jr3_channel __iomem *channel;
+ u16 model_no;
+ u16 serial_no;
+ int errors;
+ int i;
- set_offset(channel);
+ if (!spriv)
+ return result;
- p->state = state_jr3_done;
+ channel = spriv->channel;
+ errors = get_u16(&channel->errors);
+
+ if (errors != spriv->errors)
+ spriv->errors = errors;
+
+ /* Sensor communication lost? force poll mode */
+ if (errors & (watch_dog | watch_dog2 | sensor_change))
+ spriv->state = state_jr3_poll;
+
+ switch (spriv->state) {
+ case state_jr3_poll:
+ model_no = get_u16(&channel->model_no);
+ serial_no = get_u16(&channel->serial_no);
+
+ if ((errors & (watch_dog | watch_dog2)) ||
+ model_no == 0 || serial_no == 0) {
+ /*
+ * Still no sensor, keep on polling.
+ * Since it takes up to 10 seconds for offsets to
+ * stabilize, polling each second should suffice.
+ */
+ } else {
+ spriv->retries = 0;
+ spriv->state = state_jr3_init_wait_for_offset;
+ }
+ break;
+ case state_jr3_init_wait_for_offset:
+ spriv->retries++;
+ if (spriv->retries < 10) {
+ /*
+ * Wait for offset to stabilize
+ * (< 10 s according to manual)
+ */
+ } else {
+ struct jr3_pci_transform transf;
+
+ spriv->model_no = get_u16(&channel->model_no);
+ spriv->serial_no = get_u16(&channel->serial_no);
+
+ /* Transformation all zeros */
+ for (i = 0; i < ARRAY_SIZE(transf.link); i++) {
+ transf.link[i].link_type = (enum link_types)0;
+ transf.link[i].link_amount = 0;
}
- break;
- case state_jr3_done:
- poll_delay_min_max(10000, 20000);
- break;
- default:
- poll_delay_min_max(1000, 2000);
- break;
+
+ set_transforms(channel, transf, 0);
+ use_transform(channel, 0);
+ spriv->state = state_jr3_init_transform_complete;
+ /* Allow 20 ms for completion */
+ result = poll_delay_min_max(20, 100);
+ }
+ break;
+ case state_jr3_init_transform_complete:
+ if (!is_complete(channel)) {
+ result = poll_delay_min_max(20, 100);
+ } else {
+ /* Set full scale */
+ struct six_axis_t min_full_scale;
+ struct six_axis_t max_full_scale;
+
+ min_full_scale = get_min_full_scales(channel);
+ max_full_scale = get_max_full_scales(channel);
+ set_full_scales(channel, max_full_scale);
+
+ spriv->state = state_jr3_init_set_full_scale_complete;
+ /* Allow 20 ms for completion */
+ result = poll_delay_min_max(20, 100);
+ }
+ break;
+ case state_jr3_init_set_full_scale_complete:
+ if (!is_complete(channel)) {
+ result = poll_delay_min_max(20, 100);
+ } else {
+ struct force_array __iomem *fs = &channel->full_scale;
+
+ /* Use ranges in kN or we will overflow around 2000N! */
+ spriv->range[0].range.min = -get_s16(&fs->fx) * 1000;
+ spriv->range[0].range.max = get_s16(&fs->fx) * 1000;
+ spriv->range[1].range.min = -get_s16(&fs->fy) * 1000;
+ spriv->range[1].range.max = get_s16(&fs->fy) * 1000;
+ spriv->range[2].range.min = -get_s16(&fs->fz) * 1000;
+ spriv->range[2].range.max = get_s16(&fs->fz) * 1000;
+ spriv->range[3].range.min = -get_s16(&fs->mx) * 100;
+ spriv->range[3].range.max = get_s16(&fs->mx) * 100;
+ spriv->range[4].range.min = -get_s16(&fs->my) * 100;
+ spriv->range[4].range.max = get_s16(&fs->my) * 100;
+ spriv->range[5].range.min = -get_s16(&fs->mz) * 100;
+ /* the next five are questionable */
+ spriv->range[5].range.max = get_s16(&fs->mz) * 100;
+ spriv->range[6].range.min = -get_s16(&fs->v1) * 100;
+ spriv->range[6].range.max = get_s16(&fs->v1) * 100;
+ spriv->range[7].range.min = -get_s16(&fs->v2) * 100;
+ spriv->range[7].range.max = get_s16(&fs->v2) * 100;
+ spriv->range[8].range.min = 0;
+ spriv->range[8].range.max = 65535;
+
+ use_offset(channel, 0);
+ spriv->state = state_jr3_init_use_offset_complete;
+ /* Allow 40 ms for completion */
+ result = poll_delay_min_max(40, 100);
+ }
+ break;
+ case state_jr3_init_use_offset_complete:
+ if (!is_complete(channel)) {
+ result = poll_delay_min_max(20, 100);
+ } else {
+ set_s16(&channel->offsets.fx, 0);
+ set_s16(&channel->offsets.fy, 0);
+ set_s16(&channel->offsets.fz, 0);
+ set_s16(&channel->offsets.mx, 0);
+ set_s16(&channel->offsets.my, 0);
+ set_s16(&channel->offsets.mz, 0);
+
+ set_offset(channel);
+
+ spriv->state = state_jr3_done;
}
+ break;
+ case state_jr3_done:
+ result = poll_delay_min_max(10000, 20000);
+ break;
+ default:
+ break;
}
+
return result;
}
static void jr3_pci_poll_dev(unsigned long data)
{
- unsigned long flags;
struct comedi_device *dev = (struct comedi_device *)data;
struct jr3_pci_dev_private *devpriv = dev->private;
+ struct jr3_pci_subdev_private *spriv;
+ struct comedi_subdevice *s;
+ unsigned long flags;
unsigned long now;
int delay;
int i;
@@ -598,18 +602,22 @@ static void jr3_pci_poll_dev(unsigned long data)
spin_lock_irqsave(&dev->spinlock, flags);
delay = 1000;
now = jiffies;
- /* Poll all channels that are ready to be polled */
- for (i = 0; i < devpriv->n_channels; i++) {
- struct jr3_pci_subdev_private *subdevpriv =
- dev->subdevices[i].private;
- if (now > subdevpriv->next_time_min) {
- struct poll_delay_t sub_delay;
-
- sub_delay = jr3_pci_poll_subdevice(&dev->subdevices[i]);
- subdevpriv->next_time_min =
- jiffies + msecs_to_jiffies(sub_delay.min);
- subdevpriv->next_time_max =
- jiffies + msecs_to_jiffies(sub_delay.max);
+
+ /* Poll all channels that are ready to be polled */
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ spriv = s->private;
+
+ if (now > spriv->next_time_min) {
+ struct jr3_pci_poll_delay sub_delay;
+
+ sub_delay = jr3_pci_poll_subdevice(s);
+
+ spriv->next_time_min = jiffies +
+ msecs_to_jiffies(sub_delay.min);
+ spriv->next_time_max = jiffies +
+ msecs_to_jiffies(sub_delay.max);
+
if (sub_delay.max && sub_delay.max < delay)
/*
* Wake up as late as possible ->
@@ -624,13 +632,58 @@ static void jr3_pci_poll_dev(unsigned long data)
add_timer(&devpriv->timer);
}
+static struct jr3_pci_subdev_private *
+jr3_pci_alloc_spriv(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ struct jr3_pci_dev_private *devpriv = dev->private;
+ struct jr3_pci_subdev_private *spriv;
+ int j;
+ int k;
+
+ spriv = comedi_alloc_spriv(s, sizeof(*spriv));
+ if (!spriv)
+ return NULL;
+
+ spriv->channel = &devpriv->iobase->channel[s->index].data;
+
+ for (j = 0; j < 8; j++) {
+ spriv->range[j].length = 1;
+ spriv->range[j].range.min = -1000000;
+ spriv->range[j].range.max = 1000000;
+
+ for (k = 0; k < 7; k++) {
+ spriv->range_table_list[j + k * 8] =
+ (struct comedi_lrange *)&spriv->range[j];
+ spriv->maxdata_list[j + k * 8] = 0x7fff;
+ }
+ }
+ spriv->range[8].length = 1;
+ spriv->range[8].range.min = 0;
+ spriv->range[8].range.max = 65536;
+
+ spriv->range_table_list[56] = (struct comedi_lrange *)&spriv->range[8];
+ spriv->range_table_list[57] = (struct comedi_lrange *)&spriv->range[8];
+ spriv->maxdata_list[56] = 0xffff;
+ spriv->maxdata_list[57] = 0xffff;
+
+ dev_dbg(dev->class_dev, "p->channel %p %p (%tx)\n",
+ spriv->channel, devpriv->iobase,
+ ((char __iomem *)spriv->channel -
+ (char __iomem *)devpriv->iobase));
+
+ return spriv;
+}
+
static int jr3_pci_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
+ unsigned long context)
{
- int result;
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- int i;
+ const struct jr3_pci_board *board = NULL;
struct jr3_pci_dev_private *devpriv;
+ struct jr3_pci_subdev_private *spriv;
+ struct comedi_subdevice *s;
+ int ret;
+ int i;
if (sizeof(struct jr3_channel) != 0xc00) {
dev_err(dev->class_dev,
@@ -639,106 +692,56 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
return -EINVAL;
}
+ if (context < ARRAY_SIZE(jr3_pci_boards))
+ board = &jr3_pci_boards[context];
+ if (!board)
+ return -ENODEV;
+ dev->board_ptr = board;
+ dev->board_name = board->name;
+
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
init_timer(&devpriv->timer);
- switch (pcidev->device) {
- case PCI_DEVICE_ID_JR3_1_CHANNEL:
- case PCI_DEVICE_ID_JR3_1_CHANNEL_NEW:
- devpriv->n_channels = 1;
- break;
- case PCI_DEVICE_ID_JR3_2_CHANNEL:
- devpriv->n_channels = 2;
- break;
- case PCI_DEVICE_ID_JR3_3_CHANNEL:
- devpriv->n_channels = 3;
- break;
- case PCI_DEVICE_ID_JR3_4_CHANNEL:
- devpriv->n_channels = 4;
- break;
- default:
- dev_err(dev->class_dev, "jr3_pci: pci %s not supported\n",
- pci_name(pcidev));
- return -EINVAL;
- break;
- }
- result = comedi_pci_enable(dev);
- if (result)
- return result;
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
devpriv->iobase = pci_ioremap_bar(pcidev, 0);
if (!devpriv->iobase)
return -ENOMEM;
- result = comedi_alloc_subdevices(dev, devpriv->n_channels);
- if (result)
- return result;
+ ret = comedi_alloc_subdevices(dev, board->n_subdevs);
+ if (ret)
+ return ret;
dev->open = jr3_pci_open;
- for (i = 0; i < devpriv->n_channels; i++) {
- dev->subdevices[i].type = COMEDI_SUBD_AI;
- dev->subdevices[i].subdev_flags = SDF_READABLE | SDF_GROUND;
- dev->subdevices[i].n_chan = 8 * 7 + 2;
- dev->subdevices[i].insn_read = jr3_pci_ai_insn_read;
- dev->subdevices[i].private =
- kzalloc(sizeof(struct jr3_pci_subdev_private),
- GFP_KERNEL);
- if (dev->subdevices[i].private) {
- struct jr3_pci_subdev_private *p;
- int j;
-
- p = dev->subdevices[i].private;
- p->channel = &devpriv->iobase->channel[i].data;
- dev_dbg(dev->class_dev, "p->channel %p %p (%tx)\n",
- p->channel, devpriv->iobase,
- ((char __iomem *)p->channel -
- (char __iomem *)devpriv->iobase));
- p->channel_no = i;
- for (j = 0; j < 8; j++) {
- int k;
-
- p->range[j].length = 1;
- p->range[j].range.min = -1000000;
- p->range[j].range.max = 1000000;
- for (k = 0; k < 7; k++) {
- p->range_table_list[j + k * 8] =
- (struct comedi_lrange *)&p->
- range[j];
- p->maxdata_list[j + k * 8] = 0x7fff;
- }
- }
- p->range[8].length = 1;
- p->range[8].range.min = 0;
- p->range[8].range.max = 65536;
-
- p->range_table_list[56] =
- (struct comedi_lrange *)&p->range[8];
- p->range_table_list[57] =
- (struct comedi_lrange *)&p->range[8];
- p->maxdata_list[56] = 0xffff;
- p->maxdata_list[57] = 0xffff;
- /* Channel specific range and maxdata */
- dev->subdevices[i].range_table = NULL;
- dev->subdevices[i].range_table_list =
- p->range_table_list;
- dev->subdevices[i].maxdata = 0;
- dev->subdevices[i].maxdata_list = p->maxdata_list;
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ s->n_chan = 8 * 7 + 2;
+ s->insn_read = jr3_pci_ai_insn_read;
+
+ spriv = jr3_pci_alloc_spriv(dev, s);
+ if (spriv) {
+ /* Channel specific range and maxdata */
+ s->range_table_list = spriv->range_table_list;
+ s->maxdata_list = spriv->maxdata_list;
}
}
/* Reset DSP card */
writel(0, &devpriv->iobase->channel[0].reset);
- result = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
- "comedi/jr3pci.idm",
- jr3_download_firmware, 0);
- dev_dbg(dev->class_dev, "Firmare load %d\n", result);
-
- if (result < 0)
- return result;
+ ret = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
+ "comedi/jr3pci.idm",
+ jr3_download_firmware, 0);
+ dev_dbg(dev->class_dev, "Firmare load %d\n", ret);
+ if (ret < 0)
+ return ret;
/*
* TODO: use firmware to load preferred offset tables. Suggested
* format:
@@ -761,11 +764,12 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
}
/* Start card timer */
- for (i = 0; i < devpriv->n_channels; i++) {
- struct jr3_pci_subdev_private *p = dev->subdevices[i].private;
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ spriv = s->private;
- p->next_time_min = jiffies + msecs_to_jiffies(500);
- p->next_time_max = jiffies + msecs_to_jiffies(2000);
+ spriv->next_time_min = jiffies + msecs_to_jiffies(500);
+ spriv->next_time_max = jiffies + msecs_to_jiffies(2000);
}
devpriv->timer.data = (unsigned long)dev;
@@ -773,21 +777,16 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
devpriv->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&devpriv->timer);
- return result;
+ return 0;
}
static void jr3_pci_detach(struct comedi_device *dev)
{
- int i;
struct jr3_pci_dev_private *devpriv = dev->private;
if (devpriv) {
del_timer_sync(&devpriv->timer);
- if (dev->subdevices) {
- for (i = 0; i < devpriv->n_channels; i++)
- kfree(dev->subdevices[i].private);
- }
if (devpriv->iobase)
iounmap(devpriv->iobase);
}
@@ -808,11 +807,11 @@ static int jr3_pci_pci_probe(struct pci_dev *dev,
}
static const struct pci_device_id jr3_pci_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL) },
- { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW) },
- { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL) },
- { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL) },
- { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_4_CHANNEL) },
+ { PCI_VDEVICE(JR3, 0x1111), BOARD_JR3_1 },
+ { PCI_VDEVICE(JR3, 0x3111), BOARD_JR3_1 },
+ { PCI_VDEVICE(JR3, 0x3112), BOARD_JR3_2 },
+ { PCI_VDEVICE(JR3, 0x3113), BOARD_JR3_3 },
+ { PCI_VDEVICE(JR3, 0x3114), BOARD_JR3_4 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, jr3_pci_pci_table);
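The jr3_pci rework above replaces the PCI device-ID switch with a boardinfo table indexed by the driver_data value carried in jr3_pci_pci_table, which reaches auto_attach as the context argument. This is the usual comedi PCI boardinfo pattern; a reduced sketch with hypothetical foo_* names (the probe body is not visible in this hunk, so the comedi_pci_auto_config() forwarding shown here is the conventional form, not quoted from the patch):

/* probe side: forward the id table's driver_data as the attach context */
static int foo_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	return comedi_pci_auto_config(dev, &foo_driver, id->driver_data);
}

/* attach side: context indexes the boardinfo array */
static int foo_auto_attach(struct comedi_device *dev, unsigned long context)
{
	const struct foo_board *board = NULL;

	if (context < ARRAY_SIZE(foo_boards))
		board = &foo_boards[context];
	if (!board)
		return -ENODEV;

	dev->board_ptr = board;
	dev->board_name = board->name;

	/* ... enable the PCI device, allocate subdevices, etc. ... */
	return 0;
}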
diff --git a/drivers/staging/comedi/drivers/jr3_pci.h b/drivers/staging/comedi/drivers/jr3_pci.h
index 3317f7a04c48..20478ae8fad6 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.h
+++ b/drivers/staging/comedi/drivers/jr3_pci.h
@@ -671,11 +671,11 @@ struct jr3_channel {
struct jr3_t {
struct {
- u32 program_low[0x4000]; /* 0x00000 - 0x10000 */
+ u32 program_lo[0x4000]; /* 0x00000 - 0x10000 */
struct jr3_channel data; /* 0x10000 - 0x10c00 */
char pad2[0x30000 - 0x00c00]; /* 0x10c00 - 0x40000 */
- u32 program_high[0x8000]; /* 0x40000 - 0x60000 */
- u32 reset; /* 0x60000 - 0x60004 */
+ u32 program_hi[0x8000]; /* 0x40000 - 0x60000 */
+ u32 reset; /* 0x60000 - 0x60004 */
char pad3[0x20000 - 0x00004]; /* 0x60004 - 0x80000 */
} channel[4];
};
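The jr3_check_firmware()/jr3_write_firmware() split earlier in this patch walks the same IDM stream twice: records of the form { count, address, data <count> }, terminated by a count of 0xffff, with the 24-bit program words ending up in the program_lo/program_hi arrays renamed here. The validation pass can also be written with early returns; a sketch that reuses the driver's read_idm_word() helper (which returns nonzero while words remain) and is intended to be equivalent to the "more &&" chain in the diff:

static int idm_check(const u8 *data, size_t size)
{
	unsigned int count, addr, dummy;
	int pos = 0;

	for (;;) {
		if (!read_idm_word(data, size, &pos, &count))
			return -ENODATA;   /* ran out before the terminator */
		if (count == 0xffff)
			return 0;          /* well-formed image */
		if (!read_idm_word(data, size, &pos, &addr))
			return -ENODATA;
		/* skip the <count> payload words without interpreting them */
		while (count--) {
			if (!read_idm_word(data, size, &pos, &dummy))
				return -ENODATA;
		}
	}
}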
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index 6b9846fd8c48..ec43c38958de 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -1,92 +1,113 @@
/*
- comedi/drivers/ke_counter.c
- Comedi driver for Kolter-Electronic PCI Counter 1 Card
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ke_counter
-Description: Driver for Kolter Electronic Counter Card
-Devices: [Kolter Electronic] PCI Counter Card (ke_counter)
-Author: Michael Hillmann
-Updated: Mon, 14 Apr 2008 15:42:42 +0100
-Status: tested
-
-Configuration Options: not applicable, uses PCI auto config
+ * ke_counter.c
+ * Comedi driver for Kolter-Electronic PCI Counter 1 Card
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
-This driver is a simple driver to read the counter values from
-Kolter Electronic PCI Counter Card.
-*/
+/*
+ * Driver: ke_counter
+ * Description: Driver for Kolter Electronic Counter Card
+ * Devices: (Kolter Electronic) PCI Counter Card [ke_counter]
+ * Author: Michael Hillmann
+ * Updated: Mon, 14 Apr 2008 15:42:42 +0100
+ * Status: tested
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ */
#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
-#define CNT_CARD_DEVICE_ID 0x0014
+/*
+ * PCI BAR 0 Register I/O map
+ */
+#define KE_RESET_REG(x) (0x00 + ((x) * 0x20))
+#define KE_LATCH_REG(x) (0x00 + ((x) * 0x20))
+#define KE_LSB_REG(x) (0x04 + ((x) * 0x20))
+#define KE_MID_REG(x) (0x08 + ((x) * 0x20))
+#define KE_MSB_REG(x) (0x0c + ((x) * 0x20))
+#define KE_SIGN_REG(x) (0x10 + ((x) * 0x20))
+#define KE_OSC_SEL_REG 0xf8
+#define KE_OSC_SEL_EXT (1 << 0)
+#define KE_OSC_SEL_4MHZ (2 << 0)
+#define KE_OSC_SEL_20MHZ (3 << 0)
+#define KE_DO_REG 0xfc
+
+static int ke_counter_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+ int i;
-/*-- counter write ----------------------------------------------------------*/
+ for (i = 0; i < insn->n; i++) {
+ val = data[0];
-/* This should be used only for resetting the counters; maybe it is better
- to make a special command 'reset'. */
-static int cnt_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- int chan = CR_CHAN(insn->chanspec);
-
- outb((unsigned char)((data[0] >> 24) & 0xff),
- dev->iobase + chan * 0x20 + 0x10);
- outb((unsigned char)((data[0] >> 16) & 0xff),
- dev->iobase + chan * 0x20 + 0x0c);
- outb((unsigned char)((data[0] >> 8) & 0xff),
- dev->iobase + chan * 0x20 + 0x08);
- outb((unsigned char)((data[0] >> 0) & 0xff),
- dev->iobase + chan * 0x20 + 0x04);
-
- /* return the number of samples written */
- return 1;
-}
+ /* Order matters */
+ outb((val >> 24) & 0xff, dev->iobase + KE_SIGN_REG(chan));
+ outb((val >> 16) & 0xff, dev->iobase + KE_MSB_REG(chan));
+ outb((val >> 8) & 0xff, dev->iobase + KE_MID_REG(chan));
+ outb((val >> 0) & 0xff, dev->iobase + KE_LSB_REG(chan));
+ }
-/*-- counter read -----------------------------------------------------------*/
+ return insn->n;
+}
-static int cnt_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
+static int ke_counter_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned char a0, a1, a2, a3, a4;
- int chan = CR_CHAN(insn->chanspec);
- int result;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+ int i;
- a0 = inb(dev->iobase + chan * 0x20);
- a1 = inb(dev->iobase + chan * 0x20 + 0x04);
- a2 = inb(dev->iobase + chan * 0x20 + 0x08);
- a3 = inb(dev->iobase + chan * 0x20 + 0x0c);
- a4 = inb(dev->iobase + chan * 0x20 + 0x10);
+ for (i = 0; i < insn->n; i++) {
+ /* Order matters */
+ inb(dev->iobase + KE_LATCH_REG(chan));
- result = (a1 + (a2 * 256) + (a3 * 65536));
- if (a4 > 0)
- result = result - s->maxdata;
+ val = inb(dev->iobase + KE_LSB_REG(chan));
+ val |= (inb(dev->iobase + KE_MID_REG(chan)) << 8);
+ val |= (inb(dev->iobase + KE_MSB_REG(chan)) << 16);
+ val |= (inb(dev->iobase + KE_SIGN_REG(chan)) << 24);
- *data = (unsigned int)result;
+ data[i] = val;
+ }
- /* return the number of samples read */
- return 1;
+ return insn->n;
}
-static int cnt_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
+static int ke_counter_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ if (comedi_dio_update_state(s, data))
+ outb(s->state, dev->iobase + KE_DO_REG);
+
+ data[1] = s->state;
+
+ return insn->n;
+}
+
+static int ke_counter_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct comedi_subdevice *s;
@@ -97,30 +118,32 @@ static int cnt_auto_attach(struct comedi_device *dev,
return ret;
dev->iobase = pci_resource_start(pcidev, 0);
- ret = comedi_alloc_subdevices(dev, 1);
+ ret = comedi_alloc_subdevices(dev, 2);
if (ret)
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
-
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE /* | SDF_COMMON */ ;
- s->n_chan = 3;
- s->maxdata = 0x00ffffff;
- s->insn_read = cnt_rinsn;
- s->insn_write = cnt_winsn;
-
- /* select 20MHz clock */
- outb(3, dev->iobase + 248);
-
- /* reset all counters */
- outb(0, dev->iobase);
- outb(0, dev->iobase + 0x20);
- outb(0, dev->iobase + 0x40);
-
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 3;
+ s->maxdata = 0x01ffffff;
+ s->range_table = &range_unknown;
+ s->insn_read = ke_counter_insn_read;
+ s->insn_write = ke_counter_insn_write;
+
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 3;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = ke_counter_do_insn_bits;
+
+ outb(KE_OSC_SEL_20MHZ, dev->iobase + KE_OSC_SEL_REG);
+
+ outb(0, dev->iobase + KE_RESET_REG(0));
+ outb(0, dev->iobase + KE_RESET_REG(1));
+ outb(0, dev->iobase + KE_RESET_REG(2));
return 0;
}
@@ -128,7 +151,7 @@ static int cnt_auto_attach(struct comedi_device *dev,
static struct comedi_driver ke_counter_driver = {
.driver_name = "ke_counter",
.module = THIS_MODULE,
- .auto_attach = cnt_auto_attach,
+ .auto_attach = ke_counter_auto_attach,
.detach = comedi_pci_disable,
};
@@ -140,7 +163,7 @@ static int ke_counter_pci_probe(struct pci_dev *dev,
}
static const struct pci_device_id ke_counter_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_KOLTER, 0x0014) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ke_counter_pci_table);
@@ -154,5 +177,5 @@ static struct pci_driver ke_counter_pci_driver = {
module_comedi_pci_driver(ke_counter_driver, ke_counter_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Kolter Electronic Counter Card");
MODULE_LICENSE("GPL");
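The new KE_* register map makes the read protocol explicit: touching the latch register presumably freezes the running count, after which the value is assembled byte by byte from the LSB, MID, MSB and sign registers of the selected 0x20-byte channel bank; the write path in ke_counter_insn_write() mirrors it in the opposite order, which is why both carry the "Order matters" comment. A small hypothetical helper (not part of the patch) showing one such read with the macros from the diff:

/* latch and read one counter channel; chan selects the 0x20-byte bank */
static unsigned int ke_counter_read_one(struct comedi_device *dev,
					unsigned int chan)
{
	unsigned int val;

	/* reading the latch register freezes the count for a coherent read */
	inb(dev->iobase + KE_LATCH_REG(chan));

	val = inb(dev->iobase + KE_LSB_REG(chan));
	val |= inb(dev->iobase + KE_MID_REG(chan)) << 8;
	val |= inb(dev->iobase + KE_MSB_REG(chan)) << 16;
	val |= inb(dev->iobase + KE_SIGN_REG(chan)) << 24;

	return val;
}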
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index e739bcd66a04..f02b31b317ec 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -1112,9 +1112,6 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
if (!dev->attached)
return IRQ_NONE;
- /* Reset all events */
- s->async->events = 0;
-
if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
ME4000_IRQ_STATUS_BIT_AI_HF) {
/* Read status register to find out what happened */
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 7f6687896401..0ff126b1fdfd 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -248,6 +248,20 @@ static int me_dio_insn_bits(struct comedi_device *dev,
return insn->n;
}
+static int me_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct me_private_data *dev_private = dev->private;
+ unsigned int status;
+
+ status = readw(dev_private->me_regbase + ME_STATUS);
+ if ((status & 0x0004) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int me_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -258,7 +272,7 @@ static int me_ai_insn_read(struct comedi_device *dev,
unsigned int rang = CR_RANGE(insn->chanspec);
unsigned int aref = CR_AREF(insn->chanspec);
unsigned short val;
- int i;
+ int ret;
/* stop any running conversion */
dev_private->control_1 &= 0xFFFC;
@@ -290,19 +304,14 @@ static int me_ai_insn_read(struct comedi_device *dev,
readw(dev_private->me_regbase + ME_ADC_START);
/* wait for ADC fifo not empty flag */
- for (i = 100000; i > 0; i--)
- if (!(readw(dev_private->me_regbase + ME_STATUS) & 0x0004))
- break;
+ ret = comedi_timeout(dev, s, insn, me_ai_eoc, 0);
+ if (ret)
+ return ret;
/* get value from ADC fifo */
- if (i) {
- val = readw(dev_private->me_regbase + ME_READ_AD_FIFO);
- val = (val ^ 0x800) & 0x0fff;
- data[0] = val;
- } else {
- dev_err(dev->class_dev, "Cannot get single value\n");
- return -EIO;
- }
+ val = readw(dev_private->me_regbase + ME_READ_AD_FIFO);
+ val = (val ^ 0x800) & 0x0fff;
+ data[0] = val;
/* stop any running conversion */
dev_private->control_1 &= 0xFFFC;
@@ -542,9 +551,6 @@ static int me_auto_attach(struct comedi_device *dev,
s->insn_bits = me_dio_insn_bits;
s->insn_config = me_dio_insn_config;
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
-
return 0;
}
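
The open-coded busy-wait loop removed from me_daq (and from most of the drivers below) is replaced by comedi_timeout(), which keeps calling a driver-supplied callback until the callback reports completion, and otherwise gives up after a core-defined interval. A hedged sketch of the calling convention, using stand-in names MY_STATUS_REG, MY_STATUS_EOC and MY_DATA_REG:

/*
 * Sketch of the comedi_timeout() callback convention used throughout
 * this series; register and bit names are placeholders.
 */
static int my_ai_eoc(struct comedi_device *dev,
                     struct comedi_subdevice *s,
                     struct comedi_insn *insn,
                     unsigned long context)
{
        unsigned int status;

        status = inw(dev->iobase + MY_STATUS_REG);
        if (status & MY_STATUS_EOC)
                return 0;       /* conversion finished, stop polling */
        return -EBUSY;          /* not ready yet, poll again */
}

static int my_ai_insn_read(struct comedi_device *dev,
                           struct comedi_subdevice *s,
                           struct comedi_insn *insn,
                           unsigned int *data)
{
        int ret;

        /* ... start the conversion ... */

        ret = comedi_timeout(dev, s, insn, my_ai_eoc, 0);
        if (ret)
                return ret;     /* -ETIMEDOUT, or whatever the callback returned */

        data[0] = inw(dev->iobase + MY_DATA_REG);       /* placeholder read */
        return insn->n;
}
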
diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
index 81b78e053f4e..a4f7d6f138df 100644
--- a/drivers/staging/comedi/drivers/mf6x4.c
+++ b/drivers/staging/comedi/drivers/mf6x4.c
@@ -133,21 +133,18 @@ static int mf6x4_do_insn_bits(struct comedi_device *dev,
return insn->n;
}
-static int mf6x4_ai_wait_for_eoc(struct comedi_device *dev,
- unsigned int timeout)
+static int mf6x4_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
struct mf6x4_private *devpriv = dev->private;
- unsigned int eolc;
+ unsigned int status;
- while (timeout--) {
- eolc = ioread32(devpriv->gpioc_R) & MF6X4_GPIOC_EOLC;
- if (eolc)
- return 0;
-
- udelay(1);
- }
-
- return -ETIME;
+ status = ioread32(devpriv->gpioc_R);
+ if (status & MF6X4_GPIOC_EOLC)
+ return 0;
+ return -EBUSY;
}
static int mf6x4_ai_insn_read(struct comedi_device *dev,
@@ -168,7 +165,7 @@ static int mf6x4_ai_insn_read(struct comedi_device *dev,
/* Trigger ADC conversion by reading ADSTART */
ioread16(devpriv->bar1_mem + MF6X4_ADSTART_R);
- ret = mf6x4_ai_wait_for_eoc(dev, 100);
+ ret = comedi_timeout(dev, s, insn, mf6x4_ai_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 9c9a0ee432cf..1a572c83f996 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -527,9 +527,9 @@ EXPORT_SYMBOL_GPL(mite_dma_disarm);
int mite_sync_input_dma(struct mite_channel *mite_chan,
struct comedi_async *async)
{
+ struct comedi_subdevice *s = async->subdevice;
int count;
unsigned int nbytes, old_alloc_count;
- const unsigned bytes_per_scan = cfc_bytes_per_scan(async->subdevice);
old_alloc_count = async->buf_write_alloc_count;
/* write alloc as much as we can */
@@ -538,7 +538,7 @@ int mite_sync_input_dma(struct mite_channel *mite_chan,
nbytes = mite_bytes_written_to_memory_lb(mite_chan);
if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
old_alloc_count) > 0) {
- dev_warn(async->subdevice->device->class_dev,
+ dev_warn(s->device->class_dev,
"mite: DMA overwrite of free area\n");
async->events |= COMEDI_CB_OVERFLOW;
return -1;
@@ -551,12 +551,7 @@ int mite_sync_input_dma(struct mite_channel *mite_chan,
return 0;
comedi_buf_write_free(async, count);
-
- async->scan_progress += count;
- if (async->scan_progress >= bytes_per_scan) {
- async->scan_progress %= bytes_per_scan;
- async->events |= COMEDI_CB_EOS;
- }
+ cfc_inc_scan_progress(s, count);
async->events |= COMEDI_CB_BLOCK;
return 0;
}
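
cfc_inc_scan_progress() takes over the scan bookkeeping that mite_sync_input_dma() used to do by hand. Based on the lines deleted here, a rough open-coded equivalent looks like the sketch below; the real helper lives in comedi_fc and may handle further details not shown.

/*
 * Approximation of the bookkeeping cfc_inc_scan_progress() performs,
 * reconstructed from the open-coded logic this hunk removes.
 */
static void my_inc_scan_progress(struct comedi_subdevice *s,
                                 unsigned int num_bytes)
{
        struct comedi_async *async = s->async;
        unsigned int scan_length = cfc_bytes_per_scan(s);

        async->scan_progress += num_bytes;
        if (async->scan_progress >= scan_length) {
                async->scan_progress %= scan_length;
                async->events |= COMEDI_CB_EOS; /* flag end of scan */
        }
}
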
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index bcf2f972376e..78f235747991 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -29,9 +29,9 @@
#define MAX_MITE_DMA_CHANNELS 8
struct mite_dma_descriptor {
- u32 count;
- u32 addr;
- u32 next;
+ __le32 count;
+ __le32 addr;
+ __le32 next;
u32 dar;
};
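
Retyping the descriptor fields as __le32 documents that the MITE consumes them in little-endian byte order and lets sparse flag unconverted accesses; writers are then expected to go through cpu_to_le32()/le32_to_cpu(). A hypothetical descriptor fill, shown only to illustrate the conversions:

/* hypothetical helper; the driver's real descriptor setup is elsewhere in mite.c */
static void my_fill_descriptor(struct mite_dma_descriptor *desc,
                               u32 count, u32 addr, u32 next)
{
        desc->count = cpu_to_le32(count);       /* explicit CPU -> LE conversion */
        desc->addr = cpu_to_le32(addr);
        desc->next = cpu_to_le32(next);
}
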
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
index fe4621ea65c3..f770400a0e81 100644
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ b/drivers/staging/comedi/drivers/mpc624.c
@@ -142,8 +142,18 @@ static const struct comedi_lrange range_mpc624_bipolar10 = {
}
};
-/* Timeout 200ms */
-#define TIMEOUT 200
+static int mpc624_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned char status;
+
+ status = inb(dev->iobase + MPC624_ADC);
+ if ((status & MPC624_ADBUSY) == 0)
+ return 0;
+ return -EBUSY;
+}
static int mpc624_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
@@ -152,7 +162,7 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
struct mpc624_private *devpriv = dev->private;
int n, i;
unsigned long int data_in, data_out;
- unsigned char ucPort;
+ int ret;
/*
* WARNING:
@@ -170,15 +180,9 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
udelay(1);
/* Wait for the conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- ucPort = inb(dev->iobase + MPC624_ADC);
- if (ucPort & MPC624_ADBUSY)
- udelay(1000);
- else
- break;
- }
- if (i == TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, mpc624_ai_eoc, 0);
+ if (ret)
+ return ret;
/* Start reading data */
data_in = 0;
@@ -341,7 +345,7 @@ static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->len_chanlist = 1;
s->insn_read = mpc624_ai_rinsn;
- return 1;
+ return 0;
}
static struct comedi_driver mpc624_driver = {
diff --git a/drivers/staging/comedi/drivers/multiq3.c b/drivers/staging/comedi/drivers/multiq3.c
index 3ca755eca285..b74b9e9bfd4a 100644
--- a/drivers/staging/comedi/drivers/multiq3.c
+++ b/drivers/staging/comedi/drivers/multiq3.c
@@ -81,34 +81,44 @@ struct multiq3_private {
unsigned int ao_readback[2];
};
+static int multiq3_ai_status(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + MULTIQ3_STATUS);
+ if (status & context)
+ return 0;
+ return -EBUSY;
+}
+
static int multiq3_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- int i, n;
+ int n;
int chan;
unsigned int hi, lo;
+ int ret;
chan = CR_CHAN(insn->chanspec);
outw(MULTIQ3_CONTROL_MUST | MULTIQ3_AD_MUX_EN | (chan << 3),
dev->iobase + MULTIQ3_CONTROL);
- for (i = 0; i < MULTIQ3_TIMEOUT; i++) {
- if (inw(dev->iobase + MULTIQ3_STATUS) & MULTIQ3_STATUS_EOC)
- break;
- }
- if (i == MULTIQ3_TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, multiq3_ai_status,
+ MULTIQ3_STATUS_EOC);
+ if (ret)
+ return ret;
for (n = 0; n < insn->n; n++) {
outw(0, dev->iobase + MULTIQ3_AD_CS);
- for (i = 0; i < MULTIQ3_TIMEOUT; i++) {
- if (inw(dev->iobase +
- MULTIQ3_STATUS) & MULTIQ3_STATUS_EOC_I)
- break;
- }
- if (i == MULTIQ3_TIMEOUT)
- return -ETIMEDOUT;
+
+ ret = comedi_timeout(dev, s, insn, multiq3_ai_status,
+ MULTIQ3_STATUS_EOC_I);
+ if (ret)
+ return ret;
hi = inb(dev->iobase + MULTIQ3_AD_CS);
lo = inb(dev->iobase + MULTIQ3_AD_CS);
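
multiq3 shows the second half of the comedi_timeout() convention: the unsigned long context argument is handed through to the callback unchanged, so one status callback can wait on different bits (here MULTIQ3_STATUS_EOC and MULTIQ3_STATUS_EOC_I). A generic sketch of that parameterized callback, with placeholder names:

/* 'context' carries the status bit the caller wants to wait for */
static int my_ai_status(struct comedi_device *dev,
                        struct comedi_subdevice *s,
                        struct comedi_insn *insn,
                        unsigned long context)
{
        unsigned int status = inw(dev->iobase + MY_STATUS_REG);

        if (status & context)
                return 0;
        return -EBUSY;
}

/* callers pick the bit per wait, e.g.: */
/*      ret = comedi_timeout(dev, s, insn, my_ai_status, MY_STATUS_EOC);   */
/*      ret = comedi_timeout(dev, s, insn, my_ai_status, MY_STATUS_EOC_I); */
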
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index df42e3906171..0d4b9019f76a 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -40,6 +40,7 @@
#include "../comedidev.h"
+#include "comedi_fc.h"
#include "mite.h"
#include "ni_tio.h"
@@ -789,13 +790,7 @@ static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
struct ni_gpct *counter = s->private;
ni_tio_handle_interrupt(counter, s);
- if (s->async->events) {
- if (s->async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR |
- COMEDI_CB_OVERFLOW)) {
- ni_660x_cancel(dev, s);
- }
- comedi_event(dev, s);
- }
+ cfc_handle_events(dev, s);
}
static irqreturn_t ni_660x_interrupt(int irq, void *d)
@@ -1187,7 +1182,7 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
global_interrupt_config_bits |= Cascade_Int_Enable_Bit;
ni_660x_write_register(dev, 0, global_interrupt_config_bits,
NI660X_GLOBAL_INT_CFG);
- dev_info(dev->class_dev, "ni_660x: %s attached\n", dev->board_name);
+
return 0;
}
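
cfc_handle_events() centralizes what ni_660x_handle_gpct_interrupt() (and the other per-driver event helpers removed below) used to open-code: when the accumulated events include an end-of-acquisition or error condition it invokes the subdevice's (*cancel) handler, then wakes the reader. A rough approximation, reconstructed from the handlers this patch deletes; the canonical version is in comedi_fc and may differ in detail:

/*
 * Approximation of cfc_handle_events(), based on the open-coded event
 * handlers removed in this patch.
 */
static void my_handle_events(struct comedi_device *dev,
                             struct comedi_subdevice *s)
{
        unsigned int events = s->async->events;

        if (!events)
                return;

        /* a fatal event terminates the running async command */
        if (events & (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW))
                s->cancel(dev, s);

        comedi_event(dev, s);   /* wake up anyone waiting on this subdevice */
}

This only works when s->cancel is populated, which the command-capable subdevices converted here are assumed to do already.
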
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index 8550fdc4ccd3..1002ceacfdcc 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -246,9 +246,6 @@ static int ni_670x_auto_attach(struct comedi_device *dev,
/* Config of ao registers */
writel(0x00, devpriv->mite->daq_io_addr + AO_CONTROL_OFFSET);
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index f83eb9ebe278..4e39b1f63d81 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -184,7 +184,6 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
}
/* initialize async here to make sure s is not NULL */
async = s->async;
- async->events = 0;
cmd = &async->cmd;
status = inw(dev->iobase + STATUS_REG);
@@ -196,15 +195,14 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
if (status & OVFL_BIT) {
comedi_error(dev, "fifo overflow");
- a2150_cancel(dev, s);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ cfc_handle_events(dev, s);
}
if ((status & DMA_TC_BIT) == 0) {
comedi_error(dev, "caught non-dma interrupt? Aborting.");
- a2150_cancel(dev, s);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -249,7 +247,6 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
cfc_write_to_buffer(s, dpnt);
if (cmd->stop_src == TRIG_COUNT) {
if (--devpriv->count == 0) { /* end of acquisition */
- a2150_cancel(dev, s);
async->events |= COMEDI_CB_EOA;
break;
}
@@ -265,7 +262,7 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
async->events |= COMEDI_CB_BLOCK;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
/* clear interrupt */
outw(0x00, dev->iobase + DMA_TC_CLEAR_REG);
@@ -488,13 +485,25 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
+static int a2150_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + STATUS_REG);
+ if (status & FNE_BIT)
+ return 0;
+ return -EBUSY;
+}
+
static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct a2150_private *devpriv = dev->private;
- unsigned int i, n;
- static const int timeout = 100000;
- static const int filter_delay = 36;
+ unsigned int n;
+ int ret;
/* clear fifo and reset triggering circuitry */
outw(0, dev->iobase + FIFO_RESET_REG);
@@ -524,30 +533,20 @@ static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
* there is a 35.6 sample delay for data to get through the
* antialias filter
*/
- for (n = 0; n < filter_delay; n++) {
- for (i = 0; i < timeout; i++) {
- if (inw(dev->iobase + STATUS_REG) & FNE_BIT)
- break;
- udelay(1);
- }
- if (i == timeout) {
- comedi_error(dev, "timeout");
- return -ETIME;
- }
+ for (n = 0; n < 36; n++) {
+ ret = comedi_timeout(dev, s, insn, a2150_ai_eoc, 0);
+ if (ret)
+ return ret;
+
inw(dev->iobase + FIFO_DATA_REG);
}
/* read data */
for (n = 0; n < insn->n; n++) {
- for (i = 0; i < timeout; i++) {
- if (inw(dev->iobase + STATUS_REG) & FNE_BIT)
- break;
- udelay(1);
- }
- if (i == timeout) {
- comedi_error(dev, "timeout");
- return -ETIME;
- }
+ ret = comedi_timeout(dev, s, insn, a2150_ai_eoc, 0);
+ if (ret)
+ return ret;
+
data[n] = inw(dev->iobase + FIFO_DATA_REG);
data[n] ^= 0x8000;
}
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index e8cd5ddb85c5..4262385e4f46 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -96,7 +96,6 @@ Devices: [National Instruments] AT-MIO-16 (atmio16), AT-MIO-16D (atmio16d)
#define CLOCK_100_HZ 0x8F25
/* Other miscellaneous defines */
#define ATMIO16D_SIZE 32 /* bus address range */
-#define ATMIO16D_TIMEOUT 10
struct atmio16_board_t {
@@ -448,16 +447,32 @@ static int atmio16d_ai_cancel(struct comedi_device *dev,
return 0;
}
-/* Mode 0 is used to get a single conversion on demand */
+static int atmio16d_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + STAT_REG);
+ if (status & STAT_AD_CONVAVAIL)
+ return 0;
+ if (status & STAT_AD_OVERFLOW) {
+ outw(0, dev->iobase + AD_CLEAR_REG);
+ return -EOVERFLOW;
+ }
+ return -EBUSY;
+}
+
static int atmio16d_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct atmio16d_private *devpriv = dev->private;
- int i, t;
+ int i;
int chan;
int gain;
- int status;
+ int ret;
chan = CR_CHAN(insn->chanspec);
gain = CR_RANGE(insn->chanspec);
@@ -473,26 +488,17 @@ static int atmio16d_ai_insn_read(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
/* start the conversion */
outw(0, dev->iobase + START_CONVERT_REG);
+
/* wait for it to finish */
- for (t = 0; t < ATMIO16D_TIMEOUT; t++) {
- /* check conversion status */
- status = inw(dev->iobase + STAT_REG);
- if (status & STAT_AD_CONVAVAIL) {
- /* read the data now */
- data[i] = inw(dev->iobase + AD_FIFO_REG);
- /* change to two's complement if need be */
- if (devpriv->adc_coding == adc_2comp)
- data[i] ^= 0x800;
- break;
- }
- if (status & STAT_AD_OVERFLOW) {
- outw(0, dev->iobase + AD_CLEAR_REG);
- return -ETIME;
- }
- }
- /* end waiting, now check if it timed out */
- if (t == ATMIO16D_TIMEOUT)
- return -ETIME;
+ ret = comedi_timeout(dev, s, insn, atmio16d_ai_eoc, 0);
+ if (ret)
+ return ret;
+
+ /* read the data now */
+ data[i] = inw(dev->iobase + AD_FIFO_REG);
+ /* change to two's complement if need be */
+ if (devpriv->adc_coding == adc_2comp)
+ data[i] ^= 0x800;
}
return i;
@@ -723,10 +729,13 @@ static int atmio16d_attach(struct comedi_device *dev,
/* 8255 subdevice */
s = &dev->subdevices[3];
- if (board->has_8255)
- subdev_8255_init(dev, s, NULL, dev->iobase);
- else
+ if (board->has_8255) {
+ ret = subdev_8255_init(dev, s, NULL, dev->iobase);
+ if (ret)
+ return ret;
+ } else {
s->type = COMEDI_SUBD_UNUSED;
+ }
/* don't yet know how to deal with counter/timers */
#if 0
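
atmio16d_ai_eoc() above also shows that a timeout callback can fail for good: comedi_timeout() treats any return value other than -EBUSY as final, so the -EOVERFLOW escapes straight to the caller of the instruction. A placeholder-name sketch of that three-way callback:

/* placeholder register/bit names; mirrors the overflow handling above */
static int my_ai_eoc(struct comedi_device *dev,
                     struct comedi_subdevice *s,
                     struct comedi_insn *insn,
                     unsigned long context)
{
        unsigned int status = inw(dev->iobase + MY_STAT_REG);

        if (status & MY_STAT_DATA_READY)
                return 0;                       /* sample available */
        if (status & MY_STAT_OVERFLOW) {
                outw(0, dev->iobase + MY_CLEAR_REG);
                return -EOVERFLOW;              /* fatal, reported to the caller */
        }
        return -EBUSY;                          /* keep polling until timeout */
}
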
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index e4cdca349157..171a71d20c88 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -109,14 +109,31 @@ static int daq700_dio_insn_config(struct comedi_device *dev,
return insn->n;
}
+static int daq700_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + STA_R2);
+ if (status & 0x03)
+ return -EOVERFLOW;
+ status = inb(dev->iobase + STA_R1);
+ if (status & 0x02)
+ return -ENODATA;
+ if ((status & 0x11) == 0x01)
+ return 0;
+ return -EBUSY;
+}
+
static int daq700_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- int n, i, chan;
+ int n, chan;
int d;
- unsigned int status;
- enum { TIMEOUT = 100 };
+ int ret;
chan = CR_CHAN(insn->chanspec);
/* write channel to multiplexer */
@@ -130,30 +147,12 @@ static int daq700_ai_rinsn(struct comedi_device *dev,
outb(0x30, dev->iobase + CMO_R); /* mode 0 out0 L, from H */
/* mode 1 out0 H, L to H, start conversion */
outb(0x32, dev->iobase + CMO_R);
+
/* wait for conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- status = inb(dev->iobase + STA_R2);
- if ((status & 0x03) != 0) {
- dev_info(dev->class_dev,
- "Overflow/run Error\n");
- return -EOVERFLOW;
- }
- status = inb(dev->iobase + STA_R1);
- if ((status & 0x02) != 0) {
- dev_info(dev->class_dev, "Data Error\n");
- return -ENODATA;
- }
- if ((status & 0x11) == 0x01) {
- /* ADC conversion complete */
- break;
- }
- udelay(1);
- }
- if (i == TIMEOUT) {
- dev_info(dev->class_dev,
- "timeout during ADC conversion\n");
- return -ETIMEDOUT;
- }
+ ret = comedi_timeout(dev, s, insn, daq700_ai_eoc, 0);
+ if (ret)
+ return ret;
+
/* read data */
d = inw(dev->iobase + ADFIFO_R);
/* mangle the data as necessary */
@@ -229,11 +228,6 @@ static int daq700_auto_attach(struct comedi_device *dev,
s->insn_read = daq700_ai_rinsn;
daq700_ai_config(dev, s);
- dev_info(dev->class_dev, "%s: %s, io 0x%lx\n",
- dev->driver->driver_name,
- dev->board_name,
- dev->iobase);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index 335ea34fa57c..925e82c65b2d 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -3,8 +3,8 @@
Driver for National Instruments PCMCIA DAQ-Card DIO-24
Copyright (C) 2002 Daniel Vecino Castel <dvecino@able.es>
- PCMCIA crap at end of file is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13
- from the pcmcia package.
+ PCMCIA crap at end of file is adapted from dummy_cs.c 1.31
+ 2001/08/24 12:13:13 from the pcmcia package.
The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
<dahinds@users.sourceforge.net>. Portions created by David A. Hinds
are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 0512445df08e..f4216e825f03 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -73,7 +73,6 @@
#include "ni_labpc_isadma.h"
#define LABPC_SIZE 0x20 /* size of ISA io region */
-#define LABPC_ADC_TIMEOUT 1000
enum scan_mode {
MODE_SINGLE_CHAN,
@@ -308,19 +307,17 @@ static void labpc_clear_adc_fifo(struct comedi_device *dev)
labpc_read_adc_fifo(dev);
}
-static int labpc_ai_wait_for_data(struct comedi_device *dev,
- int timeout)
+static int labpc_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
struct labpc_private *devpriv = dev->private;
- int i;
- for (i = 0; i < timeout; i++) {
- devpriv->stat1 = devpriv->read_byte(dev->iobase + STAT1_REG);
- if (devpriv->stat1 & STAT1_DAVAIL)
- return 0;
- udelay(1);
- }
- return -ETIME;
+ devpriv->stat1 = devpriv->read_byte(dev->iobase + STAT1_REG);
+ if (devpriv->stat1 & STAT1_DAVAIL)
+ return 0;
+ return -EBUSY;
}
static int labpc_ai_insn_read(struct comedi_device *dev,
@@ -363,7 +360,7 @@ static int labpc_ai_insn_read(struct comedi_device *dev,
/* trigger conversion */
devpriv->write_byte(0x1, dev->iobase + ADC_START_CONVERT_REG);
- ret = labpc_ai_wait_for_data(dev, LABPC_ADC_TIMEOUT);
+ ret = comedi_timeout(dev, s, insn, labpc_ai_eoc, 0);
if (ret)
return ret;
@@ -950,7 +947,6 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
async = s->async;
cmd = &async->cmd;
- async->events = 0;
/* read board status */
devpriv->stat1 = devpriv->read_byte(dev->iobase + STAT1_REG);
@@ -968,7 +964,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
/* clear error interrupt */
devpriv->write_byte(0x1, dev->iobase + ADC_FIFO_CLEAR_REG);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
comedi_error(dev, "overrun");
return IRQ_HANDLED;
}
@@ -988,7 +984,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
/* clear error interrupt */
devpriv->write_byte(0x1, dev->iobase + ADC_FIFO_CLEAR_REG);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
comedi_error(dev, "overflow");
return IRQ_HANDLED;
}
@@ -996,20 +992,17 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
if (cmd->stop_src == TRIG_EXT) {
if (devpriv->stat2 & STAT2_OUTA1) {
labpc_drain_dregs(dev);
- labpc_cancel(dev, s);
async->events |= COMEDI_CB_EOA;
}
}
/* TRIG_COUNT end of acquisition */
if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->count == 0) {
- labpc_cancel(dev, s);
+ if (devpriv->count == 0)
async->events |= COMEDI_CB_EOA;
- }
}
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
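
With cfc_handle_events() the labpc interrupt handler no longer zeroes async->events on entry or calls comedi_event()/labpc_cancel() itself; it only accumulates event flags and makes one helper call on the way out. A condensed sketch of that ISR shape, with hypothetical hardware checks my_hw_error() and my_acq_done():

static irqreturn_t my_interrupt(int irq, void *d)
{
        struct comedi_device *dev = d;
        struct comedi_subdevice *s = dev->read_subdev;
        struct comedi_async *async = s->async;

        if (!dev->attached)
                return IRQ_NONE;

        if (my_hw_error(dev))
                async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
        else if (my_acq_done(dev))
                async->events |= COMEDI_CB_EOA;
        else
                async->events |= COMEDI_CB_BLOCK;

        /* cancels the command on fatal events, then wakes the reader */
        cfc_handle_events(dev, s);

        return IRQ_HANDLED;
}
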
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 457b88481db0..8a0e3b7236ad 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -256,7 +256,6 @@ static int ni_rtsi_insn_config(struct comedi_device *dev,
static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s);
static int ni_read_eeprom(struct comedi_device *dev, int addr);
-static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s);
#ifndef PCIDMA
static void ni_handle_fifo_half_full(struct comedi_device *dev);
static int ni_ao_fifo_half_empty(struct comedi_device *dev,
@@ -272,15 +271,12 @@ static void shutdown_ai_command(struct comedi_device *dev);
static int ni_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum);
-static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s);
-
static int ni_8255_callback(int dir, int port, int data, unsigned long arg);
#ifdef PCIDMA
static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
+static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s);
#endif
-static int ni_gpct_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s);
static void handle_gpct_interrupt(struct comedi_device *dev,
unsigned short counter_index);
@@ -687,12 +683,22 @@ static void ni_clear_ai_fifo(struct comedi_device *dev)
{
const struct ni_board_struct *board = comedi_board(dev);
struct ni_private *devpriv = dev->private;
+ static const int timeout = 10000;
+ int i;
if (board->reg_type == ni_reg_6143) {
/* Flush the 6143 data FIFO */
ni_writel(0x10, AIFIFO_Control_6143); /* Flush fifo */
ni_writel(0x00, AIFIFO_Control_6143); /* Flush fifo */
- while (ni_readl(AIFIFO_Status_6143) & 0x10) ; /* Wait for complete */
+ /* Wait for complete */
+ for (i = 0; i < timeout; i++) {
+ if (!(ni_readl(AIFIFO_Status_6143) & 0x10))
+ break;
+ udelay(1);
+ }
+ if (i == timeout)
+ comedi_error(dev, "FIFO flush timeout.");
} else {
devpriv->stc_writew(dev, 1, ADC_FIFO_Clear);
if (board->reg_type == ni_reg_625x) {
@@ -937,32 +943,6 @@ static void shutdown_ai_command(struct comedi_device *dev)
s->async->events |= COMEDI_CB_EOA;
}
-static void ni_event(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- if (s->
- async->events & (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW |
- COMEDI_CB_EOA)) {
- switch (s->index) {
- case NI_AI_SUBDEV:
- ni_ai_reset(dev, s);
- break;
- case NI_AO_SUBDEV:
- ni_ao_reset(dev, s);
- break;
- case NI_GPCT0_SUBDEV:
- case NI_GPCT1_SUBDEV:
- ni_gpct_cancel(dev, s);
- break;
- case NI_DIO_SUBDEV:
- ni_cdio_cancel(dev, s);
- break;
- default:
- break;
- }
- }
- comedi_event(dev, s);
-}
-
static void handle_gpct_interrupt(struct comedi_device *dev,
unsigned short counter_index)
{
@@ -974,8 +954,7 @@ static void handle_gpct_interrupt(struct comedi_device *dev,
ni_tio_handle_interrupt(&devpriv->counter_dev->counters[counter_index],
s);
- if (s->async->events)
- ni_event(dev, s);
+ cfc_handle_events(dev, s);
#endif
}
@@ -1033,7 +1012,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (comedi_is_subdevice_running(s)) {
s->async->events |=
COMEDI_CB_ERROR | COMEDI_CB_EOA;
- ni_event(dev, s);
+ cfc_handle_events(dev, s);
}
return;
}
@@ -1048,8 +1027,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (status & (AI_Overrun_St | AI_Overflow_St))
s->async->events |= COMEDI_CB_OVERFLOW;
- ni_event(dev, s);
-
+ cfc_handle_events(dev, s);
return;
}
if (status & AI_SC_TC_St) {
@@ -1076,7 +1054,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if ((status & AI_STOP_St))
ni_handle_eos(dev, s);
- ni_event(dev, s);
+ cfc_handle_events(dev, s);
}
static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
@@ -1151,7 +1129,7 @@ static void handle_b_interrupt(struct comedi_device *dev,
}
#endif
- ni_event(dev, s);
+ cfc_handle_events(dev, s);
}
#ifndef PCIDMA
@@ -3662,7 +3640,7 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
M_Offset_CDIO_Command);
/* s->async->events |= COMEDI_CB_EOA; */
}
- ni_event(dev, s);
+ cfc_handle_events(dev, s);
}
static int ni_serial_insn_config(struct comedi_device *dev,
@@ -4282,10 +4260,14 @@ static int ni_E_init(struct comedi_device *dev)
/* 8255 device */
s = &dev->subdevices[NI_8255_DIO_SUBDEV];
- if (board->has_8255)
- subdev_8255_init(dev, s, ni_8255_callback, (unsigned long)dev);
- else
+ if (board->has_8255) {
+ ret = subdev_8255_init(dev, s, ni_8255_callback,
+ (unsigned long)dev);
+ if (ret)
+ return ret;
+ } else {
s->type = COMEDI_SUBD_UNUSED;
+ }
/* formerly general purpose counter/timer device, but no longer used */
s = &dev->subdevices[NI_UNUSED_SUBDEV];
@@ -4393,6 +4375,9 @@ static int ni_E_init(struct comedi_device *dev)
&ni_gpct_read_register,
counter_variant,
NUM_GPCT);
+ if (!devpriv->counter_dev)
+ return -ENOMEM;
+
/* General purpose counters */
for (j = 0; j < NUM_GPCT; ++j) {
s = &dev->subdevices[NI_GPCT_SUBDEV(j)];
@@ -4483,7 +4468,6 @@ static int ni_E_init(struct comedi_device *dev)
ni_writeb(0x0, M_Offset_AO_Calibration);
}
- printk("\n");
return 0;
}
@@ -4992,9 +4976,9 @@ static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
#endif
+#ifdef PCIDMA
static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
-#ifdef PCIDMA
struct ni_gpct *counter = s->private;
int retval;
@@ -5002,10 +4986,8 @@ static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
ni_e_series_enable_second_irq(dev, counter->counter_index, 0);
ni_release_gpct_mite_channel(dev, counter->counter_index);
return retval;
-#else
- return 0;
-#endif
}
+#endif
/*
*
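
Dropping ni_event() relies on the same contract: cfc_handle_events() falls back to whatever (*cancel) handler the subdevice registered, so the per-subdevice switch that ni_event() open-coded is assumed to be covered by the s->cancel assignments ni_E_init() already makes. A hypothetical excerpt of that wiring, shown only to make the dependency explicit:

        /* hypothetical excerpt; the real assignments live in ni_E_init() */
        s = &dev->subdevices[NI_AI_SUBDEV];
        s->cancel = ni_ai_reset;        /* invoked by cfc_handle_events() on EOA/error */

        s = &dev->subdevices[NI_AO_SUBDEV];
        s->cancel = ni_ao_reset;
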
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 30c46a3c1767..85ac2d964f5c 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -263,9 +263,6 @@ enum FPGA_Control_Bits {
#define IntEn (TransferReady|CountExpired|Waited|PrimaryTC|SecondaryTC)
#endif
-static int ni_pcidio_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s);
-
enum nidio_boardid {
BOARD_PCIDIO_32HS,
BOARD_PXI6533,
@@ -353,17 +350,6 @@ static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev)
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
-static void ni_pcidio_event(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- if (s->
- async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR |
- COMEDI_CB_OVERFLOW)) {
- ni_pcidio_cancel(dev, s);
- }
- comedi_event(dev, s);
-}
-
static int ni_pcidio_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct nidio96_private *devpriv = dev->private;
@@ -501,7 +487,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
}
out:
- ni_pcidio_event(dev, s);
+ cfc_handle_events(dev, s);
#if 0
if (!tag) {
writeb(0x03,
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 0ed980455875..d40df072583c 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1551,7 +1551,7 @@ static int pcimio_auto_attach(struct comedi_device *dev,
dev->subdevices[NI_GPCT_SUBDEV(1)].buf_change = &pcimio_gpct1_change;
dev->subdevices[NI_DIO_SUBDEV].buf_change = &pcimio_dio_change;
- return ret;
+ return 0;
}
static int pcimio_ai_change(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
index 68378dab4e70..1056bf001e5e 100644
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ b/drivers/staging/comedi/drivers/ni_tio.h
@@ -115,10 +115,10 @@ struct ni_gpct {
struct ni_gpct_device {
struct comedi_device *dev;
- void (*write_register) (struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg);
- unsigned (*read_register) (struct ni_gpct *counter,
- enum ni_gpct_register reg);
+ void (*write_register)(struct ni_gpct *counter, unsigned bits,
+ enum ni_gpct_register reg);
+ unsigned (*read_register)(struct ni_gpct *counter,
+ enum ni_gpct_register reg);
enum ni_gpct_variant variant;
struct ni_gpct *counters;
unsigned num_counters;
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
index f0fc123ef566..7c03a5d17b1b 100644
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ b/drivers/staging/comedi/drivers/pcl711.c
@@ -253,18 +253,17 @@ static void pcl711_set_changain(struct comedi_device *dev,
outb(mux | PCL711_MUX_CHAN(chan), dev->iobase + PCL711_MUX_REG);
}
-static int pcl711_ai_wait_for_eoc(struct comedi_device *dev,
- unsigned int timeout)
+static int pcl711_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- unsigned int msb;
+ unsigned int status;
- while (timeout--) {
- msb = inb(dev->iobase + PCL711_AI_MSB_REG);
- if ((msb & PCL711_AI_MSB_DRDY) == 0)
- return 0;
- udelay(1);
- }
- return -ETIME;
+ status = inb(dev->iobase + PCL711_AI_MSB_REG);
+ if ((status & PCL711_AI_MSB_DRDY) == 0)
+ return 0;
+ return -EBUSY;
}
static int pcl711_ai_insn_read(struct comedi_device *dev,
@@ -282,7 +281,7 @@ static int pcl711_ai_insn_read(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
outb(PCL711_SOFTTRIG, dev->iobase + PCL711_SOFTTRIG_REG);
- ret = pcl711_ai_wait_for_eoc(dev, 100);
+ ret = comedi_timeout(dev, s, insn, pcl711_ai_eoc, 0);
if (ret)
return ret;
@@ -336,11 +335,8 @@ static int pcl711_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
- if (cmd->stop_src == TRIG_NONE) {
+ if (cmd->stop_src == TRIG_NONE)
err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
- } else {
- /* ignore */
- }
if (err)
return 3;
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index 53613b385f35..160eac8083db 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -131,34 +131,32 @@
#define boardACL8216 8 /* and ICP DAS A-826PG */
#define boardA821 9 /* PGH, PGL, PGL/NDA versions */
-#define PCLx1x_IORANGE 16
-
-#define PCL812_CTR0 0
-#define PCL812_CTR1 1
-#define PCL812_CTR2 2
-#define PCL812_CTRCTL 3
-#define PCL812_AD_LO 4
-#define PCL812_DA1_LO 4
-#define PCL812_AD_HI 5
-#define PCL812_DA1_HI 5
-#define PCL812_DA2_LO 6
-#define PCL812_DI_LO 6
-#define PCL812_DA2_HI 7
-#define PCL812_DI_HI 7
-#define PCL812_CLRINT 8
-#define PCL812_GAIN 9
-#define PCL812_MUX 10
-#define PCL812_MODE 11
-#define PCL812_CNTENABLE 10
-#define PCL812_SOFTTRIG 12
-#define PCL812_DO_LO 13
-#define PCL812_DO_HI 14
-
-#define PCL812_DRDY 0x10 /* =0 data ready */
-
-#define ACL8216_STATUS 8 /* 5. bit signalize data ready */
-
-#define ACL8216_DRDY 0x20 /* =0 data ready */
+/*
+ * Register I/O map
+ */
+#define PCL812_TIMER_BASE 0x00
+#define PCL812_AI_LSB_REG 0x04
+#define PCL812_AI_MSB_REG 0x05
+#define PCL812_AI_MSB_DRDY (1 << 4)
+#define PCL812_AO_LSB_REG(x) (0x04 + ((x) * 2))
+#define PCL812_AO_MSB_REG(x) (0x05 + ((x) * 2))
+#define PCL812_DI_LSB_REG 0x06
+#define PCL812_DI_MSB_REG 0x07
+#define PCL812_STATUS_REG 0x08
+#define PCL812_STATUS_DRDY (1 << 5)
+#define PCL812_RANGE_REG 0x09
+#define PCL812_MUX_REG 0x0a
+#define PCL812_MUX_CHAN(x) ((x) << 0)
+#define PCL812_MUX_CS0 (1 << 4)
+#define PCL812_MUX_CS1 (1 << 5)
+#define PCL812_CTRL_REG 0x0b
+#define PCL812_CTRL_DISABLE_TRIG (0 << 0)
+#define PCL812_CTRL_SOFT_TRIG (1 << 0)
+#define PCL812_CTRL_PACER_DMA_TRIG (2 << 0)
+#define PCL812_CTRL_PACER_EOC_TRIG (6 << 0)
+#define PCL812_SOFTTRIG_REG 0x0c
+#define PCL812_DO_LSB_REG 0x0d
+#define PCL812_DO_MSB_REG 0x0e
#define MAX_CHANLIST_LEN 256 /* length of scan list */
@@ -331,213 +329,391 @@ static const struct comedi_lrange range_a821pgh_ai = {
};
struct pcl812_board {
+ const char *name;
+ int board_type;
+ int n_aichan;
+ int n_aochan;
+ unsigned int ai_ns_min;
+ const struct comedi_lrange *rangelist_ai;
+ unsigned int IRQbits;
+ unsigned int has_dma:1;
+ unsigned int has_16bit_ai:1;
+ unsigned int has_mpc508_mux:1;
+ unsigned int has_dio:1;
+};
- const char *name; /* board name */
- int board_type; /* type of this board */
- int n_aichan; /* num of AI chans in S.E. */
- int n_aichan_diff; /* DIFF num of chans */
- int n_aochan; /* num of DA chans */
- int n_dichan; /* DI and DO chans */
- int n_dochan;
- int ai_maxdata; /* AI resolution */
- unsigned int ai_ns_min; /* max sample speed of card v ns */
- unsigned int i8254_osc_base; /* clock base */
- const struct comedi_lrange *rangelist_ai; /* rangelist for A/D */
- const struct comedi_lrange *rangelist_ao; /* rangelist for D/A */
- unsigned int IRQbits; /* allowed IRQ */
- unsigned char DMAbits; /* allowed DMA chans */
- unsigned char io_range; /* iorange for this board */
- unsigned char haveMPC508; /* 1=board use MPC508A multiplexor */
+static const struct pcl812_board boardtypes[] = {
+ {
+ .name = "pcl812",
+ .board_type = boardPCL812,
+ .n_aichan = 16,
+ .n_aochan = 2,
+ .ai_ns_min = 33000,
+ .rangelist_ai = &range_bipolar10,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "pcl812pg",
+ .board_type = boardPCL812PG,
+ .n_aichan = 16,
+ .n_aochan = 2,
+ .ai_ns_min = 33000,
+ .rangelist_ai = &range_pcl812pg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "acl8112pg",
+ .board_type = boardPCL812PG,
+ .n_aichan = 16,
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_pcl812pg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "acl8112dg",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_acl8112dg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_mpc508_mux = 1,
+ .has_dio = 1,
+ }, {
+ .name = "acl8112hg",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_acl8112hg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_mpc508_mux = 1,
+ .has_dio = 1,
+ }, {
+ .name = "a821pgl",
+ .board_type = boardA821,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 1,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_pcl813b_ai,
+ .IRQbits = 0x000c,
+ .has_dio = 1,
+ }, {
+ .name = "a821pglnda",
+ .board_type = boardA821,
+ .n_aichan = 16, /* 8 differential */
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_pcl813b_ai,
+ .IRQbits = 0x000c,
+ }, {
+ .name = "a821pgh",
+ .board_type = boardA821,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 1,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_a821pgh_ai,
+ .IRQbits = 0x000c,
+ .has_dio = 1,
+ }, {
+ .name = "a822pgl",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_acl8112dg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "a822pgh",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_acl8112hg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "a823pgl",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 8000,
+ .rangelist_ai = &range_acl8112dg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "a823pgh",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 8000,
+ .rangelist_ai = &range_acl8112hg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "pcl813",
+ .board_type = boardPCL813,
+ .n_aichan = 32,
+ .rangelist_ai = &range_pcl813b_ai,
+ }, {
+ .name = "pcl813b",
+ .board_type = boardPCL813B,
+ .n_aichan = 32,
+ .rangelist_ai = &range_pcl813b_ai,
+ }, {
+ .name = "acl8113",
+ .board_type = boardACL8113,
+ .n_aichan = 32,
+ .rangelist_ai = &range_acl8113_1_ai,
+ }, {
+ .name = "iso813",
+ .board_type = boardISO813,
+ .n_aichan = 32,
+ .rangelist_ai = &range_iso813_1_ai,
+ }, {
+ .name = "acl8216",
+ .board_type = boardACL8216,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_pcl813b2_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_16bit_ai = 1,
+ .has_mpc508_mux = 1,
+ .has_dio = 1,
+ }, {
+ .name = "a826pg",
+ .board_type = boardACL8216,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_pcl813b2_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_16bit_ai = 1,
+ .has_dio = 1,
+ },
};
struct pcl812_private {
-
- unsigned char valid; /* =1 device is OK */
unsigned char dma; /* >0 use DMA (the used DMA channel) */
- unsigned char use_diff; /* =1 diff inputs */
- unsigned char use_MPC; /* 1=board uses MPC508A multiplexor */
- unsigned char use_ext_trg; /* 1=board uses external trigger */
unsigned char range_correction; /* =1 we must add 1 to range number */
- unsigned char old_chan_reg; /* lastly used chan/gain pair */
- unsigned char old_gain_reg;
+ unsigned int last_ai_chanspec;
unsigned char mode_reg_int; /* stored INT number for some cards */
- unsigned char ai_neverending; /* =1 we do unlimited AI */
- unsigned char ai_eos; /* 1=EOS wake up */
- unsigned char ai_dma; /* =1 we use DMA */
unsigned int ai_poll_ptr; /* how many samples to transfer on poll */
- unsigned int ai_scans; /* len of scanlist */
unsigned int ai_act_scan; /* how many scans we finished */
- unsigned int ai_chanlist[MAX_CHANLIST_LEN]; /* our copy of channel/range list */
- unsigned int ai_n_chan; /* how many channels is measured */
- unsigned int ai_flags; /* flaglist */
- unsigned int ai_data_len; /* len of data buffer */
- unsigned int ai_is16b; /* =1 we have 16 bit card */
+ unsigned int dmapages;
+ unsigned int hwdmasize;
unsigned long dmabuf[2]; /* PTR to DMA buf */
- unsigned int dmapages[2]; /* how many pages we have allocated */
unsigned int hwdmaptr[2]; /* HW PTR to DMA buf */
- unsigned int hwdmasize[2]; /* DMA buf size in bytes */
unsigned int dmabytestomove[2]; /* how many bytes per DMA transfer */
int next_dma_buf; /* which buffer is next to use */
unsigned int dma_runs_to_end; /* how many times we must switch DMA buffers */
unsigned int last_dma_run; /* how many bytes to transfer on last DMA buffer */
unsigned int max_812_ai_mode0_rangewait; /* settling time for gain */
unsigned int ao_readback[2]; /* data for AO readback */
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int use_diff:1;
+ unsigned int use_mpc508:1;
+ unsigned int use_ext_trg:1;
+ unsigned int ai_dma:1;
+ unsigned int ai_eos:1;
};
-/*
-==============================================================================
-*/
-static void start_pacer(struct comedi_device *dev, int mode,
- unsigned int divisor1, unsigned int divisor2);
-static void setup_range_channel(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int rangechan, char wait);
-static int pcl812_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s);
-/*
-==============================================================================
-*/
-static int pcl812_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl812_start_pacer(struct comedi_device *dev, bool load_timers)
{
struct pcl812_private *devpriv = dev->private;
- int n;
- int timeout, hi;
-
- /* select software trigger */
- outb(devpriv->mode_reg_int | 1, dev->iobase + PCL812_MODE);
- /* select channel and renge */
- setup_range_channel(dev, s, insn->chanspec, 1);
- for (n = 0; n < insn->n; n++) {
- /* start conversion */
- outb(255, dev->iobase + PCL812_SOFTTRIG);
- udelay(5);
- timeout = 50; /* wait max 50us, it must finish under 33us */
- while (timeout--) {
- hi = inb(dev->iobase + PCL812_AD_HI);
- if (!(hi & PCL812_DRDY))
- goto conv_finish;
- udelay(1);
- }
- dev_dbg(dev->class_dev, "A/D insn read timeout\n");
- outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
- return -ETIME;
+ unsigned long timer_base = dev->iobase + PCL812_TIMER_BASE;
+
+ i8254_set_mode(timer_base, 0, 2, I8254_MODE2 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 1, I8254_MODE2 | I8254_BINARY);
+ udelay(1);
-conv_finish:
- data[n] = ((hi & 0xf) << 8) | inb(dev->iobase + PCL812_AD_LO);
+ if (load_timers) {
+ i8254_write(timer_base, 0, 2, devpriv->divisor2);
+ i8254_write(timer_base, 0, 1, devpriv->divisor1);
}
- outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
- return n;
}
-/*
-==============================================================================
-*/
-static int acl8216_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl812_ai_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- int n;
- int timeout;
-
- /* select software trigger */
- outb(1, dev->iobase + PCL812_MODE);
- /* select channel and renge */
- setup_range_channel(dev, s, insn->chanspec, 1);
- for (n = 0; n < insn->n; n++) {
- /* start conversion */
- outb(255, dev->iobase + PCL812_SOFTTRIG);
- udelay(5);
- timeout = 50; /* wait max 50us, it must finish under 33us */
- while (timeout--) {
- if (!(inb(dev->iobase + ACL8216_STATUS) & ACL8216_DRDY))
- goto conv_finish;
- udelay(1);
+ struct pcl812_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int dma_flags;
+ unsigned int bytes;
+
+ /* we use EOS, so adapt DMA buffer to one scan */
+ if (devpriv->ai_eos) {
+ devpriv->dmabytestomove[0] =
+ cmd->chanlist_len * sizeof(short);
+ devpriv->dmabytestomove[1] =
+ cmd->chanlist_len * sizeof(short);
+ devpriv->dma_runs_to_end = 1;
+ } else {
+ devpriv->dmabytestomove[0] = devpriv->hwdmasize;
+ devpriv->dmabytestomove[1] = devpriv->hwdmasize;
+ if (s->async->prealloc_bufsz < devpriv->hwdmasize) {
+ devpriv->dmabytestomove[0] =
+ s->async->prealloc_bufsz;
+ devpriv->dmabytestomove[1] =
+ s->async->prealloc_bufsz;
}
- dev_dbg(dev->class_dev, "A/D insn read timeout\n");
- outb(0, dev->iobase + PCL812_MODE);
- return -ETIME;
-
-conv_finish:
- data[n] =
- (inb(dev->iobase +
- PCL812_AD_HI) << 8) | inb(dev->iobase + PCL812_AD_LO);
+ if (cmd->stop_src == TRIG_NONE) {
+ devpriv->dma_runs_to_end = 1;
+ } else {
+ /* how many samples must we transfer? */
+ bytes = cmd->chanlist_len *
+ cmd->stop_arg * sizeof(short);
+
+ /* how many DMA pages must we fill? */
+ devpriv->dma_runs_to_end =
+ bytes / devpriv->dmabytestomove[0];
+
+ /* how many bytes must be moved on the last DMA transfer */
+ devpriv->last_dma_run =
+ bytes % devpriv->dmabytestomove[0];
+ if (devpriv->dma_runs_to_end == 0)
+ devpriv->dmabytestomove[0] =
+ devpriv->last_dma_run;
+ devpriv->dma_runs_to_end--;
+ }
+ }
+ if (devpriv->dmabytestomove[0] > devpriv->hwdmasize) {
+ devpriv->dmabytestomove[0] = devpriv->hwdmasize;
+ devpriv->ai_eos = 0;
}
- outb(0, dev->iobase + PCL812_MODE);
- return n;
+ if (devpriv->dmabytestomove[1] > devpriv->hwdmasize) {
+ devpriv->dmabytestomove[1] = devpriv->hwdmasize;
+ devpriv->ai_eos = 0;
+ }
+ devpriv->next_dma_buf = 0;
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ dma_flags = claim_dma_lock();
+ clear_dma_ff(devpriv->dma);
+ set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
+ set_dma_count(devpriv->dma, devpriv->dmabytestomove[0]);
+ release_dma_lock(dma_flags);
+ enable_dma(devpriv->dma);
}
-/*
-==============================================================================
-*/
-static int pcl812_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl812_ai_setup_next_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct pcl812_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- int i;
+ unsigned long dma_flags;
- for (i = 0; i < insn->n; i++) {
- outb((data[i] & 0xff),
- dev->iobase + (chan ? PCL812_DA2_LO : PCL812_DA1_LO));
- outb((data[i] >> 8) & 0x0f,
- dev->iobase + (chan ? PCL812_DA2_HI : PCL812_DA1_HI));
- devpriv->ao_readback[chan] = data[i];
+ devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
+ disable_dma(devpriv->dma);
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ dma_flags = claim_dma_lock();
+ set_dma_addr(devpriv->dma, devpriv->hwdmaptr[devpriv->next_dma_buf]);
+ if (devpriv->ai_eos) {
+ set_dma_count(devpriv->dma,
+ devpriv->dmabytestomove[devpriv->next_dma_buf]);
+ } else {
+ if (devpriv->dma_runs_to_end) {
+ set_dma_count(devpriv->dma,
+ devpriv->dmabytestomove[devpriv->
+ next_dma_buf]);
+ } else {
+ set_dma_count(devpriv->dma, devpriv->last_dma_run);
+ }
+ devpriv->dma_runs_to_end--;
}
-
- return i;
+ release_dma_lock(dma_flags);
+ enable_dma(devpriv->dma);
}
-/*
-==============================================================================
-*/
-static int pcl812_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl812_ai_set_chan_range(struct comedi_device *dev,
+ unsigned int chanspec, char wait)
{
struct pcl812_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- int i;
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int mux = 0;
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->ao_readback[chan];
+ if (chanspec == devpriv->last_ai_chanspec)
+ return;
+
+ devpriv->last_ai_chanspec = chanspec;
+
+ if (devpriv->use_mpc508) {
+ if (devpriv->use_diff) {
+ mux |= PCL812_MUX_CS0 | PCL812_MUX_CS1;
+ } else {
+ if (chan < 8)
+ mux |= PCL812_MUX_CS0;
+ else
+ mux |= PCL812_MUX_CS1;
+ }
+ }
+
+ outb(mux | PCL812_MUX_CHAN(chan), dev->iobase + PCL812_MUX_REG);
+ outb(range + devpriv->range_correction, dev->iobase + PCL812_RANGE_REG);
- return i;
+ if (wait)
+ /*
+ * XXX this depends on selected range and can be very long for
+ * some high gain ranges!
+ */
+ udelay(devpriv->max_812_ai_mode0_rangewait);
}
-/*
-==============================================================================
-*/
-static int pcl812_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl812_ai_clear_eoc(struct comedi_device *dev)
{
- data[1] = inb(dev->iobase + PCL812_DI_LO);
- data[1] |= inb(dev->iobase + PCL812_DI_HI) << 8;
+ /* writing any value clears the interrupt request */
+ outb(0, dev->iobase + PCL812_STATUS_REG);
+}
- return insn->n;
+static void pcl812_ai_soft_trig(struct comedi_device *dev)
+{
+ /* writing any value triggers a software conversion */
+ outb(255, dev->iobase + PCL812_SOFTTRIG_REG);
}
-static int pcl812_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static unsigned int pcl812_ai_get_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase + PCL812_DO_LO);
- outb((s->state >> 8), dev->iobase + PCL812_DO_HI);
- }
+ unsigned int val;
- data[1] = s->state;
+ val = inb(dev->iobase + PCL812_AI_MSB_REG) << 8;
+ val |= inb(dev->iobase + PCL812_AI_LSB_REG);
- return insn->n;
+ return val & s->maxdata;
+}
+
+static int pcl812_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ if (s->maxdata > 0x0fff) {
+ status = inb(dev->iobase + PCL812_STATUS_REG);
+ if ((status & PCL812_STATUS_DRDY) == 0)
+ return 0;
+ } else {
+ status = inb(dev->iobase + PCL812_AI_MSB_REG);
+ if ((status & PCL812_AI_MSB_DRDY) == 0)
+ return 0;
+ }
+ return -EBUSY;
}
-/*
-==============================================================================
-*/
static int pcl812_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
@@ -545,7 +721,7 @@ static int pcl812_ai_cmdtest(struct comedi_device *dev,
struct pcl812_private *devpriv = dev->private;
int err = 0;
unsigned int flags;
- int tmp, divisor1, divisor2;
+ int tmp;
/* Step 1 : check if triggers are trivially valid */
@@ -600,8 +776,9 @@ static int pcl812_ai_cmdtest(struct comedi_device *dev,
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer(board->i8254_osc_base,
- &divisor1, &divisor2,
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_2MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
&cmd->convert_arg, cmd->flags);
if (cmd->convert_arg < board->ai_ns_min)
cmd->convert_arg = board->ai_ns_min;
@@ -615,54 +792,21 @@ static int pcl812_ai_cmdtest(struct comedi_device *dev,
return 0;
}
-/*
-==============================================================================
-*/
static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- const struct pcl812_board *board = comedi_board(dev);
struct pcl812_private *devpriv = dev->private;
- unsigned int divisor1 = 0, divisor2 = 0, i, dma_flags, bytes;
struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int ctrl = 0;
+ unsigned int i;
- if (cmd->start_src != TRIG_NOW)
- return -EINVAL;
- if (cmd->scan_begin_src != TRIG_FOLLOW)
- return -EINVAL;
- if (devpriv->use_ext_trg) {
- if (cmd->convert_src != TRIG_EXT)
- return -EINVAL;
- } else {
- if (cmd->convert_src != TRIG_TIMER)
- return -EINVAL;
- }
- if (cmd->scan_end_src != TRIG_COUNT)
- return -EINVAL;
- if (cmd->scan_end_arg != cmd->chanlist_len)
- return -EINVAL;
- if (cmd->chanlist_len > MAX_CHANLIST_LEN)
- return -EINVAL;
-
- if (cmd->convert_src == TRIG_TIMER) {
- if (cmd->convert_arg < board->ai_ns_min)
- cmd->convert_arg = board->ai_ns_min;
- i8253_cascade_ns_to_timer(board->i8254_osc_base,
- &divisor1, &divisor2,
- &cmd->convert_arg, cmd->flags);
- }
-
- start_pacer(dev, -1, 0, 0); /* stop pacer */
+ pcl812_start_pacer(dev, false);
- devpriv->ai_n_chan = cmd->chanlist_len;
- memcpy(devpriv->ai_chanlist, cmd->chanlist,
- sizeof(unsigned int) * cmd->scan_end_arg);
- /* select first channel and range */
- setup_range_channel(dev, s, devpriv->ai_chanlist[0], 1);
+ pcl812_ai_set_chan_range(dev, cmd->chanlist[0], 1);
if (devpriv->dma) { /* check if we can use DMA transfer */
devpriv->ai_dma = 1;
- for (i = 1; i < devpriv->ai_n_chan; i++)
- if (devpriv->ai_chanlist[0] != devpriv->ai_chanlist[i]) {
+ for (i = 1; i < cmd->chanlist_len; i++)
+ if (cmd->chanlist[0] != cmd->chanlist[i]) {
/* we can't use DMA :-( */
devpriv->ai_dma = 0;
break;
@@ -670,212 +814,105 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
} else
devpriv->ai_dma = 0;
- devpriv->ai_flags = cmd->flags;
- devpriv->ai_data_len = s->async->prealloc_bufsz;
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ai_scans = cmd->stop_arg;
- devpriv->ai_neverending = 0;
- } else {
- devpriv->ai_scans = 0;
- devpriv->ai_neverending = 1;
- }
-
devpriv->ai_act_scan = 0;
devpriv->ai_poll_ptr = 0;
s->async->cur_chan = 0;
/* do we want a wakeup on every scan? */
- if ((devpriv->ai_flags & TRIG_WAKE_EOS)) {
+ if (cmd->flags & TRIG_WAKE_EOS) {
devpriv->ai_eos = 1;
/* DMA is useless for this situation */
- if (devpriv->ai_n_chan == 1)
+ if (cmd->chanlist_len == 1)
devpriv->ai_dma = 0;
}
- if (devpriv->ai_dma) {
- /* we use EOS, so adapt DMA buffer to one scan */
- if (devpriv->ai_eos) {
- devpriv->dmabytestomove[0] =
- devpriv->ai_n_chan * sizeof(short);
- devpriv->dmabytestomove[1] =
- devpriv->ai_n_chan * sizeof(short);
- devpriv->dma_runs_to_end = 1;
- } else {
- devpriv->dmabytestomove[0] = devpriv->hwdmasize[0];
- devpriv->dmabytestomove[1] = devpriv->hwdmasize[1];
- if (devpriv->ai_data_len < devpriv->hwdmasize[0])
- devpriv->dmabytestomove[0] =
- devpriv->ai_data_len;
- if (devpriv->ai_data_len < devpriv->hwdmasize[1])
- devpriv->dmabytestomove[1] =
- devpriv->ai_data_len;
- if (devpriv->ai_neverending) {
- devpriv->dma_runs_to_end = 1;
- } else {
- /* how many samples we must transfer? */
- bytes = devpriv->ai_n_chan *
- devpriv->ai_scans * sizeof(short);
-
- /* how many DMA pages we must fill */
- devpriv->dma_runs_to_end =
- bytes / devpriv->dmabytestomove[0];
-
- /* on last dma transfer must be moved */
- devpriv->last_dma_run =
- bytes % devpriv->dmabytestomove[0];
- if (devpriv->dma_runs_to_end == 0)
- devpriv->dmabytestomove[0] =
- devpriv->last_dma_run;
- devpriv->dma_runs_to_end--;
- }
- }
- if (devpriv->dmabytestomove[0] > devpriv->hwdmasize[0]) {
- devpriv->dmabytestomove[0] = devpriv->hwdmasize[0];
- devpriv->ai_eos = 0;
- }
- if (devpriv->dmabytestomove[1] > devpriv->hwdmasize[1]) {
- devpriv->dmabytestomove[1] = devpriv->hwdmasize[1];
- devpriv->ai_eos = 0;
- }
- devpriv->next_dma_buf = 0;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma);
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
- set_dma_count(devpriv->dma, devpriv->dmabytestomove[0]);
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
- }
+ if (devpriv->ai_dma)
+ pcl812_ai_setup_dma(dev, s);
switch (cmd->convert_src) {
case TRIG_TIMER:
- start_pacer(dev, 1, divisor1, divisor2);
+ pcl812_start_pacer(dev, true);
break;
}
- if (devpriv->ai_dma) /* let's go! */
- outb(devpriv->mode_reg_int | 2, dev->iobase + PCL812_MODE);
- else /* let's go! */
- outb(devpriv->mode_reg_int | 6, dev->iobase + PCL812_MODE);
+ if (devpriv->ai_dma)
+ ctrl |= PCL812_CTRL_PACER_DMA_TRIG;
+ else
+ ctrl |= PCL812_CTRL_PACER_EOC_TRIG;
+ outb(devpriv->mode_reg_int | ctrl, dev->iobase + PCL812_CTRL_REG);
return 0;
}
-/*
-==============================================================================
-*/
-static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d)
+static bool pcl812_ai_next_chan(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- char err = 1;
- unsigned int mask, timeout;
- struct comedi_device *dev = d;
struct pcl812_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int next_chan;
+ struct comedi_cmd *cmd = &s->async->cmd;
- s->async->events = 0;
+ s->async->events |= COMEDI_CB_BLOCK;
- timeout = 50; /* wait max 50us, it must finish under 33us */
- if (devpriv->ai_is16b) {
- mask = 0xffff;
- while (timeout--) {
- if (!(inb(dev->iobase + ACL8216_STATUS) & ACL8216_DRDY)) {
- err = 0;
- break;
- }
- udelay(1);
- }
- } else {
- mask = 0x0fff;
- while (timeout--) {
- if (!(inb(dev->iobase + PCL812_AD_HI) & PCL812_DRDY)) {
- err = 0;
- break;
- }
- udelay(1);
- }
+ s->async->cur_chan++;
+ if (s->async->cur_chan >= cmd->chanlist_len) {
+ s->async->cur_chan = 0;
+ devpriv->ai_act_scan++;
+ s->async->events |= COMEDI_CB_EOS;
+ }
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ devpriv->ai_act_scan >= cmd->stop_arg) {
+ /* all data sampled */
+ s->async->events |= COMEDI_CB_EOA;
+ return false;
}
- if (err) {
+ return true;
+}
+
+static void pcl812_handle_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int next_chan;
+
+ if (pcl812_ai_eoc(dev, s, NULL, 0)) {
dev_dbg(dev->class_dev, "A/D cmd IRQ without DRDY!\n");
- pcl812_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ return;
}
- comedi_buf_put(s->async,
- ((inb(dev->iobase + PCL812_AD_HI) << 8) |
- inb(dev->iobase + PCL812_AD_LO)) & mask);
+ comedi_buf_put(s->async, pcl812_ai_get_sample(dev, s));
/* Set up next channel. Added by abbotti 2010-01-20, but untested. */
next_chan = s->async->cur_chan + 1;
- if (next_chan >= devpriv->ai_n_chan)
+ if (next_chan >= cmd->chanlist_len)
next_chan = 0;
- if (devpriv->ai_chanlist[s->async->cur_chan] !=
- devpriv->ai_chanlist[next_chan])
- setup_range_channel(dev, s, devpriv->ai_chanlist[next_chan], 0);
-
- outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
+ if (cmd->chanlist[s->async->cur_chan] != cmd->chanlist[next_chan])
+ pcl812_ai_set_chan_range(dev, cmd->chanlist[next_chan], 0);
- s->async->cur_chan = next_chan;
- if (next_chan == 0) { /* one scan done */
- devpriv->ai_act_scan++;
- if (!(devpriv->ai_neverending))
- /* all data sampled */
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- pcl812_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- }
- }
-
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ pcl812_ai_next_chan(dev, s);
}
-/*
-==============================================================================
-*/
static void transfer_from_dma_buf(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned short *ptr,
unsigned int bufptr, unsigned int len)
{
- struct pcl812_private *devpriv = dev->private;
unsigned int i;
- s->async->events = 0;
for (i = len; i; i--) {
- /* get one sample */
comedi_buf_put(s->async, ptr[bufptr++]);
- s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan++;
- if (!devpriv->ai_neverending)
- /* all data sampled */
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- pcl812_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- break;
- }
- }
+ if (!pcl812_ai_next_chan(dev, s))
+ break;
}
-
- comedi_event(dev, s);
}
-/*
-==============================================================================
-*/
-static irqreturn_t interrupt_pcl812_ai_dma(int irq, void *d)
+static void pcl812_handle_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct comedi_device *dev = d;
struct pcl812_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned long dma_flags;
int len, bufptr;
unsigned short *ptr;
@@ -883,58 +920,36 @@ static irqreturn_t interrupt_pcl812_ai_dma(int irq, void *d)
len = (devpriv->dmabytestomove[devpriv->next_dma_buf] >> 1) -
devpriv->ai_poll_ptr;
- devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
- disable_dma(devpriv->dma);
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[devpriv->next_dma_buf]);
- if (devpriv->ai_eos) {
- set_dma_count(devpriv->dma,
- devpriv->dmabytestomove[devpriv->next_dma_buf]);
- } else {
- if (devpriv->dma_runs_to_end) {
- set_dma_count(devpriv->dma,
- devpriv->dmabytestomove[devpriv->
- next_dma_buf]);
- } else {
- set_dma_count(devpriv->dma, devpriv->last_dma_run);
- }
- devpriv->dma_runs_to_end--;
- }
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
-
- outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
+ pcl812_ai_setup_next_dma(dev, s);
bufptr = devpriv->ai_poll_ptr;
devpriv->ai_poll_ptr = 0;
transfer_from_dma_buf(dev, s, ptr, bufptr, len);
-
- return IRQ_HANDLED;
}
-/*
-==============================================================================
-*/
-static irqreturn_t interrupt_pcl812(int irq, void *d)
+static irqreturn_t pcl812_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
+ struct comedi_subdevice *s = dev->read_subdev;
struct pcl812_private *devpriv = dev->private;
if (!dev->attached) {
- comedi_error(dev, "spurious interrupt");
+ pcl812_ai_clear_eoc(dev);
return IRQ_HANDLED;
}
+
if (devpriv->ai_dma)
- return interrupt_pcl812_ai_dma(irq, d);
+ pcl812_handle_dma(dev, s);
else
- return interrupt_pcl812_ai_int(irq, d);
+ pcl812_handle_eoc(dev, s);
+
+ pcl812_ai_clear_eoc(dev);
+
+ cfc_handle_events(dev, s);
+ return IRQ_HANDLED;
}
-/*
-==============================================================================
-*/
static int pcl812_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct pcl812_private *devpriv = dev->private;
@@ -979,204 +994,308 @@ static int pcl812_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
return s->async->buf_write_count - s->async->buf_read_count;
}
-/*
-==============================================================================
-*/
-static void setup_range_channel(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int rangechan, char wait)
+static int pcl812_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct pcl812_private *devpriv = dev->private;
- unsigned char chan_reg = CR_CHAN(rangechan); /* normal board */
- /* gain index */
- unsigned char gain_reg = CR_RANGE(rangechan) +
- devpriv->range_correction;
- if ((chan_reg == devpriv->old_chan_reg)
- && (gain_reg == devpriv->old_gain_reg))
- return; /* we can return, no change */
+ if (devpriv->ai_dma)
+ disable_dma(devpriv->dma);
- devpriv->old_chan_reg = chan_reg;
- devpriv->old_gain_reg = gain_reg;
+ outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
+ dev->iobase + PCL812_CTRL_REG);
+ pcl812_start_pacer(dev, false);
+ pcl812_ai_clear_eoc(dev);
+ return 0;
+}
- if (devpriv->use_MPC) {
- if (devpriv->use_diff) {
- chan_reg = chan_reg | 0x30; /* DIFF inputs */
- } else {
- if (chan_reg & 0x80)
- /* SE inputs 8-15 */
- chan_reg = chan_reg | 0x20;
- else
- /* SE inputs 0-7 */
- chan_reg = chan_reg | 0x10;
- }
- }
+static int pcl812_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct pcl812_private *devpriv = dev->private;
+ int ret = 0;
+ int i;
- outb(chan_reg, dev->iobase + PCL812_MUX); /* select channel */
- outb(gain_reg, dev->iobase + PCL812_GAIN); /* select gain */
+ outb(devpriv->mode_reg_int | PCL812_CTRL_SOFT_TRIG,
+ dev->iobase + PCL812_CTRL_REG);
+ pcl812_ai_set_chan_range(dev, insn->chanspec, 1);
- if (wait)
- /*
- * XXX this depends on selected range and can be very long for
- * some high gain ranges!
- */
- udelay(devpriv->max_812_ai_mode0_rangewait);
+ for (i = 0; i < insn->n; i++) {
+ pcl812_ai_clear_eoc(dev);
+ pcl812_ai_soft_trig(dev);
+
+ ret = comedi_timeout(dev, s, insn, pcl812_ai_eoc, 0);
+ if (ret)
+ break;
+
+ data[i] = pcl812_ai_get_sample(dev, s);
+ }
+ outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
+ dev->iobase + PCL812_CTRL_REG);
+ pcl812_ai_clear_eoc(dev);
+
+ return ret ? ret : insn->n;
}
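
For reference, the slow single-sample path above is what user space reaches through the comedi instruction interface. A minimal comedilib sketch of that path, under the assumptions that the board was attached as /dev/comedi0 and that the analog input ended up as subdevice 0 (both depend on the local configuration, not on this patch):

/* hypothetical user-space example using comedilib; not part of this patch */
#include <stdio.h>
#include <comedilib.h>

int main(void)
{
        comedi_t *dev = comedi_open("/dev/comedi0");    /* assumed device node */
        lsampl_t sample;

        if (!dev)
                return 1;

        /* subdevice 0, channel 0, range 0, ground reference (assumptions) */
        if (comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &sample) < 0)
                comedi_perror("comedi_data_read");
        else
                printf("raw sample: %u\n", sample);

        comedi_close(dev);
        return 0;
}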
-/*
-==============================================================================
-*/
-static void start_pacer(struct comedi_device *dev, int mode,
- unsigned int divisor1, unsigned int divisor2)
+static int pcl812_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- outb(0xb4, dev->iobase + PCL812_CTRCTL);
- outb(0x74, dev->iobase + PCL812_CTRCTL);
- udelay(1);
+ struct pcl812_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
- if (mode == 1) {
- outb(divisor2 & 0xff, dev->iobase + PCL812_CTR2);
- outb((divisor2 >> 8) & 0xff, dev->iobase + PCL812_CTR2);
- outb(divisor1 & 0xff, dev->iobase + PCL812_CTR1);
- outb((divisor1 >> 8) & 0xff, dev->iobase + PCL812_CTR1);
+ for (i = 0; i < insn->n; i++) {
+ outb((data[i] & 0xff),
+ dev->iobase + PCL812_AO_LSB_REG(chan));
+ outb((data[i] >> 8) & 0x0f,
+ dev->iobase + PCL812_AO_MSB_REG(chan));
+ devpriv->ao_readback[chan] = data[i];
}
+
+ return insn->n;
}
-/*
-==============================================================================
-*/
-static int pcl812_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int pcl812_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pcl812_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
- if (devpriv->ai_dma)
- disable_dma(devpriv->dma);
- outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
- /* Stop A/D */
- outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
- start_pacer(dev, -1, 0, 0); /* stop 8254 */
- outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
- return 0;
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
+
+ return insn->n;
+}
+
+static int pcl812_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = inb(dev->iobase + PCL812_DI_LSB_REG) |
+ (inb(dev->iobase + PCL812_DI_MSB_REG) << 8);
+
+ return insn->n;
+}
+
+static int pcl812_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ if (comedi_dio_update_state(s, data)) {
+ outb(s->state & 0xff, dev->iobase + PCL812_DO_LSB_REG);
+ outb((s->state >> 8), dev->iobase + PCL812_DO_MSB_REG);
+ }
+
+ data[1] = s->state;
+
+ return insn->n;
}
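
The mask/bits handling above follows the comedi core convention for INSN_BITS: data[0] selects the channels to change, data[1] carries their new values, and comedi_dio_update_state() folds them into s->state, returning non-zero when the hardware needs rewriting. A standalone, simplified sketch of that convention (plain C with a stand-in struct, not the core's actual implementation):

#include <stdio.h>

struct fake_subdevice {
        unsigned int state;     /* mirrors s->state */
};

/* simplified stand-in for comedi_dio_update_state() */
static unsigned int dio_update_state(struct fake_subdevice *s, unsigned int *data)
{
        unsigned int mask = data[0];    /* channels to change */
        unsigned int bits = data[1];    /* their new values */

        if (mask) {
                s->state &= ~mask;
                s->state |= bits & mask;
        }
        return mask;    /* non-zero: write s->state back to the hardware */
}

int main(void)
{
        struct fake_subdevice s = { .state = 0x00ff };
        unsigned int data[2] = { 0x000f, 0x0005 };      /* set low nibble to 0101 */

        if (dio_update_state(&s, data))
                printf("new state: 0x%04x\n", s.state); /* prints 0x00f5 */
        return 0;
}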
-/*
-==============================================================================
-*/
static void pcl812_reset(struct comedi_device *dev)
{
const struct pcl812_board *board = comedi_board(dev);
struct pcl812_private *devpriv = dev->private;
+ unsigned int chan;
+
+ /* disable analog input trigger */
+ outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
+ dev->iobase + PCL812_CTRL_REG);
+ pcl812_ai_clear_eoc(dev);
+
+ /* stop pacer */
+ if (board->IRQbits)
+ pcl812_start_pacer(dev, false);
+
+ /*
+ * Invalidate last_ai_chanspec then set analog input to
+ * known channel/range.
+ */
+ devpriv->last_ai_chanspec = CR_PACK(16, 0, 0);
+ pcl812_ai_set_chan_range(dev, CR_PACK(0, 0, 0), 0);
+
+ /* set analog output channels to 0V */
+ for (chan = 0; chan < board->n_aochan; chan++) {
+ outb(0, dev->iobase + PCL812_AO_LSB_REG(chan));
+ outb(0, dev->iobase + PCL812_AO_MSB_REG(chan));
+ }
- outb(0, dev->iobase + PCL812_MUX);
- outb(0 + devpriv->range_correction, dev->iobase + PCL812_GAIN);
- devpriv->old_chan_reg = -1; /* invalidate chain/gain memory */
- devpriv->old_gain_reg = -1;
+ /* set all digital outputs low */
+ if (board->has_dio) {
+ outb(0, dev->iobase + PCL812_DO_MSB_REG);
+ outb(0, dev->iobase + PCL812_DO_LSB_REG);
+ }
+}
+static void pcl812_set_ai_range_table(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_devconfig *it)
+{
+ const struct pcl812_board *board = comedi_board(dev);
+ struct pcl812_private *devpriv = dev->private;
+
+ /* default to the range table from the boardinfo */
+ s->range_table = board->rangelist_ai;
+
+ /* now check the user config option based on the boardtype */
switch (board->board_type) {
case boardPCL812PG:
+ if (it->options[4] == 1)
+ s->range_table = &range_pcl812pg2_ai;
+ break;
case boardPCL812:
- case boardACL8112:
- case boardACL8216:
- outb(0, dev->iobase + PCL812_DA2_LO);
- outb(0, dev->iobase + PCL812_DA2_HI);
- case boardA821:
- outb(0, dev->iobase + PCL812_DA1_LO);
- outb(0, dev->iobase + PCL812_DA1_HI);
- start_pacer(dev, -1, 0, 0); /* stop 8254 */
- outb(0, dev->iobase + PCL812_DO_HI);
- outb(0, dev->iobase + PCL812_DO_LO);
- outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
- outb(0, dev->iobase + PCL812_CLRINT);
+ switch (it->options[4]) {
+ case 0:
+ s->range_table = &range_bipolar10;
+ break;
+ case 1:
+ s->range_table = &range_bipolar5;
+ break;
+ case 2:
+ s->range_table = &range_bipolar2_5;
+ break;
+ case 3:
+ s->range_table = &range812_bipolar1_25;
+ break;
+ case 4:
+ s->range_table = &range812_bipolar0_625;
+ break;
+ case 5:
+ s->range_table = &range812_bipolar0_3125;
+ break;
+ default:
+ s->range_table = &range_bipolar10;
+ break;
+ }
break;
case boardPCL813B:
- case boardPCL813:
+ if (it->options[1] == 1)
+ s->range_table = &range_pcl813b2_ai;
+ break;
case boardISO813:
+ switch (it->options[1]) {
+ case 0:
+ s->range_table = &range_iso813_1_ai;
+ break;
+ case 1:
+ s->range_table = &range_iso813_1_2_ai;
+ break;
+ case 2:
+ s->range_table = &range_iso813_2_ai;
+ devpriv->range_correction = 1;
+ break;
+ case 3:
+ s->range_table = &range_iso813_2_2_ai;
+ devpriv->range_correction = 1;
+ break;
+ default:
+ s->range_table = &range_iso813_1_ai;
+ break;
+ }
+ break;
case boardACL8113:
- udelay(5);
+ switch (it->options[1]) {
+ case 0:
+ s->range_table = &range_acl8113_1_ai;
+ break;
+ case 1:
+ s->range_table = &range_acl8113_1_2_ai;
+ break;
+ case 2:
+ s->range_table = &range_acl8113_2_ai;
+ devpriv->range_correction = 1;
+ break;
+ case 3:
+ s->range_table = &range_acl8113_2_2_ai;
+ devpriv->range_correction = 1;
+ break;
+ default:
+ s->range_table = &range_acl8113_1_ai;
+ break;
+ }
break;
}
- udelay(5);
}
static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl812_board *board = comedi_board(dev);
struct pcl812_private *devpriv;
- int ret, subdev;
- unsigned int dma;
- unsigned long pages;
struct comedi_subdevice *s;
int n_subdevices;
-
- ret = comedi_request_region(dev, it->options[0], board->io_range);
- if (ret)
- return ret;
+ int subdev;
+ int ret;
+ int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
+ ret = comedi_request_region(dev, it->options[0], 0x10);
+ if (ret)
+ return ret;
+
if ((1 << it->options[1]) & board->IRQbits) {
- ret = request_irq(it->options[1], interrupt_pcl812, 0,
+ ret = request_irq(it->options[1], pcl812_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
- dma = 0;
- devpriv->dma = dma;
- if (!dev->irq)
- goto no_dma; /* if we haven't IRQ, we can't use DMA */
- if (board->DMAbits != 0) { /* board support DMA */
- dma = it->options[2];
- if (((1 << dma) & board->DMAbits) == 0) {
- dev_err(dev->class_dev,
- "DMA is out of allowed range, FAIL!\n");
- return -EINVAL; /* Bad DMA */
- }
- ret = request_dma(dma, dev->board_name);
+ /* we need an IRQ to do DMA on channel 3 or 1 */
+ if (dev->irq && board->has_dma &&
+ (it->options[2] == 3 || it->options[2] == 1)) {
+ ret = request_dma(it->options[2], dev->board_name);
if (ret) {
dev_err(dev->class_dev,
- "unable to allocate DMA %u, FAIL!\n", dma);
- return -EBUSY; /* DMA isn't free */
- }
- devpriv->dma = dma;
- pages = 1; /* we want 8KB */
- devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[0]) {
- dev_err(dev->class_dev,
- "unable to allocate DMA buffer, FAIL!\n");
- /*
- * maybe experiment with try_to_free_pages()
- * will help ....
- */
- return -EBUSY; /* no buffer :-( */
- }
- devpriv->dmapages[0] = pages;
- devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
- devpriv->hwdmasize[0] = PAGE_SIZE * (1 << pages);
- devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[1]) {
- dev_err(dev->class_dev,
- "unable to allocate DMA buffer, FAIL!\n");
+ "unable to request DMA channel %d\n",
+ it->options[2]);
return -EBUSY;
}
- devpriv->dmapages[1] = pages;
- devpriv->hwdmaptr[1] = virt_to_bus((void *)devpriv->dmabuf[1]);
- devpriv->hwdmasize[1] = PAGE_SIZE * (1 << pages);
+ devpriv->dma = it->options[2];
+
+ devpriv->dmapages = 1; /* we want 8KB */
+ devpriv->hwdmasize = (1 << devpriv->dmapages) * PAGE_SIZE;
+
+ for (i = 0; i < 2; i++) {
+ unsigned long dmabuf;
+
+ dmabuf = __get_dma_pages(GFP_KERNEL, devpriv->dmapages);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ devpriv->dmabuf[i] = dmabuf;
+ devpriv->hwdmaptr[i] = virt_to_bus((void *)dmabuf);
+ }
}
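
The buffer sizing above is just the page-order arithmetic behind __get_dma_pages(): an order of 1 yields 1 << 1 pages, i.e. 8 KiB with the common 4 KiB page size, which is what the "we want 8KB" comment refers to. A standalone sketch of the same arithmetic (the 4096-byte PAGE_SIZE is an assumption; it varies by architecture):

#include <stdio.h>

int main(void)
{
        unsigned int page_size = 4096;  /* assumed PAGE_SIZE */
        unsigned int order = 1;         /* devpriv->dmapages in the driver */
        unsigned int hwdmasize = (1u << order) * page_size;

        printf("order %u -> %u bytes per DMA buffer\n", order, hwdmasize);
        return 0;
}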
-no_dma:
- n_subdevices = 0;
- if (board->n_aichan > 0)
- n_subdevices++;
+ /* differential analog inputs? */
+ switch (board->board_type) {
+ case boardA821:
+ if (it->options[2] == 1)
+ devpriv->use_diff = 1;
+ break;
+ case boardACL8112:
+ case boardACL8216:
+ if (it->options[4] == 1)
+ devpriv->use_diff = 1;
+ break;
+ }
+
+ n_subdevices = 1; /* all boardtypes have analog inputs */
if (board->n_aochan > 0)
n_subdevices++;
- if (board->n_dichan > 0)
- n_subdevices++;
- if (board->n_dochan > 0)
- n_subdevices++;
+ if (board->has_dio)
+ n_subdevices += 2;
ret = comedi_alloc_subdevices(dev, n_subdevices);
if (ret)
@@ -1184,146 +1303,47 @@ no_dma:
subdev = 0;
- /* analog input */
- if (board->n_aichan > 0) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE;
- switch (board->board_type) {
- case boardA821:
- if (it->options[2] == 1) {
- s->n_chan = board->n_aichan_diff;
- s->subdev_flags |= SDF_DIFF;
- devpriv->use_diff = 1;
- } else {
- s->n_chan = board->n_aichan;
- s->subdev_flags |= SDF_GROUND;
- }
- break;
- case boardACL8112:
- case boardACL8216:
- if (it->options[4] == 1) {
- s->n_chan = board->n_aichan_diff;
- s->subdev_flags |= SDF_DIFF;
- devpriv->use_diff = 1;
- } else {
- s->n_chan = board->n_aichan;
- s->subdev_flags |= SDF_GROUND;
- }
- break;
- default:
- s->n_chan = board->n_aichan;
- s->subdev_flags |= SDF_GROUND;
- break;
- }
- s->maxdata = board->ai_maxdata;
- s->range_table = board->rangelist_ai;
- if (board->board_type == boardACL8216)
- s->insn_read = acl8216_ai_insn_read;
- else
- s->insn_read = pcl812_ai_insn_read;
-
- devpriv->use_MPC = board->haveMPC508;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = MAX_CHANLIST_LEN;
- s->do_cmdtest = pcl812_ai_cmdtest;
- s->do_cmd = pcl812_ai_cmd;
- s->poll = pcl812_ai_poll;
- s->cancel = pcl812_ai_cancel;
- }
- switch (board->board_type) {
- case boardPCL812PG:
- if (it->options[4] == 1)
- s->range_table = &range_pcl812pg2_ai;
- break;
- case boardPCL812:
- switch (it->options[4]) {
- case 0:
- s->range_table = &range_bipolar10;
- break;
- case 1:
- s->range_table = &range_bipolar5;
- break;
- case 2:
- s->range_table = &range_bipolar2_5;
- break;
- case 3:
- s->range_table = &range812_bipolar1_25;
- break;
- case 4:
- s->range_table = &range812_bipolar0_625;
- break;
- case 5:
- s->range_table = &range812_bipolar0_3125;
- break;
- default:
- s->range_table = &range_bipolar10;
- break;
- }
- break;
- break;
- case boardPCL813B:
- if (it->options[1] == 1)
- s->range_table = &range_pcl813b2_ai;
- break;
- case boardISO813:
- switch (it->options[1]) {
- case 0:
- s->range_table = &range_iso813_1_ai;
- break;
- case 1:
- s->range_table = &range_iso813_1_2_ai;
- break;
- case 2:
- s->range_table = &range_iso813_2_ai;
- devpriv->range_correction = 1;
- break;
- case 3:
- s->range_table = &range_iso813_2_2_ai;
- devpriv->range_correction = 1;
- break;
- default:
- s->range_table = &range_iso813_1_ai;
- break;
- }
- break;
- case boardACL8113:
- switch (it->options[1]) {
- case 0:
- s->range_table = &range_acl8113_1_ai;
- break;
- case 1:
- s->range_table = &range_acl8113_1_2_ai;
- break;
- case 2:
- s->range_table = &range_acl8113_2_ai;
- devpriv->range_correction = 1;
- break;
- case 3:
- s->range_table = &range_acl8113_2_2_ai;
- devpriv->range_correction = 1;
- break;
- default:
- s->range_table = &range_acl8113_1_ai;
- break;
- }
- break;
- }
- subdev++;
+ /* Analog Input subdevice */
+ s = &dev->subdevices[subdev];
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE;
+ if (devpriv->use_diff) {
+ s->subdev_flags |= SDF_DIFF;
+ s->n_chan = board->n_aichan / 2;
+ } else {
+ s->subdev_flags |= SDF_GROUND;
+ s->n_chan = board->n_aichan;
+ }
+ s->maxdata = board->has_16bit_ai ? 0xffff : 0x0fff;
+
+ pcl812_set_ai_range_table(dev, s, it);
+
+ s->insn_read = pcl812_ai_insn_read;
+
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = MAX_CHANLIST_LEN;
+ s->do_cmdtest = pcl812_ai_cmdtest;
+ s->do_cmd = pcl812_ai_cmd;
+ s->poll = pcl812_ai_poll;
+ s->cancel = pcl812_ai_cancel;
}
+ devpriv->use_mpc508 = board->has_mpc508_mux;
+
+ subdev++;
+
/* analog output */
if (board->n_aochan > 0) {
s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->n_aochan;
- s->maxdata = 0xfff;
- s->range_table = board->rangelist_ao;
- s->insn_read = pcl812_ao_insn_read;
- s->insn_write = pcl812_ao_insn_write;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
+ s->n_chan = board->n_aochan;
+ s->maxdata = 0xfff;
+ s->range_table = &range_unipolar5;
+ s->insn_read = pcl812_ao_insn_read;
+ s->insn_write = pcl812_ao_insn_write;
switch (board->board_type) {
case boardA821:
if (it->options[3] == 1)
@@ -1342,33 +1362,30 @@ no_dma:
subdev++;
}
- /* digital input */
- if (board->n_dichan > 0) {
+ if (board->has_dio) {
+ /* Digital Input subdevice */
s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = board->n_dichan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl812_di_insn_bits;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl812_di_insn_bits;
subdev++;
- }
- /* digital output */
- if (board->n_dochan > 0) {
+ /* Digital Output subdevice */
s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_dochan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl812_do_insn_bits;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl812_do_insn_bits;
subdev++;
}
switch (board->board_type) {
case boardACL8216:
- devpriv->ai_is16b = 1;
case boardPCL812PG:
case boardPCL812:
case boardACL8112:
@@ -1390,8 +1407,6 @@ no_dma:
break;
}
- devpriv->valid = 1;
-
pcl812_reset(dev);
return 0;
@@ -1403,72 +1418,15 @@ static void pcl812_detach(struct comedi_device *dev)
if (devpriv) {
if (devpriv->dmabuf[0])
- free_pages(devpriv->dmabuf[0], devpriv->dmapages[0]);
+ free_pages(devpriv->dmabuf[0], devpriv->dmapages);
if (devpriv->dmabuf[1])
- free_pages(devpriv->dmabuf[1], devpriv->dmapages[1]);
+ free_pages(devpriv->dmabuf[1], devpriv->dmapages);
if (devpriv->dma)
free_dma(devpriv->dma);
}
comedi_legacy_detach(dev);
}
-static const struct pcl812_board boardtypes[] = {
- {"pcl812", boardPCL812, 16, 0, 2, 16, 16, 0x0fff,
- 33000, I8254_OSC_BASE_2MHZ, &range_bipolar10, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"pcl812pg", boardPCL812PG, 16, 0, 2, 16, 16, 0x0fff,
- 33000, I8254_OSC_BASE_2MHZ, &range_pcl812pg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"acl8112pg", boardPCL812PG, 16, 0, 2, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_pcl812pg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"acl8112dg", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_acl8112dg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
- {"acl8112hg", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_acl8112hg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
- {"a821pgl", boardA821, 16, 8, 1, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b_ai, &range_unipolar5,
- 0x000c, 0x00, PCLx1x_IORANGE, 0},
- {"a821pglnda", boardA821, 16, 8, 0, 0, 0, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b_ai, NULL,
- 0x000c, 0x00, PCLx1x_IORANGE, 0},
- {"a821pgh", boardA821, 16, 8, 1, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_a821pgh_ai, &range_unipolar5,
- 0x000c, 0x00, PCLx1x_IORANGE, 0},
- {"a822pgl", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_acl8112dg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"a822pgh", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_acl8112hg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"a823pgl", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 8000, I8254_OSC_BASE_2MHZ, &range_acl8112dg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"a823pgh", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 8000, I8254_OSC_BASE_2MHZ, &range_acl8112hg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"pcl813", boardPCL813, 32, 0, 0, 0, 0, 0x0fff,
- 0, 0, &range_pcl813b_ai, NULL,
- 0x0000, 0x00, PCLx1x_IORANGE, 0},
- {"pcl813b", boardPCL813B, 32, 0, 0, 0, 0, 0x0fff,
- 0, 0, &range_pcl813b_ai, NULL,
- 0x0000, 0x00, PCLx1x_IORANGE, 0},
- {"acl8113", boardACL8113, 32, 0, 0, 0, 0, 0x0fff,
- 0, 0, &range_acl8113_1_ai, NULL,
- 0x0000, 0x00, PCLx1x_IORANGE, 0},
- {"iso813", boardISO813, 32, 0, 0, 0, 0, 0x0fff,
- 0, 0, &range_iso813_1_ai, NULL,
- 0x0000, 0x00, PCLx1x_IORANGE, 0},
- {"acl8216", boardACL8216, 16, 8, 2, 16, 16, 0xffff,
- 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b2_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
- {"a826pg", boardACL8216, 16, 8, 2, 16, 16, 0xffff,
- 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b2_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
-};
-
static struct comedi_driver pcl812_driver = {
.driver_name = "pcl812",
.module = THIS_MODULE,
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index e9d470459933..6f276f23fabe 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -44,40 +44,38 @@ Configuration Options:
#include "comedi_fc.h"
#include "8253.h"
-/* boards constants */
-/* IO space len */
-#define PCLx1x_RANGE 16
-
-/* INTEL 8254 counters */
-#define PCL816_CTR0 4
-#define PCL816_CTR1 5
-#define PCL816_CTR2 6
-/* R: counter read-back register W: counter control */
-#define PCL816_CTRCTL 7
-
-/* R: A/D high byte W: A/D range control */
-#define PCL816_RANGE 9
-/* W: clear INT request */
-#define PCL816_CLRINT 10
-/* R: next mux scan channel W: mux scan channel & range control pointer */
-#define PCL816_MUX 11
-/* R/W: operation control register */
-#define PCL816_CONTROL 12
-
-/* R: return status byte W: set DMA/IRQ */
-#define PCL816_STATUS 13
-#define PCL816_STATUS_DRDY_MASK 0x80
-
-/* R: low byte of A/D W: soft A/D trigger */
-#define PCL816_AD_LO 8
-/* R: high byte of A/D W: A/D range control */
-#define PCL816_AD_HI 9
-
-/* type of interrupt handler */
-#define INT_TYPE_AI1_INT 1
-#define INT_TYPE_AI1_DMA 2
-#define INT_TYPE_AI3_INT 4
-#define INT_TYPE_AI3_DMA 5
+/*
+ * Register I/O map
+ */
+#define PCL816_DO_DI_LSB_REG 0x00
+#define PCL816_DO_DI_MSB_REG 0x01
+#define PCL816_TIMER_BASE 0x04
+#define PCL816_AI_LSB_REG 0x08
+#define PCL816_AI_MSB_REG 0x09
+#define PCL816_RANGE_REG 0x09
+#define PCL816_CLRINT_REG 0x0a
+#define PCL816_MUX_REG 0x0b
+#define PCL816_MUX_SCAN(_first, _last) (((_last) << 4) | (_first))
+#define PCL816_CTRL_REG 0x0c
+#define PCL816_CTRL_DISABLE_TRIG (0 << 0)
+#define PCL816_CTRL_SOFT_TRIG (1 << 0)
+#define PCL816_CTRL_PACER_TRIG (1 << 1)
+#define PCL816_CTRL_EXT_TRIG (1 << 2)
+#define PCL816_CTRL_POE (1 << 3)
+#define PCL816_CTRL_DMAEN (1 << 4)
+#define PCL816_CTRL_INTEN (1 << 5)
+#define PCL816_CTRL_DMASRC_SLOT0 (0 << 6)
+#define PCL816_CTRL_DMASRC_SLOT1 (1 << 6)
+#define PCL816_CTRL_DMASRC_SLOT2 (2 << 6)
+#define PCL816_STATUS_REG 0x0d
+#define PCL816_STATUS_NEXT_CHAN_MASK (0xf << 0)
+#define PCL816_STATUS_INTSRC_MASK (3 << 4)
+#define PCL816_STATUS_INTSRC_SLOT0 (0 << 4)
+#define PCL816_STATUS_INTSRC_SLOT1 (1 << 4)
+#define PCL816_STATUS_INTSRC_SLOT2 (2 << 4)
+#define PCL816_STATUS_INTSRC_DMA (3 << 4)
+#define PCL816_STATUS_INTACT (1 << 6)
+#define PCL816_STATUS_DRDY (1 << 7)
#define MAGIC_DMA_WORD 0x5a5a
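
For illustration, the bit definitions above compose the control byte the command path writes for a pacer-triggered, interrupt-driven DMA acquisition; ORing them together reproduces the magic 0x32 that the old code wrote to PCL816_CONTROL. A standalone sketch (plain C, mirroring the defines above):

#include <stdio.h>

#define PCL816_CTRL_PACER_TRIG          (1 << 1)
#define PCL816_CTRL_DMAEN               (1 << 4)
#define PCL816_CTRL_INTEN               (1 << 5)
#define PCL816_CTRL_DMASRC_SLOT0        (0 << 6)

int main(void)
{
        unsigned int ctrl = PCL816_CTRL_INTEN | PCL816_CTRL_DMAEN |
                            PCL816_CTRL_DMASRC_SLOT0 | PCL816_CTRL_PACER_TRIG;

        printf("pacer + IRQ + DMA control byte: 0x%02x\n", ctrl);      /* 0x32 */
        return 0;
}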
@@ -95,314 +93,284 @@ static const struct comedi_lrange range_pcl816 = {
};
struct pcl816_board {
+ const char *name;
+ int ai_maxdata;
+ int ao_maxdata;
+ int ai_chanlist;
+};
- const char *name; /* board name */
- int n_ranges; /* len of range list */
- int n_aichan; /* num of A/D chans in diferencial mode */
- unsigned int ai_ns_min; /* minimal allowed delay between samples (in ns) */
- int n_aochan; /* num of D/A chans */
- int n_dichan; /* num of DI chans */
- int n_dochan; /* num of DO chans */
- const struct comedi_lrange *ai_range_type; /* default A/D rangelist */
- const struct comedi_lrange *ao_range_type; /* default D/A rangelist */
- unsigned int io_range; /* len of IO space */
- unsigned int IRQbits; /* allowed interrupts */
- unsigned int DMAbits; /* allowed DMA chans */
- int ai_maxdata; /* maxdata for A/D */
- int ao_maxdata; /* maxdata for D/A */
- int ai_chanlist; /* allowed len of channel list A/D */
- int ao_chanlist; /* allowed len of channel list D/A */
- int i8254_osc_base; /* 1/frequency of on board oscilator in ns */
+static const struct pcl816_board boardtypes[] = {
+ {
+ .name = "pcl816",
+ .ai_maxdata = 0xffff,
+ .ao_maxdata = 0xffff,
+ .ai_chanlist = 1024,
+ }, {
+ .name = "pcl814b",
+ .ai_maxdata = 0x3fff,
+ .ao_maxdata = 0x3fff,
+ .ai_chanlist = 1024,
+ },
};
struct pcl816_private {
-
unsigned int dma; /* used DMA, 0=don't use DMA */
+ unsigned int dmapages;
+ unsigned int hwdmasize;
unsigned long dmabuf[2]; /* pointers to begin of DMA buffers */
- unsigned int dmapages[2]; /* len of DMA buffers in PAGE_SIZEs */
unsigned int hwdmaptr[2]; /* hardware address of DMA buffers */
- unsigned int hwdmasize[2]; /* len of DMA buffers in Bytes */
- unsigned int dmasamplsize; /* size in samples hwdmasize[0]/2 */
int next_dma_buf; /* which DMA buffer will be used next round */
long dma_runs_to_end; /* how many DMA transfers we must perform to reach the end of the record */
unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */
-
- unsigned int ai_scans; /* len of scanlist */
- unsigned char ai_neverending; /* if=1, then we do neverending record (you must use cancel()) */
- int irq_blocked; /* 1=IRQ now uses any subdev */
- int irq_was_now_closed; /* when IRQ finish, there's stored int816_mode for last interrupt */
- int int816_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */
- struct comedi_subdevice *last_int_sub; /* ptr to subdevice which now finish */
int ai_act_scan; /* how many scans we finished */
- unsigned int ai_act_chanlist[16]; /* MUX setting for actual AI operations */
- unsigned int ai_act_chanlist_len; /* how long is actual MUX list */
- unsigned int ai_act_chanlist_pos; /* actual position in MUX list */
- unsigned int ai_n_chan; /* how many channels per scan */
unsigned int ai_poll_ptr; /* how many samples were transferred by poll */
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int ai_cmd_running:1;
+ unsigned int ai_cmd_canceled:1;
};
-/*
-==============================================================================
-*/
static int check_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int *chanlist, unsigned int chanlen);
-static void setup_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int seglen);
-static int pcl816_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s);
-static void start_pacer(struct comedi_device *dev, int mode,
- unsigned int divisor1, unsigned int divisor2);
-static int pcl816_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd);
-static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
-
-/*
-==============================================================================
- ANALOG INPUT MODE0, 816 cards, slow version
-*/
-static int pcl816_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl816_start_pacer(struct comedi_device *dev, bool load_counters)
{
- int n;
- int timeout;
-
- /* software trigger, DMA and INT off */
- outb(0, dev->iobase + PCL816_CONTROL);
- /* clear INT (conversion end) flag */
- outb(0, dev->iobase + PCL816_CLRINT);
-
- /* Set the input channel */
- outb(CR_CHAN(insn->chanspec) & 0xf, dev->iobase + PCL816_MUX);
- /* select gain */
- outb(CR_RANGE(insn->chanspec), dev->iobase + PCL816_RANGE);
-
- for (n = 0; n < insn->n; n++) {
-
- outb(0, dev->iobase + PCL816_AD_LO); /* start conversion */
-
- timeout = 100;
- while (timeout--) {
- if (!(inb(dev->iobase + PCL816_STATUS) &
- PCL816_STATUS_DRDY_MASK)) {
- /* return read value */
- data[n] =
- ((inb(dev->iobase +
- PCL816_AD_HI) << 8) |
- (inb(dev->iobase + PCL816_AD_LO)));
- /* clear INT (conversion end) flag */
- outb(0, dev->iobase + PCL816_CLRINT);
- break;
- }
- udelay(1);
- }
- /* Return timeout error */
- if (!timeout) {
- comedi_error(dev, "A/D insn timeout\n");
- data[0] = 0;
- /* clear INT (conversion end) flag */
- outb(0, dev->iobase + PCL816_CLRINT);
- return -EIO;
- }
+ struct pcl816_private *devpriv = dev->private;
+ unsigned long timer_base = dev->iobase + PCL816_TIMER_BASE;
+
+ i8254_set_mode(timer_base, 0, 0, I8254_MODE1 | I8254_BINARY);
+ i8254_write(timer_base, 0, 0, 0x00ff);
+ udelay(1);
+
+ i8254_set_mode(timer_base, 0, 2, I8254_MODE2 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 1, I8254_MODE2 | I8254_BINARY);
+ udelay(1);
+ if (load_counters) {
+ i8254_write(timer_base, 0, 2, devpriv->divisor2);
+ i8254_write(timer_base, 0, 1, devpriv->divisor1);
}
- return n;
}
-/*
-==============================================================================
- analog input interrupt mode 1 & 3, 818 cards
- one sample per interrupt version
-*/
-static irqreturn_t interrupt_pcl816_ai_mode13_int(int irq, void *d)
+static void pcl816_ai_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct comedi_device *dev = d;
struct pcl816_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned char low, hi;
- int timeout = 50; /* wait max 50us */
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int dma_flags;
+ unsigned int bytes;
- while (timeout--) {
- if (!(inb(dev->iobase + PCL816_STATUS) &
- PCL816_STATUS_DRDY_MASK))
- break;
- udelay(1);
+ bytes = devpriv->hwdmasize;
+ if (cmd->stop_src == TRIG_COUNT) {
+ /* how many */
+ bytes = s->async->cmd.chanlist_len *
+ s->async->cmd.chanlist_len *
+ sizeof(short);
+
+ /* how many DMA pages we must fill */
+ devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize;
+
+ /* how many bytes to move on the last DMA transfer */
+ devpriv->last_dma_run = bytes % devpriv->hwdmasize;
+ devpriv->dma_runs_to_end--;
+ if (devpriv->dma_runs_to_end >= 0)
+ bytes = devpriv->hwdmasize;
+ } else
+ devpriv->dma_runs_to_end = -1;
+
+ devpriv->next_dma_buf = 0;
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ dma_flags = claim_dma_lock();
+ clear_dma_ff(devpriv->dma);
+ set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
+ set_dma_count(devpriv->dma, bytes);
+ release_dma_lock(dma_flags);
+ enable_dma(devpriv->dma);
+}
+
+static void pcl816_ai_setup_next_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pcl816_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned long dma_flags;
+
+ disable_dma(devpriv->dma);
+ if (devpriv->dma_runs_to_end > -1 || cmd->stop_src == TRIG_NONE) {
+ /* switch dma bufs */
+ devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ dma_flags = claim_dma_lock();
+ set_dma_addr(devpriv->dma,
+ devpriv->hwdmaptr[devpriv->next_dma_buf]);
+ if (devpriv->dma_runs_to_end)
+ set_dma_count(devpriv->dma, devpriv->hwdmasize);
+ else
+ set_dma_count(devpriv->dma, devpriv->last_dma_run);
+ release_dma_lock(dma_flags);
+ enable_dma(devpriv->dma);
}
- if (!timeout) { /* timeout, bail error */
- outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
- comedi_error(dev, "A/D mode1/3 IRQ without DRDY!");
- pcl816_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ devpriv->dma_runs_to_end--;
+}
+
+static void pcl816_ai_set_chan_range(struct comedi_device *dev,
+ unsigned int chan,
+ unsigned int range)
+{
+ outb(chan, dev->iobase + PCL816_MUX_REG);
+ outb(range, dev->iobase + PCL816_RANGE_REG);
+}
+
+static void pcl816_ai_set_chan_scan(struct comedi_device *dev,
+ unsigned int first_chan,
+ unsigned int last_chan)
+{
+ outb(PCL816_MUX_SCAN(first_chan, last_chan),
+ dev->iobase + PCL816_MUX_REG);
+}
+
+static void pcl816_ai_setup_chanlist(struct comedi_device *dev,
+ unsigned int *chanlist,
+ unsigned int seglen)
+{
+ unsigned int first_chan = CR_CHAN(chanlist[0]);
+ unsigned int last_chan;
+ unsigned int range;
+ unsigned int i;
+
+ /* store range list to card */
+ for (i = 0; i < seglen; i++) {
+ last_chan = CR_CHAN(chanlist[i]);
+ range = CR_RANGE(chanlist[i]);
+
+ pcl816_ai_set_chan_range(dev, last_chan, range);
}
- /* get the sample */
- low = inb(dev->iobase + PCL816_AD_LO);
- hi = inb(dev->iobase + PCL816_AD_HI);
+ udelay(1);
+
+ pcl816_ai_set_chan_scan(dev, first_chan, last_chan);
+}
+
+static void pcl816_ai_clear_eoc(struct comedi_device *dev)
+{
+ /* writing any value clears the interrupt request */
+ outb(0, dev->iobase + PCL816_CLRINT_REG);
+}
+
+static void pcl816_ai_soft_trig(struct comedi_device *dev)
+{
+ /* writing any value triggers a software conversion */
+ outb(0, dev->iobase + PCL816_AI_LSB_REG);
+}
- comedi_buf_put(s->async, (hi << 8) | low);
+static unsigned int pcl816_ai_get_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int val;
- outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
+ val = inb(dev->iobase + PCL816_AI_MSB_REG) << 8;
+ val |= inb(dev->iobase + PCL816_AI_LSB_REG);
- if (++devpriv->ai_act_chanlist_pos >= devpriv->ai_act_chanlist_len)
- devpriv->ai_act_chanlist_pos = 0;
+ return val & s->maxdata;
+}
+
+static int pcl816_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + PCL816_STATUS_REG);
+ if ((status & PCL816_STATUS_DRDY) == 0)
+ return 0;
+ return -EBUSY;
+}
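+
The return convention here is the comedi core's timeout-callback contract: the helper returns 0 once the status register reports data ready and -EBUSY while the conversion is still running, and comedi_timeout() keeps invoking it until it returns 0 or gives up. A simplified, standalone sketch of that polling pattern (the one-second budget and busy-wait are illustrative assumptions, not the core's exact implementation):

#include <errno.h>
#include <stdio.h>
#include <time.h>

/* stand-in for the driver's eoc callback; reports ready after N polls */
static int fake_eoc(void *hw)
{
        return (*(int *)hw)-- > 0 ? -EBUSY : 0;
}

static int poll_until_ready(int (*cb)(void *), void *hw, double budget_s)
{
        clock_t start = clock();

        while (cb(hw) != 0) {
                if ((double)(clock() - start) / CLOCKS_PER_SEC > budget_s)
                        return -ETIMEDOUT;
        }
        return 0;
}

int main(void)
{
        int busy_polls = 1000;

        printf("result: %d\n", poll_until_ready(fake_eoc, &busy_polls, 1.0));
        return 0;
}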
+
+static bool pcl816_ai_next_chan(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pcl816_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+
+ s->async->events |= COMEDI_CB_BLOCK;
s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
+ if (s->async->cur_chan >= cmd->chanlist_len) {
s->async->cur_chan = 0;
devpriv->ai_act_scan++;
+ s->async->events |= COMEDI_CB_EOS;
}
- if (!devpriv->ai_neverending)
- /* all data sampled */
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- /* all data sampled */
- pcl816_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- }
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ if (cmd->stop_src == TRIG_COUNT &&
+ devpriv->ai_act_scan >= cmd->stop_arg) {
+ /* all data sampled */
+ s->async->events |= COMEDI_CB_EOA;
+ return false;
+ }
+
+ return true;
}
-/*
-==============================================================================
- analog input dma mode 1 & 3, 816 cards
-*/
static void transfer_from_dma_buf(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned short *ptr,
unsigned int bufptr, unsigned int len)
{
- struct pcl816_private *devpriv = dev->private;
int i;
- s->async->events = 0;
-
for (i = 0; i < len; i++) {
-
comedi_buf_put(s->async, ptr[bufptr++]);
- if (++devpriv->ai_act_chanlist_pos >=
- devpriv->ai_act_chanlist_len) {
- devpriv->ai_act_chanlist_pos = 0;
- }
-
- s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan++;
- }
-
- if (!devpriv->ai_neverending)
- /* all data sampled */
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- pcl816_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_BLOCK;
- break;
- }
+ if (!pcl816_ai_next_chan(dev, s))
+ return;
}
-
- comedi_event(dev, s);
}
-static irqreturn_t interrupt_pcl816_ai_mode13_dma(int irq, void *d)
+static irqreturn_t pcl816_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct pcl816_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- int len, bufptr, this_dma_buf;
- unsigned long dma_flags;
+ struct pcl816_private *devpriv = dev->private;
unsigned short *ptr;
+ unsigned int bufptr;
+ unsigned int len;
- disable_dma(devpriv->dma);
- this_dma_buf = devpriv->next_dma_buf;
-
- /* switch dma bufs */
- if ((devpriv->dma_runs_to_end > -1) || devpriv->ai_neverending) {
+ if (!dev->attached || !devpriv->ai_cmd_running) {
+ pcl816_ai_clear_eoc(dev);
+ return IRQ_HANDLED;
+ }
- devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
-/* clear_dma_ff (devpriv->dma); */
- set_dma_addr(devpriv->dma,
- devpriv->hwdmaptr[devpriv->next_dma_buf]);
- if (devpriv->dma_runs_to_end) {
- set_dma_count(devpriv->dma,
- devpriv->hwdmasize[devpriv->
- next_dma_buf]);
- } else {
- set_dma_count(devpriv->dma, devpriv->last_dma_run);
- }
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
+ if (devpriv->ai_cmd_canceled) {
+ devpriv->ai_cmd_canceled = 0;
+ pcl816_ai_clear_eoc(dev);
+ return IRQ_HANDLED;
}
- devpriv->dma_runs_to_end--;
- outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
+ ptr = (unsigned short *)devpriv->dmabuf[devpriv->next_dma_buf];
- ptr = (unsigned short *)devpriv->dmabuf[this_dma_buf];
+ pcl816_ai_setup_next_dma(dev, s);
- len = (devpriv->hwdmasize[0] >> 1) - devpriv->ai_poll_ptr;
+ len = (devpriv->hwdmasize >> 1) - devpriv->ai_poll_ptr;
bufptr = devpriv->ai_poll_ptr;
devpriv->ai_poll_ptr = 0;
transfer_from_dma_buf(dev, s, ptr, bufptr, len);
- return IRQ_HANDLED;
-}
-
-/*
-==============================================================================
- INT procedure
-*/
-static irqreturn_t interrupt_pcl816(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct pcl816_private *devpriv = dev->private;
- if (!dev->attached) {
- comedi_error(dev, "premature interrupt");
- return IRQ_HANDLED;
- }
-
- switch (devpriv->int816_mode) {
- case INT_TYPE_AI1_DMA:
- case INT_TYPE_AI3_DMA:
- return interrupt_pcl816_ai_mode13_dma(irq, d);
- case INT_TYPE_AI1_INT:
- case INT_TYPE_AI3_INT:
- return interrupt_pcl816_ai_mode13_int(irq, d);
- }
+ pcl816_ai_clear_eoc(dev);
- outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
- if (!dev->irq || !devpriv->irq_blocked || !devpriv->int816_mode) {
- if (devpriv->irq_was_now_closed) {
- devpriv->irq_was_now_closed = 0;
- /* comedi_error(dev,"last IRQ.."); */
- return IRQ_HANDLED;
- }
- comedi_error(dev, "bad IRQ!");
- return IRQ_NONE;
- }
- comedi_error(dev, "IRQ from unknown source!");
- return IRQ_NONE;
+ cfc_handle_events(dev, s);
+ return IRQ_HANDLED;
}
-/*
-==============================================================================
-*/
static int pcl816_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
- const struct pcl816_board *board = comedi_board(dev);
+ struct pcl816_private *devpriv = dev->private;
int err = 0;
- int tmp, divisor1 = 0, divisor2 = 0;
+ int tmp;
/* Step 1 : check if triggers are trivially valid */
@@ -432,8 +400,7 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
if (cmd->convert_src == TRIG_TIMER)
- err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_ns_min);
+ err |= cfc_check_trigger_arg_min(&cmd->convert_arg, 10000);
else /* TRIG_EXT */
err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
@@ -451,11 +418,12 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
/* step 4: fix up any arguments */
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer(board->i8254_osc_base,
- &divisor1, &divisor2,
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
&cmd->convert_arg, cmd->flags);
- if (cmd->convert_arg < board->ai_ns_min)
- cmd->convert_arg = board->ai_ns_min;
+ if (cmd->convert_arg < 10000)
+ cmd->convert_arg = 10000;
if (tmp != cmd->convert_arg)
err++;
}
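
A worked example of this fixup: I8254_OSC_BASE_10MHZ is the 100 ns period of the 10 MHz timer clock, so the minimum convert_arg of 10000 ns needs a total division of 10000 / 100 = 100, which the cascaded counters can realize as, for instance, divisor1 = 10 and divisor2 = 10. The exact split i8253_cascade_ns_to_timer() picks depends on the rounding flags; the sketch below only shows the arithmetic, not the helper's algorithm:

#include <stdio.h>

int main(void)
{
        unsigned int osc_base_ns = 100;         /* 10 MHz timer clock */
        unsigned int convert_arg_ns = 10000;    /* driver's minimum */
        unsigned int total_div = convert_arg_ns / osc_base_ns;
        unsigned int div1 = 10, div2 = 10;      /* one possible cascade split */

        printf("total divisor %u = %u * %u -> %u ns period\n",
               total_div, div1, div2, div1 * div2 * osc_base_ns);
        return 0;
}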
@@ -477,120 +445,40 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- const struct pcl816_board *board = comedi_board(dev);
struct pcl816_private *devpriv = dev->private;
- unsigned int divisor1 = 0, divisor2 = 0, dma_flags, bytes, dmairq;
struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int ctrl;
unsigned int seglen;
- if (cmd->start_src != TRIG_NOW)
- return -EINVAL;
- if (cmd->scan_begin_src != TRIG_FOLLOW)
- return -EINVAL;
- if (cmd->scan_end_src != TRIG_COUNT)
- return -EINVAL;
- if (cmd->scan_end_arg != cmd->chanlist_len)
- return -EINVAL;
-/* if(cmd->chanlist_len>MAX_CHANLIST_LEN) return -EINVAL; */
- if (devpriv->irq_blocked)
+ if (devpriv->ai_cmd_running)
return -EBUSY;
- if (cmd->convert_src == TRIG_TIMER) {
- if (cmd->convert_arg < board->ai_ns_min)
- cmd->convert_arg = board->ai_ns_min;
-
- i8253_cascade_ns_to_timer(board->i8254_osc_base,
- &divisor1, &divisor2,
- &cmd->convert_arg, cmd->flags);
-
- /* PCL816 crash if any divisor is set to 1 */
- if (divisor1 == 1) {
- divisor1 = 2;
- divisor2 /= 2;
- }
- if (divisor2 == 1) {
- divisor2 = 2;
- divisor1 /= 2;
- }
- }
-
- start_pacer(dev, -1, 0, 0); /* stop pacer */
+ pcl816_start_pacer(dev, false);
seglen = check_channel_list(dev, s, cmd->chanlist, cmd->chanlist_len);
if (seglen < 1)
return -EINVAL;
- setup_channel_list(dev, s, cmd->chanlist, seglen);
+ pcl816_ai_setup_chanlist(dev, cmd->chanlist, seglen);
udelay(1);
- devpriv->ai_n_chan = cmd->chanlist_len;
devpriv->ai_act_scan = 0;
s->async->cur_chan = 0;
- devpriv->irq_blocked = 1;
+ devpriv->ai_cmd_running = 1;
devpriv->ai_poll_ptr = 0;
- devpriv->irq_was_now_closed = 0;
+ devpriv->ai_cmd_canceled = 0;
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ai_scans = cmd->stop_arg;
- devpriv->ai_neverending = 0;
- } else {
- devpriv->ai_scans = 0;
- devpriv->ai_neverending = 1;
- }
-
- if (devpriv->dma) {
- bytes = devpriv->hwdmasize[0];
- if (!devpriv->ai_neverending) {
- /* how many */
- bytes = s->async->cmd.chanlist_len *
- s->async->cmd.chanlist_len *
- sizeof(short);
-
- /* how many DMA pages we must fill */
- devpriv->dma_runs_to_end = bytes /
- devpriv->hwdmasize[0];
-
- /* on last dma transfer must be moved */
- devpriv->last_dma_run = bytes % devpriv->hwdmasize[0];
- devpriv->dma_runs_to_end--;
- if (devpriv->dma_runs_to_end >= 0)
- bytes = devpriv->hwdmasize[0];
- } else
- devpriv->dma_runs_to_end = -1;
-
- devpriv->next_dma_buf = 0;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma);
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
- set_dma_count(devpriv->dma, bytes);
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
- }
-
- start_pacer(dev, 1, divisor1, divisor2);
- dmairq = ((devpriv->dma & 0x3) << 4) | (dev->irq & 0x7);
-
- switch (cmd->convert_src) {
- case TRIG_TIMER:
- devpriv->int816_mode = INT_TYPE_AI1_DMA;
-
- /* Pacer+IRQ+DMA */
- outb(0x32, dev->iobase + PCL816_CONTROL);
+ pcl816_ai_setup_dma(dev, s);
- /* write irq and DMA to card */
- outb(dmairq, dev->iobase + PCL816_STATUS);
- break;
+ pcl816_start_pacer(dev, true);
- default:
- devpriv->int816_mode = INT_TYPE_AI3_DMA;
-
- /* Ext trig+IRQ+DMA */
- outb(0x34, dev->iobase + PCL816_CONTROL);
+ ctrl = PCL816_CTRL_INTEN | PCL816_CTRL_DMAEN | PCL816_CTRL_DMASRC_SLOT0;
+ if (cmd->convert_src == TRIG_TIMER)
+ ctrl |= PCL816_CTRL_PACER_TRIG;
+ else /* TRIG_EXT */
+ ctrl |= PCL816_CTRL_EXT_TRIG;
- /* write irq to card */
- outb(dmairq, dev->iobase + PCL816_STATUS);
- break;
- }
+ outb(ctrl, dev->iobase + PCL816_CTRL_REG);
+ outb((devpriv->dma << 4) | dev->irq, dev->iobase + PCL816_STATUS_REG);
return 0;
}
@@ -601,9 +489,6 @@ static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned long flags;
unsigned int top1, top2, i;
- if (!devpriv->dma)
- return 0; /* poll is valid only for DMA transfer */
-
spin_lock_irqsave(&dev->spinlock, flags);
for (i = 0; i < 20; i++) {
@@ -618,7 +503,7 @@ static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
}
/* where is now DMA in buffer */
- top1 = devpriv->hwdmasize[0] - top1;
+ top1 = devpriv->hwdmasize - top1;
top1 >>= 1; /* sample position */
top2 = top1 - devpriv->ai_poll_ptr;
if (top2 < 1) { /* no new samples */
@@ -634,134 +519,34 @@ static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_poll_ptr = top1; /* new buffer position */
spin_unlock_irqrestore(&dev->spinlock, flags);
+ cfc_handle_events(dev, s);
+
return s->async->buf_write_count - s->async->buf_read_count;
}
-/*
-==============================================================================
- cancel any mode 1-4 AI
-*/
static int pcl816_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcl816_private *devpriv = dev->private;
- if (devpriv->irq_blocked > 0) {
- switch (devpriv->int816_mode) {
- case INT_TYPE_AI1_DMA:
- case INT_TYPE_AI3_DMA:
- disable_dma(devpriv->dma);
- case INT_TYPE_AI1_INT:
- case INT_TYPE_AI3_INT:
- outb(inb(dev->iobase + PCL816_CONTROL) & 0x73,
- dev->iobase + PCL816_CONTROL); /* Stop A/D */
- udelay(1);
- outb(0, dev->iobase + PCL816_CONTROL); /* Stop A/D */
-
- /* Stop pacer */
- outb(0xb0, dev->iobase + PCL816_CTRCTL);
- outb(0x70, dev->iobase + PCL816_CTRCTL);
- outb(0, dev->iobase + PCL816_AD_LO);
- inb(dev->iobase + PCL816_AD_LO);
- inb(dev->iobase + PCL816_AD_HI);
-
- /* clear INT request */
- outb(0, dev->iobase + PCL816_CLRINT);
-
- /* Stop A/D */
- outb(0, dev->iobase + PCL816_CONTROL);
- devpriv->irq_blocked = 0;
- devpriv->irq_was_now_closed = devpriv->int816_mode;
- devpriv->int816_mode = 0;
- devpriv->last_int_sub = s;
-/* s->busy = 0; */
- break;
- }
- }
- return 0;
-}
-
-/*
-==============================================================================
- chech for PCL816
-*/
-static int pcl816_check(unsigned long iobase)
-{
- outb(0x00, iobase + PCL816_MUX);
- udelay(1);
- if (inb(iobase + PCL816_MUX) != 0x00)
- return 1; /* there isn't card */
- outb(0x55, iobase + PCL816_MUX);
- udelay(1);
- if (inb(iobase + PCL816_MUX) != 0x55)
- return 1; /* there isn't card */
- outb(0x00, iobase + PCL816_MUX);
- udelay(1);
- outb(0x18, iobase + PCL816_CONTROL);
- udelay(1);
- if (inb(iobase + PCL816_CONTROL) != 0x18)
- return 1; /* there isn't card */
- return 0; /* ok, card exist */
-}
-
-/*
-==============================================================================
- reset whole PCL-816 cards
-*/
-static void pcl816_reset(struct comedi_device *dev)
-{
-/* outb (0, dev->iobase + PCL818_DA_LO); DAC=0V */
-/* outb (0, dev->iobase + PCL818_DA_HI); */
-/* udelay (1); */
-/* outb (0, dev->iobase + PCL818_DO_HI); DO=$0000 */
-/* outb (0, dev->iobase + PCL818_DO_LO); */
-/* udelay (1); */
- outb(0, dev->iobase + PCL816_CONTROL);
- outb(0, dev->iobase + PCL816_MUX);
- outb(0, dev->iobase + PCL816_CLRINT);
- outb(0xb0, dev->iobase + PCL816_CTRCTL); /* Stop pacer */
- outb(0x70, dev->iobase + PCL816_CTRCTL);
- outb(0x30, dev->iobase + PCL816_CTRCTL);
- outb(0, dev->iobase + PCL816_RANGE);
-}
+ if (!devpriv->ai_cmd_running)
+ return 0;
-/*
-==============================================================================
- Start/stop pacer onboard pacer
-*/
-static void
-start_pacer(struct comedi_device *dev, int mode, unsigned int divisor1,
- unsigned int divisor2)
-{
- outb(0x32, dev->iobase + PCL816_CTRCTL);
- outb(0xff, dev->iobase + PCL816_CTR0);
- outb(0x00, dev->iobase + PCL816_CTR0);
- udelay(1);
+ outb(PCL816_CTRL_DISABLE_TRIG, dev->iobase + PCL816_CTRL_REG);
+ pcl816_ai_clear_eoc(dev);
- /* set counter 2 as mode 3 */
- outb(0xb4, dev->iobase + PCL816_CTRCTL);
- /* set counter 1 as mode 3 */
- outb(0x74, dev->iobase + PCL816_CTRCTL);
- udelay(1);
+ /* Stop pacer */
+ i8254_set_mode(dev->iobase + PCL816_TIMER_BASE, 0,
+ 2, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(dev->iobase + PCL816_TIMER_BASE, 0,
+ 1, I8254_MODE0 | I8254_BINARY);
- if (mode == 1) {
- dev_dbg(dev->class_dev, "mode %d, divisor1 %d, divisor2 %d\n",
- mode, divisor1, divisor2);
- outb(divisor2 & 0xff, dev->iobase + PCL816_CTR2);
- outb((divisor2 >> 8) & 0xff, dev->iobase + PCL816_CTR2);
- outb(divisor1 & 0xff, dev->iobase + PCL816_CTR1);
- outb((divisor1 >> 8) & 0xff, dev->iobase + PCL816_CTR1);
- }
+ devpriv->ai_cmd_running = 0;
+ devpriv->ai_cmd_canceled = 1;
- /* clear pending interrupts (just in case) */
-/* outb(0, dev->iobase + PCL816_CLRINT); */
+ return 0;
}
-/*
-==============================================================================
- Check if channel list from user is built correctly
- If it's ok, then return non-zero length of repeated segment of channel list
-*/
static int
check_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned int *chanlist,
@@ -818,180 +603,181 @@ check_channel_list(struct comedi_device *dev,
return seglen; /* we can serve this with MUX logic */
}
-/*
-==============================================================================
- Program scan/gain logic with channel list.
-*/
-static void
-setup_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int *chanlist,
- unsigned int seglen)
+static int pcl816_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct pcl816_private *devpriv = dev->private;
- unsigned int i;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ int ret = 0;
+ int i;
- devpriv->ai_act_chanlist_len = seglen;
- devpriv->ai_act_chanlist_pos = 0;
+ outb(PCL816_CTRL_SOFT_TRIG, dev->iobase + PCL816_CTRL_REG);
- for (i = 0; i < seglen; i++) { /* store range list to card */
- devpriv->ai_act_chanlist[i] = CR_CHAN(chanlist[i]);
- outb(CR_CHAN(chanlist[0]) & 0xf, dev->iobase + PCL816_MUX);
- /* select gain */
- outb(CR_RANGE(chanlist[0]), dev->iobase + PCL816_RANGE);
+ pcl816_ai_set_chan_range(dev, chan, range);
+ pcl816_ai_set_chan_scan(dev, chan, chan);
+
+ for (i = 0; i < insn->n; i++) {
+ pcl816_ai_clear_eoc(dev);
+ pcl816_ai_soft_trig(dev);
+
+ ret = comedi_timeout(dev, s, insn, pcl816_ai_eoc, 0);
+ if (ret)
+ break;
+
+ data[i] = pcl816_ai_get_sample(dev, s);
}
+ outb(PCL816_CTRL_DISABLE_TRIG, dev->iobase + PCL816_CTRL_REG);
+ pcl816_ai_clear_eoc(dev);
- udelay(1);
- /* select channel interval to scan */
- outb(devpriv->ai_act_chanlist[0] |
- (devpriv->ai_act_chanlist[seglen - 1] << 4),
- dev->iobase + PCL816_MUX);
+ return ret ? ret : insn->n;
+}
+
+static int pcl816_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = inb(dev->iobase + PCL816_DO_DI_LSB_REG) |
+ (inb(dev->iobase + PCL816_DO_DI_MSB_REG) << 8);
+
+ return insn->n;
+}
+
+static int pcl816_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ if (comedi_dio_update_state(s, data)) {
+ outb(s->state & 0xff, dev->iobase + PCL816_DO_DI_LSB_REG);
+ outb((s->state >> 8), dev->iobase + PCL816_DO_DI_MSB_REG);
+ }
+
+ data[1] = s->state;
+
+ return insn->n;
+}
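+
The DI/DO helpers above are reached from user space through comedilib's DIO calls. A minimal sketch, assuming the device node is /dev/comedi0 and that the digital input and output subdevices end up at indices 2 and 3 as arranged later in pcl816_attach() (all of which depend on the local setup):

/* hypothetical user-space example using comedilib; not part of this patch */
#include <stdio.h>
#include <comedilib.h>

int main(void)
{
        comedi_t *dev = comedi_open("/dev/comedi0");
        unsigned int bit;

        if (!dev)
                return 1;

        comedi_dio_write(dev, 3, 0, 1);         /* drive DO channel 0 high */
        comedi_dio_read(dev, 2, 0, &bit);       /* read back DI channel 0 */
        printf("DI channel 0 reads %u\n", bit);

        comedi_close(dev);
        return 0;
}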
+
+static void pcl816_reset(struct comedi_device *dev)
+{
+ unsigned long timer_base = dev->iobase + PCL816_TIMER_BASE;
+
+ outb(PCL816_CTRL_DISABLE_TRIG, dev->iobase + PCL816_CTRL_REG);
+ pcl816_ai_set_chan_range(dev, 0, 0);
+ pcl816_ai_clear_eoc(dev);
+
+ /* Stop pacer */
+ i8254_set_mode(timer_base, 0, 2, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 1, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 0, I8254_MODE0 | I8254_BINARY);
+
+ /* set all digital outputs low */
+ outb(0, dev->iobase + PCL816_DO_DI_LSB_REG);
+ outb(0, dev->iobase + PCL816_DO_DI_MSB_REG);
}
static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl816_board *board = comedi_board(dev);
struct pcl816_private *devpriv;
- int ret;
- unsigned int dma;
- unsigned long pages;
- /* int i; */
struct comedi_subdevice *s;
-
- ret = comedi_request_region(dev, it->options[0], board->io_range);
- if (ret)
- return ret;
-
- if (pcl816_check(dev->iobase)) {
- dev_err(dev->class_dev, "I can't detect board. FAIL!\n");
- return -EIO;
- }
+ int ret;
+ int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- if ((1 << it->options[1]) & board->IRQbits) {
- ret = request_irq(it->options[1], interrupt_pcl816, 0,
+ ret = comedi_request_region(dev, it->options[0], 0x10);
+ if (ret)
+ return ret;
+
+ /* we can use IRQ 2-7 for async command support */
+ if (it->options[1] >= 2 && it->options[1] <= 7) {
+ ret = request_irq(it->options[1], pcl816_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
- devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
- devpriv->int816_mode = 0; /* mode of irq */
-
- /* grab our DMA */
- dma = 0;
- devpriv->dma = dma;
- if (!dev->irq)
- goto no_dma; /* if we haven't IRQ, we can't use DMA */
-
- if (board->DMAbits != 0) { /* board support DMA */
- dma = it->options[2];
- if (dma < 1)
- goto no_dma; /* DMA disabled */
-
- if (((1 << dma) & board->DMAbits) == 0) {
- dev_err(dev->class_dev,
- "DMA is out of allowed range, FAIL!\n");
- return -EINVAL; /* Bad DMA */
- }
- ret = request_dma(dma, dev->board_name);
+ /* we need an IRQ to do DMA on channel 3 or 1 */
+ if (dev->irq && (it->options[2] == 3 || it->options[2] == 1)) {
+ ret = request_dma(it->options[2], dev->board_name);
if (ret) {
dev_err(dev->class_dev,
- "unable to allocate DMA %u, FAIL!\n", dma);
- return -EBUSY; /* DMA isn't free */
+ "unable to request DMA channel %d\n",
+ it->options[2]);
+ return -EBUSY;
}
+ devpriv->dma = it->options[2];
- devpriv->dma = dma;
- pages = 2; /* we need 16KB */
- devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
+ devpriv->dmapages = 2; /* we need 16KB */
+ devpriv->hwdmasize = (1 << devpriv->dmapages) * PAGE_SIZE;
- if (!devpriv->dmabuf[0]) {
- dev_err(dev->class_dev,
- "unable to allocate DMA buffer, FAIL!\n");
- /*
- * maybe experiment with try_to_free_pages()
- * will help ....
- */
- return -EBUSY; /* no buffer :-( */
- }
- devpriv->dmapages[0] = pages;
- devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
- devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
+ for (i = 0; i < 2; i++) {
+ unsigned long dmabuf;
- devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[1]) {
- dev_err(dev->class_dev,
- "unable to allocate DMA buffer, FAIL!\n");
- return -EBUSY;
+ dmabuf = __get_dma_pages(GFP_KERNEL, devpriv->dmapages);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ devpriv->dmabuf[i] = dmabuf;
+ devpriv->hwdmaptr[i] = virt_to_bus((void *)dmabuf);
}
- devpriv->dmapages[1] = pages;
- devpriv->hwdmaptr[1] = virt_to_bus((void *)devpriv->dmabuf[1]);
- devpriv->hwdmasize[1] = (1 << pages) * PAGE_SIZE;
}
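With dmapages = 2 the driver asks __get_dma_pages() for an order-2 allocation, so each ping-pong buffer is (1 << 2) * PAGE_SIZE bytes — 16 KiB with 4 KiB pages — which is what hwdmasize records. A tiny check of that arithmetic (page size assumed to be 4096):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;		/* assumption: 4 KiB pages */
	unsigned int dmapages = 2;		/* allocation order */
	unsigned long hwdmasize = (1UL << dmapages) * page_size;

	printf("%lu bytes per DMA buffer (%lu samples of 2 bytes)\n",
	       hwdmasize, hwdmasize / 2);	/* 16384 bytes, 8192 samples */
	return 0;
}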
-no_dma:
-
-/* if (board->n_aochan > 0)
- subdevs[1] = COMEDI_SUBD_AO;
- if (board->n_dichan > 0)
- subdevs[2] = COMEDI_SUBD_DI;
- if (board->n_dochan > 0)
- subdevs[3] = COMEDI_SUBD_DO;
-*/
-
- ret = comedi_alloc_subdevices(dev, 1);
+ ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
s = &dev->subdevices[0];
- if (board->n_aichan > 0) {
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_CMD_READ | SDF_DIFF;
- s->n_chan = board->n_aichan;
- s->maxdata = board->ai_maxdata;
- s->range_table = board->ai_range_type;
- s->insn_read = pcl816_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = board->ai_chanlist;
- s->do_cmdtest = pcl816_ai_cmdtest;
- s->do_cmd = pcl816_ai_cmd;
- s->poll = pcl816_ai_poll;
- s->cancel = pcl816_ai_cancel;
- }
- } else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_CMD_READ | SDF_DIFF;
+ s->n_chan = 16;
+ s->maxdata = board->ai_maxdata;
+ s->range_table = &range_pcl816;
+ s->insn_read = pcl816_ai_insn_read;
+ if (devpriv->dma) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = board->ai_chanlist;
+ s->do_cmdtest = pcl816_ai_cmdtest;
+ s->do_cmd = pcl816_ai_cmd;
+ s->poll = pcl816_ai_poll;
+ s->cancel = pcl816_ai_cancel;
}
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_UNUSED;
#if 0
-case COMEDI_SUBD_AO:
+ subdevs[1] = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->n_aochan;
+ s->n_chan = 1;
s->maxdata = board->ao_maxdata;
- s->len_chanlist = board->ao_chanlist;
- s->range_table = board->ao_range_type;
- break;
-
-case COMEDI_SUBD_DI:
- s->subdev_flags = SDF_READABLE;
- s->n_chan = board->n_dichan;
- s->maxdata = 1;
- s->len_chanlist = board->n_dichan;
- s->range_table = &range_digital;
- break;
-
-case COMEDI_SUBD_DO:
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_dochan;
- s->maxdata = 1;
- s->len_chanlist = board->n_dochan;
- s->range_table = &range_digital;
- break;
+ s->range_table = &range_pcl816;
#endif
+ /* Digital Input subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl816_di_insn_bits;
+
+ /* Digital Output subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl816_do_insn_bits;
+
pcl816_reset(dev);
return 0;
@@ -1007,34 +793,13 @@ static void pcl816_detach(struct comedi_device *dev)
if (devpriv->dma)
free_dma(devpriv->dma);
if (devpriv->dmabuf[0])
- free_pages(devpriv->dmabuf[0], devpriv->dmapages[0]);
+ free_pages(devpriv->dmabuf[0], devpriv->dmapages);
if (devpriv->dmabuf[1])
- free_pages(devpriv->dmabuf[1], devpriv->dmapages[1]);
+ free_pages(devpriv->dmabuf[1], devpriv->dmapages);
}
comedi_legacy_detach(dev);
}
-static const struct pcl816_board boardtypes[] = {
- {"pcl816", 8, 16, 10000, 1, 16, 16, &range_pcl816,
- &range_pcl816, PCLx1x_RANGE,
- 0x00fc, /* IRQ mask */
- 0x0a, /* DMA mask */
- 0xffff, /* 16-bit card */
- 0xffff, /* D/A maxdata */
- 1024,
- 1, /* ao chan list */
- I8254_OSC_BASE_10MHZ},
- {"pcl814b", 8, 16, 10000, 1, 16, 16, &range_pcl816,
- &range_pcl816, PCLx1x_RANGE,
- 0x00fc,
- 0x0a,
- 0x3fff, /* 14 bit card */
- 0x3fff,
- 1024,
- 1,
- I8254_OSC_BASE_10MHZ},
-};
-
static struct comedi_driver pcl816_driver = {
.driver_name = "pcl816",
.module = THIS_MODULE,
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index fa1758ad49d5..6463476ce45a 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -110,8 +110,6 @@ A word or two about DMA. Driver support DMA operations at two ways:
#include "comedi_fc.h"
#include "8253.h"
-/* #define PCL818_MODE13_AO 1 */
-
/* boards constants */
#define boardPCL818L 0
@@ -121,46 +119,38 @@ A word or two about DMA. Driver support DMA operations at two ways:
#define boardPCL818 4
#define boardPCL718 5
-/* IO space len */
-#define PCLx1x_RANGE 16
-/* IO space len if we use FIFO */
-#define PCLx1xFIFO_RANGE 32
-
-/* W: clear INT request */
-#define PCL818_CLRINT 8
-/* R: return status byte */
-#define PCL818_STATUS 8
-/* R: A/D high byte W: A/D range control */
-#define PCL818_RANGE 1
-/* R: next mux scan channel W: mux scan channel & range control pointer */
-#define PCL818_MUX 2
-/* R/W: operation control register */
-#define PCL818_CONTROL 9
-/* W: counter enable */
-#define PCL818_CNTENABLE 10
-
-/* R: low byte of A/D W: soft A/D trigger */
-#define PCL818_AD_LO 0
-/* R: high byte of A/D W: A/D range control */
-#define PCL818_AD_HI 1
-/* W: D/A low&high byte */
-#define PCL818_DA_LO 4
-#define PCL818_DA_HI 5
-/* R: low&high byte of DI */
-#define PCL818_DI_LO 3
-#define PCL818_DI_HI 11
-/* W: low&high byte of DO */
-#define PCL818_DO_LO 3
-#define PCL818_DO_HI 11
-/* W: PCL718 second D/A */
-#define PCL718_DA2_LO 6
-#define PCL718_DA2_HI 7
-/* counters */
-#define PCL818_CTR0 12
-#define PCL818_CTR1 13
-#define PCL818_CTR2 14
-/* W: counter control */
-#define PCL818_CTRCTL 15
+/*
+ * Register I/O map
+ */
+#define PCL818_AI_LSB_REG 0x00
+#define PCL818_AI_MSB_REG 0x01
+#define PCL818_RANGE_REG 0x01
+#define PCL818_MUX_REG 0x02
+#define PCL818_MUX_SCAN(_first, _last) (((_last) << 4) | (_first))
+#define PCL818_DO_DI_LSB_REG 0x03
+#define PCL818_AO_LSB_REG(x) (0x04 + ((x) * 2))
+#define PCL818_AO_MSB_REG(x) (0x05 + ((x) * 2))
+#define PCL818_STATUS_REG 0x08
+#define PCL818_STATUS_NEXT_CHAN_MASK (0xf << 0)
+#define PCL818_STATUS_INT (1 << 4)
+#define PCL818_STATUS_MUX (1 << 5)
+#define PCL818_STATUS_UNI (1 << 6)
+#define PCL818_STATUS_EOC (1 << 7)
+#define PCL818_CTRL_REG 0x09
+#define PCL818_CTRL_DISABLE_TRIG (0 << 0)
+#define PCL818_CTRL_SOFT_TRIG (1 << 0)
+#define PCL818_CTRL_EXT_TRIG (2 << 0)
+#define PCL818_CTRL_PACER_TRIG (3 << 0)
+#define PCL818_CTRL_DMAE (1 << 2)
+#define PCL818_CTRL_IRQ(x) ((x) << 4)
+#define PCL818_CTRL_INTE (1 << 7)
+#define PCL818_CNTENABLE_REG 0x0a
+#define PCL818_CNTENABLE_PACER_ENA (0 << 0)
+#define PCL818_CNTENABLE_PACER_TRIG0 (1 << 0)
+#define PCL818_CNTENABLE_CNT0_EXT_CLK (0 << 1)
+#define PCL818_CNTENABLE_CNT0_INT_CLK (1 << 1)
+#define PCL818_DO_DI_MSB_REG 0x0b
+#define PCL818_TIMER_BASE 0x0c
/* W: fifo enable/disable */
#define PCL818_FI_ENABLE 6
@@ -172,19 +162,7 @@ A word or two about DMA. Driver support DMA operations at two ways:
#define PCL818_FI_STATUS 25
/* R: one record from FIFO */
#define PCL818_FI_DATALO 23
-#define PCL818_FI_DATAHI 23
-
-/* type of interrupt handler */
-#define INT_TYPE_AI1_INT 1
-#define INT_TYPE_AI1_DMA 2
-#define INT_TYPE_AI1_FIFO 3
-#define INT_TYPE_AI3_INT 4
-#define INT_TYPE_AI3_DMA 5
-#define INT_TYPE_AI3_FIFO 6
-#ifdef PCL818_MODE13_AO
-#define INT_TYPE_AO1_INT 7
-#define INT_TYPE_AO3_INT 8
-#endif
+#define PCL818_FI_DATAHI 24
#define MAGIC_DMA_WORD 0x5a5a
@@ -262,630 +240,439 @@ static const struct comedi_lrange range718_unipolar1 = {
};
struct pcl818_board {
+ const char *name;
+ unsigned int ns_min;
+ int n_aochan;
+ const struct comedi_lrange *ai_range_type;
+ unsigned int has_dma:1;
+ unsigned int has_fifo:1;
+ unsigned int is_818:1;
+};
- const char *name; /* driver name */
- int n_ranges; /* len of range list */
- int n_aichan_se; /* num of A/D chans in single ended mode */
- int n_aichan_diff; /* num of A/D chans in diferencial mode */
- unsigned int ns_min; /* minimal allowed delay between samples (in ns) */
- int n_aochan; /* num of D/A chans */
- int n_dichan; /* num of DI chans */
- int n_dochan; /* num of DO chans */
- const struct comedi_lrange *ai_range_type; /* default A/D rangelist */
- const struct comedi_lrange *ao_range_type; /* default D/A rangelist */
- unsigned int io_range; /* len of IO space */
- unsigned int IRQbits; /* allowed interrupts */
- unsigned int DMAbits; /* allowed DMA chans */
- int ai_maxdata; /* maxdata for A/D */
- int ao_maxdata; /* maxdata for D/A */
- unsigned char fifo; /* 1=board has FIFO */
- int is_818;
+static const struct pcl818_board boardtypes[] = {
+ {
+ .name = "pcl818l",
+ .ns_min = 25000,
+ .n_aochan = 1,
+ .ai_range_type = &range_pcl818l_l_ai,
+ .has_dma = 1,
+ .is_818 = 1,
+ }, {
+ .name = "pcl818h",
+ .ns_min = 10000,
+ .n_aochan = 1,
+ .ai_range_type = &range_pcl818h_ai,
+ .has_dma = 1,
+ .is_818 = 1,
+ }, {
+ .name = "pcl818hd",
+ .ns_min = 10000,
+ .n_aochan = 1,
+ .ai_range_type = &range_pcl818h_ai,
+ .has_dma = 1,
+ .has_fifo = 1,
+ .is_818 = 1,
+ }, {
+ .name = "pcl818hg",
+ .ns_min = 10000,
+ .n_aochan = 1,
+ .ai_range_type = &range_pcl818hg_ai,
+ .has_dma = 1,
+ .has_fifo = 1,
+ .is_818 = 1,
+ }, {
+ .name = "pcl818",
+ .ns_min = 10000,
+ .n_aochan = 2,
+ .ai_range_type = &range_pcl818h_ai,
+ .has_dma = 1,
+ .is_818 = 1,
+ }, {
+ .name = "pcl718",
+ .ns_min = 16000,
+ .n_aochan = 2,
+ .ai_range_type = &range_unipolar5,
+ .has_dma = 1,
+ }, {
+ .name = "pcm3718",
+ .ns_min = 10000,
+ .ai_range_type = &range_pcl818h_ai,
+ .has_dma = 1,
+ .is_818 = 1,
+ },
};
struct pcl818_private {
-
unsigned int dma; /* used DMA, 0=don't use DMA */
- unsigned int io_range;
+ unsigned int dmapages;
+ unsigned int hwdmasize;
unsigned long dmabuf[2]; /* pointers to begin of DMA buffers */
- unsigned int dmapages[2]; /* len of DMA buffers in PAGE_SIZEs */
unsigned int hwdmaptr[2]; /* hardware address of DMA buffers */
- unsigned int hwdmasize[2]; /* len of DMA buffers in Bytes */
int next_dma_buf; /* which DMA buffer will be used next round */
 long dma_runs_to_end; /* how many DMA transfers we must perform to reach the end of the record */
unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */
- unsigned char neverending_ai; /* if=1, then we do neverending record (you must use cancel()) */
 unsigned int ns_min; /* minimal allowed delay between samples (in ns) for this card */
 int i8253_osc_base; /* 1/frequency of the on-board oscillator in ns */
- int irq_blocked; /* 1=IRQ now uses any subdev */
- int irq_was_now_closed; /* when IRQ finish, there's stored int818_mode for last interrupt */
- int ai_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */
- struct comedi_subdevice *last_int_sub; /* ptr to subdevice which now finish */
int ai_act_scan; /* how many scans we finished */
int ai_act_chan; /* actual position in actual scan */
unsigned int act_chanlist[16]; /* MUX setting for actual AI operations */
unsigned int act_chanlist_len; /* how long is actual MUX list */
unsigned int act_chanlist_pos; /* actual position in MUX list */
- unsigned int ai_scans; /* len of scanlist */
- unsigned int ai_n_chan; /* how many channels is measured */
- unsigned int *ai_chanlist; /* actaul chanlist */
- unsigned int ai_flags; /* flaglist */
unsigned int ai_data_len; /* len of data buffer */
- unsigned int ai_timer1; /* timers */
- unsigned int ai_timer2;
- unsigned char usefifo; /* 1=use fifo */
unsigned int ao_readback[2];
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int usefifo:1;
+ unsigned int ai_cmd_running:1;
+ unsigned int ai_cmd_canceled:1;
};
-static const unsigned int muxonechan[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, /* used for gain list programming */
- 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
-};
-
-/*
-==============================================================================
-*/
-static void setup_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int n_chan,
- unsigned int seglen);
-static int check_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int n_chan);
-
-static int pcl818_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s);
-static void start_pacer(struct comedi_device *dev, int mode,
- unsigned int divisor1, unsigned int divisor2);
-
-/*
-==============================================================================
- ANALOG INPUT MODE0, 818 cards, slow version
-*/
-static int pcl818_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl818_start_pacer(struct comedi_device *dev, bool load_counters)
{
- int n;
- int timeout;
+ struct pcl818_private *devpriv = dev->private;
+ unsigned long timer_base = dev->iobase + PCL818_TIMER_BASE;
- /* software trigger, DMA and INT off */
- outb(0, dev->iobase + PCL818_CONTROL);
+ i8254_set_mode(timer_base, 0, 2, I8254_MODE2 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 1, I8254_MODE2 | I8254_BINARY);
+ udelay(1);
- /* select channel */
- outb(muxonechan[CR_CHAN(insn->chanspec)], dev->iobase + PCL818_MUX);
+ if (load_counters) {
+ i8254_write(timer_base, 0, 2, devpriv->divisor2);
+ i8254_write(timer_base, 0, 1, devpriv->divisor1);
+ }
+}
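divisor1/divisor2 are produced earlier by i8253_cascade_ns_to_timer(): the requested convert period is divided by the oscillator base period and split across the two cascaded 16-bit counters, with both divisors kept at 2 or more because, per the old comment, the PCL718/818 misbehaves with a divisor of 1. A brute-force approximation of that split, assuming a 10 MHz oscillator (100 ns base); the real helper handles the rounding flags more carefully:

#include <stdio.h>

/* split ns into two cascaded 16-bit divisors; returns the achieved period */
static unsigned int cascade_ns_to_timer(unsigned int osc_base_ns,
					unsigned int *d1, unsigned int *d2,
					unsigned int ns)
{
	unsigned long target = ns / osc_base_ns;
	unsigned long best_err = ~0UL;
	unsigned long div1, div2;

	for (div1 = 2; div1 <= 65536; div1++) {
		div2 = target / div1;
		if (div2 < 2)
			break;
		if (div2 > 65536)
			continue;
		unsigned long err = target - div1 * div2;

		if (err < best_err) {
			best_err = err;
			*d1 = div1;
			*d2 = div2;
		}
	}
	return *d1 * *d2 * osc_base_ns;
}

int main(void)
{
	unsigned int d1 = 2, d2 = 2;
	unsigned int ns = cascade_ns_to_timer(100, &d1, &d2, 1000000);

	printf("1 ms -> divisor1=%u divisor2=%u (%u ns)\n", d1, d2, ns);
	return 0;
}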
- /* select gain */
- outb(CR_RANGE(insn->chanspec), dev->iobase + PCL818_RANGE);
+static void pcl818_ai_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pcl818_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int flags;
+ unsigned int bytes;
- for (n = 0; n < insn->n; n++) {
+ disable_dma(devpriv->dma); /* disable dma */
+ bytes = devpriv->hwdmasize;
+ if (cmd->stop_src == TRIG_COUNT) {
+ bytes = cmd->chanlist_len * cmd->stop_arg * sizeof(short);
+ devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize;
+ devpriv->last_dma_run = bytes % devpriv->hwdmasize;
+ devpriv->dma_runs_to_end--;
+ if (devpriv->dma_runs_to_end >= 0)
+ bytes = devpriv->hwdmasize;
+ }
- /* clear INT (conversion end) flag */
- outb(0, dev->iobase + PCL818_CLRINT);
+ devpriv->next_dma_buf = 0;
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ flags = claim_dma_lock();
+ clear_dma_ff(devpriv->dma);
+ set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
+ set_dma_count(devpriv->dma, bytes);
+ release_dma_lock(flags);
+ enable_dma(devpriv->dma);
+}
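For a TRIG_COUNT command the total transfer is chopped into full buffer-sized DMA runs plus one short final run: dma_runs_to_end counts the full buffers still to come after the first and last_dma_run the leftover bytes. A quick standalone rehearsal of that bookkeeping with example command parameters:

#include <stdio.h>

int main(void)
{
	unsigned int chanlist_len = 4, stop_arg = 10000;	/* example cmd */
	unsigned int hwdmasize = 16384;				/* 16 KiB buffer */
	unsigned int bytes = chanlist_len * stop_arg * sizeof(short);
	int runs = bytes / hwdmasize - 1;	/* full buffers after the first */
	unsigned int last = bytes % hwdmasize;	/* short final transfer */

	printf("%u bytes total: first run %u, then %d full runs, last %u\n",
	       bytes, runs >= 0 ? hwdmasize : bytes, runs, last);
	return 0;
}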
- /* start conversion */
- outb(0, dev->iobase + PCL818_AD_LO);
+static void pcl818_ai_setup_next_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pcl818_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned long flags;
- timeout = 100;
- while (timeout--) {
- if (inb(dev->iobase + PCL818_STATUS) & 0x10)
- goto conv_finish;
- udelay(1);
- }
- comedi_error(dev, "A/D insn timeout");
- /* clear INT (conversion end) flag */
- outb(0, dev->iobase + PCL818_CLRINT);
- return -EIO;
-
-conv_finish:
- data[n] = ((inb(dev->iobase + PCL818_AD_HI) << 4) |
- (inb(dev->iobase + PCL818_AD_LO) >> 4));
+ disable_dma(devpriv->dma);
+ devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
+ if (devpriv->dma_runs_to_end > -1 || cmd->stop_src == TRIG_NONE) {
+ /* switch dma bufs */
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ flags = claim_dma_lock();
+ set_dma_addr(devpriv->dma,
+ devpriv->hwdmaptr[devpriv->next_dma_buf]);
+ if (devpriv->dma_runs_to_end || cmd->stop_src == TRIG_NONE)
+ set_dma_count(devpriv->dma, devpriv->hwdmasize);
+ else
+ set_dma_count(devpriv->dma, devpriv->last_dma_run);
+ release_dma_lock(flags);
+ enable_dma(devpriv->dma);
}
- return n;
+ devpriv->dma_runs_to_end--;
}
-/*
-==============================================================================
- ANALOG OUTPUT MODE0, 818 cards
- only one sample per call is supported
-*/
-static int pcl818_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl818_ai_set_chan_range(struct comedi_device *dev,
+ unsigned int chan,
+ unsigned int range)
{
- struct pcl818_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec);
-
- for (n = 0; n < insn->n; n++)
- data[n] = devpriv->ao_readback[chan];
+ outb(chan, dev->iobase + PCL818_MUX_REG);
+ outb(range, dev->iobase + PCL818_RANGE_REG);
+}
- return n;
+static void pcl818_ai_set_chan_scan(struct comedi_device *dev,
+ unsigned int first_chan,
+ unsigned int last_chan)
+{
+ outb(PCL818_MUX_SCAN(first_chan, last_chan),
+ dev->iobase + PCL818_MUX_REG);
}
-static int pcl818_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl818_ai_setup_chanlist(struct comedi_device *dev,
+ unsigned int *chanlist,
+ unsigned int seglen)
{
struct pcl818_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec);
-
- for (n = 0; n < insn->n; n++) {
- devpriv->ao_readback[chan] = data[n];
- outb((data[n] & 0x000f) << 4, dev->iobase +
- (chan ? PCL718_DA2_LO : PCL818_DA_LO));
- outb((data[n] & 0x0ff0) >> 4, dev->iobase +
- (chan ? PCL718_DA2_HI : PCL818_DA_HI));
+ unsigned int first_chan = CR_CHAN(chanlist[0]);
+ unsigned int last_chan;
+ unsigned int range;
+ int i;
+
+ devpriv->act_chanlist_len = seglen;
+ devpriv->act_chanlist_pos = 0;
+
+ /* store range list to card */
+ for (i = 0; i < seglen; i++) {
+ last_chan = CR_CHAN(chanlist[i]);
+ range = CR_RANGE(chanlist[i]);
+
+ devpriv->act_chanlist[i] = last_chan;
+
+ pcl818_ai_set_chan_range(dev, last_chan, range);
}
- return n;
+ udelay(1);
+
+ pcl818_ai_set_chan_scan(dev, first_chan, last_chan);
}
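PCL818_MUX_SCAN() packs the scan interval into one byte — first channel in the low nibble, last channel in the high nibble — so once each entry's gain has been written the auto-scan range is armed with a single write. A small illustration using a made-up three-channel list:

#include <stdio.h>

#define PCL818_MUX_SCAN(first, last)	(((last) << 4) | (first))

int main(void)
{
	unsigned int chanlist[] = { 2, 3, 4 };	/* channels only, ranges omitted */
	unsigned int n = sizeof(chanlist) / sizeof(chanlist[0]);

	/* after writing each channel/range pair, arm the scan interval */
	printf("MUX scan byte: 0x%02x\n",
	       PCL818_MUX_SCAN(chanlist[0], chanlist[n - 1]));	/* 0x42 */
	return 0;
}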
-/*
-==============================================================================
- DIGITAL INPUT MODE0, 818 cards
+static void pcl818_ai_clear_eoc(struct comedi_device *dev)
+{
+ /* writing any value clears the interrupt request */
+ outb(0, dev->iobase + PCL818_STATUS_REG);
+}
- only one sample per call is supported
-*/
-static int pcl818_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl818_ai_soft_trig(struct comedi_device *dev)
{
- data[1] = inb(dev->iobase + PCL818_DI_LO) |
- (inb(dev->iobase + PCL818_DI_HI) << 8);
+ /* writing any value triggers a software conversion */
+ outb(0, dev->iobase + PCL818_AI_LSB_REG);
+}
- return insn->n;
+static unsigned int pcl818_ai_get_fifo_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int *chan)
+{
+ unsigned int val;
+
+ val = inb(dev->iobase + PCL818_FI_DATALO);
+ val |= (inb(dev->iobase + PCL818_FI_DATAHI) << 8);
+
+ if (chan)
+ *chan = val & 0xf;
+
+ return (val >> 4) & s->maxdata;
}
-static int pcl818_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static unsigned int pcl818_ai_get_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int *chan)
{
- if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase + PCL818_DO_LO);
- outb((s->state >> 8), dev->iobase + PCL818_DO_HI);
- }
+ unsigned int val;
- data[1] = s->state;
+ val = inb(dev->iobase + PCL818_AI_MSB_REG) << 8;
+ val |= inb(dev->iobase + PCL818_AI_LSB_REG);
- return insn->n;
+ if (chan)
+ *chan = val & 0xf;
+
+ return (val >> 4) & s->maxdata;
}
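Both read paths return a 16-bit word whose low nibble is the channel number that produced the sample and whose upper 12 bits are the conversion itself; that channel tag is what makes the dropout check below possible. A standalone decode of that layout, assuming a 12-bit maxdata:

#include <stdio.h>

/* decode one PCL818-style A/D word: low nibble = channel, rest = sample */
static unsigned int ai_decode(unsigned int lsb, unsigned int msb,
			      unsigned int *chan)
{
	unsigned int val = (msb << 8) | lsb;

	*chan = val & 0xf;
	return (val >> 4) & 0x0fff;	/* 12-bit sample */
}

int main(void)
{
	unsigned int chan;
	unsigned int sample = ai_decode(0x35, 0x8a, &chan);	/* example bytes */

	printf("chan %u, sample 0x%03x\n", chan, sample);	/* chan 5, 0x8a3 */
	return 0;
}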
-/*
-==============================================================================
- analog input interrupt mode 1 & 3, 818 cards
- one sample per interrupt version
-*/
-static irqreturn_t interrupt_pcl818_ai_mode13_int(int irq, void *d)
+static int pcl818_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- struct comedi_device *dev = d;
- struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned char low;
- int timeout = 50; /* wait max 50us */
+ unsigned int status;
- while (timeout--) {
- if (inb(dev->iobase + PCL818_STATUS) & 0x10)
- goto conv_finish;
- udelay(1);
- }
- outb(0, dev->iobase + PCL818_STATUS); /* clear INT request */
- comedi_error(dev, "A/D mode1/3 IRQ without DRDY!");
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ status = inb(dev->iobase + PCL818_STATUS_REG);
+ if (status & PCL818_STATUS_INT)
+ return 0;
+ return -EBUSY;
+}
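pcl818_ai_eoc() is shaped as a comedi_timeout() callback: it samples the status register once and answers 0 (done) or -EBUSY (poll again), leaving the retry loop and overall timeout to the core helper. A userspace-flavoured sketch of that contract, with a fake status variable standing in for inb(); the real helper's timeout and scheduling behaviour are not modelled:

#include <errno.h>
#include <stdio.h>

static int fake_status;		/* stands in for inb(PCL818_STATUS_REG) */

/* callback: 0 when the conversion is done, -EBUSY to keep polling */
static int ai_eoc(void)
{
	return (fake_status & 0x10) ? 0 : -EBUSY;
}

/* crude stand-in for comedi_timeout(): poll cb up to 'tries' times */
static int poll_timeout(int (*cb)(void), int tries)
{
	while (tries--) {
		int ret = cb();

		if (ret != -EBUSY)
			return ret;	/* done (0) or a hard error */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	fake_status = 0x10;	/* pretend the EOC/INT bit went high */
	printf("eoc poll: %d\n", poll_timeout(ai_eoc, 100));
	return 0;
}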
-conv_finish:
- low = inb(dev->iobase + PCL818_AD_LO);
- comedi_buf_put(s->async, ((inb(dev->iobase + PCL818_AD_HI) << 4) | (low >> 4))); /* get one sample */
- outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
+static bool pcl818_ai_dropout(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chan)
+{
+ struct pcl818_private *devpriv = dev->private;
+ unsigned int expected_chan;
- if ((low & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
+ expected_chan = devpriv->act_chanlist[devpriv->act_chanlist_pos];
+ if (chan != expected_chan) {
dev_dbg(dev->class_dev,
- "A/D mode1/3 IRQ - channel dropout %x!=%x !\n",
- (low & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos]);
- pcl818_ai_cancel(dev, s);
+ "A/D mode1/3 %s - channel dropout %d!=%d !\n",
+ (devpriv->dma) ? "DMA" :
+ (devpriv->usefifo) ? "FIFO" : "IRQ",
+ chan, expected_chan);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ return true;
}
+ return false;
+}
+
+static bool pcl818_ai_next_chan(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pcl818_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+
+ s->async->events |= COMEDI_CB_BLOCK;
+
devpriv->act_chanlist_pos++;
if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
+ if (s->async->cur_chan >= cmd->chanlist_len) {
s->async->cur_chan = 0;
devpriv->ai_act_scan--;
+ s->async->events |= COMEDI_CB_EOS;
}
- if (!devpriv->neverending_ai) {
- if (devpriv->ai_act_scan == 0) { /* all data sampled */
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- }
+ if (cmd->stop_src == TRIG_COUNT && devpriv->ai_act_scan == 0) {
+ /* all data sampled */
+ s->async->events |= COMEDI_CB_EOA;
+ return false;
}
- comedi_event(dev, s);
- return IRQ_HANDLED;
+
+ return true;
}
-/*
-==============================================================================
- analog input dma mode 1 & 3, 818 cards
-*/
-static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
+static void pcl818_handle_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int chan;
+ unsigned int val;
+
+ if (pcl818_ai_eoc(dev, s, NULL, 0)) {
+ comedi_error(dev, "A/D mode1/3 IRQ without DRDY!");
+ s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ return;
+ }
+
+ val = pcl818_ai_get_sample(dev, s, &chan);
+
+ if (pcl818_ai_dropout(dev, s, chan))
+ return;
+
+ comedi_buf_put(s->async, val);
+
+ pcl818_ai_next_chan(dev, s);
+}
+
+static void pcl818_handle_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- int i, len, bufptr;
- unsigned long flags;
unsigned short *ptr;
+ unsigned int chan;
+ unsigned int val;
+ int i, len, bufptr;
- disable_dma(devpriv->dma);
- devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
- if ((devpriv->dma_runs_to_end) > -1 || devpriv->neverending_ai) { /* switch dma bufs */
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- flags = claim_dma_lock();
- set_dma_addr(devpriv->dma,
- devpriv->hwdmaptr[devpriv->next_dma_buf]);
- if (devpriv->dma_runs_to_end || devpriv->neverending_ai) {
- set_dma_count(devpriv->dma,
- devpriv->hwdmasize[devpriv->
- next_dma_buf]);
- } else {
- set_dma_count(devpriv->dma, devpriv->last_dma_run);
- }
- release_dma_lock(flags);
- enable_dma(devpriv->dma);
- }
+ pcl818_ai_setup_next_dma(dev, s);
- devpriv->dma_runs_to_end--;
- outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
ptr = (unsigned short *)devpriv->dmabuf[1 - devpriv->next_dma_buf];
- len = devpriv->hwdmasize[0] >> 1;
+ len = devpriv->hwdmasize >> 1;
bufptr = 0;
for (i = 0; i < len; i++) {
- if ((ptr[bufptr] & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
- dev_dbg(dev->class_dev,
- "A/D mode1/3 DMA - channel dropout %d(card)!=%d(chanlist) at %d !\n",
- (ptr[bufptr] & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos],
- devpriv->act_chanlist_pos);
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
- }
+ val = ptr[bufptr++];
+ chan = val & 0xf;
+ val = (val >> 4) & s->maxdata;
- comedi_buf_put(s->async, ptr[bufptr++] >> 4); /* get one sample */
+ if (pcl818_ai_dropout(dev, s, chan))
+ break;
- devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
- devpriv->act_chanlist_pos = 0;
+ comedi_buf_put(s->async, val);
- s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan--;
- }
-
- if (!devpriv->neverending_ai)
- if (devpriv->ai_act_scan == 0) { /* all data sampled */
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return IRQ_HANDLED;
- }
+ if (!pcl818_ai_next_chan(dev, s))
+ break;
}
-
- if (len > 0)
- comedi_event(dev, s);
- return IRQ_HANDLED;
}
-/*
-==============================================================================
- analog input interrupt mode 1 & 3, 818HD/HG cards
-*/
-static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
+static void pcl818_handle_fifo(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct comedi_device *dev = d;
- struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
+ unsigned int status;
+ unsigned int chan;
+ unsigned int val;
int i, len;
- unsigned char lo;
- outb(0, dev->iobase + PCL818_FI_INTCLR); /* clear fifo int request */
+ status = inb(dev->iobase + PCL818_FI_STATUS);
- lo = inb(dev->iobase + PCL818_FI_STATUS);
-
- if (lo & 4) {
+ if (status & 4) {
comedi_error(dev, "A/D mode1/3 FIFO overflow!");
- pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ return;
}
- if (lo & 1) {
+ if (status & 1) {
comedi_error(dev, "A/D mode1/3 FIFO interrupt without data!");
- pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ return;
}
- if (lo & 2)
+ if (status & 2)
len = 512;
else
len = 0;
for (i = 0; i < len; i++) {
- lo = inb(dev->iobase + PCL818_FI_DATALO);
- if ((lo & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
- dev_dbg(dev->class_dev,
- "A/D mode1/3 FIFO - channel dropout %d!=%d !\n",
- (lo & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos]);
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
- }
+ val = pcl818_ai_get_fifo_sample(dev, s, &chan);
- comedi_buf_put(s->async, (lo >> 4) | (inb(dev->iobase + PCL818_FI_DATAHI) << 4)); /* get one sample */
-
- devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
- devpriv->act_chanlist_pos = 0;
+ if (pcl818_ai_dropout(dev, s, chan))
+ break;
- s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan--;
- }
+ comedi_buf_put(s->async, val);
- if (!devpriv->neverending_ai)
- if (devpriv->ai_act_scan == 0) { /* all data sampled */
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return IRQ_HANDLED;
- }
+ if (!pcl818_ai_next_chan(dev, s))
+ break;
}
-
- if (len > 0)
- comedi_event(dev, s);
- return IRQ_HANDLED;
}
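Read back from the handler above, the FIFO status byte is treated as: bit 2 set = overflow, bit 0 set = interrupt with no data, bit 1 set = half-full, in which case a fixed burst of 512 samples is drained. A condensed restatement of that decision table (bit meanings inferred from the handler, not taken from a datasheet):

#include <stdio.h>

/* how many samples to drain for a given FIFO status byte, <0 on error */
static int fifo_burst_len(unsigned int status)
{
	if (status & 0x04)
		return -1;	/* FIFO overflow: abort the command */
	if (status & 0x01)
		return -2;	/* interrupt without data: abort as well */
	return (status & 0x02) ? 512 : 0;	/* half-full -> 512 samples */
}

int main(void)
{
	printf("%d %d %d\n",
	       fifo_burst_len(0x02), fifo_burst_len(0x04), fifo_burst_len(0x00));
	return 0;
}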
-/*
-==============================================================================
- INT procedure
-*/
-static irqreturn_t interrupt_pcl818(int irq, void *d)
+static irqreturn_t pcl818_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
- if (!dev->attached) {
- comedi_error(dev, "premature interrupt");
+ if (!dev->attached || !devpriv->ai_cmd_running) {
+ pcl818_ai_clear_eoc(dev);
return IRQ_HANDLED;
}
- if (devpriv->irq_blocked && devpriv->irq_was_now_closed) {
- if ((devpriv->neverending_ai || (!devpriv->neverending_ai &&
- devpriv->ai_act_scan > 0)) &&
- (devpriv->ai_mode == INT_TYPE_AI1_DMA ||
- devpriv->ai_mode == INT_TYPE_AI3_DMA)) {
- /* The cleanup from ai_cancel() has been delayed
- until now because the card doesn't seem to like
- being reprogrammed while a DMA transfer is in
- progress.
- */
- devpriv->ai_act_scan = 0;
- devpriv->neverending_ai = 0;
- pcl818_ai_cancel(dev, dev->read_subdev);
- }
-
- outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
-
+ if (devpriv->ai_cmd_canceled) {
+ /*
+ * The cleanup from ai_cancel() has been delayed
+ * until now because the card doesn't seem to like
+ * being reprogrammed while a DMA transfer is in
+ * progress.
+ */
+ devpriv->ai_act_scan = 0;
+ s->cancel(dev, s);
return IRQ_HANDLED;
}
- switch (devpriv->ai_mode) {
- case INT_TYPE_AI1_DMA:
- case INT_TYPE_AI3_DMA:
- return interrupt_pcl818_ai_mode13_dma(irq, d);
- case INT_TYPE_AI1_INT:
- case INT_TYPE_AI3_INT:
- return interrupt_pcl818_ai_mode13_int(irq, d);
- case INT_TYPE_AI1_FIFO:
- case INT_TYPE_AI3_FIFO:
- return interrupt_pcl818_ai_mode13_fifo(irq, d);
-#ifdef PCL818_MODE13_AO
- case INT_TYPE_AO1_INT:
- case INT_TYPE_AO3_INT:
- return interrupt_pcl818_ao_mode13_int(irq, d);
-#endif
- default:
- break;
- }
-
- outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
-
- if (!devpriv->irq_blocked || !devpriv->ai_mode) {
- comedi_error(dev, "bad IRQ!");
- return IRQ_NONE;
- }
-
- comedi_error(dev, "IRQ from unknown source!");
- return IRQ_NONE;
-}
-
-/*
-==============================================================================
- ANALOG INPUT MODE 1 or 3 DMA , 818 cards
-*/
-static void pcl818_ai_mode13dma_int(int mode, struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl818_private *devpriv = dev->private;
- unsigned int flags;
- unsigned int bytes;
-
- disable_dma(devpriv->dma); /* disable dma */
- bytes = devpriv->hwdmasize[0];
- if (!devpriv->neverending_ai) {
- bytes = devpriv->ai_n_chan * devpriv->ai_scans * sizeof(short); /* how many */
- devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize[0]; /* how many DMA pages we must fiil */
- devpriv->last_dma_run = bytes % devpriv->hwdmasize[0]; /* on last dma transfer must be moved */
- devpriv->dma_runs_to_end--;
- if (devpriv->dma_runs_to_end >= 0)
- bytes = devpriv->hwdmasize[0];
- }
-
- devpriv->next_dma_buf = 0;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma);
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
- set_dma_count(devpriv->dma, bytes);
- release_dma_lock(flags);
- enable_dma(devpriv->dma);
-
- if (mode == 1) {
- devpriv->ai_mode = INT_TYPE_AI1_DMA;
- outb(0x87 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Pacer+IRQ+DMA */
- } else {
- devpriv->ai_mode = INT_TYPE_AI3_DMA;
- outb(0x86 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Ext trig+IRQ+DMA */
- }
-}
-
-/*
-==============================================================================
- ANALOG INPUT MODE 1 or 3, 818 cards
-*/
-static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl818_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int divisor1 = 0, divisor2 = 0;
- unsigned int seglen;
-
- if (devpriv->irq_blocked)
- return -EBUSY;
-
- start_pacer(dev, -1, 0, 0); /* stop pacer */
-
- seglen = check_channel_list(dev, s, devpriv->ai_chanlist,
- devpriv->ai_n_chan);
- if (seglen < 1)
- return -EINVAL;
- setup_channel_list(dev, s, devpriv->ai_chanlist,
- devpriv->ai_n_chan, seglen);
-
- udelay(1);
-
- devpriv->ai_act_scan = devpriv->ai_scans;
- devpriv->ai_act_chan = 0;
- devpriv->irq_blocked = 1;
- devpriv->irq_was_now_closed = 0;
- devpriv->neverending_ai = 0;
- devpriv->act_chanlist_pos = 0;
- devpriv->dma_runs_to_end = 0;
-
- if ((devpriv->ai_scans == 0) || (devpriv->ai_scans == -1))
- devpriv->neverending_ai = 1; /* well, user want neverending */
-
- if (mode == 1) {
- i8253_cascade_ns_to_timer(devpriv->i8253_osc_base,
- &divisor1, &divisor2,
- &cmd->convert_arg,
- TRIG_ROUND_NEAREST);
- if (divisor1 == 1) { /* PCL718/818 crash if any divisor is set to 1 */
- divisor1 = 2;
- divisor2 /= 2;
- }
- if (divisor2 == 1) {
- divisor2 = 2;
- divisor1 /= 2;
- }
- }
-
- outb(0, dev->iobase + PCL818_CNTENABLE); /* enable pacer */
-
- switch (devpriv->dma) {
- case 1: /* DMA */
- case 3:
- pcl818_ai_mode13dma_int(mode, dev, s);
- break;
- case 0:
- if (!devpriv->usefifo) {
- /* IRQ */
- if (mode == 1) {
- devpriv->ai_mode = INT_TYPE_AI1_INT;
- /* Pacer+IRQ */
- outb(0x83 | (dev->irq << 4),
- dev->iobase + PCL818_CONTROL);
- } else {
- devpriv->ai_mode = INT_TYPE_AI3_INT;
- /* Ext trig+IRQ */
- outb(0x82 | (dev->irq << 4),
- dev->iobase + PCL818_CONTROL);
- }
- } else {
- /* FIFO */
- /* enable FIFO */
- outb(1, dev->iobase + PCL818_FI_ENABLE);
- if (mode == 1) {
- devpriv->ai_mode = INT_TYPE_AI1_FIFO;
- /* Pacer */
- outb(0x03, dev->iobase + PCL818_CONTROL);
- } else {
- devpriv->ai_mode = INT_TYPE_AI3_FIFO;
- outb(0x02, dev->iobase + PCL818_CONTROL);
- }
- }
- }
-
- start_pacer(dev, mode, divisor1, divisor2);
-
- return 0;
-}
+ if (devpriv->dma)
+ pcl818_handle_dma(dev, s);
+ else if (devpriv->usefifo)
+ pcl818_handle_fifo(dev, s);
+ else
+ pcl818_handle_eoc(dev, s);
-/*
-==============================================================================
- Start/stop pacer onboard pacer
-*/
-static void start_pacer(struct comedi_device *dev, int mode,
- unsigned int divisor1, unsigned int divisor2)
-{
- outb(0xb4, dev->iobase + PCL818_CTRCTL);
- outb(0x74, dev->iobase + PCL818_CTRCTL);
- udelay(1);
+ pcl818_ai_clear_eoc(dev);
- if (mode == 1) {
- outb(divisor2 & 0xff, dev->iobase + PCL818_CTR2);
- outb((divisor2 >> 8) & 0xff, dev->iobase + PCL818_CTR2);
- outb(divisor1 & 0xff, dev->iobase + PCL818_CTR1);
- outb((divisor1 >> 8) & 0xff, dev->iobase + PCL818_CTR1);
- }
+ cfc_handle_events(dev, s);
+ return IRQ_HANDLED;
}
-/*
-==============================================================================
- Check if channel list from user is builded correctly
- If it's ok, then program scan/gain logic
-*/
static int check_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int *chanlist, unsigned int n_chan)
@@ -941,52 +728,20 @@ static int check_channel_list(struct comedi_device *dev,
return seglen;
}
-static void setup_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int n_chan,
- unsigned int seglen)
-{
- struct pcl818_private *devpriv = dev->private;
- int i;
-
- devpriv->act_chanlist_len = seglen;
- devpriv->act_chanlist_pos = 0;
-
- for (i = 0; i < seglen; i++) { /* store range list to card */
- devpriv->act_chanlist[i] = CR_CHAN(chanlist[i]);
- outb(muxonechan[CR_CHAN(chanlist[i])], dev->iobase + PCL818_MUX); /* select channel */
- outb(CR_RANGE(chanlist[i]), dev->iobase + PCL818_RANGE); /* select gain */
- }
-
- udelay(1);
-
- /* select channel interval to scan */
- outb(devpriv->act_chanlist[0] | (devpriv->act_chanlist[seglen -
- 1] << 4),
- dev->iobase + PCL818_MUX);
-}
-
-/*
-==============================================================================
- Check if board is switched to SE (1) or DIFF(0) mode
-*/
static int check_single_ended(unsigned int port)
{
- if (inb(port + PCL818_STATUS) & 0x20)
+ if (inb(port + PCL818_STATUS_REG) & PCL818_STATUS_MUX)
return 1;
return 0;
}
-/*
-==============================================================================
-*/
static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
const struct pcl818_board *board = comedi_board(dev);
struct pcl818_private *devpriv = dev->private;
int err = 0;
- int tmp, divisor1 = 0, divisor2 = 0;
+ int tmp;
/* Step 1 : check if triggers are trivially valid */
@@ -1035,7 +790,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
i8253_cascade_ns_to_timer(devpriv->i8253_osc_base,
- &divisor1, &divisor2,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
&cmd->convert_arg, cmd->flags);
if (cmd->convert_arg < board->ns_min)
cmd->convert_arg = board->ns_min;
@@ -1057,152 +813,272 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
return 0;
}
-/*
-==============================================================================
-*/
-static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+static int pcl818_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct pcl818_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- int retval;
+ unsigned int ctrl = 0;
+ unsigned int seglen;
+
+ if (devpriv->ai_cmd_running)
+ return -EBUSY;
+
+ pcl818_start_pacer(dev, false);
+
+ seglen = check_channel_list(dev, s, cmd->chanlist, cmd->chanlist_len);
+ if (seglen < 1)
+ return -EINVAL;
+ pcl818_ai_setup_chanlist(dev, cmd->chanlist, seglen);
- devpriv->ai_n_chan = cmd->chanlist_len;
- devpriv->ai_chanlist = cmd->chanlist;
- devpriv->ai_flags = cmd->flags;
devpriv->ai_data_len = s->async->prealloc_bufsz;
- devpriv->ai_timer1 = 0;
- devpriv->ai_timer2 = 0;
+ devpriv->ai_act_scan = cmd->stop_arg;
+ devpriv->ai_act_chan = 0;
+ devpriv->ai_cmd_running = 1;
+ devpriv->ai_cmd_canceled = 0;
+ devpriv->act_chanlist_pos = 0;
+ devpriv->dma_runs_to_end = 0;
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ai_scans = cmd->stop_arg;
+ if (cmd->convert_src == TRIG_TIMER)
+ ctrl |= PCL818_CTRL_PACER_TRIG;
else
- devpriv->ai_scans = 0;
+ ctrl |= PCL818_CTRL_EXT_TRIG;
- if (cmd->scan_begin_src == TRIG_FOLLOW) { /* mode 1, 3 */
- if (cmd->convert_src == TRIG_TIMER) { /* mode 1 */
- devpriv->ai_timer1 = cmd->convert_arg;
- retval = pcl818_ai_cmd_mode(1, dev, s);
- return retval;
- }
- if (cmd->convert_src == TRIG_EXT) { /* mode 3 */
- return pcl818_ai_cmd_mode(3, dev, s);
- }
+ outb(PCL818_CNTENABLE_PACER_ENA, dev->iobase + PCL818_CNTENABLE_REG);
+
+ if (devpriv->dma) {
+ pcl818_ai_setup_dma(dev, s);
+
+ ctrl |= PCL818_CTRL_INTE | PCL818_CTRL_IRQ(dev->irq) |
+ PCL818_CTRL_DMAE;
+ } else if (devpriv->usefifo) {
+ /* enable FIFO */
+ outb(1, dev->iobase + PCL818_FI_ENABLE);
+ } else {
+ ctrl |= PCL818_CTRL_INTE | PCL818_CTRL_IRQ(dev->irq);
}
+ outb(ctrl, dev->iobase + PCL818_CTRL_REG);
+
+ if (cmd->convert_src == TRIG_TIMER)
+ pcl818_start_pacer(dev, true);
- return -1;
+ return 0;
}
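The control byte built here replaces the old magic constants (0x82/0x83/0x86/0x87): trigger source in bits 1:0, DMA enable in bit 2, IRQ line in bits 6:4, interrupt enable in bit 7. A worked example assuming IRQ 5 and a paced command that uses DMA:

#include <stdio.h>

#define PCL818_CTRL_PACER_TRIG	(3 << 0)
#define PCL818_CTRL_DMAE	(1 << 2)
#define PCL818_CTRL_IRQ(x)	((x) << 4)
#define PCL818_CTRL_INTE	(1 << 7)

int main(void)
{
	unsigned int irq = 5;	/* example IRQ line */
	unsigned int ctrl = PCL818_CTRL_PACER_TRIG | PCL818_CTRL_DMAE |
			    PCL818_CTRL_IRQ(irq) | PCL818_CTRL_INTE;

	/* equivalent to the old outb(0x87 | (irq << 4), ...) */
	printf("CTRL_REG = 0x%02x\n", ctrl);	/* 0xd7 */
	return 0;
}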
-/*
-==============================================================================
- cancel any mode 1-4 AI
-*/
static int pcl818_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcl818_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
- if (devpriv->irq_blocked > 0) {
- devpriv->irq_was_now_closed = 1;
-
- switch (devpriv->ai_mode) {
- case INT_TYPE_AI1_DMA:
- case INT_TYPE_AI3_DMA:
- if (devpriv->neverending_ai ||
- (!devpriv->neverending_ai &&
- devpriv->ai_act_scan > 0)) {
- /* wait for running dma transfer to end, do cleanup in interrupt */
- goto end;
- }
- disable_dma(devpriv->dma);
- case INT_TYPE_AI1_INT:
- case INT_TYPE_AI3_INT:
- case INT_TYPE_AI1_FIFO:
- case INT_TYPE_AI3_FIFO:
-#ifdef PCL818_MODE13_AO
- case INT_TYPE_AO1_INT:
- case INT_TYPE_AO3_INT:
-#endif
- outb(inb(dev->iobase + PCL818_CONTROL) & 0x73, dev->iobase + PCL818_CONTROL); /* Stop A/D */
- udelay(1);
- start_pacer(dev, -1, 0, 0);
- outb(0, dev->iobase + PCL818_AD_LO);
- inb(dev->iobase + PCL818_AD_LO);
- inb(dev->iobase + PCL818_AD_HI);
- outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
- outb(0, dev->iobase + PCL818_CONTROL); /* Stop A/D */
- if (devpriv->usefifo) { /* FIFO shutdown */
- outb(0, dev->iobase + PCL818_FI_INTCLR);
- outb(0, dev->iobase + PCL818_FI_FLUSH);
- outb(0, dev->iobase + PCL818_FI_ENABLE);
+ if (!devpriv->ai_cmd_running)
+ return 0;
+
+ if (devpriv->dma) {
+ if (cmd->stop_src == TRIG_NONE ||
+ (cmd->stop_src == TRIG_COUNT && devpriv->ai_act_scan > 0)) {
+ if (!devpriv->ai_cmd_canceled) {
+ /*
+ * Wait for running dma transfer to end,
+ * do cleanup in interrupt.
+ */
+ devpriv->ai_cmd_canceled = 1;
+ return 0;
}
- devpriv->irq_blocked = 0;
- devpriv->last_int_sub = s;
- devpriv->neverending_ai = 0;
- devpriv->ai_mode = 0;
- devpriv->irq_was_now_closed = 0;
- break;
}
+ disable_dma(devpriv->dma);
+ }
+
+ outb(PCL818_CTRL_DISABLE_TRIG, dev->iobase + PCL818_CTRL_REG);
+ pcl818_start_pacer(dev, false);
+ pcl818_ai_clear_eoc(dev);
+
+ if (devpriv->usefifo) { /* FIFO shutdown */
+ outb(0, dev->iobase + PCL818_FI_INTCLR);
+ outb(0, dev->iobase + PCL818_FI_FLUSH);
+ outb(0, dev->iobase + PCL818_FI_ENABLE);
}
+ devpriv->ai_cmd_running = 0;
+ devpriv->ai_cmd_canceled = 0;
-end:
return 0;
}
-/*
-==============================================================================
- chech for PCL818
-*/
-static int pcl818_check(unsigned long iobase)
+static int pcl818_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- outb(0x00, iobase + PCL818_MUX);
- udelay(1);
- if (inb(iobase + PCL818_MUX) != 0x00)
- return 1; /* there isn't card */
- outb(0x55, iobase + PCL818_MUX);
- udelay(1);
- if (inb(iobase + PCL818_MUX) != 0x55)
- return 1; /* there isn't card */
- outb(0x00, iobase + PCL818_MUX);
- udelay(1);
- outb(0x18, iobase + PCL818_CONTROL);
- udelay(1);
- if (inb(iobase + PCL818_CONTROL) != 0x18)
- return 1; /* there isn't card */
- return 0; /* ok, card exist */
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ int ret = 0;
+ int i;
+
+ outb(PCL818_CTRL_SOFT_TRIG, dev->iobase + PCL818_CTRL_REG);
+
+ pcl818_ai_set_chan_range(dev, chan, range);
+ pcl818_ai_set_chan_scan(dev, chan, chan);
+
+ for (i = 0; i < insn->n; i++) {
+ pcl818_ai_clear_eoc(dev);
+ pcl818_ai_soft_trig(dev);
+
+ ret = comedi_timeout(dev, s, insn, pcl818_ai_eoc, 0);
+ if (ret)
+ break;
+
+ data[i] = pcl818_ai_get_sample(dev, s, NULL);
+ }
+ pcl818_ai_clear_eoc(dev);
+
+ return ret ? ret : insn->n;
+}
+
+static int pcl818_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct pcl818_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++) {
+ devpriv->ao_readback[chan] = data[i];
+ outb((data[i] & 0x000f) << 4,
+ dev->iobase + PCL818_AO_LSB_REG(chan));
+ outb((data[i] & 0x0ff0) >> 4,
+ dev->iobase + PCL818_AO_MSB_REG(chan));
+ }
+
+ return insn->n;
+}
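The write splits a 12-bit DAC code across two byte-wide registers: the four least-significant bits land in the top nibble of the LSB register and the remaining eight bits in the MSB register. A short check of that split with an arbitrary value:

#include <stdio.h>

int main(void)
{
	unsigned int data = 0x0123;	/* 12-bit DAC code */
	unsigned int lsb = (data & 0x000f) << 4;
	unsigned int msb = (data & 0x0ff0) >> 4;

	printf("LSB reg <- 0x%02x, MSB reg <- 0x%02x\n", lsb, msb);
	/* prints 0x30 and 0x12 */
	return 0;
}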
+
+static int pcl818_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct pcl818_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
+
+ return insn->n;
+}
+
+static int pcl818_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = inb(dev->iobase + PCL818_DO_DI_LSB_REG) |
+ (inb(dev->iobase + PCL818_DO_DI_MSB_REG) << 8);
+
+ return insn->n;
+}
+
+static int pcl818_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ if (comedi_dio_update_state(s, data)) {
+ outb(s->state & 0xff, dev->iobase + PCL818_DO_DI_LSB_REG);
+ outb((s->state >> 8), dev->iobase + PCL818_DO_DI_MSB_REG);
+ }
+
+ data[1] = s->state;
+
+ return insn->n;
}
-/*
-==============================================================================
- reset whole PCL-818 cards
-*/
static void pcl818_reset(struct comedi_device *dev)
{
const struct pcl818_board *board = comedi_board(dev);
- struct pcl818_private *devpriv = dev->private;
+ unsigned long timer_base = dev->iobase + PCL818_TIMER_BASE;
+ unsigned int chan;
- if (devpriv->usefifo) { /* FIFO shutdown */
+ /* flush and disable the FIFO */
+ if (board->has_fifo) {
outb(0, dev->iobase + PCL818_FI_INTCLR);
outb(0, dev->iobase + PCL818_FI_FLUSH);
outb(0, dev->iobase + PCL818_FI_ENABLE);
}
- outb(0, dev->iobase + PCL818_DA_LO); /* DAC=0V */
- outb(0, dev->iobase + PCL818_DA_HI);
- udelay(1);
- outb(0, dev->iobase + PCL818_DO_HI); /* DO=$0000 */
- outb(0, dev->iobase + PCL818_DO_LO);
- udelay(1);
- outb(0, dev->iobase + PCL818_CONTROL);
- outb(0, dev->iobase + PCL818_CNTENABLE);
- outb(0, dev->iobase + PCL818_MUX);
- outb(0, dev->iobase + PCL818_CLRINT);
- outb(0xb0, dev->iobase + PCL818_CTRCTL); /* Stop pacer */
- outb(0x70, dev->iobase + PCL818_CTRCTL);
- outb(0x30, dev->iobase + PCL818_CTRCTL);
+
+ /* disable analog input trigger */
+ outb(PCL818_CTRL_DISABLE_TRIG, dev->iobase + PCL818_CTRL_REG);
+ pcl818_ai_clear_eoc(dev);
+
+ pcl818_ai_set_chan_range(dev, 0, 0);
+
+ /* stop pacer */
+ outb(PCL818_CNTENABLE_PACER_ENA, dev->iobase + PCL818_CNTENABLE_REG);
+ i8254_set_mode(timer_base, 0, 2, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 1, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 0, I8254_MODE0 | I8254_BINARY);
+
+ /* set analog output channels to 0V */
+ for (chan = 0; chan < board->n_aochan; chan++) {
+ outb(0, dev->iobase + PCL818_AO_LSB_REG(chan));
+ outb(0, dev->iobase + PCL818_AO_MSB_REG(chan));
+ }
+
+ /* set all digital outputs low */
+ outb(0, dev->iobase + PCL818_DO_DI_MSB_REG);
+ outb(0, dev->iobase + PCL818_DO_DI_LSB_REG);
+}
+
+static void pcl818_set_ai_range_table(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_devconfig *it)
+{
+ const struct pcl818_board *board = comedi_board(dev);
+
+ /* default to the range table from the boardinfo */
+ s->range_table = board->ai_range_type;
+
+ /* now check the user config option based on the boardtype */
if (board->is_818) {
- outb(0, dev->iobase + PCL818_RANGE);
+ if (it->options[4] == 1 || it->options[4] == 10) {
+ /* secondary range list jumper selectable */
+ s->range_table = &range_pcl818l_h_ai;
+ }
} else {
- outb(0, dev->iobase + PCL718_DA2_LO);
- outb(0, dev->iobase + PCL718_DA2_HI);
+ switch (it->options[4]) {
+ case 0:
+ s->range_table = &range_bipolar10;
+ break;
+ case 1:
+ s->range_table = &range_bipolar5;
+ break;
+ case 2:
+ s->range_table = &range_bipolar2_5;
+ break;
+ case 3:
+ s->range_table = &range718_bipolar1;
+ break;
+ case 4:
+ s->range_table = &range718_bipolar0_5;
+ break;
+ case 6:
+ s->range_table = &range_unipolar10;
+ break;
+ case 7:
+ s->range_table = &range_unipolar5;
+ break;
+ case 8:
+ s->range_table = &range718_unipolar2;
+ break;
+ case 9:
+ s->range_table = &range718_unipolar1;
+ break;
+ default:
+ s->range_table = &range_unknown;
+ break;
+ }
}
}
@@ -1210,154 +1086,96 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl818_board *board = comedi_board(dev);
struct pcl818_private *devpriv;
- int ret;
- int dma;
- unsigned long pages;
struct comedi_subdevice *s;
+ int ret;
+ int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- devpriv->io_range = board->io_range;
- if ((board->fifo) && (it->options[2] == -1)) {
- /* we've board with FIFO and we want to use FIFO */
- devpriv->io_range = PCLx1xFIFO_RANGE;
- devpriv->usefifo = 1;
- }
- ret = comedi_request_region(dev, it->options[0], devpriv->io_range);
+ ret = comedi_request_region(dev, it->options[0],
+ board->has_fifo ? 0x20 : 0x10);
if (ret)
return ret;
- if (pcl818_check(dev->iobase)) {
- comedi_error(dev, "I can't detect board. FAIL!\n");
- return -EIO;
- }
-
- if ((1 << it->options[1]) & board->IRQbits) {
- ret = request_irq(it->options[1], interrupt_pcl818, 0,
+ /* we can use IRQ 2-7 for async command support */
+ if (it->options[1] >= 2 && it->options[1] <= 7) {
+ ret = request_irq(it->options[1], pcl818_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
- devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
- devpriv->ai_mode = 0; /* mode of irq */
-
- /* grab our DMA */
- dma = 0;
- devpriv->dma = dma;
- if (!dev->irq)
- goto no_dma; /* if we haven't IRQ, we can't use DMA */
- if (board->DMAbits != 0) { /* board support DMA */
- dma = it->options[2];
- if (dma < 1)
- goto no_dma; /* DMA disabled */
- if (((1 << dma) & board->DMAbits) == 0) {
+ /* should we use the FIFO? */
+ if (dev->irq && board->has_fifo && it->options[2] == -1)
+ devpriv->usefifo = 1;
+
+ /* we need an IRQ to do DMA on channel 3 or 1 */
+ if (dev->irq && board->has_dma &&
+ (it->options[2] == 3 || it->options[2] == 1)) {
+ ret = request_dma(it->options[2], dev->board_name);
+ if (ret) {
dev_err(dev->class_dev,
- "DMA is out of allowed range, FAIL!\n");
- return -EINVAL; /* Bad DMA */
- }
- ret = request_dma(dma, dev->board_name);
- if (ret)
- return -EBUSY; /* DMA isn't free */
- devpriv->dma = dma;
- pages = 2; /* we need 16KB */
- devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[0])
- /* maybe experiment with try_to_free_pages() will help .... */
- return -EBUSY; /* no buffer :-( */
- devpriv->dmapages[0] = pages;
- devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
- devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
- devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[1])
+ "unable to request DMA channel %d\n",
+ it->options[2]);
return -EBUSY;
- devpriv->dmapages[1] = pages;
- devpriv->hwdmaptr[1] = virt_to_bus((void *)devpriv->dmabuf[1]);
- devpriv->hwdmasize[1] = (1 << pages) * PAGE_SIZE;
- }
+ }
+ devpriv->dma = it->options[2];
+
+ devpriv->dmapages = 2; /* we need 16KB */
+ devpriv->hwdmasize = (1 << devpriv->dmapages) * PAGE_SIZE;
-no_dma:
+ for (i = 0; i < 2; i++) {
+ unsigned long dmabuf;
+
+ dmabuf = __get_dma_pages(GFP_KERNEL, devpriv->dmapages);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ devpriv->dmabuf[i] = dmabuf;
+ devpriv->hwdmaptr[i] = virt_to_bus((void *)dmabuf);
+ }
+ }
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
s = &dev->subdevices[0];
- if (!board->n_aichan_se) {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE;
+ if (check_single_ended(dev->iobase)) {
+ s->n_chan = 16;
+ s->subdev_flags |= SDF_COMMON | SDF_GROUND;
} else {
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE;
- if (check_single_ended(dev->iobase)) {
- s->n_chan = board->n_aichan_se;
- s->subdev_flags |= SDF_COMMON | SDF_GROUND;
- } else {
- s->n_chan = board->n_aichan_diff;
- s->subdev_flags |= SDF_DIFF;
- }
- s->maxdata = board->ai_maxdata;
- s->range_table = board->ai_range_type;
- s->insn_read = pcl818_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = ai_cmdtest;
- s->do_cmd = ai_cmd;
- s->cancel = pcl818_ai_cancel;
- }
- if (board->is_818) {
- if ((it->options[4] == 1) || (it->options[4] == 10))
- s->range_table = &range_pcl818l_h_ai; /* secondary range list jumper selectable */
- } else {
- switch (it->options[4]) {
- case 0:
- s->range_table = &range_bipolar10;
- break;
- case 1:
- s->range_table = &range_bipolar5;
- break;
- case 2:
- s->range_table = &range_bipolar2_5;
- break;
- case 3:
- s->range_table = &range718_bipolar1;
- break;
- case 4:
- s->range_table = &range718_bipolar0_5;
- break;
- case 6:
- s->range_table = &range_unipolar10;
- break;
- case 7:
- s->range_table = &range_unipolar5;
- break;
- case 8:
- s->range_table = &range718_unipolar2;
- break;
- case 9:
- s->range_table = &range718_unipolar1;
- break;
- default:
- s->range_table = &range_unknown;
- break;
- }
- }
+ s->n_chan = 8;
+ s->subdev_flags |= SDF_DIFF;
+ }
+ s->maxdata = 0x0fff;
+
+ pcl818_set_ai_range_table(dev, s, it);
+
+ s->insn_read = pcl818_ai_insn_read;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmdtest = ai_cmdtest;
+ s->do_cmd = pcl818_ai_cmd;
+ s->cancel = pcl818_ai_cancel;
}
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- if (!board->n_aochan) {
- s->type = COMEDI_SUBD_UNUSED;
- } else {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->n_aochan;
- s->maxdata = board->ao_maxdata;
- s->range_table = board->ao_range_type;
- s->insn_read = pcl818_ao_insn_read;
- s->insn_write = pcl818_ao_insn_write;
+ if (board->n_aochan) {
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
+ s->n_chan = board->n_aochan;
+ s->maxdata = 0x0fff;
+ s->range_table = &range_unipolar5;
+ s->insn_read = pcl818_ao_insn_read;
+ s->insn_write = pcl818_ao_insn_write;
if (board->is_818) {
if ((it->options[4] == 1) || (it->options[4] == 10))
s->range_table = &range_unipolar10;
@@ -1369,31 +1187,27 @@ no_dma:
if (it->options[5] == 2)
s->range_table = &range_unknown;
}
- }
-
- s = &dev->subdevices[2];
- if (!board->n_dichan) {
- s->type = COMEDI_SUBD_UNUSED;
} else {
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = board->n_dichan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl818_di_insn_bits;
+ s->type = COMEDI_SUBD_UNUSED;
}
+ /* Digital Input subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl818_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[3];
- if (!board->n_dochan) {
- s->type = COMEDI_SUBD_UNUSED;
- } else {
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_dochan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl818_do_insn_bits;
- }
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl818_do_insn_bits;
 /* select 1 MHz or 10 MHz oscillator */
if ((it->options[3] == 0) || (it->options[3] == 10))
@@ -1424,38 +1238,13 @@ static void pcl818_detach(struct comedi_device *dev)
if (devpriv->dma)
free_dma(devpriv->dma);
if (devpriv->dmabuf[0])
- free_pages(devpriv->dmabuf[0], devpriv->dmapages[0]);
+ free_pages(devpriv->dmabuf[0], devpriv->dmapages);
if (devpriv->dmabuf[1])
- free_pages(devpriv->dmabuf[1], devpriv->dmapages[1]);
+ free_pages(devpriv->dmabuf[1], devpriv->dmapages);
}
comedi_legacy_detach(dev);
}
-static const struct pcl818_board boardtypes[] = {
- {"pcl818l", 4, 16, 8, 25000, 1, 16, 16, &range_pcl818l_l_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 0, 1},
- {"pcl818h", 9, 16, 8, 10000, 1, 16, 16, &range_pcl818h_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 0, 1},
- {"pcl818hd", 9, 16, 8, 10000, 1, 16, 16, &range_pcl818h_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 1, 1},
- {"pcl818hg", 12, 16, 8, 10000, 1, 16, 16, &range_pcl818hg_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 1, 1},
- {"pcl818", 9, 16, 8, 10000, 2, 16, 16, &range_pcl818h_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 0, 1},
- {"pcl718", 1, 16, 8, 16000, 2, 16, 16, &range_unipolar5,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 0, 0},
- /* pcm3718 */
- {"pcm3718", 9, 16, 8, 10000, 0, 16, 16, &range_pcl818h_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 0, 1 /* XXX ? */ },
-};
-
static struct comedi_driver pcl818_driver = {
.driver_name = "pcl818",
.module = THIS_MODULE,
diff --git a/drivers/staging/comedi/drivers/pcm3724.c b/drivers/staging/comedi/drivers/pcm3724.c
index f4a49bd649f0..53e73737a906 100644
--- a/drivers/staging/comedi/drivers/pcm3724.c
+++ b/drivers/staging/comedi/drivers/pcm3724.c
@@ -225,8 +225,10 @@ static int pcm3724_attach(struct comedi_device *dev,
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
- subdev_8255_init(dev, s, subdev_8255_cb,
- (unsigned long)(dev->iobase + SIZE_8255 * i));
+ ret = subdev_8255_init(dev, s, subdev_8255_cb,
+ dev->iobase + SIZE_8255 * i);
+ if (ret)
+ return ret;
s->insn_config = subdev_3724_insn_config;
}
return 0;
diff --git a/drivers/staging/comedi/drivers/pcmad.c b/drivers/staging/comedi/drivers/pcmad.c
index fe482fdd512e..87c61d9b11da 100644
--- a/drivers/staging/comedi/drivers/pcmad.c
+++ b/drivers/staging/comedi/drivers/pcmad.c
@@ -61,18 +61,17 @@ static const struct pcmad_board_struct pcmad_boards[] = {
},
};
-#define TIMEOUT 100
-
-static int pcmad_ai_wait_for_eoc(struct comedi_device *dev,
- int timeout)
+static int pcmad_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- int i;
+ unsigned int status;
- for (i = 0; i < timeout; i++) {
- if ((inb(dev->iobase + PCMAD_STATUS) & 0x3) == 0x3)
- return 0;
- }
- return -ETIME;
+ status = inb(dev->iobase + PCMAD_STATUS);
+ if ((status & 0x3) == 0x3)
+ return 0;
+ return -EBUSY;
}
static int pcmad_ai_insn_read(struct comedi_device *dev,
@@ -89,7 +88,7 @@ static int pcmad_ai_insn_read(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
outb(chan, dev->iobase + PCMAD_CONVERT);
- ret = pcmad_ai_wait_for_eoc(dev, TIMEOUT);
+ ret = comedi_timeout(dev, s, insn, pcmad_ai_eoc, 0);
if (ret)
return ret;
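
The conversions in this series replace open-coded busy-wait loops with the core comedi_timeout() helper. As the skel.c comment later in this patch spells out, the callback samples the hardware once and returns 0 when the condition is met or -EBUSY to keep polling; if the condition never becomes true within the helper's timeout (about one second), comedi_timeout() returns -ETIMEDOUT to the caller. A minimal sketch of the resulting pattern, reusing this driver's PCMAD registers (the function names are illustrative, not part of the patch):

static int example_ai_eoc(struct comedi_device *dev,
			  struct comedi_subdevice *s,
			  struct comedi_insn *insn,
			  unsigned long context)
{
	unsigned int status;

	/* one non-blocking sample of the end-of-conversion flag */
	status = inb(dev->iobase + PCMAD_STATUS);
	if ((status & 0x3) == 0x3)
		return 0;		/* conversion finished */
	return -EBUSY;			/* not yet, let the helper poll again */
}

static int example_ai_read_one(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn)
{
	int ret;

	/* start a conversion, then wait for it with the shared helper */
	outb(CR_CHAN(insn->chanspec), dev->iobase + PCMAD_CONVERT);
	ret = comedi_timeout(dev, s, insn, example_ai_eoc, 0);
	if (ret)
		return ret;		/* -ETIMEDOUT or the callback's errno */
	return 0;
}

The otherwise unused context argument lets one callback serve several wait conditions, as the s626 changes further down demonstrate.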
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index c388f7f32227..e89bca845349 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -589,16 +589,17 @@ static int pcmmio_cmdtest(struct comedi_device *dev,
return 0;
}
-static int pcmmio_ai_wait_for_eoc(unsigned long iobase, unsigned int timeout)
+static int pcmmio_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
unsigned char status;
- while (timeout--) {
- status = inb(iobase + PCMMIO_AI_STATUS_REG);
- if (status & PCMMIO_AI_STATUS_DATA_READY)
- return 0;
- }
- return -ETIME;
+ status = inb(dev->iobase + PCMMIO_AI_STATUS_REG);
+ if (status & PCMMIO_AI_STATUS_DATA_READY)
+ return 0;
+ return -EBUSY;
}
static int pcmmio_ai_insn_read(struct comedi_device *dev,
@@ -643,7 +644,8 @@ static int pcmmio_ai_insn_read(struct comedi_device *dev,
cmd |= PCMMIO_AI_CMD_RANGE(range);
outb(cmd, iobase + PCMMIO_AI_CMD_REG);
- ret = pcmmio_ai_wait_for_eoc(iobase, 100000);
+
+ ret = comedi_timeout(dev, s, insn, pcmmio_ai_eoc, 0);
if (ret)
return ret;
@@ -652,7 +654,8 @@ static int pcmmio_ai_insn_read(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
outb(cmd, iobase + PCMMIO_AI_CMD_REG);
- ret = pcmmio_ai_wait_for_eoc(iobase, 100000);
+
+ ret = comedi_timeout(dev, s, insn, pcmmio_ai_eoc, 0);
if (ret)
return ret;
@@ -684,16 +687,17 @@ static int pcmmio_ao_insn_read(struct comedi_device *dev,
return insn->n;
}
-static int pcmmio_ao_wait_for_eoc(unsigned long iobase, unsigned int timeout)
+static int pcmmio_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
unsigned char status;
- while (timeout--) {
- status = inb(iobase + PCMMIO_AO_STATUS_REG);
- if (status & PCMMIO_AO_STATUS_DATA_READY)
- return 0;
- }
- return -ETIME;
+ status = inb(dev->iobase + PCMMIO_AO_STATUS_REG);
+ if (status & PCMMIO_AO_STATUS_DATA_READY)
+ return 0;
+ return -EBUSY;
}
static int pcmmio_ao_insn_write(struct comedi_device *dev,
@@ -726,7 +730,8 @@ static int pcmmio_ao_insn_write(struct comedi_device *dev,
outb(PCMMIO_AO_LSB_SPAN(range), iobase + PCMMIO_AO_LSB_REG);
outb(0, iobase + PCMMIO_AO_MSB_REG);
outb(cmd | PCMMIO_AO_CMD_WR_SPAN_UPDATE, iobase + PCMMIO_AO_CMD_REG);
- ret = pcmmio_ao_wait_for_eoc(iobase, 100000);
+
+ ret = comedi_timeout(dev, s, insn, pcmmio_ao_eoc, 0);
if (ret)
return ret;
@@ -738,7 +743,8 @@ static int pcmmio_ao_insn_write(struct comedi_device *dev,
outb((val >> 8) & 0xff, iobase + PCMMIO_AO_MSB_REG);
outb(cmd | PCMMIO_AO_CMD_WR_CODE_UPDATE,
iobase + PCMMIO_AO_CMD_REG);
- ret = pcmmio_ao_wait_for_eoc(iobase, 100000);
+
+ ret = comedi_timeout(dev, s, insn, pcmmio_ao_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index 55e3c2e2bc52..25706531b885 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -29,13 +29,13 @@
/* descriptor block used for chained dma transfers */
struct plx_dma_desc {
- volatile uint32_t pci_start_addr;
- volatile uint32_t local_start_addr;
+ __le32 pci_start_addr;
+ __le32 local_start_addr;
/* transfer_size is in bytes, only first 23 bits of register are used */
- volatile uint32_t transfer_size;
+ __le32 transfer_size;
/* address of next descriptor (quad word aligned), plus some
* additional bits (see PLX_DMA0_DESCRIPTOR_REG) */
- volatile uint32_t next;
+ __le32 next;
};
/**********************************************************************
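
The plx9080.h change above marks the DMA descriptor fields as __le32 because the PLX chip fetches them from host memory in little-endian order; with the annotation, sparse will flag any assignment that skips the byte-order conversion. A hedged sketch of how a descriptor would now be filled (plx_fill_desc and the flags parameter are illustrative placeholders; the 23-bit mask follows the comment in the header):

static void plx_fill_desc(struct plx_dma_desc *desc,
			  u32 pci_addr, u32 local_addr, u32 len,
			  u32 next_desc_bus, u32 flags)
{
	desc->pci_start_addr   = cpu_to_le32(pci_addr);
	desc->local_start_addr = cpu_to_le32(local_addr);
	/* only the first 23 bits of the transfer size are used */
	desc->transfer_size    = cpu_to_le32(len & 0x007fffff);
	/* quad-word-aligned next pointer plus the control bits in "flags" */
	desc->next             = cpu_to_le32((next_desc_bus & ~0xful) | flags);
}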
diff --git a/drivers/staging/comedi/drivers/poc.c b/drivers/staging/comedi/drivers/poc.c
deleted file mode 100644
index 2ae4ee15704c..000000000000
--- a/drivers/staging/comedi/drivers/poc.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- comedi/drivers/poc.c
- Mini-drivers for POC (Piece of Crap) boards
- Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
- Copyright (C) 2001 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: poc
-Description: Generic driver for very simple devices
-Author: ds
-Devices: [Keithley Metrabyte] DAC-02 (dac02)
-Updated: Sat, 16 Mar 2002 17:34:48 -0800
-Status: unknown
-
-This driver is indended to support very simple ISA-based devices,
-including:
- dac02 - Keithley DAC-02 analog output board
-
-Configuration options:
- [0] - I/O port base
-*/
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-struct boarddef_struct {
- const char *name;
- unsigned int iosize;
- int (*setup) (struct comedi_device *);
- int type;
- int n_chan;
- int n_bits;
- int (*winsn) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*rinsn) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*insnbits) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- const struct comedi_lrange *range;
-};
-
-struct poc_private {
- unsigned int ao_readback[32];
-};
-
-static int readback_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct poc_private *devpriv = dev->private;
- int chan;
-
- chan = CR_CHAN(insn->chanspec);
- data[0] = devpriv->ao_readback[chan];
-
- return 1;
-}
-
-/* DAC-02 registers */
-#define DAC02_LSB(a) (2 * a)
-#define DAC02_MSB(a) (2 * a + 1)
-
-static int dac02_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct poc_private *devpriv = dev->private;
- int temp;
- int chan;
- int output;
-
- chan = CR_CHAN(insn->chanspec);
- devpriv->ao_readback[chan] = data[0];
- output = data[0];
-#ifdef wrong
- /* convert to complementary binary if range is bipolar */
- if ((CR_RANGE(insn->chanspec) & 0x2) == 0)
- output = ~output;
-#endif
- temp = (output << 4) & 0xf0;
- outb(temp, dev->iobase + DAC02_LSB(chan));
- temp = (output >> 4) & 0xff;
- outb(temp, dev->iobase + DAC02_MSB(chan));
-
- return 1;
-}
-
-static int poc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct boarddef_struct *board = comedi_board(dev);
- struct poc_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], board->iosize);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- /* analog output subdevice */
- s = &dev->subdevices[0];
- s->type = board->type;
- s->n_chan = board->n_chan;
- s->maxdata = (1 << board->n_bits) - 1;
- s->range_table = board->range;
- s->insn_write = board->winsn;
- s->insn_read = board->rinsn;
- s->insn_bits = board->insnbits;
- if (s->type == COMEDI_SUBD_AO || s->type == COMEDI_SUBD_DO)
- s->subdev_flags = SDF_WRITABLE;
-
- return 0;
-}
-
-static const struct boarddef_struct boards[] = {
- {
- .name = "dac02",
- .iosize = 8,
- /* .setup = dac02_setup, */
- .type = COMEDI_SUBD_AO,
- .n_chan = 2,
- .n_bits = 12,
- .winsn = dac02_ao_winsn,
- .rinsn = readback_insn,
- .range = &range_unknown,
- },
-};
-
-static struct comedi_driver poc_driver = {
- .driver_name = "poc",
- .module = THIS_MODULE,
- .attach = poc_attach,
- .detach = comedi_legacy_detach,
- .board_name = &boards[0].name,
- .num_names = ARRAY_SIZE(boards),
- .offset = sizeof(boards[0]),
-};
-module_comedi_driver(poc_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 96a46954b3c0..298dba03f902 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -214,7 +214,6 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
s->async->events |=
COMEDI_CB_EOA | COMEDI_CB_OVERFLOW;
dev_warn(dev->class_dev, "data lost\n");
- daqp_ai_cancel(dev, s);
break;
}
@@ -231,7 +230,6 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
if (devpriv->count > 0) {
devpriv->count--;
if (devpriv->count == 0) {
- daqp_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
break;
}
@@ -244,13 +242,12 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
if (loop_limit <= 0) {
dev_warn(dev->class_dev,
"loop_limit reached in daqp_interrupt()\n");
- daqp_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
}
s->async->events |= COMEDI_CB_BLOCK;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
return IRQ_HANDLED;
}
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 0f026afde9be..cd3fdf973bdd 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -237,20 +237,6 @@
/* The board support a channel list up to the FIFO length (1K or 8K) */
#define RTD_MAX_CHANLIST 128 /* max channel list that we allow */
-/* tuning for ai/ao instruction done polling */
-#ifdef FAST_SPIN
-#define WAIT_QUIETLY /* as nothing, spin on done bit */
-#define RTD_ADC_TIMEOUT 66000 /* 2 msec at 33mhz bus rate */
-#define RTD_DAC_TIMEOUT 66000
-#define RTD_DMA_TIMEOUT 33000 /* 1 msec */
-#else
-/* by delaying, power and electrical noise are reduced somewhat */
-#define WAIT_QUIETLY udelay(1)
-#define RTD_ADC_TIMEOUT 2000 /* in usec */
-#define RTD_DAC_TIMEOUT 2000 /* in usec */
-#define RTD_DMA_TIMEOUT 1000 /* in usec */
-#endif
-
/*======================================================================
Board specific stuff
======================================================================*/
@@ -562,21 +548,27 @@ static int rtd520_probe_fifo_depth(struct comedi_device *dev)
return fifo_size;
}
-/*
- "instructions" read/write data in "one-shot" or "software-triggered"
- mode (simplest case).
- This doesn't use interrupts.
+static int rtd_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct rtd_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readl(devpriv->las0 + LAS0_ADC);
+ if (status & FS_ADC_NOT_EMPTY)
+ return 0;
+ return -EBUSY;
+}
- Note, we don't do any settling delays. Use a instruction list to
- select, delay, then read.
- */
static int rtd_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
struct rtd_private *devpriv = dev->private;
- int n, ii;
- int stat;
+ int ret;
+ int n;
/* clear any old fifo data */
writel(0, devpriv->las0 + LAS0_ADC_FIFO_CLEAR);
@@ -593,14 +585,9 @@ static int rtd_ai_rinsn(struct comedi_device *dev,
/* trigger conversion */
writew(0, devpriv->las0 + LAS0_ADC);
- for (ii = 0; ii < RTD_ADC_TIMEOUT; ++ii) {
- stat = readl(devpriv->las0 + LAS0_ADC);
- if (stat & FS_ADC_NOT_EMPTY) /* 1 -> not empty */
- break;
- WAIT_QUIETLY;
- }
- if (ii >= RTD_ADC_TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, rtd_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
@@ -1116,9 +1103,22 @@ static int rtd_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
-/*
- Output one (or more) analog values to a single port as fast as possible.
-*/
+static int rtd_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct rtd_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int bit = (chan == 0) ? FS_DAC1_NOT_EMPTY : FS_DAC2_NOT_EMPTY;
+ unsigned int status;
+
+ status = readl(devpriv->las0 + LAS0_ADC);
+ if (status & bit)
+ return 0;
+ return -EBUSY;
+}
+
static int rtd_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
@@ -1127,6 +1127,7 @@ static int rtd_ao_winsn(struct comedi_device *dev,
int i;
int chan = CR_CHAN(insn->chanspec);
int range = CR_RANGE(insn->chanspec);
+ int ret;
/* Configure the output range (table index matches the range values) */
writew(range & 7, devpriv->las0 +
@@ -1136,8 +1137,6 @@ static int rtd_ao_winsn(struct comedi_device *dev,
* very useful, but that's how the interface is defined. */
for (i = 0; i < insn->n; ++i) {
int val = data[i] << 3;
- int stat = 0; /* initialize to avoid bogus warning */
- int ii;
/* VERIFY: comedi range and offset conversions */
@@ -1157,16 +1156,9 @@ static int rtd_ao_winsn(struct comedi_device *dev,
devpriv->ao_readback[chan] = data[i];
- for (ii = 0; ii < RTD_DAC_TIMEOUT; ++ii) {
- stat = readl(devpriv->las0 + LAS0_ADC);
- /* 1 -> not empty */
- if (stat & ((0 == chan) ? FS_DAC1_NOT_EMPTY :
- FS_DAC2_NOT_EMPTY))
- break;
- WAIT_QUIETLY;
- }
- if (ii >= RTD_DAC_TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, rtd_ao_eoc, 0);
+ if (ret)
+ return ret;
}
/* return the number of samples read/written */
@@ -1382,8 +1374,6 @@ static int rtd_auto_attach(struct comedi_device *dev,
if (dev->irq)
writel(ICS_PIE | ICS_PLIE, devpriv->lcfg + PLX_INTRCS_REG);
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/rti800.c b/drivers/staging/comedi/drivers/rti800.c
index e1f3671ac056..bd447b2add7b 100644
--- a/drivers/staging/comedi/drivers/rti800.c
+++ b/drivers/staging/comedi/drivers/rti800.c
@@ -83,8 +83,6 @@
#define RTI800_IOSIZE 0x10
-#define RTI800_AI_TIMEOUT 100
-
static const struct comedi_lrange range_rti800_ai_10_bipolar = {
4, {
BIP_RANGE(10),
@@ -145,23 +143,21 @@ struct rti800_private {
unsigned char muxgain_bits;
};
-static int rti800_ai_wait_for_conversion(struct comedi_device *dev,
- int timeout)
+static int rti800_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
unsigned char status;
- int i;
- for (i = 0; i < timeout; i++) {
- status = inb(dev->iobase + RTI800_CSR);
- if (status & RTI800_CSR_OVERRUN) {
- outb(0, dev->iobase + RTI800_CLRFLAGS);
- return -EIO;
- }
- if (status & RTI800_CSR_DONE)
- return 0;
- udelay(1);
+ status = inb(dev->iobase + RTI800_CSR);
+ if (status & RTI800_CSR_OVERRUN) {
+ outb(0, dev->iobase + RTI800_CLRFLAGS);
+ return -EOVERFLOW;
}
- return -ETIME;
+ if (status & RTI800_CSR_DONE)
+ return 0;
+ return -EBUSY;
}
static int rti800_ai_insn_read(struct comedi_device *dev,
@@ -198,7 +194,8 @@ static int rti800_ai_insn_read(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
outb(0, dev->iobase + RTI800_CONVERT);
- ret = rti800_ai_wait_for_conversion(dev, RTI800_AI_TIMEOUT);
+
+ ret = comedi_timeout(dev, s, insn, rti800_ai_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/rti802.c b/drivers/staging/comedi/drivers/rti802.c
index a3fa2a4baef4..605a31d702e0 100644
--- a/drivers/staging/comedi/drivers/rti802.c
+++ b/drivers/staging/comedi/drivers/rti802.c
@@ -1,45 +1,44 @@
/*
- comedi/drivers/rti802.c
- Hardware driver for Analog Devices RTI-802 board
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * rti802.c
+ * Comedi driver for Analog Devices RTI-802 board
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
+
/*
-Driver: rti802
-Description: Analog Devices RTI-802
-Author: Anders Blomdell <anders.blomdell@control.lth.se>
-Devices: [Analog Devices] RTI-802 (rti802)
-Status: works
-
-Configuration Options:
- [0] - i/o base
- [1] - unused
- [2] - dac#0 0=two's comp, 1=straight
- [3] - dac#0 0=bipolar, 1=unipolar
- [4] - dac#1 ...
- ...
- [17] - dac#7 ...
-*/
+ * Driver: rti802
+ * Description: Analog Devices RTI-802
+ * Author: Anders Blomdell <anders.blomdell@control.lth.se>
+ * Devices: (Analog Devices) RTI-802 [rti802]
+ * Status: works
+ *
+ * Configuration Options:
+ * [0] - i/o base
+ * [1] - unused
+ * [2,4,6,8,10,12,14,16] - dac#[0-7] 0=two's comp, 1=straight
+ * [3,5,7,9,11,13,15,17] - dac#[0-7] 0=bipolar, 1=unipolar
+ */
#include <linux/module.h>
#include "../comedidev.h"
-#define RTI802_SIZE 4
-
-#define RTI802_SELECT 0
-#define RTI802_DATALOW 1
-#define RTI802_DATAHIGH 2
+/*
+ * Register I/O map
+ */
+#define RTI802_SELECT 0x00
+#define RTI802_DATALOW 0x01
+#define RTI802_DATAHIGH 0x02
struct rti802_private {
enum {
@@ -51,34 +50,45 @@ struct rti802_private {
static int rti802_ao_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct rti802_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
for (i = 0; i < insn->n; i++)
- data[i] = devpriv->ao_readback[CR_CHAN(insn->chanspec)];
+ data[i] = devpriv->ao_readback[chan];
- return i;
+ return insn->n;
}
static int rti802_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct rti802_private *devpriv = dev->private;
- int i, d;
- int chan = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+ int i;
+
+ outb(chan, dev->iobase + RTI802_SELECT);
for (i = 0; i < insn->n; i++) {
- d = devpriv->ao_readback[chan] = data[i];
+ val = data[i];
+
+ devpriv->ao_readback[chan] = val;
+
+ /* munge offset binary to two's complement if needed */
if (devpriv->dac_coding[chan] == dac_2comp)
- d ^= 0x800;
- outb(chan, dev->iobase + RTI802_SELECT);
- outb(d & 0xff, dev->iobase + RTI802_DATALOW);
- outb(d >> 8, dev->iobase + RTI802_DATAHIGH);
+ val = comedi_offset_munge(s, val);
+
+ outb(val & 0xff, dev->iobase + RTI802_DATALOW);
+ outb((val >> 8) & 0xff, dev->iobase + RTI802_DATAHIGH);
}
- return i;
+
+ return insn->n;
}
static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
@@ -88,7 +98,7 @@ static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int i;
int ret;
- ret = comedi_request_region(dev, it->options[0], RTI802_SIZE);
+ ret = comedi_request_region(dev, it->options[0], 0x04);
if (ret)
return ret;
@@ -100,22 +110,21 @@ static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
+ /* Analog Output subdevice */
s = &dev->subdevices[0];
- /* ao subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->maxdata = 0xfff;
- s->n_chan = 8;
- s->insn_read = rti802_ao_insn_read;
- s->insn_write = rti802_ao_insn_write;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->maxdata = 0xfff;
+ s->n_chan = 8;
+ s->insn_read = rti802_ao_insn_read;
+ s->insn_write = rti802_ao_insn_write;
s->range_table_list = devpriv->range_type_list;
for (i = 0; i < 8; i++) {
devpriv->dac_coding[i] = (it->options[3 + 2 * i])
- ? (dac_straight)
- : (dac_2comp);
+ ? (dac_straight) : (dac_2comp);
devpriv->range_type_list[i] = (it->options[2 + 2 * i])
- ? &range_unipolar10 : &range_bipolar10;
+ ? &range_unipolar10 : &range_bipolar10;
}
return 0;
@@ -130,5 +139,5 @@ static struct comedi_driver rti802_driver = {
module_comedi_driver(rti802_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Analog Devices RTI-802 board");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index 9950f59b1192..85d2b7a3c125 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -420,15 +420,28 @@ static int s526_ai_insn_config(struct comedi_device *dev,
return result;
}
+static int s526_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + REG_ISR);
+ if (status & ISR_ADC_DONE)
+ return 0;
+ return -EBUSY;
+}
+
static int s526_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct s526_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
- int n, i;
+ int n;
unsigned short value;
unsigned int d;
- unsigned int status;
+ int ret;
/* Set configured delay, enable channel for this channel only,
* select "ADC read" channel, set "ADC start" bit. */
@@ -440,17 +453,12 @@ static int s526_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
/* trigger conversion */
outw(value, dev->iobase + REG_ADC);
-#define TIMEOUT 100
/* wait for conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- status = inw(dev->iobase + REG_ISR);
- if (status & ISR_ADC_DONE) {
- outw(ISR_ADC_DONE, dev->iobase + REG_ISR);
- break;
- }
- }
- if (i == TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, s526_ai_eoc, 0);
+ if (ret)
+ return ret;
+
+ outw(ISR_ADC_DONE, dev->iobase + REG_ISR);
/* read data */
d = inw(dev->iobase + REG_ADD);
@@ -604,7 +612,7 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->insn_bits = s526_dio_insn_bits;
s->insn_config = s526_dio_insn_config;
- return 1;
+ return 0;
}
static struct comedi_driver s526_driver = {
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 19da1dbea494..95fadf343f27 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -106,16 +106,16 @@ struct s626_private {
struct s626_enc_info {
/* Pointers to functions that differ for A and B counters: */
/* Return clock enable. */
- uint16_t(*get_enable)(struct comedi_device *dev,
+ uint16_t (*get_enable)(struct comedi_device *dev,
const struct s626_enc_info *k);
/* Return interrupt source. */
- uint16_t(*get_int_src)(struct comedi_device *dev,
+ uint16_t (*get_int_src)(struct comedi_device *dev,
const struct s626_enc_info *k);
/* Return preload trigger source. */
- uint16_t(*get_load_trig)(struct comedi_device *dev,
+ uint16_t (*get_load_trig)(struct comedi_device *dev,
const struct s626_enc_info *k);
/* Return standardized operating mode. */
- uint16_t(*get_mode)(struct comedi_device *dev,
+ uint16_t (*get_mode)(struct comedi_device *dev,
const struct s626_enc_info *k);
/* Generate soft index strobe. */
void (*pulse_index)(struct comedi_device *dev,
@@ -209,6 +209,8 @@ static const struct comedi_lrange s626_range_table = {
static void s626_debi_transfer(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
+ static const int timeout = 10000;
+ int i;
/* Initiate upload of shadow RAM to DEBI control register */
s626_mc_enable(dev, S626_MC2_UPLD_DEBI, S626_P_MC2);
@@ -217,12 +219,23 @@ static void s626_debi_transfer(struct comedi_device *dev)
* Wait for completion of upload from shadow RAM to
* DEBI control register.
*/
- while (!s626_mc_test(dev, S626_MC2_UPLD_DEBI, S626_P_MC2))
- ;
+ for (i = 0; i < timeout; i++) {
+ if (s626_mc_test(dev, S626_MC2_UPLD_DEBI, S626_P_MC2))
+ break;
+ udelay(1);
+ }
+ if (i == timeout)
+ comedi_error(dev,
+ "Timeout while uploading to DEBI control register.");
/* Wait until DEBI transfer is done */
- while (readl(devpriv->mmio + S626_P_PSR) & S626_PSR_DEBI_S)
- ;
+ for (i = 0; i < timeout; i++) {
+ if (!(readl(devpriv->mmio + S626_P_PSR) & S626_PSR_DEBI_S))
+ break;
+ udelay(1);
+ }
+ if (i == timeout)
+ comedi_error(dev, "DEBI transfer timeout.");
}
/*
@@ -351,14 +364,57 @@ static const uint8_t s626_trimadrs[] = {
0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63
};
+enum {
+ s626_send_dac_wait_not_mc1_a2out,
+ s626_send_dac_wait_ssr_af2_out,
+ s626_send_dac_wait_fb_buffer2_msb_00,
+ s626_send_dac_wait_fb_buffer2_msb_ff
+};
+
+static int s626_send_dac_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct s626_private *devpriv = dev->private;
+ unsigned int status;
+
+ switch (context) {
+ case s626_send_dac_wait_not_mc1_a2out:
+ status = readl(devpriv->mmio + S626_P_MC1);
+ if (!(status & S626_MC1_A2OUT))
+ return 0;
+ break;
+ case s626_send_dac_wait_ssr_af2_out:
+ status = readl(devpriv->mmio + S626_P_SSR);
+ if (status & S626_SSR_AF2_OUT)
+ return 0;
+ break;
+ case s626_send_dac_wait_fb_buffer2_msb_00:
+ status = readl(devpriv->mmio + S626_P_FB_BUFFER2);
+ if (!(status & 0xff000000))
+ return 0;
+ break;
+ case s626_send_dac_wait_fb_buffer2_msb_ff:
+ status = readl(devpriv->mmio + S626_P_FB_BUFFER2);
+ if (status & 0xff000000)
+ return 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return -EBUSY;
+}
+
/*
* Private helper function: Transmit serial data to DAC via Audio
* channel 2. Assumes: (1) TSL2 slot records initialized, and (2)
* dacpol contains valid target image.
*/
-static void s626_send_dac(struct comedi_device *dev, uint32_t val)
+static int s626_send_dac(struct comedi_device *dev, uint32_t val)
{
struct s626_private *devpriv = dev->private;
+ int ret;
/* START THE SERIAL CLOCK RUNNING ------------- */
@@ -404,8 +460,12 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val)
* Done by polling the DMAC enable flag; this flag is automatically
* cleared when the transfer has finished.
*/
- while (readl(devpriv->mmio + S626_P_MC1) & S626_MC1_A2OUT)
- ;
+ ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
+ s626_send_dac_wait_not_mc1_a2out);
+ if (ret) {
+ comedi_error(dev, "DMA transfer timeout.");
+ return ret;
+ }
/* START THE OUTPUT STREAM TO THE TARGET DAC -------------------- */
@@ -425,8 +485,12 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val)
* finished transferring the DAC's data DWORD from the output FIFO
* to the output buffer register.
*/
- while (!(readl(devpriv->mmio + S626_P_SSR) & S626_SSR_AF2_OUT))
- ;
+ ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
+ s626_send_dac_wait_ssr_af2_out);
+ if (ret) {
+ comedi_error(dev, "TSL timeout waiting for slot 1 to execute.");
+ return ret;
+ }
/*
* Set up to trap execution at slot 0 when the TSL sequencer cycles
@@ -466,8 +530,13 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val)
* from 0xFF to 0x00, which slot 0 causes to happen by shifting
* out/in on SD2 the 0x00 that is always referenced by slot 5.
*/
- while (readl(devpriv->mmio + S626_P_FB_BUFFER2) & 0xff000000)
- ;
+ ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
+ s626_send_dac_wait_fb_buffer2_msb_00);
+ if (ret) {
+ comedi_error(dev,
+ "TSL timeout waiting for slot 0 to execute.");
+ return ret;
+ }
}
/*
* Either (1) we were too late setting the slot 0 trap; the TSL
@@ -486,14 +555,19 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val)
* the next DAC write. This is detected when FB_BUFFER2 MSB changes
* from 0x00 to 0xFF.
*/
- while (!(readl(devpriv->mmio + S626_P_FB_BUFFER2) & 0xff000000))
- ;
+ ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
+ s626_send_dac_wait_fb_buffer2_msb_ff);
+ if (ret) {
+ comedi_error(dev, "TSL timeout waiting for slot 0 to execute.");
+ return ret;
+ }
+ return 0;
}
/*
* Private helper function: Write setpoint to an application DAC channel.
*/
-static void s626_set_dac(struct comedi_device *dev, uint16_t chan,
+static int s626_set_dac(struct comedi_device *dev, uint16_t chan,
int16_t dacdata)
{
struct s626_private *devpriv = dev->private;
@@ -556,10 +630,10 @@ static void s626_set_dac(struct comedi_device *dev, uint16_t chan,
val |= ((uint32_t)(chan & 1) << 15); /* Address the DAC channel
* within the device. */
val |= (uint32_t)dacdata; /* Include DAC setpoint data. */
- s626_send_dac(dev, val);
+ return s626_send_dac(dev, val);
}
-static void s626_write_trim_dac(struct comedi_device *dev, uint8_t logical_chan,
+static int s626_write_trim_dac(struct comedi_device *dev, uint8_t logical_chan,
uint8_t dac_data)
{
struct s626_private *devpriv = dev->private;
@@ -606,17 +680,22 @@ static void s626_write_trim_dac(struct comedi_device *dev, uint8_t logical_chan,
* Address the DAC channel within the trimdac device.
* Include DAC setpoint data.
*/
- s626_send_dac(dev, (chan << 8) | dac_data);
+ return s626_send_dac(dev, (chan << 8) | dac_data);
}
-static void s626_load_trim_dacs(struct comedi_device *dev)
+static int s626_load_trim_dacs(struct comedi_device *dev)
{
uint8_t i;
+ int ret;
/* Copy TrimDac setpoint values from EEPROM to TrimDacs. */
- for (i = 0; i < ARRAY_SIZE(s626_trimchan); i++)
- s626_write_trim_dac(dev, i,
+ for (i = 0; i < ARRAY_SIZE(s626_trimchan); i++) {
+ ret = s626_write_trim_dac(dev, i,
s626_i2c_read(dev, s626_trimadrs[i]));
+ if (ret)
+ return ret;
+ }
+ return 0;
}
/* ****** COUNTER FUNCTIONS ******* */
@@ -1846,6 +1925,20 @@ static int s626_ai_rinsn(struct comedi_device *dev,
}
#endif
+static int s626_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct s626_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readl(devpriv->mmio + S626_P_PSR);
+ if (status & S626_PSR_GPIO2)
+ return 0;
+ return -EBUSY;
+}
+
static int s626_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -1856,6 +1949,7 @@ static int s626_ai_insn_read(struct comedi_device *dev,
uint16_t adc_spec = 0;
uint32_t gpio_image;
uint32_t tmp;
+ int ret;
int n;
/*
@@ -1897,8 +1991,9 @@ static int s626_ai_insn_read(struct comedi_device *dev,
*/
/* Wait for ADC done */
- while (!(readl(devpriv->mmio + S626_P_PSR) & S626_PSR_GPIO2))
- ;
+ ret = comedi_timeout(dev, s, insn, s626_ai_eoc, 0);
+ if (ret)
+ return ret;
/* Fetch ADC data */
if (n != 0) {
@@ -2299,6 +2394,7 @@ static int s626_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
{
struct s626_private *devpriv = dev->private;
int i;
+ int ret;
uint16_t chan = CR_CHAN(insn->chanspec);
int16_t dacdata;
@@ -2307,7 +2403,9 @@ static int s626_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
devpriv->ao_readback[CR_CHAN(insn->chanspec)] = data[i];
dacdata -= (0x1fff);
- s626_set_dac(dev, chan, dacdata);
+ ret = s626_set_dac(dev, chan, dacdata);
+ if (ret)
+ return ret;
}
return i;
@@ -2543,12 +2641,13 @@ static int s626_allocate_dma_buffers(struct comedi_device *dev)
return 0;
}
-static void s626_initialize(struct comedi_device *dev)
+static int s626_initialize(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
dma_addr_t phys_buf;
uint16_t chan;
int i;
+ int ret;
/* Enable DEBI and audio pins, enable I2C interface */
s626_mc_enable(dev, S626_MC1_DEBI | S626_MC1_AUDIO | S626_MC1_I2C,
@@ -2749,7 +2848,9 @@ static void s626_initialize(struct comedi_device *dev)
* sometimes causes the first few TrimDAC writes to malfunction.
*/
s626_load_trim_dacs(dev);
- s626_load_trim_dacs(dev);
+ ret = s626_load_trim_dacs(dev);
+ if (ret)
+ return ret;
/*
* Manually init all gate array hardware in case this is a soft
@@ -2763,8 +2864,11 @@ static void s626_initialize(struct comedi_device *dev)
* Init all DAC outputs to 0V and init all DAC setpoint and
* polarity images.
*/
- for (chan = 0; chan < S626_DAC_CHANNELS; chan++)
- s626_set_dac(dev, chan, 0);
+ for (chan = 0; chan < S626_DAC_CHANNELS; chan++) {
+ ret = s626_set_dac(dev, chan, 0);
+ if (ret)
+ return ret;
+ }
/* Init counters */
s626_counters_init(dev);
@@ -2780,6 +2884,8 @@ static void s626_initialize(struct comedi_device *dev)
/* Initialize the digital I/O subsystem */
s626_dio_init(dev);
+
+ return 0;
}
static int s626_auto_attach(struct comedi_device *dev,
@@ -2900,9 +3006,9 @@ static int s626_auto_attach(struct comedi_device *dev,
s->insn_read = s626_enc_insn_read;
s->insn_write = s626_enc_insn_write;
- s626_initialize(dev);
-
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
+ ret = s626_initialize(dev);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/staging/comedi/drivers/skel.c b/drivers/staging/comedi/drivers/skel.c
index 77e2059ff62e..39008cf30ecb 100644
--- a/drivers/staging/comedi/drivers/skel.c
+++ b/drivers/staging/comedi/drivers/skel.c
@@ -142,6 +142,29 @@ static int skel_ns_to_timer(unsigned int *ns, int round)
}
/*
+ * This function doesn't require a particular form, this is just
+ * what happens to be used in some of the drivers. The comedi_timeout()
+ * helper uses this callback to check for the end-of-conversion while
+ * waiting for up to 1 second. This function should return 0 when the
+ * conversion is finished and -EBUSY to keep waiting. Any other errno
+ * will terminate comedi_timeout() and return that errno to the caller.
+ * If the timeout occurs, comedi_timeout() will return -ETIMEDOUT.
+ */
+static int skel_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ /* status = inb(dev->iobase + SKEL_STATUS); */
+ status = 1;
+ if (status)
+ return 0;
+ return -EBUSY;
+}
+
+/*
* "instructions" read/write data in "one-shot" or "software-triggered"
* mode.
*/
@@ -149,9 +172,9 @@ static int skel_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
const struct skel_board *thisboard = comedi_board(dev);
- int n, i;
+ int n;
unsigned int d;
- unsigned int status;
+ int ret;
/* a typical programming sequence */
@@ -165,18 +188,10 @@ static int skel_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
/* trigger conversion */
/* outw(0,dev->iobase + SKEL_CONVERT); */
-#define TIMEOUT 100
/* wait for conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- status = 1;
- /* status = inb(dev->iobase + SKEL_STATUS); */
- if (status)
- break;
- }
- if (i == TIMEOUT) {
- dev_warn(dev->class_dev, "ai timeout\n");
- return -ETIMEDOUT;
- }
+ ret = comedi_timeout(dev, s, insn, skel_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
/* d = inw(dev->iobase + SKEL_AI_DATA); */
@@ -456,8 +471,6 @@ static int skel_common_attach(struct comedi_device *dev)
s->type = COMEDI_SUBD_UNUSED;
}
- dev_info(dev->class_dev, "skel: attached\n");
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
index df22a78d2b7e..848c30801580 100644
--- a/drivers/staging/comedi/drivers/ssv_dnp.c
+++ b/drivers/staging/comedi/drivers/ssv_dnp.c
@@ -161,8 +161,7 @@ static int dnp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
outb(PCMR, CSCIR);
outb((inb(CSCDR) & 0xAA), CSCDR);
- dev_info(dev->class_dev, "%s: attached\n", dev->board_name);
- return 1;
+ return 0;
}
static void dnp_detach(struct comedi_device *dev)
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 5f85c55c1109..d6fae11ee4e0 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -372,7 +372,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_src(&cmd->start_src,
TRIG_NOW | TRIG_EXT | TRIG_INT);
err |= cfc_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_FOLLOW | TRIG_EXT);
+ TRIG_FOLLOW | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
@@ -424,9 +424,6 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->convert_arg, tmp);
}
- if (cmd->scan_begin_src == TRIG_TIMER)
- err |= -EINVAL;
-
/* stop source */
switch (cmd->stop_src) {
case TRIG_COUNT:
@@ -514,36 +511,28 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
*/
devpriv->ignore = PACKETS_TO_IGNORE;
- if (cmd->chanlist_len > 0) {
- gain = CR_RANGE(cmd->chanlist[0]);
- for (i = 0; i < cmd->chanlist_len; ++i) {
- chan = CR_CHAN(cmd->chanlist[i]);
- if (chan != i) {
- dev_err(dev->class_dev,
- "channels are not consecutive\n");
- up(&devpriv->sem);
- return -EINVAL;
- }
- if ((gain != CR_RANGE(cmd->chanlist[i]))
- && (cmd->chanlist_len > 3)) {
- dev_err(dev->class_dev,
- "gain must be the same for all channels\n");
- up(&devpriv->sem);
- return -EINVAL;
- }
- if (i >= NUMCHANNELS) {
- dev_err(dev->class_dev, "chanlist too long\n");
- break;
- }
+ gain = CR_RANGE(cmd->chanlist[0]);
+ for (i = 0; i < cmd->chanlist_len; ++i) {
+ chan = CR_CHAN(cmd->chanlist[i]);
+ if (chan != i) {
+ dev_err(dev->class_dev,
+ "channels are not consecutive\n");
+ up(&devpriv->sem);
+ return -EINVAL;
+ }
+ if ((gain != CR_RANGE(cmd->chanlist[i]))
+ && (cmd->chanlist_len > 3)) {
+ dev_err(dev->class_dev,
+ "gain must be the same for all channels\n");
+ up(&devpriv->sem);
+ return -EINVAL;
+ }
+ if (i >= NUMCHANNELS) {
+ dev_err(dev->class_dev, "chanlist too long\n");
+ break;
}
}
steps = 0;
- if (cmd->scan_begin_src == TRIG_TIMER) {
- dev_err(dev->class_dev,
- "scan_begin_src==TRIG_TIMER not valid\n");
- up(&devpriv->sem);
- return -EINVAL;
- }
if (cmd->convert_src == TRIG_TIMER)
steps = (cmd->convert_arg * 30) / 1000;
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index 7dc5a18e69d4..8777f958c041 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -41,7 +41,8 @@ struct comedi_device *comedi_open(const char *filename)
if (strncmp(filename, "/dev/comedi", 11) != 0)
return NULL;
- minor = simple_strtoul(filename + 11, NULL, 0);
+ if (kstrtouint(filename + 11, 0, &minor))
+ return NULL;
if (minor >= COMEDI_NUM_BOARD_MINORS)
return NULL;
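
The kcomedilib change replaces simple_strtoul(), which cannot report errors and silently ignores trailing characters, with kstrtouint(), which fails on anything that is not a clean unsigned integer. A small illustration of the difference (the helper name is made up; it skips the 11-character "/dev/comedi" prefix just as comedi_open() does):

/*
 * "/dev/comedi3"  -> 0, *minor == 3
 * "/dev/comedi3x" -> -EINVAL (trailing junk is now rejected)
 */
static int example_parse_minor(const char *filename, unsigned int *minor)
{
	return kstrtouint(filename + 11, 0, minor);
}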
diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c
index da6bc5855ebc..91dea25b5724 100644
--- a/drivers/staging/comedi/proc.c
+++ b/drivers/staging/comedi/proc.c
@@ -1,27 +1,26 @@
/*
- module/proc.c
- /proc interface for comedi
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * /proc interface for comedi
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- This is some serious bloatware.
-
- Taken from Dave A.'s PCL-711 driver, 'cuz I thought it
- was cool.
-*/
+ * This is some serious bloatware.
+ *
+ * Taken from Dave A.'s PCL-711 driver, 'cuz I thought it
+ * was cool.
+ */
#include "comedidev.h"
#include "comedi_internal.h"
@@ -34,11 +33,8 @@ static int comedi_read(struct seq_file *m, void *v)
int devices_q = 0;
struct comedi_driver *driv;
- seq_printf(m,
- "comedi version " COMEDI_RELEASE "\n"
- "format string: %s\n",
- "\"%2d: %-20s %-20s %4d\", i, "
- "driver_name, board_name, n_subdevices");
+ seq_printf(m, "comedi version " COMEDI_RELEASE "\nformat string: %s\n",
+ "\"%2d: %-20s %-20s %4d\", i, driver_name, board_name, n_subdevices");
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
struct comedi_device *dev = comedi_dev_get_from_minor(i);
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index 46b3da686384..b6849545b810 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -143,28 +143,23 @@ int comedi_check_chanlist(struct comedi_subdevice *s, int n,
unsigned int chanspec;
int chan, range_len, i;
- if (s->range_table || s->range_table_list) {
- for (i = 0; i < n; i++) {
- chanspec = chanlist[i];
- chan = CR_CHAN(chanspec);
- if (s->range_table)
- range_len = s->range_table->length;
- else if (s->range_table_list && chan < s->n_chan)
- range_len = s->range_table_list[chan]->length;
- else
- range_len = 0;
- if (chan >= s->n_chan ||
- CR_RANGE(chanspec) >= range_len ||
- aref_invalid(s, chanspec)) {
- dev_warn(dev->class_dev,
- "bad chanlist[%d]=0x%08x chan=%d range length=%d\n",
- i, chanspec, chan, range_len);
- return -EINVAL;
- }
+ for (i = 0; i < n; i++) {
+ chanspec = chanlist[i];
+ chan = CR_CHAN(chanspec);
+ if (s->range_table)
+ range_len = s->range_table->length;
+ else if (s->range_table_list && chan < s->n_chan)
+ range_len = s->range_table_list[chan]->length;
+ else
+ range_len = 0;
+ if (chan >= s->n_chan ||
+ CR_RANGE(chanspec) >= range_len ||
+ aref_invalid(s, chanspec)) {
+ dev_warn(dev->class_dev,
+ "bad chanlist[%d]=0x%08x chan=%d range length=%d\n",
+ i, chanspec, chan, range_len);
+ return -EINVAL;
}
- } else {
- dev_err(dev->class_dev, "(bug) no range type list!\n");
- return -EINVAL;
}
return 0;
}
diff --git a/drivers/staging/crystalhd/bcm_70012_regs.h b/drivers/staging/crystalhd/bcm_70012_regs.h
index f3ab3146cd90..da199ad8e27e 100644
--- a/drivers/staging/crystalhd/bcm_70012_regs.h
+++ b/drivers/staging/crystalhd/bcm_70012_regs.h
@@ -31,7 +31,8 @@
#define BRCM_SHIFT(c, r, f) c##_##r##_##f##_SHIFT
#define GET_FIELD(m, c, r, f) \
- ((((m) & BRCM_MASK(c, r, f)) >> BRCM_SHIFT(c, r, f)) << BRCM_ALIGN(c, r, f))
+ ((((m) & BRCM_MASK(c, r, f)) >> BRCM_SHIFT(c, r, f)) << \
+ BRCM_ALIGN(c, r, f))
#define SET_FIELD(m, c, r, f, d) \
((m) = (((m) & ~BRCM_MASK(c, r, f)) | ((((d) >> BRCM_ALIGN(c, r, f)) << \
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index 8d0680d93684..4765d528279b 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -433,10 +433,12 @@ static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
}
#define crystalhd_hw_delete_ioq(adp, q) \
+do { \
if (q) { \
crystalhd_delete_dioq(adp, q); \
q = NULL; \
- }
+ } \
+} while (0)
static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
{
@@ -1437,7 +1439,7 @@ static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw,
crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
}
- return (tmp_lsts != hw->rx_list_sts[0]);
+ return tmp_lsts != hw->rx_list_sts[0];
}
static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw,
@@ -1507,7 +1509,7 @@ static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw,
crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
}
- return (tmp_lsts != hw->rx_list_sts[1]);
+ return tmp_lsts != hw->rx_list_sts[1];
}
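
The crystalhd_hw.c hunk wraps the multi-statement crystalhd_hw_delete_ioq() macro in do { ... } while (0). Without that wrapper, the semicolon after a macro invocation ends the enclosing if body early, so a following else no longer parses (or, for brace-less macros, binds to the wrong if). A self-contained sketch of the safe form, with made-up names:

#include <linux/slab.h>

#define DELETE_Q_SAFE(q)			\
do {						\
	if (q) {				\
		kfree(q);			\
		(q) = NULL;			\
	}					\
} while (0)

static int skipped;

static void example_cleanup(int want_cleanup, void **q)
{
	/*
	 * The do { } while (0) expansion is a single statement, so this
	 * if/else compiles and pairs exactly as written; the bare braced-if
	 * form of the macro would not.
	 */
	if (want_cleanup)
		DELETE_Q_SAFE(*q);
	else
		skipped++;
}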
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index 99eefd0291c3..20be9571990a 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -111,7 +111,7 @@ static void chd_dec_free_iodata(struct crystalhd_adp *adp,
spin_unlock_irqrestore(&adp->lock, flags);
}
-static inline int crystalhd_user_data(unsigned long ud, void *dr,
+static inline int crystalhd_user_data(void __user *ud, void *dr,
int size, int set)
{
int rc;
@@ -122,9 +122,9 @@ static inline int crystalhd_user_data(unsigned long ud, void *dr,
}
if (set)
- rc = copy_to_user((void *)ud, dr, size);
+ rc = copy_to_user(ud, dr, size);
else
- rc = copy_from_user(dr, (void *)ud, size);
+ rc = copy_from_user(dr, ud, size);
if (rc) {
BCMLOG_ERR("Invalid args for command\n");
@@ -153,7 +153,8 @@ static int chd_dec_fetch_cdata(struct crystalhd_adp *adp,
io->add_cdata_sz = m_sz;
ua_off = ua + sizeof(io->udata);
- rc = crystalhd_user_data(ua_off, io->add_cdata, io->add_cdata_sz, 0);
+ rc = crystalhd_user_data((void __user *)ua_off, io->add_cdata,
+ io->add_cdata_sz, 0);
if (rc) {
BCMLOG_ERR("failed to pull add_cdata sz:%x ua_off:%x\n",
io->add_cdata_sz, (unsigned int)ua_off);
@@ -178,7 +179,7 @@ static int chd_dec_release_cdata(struct crystalhd_adp *adp,
if (io->cmd != BCM_IOC_FW_DOWNLOAD) {
ua_off = ua + sizeof(io->udata);
- rc = crystalhd_user_data(ua_off, io->add_cdata,
+ rc = crystalhd_user_data((void __user *)ua_off, io->add_cdata,
io->add_cdata_sz, 1);
if (rc) {
BCMLOG_ERR(
@@ -208,7 +209,8 @@ static int chd_dec_proc_user_data(struct crystalhd_adp *adp,
return -EINVAL;
}
- rc = crystalhd_user_data(ua, &io->udata, sizeof(io->udata), set);
+ rc = crystalhd_user_data((void __user *)ua, &io->udata,
+ sizeof(io->udata), set);
if (rc) {
BCMLOG_ERR("failed to %s iodata\n", (set ? "set" : "get"));
return rc;
@@ -546,9 +548,10 @@ static int chd_dec_pci_probe(struct pci_dev *pdev,
int rc;
enum BC_STATUS sts = BC_STS_SUCCESS;
- BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x s_vendor:0x%04x s_device: 0x%04x\n",
- pdev->vendor, pdev->device, pdev->subsystem_vendor,
- pdev->subsystem_device);
+ BCMLOG(BCMLOG_DBG,
+ "PCI_INFO: Vendor:0x%04x Device:0x%04x s_vendor:0x%04x s_device: 0x%04x\n",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
pinfo = kzalloc(sizeof(struct crystalhd_adp), GFP_KERNEL);
if (!pinfo) {
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h
index 816e1cd5db62..49e1ef3a19af 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.h
+++ b/drivers/staging/crystalhd/crystalhd_lnx.h
@@ -58,11 +58,11 @@ struct crystalhd_adp {
unsigned long pci_mem_start;
uint32_t pci_mem_len;
- void *addr;
+ void __iomem *addr;
unsigned long pci_i2o_start;
uint32_t pci_i2o_len;
- void *i2o_addr;
+ void __iomem *i2o_addr;
unsigned int drv_data;
unsigned int dmabits; /* 32 | 64 */
diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c
index c3d024406337..3aabf75b7d97 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.c
+++ b/drivers/staging/crystalhd/crystalhd_misc.c
@@ -740,7 +740,7 @@ enum BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
dio->fb_size = ubuff_sz & 0x03;
if (dio->fb_size) {
res = copy_from_user(dio->fb_va,
- (void *)(uaddr + count - dio->fb_size),
+ (void __user *)(uaddr + count - dio->fb_size),
dio->fb_size);
if (res) {
BCMLOG_ERR("failed %d to copy %u fill bytes from %p\n",
diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
index 77ab72a2a061..0f63827acfb4 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.h
+++ b/drivers/staging/crystalhd/crystalhd_misc.h
@@ -225,7 +225,7 @@ do { \
#define BCMLOG_ERR(fmt, args...) \
do { \
if (g_linklog_level & BCMLOG_ERROR) \
- printk(KERN_ERR "*ERR*:%s:%d: "fmt, \
+ pr_err("*ERR*:%s:%d: "fmt, \
__FILE__, __LINE__, ##args); \
} while (0)
diff --git a/drivers/staging/cxt1e1/Makefile b/drivers/staging/cxt1e1/Makefile
index 2f217e9daac1..b879e7b553c2 100644
--- a/drivers/staging/cxt1e1/Makefile
+++ b/drivers/staging/cxt1e1/Makefile
@@ -4,7 +4,6 @@ ccflags-y := -DSBE_PMCC4_ENABLE
ccflags-y += -DSBE_ISR_TASKLET
cxt1e1-y := \
- ossiRelease.o \
musycc.o \
pmcc4_drv.o \
comet.o \
diff --git a/drivers/staging/cxt1e1/comet.c b/drivers/staging/cxt1e1/comet.c
index c4c8c0f9c959..7005ad022339 100644
--- a/drivers/staging/cxt1e1/comet.c
+++ b/drivers/staging/cxt1e1/comet.c
@@ -22,18 +22,20 @@
#include "comet.h"
#include "comet_tables.h"
-extern int cxt1e1_log_level;
#define COMET_NUM_SAMPLES 24 /* Number of entries in the waveform table */
#define COMET_NUM_UNITS 5 /* Number of points per entry in table */
/* forward references */
static void SetPwrLevel(struct s_comet_reg *comet);
-static void WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table);
-static void WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
+static void WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet,
+ u_int32_t *table);
+static void WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet,
+ u_int8_t table[COMET_NUM_SAMPLES]
+ [COMET_NUM_UNITS]);
-void *TWV_table[12] = {
+static void *TWV_table[12] = {
TWVLongHaul0DB, TWVLongHaul7_5DB, TWVLongHaul15DB, TWVLongHaul22_5DB,
TWVShortHaul0, TWVShortHaul1, TWVShortHaul2, TWVShortHaul3,
TWVShortHaul4, TWVShortHaul5,
@@ -50,6 +52,7 @@ lbo_tbl_lkup(int t1, int lbo) {
if (t1)
/* default T1 waveform table */
lbo = CFG_LBO_LH0;
+
else
/* default E1 waveform table */
lbo = CFG_LBO_E120;
@@ -58,8 +61,8 @@ lbo_tbl_lkup(int t1, int lbo) {
return lbo - 1;
}
-void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int clockmaster,
- u_int8_t moreParams)
+void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode,
+ int clockmaster, u_int8_t moreParams)
{
u_int8_t isT1mode;
/* T1 default */
@@ -146,7 +149,9 @@ void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int cl
/* t1RBOC enable(BOC:BitOriented Code) */
pci_write_32((u_int32_t *) &comet->t1_rboc_ena, 0x00);
if (isT1mode) {
- /* IBCD cfg: aka Inband Code Detection ** loopback code length set to */
+ /* IBCD cfg: aka Inband Code Detection ** loopback code length
+ * set to
+ */
/* 6 bit down, 5 bit up (assert) */
pci_write_32((u_int32_t *) &comet->ibcd_cfg, 0x04);
/* line loopback activate pattern */
@@ -286,7 +291,9 @@ void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int cl
/* 0x30: "BRIF cfg"; 0x20 is 'CMODE', 0x03 is (bit) rate */
/* note "rate bits can only be set once after reset" */
if (clockmaster) {
- /* CMODE == clockMode, 0=clock master (so all 3 others should be slave) */
+ /* CMODE == clockMode, 0=clock master
+ * (so all 3 others should be slave)
+ */
/* rate = 1.544 Mb/s */
if (isT1mode)
/* Comet 0 Master Mode(CMODE=0) */
@@ -398,7 +405,8 @@ void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int cl
** Returns: Nothing
*/
static void
-WrtXmtWaveform(ci_t *ci, struct s_comet_reg *comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
+WrtXmtWaveform(ci_t *ci, struct s_comet_reg *comet, u_int32_t sample,
+ u_int32_t unit, u_int8_t data)
{
u_int8_t WaveformAddr;
@@ -447,7 +455,7 @@ static void
WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table)
{
u_int32_t ramaddr;
- volatile u_int32_t value;
+ u_int32_t value;
for (ramaddr = 0; ramaddr < 256; ramaddr++) {
/*** the following lines are per Errata 7, 2.5 ***/
@@ -515,7 +523,7 @@ WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table)
static void
SetPwrLevel(struct s_comet_reg *comet)
{
- volatile u_int32_t temp;
+ u_int32_t temp;
/*
** Algorithm to Balance the Power Distribution of Ttip Tring
@@ -557,17 +565,21 @@ SetPwrLevel(struct s_comet_reg *comet)
static void
SetCometOps(struct s_comet_reg *comet)
{
- volatile u_int8_t rd_value;
+ u_int8_t rd_value;
if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2)) {
/* read the BRIF Configuration */
- rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_cfg);
+ rd_value = (u_int8_t) pci_read_32((u_int32_t *)
+ &comet->brif_cfg);
rd_value &= ~0x20;
- pci_write_32((u_int32_t *) &comet->brif_cfg, (u_int32_t) rd_value);
+ pci_write_32((u_int32_t *) &comet->brif_cfg,
+ (u_int32_t) rd_value);
/* read the BRIF Frame Pulse Configuration */
- rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_fpcfg);
+ rd_value = (u_int8_t) pci_read_32((u_int32_t *)
+ &comet->brif_fpcfg);
rd_value &= ~0x20;
- pci_write_32((u_int32_t *) &comet->brif_fpcfg, (u_int8_t) rd_value);
+ pci_write_32((u_int32_t *) &comet->brif_fpcfg,
+ (u_int8_t) rd_value);
} else {
/* read the BRIF Configuration */
rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_cfg);
diff --git a/drivers/staging/cxt1e1/comet_tables.c b/drivers/staging/cxt1e1/comet_tables.c
index 84931117da35..e96665ea3662 100644
--- a/drivers/staging/cxt1e1/comet_tables.c
+++ b/drivers/staging/cxt1e1/comet_tables.c
@@ -19,6 +19,7 @@
*/
#include <linux/types.h>
+#include "comet_tables.h"
/*****************************************************************************
*
diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c
index 95218e283966..ee9d39bbd251 100644
--- a/drivers/staging/cxt1e1/functions.c
+++ b/drivers/staging/cxt1e1/functions.c
@@ -25,7 +25,8 @@
#include "pmcc4.h"
#if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \
- defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
+defined(CONFIG_SBE_HDLC_V7_MODULE) || \
+defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
#define _v7_hdlc_ 1
#else
#define _v7_hdlc_ 0
@@ -33,9 +34,9 @@
#if _v7_hdlc_
#define V7(x) (x ## _v7)
-extern int hdlc_netif_rx_v7 (hdlc_device *, struct sk_buff *);
-extern int register_hdlc_device_v7 (hdlc_device *);
-extern int unregister_hdlc_device_v7 (hdlc_device *);
+extern int hdlc_netif_rx_v7(hdlc_device *, struct sk_buff *);
+extern int register_hdlc_device_v7(hdlc_device *);
+extern int unregister_hdlc_device_v7(hdlc_device *);
#else
#define V7(x) x
@@ -47,55 +48,54 @@ static int dummy = 0;
#endif
-extern int cxt1e1_log_level;
extern int drvr_state;
#if 1
u_int32_t
-pci_read_32 (u_int32_t *p)
+pci_read_32(u_int32_t *p)
{
#ifdef FLOW_DEBUG
- u_int32_t v;
+ u_int32_t v;
- FLUSH_PCI_READ ();
- v = le32_to_cpu (*p);
- if (cxt1e1_log_level >= LOG_DEBUG)
- pr_info("pci_read : %x = %x\n", (u_int32_t) p, v);
- return v;
+ FLUSH_PCI_READ();
+ v = le32_to_cpu(*p);
+ if (cxt1e1_log_level >= LOG_DEBUG)
+ pr_info("pci_read : %x = %x\n", (u_int32_t) p, v);
+ return v;
#else
- FLUSH_PCI_READ (); /* */
- return le32_to_cpu (*p);
+ FLUSH_PCI_READ(); /* */
+ return le32_to_cpu(*p);
#endif
}
void
-pci_write_32 (u_int32_t *p, u_int32_t v)
+pci_write_32(u_int32_t *p, u_int32_t v)
{
#ifdef FLOW_DEBUG
- if (cxt1e1_log_level >= LOG_DEBUG)
- pr_info("pci_write: %x = %x\n", (u_int32_t) p, v);
+ if (cxt1e1_log_level >= LOG_DEBUG)
+ pr_info("pci_write: %x = %x\n", (u_int32_t) p, v);
#endif
- *p = cpu_to_le32 (v);
- FLUSH_PCI_WRITE (); /* This routine is called from routines
- * which do multiple register writes
- * which themselves need flushing between
- * writes in order to guarantee write
- * ordering. It is less code-cumbersome
- * to flush here-in then to investigate
- * and code the many other register
- * writing routines. */
+	*p = cpu_to_le32(v);
+ FLUSH_PCI_WRITE(); /* This routine is called from routines
+ * which do multiple register writes
+ * which themselves need flushing between
+ * writes in order to guarantee write
+ * ordering. It is less code-cumbersome
+				 * to flush here-in than to investigate
+ * and code the many other register
+ * writing routines. */
}
#endif
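For reference, a minimal usage sketch of the two accessors above (illustrative only; comet, value and ci stand in for whatever the caller already has in scope): the write is posted by the bridge, so a caller that needs ordering follows it with a read-back, which is exactly what pci_flush_write() below does.

	pci_write_32((u_int32_t *) &comet->brif_cfg, value);	/* posted write */
	pci_flush_write(ci);	/* read back any device register to flush the bridge */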
void
-pci_flush_write (ci_t *ci)
+pci_flush_write(ci_t *ci)
{
- volatile u_int32_t v;
+ volatile u_int32_t v;
/* issue a PCI read to flush PCI write thru bridge */
- v = *(u_int32_t *) &ci->reg->glcd; /* any address would do */
+ v = *(u_int32_t *) &ci->reg->glcd; /* any address would do */
/*
* return nothing, this just reads PCI bridge interface to flush
@@ -105,55 +105,53 @@ pci_flush_write (ci_t *ci)
static void
-watchdog_func (unsigned long arg)
+watchdog_func(unsigned long arg)
{
- struct watchdog *wd = (void *) arg;
-
- if (drvr_state != SBE_DRVR_AVAILABLE)
- {
- if (cxt1e1_log_level >= LOG_MONITOR)
- pr_warning("%s: drvr not available (%x)\n", __func__, drvr_state);
- return;
- }
- schedule_work (&wd->work);
- mod_timer (&wd->h, jiffies + wd->ticks);
+ struct watchdog *wd = (void *) arg;
+
+ if (drvr_state != SBE_DRVR_AVAILABLE) {
+ if (cxt1e1_log_level >= LOG_MONITOR)
+ pr_warning("%s: drvr not available (%x)\n",
+ __func__, drvr_state);
+ return;
+ }
+ schedule_work(&wd->work);
+ mod_timer(&wd->h, jiffies + wd->ticks);
}
-int OS_init_watchdog(struct watchdog *wdp, void (*f) (void *), void *c, int usec)
+int OS_init_watchdog(struct watchdog *wdp, void (*f) (void *),
+ void *c, int usec)
{
- wdp->func = f;
- wdp->softc = c;
- wdp->ticks = (HZ) * (usec / 1000) / 1000;
- INIT_WORK(&wdp->work, (void *)f);
- init_timer (&wdp->h);
- {
- ci_t *ci = (ci_t *) c;
-
- wdp->h.data = (unsigned long) &ci->wd;
- }
- wdp->h.function = watchdog_func;
- return 0;
+ wdp->func = f;
+ wdp->softc = c;
+ wdp->ticks = (HZ) * (usec / 1000) / 1000;
+ INIT_WORK(&wdp->work, (void *)f);
+ init_timer(&wdp->h);
+ {
+ ci_t *ci = (ci_t *) c;
+
+ wdp->h.data = (unsigned long) &ci->wd;
+ }
+ wdp->h.function = watchdog_func;
+ return 0;
}
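As a rough check of the ticks arithmetic above (values illustrative; integer division applies at each step):

	/* usec = 250000, HZ = 250:
	 *   ticks = HZ * (usec / 1000) / 1000
	 *         = 250 * 250 / 1000
	 *         = 62 jiffies  (~248 ms after integer truncation)
	 */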
void
-OS_uwait (int usec, char *description)
+OS_uwait(int usec, char *description)
{
- int tmp;
-
- if (usec >= 1000)
- {
- mdelay (usec / 1000);
- /* now delay residual */
- tmp = (usec / 1000) * 1000; /* round */
- tmp = usec - tmp; /* residual */
- if (tmp)
- { /* wait on residual */
- udelay (tmp);
- }
- } else
- {
- udelay (usec);
- }
+ int tmp;
+
+ if (usec >= 1000) {
+ mdelay(usec / 1000);
+ /* now delay residual */
+ tmp = (usec / 1000) * 1000; /* round */
+ tmp = usec - tmp; /* residual */
+ if (tmp) { /* wait on residual */
+ udelay(tmp);
+ }
+ } else {
+ udelay(usec);
+ }
}
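Worked example of the millisecond/residual split above (usec value illustrative):

	/* usec = 2500:
	 *   mdelay(2500 / 1000)            -> mdelay(2)   (2 ms)
	 *   tmp = 2500 - (2500/1000)*1000  -> 500          (residual usecs)
	 *   udelay(500)                                     (0.5 ms)
	 */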
/* dummy short delay routine called as a subroutine so that compiler
@@ -161,96 +159,95 @@ OS_uwait (int usec, char *description)
*/
void
-OS_uwait_dummy (void)
+OS_uwait_dummy(void)
{
#ifndef USE_MAX_INT_DELAY
- dummy++;
+ dummy++;
#else
- udelay (1);
+ udelay(1);
#endif
}
void
-OS_sem_init (void *sem, int state)
+OS_sem_init(void *sem, int state)
{
- switch (state)
- {
- case SEM_TAKEN:
- sema_init((struct semaphore *) sem, 0);
- break;
- case SEM_AVAILABLE:
+ switch (state) {
+ case SEM_TAKEN:
+ sema_init((struct semaphore *) sem, 0);
+ break;
+ case SEM_AVAILABLE:
sema_init((struct semaphore *) sem, 1);
- break;
- default: /* otherwise, set sem.count to state's
- * value */
- sema_init (sem, state);
- break;
- }
+ break;
+ default: /* otherwise, set sem.count to state's
+ * value */
+ sema_init(sem, state);
+ break;
+ }
}
int
-sd_line_is_ok (void *user)
+sd_line_is_ok(void *user)
{
- struct net_device *ndev = (struct net_device *) user;
+ struct net_device *ndev = (struct net_device *) user;
- return netif_carrier_ok (ndev);
+ return netif_carrier_ok(ndev);
}
void
-sd_line_is_up (void *user)
+sd_line_is_up(void *user)
{
- struct net_device *ndev = (struct net_device *) user;
+ struct net_device *ndev = (struct net_device *) user;
- netif_carrier_on (ndev);
- return;
+ netif_carrier_on(ndev);
+ return;
}
void
-sd_line_is_down (void *user)
+sd_line_is_down(void *user)
{
- struct net_device *ndev = (struct net_device *) user;
+ struct net_device *ndev = (struct net_device *) user;
- netif_carrier_off (ndev);
- return;
+ netif_carrier_off(ndev);
+ return;
}
void
-sd_disable_xmit (void *user)
+sd_disable_xmit(void *user)
{
- struct net_device *dev = (struct net_device *) user;
+ struct net_device *dev = (struct net_device *) user;
- netif_stop_queue (dev);
- return;
+ netif_stop_queue(dev);
+ return;
}
void
-sd_enable_xmit (void *user)
+sd_enable_xmit(void *user)
{
- struct net_device *dev = (struct net_device *) user;
+ struct net_device *dev = (struct net_device *) user;
- netif_wake_queue (dev);
- return;
+ netif_wake_queue(dev);
+ return;
}
int
-sd_queue_stopped (void *user)
+sd_queue_stopped(void *user)
{
- struct net_device *ndev = (struct net_device *) user;
+ struct net_device *ndev = (struct net_device *) user;
- return netif_queue_stopped (ndev);
+ return netif_queue_stopped(ndev);
}
void sd_recv_consume(void *token, size_t len, void *user)
{
- struct net_device *ndev = user;
- struct sk_buff *skb = token;
+ struct net_device *ndev = user;
+ struct sk_buff *skb = token;
- skb->dev = ndev;
- skb_put (skb, len);
- skb->protocol = hdlc_type_trans(skb, ndev);
- netif_rx(skb);
+ skb->dev = ndev;
+ skb_put(skb, len);
+ skb->protocol = hdlc_type_trans(skb, ndev);
+ netif_rx(skb);
}
@@ -263,86 +260,76 @@ void sd_recv_consume(void *token, size_t len, void *user)
extern ci_t *CI; /* dummy pointer to board ZERO's data */
void
-VMETRO_TRACE (void *x)
+VMETRO_TRIGGER(ci_t *ci, int x)
{
- u_int32_t y = (u_int32_t) x;
-
- pci_write_32 ((u_int32_t *) &CI->cpldbase->leds, y);
-}
-
-
-void
-VMETRO_TRIGGER (ci_t *ci, int x)
-{
- struct s_comet_reg *comet;
- volatile u_int32_t data;
-
- comet = ci->port[0].cometbase; /* default to COMET # 0 */
-
- switch (x)
- {
- default:
- case 0:
- data = pci_read_32 ((u_int32_t *) &comet->__res24); /* 0x90 */
- break;
- case 1:
- data = pci_read_32 ((u_int32_t *) &comet->__res25); /* 0x94 */
- break;
- case 2:
- data = pci_read_32 ((u_int32_t *) &comet->__res26); /* 0x98 */
- break;
- case 3:
- data = pci_read_32 ((u_int32_t *) &comet->__res27); /* 0x9C */
- break;
- case 4:
- data = pci_read_32 ((u_int32_t *) &comet->__res88); /* 0x220 */
- break;
- case 5:
- data = pci_read_32 ((u_int32_t *) &comet->__res89); /* 0x224 */
- break;
- case 6:
- data = pci_read_32 ((u_int32_t *) &comet->__res8A); /* 0x228 */
- break;
- case 7:
- data = pci_read_32 ((u_int32_t *) &comet->__res8B); /* 0x22C */
- break;
- case 8:
- data = pci_read_32 ((u_int32_t *) &comet->__resA0); /* 0x280 */
- break;
- case 9:
- data = pci_read_32 ((u_int32_t *) &comet->__resA1); /* 0x284 */
- break;
- case 10:
- data = pci_read_32 ((u_int32_t *) &comet->__resA2); /* 0x288 */
- break;
- case 11:
- data = pci_read_32 ((u_int32_t *) &comet->__resA3); /* 0x28C */
- break;
- case 12:
- data = pci_read_32 ((u_int32_t *) &comet->__resA4); /* 0x290 */
- break;
- case 13:
- data = pci_read_32 ((u_int32_t *) &comet->__resA5); /* 0x294 */
- break;
- case 14:
- data = pci_read_32 ((u_int32_t *) &comet->__resA6); /* 0x298 */
- break;
- case 15:
- data = pci_read_32 ((u_int32_t *) &comet->__resA7); /* 0x29C */
- break;
- case 16:
- data = pci_read_32 ((u_int32_t *) &comet->__res74); /* 0x1D0 */
- break;
- case 17:
- data = pci_read_32 ((u_int32_t *) &comet->__res75); /* 0x1D4 */
- break;
- case 18:
- data = pci_read_32 ((u_int32_t *) &comet->__res76); /* 0x1D8 */
- break;
- case 19:
- data = pci_read_32 ((u_int32_t *) &comet->__res77); /* 0x1DC */
- break;
- }
+ struct s_comet_reg *comet;
+ volatile u_int32_t data;
+
+ comet = ci->port[0].cometbase; /* default to COMET # 0 */
+
+ switch (x) {
+ default:
+ case 0:
+ data = pci_read_32((u_int32_t *) &comet->__res24); /* 0x90 */
+ break;
+ case 1:
+ data = pci_read_32((u_int32_t *) &comet->__res25); /* 0x94 */
+ break;
+ case 2:
+ data = pci_read_32((u_int32_t *) &comet->__res26); /* 0x98 */
+ break;
+ case 3:
+ data = pci_read_32((u_int32_t *) &comet->__res27); /* 0x9C */
+ break;
+ case 4:
+ data = pci_read_32((u_int32_t *) &comet->__res88); /* 0x220 */
+ break;
+ case 5:
+ data = pci_read_32((u_int32_t *) &comet->__res89); /* 0x224 */
+ break;
+ case 6:
+ data = pci_read_32((u_int32_t *) &comet->__res8A); /* 0x228 */
+ break;
+ case 7:
+ data = pci_read_32((u_int32_t *) &comet->__res8B); /* 0x22C */
+ break;
+ case 8:
+ data = pci_read_32((u_int32_t *) &comet->__resA0); /* 0x280 */
+ break;
+ case 9:
+ data = pci_read_32((u_int32_t *) &comet->__resA1); /* 0x284 */
+ break;
+ case 10:
+ data = pci_read_32((u_int32_t *) &comet->__resA2); /* 0x288 */
+ break;
+ case 11:
+ data = pci_read_32((u_int32_t *) &comet->__resA3); /* 0x28C */
+ break;
+ case 12:
+ data = pci_read_32((u_int32_t *) &comet->__resA4); /* 0x290 */
+ break;
+ case 13:
+ data = pci_read_32((u_int32_t *) &comet->__resA5); /* 0x294 */
+ break;
+ case 14:
+ data = pci_read_32((u_int32_t *) &comet->__resA6); /* 0x298 */
+ break;
+ case 15:
+ data = pci_read_32((u_int32_t *) &comet->__resA7); /* 0x29C */
+ break;
+ case 16:
+ data = pci_read_32((u_int32_t *) &comet->__res74); /* 0x1D0 */
+ break;
+ case 17:
+ data = pci_read_32((u_int32_t *) &comet->__res75); /* 0x1D4 */
+ break;
+ case 18:
+ data = pci_read_32((u_int32_t *) &comet->__res76); /* 0x1D8 */
+ break;
+ case 19:
+ data = pci_read_32((u_int32_t *) &comet->__res77); /* 0x1DC */
+ break;
+ }
}
diff --git a/drivers/staging/cxt1e1/hwprobe.c b/drivers/staging/cxt1e1/hwprobe.c
index 02b4f8f1aca5..9b4198b1e634 100644
--- a/drivers/staging/cxt1e1/hwprobe.c
+++ b/drivers/staging/cxt1e1/hwprobe.c
@@ -31,360 +31,352 @@
#include "sbeproc.h"
#endif
-extern int cxt1e1_log_level;
extern int error_flag;
extern int drvr_state;
/* forward references */
-void c4_stopwd (ci_t *);
-struct net_device * __init c4_add_dev (hdw_info_t *, int, unsigned long, unsigned long, int, int);
+void c4_stopwd(ci_t *);
+struct net_device * __init c4_add_dev(hdw_info_t *, int, unsigned long,
+ unsigned long, int, int);
struct s_hdw_info hdw_info[MAX_BOARDS];
-void __init
-show_two (hdw_info_t *hi, int brdno)
+void __init
+show_two(hdw_info_t *hi, int brdno)
{
- ci_t *ci;
- struct pci_dev *pdev;
- char *bid;
- char *bp, banner[80];
- char sn[6];
-
- bp = banner;
- memset (banner, 0, 80); /* clear print buffer */
-
- ci = (ci_t *)(netdev_priv(hi->ndev));
- bid = sbeid_get_bdname (ci);
- switch (hi->promfmt)
- {
- case PROM_FORMAT_TYPE1:
- memcpy (sn, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
- break;
- case PROM_FORMAT_TYPE2:
- memcpy (sn, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
- break;
- default:
- memset (sn, 0, 6);
- break;
- }
-
- sprintf (banner, "%s: %s S/N %06X, MUSYCC Rev %02X",
- hi->devname, bid,
- ((sn[3] << 16) & 0xff0000) |
- ((sn[4] << 8) & 0x00ff00) |
- (sn[5] & 0x0000ff),
- (u_int8_t) hi->revid[0]);
-
- pr_info("%s\n", banner);
-
- pdev = hi->pdev[0];
- pr_info("%s: %s at v/p=%lx/%lx (%02x:%02x.%x) irq %d\n",
- hi->devname, "MUSYCC",
- (unsigned long) hi->addr_mapped[0], hi->addr[0],
- hi->pci_busno, (u_int8_t) PCI_SLOT (pdev->devfn),
- (u_int8_t) PCI_FUNC (pdev->devfn), pdev->irq);
-
- pdev = hi->pdev[1];
- pr_info("%s: %s at v/p=%lx/%lx (%02x:%02x.%x) irq %d\n",
- hi->devname, "EBUS ",
- (unsigned long) hi->addr_mapped[1], hi->addr[1],
- hi->pci_busno, (u_int8_t) PCI_SLOT (pdev->devfn),
- (u_int8_t) PCI_FUNC (pdev->devfn), pdev->irq);
+ ci_t *ci;
+ struct pci_dev *pdev;
+ char *bid;
+ char banner[80];
+ char sn[6] = {0,};
+
+ ci = (ci_t *)(netdev_priv(hi->ndev));
+ bid = sbeid_get_bdname(ci);
+ switch (hi->promfmt) {
+ case PROM_FORMAT_TYPE1:
+ memcpy(sn, hi->mfg_info.pft1.Serial, 6);
+ break;
+ case PROM_FORMAT_TYPE2:
+ memcpy(sn, hi->mfg_info.pft2.Serial, 6);
+ break;
+ }
+
+ sprintf(banner, "%s: %s S/N %06X, MUSYCC Rev %02X",
+ hi->devname, bid,
+ ((sn[3] << 16) & 0xff0000) |
+ ((sn[4] << 8) & 0x00ff00) |
+ (sn[5] & 0x0000ff),
+ (u_int8_t) hi->revid[0]);
+
+ pr_info("%s\n", banner);
+
+ pdev = hi->pdev[0];
+ pr_info("%s: %s at v/p=%lx/%lx (%02x:%02x.%x) irq %d\n",
+ hi->devname, "MUSYCC",
+ (unsigned long) hi->addr_mapped[0], hi->addr[0],
+ hi->pci_busno, (u_int8_t) PCI_SLOT(pdev->devfn),
+ (u_int8_t) PCI_FUNC(pdev->devfn), pdev->irq);
+
+ pdev = hi->pdev[1];
+ pr_info("%s: %s at v/p=%lx/%lx (%02x:%02x.%x) irq %d\n",
+ hi->devname, "EBUS ",
+ (unsigned long) hi->addr_mapped[1], hi->addr[1],
+ hi->pci_busno, (u_int8_t) PCI_SLOT(pdev->devfn),
+ (u_int8_t) PCI_FUNC(pdev->devfn), pdev->irq);
}
-void __init
-hdw_sn_get (hdw_info_t *hi, int brdno)
+void __init
+hdw_sn_get(hdw_info_t *hi, int brdno)
{
- /* obtain hardware EEPROM information */
- long addr;
+ /* obtain hardware EEPROM information */
+ long addr;
- addr = (long) hi->addr_mapped[1] + EEPROM_OFFSET;
+ addr = (long) hi->addr_mapped[1] + EEPROM_OFFSET;
- /* read EEPROM with largest known format size... */
- pmc_eeprom_read_buffer (addr, 0, (char *) hi->mfg_info.data, sizeof (FLD_TYPE2));
+ /* read EEPROM with largest known format size... */
+ pmc_eeprom_read_buffer(addr, 0, (char *)hi->mfg_info.data,
+ sizeof(FLD_TYPE2));
#if 0
- {
- unsigned char *ucp = (unsigned char *) &hi->mfg_info.data;
-
- pr_info("eeprom[00]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 0), *(ucp + 1), *(ucp + 2), *(ucp + 3), *(ucp + 4), *(ucp + 5), *(ucp + 6), *(ucp + 7));
- pr_info("eeprom[08]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 8), *(ucp + 9), *(ucp + 10), *(ucp + 11), *(ucp + 12), *(ucp + 13), *(ucp + 14), *(ucp + 15));
- pr_info("eeprom[16]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 16), *(ucp + 17), *(ucp + 18), *(ucp + 19), *(ucp + 20), *(ucp + 21), *(ucp + 22), *(ucp + 23));
- pr_info("eeprom[24]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 24), *(ucp + 25), *(ucp + 26), *(ucp + 27), *(ucp + 28), *(ucp + 29), *(ucp + 30), *(ucp + 31));
- pr_info("eeprom[32]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 32), *(ucp + 33), *(ucp + 34), *(ucp + 35), *(ucp + 36), *(ucp + 37), *(ucp + 38), *(ucp + 39));
- pr_info("eeprom[40]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 40), *(ucp + 41), *(ucp + 42), *(ucp + 43), *(ucp + 44), *(ucp + 45), *(ucp + 46), *(ucp + 47));
- }
+ {
+ unsigned char *ucp = (unsigned char *) &hi->mfg_info.data;
+
+ pr_info("eeprom[00]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 0), *(ucp + 1), *(ucp + 2), *(ucp + 3),
+ *(ucp + 4), *(ucp + 5), *(ucp + 6), *(ucp + 7));
+ pr_info("eeprom[08]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 8), *(ucp + 9), *(ucp + 10), *(ucp + 11),
+ *(ucp + 12), *(ucp + 13), *(ucp + 14), *(ucp + 15));
+ pr_info("eeprom[16]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 16), *(ucp + 17), *(ucp + 18), *(ucp + 19),
+ *(ucp + 20), *(ucp + 21), *(ucp + 22), *(ucp + 23));
+ pr_info("eeprom[24]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 24), *(ucp + 25), *(ucp + 26), *(ucp + 27),
+ *(ucp + 28), *(ucp + 29), *(ucp + 30), *(ucp + 31));
+ pr_info("eeprom[32]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 32), *(ucp + 33), *(ucp + 34), *(ucp + 35),
+ *(ucp + 36), *(ucp + 37), *(ucp + 38), *(ucp + 39));
+ pr_info("eeprom[40]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 40), *(ucp + 41), *(ucp + 42), *(ucp + 43),
+ *(ucp + 44), *(ucp + 45), *(ucp + 46), *(ucp + 47));
+ }
#endif
#if 0
- pr_info("sn: %x %x %x %x %x %x\n",
- hi->mfg_info.Serial[0],
- hi->mfg_info.Serial[1],
- hi->mfg_info.Serial[2],
- hi->mfg_info.Serial[3],
- hi->mfg_info.Serial[4],
- hi->mfg_info.Serial[5]);
+ pr_info("sn: %x %x %x %x %x %x\n",
+ hi->mfg_info.Serial[0],
+ hi->mfg_info.Serial[1],
+ hi->mfg_info.Serial[2],
+ hi->mfg_info.Serial[3],
+ hi->mfg_info.Serial[4],
+ hi->mfg_info.Serial[5]);
#endif
- if ((hi->promfmt = pmc_verify_cksum (&hi->mfg_info.data)) == PROM_FORMAT_Unk)
- {
- /* bad crc, data is suspect */
- if (cxt1e1_log_level >= LOG_WARN)
- pr_info("%s: EEPROM cksum error\n", hi->devname);
- hi->mfg_info_sts = EEPROM_CRCERR;
- } else
- hi->mfg_info_sts = EEPROM_OK;
+ hi->promfmt = pmc_verify_cksum(&hi->mfg_info.data);
+ if (hi->promfmt == PROM_FORMAT_Unk) {
+ /* bad crc, data is suspect */
+ if (cxt1e1_log_level >= LOG_WARN)
+ pr_info("%s: EEPROM cksum error\n", hi->devname);
+ hi->mfg_info_sts = EEPROM_CRCERR;
+ } else
+ hi->mfg_info_sts = EEPROM_OK;
}
-void __init
-prep_hdw_info (void)
+void __init
+prep_hdw_info(void)
{
- hdw_info_t *hi;
- int i;
-
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- hi->pci_busno = 0xff;
- hi->pci_slot = 0xff;
- hi->pci_pin[0] = 0;
- hi->pci_pin[1] = 0;
- hi->ndev = NULL;
- hi->addr[0] = 0L;
- hi->addr[1] = 0L;
- hi->addr_mapped[0] = 0L;
- hi->addr_mapped[1] = 0L;
- }
+ hdw_info_t *hi;
+ int i;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ hi->pci_busno = 0xff;
+ hi->pci_slot = 0xff;
+ hi->pci_pin[0] = 0;
+ hi->pci_pin[1] = 0;
+ hi->ndev = NULL;
+ hi->addr[0] = 0L;
+ hi->addr[1] = 0L;
+ hi->addr_mapped[0] = 0L;
+ hi->addr_mapped[1] = 0L;
+ }
}
void
-cleanup_ioremap (void)
+cleanup_ioremap(void)
{
- hdw_info_t *hi;
- int i;
-
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->pci_slot == 0xff)
- break;
- if (hi->addr_mapped[0])
- {
- iounmap ((void *) (hi->addr_mapped[0]));
- release_mem_region ((long) hi->addr[0], hi->len[0]);
- hi->addr_mapped[0] = 0;
- }
- if (hi->addr_mapped[1])
- {
- iounmap ((void *) (hi->addr_mapped[1]));
- release_mem_region ((long) hi->addr[1], hi->len[1]);
- hi->addr_mapped[1] = 0;
- }
- }
+ hdw_info_t *hi;
+ int i;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->pci_slot == 0xff)
+ break;
+ if (hi->addr_mapped[0]) {
+ iounmap((void *)(hi->addr_mapped[0]));
+ release_mem_region((long) hi->addr[0], hi->len[0]);
+ hi->addr_mapped[0] = 0;
+ }
+ if (hi->addr_mapped[1]) {
+ iounmap((void *)(hi->addr_mapped[1]));
+ release_mem_region((long) hi->addr[1], hi->len[1]);
+ hi->addr_mapped[1] = 0;
+ }
+ }
}
void
-cleanup_devs (void)
+cleanup_devs(void)
{
- hdw_info_t *hi;
- int i;
-
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->pci_slot == 0xff || !hi->ndev)
- break;
- c4_stopwd(netdev_priv(hi->ndev));
+ hdw_info_t *hi;
+ int i;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->pci_slot == 0xff || !hi->ndev)
+ break;
+ c4_stopwd(netdev_priv(hi->ndev));
#ifdef CONFIG_PROC_FS
- sbecom_proc_brd_cleanup(netdev_priv(hi->ndev));
+ sbecom_proc_brd_cleanup(netdev_priv(hi->ndev));
#endif
- unregister_netdev (hi->ndev);
- free_irq (hi->pdev[0]->irq, hi->ndev);
+ unregister_netdev(hi->ndev);
+ free_irq(hi->pdev[0]->irq, hi->ndev);
#ifdef CONFIG_SBE_PMCC4_NCOMM
- free_irq (hi->pdev[1]->irq, hi->ndev);
+ free_irq(hi->pdev[1]->irq, hi->ndev);
#endif
- OS_kfree (hi->ndev);
- }
+ OS_kfree(hi->ndev);
+ }
}
static int __init
-c4_hdw_init (struct pci_dev *pdev, int found)
+c4_hdw_init(struct pci_dev *pdev, int found)
{
- hdw_info_t *hi;
- int i;
- int fun, slot;
- unsigned char busno = 0xff;
-
- /* our MUSYCC chip supports two functions, 0 & 1 */
- if ((fun = PCI_FUNC (pdev->devfn)) > 1)
- {
- pr_warning("unexpected devfun: 0x%x\n", pdev->devfn);
- return 0;
- }
- if (pdev->bus) /* obtain bus number */
- busno = pdev->bus->number;
- else
- busno = 0; /* default for system PCI inconsistency */
- slot = pdev->devfn & ~0x07;
-
- /*
- * Functions 0 & 1 for a given board (identified by same bus(busno) and
- * slot(slot)) are placed into the same 'hardware' structure. The first
- * part of the board's functionality will be placed into an unpopulated
- * element, identified by "slot==(0xff)". The second part of a board's
- * functionality will match the previously loaded slot/busno.
- */
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- /*
- * match with board's first found interface, otherwise this is first
- * found
- */
- if ((hi->pci_slot == 0xff) || /* new board */
- ((hi->pci_slot == slot) && (hi->bus == pdev->bus)))
- break; /* found for-loop exit */
- }
- if (i == MAX_BOARDS) /* no match in above loop means MAX
- * exceeded */
- {
- pr_warning("exceeded number of allowed devices (>%d)?\n", MAX_BOARDS);
- return 0;
- }
- if (pdev->bus)
- hi->pci_busno = pdev->bus->number;
- else
- hi->pci_busno = 0; /* default for system PCI inconsistency */
- hi->pci_slot = slot;
- pci_read_config_byte (pdev, PCI_INTERRUPT_PIN, &hi->pci_pin[fun]);
- pci_read_config_byte (pdev, PCI_REVISION_ID, &hi->revid[fun]);
- hi->bus = pdev->bus;
- hi->addr[fun] = pci_resource_start (pdev, 0);
- hi->len[fun] = pci_resource_end (pdev, 0) - hi->addr[fun] + 1;
- hi->pdev[fun] = pdev;
-
- {
- /*
- * create device name from module name, plus add the appropriate
- * board number
- */
- char *cp = hi->devname;
-
- strcpy (cp, KBUILD_MODNAME);
- cp += strlen (cp); /* reposition */
- *cp++ = '-';
- *cp++ = '0' + (found / 2); /* there are two found interfaces per
- * board */
- *cp = 0; /* termination */
- }
-
- return 1;
+ hdw_info_t *hi;
+ int i;
+ int fun, slot;
+ unsigned char busno = 0xff;
+
+ /* our MUSYCC chip supports two functions, 0 & 1 */
+ fun = PCI_FUNC(pdev->devfn);
+ if (fun > 1) {
+ pr_warning("unexpected devfun: 0x%x\n", pdev->devfn);
+ return 0;
+ }
+
+ /* obtain bus number */
+ if (pdev->bus)
+ busno = pdev->bus->number;
+ else
+ busno = 0; /* default for system PCI inconsistency */
+ slot = pdev->devfn & ~0x07;
+
+ /*
+ * Functions 0 & 1 for a given board (identified by same bus(busno) and
+ * slot(slot)) are placed into the same 'hardware' structure. The first
+ * part of the board's functionality will be placed into an unpopulated
+ * element, identified by "slot==(0xff)". The second part of a board's
+ * functionality will match the previously loaded slot/busno.
+ */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ /*
+		 * match with board's first found interface; otherwise this is
+		 * the first found
+ */
+ if ((hi->pci_slot == 0xff) || /* new board */
+ ((hi->pci_slot == slot) && (hi->bus == pdev->bus)))
+ break; /* found for-loop exit */
+ }
+
+ /* no match in above loop means MAX exceeded */
+ if (i == MAX_BOARDS) {
+ pr_warning("exceeded number of allowed devices (>%d)?\n",
+ MAX_BOARDS);
+ return 0;
+ }
+
+ if (pdev->bus)
+ hi->pci_busno = pdev->bus->number;
+ else
+ hi->pci_busno = 0; /* default for system PCI inconsistency */
+
+ hi->pci_slot = slot;
+ pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &hi->pci_pin[fun]);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hi->revid[fun]);
+ hi->bus = pdev->bus;
+ hi->addr[fun] = pci_resource_start(pdev, 0);
+ hi->len[fun] = pci_resource_end(pdev, 0) - hi->addr[fun] + 1;
+ hi->pdev[fun] = pdev;
+
+ {
+ /*
+ * create device name from module name, plus add the appropriate
+ * board number
+ */
+ char *cp = hi->devname;
+
+ strcpy(cp, KBUILD_MODNAME);
+ cp += strlen(cp); /* reposition */
+ *cp++ = '-';
+ *cp++ = '0' + (found / 2); /* there are two found interfaces per
+ * board */
+ *cp = 0; /* termination */
+ }
+
+ return 1;
}
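To make the found/2 naming above concrete (board counts illustrative): each physical board is probed as two PCI functions, so the interface counter advances twice per board.

	/* found = 0 or 1  ->  KBUILD_MODNAME "-0"   (first board)
	 * found = 2 or 3  ->  KBUILD_MODNAME "-1"   (second board)
	 */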
-
-status_t __init
-c4hw_attach_all (void)
+status_t __init
+c4hw_attach_all(void)
{
- hdw_info_t *hi;
- struct pci_dev *pdev = NULL;
- int found = 0, i, j;
-
- error_flag = 0;
- prep_hdw_info ();
- /*** scan PCI bus for all possible boards */
- while ((pdev = pci_get_device (PCI_VENDOR_ID_CONEXANT,
- PCI_DEVICE_ID_CN8474,
- pdev)))
- {
- if (c4_hdw_init (pdev, found))
- found++;
- }
- if (!found)
- {
- pr_warning("No boards found\n");
- return -ENODEV;
- }
- /* sanity check for consistent hardware found */
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->pci_slot != 0xff && (!hi->addr[0] || !hi->addr[1]))
- {
- pr_warning("%s: something very wrong with pci_get_device\n",
- hi->devname);
- return -EIO;
- }
- }
- /* bring board's memory regions on/line */
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->pci_slot == 0xff)
- break;
- for (j = 0; j < 2; j++)
- {
- if (!request_mem_region (hi->addr[j], hi->len[j], hi->devname))
- {
- pr_warning("%s: memory in use, addr=0x%lx, len=0x%lx ?\n",
- hi->devname, hi->addr[j], hi->len[j]);
- cleanup_ioremap ();
- return -ENOMEM;
- }
- hi->addr_mapped[j] = (unsigned long) ioremap (hi->addr[j], hi->len[j]);
- if (!hi->addr_mapped[j])
- {
- pr_warning("%s: ioremap fails, addr=0x%lx, len=0x%lx ?\n",
- hi->devname, hi->addr[j], hi->len[j]);
- cleanup_ioremap ();
- return -ENOMEM;
- }
+ hdw_info_t *hi;
+ struct pci_dev *pdev = NULL;
+ int found = 0, i, j;
+
+ error_flag = 0;
+ prep_hdw_info();
+ /*** scan PCI bus for all possible boards */
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_CONEXANT,
+ PCI_DEVICE_ID_CN8474,
+ pdev))) {
+ if (c4_hdw_init(pdev, found))
+ found++;
+ }
+
+ if (!found) {
+ pr_warning("No boards found\n");
+ return -ENODEV;
+ }
+
+ /* sanity check for consistent hardware found */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->pci_slot != 0xff && (!hi->addr[0] || !hi->addr[1])) {
+ pr_warning("%s: something very wrong with pci_get_device\n",
+ hi->devname);
+ return -EIO;
+ }
+ }
+	/* bring board's memory regions online */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->pci_slot == 0xff)
+ break;
+ for (j = 0; j < 2; j++) {
+ if (!request_mem_region(hi->addr[j], hi->len[j], hi->devname)) {
+ pr_warning("%s: memory in use, addr=0x%lx, len=0x%lx ?\n",
+ hi->devname, hi->addr[j], hi->len[j]);
+ cleanup_ioremap();
+ return -ENOMEM;
+ }
+
+ hi->addr_mapped[j] = (unsigned long)ioremap(hi->addr[j], hi->len[j]);
+ if (!hi->addr_mapped[j]) {
+ pr_warning("%s: ioremap fails, addr=0x%lx, len=0x%lx ?\n",
+ hi->devname, hi->addr[j], hi->len[j]);
+ cleanup_ioremap();
+ return -ENOMEM;
+ }
#ifdef SBE_MAP_DEBUG
- pr_warning("%s: io remapped from phys %x to virt %x\n",
- hi->devname, (u_int32_t) hi->addr[j], (u_int32_t) hi->addr_mapped[j]);
+ pr_warning("%s: io remapped from phys %x to virt %x\n",
+ hi->devname, (u_int32_t) hi->addr[j],
+ (u_int32_t) hi->addr_mapped[j]);
#endif
- }
- }
-
- drvr_state = SBE_DRVR_AVAILABLE;
-
- /* Have now memory mapped all boards. Now allow board's access to system */
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->pci_slot == 0xff)
- break;
- if (pci_enable_device (hi->pdev[0]) ||
- pci_enable_device (hi->pdev[1]))
- {
- drvr_state = SBE_DRVR_DOWN;
- pr_warning("%s: failed to enable card %d slot %d\n",
- hi->devname, i, hi->pci_slot);
- cleanup_devs ();
- cleanup_ioremap ();
- return -EIO;
- }
- pci_set_master (hi->pdev[0]);
- pci_set_master (hi->pdev[1]);
- if (!(hi->ndev = c4_add_dev (hi, i, (long) hi->addr_mapped[0],
- (long) hi->addr_mapped[1],
- hi->pdev[0]->irq,
- hi->pdev[1]->irq)))
- {
- drvr_state = SBE_DRVR_DOWN;
- cleanup_ioremap ();
- /* NOTE: c4_add_dev() does its own device cleanup */
+ }
+ }
+
+ drvr_state = SBE_DRVR_AVAILABLE;
+
+	/* All boards now memory mapped; allow each board access to the system */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->pci_slot == 0xff)
+ break;
+ if (pci_enable_device(hi->pdev[0]) ||
+ pci_enable_device(hi->pdev[1])) {
+ drvr_state = SBE_DRVR_DOWN;
+ pr_warning("%s: failed to enable card %d slot %d\n",
+ hi->devname, i, hi->pci_slot);
+ cleanup_devs();
+ cleanup_ioremap();
+ return -EIO;
+ }
+ pci_set_master(hi->pdev[0]);
+ pci_set_master(hi->pdev[1]);
+ hi->ndev = c4_add_dev(hi, i, (long) hi->addr_mapped[0],
+ (long) hi->addr_mapped[1],
+ hi->pdev[0]->irq,
+ hi->pdev[1]->irq);
+ if (!hi->ndev) {
+ drvr_state = SBE_DRVR_DOWN;
+ cleanup_ioremap();
+ /* NOTE: c4_add_dev() does its own device cleanup */
#if 0
- cleanup_devs ();
+ cleanup_devs();
#endif
- return error_flag; /* error_flag set w/in add_dev() */
- }
- show_two (hi, i); /* displays found information */
- }
- return 0;
+ return error_flag; /* error_flag set w/in add_dev() */
+ }
+ show_two(hi, i); /* displays found information */
+ }
+ return 0;
}
/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/libsbew.h b/drivers/staging/cxt1e1/libsbew.h
index 4254c0426db9..bd2bfba604b3 100644
--- a/drivers/staging/cxt1e1/libsbew.h
+++ b/drivers/staging/cxt1e1/libsbew.h
@@ -514,36 +514,36 @@ struct sbecom_port_param
};
typedef struct wancfg wcfg_t;
- extern wcfg_t *wancfg_init (char *, char *);
- extern int wancfg_card_blink (wcfg_t *, int);
- extern int wancfg_ctl (wcfg_t *, int, void *, int, void *, int);
- extern int wancfg_del_card_stats (wcfg_t *);
- extern int wancfg_del_chan_stats (wcfg_t *, int);
- extern int wancfg_enable_ports (wcfg_t *, int);
- extern int wancfg_free (wcfg_t *);
- extern int wancfg_get_brdaddr (wcfg_t *, struct sbe_brd_addr *);
- extern int wancfg_get_brdinfo (wcfg_t *, struct sbe_brd_info *);
- extern int wancfg_get_card (wcfg_t *, struct sbecom_card_param *);
- extern int wancfg_get_card_chan_stats (wcfg_t *, struct sbecom_chan_stats *);
- extern int wancfg_get_card_sn (wcfg_t *);
- extern int wancfg_get_card_stats (wcfg_t *, struct temux_card_stats *);
- extern int wancfg_get_chan (wcfg_t *, int, struct sbecom_chan_param *);
- extern int wancfg_get_chan_stats (wcfg_t *, int, struct sbecom_chan_stats *);
- extern int wancfg_get_drvinfo (wcfg_t *, int, struct sbe_drv_info *);
- extern int wancfg_get_framer (wcfg_t *, int, struct sbecom_framer_param *);
- extern int wancfg_get_iid (wcfg_t *, int, struct sbe_iid_info *);
- extern int wancfg_get_sn (wcfg_t *, unsigned int *);
- extern int wancfg_read (wcfg_t *, int, struct sbecom_wrt_vec *);
- extern int wancfg_reset_device (wcfg_t *, int);
- extern int wancfg_set_card (wcfg_t *, struct sbecom_card_param *);
- extern int wancfg_set_chan (wcfg_t *, int, struct sbecom_chan_param *);
- extern int wancfg_set_framer (wcfg_t *, int, struct sbecom_framer_param *);
- extern int wancfg_set_loglevel (wcfg_t *, uint);
- extern int wancfg_write (wcfg_t *, int, struct sbecom_wrt_vec *);
+ extern wcfg_t *wancfg_init(char *, char *);
+ extern int wancfg_card_blink(wcfg_t *, int);
+ extern int wancfg_ctl(wcfg_t *, int, void *, int, void *, int);
+ extern int wancfg_del_card_stats(wcfg_t *);
+ extern int wancfg_del_chan_stats(wcfg_t *, int);
+ extern int wancfg_enable_ports(wcfg_t *, int);
+ extern int wancfg_free(wcfg_t *);
+ extern int wancfg_get_brdaddr(wcfg_t *, struct sbe_brd_addr *);
+ extern int wancfg_get_brdinfo(wcfg_t *, struct sbe_brd_info *);
+ extern int wancfg_get_card(wcfg_t *, struct sbecom_card_param *);
+ extern int wancfg_get_card_chan_stats(wcfg_t *, struct sbecom_chan_stats *);
+ extern int wancfg_get_card_sn(wcfg_t *);
+ extern int wancfg_get_card_stats(wcfg_t *, struct temux_card_stats *);
+ extern int wancfg_get_chan(wcfg_t *, int, struct sbecom_chan_param *);
+ extern int wancfg_get_chan_stats(wcfg_t *, int, struct sbecom_chan_stats *);
+ extern int wancfg_get_drvinfo(wcfg_t *, int, struct sbe_drv_info *);
+ extern int wancfg_get_framer(wcfg_t *, int, struct sbecom_framer_param *);
+ extern int wancfg_get_iid(wcfg_t *, int, struct sbe_iid_info *);
+ extern int wancfg_get_sn(wcfg_t *, unsigned int *);
+ extern int wancfg_read(wcfg_t *, int, struct sbecom_wrt_vec *);
+ extern int wancfg_reset_device(wcfg_t *, int);
+ extern int wancfg_set_card(wcfg_t *, struct sbecom_card_param *);
+ extern int wancfg_set_chan(wcfg_t *, int, struct sbecom_chan_param *);
+ extern int wancfg_set_framer(wcfg_t *, int, struct sbecom_framer_param *);
+ extern int wancfg_set_loglevel(wcfg_t *, uint);
+ extern int wancfg_write(wcfg_t *, int, struct sbecom_wrt_vec *);
#ifdef NOT_YET_COMMON
- extern int wancfg_get_tsioc (wcfg_t *, struct wanc1t3_ts_hdr *, struct wanc1t3_ts_param *);
- extern int wancfg_set_tsioc (wcfg_t *, struct wanc1t3_ts_param *);
+ extern int wancfg_get_tsioc(wcfg_t *, struct wanc1t3_ts_hdr *, struct wanc1t3_ts_param *);
+ extern int wancfg_set_tsioc(wcfg_t *, struct wanc1t3_ts_param *);
#endif
#endif /*** _INC_LIBSBEW_H_ ***/
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 79206cb3fb94..b02f5ade6661 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -31,7 +31,7 @@
#include "pmcc4_private.h"
#include "sbeproc.h"
-/*****************************************************************************************
+/*******************************************************************************
* Error out early if we have compiler trouble.
*
* (This section is included from the kernel's init/main.c as a friendly
@@ -50,43 +50,42 @@
#warning gcc-4.1.0 is known to miscompile the kernel. A different compiler version is recommended.
#endif
-/*****************************************************************************************/
+/*******************************************************************************/
#define CHANNAME "hdlc"
/*******************************************************************/
/* forward references */
-status_t c4_chan_work_init (mpi_t *, mch_t *);
-void musycc_wq_chan_restart (void *);
-status_t __init c4_init (ci_t *, u_char *, u_char *);
-status_t __init c4_init2 (ci_t *);
-ci_t *__init c4_new (void *);
-int __init c4hw_attach_all (void);
-void __init hdw_sn_get (hdw_info_t *, int);
+status_t c4_chan_work_init(mpi_t *, mch_t *);
+void musycc_wq_chan_restart(void *);
+status_t __init c4_init(ci_t *, u_char *, u_char *);
+status_t __init c4_init2(ci_t *);
+ci_t *__init c4_new(void *);
+int __init c4hw_attach_all(void);
+void __init hdw_sn_get(hdw_info_t *, int);
#ifdef CONFIG_SBE_PMCC4_NCOMM
-irqreturn_t c4_ebus_intr_th_handler (void *);
+irqreturn_t c4_ebus_intr_th_handler(void *);
#endif
-int c4_frame_rw (ci_t *, struct sbecom_port_param *);
-status_t c4_get_port (ci_t *, int);
-int c4_loop_port (ci_t *, int, u_int8_t);
-int c4_musycc_rw (ci_t *, struct c4_musycc_param *);
-int c4_new_chan (ci_t *, int, int, void *);
-status_t c4_set_port (ci_t *, int);
-int c4_pld_rw (ci_t *, struct sbecom_port_param *);
-void cleanup_devs (void);
-void cleanup_ioremap (void);
-status_t musycc_chan_down (ci_t *, int);
-irqreturn_t musycc_intr_th_handler (void *);
-int musycc_start_xmit (ci_t *, int, void *);
-
-extern char pmcc4_OSSI_release[];
+int c4_frame_rw(ci_t *, struct sbecom_port_param *);
+status_t c4_get_port(ci_t *, int);
+int c4_loop_port(ci_t *, int, u_int8_t);
+int c4_musycc_rw(ci_t *, struct c4_musycc_param *);
+int c4_new_chan(ci_t *, int, int, void *);
+status_t c4_set_port(ci_t *, int);
+int c4_pld_rw(ci_t *, struct sbecom_port_param *);
+void cleanup_devs(void);
+void cleanup_ioremap(void);
+status_t musycc_chan_down(ci_t *, int);
+irqreturn_t musycc_intr_th_handler(void *);
+int musycc_start_xmit(ci_t *, int, void *);
+
extern ci_t *CI;
extern struct s_hdw_info hdw_info[];
#if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \
- defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
+ defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
#define _v7_hdlc_ 1
#else
#define _v7_hdlc_ 0
@@ -94,9 +93,9 @@ extern struct s_hdw_info hdw_info[];
#if _v7_hdlc_
#define V7(x) (x ## _v7)
-extern int hdlc_netif_rx_v7 (hdlc_device *, struct sk_buff *);
-extern int register_hdlc_device_v7 (hdlc_device *);
-extern int unregister_hdlc_device_v7 (hdlc_device *);
+extern int hdlc_netif_rx_v7(hdlc_device *, struct sk_buff *);
+extern int register_hdlc_device_v7(hdlc_device *);
+extern int unregister_hdlc_device_v7(hdlc_device *);
#else
#define V7(x) x
@@ -104,11 +103,11 @@ extern int unregister_hdlc_device_v7 (hdlc_device *);
int error_flag; /* module load error reporting */
int cxt1e1_log_level = LOG_ERROR;
-int log_level_default = LOG_ERROR;
+static int log_level_default = LOG_ERROR;
module_param(cxt1e1_log_level, int, 0444);
int cxt1e1_max_mru = MUSYCC_MRU;
-int max_mru_default = MUSYCC_MRU;
+static int max_mru_default = MUSYCC_MRU;
module_param(cxt1e1_max_mru, int, 0444);
int cxt1e1_max_mtu = MUSYCC_MTU;
@@ -127,33 +126,23 @@ module_param(max_rxdesc_used, int, 0444);
/****************************************************************************/
/****************************************************************************/
-void *
-getuserbychan (int channum)
+void *
+getuserbychan(int channum)
{
- mch_t *ch;
+ mch_t *ch;
- ch = c4_find_chan (channum);
- return ch ? ch->user : NULL;
+ ch = c4_find_chan(channum);
+ return ch ? ch->user : NULL;
}
-char *
-get_hdlc_name (hdlc_device *hdlc)
+char *
+get_hdlc_name(hdlc_device *hdlc)
{
- struct c4_priv *priv = hdlc->priv;
- struct net_device *dev = getuserbychan (priv->channum);
-
- return dev->name;
-}
+ struct c4_priv *priv = hdlc->priv;
+ struct net_device *dev = getuserbychan(priv->channum);
-
-static status_t
-mkret (int bsd)
-{
- if (bsd > 0)
- return -bsd;
- else
- return bsd;
+ return dev->name;
}
/***************************************************************************/
@@ -179,958 +168,946 @@ mkret (int bsd)
* within a port's group.
*/
void
-c4_wk_chan_restart (mch_t *ch)
+c4_wk_chan_restart(mch_t *ch)
{
- mpi_t *pi = ch->up;
+ mpi_t *pi = ch->up;
#ifdef RLD_RESTART_DEBUG
- pr_info(">> %s: queueing Port %d Chan %d, mch_t @ %p\n",
- __func__, pi->portnum, ch->channum, ch);
+ pr_info(">> %s: queueing Port %d Chan %d, mch_t @ %p\n",
+ __func__, pi->portnum, ch->channum, ch);
#endif
- /* create new entry w/in workqueue for this channel and let'er rip */
+ /* create new entry w/in workqueue for this channel and let'er rip */
- /** queue_work (struct workqueue_struct *queue,
- ** struct work_struct *work);
- **/
- queue_work (pi->wq_port, &ch->ch_work);
+ /** queue_work(struct workqueue_struct *queue,
+ ** struct work_struct *work);
+ **/
+ queue_work(pi->wq_port, &ch->ch_work);
}
status_t
-c4_wk_chan_init (mpi_t *pi, mch_t *ch)
+c4_wk_chan_init(mpi_t *pi, mch_t *ch)
{
- /*
- * this will be used to restart a stopped channel
- */
-
- /** INIT_WORK (struct work_struct *work,
- ** void (*function)(void *),
- ** void *data);
- **/
- INIT_WORK(&ch->ch_work, (void *)musycc_wq_chan_restart);
- return 0; /* success */
+ /*
+ * this will be used to restart a stopped channel
+ */
+
+ /** INIT_WORK(struct work_struct *work,
+ ** void (*function)(void *),
+ ** void *data);
+ **/
+ INIT_WORK(&ch->ch_work, (void *)musycc_wq_chan_restart);
+ return 0; /* success */
}
status_t
-c4_wq_port_init (mpi_t *pi)
+c4_wq_port_init(mpi_t *pi)
{
- char name[16], *np; /* NOTE: name of the queue limited by system
- * to 10 characters */
+ char name[16]; /* NOTE: name of the queue limited by system
+ * to 10 characters */
+ if (pi->wq_port)
+ return 0; /* already initialized */
- if (pi->wq_port)
- return 0; /* already initialized */
-
- np = name;
- memset (name, 0, 16);
- sprintf (np, "%s%d", pi->up->devname, pi->portnum); /* IE pmcc4-01) */
+	/* i.e., pmcc4-01 */
+ snprintf(name, sizeof(name), "%s%d", pi->up->devname, pi->portnum);
#ifdef RLD_RESTART_DEBUG
- pr_info(">> %s: creating workqueue <%s> for Port %d.\n",
- __func__, name, pi->portnum); /* RLD DEBUG */
+ pr_info(">> %s: creating workqueue <%s> for Port %d.\n",
+ __func__, name, pi->portnum); /* RLD DEBUG */
#endif
- if (!(pi->wq_port = create_singlethread_workqueue (name)))
- return -ENOMEM;
- return 0; /* success */
+ pi->wq_port = create_singlethread_workqueue(name);
+ if (!pi->wq_port)
+ return -ENOMEM;
+ return 0; /* success */
}
void
-c4_wq_port_cleanup (mpi_t *pi)
+c4_wq_port_cleanup(mpi_t *pi)
{
- /*
- * PORT POINT: cannot call this if WQ is statically allocated w/in
- * structure since it calls kfree(wq);
- */
- if (pi->wq_port)
- {
- destroy_workqueue (pi->wq_port); /* this also calls
- * flush_workqueue() */
- pi->wq_port = NULL;
- }
+ /*
+ * PORT POINT: cannot call this if WQ is statically allocated w/in
+ * structure since it calls kfree(wq);
+ */
+ if (pi->wq_port) {
+ destroy_workqueue(pi->wq_port); /* this also calls
+ * flush_workqueue() */
+ pi->wq_port = NULL;
+ }
}
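A minimal sketch of how the three work-queue helpers above fit together across a port's lifetime (a sketch only; error paths elided, and pi/ch come from the surrounding driver structures):

	if (c4_wq_port_init(pi))		/* one single-thread queue per port */
		return -ENOMEM;
	c4_wk_chan_init(pi, ch);		/* arm ch->ch_work for later restarts */

	/* later, from the service path, requeue a stalled channel */
	c4_wk_chan_restart(ch);

	/* teardown: flushes outstanding work, then destroys the queue */
	c4_wq_port_cleanup(pi);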
/***************************************************************************/
-irqreturn_t
-c4_linux_interrupt (int irq, void *dev_instance)
+static irqreturn_t
+c4_linux_interrupt(int irq, void *dev_instance)
{
- struct net_device *ndev = dev_instance;
+ struct net_device *ndev = dev_instance;
- return musycc_intr_th_handler(netdev_priv(ndev));
+ return musycc_intr_th_handler(netdev_priv(ndev));
}
#ifdef CONFIG_SBE_PMCC4_NCOMM
-irqreturn_t
-c4_ebus_interrupt (int irq, void *dev_instance)
+static irqreturn_t
+c4_ebus_interrupt(int irq, void *dev_instance)
{
- struct net_device *ndev = dev_instance;
+ struct net_device *ndev = dev_instance;
- return c4_ebus_intr_th_handler(netdev_priv(ndev));
+ return c4_ebus_intr_th_handler(netdev_priv(ndev));
}
#endif
static int
-void_open (struct net_device *ndev)
+void_open(struct net_device *ndev)
{
- pr_info("%s: trying to open master device !\n", ndev->name);
- return -1;
+ pr_info("%s: trying to open master device !\n", ndev->name);
+ return -1;
}
static int
-chan_open (struct net_device *ndev)
+chan_open(struct net_device *ndev)
{
- hdlc_device *hdlc = dev_to_hdlc (ndev);
- const struct c4_priv *priv = hdlc->priv;
- int ret;
-
- if ((ret = hdlc_open (ndev)))
- {
- pr_info("hdlc_open failure, err %d.\n", ret);
- return ret;
- }
- if ((ret = c4_chan_up (priv->ci, priv->channum)))
- return -ret;
- try_module_get (THIS_MODULE);
- netif_start_queue (ndev);
- return 0; /* no error = success */
+ hdlc_device *hdlc = dev_to_hdlc(ndev);
+ const struct c4_priv *priv = hdlc->priv;
+ int ret;
+
+ ret = hdlc_open(ndev);
+ if (ret) {
+ pr_info("hdlc_open failure, err %d.\n", ret);
+ return ret;
+ }
+
+ ret = c4_chan_up(priv->ci, priv->channum);
+ if (ret < 0)
+ return ret;
+ try_module_get(THIS_MODULE);
+ netif_start_queue(ndev);
+ return 0; /* no error = success */
}
static int
-chan_close (struct net_device *ndev)
+chan_close(struct net_device *ndev)
{
- hdlc_device *hdlc = dev_to_hdlc (ndev);
- const struct c4_priv *priv = hdlc->priv;
-
- netif_stop_queue (ndev);
- musycc_chan_down ((ci_t *) 0, priv->channum);
- hdlc_close (ndev);
- module_put (THIS_MODULE);
- return 0;
+ hdlc_device *hdlc = dev_to_hdlc(ndev);
+ const struct c4_priv *priv = hdlc->priv;
+
+ netif_stop_queue(ndev);
+ musycc_chan_down((ci_t *) 0, priv->channum);
+ hdlc_close(ndev);
+ module_put(THIS_MODULE);
+ return 0;
}
static int
-chan_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
+chan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- return hdlc_ioctl (dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifr, cmd);
}
static int
-chan_attach_noop (struct net_device *ndev, unsigned short foo_1, unsigned short foo_2)
+chan_attach_noop(struct net_device *ndev, unsigned short foo_1,
+ unsigned short foo_2)
{
- return 0; /* our driver has nothing to do here, show's
- * over, go home */
+ /* our driver has nothing to do here, show's
+ * over, go home
+ */
+ return 0;
}
static struct net_device_stats *
-chan_get_stats (struct net_device *ndev)
+chan_get_stats(struct net_device *ndev)
{
- mch_t *ch;
- struct net_device_stats *nstats;
- struct sbecom_chan_stats *stats;
- int channum;
-
- {
- struct c4_priv *priv;
-
- priv = (struct c4_priv *) dev_to_hdlc (ndev)->priv;
- channum = priv->channum;
- }
-
- ch = c4_find_chan (channum);
- if (ch == NULL)
- return NULL;
-
- nstats = &ndev->stats;
- stats = &ch->s;
-
- memset (nstats, 0, sizeof (struct net_device_stats));
- nstats->rx_packets = stats->rx_packets;
- nstats->tx_packets = stats->tx_packets;
- nstats->rx_bytes = stats->rx_bytes;
- nstats->tx_bytes = stats->tx_bytes;
- nstats->rx_errors = stats->rx_length_errors +
- stats->rx_over_errors +
- stats->rx_crc_errors +
- stats->rx_frame_errors +
- stats->rx_fifo_errors +
- stats->rx_missed_errors;
- nstats->tx_errors = stats->tx_dropped +
- stats->tx_aborted_errors +
- stats->tx_fifo_errors;
- nstats->rx_dropped = stats->rx_dropped;
- nstats->tx_dropped = stats->tx_dropped;
-
- nstats->rx_length_errors = stats->rx_length_errors;
- nstats->rx_over_errors = stats->rx_over_errors;
- nstats->rx_crc_errors = stats->rx_crc_errors;
- nstats->rx_frame_errors = stats->rx_frame_errors;
- nstats->rx_fifo_errors = stats->rx_fifo_errors;
- nstats->rx_missed_errors = stats->rx_missed_errors;
-
- nstats->tx_aborted_errors = stats->tx_aborted_errors;
- nstats->tx_fifo_errors = stats->tx_fifo_errors;
-
- return nstats;
+ mch_t *ch;
+ struct net_device_stats *nstats;
+ struct sbecom_chan_stats *stats;
+ int channum;
+
+ {
+ struct c4_priv *priv;
+
+ priv = (struct c4_priv *)dev_to_hdlc(ndev)->priv;
+ channum = priv->channum;
+ }
+
+ ch = c4_find_chan(channum);
+ if (ch == NULL)
+ return NULL;
+
+ nstats = &ndev->stats;
+ stats = &ch->s;
+
+ memset(nstats, 0, sizeof(struct net_device_stats));
+ nstats->rx_packets = stats->rx_packets;
+ nstats->tx_packets = stats->tx_packets;
+ nstats->rx_bytes = stats->rx_bytes;
+ nstats->tx_bytes = stats->tx_bytes;
+ nstats->rx_errors = stats->rx_length_errors +
+ stats->rx_over_errors +
+ stats->rx_crc_errors +
+ stats->rx_frame_errors +
+ stats->rx_fifo_errors +
+ stats->rx_missed_errors;
+ nstats->tx_errors = stats->tx_dropped +
+ stats->tx_aborted_errors +
+ stats->tx_fifo_errors;
+ nstats->rx_dropped = stats->rx_dropped;
+ nstats->tx_dropped = stats->tx_dropped;
+
+ nstats->rx_length_errors = stats->rx_length_errors;
+ nstats->rx_over_errors = stats->rx_over_errors;
+ nstats->rx_crc_errors = stats->rx_crc_errors;
+ nstats->rx_frame_errors = stats->rx_frame_errors;
+ nstats->rx_fifo_errors = stats->rx_fifo_errors;
+ nstats->rx_missed_errors = stats->rx_missed_errors;
+
+ nstats->tx_aborted_errors = stats->tx_aborted_errors;
+ nstats->tx_fifo_errors = stats->tx_fifo_errors;
+
+ return nstats;
}
static ci_t *
-get_ci_by_dev (struct net_device *ndev)
+get_ci_by_dev(struct net_device *ndev)
{
- return (ci_t *)(netdev_priv(ndev));
+ return (ci_t *)(netdev_priv(ndev));
}
static int
-c4_linux_xmit (struct sk_buff *skb, struct net_device *ndev)
+c4_linux_xmit(struct sk_buff *skb, struct net_device *ndev)
{
- const struct c4_priv *priv;
- int rval;
+ const struct c4_priv *priv;
+ int rval;
- hdlc_device *hdlc = dev_to_hdlc (ndev);
+ hdlc_device *hdlc = dev_to_hdlc(ndev);
- priv = hdlc->priv;
+ priv = hdlc->priv;
- rval = musycc_start_xmit (priv->ci, priv->channum, skb);
- return rval;
+ rval = musycc_start_xmit(priv->ci, priv->channum, skb);
+ return rval;
}
static const struct net_device_ops chan_ops = {
- .ndo_open = chan_open,
- .ndo_stop = chan_close,
- .ndo_start_xmit = c4_linux_xmit,
- .ndo_do_ioctl = chan_dev_ioctl,
- .ndo_get_stats = chan_get_stats,
+ .ndo_open = chan_open,
+ .ndo_stop = chan_close,
+ .ndo_start_xmit = c4_linux_xmit,
+ .ndo_do_ioctl = chan_dev_ioctl,
+ .ndo_get_stats = chan_get_stats,
};
static struct net_device *
-create_chan (struct net_device *ndev, ci_t *ci,
- struct sbecom_chan_param *cp)
+create_chan(struct net_device *ndev, ci_t *ci,
+ struct sbecom_chan_param *cp)
{
- hdlc_device *hdlc;
- struct net_device *dev;
- hdw_info_t *hi;
- int ret;
-
- if (c4_find_chan (cp->channum))
- return NULL; /* channel already exists */
-
- {
- struct c4_priv *priv;
-
- /* allocate then fill in private data structure */
- priv = OS_kmalloc (sizeof (struct c4_priv));
- if (!priv)
- {
- pr_warning("%s: no memory for net_device !\n", ci->devname);
- return NULL;
- }
- dev = alloc_hdlcdev (priv);
- if (!dev)
- {
- pr_warning("%s: no memory for hdlc_device !\n", ci->devname);
- OS_kfree (priv);
- return NULL;
- }
- priv->ci = ci;
- priv->channum = cp->channum;
- }
-
- hdlc = dev_to_hdlc (dev);
-
- dev->base_addr = 0; /* not I/O mapped */
- dev->irq = ndev->irq;
- dev->type = ARPHRD_RAWHDLC;
- *dev->name = 0; /* default ifconfig name = "hdlc" */
-
- hi = (hdw_info_t *) ci->hdw_info;
- if (hi->mfg_info_sts == EEPROM_OK)
- {
- switch (hi->promfmt)
- {
- case PROM_FORMAT_TYPE1:
- memcpy (dev->dev_addr, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
- break;
- case PROM_FORMAT_TYPE2:
- memcpy (dev->dev_addr, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
- break;
- default:
- memset (dev->dev_addr, 0, 6);
- break;
- }
- } else
- {
- memset (dev->dev_addr, 0, 6);
- }
-
- hdlc->xmit = c4_linux_xmit;
-
- dev->netdev_ops = &chan_ops;
- /*
- * The native hdlc stack calls this 'attach' routine during
- * hdlc_raw_ioctl(), passing parameters for line encoding and parity.
- * Since hdlc_raw_ioctl() stack does not interrogate whether an 'attach'
- * routine is actually registered or not, we supply a dummy routine which
- * does nothing (since encoding and parity are setup for our driver via a
- * special configuration application).
- */
-
- hdlc->attach = chan_attach_noop;
-
- rtnl_unlock (); /* needed due to Ioctl calling sequence */
- ret = register_hdlc_device (dev);
- /* NOTE: <stats> setting must occur AFTER registration in order to "take" */
- dev->tx_queue_len = MAX_DEFAULT_IFQLEN;
-
- rtnl_lock (); /* needed due to Ioctl calling sequence */
- if (ret)
- {
- if (cxt1e1_log_level >= LOG_WARN)
- pr_info("%s: create_chan[%d] registration error = %d.\n",
- ci->devname, cp->channum, ret);
- free_netdev (dev); /* cleanup */
- return NULL; /* failed to register */
- }
- return dev;
+ hdlc_device *hdlc;
+ struct net_device *dev;
+ hdw_info_t *hi;
+ int ret;
+
+ if (c4_find_chan(cp->channum))
+ return NULL; /* channel already exists */
+
+ {
+ struct c4_priv *priv;
+
+ /* allocate then fill in private data structure */
+ priv = OS_kmalloc(sizeof(struct c4_priv));
+ if (!priv) {
+ pr_warning("%s: no memory for net_device !\n",
+ ci->devname);
+ return NULL;
+ }
+ dev = alloc_hdlcdev(priv);
+ if (!dev) {
+ pr_warning("%s: no memory for hdlc_device !\n",
+ ci->devname);
+ OS_kfree(priv);
+ return NULL;
+ }
+ priv->ci = ci;
+ priv->channum = cp->channum;
+ }
+
+ hdlc = dev_to_hdlc(dev);
+
+ dev->base_addr = 0; /* not I/O mapped */
+ dev->irq = ndev->irq;
+ dev->type = ARPHRD_RAWHDLC;
+ *dev->name = 0; /* default ifconfig name = "hdlc" */
+
+ hi = (hdw_info_t *)ci->hdw_info;
+ if (hi->mfg_info_sts == EEPROM_OK) {
+ switch (hi->promfmt) {
+ case PROM_FORMAT_TYPE1:
+ memcpy(dev->dev_addr,
+ (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
+ break;
+ case PROM_FORMAT_TYPE2:
+ memcpy(dev->dev_addr,
+ (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
+ break;
+ default:
+ memset(dev->dev_addr, 0, 6);
+ break;
+ }
+ } else
+ memset(dev->dev_addr, 0, 6);
+
+ hdlc->xmit = c4_linux_xmit;
+
+ dev->netdev_ops = &chan_ops;
+ /*
+ * The native hdlc stack calls this 'attach' routine during
+ * hdlc_raw_ioctl(), passing parameters for line encoding and parity.
+ * Since hdlc_raw_ioctl() stack does not interrogate whether an 'attach'
+ * routine is actually registered or not, we supply a dummy routine which
+	 * does nothing (since encoding and parity are set up for our driver via a
+ * special configuration application).
+ */
+
+ hdlc->attach = chan_attach_noop;
+
+ /* needed due to Ioctl calling sequence */
+ rtnl_unlock();
+ ret = register_hdlc_device(dev);
+ /* NOTE: <stats> setting must occur AFTER registration in order to "take" */
+ dev->tx_queue_len = MAX_DEFAULT_IFQLEN;
+
+ /* needed due to Ioctl calling sequence */
+ rtnl_lock();
+ if (ret) {
+ if (cxt1e1_log_level >= LOG_WARN)
+ pr_info("%s: create_chan[%d] registration error = %d.\n",
+ ci->devname, cp->channum, ret);
+ /* cleanup */
+ free_netdev(dev);
+ /* failed to register */
+ return NULL;
+ }
+ return dev;
}
/* the idea here is to get port information and pass it back (using pointer) */
-static status_t
-do_get_port (struct net_device *ndev, void *data)
+static status_t
+do_get_port(struct net_device *ndev, void *data)
{
- int ret;
- ci_t *ci; /* ci stands for card information */
- struct sbecom_port_param pp;/* copy data to kernel land */
-
- if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- if (pp.portnum >= MUSYCC_NPORTS)
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL; /* get card info */
-
- ret = mkret (c4_get_port (ci, pp.portnum));
- if (ret)
- return ret;
- if (copy_to_user (data, &ci->port[pp.portnum].p,
- sizeof (struct sbecom_port_param)))
- return -EFAULT;
- return 0;
+ int ret;
+ ci_t *ci; /* ci stands for card information */
+ struct sbecom_port_param pp;/* copy data to kernel land */
+
+ if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ if (pp.portnum >= MUSYCC_NPORTS)
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL; /* get card info */
+
+ ret = c4_get_port(ci, pp.portnum);
+ if (ret < 0)
+ return ret;
+ if (copy_to_user(data, &ci->port[pp.portnum].p,
+ sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ return 0;
}
 /* this function copies the user data and then calls the real action function */
-static status_t
-do_set_port (struct net_device *ndev, void *data)
+static status_t
+do_set_port(struct net_device *ndev, void *data)
{
- ci_t *ci; /* ci stands for card information */
- struct sbecom_port_param pp;/* copy data to kernel land */
-
- if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- if (pp.portnum >= MUSYCC_NPORTS)
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL; /* get card info */
-
- if (pp.portnum >= ci->max_port) /* sanity check */
- return -ENXIO;
-
- memcpy (&ci->port[pp.portnum].p, &pp, sizeof (struct sbecom_port_param));
- return mkret (c4_set_port (ci, pp.portnum));
+ ci_t *ci; /* ci stands for card information */
+ struct sbecom_port_param pp;/* copy data to kernel land */
+
+ if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ if (pp.portnum >= MUSYCC_NPORTS)
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL; /* get card info */
+
+ if (pp.portnum >= ci->max_port) /* sanity check */
+ return -ENXIO;
+
+ memcpy(&ci->port[pp.portnum].p, &pp, sizeof(struct sbecom_port_param));
+ return c4_set_port(ci, pp.portnum);
}
 /* work the port loopback mode as directed */
-static status_t
-do_port_loop (struct net_device *ndev, void *data)
+static status_t
+do_port_loop(struct net_device *ndev, void *data)
{
- struct sbecom_port_param pp;
- ci_t *ci;
-
- if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- return mkret (c4_loop_port (ci, pp.portnum, pp.port_mode));
+ struct sbecom_port_param pp;
+ ci_t *ci;
+
+ if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ return c4_loop_port(ci, pp.portnum, pp.port_mode);
}
/* set the specified register with the given value / or just read it */
-static status_t
-do_framer_rw (struct net_device *ndev, void *data)
+static status_t
+do_framer_rw(struct net_device *ndev, void *data)
{
- struct sbecom_port_param pp;
- ci_t *ci;
- int ret;
-
- if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- ret = mkret (c4_frame_rw (ci, &pp));
- if (ret)
- return ret;
- if (copy_to_user (data, &pp, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- return 0;
+ struct sbecom_port_param pp;
+ ci_t *ci;
+ int ret;
+
+ if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ ret = c4_frame_rw(ci, &pp);
+ if (ret < 0)
+ return ret;
+ if (copy_to_user(data, &pp, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ return 0;
}
/* set the specified register with the given value / or just read it */
-static status_t
-do_pld_rw (struct net_device *ndev, void *data)
+static status_t
+do_pld_rw(struct net_device *ndev, void *data)
{
- struct sbecom_port_param pp;
- ci_t *ci;
- int ret;
-
- if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- ret = mkret (c4_pld_rw (ci, &pp));
- if (ret)
- return ret;
- if (copy_to_user (data, &pp, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- return 0;
+ struct sbecom_port_param pp;
+ ci_t *ci;
+ int ret;
+
+ if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+
+ ret = c4_pld_rw(ci, &pp);
+ if (ret)
+ return ret;
+ if (copy_to_user(data, &pp, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ return 0;
}
/* set the specified register with the given value / or just read it */
-static status_t
-do_musycc_rw (struct net_device *ndev, void *data)
+static status_t
+do_musycc_rw(struct net_device *ndev, void *data)
{
- struct c4_musycc_param mp;
- ci_t *ci;
- int ret;
-
- if (copy_from_user (&mp, data, sizeof (struct c4_musycc_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- ret = mkret (c4_musycc_rw (ci, &mp));
- if (ret)
- return ret;
- if (copy_to_user (data, &mp, sizeof (struct c4_musycc_param)))
- return -EFAULT;
- return 0;
+ struct c4_musycc_param mp;
+ ci_t *ci;
+ int ret;
+
+ if (copy_from_user(&mp, data, sizeof(struct c4_musycc_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ ret = c4_musycc_rw(ci, &mp);
+ if (ret < 0)
+ return ret;
+ if (copy_to_user(data, &mp, sizeof(struct c4_musycc_param)))
+ return -EFAULT;
+ return 0;
}
-static status_t
-do_get_chan (struct net_device *ndev, void *data)
+static status_t
+do_get_chan(struct net_device *ndev, void *data)
{
- struct sbecom_chan_param cp;
- int ret;
+ struct sbecom_chan_param cp;
+ int ret;
- if (copy_from_user (&cp, data,
- sizeof (struct sbecom_chan_param)))
- return -EFAULT;
+ if (copy_from_user(&cp, data,
+ sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
- if ((ret = mkret (c4_get_chan (cp.channum, &cp))))
- return ret;
+ ret = c4_get_chan(cp.channum, &cp);
+ if (ret < 0)
+ return ret;
- if (copy_to_user (data, &cp, sizeof (struct sbecom_chan_param)))
- return -EFAULT;
- return 0;
+ if (copy_to_user(data, &cp, sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
+ return 0;
}
-static status_t
-do_set_chan (struct net_device *ndev, void *data)
+static status_t
+do_set_chan(struct net_device *ndev, void *data)
{
- struct sbecom_chan_param cp;
- int ret;
- ci_t *ci;
-
- if (copy_from_user (&cp, data, sizeof (struct sbecom_chan_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- switch (ret = mkret (c4_set_chan (cp.channum, &cp)))
- {
- case 0:
- return 0;
- default:
- return ret;
- }
+ struct sbecom_chan_param cp;
+ ci_t *ci;
+
+ if (copy_from_user(&cp, data, sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ return c4_set_chan(cp.channum, &cp);
}
-static status_t
-do_create_chan (struct net_device *ndev, void *data)
+static status_t
+do_create_chan(struct net_device *ndev, void *data)
{
- ci_t *ci;
- struct net_device *dev;
- struct sbecom_chan_param cp;
- int ret;
-
- if (copy_from_user (&cp, data, sizeof (struct sbecom_chan_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- dev = create_chan (ndev, ci, &cp);
- if (!dev)
- return -EBUSY;
- ret = mkret (c4_new_chan (ci, cp.port, cp.channum, dev));
- if (ret)
- {
- rtnl_unlock (); /* needed due to Ioctl calling sequence */
- unregister_hdlc_device (dev);
- rtnl_lock (); /* needed due to Ioctl calling sequence */
- free_netdev (dev);
- }
- return ret;
+ ci_t *ci;
+ struct net_device *dev;
+ struct sbecom_chan_param cp;
+ int ret;
+
+ if (copy_from_user(&cp, data, sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ dev = create_chan(ndev, ci, &cp);
+ if (!dev)
+ return -EBUSY;
+ ret = c4_new_chan(ci, cp.port, cp.channum, dev);
+ if (ret < 0) {
+ /* needed due to Ioctl calling sequence */
+ rtnl_unlock();
+ unregister_hdlc_device(dev);
+ /* needed due to Ioctl calling sequence */
+ rtnl_lock();
+ free_netdev(dev);
+ }
+ return ret;
}
-static status_t
-do_get_chan_stats (struct net_device *ndev, void *data)
+static status_t
+do_get_chan_stats(struct net_device *ndev, void *data)
{
- struct c4_chan_stats_wrap ccs;
- int ret;
-
- if (copy_from_user (&ccs, data,
- sizeof (struct c4_chan_stats_wrap)))
- return -EFAULT;
- switch (ret = mkret (c4_get_chan_stats (ccs.channum, &ccs.stats)))
- {
- case 0:
- break;
- default:
- return ret;
- }
- if (copy_to_user (data, &ccs,
- sizeof (struct c4_chan_stats_wrap)))
- return -EFAULT;
- return 0;
+ struct c4_chan_stats_wrap ccs;
+ int ret;
+
+ if (copy_from_user(&ccs, data,
+ sizeof(struct c4_chan_stats_wrap)))
+ return -EFAULT;
+
+ ret = c4_get_chan_stats(ccs.channum, &ccs.stats);
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user(data, &ccs,
+ sizeof(struct c4_chan_stats_wrap)))
+ return -EFAULT;
+ return 0;
}
-static status_t
-do_set_loglevel (struct net_device *ndev, void *data)
+static status_t
+do_set_loglevel(struct net_device *ndev, void *data)
{
- unsigned int cxt1e1_log_level;
+ unsigned int cxt1e1_log_level;
- if (copy_from_user (&cxt1e1_log_level, data, sizeof (int)))
- return -EFAULT;
- sbecom_set_loglevel (cxt1e1_log_level);
- return 0;
+ if (copy_from_user(&cxt1e1_log_level, data, sizeof(int)))
+ return -EFAULT;
+ sbecom_set_loglevel(cxt1e1_log_level);
+ return 0;
}
-static status_t
-do_deluser (struct net_device *ndev, int lockit)
+static status_t
+do_deluser(struct net_device *ndev, int lockit)
{
- if (ndev->flags & IFF_UP)
- return -EBUSY;
-
- {
- ci_t *ci;
- mch_t *ch;
- const struct c4_priv *priv;
- int channum;
-
- priv = (struct c4_priv *) dev_to_hdlc (ndev)->priv;
- ci = priv->ci;
- channum = priv->channum;
-
- ch = c4_find_chan (channum);
- if (ch == NULL)
- return -ENOENT;
- ch->user = NULL; /* will be freed, below */
- }
-
- if (lockit)
- rtnl_unlock (); /* needed if Ioctl calling sequence */
- unregister_hdlc_device (ndev);
- if (lockit)
- rtnl_lock (); /* needed if Ioctl calling sequence */
- free_netdev (ndev);
- return 0;
+ if (ndev->flags & IFF_UP)
+ return -EBUSY;
+
+ {
+ ci_t *ci;
+ mch_t *ch;
+ const struct c4_priv *priv;
+ int channum;
+
+ priv = (struct c4_priv *)dev_to_hdlc(ndev)->priv;
+ ci = priv->ci;
+ channum = priv->channum;
+
+ ch = c4_find_chan(channum);
+ if (ch == NULL)
+ return -ENOENT;
+ ch->user = NULL; /* will be freed, below */
+ }
+
+ /* needed if Ioctl calling sequence */
+ if (lockit)
+ rtnl_unlock();
+ unregister_hdlc_device(ndev);
+ /* needed if Ioctl calling sequence */
+ if (lockit)
+ rtnl_lock();
+ free_netdev(ndev);
+ return 0;
}
int
-do_del_chan (struct net_device *musycc_dev, void *data)
+do_del_chan(struct net_device *musycc_dev, void *data)
{
- struct sbecom_chan_param cp;
- char buf[sizeof (CHANNAME) + 3];
- struct net_device *dev;
- int ret;
-
- if (copy_from_user (&cp, data,
- sizeof (struct sbecom_chan_param)))
- return -EFAULT;
- if (cp.channum > 999)
- return -EINVAL;
- snprintf (buf, sizeof(buf), CHANNAME "%d", cp.channum);
+ struct sbecom_chan_param cp;
+ char buf[sizeof(CHANNAME) + 3];
+ struct net_device *dev;
+ int ret;
+
+ if (copy_from_user(&cp, data,
+ sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
+ if (cp.channum > 999)
+ return -EINVAL;
+ snprintf(buf, sizeof(buf), CHANNAME "%d", cp.channum);
dev = __dev_get_by_name(&init_net, buf);
if (!dev)
return -ENODEV;
- ret = do_deluser (dev, 1);
- if (ret)
- return ret;
- return c4_del_chan (cp.channum);
+ ret = do_deluser(dev, 1);
+ if (ret)
+ return ret;
+ return c4_del_chan(cp.channum);
}
-int c4_reset_board (void *);
+int c4_reset_board(void *);
int
-do_reset (struct net_device *musycc_dev, void *data)
+do_reset(struct net_device *musycc_dev, void *data)
{
- const struct c4_priv *priv;
- int i;
-
- for (i = 0; i < 128; i++)
- {
- struct net_device *ndev;
- char buf[sizeof (CHANNAME) + 3];
-
- sprintf (buf, CHANNAME "%d", i);
- ndev = __dev_get_by_name(&init_net, buf);
- if (!ndev)
- continue;
- priv = dev_to_hdlc (ndev)->priv;
-
- if ((unsigned long) (priv->ci) ==
- (unsigned long) (netdev_priv(musycc_dev)))
- {
- ndev->flags &= ~IFF_UP;
- netif_stop_queue (ndev);
- do_deluser (ndev, 1);
+ const struct c4_priv *priv;
+ int i;
+
+ for (i = 0; i < 128; i++) {
+ struct net_device *ndev;
+ char buf[sizeof(CHANNAME) + 3];
+
+ sprintf(buf, CHANNAME "%d", i);
+ ndev = __dev_get_by_name(&init_net, buf);
+ if (!ndev)
+ continue;
+ priv = dev_to_hdlc(ndev)->priv;
+
+ if ((unsigned long) (priv->ci) ==
+ (unsigned long) (netdev_priv(musycc_dev))) {
+ ndev->flags &= ~IFF_UP;
+ netif_stop_queue(ndev);
+ do_deluser(ndev, 1);
+ }
}
- }
- return 0;
+ return 0;
}
int
-do_reset_chan_stats (struct net_device *musycc_dev, void *data)
+do_reset_chan_stats(struct net_device *musycc_dev, void *data)
{
- struct sbecom_chan_param cp;
+ struct sbecom_chan_param cp;
- if (copy_from_user (&cp, data,
- sizeof (struct sbecom_chan_param)))
- return -EFAULT;
- return mkret (c4_del_chan_stats (cp.channum));
+ if (copy_from_user(&cp, data,
+ sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
+ return c4_del_chan_stats(cp.channum);
}
-static status_t
-c4_ioctl (struct net_device *ndev, struct ifreq *ifr, int cmd)
+static status_t
+c4_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
- ci_t *ci;
- void *data;
- int iocmd, iolen;
- status_t ret;
- static struct data
- {
- union
- {
- u_int8_t c;
- u_int32_t i;
- struct sbe_brd_info bip;
- struct sbe_drv_info dip;
- struct sbe_iid_info iip;
- struct sbe_brd_addr bap;
- struct sbecom_chan_stats stats;
- struct sbecom_chan_param param;
- struct temux_card_stats cards;
- struct sbecom_card_param cardp;
- struct sbecom_framer_param frp;
- } u;
- } arg;
-
-
- if (!capable (CAP_SYS_ADMIN))
- return -EPERM;
- if (cmd != SIOCDEVPRIVATE + 15)
- return -EINVAL;
- if (!(ci = get_ci_by_dev (ndev)))
- return -EINVAL;
- if (ci->state != C_RUNNING)
- return -ENODEV;
- if (copy_from_user (&iocmd, ifr->ifr_data, sizeof (iocmd)))
- return -EFAULT;
+ ci_t *ci;
+ void *data;
+ int iocmd, iolen;
+ status_t ret;
+ static struct data {
+ union {
+ u_int8_t c;
+ u_int32_t i;
+ struct sbe_brd_info bip;
+ struct sbe_drv_info dip;
+ struct sbe_iid_info iip;
+ struct sbe_brd_addr bap;
+ struct sbecom_chan_stats stats;
+ struct sbecom_chan_param param;
+ struct temux_card_stats cards;
+ struct sbecom_card_param cardp;
+ struct sbecom_framer_param frp;
+ } u;
+ } arg;
+
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (cmd != SIOCDEVPRIVATE + 15)
+ return -EINVAL;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ if (ci->state != C_RUNNING)
+ return -ENODEV;
+ if (copy_from_user(&iocmd, ifr->ifr_data, sizeof(iocmd)))
+ return -EFAULT;
#if 0
- if (copy_from_user (&len, ifr->ifr_data + sizeof (iocmd), sizeof (len)))
- return -EFAULT;
+ if (copy_from_user(&len, ifr->ifr_data + sizeof(iocmd), sizeof(len)))
+ return -EFAULT;
#endif
#if 0
- pr_info("c4_ioctl: iocmd %x, dir %x type %x nr %x iolen %d.\n", iocmd,
- _IOC_DIR (iocmd), _IOC_TYPE (iocmd), _IOC_NR (iocmd),
- _IOC_SIZE (iocmd));
+ pr_info("c4_ioctl: iocmd %x, dir %x type %x nr %x iolen %d.\n", iocmd,
+ _IOC_DIR(iocmd), _IOC_TYPE(iocmd), _IOC_NR(iocmd),
+ _IOC_SIZE(iocmd));
#endif
- iolen = _IOC_SIZE (iocmd);
- if (iolen > sizeof(arg))
- return -EFAULT;
- data = ifr->ifr_data + sizeof (iocmd);
- if (copy_from_user (&arg, data, iolen))
- return -EFAULT;
-
- ret = 0;
- switch (iocmd)
- {
- case SBE_IOC_PORT_GET:
- //pr_info(">> SBE_IOC_PORT_GET Ioctl...\n");
- ret = do_get_port (ndev, data);
- break;
- case SBE_IOC_PORT_SET:
- //pr_info(">> SBE_IOC_PORT_SET Ioctl...\n");
- ret = do_set_port (ndev, data);
- break;
- case SBE_IOC_CHAN_GET:
- //pr_info(">> SBE_IOC_CHAN_GET Ioctl...\n");
- ret = do_get_chan (ndev, data);
- break;
- case SBE_IOC_CHAN_SET:
- //pr_info(">> SBE_IOC_CHAN_SET Ioctl...\n");
- ret = do_set_chan (ndev, data);
- break;
- case C4_DEL_CHAN:
- //pr_info(">> C4_DEL_CHAN Ioctl...\n");
- ret = do_del_chan (ndev, data);
- break;
- case SBE_IOC_CHAN_NEW:
- ret = do_create_chan (ndev, data);
- break;
- case SBE_IOC_CHAN_GET_STAT:
- ret = do_get_chan_stats (ndev, data);
- break;
- case SBE_IOC_LOGLEVEL:
- ret = do_set_loglevel (ndev, data);
- break;
- case SBE_IOC_RESET_DEV:
- ret = do_reset (ndev, data);
- break;
- case SBE_IOC_CHAN_DEL_STAT:
- ret = do_reset_chan_stats (ndev, data);
- break;
- case C4_LOOP_PORT:
- ret = do_port_loop (ndev, data);
- break;
- case C4_RW_FRMR:
- ret = do_framer_rw (ndev, data);
- break;
- case C4_RW_MSYC:
- ret = do_musycc_rw (ndev, data);
- break;
- case C4_RW_PLD:
- ret = do_pld_rw (ndev, data);
- break;
- case SBE_IOC_IID_GET:
- ret = (iolen == sizeof (struct sbe_iid_info)) ? c4_get_iidinfo (ci, &arg.u.iip) : -EFAULT;
- if (ret == 0) /* no error, copy data */
- if (copy_to_user (data, &arg, iolen))
- return -EFAULT;
- break;
- default:
- //pr_info(">> c4_ioctl: EINVAL - unknown iocmd <%x>\n", iocmd);
- ret = -EINVAL;
- break;
- }
- return mkret (ret);
+ iolen = _IOC_SIZE(iocmd);
+ if (iolen > sizeof(arg))
+ return -EFAULT;
+ data = ifr->ifr_data + sizeof(iocmd);
+ if (copy_from_user(&arg, data, iolen))
+ return -EFAULT;
+
+ ret = 0;
+ switch (iocmd) {
+ case SBE_IOC_PORT_GET:
+ ret = do_get_port(ndev, data);
+ break;
+ case SBE_IOC_PORT_SET:
+ ret = do_set_port(ndev, data);
+ break;
+ case SBE_IOC_CHAN_GET:
+ ret = do_get_chan(ndev, data);
+ break;
+ case SBE_IOC_CHAN_SET:
+ ret = do_set_chan(ndev, data);
+ break;
+ case C4_DEL_CHAN:
+ ret = do_del_chan(ndev, data);
+ break;
+ case SBE_IOC_CHAN_NEW:
+ ret = do_create_chan(ndev, data);
+ break;
+ case SBE_IOC_CHAN_GET_STAT:
+ ret = do_get_chan_stats(ndev, data);
+ break;
+ case SBE_IOC_LOGLEVEL:
+ ret = do_set_loglevel(ndev, data);
+ break;
+ case SBE_IOC_RESET_DEV:
+ ret = do_reset(ndev, data);
+ break;
+ case SBE_IOC_CHAN_DEL_STAT:
+ ret = do_reset_chan_stats(ndev, data);
+ break;
+ case C4_LOOP_PORT:
+ ret = do_port_loop(ndev, data);
+ break;
+ case C4_RW_FRMR:
+ ret = do_framer_rw(ndev, data);
+ break;
+ case C4_RW_MSYC:
+ ret = do_musycc_rw(ndev, data);
+ break;
+ case C4_RW_PLD:
+ ret = do_pld_rw(ndev, data);
+ break;
+ case SBE_IOC_IID_GET:
+ ret = (iolen == sizeof(struct sbe_iid_info)) ?
+ c4_get_iidinfo(ci, &arg.u.iip) : -EFAULT;
+ if (ret == 0) /* no error, copy data */
+ if (copy_to_user(data, &arg, iolen))
+ return -EFAULT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
}
static const struct net_device_ops c4_ops = {
- .ndo_open = void_open,
- .ndo_start_xmit = c4_linux_xmit,
- .ndo_do_ioctl = c4_ioctl,
+ .ndo_open = void_open,
+ .ndo_start_xmit = c4_linux_xmit,
+ .ndo_do_ioctl = c4_ioctl,
};
static void c4_setup(struct net_device *dev)
{
- dev->type = ARPHRD_VOID;
- dev->netdev_ops = &c4_ops;
+ dev->type = ARPHRD_VOID;
+ dev->netdev_ops = &c4_ops;
}
struct net_device *__init
-c4_add_dev (hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
- int irq0, int irq1)
+c4_add_dev(hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
+ int irq0, int irq1)
{
- struct net_device *ndev;
- ci_t *ci;
-
- ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup);
- if (!ndev)
- {
- pr_warning("%s: no memory for struct net_device !\n", hi->devname);
- error_flag = ENOMEM;
- return NULL;
- }
- ci = (ci_t *)(netdev_priv(ndev));
- ndev->irq = irq0;
-
- ci->hdw_info = hi;
- ci->state = C_INIT; /* mark as hardware not available */
- ci->next = c4_list;
- c4_list = ci;
- ci->brdno = ci->next ? ci->next->brdno + 1 : 0;
-
- if (!CI)
- CI = ci; /* DEBUG, only board 0 usage */
-
- strcpy (ci->devname, hi->devname);
- ci->release = &pmcc4_OSSI_release[0];
-
- /* tasklet */
+ struct net_device *ndev;
+ ci_t *ci;
+
+ ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup);
+ if (!ndev) {
+ pr_warning("%s: no memory for struct net_device !\n",
+ hi->devname);
+ error_flag = -ENOMEM;
+ return NULL;
+ }
+ ci = (ci_t *)(netdev_priv(ndev));
+ ndev->irq = irq0;
+
+ ci->hdw_info = hi;
+ ci->state = C_INIT; /* mark as hardware not available */
+ ci->next = c4_list;
+ c4_list = ci;
+ ci->brdno = ci->next ? ci->next->brdno + 1 : 0;
+
+ if (!CI)
+ CI = ci; /* DEBUG, only board 0 usage */
+
+ strcpy(ci->devname, hi->devname);
+
+ /* tasklet */
#if defined(SBE_ISR_TASKLET)
- tasklet_init (&ci->ci_musycc_isr_tasklet,
- (void (*) (unsigned long)) musycc_intr_bh_tasklet,
- (unsigned long) ci);
+ tasklet_init(&ci->ci_musycc_isr_tasklet,
+ (void (*) (unsigned long)) musycc_intr_bh_tasklet,
+ (unsigned long) ci);
- if (atomic_read (&ci->ci_musycc_isr_tasklet.count) == 0)
- tasklet_disable_nosync (&ci->ci_musycc_isr_tasklet);
+ if (atomic_read(&ci->ci_musycc_isr_tasklet.count) == 0)
+ tasklet_disable_nosync(&ci->ci_musycc_isr_tasklet);
#elif defined(SBE_ISR_IMMEDIATE)
- ci->ci_musycc_isr_tq.routine = (void *) (unsigned long) musycc_intr_bh_tasklet;
- ci->ci_musycc_isr_tq.data = ci;
+ ci->ci_musycc_isr_tq.routine = (void *)(unsigned long)musycc_intr_bh_tasklet;
+ ci->ci_musycc_isr_tq.data = ci;
#endif
- if (register_netdev (ndev) ||
- (c4_init (ci, (u_char *) f0, (u_char *) f1) != SBE_DRVR_SUCCESS))
- {
- OS_kfree (netdev_priv(ndev));
- OS_kfree (ndev);
- error_flag = ENODEV;
- return NULL;
- }
- /*************************************************************
- * int request_irq(unsigned int irq,
- * void (*handler)(int, void *, struct pt_regs *),
- * unsigned long flags, const char *dev_name, void *dev_id);
- * wherein:
- * irq -> The interrupt number that is being requested.
- * handler -> Pointer to handling function being installed.
- * flags -> A bit mask of options related to interrupt management.
- * dev_name -> String used in /proc/interrupts to show owner of interrupt.
- * dev_id -> Pointer (for shared interrupt lines) to point to its own
- * private data area (to identify which device is interrupting).
- *
- * extern void free_irq(unsigned int irq, void *dev_id);
- **************************************************************/
-
- if (request_irq (irq0, &c4_linux_interrupt,
- IRQF_SHARED,
- ndev->name, ndev))
- {
- pr_warning("%s: MUSYCC could not get irq: %d\n", ndev->name, irq0);
- unregister_netdev (ndev);
- OS_kfree (netdev_priv(ndev));
- OS_kfree (ndev);
- error_flag = EIO;
- return NULL;
- }
+ if (register_netdev(ndev) ||
+ (c4_init(ci, (u_char *) f0, (u_char *) f1) != SBE_DRVR_SUCCESS)) {
+ OS_kfree(netdev_priv(ndev));
+ OS_kfree(ndev);
+ error_flag = -ENODEV;
+ return NULL;
+ }
+ /*************************************************************
+ * int request_irq(unsigned int irq,
+ * void (*handler)(int, void *, struct pt_regs *),
+ * unsigned long flags, const char *dev_name, void *dev_id);
+ * wherein:
+ * irq -> The interrupt number that is being requested.
+ * handler -> Pointer to handling function being installed.
+ * flags -> A bit mask of options related to interrupt management.
+ * dev_name -> String used in /proc/interrupts to show owner of interrupt.
+ * dev_id -> Pointer (for shared interrupt lines) to point to its own
+ * private data area (to identify which device is interrupting).
+ *
+ * extern void free_irq(unsigned int irq, void *dev_id);
+ **************************************************************/
+
+ if (request_irq(irq0, &c4_linux_interrupt,
+ IRQF_SHARED,
+ ndev->name, ndev)) {
+ pr_warning("%s: MUSYCC could not get irq: %d\n",
+ ndev->name, irq0);
+ unregister_netdev(ndev);
+ OS_kfree(netdev_priv(ndev));
+ OS_kfree(ndev);
+ error_flag = -EIO;
+ return NULL;
+ }
#ifdef CONFIG_SBE_PMCC4_NCOMM
- if (request_irq (irq1, &c4_ebus_interrupt, IRQF_SHARED, ndev->name, ndev))
- {
- pr_warning("%s: EBUS could not get irq: %d\n", hi->devname, irq1);
- unregister_netdev (ndev);
- free_irq (irq0, ndev);
- OS_kfree (netdev_priv(ndev));
- OS_kfree (ndev);
- error_flag = EIO;
- return NULL;
- }
+ if (request_irq(irq1, &c4_ebus_interrupt, IRQF_SHARED, ndev->name, ndev)) {
+ pr_warning("%s: EBUS could not get irq: %d\n", hi->devname, irq1);
+ unregister_netdev(ndev);
+ free_irq(irq0, ndev);
+ OS_kfree(netdev_priv(ndev));
+ OS_kfree(ndev);
+ error_flag = -EIO;
+ return NULL;
+ }
#endif
- /* setup board identification information */
-
- {
- u_int32_t tmp;
-
- hdw_sn_get (hi, brdno); /* also sets PROM format type (promfmt)
- * for later usage */
-
- switch (hi->promfmt)
- {
- case PROM_FORMAT_TYPE1:
- memcpy (ndev->dev_addr, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
- memcpy (&tmp, (FLD_TYPE1 *) (hi->mfg_info.pft1.Id), 4); /* unaligned data
- * acquisition */
- ci->brd_id = cpu_to_be32 (tmp);
- break;
- case PROM_FORMAT_TYPE2:
- memcpy (ndev->dev_addr, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
- memcpy (&tmp, (FLD_TYPE2 *) (hi->mfg_info.pft2.Id), 4); /* unaligned data
- * acquisition */
- ci->brd_id = cpu_to_be32 (tmp);
- break;
- default:
- ci->brd_id = 0;
- memset (ndev->dev_addr, 0, 6);
- break;
- }
+ /* setup board identification information */
+
+ {
+ u_int32_t tmp;
+
+ /* also sets PROM format type (promfmt) for later usage */
+ hdw_sn_get(hi, brdno);
+
+ switch (hi->promfmt) {
+ case PROM_FORMAT_TYPE1:
+ memcpy(ndev->dev_addr,
+ (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
+ /* unaligned data acquisition */
+ memcpy(&tmp, (FLD_TYPE1 *) (hi->mfg_info.pft1.Id), 4);
+ ci->brd_id = cpu_to_be32(tmp);
+ break;
+ case PROM_FORMAT_TYPE2:
+ memcpy(ndev->dev_addr,
+ (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
+ /* unaligned data acquisition */
+ memcpy(&tmp, (FLD_TYPE2 *) (hi->mfg_info.pft2.Id), 4);
+ ci->brd_id = cpu_to_be32(tmp);
+ break;
+ default:
+ ci->brd_id = 0;
+ memset(ndev->dev_addr, 0, 6);
+ break;
+ }
#if 1
- sbeid_set_hdwbid (ci); /* requires bid to be preset */
+ /* requires bid to be preset */
+ sbeid_set_hdwbid(ci);
#else
- sbeid_set_bdtype (ci); /* requires hdw_bid to be preset */
+ /* requires hdw_bid to be preset */
+ sbeid_set_bdtype(ci);
#endif
-
- }
+ }
#ifdef CONFIG_PROC_FS
- sbecom_proc_brd_init (ci);
+ sbecom_proc_brd_init(ci);
#endif
#if defined(SBE_ISR_TASKLET)
- tasklet_enable (&ci->ci_musycc_isr_tasklet);
+ tasklet_enable(&ci->ci_musycc_isr_tasklet);
#endif
-
- if ((error_flag = c4_init2 (ci)) != SBE_DRVR_SUCCESS)
- {
+ error_flag = c4_init2(ci);
+ if (error_flag != SBE_DRVR_SUCCESS) {
#ifdef CONFIG_PROC_FS
- sbecom_proc_brd_cleanup (ci);
+ sbecom_proc_brd_cleanup(ci);
#endif
- unregister_netdev (ndev);
- free_irq (irq1, ndev);
- free_irq (irq0, ndev);
- OS_kfree (netdev_priv(ndev));
- OS_kfree (ndev);
- return NULL; /* failure, error_flag is set */
- }
- return ndev;
+ unregister_netdev(ndev);
+ free_irq(irq1, ndev);
+ free_irq(irq0, ndev);
+ OS_kfree(netdev_priv(ndev));
+ OS_kfree(ndev);
+ /* failure, error_flag is set */
+ return NULL;
+ }
+ return ndev;
}
static int __init
-c4_mod_init (void)
+c4_mod_init(void)
{
- int rtn;
-
- pr_warning("%s\n", pmcc4_OSSI_release);
- if ((rtn = c4hw_attach_all ()))
- return -rtn; /* installation failure - see system log */
-
- /* housekeeping notifications */
- if (cxt1e1_log_level != log_level_default)
- pr_info("NOTE: driver parameter <cxt1e1_log_level> changed from default %d to %d.\n",
- log_level_default, cxt1e1_log_level);
- if (cxt1e1_max_mru != max_mru_default)
- pr_info("NOTE: driver parameter <cxt1e1_max_mru> changed from default %d to %d.\n",
- max_mru_default, cxt1e1_max_mru);
- if (cxt1e1_max_mtu != max_mtu_default)
- pr_info("NOTE: driver parameter <cxt1e1_max_mtu> changed from default %d to %d.\n",
- max_mtu_default, cxt1e1_max_mtu);
- if (max_rxdesc_used != max_rxdesc_default)
- {
- if (max_rxdesc_used > 2000)
- max_rxdesc_used = 2000; /* out-of-bounds reset */
- pr_info("NOTE: driver parameter <max_rxdesc_used> changed from default %d to %d.\n",
- max_rxdesc_default, max_rxdesc_used);
- }
- if (max_txdesc_used != max_txdesc_default)
- {
- if (max_txdesc_used > 1000)
- max_txdesc_used = 1000; /* out-of-bounds reset */
- pr_info("NOTE: driver parameter <max_txdesc_used> changed from default %d to %d.\n",
- max_txdesc_default, max_txdesc_used);
- }
- return 0; /* installation success */
+ int rtn;
+
+ rtn = c4hw_attach_all();
+ if (rtn)
+ return -rtn; /* installation failure - see system log */
+
+ /* housekeeping notifications */
+ if (cxt1e1_log_level != log_level_default)
+ pr_info("NOTE: driver parameter <cxt1e1_log_level> changed from default %d to %d.\n",
+ log_level_default, cxt1e1_log_level);
+ if (cxt1e1_max_mru != max_mru_default)
+ pr_info("NOTE: driver parameter <cxt1e1_max_mru> changed from default %d to %d.\n",
+ max_mru_default, cxt1e1_max_mru);
+ if (cxt1e1_max_mtu != max_mtu_default)
+ pr_info("NOTE: driver parameter <cxt1e1_max_mtu> changed from default %d to %d.\n",
+ max_mtu_default, cxt1e1_max_mtu);
+ if (max_rxdesc_used != max_rxdesc_default) {
+ if (max_rxdesc_used > 2000)
+ max_rxdesc_used = 2000; /* out-of-bounds reset */
+ pr_info("NOTE: driver parameter <max_rxdesc_used> changed from default %d to %d.\n",
+ max_rxdesc_default, max_rxdesc_used);
+ }
+ if (max_txdesc_used != max_txdesc_default) {
+ if (max_txdesc_used > 1000)
+ max_txdesc_used = 1000; /* out-of-bounds reset */
+ pr_info("NOTE: driver parameter <max_txdesc_used> changed from default %d to %d.\n",
+ max_txdesc_default, max_txdesc_used);
+ }
+ return 0; /* installation success */
}
@@ -1140,31 +1117,29 @@ c4_mod_init (void)
*/
static void __exit
-cleanup_hdlc (void)
+cleanup_hdlc(void)
{
- hdw_info_t *hi;
- ci_t *ci;
- struct net_device *ndev;
- int i, j, k;
-
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->ndev) /* a board has been attached */
- {
- ci = (ci_t *)(netdev_priv(hi->ndev));
- for (j = 0; j < ci->max_port; j++)
- for (k = 0; k < MUSYCC_NCHANS; k++)
- if ((ndev = ci->port[j].chan[k]->user))
- {
- do_deluser (ndev, 0);
- }
- }
- }
+ hdw_info_t *hi;
+ ci_t *ci;
+ struct net_device *ndev;
+ int i, j, k;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->ndev) { /* a board has been attached */
+ ci = (ci_t *)(netdev_priv(hi->ndev));
+ for (j = 0; j < ci->max_port; j++)
+ for (k = 0; k < MUSYCC_NCHANS; k++) {
+ ndev = ci->port[j].chan[k]->user;
+ if (ndev)
+ do_deluser(ndev, 0);
+ }
+ }
+ }
}
static void __exit
-c4_mod_remove (void)
+c4_mod_remove(void)
{
cleanup_hdlc(); /* delete any missed channels */
cleanup_devs();
@@ -1173,13 +1148,13 @@ c4_mod_remove (void)
pr_info("SBE - driver removed.\n");
}
-module_init (c4_mod_init);
-module_exit (c4_mod_remove);
+module_init(c4_mod_init);
+module_exit(c4_mod_remove);
-MODULE_AUTHOR ("SBE Technical Services <support@sbei.com>");
-MODULE_DESCRIPTION ("wanPCI-CxT1E1 Generic HDLC WAN Driver module");
+MODULE_AUTHOR("SBE Technical Services <support@sbei.com>");
+MODULE_DESCRIPTION("wanPCI-CxT1E1 Generic HDLC WAN Driver module");
#ifdef MODULE_LICENSE
-MODULE_LICENSE ("GPL");
+MODULE_LICENSE("GPL");
#endif
/*** End-of-File ***/
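
The hunks above drop the driver's mkret() wrapper: the ioctl helpers now return negative errno values directly (-ENXIO, -ENOENT, -EBUSY, ...) and their callers propagate them with a plain "if (ret < 0) return ret;" check. A minimal stand-alone sketch of that convention follows; set_port and MAX_PORT are hypothetical names used only for illustration, and ordinary userspace headers stand in for the driver's own API.

#include <errno.h>
#include <stdio.h>

#define MAX_PORT 4	/* hypothetical port count, illustration only */

/* Return 0 on success or a negative errno value on failure. */
static int set_port(int portnum)
{
	if (portnum < 0 || portnum >= MAX_PORT)
		return -ENXIO;	/* return the error code directly, already negated */
	return 0;		/* pretend the hardware access succeeded */
}

int main(void)
{
	int ret = set_port(7);	/* deliberately out of range */

	if (ret < 0)		/* callers just test for a negative value */
		fprintf(stderr, "set_port failed: %d\n", ret);
	return 0;
}
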
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index 7a3a30cd0f7f..7b4f6f2108e3 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -42,7 +42,6 @@ static unsigned int max_bh = 0;
/* global driver variables */
extern ci_t *c4_list;
extern int drvr_state;
-extern int cxt1e1_log_level;
extern int cxt1e1_max_mru;
extern int cxt1e1_max_mtu;
@@ -217,7 +216,8 @@ musycc_dump_ring(ci_t *ci, unsigned int chan)
max_intcnt = 0; /* reset counter */
}
- if (!(ch = sd_find_chan(dummy, chan))) {
+ ch = sd_find_chan(dummy, chan);
+ if (!ch) {
pr_info(">> musycc_dump_ring: channel %d not up.\n", chan);
return ENOENT;
}
@@ -1044,17 +1044,19 @@ musycc_bh_rx_eom(mpi_t *pi, int gchan)
#endif /*** CONFIG_SBE_WAN256T3_NCOMM ***/
{
- if ((m2 = OS_mem_token_alloc(cxt1e1_max_mru))) {
- /* substitute the mbuf+cluster */
- md->mem_token = m2;
- md->data = cpu_to_le32(OS_vtophys(OS_mem_token_data(m2)));
-
- /* pass the received mbuf upward */
- sd_recv_consume(m, status & LENGTH_MASK, ch->user);
- ch->s.rx_packets++;
- ch->s.rx_bytes += status & LENGTH_MASK;
+ m2 = OS_mem_token_alloc(cxt1e1_max_mru);
+ if (m2) {
+ /* substitute the mbuf+cluster */
+ md->mem_token = m2;
+ md->data = cpu_to_le32(OS_vtophys(
+ OS_mem_token_data(m2)));
+
+ /* pass the received mbuf upward */
+ sd_recv_consume(m, status & LENGTH_MASK, ch->user);
+ ch->s.rx_packets++;
+ ch->s.rx_bytes += status & LENGTH_MASK;
} else
- ch->s.rx_dropped++;
+ ch->s.rx_dropped++;
}
} else if (error == ERR_FCS)
ch->s.rx_crc_errors++;
@@ -1545,8 +1547,9 @@ musycc_chan_down(ci_t *dummy, int channum)
mch_t *ch;
int i, gchan;
- if (!(ch = sd_find_chan(dummy, channum)))
- return EINVAL;
+ ch = sd_find_chan(dummy, channum);
+ if (!ch)
+ return -EINVAL;
pi = ch->up;
gchan = ch->gchan;
@@ -1589,6 +1592,8 @@ musycc_chan_down(ci_t *dummy, int channum)
#endif
+#if 0
+/* TODO: determine whether these functions are still needed; remove them if not */
int
musycc_del_chan(ci_t *ci, int channum)
{
@@ -1596,7 +1601,8 @@ musycc_del_chan(ci_t *ci, int channum)
if ((channum < 0) || (channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS))) /* sanity chk param */
return ECHRNG;
- if (!(ch = sd_find_chan(ci, channum)))
+ ch = sd_find_chan(ci, channum);
+ if (!ch)
return ENOENT;
if (ch->state == UP)
musycc_chan_down(ci, channum);
@@ -1612,12 +1618,14 @@ musycc_del_chan_stats(ci_t *ci, int channum)
if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)) /* sanity chk param */
return ECHRNG;
- if (!(ch = sd_find_chan(ci, channum)))
+ ch = sd_find_chan(ci, channum);
+ if (!ch)
return ENOENT;
memset(&ch->s, 0, sizeof(struct sbecom_chan_stats));
return 0;
}
+#endif
int
@@ -1632,7 +1640,8 @@ musycc_start_xmit(ci_t *ci, int channum, void *mem_token)
int txd_need_cnt;
u_int32_t len;
- if (!(ch = sd_find_chan(ci, channum)))
+ ch = sd_find_chan(ci, channum);
+ if (!ch)
return -ENOENT;
if (ci->state != C_RUNNING) /* full interrupt processing available */
diff --git a/drivers/staging/cxt1e1/ossiRelease.c b/drivers/staging/cxt1e1/ossiRelease.c
deleted file mode 100644
index f17a902e95e3..000000000000
--- a/drivers/staging/cxt1e1/ossiRelease.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*-----------------------------------------------------------------------------
- * ossiRelease.c -
- *
- * This string will be embedded into the executable and will track the
- * release. The embedded string may be displayed using the following:
- *
- * strings <filename> | grep \$Rel
- *
- * Copyright (C) 2002-2008 One Stop Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * For further information, contact via email: support@onestopsystems.com
- * One Stop Systems, Inc. Escondido, California U.S.A.
- *
- *-----------------------------------------------------------------------------
- */
-
-char pmcc4_OSSI_release[] = "$Release: PMCC4_3_1B, Copyright (c) 2008 One Stop Systems$";
-
-/*** End-of-File ***/
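
The deleted ossiRelease.c existed only to embed the release tag as initialized string data, so that it could be read back from the built module with "strings <filename> | grep \$Rel", as its header comment notes. A stand-alone sketch of that embedding technique follows; the tag text here is made up for illustration.

#include <stdio.h>

/*
 * Keeping the tag as ordinary initialized data means the `strings`
 * utility can recover it from the compiled binary.
 */
static const char release_tag[] = "$Release: EXAMPLE_1_0, illustrative only$";

int main(void)
{
	puts(release_tag);	/* the tag is also printable at runtime */
	return 0;
}
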
diff --git a/drivers/staging/cxt1e1/pmc93x6_eeprom.c b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
index 137b63cb5537..78cc1709ebdb 100644
--- a/drivers/staging/cxt1e1/pmc93x6_eeprom.c
+++ b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
@@ -90,7 +90,7 @@ static int ByteReverseBuilt = FALSE;
*------------------------------------------------------------------------
*/
-short mfg_template[sizeof (FLD_TYPE2)] =
+static u8 mfg_template[sizeof(FLD_TYPE2)] =
{
PROM_FORMAT_TYPE2, /* type; */
0x00, 0x1A, /* length[2]; */
@@ -491,13 +491,11 @@ pmc_init_seeprom (u_int32_t addr, u_int32_t serialNum)
PROMFORMAT buffer; /* Memory image of structure */
u_int32_t crc; /* CRC of structure */
time_t createTime;
- int i;
createTime = get_seconds ();
/* use template data */
- for (i = 0; i < sizeof (FLD_TYPE2); ++i)
- buffer.bytes[i] = mfg_template[i];
+ memcpy(&buffer.fldType2, mfg_template, sizeof(buffer.fldType2));
/* Update serial number field in buffer */
pmcSetBuffValue (&buffer.fldType2.Serial[3], serialNum, 3);
diff --git a/drivers/staging/cxt1e1/pmcc4.h b/drivers/staging/cxt1e1/pmcc4.h
index 003eb8690190..b4b5e5ad791b 100644
--- a/drivers/staging/cxt1e1/pmcc4.h
+++ b/drivers/staging/cxt1e1/pmcc4.h
@@ -96,7 +96,6 @@ void sbeid_set_bdtype (ci_t *ci);
void sbeid_set_hdwbid (ci_t *ci);
u_int32_t sbeCrc (u_int8_t *, u_int32_t, u_int32_t, u_int32_t *);
-void VMETRO_TRACE (void *); /* put data into 8 LEDs */
void VMETRO_TRIGGER (ci_t *, int); /* Note: int = 0(default)
* thru 15 */
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index a9d95753be20..621a72926475 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -112,12 +112,12 @@ c4_find_chan (int channum)
for (portnum = 0; portnum < ci->max_port; portnum++)
for (gchan = 0; gchan < MUSYCC_NCHANS; gchan++)
{
- if ((ch = ci->port[portnum].chan[gchan]))
- {
- if ((ch->state != UNASSIGNED) &&
- (ch->channum == channum))
- return ch;
- }
+ ch = ci->port[portnum].chan[gchan];
+ if (ch) {
+ if ((ch->state != UNASSIGNED) &&
+ (ch->channum == channum))
+ return ch;
+ }
}
return NULL;
}
@@ -668,8 +668,9 @@ c4_init2 (ci_t *ci)
status_t ret;
/* PORT POINT: this routine generates first interrupt */
- if ((ret = musycc_init (ci)) != SBE_DRVR_SUCCESS)
- return ret;
+ ret = musycc_init(ci);
+ if (ret != SBE_DRVR_SUCCESS)
+ return ret;
#if 0
ci->p.framing_type = FRAMING_CBP;
@@ -756,7 +757,7 @@ c4_frame_rw (ci_t *ci, struct sbecom_port_param *pp)
volatile u_int32_t data;
if (pp->portnum >= ci->max_port)/* sanity check */
- return ENXIO;
+ return -ENXIO;
comet = ci->port[pp->portnum].cometbase;
data = pci_read_32 ((u_int32_t *) comet + pp->port_mode) & 0xff;
@@ -845,7 +846,7 @@ c4_musycc_rw (ci_t *ci, struct c4_musycc_param *mcp)
*/
portnum = (mcp->offset % 0x6000) / 0x800;
if (portnum >= ci->max_port)
- return ENXIO;
+ return -ENXIO;
pi = &ci->port[portnum];
if (mcp->offset >= 0x6000)
offset += 0x6000; /* put back in MsgCfgDesc address offset */
@@ -894,7 +895,7 @@ status_t
c4_get_port (ci_t *ci, int portnum)
{
if (portnum >= ci->max_port) /* sanity check */
- return ENXIO;
+ return -ENXIO;
SD_SEM_TAKE (&ci->sem_wdbusy, "_wd_"); /* only 1 thru here, per
* board */
@@ -915,7 +916,7 @@ c4_set_port (ci_t *ci, int portnum)
int i;
if (portnum >= ci->max_port) /* sanity check */
- return ENXIO;
+ return -ENXIO;
pi = &ci->port[portnum];
pp = &ci->port[portnum].p;
@@ -927,15 +928,15 @@ c4_set_port (ci_t *ci, int portnum)
portnum, e1mode, pi->openchans);
}
if (pi->openchans)
- return EBUSY; /* group needs initialization only for
+ return -EBUSY; /* group needs initialization only for
* first channel of a group */
{
status_t ret;
- if ((ret = c4_wq_port_init (pi))) /* create/init
- * workqueue_struct */
- return ret;
+ ret = c4_wq_port_init(pi);
+ if (ret) /* create/init workqueue_struct */
+ return ret;
}
init_comet (ci, pi->cometbase, pp->port_mode, 1 /* clockmaster == true */ , pp->portP);
@@ -1018,10 +1019,10 @@ c4_new_chan (ci_t *ci, int portnum, int channum, void *user)
int gchan;
if (c4_find_chan (channum)) /* a new channel shouldn't already exist */
- return EEXIST;
+ return -EEXIST;
if (portnum >= ci->max_port) /* sanity check */
- return ENXIO;
+ return -ENXIO;
pi = &(ci->port[portnum]);
/* find any available channel within this port */
@@ -1032,7 +1033,7 @@ c4_new_chan (ci_t *ci, int portnum, int channum, void *user)
break;
}
if (gchan == MUSYCC_NCHANS) /* exhausted table, all were assigned */
- return ENFILE;
+ return -ENFILE;
ch->up = pi;
@@ -1055,8 +1056,9 @@ c4_new_chan (ci_t *ci, int portnum, int channum, void *user)
{
status_t ret;
- if ((ret = c4_wk_chan_init (pi, ch)))
- return ret;
+ ret = c4_wk_chan_init(pi, ch);
+ if (ret)
+ return ret;
}
/* save off interface assignments which bound a board */
@@ -1079,8 +1081,10 @@ c4_del_chan (int channum)
{
mch_t *ch;
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
+
if (ch->state == UP)
musycc_chan_down ((ci_t *) 0, channum);
ch->state = UNASSIGNED;
@@ -1095,8 +1099,9 @@ c4_del_chan_stats (int channum)
{
mch_t *ch;
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
memset (&ch->s, 0, sizeof (struct sbecom_chan_stats));
return 0;
@@ -1109,19 +1114,20 @@ c4_set_chan (int channum, struct sbecom_chan_param *p)
mch_t *ch;
int i, x = 0;
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
#if 1
if (ch->p.card != p->card ||
ch->p.port != p->port ||
ch->p.channum != p->channum)
- return EINVAL;
+ return -EINVAL;
#endif
if (!(ch->up->group_is_set))
{
- return EIO; /* out of order, SET_PORT command
+ return -EIO; /* out of order, SET_PORT command
* required prior to first group's
* SET_CHAN command */
}
@@ -1143,10 +1149,12 @@ c4_set_chan (int channum, struct sbecom_chan_param *p)
{
status_t ret;
- if ((ret = musycc_chan_down ((ci_t *) 0, channum)))
- return ret;
- if ((ret = c4_chan_up (ch->up->up, channum)))
- return ret;
+ ret = musycc_chan_down((ci_t *)0, channum);
+ if (ret)
+ return ret;
+ ret = c4_chan_up(ch->up->up, channum);
+ if (ret)
+ return ret;
sd_enable_xmit (ch->user); /* re-enable to catch flow controlled
* channel */
}
@@ -1159,8 +1167,10 @@ c4_get_chan (int channum, struct sbecom_chan_param *p)
{
mch_t *ch;
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
+
*p = ch->p;
return 0;
}
@@ -1170,8 +1180,10 @@ c4_get_chan_stats (int channum, struct sbecom_chan_stats *p)
{
mch_t *ch;
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
+
*p = ch->s;
p->tx_pending = atomic_read (&ch->tx_pending);
return 0;
@@ -1240,8 +1252,10 @@ c4_chan_up (ci_t *ci, int channum)
u_int32_t tmp; /* for optimizing conversion across BE
* platform */
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
+
if (ch->state == UP)
{
if (cxt1e1_log_level >= LOG_MONITOR)
@@ -1264,7 +1278,7 @@ c4_chan_up (ci_t *ci, int channum)
pr_info("+ ask4 %x, currently %x\n",
ch->p.bitmask[i], pi->tsm[i]);
}
- return EINVAL;
+ return -EINVAL;
}
for (j = 0; j < 8; j++)
if (ch->p.bitmask[i] & (1 << j))
@@ -1277,7 +1291,7 @@ c4_chan_up (ci_t *ci, int channum)
/* if( cxt1e1_log_level >= LOG_WARN) */
pr_info("%s: c4_chan_up[%d] ENOBUFS (no TimeSlots assigned)\n",
ci->devname, channum);
- return ENOBUFS; /* this should not happen */
+ return -ENOBUFS; /* this should not happen */
}
addr = c4_fifo_alloc (pi, gchan, &nbuf);
ch->state = UP;
@@ -1372,12 +1386,13 @@ c4_chan_up (ci_t *ci, int channum)
}
md->next = cpu_to_le32 (OS_vtophys (md->snext));
- if (!(m = OS_mem_token_alloc (cxt1e1_max_mru)))
- {
- if (cxt1e1_log_level >= LOG_MONITOR)
- pr_info("%s: c4_chan_up[%d] - token alloc failure, size = %d.\n",
- ci->devname, channum, cxt1e1_max_mru);
- goto errfree;
+ m = OS_mem_token_alloc(cxt1e1_max_mru);
+ if (!m) {
+ if (cxt1e1_log_level >= LOG_MONITOR)
+ pr_info(
+ "%s: c4_chan_up[%d] - token alloc failure, size = %d.\n",
+ ci->devname, channum, cxt1e1_max_mru);
+ goto errfree;
}
md->mem_token = m;
md->data = cpu_to_le32 (OS_vtophys (OS_mem_token_data (m)));
@@ -1454,7 +1469,7 @@ errfree:
ch->mdr = NULL;
ch->rxd_num = 0;
ch->state = DOWN;
- return ENOBUFS;
+ return -ENOBUFS;
}
/* stop the hardware from servicing & interrupting */
@@ -1533,8 +1548,9 @@ c4_get_iidinfo (ci_t *ci, struct sbe_iid_info *iip)
struct net_device *dev;
char *np;
- if (!(dev = getuserbychan (iip->channum)))
- return ENOENT;
+ dev = getuserbychan(iip->channum);
+ if (!dev)
+ return -ENOENT;
np = dev->name;
strncpy (iip->iname, np, CHNM_STRLEN - 1);
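
The pmcc4_drv.c hunks above apply the same mechanical cleanup seen in musycc.c: an assignment buried inside an if condition, such as "if (!(ch = c4_find_chan(channum)))", is split into a separate assignment followed by a NULL check, and positive error constants become negative errno returns. A small stand-alone sketch of the before/after shape; find_chan and struct chan are hypothetical stand-ins, not the driver's types.

#include <stddef.h>
#include <stdio.h>

struct chan { int id; };			/* hypothetical channel type */

static struct chan *find_chan(int id)		/* hypothetical lookup helper */
{
	static struct chan table[] = { { 1 }, { 2 } };

	return (id >= 1 && id <= 2) ? &table[id - 1] : NULL;
}

int main(void)
{
	struct chan *ch;

	/*
	 * The old style folded the assignment into the condition:
	 *	if (!(ch = find_chan(3))) ...
	 * The cleaned-up form separates the two steps:
	 */
	ch = find_chan(3);
	if (!ch) {
		fprintf(stderr, "channel not found\n");
		return 1;
	}
	printf("found channel %d\n", ch->id);
	return 0;
}
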
diff --git a/drivers/staging/cxt1e1/pmcc4_private.h b/drivers/staging/cxt1e1/pmcc4_private.h
index 7edbd4e492e3..eb28f095ff8c 100644
--- a/drivers/staging/cxt1e1/pmcc4_private.h
+++ b/drivers/staging/cxt1e1/pmcc4_private.h
@@ -213,7 +213,6 @@ struct sbe_card_info
struct sbe_card_info *next;
u_int32_t *eeprombase; /* mapped address of board's EEPROM */
c4cpld_t *cpldbase; /* mapped address of board's CPLD hardware */
- char *release; /* SBE ID string w/in sbeRelease.c */
void *hdw_info;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dir_dev;
diff --git a/drivers/staging/cxt1e1/pmcc4_sysdep.h b/drivers/staging/cxt1e1/pmcc4_sysdep.h
index 697f1943670f..2916c2cb13f9 100644
--- a/drivers/staging/cxt1e1/pmcc4_sysdep.h
+++ b/drivers/staging/cxt1e1/pmcc4_sysdep.h
@@ -60,3 +60,4 @@ void sd_line_is_down (void *user);
int sd_queue_stopped (void *user);
#endif /*** _INC_PMCC4_SYSDEP_H_ ***/
+extern int cxt1e1_log_level;
diff --git a/drivers/staging/cxt1e1/sbeproc.c b/drivers/staging/cxt1e1/sbeproc.c
index 353c001d3fbe..840c647fbf41 100644
--- a/drivers/staging/cxt1e1/sbeproc.c
+++ b/drivers/staging/cxt1e1/sbeproc.c
@@ -72,7 +72,8 @@ static int sbecom_proc_get_sbe_info(struct seq_file *m, void *v)
char *spd;
struct sbe_brd_info *bip;
- if (!(bip = OS_kmalloc(sizeof(struct sbe_brd_info))))
+ bip = OS_kmalloc(sizeof(struct sbe_brd_info));
+ if (!bip)
return -ENOMEM;
pr_devel(">> sbecom_proc_get_sbe_info: entered\n");
@@ -150,7 +151,6 @@ static int sbecom_proc_get_sbe_info(struct seq_file *m, void *v)
break;
}
seq_printf(m, "PCI Bus Speed: %s\n", spd);
- seq_printf(m, "Release: %s\n", ci->release);
#ifdef SBE_PMCC4_ENABLE
{
diff --git a/drivers/staging/dgap/Makefile b/drivers/staging/dgap/Makefile
index 3abe8d2bb748..0063d044ca71 100644
--- a/drivers/staging/dgap/Makefile
+++ b/drivers/staging/dgap/Makefile
@@ -1,7 +1 @@
obj-$(CONFIG_DGAP) += dgap.o
-
-
-dgap-objs := dgap_driver.o dgap_fep5.o \
- dgap_parse.o dgap_trace.o \
- dgap_tty.o dgap_sysfs.o
-
diff --git a/drivers/staging/dgap/dgap.c b/drivers/staging/dgap/dgap.c
new file mode 100644
index 000000000000..a5fc3c75ed4e
--- /dev/null
+++ b/drivers/staging/dgap/dgap.c
@@ -0,0 +1,7675 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+/*
+ * In the original out-of-kernel Digi dgap driver, firmware
+ * loading was done via user-land-to-driver handshaking.
+ *
+ * For cards that support a concentrator (port expander),
+ * I believe the concentrator itself told the card which
+ * concentrator is actually attached and then that info
+ * was used to tell user land which concentrator firmware
+ * image was to be downloaded. I think even the BIOS or
+ * FEP images required could change with the connection
+ * of a particular concentrator.
+ *
+ * Since I have no access to any of these cards or
+ * concentrators, I cannot put the correct concentrator
+ * firmware file names into the firmware_info structure
+ * as is now done for the BIOS and FEP images.
+ *
+ * I think, but am not certain, that the cards supporting
+ * concentrators will function without them. So support
+ * of these cards has been left in this driver.
+ *
+ * In order to fully support those cards, they would
+ * either have to be acquired for dissection or maybe
+ * Digi International could provide some assistance.
+ */
+#undef DIGI_CONCENTRATORS_SUPPORTED
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h> /* For udelay */
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/ctype.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/io.h> /* For read[bwl]/write[bwl] */
+
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+#include <linux/firmware.h>
+
+#include "dgap.h"
+
+#define init_MUTEX(sem) sema_init(sem, 1)
+#define DECLARE_MUTEX(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Digi International, http://www.digi.com");
+MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
+MODULE_SUPPORTED_DEVICE("dgap");
+
+/**************************************************************************
+ *
+ * protos for this file
+ *
+ */
+
+static int dgap_start(void);
+static void dgap_init_globals(void);
+static int dgap_found_board(struct pci_dev *pdev, int id);
+static void dgap_cleanup_board(struct board_t *brd);
+static void dgap_poll_handler(ulong dummy);
+static int dgap_init_pci(void);
+static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void dgap_remove_one(struct pci_dev *dev);
+static int dgap_probe1(struct pci_dev *pdev, int card_type);
+static int dgap_do_remap(struct board_t *brd);
+static irqreturn_t dgap_intr(int irq, void *voidbrd);
+
+/* Our function prototypes */
+static int dgap_tty_open(struct tty_struct *tty, struct file *file);
+static void dgap_tty_close(struct tty_struct *tty, struct file *file);
+static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
+ struct channel_t *ch);
+static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
+static int dgap_tty_digigeta(struct tty_struct *tty,
+ struct digi_t __user *retinfo);
+static int dgap_tty_digiseta(struct tty_struct *tty,
+ struct digi_t __user *new_info);
+static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo);
+static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info);
+static int dgap_tty_write_room(struct tty_struct *tty);
+static int dgap_tty_chars_in_buffer(struct tty_struct *tty);
+static void dgap_tty_start(struct tty_struct *tty);
+static void dgap_tty_stop(struct tty_struct *tty);
+static void dgap_tty_throttle(struct tty_struct *tty);
+static void dgap_tty_unthrottle(struct tty_struct *tty);
+static void dgap_tty_flush_chars(struct tty_struct *tty);
+static void dgap_tty_flush_buffer(struct tty_struct *tty);
+static void dgap_tty_hangup(struct tty_struct *tty);
+static int dgap_wait_for_drain(struct tty_struct *tty);
+static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command,
+ unsigned int __user *value);
+static int dgap_get_modem_info(struct channel_t *ch,
+ unsigned int __user *value);
+static int dgap_tty_digisetcustombaud(struct tty_struct *tty,
+ int __user *new_info);
+static int dgap_tty_digigetcustombaud(struct tty_struct *tty,
+ int __user *retinfo);
+static int dgap_tty_tiocmget(struct tty_struct *tty);
+static int dgap_tty_tiocmset(struct tty_struct *tty, unsigned int set,
+ unsigned int clear);
+static int dgap_tty_send_break(struct tty_struct *tty, int msec);
+static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout);
+static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
+ int count);
+static void dgap_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios);
+static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c);
+static void dgap_tty_send_xchar(struct tty_struct *tty, char ch);
+
+static int dgap_tty_register(struct board_t *brd);
+static int dgap_tty_init(struct board_t *);
+static void dgap_tty_uninit(struct board_t *);
+static void dgap_carrier(struct channel_t *ch);
+static void dgap_input(struct channel_t *ch);
+
+/*
+ * Our function prototypes from dgap_fep5
+ */
+static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds);
+static int dgap_event(struct board_t *bd);
+
+static void dgap_poll_tasklet(unsigned long data);
+static void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1,
+ uchar byte2, uint ncmds);
+static void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds);
+static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt);
+static int dgap_param(struct tty_struct *tty);
+static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
+ unsigned char *fbuf, int *len);
+static uint dgap_get_custom_baud(struct channel_t *ch);
+static void dgap_firmware_reset_port(struct channel_t *ch);
+
+/*
+ * Function prototypes from dgap_parse.c.
+ */
+static int dgap_gettok(char **in, struct cnode *p);
+static char *dgap_getword(char **in);
+static char *dgap_savestring(char *s);
+static struct cnode *dgap_newnode(int t);
+static int dgap_checknode(struct cnode *p);
+static void dgap_err(char *s);
+
+/*
+ * Function prototypes from dgap_sysfs.h
+ */
+struct board_t;
+struct channel_t;
+struct un_t;
+struct pci_driver;
+struct class_device;
+
+static void dgap_create_ports_sysfiles(struct board_t *bd);
+static void dgap_remove_ports_sysfiles(struct board_t *bd);
+
+static int dgap_create_driver_sysfiles(struct pci_driver *);
+static void dgap_remove_driver_sysfiles(struct pci_driver *);
+
+static void dgap_create_tty_sysfs(struct un_t *un, struct device *c);
+static void dgap_remove_tty_sysfs(struct device *c);
+
+/*
+ * Function prototypes from dgap_parse.h
+ */
+static int dgap_parsefile(char **in, int Remove);
+static struct cnode *dgap_find_config(int type, int bus, int slot);
+static uint dgap_config_get_num_prts(struct board_t *bd);
+static char *dgap_create_config_string(struct board_t *bd, char *string);
+static uint dgap_config_get_useintr(struct board_t *bd);
+static uint dgap_config_get_altpin(struct board_t *bd);
+
+static int dgap_ms_sleep(ulong ms);
+static void dgap_do_bios_load(struct board_t *brd, const uchar *ubios, int len);
+static void dgap_do_fep_load(struct board_t *brd, const uchar *ufep, int len);
+#ifdef DIGI_CONCENTRATORS_SUPPORTED
+static void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len);
+#endif
+static int dgap_after_config_loaded(int board);
+static int dgap_finalize_board_init(struct board_t *brd);
+
+static void dgap_get_vpd(struct board_t *brd);
+static void dgap_do_reset_board(struct board_t *brd);
+static int dgap_do_wait_for_bios(struct board_t *brd);
+static int dgap_do_wait_for_fep(struct board_t *brd);
+static int dgap_tty_register_ports(struct board_t *brd);
+static int dgap_firmware_load(struct pci_dev *pdev, int card_type);
+
+/* Driver unload function */
+static void dgap_cleanup_module(void);
+
+module_exit(dgap_cleanup_module);
+
+/*
+ * File operations permitted on Control/Management major.
+ */
+static const struct file_operations DgapBoardFops = {
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Globals
+ */
+static uint dgap_NumBoards;
+static struct board_t *dgap_Board[MAXBOARDS];
+static ulong dgap_poll_counter;
+static char *dgap_config_buf;
+static int dgap_driver_state = DRIVER_INITIALIZED;
+DEFINE_SPINLOCK(dgap_dl_lock);
+static wait_queue_head_t dgap_dl_wait;
+static int dgap_dl_action;
+static int dgap_poll_tick = 20; /* Poll interval - 20 ms */
+
+/*
+ * Static vars.
+ */
+static struct class *dgap_class;
+
+static struct board_t *dgap_BoardsByMajor[256];
+static uint dgap_count = 500;
+
+/*
+ * Poller stuff
+ */
+DEFINE_SPINLOCK(dgap_poll_lock); /* Poll scheduling lock */
+static ulong dgap_poll_time; /* Time of next poll */
+static uint dgap_poll_stop; /* Used to tell poller to stop */
+static struct timer_list dgap_poll_timer;
+
+/*
+ SUPPORTED PRODUCTS
+
+ Card Model Number of Ports Interface
+ ----------------------------------------------------------------
+ Acceleport Xem 4 - 64 (EIA232 & EIA422)
+ Acceleport Xr 4 & 8 (EIA232)
+ Acceleport Xr 920 4 & 8 (EIA232)
+ Acceleport C/X 8 - 128 (EIA232)
+ Acceleport EPC/X 8 - 224 (EIA232)
+ Acceleport Xr/422 4 & 8 (EIA422)
+ Acceleport 2r/920 2 (EIA232)
+ Acceleport 4r/920 4 (EIA232)
+ Acceleport 8r/920 8 (EIA232)
+
+ IBM 8-Port Asynchronous PCI Adapter (EIA232)
+ IBM 128-Port Asynchronous PCI Adapter (EIA232 & EIA422)
+*/
+
+static struct pci_device_id dgap_pci_tbl[] = {
+ { DIGI_VID, PCI_DEV_XEM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { DIGI_VID, PCI_DEV_CX_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { DIGI_VID, PCI_DEV_CX_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { DIGI_VID, PCI_DEV_EPCJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { DIGI_VID, PCI_DEV_920_2_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { DIGI_VID, PCI_DEV_920_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { DIGI_VID, PCI_DEV_920_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { DIGI_VID, PCI_DEV_XR_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ { DIGI_VID, PCI_DEV_XRJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ { DIGI_VID, PCI_DEV_XR_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ { DIGI_VID, PCI_DEV_XR_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ { DIGI_VID, PCI_DEV_XR_SAIP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ { DIGI_VID, PCI_DEV_XR_BULL_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ { DIGI_VID, PCI_DEV_920_8_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
+ { DIGI_VID, PCI_DEV_XEM_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
+ {0,} /* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, dgap_pci_tbl);
+
+/*
+ * A generic list of Product names, PCI Vendor ID, and PCI Device ID.
+ */
+struct board_id {
+ uint config_type;
+ uchar *name;
+ uint maxports;
+ uint dpatype;
+};
+
+static struct board_id dgap_Ids[] = {
+ { PPCM, PCI_DEV_XEM_NAME, 64, (T_PCXM|T_PCLITE|T_PCIBUS) },
+ { PCX, PCI_DEV_CX_NAME, 128, (T_CX|T_PCIBUS) },
+ { PCX, PCI_DEV_CX_IBM_NAME, 128, (T_CX|T_PCIBUS) },
+ { PEPC, PCI_DEV_EPCJ_NAME, 224, (T_EPC|T_PCIBUS) },
+ { APORT2_920P, PCI_DEV_920_2_NAME, 2, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { APORT4_920P, PCI_DEV_920_4_NAME, 4, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { APORT8_920P, PCI_DEV_920_8_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XR_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XRJ_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XR_422_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XR_IBM_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XR_SAIP_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XR_BULL_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { APORT8_920P, PCI_DEV_920_8_HP_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PPCM, PCI_DEV_XEM_HP_NAME, 64, (T_PCXM|T_PCLITE|T_PCIBUS) },
+ {0,} /* 0 terminated list. */
+};
+
+static struct pci_driver dgap_driver = {
+ .name = "dgap",
+ .probe = dgap_init_one,
+ .id_table = dgap_pci_tbl,
+ .remove = dgap_remove_one,
+};
+
+struct firmware_info {
+ uchar *conf_name; /* dgap.conf */
+ uchar *bios_name; /* BIOS filename */
+ uchar *fep_name; /* FEP filename */
+ uchar *con_name; /* Concentrator filename FIXME */
+ int num; /* sequence number */
+};
+
+/*
+ * Firmware - BIOS, FEP, and CONC filenames
+ */
+static struct firmware_info fw_info[] = {
+ { "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", 0, 0 },
+ { "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", 0, 1 },
+ { "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", 0, 2 },
+ { "dgap/dgap.conf", "dgap/pcibios.bin", "dgap/pcifep.bin", 0, 3 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 4 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 5 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 6 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 7 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 8 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 9 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 10 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 11 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 12 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 13 },
+ { "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", 0, 14 },
+ {0,}
+};
+
+/*
+ * Default transparent print information.
+ */
+static struct digi_t dgap_digi_init = {
+ .digi_flags = DIGI_COOK, /* Flags */
+ .digi_maxcps = 100, /* Max CPS */
+ .digi_maxchar = 50, /* Max chars in print queue */
+ .digi_bufsize = 100, /* Printer buffer size */
+ .digi_onlen = 4, /* size of printer on string */
+ .digi_offlen = 4, /* size of printer off string */
+ .digi_onstr = "\033[5i", /* ANSI printer on string ] */
+ .digi_offstr = "\033[4i", /* ANSI printer off string ] */
+ .digi_term = "ansi" /* default terminal type */
+};
+
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially.
+ *
+ * This defines a raw port at 9600 baud, 8 data bits, no parity,
+ * 1 stop bit.
+ */
+
+static struct ktermios DgapDefaultTermios = {
+ .c_iflag = (DEFAULT_IFLAGS), /* iflags */
+ .c_oflag = (DEFAULT_OFLAGS), /* oflags */
+ .c_cflag = (DEFAULT_CFLAGS), /* cflags */
+ .c_lflag = (DEFAULT_LFLAGS), /* lflags */
+ .c_cc = INIT_C_CC,
+ .c_line = 0,
+};
+
+static const struct tty_operations dgap_tty_ops = {
+ .open = dgap_tty_open,
+ .close = dgap_tty_close,
+ .write = dgap_tty_write,
+ .write_room = dgap_tty_write_room,
+ .flush_buffer = dgap_tty_flush_buffer,
+ .chars_in_buffer = dgap_tty_chars_in_buffer,
+ .flush_chars = dgap_tty_flush_chars,
+ .ioctl = dgap_tty_ioctl,
+ .set_termios = dgap_tty_set_termios,
+ .stop = dgap_tty_stop,
+ .start = dgap_tty_start,
+ .throttle = dgap_tty_throttle,
+ .unthrottle = dgap_tty_unthrottle,
+ .hangup = dgap_tty_hangup,
+ .put_char = dgap_tty_put_char,
+ .tiocmget = dgap_tty_tiocmget,
+ .tiocmset = dgap_tty_tiocmset,
+ .break_ctl = dgap_tty_send_break,
+ .wait_until_sent = dgap_tty_wait_until_sent,
+ .send_xchar = dgap_tty_send_xchar
+};
+
+/*
+ * Internal static state used by the config file parser
+ * (carried over from dgap_parse.c).
+ */
+static struct cnode dgap_head;
+#define MAXCWORD 200
+static char dgap_cword[MAXCWORD];
+
+struct toklist {
+ int token;
+ char *string;
+};
+
+static struct toklist dgap_tlist[] = {
+ { BEGIN, "config_begin" },
+ { END, "config_end" },
+ { BOARD, "board" },
+ { PCX, "Digi_AccelePort_C/X_PCI" },
+ { PEPC, "Digi_AccelePort_EPC/X_PCI" },
+ { PPCM, "Digi_AccelePort_Xem_PCI" },
+ { APORT2_920P, "Digi_AccelePort_2r_920_PCI" },
+ { APORT4_920P, "Digi_AccelePort_4r_920_PCI" },
+ { APORT8_920P, "Digi_AccelePort_8r_920_PCI" },
+ { PAPORT4, "Digi_AccelePort_4r_PCI(EIA-232/RS-422)" },
+ { PAPORT8, "Digi_AccelePort_8r_PCI(EIA-232/RS-422)" },
+ { IO, "io" },
+ { PCIINFO, "pciinfo" },
+ { LINE, "line" },
+ { CONC, "conc" },
+ { CONC, "concentrator" },
+ { CX, "cx" },
+ { CX, "ccon" },
+ { EPC, "epccon" },
+ { EPC, "epc" },
+ { MOD, "module" },
+ { ID, "id" },
+ { STARTO, "start" },
+ { SPEED, "speed" },
+ { CABLE, "cable" },
+ { CONNECT, "connect" },
+ { METHOD, "method" },
+ { STATUS, "status" },
+ { CUSTOM, "Custom" },
+ { BASIC, "Basic" },
+ { MEM, "mem" },
+ { MEM, "memory" },
+ { PORTS, "ports" },
+ { MODEM, "modem" },
+ { NPORTS, "nports" },
+ { TTYN, "ttyname" },
+ { CU, "cuname" },
+ { PRINT, "prname" },
+ { CMAJOR, "major" },
+ { ALTPIN, "altpin" },
+ { USEINTR, "useintr" },
+ { TTSIZ, "ttysize" },
+ { CHSIZ, "chsize" },
+ { BSSIZ, "boardsize" },
+ { UNTSIZ, "schedsize" },
+ { F2SIZ, "f2200size" },
+ { VPSIZ, "vpixsize" },
+ { 0, NULL }
+};
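+
+/*
+ * The strings above are the keywords dgap_parsefile() recognizes in the
+ * dgap.conf file named in fw_info[]. Purely as an illustration of how
+ * the tokens are meant to be used (layout assumed, not copied from a
+ * shipped config), a fragment could look like:
+ *
+ * config_begin
+ * board Digi_AccelePort_8r_920_PCI
+ * ports 8
+ * ttyname ttyG
+ * useintr 0
+ * config_end
+ */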
+
+/************************************************************************
+ *
+ * Driver load/unload functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_init_module()
+ *
+ * Module load. This is where it all starts.
+ */
+static int dgap_init_module(void)
+{
+ int rc = 0;
+
+ pr_info("%s, Digi International Part Number %s\n", DG_NAME, DG_PART);
+
+ rc = dgap_start();
+ if (rc)
+ return rc;
+
+ rc = dgap_init_pci();
+ if (rc)
+ goto err_cleanup;
+
+ rc = dgap_create_driver_sysfiles(&dgap_driver);
+ if (rc)
+ goto err_cleanup;
+
+ dgap_driver_state = DRIVER_READY;
+
+ return 0;
+
+err_cleanup:
+
+ dgap_cleanup_module();
+
+ return rc;
+}
+module_init(dgap_init_module);
+
+/*
+ * Start of driver.
+ */
+static int dgap_start(void)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct device *device;
+
+ /*
+ * make sure that the globals are
+ * init'd before we do anything else
+ */
+ dgap_init_globals();
+
+ dgap_NumBoards = 0;
+
+ pr_info("For the tools package please visit http://www.digi.com\n");
+
+ /*
+ * Register our base character device into the kernel.
+ */
+
+ /*
+ * Register management/dpa devices
+ */
+ rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &DgapBoardFops);
+ if (rc < 0)
+ return rc;
+
+ dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
+ if (IS_ERR(dgap_class)) {
+ rc = PTR_ERR(dgap_class);
+ goto failed_class;
+ }
+
+ device = device_create(dgap_class, NULL,
+ MKDEV(DIGI_DGAP_MAJOR, 0),
+ NULL, "dgap_mgmt");
+ if (IS_ERR(device)) {
+ rc = PTR_ERR(device);
+ goto failed_device;
+ }
+
+ /* Start the poller */
+ spin_lock_irqsave(&dgap_poll_lock, flags);
+ init_timer(&dgap_poll_timer);
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ dgap_poll_timer.expires = dgap_poll_time;
+ spin_unlock_irqrestore(&dgap_poll_lock, flags);
+
+ add_timer(&dgap_poll_timer);
+
+ return rc;
+
+failed_device:
+ class_destroy(dgap_class);
+failed_class:
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+ return rc;
+}
+
+/*
+ * Register the PCI driver with the kernel.
+ */
+static int dgap_init_pci(void)
+{
+ return pci_register_driver(&dgap_driver);
+}
+
+/* Returns 0 on success, or a negative error code on failure. */
+static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+
+ /* wake up and enable device */
+ rc = pci_enable_device(pdev);
+
+ if (rc < 0) {
+ rc = -EIO;
+ } else {
+ rc = dgap_probe1(pdev, ent->driver_data);
+ if (rc == 0) {
+ dgap_NumBoards++;
+ rc = dgap_firmware_load(pdev, ent->driver_data);
+ }
+ }
+ return rc;
+}
+
+static int dgap_probe1(struct pci_dev *pdev, int card_type)
+{
+ return dgap_found_board(pdev, card_type);
+}
+
+static void dgap_remove_one(struct pci_dev *dev)
+{
+ /* Do Nothing */
+}
+
+/*
+ * dgap_cleanup_module()
+ *
+ * Module unload. This is where it all ends.
+ */
+static void dgap_cleanup_module(void)
+{
+ int i;
+ ulong lock_flags;
+
+ spin_lock_irqsave(&dgap_poll_lock, lock_flags);
+ dgap_poll_stop = 1;
+ spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
+
+ /* Turn off poller right away. */
+ del_timer_sync(&dgap_poll_timer);
+
+ dgap_remove_driver_sysfiles(&dgap_driver);
+
+ device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
+ class_destroy(dgap_class);
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+
+ kfree(dgap_config_buf);
+
+ for (i = 0; i < dgap_NumBoards; ++i) {
+ dgap_remove_ports_sysfiles(dgap_Board[i]);
+ dgap_tty_uninit(dgap_Board[i]);
+ dgap_cleanup_board(dgap_Board[i]);
+ }
+
+ if (dgap_NumBoards)
+ pci_unregister_driver(&dgap_driver);
+}
+
+/*
+ * dgap_cleanup_board()
+ *
+ * Free all the memory associated with a board
+ */
+static void dgap_cleanup_board(struct board_t *brd)
+{
+ int i = 0;
+
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ if (brd->intr_used && brd->irq)
+ free_irq(brd->irq, brd);
+
+ tasklet_kill(&brd->helper_tasklet);
+
+ if (brd->re_map_port) {
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ iounmap(brd->re_map_port);
+ brd->re_map_port = NULL;
+ }
+
+ if (brd->re_map_membase) {
+ release_mem_region(brd->membase, 0x200000);
+ iounmap(brd->re_map_membase);
+ brd->re_map_membase = NULL;
+ }
+
+ /* Free all allocated channels structs */
+ for (i = 0; i < MAXPORTS ; i++)
+ kfree(brd->channels[i]);
+
+ kfree(brd->flipbuf);
+ kfree(brd->flipflagbuf);
+
+ dgap_Board[brd->boardnum] = NULL;
+
+ kfree(brd);
+}
+
+/*
+ * dgap_found_board()
+ *
+ * A board has been found, init it.
+ */
+static int dgap_found_board(struct pci_dev *pdev, int id)
+{
+ struct board_t *brd;
+ unsigned int pci_irq;
+ int i = 0;
+
+ /* get the board structure and prep it */
+ brd = kzalloc(sizeof(struct board_t), GFP_KERNEL);
+ if (!brd)
+ return -ENOMEM;
+
+ dgap_Board[dgap_NumBoards] = brd;
+
+ /* store the info for the board we've found */
+ brd->magic = DGAP_BOARD_MAGIC;
+ brd->boardnum = dgap_NumBoards;
+ brd->firstminor = 0;
+ brd->vendor = dgap_pci_tbl[id].vendor;
+ brd->device = dgap_pci_tbl[id].device;
+ brd->pdev = pdev;
+ brd->pci_bus = pdev->bus->number;
+ brd->pci_slot = PCI_SLOT(pdev->devfn);
+ brd->name = dgap_Ids[id].name;
+ brd->maxports = dgap_Ids[id].maxports;
+ brd->type = dgap_Ids[id].config_type;
+ brd->dpatype = dgap_Ids[id].dpatype;
+ brd->dpastatus = BD_NOFEP;
+ init_waitqueue_head(&brd->state_wait);
+
+ spin_lock_init(&brd->bd_lock);
+
+ brd->runwait = 0;
+ brd->inhibit_poller = FALSE;
+ brd->wait_for_bios = 0;
+ brd->wait_for_fep = 0;
+
+ for (i = 0; i < MAXPORTS; i++)
+ brd->channels[i] = NULL;
+
+ /* store which card & revision we have */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
+
+ pci_irq = pdev->irq;
+ brd->irq = pci_irq;
+
+ /* get the PCI Base Address Registers */
+
+ /* Xr Jupiter and EPC use BAR 2 */
+ if (brd->device == PCI_DEV_XRJ_DID || brd->device == PCI_DEV_EPCJ_DID) {
+ brd->membase = pci_resource_start(pdev, 2);
+ brd->membase_end = pci_resource_end(pdev, 2);
+ }
+ /* Everyone else uses BAR 0 */
+ else {
+ brd->membase = pci_resource_start(pdev, 0);
+ brd->membase_end = pci_resource_end(pdev, 0);
+ }
+
+ if (!brd->membase)
+ return -ENODEV;
+
+ if (brd->membase & 1)
+ brd->membase &= ~3;
+ else
+ brd->membase &= ~15;
+
+ /*
+ * On the PCI boards, there is no IO space allocated
+ * The I/O registers will be in the first 3 bytes of the
+ * upper 2MB of the 4MB memory space. The board memory
+ * will be mapped into the low 2MB of the 4MB memory space
+ */
+ brd->port = brd->membase + PCI_IO_OFFSET;
+ brd->port_end = brd->port + PCI_IO_SIZE;
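+
+ /*
+ * Worked example (addresses illustrative; PCI_IO_OFFSET and
+ * PCI_IO_SIZE come from the driver header, with PCI_IO_OFFSET
+ * expected to be 0x200000 given the 2MB + 2MB split described
+ * above): if membase is 0xd0000000, board memory occupies
+ * 0xd0000000-0xd01fffff and brd->port starts at 0xd0200000.
+ */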
+
+ /*
+ * Special initialization for non-PLX boards
+ */
+ if (brd->device != PCI_DEV_XRJ_DID && brd->device != PCI_DEV_EPCJ_DID) {
+ unsigned short cmd;
+
+ pci_write_config_byte(pdev, 0x40, 0);
+ pci_write_config_byte(pdev, 0x46, 0);
+
+ /* Limit burst length to 2 doubleword transactions */
+ pci_write_config_byte(pdev, 0x42, 1);
+
+ /*
+ * Enable IO and mem if not already done.
+ * This was needed for support on Itanium.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ /* init our poll helper tasklet */
+ tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet,
+ (unsigned long) brd);
+
+ i = dgap_do_remap(brd);
+ if (i)
+ brd->state = BOARD_FAILED;
+
+ pr_info("dgap: board %d: %s (rev %d), irq %ld, %s\n",
+ dgap_NumBoards, brd->name, brd->rev, brd->irq,
+ brd->state ? "NOT READY\0" : "READY\0");
+
+ return 0;
+}
+
+
+static int dgap_finalize_board_init(struct board_t *brd)
+{
+ int rc;
+
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return -ENODEV;
+
+ brd->use_interrupts = dgap_config_get_useintr(brd);
+
+ /*
+ * Set up our interrupt handler if we are set to do interrupts.
+ */
+ if (brd->use_interrupts && brd->irq) {
+
+ rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
+
+ if (rc)
+ brd->intr_used = 0;
+ else
+ brd->intr_used = 1;
+ } else {
+ brd->intr_used = 0;
+ }
+
+ return 0;
+}
+
+static int dgap_firmware_load(struct pci_dev *pdev, int card_type)
+{
+ struct board_t *brd = dgap_Board[dgap_NumBoards - 1];
+ const struct firmware *fw;
+ int ret;
+
+ dgap_get_vpd(brd);
+ dgap_do_reset_board(brd);
+
+ if (fw_info[card_type].conf_name) {
+ ret = request_firmware(&fw, fw_info[card_type].conf_name,
+ &pdev->dev);
+ if (ret) {
+ pr_err("dgap: config file %s not found\n",
+ fw_info[card_type].conf_name);
+ return ret;
+ }
+ if (!dgap_config_buf) {
+ dgap_config_buf = kmalloc(fw->size + 1, GFP_ATOMIC);
+ if (!dgap_config_buf) {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+ }
+
+ memcpy(dgap_config_buf, fw->data, fw->size);
+ release_firmware(fw);
+ dgap_config_buf[fw->size] = '\0';
+
+ if (dgap_parsefile(&dgap_config_buf, TRUE) != 0)
+ return -EINVAL;
+ }
+
+ ret = dgap_after_config_loaded(brd->boardnum);
+ if (ret)
+ return ret;
+ /*
+ * Match this board to a config the user created for us.
+ */
+ brd->bd_config =
+ dgap_find_config(brd->type, brd->pci_bus, brd->pci_slot);
+
+ /*
+ * Because the 4 port Xr products share the same PCI ID
+ * as the 8 port Xr products, if we receive a NULL config
+ * back, and this is a PAPORT8 board, retry with a
+ * PAPORT4 attempt as well.
+ */
+ if (brd->type == PAPORT8 && !brd->bd_config)
+ brd->bd_config =
+ dgap_find_config(PAPORT4, brd->pci_bus, brd->pci_slot);
+
+ if (!brd->bd_config) {
+ pr_err("dgap: No valid configuration found\n");
+ return -EINVAL;
+ }
+
+ dgap_tty_register(brd);
+ dgap_finalize_board_init(brd);
+
+ if (fw_info[card_type].bios_name) {
+ ret = request_firmware(&fw, fw_info[card_type].bios_name,
+ &pdev->dev);
+ if (ret) {
+ pr_err("dgap: bios file %s not found\n",
+ fw_info[card_type].bios_name);
+ return ret;
+ }
+ dgap_do_bios_load(brd, fw->data, fw->size);
+ release_firmware(fw);
+
+ /* Wait for BIOS to test board... */
+ if (!dgap_do_wait_for_bios(brd))
+ return -ENXIO;
+ }
+
+ if (fw_info[card_type].fep_name) {
+ ret = request_firmware(&fw, fw_info[card_type].fep_name,
+ &pdev->dev);
+ if (ret) {
+ pr_err("dgap: fep file %s not found\n",
+ fw_info[card_type].fep_name);
+ return ret;
+ }
+ dgap_do_fep_load(brd, fw->data, fw->size);
+ release_firmware(fw);
+
+ /* Wait for FEP to load on board... */
+ if (!dgap_do_wait_for_fep(brd))
+ return -ENXIO;
+ }
+
+#ifdef DIGI_CONCENTRATORS_SUPPORTED
+ /*
+ * If this is a CX or EPCX, we need to see if the firmware
+ * is requesting a concentrator image from us.
+ */
+ {
+ u16 *chk_addr;
+ u16 check = 0;
+ uchar *vaddr = brd->re_map_membase;
+
+ if ((brd->type == PCX) || (brd->type == PEPC)) {
+ chk_addr = (u16 *) (vaddr + DOWNREQ);
+ /* Nonzero if FEP is requesting concentrator image. */
+ check = readw(chk_addr);
+ }
+
+ if (fw_info[card_type].con_name && check && vaddr) {
+ ret = request_firmware(&fw, fw_info[card_type].con_name,
+ &pdev->dev);
+ if (ret) {
+ pr_err("dgap: conc file %s not found\n",
+ fw_info[card_type].con_name);
+ return ret;
+ }
+ /* Hand the concentrator image to the FEP. */
+ dgap_do_conc_load(brd, (char *)fw->data, fw->size);
+ release_firmware(fw);
+ }
+ }
+#endif
+ /*
+ * Do tty device initialization.
+ */
+ ret = dgap_tty_init(brd);
+ if (ret < 0) {
+ dgap_tty_uninit(brd);
+ return ret;
+ }
+
+ ret = dgap_tty_register_ports(brd);
+ if (ret)
+ return ret;
+
+ brd->state = BOARD_READY;
+ brd->dpastatus = BD_RUNNING;
+
+ return 0;
+}
+
+/*
+ * Remap PCI memory.
+ */
+static int dgap_do_remap(struct board_t *brd)
+{
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return -ENXIO;
+
+ if (!request_mem_region(brd->membase, 0x200000, "dgap"))
+ return -ENOMEM;
+
+ if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000,
+ "dgap")) {
+ release_mem_region(brd->membase, 0x200000);
+ return -ENOMEM;
+ }
+
+ brd->re_map_membase = ioremap(brd->membase, 0x200000);
+ if (!brd->re_map_membase) {
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ return -ENOMEM;
+ }
+
+ brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
+ if (!brd->re_map_port) {
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ iounmap(brd->re_map_membase);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*****************************************************************************
+*
+* Function:
+*
+* dgap_poll_handler
+*
+* Author:
+*
+* Scott H Kilau
+*
+* Parameters:
+*
+* dummy -- ignored
+*
+* Return Values:
+*
+* none
+*
+* Description:
+*
+* As each timer expires, it determines (a) whether the "transmit"
+* waiter needs to be woken up, and (b) whether the poller needs to
+* be rescheduled.
+*
+******************************************************************************/
+
+static void dgap_poll_handler(ulong dummy)
+{
+ int i;
+ struct board_t *brd;
+ unsigned long lock_flags;
+ ulong new_time;
+
+ dgap_poll_counter++;
+
+ /*
+ * Do not start the board state machine until
+ * driver tells us its up and running, and has
+ * everything it needs.
+ */
+ if (dgap_driver_state != DRIVER_READY)
+ goto schedule_poller;
+
+ /*
+ * If we have just 1 board, or the system is not SMP,
+ * then use the typical old style poller.
+ * Otherwise, use our new tasklet based poller, which should
+ * speed things up for multiple boards.
+ */
+ if ((dgap_NumBoards == 1) || (num_online_cpus() <= 1)) {
+ for (i = 0; i < dgap_NumBoards; i++) {
+
+ brd = dgap_Board[i];
+
+ if (brd->state == BOARD_FAILED)
+ continue;
+ if (!brd->intr_running)
+ /* Call the real board poller directly */
+ dgap_poll_tasklet((unsigned long) brd);
+ }
+ } else {
+ /*
+ * Go thru each board, kicking off a
+ * tasklet for each if needed
+ */
+ for (i = 0; i < dgap_NumBoards; i++) {
+ brd = dgap_Board[i];
+
+ /*
+ * Attempt to grab the board lock.
+ *
+ * If we can't get it, no big deal, the next poll
+ * will get it. Basically, I just really don't want
+ * to spin in here, because I want to kick off my
+ * tasklets as fast as I can, and then get out of
+ * the poller.
+ */
+ if (!spin_trylock(&brd->bd_lock))
+ continue;
+
+ /*
+ * If board is in a failed state, don't bother
+ * scheduling a tasklet
+ */
+ if (brd->state == BOARD_FAILED) {
+ spin_unlock(&brd->bd_lock);
+ continue;
+ }
+
+ /* Schedule a poll helper task */
+ if (!brd->intr_running)
+ tasklet_schedule(&brd->helper_tasklet);
+
+ /*
+ * We used spin_trylock() above, so there are no saved
+ * lock_flags; a plain spin_unlock() is the right release.
+ */
+ spin_unlock(&brd->bd_lock);
+ }
+ }
+
+schedule_poller:
+
+ /*
+ * Schedule ourself back at the nominal wakeup interval.
+ */
+ spin_lock_irqsave(&dgap_poll_lock, lock_flags);
+ dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
+
+ new_time = dgap_poll_time - jiffies;
+
+ if ((ulong) new_time >= 2 * dgap_poll_tick) {
+ dgap_poll_time =
+ jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ }
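+
+ /*
+ * Worked example (illustrative, assuming HZ = 1000 so one
+ * jiffy is one millisecond and the comparison against the
+ * millisecond-based dgap_poll_tick lines up): with a 20ms
+ * tick, a poller that has fallen 50ms behind sees new_time
+ * wrap to a huge unsigned value, trips the test above, and
+ * resynchronizes to jiffies + 20ms instead of firing
+ * repeatedly to catch up.
+ */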
+
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_timer.expires = dgap_poll_time;
+ spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
+
+ if (!dgap_poll_stop)
+ add_timer(&dgap_poll_timer);
+}
+
+/*
+ * dgap_intr()
+ *
+ * Driver interrupt handler.
+ */
+static irqreturn_t dgap_intr(int irq, void *voidbrd)
+{
+ struct board_t *brd = (struct board_t *) voidbrd;
+
+ if (!brd)
+ return IRQ_NONE;
+
+ /*
+ * Check to make sure its for us.
+ */
+ if (brd->magic != DGAP_BOARD_MAGIC)
+ return IRQ_NONE;
+
+ brd->intr_count++;
+
+ /*
+ * Schedule tasklet to run at a better time.
+ */
+ tasklet_schedule(&brd->helper_tasklet);
+ return IRQ_HANDLED;
+}
+
+/*
+ * dgap_init_globals()
+ *
+ * This is where we initialize the globals from the static insmod
+ * configuration variables. These are declared near the head of
+ * this file.
+ */
+static void dgap_init_globals(void)
+{
+ int i = 0;
+
+ for (i = 0; i < MAXBOARDS; i++)
+ dgap_Board[i] = NULL;
+
+ init_timer(&dgap_poll_timer);
+
+ init_waitqueue_head(&dgap_dl_wait);
+ dgap_dl_action = 0;
+}
+
+/************************************************************************
+ *
+ * Utility functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_ms_sleep()
+ *
+ * Put the driver to sleep for x milliseconds.
+ *
+ * Returns 0 if it timed out, non-zero if it was interrupted by a signal.
+ */
+static int dgap_ms_sleep(ulong ms)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout((ms * HZ) / 1000);
+ return signal_pending(current);
+}
+
+/************************************************************************
+ *
+ * TTY Initialization/Cleanup Functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_tty_register()
+ *
+ * Init the tty subsystem for this board.
+ */
+static int dgap_tty_register(struct board_t *brd)
+{
+ int rc = 0;
+
+ brd->SerialDriver = alloc_tty_driver(MAXPORTS);
+ if (!brd->SerialDriver)
+ return -ENOMEM;
+
+ snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgap_%d_", brd->boardnum);
+ brd->SerialDriver->name = brd->SerialName;
+ brd->SerialDriver->name_base = 0;
+ brd->SerialDriver->major = 0;
+ brd->SerialDriver->minor_start = 0;
+ brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL;
+ brd->SerialDriver->init_termios = DgapDefaultTermios;
+ brd->SerialDriver->driver_name = DRVSTR;
+ brd->SerialDriver->flags = (TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
+
+ /* The kernel wants space to store pointers to tty_structs */
+ brd->SerialDriver->ttys =
+ kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->SerialDriver->ttys)
+ return -ENOMEM;
+
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(brd->SerialDriver, &dgap_tty_ops);
+
+ /*
+ * If we're doing transparent print, we have to do all of the above
+ * again, separately so we don't get the LD confused about what major
+ * we are when we get into the dgap_tty_open() routine.
+ */
+ brd->PrintDriver = alloc_tty_driver(MAXPORTS);
+ if (!brd->PrintDriver)
+ return -ENOMEM;
+
+ snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgap_%d_", brd->boardnum);
+ brd->PrintDriver->name = brd->PrintName;
+ brd->PrintDriver->name_base = 0;
+ brd->PrintDriver->major = 0;
+ brd->PrintDriver->minor_start = 0;
+ brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->PrintDriver->subtype = SERIAL_TYPE_NORMAL;
+ brd->PrintDriver->init_termios = DgapDefaultTermios;
+ brd->PrintDriver->driver_name = DRVSTR;
+ brd->PrintDriver->flags = (TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
+
+ /* The kernel wants space to store pointers to tty_structs */
+ brd->PrintDriver->ttys =
+ kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->PrintDriver->ttys)
+ return -ENOMEM;
+
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(brd->PrintDriver, &dgap_tty_ops);
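+
+ /*
+ * At this point each board ends up with two tty drivers that
+ * share the same operations but different names. With the
+ * name patterns built above, port 3 of board 0 would surface
+ * as tty_dgap_0_3 for normal serial I/O and pr_dgap_0_3 for
+ * the transparent print unit (node names illustrative; the
+ * tty core appends the port index to the driver name).
+ */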
+
+ if (!brd->dgap_Major_Serial_Registered) {
+ /* Register tty devices */
+ rc = tty_register_driver(brd->SerialDriver);
+ if (rc < 0)
+ return rc;
+ brd->dgap_Major_Serial_Registered = TRUE;
+ dgap_BoardsByMajor[brd->SerialDriver->major] = brd;
+ brd->dgap_Serial_Major = brd->SerialDriver->major;
+ }
+
+ if (!brd->dgap_Major_TransparentPrint_Registered) {
+ /* Register Transparent Print devices */
+ rc = tty_register_driver(brd->PrintDriver);
+ if (rc < 0)
+ return rc;
+ brd->dgap_Major_TransparentPrint_Registered = TRUE;
+ dgap_BoardsByMajor[brd->PrintDriver->major] = brd;
+ brd->dgap_TransparentPrint_Major = brd->PrintDriver->major;
+ }
+
+ return rc;
+}
+
+/*
+ * dgap_tty_init()
+ *
+ * Init the tty subsystem. Called once per board after board has been
+ * downloaded and init'ed.
+ */
+static int dgap_tty_init(struct board_t *brd)
+{
+ int i;
+ int tlw;
+ uint true_count = 0;
+ uchar *vaddr;
+ uchar modem = 0;
+ struct channel_t *ch;
+ struct bs_t *bs;
+ struct cm_t *cm;
+
+ if (!brd)
+ return -ENXIO;
+
+ /*
+ * Initialize board structure elements.
+ */
+
+ vaddr = brd->re_map_membase;
+ true_count = readw((vaddr + NCHAN));
+
+ brd->nasync = dgap_config_get_num_prts(brd);
+
+ if (!brd->nasync)
+ brd->nasync = brd->maxports;
+
+ if (brd->nasync > brd->maxports)
+ brd->nasync = brd->maxports;
+
+ if (true_count != brd->nasync) {
+ pr_warn("dgap: %s configured for %d ports, has %d ports.\n",
+ brd->name, brd->nasync, true_count);
+
+ if ((brd->type == PPCM) &&
+ ((true_count == 64) || (true_count == 0))) {
+ pr_warn("dgap: Please make SURE the EBI cable running from the card\n");
+ pr_warn("dgap: to each EM module is plugged into EBI IN!\n");
+ }
+
+ brd->nasync = true_count;
+
+ /* If no ports, don't bother going any further */
+ if (!brd->nasync) {
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return -ENXIO;
+ }
+ }
+
+ /*
+ * Allocate channel memory that might not have been allocated
+ * when the driver was first loaded.
+ */
+ for (i = 0; i < brd->nasync; i++) {
+ if (!brd->channels[i]) {
+ brd->channels[i] =
+ kzalloc(sizeof(struct channel_t), GFP_ATOMIC);
+ if (!brd->channels[i])
+ return -ENOMEM;
+ }
+ }
+
+ ch = brd->channels[0];
+ vaddr = brd->re_map_membase;
+
+ bs = (struct bs_t *) ((ulong) vaddr + CHANBUF);
+ cm = (struct cm_t *) ((ulong) vaddr + CMDBUF);
+
+ brd->bd_bs = bs;
+
+ /* Set up channel variables */
+ for (i = 0; i < brd->nasync; i++, bs++) {
+
+ ch = brd->channels[i];
+ if (!ch)
+ continue;
+
+ spin_lock_init(&ch->ch_lock);
+
+ /* Store all our magic numbers */
+ ch->magic = DGAP_CHANNEL_MAGIC;
+ ch->ch_tun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_tun.un_type = DGAP_SERIAL;
+ ch->ch_tun.un_ch = ch;
+ ch->ch_tun.un_dev = i;
+
+ ch->ch_pun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_pun.un_type = DGAP_PRINT;
+ ch->ch_pun.un_ch = ch;
+ ch->ch_pun.un_dev = i;
+
+ ch->ch_vaddr = vaddr;
+ ch->ch_bs = bs;
+ ch->ch_cm = cm;
+ ch->ch_bd = brd;
+ ch->ch_portnum = i;
+ ch->ch_digi = dgap_digi_init;
+
+ /*
+ * Set up digi dsr and dcd bits based on altpin flag.
+ */
+ if (dgap_config_get_altpin(brd)) {
+ ch->ch_dsr = DM_CD;
+ ch->ch_cd = DM_DSR;
+ ch->ch_digi.digi_flags |= DIGI_ALTPIN;
+ } else {
+ ch->ch_cd = DM_CD;
+ ch->ch_dsr = DM_DSR;
+ }
+
+ ch->ch_taddr = vaddr + ((ch->ch_bs->tx_seg) << 4);
+ ch->ch_raddr = vaddr + ((ch->ch_bs->rx_seg) << 4);
+ ch->ch_tx_win = 0;
+ ch->ch_rx_win = 0;
+ ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
+ ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
+ ch->ch_tstart = 0;
+ ch->ch_rstart = 0;
+
+ /* .25 second delay */
+ ch->ch_close_delay = 250;
+
+ /*
+ * Set queue water marks, interrupt mask,
+ * and general tty parameters.
+ */
+ tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) :
+ ch->ch_tsize / 2;
+ ch->ch_tlw = tlw;
+
+ dgap_cmdw(ch, STLOW, tlw, 0);
+
+ dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
+
+ dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
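+
+ /*
+ * Worked example (buffer sizes are whatever the FEP reports;
+ * 4096 is only an illustration): a 4096-byte tx buffer gets
+ * tlw = 4096 * 5 / 8 = 2560, and a 4096-byte rx buffer gets
+ * SRLOW = 2048 and SRHIGH = 3584, so the FEP signals us when
+ * the transmit queue drains below 2560 bytes and flow-controls
+ * the remote side between half and 7/8 full on receive.
+ */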
+
+ ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
+
+ init_waitqueue_head(&ch->ch_flags_wait);
+ init_waitqueue_head(&ch->ch_tun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_pun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_sniff_wait);
+
+ /* Turn on all modem interrupts for now */
+ modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
+ writeb(modem, &(ch->ch_bs->m_int));
+
+ /*
+ * Set edelay to 0 if interrupts are turned on,
+ * otherwise set edelay to the usual 100.
+ */
+ if (brd->intr_used)
+ writew(0, &(ch->ch_bs->edelay));
+ else
+ writew(100, &(ch->ch_bs->edelay));
+
+ writeb(1, &(ch->ch_bs->idata));
+ }
+
+ return 0;
+}
+
+/*
+ * dgap_tty_uninit()
+ *
+ * Uninitialize the TTY portion of this driver. Free all memory and
+ * resources.
+ */
+static void dgap_tty_uninit(struct board_t *brd)
+{
+ struct device *dev;
+ int i = 0;
+
+ if (brd->dgap_Major_Serial_Registered) {
+ dgap_BoardsByMajor[brd->SerialDriver->major] = NULL;
+ brd->dgap_Serial_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_destroy(&brd->SerialPorts[i]);
+ dev = brd->channels[i]->ch_tun.un_sysfs;
+ dgap_remove_tty_sysfs(dev);
+ tty_unregister_device(brd->SerialDriver, i);
+ }
+ tty_unregister_driver(brd->SerialDriver);
+ kfree(brd->SerialDriver->ttys);
+ brd->SerialDriver->ttys = NULL;
+ put_tty_driver(brd->SerialDriver);
+ kfree(brd->SerialPorts);
+ brd->dgap_Major_Serial_Registered = FALSE;
+ }
+
+ if (brd->dgap_Major_TransparentPrint_Registered) {
+ dgap_BoardsByMajor[brd->PrintDriver->major] = NULL;
+ brd->dgap_TransparentPrint_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_destroy(&brd->PrinterPorts[i]);
+ dev = brd->channels[i]->ch_pun.un_sysfs;
+ dgap_remove_tty_sysfs(dev);
+ tty_unregister_device(brd->PrintDriver, i);
+ }
+ tty_unregister_driver(brd->PrintDriver);
+ kfree(brd->PrintDriver->ttys);
+ brd->PrintDriver->ttys = NULL;
+ put_tty_driver(brd->PrintDriver);
+ kfree(brd->PrinterPorts);
+ brd->dgap_Major_TransparentPrint_Registered = FALSE;
+ }
+}
+
+#define TMPBUFLEN (1024)
+/*
+ * dgap_sniff - Dump data out to the "sniff" buffer if the
+ * proc sniff file is opened...
+ */
+static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text,
+ uchar *buf, int len)
+{
+ struct timeval tv;
+ int n;
+ int r;
+ int nbuf;
+ int i;
+ int tmpbuflen;
+ char tmpbuf[TMPBUFLEN];
+ char *p = tmpbuf;
+ int too_much_data;
+
+ /* Leave if sniff not open */
+ if (!(ch->ch_sniff_flags & SNIFF_OPEN))
+ return;
+
+ do_gettimeofday(&tv);
+
+ /* Create our header for data dump */
+ p += sprintf(p, "<%ld %ld><%s><", tv.tv_sec, tv.tv_usec, text);
+ tmpbuflen = p - tmpbuf;
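+
+ /*
+ * Example of the header built above (values illustrative):
+ * "<1397507820 123456><USER READ><" followed by space-separated
+ * hex bytes and a closing ">".
+ */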
+
+ do {
+ too_much_data = 0;
+
+ for (i = 0; i < len && tmpbuflen < (TMPBUFLEN - 4); i++) {
+ p += sprintf(p, "%02x ", *buf);
+ buf++;
+ tmpbuflen = p - tmpbuf;
+ }
+
+ if (tmpbuflen < (TMPBUFLEN - 4)) {
+ if (i > 0)
+ p += sprintf(p - 1, "%s\n", ">");
+ else
+ p += sprintf(p, "%s\n", ">");
+ } else {
+ too_much_data = 1;
+ len -= i;
+ }
+
+ nbuf = strlen(tmpbuf);
+ p = tmpbuf;
+
+ /*
+ * Loop while data remains.
+ */
+ while (nbuf > 0 && ch->ch_sniff_buf) {
+ /*
+ * Determine the amount of available space left in the
+ * buffer. If there's none, wait until some appears.
+ */
+ n = (ch->ch_sniff_out - ch->ch_sniff_in - 1) &
+ SNIFF_MASK;
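+
+ /*
+ * Example (assuming SNIFF_MASK == SNIFF_MAX - 1 with SNIFF_MAX
+ * a power of two): if SNIFF_MAX were 4096 with in = 100 and
+ * out = 50, then (50 - 100 - 1) & 4095 = 4045 bytes free; one
+ * slot is always kept unused so in == out unambiguously means
+ * the buffer is empty.
+ */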
+
+ /*
+ * If there is no space left to write to in our sniff
+ * buffer, we have no choice but to drop the data.
+ * We *cannot* sleep here waiting for space, because
+ * this function was probably called by the
+ * interrupt/timer routines!
+ */
+ if (n == 0)
+ return;
+
+ /*
+ * Copy as much data as will fit.
+ */
+
+ if (n > nbuf)
+ n = nbuf;
+
+ r = SNIFF_MAX - ch->ch_sniff_in;
+
+ if (r <= n) {
+ memcpy(ch->ch_sniff_buf +
+ ch->ch_sniff_in, p, r);
+
+ n -= r;
+ ch->ch_sniff_in = 0;
+ p += r;
+ nbuf -= r;
+ }
+
+ memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, n);
+
+ ch->ch_sniff_in += n;
+ p += n;
+ nbuf -= n;
+
+ /*
+ * Wakeup any thread waiting for data
+ */
+ if (ch->ch_sniff_flags & SNIFF_WAIT_DATA) {
+ ch->ch_sniff_flags &= ~SNIFF_WAIT_DATA;
+ wake_up_interruptible(&ch->ch_sniff_wait);
+ }
+ }
+
+ /*
+ * If the user sent us too much data to push into our tmpbuf,
+ * we need to keep looping around on all the data.
+ */
+ if (too_much_data) {
+ p = tmpbuf;
+ tmpbuflen = 0;
+ }
+
+ } while (too_much_data);
+}
+
+/*=======================================================================
+ *
+ * dgap_input - Process received data.
+ *
+ * ch - Pointer to channel structure.
+ *
+ *=======================================================================*/
+
+static void dgap_input(struct channel_t *ch)
+{
+ struct board_t *bd;
+ struct bs_t *bs;
+ struct tty_struct *tp;
+ struct tty_ldisc *ld;
+ uint rmask;
+ uint head;
+ uint tail;
+ int data_len;
+ ulong lock_flags;
+ ulong lock_flags2;
+ int flip_len;
+ int len = 0;
+ int n = 0;
+ uchar *buf;
+ uchar tmpchar;
+ int s = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ tp = ch->ch_tun.un_tty;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ /*
+ * Figure the number of characters in the buffer.
+ * Exit immediately if none.
+ */
+
+ rmask = ch->ch_rsize - 1;
+
+ head = readw(&(bs->rx_head));
+ head &= rmask;
+ tail = readw(&(bs->rx_tail));
+ tail &= rmask;
+
+ data_len = (head - tail) & rmask;
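+
+ /*
+ * Circular-buffer example (sizes illustrative): with rsize =
+ * 4096 (rmask = 4095), head = 10 and tail = 4090 gives
+ * (10 - 4090) & 4095 = 16 bytes pending, handling the wrap at
+ * the top of the buffer correctly.
+ */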
+
+ if (data_len == 0) {
+ writeb(1, &(bs->idata));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * If the device is not open, or CREAD is off, flush
+ * input data and return immediately.
+ */
+ if ((bd->state != BOARD_READY) || !tp ||
+ (tp->magic != TTY_MAGIC) ||
+ !(ch->ch_tun.un_flags & UN_ISOPEN) ||
+ !(tp->termios.c_cflag & CREAD) ||
+ (ch->ch_tun.un_flags & UN_CLOSING)) {
+
+ writew(head, &(bs->rx_tail));
+ writeb(1, &(bs->idata));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * If we are throttled, simply don't read any data.
+ */
+ if (ch->ch_flags & CH_RXBLOCK) {
+ writeb(1, &(bs->idata));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * Ignore oruns.
+ */
+ tmpchar = readb(&(bs->orun));
+ if (tmpchar) {
+ ch->ch_err_overrun++;
+ writeb(0, &(bs->orun));
+ }
+
+ /* Decide how much data we can send into the tty layer */
+ flip_len = TTY_FLIPBUF_SIZE;
+
+ /* Chop down the length, if needed */
+ len = min(data_len, flip_len);
+ len = min(len, (N_TTY_BUF_SIZE - 1));
+
+ ld = tty_ldisc_ref(tp);
+
+#ifdef TTY_DONT_FLIP
+ /*
+ * If the DONT_FLIP flag is on, don't flush our buffer, and act
+ * like the ld doesn't have any space to put the data right now.
+ */
+ if (test_bit(TTY_DONT_FLIP, &tp->flags))
+ len = 0;
+#endif
+
+ /*
+ * If we were unable to get a reference to the ld,
+ * don't flush our buffer, and act like the ld doesn't
+ * have any space to put the data right now.
+ */
+ if (!ld) {
+ len = 0;
+ } else {
+ /*
+ * If ld doesn't have a pointer to a receive_buf function,
+ * flush the data, then act like the ld doesn't have any
+ * space to put the data right now.
+ */
+ if (!ld->ops->receive_buf) {
+ writew(head, &(bs->rx_tail));
+ len = 0;
+ }
+ }
+
+ if (len <= 0) {
+ writeb(1, &(bs->idata));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (ld)
+ tty_ldisc_deref(ld);
+ return;
+ }
+
+ buf = ch->ch_bd->flipbuf;
+ n = len;
+
+ /*
+ * n now contains the most amount of data we can copy,
+ * bounded either by our buffer size or the amount
+ * of data the card actually has pending...
+ */
+ while (n) {
+
+ s = ((head >= tail) ? head : ch->ch_rsize) - tail;
+ s = min(s, n);
+
+ if (s <= 0)
+ break;
+
+ memcpy_fromio(buf, (char *) ch->ch_raddr + tail, s);
+ dgap_sniff_nowait_nolock(ch, "USER READ", buf, s);
+
+ tail += s;
+ buf += s;
+
+ n -= s;
+ /* Flip queue if needed */
+ tail &= rmask;
+ }
+
+ writew(tail, &(bs->rx_tail));
+ writeb(1, &(bs->idata));
+ ch->ch_rxcount += len;
+
+ /*
+ * If we are completely raw, we don't need to go through a lot
+ * of the tty layers that exist.
+ * In this case, we take the shortest and fastest route we
+ * can to relay the data to the user.
+ *
+ * On the other hand, if we are not raw, we need to go through
+ * the tty layer, which has its API more well defined.
+ */
+ if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
+ dgap_parity_scan(ch, ch->ch_bd->flipbuf,
+ ch->ch_bd->flipflagbuf, &len);
+
+ len = tty_buffer_request_room(tp->port, len);
+ tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf,
+ ch->ch_bd->flipflagbuf, len);
+ } else {
+ len = tty_buffer_request_room(tp->port, len);
+ tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len);
+ }
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ /* Tell the tty layer it's okay to "eat" the data now */
+ tty_flip_buffer_push(tp->port);
+
+ if (ld)
+ tty_ldisc_deref(ld);
+
+}
+
+/************************************************************************
+ * Determines when CARRIER changes state and takes appropriate
+ * action.
+ ************************************************************************/
+static void dgap_carrier(struct channel_t *ch)
+{
+ struct board_t *bd;
+
+ int virt_carrier = 0;
+ int phys_carrier = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ /* Make sure altpin is always set correctly */
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
+ ch->ch_dsr = DM_CD;
+ ch->ch_cd = DM_DSR;
+ } else {
+ ch->ch_dsr = DM_DSR;
+ ch->ch_cd = DM_CD;
+ }
+
+ if (ch->ch_mistat & D_CD(ch))
+ phys_carrier = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FORCEDCD)
+ virt_carrier = 1;
+
+ if (ch->ch_c_cflag & CLOCAL)
+ virt_carrier = 1;
+
+ /*
+ * Test for a VIRTUAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL transition to low, so long as we aren't
+ * currently ignoring physical transitions (which is what "virtual
+ * carrier" indicates).
+ *
+ * The transition of the virtual carrier to low really doesn't
+ * matter... it only means "ignore carrier state", not
+ * "pretend that carrier is there".
+ */
+ if ((virt_carrier == 0) &&
+ ((ch->ch_flags & CH_CD) != 0) &&
+ (phys_carrier == 0)) {
+
+ /*
+ * When carrier drops:
+ *
+ * Drop carrier on all open units.
+ *
+ * Flush queues, waking up any task waiting in the
+ * line discipline.
+ *
+ * Send a hangup to the control terminal.
+ *
+ * Enable all select calls.
+ */
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+
+ if (ch->ch_tun.un_open_count > 0)
+ tty_hangup(ch->ch_tun.un_tty);
+
+ if (ch->ch_pun.un_open_count > 0)
+ tty_hangup(ch->ch_pun.un_tty);
+ }
+
+ /*
+ * Make sure that our cached values reflect the current reality.
+ */
+ if (virt_carrier == 1)
+ ch->ch_flags |= CH_FCAR;
+ else
+ ch->ch_flags &= ~CH_FCAR;
+
+ if (phys_carrier == 1)
+ ch->ch_flags |= CH_CD;
+ else
+ ch->ch_flags &= ~CH_CD;
+}
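+
+/*
+ * Quick reference for the transitions handled above:
+ *
+ * virtual carrier physical carrier action
+ * 0 -> 1 any wake ch_flags_wait
+ * unchanged 0 -> 1 wake ch_flags_wait
+ * 0 1 -> 0 wake waiters, hang up any open
+ * serial and print units
+ */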
+
+/************************************************************************
+ *
+ * TTY Entry points and helper functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_tty_open()
+ *
+ */
+static int dgap_tty_open(struct tty_struct *tty, struct file *file)
+{
+ struct board_t *brd;
+ struct channel_t *ch;
+ struct un_t *un;
+ struct bs_t *bs;
+ uint major = 0;
+ uint minor = 0;
+ int rc = 0;
+ ulong lock_flags;
+ ulong lock_flags2;
+ u16 head;
+
+ rc = 0;
+
+ major = MAJOR(tty_devnum(tty));
+ minor = MINOR(tty_devnum(tty));
+
+ if (major > 255)
+ return -ENXIO;
+
+ /* Get board pointer from our array of majors we have allocated */
+ brd = dgap_BoardsByMajor[major];
+ if (!brd)
+ return -ENXIO;
+
+ /*
+ * If board is not yet up to a state of READY, go to
+ * sleep waiting for it to happen or they cancel the open.
+ */
+ rc = wait_event_interruptible(brd->state_wait,
+ (brd->state & BOARD_READY));
+
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&brd->bd_lock, lock_flags);
+
+ /* The wait above should guarantee this cannot happen */
+ if (brd->state != BOARD_READY) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* If the opened device index is beyond our number of ports, bail. */
+ if (minor >= brd->nasync) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ ch = brd->channels[minor];
+ if (!ch) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* Grab channel lock */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ /* Figure out our type */
+ if (major == brd->dgap_Serial_Major) {
+ un = &brd->channels[minor]->ch_tun;
+ un->un_type = DGAP_SERIAL;
+ } else if (major == brd->dgap_TransparentPrint_Major) {
+ un = &brd->channels[minor]->ch_pun;
+ un->un_type = DGAP_PRINT;
+ } else {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* Store our unit into driver_data, so we always have it available. */
+ tty->driver_data = un;
+
+ /*
+ * Error if channel info pointer is NULL.
+ */
+ bs = ch->ch_bs;
+ if (!bs) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /*
+ * Initialize tty's
+ */
+ if (!(un->un_flags & UN_ISOPEN)) {
+ /* Store important variables. */
+ un->un_tty = tty;
+
+ /* Maybe do something here to the TTY struct as well? */
+ }
+
+ /*
+ * Initialize if neither terminal or printer is open.
+ */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
+
+ ch->ch_mforce = 0;
+ ch->ch_mval = 0;
+
+ /*
+ * Flush input queue.
+ */
+ head = readw(&(bs->rx_head));
+ writew(head, &(bs->rx_tail));
+
+ ch->ch_flags = 0;
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ /* TODO: flush our TTY struct here? */
+ }
+
+ dgap_carrier(ch);
+ /*
+ * Run param in case we changed anything
+ */
+ dgap_param(tty);
+
+ /*
+ * follow protocol for opening port
+ */
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+
+ rc = dgap_block_til_ready(tty, file, ch);
+
+ if (!un->un_tty)
+ return -ENODEV;
+
+ /* No going back now, increment our unit and channel counters */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ ch->ch_open_count++;
+ un->un_open_count++;
+ un->un_flags |= (UN_ISOPEN);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ return rc;
+}
+
+/*
+ * dgap_block_til_ready()
+ *
+ * Wait for DCD, if needed.
+ */
+static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
+ struct channel_t *ch)
+{
+ int retval = 0;
+ struct un_t *un = NULL;
+ ulong lock_flags;
+ uint old_flags = 0;
+ int sleep_on_un_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC || !file || !ch ||
+ ch->magic != DGAP_CHANNEL_MAGIC)
+ return -ENXIO;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -ENXIO;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ ch->ch_wopen++;
+
+ /* Loop forever */
+ while (1) {
+
+ sleep_on_un_flags = 0;
+
+ /*
+ * If board has failed somehow during our sleep,
+ * bail with error.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ retval = -ENXIO;
+ break;
+ }
+
+ /* If tty was hung up, break out of loop and set error. */
+ if (tty_hung_up_p(file)) {
+ retval = -EAGAIN;
+ break;
+ }
+
+ /*
+ * If either unit is in the middle of the fragile part of close,
+ * we just cannot touch the channel safely.
+ * Go back to sleep, knowing that when the channel can be
+ * touched safely, the close routine will wake us up
+ * through the flags wait queues.
+ */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) &
+ UN_CLOSING)) {
+
+ /*
+ * Our conditions to leave cleanly and happily:
+ * 1) NONBLOCKING on the tty is set.
+ * 2) CLOCAL is set.
+ * 3) DCD (fake or real) is active.
+ */
+
+ if (file->f_flags & O_NONBLOCK)
+ break;
+
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ break;
+
+ if (ch->ch_flags & CH_CD)
+ break;
+
+ if (ch->ch_flags & CH_FCAR)
+ break;
+ } else {
+ sleep_on_un_flags = 1;
+ }
+
+ /*
+ * If there is a signal pending, the user probably
+ * interrupted (ctrl-c) us.
+ * Leave loop with error set.
+ */
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ /*
+ * Store the flags before we let go of channel lock
+ */
+ if (sleep_on_un_flags)
+ old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
+ else
+ old_flags = ch->ch_flags;
+
+ /*
+ * Let go of channel lock before calling schedule.
+ * Our poller will get any FEP events and wake us up when DCD
+ * eventually goes active.
+ */
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ /*
+ * Wait for something in the flags to change
+ * from the current value.
+ */
+ if (sleep_on_un_flags) {
+ retval = wait_event_interruptible(un->un_flags_wait,
+ (old_flags != (ch->ch_tun.un_flags |
+ ch->ch_pun.un_flags)));
+ } else {
+ retval = wait_event_interruptible(ch->ch_flags_wait,
+ (old_flags != ch->ch_flags));
+ }
+
+ /*
+ * We got woken up for some reason.
+ * Before looping around, grab our channel lock.
+ */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ }
+
+ ch->ch_wopen--;
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ if (retval)
+ return retval;
+
+ return 0;
+}
+
+/*
+ * dgap_tty_hangup()
+ *
+ * Hangup the port. Like a close, but don't wait for output to drain.
+ */
+static void dgap_tty_hangup(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ /* flush the transmit queues */
+ dgap_tty_flush_buffer(tty);
+
+}
+
+/*
+ * dgap_tty_close()
+ *
+ */
+static void dgap_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ int rc = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ ts = &tty->termios;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ /*
+ * Determine if this is the last close or not - and if we agree about
+ * which type of close it is with the Line Discipline
+ */
+ if ((tty->count == 1) && (un->un_open_count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. un_open_count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ un->un_open_count = 1;
+ }
+
+ if (--un->un_open_count < 0)
+ un->un_open_count = 0;
+
+ ch->ch_open_count--;
+
+ if (ch->ch_open_count && un->un_open_count) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* OK, it's the last close on the unit */
+
+ un->un_flags |= UN_CLOSING;
+
+ tty->closing = 1;
+
+ /*
+ * Only officially close channel if count is 0 and
+ * DIGI_PRINTER bit is not set.
+ */
+ if ((ch->ch_open_count == 0) &&
+ !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
+
+ ch->ch_flags &= ~(CH_RXBLOCK);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ /* wait for output to drain */
+ /* This will also return if we take an interrupt */
+
+ rc = dgap_wait_for_drain(tty);
+
+ dgap_tty_flush_buffer(tty);
+ tty_ldisc_flush(tty);
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ tty->closing = 0;
+
+ /*
+ * If we have HUPCL set, lower DTR and RTS
+ */
+ if (ch->ch_c_cflag & HUPCL) {
+ ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
+ dgap_cmdb(ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0);
+
+ /*
+ * Go to sleep to ensure RTS/DTR
+ * have been dropped for modems to see it.
+ */
+ if (ch->ch_close_delay) {
+ spin_unlock_irqrestore(&ch->ch_lock,
+ lock_flags);
+ dgap_ms_sleep(ch->ch_close_delay);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ }
+ }
+
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+ ch->ch_baud_info = 0;
+
+ }
+
+ /*
+ * turn off print device when closing print device.
+ */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ un->un_tty = NULL;
+ un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
+ tty->driver_data = NULL;
+
+ wake_up_interruptible(&ch->ch_flags_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+}
+
+/*
+ * dgap_tty_chars_in_buffer()
+ *
+ * Return number of characters that have not been transmitted yet.
+ *
+ * This routine is used by the line discipline to determine if there
+ * is data waiting to be transmitted/drained/flushed or not.
+ */
+static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct board_t *bd = NULL;
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ uchar tbusy;
+ uint chars = 0;
+ u16 thead, ttail, tmask, chead, ctail;
+ ulong lock_flags = 0;
+ ulong lock_flags2 = 0;
+
+ if (tty == NULL)
+ return 0;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return 0;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ tmask = (ch->ch_tsize - 1);
+
+ /* Get Transmit queue pointers */
+ thead = readw(&(bs->tx_head)) & tmask;
+ ttail = readw(&(bs->tx_tail)) & tmask;
+
+ /* Get tbusy flag */
+ tbusy = readb(&(bs->tbusy));
+
+ /* Get Command queue pointers */
+ chead = readw(&(ch->ch_cm->cm_head));
+ ctail = readw(&(ch->ch_cm->cm_tail));
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ /*
+ * The only way we know for sure that there is no pending
+ * data left to be transferred is if:
+ * 1) Transmit head and tail are equal (empty).
+ * 2) Command queue head and tail are equal (empty).
+ * 3) The "TBUSY" flag is 0 (transmitter not busy).
+ */
+
+ if ((ttail == thead) && (tbusy == 0) && (chead == ctail)) {
+ chars = 0;
+ } else {
+ if (thead >= ttail)
+ chars = thead - ttail;
+ else
+ chars = thead - ttail + ch->ch_tsize;
+ /*
+ * Fudge factor here.
+ * If chars is zero, we know that the command queue had
+ * something in it or tbusy was set. Because we cannot
+ * be sure whether there is still data to be transmitted,
+ * lie and tell the ld we have 1 byte left.
+ */
+ if (chars == 0) {
+ /*
+ * If TBUSY is still set, and our tx buffers are empty,
+ * force the firmware to send me another wakeup after
+ * TBUSY has been cleared.
+ */
+ if (tbusy != 0) {
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ spin_unlock_irqrestore(&ch->ch_lock,
+ lock_flags);
+ }
+ chars = 1;
+ }
+ }
+
+ return chars;
+}
+
+static int dgap_wait_for_drain(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ struct bs_t *bs;
+ int ret = -EIO;
+ uint count = 1;
+ ulong lock_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return ret;
+
+ ret = 0;
+
+ /* Loop until data is drained */
+ while (count != 0) {
+
+ count = dgap_tty_chars_in_buffer(tty);
+
+ if (count == 0)
+ break;
+
+ /* Set flag waiting for drain */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ /* Go to sleep till we get woken up */
+ ret = wait_event_interruptible(un->un_flags_wait,
+ ((un->un_flags & UN_EMPTY) == 0));
+ /* If ret is non-zero, user ctrl-c'ed us */
+ if (ret)
+ break;
+ }
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ un->un_flags &= ~(UN_EMPTY);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ return ret;
+}
+
+/*
+ * dgap_maxcps_room
+ *
+ * Reduces bytes_available to the max number of characters
+ * that can be sent currently given the maxcps value, and
+ * returns the new bytes_available. This only affects printer
+ * output.
+ */
+static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+
+ if (tty == NULL)
+ return bytes_available;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return bytes_available;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return bytes_available;
+
+ /*
+ * If it's not the transparent print device, return
+ * the full data amount.
+ */
+ if (un->un_type != DGAP_PRINT)
+ return bytes_available;
+
+ if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) {
+ int cps_limit = 0;
+ unsigned long current_time = jiffies;
+ unsigned long buffer_time = current_time +
+ (HZ * ch->ch_digi.digi_bufsize) /
+ ch->ch_digi.digi_maxcps;
+
+ if (ch->ch_cpstime < current_time) {
+ /* buffer is empty */
+ ch->ch_cpstime = current_time; /* reset ch_cpstime */
+ cps_limit = ch->ch_digi.digi_bufsize;
+ } else if (ch->ch_cpstime < buffer_time) {
+ /* still room in the buffer */
+ cps_limit = ((buffer_time - ch->ch_cpstime) *
+ ch->ch_digi.digi_maxcps) / HZ;
+ } else {
+ /* no room in the buffer */
+ cps_limit = 0;
+ }
+
+ bytes_available = min(cps_limit, bytes_available);
+ }
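+
+ /*
+ * Worked example (values illustrative): with digi_maxcps = 100,
+ * digi_bufsize = 100 and HZ = 1000, buffer_time is jiffies +
+ * 1000, the moment a full 100-char shadow buffer would drain at
+ * 100 cps. If ch_cpstime is 400 jiffies ahead of now, the room
+ * left is (1000 - 400) * 100 / 1000 = 60 characters.
+ */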
+
+ return bytes_available;
+}
+
+static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
+{
+ struct channel_t *ch = NULL;
+ struct bs_t *bs = NULL;
+
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+ bs = ch->ch_bs;
+ if (!bs)
+ return;
+
+ if ((event & UN_LOW) != 0) {
+ if ((un->un_flags & UN_LOW) == 0) {
+ un->un_flags |= UN_LOW;
+ writeb(1, &(bs->ilow));
+ }
+ }
+ if ((event & UN_EMPTY) != 0) {
+ if ((un->un_flags & UN_EMPTY) == 0) {
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ }
+ }
+}
+
+/*
+ * dgap_tty_write_room()
+ *
+ * Return space available in Tx buffer
+ */
+static int dgap_tty_write_room(struct tty_struct *tty)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ u16 head, tail, tmask;
+ int ret = 0;
+ ulong lock_flags = 0;
+
+ if (!tty)
+ return 0;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return 0;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ tmask = ch->ch_tsize - 1;
+ head = readw(&(bs->tx_head)) & tmask;
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ ret = tail - head - 1;
+ if (ret < 0)
+ ret += ch->ch_tsize;
+
+ /* Limit printer to maxcps */
+ ret = dgap_maxcps_room(tty, ret);
+
+ /*
+ * If we are printer device, leave space for
+ * possibly both the on and off strings.
+ */
+ if (un->un_type == DGAP_PRINT) {
+ if (!(ch->ch_flags & CH_PRON))
+ ret -= ch->ch_digi.digi_onlen;
+ ret -= ch->ch_digi.digi_offlen;
+ } else {
+ if (ch->ch_flags & CH_PRON)
+ ret -= ch->ch_digi.digi_offlen;
+ }
+
+ if (ret < 0)
+ ret = 0;
+
+ /*
+ * Schedule FEP to wake us up if needed.
+ *
+ * TODO: This might be overkill...
+ * Do we really need to schedule callbacks from the FEP
+ * in every case? Can we get smarter based on ret?
+ */
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ return ret;
+}
+
+/*
+ * dgap_tty_put_char()
+ *
+ * Put a character into ch->ch_buf
+ *
+ * - used by the line discipline for OPOST processing
+ */
+static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
+{
+ /*
+ * Simply call tty_write.
+ */
+ dgap_tty_write(tty, &c, 1);
+ return 1;
+}
+
+/*
+ * dgap_tty_write()
+ *
+ * Take data from the user or kernel and send it out to the FEP.
+ * All of the transparent print magic lives in here as well.
+ */
+static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ char *vaddr = NULL;
+ u16 head, tail, tmask, remain;
+ int bufcount = 0, n = 0;
+ int orig_count = 0;
+ ulong lock_flags;
+
+ if (!tty)
+ return 0;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return 0;
+
+ if (!count)
+ return 0;
+
+ /*
+ * Store original amount of characters passed in.
+ * This helps to figure out if we should ask the FEP
+ * to send us an event when it has more space available.
+ */
+ orig_count = count;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ /* Get our space available for the channel from the board */
+ tmask = ch->ch_tsize - 1;
+ head = readw(&(bs->tx_head)) & tmask;
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ bufcount = tail - head - 1;
+ if (bufcount < 0)
+ bufcount += ch->ch_tsize;
+
+ /*
+ * Limit printer output to maxcps overall, with bursts allowed
+ * up to bufsize characters.
+ */
+ bufcount = dgap_maxcps_room(tty, bufcount);
+
+ /*
+ * Take minimum of what the user wants to send, and the
+ * space available in the FEP buffer.
+ */
+ count = min(count, bufcount);
+
+ /*
+ * Bail if no space left.
+ */
+ if (count <= 0) {
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ return 0;
+ }
+
+ /*
+ * Output the printer ON string, if we are in terminal mode, but
+ * need to be in printer mode.
+ */
+ if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_onstr,
+ (int) ch->ch_digi.digi_onlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags |= CH_PRON;
+ }
+
+ /*
+ * On the other hand, output the printer OFF string, if we are
+ * currently in printer mode, but need to output to the terminal.
+ */
+ if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ /*
+ * If there is nothing left to copy, or
+ * I can't handle any more data, leave.
+ */
+ if (count <= 0) {
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ return 0;
+ }
+
+ n = count;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ remain = ch->ch_tstart + ch->ch_tsize - head;
+
+ if (n >= remain) {
+ n -= remain;
+ vaddr = ch->ch_taddr + head;
+
+ memcpy_toio(vaddr, (uchar *) buf, remain);
+ dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *) buf,
+ remain);
+
+ head = ch->ch_tstart;
+ buf += remain;
+ }
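+	/*
+	 * Whatever is left after the wrap (n) is copied by the block
+	 * below, starting at the new head.  E.g. with remain = 10 and
+	 * a 25 byte write, 10 bytes land at the old head and the
+	 * remaining 15 start again at ch_tstart.
+	 */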
+
+ if (n > 0) {
+
+ /*
+ * Move rest of data.
+ */
+ vaddr = ch->ch_taddr + head;
+ remain = n;
+
+ memcpy_toio(vaddr, (uchar *) buf, remain);
+ dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *)buf,
+ remain);
+
+ head += remain;
+
+ }
+
+ if (count) {
+ ch->ch_txcount += count;
+ head &= tmask;
+ writew(head, &(bs->tx_head));
+ }
+
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+
+ /*
+ * If this is the print device, and the
+ * printer is still on, we need to turn it
+ * off before going idle. If the buffer is
+ * non-empty, wait until it goes empty.
+ * Otherwise turn it off right now.
+ */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ if (tail != head) {
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ } else {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
+ }
+
+ /* Update printer buffer empty time. */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0)
+ && (ch->ch_digi.digi_bufsize > 0)) {
+ ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
+ }
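+	/*
+	 * ch_cpstime advances by the number of jiffies the printer
+	 * needs to emit 'count' characters at digi_maxcps;
+	 * dgap_maxcps_room() presumably checks this running total
+	 * against the current time to enforce the CPS limit.
+	 */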
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ return count;
+}
+
+/*
+ * Return modem signals to ld.
+ */
+static int dgap_tty_tiocmget(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int result = -EIO;
+ uchar mstat = 0;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return result;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return result;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return result;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ mstat = readb(&(ch->ch_bs->m_stat));
+ /* Append any outbound signals that might be pending... */
+ mstat |= ch->ch_mostat;
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ result = 0;
+
+ if (mstat & D_DTR(ch))
+ result |= TIOCM_DTR;
+ if (mstat & D_RTS(ch))
+ result |= TIOCM_RTS;
+ if (mstat & D_CTS(ch))
+ result |= TIOCM_CTS;
+ if (mstat & D_DSR(ch))
+ result |= TIOCM_DSR;
+ if (mstat & D_RI(ch))
+ result |= TIOCM_RI;
+ if (mstat & D_CD(ch))
+ result |= TIOCM_CD;
+
+ return result;
+}
+
+/*
+ * dgap_tty_tiocmset()
+ *
+ * Set modem signals, called by ld.
+ */
+static int dgap_tty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ if (set & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval |= D_RTS(ch);
+ }
+
+ if (set & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval |= D_DTR(ch);
+ }
+
+ if (clear & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval &= ~(D_RTS(ch));
+ }
+
+ if (clear & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval &= ~(D_DTR(ch));
+ }
+
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+/*
+ * dgap_tty_send_break()
+ *
+ * Send a Break, called by ld.
+ */
+static int dgap_tty_send_break(struct tty_struct *tty, int msec)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
+ switch (msec) {
+ case -1:
+ msec = 0xFFFF;
+ break;
+ case 0:
+ msec = 1;
+ break;
+ default:
+ msec /= 10;
+ break;
+ }
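+	/*
+	 * The FEP appears to take the break time in 10 ms units:
+	 * msec / 10 converts from milliseconds, 0 is bumped to the
+	 * minimum of one unit, and -1 maps to 0xFFFF for the longest
+	 * break the firmware supports.
+	 */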
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+#if 0
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+#endif
+ dgap_cmdw(ch, SBREAK, (u16) msec, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+/*
+ * dgap_tty_wait_until_sent()
+ *
+ * wait until data has been transmitted, called by ld.
+ */
+static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ dgap_wait_for_drain(tty);
+}
+
+/*
+ * dgap_tty_send_xchar()
+ *
+ * Send a high priority character, called by ld.
+ */
+static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ /*
+ * This is technically what we should do.
+ * However, the NIST tests specifically want
+ * to see each XON or XOFF character that it
+	 * sends, so let's just send each character
+ * by hand...
+ */
+#if 0
+ if (c == STOP_CHAR(tty))
+ dgap_cmdw(ch, RPAUSE, 0, 0);
+ else if (c == START_CHAR(tty))
+ dgap_cmdw(ch, RRESUME, 0, 0);
+ else
+ dgap_wmove(ch, &c, 1);
+#else
+ dgap_wmove(ch, &c, 1);
+#endif
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return;
+}
+
+/*
+ * Return modem signals to ld.
+ */
+static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value)
+{
+ int result = 0;
+ uchar mstat = 0;
+ ulong lock_flags;
+ int rc = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -ENXIO;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ mstat = readb(&(ch->ch_bs->m_stat));
+ /* Append any outbound signals that might be pending... */
+ mstat |= ch->ch_mostat;
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ result = 0;
+
+ if (mstat & D_DTR(ch))
+ result |= TIOCM_DTR;
+ if (mstat & D_RTS(ch))
+ result |= TIOCM_RTS;
+ if (mstat & D_CTS(ch))
+ result |= TIOCM_CTS;
+ if (mstat & D_DSR(ch))
+ result |= TIOCM_DSR;
+ if (mstat & D_RI(ch))
+ result |= TIOCM_RI;
+ if (mstat & D_CD(ch))
+ result |= TIOCM_CD;
+
+ rc = put_user(result, value);
+
+ return rc;
+}
+
+/*
+ * dgap_set_modem_info()
+ *
+ * Set modem signals, called by ld.
+ */
+static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command,
+ unsigned int __user *value)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -ENXIO;
+ unsigned int arg = 0;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
+ ret = get_user(arg, value);
+ if (ret)
+ return ret;
+
+ switch (command) {
+ case TIOCMBIS:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval |= D_RTS(ch);
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval |= D_DTR(ch);
+ }
+
+ break;
+
+ case TIOCMBIC:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval &= ~(D_RTS(ch));
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval &= ~(D_DTR(ch));
+ }
+
+ break;
+
+ case TIOCMSET:
+ ch->ch_mforce = D_DTR(ch)|D_RTS(ch);
+
+ if (arg & TIOCM_RTS)
+ ch->ch_mval |= D_RTS(ch);
+ else
+ ch->ch_mval &= ~(D_RTS(ch));
+
+ if (arg & TIOCM_DTR)
+ ch->ch_mval |= (D_DTR(ch));
+ else
+ ch->ch_mval &= ~(D_DTR(ch));
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digigeta()
+ *
+ * Ioctl to get the information for ditty.
+ */
+static int dgap_tty_digigeta(struct tty_struct *tty,
+ struct digi_t __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return -EFAULT;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digiseta()
+ *
+ * Ioctl to set the information for ditty.
+ */
+static int dgap_tty_digiseta(struct tty_struct *tty,
+ struct digi_t __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t new_digi;
+ ulong lock_flags = 0;
+ unsigned long lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -EFAULT;
+
+ if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
+
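+	/*
+	 * Sanity-clamp the user supplied values: digi_maxcps to
+	 * 1..10000, digi_bufsize to at least 10, digi_maxchar to
+	 * 1..digi_bufsize, and the on/off string lengths to DIGI_PLEN.
+	 */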
+ if (ch->ch_digi.digi_maxcps < 1)
+ ch->ch_digi.digi_maxcps = 1;
+
+ if (ch->ch_digi.digi_maxcps > 10000)
+ ch->ch_digi.digi_maxcps = 10000;
+
+ if (ch->ch_digi.digi_bufsize < 10)
+ ch->ch_digi.digi_bufsize = 10;
+
+ if (ch->ch_digi.digi_maxchar < 1)
+ ch->ch_digi.digi_maxchar = 1;
+
+ if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
+ ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
+
+ if (ch->ch_digi.digi_onlen > DIGI_PLEN)
+ ch->ch_digi.digi_onlen = DIGI_PLEN;
+
+ if (ch->ch_digi.digi_offlen > DIGI_PLEN)
+ ch->ch_digi.digi_offlen = DIGI_PLEN;
+
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digigetedelay()
+ *
+ * Ioctl to get the current edelay setting.
+ */
+static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return -EFAULT;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ tmp = readw(&(ch->ch_bs->edelay));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digisetedelay()
+ *
+ * Ioctl to set the EDELAY setting
+ *
+ */
+static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int new_digi;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -EFAULT;
+
+ if (copy_from_user(&new_digi, new_info, sizeof(int)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ writew((u16) new_digi, &(ch->ch_bs->edelay));
+
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digigetcustombaud()
+ *
+ * Ioctl to get the current custom baud rate setting.
+ */
+static int dgap_tty_digigetcustombaud(struct tty_struct *tty,
+ int __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return -EFAULT;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ tmp = dgap_get_custom_baud(ch);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digisetcustombaud()
+ *
+ * Ioctl to set the custom baud rate setting
+ */
+static int dgap_tty_digisetcustombaud(struct tty_struct *tty,
+ int __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ uint new_rate;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -EFAULT;
+
+ if (copy_from_user(&new_rate, new_info, sizeof(unsigned int)))
+ return -EFAULT;
+
+ if (bd->bd_flags & BD_FEP5PLUS) {
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ ch->ch_custom_speed = new_rate;
+
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ }
+
+ return 0;
+}
+
+/*
+ * dgap_set_termios()
+ */
+static void dgap_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ unsigned long lock_flags;
+ unsigned long lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ dgap_carrier(ch);
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+}
+
+static void dgap_tty_throttle(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ ch->ch_flags |= (CH_RXBLOCK);
+#if 1
+ dgap_cmdw(ch, RPAUSE, 0, 0);
+#endif
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+}
+
+static void dgap_tty_unthrottle(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ ch->ch_flags &= ~(CH_RXBLOCK);
+
+#if 1
+ dgap_cmdw(ch, RRESUME, 0, 0);
+#endif
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+}
+
+static void dgap_tty_start(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+}
+
+static void dgap_tty_stop(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, PAUSETX, 0, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+}
+
+/*
+ * dgap_tty_flush_chars()
+ *
+ * Flush the cooked output buffer
+ *
+ * Note to self, and any other poor souls who venture here:
+ *
+ * flush in this case DOES NOT mean dispose of the data.
+ * Instead, it means "stop buffering and send it if you
+ * haven't already." Just guess how I figured that out... SRW 2-Jun-98
+ *
+ * It is also always called in interrupt context - JAR 8-Sept-99
+ */
+static void dgap_tty_flush_chars(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ /* TODO: Do something here */
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+}
+
+/*
+ * dgap_tty_flush_buffer()
+ *
+ * Flush Tx buffer (make in == out)
+ */
+static void dgap_tty_flush_buffer(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+ u16 head = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->tx_head));
+ dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+ tty_wakeup(tty);
+}
+
+/*****************************************************************************
+ *
+ * The IOCTL function and all of its helpers
+ *
+ *****************************************************************************/
+
+/*
+ * dgap_tty_ioctl()
+ *
+ * The usual assortment of ioctl's
+ */
+static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc;
+ u16 head = 0;
+ ulong lock_flags = 0;
+ ulong lock_flags2 = 0;
+ void __user *uarg = (void __user *) arg;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -ENODEV;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -ENODEV;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -ENODEV;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -ENODEV;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ if (un->un_open_count <= 0) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ switch (cmd) {
+
+ /* Here are all the standard ioctl's that we MUST implement */
+
+ case TCSBRK:
+ /*
+ * TCSBRK is SVID version: non-zero arg --> no break
+ * this behaviour is exploited by tcdrain().
+ *
+ * According to POSIX.1 spec (7.2.2.1.2) breaks should be
+ * between 0.25 and 0.5 seconds so we'll ask for something
+ * in the middle: 0.375 seconds.
+ */
+ rc = tty_check_change(tty);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (rc)
+ return rc;
+
+ rc = dgap_wait_for_drain(tty);
+
+ if (rc)
+ return -EINTR;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+
+ case TCSBRKP:
+		/*
+		 * Support for POSIX tcsendbreak()
+		 *
+		 * According to POSIX.1 spec (7.2.2.1.2) breaks should be
+		 * between 0.25 and 0.5 seconds so we'll ask for something
+		 * in the middle: 0.375 seconds.
+		 */
+ rc = tty_check_change(tty);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (rc)
+ return rc;
+
+ rc = dgap_wait_for_drain(tty);
+ if (rc)
+ return -EINTR;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+
+ case TIOCSBRK:
+ /*
+ * FEP5 doesn't support turning on a break unconditionally.
+ * The FEP5 device will stop sending a break automatically
+ * after the specified time value that was sent when turning on
+ * the break.
+ */
+ rc = tty_check_change(tty);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (rc)
+ return rc;
+
+ rc = dgap_wait_for_drain(tty);
+ if (rc)
+ return -EINTR;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+
+ case TIOCCBRK:
+ /*
+ * FEP5 doesn't support turning off a break unconditionally.
+ * The FEP5 device will stop sending a break automatically
+ * after the specified time value that was sent when turning on
+ * the break.
+ */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return 0;
+
+ case TIOCGSOFTCAR:
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ rc = put_user(C_CLOCAL(tty) ? 1 : 0,
+ (unsigned long __user *) arg);
+ return rc;
+
+ case TIOCSSOFTCAR:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ rc = get_user(arg, (unsigned long __user *) arg);
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+ tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) |
+ (arg ? CLOCAL : 0));
+ dgap_param(tty);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+
+ case TIOCMGET:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_get_modem_info(ch, uarg);
+
+ case TIOCMBIS:
+ case TIOCMBIC:
+ case TIOCMSET:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_set_modem_info(tty, cmd, uarg);
+
+ /*
+ * Here are any additional ioctl's that we want to implement
+ */
+
+ case TCFLSH:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ rc = tty_check_change(tty);
+ if (rc) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return rc;
+ }
+
+ if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
+ if (!(un->un_type == DGAP_PRINT)) {
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+ writeb(0, &(ch->ch_bs->orun));
+ }
+ }
+
+ if ((arg != TCOFLUSH) && (arg != TCIOFLUSH)) {
+ /* pretend we didn't recognize this IOCTL */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return -ENOIOCTLCMD;
+ }
+
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->tx_head));
+ dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+
+ /* Can't hold any locks when calling tty_wakeup! */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ tty_wakeup(tty);
+
+ /* pretend we didn't recognize this IOCTL */
+ return -ENOIOCTLCMD;
+
+ case TCSETSF:
+ case TCSETSW:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ if (cmd == TCSETSF) {
+ /* flush rx */
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+ }
+
+ /* now wait for all the output to drain */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc)
+ return -EINTR;
+
+ /* pretend we didn't recognize this */
+ return -ENOIOCTLCMD;
+
+ case TCSETAW:
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc)
+ return -EINTR;
+
+ /* pretend we didn't recognize this */
+ return -ENOIOCTLCMD;
+
+ case TCXONC:
+ /*
+ * The Linux Line Discipline (LD) would do this for us if we
+ * let it, but we have the special firmware options to do this
+ * the "right way" regardless of hardware or software flow
+		 * control so we'll do it ourselves instead of letting the LD
+ * do it.
+ */
+ rc = tty_check_change(tty);
+ if (rc) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return rc;
+ }
+
+ switch (arg) {
+
+ case TCOON:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ dgap_tty_start(tty);
+ return 0;
+ case TCOOFF:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ dgap_tty_stop(tty);
+ return 0;
+ case TCION:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ /* Make the ld do it */
+ return -ENOIOCTLCMD;
+ case TCIOFF:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ /* Make the ld do it */
+ return -ENOIOCTLCMD;
+ default:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return -EINVAL;
+ }
+
+ case DIGI_GETA:
+ /* get information for ditty */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digigeta(tty, uarg);
+
+ case DIGI_SETAW:
+ case DIGI_SETAF:
+
+ /* set information for ditty */
+ if (cmd == (DIGI_SETAW)) {
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc)
+ return -EINTR;
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+ } else
+ tty_ldisc_flush(tty);
+ /* fall thru */
+
+ case DIGI_SETA:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digiseta(tty, uarg);
+
+ case DIGI_GEDELAY:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digigetedelay(tty, uarg);
+
+ case DIGI_SEDELAY:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digisetedelay(tty, uarg);
+
+ case DIGI_GETCUSTOMBAUD:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digigetcustombaud(tty, uarg);
+
+ case DIGI_SETCUSTOMBAUD:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digisetcustombaud(tty, uarg);
+
+ case DIGI_RESET_PORT:
+ dgap_firmware_reset_port(ch);
+ dgap_param(tty);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return 0;
+
+ default:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return -ENOIOCTLCMD;
+ }
+}
+
+static int dgap_after_config_loaded(int board)
+{
+ /*
+ * Initialize KME waitqueues...
+ */
+ init_waitqueue_head(&(dgap_Board[board]->kme_wait));
+
+ /*
+ * allocate flip buffer for board.
+ */
+ dgap_Board[board]->flipbuf = kmalloc(MYFLIPLEN, GFP_ATOMIC);
+ if (!dgap_Board[board]->flipbuf)
+ return -ENOMEM;
+
+ dgap_Board[board]->flipflagbuf = kmalloc(MYFLIPLEN, GFP_ATOMIC);
+ if (!dgap_Board[board]->flipflagbuf) {
+ kfree(dgap_Board[board]->flipbuf);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Create printer and tty device entries
+ */
+static int dgap_tty_register_ports(struct board_t *brd)
+{
+ struct channel_t *ch;
+ int i;
+
+ brd->SerialPorts = kcalloc(brd->nasync, sizeof(*brd->SerialPorts),
+ GFP_KERNEL);
+ if (brd->SerialPorts == NULL)
+ return -ENOMEM;
+ for (i = 0; i < brd->nasync; i++)
+ tty_port_init(&brd->SerialPorts[i]);
+
+ brd->PrinterPorts = kcalloc(brd->nasync, sizeof(*brd->PrinterPorts),
+ GFP_KERNEL);
+ if (brd->PrinterPorts == NULL) {
+ kfree(brd->SerialPorts);
+ return -ENOMEM;
+ }
+ for (i = 0; i < brd->nasync; i++)
+ tty_port_init(&brd->PrinterPorts[i]);
+
+ ch = brd->channels[0];
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
+
+ struct device *classp;
+
+ classp = tty_port_register_device(&brd->SerialPorts[i],
+ brd->SerialDriver,
+ brd->firstminor + i, NULL);
+
+ dgap_create_tty_sysfs(&ch->ch_tun, classp);
+ ch->ch_tun.un_sysfs = classp;
+
+ classp = tty_port_register_device(&brd->PrinterPorts[i],
+ brd->PrintDriver,
+ brd->firstminor + i, NULL);
+
+ dgap_create_tty_sysfs(&ch->ch_pun, classp);
+ ch->ch_pun.un_sysfs = classp;
+ }
+ dgap_create_ports_sysfiles(brd);
+
+ return 0;
+}
+
+/*
+ * Copies the BIOS code from the user to the board,
+ * and starts the BIOS running.
+ */
+static void dgap_do_bios_load(struct board_t *brd, const uchar *ubios, int len)
+{
+ uchar *addr;
+ uint offset;
+ int i;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ addr = brd->re_map_membase;
+
+ /*
+ * clear POST area
+ */
+ for (i = 0; i < 16; i++)
+ writeb(0, addr + POSTAREA + i);
+
+ /*
+ * Download bios
+ */
+ offset = 0x1000;
+ memcpy_toio(addr + offset, ubios, len);
+
+ writel(0x0bf00401, addr);
+ writel(0, (addr + 4));
+
+ /* Clear the reset, and change states. */
+ writeb(FEPCLR, brd->re_map_port);
+}
+
+/*
+ * Checks to see if the BIOS completed running on the card.
+ */
+static int dgap_do_wait_for_bios(struct board_t *brd)
+{
+ uchar *addr;
+ u16 word;
+ u16 err1;
+ u16 err2;
+ int ret = 0;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return ret;
+
+ addr = brd->re_map_membase;
+ word = readw(addr + POSTAREA);
+
+ /*
+ * It can take 5-6 seconds for a board to
+ * pass the bios self test and post results.
+ * Give it 10 seconds.
+ */
+ brd->wait_for_bios = 0;
+ while (brd->wait_for_bios < 1000) {
+ /* Check to see if BIOS thinks board is good. (GD). */
+ if (word == *(u16 *) "GD")
+ return 1;
+ msleep_interruptible(10);
+ brd->wait_for_bios++;
+ word = readw(addr + POSTAREA);
+ }
+
+	/* Gave up on the board after waiting too long */
+ err1 = readw(addr + SEQUENCE);
+ err2 = readw(addr + ERROR);
+ pr_warn("dgap: %s failed diagnostics. Error #(%x,%x).\n",
+ brd->name, err1, err2);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOBIOS;
+
+ return ret;
+}
+
+/*
+ * Copies the FEP code from the user to the board,
+ * and starts the FEP running.
+ */
+static void dgap_do_fep_load(struct board_t *brd, const uchar *ufep, int len)
+{
+ uchar *addr;
+ uint offset;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ addr = brd->re_map_membase;
+
+ /*
+ * Download FEP
+ */
+ offset = 0x1000;
+ memcpy_toio(addr + offset, ufep, len);
+
+ /*
+ * If board is a concentrator product, we need to give
+ * it its config string describing how the concentrators look.
+ */
+ if ((brd->type == PCX) || (brd->type == PEPC)) {
+ uchar string[100];
+ uchar *config, *xconfig;
+ int i = 0;
+
+ xconfig = dgap_create_config_string(brd, string);
+
+ /* Write string to board memory */
+ config = addr + CONFIG;
+ for (; i < CONFIGSIZE; i++, config++, xconfig++) {
+ writeb(*xconfig, config);
+ if ((*xconfig & 0xff) == 0xff)
+ break;
+ }
+ }
+
+ writel(0xbfc01004, (addr + 0xc34));
+ writel(0x3, (addr + 0xc30));
+
+}
+
+/*
+ * Waits for the FEP to report that it's ready for us to use.
+ */
+static int dgap_do_wait_for_fep(struct board_t *brd)
+{
+ uchar *addr;
+ u16 word;
+ u16 err1;
+ u16 err2;
+ int ret = 0;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return ret;
+
+ addr = brd->re_map_membase;
+ word = readw(addr + FEPSTAT);
+
+ /*
+ * It can take 2-3 seconds for the FEP to
+ * be up and running. Give it 5 secs.
+ */
+ brd->wait_for_fep = 0;
+ while (brd->wait_for_fep < 500) {
+ /* Check to see if FEP is up and running now. */
+ if (word == *(u16 *) "OS") {
+ /*
+ * Check to see if the board can support FEP5+ commands.
+ */
+ word = readw(addr + FEP5_PLUS);
+ if (word == *(u16 *) "5A")
+ brd->bd_flags |= BD_FEP5PLUS;
+
+ return 1;
+ }
+ msleep_interruptible(10);
+ brd->wait_for_fep++;
+ word = readw(addr + FEPSTAT);
+ }
+
+	/* Gave up on the board after waiting too long */
+ err1 = readw(addr + SEQUENCE);
+ err2 = readw(addr + ERROR);
+ pr_warn("dgap: FEPOS for %s not functioning. Error #(%x,%x).\n",
+ brd->name, err1, err2);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+
+ return ret;
+}
+
+/*
+ * Physically forces the FEP5 card to reset itself.
+ */
+static void dgap_do_reset_board(struct board_t *brd)
+{
+ uchar check;
+ u32 check1;
+ u32 check2;
+ int i = 0;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) ||
+ !brd->re_map_membase || !brd->re_map_port)
+ return;
+
+ /* FEPRST does not vary among supported boards */
+ writeb(FEPRST, brd->re_map_port);
+
+ for (i = 0; i <= 1000; i++) {
+ check = readb(brd->re_map_port) & 0xe;
+ if (check == FEPRST)
+ break;
+ udelay(10);
+
+ }
+ if (i > 1000) {
+ pr_warn("dgap: Board not resetting... Failing board.\n");
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
+
+ /*
+ * Make sure there really is memory out there.
+ */
+ writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
+ writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
+ check1 = readl(brd->re_map_membase + LOWMEM);
+ check2 = readl(brd->re_map_membase + HIGHMEM);
+
+ if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
+ pr_warn("dgap: No memory at %p for board.\n",
+ brd->re_map_membase);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
+
+}
+
+#ifdef DIGI_CONCENTRATORS_SUPPORTED
+/*
+ * Sends a concentrator image into the FEP5 board.
+ */
+static void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len)
+{
+ char *vaddr;
+ u16 offset = 0;
+ struct downld_t *to_dp;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ vaddr = brd->re_map_membase;
+
+ offset = readw((u16 *) (vaddr + DOWNREQ));
+ to_dp = (struct downld_t *) (vaddr + (int) offset);
+ memcpy_toio(to_dp, uaddr, len);
+
+ /* Tell card we have data for it */
+ writew(0, vaddr + (DOWNREQ));
+
+ brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
+}
+#endif
+
+#define EXPANSION_ROM_SIZE (64 * 1024)
+#define FEP5_ROM_MAGIC (0xFEFFFFFF)
+
+static void dgap_get_vpd(struct board_t *brd)
+{
+ u32 magic;
+ u32 base_offset;
+ u16 rom_offset;
+ u16 vpd_offset;
+ u16 image_length;
+ u16 i;
+ uchar byte1;
+ uchar byte2;
+
+ /*
+ * Poke the magic number at the PCI Rom Address location.
+ * If VPD is supported, the value read from that address
+ * will be non-zero.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
+
+ /* VPD not supported, bail */
+ if (!magic)
+ return;
+
+ /*
+	 * To get to the OTPROM memory, we have to send the board's base
+ * address or'ed with 1 into the PCI Rom Address location.
+ */
+ magic = brd->membase | 0x01;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
+
+ byte1 = readb(brd->re_map_membase);
+ byte2 = readb(brd->re_map_membase + 1);
+
+ /*
+ * If the board correctly swapped to the OTPROM memory,
+ * the first 2 bytes (header) should be 0x55, 0xAA
+ */
+ if (byte1 == 0x55 && byte2 == 0xAA) {
+
+ base_offset = 0;
+
+ /*
+ * We have to run through all the OTPROM memory looking
+ * for the VPD offset.
+ */
+ while (base_offset <= EXPANSION_ROM_SIZE) {
+
+ /*
+ * Lots of magic numbers here.
+ *
+ * The VPD offset is located inside the ROM Data
+ * Structure.
+ *
+ * We also have to remember the length of each
+ * ROM Data Structure, so we can "hop" to the next
+ * entry if the VPD isn't in the current
+ * ROM Data Structure.
+ */
+ rom_offset = readw(brd->re_map_membase +
+ base_offset + 0x18);
+ image_length = readw(brd->re_map_membase +
+ rom_offset + 0x10) * 512;
+ vpd_offset = readw(brd->re_map_membase +
+ rom_offset + 0x08);
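+			/*
+			 * These offsets appear to follow the standard PCI
+			 * expansion ROM layout: 0x18 in the ROM header points
+			 * at the PCI data structure, whose 0x10 field holds
+			 * the image length in 512-byte blocks and whose 0x08
+			 * field holds the VPD pointer.
+			 */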
+
+ /* Found the VPD entry */
+ if (vpd_offset)
+ break;
+
+ /* We didn't find a VPD entry, go to next ROM entry. */
+ base_offset += image_length;
+
+ byte1 = readb(brd->re_map_membase + base_offset);
+ byte2 = readb(brd->re_map_membase + base_offset + 1);
+
+ /*
+ * If the new ROM offset doesn't have 0x55, 0xAA
+ * as its header, we have run out of ROM.
+ */
+ if (byte1 != 0x55 || byte2 != 0xAA)
+ break;
+ }
+
+ /*
+ * If we have a VPD offset, then mark the board
+ * as having a valid VPD, and copy VPDSIZE (512) bytes of
+ * that VPD to the buffer we have in our board structure.
+ */
+ if (vpd_offset) {
+ brd->bd_flags |= BD_HAS_VPD;
+ for (i = 0; i < VPDSIZE; i++) {
+ brd->vpd[i] = readb(brd->re_map_membase +
+ vpd_offset + i);
+ }
+ }
+ }
+
+ /*
+ * We MUST poke the magic number at the PCI Rom Address location again.
+ * This makes the card report the regular board memory back to us,
+ * rather than the OTPROM memory.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+}
+
+/*
+ * Our board poller function.
+ */
+static void dgap_poll_tasklet(unsigned long data)
+{
+ struct board_t *bd = (struct board_t *) data;
+ ulong lock_flags;
+ char *vaddr;
+ u16 head, tail;
+
+ if (!bd || (bd->magic != DGAP_BOARD_MAGIC))
+ return;
+
+ if (bd->inhibit_poller)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
+
+ /*
+ * If board is ready, parse deeper to see if there is anything to do.
+ */
+ if (bd->state == BOARD_READY) {
+
+ struct ev_t *eaddr = NULL;
+
+ if (!bd->re_map_membase) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+ if (!bd->re_map_port) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ if (!bd->nasync)
+ goto out;
+
+ eaddr = (struct ev_t *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+ * If there is an event pending. Go service it.
+ */
+ if (head != tail) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ dgap_event(bd);
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ }
+
+out:
+ /*
+ * If board is doing interrupts, ACK the interrupt.
+ */
+ if (bd && bd->intr_running)
+ readb(bd->re_map_port + 2);
+
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+}
+
+/*=======================================================================
+ *
+ * dgap_cmdb - Sends a 2 byte command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * byte1 - Integer containing first byte to be sent.
+ * byte2 - Integer containing second byte to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1,
+ uchar byte2, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+ writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
+ writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
+ writeb(byte1, (char *) (vaddr + head + CMDSTART + 2));
+ writeb(byte2, (char *) (vaddr + head + CMDSTART + 3));
+
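+	/*
+	 * The AND wraps head within the command region; this appears
+	 * to rely on (CMDMAX - CMDSTART) being a power of two, so the
+	 * mask acts as a modulo while keeping the low two bits clear
+	 * for 4-byte alignment.
+	 */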
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+	/*
+	 * Wait if necessary for the FEP to catch up, so that ncmds
+	 * or fewer commands remain outstanding in the command buffer.
+	 * If the time spent waiting is outlandish, declare the FEP
+	 * dead.
+	 */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+/*=======================================================================
+ *
+ * dgap_cmdw - Sends a 1 word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+ writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
+ writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
+ writew((u16) word, (char *) (vaddr + head + CMDSTART + 2));
+
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+	/*
+	 * Wait if necessary for the FEP to catch up, so that ncmds
+	 * or fewer commands remain outstanding in the command buffer.
+	 * If the time spent waiting is outlandish, declare the FEP
+	 * dead.
+	 */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+/*=======================================================================
+ *
+ * dgap_cmdw_ext - Sends an extended word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+
+ /* Write an FF to tell the FEP that we want an extended command */
+ writeb((uchar) 0xff, (char *) (vaddr + head + CMDSTART + 0));
+
+ writeb((uchar) ch->ch_portnum, (uchar *) (vaddr + head + CMDSTART + 1));
+ writew((u16) cmd, (char *) (vaddr + head + CMDSTART + 2));
+
+ /*
+ * If the second part of the command won't fit,
+ * put it at the beginning of the circular buffer.
+ */
+ if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03)))
+ writew((u16) word, (char *) (vaddr + CMDSTART));
+ else
+ writew((u16) word, (char *) (vaddr + head + CMDSTART + 4));
+
+ head = (head + 8) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+	/*
+	 * Wait if necessary for the FEP to catch up, so that ncmds
+	 * or fewer commands remain outstanding in the command buffer.
+	 * If the time spent waiting is outlandish, declare the FEP
+	 * dead.
+	 */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+/*=======================================================================
+ *
+ * dgap_wmove - Write data to FEP buffer.
+ *
+ * ch - Pointer to channel structure.
+ *		buf	- Pointer to characters to be moved.
+ * cnt - Number of characters to move.
+ *
+ *=======================================================================*/
+static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
+{
+ int n;
+ char *taddr;
+ struct bs_t *bs;
+ u16 head;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check parameters.
+ */
+ bs = ch->ch_bs;
+ head = readw(&(bs->tx_head));
+
+ /*
+ * If pointers are out of range, just return.
+ */
+ if ((cnt > ch->ch_tsize) ||
+ (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize)
+ return;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ n = ch->ch_tstart + ch->ch_tsize - head;
+
+ if (cnt >= n) {
+ cnt -= n;
+ taddr = ch->ch_taddr + head;
+ memcpy_toio(taddr, buf, n);
+ head = ch->ch_tstart;
+ buf += n;
+ }
+
+ /*
+ * Move rest of data.
+ */
+ taddr = ch->ch_taddr + head;
+ n = cnt;
+ memcpy_toio(taddr, buf, n);
+ head += cnt;
+
+ writew(head, &(bs->tx_head));
+}
+
+/*
+ * Retrieves the current custom baud rate from FEP memory,
+ * and returns it back to the user.
+ * Returns 0 on error.
+ */
+static uint dgap_get_custom_baud(struct channel_t *ch)
+{
+ uchar *vaddr;
+ ulong offset = 0;
+ uint value = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+
+ if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+
+ if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
+ return 0;
+
+ vaddr = ch->ch_bd->re_map_membase;
+
+ if (!vaddr)
+ return 0;
+
+ /*
+	 * Read from FEP memory what the FEP believes
+	 * the custom baud rate is.
+ */
+ offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) +
+ (ch->ch_portnum * 0x28) + LINE_SPEED));
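+	/*
+	 * The 16-bit value at ECS_SEG looks like an x86-style segment,
+	 * so it is shifted left by 4 to form a linear offset; each
+	 * port then appears to own a 0x28 byte block, with LINE_SPEED
+	 * as the field offset inside that block.
+	 */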
+
+ value = readw(vaddr + offset);
+ return value;
+}
+
+/*
+ * Calls the firmware to reset this channel.
+ */
+static void dgap_firmware_reset_port(struct channel_t *ch)
+{
+ dgap_cmdb(ch, CHRESET, 0, 0, 0);
+
+ /*
+ * Now that the channel is reset, we need to make sure
+ * all the current settings get reapplied to the port
+ * in the firmware.
+ *
+ * So we will set the driver's cache of firmware
+ * settings all to 0, and then call param.
+ */
+ ch->ch_fepiflag = 0;
+ ch->ch_fepcflag = 0;
+ ch->ch_fepoflag = 0;
+ ch->ch_fepstartc = 0;
+ ch->ch_fepstopc = 0;
+ ch->ch_fepastartc = 0;
+ ch->ch_fepastopc = 0;
+ ch->ch_mostat = 0;
+ ch->ch_hflow = 0;
+}
+
+/*=======================================================================
+ *
+ * dgap_param - Set Digi parameters.
+ *
+ * struct tty_struct * - TTY for port.
+ *
+ *=======================================================================*/
+static int dgap_param(struct tty_struct *tty)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct bs_t *bs;
+ struct un_t *un;
+ u16 head;
+ u16 cflag;
+ u16 iflag;
+ uchar mval;
+ uchar hflow;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -ENXIO;
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -ENXIO;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -ENXIO;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -ENXIO;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return -ENXIO;
+
+ ts = &tty->termios;
+
+ /*
+ * If baud rate is zero, flush queues, and set mval to drop DTR.
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+
+ /* flush rx */
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+
+ /* flush tx */
+ head = readw(&(ch->ch_bs->tx_head));
+ writew(head, &(ch->ch_bs->tx_tail));
+
+ ch->ch_flags |= (CH_BAUD0);
+
+ /* Drop RTS and DTR */
+ ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
+ mval = D_DTR(ch) | D_RTS(ch);
+ ch->ch_baud_info = 0;
+
+ } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
+ /*
+ * Tell the fep to do the command
+ */
+
+ dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
+
+ /*
+		 * Now read back from FEP memory what the FEP
+		 * believes the custom baud rate is.
+ */
+ ch->ch_custom_speed = dgap_get_custom_baud(ch);
+ ch->ch_baud_info = ch->ch_custom_speed;
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
+
+ } else {
+ /*
+ * Set baud rate, character size, and parity.
+ */
+
+ int iindex = 0;
+ int jindex = 0;
+ int baud = 0;
+
+ ulong bauds[4][16] = {
+ { /* slowbaud */
+ 0, 50, 75, 110,
+ 134, 150, 200, 300,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* slowbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud */
+ 0, 57600, 76800, 115200,
+ 14400, 57600, 230400, 76800,
+ 115200, 230400, 28800, 460800,
+ 921600, 9600, 19200, 38400 },
+ { /* fastbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 }
+ };
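+
+		/*
+		 * The row index is built from CBAUDEX (adds 1) and
+		 * DIGI_FAST (adds 2); the column index is the CBAUD
+		 * value of the unit selected below, which must fall in
+		 * 0..15 or the rate falls back to 9600.
+		 */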
+
+ /*
+ * Only use the TXPrint baud rate if the
+ * terminal unit is NOT open
+ */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
+ (un->un_type == DGAP_PRINT))
+ baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
+ else
+ baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
+
+ if (ch->ch_c_cflag & CBAUDEX)
+ iindex = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FAST)
+ iindex += 2;
+
+ jindex = baud;
+
+ if ((iindex >= 0) && (iindex < 4) &&
+ (jindex >= 0) && (jindex < 16))
+ baud = bauds[iindex][jindex];
+ else
+ baud = 0;
+
+ if (baud == 0)
+ baud = 9600;
+
+ ch->ch_baud_info = baud;
+
+ /*
+ * CBAUD has bit position 0x1000 set these days to
+ * indicate Linux baud rate remap.
+ * We use a different bit assignment for high speed.
+ * Clear this bit out while grabbing the parts of
+ * "cflag" we want.
+ */
+ cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB |
+ CSTOPB | CSIZE);
+
+ /*
+ * HUPCL bit is used by FEP to indicate fast baud
+ * table is to be used.
+ */
+ if ((ch->ch_digi.digi_flags & DIGI_FAST) ||
+ (ch->ch_c_cflag & CBAUDEX))
+ cflag |= HUPCL;
+
+ if ((ch->ch_c_cflag & CBAUDEX) &&
+ !(ch->ch_digi.digi_flags & DIGI_FAST)) {
+ /*
+ * The below code is trying to guarantee that only
+ * baud rates 115200, 230400, 460800, 921600 are
+ * remapped. We use exclusive or because the various
+ * baud rates share common bit positions and therefore
+ * can't be tested for easily.
+ */
+ tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
+ int baudpart = 0;
+
+ /*
+ * Map high speed requests to index
+ * into FEP's baud table
+ */
+ switch (tcflag) {
+ case B57600:
+ baudpart = 1;
+ break;
+#ifdef B76800
+ case B76800:
+ baudpart = 2;
+ break;
+#endif
+ case B115200:
+ baudpart = 3;
+ break;
+ case B230400:
+ baudpart = 9;
+ break;
+ case B460800:
+ baudpart = 11;
+ break;
+#ifdef B921600
+ case B921600:
+ baudpart = 12;
+ break;
+#endif
+ default:
+ baudpart = 0;
+ }
+
+ if (baudpart)
+ cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
+ }
+
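+ /* Only the low 16 bits of cflag are sent to the FEP (SCFLAG takes a u16). */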
+ cflag &= 0xffff;
+
+ if (cflag != ch->ch_fepcflag) {
+ ch->ch_fepcflag = (u16) (cflag & 0xffff);
+
+ /*
+ * Okay to have channel and board
+ * locks held calling this
+ */
+ dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
+ }
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
+ }
+
+ /*
+ * Get input flags.
+ */
+ iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
+ INPCK | ISTRIP | IXON | IXANY | IXOFF);
+
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE)) {
+ iflag &= ~(IXON | IXOFF);
+ ch->ch_c_iflag &= ~(IXON | IXOFF);
+ }
+
+ /*
+ * Only the IBM Xr card can switch between
+ * 232 and 422 modes on the fly
+ */
+ if (bd->device == PCI_DEV_XR_IBM_DID) {
+ if (ch->ch_digi.digi_flags & DIGI_422)
+ dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
+ else
+ dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
+ }
+
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
+ iflag |= IALTPIN;
+
+ if (iflag != ch->ch_fepiflag) {
+ ch->ch_fepiflag = iflag;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
+ }
+
+ /*
+ * Select hardware handshaking.
+ */
+ hflow = 0;
+
+ if (ch->ch_c_cflag & CRTSCTS)
+ hflow |= (D_RTS(ch) | D_CTS(ch));
+ if (ch->ch_digi.digi_flags & RTSPACE)
+ hflow |= D_RTS(ch);
+ if (ch->ch_digi.digi_flags & DTRPACE)
+ hflow |= D_DTR(ch);
+ if (ch->ch_digi.digi_flags & CTSPACE)
+ hflow |= D_CTS(ch);
+ if (ch->ch_digi.digi_flags & DSRPACE)
+ hflow |= D_DSR(ch);
+ if (ch->ch_digi.digi_flags & DCDPACE)
+ hflow |= D_CD(ch);
+
+ if (hflow != ch->ch_hflow) {
+ ch->ch_hflow = hflow;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SHFLOW, (uchar) hflow, 0xff, 0);
+ }
+
+
+ /*
+ * Set RTS and/or DTR Toggle if needed,
+ * but only if product is FEP5+ based.
+ */
+ if (bd->bd_flags & BD_FEP5PLUS) {
+ u16 hflow2 = 0;
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
+ hflow2 |= (D_RTS(ch));
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
+ hflow2 |= (D_DTR(ch));
+
+ dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
+ }
+
+ /*
+ * Set modem control lines.
+ */
+
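+ /* Where a bit is set in ch_mforce, take that modem line's state from ch_mval. */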
+ mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
+
+ if (ch->ch_mostat ^ mval) {
+ ch->ch_mostat = mval;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SMODEM, (uchar) mval, D_RTS(ch)|D_DTR(ch), 0);
+ }
+
+ /*
+ * Read modem signals, and then call carrier function.
+ */
+ ch->ch_mistat = readb(&(bs->m_stat));
+ dgap_carrier(ch);
+
+ /*
+ * Set the start and stop characters.
+ */
+ if (ch->ch_startc != ch->ch_fepstartc ||
+ ch->ch_stopc != ch->ch_fepstopc) {
+ ch->ch_fepstartc = ch->ch_startc;
+ ch->ch_fepstopc = ch->ch_stopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
+ }
+
+ /*
+ * Set the Auxiliary start and stop characters.
+ */
+ if (ch->ch_astartc != ch->ch_fepastartc ||
+ ch->ch_astopc != ch->ch_fepastopc) {
+ ch->ch_fepastartc = ch->ch_astartc;
+ ch->ch_fepastopc = ch->ch_astopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * dgap_parity_scan()
+ *
+ * Convert the FEP5 way of reporting parity errors and breaks into
+ * the Linux line discipline way.
+ */
+static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
+ unsigned char *fbuf, int *len)
+{
+ int l = *len;
+ int count = 0;
+ unsigned char *in, *cout, *fout;
+ unsigned char c;
+
+ in = cbuf;
+ cout = cbuf;
+ fout = fbuf;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
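+ /*
+ * FEP5 escapes exceptions in the data stream with 0xff:
+ * 0xff 0xff      is a literal 0xff data byte,
+ * 0xff 0x00 0x00 marks a line break,
+ * 0xff 0x00 <c>  marks character <c> received with a parity error.
+ */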
+ while (l--) {
+ c = *in++;
+ switch (ch->pscan_state) {
+ default:
+ /* reset to sanity and fall through */
+ ch->pscan_state = 0;
+
+ case 0:
+ /* No FF seen yet */
+ if (c == (unsigned char) '\377')
+ /* delete this character from stream */
+ ch->pscan_state = 1;
+ else {
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ }
+ break;
+
+ case 1:
+ /* first FF seen */
+ if (c == (unsigned char) '\377') {
+ /* doubled ff, transform to single ff */
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ ch->pscan_state = 0;
+ } else {
+ /* save value for examination in next state */
+ ch->pscan_savechar = c;
+ ch->pscan_state = 2;
+ }
+ break;
+
+ case 2:
+ /* third character of ff sequence */
+
+ *cout++ = c;
+
+ if (ch->pscan_savechar == 0x0) {
+
+ if (c == 0x0) {
+ ch->ch_err_break++;
+ *fout++ = TTY_BREAK;
+ } else {
+ ch->ch_err_parity++;
+ *fout++ = TTY_PARITY;
+ }
+ }
+
+ count += 1;
+ ch->pscan_state = 0;
+ }
+ }
+ *len = count;
+}
+
+static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
+ struct un_t *un, u32 mask,
+ unsigned long *irq_flags1,
+ unsigned long *irq_flags2)
+{
+ if (!(un->un_flags & mask))
+ return;
+
+ un->un_flags &= ~mask;
+
+ if (!(un->un_flags & UN_ISOPEN))
+ return;
+
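+ /*
+ * Drop the channel and board locks around the line discipline
+ * write_wakeup() callback, since it may call back into the driver.
+ */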
+ if ((un->un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ un->un_tty->ldisc->ops->write_wakeup) {
+ spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, *irq_flags1);
+
+ (un->un_tty->ldisc->ops->write_wakeup)(un->un_tty);
+
+ spin_lock_irqsave(&bd->bd_lock, *irq_flags1);
+ spin_lock_irqsave(&ch->ch_lock, *irq_flags2);
+ }
+ wake_up_interruptible(&un->un_tty->write_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+}
+
+/*=======================================================================
+ *
+ * dgap_event - FEP to host event processing routine.
+ *
+ * bd - Board of current event.
+ *
+ *=======================================================================*/
+static int dgap_event(struct board_t *bd)
+{
+ struct channel_t *ch;
+ ulong lock_flags;
+ ulong lock_flags2;
+ struct bs_t *bs;
+ uchar *event;
+ uchar *vaddr = NULL;
+ struct ev_t *eaddr = NULL;
+ uint head;
+ uint tail;
+ int port;
+ int reason;
+ int modem;
+ int b1;
+
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -ENXIO;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
+
+ if (!vaddr) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ eaddr = (struct ev_t *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+ * Forget it if the pointers are out of range or not 4-byte aligned.
+ */
+
+ if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
+ (head | tail) & 03) {
+ /* Let go of board lock */
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /*
+ * Loop to process all the events in the buffer.
+ */
+ while (tail != head) {
+
+ /*
+ * Get interrupt information.
+ */
+
+ event = bd->re_map_membase + tail + EVSTART;
+
+ port = event[0];
+ reason = event[1];
+ modem = event[2];
+ b1 = event[3];
+
+ /*
+ * Make sure the interrupt is valid.
+ */
+ if (port >= bd->nasync)
+ goto next;
+
+ if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA)))
+ goto next;
+
+ ch = bd->channels[port];
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ goto next;
+
+ /*
+ * If we have made it here, the event was valid.
+ * Lock down the channel.
+ */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ bs = ch->ch_bs;
+
+ if (!bs) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ goto next;
+ }
+
+ /*
+ * Process received data.
+ */
+ if (reason & IFDATA) {
+
+ /*
+ * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
+ * input could send some data to ld, which in turn
+ * could do a callback to one of our other functions.
+ */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ dgap_input(ch);
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ if (ch->ch_flags & CH_RACTIVE)
+ ch->ch_flags |= CH_RENABLE;
+ else
+ writeb(1, &(bs->idata));
+
+ if (ch->ch_flags & CH_RWAIT) {
+ ch->ch_flags &= ~CH_RWAIT;
+
+ wake_up_interruptible
+ (&ch->ch_tun.un_flags_wait);
+ }
+ }
+
+ /*
+ * Process Modem change signals.
+ */
+ if (reason & IFMODEM) {
+ ch->ch_mistat = modem;
+ dgap_carrier(ch);
+ }
+
+ /*
+ * Process break.
+ */
+ if (reason & IFBREAK) {
+
+ if (ch->ch_tun.un_tty) {
+ /* A break has been indicated */
+ ch->ch_err_break++;
+ tty_buffer_request_room
+ (ch->ch_tun.un_tty->port, 1);
+ tty_insert_flip_char(ch->ch_tun.un_tty->port,
+ 0, TTY_BREAK);
+ tty_flip_buffer_push(ch->ch_tun.un_tty->port);
+ }
+ }
+
+ /*
+ * Process Transmit low.
+ */
+ if (reason & IFTLW) {
+ dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW,
+ &lock_flags, &lock_flags2);
+ dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW,
+ &lock_flags, &lock_flags2);
+ if (ch->ch_flags & CH_WLOW) {
+ ch->ch_flags &= ~CH_WLOW;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ /*
+ * Process Transmit empty.
+ */
+ if (reason & IFTEM) {
+ dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY,
+ &lock_flags, &lock_flags2);
+ dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY,
+ &lock_flags, &lock_flags2);
+ if (ch->ch_flags & CH_WEMPTY) {
+ ch->ch_flags &= ~CH_WEMPTY;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+
+next:
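+ /* Each event record is 4 bytes; advance tail and wrap within the event buffer. */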
+ tail = (tail + 4) & (EVMAX - EVSTART - 4);
+ }
+
+ writew(tail, &(eaddr->ev_tail));
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
+}
+static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
+
+
+static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", dgap_NumBoards);
+}
+static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
+
+
+static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
+}
+static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
+
+
+static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
+}
+static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
+
+static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
+}
+
+static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ if (sscanf(buf, "%d\n", &dgap_poll_tick) != 1)
+ return -EINVAL;
+ return count;
+}
+static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show,
+ dgap_driver_pollrate_store);
+
+static int dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ int rc = 0;
+ struct device_driver *driverfs = &dgap_driver->driver;
+
+ rc |= driver_create_file(driverfs, &driver_attr_version);
+ rc |= driver_create_file(driverfs, &driver_attr_boards);
+ rc |= driver_create_file(driverfs, &driver_attr_maxboards);
+ rc |= driver_create_file(driverfs, &driver_attr_pollrate);
+ rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
+
+ return rc;
+}
+
+static void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ struct device_driver *driverfs = &dgap_driver->driver;
+ driver_remove_file(driverfs, &driver_attr_version);
+ driver_remove_file(driverfs, &driver_attr_boards);
+ driver_remove_file(driverfs, &driver_attr_maxboards);
+ driver_remove_file(driverfs, &driver_attr_pollrate);
+ driver_remove_file(driverfs, &driver_attr_pollcounter);
+}
+
+static struct board_t *dgap_verify_board(struct device *p)
+{
+ struct board_t *bd;
+
+ if (!p)
+ return NULL;
+
+ bd = dev_get_drvdata(p);
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC || bd->state != BOARD_READY)
+ return NULL;
+
+ return bd;
+}
+
+static ssize_t dgap_ports_state_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s\n", bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_open_count ? "Open" : "Closed");
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_state, S_IRUSR, dgap_ports_state_show, NULL);
+
+static ssize_t dgap_ports_baud_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %d\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_baud_info);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_baud, S_IRUSR, dgap_ports_baud_show, NULL);
+
+static ssize_t dgap_ports_msignals_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++) {
+ if (bd->channels[i]->ch_open_count)
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s %s %s %s %s %s\n",
+ bd->channels[i]->ch_portnum,
+ (bd->channels[i]->ch_mostat &
+ UART_MCR_RTS) ? "RTS" : "",
+ (bd->channels[i]->ch_mistat &
+ UART_MSR_CTS) ? "CTS" : "",
+ (bd->channels[i]->ch_mostat &
+ UART_MCR_DTR) ? "DTR" : "",
+ (bd->channels[i]->ch_mistat &
+ UART_MSR_DSR) ? "DSR" : "",
+ (bd->channels[i]->ch_mistat &
+ UART_MSR_DCD) ? "DCD" : "",
+ (bd->channels[i]->ch_mistat &
+ UART_MSR_RI) ? "RI" : "");
+ else
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d\n", bd->channels[i]->ch_portnum);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_msignals, S_IRUSR, dgap_ports_msignals_show, NULL);
+
+static ssize_t dgap_ports_iflag_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_c_iflag);
+ return count;
+}
+static DEVICE_ATTR(ports_iflag, S_IRUSR, dgap_ports_iflag_show, NULL);
+
+static ssize_t dgap_ports_cflag_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_c_cflag);
+ return count;
+}
+static DEVICE_ATTR(ports_cflag, S_IRUSR, dgap_ports_cflag_show, NULL);
+
+static ssize_t dgap_ports_oflag_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_c_oflag);
+ return count;
+}
+static DEVICE_ATTR(ports_oflag, S_IRUSR, dgap_ports_oflag_show, NULL);
+
+static ssize_t dgap_ports_lflag_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_c_lflag);
+ return count;
+}
+static DEVICE_ATTR(ports_lflag, S_IRUSR, dgap_ports_lflag_show, NULL);
+
+static ssize_t dgap_ports_digi_flag_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_digi.digi_flags);
+ return count;
+}
+static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgap_ports_digi_flag_show, NULL);
+
+static ssize_t dgap_ports_rxcount_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_rxcount);
+ return count;
+}
+static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgap_ports_rxcount_show, NULL);
+
+static ssize_t dgap_ports_txcount_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_txcount);
+ return count;
+}
+static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
+
+/* This function creates the sysfs files that export each port's signal status
+ * to sysfs; each value is put in a separate file.
+ */
+static void dgap_create_ports_sysfiles(struct board_t *bd)
+{
+ dev_set_drvdata(&bd->pdev->dev, bd);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+}
+
+/* Remove all the sysfs files created for the board's ports. */
+static void dgap_remove_ports_sysfiles(struct board_t *bd)
+{
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+}
+
+static ssize_t dgap_tty_state_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ?
+ "Open" : "Closed");
+}
+static DEVICE_ATTR(state, S_IRUSR, dgap_tty_state_show, NULL);
+
+static ssize_t dgap_tty_baud_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info);
+}
+static DEVICE_ATTR(baud, S_IRUSR, dgap_tty_baud_show, NULL);
+
+static ssize_t dgap_tty_msignals_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ if (ch->ch_open_count) {
+ return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
+ (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ }
+ return 0;
+}
+static DEVICE_ATTR(msignals, S_IRUSR, dgap_tty_msignals_show, NULL);
+
+static ssize_t dgap_tty_iflag_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
+}
+static DEVICE_ATTR(iflag, S_IRUSR, dgap_tty_iflag_show, NULL);
+
+static ssize_t dgap_tty_cflag_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
+}
+static DEVICE_ATTR(cflag, S_IRUSR, dgap_tty_cflag_show, NULL);
+
+static ssize_t dgap_tty_oflag_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
+}
+static DEVICE_ATTR(oflag, S_IRUSR, dgap_tty_oflag_show, NULL);
+
+static ssize_t dgap_tty_lflag_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
+}
+static DEVICE_ATTR(lflag, S_IRUSR, dgap_tty_lflag_show, NULL);
+
+static ssize_t dgap_tty_digi_flag_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
+}
+static DEVICE_ATTR(digi_flag, S_IRUSR, dgap_tty_digi_flag_show, NULL);
+
+static ssize_t dgap_tty_rxcount_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
+}
+static DEVICE_ATTR(rxcount, S_IRUSR, dgap_tty_rxcount_show, NULL);
+
+static ssize_t dgap_tty_txcount_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
+}
+static DEVICE_ATTR(txcount, S_IRUSR, dgap_tty_txcount_show, NULL);
+
+static ssize_t dgap_tty_name_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int cn;
+ int bn;
+ struct cnode *cptr = NULL;
+ int found = FALSE;
+ int ncount = 0;
+ int starto = 0;
+ int i = 0;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ bn = bd->boardnum;
+ cn = ch->ch_portnum;
+
+ for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
+
+ if ((cptr->type == BNODE) &&
+ ((cptr->u.board.type == APORT2_920P) ||
+ (cptr->u.board.type == APORT4_920P) ||
+ (cptr->u.board.type == APORT8_920P) ||
+ (cptr->u.board.type == PAPORT4) ||
+ (cptr->u.board.type == PAPORT8))) {
+
+ found = TRUE;
+ if (cptr->u.board.v_start)
+ starto = cptr->u.board.start;
+ else
+ starto = 1;
+ }
+
+ if (cptr->type == TNODE && found == TRUE) {
+ char *ptr1;
+ if (strstr(cptr->u.ttyname, "tty")) {
+ ptr1 = cptr->u.ttyname;
+ ptr1 += 3;
+ } else
+ ptr1 = cptr->u.ttyname;
+
+ for (i = 0; i < dgap_config_get_num_prts(bd); i++) {
+ if (cn != i)
+ continue;
+
+ return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
+ (un->un_type == DGAP_PRINT) ?
+ "pr" : "tty",
+ ptr1, i + starto);
+ }
+ }
+
+ if (cptr->type == CNODE) {
+
+ for (i = 0; i < cptr->u.conc.nport; i++) {
+ if (cn != (i + ncount))
+ continue;
+
+ return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
+ (un->un_type == DGAP_PRINT) ?
+ "pr" : "tty",
+ cptr->u.conc.id,
+ i + (cptr->u.conc.v_start ?
+ cptr->u.conc.start : 1));
+ }
+
+ ncount += cptr->u.conc.nport;
+ }
+
+ if (cptr->type == MNODE) {
+
+ for (i = 0; i < cptr->u.module.nport; i++) {
+ if (cn != (i + ncount))
+ continue;
+
+ return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
+ (un->un_type == DGAP_PRINT) ?
+ "pr" : "tty",
+ cptr->u.module.id,
+ i + (cptr->u.module.v_start ?
+ cptr->u.module.start : 1));
+ }
+
+ ncount += cptr->u.module.nport;
+
+ }
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s_dgap_%d_%d\n",
+ (un->un_type == DGAP_PRINT) ? "pr" : "tty", bn, cn);
+
+}
+static DEVICE_ATTR(custom_name, S_IRUSR, dgap_tty_name_show, NULL);
+
+static struct attribute *dgap_sysfs_tty_entries[] = {
+ &dev_attr_state.attr,
+ &dev_attr_baud.attr,
+ &dev_attr_msignals.attr,
+ &dev_attr_iflag.attr,
+ &dev_attr_cflag.attr,
+ &dev_attr_oflag.attr,
+ &dev_attr_lflag.attr,
+ &dev_attr_digi_flag.attr,
+ &dev_attr_rxcount.attr,
+ &dev_attr_txcount.attr,
+ &dev_attr_custom_name.attr,
+ NULL
+};
+
+static struct attribute_group dgap_tty_attribute_group = {
+ .name = NULL,
+ .attrs = dgap_sysfs_tty_entries,
+};
+
+static void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
+{
+ int ret;
+
+ ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
+ if (ret)
+ return;
+
+ dev_set_drvdata(c, un);
+
+}
+
+static void dgap_remove_tty_sysfs(struct device *c)
+{
+ sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
+}
+
+/*
+ * Parse a configuration file read into memory as a string.
+ */
+static int dgap_parsefile(char **in, int Remove)
+{
+ struct cnode *p, *brd, *line, *conc;
+ int rc;
+ char *s = NULL;
+ int linecnt = 0;
+
+ p = &dgap_head;
+ brd = line = conc = NULL;
+
+ /* perhaps we are adding to an existing list? */
+ while (p->next != NULL)
+ p = p->next;
+
+ /* file must start with a BEGIN */
+ while ((rc = dgap_gettok(in, p)) != BEGIN) {
+ if (rc == 0) {
+ dgap_err("unexpected EOF");
+ return -1;
+ }
+ }
+
+ for (; ;) {
+ rc = dgap_gettok(in, p);
+ if (rc == 0) {
+ dgap_err("unexpected EOF");
+ return -1;
+ }
+
+ switch (rc) {
+ case 0:
+ dgap_err("unexpected end of file");
+ return -1;
+
+ case BEGIN: /* should only be 1 begin */
+ dgap_err("unexpected config_begin\n");
+ return -1;
+
+ case END:
+ return 0;
+
+ case BOARD: /* board info */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(BNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+
+ p->u.board.status = dgap_savestring("No");
+ line = conc = NULL;
+ brd = p;
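+ /* -1 so the board's first LINE entry counts as line 0 (conc1/module1). */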
+ linecnt = -1;
+ break;
+
+ case APORT2_920P: /* AccelePort_2 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_2r_920 string");
+ return -1;
+ }
+ p->u.board.type = APORT2_920P;
+ p->u.board.v_type = 1;
+ break;
+
+ case APORT4_920P: /* AccelePort_4 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_4r_920 string");
+ return -1;
+ }
+ p->u.board.type = APORT4_920P;
+ p->u.board.v_type = 1;
+ break;
+
+ case APORT8_920P: /* AccelePort_8 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_8r_920 string");
+ return -1;
+ }
+ p->u.board.type = APORT8_920P;
+ p->u.board.v_type = 1;
+ break;
+
+ case PAPORT4: /* AccelePort_4 PCI */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_4r(PCI) string");
+ return -1;
+ }
+ p->u.board.type = PAPORT4;
+ p->u.board.v_type = 1;
+ break;
+
+ case PAPORT8: /* AccelePort_8 PCI */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_8r string");
+ return -1;
+ }
+ p->u.board.type = PAPORT8;
+ p->u.board.v_type = 1;
+ break;
+
+ case PCX: /* PCI C/X */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_C/X_(PCI) string");
+ return -1;
+ }
+ p->u.board.type = PCX;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ p->u.board.module1 = 0;
+ p->u.board.module2 = 0;
+ break;
+
+ case PEPC: /* PCI EPC/X */
+ if (p->type != BNODE) {
+ dgap_err("unexpected \"Digi_EPC/X_(PCI)\" string");
+ return -1;
+ }
+ p->u.board.type = PEPC;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ p->u.board.module1 = 0;
+ p->u.board.module2 = 0;
+ break;
+
+ case PPCM: /* PCI/Xem */
+ if (p->type != BNODE) {
+ dgap_err("unexpected PCI/Xem string");
+ return -1;
+ }
+ p->u.board.type = PPCM;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ break;
+
+ case IO: /* i/o port */
+ if (p->type != BNODE) {
+ dgap_err("IO port only vaild for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.portstr = dgap_savestring(s);
+ if (kstrtol(s, 0, &p->u.board.port)) {
+ dgap_err("bad number for IO port");
+ return -1;
+ }
+ p->u.board.v_port = 1;
+ break;
+
+ case MEM: /* memory address */
+ if (p->type != BNODE) {
+ dgap_err("memory address only vaild for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.addrstr = dgap_savestring(s);
+ if (kstrtoul(s, 0, &p->u.board.addr)) {
+ dgap_err("bad number for memory address");
+ return -1;
+ }
+ p->u.board.v_addr = 1;
+ break;
+
+ case PCIINFO: /* pci information */
+ if (p->type != BNODE) {
+ dgap_err("memory address only vaild for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.pcibusstr = dgap_savestring(s);
+ if (kstrtoul(s, 0, &p->u.board.pcibus)) {
+ dgap_err("bad number for pci bus");
+ return -1;
+ }
+ p->u.board.v_pcibus = 1;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.pcislotstr = dgap_savestring(s);
+ if (kstrtoul(s, 0, &p->u.board.pcislot)) {
+ dgap_err("bad number for pci slot");
+ return -1;
+ }
+ p->u.board.v_pcislot = 1;
+ break;
+
+ case METHOD:
+ if (p->type != BNODE) {
+ dgap_err("install method only vaild for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.method = dgap_savestring(s);
+ p->u.board.v_method = 1;
+ break;
+
+ case STATUS:
+ if (p->type != BNODE) {
+ dgap_err("config status only vaild for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.status = dgap_savestring(s);
+ break;
+
+ case NPORTS: /* number of ports */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.board.nport)) {
+ dgap_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.board.v_nport = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.nport)) {
+ dgap_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.conc.v_nport = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.module.nport)) {
+ dgap_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.module.v_nport = 1;
+ } else {
+ dgap_err("nports only valid for concentrators or modules");
+ return -1;
+ }
+ break;
+
+ case ID: /* letter ID used in tty name */
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+
+ p->u.board.status = dgap_savestring(s);
+
+ if (p->type == CNODE) {
+ p->u.conc.id = dgap_savestring(s);
+ p->u.conc.v_id = 1;
+ } else if (p->type == MNODE) {
+ p->u.module.id = dgap_savestring(s);
+ p->u.module.v_id = 1;
+ } else {
+ dgap_err("id only valid for concentrators or modules");
+ return -1;
+ }
+ break;
+
+ case STARTO: /* start offset of ID */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.board.start)) {
+ dgap_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.board.v_start = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.start)) {
+ dgap_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.conc.v_start = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.module.start)) {
+ dgap_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.module.v_start = 1;
+ } else {
+ dgap_err("start only valid for concentrators or modules");
+ return -1;
+ }
+ break;
+
+ case TTYN: /* tty name prefix */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(TNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (!s) {
+ dgap_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.ttyname = dgap_savestring(s);
+ if (!p->u.ttyname) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ break;
+
+ case CU: /* cu name prefix */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(CUNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (!s) {
+ dgap_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.cuname = dgap_savestring(s);
+ if (!p->u.cuname) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ break;
+
+ case LINE: /* line information */
+ if (dgap_checknode(p))
+ return -1;
+ if (brd == NULL) {
+ dgap_err("must specify board before line info");
+ return -1;
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ dgap_err("line not vaild for PC/em");
+ return -1;
+ }
+ p->next = dgap_newnode(LNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ conc = NULL;
+ line = p;
+ linecnt++;
+ break;
+
+ case CONC: /* concentrator information */
+ if (dgap_checknode(p))
+ return -1;
+ if (line == NULL) {
+ dgap_err("must specify line info before concentrator");
+ return -1;
+ }
+ p->next = dgap_newnode(CNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ conc = p;
+ if (linecnt)
+ brd->u.board.conc2++;
+ else
+ brd->u.board.conc1++;
+
+ break;
+
+ case CX: /* c/x type concentrator */
+ if (p->type != CNODE) {
+ dgap_err("cx only valid for concentrators");
+ return -1;
+ }
+ p->u.conc.type = CX;
+ p->u.conc.v_type = 1;
+ break;
+
+ case EPC: /* epc type concentrator */
+ if (p->type != CNODE) {
+ dgap_err("cx only valid for concentrators");
+ return -1;
+ }
+ p->u.conc.type = EPC;
+ p->u.conc.v_type = 1;
+ break;
+
+ case MOD: /* EBI module */
+ if (dgap_checknode(p))
+ return -1;
+ if (brd == NULL) {
+ dgap_err("must specify board info before EBI modules");
+ return -1;
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ linecnt = 0;
+ break;
+ default:
+ if (conc == NULL) {
+ dgap_err("must specify concentrator info before EBI module");
+ return -1;
+ }
+ }
+ p->next = dgap_newnode(MNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ if (linecnt)
+ brd->u.board.module2++;
+ else
+ brd->u.board.module1++;
+
+ break;
+
+ case PORTS: /* ports type EBI module */
+ if (p->type != MNODE) {
+ dgap_err("ports only valid for EBI modules");
+ return -1;
+ }
+ p->u.module.type = PORTS;
+ p->u.module.v_type = 1;
+ break;
+
+ case MODEM: /* modem type EBI module */
+ if (p->type != MNODE) {
+ dgap_err("modem only valid for modem modules");
+ return -1;
+ }
+ p->u.module.type = MODEM;
+ p->u.module.v_type = 1;
+ break;
+
+ case CABLE:
+ if (p->type == LNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.line.cable = dgap_savestring(s);
+ p->u.line.v_cable = 1;
+ }
+ break;
+
+ case SPEED: /* sync line speed indication */
+ if (p->type == LNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.line.speed)) {
+ dgap_err("bad number for line speed");
+ return -1;
+ }
+ p->u.line.v_speed = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.speed)) {
+ dgap_err("bad number for line speed");
+ return -1;
+ }
+ p->u.conc.v_speed = 1;
+ } else {
+ dgap_err("speed valid only for lines or concentrators.");
+ return -1;
+ }
+ break;
+
+ case CONNECT:
+ if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.conc.connect = dgap_savestring(s);
+ p->u.conc.v_connect = 1;
+ }
+ break;
+ case PRINT: /* transparent print name prefix */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(PNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (!s) {
+ dgap_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.printname = dgap_savestring(s);
+ if (!p->u.printname) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ break;
+
+ case CMAJOR: /* major number */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(JNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.majornumber)) {
+ dgap_err("bad number for major number");
+ return -1;
+ }
+ break;
+
+ case ALTPIN: /* altpin setting */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(ANODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.altpin)) {
+ dgap_err("bad number for altpin");
+ return -1;
+ }
+ break;
+
+ case USEINTR: /* enable interrupt setting */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(INTRNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.useintr)) {
+ dgap_err("bad number for useintr");
+ return -1;
+ }
+ break;
+
+ case TTSIZ: /* size of tty structure */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(TSNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.ttysize)) {
+ dgap_err("bad number for ttysize");
+ return -1;
+ }
+ break;
+
+ case CHSIZ: /* channel structure size */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(CSNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.chsize)) {
+ dgap_err("bad number for chsize");
+ return -1;
+ }
+ break;
+
+ case BSSIZ: /* board structure size */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(BSNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.bssize)) {
+ dgap_err("bad number for bssize");
+ return -1;
+ }
+ break;
+
+ case UNTSIZ: /* sched structure size */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(USNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.unsize)) {
+ dgap_err("bad number for schedsize");
+ return -1;
+ }
+ break;
+
+ case F2SIZ: /* f2200 structure size */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(FSNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.f2size)) {
+ dgap_err("bad number for f2200size");
+ return -1;
+ }
+ break;
+
+ case VPSIZ: /* vpix structure size */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(VSNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.vpixsize)) {
+ dgap_err("bad number for vpixsize");
+ return -1;
+ }
+ break;
+ }
+ }
+}
+
+/*
+ * dgap_sindex: much like index(), but it looks for a match of any character in
+ * the group, and returns a pointer to that position. If the first character
+ * is a ^, then this will match the first character NOT in the group.
+ */
+static char *dgap_sindex(char *string, char *group)
+{
+ char *ptr;
+
+ if (!string || !group)
+ return (char *) NULL;
+
+ if (*group == '^') {
+ group++;
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ break;
+ }
+ if (*ptr == '\0')
+ return string;
+ }
+ } else {
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ return string;
+ }
+ }
+ }
+
+ return (char *) NULL;
+}
+
+/*
+ * Get a token from the input file; return 0 if end of file is reached
+ */
+static int dgap_gettok(char **in, struct cnode *p)
+{
+ char *w;
+ struct toklist *t;
+
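+ /*
+ * If the word read last time was the "board" keyword, the next
+ * word must name a board type; otherwise skip words until the
+ * next recognized keyword is found.
+ */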
+ if (strstr(dgap_cword, "boar")) {
+ w = dgap_getword(in);
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_tlist; t->token != 0; t++) {
+ if (!strcmp(w, t->string))
+ return t->token;
+ }
+ dgap_err("board !!type not specified");
+ return 1;
+ } else {
+ while ((w = dgap_getword(in))) {
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_tlist; t->token != 0; t++) {
+ if (!strcmp(w, t->string))
+ return t->token;
+ }
+ }
+ return 0;
+ }
+}
+
+/*
+ * Get a word from the input stream. Words are separated by whitespace.
+ */
+static char *dgap_getword(char **in)
+{
+ char *ret_ptr = *in;
+
+ char *ptr = dgap_sindex(*in, " \t\n");
+
+ /* If no word found, return null */
+ if (!ptr)
+ return NULL;
+
+ /* Mark new location for our buffer */
+ *ptr = '\0';
+ *in = ptr + 1;
+
+ /* Eat any extra spaces/tabs/newlines that might be present */
+ while (*in && **in && ((**in == ' ') ||
+ (**in == '\t') ||
+ (**in == '\n'))) {
+ **in = '\0';
+ *in = *in + 1;
+ }
+
+ return ret_ptr;
+}
+
+/*
+ * Print a configuration parse error message.
+ */
+static void dgap_err(char *s)
+{
+ pr_err("dgap: parse: %s\n", s);
+}
+
+/*
+ * allocate a new configuration node of type t
+ */
+static struct cnode *dgap_newnode(int t)
+{
+ struct cnode *n;
+
+ n = kzalloc(sizeof(struct cnode), GFP_ATOMIC);
+ if (n)
+ n->type = t;
+ return n;
+}
+
+/*
+ * dgap_checknode: see if all the necessary info has been supplied for a node
+ * before creating the next node.
+ */
+static int dgap_checknode(struct cnode *p)
+{
+ switch (p->type) {
+ case BNODE:
+ if (p->u.board.v_type == 0) {
+ dgap_err("board type !not specified");
+ return 1;
+ }
+
+ return 0;
+
+ case LNODE:
+ if (p->u.line.v_speed == 0) {
+ dgap_err("line speed not specified");
+ return 1;
+ }
+ return 0;
+
+ case CNODE:
+ if (p->u.conc.v_type == 0) {
+ dgap_err("concentrator type not specified");
+ return 1;
+ }
+ if (p->u.conc.v_speed == 0) {
+ dgap_err("concentrator line speed not specified");
+ return 1;
+ }
+ if (p->u.conc.v_nport == 0) {
+ dgap_err("number of ports on concentrator not specified");
+ return 1;
+ }
+ if (p->u.conc.v_id == 0) {
+ dgap_err("concentrator id letter not specified");
+ return 1;
+ }
+ return 0;
+
+ case MNODE:
+ if (p->u.module.v_type == 0) {
+ dgap_err("EBI module type not specified");
+ return 1;
+ }
+ if (p->u.module.v_nport == 0) {
+ dgap_err("number of ports on EBI module not specified");
+ return 1;
+ }
+ if (p->u.module.v_id == 0) {
+ dgap_err("EBI module id letter not specified");
+ return 1;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * Duplicate a string into newly allocated memory.
+ */
+static char *dgap_savestring(char *s)
+{
+ char *p;
+
+ p = kmalloc(strlen(s) + 1, GFP_ATOMIC);
+ if (p)
+ strcpy(p, s);
+ return p;
+}
+
+/*
+ * Given a board pointer, returns whether we should use interrupts or not.
+ */
+static uint dgap_config_get_useintr(struct board_t *bd)
+{
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return 0;
+
+ for (p = bd->bd_config; p; p = p->next) {
+ switch (p->type) {
+ case INTRNODE:
+ /*
+ * Found a useintr node; return the user's setting.
+ */
+ return p->u.useintr;
+ default:
+ break;
+ }
+ }
+
+ /* If not found, then don't turn on interrupts. */
+ return 0;
+}
+
+/*
+ * Given a board pointer, returns whether we turn on altpin or not.
+ */
+static uint dgap_config_get_altpin(struct board_t *bd)
+{
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return 0;
+
+ for (p = bd->bd_config; p; p = p->next) {
+ switch (p->type) {
+ case ANODE:
+ /*
+ * Found an altpin node; return the user's setting.
+ */
+ return p->u.altpin;
+ default:
+ break;
+ }
+ }
+
+ /* If not found, then don't turn on altpin. */
+ return 0;
+}
+
+/*
+ * Given a specific board type, find the first matching board in the
+ * config list, detach its chain of config nodes from the list, and
+ * return it.
+ */
+static struct cnode *dgap_find_config(int type, int bus, int slot)
+{
+ struct cnode *p, *prev = NULL, *prev2 = NULL, *found = NULL;
+
+ p = &dgap_head;
+
+ while (p->next != NULL) {
+ prev = p;
+ p = p->next;
+
+ if (p->type == BNODE) {
+
+ if (p->u.board.type == type) {
+
+ if (p->u.board.v_pcibus &&
+ p->u.board.pcibus != bus)
+ continue;
+ if (p->u.board.v_pcislot &&
+ p->u.board.pcislot != slot)
+ continue;
+
+ found = p;
+ /*
+ * Keep walking thru the list till we
+ * find the next board.
+ */
+ while (p->next != NULL) {
+ prev2 = p;
+ p = p->next;
+ if (p->type == BNODE) {
+
+ /*
+ * Mark the end of our 1 board
+ * chain of configs.
+ */
+ prev2->next = NULL;
+
+ /*
+ * Link the "next" board to the
+ * previous board, effectively
+ * "unlinking" our board from
+ * the main config.
+ */
+ prev->next = p;
+
+ return found;
+ }
+ }
+ /*
+ * It must be the last board in the list.
+ */
+ prev->next = NULL;
+ return found;
+ }
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Given a board pointer, walk the config list and count all the ports
+ * the user specified for the board.
+ * (This does NOT mean they are all actually present right now, though.)
+ */
+static uint dgap_config_get_num_prts(struct board_t *bd)
+{
+ int count = 0;
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return 0;
+
+ for (p = bd->bd_config; p; p = p->next) {
+
+ switch (p->type) {
+ case BNODE:
+ /*
+ * check for pcxr types.
+ */
+ if (p->u.board.type > EPCFE)
+ count += p->u.board.nport;
+ break;
+ case CNODE:
+ count += p->u.conc.nport;
+ break;
+ case MNODE:
+ count += p->u.module.nport;
+ break;
+ }
+ }
+ return count;
+}
+
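+/*
+ * Build the configuration string describing the board's lines,
+ * concentrators and EBI modules: for each sync line a zero byte
+ * followed by the line speed; for each concentrator its port count,
+ * then any module port counts (0x80 is set on every count that has
+ * another module count following it), then the line speed. The
+ * string is terminated with 0xff.
+ */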
+static char *dgap_create_config_string(struct board_t *bd, char *string)
+{
+ char *ptr = string;
+ struct cnode *p = NULL;
+ struct cnode *q = NULL;
+ int speed;
+
+ if (!bd) {
+ *ptr = 0xff;
+ return string;
+ }
+
+ for (p = bd->bd_config; p; p = p->next) {
+
+ switch (p->type) {
+ case LNODE:
+ *ptr = '\0';
+ ptr++;
+ *ptr = p->u.line.speed;
+ ptr++;
+ break;
+ case CNODE:
+ /*
+ * Because the EPC/con concentrators can have EM modules
+ * hanging off of them, we have to walk ahead in the
+ * list and keep adding the number of ports on each EM
+ * to the config. UGH!
+ */
+ speed = p->u.conc.speed;
+ q = p->next;
+ if ((q != NULL) && (q->type == MNODE)) {
+ *ptr = (p->u.conc.nport + 0x80);
+ ptr++;
+ p = q;
+ while ((q->next != NULL) &&
+ (q->next->type) == MNODE) {
+
+ *ptr = (q->u.module.nport + 0x80);
+ ptr++;
+ p = q;
+ q = q->next;
+ }
+ *ptr = q->u.module.nport;
+ ptr++;
+ } else {
+ *ptr = p->u.conc.nport;
+ ptr++;
+ }
+
+ *ptr = speed;
+ ptr++;
+ break;
+ }
+ }
+
+ *ptr = 0xff;
+ return string;
+}
diff --git a/drivers/staging/dgap/dgap.h b/drivers/staging/dgap/dgap.h
new file mode 100644
index 000000000000..6b8f5f858327
--- /dev/null
+++ b/drivers/staging/dgap/dgap.h
@@ -0,0 +1,1322 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *************************************************************************
+ *
+ * Driver includes
+ *
+ *************************************************************************/
+
+#ifndef __DGAP_DRIVER_H
+#define __DGAP_DRIVER_H
+
+#include <linux/types.h> /* To pick up the varions Linux types */
+#include <linux/tty.h> /* To pick up the various tty structs/defines */
+#include <linux/interrupt.h> /* For irqreturn_t type */
+
+#ifndef TRUE
+# define TRUE 1
+#endif
+
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+/* Required for our shared headers! */
+typedef unsigned char uchar;
+
+#if !defined(TTY_FLIPBUF_SIZE)
+# define TTY_FLIPBUF_SIZE 512
+#endif
+
+/*************************************************************************
+ *
+ * Driver defines
+ *
+ *************************************************************************/
+
+/*
+ * Driver identification
+ */
+#define DG_NAME "dgap-1.3-16"
+#define DG_PART "40002347_C"
+#define DRVSTR "dgap"
+
+/*
+ * defines from dgap_pci.h
+ */
+#define PCIMAX 32 /* maximum number of PCI boards */
+
+#define DIGI_VID 0x114F
+
+#define PCI_DEV_EPC_DID 0x0002
+#define PCI_DEV_XEM_DID 0x0004
+#define PCI_DEV_XR_DID 0x0005
+#define PCI_DEV_CX_DID 0x0006
+#define PCI_DEV_XRJ_DID 0x0009 /* PLX-based Xr adapter */
+#define PCI_DEV_XR_IBM_DID 0x0011 /* IBM 8-port Async Adapter */
+#define PCI_DEV_XR_BULL_DID 0x0013 /* BULL 8-port Async Adapter */
+#define PCI_DEV_XR_SAIP_DID 0x001c /* SAIP card - Xr adapter */
+#define PCI_DEV_XR_422_DID 0x0012 /* Xr-422 */
+#define PCI_DEV_920_2_DID 0x0034 /* XR-Plus 920 K, 2 port */
+#define PCI_DEV_920_4_DID 0x0026 /* XR-Plus 920 K, 4 port */
+#define PCI_DEV_920_8_DID 0x0027 /* XR-Plus 920 K, 8 port */
+#define PCI_DEV_EPCJ_DID 0x000a /* PLX 9060 chip for PCI */
+#define PCI_DEV_CX_IBM_DID 0x001b /* IBM 128-port Async Adapter */
+#define PCI_DEV_920_8_HP_DID 0x0058 /* HP XR-Plus 920 K, 8 port */
+#define PCI_DEV_XEM_HP_DID 0x0059 /* HP Xem PCI */
+
+#define PCI_DEV_XEM_NAME "AccelePort XEM"
+#define PCI_DEV_CX_NAME "AccelePort CX"
+#define PCI_DEV_XR_NAME "AccelePort Xr"
+#define PCI_DEV_XRJ_NAME "AccelePort Xr (PLX)"
+#define PCI_DEV_XR_SAIP_NAME "AccelePort Xr (SAIP)"
+#define PCI_DEV_920_2_NAME "AccelePort Xr920 2 port"
+#define PCI_DEV_920_4_NAME "AccelePort Xr920 4 port"
+#define PCI_DEV_920_8_NAME "AccelePort Xr920 8 port"
+#define PCI_DEV_XR_422_NAME "AccelePort Xr 422"
+#define PCI_DEV_EPCJ_NAME "AccelePort EPC (PLX)"
+#define PCI_DEV_XR_BULL_NAME "AccelePort Xr (BULL)"
+#define PCI_DEV_XR_IBM_NAME "AccelePort Xr (IBM)"
+#define PCI_DEV_CX_IBM_NAME "AccelePort CX (IBM)"
+#define PCI_DEV_920_8_HP_NAME "AccelePort Xr920 8 port (HP)"
+#define PCI_DEV_XEM_HP_NAME "AccelePort XEM (HP)"
+
+/*
+ * On the PCI boards, there is no IO space allocated
+ * The I/O registers will be in the first 3 bytes of the
+ * upper 2MB of the 4MB memory space. The board memory
+ * will be mapped into the low 2MB of the 4MB memory space
+ */
+
+/* Potential location of PCI BIOS from E0000 to FFFFF */
+#define PCI_BIOS_SIZE 0x00020000
+
+/* Size of Memory and I/O for PCI (4MB) */
+#define PCI_RAM_SIZE 0x00400000
+
+/* Size of Memory (2MB) */
+#define PCI_MEM_SIZE 0x00200000
+
+/* Max PCI Window Size (2MB) */
+#define PCI_WIN_SIZE 0x00200000
+
+#define PCI_WIN_SHIFT 21 /* 21 bits max */
+
+/* Offset of I/O in Memory (2MB) */
+#define PCI_IO_OFFSET 0x00200000
+
+/* Size of IO (2MB) */
+#define PCI_IO_SIZE 0x00200000
+
+/* Number of boards we support at once. */
+#define MAXBOARDS 32
+#define MAXPORTS 224
+#define MAXTTYNAMELEN 200
+
+/* Our 3 magic numbers for our board, channel and unit structs */
+#define DGAP_BOARD_MAGIC 0x5c6df104
+#define DGAP_CHANNEL_MAGIC 0x6c6df104
+#define DGAP_UNIT_MAGIC 0x7c6df104
+
+/* Serial port types */
+#define DGAP_SERIAL 0
+#define DGAP_PRINT 1
+
+#define SERIAL_TYPE_NORMAL 1
+
+/* 4 extra for alignment play space */
+#define WRITEBUFLEN ((4096) + 4)
+#define MYFLIPLEN N_TTY_BUF_SIZE
+
+#define SBREAK_TIME 0x25
+#define U2BSIZE 0x400
+
+#define dgap_jiffies_from_ms(a) (((a) * HZ) / 1000)
+
+/*
+ * Our major for the mgmt devices.
+ *
+ * We can use 22, because Digi was allocated 22 and 23 for the epca driver.
+ * 22 has become obsolete now that the "cu" devices have
+ * been removed from 2.6.
+ * Also, this *IS* the epca driver, just PCI only now.
+ */
+#ifndef DIGI_DGAP_MAJOR
+# define DIGI_DGAP_MAJOR 22
+#endif
+
+/*
+ * The parameters we use to define the periods of the moving averages.
+ */
+#define MA_PERIOD (HZ / 10)
+#define SMA_DUR (1 * HZ)
+#define EMA_DUR (1 * HZ)
+#define SMA_NPERIODS (SMA_DUR / MA_PERIOD)
+#define EMA_NPERIODS (EMA_DUR / MA_PERIOD)
+
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially. This is the same structure that is defined
+ * as the default in tty_io.c with the same settings overridden as in serial.c
+ *
+ * In short, this should match the internal serial ports' defaults.
+ */
+#define DEFAULT_IFLAGS (ICRNL | IXON)
+#define DEFAULT_OFLAGS (OPOST | ONLCR)
+#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL)
+#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \
+ ECHOCTL | ECHOKE | IEXTEN)
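As a rough illustration of how these defaults would be applied, the sketch below seeds a tty driver's init_termios with them. The helper name is hypothetical, and this is not necessarily how the driver itself wires it up.

#include <linux/tty.h>

/* Hypothetical helper: give newly created ports the defaults defined above. */
static void dgap_default_termios(struct tty_driver *drv)
{
	drv->init_termios = tty_std_termios;
	drv->init_termios.c_iflag = DEFAULT_IFLAGS;
	drv->init_termios.c_oflag = DEFAULT_OFLAGS;
	drv->init_termios.c_cflag = DEFAULT_CFLAGS;
	drv->init_termios.c_lflag = DEFAULT_LFLAGS;
	drv->init_termios.c_ispeed = 9600;
	drv->init_termios.c_ospeed = 9600;
}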
+
+#ifndef _POSIX_VDISABLE
+#define _POSIX_VDISABLE '\0'
+#endif
+
+#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
+#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
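Because SNIFF_MAX is a power of two, ring indexes wrap with a mask rather than a modulo. A minimal producer-side sketch, assuming the caller already holds the channel lock (the helper name is invented for illustration):

/* Append one byte to the per-channel sniff ring; drop it if the ring is full. */
static void dgap_sniff_putc(struct channel_t *ch, unsigned char c)
{
	uint next = (ch->ch_sniff_in + 1) & SNIFF_MASK;

	if (next == ch->ch_sniff_out)		/* one slot stays empty */
		return;
	ch->ch_sniff_buf[ch->ch_sniff_in] = c;
	ch->ch_sniff_in = next;
}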
+
+#define VPDSIZE (512)
+
+/************************************************************************
+ * FEP memory offsets
+ ************************************************************************/
+#define START 0x0004L /* Execution start address */
+
+#define CMDBUF 0x0d10L /* Command (cm_t) structure offset */
+#define CMDSTART 0x0400L /* Start of command buffer */
+#define CMDMAX 0x0800L /* End of command buffer */
+
+#define EVBUF 0x0d18L /* Event (ev_t) structure */
+#define EVSTART 0x0800L /* Start of event buffer */
+#define EVMAX 0x0c00L /* End of event buffer */
+#define FEP5_PLUS 0x0E40 /* ASCII '5' and ASCII 'A' are here */
+#define ECS_SEG 0x0E44 /* Segment of the extended channel structure */
+#define LINE_SPEED 0x10 /* Offset into ECS_SEG for line speed */
+ /* if the fep has extended capabilities */
+
+/* BIOS MAGIC SPOTS */
+#define ERROR 0x0C14L /* BIOS error code */
+#define SEQUENCE 0x0C12L /* BIOS sequence indicator */
+#define POSTAREA 0x0C00L /* POST complete message area */
+
+/* FEP MAGIC SPOTS */
+#define FEPSTAT POSTAREA /* OS here when FEP comes up */
+#define NCHAN 0x0C02L /* number of ports FEP sees */
+#define PANIC 0x0C10L /* PANIC area for FEP */
+#define KMEMEM 0x0C30L /* Memory for KME use */
+#define CONFIG 0x0CD0L /* Concentrator configuration info */
+#define CONFIGSIZE 0x0030 /* configuration info size */
+#define DOWNREQ 0x0D00 /* Download request buffer pointer */
+
+#define CHANBUF 0x1000L /* Async channel (bs_t) structs */
+#define FEPOSSIZE 0x1FFF /* 8K FEPOS */
+
+#define XEMPORTS 0xC02 /*
+ * Offset in board memory where FEP5 stores
+ * how many ports it has detected.
+ * NOTE: FEP5 reports 64 ports when the user
+ * has the cable in EBI OUT instead of EBI IN.
+ */
+
+#define FEPCLR 0x00
+#define FEPMEM 0x02
+#define FEPRST 0x04
+#define FEPINT 0x08
+#define FEPMASK 0x0e
+#define FEPWIN 0x80
+
+#define LOWMEM 0x0100
+#define HIGHMEM 0x7f00
+
+#define FEPTIMEOUT 200000
+
+#define ENABLE_INTR 0x0e04 /* Enable interrupts flag */
+#define FEPPOLL_MIN 1 /* minimum of 1 millisecond */
+#define FEPPOLL_MAX 20 /* maximum of 20 milliseconds */
+#define FEPPOLL 0x0c26 /* Fep event poll interval */
+
+#define IALTPIN 0x0080 /* Input flag to swap DSR <-> DCD */
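These offsets are all relative to the start of the board memory window, so the host reads them through the remapped base. A hedged sketch follows; the FEP5+ signature byte order is an assumption, and the real probe code may differ.

/* Read a few of the FEP/BIOS "magic spots" through the remapped window. */
static void dgap_read_fep_globals(struct board_t *bd)
{
	u16 nchan = readw(bd->re_map_membase + NCHAN);
	u16 sig   = readw(bd->re_map_membase + FEP5_PLUS);

	bd->nasync = nchan;			/* ports the FEP detected */
	if (sig == (('A' << 8) | '5'))		/* "5A" on little-endian */
		bd->bd_flags |= BD_FEP5PLUS;
}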
+
+/************************************************************************
+ * FEP supported functions
+ ************************************************************************/
+#define SRLOW 0xe0 /* Set receive low water */
+#define SRHIGH 0xe1 /* Set receive high water */
+#define FLUSHTX 0xe2 /* Flush transmit buffer */
+#define PAUSETX 0xe3 /* Pause data transmission */
+#define RESUMETX 0xe4 /* Resume data transmission */
+#define SMINT 0xe5 /* Set Modem Interrupt */
+#define SAFLOWC 0xe6 /* Set Aux. flow control chars */
+#define SBREAK 0xe8 /* Send break */
+#define SMODEM 0xe9 /* Set 8530 modem control lines */
+#define SIFLAG 0xea /* Set UNIX iflags */
+#define SFLOWC 0xeb /* Set flow control characters */
+#define STLOW 0xec /* Set transmit low water mark */
+#define RPAUSE 0xee /* Pause receive */
+#define RRESUME 0xef /* Resume receive */
+#define CHRESET 0xf0 /* Reset Channel */
+#define BUFSETALL 0xf2 /* Set Tx & Rx buffer size avail*/
+#define SOFLAG 0xf3 /* Set UNIX oflags */
+#define SHFLOW 0xf4 /* Set hardware handshake */
+#define SCFLAG 0xf5 /* Set UNIX cflags */
+#define SVNEXT 0xf6 /* Set VNEXT character */
+#define SPINTFC 0xfc /* Reserved */
+#define SCOMMODE 0xfd /* Set RS232/422 mode */
+
+
+/************************************************************************
+ * Modes for SCOMMODE
+ ************************************************************************/
+#define MODE_232 0x00
+#define MODE_422 0x01
+
+
+/************************************************************************
+ * Event flags.
+ ************************************************************************/
+#define IFBREAK 0x01 /* Break received */
+#define IFTLW 0x02 /* Transmit low water */
+#define IFTEM 0x04 /* Transmitter empty */
+#define IFDATA 0x08 /* Receive data present */
+#define IFMODEM 0x20 /* Modem status change */
+
+/************************************************************************
+ * Modem flags
+ ************************************************************************/
+# define DM_RTS 0x02 /* Request to send */
+# define DM_CD 0x80 /* Carrier detect */
+# define DM_DSR 0x20 /* Data set ready */
+# define DM_CTS 0x10 /* Clear to send */
+# define DM_RI 0x40 /* Ring indicator */
+# define DM_DTR 0x01 /* Data terminal ready */
+
+/*
+ * defines from dgap_conf.h
+ */
+#define NULLNODE 0 /* header node, not used */
+#define BNODE 1 /* Board node */
+#define LNODE 2 /* Line node */
+#define CNODE 3 /* Concentrator node */
+#define MNODE 4 /* EBI Module node */
+#define TNODE 5 /* tty name prefix node */
+#define CUNODE 6 /* cu name prefix (non-SCO) */
+#define PNODE 7 /* trans. print prefix node */
+#define JNODE 8 /* maJor number node */
+#define ANODE 9 /* altpin */
+#define TSNODE 10 /* tty structure size */
+#define CSNODE 11 /* channel structure size */
+#define BSNODE 12 /* board structure size */
+#define USNODE 13 /* unit schedule structure size */
+#define FSNODE 14 /* f2200 structure size */
+#define VSNODE 15 /* size of VPIX structures */
+#define INTRNODE 16 /* enable interrupt */
+
+/* Enumeration of tokens */
+#define BEGIN 1
+#define END 2
+#define BOARD 10
+
+#define EPCFS 11 /* start of EPC family definitions */
+#define ICX 11
+#define MCX 13
+#define PCX 14
+#define IEPC 15
+#define EEPC 16
+#define MEPC 17
+#define IPCM 18
+#define EPCM 19
+#define MPCM 20
+#define PEPC 21
+#define PPCM 22
+#ifdef CP
+#define ICP 23
+#define ECP 24
+#define MCP 25
+#endif
+#define EPCFE 25 /* end of EPC family definitions */
+#define PC2E 26
+#define PC4E 27
+#define PC4E8K 28
+#define PC8E 29
+#define PC8E8K 30
+#define PC16E 31
+#define MC2E8K 34
+#define MC4E8K 35
+#define MC8E8K 36
+
+#define AVANFS 42 /* start of Avanstar family definitions */
+#define A8P 42
+#define A16P 43
+#define AVANFE 43 /* end of Avanstar family definitions */
+
+#define DA2000FS 44 /* start of AccelePort 2000 family definitions */
+#define DA22 44 /* AccelePort 2002 */
+#define DA24 45 /* AccelePort 2004 */
+#define DA28 46 /* AccelePort 2008 */
+#define DA216 47 /* AccelePort 2016 */
+#define DAR4 48 /* AccelePort RAS 4 port */
+#define DAR8 49 /* AccelePort RAS 8 port */
+#define DDR24 50 /* DataFire RAS 24 port */
+#define DDR30 51 /* DataFire RAS 30 port */
+#define DDR48 52 /* DataFire RAS 48 port */
+#define DDR60 53 /* DataFire RAS 60 port */
+#define DA2000FE 53 /* end of AccelePort 2000/RAS family definitions */
+
+#define PCXRFS 106 /* start of PCXR family definitions */
+#define APORT4 106
+#define APORT8 107
+#define PAPORT4 108
+#define PAPORT8 109
+#define APORT4_920I 110
+#define APORT8_920I 111
+#define APORT4_920P 112
+#define APORT8_920P 113
+#define APORT2_920P 114
+#define PCXRFE 117 /* end of PCXR family definitions */
+
+#define LINE 82
+#ifdef T1
+#define T1M 83
+#define E1M 84
+#endif
+#define CONC 64
+#define CX 65
+#define EPC 66
+#define MOD 67
+#define PORTS 68
+#define METHOD 69
+#define CUSTOM 70
+#define BASIC 71
+#define STATUS 72
+#define MODEM 73
+/* The following tokens can appear in multiple places */
+#define SPEED 74
+#define NPORTS 75
+#define ID 76
+#define CABLE 77
+#define CONNECT 78
+#define IO 79
+#define MEM 80
+#define DPSZ 81
+
+#define TTYN 90
+#define CU 91
+#define PRINT 92
+#define XPRINT 93
+#define CMAJOR 94
+#define ALTPIN 95
+#define STARTO 96
+#define USEINTR 97
+#define PCIINFO 98
+
+#define TTSIZ 100
+#define CHSIZ 101
+#define BSSIZ 102
+#define UNTSIZ 103
+#define F2SIZ 104
+#define VPSIZ 105
+
+#define TOTAL_BOARD 2
+#define CURRENT_BRD 4
+#define BOARD_TYPE 6
+#define IO_ADDRESS 8
+#define MEM_ADDRESS 10
+
+#define FIELDS_PER_PAGE 18
+
+#define TB_FIELD 1
+#define CB_FIELD 3
+#define BT_FIELD 5
+#define IO_FIELD 7
+#define ID_FIELD 8
+#define ME_FIELD 9
+#define TTY_FIELD 11
+#define CU_FIELD 13
+#define PR_FIELD 15
+#define MPR_FIELD 17
+
+#define MAX_FIELD 512
+
+#define INIT 0
+#define NITEMS 128
+#define MAX_ITEM 512
+
+#define DSCRINST 1
+#define DSCRNUM 3
+#define ALTPINQ 5
+#define SSAVE 7
+
+#define DSCR "32"
+#define ONETONINE "123456789"
+#define ALL "1234567890"
+
+/*
+ * All the possible states the driver can be while being loaded.
+ */
+enum {
+ DRIVER_INITIALIZED = 0,
+ DRIVER_READY
+};
+
+/*
+ * All the possible states the board can be while booting up.
+ */
+enum {
+ BOARD_FAILED = 0,
+ BOARD_READY
+};
+
+/*
+ * All the possible states that a requested concentrator image can be in.
+ */
+enum {
+ NO_PENDING_CONCENTRATOR_REQUESTS = 0,
+ NEED_CONCENTRATOR,
+ REQUESTED_CONCENTRATOR
+};
+
+
+
+/*
+ * Modem line constants are defined as macros because DSR and
+ * DCD are swappable using the ditty altpin option.
+ */
+#define D_CD(ch) ch->ch_cd /* Carrier detect */
+#define D_DSR(ch) ch->ch_dsr /* Data set ready */
+#define D_RTS(ch) DM_RTS /* Request to send */
+#define D_CTS(ch) DM_CTS /* Clear to send */
+#define D_RI(ch) DM_RI /* Ring indicator */
+#define D_DTR(ch) DM_DTR /* Data terminal ready */
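A typical use of these macros is translating the FEP's modem status bytes into the TIOCM_* bits a tiocmget() handler must return. The sketch below follows that pattern; it is illustrative rather than the driver's exact code.

static int dgap_msignals_to_tiocm(struct channel_t *ch)
{
	uchar mstat = ch->ch_mostat | ch->ch_mistat;
	int result = 0;

	if (mstat & D_DTR(ch))
		result |= TIOCM_DTR;
	if (mstat & D_RTS(ch))
		result |= TIOCM_RTS;
	if (mstat & D_CTS(ch))
		result |= TIOCM_CTS;
	if (mstat & D_DSR(ch))
		result |= TIOCM_DSR;
	if (mstat & D_RI(ch))
		result |= TIOCM_RI;
	if (mstat & D_CD(ch))
		result |= TIOCM_CD;
	return result;
}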
+
+
+/*************************************************************************
+ *
+ * Structures and closely related defines.
+ *
+ *************************************************************************/
+
+
+/*
+ * A structure to hold a statistics counter. We also
+ * compute moving averages for this counter.
+ */
+struct macounter {
+ u32 cnt; /* Total count */
+ ulong accum; /* Accumulator per period */
+ ulong sma; /* Simple moving average */
+ ulong ema; /* Exponential moving average */
+};
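The MA_PERIOD/SMA_NPERIODS/EMA_NPERIODS parameters defined earlier suggest a once-per-period rollover like the sketch below. The update formulas are standard moving-average arithmetic chosen for illustration, not necessarily what the driver implements.

static void dgap_ma_rollover(struct macounter *m)
{
	m->cnt += m->accum;

	/* Running approximation of a simple moving average over SMA_NPERIODS. */
	m->sma = ((m->sma * (SMA_NPERIODS - 1)) + m->accum) / SMA_NPERIODS;

	/* Exponential moving average with alpha = 2 / (EMA_NPERIODS + 1). */
	m->ema = ((m->ema * (EMA_NPERIODS - 1)) + (2 * m->accum)) /
		 (EMA_NPERIODS + 1);

	m->accum = 0;		/* start accumulating the next period */
}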
+
+
+/************************************************************************
+ * Device flag definitions for bd_flags.
+ ************************************************************************/
+#define BD_FEP5PLUS 0x0001 /* Supports FEP5 Plus commands */
+#define BD_HAS_VPD 0x0002 /* Board has VPD info available */
+
+/*
+ * Per-board information
+ */
+struct board_t {
+ int magic; /* Board Magic number. */
+ int boardnum; /* Board number: 0-3 */
+ int firstminor; /* First minor, e.g. 0, 30, 60 */
+
+ int type; /* Type of board */
+ char *name; /* Product Name */
+ struct pci_dev *pdev; /* Pointer to the pci_dev struct */
+ u16 vendor; /* PCI vendor ID */
+ u16 device; /* PCI device ID */
+ u16 subvendor; /* PCI subsystem vendor ID */
+ u16 subdevice; /* PCI subsystem device ID */
+ uchar rev; /* PCI revision ID */
+ uint pci_bus; /* PCI bus value */
+ uint pci_slot; /* PCI slot value */
+ u16 maxports; /* MAX ports this board can handle */
+ uchar vpd[VPDSIZE]; /* VPD of board, if found */
+ u32 bd_flags; /* Board flags */
+
+ spinlock_t bd_lock; /* Used to protect board */
+
+ u32 state; /* State of card. */
+ wait_queue_head_t state_wait; /* Place to sleep on for state change */
+
+ struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
+
+ u32 wait_for_bios;
+ u32 wait_for_fep;
+
+ struct cnode *bd_config; /* Config of board */
+
+ u16 nasync; /* Number of ports on card */
+
+ u32 use_interrupts; /* Should we be interrupt driven? */
+ ulong irq; /* Interrupt request number */
+ ulong intr_count; /* Count of interrupts */
+ u32 intr_used; /* Non-zero if using interrupts */
+ u32 intr_running; /* Non-zero if FEP knows it's doing interrupts */
+
+ ulong port; /* Start of base io port of the card */
+ ulong port_end; /* End of base io port of the card */
+ ulong membase; /* Start of base memory of the card */
+ ulong membase_end; /* End of base memory of the card */
+
+ uchar *re_map_port; /* Remapped io port of the card */
+ uchar *re_map_membase;/* Remapped memory of the card */
+
+ uchar runwait; /* # Processes waiting for FEP */
+ uchar inhibit_poller; /* Tells the poller to leave us alone */
+
+ struct channel_t *channels[MAXPORTS]; /* array of pointers to our channels. */
+
+ struct tty_driver *SerialDriver;
+ struct tty_port *SerialPorts;
+ char SerialName[200];
+ struct tty_driver *PrintDriver;
+ struct tty_port *PrinterPorts;
+ char PrintName[200];
+
+ u32 dgap_Major_Serial_Registered;
+ u32 dgap_Major_TransparentPrint_Registered;
+
+ u32 dgap_Serial_Major;
+ u32 dgap_TransparentPrint_Major;
+
+ struct bs_t *bd_bs; /* Base structure pointer */
+
+ char *flipbuf; /* Our flip buffer, alloced if board is found */
+ char *flipflagbuf; /* Our flip flag buffer, alloced if board is found */
+
+ u16 dpatype; /* The board "type", as defined by DPA */
+ u16 dpastatus; /* The board "status", as defined by DPA */
+ wait_queue_head_t kme_wait; /* Needed for DPA support */
+
+ u32 conc_dl_status; /* Status of any pending conc download */
+};
+
+
+
+/************************************************************************
+ * Unit flag definitions for un_flags.
+ ************************************************************************/
+#define UN_ISOPEN 0x0001 /* Device is open */
+#define UN_CLOSING 0x0002 /* Line is being closed */
+#define UN_IMM 0x0004 /* Service immediately */
+#define UN_BUSY 0x0008 /* Work pending on this channel */
+#define UN_BREAKI 0x0010 /* Input break received */
+#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
+#define UN_TIME 0x0040 /* Waiting on time */
+#define UN_EMPTY 0x0080 /* Waiting output queue empty */
+#define UN_LOW 0x0100 /* Waiting output low water mark*/
+#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
+#define UN_WOPEN 0x0400 /* Device waiting for open */
+#define UN_WIOCTL 0x0800 /* Device waiting in ioctl */
+#define UN_HANGUP 0x8000 /* Carrier lost */
+
+struct device;
+
+/************************************************************************
+ * Structure for terminal or printer unit.
+ ************************************************************************/
+struct un_t {
+ int magic; /* Unit Magic Number. */
+ struct channel_t *un_ch;
+ u32 un_time;
+ u32 un_type;
+ u32 un_open_count; /* Counter of opens to port */
+ struct tty_struct *un_tty;/* Pointer to unit tty structure */
+ u32 un_flags; /* Unit flags */
+ wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
+ u32 un_dev; /* Minor device number */
+ tcflag_t un_oflag; /* oflags being done on board */
+ tcflag_t un_lflag; /* lflags being done on board */
+ struct device *un_sysfs;
+};
+
+
+/************************************************************************
+ * Device flag definitions for ch_flags.
+ ************************************************************************/
+#define CH_PRON 0x0001 /* Printer on string */
+#define CH_OUT 0x0002 /* Dial-out device open */
+#define CH_STOP 0x0004 /* Output is stopped */
+#define CH_STOPI 0x0008 /* Input is stopped */
+#define CH_CD 0x0010 /* Carrier is present */
+#define CH_FCAR 0x0020 /* Carrier forced on */
+
+#define CH_RXBLOCK 0x0080 /* Enable rx blocked flag */
+#define CH_WLOW 0x0100 /* Term waiting low event */
+#define CH_WEMPTY 0x0200 /* Term waiting empty event */
+#define CH_RENABLE 0x0400 /* Buffer just emptied */
+#define CH_RACTIVE 0x0800 /* Process active in xxread() */
+#define CH_RWAIT 0x1000 /* Process waiting in xxread() */
+#define CH_BAUD0 0x2000 /* Used for checking B0 transitions */
+#define CH_HANGUP 0x8000 /* Hangup received */
+
+/*
+ * Definitions for ch_sniff_flags
+ */
+#define SNIFF_OPEN 0x1
+#define SNIFF_WAIT_DATA 0x2
+#define SNIFF_WAIT_SPACE 0x4
+
+
+/************************************************************************
+ *** Definitions for Digi ditty(1) command.
+ ************************************************************************/
+
+/************************************************************************
+ * This module provides application access to special Digi
+ * serial line enhancements which are not standard UNIX(tm) features.
+ ************************************************************************/
+
+#if !defined(TIOCMODG)
+
+#define TIOCMODG (('d'<<8) | 250) /* get modem ctrl state */
+#define TIOCMODS (('d'<<8) | 251) /* set modem ctrl state */
+
+#ifndef TIOCM_LE
+#define TIOCM_LE 0x01 /* line enable */
+#define TIOCM_DTR 0x02 /* data terminal ready */
+#define TIOCM_RTS 0x04 /* request to send */
+#define TIOCM_ST 0x08 /* secondary transmit */
+#define TIOCM_SR 0x10 /* secondary receive */
+#define TIOCM_CTS 0x20 /* clear to send */
+#define TIOCM_CAR 0x40 /* carrier detect */
+#define TIOCM_RNG 0x80 /* ring indicator */
+#define TIOCM_DSR 0x100 /* data set ready */
+#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
+#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
+#endif
+
+#endif
+
+#if !defined(TIOCMSET)
+#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */
+#define TIOCMGET (('d'<<8) | 253) /* get modem ctrl state */
+#endif
+
+#if !defined(TIOCMBIC)
+#define TIOCMBIC (('d'<<8) | 254) /* clear modem ctrl bits */
+#define TIOCMBIS (('d'<<8) | 255) /* set modem ctrl bits */
+#endif
+
+
+#if !defined(TIOCSDTR)
+#define TIOCSDTR (('e'<<8) | 0) /* set DTR */
+#define TIOCCDTR (('e'<<8) | 1) /* clear DTR */
+#endif
+
+/************************************************************************
+ * Ioctl command arguments for DIGI parameters.
+ ************************************************************************/
+#define DIGI_GETA (('e'<<8) | 94) /* Read params */
+
+#define DIGI_SETA (('e'<<8) | 95) /* Set params */
+#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */
+#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */
+
+#define DIGI_KME (('e'<<8) | 98) /* Read/Write Host */
+ /* Adapter Memory */
+
+#define DIGI_GETFLOW (('e'<<8) | 99) /* Get startc/stopc flow */
+ /* control characters */
+#define DIGI_SETFLOW (('e'<<8) | 100) /* Set startc/stopc flow */
+ /* control characters */
+#define DIGI_GETAFLOW (('e'<<8) | 101) /* Get Aux. startc/stopc */
+ /* flow control chars */
+#define DIGI_SETAFLOW (('e'<<8) | 102) /* Set Aux. startc/stopc */
+ /* flow control chars */
+
+#define DIGI_GEDELAY (('d'<<8) | 246) /* Get edelay */
+#define DIGI_SEDELAY (('d'<<8) | 247) /* Set edelay */
+
+struct digiflow_t {
+ unsigned char startc; /* flow cntl start char */
+ unsigned char stopc; /* flow cntl stop char */
+};
+
+
+#ifdef FLOW_2200
+#define F2200_GETA (('e'<<8) | 104) /* Get 2x36 flow cntl flags */
+#define F2200_SETAW (('e'<<8) | 105) /* Set 2x36 flow cntl flags */
+#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */
+#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */
+#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */
+#define F2200_XON 0xf8
+#define P2200_XON 0xf9
+#define F2200_XOFF 0xfa
+#define P2200_XOFF 0xfb
+
+#define FXOFF_MASK 0x03 /* 2200 flow status mask */
+#define RCVD_FXOFF 0x01 /* 2x36 Terminal XOFF rcvd */
+#define RCVD_PXOFF 0x02 /* 2x36 Printer XOFF rcvd */
+#endif
+
+/************************************************************************
+ * Values for digi_flags
+ ************************************************************************/
+#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
+#define DIGI_FAST 0x0002 /* Fast baud rates */
+#define RTSPACE 0x0004 /* RTS input flow control */
+#define CTSPACE 0x0008 /* CTS output flow control */
+#define DSRPACE 0x0010 /* DSR output flow control */
+#define DCDPACE 0x0020 /* DCD output flow control */
+#define DTRPACE 0x0040 /* DTR input flow control */
+#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
+#define DIGI_FORCEDCD 0x0100 /* Force carrier */
+#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
+#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
+#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
+#define DIGI_PP_INPUT 0x1000 /* Change parallel port to input*/
+#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
+#define DIGI_422 0x4000 /* for 422/232 selectable panel */
+#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
+
+/************************************************************************
+ * These options are not supported on the comxi.
+ ************************************************************************/
+#define DIGI_COMXI (DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE)
+
+#define DIGI_PLEN 28 /* String length */
+#define DIGI_TSIZ 10 /* Terminal string len */
+
+/************************************************************************
+ * Structure used with ioctl commands for DIGI parameters.
+ ************************************************************************/
+struct digi_t {
+ unsigned short digi_flags; /* Flags (see above) */
+ unsigned short digi_maxcps; /* Max printer CPS */
+ unsigned short digi_maxchar; /* Max chars in print queue */
+ unsigned short digi_bufsize; /* Buffer size */
+ unsigned char digi_onlen; /* Length of ON string */
+ unsigned char digi_offlen; /* Length of OFF string */
+ char digi_onstr[DIGI_PLEN]; /* Printer on string */
+ char digi_offstr[DIGI_PLEN]; /* Printer off string */
+ char digi_term[DIGI_TSIZ]; /* terminal string */
+};
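From user space, struct digi_t travels through the DIGI_GETA/DIGI_SETA* ioctls defined above. A hedged example that enables altpin on an open port; it assumes the application includes this header for the structure and ioctl numbers.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int enable_altpin(const char *tty)
{
	struct digi_t dp;
	int fd = open(tty, O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return -1;
	if (ioctl(fd, DIGI_GETA, &dp) < 0)
		goto fail;
	dp.digi_flags |= DIGI_ALTPIN;		/* swap DSR and DCD */
	if (ioctl(fd, DIGI_SETAW, &dp) < 0)	/* drain, then apply */
		goto fail;
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}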
+
+/************************************************************************
+ * KME definitions and structures.
+ ************************************************************************/
+#define RW_IDLE 0 /* Operation complete */
+#define RW_READ 1 /* Read Concentrator Memory */
+#define RW_WRITE 2 /* Write Concentrator Memory */
+
+struct rw_t {
+ unsigned char rw_req; /* Request type */
+ unsigned char rw_board; /* Host Adapter board number */
+ unsigned char rw_conc; /* Concentrator number */
+ unsigned char rw_reserved; /* Reserved for expansion */
+ unsigned long rw_addr; /* Address in concentrator */
+ unsigned short rw_size; /* Read/write request length */
+ unsigned char rw_data[128]; /* Data to read/write */
+};
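The KME path pairs this structure with the DIGI_KME ioctl defined earlier. The field usage below is inferred from the structure comments above and is only a sketch:

	struct rw_t req = {
		.rw_req   = RW_READ,	/* read concentrator memory */
		.rw_board = 0,		/* first host adapter */
		.rw_conc  = 1,		/* concentrator number */
		.rw_addr  = 0x1000,	/* address inside the concentrator */
		.rw_size  = 64,		/* bytes to transfer */
	};

	/* ioctl(fd, DIGI_KME, &req);   data comes back in req.rw_data[] */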
+
+/************************************************************************
+ * Shrink Buffer and Board Information definitions and structures.
+ ************************************************************************/
+ /* Board type return codes */
+#define PCXI_TYPE 1 /* Board type at the designated port is a PC/Xi */
+#define PCXM_TYPE 2 /* Board type at the designated port is a PC/Xm */
+#define PCXE_TYPE 3 /* Board type at the designated port is a PC/Xe */
+#define MCXI_TYPE 4 /* Board type at the designated port is a MC/Xi */
+#define COMXI_TYPE 5 /* Board type at the designated port is a COM/Xi */
+
+ /* Non-Zero Result codes. */
+#define RESULT_NOBDFND 1 /* No Digi product is configured or installed at that port */
+#define RESULT_NODESCT 2 /* A memory descriptor was not obtainable */
+#define RESULT_NOOSSIG 3 /* FEP/OS signature was not detected on the board */
+#define RESULT_TOOSML 4 /* Too small an area to shrink. */
+#define RESULT_NOCHAN 5 /* Channel structure for the board was not found */
+
+struct shrink_buf_struct {
+ unsigned long shrink_buf_vaddr; /* Virtual address of board */
+ unsigned long shrink_buf_phys; /* Physical address of board */
+ unsigned long shrink_buf_bseg; /* Amount of board memory */
+ unsigned long shrink_buf_hseg; /* '186 Beginning of Dual-Port */
+
+ unsigned long shrink_buf_lseg; /* '186 Beginning of freed memory */
+ unsigned long shrink_buf_mseg; /* Linear address from start of
+ dual-port where freed memory
+ begins, host viewpoint. */
+
+ unsigned long shrink_buf_bdparam; /* Parameter for xxmemon and
+ xxmemoff */
+
+ unsigned long shrink_buf_reserva; /* Reserved */
+ unsigned long shrink_buf_reservb; /* Reserved */
+ unsigned long shrink_buf_reservc; /* Reserved */
+ unsigned long shrink_buf_reservd; /* Reserved */
+
+ unsigned char shrink_buf_result; /* Reason for call failing;
+ zero means success */
+ unsigned char shrink_buf_init; /* Non-Zero if it caused an
+ xxinit call. */
+
+ unsigned char shrink_buf_anports; /* Number of async ports */
+ unsigned char shrink_buf_snports; /* Number of sync ports */
+ unsigned char shrink_buf_type; /* Board type 1 = PC/Xi,
+ 2 = PC/Xm,
+ 3 = PC/Xe
+ 4 = MC/Xi
+ 5 = COMX/i */
+ unsigned char shrink_buf_card; /* Card number */
+
+};
+
+/************************************************************************
+ * Structure to get driver status information
+ ************************************************************************/
+struct digi_dinfo {
+ unsigned long dinfo_nboards; /* # boards configured */
+ char dinfo_reserved[12]; /* for future expansion */
+ char dinfo_version[16]; /* driver version */
+};
+
+#define DIGI_GETDD (('d'<<8) | 248) /* get driver info */
+
+/************************************************************************
+ * Structure used with ioctl commands for per-board information
+ *
+ * physsize and memsize differ when board has "windowed" memory
+ ************************************************************************/
+struct digi_info {
+ unsigned long info_bdnum; /* Board number (0 based) */
+ unsigned long info_ioport; /* io port address */
+ unsigned long info_physaddr; /* memory address */
+ unsigned long info_physsize; /* Size of host mem window */
+ unsigned long info_memsize; /* Amount of dual-port mem */
+ /* on board */
+ unsigned short info_bdtype; /* Board type */
+ unsigned short info_nports; /* number of ports */
+ char info_bdstate; /* board state */
+ char info_reserved[7]; /* for future expansion */
+};
+
+#define DIGI_GETBD (('d'<<8) | 249) /* get board info */
+
+struct digi_stat {
+ unsigned int info_chan; /* Channel number (0 based) */
+ unsigned int info_brd; /* Board number (0 based) */
+ unsigned long info_cflag; /* cflag for channel */
+ unsigned long info_iflag; /* iflag for channel */
+ unsigned long info_oflag; /* oflag for channel */
+ unsigned long info_mstat; /* mstat for channel */
+ unsigned long info_tx_data; /* tx_data for channel */
+ unsigned long info_rx_data; /* rx_data for channel */
+ unsigned long info_hflow; /* hflow for channel */
+ unsigned long info_reserved[8]; /* for future expansion */
+};
+
+#define DIGI_GETSTAT (('d'<<8) | 244) /* get channel status */
+/************************************************************************
+ *
+ * Structure used with ioctl commands for per-channel information
+ *
+ ************************************************************************/
+struct digi_ch {
+ unsigned long info_bdnum; /* Board number (0 based) */
+ unsigned long info_channel; /* Channel index number */
+ unsigned long info_ch_cflag; /* Channel cflag */
+ unsigned long info_ch_iflag; /* Channel iflag */
+ unsigned long info_ch_oflag; /* Channel oflag */
+ unsigned long info_chsize; /* Channel structure size */
+ unsigned long info_sleep_stat; /* sleep status */
+ dev_t info_dev; /* device number */
+ unsigned char info_initstate; /* Channel init state */
+ unsigned char info_running; /* Channel running state */
+ long reserved[8]; /* reserved for future use */
+};
+
+/*
+ * This structure is used with the DIGI_FEPCMD ioctl to
+ * tell the driver which port the command is for.
+ */
+struct digi_cmd {
+ int cmd;
+ int word;
+ int ncmds;
+ int chan; /* channel index (zero based) */
+ int bdid; /* board index (zero based) */
+};
+
+/*
+ * info_sleep_stat defines
+ */
+#define INFO_RUNWAIT 0x0001
+#define INFO_WOPEN 0x0002
+#define INFO_TTIOW 0x0004
+#define INFO_CH_RWAIT 0x0008
+#define INFO_CH_WEMPTY 0x0010
+#define INFO_CH_WLOW 0x0020
+#define INFO_XXBUF_BUSY 0x0040
+
+#define DIGI_GETCH (('d'<<8) | 245) /* get channel info */
+
+/* Board type definitions */
+
+#define SUBTYPE 0007
+#define T_PCXI 0000
+#define T_PCXM 0001
+#define T_PCXE 0002
+#define T_PCXR 0003
+#define T_SP 0004
+#define T_SP_PLUS 0005
+# define T_HERC 0000
+# define T_HOU 0001
+# define T_LON 0002
+# define T_CHA 0003
+#define FAMILY 0070
+#define T_COMXI 0000
+#define T_PCXX 0010
+#define T_CX 0020
+#define T_EPC 0030
+#define T_PCLITE 0040
+#define T_SPXX 0050
+#define T_AVXX 0060
+#define T_DXB 0070
+#define T_A2K_4_8 0070
+#define BUSTYPE 0700
+#define T_ISABUS 0000
+#define T_MCBUS 0100
+#define T_EISABUS 0200
+#define T_PCIBUS 0400
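SUBTYPE, FAMILY and BUSTYPE are octal masks over a single dpatype word, so a board such as the PCI Xr920 is encoded as (T_PCXR | T_PCLITE | T_PCIBUS). A small decode sketch, with the helper name invented for illustration:

static void dgap_decode_dpatype(u16 dpatype)
{
	u16 sub = dpatype & SUBTYPE;	/* e.g. T_PCXR */
	u16 fam = dpatype & FAMILY;	/* e.g. T_PCLITE */
	u16 bus = dpatype & BUSTYPE;	/* e.g. T_PCIBUS */

	if (bus == T_PCIBUS && sub == T_PCXR)
		pr_info("dgap: PCI Xr-class board, family %#o\n", fam);
}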
+
+/* Board State Definitions */
+
+#define BD_RUNNING 0x0
+#define BD_REASON 0x7f
+#define BD_NOTFOUND 0x1
+#define BD_NOIOPORT 0x2
+#define BD_NOMEM 0x3
+#define BD_NOBIOS 0x4
+#define BD_NOFEP 0x5
+#define BD_FAILED 0x6
+#define BD_ALLOCATED 0x7
+#define BD_TRIBOOT 0x8
+#define BD_BADKME 0x80
+
+#define DIGI_LOOPBACK (('d'<<8) | 252) /* Enable/disable UART internal loopback */
+#define DIGI_SPOLL (('d'<<8) | 254) /* change poller rate */
+
+#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
+#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
+#define DIGI_RESET_PORT (('e'<<8) | 93) /* Reset port */
+
+/************************************************************************
+ * Channel information structure.
+ ************************************************************************/
+struct channel_t {
+ int magic; /* Channel Magic Number */
+ struct bs_t *ch_bs; /* Base structure pointer */
+ struct cm_t *ch_cm; /* Command queue pointer */
+ struct board_t *ch_bd; /* Board structure pointer */
+ unsigned char *ch_vaddr; /* FEP memory origin */
+ unsigned char *ch_taddr; /* Write buffer origin */
+ unsigned char *ch_raddr; /* Read buffer origin */
+ struct digi_t ch_digi; /* Transparent Print structure */
+ struct un_t ch_tun; /* Terminal unit info */
+ struct un_t ch_pun; /* Printer unit info */
+
+ spinlock_t ch_lock; /* provide for serialization */
+ wait_queue_head_t ch_flags_wait;
+
+ u32 pscan_state;
+ uchar pscan_savechar;
+
+ u32 ch_portnum; /* Port number, 0 offset. */
+ u32 ch_open_count; /* open count */
+ u32 ch_flags; /* Channel flags */
+
+
+ u32 ch_close_delay; /* How long we should drop RTS/DTR for */
+
+ u32 ch_cpstime; /* Time for CPS calculations */
+
+ tcflag_t ch_c_iflag; /* channel iflags */
+ tcflag_t ch_c_cflag; /* channel cflags */
+ tcflag_t ch_c_oflag; /* channel oflags */
+ tcflag_t ch_c_lflag; /* channel lflags */
+
+ u16 ch_fepiflag; /* FEP tty iflags */
+ u16 ch_fepcflag; /* FEP tty cflags */
+ u16 ch_fepoflag; /* FEP tty oflags */
+ u16 ch_wopen; /* Waiting for open process cnt */
+ u16 ch_tstart; /* Transmit buffer start */
+ u16 ch_tsize; /* Transmit buffer size */
+ u16 ch_rstart; /* Receive buffer start */
+ u16 ch_rsize; /* Receive buffer size */
+ u16 ch_rdelay; /* Receive delay time */
+
+ u16 ch_tlw; /* Our currently set low water mark */
+
+ u16 ch_cook; /* Output character mask */
+
+ uchar ch_card; /* Card channel is on */
+ uchar ch_stopc; /* Stop character */
+ uchar ch_startc; /* Start character */
+
+ uchar ch_mostat; /* FEP output modem status */
+ uchar ch_mistat; /* FEP input modem status */
+ uchar ch_mforce; /* Modem values to be forced */
+ uchar ch_mval; /* Force values */
+ uchar ch_fepstopc; /* FEP stop character */
+ uchar ch_fepstartc; /* FEP start character */
+
+ uchar ch_astopc; /* Auxiliary Stop character */
+ uchar ch_astartc; /* Auxiliary Start character */
+ uchar ch_fepastopc; /* Auxiliary FEP stop char */
+ uchar ch_fepastartc; /* Auxiliary FEP start char */
+
+ uchar ch_hflow; /* FEP hardware handshake */
+ uchar ch_dsr; /* stores real dsr value */
+ uchar ch_cd; /* stores real cd value */
+ uchar ch_tx_win; /* channel tx buffer window */
+ uchar ch_rx_win; /* channel rx buffer window */
+ uint ch_custom_speed; /* Custom baud, if set */
+ uint ch_baud_info; /* Current baud info for /proc output */
+ ulong ch_rxcount; /* total of data received so far */
+ ulong ch_txcount; /* total of data transmitted so far */
+ ulong ch_err_parity; /* Count of parity errors on channel */
+ ulong ch_err_frame; /* Count of framing errors on channel */
+ ulong ch_err_break; /* Count of breaks on channel */
+ ulong ch_err_overrun; /* Count of overruns on channel */
+
+ uint ch_sniff_in;
+ uint ch_sniff_out;
+ char *ch_sniff_buf; /* Sniff buffer for proc */
+ ulong ch_sniff_flags; /* Channel flags */
+ wait_queue_head_t ch_sniff_wait;
+};
+
+/************************************************************************
+ * Command structure definition.
+ ************************************************************************/
+struct cm_t {
+ volatile unsigned short cm_head; /* Command buffer head offset */
+ volatile unsigned short cm_tail; /* Command buffer tail offset */
+ volatile unsigned short cm_start; /* start offset of buffer */
+ volatile unsigned short cm_max; /* last offset of buffer */
+};
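The host produces at cm_head and the FEP consumes at cm_tail, with entries living in the CMDSTART..CMDMAX window defined earlier. The sketch below queues one 4-byte command (opcode, port, 16-bit argument); the exact byte layout and wrap arithmetic are assumptions made for illustration, not the driver's verbatim code.

static void dgap_queue_cmd(struct board_t *bd, struct channel_t *ch,
			   uchar cmd, u16 word)
{
	struct cm_t *cm = ch->ch_cm;
	uchar *base = bd->re_map_membase;
	u16 head = readw(&cm->cm_head);

	writeb(cmd, base + CMDSTART + head);
	writeb((uchar)ch->ch_portnum, base + CMDSTART + head + 1);
	writew(word, base + CMDSTART + head + 2);

	head = (head + 4) & (CMDMAX - CMDSTART - 4);	/* wrap inside window */
	writew(head, &cm->cm_head);
}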
+
+/************************************************************************
+ * Event structure definition.
+ ************************************************************************/
+struct ev_t {
+ volatile unsigned short ev_head; /* Command buffer head offset */
+ volatile unsigned short ev_tail; /* Command buffer tail offset */
+ volatile unsigned short ev_start; /* start offset of buffer */
+ volatile unsigned short ev_max; /* last offset of buffer */
+};
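The poller drains this ring and dispatches on the event flags (IFDATA, IFMODEM, ...) defined earlier. A hedged sketch, assuming 4-byte entries of (port, reason, modem, spare); the real event layout may differ.

static void dgap_drain_events(struct board_t *bd, struct ev_t *ev)
{
	uchar *base = bd->re_map_membase;
	u16 head = readw(&ev->ev_head);
	u16 tail = readw(&ev->ev_tail);

	while (tail != head) {
		uchar port   = readb(base + EVSTART + tail);
		uchar reason = readb(base + EVSTART + tail + 1);

		if (reason & IFDATA)
			pr_debug("dgap: rx data ready on port %u\n", port);
		if (reason & IFMODEM)
			pr_debug("dgap: modem change on port %u\n", port);

		tail = (tail + 4) & (EVMAX - EVSTART - 4);
	}
	writew(tail, &ev->ev_tail);
}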
+
+/************************************************************************
+ * Download buffer structure.
+ ************************************************************************/
+struct downld_t {
+ uchar dl_type; /* Header */
+ uchar dl_seq; /* Download sequence */
+ ushort dl_srev; /* Software revision number */
+ ushort dl_lrev; /* Low revision number */
+ ushort dl_hrev; /* High revision number */
+ ushort dl_seg; /* Start segment address */
+ ushort dl_size; /* Number of bytes to download */
+ uchar dl_data[1024]; /* Download data */
+};
+
+/************************************************************************
+ * Per channel buffer structure
+ ************************************************************************
+ * Base Structure Entries Usage Meanings to Host *
+ * *
+ * W = read write R = read only *
+ * C = changed by commands only *
+ * U = unknown (may be changed w/o notice) *
+ ************************************************************************/
+struct bs_t {
+ volatile unsigned short tp_jmp; /* Transmit poll jump */
+ volatile unsigned short tc_jmp; /* Cooked procedure jump */
+ volatile unsigned short ri_jmp; /* Not currently used */
+ volatile unsigned short rp_jmp; /* Receive poll jump */
+
+ volatile unsigned short tx_seg; /* W Tx segment */
+ volatile unsigned short tx_head; /* W Tx buffer head offset */
+ volatile unsigned short tx_tail; /* R Tx buffer tail offset */
+ volatile unsigned short tx_max; /* W Tx buffer size - 1 */
+
+ volatile unsigned short rx_seg; /* W Rx segment */
+ volatile unsigned short rx_head; /* W Rx buffer head offset */
+ volatile unsigned short rx_tail; /* R Rx buffer tail offset */
+ volatile unsigned short rx_max; /* W Rx buffer size - 1 */
+
+ volatile unsigned short tx_lw; /* W Tx buffer low water mark */
+ volatile unsigned short rx_lw; /* W Rx buffer low water mark */
+ volatile unsigned short rx_hw; /* W Rx buffer high water mark */
+ volatile unsigned short incr; /* W Increment to next channel */
+
+ volatile unsigned short fepdev; /* U SCC device base address */
+ volatile unsigned short edelay; /* W Exception delay */
+ volatile unsigned short blen; /* W Break length */
+ volatile unsigned short btime; /* U Break complete time */
+
+ volatile unsigned short iflag; /* C UNIX input flags */
+ volatile unsigned short oflag; /* C UNIX output flags */
+ volatile unsigned short cflag; /* C UNIX control flags */
+ volatile unsigned short wfill[13]; /* U Reserved for expansion */
+
+ volatile unsigned char num; /* U Channel number */
+ volatile unsigned char ract; /* U Receiver active counter */
+ volatile unsigned char bstat; /* U Break status bits */
+ volatile unsigned char tbusy; /* W Transmit busy */
+ volatile unsigned char iempty; /* W Transmit empty event enable */
+ volatile unsigned char ilow; /* W Transmit low-water event enable */
+ volatile unsigned char idata; /* W Receive data interrupt enable */
+ volatile unsigned char eflag; /* U Host event flags */
+
+ volatile unsigned char tflag; /* U Transmit flags */
+ volatile unsigned char rflag; /* U Receive flags */
+ volatile unsigned char xmask; /* U Transmit ready flags */
+ volatile unsigned char xval; /* U Transmit ready value */
+ volatile unsigned char m_stat; /* RC Modem status bits */
+ volatile unsigned char m_change; /* U Modem bits which changed */
+ volatile unsigned char m_int; /* W Modem interrupt enable bits */
+ volatile unsigned char m_last; /* U Last modem status */
+
+ volatile unsigned char mtran; /* C Unreported modem trans */
+ volatile unsigned char orun; /* C Buffer overrun occurred */
+ volatile unsigned char astartc; /* W Auxiliary Xon char */
+ volatile unsigned char astopc; /* W Auxiliary Xoff char */
+ volatile unsigned char startc; /* W Xon character */
+ volatile unsigned char stopc; /* W Xoff character */
+ volatile unsigned char vnextc; /* W Vnext character */
+ volatile unsigned char hflow; /* C Software flow control */
+
+ volatile unsigned char fillc; /* U Delay Fill character */
+ volatile unsigned char ochar; /* U Saved output character */
+ volatile unsigned char omask; /* U Output character mask */
+
+ volatile unsigned char bfill[13]; /* U Reserved for expansion */
+
+ volatile unsigned char scc[16]; /* U SCC registers */
+};
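The tx_head/tx_tail/tx_max triple describes a classic power-of-two ring (tx_max is the buffer size minus one), so the free space a write path may fill is plain mask arithmetic. A minimal sketch:

static u16 dgap_tx_space(struct bs_t *bs)
{
	u16 head = readw(&bs->tx_head);
	u16 tail = readw(&bs->tx_tail);
	u16 tmax = readw(&bs->tx_max);

	/* One byte is kept free so head == tail always means "empty". */
	return (tail - head - 1) & tmax;
}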
+
+struct cnode {
+ struct cnode *next;
+ int type;
+ int numbrd;
+
+ union {
+ struct {
+ char type; /* Board Type */
+ long port; /* I/O Address */
+ char *portstr; /* I/O Address in string */
+ long addr; /* Memory Address */
+ char *addrstr; /* Memory Address in string */
+ long pcibus; /* PCI BUS */
+ char *pcibusstr; /* PCI BUS in string */
+ long pcislot; /* PCI SLOT */
+ char *pcislotstr; /* PCI SLOT in string */
+ long nport; /* Number of Ports */
+ char *id; /* tty id */
+ long start; /* start of tty counting */
+ char *method; /* Install method */
+ char v_type;
+ char v_port;
+ char v_addr;
+ char v_pcibus;
+ char v_pcislot;
+ char v_nport;
+ char v_id;
+ char v_start;
+ char v_method;
+ char line1;
+ char line2;
+ char conc1; /* total concs in line1 */
+ char conc2; /* total concs in line2 */
+ char module1; /* total modules for line1 */
+ char module2; /* total modules for line2 */
+ char *status; /* config status */
+ char *dimstatus; /* Y/N */
+ int status_index; /* field pointer */
+ } board;
+
+ struct {
+ char *cable;
+ char v_cable;
+ long speed;
+ char v_speed;
+ } line;
+
+ struct {
+ char type;
+ char *connect;
+ long speed;
+ long nport;
+ char *id;
+ char *idstr;
+ long start;
+ char v_type;
+ char v_connect;
+ char v_speed;
+ char v_nport;
+ char v_id;
+ char v_start;
+ } conc;
+
+ struct {
+ char type;
+ long nport;
+ char *id;
+ char *idstr;
+ long start;
+ char v_type;
+ char v_nport;
+ char v_id;
+ char v_start;
+ } module;
+
+ char *ttyname;
+
+ char *cuname;
+
+ char *printname;
+
+ long majornumber;
+
+ long altpin;
+
+ long ttysize;
+
+ long chsize;
+
+ long bssize;
+
+ long unsize;
+
+ long f2size;
+
+ long vpixsize;
+
+ long useintr;
+ } u;
+};
+
+#endif
diff --git a/drivers/staging/dgap/dgap_conf.h b/drivers/staging/dgap/dgap_conf.h
deleted file mode 100644
index 484ed726a4d6..000000000000
--- a/drivers/staging/dgap/dgap_conf.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *****************************************************************************
- *
- * dgap_conf.h - Header file for installations and parse files.
- *
- * $Id: dgap_conf.h,v 1.1 2009/10/23 14:01:57 markh Exp $
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef _DGAP_CONF_H
-#define _DGAP_CONF_H
-
-#define NULLNODE 0 /* header node, not used */
-#define BNODE 1 /* Board node */
-#define LNODE 2 /* Line node */
-#define CNODE 3 /* Concentrator node */
-#define MNODE 4 /* EBI Module node */
-#define TNODE 5 /* tty name prefix node */
-#define CUNODE 6 /* cu name prefix (non-SCO) */
-#define PNODE 7 /* trans. print prefix node */
-#define JNODE 8 /* maJor number node */
-#define ANODE 9 /* altpin */
-#define TSNODE 10 /* tty structure size */
-#define CSNODE 11 /* channel structure size */
-#define BSNODE 12 /* board structure size */
-#define USNODE 13 /* unit schedule structure size */
-#define FSNODE 14 /* f2200 structure size */
-#define VSNODE 15 /* size of VPIX structures */
-#define INTRNODE 16 /* enable interrupt */
-
-/* Enumeration of tokens */
-#define BEGIN 1
-#define END 2
-#define BOARD 10
-
-#define EPCFS 11 /* start of EPC family definitions */
-#define ICX 11
-#define MCX 13
-#define PCX 14
-#define IEPC 15
-#define EEPC 16
-#define MEPC 17
-#define IPCM 18
-#define EPCM 19
-#define MPCM 20
-#define PEPC 21
-#define PPCM 22
-#ifdef CP
-#define ICP 23
-#define ECP 24
-#define MCP 25
-#endif
-#define EPCFE 25 /* end of EPC family definitions */
-#define PC2E 26
-#define PC4E 27
-#define PC4E8K 28
-#define PC8E 29
-#define PC8E8K 30
-#define PC16E 31
-#define MC2E8K 34
-#define MC4E8K 35
-#define MC8E8K 36
-
-#define AVANFS 42 /* start of Avanstar family definitions */
-#define A8P 42
-#define A16P 43
-#define AVANFE 43 /* end of Avanstar family definitions */
-
-#define DA2000FS 44 /* start of AccelePort 2000 family definitions */
-#define DA22 44 /* AccelePort 2002 */
-#define DA24 45 /* AccelePort 2004 */
-#define DA28 46 /* AccelePort 2008 */
-#define DA216 47 /* AccelePort 2016 */
-#define DAR4 48 /* AccelePort RAS 4 port */
-#define DAR8 49 /* AccelePort RAS 8 port */
-#define DDR24 50 /* DataFire RAS 24 port */
-#define DDR30 51 /* DataFire RAS 30 port */
-#define DDR48 52 /* DataFire RAS 48 port */
-#define DDR60 53 /* DataFire RAS 60 port */
-#define DA2000FE 53 /* end of AccelePort 2000/RAS family definitions */
-
-#define PCXRFS 106 /* start of PCXR family definitions */
-#define APORT4 106
-#define APORT8 107
-#define PAPORT4 108
-#define PAPORT8 109
-#define APORT4_920I 110
-#define APORT8_920I 111
-#define APORT4_920P 112
-#define APORT8_920P 113
-#define APORT2_920P 114
-#define PCXRFE 117 /* end of PCXR family definitions */
-
-#define LINE 82
-#ifdef T1
-#define T1M 83
-#define E1M 84
-#endif
-#define CONC 64
-#define CX 65
-#define EPC 66
-#define MOD 67
-#define PORTS 68
-#define METHOD 69
-#define CUSTOM 70
-#define BASIC 71
-#define STATUS 72
-#define MODEM 73
-/* The following tokens can appear in multiple places */
-#define SPEED 74
-#define NPORTS 75
-#define ID 76
-#define CABLE 77
-#define CONNECT 78
-#define IO 79
-#define MEM 80
-#define DPSZ 81
-
-#define TTYN 90
-#define CU 91
-#define PRINT 92
-#define XPRINT 93
-#define CMAJOR 94
-#define ALTPIN 95
-#define STARTO 96
-#define USEINTR 97
-#define PCIINFO 98
-
-#define TTSIZ 100
-#define CHSIZ 101
-#define BSSIZ 102
-#define UNTSIZ 103
-#define F2SIZ 104
-#define VPSIZ 105
-
-#define TOTAL_BOARD 2
-#define CURRENT_BRD 4
-#define BOARD_TYPE 6
-#define IO_ADDRESS 8
-#define MEM_ADDRESS 10
-
-#define FIELDS_PER_PAGE 18
-
-#define TB_FIELD 1
-#define CB_FIELD 3
-#define BT_FIELD 5
-#define IO_FIELD 7
-#define ID_FIELD 8
-#define ME_FIELD 9
-#define TTY_FIELD 11
-#define CU_FIELD 13
-#define PR_FIELD 15
-#define MPR_FIELD 17
-
-#define MAX_FIELD 512
-
-#define INIT 0
-#define NITEMS 128
-#define MAX_ITEM 512
-
-#define DSCRINST 1
-#define DSCRNUM 3
-#define ALTPINQ 5
-#define SSAVE 7
-
-#define DSCR "32"
-#define ONETONINE "123456789"
-#define ALL "1234567890"
-
-
-struct cnode {
- struct cnode *next;
- int type;
- int numbrd;
-
- union {
- struct {
- char type; /* Board Type */
- short port; /* I/O Address */
- char *portstr; /* I/O Address in string */
- long addr; /* Memory Address */
- char *addrstr; /* Memory Address in string */
- long pcibus; /* PCI BUS */
- char *pcibusstr; /* PCI BUS in string */
- long pcislot; /* PCI SLOT */
- char *pcislotstr; /* PCI SLOT in string */
- char nport; /* Number of Ports */
- char *id; /* tty id */
- int start; /* start of tty counting */
- char *method; /* Install method */
- char v_type;
- char v_port;
- char v_addr;
- char v_pcibus;
- char v_pcislot;
- char v_nport;
- char v_id;
- char v_start;
- char v_method;
- char line1;
- char line2;
- char conc1; /* total concs in line1 */
- char conc2; /* total concs in line2 */
- char module1; /* total modules for line1 */
- char module2; /* total modules for line2 */
- char *status; /* config status */
- char *dimstatus; /* Y/N */
- int status_index; /* field pointer */
- } board;
-
- struct {
- char *cable;
- char v_cable;
- char speed;
- char v_speed;
- } line;
-
- struct {
- char type;
- char *connect;
- char speed;
- char nport;
- char *id;
- char *idstr;
- int start;
- char v_type;
- char v_connect;
- char v_speed;
- char v_nport;
- char v_id;
- char v_start;
- } conc;
-
- struct {
- char type;
- char nport;
- char *id;
- char *idstr;
- int start;
- char v_type;
- char v_nport;
- char v_id;
- char v_start;
- } module;
-
- char *ttyname;
-
- char *cuname;
-
- char *printname;
-
- int majornumber;
-
- int altpin;
-
- int ttysize;
-
- int chsize;
-
- int bssize;
-
- int unsize;
-
- int f2size;
-
- int vpixsize;
-
- int useintr;
- } u;
-};
-
-#endif
diff --git a/drivers/staging/dgap/dgap_downld.h b/drivers/staging/dgap/dgap_downld.h
deleted file mode 100644
index 271ac19257f9..000000000000
--- a/drivers/staging/dgap/dgap_downld.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * $Id: dgap_downld.h,v 1.1 2009/10/23 14:01:57 markh Exp $
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- */
-
-/*
-** downld.h
-** - describes the interface between the user level download process
-** and the concentrator download driver.
-*/
-
-#ifndef _DGAP_DOWNLD_H_
-#define _DGAP_DOWNLD_H_
-
-
-struct fepimg {
- int type; /* board type */
- int len; /* length of image */
- char fepimage[1]; /* beginning of image */
-};
-
-struct downldio {
- unsigned int req_type; /* FEP or concentrator */
- unsigned int bdid; /* opaque board identifier */
- union {
- struct downld_t dl; /* download structure */
- struct fepimg fi; /* fep/bios image structure */
- } image;
-};
-
-#define DIGI_DLREQ_GET (('d'<<8) | 220)
-#define DIGI_DLREQ_SET (('d'<<8) | 221)
-
-#define DIGI_DL_NUKE (('d'<<8) | 222) /* Not really a dl request, but
- dangerous enuff to not put in
- digi.h */
-/* Packed bits of intarg for DIGI_DL_NUKE */
-#define DIGI_NUKE_RESET_ALL (1 << 31)
-#define DIGI_NUKE_INHIBIT_POLLER (1 << 30)
-#define DIGI_NUKE_BRD_NUMB 0x0f
-
-
-
-#define DLREQ_BIOS 0
-#define DLREQ_FEP 1
-#define DLREQ_CONC 2
-#define DLREQ_CONFIG 3
-#define DLREQ_DEVCREATE 4
-
-#endif
diff --git a/drivers/staging/dgap/dgap_driver.c b/drivers/staging/dgap/dgap_driver.c
deleted file mode 100644
index 089d017fc291..000000000000
--- a/drivers/staging/dgap/dgap_driver.c
+++ /dev/null
@@ -1,1030 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- *
- * $Id: dgap_driver.c,v 1.3 2011/06/21 10:35:16 markh Exp $
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/delay.h> /* For udelay */
-#include <linux/slab.h>
-#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
-#include <linux/sched.h>
-
-#include "dgap_driver.h"
-#include "dgap_pci.h"
-#include "dgap_fep5.h"
-#include "dgap_tty.h"
-#include "dgap_conf.h"
-#include "dgap_parse.h"
-#include "dgap_trace.h"
-#include "dgap_sysfs.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Digi International, http://www.digi.com");
-MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
-MODULE_SUPPORTED_DEVICE("dgap");
-
-/*
- * insmod command line overrideable parameters
- *
- * NOTE: we use a set of macros to create the variables, which allows
- * us to specify the variable type, name, initial value, and description.
- */
-PARM_INT(debug, 0x00, 0644, "Driver debugging level");
-PARM_INT(rawreadok, 1, 0644, "Bypass flip buffers on input");
-PARM_INT(trcbuf_size, 0x100000, 0644, "Debugging trace buffer size.");
-
-
-/**************************************************************************
- *
- * protos for this file
- *
- */
-
-static int dgap_start(void);
-static void dgap_init_globals(void);
-static int dgap_found_board(struct pci_dev *pdev, int id);
-static void dgap_cleanup_board(struct board_t *brd);
-static void dgap_poll_handler(ulong dummy);
-static int dgap_init_pci(void);
-static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void dgap_remove_one(struct pci_dev *dev);
-static int dgap_probe1(struct pci_dev *pdev, int card_type);
-static void dgap_mbuf(struct board_t *brd, const char *fmt, ...);
-static int dgap_do_remap(struct board_t *brd);
-static irqreturn_t dgap_intr(int irq, void *voidbrd);
-
-/* Driver load/unload functions */
-int dgap_init_module(void);
-void dgap_cleanup_module(void);
-
-module_init(dgap_init_module);
-module_exit(dgap_cleanup_module);
-
-
-/*
- * File operations permitted on Control/Management major.
- */
-static struct file_operations DgapBoardFops =
-{
- .owner = THIS_MODULE,
-};
-
-
-/*
- * Globals
- */
-uint dgap_NumBoards;
-struct board_t *dgap_Board[MAXBOARDS];
-DEFINE_SPINLOCK(dgap_global_lock);
-ulong dgap_poll_counter;
-char *dgap_config_buf;
-int dgap_driver_state = DRIVER_INITIALIZED;
-DEFINE_SPINLOCK(dgap_dl_lock);
-wait_queue_head_t dgap_dl_wait;
-int dgap_dl_action;
-int dgap_poll_tick = 20; /* Poll interval - 20 ms */
-
-/*
- * Static vars.
- */
-static int dgap_Major_Control_Registered = FALSE;
-static uint dgap_driver_start = FALSE;
-
-static struct class * dgap_class;
-
-/*
- * Poller stuff
- */
-static DEFINE_SPINLOCK(dgap_poll_lock); /* Poll scheduling lock */
-static ulong dgap_poll_time; /* Time of next poll */
-static uint dgap_poll_stop; /* Used to tell poller to stop */
-static struct timer_list dgap_poll_timer;
-
-
-static struct pci_device_id dgap_pci_tbl[] = {
- { DIGI_VID, PCI_DEVICE_XEM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { DIGI_VID, PCI_DEVICE_CX_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
- { DIGI_VID, PCI_DEVICE_CX_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { DIGI_VID, PCI_DEVICE_EPCJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
- { DIGI_VID, PCI_DEVICE_920_2_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
- { DIGI_VID, PCI_DEVICE_920_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
- { DIGI_VID, PCI_DEVICE_920_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
- { DIGI_VID, PCI_DEVICE_XR_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
- { DIGI_VID, PCI_DEVICE_XRJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
- { DIGI_VID, PCI_DEVICE_XR_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
- { DIGI_VID, PCI_DEVICE_XR_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
- { DIGI_VID, PCI_DEVICE_XR_SAIP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
- { DIGI_VID, PCI_DEVICE_XR_BULL_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
- { DIGI_VID, PCI_DEVICE_920_8_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
- { DIGI_VID, PCI_DEVICE_XEM_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
- {0,} /* 0 terminated list. */
-};
-MODULE_DEVICE_TABLE(pci, dgap_pci_tbl);
-
-
-/*
- * A generic list of Product names, PCI Vendor ID, and PCI Device ID.
- */
-struct board_id {
- uint config_type;
- uchar *name;
- uint maxports;
- uint dpatype;
-};
-
-static struct board_id dgap_Ids[] =
-{
- { PPCM, PCI_DEVICE_XEM_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS) },
- { PCX, PCI_DEVICE_CX_NAME, 128, (T_CX | T_PCIBUS) },
- { PCX, PCI_DEVICE_CX_IBM_NAME, 128, (T_CX | T_PCIBUS) },
- { PEPC, PCI_DEVICE_EPCJ_NAME, 224, (T_EPC | T_PCIBUS) },
- { APORT2_920P, PCI_DEVICE_920_2_NAME, 2, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { APORT4_920P, PCI_DEVICE_920_4_NAME, 4, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { APORT8_920P, PCI_DEVICE_920_8_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XR_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XRJ_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XR_422_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XR_IBM_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XR_SAIP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XR_BULL_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { APORT8_920P, PCI_DEVICE_920_8_HP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PPCM, PCI_DEVICE_XEM_HP_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS) },
- {0,} /* 0 terminated list. */
-};
-
-static struct pci_driver dgap_driver = {
- .name = "dgap",
- .probe = dgap_init_one,
- .id_table = dgap_pci_tbl,
- .remove = dgap_remove_one,
-};
-
-
-char *dgap_state_text[] = {
- "Board Failed",
- "Configuration for board not found.\n\t\t\tRun mpi to configure board.",
- "Board Found",
- "Need Reset",
- "Finished Reset",
- "Need Config",
- "Finished Config",
- "Need Device Creation",
- "Requested Device Creation",
- "Finished Device Creation",
- "Need BIOS Load",
- "Requested BIOS",
- "Doing BIOS Load",
- "Finished BIOS Load",
- "Need FEP Load",
- "Requested FEP",
- "Doing FEP Load",
- "Finished FEP Load",
- "Requested PROC creation",
- "Finished PROC creation",
- "Board READY",
-};
-
-char *dgap_driver_state_text[] = {
- "Driver Initialized",
- "Driver needs configuration load.",
- "Driver requested configuration from download daemon.",
- "Driver Ready."
-};
-
-
-
-/************************************************************************
- *
- * Driver load/unload functions
- *
- ************************************************************************/
-
-/*
- * init_module()
- *
- * Module load. This is where it all starts.
- */
-int dgap_init_module(void)
-{
- int rc = 0;
-
- APR(("%s, Digi International Part Number %s\n", DG_NAME, DG_PART));
-
- /*
- * Initialize global stuff
- */
- rc = dgap_start();
-
- if (rc < 0) {
- return(rc);
- }
-
- /*
- * Find and configure all the cards
- */
- rc = dgap_init_pci();
-
- /*
- * If something went wrong in the scan, bail out of driver.
- */
- if (rc < 0) {
- /* Only unregister the pci driver if it was actually registered. */
- if (dgap_NumBoards)
- pci_unregister_driver(&dgap_driver);
- else
- printk("WARNING: dgap driver load failed. No DGAP boards found.\n");
-
- dgap_cleanup_module();
- }
- else {
- dgap_create_driver_sysfiles(&dgap_driver);
- }
-
- DPR_INIT(("Finished init_module. Returning %d\n", rc));
- return (rc);
-}
-
-
-/*
- * Start of driver.
- */
-static int dgap_start(void)
-{
- int rc = 0;
- unsigned long flags;
-
- if (dgap_driver_start == FALSE) {
-
- dgap_driver_start = TRUE;
-
- /* make sure that the globals are init'd before we do anything else */
- dgap_init_globals();
-
- dgap_NumBoards = 0;
-
- APR(("For the tools package or updated drivers please visit http://www.digi.com\n"));
-
- /*
- * Register our base character device into the kernel.
- * This allows the download daemon to connect to the downld device
- * before any of the boards are init'ed.
- */
- if (!dgap_Major_Control_Registered) {
- /*
- * Register management/dpa devices
- */
- rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &DgapBoardFops);
- if (rc < 0) {
- APR(("Can't register dgap driver device (%d)\n", rc));
- return (rc);
- }
-
- dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
- device_create(dgap_class, NULL,
- MKDEV(DIGI_DGAP_MAJOR, 0),
- NULL, "dgap_mgmt");
- device_create(dgap_class, NULL,
- MKDEV(DIGI_DGAP_MAJOR, 1),
- NULL, "dgap_downld");
- dgap_Major_Control_Registered = TRUE;
- }
-
- /*
- * Init any global tty stuff.
- */
- rc = dgap_tty_preinit();
-
- if (rc < 0) {
- APR(("tty preinit - not enough memory (%d)\n", rc));
- return(rc);
- }
-
- /* Start the poller */
- DGAP_LOCK(dgap_poll_lock, flags);
- init_timer(&dgap_poll_timer);
- dgap_poll_timer.function = dgap_poll_handler;
- dgap_poll_timer.data = 0;
- dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- dgap_poll_timer.expires = dgap_poll_time;
- DGAP_UNLOCK(dgap_poll_lock, flags);
-
- add_timer(&dgap_poll_timer);
-
- dgap_driver_state = DRIVER_NEED_CONFIG_LOAD;
- }
-
- return (rc);
-}
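
dgap_start() arms the poll timer with the legacy init_timer()/.data interface. On kernels that provide timer_setup(), the same arming step could look like the sketch below; the conversion is an assumption for illustration and reuses the driver's own globals (dgap_poll_timer, dgap_poll_time, dgap_poll_tick).

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Sketch: arm the poller via timer_setup()/mod_timer() instead of init_timer(). */
static void dgap_poll_handler_sketch(struct timer_list *unused);

static void dgap_arm_poller_sketch(void)
{
	timer_setup(&dgap_poll_timer, dgap_poll_handler_sketch, 0);
	dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
	mod_timer(&dgap_poll_timer, dgap_poll_time);
}
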
-
-
-/*
- * Register pci driver, and return how many boards we have.
- */
-static int dgap_init_pci(void)
-{
- return pci_register_driver(&dgap_driver);
-}
-
-
-/* returns 0 on success, or negative on error */
-static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int rc;
-
- /* wake up and enable device */
- rc = pci_enable_device(pdev);
-
- if (rc < 0) {
- rc = -EIO;
- } else {
- rc = dgap_probe1(pdev, ent->driver_data);
- if (rc == 0) {
- dgap_NumBoards++;
- DPR_INIT(("Incrementing numboards to %d\n", dgap_NumBoards));
- }
- }
- return rc;
-}
-
-
-static int dgap_probe1(struct pci_dev *pdev, int card_type)
-{
- return dgap_found_board(pdev, card_type);
-}
-
-
-static void dgap_remove_one(struct pci_dev *dev)
-{
- /* Do Nothing */
-}
-
-
-/*
- * dgap_cleanup_module()
- *
- * Module unload. This is where it all ends.
- */
-void dgap_cleanup_module(void)
-{
- int i;
- ulong lock_flags;
-
- DGAP_LOCK(dgap_poll_lock, lock_flags);
- dgap_poll_stop = 1;
- DGAP_UNLOCK(dgap_poll_lock, lock_flags);
-
- /* Turn off poller right away. */
- del_timer_sync( &dgap_poll_timer);
-
- dgap_remove_driver_sysfiles(&dgap_driver);
-
-
- if (dgap_Major_Control_Registered) {
- device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
- device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 1));
- class_destroy(dgap_class);
- unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
- }
-
- kfree(dgap_config_buf);
-
- for (i = 0; i < dgap_NumBoards; ++i) {
- dgap_remove_ports_sysfiles(dgap_Board[i]);
- dgap_tty_uninit(dgap_Board[i]);
- dgap_cleanup_board(dgap_Board[i]);
- }
-
- dgap_tty_post_uninit();
-
-#if defined(DGAP_TRACER)
- /* last thing, make sure we release the tracebuffer */
- dgap_tracer_free();
-#endif
- if (dgap_NumBoards)
- pci_unregister_driver(&dgap_driver);
-}
-
-
-/*
- * dgap_cleanup_board()
- *
- * Free all the memory associated with a board
- */
-static void dgap_cleanup_board(struct board_t *brd)
-{
- int i = 0;
-
- if(!brd || brd->magic != DGAP_BOARD_MAGIC)
- return;
-
- if (brd->intr_used && brd->irq)
- free_irq(brd->irq, brd);
-
- tasklet_kill(&brd->helper_tasklet);
-
- if (brd->re_map_port) {
- release_mem_region(brd->membase + 0x200000, 0x200000);
- iounmap(brd->re_map_port);
- brd->re_map_port = NULL;
- }
-
- if (brd->re_map_membase) {
- release_mem_region(brd->membase, 0x200000);
- iounmap(brd->re_map_membase);
- brd->re_map_membase = NULL;
- }
-
- if (brd->msgbuf_head) {
- unsigned long flags;
-
- DGAP_LOCK(dgap_global_lock, flags);
- brd->msgbuf = NULL;
- printk("%s", brd->msgbuf_head);
- kfree(brd->msgbuf_head);
- brd->msgbuf_head = NULL;
- DGAP_UNLOCK(dgap_global_lock, flags);
- }
-
- /* Free all allocated channels structs */
- for (i = 0; i < MAXPORTS ; i++) {
- if (brd->channels[i]) {
- kfree(brd->channels[i]);
- brd->channels[i] = NULL;
- }
- }
-
- kfree(brd->flipbuf);
- kfree(brd->flipflagbuf);
-
- dgap_Board[brd->boardnum] = NULL;
-
- kfree(brd);
-}
-
-
-/*
- * dgap_found_board()
- *
- * A board has been found, init it.
- */
-static int dgap_found_board(struct pci_dev *pdev, int id)
-{
- struct board_t *brd;
- unsigned int pci_irq;
- int i = 0;
- unsigned long flags;
-
- /* get the board structure and prep it */
- brd = dgap_Board[dgap_NumBoards] =
- (struct board_t *) kzalloc(sizeof(struct board_t), GFP_KERNEL);
- if (!brd) {
- APR(("memory allocation for board structure failed\n"));
- return(-ENOMEM);
- }
-
- /* make a temporary message buffer for the boot messages */
- brd->msgbuf = brd->msgbuf_head =
- (char *) kzalloc(sizeof(char) * 8192, GFP_KERNEL);
- if(!brd->msgbuf) {
- kfree(brd);
- APR(("memory allocation for board msgbuf failed\n"));
- return(-ENOMEM);
- }
-
- /* store the info for the board we've found */
- brd->magic = DGAP_BOARD_MAGIC;
- brd->boardnum = dgap_NumBoards;
- brd->firstminor = 0;
- brd->vendor = dgap_pci_tbl[id].vendor;
- brd->device = dgap_pci_tbl[id].device;
- brd->pdev = pdev;
- brd->pci_bus = pdev->bus->number;
- brd->pci_slot = PCI_SLOT(pdev->devfn);
- brd->name = dgap_Ids[id].name;
- brd->maxports = dgap_Ids[id].maxports;
- brd->type = dgap_Ids[id].config_type;
- brd->dpatype = dgap_Ids[id].dpatype;
- brd->dpastatus = BD_NOFEP;
- init_waitqueue_head(&brd->state_wait);
-
- DGAP_SPINLOCK_INIT(brd->bd_lock);
-
- brd->state = BOARD_FOUND;
- brd->runwait = 0;
- brd->inhibit_poller = FALSE;
- brd->wait_for_bios = 0;
- brd->wait_for_fep = 0;
-
- for (i = 0; i < MAXPORTS; i++) {
- brd->channels[i] = NULL;
- }
-
- /* store which card & revision we have */
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
-
- pci_irq = pdev->irq;
- brd->irq = pci_irq;
-
- /* get the PCI Base Address Registers */
-
- /* Xr Jupiter and EPC use BAR 2 */
- if (brd->device == PCI_DEVICE_XRJ_DID || brd->device == PCI_DEVICE_EPCJ_DID) {
- brd->membase = pci_resource_start(pdev, 2);
- brd->membase_end = pci_resource_end(pdev, 2);
- }
- /* Everyone else uses BAR 0 */
- else {
- brd->membase = pci_resource_start(pdev, 0);
- brd->membase_end = pci_resource_end(pdev, 0);
- }
-
- if (!brd->membase) {
- APR(("card has no PCI IO resources, failing board.\n"));
- return -ENODEV;
- }
-
- if (brd->membase & 1)
- brd->membase &= ~3;
- else
- brd->membase &= ~15;
-
- /*
- * On the PCI boards, there is no IO space allocated.
- * The I/O registers will be in the first 3 bytes of the
- * upper 2MB of the 4MB memory space. The board memory
- * will be mapped into the low 2MB of the 4MB memory space.
- */
- brd->port = brd->membase + PCI_IO_OFFSET;
- brd->port_end = brd->port + PCI_IO_SIZE;
-
-
- /*
- * Special initialization for non-PLX boards
- */
- if (brd->device != PCI_DEVICE_XRJ_DID && brd->device != PCI_DEVICE_EPCJ_DID) {
- unsigned short cmd;
-
- pci_write_config_byte(pdev, 0x40, 0);
- pci_write_config_byte(pdev, 0x46, 0);
-
- /* Limit burst length to 2 doubleword transactions */
- pci_write_config_byte(pdev, 0x42, 1);
-
- /*
- * Enable IO and mem if not already done.
- * This was needed for support on Itanium.
- */
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
- }
-
- /* init our poll helper tasklet */
- tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet, (unsigned long) brd);
-
- /* Log the information about the board */
- dgap_mbuf(brd, DRVSTR": board %d: %s (rev %d), irq %d\n",
- dgap_NumBoards, brd->name, brd->rev, brd->irq);
-
- DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i));
- DGAP_LOCK(dgap_global_lock, flags);
- brd->msgbuf = NULL;
- printk("%s", brd->msgbuf_head);
- kfree(brd->msgbuf_head);
- brd->msgbuf_head = NULL;
- DGAP_UNLOCK(dgap_global_lock, flags);
-
- i = dgap_do_remap(brd);
- if (i)
- brd->state = BOARD_FAILED;
- else
- brd->state = NEED_RESET;
-
- return(0);
-}
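
The BAR choice above (BAR 2 for the PLX-based XRJ/EPCJ boards, BAR 0 for everything else) can be double-checked with the generic PCI resource helpers before it is used. A hedged sketch follows; the 4MB length check is an assumption taken from the memory-layout comment, not something the original code performs.

/* Sketch: pick the memory BAR the same way as above, then validate it. */
static int dgap_pick_bar_sketch(struct pci_dev *pdev, struct board_t *brd)
{
	int bar = (brd->device == PCI_DEVICE_XRJ_DID ||
		   brd->device == PCI_DEVICE_EPCJ_DID) ? 2 : 0;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -ENODEV;			/* not a memory BAR at all */
	if (pci_resource_len(pdev, bar) < 0x400000)
		return -ENODEV;			/* smaller than the assumed 4MB window */

	brd->membase = pci_resource_start(pdev, bar);
	brd->membase_end = pci_resource_end(pdev, bar);
	return 0;
}
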
-
-
-int dgap_finalize_board_init(struct board_t *brd) {
-
- int rc;
-
- DPR_INIT(("dgap_finalize_board_init() - start\n"));
-
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return(-ENODEV);
-
- DPR_INIT(("dgap_finalize_board_init() - start #2\n"));
-
- brd->use_interrupts = dgap_config_get_useintr(brd);
-
- /*
- * Set up our interrupt handler if we are set to do interrupts.
- */
- if (brd->use_interrupts && brd->irq) {
-
- rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
-
- if (rc) {
- dgap_mbuf(brd, DRVSTR": Failed to hook IRQ %d. Board will work in poll mode.\n",
- brd->irq);
- brd->intr_used = 0;
- }
- else
- brd->intr_used = 1;
- } else {
- brd->intr_used = 0;
- }
-
- return(0);
-}
-
-
-/*
- * Remap PCI memory.
- */
-static int dgap_do_remap(struct board_t *brd)
-{
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -ENXIO;
-
- if (!request_mem_region(brd->membase, 0x200000, "dgap")) {
- APR(("dgap: mem_region %lx already in use.\n", brd->membase));
- return -ENOMEM;
- }
-
- if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000, "dgap")) {
- APR(("dgap: mem_region IO %lx already in use.\n",
- brd->membase + PCI_IO_OFFSET));
- release_mem_region(brd->membase, 0x200000);
- return -ENOMEM;
- }
-
- brd->re_map_membase = ioremap(brd->membase, 0x200000);
- if (!brd->re_map_membase) {
- APR(("dgap: ioremap mem %lx cannot be mapped.\n", brd->membase));
- release_mem_region(brd->membase, 0x200000);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- return -ENOMEM;
- }
-
- brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
- if (!brd->re_map_port) {
- release_mem_region(brd->membase, 0x200000);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- iounmap(brd->re_map_membase);
- APR(("dgap: ioremap IO mem %lx cannot be mapped.\n",
- brd->membase + PCI_IO_OFFSET));
- return -ENOMEM;
- }
-
- DPR_INIT(("remapped io: 0x%p remapped mem: 0x%p\n",
- brd->re_map_port, brd->re_map_membase));
- return 0;
-}
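
Because both halves of the window live in the same BAR, one alternative layout (not what this driver does) is a single reservation and mapping of the whole region, with the I/O pointer derived by offset. A sketch under the assumption that the window really is 0x400000 bytes:

/* Sketch: one 4MB reservation/mapping; port pointer is membase + PCI_IO_OFFSET. */
static int dgap_do_remap_single_sketch(struct board_t *brd)
{
	if (!request_mem_region(brd->membase, 0x400000, "dgap"))
		return -EBUSY;

	brd->re_map_membase = ioremap(brd->membase, 0x400000);
	if (!brd->re_map_membase) {
		release_mem_region(brd->membase, 0x400000);
		return -ENOMEM;
	}

	brd->re_map_port = brd->re_map_membase + PCI_IO_OFFSET;
	return 0;
}
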
-
-
-/*****************************************************************************
-*
-* Function:
-*
-* dgap_poll_handler
-*
-* Author:
-*
-* Scott H Kilau
-*
-* Parameters:
-*
-* dummy -- ignored
-*
-* Return Values:
-*
-* none
-*
-* Description:
-*
-* As each timer expires, it determines (a) whether the "transmit"
-* waiter needs to be woken up, and (b) whether the poller needs to
-* be rescheduled.
-*
-******************************************************************************/
-
-static void dgap_poll_handler(ulong dummy)
-{
- int i;
- struct board_t *brd;
- unsigned long lock_flags;
- unsigned long lock_flags2;
- ulong new_time;
-
- dgap_poll_counter++;
-
-
- /*
- * If driver needs the config file still,
- * keep trying to wake up the downloader to
- * send us the file.
- */
- if (dgap_driver_state == DRIVER_NEED_CONFIG_LOAD) {
- /*
- * Signal the downloader; it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
- goto schedule_poller;
- }
- /*
- * Do not start the board state machine until
- * driver tells us its up and running, and has
- * everything it needs.
- */
- else if (dgap_driver_state != DRIVER_READY) {
- goto schedule_poller;
- }
-
- /*
- * If we have just 1 board, or the system is not SMP,
- * then use the typical old style poller.
- * Otherwise, use our new tasklet based poller, which should
- * speed things up for multiple boards.
- */
- if ( (dgap_NumBoards == 1) || (num_online_cpus() <= 1) ) {
- for (i = 0; i < dgap_NumBoards; i++) {
-
- brd = dgap_Board[i];
-
- if (brd->state == BOARD_FAILED) {
- continue;
- }
- if (!brd->intr_running) {
- /* Call the real board poller directly */
- dgap_poll_tasklet((unsigned long) brd);
- }
- }
- }
- else {
- /* Go thru each board, kicking off a tasklet for each if needed */
- for (i = 0; i < dgap_NumBoards; i++) {
- brd = dgap_Board[i];
-
- /*
- * Attempt to grab the board lock.
- *
- * If we can't get it, no big deal, the next poll will get it.
- * Basically, I just really don't want to spin in here, because I want
- * to kick off my tasklets as fast as I can, and then get out of the poller.
- */
- if (!spin_trylock(&brd->bd_lock)) {
- continue;
- }
-
- /* If board is in a failed state, don't bother scheduling a tasklet */
- if (brd->state == BOARD_FAILED) {
- spin_unlock(&brd->bd_lock);
- continue;
- }
-
- /* Schedule a poll helper task */
- if (!brd->intr_running) {
- tasklet_schedule(&brd->helper_tasklet);
- }
-
- /*
- * Can't do DGAP_UNLOCK here, as we don't have
- * lock_flags because we did a trylock above.
- */
- spin_unlock(&brd->bd_lock);
- }
- }
-
-schedule_poller:
-
- /*
- * Schedule ourself back at the nominal wakeup interval.
- */
- DGAP_LOCK(dgap_poll_lock, lock_flags );
- dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
-
- new_time = dgap_poll_time - jiffies;
-
- if ((ulong) new_time >= 2 * dgap_poll_tick) {
- dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- }
-
- dgap_poll_timer.function = dgap_poll_handler;
- dgap_poll_timer.data = 0;
- dgap_poll_timer.expires = dgap_poll_time;
- DGAP_UNLOCK(dgap_poll_lock, lock_flags );
-
- if (!dgap_poll_stop)
- add_timer(&dgap_poll_timer);
-}
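
The rescheduling block at the bottom advances the expiry by one tick, but resynchronizes to "now" if the poller has fallen well behind. The same arithmetic in isolation, as a sketch; note the code above compares the jiffies delta against 2 * dgap_poll_tick (a millisecond count), while the sketch keeps everything in jiffies.

/* Sketch: compute the next poll expiry; resync if we have drifted too far. */
static ulong dgap_next_poll_sketch(ulong prev_expires, ulong now)
{
	ulong tick = dgap_jiffies_from_ms(dgap_poll_tick);
	ulong next = prev_expires + tick;

	/* If 'next' is already in the past (or absurdly far out), restart from now. */
	if ((ulong)(next - now) >= 2 * tick)
		next = now + tick;

	return next;
}
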
-
-
-
-
-/*
- * dgap_intr()
- *
- * Driver interrupt handler.
- */
-static irqreturn_t dgap_intr(int irq, void *voidbrd)
-{
- struct board_t *brd = (struct board_t *) voidbrd;
-
- if (!brd) {
- APR(("Received interrupt (%d) with null board associated\n", irq));
- return IRQ_NONE;
- }
-
- /*
- * Check to make sure it's for us.
- */
- if (brd->magic != DGAP_BOARD_MAGIC) {
- APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
- return IRQ_NONE;
- }
-
- brd->intr_count++;
-
- /*
- * Schedule tasklet to run at a better time.
- */
- tasklet_schedule(&brd->helper_tasklet);
- return IRQ_HANDLED;
-}
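
The handler above is a classic top half: count the interrupt and defer everything to the tasklet. Below is a sketch of the same split expressed as a threaded IRQ instead; this is an illustrative alternative, not how the driver is wired, and it reuses dgap_poll_tasklet() as the deferred body.

/* Sketch: quick check in hard-IRQ context, real work in the IRQ thread. */
static irqreturn_t dgap_intr_quick_sketch(int irq, void *voidbrd)
{
	struct board_t *brd = voidbrd;

	if (!brd || brd->magic != DGAP_BOARD_MAGIC)
		return IRQ_NONE;

	brd->intr_count++;
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dgap_intr_thread_sketch(int irq, void *voidbrd)
{
	dgap_poll_tasklet((unsigned long)voidbrd);
	return IRQ_HANDLED;
}

/* Registered with:
 *   request_threaded_irq(brd->irq, dgap_intr_quick_sketch,
 *			  dgap_intr_thread_sketch, IRQF_SHARED, "DGAP", brd);
 */
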
-
-
-/*
- * dgap_init_globals()
- *
- * This is where we initialize the globals from the static insmod
- * configuration variables. These are declared near the head of
- * this file.
- */
-static void dgap_init_globals(void)
-{
- int i = 0;
-
- dgap_rawreadok = rawreadok;
- dgap_trcbuf_size = trcbuf_size;
- dgap_debug = debug;
-
- for (i = 0; i < MAXBOARDS; i++) {
- dgap_Board[i] = NULL;
- }
-
- init_timer( &dgap_poll_timer );
-
- init_waitqueue_head(&dgap_dl_wait);
- dgap_dl_action = 0;
-}
-
-
-/************************************************************************
- *
- * Utility functions
- *
- ************************************************************************/
-
-
-/*
- * dgap_mbuf()
- *
- * Used to print to the message buffer during board init.
- */
-static void dgap_mbuf(struct board_t *brd, const char *fmt, ...) {
- va_list ap;
- char buf[1024];
- int i;
- unsigned long flags;
- size_t length;
-
- DGAP_LOCK(dgap_global_lock, flags);
-
- /* Format buf using fmt and arguments contained in ap. */
- va_start(ap, fmt);
- i = vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
-
- DPR((buf));
-
- if (!brd || !brd->msgbuf) {
- printk("%s", buf);
- DGAP_UNLOCK(dgap_global_lock, flags);
- return;
- }
-
- length = strlen(buf) + 1;
- if (brd->msgbuf - brd->msgbuf_head < length)
- length = brd->msgbuf - brd->msgbuf_head;
- memcpy(brd->msgbuf, buf, length);
- brd->msgbuf += length;
-
- DGAP_UNLOCK(dgap_global_lock, flags);
-}
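
dgap_mbuf() appends into the temporary message buffer allocated in dgap_found_board(). Below is a sketch of just the append step with the remaining space spelled out; the 8192 size is carried over from that allocation and is otherwise an assumption here.

/* Sketch: bounded append into brd->msgbuf, never writing past the 8 KB buffer. */
static void dgap_mbuf_append_sketch(struct board_t *brd, const char *buf)
{
	size_t used = brd->msgbuf - brd->msgbuf_head;
	size_t space = 8192 - used;		/* size allocated in dgap_found_board() */
	size_t length = strlen(buf) + 1;	/* include the NUL, as above */

	if (length > space)
		length = space;

	memcpy(brd->msgbuf, buf, length);
	brd->msgbuf += length;
}
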
-
-
-/*
- * dgap_ms_sleep()
- *
- * Put the driver to sleep for x ms.
- *
- * Returns 0 if it timed out, non-zero if interrupted by a signal.
- */
-int dgap_ms_sleep(ulong ms)
-{
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout((ms * HZ) / 1000);
- return (signal_pending(current));
-}
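
On kernels that provide msleep_interruptible() (linux/delay.h), the same sleep-or-signal behaviour can be written without touching current->state directly; a sketch that keeps the "non-zero means interrupted" return convention used above:

#include <linux/delay.h>

/* Sketch: sleep for ms milliseconds; non-zero return means a signal woke us. */
static int dgap_ms_sleep_sketch(ulong ms)
{
	return msleep_interruptible(ms) ? 1 : 0;
}
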
-
-
-
-/*
- * dgap_ioctl_name() : Returns a text version of each ioctl value.
- */
-char *dgap_ioctl_name(int cmd)
-{
- switch(cmd) {
-
- case TCGETA: return("TCGETA");
- case TCGETS: return("TCGETS");
- case TCSETA: return("TCSETA");
- case TCSETS: return("TCSETS");
- case TCSETAW: return("TCSETAW");
- case TCSETSW: return("TCSETSW");
- case TCSETAF: return("TCSETAF");
- case TCSETSF: return("TCSETSF");
- case TCSBRK: return("TCSBRK");
- case TCXONC: return("TCXONC");
- case TCFLSH: return("TCFLSH");
- case TIOCGSID: return("TIOCGSID");
-
- case TIOCGETD: return("TIOCGETD");
- case TIOCSETD: return("TIOCSETD");
- case TIOCGWINSZ: return("TIOCGWINSZ");
- case TIOCSWINSZ: return("TIOCSWINSZ");
-
- case TIOCMGET: return("TIOCMGET");
- case TIOCMSET: return("TIOCMSET");
- case TIOCMBIS: return("TIOCMBIS");
- case TIOCMBIC: return("TIOCMBIC");
-
- /* from digi.h */
- case DIGI_SETA: return("DIGI_SETA");
- case DIGI_SETAW: return("DIGI_SETAW");
- case DIGI_SETAF: return("DIGI_SETAF");
- case DIGI_SETFLOW: return("DIGI_SETFLOW");
- case DIGI_SETAFLOW: return("DIGI_SETAFLOW");
- case DIGI_GETFLOW: return("DIGI_GETFLOW");
- case DIGI_GETAFLOW: return("DIGI_GETAFLOW");
- case DIGI_GETA: return("DIGI_GETA");
- case DIGI_GEDELAY: return("DIGI_GEDELAY");
- case DIGI_SEDELAY: return("DIGI_SEDELAY");
- case DIGI_GETCUSTOMBAUD: return("DIGI_GETCUSTOMBAUD");
- case DIGI_SETCUSTOMBAUD: return("DIGI_SETCUSTOMBAUD");
- case TIOCMODG: return("TIOCMODG");
- case TIOCMODS: return("TIOCMODS");
- case TIOCSDTR: return("TIOCSDTR");
- case TIOCCDTR: return("TIOCCDTR");
-
- default: return("unknown");
- }
-}
diff --git a/drivers/staging/dgap/dgap_driver.h b/drivers/staging/dgap/dgap_driver.h
deleted file mode 100644
index 2f7a55a7e40d..000000000000
--- a/drivers/staging/dgap/dgap_driver.h
+++ /dev/null
@@ -1,620 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- *************************************************************************
- *
- * Driver includes
- *
- *************************************************************************/
-
-#ifndef __DGAP_DRIVER_H
-#define __DGAP_DRIVER_H
-
-#include <linux/version.h> /* To get the current Linux version */
-#include <linux/types.h> /* To pick up the varions Linux types */
-#include <linux/tty.h> /* To pick up the various tty structs/defines */
-#include <linux/interrupt.h> /* For irqreturn_t type */
-
-#include "dgap_types.h" /* Additional types needed by the Digi header files */
-#include "digi.h" /* Digi specific ioctl header */
-#include "dgap_kcompat.h" /* Kernel 2.4/2.6 compat includes */
-#include "dgap_sysfs.h" /* Support for SYSFS */
-
-/*************************************************************************
- *
- * Driver defines
- *
- *************************************************************************/
-
-/*
- * Driver identification, error and debugging statements
- *
- * In theory, you can change all occurrences of "digi" in the next
- * three lines, and the driver printk's will all automagically change.
- *
- * APR((fmt, args, ...)); Always prints message
- * DPR((fmt, args, ...)); Only prints if DGAP_TRACER is defined at
- * compile time and dgap_debug!=0
- */
-#define DG_NAME "dgap-1.3-16"
-#define DG_PART "40002347_C"
-
-#define PROCSTR "dgap" /* /proc entries */
-#define DEVSTR "/dev/dg/dgap" /* /dev entries */
-#define DRVSTR "dgap" /* Driver name string
- * displayed by APR */
-#define APR(args) do { PRINTF_TO_KMEM(args); printk(DRVSTR": "); printk args; \
- } while (0)
-#define RAPR(args) do { PRINTF_TO_KMEM(args); printk args; } while (0)
-
-#define TRC_TO_CONSOLE 1
-
-/*
- * Debugging levels can be set using debug insmod variable
- * They can also be compiled out completely.
- */
-
-#define DBG_INIT (dgap_debug & 0x01)
-#define DBG_BASIC (dgap_debug & 0x02)
-#define DBG_CORE (dgap_debug & 0x04)
-
-#define DBG_OPEN (dgap_debug & 0x08)
-#define DBG_CLOSE (dgap_debug & 0x10)
-#define DBG_READ (dgap_debug & 0x20)
-#define DBG_WRITE (dgap_debug & 0x40)
-
-#define DBG_IOCTL (dgap_debug & 0x80)
-
-#define DBG_PROC (dgap_debug & 0x100)
-#define DBG_PARAM (dgap_debug & 0x200)
-#define DBG_PSCAN (dgap_debug & 0x400)
-#define DBG_EVENT (dgap_debug & 0x800)
-
-#define DBG_DRAIN (dgap_debug & 0x1000)
-#define DBG_CARR (dgap_debug & 0x2000)
-
-#define DBG_MGMT (dgap_debug & 0x4000)
-
-
-#if defined(DGAP_TRACER)
-
-# if defined(TRC_TO_KMEM)
-/* Choose one: */
-# define TRC_ON_OVERFLOW_WRAP_AROUND
-# undef TRC_ON_OVERFLOW_SHIFT_BUFFER
-# endif //TRC_TO_KMEM
-
-# define TRC_MAXMSG 1024
-# define TRC_OVERFLOW "(OVERFLOW)"
-# define TRC_DTRC "/usr/bin/dtrc"
-
-#if defined TRC_TO_CONSOLE
-#define PRINTF_TO_CONSOLE(args) { printk(DRVSTR": "); printk args; }
-#else //!defined TRC_TO_CONSOLE
-#define PRINTF_TO_CONSOLE(args)
-#endif
-
-#if defined TRC_TO_KMEM
-#define PRINTF_TO_KMEM(args) dgap_tracef args
-#else //!defined TRC_TO_KMEM
-#define PRINTF_TO_KMEM(args)
-#endif
-
-#define TRC(args) { PRINTF_TO_KMEM(args); PRINTF_TO_CONSOLE(args) }
-
-# define DPR_INIT(ARGS) if (DBG_INIT) TRC(ARGS)
-# define DPR_BASIC(ARGS) if (DBG_BASIC) TRC(ARGS)
-# define DPR_CORE(ARGS) if (DBG_CORE) TRC(ARGS)
-# define DPR_OPEN(ARGS) if (DBG_OPEN) TRC(ARGS)
-# define DPR_CLOSE(ARGS) if (DBG_CLOSE) TRC(ARGS)
-# define DPR_READ(ARGS) if (DBG_READ) TRC(ARGS)
-# define DPR_WRITE(ARGS) if (DBG_WRITE) TRC(ARGS)
-# define DPR_IOCTL(ARGS) if (DBG_IOCTL) TRC(ARGS)
-# define DPR_PROC(ARGS) if (DBG_PROC) TRC(ARGS)
-# define DPR_PARAM(ARGS) if (DBG_PARAM) TRC(ARGS)
-# define DPR_PSCAN(ARGS) if (DBG_PSCAN) TRC(ARGS)
-# define DPR_EVENT(ARGS) if (DBG_EVENT) TRC(ARGS)
-# define DPR_DRAIN(ARGS) if (DBG_DRAIN) TRC(ARGS)
-# define DPR_CARR(ARGS) if (DBG_CARR) TRC(ARGS)
-# define DPR_MGMT(ARGS) if (DBG_MGMT) TRC(ARGS)
-
-# define DPR(ARGS) if (dgap_debug) TRC(ARGS)
-# define P(X) dgap_tracef(#X "=%p\n", X)
-# define X(X) dgap_tracef(#X "=%x\n", X)
-
-#else//!defined DGAP_TRACER
-
-#define PRINTF_TO_KMEM(args)
-# define TRC(ARGS)
-# define DPR_INIT(ARGS)
-# define DPR_BASIC(ARGS)
-# define DPR_CORE(ARGS)
-# define DPR_OPEN(ARGS)
-# define DPR_CLOSE(ARGS)
-# define DPR_READ(ARGS)
-# define DPR_WRITE(ARGS)
-# define DPR_IOCTL(ARGS)
-# define DPR_PROC(ARGS)
-# define DPR_PARAM(ARGS)
-# define DPR_PSCAN(ARGS)
-# define DPR_EVENT(ARGS)
-# define DPR_DRAIN(ARGS)
-# define DPR_CARR(ARGS)
-# define DPR_MGMT(ARGS)
-
-# define DPR(args)
-
-#endif//DGAP_TRACER
-
-/* Number of boards we support at once. */
-#define MAXBOARDS 32
-#define MAXPORTS 224
-#define MAXTTYNAMELEN 200
-
-/* Our 3 magic numbers for our board, channel and unit structs */
-#define DGAP_BOARD_MAGIC 0x5c6df104
-#define DGAP_CHANNEL_MAGIC 0x6c6df104
-#define DGAP_UNIT_MAGIC 0x7c6df104
-
-/* Serial port types */
-#define DGAP_SERIAL 0
-#define DGAP_PRINT 1
-
-#define SERIAL_TYPE_NORMAL 1
-
-/* 4 extra for alignment play space */
-#define WRITEBUFLEN ((4096) + 4)
-#define MYFLIPLEN N_TTY_BUF_SIZE
-
-#define SBREAK_TIME 0x25
-#define U2BSIZE 0x400
-
-#define dgap_jiffies_from_ms(a) (((a) * HZ) / 1000)
-
-/*
- * Our major for the mgmt devices.
- *
- * We can use 22, because Digi was allocated 22 and 23 for the epca driver.
- * 22 has become obsolete now that the "cu" devices have
- * been removed from 2.6.
- * Also, this *IS* the epca driver, just PCI only now.
- */
-#ifndef DIGI_DGAP_MAJOR
-# define DIGI_DGAP_MAJOR 22
-#endif
-
-/*
- * The parameters we use to define the periods of the moving averages.
- */
-#define MA_PERIOD (HZ / 10)
-#define SMA_DUR (1 * HZ)
-#define EMA_DUR (1 * HZ)
-#define SMA_NPERIODS (SMA_DUR / MA_PERIOD)
-#define EMA_NPERIODS (EMA_DUR / MA_PERIOD)
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially. This is the same structure that is defined
- * as the default in tty_io.c, with the same settings overridden as in serial.c.
- *
- * In short, this should match the internal serial ports' defaults.
- */
-#define DEFAULT_IFLAGS (ICRNL | IXON)
-#define DEFAULT_OFLAGS (OPOST | ONLCR)
-#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL)
-#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \
- ECHOCTL | ECHOKE | IEXTEN)
-
-#ifndef _POSIX_VDISABLE
-#define _POSIX_VDISABLE '\0'
-#endif
-
-#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
-#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
-
-#define VPDSIZE (512)
-
-/*
- * Lock function/defines.
- * Makes spotting lock/unlock locations easier.
- */
-# define DGAP_SPINLOCK_INIT(x) spin_lock_init(&(x))
-# define DGAP_LOCK(x,y) spin_lock_irqsave(&(x), y)
-# define DGAP_UNLOCK(x,y) spin_unlock_irqrestore(&(x), y)
-# define DGAP_TRYLOCK(x,y) spin_trylock(&(x))
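
These wrappers expand to the irqsave spinlock primitives, so the flags argument must be a local unsigned long passed by name. A short usage sketch follows; the function and the field being updated are illustrative only.

/* Sketch: typical DGAP_LOCK/DGAP_UNLOCK pairing around board-private state. */
static void dgap_touch_board_sketch(struct board_t *brd)
{
	ulong lock_flags;

	DGAP_LOCK(brd->bd_lock, lock_flags);
	brd->intr_count++;		/* any update protected by bd_lock */
	DGAP_UNLOCK(brd->bd_lock, lock_flags);
}
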
-
-/*
- * All the possible states the driver can be while being loaded.
- */
-enum {
- DRIVER_INITIALIZED = 0,
- DRIVER_NEED_CONFIG_LOAD,
- DRIVER_REQUESTED_CONFIG,
- DRIVER_READY
-};
-
-/*
- * All the possible states the board can be while booting up.
- */
-enum {
- BOARD_FAILED = 0,
- CONFIG_NOT_FOUND,
- BOARD_FOUND,
- NEED_RESET,
- FINISHED_RESET,
- NEED_CONFIG,
- FINISHED_CONFIG,
- NEED_DEVICE_CREATION,
- REQUESTED_DEVICE_CREATION,
- FINISHED_DEVICE_CREATION,
- NEED_BIOS_LOAD,
- REQUESTED_BIOS,
- WAIT_BIOS_LOAD,
- FINISHED_BIOS_LOAD,
- NEED_FEP_LOAD,
- REQUESTED_FEP,
- WAIT_FEP_LOAD,
- FINISHED_FEP_LOAD,
- NEED_PROC_CREATION,
- FINISHED_PROC_CREATION,
- BOARD_READY
-};
-
-/*
- * All the possible states that a requested concentrator image can be in.
- */
-enum {
- NO_PENDING_CONCENTRATOR_REQUESTS = 0,
- NEED_CONCENTRATOR,
- REQUESTED_CONCENTRATOR
-};
-
-extern char *dgap_state_text[];
-extern char *dgap_driver_state_text[];
-
-
-/*
- * Modem line constants are defined as macros because DSR and
- * DCD are swappable using the ditty altpin option.
- */
-#define D_CD(ch) ch->ch_cd /* Carrier detect */
-#define D_DSR(ch) ch->ch_dsr /* Data set ready */
-#define D_RTS(ch) DM_RTS /* Request to send */
-#define D_CTS(ch) DM_CTS /* Clear to send */
-#define D_RI(ch) DM_RI /* Ring indicator */
-#define D_DTR(ch) DM_DTR /* Data terminal ready */
-
-
-/*************************************************************************
- *
- * Structures and closely related defines.
- *
- *************************************************************************/
-
-
-/*
- * A structure to hold a statistics counter. We also
- * compute moving averages for this counter.
- */
-struct macounter
-{
- u32 cnt; /* Total count */
- ulong accum; /* Accumulator per period */
- ulong sma; /* Simple moving average */
- ulong ema; /* Exponential moving average */
-};
-
-
-/************************************************************************
- * Device flag definitions for bd_flags.
- ************************************************************************/
-#define BD_FEP5PLUS 0x0001 /* Supports FEP5 Plus commands */
-#define BD_HAS_VPD 0x0002 /* Board has VPD info available */
-
-
-/*
- * Per-board information
- */
-struct board_t
-{
- int magic; /* Board Magic number. */
- int boardnum; /* Board number: 0-3 */
- int firstminor; /* First minor, e.g. 0, 30, 60 */
-
- int type; /* Type of board */
- char *name; /* Product Name */
- struct pci_dev *pdev; /* Pointer to the pci_dev struct */
- u16 vendor; /* PCI vendor ID */
- u16 device; /* PCI device ID */
- u16 subvendor; /* PCI subsystem vendor ID */
- u16 subdevice; /* PCI subsystem device ID */
- uchar rev; /* PCI revision ID */
- uint pci_bus; /* PCI bus value */
- uint pci_slot; /* PCI slot value */
- u16 maxports; /* MAX ports this board can handle */
- uchar vpd[VPDSIZE]; /* VPD of board, if found */
- u32 bd_flags; /* Board flags */
-
- spinlock_t bd_lock; /* Used to protect board */
-
- u32 state; /* State of card. */
- wait_queue_head_t state_wait; /* Place to sleep on for state change */
-
- struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
-
- u32 wait_for_bios;
- u32 wait_for_fep;
-
- struct cnode * bd_config; /* Config of board */
-
- u16 nasync; /* Number of ports on card */
-
- u32 use_interrupts; /* Should we be interrupt driven? */
- ulong irq; /* Interrupt request number */
- ulong intr_count; /* Count of interrupts */
- u32 intr_used; /* Non-zero if using interrupts */
- u32 intr_running; /* Non-zero if FEP knows its doing interrupts */
-
- ulong port; /* Start of base io port of the card */
- ulong port_end; /* End of base io port of the card */
- ulong membase; /* Start of base memory of the card */
- ulong membase_end; /* End of base memory of the card */
-
- uchar *re_map_port; /* Remapped io port of the card */
- uchar *re_map_membase;/* Remapped memory of the card */
-
- uchar runwait; /* # Processes waiting for FEP */
- uchar inhibit_poller; /* Tells the poller to leave us alone */
-
- struct channel_t *channels[MAXPORTS]; /* array of pointers to our channels. */
-
- struct tty_driver *SerialDriver;
- char SerialName[200];
- struct tty_driver *PrintDriver;
- char PrintName[200];
-
- u32 dgap_Major_Serial_Registered;
- u32 dgap_Major_TransparentPrint_Registered;
-
- u32 dgap_Serial_Major;
- u32 dgap_TransparentPrint_Major;
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
- u32 TtyRefCnt;
-#endif
-
- struct bs_t *bd_bs; /* Base structure pointer */
-
- char *flipbuf; /* Our flip buffer, alloced if board is found */
- char *flipflagbuf; /* Our flip flag buffer, alloced if board is found */
-
- u16 dpatype; /* The board "type", as defined by DPA */
- u16 dpastatus; /* The board "status", as defined by DPA */
- wait_queue_head_t kme_wait; /* Needed for DPA support */
-
- u32 conc_dl_status; /* Status of any pending conc download */
- /*
- * Mgmt data.
- */
- char *msgbuf_head;
- char *msgbuf;
-};
-
-
-
-/************************************************************************
- * Unit flag definitions for un_flags.
- ************************************************************************/
-#define UN_ISOPEN 0x0001 /* Device is open */
-#define UN_CLOSING 0x0002 /* Line is being closed */
-#define UN_IMM 0x0004 /* Service immediately */
-#define UN_BUSY 0x0008 /* Some work this channel */
-#define UN_BREAKI 0x0010 /* Input break received */
-#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
-#define UN_TIME 0x0040 /* Waiting on time */
-#define UN_EMPTY 0x0080 /* Waiting output queue empty */
-#define UN_LOW 0x0100 /* Waiting output low water mark*/
-#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
-#define UN_WOPEN 0x0400 /* Device waiting for open */
-#define UN_WIOCTL 0x0800 /* Device waiting for ioctl */
-#define UN_HANGUP 0x8000 /* Carrier lost */
-
-struct device;
-
-/************************************************************************
- * Structure for terminal or printer unit.
- ************************************************************************/
-struct un_t {
- int magic; /* Unit Magic Number. */
- struct channel_t *un_ch;
- u32 un_time;
- u32 un_type;
- u32 un_open_count; /* Counter of opens to port */
- struct tty_struct *un_tty;/* Pointer to unit tty structure */
- u32 un_flags; /* Unit flags */
- wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
- u32 un_dev; /* Minor device number */
- tcflag_t un_oflag; /* oflags being done on board */
- tcflag_t un_lflag; /* lflags being done on board */
- struct device *un_sysfs;
-};
-
-
-/************************************************************************
- * Device flag definitions for ch_flags.
- ************************************************************************/
-#define CH_PRON 0x0001 /* Printer on string */
-#define CH_OUT 0x0002 /* Dial-out device open */
-#define CH_STOP 0x0004 /* Output is stopped */
-#define CH_STOPI 0x0008 /* Input is stopped */
-#define CH_CD 0x0010 /* Carrier is present */
-#define CH_FCAR 0x0020 /* Carrier forced on */
-
-#define CH_RXBLOCK 0x0080 /* Enable rx blocked flag */
-#define CH_WLOW 0x0100 /* Term waiting low event */
-#define CH_WEMPTY 0x0200 /* Term waiting empty event */
-#define CH_RENABLE 0x0400 /* Buffer just emptied */
-#define CH_RACTIVE 0x0800 /* Process active in xxread() */
-#define CH_RWAIT 0x1000 /* Process waiting in xxread() */
-#define CH_BAUD0 0x2000 /* Used for checking B0 transitions */
-#define CH_HANGUP 0x8000 /* Hangup received */
-
-/*
- * Definitions for ch_sniff_flags
- */
-#define SNIFF_OPEN 0x1
-#define SNIFF_WAIT_DATA 0x2
-#define SNIFF_WAIT_SPACE 0x4
-
-
-/************************************************************************
- * Channel information structure.
- ************************************************************************/
-struct channel_t {
- int magic; /* Channel Magic Number */
- struct bs_t *ch_bs; /* Base structure pointer */
- struct cm_t *ch_cm; /* Command queue pointer */
- struct board_t *ch_bd; /* Board structure pointer */
- unsigned char *ch_vaddr; /* FEP memory origin */
- unsigned char *ch_taddr; /* Write buffer origin */
- unsigned char *ch_raddr; /* Read buffer origin */
- struct digi_t ch_digi; /* Transparent Print structure */
- struct un_t ch_tun; /* Terminal unit info */
- struct un_t ch_pun; /* Printer unit info */
-
- spinlock_t ch_lock; /* provide for serialization */
- wait_queue_head_t ch_flags_wait;
-
- u32 pscan_state;
- uchar pscan_savechar;
-
- u32 ch_portnum; /* Port number, 0 offset. */
- u32 ch_open_count; /* open count */
- u32 ch_flags; /* Channel flags */
-
-
- u32 ch_close_delay; /* How long we should drop RTS/DTR for */
-
- u32 ch_cpstime; /* Time for CPS calculations */
-
- tcflag_t ch_c_iflag; /* channel iflags */
- tcflag_t ch_c_cflag; /* channel cflags */
- tcflag_t ch_c_oflag; /* channel oflags */
- tcflag_t ch_c_lflag; /* channel lflags */
-
- u16 ch_fepiflag; /* FEP tty iflags */
- u16 ch_fepcflag; /* FEP tty cflags */
- u16 ch_fepoflag; /* FEP tty oflags */
- u16 ch_wopen; /* Waiting for open process cnt */
- u16 ch_tstart; /* Transmit buffer start */
- u16 ch_tsize; /* Transmit buffer size */
- u16 ch_rstart; /* Receive buffer start */
- u16 ch_rsize; /* Receive buffer size */
- u16 ch_rdelay; /* Receive delay time */
-
- u16 ch_tlw; /* Our currently set low water mark */
-
- u16 ch_cook; /* Output character mask */
-
- uchar ch_card; /* Card channel is on */
- uchar ch_stopc; /* Stop character */
- uchar ch_startc; /* Start character */
-
- uchar ch_mostat; /* FEP output modem status */
- uchar ch_mistat; /* FEP input modem status */
- uchar ch_mforce; /* Modem values to be forced */
- uchar ch_mval; /* Force values */
- uchar ch_fepstopc; /* FEP stop character */
- uchar ch_fepstartc; /* FEP start character */
-
- uchar ch_astopc; /* Auxiliary Stop character */
- uchar ch_astartc; /* Auxiliary Start character */
- uchar ch_fepastopc; /* Auxiliary FEP stop char */
- uchar ch_fepastartc; /* Auxiliary FEP start char */
-
- uchar ch_hflow; /* FEP hardware handshake */
- uchar ch_dsr; /* stores real dsr value */
- uchar ch_cd; /* stores real cd value */
- uchar ch_tx_win; /* channel tx buffer window */
- uchar ch_rx_win; /* channel rx buffer window */
- uint ch_custom_speed; /* Custom baud, if set */
- uint ch_baud_info; /* Current baud info for /proc output */
- ulong ch_rxcount; /* total of data received so far */
- ulong ch_txcount; /* total of data transmitted so far */
- ulong ch_err_parity; /* Count of parity errors on channel */
- ulong ch_err_frame; /* Count of framing errors on channel */
- ulong ch_err_break; /* Count of breaks on channel */
- ulong ch_err_overrun; /* Count of overruns on channel */
-
- uint ch_sniff_in;
- uint ch_sniff_out;
- char *ch_sniff_buf; /* Sniff buffer for proc */
- ulong ch_sniff_flags; /* Channel flags */
- wait_queue_head_t ch_sniff_wait;
-};
-
-
-/*************************************************************************
- *
- * Prototypes for non-static functions used in more than one module
- *
- *************************************************************************/
-
-extern int dgap_ms_sleep(ulong ms);
-extern char *dgap_ioctl_name(int cmd);
-extern void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len);
-extern void dgap_do_fep_load(struct board_t *brd, uchar __user *ufep, int len);
-extern void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len);
-extern void dgap_do_config_load(uchar __user *uaddr, int len);
-extern int dgap_after_config_loaded(void);
-extern int dgap_finalize_board_init(struct board_t *brd);
-
-/*
- * Our Global Variables.
- */
-extern int dgap_driver_state; /* The state of the driver */
-extern int dgap_debug; /* Debug variable */
-extern int dgap_rawreadok; /* Set if user wants rawreads */
-extern int dgap_poll_tick; /* Poll interval - 20 ms */
-extern spinlock_t dgap_global_lock; /* Driver global spinlock */
-extern uint dgap_NumBoards; /* Total number of boards */
-extern struct board_t *dgap_Board[MAXBOARDS]; /* Array of board structs */
-extern ulong dgap_poll_counter; /* Times the poller has run */
-extern char *dgap_config_buf; /* The config file buffer */
-extern spinlock_t dgap_dl_lock; /* Downloader spinlock */
-extern wait_queue_head_t dgap_dl_wait; /* Wait queue for downloader */
-extern int dgap_dl_action; /* Action flag for downloader */
-extern int dgap_registerttyswithsysfs; /* Should we register the */
- /* ttys with sysfs or not */
-
-/*
- * Global functions declared in dgap_fep5.c, but must be hidden from
- * user space programs.
- */
-extern void dgap_poll_tasklet(unsigned long data);
-extern void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint ncmds);
-extern void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds);
-extern void dgap_wmove(struct channel_t *ch, char *buf, uint cnt);
-extern int dgap_param(struct tty_struct *tty);
-extern void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *fbuf, int *len);
-extern uint dgap_get_custom_baud(struct channel_t *ch);
-extern void dgap_firmware_reset_port(struct channel_t *ch);
-
-#endif
diff --git a/drivers/staging/dgap/dgap_fep5.c b/drivers/staging/dgap/dgap_fep5.c
deleted file mode 100644
index f75831a422e8..000000000000
--- a/drivers/staging/dgap/dgap_fep5.c
+++ /dev/null
@@ -1,1938 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- *
- * $Id: dgap_fep5.c,v 1.2 2011/06/21 10:35:40 markh Exp $
- */
-
-
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/delay.h> /* For udelay */
-#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
-#include <linux/tty.h>
-#include <linux/tty_flip.h> /* For tty_schedule_flip */
-#include <linux/slab.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
-#include <linux/sched.h>
-#endif
-
-#include "dgap_driver.h"
-#include "dgap_pci.h"
-#include "dgap_fep5.h"
-#include "dgap_tty.h"
-#include "dgap_conf.h"
-#include "dgap_parse.h"
-#include "dgap_trace.h"
-
-/*
- * Our function prototypes
- */
-static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds);
-static int dgap_event(struct board_t *bd);
-
-/*
- * internal variables
- */
-static uint dgap_count = 500;
-
-
-/*
- * Loads the dgap.conf config file from the user.
- */
-void dgap_do_config_load(uchar __user *uaddr, int len)
-{
- int orig_len = len;
- char *to_addr;
- uchar __user *from_addr = uaddr;
- char buf[U2BSIZE];
- int n;
-
- to_addr = dgap_config_buf = kzalloc(len + 1, GFP_ATOMIC);
- if (!dgap_config_buf) {
- DPR_INIT(("dgap_do_config_load - unable to allocate memory for file\n"));
- dgap_driver_state = DRIVER_NEED_CONFIG_LOAD;
- return;
- }
-
- n = U2BSIZE;
- while (len) {
-
- if (n > len)
- n = len;
-
- if (copy_from_user((char *) &buf, from_addr, n) == -1 )
- return;
-
- /* Copy data from buffer to kernel memory */
- memcpy(to_addr, buf, n);
-
- /* increment counts */
- len -= n;
- to_addr += n;
- from_addr += n;
- n = U2BSIZE;
- }
-
- dgap_config_buf[orig_len] = '\0';
-
- to_addr = dgap_config_buf;
- dgap_parsefile(&to_addr, TRUE);
-
- DPR_INIT(("dgap_config_load() finish\n"));
-
- return;
-}
-
-
-int dgap_after_config_loaded(void)
-{
- int i = 0;
- int rc = 0;
-
- /*
- * Register our ttys, now that we have the config loaded.
- */
- for (i = 0; i < dgap_NumBoards; ++i) {
-
- /*
- * Initialize KME waitqueues...
- */
- init_waitqueue_head(&(dgap_Board[i]->kme_wait));
-
- /*
- * allocate flip buffer for board.
- */
- dgap_Board[i]->flipbuf = kzalloc(MYFLIPLEN, GFP_ATOMIC);
- dgap_Board[i]->flipflagbuf = kzalloc(MYFLIPLEN, GFP_ATOMIC);
- }
-
- return rc;
-}
-
-
-
-/*=======================================================================
- *
- * usertoboard - copy from user space to board space.
- *
- *=======================================================================*/
-static int dgap_usertoboard(struct board_t *brd, char *to_addr, char __user *from_addr, int len)
-{
- char buf[U2BSIZE];
- int n = U2BSIZE;
-
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -EFAULT;
-
- while (len) {
- if (n > len)
- n = len;
-
- if (copy_from_user((char *) &buf, from_addr, n) == -1 ) {
- return -EFAULT;
- }
-
- /* Copy data from buffer to card memory */
- memcpy_toio(to_addr, buf, n);
-
- /* increment counts */
- len -= n;
- to_addr += n;
- from_addr += n;
- n = U2BSIZE;
- }
- return 0;
-}
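
One note on the loop above: copy_from_user() returns the number of bytes it could not copy (0 on success), never -1, so a defensive version of this chunked copy treats any non-zero return as failure. A sketch under that reading, keeping the U2BSIZE staging buffer and the memcpy_toio() step:

/* Sketch: copy user data to card memory in U2BSIZE chunks, failing on any short copy. */
static int dgap_usertoboard_sketch(char *to_addr, char __user *from_addr, int len)
{
	char buf[U2BSIZE];

	while (len > 0) {
		int n = min(len, (int)U2BSIZE);

		if (copy_from_user(buf, from_addr, n))
			return -EFAULT;

		memcpy_toio(to_addr, buf, n);
		to_addr += n;
		from_addr += n;
		len -= n;
	}

	return 0;
}
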
-
-
-/*
- * Copies the BIOS code from the user to the board,
- * and starts the BIOS running.
- */
-void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len)
-{
- uchar *addr;
- uint offset;
- int i;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- DPR_INIT(("dgap_do_bios_load() start\n"));
-
- addr = brd->re_map_membase;
-
- /*
- * clear POST area
- */
- for (i = 0; i < 16; i++)
- writeb(0, addr + POSTAREA + i);
-
- /*
- * Download bios
- */
- offset = 0x1000;
- if (dgap_usertoboard(brd, addr + offset, ubios, len) == -1 ) {
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-
- writel(0x0bf00401, addr);
- writel(0, (addr + 4));
-
- /* Clear the reset, and change states. */
- writeb(FEPCLR, brd->re_map_port);
- brd->state = WAIT_BIOS_LOAD;
-}
-
-
-/*
- * Checks to see if the BIOS completed running on the card.
- */
-static void dgap_do_wait_for_bios(struct board_t *brd)
-{
- uchar *addr;
- u16 word;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
- word = readw(addr + POSTAREA);
-
- /* Check to see if BIOS thinks board is good. (GD). */
- if (word == *(u16 *) "GD") {
- DPR_INIT(("GOT GD in memory, moving states.\n"));
- brd->state = FINISHED_BIOS_LOAD;
- return;
- }
-
- /* Give up on the board if it has taken too long */
- if (brd->wait_for_bios++ > 5000) {
- u16 err1 = readw(addr + SEQUENCE);
- u16 err2 = readw(addr + ERROR);
- APR(("***WARNING*** %s failed diagnostics. Error #(%x,%x).\n",
- brd->name, err1, err2));
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- }
-}
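
The word == *(u16 *) "GD" test compares the 16-bit value read from POSTAREA with the ASCII bytes 'G' and 'D' as they sit in host memory, so its numeric value depends on host byte order. A hypothetical helper that spells out the little-endian case:

/* Illustrative only: the value *(u16 *)"GD" has on a little-endian host. */
static inline u16 dgap_sig_sketch(u8 lo, u8 hi)
{
	return (u16)lo | ((u16)hi << 8);	/* 'G' in the low byte, 'D' in the high byte */
}

/* e.g. readw(addr + POSTAREA) == dgap_sig_sketch('G', 'D') on little-endian hosts */
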
-
-
-/*
- * Copies the FEP code from the user to the board,
- * and starts the FEP running.
- */
-void dgap_do_fep_load(struct board_t *brd, uchar __user *ufep, int len)
-{
- uchar *addr;
- uint offset;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- DPR_INIT(("dgap_do_fep_load() for board %s : start\n", brd->name));
-
- /*
- * Download FEP
- */
- offset = 0x1000;
- if (dgap_usertoboard(brd, addr + offset, ufep, len) == -1 ) {
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-
- /*
- * If board is a concentrator product, we need to give
- * it its config string describing how the concentrators look.
- */
- if ((brd->type == PCX) || (brd->type == PEPC)) {
- uchar string[100];
- uchar *config, *xconfig;
- int i = 0;
-
- xconfig = dgap_create_config_string(brd, string);
-
- /* Write string to board memory */
- config = addr + CONFIG;
- for (; i < CONFIGSIZE; i++, config++, xconfig++) {
- writeb(*xconfig, config);
- if ((*xconfig & 0xff) == 0xff)
- break;
- }
- }
-
- writel(0xbfc01004, (addr + 0xc34));
- writel(0x3, (addr + 0xc30));
-
- /* change states. */
- brd->state = WAIT_FEP_LOAD;
-
- DPR_INIT(("dgap_do_fep_load() for board %s : finish\n", brd->name));
-
-}
-
-
-/*
- * Waits for the FEP to report that it's ready for us to use.
- */
-static void dgap_do_wait_for_fep(struct board_t *brd)
-{
- uchar *addr;
- u16 word;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- DPR_INIT(("dgap_do_wait_for_fep() for board %s : start. addr: %p\n", brd->name, addr));
-
- word = readw(addr + FEPSTAT);
-
- /* Check to see if FEP is up and running now. */
- if (word == *(u16 *) "OS") {
- DPR_INIT(("GOT OS in memory for board %s, moving states.\n", brd->name));
- brd->state = FINISHED_FEP_LOAD;
-
- /*
- * Check to see if the board can support FEP5+ commands.
- */
- word = readw(addr + FEP5_PLUS);
- if (word == *(u16 *) "5A") {
- DPR_INIT(("GOT 5A in memory for board %s, board supports extended FEP5 commands.\n", brd->name));
- brd->bd_flags |= BD_FEP5PLUS;
- }
-
- return;
- }
-
- /* Give up on the board if it has taken too long */
- if (brd->wait_for_fep++ > 5000) {
- u16 err1 = readw(addr + SEQUENCE);
- u16 err2 = readw(addr + ERROR);
- APR(("***WARNING*** FEPOS for %s not functioning. Error #(%x,%x).\n",
- brd->name, err1, err2));
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- }
-
- DPR_INIT(("dgap_do_wait_for_fep() for board %s : finish\n", brd->name));
-}
-
-
-/*
- * Physically forces the FEP5 card to reset itself.
- */
-static void dgap_do_reset_board(struct board_t *brd)
-{
- uchar check;
- u32 check1;
- u32 check2;
- int i = 0;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase || !brd->re_map_port) {
- DPR_INIT(("dgap_do_reset_board() start. bad values. brd: %p mem: %p io: %p\n",
- brd, brd ? brd->re_map_membase : 0, brd ? brd->re_map_port : 0));
- return;
- }
-
- DPR_INIT(("dgap_do_reset_board() start. io: %p\n", brd->re_map_port));
-
- /* FEPRST does not vary among supported boards */
- writeb(FEPRST, brd->re_map_port);
-
- for (i = 0; i <= 1000; i++) {
- check = readb(brd->re_map_port) & 0xe;
- if (check == FEPRST)
- break;
- udelay(10);
-
- }
- if (i > 1000) {
- APR(("*** WARNING *** Board not resetting... Failing board.\n"));
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- goto failed;
- }
-
- /*
- * Make sure there really is memory out there.
- */
- writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
- writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
- check1 = readl(brd->re_map_membase + LOWMEM);
- check2 = readl(brd->re_map_membase + HIGHMEM);
-
- if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
- APR(("*** Warning *** No memory at %p for board.\n", brd->re_map_membase));
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- goto failed;
- }
-
- if (brd->state != BOARD_FAILED)
- brd->state = FINISHED_RESET;
-
-failed:
- DPR_INIT(("dgap_do_reset_board() finish\n"));
-}
-
-
-/*
- * Sends a concentrator image into the FEP5 board.
- */
-void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len)
-{
- char *vaddr;
- u16 offset = 0;
- struct downld_t *to_dp;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- vaddr = brd->re_map_membase;
-
- offset = readw((u16 *) (vaddr + DOWNREQ));
- to_dp = (struct downld_t *) (vaddr + (int) offset);
-
- /*
- * The image was already read into kernel space,
- * so we do NOT need a user-space read here.
- */
- memcpy_toio((char *) to_dp, uaddr, sizeof(struct downld_t));
-
- /* Tell card we have data for it */
- writew(0, vaddr + (DOWNREQ));
-
- brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
-}
-
-
-#define EXPANSION_ROM_SIZE (64 * 1024)
-#define FEP5_ROM_MAGIC (0xFEFFFFFF)
-
-static void dgap_get_vpd(struct board_t *brd)
-{
- u32 magic;
- u32 base_offset;
- u16 rom_offset;
- u16 vpd_offset;
- u16 image_length;
- u16 i;
- uchar byte1;
- uchar byte2;
-
- /*
- * Poke the magic number at the PCI Rom Address location.
- * If VPD is supported, the value read from that address
- * will be non-zero.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- /* VPD not supported, bail */
- if (!magic)
- return;
-
- /*
- * To get to the OTPROM memory, we have to send the board's base
- * address or'ed with 1 into the PCI Rom Address location.
- */
- magic = brd->membase | 0x01;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- byte1 = readb(brd->re_map_membase);
- byte2 = readb(brd->re_map_membase + 1);
-
- /*
- * If the board correctly swapped to the OTPROM memory,
- * the first 2 bytes (header) should be 0x55, 0xAA
- */
- if (byte1 == 0x55 && byte2 == 0xAA) {
-
- base_offset = 0;
-
- /*
- * We have to run through all the OTPROM memory looking
- * for the VPD offset.
- */
- while (base_offset <= EXPANSION_ROM_SIZE) {
-
- /*
- * Lots of magic numbers here.
- *
- * The VPD offset is located inside the ROM Data Structure.
- * We also have to remember the length of each
- * ROM Data Structure, so we can "hop" to the next
- * entry if the VPD isn't in the current
- * ROM Data Structure.
- */
- rom_offset = readw(brd->re_map_membase + base_offset + 0x18);
- image_length = readw(brd->re_map_membase + rom_offset + 0x10) * 512;
- vpd_offset = readw(brd->re_map_membase + rom_offset + 0x08);
-
- /* Found the VPD entry */
- if (vpd_offset)
- break;
-
- /* We didn't find a VPD entry, go to next ROM entry. */
- base_offset += image_length;
-
- byte1 = readb(brd->re_map_membase + base_offset);
- byte2 = readb(brd->re_map_membase + base_offset + 1);
-
- /*
- * If the new ROM offset doesn't have 0x55, 0xAA
- * as its header, we have run out of ROM.
- */
- if (byte1 != 0x55 || byte2 != 0xAA)
- break;
- }
-
- /*
- * If we have a VPD offset, then mark the board
- * as having a valid VPD, and copy VPDSIZE (512) bytes of
- * that VPD to the buffer we have in our board structure.
- */
- if (vpd_offset) {
- brd->bd_flags |= BD_HAS_VPD;
- for (i = 0; i < VPDSIZE; i++)
- brd->vpd[i] = readb(brd->re_map_membase + vpd_offset + i);
- }
- }
-
- /*
- * We MUST poke the magic number at the PCI Rom Address location again.
- * This makes the card report the regular board memory back to us,
- * rather than the OTPROM memory.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
-}
-
-
-/*
- * Our board poller function.
- */
-void dgap_poll_tasklet(unsigned long data)
-{
- struct board_t *bd = (struct board_t *) data;
- ulong lock_flags;
- ulong lock_flags2;
- char *vaddr;
- u16 head, tail;
- u16 *chk_addr;
- u16 check = 0;
-
- if (!bd || (bd->magic != DGAP_BOARD_MAGIC)) {
- APR(("dgap_poll_tasklet() - NULL or bad bd.\n"));
- return;
- }
-
- if (bd->inhibit_poller)
- return;
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
- if (bd->state == BOARD_READY) {
-
- struct ev_t *eaddr = NULL;
-
- if (!bd->re_map_membase) {
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return;
- }
- if (!bd->re_map_port) {
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return;
- }
-
- if (!bd->nasync) {
- goto out;
- }
-
- /*
- * If this is a CX or EPCX, we need to see if the firmware
- * is requesting a concentrator image from us.
- */
- if ((bd->type == PCX) || (bd->type == PEPC)) {
- chk_addr = (u16 *) (vaddr + DOWNREQ);
- check = readw(chk_addr);
- /* Nonzero if FEP is requesting concentrator image. */
- if (check) {
- if (bd->conc_dl_status == NO_PENDING_CONCENTRATOR_REQUESTS)
- bd->conc_dl_status = NEED_CONCENTRATOR;
- /*
- * Signal the downloader; it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
-
- }
- }
-
- eaddr = (struct ev_t *) (vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&(eaddr->ev_head));
- tail = readw(&(eaddr->ev_tail));
-
- /*
- * If there is an event pending. Go service it.
- */
- if (head != tail) {
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- dgap_event(bd);
- DGAP_LOCK(bd->bd_lock, lock_flags);
- }
-
-out:
- /*
- * If board is doing interrupts, ACK the interrupt.
- */
- if (bd && bd->intr_running) {
- readb(bd->re_map_port + 2);
- }
-
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return;
- }
-
- /* Our state machine to get the board up and running */
-
- /* Reset board */
- if (bd->state == NEED_RESET) {
-
- /* Get VPD info */
- dgap_get_vpd(bd);
-
- dgap_do_reset_board(bd);
- }
-
- /* Move to next state */
- if (bd->state == FINISHED_RESET) {
- bd->state = NEED_CONFIG;
- }
-
- if (bd->state == NEED_CONFIG) {
- /*
- * Match this board to a config the user created for us.
- */
- bd->bd_config = dgap_find_config(bd->type, bd->pci_bus, bd->pci_slot);
-
- /*
- * Because the 4 port Xr products share the same PCI ID
- * as the 8 port Xr products, if we receive a NULL config
- * back, and this is a PAPORT8 board, retry with a
- * PAPORT4 attempt as well.
- */
- if (bd->type == PAPORT8 && !bd->bd_config) {
- bd->bd_config = dgap_find_config(PAPORT4, bd->pci_bus, bd->pci_slot);
- }
-
- /*
- * Register the ttys (if any) into the kernel.
- */
- if (bd->bd_config) {
- bd->state = FINISHED_CONFIG;
- }
- else {
- bd->state = CONFIG_NOT_FOUND;
- }
- }
-
- /* Move to next state */
- if (bd->state == FINISHED_CONFIG) {
- bd->state = NEED_DEVICE_CREATION;
- }
-
- /* Move to next state */
- if (bd->state == NEED_DEVICE_CREATION) {
- /*
- * Signal the downloader; it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
- }
-
- /* Move to next state */
- if (bd->state == FINISHED_DEVICE_CREATION) {
- bd->state = NEED_BIOS_LOAD;
- }
-
- /* Move to next state */
- if (bd->state == NEED_BIOS_LOAD) {
- /*
- * Signal the downloader; it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
- }
-
- /* Wait for BIOS to test board... */
- if (bd->state == WAIT_BIOS_LOAD) {
- dgap_do_wait_for_bios(bd);
- }
-
- /* Move to next state */
- if (bd->state == FINISHED_BIOS_LOAD) {
- bd->state = NEED_FEP_LOAD;
-
- /*
- * Signal the downloader; it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
- }
-
- /* Wait for FEP to load on board... */
- if (bd->state == WAIT_FEP_LOAD) {
- dgap_do_wait_for_fep(bd);
- }
-
-
- /* Move to next state */
- if (bd->state == FINISHED_FEP_LOAD) {
-
- /*
- * Do tty device initialization.
- */
- int rc = dgap_tty_init(bd);
-
- if (rc < 0) {
- dgap_tty_uninit(bd);
- APR(("Can't init tty devices (%d)\n", rc));
- bd->state = BOARD_FAILED;
- bd->dpastatus = BD_NOFEP;
- }
- else {
- bd->state = NEED_PROC_CREATION;
-
- /*
- * Signal the downloader; it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
- }
- }
-
- /* Move to next state */
- if (bd->state == FINISHED_PROC_CREATION) {
-
- bd->state = BOARD_READY;
- bd->dpastatus = BD_RUNNING;
-
- /*
- * If user requested the board to run in interrupt mode,
- * go and set it up on the board.
- */
- if (bd->intr_used) {
- writew(1, (bd->re_map_membase + ENABLE_INTR));
- /*
- * Tell the board to poll the UARTS as fast as possible.
- */
- writew(FEPPOLL_MIN, (bd->re_map_membase + FEPPOLL));
- bd->intr_running = 1;
- }
-
- /* Wake up anyone waiting for board state to change to ready */
- wake_up_interruptible(&bd->state_wait);
- }
-
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-}
-
-
-/*=======================================================================
- *
- * dgap_cmdb - Sends a 2-byte command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * byte1 - Integer containing first byte to be sent.
- * byte2 - Integer containing second byte to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint ncmds)
-{
- char *vaddr = NULL;
- struct cm_t *cm_addr = NULL;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED) {
- DPR_CORE(("%s:%d board is in failed state.\n", __FILE__, __LINE__));
- return;
- }
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- DPR_CORE(("%s:%d pointers out of range, failing board!\n", __FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
- writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
- writeb(byte1, (char *) (vaddr + head + CMDSTART + 2));
- writeb(byte2, (char *) (vaddr + head + CMDSTART + 3));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- DPR_CORE(("%s:%d failing board.\n",__FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
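
The command ring above is a small circular buffer in board memory: each command is 4 bytes, the ring occupies CMDSTART through CMDMAX, and the cm_t header holds the head and tail offsets. A minimal sketch of the occupancy check the wait loop relies on, using only the constants defined in dgap_fep5.h (the standalone helper name is hypothetical):

    /*
     * Bytes of commands still queued to the FEP. The mask keeps the
     * head/tail difference inside the power-of-two command window.
     */
    static inline unsigned int dgap_cmds_pending(u16 head, u16 tail)
    {
            return (head - tail) & (CMDMAX - CMDSTART - 4);
    }

The loop exits once this value drops to ncmds * sizeof(struct cm_t) or below, and fails the board if dgap_count iterations pass without that happening.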
-
-
-/*=======================================================================
- *
- * dgap_cmdw - Sends a 1-word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
-{
- char *vaddr = NULL;
- struct cm_t *cm_addr = NULL;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED) {
- DPR_CORE(("%s:%d board is in failed state.\n", __FILE__, __LINE__));
- return;
- }
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- DPR_CORE(("%s:%d Pointers out of range. Failing board.\n",__FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
- writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
- writew((u16) word, (char *) (vaddr + head + CMDSTART + 2));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- DPR_CORE(("%s:%d Failing board.\n",__FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-
-
-/*=======================================================================
- *
- * dgap_cmdw_ext - Sends an extended word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
-{
- char *vaddr = NULL;
- struct cm_t *cm_addr = NULL;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED) {
- DPR_CORE(("%s:%d board is in failed state.\n", __FILE__, __LINE__));
- return;
- }
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- DPR_CORE(("%s:%d Pointers out of range. Failing board.\n",__FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
-
- /* Write an FF to tell the FEP that we want an extended command */
- writeb((uchar) 0xff, (char *) (vaddr + head + CMDSTART + 0));
-
- writeb((uchar) ch->ch_portnum, (uchar *) (vaddr + head + CMDSTART + 1));
- writew((u16) cmd, (char *) (vaddr + head + CMDSTART + 2));
-
- /*
- * If the second part of the command won't fit,
- * put it at the beginning of the circular buffer.
- */
- if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03))) {
- writew((u16) word, (char *) (vaddr + CMDSTART));
- } else {
- writew((u16) word, (char *) (vaddr + head + CMDSTART + 4));
- }
-
- head = (head + 8) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- DPR_CORE(("%s:%d Failing board.\n",__FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-
-/*=======================================================================
- *
- * dgap_wmove - Write data to FEP buffer.
- *
- * ch - Pointer to channel structure.
- * buf - Pointer to characters to be moved.
- * cnt - Number of characters to move.
- *
- *=======================================================================*/
-void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
-{
- int n;
- char *taddr;
- struct bs_t *bs;
- u16 head;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check parameters.
- */
- bs = ch->ch_bs;
- head = readw(&(bs->tx_head));
-
- /*
- * If pointers are out of range, just return.
- */
- if ((cnt > ch->ch_tsize) || (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize) {
- DPR_CORE(("%s:%d pointer out of range", __FILE__, __LINE__));
- return;
- }
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- n = ch->ch_tstart + ch->ch_tsize - head;
-
- if (cnt >= n) {
- cnt -= n;
- taddr = ch->ch_taddr + head;
- memcpy_toio(taddr, buf, n);
- head = ch->ch_tstart;
- buf += n;
- }
-
- /*
- * Move rest of data.
- */
- taddr = ch->ch_taddr + head;
- n = cnt;
- memcpy_toio(taddr, buf, n);
- head += cnt;
-
- writew(head, &(bs->tx_head));
-}
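
dgap_wmove handles a write that runs past the top of the transmit ring by splitting it into two memcpy_toio calls. A condensed sketch of that wrap logic, under the same field meanings as above (the standalone helper is hypothetical):

    /* Copy cnt bytes into a ring [tstart, tstart + tsize) starting at *head. */
    static void ring_write(char __iomem *base, u16 tstart, u16 tsize,
                           u16 *head, const char *buf, uint cnt)
    {
            uint n = tstart + tsize - *head;   /* room before the wrap point */

            if (cnt >= n) {                    /* first chunk up to the top... */
                    memcpy_toio(base + *head, buf, n);
                    buf += n;
                    cnt -= n;
                    *head = tstart;            /* ...then continue at the bottom */
            }
            memcpy_toio(base + *head, buf, cnt);
            *head += cnt;
    }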
-
-/*
- * Retrieves the current custom baud rate from FEP memory
- * and returns it to the user.
- * Returns 0 on error.
- */
-uint dgap_get_custom_baud(struct channel_t *ch)
-{
- uchar *vaddr;
- ulong offset = 0;
- uint value = 0;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
- return 0;
- }
-
- if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC) {
- return 0;
- }
-
- if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
- return 0;
-
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return 0;
-
- /*
- * Read from FEP memory what the FEP
- * believes the custom baud rate is.
- */
- offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) +
- (ch->ch_portnum * 0x28) + LINE_SPEED));
-
- value = readw(vaddr + offset);
- return value;
-}
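
The offset arithmetic above locates the per-channel extended structure: ECS_SEG holds a segment value that becomes a byte offset when shifted left by 4, each channel's record is 0x28 bytes wide, and LINE_SPEED (0x10) is the field within the record. A worked example with an assumed segment value:

    /*
     * If the word at ECS_SEG is 0x0F00 and this is port 2:
     *   offset = (0x0F00 << 4) + (2 * 0x28) + LINE_SPEED
     *          = 0xF000 + 0x50 + 0x10 = 0xF060
     */
    offset = (readw(vaddr + ECS_SEG) << 4) +
             (ch->ch_portnum * 0x28) + LINE_SPEED;
    value  = readw(vaddr + offset);

The original code performs the first read with a raw pointer dereference rather than readw; the sketch uses readw for both accesses.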
-
-
-/*
- * Calls the firmware to reset this channel.
- */
-void dgap_firmware_reset_port(struct channel_t *ch)
-{
- dgap_cmdb(ch, CHRESET, 0, 0, 0);
-
- /*
- * Now that the channel is reset, we need to make sure
- * all the current settings get reapplied to the port
- * in the firmware.
- *
- * So we will set the driver's cache of firmware
- * settings all to 0, and then call param.
- */
- ch->ch_fepiflag = 0;
- ch->ch_fepcflag = 0;
- ch->ch_fepoflag = 0;
- ch->ch_fepstartc = 0;
- ch->ch_fepstopc = 0;
- ch->ch_fepastartc = 0;
- ch->ch_fepastopc = 0;
- ch->ch_mostat = 0;
- ch->ch_hflow = 0;
-}
-
-
-/*=======================================================================
- *
- * dgap_param - Set Digi parameters.
- *
- * struct tty_struct * - TTY for port.
- *
- *=======================================================================*/
-int dgap_param(struct tty_struct *tty)
-{
- struct ktermios *ts;
- struct board_t *bd;
- struct channel_t *ch;
- struct bs_t *bs;
- struct un_t *un;
- u16 head;
- u16 cflag;
- u16 iflag;
- uchar mval;
- uchar hflow;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -ENXIO;
-
- un = (struct un_t *) tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -ENXIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -ENXIO;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -ENXIO;
-
- bs = ch->ch_bs;
- if (!bs)
- return -ENXIO;
-
- DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
- ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
-
- ts = &tty->termios;
-
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
-
- /* flush rx */
- head = readw(&(ch->ch_bs->rx_head));
- writew(head, &(ch->ch_bs->rx_tail));
-
- /* flush tx */
- head = readw(&(ch->ch_bs->tx_head));
- writew(head, &(ch->ch_bs->tx_tail));
-
- ch->ch_flags |= (CH_BAUD0);
-
- /* Drop RTS and DTR */
- ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
- mval = D_DTR(ch) | D_RTS(ch);
- ch->ch_baud_info = 0;
-
- } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
- /*
- * Tell the fep to do the command
- */
-
- DPR_PARAM(("param: Want %d speed\n", ch->ch_custom_speed));
-
- dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
-
- /*
- * Now read back from FEP memory what the FEP
- * believes the custom baud rate is.
- */
- ch->ch_baud_info = ch->ch_custom_speed = dgap_get_custom_baud(ch);
-
- DPR_PARAM(("param: Got %d speed\n", ch->ch_custom_speed));
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
-
- } else {
- /*
- * Set baud rate, character size, and parity.
- */
-
-
- int iindex = 0;
- int jindex = 0;
- int baud = 0;
-
- ulong bauds[4][16] = {
- { /* slowbaud */
- 0, 50, 75, 110,
- 134, 150, 200, 300,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* slowbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud */
- 0, 57600, 76800, 115200,
- 14400, 57600, 230400, 76800,
- 115200, 230400, 28800, 460800,
- 921600, 9600, 19200, 38400 },
- { /* fastbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 }
- };
-
- /* Only use the TXPrint baud rate if the terminal unit is NOT open */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGAP_PRINT))
- baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
- else
- baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
-
- if (ch->ch_c_cflag & CBAUDEX)
- iindex = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FAST)
- iindex += 2;
-
- jindex = baud;
-
- if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) {
- baud = bauds[iindex][jindex];
- } else {
- DPR_IOCTL(("baud indices were out of range (%d)(%d)",
- iindex, jindex));
- baud = 0;
- }
-
- if (baud == 0)
- baud = 9600;
-
- ch->ch_baud_info = baud;
-
-
- /*
- * CBAUD has bit position 0x1000 set these days to indicate Linux
- * baud rate remap.
- * We use a different bit assignment for high speed. Clear this
- * bit out while grabbing the parts of "cflag" we want.
- */
- cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE);
-
- /*
- * HUPCL bit is used by FEP to indicate fast baud
- * table is to be used.
- */
- if ((ch->ch_digi.digi_flags & DIGI_FAST) || (ch->ch_c_cflag & CBAUDEX))
- cflag |= HUPCL;
-
-
- if ((ch->ch_c_cflag & CBAUDEX) && !(ch->ch_digi.digi_flags & DIGI_FAST)) {
- /*
- * The below code is trying to guarantee that only baud rates
- * 115200, 230400, 460800, 921600 are remapped. We use exclusive or
- * because the various baud rates share common bit positions
- * and therefore can't be tested for easily.
- */
- tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
- int baudpart = 0;
-
- /* Map high speed requests to index into FEP's baud table */
- switch (tcflag) {
- case B57600 :
- baudpart = 1;
- break;
-#ifdef B76800
- case B76800 :
- baudpart = 2;
- break;
-#endif
- case B115200 :
- baudpart = 3;
- break;
- case B230400 :
- baudpart = 9;
- break;
- case B460800 :
- baudpart = 11;
- break;
-#ifdef B921600
- case B921600 :
- baudpart = 12;
- break;
-#endif
- default:
- baudpart = 0;
- }
-
- if (baudpart)
- cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
- }
-
- cflag &= 0xffff;
-
- if (cflag != ch->ch_fepcflag) {
- ch->ch_fepcflag = (u16) (cflag & 0xffff);
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
- }
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
- }
-
- /*
- * Get input flags.
- */
- iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | INPCK | ISTRIP | IXON | IXANY | IXOFF);
-
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE)) {
- iflag &= ~(IXON | IXOFF);
- ch->ch_c_iflag &= ~(IXON | IXOFF);
- }
-
- /*
- * Only the IBM Xr card can switch between
- * 232 and 422 modes on the fly
- */
- if (bd->device == PCI_DEVICE_XR_IBM_DID) {
- if (ch->ch_digi.digi_flags & DIGI_422)
- dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
- else
- dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
- }
-
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
- iflag |= IALTPIN ;
-
- if (iflag != ch->ch_fepiflag) {
- ch->ch_fepiflag = iflag;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
- }
-
- /*
- * Select hardware handshaking.
- */
- hflow = 0;
-
- if (ch->ch_c_cflag & CRTSCTS) {
- hflow |= (D_RTS(ch) | D_CTS(ch));
- }
- if (ch->ch_digi.digi_flags & RTSPACE)
- hflow |= D_RTS(ch);
- if (ch->ch_digi.digi_flags & DTRPACE)
- hflow |= D_DTR(ch);
- if (ch->ch_digi.digi_flags & CTSPACE)
- hflow |= D_CTS(ch);
- if (ch->ch_digi.digi_flags & DSRPACE)
- hflow |= D_DSR(ch);
- if (ch->ch_digi.digi_flags & DCDPACE)
- hflow |= D_CD(ch);
-
- if (hflow != ch->ch_hflow) {
- ch->ch_hflow = hflow;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SHFLOW, (uchar) hflow, 0xff, 0);
- }
-
-
- /*
- * Set RTS and/or DTR Toggle if needed, but only if product is FEP5+ based.
- */
- if (bd->bd_flags & BD_FEP5PLUS) {
- u16 hflow2 = 0;
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
- hflow2 |= (D_RTS(ch));
- }
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
- hflow2 |= (D_DTR(ch));
- }
-
- dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
- }
-
- /*
- * Set modem control lines.
- */
-
- mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
-
- DPR_PARAM(("dgap_param: mval: %x ch_mforce: %x ch_mval: %x ch_mostat: %x\n",
- mval, ch->ch_mforce, ch->ch_mval, ch->ch_mostat));
-
- if (ch->ch_mostat ^ mval) {
- ch->ch_mostat = mval;
-
- /* Okay to have channel and board locks held calling this */
- DPR_PARAM(("dgap_param: Sending SMODEM mval: %x\n", mval));
- dgap_cmdb(ch, SMODEM, (uchar) mval, D_RTS(ch)|D_DTR(ch), 0);
- }
-
- /*
- * Read modem signals, and then call carrier function.
- */
- ch->ch_mistat = readb(&(bs->m_stat));
- dgap_carrier(ch);
-
- /*
- * Set the start and stop characters.
- */
- if (ch->ch_startc != ch->ch_fepstartc || ch->ch_stopc != ch->ch_fepstopc) {
- ch->ch_fepstartc = ch->ch_startc;
- ch->ch_fepstopc = ch->ch_stopc;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
- }
-
- /*
- * Set the Auxiliary start and stop characters.
- */
- if (ch->ch_astartc != ch->ch_fepastartc || ch->ch_astopc != ch->ch_fepastopc) {
- ch->ch_fepastartc = ch->ch_astartc;
- ch->ch_fepastopc = ch->ch_astopc;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
- }
-
- DPR_PARAM(("param finish\n"));
-
- return 0;
-}
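
The legacy baud selection above is a 4 x 16 table lookup: the row is chosen by whether CBAUDEX is set and whether the channel is in Digi fastbaud mode, and the column is the low nibble of the CBAUD field (C_BAUD(tty) & 0xff). A condensed sketch of the row selection, pulled out into a hypothetical helper:

    /* Row 0: normal, 1: CBAUDEX, 2: DIGI_FAST, 3: DIGI_FAST and CBAUDEX. */
    static int dgap_baud_row(tcflag_t cflag, uint digi_flags)
    {
            int row = 0;

            if (cflag & CBAUDEX)
                    row += 1;
            if (digi_flags & DIGI_FAST)
                    row += 2;
            return row;
    }

An out-of-range index, or a table entry of 0, falls back to 9600 baud as in the code above.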
-
-
-/*
- * dgap_parity_scan()
- *
- * Convert the FEP5 way of reporting parity errors and breaks into
- * the Linux line discipline way.
- */
-void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *fbuf, int *len)
-{
- int l = *len;
- int count = 0;
- unsigned char *in, *cout, *fout;
- unsigned char c;
-
- in = cbuf;
- cout = cbuf;
- fout = fbuf;
-
- DPR_PSCAN(("dgap_parity_scan start\n"));
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- while (l--) {
- c = *in++;
- switch (ch->pscan_state) {
- default:
- /* reset to sanity and fall through */
- ch->pscan_state = 0;
-
- case 0:
- /* No FF seen yet */
- if (c == (unsigned char) '\377') {
- /* delete this character from stream */
- ch->pscan_state = 1;
- } else {
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- }
- break;
-
- case 1:
- /* first FF seen */
- if (c == (unsigned char) '\377') {
- /* doubled ff, transform to single ff */
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- ch->pscan_state = 0;
- } else {
- /* save value for examination in next state */
- ch->pscan_savechar = c;
- ch->pscan_state = 2;
- }
- break;
-
- case 2:
- /* third character of ff sequence */
-
- *cout++ = c;
-
- if (ch->pscan_savechar == 0x0) {
-
- if (c == 0x0) {
- DPR_PSCAN(("dgap_parity_scan in 3rd char of ff seq. c: %x setting break.\n", c));
- ch->ch_err_break++;
- *fout++ = TTY_BREAK;
- }
- else {
- DPR_PSCAN(("dgap_parity_scan in 3rd char of ff seq. c: %x setting parity.\n", c));
- ch->ch_err_parity++;
- *fout++ = TTY_PARITY;
- }
- }
- else {
- DPR_PSCAN(("%s:%d Logic Error.\n", __FILE__, __LINE__));
- }
-
- count += 1;
- ch->pscan_state = 0;
- }
- }
- *len = count;
- DPR_PSCAN(("dgap_parity_scan finish\n"));
-}
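
For reference, the FEP5 escape encoding this state machine decodes: 0xFF introduces an exception, a doubled 0xFF is a literal 0xFF data byte, and 0xFF 0x00 <c> marks <c> as received in error, with <c> == 0x00 meaning break and anything else meaning a parity error. A few example inputs and the character/flag pairs produced above:

    /*
     *   0x41            -> ('A',  TTY_NORMAL)
     *   0xFF 0xFF       -> (0xFF, TTY_NORMAL)
     *   0xFF 0x00 0x00  -> (0x00, TTY_BREAK)
     *   0xFF 0x00 0x41  -> ('A',  TTY_PARITY)
     */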
-
-
-
-
-/*=======================================================================
- *
- * dgap_event - FEP to host event processing routine.
- *
- * bd - Board of current event.
- *
- *=======================================================================*/
-static int dgap_event(struct board_t *bd)
-{
- struct channel_t *ch;
- ulong lock_flags;
- ulong lock_flags2;
- struct bs_t *bs;
- uchar *event;
- uchar *vaddr = NULL;
- struct ev_t *eaddr = NULL;
- uint head;
- uint tail;
- int port;
- int reason;
- int modem;
- int b1;
-
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -ENXIO;
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- if (!vaddr) {
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return -ENXIO;
- }
-
- eaddr = (struct ev_t *) (vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&(eaddr->ev_head));
- tail = readw(&(eaddr->ev_tail));
-
- /*
- * Forget it if pointers out of range.
- */
-
- if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
- (head | tail) & 03) {
- DPR_EVENT(("should be calling xxfail %d\n", __LINE__));
- /* Let go of board lock */
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return -ENXIO;
- }
-
- /*
- * Loop to process all the events in the buffer.
- */
- while (tail != head) {
-
- /*
- * Get interrupt information.
- */
-
- event = bd->re_map_membase + tail + EVSTART;
-
- port = event[0];
- reason = event[1];
- modem = event[2];
- b1 = event[3];
-
- DPR_EVENT(("event: jiffies: %ld port: %d reason: %x modem: %x\n",
- jiffies, port, reason, modem));
-
- /*
- * Make sure the interrupt is valid.
- */
- if (port >= bd->nasync)
- goto next;
-
- if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA))) {
- goto next;
- }
-
- ch = bd->channels[port];
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
- goto next;
- }
-
- /*
- * If we have made it here, the event was valid.
- * Lock down the channel.
- */
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- bs = ch->ch_bs;
-
- if (!bs) {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- goto next;
- }
-
- /*
- * Process received data.
- */
- if (reason & IFDATA) {
-
- /*
- * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
- * input could send some data to ld, which in turn
- * could do a callback to one of our other functions.
- */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- dgap_input(ch);
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- if (ch->ch_flags & CH_RACTIVE)
- ch->ch_flags |= CH_RENABLE;
- else
- writeb(1, &(bs->idata));
-
- if (ch->ch_flags & CH_RWAIT) {
- ch->ch_flags &= ~CH_RWAIT;
-
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- }
-
- /*
- * Process Modem change signals.
- */
- if (reason & IFMODEM) {
- ch->ch_mistat = modem;
- dgap_carrier(ch);
- }
-
- /*
- * Process break.
- */
- if (reason & IFBREAK) {
-
- DPR_EVENT(("got IFBREAK\n"));
-
- if (ch->ch_tun.un_tty) {
- /* A break has been indicated */
- ch->ch_err_break++;
- tty_buffer_request_room(ch->ch_tun.un_tty->port, 1);
- tty_insert_flip_char(ch->ch_tun.un_tty->port, 0, TTY_BREAK);
- tty_flip_buffer_push(ch->ch_tun.un_tty->port);
- }
- }
-
- /*
- * Process Transmit low.
- */
- if (reason & IFTLW) {
-
- DPR_EVENT(("event: got low event\n"));
-
- if (ch->ch_tun.un_flags & UN_LOW) {
- ch->ch_tun.un_flags &= ~UN_LOW;
-
- if (ch->ch_tun.un_flags & UN_ISOPEN) {
- if ((ch->ch_tun.un_tty->flags &
- (1 << TTY_DO_WRITE_WAKEUP)) &&
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
-#else
- ch->ch_tun.un_tty->ldisc.ops->write_wakeup)
-#endif
- {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
-#else
- (ch->ch_tun.un_tty->ldisc.ops->write_wakeup)(ch->ch_tun.un_tty);
-#endif
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
- wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
-
- DPR_EVENT(("event: Got low event. jiffies: %lu\n", jiffies));
- }
- }
-
- if (ch->ch_pun.un_flags & UN_LOW) {
- ch->ch_pun.un_flags &= ~UN_LOW;
- if (ch->ch_pun.un_flags & UN_ISOPEN) {
- if ((ch->ch_pun.un_tty->flags &
- (1 << TTY_DO_WRITE_WAKEUP)) &&
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
-#else
- ch->ch_pun.un_tty->ldisc.ops->write_wakeup)
-#endif
- {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
-#else
- (ch->ch_pun.un_tty->ldisc.ops->write_wakeup)(ch->ch_pun.un_tty);
-#endif
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
- wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
- }
-
- if (ch->ch_flags & CH_WLOW) {
- ch->ch_flags &= ~CH_WLOW;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- /*
- * Process Transmit empty.
- */
- if (reason & IFTEM) {
- DPR_EVENT(("event: got empty event\n"));
-
- if (ch->ch_tun.un_flags & UN_EMPTY) {
- ch->ch_tun.un_flags &= ~UN_EMPTY;
- if (ch->ch_tun.un_flags & UN_ISOPEN) {
- if ((ch->ch_tun.un_tty->flags &
- (1 << TTY_DO_WRITE_WAKEUP)) &&
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
-#else
- ch->ch_tun.un_tty->ldisc.ops->write_wakeup)
-#endif
- {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
-#else
- (ch->ch_tun.un_tty->ldisc.ops->write_wakeup)(ch->ch_tun.un_tty);
-#endif
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
- wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- }
-
- if (ch->ch_pun.un_flags & UN_EMPTY) {
- ch->ch_pun.un_flags &= ~UN_EMPTY;
- if (ch->ch_pun.un_flags & UN_ISOPEN) {
- if ((ch->ch_pun.un_tty->flags &
- (1 << TTY_DO_WRITE_WAKEUP)) &&
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
-#else
- ch->ch_pun.un_tty->ldisc.ops->write_wakeup)
-#endif
- {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
-#else
- (ch->ch_pun.un_tty->ldisc.ops->write_wakeup)(ch->ch_pun.un_tty);
-#endif
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
- wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
- }
-
-
- if (ch->ch_flags & CH_WEMPTY) {
- ch->ch_flags &= ~CH_WEMPTY;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
-
-next:
- tail = (tail + 4) & (EVMAX - EVSTART - 4);
- }
-
- writew(tail, &(eaddr->ev_tail));
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- return 0;
-}
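
Each entry in the event ring consumed above is a 4-byte record: channel number, reason bitmask (IFDATA, IFMODEM, IFBREAK, IFTLW, IFTEM), the current modem bits, and a fourth byte that is read into b1 but not used here. A sketch of that layout (the struct itself is hypothetical; the driver indexes the bytes directly):

    struct dgap_fep_event {              /* one record at tail + EVSTART */
            unsigned char port;          /* event[0]: channel number    */
            unsigned char reason;        /* event[1]: IF* bitmask       */
            unsigned char modem;         /* event[2]: modem signal bits */
            unsigned char spare;         /* event[3]: read, unused here */
    };

    /* consume one record, wrapping inside the EVSTART..EVMAX window */
    tail = (tail + 4) & (EVMAX - EVSTART - 4);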
diff --git a/drivers/staging/dgap/dgap_fep5.h b/drivers/staging/dgap/dgap_fep5.h
deleted file mode 100644
index c9abc406a1e0..000000000000
--- a/drivers/staging/dgap/dgap_fep5.h
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- ************************************************************************
- *** FEP Version 5 dependent definitions
- ************************************************************************/
-
-#ifndef __DGAP_FEP5_H
-#define __DGAP_FEP5_H
-
-/************************************************************************
- * FEP memory offsets
- ************************************************************************/
-#define START 0x0004L /* Execution start address */
-
-#define CMDBUF 0x0d10L /* Command (cm_t) structure offset */
-#define CMDSTART 0x0400L /* Start of command buffer */
-#define CMDMAX 0x0800L /* End of command buffer */
-
-#define EVBUF 0x0d18L /* Event (ev_t) structure */
-#define EVSTART 0x0800L /* Start of event buffer */
-#define EVMAX 0x0c00L /* End of event buffer */
-#define FEP5_PLUS 0x0E40 /* ASCII '5' and ASCII 'A' is here */
-#define ECS_SEG 0x0E44 /* Segment of the extended channel structure */
-#define LINE_SPEED 0x10 /* Offset into ECS_SEG for line speed */
- /* if the fep has extended capabilities */
-
-/* BIOS MAGIC SPOTS */
-#define ERROR 0x0C14L /* BIOS error code */
-#define SEQUENCE 0x0C12L /* BIOS sequence indicator */
-#define POSTAREA 0x0C00L /* POST complete message area */
-
-/* FEP MAGIC SPOTS */
-#define FEPSTAT POSTAREA /* OS here when FEP comes up */
-#define NCHAN 0x0C02L /* number of ports FEP sees */
-#define PANIC 0x0C10L /* PANIC area for FEP */
-#define KMEMEM 0x0C30L /* Memory for KME use */
-#define CONFIG 0x0CD0L /* Concentrator configuration info */
-#define CONFIGSIZE 0x0030 /* configuration info size */
-#define DOWNREQ 0x0D00 /* Download request buffer pointer */
-
-#define CHANBUF 0x1000L /* Async channel (bs_t) structs */
-#define FEPOSSIZE 0x1FFF /* 8K FEPOS */
-
-#define XEMPORTS 0xC02 /*
- * Offset in board memory where FEP5 stores
- * how many ports it has detected.
- * NOTE: FEP5 reports 64 ports when the user
- * has the cable in EBI OUT instead of EBI IN.
- */
-
-#define FEPCLR 0x00
-#define FEPMEM 0x02
-#define FEPRST 0x04
-#define FEPINT 0x08
-#define FEPMASK 0x0e
-#define FEPWIN 0x80
-
-#define LOWMEM 0x0100
-#define HIGHMEM 0x7f00
-
-#define FEPTIMEOUT 200000
-
-#define ENABLE_INTR 0x0e04 /* Enable interrupts flag */
-#define FEPPOLL_MIN 1 /* minimum of 1 millisecond */
-#define FEPPOLL_MAX 20 /* maximum of 20 milliseconds */
-#define FEPPOLL 0x0c26 /* Fep event poll interval */
-
-#define IALTPIN 0x0080 /* Input flag to swap DSR <-> DCD */
-
-/************************************************************************
- * Command structure definition.
- ************************************************************************/
-struct cm_t {
- volatile unsigned short cm_head; /* Command buffer head offset */
- volatile unsigned short cm_tail; /* Command buffer tail offset */
- volatile unsigned short cm_start; /* start offset of buffer */
- volatile unsigned short cm_max; /* last offset of buffer */
-};
-
-/************************************************************************
- * Event structure definition.
- ************************************************************************/
-struct ev_t {
- volatile unsigned short ev_head; /* Command buffer head offset */
- volatile unsigned short ev_tail; /* Command buffer tail offset */
- volatile unsigned short ev_start; /* start offset of buffer */
- volatile unsigned short ev_max; /* last offset of buffer */
-};
-
-/************************************************************************
- * Download buffer structure.
- ************************************************************************/
-struct downld_t {
- uchar dl_type; /* Header */
- uchar dl_seq; /* Download sequence */
- ushort dl_srev; /* Software revision number */
- ushort dl_lrev; /* Low revision number */
- ushort dl_hrev; /* High revision number */
- ushort dl_seg; /* Start segment address */
- ushort dl_size; /* Number of bytes to download */
- uchar dl_data[1024]; /* Download data */
-};
-
-/************************************************************************
- * Per channel buffer structure
- ************************************************************************
- * Base Structure Entries Usage Meanings to Host *
- * *
- * W = read write R = read only *
- * C = changed by commands only *
- * U = unknown (may be changed w/o notice) *
- ************************************************************************/
-struct bs_t {
- volatile unsigned short tp_jmp; /* Transmit poll jump */
- volatile unsigned short tc_jmp; /* Cooked procedure jump */
- volatile unsigned short ri_jmp; /* Not currently used */
- volatile unsigned short rp_jmp; /* Receive poll jump */
-
- volatile unsigned short tx_seg; /* W Tx segment */
- volatile unsigned short tx_head; /* W Tx buffer head offset */
- volatile unsigned short tx_tail; /* R Tx buffer tail offset */
- volatile unsigned short tx_max; /* W Tx buffer size - 1 */
-
- volatile unsigned short rx_seg; /* W Rx segment */
- volatile unsigned short rx_head; /* W Rx buffer head offset */
- volatile unsigned short rx_tail; /* R Rx buffer tail offset */
- volatile unsigned short rx_max; /* W Rx buffer size - 1 */
-
- volatile unsigned short tx_lw; /* W Tx buffer low water mark */
- volatile unsigned short rx_lw; /* W Rx buffer low water mark */
- volatile unsigned short rx_hw; /* W Rx buffer high water mark */
- volatile unsigned short incr; /* W Increment to next channel */
-
- volatile unsigned short fepdev; /* U SCC device base address */
- volatile unsigned short edelay; /* W Exception delay */
- volatile unsigned short blen; /* W Break length */
- volatile unsigned short btime; /* U Break complete time */
-
- volatile unsigned short iflag; /* C UNIX input flags */
- volatile unsigned short oflag; /* C UNIX output flags */
- volatile unsigned short cflag; /* C UNIX control flags */
- volatile unsigned short wfill[13]; /* U Reserved for expansion */
-
- volatile unsigned char num; /* U Channel number */
- volatile unsigned char ract; /* U Receiver active counter */
- volatile unsigned char bstat; /* U Break status bits */
- volatile unsigned char tbusy; /* W Transmit busy */
- volatile unsigned char iempty; /* W Transmit empty event enable */
- volatile unsigned char ilow; /* W Transmit low-water event enable */
- volatile unsigned char idata; /* W Receive data interrupt enable */
- volatile unsigned char eflag; /* U Host event flags */
-
- volatile unsigned char tflag; /* U Transmit flags */
- volatile unsigned char rflag; /* U Receive flags */
- volatile unsigned char xmask; /* U Transmit ready flags */
- volatile unsigned char xval; /* U Transmit ready value */
- volatile unsigned char m_stat; /* RC Modem status bits */
- volatile unsigned char m_change; /* U Modem bits which changed */
- volatile unsigned char m_int; /* W Modem interrupt enable bits */
- volatile unsigned char m_last; /* U Last modem status */
-
- volatile unsigned char mtran; /* C Unreported modem trans */
- volatile unsigned char orun; /* C Buffer overrun occurred */
- volatile unsigned char astartc; /* W Auxiliary Xon char */
- volatile unsigned char astopc; /* W Auxiliary Xoff char */
- volatile unsigned char startc; /* W Xon character */
- volatile unsigned char stopc; /* W Xoff character */
- volatile unsigned char vnextc; /* W Vnext character */
- volatile unsigned char hflow; /* C Software flow control */
-
- volatile unsigned char fillc; /* U Delay Fill character */
- volatile unsigned char ochar; /* U Saved output character */
- volatile unsigned char omask; /* U Output character mask */
-
- volatile unsigned char bfill[13]; /* U Reserved for expansion */
-
- volatile unsigned char scc[16]; /* U SCC registers */
-};
-
-
-/************************************************************************
- * FEP supported functions
- ************************************************************************/
-#define SRLOW 0xe0 /* Set receive low water */
-#define SRHIGH 0xe1 /* Set receive high water */
-#define FLUSHTX 0xe2 /* Flush transmit buffer */
-#define PAUSETX 0xe3 /* Pause data transmission */
-#define RESUMETX 0xe4 /* Resume data transmission */
-#define SMINT 0xe5 /* Set Modem Interrupt */
-#define SAFLOWC 0xe6 /* Set Aux. flow control chars */
-#define SBREAK 0xe8 /* Send break */
-#define SMODEM 0xe9 /* Set 8530 modem control lines */
-#define SIFLAG 0xea /* Set UNIX iflags */
-#define SFLOWC 0xeb /* Set flow control characters */
-#define STLOW 0xec /* Set transmit low water mark */
-#define RPAUSE 0xee /* Pause receive */
-#define RRESUME 0xef /* Resume receive */
-#define CHRESET 0xf0 /* Reset Channel */
-#define BUFSETALL 0xf2 /* Set Tx & Rx buffer size avail*/
-#define SOFLAG 0xf3 /* Set UNIX oflags */
-#define SHFLOW 0xf4 /* Set hardware handshake */
-#define SCFLAG 0xf5 /* Set UNIX cflags */
-#define SVNEXT 0xf6 /* Set VNEXT character */
-#define SPINTFC 0xfc /* Reserved */
-#define SCOMMODE 0xfd /* Set RS232/422 mode */
-
-
-/************************************************************************
- * Modes for SCOMMODE
- ************************************************************************/
-#define MODE_232 0x00
-#define MODE_422 0x01
-
-
-/************************************************************************
- * Event flags.
- ************************************************************************/
-#define IFBREAK 0x01 /* Break received */
-#define IFTLW 0x02 /* Transmit low water */
-#define IFTEM 0x04 /* Transmitter empty */
-#define IFDATA 0x08 /* Receive data present */
-#define IFMODEM 0x20 /* Modem status change */
-
-/************************************************************************
- * Modem flags
- ************************************************************************/
-# define DM_RTS 0x02 /* Request to send */
-# define DM_CD 0x80 /* Carrier detect */
-# define DM_DSR 0x20 /* Data set ready */
-# define DM_CTS 0x10 /* Clear to send */
-# define DM_RI 0x40 /* Ring indicator */
-# define DM_DTR 0x01 /* Data terminal ready */
-
-
-#endif
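
The DM_* bits describe the modem-status byte the FEP keeps per channel in bs_t.m_stat, which the driver reads with readb(&bs->m_stat). A minimal sketch of decoding it, ignoring the DIGI_ALTPIN DSR/DCD swap that the driver's D_*(ch) macros account for:

    unsigned char mstat = readb(&bs->m_stat);
    int carrier = !!(mstat & DM_CD);     /* carrier detect */
    int cts     = !!(mstat & DM_CTS);    /* clear to send  */
    int ring    = !!(mstat & DM_RI);     /* ring indicator */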
diff --git a/drivers/staging/dgap/dgap_kcompat.h b/drivers/staging/dgap/dgap_kcompat.h
deleted file mode 100644
index 0dc2404922ff..000000000000
--- a/drivers/staging/dgap/dgap_kcompat.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 2004 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- *************************************************************************
- *
- * This file is intended to contain all the kernel "differences" between the
- * various kernels that we support.
- *
- *************************************************************************/
-
-#ifndef __DGAP_KCOMPAT_H
-#define __DGAP_KCOMPAT_H
-
-#if !defined(TTY_FLIPBUF_SIZE)
-# define TTY_FLIPBUF_SIZE 512
-#endif
-
-
-/* Sparse stuff */
-# ifndef __user
-# define __user
-# define __kernel
-# define __safe
-# define __force
-# define __chk_user_ptr(x) (void)0
-# endif
-
-
-# define PARM_STR(VAR, INIT, PERM, DESC) \
- static char *VAR = INIT; \
- char *dgap_##VAR; \
- module_param(VAR, charp, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
-# define PARM_INT(VAR, INIT, PERM, DESC) \
- static int VAR = INIT; \
- int dgap_##VAR; \
- module_param(VAR, int, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
-# define PARM_ULONG(VAR, INIT, PERM, DESC) \
- static ulong VAR = INIT; \
- ulong dgap_##VAR; \
- module_param(VAR, long, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
-#endif /* ! __DGAP_KCOMPAT_H */
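
Each PARM_* macro declares a static module parameter plus an exported dgap_-prefixed copy of the same value. A hypothetical invocation and its rough expansion (the parameter name is illustrative):

    /*
     *   PARM_INT(rawreadok, 1, 0644, "Bypass flip buffers on input");
     *
     * expands to roughly:
     *
     *   static int rawreadok = 1;
     *   int dgap_rawreadok;
     *   module_param(rawreadok, int, 0644);
     *   MODULE_PARM_DESC(rawreadok, "Bypass flip buffers on input");
     */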
diff --git a/drivers/staging/dgap/dgap_parse.c b/drivers/staging/dgap/dgap_parse.c
deleted file mode 100644
index 36fd93d3f5f6..000000000000
--- a/drivers/staging/dgap/dgap_parse.c
+++ /dev/null
@@ -1,1374 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- *
- *
- *****************************************************************************
- *
- * dgap_parse.c - Parses the configuration information from the input file.
- *
- * $Id: dgap_parse.c,v 1.1 2009/10/23 14:01:57 markh Exp $
- *
- */
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/slab.h>
-
-#include "dgap_types.h"
-#include "dgap_fep5.h"
-#include "dgap_driver.h"
-#include "dgap_parse.h"
-#include "dgap_conf.h"
-
-
-/*
- * Function prototypes.
- */
-static int dgap_gettok(char **in, struct cnode *p);
-static char *dgap_getword(char **in);
-static char *dgap_savestring(char *s);
-static struct cnode *dgap_newnode(int t);
-static int dgap_checknode(struct cnode *p);
-static void dgap_err(char *s);
-
-/*
- * Our needed internal static variables...
- */
-static struct cnode dgap_head;
-#define MAXCWORD 200
-static char dgap_cword[MAXCWORD];
-
-struct toklist {
- int token;
- char *string;
-};
-
-static struct toklist dgap_tlist[] = {
- { BEGIN, "config_begin" },
- { END, "config_end" },
- { BOARD, "board" },
- { PCX, "Digi_AccelePort_C/X_PCI" }, /* C/X_PCI */
- { PEPC, "Digi_AccelePort_EPC/X_PCI" }, /* EPC/X_PCI */
- { PPCM, "Digi_AccelePort_Xem_PCI" }, /* PCI/Xem */
- { APORT2_920P, "Digi_AccelePort_2r_920_PCI" },
- { APORT4_920P, "Digi_AccelePort_4r_920_PCI" },
- { APORT8_920P, "Digi_AccelePort_8r_920_PCI" },
- { PAPORT4, "Digi_AccelePort_4r_PCI(EIA-232/RS-422)" },
- { PAPORT8, "Digi_AccelePort_8r_PCI(EIA-232/RS-422)" },
- { IO, "io" },
- { PCIINFO, "pciinfo" },
- { LINE, "line" },
- { CONC, "conc" },
- { CONC, "concentrator" },
- { CX, "cx" },
- { CX, "ccon" },
- { EPC, "epccon" },
- { EPC, "epc" },
- { MOD, "module" },
- { ID, "id" },
- { STARTO, "start" },
- { SPEED, "speed" },
- { CABLE, "cable" },
- { CONNECT, "connect" },
- { METHOD, "method" },
- { STATUS, "status" },
- { CUSTOM, "Custom" },
- { BASIC, "Basic" },
- { MEM, "mem" },
- { MEM, "memory" },
- { PORTS, "ports" },
- { MODEM, "modem" },
- { NPORTS, "nports" },
- { TTYN, "ttyname" },
- { CU, "cuname" },
- { PRINT, "prname" },
- { CMAJOR, "major" },
- { ALTPIN, "altpin" },
- { USEINTR, "useintr" },
- { TTSIZ, "ttysize" },
- { CHSIZ, "chsize" },
- { BSSIZ, "boardsize" },
- { UNTSIZ, "schedsize" },
- { F2SIZ, "f2200size" },
- { VPSIZ, "vpixsize" },
- { 0, NULL }
-};
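
The token table above defines the keyword vocabulary dgap_parsefile accepts. A hedged sketch of a minimal configuration fragment built only from those keywords (values are illustrative, and the rest of the driver may require additional line, concentrator, or module entries that are not shown):

    config_begin
    board   Digi_AccelePort_Xem_PCI
            pciinfo 0 4
            nports  16
    ttyname ttya
    config_end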
-
-
-/*
- * Parse a configuration file read into memory as a string.
- */
-int dgap_parsefile(char **in, int Remove)
-{
- struct cnode *p, *brd, *line, *conc;
- int rc;
- char *s = NULL, *s2 = NULL;
- int linecnt = 0;
-
- p = &dgap_head;
- brd = line = conc = NULL;
-
- /* perhaps we are adding to an existing list? */
- while (p->next != NULL) {
- p = p->next;
- }
-
- /* file must start with a BEGIN */
- while ( (rc = dgap_gettok(in,p)) != BEGIN ) {
- if (rc == 0) {
- dgap_err("unexpected EOF");
- return(-1);
- }
- }
-
- for (; ; ) {
- rc = dgap_gettok(in,p);
- if (rc == 0) {
- dgap_err("unexpected EOF");
- return(-1);
- }
-
- switch (rc) {
- case 0:
- dgap_err("unexpected end of file");
- return(-1);
-
- case BEGIN: /* should only be 1 begin */
- dgap_err("unexpected config_begin\n");
- return(-1);
-
- case END:
- return(0);
-
- case BOARD: /* board info */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(BNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
-
- p->u.board.status = dgap_savestring("No");
- line = conc = NULL;
- brd = p;
- linecnt = -1;
- break;
-
- case APORT2_920P: /* AccelePort_2r_920 */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_2r_920 string");
- return(-1);
- }
- p->u.board.type = APORT2_920P;
- p->u.board.v_type = 1;
- DPR_INIT(("Adding Digi_2r_920 PCI to config...\n"));
- break;
-
- case APORT4_920P: /* AccelePort_4 */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_4r_920 string");
- return(-1);
- }
- p->u.board.type = APORT4_920P;
- p->u.board.v_type = 1;
- DPR_INIT(("Adding Digi_4r_920 PCI to config...\n"));
- break;
-
- case APORT8_920P: /* AccelePort_8 */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_8r_920 string");
- return(-1);
- }
- p->u.board.type = APORT8_920P;
- p->u.board.v_type = 1;
- DPR_INIT(("Adding Digi_8r_920 PCI to config...\n"));
- break;
-
- case PAPORT4: /* AccelePort_4 PCI */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_4r(PCI) string");
- return(-1);
- }
- p->u.board.type = PAPORT4;
- p->u.board.v_type = 1;
- DPR_INIT(("Adding Digi_4r PCI to config...\n"));
- break;
-
- case PAPORT8: /* AccelePort_8 PCI */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_8r string");
- return(-1);
- }
- p->u.board.type = PAPORT8;
- p->u.board.v_type = 1;
- DPR_INIT(("Adding Digi_8r PCI to config...\n"));
- break;
-
- case PCX: /* PCI C/X */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_C/X_(PCI) string");
- return(-1);
- }
- p->u.board.type = PCX;
- p->u.board.v_type = 1;
- p->u.board.conc1 = 0;
- p->u.board.conc2 = 0;
- p->u.board.module1 = 0;
- p->u.board.module2 = 0;
- DPR_INIT(("Adding PCI C/X to config...\n"));
- break;
-
- case PEPC: /* PCI EPC/X */
- if (p->type != BNODE) {
- dgap_err("unexpected \"Digi_EPC/X_(PCI)\" string");
- return(-1);
- }
- p->u.board.type = PEPC;
- p->u.board.v_type = 1;
- p->u.board.conc1 = 0;
- p->u.board.conc2 = 0;
- p->u.board.module1 = 0;
- p->u.board.module2 = 0;
- DPR_INIT(("Adding PCI EPC/X to config...\n"));
- break;
-
- case PPCM: /* PCI/Xem */
- if (p->type != BNODE) {
- dgap_err("unexpected PCI/Xem string");
- return(-1);
- }
- p->u.board.type = PPCM;
- p->u.board.v_type = 1;
- p->u.board.conc1 = 0;
- p->u.board.conc2 = 0;
- DPR_INIT(("Adding PCI XEM to config...\n"));
- break;
-
- case IO: /* i/o port */
- if (p->type != BNODE) {
- dgap_err("IO port only vaild for boards");
- return(-1);
- }
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.portstr = dgap_savestring(s);
- p->u.board.port = (short)simple_strtol(s, &s2, 0);
- if ((short)strlen(s) > (short)(s2 - s)) {
- dgap_err("bad number for IO port");
- return(-1);
- }
- p->u.board.v_port = 1;
- DPR_INIT(("Adding IO (%s) to config...\n", s));
- break;
-
- case MEM: /* memory address */
- if (p->type != BNODE) {
- dgap_err("memory address only vaild for boards");
- return(-1);
- }
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.addrstr = dgap_savestring(s);
- p->u.board.addr = simple_strtoul(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for memory address");
- return(-1);
- }
- p->u.board.v_addr = 1;
- DPR_INIT(("Adding MEM (%s) to config...\n", s));
- break;
-
- case PCIINFO: /* pci information */
- if (p->type != BNODE) {
- dgap_err("memory address only vaild for boards");
- return(-1);
- }
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.pcibusstr = dgap_savestring(s);
- p->u.board.pcibus = simple_strtoul(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for pci bus");
- return(-1);
- }
- p->u.board.v_pcibus = 1;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.pcislotstr = dgap_savestring(s);
- p->u.board.pcislot = simple_strtoul(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for pci slot");
- return(-1);
- }
- p->u.board.v_pcislot = 1;
-
- DPR_INIT(("Adding PCIINFO (%s %s) to config...\n", p->u.board.pcibusstr,
- p->u.board.pcislotstr));
- break;
-
- case METHOD:
- if (p->type != BNODE) {
- dgap_err("install method only vaild for boards");
- return(-1);
- }
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.method = dgap_savestring(s);
- p->u.board.v_method = 1;
- DPR_INIT(("Adding METHOD (%s) to config...\n", s));
- break;
-
- case STATUS:
- if (p->type != BNODE) {
- dgap_err("config status only vaild for boards");
- return(-1);
- }
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.status = dgap_savestring(s);
- DPR_INIT(("Adding STATUS (%s) to config...\n", s));
- break;
-
- case NPORTS: /* number of ports */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.nport = (char)simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for number of ports");
- return(-1);
- }
- p->u.board.v_nport = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.conc.nport = (char)simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for number of ports");
- return(-1);
- }
- p->u.conc.v_nport = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.module.nport = (char)simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for number of ports");
- return(-1);
- }
- p->u.module.v_nport = 1;
- } else {
- dgap_err("nports only valid for concentrators or modules");
- return(-1);
- }
- DPR_INIT(("Adding NPORTS (%s) to config...\n", s));
- break;
-
- case ID: /* letter ID used in tty name */
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
-
- p->u.board.status = dgap_savestring(s);
-
- if (p->type == CNODE) {
- p->u.conc.id = dgap_savestring(s);
- p->u.conc.v_id = 1;
- } else if (p->type == MNODE) {
- p->u.module.id = dgap_savestring(s);
- p->u.module.v_id = 1;
- } else {
- dgap_err("id only valid for concentrators or modules");
- return(-1);
- }
- DPR_INIT(("Adding ID (%s) to config...\n", s));
- break;
-
- case STARTO: /* start offset of ID */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.start = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for start of tty count");
- return(-1);
- }
- p->u.board.v_start = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.conc.start = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for start of tty count");
- return(-1);
- }
- p->u.conc.v_start = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.module.start = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for start of tty count");
- return(-1);
- }
- p->u.module.v_start = 1;
- } else {
- dgap_err("start only valid for concentrators or modules");
- return(-1);
- }
- DPR_INIT(("Adding START (%s) to config...\n", s));
- break;
-
- case TTYN: /* tty name prefix */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(TNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- if ( (s = dgap_getword(in)) == NULL ) {
- dgap_err("unexpeced end of file");
- return(-1);
- }
- if ( (p->u.ttyname = dgap_savestring(s)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- DPR_INIT(("Adding TTY (%s) to config...\n", s));
- break;
-
- case CU: /* cu name prefix */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(CUNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- if ( (s = dgap_getword(in)) == NULL ) {
- dgap_err("unexpeced end of file");
- return(-1);
- }
- if ( (p->u.cuname = dgap_savestring(s)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- DPR_INIT(("Adding CU (%s) to config...\n", s));
- break;
-
- case LINE: /* line information */
- if (dgap_checknode(p))
- return(-1);
- if (brd == NULL) {
- dgap_err("must specify board before line info");
- return(-1);
- }
- switch (brd->u.board.type) {
- case PPCM:
- dgap_err("line not vaild for PC/em");
- return(-1);
- }
- if ( (p->next = dgap_newnode(LNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- conc = NULL;
- line = p;
- linecnt++;
- DPR_INIT(("Adding LINE to config...\n"));
- break;
-
- case CONC: /* concentrator information */
- if (dgap_checknode(p))
- return(-1);
- if (line == NULL) {
- dgap_err("must specify line info before concentrator");
- return(-1);
- }
- if ( (p->next = dgap_newnode(CNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- conc = p;
- if (linecnt)
- brd->u.board.conc2++;
- else
- brd->u.board.conc1++;
-
- DPR_INIT(("Adding CONC to config...\n"));
- break;
-
- case CX: /* c/x type concentrator */
- if (p->type != CNODE) {
- dgap_err("cx only valid for concentrators");
- return(-1);
- }
- p->u.conc.type = CX;
- p->u.conc.v_type = 1;
- DPR_INIT(("Adding CX to config...\n"));
- break;
-
- case EPC: /* epc type concentrator */
- if (p->type != CNODE) {
- dgap_err("cx only valid for concentrators");
- return(-1);
- }
- p->u.conc.type = EPC;
- p->u.conc.v_type = 1;
- DPR_INIT(("Adding EPC to config...\n"));
- break;
-
- case MOD: /* EBI module */
- if (dgap_checknode(p))
- return(-1);
- if (brd == NULL) {
- dgap_err("must specify board info before EBI modules");
- return(-1);
- }
- switch (brd->u.board.type) {
- case PPCM:
- linecnt = 0;
- break;
- default:
- if (conc == NULL) {
- dgap_err("must specify concentrator info before EBI module");
- return(-1);
- }
- }
- if ( (p->next = dgap_newnode(MNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- if (linecnt)
- brd->u.board.module2++;
- else
- brd->u.board.module1++;
-
- DPR_INIT(("Adding MOD to config...\n"));
- break;
-
- case PORTS: /* ports type EBI module */
- if (p->type != MNODE) {
- dgap_err("ports only valid for EBI modules");
- return(-1);
- }
- p->u.module.type = PORTS;
- p->u.module.v_type = 1;
- DPR_INIT(("Adding PORTS to config...\n"));
- break;
-
- case MODEM: /* modem type EBI module */
- if (p->type != MNODE) {
- dgap_err("modem only valid for modem modules");
- return(-1);
- }
- p->u.module.type = MODEM;
- p->u.module.v_type = 1;
- DPR_INIT(("Adding MODEM to config...\n"));
- break;
-
- case CABLE:
- if (p->type == LNODE) {
- if ((s = dgap_getword(in)) == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.line.cable = dgap_savestring(s);
- p->u.line.v_cable = 1;
- }
- DPR_INIT(("Adding CABLE (%s) to config...\n", s));
- break;
-
- case SPEED: /* sync line speed indication */
- if (p->type == LNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.line.speed = (char)simple_strtol(s, &s2, 0);
- if ((short)strlen(s) > (short)(s2 - s)) {
- dgap_err("bad number for line speed");
- return(-1);
- }
- p->u.line.v_speed = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.conc.speed = (char)simple_strtol(s, &s2, 0);
- if ((short)strlen(s) > (short)(s2 - s)) {
- dgap_err("bad number for line speed");
- return(-1);
- }
- p->u.conc.v_speed = 1;
- } else {
- dgap_err("speed valid only for lines or concentrators.");
- return(-1);
- }
- DPR_INIT(("Adding SPEED (%s) to config...\n", s));
- break;
-
- case CONNECT:
- if (p->type == CNODE) {
- if ((s = dgap_getword(in)) == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.conc.connect = dgap_savestring(s);
- p->u.conc.v_connect = 1;
- }
- DPR_INIT(("Adding CONNECT (%s) to config...\n", s));
- break;
- case PRINT: /* transparent print name prefix */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(PNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- if ( (s = dgap_getword(in)) == NULL ) {
-			dgap_err("unexpected end of file");
- return(-1);
- }
- if ( (p->u.printname = dgap_savestring(s)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- DPR_INIT(("Adding PRINT (%s) to config...\n", s));
- break;
-
- case CMAJOR: /* major number */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(JNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.majornumber = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for major number");
- return(-1);
- }
- DPR_INIT(("Adding CMAJOR (%s) to config...\n", s));
- break;
-
- case ALTPIN: /* altpin setting */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(ANODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.altpin = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for altpin");
- return(-1);
- }
- DPR_INIT(("Adding ALTPIN (%s) to config...\n", s));
- break;
-
- case USEINTR: /* enable interrupt setting */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(INTRNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.useintr = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for useintr");
- return(-1);
- }
- DPR_INIT(("Adding USEINTR (%s) to config...\n", s));
- break;
-
- case TTSIZ: /* size of tty structure */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(TSNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.ttysize = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for ttysize");
- return(-1);
- }
- DPR_INIT(("Adding TTSIZ (%s) to config...\n", s));
- break;
-
- case CHSIZ: /* channel structure size */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(CSNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.chsize = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for chsize");
- return(-1);
- }
- DPR_INIT(("Adding CHSIZE (%s) to config...\n", s));
- break;
-
- case BSSIZ: /* board structure size */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(BSNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.bssize = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for bssize");
- return(-1);
- }
- DPR_INIT(("Adding BSSIZ (%s) to config...\n", s));
- break;
-
- case UNTSIZ: /* sched structure size */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(USNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.unsize = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for schedsize");
- return(-1);
- }
- DPR_INIT(("Adding UNTSIZ (%s) to config...\n", s));
- break;
-
- case F2SIZ: /* f2200 structure size */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(FSNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.f2size = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for f2200size");
- return(-1);
- }
- DPR_INIT(("Adding F2SIZ (%s) to config...\n", s));
- break;
-
- case VPSIZ: /* vpix structure size */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(VSNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.vpixsize = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for vpixsize");
- return(-1);
- }
- DPR_INIT(("Adding VPSIZ (%s) to config...\n", s));
- break;
- }
- }
-}
-
-
-/*
- * dgap_sindex: much like index(), but it looks for a match of any character in
- * the group, and returns that position. If the first character is a ^, then
- * this will match the first occurrence not in that group.
- */
-static char *dgap_sindex (char *string, char *group)
-{
- char *ptr;
-
- if (!string || !group)
- return (char *) NULL;
-
- if (*group == '^') {
- group++;
- for (; *string; string++) {
- for (ptr = group; *ptr; ptr++) {
- if (*ptr == *string)
- break;
- }
- if (*ptr == '\0')
- return string;
- }
- }
- else {
- for (; *string; string++) {
- for (ptr = group; *ptr; ptr++) {
- if (*ptr == *string)
- return string;
- }
- }
- }
-
- return (char *) NULL;
-}
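
For reference, the helper above in its normal mode has the semantics of the standard strpbrk() -- return a pointer to the first character of string that appears in group -- while the '^' mode behaves essentially like string + strspn(string, group), skipping the leading run of group characters (apart from returning NULL rather than the end of the string when nothing matches). A minimal user-space sketch of the same behaviour, not driver code:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "PORTS 16";

	/* like dgap_sindex(buf, " \t\n"): first whitespace character */
	char *sep = strpbrk(buf, " \t\n");

	/* like dgap_sindex(buf, "^ \t\n"): first non-whitespace character */
	char *word = buf + strspn(buf, " \t\n");

	printf("sep=\"%s\" word=\"%s\"\n", sep, word);
	return 0;
}
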
-
-
-/*
- * Get a token from the input file; return 0 if end of file is reached
- */
-static int dgap_gettok(char **in, struct cnode *p)
-{
- char *w;
- struct toklist *t;
-
- if (strstr(dgap_cword, "boar")) {
- w = dgap_getword(in);
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_tlist; t->token != 0; t++) {
- if ( !strcmp(w, t->string)) {
- return(t->token);
- }
- }
-		dgap_err("board type not specified");
- return(1);
- }
- else {
- while ( (w = dgap_getword(in)) != NULL ) {
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_tlist; t->token != 0; t++) {
- if ( !strcmp(w, t->string) )
- return(t->token);
- }
- }
- return(0);
- }
-}
-
-
-/*
- * Get a word from the input stream; words are separated by
- * whitespace (spaces, tabs, or newlines).
- */
-static char *dgap_getword(char **in)
-{
- char *ret_ptr = *in;
-
- char *ptr = dgap_sindex(*in, " \t\n");
-
- /* If no word found, return null */
- if (!ptr)
- return NULL;
-
- /* Mark new location for our buffer */
- *ptr = '\0';
- *in = ptr + 1;
-
- /* Eat any extra spaces/tabs/newlines that might be present */
- while (*in && **in && ((**in == ' ') || (**in == '\t') || (**in == '\n'))) {
- **in = '\0';
- *in = *in + 1;
- }
-
- return ret_ptr;
-}
-
-
-/*
- * Print a configuration parse error message.
- */
-static void dgap_err(char *s)
-{
- printk("DGAP: parse: %s\n", s);
-}
-
-
-/*
- * allocate a new configuration node of type t
- */
-static struct cnode *dgap_newnode(int t)
-{
- struct cnode *n;
-
- n = kmalloc(sizeof(struct cnode), GFP_ATOMIC);
- if (n != NULL) {
- memset((char *)n, 0, sizeof(struct cnode));
- n->type = t;
- }
- return(n);
-}
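
The kmalloc() + memset() pair above is exactly what kzalloc() wraps; a minimal sketch of the equivalent, assuming the atomic allocation context still applies and using the driver's struct cnode:

#include <linux/slab.h>

/* Equivalent of the function above: allocate a zeroed cnode of type t. */
static struct cnode *dgap_newnode(int t)
{
	struct cnode *n = kzalloc(sizeof(*n), GFP_ATOMIC);

	if (n)
		n->type = t;
	return n;
}
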
-
-
-/*
- * dgap_checknode: see if all the necessary info has been supplied for a node
- * before creating the next node.
- */
-static int dgap_checknode(struct cnode *p)
-{
- switch (p->type) {
- case BNODE:
- if (p->u.board.v_type == 0) {
-			dgap_err("board type not specified");
- return(1);
- }
-
- return(0);
-
- case LNODE:
- if (p->u.line.v_speed == 0) {
- dgap_err("line speed not specified");
- return(1);
- }
- return(0);
-
- case CNODE:
- if (p->u.conc.v_type == 0) {
- dgap_err("concentrator type not specified");
- return(1);
- }
- if (p->u.conc.v_speed == 0) {
- dgap_err("concentrator line speed not specified");
- return(1);
- }
- if (p->u.conc.v_nport == 0) {
- dgap_err("number of ports on concentrator not specified");
- return(1);
- }
- if (p->u.conc.v_id == 0) {
- dgap_err("concentrator id letter not specified");
- return(1);
- }
- return(0);
-
- case MNODE:
- if (p->u.module.v_type == 0) {
- dgap_err("EBI module type not specified");
- return(1);
- }
- if (p->u.module.v_nport == 0) {
- dgap_err("number of ports on EBI module not specified");
- return(1);
- }
- if (p->u.module.v_id == 0) {
- dgap_err("EBI module id letter not specified");
- return(1);
- }
- return(0);
- }
- return(0);
-}
-
-/*
- * Duplicate a string into newly allocated memory.
- */
-static char *dgap_savestring(char *s)
-{
- char *p;
- if ( (p = kmalloc(strlen(s) + 1, GFP_ATOMIC) ) != NULL) {
- strcpy(p, s);
- }
- return(p);
-}
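
Similarly, dgap_savestring() re-implements the allocate-and-copy that kstrdup() already provides; a one-function sketch of the equivalent (again assuming GFP_ATOMIC is the required context):

#include <linux/slab.h>
#include <linux/string.h>

static char *dgap_savestring(const char *s)
{
	/* kstrdup() allocates strlen(s) + 1 bytes and copies the string */
	return kstrdup(s, GFP_ATOMIC);
}
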
-
-
-/*
- * Given a board pointer, returns whether we should use interrupts or not.
- */
-uint dgap_config_get_useintr(struct board_t *bd)
-{
- struct cnode *p = NULL;
-
- if (!bd)
- return(0);
-
- for (p = bd->bd_config; p; p = p->next) {
- switch (p->type) {
- case INTRNODE:
-			/* Return the user-configured interrupt setting. */
- return p->u.useintr;
- default:
- break;
- }
- }
-
- /* If not found, then don't turn on interrupts. */
- return 0;
-}
-
-
-/*
- * Given a board pointer, returns whether we turn on altpin or not.
- */
-uint dgap_config_get_altpin(struct board_t *bd)
-{
- struct cnode *p = NULL;
-
- if (!bd)
- return(0);
-
- for (p = bd->bd_config; p; p = p->next) {
- switch (p->type) {
- case ANODE:
-			/* Return the user-configured altpin setting. */
- return p->u.altpin;
- default:
- break;
- }
- }
-
-	/* If not found, then don't turn on altpin. */
- return 0;
-}
-
-
-
-/*
- * Given a specific type of board, if found, detaches that board's chain of
- * config nodes from the main list and returns the first occurrence.
- */
-struct cnode *dgap_find_config(int type, int bus, int slot)
-{
- struct cnode *p, *prev = NULL, *prev2 = NULL, *found = NULL;
-
- p = &dgap_head;
-
- while (p->next != NULL) {
- prev = p;
- p = p->next;
-
- if (p->type == BNODE) {
-
- if (p->u.board.type == type) {
-
- if (p->u.board.v_pcibus && p->u.board.pcibus != bus) {
- DPR(("Found matching board, but wrong bus position. System says bus %d, we want bus %ld\n",
- bus, p->u.board.pcibus));
- continue;
- }
- if (p->u.board.v_pcislot && p->u.board.pcislot != slot) {
- DPR_INIT(("Found matching board, but wrong slot position. System says slot %d, we want slot %ld\n",
- slot, p->u.board.pcislot));
- continue;
- }
-
- DPR_INIT(("Matched type in config file\n"));
-
- found = p;
- /*
- * Keep walking thru the list till we find the next board.
- */
- while (p->next != NULL) {
- prev2 = p;
- p = p->next;
- if (p->type == BNODE) {
-
- /*
- * Mark the end of our 1 board chain of configs.
- */
- prev2->next = NULL;
-
- /*
- * Link the "next" board to the previous board,
- * effectively "unlinking" our board from the main config.
- */
- prev->next = p;
-
- return found;
- }
- }
- /*
- * It must be the last board in the list.
- */
- prev->next = NULL;
- return found;
- }
- }
- }
- return NULL;
-}
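
A hypothetical call site for dgap_find_config(), assuming the usual PCI probe context; the bus and slot values come from the struct pci_dev being probed (illustrative only, not code from the original driver):

#include <linux/pci.h>

/* Pull this board's config chain out of the list parsed from the config
 * file; dgap_find_config() is the function removed above. */
static struct cnode *dgap_lookup_config(struct pci_dev *pdev, int board_type)
{
	return dgap_find_config(board_type, pdev->bus->number,
				PCI_SLOT(pdev->devfn));
}
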
-
-/*
- * Given a board pointer, walks the config list, counting up
- * all the ports the user specified should be on the board.
- * (This does NOT mean they are all actually present right now, though.)
- */
-uint dgap_config_get_number_of_ports(struct board_t *bd)
-{
- int count = 0;
- struct cnode *p = NULL;
-
- if (!bd)
- return(0);
-
- for (p = bd->bd_config; p; p = p->next) {
-
- switch (p->type) {
- case BNODE:
- /*
- * check for pcxr types.
- */
- if (p->u.board.type > EPCFE)
- count += p->u.board.nport;
- break;
- case CNODE:
- count += p->u.conc.nport;
- break;
- case MNODE:
- count += p->u.module.nport;
- break;
- }
- }
- return (count);
-}
-
-char *dgap_create_config_string(struct board_t *bd, char *string)
-{
- char *ptr = string;
- struct cnode *p = NULL;
- struct cnode *q = NULL;
- int speed;
-
- if (!bd) {
- *ptr = 0xff;
- return string;
- }
-
- for (p = bd->bd_config; p; p = p->next) {
-
- switch (p->type) {
- case LNODE:
- *ptr = '\0';
- ptr++;
- *ptr = p->u.line.speed;
- ptr++;
- break;
- case CNODE:
- /*
- * Because the EPC/con concentrators can have EM modules
- * hanging off of them, we have to walk ahead in the list
- * and keep adding the number of ports on each EM to the config.
- * UGH!
- */
- speed = p->u.conc.speed;
- q = p->next;
- if ((q != NULL) && (q->type == MNODE) ) {
- *ptr = (p->u.conc.nport + 0x80);
- ptr++;
- p = q;
- while ((q->next != NULL) && (q->next->type) == MNODE) {
- *ptr = (q->u.module.nport + 0x80);
- ptr++;
- p = q;
- q = q->next;
- }
- *ptr = q->u.module.nport;
- ptr++;
- } else {
- *ptr = p->u.conc.nport;
- ptr++;
- }
-
- *ptr = speed;
- ptr++;
- break;
- }
- }
-
- *ptr = 0xff;
- return string;
-}
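
Worked example of the byte stream the loop above emits, with hypothetical values: one LINE node at speed 0x0e, followed by a concentrator with 16 ports at speed 0x0e carrying a single 8-port EM module. The concentrator's port count gets the high bit set because a module follows; the last module's count is written without it; 0xff terminates the string:

/* Hypothetical serialized config string for the example above. */
static const unsigned char example_config[] = {
	0x00, 0x0e,	/* LNODE: marker byte, line speed            */
	0x90,		/* CNODE: 16 ports | 0x80 (module follows)   */
	0x08,		/* MNODE: 8 ports on the last EM module      */
	0x0e,		/* concentrator speed                        */
	0xff		/* end of config string                      */
};
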
-
-
-
-char *dgap_get_config_letters(struct board_t *bd, char *string)
-{
- int found = FALSE;
- char *ptr = string;
- struct cnode *cptr = NULL;
- int len = 0;
- int left = MAXTTYNAMELEN;
-
- if (!bd) {
- return "<NULL>";
- }
-
- for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
-
- if ((cptr->type == BNODE) &&
- ((cptr->u.board.type == APORT2_920P) || (cptr->u.board.type == APORT4_920P) ||
- (cptr->u.board.type == APORT8_920P) || (cptr->u.board.type == PAPORT4) ||
- (cptr->u.board.type == PAPORT8))) {
-
- found = TRUE;
- }
-
- if (cptr->type == TNODE && found == TRUE) {
- char *ptr1;
- if (strstr(cptr->u.ttyname, "tty")) {
- ptr1 = cptr->u.ttyname;
- ptr1 += 3;
- }
- else {
- ptr1 = cptr->u.ttyname;
- }
- if (ptr1) {
- len = snprintf(ptr, left, "%s", ptr1);
- left -= len;
- ptr += len;
- if (left <= 0)
- break;
- }
- }
-
- if (cptr->type == CNODE) {
- if (cptr->u.conc.id) {
- len = snprintf(ptr, left, "%s", cptr->u.conc.id);
- left -= len;
- ptr += len;
- if (left <= 0)
- break;
- }
- }
-
- if (cptr->type == MNODE) {
- if (cptr->u.module.id) {
- len = snprintf(ptr, left, "%s", cptr->u.module.id);
- left -= len;
- ptr += len;
- if (left <= 0)
- break;
- }
- }
- }
-
- return string;
-}
diff --git a/drivers/staging/dgap/dgap_parse.h b/drivers/staging/dgap/dgap_parse.h
deleted file mode 100644
index 8128c47343cb..000000000000
--- a/drivers/staging/dgap/dgap_parse.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef _DGAP_PARSE_H
-#define _DGAP_PARSE_H
-
-#include "dgap_driver.h"
-
-extern int dgap_parsefile(char **in, int Remove);
-extern struct cnode *dgap_find_config(int type, int bus, int slot);
-extern uint dgap_config_get_number_of_ports(struct board_t *bd);
-extern char *dgap_create_config_string(struct board_t *bd, char *string);
-extern char *dgap_get_config_letters(struct board_t *bd, char *string);
-extern uint dgap_config_get_useintr(struct board_t *bd);
-extern uint dgap_config_get_altpin(struct board_t *bd);
-
-#endif
diff --git a/drivers/staging/dgap/dgap_pci.h b/drivers/staging/dgap/dgap_pci.h
deleted file mode 100644
index 05ed374f08e1..000000000000
--- a/drivers/staging/dgap/dgap_pci.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-/* $Id: dgap_pci.h,v 1.1 2009/10/23 14:01:57 markh Exp $ */
-
-#ifndef __DGAP_PCI_H
-#define __DGAP_PCI_H
-
-#define PCIMAX 32 /* maximum number of PCI boards */
-
-#define DIGI_VID 0x114F
-
-#define PCI_DEVICE_EPC_DID 0x0002
-#define PCI_DEVICE_XEM_DID 0x0004
-#define PCI_DEVICE_XR_DID 0x0005
-#define PCI_DEVICE_CX_DID 0x0006
-#define PCI_DEVICE_XRJ_DID 0x0009 /* PLX-based Xr adapter */
-#define PCI_DEVICE_XR_IBM_DID 0x0011 /* IBM 8-port Async Adapter */
-#define PCI_DEVICE_XR_BULL_DID 0x0013 /* BULL 8-port Async Adapter */
-#define PCI_DEVICE_XR_SAIP_DID 0x001c /* SAIP card - Xr adapter */
-#define PCI_DEVICE_XR_422_DID 0x0012 /* Xr-422 */
-#define PCI_DEVICE_920_2_DID 0x0034 /* XR-Plus 920 K, 2 port */
-#define PCI_DEVICE_920_4_DID 0x0026 /* XR-Plus 920 K, 4 port */
-#define PCI_DEVICE_920_8_DID 0x0027 /* XR-Plus 920 K, 8 port */
-#define PCI_DEVICE_EPCJ_DID 0x000a /* PLX 9060 chip for PCI */
-#define PCI_DEVICE_CX_IBM_DID 0x001b /* IBM 128-port Async Adapter */
-#define PCI_DEVICE_920_8_HP_DID 0x0058 /* HP XR-Plus 920 K, 8 port */
-#define PCI_DEVICE_XEM_HP_DID 0x0059 /* HP Xem PCI */
-
-#define PCI_DEVICE_XEM_NAME "AccelePort XEM"
-#define PCI_DEVICE_CX_NAME "AccelePort CX"
-#define PCI_DEVICE_XR_NAME "AccelePort Xr"
-#define PCI_DEVICE_XRJ_NAME "AccelePort Xr (PLX)"
-#define PCI_DEVICE_XR_SAIP_NAME "AccelePort Xr (SAIP)"
-#define PCI_DEVICE_920_2_NAME "AccelePort Xr920 2 port"
-#define PCI_DEVICE_920_4_NAME "AccelePort Xr920 4 port"
-#define PCI_DEVICE_920_8_NAME "AccelePort Xr920 8 port"
-#define PCI_DEVICE_XR_422_NAME "AccelePort Xr 422"
-#define PCI_DEVICE_EPCJ_NAME "AccelePort EPC (PLX)"
-#define PCI_DEVICE_XR_BULL_NAME "AccelePort Xr (BULL)"
-#define PCI_DEVICE_XR_IBM_NAME "AccelePort Xr (IBM)"
-#define PCI_DEVICE_CX_IBM_NAME "AccelePort CX (IBM)"
-#define PCI_DEVICE_920_8_HP_NAME "AccelePort Xr920 8 port (HP)"
-#define PCI_DEVICE_XEM_HP_NAME "AccelePort XEM (HP)"
-
-
-/*
- * On the PCI boards, there is no IO space allocated
- * The I/O registers will be in the first 3 bytes of the
- * upper 2MB of the 4MB memory space. The board memory
- * will be mapped into the low 2MB of the 4MB memory space
- */
-
-/* Potential location of PCI Bios from E0000 to FFFFF*/
-#define PCI_BIOS_SIZE 0x00020000
-
-/* Size of Memory and I/O for PCI (4MB) */
-#define PCI_RAM_SIZE 0x00400000
-
-/* Size of Memory (2MB) */
-#define PCI_MEM_SIZE 0x00200000
-
-/* Max PCI Window Size (2MB) */
-#define PCI_WIN_SIZE 0x00200000
-
-#define PCI_WIN_SHIFT 21 /* 21 bits max */
-
-/* Offset of I/O in Memory (2MB) */
-#define PCI_IO_OFFSET 0x00200000
-
-/* Size of IO (2MB) */
-#define PCI_IO_SIZE 0x00200000
-
-#endif
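
To make the layout comment in this header concrete: the whole 4MB BAR is memory-mapped, board memory occupies the low 2MB, and the I/O registers are reached by adding PCI_IO_OFFSET to the same mapping. A hypothetical helper sketch, not the driver's real mapping code:

#include <linux/io.h>

/* Given the ioremap()'d base of the 4MB PCI window, return the start of
 * the I/O register area in the upper 2MB. */
static void __iomem *dgap_io_base(void __iomem *membase)
{
	return membase + PCI_IO_OFFSET;
}
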
diff --git a/drivers/staging/dgap/dgap_sysfs.c b/drivers/staging/dgap/dgap_sysfs.c
deleted file mode 100644
index 7f4ec9a18293..000000000000
--- a/drivers/staging/dgap/dgap_sysfs.c
+++ /dev/null
@@ -1,793 +0,0 @@
-/*
- * Copyright 2004 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- *
- *
- *
- * $Id: dgap_sysfs.c,v 1.1 2009/10/23 14:01:57 markh Exp $
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/serial_reg.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-
-#include "dgap_driver.h"
-#include "dgap_conf.h"
-#include "dgap_parse.h"
-
-
-static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
-}
-static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
-
-
-static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgap_NumBoards);
-}
-static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
-
-
-static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
-}
-static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
-
-
-static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
-}
-static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
-
-
-static ssize_t dgap_driver_state_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", dgap_driver_state_text[dgap_driver_state]);
-}
-static DRIVER_ATTR(state, S_IRUSR, dgap_driver_state_show, NULL);
-
-
-static ssize_t dgap_driver_debug_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%x\n", dgap_debug);
-}
-
-static ssize_t dgap_driver_debug_store(struct device_driver *ddp, const char *buf, size_t count)
-{
- sscanf(buf, "0x%x\n", &dgap_debug);
- return count;
-}
-static DRIVER_ATTR(debug, (S_IRUSR | S_IWUSR), dgap_driver_debug_show, dgap_driver_debug_store);
-
-
-static ssize_t dgap_driver_rawreadok_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%x\n", dgap_rawreadok);
-}
-
-static ssize_t dgap_driver_rawreadok_store(struct device_driver *ddp, const char *buf, size_t count)
-{
- sscanf(buf, "0x%x\n", &dgap_rawreadok);
- return count;
-}
-static DRIVER_ATTR(rawreadok, (S_IRUSR | S_IWUSR), dgap_driver_rawreadok_show, dgap_driver_rawreadok_store);
-
-
-static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
-}
-
-static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp, const char *buf, size_t count)
-{
- sscanf(buf, "%d\n", &dgap_poll_tick);
- return count;
-}
-static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show, dgap_driver_pollrate_store);
-
-
-void dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
-{
- int rc = 0;
- struct device_driver *driverfs = &dgap_driver->driver;
-
- rc |= driver_create_file(driverfs, &driver_attr_version);
- rc |= driver_create_file(driverfs, &driver_attr_boards);
- rc |= driver_create_file(driverfs, &driver_attr_maxboards);
- rc |= driver_create_file(driverfs, &driver_attr_debug);
- rc |= driver_create_file(driverfs, &driver_attr_rawreadok);
- rc |= driver_create_file(driverfs, &driver_attr_pollrate);
- rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
- rc |= driver_create_file(driverfs, &driver_attr_state);
- if (rc) {
- printk(KERN_ERR "DGAP: sysfs driver_create_file failed!\n");
- }
-}
-
-
-void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
-{
- struct device_driver *driverfs = &dgap_driver->driver;
- driver_remove_file(driverfs, &driver_attr_version);
- driver_remove_file(driverfs, &driver_attr_boards);
- driver_remove_file(driverfs, &driver_attr_maxboards);
- driver_remove_file(driverfs, &driver_attr_debug);
- driver_remove_file(driverfs, &driver_attr_rawreadok);
- driver_remove_file(driverfs, &driver_attr_pollrate);
- driver_remove_file(driverfs, &driver_attr_pollcounter);
- driver_remove_file(driverfs, &driver_attr_state);
-}
-
-
-#define DGAP_VERIFY_BOARD(p, bd) \
- if (!p) \
- return (0); \
- \
- bd = dev_get_drvdata(p); \
- if (!bd || bd->magic != DGAP_BOARD_MAGIC) \
- return (0); \
- if (bd->state != BOARD_READY) \
- return (0); \
-
-
-static ssize_t dgap_ports_state_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s\n", bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_open_count ? "Open" : "Closed");
- }
- return count;
-}
-static DEVICE_ATTR(ports_state, S_IRUSR, dgap_ports_state_show, NULL);
-
-
-static ssize_t dgap_ports_baud_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %d\n", bd->channels[i]->ch_portnum, bd->channels[i]->ch_baud_info);
- }
- return count;
-}
-static DEVICE_ATTR(ports_baud, S_IRUSR, dgap_ports_baud_show, NULL);
-
-
-static ssize_t dgap_ports_msignals_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- if (bd->channels[i]->ch_open_count) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s %s %s %s %s %s\n", bd->channels[i]->ch_portnum,
- (bd->channels[i]->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (bd->channels[i]->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_RI) ? "RI" : "");
- } else {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d\n", bd->channels[i]->ch_portnum);
- }
- }
- return count;
-}
-static DEVICE_ATTR(ports_msignals, S_IRUSR, dgap_ports_msignals_show, NULL);
-
-
-static ssize_t dgap_ports_iflag_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_iflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_iflag, S_IRUSR, dgap_ports_iflag_show, NULL);
-
-
-static ssize_t dgap_ports_cflag_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_cflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_cflag, S_IRUSR, dgap_ports_cflag_show, NULL);
-
-
-static ssize_t dgap_ports_oflag_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_oflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_oflag, S_IRUSR, dgap_ports_oflag_show, NULL);
-
-
-static ssize_t dgap_ports_lflag_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_lflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_lflag, S_IRUSR, dgap_ports_lflag_show, NULL);
-
-
-static ssize_t dgap_ports_digi_flag_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_digi.digi_flags);
- }
- return count;
-}
-static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgap_ports_digi_flag_show, NULL);
-
-
-static ssize_t dgap_ports_rxcount_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_rxcount);
- }
- return count;
-}
-static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgap_ports_rxcount_show, NULL);
-
-
-static ssize_t dgap_ports_txcount_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_txcount);
- }
- return count;
-}
-static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
-
-
-/* This function creates the sysfs files that export each signal status
- * to sysfs; each value is put in a separate file.
- */
-void dgap_create_ports_sysfiles(struct board_t *bd)
-{
- int rc = 0;
-
- dev_set_drvdata(&bd->pdev->dev, bd);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
- if (rc) {
- printk(KERN_ERR "DGAP: sysfs device_create_file failed!\n");
- }
-}
-
-
-/* Removes all the sysfs files created for that board's ports. */
-void dgap_remove_ports_sysfiles(struct board_t *bd)
-{
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
-}
-
-
-static ssize_t dgap_tty_state_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ? "Open" : "Closed");
-}
-static DEVICE_ATTR(state, S_IRUSR, dgap_tty_state_show, NULL);
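
Each of the tty-level *_show() functions below repeats the same un -> ch -> bd validation ladder shown above. A hypothetical helper that factors it out, using only identifiers already present in this file (a cleanup sketch, not code from the original source):

/* Assumes the driver's struct un_t/channel_t/board_t and magic values
 * from dgap_driver.h. Returns the channel if everything checks out. */
static struct channel_t *dgap_tty_device_to_ch(struct device *d)
{
	struct un_t *un;
	struct channel_t *ch;
	struct board_t *bd;

	if (!d)
		return NULL;
	un = dev_get_drvdata(d);
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return NULL;
	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return NULL;
	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC || bd->state != BOARD_READY)
		return NULL;
	return ch;
}
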
-
-
-static ssize_t dgap_tty_baud_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info);
-}
-static DEVICE_ATTR(baud, S_IRUSR, dgap_tty_baud_show, NULL);
-
-
-static ssize_t dgap_tty_msignals_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- if (ch->ch_open_count) {
- return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
- (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
- }
- return 0;
-}
-static DEVICE_ATTR(msignals, S_IRUSR, dgap_tty_msignals_show, NULL);
-
-
-static ssize_t dgap_tty_iflag_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
-}
-static DEVICE_ATTR(iflag, S_IRUSR, dgap_tty_iflag_show, NULL);
-
-
-static ssize_t dgap_tty_cflag_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
-}
-static DEVICE_ATTR(cflag, S_IRUSR, dgap_tty_cflag_show, NULL);
-
-
-static ssize_t dgap_tty_oflag_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
-}
-static DEVICE_ATTR(oflag, S_IRUSR, dgap_tty_oflag_show, NULL);
-
-
-static ssize_t dgap_tty_lflag_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
-}
-static DEVICE_ATTR(lflag, S_IRUSR, dgap_tty_lflag_show, NULL);
-
-
-static ssize_t dgap_tty_digi_flag_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
-}
-static DEVICE_ATTR(digi_flag, S_IRUSR, dgap_tty_digi_flag_show, NULL);
-
-
-static ssize_t dgap_tty_rxcount_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
-}
-static DEVICE_ATTR(rxcount, S_IRUSR, dgap_tty_rxcount_show, NULL);
-
-
-static ssize_t dgap_tty_txcount_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
-}
-static DEVICE_ATTR(txcount, S_IRUSR, dgap_tty_txcount_show, NULL);
-
-
-static ssize_t dgap_tty_name_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int cn;
- int bn;
- struct cnode *cptr = NULL;
- int found = FALSE;
- int ncount = 0;
- int starto = 0;
- int i = 0;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- bn = bd->boardnum;
- cn = ch->ch_portnum;
-
- for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
-
- if ((cptr->type == BNODE) &&
- ((cptr->u.board.type == APORT2_920P) || (cptr->u.board.type == APORT4_920P) ||
- (cptr->u.board.type == APORT8_920P) || (cptr->u.board.type == PAPORT4) ||
- (cptr->u.board.type == PAPORT8))) {
-
- found = TRUE;
- if (cptr->u.board.v_start)
- starto = cptr->u.board.start;
- else
- starto = 1;
- }
-
- if (cptr->type == TNODE && found == TRUE) {
- char *ptr1;
- if (strstr(cptr->u.ttyname, "tty")) {
- ptr1 = cptr->u.ttyname;
- ptr1 += 3;
- }
- else {
- ptr1 = cptr->u.ttyname;
- }
-
- for (i = 0; i < dgap_config_get_number_of_ports(bd); i++) {
- if (cn == i) {
- return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
- (un->un_type == DGAP_PRINT) ? "pr" : "tty",
- ptr1, i + starto);
- }
- }
- }
-
- if (cptr->type == CNODE) {
-
- for (i = 0; i < cptr->u.conc.nport; i++) {
- if (cn == (i + ncount)) {
-
- return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
- (un->un_type == DGAP_PRINT) ? "pr" : "tty",
- cptr->u.conc.id,
- i + (cptr->u.conc.v_start ? cptr->u.conc.start : 1));
- }
- }
-
- ncount += cptr->u.conc.nport;
- }
-
- if (cptr->type == MNODE) {
-
- for (i = 0; i < cptr->u.module.nport; i++) {
- if (cn == (i + ncount)) {
- return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
- (un->un_type == DGAP_PRINT) ? "pr" : "tty",
- cptr->u.module.id,
- i + (cptr->u.module.v_start ? cptr->u.module.start : 1));
- }
- }
-
- ncount += cptr->u.module.nport;
-
- }
- }
-
- return snprintf(buf, PAGE_SIZE, "%s_dgap_%d_%d\n",
- (un->un_type == DGAP_PRINT) ? "pr" : "tty", bn, cn);
-
-}
-static DEVICE_ATTR(custom_name, S_IRUSR, dgap_tty_name_show, NULL);
-
-
-static struct attribute *dgap_sysfs_tty_entries[] = {
- &dev_attr_state.attr,
- &dev_attr_baud.attr,
- &dev_attr_msignals.attr,
- &dev_attr_iflag.attr,
- &dev_attr_cflag.attr,
- &dev_attr_oflag.attr,
- &dev_attr_lflag.attr,
- &dev_attr_digi_flag.attr,
- &dev_attr_rxcount.attr,
- &dev_attr_txcount.attr,
- &dev_attr_custom_name.attr,
- NULL
-};
-
-
-static struct attribute_group dgap_tty_attribute_group = {
- .name = NULL,
- .attrs = dgap_sysfs_tty_entries,
-};
-
-
-
-
-void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
-{
- int ret;
-
- ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
- if (ret) {
- printk(KERN_ERR "dgap: failed to create sysfs tty device attributes.\n");
- sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
- return;
- }
-
- dev_set_drvdata(c, un);
-
-}
-
-
-void dgap_remove_tty_sysfs(struct device *c)
-{
- sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
-}
diff --git a/drivers/staging/dgap/dgap_sysfs.h b/drivers/staging/dgap/dgap_sysfs.h
deleted file mode 100644
index dde690eec5cf..000000000000
--- a/drivers/staging/dgap/dgap_sysfs.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef __DGAP_SYSFS_H
-#define __DGAP_SYSFS_H
-
-#include "dgap_driver.h"
-
-#include <linux/device.h>
-
-struct board_t;
-struct channel_t;
-struct un_t;
-struct pci_driver;
-struct class_device;
-
-extern void dgap_create_ports_sysfiles(struct board_t *bd);
-extern void dgap_remove_ports_sysfiles(struct board_t *bd);
-
-extern void dgap_create_driver_sysfiles(struct pci_driver *);
-extern void dgap_remove_driver_sysfiles(struct pci_driver *);
-
-extern int dgap_tty_class_init(void);
-extern int dgap_tty_class_destroy(void);
-
-extern void dgap_create_tty_sysfs(struct un_t *un, struct device *c);
-extern void dgap_remove_tty_sysfs(struct device *c);
-
-
-#endif
diff --git a/drivers/staging/dgap/dgap_trace.c b/drivers/staging/dgap/dgap_trace.c
deleted file mode 100644
index a53db9e0a577..000000000000
--- a/drivers/staging/dgap/dgap_trace.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- *
- */
-
-/* $Id: dgap_trace.c,v 1.1 2009/10/23 14:01:57 markh Exp $ */
-
-#include <linux/kernel.h>
-#include <linux/sched.h> /* For jiffies, task states */
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/vmalloc.h>
-
-#include "dgap_driver.h"
-#include "dgap_trace.h"
-
-#define TRC_TO_CONSOLE 1
-
-/* file level globals */
-static char *dgap_trcbuf; /* the ringbuffer */
-
-#if defined(TRC_TO_KMEM)
-static int dgap_trcbufi = 0;		/* index of the tilde at the end of the trace */
-#endif
-
-extern int dgap_trcbuf_size; /* size of the ringbuffer */
-
-#if defined(TRC_TO_KMEM)
-static DEFINE_SPINLOCK(dgap_tracef_lock);
-#endif
-
-#if 0
-
-#if !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE)
-void dgap_tracef(const char *fmt, ...)
-{
- return;
-}
-
-#else /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
-
-void dgap_tracef(const char *fmt, ...)
-{
- va_list ap;
- char buf[TRC_MAXMSG+1];
- size_t lenbuf;
- int i;
- static int failed = FALSE;
-# if defined(TRC_TO_KMEM)
- unsigned long flags;
-#endif
-
- if(failed)
- return;
-# if defined(TRC_TO_KMEM)
- DGAP_LOCK(dgap_tracef_lock, flags);
-#endif
-
- /* Format buf using fmt and arguments contained in ap. */
- va_start(ap, fmt);
- i = vsprintf(buf, fmt, ap);
- va_end(ap);
- lenbuf = strlen(buf);
-
-# if defined(TRC_TO_KMEM)
- {
- static int initd=0;
-
- /*
- * Now, in addition to (or instead of) printing this stuff out
- * (which is a buffered operation), also tuck it away into a
- * corner of memory which can be examined post-crash in kdb.
- */
- if (!initd) {
- dgap_trcbuf = (char *) vmalloc(dgap_trcbuf_size);
- if(!dgap_trcbuf) {
- failed = TRUE;
- printk("dgap: tracing init failed!\n");
- return;
- }
-
- memset(dgap_trcbuf, '\0', dgap_trcbuf_size);
- dgap_trcbufi = 0;
- initd++;
-
- printk("dgap: tracing enabled - " TRC_DTRC
- " 0x%lx 0x%x\n",
- (unsigned long)dgap_trcbuf,
- dgap_trcbuf_size);
- }
-
-# if defined(TRC_ON_OVERFLOW_WRAP_AROUND)
- /*
- * This is the less CPU-intensive way to do things. We simply
- * wrap around before we fall off the end of the buffer. A
- * tilde (~) demarcates the current end of the trace.
- *
- * This method should be used if you are concerned about race
- * conditions as it is less likely to affect the timing of
- * things.
- */
-
- if (dgap_trcbufi + lenbuf >= dgap_trcbuf_size) {
- /* We are wrapping, so wipe out the last tilde. */
- dgap_trcbuf[dgap_trcbufi] = '\0';
- /* put the new string at the beginning of the buffer */
- dgap_trcbufi = 0;
- }
-
- strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
- dgap_trcbufi += lenbuf;
- dgap_trcbuf[dgap_trcbufi] = '~';
-
-# elif defined(TRC_ON_OVERFLOW_SHIFT_BUFFER)
- /*
- * This is the more CPU-intensive way to do things. If we
- * venture into the last 1/8 of the buffer, we shift the
- * last 7/8 of the buffer forward, wiping out the first 1/8.
- * Advantage: No wrap-around, only truncation from the
- * beginning.
- *
- * This method should not be used if you are concerned about
- * timing changes affecting the behaviour of the driver (ie,
- * race conditions).
- */
- strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
- dgap_trcbufi += lenbuf;
- dgap_trcbuf[dgap_trcbufi] = '~';
- dgap_trcbuf[dgap_trcbufi+1] = '\0';
-
- /* If we're near the end of the trace buffer... */
- if (dgap_trcbufi > (dgap_trcbuf_size/8)*7) {
- /* Wipe out the first eighth to make some more room. */
- strcpy(dgap_trcbuf, &dgap_trcbuf[dgap_trcbuf_size/8]);
- dgap_trcbufi = strlen(dgap_trcbuf)-1;
- /* Plop overflow message at the top of the buffer. */
- bcopy(TRC_OVERFLOW, dgap_trcbuf, strlen(TRC_OVERFLOW));
- }
-# else
-# error "TRC_ON_OVERFLOW_WRAP_AROUND or TRC_ON_OVERFLOW_SHIFT_BUFFER?"
-# endif
- }
- DGAP_UNLOCK(dgap_tracef_lock, flags);
-
-# endif /* defined(TRC_TO_KMEM) */
-}
-
-#endif /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
-
-#endif
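
A minimal user-space model of the TRC_ON_OVERFLOW_WRAP_AROUND strategy described in the (disabled) code above, to make the tilde bookkeeping concrete. It is hypothetical and assumes each message is shorter than the buffer, as the original does via TRC_MAXMSG:

#include <string.h>

#define TRCSIZE 64
static char trc[TRCSIZE];
static int trci;

static void trace_append(const char *msg)
{
	size_t len = strlen(msg);

	if (trci + len >= TRCSIZE - 1) {
		trc[trci] = '\0';	/* wipe the old end-of-trace tilde */
		trci = 0;		/* wrap back to the start          */
	}
	memcpy(&trc[trci], msg, len);
	trci += len;
	trc[trci] = '~';		/* mark the current end of trace   */
}
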
-
-/*
- * dgap_tracer_free()
- *
- * Free the trace ring buffer, if one was allocated.
- */
-void dgap_tracer_free(void)
-{
- if(dgap_trcbuf)
- vfree(dgap_trcbuf);
-}
diff --git a/drivers/staging/dgap/dgap_trace.h b/drivers/staging/dgap/dgap_trace.h
deleted file mode 100644
index b21f46198e71..000000000000
--- a/drivers/staging/dgap/dgap_trace.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- *****************************************************************************
- * Header file for dgap_trace.c
- *
- * $Id: dgap_trace.h,v 1.1 2009/10/23 14:01:57 markh Exp $
- */
-
-#ifndef __DGAP_TRACE_H
-#define __DGAP_TRACE_H
-
-#include "dgap_driver.h"
-
-void dgap_tracef(const char *fmt, ...);
-void dgap_tracer_free(void);
-
-#endif
-
diff --git a/drivers/staging/dgap/dgap_tty.c b/drivers/staging/dgap/dgap_tty.c
deleted file mode 100644
index 39fb4dfb8b7e..000000000000
--- a/drivers/staging/dgap/dgap_tty.c
+++ /dev/null
@@ -1,3580 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- */
-
-/************************************************************************
- *
- * This file implements the tty driver functionality for the
- * FEP5 based product lines.
- *
- ************************************************************************
- *
- * $Id: dgap_tty.c,v 1.3 2011/06/23 12:11:31 markh Exp $
- */
-
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <linux/sched.h> /* For jiffies, task states */
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_reg.h>
-#include <linux/slab.h>
-#include <linux/delay.h> /* For udelay */
-#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
-#include <asm/io.h> /* For read[bwl]/write[bwl] */
-#include <linux/pci.h>
-
-#include "dgap_driver.h"
-#include "dgap_tty.h"
-#include "dgap_types.h"
-#include "dgap_fep5.h"
-#include "dgap_parse.h"
-#include "dgap_conf.h"
-#include "dgap_sysfs.h"
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
-#define init_MUTEX(sem) sema_init(sem, 1)
-#define DECLARE_MUTEX(name) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
-#endif
-
-/*
- * internal variables
- */
-static struct board_t *dgap_BoardsByMajor[256];
-static uchar *dgap_TmpWriteBuf = NULL;
-static DECLARE_MUTEX(dgap_TmpWriteSem);
-
-/*
- * Default transparent print information.
- */
-static struct digi_t dgap_digi_init = {
- .digi_flags = DIGI_COOK, /* Flags */
- .digi_maxcps = 100, /* Max CPS */
- .digi_maxchar = 50, /* Max chars in print queue */
- .digi_bufsize = 100, /* Printer buffer size */
- .digi_onlen = 4, /* size of printer on string */
- .digi_offlen = 4, /* size of printer off string */
- .digi_onstr = "\033[5i", /* ANSI printer on string ] */
- .digi_offstr = "\033[4i", /* ANSI printer off string ] */
- .digi_term = "ansi" /* default terminal type */
-};
-
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially.
- *
- * This defines a raw port at 9600 baud, 8 data bits, no parity,
- * 1 stop bit.
- */
-
-static struct ktermios DgapDefaultTermios =
-{
- .c_iflag = (DEFAULT_IFLAGS), /* iflags */
- .c_oflag = (DEFAULT_OFLAGS), /* oflags */
- .c_cflag = (DEFAULT_CFLAGS), /* cflags */
- .c_lflag = (DEFAULT_LFLAGS), /* lflags */
- .c_cc = INIT_C_CC,
- .c_line = 0,
-};
-
-/* Our function prototypes */
-static int dgap_tty_open(struct tty_struct *tty, struct file *file);
-static void dgap_tty_close(struct tty_struct *tty, struct file *file);
-static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch);
-static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
-static int dgap_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo);
-static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info);
-static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo);
-static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info);
-static int dgap_tty_write_room(struct tty_struct* tty);
-static int dgap_tty_chars_in_buffer(struct tty_struct* tty);
-static void dgap_tty_start(struct tty_struct *tty);
-static void dgap_tty_stop(struct tty_struct *tty);
-static void dgap_tty_throttle(struct tty_struct *tty);
-static void dgap_tty_unthrottle(struct tty_struct *tty);
-static void dgap_tty_flush_chars(struct tty_struct *tty);
-static void dgap_tty_flush_buffer(struct tty_struct *tty);
-static void dgap_tty_hangup(struct tty_struct *tty);
-static int dgap_wait_for_drain(struct tty_struct *tty);
-static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value);
-static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value);
-static int dgap_tty_digisetcustombaud(struct tty_struct *tty, int __user *new_info);
-static int dgap_tty_digigetcustombaud(struct tty_struct *tty, int __user *retinfo);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
-static int dgap_tty_tiocmget(struct tty_struct *tty);
-static int dgap_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear);
-#else
-static int dgap_tty_tiocmget(struct tty_struct *tty, struct file *file);
-static int dgap_tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear);
-#endif
-static int dgap_tty_send_break(struct tty_struct *tty, int msec);
-static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout);
-static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
-static void dgap_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios);
-static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c);
-static void dgap_tty_send_xchar(struct tty_struct *tty, char ch);
-
-static const struct tty_operations dgap_tty_ops = {
- .open = dgap_tty_open,
- .close = dgap_tty_close,
- .write = dgap_tty_write,
- .write_room = dgap_tty_write_room,
- .flush_buffer = dgap_tty_flush_buffer,
- .chars_in_buffer = dgap_tty_chars_in_buffer,
- .flush_chars = dgap_tty_flush_chars,
- .ioctl = dgap_tty_ioctl,
- .set_termios = dgap_tty_set_termios,
- .stop = dgap_tty_stop,
- .start = dgap_tty_start,
- .throttle = dgap_tty_throttle,
- .unthrottle = dgap_tty_unthrottle,
- .hangup = dgap_tty_hangup,
- .put_char = dgap_tty_put_char,
- .tiocmget = dgap_tty_tiocmget,
- .tiocmset = dgap_tty_tiocmset,
- .break_ctl = dgap_tty_send_break,
- .wait_until_sent = dgap_tty_wait_until_sent,
- .send_xchar = dgap_tty_send_xchar
-};
-
-
-
-
-
-/************************************************************************
- *
- * TTY Initialization/Cleanup Functions
- *
- ************************************************************************/
-
-/*
- * dgap_tty_preinit()
- *
- * Initialize any global tty related data before we download any boards.
- */
-int dgap_tty_preinit(void)
-{
- unsigned long flags;
-
- DGAP_LOCK(dgap_global_lock, flags);
-
- /*
- * Allocate a buffer for doing the copy from user space to
- * kernel space in dgap_tty_write(). We only use one buffer and
- * control access to it with a semaphore. If we are paging, we
- * are already in trouble so one buffer won't hurt much anyway.
- */
- dgap_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_ATOMIC);
-
- if (!dgap_TmpWriteBuf) {
- DGAP_UNLOCK(dgap_global_lock, flags);
- DPR_INIT(("unable to allocate tmp write buf"));
- return (-ENOMEM);
- }
-
- DGAP_UNLOCK(dgap_global_lock, flags);
- return(0);
-}
-
-
-/*
- * dgap_tty_register()
- *
- * Init the tty subsystem for this board.
- */
-int dgap_tty_register(struct board_t *brd)
-{
- int rc = 0;
-
- DPR_INIT(("tty_register start"));
-
- brd->SerialDriver = alloc_tty_driver(MAXPORTS);
- if (!brd->SerialDriver)
- return(-ENOMEM);
-
- snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgap_%d_", brd->boardnum);
- brd->SerialDriver->name = brd->SerialName;
- brd->SerialDriver->name_base = 0;
- brd->SerialDriver->major = 0;
- brd->SerialDriver->minor_start = 0;
- brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL;
- brd->SerialDriver->init_termios = DgapDefaultTermios;
- brd->SerialDriver->driver_name = DRVSTR;
- brd->SerialDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
-
- /* The kernel wants space to store pointers to tty_structs */
- brd->SerialDriver->ttys = kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
- if (!brd->SerialDriver->ttys)
- return(-ENOMEM);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
- brd->SerialDriver->refcount = brd->TtyRefCnt;
-#endif
-
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->SerialDriver, &dgap_tty_ops);
-
- /*
- * If we're doing transparent print, we have to do all of the above
- * again, separately so we don't get the LD confused about what major
- * we are when we get into the dgap_tty_open() routine.
- */
- brd->PrintDriver = alloc_tty_driver(MAXPORTS);
- if (!brd->PrintDriver)
- return(-ENOMEM);
-
- snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgap_%d_", brd->boardnum);
- brd->PrintDriver->name = brd->PrintName;
- brd->PrintDriver->name_base = 0;
- brd->PrintDriver->major = 0;
- brd->PrintDriver->minor_start = 0;
- brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->PrintDriver->subtype = SERIAL_TYPE_NORMAL;
- brd->PrintDriver->init_termios = DgapDefaultTermios;
- brd->PrintDriver->driver_name = DRVSTR;
- brd->PrintDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
-
- /* The kernel wants space to store pointers to tty_structs */
- brd->PrintDriver->ttys = kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
- if (!brd->PrintDriver->ttys)
- return(-ENOMEM);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
- brd->PrintDriver->refcount = brd->TtyRefCnt;
-#endif
-
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->PrintDriver, &dgap_tty_ops);
-
- if (!brd->dgap_Major_Serial_Registered) {
- /* Register tty devices */
- rc = tty_register_driver(brd->SerialDriver);
- if (rc < 0) {
- APR(("Can't register tty device (%d)\n", rc));
- return(rc);
- }
- brd->dgap_Major_Serial_Registered = TRUE;
- dgap_BoardsByMajor[brd->SerialDriver->major] = brd;
- brd->dgap_Serial_Major = brd->SerialDriver->major;
- }
-
- if (!brd->dgap_Major_TransparentPrint_Registered) {
- /* Register Transparent Print devices */
- rc = tty_register_driver(brd->PrintDriver);
- if (rc < 0) {
- APR(("Can't register Transparent Print device (%d)\n", rc));
- return(rc);
- }
- brd->dgap_Major_TransparentPrint_Registered = TRUE;
- dgap_BoardsByMajor[brd->PrintDriver->major] = brd;
- brd->dgap_TransparentPrint_Major = brd->PrintDriver->major;
- }
-
- DPR_INIT(("DGAP REGISTER TTY: MAJORS: %d %d\n", brd->SerialDriver->major,
- brd->PrintDriver->major));
-
- return (rc);
-}
-
-
-/*
- * dgap_tty_init()
- *
- * Init the tty subsystem. Called once per board after board has been
- * downloaded and init'ed.
- */
-int dgap_tty_init(struct board_t *brd)
-{
- int i;
- int tlw;
- uint true_count = 0;
- uchar *vaddr;
- uchar modem = 0;
- struct channel_t *ch;
- struct bs_t *bs;
- struct cm_t *cm;
-
- if (!brd)
- return (-ENXIO);
-
- DPR_INIT(("dgap_tty_init start\n"));
-
- /*
- * Initialize board structure elements.
- */
-
- vaddr = brd->re_map_membase;
- true_count = readw((vaddr + NCHAN));
-
- brd->nasync = dgap_config_get_number_of_ports(brd);
-
- if (!brd->nasync) {
- brd->nasync = brd->maxports;
- }
-
- if (brd->nasync > brd->maxports) {
- brd->nasync = brd->maxports;
- }
-
- if (true_count != brd->nasync) {
- if ((brd->type == PPCM) && ((true_count == 64) || (true_count == 0))) {
- APR(("***WARNING**** %s configured for %d ports, has %d ports.\nPlease make SURE the EBI cable running from the card\nto each EM module is plugged into EBI IN!\n",
- brd->name, brd->nasync, true_count));
- }
- else {
- APR(("***WARNING**** %s configured for %d ports, has %d ports.\n",
- brd->name, brd->nasync, true_count));
- }
-
- brd->nasync = true_count;
-
- /* If no ports, don't bother going any further */
- if (!brd->nasync) {
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return(-ENXIO);
- }
- }
-
- /*
- * Allocate channel memory that might not have been allocated
- * when the driver was first loaded.
- */
- for (i = 0; i < brd->nasync; i++) {
- if (!brd->channels[i]) {
- brd->channels[i] = kzalloc(sizeof(struct channel_t), GFP_ATOMIC);
- if (!brd->channels[i]) {
- DPR_CORE(("%s:%d Unable to allocate memory for channel struct\n",
- __FILE__, __LINE__));
- }
- }
- }
-
- ch = brd->channels[0];
- vaddr = brd->re_map_membase;
-
- bs = (struct bs_t *) ((ulong) vaddr + CHANBUF);
- cm = (struct cm_t *) ((ulong) vaddr + CMDBUF);
-
- brd->bd_bs = bs;
-
- /* Set up channel variables */
- for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) {
-
- if (!brd->channels[i])
- continue;
-
- DGAP_SPINLOCK_INIT(ch->ch_lock);
-
- /* Store all our magic numbers */
- ch->magic = DGAP_CHANNEL_MAGIC;
- ch->ch_tun.magic = DGAP_UNIT_MAGIC;
- ch->ch_tun.un_type = DGAP_SERIAL;
- ch->ch_tun.un_ch = ch;
- ch->ch_tun.un_dev = i;
-
- ch->ch_pun.magic = DGAP_UNIT_MAGIC;
- ch->ch_pun.un_type = DGAP_PRINT;
- ch->ch_pun.un_ch = ch;
- ch->ch_pun.un_dev = i;
-
- ch->ch_vaddr = vaddr;
- ch->ch_bs = bs;
- ch->ch_cm = cm;
- ch->ch_bd = brd;
- ch->ch_portnum = i;
- ch->ch_digi = dgap_digi_init;
-
- /*
- * Set up digi dsr and dcd bits based on altpin flag.
- */
- if (dgap_config_get_altpin(brd)) {
- ch->ch_dsr = DM_CD;
- ch->ch_cd = DM_DSR;
- ch->ch_digi.digi_flags |= DIGI_ALTPIN;
- }
- else {
- ch->ch_cd = DM_CD;
- ch->ch_dsr = DM_DSR;
- }
-
- ch->ch_taddr = vaddr + ((ch->ch_bs->tx_seg) << 4);
- ch->ch_raddr = vaddr + ((ch->ch_bs->rx_seg) << 4);
- ch->ch_tx_win = 0;
- ch->ch_rx_win = 0;
- ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
- ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
- ch->ch_tstart = 0;
- ch->ch_rstart = 0;
-
- /* .25 second delay */
- ch->ch_close_delay = 250;
-
- /*
- * Set queue water marks, interrupt mask,
- * and general tty parameters.
- */
- ch->ch_tlw = tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) : ch->ch_tsize / 2;
-
- dgap_cmdw(ch, STLOW, tlw, 0);
-
- dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
-
- dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
-
- ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
-
- init_waitqueue_head(&ch->ch_flags_wait);
- init_waitqueue_head(&ch->ch_tun.un_flags_wait);
- init_waitqueue_head(&ch->ch_pun.un_flags_wait);
- init_waitqueue_head(&ch->ch_sniff_wait);
-
- /* Turn on all modem interrupts for now */
- modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
- writeb(modem, &(ch->ch_bs->m_int));
-
- /*
- * Set edelay to 0 if interrupts are turned on,
- * otherwise set edelay to the usual 100.
- */
- if (brd->intr_used)
- writew(0, &(ch->ch_bs->edelay));
- else
- writew(100, &(ch->ch_bs->edelay));
-
- writeb(1, &(ch->ch_bs->idata));
- }
-
-
- DPR_INIT(("dgap_tty_init finish\n"));
-
- return (0);
-}
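-
-/*
- * Illustrative sketch (not part of the original driver) of the watermark
- * arithmetic used above. The buffer sizes are hypothetical; the formulas
- * mirror the ch_tlw assignment and the SRLOW/SRHIGH dgap_cmdw() calls in
- * dgap_tty_init().
- */
-#if 0
-static u16 dgap_example_tx_low_water(u16 tsize)
-{
- /* Large buffers trip the low-water event at 5/8 full, small ones at 1/2. */
- return (tsize >= 2000) ? ((tsize * 5) / 8) : (tsize / 2);
-}
-/*
- * e.g. tsize = 4096 -> 2560, tsize = 1024 -> 512.
- * Receive marks: SRLOW = rsize / 2 and SRHIGH = 7 * rsize / 8,
- * so rsize = 4096 -> 2048 and 3584 respectively.
- */
-#endif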
-
-
-/*
- * dgap_tty_post_uninit()
- *
- * Uninitialize any global tty related data.
- */
-void dgap_tty_post_uninit(void)
-{
- kfree(dgap_TmpWriteBuf);
- dgap_TmpWriteBuf = NULL;
-}
-
-
-/*
- * dgap_tty_uninit()
- *
- * Uninitialize the TTY portion of this driver. Free all memory and
- * resources.
- */
-void dgap_tty_uninit(struct board_t *brd)
-{
- int i = 0;
-
- if (brd->dgap_Major_Serial_Registered) {
- dgap_BoardsByMajor[brd->SerialDriver->major] = NULL;
- brd->dgap_Serial_Major = 0;
- for (i = 0; i < brd->nasync; i++) {
- dgap_remove_tty_sysfs(brd->channels[i]->ch_tun.un_sysfs);
- tty_unregister_device(brd->SerialDriver, i);
- }
- tty_unregister_driver(brd->SerialDriver);
- kfree(brd->SerialDriver->ttys);
- brd->SerialDriver->ttys = NULL;
- put_tty_driver(brd->SerialDriver);
- brd->dgap_Major_Serial_Registered = FALSE;
- }
-
- if (brd->dgap_Major_TransparentPrint_Registered) {
- dgap_BoardsByMajor[brd->PrintDriver->major] = NULL;
- brd->dgap_TransparentPrint_Major = 0;
- for (i = 0; i < brd->nasync; i++) {
- dgap_remove_tty_sysfs(brd->channels[i]->ch_pun.un_sysfs);
- tty_unregister_device(brd->PrintDriver, i);
- }
- tty_unregister_driver(brd->PrintDriver);
- kfree(brd->PrintDriver->ttys);
- brd->PrintDriver->ttys = NULL;
- put_tty_driver(brd->PrintDriver);
- brd->dgap_Major_TransparentPrint_Registered = FALSE;
- }
-}
-
-
-#define TMPBUFLEN (1024)
-
-/*
- * dgap_sniff - Dump data out to the "sniff" buffer if the
- * proc sniff file is opened...
- */
-static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *buf, int len)
-{
- struct timeval tv;
- int n;
- int r;
- int nbuf;
- int i;
- int tmpbuflen;
- char tmpbuf[TMPBUFLEN];
- char *p = tmpbuf;
- int too_much_data;
-
- /* Leave if sniff not open */
- if (!(ch->ch_sniff_flags & SNIFF_OPEN))
- return;
-
- do_gettimeofday(&tv);
-
- /* Create our header for data dump */
- p += sprintf(p, "<%ld %ld><%s><", tv.tv_sec, tv.tv_usec, text);
- tmpbuflen = p - tmpbuf;
-
- do {
- too_much_data = 0;
-
- for (i = 0; i < len && tmpbuflen < (TMPBUFLEN - 4); i++) {
- p += sprintf(p, "%02x ", *buf);
- buf++;
- tmpbuflen = p - tmpbuf;
- }
-
- if (tmpbuflen < (TMPBUFLEN - 4)) {
- if (i > 0)
- p += sprintf(p - 1, "%s\n", ">");
- else
- p += sprintf(p, "%s\n", ">");
- } else {
- too_much_data = 1;
- len -= i;
- }
-
- nbuf = strlen(tmpbuf);
- p = tmpbuf;
-
- /*
- * Loop while data remains.
- */
- while (nbuf > 0 && ch->ch_sniff_buf) {
- /*
- * Determine the amount of available space left in the
- * buffer. If there's none, wait until some appears.
- */
- n = (ch->ch_sniff_out - ch->ch_sniff_in - 1) & SNIFF_MASK;
-
- /*
- * If there is no space left to write to in our sniff buffer,
- * we have no choice but to drop the data.
- * We *cannot* sleep here waiting for space, because this
- * function was probably called by the interrupt/timer routines!
- */
- if (n == 0) {
- return;
- }
-
- /*
- * Copy as much data as will fit.
- */
-
- if (n > nbuf)
- n = nbuf;
-
- r = SNIFF_MAX - ch->ch_sniff_in;
-
- if (r <= n) {
- memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, r);
-
- n -= r;
- ch->ch_sniff_in = 0;
- p += r;
- nbuf -= r;
- }
-
- memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, n);
-
- ch->ch_sniff_in += n;
- p += n;
- nbuf -= n;
-
- /*
- * Wakeup any thread waiting for data
- */
- if (ch->ch_sniff_flags & SNIFF_WAIT_DATA) {
- ch->ch_sniff_flags &= ~SNIFF_WAIT_DATA;
- wake_up_interruptible(&ch->ch_sniff_wait);
- }
- }
-
- /*
- * If the user sent us too much data to push into our tmpbuf,
- * we need to keep looping around on all the data.
- */
- if (too_much_data) {
- p = tmpbuf;
- tmpbuflen = 0;
- }
-
- } while (too_much_data);
-}
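-
-/*
- * Worked example (illustrative, not part of the original driver): the
- * free-space computation above keeps one slot unused so that "in == out"
- * can mean "empty". With a hypothetical SNIFF_MAX of 4096 (SNIFF_MASK of
- * 4095), in = 100 and out = 50 give (50 - 100 - 1) & 4095 = 4045 free
- * bytes, i.e. capacity minus the 50 queued bytes minus the reserved slot.
- */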
-
-
-/*=======================================================================
- *
- * dgap_input - Process received data.
- *
- * ch - Pointer to channel structure.
- *
- *=======================================================================*/
-
-void dgap_input(struct channel_t *ch)
-{
- struct board_t *bd;
- struct bs_t *bs;
- struct tty_struct *tp;
- struct tty_ldisc *ld;
- uint rmask;
- uint head;
- uint tail;
- int data_len;
- ulong lock_flags;
- ulong lock_flags2;
- int flip_len;
- int len = 0;
- int n = 0;
- uchar *buf;
- uchar tmpchar;
- int s = 0;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- tp = ch->ch_tun.un_tty;
-
- bs = ch->ch_bs;
- if (!bs) {
- return;
- }
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_READ(("dgap_input start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- /*
- * Figure the number of characters in the buffer.
- * Exit immediately if none.
- */
-
- rmask = ch->ch_rsize - 1;
-
- head = readw(&(bs->rx_head));
- head &= rmask;
- tail = readw(&(bs->rx_tail));
- tail &= rmask;
-
- data_len = (head - tail) & rmask;
-
- if (data_len == 0) {
- writeb(1, &(bs->idata));
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_READ(("No data on port %d\n", ch->ch_portnum));
- return;
- }
-
- /*
- * If the device is not open, or CREAD is off, flush
- * input data and return immediately.
- */
- if ((bd->state != BOARD_READY) || !tp || (tp->magic != TTY_MAGIC) ||
- !(ch->ch_tun.un_flags & UN_ISOPEN) || !(tp->termios.c_cflag & CREAD) ||
- (ch->ch_tun.un_flags & UN_CLOSING)) {
-
- DPR_READ(("input. dropping %d bytes on port %d...\n", data_len, ch->ch_portnum));
- DPR_READ(("input. tp: %p tp->magic: %x MAGIC:%x ch flags: %x\n",
- tp, tp ? tp->magic : 0, TTY_MAGIC, ch->ch_tun.un_flags));
- writew(head, &(bs->rx_tail));
- writeb(1, &(bs->idata));
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return;
- }
-
- /*
- * If we are throttled, simply don't read any data.
- */
- if (ch->ch_flags & CH_RXBLOCK) {
- writeb(1, &(bs->idata));
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_READ(("Port %d throttled, not reading any data. head: %x tail: %x\n",
- ch->ch_portnum, head, tail));
- return;
- }
-
- /*
- * Ignore oruns.
- */
- tmpchar = readb(&(bs->orun));
- if (tmpchar) {
- ch->ch_err_overrun++;
- writeb(0, &(bs->orun));
- }
-
- DPR_READ(("dgap_input start 2\n"));
-
- /* Decide how much data we can send into the tty layer */
- flip_len = TTY_FLIPBUF_SIZE;
-
- /* Chop down the length, if needed */
- len = min(data_len, flip_len);
- len = min(len, (N_TTY_BUF_SIZE - 1));
-
- ld = tty_ldisc_ref(tp);
-
-#ifdef TTY_DONT_FLIP
- /*
- * If the DONT_FLIP flag is on, don't flush our buffer, and act
- * like the ld doesn't have any space to put the data right now.
- */
- if (test_bit(TTY_DONT_FLIP, &tp->flags))
- len = 0;
-#endif
-
- /*
- * If we were unable to get a reference to the ld,
- * don't flush our buffer, and act like the ld doesn't
- * have any space to put the data right now.
- */
- if (!ld) {
- len = 0;
- } else {
- /*
- * If ld doesn't have a pointer to a receive_buf function,
- * flush the data, then act like the ld doesn't have any
- * space to put the data right now.
- */
- if (!ld->ops->receive_buf) {
- writew(head, &(bs->rx_tail));
- len = 0;
- }
- }
-
- if (len <= 0) {
- writeb(1, &(bs->idata));
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_READ(("dgap_input 1 - finish\n"));
- if (ld)
- tty_ldisc_deref(ld);
- return;
- }
-
- buf = ch->ch_bd->flipbuf;
- n = len;
-
- /*
- * n now contains the maximum amount of data we can copy,
- * bounded either by our buffer size or the amount
- * of data the card actually has pending...
- */
- while (n) {
-
- s = ((head >= tail) ? head : ch->ch_rsize) - tail;
- s = min(s, n);
-
- if (s <= 0)
- break;
-
- memcpy_fromio(buf, (char *) ch->ch_raddr + tail, s);
- dgap_sniff_nowait_nolock(ch, "USER READ", buf, s);
-
- tail += s;
- buf += s;
-
- n -= s;
- /* Flip queue if needed */
- tail &= rmask;
- }
-
- writew(tail, &(bs->rx_tail));
- writeb(1, &(bs->idata));
- ch->ch_rxcount += len;
-
- /*
- * If we are completely raw, we don't need to go through a lot
- * of the tty layers that exist.
- * In this case, we take the shortest and fastest route we
- * can to relay the data to the user.
- *
- * On the other hand, if we are not raw, we need to go through
- * the tty layer, which has a more well-defined API.
- */
- if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
- dgap_parity_scan(ch, ch->ch_bd->flipbuf, ch->ch_bd->flipflagbuf, &len);
-
- len = tty_buffer_request_room(tp->port, len);
- tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf,
- ch->ch_bd->flipflagbuf, len);
- }
- else {
- len = tty_buffer_request_room(tp->port, len);
- tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len);
- }
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- /* Tell the tty layer it's okay to "eat" the data now */
- tty_flip_buffer_push(tp->port);
-
- if (ld)
- tty_ldisc_deref(ld);
-
- DPR_READ(("dgap_input - finish\n"));
-}
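-
-/*
- * Illustrative sketch (not part of the original driver): the copy loop in
- * dgap_input() drains the circular receive buffer in at most two segments,
- * one up to the wrap point and one from the start of the buffer. The
- * helper below is hypothetical; it only restates that arithmetic.
- */
-#if 0
-static int dgap_example_ring_copy(uchar *dst, uchar __iomem *ring,
- uint head, uint tail, uint rsize, int want)
-{
- int copied = 0;
-
- while (want > 0 && tail != head) {
- /* Bytes available before hitting either the head or the wrap point. */
- int s = ((head >= tail) ? head : rsize) - tail;
-
- s = min(s, want);
- memcpy_fromio(dst + copied, ring + tail, s);
- copied += s;
- want -= s;
- tail = (tail + s) & (rsize - 1);
- }
- return copied;
-}
-#endif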
-
-
-/************************************************************************
- * Determines when CARRIER changes state and takes appropriate
- * action.
- ************************************************************************/
-void dgap_carrier(struct channel_t *ch)
-{
- struct board_t *bd;
-
- int virt_carrier = 0;
- int phys_carrier = 0;
-
- DPR_CARR(("dgap_carrier called...\n"));
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
-
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- /* Make sure altpin is always set correctly */
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
- ch->ch_dsr = DM_CD;
- ch->ch_cd = DM_DSR;
- }
- else {
- ch->ch_dsr = DM_DSR;
- ch->ch_cd = DM_CD;
- }
-
- if (ch->ch_mistat & D_CD(ch)) {
- DPR_CARR(("mistat: %x D_CD: %x\n", ch->ch_mistat, D_CD(ch)));
- phys_carrier = 1;
- }
-
- if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) {
- virt_carrier = 1;
- }
-
- if (ch->ch_c_cflag & CLOCAL) {
- virt_carrier = 1;
- }
-
-
- DPR_CARR(("DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier));
-
- /*
- * Test for a VIRTUAL carrier transition to HIGH.
- */
- if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
-
- /*
- * When carrier rises, wake any threads waiting
- * for carrier in the open routine.
- */
-
- DPR_CARR(("carrier: virt DCD rose\n"));
-
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
- }
-
- /*
- * Test for a PHYSICAL carrier transition to HIGH.
- */
- if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
-
- /*
- * When carrier rises, wake any threads waiting
- * for carrier in the open routine.
- */
-
- DPR_CARR(("carrier: physical DCD rose\n"));
-
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
- }
-
- /*
- * Test for a PHYSICAL transition to low, so long as we aren't
- * currently ignoring physical transitions (which is what "virtual
- * carrier" indicates).
- *
- * The transition of the virtual carrier to low really doesn't
- * matter... it really only means "ignore carrier state", not
- * "make pretend that carrier is there".
- */
- if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
- (phys_carrier == 0))
- {
-
- /*
- * When carrier drops:
- *
- * Drop carrier on all open units.
- *
- * Flush queues, waking up any task waiting in the
- * line discipline.
- *
- * Send a hangup to the control terminal.
- *
- * Enable all select calls.
- */
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
-
- if (ch->ch_tun.un_open_count > 0) {
- DPR_CARR(("Sending tty hangup\n"));
- tty_hangup(ch->ch_tun.un_tty);
- }
-
- if (ch->ch_pun.un_open_count > 0) {
- DPR_CARR(("Sending pr hangup\n"));
- tty_hangup(ch->ch_pun.un_tty);
- }
- }
-
- /*
- * Make sure that our cached values reflect the current reality.
- */
- if (virt_carrier == 1)
- ch->ch_flags |= CH_FCAR;
- else
- ch->ch_flags &= ~CH_FCAR;
-
- if (phys_carrier == 1)
- ch->ch_flags |= CH_CD;
- else
- ch->ch_flags &= ~CH_CD;
-}
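-
-/*
- * Illustrative condensation (not part of the original driver) of the
- * hangup decision above: CH_CD caches the previous physical-carrier
- * state, and virtual carrier (DIGI_FORCEDCD or CLOCAL) suppresses the
- * hangup entirely. The helper name is hypothetical.
- */
-#if 0
-static int dgap_example_should_hangup(int virt_carrier, int phys_carrier,
- uint old_ch_flags)
-{
- return (virt_carrier == 0) && (phys_carrier == 0) &&
- ((old_ch_flags & CH_CD) != 0);
-}
-#endif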
-
-
-/************************************************************************
- *
- * TTY Entry points and helper functions
- *
- ************************************************************************/
-
-/*
- * dgap_tty_open()
- *
- */
-static int dgap_tty_open(struct tty_struct *tty, struct file *file)
-{
- struct board_t *brd;
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t *bs;
- uint major = 0;
- uint minor = 0;
- int rc = 0;
- ulong lock_flags;
- ulong lock_flags2;
- u16 head;
-
- rc = 0;
-
- major = MAJOR(tty_devnum(tty));
- minor = MINOR(tty_devnum(tty));
-
- if (major > 255) {
- return -ENXIO;
- }
-
- /* Get board pointer from our array of majors we have allocated */
- brd = dgap_BoardsByMajor[major];
- if (!brd) {
- return -ENXIO;
- }
-
- /*
- * If the board is not yet in the READY state, sleep until it
- * becomes ready or the user cancels the open.
- */
- rc = wait_event_interruptible(brd->state_wait,
- (brd->state & BOARD_READY));
-
- if (rc) {
- return rc;
- }
-
- DGAP_LOCK(brd->bd_lock, lock_flags);
-
- /* The wait above should guarantee this cannot happen */
- if (brd->state != BOARD_READY) {
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
- return -ENXIO;
- }
-
- /* If the opened minor is beyond our number of ports, bail. */
- if (MINOR(tty_devnum(tty)) >= brd->nasync) {
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
- return -ENXIO;
- }
-
- ch = brd->channels[minor];
- if (!ch) {
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
- return -ENXIO;
- }
-
- /* Grab channel lock */
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- /* Figure out our type */
- if (major == brd->dgap_Serial_Major) {
- un = &brd->channels[minor]->ch_tun;
- un->un_type = DGAP_SERIAL;
- }
- else if (major == brd->dgap_TransparentPrint_Major) {
- un = &brd->channels[minor]->ch_pun;
- un->un_type = DGAP_PRINT;
- }
- else {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
- DPR_OPEN(("%d Unknown TYPE!\n", __LINE__));
- return -ENXIO;
- }
-
- /* Store our unit into driver_data, so we always have it available. */
- tty->driver_data = un;
-
- DPR_OPEN(("Open called. MAJOR: %d MINOR:%d unit: %p NAME: %s\n",
- MAJOR(tty_devnum(tty)), MINOR(tty_devnum(tty)), un, brd->name));
-
- /*
- * Error if channel info pointer is NULL.
- */
- bs = ch->ch_bs;
- if (!bs) {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
- DPR_OPEN(("%d BS is 0!\n", __LINE__));
- return -ENXIO;
- }
-
- DPR_OPEN(("%d: tflag=%x pflag=%x\n", __LINE__, ch->ch_tun.un_flags, ch->ch_pun.un_flags));
-
- /*
- * Initialize the tty
- */
- if (!(un->un_flags & UN_ISOPEN)) {
- /* Store important variables. */
- un->un_tty = tty;
-
- /* Maybe do something here to the TTY struct as well? */
- }
-
- /*
- * Initialize if neither terminal nor printer is open.
- */
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
-
- DPR_OPEN(("dgap_open: initializing channel in open...\n"));
-
- ch->ch_mforce = 0;
- ch->ch_mval = 0;
-
- /*
- * Flush input queue.
- */
- head = readw(&(bs->rx_head));
- writew(head, &(bs->rx_tail));
-
- ch->ch_flags = 0;
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
-
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
-
- /* TODO: flush our TTY struct here? */
- }
-
- dgap_carrier(ch);
- /*
- * Run param in case we changed anything
- */
- dgap_param(tty);
-
- /*
- * follow protocol for opening port
- */
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
-
- rc = dgap_block_til_ready(tty, file, ch);
-
- if (!un->un_tty) {
- return -ENODEV;
- }
-
- if (rc) {
- DPR_OPEN(("dgap_tty_open returning after dgap_block_til_ready "
- "with %d\n", rc));
- }
-
- /* No going back now, increment our unit and channel counters */
- DGAP_LOCK(ch->ch_lock, lock_flags);
- ch->ch_open_count++;
- un->un_open_count++;
- un->un_flags |= (UN_ISOPEN);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_OPEN(("dgap_tty_open finished\n"));
- return (rc);
-}
-
-
-/*
- * dgap_block_til_ready()
- *
- * Wait for DCD, if needed.
- */
-static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch)
-{
- int retval = 0;
- struct un_t *un = NULL;
- ulong lock_flags;
- uint old_flags = 0;
- int sleep_on_un_flags = 0;
-
- if (!tty || tty->magic != TTY_MAGIC || !file || !ch || ch->magic != DGAP_CHANNEL_MAGIC) {
- return (-ENXIO);
- }
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC) {
- return (-ENXIO);
- }
-
- DPR_OPEN(("dgap_block_til_ready - before block.\n"));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- ch->ch_wopen++;
-
- /* Loop forever */
- while (1) {
-
- sleep_on_un_flags = 0;
-
- /*
- * If board has failed somehow during our sleep, bail with error.
- */
- if (ch->ch_bd->state == BOARD_FAILED) {
- retval = -ENXIO;
- break;
- }
-
- /* If tty was hung up, break out of loop and set error. */
- if (tty_hung_up_p(file)) {
- retval = -EAGAIN;
- break;
- }
-
- /*
- * If either unit is in the middle of the fragile part of close,
- * we just cannot touch the channel safely.
- * Go back to sleep, knowing that when the channel can be
- * touched safely, the close routine will signal the
- * ch_flags_wait queue to wake us back up.
- */
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) {
-
- /*
- * Our conditions to leave cleanly and happily:
- * 1) NONBLOCKING on the tty is set.
- * 2) CLOCAL is set.
- * 3) DCD (fake or real) is active.
- */
-
- if (file->f_flags & O_NONBLOCK) {
- break;
- }
-
- if (tty->flags & (1 << TTY_IO_ERROR)) {
- break;
- }
-
- if (ch->ch_flags & CH_CD) {
- DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
- break;
- }
-
- if (ch->ch_flags & CH_FCAR) {
- DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
- break;
- }
- }
- else {
- sleep_on_un_flags = 1;
- }
-
- /*
- * If there is a signal pending, the user probably
- * interrupted (ctrl-c) us.
- * Leave loop with error set.
- */
- if (signal_pending(current)) {
- DPR_OPEN(("%d: signal pending...\n", __LINE__));
- retval = -ERESTARTSYS;
- break;
- }
-
- DPR_OPEN(("dgap_block_til_ready - blocking.\n"));
-
- /*
- * Store the flags before we let go of channel lock
- */
- if (sleep_on_un_flags)
- old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
- else
- old_flags = ch->ch_flags;
-
- /*
- * Let go of channel lock before calling schedule.
- * Our poller will get any FEP events and wake us up when DCD
- * eventually goes active.
- */
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_OPEN(("Going to sleep on %s flags...\n",
- (sleep_on_un_flags ? "un" : "ch")));
-
- /*
- * Wait for something in the flags to change from the current value.
- */
- if (sleep_on_un_flags) {
- retval = wait_event_interruptible(un->un_flags_wait,
- (old_flags != (ch->ch_tun.un_flags | ch->ch_pun.un_flags)));
- }
- else {
- retval = wait_event_interruptible(ch->ch_flags_wait,
- (old_flags != ch->ch_flags));
- }
-
- DPR_OPEN(("After sleep... retval: %x\n", retval));
-
- /*
- * We got woken up for some reason.
- * Before looping around, grab our channel lock.
- */
- DGAP_LOCK(ch->ch_lock, lock_flags);
- }
-
- ch->ch_wopen--;
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_OPEN(("dgap_block_til_ready - after blocking.\n"));
-
- if (retval) {
- DPR_OPEN(("dgap_block_til_ready - done. error. retval: %x\n", retval));
- return(retval);
- }
-
- DPR_OPEN(("dgap_block_til_ready - done no error. jiffies: %lu\n", jiffies));
-
- return(0);
-}
-
-
-/*
- * dgap_tty_hangup()
- *
- * Hang up the port. Like a close, but don't wait for output to drain.
- */
-static void dgap_tty_hangup(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_CLOSE(("dgap_hangup called. ch->ch_open_count: %d un->un_open_count: %d\n",
- ch->ch_open_count, un->un_open_count));
-
- /* flush the transmit queues */
- dgap_tty_flush_buffer(tty);
-
- DPR_CLOSE(("dgap_hangup finished. ch->ch_open_count: %d un->un_open_count: %d\n",
- ch->ch_open_count, un->un_open_count));
-}
-
-
-
-/*
- * dgap_tty_close()
- *
- */
-static void dgap_tty_close(struct tty_struct *tty, struct file *file)
-{
- struct ktermios *ts;
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- int rc = 0;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- ts = &tty->termios;
-
- DPR_CLOSE(("Close called\n"));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- /*
- * Determine whether this is the last close, and whether we agree
- * with the line discipline about which type of close this is.
- */
- if ((tty->count == 1) && (un->un_open_count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. un_open_count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shut down.
- */
- APR(("tty->count is 1, un open count is %d\n", un->un_open_count));
- un->un_open_count = 1;
- }
-
- if (--un->un_open_count < 0) {
- APR(("bad serial port open count of %d\n", un->un_open_count));
- un->un_open_count = 0;
- }
-
- ch->ch_open_count--;
-
- if (ch->ch_open_count && un->un_open_count) {
- DPR_CLOSE(("dgap_tty_close: not last close ch: %d un:%d\n",
- ch->ch_open_count, un->un_open_count));
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- return;
- }
-
- /* OK, it's the last close on the unit */
- DPR_CLOSE(("dgap_tty_close - last close on unit procedures\n"));
-
- un->un_flags |= UN_CLOSING;
-
- tty->closing = 1;
-
- /*
- * Only officially close channel if count is 0 and
- * DIGI_PRINTER bit is not set.
- */
- if ((ch->ch_open_count == 0) && !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
-
- ch->ch_flags &= ~(CH_RXBLOCK);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- /* wait for output to drain */
- /* This will also return if we take an interrupt */
-
- DPR_CLOSE(("Calling wait_for_drain\n"));
- rc = dgap_wait_for_drain(tty);
- DPR_CLOSE(("After calling wait_for_drain\n"));
-
- if (rc) {
- DPR_BASIC(("dgap_tty_close - bad return: %d ", rc));
- }
-
- dgap_tty_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- tty->closing = 0;
-
- /*
- * If we have HUPCL set, lower DTR and RTS
- */
- if (ch->ch_c_cflag & HUPCL ) {
- DPR_CLOSE(("Close. HUPCL set, dropping DTR/RTS\n"));
- ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
- dgap_cmdb( ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0 );
-
- /*
- * Go to sleep to ensure RTS/DTR
- * have been dropped for modems to see it.
- */
- if (ch->ch_close_delay) {
- DPR_CLOSE(("Close. Sleeping for RTS/DTR drop\n"));
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- dgap_ms_sleep(ch->ch_close_delay);
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- DPR_CLOSE(("Close. After sleeping for RTS/DTR drop\n"));
- }
- }
-
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
- ch->ch_baud_info = 0;
-
- }
-
- /*
- * Turn off the print device when closing the print device.
- */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON) ) {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- ch->ch_flags &= ~CH_PRON;
- }
-
- un->un_tty = NULL;
- un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
- tty->driver_data = NULL;
-
- DPR_CLOSE(("Close. Doing wakeups\n"));
- wake_up_interruptible(&ch->ch_flags_wait);
- wake_up_interruptible(&un->un_flags_wait);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_BASIC(("dgap_tty_close - complete\n"));
-}
-
-
-/*
- * dgap_tty_chars_in_buffer()
- *
- * Return number of characters that have not been transmitted yet.
- *
- * This routine is used by the line discipline to determine if there
- * is data waiting to be transmitted/drained/flushed or not.
- */
-static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
-{
- struct board_t *bd = NULL;
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
- struct bs_t *bs = NULL;
- uchar tbusy;
- uint chars = 0;
- u16 thead, ttail, tmask, chead, ctail;
- ulong lock_flags = 0;
- ulong lock_flags2 = 0;
-
- if (tty == NULL)
- return(0);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
-
- bs = ch->ch_bs;
- if (!bs)
- return (0);
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- tmask = (ch->ch_tsize - 1);
-
- /* Get Transmit queue pointers */
- thead = readw(&(bs->tx_head)) & tmask;
- ttail = readw(&(bs->tx_tail)) & tmask;
-
- /* Get tbusy flag */
- tbusy = readb(&(bs->tbusy));
-
- /* Get Command queue pointers */
- chead = readw(&(ch->ch_cm->cm_head));
- ctail = readw(&(ch->ch_cm->cm_tail));
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- /*
- * The only way we know for sure if there is no pending
- * data left to be transferred, is if:
- * 1) Transmit head and tail are equal (empty).
- * 2) Command queue head and tail are equal (empty).
- * 3) The "TBUSY" flag is 0. (Transmitter not busy).
- */
-
- if ((ttail == thead) && (tbusy == 0) && (chead == ctail)) {
- chars = 0;
- }
- else {
- if (thead >= ttail)
- chars = thead - ttail;
- else
- chars = thead - ttail + ch->ch_tsize;
- /*
- * Fudge factor here.
- * If chars is zero, we know that the command queue had
- * something in it or tbusy was set. Because we cannot
- * be sure if there is still some data to be transmitted,
- * let's lie and tell the ld we have 1 byte left.
- */
- if (chars == 0) {
- /*
- * If TBUSY is still set, and our tx buffers are empty,
- * force the firmware to send me another wakeup after
- * TBUSY has been cleared.
- */
- if (tbusy != 0) {
- DGAP_LOCK(ch->ch_lock, lock_flags);
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- }
- chars = 1;
- }
- }
-
- DPR_WRITE(("dgap_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d tsize: %d)\n",
- ch->ch_portnum, chars, thead, ttail, ch->ch_tsize));
- return(chars);
-}
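-
-/*
- * Worked example (illustrative, not part of the original driver): with a
- * hypothetical tsize of 1024, head = 100 and tail = 900 give
- * 100 - 900 + 1024 = 224 untransmitted bytes, while head = 900 and
- * tail = 100 give 800. The "chars = 1" fudge above only applies when the
- * transmit queue itself is empty but the command queue or TBUSY indicates
- * work is still outstanding.
- */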
-
-
-static int dgap_wait_for_drain(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t *bs;
- int ret = -EIO;
- uint count = 1;
- ulong lock_flags = 0;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return ret;
-
- bs = ch->ch_bs;
- if (!bs)
- return ret;
-
- ret = 0;
-
- DPR_DRAIN(("dgap_wait_for_drain start\n"));
-
- /* Loop until data is drained */
- while (count != 0) {
-
- count = dgap_tty_chars_in_buffer(tty);
-
- if (count == 0)
- break;
-
- /* Set flag waiting for drain */
- DGAP_LOCK(ch->ch_lock, lock_flags);
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- /* Go to sleep till we get woken up */
- ret = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
- /* If ret is non-zero, user ctrl-c'ed us */
- if (ret) {
- break;
- }
- }
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
- un->un_flags &= ~(UN_EMPTY);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_DRAIN(("dgap_wait_for_drain finish\n"));
- return (ret);
-}
-
-
-/*
- * dgap_maxcps_room
- *
- * Reduces bytes_available to the max number of characters
- * that can be sent currently given the maxcps value, and
- * returns the new bytes_available. This only affects printer
- * output.
- */
-static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available)
-{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
-
- if (tty == NULL)
- return (bytes_available);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (bytes_available);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (bytes_available);
-
- /*
- * If it's not the Transparent print device, return
- * the full data amount.
- */
- if (un->un_type != DGAP_PRINT)
- return (bytes_available);
-
- if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0 ) {
- int cps_limit = 0;
- unsigned long current_time = jiffies;
- unsigned long buffer_time = current_time +
- (HZ * ch->ch_digi.digi_bufsize) / ch->ch_digi.digi_maxcps;
-
- if (ch->ch_cpstime < current_time) {
- /* buffer is empty */
- ch->ch_cpstime = current_time; /* reset ch_cpstime */
- cps_limit = ch->ch_digi.digi_bufsize;
- }
- else if (ch->ch_cpstime < buffer_time) {
- /* still room in the buffer */
- cps_limit = ((buffer_time - ch->ch_cpstime) * ch->ch_digi.digi_maxcps) / HZ;
- }
- else {
- /* no room in the buffer */
- cps_limit = 0;
- }
-
- bytes_available = min(cps_limit, bytes_available);
- }
-
- return (bytes_available);
-}
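-
-/*
- * Worked example (illustrative, hypothetical values; not part of the
- * original driver): with digi_maxcps = 100, digi_bufsize = 100 and
- * HZ = 1000, buffer_time = jiffies + (1000 * 100) / 100 = jiffies + 1000.
- * If ch_cpstime is 400 jiffies ahead of "now", the printer budget is
- * cps_limit = ((1000 - 400) * 100) / 1000 = 60 characters; once
- * ch_cpstime falls behind "now" the budget resets to the full bufsize
- * of 100.
- */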
-
-
-static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
-{
- struct channel_t *ch = NULL;
- struct bs_t *bs = NULL;
-
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
- bs = ch->ch_bs;
- if (!bs)
- return;
-
- if ((event & UN_LOW) != 0) {
- if ((un->un_flags & UN_LOW) == 0) {
- un->un_flags |= UN_LOW;
- writeb(1, &(bs->ilow));
- }
- }
- if ((event & UN_EMPTY) != 0) {
- if ((un->un_flags & UN_EMPTY) == 0) {
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- }
- }
-}
-
-
-/*
- * dgap_tty_write_room()
- *
- * Return space available in Tx buffer
- */
-static int dgap_tty_write_room(struct tty_struct *tty)
-{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
- struct bs_t *bs = NULL;
- u16 head, tail, tmask;
- int ret = 0;
- ulong lock_flags = 0;
-
- if (tty == NULL || dgap_TmpWriteBuf == NULL)
- return(0);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
-
- bs = ch->ch_bs;
- if (!bs)
- return (0);
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- tmask = ch->ch_tsize - 1;
- head = readw(&(bs->tx_head)) & tmask;
- tail = readw(&(bs->tx_tail)) & tmask;
-
- if ((ret = tail - head - 1) < 0)
- ret += ch->ch_tsize;
-
- /* Limit printer to maxcps */
- ret = dgap_maxcps_room(tty, ret);
-
- /*
- * If we are the printer device, leave space for
- * possibly both the on and off strings.
- */
- if (un->un_type == DGAP_PRINT) {
- if (!(ch->ch_flags & CH_PRON))
- ret -= ch->ch_digi.digi_onlen;
- ret -= ch->ch_digi.digi_offlen;
- }
- else {
- if (ch->ch_flags & CH_PRON)
- ret -= ch->ch_digi.digi_offlen;
- }
-
- if (ret < 0)
- ret = 0;
-
- /*
- * Schedule FEP to wake us up if needed.
- *
- * TODO: This might be overkill...
- * Do we really need to schedule callbacks from the FEP
- * in every case? Can we get smarter based on ret?
- */
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_WRITE(("dgap_tty_write_room - %d tail: %d head: %d\n", ret, tail, head));
-
- return(ret);
-}
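-
-/*
- * Worked example (illustrative, not part of the original driver): with a
- * hypothetical tsize of 1024, head = 100 and tail = 900, the raw room is
- * 900 - 100 - 1 = 799 bytes. Ignoring the maxcps clamp, a print unit that
- * is not yet in printer mode then reserves digi_onlen + digi_offlen
- * (4 + 4 bytes with the defaults above), leaving 791 usable bytes.
- */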
-
-
-/*
- * dgap_tty_put_char()
- *
- * Put a character into ch->ch_buf
- *
- * - used by the line discipline for OPOST processing
- */
-static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
-{
- /*
- * Simply call dgap_tty_write().
- */
- DPR_WRITE(("dgap_tty_put_char called\n"));
- dgap_tty_write(tty, &c, 1);
- return 1;
-}
-
-
-/*
- * dgap_tty_write()
- *
- * Take data from the user or kernel and send it out to the FEP.
- * In here exists all the Transparent Print magic as well.
- */
-static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
- struct bs_t *bs = NULL;
- char *vaddr = NULL;
- u16 head, tail, tmask, remain;
- int bufcount = 0, n = 0;
- int orig_count = 0;
- ulong lock_flags;
- int from_user = 0;
-
- if (tty == NULL || dgap_TmpWriteBuf == NULL)
- return(0);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return(0);
-
- bs = ch->ch_bs;
- if (!bs)
- return(0);
-
- if (!count)
- return(0);
-
- DPR_WRITE(("dgap_tty_write: Port: %x tty=%p user=%d len=%d\n",
- ch->ch_portnum, tty, from_user, count));
-
- /*
- * Store original amount of characters passed in.
- * This helps to figure out if we should ask the FEP
- * to send us an event when it has more space available.
- */
- orig_count = count;
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- /* Get our space available for the channel from the board */
- tmask = ch->ch_tsize - 1;
- head = readw(&(bs->tx_head)) & tmask;
- tail = readw(&(bs->tx_tail)) & tmask;
-
- if ((bufcount = tail - head - 1) < 0)
- bufcount += ch->ch_tsize;
-
- DPR_WRITE(("%d: bufcount: %x count: %x tail: %x head: %x tmask: %x\n",
- __LINE__, bufcount, count, tail, head, tmask));
-
- /*
- * Limit printer output to maxcps overall, with bursts allowed
- * up to bufsize characters.
- */
- bufcount = dgap_maxcps_room(tty, bufcount);
-
- /*
- * Take minimum of what the user wants to send, and the
- * space available in the FEP buffer.
- */
- count = min(count, bufcount);
-
- /*
- * Bail if no space left.
- */
- if (count <= 0) {
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- return(0);
- }
-
- /*
- * Output the printer ON string, if we are in terminal mode, but
- * need to be in printer mode.
- */
- if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_onstr,
- (int) ch->ch_digi.digi_onlen);
- head = readw(&(bs->tx_head)) & tmask;
- ch->ch_flags |= CH_PRON;
- }
-
- /*
- * On the other hand, output the printer OFF string, if we are
- * currently in printer mode, but need to output to the terminal.
- */
- if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- head = readw(&(bs->tx_head)) & tmask;
- ch->ch_flags &= ~CH_PRON;
- }
-
- /*
- * If there is nothing left to copy, or we can't handle any more data, leave.
- */
- if (count <= 0) {
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- return(0);
- }
-
- if (from_user) {
-
- count = min(count, WRITEBUFLEN);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- /*
- * If data is coming from user space, copy it into a temporary
- * buffer so we don't get swapped out while doing the copy to
- * the board.
- */
- /* we're allowed to block if it's from_user */
- if (down_interruptible(&dgap_TmpWriteSem)) {
- return (-EINTR);
- }
-
- if (copy_from_user(dgap_TmpWriteBuf, (const uchar __user *) buf, count)) {
- up(&dgap_TmpWriteSem);
- printk("Write: Copy from user failed!\n");
- return -EFAULT;
- }
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- buf = dgap_TmpWriteBuf;
- }
-
- n = count;
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- remain = ch->ch_tstart + ch->ch_tsize - head;
-
- if (n >= remain) {
- n -= remain;
- vaddr = ch->ch_taddr + head;
-
- memcpy_toio(vaddr, (uchar *) buf, remain);
- dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *) buf, remain);
-
- head = ch->ch_tstart;
- buf += remain;
- }
-
- if (n > 0) {
-
- /*
- * Move rest of data.
- */
- vaddr = ch->ch_taddr + head;
- remain = n;
-
- memcpy_toio(vaddr, (uchar *) buf, remain);
- dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *) buf, remain);
-
- head += remain;
-
- }
-
- if (count) {
- ch->ch_txcount += count;
- head &= tmask;
- writew(head, &(bs->tx_head));
- }
-
-
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
-
- /*
- * If this is the print device, and the
- * printer is still on, we need to turn it
- * off before going idle. If the buffer is
- * non-empty, wait until it goes empty.
- * Otherwise turn it off right now.
- */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- tail = readw(&(bs->tx_tail)) & tmask;
-
- if (tail != head) {
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- }
- else {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- head = readw(&(bs->tx_head)) & tmask;
- ch->ch_flags &= ~CH_PRON;
- }
- }
-
- /* Update printer buffer empty time. */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0)
- && (ch->ch_digi.digi_bufsize > 0)) {
- ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
- }
-
- if (from_user) {
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- up(&dgap_TmpWriteSem);
- }
- else {
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- }
-
- DPR_WRITE(("Write finished - Write %d bytes of %d.\n", count, orig_count));
-
- return (count);
-}
-
-
-
-/*
- * Return modem signals to ld.
- */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
-static int dgap_tty_tiocmget(struct tty_struct *tty)
-#else
-static int dgap_tty_tiocmget(struct tty_struct *tty, struct file *file)
-#endif
-{
- struct channel_t *ch;
- struct un_t *un;
- int result = -EIO;
- uchar mstat = 0;
- ulong lock_flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return result;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return result;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return result;
-
- DPR_IOCTL(("dgap_tty_tiocmget start\n"));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- mstat = readb(&(ch->ch_bs->m_stat));
- /* Append any outbound signals that might be pending... */
- mstat |= ch->ch_mostat;
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- result = 0;
-
- if (mstat & D_DTR(ch))
- result |= TIOCM_DTR;
- if (mstat & D_RTS(ch))
- result |= TIOCM_RTS;
- if (mstat & D_CTS(ch))
- result |= TIOCM_CTS;
- if (mstat & D_DSR(ch))
- result |= TIOCM_DSR;
- if (mstat & D_RI(ch))
- result |= TIOCM_RI;
- if (mstat & D_CD(ch))
- result |= TIOCM_CD;
-
- DPR_IOCTL(("dgap_tty_tiocmget finish\n"));
-
- return result;
-}
-
-
-/*
- * dgap_tty_tiocmset()
- *
- * Set modem signals, called by ld.
- */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
-static int dgap_tty_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-#else
-static int dgap_tty_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
-#endif
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int ret = -EIO;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return ret;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return ret;
-
- DPR_IOCTL(("dgap_tty_tiocmset start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- if (set & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval |= D_RTS(ch);
- }
-
- if (set & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval |= D_DTR(ch);
- }
-
- if (clear & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval &= ~(D_RTS(ch));
- }
-
- if (clear & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval &= ~(D_DTR(ch));
- }
-
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_tiocmset finish\n"));
-
- return (0);
-}
-
-
-
-/*
- * dgap_tty_send_break()
- *
- * Send a Break, called by ld.
- */
-static int dgap_tty_send_break(struct tty_struct *tty, int msec)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int ret = -EIO;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return ret;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return ret;
-
- switch (msec) {
- case -1:
- msec = 0xFFFF;
- break;
- case 0:
- msec = 1;
- break;
- default:
- msec /= 10;
- break;
- }
-
- DPR_IOCTL(("dgap_tty_send_break start 1. %lx\n", jiffies));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-#if 0
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-#endif
- dgap_cmdw(ch, SBREAK, (u16) msec, 0);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_send_break finish\n"));
-
- return (0);
-}
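-
-/*
- * Worked example (illustrative, not part of the original driver): the
- * switch above converts the ld's millisecond argument into what appear to
- * be the FEP's 10 ms break units, so msec = 250 becomes 25, msec = 0 is
- * clamped to 1 unit, and msec = -1 ("break until further notice") becomes
- * the 0xFFFF sentinel.
- */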
-
-
-
-
-/*
- * dgap_tty_wait_until_sent()
- *
- * Wait until data has been transmitted; called by the ld.
- */
-static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- int rc;
- rc = dgap_wait_for_drain(tty);
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return;
- }
- return;
-}
-
-
-
-/*
- * dgap_tty_send_xchar()
- *
- * Send a high-priority character; called by the ld.
- */
-static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_send_xchar start 1. %lx\n", jiffies));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- /*
- * This is technically what we should do.
- * However, the NIST tests specifically want
- * to see each XON or XOFF character that it
- * sends, so let's just send each character
- * by hand...
- */
-#if 0
- if (c == STOP_CHAR(tty)) {
- dgap_cmdw(ch, RPAUSE, 0, 0);
- }
- else if (c == START_CHAR(tty)) {
- dgap_cmdw(ch, RRESUME, 0, 0);
- }
- else {
- dgap_wmove(ch, &c, 1);
- }
-#else
- dgap_wmove(ch, &c, 1);
-#endif
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_send_xchar finish\n"));
-
- return;
-}
-
-
-
-
-/*
- * Return modem signals to ld.
- */
-static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value)
-{
- int result = 0;
- uchar mstat = 0;
- ulong lock_flags;
- int rc = 0;
-
- DPR_IOCTL(("dgap_get_modem_info start\n"));
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return(-ENXIO);
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- mstat = readb(&(ch->ch_bs->m_stat));
- /* Append any outbound signals that might be pending... */
- mstat |= ch->ch_mostat;
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- result = 0;
-
- if (mstat & D_DTR(ch))
- result |= TIOCM_DTR;
- if (mstat & D_RTS(ch))
- result |= TIOCM_RTS;
- if (mstat & D_CTS(ch))
- result |= TIOCM_CTS;
- if (mstat & D_DSR(ch))
- result |= TIOCM_DSR;
- if (mstat & D_RI(ch))
- result |= TIOCM_RI;
- if (mstat & D_CD(ch))
- result |= TIOCM_CD;
-
- rc = put_user(result, value);
-
- DPR_IOCTL(("dgap_get_modem_info finish\n"));
- return(rc);
-}
-
-
-/*
- * dgap_set_modem_info()
- *
- * Set modem signals, called by ld.
- */
-static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int ret = -ENXIO;
- unsigned int arg = 0;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return ret;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return ret;
-
- DPR_IOCTL(("dgap_set_modem_info() start\n"));
-
- ret = get_user(arg, value);
- if (ret) {
- DPR_IOCTL(("dgap_set_modem_info %d ret: %x. finished.\n", __LINE__, ret));
- return(ret);
- }
-
- DPR_IOCTL(("dgap_set_modem_info: command: %x arg: %x\n", command, arg));
-
- switch (command) {
- case TIOCMBIS:
- if (arg & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval |= D_RTS(ch);
- }
-
- if (arg & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval |= D_DTR(ch);
- }
-
- break;
-
- case TIOCMBIC:
- if (arg & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval &= ~(D_RTS(ch));
- }
-
- if (arg & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval &= ~(D_DTR(ch));
- }
-
- break;
-
- case TIOCMSET:
- ch->ch_mforce = D_DTR(ch)|D_RTS(ch);
-
- if (arg & TIOCM_RTS) {
- ch->ch_mval |= D_RTS(ch);
- }
- else {
- ch->ch_mval &= ~(D_RTS(ch));
- }
-
- if (arg & TIOCM_DTR) {
- ch->ch_mval |= (D_DTR(ch));
- }
- else {
- ch->ch_mval &= ~(D_DTR(ch));
- }
-
- break;
-
- default:
- return(-EINVAL);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_set_modem_info finish\n"));
-
- return (0);
-}
-
-
-/*
- * dgap_tty_digigeta()
- *
- * Ioctl to get the information for ditty.
- */
-static int dgap_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo)
-{
- struct channel_t *ch;
- struct un_t *un;
- struct digi_t tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return (-EFAULT);
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- memset(&tmp, 0, sizeof(tmp));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
- memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return (-EFAULT);
-
- return (0);
-}
-
-
-/*
- * dgap_tty_digiseta()
- *
- * Ioctl to set the information for ditty.
- */
-static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- struct digi_t new_digi;
- ulong lock_flags = 0;
- unsigned long lock_flags2;
-
- DPR_IOCTL(("DIGI_SETA start\n"));
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (-EFAULT);
-
- if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t))) {
- DPR_IOCTL(("DIGI_SETA failed copy_from_user\n"));
- return(-EFAULT);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
-
- if (ch->ch_digi.digi_maxcps < 1)
- ch->ch_digi.digi_maxcps = 1;
-
- if (ch->ch_digi.digi_maxcps > 10000)
- ch->ch_digi.digi_maxcps = 10000;
-
- if (ch->ch_digi.digi_bufsize < 10)
- ch->ch_digi.digi_bufsize = 10;
-
- if (ch->ch_digi.digi_maxchar < 1)
- ch->ch_digi.digi_maxchar = 1;
-
- if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
- ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
-
- if (ch->ch_digi.digi_onlen > DIGI_PLEN)
- ch->ch_digi.digi_onlen = DIGI_PLEN;
-
- if (ch->ch_digi.digi_offlen > DIGI_PLEN)
- ch->ch_digi.digi_offlen = DIGI_PLEN;
-
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("DIGI_SETA finish\n"));
-
- return(0);
-}
-
-
-/*
- * dgap_tty_digigetedelay()
- *
- * Ioctl to get the current edelay setting.
- *
- *
- *
- */
-static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
-{
- struct channel_t *ch;
- struct un_t *un;
- int tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return (-EFAULT);
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- memset(&tmp, 0, sizeof(tmp));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
- tmp = readw(&(ch->ch_bs->edelay));
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return (-EFAULT);
-
- return (0);
-}
-
-
-/*
- * dgap_tty_digisetedelay()
- *
- * Ioctl to set the EDELAY setting
- *
- */
-static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int new_digi;
- ulong lock_flags;
- ulong lock_flags2;
-
-	DPR_IOCTL(("DIGI_SETEDELAY start\n"));
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (-EFAULT);
-
- if (copy_from_user(&new_digi, new_info, sizeof(int))) {
- DPR_IOCTL(("DIGI_SETEDELAY failed copy_from_user\n"));
- return(-EFAULT);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- writew((u16) new_digi, &(ch->ch_bs->edelay));
-
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
-	DPR_IOCTL(("DIGI_SETEDELAY finish\n"));
-
- return(0);
-}
-
-
-/*
- * dgap_tty_digigetcustombaud()
- *
- * Ioctl to get the current custom baud rate setting.
- */
-static int dgap_tty_digigetcustombaud(struct tty_struct *tty, int __user *retinfo)
-{
- struct channel_t *ch;
- struct un_t *un;
- int tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return (-EFAULT);
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- memset(&tmp, 0, sizeof(tmp));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
- tmp = dgap_get_custom_baud(ch);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_IOCTL(("DIGI_GETCUSTOMBAUD. Returning %d\n", tmp));
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return (-EFAULT);
-
- return (0);
-}
-
-
-/*
- * dgap_tty_digisetcustombaud()
- *
- * Ioctl to set the custom baud rate setting
- */
-static int dgap_tty_digisetcustombaud(struct tty_struct *tty, int __user *new_info)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- uint new_rate;
- ulong lock_flags;
- ulong lock_flags2;
-
- DPR_IOCTL(("DIGI_SETCUSTOMBAUD start\n"));
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (-EFAULT);
-
-
- if (copy_from_user(&new_rate, new_info, sizeof(unsigned int))) {
- DPR_IOCTL(("DIGI_SETCUSTOMBAUD failed copy_from_user\n"));
- return(-EFAULT);
- }
-
- if (bd->bd_flags & BD_FEP5PLUS) {
-
- DPR_IOCTL(("DIGI_SETCUSTOMBAUD. Setting %d\n", new_rate));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- ch->ch_custom_speed = new_rate;
-
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- }
-
- DPR_IOCTL(("DIGI_SETCUSTOMBAUD finish\n"));
-
- return(0);
-}
-
-
-/*
- * dgap_set_termios()
- */
-static void dgap_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- unsigned long lock_flags;
- unsigned long lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
-
- dgap_carrier(ch);
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-}
-
-
-static void dgap_tty_throttle(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_throttle start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- ch->ch_flags |= (CH_RXBLOCK);
-#if 1
- dgap_cmdw(ch, RPAUSE, 0, 0);
-#endif
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_throttle finish\n"));
-}
-
-
-static void dgap_tty_unthrottle(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_unthrottle start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- ch->ch_flags &= ~(CH_RXBLOCK);
-
-#if 1
- dgap_cmdw(ch, RRESUME, 0, 0);
-#endif
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_unthrottle finish\n"));
-}
-
-
-static void dgap_tty_start(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_start start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, RESUMETX, 0, 0);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_start finish\n"));
-}
-
-
-static void dgap_tty_stop(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_stop start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, PAUSETX, 0, 0);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_stop finish\n"));
-}
-
-
-/*
- * dgap_tty_flush_chars()
- *
- * Flush the cook buffer
- *
- * Note to self, and any other poor souls who venture here:
- *
- * flush in this case DOES NOT mean dispose of the data.
- * instead, it means "stop buffering and send it if you
- * haven't already." Just guess how I figured that out... SRW 2-Jun-98
- *
- * It is also always called in interrupt context - JAR 8-Sept-99
- */
-static void dgap_tty_flush_chars(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_flush_chars start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- /* TODO: Do something here */
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_flush_chars finish\n"));
-}
-
-
-
-/*
- * dgap_tty_flush_buffer()
- *
- * Flush Tx buffer (make in == out)
- */
-static void dgap_tty_flush_buffer(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
- u16 head = 0;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_flush_buffer on port: %d start\n", ch->ch_portnum));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- ch->ch_flags &= ~CH_STOP;
- head = readw(&(ch->ch_bs->tx_head));
- dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
- dgap_cmdw(ch, RESUMETX, 0, 0);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- if (waitqueue_active(&tty->write_wait))
- wake_up_interruptible(&tty->write_wait);
- tty_wakeup(tty);
-
- DPR_IOCTL(("dgap_tty_flush_buffer finish\n"));
-}
-
-
-
-/*****************************************************************************
- *
- * The IOCTL function and all of its helpers
- *
- *****************************************************************************/
-
-/*
- * dgap_tty_ioctl()
- *
- * The usual assortment of ioctl's
- */
-static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int rc;
- u16 head = 0;
- ulong lock_flags = 0;
- ulong lock_flags2 = 0;
- void __user *uarg = (void __user *) arg;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-ENODEV);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-ENODEV);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-ENODEV);
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (-ENODEV);
-
- DPR_IOCTL(("dgap_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n",
- ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- if (un->un_open_count <= 0) {
- DPR_BASIC(("dgap_tty_ioctl - unit not open.\n"));
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(-EIO);
- }
-
- switch (cmd) {
-
- /* Here are all the standard ioctl's that we MUST implement */
-
- case TCSBRK:
- /*
- * TCSBRK is SVID version: non-zero arg --> no break
- * this behaviour is exploited by tcdrain().
- *
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- if (rc) {
- return(rc);
- }
-
- rc = dgap_wait_for_drain(tty);
-
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- if(((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP)) {
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
- }
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
- ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- return(0);
-
-
- case TCSBRKP:
- /* support for POSIX tcsendbreak()
-
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- if (rc) {
- return(rc);
- }
-
- rc = dgap_wait_for_drain(tty);
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
- ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- return(0);
-
- case TIOCSBRK:
- /*
- * FEP5 doesn't support turning on a break unconditionally.
- * The FEP5 device will stop sending a break automatically
- * after the specified time value that was sent when turning on
- * the break.
- */
- rc = tty_check_change(tty);
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- if (rc) {
- return(rc);
- }
-
- rc = dgap_wait_for_drain(tty);
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
- ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- return 0;
-
- case TIOCCBRK:
- /*
- * FEP5 doesn't support turning off a break unconditionally.
- * The FEP5 device will stop sending a break automatically
- * after the specified time value that was sent when turning on
- * the break.
- */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return 0;
-
- case TIOCGSOFTCAR:
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) arg);
- return(rc);
-
- case TIOCSSOFTCAR:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- rc = get_user(arg, (unsigned long __user *) arg);
- if (rc)
- return(rc);
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
- dgap_param(tty);
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- return(0);
-
- case TIOCMGET:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_get_modem_info(ch, uarg));
-
- case TIOCMBIS:
- case TIOCMBIC:
- case TIOCMSET:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_set_modem_info(tty, cmd, uarg));
-
- /*
- * Here are any additional ioctl's that we want to implement
- */
-
- case TCFLSH:
- /*
- * The linux tty driver doesn't have a flush
- * input routine for the driver, assuming all backed
- * up data is in the line disc. buffers. However,
- * we all know that's not the case. Here, we
- * act on the ioctl, but then lie and say we didn't
- * so the line discipline will process the flush
- * also.
- */
- rc = tty_check_change(tty);
- if (rc) {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(rc);
- }
-
- if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
- if (!(un->un_type == DGAP_PRINT)) {
- head = readw(&(ch->ch_bs->rx_head));
- writew(head, &(ch->ch_bs->rx_tail));
- writeb(0, &(ch->ch_bs->orun));
- }
- }
-
- if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
- ch->ch_flags &= ~CH_STOP;
- head = readw(&(ch->ch_bs->tx_head));
- dgap_cmdw(ch, FLUSHTX, (u16) head, 0 );
- dgap_cmdw(ch, RESUMETX, 0, 0);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
- if (waitqueue_active(&tty->write_wait))
- wake_up_interruptible(&tty->write_wait);
-
- /* Can't hold any locks when calling tty_wakeup! */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- tty_wakeup(tty);
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
-
- /* pretend we didn't recognize this IOCTL */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_ioctl (LINE:%d) finish on port %d - cmd %s (%x), arg %lx\n",
- __LINE__, ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- return(-ENOIOCTLCMD);
-
- case TCSETSF:
- case TCSETSW:
- /*
- * The linux tty driver doesn't have a flush
- * input routine for the driver, assuming all backed
- * up data is in the line disc. buffers. However,
- * we all know that's not the case. Here, we
- * act on the ioctl, but then lie and say we didn't
- * so the line discipline will process the flush
- * also.
- */
- if (cmd == TCSETSF) {
- /* flush rx */
- ch->ch_flags &= ~CH_STOP;
- head = readw(&(ch->ch_bs->rx_head));
- writew(head, &(ch->ch_bs->rx_tail));
- }
-
- /* now wait for all the output to drain */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
-
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
- ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- /* pretend we didn't recognize this */
- return(-ENOIOCTLCMD);
-
- case TCSETAW:
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
-
- /* pretend we didn't recognize this */
- return(-ENOIOCTLCMD);
-
- case TCXONC:
- /*
- * The Linux Line Discipline (LD) would do this for us if we
- * let it, but we have the special firmware options to do this
- * the "right way" regardless of hardware or software flow
-		 * control so we'll do it ourselves instead of letting the LD
- * do it.
- */
- rc = tty_check_change(tty);
- if (rc) {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(rc);
- }
-
- DPR_IOCTL(("dgap_ioctl - in TCXONC - %d\n", cmd));
- switch (arg) {
-
- case TCOON:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- dgap_tty_start(tty);
- return(0);
- case TCOOFF:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- dgap_tty_stop(tty);
- return(0);
- case TCION:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- /* Make the ld do it */
- return(-ENOIOCTLCMD);
- case TCIOFF:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- /* Make the ld do it */
- return(-ENOIOCTLCMD);
- default:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(-EINVAL);
- }
-
- case DIGI_GETA:
- /* get information for ditty */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digigeta(tty, uarg));
-
- case DIGI_SETAW:
- case DIGI_SETAF:
-
- /* set information for ditty */
- if (cmd == (DIGI_SETAW)) {
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
- else {
- tty_ldisc_flush(tty);
- }
- /* fall thru */
-
- case DIGI_SETA:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digiseta(tty, uarg));
-
- case DIGI_GEDELAY:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digigetedelay(tty, uarg));
-
- case DIGI_SEDELAY:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digisetedelay(tty, uarg));
-
- case DIGI_GETCUSTOMBAUD:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digigetcustombaud(tty, uarg));
-
- case DIGI_SETCUSTOMBAUD:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digisetcustombaud(tty, uarg));
-
- case DIGI_RESET_PORT:
- dgap_firmware_reset_port(ch);
- dgap_param(tty);
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return 0;
-
- default:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_ioctl - in default\n"));
- DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n",
- dgap_ioctl_name(cmd), cmd, arg));
-
- return(-ENOIOCTLCMD);
- }
-}
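
Every tty entry point removed above opens with the same defensive walk before it touches the board: tty -> un_t -> channel_t -> board_t, validating a magic value at each hop, and only then takes bd_lock followed by ch_lock. A minimal sketch of that shared pattern, written as a hypothetical helper (dgap_tty_to_channel() is not part of the driver; it merely condenses the checks each handler performs inline):

/*
 * Hypothetical helper illustrating the validation chain repeated by the
 * removed dgap tty handlers; not part of the original driver.
 */
#include "dgap_driver.h"	/* struct board_t, channel_t, un_t and their magics */

static struct channel_t *dgap_tty_to_channel(struct tty_struct *tty)
{
	struct un_t *un;
	struct channel_t *ch;

	if (!tty || tty->magic != TTY_MAGIC)
		return NULL;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return NULL;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return NULL;

	if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC)
		return NULL;

	return ch;
}

Callers would still acquire bd->bd_lock before ch->ch_lock, in that order, exactly as the removed handlers do.
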
diff --git a/drivers/staging/dgap/dgap_tty.h b/drivers/staging/dgap/dgap_tty.h
deleted file mode 100644
index 464a460b6be8..000000000000
--- a/drivers/staging/dgap/dgap_tty.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef __DGAP_TTY_H
-#define __DGAP_TTY_H
-
-#include "dgap_driver.h"
-
-int dgap_tty_register(struct board_t *brd);
-
-int dgap_tty_preinit(void);
-int dgap_tty_init(struct board_t *);
-
-void dgap_tty_post_uninit(void);
-void dgap_tty_uninit(struct board_t *);
-
-void dgap_carrier(struct channel_t *ch);
-void dgap_input(struct channel_t *ch);
-
-
-#endif
diff --git a/drivers/staging/dgap/dgap_types.h b/drivers/staging/dgap/dgap_types.h
deleted file mode 100644
index eca38c7f359d..000000000000
--- a/drivers/staging/dgap/dgap_types.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef __DGAP_TYPES_H
-#define __DGAP_TYPES_H
-
-#ifndef TRUE
-# define TRUE 1
-#endif
-
-#ifndef FALSE
-# define FALSE 0
-#endif
-
-/* Required for our shared headers! */
-typedef unsigned char uchar;
-
-#endif
diff --git a/drivers/staging/dgap/digi.h b/drivers/staging/dgap/digi.h
deleted file mode 100644
index bcea4f734a32..000000000000
--- a/drivers/staging/dgap/digi.h
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * $Id: digi.h,v 1.1 2009/10/23 14:01:57 markh Exp $
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef __DIGI_H
-#define __DIGI_H
-
-/************************************************************************
- *** Definitions for Digi ditty(1) command.
- ************************************************************************/
-
-
-/*
- * Copyright (c) 1988-96 Digi International Inc., All Rights Reserved.
- */
-
-/************************************************************************
- * This module provides application access to special Digi
- * serial line enhancements which are not standard UNIX(tm) features.
- ************************************************************************/
-
-#if !defined(TIOCMODG)
-
-#define TIOCMODG ('d'<<8) | 250 /* get modem ctrl state */
-#define TIOCMODS ('d'<<8) | 251 /* set modem ctrl state */
-
-#ifndef TIOCM_LE
-#define TIOCM_LE 0x01 /* line enable */
-#define TIOCM_DTR 0x02 /* data terminal ready */
-#define TIOCM_RTS 0x04 /* request to send */
-#define TIOCM_ST 0x08 /* secondary transmit */
-#define TIOCM_SR 0x10 /* secondary receive */
-#define TIOCM_CTS 0x20 /* clear to send */
-#define TIOCM_CAR 0x40 /* carrier detect */
-#define TIOCM_RNG 0x80 /* ring indicator */
-#define TIOCM_DSR 0x100 /* data set ready */
-#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
-#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
-#endif
-
-#endif
-
-#if !defined(TIOCMSET)
-#define TIOCMSET ('d'<<8) | 252 /* set modem ctrl state */
-#define TIOCMGET ('d'<<8) | 253 /* set modem ctrl state */
-#endif
-
-#if !defined(TIOCMBIC)
-#define TIOCMBIC ('d'<<8) | 254 /* set modem ctrl state */
-#define TIOCMBIS ('d'<<8) | 255 /* set modem ctrl state */
-#endif
-
-
-#if !defined(TIOCSDTR)
-#define TIOCSDTR ('e'<<8) | 0 /* set DTR */
-#define TIOCCDTR ('e'<<8) | 1 /* clear DTR */
-#endif
-
-/************************************************************************
- * Ioctl command arguments for DIGI parameters.
- ************************************************************************/
-#define DIGI_GETA ('e'<<8) | 94 /* Read params */
-
-#define DIGI_SETA ('e'<<8) | 95 /* Set params */
-#define DIGI_SETAW ('e'<<8) | 96 /* Drain & set params */
-#define DIGI_SETAF ('e'<<8) | 97 /* Drain, flush & set params */
-
-#define DIGI_KME ('e'<<8) | 98 /* Read/Write Host */
- /* Adapter Memory */
-
-#define DIGI_GETFLOW ('e'<<8) | 99 /* Get startc/stopc flow */
- /* control characters */
-#define DIGI_SETFLOW ('e'<<8) | 100 /* Set startc/stopc flow */
- /* control characters */
-#define DIGI_GETAFLOW ('e'<<8) | 101 /* Get Aux. startc/stopc */
- /* flow control chars */
-#define DIGI_SETAFLOW ('e'<<8) | 102 /* Set Aux. startc/stopc */
- /* flow control chars */
-
-#define DIGI_GEDELAY ('d'<<8) | 246 /* Get edelay */
-#define DIGI_SEDELAY ('d'<<8) | 247 /* Set edelay */
-
-struct digiflow_t {
- unsigned char startc; /* flow cntl start char */
- unsigned char stopc; /* flow cntl stop char */
-};
-
-
-#ifdef FLOW_2200
-#define F2200_GETA ('e'<<8) | 104 /* Get 2x36 flow cntl flags */
-#define F2200_SETAW ('e'<<8) | 105 /* Set 2x36 flow cntl flags */
-#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */
-#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */
-#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */
-#define F2200_XON 0xf8
-#define P2200_XON 0xf9
-#define F2200_XOFF 0xfa
-#define P2200_XOFF 0xfb
-
-#define FXOFF_MASK 0x03 /* 2200 flow status mask */
-#define RCVD_FXOFF 0x01 /* 2x36 Terminal XOFF rcvd */
-#define RCVD_PXOFF 0x02 /* 2x36 Printer XOFF rcvd */
-#endif
-
-/************************************************************************
- * Values for digi_flags
- ************************************************************************/
-#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
-#define DIGI_FAST 0x0002 /* Fast baud rates */
-#define RTSPACE 0x0004 /* RTS input flow control */
-#define CTSPACE 0x0008 /* CTS output flow control */
-#define DSRPACE 0x0010 /* DSR output flow control */
-#define DCDPACE 0x0020 /* DCD output flow control */
-#define DTRPACE 0x0040 /* DTR input flow control */
-#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
-#define DIGI_FORCEDCD 0x0100 /* Force carrier */
-#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
-#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
-#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
-#define DIGI_PP_INPUT 0x1000 /* Change parallel port to input*/
-#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
-#define DIGI_422 0x4000 /* for 422/232 selectable panel */
-#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
-
-/************************************************************************
- * These options are not supported on the comxi.
- ************************************************************************/
-#define DIGI_COMXI (DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE)
-
-#define DIGI_PLEN 28 /* String length */
-#define DIGI_TSIZ 10 /* Terminal string len */
-
-/************************************************************************
- * Structure used with ioctl commands for DIGI parameters.
- ************************************************************************/
-struct digi_t {
- unsigned short digi_flags; /* Flags (see above) */
- unsigned short digi_maxcps; /* Max printer CPS */
- unsigned short digi_maxchar; /* Max chars in print queue */
- unsigned short digi_bufsize; /* Buffer size */
- unsigned char digi_onlen; /* Length of ON string */
- unsigned char digi_offlen; /* Length of OFF string */
- char digi_onstr[DIGI_PLEN]; /* Printer on string */
- char digi_offstr[DIGI_PLEN]; /* Printer off string */
- char digi_term[DIGI_TSIZ]; /* terminal string */
-};
-
-/************************************************************************
- * KME definitions and structures.
- ************************************************************************/
-#define RW_IDLE 0 /* Operation complete */
-#define RW_READ 1 /* Read Concentrator Memory */
-#define RW_WRITE 2 /* Write Concentrator Memory */
-
-struct rw_t {
- unsigned char rw_req; /* Request type */
- unsigned char rw_board; /* Host Adapter board number */
- unsigned char rw_conc; /* Concentrator number */
- unsigned char rw_reserved; /* Reserved for expansion */
- unsigned long rw_addr; /* Address in concentrator */
- unsigned short rw_size; /* Read/write request length */
- unsigned char rw_data[128]; /* Data to read/write */
-};
-
-/***********************************************************************
- * Shrink Buffer and Board Information definitions and structures.
-
- ************************************************************************/
- /* Board type return codes */
-#define PCXI_TYPE 1 /* Board type at the designated port is a PC/Xi */
-#define PCXM_TYPE 2 /* Board type at the designated port is a PC/Xm */
-#define PCXE_TYPE 3 /* Board type at the designated port is a PC/Xe */
-#define MCXI_TYPE 4 /* Board type at the designated port is a MC/Xi */
-#define COMXI_TYPE 5 /* Board type at the designated port is a COM/Xi */
-
- /* Non-Zero Result codes. */
-#define RESULT_NOBDFND 1 /* A Digi product at that port is not config installed */
-#define RESULT_NODESCT 2 /* A memory descriptor was not obtainable */
-#define RESULT_NOOSSIG 3 /* FEP/OS signature was not detected on the board */
-#define RESULT_TOOSML 4 /* Too small an area to shrink. */
-#define RESULT_NOCHAN 5 /* Channel structure for the board was not found */
-
-struct shrink_buf_struct {
- unsigned long shrink_buf_vaddr; /* Virtual address of board */
- unsigned long shrink_buf_phys; /* Physical address of board */
- unsigned long shrink_buf_bseg; /* Amount of board memory */
- unsigned long shrink_buf_hseg; /* '186 Beginning of Dual-Port */
-
- unsigned long shrink_buf_lseg; /* '186 Beginning of freed memory */
- unsigned long shrink_buf_mseg; /* Linear address from start of
- dual-port were freed memory
- begins, host viewpoint. */
-
- unsigned long shrink_buf_bdparam; /* Parameter for xxmemon and
- xxmemoff */
-
- unsigned long shrink_buf_reserva; /* Reserved */
- unsigned long shrink_buf_reservb; /* Reserved */
- unsigned long shrink_buf_reservc; /* Reserved */
- unsigned long shrink_buf_reservd; /* Reserved */
-
- unsigned char shrink_buf_result; /* Reason for call failing
- Zero is Good return */
- unsigned char shrink_buf_init; /* Non-Zero if it caused an
- xxinit call. */
-
- unsigned char shrink_buf_anports; /* Number of async ports */
- unsigned char shrink_buf_snports; /* Number of sync ports */
- unsigned char shrink_buf_type; /* Board type 1 = PC/Xi,
- 2 = PC/Xm,
- 3 = PC/Xe
- 4 = MC/Xi
- 5 = COMX/i */
- unsigned char shrink_buf_card; /* Card number */
-
-};
-
-/************************************************************************
- * Structure to get driver status information
- ************************************************************************/
-struct digi_dinfo {
- unsigned long dinfo_nboards; /* # boards configured */
- char dinfo_reserved[12]; /* for future expansion */
- char dinfo_version[16]; /* driver version */
-};
-
-#define DIGI_GETDD ('d'<<8) | 248 /* get driver info */
-
-/************************************************************************
- * Structure used with ioctl commands for per-board information
- *
- * physsize and memsize differ when board has "windowed" memory
- ************************************************************************/
-struct digi_info {
- unsigned long info_bdnum; /* Board number (0 based) */
- unsigned long info_ioport; /* io port address */
- unsigned long info_physaddr; /* memory address */
- unsigned long info_physsize; /* Size of host mem window */
- unsigned long info_memsize; /* Amount of dual-port mem */
- /* on board */
- unsigned short info_bdtype; /* Board type */
- unsigned short info_nports; /* number of ports */
- char info_bdstate; /* board state */
- char info_reserved[7]; /* for future expansion */
-};
-
-#define DIGI_GETBD ('d'<<8) | 249 /* get board info */
-
-struct digi_stat {
- unsigned int info_chan; /* Channel number (0 based) */
- unsigned int info_brd; /* Board number (0 based) */
- unsigned long info_cflag; /* cflag for channel */
- unsigned long info_iflag; /* iflag for channel */
- unsigned long info_oflag; /* oflag for channel */
- unsigned long info_mstat; /* mstat for channel */
- unsigned long info_tx_data; /* tx_data for channel */
- unsigned long info_rx_data; /* rx_data for channel */
- unsigned long info_hflow; /* hflow for channel */
- unsigned long info_reserved[8]; /* for future expansion */
-};
-
-#define DIGI_GETSTAT ('d'<<8) | 244 /* get board info */
-/************************************************************************
- *
- * Structure used with ioctl commands for per-channel information
- *
- ************************************************************************/
-struct digi_ch {
- unsigned long info_bdnum; /* Board number (0 based) */
- unsigned long info_channel; /* Channel index number */
- unsigned long info_ch_cflag; /* Channel cflag */
- unsigned long info_ch_iflag; /* Channel iflag */
- unsigned long info_ch_oflag; /* Channel oflag */
- unsigned long info_chsize; /* Channel structure size */
- unsigned long info_sleep_stat; /* sleep status */
- dev_t info_dev; /* device number */
- unsigned char info_initstate; /* Channel init state */
- unsigned char info_running; /* Channel running state */
- long reserved[8]; /* reserved for future use */
-};
-
-/*
-* This structure is used with the DIGI_FEPCMD ioctl to
-* tell the driver which port to send the command for.
-*/
-struct digi_cmd {
- int cmd;
- int word;
- int ncmds;
- int chan; /* channel index (zero based) */
- int bdid; /* board index (zero based) */
-};
-
-/*
-* info_sleep_stat defines
-*/
-#define INFO_RUNWAIT 0x0001
-#define INFO_WOPEN 0x0002
-#define INFO_TTIOW 0x0004
-#define INFO_CH_RWAIT 0x0008
-#define INFO_CH_WEMPTY 0x0010
-#define INFO_CH_WLOW 0x0020
-#define INFO_XXBUF_BUSY 0x0040
-
-#define DIGI_GETCH ('d'<<8) | 245 /* get board info */
-
-/* Board type definitions */
-
-#define SUBTYPE 0007
-#define T_PCXI 0000
-#define T_PCXM 0001
-#define T_PCXE 0002
-#define T_PCXR 0003
-#define T_SP 0004
-#define T_SP_PLUS 0005
-# define T_HERC 0000
-# define T_HOU 0001
-# define T_LON 0002
-# define T_CHA 0003
-#define FAMILY 0070
-#define T_COMXI 0000
-#define T_PCXX 0010
-#define T_CX 0020
-#define T_EPC 0030
-#define T_PCLITE 0040
-#define T_SPXX 0050
-#define T_AVXX 0060
-#define T_DXB 0070
-#define T_A2K_4_8 0070
-#define BUSTYPE 0700
-#define T_ISABUS 0000
-#define T_MCBUS 0100
-#define T_EISABUS 0200
-#define T_PCIBUS 0400
-
-/* Board State Definitions */
-
-#define BD_RUNNING 0x0
-#define BD_REASON 0x7f
-#define BD_NOTFOUND 0x1
-#define BD_NOIOPORT 0x2
-#define BD_NOMEM 0x3
-#define BD_NOBIOS 0x4
-#define BD_NOFEP 0x5
-#define BD_FAILED 0x6
-#define BD_ALLOCATED 0x7
-#define BD_TRIBOOT 0x8
-#define BD_BADKME 0x80
-
-#define DIGI_LOOPBACK ('d'<<8) | 252 /* Enable/disable UART internal loopback */
-#define DIGI_SPOLL ('d'<<8) | 254 /* change poller rate */
-
-#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
-#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
-#define DIGI_RESET_PORT ('e'<<8) | 93 /* Reset port */
-
-#endif /* DIGI_H */
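
The command values in this header mix two ioctl encodings: most use the historical form of a magic character shifted into the high byte plus an ordinal (DIGI_GETA is ('e'<<8) | 94, DIGI_GEDELAY is ('d'<<8) | 246), while the custom-baud pair uses the modern _IOW()/_IOR() macros. A minimal user-space sketch of reading the per-port parameters through DIGI_GETA, the request served by dgap_tty_digigeta() above (error handling trimmed; /dev/ttya01 is only an example device node):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "digi.h"		/* struct digi_t, DIGI_GETA */

int main(void)
{
	struct digi_t d;
	int fd = open("/dev/ttya01", O_RDWR);	/* example port node */

	if (fd < 0 || ioctl(fd, DIGI_GETA, &d) < 0)
		return 1;

	printf("flags 0x%x, maxcps %d, bufsize %d\n",
	       d.digi_flags, d.digi_maxcps, d.digi_bufsize);

	close(fd);
	return 0;
}
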
diff --git a/drivers/staging/dgap/downld.c b/drivers/staging/dgap/downld.c
deleted file mode 100644
index 1f4aa2eca437..000000000000
--- a/drivers/staging/dgap/downld.c
+++ /dev/null
@@ -1,798 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * $Id: downld.c,v 1.6 2009/01/14 14:10:54 markh Exp $
- */
-
-/*
-** downld.c
-**
-** This is the daemon that sends the fep, bios, and concentrator images
-** from user space to the driver.
-** BUGS:
-** If the file changes in the middle of the download, you probably
-** will get what you deserve.
-**
-*/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <fcntl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/errno.h>
-
-#include "dgap_types.h"
-#include "digi.h"
-#include "dgap_fep5.h"
-
-#include "dgap_downld.h"
-
-#include <string.h>
-#include <malloc.h>
-#include <stddef.h>
-#include <unistd.h>
-
-char *pgm;
-void myperror();
-
-/*
-** This structure is used to keep track of the different images available
-** to give to the driver. It is arranged so that the things that are
-** constants or that have defaults are first in the structure to simplify
-** the table of initializers.
-*/
-struct image_info {
- short type; /* bios, fep, conc */
- short family; /* boards this applies to */
- short subtype; /* subtype */
- int len; /* size of image */
- char *image; /* ioctl struct + image */
- char *name;
- char *fname; /* filename of binary (i.e. "asfep.bin") */
- char *pathname; /* pathname to this binary ("/etc/dgap/xrfep.bin"); */
- time_t mtime; /* Last modification time */
-};
-
-#define IBIOS 0
-#define IFEP 1
-#define ICONC 2
-#define ICONFIG 3
-#define IBAD 4
-
-#define DEFAULT_LOC "/lib/firmware/dgap/"
-
-struct image_info *image_list;
-int nimages, count;
-
-struct image_info images[] = {
-{IBIOS, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxbios.bin", DEFAULT_LOC "fxbios.bin", 0 },
-{IFEP, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxfep.bin", DEFAULT_LOC "fxfep.bin", 0 },
-{ICONC, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxcon.bin", DEFAULT_LOC "fxcon.bin", 0 },
-
-{IBIOS, T_CX, SUBTYPE, 0, NULL, "C/X", "cxbios.bin", DEFAULT_LOC "cxbios.bin", 0 },
-{IFEP, T_CX, SUBTYPE, 0, NULL, "C/X", "cxhost.bin", DEFAULT_LOC "cxhost.bin", 0 },
-
-{IBIOS, T_CX, T_PCIBUS, 0, NULL, "C/X PCI", "cxpbios.bin", DEFAULT_LOC "cxpbios.bin", 0 },
-{IFEP, T_CX, T_PCIBUS, 0, NULL, "C/X PCI", "cxpfep.bin", DEFAULT_LOC "cxpfep.bin", 0 },
-
-{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "cxcon.bin", DEFAULT_LOC "cxcon.bin", 0 },
-{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "ibmcxcon.bin", DEFAULT_LOC "ibmcxcon.bin", 0 },
-{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "ibmencon.bin", DEFAULT_LOC "ibmencon.bin", 0 },
-
-{IBIOS, FAMILY, T_PCXR, 0, NULL, "PCXR", "xrbios.bin", DEFAULT_LOC "xrbios.bin", 0 },
-{IFEP, FAMILY, T_PCXR, 0, NULL, "PCXR", "xrfep.bin", DEFAULT_LOC "xrfep.bin", 0 },
-
-{IBIOS, T_PCLITE, SUBTYPE, 0, NULL, "X/em", "sxbios.bin", DEFAULT_LOC "sxbios.bin", 0 },
-{IFEP, T_PCLITE, SUBTYPE, 0, NULL, "X/em", "sxfep.bin", DEFAULT_LOC "sxfep.bin", 0 },
-
-{IBIOS, T_EPC, T_PCIBUS, 0, NULL, "PCI", "pcibios.bin", DEFAULT_LOC "pcibios.bin", 0 },
-{IFEP, T_EPC, T_PCIBUS, 0, NULL, "PCI", "pcifep.bin", DEFAULT_LOC "pcifep.bin", 0 },
-{ICONFIG, 0, 0, 0, NULL, NULL, "dgap.conf", "/etc/dgap.conf", 0 },
-
-/* IBAD/NULL entry indicating end-of-table */
-
-{IBAD, 0, 0, 0, NULL, NULL, NULL, NULL, 0 }
-
-} ;
-
-int errorprint = 1;
-int nodldprint = 1;
-int debugflag;
-int fd;
-
-struct downld_t *ip; /* Image pointer in current image */
-struct downld_t *dp; /* conc. download */
-
-
-/*
- * The same for either the FEP or the BIOS.
- * Append the downldio header, issue the ioctl, then free
- * the buffer. Not horribly CPU efficient, but quite RAM efficient.
- */
-
-void squirt(int req_type, int bdid, struct image_info *ii)
-{
- struct downldio *dliop;
- int size_buf;
- int sfd;
- struct stat sb;
-
- /*
- * If this binary comes from a file, stat it to see how
- * large it is. Yes, we intentionally do this each
- * time for the binary may change between loads.
- */
-
- if (ii->pathname) {
- sfd = open(ii->pathname, O_RDONLY);
-
- if (sfd < 0 ) {
- myperror(ii->pathname);
- goto squirt_end;
- }
-
- if (fstat(sfd, &sb) == -1 ) {
- myperror(ii->pathname);
- goto squirt_end;
- }
-
- ii->len = sb.st_size;
- }
-
- size_buf = ii->len + sizeof(struct downldio);
-
- /*
- * This buffer will be freed at the end of this function. It is
- * not resilient and should be around only long enough for the d/l
- * to happen.
- */
- dliop = (struct downldio *) malloc(size_buf);
-
- if (dliop == NULL) {
- fprintf(stderr,"%s: can't get %d bytes of memory; aborting\n",
- pgm, size_buf);
- exit (1);
- }
-
- /* Now, stick the image in fepimage. This can come from either
- * the compiled-in image or from the filesystem.
- */
- if (ii->pathname)
- read(sfd, dliop->image.fi.fepimage, ii->len);
- else
- memcpy(dliop ->image.fi.fepimage, ii->image, ii->len);
-
- dliop->req_type = req_type;
- dliop->bdid = bdid;
-
- dliop->image.fi.len = ii->len;
-
- if (debugflag)
- printf("sending %d bytes of %s %s from %s\n",
- ii->len,
- (ii->type == IFEP) ? "FEP" : (ii->type == IBIOS) ? "BIOS" : "CONFIG",
- ii->name ? ii->name : "",
- (ii->pathname) ? ii->pathname : "internal image" );
-
- if (ioctl(fd, DIGI_DLREQ_SET, (char *) dliop) == -1) {
- if(errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",pgm);
- errorprint = 0;
- }
- sleep(2);
- }
-
-squirt_end:
-
- if (ii->pathname) {
- close(sfd);
- }
- free(dliop);
-}
-
-
-/*
- * See if we need to reload the download image in core
- *
- */
-void consider_file_rescan(struct image_info *ii)
-{
- int sfd;
- int len;
- struct stat sb;
-
- /* This operation only makes sense when we're working from a file */
-
- if (ii->pathname) {
-
- sfd = open (ii->pathname, O_RDONLY) ;
- if (sfd < 0 ) {
- myperror(ii->pathname);
- exit(1) ;
- }
-
- if( fstat(sfd,&sb) == -1 ) {
- myperror(ii->pathname);
- exit(1);
- }
-
- /* If the file hasn't changed since we last did this,
- * and we have not done a free() on the image, bail
- */
- if (ii->image && (sb.st_mtime == ii->mtime))
- goto end_rescan;
-
- ii->len = len = sb.st_size;
-
- /* Record the timestamp of the file */
- ii->mtime = sb.st_mtime;
-
- /* image should be NULL unless there is an image malloced
- * in already. Before we malloc again, make sure we don't
- * have a memory leak.
- */
- if ( ii->image ) {
- free( ii->image );
- /* ii->image = NULL; */ /* not necessary */
- }
-
- /* This image will be kept only long enough for the
- * download to happen. After sending the last block,
- * it will be freed
- */
- ii->image = malloc(len) ;
-
- if (ii->image == NULL) {
- fprintf(stderr,
- "%s: can't get %d bytes of memory; aborting\n",
- pgm, len);
- exit (1);
- }
-
- if (read(sfd, ii->image, len) < len) {
- fprintf(stderr,"%s: read error on %s; aborting\n",
- pgm, ii->pathname);
- exit (1);
- }
-
-end_rescan:
- close(sfd);
-
- }
-}
-
-/*
- * Scan for images to match the driver requests
- */
-
-struct image_info * find_conc_image()
-{
- int x;
- struct image_info *i = NULL;
-
- for ( x = 0; x < nimages; x++ ) {
- i=&image_list[x];
-
- if(i->type != ICONC)
- continue;
-
- consider_file_rescan(i) ;
-
- ip = (struct downld_t *) image_list[x].image;
- if (ip == NULL) continue;
-
- /*
- * When I removed Clusterport, I kept only the code that I
- * was SURE wasn't ClusterPort. We may not need the next two
- * lines of code.
- */
- if ((dp->dl_type != 'P' ) && ( ip->dl_srev == dp->dl_srev ))
- return i;
- }
- return NULL;
-}
-
-
-int main(int argc, char **argv)
-{
- struct downldio dlio;
- int offset, bsize;
- int x;
- char *down, *image, *fname;
- struct image_info *ii;
-
- pgm = argv[0];
- dp = &dlio.image.dl; /* conc. download */
-
- while((argc > 2) && !strcmp(argv[1],"-d")) {
- debugflag++ ;
- argc-- ;
- argv++ ;
- }
-
- if(argc < 2) {
- fprintf(stderr,
- "usage: %s download-device [image-file] ...\n",
- pgm);
- exit(1);
- }
-
-
-
- /*
- * Daemonize, unless debugging is turned on.
- */
- if (debugflag == 0) {
- switch (fork())
- {
- case 0:
- break;
-
- case -1:
- return 1;
-
- default:
- return 0;
- }
-
- setsid();
-
- /*
- * The child no longer needs "stdin", "stdout", or "stderr",
- * and should not block processes waiting for them to close.
- */
- fclose(stdin);
- fclose(stdout);
- fclose(stderr);
-
- }
-
- while (1) {
- if( (fd = open(argv[1], O_RDWR)) == -1 ) {
- sleep(1);
- }
- else
- break;
- }
-
- /*
- ** create a list of images to search through when trying to match
- ** requests from the driver. Put images from the command line in
- ** the list before built in images so that the command line images
- ** can override the built in ones.
- */
-
- /* allocate space for the list */
-
- nimages = argc - 2;
-
- /* count the number of default list entries */
-
- for (count = 0; images[count].type != IBAD; ++count) ;
-
- nimages += count;
-
- /* Really should just remove the variable "image_list".... robertl */
- image_list = images;
-
- /* get the images from the command line */
- for(x = 2; x < argc; x++) {
- int xx;
-
- /*
- * strip off any leading path information for
- * determining file type
- */
- if( (fname = strrchr(argv[x],'/')) == NULL)
- fname = argv[x];
- else
- fname++; /* skip the slash */
-
- for (xx = 0; xx < count; xx++) {
- if (strcmp(fname, images[xx].fname) == 0 ) {
- images[xx].pathname = argv[x];
-
- /* image should be NULL until */
- /* space is malloced */
- images[xx].image = NULL;
- }
- }
- }
-
- sleep(3);
-
- /*
- ** Endless loop: get a request from the fep, and service that request.
- */
- for(;;) {
- /* get the request */
- if (debugflag)
- printf("b4 get ioctl...");
-
- if (ioctl(fd,DIGI_DLREQ_GET, &dlio) == -1 ) {
- if (errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",
- pgm);
- errorprint = 0;
- }
- sleep(2);
- } else {
- if (debugflag)
- printf("dlio.req_type is %d bd %d\n",
- dlio.req_type,dlio.bdid);
-
- switch(dlio.req_type) {
- case DLREQ_BIOS:
- /*
- ** find the bios image for this type
- */
- for ( x = 0; x < nimages; x++ ) {
- if(image_list[x].type != IBIOS)
- continue;
-
- if ((dlio.image.fi.type & FAMILY) ==
- image_list[x].family) {
-
- if ( image_list[x].family == T_CX ) {
- if ((dlio.image.fi.type & BUSTYPE)
- == T_PCIBUS ) {
- if ( image_list[x].subtype
- == T_PCIBUS )
- break;
- }
- else {
- break;
- }
- }
- else if ( image_list[x].family == T_EPC ) {
- /* If subtype of image is T_PCIBUS, it is */
- /* a PCI EPC image, so the board must */
- /* have bus type T_PCIBUS to match */
- if ((dlio.image.fi.type & BUSTYPE)
- == T_PCIBUS ) {
- if ( image_list[x].subtype
- == T_PCIBUS )
- break;
- }
- else {
- /* NON PCI EPC doesn't use PCI image */
- if ( image_list[x].subtype
- != T_PCIBUS )
- break;
- }
- }
- else
- break;
- }
- else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
- /* PCXR board will break out of the loop here */
- if ( image_list[x].subtype == T_PCXR ) {
- break;
- }
- }
- }
-
- if ( x >= nimages) {
- /*
- ** no valid images exist
- */
- if(nodldprint) {
- fprintf(stderr,
- "%s: cannot find correct BIOS image\n",
- pgm);
- nodldprint = 0;
- }
- dlio.image.fi.type = -1;
- if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1) {
- if (errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",
- pgm);
- errorprint = 0;
- }
- sleep(2);
- }
- break;
- }
- squirt(dlio.req_type, dlio.bdid, &image_list[x]);
- break ;
-
- case DLREQ_FEP:
- /*
- ** find the fep image for this type
- */
- for ( x = 0; x < nimages; x++ ) {
- if(image_list[x].type != IFEP)
- continue;
- if( (dlio.image.fi.type & FAMILY) ==
- image_list[x].family ) {
- if ( image_list[x].family == T_CX ) {
- /* C/X PCI board */
- if ((dlio.image.fi.type & BUSTYPE)
- == T_PCIBUS ) {
- if ( image_list[x].subtype
- == T_PCIBUS )
- break;
- }
- else {
- /* Regular CX */
- break;
- }
- }
- else if ( image_list[x].family == T_EPC ) {
- /* If subtype of image is T_PCIBUS, it is */
- /* a PCI EPC image, so the board must */
- /* have bus type T_PCIBUS to match */
- if ((dlio.image.fi.type & BUSTYPE)
- == T_PCIBUS ) {
- if ( image_list[x].subtype
- == T_PCIBUS )
- break;
- }
- else {
- /* NON PCI EPC doesn't use PCI image */
- if ( image_list[x].subtype
- != T_PCIBUS )
- break;
- }
- }
- else
- break;
- }
- else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
- /* PCXR board will break out of the loop here */
- if ( image_list[x].subtype == T_PCXR ) {
- break;
- }
- }
- }
-
- if ( x >= nimages) {
- /*
- ** no valid images exist
- */
- if(nodldprint) {
- fprintf(stderr,
- "%s: cannot find correct FEP image\n",
- pgm);
- nodldprint = 0;
- }
- dlio.image.fi.type=-1;
- if( ioctl(fd,DIGI_DLREQ_SET,&dlio) == -1 ) {
- if(errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",
- pgm);
- errorprint=0;
- }
- sleep(2);
- }
- break;
- }
- squirt(dlio.req_type, dlio.bdid, &image_list[x]);
- break;
-
- case DLREQ_DEVCREATE:
- {
- char string[1024];
-#if 0
- sprintf(string, "%s /proc/dgap/%d/mknod", DEFSHELL, dlio.bdid);
-#endif
- sprintf(string, "%s /usr/sbin/dgap_updatedevs %d", DEFSHELL, dlio.bdid);
- system(string);
-
- if (debugflag)
- printf("Created Devices.\n");
- if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
- if(errorprint) {
- fprintf(stderr, "%s: warning - DEVCREATE ioctl failed\n",pgm);
- errorprint = 0;
- }
- sleep(2);
- }
- if (debugflag)
- printf("After ioctl set - Created Device.\n");
- }
-
- break;
-
- case DLREQ_CONFIG:
- for ( x = 0; x < nimages; x++ ) {
- if(image_list[x].type != ICONFIG)
- continue;
- else
- break;
- }
-
- if ( x >= nimages) {
- /*
- ** no valid images exist
- */
- if(nodldprint) {
- fprintf(stderr,
- "%s: cannot find correct CONFIG image\n",
- pgm);
- nodldprint = 0;
- }
- dlio.image.fi.type=-1;
- if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
- if(errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",
- pgm);
- errorprint=0;
- }
- sleep(2);
- }
- break;
- }
-
- squirt(dlio.req_type, dlio.bdid, &image_list[x]);
- break;
-
- case DLREQ_CONC:
- /*
- ** find the image needed for this download
- */
- if ( dp->dl_seq == 0 ) {
- /*
- ** find image for hardware rev range
- */
- for ( x = 0; x < nimages; x++ ) {
- ii=&image_list[x];
-
- if(image_list[x].type != ICONC)
- continue;
-
- consider_file_rescan(ii) ;
-
- ip = (struct downld_t *) image_list[x].image;
- if (ip == NULL) continue;
-
- /*
- * When I removed Clusterport, I kept only the
- * code that I was SURE wasn't ClusterPort.
- * We may not need the next four lines of code.
- */
-
- if ((dp->dl_type != 'P' ) &&
- (ip->dl_lrev <= dp->dl_lrev ) &&
- ( dp->dl_lrev <= ip->dl_hrev))
- break;
- }
-
- if ( x >= nimages ) {
- /*
- ** No valid images exist
- */
- if(nodldprint) {
- fprintf(stderr,
- "%s: cannot find correct download image %d\n",
- pgm, dp->dl_lrev);
- nodldprint=0;
- }
- continue;
- }
-
- } else {
- /*
- ** find image version required
- */
- if ((ii = find_conc_image()) == NULL ) {
- /*
- ** No valid images exist
- */
- fprintf(stderr,
- "%s: can't find rest of download image??\n",
- pgm);
- continue;
- }
- }
-
- /*
- ** download block of image
- */
-
- offset = 1024 * dp->dl_seq;
-
- /*
- ** test if block requested within image
- */
- if ( offset < ii->len ) {
-
- /*
- ** if it is, determine block size, set segment,
- ** set size, set pointers, and copy block
- */
- if (( bsize = ii->len - offset ) > 1024 )
- bsize = 1024;
-
- /*
- ** copy image version info to download area
- */
- dp->dl_srev = ip->dl_srev;
- dp->dl_lrev = ip->dl_lrev;
- dp->dl_hrev = ip->dl_hrev;
-
- dp->dl_seg = (64 * dp->dl_seq) + ip->dl_seg;
- dp->dl_size = bsize;
-
- down = (char *)&dp->dl_data[0];
- image = (char *)((char *)ip + offset);
-
- memcpy(down, image, bsize);
- }
- else {
- /*
- ** Image has been downloaded, set segment and
- ** size to indicate no more blocks
- */
- dp->dl_seg = ip->dl_seg;
- dp->dl_size = 0;
-
- /* Now, we can release the concentrator */
- /* image from memory if we're running */
- /* from filesystem images */
-
- if (ii->pathname)
- if (ii->image) {
- free(ii->image);
- ii->image = NULL;
- }
- }
-
- if (debugflag)
- printf(
- "sending conc dl section %d to %s from %s\n",
- dp->dl_seq, ii->name,
- ii->pathname ? ii->pathname : "Internal Image");
-
- if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
- if (errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",
- pgm);
- errorprint=0;
- }
- sleep(2);
- }
- break;
- } /* switch */
- }
- if (debugflag > 1) {
- printf("pausing: "); fflush(stdout);
- fflush(stdin);
- while(getchar() != '\n');
- printf("continuing\n");
- }
- }
-}
-
-/*
-** myperror()
-**
-** Same as normal perror(), but places the program name at the beginning
-** of the message.
-*/
-void myperror(char *s)
-{
- fprintf(stderr,"%s: %s: %s.\n",pgm, s, strerror(errno));
-}
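
Stripped of the image-matching detail, the daemon above is a request/response pump: DIGI_DLREQ_GET asks the driver which image a board needs next, the matching binary is located (and re-read from disk if its mtime changed), and DIGI_DLREQ_SET hands it back, retrying after a short sleep whenever an ioctl fails. A compressed sketch of that pump, with image selection reduced to a caller-supplied callback and without the oversized reply buffer that squirt() builds for BIOS/FEP images:

/*
 * Condensed sketch of the downld.c service loop; image lookup is delegated
 * to a callback and the larger BIOS/FEP reply buffer is omitted.
 */
#include <unistd.h>
#include <sys/ioctl.h>

#include "dgap_types.h"
#include "digi.h"
#include "dgap_fep5.h"
#include "dgap_downld.h"	/* struct downldio, DIGI_DLREQ_* (same includes as downld.c) */

static void serve_requests(int fd, void (*fill)(struct downldio *dlio))
{
	struct downldio dlio;

	for (;;) {
		if (ioctl(fd, DIGI_DLREQ_GET, &dlio) == -1) {
			sleep(2);		/* driver busy or not ready; retry */
			continue;
		}

		fill(&dlio);			/* locate and copy the requested image */

		if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1)
			sleep(2);
	}
}
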
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index 708adbbcedbd..60d9b62c6f1f 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -827,9 +827,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
 * Check to make sure it's for us.
*/
if (brd->magic != DGNC_BOARD_MAGIC) {
- APR((
- "Received interrupt (%d) with a board pointer "
- "that wasn't ours!\n", irq));
+ APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n",
+ irq));
return IRQ_NONE;
}
@@ -846,8 +845,7 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
/* If 0, no interrupts pending */
if (!poll_reg) {
DPR_INTR((
- "Kernel interrupted to me, but no pending "
- "interrupts...\n"));
+ "Kernel interrupted to me, but no pending interrupts...\n"));
DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
return IRQ_NONE;
}
@@ -1388,8 +1386,7 @@ static void cls_send_break(struct channel_t *ch, int msecs)
writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr);
ch->ch_flags |= (CH_BREAK_SENDING);
DPR_IOCTL((
- "Port %d. Starting UART_LCR_SBC! start: %lx "
- "should end: %lx\n",
+ "Port %d. Starting UART_LCR_SBC! start: %lx should end: %lx\n",
ch->ch_portnum, jiffies, ch->ch_stop_sending_break));
}
}
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index c204266cb69f..b1a39b25b419 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -88,7 +88,7 @@ module_exit(dgnc_cleanup_module);
/*
* File operations permitted on Control/Management major.
*/
-static struct file_operations dgnc_BoardFops =
+static const struct file_operations dgnc_BoardFops =
{
.owner = THIS_MODULE,
.unlocked_ioctl = dgnc_mgmt_ioctl,
@@ -236,7 +236,7 @@ int dgnc_init_module(void)
if (dgnc_NumBoards)
pci_unregister_driver(&dgnc_driver);
else
- printk("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
+ pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
dgnc_cleanup_module();
}
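Note: the dgnc_driver.c hunks above make the management fops table const and promote an untyped printk() to pr_warn(). A small sketch of the resulting shape, assuming kernel context (only the fields visible in the hunk are shown):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/printk.h>

extern long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg);

/* const: the table is never modified after registration */
static const struct file_operations dgnc_board_fops_sketch = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = dgnc_mgmt_ioctl,
};

static void report_load_failure(void)
{
        /* pr_warn() carries an explicit KERN_WARNING log level */
        pr_warn("dgnc driver load failed. No Digi Neo or Classic boards found.\n");
}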
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index 1c5ab3d007b0..c5b425bf6692 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -42,7 +42,7 @@
#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
#include <linux/serial_reg.h>
#include <linux/termios.h>
-#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+#include <linux/uaccess.h> /* For copy_from_user/copy_to_user */
#include "dgnc_driver.h"
#include "dgnc_pci.h"
@@ -77,8 +77,7 @@ int dgnc_mgmt_open(struct inode *inode, struct file *file)
return -EBUSY;
}
dgnc_mgmt_in_use[minor]++;
- }
- else {
+ } else {
DGNC_UNLOCK(dgnc_global_lock, lock_flags);
return -ENXIO;
}
@@ -107,9 +106,8 @@ int dgnc_mgmt_close(struct inode *inode, struct file *file)
/* mgmt device */
if (minor < MAXMGMTDEVICES) {
- if (dgnc_mgmt_in_use[minor]) {
+ if (dgnc_mgmt_in_use[minor])
dgnc_mgmt_in_use[minor] = 0;
- }
}
DGNC_UNLOCK(dgnc_global_lock, lock_flags);
@@ -153,7 +151,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
DPR_MGMT(("DIGI_GETDD returning numboards: %d version: %s\n",
ddi.dinfo_nboards, ddi.dinfo_version));
- if (copy_to_user(uarg, &ddi, sizeof (ddi)))
+ if (copy_to_user(uarg, &ddi, sizeof(ddi)))
return -EFAULT;
break;
@@ -165,13 +163,13 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct digi_info di;
- if (copy_from_user(&brd, uarg, sizeof(int))) {
+ if (copy_from_user(&brd, uarg, sizeof(int)))
return -EFAULT;
- }
DPR_MGMT(("DIGI_GETBD asking about board: %d\n", brd));
- if ((brd < 0) || (brd > dgnc_NumBoards) || (dgnc_NumBoards == 0))
+ if ((brd < 0) || (brd > dgnc_NumBoards) ||
+ (dgnc_NumBoards == 0))
return -ENODEV;
memset(&di, 0, sizeof(di));
@@ -195,7 +193,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
DPR_MGMT(("DIGI_GETBD returning type: %x state: %x ports: %x size: %x\n",
di.info_bdtype, di.info_bdstate, di.info_nports, di.info_physsize));
- if (copy_to_user(uarg, &di, sizeof (di)))
+ if (copy_to_user(uarg, &di, sizeof(di)))
return -EFAULT;
break;
@@ -209,9 +207,8 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
uint board = 0;
uint channel = 0;
- if (copy_from_user(&ni, uarg, sizeof(ni))) {
+ if (copy_from_user(&ni, uarg, sizeof(ni)))
return -EFAULT;
- }
DPR_MGMT(("DIGI_GETBD asking about board: %d channel: %d\n",
ni.board, ni.channel));
@@ -268,12 +265,14 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ni.cflag = ch->ch_c_cflag;
ni.lflag = ch->ch_c_lflag;
- if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS)
+ if (ch->ch_digi.digi_flags & CTSPACE ||
+ ch->ch_c_cflag & CRTSCTS)
ni.hflow = 1;
else
ni.hflow = 0;
- if ((ch->ch_flags & CH_STOPI) || (ch->ch_flags & CH_FORCED_STOPI))
+ if ((ch->ch_flags & CH_STOPI) ||
+ (ch->ch_flags & CH_FORCED_STOPI))
ni.recv_stopped = 1;
else
ni.recv_stopped = 0;
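Note: the dgnc_mgmt.c ioctl hunks above all follow the same copy-in/copy-out shape, returning -EFAULT when a user-space copy fails. A minimal sketch of that pattern, assuming kernel context (the struct is an illustrative stand-in, not the driver's digi_info):

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/string.h>

struct example_info {                   /* illustrative stand-in */
        int nboards;
        char version[32];
};

static long example_get_info(void __user *uarg)
{
        struct example_info di;

        memset(&di, 0, sizeof(di));
        di.nboards = 1;                 /* fill in the reply */

        if (copy_to_user(uarg, &di, sizeof(di)))
                return -EFAULT;         /* any uncopied bytes: fail */
        return 0;
}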
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index dc5a138d8d4a..cf22c7b725f9 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -1201,7 +1201,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
ch->ch_cached_lsr = 0;
/* Store how much space we have left in the queue */
- if ((qleft = tail - head - 1) < 0)
+ qleft = tail - head - 1;
+ if (qleft < 0)
qleft += RQUEUEMASK + 1;
/*
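Note: the qleft change above separates the assignment from the comparison; the arithmetic itself is the usual free-space calculation for a power-of-two circular queue. A standalone sketch (the mask value is illustrative; the driver defines its own RQUEUEMASK):

#define RQUEUEMASK 0x1FFF       /* illustrative; must be (queue size - 1) */

/* Free space left in the circular queue: tail - head - 1, wrapped
 * back into range when the subtraction goes negative. */
static int queue_space_left(int head, int tail)
{
        int qleft = tail - head - 1;

        if (qleft < 0)
                qleft += RQUEUEMASK + 1;
        return qleft;
}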
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index a6c6aba82d72..f0b17c36edd8 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -964,8 +964,10 @@ static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
int deltahigh;
int deltalow;
- if (newrate < 0)
- newrate = 0;
+ if (newrate <= 0) {
+ ch->ch_custom_speed = 0;
+ return;
+ }
/*
* Since the divisor is stored in a 16-bit integer, we make sure
@@ -978,7 +980,7 @@ static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
if (newrate && newrate > ch->ch_bd->bd_dividend)
newrate = ch->ch_bd->bd_dividend;
- while (newrate > 0) {
+ if (newrate > 0) {
testdiv = ch->ch_bd->bd_dividend / newrate;
/*
@@ -995,28 +997,23 @@ static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
* If the rate for the requested divisor is correct, just
* use it and be done.
*/
- if (testrate_high == newrate )
- break;
-
- /*
- * Otherwise, pick the rate that is closer (i.e. whichever rate
- * has a smaller delta).
- */
- deltahigh = testrate_high - newrate;
- deltalow = newrate - testrate_low;
+ if (testrate_high != newrate) {
+ /*
+ * Otherwise, pick the rate that is closer (i.e. whichever rate
+ * has a smaller delta).
+ */
+ deltahigh = testrate_high - newrate;
+ deltalow = newrate - testrate_low;
- if (deltahigh < deltalow) {
- newrate = testrate_high;
- } else {
- newrate = testrate_low;
+ if (deltahigh < deltalow) {
+ newrate = testrate_high;
+ } else {
+ newrate = testrate_low;
+ }
}
-
- break;
}
ch->ch_custom_speed = newrate;
-
- return;
}
@@ -1025,7 +1022,8 @@ void dgnc_check_queue_flow_control(struct channel_t *ch)
int qleft = 0;
/* Store how much space we have left in the queue */
- if ((qleft = ch->ch_r_tail - ch->ch_r_head - 1) < 0)
+ qleft = ch->ch_r_tail - ch->ch_r_head - 1;
+ if (qleft < 0)
qleft += RQUEUEMASK + 1;
/*
@@ -1119,7 +1117,8 @@ void dgnc_wakeup_writes(struct channel_t *ch)
/*
* If channel now has space, wake up anyone waiting on the condition.
*/
- if ((qlen = ch->ch_w_head - ch->ch_w_tail) < 0)
+ qlen = ch->ch_w_head - ch->ch_w_tail;
+ if (qlen < 0)
qlen += WQUEUESIZE;
if (qlen >= (WQUEUESIZE - 256)) {
@@ -1917,7 +1916,8 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
head = (ch->ch_w_head) & tmask;
tail = (ch->ch_w_tail) & tmask;
- if ((ret = tail - head - 1) < 0)
+ ret = tail - head - 1;
+ if (ret < 0)
ret += WQUEUESIZE;
/* Limit printer to maxcps */
@@ -2017,7 +2017,8 @@ static int dgnc_tty_write(struct tty_struct *tty,
head = (ch->ch_w_head) & tmask;
tail = (ch->ch_w_tail) & tmask;
- if ((bufcount = tail - head - 1) < 0)
+ bufcount = tail - head - 1;
+ if (bufcount < 0)
bufcount += WQUEUESIZE;
DPR_WRITE(("%d: bufcount: %x count: %x tail: %x head: %x tmask: %x\n",
@@ -3316,10 +3317,10 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case DIGI_SETCUSTOMBAUD:
{
- uint new_rate;
+ int new_rate;
/* Let go of locks when accessing user space, could sleep */
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- rc = get_user(new_rate, (unsigned int __user *) arg);
+ rc = get_user(new_rate, (int __user *) arg);
if (rc)
return rc;
DGNC_LOCK(ch->ch_lock, lock_flags);
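Note: the reworked dgnc_set_custom_speed() above computes an integer divisor from the board's bd_dividend and, when the requested rate is not exactly achievable, keeps whichever neighbouring rate has the smaller delta. A standalone sketch of that selection (function name is illustrative):

/* Pick the achievable rate closest to the requested one, given the
 * board clock "dividend" and an integer divisor. */
static unsigned int closest_rate(unsigned int dividend, unsigned int newrate)
{
        unsigned int testdiv, testrate_high, testrate_low;

        if (newrate == 0)
                return 0;
        if (newrate > dividend)
                newrate = dividend;     /* divisor must be at least 1 */

        testdiv = dividend / newrate;
        testrate_high = dividend / testdiv;             /* rate rounded up */
        testrate_low  = dividend / (testdiv + 1);       /* rate rounded down */

        if (testrate_high == newrate)
                return newrate;         /* exact divisor, use it */

        /* otherwise keep whichever candidate has the smaller delta */
        if (testrate_high - newrate < newrate - testrate_low)
                return testrate_high;
        return testrate_low;
}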
diff --git a/drivers/staging/dgrp/dgrp_sysfs.c b/drivers/staging/dgrp/dgrp_sysfs.c
index 9a18a2c9e73b..2f9345ff0abb 100644
--- a/drivers/staging/dgrp/dgrp_sysfs.c
+++ b/drivers/staging/dgrp/dgrp_sysfs.c
@@ -65,7 +65,9 @@ static ssize_t dgrp_class_pollrate_store(struct device *c,
struct device_attribute *attr,
const char *buf, size_t count)
{
- sscanf(buf, "0x%x\n", &dgrp_poll_tick);
+ if (sscanf(buf, "0x%x\n", &dgrp_poll_tick) != 1)
+ return -EINVAL;
+
return count;
}
static DEVICE_ATTR(pollrate, 0600, dgrp_class_pollrate_show,
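Note: the pollrate store hook above now rejects input that sscanf cannot parse instead of silently keeping a stale value. The same pattern in isolation, assuming kernel context (the global stands in for the driver's dgrp_poll_tick):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static unsigned int poll_tick;          /* stand-in for dgrp_poll_tick */

static ssize_t pollrate_store(struct device *c, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        /* require exactly one parsed value; otherwise report -EINVAL
         * back to whoever wrote the sysfs file */
        if (sscanf(buf, "0x%x\n", &poll_tick) != 1)
                return -EINVAL;

        return count;
}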
diff --git a/drivers/staging/dgrp/dgrp_tty.c b/drivers/staging/dgrp/dgrp_tty.c
index 7a9694c1d9c4..30d26029b21e 100644
--- a/drivers/staging/dgrp/dgrp_tty.c
+++ b/drivers/staging/dgrp/dgrp_tty.c
@@ -1319,7 +1319,8 @@ static int dgrp_calculate_txprint_bounds(struct ch_struct *ch, int space,
if (ch->ch_tun.un_open_count != 0 &&
ch->ch_tun.un_tty->ops->chars_in_buffer &&
- ((ch->ch_tun.un_tty->ops->chars_in_buffer)(ch->ch_tun.un_tty) != 0)) {
+ ((ch->ch_tun.un_tty->ops->chars_in_buffer)
+ (ch->ch_tun.un_tty) != 0)) {
*un_flag = UN_PWAIT;
return 0;
}
@@ -1501,7 +1502,8 @@ static int dgrp_tty_write(struct tty_struct *tty,
*/
if (ch->ch_tun.un_open_count != 0 &&
- ((ch->ch_tun.un_tty->ops->chars_in_buffer)(ch->ch_tun.un_tty) != 0)) {
+ ((ch->ch_tun.un_tty->ops->chars_in_buffer)
+ (ch->ch_tun.un_tty) != 0)) {
un->un_flag |= UN_PWAIT;
count = 0;
goto out;
@@ -1666,7 +1668,8 @@ static int dgrp_tty_write(struct tty_struct *tty,
if (n >= t) {
memcpy(ch->ch_tbuf + ch->ch_tin, buf, t);
- if (nd->nd_dpa_debug && nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(un->un_tty))))
+ if (nd->nd_dpa_debug && nd->nd_dpa_port ==
+ PORT_NUM(MINOR(tty_devnum(un->un_tty))))
dgrp_dpa_data(nd, 0, (char *) buf, t);
buf += t;
n -= t;
@@ -1675,7 +1678,8 @@ static int dgrp_tty_write(struct tty_struct *tty,
}
memcpy(ch->ch_tbuf + ch->ch_tin, buf, n);
- if (nd->nd_dpa_debug && nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(un->un_tty))))
+ if (nd->nd_dpa_debug && nd->nd_dpa_port ==
+ PORT_NUM(MINOR(tty_devnum(un->un_tty))))
dgrp_dpa_data(nd, 0, (char *) buf, n);
buf += n;
ch->ch_tin += n;
@@ -2656,7 +2660,8 @@ static int dgrp_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
ch->ch_send |= RR_RX_FLUSH;
(ch->ch_nd)->nd_tx_work = 1;
(ch->ch_nd)->nd_tx_ready = 1;
- wake_up_interruptible(&(ch->ch_nd)->nd_tx_waitq);
+ wake_up_interruptible(
+ &(ch->ch_nd)->nd_tx_waitq);
}
if (arg == TCIFLUSH)
break;
@@ -2682,7 +2687,8 @@ static int dgrp_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
Linux HPUX Function
TCSETA TCSETA - set the termios
TCSETAF TCSETAF - wait for drain first, then set termios
- TCSETAW TCSETAW - wait for drain, flush the input queue, then set termios
+ TCSETAW TCSETAW - wait for drain,
+ flush the input queue, then set termios
 - looking at the tty_ioctl code, these commands all call our
tty_set_termios at the driver's end, when a TCSETA* is sent,
it is expecting the tty to have a termio structure,
@@ -2798,6 +2804,7 @@ static int dgrp_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
}
/* pretend we didn't recognize this */
+ /* fall-through */
case DIGI_SETA:
return dgrp_tty_digiseta(tty, (struct digi_struct *) arg);
@@ -3207,7 +3214,8 @@ dgrp_tty_init(struct nd_struct *nd)
int max_majors = 1U << (32 - MINORBITS);
for (i = 256; i < max_majors; i++) {
nd->nd_serial_ttdriver->major = i;
- rc = tty_register_driver(nd->nd_serial_ttdriver);
+ rc = tty_register_driver
+ (nd->nd_serial_ttdriver);
if (rc >= 0)
break;
}
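Note: the ioctl hunk above adds a comment marking an intentional case fall-through so readers and static checkers know it is deliberate. A minimal sketch of the idiom (command values are illustrative):

/* The first case does its extra work, then deliberately continues
 * into the next case; the comment documents that this is on purpose. */
static int handle_cmd(int cmd, int arg)
{
        switch (cmd) {
        case 1:
                /* command-specific preparation would go here */
                /* fall-through */
        case 2:
                return arg;             /* shared handling for both commands */
        default:
                return -1;
        }
}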
diff --git a/drivers/staging/echo/TODO b/drivers/staging/echo/TODO
deleted file mode 100644
index 72a311a5a9cc..000000000000
--- a/drivers/staging/echo/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-TODO:
- - send to lkml for review
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: Steve
-Underwood <steveu@coppice.org> and David Rowe <david@rowetel.com>
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index e516bb69f3b4..d329cf314360 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -485,8 +485,6 @@ struct et131x_adapter {
u8 eeprom_data[2];
/* Spinlocks */
- spinlock_t lock;
-
spinlock_t tcb_send_qlock;
spinlock_t tcb_ready_qlock;
spinlock_t send_hw_lock;
@@ -1388,6 +1386,7 @@ static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
mii_indicator);
status = -EIO;
+ goto out;
}
/* If we hit here we were able to read the register and we need to
@@ -1395,6 +1394,7 @@ static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
*/
*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;
+out:
/* Stop the read operation */
writel(0, &mac->mii_mgmt_cmd);
@@ -2124,7 +2124,11 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
/* Alloc memory for the lookup table */
rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
+ if (rx_ring->fbr[0] == NULL)
+ return -ENOMEM;
rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
+ if (rx_ring->fbr[1] == NULL)
+ return -ENOMEM;
/* The first thing we will do is configure the sizes of the buffer
* rings. These will change based on jumbo packet support. Larger
@@ -2289,7 +2293,7 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
for (id = 0; id < NUM_FBRS; id++) {
fbr = rx_ring->fbr[id];
- if (!fbr->ring_virtaddr)
+ if (!fbr || !fbr->ring_virtaddr)
continue;
/* First the packet memory */
@@ -3523,7 +3527,7 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
goto err_out;
}
}
- memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
+ ether_addr_copy(adapter->addr, adapter->rom_addr);
out:
return rc;
err_out:
@@ -3591,6 +3595,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
if (status) {
dev_err(&adapter->pdev->dev,
"et131x_tx_dma_memory_alloc FAILED\n");
+ et131x_tx_dma_memory_free(adapter);
return status;
}
/* Receive buffer memory allocation */
@@ -3598,7 +3603,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
if (status) {
dev_err(&adapter->pdev->dev,
"et131x_rx_dma_memory_alloc FAILED\n");
- et131x_tx_dma_memory_free(adapter);
+ et131x_adapter_memory_free(adapter);
return status;
}
@@ -3760,7 +3765,6 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
adapter->netdev = netdev;
/* Initialize spinlocks here */
- spin_lock_init(&adapter->lock);
spin_lock_init(&adapter->tcb_send_qlock);
spin_lock_init(&adapter->tcb_ready_qlock);
spin_lock_init(&adapter->send_hw_lock);
@@ -3770,7 +3774,7 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
/* Set the MAC address to a default */
- memcpy(adapter->addr, default_mac, ETH_ALEN);
+ ether_addr_copy(adapter->addr, default_mac);
return adapter;
}
@@ -4292,12 +4296,9 @@ static void et131x_multicast(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
int packet_filter;
- unsigned long flags;
struct netdev_hw_addr *ha;
int i;
- spin_lock_irqsave(&adapter->lock, flags);
-
/* Before we modify the platform-independent filter flags, store them
* locally. This allows us to determine if anything's changed and if
* we even need to bother the hardware
@@ -4349,8 +4350,6 @@ static void et131x_multicast(struct net_device *netdev)
*/
if (packet_filter != adapter->packet_filter)
et131x_set_packet_filter(adapter);
-
- spin_unlock_irqrestore(&adapter->lock, flags);
}
/* et131x_tx - The handler to tx a packet on the device */
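Note: the et131x hunks above tighten several error paths: the MII read jumps to a shared out: label so the read operation is always stopped, the fbr lookup-table allocations are NULL-checked, and the free path tolerates a missing table. A standalone sketch of the goto-out shape (names and return values are illustrative):

/* Even on failure, execution falls through the common cleanup at the
 * end; only the result read is skipped. */
static int read_with_cleanup(int fail, int *value)
{
        int status = 0;

        if (fail) {
                status = -1;            /* the driver returns -EIO here */
                goto out;               /* skip only the result read */
        }

        *value = 42;                    /* "read the register" */
out:
        /* common cleanup: the driver stops the MII read operation here */
        return status;
}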
diff --git a/drivers/staging/frontier/Kconfig b/drivers/staging/frontier/Kconfig
index 7121853bd397..4da290b2f5bd 100644
--- a/drivers/staging/frontier/Kconfig
+++ b/drivers/staging/frontier/Kconfig
@@ -1,6 +1,5 @@
config TRANZPORT
tristate "Frontier Tranzport and Alphatrack support"
depends on USB
- default N
---help---
Enable support for the Frontier Tranzport and Alphatrack devices.
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index edd5cef300d0..226b23163109 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -208,7 +208,9 @@ static void usb_alphatrack_delete(struct usb_alphatrack *dev)
kfree(dev->ring_buffer);
kfree(dev->interrupt_in_buffer);
kfree(dev->interrupt_out_buffer);
- kfree(dev); /* fixme oldi_buffer */
+ kfree(dev->oldi_buffer);
+ kfree(dev->write_buffer);
+ kfree(dev);
}
/** usb_alphatrack_interrupt_in_callback */
@@ -233,8 +235,8 @@ static void usb_alphatrack_interrupt_in_callback(struct urb *urb)
if (urb->actual_length != INPUT_CMD_SIZE) {
dev_warn(&dev->intf->dev,
- "Urb length was %d bytes!!"
- "Do something intelligent\n", urb->actual_length);
+ "Urb length was %d bytes!! Do something intelligent\n",
+ urb->actual_length);
} else {
alphatrack_ocmd_info(&dev->intf->dev,
&(*dev->ring_buffer)[dev->ring_tail].cmd,
@@ -688,8 +690,7 @@ static int usb_alphatrack_probe(struct usb_interface *intf,
}
if (dev->interrupt_out_endpoint == NULL)
dev_warn(&intf->dev,
- "Interrupt out endpoint not found"
- "(using control endpoint instead)\n");
+ "Interrupt out endpoint not found (using control endpoint instead)\n");
dev->interrupt_in_endpoint_size =
le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize);
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 0e499ce5f0d7..0571988c58fc 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -257,8 +257,7 @@ static void usb_tranzport_interrupt_in_callback(struct urb *urb)
if (urb->actual_length != 8) {
dev_warn(&dev->intf->dev,
- "Urb length was %d bytes!!"
- "Do something intelligent\n",
+ "Urb length was %d bytes!! Do something intelligent\n",
urb->actual_length);
} else {
dbg_info(&dev->intf->dev,
@@ -542,8 +541,7 @@ static ssize_t usb_tranzport_read(struct file *file, char __user *buffer,
}
dbg_info(&dev->intf->dev,
- "%s: copying to userspace: "
- "%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ "%s: copying to userspace: %02x%02x%02x%02x%02x%02x%02x%02x\n",
__func__,
(*dev->ring_buffer)[dev->ring_tail].cmd[0],
(*dev->ring_buffer)[dev->ring_tail].cmd[1],
@@ -570,8 +568,7 @@ static ssize_t usb_tranzport_read(struct file *file, char __user *buffer,
* and we are the same sign, we can compress +- 7F
*/
dbg_info(&dev->intf->dev,
- "%s: trying to compress: "
- "%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ "%s: trying to compress: %02x%02x%02x%02x%02x%02x%02x%02x\n",
__func__,
(*dev->ring_buffer)[dev->ring_tail].cmd[0],
(*dev->ring_buffer)[dev->ring_tail].cmd[1],
@@ -830,8 +827,7 @@ static int usb_tranzport_probe(struct usb_interface *intf,
}
if (dev->interrupt_out_endpoint == NULL)
dev_warn(&intf->dev,
- "Interrupt out endpoint not found"
- "(using control endpoint instead)\n");
+ "Interrupt out endpoint not found (using control endpoint instead)\n");
dev->interrupt_in_endpoint_size =
le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index a433e33049b5..b6a77088cfe4 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -1145,7 +1145,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
char *cmdbuffer = kmalloc(1600, GFP_KERNEL);
if (!cmdbuffer)
- return -1;
+ return -ENOMEM;
status = ft1000_read_dpram32(dev, 0x200, cmdbuffer, size);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
index 24b8d77a132c..419e534874f9 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
@@ -30,7 +30,7 @@
typedef struct _IOCTL_GET_VER {
unsigned long drv_ver;
-} __attribute__ ((packed)) IOCTL_GET_VER, *PIOCTL_GET_VER;
+} __packed IOCTL_GET_VER, *PIOCTL_GET_VER;
/* Data structure for Dsp statistics */
typedef struct _IOCTL_GET_DSP_STAT {
@@ -67,19 +67,19 @@ typedef struct _IOCTL_GET_DSP_STAT {
unsigned long ConTm; /* Current session connection time in seconds */
unsigned char CalVer[CALVERSZ]; /* Proprietary Calibration Version */
unsigned char CalDate[CALDATESZ]; /* Proprietary Calibration Date */
-} __attribute__ ((packed)) IOCTL_GET_DSP_STAT, *PIOCTL_GET_DSP_STAT;
+} __packed IOCTL_GET_DSP_STAT, *PIOCTL_GET_DSP_STAT;
/* Data structure for Dual Ported RAM messaging between Host and Dsp */
typedef struct _IOCTL_DPRAM_BLK {
unsigned short total_len;
struct pseudo_hdr pseudohdr;
unsigned char buffer[1780];
-} __attribute__ ((packed)) IOCTL_DPRAM_BLK, *PIOCTL_DPRAM_BLK;
+} __packed IOCTL_DPRAM_BLK, *PIOCTL_DPRAM_BLK;
typedef struct _IOCTL_DPRAM_COMMAND {
unsigned short extra;
IOCTL_DPRAM_BLK dpram_blk;
-} __attribute__ ((packed)) IOCTL_DPRAM_COMMAND, *PIOCTL_DPRAM_COMMAND;
+} __packed IOCTL_DPRAM_COMMAND, *PIOCTL_DPRAM_COMMAND;
/*
* Custom IOCTL command codes
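Note: the ft1000 header hunks above switch from the open-coded attribute to the kernel's __packed shorthand. Outside the kernel the macro expands as below; the struct mirrors the single-field IOCTL_GET_VER from the hunk:

/* __packed is the kernel's shorthand for the GCC packing attribute */
#define __packed __attribute__((packed))

struct ioctl_get_ver_sketch {
        unsigned long drv_ver;
} __packed;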
diff --git a/drivers/staging/ft1000/ft1000.h b/drivers/staging/ft1000/ft1000.h
index 175abfa7682e..ccb821a1f370 100644
--- a/drivers/staging/ft1000/ft1000.h
+++ b/drivers/staging/ft1000/ft1000.h
@@ -21,34 +21,64 @@
#define FT1000_REG_SUP_CTRL 0x0020 /* HCTR - Host Control Register */
#define FT1000_REG_SUP_STAT 0x0022 /* HSTAT - Host Status Register */
#define FT1000_REG_RESET 0x0024 /* HCTR - Host Control Register */
-#define FT1000_REG_SUP_ISR 0x0026 /* HISR - Host Interrupt Status Register */
+#define FT1000_REG_SUP_ISR 0x0026 /* HISR - Host Interrupt Status
+ * Register
+ */
#define FT1000_REG_SUP_IMASK 0x0028 /* HIMASK - Host Interrupt Mask */
#define FT1000_REG_DOORBELL 0x002a /* DBELL - Door Bell Register */
-#define FT1000_REG_ASIC_ID 0x002e /* ASICID - ASIC Identification Number */
+#define FT1000_REG_ASIC_ID 0x002e /* ASICID - ASIC Identification
+ * Number
+ */
/* MEMORY MAP FOR ELECTRABUZZ ASIC */
#define FT1000_REG_UFIFO_STAT 0x0000 /* UFSR - Uplink FIFO status register */
-#define FT1000_REG_UFIFO_BEG 0x0002 /* UFBR - Uplink FIFO beginning register */
+#define FT1000_REG_UFIFO_BEG 0x0002 /* UFBR - Uplink FIFO beginning
+ * register
+ */
#define FT1000_REG_UFIFO_MID 0x0004 /* UFMR - Uplink FIFO middle register */
#define FT1000_REG_UFIFO_END 0x0006 /* UFER - Uplink FIFO end register */
-#define FT1000_REG_DFIFO_STAT 0x0008 /* DFSR - Downlink FIFO status register */
+#define FT1000_REG_DFIFO_STAT 0x0008 /* DFSR - Downlink FIFO status
+ * register
+ */
#define FT1000_REG_DFIFO 0x000A /* DFR - Downlink FIFO Register */
-#define FT1000_REG_DPRAM_DATA 0x000C /* DPRAM - Dual Port Indirect Data Register */
+#define FT1000_REG_DPRAM_DATA 0x000C /* DPRAM - Dual Port Indirect
+ * Data Register
+ */
#define FT1000_REG_WATERMARK 0x0010 /* WMARK - Watermark Register */
/* MEMORY MAP FOR MAGNEMITE */
-#define FT1000_REG_MAG_UFDR 0x0000 /* UFDR - Uplink FIFO Data Register (32-bits) */
-#define FT1000_REG_MAG_UFDRL 0x0000 /* UFDRL - Uplink FIFO Data Register low-word (16-bits) */
-#define FT1000_REG_MAG_UFDRH 0x0002 /* UFDRH - Uplink FIFO Data Register high-word (16-bits) */
+#define FT1000_REG_MAG_UFDR 0x0000 /* UFDR - Uplink FIFO Data
+ * Register (32-bits)
+ */
+#define FT1000_REG_MAG_UFDRL 0x0000 /* UFDRL - Uplink FIFO Data
+ * Register low-word (16-bits)
+ */
+#define FT1000_REG_MAG_UFDRH 0x0002 /* UFDRH - Uplink FIFO Data Register
+ * high-word (16-bits)
+ */
#define FT1000_REG_MAG_UFER 0x0004 /* UFER - Uplink FIFO End Register */
#define FT1000_REG_MAG_UFSR 0x0006 /* UFSR - Uplink FIFO Status Register */
-#define FT1000_REG_MAG_DFR 0x0008 /* DFR - Downlink FIFO Register (32-bits) */
-#define FT1000_REG_MAG_DFRL 0x0008 /* DFRL - Downlink FIFO Register low-word (16-bits) */
-#define FT1000_REG_MAG_DFRH 0x000a /* DFRH - Downlink FIFO Register high-word (16-bits) */
-#define FT1000_REG_MAG_DFSR 0x000c /* DFSR - Downlink FIFO Status Register */
-#define FT1000_REG_MAG_DPDATA 0x0010 /* DPDATA - Dual Port RAM Indirect Data Register (32-bits) */
-#define FT1000_REG_MAG_DPDATAL 0x0010 /* DPDATAL - Dual Port RAM Indirect Data Register low-word (16-bits) */
-#define FT1000_REG_MAG_DPDATAH 0x0012 /* DPDATAH - Dual Port RAM Indirect Data Register high-word (16-bits) */
+#define FT1000_REG_MAG_DFR 0x0008 /* DFR - Downlink FIFO Register
+ * (32-bits)
+ */
+#define FT1000_REG_MAG_DFRL 0x0008 /* DFRL - Downlink FIFO Register
+ * low-word (16-bits)
+ */
+#define FT1000_REG_MAG_DFRH 0x000a /* DFRH - Downlink FIFO Register
+ * high-word (16-bits)
+ */
+#define FT1000_REG_MAG_DFSR 0x000c /* DFSR - Downlink FIFO Status
+ * Register
+ */
+#define FT1000_REG_MAG_DPDATA 0x0010 /* DPDATA - Dual Port RAM Indirect
+ * Data Register (32-bits)
+ */
+#define FT1000_REG_MAG_DPDATAL 0x0010 /* DPDATAL - Dual Port RAM Indirect
+ * Data Register low-word (16-bits)
+ */
+#define FT1000_REG_MAG_DPDATAH 0x0012 /* DPDATAH - Dual Port RAM Indirect Data
+ * Register high-word (16-bits)
+ */
#define FT1000_REG_MAG_WATERMARK 0x002c /* WMARK - Watermark Register */
#define FT1000_REG_MAG_VERSION 0x0030 /* LLC Version */
@@ -57,7 +87,9 @@
#define FT1000_DPRAM_RX_BASE 0x0800 /* PC Card to Host Messaging Area */
#define FT1000_FIFO_LEN 0x07FC /* total length for DSP FIFO tracking */
#define FT1000_HI_HO 0x07FE /* heartbeat with HI/HO */
-#define FT1000_DSP_STATUS 0x0FFE /* dsp status - non-zero is a request to reset dsp */
+#define FT1000_DSP_STATUS 0x0FFE /* dsp status - non-zero is a request
+ * to reset dsp
+ */
#define FT1000_DSP_LED 0x0FFA /* dsp led status for PAD device */
#define FT1000_DSP_CON_STATE 0x0FF8 /* DSP Connection Status Info */
#define FT1000_DPRAM_FEFE 0x0002 /* location for dsp ready indicator */
@@ -67,26 +99,48 @@
#define FT1000_DSP_TIMER3 0x1FF6 /* Timer Field from Basestation */
/* Reserved Dual Port RAM offsets for Magnemite */
-#define FT1000_DPRAM_MAG_TX_BASE 0x0000 /* Host to PC Card Messaging Area */
-#define FT1000_DPRAM_MAG_RX_BASE 0x0200 /* PC Card to Host Messaging Area */
+#define FT1000_DPRAM_MAG_TX_BASE 0x0000 /* Host to PC Card
+ * Messaging Area
+ */
+#define FT1000_DPRAM_MAG_RX_BASE 0x0200 /* PC Card to Host
+ * Messaging Area
+ */
-#define FT1000_MAG_FIFO_LEN 0x1FF /* total length for DSP FIFO tracking */
+#define FT1000_MAG_FIFO_LEN 0x1FF /* total length for DSP
+ * FIFO tracking
+ */
#define FT1000_MAG_FIFO_LEN_INDX 0x1 /* low-word index */
#define FT1000_MAG_HI_HO 0x1FF /* heartbeat with HI/HO */
#define FT1000_MAG_HI_HO_INDX 0x0 /* high-word index */
-#define FT1000_MAG_DSP_LED 0x3FE /* dsp led status for PAD device */
-#define FT1000_MAG_DSP_LED_INDX 0x0 /* dsp led status for PAD device */
+#define FT1000_MAG_DSP_LED 0x3FE /* dsp led status for
+ * PAD device
+ */
+#define FT1000_MAG_DSP_LED_INDX 0x0 /* dsp led status for
+ * PAD device
+ */
#define FT1000_MAG_DSP_CON_STATE 0x3FE /* DSP Connection Status Info */
#define FT1000_MAG_DSP_CON_STATE_INDX 0x1 /* DSP Connection Status Info */
-#define FT1000_MAG_DPRAM_FEFE 0x000 /* location for dsp ready indicator */
-#define FT1000_MAG_DPRAM_FEFE_INDX 0x0 /* location for dsp ready indicator */
-#define FT1000_MAG_DSP_TIMER0 0x3FC /* Timer Field from Basestation */
+#define FT1000_MAG_DPRAM_FEFE 0x000 /* location for dsp ready
+ * indicator
+ */
+#define FT1000_MAG_DPRAM_FEFE_INDX 0x0 /* location for dsp ready
+ * indicator
+ */
+#define FT1000_MAG_DSP_TIMER0 0x3FC /* Timer Field from
+ * Basestation
+ */
#define FT1000_MAG_DSP_TIMER0_INDX 0x1
-#define FT1000_MAG_DSP_TIMER1 0x3FC /* Timer Field from Basestation */
+#define FT1000_MAG_DSP_TIMER1 0x3FC /* Timer Field from
+ * Basestation
+ */
#define FT1000_MAG_DSP_TIMER1_INDX 0x0
-#define FT1000_MAG_DSP_TIMER2 0x3FD /* Timer Field from Basestation */
+#define FT1000_MAG_DSP_TIMER2 0x3FD /* Timer Field from
+ * Basestation
+ */
#define FT1000_MAG_DSP_TIMER2_INDX 0x1
-#define FT1000_MAG_DSP_TIMER3 0x3FD /* Timer Field from Basestation */
+#define FT1000_MAG_DSP_TIMER3 0x3FD /* Timer Field from
+ * Basestation
+ */
#define FT1000_MAG_DSP_TIMER3_INDX 0x0
#define FT1000_MAG_TOTAL_LEN 0x200
#define FT1000_MAG_TOTAL_LEN_INDX 0x1
@@ -99,24 +153,38 @@
#define HOST_INTF_BE 0x1 /* Host interface big endian mode */
/* FT1000 to Host Doorbell assignments */
-#define FT1000_DB_DPRAM_RX 0x0001 /* this value indicates that DSP has data for host in DPRAM */
+#define FT1000_DB_DPRAM_RX 0x0001 /* this value indicates that DSP
+ * has data for host in DPRAM
+ */
#define FT1000_DB_DNLD_RX 0x0002 /* Downloader handshake doorbell */
-#define FT1000_ASIC_RESET_REQ 0x0004 /* DSP requesting host to reset the ASIC */
-#define FT1000_DSP_ASIC_RESET 0x0008 /* DSP indicating host that it will reset the ASIC */
+#define FT1000_ASIC_RESET_REQ 0x0004 /* DSP requesting host to
+ * reset the ASIC
+ */
+#define FT1000_DSP_ASIC_RESET 0x0008 /* DSP indicating host that
+ * it will reset the ASIC
+ */
#define FT1000_DB_COND_RESET 0x0010 /* DSP request for a card reset. */
/* Host to FT1000 Doorbell assignments */
-#define FT1000_DB_DPRAM_TX 0x0100 /* this value indicates that host has data for DSP in DPRAM. */
+#define FT1000_DB_DPRAM_TX 0x0100 /* this value indicates that host
+ * has data for DSP in DPRAM.
+ */
#define FT1000_DB_DNLD_TX 0x0200 /* Downloader handshake doorbell */
#define FT1000_ASIC_RESET_DSP 0x0400 /* Responds to FT1000_ASIC_RESET_REQ */
-#define FT1000_DB_HB 0x1000 /* Indicates that supervisor has a heartbeat message for DSP. */
+#define FT1000_DB_HB 0x1000 /* Indicates that supervisor has a
+ * heartbeat message for DSP.
+ */
#define hi 0x6869 /* PC Card heartbeat values */
#define ho 0x686f /* PC Card heartbeat values */
/* Magnemite specific defines */
-#define hi_mag 0x6968 /* Byte swap hi to avoid additional system call */
-#define ho_mag 0x6f68 /* Byte swap ho to avoid additional system call */
+#define hi_mag 0x6968 /* Byte swap hi to avoid
+ * additional system call
+ */
+#define ho_mag 0x6f68 /* Byte swap ho to avoid
+ * additional system call
+ */
/* Bit field definitions for Host Interrupt Status Register */
/* Indicate the cause of an interrupt. */
@@ -133,13 +201,19 @@
#define ISR_MASK_RCV 0x0004 /* Downlink Packet available mask */
#define ISR_MASK_WATERMARK 0x0008 /* Watermark interrupt mask */
#define ISR_MASK_ALL 0xffff /* Mask all interrupts */
-/* Default interrupt mask (Enable Doorbell pending and Packet available interrupts) */
+/* Default interrupt mask
+ * (Enable Doorbell pending and Packet available interrupts)
+ */
#define ISR_DEFAULT_MASK 0x7ff9
/* Bit field definition for Host Control Register */
-#define DSP_RESET_BIT 0x0001 /* Bit field to control dsp reset state */
+#define DSP_RESET_BIT 0x0001 /* Bit field to control
+ * dsp reset state
+ */
/* (0 = out of reset 1 = reset) */
-#define ASIC_RESET_BIT 0x0002 /* Bit field to control ASIC reset state */
+#define ASIC_RESET_BIT 0x0002 /* Bit field to control
+ * ASIC reset state
+ */
/* (0 = out of reset 1 = reset) */
#define DSP_UNENCRYPTED 0x0004
#define DSP_ENCRYPTED 0x0008
@@ -195,7 +269,9 @@ struct pseudo_hdr {
unsigned char source; /* hardware source id */
/* Host = 0x10 */
/* Dsp = 0x20 */
- unsigned char destination; /* hardware destination id (refer to source) */
+ unsigned char destination; /* hardware destination id
+ * (refer to source)
+ */
unsigned char portdest; /* software destination port id */
/* Host = 0x00 */
/* Application Broadcast = 0x10 */
@@ -204,7 +280,9 @@ struct pseudo_hdr {
/* Dsp Airlink = 0x90 */
/* Dsp Loader = 0xa0 */
/* Dsp MIP = 0xb0 */
- unsigned char portsrc; /* software source port id (refer to portdest) */
+ unsigned char portsrc; /* software source port id
+ * (refer to portdest)
+ */
unsigned short sh_str_id; /* not used */
unsigned char control; /* not used */
unsigned char rsvd1;
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 8af136e9c9dc..b22142ee5262 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -2036,6 +2036,13 @@ static void fwserial_auto_connect(struct work_struct *work)
schedule_delayed_work(&peer->connect, CONNECT_RETRY_DELAY);
}
+static void fwserial_peer_workfn(struct work_struct *work)
+{
+ struct fwtty_peer *peer = to_peer(work, work);
+
+ peer->workfn(work);
+}
+
/**
* fwserial_add_peer - add a newly probed 'serial' unit device as a 'peer'
* @serial: aggregate representing the specific fw_card to add the peer to
@@ -2100,7 +2107,7 @@ static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
peer->port = NULL;
init_timer(&peer->timer);
- INIT_WORK(&peer->work, NULL);
+ INIT_WORK(&peer->work, fwserial_peer_workfn);
INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect);
/* associate peer with specific fw_card */
@@ -2702,7 +2709,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
} else {
peer->work_params.plug_req = pkt->plug_req;
- PREPARE_WORK(&peer->work, fwserial_handle_plug_req);
+ peer->workfn = fwserial_handle_plug_req;
queue_work(system_unbound_wq, &peer->work);
}
break;
@@ -2731,7 +2738,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
fwtty_err(&peer->unit, "unplug req: busy\n");
rcode = RCODE_CONFLICT_ERROR;
} else {
- PREPARE_WORK(&peer->work, fwserial_handle_unplug_req);
+ peer->workfn = fwserial_handle_unplug_req;
queue_work(system_unbound_wq, &peer->work);
}
break;
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 54f7f9b9b212..98b853d4acbc 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -91,6 +91,7 @@ struct fwtty_peer {
struct rcu_head rcu;
spinlock_t lock;
+ work_func_t workfn;
struct work_struct work;
struct peer_work_params work_params;
struct timer_list timer;
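Note: the fwserial change above compensates for the removal of PREPARE_WORK(): the handler is stored in the new workfn member and the work item is permanently bound to one dispatcher that calls it. A minimal sketch of that indirection, assuming kernel context (the struct is trimmed to the two fields the hunks show):

#include <linux/workqueue.h>

struct peer_sketch {
        work_func_t workfn;             /* handler chosen at queue time */
        struct work_struct work;        /* INIT_WORK(&work, peer_workfn) at setup */
};

static void peer_workfn(struct work_struct *work)
{
        struct peer_sketch *peer = container_of(work, struct peer_sketch, work);

        peer->workfn(work);             /* dispatch to the selected handler */
}

/* replacement for the old PREPARE_WORK() + queue_work() sequence */
static void queue_peer_work(struct peer_sketch *peer, work_func_t handler)
{
        peer->workfn = handler;
        queue_work(system_unbound_wq, &peer->work);
}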
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 74a03608b2dd..64c55b99fda4 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -131,7 +131,8 @@ static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
/* Get the pointer of the original request */
arp_in = (struct arphdr *)(skb_in->data + mac_header_len);
- arp_data_in = (struct arpdata *)(skb_in->data + mac_header_len + sizeof(struct arphdr));
+ arp_data_in = (struct arpdata *)(skb_in->data + mac_header_len +
+ sizeof(struct arphdr));
/* Get the pointer of the outgoing response */
arp_out = (struct arphdr *)arp_temp;
@@ -160,9 +161,12 @@ static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
return -ENOMEM;
skb_reserve(skb_out, NET_IP_ALIGN);
- memcpy(skb_put(skb_out, mac_header_len), mac_header_data, mac_header_len);
- memcpy(skb_put(skb_out, sizeof(struct arphdr)), arp_out, sizeof(struct arphdr));
- memcpy(skb_put(skb_out, sizeof(struct arpdata)), arp_data_out, sizeof(struct arpdata));
+ memcpy(skb_put(skb_out, mac_header_len), mac_header_data,
+ mac_header_len);
+ memcpy(skb_put(skb_out, sizeof(struct arphdr)), arp_out,
+ sizeof(struct arphdr));
+ memcpy(skb_put(skb_out, sizeof(struct arpdata)), arp_data_out,
+ sizeof(struct arpdata));
skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
skb_out->dev = skb_in->dev;
@@ -198,7 +202,7 @@ static int icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
pseudo_header.ph.ph_nxt = ipv6->nexthdr;
w = (u16 *)&pseudo_header;
- for (i = 0; i < sizeof(pseudo_header.pa) / sizeof(pseudo_header.pa[0]); i++)
+ for (i = 0; i < ARRAY_SIZE(pseudo_header.pa); i++)
sum += pseudo_header.pa[i];
w = ptr;
@@ -260,11 +264,14 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
return -1;
/* Check if this is NDP packet */
- icmp6_in = (struct icmp6hdr *)(skb_in->data + mac_header_len + sizeof(struct ipv6hdr));
+ icmp6_in = (struct icmp6hdr *)(skb_in->data + mac_header_len +
+ sizeof(struct ipv6hdr));
if (icmp6_in->icmp6_type == NDISC_ROUTER_SOLICITATION) { /* Check RS */
return -1;
- } else if (icmp6_in->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) { /* Check NS */
- u8 icmp_na[sizeof(struct icmp6hdr) + sizeof(struct neighbour_advertisement)];
+ } else if (icmp6_in->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+ /* Check NS */
+ u8 icmp_na[sizeof(struct icmp6hdr) +
+ sizeof(struct neighbour_advertisement)];
u8 zero_addr8[16] = {0,};
if (memcmp(ipv6_in->saddr.in6_u.u6_addr8, zero_addr8, 16) == 0)
@@ -276,7 +283,9 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
icmp6_out.icmp6_cksum = 0;
icmp6_out.icmp6_dataun.un_data32[0] = htonl(0x60000000); /* R=0, S=1, O=1 */
- ns = (struct neighbour_solicitation *)(skb_in->data + mac_header_len + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
+ ns = (struct neighbour_solicitation *)
+ (skb_in->data + mac_header_len +
+ sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
memcpy(&na.target_address, ns->target_address, 16);
na.type = 0x02;
na.length = 1;
@@ -289,13 +298,17 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
memcpy(&ipv6_out, ipv6_in, sizeof(struct ipv6hdr));
memcpy(ipv6_out.saddr.in6_u.u6_addr8, &na.target_address, 16);
- memcpy(ipv6_out.daddr.in6_u.u6_addr8, ipv6_in->saddr.in6_u.u6_addr8, 16);
- ipv6_out.payload_len = htons(sizeof(struct icmp6hdr) + sizeof(struct neighbour_advertisement));
+ memcpy(ipv6_out.daddr.in6_u.u6_addr8,
+ ipv6_in->saddr.in6_u.u6_addr8, 16);
+ ipv6_out.payload_len = htons(sizeof(struct icmp6hdr) +
+ sizeof(struct neighbour_advertisement));
memcpy(icmp_na, &icmp6_out, sizeof(struct icmp6hdr));
- memcpy(icmp_na + sizeof(struct icmp6hdr), &na, sizeof(struct neighbour_advertisement));
+ memcpy(icmp_na + sizeof(struct icmp6hdr), &na,
+ sizeof(struct neighbour_advertisement));
- icmp6_out.icmp6_cksum = icmp6_checksum(&ipv6_out, (u16 *)icmp_na, sizeof(icmp_na));
+ icmp6_out.icmp6_cksum = icmp6_checksum(&ipv6_out,
+ (u16 *)icmp_na, sizeof(icmp_na));
} else {
return -1;
}
@@ -311,10 +324,14 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
return -ENOMEM;
skb_reserve(skb_out, NET_IP_ALIGN);
- memcpy(skb_put(skb_out, mac_header_len), mac_header_data, mac_header_len);
- memcpy(skb_put(skb_out, sizeof(struct ipv6hdr)), &ipv6_out, sizeof(struct ipv6hdr));
- memcpy(skb_put(skb_out, sizeof(struct icmp6hdr)), &icmp6_out, sizeof(struct icmp6hdr));
- memcpy(skb_put(skb_out, sizeof(struct neighbour_advertisement)), &na, sizeof(struct neighbour_advertisement));
+ memcpy(skb_put(skb_out, mac_header_len), mac_header_data,
+ mac_header_len);
+ memcpy(skb_put(skb_out, sizeof(struct ipv6hdr)), &ipv6_out,
+ sizeof(struct ipv6hdr));
+ memcpy(skb_put(skb_out, sizeof(struct icmp6hdr)), &icmp6_out,
+ sizeof(struct icmp6hdr));
+ memcpy(skb_put(skb_out, sizeof(struct neighbour_advertisement)), &na,
+ sizeof(struct neighbour_advertisement));
skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
skb_out->dev = skb_in->dev;
@@ -363,7 +380,8 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
/* Check DHCPv4 */
if (ip->protocol == IPPROTO_UDP) {
- struct udphdr *udp = (struct udphdr *)(network_data + sizeof(struct iphdr));
+ struct udphdr *udp = (struct udphdr *)
+ (network_data + sizeof(struct iphdr));
if (ntohs(udp->dest) == 67 || ntohs(udp->dest) == 68)
nic_type |= NIC_TYPE_F_DHCP;
}
@@ -373,12 +391,13 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
ipv6 = (struct ipv6hdr *)network_data;
if (ipv6->nexthdr == IPPROTO_ICMPV6) /* Check NDP request */ {
- struct icmp6hdr *icmp6 = (struct icmp6hdr *)(network_data + sizeof(struct ipv6hdr));
- if (/*icmp6->icmp6_type == NDISC_ROUTER_SOLICITATION || */
- icmp6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
+ struct icmp6hdr *icmp6 = (struct icmp6hdr *)
+ (network_data + sizeof(struct ipv6hdr));
+ if (icmp6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
nic_type |= NIC_TYPE_ICMPV6;
} else if (ipv6->nexthdr == IPPROTO_UDP) /* Check DHCPv6 */ {
- struct udphdr *udp = (struct udphdr *)(network_data + sizeof(struct ipv6hdr));
+ struct udphdr *udp = (struct udphdr *)
+ (network_data + sizeof(struct ipv6hdr));
if (ntohs(udp->dest) == 546 || ntohs(udp->dest) == 547)
nic_type |= NIC_TYPE_F_DHCP;
}
@@ -420,11 +439,12 @@ static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
}
/*
- Need byte shift (that is, remove VLAN tag) if there is one
- For the case of ARP, this breaks the offset as vlan_ethhdr+4 is treated as ethhdr
- However, it shouldn't be a problem as the response starts from arp_hdr and ethhdr
- is created by this driver based on the NIC mac
- */
+ * Need byte shift (that is, remove VLAN tag) if there is one
+ * For the case of ARP, this breaks the offset as vlan_ethhdr+4
+ * is treated as ethhdr However, it shouldn't be a problem as
+ * the response starts from arp_hdr and ethhdr is created by this
+ * driver based on the NIC mac
+ */
if (nic_type & NIC_TYPE_F_VLAN) {
struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr *)skb->data;
nic->vlan_id = ntohs(vlan_eth->h_vlan_TCI) & VLAN_VID_MASK;
@@ -436,15 +456,23 @@ static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
data_len = skb->len;
}
- /* If it is a ICMPV6 packet, clear all the other bits : for backward compatibility with the firmware */
+ /* If it is a ICMPV6 packet, clear all the other bits :
+ * for backward compatibility with the firmware
+ */
if (nic_type & NIC_TYPE_ICMPV6)
nic_type = NIC_TYPE_ICMPV6;
- /* If it is not a dhcp packet, clear all the flag bits : original NIC, otherwise the special flag (IPVX | DHCP) */
+ /* If it is not a dhcp packet, clear all the flag bits :
+ * original NIC, otherwise the special flag (IPVX | DHCP)
+ */
if (!(nic_type & NIC_TYPE_F_DHCP))
nic_type &= NIC_TYPE_MASK;
- sscanf(dev->name, "lte%d", &idx);
+ ret = sscanf(dev->name, "lte%d", &idx);
+ if (ret != 1) {
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
ret = nic->phy_dev->send_sdu_func(nic->phy_dev->priv_dev,
data_buf, data_len,
@@ -485,8 +513,11 @@ static int gdm_lte_event_send(struct net_device *dev, char *buf, int len)
struct nic *nic = netdev_priv(dev);
struct hci_packet *hci = (struct hci_packet *)buf;
int idx;
+ int ret;
- sscanf(dev->name, "lte%d", &idx);
+ ret = sscanf(dev->name, "lte%d", &idx);
+ if (ret != 1)
+ return -EINVAL;
return netlink_send(lte_event.sock, idx, 0, buf,
gdm_dev16_to_cpu(
@@ -495,7 +526,8 @@ static int gdm_lte_event_send(struct net_device *dev, char *buf, int len)
+ HCI_HEADER_SIZE);
}
-static void gdm_lte_event_rcv(struct net_device *dev, u16 type, void *msg, int len)
+static void gdm_lte_event_rcv(struct net_device *dev, u16 type,
+ void *msg, int len)
{
struct nic *nic = netdev_priv(dev);
@@ -536,7 +568,8 @@ static u8 find_dev_index(u32 nic_type)
return index;
}
-static void gdm_lte_netif_rx(struct net_device *dev, char *buf, int len, int flagged_nic_type)
+static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
+ int len, int flagged_nic_type)
{
u32 nic_type;
struct nic *nic;
@@ -551,28 +584,46 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf, int len, int fla
nic = netdev_priv(dev);
if (flagged_nic_type & NIC_TYPE_F_DHCP) {
- /* Change the destination mac address with the one requested the IP */
+ /* Change the destination mac address
+ * with the one requested the IP
+ */
if (flagged_nic_type & NIC_TYPE_F_IPV4) {
struct dhcp_packet {
u8 op; /* BOOTREQUEST or BOOTREPLY */
- u8 htype; /* hardware address type. 1 = 10mb ethernet */
+ u8 htype; /* hardware address type.
+ * 1 = 10mb ethernet
+ */
u8 hlen; /* hardware address length */
u8 hops; /* used by relay agents only */
u32 xid; /* unique id */
- u16 secs; /* elapsed since client began acquisition/renewal */
+ u16 secs; /* elapsed since client began
+ * acquisition/renewal
+ */
u16 flags; /* only one flag so far: */
- #define BROADCAST_FLAG 0x8000 /* "I need broadcast replies" */
- u32 ciaddr; /* client IP (if client is in BOUND, RENEW or REBINDING state) */
+ #define BROADCAST_FLAG 0x8000
+ /* "I need broadcast replies" */
+ u32 ciaddr; /* client IP (if client is in
+ * BOUND, RENEW or REBINDING state)
+ */
u32 yiaddr; /* 'your' (client) IP address */
- /* IP address of next server to use in bootstrap, returned in DHCPOFFER, DHCPACK by server */
+ /* IP address of next server to use in
+ * bootstrap, returned in DHCPOFFER,
+ * DHCPACK by server
+ */
u32 siaddr_nip;
u32 gateway_nip; /* relay agent IP address */
- u8 chaddr[16]; /* link-layer client hardware address (MAC) */
+ u8 chaddr[16]; /* link-layer client hardware
+ * address (MAC)
+ */
u8 sname[64]; /* server host name (ASCIZ) */
u8 file[128]; /* boot file name (ASCIZ) */
- u32 cookie; /* fixed first four option bytes (99,130,83,99 dec) */
+ u32 cookie; /* fixed first four option
+ * bytes (99,130,83,99 dec)
+ */
} __packed;
- void *addr = buf + sizeof(struct iphdr) + sizeof(struct udphdr) + offsetof(struct dhcp_packet, chaddr);
+ void *addr = buf + sizeof(struct iphdr) +
+ sizeof(struct udphdr) +
+ offsetof(struct dhcp_packet, chaddr);
memcpy(nic->dest_mac_addr, addr, ETH_ALEN);
}
}
@@ -593,7 +644,9 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf, int len, int fla
vlan_eth.h_vlan_proto = htons(ETH_P_8021Q);
if (nic_type == NIC_TYPE_ARP) {
- /* Should be response: Only happens because there was a request from the host */
+ /* Should be response: Only happens because
+ * there was a request from the host
+ */
eth.h_proto = htons(ETH_P_ARP);
vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_ARP);
} else {
@@ -640,15 +693,20 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
u32 nic_type;
u8 index;
- hci_len = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), multi_sdu->len);
- num_packet = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), multi_sdu->num_packet);
+ hci_len = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev),
+ multi_sdu->len);
+ num_packet = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev),
+ multi_sdu->num_packet);
for (i = 0; i < num_packet; i++) {
sdu = (struct sdu *)data;
- cmd_evt = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->cmd_evt);
- hci_len = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->len);
- nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->nic_type);
+ cmd_evt = gdm_dev16_to_cpu(phy_dev->
+ get_endian(phy_dev->priv_dev), sdu->cmd_evt);
+ hci_len = gdm_dev16_to_cpu(phy_dev->
+ get_endian(phy_dev->priv_dev), sdu->len);
+ nic_type = gdm_dev32_to_cpu(phy_dev->
+ get_endian(phy_dev->priv_dev), sdu->nic_type);
if (cmd_evt != LTE_RX_SDU) {
pr_err("rx sdu wrong hci %04x\n", cmd_evt);
@@ -662,7 +720,8 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
index = find_dev_index(nic_type);
if (index < MAX_NIC_TYPE) {
dev = phy_dev->dev[index];
- gdm_lte_netif_rx(dev, (char *)sdu->data, (int)(hci_len-12), nic_type);
+ gdm_lte_netif_rx(dev, (char *)sdu->data,
+ (int)(hci_len-12), nic_type);
} else {
pr_err("rx sdu invalid nic_type :%x\n", nic_type);
}
@@ -709,7 +768,8 @@ static int gdm_lte_receive_pkt(struct phy_dev *phy_dev, char *buf, int len)
if (!len)
return ret;
- cmd_evt = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), hci->cmd_evt);
+ cmd_evt = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev),
+ hci->cmd_evt);
dev = phy_dev->dev[0];
if (dev == NULL)
@@ -718,7 +778,8 @@ static int gdm_lte_receive_pkt(struct phy_dev *phy_dev, char *buf, int len)
switch (cmd_evt) {
case LTE_RX_SDU:
sdu = (struct sdu *)hci->data;
- nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->nic_type);
+ nic_type = gdm_dev32_to_cpu(phy_dev->
+ get_endian(phy_dev->priv_dev), sdu->nic_type);
index = find_dev_index(nic_type);
dev = phy_dev->dev[index];
gdm_lte_netif_rx(dev, hci->data, len, nic_type);
@@ -733,7 +794,9 @@ static int gdm_lte_receive_pkt(struct phy_dev *phy_dev, char *buf, int len)
break;
case LTE_PDN_TABLE_IND:
pdn_table = (struct hci_pdn_table_ind *)buf;
- nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), pdn_table->nic_type);
+ nic_type = gdm_dev32_to_cpu(phy_dev->
+ get_endian(phy_dev->priv_dev),
+ pdn_table->nic_type);
index = find_dev_index(nic_type);
dev = phy_dev->dev[index];
gdm_lte_pdn_table(dev, buf, len);
@@ -758,7 +821,8 @@ void start_rx_proc(struct phy_dev *phy_dev)
int i;
for (i = 0; i < MAX_RX_SUBMIT_COUNT; i++)
- phy_dev->rcv_func(phy_dev->priv_dev, rx_complete, phy_dev, USB_COMPLETE);
+ phy_dev->rcv_func(phy_dev->priv_dev,
+ rx_complete, phy_dev, USB_COMPLETE);
}
static struct net_device_ops gdm_netdev_ops = {
@@ -771,7 +835,8 @@ static struct net_device_ops gdm_netdev_ops = {
static u8 gdm_lte_macaddr[ETH_ALEN] = {0x00, 0x0a, 0x3b, 0x00, 0x00, 0x00};
-static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest, u8 *mac_address, u8 index)
+static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest,
+ u8 *mac_address, u8 index)
{
/* Form the dev_addr */
if (!mac_address)
@@ -779,10 +844,14 @@ static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest, u8 *mac_ad
else
memcpy(dev_addr, mac_address, ETH_ALEN);
- /* The last byte of the mac address should be less than or equal to 0xFC */
+ /* The last byte of the mac address
+ * should be less than or equal to 0xFC
+ */
dev_addr[ETH_ALEN-1] += index;
- /* Create random nic src and copy the first 3 bytes to be the same as dev_addr */
+ /* Create random nic src and copy the first
+ * 3 bytes to be the same as dev_addr
+ */
random_ether_addr(nic_src);
memcpy(nic_src, dev_addr, 3);
@@ -799,7 +868,8 @@ static void validate_mac_address(u8 *mac_address)
}
}
-int register_lte_device(struct phy_dev *phy_dev, struct device *dev, u8 *mac_address)
+int register_lte_device(struct phy_dev *phy_dev,
+ struct device *dev, u8 *mac_address)
{
struct nic *nic;
struct net_device *net;
@@ -814,7 +884,8 @@ int register_lte_device(struct phy_dev *phy_dev, struct device *dev, u8 *mac_add
sprintf(pdn_dev_name, "lte%%dpdn%d", index);
/* Allocate netdev */
- net = alloc_netdev(sizeof(struct nic), pdn_dev_name, ether_setup);
+ net = alloc_netdev(sizeof(struct nic), pdn_dev_name,
+ ether_setup);
if (net == NULL) {
pr_err("alloc_netdev failed\n");
ret = -ENOMEM;
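Note: one gdm_lte.c hunk above replaces a hand-written sizeof division with ARRAY_SIZE() when iterating the IPv6 pseudo-header words. The idiom in isolation (the kernel's macro additionally type-checks its argument; the summing helper is illustrative):

#include <stdint.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))      /* element count */

static uint32_t sum_words(const uint16_t *w, size_t n)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i < n; i++)
                sum += w[i];
        return sum;
}

/* usage, mirroring the pseudo-header loop in icmp6_checksum():
 *      sum += sum_words(pseudo_header.pa, ARRAY_SIZE(pseudo_header.pa));
 */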
diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h
index 9287d310d8e2..88414e5a70cc 100644
--- a/drivers/staging/gdm724x/gdm_lte.h
+++ b/drivers/staging/gdm724x/gdm_lte.h
@@ -56,7 +56,7 @@ struct phy_dev {
int (*cb)(void *cb_data, void *data, int len,
int context),
void *cb_data, int context);
- struct gdm_endian *(*get_endian)(void *priv_dev);
+ struct gdm_endian * (*get_endian)(void *priv_dev);
};
struct nic {
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 2fa3a5a6580f..10ce2c1805bb 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -165,7 +165,8 @@ static int up_to_host(struct mux_rx *r)
int len = r->len;
while (1) {
- mux_header = (struct mux_pkt_header *)(r->buf + packet_size_sum);
+ mux_header = (struct mux_pkt_header *)(r->buf +
+ packet_size_sum);
start_flag = __le32_to_cpu(mux_header->start_flag);
payload_size = __le32_to_cpu(mux_header->payload_size);
packet_type = __le16_to_cpu(mux_header->packet_type);
@@ -231,7 +232,8 @@ static void do_rx(struct work_struct *work)
spin_unlock_irqrestore(&rx->to_host_lock, flags);
break;
}
- r = list_entry(rx->to_host_list.next, struct mux_rx, to_host_list);
+ r = list_entry(rx->to_host_list.next, struct mux_rx,
+ to_host_list);
list_del(&r->to_host_list);
spin_unlock_irqrestore(&rx->to_host_lock, flags);
@@ -249,7 +251,8 @@ static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
struct mux_rx *r_remove, *r_remove_next;
spin_lock_irqsave(&rx->submit_list_lock, flags);
- list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list, rx_submit_list) {
+ list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list,
+ rx_submit_list) {
if (r == r_remove)
list_del(&r->rx_submit_list);
}
@@ -279,9 +282,8 @@ static void gdm_mux_rcv_complete(struct urb *urb)
}
}
-static int gdm_mux_recv(void *priv_dev,
- int (*cb)(void *data, int len, int tty_index, struct tty_dev *tty_dev, int complete)
- )
+static int gdm_mux_recv(void *priv_dev, int (*cb)(void *data, int len,
+ int tty_index, struct tty_dev *tty_dev, int complete))
{
struct mux_dev *mux_dev = priv_dev;
struct usb_device *usbdev = mux_dev->usbdev;
@@ -416,7 +418,8 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
return ret;
}
-static int gdm_mux_send_control(void *priv_dev, int request, int value, void *buf, int len)
+static int gdm_mux_send_control(void *priv_dev, int request, int value,
+ void *buf, int len)
{
struct mux_dev *mux_dev = priv_dev;
struct usb_device *usbdev = mux_dev->usbdev;
@@ -448,7 +451,8 @@ static void release_usb(struct mux_dev *mux_dev)
cancel_delayed_work(&mux_dev->work_rx);
spin_lock_irqsave(&rx->submit_list_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
+ rx_submit_list) {
spin_unlock_irqrestore(&rx->submit_list_lock, flags);
usb_kill_urb(r->urb);
spin_lock_irqsave(&rx->submit_list_lock, flags);
@@ -503,7 +507,8 @@ static int init_usb(struct mux_dev *mux_dev)
return ret;
}
-static int gdm_mux_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int gdm_mux_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct mux_dev *mux_dev;
struct tty_dev *tty_dev;
@@ -610,7 +615,8 @@ static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
spin_lock_irqsave(&rx->submit_list_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
+ rx_submit_list) {
spin_unlock_irqrestore(&rx->submit_list_lock, flags);
usb_kill_urb(r->urb);
spin_lock_irqsave(&rx->submit_list_lock, flags);
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index 33458a583142..ee6e40facca7 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -30,14 +30,17 @@
#include "gdm_endian.h"
#define USB_DEVICE_CDC_DATA(vid, pid) \
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_CLASS | \
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
.idVendor = vid,\
.idProduct = pid,\
.bInterfaceClass = USB_CLASS_COMM,\
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET
#define USB_DEVICE_MASS_DATA(vid, pid) \
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,\
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_INFO,\
.idVendor = vid,\
.idProduct = pid,\
.bInterfaceSubClass = USB_SC_SCSI, \
@@ -59,7 +62,8 @@ static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);
static int gdm_usb_recv(void *priv_dev,
- int (*cb)(void *cb_data, void *data, int len, int context),
+ int (*cb)(void *cb_data,
+ void *data, int len, int context),
void *cb_data,
int context);
@@ -119,28 +123,15 @@ out:
static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
- struct usb_tx_sdu *t_sdu = NULL;
- int ret = 0;
-
+ struct usb_tx_sdu *t_sdu;
t_sdu = kzalloc(sizeof(struct usb_tx_sdu), GFP_ATOMIC);
- if (!t_sdu) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!t_sdu)
+ return NULL;
t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_ATOMIC);
if (!t_sdu->buf) {
- ret = -ENOMEM;
- goto out;
- }
-out:
-
- if (ret < 0) {
- if (t_sdu) {
- kfree(t_sdu->buf);
- kfree(t_sdu);
- }
+ kfree(t_sdu);
return NULL;
}
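Note: the alloc_tx_sdu_struct() rewrite above drops the ret/goto bookkeeping in favour of early returns, freeing the partially built object when the second allocation fails. The same shape standalone (buffer size is illustrative; the driver uses kzalloc/kmalloc with GFP_ATOMIC and SDU_BUF_SIZE):

#include <stdlib.h>

struct tx_sdu_sketch {
        char *buf;
};

static struct tx_sdu_sketch *alloc_tx_sdu_sketch(void)
{
        struct tx_sdu_sketch *t_sdu;

        t_sdu = calloc(1, sizeof(*t_sdu));
        if (!t_sdu)
                return NULL;            /* first failure: nothing to undo */

        t_sdu->buf = malloc(2048);
        if (!t_sdu->buf) {
                free(t_sdu);            /* second failure: release the shell */
                return NULL;
        }
        return t_sdu;
}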
@@ -388,7 +379,8 @@ static int set_mac_address(u8 *data, void *arg)
if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
memcpy(mac_address, tlv->data, tlv->len);
- if (register_lte_device(phy_dev, &udev->intf->dev, mac_address) < 0)
+ if (register_lte_device(phy_dev,
+ &udev->intf->dev, mac_address) < 0)
pr_err("register lte device failed\n");
udev->request_mac_addr = 0;
@@ -401,7 +393,8 @@ static int set_mac_address(u8 *data, void *arg)
static void do_rx(struct work_struct *work)
{
- struct lte_udev *udev = container_of(work, struct lte_udev, work_rx.work);
+ struct lte_udev *udev =
+ container_of(work, struct lte_udev, work_rx.work);
struct rx_cxt *rx = &udev->rx;
struct usb_rx *r;
struct hci_packet *hci;
@@ -416,7 +409,8 @@ static void do_rx(struct work_struct *work)
spin_unlock_irqrestore(&rx->to_host_lock, flags);
break;
}
- r = list_entry(rx->to_host_list.next, struct usb_rx, to_host_list);
+ r = list_entry(rx->to_host_list.next,
+ struct usb_rx, to_host_list);
list_del(&r->to_host_list);
spin_unlock_irqrestore(&rx->to_host_lock, flags);
@@ -463,7 +457,8 @@ static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
struct usb_rx *r_remove, *r_remove_next;
spin_lock_irqsave(&rx->submit_lock, flags);
- list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list, rx_submit_list)
+ list_for_each_entry_safe(r_remove,
+ r_remove_next, &rx->rx_submit_list, rx_submit_list)
{
if (r == r_remove) {
list_del(&r->rx_submit_list);
@@ -500,7 +495,8 @@ static void gdm_usb_rcv_complete(struct urb *urb)
}
static int gdm_usb_recv(void *priv_dev,
- int (*cb)(void *cb_data, void *data, int len, int context),
+ int (*cb)(void *cb_data,
+ void *data, int len, int context),
void *cb_data,
int context)
{
@@ -654,7 +650,8 @@ static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
static void do_tx(struct work_struct *work)
{
- struct lte_udev *udev = container_of(work, struct lte_udev, work_tx.work);
+ struct lte_udev *udev =
+ container_of(work, struct lte_udev, work_tx.work);
struct usb_device *usbdev = udev->usbdev;
struct tx_cxt *tx = &udev->tx;
struct usb_tx *t = NULL;
@@ -813,7 +810,8 @@ static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
return &udev->gdm_ed;
}
-static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int gdm_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
int ret = 0;
struct phy_dev *phy_dev = NULL;
@@ -861,7 +859,9 @@ static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id
usb_enable_autosuspend(usbdev);
pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);
- /* List up hosts with big endians, otherwise, defaults to little endian */
+	/* List hosts with big endian here; otherwise,
+	 * default to little endian
+	 */
if (idProduct == PID_GDM7243)
gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
else
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
index 77fc64e28428..5ddd36948a2f 100644
--- a/drivers/staging/gdm724x/netlink_k.c
+++ b/drivers/staging/gdm724x/netlink_k.c
@@ -32,7 +32,8 @@ static struct semaphore netlink_mutex;
#define ND_MAX_GROUP 30
#define ND_IFINDEX_LEN sizeof(int)
#define ND_NLMSG_SPACE(len) (NLMSG_SPACE(len) + ND_IFINDEX_LEN)
-#define ND_NLMSG_DATA(nlh) ((void *)((char *)NLMSG_DATA(nlh) + ND_IFINDEX_LEN))
+#define ND_NLMSG_DATA(nlh) ((void *)((char *)NLMSG_DATA(nlh) + \
+ ND_IFINDEX_LEN))
#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN)
#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN)
#define ND_NLMSG_IFIDX(nlh) NLMSG_DATA(nlh)
diff --git a/drivers/staging/gdm72xx/TODO b/drivers/staging/gdm72xx/TODO
index 30ac01ab972f..5ab27fb29594 100644
--- a/drivers/staging/gdm72xx/TODO
+++ b/drivers/staging/gdm72xx/TODO
@@ -1,5 +1,3 @@
TODO:
- Replace kernel_thread with kthread in gdm_usb.c
-- Replace hard-coded firmware paths with request_firmware in
- sdio_boot.c and usb_boot.c
- Clean up coding style to meet kernel standard.
diff --git a/drivers/staging/gdm72xx/gdm_sdio.c b/drivers/staging/gdm72xx/gdm_sdio.c
index 047a4d77f5ee..c24653739e13 100644
--- a/drivers/staging/gdm72xx/gdm_sdio.c
+++ b/drivers/staging/gdm72xx/gdm_sdio.c
@@ -38,26 +38,9 @@
#define TX_HZ 2000
#define TX_INTERVAL (1000000/TX_HZ)
-/*#define DEBUG*/
-
static int init_sdio(struct sdiowm_dev *sdev);
static void release_sdio(struct sdiowm_dev *sdev);
-#ifdef DEBUG
-static void hexdump(char *title, u8 *data, int len)
-{
- int i;
-
- printk(KERN_DEBUG "%s: length = %d\n", title, len);
- for (i = 0; i < len; i++) {
- printk(KERN_DEBUG "%02x ", data[i]);
- if ((i & 0xf) == 0xf)
- printk(KERN_DEBUG "\n");
- }
- printk(KERN_DEBUG "\n");
-}
-#endif
-
static struct sdio_tx *alloc_tx_struct(struct tx_cxt *tx)
{
struct sdio_tx *t = kzalloc(sizeof(*t), GFP_ATOMIC);
@@ -297,10 +280,9 @@ static void send_sdu(struct sdio_func *func, struct tx_cxt *tx)
spin_unlock_irqrestore(&tx->lock, flags);
-#ifdef DEBUG
- hexdump("sdio_send", tx->sdu_buf + TYPE_A_HEADER_SIZE,
- aggr_len - TYPE_A_HEADER_SIZE);
-#endif
+ print_hex_dump_debug("sdio_send: ", DUMP_PREFIX_NONE, 16, 1,
+ tx->sdu_buf + TYPE_A_HEADER_SIZE,
+ aggr_len - TYPE_A_HEADER_SIZE, false);
for (pos = TYPE_A_HEADER_SIZE; pos < aggr_len; pos += TX_CHUNK_SIZE) {
len = aggr_len - pos;
@@ -335,10 +317,9 @@ static void send_hci(struct sdio_func *func, struct tx_cxt *tx,
{
unsigned long flags;
-#ifdef DEBUG
- hexdump("sdio_send", t->buf + TYPE_A_HEADER_SIZE,
- t->len - TYPE_A_HEADER_SIZE);
-#endif
+ print_hex_dump_debug("sdio_send: ", DUMP_PREFIX_NONE, 16, 1,
+ t->buf + TYPE_A_HEADER_SIZE,
+ t->len - TYPE_A_HEADER_SIZE, false);
send_sdio_pkt(func, t->buf, t->len);
spin_lock_irqsave(&tx->lock, flags);
@@ -474,14 +455,10 @@ static int control_sdu_tx_flow(struct sdiowm_dev *sdev, u8 *hci_data, int len)
goto out;
if (hci_data[4] == 0) {
-#ifdef DEBUG
- printk(KERN_DEBUG "WIMAX ==> STOP SDU TX\n");
-#endif
+ dev_dbg(&sdev->func->dev, "WIMAX ==> STOP SDU TX\n");
tx->stop_sdu_tx = 1;
} else if (hci_data[4] == 1) {
-#ifdef DEBUG
- printk(KERN_DEBUG "WIMAX ==> START SDU TX\n");
-#endif
+ dev_dbg(&sdev->func->dev, "WIMAX ==> START SDU TX\n");
tx->stop_sdu_tx = 0;
if (tx->can_send)
schedule_work(&sdev->ws);
@@ -532,18 +509,14 @@ static void gdm_sdio_irq(struct sdio_func *func)
}
if (hdr[3] == 1) { /* Ack */
-#ifdef DEBUG
u32 *ack_seq = (u32 *)&hdr[4];
-#endif
spin_lock_irqsave(&tx->lock, flags);
tx->can_send = 1;
if (!list_empty(&tx->sdu_list) || !list_empty(&tx->hci_list))
schedule_work(&sdev->ws);
spin_unlock_irqrestore(&tx->lock, flags);
-#ifdef DEBUG
- printk(KERN_DEBUG "Ack... %0x\n", ntohl(*ack_seq));
-#endif
+ dev_dbg(&func->dev, "Ack... %0x\n", ntohl(*ack_seq));
goto done;
}
@@ -579,9 +552,8 @@ static void gdm_sdio_irq(struct sdio_func *func)
}
end_io:
-#ifdef DEBUG
- hexdump("sdio_receive", rx->rx_buf, len);
-#endif
+ print_hex_dump_debug("sdio_receive: ", DUMP_PREFIX_NONE, 16, 1,
+ rx->rx_buf, len, false);
len = control_sdu_tx_flow(sdev, rx->rx_buf, len);
spin_lock_irqsave(&rx->lock, flags);
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
index cdeffe75496b..20539d809397 100644
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ b/drivers/staging/gdm72xx/gdm_usb.c
@@ -55,22 +55,6 @@ static int k_mode_stop;
static int init_usb(struct usbwm_dev *udev);
static void release_usb(struct usbwm_dev *udev);
-/*#define DEBUG */
-#ifdef DEBUG
-static void hexdump(char *title, u8 *data, int len)
-{
- int i;
-
- printk(KERN_DEBUG "%s: length = %d\n", title, len);
- for (i = 0; i < len; i++) {
- printk(KERN_DEBUG "%02x ", data[i]);
- if ((i & 0xf) == 0xf)
- printk(KERN_DEBUG "\n");
- }
- printk(KERN_DEBUG "\n");
-}
-#endif
-
static struct usb_tx *alloc_tx_struct(struct tx_cxt *tx)
{
struct usb_tx *t = kzalloc(sizeof(*t), GFP_ATOMIC);
@@ -368,9 +352,8 @@ static int gdm_usb_send(void *priv_dev, void *data, int len,
gdm_usb_send_complete,
t);
-#ifdef DEBUG
- hexdump("usb_send", t->buf, len + padding);
-#endif
+ print_hex_dump_debug("usb_send: ", DUMP_PREFIX_NONE, 16, 1,
+ t->buf, len + padding, false);
#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
if (usbdev->state & USB_STATE_SUSPENDED) {
list_add_tail(&t->p_list, &tx->pending_list);
@@ -438,10 +421,7 @@ static void gdm_usb_rcv_complete(struct urb *urb)
struct usb_tx *t;
u16 cmd_evt;
unsigned long flags, flags2;
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
struct usb_device *dev = urb->dev;
-#endif
/* Completion by usb_unlink_urb */
if (urb->status == -ECONNRESET)
@@ -451,20 +431,15 @@ static void gdm_usb_rcv_complete(struct urb *urb)
if (!urb->status) {
cmd_evt = (r->buf[0] << 8) | (r->buf[1]);
-#ifdef DEBUG
- hexdump("usb_receive", r->buf, urb->actual_length);
-#endif
+ print_hex_dump_debug("usb_receive: ", DUMP_PREFIX_NONE, 16, 1,
+ r->buf, urb->actual_length, false);
if (cmd_evt == WIMAX_SDU_TX_FLOW) {
if (r->buf[4] == 0) {
-#ifdef DEBUG
- printk(KERN_DEBUG "WIMAX ==> STOP SDU TX\n");
-#endif
+ dev_dbg(&dev->dev, "WIMAX ==> STOP SDU TX\n");
list_for_each_entry(t, &tx->sdu_list, list)
usb_unlink_urb(t->urb);
} else if (r->buf[4] == 1) {
-#ifdef DEBUG
- printk(KERN_DEBUG "WIMAX ==> START SDU TX\n");
-#endif
+ dev_dbg(&dev->dev, "WIMAX ==> START SDU TX\n");
list_for_each_entry(t, &tx->sdu_list, list) {
usb_submit_urb(t->urb, GFP_ATOMIC);
}
diff --git a/drivers/staging/gdm72xx/gdm_wimax.c b/drivers/staging/gdm72xx/gdm_wimax.c
index dd854975db7d..05ce2a22c220 100644
--- a/drivers/staging/gdm72xx/gdm_wimax.c
+++ b/drivers/staging/gdm72xx/gdm_wimax.c
@@ -62,21 +62,6 @@ static u8 gdm_wimax_macaddr[6] = {0x00, 0x0a, 0x3b, 0xf0, 0x01, 0x30};
static void gdm_wimax_ind_fsm_update(struct net_device *dev, struct fsm_s *fsm);
static void gdm_wimax_ind_if_updown(struct net_device *dev, int if_up);
-#if defined(DEBUG_SDU)
-static void printk_hex(u8 *buf, u32 size)
-{
- int i;
-
- for (i = 0; i < size; i++) {
- if (i && i % 16 == 0)
- printk(KERN_DEBUG "\n%02x ", *buf++);
- else
- printk(KERN_DEBUG "%02x ", *buf++);
- }
-
- printk(KERN_DEBUG "\n");
-}
-
static const char *get_protocol_name(u16 protocol)
{
static char buf[32];
@@ -140,7 +125,8 @@ static const char *get_port_name(u16 port)
return buf;
}
-static void dump_eth_packet(const char *title, u8 *data, int len)
+static void dump_eth_packet(struct net_device *dev, const char *title,
+ u8 *data, int len)
{
struct iphdr *ih = NULL;
struct udphdr *uh = NULL;
@@ -162,48 +148,21 @@ static void dump_eth_packet(const char *title, u8 *data, int len)
port = ntohs(uh->dest);
}
- printk(KERN_DEBUG "[%s] len=%d, %s, %s, %s\n",
+ netdev_dbg(dev, "[%s] len=%d, %s, %s, %s\n",
title, len,
get_protocol_name(protocol),
get_ip_protocol_name(ip_protocol),
get_port_name(port));
if (!(data[0] == 0xff && data[1] == 0xff)) {
- if (protocol == ETH_P_IP) {
- printk(KERN_DEBUG " src=%pI4\n", &ih->saddr);
- } else if (protocol == ETH_P_IPV6) {
- printk(KERN_DEBUG " src=%pI6\n", &ih->saddr);
- }
+ if (protocol == ETH_P_IP)
+ netdev_dbg(dev, " src=%pI4\n", &ih->saddr);
+ else if (protocol == ETH_P_IPV6)
+ netdev_dbg(dev, " src=%pI6\n", &ih->saddr);
}
- #if (DUMP_PACKET & DUMP_SDU_ALL)
- printk_hex(data, len);
- #else
- #if (DUMP_PACKET & DUMP_SDU_ARP)
- if (protocol == ETH_P_ARP)
- printk_hex(data, len);
- #endif
- #if (DUMP_PACKET & DUMP_SDU_IP)
- if (protocol == ETH_P_IP || protocol == ETH_P_IPV6)
- printk_hex(data, len);
- #else
- #if (DUMP_PACKET & DUMP_SDU_IP_TCP)
- if (ip_protocol == IPPROTO_TCP)
- printk_hex(data, len);
- #endif
- #if (DUMP_PACKET & DUMP_SDU_IP_UDP)
- if (ip_protocol == IPPROTO_UDP)
- printk_hex(data, len);
- #endif
- #if (DUMP_PACKET & DUMP_SDU_IP_ICMP)
- if (ip_protocol == IPPROTO_ICMP)
- printk_hex(data, len);
- #endif
- #endif
- #endif
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, data, len, false);
}
-#endif
-
static inline int gdm_wimax_header(struct sk_buff **pskb)
{
@@ -237,12 +196,10 @@ static void gdm_wimax_event_rcv(struct net_device *dev, u16 type, void *msg,
{
struct nic *nic = netdev_priv(dev);
- #if defined(DEBUG_HCI)
u8 *buf = (u8 *) msg;
u16 hci_cmd = (buf[0]<<8) | buf[1];
u16 hci_len = (buf[2]<<8) | buf[3];
- printk(KERN_DEBUG "H=>D: 0x%04x(%d)\n", hci_cmd, hci_len);
- #endif
+ netdev_dbg(dev, "H=>D: 0x%04x(%d)\n", hci_cmd, hci_len);
gdm_wimax_send(nic, msg, len);
}
@@ -351,11 +308,9 @@ static int gdm_wimax_event_send(struct net_device *dev, char *buf, int size)
struct evt_entry *e;
unsigned long flags;
- #if defined(DEBUG_HCI)
u16 hci_cmd = ((u8)buf[0]<<8) | (u8)buf[1];
u16 hci_len = ((u8)buf[2]<<8) | (u8)buf[3];
- printk(KERN_DEBUG "D=>H: 0x%04x(%d)\n", hci_cmd, hci_len);
- #endif
+ netdev_dbg(dev, "D=>H: 0x%04x(%d)\n", hci_cmd, hci_len);
spin_lock_irqsave(&wm_event.evt_lock, flags);
@@ -415,9 +370,7 @@ static int gdm_wimax_tx(struct sk_buff *skb, struct net_device *dev)
struct nic *nic = netdev_priv(dev);
struct fsm_s *fsm = (struct fsm_s *) nic->sdk_data[SIOC_DATA_FSM].buf;
- #if defined(DEBUG_SDU)
- dump_eth_packet("TX", skb->data, skb->len);
- #endif
+ dump_eth_packet(dev, "TX", skb->data, skb->len);
ret = gdm_wimax_header(&skb);
if (ret < 0) {
@@ -540,7 +493,7 @@ static int gdm_wimax_ioctl_get_data(struct data_s *dst, struct data_s *src)
if (src->size) {
if (!dst->buf)
return -EINVAL;
- if (copy_to_user(dst->buf, src->buf, size))
+ if (copy_to_user((void __user *)dst->buf, src->buf, size))
return -EFAULT;
}
return 0;
@@ -563,7 +516,7 @@ static int gdm_wimax_ioctl_set_data(struct data_s *dst, struct data_s *src)
return -ENOMEM;
}
- if (copy_from_user(dst->buf, src->buf, src->size)) {
+ if (copy_from_user(dst->buf, (void __user *)src->buf, src->size)) {
kdelete(&dst->buf);
return -EFAULT;
}
@@ -756,9 +709,7 @@ static void gdm_wimax_netif_rx(struct net_device *dev, char *buf, int len)
struct sk_buff *skb;
int ret;
- #if defined(DEBUG_SDU)
- dump_eth_packet("RX", buf, len);
- #endif
+ dump_eth_packet(dev, "RX", buf, len);
skb = dev_alloc_skb(len + 2);
if (!skb) {
diff --git a/drivers/staging/gdm72xx/gdm_wimax.h b/drivers/staging/gdm72xx/gdm_wimax.h
index 6ec0ab43e9cc..1fcfc8555417 100644
--- a/drivers/staging/gdm72xx/gdm_wimax.h
+++ b/drivers/staging/gdm72xx/gdm_wimax.h
@@ -62,26 +62,6 @@ struct nic {
};
-
-#if 0
-#define dprintk(fmt, args ...) printk(KERN_DEBUG " [GDM] " fmt, ## args)
-#else
-#define dprintk(...)
-#endif
-
-/*#define DEBUG_SDU */
-#if defined(DEBUG_SDU)
-#define DUMP_SDU_ALL (1<<0)
-#define DUMP_SDU_ARP (1<<1)
-#define DUMP_SDU_IP (1<<2)
-#define DUMP_SDU_IP_TCP (1<<8)
-#define DUMP_SDU_IP_UDP (1<<9)
-#define DUMP_SDU_IP_ICMP (1<<10)
-#define DUMP_PACKET (DUMP_SDU_ALL)
-#endif
-
-/*#define DEBUG_HCI */
-
/*#define LOOPBACK_TEST */
extern int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev);
diff --git a/drivers/staging/gs_fpgaboot/Kconfig b/drivers/staging/gs_fpgaboot/Kconfig
new file mode 100644
index 000000000000..550645291fab
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/Kconfig
@@ -0,0 +1,8 @@
+#
+# "xilinx FPGA firmware download, fpgaboot"
+#
+config GS_FPGABOOT
+ tristate "Xilinx FPGA firmware download module"
+ default n
+ help
+ Xilinx FPGA firmware download module
diff --git a/drivers/staging/gs_fpgaboot/Makefile b/drivers/staging/gs_fpgaboot/Makefile
new file mode 100644
index 000000000000..34cb606e0e3d
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/Makefile
@@ -0,0 +1,4 @@
+gs_fpga-y += gs_fpgaboot.o io.o
+obj-$(CONFIG_GS_FPGABOOT) += gs_fpga.o
+
+ccflags-$(CONFIG_GS_FPGA_DEBUG) := -DDEBUG
diff --git a/drivers/staging/gs_fpgaboot/README b/drivers/staging/gs_fpgaboot/README
new file mode 100644
index 000000000000..cfa8624304e5
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/README
@@ -0,0 +1,73 @@
+==============================================================================
+Linux Driver Source for Xilinx FPGA firmware download
+==============================================================================
+
+
+TABLE OF CONTENTS.
+
+1. SUMMARY
+2. BACKGROUND
+3. DESIGN
+4. HOW TO USE
+5. USE CASE
+6. REFERENCE
+
+1. SUMMARY
+
+ - Download Xilinx FPGA firmware
+ - This module downloads Xilinx FPGA firmware using gpio pins.
+
+2. BACKGROUND
+
+ An FPGA (Field Programmable Gate Array) is programmable hardware that is
+ used in various applications. The hardware design needs to be programmed
+ through a dedicated device or in a CPU-assisted way (serial or parallel).
+ This driver provides a way to download FPGA firmware.
+
+3. DESIGN
+
+ - load a Xilinx FPGA bitstream format [2] firmware image file using the
+   kernel firmware framework, request_firmware()
+ - program the Xilinx FPGA using SelectMAP (parallel) mode [1]
+ - FPGA programming is done by GPIO-based bit-banging, as an example
+   (a condensed sketch of this flow follows this file)
+ - platform independent file: gs_fpgaboot.c
+ - platform dependent file: io.c
+
+
+4. HOW TO USE
+
+ $ insmod gs_fpga.ko file="xlinx_fpga_top_bitstream.bit"
+ $ rmmod gs_fpga
+
+5. USE CASE (from a mailing list discussion with Greg)
+
+ a. As an FPGA development support tool:
+    During FPGA firmware development, you need to download a new FPGA
+    image frequently.
+    You would normally do that with a dedicated JTAG programmer, which is
+    usually a limited resource in the lab.
+    With this driver, you don't need a dedicated JTAG programmer.
+    This is a real gain :)
+
+ b. For an FPGA that runs without further configuration after the download
+    and doesn't talk to any Linux interface (such as PCIe).
+
+    We download the FPGA firmware, triggered by the user or by some other
+    means, and that's it. Since that FPGA runs on its own, it doesn't
+    require a Linux driver after the download.
+
+ c. For an FPGA that requires configuration after the download and talks
+    to a Linux interface (such as PCIe).
+
+    This type of FPGA can be described in the device tree and have a
+    separate driver (PCIe or other); that driver then calls this module to
+    download the FPGA firmware during Linux boot and takes over the device
+    through the interface.
+
+6. REFERENCE
+
+ 1. Xilinx APP NOTE XAPP583:
+ http://www.xilinx.com/support/documentation/application_notes/xapp583-fpga-configuration.pdf
+ 2. bitstream file info:
+ http://home.earthlink.net/~davesullins/software/bitinfo.html
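
For orientation, here is a condensed sketch of the flow the README's DESIGN
section describes, built on request_firmware() and the xl_* GPIO helpers added
in io.c/io.h below. Bitstream header parsing, error handling and the DONE
polling timeout of the real driver are left out, and sketch_fpga_download()
itself is illustrative rather than part of the patch:

	#include <linux/delay.h>
	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/firmware.h>

	#include "io.h"	/* xl_* helpers and enum wbus, added below */

	static int sketch_fpga_download(struct device *dev, const char *file,
					enum wbus bus_bytes)
	{
		const struct firmware *fw;
		size_t i;
		int err;

		err = request_firmware(&fw, file, dev);	/* load the .bit image */
		if (err)
			return err;

		/* SelectMAP setup: csi_b and rdwr_b low, pulse program_b to reset */
		xl_program_b(1);
		xl_rdwr_b(0);
		xl_csi_b(0);
		xl_program_b(0);
		msleep(20);
		xl_program_b(1);

		while (xl_get_init_b() == 0)	/* wait for device initialization */
			;

		/* bit-bang the configuration data, one bus word per CCLK pulse */
		for (i = 0; i < fw->size; i += bus_bytes)
			xl_shift_bytes_out(bus_bytes, (unsigned char *)fw->data + i);

		xl_shift_cclk(8);	/* extra clocks for the startup sequence */

		release_firmware(fw);
		return xl_get_done_b() ? 0 : -EIO;
	}
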
diff --git a/drivers/staging/gs_fpgaboot/TODO b/drivers/staging/gs_fpgaboot/TODO
new file mode 100644
index 000000000000..2d9fb17d606d
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/TODO
@@ -0,0 +1,7 @@
+TODO:
+ - get bus width input instead of hardcoded bus width
+ - get it reviewed
+
+Please send any patches for this driver to Insop Song <insop.song@gainspeed.com>
+and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
+Please also CC the "Staging subsystem" mailing list <devel@driverdev.osuosl.org>.
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
new file mode 100644
index 000000000000..89bc84d833e6
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -0,0 +1,422 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/firmware.h>
+
+#include "gs_fpgaboot.h"
+#include "io.h"
+
+#define DEVICE_NAME "device"
+#define CLASS_NAME "fpgaboot"
+
+static uint8_t bits_magic[] = {
+ 0x0, 0x9, 0xf, 0xf0, 0xf, 0xf0,
+ 0xf, 0xf0, 0xf, 0xf0, 0x0, 0x0, 0x1};
+
+/* fake device for request_firmware */
+static struct platform_device *firmware_pdev;
+
+static char *file = "xlinx_fpga_firmware.bit";
+module_param(file, charp, S_IRUGO);
+MODULE_PARM_DESC(file, "Xilinx FPGA firmware file.");
+
+#ifdef DEBUG_FPGA
+static void datadump(char *msg, void *m, int n)
+{
+ int i;
+ unsigned char *c;
+
+ pr_info("=== %s ===\n", msg);
+
+ c = m;
+
+ for (i = 0; i < n; i++) {
+ if ((i&0xf) == 0)
+ pr_info("\n 0x%4x: ", i);
+
+ pr_info("%02X ", c[i]);
+ }
+
+ pr_info("\n");
+}
+#endif /* DEBUG_FPGA */
+
+static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize)
+{
+ memcpy(buf, bitdata + *offset, rdsize);
+ *offset += rdsize;
+}
+
+static void readinfo_bitstream(char *bitdata, char *buf, int *offset)
+{
+ char tbuf[64];
+ int32_t len;
+
+ /* read section char */
+ read_bitstream(bitdata, tbuf, offset, 1);
+
+ /* read length */
+ read_bitstream(bitdata, tbuf, offset, 2);
+
+ len = tbuf[0] << 8 | tbuf[1];
+
+ read_bitstream(bitdata, buf, offset, len);
+ buf[len] = '\0';
+}
+
+/*
+ * read bitdata length
+ */
+static int readlength_bitstream(char *bitdata, int *lendata, int *offset)
+{
+ char tbuf[64];
+
+ /* read section char */
+ read_bitstream(bitdata, tbuf, offset, 1);
+
+ /* make sure it is section 'e' */
+ if (tbuf[0] != 'e') {
+ pr_err("error: length section is not 'e', but %c\n", tbuf[0]);
+ return -1;
+ }
+
+ /* read 4bytes length */
+ read_bitstream(bitdata, tbuf, offset, 4);
+
+ *lendata = tbuf[0] << 24 | tbuf[1] << 16 |
+ tbuf[2] << 8 | tbuf[3];
+
+ return 0;
+}
+
+
+/*
+ * read first 13 bytes to check bitstream magic number
+ */
+static int readmagic_bitstream(char *bitdata, int *offset)
+{
+ char buf[13];
+ int r;
+
+ read_bitstream(bitdata, buf, offset, 13);
+ r = memcmp(buf, bits_magic, 13);
+ if (r) {
+ pr_err("error: corrupted header");
+ return -1;
+ }
+ pr_info("bitstream file magic number Ok\n");
+
+ *offset = 13; /* magic length */
+
+ return 0;
+}
+
+/*
+ * NOTE: supports only bitstream format
+ */
+static enum fmt_image get_imageformat(struct fpgaimage *fimage)
+{
+ return f_bit;
+}
+
+static void gs_print_header(struct fpgaimage *fimage)
+{
+ pr_info("file: %s\n", fimage->filename);
+ pr_info("part: %s\n", fimage->part);
+ pr_info("date: %s\n", fimage->date);
+ pr_info("time: %s\n", fimage->time);
+ pr_info("lendata: %d\n", fimage->lendata);
+}
+
+static void gs_read_bitstream(struct fpgaimage *fimage)
+{
+ char *bitdata;
+ int size;
+ int offset;
+
+ offset = 0;
+ bitdata = (char *)fimage->fw_entry->data;
+ size = fimage->fw_entry->size;
+
+ readmagic_bitstream(bitdata, &offset);
+ readinfo_bitstream(bitdata, fimage->filename, &offset);
+ readinfo_bitstream(bitdata, fimage->part, &offset);
+ readinfo_bitstream(bitdata, fimage->date, &offset);
+ readinfo_bitstream(bitdata, fimage->time, &offset);
+ readlength_bitstream(bitdata, &fimage->lendata, &offset);
+
+ fimage->fpgadata = bitdata + offset;
+}
+
+static int gs_read_image(struct fpgaimage *fimage)
+{
+ int img_fmt;
+
+ img_fmt = get_imageformat(fimage);
+
+ switch (img_fmt) {
+ case f_bit:
+ pr_info("image is bitstream format\n");
+ gs_read_bitstream(fimage);
+ break;
+ default:
+ pr_err("unsupported fpga image format\n");
+ return -1;
+ }
+
+ gs_print_header(fimage);
+
+ return 0;
+}
+
+static int gs_load_image(struct fpgaimage *fimage, char *file)
+{
+ int err;
+
+ pr_info("load fpgaimage %s\n", file);
+
+ err = request_firmware(&fimage->fw_entry, file, &firmware_pdev->dev);
+ if (err != 0) {
+ pr_err("firmware %s is missing, cannot continue.\n", file);
+ return err;
+ }
+
+ return 0;
+}
+
+static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
+{
+ char *bitdata;
+ int size, i, cnt;
+ cnt = 0;
+
+ bitdata = (char *)fimage->fpgadata;
+ size = fimage->lendata;
+
+#ifdef DEBUG_FPGA
+ datadump("bitfile sample", bitdata, 0x100);
+#endif /* DEBUG_FPGA */
+
+ if (!xl_supported_prog_bus_width(bus_bytes)) {
+ pr_err("unsupported program bus width %d\n",
+ bus_bytes);
+ return -1;
+ }
+
+ /* Bring csi_b, rdwr_b Low and program_b High */
+ xl_program_b(1);
+ xl_rdwr_b(0);
+ xl_csi_b(0);
+
+ /* Configuration reset */
+ xl_program_b(0);
+ msleep(20);
+ xl_program_b(1);
+
+ /* Wait for Device Initialization */
+ while (xl_get_init_b() == 0)
+ ;
+
+ pr_info("device init done\n");
+
+ for (i = 0; i < size; i += bus_bytes)
+ xl_shift_bytes_out(bus_bytes, bitdata+i);
+
+ pr_info("program done\n");
+
+ /* Check INIT_B */
+ if (xl_get_init_b() == 0) {
+ pr_err("init_b 0\n");
+ return -1;
+ }
+
+ while (xl_get_done_b() == 0) {
+ if (cnt++ > MAX_WAIT_DONE) {
+ pr_err("init_B %d\n", xl_get_init_b());
+ break;
+ }
+ }
+
+ if (cnt > MAX_WAIT_DONE) {
+ pr_err("fpga download fail\n");
+ return -1;
+ }
+
+ pr_info("download fpgaimage\n");
+
+ /* Compensate for Special Startup Conditions */
+ xl_shift_cclk(8);
+
+ return 0;
+}
+
+static int gs_release_image(struct fpgaimage *fimage)
+{
+ release_firmware(fimage->fw_entry);
+ pr_info("release fpgaimage\n");
+
+ return 0;
+}
+
+/*
+ * NOTE: supports systemmap parallel programming
+ */
+static int gs_set_download_method(struct fpgaimage *fimage)
+{
+ pr_info("set program method\n");
+
+ fimage->dmethod = m_systemmap;
+
+ pr_info("systemmap program method\n");
+
+ return 0;
+}
+
+static int init_driver(void)
+{
+ firmware_pdev = platform_device_register_simple("fpgaboot", -1,
+ NULL, 0);
+ return PTR_ERR_OR_ZERO(firmware_pdev);
+}
+
+static void finish_driver(void)
+{
+ platform_device_unregister(firmware_pdev);
+}
+
+static int gs_fpgaboot(void)
+{
+ int err;
+ struct fpgaimage *fimage;
+
+ fimage = kmalloc(sizeof(struct fpgaimage), GFP_KERNEL);
+ if (fimage == NULL) {
+ pr_err("No memory is available\n");
+ goto err_out;
+ }
+
+ err = gs_load_image(fimage, file);
+ if (err) {
+ pr_err("gs_load_image error\n");
+ goto err_out1;
+ }
+
+ err = gs_read_image(fimage);
+ if (err) {
+ pr_err("gs_read_image error\n");
+ goto err_out2;
+ }
+
+ err = gs_set_download_method(fimage);
+ if (err) {
+ pr_err("gs_set_download_method error\n");
+ goto err_out2;
+ }
+
+ err = gs_download_image(fimage, bus_2byte);
+ if (err) {
+ pr_err("gs_download_image error\n");
+ goto err_out2;
+ }
+
+ err = gs_release_image(fimage);
+ if (err) {
+ pr_err("gs_release_image error\n");
+ goto err_out1;
+ }
+
+ kfree(fimage);
+ return 0;
+
+err_out2:
+ err = gs_release_image(fimage);
+ if (err)
+ pr_err("gs_release_image error\n");
+err_out1:
+ kfree(fimage);
+
+err_out:
+ return -1;
+
+}
+
+static int __init gs_fpgaboot_init(void)
+{
+ int err, r;
+
+ r = -1;
+
+ pr_info("FPGA DOWNLOAD --->\n");
+ pr_info("built at %s UTC\n", __TIMESTAMP__);
+
+ pr_info("FPGA image file name: %s\n", file);
+
+ err = init_driver();
+ if (err != 0) {
+ pr_err("FPGA DRIVER INIT FAIL!!\n");
+ return r;
+ }
+
+ err = xl_init_io();
+ if (err) {
+ pr_err("GPIO INIT FAIL!!\n");
+ r = -1;
+ goto errout;
+ }
+
+ err = gs_fpgaboot();
+ if (err) {
+ pr_err("FPGA DOWNLOAD FAIL!!\n");
+ r = -1;
+ goto errout;
+ }
+
+ pr_info("FPGA DOWNLOAD DONE <---\n");
+
+ r = 0;
+ return r;
+
+errout:
+ finish_driver();
+
+ return r;
+}
+
+static void __exit gs_fpgaboot_exit(void)
+{
+ finish_driver();
+ pr_info("FPGA image download module removed\n");
+}
+
+module_init(gs_fpgaboot_init);
+module_exit(gs_fpgaboot_exit);
+
+MODULE_AUTHOR("Insop Song");
+MODULE_DESCRIPTION("Xilinx FPGA firmware download");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
new file mode 100644
index 000000000000..f41f4cc798cc
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
@@ -0,0 +1,56 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/firmware.h>
+
+#define MAX_STR 256
+
+enum fmt_image {
+ f_bit, /* only bitstream is supported */
+ f_rbt,
+ f_bin,
+ f_mcs,
+ f_hex,
+};
+
+enum mdownload {
+ m_systemmap, /* only system map is supported */
+ m_serial,
+ m_jtag,
+};
+
+/*
+ * xilinx fpgaimage information
+ * NOTE: use MAX_STR instead of dynamic alloc for simplicity
+ */
+struct fpgaimage {
+ enum fmt_image fmt_img;
+ enum mdownload dmethod;
+
+ const struct firmware *fw_entry;
+
+ /*
+ * the following can be read from the bitstream,
+ * but other image formats should provide them as well
+ */
+ char filename[MAX_STR];
+ char part[MAX_STR];
+ char date[MAX_STR];
+ char time[MAX_STR];
+ int32_t lendata;
+ char *fpgadata;
+};
diff --git a/drivers/staging/gs_fpgaboot/io.c b/drivers/staging/gs_fpgaboot/io.c
new file mode 100644
index 000000000000..b7be8e37b8d1
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/io.c
@@ -0,0 +1,294 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+
+#include "io.h"
+
+#ifdef CONFIG_B4860G100
+static struct gpiobus gbus;
+#endif /* CONFIG_B4860G100 */
+
+static inline void byte0_out(unsigned char data);
+static inline void byte1_out(unsigned char data);
+static inline void xl_cclk_b(int32_t i);
+
+
+/* Assert and Deassert CCLK */
+void xl_shift_cclk(int count)
+{
+ int i;
+ for (i = 0; i < count; i++) {
+ xl_cclk_b(1);
+ xl_cclk_b(0);
+ }
+}
+
+int xl_supported_prog_bus_width(enum wbus bus_bytes)
+{
+ switch (bus_bytes) {
+ case bus_1byte:
+ break;
+ case bus_2byte:
+ break;
+ default:
+ pr_err("unsupported program bus width %d\n",
+ bus_bytes);
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Serialize byte and clock each bit on target's DIN and CCLK pins */
+void xl_shift_bytes_out(enum wbus bus_byte, unsigned char *pdata)
+{
+ /*
+ * supports 1 and 2 bytes programming mode
+ */
+ if (likely(bus_byte == bus_2byte))
+ byte0_out(pdata[0]);
+
+ byte1_out(pdata[1]);
+ xl_shift_cclk(1);
+}
+
+/*
+ * generic bit swap for xilinx SYSTEMMAP FPGA programming
+ */
+static inline unsigned char bitswap(unsigned char s)
+{
+ unsigned char d;
+ d = (((s&0x80)>>7) | ((s&0x40)>>5) | ((s&0x20)>>3) | ((s&0x10)>>1) |
+ ((s&0x08)<<1) | ((s&0x04)<<3) | ((s&0x02)<<5) | ((s&0x01)<<7));
+ return d;
+}
+
+#ifdef CONFIG_B4860G100
+/*
+ * ======================================================================
+ * board specific configuration
+ */
+
+static inline void mpc85xx_gpio_set_dir(
+ int32_t port,
+ uint32_t mask,
+ uint32_t dir)
+{
+ dir |= (in_be32(gbus.r[port]+GPDIR) & ~mask);
+ out_be32(gbus.r[port]+GPDIR, dir);
+}
+
+static inline void mpc85xx_gpio_set(int32_t port, uint32_t mask, uint32_t val)
+{
+ /* First mask off the unwanted parts of "dir" and "val" */
+ val &= mask;
+
+ /* Now read in the values we're supposed to preserve */
+ val |= (in_be32(gbus.r[port]+GPDAT) & ~mask);
+
+ out_be32(gbus.r[port]+GPDAT, val);
+}
+
+static inline uint32_t mpc85xx_gpio_get(int32_t port, uint32_t mask)
+{
+ /* Read the requested values */
+ return in_be32(gbus.r[port]+GPDAT) & mask;
+}
+
+static inline void mpc85xx_gpio_set_low(int32_t port, uint32_t gpios)
+{
+ mpc85xx_gpio_set(port, gpios, 0x00000000);
+}
+
+static inline void mpc85xx_gpio_set_high(int32_t port, uint32_t gpios)
+{
+ mpc85xx_gpio_set(port, gpios, 0xFFFFFFFF);
+}
+
+static inline void gpio_set_value(int32_t port, uint32_t gpio, uint32_t value)
+{
+ int32_t g;
+ g = 31 - gpio;
+ if (value)
+ mpc85xx_gpio_set_high(port, 1U << g);
+ else
+ mpc85xx_gpio_set_low(port, 1U << g);
+}
+
+static inline int gpio_get_value(int32_t port, uint32_t gpio)
+{
+ int32_t g;
+ g = 31 - gpio;
+ return !!mpc85xx_gpio_get(port, 1U << g);
+}
+
+static inline void xl_cclk_b(int32_t i)
+{
+ gpio_set_value(XL_CCLK_PORT, XL_CCLK_PIN, i);
+}
+
+void xl_program_b(int32_t i)
+{
+ gpio_set_value(XL_PROGN_PORT, XL_PROGN_PIN, i);
+}
+
+void xl_rdwr_b(int32_t i)
+{
+ gpio_set_value(XL_RDWRN_PORT, XL_RDWRN_PIN, i);
+}
+
+void xl_csi_b(int32_t i)
+{
+ gpio_set_value(XL_CSIN_PORT, XL_CSIN_PIN, i);
+}
+
+int xl_get_init_b(void)
+{
+ return gpio_get_value(XL_INITN_PORT, XL_INITN_PIN);
+}
+
+int xl_get_done_b(void)
+{
+ return gpio_get_value(XL_DONE_PORT, XL_DONE_PIN);
+}
+
+
+/* G100 specific bit swap and remap (to gpio pins) for byte 0 */
+static inline uint32_t bit_remap_byte0(uint32_t s)
+{
+ uint32_t d;
+ d = (((s&0x80)>>7) | ((s&0x40)>>5) | ((s&0x20)>>3) | ((s&0x10)>>1) |
+ ((s&0x08)<<1) | ((s&0x04)<<3) | ((s&0x02)<<6) | ((s&0x01)<<9));
+ return d;
+}
+
+/*
+ * G100 specific MSB, in this order [byte0 | byte1], out
+ */
+static inline void byte0_out(unsigned char data)
+{
+ uint32_t swap32;
+ swap32 = bit_remap_byte0((uint32_t) data) << 8;
+
+ mpc85xx_gpio_set(0, 0x0002BF00, (uint32_t) swap32);
+}
+
+/*
+ * G100 specific LSB, in this order [byte0 | byte1], out
+ */
+static inline void byte1_out(unsigned char data)
+{
+ mpc85xx_gpio_set(0, 0x000000FF, (uint32_t) bitswap(data));
+}
+
+/*
+ * configurable per device type for different I/O config
+ */
+int xl_init_io(void)
+{
+ struct device_node *np;
+ const u32 *p_reg;
+ int reg, cnt;
+
+ cnt = 0;
+ memset(&gbus, 0, sizeof(struct gpiobus));
+ for_each_compatible_node(np, NULL, "fsl,qoriq-gpio") {
+ p_reg = of_get_property(np, "reg", NULL);
+ if (p_reg == NULL)
+ break;
+ reg = (int) *p_reg;
+ gbus.r[cnt] = of_iomap(np, 0);
+
+ if (!gbus.r[cnt]) {
+ pr_err("not findding gpio cell-index %d\n", cnt);
+ return -ENODEV;
+ }
+ cnt++;
+ }
+ mpc85xx_gpio_set_dir(0, 0x0002BFFF, 0x0002BFFF);
+ mpc85xx_gpio_set_dir(1, 0x00240060, 0x00240060);
+
+ gbus.ngpio = cnt;
+
+ return 0;
+}
+
+
+#else /* placeholder for boards with different config */
+
+void xl_program_b(int32_t i)
+{
+ return;
+}
+
+void xl_rdwr_b(int32_t i)
+{
+ return;
+}
+
+void xl_csi_b(int32_t i)
+{
+ return;
+}
+
+int xl_get_init_b(void)
+{
+ return -1;
+}
+
+int xl_get_done_b(void)
+{
+ return -1;
+}
+
+static inline void byte0_out(unsigned char data)
+{
+ return;
+}
+
+static inline void byte1_out(unsigned char data)
+{
+ return;
+}
+
+static inline void xl_cclk_b(int32_t i)
+{
+ return;
+}
+
+/*
+ * configurable per device type for different I/O config
+ */
+int xl_init_io(void)
+{
+ return -1;
+}
+
+#endif /* CONFIG_B4860G100 */
diff --git a/drivers/staging/gs_fpgaboot/io.h b/drivers/staging/gs_fpgaboot/io.h
new file mode 100644
index 000000000000..7b46ac24b74e
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/io.h
@@ -0,0 +1,90 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define GPDIR 0
+#define GPCFG 4 /* open drain or not */
+#define GPDAT 8
+
+/*
+ * gpio port and pin definitions
+ * NOTE: port number starts from 0
+ */
+#define XL_INITN_PORT 1
+#define XL_INITN_PIN 14
+#define XL_RDWRN_PORT 1
+#define XL_RDWRN_PIN 13
+#define XL_CCLK_PORT 1
+#define XL_CCLK_PIN 10
+#define XL_PROGN_PORT 1
+#define XL_PROGN_PIN 25
+#define XL_CSIN_PORT 1
+#define XL_CSIN_PIN 26
+#define XL_DONE_PORT 1
+#define XL_DONE_PIN 27
+
+/*
+ * gpio mapping
+ *
+ XL_config_D0 – gpio1_31
+ Xl_config_d1 – gpio1_30
+ Xl_config_d2 – gpio1_29
+ Xl_config_d3 – gpio1_28
+ Xl_config_d4 – gpio1_27
+ Xl_config_d5 – gpio1_26
+ Xl_config_d6 – gpio1_25
+ Xl_config_d7 – gpio1_24
+ Xl_config_d8 – gpio1_23
+ Xl_config_d9 – gpio1_22
+ Xl_config_d10 – gpio1_21
+ Xl_config_d11 – gpio1_20
+ Xl_config_d12 – gpio1_19
+ Xl_config_d13 – gpio1_18
+ Xl_config_d14 – gpio1_16
+ Xl_config_d15 – gpio1_14
+*
+*/
+
+/*
+ * program bus width in bytes
+ */
+enum wbus {
+ bus_1byte = 1,
+ bus_2byte = 2,
+};
+
+
+#define MAX_WAIT_DONE 10000
+
+
+struct gpiobus {
+ int ngpio;
+ void __iomem *r[4];
+};
+
+int xl_supported_prog_bus_width(enum wbus bus_bytes);
+
+void xl_program_b(int32_t i);
+void xl_rdwr_b(int32_t i);
+void xl_csi_b(int32_t i);
+
+int xl_get_init_b(void);
+int xl_get_done_b(void);
+
+void xl_shift_cclk(int count);
+void xl_shift_bytes_out(enum wbus bus_byte, unsigned char *pdata);
+
+int xl_init_io(void);
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index c9fedb79e3a2..2064839ef2cd 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -652,3 +652,25 @@ error_free:
free(temp);
return ret;
}
+
+int read_sysfs_string(const char *filename, const char *basedir, char *str)
+{
+ int ret = 0;
+ FILE *sysfsfp;
+ char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+ if (temp == NULL) {
+ printf("Memory allocation failed");
+ return -ENOMEM;
+ }
+ sprintf(temp, "%s/%s", basedir, filename);
+ sysfsfp = fopen(temp, "r");
+ if (sysfsfp == NULL) {
+ ret = -errno;
+ goto error_free;
+ }
+ fscanf(sysfsfp, "%s\n", str);
+ fclose(sysfsfp);
+error_free:
+ free(temp);
+ return ret;
+}
diff --git a/drivers/staging/iio/Documentation/lsiio.c b/drivers/staging/iio/Documentation/lsiio.c
new file mode 100644
index 000000000000..24ae9694eb41
--- /dev/null
+++ b/drivers/staging/iio/Documentation/lsiio.c
@@ -0,0 +1,157 @@
+/*
+ * Industrial I/O utilities - lsiio.c
+ *
+ * Copyright (c) 2010 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <string.h>
+#include <dirent.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/dir.h>
+#include "iio_utils.h"
+
+
+static enum verbosity {
+ VERBLEVEL_DEFAULT, /* 0 gives lspci behaviour */
+ VERBLEVEL_SENSORS, /* 1 lists sensors */
+} verblevel = VERBLEVEL_DEFAULT;
+
+const char *type_device = "iio:device";
+const char *type_trigger = "trigger";
+
+
+static inline int check_prefix(const char *str, const char *prefix)
+{
+ return strlen(str) > strlen(prefix) &&
+ strncmp(str, prefix, strlen(prefix)) == 0;
+}
+
+static inline int check_postfix(const char *str, const char *postfix)
+{
+ return strlen(str) > strlen(postfix) &&
+ strcmp(str + strlen(str) - strlen(postfix), postfix) == 0;
+}
+
+static int dump_channels(const char *dev_dir_name)
+{
+ DIR *dp;
+ const struct dirent *ent;
+ dp = opendir(dev_dir_name);
+ if (dp == NULL)
+ return -errno;
+ while (ent = readdir(dp), ent != NULL)
+ if (check_prefix(ent->d_name, "in_") &&
+ check_postfix(ent->d_name, "_raw")) {
+ printf(" %-10s\n", ent->d_name);
+ }
+
+ return 0;
+}
+
+static int dump_one_device(const char *dev_dir_name)
+{
+ char name[IIO_MAX_NAME_LENGTH];
+ int dev_idx;
+
+ sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_device),
+ "%i", &dev_idx);
+ read_sysfs_string("name", dev_dir_name, name);
+ printf("Device %03d: %s\n", dev_idx, name);
+
+ if (verblevel >= VERBLEVEL_SENSORS) {
+ int ret = dump_channels(dev_dir_name);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int dump_one_trigger(const char *dev_dir_name)
+{
+ char name[IIO_MAX_NAME_LENGTH];
+ int dev_idx;
+
+ sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_trigger),
+ "%i", &dev_idx);
+ read_sysfs_string("name", dev_dir_name, name);
+ printf("Trigger %03d: %s\n", dev_idx, name);
+ return 0;
+}
+
+static void dump_devices(void)
+{
+ const struct dirent *ent;
+ int number, numstrlen;
+
+ FILE *nameFile;
+ DIR *dp;
+ char thisname[IIO_MAX_NAME_LENGTH];
+ char *filename;
+
+ dp = opendir(iio_dir);
+ if (dp == NULL) {
+ printf("No industrial I/O devices available\n");
+ return;
+ }
+
+ while (ent = readdir(dp), ent != NULL) {
+ if (check_prefix(ent->d_name, type_device)) {
+ char *dev_dir_name;
+ asprintf(&dev_dir_name, "%s%s", iio_dir, ent->d_name);
+ dump_one_device(dev_dir_name);
+ free(dev_dir_name);
+ if (verblevel >= VERBLEVEL_SENSORS)
+ printf("\n");
+ }
+ }
+ rewinddir(dp);
+ while (ent = readdir(dp), ent != NULL) {
+ if (check_prefix(ent->d_name, type_trigger)) {
+ char *dev_dir_name;
+ asprintf(&dev_dir_name, "%s%s", iio_dir, ent->d_name);
+ dump_one_trigger(dev_dir_name);
+ free(dev_dir_name);
+ }
+ }
+ closedir(dp);
+}
+
+int main(int argc, char **argv)
+{
+ int c, err = 0;
+
+ while ((c = getopt(argc, argv, "d:D:v")) != EOF) {
+ switch (c) {
+ case 'v':
+ verblevel++;
+ break;
+
+ case '?':
+ default:
+ err++;
+ break;
+ }
+ }
+ if (err || argc > optind) {
+ fprintf(stderr, "Usage: lsiio [options]...\n"
+ "List industrial I/O devices\n"
+ " -v, --verbose\n"
+ " Increase verbosity (may be given multiple times)\n"
+ );
+ exit(1);
+ }
+
+ dump_devices();
+
+ return 0;
+}
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
index c1016c510dae..b284e5a6cac1 100644
--- a/drivers/staging/iio/accel/sca3000.h
+++ b/drivers/staging/iio/accel/sca3000.h
@@ -65,7 +65,8 @@
#define SCA3000_RING_BUF_ENABLE 0x80
#define SCA3000_RING_BUF_8BIT 0x40
-/* Free fall detection triggers an interrupt if the acceleration
+/*
+ * Free fall detection triggers an interrupt if the acceleration
* is below a threshold for equivalent of 25cm drop
*/
#define SCA3000_FREE_FALL_DETECT 0x10
@@ -73,8 +74,9 @@
#define SCA3000_MEAS_MODE_OP_1 0x01
#define SCA3000_MEAS_MODE_OP_2 0x02
-/* In motion detection mode the accelerations are band pass filtered
- * (aprox 1 - 25Hz) and then a programmable threshold used to trigger
+/*
+ * In motion detection mode the accelerations are band pass filtered
+ * (approx 1 - 25Hz) and then a programmable threshold is used to trigger
+ * an interrupt.
*/
#define SCA3000_MEAS_MODE_MOT_DET 0x03
@@ -99,8 +101,10 @@
#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03
#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04
#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05
-/* BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
- will not function */
+/*
+ * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
+ * will not function
+ */
#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B
#define SCA3000_OUT_CTRL_PROT_MASK 0xE0
#define SCA3000_OUT_CTRL_BUF_X_EN 0x10
@@ -109,8 +113,9 @@
#define SCA3000_OUT_CTRL_BUF_DIV_4 0x02
#define SCA3000_OUT_CTRL_BUF_DIV_2 0x01
-/* Control which motion detector interrupts are on.
- * For now only OR combinations are supported.x
+/*
+ * Control which motion detector interrupts are on.
+ * For now only OR combinations are supported.
*/
#define SCA3000_MD_CTRL_PROT_MASK 0xC0
#define SCA3000_MD_CTRL_OR_Y 0x01
@@ -121,7 +126,8 @@
#define SCA3000_MD_CTRL_AND_X 0x10
#define SAC3000_MD_CTRL_AND_Z 0x20
-/* Some control registers of complex access methods requiring this register to
+/*
+ * Some control registers have complex access methods that require this
+ * register to be used to remove a lock.
*/
#define SCA3000_REG_ADDR_UNLOCK 0x1e
@@ -139,7 +145,8 @@
/* Values of multiplexed registers (write to ctrl_data after select) */
#define SCA3000_REG_ADDR_CTRL_DATA 0x22
-/* Measurement modes available on some sca3000 series chips. Code assumes others
+/*
+ * Measurement modes available on some sca3000 series chips. Code assumes others
* may become available in the future.
*
* Bypass - Bypass the low-pass filter in the signal channel so as to increase
@@ -160,7 +167,6 @@
* struct sca3000_state - device instance state information
* @us: the associated spi device
* @info: chip variant information
- * @indio_dev: device information used by the IIO core
* @interrupt_handler_ws: event interrupt handler for all events
* @last_timestamp: the timestamp of the last event
* @mo_det_use_count: reference counter for the motion detection unit
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 7f6ccdfaf168..ed30e32e60de 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -32,7 +32,8 @@ enum sca3000_variant {
e05,
};
-/* Note where option modes are not defined, the chip simply does not
+/*
+ * Note where option modes are not defined, the chip simply does not
* support any.
* Other chips in the sca3000 series use i2c and are not included here.
*
@@ -191,7 +192,6 @@ error_ret:
return ret;
}
-/* Crucial that lock is called before calling this */
/**
* sca3000_read_ctrl_reg() read from lock protected control register.
*
@@ -250,9 +250,8 @@ error_ret:
}
#endif /* SCA3000_DEBUG */
-
/**
- * sca3000_show_reg() - sysfs interface to read the chip revision number
+ * sca3000_show_rev() - sysfs interface to read the chip revision number
**/
static ssize_t sca3000_show_rev(struct device *dev,
struct device_attribute *attr,
@@ -312,7 +311,7 @@ sca3000_show_available_measurement_modes(struct device *dev,
}
/**
- * sca3000_show_measurmenet_mode() sysfs read of current mode
+ * sca3000_show_measurement_mode() sysfs read of current mode
**/
static ssize_t
sca3000_show_measurement_mode(struct device *dev,
@@ -403,7 +402,8 @@ error_ret:
}
-/* Not even vaguely standard attributes so defined here rather than
+/*
+ * Not even vaguely standard attributes so defined here rather than
* in the relevant IIO core headers
*/
static IIO_DEVICE_ATTR(measurement_mode_available, S_IRUGO,
@@ -450,6 +450,18 @@ static const struct iio_chan_spec sca3000_channels[] = {
SCA3000_CHAN(2, IIO_MOD_Z),
};
+static const struct iio_chan_spec sca3000_channels_with_temp[] = {
+ SCA3000_CHAN(0, IIO_MOD_X),
+ SCA3000_CHAN(1, IIO_MOD_Y),
+ SCA3000_CHAN(2, IIO_MOD_Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ },
+};
+
static u8 sca3000_addresses[3][3] = {
[0] = {SCA3000_REG_ADDR_X_MSB, SCA3000_REG_CTRL_SEL_MD_X_TH,
SCA3000_MD_CTRL_OR_X},
@@ -472,19 +484,30 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&st->lock);
- if (st->mo_det_use_count) {
- mutex_unlock(&st->lock);
- return -EBUSY;
- }
- address = sca3000_addresses[chan->address][0];
- ret = sca3000_read_data_short(st, address, 2);
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
+ if (chan->type == IIO_ACCEL) {
+ if (st->mo_det_use_count) {
+ mutex_unlock(&st->lock);
+ return -EBUSY;
+ }
+ address = sca3000_addresses[chan->address][0];
+ ret = sca3000_read_data_short(st, address, 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+ *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
+ *val = ((*val) << (sizeof(*val)*8 - 13)) >>
+ (sizeof(*val)*8 - 13);
+ } else {
+ /* get the temperature when available */
+ ret = sca3000_read_data_short(st,
+ SCA3000_REG_ADDR_TEMP_MSB, 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+ *val = ((st->rx[0] & 0x3F) << 3) | ((st->rx[1] & 0xE0) >> 5);
}
- *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
- *val = ((*val) << (sizeof(*val)*8 - 13)) >>
- (sizeof(*val)*8 - 13);
mutex_unlock(&st->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
@@ -494,6 +517,10 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
else /* temperature */
*val2 = 555556;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -214;
+ *val2 = 600000;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
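
A note on the temperature channel added above: userspace now applies the
standard IIO convention, value = (raw + offset) * scale, instead of the
removed in_temp_* attributes. With the constants used here (offset -214.6,
scale 0.555556 from the SCALE case earlier in this function), an illustrative
check:

	temp_degC = (raw - 214.6) * 0.555556
	e.g. raw = 256:  (256 - 214.6) * 0.555556 ~= 23.0 degC
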
@@ -547,7 +574,7 @@ error_ret:
return ret;
}
/**
- * __sca3000_get_base_frequency() obtain mode specific base frequency
+ * __sca3000_get_base_freq() obtain mode specific base frequency
*
* lock must be held
**/
@@ -663,7 +690,8 @@ error_free_lock:
return ret ? ret : len;
}
-/* Should only really be registered if ring buffer support is compiled in.
+/*
+ * Should only really be registered if ring buffer support is compiled in.
* Does no harm however and doing it right would add a fair bit of complexity
*/
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
@@ -672,37 +700,6 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
sca3000_read_frequency,
sca3000_set_frequency);
-
-/**
- * sca3000_read_temp() sysfs interface to get the temperature when available
- *
-* The alignment of data in here is downright odd. See data sheet.
-* Converting this into a meaningful value is left to inline functions in
-* userspace part of header.
-**/
-static ssize_t sca3000_read_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
- int val;
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_TEMP_MSB, 2);
- if (ret < 0)
- goto error_ret;
- val = ((st->rx[0] & 0x3F) << 3) | ((st->rx[1] & 0xE0) >> 5);
-
- return sprintf(buf, "%d\n", val);
-
-error_ret:
- return ret;
-}
-static IIO_DEV_ATTR_TEMP_RAW(sca3000_read_temp);
-
-static IIO_CONST_ATTR_TEMP_SCALE("0.555556");
-static IIO_CONST_ATTR_TEMP_OFFSET("-214.6");
-
/**
* sca3000_read_thresh() - query of a threshold
**/
@@ -782,33 +779,16 @@ static struct attribute *sca3000_attributes[] = {
NULL,
};
-static struct attribute *sca3000_attributes_with_temp[] = {
- &iio_dev_attr_revision.dev_attr.attr,
- &iio_dev_attr_measurement_mode_available.dev_attr.attr,
- &iio_dev_attr_measurement_mode.dev_attr.attr,
- &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- /* Only present if temp sensor is */
- &iio_dev_attr_in_temp_raw.dev_attr.attr,
- &iio_const_attr_in_temp_offset.dev_attr.attr,
- &iio_const_attr_in_temp_scale.dev_attr.attr,
- NULL,
-};
-
static const struct attribute_group sca3000_attribute_group = {
.attrs = sca3000_attributes,
};
-static const struct attribute_group sca3000_attribute_group_with_temp = {
- .attrs = sca3000_attributes_with_temp,
-};
-
-/* RING RELATED interrupt handler */
-/* depending on event, push to the ring buffer event chrdev or the event one */
-
/**
* sca3000_event_handler() - handling ring and non ring events
*
+ * Ring related interrupt handler. Depending on event, push to
+ * the ring buffer event chrdev or the event one.
+ *
* This function is complicated by the fact that the devices can signify ring
* and non ring events via the same interrupt line and they can only
* be distinguished via a read of the relevant status register.
@@ -820,7 +800,8 @@ static irqreturn_t sca3000_event_handler(int irq, void *private)
int ret, val;
s64 last_timestamp = iio_get_time_ns();
- /* Could lead if badly timed to an extra read of status reg,
+ /*
+ * Could lead if badly timed to an extra read of status reg,
* but ensures no interrupt is missed.
*/
mutex_lock(&st->lock);
@@ -935,7 +916,6 @@ static ssize_t sca3000_query_free_fall_mode(struct device *dev,
* the device falls more than 25cm. This has not been tested due
* to fragile wiring.
**/
-
static ssize_t sca3000_set_free_fall_mode(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -957,7 +937,7 @@ static ssize_t sca3000_set_free_fall_mode(struct device *dev,
if (ret)
goto error_ret;
- /*if off and should be on*/
+ /* if off and should be on */
if (val && !(st->rx[0] & protect_mask))
ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
(st->rx[0] | SCA3000_FREE_FALL_DETECT));
@@ -972,7 +952,7 @@ error_ret:
}
/**
- * sca3000_set_mo_det() simple on off control for motion detector
+ * sca3000_write_event_config() simple on off control for motion detector
*
* This is a per axis control, but enabling any will result in the
* motion detector unit being enabled.
@@ -992,13 +972,15 @@ static int sca3000_write_event_config(struct iio_dev *indio_dev,
int num = chan->channel2;
mutex_lock(&st->lock);
- /* First read the motion detector config to find out if
- * this axis is on*/
+ /*
+ * First read the motion detector config to find out if
+ * this axis is on
+ */
ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
if (ret < 0)
goto exit_point;
ctrlval = ret;
- /* Off and should be on */
+ /* if off and should be on */
if (state && !(ctrlval & sca3000_addresses[num][2])) {
ret = sca3000_write_ctrl_reg(st,
SCA3000_REG_CTRL_SEL_MD_CTRL,
@@ -1021,7 +1003,7 @@ static int sca3000_write_event_config(struct iio_dev *indio_dev,
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
if (ret)
goto exit_point;
- /*if off and should be on*/
+ /* if off and should be on */
if ((st->mo_det_use_count)
&& ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET))
ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
@@ -1067,7 +1049,7 @@ static struct attribute_group sca3000_event_attribute_group = {
* Devices use flash memory to store many of the register values
* and hence can come up in somewhat unpredictable states.
* Hence reset everything on driver load.
- **/
+ **/
static int sca3000_clean_setup(struct sca3000_state *st)
{
int ret;
@@ -1107,9 +1089,11 @@ static int sca3000_clean_setup(struct sca3000_state *st)
| SCA3000_INT_MASK_ACTIVE_LOW);
if (ret)
goto error_ret;
- /* Select normal measurement mode, free fall off, ring off */
- /* Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
- * as that occurs in one of the example on the datasheet */
+ /*
+ * Select normal measurement mode, free fall off, ring off
+ * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
+ * as that occurs in one of the example on the datasheet
+ */
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
if (ret)
goto error_ret;
@@ -1133,16 +1117,6 @@ static const struct iio_info sca3000_info = {
.driver_module = THIS_MODULE,
};
-static const struct iio_info sca3000_info_with_temp = {
- .attrs = &sca3000_attribute_group_with_temp,
- .read_raw = &sca3000_read_raw,
- .read_event_value = &sca3000_read_thresh,
- .write_event_value = &sca3000_write_thresh,
- .read_event_config = &sca3000_read_event_config,
- .write_event_config = &sca3000_write_event_config,
- .driver_module = THIS_MODULE,
-};
-
static int sca3000_probe(struct spi_device *spi)
{
int ret;
@@ -1162,10 +1136,12 @@ static int sca3000_probe(struct spi_device *spi)
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
- if (st->info->temp_output)
- indio_dev->info = &sca3000_info_with_temp;
- else {
- indio_dev->info = &sca3000_info;
+ indio_dev->info = &sca3000_info;
+ if (st->info->temp_output) {
+ indio_dev->channels = sca3000_channels_with_temp;
+ indio_dev->num_channels =
+ ARRAY_SIZE(sca3000_channels_with_temp);
+ } else {
indio_dev->channels = sca3000_channels;
indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
}
@@ -1236,7 +1212,7 @@ static int sca3000_remove(struct spi_device *spi)
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct sca3000_state *st = iio_priv(indio_dev);
- /* Must ensure no interrupts can be generated after this!*/
+ /* Must ensure no interrupts can be generated after this! */
sca3000_stop_all_interrupts(st);
if (spi->irq)
free_irq(spi->irq, indio_dev);
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index ea0af6d81d2b..198710651e0e 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -309,7 +309,7 @@ int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
if (ret)
goto error_ret;
if (state) {
- printk(KERN_INFO "supposedly enabling ring buffer\n");
+ dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
ret = sca3000_write_reg(st,
SCA3000_REG_ADDR_MODE,
(st->rx[0] | SCA3000_RING_BUF_ENABLE));
diff --git a/drivers/staging/iio/adc/ad799x.h b/drivers/staging/iio/adc/ad799x.h
index a591aa6feae1..fc8c85298feb 100644
--- a/drivers/staging/iio/adc/ad799x.h
+++ b/drivers/staging/iio/adc/ad799x.h
@@ -95,7 +95,7 @@ struct ad799x_state {
struct i2c_client *client;
const struct ad799x_chip_info *chip_info;
struct regulator *reg;
- u16 int_vref_mv;
+ struct regulator *vref;
unsigned id;
u16 config;
@@ -103,14 +103,6 @@ struct ad799x_state {
unsigned int transfer_size;
};
-/*
- * TODO: struct ad799x_platform_data needs to go into include/linux/iio
- */
-
-struct ad799x_platform_data {
- u16 vref_mv;
-};
-
#ifdef CONFIG_AD799X_RING_BUFFER
int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad799x_ring_cleanup(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 5708ffc62aec..979ec77d6c2d 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -1,6 +1,6 @@
/*
* iio/adc/ad799x.c
- * Copyright (C) 2010-1011 Michael Hennerich, Analog Devices Inc.
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
*
* based on iio/adc/max1363
* Copyright (C) 2008-2010 Jonathan Cameron
@@ -179,7 +179,10 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
RES_MASK(chan->scan_type.realbits);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = st->int_vref_mv;
+ ret = regulator_get_voltage(st->vref);
+ if (ret < 0)
+ return ret;
+ *val = ret / 1000;
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
}
@@ -533,7 +536,6 @@ static int ad799x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret;
- struct ad799x_platform_data *pdata = client->dev.platform_data;
struct ad799x_state *st;
struct iio_dev *indio_dev;
@@ -551,17 +553,21 @@ static int ad799x_probe(struct i2c_client *client,
/* TODO: Add pdata options for filtering and bit delay */
- if (!pdata)
- return -EINVAL;
-
- st->int_vref_mv = pdata->vref_mv;
-
st->reg = devm_regulator_get(&client->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+ st->vref = devm_regulator_get(&client->dev, "vref");
+ if (IS_ERR(st->vref)) {
+ ret = PTR_ERR(st->vref);
+ goto error_disable_reg;
}
+ ret = regulator_enable(st->vref);
+ if (ret)
+ goto error_disable_reg;
+
st->client = client;
indio_dev->dev.parent = &client->dev;
@@ -577,28 +583,28 @@ static int ad799x_probe(struct i2c_client *client,
goto error_disable_reg;
if (client->irq > 0) {
- ret = request_threaded_irq(client->irq,
- NULL,
- ad799x_event_handler,
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- client->name,
- indio_dev);
+ ret = devm_request_threaded_irq(&client->dev,
+ client->irq,
+ NULL,
+ ad799x_event_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ client->name,
+ indio_dev);
if (ret)
goto error_cleanup_ring;
}
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_irq;
+ goto error_cleanup_ring;
return 0;
-error_free_irq:
- if (client->irq > 0)
- free_irq(client->irq, indio_dev);
error_cleanup_ring:
ad799x_ring_cleanup(indio_dev);
error_disable_reg:
+ if (!IS_ERR(st->vref))
+ regulator_disable(st->vref);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
@@ -611,10 +617,10 @@ static int ad799x_remove(struct i2c_client *client)
struct ad799x_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (client->irq > 0)
- free_irq(client->irq, indio_dev);
ad799x_ring_cleanup(indio_dev);
+ if (!IS_ERR(st->vref))
+ regulator_disable(st->vref);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
kfree(st->rx_buf);
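
With the ad799x changes above, the reference voltage is no longer a cached platform-data value: both the "vcc" supply and an explicit "vref" regulator are acquired in probe, and IIO_CHAN_INFO_SCALE reads the voltage back on demand. A hypothetical helper condensing that read-back path (the real driver does this inline in ad799x_read_raw()):

/* Return the reference voltage in millivolts, or a negative errno. */
static int ad799x_get_vref_mv(struct ad799x_state *st)
{
	int ret = regulator_get_voltage(st->vref);	/* microvolts */

	if (ret < 0)
		return ret;

	return ret / 1000;
}
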
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 514844efac75..11fb95201545 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -847,7 +847,8 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
/* Clean the slot's previous content, then set new one. */
- mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0), LRADC_CTRL4);
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0),
+ LRADC_CTRL4);
mxs_lradc_reg_set(lradc, chan, LRADC_CTRL4);
mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(0));
@@ -898,10 +899,6 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
{
struct mxs_lradc *lradc = iio_priv(iio_dev);
- /* Check for invalid channel */
- if (chan->channel > LRADC_MAX_TOTAL_CHANS)
- return -EINVAL;
-
switch (m) {
case IIO_CHAN_INFO_RAW:
if (chan->type == IIO_TEMP)
@@ -1173,7 +1170,8 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
else if (reg & LRADC_CTRL1_LRADC_IRQ(0))
complete(&lradc->completion);
- mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc), LRADC_CTRL1);
+ mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc),
+ LRADC_CTRL1);
return IRQ_HANDLED;
}
@@ -1264,7 +1262,8 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
uint32_t ctrl1_irq = 0;
const uint32_t chan_value = LRADC_CH_ACCUMULATE |
((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
- const int len = bitmap_weight(iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS);
+ const int len = bitmap_weight(iio->active_scan_mask,
+ LRADC_MAX_TOTAL_CHANS);
if (!len)
return -EINVAL;
@@ -1563,7 +1562,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
for (i = 0; i < of_cfg->irq_count; i++) {
lradc->irq[i] = platform_get_irq(pdev, i);
if (lradc->irq[i] < 0)
- return -EINVAL;
+ return lradc->irq[i];
ret = devm_request_irq(dev, lradc->irq[i],
mxs_lradc_handle_irq, 0,
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 16a8201228ff..9f0ebb329008 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -859,11 +859,14 @@ static ssize_t adt7316_show_DAC_update_mode(struct device *dev,
else {
switch (chip->dac_config & ADT7316_DA_EN_MODE_MASK) {
case ADT7316_DA_EN_MODE_SINGLE:
- return sprintf(buf, "0 - auto at any MSB DAC writing\n");
+ return sprintf(buf,
+ "0 - auto at any MSB DAC writing\n");
case ADT7316_DA_EN_MODE_AB_CD:
- return sprintf(buf, "1 - auto at MSB DAC AB and CD writing\n");
+ return sprintf(buf,
+ "1 - auto at MSB DAC AB and CD writing\n");
case ADT7316_DA_EN_MODE_ABCD:
- return sprintf(buf, "2 - auto at MSB DAC ABCD writing\n");
+ return sprintf(buf,
+ "2 - auto at MSB DAC ABCD writing\n");
default: /* ADT7316_DA_EN_MODE_LDAC */
return sprintf(buf, "3 - manual\n");
}
@@ -1102,7 +1105,8 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
ldac_config = chip->ldac_config | ADT7316_DAC_IN_VREF;
}
- ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG, ldac_config);
+ ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG,
+ ldac_config);
if (ret)
return -EIO;
@@ -1224,7 +1228,8 @@ static ssize_t adt7316_show_ex_temp_AIN1(struct device *dev,
return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_EX, buf);
}
-static IIO_DEVICE_ATTR(ex_temp_AIN1, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0);
+static IIO_DEVICE_ATTR(ex_temp_AIN1, S_IRUGO, adt7316_show_ex_temp_AIN1,
+ NULL, 0);
static IIO_DEVICE_ATTR(ex_temp, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0);
static ssize_t adt7316_show_AIN2(struct device *dev,
@@ -1319,7 +1324,8 @@ static ssize_t adt7316_store_in_temp_offset(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return adt7316_store_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf, len);
+ return adt7316_store_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf,
+ len);
}
static IIO_DEVICE_ATTR(in_temp_offset, S_IRUGO | S_IWUSR,
@@ -1344,7 +1350,8 @@ static ssize_t adt7316_store_ex_temp_offset(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return adt7316_store_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf, len);
+ return adt7316_store_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf,
+ len);
}
static IIO_DEVICE_ATTR(ex_temp_offset, S_IRUGO | S_IWUSR,
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
index 2dbfb499528d..ec50bf34628d 100644
--- a/drivers/staging/iio/addac/adt7316.h
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -18,10 +18,10 @@ struct adt7316_bus {
void *client;
int irq;
int irq_flags;
- int (*read) (void *client, u8 reg, u8 *data);
- int (*write) (void *client, u8 reg, u8 val);
- int (*multi_read) (void *client, u8 first_reg, u8 count, u8 *data);
- int (*multi_write) (void *client, u8 first_reg, u8 count, u8 *data);
+ int (*read)(void *client, u8 reg, u8 *data);
+ int (*write)(void *client, u8 reg, u8 val);
+ int (*multi_read)(void *client, u8 first_reg, u8 count, u8 *data);
+ int (*multi_write)(void *client, u8 first_reg, u8 count, u8 *data);
};
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index f8c659568c38..0a60def92735 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -433,7 +433,7 @@ static int taos_chip_on(struct iio_dev *indio_dev)
TSL258X_CMD_REG | TSL258X_CNTRL, utmp);
if (ret < 0) {
dev_err(&chip->client->dev, "taos_chip_on failed on CNTRL reg.\n");
- return -1;
+ return ret;
}
/* Use the following shadow copy for our delay before enabling ADC.
@@ -445,7 +445,7 @@ static int taos_chip_on(struct iio_dev *indio_dev)
if (ret < 0) {
dev_err(&chip->client->dev,
"taos_chip_on failed on reg %d.\n", i);
- return -1;
+ return ret;
}
}
@@ -458,7 +458,7 @@ static int taos_chip_on(struct iio_dev *indio_dev)
utmp);
if (ret < 0) {
dev_err(&chip->client->dev, "taos_chip_on failed on 2nd CTRL reg.\n");
- return -1;
+ return ret;
}
chip->taos_chip_status = TSL258X_CHIP_WORKING;
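
The tsl2583 hunks stop collapsing failures into a bare -1 and instead propagate whatever error the bus accessor returned, so callers and user space see a meaningful errno. A short sketch of the pattern, assuming an SMBus byte write underneath (the driver's actual accessor is not shown in this hunk):

ret = i2c_smbus_write_byte_data(chip->client, reg, val);
if (ret < 0) {
	dev_err(&chip->client->dev, "write to reg 0x%02x failed\n", reg);
	return ret;	/* e.g. -EIO or -ENXIO, never a bare -1 */
}
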
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 1e538086d48b..9e0f2a9c73ae 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -352,7 +352,7 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
/* device is not enabled */
dev_err(&chip->client->dev, "%s: device is not enabled\n",
__func__);
- ret = -EBUSY ;
+ ret = -EBUSY;
goto out_unlock;
}
@@ -1507,16 +1507,16 @@ static int tsl2x7x_device_id(unsigned char *id, int target)
case tsl2571:
case tsl2671:
case tsl2771:
- return ((*id & 0xf0) == TRITON_ID);
+ return (*id & 0xf0) == TRITON_ID;
case tmd2671:
case tmd2771:
- return ((*id & 0xf0) == HALIBUT_ID);
+ return (*id & 0xf0) == HALIBUT_ID;
case tsl2572:
case tsl2672:
case tmd2672:
case tsl2772:
case tmd2772:
- return ((*id & 0xf0) == SWORDFISH_ID);
+ return (*id & 0xf0) == SWORDFISH_ID;
}
return -EINVAL;
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 6966d5f76648..7fbaba41c872 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -312,7 +312,7 @@ static ssize_t ad2s1210_store_control(struct device *dev,
if (st->pdata->gpioin) {
data = ad2s1210_read_resolution_pin(st);
if (data != st->resolution)
- pr_warning("ad2s1210: resolution settings not match\n");
+ pr_warn("ad2s1210: resolution settings not match\n");
} else
ad2s1210_set_resolution_pin(st);
@@ -372,7 +372,7 @@ static ssize_t ad2s1210_store_resolution(struct device *dev,
if (st->pdata->gpioin) {
data = ad2s1210_read_resolution_pin(st);
if (data != st->resolution)
- pr_warning("ad2s1210: resolution settings not match\n");
+ pr_warn("ad2s1210: resolution settings not match\n");
} else
ad2s1210_set_resolution_pin(st);
ret = len;
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 78319ad176cd..c6e8ba7b3e4e 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -20,6 +20,7 @@ config DRM_IMX_FB_HELPER
config DRM_IMX_PARALLEL_DISPLAY
tristate "Support for parallel displays"
+ select DRM_PANEL
depends on DRM_IMX
select VIDEOMODE_HELPERS
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile
index 4677585b5ad5..129e3a3f59f1 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/staging/imx-drm/Makefile
@@ -1,12 +1,11 @@
-imxdrm-objs := imx-drm-core.o imx-fb.o
+imxdrm-objs := imx-drm-core.o
obj-$(CONFIG_DRM_IMX) += imxdrm.o
obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
-obj-$(CONFIG_DRM_IMX_FB_HELPER) += imx-fbdev.o
obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/
imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
diff --git a/drivers/staging/imx-drm/TODO b/drivers/staging/imx-drm/TODO
index 6a9da94c9573..29636fb13959 100644
--- a/drivers/staging/imx-drm/TODO
+++ b/drivers/staging/imx-drm/TODO
@@ -1,15 +1,10 @@
TODO:
- get DRM Maintainer review for this code
-- Wait for common display framework to hit mainline and update the IPU
- driver to use it. This will most probably make changes to the devicetree
- bindings necessary.
-- Factor out more code to common helper functions
- decide where to put the base driver. It is not specific to a subsystem
and would be used by DRM/KMS and media/V4L2
Missing features (not necessarily for moving out of staging):
-- Add i.MX6 HDMI support
- Add support for IC (Image converter)
- Add support for CSI (CMOS Sensor interface)
- Add support for VDIC (Video Deinterlacer)
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 236ed66f116a..4144a75e5f71 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -13,14 +13,15 @@
* GNU General Public License for more details.
*
*/
-
+#include <linux/component.h>
#include <linux/device.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <linux/fb.h>
-#include <linux/module.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
@@ -28,45 +29,29 @@
#define MAX_CRTC 4
-struct crtc_cookie {
- void *cookie;
- int id;
+struct imx_drm_crtc;
+
+struct imx_drm_component {
+ struct device_node *of_node;
struct list_head list;
};
struct imx_drm_device {
struct drm_device *drm;
- struct device *dev;
- struct list_head crtc_list;
- struct list_head encoder_list;
- struct list_head connector_list;
- struct mutex mutex;
+ struct imx_drm_crtc *crtc[MAX_CRTC];
int pipes;
struct drm_fbdev_cma *fbhelper;
};
struct imx_drm_crtc {
struct drm_crtc *crtc;
- struct list_head list;
- struct imx_drm_device *imxdrm;
int pipe;
struct imx_drm_crtc_helper_funcs imx_drm_helper_funcs;
- struct module *owner;
- struct crtc_cookie cookie;
-};
-
-struct imx_drm_encoder {
- struct drm_encoder *encoder;
- struct list_head list;
- struct module *owner;
- struct list_head possible_crtcs;
+ struct device_node *port;
};
-struct imx_drm_connector {
- struct drm_connector *connector;
- struct list_head list;
- struct module *owner;
-};
+static int legacyfb_depth = 16;
+module_param(legacyfb_depth, int, 0444);
int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
{
@@ -76,69 +61,71 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
static void imx_drm_driver_lastclose(struct drm_device *drm)
{
+#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
struct imx_drm_device *imxdrm = drm->dev_private;
if (imxdrm->fbhelper)
drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
+#endif
}
static int imx_drm_driver_unload(struct drm_device *drm)
{
+#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
struct imx_drm_device *imxdrm = drm->dev_private;
+#endif
- imx_drm_device_put();
+ drm_kms_helper_poll_fini(drm);
+
+#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
+ if (imxdrm->fbhelper)
+ drm_fbdev_cma_fini(imxdrm->fbhelper);
+#endif
+
+ component_unbind_all(drm->dev, drm);
drm_vblank_cleanup(drm);
- drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
return 0;
}
-/*
- * We don't care at all for crtc numbers, but the core expects the
- * crtcs to be numbered
- */
-static struct imx_drm_crtc *imx_drm_crtc_by_num(struct imx_drm_device *imxdrm,
- int num)
+static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
{
- struct imx_drm_crtc *imx_drm_crtc;
+ struct imx_drm_device *imxdrm = crtc->dev->dev_private;
+ unsigned i;
+
+ for (i = 0; i < MAX_CRTC; i++)
+ if (imxdrm->crtc[i] && imxdrm->crtc[i]->crtc == crtc)
+ return imxdrm->crtc[i];
- list_for_each_entry(imx_drm_crtc, &imxdrm->crtc_list, list)
- if (imx_drm_crtc->pipe == num)
- return imx_drm_crtc;
return NULL;
}
-int imx_drm_crtc_panel_format_pins(struct drm_crtc *crtc, u32 encoder_type,
+int imx_drm_panel_format_pins(struct drm_encoder *encoder,
u32 interface_pix_fmt, int hsync_pin, int vsync_pin)
{
- struct imx_drm_device *imxdrm = crtc->dev->dev_private;
- struct imx_drm_crtc *imx_crtc;
struct imx_drm_crtc_helper_funcs *helper;
+ struct imx_drm_crtc *imx_crtc;
- list_for_each_entry(imx_crtc, &imxdrm->crtc_list, list)
- if (imx_crtc->crtc == crtc)
- goto found;
+ imx_crtc = imx_drm_find_crtc(encoder->crtc);
+ if (!imx_crtc)
+ return -EINVAL;
- return -EINVAL;
-found:
helper = &imx_crtc->imx_drm_helper_funcs;
if (helper->set_interface_pix_fmt)
- return helper->set_interface_pix_fmt(crtc,
- encoder_type, interface_pix_fmt,
+ return helper->set_interface_pix_fmt(encoder->crtc,
+ encoder->encoder_type, interface_pix_fmt,
hsync_pin, vsync_pin);
return 0;
}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format_pins);
+EXPORT_SYMBOL_GPL(imx_drm_panel_format_pins);
-int imx_drm_crtc_panel_format(struct drm_crtc *crtc, u32 encoder_type,
- u32 interface_pix_fmt)
+int imx_drm_panel_format(struct drm_encoder *encoder, u32 interface_pix_fmt)
{
- return imx_drm_crtc_panel_format_pins(crtc, encoder_type,
- interface_pix_fmt, 2, 3);
+ return imx_drm_panel_format_pins(encoder, interface_pix_fmt, 2, 3);
}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format);
+EXPORT_SYMBOL_GPL(imx_drm_panel_format);
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
@@ -161,10 +148,9 @@ EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
static int imx_drm_enable_vblank(struct drm_device *drm, int crtc)
{
struct imx_drm_device *imxdrm = drm->dev_private;
- struct imx_drm_crtc *imx_drm_crtc;
+ struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[crtc];
int ret;
- imx_drm_crtc = imx_drm_crtc_by_num(imxdrm, crtc);
if (!imx_drm_crtc)
return -EINVAL;
@@ -180,9 +166,8 @@ static int imx_drm_enable_vblank(struct drm_device *drm, int crtc)
static void imx_drm_disable_vblank(struct drm_device *drm, int crtc)
{
struct imx_drm_device *imxdrm = drm->dev_private;
- struct imx_drm_crtc *imx_drm_crtc;
+ struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[crtc];
- imx_drm_crtc = imx_drm_crtc_by_num(imxdrm, crtc);
if (!imx_drm_crtc)
return;
@@ -215,172 +200,54 @@ static const struct file_operations imx_drm_driver_fops = {
.llseek = noop_llseek,
};
-static struct imx_drm_device *imx_drm_device;
-
-static struct imx_drm_device *__imx_drm_device(void)
+int imx_drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- return imx_drm_device;
+ return MODE_OK;
}
+EXPORT_SYMBOL(imx_drm_connector_mode_valid);
-struct drm_device *imx_drm_device_get(void)
+void imx_drm_connector_destroy(struct drm_connector *connector)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_encoder *enc;
- struct imx_drm_connector *con;
- struct imx_drm_crtc *crtc;
-
- list_for_each_entry(enc, &imxdrm->encoder_list, list) {
- if (!try_module_get(enc->owner)) {
- dev_err(imxdrm->dev, "could not get module %s\n",
- module_name(enc->owner));
- goto unwind_enc;
- }
- }
-
- list_for_each_entry(con, &imxdrm->connector_list, list) {
- if (!try_module_get(con->owner)) {
- dev_err(imxdrm->dev, "could not get module %s\n",
- module_name(con->owner));
- goto unwind_con;
- }
- }
-
- list_for_each_entry(crtc, &imxdrm->crtc_list, list) {
- if (!try_module_get(crtc->owner)) {
- dev_err(imxdrm->dev, "could not get module %s\n",
- module_name(crtc->owner));
- goto unwind_crtc;
- }
- }
-
- return imxdrm->drm;
-
-unwind_crtc:
- list_for_each_entry_continue_reverse(crtc, &imxdrm->crtc_list, list)
- module_put(crtc->owner);
-unwind_con:
- list_for_each_entry_continue_reverse(con, &imxdrm->connector_list, list)
- module_put(con->owner);
-unwind_enc:
- list_for_each_entry_continue_reverse(enc, &imxdrm->encoder_list, list)
- module_put(enc->owner);
-
- mutex_unlock(&imxdrm->mutex);
-
- return NULL;
-
-}
-EXPORT_SYMBOL_GPL(imx_drm_device_get);
-
-void imx_drm_device_put(void)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_encoder *enc;
- struct imx_drm_connector *con;
- struct imx_drm_crtc *crtc;
-
- mutex_lock(&imxdrm->mutex);
-
- list_for_each_entry(crtc, &imxdrm->crtc_list, list)
- module_put(crtc->owner);
-
- list_for_each_entry(con, &imxdrm->connector_list, list)
- module_put(con->owner);
-
- list_for_each_entry(enc, &imxdrm->encoder_list, list)
- module_put(enc->owner);
-
- mutex_unlock(&imxdrm->mutex);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
}
-EXPORT_SYMBOL_GPL(imx_drm_device_put);
+EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
-static int drm_mode_group_reinit(struct drm_device *dev)
+void imx_drm_encoder_destroy(struct drm_encoder *encoder)
{
- struct drm_mode_group *group = &dev->primary->mode_group;
- uint32_t *id_list = group->id_list;
- int ret;
-
- ret = drm_mode_group_init_legacy_group(dev, group);
- if (ret < 0)
- return ret;
-
- kfree(id_list);
- return 0;
-}
-
-/*
- * register an encoder to the drm core
- */
-static int imx_drm_encoder_register(struct imx_drm_encoder *imx_drm_encoder)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- INIT_LIST_HEAD(&imx_drm_encoder->possible_crtcs);
-
- drm_encoder_init(imxdrm->drm, imx_drm_encoder->encoder,
- imx_drm_encoder->encoder->funcs,
- imx_drm_encoder->encoder->encoder_type);
-
- drm_mode_group_reinit(imxdrm->drm);
-
- return 0;
-}
-
-/*
- * unregister an encoder from the drm core
- */
-static void imx_drm_encoder_unregister(struct imx_drm_encoder
- *imx_drm_encoder)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- drm_encoder_cleanup(imx_drm_encoder->encoder);
-
- drm_mode_group_reinit(imxdrm->drm);
+ drm_encoder_cleanup(encoder);
}
+EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);
-/*
- * register a connector to the drm core
- */
-static int imx_drm_connector_register(
- struct imx_drm_connector *imx_drm_connector)
+static void imx_drm_output_poll_changed(struct drm_device *drm)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- drm_connector_init(imxdrm->drm, imx_drm_connector->connector,
- imx_drm_connector->connector->funcs,
- imx_drm_connector->connector->connector_type);
- drm_mode_group_reinit(imxdrm->drm);
+#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
+ struct imx_drm_device *imxdrm = drm->dev_private;
- return drm_sysfs_connector_add(imx_drm_connector->connector);
+ drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
+#endif
}
-/*
- * unregister a connector from the drm core
- */
-static void imx_drm_connector_unregister(
- struct imx_drm_connector *imx_drm_connector)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- drm_sysfs_connector_remove(imx_drm_connector->connector);
- drm_connector_cleanup(imx_drm_connector->connector);
-
- drm_mode_group_reinit(imxdrm->drm);
-}
+static struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = imx_drm_output_poll_changed,
+};
/*
- * Called by the CRTC driver when all CRTCs are registered. This
- * puts all the pieces together and initializes the driver.
- * Once this is called no more CRTCs can be registered since
- * the drm core has hardcoded the number of crtcs in several
- * places.
+ * Main DRM initialisation. This binds, initialises and registers
+ * with DRM the subcomponents of the driver.
*/
static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
+ struct imx_drm_device *imxdrm;
+ struct drm_connector *connector;
int ret;
+ imxdrm = devm_kzalloc(drm->dev, sizeof(*imxdrm), GFP_KERNEL);
+ if (!imxdrm)
+ return -ENOMEM;
+
imxdrm->drm = drm;
drm->dev_private = imxdrm;
@@ -396,120 +263,118 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
*/
drm->irq_enabled = true;
- drm_mode_config_init(drm);
- imx_drm_mode_config_init(drm);
-
- mutex_lock(&imxdrm->mutex);
-
- drm_kms_helper_poll_init(drm);
+ /*
+ * set max width and height as default value(4096x4096).
+ * this value would be used to check framebuffer size limitation
+ * at drm_mode_addfb().
+ */
+ drm->mode_config.min_width = 64;
+ drm->mode_config.min_height = 64;
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+ drm->mode_config.funcs = &imx_drm_mode_config_funcs;
- /* setup the grouping for the legacy output */
- ret = drm_mode_group_init_legacy_group(drm,
- &drm->primary->mode_group);
- if (ret)
- goto err_kms;
+ drm_mode_config_init(drm);
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
goto err_kms;
/*
- * with vblank_disable_allowed = true, vblank interrupt will be disabled
- * by drm timer once a current process gives up ownership of
- * vblank event.(after drm_vblank_put function is called)
+ * with vblank_disable_allowed = true, vblank interrupt will be
+ * disabled by drm timer once a current process gives up ownership
+ * of vblank event. (after drm_vblank_put function is called)
*/
drm->vblank_disable_allowed = true;
- if (!imx_drm_device_get()) {
- ret = -EINVAL;
+ platform_set_drvdata(drm->platformdev, drm);
+
+ /* Now try and bind all our sub-components */
+ ret = component_bind_all(drm->dev, drm);
+ if (ret)
goto err_vblank;
+
+ /*
+ * All components are now added, we can publish the connector sysfs
+ * entries to userspace. This will generate hotplug events and so
+ * userspace will expect to be able to access DRM at this point.
+ */
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ ret = drm_sysfs_connector_add(connector);
+ if (ret) {
+ dev_err(drm->dev,
+ "[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector), ret);
+ goto err_unbind;
+ }
}
- platform_set_drvdata(drm->platformdev, drm);
- mutex_unlock(&imxdrm->mutex);
+ /*
+ * All components are now initialised, so setup the fb helper.
+ * The fb helper takes copies of key hardware information, so the
+ * crtcs/connectors/encoders must not change after this point.
+ */
+#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
+ if (legacyfb_depth != 16 && legacyfb_depth != 32) {
+ dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
+ legacyfb_depth = 16;
+ }
+ imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
+ drm->mode_config.num_crtc, MAX_CRTC);
+ if (IS_ERR(imxdrm->fbhelper)) {
+ ret = PTR_ERR(imxdrm->fbhelper);
+ imxdrm->fbhelper = NULL;
+ goto err_unbind;
+ }
+#endif
+
+ drm_kms_helper_poll_init(drm);
+
return 0;
+err_unbind:
+ component_unbind_all(drm->dev, drm);
err_vblank:
drm_vblank_cleanup(drm);
err_kms:
- drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
- mutex_unlock(&imxdrm->mutex);
return ret;
}
-static void imx_drm_update_possible_crtcs(void)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_crtc *imx_drm_crtc;
- struct imx_drm_encoder *enc;
- struct crtc_cookie *cookie;
-
- list_for_each_entry(enc, &imxdrm->encoder_list, list) {
- u32 possible_crtcs = 0;
-
- list_for_each_entry(cookie, &enc->possible_crtcs, list) {
- list_for_each_entry(imx_drm_crtc, &imxdrm->crtc_list, list) {
- if (imx_drm_crtc->cookie.cookie == cookie->cookie &&
- imx_drm_crtc->cookie.id == cookie->id) {
- possible_crtcs |= 1 << imx_drm_crtc->pipe;
- }
- }
- }
- enc->encoder->possible_crtcs = possible_crtcs;
- enc->encoder->possible_clones = possible_crtcs;
- }
-}
-
/*
* imx_drm_add_crtc - add a new crtc
- *
- * The return value if !NULL is a cookie for the caller to pass to
- * imx_drm_remove_crtc later.
*/
-int imx_drm_add_crtc(struct drm_crtc *crtc,
+int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
struct imx_drm_crtc **new_crtc,
const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs,
- struct module *owner, void *cookie, int id)
+ struct device_node *port)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
+ struct imx_drm_device *imxdrm = drm->dev_private;
struct imx_drm_crtc *imx_drm_crtc;
int ret;
- mutex_lock(&imxdrm->mutex);
-
/*
* The vblank arrays are dimensioned by MAX_CRTC - we can't
* pass IDs greater than this to those functions.
*/
- if (imxdrm->pipes >= MAX_CRTC) {
- ret = -EINVAL;
- goto err_busy;
- }
+ if (imxdrm->pipes >= MAX_CRTC)
+ return -EINVAL;
- if (imxdrm->drm->open_count) {
- ret = -EBUSY;
- goto err_busy;
- }
+ if (imxdrm->drm->open_count)
+ return -EBUSY;
imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
- if (!imx_drm_crtc) {
- ret = -ENOMEM;
- goto err_alloc;
- }
+ if (!imx_drm_crtc)
+ return -ENOMEM;
imx_drm_crtc->imx_drm_helper_funcs = *imx_drm_helper_funcs;
imx_drm_crtc->pipe = imxdrm->pipes++;
- imx_drm_crtc->cookie.cookie = cookie;
- imx_drm_crtc->cookie.id = id;
-
+ imx_drm_crtc->port = port;
imx_drm_crtc->crtc = crtc;
- imx_drm_crtc->imxdrm = imxdrm;
- imx_drm_crtc->owner = owner;
-
- list_add_tail(&imx_drm_crtc->list, &imxdrm->crtc_list);
+ imxdrm->crtc[imx_drm_crtc->pipe] = imx_drm_crtc;
*new_crtc = imx_drm_crtc;
@@ -520,23 +385,14 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
drm_crtc_helper_add(crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
- drm_crtc_init(imxdrm->drm, crtc,
+ drm_crtc_init(drm, crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
- drm_mode_group_reinit(imxdrm->drm);
-
- imx_drm_update_possible_crtcs();
-
- mutex_unlock(&imxdrm->mutex);
-
return 0;
err_register:
- list_del(&imx_drm_crtc->list);
+ imxdrm->crtc[imx_drm_crtc->pipe] = NULL;
kfree(imx_drm_crtc);
-err_alloc:
-err_busy:
- mutex_unlock(&imxdrm->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
@@ -546,17 +402,11 @@ EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
*/
int imx_drm_remove_crtc(struct imx_drm_crtc *imx_drm_crtc)
{
- struct imx_drm_device *imxdrm = imx_drm_crtc->imxdrm;
-
- mutex_lock(&imxdrm->mutex);
+ struct imx_drm_device *imxdrm = imx_drm_crtc->crtc->dev->dev_private;
drm_crtc_cleanup(imx_drm_crtc->crtc);
- list_del(&imx_drm_crtc->list);
-
- drm_mode_group_reinit(imxdrm->drm);
-
- mutex_unlock(&imxdrm->mutex);
+ imxdrm->crtc[imx_drm_crtc->pipe] = NULL;
kfree(imx_drm_crtc);
@@ -565,220 +415,115 @@ int imx_drm_remove_crtc(struct imx_drm_crtc *imx_drm_crtc)
EXPORT_SYMBOL_GPL(imx_drm_remove_crtc);
/*
- * imx_drm_add_encoder - add a new encoder
+ * Find the DRM CRTC possible mask for the connected endpoint.
+ *
+ * The encoder possible masks are defined by their position in the
+ * mode_config crtc_list. This means that CRTCs must not be added
+ * or removed once the DRM device has been fully initialised.
*/
-int imx_drm_add_encoder(struct drm_encoder *encoder,
- struct imx_drm_encoder **newenc, struct module *owner)
+static uint32_t imx_drm_find_crtc_mask(struct imx_drm_device *imxdrm,
+ struct device_node *endpoint)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_encoder *imx_drm_encoder;
- int ret;
-
- mutex_lock(&imxdrm->mutex);
+ struct device_node *port;
+ unsigned i;
- if (imxdrm->drm->open_count) {
- ret = -EBUSY;
- goto err_busy;
- }
+ port = of_graph_get_remote_port(endpoint);
+ if (!port)
+ return 0;
+ of_node_put(port);
- imx_drm_encoder = kzalloc(sizeof(*imx_drm_encoder), GFP_KERNEL);
- if (!imx_drm_encoder) {
- ret = -ENOMEM;
- goto err_alloc;
+ for (i = 0; i < MAX_CRTC; i++) {
+ struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[i];
+ if (imx_drm_crtc && imx_drm_crtc->port == port)
+ return drm_crtc_mask(imx_drm_crtc->crtc);
}
- imx_drm_encoder->encoder = encoder;
- imx_drm_encoder->owner = owner;
-
- ret = imx_drm_encoder_register(imx_drm_encoder);
- if (ret) {
- ret = -ENOMEM;
- goto err_register;
- }
-
- list_add_tail(&imx_drm_encoder->list, &imxdrm->encoder_list);
-
- *newenc = imx_drm_encoder;
-
- mutex_unlock(&imxdrm->mutex);
-
return 0;
+}
-err_register:
- kfree(imx_drm_encoder);
-err_alloc:
-err_busy:
- mutex_unlock(&imxdrm->mutex);
-
- return ret;
+static struct device_node *imx_drm_of_get_next_endpoint(
+ const struct device_node *parent, struct device_node *prev)
+{
+ struct device_node *node = of_graph_get_next_endpoint(parent, prev);
+ of_node_put(prev);
+ return node;
}
-EXPORT_SYMBOL_GPL(imx_drm_add_encoder);
-int imx_drm_encoder_add_possible_crtcs(
- struct imx_drm_encoder *imx_drm_encoder,
- struct device_node *np)
+int imx_drm_encoder_parse_of(struct drm_device *drm,
+ struct drm_encoder *encoder, struct device_node *np)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct of_phandle_args args;
- struct crtc_cookie *c;
- int ret = 0;
+ struct imx_drm_device *imxdrm = drm->dev_private;
+ struct device_node *ep = NULL;
+ uint32_t crtc_mask = 0;
int i;
- if (!list_empty(&imx_drm_encoder->possible_crtcs))
- return -EBUSY;
+ for (i = 0; ; i++) {
+ u32 mask;
- for (i = 0; !ret; i++) {
- ret = of_parse_phandle_with_args(np, "crtcs",
- "#crtc-cells", i, &args);
- if (ret < 0)
+ ep = imx_drm_of_get_next_endpoint(np, ep);
+ if (!ep)
break;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c) {
- of_node_put(args.np);
- return -ENOMEM;
- }
-
- c->cookie = args.np;
- c->id = args.args_count > 0 ? args.args[0] : 0;
-
- of_node_put(args.np);
-
- mutex_lock(&imxdrm->mutex);
-
- list_add_tail(&c->list, &imx_drm_encoder->possible_crtcs);
-
- mutex_unlock(&imxdrm->mutex);
- }
+ mask = imx_drm_find_crtc_mask(imxdrm, ep);
- imx_drm_update_possible_crtcs();
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (mask == 0)
+ return -EPROBE_DEFER;
- return 0;
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_add_possible_crtcs);
-
-int imx_drm_encoder_get_mux_id(struct imx_drm_encoder *imx_drm_encoder,
- struct drm_crtc *crtc)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_crtc *imx_crtc;
- int i = 0;
-
- list_for_each_entry(imx_crtc, &imxdrm->crtc_list, list) {
- if (imx_crtc->crtc == crtc)
- goto found;
- i++;
+ crtc_mask |= mask;
}
- return -EINVAL;
-found:
- return i;
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id);
-
-/*
- * imx_drm_remove_encoder - remove an encoder
- */
-int imx_drm_remove_encoder(struct imx_drm_encoder *imx_drm_encoder)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct crtc_cookie *c, *tmp;
-
- mutex_lock(&imxdrm->mutex);
-
- imx_drm_encoder_unregister(imx_drm_encoder);
-
- list_del(&imx_drm_encoder->list);
-
- list_for_each_entry_safe(c, tmp, &imx_drm_encoder->possible_crtcs,
- list)
- kfree(c);
+ if (ep)
+ of_node_put(ep);
+ if (i == 0)
+ return -ENOENT;
- mutex_unlock(&imxdrm->mutex);
+ encoder->possible_crtcs = crtc_mask;
- kfree(imx_drm_encoder);
+ /* FIXME: this is the mask of outputs which can clone this output. */
+ encoder->possible_clones = ~0;
return 0;
}
-EXPORT_SYMBOL_GPL(imx_drm_remove_encoder);
+EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of);
/*
- * imx_drm_add_connector - add a connector
+ * @node: device tree node containing encoder input ports
+ * @encoder: drm_encoder
*/
-int imx_drm_add_connector(struct drm_connector *connector,
- struct imx_drm_connector **new_con,
- struct module *owner)
+int imx_drm_encoder_get_mux_id(struct device_node *node,
+ struct drm_encoder *encoder)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_connector *imx_drm_connector;
+ struct imx_drm_crtc *imx_crtc = imx_drm_find_crtc(encoder->crtc);
+ struct device_node *ep = NULL;
+ struct of_endpoint endpoint;
+ struct device_node *port;
int ret;
- mutex_lock(&imxdrm->mutex);
-
- if (imxdrm->drm->open_count) {
- ret = -EBUSY;
- goto err_busy;
- }
-
- imx_drm_connector = kzalloc(sizeof(*imx_drm_connector), GFP_KERNEL);
- if (!imx_drm_connector) {
- ret = -ENOMEM;
- goto err_alloc;
- }
-
- imx_drm_connector->connector = connector;
- imx_drm_connector->owner = owner;
-
- ret = imx_drm_connector_register(imx_drm_connector);
- if (ret)
- goto err_register;
-
- list_add_tail(&imx_drm_connector->list, &imxdrm->connector_list);
-
- *new_con = imx_drm_connector;
-
- mutex_unlock(&imxdrm->mutex);
-
- return 0;
-
-err_register:
- kfree(imx_drm_connector);
-err_alloc:
-err_busy:
- mutex_unlock(&imxdrm->mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(imx_drm_add_connector);
-
-void imx_drm_fb_helper_set(struct drm_fbdev_cma *fbdev_helper)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- imxdrm->fbhelper = fbdev_helper;
-}
-EXPORT_SYMBOL_GPL(imx_drm_fb_helper_set);
-
-/*
- * imx_drm_remove_connector - remove a connector
- */
-int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- mutex_lock(&imxdrm->mutex);
-
- imx_drm_connector_unregister(imx_drm_connector);
-
- list_del(&imx_drm_connector->list);
+ if (!node || !imx_crtc)
+ return -EINVAL;
- mutex_unlock(&imxdrm->mutex);
+ do {
+ ep = imx_drm_of_get_next_endpoint(node, ep);
+ if (!ep)
+ break;
- kfree(imx_drm_connector);
+ port = of_graph_get_remote_port(ep);
+ of_node_put(port);
+ if (port == imx_crtc->port) {
+ ret = of_graph_parse_endpoint(ep, &endpoint);
+ return ret ? ret : endpoint.id;
+ }
+ } while (ep);
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL_GPL(imx_drm_remove_connector);
+EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id);
static const struct drm_ioctl_desc imx_drm_ioctls[] = {
/* none so far */
@@ -819,80 +564,156 @@ static struct drm_driver imx_drm_driver = {
.patchlevel = 0,
};
-static int imx_drm_platform_probe(struct platform_device *pdev)
+static int compare_of(struct device *dev, void *data)
{
- int ret;
+ struct device_node *np = data;
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- imx_drm_device->dev = &pdev->dev;
+ /* Special case for LDB, one device for two channels */
+ if (of_node_cmp(np->name, "lvds-channel") == 0) {
+ np = of_get_parent(np);
+ of_node_put(np);
+ }
- return drm_platform_init(&imx_drm_driver, pdev);
+ return dev->of_node == np;
}
-static int imx_drm_platform_remove(struct platform_device *pdev)
+static LIST_HEAD(imx_drm_components);
+
+static int imx_drm_add_components(struct device *master, struct master *m)
{
- drm_put_dev(platform_get_drvdata(pdev));
+ struct imx_drm_component *component;
+ int ret;
+ list_for_each_entry(component, &imx_drm_components, list) {
+ ret = component_master_add_child(m, compare_of,
+ component->of_node);
+ if (ret)
+ return ret;
+ }
return 0;
}
-static struct platform_driver imx_drm_pdrv = {
- .probe = imx_drm_platform_probe,
- .remove = imx_drm_platform_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "imx-drm",
- },
+static int imx_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&imx_drm_driver, to_platform_device(dev));
+}
+
+static void imx_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops imx_drm_ops = {
+ .add_components = imx_drm_add_components,
+ .bind = imx_drm_bind,
+ .unbind = imx_drm_unbind,
};
-static struct platform_device *imx_drm_pdev;
+static struct imx_drm_component *imx_drm_find_component(struct device *dev,
+ struct device_node *node)
+{
+ struct imx_drm_component *component;
+
+ list_for_each_entry(component, &imx_drm_components, list)
+ if (component->of_node == node)
+ return component;
-static int __init imx_drm_init(void)
+ return NULL;
+}
+
+static int imx_drm_add_component(struct device *dev, struct device_node *node)
{
- int ret;
+ struct imx_drm_component *component;
+
+ if (imx_drm_find_component(dev, node))
+ return 0;
- imx_drm_device = kzalloc(sizeof(*imx_drm_device), GFP_KERNEL);
- if (!imx_drm_device)
+ component = devm_kzalloc(dev, sizeof(*component), GFP_KERNEL);
+ if (!component)
return -ENOMEM;
- mutex_init(&imx_drm_device->mutex);
- INIT_LIST_HEAD(&imx_drm_device->crtc_list);
- INIT_LIST_HEAD(&imx_drm_device->connector_list);
- INIT_LIST_HEAD(&imx_drm_device->encoder_list);
+ component->of_node = node;
+ list_add_tail(&component->list, &imx_drm_components);
+
+ return 0;
+}
+
+static int imx_drm_platform_probe(struct platform_device *pdev)
+{
+ struct device_node *ep, *port, *remote;
+ int ret;
+ int i;
+
+ /*
+ * Bind the IPU display interface ports first, so that
+ * imx_drm_encoder_parse_of called from encoder .bind callbacks
+ * works as expected.
+ */
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(pdev->dev.of_node, "ports", i);
+ if (!port)
+ break;
+
+ ret = imx_drm_add_component(&pdev->dev, port);
+ if (ret < 0)
+ return ret;
+ }
- imx_drm_pdev = platform_device_register_simple("imx-drm", -1, NULL, 0);
- if (IS_ERR(imx_drm_pdev)) {
- ret = PTR_ERR(imx_drm_pdev);
- goto err_pdev;
+ if (i == 0) {
+ dev_err(&pdev->dev, "missing 'ports' property\n");
+ return -ENODEV;
}
- ret = platform_driver_register(&imx_drm_pdrv);
- if (ret)
- goto err_pdrv;
+ /* Then bind all encoders */
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(pdev->dev.of_node, "ports", i);
+ if (!port)
+ break;
- return 0;
+ for_each_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ }
-err_pdrv:
- platform_device_unregister(imx_drm_pdev);
-err_pdev:
- kfree(imx_drm_device);
+ ret = imx_drm_add_component(&pdev->dev, remote);
+ of_node_put(remote);
+ if (ret < 0)
+ return ret;
+ }
+ of_node_put(port);
+ }
- return ret;
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ return component_master_add(&pdev->dev, &imx_drm_ops);
}
-static void __exit imx_drm_exit(void)
+static int imx_drm_platform_remove(struct platform_device *pdev)
{
- platform_device_unregister(imx_drm_pdev);
- platform_driver_unregister(&imx_drm_pdrv);
-
- kfree(imx_drm_device);
+ component_master_del(&pdev->dev, &imx_drm_ops);
+ return 0;
}
-module_init(imx_drm_init);
-module_exit(imx_drm_exit);
+static const struct of_device_id imx_drm_dt_ids[] = {
+ { .compatible = "fsl,imx-display-subsystem", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx_drm_dt_ids);
+
+static struct platform_driver imx_drm_pdrv = {
+ .probe = imx_drm_platform_probe,
+ .remove = imx_drm_platform_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "imx-drm",
+ .of_match_table = imx_drm_dt_ids,
+ },
+};
+module_platform_driver(imx_drm_pdrv);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX drm driver core");
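
The imx-drm-core.c rework above moves the driver onto the generic component framework: the platform device collects the IPU ports and encoders named in the "ports" property as children, and drm_platform_init() only runs from the master's .bind() once every child has probed. The counterpart lives in the sub-drivers; a hedged sketch of that child side, with placeholder names (the real CRTC/encoder drivers differ in detail):

static int ipu_crtc_bind(struct device *dev, struct device *master,
			 void *data)
{
	struct drm_device *drm = data;	/* handed down by component_bind_all() */

	/* create the CRTC here and register it via imx_drm_add_crtc(drm, ...) */
	return 0;
}

static void ipu_crtc_unbind(struct device *dev, struct device *master,
			    void *data)
{
	/* imx_drm_remove_crtc(...) and other teardown */
}

static const struct component_ops ipu_crtc_component_ops = {
	.bind	= ipu_crtc_bind,
	.unbind	= ipu_crtc_unbind,
};

static int ipu_crtc_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &ipu_crtc_component_ops);
}
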
diff --git a/drivers/staging/imx-drm/imx-drm.h b/drivers/staging/imx-drm/imx-drm.h
index ae90c9c15312..a322bac55414 100644
--- a/drivers/staging/imx-drm/imx-drm.h
+++ b/drivers/staging/imx-drm/imx-drm.h
@@ -1,17 +1,15 @@
#ifndef _IMX_DRM_H_
#define _IMX_DRM_H_
-#include <linux/videodev2.h>
-
-#define IPU_PIX_FMT_GBR24 v4l2_fourcc('G', 'B', 'R', '3')
-
+struct device_node;
struct drm_crtc;
struct drm_connector;
struct drm_device;
+struct drm_display_mode;
struct drm_encoder;
-struct imx_drm_crtc;
struct drm_fbdev_cma;
struct drm_framebuffer;
+struct imx_drm_crtc;
struct platform_device;
int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
@@ -25,10 +23,10 @@ struct imx_drm_crtc_helper_funcs {
const struct drm_crtc_funcs *crtc_funcs;
};
-int imx_drm_add_crtc(struct drm_crtc *crtc,
+int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
struct imx_drm_crtc **new_crtc,
const struct imx_drm_crtc_helper_funcs *imx_helper_funcs,
- struct module *owner, void *cookie, int id);
+ struct device_node *port);
int imx_drm_remove_crtc(struct imx_drm_crtc *);
int imx_drm_init_drm(struct platform_device *pdev,
int preferred_bpp);
@@ -38,35 +36,23 @@ int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc);
void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc);
void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc);
-struct imx_drm_encoder;
-int imx_drm_add_encoder(struct drm_encoder *encoder,
- struct imx_drm_encoder **new_enc,
- struct module *owner);
-int imx_drm_remove_encoder(struct imx_drm_encoder *);
-
-struct imx_drm_connector;
-int imx_drm_add_connector(struct drm_connector *connector,
- struct imx_drm_connector **new_con,
- struct module *owner);
-int imx_drm_remove_connector(struct imx_drm_connector *);
-
void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
-struct drm_device *imx_drm_device_get(void);
-void imx_drm_device_put(void);
-int imx_drm_crtc_panel_format_pins(struct drm_crtc *crtc, u32 encoder_type,
+int imx_drm_panel_format_pins(struct drm_encoder *encoder,
u32 interface_pix_fmt, int hsync_pin, int vsync_pin);
-int imx_drm_crtc_panel_format(struct drm_crtc *crtc, u32 encoder_type,
+int imx_drm_panel_format(struct drm_encoder *encoder,
u32 interface_pix_fmt);
-void imx_drm_fb_helper_set(struct drm_fbdev_cma *fbdev_helper);
-struct device_node;
+int imx_drm_encoder_get_mux_id(struct device_node *node,
+ struct drm_encoder *encoder);
+int imx_drm_encoder_parse_of(struct drm_device *drm,
+ struct drm_encoder *encoder, struct device_node *np);
-int imx_drm_encoder_get_mux_id(struct imx_drm_encoder *imx_drm_encoder,
- struct drm_crtc *crtc);
-int imx_drm_encoder_add_possible_crtcs(struct imx_drm_encoder *imx_drm_encoder,
- struct device_node *np);
+int imx_drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+void imx_drm_connector_destroy(struct drm_connector *connector);
+void imx_drm_encoder_destroy(struct drm_encoder *encoder);
#endif /* _IMX_DRM_H_ */
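
The header now also exposes imx_drm_encoder_parse_of(), which replaces the old cookie-based possible-crtcs bookkeeping with an OF-graph lookup. A hedged sketch of how an encoder's component .bind() callback would use it; the surrounding encoder/connector initialisation is elided and the function name is a placeholder:

static int imx_pd_register(struct drm_device *drm,
			   struct drm_encoder *encoder,
			   struct device_node *np)
{
	int ret;

	/* Fill encoder->possible_crtcs from the OF graph; returns
	 * -EPROBE_DEFER until the referenced CRTCs have been bound. */
	ret = imx_drm_encoder_parse_of(drm, encoder, np);
	if (ret)
		return ret;

	/* ... drm_encoder_init(), drm_connector_init(), etc. ... */
	return 0;
}
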
diff --git a/drivers/staging/imx-drm/imx-fb.c b/drivers/staging/imx-drm/imx-fb.c
deleted file mode 100644
index 03a7b4e14f67..000000000000
--- a/drivers/staging/imx-drm/imx-fb.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * i.MX drm driver
- *
- * Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * Based on Samsung Exynos code
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "imx-drm.h"
-
-static struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
-};
-
-void imx_drm_mode_config_init(struct drm_device *dev)
-{
- dev->mode_config.min_width = 64;
- dev->mode_config.min_height = 64;
-
- /*
- * set max width and height as default value(4096x4096).
- * this value would be used to check framebuffer size limitation
- * at drm_mode_addfb().
- */
- dev->mode_config.max_width = 4096;
- dev->mode_config.max_height = 4096;
-
- dev->mode_config.funcs = &imx_drm_mode_config_funcs;
-}
diff --git a/drivers/staging/imx-drm/imx-fbdev.c b/drivers/staging/imx-drm/imx-fbdev.c
deleted file mode 100644
index 8331739c3d08..000000000000
--- a/drivers/staging/imx-drm/imx-fbdev.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * i.MX drm driver
- *
- * Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * Based on Samsung Exynos code
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "imx-drm.h"
-
-#define MAX_CONNECTOR 4
-#define PREFERRED_BPP 16
-
-static struct drm_fbdev_cma *fbdev_cma;
-
-static int legacyfb_depth = 16;
-
-module_param(legacyfb_depth, int, 0444);
-
-static int __init imx_fb_helper_init(void)
-{
- struct drm_device *drm = imx_drm_device_get();
-
- if (!drm)
- return -EINVAL;
-
- if (legacyfb_depth != 16 && legacyfb_depth != 32) {
- pr_warn("i.MX legacyfb: invalid legacyfb_depth setting. defaulting to 16bpp\n");
- legacyfb_depth = 16;
- }
-
- fbdev_cma = drm_fbdev_cma_init(drm, legacyfb_depth,
- drm->mode_config.num_crtc, MAX_CONNECTOR);
-
- if (IS_ERR(fbdev_cma)) {
- imx_drm_device_put();
- return PTR_ERR(fbdev_cma);
- }
-
- imx_drm_fb_helper_set(fbdev_cma);
-
- return 0;
-}
-
-static void __exit imx_fb_helper_exit(void)
-{
- imx_drm_fb_helper_set(NULL);
- drm_fbdev_cma_fini(fbdev_cma);
- imx_drm_device_put();
-}
-
-late_initcall(imx_fb_helper_init);
-module_exit(imx_fb_helper_exit);
-
-MODULE_DESCRIPTION("Freescale i.MX legacy fb driver");
-MODULE_AUTHOR("Sascha Hauer, Pengutronix");
-MODULE_LICENSE("GPL");
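
The imx-hdmi.c hunks that follow introduce an hdmi_modb() read-modify-write helper and use it to replace the repeated readb/mask/writeb sequences. A minimal sketch of the idiom as added below:

static void hdmi_modb(struct imx_hdmi *hdmi, u8 data, u8 mask, unsigned reg)
{
	u8 val = hdmi_readb(hdmi, reg) & ~mask;

	val |= data & mask;
	hdmi_writeb(hdmi, val, reg);
}

/* e.g. clear the manual-CTS bit without touching the other bits: */
hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
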
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
index 62ce0e86f14b..d47dedd2cdb4 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -12,6 +12,7 @@
* Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*/
+#include <linux/component.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -112,15 +113,15 @@ struct hdmi_data_info {
struct imx_hdmi {
struct drm_connector connector;
- struct imx_drm_connector *imx_drm_connector;
struct drm_encoder encoder;
- struct imx_drm_encoder *imx_drm_encoder;
enum imx_hdmi_devtype dev_type;
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
+ enum drm_connector_status connector_status;
+
struct hdmi_data_info hdmi_data;
int vic;
@@ -134,7 +135,6 @@ struct imx_hdmi {
struct i2c_adapter *ddc;
void __iomem *regs;
- unsigned long pixel_clk_rate;
unsigned int sample_rate;
int ratio;
};
@@ -156,37 +156,34 @@ static inline u8 hdmi_readb(struct imx_hdmi *hdmi, int offset)
return readb(hdmi->regs + offset);
}
+static void hdmi_modb(struct imx_hdmi *hdmi, u8 data, u8 mask, unsigned reg)
+{
+ u8 val = hdmi_readb(hdmi, reg) & ~mask;
+ val |= data & mask;
+ hdmi_writeb(hdmi, val, reg);
+}
+
static void hdmi_mask_writeb(struct imx_hdmi *hdmi, u8 data, unsigned int reg,
u8 shift, u8 mask)
{
- u8 value = hdmi_readb(hdmi, reg) & ~mask;
- value |= (data << shift) & mask;
- hdmi_writeb(hdmi, value, reg);
+ hdmi_modb(hdmi, data << shift, mask, reg);
}
static void hdmi_set_clock_regenerator_n(struct imx_hdmi *hdmi,
unsigned int value)
{
- u8 val;
-
hdmi_writeb(hdmi, value & 0xff, HDMI_AUD_N1);
hdmi_writeb(hdmi, (value >> 8) & 0xff, HDMI_AUD_N2);
hdmi_writeb(hdmi, (value >> 16) & 0x0f, HDMI_AUD_N3);
/* nshift factor = 0 */
- val = hdmi_readb(hdmi, HDMI_AUD_CTS3);
- val &= ~HDMI_AUD_CTS3_N_SHIFT_MASK;
- hdmi_writeb(hdmi, val, HDMI_AUD_CTS3);
+ hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_N_SHIFT_MASK, HDMI_AUD_CTS3);
}
static void hdmi_regenerate_cts(struct imx_hdmi *hdmi, unsigned int cts)
{
- u8 val;
-
/* Must be set/cleared first */
- val = hdmi_readb(hdmi, HDMI_AUD_CTS3);
- val &= ~HDMI_AUD_CTS3_CTS_MANUAL;
- hdmi_writeb(hdmi, val, HDMI_AUD_CTS3);
+ hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1);
hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2);
@@ -331,34 +328,25 @@ static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
return (cts * ratio) / 100;
}
-static void hdmi_get_pixel_clk(struct imx_hdmi *hdmi)
-{
- unsigned long rate;
-
- rate = 65000000; /* FIXME */
-
- if (rate)
- hdmi->pixel_clk_rate = rate;
-}
-
-static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi)
+static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi,
+ unsigned long pixel_clk)
{
unsigned int clk_n, clk_cts;
- clk_n = hdmi_compute_n(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ clk_n = hdmi_compute_n(hdmi->sample_rate, pixel_clk,
hdmi->ratio);
- clk_cts = hdmi_compute_cts(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ clk_cts = hdmi_compute_cts(hdmi->sample_rate, pixel_clk,
hdmi->ratio);
if (!clk_cts) {
dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
- __func__, hdmi->pixel_clk_rate);
+ __func__, pixel_clk);
return;
}
dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n",
__func__, hdmi->sample_rate, hdmi->ratio,
- hdmi->pixel_clk_rate, clk_n, clk_cts);
+ pixel_clk, clk_n, clk_cts);
hdmi_set_clock_regenerator_n(hdmi, clk_n);
hdmi_regenerate_cts(hdmi, clk_cts);
@@ -366,32 +354,12 @@ static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi)
static void hdmi_init_clk_regenerator(struct imx_hdmi *hdmi)
{
- unsigned int clk_n, clk_cts;
-
- clk_n = hdmi_compute_n(hdmi->sample_rate, hdmi->pixel_clk_rate,
- hdmi->ratio);
- clk_cts = hdmi_compute_cts(hdmi->sample_rate, hdmi->pixel_clk_rate,
- hdmi->ratio);
-
- if (!clk_cts) {
- dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
- __func__, hdmi->pixel_clk_rate);
- return;
- }
-
- dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n",
- __func__, hdmi->sample_rate, hdmi->ratio,
- hdmi->pixel_clk_rate, clk_n, clk_cts);
-
- hdmi_set_clock_regenerator_n(hdmi, clk_n);
- hdmi_regenerate_cts(hdmi, clk_cts);
+ hdmi_set_clk_regenerator(hdmi, 74250000);
}
static void hdmi_clk_regenerator_update_pixel_clock(struct imx_hdmi *hdmi)
{
- /* Get pixel clock from ipu */
- hdmi_get_pixel_clk(hdmi);
- hdmi_set_clk_regenerator(hdmi);
+ hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
}
/*
@@ -459,38 +427,45 @@ static void hdmi_video_sample(struct imx_hdmi *hdmi)
static int is_color_space_conversion(struct imx_hdmi *hdmi)
{
- return (hdmi->hdmi_data.enc_in_format !=
- hdmi->hdmi_data.enc_out_format);
+ return hdmi->hdmi_data.enc_in_format != hdmi->hdmi_data.enc_out_format;
}
static int is_color_space_decimation(struct imx_hdmi *hdmi)
{
- return ((hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) &&
- (hdmi->hdmi_data.enc_in_format == RGB ||
- hdmi->hdmi_data.enc_in_format == YCBCR444));
+ if (hdmi->hdmi_data.enc_out_format != YCBCR422_8BITS)
+ return 0;
+ if (hdmi->hdmi_data.enc_in_format == RGB ||
+ hdmi->hdmi_data.enc_in_format == YCBCR444)
+ return 1;
+ return 0;
}
static int is_color_space_interpolation(struct imx_hdmi *hdmi)
{
- return ((hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) &&
- (hdmi->hdmi_data.enc_out_format == RGB ||
- hdmi->hdmi_data.enc_out_format == YCBCR444));
+ if (hdmi->hdmi_data.enc_in_format != YCBCR422_8BITS)
+ return 0;
+ if (hdmi->hdmi_data.enc_out_format == RGB ||
+ hdmi->hdmi_data.enc_out_format == YCBCR444)
+ return 1;
+ return 0;
}
static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
{
const u16 (*csc_coeff)[3][4] = &csc_coeff_default;
+ unsigned i;
u32 csc_scale = 1;
- u8 val;
if (is_color_space_conversion(hdmi)) {
if (hdmi->hdmi_data.enc_out_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
+ if (hdmi->hdmi_data.colorimetry ==
+ HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_out_eitu601;
else
csc_coeff = &csc_coeff_rgb_out_eitu709;
} else if (hdmi->hdmi_data.enc_in_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
+ if (hdmi->hdmi_data.colorimetry ==
+ HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_in_eitu601;
else
csc_coeff = &csc_coeff_rgb_in_eitu709;
@@ -498,37 +473,24 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
}
}
- hdmi_writeb(hdmi, ((*csc_coeff)[0][0] & 0xff), HDMI_CSC_COEF_A1_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][0] >> 8), HDMI_CSC_COEF_A1_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][1] & 0xff), HDMI_CSC_COEF_A2_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][1] >> 8), HDMI_CSC_COEF_A2_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][2] & 0xff), HDMI_CSC_COEF_A3_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][2] >> 8), HDMI_CSC_COEF_A3_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][3] & 0xff), HDMI_CSC_COEF_A4_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][3] >> 8), HDMI_CSC_COEF_A4_MSB);
-
- hdmi_writeb(hdmi, ((*csc_coeff)[1][0] & 0xff), HDMI_CSC_COEF_B1_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][0] >> 8), HDMI_CSC_COEF_B1_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][1] & 0xff), HDMI_CSC_COEF_B2_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][1] >> 8), HDMI_CSC_COEF_B2_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][2] & 0xff), HDMI_CSC_COEF_B3_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][2] >> 8), HDMI_CSC_COEF_B3_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][3] & 0xff), HDMI_CSC_COEF_B4_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][3] >> 8), HDMI_CSC_COEF_B4_MSB);
-
- hdmi_writeb(hdmi, ((*csc_coeff)[2][0] & 0xff), HDMI_CSC_COEF_C1_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][0] >> 8), HDMI_CSC_COEF_C1_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][1] & 0xff), HDMI_CSC_COEF_C2_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][1] >> 8), HDMI_CSC_COEF_C2_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][2] & 0xff), HDMI_CSC_COEF_C3_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][2] >> 8), HDMI_CSC_COEF_C3_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][3] & 0xff), HDMI_CSC_COEF_C4_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][3] >> 8), HDMI_CSC_COEF_C4_MSB);
-
- val = hdmi_readb(hdmi, HDMI_CSC_SCALE);
- val &= ~HDMI_CSC_SCALE_CSCSCALE_MASK;
- val |= csc_scale & HDMI_CSC_SCALE_CSCSCALE_MASK;
- hdmi_writeb(hdmi, val, HDMI_CSC_SCALE);
+ /* The CSC registers are sequential, alternating MSB then LSB */
+ for (i = 0; i < ARRAY_SIZE(csc_coeff_default[0]); i++) {
+ u16 coeff_a = (*csc_coeff)[0][i];
+ u16 coeff_b = (*csc_coeff)[1][i];
+ u16 coeff_c = (*csc_coeff)[2][i];
+
+ hdmi_writeb(hdmi, coeff_a & 0xff,
+ HDMI_CSC_COEF_A1_LSB + i * 2);
+ hdmi_writeb(hdmi, coeff_a >> 8, HDMI_CSC_COEF_A1_MSB + i * 2);
+ hdmi_writeb(hdmi, coeff_b & 0xff, HDMI_CSC_COEF_B1_LSB + i * 2);
+ hdmi_writeb(hdmi, coeff_b >> 8, HDMI_CSC_COEF_B1_MSB + i * 2);
+ hdmi_writeb(hdmi, coeff_c & 0xff,
+ HDMI_CSC_COEF_C1_LSB + i * 2);
+ hdmi_writeb(hdmi, coeff_c >> 8, HDMI_CSC_COEF_C1_MSB + i * 2);
+ }
+
+ hdmi_modb(hdmi, csc_scale, HDMI_CSC_SCALE_CSCSCALE_MASK,
+ HDMI_CSC_SCALE);
}
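The hdmi_modb() helper that replaces the open-coded read/modify/write sequences from here on is added by an earlier hunk of imx-hdmi.c that falls outside this excerpt. A minimal sketch of what it presumably does, reusing the driver's existing hdmi_readb()/hdmi_writeb() accessors:

static void hdmi_modb(struct imx_hdmi *hdmi, u8 data, u8 mask, unsigned int reg)
{
	/* read the register, clear the masked field, merge in the new value */
	u8 val = hdmi_readb(hdmi, reg) & ~mask;

	val |= data & mask;
	hdmi_writeb(hdmi, val, reg);
}

The coefficient loop above additionally relies on the layout noted in its comment: the CSC registers come in MSB/LSB pairs, so HDMI_CSC_COEF_A1_LSB + i * 2 resolves to the A2, A3 and A4 registers for i = 1, 2 and 3.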
static void hdmi_video_csc(struct imx_hdmi *hdmi)
@@ -536,7 +498,6 @@ static void hdmi_video_csc(struct imx_hdmi *hdmi)
int color_depth = 0;
int interpolation = HDMI_CSC_CFG_INTMODE_DISABLE;
int decimation = 0;
- u8 val;
/* YCC422 interpolation to 444 mode */
if (is_color_space_interpolation(hdmi))
@@ -557,10 +518,8 @@ static void hdmi_video_csc(struct imx_hdmi *hdmi)
/* Configure the CSC registers */
hdmi_writeb(hdmi, interpolation | decimation, HDMI_CSC_CFG);
- val = hdmi_readb(hdmi, HDMI_CSC_SCALE);
- val &= ~HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK;
- val |= color_depth;
- hdmi_writeb(hdmi, val, HDMI_CSC_SCALE);
+ hdmi_modb(hdmi, color_depth, HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK,
+ HDMI_CSC_SCALE);
imx_hdmi_update_csc_coeffs(hdmi);
}
@@ -576,7 +535,7 @@ static void hdmi_video_packetize(struct imx_hdmi *hdmi)
unsigned int remap_size = HDMI_VP_REMAP_YCC422_16bit;
unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP;
struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
- u8 val;
+ u8 val, vp_conf;
if (hdmi_data->enc_out_format == RGB
|| hdmi_data->enc_out_format == YCBCR444) {
@@ -615,107 +574,75 @@ static void hdmi_video_packetize(struct imx_hdmi *hdmi)
HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK);
hdmi_writeb(hdmi, val, HDMI_VP_PR_CD);
- val = hdmi_readb(hdmi, HDMI_VP_STUFF);
- val &= ~HDMI_VP_STUFF_PR_STUFFING_MASK;
- val |= HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE;
- hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+ hdmi_modb(hdmi, HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE,
+ HDMI_VP_STUFF_PR_STUFFING_MASK, HDMI_VP_STUFF);
/* Data from pixel repeater block */
if (hdmi_data->pix_repet_factor > 1) {
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~(HDMI_VP_CONF_PR_EN_MASK |
- HDMI_VP_CONF_BYPASS_SELECT_MASK);
- val |= HDMI_VP_CONF_PR_EN_ENABLE |
- HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ vp_conf = HDMI_VP_CONF_PR_EN_ENABLE |
+ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER;
} else { /* data from packetizer block */
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~(HDMI_VP_CONF_PR_EN_MASK |
- HDMI_VP_CONF_BYPASS_SELECT_MASK);
- val |= HDMI_VP_CONF_PR_EN_DISABLE |
- HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ vp_conf = HDMI_VP_CONF_PR_EN_DISABLE |
+ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER;
}
- val = hdmi_readb(hdmi, HDMI_VP_STUFF);
- val &= ~HDMI_VP_STUFF_IDEFAULT_PHASE_MASK;
- val |= 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET;
- hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+ hdmi_modb(hdmi, vp_conf,
+ HDMI_VP_CONF_PR_EN_MASK |
+ HDMI_VP_CONF_BYPASS_SELECT_MASK, HDMI_VP_CONF);
+
+ hdmi_modb(hdmi, 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET,
+ HDMI_VP_STUFF_IDEFAULT_PHASE_MASK, HDMI_VP_STUFF);
hdmi_writeb(hdmi, remap_size, HDMI_VP_REMAP);
if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_PP) {
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
- HDMI_VP_CONF_PP_EN_ENMASK |
- HDMI_VP_CONF_YCC422_EN_MASK);
- val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
- HDMI_VP_CONF_PP_EN_ENABLE |
- HDMI_VP_CONF_YCC422_EN_DISABLE;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ vp_conf = HDMI_VP_CONF_BYPASS_EN_DISABLE |
+ HDMI_VP_CONF_PP_EN_ENABLE |
+ HDMI_VP_CONF_YCC422_EN_DISABLE;
} else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422) {
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
- HDMI_VP_CONF_PP_EN_ENMASK |
- HDMI_VP_CONF_YCC422_EN_MASK);
- val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
- HDMI_VP_CONF_PP_EN_DISABLE |
- HDMI_VP_CONF_YCC422_EN_ENABLE;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ vp_conf = HDMI_VP_CONF_BYPASS_EN_DISABLE |
+ HDMI_VP_CONF_PP_EN_DISABLE |
+ HDMI_VP_CONF_YCC422_EN_ENABLE;
} else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS) {
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
- HDMI_VP_CONF_PP_EN_ENMASK |
- HDMI_VP_CONF_YCC422_EN_MASK);
- val |= HDMI_VP_CONF_BYPASS_EN_ENABLE |
- HDMI_VP_CONF_PP_EN_DISABLE |
- HDMI_VP_CONF_YCC422_EN_DISABLE;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ vp_conf = HDMI_VP_CONF_BYPASS_EN_ENABLE |
+ HDMI_VP_CONF_PP_EN_DISABLE |
+ HDMI_VP_CONF_YCC422_EN_DISABLE;
} else {
return;
}
- val = hdmi_readb(hdmi, HDMI_VP_STUFF);
- val &= ~(HDMI_VP_STUFF_PP_STUFFING_MASK |
- HDMI_VP_STUFF_YCC422_STUFFING_MASK);
- val |= HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE |
- HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE;
- hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+ hdmi_modb(hdmi, vp_conf,
+ HDMI_VP_CONF_BYPASS_EN_MASK | HDMI_VP_CONF_PP_EN_ENMASK |
+ HDMI_VP_CONF_YCC422_EN_MASK, HDMI_VP_CONF);
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~HDMI_VP_CONF_OUTPUT_SELECTOR_MASK;
- val |= output_select;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ hdmi_modb(hdmi, HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE |
+ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE,
+ HDMI_VP_STUFF_PP_STUFFING_MASK |
+ HDMI_VP_STUFF_YCC422_STUFFING_MASK, HDMI_VP_STUFF);
+
+ hdmi_modb(hdmi, output_select, HDMI_VP_CONF_OUTPUT_SELECTOR_MASK,
+ HDMI_VP_CONF);
}
static inline void hdmi_phy_test_clear(struct imx_hdmi *hdmi,
unsigned char bit)
{
- u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
- val &= ~HDMI_PHY_TST0_TSTCLR_MASK;
- val |= (bit << HDMI_PHY_TST0_TSTCLR_OFFSET) &
- HDMI_PHY_TST0_TSTCLR_MASK;
- hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+ hdmi_modb(hdmi, bit << HDMI_PHY_TST0_TSTCLR_OFFSET,
+ HDMI_PHY_TST0_TSTCLR_MASK, HDMI_PHY_TST0);
}
static inline void hdmi_phy_test_enable(struct imx_hdmi *hdmi,
unsigned char bit)
{
- u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
- val &= ~HDMI_PHY_TST0_TSTEN_MASK;
- val |= (bit << HDMI_PHY_TST0_TSTEN_OFFSET) &
- HDMI_PHY_TST0_TSTEN_MASK;
- hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+ hdmi_modb(hdmi, bit << HDMI_PHY_TST0_TSTEN_OFFSET,
+ HDMI_PHY_TST0_TSTEN_MASK, HDMI_PHY_TST0);
}
static inline void hdmi_phy_test_clock(struct imx_hdmi *hdmi,
unsigned char bit)
{
- u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
- val &= ~HDMI_PHY_TST0_TSTCLK_MASK;
- val |= (bit << HDMI_PHY_TST0_TSTCLK_OFFSET) &
- HDMI_PHY_TST0_TSTCLK_MASK;
- hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+ hdmi_modb(hdmi, bit << HDMI_PHY_TST0_TSTCLK_OFFSET,
+ HDMI_PHY_TST0_TSTCLK_MASK, HDMI_PHY_TST0);
}
static inline void hdmi_phy_test_din(struct imx_hdmi *hdmi,
@@ -806,19 +733,94 @@ static void imx_hdmi_phy_sel_interface_control(struct imx_hdmi *hdmi, u8 enable)
HDMI_PHY_CONF0_SELDIPIF_MASK);
}
+enum {
+ RES_8,
+ RES_10,
+ RES_12,
+ RES_MAX,
+};
+
+struct mpll_config {
+ unsigned long mpixelclock;
+ struct {
+ u16 cpce;
+ u16 gmp;
+ } res[RES_MAX];
+};
+
+static const struct mpll_config mpll_config[] = {
+ {
+ 45250000, {
+ { 0x01e0, 0x0000 },
+ { 0x21e1, 0x0000 },
+ { 0x41e2, 0x0000 }
+ },
+ }, {
+ 92500000, {
+ { 0x0140, 0x0005 },
+ { 0x2141, 0x0005 },
+ { 0x4142, 0x0005 },
+ },
+ }, {
+ 148500000, {
+ { 0x00a0, 0x000a },
+ { 0x20a1, 0x000a },
+ { 0x40a2, 0x000a },
+ },
+ }, {
+ ~0UL, {
+ { 0x00a0, 0x000a },
+ { 0x2001, 0x000f },
+ { 0x4002, 0x000f },
+ },
+ }
+};
+
+struct curr_ctrl {
+ unsigned long mpixelclock;
+ u16 curr[RES_MAX];
+};
+
+static const struct curr_ctrl curr_ctrl[] = {
+ /* pixelclk bpp8 bpp10 bpp12 */
+ {
+ 54000000, { 0x091c, 0x091c, 0x06dc },
+ }, {
+ 58400000, { 0x091c, 0x06dc, 0x06dc },
+ }, {
+ 72000000, { 0x06dc, 0x06dc, 0x091c },
+ }, {
+ 74250000, { 0x06dc, 0x0b5c, 0x091c },
+ }, {
+ 118800000, { 0x091c, 0x091c, 0x06dc },
+ }, {
+ 216000000, { 0x06dc, 0x0b5c, 0x091c },
+ }
+};
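hdmi_phy_configure() below walks both tables with the same rule: pick the first entry whose mpixelclock is greater than or equal to the mode's pixel clock. The mpll_config loop stops one entry short of the end, so its ~0UL catch-all always matches; curr_ctrl has no catch-all, which is why pixel clocks above 216 MHz are rejected with -EINVAL. A worked lookup, with the mode clock assumed for illustration:

/*
 * 74.25 MHz mode at 8 bits per colour (res_idx = RES_8):
 *   mpll_config: 74250000 > 45250000 but <= 92500000 -> entry 1: cpce 0x0140, gmp 0x0005
 *   curr_ctrl:   74250000 > 72000000 but <= 74250000 -> entry 3: curr 0x06dc
 */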
+
static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
unsigned char res, int cscon)
{
+ unsigned res_idx, i;
u8 val, msec;
- /* color resolution 0 is 8 bit colour depth */
- if (!res)
- res = 8;
-
if (prep)
return -EINVAL;
- else if (res != 8 && res != 12)
+
+ switch (res) {
+ case 0: /* color resolution 0 is 8 bit colour depth */
+ case 8:
+ res_idx = RES_8;
+ break;
+ case 10:
+ res_idx = RES_10;
+ break;
+ case 12:
+ res_idx = RES_12;
+ break;
+ default:
return -EINVAL;
+ }
/* Enable csc path */
if (cscon)
@@ -845,165 +847,30 @@ static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
HDMI_PHY_I2CM_SLAVE_ADDR);
hdmi_phy_test_clear(hdmi, 0);
- if (hdmi->hdmi_data.video_mode.mpixelclock <= 45250000) {
- switch (res) {
- case 8:
- /* PLL/MPLL Cfg */
- hdmi_phy_i2c_write(hdmi, 0x01e0, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0000, 0x15); /* GMPCTRL */
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x21e1, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x41e2, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
+ /* PLL/MPLL Cfg - always match on final entry */
+ for (i = 0; i < ARRAY_SIZE(mpll_config) - 1; i++)
+ if (hdmi->hdmi_data.video_mode.mpixelclock <=
+ mpll_config[i].mpixelclock)
break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 92500000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x0140, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x2141, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x4142, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 148500000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x20a1, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x40a2, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
- default:
- return -EINVAL;
- }
- } else {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x2001, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x4002, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
- default:
- return -EINVAL;
- }
- }
- if (hdmi->hdmi_data.video_mode.mpixelclock <= 54000000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); /* CURRCTRL */
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 58400000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 72000000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 74250000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 118800000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 216000000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].cpce, 0x06);
+ hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].gmp, 0x15);
+
+ for (i = 0; i < ARRAY_SIZE(curr_ctrl); i++)
+ if (hdmi->hdmi_data.video_mode.mpixelclock <=
+ curr_ctrl[i].mpixelclock)
break;
- default:
- return -EINVAL;
- }
- } else {
+
+ if (i >= ARRAY_SIZE(curr_ctrl)) {
dev_err(hdmi->dev,
"Pixel clock %d - unsupported by HDMI\n",
hdmi->hdmi_data.video_mode.mpixelclock);
return -EINVAL;
}
+ /* CURRCTRL */
+ hdmi_phy_i2c_write(hdmi, curr_ctrl[i].curr[res_idx], 0x10);
+
hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
/* RESISTANCE TERM 133Ohm Cfg */
@@ -1072,7 +939,7 @@ static int imx_hdmi_phy_init(struct imx_hdmi *hdmi)
static void hdmi_tx_hdcp_config(struct imx_hdmi *hdmi)
{
- u8 de, val;
+ u8 de;
if (hdmi->hdmi_data.video_mode.mdataenablepolarity)
de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH;
@@ -1080,20 +947,13 @@ static void hdmi_tx_hdcp_config(struct imx_hdmi *hdmi)
de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW;
/* disable rx detect */
- val = hdmi_readb(hdmi, HDMI_A_HDCPCFG0);
- val &= HDMI_A_HDCPCFG0_RXDETECT_MASK;
- val |= HDMI_A_HDCPCFG0_RXDETECT_DISABLE;
- hdmi_writeb(hdmi, val, HDMI_A_HDCPCFG0);
+ hdmi_modb(hdmi, HDMI_A_HDCPCFG0_RXDETECT_DISABLE,
+ HDMI_A_HDCPCFG0_RXDETECT_MASK, HDMI_A_HDCPCFG0);
- val = hdmi_readb(hdmi, HDMI_A_VIDPOLCFG);
- val &= HDMI_A_VIDPOLCFG_DATAENPOL_MASK;
- val |= de;
- hdmi_writeb(hdmi, val, HDMI_A_VIDPOLCFG);
+ hdmi_modb(hdmi, de, HDMI_A_VIDPOLCFG_DATAENPOL_MASK, HDMI_A_VIDPOLCFG);
- val = hdmi_readb(hdmi, HDMI_A_HDCPCFG1);
- val &= HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK;
- val |= HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE;
- hdmi_writeb(hdmi, val, HDMI_A_HDCPCFG1);
+ hdmi_modb(hdmi, HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE,
+ HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK, HDMI_A_HDCPCFG1);
}
static void hdmi_config_AVI(struct imx_hdmi *hdmi)
@@ -1317,11 +1177,7 @@ static void imx_hdmi_enable_video_path(struct imx_hdmi *hdmi)
static void hdmi_enable_audio_clk(struct imx_hdmi *hdmi)
{
- u8 clkdis;
-
- clkdis = hdmi_readb(hdmi, HDMI_MC_CLKDIS);
- clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
- hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+ hdmi_modb(hdmi, 0, HDMI_MC_CLKDIS_AUDCLK_DISABLE, HDMI_MC_CLKDIS);
}
/* Workaround to clear the overflow condition */
@@ -1456,9 +1312,6 @@ static int imx_hdmi_fb_registered(struct imx_hdmi *hdmi)
/* Clear Hotplug interrupts */
hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
- /* Unmute interrupts */
- hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
-
return 0;
}
@@ -1527,12 +1380,9 @@ static void imx_hdmi_poweroff(struct imx_hdmi *hdmi)
static enum drm_connector_status imx_hdmi_connector_detect(struct drm_connector
*connector, bool force)
{
- /* FIXME */
- return connector_status_connected;
-}
-
-static void imx_hdmi_connector_destroy(struct drm_connector *connector)
-{
+ struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
+ connector);
+ return hdmi->connector_status;
}
static int imx_hdmi_connector_get_modes(struct drm_connector *connector)
@@ -1560,13 +1410,6 @@ static int imx_hdmi_connector_get_modes(struct drm_connector *connector)
return 0;
}
-static int imx_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
-
- return MODE_OK;
-}
-
static struct drm_encoder *imx_hdmi_connector_best_encoder(struct drm_connector
*connector)
{
@@ -1614,28 +1457,21 @@ static void imx_hdmi_encoder_prepare(struct drm_encoder *encoder)
struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
imx_hdmi_poweroff(hdmi);
- imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_NONE,
- V4L2_PIX_FMT_RGB24);
+ imx_drm_panel_format(encoder, V4L2_PIX_FMT_RGB24);
}
static void imx_hdmi_encoder_commit(struct drm_encoder *encoder)
{
struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
- int mux = imx_drm_encoder_get_mux_id(hdmi->imx_drm_encoder,
- encoder->crtc);
+ int mux = imx_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder);
imx_hdmi_set_ipu_di_mux(hdmi, mux);
imx_hdmi_poweron(hdmi);
}
-static void imx_hdmi_encoder_destroy(struct drm_encoder *encoder)
-{
- return;
-}
-
static struct drm_encoder_funcs imx_hdmi_encoder_funcs = {
- .destroy = imx_hdmi_encoder_destroy,
+ .destroy = imx_drm_encoder_destroy,
};
static struct drm_encoder_helper_funcs imx_hdmi_encoder_helper_funcs = {
@@ -1651,21 +1487,32 @@ static struct drm_connector_funcs imx_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_hdmi_connector_detect,
- .destroy = imx_hdmi_connector_destroy,
+ .destroy = imx_drm_connector_destroy,
};
static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = {
.get_modes = imx_hdmi_connector_get_modes,
- .mode_valid = imx_hdmi_connector_mode_valid,
+ .mode_valid = imx_drm_connector_mode_valid,
.best_encoder = imx_hdmi_connector_best_encoder,
};
+static irqreturn_t imx_hdmi_hardirq(int irq, void *dev_id)
+{
+ struct imx_hdmi *hdmi = dev_id;
+ u8 intr_stat;
+
+ intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
+ if (intr_stat)
+ hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
+
+ return intr_stat ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
{
struct imx_hdmi *hdmi = dev_id;
u8 intr_stat;
u8 phy_int_pol;
- u8 val;
intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
@@ -1675,55 +1522,47 @@ static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
if (phy_int_pol & HDMI_PHY_HPD) {
dev_dbg(hdmi->dev, "EVENT=plugin\n");
- val = hdmi_readb(hdmi, HDMI_PHY_POL0);
- val &= ~HDMI_PHY_HPD;
- hdmi_writeb(hdmi, val, HDMI_PHY_POL0);
+ hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0);
+ hdmi->connector_status = connector_status_connected;
imx_hdmi_poweron(hdmi);
} else {
dev_dbg(hdmi->dev, "EVENT=plugout\n");
- val = hdmi_readb(hdmi, HDMI_PHY_POL0);
- val |= HDMI_PHY_HPD;
- hdmi_writeb(hdmi, val, HDMI_PHY_POL0);
+ hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD,
+ HDMI_PHY_POL0);
+ hdmi->connector_status = connector_status_disconnected;
imx_hdmi_poweroff(hdmi);
}
+ drm_helper_hpd_irq_event(hdmi->connector.dev);
}
hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
+ hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
return IRQ_HANDLED;
}
-static int imx_hdmi_register(struct imx_hdmi *hdmi)
+static int imx_hdmi_register(struct drm_device *drm, struct imx_hdmi *hdmi)
{
int ret;
- hdmi->connector.funcs = &imx_hdmi_connector_funcs;
- hdmi->encoder.funcs = &imx_hdmi_encoder_funcs;
+ ret = imx_drm_encoder_parse_of(drm, &hdmi->encoder,
+ hdmi->dev->of_node);
+ if (ret)
+ return ret;
- hdmi->encoder.encoder_type = DRM_MODE_ENCODER_TMDS;
- hdmi->connector.connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
drm_encoder_helper_add(&hdmi->encoder, &imx_hdmi_encoder_helper_funcs);
- ret = imx_drm_add_encoder(&hdmi->encoder, &hdmi->imx_drm_encoder,
- THIS_MODULE);
- if (ret) {
- dev_err(hdmi->dev, "adding encoder failed: %d\n", ret);
- return ret;
- }
+ drm_encoder_init(drm, &hdmi->encoder, &imx_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
drm_connector_helper_add(&hdmi->connector,
&imx_hdmi_connector_helper_funcs);
-
- ret = imx_drm_add_connector(&hdmi->connector,
- &hdmi->imx_drm_connector, THIS_MODULE);
- if (ret) {
- imx_drm_remove_encoder(hdmi->imx_drm_encoder);
- dev_err(hdmi->dev, "adding connector failed: %d\n", ret);
- return ret;
- }
+ drm_connector_init(drm, &hdmi->connector, &imx_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
hdmi->connector.encoder = &hdmi->encoder;
@@ -1750,28 +1589,33 @@ static const struct of_device_id imx_hdmi_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
-static int imx_hdmi_platform_probe(struct platform_device *pdev)
+static int imx_hdmi_bind(struct device *dev, struct device *master, void *data)
{
+ struct platform_device *pdev = to_platform_device(dev);
const struct of_device_id *of_id =
- of_match_device(imx_hdmi_dt_ids, &pdev->dev);
- struct device_node *np = pdev->dev.of_node;
+ of_match_device(imx_hdmi_dt_ids, dev);
+ struct drm_device *drm = data;
+ struct device_node *np = dev->of_node;
struct device_node *ddc_node;
struct imx_hdmi *hdmi;
struct resource *iores;
int ret, irq;
- hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
- hdmi->dev = &pdev->dev;
+ hdmi->dev = dev;
+ hdmi->connector_status = connector_status_disconnected;
+ hdmi->sample_rate = 48000;
+ hdmi->ratio = 100;
if (of_id) {
const struct platform_device_id *device_id = of_id->data;
hdmi->dev_type = device_id->driver_data;
}
- ddc_node = of_parse_phandle(np, "ddc", 0);
+ ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
if (ddc_node) {
hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
if (!hdmi->ddc)
@@ -1786,13 +1630,14 @@ static int imx_hdmi_platform_probe(struct platform_device *pdev)
if (irq < 0)
return -EINVAL;
- ret = devm_request_irq(&pdev->dev, irq, imx_hdmi_irq, 0,
- dev_name(&pdev->dev), hdmi);
+ ret = devm_request_threaded_irq(dev, irq, imx_hdmi_hardirq,
+ imx_hdmi_irq, IRQF_SHARED,
+ dev_name(dev), hdmi);
if (ret)
return ret;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->regs = devm_ioremap_resource(&pdev->dev, iores);
+ hdmi->regs = devm_ioremap_resource(dev, iores);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
@@ -1831,7 +1676,7 @@ static int imx_hdmi_platform_probe(struct platform_device *pdev)
}
/* Product and revision IDs */
- dev_info(&pdev->dev,
+ dev_info(dev,
"Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n",
hdmi_readb(hdmi, HDMI_DESIGN_ID),
hdmi_readb(hdmi, HDMI_REVISION_ID),
@@ -1859,13 +1704,14 @@ static int imx_hdmi_platform_probe(struct platform_device *pdev)
if (ret)
goto err_iahb;
- ret = imx_hdmi_register(hdmi);
+ ret = imx_hdmi_register(drm, hdmi);
if (ret)
goto err_iahb;
- imx_drm_encoder_add_possible_crtcs(hdmi->imx_drm_encoder, np);
+ /* Unmute interrupts */
+ hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
- platform_set_drvdata(pdev, hdmi);
+ dev_set_drvdata(dev, hdmi);
return 0;
@@ -1877,20 +1723,35 @@ err_isfr:
return ret;
}
-static int imx_hdmi_platform_remove(struct platform_device *pdev)
+static void imx_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
{
- struct imx_hdmi *hdmi = platform_get_drvdata(pdev);
- struct drm_connector *connector = &hdmi->connector;
- struct drm_encoder *encoder = &hdmi->encoder;
+ struct imx_hdmi *hdmi = dev_get_drvdata(dev);
- drm_mode_connector_detach_encoder(connector, encoder);
- imx_drm_remove_connector(hdmi->imx_drm_connector);
- imx_drm_remove_encoder(hdmi->imx_drm_encoder);
+ /* Disable all interrupts */
+ hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
+
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
i2c_put_adapter(hdmi->ddc);
+}
+
+static const struct component_ops hdmi_ops = {
+ .bind = imx_hdmi_bind,
+ .unbind = imx_hdmi_unbind,
+};
+
+static int imx_hdmi_platform_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &hdmi_ops);
+}
+static int imx_hdmi_platform_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &hdmi_ops);
return 0;
}
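This is the pattern every output driver in the series now follows: probe() and remove() only register and unregister a component, while the former probe body moves into a bind() callback that receives the drm_device from the imx-drm core once all components are present. The master side lives in imx-drm-core.c, outside this excerpt; the sketch below only illustrates how the component framework is typically driven (current component API, hypothetical example_* names and helper, error handling omitted — not the actual imx-drm-core code):

#include <linux/component.h>

static int compare_of(struct device *dev, void *data)
{
	/* match a registered component by its device-tree node */
	return dev->of_node == data;
}

static int example_master_bind(struct device *dev)
{
	struct drm_device *drm = example_create_drm_device(dev); /* hypothetical helper */

	dev_set_drvdata(dev, drm);
	/* calls each matched component's ->bind() with 'drm' as its data argument */
	return component_bind_all(dev, drm);
}

static void example_master_unbind(struct device *dev)
{
	component_unbind_all(dev, dev_get_drvdata(dev));
}

static const struct component_master_ops example_master_ops = {
	.bind	= example_master_bind,
	.unbind	= example_master_unbind,
};

static int example_master_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *np;

	/* one match entry per expected output device (HDMI, LDB, TVE, CRTC, ...) */
	for_each_available_child_of_node(pdev->dev.of_node, np)
		component_match_add(&pdev->dev, &match, compare_of, np);

	return component_master_add_with_match(&pdev->dev, &example_master_ops, match);
}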
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c
index 7e593296ac47..fe4c1ef4e7a5 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/staging/imx-drm/imx-ldb.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/clk.h>
+#include <linux/component.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
@@ -58,9 +59,8 @@ struct imx_ldb;
struct imx_ldb_channel {
struct imx_ldb *ldb;
struct drm_connector connector;
- struct imx_drm_connector *imx_drm_connector;
struct drm_encoder encoder;
- struct imx_drm_encoder *imx_drm_encoder;
+ struct device_node *child;
int chno;
void *edid;
int edid_len;
@@ -91,11 +91,6 @@ static enum drm_connector_status imx_ldb_connector_detect(
return connector_status_connected;
}
-static void imx_ldb_connector_destroy(struct drm_connector *connector)
-{
- /* do not free here */
-}
-
static int imx_ldb_connector_get_modes(struct drm_connector *connector)
{
struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
@@ -111,6 +106,8 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode;
mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return -EINVAL;
drm_mode_copy(mode, &imx_ldb_ch->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
@@ -120,12 +117,6 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
return num_modes;
}
-static int imx_ldb_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return 0;
-}
-
static struct drm_encoder *imx_ldb_connector_best_encoder(
struct drm_connector *connector)
{
@@ -168,7 +159,9 @@ static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
/* set display clock mux to LDB input clock */
ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk[chno]);
if (ret)
- dev_err(ldb->dev, "unable to set di%d parent clock to ldb_di%d\n", mux, chno);
+ dev_err(ldb->dev,
+ "unable to set di%d parent clock to ldb_di%d\n", mux,
+ chno);
}
static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
@@ -179,8 +172,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
u32 pixel_fmt;
unsigned long serial_clk;
unsigned long di_clk = mode->clock * 1000;
- int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->imx_drm_encoder,
- encoder->crtc);
+ int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
/* dual channel LVDS mode */
@@ -189,7 +181,8 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
} else {
serial_clk = 7000UL * mode->clock;
- imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, di_clk);
+ imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
+ di_clk);
}
switch (imx_ldb_ch->chno) {
@@ -207,8 +200,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
pixel_fmt = V4L2_PIX_FMT_RGB24;
}
- imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_LVDS,
- pixel_fmt);
+ imx_drm_panel_format(encoder, pixel_fmt);
}
static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
@@ -216,8 +208,7 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
- int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->imx_drm_encoder,
- encoder->crtc);
+ int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
if (dual) {
clk_prepare_enable(ldb->clk[0]);
@@ -316,26 +307,21 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
}
}
-static void imx_ldb_encoder_destroy(struct drm_encoder *encoder)
-{
- /* do not free here */
-}
-
static struct drm_connector_funcs imx_ldb_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_ldb_connector_detect,
- .destroy = imx_ldb_connector_destroy,
+ .destroy = imx_drm_connector_destroy,
};
static struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
.get_modes = imx_ldb_connector_get_modes,
.best_encoder = imx_ldb_connector_best_encoder,
- .mode_valid = imx_ldb_connector_mode_valid,
+ .mode_valid = imx_drm_connector_mode_valid,
};
static struct drm_encoder_funcs imx_ldb_encoder_funcs = {
- .destroy = imx_ldb_encoder_destroy,
+ .destroy = imx_drm_encoder_destroy,
};
static struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
@@ -351,56 +337,47 @@ static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
{
char clkname[16];
- sprintf(clkname, "di%d", chno);
+ snprintf(clkname, sizeof(clkname), "di%d", chno);
ldb->clk[chno] = devm_clk_get(ldb->dev, clkname);
if (IS_ERR(ldb->clk[chno]))
return PTR_ERR(ldb->clk[chno]);
- sprintf(clkname, "di%d_pll", chno);
+ snprintf(clkname, sizeof(clkname), "di%d_pll", chno);
ldb->clk_pll[chno] = devm_clk_get(ldb->dev, clkname);
return PTR_ERR_OR_ZERO(ldb->clk_pll[chno]);
}
-static int imx_ldb_register(struct imx_ldb_channel *imx_ldb_ch)
+static int imx_ldb_register(struct drm_device *drm,
+ struct imx_ldb_channel *imx_ldb_ch)
{
- int ret;
struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int ret;
+
+ ret = imx_drm_encoder_parse_of(drm, &imx_ldb_ch->encoder,
+ imx_ldb_ch->child);
+ if (ret)
+ return ret;
ret = imx_ldb_get_clk(ldb, imx_ldb_ch->chno);
if (ret)
return ret;
+
if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
- ret |= imx_ldb_get_clk(ldb, 1);
+ ret = imx_ldb_get_clk(ldb, 1);
if (ret)
return ret;
}
- imx_ldb_ch->connector.funcs = &imx_ldb_connector_funcs;
- imx_ldb_ch->encoder.funcs = &imx_ldb_encoder_funcs;
-
- imx_ldb_ch->encoder.encoder_type = DRM_MODE_ENCODER_LVDS;
- imx_ldb_ch->connector.connector_type = DRM_MODE_CONNECTOR_LVDS;
-
drm_encoder_helper_add(&imx_ldb_ch->encoder,
&imx_ldb_encoder_helper_funcs);
- ret = imx_drm_add_encoder(&imx_ldb_ch->encoder,
- &imx_ldb_ch->imx_drm_encoder, THIS_MODULE);
- if (ret) {
- dev_err(ldb->dev, "adding encoder failed with %d\n", ret);
- return ret;
- }
+ drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
drm_connector_helper_add(&imx_ldb_ch->connector,
&imx_ldb_connector_helper_funcs);
-
- ret = imx_drm_add_connector(&imx_ldb_ch->connector,
- &imx_ldb_ch->imx_drm_connector, THIS_MODULE);
- if (ret) {
- imx_drm_remove_encoder(imx_ldb_ch->imx_drm_encoder);
- dev_err(ldb->dev, "adding connector failed with %d\n", ret);
- return ret;
- }
+ drm_connector_init(drm, &imx_ldb_ch->connector,
+ &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
&imx_ldb_ch->encoder);
@@ -459,11 +436,12 @@ static const struct of_device_id imx_ldb_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids);
-static int imx_ldb_probe(struct platform_device *pdev)
+static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
{
- struct device_node *np = pdev->dev.of_node;
+ struct drm_device *drm = data;
+ struct device_node *np = dev->of_node;
const struct of_device_id *of_id =
- of_match_device(imx_ldb_dt_ids, &pdev->dev);
+ of_match_device(imx_ldb_dt_ids, dev);
struct device_node *child;
const u8 *edidp;
struct imx_ldb *imx_ldb;
@@ -473,17 +451,17 @@ static int imx_ldb_probe(struct platform_device *pdev)
int ret;
int i;
- imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
+ imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL);
if (!imx_ldb)
return -ENOMEM;
imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
if (IS_ERR(imx_ldb->regmap)) {
- dev_err(&pdev->dev, "failed to get parent regmap\n");
+ dev_err(dev, "failed to get parent regmap\n");
return PTR_ERR(imx_ldb->regmap);
}
- imx_ldb->dev = &pdev->dev;
+ imx_ldb->dev = dev;
if (of_id)
imx_ldb->lvds_mux = of_id->data;
@@ -521,7 +499,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
return -EINVAL;
if (dual && i > 0) {
- dev_warn(&pdev->dev, "dual-channel mode, ignoring second output\n");
+ dev_warn(dev, "dual-channel mode, ignoring second output\n");
continue;
}
@@ -531,6 +509,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
channel = &imx_ldb->channel[i];
channel->ldb = imx_ldb;
channel->chno = i;
+ channel->child = child;
edidp = of_get_property(child, "edid", &channel->edid_len);
if (edidp) {
@@ -553,54 +532,67 @@ static int imx_ldb_probe(struct platform_device *pdev)
case LVDS_BIT_MAP_SPWG:
if (datawidth == 24) {
if (i == 0 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+ imx_ldb->ldb_ctrl |=
+ LDB_DATA_WIDTH_CH0_24;
if (i == 1 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+ imx_ldb->ldb_ctrl |=
+ LDB_DATA_WIDTH_CH1_24;
}
break;
case LVDS_BIT_MAP_JEIDA:
if (datawidth == 18) {
- dev_err(&pdev->dev, "JEIDA standard only supported in 24 bit\n");
+ dev_err(dev, "JEIDA standard only supported in 24 bit\n");
return -EINVAL;
}
if (i == 0 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 | LDB_BIT_MAP_CH0_JEIDA;
+ imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
+ LDB_BIT_MAP_CH0_JEIDA;
if (i == 1 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 | LDB_BIT_MAP_CH1_JEIDA;
+ imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
+ LDB_BIT_MAP_CH1_JEIDA;
break;
default:
- dev_err(&pdev->dev, "data mapping not specified or invalid\n");
+ dev_err(dev, "data mapping not specified or invalid\n");
return -EINVAL;
}
- ret = imx_ldb_register(channel);
+ ret = imx_ldb_register(drm, channel);
if (ret)
return ret;
-
- imx_drm_encoder_add_possible_crtcs(channel->imx_drm_encoder, child);
}
- platform_set_drvdata(pdev, imx_ldb);
+ dev_set_drvdata(dev, imx_ldb);
return 0;
}
-static int imx_ldb_remove(struct platform_device *pdev)
+static void imx_ldb_unbind(struct device *dev, struct device *master,
+ void *data)
{
- struct imx_ldb *imx_ldb = platform_get_drvdata(pdev);
+ struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
int i;
for (i = 0; i < 2; i++) {
struct imx_ldb_channel *channel = &imx_ldb->channel[i];
- struct drm_connector *connector = &channel->connector;
- struct drm_encoder *encoder = &channel->encoder;
-
- drm_mode_connector_detach_encoder(connector, encoder);
- imx_drm_remove_connector(channel->imx_drm_connector);
- imx_drm_remove_encoder(channel->imx_drm_encoder);
+ channel->connector.funcs->destroy(&channel->connector);
+ channel->encoder.funcs->destroy(&channel->encoder);
}
+}
+
+static const struct component_ops imx_ldb_ops = {
+ .bind = imx_ldb_bind,
+ .unbind = imx_ldb_unbind,
+};
+static int imx_ldb_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &imx_ldb_ops);
+}
+
+static int imx_ldb_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx_ldb_ops);
return 0;
}
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 9abc7ca8b6cf..575533f4fd64 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/component.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
@@ -30,6 +31,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include "ipu-v3/imx-ipu-v3.h"
#include "imx-drm.h"
#define TVE_COM_CONF_REG 0x00
@@ -110,9 +112,7 @@ enum {
struct imx_tve {
struct drm_connector connector;
- struct imx_drm_connector *imx_drm_connector;
struct drm_encoder encoder;
- struct imx_drm_encoder *imx_drm_encoder;
struct device *dev;
spinlock_t lock; /* register lock */
bool enabled;
@@ -225,11 +225,6 @@ static enum drm_connector_status imx_tve_connector_detect(
return connector_status_connected;
}
-static void imx_tve_connector_destroy(struct drm_connector *connector)
-{
- /* do not free here */
-}
-
static int imx_tve_connector_get_modes(struct drm_connector *connector)
{
struct imx_tve *tve = con_to_tve(connector);
@@ -254,6 +249,11 @@ static int imx_tve_connector_mode_valid(struct drm_connector *connector,
{
struct imx_tve *tve = con_to_tve(connector);
unsigned long rate;
+ int ret;
+
+ ret = imx_drm_connector_mode_valid(connector, mode);
+ if (ret != MODE_OK)
+ return ret;
/* pixel clock with 2x oversampling */
rate = clk_round_rate(tve->clk, 2000UL * mode->clock) / 2000;
@@ -305,13 +305,11 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
switch (tve->mode) {
case TVE_MODE_VGA:
- imx_drm_crtc_panel_format_pins(encoder->crtc,
- DRM_MODE_ENCODER_DAC, IPU_PIX_FMT_GBR24,
+ imx_drm_panel_format_pins(encoder, IPU_PIX_FMT_GBR24,
tve->hsync_pin, tve->vsync_pin);
break;
case TVE_MODE_TVOUT:
- imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_TVDAC,
- V4L2_PIX_FMT_YUV444);
+ imx_drm_panel_format(encoder, V4L2_PIX_FMT_YUV444);
break;
}
}
@@ -364,16 +362,11 @@ static void imx_tve_encoder_disable(struct drm_encoder *encoder)
tve_disable(tve);
}
-static void imx_tve_encoder_destroy(struct drm_encoder *encoder)
-{
- /* do not free here */
-}
-
static struct drm_connector_funcs imx_tve_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_tve_connector_detect,
- .destroy = imx_tve_connector_destroy,
+ .destroy = imx_drm_connector_destroy,
};
static struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
@@ -383,7 +376,7 @@ static struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
};
static struct drm_encoder_funcs imx_tve_encoder_funcs = {
- .destroy = imx_tve_encoder_destroy,
+ .destroy = imx_drm_encoder_destroy,
};
static struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
@@ -503,34 +496,27 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
return 0;
}
-static int imx_tve_register(struct imx_tve *tve)
+static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
{
+ int encoder_type;
int ret;
- tve->connector.funcs = &imx_tve_connector_funcs;
- tve->encoder.funcs = &imx_tve_encoder_funcs;
+ encoder_type = tve->mode == TVE_MODE_VGA ?
+ DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
- tve->encoder.encoder_type = DRM_MODE_ENCODER_NONE;
- tve->connector.connector_type = DRM_MODE_CONNECTOR_VGA;
+ ret = imx_drm_encoder_parse_of(drm, &tve->encoder,
+ tve->dev->of_node);
+ if (ret)
+ return ret;
drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
- ret = imx_drm_add_encoder(&tve->encoder, &tve->imx_drm_encoder,
- THIS_MODULE);
- if (ret) {
- dev_err(tve->dev, "adding encoder failed with %d\n", ret);
- return ret;
- }
+ drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
+ encoder_type);
drm_connector_helper_add(&tve->connector,
&imx_tve_connector_helper_funcs);
-
- ret = imx_drm_add_connector(&tve->connector,
- &tve->imx_drm_connector, THIS_MODULE);
- if (ret) {
- imx_drm_remove_encoder(tve->imx_drm_encoder);
- dev_err(tve->dev, "adding connector failed with %d\n", ret);
- return ret;
- }
+ drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
drm_mode_connector_attach_encoder(&tve->connector, &tve->encoder);
@@ -576,9 +562,11 @@ static const int of_get_tve_mode(struct device_node *np)
return -EINVAL;
}
-static int imx_tve_probe(struct platform_device *pdev)
+static int imx_tve_bind(struct device *dev, struct device *master, void *data)
{
- struct device_node *np = pdev->dev.of_node;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct device_node *np = dev->of_node;
struct device_node *ddc_node;
struct imx_tve *tve;
struct resource *res;
@@ -587,14 +575,14 @@ static int imx_tve_probe(struct platform_device *pdev)
int irq;
int ret;
- tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL);
+ tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
if (!tve)
return -ENOMEM;
- tve->dev = &pdev->dev;
+ tve->dev = dev;
spin_lock_init(&tve->lock);
- ddc_node = of_parse_phandle(np, "ddc", 0);
+ ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0);
if (ddc_node) {
tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
@@ -602,7 +590,7 @@ static int imx_tve_probe(struct platform_device *pdev)
tve->mode = of_get_tve_mode(np);
if (tve->mode != TVE_MODE_VGA) {
- dev_err(&pdev->dev, "only VGA mode supported, currently\n");
+ dev_err(dev, "only VGA mode supported, currently\n");
return -EINVAL;
}
@@ -611,7 +599,7 @@ static int imx_tve_probe(struct platform_device *pdev)
&tve->hsync_pin);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to get vsync pin\n");
+ dev_err(dev, "failed to get vsync pin\n");
return ret;
}
@@ -619,40 +607,40 @@ static int imx_tve_probe(struct platform_device *pdev)
&tve->vsync_pin);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to get vsync pin\n");
+ dev_err(dev, "failed to get vsync pin\n");
return ret;
}
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
tve_regmap_config.lock_arg = tve;
- tve->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "tve", base,
+ tve->regmap = devm_regmap_init_mmio_clk(dev, "tve", base,
&tve_regmap_config);
if (IS_ERR(tve->regmap)) {
- dev_err(&pdev->dev, "failed to init regmap: %ld\n",
+ dev_err(dev, "failed to init regmap: %ld\n",
PTR_ERR(tve->regmap));
return PTR_ERR(tve->regmap);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq\n");
+ dev_err(dev, "failed to get irq\n");
return irq;
}
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ ret = devm_request_threaded_irq(dev, irq, NULL,
imx_tve_irq_handler, IRQF_ONESHOT,
"imx-tve", tve);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
+ dev_err(dev, "failed to request irq: %d\n", ret);
return ret;
}
- tve->dac_reg = devm_regulator_get(&pdev->dev, "dac");
+ tve->dac_reg = devm_regulator_get(dev, "dac");
if (!IS_ERR(tve->dac_reg)) {
regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
ret = regulator_enable(tve->dac_reg);
@@ -660,17 +648,17 @@ static int imx_tve_probe(struct platform_device *pdev)
return ret;
}
- tve->clk = devm_clk_get(&pdev->dev, "tve");
+ tve->clk = devm_clk_get(dev, "tve");
if (IS_ERR(tve->clk)) {
- dev_err(&pdev->dev, "failed to get high speed tve clock: %ld\n",
+ dev_err(dev, "failed to get high speed tve clock: %ld\n",
PTR_ERR(tve->clk));
return PTR_ERR(tve->clk);
}
/* this is the IPU DI clock input selector, can be parented to tve_di */
- tve->di_sel_clk = devm_clk_get(&pdev->dev, "di_sel");
+ tve->di_sel_clk = devm_clk_get(dev, "di_sel");
if (IS_ERR(tve->di_sel_clk)) {
- dev_err(&pdev->dev, "failed to get ipu di mux clock: %ld\n",
+ dev_err(dev, "failed to get ipu di mux clock: %ld\n",
PTR_ERR(tve->di_sel_clk));
return PTR_ERR(tve->di_sel_clk);
}
@@ -681,42 +669,51 @@ static int imx_tve_probe(struct platform_device *pdev)
ret = regmap_read(tve->regmap, TVE_COM_CONF_REG, &val);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to read configuration register: %d\n", ret);
+ dev_err(dev, "failed to read configuration register: %d\n", ret);
return ret;
}
if (val != 0x00100000) {
- dev_err(&pdev->dev, "configuration register default value indicates this is not a TVEv2\n");
+ dev_err(dev, "configuration register default value indicates this is not a TVEv2\n");
return -ENODEV;
}
/* disable cable detection for VGA mode */
ret = regmap_write(tve->regmap, TVE_CD_CONT_REG, 0);
- ret = imx_tve_register(tve);
+ ret = imx_tve_register(drm, tve);
if (ret)
return ret;
- ret = imx_drm_encoder_add_possible_crtcs(tve->imx_drm_encoder, np);
-
- platform_set_drvdata(pdev, tve);
+ dev_set_drvdata(dev, tve);
return 0;
}
-static int imx_tve_remove(struct platform_device *pdev)
+static void imx_tve_unbind(struct device *dev, struct device *master,
+ void *data)
{
- struct imx_tve *tve = platform_get_drvdata(pdev);
- struct drm_connector *connector = &tve->connector;
- struct drm_encoder *encoder = &tve->encoder;
+ struct imx_tve *tve = dev_get_drvdata(dev);
- drm_mode_connector_detach_encoder(connector, encoder);
-
- imx_drm_remove_connector(tve->imx_drm_connector);
- imx_drm_remove_encoder(tve->imx_drm_encoder);
+ tve->connector.funcs->destroy(&tve->connector);
+ tve->encoder.funcs->destroy(&tve->encoder);
if (!IS_ERR(tve->dac_reg))
regulator_disable(tve->dac_reg);
+}
+
+static const struct component_ops imx_tve_ops = {
+ .bind = imx_tve_bind,
+ .unbind = imx_tve_unbind,
+};
+static int imx_tve_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &imx_tve_ops);
+}
+
+static int imx_tve_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx_tve_ops);
return 0;
}
diff --git a/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h b/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
index 4826b5c0249d..c4d14ead5837 100644
--- a/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
+++ b/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
@@ -25,6 +25,8 @@ enum ipuv3_type {
IPUV3H,
};
+#define IPU_PIX_FMT_GBR24 v4l2_fourcc('G', 'B', 'R', '3')
+
/*
* Bitfield of Display Interface signal polarities.
*/
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c b/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
index d0e3bc3c53e7..d5de8bb5c803 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
@@ -262,7 +262,7 @@ void ipu_dc_disable_channel(struct ipu_dc *dc)
/* Wait for DC triple buffer to empty */
while ((readl(priv->dc_reg + DC_STAT) & val) != val) {
- msleep(2);
+ usleep_range(2000, 20000);
timeout -= 2;
if (timeout <= 0)
break;
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-di.c b/drivers/staging/imx-drm/ipu-v3/ipu-di.c
index 948a49b289ef..82a9ebad697c 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-di.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-di.c
@@ -19,9 +19,6 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include "imx-ipu-v3.h"
#include "ipu-prv.h"
@@ -33,10 +30,7 @@ struct ipu_di {
struct clk *clk_di; /* display input clock */
struct clk *clk_ipu; /* IPU bus clock */
struct clk *clk_di_pixel; /* resulting pixel clock */
- struct clk_hw clk_hw_out;
- char *clk_name;
bool inuse;
- unsigned long clkflags;
struct ipu_soc *ipu;
};
@@ -141,130 +135,6 @@ static inline void ipu_di_write(struct ipu_di *di, u32 value, unsigned offset)
writel(value, di->base + offset);
}
-static int ipu_di_clk_calc_div(unsigned long inrate, unsigned long outrate)
-{
- u64 tmp = inrate;
- int div;
-
- tmp *= 16;
-
- do_div(tmp, outrate);
-
- div = tmp;
-
- if (div < 0x10)
- div = 0x10;
-
-#ifdef WTF_IS_THIS
- /*
- * Freescale has this in their Kernel. It is neither clear what
- * it does nor why it does it
- */
- if (div & 0x10)
- div &= ~0x7;
- else {
- /* Round up divider if it gets us closer to desired pix clk */
- if ((div & 0xC) == 0xC) {
- div += 0x10;
- div &= ~0xF;
- }
- }
-#endif
- return div;
-}
-
-static unsigned long clk_di_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct ipu_di *di = container_of(hw, struct ipu_di, clk_hw_out);
- unsigned long outrate;
- u32 div = ipu_di_read(di, DI_BS_CLKGEN0);
-
- if (div < 0x10)
- div = 0x10;
-
- outrate = (parent_rate / div) * 16;
-
- return outrate;
-}
-
-static long clk_di_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct ipu_di *di = container_of(hw, struct ipu_di, clk_hw_out);
- unsigned long outrate;
- int div;
- u32 val;
-
- div = ipu_di_clk_calc_div(*prate, rate);
-
- outrate = (*prate / div) * 16;
-
- val = ipu_di_read(di, DI_GENERAL);
-
- if (!(val & DI_GEN_DI_CLK_EXT) && outrate > *prate / 2)
- outrate = *prate / 2;
-
- dev_dbg(di->ipu->dev,
- "%s: inrate: %ld div: 0x%08x outrate: %ld wanted: %ld\n",
- __func__, *prate, div, outrate, rate);
-
- return outrate;
-}
-
-static int clk_di_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct ipu_di *di = container_of(hw, struct ipu_di, clk_hw_out);
- int div;
- u32 clkgen0;
-
- clkgen0 = ipu_di_read(di, DI_BS_CLKGEN0) & ~0xfff;
-
- div = ipu_di_clk_calc_div(parent_rate, rate);
-
- ipu_di_write(di, clkgen0 | div, DI_BS_CLKGEN0);
-
- dev_dbg(di->ipu->dev, "%s: inrate: %ld desired: %ld div: 0x%08x\n",
- __func__, parent_rate, rate, div);
- return 0;
-}
-
-static u8 clk_di_get_parent(struct clk_hw *hw)
-{
- struct ipu_di *di = container_of(hw, struct ipu_di, clk_hw_out);
- u32 val;
-
- val = ipu_di_read(di, DI_GENERAL);
-
- return val & DI_GEN_DI_CLK_EXT ? 1 : 0;
-}
-
-static int clk_di_set_parent(struct clk_hw *hw, u8 index)
-{
- struct ipu_di *di = container_of(hw, struct ipu_di, clk_hw_out);
- u32 val;
-
- val = ipu_di_read(di, DI_GENERAL);
-
- if (index)
- val |= DI_GEN_DI_CLK_EXT;
- else
- val &= ~DI_GEN_DI_CLK_EXT;
-
- ipu_di_write(di, val, DI_GENERAL);
-
- return 0;
-}
-
-static struct clk_ops clk_di_ops = {
- .round_rate = clk_di_round_rate,
- .set_rate = clk_di_set_rate,
- .recalc_rate = clk_di_recalc_rate,
- .set_parent = clk_di_set_parent,
- .get_parent = clk_di_get_parent,
-};
-
static void ipu_di_data_wave_config(struct ipu_di *di,
int wave_gen,
int access_size, int component_size)
@@ -528,15 +398,125 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
ipu_di_sync_config(di, cfg_vga, 0, ARRAY_SIZE(cfg_vga));
}
+static void ipu_di_config_clock(struct ipu_di *di,
+ const struct ipu_di_signal_cfg *sig)
+{
+ struct clk *clk;
+ unsigned clkgen0;
+ uint32_t val;
+
+ if (sig->clkflags & IPU_DI_CLKMODE_EXT) {
+ /*
+ * CLKMODE_EXT means we must use the DI clock: this is
+ * needed for things like LVDS which needs to feed the
+ * DI and LDB with the same pixel clock.
+ */
+ clk = di->clk_di;
+
+ if (sig->clkflags & IPU_DI_CLKMODE_SYNC) {
+ /*
+ * CLKMODE_SYNC means that we want the DI to be
+ * clocked at the same rate as the parent clock.
+ * This is needed (eg) for LDB which needs to be
+ * fed with the same pixel clock. We assume that
+ * the LDB clock has already been set correctly.
+ */
+ clkgen0 = 1 << 4;
+ } else {
+ /*
+ * We can use the divider. We should really have
+ * a flag here indicating whether the bridge can
+ * cope with a fractional divider or not. For the
+ * time being, let's go for simplicity and
+ * reliability.
+ */
+ unsigned long in_rate;
+ unsigned div;
+
+ clk_set_rate(clk, sig->pixelclock);
+
+ in_rate = clk_get_rate(clk);
+ div = (in_rate + sig->pixelclock / 2) / sig->pixelclock;
+ if (div == 0)
+ div = 1;
+
+ clkgen0 = div << 4;
+ }
+ } else {
+ /*
+ * For other interfaces, we can arbitrarily select between
+ * the DI specific clock and the internal IPU clock. See
+ * DI_GENERAL bit 20. We select the IPU clock if it can
+ * give us a clock rate within 1% of the requested frequency,
+ * otherwise we use the DI clock.
+ */
+ unsigned long rate, clkrate;
+ unsigned div, error;
+
+ clkrate = clk_get_rate(di->clk_ipu);
+ div = (clkrate + sig->pixelclock / 2) / sig->pixelclock;
+ rate = clkrate / div;
+
+ error = rate / (sig->pixelclock / 1000);
+
+ dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
+ rate, div, (signed)(error - 1000) / 10, error % 10);
+
+ /* Allow a 1% error */
+ if (error < 1010 && error >= 990) {
+ clk = di->clk_ipu;
+
+ clkgen0 = div << 4;
+ } else {
+ unsigned long in_rate;
+ unsigned div;
+
+ clk = di->clk_di;
+
+ clk_set_rate(clk, sig->pixelclock);
+
+ in_rate = clk_get_rate(clk);
+ div = (in_rate + sig->pixelclock / 2) / sig->pixelclock;
+ if (div == 0)
+ div = 1;
+
+ clkgen0 = div << 4;
+ }
+ }
+
+ di->clk_di_pixel = clk;
+
+ /* Set the divider */
+ ipu_di_write(di, clkgen0, DI_BS_CLKGEN0);
+
+ /*
+ * Set the high/low periods. Bits 24:16 give us the falling edge,
+ * and bits 8:0 give the rising edge. LSB is fraction, and is
+ * based on the divider above. We want a 50% duty cycle, so set
+ * the falling edge to be half the divider.
+ */
+ ipu_di_write(di, (clkgen0 >> 4) << 16, DI_BS_CLKGEN1);
+
+ /* Finally select the input clock */
+ val = ipu_di_read(di, DI_GENERAL) & ~DI_GEN_DI_CLK_EXT;
+ if (clk == di->clk_di)
+ val |= DI_GEN_DI_CLK_EXT;
+ ipu_di_write(di, val, DI_GENERAL);
+
+ dev_dbg(di->ipu->dev, "Want %luHz IPU %luHz DI %luHz using %s, %luHz\n",
+ sig->pixelclock,
+ clk_get_rate(di->clk_ipu),
+ clk_get_rate(di->clk_di),
+ clk == di->clk_di ? "DI" : "IPU",
+ clk_get_rate(di->clk_di_pixel) / (clkgen0 >> 4));
+}
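To make the 1% rule above concrete, here is the arithmetic for two requested rates, with clk_ipu assumed to run at 264 MHz (both figures are illustrative, not taken from the patch):

/*
 * Requested 66 MHz:  div   = (264000000 + 33000000) / 66000000 = 4
 *                    rate  = 264000000 / 4 = 66000000
 *                    error = 66000000 / (66000000 / 1000) = 1000  (0.0%)
 *                    -> within 990..1010, use the IPU clock, clkgen0 = 4 << 4 = 0x40
 *
 * Requested 65 MHz:  div   = (264000000 + 32500000) / 65000000 = 4
 *                    rate  = 264000000 / 4 = 66000000
 *                    error = 66000000 / (65000000 / 1000) = 1015  (+1.5%)
 *                    -> out of range, fall back to the DI clock and
 *                       clk_set_rate() it to 65 MHz directly
 */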
+
int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
{
u32 reg;
u32 di_gen, vsync_cnt;
u32 div;
u32 h_total, v_total;
- int ret;
- unsigned long round;
- struct clk *parent;
dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
di->id, sig->width, sig->height);
@@ -544,33 +524,20 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
if ((sig->v_sync_width == 0) || (sig->h_sync_width == 0))
return -EINVAL;
- if (sig->clkflags & IPU_DI_CLKMODE_EXT)
- parent = di->clk_di;
- else
- parent = di->clk_ipu;
-
- ret = clk_set_parent(di->clk_di_pixel, parent);
- if (ret) {
- dev_err(di->ipu->dev,
- "setting pixel clock to parent %s failed with %d\n",
- __clk_get_name(parent), ret);
- return ret;
- }
-
- if (sig->clkflags & IPU_DI_CLKMODE_SYNC)
- round = clk_get_rate(parent);
- else
- round = clk_round_rate(di->clk_di_pixel, sig->pixelclock);
-
- ret = clk_set_rate(di->clk_di_pixel, round);
-
h_total = sig->width + sig->h_sync_width + sig->h_start_width +
sig->h_end_width;
v_total = sig->height + sig->v_sync_width + sig->v_start_width +
sig->v_end_width;
+ dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
+ clk_get_rate(di->clk_ipu),
+ clk_get_rate(di->clk_di),
+ sig->pixelclock);
+
mutex_lock(&di_mutex);
+ ipu_di_config_clock(di, sig);
+
div = ipu_di_read(di, DI_BS_CLKGEN0) & 0xfff;
div = div / 16; /* Now divider is integer portion */
@@ -654,7 +621,11 @@ EXPORT_SYMBOL_GPL(ipu_di_init_sync_panel);
int ipu_di_enable(struct ipu_di *di)
{
- int ret = clk_prepare_enable(di->clk_di_pixel);
+ int ret;
+
+ WARN_ON(IS_ERR(di->clk_di_pixel));
+
+ ret = clk_prepare_enable(di->clk_di_pixel);
if (ret)
return ret;
@@ -666,6 +637,8 @@ EXPORT_SYMBOL_GPL(ipu_di_enable);
int ipu_di_disable(struct ipu_di *di)
{
+ WARN_ON(IS_ERR(di->clk_di_pixel));
+
ipu_module_disable(di->ipu, di->module);
clk_disable_unprepare(di->clk_di_pixel);
@@ -721,13 +694,6 @@ int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
u32 module, struct clk *clk_ipu)
{
struct ipu_di *di;
- int ret;
- const char *di_parent[2];
- struct clk_init_data init = {
- .ops = &clk_di_ops,
- .num_parents = 2,
- .flags = 0,
- };
if (id > 1)
return -ENODEV;
@@ -749,45 +715,16 @@ int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
if (!di->base)
return -ENOMEM;
- di_parent[0] = __clk_get_name(di->clk_ipu);
- di_parent[1] = __clk_get_name(di->clk_di);
-
ipu_di_write(di, 0x10, DI_BS_CLKGEN0);
- init.parent_names = (const char **)&di_parent;
- di->clk_name = kasprintf(GFP_KERNEL, "%s_di%d_pixel",
- dev_name(dev), id);
- if (!di->clk_name)
- return -ENOMEM;
-
- init.name = di->clk_name;
-
- di->clk_hw_out.init = &init;
- di->clk_di_pixel = clk_register(dev, &di->clk_hw_out);
-
- if (IS_ERR(di->clk_di_pixel)) {
- ret = PTR_ERR(di->clk_di_pixel);
- goto failed_clk_register;
- }
-
dev_dbg(dev, "DI%d base: 0x%08lx remapped to %p\n",
id, base, di->base);
di->inuse = false;
di->ipu = ipu;
return 0;
-
-failed_clk_register:
-
- kfree(di->clk_name);
-
- return ret;
}
void ipu_di_exit(struct ipu_soc *ipu, int id)
{
- struct ipu_di *di = ipu->di_priv[id];
-
- clk_unregister(di->clk_di_pixel);
- kfree(di->clk_name);
}
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c b/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
index 98070dd8c920..45213017fa4b 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
@@ -161,9 +161,6 @@ static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots,
"dmfc: using %d slots starting from segment %d for IPU channel %d\n",
slots, segment, dmfc->data->ipu_channel);
- if (!dmfc)
- return -EINVAL;
-
switch (slots) {
case 1:
field = DMFC_FIFO_SIZE_64;
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 22be104fbda9..c48f640db006 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -17,6 +17,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
+#include <linux/component.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/device.h>
@@ -120,7 +121,7 @@ static int ipu_page_flip(struct drm_crtc *crtc,
ipu_crtc->newfb = fb;
ipu_crtc->page_flip_event = event;
- crtc->fb = fb;
+ crtc->primary->fb = fb;
return 0;
}
@@ -192,7 +193,7 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
- return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode, crtc->fb,
+ return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x, y, mode->hdisplay, mode->vdisplay);
}
@@ -218,7 +219,7 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
if (ipu_crtc->newfb) {
ipu_crtc->newfb = NULL;
- ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.fb,
+ ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.primary->fb,
ipu_crtc->plane[0]->x, ipu_crtc->plane[0]->y);
ipu_crtc_handle_pageflip(ipu_crtc);
}
@@ -284,6 +285,7 @@ static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, u32 encoder_type,
ipu_crtc->di_clkflags = IPU_DI_CLKMODE_SYNC |
IPU_DI_CLKMODE_EXT;
break;
+ case DRM_MODE_ENCODER_TMDS:
case DRM_MODE_ENCODER_NONE:
ipu_crtc->di_clkflags = 0;
break;
@@ -334,7 +336,7 @@ err_out:
}
static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
- struct ipu_client_platformdata *pdata)
+ struct ipu_client_platformdata *pdata, struct drm_device *drm)
{
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
int dp = -EINVAL;
@@ -348,10 +350,8 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
return ret;
}
- ret = imx_drm_add_crtc(&ipu_crtc->base,
- &ipu_crtc->imx_crtc,
- &ipu_crtc_helper_funcs, THIS_MODULE,
- ipu_crtc->dev->parent->of_node, pdata->di);
+ ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
+ &ipu_crtc_helper_funcs, ipu_crtc->dev->of_node);
if (ret) {
dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
goto err_put_resources;
@@ -399,43 +399,96 @@ err_put_resources:
return ret;
}
-static int ipu_drm_probe(struct platform_device *pdev)
+static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent,
+ int port_id)
{
- struct ipu_client_platformdata *pdata = pdev->dev.platform_data;
- struct ipu_crtc *ipu_crtc;
- int ret;
+ struct device_node *port;
+ int id, ret;
+
+ port = of_get_child_by_name(parent, "port");
+ while (port) {
+ ret = of_property_read_u32(port, "reg", &id);
+ if (!ret && id == port_id)
+ return port;
+
+ do {
+ port = of_get_next_child(parent, port);
+ if (!port)
+ return NULL;
+ } while (of_node_cmp(port->name, "port"));
+ }
- if (!pdata)
- return -EINVAL;
+ return NULL;
+}
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
+static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
+{
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+ struct drm_device *drm = data;
+ struct ipu_crtc *ipu_crtc;
+ int ret;
- ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL);
+ ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
if (!ipu_crtc)
return -ENOMEM;
- ipu_crtc->dev = &pdev->dev;
+ ipu_crtc->dev = dev;
- ret = ipu_crtc_init(ipu_crtc, pdata);
+ ret = ipu_crtc_init(ipu_crtc, pdata, drm);
if (ret)
return ret;
- platform_set_drvdata(pdev, ipu_crtc);
+ dev_set_drvdata(dev, ipu_crtc);
return 0;
}
-static int ipu_drm_remove(struct platform_device *pdev)
+static void ipu_drm_unbind(struct device *dev, struct device *master,
+ void *data)
{
- struct ipu_crtc *ipu_crtc = platform_get_drvdata(pdev);
+ struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
imx_drm_remove_crtc(ipu_crtc->imx_crtc);
ipu_plane_put_resources(ipu_crtc->plane[0]);
ipu_put_resources(ipu_crtc);
+}
+
+static const struct component_ops ipu_crtc_ops = {
+ .bind = ipu_drm_bind,
+ .unbind = ipu_drm_unbind,
+};
+static int ipu_drm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+ int ret;
+
+ if (!dev->platform_data)
+ return -EINVAL;
+
+ if (!dev->of_node) {
+ /* Associate crtc device with the corresponding DI port node */
+ dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node,
+ pdata->di + 2);
+ if (!dev->of_node) {
+ dev_err(dev, "missing port@%d node in %s\n",
+ pdata->di + 2, dev->parent->of_node->full_name);
+ return -ENODEV;
+ }
+ }
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ return component_add(dev, &ipu_crtc_ops);
+}
+
+static int ipu_drm_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &ipu_crtc_ops);
return 0;
}
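
The ipuv3-crtc.c change above converts the driver to the component helper: probe() now only registers the device as a component, and the real CRTC setup moves into a bind() callback that runs when the DRM master assembles all its pieces. A minimal sketch of that probe/bind split, with hypothetical demo_* names:

#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <drm/drmP.h>

/* Hypothetical component: the real work happens when the master binds us. */
static int demo_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm = data;	/* handed in by the DRM master */

	/* allocate state and register the CRTC against drm here */
	return 0;
}

static void demo_unbind(struct device *dev, struct device *master, void *data)
{
	/* tear down whatever demo_bind() created */
}

static const struct component_ops demo_ops = {
	.bind	= demo_bind,
	.unbind	= demo_unbind,
};

static int demo_probe(struct platform_device *pdev)
{
	/* probe only announces the component; binding happens later */
	return component_add(&pdev->dev, &demo_ops);
}

static int demo_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &demo_ops);
	return 0;
}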
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c
index 34b642a12f8b..27a8d735dae0 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.c
+++ b/drivers/staging/imx-drm/ipuv3-plane.c
@@ -68,12 +68,12 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
if (!cma_obj) {
- DRM_LOG_KMS("entry is null.\n");
+ DRM_DEBUG_KMS("entry is null.\n");
return -EFAULT;
}
- dev_dbg(ipu_plane->base.dev->dev, "phys = 0x%x, x = %d, y = %d",
- cma_obj->paddr, x, y);
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
+ &cma_obj->paddr, x, y);
cpmem = ipu_get_cpmem(ipu_plane->ipu_ch);
ipu_cpmem_set_stride(cpmem, fb->pitches[0]);
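
The ipuv3-plane.c hunk switches the debug print of cma_obj->paddr from "0x%x" (which truncates when dma_addr_t is 64-bit) to the %pad specifier. A minimal sketch of that printk convention; the function name is hypothetical:

#include <linux/device.h>
#include <linux/types.h>

static void demo_log_addr(struct device *dev, dma_addr_t paddr)
{
	/*
	 * %pad prints a dma_addr_t portably on 32- and 64-bit builds;
	 * note it takes the address of the variable, not its value.
	 */
	dev_dbg(dev, "phys = %pad\n", &paddr);
}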
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/staging/imx-drm/parallel-display.c
index 351d61dede00..c60b6c645f42 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/staging/imx-drm/parallel-display.c
@@ -18,10 +18,12 @@
* MA 02110-1301, USA.
*/
+#include <linux/component.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include <linux/videodev2.h>
#include <video/of_display_timing.h>
@@ -32,15 +34,14 @@
struct imx_parallel_display {
struct drm_connector connector;
- struct imx_drm_connector *imx_drm_connector;
struct drm_encoder encoder;
- struct imx_drm_encoder *imx_drm_encoder;
struct device *dev;
void *edid;
int edid_len;
u32 interface_pix_fmt;
int mode_valid;
struct drm_display_mode mode;
+ struct drm_panel *panel;
};
static enum drm_connector_status imx_pd_connector_detect(
@@ -49,17 +50,19 @@ static enum drm_connector_status imx_pd_connector_detect(
return connector_status_connected;
}
-static void imx_pd_connector_destroy(struct drm_connector *connector)
-{
- /* do not free here */
-}
-
static int imx_pd_connector_get_modes(struct drm_connector *connector)
{
struct imx_parallel_display *imxpd = con_to_imxpd(connector);
struct device_node *np = imxpd->dev->of_node;
int num_modes = 0;
+ if (imxpd->panel && imxpd->panel->funcs &&
+ imxpd->panel->funcs->get_modes) {
+ num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
+ if (num_modes > 0)
+ return num_modes;
+ }
+
if (imxpd->edid) {
drm_mode_connector_update_edid_property(connector, imxpd->edid);
num_modes = drm_add_edid_modes(connector, imxpd->edid);
@@ -67,6 +70,8 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (imxpd->mode_valid) {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return -EINVAL;
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
drm_mode_probed_add(connector, mode);
@@ -75,6 +80,8 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (np) {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return -EINVAL;
of_get_drm_display_mode(np, &imxpd->mode, OF_USE_NATIVE_MODE);
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
@@ -85,12 +92,6 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
return num_modes;
}
-static int imx_pd_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return 0;
-}
-
static struct drm_encoder *imx_pd_connector_best_encoder(
struct drm_connector *connector)
{
@@ -101,6 +102,12 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
{
+ struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+
+ if (mode != DRM_MODE_DPMS_ON)
+ drm_panel_disable(imxpd->panel);
+ else
+ drm_panel_enable(imxpd->panel);
}
static bool imx_pd_encoder_mode_fixup(struct drm_encoder *encoder,
@@ -114,8 +121,7 @@ static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
- imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_NONE,
- imxpd->interface_pix_fmt);
+ imx_drm_panel_format(encoder, imxpd->interface_pix_fmt);
}
static void imx_pd_encoder_commit(struct drm_encoder *encoder)
@@ -132,26 +138,21 @@ static void imx_pd_encoder_disable(struct drm_encoder *encoder)
{
}
-static void imx_pd_encoder_destroy(struct drm_encoder *encoder)
-{
- /* do not free here */
-}
-
static struct drm_connector_funcs imx_pd_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_pd_connector_detect,
- .destroy = imx_pd_connector_destroy,
+ .destroy = imx_drm_connector_destroy,
};
static struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
.get_modes = imx_pd_connector_get_modes,
.best_encoder = imx_pd_connector_best_encoder,
- .mode_valid = imx_pd_connector_mode_valid,
+ .mode_valid = imx_drm_connector_mode_valid,
};
static struct drm_encoder_funcs imx_pd_encoder_funcs = {
- .destroy = imx_pd_encoder_destroy,
+ .destroy = imx_drm_encoder_destroy,
};
static struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
@@ -163,51 +164,46 @@ static struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
.disable = imx_pd_encoder_disable,
};
-static int imx_pd_register(struct imx_parallel_display *imxpd)
+static int imx_pd_register(struct drm_device *drm,
+ struct imx_parallel_display *imxpd)
{
int ret;
- drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder);
-
- imxpd->connector.funcs = &imx_pd_connector_funcs;
- imxpd->encoder.funcs = &imx_pd_encoder_funcs;
-
- imxpd->encoder.encoder_type = DRM_MODE_ENCODER_NONE;
- imxpd->connector.connector_type = DRM_MODE_CONNECTOR_VGA;
+ ret = imx_drm_encoder_parse_of(drm, &imxpd->encoder,
+ imxpd->dev->of_node);
+ if (ret)
+ return ret;
drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
- ret = imx_drm_add_encoder(&imxpd->encoder, &imxpd->imx_drm_encoder,
- THIS_MODULE);
- if (ret) {
- dev_err(imxpd->dev, "adding encoder failed with %d\n", ret);
- return ret;
- }
+ drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
+ DRM_MODE_ENCODER_NONE);
drm_connector_helper_add(&imxpd->connector,
&imx_pd_connector_helper_funcs);
+ drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
- ret = imx_drm_add_connector(&imxpd->connector,
- &imxpd->imx_drm_connector, THIS_MODULE);
- if (ret) {
- imx_drm_remove_encoder(imxpd->imx_drm_encoder);
- dev_err(imxpd->dev, "adding connector failed with %d\n", ret);
- return ret;
- }
+ if (imxpd->panel)
+ drm_panel_attach(imxpd->panel, &imxpd->connector);
+
+ drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder);
imxpd->connector.encoder = &imxpd->encoder;
return 0;
}
-static int imx_pd_probe(struct platform_device *pdev)
+static int imx_pd_bind(struct device *dev, struct device *master, void *data)
{
- struct device_node *np = pdev->dev.of_node;
+ struct drm_device *drm = data;
+ struct device_node *np = dev->of_node;
+ struct device_node *panel_node;
const u8 *edidp;
struct imx_parallel_display *imxpd;
int ret;
const char *fmt;
- imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
+ imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
if (!imxpd)
return -ENOMEM;
@@ -225,30 +221,43 @@ static int imx_pd_probe(struct platform_device *pdev)
imxpd->interface_pix_fmt = V4L2_PIX_FMT_BGR666;
}
- imxpd->dev = &pdev->dev;
+ panel_node = of_parse_phandle(np, "fsl,panel", 0);
+ if (panel_node)
+ imxpd->panel = of_drm_find_panel(panel_node);
- ret = imx_pd_register(imxpd);
+ imxpd->dev = dev;
+
+ ret = imx_pd_register(drm, imxpd);
if (ret)
return ret;
- ret = imx_drm_encoder_add_possible_crtcs(imxpd->imx_drm_encoder, np);
-
- platform_set_drvdata(pdev, imxpd);
+ dev_set_drvdata(dev, imxpd);
return 0;
}
-static int imx_pd_remove(struct platform_device *pdev)
+static void imx_pd_unbind(struct device *dev, struct device *master,
+ void *data)
{
- struct imx_parallel_display *imxpd = platform_get_drvdata(pdev);
- struct drm_connector *connector = &imxpd->connector;
- struct drm_encoder *encoder = &imxpd->encoder;
+ struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
- drm_mode_connector_detach_encoder(connector, encoder);
+ imxpd->encoder.funcs->destroy(&imxpd->encoder);
+ imxpd->connector.funcs->destroy(&imxpd->connector);
+}
- imx_drm_remove_connector(imxpd->imx_drm_connector);
- imx_drm_remove_encoder(imxpd->imx_drm_encoder);
+static const struct component_ops imx_pd_ops = {
+ .bind = imx_pd_bind,
+ .unbind = imx_pd_unbind,
+};
+static int imx_pd_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &imx_pd_ops);
+}
+
+static int imx_pd_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx_pd_ops);
return 0;
}
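
Besides the component conversion, parallel-display.c gains optional drm_panel support: a panel is resolved through the "fsl,panel" phandle, attached to the connector, and toggled from the encoder DPMS hook. A minimal sketch of that lookup/attach flow, assuming the drm_panel API as it looked at the time of this patch (of_drm_find_panel() returning NULL on failure, drm_panel_attach() taking the connector); the of_node_put() is my addition for illustration, the patch itself does not drop the node reference:

#include <drm/drmP.h>
#include <drm/drm_panel.h>
#include <linux/of.h>

/* Sketch: resolve an optional panel phandle and attach it to a connector. */
static void demo_hook_panel(struct device_node *np,
			    struct drm_connector *connector,
			    struct drm_panel **out)
{
	struct device_node *panel_node;

	panel_node = of_parse_phandle(np, "fsl,panel", 0);
	if (!panel_node)
		return;				/* panel is optional */

	*out = of_drm_find_panel(panel_node);	/* NULL if not registered yet */
	of_node_put(panel_node);

	if (*out)
		drm_panel_attach(*out, connector);
}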
diff --git a/drivers/staging/keucr/common.h b/drivers/staging/keucr/common.h
index cf347ccd6a6e..f0b977616cd5 100644
--- a/drivers/staging/keucr/common.h
+++ b/drivers/staging/keucr/common.h
@@ -1,14 +1,6 @@
#ifndef COMMON_INCD
#define COMMON_INCD
-typedef u8 BOOLEAN;
-typedef u8 BYTE;
-typedef u8 *PBYTE;
-typedef u16 WORD;
-typedef u16 *PWORD;
-typedef u32 DWORD;
-typedef u32 *PDWORD;
-
#define BYTE_MASK 0xff
#endif
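
The common.h hunk removes the keucr driver's Windows-style typedefs; the rest of the keucr changes below are a straight substitution to the kernel's fixed-width types from <linux/types.h>. The mapping, for reference:

#include <linux/types.h>

/* BOOLEAN -> bool                               */
/* BYTE    -> u8,    PBYTE  -> u8 *              */
/* WORD    -> u16,   PWORD  -> u16 *             */
/* DWORD   -> u32,   PDWORD -> u32 *             */

/* e.g. before: int ene_read_byte(struct us_data *us, WORD index, void *buf); */
/*      after:  int ene_read_byte(struct us_data *us, u16  index, void *buf); */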
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index f5d41e0348ce..e61183906548 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -17,7 +17,7 @@
int ENE_InitMedia(struct us_data *us)
{
int result;
- BYTE MiscReg03 = 0;
+ u8 MiscReg03 = 0;
dev_info(&us->pusb_dev->dev, "--- Init Media ---\n");
result = ene_read_byte(us, REG_CARD_STATUS, &MiscReg03);
@@ -41,7 +41,7 @@ int ENE_InitMedia(struct us_data *us)
/*
* ene_read_byte() :
*/
-int ene_read_byte(struct us_data *us, WORD index, void *buf)
+int ene_read_byte(struct us_data *us, u16 index, void *buf)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
@@ -51,8 +51,8 @@ int ene_read_byte(struct us_data *us, WORD index, void *buf)
bcb->DataTransferLength = 0x01;
bcb->Flags = 0x80;
bcb->CDB[0] = 0xED;
- bcb->CDB[2] = (BYTE)(index>>8);
- bcb->CDB[3] = (BYTE)index;
+ bcb->CDB[2] = (u8)(index>>8);
+ bcb->CDB[3] = (u8)index;
result = ENE_SendScsiCmd(us, FDIR_READ, buf, 0);
return result;
@@ -65,7 +65,7 @@ int ENE_SMInit(struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- BYTE buf[0x200];
+ u8 buf[0x200];
dev_dbg(&us->pusb_dev->dev, "transport --- ENE_SMInit\n");
@@ -122,12 +122,12 @@ int ENE_SMInit(struct us_data *us)
/*
* ENE_LoadBinCode()
*/
-int ENE_LoadBinCode(struct us_data *us, BYTE flag)
+int ENE_LoadBinCode(struct us_data *us, u8 flag)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
/* void *buf; */
- PBYTE buf;
+ u8 *buf;
/* dev_info(&us->pusb_dev->dev, "transport --- ENE_LoadBinCode\n"); */
if (us->BIN_FLAG == flag)
@@ -164,7 +164,7 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag)
/*
* ENE_SendScsiCmd():
*/
-int ENE_SendScsiCmd(struct us_data *us, BYTE fDir, void *buf, int use_sg)
+int ENE_SendScsiCmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
diff --git a/drivers/staging/keucr/init.h b/drivers/staging/keucr/init.h
index c8b2cd604460..98d2e3ba8545 100644
--- a/drivers/staging/keucr/init.h
+++ b/drivers/staging/keucr/init.h
@@ -1,11 +1,11 @@
#include "common.h"
-extern DWORD MediaChange;
+extern u32 MediaChange;
extern int Check_D_MediaFmt(struct us_data *);
-static BYTE SM_Init[] = {
+static u8 SM_Init[] = {
0x7B, 0x09, 0x7C, 0xF0, 0x7D, 0x10, 0x7E, 0xE9,
0x7F, 0xCC, 0x12, 0x2F, 0x71, 0x90, 0xE9, 0xCC,
0xE0, 0xB4, 0x07, 0x12, 0x90, 0xFF, 0x09, 0xE0,
@@ -263,7 +263,7 @@ static BYTE SM_Init[] = {
0x58, 0x44, 0x2D, 0x49, 0x6E, 0x69, 0x74, 0x20,
0x20, 0x20, 0x20, 0x31, 0x30, 0x30, 0x30, 0x31 };
-static BYTE SM_Rdwr[] = {
+static u8 SM_Rdwr[] = {
0x7B, 0x0C, 0x7C, 0xF0, 0x7D, 0x10, 0x7E, 0xE9,
0x7F, 0xCC, 0x12, 0x2F, 0x71, 0x90, 0xE9, 0xC3,
0xE0, 0xB4, 0x73, 0x04, 0x74, 0x40, 0x80, 0x09,
diff --git a/drivers/staging/keucr/smil.h b/drivers/staging/keucr/smil.h
index 9136e9447261..39951738d231 100644
--- a/drivers/staging/keucr/smil.h
+++ b/drivers/staging/keucr/smil.h
@@ -169,29 +169,29 @@ SmartMedia Model & Attribute
Struct Definition
***************************************************************************/
struct keucr_media_info {
- BYTE Model;
- BYTE Attribute;
- BYTE MaxZones;
- BYTE MaxSectors;
- WORD MaxBlocks;
- WORD MaxLogBlocks;
+ u8 Model;
+ u8 Attribute;
+ u8 MaxZones;
+ u8 MaxSectors;
+ u16 MaxBlocks;
+ u16 MaxLogBlocks;
};
struct keucr_media_address {
- BYTE Zone; /* Zone Number */
- BYTE Sector; /* Sector(512byte) Number on Block */
- WORD PhyBlock; /* Physical Block Number on Zone */
- WORD LogBlock; /* Logical Block Number of Zone */
+ u8 Zone; /* Zone Number */
+ u8 Sector; /* Sector(512byte) Number on Block */
+ u16 PhyBlock; /* Physical Block Number on Zone */
+ u16 LogBlock; /* Logical Block Number of Zone */
};
struct keucr_media_area {
- BYTE Sector; /* Sector(512byte) Number on Block */
- WORD PhyBlock; /* Physical Block Number on Zone 0 */
+ u8 Sector; /* Sector(512byte) Number on Block */
+ u16 PhyBlock; /* Physical Block Number on Zone 0 */
};
-extern WORD ReadBlock;
-extern WORD WriteBlock;
-extern DWORD MediaChange;
+extern u16 ReadBlock;
+extern u16 WriteBlock;
+extern u32 MediaChange;
extern struct keucr_media_info Ssfdc;
extern struct keucr_media_address Media;
@@ -204,24 +204,24 @@ extern struct keucr_media_area CisArea;
int Init_D_SmartMedia(void);
int Pwoff_D_SmartMedia(void);
int Check_D_SmartMedia(void);
-int Check_D_Parameter(struct us_data *, WORD *, BYTE *, BYTE *);
-int Media_D_ReadSector(struct us_data *, DWORD, WORD, BYTE *);
-int Media_D_WriteSector(struct us_data *, DWORD, WORD, BYTE *);
-int Media_D_CopySector(struct us_data *, DWORD, WORD, BYTE *);
-int Media_D_EraseBlock(struct us_data *, DWORD, WORD);
+int Check_D_Parameter(struct us_data *, u16 *, u8 *, u8 *);
+int Media_D_ReadSector(struct us_data *, u32, u16, u8 *);
+int Media_D_WriteSector(struct us_data *, u32, u16, u8 *);
+int Media_D_CopySector(struct us_data *, u32, u16, u8 *);
+int Media_D_EraseBlock(struct us_data *, u32, u16);
int Media_D_EraseAll(struct us_data *);
/******************************************/
-int Media_D_OneSectWriteStart(struct us_data *, DWORD, BYTE *);
-int Media_D_OneSectWriteNext(struct us_data *, BYTE *);
+int Media_D_OneSectWriteStart(struct us_data *, u32, u8 *);
+int Media_D_OneSectWriteNext(struct us_data *, u8 *);
int Media_D_OneSectWriteFlush(struct us_data *);
/******************************************/
extern int SM_FreeMem(void); /* ENE SM function */
-void SM_EnableLED(struct us_data *, BOOLEAN);
+void SM_EnableLED(struct us_data *, bool);
void Led_D_TernOn(void);
void Led_D_TernOff(void);
-int Media_D_EraseAllRedtData(DWORD Index, BOOLEAN CheckBlock);
+int Media_D_EraseAllRedtData(u32 Index, bool CheckBlock);
/*DWORD Media_D_GetMediaInfo(struct us_data * fdoExt,
PIOCTL_MEDIA_INFO_IN pParamIn, PIOCTL_MEDIA_INFO_OUT pParamOut); */
@@ -229,31 +229,31 @@ int Media_D_EraseAllRedtData(DWORD Index, BOOLEAN CheckBlock);
* SMILSub.c
*/
/******************************************/
-int Check_D_DataBlank(BYTE *);
-int Check_D_FailBlock(BYTE *);
-int Check_D_DataStatus(BYTE *);
-int Load_D_LogBlockAddr(BYTE *);
-void Clr_D_RedundantData(BYTE *);
-void Set_D_LogBlockAddr(BYTE *);
-void Set_D_FailBlock(BYTE *);
-void Set_D_DataStaus(BYTE *);
+int Check_D_DataBlank(u8 *);
+int Check_D_FailBlock(u8 *);
+int Check_D_DataStatus(u8 *);
+int Load_D_LogBlockAddr(u8 *);
+void Clr_D_RedundantData(u8 *);
+void Set_D_LogBlockAddr(u8 *);
+void Set_D_FailBlock(u8 *);
+void Set_D_DataStaus(u8 *);
/******************************************/
void Ssfdc_D_Reset(struct us_data *);
-int Ssfdc_D_ReadCisSect(struct us_data *, BYTE *, BYTE *);
+int Ssfdc_D_ReadCisSect(struct us_data *, u8 *, u8 *);
void Ssfdc_D_WriteRedtMode(void);
-void Ssfdc_D_ReadID(BYTE *, BYTE);
-int Ssfdc_D_ReadSect(struct us_data *, BYTE *, BYTE *);
-int Ssfdc_D_ReadBlock(struct us_data *, WORD, BYTE *, BYTE *);
-int Ssfdc_D_WriteSect(struct us_data *, BYTE *, BYTE *);
-int Ssfdc_D_WriteBlock(struct us_data *, WORD, BYTE *, BYTE *);
-int Ssfdc_D_CopyBlock(struct us_data *, WORD, BYTE *, BYTE *);
-int Ssfdc_D_WriteSectForCopy(struct us_data *, BYTE *, BYTE *);
+void Ssfdc_D_ReadID(u8 *, u8);
+int Ssfdc_D_ReadSect(struct us_data *, u8 *, u8 *);
+int Ssfdc_D_ReadBlock(struct us_data *, u16, u8 *, u8 *);
+int Ssfdc_D_WriteSect(struct us_data *, u8 *, u8 *);
+int Ssfdc_D_WriteBlock(struct us_data *, u16, u8 *, u8 *);
+int Ssfdc_D_CopyBlock(struct us_data *, u16, u8 *, u8 *);
+int Ssfdc_D_WriteSectForCopy(struct us_data *, u8 *, u8 *);
int Ssfdc_D_EraseBlock(struct us_data *);
-int Ssfdc_D_ReadRedtData(struct us_data *, BYTE *);
-int Ssfdc_D_WriteRedtData(struct us_data *, BYTE *);
+int Ssfdc_D_ReadRedtData(struct us_data *, u8 *);
+int Ssfdc_D_WriteRedtData(struct us_data *, u8 *);
int Ssfdc_D_CheckStatus(void);
-int Set_D_SsfdcModel(BYTE);
+int Set_D_SsfdcModel(u8);
void Cnt_D_Reset(void);
int Cnt_D_PowerOn(void);
void Cnt_D_PowerOff(void);
@@ -263,26 +263,26 @@ int Check_D_CntPower(void);
int Check_D_CardExist(void);
int Check_D_CardStsChg(void);
int Check_D_SsfdcWP(void);
-int SM_ReadBlock(struct us_data *, BYTE *, BYTE *);
+int SM_ReadBlock(struct us_data *, u8 *, u8 *);
-int Ssfdc_D_ReadSect_DMA(struct us_data *, BYTE *, BYTE *);
-int Ssfdc_D_ReadSect_PIO(struct us_data *, BYTE *, BYTE *);
-int Ssfdc_D_WriteSect_DMA(struct us_data *, BYTE *, BYTE *);
-int Ssfdc_D_WriteSect_PIO(struct us_data *, BYTE *, BYTE *);
+int Ssfdc_D_ReadSect_DMA(struct us_data *, u8 *, u8 *);
+int Ssfdc_D_ReadSect_PIO(struct us_data *, u8 *, u8 *);
+int Ssfdc_D_WriteSect_DMA(struct us_data *, u8 *, u8 *);
+int Ssfdc_D_WriteSect_PIO(struct us_data *, u8 *, u8 *);
/******************************************/
-int Check_D_ReadError(BYTE *);
-int Check_D_Correct(BYTE *, BYTE *);
-int Check_D_CISdata(BYTE *, BYTE *);
-void Set_D_RightECC(BYTE *);
+int Check_D_ReadError(u8 *);
+int Check_D_Correct(u8 *, u8 *);
+int Check_D_CISdata(u8 *, u8 *);
+void Set_D_RightECC(u8 *);
/*
* SMILECC.c
*/
-void calculate_ecc(BYTE *, BYTE *, BYTE *, BYTE *, BYTE *);
-BYTE correct_data(BYTE *, BYTE *, BYTE, BYTE, BYTE);
-int _Correct_D_SwECC(BYTE *, BYTE *, BYTE *);
-void _Calculate_D_SwECC(BYTE *, BYTE *);
+void calculate_ecc(u8 *, u8 *, u8 *, u8 *, u8 *);
+u8 correct_data(u8 *, u8 *, u8, u8, u8);
+int _Correct_D_SwECC(u8 *, u8 *, u8 *);
+void _Calculate_D_SwECC(u8 *, u8 *);
void SM_Init(void);
diff --git a/drivers/staging/keucr/smilecc.c b/drivers/staging/keucr/smilecc.c
index 6b8f7d7a7436..ffe6030f5e4d 100644
--- a/drivers/staging/keucr/smilecc.c
+++ b/drivers/staging/keucr/smilecc.c
@@ -13,7 +13,7 @@
/* #include "EMCRIOS.h" */
/* CP0-CP5 code table */
-static BYTE ecctable[256] = {
+static u8 ecctable[256] = {
0x00, 0x55, 0x56, 0x03, 0x59, 0x0C, 0x0F, 0x5A, 0x5A, 0x0F, 0x0C, 0x59, 0x03,
0x56, 0x55, 0x00, 0x65, 0x30, 0x33, 0x66, 0x3C, 0x69, 0x6A, 0x3F, 0x3F, 0x6A,
0x69, 0x3C, 0x66, 0x33, 0x30, 0x65, 0x66, 0x33, 0x30, 0x65, 0x3F, 0x6A, 0x69,
@@ -36,7 +36,7 @@ static BYTE ecctable[256] = {
0x5A, 0x5A, 0x0F, 0x0C, 0x59, 0x03, 0x56, 0x55, 0x00
};
-static void trans_result(BYTE, BYTE, BYTE *, BYTE *);
+static void trans_result(u8, u8, u8 *, u8 *);
#define BIT7 0x80
#define BIT6 0x40
@@ -57,11 +57,11 @@ static void trans_result(BYTE, BYTE, BYTE *, BYTE *);
* *ecc1; * LP15,LP14,LP13,...
* *ecc2; * LP07,LP06,LP05,...
*/
-static void trans_result(BYTE reg2, BYTE reg3, BYTE *ecc1, BYTE *ecc2)
+static void trans_result(u8 reg2, u8 reg3, u8 *ecc1, u8 *ecc2)
{
- BYTE a; /* Working for reg2,reg3 */
- BYTE b; /* Working for ecc1,ecc2 */
- BYTE i; /* For counting */
+ u8 a; /* Working for reg2,reg3 */
+ u8 b; /* Working for ecc1,ecc2 */
+ u8 i; /* For counting */
a = BIT7; b = BIT7; /* 80h=10000000b */
*ecc1 = *ecc2 = 0; /* Clear ecc1,ecc2 */
@@ -95,21 +95,21 @@ static void trans_result(BYTE reg2, BYTE reg3, BYTE *ecc1, BYTE *ecc2)
* *ecc2; * LP07,LP06,LP05,...
* *ecc3; * CP5,CP4,CP3,...,"1","1"
*/
-void calculate_ecc(BYTE *table, BYTE *data, BYTE *ecc1, BYTE *ecc2, BYTE *ecc3)
+void calculate_ecc(u8 *table, u8 *data, u8 *ecc1, u8 *ecc2, u8 *ecc3)
{
- DWORD i; /* For counting */
- BYTE a; /* Working for table */
- BYTE reg1; /* D-all,CP5,CP4,CP3,... */
- BYTE reg2; /* LP14,LP12,L10,... */
- BYTE reg3; /* LP15,LP13,L11,... */
+ u32 i; /* For counting */
+ u8 a; /* Working for table */
+ u8 reg1; /* D-all,CP5,CP4,CP3,... */
+ u8 reg2; /* LP14,LP12,L10,... */
+ u8 reg3; /* LP15,LP13,L11,... */
reg1 = reg2 = reg3 = 0; /* Clear parameter */
for (i = 0; i < 256; ++i) {
a = table[data[i]]; /* Get CP0-CP5 code from table */
reg1 ^= (a&MASK_CPS); /* XOR with a */
if ((a&BIT6) != 0) { /* If D_all(all bit XOR) = 1 */
- reg3 ^= (BYTE)i; /* XOR with counter */
- reg2 ^= ~((BYTE)i); /* XOR with inv. of counter */
+ reg3 ^= (u8)i; /* XOR with counter */
+ reg2 ^= ~((u8)i); /* XOR with inv. of counter */
}
}
@@ -127,22 +127,22 @@ void calculate_ecc(BYTE *table, BYTE *data, BYTE *ecc1, BYTE *ecc2, BYTE *ecc3)
* ecc2; * LP07,LP06,LP05,...
* ecc3; * CP5,CP4,CP3,...,"1","1"
*/
-BYTE correct_data(BYTE *data, BYTE *eccdata, BYTE ecc1, BYTE ecc2, BYTE ecc3)
+u8 correct_data(u8 *data, u8 *eccdata, u8 ecc1, u8 ecc2, u8 ecc3)
{
- DWORD l; /* Working to check d */
- DWORD d; /* Result of comparison */
- DWORD i; /* For counting */
- BYTE d1, d2, d3; /* Result of comparison */
- BYTE a; /* Working for add */
- BYTE add; /* Byte address of cor. DATA */
- BYTE b; /* Working for bit */
- BYTE bit; /* Bit address of cor. DATA */
+ u32 l; /* Working to check d */
+ u32 d; /* Result of comparison */
+ u32 i; /* For counting */
+ u8 d1, d2, d3; /* Result of comparison */
+ u8 a; /* Working for add */
+ u8 add; /* Byte address of cor. DATA */
+ u8 b; /* Working for bit */
+ u8 bit; /* Bit address of cor. DATA */
d1 = ecc1^eccdata[1]; d2 = ecc2^eccdata[0]; /* Compare LP's */
d3 = ecc3^eccdata[2]; /* Compare CP's */
- d = ((DWORD)d1<<16) /* Result of comparison */
- +((DWORD)d2<<8)
- +(DWORD)d3;
+ d = ((u32)d1<<16) /* Result of comparison */
+ +((u32)d2<<8)
+ +(u32)d3;
if (d == 0)
return 0; /* If No error, return */
@@ -188,9 +188,9 @@ BYTE correct_data(BYTE *data, BYTE *eccdata, BYTE ecc1, BYTE ecc2, BYTE ecc3)
return 3; /* Uncorrectable error */
}
-int _Correct_D_SwECC(BYTE *buf, BYTE *redundant_ecc, BYTE *calculate_ecc)
+int _Correct_D_SwECC(u8 *buf, u8 *redundant_ecc, u8 *calculate_ecc)
{
- DWORD err;
+ u32 err;
err = correct_data(buf, redundant_ecc, *(calculate_ecc + 1),
*(calculate_ecc), *(calculate_ecc + 2));
@@ -203,7 +203,7 @@ int _Correct_D_SwECC(BYTE *buf, BYTE *redundant_ecc, BYTE *calculate_ecc)
return -1;
}
-void _Calculate_D_SwECC(BYTE *buf, BYTE *ecc)
+void _Calculate_D_SwECC(u8 *buf, u8 *ecc)
{
calculate_ecc(ecctable, buf, ecc+1, ecc+0, ecc+2);
}
diff --git a/drivers/staging/keucr/smilmain.c b/drivers/staging/keucr/smilmain.c
index 09d07e05102f..fc7cbc6f8a45 100644
--- a/drivers/staging/keucr/smilmain.c
+++ b/drivers/staging/keucr/smilmain.c
@@ -4,11 +4,11 @@
#include "smcommon.h"
#include "smil.h"
-static int Conv_D_MediaAddr(struct us_data *, DWORD);
+static int Conv_D_MediaAddr(struct us_data *, u32);
static int Inc_D_MediaAddr(struct us_data *);
-static int Media_D_ReadOneSect(struct us_data *, WORD, BYTE *);
+static int Media_D_ReadOneSect(struct us_data *, u16, u8 *);
-static int Copy_D_BlockAll(struct us_data *, DWORD);
+static int Copy_D_BlockAll(struct us_data *, u32);
static int Assign_D_WriteBlock(void);
static int Release_D_ReadBlock(struct us_data *);
@@ -16,7 +16,7 @@ static int Release_D_WriteBlock(struct us_data *);
static int Release_D_CopySector(struct us_data *);
static int Copy_D_PhyOneSect(struct us_data *);
-static int Read_D_PhyOneSect(struct us_data *, WORD, BYTE *);
+static int Read_D_PhyOneSect(struct us_data *, u16, u8 *);
static int Erase_D_PhyOneBlock(struct us_data *);
static int Set_D_PhyFmtValue(struct us_data *);
@@ -25,24 +25,24 @@ static int Make_D_LogTable(struct us_data *);
static int MarkFail_D_PhyOneBlock(struct us_data *);
-static DWORD ErrCode;
-static BYTE WorkBuf[SECTSIZE];
-static BYTE Redundant[REDTSIZE];
-static BYTE WorkRedund[REDTSIZE];
+static u32 ErrCode;
+static u8 WorkBuf[SECTSIZE];
+static u8 Redundant[REDTSIZE];
+static u8 WorkRedund[REDTSIZE];
/* 128 x 1000, Log2Phy[MAX_ZONENUM][MAX_LOGBLOCK]; */
-static WORD *Log2Phy[MAX_ZONENUM];
-static BYTE Assign[MAX_ZONENUM][MAX_BLOCKNUM / 8];
-static WORD AssignStart[MAX_ZONENUM];
-WORD ReadBlock;
-WORD WriteBlock;
-DWORD MediaChange;
-static DWORD SectCopyMode;
+static u16 *Log2Phy[MAX_ZONENUM];
+static u8 Assign[MAX_ZONENUM][MAX_BLOCKNUM / 8];
+static u16 AssignStart[MAX_ZONENUM];
+u16 ReadBlock;
+u16 WriteBlock;
+u32 MediaChange;
+static u32 SectCopyMode;
/* BIT Control Macro */
-static BYTE BitData[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
-#define Set_D_Bit(a, b) (a[(BYTE)((b) / 8)] |= BitData[(b) % 8])
-#define Clr_D_Bit(a, b) (a[(BYTE)((b) / 8)] &= ~BitData[(b) % 8])
-#define Chk_D_Bit(a, b) (a[(BYTE)((b) / 8)] & BitData[(b) % 8])
+static u8 BitData[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
+#define Set_D_Bit(a, b) (a[(u8)((b) / 8)] |= BitData[(b) % 8])
+#define Clr_D_Bit(a, b) (a[(u8)((b) / 8)] &= ~BitData[(b) % 8])
+#define Chk_D_Bit(a, b) (a[(u8)((b) / 8)] & BitData[(b) % 8])
/* ----- SM_FreeMem() ------------------------------------------------- */
int SM_FreeMem(void)
@@ -62,9 +62,9 @@ int SM_FreeMem(void)
/* SmartMedia Read/Write/Erase Function */
/* ----- Media_D_ReadSector() ------------------------------------------- */
-int Media_D_ReadSector(struct us_data *us, DWORD start, WORD count, BYTE *buf)
+int Media_D_ReadSector(struct us_data *us, u32 start, u16 count, u8 *buf)
{
- WORD len, bn;
+ u16 len, bn;
if (Conv_D_MediaAddr(us, start))
return ErrCode;
@@ -97,9 +97,9 @@ int Media_D_ReadSector(struct us_data *us, DWORD start, WORD count, BYTE *buf)
}
/* here */
/* ----- Media_D_CopySector() ------------------------------------------ */
-int Media_D_CopySector(struct us_data *us, DWORD start, WORD count, BYTE *buf)
+int Media_D_CopySector(struct us_data *us, u32 start, u16 count, u8 *buf)
{
- WORD len, bn;
+ u16 len, bn;
/* pr_info("Media_D_CopySector !!!\n"); */
if (Conv_D_MediaAddr(us, start))
@@ -186,12 +186,12 @@ int Check_D_MediaFmt(struct us_data *us)
/* SmartMedia Physical Address Control Subroutine */
/* ----- Conv_D_MediaAddr() --------------------------------------------- */
-static int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
+static int Conv_D_MediaAddr(struct us_data *us, u32 addr)
{
- DWORD temp;
+ u32 temp;
temp = addr / Ssfdc.MaxSectors;
- Media.Zone = (BYTE) (temp / Ssfdc.MaxLogBlocks);
+ Media.Zone = (u8) (temp / Ssfdc.MaxLogBlocks);
if (Log2Phy[Media.Zone] == NULL) {
if (Make_D_LogTable(us)) {
@@ -200,8 +200,8 @@ static int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
}
}
- Media.Sector = (BYTE) (addr % Ssfdc.MaxSectors);
- Media.LogBlock = (WORD) (temp % Ssfdc.MaxLogBlocks);
+ Media.Sector = (u8) (addr % Ssfdc.MaxSectors);
+ Media.LogBlock = (u16) (temp % Ssfdc.MaxLogBlocks);
if (Media.Zone < Ssfdc.MaxZones) {
Clr_D_RedundantData(Redundant);
@@ -217,7 +217,7 @@ static int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
/* ----- Inc_D_MediaAddr() ---------------------------------------------- */
static int Inc_D_MediaAddr(struct us_data *us)
{
- WORD LogBlock = Media.LogBlock;
+ u16 LogBlock = Media.LogBlock;
if (++Media.Sector < Ssfdc.MaxSectors)
return SMSUCCESS;
@@ -265,9 +265,9 @@ static int Inc_D_MediaAddr(struct us_data *us)
/* SmartMedia Read/Write Subroutine with Retry */
/* ----- Media_D_ReadOneSect() ------------------------------------------ */
-static int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
+static int Media_D_ReadOneSect(struct us_data *us, u16 count, u8 *buf)
{
- DWORD err, retry;
+ u32 err, retry;
if (!Read_D_PhyOneSect(us, count, buf))
return SMSUCCESS;
@@ -309,9 +309,9 @@ static int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
/* SmartMedia Physical Sector Data Copy Subroutine */
/* ----- Copy_D_BlockAll() ---------------------------------------------- */
-static int Copy_D_BlockAll(struct us_data *us, DWORD mode)
+static int Copy_D_BlockAll(struct us_data *us, u32 mode)
{
- BYTE sect;
+ u8 sect;
sect = Media.Sector;
@@ -381,7 +381,7 @@ static int Assign_D_WriteBlock(void)
/* ----- Release_D_ReadBlock() ------------------------------------------ */
static int Release_D_ReadBlock(struct us_data *us)
{
- DWORD mode;
+ u32 mode;
mode = SectCopyMode;
SectCopyMode = COMPLETED;
@@ -430,7 +430,7 @@ static int Release_D_WriteBlock(struct us_data *us)
static int Copy_D_PhyOneSect(struct us_data *us)
{
int i;
- DWORD err, retry;
+ u32 err, retry;
/* pr_info("Copy_D_PhyOneSect --- Sector = %x\n", Media.Sector); */
if (ReadBlock != NO_ASSIGN) {
@@ -504,10 +504,10 @@ static int Copy_D_PhyOneSect(struct us_data *us)
/* SmartMedia Physical Sector Read/Write/Erase Subroutine */
/* ----- Read_D_PhyOneSect() -------------------------------------------- */
-static int Read_D_PhyOneSect(struct us_data *us, WORD count, BYTE *buf)
+static int Read_D_PhyOneSect(struct us_data *us, u16 count, u8 *buf)
{
int i;
- DWORD retry;
+ u32 retry;
if (Media.PhyBlock == NO_ASSIGN) {
for (i = 0; i < SECTSIZE; i++)
@@ -637,10 +637,10 @@ static int Search_D_CIS(struct us_data *us)
/* ----- Make_D_LogTable() ---------------------------------------------- */
static int Make_D_LogTable(struct us_data *us)
{
- WORD phyblock, logblock;
+ u16 phyblock, logblock;
if (Log2Phy[Media.Zone] == NULL) {
- Log2Phy[Media.Zone] = kmalloc(MAX_LOGBLOCK * sizeof(WORD),
+ Log2Phy[Media.Zone] = kmalloc(MAX_LOGBLOCK * sizeof(u16),
GFP_KERNEL);
/* pr_info("ExAllocatePool Zone = %x, Addr = %x\n",
Media.Zone, Log2Phy[Media.Zone]); */
@@ -693,7 +693,7 @@ static int Make_D_LogTable(struct us_data *us)
phyblock = Media.PhyBlock;
logblock = Media.LogBlock;
- Media.Sector = (BYTE)(Ssfdc.MaxSectors - 1);
+ Media.Sector = (u8)(Ssfdc.MaxSectors - 1);
if (Ssfdc_D_ReadRedtData(us, Redundant)) {
Ssfdc_D_Reset(us);
@@ -738,7 +738,7 @@ static int Make_D_LogTable(struct us_data *us)
/* ----- MarkFail_D_PhyOneBlock() --------------------------------------- */
static int MarkFail_D_PhyOneBlock(struct us_data *us)
{
- BYTE sect;
+ u8 sect;
sect = Media.Sector;
Set_D_FailBlock(WorkRedund);
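
Make_D_LogTable() in smilmain.c still allocates the zone table with an open-coded MAX_LOGBLOCK * sizeof(u16) multiplication. A possible follow-up cleanup (not part of this patch, and assuming a kernel that provides kmalloc_array()) would let the allocator check the multiplication for overflow:

#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical helper; MAX_LOGBLOCK comes from the driver's smil.h. */
static u16 *demo_alloc_zone_table(void)
{
	/* kmalloc_array() returns NULL if the size calculation would overflow */
	return kmalloc_array(MAX_LOGBLOCK, sizeof(u16), GFP_KERNEL);
}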
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 16da9a9b4033..44ced8265039 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -6,16 +6,16 @@
#include "smcommon.h"
#include "smil.h"
-static BYTE _Check_D_DevCode(BYTE);
-static DWORD ErrXDCode;
-static BYTE IsSSFDCCompliance;
-static BYTE IsXDCompliance;
+static u8 _Check_D_DevCode(u8);
+static u32 ErrXDCode;
+static u8 IsSSFDCCompliance;
+static u8 IsXDCompliance;
struct keucr_media_info Ssfdc;
struct keucr_media_address Media;
struct keucr_media_area CisArea;
-static BYTE EccBuf[6];
+static u8 EccBuf[6];
#define EVEN 0 /* Even Page for 256byte/page */
#define ODD 1 /* Odd Page for 256byte/page */
@@ -24,7 +24,7 @@ static BYTE EccBuf[6];
/* SmartMedia Redundant buffer data Control Subroutine
*----- Check_D_DataBlank() --------------------------------------------
*/
-int Check_D_DataBlank(BYTE *redundant)
+int Check_D_DataBlank(u8 *redundant)
{
char i;
@@ -36,7 +36,7 @@ int Check_D_DataBlank(BYTE *redundant)
}
/* ----- Check_D_FailBlock() -------------------------------------------- */
-int Check_D_FailBlock(BYTE *redundant)
+int Check_D_FailBlock(u8 *redundant)
{
redundant += REDT_BLOCK;
@@ -51,7 +51,7 @@ int Check_D_FailBlock(BYTE *redundant)
}
/* ----- Check_D_DataStatus() ------------------------------------------- */
-int Check_D_DataStatus(BYTE *redundant)
+int Check_D_DataStatus(u8 *redundant)
{
redundant += REDT_DATA;
@@ -70,14 +70,14 @@ int Check_D_DataStatus(BYTE *redundant)
}
/* ----- Load_D_LogBlockAddr() ------------------------------------------ */
-int Load_D_LogBlockAddr(BYTE *redundant)
+int Load_D_LogBlockAddr(u8 *redundant)
{
- WORD addr1, addr2;
+ u16 addr1, addr2;
- addr1 = (WORD)*(redundant + REDT_ADDR1H)*0x0100 +
- (WORD)*(redundant + REDT_ADDR1L);
- addr2 = (WORD)*(redundant + REDT_ADDR2H)*0x0100 +
- (WORD)*(redundant + REDT_ADDR2L);
+ addr1 = (u16)*(redundant + REDT_ADDR1H)*0x0100 +
+ (u16)*(redundant + REDT_ADDR1L);
+ addr2 = (u16)*(redundant + REDT_ADDR2H)*0x0100 +
+ (u16)*(redundant + REDT_ADDR2L);
if (addr1 == addr2)
if ((addr1 & 0xF000) == 0x1000) {
@@ -85,7 +85,7 @@ int Load_D_LogBlockAddr(BYTE *redundant)
return SMSUCCESS;
}
- if (hweight16((WORD)(addr1^addr2)) != 0x01)
+ if (hweight16((u16)(addr1^addr2)) != 0x01)
return ERROR;
if ((addr1 & 0xF000) == 0x1000)
@@ -104,7 +104,7 @@ int Load_D_LogBlockAddr(BYTE *redundant)
}
/* ----- Clr_D_RedundantData() ------------------------------------------ */
-void Clr_D_RedundantData(BYTE *redundant)
+void Clr_D_RedundantData(u8 *redundant)
{
char i;
@@ -113,9 +113,9 @@ void Clr_D_RedundantData(BYTE *redundant)
}
/* ----- Set_D_LogBlockAddr() ------------------------------------------- */
-void Set_D_LogBlockAddr(BYTE *redundant)
+void Set_D_LogBlockAddr(u8 *redundant)
{
- WORD addr;
+ u16 addr;
*(redundant + REDT_BLOCK) = 0xFF;
*(redundant + REDT_DATA) = 0xFF;
@@ -125,20 +125,20 @@ void Set_D_LogBlockAddr(BYTE *redundant)
addr++;
*(redundant + REDT_ADDR1H) = *(redundant + REDT_ADDR2H) =
- (BYTE)(addr / 0x0100);
- *(redundant + REDT_ADDR1L) = *(redundant + REDT_ADDR2L) = (BYTE)addr;
+ (u8)(addr / 0x0100);
+ *(redundant + REDT_ADDR1L) = *(redundant + REDT_ADDR2L) = (u8)addr;
}
/*----- Set_D_FailBlock() ---------------------------------------------- */
-void Set_D_FailBlock(BYTE *redundant)
+void Set_D_FailBlock(u8 *redundant)
{
char i;
for (i = 0; i < REDTSIZE; i++)
- *redundant++ = (BYTE)((i == REDT_BLOCK) ? 0xF0 : 0xFF);
+ *redundant++ = (u8)((i == REDT_BLOCK) ? 0xF0 : 0xFF);
}
/* ----- Set_D_DataStaus() ---------------------------------------------- */
-void Set_D_DataStaus(BYTE *redundant)
+void Set_D_DataStaus(u8 *redundant)
{
redundant += REDT_DATA;
*redundant = 0x00;
@@ -154,10 +154,10 @@ void Ssfdc_D_Reset(struct us_data *us)
}
/* ----- Ssfdc_D_ReadCisSect() ------------------------------------------ */
-int Ssfdc_D_ReadCisSect(struct us_data *us, BYTE *buf, BYTE *redundant)
+int Ssfdc_D_ReadCisSect(struct us_data *us, u8 *buf, u8 *redundant)
{
- BYTE zone, sector;
- WORD block;
+ u8 zone, sector;
+ u16 block;
zone = Media.Zone; block = Media.PhyBlock; sector = Media.Sector;
Media.Zone = 0;
@@ -177,11 +177,11 @@ int Ssfdc_D_ReadCisSect(struct us_data *us, BYTE *buf, BYTE *redundant)
/* 6250 CMD 1 */
/* ----- Ssfdc_D_ReadSect() --------------------------------------------- */
-int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
+int Ssfdc_D_ReadSect(struct us_data *us, u8 *buf, u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
+ u16 addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -190,8 +190,8 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors + Media.Sector;
/* Read sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
@@ -200,8 +200,8 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x02;
- bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[4] = (u8)addr;
+ bcb->CDB[3] = (u8)(addr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
result = ENE_SendScsiCmd(us, FDIR_READ, buf, 0);
@@ -215,8 +215,8 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
- bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[4] = (u8)addr;
+ bcb->CDB[3] = (u8)(addr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
bcb->CDB[8] = 0;
bcb->CDB[9] = 1;
@@ -229,12 +229,12 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
}
/* ----- Ssfdc_D_ReadBlock() --------------------------------------------- */
-int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,
- BYTE *redundant)
+int Ssfdc_D_ReadBlock(struct us_data *us, u16 count, u8 *buf,
+ u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
+ u16 addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -243,8 +243,8 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors + Media.Sector;
/* Read sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
@@ -253,8 +253,8 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x02;
- bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[4] = (u8)addr;
+ bcb->CDB[3] = (u8)(addr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
result = ENE_SendScsiCmd(us, FDIR_READ, buf, 0);
@@ -268,8 +268,8 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
- bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[4] = (u8)addr;
+ bcb->CDB[3] = (u8)(addr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
bcb->CDB[8] = 0;
bcb->CDB[9] = 1;
@@ -283,12 +283,12 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,
/* ----- Ssfdc_D_CopyBlock() -------------------------------------------- */
-int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,
- BYTE *redundant)
+int Ssfdc_D_CopyBlock(struct us_data *us, u16 count, u8 *buf,
+ u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD ReadAddr, WriteAddr;
+ u16 ReadAddr, WriteAddr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -297,10 +297,10 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,
return USB_STOR_TRANSPORT_ERROR;
}
- ReadAddr = (WORD)Media.Zone*Ssfdc.MaxBlocks + ReadBlock;
- ReadAddr = ReadAddr*(WORD)Ssfdc.MaxSectors;
- WriteAddr = (WORD)Media.Zone*Ssfdc.MaxBlocks + WriteBlock;
- WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors;
+ ReadAddr = (u16)Media.Zone*Ssfdc.MaxBlocks + ReadBlock;
+ ReadAddr = ReadAddr*(u16)Ssfdc.MaxSectors;
+ WriteAddr = (u16)Media.Zone*Ssfdc.MaxBlocks + WriteBlock;
+ WriteAddr = WriteAddr*(u16)Ssfdc.MaxSectors;
/* Write sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
@@ -309,16 +309,16 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,
bcb->Flags = 0x00;
bcb->CDB[0] = 0xF0;
bcb->CDB[1] = 0x08;
- bcb->CDB[7] = (BYTE)WriteAddr;
- bcb->CDB[6] = (BYTE)(WriteAddr / 0x0100);
+ bcb->CDB[7] = (u8)WriteAddr;
+ bcb->CDB[6] = (u8)(WriteAddr / 0x0100);
bcb->CDB[5] = Media.Zone / 2;
bcb->CDB[8] = *(redundant + REDT_ADDR1H);
bcb->CDB[9] = *(redundant + REDT_ADDR1L);
bcb->CDB[10] = Media.Sector;
if (ReadBlock != NO_ASSIGN) {
- bcb->CDB[4] = (BYTE)ReadAddr;
- bcb->CDB[3] = (BYTE)(ReadAddr / 0x0100);
+ bcb->CDB[4] = (u8)ReadAddr;
+ bcb->CDB[3] = (u8)(ReadAddr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
} else
bcb->CDB[11] = 1;
@@ -331,11 +331,11 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,
}
/* ----- Ssfdc_D_WriteSectForCopy() ------------------------------------- */
-int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
+int Ssfdc_D_WriteSectForCopy(struct us_data *us, u8 *buf, u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
+ u16 addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -345,8 +345,8 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors + Media.Sector;
/* Write sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
@@ -355,8 +355,8 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
bcb->Flags = 0x00;
bcb->CDB[0] = 0xF0;
bcb->CDB[1] = 0x04;
- bcb->CDB[7] = (BYTE)addr;
- bcb->CDB[6] = (BYTE)(addr / 0x0100);
+ bcb->CDB[7] = (u8)addr;
+ bcb->CDB[6] = (u8)(addr / 0x0100);
bcb->CDB[5] = Media.Zone / 2;
bcb->CDB[8] = *(redundant + REDT_ADDR1H);
bcb->CDB[9] = *(redundant + REDT_ADDR1L);
@@ -374,7 +374,7 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
+ u16 addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -383,8 +383,8 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -392,8 +392,8 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF2;
bcb->CDB[1] = 0x06;
- bcb->CDB[7] = (BYTE)addr;
- bcb->CDB[6] = (BYTE)(addr / 0x0100);
+ bcb->CDB[7] = (u8)addr;
+ bcb->CDB[6] = (u8)(addr / 0x0100);
bcb->CDB[5] = Media.Zone / 2;
result = ENE_SendScsiCmd(us, FDIR_READ, NULL, 0);
@@ -405,12 +405,12 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
/* 6250 CMD 2 */
/*----- Ssfdc_D_ReadRedtData() ----------------------------------------- */
-int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
+int Ssfdc_D_ReadRedtData(struct us_data *us, u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
- BYTE *buf;
+ u16 addr;
+ u8 *buf;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -419,8 +419,8 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors + Media.Sector;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -428,8 +428,8 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
- bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[4] = (u8)addr;
+ bcb->CDB[3] = (u8)(addr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
bcb->CDB[8] = 0;
bcb->CDB[9] = 1;
@@ -446,11 +446,11 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
/* 6250 CMD 4 */
/* ----- Ssfdc_D_WriteRedtData() ---------------------------------------- */
-int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
+int Ssfdc_D_WriteRedtData(struct us_data *us, u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
+ u16 addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -459,8 +459,8 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors + Media.Sector;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -468,8 +468,8 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF2;
bcb->CDB[1] = 0x05;
- bcb->CDB[7] = (BYTE)addr;
- bcb->CDB[6] = (BYTE)(addr / 0x0100);
+ bcb->CDB[7] = (u8)addr;
+ bcb->CDB[6] = (u8)(addr / 0x0100);
bcb->CDB[5] = Media.Zone / 2;
bcb->CDB[8] = *(redundant + REDT_ADDR1H);
bcb->CDB[9] = *(redundant + REDT_ADDR1L);
@@ -492,7 +492,7 @@ int Ssfdc_D_CheckStatus(void)
/* SmartMedia ID Code Check & Mode Set Subroutine
* ----- Set_D_SsfdcModel() ---------------------------------------------
*/
-int Set_D_SsfdcModel(BYTE dcode)
+int Set_D_SsfdcModel(u8 dcode)
{
switch (_Check_D_DevCode(dcode)) {
case SSFDC1MB:
@@ -600,7 +600,7 @@ int Set_D_SsfdcModel(BYTE dcode)
}
/* ----- _Check_D_DevCode() --------------------------------------------- */
-BYTE _Check_D_DevCode(BYTE dcode)
+static u8 _Check_D_DevCode(u8 dcode)
{
switch (dcode) {
case 0x6E:
@@ -630,21 +630,21 @@ BYTE _Check_D_DevCode(BYTE dcode)
/* SmartMedia ECC Control Subroutine
* ----- Check_D_ReadError() ----------------------------------------------
*/
-int Check_D_ReadError(BYTE *redundant)
+int Check_D_ReadError(u8 *redundant)
{
return SMSUCCESS;
}
/* ----- Check_D_Correct() ---------------------------------------------- */
-int Check_D_Correct(BYTE *buf, BYTE *redundant)
+int Check_D_Correct(u8 *buf, u8 *redundant)
{
return SMSUCCESS;
}
/* ----- Check_D_CISdata() ---------------------------------------------- */
-int Check_D_CISdata(BYTE *buf, BYTE *redundant)
+int Check_D_CISdata(u8 *buf, u8 *redundant)
{
- BYTE cis[] = {0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02,
+ u8 cis[] = {0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02,
0xDF, 0x01, 0x20};
int cis_len = sizeof(cis);
@@ -669,7 +669,7 @@ int Check_D_CISdata(BYTE *buf, BYTE *redundant)
}
/* ----- Set_D_RightECC() ---------------------------------------------- */
-void Set_D_RightECC(BYTE *redundant)
+void Set_D_RightECC(u8 *redundant)
{
/* Driver ECC Check */
return;
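
Load_D_LogBlockAddr() in smilsub.c keeps the SmartMedia rule that the two redundant copies of the logical block address are accepted when they agree or differ in at most one bit; hweight16() counts the set bits of their XOR. A simplified illustration of that check (the real code also decides which copy to trust):

#include <linux/bitops.h>
#include <linux/types.h>

static bool demo_addrs_agree(u16 addr1, u16 addr2)
{
	/* identical, or differing in a single bit, counts as a match */
	return hweight16(addr1 ^ addr2) <= 1;
}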
diff --git a/drivers/staging/keucr/smscsi.c b/drivers/staging/keucr/smscsi.c
index 5c03eca4dba8..20858f6777c8 100644
--- a/drivers/staging/keucr/smscsi.c
+++ b/drivers/staging/keucr/smscsi.c
@@ -68,7 +68,7 @@ static int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
/* ----- SM_SCSI_Inquiry() --------------------------------------------- */
static int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
{
- BYTE data_ptr[36] = {0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ u8 data_ptr[36] = {0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00,
0x55, 0x53, 0x42, 0x32, 0x2E, 0x30, 0x20,
0x20, 0x43, 0x61, 0x72, 0x64, 0x52, 0x65,
0x61, 0x64, 0x65, 0x72, 0x20, 0x20, 0x20,
@@ -82,9 +82,9 @@ static int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
/* ----- SM_SCSI_Mode_Sense() ------------------------------------------ */
static int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
{
- BYTE mediaNoWP[12] = {0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
+ u8 mediaNoWP[12] = {0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
- BYTE mediaWP[12] = {0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
+ u8 mediaWP[12] = {0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
if (us->SM_Status.WtP)
@@ -101,9 +101,9 @@ static int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
{
unsigned int offset = 0;
struct scatterlist *sg = NULL;
- DWORD bl_num;
- WORD bl_len;
- BYTE buf[8];
+ u32 bl_num;
+ u16 bl_len;
+ u8 buf[8];
dev_dbg(&us->pusb_dev->dev, "SM_SCSI_Read_Capacity\n");
@@ -132,13 +132,13 @@ static int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
static int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
{
int result = 0;
- PBYTE Cdb = srb->cmnd;
- DWORD bn = ((Cdb[2] << 24) & 0xff000000) |
+ u8 *Cdb = srb->cmnd;
+ u32 bn = ((Cdb[2] << 24) & 0xff000000) |
((Cdb[3] << 16) & 0x00ff0000) |
((Cdb[4] << 8) & 0x0000ff00) |
((Cdb[5] << 0) & 0x000000ff);
- WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
- DWORD blenByte = blen * 0x200;
+ u16 blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
+ u32 blenByte = blen * 0x200;
void *buf;
@@ -164,13 +164,13 @@ static int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
static int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
{
int result = 0;
- PBYTE Cdb = srb->cmnd;
- DWORD bn = ((Cdb[2] << 24) & 0xff000000) |
+ u8 *Cdb = srb->cmnd;
+ u32 bn = ((Cdb[2] << 24) & 0xff000000) |
((Cdb[3] << 16) & 0x00ff0000) |
((Cdb[4] << 8) & 0x0000ff00) |
((Cdb[5] << 0) & 0x000000ff);
- WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
- DWORD blenByte = blen * 0x200;
+ u16 blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
+ u32 blenByte = blen * 0x200;
void *buf;
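
SM_SCSI_Read()/SM_SCSI_Write() in smscsi.c still assemble the big-endian LBA and transfer length from the CDB by hand. A possible further cleanup (not part of this patch) is to use the unaligned big-endian helpers; a sketch with a hypothetical helper name:

#include <asm/unaligned.h>
#include <linux/types.h>

static void demo_parse_cdb(const u8 *cdb, u32 *lba, u16 *blocks)
{
	*lba    = get_unaligned_be32(&cdb[2]);	/* CDB bytes 2..5 */
	*blocks = get_unaligned_be16(&cdb[7]);	/* CDB bytes 7..8 */
}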
diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c
index aeb2186d62ca..ae9414755d2f 100644
--- a/drivers/staging/keucr/transport.c
+++ b/drivers/staging/keucr/transport.c
@@ -83,8 +83,8 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
*/
static void usb_stor_print_cmd(struct us_data *us, struct scsi_cmnd *srb)
{
- PBYTE Cdb = srb->cmnd;
- DWORD cmd = Cdb[0];
+ u8 *Cdb = srb->cmnd;
+ u32 cmd = Cdb[0];
switch (cmd) {
case TEST_UNIT_READY:
@@ -545,8 +545,8 @@ Handle_Errors:
*/
void BuildSenseBuffer(struct scsi_cmnd *srb, int SrbStatus)
{
- BYTE *buf = srb->sense_buffer;
- BYTE asc;
+ u8 *buf = srb->sense_buffer;
+ u8 asc;
pr_info("transport --- BuildSenseBuffer\n");
switch (SrbStatus) {
diff --git a/drivers/staging/keucr/transport.h b/drivers/staging/keucr/transport.h
index df34474ae568..abd8e5a3dd6e 100644
--- a/drivers/staging/keucr/transport.h
+++ b/drivers/staging/keucr/transport.h
@@ -58,9 +58,9 @@ extern void usb_stor_set_xfer_buf(struct us_data*, unsigned char *buffer,
extern void ENE_stor_invoke_transport(struct scsi_cmnd *, struct us_data *);
extern int ENE_InitMedia(struct us_data *);
extern int ENE_SMInit(struct us_data *);
-extern int ENE_SendScsiCmd(struct us_data*, BYTE, void*, int);
-extern int ENE_LoadBinCode(struct us_data*, BYTE);
-extern int ene_read_byte(struct us_data*, WORD index, void *buf);
+extern int ENE_SendScsiCmd(struct us_data*, u8, void*, int);
+extern int ENE_LoadBinCode(struct us_data*, u8);
+extern int ene_read_byte(struct us_data*, u16 index, void *buf);
extern int ENE_Read_Data(struct us_data*, void *buf, unsigned int length);
extern int ENE_Write_Data(struct us_data*, void *buf, unsigned int length);
extern void BuildSenseBuffer(struct scsi_cmnd *, int);
diff --git a/drivers/staging/keucr/usb.c b/drivers/staging/keucr/usb.c
index 3e3ca6365fbc..12ebde7315cd 100644
--- a/drivers/staging/keucr/usb.c
+++ b/drivers/staging/keucr/usb.c
@@ -50,7 +50,7 @@ static int eucr_suspend(struct usb_interface *iface, pm_message_t message)
static int eucr_resume(struct usb_interface *iface)
{
- BYTE tmp = 0;
+ u8 tmp = 0;
struct us_data *us = usb_get_intfdata(iface);
pr_info("--- eucr_resume---\n");
@@ -71,7 +71,7 @@ static int eucr_resume(struct usb_interface *iface)
static int eucr_reset_resume(struct usb_interface *iface)
{
- BYTE tmp = 0;
+ u8 tmp = 0;
struct us_data *us = usb_get_intfdata(iface);
pr_info("--- eucr_reset_resume---\n");
@@ -528,7 +528,7 @@ static int eucr_probe(struct usb_interface *intf,
struct Scsi_Host *host;
struct us_data *us;
int result;
- BYTE MiscReg03 = 0;
+ u8 MiscReg03 = 0;
struct task_struct *th;
pr_info("usb --- eucr_probe\n");
diff --git a/drivers/staging/keucr/usb.h b/drivers/staging/keucr/usb.h
index d665af177b96..e894f840c708 100644
--- a/drivers/staging/keucr/usb.h
+++ b/drivers/staging/keucr/usb.h
@@ -52,34 +52,34 @@ struct us_unusual_dev {
#define FDIR_READ 1
struct keucr_sd_status {
- BYTE Insert:1;
- BYTE Ready:1;
- BYTE MediaChange:1;
- BYTE IsMMC:1;
- BYTE HiCapacity:1;
- BYTE HiSpeed:1;
- BYTE WtP:1;
- BYTE Reserved:1;
+ u8 Insert:1;
+ u8 Ready:1;
+ u8 MediaChange:1;
+ u8 IsMMC:1;
+ u8 HiCapacity:1;
+ u8 HiSpeed:1;
+ u8 WtP:1;
+ u8 Reserved:1;
};
struct keucr_ms_status {
- BYTE Insert:1;
- BYTE Ready:1;
- BYTE MediaChange:1;
- BYTE IsMSPro:1;
- BYTE IsMSPHG:1;
- BYTE Reserved1:1;
- BYTE WtP:1;
- BYTE Reserved2:1;
+ u8 Insert:1;
+ u8 Ready:1;
+ u8 MediaChange:1;
+ u8 IsMSPro:1;
+ u8 IsMSPHG:1;
+ u8 Reserved1:1;
+ u8 WtP:1;
+ u8 Reserved2:1;
};
struct keucr_sm_status {
- BYTE Insert:1;
- BYTE Ready:1;
- BYTE MediaChange:1;
- BYTE Reserved:3;
- BYTE WtP:1;
- BYTE IsMS:1;
+ u8 Insert:1;
+ u8 Ready:1;
+ u8 MediaChange:1;
+ u8 Reserved:3;
+ u8 WtP:1;
+ u8 IsMS:1;
};
/* SD Block Length */
@@ -184,38 +184,38 @@ struct us_data {
/* ----- SD Control Data ---------------- */
/* SD_REGISTER SD_Regs; */
- WORD SD_Block_Mult;
- BYTE SD_READ_BL_LEN;
- WORD SD_C_SIZE;
- BYTE SD_C_SIZE_MULT;
+ u16 SD_Block_Mult;
+ u8 SD_READ_BL_LEN;
+ u16 SD_C_SIZE;
+ u8 SD_C_SIZE_MULT;
/* SD/MMC New spec. */
- BYTE SD_SPEC_VER;
- BYTE SD_CSD_VER;
- BYTE SD20_HIGH_CAPACITY;
- DWORD HC_C_SIZE;
- BYTE MMC_SPEC_VER;
- BYTE MMC_BusWidth;
- BYTE MMC_HIGH_CAPACITY;
+ u8 SD_SPEC_VER;
+ u8 SD_CSD_VER;
+ u8 SD20_HIGH_CAPACITY;
+ u32 HC_C_SIZE;
+ u8 MMC_SPEC_VER;
+ u8 MMC_BusWidth;
+ u8 MMC_HIGH_CAPACITY;
/* ----- MS Control Data ---------------- */
- BOOLEAN MS_SWWP;
- DWORD MSP_TotalBlock;
+ bool MS_SWWP;
+ u32 MSP_TotalBlock;
/* MS_LibControl MS_Lib; */
- BOOLEAN MS_IsRWPage;
- WORD MS_Model;
+ bool MS_IsRWPage;
+ u16 MS_Model;
/* ----- SM Control Data ---------------- */
- BYTE SM_DeviceID;
- BYTE SM_CardID;
+ u8 SM_DeviceID;
+ u8 SM_CardID;
- PBYTE testbuf;
- BYTE BIN_FLAG;
- DWORD bl_num;
+ u8 *testbuf;
+ u8 BIN_FLAG;
+ u32 bl_num;
int SrbStatus;
/* ------Power Managerment --------------- */
- BOOLEAN Power_IsResum;
+ bool Power_IsResum;
};
/* Convert between us_data and the corresponding Scsi_Host */
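Note: the keucr churn above is a straight type cleanup, replacing the driver's Windows-style aliases with the kernel's fixed-width types. A minimal sketch of the mapping, assuming the usual definitions of the removed aliases (they are not shown in these hunks):

/* Assumed legacy aliases (illustrative only, not taken from the patch) */
typedef unsigned char  BYTE;    /* now written as u8   */
typedef unsigned short WORD;    /* now written as u16  */
typedef unsigned int   DWORD;   /* now written as u32  */
typedef unsigned char *PBYTE;   /* now written as u8 * */
typedef int            BOOLEAN; /* now written as bool */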
diff --git a/drivers/staging/line6/audio.c b/drivers/staging/line6/audio.c
index a92e21f7d55b..171d80c1b020 100644
--- a/drivers/staging/line6/audio.c
+++ b/drivers/staging/line6/audio.c
@@ -24,8 +24,9 @@ int line6_init_audio(struct usb_line6 *line6)
struct snd_card *card;
int err;
- err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
- THIS_MODULE, 0, &card);
+ err = snd_card_new(line6->ifcdev,
+ SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, 0, &card);
if (err < 0)
return err;
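Note: the audio.c hunk tracks the ALSA core change where card allocation takes the parent device up front, which is also why the separate snd_card_set_dev() calls disappear from midi.c and pcm.c further down. A minimal sketch of the updated call, mirroring the hunk above:

#include <linux/module.h>
#include <sound/core.h>
#include <sound/initval.h>

/* snd_card_new() takes the parent device that snd_card_create() did not,
 * so no later snd_card_set_dev() step is needed; 'parent' stands in for
 * line6->ifcdev. */
static int example_new_card(struct device *parent, struct snd_card **card)
{
	return snd_card_new(parent, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
			    THIS_MODULE, 0, card);
}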
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index f8316b71f13d..0eda51d2e9a2 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -157,6 +157,7 @@ void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf, int fsize)
copy two separate chunks.
*/
int len;
+
len = runtime->buffer_size - line6pcm->pos_in_done;
if (len > 0) {
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 7a6d85ebb29b..77f1b421e957 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -57,28 +57,32 @@ static const struct usb_device_id line6_id_table[] = {
MODULE_DEVICE_TABLE(usb, line6_id_table);
+#define L6PROP(dev_bit, dev_id, dev_name, dev_cap)\
+ {.device_bit = LINE6_BIT_##dev_bit, .id = dev_id,\
+ .name = dev_name, .capabilities = LINE6_BIT_##dev_cap}
+
/* *INDENT-OFF* */
-static struct line6_properties line6_properties_table[] = {
- { LINE6_BIT_BASSPODXT, "BassPODxt", "BassPODxt", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_BASSPODXTLIVE, "BassPODxtLive", "BassPODxt Live", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_BASSPODXTPRO, "BassPODxtPro", "BassPODxt Pro", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_GUITARPORT, "GuitarPort", "GuitarPort", LINE6_BIT_PCM },
- { LINE6_BIT_POCKETPOD, "PocketPOD", "Pocket POD", LINE6_BIT_CONTROL },
- { LINE6_BIT_PODHD300, "PODHD300", "POD HD300", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_PODHD400, "PODHD400", "POD HD400", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_PODHD500, "PODHD500", "POD HD500", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_PODSTUDIO_GX, "PODStudioGX", "POD Studio GX", LINE6_BIT_PCM },
- { LINE6_BIT_PODSTUDIO_UX1, "PODStudioUX1", "POD Studio UX1", LINE6_BIT_PCM },
- { LINE6_BIT_PODSTUDIO_UX2, "PODStudioUX2", "POD Studio UX2", LINE6_BIT_PCM },
- { LINE6_BIT_PODX3, "PODX3", "POD X3", LINE6_BIT_PCM },
- { LINE6_BIT_PODX3LIVE, "PODX3Live", "POD X3 Live", LINE6_BIT_PCM },
- { LINE6_BIT_PODXT, "PODxt", "PODxt", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_PODXTLIVE, "PODxtLive", "PODxt Live", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_PODXTPRO, "PODxtPro", "PODxt Pro", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_TONEPORT_GX, "TonePortGX", "TonePort GX", LINE6_BIT_PCM },
- { LINE6_BIT_TONEPORT_UX1, "TonePortUX1", "TonePort UX1", LINE6_BIT_PCM },
- { LINE6_BIT_TONEPORT_UX2, "TonePortUX2", "TonePort UX2", LINE6_BIT_PCM },
- { LINE6_BIT_VARIAX, "Variax", "Variax Workbench", LINE6_BIT_CONTROL },
+static const struct line6_properties line6_properties_table[] = {
+ L6PROP(BASSPODXT, "BassPODxt", "BassPODxt", CTRL_PCM_HW),
+ L6PROP(BASSPODXTLIVE, "BassPODxtLive", "BassPODxt Live", CTRL_PCM_HW),
+ L6PROP(BASSPODXTPRO, "BassPODxtPro", "BassPODxt Pro", CTRL_PCM_HW),
+ L6PROP(GUITARPORT, "GuitarPort", "GuitarPort", PCM),
+ L6PROP(POCKETPOD, "PocketPOD", "Pocket POD", CONTROL),
+ L6PROP(PODHD300, "PODHD300", "POD HD300", CTRL_PCM_HW),
+ L6PROP(PODHD400, "PODHD400", "POD HD400", CTRL_PCM_HW),
+ L6PROP(PODHD500, "PODHD500", "POD HD500", CTRL_PCM_HW),
+ L6PROP(PODSTUDIO_GX, "PODStudioGX", "POD Studio GX", PCM),
+ L6PROP(PODSTUDIO_UX1, "PODStudioUX1", "POD Studio UX1", PCM),
+ L6PROP(PODSTUDIO_UX2, "PODStudioUX2", "POD Studio UX2", PCM),
+ L6PROP(PODX3, "PODX3", "POD X3", PCM),
+ L6PROP(PODX3LIVE, "PODX3Live", "POD X3 Live", PCM),
+ L6PROP(PODXT, "PODxt", "PODxt", CTRL_PCM_HW),
+ L6PROP(PODXTLIVE, "PODxtLive", "PODxt Live", CTRL_PCM_HW),
+ L6PROP(PODXTPRO, "PODxtPro", "PODxt Pro", CTRL_PCM_HW),
+ L6PROP(TONEPORT_GX, "TonePortGX", "TonePort GX", PCM),
+ L6PROP(TONEPORT_UX1, "TonePortUX1", "TonePort UX1", PCM),
+ L6PROP(TONEPORT_UX2, "TonePortUX2", "TonePort UX2", PCM),
+ L6PROP(VARIAX, "Variax", "Variax Workbench", CONTROL),
};
/* *INDENT-ON* */
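Note: for one table entry, the new L6PROP() wrapper expands as sketched below (derived from the macro definition above; CTRL_PCM_HW refers to the LINE6_BIT_CTRL_PCM_HW mask introduced in the usbdefs.h hunk later in this patch):

/* L6PROP(GUITARPORT, "GuitarPort", "GuitarPort", PCM) expands to: */
{
	.device_bit   = LINE6_BIT_GUITARPORT,
	.id           = "GuitarPort",
	.name         = "GuitarPort",
	.capabilities = LINE6_BIT_PCM,
},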
@@ -152,10 +156,10 @@ int line6_send_raw_message(struct usb_line6 *line6, const char *buffer,
int retval;
retval = usb_interrupt_msg(line6->usbdev,
- usb_sndintpipe(line6->usbdev,
- line6->ep_control_write),
- (char *)frag_buf, frag_size,
- &partial, LINE6_TIMEOUT * HZ);
+ usb_sndintpipe(line6->usbdev,
+ line6->ep_control_write),
+ (char *)frag_buf, frag_size,
+ &partial, LINE6_TIMEOUT * HZ);
if (retval) {
dev_err(line6->ifcdev,
@@ -217,7 +221,7 @@ static int line6_send_raw_message_async_part(struct message *msg,
Setup and start timer.
*/
void line6_start_timer(struct timer_list *timer, unsigned int msecs,
- void (*function) (unsigned long), unsigned long data)
+ void (*function)(unsigned long), unsigned long data)
{
setup_timer(timer, function, data);
timer->expires = jiffies + msecs * HZ / 1000;
diff --git a/drivers/staging/line6/driver.h b/drivers/staging/line6/driver.h
index 34ae95e7e512..16e3fc2f1f15 100644
--- a/drivers/staging/line6/driver.h
+++ b/drivers/staging/line6/driver.h
@@ -204,7 +204,7 @@ extern int line6_send_sysex_message(struct usb_line6 *line6,
extern ssize_t line6_set_raw(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
extern void line6_start_timer(struct timer_list *timer, unsigned int msecs,
- void (*function) (unsigned long),
+ void (*function)(unsigned long),
unsigned long data);
extern int line6_transmit_parameter(struct usb_line6 *line6, int param,
u8 value);
diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c
index 3f6d78c585fb..02345fb06e3d 100644
--- a/drivers/staging/line6/midi.c
+++ b/drivers/staging/line6/midi.c
@@ -47,7 +47,7 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
struct snd_line6_midi *line6midi = line6->line6midi;
struct midi_buffer *mb = &line6midi->midibuf_out;
unsigned long flags;
- unsigned char chunk[line6->max_packet_size];
+ unsigned char chunk[LINE6_FALLBACK_MAXPACKETSIZE];
int req, done;
spin_lock_irqsave(&line6->line6midi->midi_transmit_lock, flags);
@@ -64,7 +64,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
}
for (;;) {
- done = line6_midibuf_read(mb, chunk, line6->max_packet_size);
+ done = line6_midibuf_read(mb, chunk,
+ LINE6_FALLBACK_MAXPACKETSIZE);
if (done == 0)
break;
@@ -307,8 +308,6 @@ int line6_init_midi(struct usb_line6 *line6)
if (err < 0)
return err;
- snd_card_set_dev(line6->card, line6->ifcdev);
-
err = snd_line6_new_midi(line6midi);
if (err < 0)
return err;
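Note: the chunk[] change in midi.c removes a variable-length array from the kernel stack by sizing the buffer with the compile-time fallback instead of the negotiated max_packet_size. An illustrative guard for the assumption this relies on (max_packet_size never exceeding the fallback); not part of the patch, and the header providing struct usb_line6 and the constant is assumed to be driver.h:

#include <linux/kernel.h>
#include "driver.h"

/* Illustrative only: clamp the negotiated size so the fixed-size chunk[]
 * buffer in line6_midi_transmit() cannot be overrun. */
static inline int example_clamp_packet_size(struct usb_line6 *line6)
{
	if (WARN_ON(line6->max_packet_size > LINE6_FALLBACK_MAXPACKETSIZE))
		line6->max_packet_size = LINE6_FALLBACK_MAXPACKETSIZE;
	return line6->max_packet_size;
}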
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index df8331bce175..661080b3c39d 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -501,8 +501,6 @@ int line6_init_pcm(struct usb_line6 *line6,
if (err < 0)
return err;
- snd_card_set_dev(line6->card, line6->ifcdev);
-
err = snd_line6_new_pcm(line6pcm);
if (err < 0)
return err;
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
index 90cadddec56e..2d1cc472bead 100644
--- a/drivers/staging/line6/usbdefs.h
+++ b/drivers/staging/line6/usbdefs.h
@@ -91,9 +91,9 @@ enum {
LINE6_BITS_PODXTALL = LINE6_BIT_PODXT | LINE6_BIT_PODXTLIVE |
LINE6_BIT_PODXTPRO,
LINE6_BITS_PODX3ALL = LINE6_BIT_PODX3 | LINE6_BIT_PODX3LIVE,
- LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 |
- LINE6_BIT_PODHD400 |
- LINE6_BIT_PODHD500,
+ LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 |
+ LINE6_BIT_PODHD400 |
+ LINE6_BIT_PODHD500,
LINE6_BITS_BASSPODXTALL = LINE6_BIT_BASSPODXT |
LINE6_BIT_BASSPODXTLIVE |
LINE6_BIT_BASSPODXTPRO
@@ -106,7 +106,7 @@ enum {
/* device support hardware monitoring */
#define LINE6_BIT_HWMON (1 << 2)
-#define LINE6_BIT_CONTROL_PCM_HWMON (LINE6_BIT_CONTROL | \
+#define LINE6_BIT_CTRL_PCM_HW (LINE6_BIT_CONTROL | \
LINE6_BIT_PCM | \
LINE6_BIT_HWMON)
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index 507d16b9213c..8fd47c98fd33 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -44,13 +44,6 @@
#define __LIBCFS_CURPROC_H__
/*
- * Portable API to access common characteristics of "current" UNIX process.
- *
- * Implemented in portals/include/libcfs/<os>/
- */
-int cfs_curproc_groups_nr(void);
-
-/*
* Plus, platform-specific constant
*
* CFS_CURPROC_COMM_MAX,
@@ -91,8 +84,6 @@ void cfs_cap_raise(cfs_cap_t cap);
void cfs_cap_lower(cfs_cap_t cap);
int cfs_cap_raised(cfs_cap_t cap);
cfs_cap_t cfs_curproc_cap_pack(void);
-void cfs_curproc_cap_unpack(cfs_cap_t cap);
-int cfs_capable(cfs_cap_t cap);
/* __LIBCFS_CURPROC_H__ */
#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 9d5ee1a69c0c..e5d5db255622 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -65,8 +65,6 @@
#include <linux/hash.h>
-#define cfs_hash_long(val, bits) hash_long(val, bits)
-
/** disable debug */
#define CFS_HASH_DEBUG_NONE 0
/** record hash depth and output to console when it's too deep,
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 74dda57b98a8..49ba62a4daa8 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -211,10 +211,10 @@ static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
}
-extern int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
-extern int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
-extern int libcfs_ioctl_getdata(char *buf, char *end, void *arg);
-extern int libcfs_ioctl_popdata(void *arg, void *buf, int size);
+int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
+int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
+int libcfs_ioctl_getdata(char *buf, char *end, void *arg);
+int libcfs_ioctl_popdata(void *arg, void *buf, int size);
#endif /* __LIBCFS_IOCTL_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index e6e417aeefd5..7cf34aa78f79 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -41,19 +41,10 @@
#define __LIBCFS_PRIM_H__
/*
- * Schedule
- */
-void cfs_pause(cfs_duration_t ticks);
-
-/*
* Timer
*/
typedef void (cfs_timer_func_t)(ulong_ptr_t);
-void schedule_timeout_and_set_state(long, int64_t);
-void init_waitqueue_entry_current(wait_queue_t *link);
-int64_t waitq_timedwait(wait_queue_t *, long, int64_t);
-void waitq_wait(wait_queue_t *, long);
void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
void cfs_init_timer(struct timer_list *t);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index a6bac9c36339..509dc1e5c3b1 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -49,18 +49,6 @@ int cfs_strncasecmp(const char *s1, const char *s2, size_t n);
/* Convert a text string to a bitmask */
int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
int *oldmask, int minmask, int allmask);
-
-/* Allocate space for and copy an existing string.
- * Must free with kfree().
- */
-char *cfs_strdup(const char *str, u_int32_t flags);
-
-/* safe vsnprintf */
-int cfs_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-
-/* safe snprintf */
-int cfs_snprintf(char *buf, size_t size, const char *fmt, ...);
-
/* trim leading and trailing space characters */
char *cfs_firststr(char *str, size_t size);
@@ -90,27 +78,10 @@ struct cfs_expr_list {
struct list_head el_exprs;
};
-static inline int
-cfs_iswhite(char c)
-{
- switch (c) {
- case ' ':
- case '\t':
- case '\n':
- case '\r':
- return 1;
- default:
- break;
- }
- return 0;
-}
-
char *cfs_trimwhite(char *str);
int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res);
int cfs_str2num_check(char *str, int nob, unsigned *num,
unsigned min, unsigned max);
-int cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
- int single_tok, struct cfs_range_expr **expr);
int cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list);
int cfs_expr_list_values(struct cfs_expr_list *expr_list,
int max, __u32 **values);
@@ -124,7 +95,6 @@ cfs_expr_list_values_free(__u32 *values, int num)
}
void cfs_expr_list_free(struct cfs_expr_list *expr_list);
-void cfs_expr_list_print(struct cfs_expr_list *expr_list);
int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
struct cfs_expr_list **elpp);
void cfs_expr_list_free_list(struct list_head *list);
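Note: dropping cfs_iswhite() works because isspace() from <linux/ctype.h> accepts the same four characters (plus '\v' and '\f', which is harmless for these parsers); the config.c hunks later in the patch switch all callers over. A one-line stand-in, for illustration:

#include <linux/ctype.h>

/* Illustrative equivalent of the removed helper */
static inline int example_iswhite(char c)
{
	return isspace(c);
}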
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index 3ac2bb5fd2db..856fcfaafafa 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -628,7 +628,7 @@ void lnet_ni_free(lnet_ni_t *ni);
static inline int
lnet_nid2peerhash(lnet_nid_t nid)
{
- return cfs_hash_long(nid, LNET_PEER_HASH_BITS);
+ return hash_long(nid, LNET_PEER_HASH_BITS);
}
static inline struct list_head *
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index dd8edcf1b5c0..1c13ef7df80e 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -282,16 +282,14 @@ typedef struct lnet_libmd {
#define LNET_MD_FLAG_AUTO_UNLINK (1 << 1)
#ifdef LNET_USE_LIB_FREELIST
-typedef struct
-{
+typedef struct {
void *fl_objs; /* single contiguous array of objects */
int fl_nobjs; /* the number of them */
int fl_objsize; /* the size (including overhead) of each of them */
struct list_head fl_list; /* where they are enqueued */
} lnet_freelist_t;
-typedef struct
-{
+typedef struct {
struct list_head fo_list; /* enqueue on fl_list */
void *fo_contents; /* aligned contents */
} lnet_freeobj_t;
@@ -312,8 +310,7 @@ typedef struct {
struct lnet_ni; /* forward ref */
-typedef struct lnet_lnd
-{
+typedef struct lnet_lnd {
/* fields managed by portals */
struct list_head lnd_list; /* stash in the LND table */
int lnd_refcount; /* # active instances */
@@ -321,8 +318,8 @@ typedef struct lnet_lnd
/* fields initialised by the LND */
unsigned int lnd_type;
- int (*lnd_startup) (struct lnet_ni *ni);
- void (*lnd_shutdown) (struct lnet_ni *ni);
+ int (*lnd_startup)(struct lnet_ni *ni);
+ void (*lnd_shutdown)(struct lnet_ni *ni);
int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);
/* In data movement APIs below, payload buffers are described as a set
@@ -668,8 +665,7 @@ struct lnet_msg_container {
#define LNET_RC_STATE_RUNNING 1 /* started up OK */
#define LNET_RC_STATE_STOPPING 2 /* telling thread to stop */
-typedef struct
-{
+typedef struct {
/* CPU partition table of LNet */
struct cfs_cpt_table *ln_cpt_table;
/* number of CPTs in ln_cpt_table */
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 644a0000130a..0061c8afa206 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -41,7 +41,7 @@
#include "o2iblnd.h"
#include <asm/div64.h>
-lnd_t the_o2iblnd = {
+static lnd_t the_o2iblnd = {
.lnd_type = O2IBLND,
.lnd_startup = kiblnd_startup,
.lnd_shutdown = kiblnd_shutdown,
@@ -53,8 +53,8 @@ lnd_t the_o2iblnd = {
kib_data_t kiblnd_data;
-__u32
-kiblnd_cksum (void *ptr, int nob)
+static __u32
+kiblnd_cksum(void *ptr, int nob)
{
char *c = ptr;
__u32 sum = 0;
@@ -429,8 +429,8 @@ kiblnd_unlink_peer_locked (kib_peer_t *peer)
kiblnd_peer_decref(peer);
}
-int
-kiblnd_get_peer_info (lnet_ni_t *ni, int index,
+static int
+kiblnd_get_peer_info(lnet_ni_t *ni, int index,
lnet_nid_t *nidp, int *count)
{
kib_peer_t *peer;
@@ -468,8 +468,8 @@ kiblnd_get_peer_info (lnet_ni_t *ni, int index,
return -ENOENT;
}
-void
-kiblnd_del_peer_locked (kib_peer_t *peer)
+static void
+kiblnd_del_peer_locked(kib_peer_t *peer)
{
struct list_head *ctmp;
struct list_head *cnxt;
@@ -489,8 +489,8 @@ kiblnd_del_peer_locked (kib_peer_t *peer)
* last ref on it. */
}
-int
-kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
+static int
+kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
{
LIST_HEAD (zombies);
struct list_head *ptmp;
@@ -543,8 +543,8 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
return rc;
}
-kib_conn_t *
-kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
+static kib_conn_t *
+kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
{
kib_peer_t *peer;
struct list_head *ptmp;
@@ -584,74 +584,6 @@ kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
return NULL;
}
-void
-kiblnd_debug_rx (kib_rx_t *rx)
-{
- CDEBUG(D_CONSOLE, " %p status %d msg_type %x cred %d\n",
- rx, rx->rx_status, rx->rx_msg->ibm_type,
- rx->rx_msg->ibm_credits);
-}
-
-void
-kiblnd_debug_tx (kib_tx_t *tx)
-{
- CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lx "
- "cookie "LPX64" msg %s%s type %x cred %d\n",
- tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
- tx->tx_status, tx->tx_deadline, tx->tx_cookie,
- tx->tx_lntmsg[0] == NULL ? "-" : "!",
- tx->tx_lntmsg[1] == NULL ? "-" : "!",
- tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits);
-}
-
-void
-kiblnd_debug_conn (kib_conn_t *conn)
-{
- struct list_head *tmp;
- int i;
-
- spin_lock(&conn->ibc_lock);
-
- CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
- atomic_read(&conn->ibc_refcount), conn,
- conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d r_cred %d\n",
- conn->ibc_state, conn->ibc_noops_posted,
- conn->ibc_nsends_posted, conn->ibc_credits,
- conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
- CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error);
-
- CDEBUG(D_CONSOLE, " early_rxs:\n");
- list_for_each(tmp, &conn->ibc_early_rxs)
- kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));
-
- CDEBUG(D_CONSOLE, " tx_noops:\n");
- list_for_each(tmp, &conn->ibc_tx_noops)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
-
- CDEBUG(D_CONSOLE, " tx_queue_nocred:\n");
- list_for_each(tmp, &conn->ibc_tx_queue_nocred)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
-
- CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n");
- list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
-
- CDEBUG(D_CONSOLE, " tx_queue:\n");
- list_for_each(tmp, &conn->ibc_tx_queue)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
-
- CDEBUG(D_CONSOLE, " active_txs:\n");
- list_for_each(tmp, &conn->ibc_active_txs)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
-
- CDEBUG(D_CONSOLE, " rxs:\n");
- for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++)
- kiblnd_debug_rx(&conn->ibc_rxs[i]);
-
- spin_unlock(&conn->ibc_lock);
-}
-
int
kiblnd_translate_mtu(int value)
{
@@ -1039,8 +971,8 @@ kiblnd_close_stale_conns_locked (kib_peer_t *peer,
return count;
}
-int
-kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
+static int
+kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
{
kib_peer_t *peer;
struct list_head *ptmp;
@@ -1440,7 +1372,7 @@ kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
return mr;
}
-void
+static void
kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
{
LASSERT (pool->fpo_map_count == 0);
@@ -1454,7 +1386,7 @@ kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
}
-void
+static void
kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
kib_fmr_pool_t *pool;
@@ -1480,7 +1412,7 @@ static int kiblnd_fmr_flush_trigger(int ncpts)
return max(IBLND_FMR_POOL_FLUSH, size);
}
-int
+static int
kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo)
{
/* FMR pool for RDMA */
@@ -1719,7 +1651,7 @@ kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
pool->po_size = size;
}
-void
+static void
kiblnd_destroy_pool_list(struct list_head *head)
{
kib_pool_t *pool;
@@ -2192,7 +2124,7 @@ kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
tx->tx_cookie = tps->tps_next_tx_cookie ++;
}
-void
+static void
kiblnd_net_fini_pools(kib_net_t *net)
{
int i;
@@ -2234,7 +2166,7 @@ kiblnd_net_fini_pools(kib_net_t *net)
}
}
-int
+static int
kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
{
unsigned long flags;
@@ -2408,7 +2340,7 @@ kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
return -EINVAL;
}
-void
+static void
kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
{
int i;
@@ -2442,7 +2374,7 @@ kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
LIBCFS_FREE(hdev, sizeof(*hdev));
}
-int
+static int
kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
{
struct ib_mr *mr;
@@ -2746,7 +2678,7 @@ kiblnd_destroy_dev (kib_dev_t *dev)
LIBCFS_FREE(dev, sizeof(*dev));
}
-kib_dev_t *
+static kib_dev_t *
kiblnd_create_dev(char *ifname)
{
struct net_device *netdev;
@@ -2800,7 +2732,7 @@ kiblnd_create_dev(char *ifname)
return dev;
}
-void
+static void
kiblnd_base_shutdown(void)
{
struct kib_sched_info *sched;
@@ -2842,7 +2774,8 @@ kiblnd_base_shutdown(void)
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
atomic_read(&kiblnd_data.kib_nthreads));
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
/* fall through */
@@ -2903,7 +2836,8 @@ kiblnd_shutdown (lnet_ni_t *ni)
"%s: waiting for %d peers to disconnect\n",
libcfs_nid2str(ni->ni_nid),
atomic_read(&net->ibn_npeers));
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
kiblnd_net_fini_pools(net);
@@ -2940,7 +2874,7 @@ out:
return;
}
-int
+static int
kiblnd_base_startup(void)
{
struct kib_sched_info *sched;
@@ -3030,7 +2964,7 @@ kiblnd_base_startup(void)
return -ENETDOWN;
}
-int
+static int
kiblnd_start_schedulers(struct kib_sched_info *sched)
{
int rc = 0;
@@ -3071,7 +3005,7 @@ kiblnd_start_schedulers(struct kib_sched_info *sched)
return rc;
}
-int
+static int
kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts)
{
int cpt;
@@ -3097,7 +3031,7 @@ kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts)
return 0;
}
-kib_dev_t *
+static kib_dev_t *
kiblnd_dev_search(char *ifname)
{
kib_dev_t *alias = NULL;
@@ -3226,13 +3160,13 @@ failed:
return -ENETDOWN;
}
-void __exit
+static void __exit
kiblnd_module_fini (void)
{
lnet_unregister_lnd(&the_o2iblnd);
}
-int __init
+static int __init
kiblnd_module_init (void)
{
int rc;
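Note: every cfs_pause(ticks) call in this file (and in the socklnd, lnet and selftest files below) is open-coded as the set_current_state()/schedule_timeout() pair shown in the hunks above. A minimal equivalent of the removed helper, purely for illustration:

#include <linux/sched.h>

/* Illustrative equivalent of the removed cfs_pause(ticks): sleep
 * uninterruptibly for 'ticks' jiffies. */
static inline void example_pause(long ticks)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(ticks);
}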
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 6f58ead20393..6173e74d7492 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -3127,7 +3127,7 @@ kiblnd_connd (void *arg)
cfs_block_allsigs ();
- init_waitqueue_entry_current (&wait);
+ init_waitqueue_entry(&wait, current);
kiblnd_data.kib_connd = current;
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
@@ -3208,7 +3208,7 @@ kiblnd_connd (void *arg)
add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
- waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
+ schedule_timeout(timeout);
set_current_state(TASK_RUNNING);
remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
@@ -3324,7 +3324,7 @@ kiblnd_scheduler(void *arg)
cfs_block_allsigs();
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
@@ -3423,7 +3423,7 @@ kiblnd_scheduler(void *arg)
add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
spin_unlock_irqrestore(&sched->ibs_lock, flags);
- waitq_wait(&wait, TASK_INTERRUPTIBLE);
+ schedule();
busy_loops = 0;
remove_wait_queue(&sched->ibs_waitq, &wait);
@@ -3450,7 +3450,7 @@ kiblnd_failover_thread(void *arg)
cfs_block_allsigs ();
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
write_lock_irqsave(glock, flags);
while (!kiblnd_data.kib_shutdown) {
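Note: the waitq_wait()/waitq_timedwait() wrappers are likewise replaced by plain schedule()/schedule_timeout() after queuing the wait entry by hand. The overall shape of the open-coded wait, sketched with placeholder names (none of these identifiers come from the patch):

#include <linux/sched.h>
#include <linux/wait.h>

/* Sketch of the pattern used throughout these hunks: queue ourselves,
 * sleep (optionally with a jiffies timeout), then dequeue. */
static long example_wait_timeout(wait_queue_head_t *wq, long timeout)
{
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(wq, &wait);

	timeout = schedule_timeout(timeout); /* or schedule() when untimed */

	set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
	return timeout;
}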
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 8f74d0be32f1..21d36ee5378a 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -2336,7 +2336,8 @@ ksocknal_base_shutdown(void)
"waiting for %d threads to terminate\n",
ksocknal_data.ksnd_nthreads);
read_unlock(&ksocknal_data.ksnd_global_lock);
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
read_lock(&ksocknal_data.ksnd_global_lock);
}
read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2584,7 +2585,8 @@ ksocknal_shutdown (lnet_ni_t *ni)
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d peers to disconnect\n",
net->ksnn_npeers);
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
ksocknal_debug_peerhash(ni);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index b7b53b579c85..bdf95ea8a54d 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -189,7 +189,8 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
int bufnob;
if (ksocknal_data.ksnd_stall_tx != 0) {
- cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
}
LASSERT (tx->tx_resid != 0);
@@ -345,7 +346,8 @@ ksocknal_receive (ksock_conn_t *conn)
int rc;
if (ksocknal_data.ksnd_stall_rx != 0) {
- cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
}
rc = ksocknal_connsock_addref(conn);
@@ -2140,7 +2142,7 @@ ksocknal_connd (void *arg)
cfs_block_allsigs ();
- init_waitqueue_entry_current (&wait);
+ init_waitqueue_entry(&wait, current);
spin_lock_bh(connd_lock);
@@ -2229,7 +2231,7 @@ ksocknal_connd (void *arg)
spin_unlock_bh(connd_lock);
nloops = 0;
- waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
+ schedule_timeout(timeout);
set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
@@ -2532,7 +2534,7 @@ ksocknal_reaper (void *arg)
cfs_block_allsigs ();
INIT_LIST_HEAD(&enomem_conns);
- init_waitqueue_entry_current (&wait);
+ init_waitqueue_entry(&wait, current);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2639,8 +2641,7 @@ ksocknal_reaper (void *arg)
if (!ksocknal_data.ksnd_shuttingdown &&
list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
list_empty (&ksocknal_data.ksnd_zombie_conns))
- waitq_timedwait (&wait, TASK_INTERRUPTIBLE,
- timeout);
+ schedule_timeout(timeout);
set_current_state (TASK_RUNNING);
remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
index 733c79e1f12d..b87b246111c0 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -337,7 +337,8 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
/* NB we can't trust socket ops to either consume our iovs
* or leave them alone. */
- if ((addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages)) != NULL) {
+ addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
+ if (addr != NULL) {
nob = scratchiov[0].iov_len;
n = 1;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 71205e2015ce..2d91571cbab2 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -165,7 +165,8 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
LASSERT (tx_ack == NULL ||
tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
- if ((tx = conn->ksnc_tx_carrier) == NULL) {
+ tx = conn->ksnc_tx_carrier;
+ if (tx == NULL) {
if (tx_ack != NULL) {
list_add_tail(&tx_ack->tx_list,
&conn->ksnc_tx_queue);
@@ -392,7 +393,8 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
if (tx == NULL)
return -ENOMEM;
- if ((rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) == 0)
+ rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id);
+ if (rc == 0)
return 0;
ksocknal_free_tx(tx);
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index cb2ecd717714..09ea6cb1492c 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -371,7 +371,8 @@ lnet_acceptor(void *arg)
if (rc != 0) {
if (rc != -EAGAIN) {
CWARN("Accept error %d: pausing...\n", rc);
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
continue;
}
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index c562ff3e9283..45c23194081b 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -770,7 +770,7 @@ lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
if (number == 1)
return 0;
- val = cfs_hash_long(key, LNET_CPT_BITS);
+ val = hash_long(key, LNET_CPT_BITS);
/* NB: LNET_CP_NUMBER doesn't have to be PO2 */
if (val < number)
return val;
@@ -994,7 +994,8 @@ lnet_shutdown_lndnis (void)
"Waiting for zombie LNI %s\n",
libcfs_nid2str(ni->ni_nid));
}
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
lnet_net_lock(LNET_LOCK_EX);
continue;
}
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 6a07b0a65d12..d97464e95ddb 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -443,7 +443,7 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str)
/* Split 'str' into separate commands */
for (;;) {
/* skip leading whitespace */
- while (cfs_iswhite(*str))
+ while (isspace(*str))
str++;
/* scan for separator or comment */
@@ -460,7 +460,7 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str)
}
for (i = 0; i < nob; i++)
- if (cfs_iswhite(str[i]))
+ if (isspace(str[i]))
ltb->ltb_text[i] = ' ';
else
ltb->ltb_text[i] = str[i];
@@ -667,7 +667,7 @@ lnet_parse_route(char *str, int *im_a_router)
sep = str;
for (;;) {
/* scan for token start */
- while (cfs_iswhite(*sep))
+ while (isspace(*sep))
sep++;
if (*sep == 0) {
if (ntokens < (got_hops ? 3 : 2))
@@ -679,7 +679,7 @@ lnet_parse_route(char *str, int *im_a_router)
token = sep++;
/* scan for token end */
- while (*sep != 0 && !cfs_iswhite(*sep))
+ while (*sep != 0 && !isspace(*sep))
sep++;
if (*sep != 0)
*sep++ = 0;
@@ -858,7 +858,7 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
sep = tokens;
for (;;) {
/* scan for token start */
- while (cfs_iswhite(*sep))
+ while (isspace(*sep))
sep++;
if (*sep == 0)
break;
@@ -866,7 +866,7 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
token = sep++;
/* scan for token end */
- while (*sep != 0 && !cfs_iswhite(*sep))
+ while (*sep != 0 && !isspace(*sep))
sep++;
if (*sep != 0)
*sep++ = 0;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index 4ce68d3b0931..7ce07f61b2e1 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -334,21 +334,20 @@ lnet_eq_wait_locked(int *timeout_ms)
if (tms == 0)
return -1; /* don't want to wait and no new event */
- init_waitqueue_entry_current(&wl);
+ init_waitqueue_entry(&wl, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
lnet_eq_wait_unlock();
if (tms < 0) {
- waitq_wait(&wl, TASK_INTERRUPTIBLE);
+ schedule();
} else {
struct timeval tv;
now = cfs_time_current();
- waitq_timedwait(&wl, TASK_INTERRUPTIBLE,
- cfs_time_seconds(tms) / 1000);
+ schedule_timeout(cfs_time_seconds(tms) / 1000);
cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
if (tms < 0) /* no more wait but may have new event */
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 6fffd5e96f9c..920df69960a5 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -366,7 +366,7 @@ lnet_mt_match_head(struct lnet_match_table *mtable,
unsigned long hash = mbits + id.nid + id.pid;
LASSERT(lnet_ptl_is_unique(ptl));
- hash = cfs_hash_long(hash, LNET_MT_HASH_BITS);
+ hash = hash_long(hash, LNET_MT_HASH_BITS);
return &mtable->mt_mhash[hash];
}
}
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index 286977691393..72802b0404a4 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -145,7 +145,8 @@ lnet_peer_tables_cleanup(void)
"Waiting for %d peers on peer table\n",
ptable->pt_number);
}
- cfs_pause(cfs_time_seconds(1) / 2);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 2);
lnet_net_lock(i);
}
list_splice_init(&ptable->pt_deathrow, &deathrow);
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index d1ee44232eef..995f50976c42 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -779,7 +779,8 @@ lnet_wait_known_routerstate(void)
if (all_known)
return;
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
}
@@ -1147,7 +1148,8 @@ lnet_prune_rc_data(int wait_unlink)
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for rc buffers to unlink\n");
- cfs_pause(cfs_time_seconds(1) / 4);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 4);
lnet_net_lock(LNET_LOCK_EX);
}
@@ -1206,11 +1208,11 @@ rescan:
lnet_prune_rc_data(0); /* don't wait for UNLINK */
- /* Call cfs_pause() here always adds 1 to load average
+ /* Call schedule_timeout() here always adds 1 to load average
* because kernel counts # active tasks as nr_running
* + nr_uninterruptible. */
- schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 20d53e08705e..0cbd9fc98e07 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -837,8 +837,8 @@ static int __proc_lnet_portal_rotor(void *data, int write,
rc = -EINVAL;
lnet_res_lock(0);
for (i = 0; portal_rotors[i].pr_name != NULL; i++) {
- if (cfs_strncasecmp(portal_rotors[i].pr_name, tmp,
- strlen(portal_rotors[i].pr_name)) == 0) {
+ if (strncasecmp(portal_rotors[i].pr_name, tmp,
+ strlen(portal_rotors[i].pr_name)) == 0) {
portal_rotor = portal_rotors[i].pr_value;
rc = 0;
break;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 53d58924737b..8d1eea4cef6f 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -1346,7 +1346,8 @@ lstcon_rpc_cleanup_wait(void)
mutex_unlock(&console_session.ses_mutex);
CWARN("Session is shutting down, waiting for termination of transactions\n");
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
mutex_lock(&console_session.ses_mutex);
}
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 2a8eddc7db52..352fc96398d2 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -61,7 +61,7 @@ do { \
lstcon_session_t console_session;
-void
+static void
lstcon_node_get(lstcon_node_t *nd)
{
LASSERT (nd->nd_ref >= 1);
@@ -114,7 +114,7 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
return 0;
}
-void
+static void
lstcon_node_put(lstcon_node_t *nd)
{
lstcon_ndlink_t *ndl;
@@ -344,7 +344,7 @@ lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new)
}
}
-int
+static int
lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
{
lstcon_group_t *grp = (lstcon_group_t *)arg;
@@ -373,7 +373,7 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
return 1;
}
-int
+static int
lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
lstcon_rpc_ent_t *ent_up)
{
@@ -830,7 +830,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
return 0;
}
-int
+static int
lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
{
lstcon_batch_t *bat;
@@ -998,7 +998,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
return rc;
}
-int
+static int
lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
{
switch (transop) {
@@ -1141,7 +1141,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
}
-int
+static int
lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
{
lstcon_test_t *test;
@@ -1370,7 +1370,7 @@ out:
return rc;
}
-int
+static int
lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
{
lstcon_test_t *test;
@@ -1385,7 +1385,7 @@ lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
return -ENOENT;
}
-int
+static int
lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
lstcon_rpc_ent_t *ent_up)
{
@@ -1464,7 +1464,7 @@ lstcon_test_batch_query(char *name, int testidx, int client,
return rc;
}
-int
+static int
lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
lstcon_rpc_ent_t *ent_up)
{
@@ -1488,7 +1488,7 @@ lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
return 0;
}
-int
+static int
lstcon_ndlist_stat(struct list_head *ndlist,
int timeout, struct list_head *result_up)
{
@@ -1577,7 +1577,7 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
return rc;
}
-int
+static int
lstcon_debug_ndlist(struct list_head *ndlist,
struct list_head *translist,
int timeout, struct list_head *result_up)
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index d838985f51cb..9fc0429e8296 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -1585,7 +1585,8 @@ srpc_startup (void)
spin_lock_init(&srpc_data.rpc_glock);
/* 1 second pause to avoid timestamp reuse */
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
srpc_data.rpc_state = SRPC_STATE_NONE;
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 228927e0f962..f4806a6bc942 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -572,7 +572,11 @@ swi_state2str (int state)
#undef STATE2STR
}
-#define selftest_wait_events() cfs_pause(cfs_time_seconds(1) / 10)
+#define selftest_wait_events() \
+ do { \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout(cfs_time_seconds(1) / 10); \
+ } while (0)
#define lst_wait_until(cond, lock, fmt, ...) \
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 79fc2fe131a2..3401c9ad42ac 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -196,8 +196,8 @@ static int seq_client_alloc_seq(const struct lu_env *env,
if (range_is_exhausted(&seq->lcs_space)) {
rc = seq_client_alloc_meta(env, seq);
if (rc) {
- CERROR("%s: Can't allocate new meta-sequence,"
- "rc %d\n", seq->lcs_name, rc);
+ CERROR("%s: Can't allocate new meta-sequence, rc %d\n",
+ seq->lcs_name, rc);
return rc;
} else {
CDEBUG(D_INFO, "%s: New range - "DRANGE"\n",
@@ -225,7 +225,7 @@ static int seq_fid_alloc_prep(struct lu_client_seq *seq,
set_current_state(TASK_UNINTERRUPTIBLE);
mutex_unlock(&seq->lcs_mutex);
- waitq_wait(link, TASK_UNINTERRUPTIBLE);
+ schedule();
mutex_lock(&seq->lcs_mutex);
remove_wait_queue(&seq->lcs_waitq, link);
@@ -256,7 +256,7 @@ int seq_client_get_seq(const struct lu_env *env,
LASSERT(seqnr != NULL);
mutex_lock(&seq->lcs_mutex);
- init_waitqueue_entry_current(&link);
+ init_waitqueue_entry(&link, current);
while (1) {
rc = seq_fid_alloc_prep(seq, &link);
@@ -266,15 +266,15 @@ int seq_client_get_seq(const struct lu_env *env,
rc = seq_client_alloc_seq(env, seq, seqnr);
if (rc) {
- CERROR("%s: Can't allocate new sequence, "
- "rc %d\n", seq->lcs_name, rc);
+ CERROR("%s: Can't allocate new sequence, rc %d\n",
+ seq->lcs_name, rc);
seq_fid_alloc_fini(seq);
mutex_unlock(&seq->lcs_mutex);
return rc;
}
- CDEBUG(D_INFO, "%s: allocate sequence "
- "[0x%16.16"LPF64"x]\n", seq->lcs_name, *seqnr);
+ CDEBUG(D_INFO, "%s: allocate sequence [0x%16.16"LPF64"x]\n",
+ seq->lcs_name, *seqnr);
/* Since the caller require the whole seq,
* so marked this seq to be used */
@@ -306,7 +306,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
LASSERT(seq != NULL);
LASSERT(fid != NULL);
- init_waitqueue_entry_current(&link);
+ init_waitqueue_entry(&link, current);
mutex_lock(&seq->lcs_mutex);
if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
@@ -329,15 +329,15 @@ int seq_client_alloc_fid(const struct lu_env *env,
rc = seq_client_alloc_seq(env, seq, &seqnr);
if (rc) {
- CERROR("%s: Can't allocate new sequence, "
- "rc %d\n", seq->lcs_name, rc);
+ CERROR("%s: Can't allocate new sequence, rc %d\n",
+ seq->lcs_name, rc);
seq_fid_alloc_fini(seq);
mutex_unlock(&seq->lcs_mutex);
return rc;
}
- CDEBUG(D_INFO, "%s: Switch to sequence "
- "[0x%16.16"LPF64"x]\n", seq->lcs_name, seqnr);
+ CDEBUG(D_INFO, "%s: Switch to sequence [0x%16.16"LPF64"x]\n",
+ seq->lcs_name, seqnr);
seq->lcs_fid.f_oid = LUSTRE_FID_INIT_OID;
seq->lcs_fid.f_seq = seqnr;
@@ -370,7 +370,7 @@ void seq_client_flush(struct lu_client_seq *seq)
wait_queue_t link;
LASSERT(seq != NULL);
- init_waitqueue_entry_current(&link);
+ init_waitqueue_entry(&link, current);
mutex_lock(&seq->lcs_mutex);
while (seq->lcs_update) {
@@ -378,7 +378,7 @@ void seq_client_flush(struct lu_client_seq *seq)
set_current_state(TASK_UNINTERRUPTIBLE);
mutex_unlock(&seq->lcs_mutex);
- waitq_wait(&link, TASK_UNINTERRUPTIBLE);
+ schedule();
mutex_lock(&seq->lcs_mutex);
remove_wait_queue(&seq->lcs_waitq, &link);
@@ -428,8 +428,8 @@ static int seq_client_proc_init(struct lu_client_seq *seq)
rc = lprocfs_add_vars(seq->lcs_proc_dir,
seq_client_proc_list, seq);
if (rc) {
- CERROR("%s: Can't init sequence manager "
- "proc, rc %d\n", seq->lcs_name, rc);
+ CERROR("%s: Can't init sequence manager proc, rc %d\n",
+ seq->lcs_name, rc);
GOTO(out_cleanup, rc);
}
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 6c379301df82..a06a642f549e 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -236,8 +236,8 @@ static int fld_cache_shrink(struct fld_cache *cache)
num++;
}
- CDEBUG(D_INFO, "%s: FLD cache - Shrunk by "
- "%d entries\n", cache->fci_name, num);
+ CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n",
+ cache->fci_name, num);
return 0;
}
@@ -355,7 +355,7 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
} else
CERROR("NEW range ="DRANGE" curr = "DRANGE"\n",
- PRANGE(range),PRANGE(&f_curr->fce_range));
+ PRANGE(range), PRANGE(&f_curr->fce_range));
}
struct fld_cache_entry
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 5f3935cc0fd7..8661a788f120 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -90,7 +90,7 @@ struct fld_cache {
int fci_threshold;
/**
- * Prefered number of cached entries */
+ * Preferred number of cached entries */
int fci_cache_size;
/**
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 896f9fe83ffd..1f8abba31428 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -138,9 +138,8 @@ fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
return target;
}
- CERROR("%s: Can't find target by hash %d (seq "LPX64"). "
- "Targets (%d):\n", fld->lcf_name, hash, seq,
- fld->lcf_count);
+ CERROR("%s: Can't find target by hash %d (seq "LPX64"). Targets (%d):\n",
+ fld->lcf_name, hash, seq, fld->lcf_count);
list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
const char *srv_name = target->ft_srv != NULL ?
@@ -209,9 +208,8 @@ int fld_client_add_target(struct lu_client_fld *fld,
LASSERT(tar->ft_srv != NULL || tar->ft_exp != NULL);
if (fld->lcf_flags != LUSTRE_FLD_INIT) {
- CERROR("%s: Attempt to add target %s (idx "LPU64") "
- "on fly - skip it\n", fld->lcf_name, name,
- tar->ft_idx);
+ CERROR("%s: Attempt to add target %s (idx "LPU64") on fly - skip it\n",
+ fld->lcf_name, name, tar->ft_idx);
return 0;
} else {
CDEBUG(D_INFO, "%s: Adding target %s (idx "
@@ -476,9 +474,8 @@ int fld_client_lookup(struct lu_client_fld *fld, seqno_t seq, mdsno_t *mds,
target = fld_client_get_target(fld, seq);
LASSERT(target != NULL);
- CDEBUG(D_INFO, "%s: Lookup fld entry (seq: "LPX64") on "
- "target %s (idx "LPU64")\n", fld->lcf_name, seq,
- fld_target_name(target), target->ft_idx);
+ CDEBUG(D_INFO, "%s: Lookup fld entry (seq: "LPX64") on target %s (idx "LPU64")\n",
+ fld->lcf_name, seq, fld_target_name(target), target->ft_idx);
res.lsr_start = seq;
fld_range_set_type(&res, flags);
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index 4d692dcd96cf..c809239c0866 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -1314,7 +1314,7 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
* calls. To achieve this, every layer can implement ->clo_fits_into() method,
* that is called by lock matching code (cl_lock_lookup()), and that can be
* used to selectively disable matching of certain locks for certain IOs. For
- * exmaple, lov layer implements lov_lock_fits_into() that allow multi-stripe
+ * example, lov layer implements lov_lock_fits_into() that allow multi-stripe
* locks to be matched only for truncates and O_APPEND writes.
*
* Interaction with DLM
@@ -2385,14 +2385,18 @@ struct cl_io {
* Check if layout changed after the IO finishes. Mainly for HSM
* requirement. If IO occurs to openning files, it doesn't need to
* verify layout because HSM won't release openning files.
- * Right now, only two opertaions need to verify layout: glimpse
+ * Right now, only two operations need to verify layout: glimpse
* and setattr.
*/
ci_verify_layout:1,
/**
* file is released, restore has to to be triggered by vvp layer
*/
- ci_restore_needed:1;
+ ci_restore_needed:1,
+ /**
+ * O_NOATIME
+ */
+ ci_noatime:1;
/**
* Number of pages owned by this IO. For invariant checking.
*/
@@ -2552,7 +2556,7 @@ struct cl_req_obj {
*/
struct cl_req {
enum cl_req_type crq_type;
- /** A list of pages being transfered */
+ /** A list of pages being transferred */
struct list_head crq_pages;
/** Number of pages in cl_req::crq_pages */
unsigned crq_nrpages;
@@ -3224,7 +3228,7 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
*
* - call chains have no non-lustre portions inserted between lustre code.
*
- * On a client both these assumtpion fails, because every user thread can
+ * On a client both these assumption fails, because every user thread can
* potentially execute lustre code as part of a system call, and lustre calls
* into VFS or MM that call back into lustre.
*
diff --git a/drivers/staging/lustre/lustre/include/ioctl.h b/drivers/staging/lustre/lustre/include/ioctl.h
index 227c261b2ae9..b986920926bd 100644
--- a/drivers/staging/lustre/lustre/include/ioctl.h
+++ b/drivers/staging/lustre/lustre/include/ioctl.h
@@ -38,7 +38,7 @@
* and on newer kernels this header is shared as _ASM_GENERIC_IOCTL_H.
*
* We can avoid any problems with the kernel header being included again by
- * defining _ASM_I386_IOCTL_H here so that a later occurence of <asm/ioctl.h>
+ * defining _ASM_I386_IOCTL_H here so that a later occurrence of <asm/ioctl.h>
* does not include the kernel's ioctl.h after this one. b=14746 */
#define _ASM_I386_IOCTL_H
#define _ASM_GENERIC_IOCTL_H
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
index 27316f7b7a70..827209ea6bd0 100644
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ b/drivers/staging/lustre/lustre/include/lclient.h
@@ -118,8 +118,8 @@ struct ccc_io {
};
/**
- * True, if \a io is a normal io, False for other (sendfile, splice*).
- * must be impementated in arch specific code.
+ * True, if \a io is a normal io, False for splice_{read,write}.
+ * must be implemented in arch specific code.
*/
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_acl.h b/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
index 778b123ce31e..a91c5497d22c 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
@@ -44,7 +44,7 @@
#define _LUSTRE_LINUX_ACL_H
#ifndef _LUSTRE_ACL_H
-#error Shoud not include direectly. use #include <lustre_acl.h> instead
+#error Should not include directly. use #include <lustre_acl.h> instead
#endif
#include <linux/fs.h>
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index eefdb8d061b1..81cc7a0134bb 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -105,8 +105,8 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
#define ll_vfs_unlink(inode,entry,mnt) vfs_unlink(inode,entry)
#define ll_vfs_mknod(dir,entry,mnt,mode,dev) vfs_mknod(dir,entry,mode,dev)
#define ll_security_inode_unlink(dir,entry,mnt) security_inode_unlink(dir,entry)
-#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1,delegated_inode) \
- vfs_rename(old,old_dir,new,new_dir,delegated_inode)
+#define ll_vfs_rename(old, old_dir, mnt, new, new_dir, mnt1) \
+ vfs_rename(old, old_dir, new, new_dir, NULL, 0)
#define cfs_bio_io_error(a,b) bio_io_error((a))
#define cfs_bio_endio(a,b,c) bio_endio((a),(c))
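Note: the ll_vfs_rename() compat macro now forwards to the updated vfs_rename(), passing NULL for the delegation pointer and 0 for the rename flags it does not use (the six-argument prototype is an assumption about the contemporaneous VFS API, not shown in the patch). A minimal usage sketch:

#include <linux/fs.h>

/* Minimal sketch mirroring the macro above: no delegation, no flags. */
static int example_rename(struct inode *old_dir, struct dentry *old_de,
			  struct inode *new_dir, struct dentry *new_de)
{
	return vfs_rename(old_dir, old_de, new_dir, new_de, NULL, 0);
}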
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
index 01a50265239d..dc36f75eb635 100644
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ b/drivers/staging/lustre/lustre/include/linux/obd.h
@@ -96,7 +96,8 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
LCONSOLE_WARN("====== for current process =====\n");
dump_stack();
LCONSOLE_WARN("====== end =======\n");
- cfs_pause(1000 * HZ);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1000 * HZ);
}
cpu_relax();
}
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 5da31c54924a..87905bbc68e1 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -91,8 +91,8 @@
#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_
-#if !defined(LASSERT) && !defined(LPU64)
-#include <linux/libcfs/libcfs.h> /* for LASSERT, LPUX64, etc */
+#if !defined(LPU64)
+#include <linux/libcfs/libcfs.h> /* for LPUX64, etc */
#endif
/* Defn's shared with user-space. */
@@ -232,7 +232,6 @@ static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
static inline void fld_range_set_type(struct lu_seq_range *range,
unsigned flags)
{
- LASSERT(!(flags & ~LU_SEQ_RANGE_MASK));
range->lsr_flags |= flags;
}
@@ -351,7 +350,7 @@ struct som_attrs {
/** Bitfield for supported data in this structure. For future use. */
__u32 som_compat;
- /** Incompat feature list. The supported feature mask is availabe in
+ /** Incompat feature list. The supported feature mask is available in
* SOM_INCOMPAT_SUPP */
__u32 som_incompat;
@@ -615,7 +614,6 @@ static inline obd_id fid_idif_id(obd_seq seq, __u32 oid, __u32 ver)
/* extract ost index from IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
{
- LASSERT(fid_is_idif(fid));
return (fid_seq(fid) >> 16) & 0xffff;
}
@@ -833,11 +831,6 @@ static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
*/
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof(*src) ==
- sizeof(fid_seq(src)) +
- sizeof(fid_oid(src)) +
- sizeof(fid_ver(src)));
dst->f_seq = cpu_to_le64(fid_seq(src));
dst->f_oid = cpu_to_le32(fid_oid(src));
dst->f_ver = cpu_to_le32(fid_ver(src));
@@ -845,11 +838,6 @@ static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof(*src) ==
- sizeof(fid_seq(src)) +
- sizeof(fid_oid(src)) +
- sizeof(fid_ver(src)));
dst->f_seq = le64_to_cpu(fid_seq(src));
dst->f_oid = le32_to_cpu(fid_oid(src));
dst->f_ver = le32_to_cpu(fid_ver(src));
@@ -857,11 +845,6 @@ static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof(*src) ==
- sizeof(fid_seq(src)) +
- sizeof(fid_oid(src)) +
- sizeof(fid_ver(src)));
dst->f_seq = cpu_to_be64(fid_seq(src));
dst->f_oid = cpu_to_be32(fid_oid(src));
dst->f_ver = cpu_to_be32(fid_ver(src));
@@ -869,11 +852,6 @@ static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof(*src) ==
- sizeof(fid_seq(src)) +
- sizeof(fid_oid(src)) +
- sizeof(fid_ver(src)));
dst->f_seq = be64_to_cpu(fid_seq(src));
dst->f_oid = be32_to_cpu(fid_oid(src));
dst->f_ver = be32_to_cpu(fid_ver(src));
@@ -897,11 +875,6 @@ extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);
static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
- /* Check that there is no alignment padding. */
- CLASSERT(sizeof(*f0) ==
- sizeof(f0->f_seq) +
- sizeof(f0->f_oid) +
- sizeof(f0->f_ver));
return memcmp(f0, f1, sizeof(*f0)) == 0;
}
@@ -960,7 +933,7 @@ enum lu_dirent_attrs {
LUDA_TYPE = 0x0002,
LUDA_64BITHASH = 0x0004,
- /* The following attrs are used for MDT interanl only,
+ /* The following attrs are used for MDT internal use only,
* not visible to client */
/* Verify the dirent consistency */
@@ -1331,6 +1304,9 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
#define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */
#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
+#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */
+#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
+
/* XXX README XXX:
* Please DO NOT add flag values here before first ensuring that this same
* flag value is not in use on some other branch. Please clear any such
@@ -1368,7 +1344,10 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
OBD_CONNECT_EINPROGRESS | \
OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
- OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE)
+ OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\
+ OBD_CONNECT_FLOCK_DEAD | \
+ OBD_CONNECT_DISP_STRIPE)
+
#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
@@ -1771,7 +1750,6 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
-#define OBD_MD_FLXATTRLOCKED OBD_MD_FLGETATTRLOCK
#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
/* don't forget obdo_fid which is way down at the bottom so it can
@@ -2134,19 +2112,32 @@ extern void lustre_swab_generic_32s (__u32 *val);
#define DISP_LOOKUP_POS 0x00000008
#define DISP_OPEN_CREATE 0x00000010
#define DISP_OPEN_OPEN 0x00000020
-#define DISP_ENQ_COMPLETE 0x00400000
+#define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
#define DISP_ENQ_OPEN_REF 0x00800000
#define DISP_ENQ_CREATE_REF 0x01000000
#define DISP_OPEN_LOCK 0x02000000
#define DISP_OPEN_LEASE 0x04000000
+#define DISP_OPEN_STRIPE 0x08000000
/* INODE LOCK PARTS */
-#define MDS_INODELOCK_LOOKUP 0x000001 /* dentry, mode, owner, group */
-#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
-#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
-#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
-#define MDS_INODELOCK_PERM 0x000010 /* for permission */
-#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
+#define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc.; also
+ * used to protect permission (mode,
+ * owner, group, etc.) before 2.4. */
+#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
+#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
+#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
+
+/* The PERM bit was added in 2.4; it is used to protect permission (mode,
+ * owner, group, acl, etc.) and so separates the permission bits from the
+ * LOOKUP lock, because for remote directories (in DNE) these locks will be
+ * granted by different MDTs (different ldlm namespaces).
+ *
+ * For a local directory, the MDT always grants UPDATE_LOCK|PERM_LOCK
+ * together. For a remote directory, the master MDT, where the remote
+ * directory is, grants UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the
+ * name entry is, grants LOOKUP_LOCK. */
+#define MDS_INODELOCK_PERM 0x000010
+#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
#define MDS_INODELOCK_MAXSHIFT 5
/* This FULL lock is useful to take on unlink sort of operations */
@@ -2595,7 +2586,7 @@ struct mdt_rec_setxattr {
* Do NOT change the size of various members, otherwise the value
* will be broken in lustre_swab_mdt_rec_reint().
*
- * If you add new members in other mdt_reint_xxx structres and need to use the
+ * If you add new members in other mdt_reint_xxx structures and need to use the
* rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
*/
struct mdt_rec_reint {
@@ -3328,9 +3319,10 @@ struct obdo {
#define o_grant_used o_data_version
static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
- struct obdo *wobdo, struct obdo *lobdo)
+ struct obdo *wobdo,
+ const struct obdo *lobdo)
{
- memcpy(wobdo, lobdo, sizeof(*lobdo));
+ *wobdo = *lobdo;
wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
if (ocd == NULL)
return;
@@ -3345,16 +3337,15 @@ static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
}
static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
- struct obdo *lobdo, struct obdo *wobdo)
+ struct obdo *lobdo,
+ const struct obdo *wobdo)
{
obd_flag local_flags = 0;
if (lobdo->o_valid & OBD_MD_FLFLAGS)
local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
- LASSERT(!(wobdo->o_flags & OBD_FL_LOCAL_MASK));
-
- memcpy(lobdo, wobdo, sizeof(*lobdo));
+ *lobdo = *wobdo;
if (local_flags != 0) {
lobdo->o_valid |= OBD_MD_FLFLAGS;
lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
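
The lustre_set_wire_obdo()/lustre_get_wire_obdo() hunks above replace a
memcpy() of the whole struct obdo with plain structure assignment, which
accepts a const-qualified source and is type-checked by the compiler. A
minimal userspace sketch of the same idea follows; demo_obdo is a made-up
three-field stand-in for struct obdo, and the 0xff mask only stands in for
OBD_FL_LOCAL_MASK.

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for struct obdo; the real structure has many more fields. */
    struct demo_obdo {
        uint64_t o_valid;
        uint64_t o_flags;
        uint64_t o_size;
    };

    /* Structure assignment copies every member, exactly like memcpy() of
     * sizeof(*src), but accepts a const source and breaks the build if the
     * two types ever diverge. */
    static void set_wire(struct demo_obdo *wobdo, const struct demo_obdo *lobdo)
    {
        *wobdo = *lobdo;
        wobdo->o_flags &= ~0xffULL;  /* strip pretend "local-only" flag bits */
    }

    int main(void)
    {
        struct demo_obdo local = { .o_valid = 1, .o_flags = 0x10f, .o_size = 4096 };
        struct demo_obdo wire;

        set_wire(&wire, &local);
        assert(wire.o_valid == 1 && wire.o_size == 4096);
        assert(wire.o_flags == 0x100);  /* local-only bits removed on the wire */
        return 0;
    }
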
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 7893d83e131f..f5f369e603de 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -265,13 +265,11 @@ struct ost_id {
#define MAX_OBD_NAME 128 /* If this changes, a NEW ioctl must be added */
-/* Hopefully O_LOV_DELAY_CREATE does not conflict with standard O_xxx flags.
- * Previously it was defined as 0100000000 and conflicts with FMODE_NONOTIFY
- * which was added since kernel 2.6.36, so we redefine it as 020000000.
- * To be compatible with old version's statically linked binary, finally we
- * define it as (020000000 | 0100000000).
- * */
-#define O_LOV_DELAY_CREATE 0120000000
+/* Define O_LOV_DELAY_CREATE as a mask of flags that are not useful for
+ * regular files, are unlikely to be used in practice, and are not harmful
+ * if used incorrectly. O_NOCTTY and FASYNC are only meaningful for
+ * character devices and are safe for use on new files (see LU-812, LU-4209). */
+#define O_LOV_DELAY_CREATE (O_NOCTTY | FASYNC)
#define LL_FILE_IGNORE_LOCK 0x00000001
#define LL_FILE_GROUP_LOCKED 0x00000002
@@ -300,7 +298,7 @@ struct ost_id {
#define LOV_MAX_STRIPE_COUNT_OLD 160
/* This calculation is crafted so that input of 4096 will result in 160
* which in turn is equal to old maximal stripe count.
- * XXX: In fact this is too simpified for now, what it also need is to get
+ * XXX: In fact this is too simplified for now, what it also need is to get
* ea_type argument to clearly know how much space each stripe consumes.
*
* The limit of 12 pages is somewhat arbitrary, but is a reasonably large
@@ -930,7 +928,7 @@ struct hsm_state_set_ioc {
/*
* This structure describes the current in-progress action for a file.
- * it is retuned to user space and send over the wire
+ * it is returned to user space and send over the wire
*/
struct hsm_current_action {
/** The current undergoing action, if there is one */
@@ -1159,12 +1157,6 @@ struct hsm_progress {
__u32 padding;
};
-/**
- * Use by copytool during any hsm request they handled.
- * This structure is initialized by llapi_hsm_copy_start()
- * which is an helper over the ioctl() interface
- * Store Lustre, internal use only, data.
- */
struct hsm_copy {
__u64 hc_data_version;
__u16 hc_flags;
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index e14a5f674e85..3680668a8920 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -88,6 +88,8 @@ enum lcfg_command_type {
LCFG_SET_LDLM_TIMEOUT = 0x00ce030, /**< set ldlm_timeout */
LCFG_PRE_CLEANUP = 0x00cf031, /**< call type-specific pre
* cleanup cleanup */
+ LCFG_SET_PARAM = 0x00ce032, /**< use set_param syntax to set
+ * a proc parameter */
};
struct lustre_cfg_bufs {
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 1de9a8bed497..ac08164793cb 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -99,6 +99,8 @@
#define LDD_F_IR_CAPABLE 0x2000
/** the MGS refused to register the target. */
#define LDD_F_ERROR 0x4000
+/** process at lctl conf_param */
+#define LDD_F_PARAM2 0x8000
/* opc for target register */
#define LDD_F_OPC_REG 0x10000000
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index ec4bb5e3c13e..3e25f001c07d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -330,7 +330,7 @@ enum {
};
typedef enum {
- /** invalide type */
+ /** invalid type */
LDLM_NS_TYPE_UNKNOWN = 0,
/** mdc namespace */
LDLM_NS_TYPE_MDC,
@@ -1185,7 +1185,7 @@ ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
/**
* Update Lock Value Block Operations (LVBO) on a resource taking into account
- * data from reqest \a r
+ * data from request \a r
*/
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
struct ptlrpc_request *r, int increase)
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 75716f17f64b..16dcdbfae689 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -35,10 +35,10 @@
#ifndef LDLM_ALL_FLAGS_MASK
/** l_flags bits marked as "all_flags" bits */
-#define LDLM_FL_ALL_FLAGS_MASK 0x00FFFFFFC08F132FULL
+#define LDLM_FL_ALL_FLAGS_MASK 0x00FFFFFFC08F932FULL
/** l_flags bits marked as "ast" bits */
-#define LDLM_FL_AST_MASK 0x0000000080000000ULL
+#define LDLM_FL_AST_MASK 0x0000000080008000ULL
/** l_flags bits marked as "blocked" bits */
#define LDLM_FL_BLOCKED_MASK 0x000000000000000EULL
@@ -56,7 +56,7 @@
#define LDLM_FL_LOCAL_ONLY_MASK 0x00FFFFFF00000000ULL
/** l_flags bits marked as "on_wire" bits */
-#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F132FULL
+#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F932FULL
/** extent, mode, or resource changed */
#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL // bit 0
@@ -114,6 +114,12 @@
#define ldlm_set_has_intent(_l) LDLM_SET_FLAG(( _l), 1ULL << 12)
#define ldlm_clear_has_intent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 12)
+/** flock deadlock detected */
+#define LDLM_FL_FLOCK_DEADLOCK 0x0000000000008000ULL /* bit 15 */
+#define ldlm_is_flock_deadlock(_l) LDLM_TEST_FLAG((_l), 1ULL << 15)
+#define ldlm_set_flock_deadlock(_l) LDLM_SET_FLAG((_l), 1ULL << 15)
+#define ldlm_clear_flock_deadlock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 15)
+
/** discard (no writeback) on cancel */
#define LDLM_FL_DISCARD_DATA 0x0000000000010000ULL // bit 16
#define ldlm_is_discard_data(_l) LDLM_TEST_FLAG(( _l), 1ULL << 16)
@@ -141,7 +147,7 @@
#define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19)
/**
- * Immediatelly cancel such locks when they block some other locks. Send
+ * Immediately cancel such locks when they block some other locks. Send
* cancel notification to original lock holder, but expect no reply. This
* is for clients (like liblustre) that cannot be expected to reliably
* response to blocking AST. */
@@ -242,7 +248,7 @@
/**
* A lock contributes to the known minimum size (KMS) calculation until it
- * has finished the part of its cancelation that performs write back on its
+ * has finished the part of its cancellation that performs write back on its
* dirty pages. It can remain on the granted list during this whole time.
* Threads racing to update the KMS after performing their writeback need
* to know to exclude each other's locks from the calculation as they walk
@@ -390,6 +396,7 @@ static int hf_lustre_ldlm_fl_ast_sent = -1;
static int hf_lustre_ldlm_fl_replay = -1;
static int hf_lustre_ldlm_fl_intent_only = -1;
static int hf_lustre_ldlm_fl_has_intent = -1;
+static int hf_lustre_ldlm_fl_flock_deadlock = -1;
static int hf_lustre_ldlm_fl_discard_data = -1;
static int hf_lustre_ldlm_fl_no_timeout = -1;
static int hf_lustre_ldlm_fl_block_nowait = -1;
@@ -431,6 +438,7 @@ const value_string lustre_ldlm_flags_vals[] = {
{LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
{LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
{LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
+ {LDLM_FL_FLOCK_DEADLOCK, "LDLM_FL_FLOCK_DEADLOCK"},
{LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
{LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
{LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
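
The new LDLM_FL_FLOCK_DEADLOCK flag occupies bit 15, and the hunks above fold
that bit into the all_flags, ast and on_wire masks, which is why 0x...132F
becomes 0x...932F and 0x80000000 becomes 0x80008000. A small self-contained
check of that arithmetic, using only the constants visible in the hunk:

    #include <assert.h>
    #include <stdint.h>

    #define LDLM_FL_FLOCK_DEADLOCK 0x0000000000008000ULL  /* bit 15 */

    int main(void)
    {
        /* old masks from the left-hand side of the hunk */
        const uint64_t old_all  = 0x00FFFFFFC08F132FULL;
        const uint64_t old_ast  = 0x0000000080000000ULL;
        const uint64_t old_wire = 0x00000000C08F132FULL;

        /* OR-ing in bit 15 yields exactly the new right-hand-side values */
        assert((old_all  | LDLM_FL_FLOCK_DEADLOCK) == 0x00FFFFFFC08F932FULL);
        assert((old_ast  | LDLM_FL_FLOCK_DEADLOCK) == 0x0000000080008000ULL);
        assert((old_wire | LDLM_FL_FLOCK_DEADLOCK) == 0x00000000C08F932FULL);
        return 0;
    }
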
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
index 2feb38b51af2..103f7a8bd83f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -192,9 +192,9 @@ struct obd_export {
struct obd_import *exp_imp_reverse;
struct nid_stat *exp_nid_stats;
struct lprocfs_stats *exp_md_stats;
- /** Active connetion */
+ /** Active connection */
struct ptlrpc_connection *exp_connection;
- /** Connection count value from last succesful reconnect rpc */
+ /** Connection count value from last successful reconnect rpc */
__u32 exp_conn_cnt;
/** Hash list of all ldlm locks granted on this export */
struct cfs_hash *exp_lock_hash;
@@ -380,6 +380,23 @@ static inline bool imp_connect_lvb_type(struct obd_import *imp)
return false;
}
+static inline __u64 exp_connect_ibits(struct obd_export *exp)
+{
+ struct obd_connect_data *ocd;
+
+ ocd = &exp->exp_connect_data;
+ return ocd->ocd_ibits_known;
+}
+
+static inline bool imp_connect_disp_stripe(struct obd_import *imp)
+{
+ struct obd_connect_data *ocd;
+
+ LASSERT(imp != NULL);
+ ocd = &imp->imp_connect_data;
+ return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE;
+}
+
extern struct obd_export *class_conn2export(struct lustre_handle *conn);
extern struct obd_device *class_conn2obd(struct lustre_handle *conn);
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 84a897eed1df..5e7b3165a851 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -339,7 +339,7 @@ struct lu_client_seq {
struct mutex lcs_mutex;
/*
- * Range of allowed for allocation sequeces. When using lu_client_seq on
+ * Range of sequences allowed for allocation. When using lu_client_seq on
* clients, this contains meta-sequence range. And for servers this
* contains super-sequence range.
*/
@@ -398,7 +398,7 @@ struct lu_server_seq {
/* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
enum lu_mgr_type lss_type;
- /* Client interafce to request controller */
+ /* Client interface to request controller */
struct lu_client_seq *lss_cli;
/* Mutex for protecting allocation */
@@ -568,14 +568,14 @@ fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
* finally, when we replace ost_id with FID in data stack.
*
* Currently, resid from the old client, whose res[0] = object_id,
- * res[1] = object_seq, is just oposite with Metatdata
+ * res[1] = object_seq, is just the opposite of the Metadata
* resid, where, res[0] = fid->f_seq, res[1] = fid->f_oid.
- * To unifiy the resid identification, we will reverse the data
+ * To unify the resid identification, we will reverse the data
* resid to keep it same with Metadata resid, i.e.
*
* For resid from the old client,
* res[0] = objid, res[1] = 0, still keep the original order,
- * for compatiblity.
+ * for compatibility.
*
* For new resid
* res will be built from normal FID directly, i.e. res[0] = f_seq,
@@ -685,7 +685,7 @@ static inline __u32 fid_hash(const struct lu_fid *f, int bits)
{
/* all objects with same id and different versions will belong to same
* collisions list. */
- return cfs_hash_long(fid_flatten(f), bits);
+ return hash_long(fid_flatten(f), bits);
}
/**
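
fid_hash() above now calls the kernel's generic hash_long() instead of the
libcfs copy. The exact multiplier hash_long() uses differs between kernel
versions, so the sketch below is only an illustration of multiplicative
hashing into 2^bits buckets: demo_hash_long() is a stand-in, and its odd
64-bit constant is arbitrary, not the kernel's.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for hash_long(); the multiplier is an arbitrary
     * odd constant, not the one any particular kernel version uses. */
    static unsigned int demo_hash_long(uint64_t val, unsigned int bits)
    {
        return (unsigned int)((val * 0x9E3779B97F4A7C15ULL) >> (64 - bits));
    }

    int main(void)
    {
        uint64_t flattened_fid = 0x200000401ULL;  /* made-up flattened FID */
        unsigned int bucket = demo_hash_long(flattened_fid, 7);

        assert(bucket < (1U << 7));  /* always lands in one of 2^bits buckets */
        printf("bucket %u of %u\n", bucket, 1U << 7);
        return 0;
    }
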
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 67259eb43cde..01ed786b40b9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -152,7 +152,7 @@ struct import_state_hist {
};
/**
- * Defintion of PortalRPC import structure.
+ * Definition of PortalRPC import structure.
* Imports are representing client-side view to remote target.
*/
struct obd_import {
@@ -180,6 +180,17 @@ struct obd_import {
struct list_head imp_delayed_list;
/** @} */
+ /**
+ * List of requests that are retained for committed open replay. Once
+ * open is committed, the open replay request will be moved from the
+ * imp_replay_list into the imp_committed_list.
+ * The imp_replay_cursor is used to speed up searching during replay.
+ * @{
+ */
+ struct list_head imp_committed_list;
+ struct list_head *imp_replay_cursor;
+ /** @} */
+
/** obd device for this import */
struct obd_device *imp_obd;
@@ -219,7 +230,7 @@ struct obd_import {
* after a check to save on unnecessary replay list iterations
*/
int imp_last_generation_checked;
- /** Last tranno we replayed */
+ /** Last transno we replayed */
__u64 imp_last_replay_transno;
/** Last transno committed on remote side */
__u64 imp_peer_committed_transno;
@@ -237,7 +248,7 @@ struct obd_import {
struct lustre_handle imp_remote_handle;
/** When to perform next ping. time in jiffies. */
cfs_time_t imp_next_ping;
- /** When we last succesfully connected. time in 64bit jiffies */
+ /** When we last successfully connected. time in 64bit jiffies */
__u64 imp_last_success_conn;
/** List of all possible connection for import. */
@@ -268,7 +279,7 @@ struct obd_import {
imp_no_lock_replay:1,
/* recovery by versions was failed */
imp_vbr_failed:1,
- /* force an immidiate ping */
+ /* force an immediate ping */
imp_force_verify:1,
/* force a scheduled ping */
imp_force_next_verify:1,
@@ -281,7 +292,7 @@ struct obd_import {
/* need IR MNE swab */
imp_need_mne_swab:1,
/* import must be reconnected instead of
- * chouse new connection */
+ * choosing a new connection */
imp_force_reconnect:1,
/* import has tried to connect with server */
imp_connect_tried:1;
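
The imp_committed_list/imp_replay_cursor pair added above keeps committed
open-replay requests on their own list and remembers where the previous scan
stopped, so the next scan does not restart from the head. A rough userspace
illustration of a saved-cursor scan; the req structure and its transno field
are made up and are not the real ptlrpc_request:

    #include <assert.h>
    #include <stddef.h>

    struct req {
        unsigned long long transno;  /* stand-in for the request transno */
        struct req *next;
    };

    /* Resume from the remembered cursor instead of the list head, skipping
     * every entry whose transno is already committed. */
    static struct req *advance_cursor(struct req *cursor, unsigned long long committed)
    {
        while (cursor && cursor->transno <= committed)
            cursor = cursor->next;
        return cursor;
    }

    int main(void)
    {
        struct req r3 = { 30, NULL }, r2 = { 20, &r3 }, r1 = { 10, &r2 };
        struct req *cursor = &r1;

        cursor = advance_cursor(cursor, 15);  /* r1 committed */
        assert(cursor == &r2);
        cursor = advance_cursor(cursor, 25);  /* resumes at r2, not at the head */
        assert(cursor == &r3);
        return 0;
    }
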
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 609a090484a6..0368ca6ffcc1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -536,7 +536,7 @@ do { \
if (condition) \
break; \
\
- init_waitqueue_entry_current(&__wait); \
+ init_waitqueue_entry(&__wait, current); \
l_add_wait(&wq, &__wait); \
\
/* Block all signals (just the non-fatal ones if no timeout). */ \
@@ -558,15 +558,13 @@ do { \
break; \
\
if (__timeout == 0) { \
- waitq_wait(&__wait, __wstate); \
+ schedule(); \
} else { \
cfs_duration_t interval = info->lwi_interval? \
min_t(cfs_duration_t, \
info->lwi_interval,__timeout):\
__timeout; \
- cfs_duration_t remaining = waitq_timedwait(&__wait,\
- __wstate, \
- interval); \
+ cfs_duration_t remaining = schedule_timeout(interval);\
__timeout = cfs_time_sub(__timeout, \
cfs_time_sub(interval, remaining));\
if (__timeout == 0) { \
diff --git a/drivers/staging/lustre/lustre/include/lustre_linkea.h b/drivers/staging/lustre/lustre/include/lustre_linkea.h
index 5790be913bf6..500ace30cfbf 100644
--- a/drivers/staging/lustre/lustre/include/lustre_linkea.h
+++ b/drivers/staging/lustre/lustre/include/lustre_linkea.h
@@ -33,7 +33,7 @@ struct linkea_data {
*/
struct lu_buf *ld_buf;
/**
- * The matched header, entry and its lenght in the EA
+ * The matched header, entry and its length in the EA
*/
struct link_ea_header *ld_leh;
struct link_ea_entry *ld_lee;
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index c1e02702b931..468f36344a34 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -166,6 +166,17 @@ void it_clear_disposition(struct lookup_intent *it, int flag);
void it_set_disposition(struct lookup_intent *it, int flag);
int it_open_error(int phase, struct lookup_intent *it);
+static inline bool cl_is_lov_delay_create(unsigned int flags)
+{
+ return (flags & O_LOV_DELAY_CREATE) == O_LOV_DELAY_CREATE;
+}
+
+static inline void cl_lov_delay_create_clear(unsigned int *flags)
+{
+ if ((*flags & O_LOV_DELAY_CREATE) == O_LOV_DELAY_CREATE)
+ *flags &= ~O_LOV_DELAY_CREATE;
+}
+
/** @} mdc */
#endif
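
cl_is_lov_delay_create() above only treats the flag as present when both
O_NOCTTY and FASYNC bits are set, which is why redefining O_LOV_DELAY_CREATE
as their union is safe: an ordinary open that happens to pass one of the two
bits does not trigger delayed layout creation. A minimal userspace check of
that property; DEMO_O_LOV_DELAY_CREATE mirrors the new define, and the flag
values come from the host's <fcntl.h>, which may differ between platforms.

    #define _GNU_SOURCE
    #include <assert.h>
    #include <fcntl.h>
    #include <stdbool.h>

    #ifndef FASYNC
    #define FASYNC O_ASYNC  /* some libcs only expose the O_ASYNC spelling */
    #endif

    #define DEMO_O_LOV_DELAY_CREATE (O_NOCTTY | FASYNC)

    static bool is_lov_delay_create(unsigned int flags)
    {
        /* both bits must be present, as in cl_is_lov_delay_create() */
        return (flags & DEMO_O_LOV_DELAY_CREATE) == DEMO_O_LOV_DELAY_CREATE;
    }

    int main(void)
    {
        assert(!is_lov_delay_create(O_RDWR | O_CREAT));
        assert(!is_lov_delay_create(O_WRONLY | O_NOCTTY));  /* one bit alone is not enough */
        assert(is_lov_delay_create(O_RDWR | DEMO_O_LOV_DELAY_CREATE));
        return 0;
    }
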
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index d8d088035428..745adbb74cfc 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -445,7 +445,7 @@ struct ptlrpc_reply_state {
lnet_handle_md_t rs_md_h;
atomic_t rs_refcount;
- /** Context for the sevice thread */
+ /** Context for the service thread */
struct ptlrpc_svc_ctx *rs_svc_ctx;
/** Reply buffer (actually sent to the client), encoded if needed */
struct lustre_msg *rs_repbuf; /* wrapper */
@@ -454,9 +454,9 @@ struct ptlrpc_reply_state {
/** Size of the reply message */
int rs_repdata_len; /* wrapper msg length */
/**
- * Actual reply message. Its content is encrupted (if needed) to
+ * Actual reply message. Its content is encrypted (if needed) to
* produce reply buffer for actual sending. In simple case
- * of no network encryption we jus set \a rs_repbuf to \a rs_msg
+ * of no network encryption we just set \a rs_repbuf to \a rs_msg
*/
struct lustre_msg *rs_msg; /* reply message */
@@ -497,7 +497,7 @@ struct ptlrpc_request_pool {
spinlock_t prp_lock;
/** list of ptlrpc_request structs */
struct list_head prp_req_list;
- /** Maximum message size that would fit into a rquest from this pool */
+ /** Maximum message size that would fit into a request from this pool */
int prp_rq_size;
/** Function to allocate more requests for this pool */
void (*prp_populate)(struct ptlrpc_request_pool *, int);
@@ -904,7 +904,7 @@ struct ptlrpc_nrs_pol_conf {
*/
struct module *nc_owner;
/**
- * Policy registration flags; a bitmast of \e nrs_policy_flags
+ * Policy registration flags; a bitmask of \e nrs_policy_flags
*/
unsigned nc_flags;
};
@@ -1351,7 +1351,7 @@ struct nrs_orr_data {
*/
enum nrs_orr_supp od_supp;
/**
- * Round Robin quantum; the maxium number of RPCs that each request
+ * Round Robin quantum; the maximum number of RPCs that each request
* batch for each object or OST can have in a scheduling round.
*/
__u16 od_quantum;
@@ -1486,7 +1486,7 @@ struct ptlrpc_nrs_request {
*/
struct nrs_fifo_req fifo;
/**
- * CRR-N request defintion
+ * CRR-N request definition
*/
struct nrs_crrn_req crr;
/** ORR and TRR share the same request definition */
@@ -1550,7 +1550,7 @@ struct ptlrpc_request {
* requests in time
*/
struct list_head rq_timed_list;
- /** server-side history, used for debuging purposes. */
+ /** server-side history, used for debugging purposes. */
struct list_head rq_history_list;
/** server-side per-export list */
struct list_head rq_exp_list;
@@ -1611,7 +1611,7 @@ struct ptlrpc_request {
enum rq_phase rq_phase; /* one of RQ_PHASE_* */
enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
atomic_t rq_refcount;/* client-side refcount for SENT race,
- server-side refcounf for multiple replies */
+ server-side refcount for multiple replies */
/** Portal to which this request would be sent */
short rq_request_portal; /* XXX FIXME bug 249 */
@@ -1637,7 +1637,7 @@ struct ptlrpc_request {
/** xid */
__u64 rq_xid;
/**
- * List item to for replay list. Not yet commited requests get linked
+ * List item for the replay list. Not yet committed requests get linked
* there.
* Also see \a rq_replay comment above.
*/
@@ -1952,7 +1952,7 @@ void _debug_req(struct ptlrpc_request *req,
__attribute__ ((format (printf, 3, 4)));
/**
- * Helper that decides if we need to print request accordig to current debug
+ * Helper that decides if we need to print request according to current debug
* level settings
*/
#define debug_req(msgdata, mask, cdls, req, fmt, a...) \
@@ -1966,7 +1966,7 @@ do { \
} while(0)
/**
- * This is the debug print function you need to use to print request sturucture
+ * This is the debug print function you need to use to print request structure
* content into lustre debug log.
* for most callers (level is a constant) this is resolved at compile time */
#define DEBUG_REQ(level, req, fmt, args...) \
@@ -2621,6 +2621,8 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
* request queues, request management, etc.
* @{
*/
+void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
+
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
struct ptlrpc_client *);
void ptlrpc_cleanup_client(struct obd_import *imp);
diff --git a/drivers/staging/lustre/lustre/include/lustre_quota.h b/drivers/staging/lustre/lustre/include/lustre_quota.h
index 71b5d97e0343..07cb7c310bcc 100644
--- a/drivers/staging/lustre/lustre/include/lustre_quota.h
+++ b/drivers/staging/lustre/lustre/include/lustre_quota.h
@@ -140,7 +140,7 @@ struct qsd_instance;
* (i.e. when ->ldo_recovery_complete is called). This is used
* to notify the qsd layer that quota should now be enforced
* again via the qsd_op_begin/end functions. The last step of the
- * reintegration prodecure (namely usage reconciliation) will be
+ * reintegration procedure (namely usage reconciliation) will be
* completed during start.
*
* - qsd_fini(): is used to release a qsd_instance structure allocated with
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 885247d28b3a..bf3ee3915c28 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -572,7 +572,7 @@ struct ptlrpc_sec_cops {
/**
* Called then the reference of \a ctx dropped to 0. The policy module
* is supposed to destroy this context or whatever else according to
- * its cache maintainance mechamism.
+ * its cache maintenance mechanism.
*
* \param sync if zero, we shouldn't wait for the context being
* destroyed completely.
@@ -1002,7 +1002,7 @@ struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec);
void sptlrpc_sec_put(struct ptlrpc_sec *sec);
/*
- * internal apis which only used by policy impelentation
+ * internal apis which only used by policy implementation
*/
int sptlrpc_get_next_secid(void);
void sptlrpc_sec_destroy(struct ptlrpc_sec *sec);
diff --git a/drivers/staging/lustre/lustre/include/md_object.h b/drivers/staging/lustre/lustre/include/md_object.h
index 7b45b47b48f9..ef46b2c461a6 100644
--- a/drivers/staging/lustre/lustre/include/md_object.h
+++ b/drivers/staging/lustre/lustre/include/md_object.h
@@ -35,7 +35,7 @@
*
* lustre/include/md_object.h
*
- * Extention of lu_object.h for metadata objects
+ * Extension of lu_object.h for metadata objects
*/
#ifndef _LUSTRE_MD_OBJECT_H
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index c3470ce62cff..72cf3fe4b0c9 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -158,7 +158,7 @@ struct obd_info {
/* statfs data specific for every OSC, if needed at all. */
struct obd_statfs *oi_osfs;
/* An update callback which is called to update some data on upper
- * level. E.g. it is used for update lsm->lsm_oinfo at every recieved
+ * level. E.g. it is used to update lsm->lsm_oinfo at every received
* request in osc level for enqueue requests. It is also possible to
* update some caller data from LOV layer if needed. */
obd_enqueue_update_f oi_cb_up;
@@ -1042,8 +1042,8 @@ static inline int it_to_lock_mode(struct lookup_intent *it)
}
struct md_op_data {
- struct lu_fid op_fid1; /* operation fid1 (usualy parent) */
- struct lu_fid op_fid2; /* operation fid2 (usualy child) */
+ struct lu_fid op_fid1; /* operation fid1 (usually parent) */
+ struct lu_fid op_fid2; /* operation fid2 (usually child) */
struct lu_fid op_fid3; /* 2 extra fids to find conflicting */
struct lu_fid op_fid4; /* to the operation locks. */
mdsno_t op_mds; /* what mds server open will go to */
@@ -1323,7 +1323,8 @@ struct md_open_data {
struct obd_client_handle *mod_och;
struct ptlrpc_request *mod_open_req;
struct ptlrpc_request *mod_close_req;
- atomic_t mod_refcount;
+ atomic_t mod_refcount;
+ bool mod_is_create;
};
struct lookup_intent;
@@ -1392,7 +1393,7 @@ struct md_ops {
int (*m_set_open_replay_data)(struct obd_export *,
struct obd_client_handle *,
- struct ptlrpc_request *);
+ struct lookup_intent *);
int (*m_clear_open_replay_data)(struct obd_export *,
struct obd_client_handle *);
int (*m_set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *);
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 983718fe1e55..9d1f266e55e4 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -175,9 +175,13 @@ enum {
CONFIG_T_CONFIG = 0,
CONFIG_T_SPTLRPC = 1,
CONFIG_T_RECOVER = 2,
- CONFIG_T_MAX = 3
+ CONFIG_T_PARAMS = 3,
+ CONFIG_T_MAX = 4
};
+#define PARAMS_FILENAME "params"
+#define LCTL_UPCALL "lctl"
+
/* list of active configuration logs */
struct config_llog_data {
struct ldlm_res_id cld_resid;
@@ -185,7 +189,8 @@ struct config_llog_data {
struct list_head cld_list_chain;
atomic_t cld_refcount;
struct config_llog_data *cld_sptlrpc;/* depended sptlrpc log */
- struct config_llog_data *cld_recover; /* imperative recover log */
+ struct config_llog_data *cld_params; /* common parameters log */
+ struct config_llog_data *cld_recover;/* imperative recover log */
struct obd_export *cld_mgcexp;
struct mutex cld_lock;
int cld_type;
@@ -1626,7 +1631,7 @@ static inline int obd_health_check(const struct lu_env *env,
{
/* returns: 0 on healthy
* >0 on unhealthy + reason code/flag
- * however the only suppored reason == 1 right now
+ * however the only supported reason == 1 right now
* We'll need to define some better reasons
* or flags in the future.
* <0 on error
@@ -1996,11 +2001,11 @@ static inline int md_getxattr(struct obd_export *exp,
static inline int md_set_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och,
- struct ptlrpc_request *open_req)
+ struct lookup_intent *it)
{
EXP_CHECK_MD_OP(exp, set_open_replay_data);
EXP_MD_COUNTER_INCREMENT(exp, set_open_replay_data);
- return MDP(exp->exp_obd, set_open_replay_data)(exp, och, open_req);
+ return MDP(exp->exp_obd, set_open_replay_data)(exp, och, it);
}
static inline int md_clear_open_replay_data(struct obd_export *exp,
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 977bc231df9d..5ec336968fc8 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -681,7 +681,7 @@ do { \
*
* Be very careful when changing this value, especially when decreasing it,
* since vmalloc in Linux doesn't perform well on multi-cores system, calling
- * vmalloc in critical path would hurt peformance badly. See LU-66.
+ * vmalloc in critical path would hurt performance badly. See LU-66.
*/
#define OBD_ALLOC_BIG (4 * PAGE_CACHE_SIZE)
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index 94b164127e0c..6907a16dbbd1 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -1196,14 +1196,14 @@ static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
- init_waitqueue_entry_current(&waiter);
+ init_waitqueue_entry(&waiter, current);
add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&header->loh_ref) == 1)
break;
- waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);
+ schedule();
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index c9aae132f98a..986bf384bff7 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -205,6 +205,26 @@ ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
return 0;
}
+static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
+ struct list_head *work_list)
+{
+ CDEBUG(D_INFO, "reprocess deadlock req=%p\n", lock);
+
+ if ((exp_connect_flags(lock->l_export) &
+ OBD_CONNECT_FLOCK_DEAD) == 0) {
+ CERROR(
+ "deadlock found, but client doesn't support flock canceliation\n");
+ } else {
+ LASSERT(lock->l_completion_ast);
+ LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
+ lock->l_flags |= LDLM_FL_AST_SENT | LDLM_FL_CANCEL_ON_BLOCK |
+ LDLM_FL_FLOCK_DEADLOCK;
+ ldlm_flock_blocking_unlink(lock);
+ ldlm_resource_unlink_lock(lock);
+ ldlm_add_ast_work_item(lock, NULL, work_list);
+ }
+}
+
/**
* Process a granting attempt for flock lock.
* Must be called under ns lock held.
@@ -272,6 +292,7 @@ reprocess:
}
}
} else {
+ int reprocess_failed = 0;
lockmode_verify(mode);
/* This loop determines if there are existing locks
@@ -293,8 +314,15 @@ reprocess:
if (!ldlm_flocks_overlap(lock, req))
continue;
- if (!first_enq)
- return LDLM_ITER_CONTINUE;
+ if (!first_enq) {
+ reprocess_failed = 1;
+ if (ldlm_flock_deadlock(req, lock)) {
+ ldlm_flock_cancel_on_deadlock(req,
+ work_list);
+ return LDLM_ITER_CONTINUE;
+ }
+ continue;
+ }
if (*flags & LDLM_FL_BLOCK_NOWAIT) {
ldlm_flock_destroy(req, mode, *flags);
@@ -330,6 +358,8 @@ reprocess:
*flags |= LDLM_FL_BLOCK_GRANTED;
return LDLM_ITER_STOP;
}
+ if (reprocess_failed)
+ return LDLM_ITER_CONTINUE;
}
if (*flags & LDLM_FL_TEST_LOCK) {
@@ -646,7 +676,10 @@ granted:
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
list_del_init(&lock->l_res_link);
- if (flags & LDLM_FL_TEST_LOCK) {
+ if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
+ LDLM_DEBUG(lock, "client-side enqueue deadlock received");
+ rc = -EDEADLK;
+ } else if (flags & LDLM_FL_TEST_LOCK) {
/* fcntl(F_GETLK) request */
/* The old mode was saved in getlk->fl_type so that if the mode
* in the lock changes we can decref the appropriate refcount.*/
@@ -672,7 +705,7 @@ granted:
ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
}
unlock_res_and_lock(lock);
- return 0;
+ return rc;
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);
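
The ldlm_flock_deadlock()/ldlm_flock_cancel_on_deadlock() path introduced
above detects flock deadlocks by following the chain of blocking lock owners
and cancelling the request with -EDEADLK when the chain loops back to the
requester. The sketch below shows only the general cycle-detection idea with
a made-up blocked_on table; it is not Lustre's hash-based blocking-link
implementation.

    #include <assert.h>

    #define NOBODY (-1)

    /* blocked_on[i] is the owner that owner i currently waits for (NOBODY if
     * it is not blocked). A deadlock exists when following this chain from
     * the requesting owner leads back to the requesting owner. */
    static int flock_deadlock(const int *blocked_on, int nowners, int requester)
    {
        int cur = blocked_on[requester];
        int hops;

        for (hops = 0; cur != NOBODY && hops < nowners; hops++) {
            if (cur == requester)
                return 1;  /* cycle: granting would deadlock */
            cur = blocked_on[cur];
        }
        return 0;
    }

    int main(void)
    {
        /* owner 0 waits for 1, 1 waits for 2, 2 waits for 0: a cycle */
        int cyc[] = { 1, 2, 0 };
        /* owner 0 waits for 1, 1 waits for 2, 2 is not blocked: no cycle */
        int chain[] = { 1, 2, NOBODY };

        assert(flock_deadlock(cyc, 3, 0) == 1);
        assert(flock_deadlock(chain, 3, 0) == 0);
        return 0;
    }
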
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 692623beee12..0548aca45e8e 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -145,8 +145,6 @@ char *ldlm_it2str(int it)
return "getxattr";
case IT_LAYOUT:
return "layout";
- case IT_SETXATTR:
- return "setxattr";
default:
CERROR("Unknown intent %d\n", it);
return "UNKNOWN";
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 3ed020eb89c0..7e63cf355cd9 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -192,8 +192,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
int to = cfs_time_seconds(1);
while (to > 0) {
- schedule_timeout_and_set_state(
- TASK_INTERRUPTIBLE, to);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(to);
if (lock->l_granted_mode == lock->l_req_mode ||
lock->l_flags & LDLM_FL_DESTROYED)
break;
@@ -228,6 +228,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
lock_res_and_lock(lock);
LASSERT(lock->l_lvb_data == NULL);
+ lock->l_lvb_type = LVB_T_LAYOUT;
lock->l_lvb_data = lvb_data;
lock->l_lvb_len = lvb_len;
unlock_res_and_lock(lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index c0e54aead2ce..fcc7a99ce395 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -299,7 +299,7 @@ EXPORT_SYMBOL(ldlm_completion_ast);
* A helper to build a blocking AST function
*
* Perform a common operation for blocking ASTs:
- * defferred lock cancellation.
+ * deferred lock cancellation.
*
* \param lock the lock blocking or canceling AST was called on
* \retval 0
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 5f89864cda14..2824d4a9a85b 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -421,9 +421,9 @@ static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
} else {
val = fid_oid(&fid);
}
- hash = cfs_hash_long(hash, hs->hs_bkt_bits);
+ hash = hash_long(hash, hs->hs_bkt_bits);
/* give me another random factor */
- hash -= cfs_hash_long((unsigned long)hs, val % 11 + 3);
+ hash -= hash_long((unsigned long)hs, val % 11 + 3);
hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index f30c84f195aa..1e4c5ad26157 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -368,7 +368,7 @@ void libcfs_debug_dumplog(void)
/* we're being careful to ensure that the kernel thread is
* able to set our state to running as it exits before we
* get to schedule() */
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&debug_ctlwq, &wait);
@@ -379,7 +379,7 @@ void libcfs_debug_dumplog(void)
printk(KERN_ERR "LustreError: cannot start log dump thread:"
" %ld\n", PTR_ERR(dumper));
else
- waitq_wait(&wait, TASK_INTERRUPTIBLE);
+ schedule();
/* be sure to teardown if cfs_create_thread() failed */
remove_wait_queue(&debug_ctlwq, &wait);
diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c
index c54448d69008..ba43ff7f7900 100644
--- a/drivers/staging/lustre/lustre/libcfs/fail.c
+++ b/drivers/staging/lustre/lustre/libcfs/fail.c
@@ -127,8 +127,8 @@ int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
if (ret) {
CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
id, ms);
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- cfs_time_seconds(ms) / 1000);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(ms) / 1000);
set_current_state(TASK_RUNNING);
CERROR("cfs_fail_timeout id %x awake\n", id);
}
diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
index 7b2c31599327..b6ddc998f750 100644
--- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
+++ b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
@@ -44,106 +44,6 @@
#include <linux/libcfs/libcfs.h>
-#ifdef LUSTRE_UTILS
-/* This is the userspace side. */
-
-/** Start the userspace side of a KUC pipe.
- * @param link Private descriptor for pipe/socket.
- * @param groups KUC broadcast group to listen to
- * (can be null for unicast to this pid)
- */
-int libcfs_ukuc_start(lustre_kernelcomm *link, int group)
-{
- int pfd[2];
-
- if (pipe(pfd) < 0)
- return -errno;
-
- memset(link, 0, sizeof(*link));
- link->lk_rfd = pfd[0];
- link->lk_wfd = pfd[1];
- link->lk_group = group;
- link->lk_uid = getpid();
- return 0;
-}
-
-int libcfs_ukuc_stop(lustre_kernelcomm *link)
-{
- if (link->lk_wfd > 0)
- close(link->lk_wfd);
- return close(link->lk_rfd);
-}
-
-#define lhsz sizeof(*kuch)
-
-/** Read a message from the link.
- * Allocates memory, returns handle
- *
- * @param link Private descriptor for pipe/socket.
- * @param buf Buffer to read into, must include size for kuc_hdr
- * @param maxsize Maximum message size allowed
- * @param transport Only listen to messages on this transport
- * (and the generic transport)
- */
-int libcfs_ukuc_msg_get(lustre_kernelcomm *link, char *buf, int maxsize,
- int transport)
-{
- struct kuc_hdr *kuch;
- int rc = 0;
-
- memset(buf, 0, maxsize);
-
- CDEBUG(D_KUC, "Waiting for message from kernel on fd %d\n",
- link->lk_rfd);
-
- while (1) {
- /* Read header first to get message size */
- rc = read(link->lk_rfd, buf, lhsz);
- if (rc <= 0) {
- rc = -errno;
- break;
- }
- kuch = (struct kuc_hdr *)buf;
-
- CDEBUG(D_KUC, "Received message mg=%x t=%d m=%d l=%d\n",
- kuch->kuc_magic, kuch->kuc_transport, kuch->kuc_msgtype,
- kuch->kuc_msglen);
-
- if (kuch->kuc_magic != KUC_MAGIC) {
- CERROR("bad message magic %x != %x\n",
- kuch->kuc_magic, KUC_MAGIC);
- rc = -EPROTO;
- break;
- }
-
- if (kuch->kuc_msglen > maxsize) {
- rc = -EMSGSIZE;
- break;
- }
-
- /* Read payload */
- rc = read(link->lk_rfd, buf + lhsz, kuch->kuc_msglen - lhsz);
- if (rc < 0) {
- rc = -errno;
- break;
- }
- if (rc < (kuch->kuc_msglen - lhsz)) {
- CERROR("short read: got %d of %d bytes\n",
- rc, kuch->kuc_msglen);
- rc = -EPROTO;
- break;
- }
-
- if (kuch->kuc_transport == transport ||
- kuch->kuc_transport == KUC_TRANSPORT_GENERIC) {
- return 0;
- }
- /* Drop messages for other transports */
- }
- return rc;
-}
-
-#else /* LUSTRE_UTILS */
/* This is the kernel side (liblustre as well). */
/**
@@ -338,5 +238,3 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
return rc;
}
EXPORT_SYMBOL(libcfs_kkuc_group_foreach);
-
-#endif /* LUSTRE_UTILS */
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
index 922debd0a412..ed0a6b531058 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
@@ -42,26 +42,6 @@
#include <linux/libcfs/libcfs.h>
-/* non-0 = don't match */
-int cfs_strncasecmp(const char *s1, const char *s2, size_t n)
-{
- if (s1 == NULL || s2 == NULL)
- return 1;
-
- if (n == 0)
- return 0;
-
- while (n-- != 0 && tolower(*s1) == tolower(*s2)) {
- if (n == 0 || *s1 == '\0' || *s2 == '\0')
- break;
- s1++;
- s2++;
- }
-
- return tolower(*(unsigned char *)s1) - tolower(*(unsigned char *)s2);
-}
-EXPORT_SYMBOL(cfs_strncasecmp);
-
/* Convert a text string to a bitmask */
int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
int *oldmask, int minmask, int allmask)
@@ -101,7 +81,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
debugstr = bit2str(i);
if (debugstr != NULL &&
strlen(debugstr) == len &&
- cfs_strncasecmp(str, debugstr, len) == 0) {
+ strncasecmp(str, debugstr, len) == 0) {
if (op == '-')
newmask &= ~(1 << i);
else
@@ -111,7 +91,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
}
}
if (!found && len == 3 &&
- (cfs_strncasecmp(str, "ALL", len) == 0)) {
+ (strncasecmp(str, "ALL", len) == 0)) {
if (op == '-')
newmask = minmask;
else
@@ -129,7 +109,6 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
*oldmask = newmask;
return 0;
}
-EXPORT_SYMBOL(cfs_str2mask);
/* get the first string out of @str */
char *cfs_firststr(char *str, size_t size)
@@ -164,12 +143,12 @@ cfs_trimwhite(char *str)
{
char *end;
- while (cfs_iswhite(*str))
+ while (isspace(*str))
str++;
end = str + strlen(str);
while (end > str) {
- if (!cfs_iswhite(end[-1]))
+ if (!isspace(end[-1]))
break;
end--;
}
@@ -199,7 +178,7 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
/* skip leading white spaces */
while (next->ls_len) {
- if (!cfs_iswhite(*next->ls_str))
+ if (!isspace(*next->ls_str))
break;
next->ls_str++;
next->ls_len--;
@@ -226,14 +205,13 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
/* skip ending whitespaces */
while (--end != res->ls_str) {
- if (!cfs_iswhite(*end))
+ if (!isspace(*end))
break;
}
res->ls_len = end - res->ls_str + 1;
return 1;
}
-EXPORT_SYMBOL(cfs_gettok);
/**
* Converts string to integer.
@@ -256,13 +234,12 @@ cfs_str2num_check(char *str, int nob, unsigned *num,
return 0;
for (; endp < str + nob; endp++) {
- if (!cfs_iswhite(*endp))
+ if (!isspace(*endp))
return 0;
}
return (*num >= min && *num <= max);
}
-EXPORT_SYMBOL(cfs_str2num_check);
/**
* Parses \<range_expr\> token of the syntax. If \a bracketed is false,
@@ -277,7 +254,7 @@ EXPORT_SYMBOL(cfs_str2num_check);
* \retval 0 will be returned if it can be parsed, otherwise -EINVAL or
* -ENOMEM will be returned.
*/
-int
+static int
cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
int bracketed, struct cfs_range_expr **expr)
{
@@ -340,7 +317,6 @@ cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
LIBCFS_FREE(re, sizeof(*re));
return -EINVAL;
}
-EXPORT_SYMBOL(cfs_range_expr_parse);
/**
* Matches value (\a value) against ranges expression list \a expr_list.
@@ -361,7 +337,6 @@ cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list)
return 0;
}
-EXPORT_SYMBOL(cfs_expr_list_match);
/**
* Convert express list (\a expr_list) to an array of all matched values
@@ -432,18 +407,6 @@ cfs_expr_list_free(struct cfs_expr_list *expr_list)
}
EXPORT_SYMBOL(cfs_expr_list_free);
-void
-cfs_expr_list_print(struct cfs_expr_list *expr_list)
-{
- struct cfs_range_expr *expr;
-
- list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
- CDEBUG(D_WARNING, "%d-%d/%d\n",
- expr->re_lo, expr->re_hi, expr->re_stride);
- }
-}
-EXPORT_SYMBOL(cfs_expr_list_print);
-
/**
* Parses \<cfs_expr_list\> token of the syntax.
*
@@ -526,7 +489,6 @@ cfs_expr_list_free_list(struct list_head *list)
cfs_expr_list_free(el);
}
}
-EXPORT_SYMBOL(cfs_expr_list_free_list);
int
cfs_ip_addr_parse(char *str, int len, struct list_head *list)
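
The libcfs_string.c hunks drop the private cfs_strncasecmp() and cfs_iswhite()
helpers in favour of the kernel's strncasecmp() and isspace(). The same calls
exist in the C library, so the trimming and matching pattern can be shown
directly in userspace; trimwhite() below mirrors cfs_trimwhite() and is only
an illustration, not the kernel function.

    #include <assert.h>
    #include <ctype.h>
    #include <string.h>

    /* Trim leading and trailing whitespace in place, using the standard
     * isspace() instead of the removed cfs_iswhite(). */
    static char *trimwhite(char *str)
    {
        char *end;

        while (isspace((unsigned char)*str))
            str++;
        end = str + strlen(str);
        while (end > str && isspace((unsigned char)end[-1]))
            end--;
        *end = '\0';
        return str;
    }

    int main(void)
    {
        char buf[] = "  neterror \t";
        char *tok = trimwhite(buf);

        assert(strcmp(tok, "neterror") == 0);
        /* case-insensitive mask-name match, as in cfs_str2mask() */
        assert(strncasecmp(tok, "NETERROR", strlen(tok)) == 0);
        return 0;
    }
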
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
index 58bb256ee047..77b1ef64ecc0 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
@@ -952,6 +952,7 @@ static int
cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ bool warn;
switch (action) {
case CPU_DEAD:
@@ -962,9 +963,21 @@ cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
cpt_data.cpt_version++;
spin_unlock(&cpt_data.cpt_lock);
default:
- CWARN("Lustre: can't support CPU hotplug well now, "
- "performance and stability could be impacted"
- "[CPU %u notify: %lx]\n", cpu, action);
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
+ CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
+ cpu, action);
+ break;
+ }
+
+ down(&cpt_data.cpt_mutex);
+ /* if all HTs in a core are offline, it may break affinity */
+ cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask);
+ warn = any_online_cpu(*cpt_data.cpt_cpumask) >= nr_cpu_ids;
+ up(&cpt_data.cpt_mutex);
+ CDEBUG(warn ? D_WARNING : D_INFO,
+ "Lustre: can't support CPU plug-out well now, "
+ "performance and stability could be impacted "
+ "[CPU %u action: %lx]\n", cpu, action);
}
return NOTIFY_OK;
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
index a2ef64c3403d..e74c3e28a972 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
@@ -55,25 +55,13 @@
* for Linux kernel.
*/
-int cfs_curproc_groups_nr(void)
-{
- int nr;
-
- task_lock(current);
- nr = current_cred()->group_info->ngroups;
- task_unlock(current);
- return nr;
-}
-
-/* Currently all the CFS_CAP_* defines match CAP_* ones. */
-#define cfs_cap_pack(cap) (cap)
-#define cfs_cap_unpack(cap) (cap)
-
void cfs_cap_raise(cfs_cap_t cap)
{
struct cred *cred;
- if ((cred = prepare_creds())) {
- cap_raise(cred->cap_effective, cfs_cap_unpack(cap));
+
+ cred = prepare_creds();
+ if (cred) {
+ cap_raise(cred->cap_effective, cap);
commit_creds(cred);
}
}
@@ -81,42 +69,28 @@ void cfs_cap_raise(cfs_cap_t cap)
void cfs_cap_lower(cfs_cap_t cap)
{
struct cred *cred;
- if ((cred = prepare_creds())) {
- cap_lower(cred->cap_effective, cfs_cap_unpack(cap));
+
+ cred = prepare_creds();
+ if (cred) {
+ cap_lower(cred->cap_effective, cap);
commit_creds(cred);
}
}
int cfs_cap_raised(cfs_cap_t cap)
{
- return cap_raised(current_cap(), cfs_cap_unpack(cap));
+ return cap_raised(current_cap(), cap);
}
void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap)
{
-#if defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x19980330
- *cap = cfs_cap_pack(kcap);
-#elif defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x20071026
- *cap = cfs_cap_pack(kcap[0]);
-#elif defined(_KERNEL_CAPABILITY_VERSION) && _KERNEL_CAPABILITY_VERSION == 0x20080522
/* XXX lost high byte */
- *cap = cfs_cap_pack(kcap.cap[0]);
-#else
- #error "need correct _KERNEL_CAPABILITY_VERSION "
-#endif
+ *cap = kcap.cap[0];
}
void cfs_kernel_cap_unpack(kernel_cap_t *kcap, cfs_cap_t cap)
{
-#if defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x19980330
- *kcap = cfs_cap_unpack(cap);
-#elif defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x20071026
- (*kcap)[0] = cfs_cap_unpack(cap);
-#elif defined(_KERNEL_CAPABILITY_VERSION) && _KERNEL_CAPABILITY_VERSION == 0x20080522
- kcap->cap[0] = cfs_cap_unpack(cap);
-#else
- #error "need correct _KERNEL_CAPABILITY_VERSION "
-#endif
+ kcap->cap[0] = cap;
}
cfs_cap_t cfs_curproc_cap_pack(void)
@@ -126,20 +100,6 @@ cfs_cap_t cfs_curproc_cap_pack(void)
return cap;
}
-void cfs_curproc_cap_unpack(cfs_cap_t cap)
-{
- struct cred *cred;
- if ((cred = prepare_creds())) {
- cfs_kernel_cap_unpack(&cred->cap_effective, cap);
- commit_creds(cred);
- }
-}
-
-int cfs_capable(cfs_cap_t cap)
-{
- return capable(cfs_cap_unpack(cap));
-}
-
static int cfs_access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, int write)
{
@@ -292,13 +252,10 @@ out:
}
EXPORT_SYMBOL(cfs_get_environ);
-EXPORT_SYMBOL(cfs_curproc_groups_nr);
EXPORT_SYMBOL(cfs_cap_raise);
EXPORT_SYMBOL(cfs_cap_lower);
EXPORT_SYMBOL(cfs_cap_raised);
EXPORT_SYMBOL(cfs_curproc_cap_pack);
-EXPORT_SYMBOL(cfs_curproc_cap_unpack);
-EXPORT_SYMBOL(cfs_capable);
/*
* Local variables:
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
index 55296a3591d5..e6eae0666f0d 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
@@ -150,12 +150,12 @@ static long libcfs_ioctl(struct file *file,
/* Handle platform-dependent IOC requests */
switch (cmd) {
case IOC_LIBCFS_PANIC:
- if (!cfs_capable(CFS_CAP_SYS_BOOT))
+ if (!capable(CFS_CAP_SYS_BOOT))
return (-EPERM);
panic("debugctl-invoked panic");
return (0);
case IOC_LIBCFS_MEMHOG:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
/* go thought */
}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
index c7bc7fcccb8e..9a40d1415a65 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
@@ -46,13 +46,6 @@
#include <asm/kgdb.h>
#endif
-void
-init_waitqueue_entry_current(wait_queue_t *link)
-{
- init_waitqueue_entry(link, current);
-}
-EXPORT_SYMBOL(init_waitqueue_entry_current);
-
/**
* wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
* waiting threads, which is not always desirable because all threads will
@@ -77,37 +70,6 @@ add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
}
EXPORT_SYMBOL(add_wait_queue_exclusive_head);
-void
-waitq_wait(wait_queue_t *link, long state)
-{
- schedule();
-}
-EXPORT_SYMBOL(waitq_wait);
-
-int64_t
-waitq_timedwait(wait_queue_t *link, long state, int64_t timeout)
-{
- return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(waitq_timedwait);
-
-void
-schedule_timeout_and_set_state(long state, int64_t timeout)
-{
- set_current_state(state);
- schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(schedule_timeout_and_set_state);
-
-/* deschedule for a bit... */
-void
-cfs_pause(cfs_duration_t ticks)
-{
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(ticks);
-}
-EXPORT_SYMBOL(cfs_pause);
-
void cfs_init_timer(struct timer_list *t)
{
init_timer(t);
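
With init_waitqueue_entry_current(), waitq_wait(), waitq_timedwait(),
schedule_timeout_and_set_state() and cfs_pause() removed above, callers
open-code the stock kernel idiom instead. The fragment below is only a sketch
of that pattern for a kernel context, not a standalone program; my_waitq and
my_condition are hypothetical names standing in for a caller's wait queue and
wake-up condition.

    /* sketch: sleep until my_condition or a one-second timeout */
    wait_queue_t wait;

    init_waitqueue_entry(&wait, current);
    add_wait_queue(&my_waitq, &wait);
    set_current_state(TASK_INTERRUPTIBLE);
    if (!my_condition)
        schedule_timeout(cfs_time_seconds(1));
    set_current_state(TASK_RUNNING);
    remove_wait_queue(&my_waitq, &wait);
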
diff --git a/drivers/staging/lustre/lustre/libcfs/nidstrings.c b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
index 732ae5540bf4..cfb274fc9cbd 100644
--- a/drivers/staging/lustre/lustre/libcfs/nidstrings.c
+++ b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
@@ -65,23 +65,20 @@ void libcfs_init_nidstrings(void)
spin_lock_init(&libcfs_nidstring_lock);
}
-# define NIDSTR_LOCK(f) spin_lock_irqsave(&libcfs_nidstring_lock, f)
-# define NIDSTR_UNLOCK(f) spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
-
static char *
libcfs_next_nidstring(void)
{
char *str;
unsigned long flags;
- NIDSTR_LOCK(flags);
+ spin_lock_irqsave(&libcfs_nidstring_lock, flags);
str = libcfs_nidstrings[libcfs_nidstring_idx++];
if (libcfs_nidstring_idx ==
sizeof(libcfs_nidstrings)/sizeof(libcfs_nidstrings[0]))
libcfs_nidstring_idx = 0;
- NIDSTR_UNLOCK(flags);
+ spin_unlock_irqrestore(&libcfs_nidstring_lock, flags);
return str;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 54290ce6bb43..c8599eefeb76 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -1076,11 +1076,10 @@ end_loop:
break;
}
}
- init_waitqueue_entry_current(&__wait);
+ init_waitqueue_entry(&__wait, current);
add_wait_queue(&tctl->tctl_waitq, &__wait);
set_current_state(TASK_INTERRUPTIBLE);
- waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
+ schedule_timeout(cfs_time_seconds(1));
remove_wait_queue(&tctl->tctl_waitq, &__wait);
}
complete(&tctl->tctl_stop);
diff --git a/drivers/staging/lustre/lustre/libcfs/upcall_cache.c b/drivers/staging/lustre/lustre/libcfs/upcall_cache.c
index 245b46f0dd96..8085e32e5e7a 100644
--- a/drivers/staging/lustre/lustre/libcfs/upcall_cache.c
+++ b/drivers/staging/lustre/lustre/libcfs/upcall_cache.c
@@ -218,13 +218,12 @@ find_again:
MAX_SCHEDULE_TIMEOUT;
long left;
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
add_wait_queue(&entry->ue_waitq, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&cache->uc_lock);
- left = waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
- expiry);
+ left = schedule_timeout(expiry);
spin_lock(&cache->uc_lock);
remove_wait_queue(&entry->ue_waitq, &wait);
diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
index 1a55c81892e0..ba16fd5db704 100644
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ b/drivers/staging/lustre/lustre/libcfs/workitem.c
@@ -334,7 +334,8 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
sched->ws_nthreads, sched->ws_name);
spin_unlock(&cfs_wi_data.wi_glock);
- cfs_pause(cfs_time_seconds(1) / 20);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 20);
spin_lock(&cfs_wi_data.wi_glock);
}
@@ -389,11 +390,11 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
spin_unlock(&cfs_wi_data.wi_glock);
if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) {
- snprintf(name, sizeof(name), "%s_%02d_%02d",
+ snprintf(name, sizeof(name), "%s_%02d_%02u",
sched->ws_name, sched->ws_cpt,
sched->ws_nthreads);
} else {
- snprintf(name, sizeof(name), "%s_%02d",
+ snprintf(name, sizeof(name), "%s_%02u",
sched->ws_name, sched->ws_nthreads);
}
@@ -459,7 +460,8 @@ cfs_wi_shutdown (void)
while (sched->ws_nthreads != 0) {
spin_unlock(&cfs_wi_data.wi_glock);
- cfs_pause(cfs_time_seconds(1) / 20);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 20);
spin_lock(&cfs_wi_data.wi_glock);
}
spin_unlock(&cfs_wi_data.wi_glock);
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index cbd663ed030c..8b5508086174 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -160,7 +160,7 @@ static int ll_ddelete(const struct dentry *de)
/* kernel >= 2.6.38 last refcount is decreased after this function. */
LASSERT(d_count(de) == 1);
- /* Disable this piece of code temproarily because this is called
+ /* Disable this piece of code temporarily because this is called
* inside dcache_lock so it's not appropriate to do lots of work
* here. ATTENTION: Before this piece of code enabling, LU-2487 must be
* resolved. */
@@ -176,7 +176,7 @@ static int ll_ddelete(const struct dentry *de)
return 0;
}
-static int ll_set_dd(struct dentry *de)
+int ll_d_init(struct dentry *de)
{
LASSERT(de != NULL);
@@ -190,40 +190,22 @@ static int ll_set_dd(struct dentry *de)
OBD_ALLOC_PTR(lld);
if (likely(lld != NULL)) {
spin_lock(&de->d_lock);
- if (likely(de->d_fsdata == NULL))
+ if (likely(de->d_fsdata == NULL)) {
de->d_fsdata = lld;
- else
+ __d_lustre_invalidate(de);
+ } else {
OBD_FREE_PTR(lld);
+ }
spin_unlock(&de->d_lock);
} else {
return -ENOMEM;
}
}
+ LASSERT(de->d_op == &ll_d_ops);
return 0;
}
-int ll_dops_init(struct dentry *de, int block, int init_sa)
-{
- struct ll_dentry_data *lld = ll_d2d(de);
- int rc = 0;
-
- if (lld == NULL && block != 0) {
- rc = ll_set_dd(de);
- if (rc)
- return rc;
-
- lld = ll_d2d(de);
- }
-
- if (lld != NULL && init_sa != 0)
- lld->lld_sa_generation = 0;
-
- /* kernel >= 2.6.38 d_op is set in d_alloc() */
- LASSERT(de->d_op == &ll_d_ops);
- return rc;
-}
-
void ll_intent_drop_lock(struct lookup_intent *it)
{
if (it->it_op && it->d.lustre.it_lock_mode) {
@@ -259,9 +241,6 @@ void ll_intent_release(struct lookup_intent *it)
ptlrpc_req_finished(it->d.lustre.it_data); /* ll_file_open */
if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */
ptlrpc_req_finished(it->d.lustre.it_data);
- if (it_disposition(it, DISP_ENQ_COMPLETE)) /* saved req from revalidate
- * to lookup */
- ptlrpc_req_finished(it->d.lustre.it_data);
it->d.lustre.it_disposition = 0;
it->d.lustre.it_data = NULL;
@@ -346,268 +325,32 @@ void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
}
-int ll_revalidate_it(struct dentry *de, int lookup_flags,
- struct lookup_intent *it)
+static int ll_revalidate_dentry(struct dentry *dentry,
+ unsigned int lookup_flags)
{
- struct md_op_data *op_data;
- struct ptlrpc_request *req = NULL;
- struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
- struct obd_export *exp;
- struct inode *parent = de->d_parent->d_inode;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%s,intent=%s\n", de->d_name.name,
- LL_IT2STR(it));
+ struct inode *dir = dentry->d_parent->d_inode;
- if (de->d_inode == NULL) {
- __u64 ibits;
-
- /* We can only use negative dentries if this is stat or lookup,
- for opens and stuff we do need to query server. */
- /* If there is IT_CREAT in intent op set, then we must throw
- away this negative dentry and actually do the request to
- kernel to create whatever needs to be created (if possible)*/
- if (it && (it->it_op & IT_CREAT))
- return 0;
-
- if (d_lustre_invalid(de))
- return 0;
-
- ibits = MDS_INODELOCK_UPDATE;
- rc = ll_have_md_lock(parent, &ibits, LCK_MINMODE);
- GOTO(out_sa, rc);
- }
-
- /* Never execute intents for mount points.
- * Attributes will be fixed up in ll_inode_revalidate_it */
- if (d_mountpoint(de))
- GOTO(out_sa, rc = 1);
-
- /* need to get attributes in case root got changed from other client */
- if (de == de->d_sb->s_root) {
- rc = __ll_inode_revalidate_it(de, it, MDS_INODELOCK_LOOKUP);
- if (rc == 0)
- rc = 1;
- GOTO(out_sa, rc);
- }
-
- exp = ll_i2mdexp(de->d_inode);
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_REVALIDATE_PAUSE, 5);
- ll_frob_intent(&it, &lookup_it);
- LASSERT(it);
+ /*
+ * if open&create is set, talk to MDS to make sure file is created if
+ * necessary, because we can't do this in ->open() later since that's
+ * called on an inode. Return 0 here to let lookup handle this.
+ */
+ if ((lookup_flags & (LOOKUP_OPEN | LOOKUP_CREATE)) ==
+ (LOOKUP_OPEN | LOOKUP_CREATE))
+ return 0;
- if (it->it_op == IT_LOOKUP && !d_lustre_invalid(de))
+ if (lookup_flags & (LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE))
return 1;
- if (it->it_op == IT_OPEN) {
- struct inode *inode = de->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_client_handle **och_p;
- __u64 ibits;
-
- /*
- * We used to check for MDS_INODELOCK_OPEN here, but in fact
- * just having LOOKUP lock is enough to justify inode is the
- * same. And if inode is the same and we have suitable
- * openhandle, then there is no point in doing another OPEN RPC
- * just to throw away newly received openhandle. There are no
- * security implications too, if file owner or access mode is
- * change, LOOKUP lock is revoked.
- */
-
-
- if (it->it_flags & FMODE_WRITE)
- och_p = &lli->lli_mds_write_och;
- else if (it->it_flags & FMODE_EXEC)
- och_p = &lli->lli_mds_exec_och;
- else
- och_p = &lli->lli_mds_read_och;
-
- /* Check for the proper lock. */
- ibits = MDS_INODELOCK_LOOKUP;
- if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
- goto do_lock;
- mutex_lock(&lli->lli_och_mutex);
- if (*och_p) { /* Everything is open already, do nothing */
- /* Originally it was idea to do not let them steal our
- * open handle from under us by (*och_usecount)++ here.
- * But in case we have the handle, but we cannot use it
- * due to later checks (e.g. O_CREAT|O_EXCL flags set),
- * nobody would decrement counter increased here. So we
- * just hope the lock won't be invalidated in between.
- * But if it would be, we'll reopen the open request to
- * MDS later during file open path.
- */
- mutex_unlock(&lli->lli_och_mutex);
- return 1;
- }
- mutex_unlock(&lli->lli_och_mutex);
- }
-
- if (it->it_op == IT_GETATTR) {
- rc = ll_statahead_enter(parent, &de, 0);
- if (rc == 1)
- goto mark;
- else if (rc != -EAGAIN && rc != 0)
- GOTO(out, rc = 0);
- }
-
-do_lock:
- op_data = ll_prep_md_op_data(NULL, parent, de->d_inode,
- de->d_name.name, de->d_name.len,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- if (!IS_POSIXACL(parent) || !exp_connect_umask(exp))
- it->it_create_mode &= ~current_umask();
- it->it_create_mode |= M_CHECK_STALE;
- rc = md_intent_lock(exp, op_data, NULL, 0, it,
- lookup_flags,
- &req, ll_md_blocking_ast, 0);
- it->it_create_mode &= ~M_CHECK_STALE;
- ll_finish_md_op_data(op_data);
-
- /* If req is NULL, then md_intent_lock only tried to do a lock match;
- * if all was well, it will return 1 if it found locks, 0 otherwise. */
- if (req == NULL && rc >= 0) {
- if (!rc)
- goto do_lookup;
- GOTO(out, rc);
- }
-
- if (rc < 0) {
- if (rc != -ESTALE) {
- CDEBUG(D_INFO, "ll_intent_lock: rc %d : it->it_status "
- "%d\n", rc, it->d.lustre.it_status);
- }
- GOTO(out, rc = 0);
- }
-
-revalidate_finish:
- rc = ll_revalidate_it_finish(req, it, de);
- if (rc != 0) {
- if (rc != -ESTALE && rc != -ENOENT)
- ll_intent_release(it);
- GOTO(out, rc = 0);
- }
-
- if ((it->it_op & IT_OPEN) && de->d_inode &&
- !S_ISREG(de->d_inode->i_mode) &&
- !S_ISDIR(de->d_inode->i_mode)) {
- ll_release_openhandle(de, it);
- }
- rc = 1;
-
-out:
- /* We do not free request as it may be reused during following lookup
- * (see comment in mdc/mdc_locks.c::mdc_intent_lock()), request will
- * be freed in ll_lookup_it or in ll_intent_release. But if
- * request was not completed, we need to free it. (bug 5154, 9903) */
- if (req != NULL && !it_disposition(it, DISP_ENQ_COMPLETE))
- ptlrpc_req_finished(req);
- if (rc == 0) {
- /* mdt may grant layout lock for the newly created file, so
- * release the lock to avoid leaking */
- ll_intent_drop_lock(it);
- ll_invalidate_aliases(de->d_inode);
- } else {
- __u64 bits = 0;
- __u64 matched_bits = 0;
-
- CDEBUG(D_DENTRY, "revalidated dentry %.*s (%p) parent %p "
- "inode %p refc %d\n", de->d_name.len,
- de->d_name.name, de, de->d_parent, de->d_inode,
- d_count(de));
-
- ll_set_lock_data(exp, de->d_inode, it, &bits);
-
- /* Note: We have to match both LOOKUP and PERM lock
- * here to make sure the dentry is valid and no one
- * changing the permission.
- * But if the client connects < 2.4 server, which will
- * only grant LOOKUP lock, so we can only Match LOOKUP
- * lock for old server */
- if (exp_connect_flags(ll_i2mdexp(de->d_inode)) &&
- OBD_CONNECT_LVB_TYPE)
- matched_bits =
- MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM;
- else
- matched_bits = MDS_INODELOCK_LOOKUP;
-
- if (((bits & matched_bits) == matched_bits) &&
- d_lustre_invalid(de))
- d_lustre_revalidate(de);
- ll_lookup_finish_locks(it, de);
- }
-
-mark:
- if (it != NULL && it->it_op == IT_GETATTR && rc > 0)
- ll_statahead_mark(parent, de);
- return rc;
-
- /*
- * This part is here to combat evil-evil race in real_lookup on 2.6
- * kernels. The race details are: We enter do_lookup() looking for some
- * name, there is nothing in dcache for this name yet and d_lookup()
- * returns NULL. We proceed to real_lookup(), and while we do this,
- * another process does open on the same file we looking up (most simple
- * reproducer), open succeeds and the dentry is added. Now back to
- * us. In real_lookup() we do d_lookup() again and suddenly find the
- * dentry, so we call d_revalidate on it, but there is no lock, so
- * without this code we would return 0, but unpatched real_lookup just
- * returns -ENOENT in such a case instead of retrying the lookup. Once
- * this is dealt with in real_lookup(), all of this ugly mess can go and
- * we can just check locks in ->d_revalidate without doing any RPCs
- * ever.
- */
-do_lookup:
- if (it != &lookup_it) {
- /* MDS_INODELOCK_UPDATE needed for IT_GETATTR case. */
- if (it->it_op == IT_GETATTR)
- lookup_it.it_op = IT_GETATTR;
- ll_lookup_finish_locks(it, de);
- it = &lookup_it;
- }
+ if (d_need_statahead(dir, dentry) <= 0)
+ return 1;
- /* Do real lookup here. */
- op_data = ll_prep_md_op_data(NULL, parent, NULL, de->d_name.name,
- de->d_name.len, 0, (it->it_op & IT_CREAT ?
- LUSTRE_OPC_CREATE :
- LUSTRE_OPC_ANY), NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- rc = md_intent_lock(exp, op_data, NULL, 0, it, 0, &req,
- ll_md_blocking_ast, 0);
- if (rc >= 0) {
- struct mdt_body *mdt_body;
- struct lu_fid fid = {.f_seq = 0, .f_oid = 0, .f_ver = 0};
- mdt_body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-
- if (de->d_inode)
- fid = *ll_inode2fid(de->d_inode);
-
- /* see if we got same inode, if not - return error */
- if (lu_fid_eq(&fid, &mdt_body->fid1)) {
- ll_finish_md_op_data(op_data);
- op_data = NULL;
- goto revalidate_finish;
- }
- ll_intent_release(it);
- }
- ll_finish_md_op_data(op_data);
- GOTO(out, rc = 0);
+ if (lookup_flags & LOOKUP_RCU)
+ return -ECHILD;
-out_sa:
- /*
- * For rc == 1 case, should not return directly to prevent losing
- * statahead windows; for rc == 0 case, the "lookup" will be done later.
- */
- if (it != NULL && it->it_op == IT_GETATTR && rc == 1)
- ll_statahead_enter(parent, &de, 1);
- goto mark;
+ do_statahead_enter(dir, &dentry, dentry->d_inode == NULL);
+ ll_statahead_mark(dir, dentry);
+ return 1;
}
/*
@@ -615,24 +358,13 @@ out_sa:
*/
int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
{
- struct inode *parent = dentry->d_parent->d_inode;
- int unplug = 0;
+ int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%s,flags=%u\n",
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%s, flags=%u\n",
dentry->d_name.name, flags);
- if (!(flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE)) &&
- ll_need_statahead(parent, dentry) > 0) {
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-
- if (dentry->d_inode == NULL)
- unplug = 1;
- do_statahead_enter(parent, &dentry, unplug);
- ll_statahead_mark(parent, dentry);
- }
-
- return 1;
+ rc = ll_revalidate_dentry(dentry, flags);
+ return rc;
}
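ll_revalidate_nd() now simply delegates to ll_revalidate_dentry(), which follows the generic ->d_revalidate() contract. A sketch of that contract (plain VFS semantics, not Lustre-specific code):

#include <linux/dcache.h>
#include <linux/namei.h>

/*
 * ->d_revalidate() return values:
 *    1       - dentry is still valid, keep using it
 *    0       - dentry is stale, the VFS drops it and falls back to ->lookup()
 *   -ECHILD  - cannot be decided in RCU-walk mode, caller retries in ref-walk
 */
static int example_revalidate(struct dentry *dentry, unsigned int flags)
{
	if (flags & LOOKUP_RCU)
		return -ECHILD;		/* same bail-out as ll_revalidate_dentry() */
	return 1;
}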
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 52b7731bcc38..7fbc18e3e654 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -362,7 +362,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
struct ptlrpc_request *request;
struct md_op_data *op_data;
- op_data = ll_prep_md_op_data(NULL, dir, NULL, NULL, 0, 0,
+ op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return (void *)op_data;
@@ -1048,20 +1048,25 @@ progress:
}
-static int copy_and_ioctl(int cmd, struct obd_export *exp, void *data, int len)
+static int copy_and_ioctl(int cmd, struct obd_export *exp,
+ const void __user *data, size_t size)
{
- void *ptr;
+ void *copy;
int rc;
- OBD_ALLOC(ptr, len);
- if (ptr == NULL)
+ OBD_ALLOC(copy, size);
+ if (copy == NULL)
return -ENOMEM;
- if (copy_from_user(ptr, data, len)) {
- OBD_FREE(ptr, len);
- return -EFAULT;
+
+ if (copy_from_user(copy, data, size)) {
+ rc = -EFAULT;
+ goto out;
}
- rc = obd_iocontrol(cmd, exp, len, data, NULL);
- OBD_FREE(ptr, len);
+
+ rc = obd_iocontrol(cmd, exp, size, copy, NULL);
+out:
+ OBD_FREE(copy, size);
+
return rc;
}
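Besides switching to a single exit path, the rewritten copy_and_ioctl() also fixes the old code, which passed the raw user pointer to obd_iocontrol() instead of the freshly allocated kernel copy. The generic shape it now follows, with a placeholder operation standing in for obd_iocontrol():

#include <linux/slab.h>
#include <linux/uaccess.h>

/* Sketch of the copy-in/operate/free pattern; do_operation() is hypothetical. */
static int copy_in_and_operate(const void __user *uptr, size_t size)
{
	void *copy;
	int rc;

	copy = kmalloc(size, GFP_KERNEL);
	if (copy == NULL)
		return -ENOMEM;

	if (copy_from_user(copy, uptr, size)) {
		rc = -EFAULT;
		goto out;		/* single exit frees the buffer */
	}

	rc = 0;				/* do_operation(copy, size) would go here */
out:
	kfree(copy);
	return rc;
}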
@@ -1080,7 +1085,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
case Q_QUOTAOFF:
case Q_SETQUOTA:
case Q_SETINFO:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ if (!capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT)
return -EPERM;
break;
@@ -1089,7 +1094,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
!uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
(type == GRPQUOTA &&
!in_egroup_p(make_kgid(&init_user_ns, id)))) &&
- (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ (!capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT))
return -EPERM;
break;
@@ -1395,7 +1400,7 @@ lmv_out_free:
if (tmp == NULL)
GOTO(free_lmv, rc = -ENOMEM);
- memcpy(tmp, &lum, sizeof(lum));
+ *tmp = lum;
tmp->lum_type = LMV_STRIPE_TYPE;
tmp->lum_stripe_count = 1;
mdtindex = ll_get_mdt_idx(inode);
@@ -1597,7 +1602,7 @@ out_rmdir:
struct obd_quotactl *oqctl;
int error = 0;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ if (!capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT)
return -EPERM;
@@ -1621,7 +1626,7 @@ out_rmdir:
case OBD_IOC_POLL_QUOTACHECK: {
struct if_quotacheck *check;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ if (!capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT)
return -EPERM;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index c12821aedc2f..8e844a6371e0 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -205,7 +205,7 @@ out:
return rc;
}
-int ll_md_real_close(struct inode *inode, int flags)
+int ll_md_real_close(struct inode *inode, fmode_t fmode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_client_handle **och_p;
@@ -213,30 +213,33 @@ int ll_md_real_close(struct inode *inode, int flags)
__u64 *och_usecount;
int rc = 0;
- if (flags & FMODE_WRITE) {
+ if (fmode & FMODE_WRITE) {
och_p = &lli->lli_mds_write_och;
och_usecount = &lli->lli_open_fd_write_count;
- } else if (flags & FMODE_EXEC) {
+ } else if (fmode & FMODE_EXEC) {
och_p = &lli->lli_mds_exec_och;
och_usecount = &lli->lli_open_fd_exec_count;
} else {
- LASSERT(flags & FMODE_READ);
+ LASSERT(fmode & FMODE_READ);
och_p = &lli->lli_mds_read_och;
och_usecount = &lli->lli_open_fd_read_count;
}
mutex_lock(&lli->lli_och_mutex);
- if (*och_usecount) { /* There are still users of this handle, so
- skip freeing it. */
+ if (*och_usecount > 0) {
+ /* There are still users of this handle, so skip
+ * freeing it. */
mutex_unlock(&lli->lli_och_mutex);
return 0;
}
+
och=*och_p;
*och_p = NULL;
mutex_unlock(&lli->lli_och_mutex);
- if (och) { /* There might be a race and somebody have freed this och
- already */
+ if (och != NULL) {
+ /* There might be a race and this handle may already
+ be closed. */
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
inode, och, NULL);
}
@@ -443,8 +446,7 @@ static int ll_intent_file_open(struct file *file, void *lmm,
itp, NULL);
out:
- ptlrpc_req_finished(itp->d.lustre.it_data);
- it_clear_disposition(itp, DISP_ENQ_COMPLETE);
+ ptlrpc_req_finished(req);
ll_intent_drop_lock(itp);
return rc;
@@ -477,7 +479,7 @@ static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
och->och_flags = it->it_flags;
- return md_set_open_replay_data(md_exp, och, req);
+ return md_set_open_replay_data(md_exp, och, it);
}
int ll_local_open(struct file *file, struct lookup_intent *it,
@@ -671,14 +673,13 @@ restart:
ll_capa_open(inode);
- if (!lli->lli_has_smd) {
- if (file->f_flags & O_LOV_DELAY_CREATE ||
- !(file->f_mode & FMODE_WRITE)) {
- CDEBUG(D_INODE, "object creation was delayed\n");
- GOTO(out_och_free, rc);
- }
+ if (!lli->lli_has_smd &&
+ (cl_is_lov_delay_create(file->f_flags) ||
+ (file->f_mode & FMODE_WRITE) == 0)) {
+ CDEBUG(D_INODE, "object creation was delayed\n");
+ GOTO(out_och_free, rc);
}
- file->f_flags &= ~O_LOV_DELAY_CREATE;
+ cl_lov_delay_create_clear(&file->f_flags);
GOTO(out_och_free, rc);
out_och_free:
@@ -813,10 +814,7 @@ struct obd_client_handle *ll_lease_open(struct inode *inode, struct file *file,
* doesn't deal with openhandle, so normal openhandle will be leaked. */
LDLM_FL_NO_LRU | LDLM_FL_EXCL);
ll_finish_md_op_data(op_data);
- if (req != NULL) {
- ptlrpc_req_finished(req);
- it_clear_disposition(&it, DISP_ENQ_COMPLETE);
- }
+ ptlrpc_req_finished(req);
if (rc < 0)
GOTO(out_release_it, rc);
@@ -1033,6 +1031,33 @@ int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
return rc;
}
+static bool file_is_noatime(const struct file *file)
+{
+ const struct vfsmount *mnt = file->f_path.mnt;
+ const struct inode *inode = file->f_path.dentry->d_inode;
+
+ /* Adapted from file_accessed() and touch_atime().*/
+ if (file->f_flags & O_NOATIME)
+ return true;
+
+ if (inode->i_flags & S_NOATIME)
+ return true;
+
+ if (IS_NOATIME(inode))
+ return true;
+
+ if (mnt->mnt_flags & (MNT_NOATIME | MNT_READONLY))
+ return true;
+
+ if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
+ return true;
+
+ if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+ return true;
+
+ return false;
+}
+
void ll_io_init(struct cl_io *io, const struct file *file, int write)
{
struct inode *inode = file->f_dentry->d_inode;
@@ -1052,6 +1077,8 @@ void ll_io_init(struct cl_io *io, const struct file *file, int write)
} else if (file->f_flags & O_APPEND) {
io->ci_lockreq = CILR_MANDATORY;
}
+
+ io->ci_noatime = file_is_noatime(file);
}
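file_is_noatime() consolidates the same mount- and inode-flag checks that the VFS makes in touch_atime()/file_accessed(), so the IO path can carry the result in io->ci_noatime. A hypothetical caller-side sketch (not from this patch) of how such a predicate is normally used:

	/* only touch atime when none of the no-atime conditions apply */
	static void maybe_update_atime(struct file *file)
	{
		struct inode *inode = file->f_path.dentry->d_inode;

		if (file_is_noatime(file))
			return;		/* O_NOATIME, read-only mount, etc. */

		inode->i_atime = current_fs_time(inode->i_sb);
		mark_inode_dirty_sync(inode);
	}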
static ssize_t
@@ -1092,16 +1119,12 @@ restart:
down_read(&lli->lli_trunc_sem);
}
break;
- case IO_SENDFILE:
- vio->u.sendfile.cui_actor = args->u.sendfile.via_actor;
- vio->u.sendfile.cui_target = args->u.sendfile.via_target;
- break;
case IO_SPLICE:
vio->u.splice.cui_pipe = args->u.splice.via_pipe;
vio->u.splice.cui_flags = args->u.splice.via_flags;
break;
default:
- CERROR("Unknow IO type - %u\n", vio->cui_io_subtype);
+ CERROR("Unknown IO type - %u\n", vio->cui_io_subtype);
LBUG();
}
result = cl_io_loop(env, io);
@@ -1340,7 +1363,7 @@ static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg)
struct ll_recreate_obj ucreat;
struct ost_id oi;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&ucreat, (struct ll_recreate_obj *)arg,
@@ -1358,7 +1381,7 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
struct ost_id oi;
obd_count ost_idx;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&fid, (struct lu_fid *)arg, sizeof(fid)))
@@ -1381,23 +1404,25 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
ccc_inode_lsm_put(inode, lsm);
CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
inode->i_ino);
- return -EEXIST;
+ GOTO(out, rc = -EEXIST);
}
ll_inode_size_lock(inode);
rc = ll_intent_file_open(file, lum, lum_size, &oit);
if (rc)
- GOTO(out, rc);
+ GOTO(out_unlock, rc);
rc = oit.d.lustre.it_status;
if (rc < 0)
GOTO(out_req_free, rc);
ll_release_openhandle(file->f_dentry, &oit);
- out:
+out_unlock:
ll_inode_size_unlock(inode);
ll_intent_release(&oit);
ccc_inode_lsm_put(inode, lsm);
+out:
+ cl_lov_delay_create_clear(&file->f_flags);
return rc;
out_req_free:
ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
@@ -1497,7 +1522,7 @@ static int ll_lov_setea(struct inode *inode, struct file *file,
sizeof(struct lov_user_ost_data);
int rc;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
OBD_ALLOC_LARGE(lump, lum_size);
@@ -1747,7 +1772,7 @@ int ll_fid2path(struct inode *inode, void *arg)
struct getinfo_fid2path *gfout, *gfin;
int outsize, rc;
- if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
+ if (!capable(CFS_CAP_DAC_READ_SEARCH) &&
!(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
return -EPERM;
@@ -2005,7 +2030,7 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
llss->ia2.ia_valid = ATTR_MTIME | ATTR_ATIME;
}
- /* ultimate check, before swaping the layouts we check if
+ /* ultimate check, before swapping the layouts we check if
* dataversion has changed (if requested) */
if (llss->check_dv1) {
rc = ll_data_version(llss->inode1, &dv, 0);
@@ -2093,7 +2118,7 @@ static int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
/* Non-root users are forbidden to set or clear flags which are
* NOT defined in HSM_USER_MASK. */
if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
- !cfs_capable(CFS_CAP_SYS_ADMIN))
+ !capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
@@ -2670,7 +2695,7 @@ int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
/* flocks are whole-file locks */
flock.l_flock.end = OFFSET_MAX;
- /* For flocks owner is determined by the local file desctiptor*/
+ /* For flocks owner is determined by the local file descriptor*/
flock.l_flock.owner = (unsigned long)file_lock->fl_file;
} else if (file_lock->fl_flags & FL_POSIX) {
flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
@@ -2891,7 +2916,7 @@ int __ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
oit.it_op = IT_LOOKUP;
/* Call getattr by fid, so do not provide name at all. */
- op_data = ll_prep_md_op_data(NULL, dentry->d_parent->d_inode,
+ op_data = ll_prep_md_op_data(NULL, dentry->d_inode,
dentry->d_inode, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
@@ -3175,7 +3200,7 @@ struct inode_operations ll_file_inode_operations = {
.get_acl = ll_get_acl,
};
-/* dynamic ioctl number support routins */
+/* dynamic ioctl number support routines */
static struct llioc_ctl_data {
struct rw_semaphore ioc_sem;
struct list_head ioc_head;
@@ -3299,7 +3324,7 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
if (result == 0) {
/* it can only be allowed to match after layout is
* applied to inode otherwise false layout would be
- * seen. Applying layout shoud happen before dropping
+ * seen. Applying layout should happen before dropping
* the intent lock. */
ldlm_lock_allow_match(lock);
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index e2996c46b2c0..38c2d0e947db 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -348,7 +348,7 @@ static int ll_close_thread(void *arg)
break;
inode = ll_info2i(lli);
- CDEBUG(D_INFO, "done_writting for inode %lu/%u\n",
+ CDEBUG(D_INFO, "done_writing for inode %lu/%u\n",
inode->i_ino, inode->i_generation);
ll_done_writing(inode);
iput(inode);
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 7ee5c02783f9..69aba0afca41 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -296,13 +296,6 @@ int ll_xattr_cache_get(struct inode *inode,
size_t size,
__u64 valid);
-int ll_xattr_cache_update(struct inode *inode,
- const char *name,
- const char *newval,
- size_t size,
- __u64 valid,
- int flags);
-
/*
* Locking to guarantee consistency of non-atomic updates to long long i_size,
* consistency between file size and KMS.
@@ -532,7 +525,7 @@ struct ll_sb_info {
atomic_t ll_agl_total; /* AGL thread started count */
dev_t ll_sdev_orig; /* save s_dev before assign for
- * clustred nfs */
+ * clustered nfs */
struct rmtacl_ctl_table ll_rct;
struct eacl_table ll_et;
__kernel_fsid_t ll_fsid;
@@ -782,7 +775,7 @@ int ll_local_open(struct file *file,
int ll_release_openhandle(struct dentry *, struct lookup_intent *);
int ll_md_close(struct obd_export *md_exp, struct inode *inode,
struct file *file);
-int ll_md_real_close(struct inode *inode, int flags);
+int ll_md_real_close(struct inode *inode, fmode_t fmode);
void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
struct obd_client_handle **och, unsigned long flags);
void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data);
@@ -828,7 +821,7 @@ int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
/* llite/dcache.c */
-int ll_dops_init(struct dentry *de, int block, int init_sa);
+int ll_d_init(struct dentry *de);
extern struct dentry_operations ll_d_ops;
void ll_intent_drop_lock(struct lookup_intent *);
void ll_intent_release(struct lookup_intent *);
@@ -915,12 +908,10 @@ struct ccc_object *cl_inode2ccc(struct inode *inode);
void vvp_write_pending (struct ccc_object *club, struct ccc_page *page);
void vvp_write_complete(struct ccc_object *club, struct ccc_page *page);
-/* specific achitecture can implement only part of this list */
+/* specific architecture can implement only part of this list */
enum vvp_io_subtype {
/** normal IO */
IO_NORMAL,
- /** io called from .sendfile */
- IO_SENDFILE,
/** io started from splice_{read|write} */
IO_SPLICE
};
@@ -932,10 +923,6 @@ struct vvp_io {
union {
struct {
- read_actor_t cui_actor;
- void *cui_target;
- } sendfile;
- struct {
struct pipe_inode_info *cui_pipe;
unsigned int cui_flags;
} splice;
@@ -981,7 +968,7 @@ struct vvp_io {
* IO arguments for various VFS I/O interfaces.
*/
struct vvp_io_args {
- /** normal/sendfile/splice */
+ /** normal/splice */
enum vvp_io_subtype via_io_subtype;
union {
@@ -991,10 +978,6 @@ struct vvp_io_args {
unsigned long via_nrsegs;
} normal;
struct {
- read_actor_t via_actor;
- void *via_target;
- } sendfile;
- struct {
struct pipe_inode_info *via_pipe;
unsigned int via_flags;
} splice;
@@ -1320,12 +1303,13 @@ ll_statahead_mark(struct inode *dir, struct dentry *dentry)
if (lli->lli_opendir_pid != current_pid())
return;
- if (sai != NULL && ldd != NULL)
+ LASSERT(ldd != NULL);
+ if (sai != NULL)
ldd->lld_sa_generation = sai->sai_generation;
}
static inline int
-ll_need_statahead(struct inode *dir, struct dentry *dentryp)
+d_need_statahead(struct inode *dir, struct dentry *dentryp)
{
struct ll_inode_info *lli;
struct ll_dentry_data *ldd;
@@ -1370,14 +1354,14 @@ ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int only_unplug)
{
int ret;
- ret = ll_need_statahead(dir, *dentryp);
+ ret = d_need_statahead(dir, *dentryp);
if (ret <= 0)
return ret;
return do_statahead_enter(dir, dentryp, only_unplug);
}
-/* llite ioctl register support rountine */
+/* llite ioctl register support routine */
enum llioc_iter {
LLIOC_CONT = 0,
LLIOC_STOP
@@ -1389,7 +1373,7 @@ enum llioc_iter {
* Rules to write a callback function:
*
* Parameters:
- * @magic: Dynamic ioctl call routine will feed this vaule with the pointer
+ * @magic: Dynamic ioctl call routine will feed this value with the pointer
* returned to ll_iocontrol_register. Callback functions should use this
* data to check the potential collasion of ioctl cmd. If collasion is
* found, callback function should return LLIOC_CONT.
@@ -1414,7 +1398,7 @@ enum llioc_iter ll_iocontrol_call(struct inode *inode, struct file *file,
* @cb: callback function, it will be called if an ioctl command is found to
* belong to the command list @cmd.
*
- * Return vaule:
+ * Return value:
* A magic pointer will be returned if success;
* otherwise, NULL will be returned.
* */
@@ -1524,7 +1508,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
* separate locks in different namespaces, Master MDT,
* where the name entry is, will grant LOOKUP lock,
* remote MDT, where the object is, will grant
- * UPDATE|PERM lock. The inode will be attched to both
+ * UPDATE|PERM lock. The inode will be attached to both
* LOOKUP and PERM locks, so revoking either locks will
* case the dcache being cleared */
if (it->d.lustre.it_remote_lock_mode) {
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6cfdb9e4b74b..7c4fd97a7fa0 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -155,11 +155,6 @@ void ll_free_sbi(struct super_block *sb)
}
}
-static struct dentry_operations ll_d_root_ops = {
- .d_compare = ll_dcompare,
- .d_revalidate = ll_revalidate_nd,
-};
-
static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
struct vfsmount *mnt)
{
@@ -211,7 +206,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_EINPROGRESS |
OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
OBD_CONNECT_LAYOUTLOCK |
- OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE;
+ OBD_CONNECT_PINGLESS |
+ OBD_CONNECT_MAX_EASIZE |
+ OBD_CONNECT_FLOCK_DEAD |
+ OBD_CONNECT_DISP_STRIPE;
if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
data->ocd_connect_flags |= OBD_CONNECT_SOM;
@@ -281,7 +279,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* For mount, we only need fs info from MDT0, and also in DNE, it
* can make sure the client can be mounted as long as MDT0 is
- * avaible */
+ * available */
err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
OBD_STATFS_FOR_MDT0);
@@ -579,10 +577,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
GOTO(out_root, err = -ENOMEM);
}
- /* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
- d_set_d_op(sb->s_root, &ll_d_root_ops);
- sb->s_d_op = &ll_d_ops;
-
sbi->ll_sdev_orig = sb->s_dev;
/* We set sb->s_dev equal on all lustre clients in order to support
@@ -723,7 +717,7 @@ void ll_kill_super(struct super_block *sb)
return;
sbi = ll_s2sbi(sb);
- /* we need restore s_dev from changed for clustred NFS before put_super
+ /* we need to restore the s_dev changed for clustered NFS before put_super
* because new kernels have cached s_dev and change sb->s_dev in
* put_super not affected real removing devices */
if (sbi) {
@@ -740,7 +734,8 @@ char *ll_read_opt(const char *opt, char *data)
CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
if (strncmp(opt, data, strlen(opt)))
return NULL;
- if ((value = strchr(data, '=')) == NULL)
+ value = strchr(data, '=');
+ if (value == NULL)
return NULL;
value++;
@@ -1013,6 +1008,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
GOTO(out_free, err);
sb->s_bdi = &lsi->lsi_bdi;
+ /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
+ sb->s_d_op = &ll_d_ops;
/* Generate a string unique to this super, in case some joker tries
to mount the same fs at two mount points.
@@ -1067,7 +1064,7 @@ out_free:
void ll_put_super(struct super_block *sb)
{
- struct config_llog_instance cfg;
+ struct config_llog_instance cfg, params_cfg;
struct obd_device *obd;
struct lustre_sb_info *lsi = s2lsi(sb);
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -1081,6 +1078,9 @@ void ll_put_super(struct super_block *sb)
cfg.cfg_instance = sb;
lustre_end_log(sb, profilenm, &cfg);
+ params_cfg.cfg_instance = sb;
+ lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
+
if (sbi->ll_md_exp) {
obd = class_exp2obd(sbi->ll_md_exp);
if (obd)
@@ -1405,7 +1405,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
/* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
if (attr->ia_valid & TIMES_SET_FLAGS) {
if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
- !cfs_capable(CFS_CAP_FOWNER))
+ !capable(CFS_CAP_FOWNER))
return -EPERM;
}
@@ -1877,7 +1877,7 @@ void ll_delete_inode(struct inode *inode)
cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
CL_FSYNC_DISCARD, 1);
- truncate_inode_pages(&inode->i_data, 0);
+ truncate_inode_pages_final(&inode->i_data);
/* Workaround for LU-118 */
if (inode->i_data.nrpages) {
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index caed6423e4ef..90b2c0d275f9 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -285,7 +285,7 @@ static inline int to_fault_error(int result)
* Lustre implementation of a vm_operations_struct::fault() method, called by
* VM to server page fault (both in kernel and user space).
*
- * \param vma - is virtiual area struct related to page fault
+ * \param vma - is virtual area struct related to page fault
* \param vmf - structure which describe type and address where hit fault
*
* \return allocated and filled _locked_ page for address
@@ -370,7 +370,7 @@ restart:
goto restart;
}
- result |= VM_FAULT_LOCKED;
+ result = VM_FAULT_LOCKED;
}
cfs_restore_sigs(set);
return result;
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 1767c741fb72..3580069789fc 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -167,10 +167,10 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
}
result = d_obtain_alias(inode);
- if (IS_ERR(result))
+ if (IS_ERR(result)) {
+ iput(inode);
return result;
-
- ll_dops_init(result, 1, 0);
+ }
return result;
}
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 0718905adeb2..f78eda235c7a 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -255,7 +255,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
* to store parity;
* 2. Reserve the # of (page_count * depth) cl_pages from the reserved
* pool. Afterwards, the clio would allocate the pages from reserved
- * pool, this guarantees we neeedn't allocate the cl_pages from
+ * pool, this guarantees we needn't allocate the cl_pages from
* generic cl_page slab cache.
* Of course, if there is NOT enough pages in the pool, we might
* be asked to write less pages once, this purely depends on
@@ -325,7 +325,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
bio = &(*bio)->bi_next;
}
if (*bio) {
- /* Some of bios can't be mergable. */
+ /* Some of bios can't be mergeable. */
lo->lo_bio = *bio;
*bio = NULL;
} else {
@@ -658,7 +658,7 @@ static struct block_device_operations lo_fops = {
* ll_iocontrol_call.
*
* This is a llite regular file ioctl function. It takes the responsibility
- * of attaching or detaching a file by a lloop's device numner.
+ * of attaching or detaching a file by a lloop's device number.
*/
static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
unsigned int cmd, unsigned long arg,
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index fc8d264f6c9a..25a6ea580f00 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -195,101 +195,107 @@ static void ll_invalidate_negative_children(struct inode *dir)
int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag)
{
- int rc;
struct lustre_handle lockh;
+ int rc;
switch (flag) {
case LDLM_CB_BLOCKING:
ldlm_lock2handle(lock, &lockh);
rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
if (rc < 0) {
- CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
+ CDEBUG(D_INODE, "ldlm_cli_cancel: rc = %d\n", rc);
return rc;
}
break;
case LDLM_CB_CANCELING: {
struct inode *inode = ll_inode_from_resource_lock(lock);
- struct ll_inode_info *lli;
__u64 bits = lock->l_policy_data.l_inodebits.bits;
- struct lu_fid *fid;
- ldlm_mode_t mode = lock->l_req_mode;
/* Inode is set to lock->l_resource->lr_lvb_inode
* for mdc - bug 24555 */
LASSERT(lock->l_ast_data == NULL);
- /* Invalidate all dentries associated with this inode */
if (inode == NULL)
break;
+ /* Invalidate all dentries associated with this inode */
LASSERT(lock->l_flags & LDLM_FL_CANCELING);
- if (bits & MDS_INODELOCK_XATTR)
+ if (!fid_res_name_eq(ll_inode2fid(inode),
+ &lock->l_resource->lr_name)) {
+ LDLM_ERROR(lock, "data mismatch with object "DFID"(%p)",
+ PFID(ll_inode2fid(inode)), inode);
+ LBUG();
+ }
+
+ if (bits & MDS_INODELOCK_XATTR) {
ll_xattr_cache_destroy(inode);
+ bits &= ~MDS_INODELOCK_XATTR;
+ }
/* For OPEN locks we differentiate between lock modes
* LCK_CR, LCK_CW, LCK_PR - bug 22891 */
- if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LAYOUT | MDS_INODELOCK_PERM))
- ll_have_md_lock(inode, &bits, LCK_MINMODE);
-
if (bits & MDS_INODELOCK_OPEN)
- ll_have_md_lock(inode, &bits, mode);
-
- fid = ll_inode2fid(inode);
- if (!fid_res_name_eq(fid, &lock->l_resource->lr_name))
- LDLM_ERROR(lock, "data mismatch with object "
- DFID" (%p)", PFID(fid), inode);
+ ll_have_md_lock(inode, &bits, lock->l_req_mode);
if (bits & MDS_INODELOCK_OPEN) {
- int flags = 0;
+ fmode_t fmode;
+
switch (lock->l_req_mode) {
case LCK_CW:
- flags = FMODE_WRITE;
+ fmode = FMODE_WRITE;
break;
case LCK_PR:
- flags = FMODE_EXEC;
+ fmode = FMODE_EXEC;
break;
case LCK_CR:
- flags = FMODE_READ;
+ fmode = FMODE_READ;
break;
default:
- CERROR("Unexpected lock mode for OPEN lock "
- "%d, inode %ld\n", lock->l_req_mode,
- inode->i_ino);
+ LDLM_ERROR(lock, "bad lock mode for OPEN lock");
+ LBUG();
}
- ll_md_real_close(inode, flags);
+
+ ll_md_real_close(inode, fmode);
}
- lli = ll_i2info(inode);
+ if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
+ MDS_INODELOCK_LAYOUT | MDS_INODELOCK_PERM))
+ ll_have_md_lock(inode, &bits, LCK_MINMODE);
+
if (bits & MDS_INODELOCK_LAYOUT) {
- struct cl_object_conf conf = { { 0 } };
+ struct cl_object_conf conf = {
+ .coc_opc = OBJECT_CONF_INVALIDATE,
+ .coc_inode = inode,
+ };
- conf.coc_opc = OBJECT_CONF_INVALIDATE;
- conf.coc_inode = inode;
rc = ll_layout_conf(inode, &conf);
- if (rc)
- CDEBUG(D_INODE, "invaliding layout %d.\n", rc);
+ if (rc < 0)
+ CDEBUG(D_INODE, "cannot invalidate layout of "
+ DFID": rc = %d\n",
+ PFID(ll_inode2fid(inode)), rc);
}
if (bits & MDS_INODELOCK_UPDATE) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+
spin_lock(&lli->lli_lock);
lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
spin_unlock(&lli->lli_lock);
}
- if (S_ISDIR(inode->i_mode) &&
- (bits & MDS_INODELOCK_UPDATE)) {
+ if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
CDEBUG(D_INODE, "invalidating inode %lu\n",
inode->i_ino);
truncate_inode_pages(inode->i_mapping, 0);
ll_invalidate_negative_children(inode);
}
- if (inode->i_sb->s_root &&
- inode != inode->i_sb->s_root->d_inode &&
- (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)))
+ if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
+ inode->i_sb->s_root != NULL &&
+ inode != inode->i_sb->s_root->d_inode)
ll_invalidate_aliases(inode);
+
iput(inode);
break;
}
@@ -400,11 +406,16 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
{
struct dentry *new;
+ int rc;
if (inode) {
new = ll_find_alias(inode, de);
if (new) {
- ll_dops_init(new, 1, 1);
+ rc = ll_d_init(new);
+ if (rc < 0) {
+ dput(new);
+ return ERR_PTR(rc);
+ }
d_move(new, de);
iput(inode);
CDEBUG(D_DENTRY,
@@ -413,8 +424,9 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
return new;
}
}
- ll_dops_init(de, 1, 1);
- __d_lustre_invalidate(de);
+ rc = ll_d_init(de);
+ if (rc < 0)
+ return ERR_PTR(rc);
d_add(de, inode);
CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
de, de->d_inode, d_count(de), de->d_flags);
@@ -453,10 +465,22 @@ int ll_lookup_it_finish(struct ptlrpc_request *request,
}
/* Only hash *de if it is unhashed (new dentry).
- * Atoimc_open may passin hashed dentries for open.
+ * Atomic_open may pass in hashed dentries for open.
*/
- if (d_unhashed(*de))
- *de = ll_splice_alias(inode, *de);
+ if (d_unhashed(*de)) {
+ struct dentry *alias;
+
+ alias = ll_splice_alias(inode, *de);
+ if (IS_ERR(alias))
+ return PTR_ERR(alias);
+ *de = alias;
+ } else if (!it_disposition(it, DISP_LOOKUP_NEG) &&
+ !it_disposition(it, DISP_OPEN_CREATE)) {
+ /* With DISP_OPEN_CREATE the dentry will be
+ instantiated in ll_create_it. */
+ LASSERT((*de)->d_inode == NULL);
+ d_instantiate(*de, inode);
+ }
if (!it_disposition(it, DISP_LOOKUP_NEG)) {
/* we have lookup look - unhide dentry */
@@ -505,16 +529,6 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
ll_frob_intent(&it, &lookup_it);
- /* As do_lookup is called before follow_mount, root dentry may be left
- * not valid, revalidate it here. */
- if (parent->i_sb->s_root && (parent->i_sb->s_root->d_inode == parent) &&
- (it->it_op & (IT_OPEN | IT_CREAT))) {
- rc = ll_inode_revalidate_it(parent->i_sb->s_root, it,
- MDS_INODELOCK_LOOKUP);
- if (rc)
- return ERR_PTR(rc);
- }
-
if (it->it_op == IT_GETATTR) {
rc = ll_statahead_enter(parent, &dentry, 0);
if (rc == 1) {
@@ -584,12 +598,8 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
parent->i_generation, parent, flags);
/* Optimize away (CREATE && !OPEN). Let .create handle the race. */
- if ((flags & LOOKUP_CREATE ) && !(flags & LOOKUP_OPEN)) {
- ll_dops_init(dentry, 1, 1);
- __d_lustre_invalidate(dentry);
- d_add(dentry, NULL);
+ if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN))
return NULL;
- }
if (flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE))
itp = NULL;
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index e9ba38a553cf..416f7a094a6d 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -135,7 +135,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
}
/*
- * Loop-back driver calls ->prepare_write() and ->sendfile()
+ * Loop-back driver calls ->prepare_write().
* methods directly, bypassing file system ->write() operation,
* so cl_io has to be created here.
*/
@@ -558,10 +558,10 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
* striped over, rather than having a constant value for all files here. */
/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
- * Temprarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
+ * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
* by default, this should be adjusted corresponding with max_read_ahead_mb
* and max_read_ahead_per_file_mb otherwise the readahead budget can be used
- * up quickly which will affect read performance siginificantly. See LU-2816 */
+ * up quickly which will affect read performance significantly. See LU-2816 */
#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
static inline int stride_io_mode(struct ll_readahead_state *ras)
@@ -570,7 +570,7 @@ static inline int stride_io_mode(struct ll_readahead_state *ras)
}
/* The function calculates how much pages will be read in
* [off, off + length], in such stride IO area,
- * stride_offset = st_off, stride_lengh = st_len,
+ * stride_offset = st_off, stride_length = st_len,
* stride_pages = st_pgs
*
* |------------------|*****|------------------|*****|------------|*****|....
@@ -1090,7 +1090,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
ras_set_start(inode, ras, index);
if (stride_io_mode(ras))
- /* Since stride readahead is sentivite to the offset
+ /* Since stride readahead is sensitive to the offset
* of read-ahead, so we use original offset here,
* instead of ras_window_start, which is RPC aligned */
ras->ras_next_readahead = max(index, ras->ras_next_readahead);
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index f6b5f4b95f37..c8624b5a9875 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -577,7 +577,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
* Someone triggered glimpse within 1 sec before.
* 1) The former glimpse succeeded with glimpse lock granted by OST, and
* if the lock is still cached on client, AGL needs to do nothing. If
- * it is cancelled by other client, AGL maybe cannot obtaion new lock
+ * it is cancelled by other client, AGL maybe cannot obtain new lock
* for no glimpse callback triggered by AGL.
* 2) The former glimpse succeeded, but OST did not grant glimpse lock.
* Under such case, it is quite possible that the OST will not grant
@@ -877,9 +877,6 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
if (d_mountpoint(dentry))
return 1;
- if (unlikely(dentry == dentry->d_sb->s_root))
- return 1;
-
entry->se_inode = igrab(inode);
rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),NULL);
if (rc == 1) {
@@ -1588,8 +1585,15 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
ll_inode2fid(inode), &bits);
if (rc == 1) {
if ((*dentryp)->d_inode == NULL) {
- *dentryp = ll_splice_alias(inode,
+ struct dentry *alias;
+
+ alias = ll_splice_alias(inode,
*dentryp);
+ if (IS_ERR(alias)) {
+ ll_sai_unplug(sai, entry);
+ return PTR_ERR(alias);
+ }
+ *dentryp = alias;
} else if ((*dentryp)->d_inode != inode) {
/* revalidate, but inode is recreated */
CDEBUG(D_READA,
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 93cbfbb7e7f7..c7d70091246e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -51,7 +51,7 @@ static struct vvp_io *cl2vvp_io(const struct lu_env *env,
const struct cl_io_slice *slice);
/**
- * True, if \a io is a normal io, False for sendfile() / splice_{read|write}
+ * True, if \a io is a normal io, False for splice_{read,write}
*/
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
@@ -474,20 +474,6 @@ static void vvp_io_setattr_fini(const struct lu_env *env,
vvp_io_fini(env, ios);
}
-static ssize_t lustre_generic_file_read(struct file *file,
- struct ccc_io *vio, loff_t *ppos)
-{
- return generic_file_aio_read(vio->cui_iocb, vio->cui_iov,
- vio->cui_nrsegs, *ppos);
-}
-
-static ssize_t lustre_generic_file_write(struct file *file,
- struct ccc_io *vio, loff_t *ppos)
-{
- return generic_file_aio_write(vio->cui_iocb, vio->cui_iov,
- vio->cui_nrsegs, *ppos);
-}
-
static int vvp_io_read_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
@@ -540,8 +526,11 @@ static int vvp_io_read_start(const struct lu_env *env,
file_accessed(file);
switch (vio->cui_io_subtype) {
case IO_NORMAL:
- result = lustre_generic_file_read(file, cio, &pos);
- break;
+ LASSERT(cio->cui_iocb->ki_pos == pos);
+ result = generic_file_aio_read(cio->cui_iocb,
+ cio->cui_iov, cio->cui_nrsegs,
+ cio->cui_iocb->ki_pos);
+ break;
case IO_SPLICE:
result = generic_file_splice_read(file, &pos,
vio->u.splice.cui_pipe, cnt,
@@ -586,7 +575,6 @@ static int vvp_io_write_start(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
struct inode *inode = ccc_object_inode(obj);
- struct file *file = cio->cui_fd->fd_file;
ssize_t result = 0;
loff_t pos = io->u.ci_wr.wr.crw_pos;
size_t cnt = io->u.ci_wr.wr.crw_count;
@@ -601,6 +589,8 @@ static int vvp_io_write_start(const struct lu_env *env,
*/
pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
cio->cui_iocb->ki_pos = pos;
+ } else {
+ LASSERT(cio->cui_iocb->ki_pos == pos);
}
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
@@ -608,8 +598,9 @@ static int vvp_io_write_start(const struct lu_env *env,
if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
result = 0;
else
- result = lustre_generic_file_write(file, cio, &pos);
-
+ result = generic_file_aio_write(cio->cui_iocb,
+ cio->cui_iov, cio->cui_nrsegs,
+ cio->cui_iocb->ki_pos);
if (result > 0) {
if (result < cnt)
io->ci_continue = 0;
@@ -655,7 +646,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
if (cfio->fault.ft_flags & VM_FAULT_RETRY)
return -EAGAIN;
- CERROR("unknow error in page fault %d!\n", cfio->fault.ft_flags);
+ CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
return -EINVAL;
}
@@ -1201,7 +1192,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
if (result == -ENOENT)
/* If the inode on MDS has been removed, but the objects
* on OSTs haven't been destroyed (async unlink), layout
- * fetch will return -ENOENT, we'd ingore this error
+ * fetch will return -ENOENT, we'd ignore this error
* and continue with dirty flush. LU-3230. */
result = 0;
if (result < 0)
@@ -1216,7 +1207,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
const struct cl_io_slice *slice)
{
- /* Caling just for assertion */
+ /* Calling just for assertion */
cl2ccc_io(env, slice);
return vvp_env_io(env);
}
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 3a7d03c12dd9..b1ed4d9ea6be 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -95,7 +95,7 @@ int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type)
if (xattr_type == XATTR_USER_T && !(sbi->ll_flags & LL_SBI_USER_XATTR))
return -EOPNOTSUPP;
- if (xattr_type == XATTR_TRUSTED_T && !cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (xattr_type == XATTR_TRUSTED_T && !capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
if (xattr_type == XATTR_OTHER_T)
return -EOPNOTSUPP;
@@ -183,17 +183,11 @@ int ll_setxattr_common(struct inode *inode, const char *name,
valid |= rce_ops2valid(rce->rce_ops);
}
#endif
- if (sbi->ll_xattr_cache_enabled &&
- (rce == NULL || rce->rce_ops == RMT_LSETFACL)) {
- rc = ll_xattr_cache_update(inode, name, pv, size, valid, flags);
- } else {
oc = ll_mdscapa_get(inode);
rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
valid, name, pv, size, 0, flags,
ll_i2suppgid(inode), &req);
capa_put(oc);
- }
-
#ifdef CONFIG_FS_POSIX_ACL
if (new_value != NULL)
lustre_posix_acl_xattr_free(new_value, size);
@@ -292,6 +286,7 @@ int ll_getxattr_common(struct inode *inode, const char *name,
void *xdata;
struct obd_capa *oc;
struct rmtacl_ctl_entry *rce = NULL;
+ struct ll_inode_info *lli = ll_i2info(inode);
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
inode->i_ino, inode->i_generation, inode);
@@ -339,7 +334,7 @@ int ll_getxattr_common(struct inode *inode, const char *name,
*/
if (xattr_type == XATTR_ACL_ACCESS_T &&
!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
- struct ll_inode_info *lli = ll_i2info(inode);
+
struct posix_acl *acl;
spin_lock(&lli->lli_lock);
@@ -358,13 +353,27 @@ int ll_getxattr_common(struct inode *inode, const char *name,
#endif
do_getxattr:
- if (sbi->ll_xattr_cache_enabled && (rce == NULL ||
- rce->rce_ops == RMT_LGETFACL ||
- rce->rce_ops == RMT_LSETFACL)) {
+ if (sbi->ll_xattr_cache_enabled && xattr_type != XATTR_ACL_ACCESS_T) {
rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
+ if (rc == -EAGAIN)
+ goto getxattr_nocache;
if (rc < 0)
GOTO(out_xattr, rc);
+
+ /* Add "system.posix_acl_access" to the list */
+ if (lli->lli_posix_acl != NULL && valid & OBD_MD_FLXATTRLS) {
+ if (size == 0) {
+ rc += sizeof(XATTR_NAME_ACL_ACCESS);
+ } else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) {
+ memcpy(buffer + rc, XATTR_NAME_ACL_ACCESS,
+ sizeof(XATTR_NAME_ACL_ACCESS));
+ rc += sizeof(XATTR_NAME_ACL_ACCESS);
+ } else {
+ GOTO(out_xattr, rc = -ERANGE);
+ }
+ }
} else {
+getxattr_nocache:
oc = ll_mdscapa_get(inode);
rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 3e3be1f13502..4defa2fd83b3 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -98,13 +98,13 @@ static int ll_xattr_cache_find(struct list_head *cache,
}
/**
- * This adds or updates an xattr.
+ * This adds an xattr.
*
* Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
- * if the attribute already exists, then update its value.
*
* \retval 0 success
* \retval -ENOMEM if no memory could be allocated for the cached attr
+ * \retval -EPROTO if duplicate xattr is being added
*/
static int ll_xattr_cache_add(struct list_head *cache,
const char *xattr_name,
@@ -116,27 +116,8 @@ static int ll_xattr_cache_add(struct list_head *cache,
if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
- /* Found a cached EA, update it */
-
- if (xattr_val_len != xattr->xe_vallen) {
- char *val;
- OBD_ALLOC(val, xattr_val_len);
- if (val == NULL) {
- CDEBUG(D_CACHE,
- "failed to allocate %u bytes for xattr %s update\n",
- xattr_val_len, xattr_name);
- return -ENOMEM;
- }
- OBD_FREE(xattr->xe_value, xattr->xe_vallen);
- xattr->xe_value = val;
- xattr->xe_vallen = xattr_val_len;
- }
- memcpy(xattr->xe_value, xattr_val, xattr_val_len);
-
- CDEBUG(D_CACHE, "update: [%s]=%.*s\n", xattr_name,
- xattr_val_len, xattr_val);
-
- return 0;
+ CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
+ return -EPROTO;
}
OBD_SLAB_ALLOC_PTR_GFP(xattr, xattr_kmem, __GFP_IO);
@@ -261,7 +242,7 @@ int ll_xattr_cache_valid(struct ll_inode_info *lli)
*
* Free all xattr memory. @lli is the inode info pointer.
*
- * \retval 0 no error occured
+ * \retval 0 no error occurred
*/
static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
{
@@ -292,14 +273,14 @@ int ll_xattr_cache_destroy(struct inode *inode)
}
/**
- * Match or enqueue a PR or PW LDLM lock.
+ * Match or enqueue a PR lock.
*
* Find or request an LDLM lock with xattr data.
* Since LDLM does not provide API for atomic match_or_enqueue,
* the function handles it with a separate enq lock.
* If successful, the function exits with the list lock held.
*
- * \retval 0 no error occured
+ * \retval 0 no error occurred
* \retval -ENOMEM not enough memory
*/
static int ll_xattr_find_get_lock(struct inode *inode,
@@ -322,9 +303,7 @@ static int ll_xattr_find_get_lock(struct inode *inode,
mutex_lock(&lli->lli_xattrs_enq_lock);
/* Try matching first. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
- oit->it_op == IT_SETXATTR ? LCK_PW :
- (LCK_PR | LCK_PW));
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0, LCK_PR);
if (mode != 0) {
/* fake oit in mdc_revalidate_lock() manner */
oit->d.lustre.it_lock_handle = lockh.cookie;
@@ -340,13 +319,7 @@ static int ll_xattr_find_get_lock(struct inode *inode,
return PTR_ERR(op_data);
}
- op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS |
- OBD_MD_FLXATTRLOCKED;
-#ifdef CONFIG_FS_POSIX_ACL
- /* If working with ACLs, we would like to cache local ACLs */
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- op_data->op_valid |= OBD_MD_FLRMTLGETFACL;
-#endif
+ op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;
rc = md_enqueue(exp, &einfo, oit, op_data, &lockh, NULL, 0, NULL, 0);
ll_finish_md_op_data(op_data);
@@ -374,7 +347,7 @@ out:
* a read or a write xattr lock depending on operation in @oit.
* Intent is dropped on exit unless the operation is setxattr.
*
- * \retval 0 no error occured
+ * \retval 0 no error occurred
* \retval -EPROTO network protocol error
* \retval -ENOMEM not enough memory for the cache
*/
@@ -409,7 +382,11 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
if (oit->d.lustre.it_status < 0) {
CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
- GOTO(out_destroy, rc = oit->d.lustre.it_status);
+ rc = oit->d.lustre.it_status;
+ /* xattr data is so large that we don't want to cache it */
+ if (rc == -ERANGE)
+ rc = -EAGAIN;
+ GOTO(out_destroy, rc);
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
@@ -447,6 +424,11 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
rc = -EPROTO;
} else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
rc = -ENOMEM;
+ } else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
+ /* Filter out ACL ACCESS since it's cached separately */
+ CDEBUG(D_CACHE, "not caching %s\n",
+ XATTR_NAME_ACL_ACCESS);
+ rc = 0;
} else {
rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
*xsizes);
@@ -467,8 +449,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
GOTO(out_maybe_drop, rc);
out_maybe_drop:
- /* drop lock on error or getxattr */
- if (rc != 0 || oit->it_op != IT_SETXATTR)
+
ll_intent_drop_lock(oit);
if (rc != 0)
@@ -496,7 +477,7 @@ out_destroy:
* The resulting value/list is stored in @buffer if the former
* is not larger than @size.
*
- * \retval 0 no error occured
+ * \retval 0 no error occurred
* \retval -EPROTO network protocol error
* \retval -ENOMEM not enough memory for the cache
* \retval -ERANGE the buffer is not large enough
@@ -553,65 +534,3 @@ out:
return rc;
}
-
-
-/**
- * Set/update an xattr value or remove xattr using the write-through cache.
- *
- * Set/update the xattr value (if @valid has OBD_MD_FLXATTR) of @name to @newval
- * or
- * remove the xattr @name (@valid has OBD_MD_FLXATTRRM set) from @inode.
- * @flags is either XATTR_CREATE or XATTR_REPLACE as defined by setxattr(2)
- *
- * \retval 0 no error occured
- * \retval -EPROTO network protocol error
- * \retval -ENOMEM not enough memory for the cache
- * \retval -ERANGE the buffer is not large enough
- * \retval -ENODATA no such attr (in the removal case)
- */
-int ll_xattr_cache_update(struct inode *inode,
- const char *name,
- const char *newval,
- size_t size,
- __u64 valid,
- int flags)
-{
- struct lookup_intent oit = { .it_op = IT_SETXATTR };
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_capa *oc;
- int rc;
-
-
-
- LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRRM));
-
- rc = ll_xattr_cache_refill(inode, &oit);
- if (rc)
- return rc;
-
- oc = ll_mdscapa_get(inode);
- rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
- valid | OBD_MD_FLXATTRLOCKED, name, newval,
- size, 0, flags, ll_i2suppgid(inode), &req);
- capa_put(oc);
-
- if (rc) {
- ll_intent_drop_lock(&oit);
- GOTO(out, rc);
- }
-
- if (valid & OBD_MD_FLXATTR)
- rc = ll_xattr_cache_add(&lli->lli_xattrs, name, newval, size);
- else if (valid & OBD_MD_FLXATTRRM)
- rc = ll_xattr_cache_del(&lli->lli_xattrs, name);
-
- ll_intent_drop_lock(&oit);
- GOTO(out, rc);
-out:
- up_write(&lli->lli_xattrs_list_rwsem);
- ptlrpc_req_finished(req);
-
- return rc;
-}
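[Illustrative sketch, not part of the patch] With the hunks above, the xattr cache is read-only under a single LCK_PR lock and an oversized reply is reported as -EAGAIN instead of being cached. A caller written against that convention might look like the sketch below; ll_getxattr_uncached() and ll_xattr_cache_get() are assumed names used only for illustration.

	rc = ll_xattr_cache_refill(inode, &oit);
	if (rc == -EAGAIN) {
		/* value too large to cache: fall back to a plain getxattr RPC */
		rc = ll_getxattr_uncached(inode, name, buffer, size);	/* hypothetical helper */
	} else if (rc == 0) {
		rc = ll_xattr_cache_get(&lli->lli_xattrs, name,		/* assumed accessor */
					buffer, size, valid);
		up_read(&lli->lli_xattrs_list_rwsem);
	}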
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 56dedceaf0a0..9ba5a0a57390 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -119,7 +119,6 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
CDEBUG(D_INODE, "REMOTE_INTENT with fid="DFID" -> mds #%d\n",
PFID(&body->fid1), tgt->ltd_idx);
- it->d.lustre.it_disposition &= ~DISP_ENQ_COMPLETE;
rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it,
flags, &req, cb_blocking, extra_lock_flags);
if (rc)
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 1bddd8f62fbf..3ba0a0a1d945 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -1744,7 +1744,6 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
it->d.lustre.it_data = NULL;
fid1 = body->fid1;
- it->d.lustre.it_disposition &= ~DISP_ENQ_COMPLETE;
ptlrpc_req_finished(req);
tgt = lmv_find_target(lmv, &fid1);
@@ -2593,7 +2592,7 @@ int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
int lmv_set_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och,
- struct ptlrpc_request *open_req)
+ struct lookup_intent *it)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2603,7 +2602,7 @@ int lmv_set_open_replay_data(struct obd_export *exp,
if (IS_ERR(tgt))
return PTR_ERR(tgt);
- return md_set_open_replay_data(tgt->ltd_exp, och, open_req);
+ return md_set_open_replay_data(tgt->ltd_exp, och, it);
}
int lmv_clear_open_replay_data(struct obd_export *exp,
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index b355d01410e4..5d5c3081c467 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -87,8 +87,9 @@ static int lmv_placement_seq_show(struct seq_file *m, void *v)
#define MAX_POLICY_STRING_SIZE 64
-static ssize_t lmv_placement_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t lmv_placement_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
char dummy[MAX_POLICY_STRING_SIZE + 1];
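[Illustrative sketch, not part of the patch] The __user annotation added above records that the buffer lives in userspace and must be copied in before use. The usual shape of such a write handler, using only stock kernel helpers, is roughly:

	static ssize_t example_seq_write(struct file *file, const char __user *buffer,
					 size_t count, loff_t *off)
	{
		char kbuf[MAX_POLICY_STRING_SIZE + 1];

		if (count > MAX_POLICY_STRING_SIZE)
			return -EINVAL;
		/* copy_from_user() returns the number of bytes it could not copy */
		if (copy_from_user(kbuf, buffer, count))
			return -EFAULT;
		kbuf[count] = '\0';
		/* ... parse kbuf and apply the policy ... */
		return count;
	}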
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index e6c60151dc65..6f356e025543 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -98,7 +98,7 @@ struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size)
OBD_ALLOC_LARGE(lsm, *size);
if (!lsm)
- return NULL;;
+ return NULL;
for (i = 0; i < stripe_count; i++) {
OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, __GFP_IO);
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 5a6ab70ed0a1..65133ea308b6 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -194,6 +194,7 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
sub_io->ci_lockreq = io->ci_lockreq;
sub_io->ci_type = io->ci_type;
sub_io->ci_no_srvlock = io->ci_no_srvlock;
+ sub_io->ci_noatime = io->ci_noatime;
lov_sub_enter(sub);
result = cl_io_sub_init(sub->sub_env, sub_io,
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 50a77c5ef69a..02509d0cb106 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -339,7 +339,7 @@ static int lov_disconnect(struct obd_export *exp)
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
if (lov->lov_tgts[i] && lov->lov_tgts[i]->ltd_exp) {
/* Disconnection is the last we know about an obd */
- lov_del_target(obd, i, 0, lov->lov_tgts[i]->ltd_gen);
+ lov_del_target(obd, i, NULL, lov->lov_tgts[i]->ltd_gen);
}
}
obd_putref(obd);
@@ -644,7 +644,7 @@ out:
if (rc) {
CERROR("add failed (%d), deleting %s\n", rc,
obd_uuid2str(&tgt->ltd_uuid));
- lov_del_target(obd, index, 0, 0);
+ lov_del_target(obd, index, NULL, 0);
}
obd_putref(obd);
return rc;
@@ -768,7 +768,7 @@ void lov_fix_desc(struct lov_desc *desc)
int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
- struct lprocfs_static_vars lvars = { 0 };
+ struct lprocfs_static_vars lvars = { NULL };
struct lov_desc *desc;
struct lov_obd *lov = &obd->u.lov;
int rc;
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index df8b5b5b7cf4..d6b2cb45b938 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -122,8 +122,8 @@ static struct cl_object *lov_sub_find(const struct lu_env *env,
}
static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
- struct cl_object *stripe,
- struct lov_layout_raid0 *r0, int idx)
+ struct cl_object *stripe, struct lov_layout_raid0 *r0,
+ int idx)
{
struct cl_object_header *hdr;
struct cl_object_header *subhdr;
@@ -144,7 +144,6 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
hdr = cl_object_header(lov2cl(lov));
subhdr = cl_object_header(stripe);
- parent = subhdr->coh_parent;
oinfo = lov->lo_lsm->lsm_oinfo[idx];
CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: ostid: "DOSTID
@@ -153,8 +152,12 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi),
oinfo->loi_ost_idx, oinfo->loi_ost_gen);
+ /* reuse ->coh_attr_guard to protect coh_parent change */
+ spin_lock(&subhdr->coh_attr_guard);
+ parent = subhdr->coh_parent;
if (parent == NULL) {
subhdr->coh_parent = hdr;
+ spin_unlock(&subhdr->coh_attr_guard);
subhdr->coh_nesting = hdr->coh_nesting + 1;
lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
r0->lo_sub[idx] = cl2lovsub(stripe);
@@ -166,6 +169,7 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
struct lov_object *old_lov;
unsigned int mask = D_INODE;
+ spin_unlock(&subhdr->coh_attr_guard);
old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
LASSERT(old_obj != NULL);
old_lov = cl2lov(lu2cl(old_obj));
@@ -306,7 +310,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
* ->lo_sub[] slot in lovsub_object_fini() */
if (r0->lo_sub[idx] == los) {
waiter = &lov_env_info(env)->lti_waiter;
- init_waitqueue_entry_current(waiter);
+ init_waitqueue_entry(waiter, current);
add_wait_queue(&bkt->lsb_marche_funebre, waiter);
set_current_state(TASK_UNINTERRUPTIBLE);
while (1) {
@@ -316,7 +320,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
spin_lock(&r0->lo_sub_lock);
if (r0->lo_sub[idx] == los) {
spin_unlock(&r0->lo_sub_lock);
- waitq_wait(waiter, TASK_UNINTERRUPTIBLE);
+ schedule();
} else {
spin_unlock(&r0->lo_sub_lock);
set_current_state(TASK_RUNNING);
@@ -508,7 +512,7 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
return result;
}
-const static struct lov_layout_operations lov_dispatch[] = {
+static const struct lov_layout_operations lov_dispatch[] = {
[LLT_EMPTY] = {
.llo_init = lov_init_empty,
.llo_delete = lov_delete_empty,
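[Illustrative sketch, not part of the patch] The init_waitqueue_entry()/schedule() changes above (and the matching ones in cl_lock.c and lu_object.c further down) open-code the wait that the removed libcfs wrappers used to hide. The generic pattern, with wq and condition() as placeholders, is:

	DECLARE_WAITQUEUE(waiter, current);	/* equivalent to init_waitqueue_entry(&waiter, current) */

	add_wait_queue(&wq, &waiter);
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (!condition()) {			/* placeholder predicate */
		schedule();			/* was waitq_wait() */
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&wq, &waiter);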
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 27ed27e6fa6a..74200cf1b331 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -339,7 +339,8 @@ int lov_free_memmd(struct lov_stripe_md **lsmp)
*lsmp = NULL;
LASSERT(atomic_read(&lsm->lsm_refc) > 0);
- if ((refc = atomic_dec_return(&lsm->lsm_refc)) == 0) {
+ refc = atomic_dec_return(&lsm->lsm_refc);
+ if (refc == 0) {
LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
}
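[Illustrative sketch, not part of the patch] The lov_free_memmd() change is purely stylistic: checkpatch discourages assignments inside if conditions, so the decrement is hoisted out while the behaviour stays identical.

	/* before: assignment buried in the condition */
	if ((refc = atomic_dec_return(&lsm->lsm_refc)) == 0)
		release_lsm(lsm);			/* placeholder */

	/* after: same logic, easier to read and grep */
	refc = atomic_dec_return(&lsm->lsm_refc);
	if (refc == 0)
		release_lsm(lsm);			/* placeholder */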
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index ca81cac9041c..a5481d7eb5d6 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -50,7 +50,7 @@ static void lov_init_set(struct lov_request_set *set)
atomic_set(&set->set_completes, 0);
atomic_set(&set->set_success, 0);
atomic_set(&set->set_finish_checked, 0);
- set->set_cookies = 0;
+ set->set_cookies = NULL;
INIT_LIST_HEAD(&set->set_list);
atomic_set(&set->set_refcount, 1);
init_waitqueue_head(&set->set_waitq);
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index 998ea1cbc7bb..926c35a25ceb 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -131,6 +131,10 @@ static struct lu_device *lovsub_device_free(const struct lu_env *env,
struct lovsub_device *lsd = lu2lovsub_dev(d);
struct lu_device *next = cl2lu_dev(lsd->acid_next);
+ if (atomic_read(&d->ld_ref) && d->ld_site) {
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
+ lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
+ }
cl_device_fini(lu2cl_dev(d));
OBD_FREE_PTR(lsd);
return next;
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
index 428ffd8c37b7..374a9b78e1d2 100644
--- a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
@@ -55,7 +55,7 @@
struct lprocfs_stats *obd_memory = NULL;
EXPORT_SYMBOL(obd_memory);
-/* refine later and change to seqlock or simlar from libcfs */
+/* refine later and change to seqlock or similar from libcfs */
/* Debugging check only needed during development */
#ifdef OBD_CTXT_DEBUG
@@ -223,7 +223,7 @@ int lustre_rename(struct dentry *dir, struct vfsmount *mnt,
GOTO(put_old, err = PTR_ERR(dchild_new));
err = ll_vfs_rename(dir->d_inode, dchild_old, mnt,
- dir->d_inode, dchild_new, mnt, NULL);
+ dir->d_inode, dchild_new, mnt);
dput(dchild_new);
put_old:
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index 506982996c0e..c78bf003c2c5 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -101,7 +101,7 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
struct lustre_handle *lockh, void *lmm, int lmmsize,
struct ptlrpc_request **req, __u64 extra_lock_flags);
-int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
+int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
struct list_head *cancels, ldlm_mode_t mode,
__u64 bits);
/* mdc/mdc_request.c */
@@ -122,7 +122,7 @@ int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md);
int mdc_set_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och,
- struct ptlrpc_request *open_req);
+ struct lookup_intent *it);
int mdc_clear_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och);
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index 91f6876dac3f..5b9f37141512 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -197,7 +197,7 @@ static __u64 mds_pack_open_flags(__u64 flags, __u32 mode)
if (flags & FMODE_EXEC)
cr_flags |= MDS_FMODE_EXEC;
#endif
- if (flags & O_LOV_DELAY_CREATE)
+ if (cl_is_lov_delay_create(flags))
cr_flags |= MDS_OPEN_DELAY_CREATE;
if (flags & O_NONBLOCK)
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index 8aa7c80c2002..53022ec390f0 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -37,15 +37,15 @@
#define DEBUG_SUBSYSTEM S_MDC
# include <linux/module.h>
-# include <linux/pagemap.h>
-# include <linux/miscdevice.h>
-#include <lustre_acl.h>
+#include <linux/lustre_intent.h>
+#include <obd.h>
#include <obd_class.h>
#include <lustre_dlm.h>
-/* fid_res_name_eq() */
-#include <lustre_fid.h>
-#include <lprocfs_status.h>
+#include <lustre_fid.h> /* fid_res_name_eq() */
+#include <lustre_mdc.h>
+#include <lustre_net.h>
+#include <lustre_req_layout.h>
#include "mdc_internal.h"
struct mdc_getattr_args {
@@ -121,7 +121,7 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
struct ldlm_lock *lock;
struct inode *new_inode = data;
- if(bits)
+ if (bits)
*bits = 0;
if (!*lockh)
@@ -160,6 +160,8 @@ ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
ldlm_mode_t rc;
fid_build_reg_res_name(fid, &res_id);
+ /* LU-4405: Clear bits not supported by server */
+ policy->l_inodebits.bits &= exp_connect_ibits(exp);
rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
&res_id, type, policy, mode, lockh, 0);
return rc;
@@ -194,7 +196,7 @@ int mdc_null_inode(struct obd_export *exp,
fid_build_reg_res_name(fid, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if(res == NULL)
+ if (res == NULL)
return 0;
lock_res(res);
@@ -334,9 +336,9 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
max(lmmsize, obddev->u.cli.cl_default_mds_easize));
rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
- if (rc) {
+ if (rc < 0) {
ptlrpc_request_free(req);
- return NULL;
+ return ERR_PTR(rc);
}
spin_lock(&req->rq_lock);
@@ -378,13 +380,6 @@ mdc_intent_getxattr_pack(struct obd_export *exp,
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
- if (it->it_op == IT_SETXATTR)
- /* If we want to upgrade to LCK_PW, let's cancel LCK_PR
- * locks now. This avoids unnecessary ASTs. */
- count = mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_PW,
- MDS_INODELOCK_XATTR);
-
rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
if (rc) {
ptlrpc_request_free(req);
@@ -646,7 +641,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
* happens immediately after swabbing below, new reply
* is swabbed by that handler correctly.
*/
- mdc_set_open_replay_data(NULL, NULL, req);
+ mdc_set_open_replay_data(NULL, NULL, it);
}
if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
@@ -758,6 +753,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
/* install lvb_data */
lock_res_and_lock(lock);
if (lock->l_lvb_data == NULL) {
+ lock->l_lvb_type = LVB_T_LAYOUT;
lock->l_lvb_data = lmm;
lock->l_lvb_len = lvb_len;
lmm = NULL;
@@ -842,7 +838,7 @@ resend:
return -EOPNOTSUPP;
req = mdc_intent_layout_pack(exp, it, op_data);
lvb_type = LVB_T_LAYOUT;
- } else if (it->it_op & (IT_GETXATTR | IT_SETXATTR)) {
+ } else if (it->it_op & IT_GETXATTR) {
req = mdc_intent_getxattr_pack(exp, it, op_data);
} else {
LBUG();
@@ -880,7 +876,7 @@ resend:
rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, policy, &flags, NULL,
0, lvb_type, lockh, 0);
if (!it) {
- /* For flock requests we immediatelly return without further
+ /* For flock requests we immediately return without further
		   delay and let the caller deal with the rest, since the rest of
		   this function's metadata processing makes no sense for flock
		   requests anyway. But in case of a problem during comms with
@@ -973,7 +969,6 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
if (fid_is_sane(&op_data->op_fid2) &&
it->it_create_mode & M_CHECK_STALE &&
it->it_op != IT_GETATTR) {
- it_set_disposition(it, DISP_ENQ_COMPLETE);
/* Also: did we find the same inode? */
		/* server can return one of two fids:
@@ -1068,7 +1063,23 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
fid_build_reg_res_name(fid, &res_id);
switch (it->it_op) {
case IT_GETATTR:
- policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
+ /* File attributes are held under multiple bits:
+ * nlink is under lookup lock, size and times are
+ * under UPDATE lock and recently we've also got
+ * a separate permissions lock for owner/group/acl that
+ * were protected by lookup lock before.
+ * Getattr must provide all of that information,
+ * so we need to ensure we have all of those locks.
+ * Unfortunately, if the bits are split across multiple
+ * locks, there's no easy way to match all of them here,
+ * so an extra RPC would be performed to fetch all
+ * of those bits at once for now. */
+		/* For new MDTs (> 2.4), UPDATE|PERM should be enough,
+		 * but for old MDTs (< 2.4), permission is covered
+		 * by the LOOKUP lock, so it needs to match all bits here. */
+ policy.l_inodebits.bits = MDS_INODELOCK_UPDATE |
+ MDS_INODELOCK_LOOKUP |
+ MDS_INODELOCK_PERM;
break;
case IT_LAYOUT:
policy.l_inodebits.bits = MDS_INODELOCK_LAYOUT;
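[Illustrative note, not part of the patch] Matching UPDATE | LOOKUP | PERM in one go stays safe against pre-2.4 MDTs only because mdc_lock_match() (see the LU-4405 hunk earlier in this file) masks the requested bits with what the server actually supports before calling ldlm_lock_match(); roughly:

	/* bits the server never advertised are dropped before the match */
	policy->l_inodebits.bits &= exp_connect_ibits(exp);
	mode = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
			       &res_id, LDLM_IBITS, policy,
			       LCK_CR | LCK_CW | LCK_PR | LCK_PW, lockh, 0);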
@@ -1077,10 +1088,11 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
policy.l_inodebits.bits = MDS_INODELOCK_LOOKUP;
break;
}
- mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
- LDLM_FL_BLOCK_GRANTED, &res_id,
+
+ mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, fid,
LDLM_IBITS, &policy,
- LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh, 0);
+ LCK_CR | LCK_CW | LCK_PR | LCK_PW,
+ &lockh);
}
if (mode) {
@@ -1127,6 +1139,12 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
ldlm_blocking_callback cb_blocking,
__u64 extra_lock_flags)
{
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = it_to_lock_mode(it),
+ .ei_cb_bl = cb_blocking,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
struct lustre_handle lockh;
int rc = 0;
@@ -1152,42 +1170,19 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
- /* lookup_it may be called only after revalidate_it has run, because
- * revalidate_it cannot return errors, only zero. Returning zero causes
- * this call to lookup, which *can* return an error.
- *
- * We only want to execute the request associated with the intent one
- * time, however, so don't send the request again. Instead, skip past
- * this and use the request from revalidate. In this case, revalidate
- * never dropped its reference, so the refcounts are all OK */
- if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_IBITS,
- .ei_mode = it_to_lock_mode(it),
- .ei_cb_bl = cb_blocking,
- .ei_cb_cp = ldlm_completion_ast,
- };
-
- /* For case if upper layer did not alloc fid, do it now. */
- if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
- rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
- if (rc < 0) {
- CERROR("Can't alloc new fid, rc %d\n", rc);
- return rc;
- }
- }
- rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh,
- lmm, lmmsize, NULL, extra_lock_flags);
- if (rc < 0)
+ /* For case if upper layer did not alloc fid, do it now. */
+ if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
+ rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+ if (rc < 0) {
+ CERROR("Can't alloc new fid, rc %d\n", rc);
return rc;
- } else if (!fid_is_sane(&op_data->op_fid2) ||
- !(it->it_create_mode & M_CHECK_STALE)) {
- /* DISP_ENQ_COMPLETE set means there is extra reference on
- * request referenced from this intent, saved for subsequent
- * lookup. This path is executed when we proceed to this
- * lookup, so we clear DISP_ENQ_COMPLETE */
- it_clear_disposition(it, DISP_ENQ_COMPLETE);
+ }
}
+ rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh, lmm, lmmsize, NULL,
+ extra_lock_flags);
+ if (rc < 0)
+ return rc;
+
*reqp = it->d.lustre.it_data;
rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh);
return rc;
@@ -1269,8 +1264,8 @@ int mdc_intent_getattr_async(struct obd_export *exp,
fid_build_reg_res_name(&op_data->op_fid1, &res_id);
req = mdc_intent_getattr_pack(exp, it, op_data);
- if (!req)
- return -ENOMEM;
+ if (IS_ERR(req))
+ return PTR_ERR(req);
rc = mdc_enter_request(&obddev->u.cli);
if (rc != 0) {
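[Illustrative sketch, not part of the patch] mdc_intent_open_pack() and mdc_intent_getattr_async() above switch from a bare NULL to the standard ERR_PTR()/IS_ERR()/PTR_ERR() convention so the real errno is not lost. The generic idiom, with struct foo and foo_alloc() as made-up names:

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int id; };				/* made-up type for the example */

	static struct foo *foo_alloc(void)		/* illustrative only */
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
		return f;
	}

	/* caller side */
	f = foo_alloc();
	if (IS_ERR(f))
		return PTR_ERR(f);			/* recover the errno */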
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index 9f3a345f34e0..d79aa1641fef 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -66,7 +66,7 @@ static int mdc_reint(struct ptlrpc_request *request,
/* Find and cancel locally locks matched by inode @bits & @mode in the resource
* found by @fid. Found locks are added into @cancel list. Returns the amount of
* locks added to @cancels list. */
-int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
+int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
struct list_head *cancels, ldlm_mode_t mode,
__u64 bits)
{
@@ -165,6 +165,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
req->rq_cb_data = *mod;
(*mod)->mod_open_req = req;
req->rq_commit_cb = mdc_commit_open;
+ (*mod)->mod_is_create = true;
/**
* Take an extra reference on \var mod, it protects \var
* mod from being freed on eviction (commit callback is
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 83013927e131..bde9f93c149b 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -355,10 +355,32 @@ static int mdc_xattr_common(struct obd_export *exp,const struct req_format *fmt,
input_size);
}
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
+ /* Flush local XATTR locks to get rid of a possible cancel RPC */
+ if (opcode == MDS_REINT && fid_is_sane(fid) &&
+ exp->exp_connect_data.ocd_ibits_known & MDS_INODELOCK_XATTR) {
+ LIST_HEAD(cancels);
+ int count;
+
+		/* Without that, packing would fail */
+ if (input_size == 0)
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
+ RCL_CLIENT, 0);
+
+ count = mdc_resource_get_unused(exp, fid,
+ &cancels, LCK_EX,
+ MDS_INODELOCK_XATTR);
+
+ rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+ } else {
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
}
if (opcode == MDS_REINT) {
@@ -700,11 +722,12 @@ void mdc_commit_open(struct ptlrpc_request *req)
int mdc_set_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och,
- struct ptlrpc_request *open_req)
+ struct lookup_intent *it)
{
struct md_open_data *mod;
struct mdt_rec_create *rec;
struct mdt_body *body;
+ struct ptlrpc_request *open_req = it->d.lustre.it_data;
struct obd_import *imp = open_req->rq_import;
if (!open_req->rq_replay)
@@ -738,6 +761,8 @@ int mdc_set_open_replay_data(struct obd_export *exp,
spin_lock(&open_req->rq_lock);
och->och_mod = mod;
mod->mod_och = och;
+ mod->mod_is_create = it_disposition(it, DISP_OPEN_CREATE) ||
+ it_disposition(it, DISP_OPEN_STRIPE);
mod->mod_open_req = open_req;
open_req->rq_cb_data = mod;
open_req->rq_commit_cb = mdc_commit_open;
@@ -758,6 +783,23 @@ int mdc_set_open_replay_data(struct obd_export *exp,
return 0;
}
+static void mdc_free_open(struct md_open_data *mod)
+{
+ int committed = 0;
+
+ if (mod->mod_is_create == 0 &&
+ imp_connect_disp_stripe(mod->mod_open_req->rq_import))
+ committed = 1;
+
+ LASSERT(mod->mod_open_req->rq_replay == 0);
+
+ DEBUG_REQ(D_RPCTRACE, mod->mod_open_req, "free open request\n");
+
+ ptlrpc_request_committed(mod->mod_open_req, committed);
+ if (mod->mod_close_req)
+ ptlrpc_request_committed(mod->mod_close_req, committed);
+}
+
int mdc_clear_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och)
{
@@ -771,6 +813,8 @@ int mdc_clear_open_replay_data(struct obd_export *exp,
return 0;
LASSERT(mod != LP_POISON);
+ LASSERT(mod->mod_open_req != NULL);
+ mdc_free_open(mod);
mod->mod_och = NULL;
och->och_mod = NULL;
@@ -969,6 +1013,9 @@ int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
if (mod) {
if (rc != 0)
mod->mod_close_req = NULL;
+ LASSERT(mod->mod_open_req != NULL);
+ mdc_free_open(mod);
+
/* Since now, mod is accessed through setattr req only,
* thus DW req does not keep a reference on mod anymore. */
obd_mod_put(mod);
@@ -1527,8 +1574,8 @@ static int mdc_changelog_send_thread(void *csdata)
rc = llog_cat_process(NULL, llh, changelog_kkuc_cb, cs, 0, 0);
/* Send EOF no matter what our result */
- if ((kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch),
- cs->cs_flags))) {
+ kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags);
+ if (kuch) {
kuch->kuc_msgtype = CL_EOF;
libcfs_kkuc_msg_put(cs->cs_fp, kuch);
}
@@ -1650,11 +1697,16 @@ static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
if (rc)
CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
- if (req->rq_repmsg &&
- (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {
- *oqctl = *oqc;
+ if (req->rq_repmsg) {
+ oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
+ if (oqc) {
+ *oqctl = *oqc;
+ } else if (!rc) {
+				CERROR("Can't unpack obd_quotactl\n");
+ rc = -EPROTO;
+ }
} else if (!rc) {
- CERROR ("Can't unpack obd_quotactl\n");
+ CERROR("Can't unpack obd_quotactl\n");
rc = -EPROTO;
}
ptlrpc_req_finished(req);
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 3bdbb94e020f..de9fb1433edd 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -56,7 +56,7 @@ static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
{
__u64 resname = 0;
- if (len > 8) {
+ if (len > sizeof(resname)) {
CERROR("name too long: %s\n", name);
return -EINVAL;
}
@@ -76,6 +76,7 @@ static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
resname = 0;
break;
case CONFIG_T_RECOVER:
+ case CONFIG_T_PARAMS:
resname = type;
break;
default:
@@ -101,10 +102,13 @@ int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type)
int len;
/* logname consists of "fsname-nodetype".
- * e.g. "lustre-MDT0001", "SUN-000-client" */
+ * e.g. "lustre-MDT0001", "SUN-000-client"
+ * there is an exception: llog "params" */
name_end = strrchr(logname, '-');
- LASSERT(name_end);
- len = name_end - logname;
+ if (!name_end)
+ len = strlen(logname);
+ else
+ len = name_end - logname;
return mgc_name2resid(logname, len, res_id, type);
}
@@ -140,6 +144,8 @@ static void config_log_put(struct config_llog_data *cld)
config_log_put(cld->cld_recover);
if (cld->cld_sptlrpc)
config_log_put(cld->cld_sptlrpc);
+ if (cld->cld_params)
+ config_log_put(cld->cld_params);
if (cld_is_sptlrpc(cld))
sptlrpc_conf_log_stop(cld->cld_logname);
@@ -271,6 +277,19 @@ static struct config_llog_data *config_recover_log_add(struct obd_device *obd,
return cld;
}
+static struct config_llog_data *config_params_log_add(struct obd_device *obd,
+ struct config_llog_instance *cfg, struct super_block *sb)
+{
+ struct config_llog_instance lcfg = *cfg;
+ struct config_llog_data *cld;
+
+ lcfg.cfg_instance = sb;
+
+ cld = do_config_log_add(obd, PARAMS_FILENAME, CONFIG_T_PARAMS,
+ &lcfg, sb);
+
+ return cld;
+}
/** Add this log to the list of active logs watched by an MGC.
* Active means we're watching for updates.
@@ -284,8 +303,10 @@ static int config_log_add(struct obd_device *obd, char *logname,
struct lustre_sb_info *lsi = s2lsi(sb);
struct config_llog_data *cld;
struct config_llog_data *sptlrpc_cld;
- char seclogname[32];
- char *ptr;
+ struct config_llog_data *params_cld;
+ char seclogname[32];
+ char *ptr;
+ int rc;
CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance);
@@ -308,32 +329,49 @@ static int config_log_add(struct obd_device *obd, char *logname,
CONFIG_T_SPTLRPC, NULL, NULL);
if (IS_ERR(sptlrpc_cld)) {
CERROR("can't create sptlrpc log: %s\n", seclogname);
- return PTR_ERR(sptlrpc_cld);
+ GOTO(out_err, rc = PTR_ERR(sptlrpc_cld));
}
}
+ params_cld = config_params_log_add(obd, cfg, sb);
+ if (IS_ERR(params_cld)) {
+ rc = PTR_ERR(params_cld);
+ CERROR("%s: can't create params log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_err1, rc);
+ }
cld = do_config_log_add(obd, logname, CONFIG_T_CONFIG, cfg, sb);
if (IS_ERR(cld)) {
CERROR("can't create log: %s\n", logname);
- config_log_put(sptlrpc_cld);
- return PTR_ERR(cld);
+ GOTO(out_err2, rc = PTR_ERR(cld));
}
cld->cld_sptlrpc = sptlrpc_cld;
+ cld->cld_params = params_cld;
LASSERT(lsi->lsi_lmd);
if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) {
struct config_llog_data *recover_cld;
*strrchr(seclogname, '-') = 0;
recover_cld = config_recover_log_add(obd, seclogname, cfg, sb);
- if (IS_ERR(recover_cld)) {
- config_log_put(cld);
- return PTR_ERR(recover_cld);
- }
+ if (IS_ERR(recover_cld))
+ GOTO(out_err3, rc = PTR_ERR(recover_cld));
cld->cld_recover = recover_cld;
}
return 0;
+
+out_err3:
+ config_log_put(cld);
+
+out_err2:
+ config_log_put(params_cld);
+
+out_err1:
+ config_log_put(sptlrpc_cld);
+
+out_err:
+ return rc;
}
DEFINE_MUTEX(llog_process_lock);
@@ -344,6 +382,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
{
struct config_llog_data *cld;
struct config_llog_data *cld_sptlrpc = NULL;
+ struct config_llog_data *cld_params = NULL;
struct config_llog_data *cld_recover = NULL;
int rc = 0;
@@ -382,11 +421,20 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
spin_lock(&config_list_lock);
cld_sptlrpc = cld->cld_sptlrpc;
cld->cld_sptlrpc = NULL;
+ cld_params = cld->cld_params;
+ cld->cld_params = NULL;
spin_unlock(&config_list_lock);
if (cld_sptlrpc)
config_log_put(cld_sptlrpc);
+ if (cld_params) {
+ mutex_lock(&cld_params->cld_lock);
+ cld_params->cld_stopping = 1;
+ mutex_unlock(&cld_params->cld_lock);
+ config_log_put(cld_params);
+ }
+
/* drop the ref from the find */
config_log_put(cld);
/* drop the start ref */
@@ -1584,7 +1632,7 @@ static int mgc_llog_local_copy(const struct lu_env *env,
/*
* - copy it to backup using llog_backup()
* - copy remote llog to logname using llog_backup()
- * - if failed then move bakup to logname again
+ * - if failed then move backup to logname again
*/
OBD_ALLOC(temp_log, strlen(logname) + 1);
@@ -1664,7 +1712,7 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
LCONSOLE_ERROR_MSG(0x13a,
"Failed to get MGS log %s and no local copy.\n",
cld->cld_logname);
- GOTO(out_pop, rc = -ENOTCONN);
+ GOTO(out_pop, rc = -ENOENT);
}
CDEBUG(D_MGC,
"Failed to get MGS log %s, using local copy for now, will try to update later.\n",
@@ -1863,6 +1911,20 @@ static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
if (rc)
CERROR("Cannot process recover llog %d\n", rc);
}
+
+ if (rc == 0 && cld->cld_params != NULL) {
+ rc = mgc_process_log(obd, cld->cld_params);
+ if (rc == -ENOENT) {
+ CDEBUG(D_MGC,
+ "There is no params config file yet\n");
+ rc = 0;
+ }
+ /* params log is optional */
+ if (rc)
+ CERROR(
+ "%s: can't process params llog: rc = %d\n",
+ obd->obd_name, rc);
+ }
config_log_put(cld);
break;
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index e048500edd60..3bebc78e7673 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -942,7 +942,7 @@ int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
struct cl_page *page;
int result = 0;
- CERROR("Canceling ongoing page trasmission\n");
+ CERROR("Canceling ongoing page transmission\n");
cl_page_list_for_each(page, queue) {
int rc;
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index 749eb082f979..d795cef3f164 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -932,7 +932,7 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
* LU-305 */
blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
- init_waitqueue_entry_current(&waiter);
+ init_waitqueue_entry(&waiter, current);
add_wait_queue(&lock->cll_wq, &waiter);
set_current_state(TASK_INTERRUPTIBLE);
cl_lock_mutex_put(env, lock);
@@ -943,7 +943,7 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
* can be restarted if signals are pending here */
result = -ERESTARTSYS;
if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
- waitq_wait(&waiter, TASK_INTERRUPTIBLE);
+ schedule();
if (!cfs_signal_pending())
result = 0;
}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 1a926036724b..0fc256f59e92 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -508,7 +508,7 @@ EXPORT_SYMBOL(cl_site_stats_print);
* about journal_info. Currently following fields in task_struct are identified
* can be used for this purpose:
* - cl_env: for liblustre.
- * - tux_info: ony on RedHat kernel.
+ * - tux_info: only on RedHat kernel.
* - ...
* \note As long as we use task_struct to store cl_env, we assume that once
* called into Lustre, we'll never call into the other part of the kernel
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index d9f750d42c80..f2bdea33041d 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -1010,6 +1010,8 @@ struct obd_import *class_new_import(struct obd_device *obd)
INIT_LIST_HEAD(&imp->imp_replay_list);
INIT_LIST_HEAD(&imp->imp_sending_list);
INIT_LIST_HEAD(&imp->imp_delayed_list);
+ INIT_LIST_HEAD(&imp->imp_committed_list);
+ imp->imp_replay_cursor = &imp->imp_committed_list;
spin_lock_init(&imp->imp_lock);
imp->imp_last_success_conn = 0;
imp->imp_state = LUSTRE_IMP_NEW;
@@ -1532,8 +1534,8 @@ void obd_exports_barrier(struct obd_device *obd)
spin_lock(&obd->obd_dev_lock);
while (!list_empty(&obd->obd_unlinked_exports)) {
spin_unlock(&obd->obd_dev_lock);
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- cfs_time_seconds(waited));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(waited));
if (waited > 5 && IS_PO2(waited)) {
LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
"more than %d seconds. "
@@ -1625,7 +1627,7 @@ static int obd_zombie_impexp_check(void *arg)
}
/**
- * Add export to the obd_zombe thread and notify it.
+ * Add export to the obd_zombie thread and notify it.
*/
static void obd_zombie_export_add(struct obd_export *exp) {
spin_lock(&exp->exp_obd->obd_dev_lock);
@@ -1641,7 +1643,7 @@ static void obd_zombie_export_add(struct obd_export *exp) {
}
/**
- * Add import to the obd_zombe thread and notify it.
+ * Add import to the obd_zombie thread and notify it.
*/
static void obd_zombie_import_add(struct obd_import *imp) {
LASSERT(imp->imp_sec == NULL);
@@ -1661,7 +1663,7 @@ static void obd_zombie_import_add(struct obd_import *imp) {
static void obd_zombie_impexp_notify(void)
{
/*
- * Make sure obd_zomebie_impexp_thread get this notification.
+ * Make sure obd_zombie_impexp_thread get this notification.
	 * It is possible this signal is only seen by obd_zombie_barrier, and
	 * the barrier gulps this notification and sleeps away, and a hang ensues
*/
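[Illustrative note, not part of the patch] The obd_exports_barrier() hunk above open-codes the sleep that the removed libcfs helper schedule_timeout_and_set_state() provided; the stock kernel wrapper would do the same thing in one call:

	/* as written in the patch */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(cfs_time_seconds(waited));

	/* equivalent single-call form */
	schedule_timeout_uninterruptible(cfs_time_seconds(waited));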
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 121a856d5052..ba20776ebfa1 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -184,7 +184,7 @@ static long obd_class_ioctl(struct file *filp, unsigned int cmd,
int err = 0;
/* Allow non-root access for OBD_IOC_PING_TARGET - used by lfs check */
- if (!cfs_capable(CFS_CAP_SYS_ADMIN) && (cmd != OBD_IOC_PING_TARGET))
+ if (!capable(CFS_CAP_SYS_ADMIN) && (cmd != OBD_IOC_PING_TARGET))
return err = -EACCES;
if ((cmd & 0xffffff00) == ((int)'T') << 8) /* ignore all tty ioctls */
return err = -ENOTTY;
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index c0f3af725747..1d999310ec92 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -551,9 +551,8 @@ int llog_cat_process_cb(const struct lu_env *env, struct llog_handle *cat_llh,
if (rec->lrh_index < d->lpd_startcat)
/* Skip processing of the logs until startcat */
- return 0;
-
- if (d->lpd_startidx > 0) {
+ rc = 0;
+ else if (d->lpd_startidx > 0) {
struct llog_process_cat_data cd;
cd.lpcd_first_idx = d->lpd_startidx;
@@ -566,6 +565,7 @@ int llog_cat_process_cb(const struct lu_env *env, struct llog_handle *cat_llh,
rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
NULL, false);
}
+
llog_handle_put(llh);
return rc;
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c b/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c
index 5385d8e658cf..d86bb8c60354 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c
@@ -376,7 +376,7 @@ static void llog_skip_over(__u64 *off, int curr, int goal)
/* sets:
* - cur_offset to the furthest point read in the log file
- * - cur_idx to the log index preceeding cur_offset
+ * - cur_idx to the log index preceding cur_offset
* returns -EIO/-EINVAL on error
*/
static int llog_lvfs_next_block(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_osd.c b/drivers/staging/lustre/lustre/obdclass/llog_osd.c
index 654c8e189653..682279de8bea 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_osd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_osd.c
@@ -514,7 +514,7 @@ static void llog_skip_over(__u64 *off, int curr, int goal)
/* sets:
* - cur_offset to the furthest point read in the log file
- * - cur_idx to the log index preceeding cur_offset
+ * - cur_idx to the log index preceding cur_offset
* returns -EIO/-EINVAL on error
*/
static int llog_osd_next_block(const struct lu_env *env,
@@ -1073,7 +1073,7 @@ static int llog_osd_setup(const struct lu_env *env, struct obd_device *obd,
LASSERT(ctxt);
/* initialize data allowing to generate new fids,
- * literally we need a sequece */
+ * literally we need a sequence */
lgi->lgi_fid.f_seq = FID_SEQ_LLOG;
lgi->lgi_fid.f_oid = 1;
lgi->lgi_fid.f_ver = 0;
@@ -1280,7 +1280,7 @@ int llog_osd_put_cat_list(const struct lu_env *env, struct dt_device *d,
lgi->lgi_buf.lb_len = size;
rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
if (rc)
- CDEBUG(D_INODE, "error writeing CATALOGS: rc = %d\n", rc);
+ CDEBUG(D_INODE, "error writing CATALOGS: rc = %d\n", rc);
out_trans:
dt_trans_stop(env, d, th);
out:
diff --git a/drivers/staging/lustre/lustre/obdclass/local_storage.c b/drivers/staging/lustre/lustre/obdclass/local_storage.c
index e79e4beb3628..e76f7d044231 100644
--- a/drivers/staging/lustre/lustre/obdclass/local_storage.c
+++ b/drivers/staging/lustre/lustre/obdclass/local_storage.c
@@ -737,7 +737,7 @@ int lastid_compat_check(const struct lu_env *env, struct dt_device *dev,
* All dynamic fids will be generated with the same sequence and incremented
* OIDs
*
- * Returned local_oid_storage is in-memory representaion of OID storage
+ * Returned local_oid_storage is in-memory representation of OID storage
*/
int local_oid_storage_init(const struct lu_env *env, struct dt_device *dev,
const struct lu_fid *first_fid,
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index ec3b605dae6b..1432dd74fe95 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -98,6 +98,8 @@ static const char * const obd_connect_names[] = {
"lightweight_conn",
"short_io",
"pingless",
+ "flock_deadlock",
+ "disp_stripe",
"unknown",
NULL
};
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 9887d8fffb6e..92e8a15a5e5d 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -571,7 +571,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
* drained), and moreover, lookup has to wait until object is freed.
*/
- init_waitqueue_entry_current(waiter);
+ init_waitqueue_entry(waiter, current);
add_wait_queue(&bkt->lsb_marche_funebre, waiter);
set_current_state(TASK_UNINTERRUPTIBLE);
lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
@@ -712,7 +712,7 @@ struct lu_object *lu_object_find_at(const struct lu_env *env,
* lu_object_find_try() already added waiter into the
* wait queue.
*/
- waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
+ schedule();
bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
}
@@ -890,10 +890,10 @@ static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
hash = fid_flatten32(fid);
hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
- hash = cfs_hash_long(hash, hs->hs_bkt_bits);
+ hash = hash_long(hash, hs->hs_bkt_bits);
/* give me another random factor */
- hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
+ hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
@@ -2100,7 +2100,7 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
EXPORT_SYMBOL(lu_object_assign_fid);
/**
- * allocates object with 0 (non-assiged) fid
+ * allocates object with 0 (non-assigned) fid
* XXX: temporary solution to be able to assign fid in ->do_create()
* till we have fully-functional OST fids
*/
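[Illustrative note, not part of the patch] cfs_hash_long() was a thin libcfs alias for the kernel's hash_long(), so the hunk above only drops the wrapper. hash_long() folds an unsigned long down to the requested number of bits (key and bkt_bits are placeholders):

	#include <linux/hash.h>

	unsigned int bkt = hash_long(key, bkt_bits);	/* result fits in bkt_bits bits */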
diff --git a/drivers/staging/lustre/lustre/obdclass/md_attrs.c b/drivers/staging/lustre/lustre/obdclass/md_attrs.c
index f7187829e276..f080cceb384c 100644
--- a/drivers/staging/lustre/lustre/obdclass/md_attrs.c
+++ b/drivers/staging/lustre/lustre/obdclass/md_attrs.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(lustre_lma_init);
*/
void lustre_lma_swab(struct lustre_mdt_attrs *lma)
{
- /* Use LUSTRE_MSG_MAGIC to detect local endianess. */
+ /* Use LUSTRE_MSG_MAGIC to detect local endianness. */
if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
__swab32s(&lma->lma_compat);
__swab32s(&lma->lma_incompat);
@@ -77,7 +77,7 @@ EXPORT_SYMBOL(lustre_lma_swab);
*/
void lustre_som_swab(struct som_attrs *attrs)
{
- /* Use LUSTRE_MSG_MAGIC to detect local endianess. */
+ /* Use LUSTRE_MSG_MAGIC to detect local endianness. */
if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
__swab32s(&attrs->som_compat);
__swab32s(&attrs->som_incompat);
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(lustre_buf2som);
*/
void lustre_hsm_swab(struct hsm_attrs *attrs)
{
- /* Use LUSTRE_MSG_MAGIC to detect local endianess. */
+ /* Use LUSTRE_MSG_MAGIC to detect local endianness. */
if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
__swab32s(&attrs->hsm_compat);
__swab32s(&attrs->hsm_flags);
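[Illustrative note, not part of the patch] A quick worked example of the endianness trick named in the comments above: LUSTRE_MSG_MAGIC is a CPU-order constant, so the comparison resolves at compile time.

	/* little-endian CPU: cpu_to_le32() is a no-op, the branch is dead code;
	 * big-endian CPU: the two values differ, so the __swab32s() calls run */
	if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC))
		__swab32s(&lma->lma_compat);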
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 362ae541b209..2d5777699f47 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -61,7 +61,8 @@ int class_find_param(char *buf, char *key, char **valp)
if (!buf)
return 1;
- if ((ptr = strstr(buf, key)) == NULL)
+ ptr = strstr(buf, key);
+ if (ptr == NULL)
return 1;
if (valp)
@@ -592,7 +593,7 @@ int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg)
}
EXPORT_SYMBOL(class_detach);
-/** Start shutting down the obd. There may be in-progess ops when
+/** Start shutting down the obd. There may be in-progress ops when
* this is called. We tell them to start shutting down with a call
* to class_disconnect_exports().
*/
@@ -655,7 +656,7 @@ int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
/* The three references that should be remaining are the
* obd_self_export and the attach and setup references. */
if (atomic_read(&obd->obd_refcount) > 3) {
- /* refcounf - 3 might be the number of real exports
+ /* refcount - 3 might be the number of real exports
(excluding self export). But class_incref is called
by other things as well, so don't count on it. */
CDEBUG(D_IOCTL, "%s: forcing exports to disconnect: %d\n",
@@ -1027,6 +1028,46 @@ struct lustre_cfg *lustre_cfg_rename(struct lustre_cfg *cfg,
}
EXPORT_SYMBOL(lustre_cfg_rename);
+static int process_param2_config(struct lustre_cfg *lcfg)
+{
+ char *param = lustre_cfg_string(lcfg, 1);
+ char *upcall = lustre_cfg_string(lcfg, 2);
+ char *argv[] = {
+ [0] = "/usr/sbin/lctl",
+ [1] = "set_param",
+ [2] = param,
+ [3] = NULL
+ };
+ struct timeval start;
+ struct timeval end;
+ int rc;
+
+ /* Add upcall processing here. Now only lctl is supported */
+ if (strcmp(upcall, LCTL_UPCALL) != 0) {
+ CERROR("Unsupported upcall %s\n", upcall);
+ return -EINVAL;
+ }
+
+ do_gettimeofday(&start);
+ rc = USERMODEHELPER(argv[0], argv, NULL);
+ do_gettimeofday(&end);
+
+ if (rc < 0) {
+ CERROR(
+ "lctl: error invoking upcall %s %s %s: rc = %d; time %ldus\n",
+ argv[0], argv[1], argv[2], rc,
+ cfs_timeval_sub(&end, &start, NULL));
+ } else {
+ CDEBUG(D_HA, "lctl: invoked upcall %s %s %s, time %ldus\n",
+ argv[0], argv[1], argv[2],
+ cfs_timeval_sub(&end, &start, NULL));
+ rc = 0;
+ }
+
+ return rc;
+}
+
void lustre_register_quota_process_config(int (*qpc)(struct lustre_cfg *lcfg))
{
quota_process_config = qpc;
@@ -1142,11 +1183,14 @@ int class_process_config(struct lustre_cfg *lcfg)
err = (*quota_process_config)(lcfg);
GOTO(out, err);
}
- /* Fall through */
+
break;
}
+ case LCFG_SET_PARAM: {
+ err = process_param2_config(lcfg);
+ GOTO(out, 0);
+ }
}
-
/* Commands that require a device */
obd = class_name2obd(lustre_cfg_string(lcfg, 0));
if (obd == NULL) {
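[Illustrative sketch, not part of the patch] process_param2_config() hands the set_param request to userspace lctl. Assuming the USERMODEHELPER() macro wraps call_usermodehelper() in blocking mode (an assumption, not shown in this diff), the underlying call is roughly:

	#include <linux/kmod.h>

	char *argv[] = { "/usr/sbin/lctl", "set_param", param, NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	rc = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);	/* wait for lctl to exit */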
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index a69a630b596e..6f8ba54e9667 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -501,7 +501,7 @@ static int lustre_stop_mgc(struct super_block *sb)
}
/* The MGC has no recoverable data in any case.
- * force shotdown set in umount_begin */
+ * force shutdown set in umount_begin */
obd->obd_no_recov = 1;
if (obd->u.cli.cl_mgc_mgsexp) {
@@ -754,7 +754,7 @@ int server_name2index(const char *svname, __u32 *idx, const char **endptr)
}
EXPORT_SYMBOL(server_name2index);
-/*************** mount common betweeen server and client ***************/
+/*************** mount common between server and client ***************/
/* Common umount */
int lustre_common_put_super(struct super_block *sb)
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 70997648a4f3..3b1b28afd6b1 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -191,7 +191,7 @@ int obdo_cmp_md(struct obdo *dst, struct obdo *src, obd_flag compare)
}
if ( compare & OBD_MD_FLGENER )
res = (res || (dst->o_parent_oid != src->o_parent_oid));
- /* XXX Don't know if thses should be included here - wasn't previously
+ /* XXX Don't know if these should be included here - wasn't previously
if ( compare & OBD_MD_FLINLINE )
res = (res || memcmp(dst->o_inline, src->o_inline));
*/
@@ -233,7 +233,7 @@ void obdo_from_iattr(struct obdo *oa, struct iattr *attr, unsigned int ia_valid)
oa->o_mode = attr->ia_mode;
oa->o_valid |= OBD_MD_FLTYPE | OBD_MD_FLMODE;
if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
- !cfs_capable(CFS_CAP_FSETID))
+ !capable(CFS_CAP_FSETID))
oa->o_mode &= ~S_ISGID;
}
if (ia_valid & ATTR_UID) {
@@ -282,7 +282,7 @@ void iattr_from_obdo(struct iattr *attr, struct obdo *oa, obd_flag valid)
attr->ia_mode = (attr->ia_mode & S_IFMT)|(oa->o_mode & ~S_IFMT);
attr->ia_valid |= ATTR_MODE;
if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
- !cfs_capable(CFS_CAP_FSETID))
+ !capable(CFS_CAP_FSETID))
attr->ia_mode &= ~S_ISGID;
}
if (valid & OBD_MD_FLUID) {
diff --git a/drivers/staging/lustre/lustre/obdecho/echo.c b/drivers/staging/lustre/lustre/obdecho/echo.c
index debb9cec4900..96a807f82ec1 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo.c
@@ -606,7 +606,8 @@ static int echo_cleanup(struct obd_device *obd)
/* XXX Bug 3413; wait for a bit to ensure the BL callback has
* happened before calling ldlm_namespace_free() */
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE, cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
obd->obd_namespace = NULL;
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 9b2dea292363..754aa8e55665 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -997,8 +997,8 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
spin_unlock(&ec->ec_lock);
CERROR("echo_client still has objects at cleanup time, "
"wait for 1 second\n");
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
lu_site_purge(env, &ed->ed_site->cs_lu, -1);
spin_lock(&ec->ec_lock);
}
@@ -2764,7 +2764,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
switch (cmd) {
case OBD_IOC_CREATE: /* may create echo object */
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1,
@@ -2778,7 +2778,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
int dirlen;
__u64 id;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO(out, rc = -EPERM);
count = data->ioc_count;
@@ -2806,7 +2806,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
__u64 seq;
int max_count;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO(out, rc = -EPERM);
cl_env = cl_env_get(&refcheck);
@@ -2838,7 +2838,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO(out, rc);
}
case OBD_IOC_DESTROY:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
rc = echo_get_object(&eco, ed, oa);
@@ -2863,7 +2863,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO(out, rc);
case OBD_IOC_SETATTR:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
rc = echo_get_object(&eco, ed, oa);
@@ -2878,7 +2878,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO(out, rc);
case OBD_IOC_BRW_WRITE:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
rw = OBD_BRW_WRITE;
@@ -2897,7 +2897,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO(out, rc);
case ECHO_IOC_SET_STRIPE:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
if (data->ioc_pbuf1 == NULL) { /* unset */
@@ -2914,7 +2914,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO (out, rc);
case ECHO_IOC_ENQUEUE:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
rc = echo_client_enqueue(exp, oa,
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index be4511e78c04..fe9989a49f81 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1020,7 +1020,7 @@ out:
/**
* This function is used to make the extent prepared for transfer.
- * A race with flusing page - ll_writepage() has to be handled cautiously.
+ * A race with flushing page - ll_writepage() has to be handled cautiously.
*/
static int osc_extent_make_ready(const struct lu_env *env,
struct osc_extent *ext)
@@ -2146,7 +2146,7 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
oap->oap_obj_off = offset;
LASSERT(!(offset & ~CFS_PAGE_MASK));
- if (!client_is_remote(exp) && cfs_capable(CFS_CAP_SYS_RESOURCE))
+ if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags = OBD_BRW_NOQUOTA;
INIT_LIST_HEAD(&oap->oap_pending_item);
@@ -2186,7 +2186,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
/* Set the OBD_BRW_SRVLOCK before the page is queued. */
brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
if (!client_is_remote(osc_export(osc)) &&
- cfs_capable(CFS_CAP_SYS_RESOURCE)) {
+ capable(CFS_CAP_SYS_RESOURCE)) {
brw_flags |= OBD_BRW_NOQUOTA;
cmd |= OBD_BRW_NOQUOTA;
}
@@ -2394,6 +2394,12 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
* really sending the RPC. */
case OES_TRUNC:
/* race with truncate, page will be redirtied */
+ case OES_ACTIVE:
+ /* The extent is active so we need to abort and let the caller
+ * re-dirty the page. If we continued on here, and we were the
+ * one making the extent active, we could deadlock waiting for
+ * the page writeback to clear but it won't because the extent
+ * is active and won't be written out. */
GOTO(out, rc = -EAGAIN);
default:
break;
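[Illustrative sketch, not part of the patch] The new OES_ACTIVE case returns -EAGAIN for the reason given in the comment above; the write path is expected to redirty the page and retry later. A hedged sketch of that caller side, where the flush entry point name is assumed and redirty_page_for_writepage() is the stock kernel helper:

	rc = cl_page_flush(env, io, page);		/* assumed entry point */
	if (rc == -EAGAIN) {
		/* extent still active: put the page back on the dirty list and
		 * let a later writeback pass retry once the extent settles */
		redirty_page_for_writepage(wbc, vmpage);
		rc = 0;
	}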
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 681d60a7d5ef..5f3c545418d1 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -297,7 +297,7 @@ static int osc_io_commit_write(const struct lu_env *env,
*/
osc_page_touch(env, cl2osc_page(slice), to);
if (!client_is_remote(osc_export(obj)) &&
- cfs_capable(CFS_CAP_SYS_RESOURCE))
+ capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
if (oio->oi_lockless)
@@ -512,19 +512,15 @@ static int osc_io_read_start(const struct lu_env *env,
struct osc_io *oio = cl2osc_io(env, slice);
struct cl_object *obj = slice->cis_obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- int result = 0;
+ int rc = 0;
- if (oio->oi_lockless == 0) {
+ if (oio->oi_lockless == 0 && !slice->cis_io->ci_noatime) {
cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- if (result == 0) {
- attr->cat_atime = LTIME_S(CURRENT_TIME);
- result = cl_object_attr_set(env, obj, attr,
- CAT_ATIME);
- }
+ attr->cat_atime = LTIME_S(CURRENT_TIME);
+ rc = cl_object_attr_set(env, obj, attr, CAT_ATIME);
cl_object_attr_unlock(obj);
}
- return result;
+ return rc;
}
static int osc_io_write_start(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 4909e486dc5c..96cb6e2b9c4e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -561,7 +561,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;
if (!client_is_remote(osc_export(obj)) &&
- cfs_capable(CFS_CAP_SYS_RESOURCE)) {
+ capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
oap->oap_cmd |= OBD_BRW_NOQUOTA;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index 6045a78a2baa..0235fabaaffe 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -51,11 +51,8 @@ int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
if (oqi) {
- obd_uid id = oqi->oqi_id;
-
- LASSERTF(id == qid[type],
- "The ids don't match %u != %u\n",
- id, qid[type]);
+ /* do not try to access oqi here, it could have been
+ * freed by osc_quota_setdq() */
/* the slot is busy, the user is about to run out of
* quota space on this OST */
@@ -268,11 +265,16 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
if (rc)
CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
- if (req->rq_repmsg &&
- (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {
- *oqctl = *oqc;
+ if (req->rq_repmsg) {
+ oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
+ if (oqc) {
+ *oqctl = *oqc;
+ } else if (!rc) {
+ CERROR("Can't unpack obd_quotactl\n");
+ rc = -EPROTO;
+ }
} else if (!rc) {
- CERROR ("Can't unpack obd_quotactl\n");
+ CERROR("Can't unpack obd_quotactl\n");
rc = -EPROTO;
}
ptlrpc_req_finished(req);
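The osc_quotactl() rework above is behavior-preserving: it lifts the assignment out of the if () condition and splits the error reporting, while keeping the same precedence (an existing error in rc is never overwritten by -EPROTO). Read in isolation, the reply handling reduces to the sketch below, where req, oqc, oqctl and rc are the variables from the hunk.

if (req->rq_repmsg) {
	oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
	if (oqc)
		*oqctl = *oqc;		/* reply unpacked fine */
	else if (!rc)
		rc = -EPROTO;		/* reply present but unusable */
} else if (!rc) {
	rc = -EPROTO;			/* no reply buffer at all */
}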
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index d90efe408414..4c9e00695087 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -48,6 +48,7 @@
#include "ptlrpc_internal.h"
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
+static int ptlrpcd_check_work(struct ptlrpc_request *req);
/**
* Initialize passed in client structure \a cl.
@@ -62,7 +63,7 @@ void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
EXPORT_SYMBOL(ptlrpc_init_client);
/**
- * Return PortalRPC connection for remore uud \a uuid
+ * Return PortalRPC connection for remote uud \a uuid
*/
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
@@ -127,7 +128,7 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
* Prepare bulk descriptor for specified outgoing request \a req that
* can fit \a npages * pages. \a type is bulk type. \a portal is where
* the bulk to be sent. Used on client-side.
- * Returns pointer to newly allocatrd initialized bulk descriptor or NULL on
+ * Returns pointer to newly allocated initialized bulk descriptor or NULL on
* error.
*/
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
@@ -631,7 +632,7 @@ int ptlrpc_request_pack(struct ptlrpc_request *request,
/* For some old 1.8 clients (< 1.8.7), they will LASSERT the size of
* ptlrpc_body sent from server equal to local ptlrpc_body size, so we
- * have to send old ptlrpc_body to keep interoprability with these
+ * have to send old ptlrpc_body to keep interoperability with these
* clients.
*
* Only three kinds of server->client RPCs so far:
@@ -639,7 +640,7 @@ int ptlrpc_request_pack(struct ptlrpc_request *request,
* - LDLM_CP_CALLBACK
* - LDLM_GL_CALLBACK
*
- * XXX This should be removed whenever we drop the interoprability with
+ * XXX This should be removed whenever we drop the interoperability with
* the these old clients.
*/
if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
@@ -686,7 +687,7 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
/**
* Helper function for creating a request.
- * Calls __ptlrpc_request_alloc to allocate new request sturcture and inits
+ * Calls __ptlrpc_request_alloc to allocate new request structure and inits
* buffer structures according to capsule template \a format.
* Returns allocated request structure pointer or NULL on error.
*/
@@ -743,7 +744,7 @@ void ptlrpc_request_free(struct ptlrpc_request *request)
EXPORT_SYMBOL(ptlrpc_request_free);
/**
- * Allocate new request for operatione \a opcode and immediatelly pack it for
+ * Allocate new request for operation \a opcode and immediately pack it for
* network transfer.
* Only used for simple requests like OBD_PING where the only important
* part of the request is operation itself.
@@ -768,7 +769,7 @@ struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
/**
- * Prepare request (fetched from pool \a poolif not NULL) on import \a imp
+ * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
* for operation \a opcode. Request would contain \a count buffers.
* Sizes of buffers are described in array \a lengths and buffers themselves
* are provided by a pointer \a bufs.
@@ -1073,7 +1074,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
}
/**
- * Decide if the eror message regarding provided request \a req
+ * Decide if the error message regarding provided request \a req
* should be printed to the console or not.
* Makes it's decision on request status and other properties.
* Returns 1 to print error on the system console or 0 if not.
@@ -1159,7 +1160,7 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req)
/**
* Callback function called when client receives RPC reply for \a req.
* Returns 0 on success or error code.
- * The return alue would be assigned to req->rq_status by the caller
+ * The return value would be assigned to req->rq_status by the caller
* as request processing status.
* This function also decides if the request needs to be saved for later replay.
*/
@@ -1190,7 +1191,9 @@ static int after_reply(struct ptlrpc_request *req)
* will roundup it */
req->rq_replen = req->rq_nob_received;
req->rq_nob_received = 0;
+ spin_lock(&req->rq_lock);
req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
return 0;
}
@@ -1313,7 +1316,11 @@ static int after_reply(struct ptlrpc_request *req)
/** version recovery */
ptlrpc_save_versions(req);
ptlrpc_retain_replayable_request(req, imp);
- } else if (req->rq_commit_cb != NULL) {
+ } else if (req->rq_commit_cb != NULL &&
+ list_empty(&req->rq_replay_list)) {
+ /* NB: don't call rq_commit_cb if it's already on
+ * rq_replay_list, ptlrpc_free_committed() will call
+ * it later, see LU-3618 for details */
spin_unlock(&imp->imp_lock);
req->rq_commit_cb(req);
spin_lock(&imp->imp_lock);
@@ -1408,7 +1415,9 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
req->rq_status = rc;
return 1;
} else {
+ spin_lock(&req->rq_lock);
req->rq_wait_ctx = 1;
+ spin_unlock(&req->rq_lock);
return 0;
}
}
@@ -1423,7 +1432,9 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
rc = ptl_send_rpc(req, 0);
if (rc) {
DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
+ spin_lock(&req->rq_lock);
req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
return rc;
}
return 0;
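The rq_resend, rq_wait_ctx and rq_net_err assignments above all gain the same bracketing because these are single-bit flags in struct ptlrpc_request that typically share a word; setting one without rq_lock can clobber a concurrent update to a neighbouring bit. A sketch of the pattern, with a hypothetical helper name:

static void ptlrpc_set_net_err(struct ptlrpc_request *req)
{
	spin_lock(&req->rq_lock);
	req->rq_net_err = 1;		/* bitfield: update under rq_lock */
	spin_unlock(&req->rq_lock);
}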
@@ -1688,6 +1699,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
spin_lock(&req->rq_lock);
req->rq_net_err = 1;
spin_unlock(&req->rq_lock);
+ continue;
}
/* need to reset the timeout */
force_timer_recalc = 1;
@@ -1773,6 +1785,10 @@ interpret:
ptlrpc_req_interpret(env, req, req->rq_status);
+ if (ptlrpcd_check_work(req)) {
+ atomic_dec(&set->set_remaining);
+ continue;
+ }
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
@@ -2031,7 +2047,7 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
EXPORT_SYMBOL(ptlrpc_set_next_timeout);
/**
- * Send all unset request from the set and then wait untill all
+ * Send all unset request from the set and then wait until all
* requests in the set complete (either get a reply, timeout, get an
* error or otherwise be interrupted).
* Returns 0 on success or error code otherwise.
@@ -2156,7 +2172,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
EXPORT_SYMBOL(ptlrpc_set_wait);
/**
- * Helper fuction for request freeing.
+ * Helper function for request freeing.
* Called when request count reached zero and request needs to be freed.
* Removes request from all sorts of sending/replay lists it might be on,
* frees network buffers if any are present.
@@ -2223,7 +2239,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
/**
* Drop one request reference. Must be called with import imp_lock held.
- * When reference count drops to zero, reuqest is freed.
+ * When reference count drops to zero, request is freed.
*/
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
@@ -2236,7 +2252,7 @@ EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
* Helper function
* Drops one reference count for request \a request.
* \a locked set indicates that caller holds import imp_lock.
- * Frees the request whe reference count reaches zero.
+ * Frees the request when reference count reaches zero.
*/
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
@@ -2360,19 +2376,52 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
}
EXPORT_SYMBOL(ptlrpc_unregister_reply);
+static void ptlrpc_free_request(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_lock);
+ req->rq_replay = 0;
+ spin_unlock(&req->rq_lock);
+
+ if (req->rq_commit_cb != NULL)
+ req->rq_commit_cb(req);
+ list_del_init(&req->rq_replay_list);
+
+ __ptlrpc_req_finished(req, 1);
+}
+
+/**
+ * the request is committed and dropped from the replay list of its import
+ */
+void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
+{
+ struct obd_import *imp = req->rq_import;
+
+ spin_lock(&imp->imp_lock);
+ if (list_empty(&req->rq_replay_list)) {
+ spin_unlock(&imp->imp_lock);
+ return;
+ }
+
+ if (force || req->rq_transno <= imp->imp_peer_committed_transno)
+ ptlrpc_free_request(req);
+
+ spin_unlock(&imp->imp_lock);
+}
+EXPORT_SYMBOL(ptlrpc_request_committed);
+
/**
* Iterates through replay_list on import and prunes
* all requests have transno smaller than last_committed for the
* import and don't have rq_replay set.
- * Since requests are sorted in transno order, stops when meetign first
+ * Since requests are sorted in transno order, stops when meeting first
* transno bigger than last_committed.
* caller must hold imp->imp_lock
*/
void ptlrpc_free_committed(struct obd_import *imp)
{
- struct list_head *tmp, *saved;
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req, *saved;
struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
+ bool skip_committed_list = true;
LASSERT(imp != NULL);
@@ -2388,13 +2437,15 @@ void ptlrpc_free_committed(struct obd_import *imp)
CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
imp->imp_generation);
+
+ if (imp->imp_generation != imp->imp_last_generation_checked)
+ skip_committed_list = false;
+
imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
imp->imp_last_generation_checked = imp->imp_generation;
- list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
- req = list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
-
+ list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
+ rq_replay_list) {
/* XXX ok to remove when 1357 resolved - rread 05/29/03 */
LASSERT(req != last_req);
last_req = req;
@@ -2408,27 +2459,34 @@ void ptlrpc_free_committed(struct obd_import *imp)
GOTO(free_req, 0);
}
- if (req->rq_replay) {
- DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
- continue;
- }
-
/* not yet committed */
if (req->rq_transno > imp->imp_peer_committed_transno) {
DEBUG_REQ(D_RPCTRACE, req, "stopping search");
break;
}
+ if (req->rq_replay) {
+ DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
+ list_move_tail(&req->rq_replay_list,
+ &imp->imp_committed_list);
+ continue;
+ }
+
DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
imp->imp_peer_committed_transno);
free_req:
- spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- spin_unlock(&req->rq_lock);
- if (req->rq_commit_cb != NULL)
- req->rq_commit_cb(req);
- list_del_init(&req->rq_replay_list);
- __ptlrpc_req_finished(req, 1);
+ ptlrpc_free_request(req);
+ }
+ if (skip_committed_list)
+ return;
+
+ list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
+ rq_replay_list) {
+ LASSERT(req->rq_transno != 0);
+ if (req->rq_import_generation < imp->imp_generation) {
+ DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
+ ptlrpc_free_request(req);
+ }
}
}
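Two things happen in the ptlrpc_free_committed() hunk above: the open-coded list_for_each_safe()/list_entry() pair becomes list_for_each_entry_safe(), and requests that must be kept for replay are parked on imp_committed_list instead of being rescanned on every pass. Stripped of the debug output, the resulting loop has roughly this shape (a sketch, not a verbatim copy):

struct ptlrpc_request *req, *next;

list_for_each_entry_safe(req, next, &imp->imp_replay_list, rq_replay_list) {
	if (req->rq_transno > imp->imp_peer_committed_transno)
		break;				/* list is sorted by transno */
	if (req->rq_replay)
		list_move_tail(&req->rq_replay_list,
			       &imp->imp_committed_list);
	else
		ptlrpc_free_request(req);
}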
@@ -2585,7 +2643,7 @@ struct ptlrpc_replay_async_args {
/**
* Callback used for replayed requests reply processing.
- * In case of succesful reply calls registeresd request replay callback.
+ * In case of successful reply calls registered request replay callback.
* In case of error restart replay process.
*/
static int ptlrpc_replay_interpret(const struct lu_env *env,
@@ -2834,7 +2892,7 @@ void ptlrpc_init_xid(void)
ptlrpc_last_xid = (__u64)now << 20;
}
- /* Need to always be aligned to a power-of-two for mutli-bulk BRW */
+ /* Always need to be aligned to a power-of-two for multi-bulk BRW */
CLASSERT((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) == 0);
ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
}
@@ -2904,22 +2962,50 @@ EXPORT_SYMBOL(ptlrpc_sample_next_xid);
* have delay before it really runs by ptlrpcd thread.
*/
struct ptlrpc_work_async_args {
- __u64 magic;
int (*cb)(const struct lu_env *, void *);
void *cbdata;
};
-#define PTLRPC_WORK_MAGIC 0x6655436b676f4f44ULL /* magic code */
+static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
+{
+ /* re-initialize the req */
+ req->rq_timeout = obd_timeout;
+ req->rq_sent = cfs_time_current_sec();
+ req->rq_deadline = req->rq_sent + req->rq_timeout;
+ req->rq_reply_deadline = req->rq_deadline;
+ req->rq_phase = RQ_PHASE_INTERPRET;
+ req->rq_next_phase = RQ_PHASE_COMPLETE;
+ req->rq_xid = ptlrpc_next_xid();
+ req->rq_import_generation = req->rq_import->imp_generation;
+
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+}
static int work_interpreter(const struct lu_env *env,
struct ptlrpc_request *req, void *data, int rc)
{
struct ptlrpc_work_async_args *arg = data;
- LASSERT(arg->magic == PTLRPC_WORK_MAGIC);
+ LASSERT(ptlrpcd_check_work(req));
LASSERT(arg->cb != NULL);
- return arg->cb(env, arg->cbdata);
+ rc = arg->cb(env, arg->cbdata);
+
+ list_del_init(&req->rq_set_chain);
+ req->rq_set = NULL;
+
+ if (atomic_dec_return(&req->rq_refcount) > 1) {
+ atomic_set(&req->rq_refcount, 2);
+ ptlrpcd_add_work_req(req);
+ }
+ return rc;
+}
+
+static int worker_format;
+
+static int ptlrpcd_check_work(struct ptlrpc_request *req)
+{
+ return req->rq_pill.rc_fmt == (void *)&worker_format;
}
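The magic-number check is replaced above by a sentinel pointer: a work request is recognised by pointing rq_pill.rc_fmt at a private static object, whose address can never collide with a genuine req_format. The same trick in miniature; the names here are illustrative, not from the patch:

static int work_marker;		/* address used as a tag, never dereferenced */

static void tag_work_req(struct ptlrpc_request *req)
{
	req->rq_pill.rc_fmt = (void *)&work_marker;
}

static bool is_work_req(const struct ptlrpc_request *req)
{
	return req->rq_pill.rc_fmt == (void *)&work_marker;
}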
/**
@@ -2952,6 +3038,7 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
req->rq_receiving_reply = 0;
req->rq_must_unlink = 0;
req->rq_no_delay = req->rq_no_resend = 1;
+ req->rq_pill.rc_fmt = (void *)&worker_format;
spin_lock_init(&req->rq_lock);
INIT_LIST_HEAD(&req->rq_list);
@@ -2965,7 +3052,6 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
args = ptlrpc_req_async_args(req);
- args->magic = PTLRPC_WORK_MAGIC;
args->cb = cb;
args->cbdata = cbdata;
@@ -2995,25 +3081,8 @@ int ptlrpcd_queue_work(void *handler)
* req as opaque data. - Jinshan
*/
LASSERT(atomic_read(&req->rq_refcount) > 0);
- if (atomic_read(&req->rq_refcount) > 1)
- return -EBUSY;
-
- if (atomic_inc_return(&req->rq_refcount) > 2) { /* race */
- atomic_dec(&req->rq_refcount);
- return -EBUSY;
- }
-
- /* re-initialize the req */
- req->rq_timeout = obd_timeout;
- req->rq_sent = cfs_time_current_sec();
- req->rq_deadline = req->rq_sent + req->rq_timeout;
- req->rq_reply_deadline = req->rq_deadline;
- req->rq_phase = RQ_PHASE_INTERPRET;
- req->rq_next_phase = RQ_PHASE_COMPLETE;
- req->rq_xid = ptlrpc_next_xid();
- req->rq_import_generation = req->rq_import->imp_generation;
-
- ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ if (atomic_inc_return(&req->rq_refcount) == 2)
+ ptlrpcd_add_work_req(req);
return 0;
}
EXPORT_SYMBOL(ptlrpcd_queue_work);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index f66cfea87acf..6ea0a491cfb3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -545,7 +545,7 @@ int ptlrpc_ni_init(void)
* different depending on... */
/* kernel LNet calls our master callback when there are new event,
* because we are guaranteed to get every event via callback,
- * so we just set EQ size to 0 to avoid overhread of serializing
+ * so we just set EQ size to 0 to avoid overhead of serializing
* enqueue/dequeue operations in LNet. */
rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
if (rc == 0)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c
index 55247af3910e..c279edf5b2a5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c
@@ -336,7 +336,7 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
if (rc) {
/* If any _real_ denial be made, we expect server return
* -EACCES reply or return success but indicate gss error
- * inside reply messsage. All other errors are treated as
+ * inside reply message. All other errors are treated as
* timeout, caller might try the negotiation repeatedly,
* leave recovery decisions to general ptlrpc layer.
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
index 56c28286c9c1..8dc5c724958d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
@@ -144,7 +144,8 @@ int der_read_length(unsigned char **buf, int *bufsize)
sf = *(*buf)++;
(*bufsize)--;
if (sf & 0x80) {
- if ((sf &= 0x7f) > ((*bufsize) - 1))
+ sf &= 0x7f;
+ if (((*bufsize) - 1) < sf)
return -1;
if (sf > SIZEOF_INT)
return -1;
@@ -199,27 +200,32 @@ __u32 g_verify_token_header(rawobj_t *mech, int *body_size,
rawobj_t toid;
int ret = 0;
- if ((toksize -= 1) < 0)
+ toksize -= 1;
+ if (0 > toksize)
return (G_BAD_TOK_HEADER);
if (*buf++ != 0x60)
return (G_BAD_TOK_HEADER);
- if ((seqsize = der_read_length(&buf, &toksize)) < 0)
+ seqsize = der_read_length(&buf, &toksize);
+ if (seqsize < 0)
return(G_BAD_TOK_HEADER);
if (seqsize != toksize)
return (G_BAD_TOK_HEADER);
- if ((toksize -= 1) < 0)
+ toksize -= 1;
+ if (0 > toksize)
return (G_BAD_TOK_HEADER);
if (*buf++ != 0x06)
return (G_BAD_TOK_HEADER);
- if ((toksize -= 1) < 0)
+ toksize -= 1;
+ if (0 > toksize)
return (G_BAD_TOK_HEADER);
toid.len = *buf++;
- if ((toksize -= toid.len) < 0)
+ toksize -= toid.len;
+ if (0 > toksize)
return (G_BAD_TOK_HEADER);
toid.data = buf;
buf += toid.len;
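The token-header checks above are pure restructuring for checkpatch: the decrement is pulled out of the if () condition and compared against zero on its own line. A standalone sketch of the idiom, with a hypothetical helper:

/* consume nbytes from a remaining-length counter; sketch only */
static int token_consume(int *remaining, int nbytes)
{
	*remaining -= nbytes;
	if (*remaining < 0)
		return -1;	/* token is shorter than its header claims */
	return 0;
}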
@@ -231,7 +237,8 @@ __u32 g_verify_token_header(rawobj_t *mech, int *body_size,
* important to return G_BAD_TOK_HEADER if the token header is
* in fact bad
*/
- if ((toksize -= 2) < 0)
+ toksize -= 2;
+ if (0 > toksize)
return (G_BAD_TOK_HEADER);
if (ret)
@@ -256,24 +263,29 @@ __u32 g_get_mech_oid(rawobj_t *mech, rawobj_t *in_buf)
int ret = 0;
int seqsize;
- if ((len -= 1) < 0)
+ len -= 1;
+ if (0 > len)
return (G_BAD_TOK_HEADER);
if (*buf++ != 0x60)
return (G_BAD_TOK_HEADER);
- if ((seqsize = der_read_length(&buf, &len)) < 0)
+ seqsize = der_read_length(&buf, &len);
+ if (seqsize < 0)
return (G_BAD_TOK_HEADER);
- if ((len -= 1) < 0)
+ len -= 1;
+ if (0 > len)
return (G_BAD_TOK_HEADER);
if (*buf++ != 0x06)
return (G_BAD_TOK_HEADER);
- if ((len -= 1) < 0)
+ len -= 1;
+ if (0 > len)
return (G_BAD_TOK_HEADER);
mech->len = *buf++;
- if ((len -= mech->len) < 0)
+ len -= mech->len;
+ if (0 > len)
return (G_BAD_TOK_HEADER);
OBD_ALLOC_LARGE(mech->data, mech->len);
if (!mech->data)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
index d43a13c69669..4642bbfb9273 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
@@ -1176,7 +1176,7 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
/*
* called with key semaphore write locked. it means we can operate
- * on the context without fear of loosing refcount.
+ * on the context without fear of losing refcount.
*/
static
int gss_kt_update(struct key *key, const void *data, size_t datalen)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
index b9fa3b4a40db..d03f6c114171 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -54,7 +54,6 @@
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
-#include <linux/crypto.h>
#include <obd.h>
#include <obd_class.h>
@@ -679,7 +678,8 @@ __s32 krb5_make_checksum(__u32 enctype,
__u32 code = GSS_S_FAILURE;
int rc;
- if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
+ tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0);
+ if (!tfm) {
CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
return GSS_S_FAILURE;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
index c624518c181a..7a1ff4fecbe3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
@@ -85,7 +85,7 @@ static void gss_sec_pipe_upcall_fini(struct gss_sec *gsec)
}
/****************************************
- * internel context helpers *
+ * internal context helpers *
****************************************/
static
@@ -652,7 +652,7 @@ __u32 mech_name2idx(const char *name)
/* pipefs dentries for each mechanisms */
static struct dentry *de_pipes[MECH_MAX] = { NULL, };
-/* all upcall messgaes linked here */
+/* all upcall messages linked here */
static struct list_head upcall_lists[MECH_MAX];
/* and protected by this */
static spinlock_t upcall_locks[MECH_MAX];
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
index 5b5365b4629f..359c48ec2f5b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
@@ -102,7 +102,7 @@ static inline unsigned long hash_mem(char *buf, int length, int bits)
len++;
if ((len & (BITS_PER_LONG/8-1)) == 0)
- hash = cfs_hash_long(hash^l, BITS_PER_LONG);
+ hash = hash_long(hash^l, BITS_PER_LONG);
} while (len);
return hash >> (BITS_PER_LONG - bits);
@@ -586,7 +586,7 @@ static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
goto out;
/* currently the expiry time passed down from user-space
- * is invalid, here we retrive it from mech. */
+ * is invalid, here we retrieve it from mech. */
if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
CERROR("unable to get expire time, drop it\n");
goto out;
@@ -880,7 +880,7 @@ int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
cache_get(&rsip->h); /* take an extra ref */
init_waitqueue_head(&rsip->waitq);
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
add_wait_queue(&rsip->waitq, &wait);
cache_check:
@@ -1067,7 +1067,7 @@ int __init gss_init_svc_upcall(void)
* the init upcall channel, otherwise there's big chance that the first
* upcall issued before the channel be opened thus nfsv4 cache code will
* drop the request direclty, thus lead to unnecessary recovery time.
- * here we wait at miximum 1.5 seconds. */
+ * here we wait at maximum 1.5 seconds. */
for (i = 0; i < 6; i++) {
if (atomic_read(&rsi_cache.readers) > 0)
break;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
index 8ce6271a5daa..383601cdd4e6 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
@@ -382,7 +382,7 @@ void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
/* At this point this ctx might have been marked as dead by
* someone else, in which case nobody will make further use
* of it. we don't care, and mark it UPTODATE will help
- * destroying server side context when it be destroied. */
+ * destroying server side context when it be destroyed. */
set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
if (sec_is_reverse(ctx->cc_sec)) {
@@ -676,7 +676,7 @@ redo:
* lead to the sequence number fall behind the window on server and
* be dropped. also applies to gss_cli_ctx_seal().
*
- * Note: null mode dosen't check sequence number. */
+ * Note: null mode doesn't check sequence number. */
if (svc != SPTLRPC_SVC_NULL &&
atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
int behind = atomic_read(&gctx->gc_seq) - seq;
@@ -1215,7 +1215,7 @@ int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
/*
* remove UPTODATE flag of reverse ctx thus we won't send fini rpc,
* this is to avoid potential problems of client side reverse svc ctx
- * be mis-destroyed in various recovery senarios. anyway client can
+ * be mis-destroyed in various recovery scenarios. anyway client can
* manage its reverse ctx well by associating it with its buddy ctx.
*/
if (sec_is_reverse(sec))
@@ -1882,7 +1882,7 @@ int gss_svc_sign(struct ptlrpc_request *req,
LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
- /* embedded lustre_msg might have been shrinked */
+ /* embedded lustre_msg might have been shrunk */
if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);
@@ -2596,7 +2596,7 @@ static int gss_svc_seal(struct ptlrpc_request *req,
int msglen, rc;
/* get clear data length. note embedded lustre_msg might
- * have been shrinked */
+ * have been shrunk */
if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
else
@@ -2765,7 +2765,7 @@ int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
* replay.
*
* each reverse root ctx will record its latest sequence number on its
- * buddy svcctx before be destroied, so here we continue use it.
+ * buddy svcctx before be destroyed, so here we continue use it.
*/
atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
@@ -2836,7 +2836,7 @@ int __init sptlrpc_gss_init(void)
if (rc)
goto out_svc_upcall;
- /* register policy after all other stuff be intialized, because it
+ /* register policy after all other stuff be initialized, because it
* might be in used immediately after the registration. */
rc = gss_init_keyring();
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index f465547eb95e..537aa6204a51 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -560,17 +560,30 @@ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
struct ptlrpc_request *req;
struct list_head *tmp;
- if (list_empty(&imp->imp_replay_list))
- return 0;
- tmp = imp->imp_replay_list.next;
- req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
- *transno = req->rq_transno;
- if (req->rq_transno == 0) {
- DEBUG_REQ(D_ERROR, req, "zero transno in replay");
- LBUG();
+ /* The requests in committed_list always have smaller transnos than
+ * the requests in replay_list */
+ if (!list_empty(&imp->imp_committed_list)) {
+ tmp = imp->imp_committed_list.next;
+ req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ *transno = req->rq_transno;
+ if (req->rq_transno == 0) {
+ DEBUG_REQ(D_ERROR, req,
+ "zero transno in committed_list");
+ LBUG();
+ }
+ return 1;
}
-
- return 1;
+ if (!list_empty(&imp->imp_replay_list)) {
+ tmp = imp->imp_replay_list.next;
+ req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ *transno = req->rq_transno;
+ if (req->rq_transno == 0) {
+ DEBUG_REQ(D_ERROR, req, "zero transno in replay_list");
+ LBUG();
+ }
+ return 1;
+ }
+ return 0;
}
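ptlrpc_first_transno() now consults imp_committed_list first; as the new comment notes, committed requests always carry transnos no larger than anything still on imp_replay_list, so the head of whichever list is non-empty (committed list first) is the overall oldest transno. A compressed sketch of that lookup, with a hypothetical helper name and the zero-transno assertions dropped:

static struct ptlrpc_request *first_replayable_req(struct obd_import *imp)
{
	struct list_head *head = NULL;

	if (!list_empty(&imp->imp_committed_list))
		head = &imp->imp_committed_list;
	else if (!list_empty(&imp->imp_replay_list))
		head = &imp->imp_replay_list;

	return head ? list_entry(head->next, struct ptlrpc_request,
				 rq_replay_list) : NULL;
}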
/**
@@ -1042,7 +1055,7 @@ finish:
if ((ocd->ocd_cksum_types &
cksum_types_supported_client()) == 0) {
LCONSOLE_WARN("The negotiation of the checksum "
- "alogrithm to use with server %s "
+ "algorithm to use with server %s "
"failed (%x/%x), disabling "
"checksums\n",
obd2cli_tgt(imp->imp_obd),
@@ -1260,7 +1273,7 @@ static int ptlrpc_invalidate_import_thread(void *data)
/**
* This is the state machine for client-side recovery on import.
*
- * Typicaly we have two possibly paths. If we came to server and it is not
+ * Typically we have two possibly paths. If we came to server and it is not
* in recovery, we just enter IMP_EVICTED state, invalidate our import
* state and reconnect from scratch.
* If we came to server that is in recovery, we enter IMP_REPLAY import state.
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index dfcb410fe485..41c12e00129f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -295,7 +295,8 @@ static const struct req_msg_field *mds_reint_setxattr_client[] = {
&RMF_REC_REINT,
&RMF_CAPA1,
&RMF_NAME,
- &RMF_EADATA
+ &RMF_EADATA,
+ &RMF_DLM_REQ
};
static const struct req_msg_field *mdt_swap_layouts[] = {
@@ -2154,7 +2155,7 @@ EXPORT_SYMBOL(req_capsule_server_sized_swab_get);
* request (if the caller is executing on the server-side) or reply (if the
* caller is executing on the client-side).
*
- * This function convienient for use is code that could be executed on the
+ * This function convenient for use is code that could be executed on the
* client and server alike.
*/
const void *req_capsule_other_get(struct req_capsule *pill,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 1be978609c59..58f1c8bf25b0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -616,7 +616,7 @@ out:
}
/**
- * The longest valid command string is the maxium policy name size, plus the
+ * The longest valid command string is the maximum policy name size, plus the
* length of the " reg" substring
*/
#define LPROCFS_NRS_WR_MAX_CMD (NRS_POL_NAME_MAX + sizeof(" reg") - 1)
@@ -1184,7 +1184,7 @@ int lprocfs_wr_evict_client(struct file *file, const char *buffer,
}
tmpbuf = cfs_firststr(kbuf, min_t(unsigned long, BUFLEN - 1, count));
/* Kludge code(deadlock situation): the lprocfs lock has been held
- * since the client is evicted by writting client's
+ * since the client is evicted by writing client's
* uuid/nid to procfs "evict_client" entry. However,
* obd_export_evict_by_uuid() will call lprocfs_remove() to destroy
* the proc entries under the being destroyed export{}, so I have
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index 3c6bf23415f9..a47a8d807d5b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -349,7 +349,7 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
/**
* Send request reply from request \a req reply buffer.
* \a flags defines reply types
- * Returns 0 on sucess or error code
+ * Returns 0 on success or error code
*/
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
@@ -389,7 +389,7 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
* ptlrpc_body in reply buffer to ptlrpc_body_v2, otherwise, the
* reply buffer on client will be overflow.
*
- * XXX Remove this whenver we drop the interoprability with such client.
+ * XXX Remove this whenever we drop the interoperability with such client.
*/
req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
sizeof(struct ptlrpc_body_v2), 1);
@@ -511,7 +511,9 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
request->rq_import->imp_obd->obd_name);
/* this prevents us from waiting in ptlrpc_queue_wait */
+ spin_lock(&request->rq_lock);
request->rq_err = 1;
+ spin_unlock(&request->rq_lock);
request->rq_status = -ENODEV;
return -ENODEV;
}
@@ -553,7 +555,9 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
if (rc) {
/* this prevents us from looping in
* ptlrpc_queue_wait */
+ spin_lock(&request->rq_lock);
request->rq_err = 1;
+ spin_unlock(&request->rq_lock);
request->rq_status = rc;
GOTO(cleanup_bulk, rc);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 0abcd6d82273..bcba1c8e8693 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -1322,7 +1322,7 @@ EXPORT_SYMBOL(ptlrpc_nrs_policy_unregister);
* Setup NRS heads on all service partitions of service \a svc, and register
* all compatible policies on those NRS heads.
*
- * To be called from withing ptl
+ * To be called from within ptl
* \param[in] svc the service to setup
*
* \retval -ve error, the calling logic should eventually call
@@ -1736,7 +1736,7 @@ fail:
}
/**
- * Removes all policy desciptors from nrs_core::nrs_policies, and frees the
+ * Removes all policy descriptors from nrs_core::nrs_policies, and frees the
* policy descriptors.
*
* Since all PTLRPC services are stopped at this point, there are no more
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 464479c0f00b..45c0b84621e4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -123,7 +123,7 @@ int lustre_msg_early_size(void)
* with the old client (< 2.3) which doesn't have pb_jobid
* in the ptlrpc_body.
*
- * XXX Remove this whenever we dorp interoprability with such
+ * XXX Remove this whenever we drop interoperability with such
* client.
*/
__u32 pblen = sizeof(struct ptlrpc_body_v2);
@@ -244,15 +244,7 @@ int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
/* only use new format, we don't need to be compatible with 1.4 */
- magic = LUSTRE_MSG_MAGIC_V2;
-
- switch (magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_pack_request_v2(req, count, lens, bufs);
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", magic);
- return -EINVAL;
- }
+ return lustre_pack_request_v2(req, count, lens, bufs);
}
EXPORT_SYMBOL(lustre_pack_request);
@@ -1545,7 +1537,7 @@ void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
__u32 opc = lustre_msg_get_opc(msg);
struct ptlrpc_body *pb;
- /* Don't set jobid for ldlm ast RPCs, they've been shrinked.
+ /* Don't set jobid for ldlm ast RPCs, they've been shrunk.
* See the comment in ptlrpc_request_pack(). */
if (!opc || opc == LDLM_BL_CALLBACK ||
opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 2d26fd543d46..ca734ce079c1 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -229,7 +229,7 @@ void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
spin_unlock(&req->rq_lock);
l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
} else if (req->rq_set) {
- /* If we have a vaid "rq_set", just reuse it to avoid double
+ /* If we have a valid "rq_set", just reuse it to avoid double
* linked. */
LASSERT(req->rq_phase == RQ_PHASE_NEW);
LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
@@ -471,7 +471,7 @@ static int ptlrpcd(void *arg)
* be better. But it breaks former data transfer policy.
*
* So we shouldn't be blind for avoiding the data transfer. We make some
- * compromise: divide the ptlrpcd threds pool into two parts. One part is
+ * compromise: divide the ptlrpcd threads pool into two parts. One part is
* for bound mode, each ptlrpcd thread in this part is bound to some CPU
* core. The other part is for free mode, all the ptlrpcd threads in the
* part can be scheduled on any CPU core. We specify some partnership
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 84c39e083ea4..48ae328ce24e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -105,24 +105,59 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
* imp_lock is being held by ptlrpc_replay, but it's not. it's
* just a little race...
*/
- list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
+
+ /* Replay all the committed open requests on committed_list first */
+ if (!list_empty(&imp->imp_committed_list)) {
+ tmp = imp->imp_committed_list.prev;
req = list_entry(tmp, struct ptlrpc_request,
rq_replay_list);
- /* If need to resend the last sent transno (because a
- reconnect has occurred), then stop on the matching
- req and send it again. If, however, the last sent
- transno has been committed then we continue replay
- from the next request. */
+ /* The last request on committed_list hasn't been replayed */
if (req->rq_transno > last_transno) {
- if (imp->imp_resend_replay)
- lustre_msg_add_flags(req->rq_reqmsg,
- MSG_RESENT);
- break;
+ /* Since the imp_committed_list is immutable before
+ * all of its requests have been replayed, it's safe to
+ * use a cursor to accelerate the search */
+ imp->imp_replay_cursor = imp->imp_replay_cursor->next;
+
+ while (imp->imp_replay_cursor !=
+ &imp->imp_committed_list) {
+ req = list_entry(imp->imp_replay_cursor,
+ struct ptlrpc_request,
+ rq_replay_list);
+ if (req->rq_transno > last_transno)
+ break;
+
+ req = NULL;
+ imp->imp_replay_cursor =
+ imp->imp_replay_cursor->next;
+ }
+ } else {
+ /* All requests on committed_list have been replayed */
+ imp->imp_replay_cursor = &imp->imp_committed_list;
+ req = NULL;
+ }
+ }
+
+ /* All the requests in committed list have been replayed, let's replay
+ * the imp_replay_list */
+ if (req == NULL) {
+ list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_replay_list);
+
+ if (req->rq_transno > last_transno)
+ break;
+ req = NULL;
}
- req = NULL;
}
+ /* If need to resend the last sent transno (because a reconnect
+ * has occurred), then stop on the matching req and send it again.
+ * If, however, the last sent transno has been committed then we
+ * continue replay from the next request. */
+ if (req != NULL && imp->imp_resend_replay)
+ lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
+
spin_lock(&imp->imp_lock);
imp->imp_resend_replay = 0;
spin_unlock(&imp->imp_lock);
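The replay path above now works through imp_committed_list before falling back to imp_replay_list, and it remembers where it stopped in imp_replay_cursor: since the committed list cannot change while it is being replayed, the next call resumes from the cursor instead of rescanning from the head. A toy model of the cursor walk; the local names are illustrative, and locking plus the MSG_RESENT handling are omitted:

struct list_head *pos = imp->imp_replay_cursor->next;
struct ptlrpc_request *next_req = NULL;

while (pos != &imp->imp_committed_list) {
	struct ptlrpc_request *req =
		list_entry(pos, struct ptlrpc_request, rq_replay_list);

	if (req->rq_transno > last_transno) {
		next_req = req;		/* first request not yet replayed */
		break;
	}
	pos = pos->next;
}
imp->imp_replay_cursor = pos;		/* resume point for the next call */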
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 962b31d163de..d80418057f29 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -543,8 +543,8 @@ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
"ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
newctx, newctx->cc_flags);
- schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
- HZ);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
} else {
/*
* it's possible newctx == oldctx if we're switching
@@ -779,7 +779,7 @@ again:
* e.g. ptlrpc_abort_inflight();
*/
if (!cli_ctx_is_refreshed(ctx)) {
- /* timed out or interruptted */
+ /* timed out or interrupted */
req_off_ctx_list(req, ctx);
LASSERT(rc != 0);
@@ -805,7 +805,7 @@ void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
LASSERT(req->rq_cli_ctx->cc_sec);
LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
- /* special security flags accoding to opcode */
+ /* special security flags according to opcode */
switch (opcode) {
case OST_READ:
case MDS_READPAGE:
@@ -1218,7 +1218,7 @@ static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
LASSERT(policy->sp_cops->destroy_sec);
- CDEBUG(D_SEC, "%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
+ CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
policy->sp_cops->destroy_sec(sec);
sptlrpc_policy_put(policy);
@@ -1264,7 +1264,7 @@ void sptlrpc_sec_put(struct ptlrpc_sec *sec)
EXPORT_SYMBOL(sptlrpc_sec_put);
/*
- * policy module is responsible for taking refrence of import
+ * policy module is responsible for taking reference of import
*/
static
struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
@@ -1419,7 +1419,7 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp,
sp = imp->imp_obd->u.cli.cl_sp_me;
} else {
- /* reverse import, determine flavor from incoming reqeust */
+ /* reverse import, determine flavor from incoming request */
sf = *flvr;
if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
@@ -2057,7 +2057,7 @@ int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
/*
* if it's not null flavor (which means embedded packing msg),
- * reset the swab mask for the comming inner msg unpacking.
+ * reset the swab mask for the coming inner msg unpacking.
*/
if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
req->rq_req_swab_mask = 0;
@@ -2108,7 +2108,7 @@ int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
/**
* Used by ptlrpc server, to perform transformation upon reply message.
*
- * \post req->rq_reply_off is set to approriate server-controlled reply offset.
+ * \post req->rq_reply_off is set to appropriate server-controlled reply offset.
* \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
*/
int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 316103ab7c3c..965668132747 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -113,7 +113,7 @@ static struct ptlrpc_enc_page_pool {
unsigned long epp_st_missings; /* # of cache missing */
unsigned long epp_st_lowfree; /* lowest free pages reached */
unsigned int epp_st_max_wqlen; /* highest waitqueue length */
- cfs_time_t epp_st_max_wait; /* in jeffies */
+ cfs_time_t epp_st_max_wait; /* in jiffies */
/*
* pointers to pools
*/
@@ -545,11 +545,11 @@ again:
page_pools.epp_waitqlen;
set_current_state(TASK_UNINTERRUPTIBLE);
- init_waitqueue_entry_current(&waitlink);
+ init_waitqueue_entry(&waitlink, current);
add_wait_queue(&page_pools.epp_waitq, &waitlink);
spin_unlock(&page_pools.epp_lock);
- waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
+ schedule();
remove_wait_queue(&page_pools.epp_waitq, &waitlink);
LASSERT(page_pools.epp_waitqlen > 0);
spin_lock(&page_pools.epp_lock);
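The sec_bulk.c hunk drops the libcfs wrappers (init_waitqueue_entry_current(), waitq_wait()) in favour of the stock kernel wait API. For reference, the generic form of that sleep, sketched around the same epp_waitq/epp_lock names used above:

DECLARE_WAITQUEUE(waitlink, current);	/* wraps init_waitqueue_entry() */

set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&page_pools.epp_waitq, &waitlink);
spin_unlock(&page_pools.epp_lock);

schedule();				/* sleep until the pool wakes us */

remove_wait_queue(&page_pools.epp_waitq, &waitlink);
spin_lock(&page_pools.epp_lock);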
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index 6cc3f23c27cc..bf56120abfaf 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -255,7 +255,7 @@ void sptlrpc_rule_set_free(struct sptlrpc_rule_set *rset)
EXPORT_SYMBOL(sptlrpc_rule_set_free);
/*
- * return 0 if the rule set could accomodate one more rule.
+ * return 0 if the rule set could accommodate one more rule.
*/
int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset)
{
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 590fa8df8b7f..192adec5382a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -177,7 +177,7 @@ ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
/**
* Part of Rep-Ack logic.
- * Puts a lock and its mode into reply state assotiated to request reply.
+ * Puts a lock and its mode into reply state associated to request reply.
*/
void
ptlrpc_save_lock(struct ptlrpc_request *req,
@@ -252,7 +252,7 @@ struct rs_batch {
static struct ptlrpc_hr_service ptlrpc_hr;
/**
- * maximum mumber of replies scheduled in one batch
+ * maximum number of replies scheduled in one batch
*/
#define MAX_SCHEDULED 256
@@ -612,7 +612,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
- /* acitve requests and hp requests */
+ /* active requests and hp requests */
spin_lock_init(&svcpt->scp_req_lock);
/* reply states */
@@ -752,7 +752,7 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
spin_lock_init(&service->srv_lock);
service->srv_name = conf->psc_name;
service->srv_watchdog_factor = conf->psc_watchdog_factor;
- INIT_LIST_HEAD(&service->srv_list); /* for safty of cleanup */
+ INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */
/* buffer configuration */
service->srv_nbuf_per_group = test_req_buffer_pressure ?
@@ -1982,7 +1982,7 @@ put_conn:
do_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
+ "%s:%s+%d:%d:x"LPU64":%s:%d Request processed in "
"%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
current_comm(),
(request->rq_export ?
@@ -2736,7 +2736,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
spin_lock(&svcpt->scp_lock);
--svcpt->scp_nthrs_starting;
if (thread_is_stopping(thread)) {
- /* this ptlrpc_thread is being hanled
+ /* this ptlrpc_thread is being handled
* by ptlrpc_svcpt_stop_threads now
*/
thread_add_flags(thread, SVC_STOPPED);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 3aa445952024..3c8846006a7b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -1152,6 +1152,8 @@ void lustre_assert_wire_constants(void)
OBD_CONNECT_SHORTIO);
LASSERTF(OBD_CONNECT_PINGLESS == 0x4000000000000ULL, "found 0x%.16llxULL\n",
OBD_CONNECT_PINGLESS);
+ LASSERTF(OBD_CONNECT_FLOCK_DEAD == 0x8000000000000ULL,
+ "found 0x%.16llxULL\n", OBD_CONNECT_FLOCK_DEAD);
LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
(unsigned)OBD_CKSUM_CRC32);
LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 22b0c9d6f046..a9f2e63a7c9c 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -41,6 +41,8 @@ source "drivers/staging/media/solo6x10/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
+source "drivers/staging/media/rtl2832u_sdr/Kconfig"
+
# Keep LIRC at the end, as it has sub-menus
source "drivers/staging/media/lirc/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index bedc62aaede6..8e2c5d272162 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_USB_SN9C102) += sn9c102/
obj-$(CONFIG_VIDEO_OMAP2) += omap24xx/
obj-$(CONFIG_VIDEO_TCM825X) += omap24xx/
+obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832u_sdr/
+
diff --git a/drivers/staging/media/as102/as10x_handle.h b/drivers/staging/media/as102/as10x_handle.h
index 62b9795ee424..5638b191b780 100644
--- a/drivers/staging/media/as102/as10x_handle.h
+++ b/drivers/staging/media/as102/as10x_handle.h
@@ -28,26 +28,26 @@ struct as102_dev_t;
#define REGMODE32 32
struct as102_priv_ops_t {
- int (*upload_fw_pkt) (struct as10x_bus_adapter_t *bus_adap,
+ int (*upload_fw_pkt)(struct as10x_bus_adapter_t *bus_adap,
unsigned char *buf, int buflen, int swap32);
- int (*send_cmd) (struct as10x_bus_adapter_t *bus_adap,
+ int (*send_cmd)(struct as10x_bus_adapter_t *bus_adap,
unsigned char *buf, int buflen);
- int (*xfer_cmd) (struct as10x_bus_adapter_t *bus_adap,
+ int (*xfer_cmd)(struct as10x_bus_adapter_t *bus_adap,
unsigned char *send_buf, int send_buf_len,
unsigned char *recv_buf, int recv_buf_len);
- int (*start_stream) (struct as102_dev_t *dev);
- void (*stop_stream) (struct as102_dev_t *dev);
+ int (*start_stream)(struct as102_dev_t *dev);
+ void (*stop_stream)(struct as102_dev_t *dev);
- int (*reset_target) (struct as10x_bus_adapter_t *bus_adap);
+ int (*reset_target)(struct as10x_bus_adapter_t *bus_adap);
int (*read_write)(struct as10x_bus_adapter_t *bus_adap, uint8_t mode,
uint32_t rd_addr, uint16_t rd_len,
uint32_t wr_addr, uint16_t wr_len);
- int (*as102_read_ep2) (struct as10x_bus_adapter_t *bus_adap,
+ int (*as102_read_ep2)(struct as10x_bus_adapter_t *bus_adap,
unsigned char *recv_buf,
int recv_buf_len);
};
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 6cb74dacc69d..a2a5182570c5 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -329,11 +329,14 @@ static int init(struct cxd *ci)
break;
#if 0
- status = write_reg(ci, 0x09, 0x4D); /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
+ /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
+ status = write_reg(ci, 0x09, 0x4D);
if (status < 0)
break;
#endif
- status = write_reg(ci, 0x0A, 0xA7); /* TOSTRT = 8, Mode B (gated clock), falling Edge, Serial, POL=HIGH, MSB */
+ /* TOSTRT = 8, Mode B (gated clock), falling Edge,
+ * Serial, POL=HIGH, MSB */
+ status = write_reg(ci, 0x0A, 0xA7);
if (status < 0)
break;
@@ -589,7 +592,7 @@ static int campoll(struct cxd *ci)
read_reg(ci, 0x01, &slotstat);
if (!(2&slotstat)) {
if (!ci->slot_stat) {
- ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_PRESENT;
+ ci->slot_stat = DVB_CA_EN50221_POLL_CAM_PRESENT;
write_regm(ci, 0x03, 0x08, 0x08);
}
@@ -601,7 +604,8 @@ static int campoll(struct cxd *ci)
ci->ready = 0;
}
}
- if (istat&8 && ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
+ if (istat&8 &&
+ ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
ci->ready = 1;
ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY;
}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
index 2d36b60bdbf1..b2daf5e63f88 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -267,7 +267,7 @@ int config_ipipe_hw(struct vpfe_ipipe_device *ipipe)
}
ipipe_mode = get_ipipe_mode(ipipe);
- if (ipipe < 0) {
+ if (ipipe_mode < 0) {
pr_err("Failed to get ipipe mode");
return -EINVAL;
}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index d8ce20d2fbda..cda8388cbb89 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -298,7 +298,7 @@ static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
{
int ret = 0;
- ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, IRQF_DISABLED,
+ ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0,
"vpfe_capture0", vpfe_dev);
if (ret < 0) {
v4l2_err(&vpfe_dev->v4l2_dev,
@@ -306,7 +306,7 @@ static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
return ret;
}
- ret = request_irq(vpfe_dev->ccdc_irq1, vpfe_vdint1_isr, IRQF_DISABLED,
+ ret = request_irq(vpfe_dev->ccdc_irq1, vpfe_vdint1_isr, 0,
"vpfe_capture1", vpfe_dev);
if (ret < 0) {
v4l2_err(&vpfe_dev->v4l2_dev,
@@ -316,7 +316,7 @@ static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
}
ret = request_irq(vpfe_dev->imp_dma_irq, vpfe_imp_dma_isr,
- IRQF_DISABLED, "Imp_Sdram_Irq", vpfe_dev);
+ 0, "Imp_Sdram_Irq", vpfe_dev);
if (ret < 0) {
v4l2_err(&vpfe_dev->v4l2_dev,
"Error: requesting IMP IRQ interrupt\n");
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 1f3b0f9a8d10..8c101cbbee97 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -1201,8 +1201,6 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
unsigned long addr;
int ret;
- if (count == 0)
- return -ENOBUFS;
ret = mutex_lock_interruptible(&video->lock);
if (ret)
goto streamoff;
@@ -1327,6 +1325,7 @@ static int vpfe_reqbufs(struct file *file, void *priv,
q->type = req_buf->type;
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->drv_priv = fh;
+ q->min_buffers_needed = 1;
q->ops = &video_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
index df0aeec8b588..ca9a70245233 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.h
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.h
@@ -32,7 +32,7 @@ struct vpfe_device;
* if there was no buffer previously queued.
*/
struct vpfe_video_operations {
- int(*queue) (struct vpfe_device *vpfe_dev, unsigned long addr);
+ int (*queue)(struct vpfe_device *vpfe_dev, unsigned long addr);
};
enum vpfe_pipeline_stream_state {
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index e729e52639c5..afbc2e519606 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -31,7 +31,6 @@
#include "dt3155v4l.h"
-#define DT3155_VENDOR_ID 0x8086
#define DT3155_DEVICE_ID 0x1223
/* DT3155_CHUNK_SIZE is 4M (2^22) 8 full size buffers */
@@ -299,7 +298,7 @@ dt3155_buf_queue(struct vb2_buffer *vb)
* end driver-specific callbacks
*/
-const struct vb2_ops q_ops = {
+static const struct vb2_ops q_ops = {
.queue_setup = dt3155_queue_setup,
.wait_prepare = dt3155_wait_prepare,
.wait_finish = dt3155_wait_finish,
@@ -391,7 +390,7 @@ dt3155_open(struct file *filp)
goto err_alloc_queue;
}
pd->q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- pd->q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ pd->q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
pd->q->io_modes = VB2_READ | VB2_MMAP;
pd->q->ops = &q_ops;
pd->q->mem_ops = &vb2_dma_contig_memops;
@@ -975,7 +974,7 @@ dt3155_remove(struct pci_dev *pdev)
}
static const struct pci_device_id pci_ids[] = {
- { PCI_DEVICE(DT3155_VENDOR_ID, DT3155_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, DT3155_DEVICE_ID) },
{ 0, /* zero marks the end */ },
};
MODULE_DEVICE_TABLE(pci, pci_ids);
diff --git a/drivers/staging/media/go7007/Kconfig b/drivers/staging/media/go7007/Kconfig
index 04bd0fba7b1f..95a3af644a92 100644
--- a/drivers/staging/media/go7007/Kconfig
+++ b/drivers/staging/media/go7007/Kconfig
@@ -13,7 +13,6 @@ config VIDEO_GO7007
select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
- default N
---help---
This is a video4linux driver for the WIS GO7007 MPEG
encoder chip.
@@ -24,7 +23,6 @@ config VIDEO_GO7007
config VIDEO_GO7007_USB
tristate "WIS GO7007 USB support"
depends on VIDEO_GO7007 && USB
- default N
---help---
This is a video4linux driver for the WIS GO7007 MPEG
encoder chip over USB.
@@ -46,7 +44,6 @@ config VIDEO_GO7007_LOADER
config VIDEO_GO7007_USB_S2250_BOARD
tristate "Sensoray 2250/2251 support"
depends on VIDEO_GO7007_USB && USB
- default N
---help---
This is a video4linux driver for the Sensoray 2250/2251 device.
diff --git a/drivers/staging/media/go7007/go7007-loader.c b/drivers/staging/media/go7007/go7007-loader.c
index eecb1f2a5574..491d0e697f5a 100644
--- a/drivers/staging/media/go7007/go7007-loader.c
+++ b/drivers/staging/media/go7007/go7007-loader.c
@@ -28,7 +28,7 @@ struct fw_config {
const char * const fw_name2;
};
-struct fw_config fw_configs[] = {
+static struct fw_config fw_configs[] = {
{ 0x1943, 0xa250, "go7007/s2250-1.fw", "go7007/s2250-2.fw" },
{ 0x093b, 0xa002, "go7007/px-m402u.fw", NULL },
{ 0x093b, 0xa004, "go7007/px-tv402u.fw", NULL },
diff --git a/drivers/staging/media/go7007/go7007-v4l2.c b/drivers/staging/media/go7007/go7007-v4l2.c
index edc52e2630a9..b397aa3c0f44 100644
--- a/drivers/staging/media/go7007/go7007-v4l2.c
+++ b/drivers/staging/media/go7007/go7007-v4l2.c
@@ -433,7 +433,8 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
return set_capture_size(go, fmt, 0);
}
-static int go7007_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int go7007_queue_setup(struct vb2_queue *q,
+ const struct v4l2_format *fmt,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -470,7 +471,7 @@ static int go7007_buf_prepare(struct vb2_buffer *vb)
return 0;
}
-static int go7007_buf_finish(struct vb2_buffer *vb)
+static void go7007_buf_finish(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
struct go7007 *go = vb2_get_drv_priv(vq);
@@ -483,7 +484,6 @@ static int go7007_buf_finish(struct vb2_buffer *vb)
V4L2_BUF_FLAG_PFRAME);
buf->flags |= frame_type_flag;
buf->field = V4L2_FIELD_NONE;
- return 0;
}
static int go7007_start_streaming(struct vb2_queue *q, unsigned int count)
@@ -737,7 +737,8 @@ static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
if (a->index >= go->board_info->num_aud_inputs)
return -EINVAL;
- strlcpy(a->name, go->board_info->aud_inputs[a->index].name, sizeof(a->name));
+ strlcpy(a->name, go->board_info->aud_inputs[a->index].name,
+ sizeof(a->name));
a->capability = V4L2_AUDCAP_STEREO;
return 0;
}
@@ -747,12 +748,14 @@ static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
struct go7007 *go = video_drvdata(file);
a->index = go->aud_input;
- strlcpy(a->name, go->board_info->aud_inputs[go->aud_input].name, sizeof(a->name));
+ strlcpy(a->name, go->board_info->aud_inputs[go->aud_input].name,
+ sizeof(a->name));
a->capability = V4L2_AUDCAP_STEREO;
return 0;
}
-static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *a)
+static int vidioc_s_audio(struct file *file, void *fh,
+ const struct v4l2_audio *a)
{
struct go7007 *go = video_drvdata(file);
@@ -760,7 +763,7 @@ static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *
return -EINVAL;
go->aud_input = a->index;
v4l2_subdev_call(go->sd_audio, audio, s_routing,
- go->board_info->aud_inputs[go->aud_input].audio_input, 0, 0);
+ go->board_info->aud_inputs[go->aud_input].audio_input, 0, 0);
return 0;
}
@@ -960,8 +963,10 @@ int go7007_v4l2_ctrl_init(struct go7007 *go)
V4L2_MPEG_VIDEO_ASPECT_1x1);
ctrl = v4l2_ctrl_new_std(hdl, NULL,
V4L2_CID_JPEG_ACTIVE_MARKER, 0,
- V4L2_JPEG_ACTIVE_MARKER_DQT | V4L2_JPEG_ACTIVE_MARKER_DHT, 0,
- V4L2_JPEG_ACTIVE_MARKER_DQT | V4L2_JPEG_ACTIVE_MARKER_DHT);
+ V4L2_JPEG_ACTIVE_MARKER_DQT |
+ V4L2_JPEG_ACTIVE_MARKER_DHT, 0,
+ V4L2_JPEG_ACTIVE_MARKER_DQT |
+ V4L2_JPEG_ACTIVE_MARKER_DHT);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (hdl->error) {
@@ -989,7 +994,7 @@ int go7007_v4l2_init(struct go7007 *go)
go->vidq.mem_ops = &vb2_vmalloc_memops;
go->vidq.drv_priv = go;
go->vidq.buf_struct_size = sizeof(struct go7007_buffer);
- go->vidq.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ go->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
go->vidq.lock = &go->queue_lock;
rv = vb2_queue_init(&go->vidq);
if (rv)
diff --git a/drivers/staging/media/go7007/snd-go7007.c b/drivers/staging/media/go7007/snd-go7007.c
index 16dd64920767..9eb2a20ae40a 100644
--- a/drivers/staging/media/go7007/snd-go7007.c
+++ b/drivers/staging/media/go7007/snd-go7007.c
@@ -245,8 +245,8 @@ int go7007_snd_init(struct go7007 *go)
spin_lock_init(&gosnd->lock);
gosnd->hw_ptr = gosnd->w_idx = gosnd->avail = 0;
gosnd->capturing = 0;
- ret = snd_card_create(index[dev], id[dev], THIS_MODULE, 0,
- &gosnd->card);
+ ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0,
+ &gosnd->card);
if (ret < 0) {
kfree(gosnd);
return ret;
@@ -257,7 +257,6 @@ int go7007_snd_init(struct go7007 *go)
kfree(gosnd);
return ret;
}
- snd_card_set_dev(gosnd->card, go->dev);
ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm);
if (ret < 0) {
snd_card_free(gosnd->card);
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index f2dcc4a292da..f508a1372ad8 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -62,7 +62,7 @@
/* debugging support */
#ifdef CONFIG_USB_DEBUG
-static bool debug = 1;
+static bool debug = true;
#else
static bool debug;
#endif
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index f2d396cc4a4c..a5b62eec5e21 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -943,13 +943,17 @@ alloc_status_switch:
usb_free_urb(tx_urb);
case 6:
usb_free_urb(rx_urb);
+ /* fall-through */
case 5:
if (rbuf)
lirc_buffer_free(rbuf);
+ /* fall-through */
case 4:
kfree(rbuf);
+ /* fall-through */
case 3:
kfree(driver);
+ /* fall-through */
case 2:
kfree(context);
context = NULL;
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 0b589892351a..62f5137b947b 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -68,29 +68,29 @@
static bool debug;
static bool check_pselecd;
-unsigned int irq = LIRC_IRQ;
-unsigned int io = LIRC_PORT;
+static unsigned int irq = LIRC_IRQ;
+static unsigned int io = LIRC_PORT;
#ifdef LIRC_TIMER
-unsigned int timer;
-unsigned int default_timer = LIRC_TIMER;
+static unsigned int timer;
+static unsigned int default_timer = LIRC_TIMER;
#endif
#define RBUF_SIZE (256) /* this must be a power of 2 larger than 1 */
static int rbuf[RBUF_SIZE];
-DECLARE_WAIT_QUEUE_HEAD(lirc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(lirc_wait);
-unsigned int rptr;
-unsigned int wptr;
-unsigned int lost_irqs;
-int is_open;
+static unsigned int rptr;
+static unsigned int wptr;
+static unsigned int lost_irqs;
+static int is_open;
-struct parport *pport;
-struct pardevice *ppdevice;
-int is_claimed;
+static struct parport *pport;
+static struct pardevice *ppdevice;
+static int is_claimed;
-unsigned int tx_mask = 1;
+static unsigned int tx_mask = 1;
/*** Internal Functions ***/
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index d2445fdd9015..81f90e17e1e6 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -74,7 +74,7 @@ static void usb_tx_callback(struct urb *urb);
static int vfd_open(struct inode *inode, struct file *file);
static long vfd_ioctl(struct file *file, unsigned cmd, unsigned long arg);
static int vfd_close(struct inode *inode, struct file *file);
-static ssize_t vfd_write(struct file *file, const char *buf,
+static ssize_t vfd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos);
/* LIRC driver function prototypes */
@@ -120,7 +120,7 @@ struct sasem_context {
static const struct file_operations vfd_fops = {
.owner = THIS_MODULE,
.open = &vfd_open,
- .write = &vfd_write,
+ .write = vfd_write,
.unlocked_ioctl = &vfd_ioctl,
.release = &vfd_close,
.llseek = noop_llseek,
@@ -360,7 +360,7 @@ static int send_packet(struct sasem_context *context)
* and requires data in 9 consecutive USB interrupt packets,
* each packet carrying 8 bytes.
*/
-static ssize_t vfd_write(struct file *file, const char *buf,
+static ssize_t vfd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos)
{
int i;
@@ -389,7 +389,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
goto exit;
}
- data_buf = memdup_user(buf, n_bytes);
+ data_buf = memdup_user((void const __user *)buf, n_bytes);
if (IS_ERR(data_buf)) {
retval = PTR_ERR(data_buf);
goto exit;
@@ -865,15 +865,20 @@ alloc_status_switch:
usb_free_urb(tx_urb);
case 6:
usb_free_urb(rx_urb);
+ /* fall-through */
case 5:
lirc_buffer_free(rbuf);
+ /* fall-through */
case 4:
kfree(rbuf);
+ /* fall-through */
case 3:
kfree(driver);
+ /* fall-through */
case 2:
kfree(context);
context = NULL;
+ /* fall-through */
case 1:
if (retval == 0)
retval = -ENOMEM;
diff --git a/drivers/staging/media/msi3101/Kconfig b/drivers/staging/media/msi3101/Kconfig
index 0c349c8595e4..de0b3bba3873 100644
--- a/drivers/staging/media/msi3101/Kconfig
+++ b/drivers/staging/media/msi3101/Kconfig
@@ -1,5 +1,10 @@
config USB_MSI3101
tristate "Mirics MSi3101 SDR Dongle"
- depends on USB && VIDEO_DEV && VIDEO_V4L2
+ depends on USB && VIDEO_DEV && VIDEO_V4L2 && SPI
select VIDEOBUF2_CORE
select VIDEOBUF2_VMALLOC
+ select MEDIA_TUNER_MSI001
+
+config MEDIA_TUNER_MSI001
+ tristate "Mirics MSi001"
+ depends on VIDEO_V4L2 && SPI
diff --git a/drivers/staging/media/msi3101/Makefile b/drivers/staging/media/msi3101/Makefile
index 3730654b0eb9..daf4f58d9a56 100644
--- a/drivers/staging/media/msi3101/Makefile
+++ b/drivers/staging/media/msi3101/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_USB_MSI3101) += sdr-msi3101.o
+obj-$(CONFIG_MEDIA_TUNER_MSI001) += msi001.o
diff --git a/drivers/staging/media/msi3101/msi001.c b/drivers/staging/media/msi3101/msi001.c
new file mode 100644
index 000000000000..ac43bae10102
--- /dev/null
+++ b/drivers/staging/media/msi3101/msi001.c
@@ -0,0 +1,500 @@
+/*
+ * Mirics MSi001 silicon tuner driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/gcd.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+static const struct v4l2_frequency_band bands[] = {
+ {
+ .type = V4L2_TUNER_RF,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 49000000,
+ .rangehigh = 263000000,
+ }, {
+ .type = V4L2_TUNER_RF,
+ .index = 1,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 390000000,
+ .rangehigh = 960000000,
+ },
+};
+
+struct msi001 {
+ struct spi_device *spi;
+ struct v4l2_subdev sd;
+
+ /* Controls */
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *bandwidth_auto;
+ struct v4l2_ctrl *bandwidth;
+ struct v4l2_ctrl *lna_gain;
+ struct v4l2_ctrl *mixer_gain;
+ struct v4l2_ctrl *if_gain;
+
+ unsigned int f_tuner;
+};
+
+static inline struct msi001 *sd_to_msi001(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct msi001, sd);
+}
+
+static int msi001_wreg(struct msi001 *s, u32 data)
+{
+ /* Register format: 4 bits addr + 20 bits value */
+ return spi_write(s->spi, &data, 3);
+};
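
The register word written above carries the 4-bit register address in its low nibble and the 20-bit value above it; spi_write() then sends the first three bytes of the u32 as stored in memory (the three low-order bytes on a little-endian host). A minimal sketch of that packing, with msi001_pack() as a hypothetical helper rather than part of the driver:

static u32 msi001_pack(u8 addr, u32 value)
{
	/* bits [3:0] = register address, bits [23:4] = 20-bit value */
	return (addr & 0xf) | ((value & 0xfffff) << 4);
}

Every msi001_wreg() call below builds its argument in exactly this shape, e.g. "reg = 5 << 0; reg |= thresh << 4;" targets register 5.
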
+
+static int msi001_set_gain(struct msi001 *s, int lna_gain, int mixer_gain,
+ int if_gain)
+{
+ int ret;
+ u32 reg;
+ dev_dbg(&s->spi->dev, "%s: lna=%d mixer=%d if=%d\n", __func__,
+ lna_gain, mixer_gain, if_gain);
+
+ reg = 1 << 0;
+ reg |= (59 - if_gain) << 4;
+ reg |= 0 << 10;
+ reg |= (1 - mixer_gain) << 12;
+ reg |= (1 - lna_gain) << 13;
+ reg |= 4 << 14;
+ reg |= 0 << 17;
+ ret = msi001_wreg(s, reg);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&s->spi->dev, "%s: failed %d\n", __func__, ret);
+ return ret;
+};
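
Register 1 above appears to encode gain reduction rather than gain, which is why each control is written inverted: (59 - if_gain), (1 - mixer_gain), (1 - lna_gain). Two illustrative values, derived purely from the shifts above (the hardware register map itself is not documented in this patch):

	lna_gain=1, mixer_gain=1, if_gain=59 (maximum gain):
		reg = 1 | 0 << 4 | 0 << 12 | 0 << 13 | 4 << 14 = 0x10001
	lna_gain=0, mixer_gain=0, if_gain=0 (maximum attenuation):
		reg = 1 | 59 << 4 | 1 << 12 | 1 << 13 | 4 << 14 = 0x133b1
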
+
+static int msi001_set_tuner(struct msi001 *s)
+{
+ int ret, i;
+ unsigned int n, m, thresh, frac, vco_step, tmp, f_if1;
+ u32 reg;
+ u64 f_vco, tmp64;
+ u8 mode, filter_mode, lo_div;
+ static const struct {
+ u32 rf;
+ u8 mode;
+ u8 lo_div;
+ } band_lut[] = {
+ { 50000000, 0xe1, 16}, /* AM_MODE2, antenna 2 */
+ {108000000, 0x42, 32}, /* VHF_MODE */
+ {330000000, 0x44, 16}, /* B3_MODE */
+ {960000000, 0x48, 4}, /* B45_MODE */
+ { ~0U, 0x50, 2}, /* BL_MODE */
+ };
+ static const struct {
+ u32 freq;
+ u8 filter_mode;
+ } if_freq_lut[] = {
+ { 0, 0x03}, /* Zero IF */
+ { 450000, 0x02}, /* 450 kHz IF */
+ {1620000, 0x01}, /* 1.62 MHz IF */
+ {2048000, 0x00}, /* 2.048 MHz IF */
+ };
+ static const struct {
+ u32 freq;
+ u8 val;
+ } bandwidth_lut[] = {
+ { 200000, 0x00}, /* 200 kHz */
+ { 300000, 0x01}, /* 300 kHz */
+ { 600000, 0x02}, /* 600 kHz */
+ {1536000, 0x03}, /* 1.536 MHz */
+ {5000000, 0x04}, /* 5 MHz */
+ {6000000, 0x05}, /* 6 MHz */
+ {7000000, 0x06}, /* 7 MHz */
+ {8000000, 0x07}, /* 8 MHz */
+ };
+
+ unsigned int f_rf = s->f_tuner;
+
+ /*
+ * bandwidth (Hz)
+ * 200000, 300000, 600000, 1536000, 5000000, 6000000, 7000000, 8000000
+ */
+ unsigned int bandwidth;
+
+ /*
+ * intermediate frequency (Hz)
+ * 0, 450000, 1620000, 2048000
+ */
+ unsigned int f_if = 0;
+ #define F_REF 24000000
+ #define R_REF 4
+ #define F_OUT_STEP 1
+
+ dev_dbg(&s->spi->dev,
+ "%s: f_rf=%d f_if=%d\n",
+ __func__, f_rf, f_if);
+
+ for (i = 0; i < ARRAY_SIZE(band_lut); i++) {
+ if (f_rf <= band_lut[i].rf) {
+ mode = band_lut[i].mode;
+ lo_div = band_lut[i].lo_div;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(band_lut)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* AM_MODE is upconverted */
+ if ((mode >> 0) & 0x1)
+ f_if1 = 5 * F_REF;
+ else
+ f_if1 = 0;
+
+ for (i = 0; i < ARRAY_SIZE(if_freq_lut); i++) {
+ if (f_if == if_freq_lut[i].freq) {
+ filter_mode = if_freq_lut[i].filter_mode;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(if_freq_lut)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* filters */
+ bandwidth = s->bandwidth->val;
+ bandwidth = clamp(bandwidth, 200000U, 8000000U);
+
+ for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) {
+ if (bandwidth <= bandwidth_lut[i].freq) {
+ bandwidth = bandwidth_lut[i].val;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(bandwidth_lut)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ s->bandwidth->val = bandwidth_lut[i].freq;
+
+ dev_dbg(&s->spi->dev, "%s: bandwidth selected=%d\n",
+ __func__, bandwidth_lut[i].freq);
+
+ f_vco = (f_rf + f_if + f_if1) * lo_div;
+ tmp64 = f_vco;
+ m = do_div(tmp64, F_REF * R_REF);
+ n = (unsigned int) tmp64;
+
+ vco_step = F_OUT_STEP * lo_div;
+ thresh = (F_REF * R_REF) / vco_step;
+ frac = 1ul * thresh * m / (F_REF * R_REF);
+
+	/* Find the greatest common divisor and reduce both terms by it. */
+ tmp = gcd(thresh, frac);
+ thresh /= tmp;
+ frac /= tmp;
+
+ /* Force divide to reg max. Resolution will be reduced. */
+ tmp = DIV_ROUND_UP(thresh, 4095);
+ thresh = DIV_ROUND_CLOSEST(thresh, tmp);
+ frac = DIV_ROUND_CLOSEST(frac, tmp);
+
+ /* calc real RF set */
+ tmp = 1ul * F_REF * R_REF * n;
+ tmp += 1ul * F_REF * R_REF * frac / thresh;
+ tmp /= lo_div;
+
+ dev_dbg(&s->spi->dev,
+ "%s: rf=%u:%u n=%d thresh=%d frac=%d\n",
+ __func__, f_rf, tmp, n, thresh, frac);
+
+ ret = msi001_wreg(s, 0x00000e);
+ if (ret)
+ goto err;
+
+ ret = msi001_wreg(s, 0x000003);
+ if (ret)
+ goto err;
+
+ reg = 0 << 0;
+ reg |= mode << 4;
+ reg |= filter_mode << 12;
+ reg |= bandwidth << 14;
+ reg |= 0x02 << 17;
+ reg |= 0x00 << 20;
+ ret = msi001_wreg(s, reg);
+ if (ret)
+ goto err;
+
+ reg = 5 << 0;
+ reg |= thresh << 4;
+ reg |= 1 << 19;
+ reg |= 1 << 21;
+ ret = msi001_wreg(s, reg);
+ if (ret)
+ goto err;
+
+ reg = 2 << 0;
+ reg |= frac << 4;
+ reg |= n << 16;
+ ret = msi001_wreg(s, reg);
+ if (ret)
+ goto err;
+
+ ret = msi001_set_gain(s, s->lna_gain->cur.val, s->mixer_gain->cur.val,
+ s->if_gain->cur.val);
+ if (ret)
+ goto err;
+
+ reg = 6 << 0;
+ reg |= 63 << 4;
+ reg |= 4095 << 10;
+ ret = msi001_wreg(s, reg);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&s->spi->dev, "%s: failed %d\n", __func__, ret);
+ return ret;
+};
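
A worked pass through the fractional-N arithmetic above, assuming a 100 MHz zero-IF tune (VHF band, lo_div = 32):

	f_vco = 100 MHz * 32 = 3200 MHz
	F_REF * R_REF = 24 MHz * 4 = 96 MHz  ->  n = 33, m = 32 MHz
	vco_step = 1 * 32  ->  thresh = 96 MHz / 32 = 3000000
	frac = 3000000 * 32 MHz / 96 MHz = 1000000
	gcd(3000000, 1000000) = 1000000  ->  thresh = 3, frac = 1 (already within the 4095 register limit)
	check: (96 MHz * 33 + 96 MHz * 1 / 3) / 32 = 100 MHz

so this tune lands exactly on the requested frequency; for frequencies that do not divide as cleanly, the gcd reduction and the forced divide down to 4095 trade resolution for register range.
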
+
+static int msi001_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct msi001 *s = sd_to_msi001(sd);
+ int ret;
+ dev_dbg(&s->spi->dev, "%s: on=%d\n", __func__, on);
+
+ if (on)
+ ret = 0;
+ else
+ ret = msi001_wreg(s, 0x000000);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_core_ops msi001_core_ops = {
+ .s_power = msi001_s_power,
+};
+
+static int msi001_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *v)
+{
+ struct msi001 *s = sd_to_msi001(sd);
+ dev_dbg(&s->spi->dev, "%s: index=%d\n", __func__, v->index);
+
+ strlcpy(v->name, "Mirics MSi001", sizeof(v->name));
+ v->type = V4L2_TUNER_RF;
+ v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ v->rangelow = 49000000;
+ v->rangehigh = 960000000;
+
+ return 0;
+}
+
+static int msi001_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *v)
+{
+ struct msi001 *s = sd_to_msi001(sd);
+ dev_dbg(&s->spi->dev, "%s: index=%d\n", __func__, v->index);
+ return 0;
+}
+
+static int msi001_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
+{
+ struct msi001 *s = sd_to_msi001(sd);
+ dev_dbg(&s->spi->dev, "%s: tuner=%d\n", __func__, f->tuner);
+ f->frequency = s->f_tuner;
+ return 0;
+}
+
+static int msi001_s_frequency(struct v4l2_subdev *sd,
+ const struct v4l2_frequency *f)
+{
+ struct msi001 *s = sd_to_msi001(sd);
+ unsigned int band;
+ dev_dbg(&s->spi->dev, "%s: tuner=%d type=%d frequency=%u\n",
+ __func__, f->tuner, f->type, f->frequency);
+
+ if (f->frequency < ((bands[0].rangehigh + bands[1].rangelow) / 2))
+ band = 0;
+ else
+ band = 1;
+ s->f_tuner = clamp_t(unsigned int, f->frequency,
+ bands[band].rangelow, bands[band].rangehigh);
+
+ return msi001_set_tuner(s);
+}
+
+static int msi001_enum_freq_bands(struct v4l2_subdev *sd,
+ struct v4l2_frequency_band *band)
+{
+ struct msi001 *s = sd_to_msi001(sd);
+ dev_dbg(&s->spi->dev, "%s: tuner=%d type=%d index=%d\n",
+ __func__, band->tuner, band->type, band->index);
+
+ if (band->index >= ARRAY_SIZE(bands))
+ return -EINVAL;
+
+ band->capability = bands[band->index].capability;
+ band->rangelow = bands[band->index].rangelow;
+ band->rangehigh = bands[band->index].rangehigh;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_tuner_ops msi001_tuner_ops = {
+ .g_tuner = msi001_g_tuner,
+ .s_tuner = msi001_s_tuner,
+ .g_frequency = msi001_g_frequency,
+ .s_frequency = msi001_s_frequency,
+ .enum_freq_bands = msi001_enum_freq_bands,
+};
+
+static const struct v4l2_subdev_ops msi001_ops = {
+ .core = &msi001_core_ops,
+ .tuner = &msi001_tuner_ops,
+};
+
+static int msi001_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct msi001 *s = container_of(ctrl->handler, struct msi001, hdl);
+
+ int ret;
+ dev_dbg(&s->spi->dev,
+ "%s: id=%d name=%s val=%d min=%d max=%d step=%d\n",
+ __func__, ctrl->id, ctrl->name, ctrl->val,
+ ctrl->minimum, ctrl->maximum, ctrl->step);
+
+ switch (ctrl->id) {
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
+ case V4L2_CID_RF_TUNER_BANDWIDTH:
+ ret = msi001_set_tuner(s);
+ break;
+ case V4L2_CID_RF_TUNER_LNA_GAIN:
+ ret = msi001_set_gain(s, s->lna_gain->val,
+ s->mixer_gain->cur.val, s->if_gain->cur.val);
+ break;
+ case V4L2_CID_RF_TUNER_MIXER_GAIN:
+ ret = msi001_set_gain(s, s->lna_gain->cur.val,
+ s->mixer_gain->val, s->if_gain->cur.val);
+ break;
+ case V4L2_CID_RF_TUNER_IF_GAIN:
+ ret = msi001_set_gain(s, s->lna_gain->cur.val,
+ s->mixer_gain->cur.val, s->if_gain->val);
+ break;
+ default:
+		dev_dbg(&s->spi->dev, "%s: unknown control %d\n",
+ __func__, ctrl->id);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops msi001_ctrl_ops = {
+ .s_ctrl = msi001_s_ctrl,
+};
+
+static int msi001_probe(struct spi_device *spi)
+{
+ struct msi001 *s;
+ int ret;
+ dev_dbg(&spi->dev, "%s:\n", __func__);
+
+ s = kzalloc(sizeof(struct msi001), GFP_KERNEL);
+ if (s == NULL) {
+ ret = -ENOMEM;
+ dev_dbg(&spi->dev, "Could not allocate memory for msi001\n");
+ goto err_kfree;
+ }
+
+ s->spi = spi;
+ s->f_tuner = bands[0].rangelow;
+ v4l2_spi_subdev_init(&s->sd, spi, &msi001_ops);
+
+ /* Register controls */
+ v4l2_ctrl_handler_init(&s->hdl, 5);
+ s->bandwidth_auto = v4l2_ctrl_new_std(&s->hdl, &msi001_ctrl_ops,
+ V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 1, 1);
+ s->bandwidth = v4l2_ctrl_new_std(&s->hdl, &msi001_ctrl_ops,
+ V4L2_CID_RF_TUNER_BANDWIDTH, 200000, 8000000, 1, 200000);
+ v4l2_ctrl_auto_cluster(2, &s->bandwidth_auto, 0, false);
+ s->lna_gain = v4l2_ctrl_new_std(&s->hdl, &msi001_ctrl_ops,
+ V4L2_CID_RF_TUNER_LNA_GAIN, 0, 1, 1, 1);
+ s->mixer_gain = v4l2_ctrl_new_std(&s->hdl, &msi001_ctrl_ops,
+ V4L2_CID_RF_TUNER_MIXER_GAIN, 0, 1, 1, 1);
+ s->if_gain = v4l2_ctrl_new_std(&s->hdl, &msi001_ctrl_ops,
+ V4L2_CID_RF_TUNER_IF_GAIN, 0, 59, 1, 0);
+ if (s->hdl.error) {
+ ret = s->hdl.error;
+ dev_err(&s->spi->dev, "Could not initialize controls\n");
+ /* control init failed, free handler */
+ goto err_ctrl_handler_free;
+ }
+
+ s->sd.ctrl_handler = &s->hdl;
+ return 0;
+
+err_ctrl_handler_free:
+ v4l2_ctrl_handler_free(&s->hdl);
+err_kfree:
+ kfree(s);
+ return ret;
+}
+
+static int msi001_remove(struct spi_device *spi)
+{
+ struct v4l2_subdev *sd = spi_get_drvdata(spi);
+ struct msi001 *s = sd_to_msi001(sd);
+ dev_dbg(&spi->dev, "%s:\n", __func__);
+
+ /*
+ * Registered by v4l2_spi_new_subdev() from master driver, but we must
+ * unregister it from here. Weird.
+ */
+ v4l2_device_unregister_subdev(&s->sd);
+ v4l2_ctrl_handler_free(&s->hdl);
+ kfree(s);
+ return 0;
+}
+
+static const struct spi_device_id msi001_id[] = {
+ {"msi001", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, msi001_id);
+
+static struct spi_driver msi001_driver = {
+ .driver = {
+ .name = "msi001",
+ .owner = THIS_MODULE,
+ },
+ .probe = msi001_probe,
+ .remove = msi001_remove,
+ .id_table = msi001_id,
+};
+module_spi_driver(msi001_driver);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Mirics MSi001");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/msi3101/sdr-msi3101.c b/drivers/staging/media/msi3101/sdr-msi3101.c
index 4c3bf776bb20..260d1b736721 100644
--- a/drivers/staging/media/msi3101/sdr-msi3101.c
+++ b/drivers/staging/media/msi3101/sdr-msi3101.c
@@ -21,25 +21,10 @@
* (C) 1999-2004 Nemosoft Unv.
* (C) 2004-2006 Luc Saillard (luc@saillard.org)
* (C) 2011 Hans de Goede <hdegoede@redhat.com>
- *
- * Development tree of that driver will be on:
- * http://git.linuxtv.org/anttip/media_tree.git/shortlog/refs/heads/mirics
- *
- * GNU Radio plugin "gr-kernel" for device usage will be on:
- * http://git.linuxtv.org/anttip/gr-kernel.git
- *
- * TODO:
- * Help is very highly welcome for these + all the others you could imagine:
- * - split USB ADC interface and RF tuner to own drivers (msi2500 and msi001)
- * - move controls to V4L2 API
- * - use libv4l2 for stream format conversions
- * - gr-kernel: switch to v4l2_mmap (current read eats a lot of cpu)
- * - SDRSharp support
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/gcd.h>
#include <asm/div64.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -47,317 +32,7 @@
#include <media/v4l2-event.h>
#include <linux/usb.h>
#include <media/videobuf2-vmalloc.h>
-
-struct msi3101_gain {
- u8 tot:7;
- u8 baseband:6;
- bool lna:1;
- bool mixer:1;
-};
-
-/* 60 – 120 MHz band, lna 24dB, mixer 19dB */
-static const struct msi3101_gain msi3101_gain_lut_120[] = {
- { 0, 0, 0, 0},
- { 1, 1, 0, 0},
- { 2, 2, 0, 0},
- { 3, 3, 0, 0},
- { 4, 4, 0, 0},
- { 5, 5, 0, 0},
- { 6, 6, 0, 0},
- { 7, 7, 0, 0},
- { 8, 8, 0, 0},
- { 9, 9, 0, 0},
- { 10, 10, 0, 0},
- { 11, 11, 0, 0},
- { 12, 12, 0, 0},
- { 13, 13, 0, 0},
- { 14, 14, 0, 0},
- { 15, 15, 0, 0},
- { 16, 16, 0, 0},
- { 17, 17, 0, 0},
- { 18, 18, 0, 0},
- { 19, 19, 0, 0},
- { 20, 20, 0, 0},
- { 21, 21, 0, 0},
- { 22, 22, 0, 0},
- { 23, 23, 0, 0},
- { 24, 24, 0, 0},
- { 25, 25, 0, 0},
- { 26, 26, 0, 0},
- { 27, 27, 0, 0},
- { 28, 28, 0, 0},
- { 29, 5, 1, 0},
- { 30, 6, 1, 0},
- { 31, 7, 1, 0},
- { 32, 8, 1, 0},
- { 33, 9, 1, 0},
- { 34, 10, 1, 0},
- { 35, 11, 1, 0},
- { 36, 12, 1, 0},
- { 37, 13, 1, 0},
- { 38, 14, 1, 0},
- { 39, 15, 1, 0},
- { 40, 16, 1, 0},
- { 41, 17, 1, 0},
- { 42, 18, 1, 0},
- { 43, 19, 1, 0},
- { 44, 20, 1, 0},
- { 45, 21, 1, 0},
- { 46, 22, 1, 0},
- { 47, 23, 1, 0},
- { 48, 24, 1, 0},
- { 49, 25, 1, 0},
- { 50, 26, 1, 0},
- { 51, 27, 1, 0},
- { 52, 28, 1, 0},
- { 53, 29, 1, 0},
- { 54, 30, 1, 0},
- { 55, 31, 1, 0},
- { 56, 32, 1, 0},
- { 57, 33, 1, 0},
- { 58, 34, 1, 0},
- { 59, 35, 1, 0},
- { 60, 36, 1, 0},
- { 61, 37, 1, 0},
- { 62, 38, 1, 0},
- { 63, 39, 1, 0},
- { 64, 40, 1, 0},
- { 65, 41, 1, 0},
- { 66, 42, 1, 0},
- { 67, 43, 1, 0},
- { 68, 44, 1, 0},
- { 69, 45, 1, 0},
- { 70, 46, 1, 0},
- { 71, 47, 1, 0},
- { 72, 48, 1, 0},
- { 73, 49, 1, 0},
- { 74, 50, 1, 0},
- { 75, 51, 1, 0},
- { 76, 52, 1, 0},
- { 77, 53, 1, 0},
- { 78, 54, 1, 0},
- { 79, 55, 1, 0},
- { 80, 56, 1, 0},
- { 81, 57, 1, 0},
- { 82, 58, 1, 0},
- { 83, 40, 1, 1},
- { 84, 41, 1, 1},
- { 85, 42, 1, 1},
- { 86, 43, 1, 1},
- { 87, 44, 1, 1},
- { 88, 45, 1, 1},
- { 89, 46, 1, 1},
- { 90, 47, 1, 1},
- { 91, 48, 1, 1},
- { 92, 49, 1, 1},
- { 93, 50, 1, 1},
- { 94, 51, 1, 1},
- { 95, 52, 1, 1},
- { 96, 53, 1, 1},
- { 97, 54, 1, 1},
- { 98, 55, 1, 1},
- { 99, 56, 1, 1},
- {100, 57, 1, 1},
- {101, 58, 1, 1},
- {102, 59, 1, 1},
-};
-
-/* 120 – 245 MHz band, lna 24dB, mixer 19dB */
-static const struct msi3101_gain msi3101_gain_lut_245[] = {
- { 0, 0, 0, 0},
- { 1, 1, 0, 0},
- { 2, 2, 0, 0},
- { 3, 3, 0, 0},
- { 4, 4, 0, 0},
- { 5, 5, 0, 0},
- { 6, 6, 0, 0},
- { 7, 7, 0, 0},
- { 8, 8, 0, 0},
- { 9, 9, 0, 0},
- { 10, 10, 0, 0},
- { 11, 11, 0, 0},
- { 12, 12, 0, 0},
- { 13, 13, 0, 0},
- { 14, 14, 0, 0},
- { 15, 15, 0, 0},
- { 16, 16, 0, 0},
- { 17, 17, 0, 0},
- { 18, 18, 0, 0},
- { 19, 19, 0, 0},
- { 20, 20, 0, 0},
- { 21, 21, 0, 0},
- { 22, 22, 0, 0},
- { 23, 23, 0, 0},
- { 24, 24, 0, 0},
- { 25, 25, 0, 0},
- { 26, 26, 0, 0},
- { 27, 27, 0, 0},
- { 28, 28, 0, 0},
- { 29, 5, 1, 0},
- { 30, 6, 1, 0},
- { 31, 7, 1, 0},
- { 32, 8, 1, 0},
- { 33, 9, 1, 0},
- { 34, 10, 1, 0},
- { 35, 11, 1, 0},
- { 36, 12, 1, 0},
- { 37, 13, 1, 0},
- { 38, 14, 1, 0},
- { 39, 15, 1, 0},
- { 40, 16, 1, 0},
- { 41, 17, 1, 0},
- { 42, 18, 1, 0},
- { 43, 19, 1, 0},
- { 44, 20, 1, 0},
- { 45, 21, 1, 0},
- { 46, 22, 1, 0},
- { 47, 23, 1, 0},
- { 48, 24, 1, 0},
- { 49, 25, 1, 0},
- { 50, 26, 1, 0},
- { 51, 27, 1, 0},
- { 52, 28, 1, 0},
- { 53, 29, 1, 0},
- { 54, 30, 1, 0},
- { 55, 31, 1, 0},
- { 56, 32, 1, 0},
- { 57, 33, 1, 0},
- { 58, 34, 1, 0},
- { 59, 35, 1, 0},
- { 60, 36, 1, 0},
- { 61, 37, 1, 0},
- { 62, 38, 1, 0},
- { 63, 39, 1, 0},
- { 64, 40, 1, 0},
- { 65, 41, 1, 0},
- { 66, 42, 1, 0},
- { 67, 43, 1, 0},
- { 68, 44, 1, 0},
- { 69, 45, 1, 0},
- { 70, 46, 1, 0},
- { 71, 47, 1, 0},
- { 72, 48, 1, 0},
- { 73, 49, 1, 0},
- { 74, 50, 1, 0},
- { 75, 51, 1, 0},
- { 76, 52, 1, 0},
- { 77, 53, 1, 0},
- { 78, 54, 1, 0},
- { 79, 55, 1, 0},
- { 80, 56, 1, 0},
- { 81, 57, 1, 0},
- { 82, 58, 1, 0},
- { 83, 40, 1, 1},
- { 84, 41, 1, 1},
- { 85, 42, 1, 1},
- { 86, 43, 1, 1},
- { 87, 44, 1, 1},
- { 88, 45, 1, 1},
- { 89, 46, 1, 1},
- { 90, 47, 1, 1},
- { 91, 48, 1, 1},
- { 92, 49, 1, 1},
- { 93, 50, 1, 1},
- { 94, 51, 1, 1},
- { 95, 52, 1, 1},
- { 96, 53, 1, 1},
- { 97, 54, 1, 1},
- { 98, 55, 1, 1},
- { 99, 56, 1, 1},
- {100, 57, 1, 1},
- {101, 58, 1, 1},
- {102, 59, 1, 1},
-};
-
-/* 420 – 1000 MHz band, lna 7dB, mixer 19dB */
-static const struct msi3101_gain msi3101_gain_lut_1000[] = {
- { 0, 0, 0, 0},
- { 1, 1, 0, 0},
- { 2, 2, 0, 0},
- { 3, 3, 0, 0},
- { 4, 4, 0, 0},
- { 5, 5, 0, 0},
- { 6, 6, 0, 0},
- { 7, 7, 0, 0},
- { 8, 8, 0, 0},
- { 9, 9, 0, 0},
- { 10, 10, 0, 0},
- { 11, 11, 0, 0},
- { 12, 5, 1, 0},
- { 13, 6, 1, 0},
- { 14, 7, 1, 0},
- { 15, 8, 1, 0},
- { 16, 9, 1, 0},
- { 17, 10, 1, 0},
- { 18, 11, 1, 0},
- { 19, 12, 1, 0},
- { 20, 13, 1, 0},
- { 21, 14, 1, 0},
- { 22, 15, 1, 0},
- { 23, 16, 1, 0},
- { 24, 17, 1, 0},
- { 25, 18, 1, 0},
- { 26, 19, 1, 0},
- { 27, 20, 1, 0},
- { 28, 21, 1, 0},
- { 29, 22, 1, 0},
- { 30, 23, 1, 0},
- { 31, 24, 1, 0},
- { 32, 25, 1, 0},
- { 33, 26, 1, 0},
- { 34, 27, 1, 0},
- { 35, 28, 1, 0},
- { 36, 29, 1, 0},
- { 37, 30, 1, 0},
- { 38, 31, 1, 0},
- { 39, 32, 1, 0},
- { 40, 33, 1, 0},
- { 41, 34, 1, 0},
- { 42, 35, 1, 0},
- { 43, 36, 1, 0},
- { 44, 37, 1, 0},
- { 45, 38, 1, 0},
- { 46, 39, 1, 0},
- { 47, 40, 1, 0},
- { 48, 41, 1, 0},
- { 49, 42, 1, 0},
- { 50, 43, 1, 0},
- { 51, 44, 1, 0},
- { 52, 45, 1, 0},
- { 53, 46, 1, 0},
- { 54, 47, 1, 0},
- { 55, 48, 1, 0},
- { 56, 49, 1, 0},
- { 57, 50, 1, 0},
- { 58, 51, 1, 0},
- { 59, 52, 1, 0},
- { 60, 53, 1, 0},
- { 61, 54, 1, 0},
- { 62, 55, 1, 0},
- { 63, 56, 1, 0},
- { 64, 57, 1, 0},
- { 65, 58, 1, 0},
- { 66, 40, 1, 1},
- { 67, 41, 1, 1},
- { 68, 42, 1, 1},
- { 69, 43, 1, 1},
- { 70, 44, 1, 1},
- { 71, 45, 1, 1},
- { 72, 46, 1, 1},
- { 73, 47, 1, 1},
- { 74, 48, 1, 1},
- { 75, 49, 1, 1},
- { 76, 50, 1, 1},
- { 77, 51, 1, 1},
- { 78, 52, 1, 1},
- { 79, 53, 1, 1},
- { 80, 54, 1, 1},
- { 81, 55, 1, 1},
- { 82, 56, 1, 1},
- { 83, 57, 1, 1},
- { 84, 58, 1, 1},
- { 85, 59, 1, 1},
-};
+#include <linux/spi/spi.h>
/*
* iConfiguration 0
@@ -377,13 +52,54 @@ static const struct msi3101_gain msi3101_gain_lut_1000[] = {
#define MAX_ISOC_ERRORS 20
/* TODO: These should be moved to V4L2 API */
-#define MSI3101_CID_SAMPLING_MODE ((V4L2_CID_USER_BASE | 0xf000) + 0)
-#define MSI3101_CID_SAMPLING_RATE ((V4L2_CID_USER_BASE | 0xf000) + 1)
-#define MSI3101_CID_SAMPLING_RESOLUTION ((V4L2_CID_USER_BASE | 0xf000) + 2)
-#define MSI3101_CID_TUNER_RF ((V4L2_CID_USER_BASE | 0xf000) + 10)
-#define MSI3101_CID_TUNER_BW ((V4L2_CID_USER_BASE | 0xf000) + 11)
-#define MSI3101_CID_TUNER_IF ((V4L2_CID_USER_BASE | 0xf000) + 12)
-#define MSI3101_CID_TUNER_GAIN ((V4L2_CID_USER_BASE | 0xf000) + 13)
+#define V4L2_PIX_FMT_SDR_S8 v4l2_fourcc('D', 'S', '0', '8') /* signed 8-bit */
+#define V4L2_PIX_FMT_SDR_S12 v4l2_fourcc('D', 'S', '1', '2') /* signed 12-bit */
+#define V4L2_PIX_FMT_SDR_S14 v4l2_fourcc('D', 'S', '1', '4') /* signed 14-bit */
+#define V4L2_PIX_FMT_SDR_MSI2500_384 v4l2_fourcc('M', '3', '8', '4') /* Mirics MSi2500 format 384 */
+
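
These private four-character codes are packed by the standard v4l2_fourcc() macro from include/uapi/linux/videodev2.h, least significant character first:

#define v4l2_fourcc(a, b, c, d) \
	((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))

/* 'D' 'S' '0' '8'  ->  0x44 | 0x53 << 8 | 0x30 << 16 | 0x38 << 24 = 0x38305344 */

so V4L2_PIX_FMT_SDR_S8 evaluates to 0x38305344; the other three definitions follow the same pattern.
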
+static const struct v4l2_frequency_band bands[] = {
+ {
+ .tuner = 0,
+ .type = V4L2_TUNER_ADC,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 1200000,
+ .rangehigh = 15000000,
+ },
+};
+
+/* stream formats */
+struct msi3101_format {
+ char *name;
+ u32 pixelformat;
+};
+
+/* format descriptions for capture and preview */
+static struct msi3101_format formats[] = {
+ {
+ .name = "IQ U8",
+ .pixelformat = V4L2_SDR_FMT_CU8,
+ }, {
+ .name = "IQ U16LE",
+ .pixelformat = V4L2_SDR_FMT_CU16LE,
+#if 0
+ }, {
+ .name = "8-bit signed",
+ .pixelformat = V4L2_PIX_FMT_SDR_S8,
+ }, {
+ .name = "10+2-bit signed",
+ .pixelformat = V4L2_PIX_FMT_SDR_MSI2500_384,
+ }, {
+ .name = "12-bit signed",
+ .pixelformat = V4L2_PIX_FMT_SDR_S12,
+ }, {
+ .name = "14-bit signed",
+ .pixelformat = V4L2_PIX_FMT_SDR_S14,
+#endif
+ },
+};
+
+static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
/* intermediate buffers with raw data from the USB device */
struct msi3101_frame_buf {
@@ -394,6 +110,8 @@ struct msi3101_frame_buf {
struct msi3101_state {
struct video_device vdev;
struct v4l2_device v4l2_dev;
+ struct v4l2_subdev *v4l2_subdev;
+ struct spi_master *master;
/* videobuf2 queue and queued buffers list */
struct vb2_queue vb_queue;
@@ -407,24 +125,22 @@ struct msi3101_state {
/* Pointer to our usb_device, will be NULL after unplug */
 struct usb_device *udev; /* Both mutexes must be held when setting! */
+ unsigned int f_adc;
+ u32 pixelformat;
+
unsigned int isoc_errors; /* number of contiguous ISOC errors */
unsigned int vb_full; /* vb is full and packets dropped */
struct urb *urbs[MAX_ISO_BUFS];
- int (*convert_stream) (struct msi3101_state *s, u32 *dst, u8 *src,
+ int (*convert_stream)(struct msi3101_state *s, u8 *dst, u8 *src,
unsigned int src_len);
/* Controls */
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *ctrl_sampling_rate;
- struct v4l2_ctrl *ctrl_tuner_rf;
- struct v4l2_ctrl *ctrl_tuner_bw;
- struct v4l2_ctrl *ctrl_tuner_if;
- struct v4l2_ctrl *ctrl_tuner_gain;
+ struct v4l2_ctrl_handler hdl;
u32 next_sample; /* for track lost packets */
u32 sample; /* for sample rate calc */
- unsigned long jiffies;
+ unsigned long jiffies_next;
unsigned int sample_ctrl_bit[4];
};
@@ -448,98 +164,79 @@ leave:
/*
* +===========================================================================
- * | 00-1023 | USB packet type '384'
+ * | 00-1023 | USB packet type '504'
* +===========================================================================
* | 00- 03 | sequence number of first sample in that USB packet
* +---------------------------------------------------------------------------
* | 04- 15 | garbage
* +---------------------------------------------------------------------------
- * | 16- 175 | samples
- * +---------------------------------------------------------------------------
- * | 176- 179 | control bits for previous samples
- * +---------------------------------------------------------------------------
- * | 180- 339 | samples
- * +---------------------------------------------------------------------------
- * | 340- 343 | control bits for previous samples
- * +---------------------------------------------------------------------------
- * | 344- 503 | samples
- * +---------------------------------------------------------------------------
- * | 504- 507 | control bits for previous samples
- * +---------------------------------------------------------------------------
- * | 508- 667 | samples
- * +---------------------------------------------------------------------------
- * | 668- 671 | control bits for previous samples
- * +---------------------------------------------------------------------------
- * | 672- 831 | samples
- * +---------------------------------------------------------------------------
- * | 832- 835 | control bits for previous samples
- * +---------------------------------------------------------------------------
- * | 836- 995 | samples
- * +---------------------------------------------------------------------------
- * | 996- 999 | control bits for previous samples
- * +---------------------------------------------------------------------------
- * | 1000-1023 | garbage
+ * | 16-1023 | samples
* +---------------------------------------------------------------------------
- *
- * Bytes 4 - 7 could have some meaning?
- *
- * Control bits for previous samples is 32-bit field, containing 16 x 2-bit
- * numbers. This results one 2-bit number for 8 samples. It is likely used for
- * for bit shifting sample by given bits, increasing actual sampling resolution.
- * Number 2 (0b10) was never seen.
- *
- * 6 * 16 * 2 * 4 = 768 samples. 768 * 4 = 3072 bytes
+ * signed 8-bit sample
+ * 504 * 2 = 1008 samples
*/
+static int msi3101_convert_stream_504(struct msi3101_state *s, u8 *dst,
+ u8 *src, unsigned int src_len)
+{
+ int i, i_max, dst_len = 0;
+ u32 sample_num[3];
-/*
- * Integer to 32-bit IEEE floating point representation routine is taken
- * from Radeon R600 driver (drivers/gpu/drm/radeon/r600_blit_kms.c).
- *
- * TODO: Currently we do conversion here in Kernel, but in future that will
- * be moved to the libv4l2 library as video format conversions are.
- */
-#define I2F_FRAC_BITS 23
-#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+ /* There could be 1-3 1024 bytes URB frames */
+ i_max = src_len / 1024;
-/*
- * Converts signed 8-bit integer into 32-bit IEEE floating point
- * representation.
- */
-static u32 msi3101_convert_sample_504(struct msi3101_state *s, u16 x)
-{
- u32 msb, exponent, fraction, sign;
+ for (i = 0; i < i_max; i++) {
+ sample_num[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | src[0] << 0;
+ if (i == 0 && s->next_sample != sample_num[0]) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%d samples lost, %d %08x:%08x\n",
+ sample_num[0] - s->next_sample,
+ src_len, s->next_sample, sample_num[0]);
+ }
- /* Zero is special */
- if (!x)
- return 0;
+ /*
+ * Dump all unknown 'garbage' data - maybe we will discover
+ * someday if there is something rational...
+ */
+ dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
- /* Negative / positive value */
- if (x & (1 << 7)) {
- x = -x;
- x &= 0x7f; /* result is 7 bit ... + sign */
- sign = 1 << 31;
- } else {
- sign = 0 << 31;
+ /* 504 x I+Q samples */
+ src += 16;
+ memcpy(dst, src, 1008);
+ src += 1008;
+ dst += 1008;
+ dst_len += 1008;
}
- /* Get location of the most significant bit */
- msb = __fls(x);
+	/* calculate sampling rate and output it in 10 second intervals */
+ if ((s->jiffies_next + msecs_to_jiffies(10000)) <= jiffies) {
+ unsigned long jiffies_now = jiffies;
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies_next);
+ unsigned int samples = sample_num[i_max - 1] - s->sample;
+ s->jiffies_next = jiffies_now;
+ s->sample = sample_num[i_max - 1];
+ dev_dbg(&s->udev->dev,
+ "slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs);
+ }
- fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
- exponent = (127 + msb) << I2F_FRAC_BITS;
+ /* next sample (sample = sample + i * 504) */
+ s->next_sample = sample_num[i_max - 1] + 504;
- return (fraction + exponent) | sign;
+ return dst_len;
}
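
Per the layout above, each 1024-byte frame decomposes as 4 bytes of little-endian sequence number, 12 bytes of unidentified data and 1008 payload bytes (504 I/Q pairs of signed 8-bit samples); since a URB transfer carries at most three such frames, this converter returns a dst_len of at most 3 * 1008 = 3024 bytes.
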
-static int msi3101_convert_stream_504(struct msi3101_state *s, u32 *dst,
+static int msi3101_convert_stream_504_u8(struct msi3101_state *s, u8 *dst,
u8 *src, unsigned int src_len)
{
int i, j, i_max, dst_len = 0;
- u16 sample[2];
u32 sample_num[3];
+ s8 *s8src;
+ u8 *u8dst;
/* There could be 1-3 1024 bytes URB frames */
i_max = src_len / 1024;
+ u8dst = (u8 *) dst;
for (i = 0; i < i_max; i++) {
sample_num[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | src[0] << 0;
@@ -556,30 +253,28 @@ static int msi3101_convert_stream_504(struct msi3101_state *s, u32 *dst,
*/
dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
+ /* 504 x I+Q samples */
src += 16;
- for (j = 0; j < 1008; j += 2) {
- sample[0] = src[j + 0];
- sample[1] = src[j + 1];
- *dst++ = msi3101_convert_sample_504(s, sample[0]);
- *dst++ = msi3101_convert_sample_504(s, sample[1]);
- }
- /* 504 x I+Q 32bit float samples */
- dst_len += 504 * 2 * 4;
+ s8src = (s8 *) src;
+ for (j = 0; j < 1008; j++)
+ *u8dst++ = *s8src++ + 128;
+
src += 1008;
+ dst += 1008;
+ dst_len += 1008;
}
/* calculate samping rate and output it in 10 seconds intervals */
- if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
- unsigned long jiffies_now = jiffies;
- unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ if (unlikely(time_is_before_jiffies(s->jiffies_next))) {
+#define MSECS 10000UL
unsigned int samples = sample_num[i_max - 1] - s->sample;
- s->jiffies = jiffies_now;
+ s->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
s->sample = sample_num[i_max - 1];
dev_dbg(&s->udev->dev,
"slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
- src_len, samples, msecs,
- samples * 1000UL / msecs);
+ src_len, samples, MSECS,
+ samples * 1000UL / MSECS);
}
/* next sample (sample = sample + i * 504) */
@@ -589,48 +284,53 @@ static int msi3101_convert_stream_504(struct msi3101_state *s, u32 *dst,
}
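
The '+ 128' in the loop above re-biases the device's signed 8-bit samples into the offset-binary form V4L2_SDR_FMT_CU8 expects. A few illustrative mappings:

	s8 -128 -> u8   0
	s8   -1 -> u8 127
	s8    0 -> u8 128
	s8  127 -> u8 255
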
/*
- * Converts signed ~10+2-bit integer into 32-bit IEEE floating point
- * representation.
+ * +===========================================================================
+ * | 00-1023 | USB packet type '384'
+ * +===========================================================================
+ * | 00- 03 | sequence number of first sample in that USB packet
+ * +---------------------------------------------------------------------------
+ * | 04- 15 | garbage
+ * +---------------------------------------------------------------------------
+ * | 16- 175 | samples
+ * +---------------------------------------------------------------------------
+ * | 176- 179 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 180- 339 | samples
+ * +---------------------------------------------------------------------------
+ * | 340- 343 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 344- 503 | samples
+ * +---------------------------------------------------------------------------
+ * | 504- 507 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 508- 667 | samples
+ * +---------------------------------------------------------------------------
+ * | 668- 671 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 672- 831 | samples
+ * +---------------------------------------------------------------------------
+ * | 832- 835 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 836- 995 | samples
+ * +---------------------------------------------------------------------------
+ * | 996- 999 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 1000-1023 | garbage
+ * +---------------------------------------------------------------------------
+ *
+ * Bytes 4 - 7 could have some meaning?
+ *
+ * Control bits for previous samples form a 32-bit field containing 16 x 2-bit
+ * numbers, i.e. one 2-bit number for every 8 samples. It is likely used for
+ * bit-shifting the samples by the given amount, increasing the actual sampling
+ * resolution.
+ * Number 2 (0b10) was never seen.
+ *
+ * 6 * 16 * 2 * 4 = 768 samples. 768 * 4 = 3072 bytes
*/
-static u32 msi3101_convert_sample_384(struct msi3101_state *s, u16 x, int shift)
-{
- u32 msb, exponent, fraction, sign;
- s->sample_ctrl_bit[shift]++;
-
- /* Zero is special */
- if (!x)
- return 0;
-
- if (shift == 3)
- shift = 2;
-
- /* Convert 10-bit two's complement to 12-bit */
- if (x & (1 << 9)) {
- x |= ~0U << 10; /* set all the rest bits to one */
- x <<= shift;
- x = -x;
- x &= 0x7ff; /* result is 11 bit ... + sign */
- sign = 1 << 31;
- } else {
- x <<= shift;
- sign = 0 << 31;
- }
-
- /* Get location of the most significant bit */
- msb = __fls(x);
-
- fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
- exponent = (127 + msb) << I2F_FRAC_BITS;
-
- return (fraction + exponent) | sign;
-}
-
-static int msi3101_convert_stream_384(struct msi3101_state *s, u32 *dst,
+static int msi3101_convert_stream_384(struct msi3101_state *s, u8 *dst,
u8 *src, unsigned int src_len)
{
- int i, j, k, l, i_max, dst_len = 0;
- u16 sample[4];
- u32 bits;
+ int i, i_max, dst_len = 0;
u32 sample_num[3];
/* There could be 1-3 1024 bytes URB frames */
@@ -651,38 +351,20 @@ static int msi3101_convert_stream_384(struct msi3101_state *s, u32 *dst,
dev_dbg_ratelimited(&s->udev->dev,
"%*ph %*ph\n", 12, &src[4], 24, &src[1000]);
+ /* 384 x I+Q samples */
src += 16;
- for (j = 0; j < 6; j++) {
- bits = src[160 + 3] << 24 | src[160 + 2] << 16 | src[160 + 1] << 8 | src[160 + 0] << 0;
- for (k = 0; k < 16; k++) {
- for (l = 0; l < 10; l += 5) {
- sample[0] = (src[l + 0] & 0xff) >> 0 | (src[l + 1] & 0x03) << 8;
- sample[1] = (src[l + 1] & 0xfc) >> 2 | (src[l + 2] & 0x0f) << 6;
- sample[2] = (src[l + 2] & 0xf0) >> 4 | (src[l + 3] & 0x3f) << 4;
- sample[3] = (src[l + 3] & 0xc0) >> 6 | (src[l + 4] & 0xff) << 2;
-
- *dst++ = msi3101_convert_sample_384(s, sample[0], (bits >> (2 * k)) & 0x3);
- *dst++ = msi3101_convert_sample_384(s, sample[1], (bits >> (2 * k)) & 0x3);
- *dst++ = msi3101_convert_sample_384(s, sample[2], (bits >> (2 * k)) & 0x3);
- *dst++ = msi3101_convert_sample_384(s, sample[3], (bits >> (2 * k)) & 0x3);
- }
- src += 10;
- }
- dev_dbg_ratelimited(&s->udev->dev,
- "sample control bits %08x\n", bits);
- src += 4;
- }
- /* 384 x I+Q 32bit float samples */
- dst_len += 384 * 2 * 4;
- src += 24;
+ memcpy(dst, src, 984);
+ src += 984 + 24;
+ dst += 984;
+ dst_len += 984;
}
/* calculate samping rate and output it in 10 seconds intervals */
- if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
+ if ((s->jiffies_next + msecs_to_jiffies(10000)) <= jiffies) {
unsigned long jiffies_now = jiffies;
- unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies_next);
unsigned int samples = sample_num[i_max - 1] - s->sample;
- s->jiffies = jiffies_now;
+ s->jiffies_next = jiffies_now;
s->sample = sample_num[i_max - 1];
dev_dbg(&s->udev->dev,
"slen=%d samples=%u msecs=%lu sampling rate=%lu bits=%d.%d.%d.%d\n",
@@ -699,40 +381,21 @@ static int msi3101_convert_stream_384(struct msi3101_state *s, u32 *dst,
}
/*
- * Converts signed 12-bit integer into 32-bit IEEE floating point
- * representation.
+ * +===========================================================================
+ * | 00-1023 | USB packet type '336'
+ * +===========================================================================
+ * | 00- 03 | sequence number of first sample in that USB packet
+ * +---------------------------------------------------------------------------
+ * | 04- 15 | garbage
+ * +---------------------------------------------------------------------------
+ * | 16-1023 | samples
+ * +---------------------------------------------------------------------------
+ * signed 12-bit sample
*/
-static u32 msi3101_convert_sample_336(struct msi3101_state *s, u16 x)
-{
- u32 msb, exponent, fraction, sign;
-
- /* Zero is special */
- if (!x)
- return 0;
-
- /* Negative / positive value */
- if (x & (1 << 11)) {
- x = -x;
- x &= 0x7ff; /* result is 11 bit ... + sign */
- sign = 1 << 31;
- } else {
- sign = 0 << 31;
- }
-
- /* Get location of the most significant bit */
- msb = __fls(x);
-
- fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
- exponent = (127 + msb) << I2F_FRAC_BITS;
-
- return (fraction + exponent) | sign;
-}
-
-static int msi3101_convert_stream_336(struct msi3101_state *s, u32 *dst,
+static int msi3101_convert_stream_336(struct msi3101_state *s, u8 *dst,
u8 *src, unsigned int src_len)
{
- int i, j, i_max, dst_len = 0;
- u16 sample[2];
+ int i, i_max, dst_len = 0;
u32 sample_num[3];
/* There could be 1-3 1024 bytes URB frames */
@@ -753,25 +416,20 @@ static int msi3101_convert_stream_336(struct msi3101_state *s, u32 *dst,
*/
dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
+ /* 336 x I+Q samples */
src += 16;
- for (j = 0; j < 1008; j += 3) {
- sample[0] = (src[j + 0] & 0xff) >> 0 | (src[j + 1] & 0x0f) << 8;
- sample[1] = (src[j + 1] & 0xf0) >> 4 | (src[j + 2] & 0xff) << 4;
-
- *dst++ = msi3101_convert_sample_336(s, sample[0]);
- *dst++ = msi3101_convert_sample_336(s, sample[1]);
- }
- /* 336 x I+Q 32bit float samples */
- dst_len += 336 * 2 * 4;
+ memcpy(dst, src, 1008);
src += 1008;
+ dst += 1008;
+ dst_len += 1008;
}
/* calculate samping rate and output it in 10 seconds intervals */
- if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
+ if ((s->jiffies_next + msecs_to_jiffies(10000)) <= jiffies) {
unsigned long jiffies_now = jiffies;
- unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies_next);
unsigned int samples = sample_num[i_max - 1] - s->sample;
- s->jiffies = jiffies_now;
+ s->jiffies_next = jiffies_now;
s->sample = sample_num[i_max - 1];
dev_dbg(&s->udev->dev,
"slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
@@ -786,41 +444,75 @@ static int msi3101_convert_stream_336(struct msi3101_state *s, u32 *dst,
}
/*
- * Converts signed 14-bit integer into 32-bit IEEE floating point
- * representation.
+ * +===========================================================================
+ * | 00-1023 | USB packet type '252'
+ * +===========================================================================
+ * | 00- 03 | sequence number of first sample in that USB packet
+ * +---------------------------------------------------------------------------
+ * | 04- 15 | garbage
+ * +---------------------------------------------------------------------------
+ * | 16-1023 | samples
+ * +---------------------------------------------------------------------------
+ * signed 14-bit sample
*/
-static u32 msi3101_convert_sample_252(struct msi3101_state *s, u16 x)
+static int msi3101_convert_stream_252(struct msi3101_state *s, u8 *dst,
+ u8 *src, unsigned int src_len)
{
- u32 msb, exponent, fraction, sign;
+ int i, i_max, dst_len = 0;
+ u32 sample_num[3];
- /* Zero is special */
- if (!x)
- return 0;
+ /* There could be 1-3 1024 bytes URB frames */
+ i_max = src_len / 1024;
- /* Negative / positive value */
- if (x & (1 << 13)) {
- x = -x;
- x &= 0x1fff; /* result is 13 bit ... + sign */
- sign = 1 << 31;
- } else {
- sign = 0 << 31;
+ for (i = 0; i < i_max; i++) {
+ sample_num[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | src[0] << 0;
+ if (i == 0 && s->next_sample != sample_num[0]) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%d samples lost, %d %08x:%08x\n",
+ sample_num[0] - s->next_sample,
+ src_len, s->next_sample, sample_num[0]);
+ }
+
+ /*
+ * Dump all unknown 'garbage' data - maybe we will discover
+ * someday if there is something rational...
+ */
+ dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
+
+ /* 252 x I+Q samples */
+ src += 16;
+ memcpy(dst, src, 1008);
+ src += 1008;
+ dst += 1008;
+ dst_len += 1008;
}
- /* Get location of the most significant bit */
- msb = __fls(x);
+	/* calculate sampling rate and output it in 10 second intervals */
+ if ((s->jiffies_next + msecs_to_jiffies(10000)) <= jiffies) {
+ unsigned long jiffies_now = jiffies;
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies_next);
+ unsigned int samples = sample_num[i_max - 1] - s->sample;
+ s->jiffies_next = jiffies_now;
+ s->sample = sample_num[i_max - 1];
+ dev_dbg(&s->udev->dev,
+ "slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs);
+ }
- fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
- exponent = (127 + msb) << I2F_FRAC_BITS;
+ /* next sample (sample = sample + i * 252) */
+ s->next_sample = sample_num[i_max - 1] + 252;
- return (fraction + exponent) | sign;
+ return dst_len;
}
-static int msi3101_convert_stream_252(struct msi3101_state *s, u32 *dst,
+static int msi3101_convert_stream_252_u16(struct msi3101_state *s, u8 *dst,
u8 *src, unsigned int src_len)
{
int i, j, i_max, dst_len = 0;
- u16 sample[2];
u32 sample_num[3];
+ u16 *u16dst = (u16 *) dst;
+ struct {signed int x:14;} se;
/* There could be 1-3 1024 bytes URB frames */
i_max = src_len / 1024;
@@ -840,30 +532,44 @@ static int msi3101_convert_stream_252(struct msi3101_state *s, u32 *dst,
*/
dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
+ /* 252 x I+Q samples */
src += 16;
+
for (j = 0; j < 1008; j += 4) {
- sample[0] = src[j + 0] >> 0 | src[j + 1] << 8;
- sample[1] = src[j + 2] >> 0 | src[j + 3] << 8;
+ unsigned int usample[2];
+ int ssample[2];
+
+ usample[0] = src[j + 0] >> 0 | src[j + 1] << 8;
+ usample[1] = src[j + 2] >> 0 | src[j + 3] << 8;
- *dst++ = msi3101_convert_sample_252(s, sample[0]);
- *dst++ = msi3101_convert_sample_252(s, sample[1]);
+ /* sign extension from 14-bit to signed int */
+ ssample[0] = se.x = usample[0];
+ ssample[1] = se.x = usample[1];
+
+ /* from signed to unsigned */
+ usample[0] = ssample[0] + 8192;
+ usample[1] = ssample[1] + 8192;
+
+ /* from 14-bit to 16-bit */
+ *u16dst++ = (usample[0] << 2) | (usample[0] >> 12);
+ *u16dst++ = (usample[1] << 2) | (usample[1] >> 12);
}
- /* 252 x I+Q 32bit float samples */
- dst_len += 252 * 2 * 4;
+
src += 1008;
+ dst += 1008;
+ dst_len += 1008;
}
/* calculate samping rate and output it in 10 seconds intervals */
- if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
- unsigned long jiffies_now = jiffies;
- unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ if (unlikely(time_is_before_jiffies(s->jiffies_next))) {
+#define MSECS 10000UL
unsigned int samples = sample_num[i_max - 1] - s->sample;
- s->jiffies = jiffies_now;
+ s->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
s->sample = sample_num[i_max - 1];
dev_dbg(&s->udev->dev,
"slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
- src_len, samples, msecs,
- samples * 1000UL / msecs);
+ src_len, samples, MSECS,
+ samples * 1000UL / MSECS);
}
/* next sample (sample = sample + i * 252) */
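
The conversion loop above turns each packed 14-bit two's-complement sample into unsigned 16-bit little-endian in three steps: the anonymous bitfield 'se' sign-extends the raw 14-bit value, adding 8192 re-biases it to 0..16383, and '(x << 2) | (x >> 12)' stretches 14 bits across the full 16-bit range. Worked values, for illustration:

	raw 0x2000 (-8192) -> 0     -> 0x0000
	raw 0x3fff (   -1) -> 8191  -> 0x7ffd
	raw 0x0000 (    0) -> 8192  -> 0x8002
	raw 0x1fff (+8191) -> 16383 -> 0xffff
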
@@ -883,14 +589,14 @@ static void msi3101_isoc_handler(struct urb *urb)
unsigned char *iso_buf = NULL;
struct msi3101_frame_buf *fbuf;
- if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
- urb->status == -ESHUTDOWN) {
+ if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN)) {
dev_dbg(&s->udev->dev, "URB (%p) unlinked %ssynchronuously\n",
urb, urb->status == -ENOENT ? "" : "a");
return;
}
- if (urb->status != 0) {
+ if (unlikely(urb->status != 0)) {
dev_dbg(&s->udev->dev,
"msi3101_isoc_handler() called with status %d\n",
urb->status);
@@ -910,28 +616,28 @@ static void msi3101_isoc_handler(struct urb *urb)
/* Check frame error */
fstatus = urb->iso_frame_desc[i].status;
- if (fstatus) {
+ if (unlikely(fstatus)) {
dev_dbg_ratelimited(&s->udev->dev,
"frame=%d/%d has error %d skipping\n",
i, urb->number_of_packets, fstatus);
- goto skip;
+ continue;
}
/* Check if that frame contains data */
flen = urb->iso_frame_desc[i].actual_length;
- if (flen == 0)
- goto skip;
+ if (unlikely(flen == 0))
+ continue;
iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
/* Get free framebuffer */
fbuf = msi3101_get_next_fill_buf(s);
- if (fbuf == NULL) {
+ if (unlikely(fbuf == NULL)) {
s->vb_full++;
dev_dbg_ratelimited(&s->udev->dev,
"videobuf is full, %d packets dropped\n",
s->vb_full);
- goto skip;
+ continue;
}
/* fill framebuffer */
@@ -939,13 +645,11 @@ static void msi3101_isoc_handler(struct urb *urb)
flen = s->convert_stream(s, ptr, iso_buf, flen);
vb2_set_plane_payload(&fbuf->vb, 0, flen);
vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
-skip:
- ;
}
handler_end:
i = usb_submit_urb(urb, GFP_ATOMIC);
- if (i != 0)
+ if (unlikely(i != 0))
dev_dbg(&s->udev->dev,
"Error (%d) re-submitting urb in msi3101_isoc_handler\n",
i);
@@ -1008,7 +712,7 @@ static int msi3101_isoc_init(struct msi3101_state *s)
udev = s->udev;
ret = usb_set_interface(s->udev, 0, 1);
- if (ret < 0)
+ if (ret)
return ret;
 /* Allocate and init isochronous URBs */
@@ -1094,9 +798,9 @@ static void msi3101_disconnect(struct usb_interface *intf)
mutex_lock(&s->v4l2_lock);
/* No need to keep the urbs around after disconnection */
s->udev = NULL;
-
v4l2_device_disconnect(&s->v4l2_dev);
video_unregister_device(&s->vdev);
+ spi_unregister_master(s->master);
mutex_unlock(&s->v4l2_lock);
mutex_unlock(&s->vb_queue_lock);
@@ -1112,14 +816,12 @@ static int msi3101_querycap(struct file *file, void *fh,
strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
strlcpy(cap->card, s->vdev.name, sizeof(cap->card));
usb_make_path(s->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
- cap->device_caps = V4L2_CAP_TUNER;
+ cap->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE | V4L2_CAP_TUNER;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
-
/* Videobuf2 operations */
static int msi3101_queue_setup(struct vb2_queue *vq,
const struct v4l2_format *fmt, unsigned int *nbuffers,
@@ -1129,31 +831,20 @@ static int msi3101_queue_setup(struct vb2_queue *vq,
dev_dbg(&s->udev->dev, "%s: *nbuffers=%d\n", __func__, *nbuffers);
/* Absolute min and max number of buffers available for mmap() */
- *nbuffers = 32;
+ *nbuffers = clamp_t(unsigned int, *nbuffers, 8, 32);
*nplanes = 1;
/*
* 3, wMaxPacketSize 3x 1024 bytes
* 504, max IQ sample pairs per 1024 frame
* 2, two samples, I and Q
- * 4, 32-bit float
+ * 2, 16-bit is enough for single sample
*/
- sizes[0] = PAGE_ALIGN(3 * 504 * 2 * 4); /* = 12096 */
+ sizes[0] = PAGE_ALIGN(3 * 504 * 2 * 2);
dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
__func__, *nbuffers, sizes[0]);
return 0;
}
-static int msi3101_buf_prepare(struct vb2_buffer *vb)
-{
- struct msi3101_state *s = vb2_get_drv_priv(vb->vb2_queue);
-
- /* Don't allow queing new buffers after device disconnection */
- if (!s->udev)
- return -ENODEV;
-
- return 0;
-}
-
static void msi3101_buf_queue(struct vb2_buffer *vb)
{
struct msi3101_state *s = vb2_get_drv_priv(vb->vb2_queue);
@@ -1162,7 +853,7 @@ static void msi3101_buf_queue(struct vb2_buffer *vb)
unsigned long flags = 0;
/* Check the device has not disconnected between prep and queuing */
- if (!s->udev) {
+ if (unlikely(!s->udev)) {
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
return;
}
@@ -1209,41 +900,63 @@ static int msi3101_ctrl_msg(struct msi3101_state *s, u8 cmd, u32 data)
return ret;
};
-static int msi3101_tuner_write(struct msi3101_state *s, u32 data)
-{
- return msi3101_ctrl_msg(s, CMD_WREG, data << 8 | 0x09);
-};
-
#define F_REF 24000000
#define DIV_R_IN 2
static int msi3101_set_usb_adc(struct msi3101_state *s)
{
int ret, div_n, div_m, div_r_out, f_sr, f_vco, fract;
u32 reg3, reg4, reg7;
+ struct v4l2_ctrl *bandwidth_auto;
+ struct v4l2_ctrl *bandwidth;
- f_sr = s->ctrl_sampling_rate->val64;
+ f_sr = s->f_adc;
+
+ /* set tuner, subdev, filters according to sampling rate */
+ bandwidth_auto = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO);
+ bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
+ if (v4l2_ctrl_g_ctrl(bandwidth_auto)) {
+ bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
+ v4l2_ctrl_s_ctrl(bandwidth, s->f_adc);
+ }
/* select stream format */
- if (f_sr < 6000000) {
- s->convert_stream = msi3101_convert_stream_252;
+ switch (s->pixelformat) {
+ case V4L2_SDR_FMT_CU8:
+ s->convert_stream = msi3101_convert_stream_504_u8;
+ reg7 = 0x000c9407;
+ break;
+ case V4L2_SDR_FMT_CU16LE:
+ s->convert_stream = msi3101_convert_stream_252_u16;
reg7 = 0x00009407;
- } else if (f_sr < 8000000) {
- s->convert_stream = msi3101_convert_stream_336;
- reg7 = 0x00008507;
- } else if (f_sr < 9000000) {
+ break;
+ case V4L2_PIX_FMT_SDR_S8:
+ s->convert_stream = msi3101_convert_stream_504;
+ reg7 = 0x000c9407;
+ break;
+ case V4L2_PIX_FMT_SDR_MSI2500_384:
s->convert_stream = msi3101_convert_stream_384;
reg7 = 0x0000a507;
- } else {
- s->convert_stream = msi3101_convert_stream_504;
+ break;
+ case V4L2_PIX_FMT_SDR_S12:
+ s->convert_stream = msi3101_convert_stream_336;
+ reg7 = 0x00008507;
+ break;
+ case V4L2_PIX_FMT_SDR_S14:
+ s->convert_stream = msi3101_convert_stream_252;
+ reg7 = 0x00009407;
+ break;
+ default:
+ s->convert_stream = msi3101_convert_stream_504_u8;
reg7 = 0x000c9407;
+ break;
}
/*
 * Synthesizer config is just an educated guess...
*
* [7:0] 0x03, register address
- * [8] 1, always
- * [9] ?
+ * [8] 1, power control
+ * [9] ?, power control
* [12:10] output divider
* [13] 0 ?
* [14] 0 ?
@@ -1334,224 +1047,6 @@ err:
return ret;
};
-static int msi3101_set_tuner(struct msi3101_state *s)
-{
- int ret, i, len;
- unsigned int n, m, thresh, frac, vco_step, tmp, f_if1;
- u32 reg;
- u64 f_vco, tmp64;
- u8 mode, filter_mode, lo_div;
- const struct msi3101_gain *gain_lut;
- static const struct {
- u32 rf;
- u8 mode;
- u8 lo_div;
- } band_lut[] = {
- { 50000000, 0xe1, 16}, /* AM_MODE2, antenna 2 */
- {108000000, 0x42, 32}, /* VHF_MODE */
- {330000000, 0x44, 16}, /* B3_MODE */
- {960000000, 0x48, 4}, /* B45_MODE */
- { ~0U, 0x50, 2}, /* BL_MODE */
- };
- static const struct {
- u32 freq;
- u8 filter_mode;
- } if_freq_lut[] = {
- { 0, 0x03}, /* Zero IF */
- { 450000, 0x02}, /* 450 kHz IF */
- {1620000, 0x01}, /* 1.62 MHz IF */
- {2048000, 0x00}, /* 2.048 MHz IF */
- };
- static const struct {
- u32 freq;
- u8 val;
- } bandwidth_lut[] = {
- { 200000, 0x00}, /* 200 kHz */
- { 300000, 0x01}, /* 300 kHz */
- { 600000, 0x02}, /* 600 kHz */
- {1536000, 0x03}, /* 1.536 MHz */
- {5000000, 0x04}, /* 5 MHz */
- {6000000, 0x05}, /* 6 MHz */
- {7000000, 0x06}, /* 7 MHz */
- {8000000, 0x07}, /* 8 MHz */
- };
-
- unsigned int f_rf = s->ctrl_tuner_rf->val64;
-
- /*
- * bandwidth (Hz)
- * 200000, 300000, 600000, 1536000, 5000000, 6000000, 7000000, 8000000
- */
- unsigned int bandwidth = s->ctrl_tuner_bw->val;
-
- /*
- * intermediate frequency (Hz)
- * 0, 450000, 1620000, 2048000
- */
- unsigned int f_if = s->ctrl_tuner_if->val;
-
- /*
- * gain reduction (dB)
- * 0 - 102 below 420 MHz
- * 0 - 85 above 420 MHz
- */
- int gain = s->ctrl_tuner_gain->val;
-
- dev_dbg(&s->udev->dev,
- "%s: f_rf=%d bandwidth=%d f_if=%d gain=%d\n",
- __func__, f_rf, bandwidth, f_if, gain);
-
- ret = -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(band_lut); i++) {
- if (f_rf <= band_lut[i].rf) {
- mode = band_lut[i].mode;
- lo_div = band_lut[i].lo_div;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(band_lut))
- goto err;
-
- /* AM_MODE is upconverted */
- if ((mode >> 0) & 0x1)
- f_if1 = 5 * F_REF;
- else
- f_if1 = 0;
-
- for (i = 0; i < ARRAY_SIZE(if_freq_lut); i++) {
- if (f_if == if_freq_lut[i].freq) {
- filter_mode = if_freq_lut[i].filter_mode;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(if_freq_lut))
- goto err;
-
- for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) {
- if (bandwidth == bandwidth_lut[i].freq) {
- bandwidth = bandwidth_lut[i].val;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(bandwidth_lut))
- goto err;
-
-#define F_OUT_STEP 1
-#define R_REF 4
- f_vco = (f_rf + f_if + f_if1) * lo_div;
-
- tmp64 = f_vco;
- m = do_div(tmp64, F_REF * R_REF);
- n = (unsigned int) tmp64;
-
- vco_step = F_OUT_STEP * lo_div;
- thresh = (F_REF * R_REF) / vco_step;
- frac = 1ul * thresh * m / (F_REF * R_REF);
-
- /* Find out greatest common divisor and divide to smaller. */
- tmp = gcd(thresh, frac);
- thresh /= tmp;
- frac /= tmp;
-
- /* Force divide to reg max. Resolution will be reduced. */
- tmp = DIV_ROUND_UP(thresh, 4095);
- thresh = DIV_ROUND_CLOSEST(thresh, tmp);
- frac = DIV_ROUND_CLOSEST(frac, tmp);
-
- /* calc real RF set */
- tmp = 1ul * F_REF * R_REF * n;
- tmp += 1ul * F_REF * R_REF * frac / thresh;
- tmp /= lo_div;
-
- dev_dbg(&s->udev->dev,
- "%s: rf=%u:%u n=%d thresh=%d frac=%d\n",
- __func__, f_rf, tmp, n, thresh, frac);
-
- ret = msi3101_tuner_write(s, 0x00000e);
- if (ret)
- goto err;
-
- ret = msi3101_tuner_write(s, 0x000003);
- if (ret)
- goto err;
-
- reg = 0 << 0;
- reg |= mode << 4;
- reg |= filter_mode << 12;
- reg |= bandwidth << 14;
- reg |= 0x02 << 17;
- reg |= 0x00 << 20;
- ret = msi3101_tuner_write(s, reg);
- if (ret)
- goto err;
-
- reg = 5 << 0;
- reg |= thresh << 4;
- reg |= 1 << 19;
- reg |= 1 << 21;
- ret = msi3101_tuner_write(s, reg);
- if (ret)
- goto err;
-
- reg = 2 << 0;
- reg |= frac << 4;
- reg |= n << 16;
- ret = msi3101_tuner_write(s, reg);
- if (ret)
- goto err;
-
- if (f_rf < 120000000) {
- gain_lut = msi3101_gain_lut_120;
- len = ARRAY_SIZE(msi3101_gain_lut_120);
- } else if (f_rf < 245000000) {
- gain_lut = msi3101_gain_lut_245;
- len = ARRAY_SIZE(msi3101_gain_lut_120);
- } else {
- gain_lut = msi3101_gain_lut_1000;
- len = ARRAY_SIZE(msi3101_gain_lut_1000);
- }
-
- for (i = 0; i < len; i++) {
- if (gain_lut[i].tot >= gain)
- break;
- }
-
- if (i == len)
- goto err;
-
- dev_dbg(&s->udev->dev,
- "%s: gain tot=%d baseband=%d lna=%d mixer=%d\n",
- __func__, gain_lut[i].tot, gain_lut[i].baseband,
- gain_lut[i].lna, gain_lut[i].mixer);
-
- reg = 1 << 0;
- reg |= gain_lut[i].baseband << 4;
- reg |= 0 << 10;
- reg |= gain_lut[i].mixer << 12;
- reg |= gain_lut[i].lna << 13;
- reg |= 4 << 14;
- reg |= 0 << 17;
- ret = msi3101_tuner_write(s, reg);
- if (ret)
- goto err;
-
- reg = 6 << 0;
- reg |= 63 << 4;
- reg |= 4095 << 10;
- ret = msi3101_tuner_write(s, reg);
- if (ret)
- goto err;
-
- return 0;
-err:
- dev_dbg(&s->udev->dev, "%s: failed %d\n", __func__, ret);
- return ret;
-};
-
static int msi3101_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct msi3101_state *s = vb2_get_drv_priv(vq);
@@ -1564,6 +1059,9 @@ static int msi3101_start_streaming(struct vb2_queue *vq, unsigned int count)
if (mutex_lock_interruptible(&s->v4l2_lock))
return -ERESTARTSYS;
+ /* wake-up tuner */
+ v4l2_subdev_call(s->v4l2_subdev, core, s_power, 1);
+
ret = msi3101_set_usb_adc(s);
ret = msi3101_isoc_init(s);
@@ -1594,6 +1092,12 @@ static int msi3101_stop_streaming(struct vb2_queue *vq)
msleep(20);
msi3101_ctrl_msg(s, CMD_STOP_STREAMING, 0);
+ /* sleep USB IF / ADC */
+ msi3101_ctrl_msg(s, CMD_WREG, 0x01000003);
+
+ /* sleep tuner */
+ v4l2_subdev_call(s->v4l2_subdev, core, s_power, 0);
+
mutex_unlock(&s->v4l2_lock);
return 0;
@@ -1601,7 +1105,6 @@ static int msi3101_stop_streaming(struct vb2_queue *vq)
static struct vb2_ops msi3101_vb2_ops = {
.queue_setup = msi3101_queue_setup,
- .buf_prepare = msi3101_buf_prepare,
.buf_queue = msi3101_buf_queue,
.start_streaming = msi3101_start_streaming,
.stop_streaming = msi3101_stop_streaming,
@@ -1609,66 +1112,195 @@ static struct vb2_ops msi3101_vb2_ops = {
.wait_finish = vb2_ops_wait_finish,
};
-static int msi3101_enum_input(struct file *file, void *fh, struct v4l2_input *i)
+static int msi3101_enum_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
- if (i->index != 0)
+ struct msi3101_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s: index=%d\n", __func__, f->index);
+
+ if (f->index >= NUM_FORMATS)
return -EINVAL;
- strlcpy(i->name, "SDR data", sizeof(i->name));
- i->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(f->description, formats[f->index].name, sizeof(f->description));
+ f->pixelformat = formats[f->index].pixelformat;
+
+ return 0;
+}
+
+static int msi3101_g_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct msi3101_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
+ (char *)&s->pixelformat);
+
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ f->fmt.sdr.pixelformat = s->pixelformat;
return 0;
}
-static int msi3101_g_input(struct file *file, void *fh, unsigned int *i)
+static int msi3101_s_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
{
- *i = 0;
+ struct msi3101_state *s = video_drvdata(file);
+ struct vb2_queue *q = &s->vb_queue;
+ int i;
+ dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
+ (char *)&f->fmt.sdr.pixelformat);
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
+ s->pixelformat = f->fmt.sdr.pixelformat;
+ return 0;
+ }
+ }
+
+ f->fmt.sdr.pixelformat = formats[0].pixelformat;
+ s->pixelformat = formats[0].pixelformat;
return 0;
}
-static int msi3101_s_input(struct file *file, void *fh, unsigned int i)
+static int msi3101_try_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
{
- return i ? -EINVAL : 0;
+ struct msi3101_state *s = video_drvdata(file);
+ int i;
+ dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
+ (char *)&f->fmt.sdr.pixelformat);
+
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat)
+ return 0;
+ }
+
+ f->fmt.sdr.pixelformat = formats[0].pixelformat;
+
+ return 0;
}
-static int vidioc_s_tuner(struct file *file, void *priv,
+static int msi3101_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *v)
{
struct msi3101_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ int ret;
+ dev_dbg(&s->udev->dev, "%s: index=%d\n", __func__, v->index);
- return 0;
+ if (v->index == 0)
+ ret = 0;
+ else if (v->index == 1)
+ ret = v4l2_subdev_call(s->v4l2_subdev, tuner, s_tuner, v);
+ else
+ ret = -EINVAL;
+
+ return ret;
}
-static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v)
+static int msi3101_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v)
{
struct msi3101_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ int ret;
+ dev_dbg(&s->udev->dev, "%s: index=%d\n", __func__, v->index);
+
+ if (v->index == 0) {
+ strlcpy(v->name, "Mirics MSi2500", sizeof(v->name));
+ v->type = V4L2_TUNER_ADC;
+ v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ v->rangelow = 1200000;
+ v->rangehigh = 15000000;
+ ret = 0;
+ } else if (v->index == 1) {
+ ret = v4l2_subdev_call(s->v4l2_subdev, tuner, g_tuner, v);
+ } else {
+ ret = -EINVAL;
+ }
- strcpy(v->name, "SDR RX");
- v->capability = V4L2_TUNER_CAP_LOW;
+ return ret;
+}
- return 0;
+static int msi3101_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct msi3101_state *s = video_drvdata(file);
+ int ret = 0;
+ dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d\n",
+ __func__, f->tuner, f->type);
+
+ if (f->tuner == 0) {
+ f->frequency = s->f_adc;
+ ret = 0;
+ } else if (f->tuner == 1) {
+ f->type = V4L2_TUNER_RF;
+ ret = v4l2_subdev_call(s->v4l2_subdev, tuner, g_frequency, f);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
}
-static int vidioc_s_frequency(struct file *file, void *priv,
+static int msi3101_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
struct msi3101_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s: frequency=%lu Hz (%u)\n",
- __func__, f->frequency * 625UL / 10UL, f->frequency);
+ int ret;
+ dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d frequency=%u\n",
+ __func__, f->tuner, f->type, f->frequency);
+
+ if (f->tuner == 0) {
+ s->f_adc = clamp_t(unsigned int, f->frequency,
+ bands[0].rangelow,
+ bands[0].rangehigh);
+ dev_dbg(&s->udev->dev, "%s: ADC frequency=%u Hz\n",
+ __func__, s->f_adc);
+ ret = msi3101_set_usb_adc(s);
+ } else if (f->tuner == 1) {
+ ret = v4l2_subdev_call(s->v4l2_subdev, tuner, s_frequency, f);
+ } else {
+ ret = -EINVAL;
+ }
- return v4l2_ctrl_s_ctrl_int64(s->ctrl_tuner_rf,
- f->frequency * 625UL / 10UL);
+ return ret;
+}
+
+static int msi3101_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
+{
+ struct msi3101_state *s = video_drvdata(file);
+ int ret;
+ dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d index=%d\n",
+ __func__, band->tuner, band->type, band->index);
+
+ if (band->tuner == 0) {
+ if (band->index >= ARRAY_SIZE(bands)) {
+ ret = -EINVAL;
+ } else {
+ *band = bands[band->index];
+ ret = 0;
+ }
+ } else if (band->tuner == 1) {
+ ret = v4l2_subdev_call(s->v4l2_subdev, tuner,
+ enum_freq_bands, band);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
}
static const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
.vidioc_querycap = msi3101_querycap,
- .vidioc_enum_input = msi3101_enum_input,
- .vidioc_g_input = msi3101_g_input,
- .vidioc_s_input = msi3101_s_input,
+ .vidioc_enum_fmt_sdr_cap = msi3101_enum_fmt_sdr_cap,
+ .vidioc_g_fmt_sdr_cap = msi3101_g_fmt_sdr_cap,
+ .vidioc_s_fmt_sdr_cap = msi3101_s_fmt_sdr_cap,
+ .vidioc_try_fmt_sdr_cap = msi3101_try_fmt_sdr_cap,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
@@ -1680,9 +1312,12 @@ static const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = msi3101_g_tuner,
+ .vidioc_s_tuner = msi3101_s_tuner,
+
+ .vidioc_g_frequency = msi3101_g_frequency,
+ .vidioc_s_frequency = msi3101_s_frequency,
+ .vidioc_enum_freq_bands = msi3101_enum_freq_bands,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
@@ -1706,129 +1341,52 @@ static struct video_device msi3101_template = {
.ioctl_ops = &msi3101_ioctl_ops,
};
-static int msi3101_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct msi3101_state *s =
- container_of(ctrl->handler, struct msi3101_state,
- ctrl_handler);
- int ret;
- dev_dbg(&s->udev->dev,
- "%s: id=%d name=%s val=%d min=%d max=%d step=%d\n",
- __func__, ctrl->id, ctrl->name, ctrl->val,
- ctrl->minimum, ctrl->maximum, ctrl->step);
-
- switch (ctrl->id) {
- case MSI3101_CID_SAMPLING_MODE:
- case MSI3101_CID_SAMPLING_RATE:
- case MSI3101_CID_SAMPLING_RESOLUTION:
- ret = 0;
- break;
- case MSI3101_CID_TUNER_RF:
- case MSI3101_CID_TUNER_BW:
- case MSI3101_CID_TUNER_IF:
- case MSI3101_CID_TUNER_GAIN:
- ret = msi3101_set_tuner(s);
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static const struct v4l2_ctrl_ops msi3101_ctrl_ops = {
- .s_ctrl = msi3101_s_ctrl,
-};
-
static void msi3101_video_release(struct v4l2_device *v)
{
struct msi3101_state *s =
container_of(v, struct msi3101_state, v4l2_dev);
- v4l2_ctrl_handler_free(&s->ctrl_handler);
+ v4l2_ctrl_handler_free(&s->hdl);
v4l2_device_unregister(&s->v4l2_dev);
kfree(s);
}
+static int msi3101_transfer_one_message(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct msi3101_state *s = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ int ret = 0;
+ u32 data;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ dev_dbg(&s->udev->dev, "%s: msg=%*ph\n",
+ __func__, t->len, t->tx_buf);
+ data = 0x09; /* reg 9 is SPI adapter */
+ data |= ((u8 *)t->tx_buf)[0] << 8;
+ data |= ((u8 *)t->tx_buf)[1] << 16;
+ data |= ((u8 *)t->tx_buf)[2] << 24;
+ ret = msi3101_ctrl_msg(s, CMD_WREG, data);
+ }
+
+ m->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+}
+
static int msi3101_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct msi3101_state *s = NULL;
+ struct v4l2_subdev *sd;
+ struct spi_master *master;
int ret;
- static const char * const ctrl_sampling_mode_qmenu_strings[] = {
- "Quadrature Sampling",
- NULL,
- };
- static const struct v4l2_ctrl_config ctrl_sampling_mode = {
- .ops = &msi3101_ctrl_ops,
- .id = MSI3101_CID_SAMPLING_MODE,
- .type = V4L2_CTRL_TYPE_MENU,
- .flags = V4L2_CTRL_FLAG_INACTIVE,
- .name = "Sampling Mode",
- .qmenu = ctrl_sampling_mode_qmenu_strings,
- };
- static const struct v4l2_ctrl_config ctrl_sampling_rate = {
- .ops = &msi3101_ctrl_ops,
- .id = MSI3101_CID_SAMPLING_RATE,
- .type = V4L2_CTRL_TYPE_INTEGER64,
- .name = "Sampling Rate",
- .min = 500000,
- .max = 12000000,
- .def = 2048000,
- .step = 1,
- };
- static const struct v4l2_ctrl_config ctrl_sampling_resolution = {
- .ops = &msi3101_ctrl_ops,
- .id = MSI3101_CID_SAMPLING_RESOLUTION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .flags = V4L2_CTRL_FLAG_INACTIVE,
- .name = "Sampling Resolution",
- .min = 10,
- .max = 10,
- .def = 10,
- .step = 1,
- };
- static const struct v4l2_ctrl_config ctrl_tuner_rf = {
- .ops = &msi3101_ctrl_ops,
- .id = MSI3101_CID_TUNER_RF,
- .type = V4L2_CTRL_TYPE_INTEGER64,
- .name = "Tuner RF",
- .min = 40000000,
- .max = 2000000000,
- .def = 100000000,
- .step = 1,
- };
- static const struct v4l2_ctrl_config ctrl_tuner_bw = {
- .ops = &msi3101_ctrl_ops,
- .id = MSI3101_CID_TUNER_BW,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Tuner BW",
- .min = 200000,
- .max = 8000000,
- .def = 600000,
- .step = 1,
- };
- static const struct v4l2_ctrl_config ctrl_tuner_if = {
- .ops = &msi3101_ctrl_ops,
- .id = MSI3101_CID_TUNER_IF,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .flags = V4L2_CTRL_FLAG_INACTIVE,
- .name = "Tuner IF",
- .min = 0,
- .max = 2048000,
- .def = 0,
- .step = 1,
- };
- static const struct v4l2_ctrl_config ctrl_tuner_gain = {
- .ops = &msi3101_ctrl_ops,
- .id = MSI3101_CID_TUNER_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Tuner Gain",
- .min = 0,
- .max = 102,
- .def = 0,
- .step = 1,
+ static struct spi_board_info board_info = {
+ .modalias = "msi001",
+ .bus_num = 0,
+ .chip_select = 0,
+ .max_speed_hz = 12000000,
};
s = kzalloc(sizeof(struct msi3101_state), GFP_KERNEL);
@@ -1841,19 +1399,20 @@ static int msi3101_probe(struct usb_interface *intf,
mutex_init(&s->vb_queue_lock);
spin_lock_init(&s->queued_bufs_lock);
INIT_LIST_HEAD(&s->queued_bufs);
-
s->udev = udev;
+ s->f_adc = bands[0].rangelow;
+ s->pixelformat = V4L2_SDR_FMT_CU8;
/* Init videobuf2 queue structure */
- s->vb_queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ s->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
s->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
s->vb_queue.drv_priv = s;
s->vb_queue.buf_struct_size = sizeof(struct msi3101_frame_buf);
s->vb_queue.ops = &msi3101_vb2_ops;
s->vb_queue.mem_ops = &vb2_vmalloc_memops;
- s->vb_queue.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ s->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(&s->vb_queue);
- if (ret < 0) {
+ if (ret) {
dev_err(&s->udev->dev, "Could not initialize vb2 queue\n");
goto err_free_mem;
}
@@ -1865,36 +1424,59 @@ static int msi3101_probe(struct usb_interface *intf,
set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev.flags);
video_set_drvdata(&s->vdev, s);
- /* Register controls */
- v4l2_ctrl_handler_init(&s->ctrl_handler, 7);
- v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_sampling_mode, NULL);
- s->ctrl_sampling_rate = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_sampling_rate, NULL);
- v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_sampling_resolution, NULL);
- s->ctrl_tuner_rf = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_rf, NULL);
- s->ctrl_tuner_bw = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_bw, NULL);
- s->ctrl_tuner_if = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_if, NULL);
- s->ctrl_tuner_gain = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_gain, NULL);
- if (s->ctrl_handler.error) {
- ret = s->ctrl_handler.error;
- dev_err(&s->udev->dev, "Could not initialize controls\n");
- goto err_free_controls;
- }
-
/* Register the v4l2_device structure */
s->v4l2_dev.release = msi3101_video_release;
ret = v4l2_device_register(&intf->dev, &s->v4l2_dev);
if (ret) {
dev_err(&s->udev->dev,
"Failed to register v4l2-device (%d)\n", ret);
+ goto err_free_mem;
+ }
+
+ /* SPI master adapter */
+ master = spi_alloc_master(&s->udev->dev, 0);
+ if (master == NULL) {
+ ret = -ENOMEM;
+ goto err_unregister_v4l2_dev;
+ }
+
+ s->master = master;
+ master->bus_num = 0;
+ master->num_chipselect = 1;
+ master->transfer_one_message = msi3101_transfer_one_message;
+ spi_master_set_devdata(master, s);
+ ret = spi_register_master(master);
+ if (ret) {
+ spi_master_put(master);
+ goto err_unregister_v4l2_dev;
+ }
+
+ /* load v4l2 subdevice */
+ sd = v4l2_spi_new_subdev(&s->v4l2_dev, master, &board_info);
+ s->v4l2_subdev = sd;
+ if (sd == NULL) {
+ dev_err(&s->udev->dev, "cannot get v4l2 subdevice\n");
+ ret = -ENODEV;
+ goto err_unregister_master;
+ }
+
+ /* Register controls */
+ v4l2_ctrl_handler_init(&s->hdl, 0);
+ if (s->hdl.error) {
+ ret = s->hdl.error;
+ dev_err(&s->udev->dev, "Could not initialize controls\n");
goto err_free_controls;
}
- s->v4l2_dev.ctrl_handler = &s->ctrl_handler;
+ /* currently all controls are from subdev */
+ v4l2_ctrl_add_handler(&s->hdl, sd->ctrl_handler, NULL);
+
+ s->v4l2_dev.ctrl_handler = &s->hdl;
s->vdev.v4l2_dev = &s->v4l2_dev;
s->vdev.lock = &s->v4l2_lock;
- ret = video_register_device(&s->vdev, VFL_TYPE_GRABBER, -1);
- if (ret < 0) {
+ ret = video_register_device(&s->vdev, VFL_TYPE_SDR, -1);
+ if (ret) {
dev_err(&s->udev->dev,
"Failed to register as video device (%d)\n",
ret);
@@ -1905,10 +1487,12 @@ static int msi3101_probe(struct usb_interface *intf,
return 0;
+err_free_controls:
+ v4l2_ctrl_handler_free(&s->hdl);
+err_unregister_master:
+ spi_unregister_master(s->master);
err_unregister_v4l2_dev:
v4l2_device_unregister(&s->v4l2_dev);
-err_free_controls:
- v4l2_ctrl_handler_free(&s->ctrl_handler);
err_free_mem:
kfree(s);
return ret;
diff --git a/drivers/staging/media/omap24xx/tcm825x.c b/drivers/staging/media/omap24xx/tcm825x.c
index b1ae8e9c7e14..f4dd32df2353 100644
--- a/drivers/staging/media/omap24xx/tcm825x.c
+++ b/drivers/staging/media/omap24xx/tcm825x.c
@@ -249,11 +249,11 @@ static struct vcontrol {
};
-static const struct tcm825x_reg *tcm825x_siz_reg[NUM_IMAGE_SIZES] =
-{ &subqcif, &qqvga, &qcif, &qvga, &cif, &vga };
+static const struct tcm825x_reg *tcm825x_siz_reg[NUM_IMAGE_SIZES] = {
+ &subqcif, &qqvga, &qcif, &qvga, &cif, &vga };
-static const struct tcm825x_reg *tcm825x_fmt_reg[NUM_PIXEL_FORMATS] =
-{ &yuv422, &rgb565 };
+static const struct tcm825x_reg *tcm825x_fmt_reg[NUM_PIXEL_FORMATS] = {
+ &yuv422, &rgb565 };
/*
* Read a value from a register in a TCM825X sensor device. The value is
diff --git a/drivers/staging/media/omap24xx/tcm825x.h b/drivers/staging/media/omap24xx/tcm825x.h
index e2d1bcd0bcbe..9970fb1c596a 100644
--- a/drivers/staging/media/omap24xx/tcm825x.h
+++ b/drivers/staging/media/omap24xx/tcm825x.h
@@ -178,7 +178,7 @@ struct tcm825x_platform_data {
/* Set power state, zero is off, non-zero is on. */
int (*power_set)(int power);
/* Default registers written after power-on or reset. */
- const struct tcm825x_reg *(*default_regs)(void);
+ const struct tcm825x_reg * (*default_regs)(void);
int (*needs_reset)(struct v4l2_int_device *s, void *buf,
struct v4l2_pix_format *fmt);
int (*ifparm)(struct v4l2_ifparm *p);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 8c7f35029cd5..ded31ea6bd39 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -1074,7 +1074,7 @@ static int iss_video_open(struct file *file)
q->ops = &iss_video_vb2ops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct iss_buffer);
- q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(q);
if (ret) {
diff --git a/drivers/staging/media/rtl2832u_sdr/Kconfig b/drivers/staging/media/rtl2832u_sdr/Kconfig
new file mode 100644
index 000000000000..3ede5fe8f0a5
--- /dev/null
+++ b/drivers/staging/media/rtl2832u_sdr/Kconfig
@@ -0,0 +1,7 @@
+config DVB_RTL2832_SDR
+ tristate "Realtek RTL2832 SDR"
+ depends on USB && DVB_CORE && I2C && VIDEO_V4L2 && DVB_USB_RTL28XXU
+ select DVB_RTL2832
+ select VIDEOBUF2_VMALLOC
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+
diff --git a/drivers/staging/media/rtl2832u_sdr/Makefile b/drivers/staging/media/rtl2832u_sdr/Makefile
new file mode 100644
index 000000000000..7e00a0df4631
--- /dev/null
+++ b/drivers/staging/media/rtl2832u_sdr/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832_sdr.o
+
+ccflags-y += -Idrivers/media/dvb-core
+ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -Idrivers/media/tuners
+ccflags-y += -Idrivers/media/usb/dvb-usb-v2
diff --git a/drivers/staging/media/rtl2832u_sdr/rtl2832_sdr.c b/drivers/staging/media/rtl2832u_sdr/rtl2832_sdr.c
new file mode 100644
index 000000000000..104ee8af79af
--- /dev/null
+++ b/drivers/staging/media/rtl2832u_sdr/rtl2832_sdr.c
@@ -0,0 +1,1500 @@
+/*
+ * Realtek RTL2832U SDR driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * GNU Radio plugin "gr-kernel" for device usage will be available at:
+ * http://git.linuxtv.org/anttip/gr-kernel.git
+ *
+ */
+
+#include "dvb_frontend.h"
+#include "rtl2832_sdr.h"
+#include "dvb_usb.h"
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include <linux/jiffies.h>
+#include <linux/math64.h>
+
+#define MAX_BULK_BUFS (10)
+#define BULK_BUFFER_SIZE (128 * 512)
+
+static const struct v4l2_frequency_band bands_adc[] = {
+ {
+ .tuner = 0,
+ .type = V4L2_TUNER_ADC,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 300000,
+ .rangehigh = 300000,
+ },
+ {
+ .tuner = 0,
+ .type = V4L2_TUNER_ADC,
+ .index = 1,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 900001,
+ .rangehigh = 2800000,
+ },
+ {
+ .tuner = 0,
+ .type = V4L2_TUNER_ADC,
+ .index = 2,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 3200000,
+ .rangehigh = 3200000,
+ },
+};
+
+static const struct v4l2_frequency_band bands_fm[] = {
+ {
+ .tuner = 1,
+ .type = V4L2_TUNER_RF,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 50000000,
+ .rangehigh = 2000000000,
+ },
+};
+
+/* stream formats */
+struct rtl2832_sdr_format {
+ char *name;
+ u32 pixelformat;
+};
+
+static struct rtl2832_sdr_format formats[] = {
+ {
+ .name = "IQ U8",
+ .pixelformat = V4L2_SDR_FMT_CU8,
+ }, {
+ .name = "IQ U16LE (emulated)",
+ .pixelformat = V4L2_SDR_FMT_CU16LE,
+ },
+};
+
+static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
+
+/* intermediate buffers with raw data from the USB device */
+struct rtl2832_sdr_frame_buf {
+ struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
+ struct list_head list;
+};
+
+struct rtl2832_sdr_state {
+#define POWER_ON (1 << 1)
+#define URB_BUF (1 << 2)
+ unsigned long flags;
+
+ const struct rtl2832_config *cfg;
+ struct dvb_frontend *fe;
+ struct dvb_usb_device *d;
+ struct i2c_adapter *i2c;
+ u8 bank;
+
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+
+ /* videobuf2 queue and queued buffers list */
+ struct vb2_queue vb_queue;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock; /* Protects queued_bufs */
+ unsigned sequence; /* buffer sequence counter */
+
+ /* Note if taking both locks v4l2_lock must always be locked first! */
+ struct mutex v4l2_lock; /* Protects everything else */
+ struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */
+
+ /* Pointer to our usb_device, will be NULL after unplug */
+ struct usb_device *udev; /* Both mutexes must be held when setting! */
+
+ unsigned int vb_full; /* vb is full and packets dropped */
+
+ struct urb *urb_list[MAX_BULK_BUFS];
+ int buf_num;
+ unsigned long buf_size;
+ u8 *buf_list[MAX_BULK_BUFS];
+ dma_addr_t dma_addr[MAX_BULK_BUFS];
+ int urbs_initialized;
+ int urbs_submitted;
+
+ unsigned int f_adc, f_tuner;
+ u32 pixelformat;
+
+ /* Controls */
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *bandwidth_auto;
+ struct v4l2_ctrl *bandwidth;
+
+ /* for sample rate calc */
+ unsigned int sample;
+ unsigned int sample_measured;
+ unsigned long jiffies_next;
+};
+
+/* write multiple hardware registers */
+static int rtl2832_sdr_wr(struct rtl2832_sdr_state *s, u8 reg, const u8 *val,
+ int len)
+{
+ int ret;
+#define MAX_WR_LEN 24
+#define MAX_WR_XFER_LEN (MAX_WR_LEN + 1)
+ u8 buf[MAX_WR_XFER_LEN];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = s->cfg->i2c_addr,
+ .flags = 0,
+ .len = 1 + len,
+ .buf = buf,
+ }
+ };
+
+ if (WARN_ON(len > MAX_WR_LEN))
+ return -EINVAL;
+
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+ ret = i2c_transfer(s->i2c, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ dev_err(&s->i2c->dev,
+ "%s: I2C wr failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+ return ret;
+}
+
+/* read multiple hardware registers */
+static int rtl2832_sdr_rd(struct rtl2832_sdr_state *s, u8 reg, u8 *val, int len)
+{
+ int ret;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = s->cfg->i2c_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ }, {
+ .addr = s->cfg->i2c_addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+
+ ret = i2c_transfer(s->i2c, msg, 2);
+ if (ret == 2) {
+ ret = 0;
+ } else {
+ dev_err(&s->i2c->dev,
+ "%s: I2C rd failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+ return ret;
+}
+
+/* write multiple registers */
+static int rtl2832_sdr_wr_regs(struct rtl2832_sdr_state *s, u16 reg,
+ const u8 *val, int len)
+{
+ int ret;
+ u8 reg2 = (reg >> 0) & 0xff;
+ u8 bank = (reg >> 8) & 0xff;
+
+ /* switch bank if needed */
+ if (bank != s->bank) {
+ ret = rtl2832_sdr_wr(s, 0x00, &bank, 1);
+ if (ret)
+ return ret;
+
+ s->bank = bank;
+ }
+
+ return rtl2832_sdr_wr(s, reg2, val, len);
+}
+
+/* read multiple registers */
+static int rtl2832_sdr_rd_regs(struct rtl2832_sdr_state *s, u16 reg, u8 *val,
+ int len)
+{
+ int ret;
+ u8 reg2 = (reg >> 0) & 0xff;
+ u8 bank = (reg >> 8) & 0xff;
+
+ /* switch bank if needed */
+ if (bank != s->bank) {
+ ret = rtl2832_sdr_wr(s, 0x00, &bank, 1);
+ if (ret)
+ return ret;
+
+ s->bank = bank;
+ }
+
+ return rtl2832_sdr_rd(s, reg2, val, len);
+}
+
+/* write single register */
+static int rtl2832_sdr_wr_reg(struct rtl2832_sdr_state *s, u16 reg, u8 val)
+{
+ return rtl2832_sdr_wr_regs(s, reg, &val, 1);
+}
+
+#if 0
+/* read single register */
+static int rtl2832_sdr_rd_reg(struct rtl2832_sdr_state *s, u16 reg, u8 *val)
+{
+ return rtl2832_sdr_rd_regs(s, reg, val, 1);
+}
+#endif
+
+/* write single register with mask */
+static int rtl2832_sdr_wr_reg_mask(struct rtl2832_sdr_state *s, u16 reg,
+ u8 val, u8 mask)
+{
+ int ret;
+ u8 tmp;
+
+ /* no need for read if whole reg is written */
+ if (mask != 0xff) {
+ ret = rtl2832_sdr_rd_regs(s, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
+ val &= mask;
+ tmp &= ~mask;
+ val |= tmp;
+ }
+
+ return rtl2832_sdr_wr_regs(s, reg, &val, 1);
+}
+
+#if 0
+/* read single register with mask */
+static int rtl2832_sdr_rd_reg_mask(struct rtl2832_sdr_state *s, u16 reg,
+ u8 *val, u8 mask)
+{
+ int ret, i;
+ u8 tmp;
+
+ ret = rtl2832_sdr_rd_regs(s, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
+ tmp &= mask;
+
+ /* find position of the first bit */
+ for (i = 0; i < 8; i++) {
+ if ((mask >> i) & 0x01)
+ break;
+ }
+ *val = tmp >> i;
+
+ return 0;
+}
+#endif
+
+/* Private functions */
+static struct rtl2832_sdr_frame_buf *rtl2832_sdr_get_next_fill_buf(
+ struct rtl2832_sdr_state *s)
+{
+ unsigned long flags = 0;
+ struct rtl2832_sdr_frame_buf *buf = NULL;
+
+ spin_lock_irqsave(&s->queued_bufs_lock, flags);
+ if (list_empty(&s->queued_bufs))
+ goto leave;
+
+ buf = list_entry(s->queued_bufs.next,
+ struct rtl2832_sdr_frame_buf, list);
+ list_del(&buf->list);
+leave:
+ spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
+ return buf;
+}
+
+static unsigned int rtl2832_sdr_convert_stream(struct rtl2832_sdr_state *s,
+ void *dst, const u8 *src, unsigned int src_len)
+{
+ unsigned int dst_len;
+
+ if (s->pixelformat == V4L2_SDR_FMT_CU8) {
+ /* native stream, no need to convert */
+ memcpy(dst, src, src_len);
+ dst_len = src_len;
+ } else if (s->pixelformat == V4L2_SDR_FMT_CU16LE) {
+ /* convert u8 to u16 */
+ unsigned int i;
+ u16 *u16dst = dst;
+ for (i = 0; i < src_len; i++)
+ *u16dst++ = (src[i] << 8) | (src[i] >> 0);
+ dst_len = 2 * src_len;
+ } else {
+ dst_len = 0;
+ }
+
+ /* calculate sampling rate and output it at 10 second intervals */
+ if (unlikely(time_is_before_jiffies(s->jiffies_next))) {
+#define MSECS 10000UL
+ unsigned int samples = s->sample - s->sample_measured;
+ s->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
+ s->sample_measured = s->sample;
+ dev_dbg(&s->udev->dev,
+ "slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
+ src_len, samples, MSECS,
+ samples * 1000UL / MSECS);
+ }
+
+ /* total number of I+Q pairs */
+ s->sample += src_len / 2;
+
+ return dst_len;
+}
+
+/*
+ * This gets called for the bulk stream pipe. This is done in interrupt
+ * time, so it has to be fast, not crash, and not stall. Neat.
+ */
+static void rtl2832_sdr_urb_complete(struct urb *urb)
+{
+ struct rtl2832_sdr_state *s = urb->context;
+ struct rtl2832_sdr_frame_buf *fbuf;
+
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%s: status=%d length=%d/%d errors=%d\n",
+ __func__, urb->status, urb->actual_length,
+ urb->transfer_buffer_length, urb->error_count);
+
+ switch (urb->status) {
+ case 0: /* success */
+ case -ETIMEDOUT: /* NAK */
+ break;
+ case -ECONNRESET: /* kill */
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default: /* error */
+ dev_err_ratelimited(&s->udev->dev, "urb failed=%d\n",
+ urb->status);
+ break;
+ }
+
+ if (likely(urb->actual_length > 0)) {
+ void *ptr;
+ unsigned int len;
+ /* get free framebuffer */
+ fbuf = rtl2832_sdr_get_next_fill_buf(s);
+ if (unlikely(fbuf == NULL)) {
+ s->vb_full++;
+ dev_notice_ratelimited(&s->udev->dev,
+ "videobuf is full, %d packets dropped\n",
+ s->vb_full);
+ goto skip;
+ }
+
+ /* fill framebuffer */
+ ptr = vb2_plane_vaddr(&fbuf->vb, 0);
+ len = rtl2832_sdr_convert_stream(s, ptr, urb->transfer_buffer,
+ urb->actual_length);
+ vb2_set_plane_payload(&fbuf->vb, 0, len);
+ v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp);
+ fbuf->vb.v4l2_buf.sequence = s->sequence++;
+ vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+ }
+skip:
+ usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static int rtl2832_sdr_kill_urbs(struct rtl2832_sdr_state *s)
+{
+ int i;
+
+ for (i = s->urbs_submitted - 1; i >= 0; i--) {
+ dev_dbg(&s->udev->dev, "%s: kill urb=%d\n", __func__, i);
+ /* stop the URB */
+ usb_kill_urb(s->urb_list[i]);
+ }
+ s->urbs_submitted = 0;
+
+ return 0;
+}
+
+static int rtl2832_sdr_submit_urbs(struct rtl2832_sdr_state *s)
+{
+ int i, ret;
+
+ for (i = 0; i < s->urbs_initialized; i++) {
+ dev_dbg(&s->udev->dev, "%s: submit urb=%d\n", __func__, i);
+ ret = usb_submit_urb(s->urb_list[i], GFP_ATOMIC);
+ if (ret) {
+ dev_err(&s->udev->dev,
+ "Could not submit urb no. %d - get them all back\n",
+ i);
+ rtl2832_sdr_kill_urbs(s);
+ return ret;
+ }
+ s->urbs_submitted++;
+ }
+
+ return 0;
+}
+
+static int rtl2832_sdr_free_stream_bufs(struct rtl2832_sdr_state *s)
+{
+ if (s->flags & USB_STATE_URB_BUF) {
+ while (s->buf_num) {
+ s->buf_num--;
+ dev_dbg(&s->udev->dev, "%s: free buf=%d\n",
+ __func__, s->buf_num);
+ usb_free_coherent(s->udev, s->buf_size,
+ s->buf_list[s->buf_num],
+ s->dma_addr[s->buf_num]);
+ }
+ }
+ s->flags &= ~USB_STATE_URB_BUF;
+
+ return 0;
+}
+
+static int rtl2832_sdr_alloc_stream_bufs(struct rtl2832_sdr_state *s)
+{
+ s->buf_num = 0;
+ s->buf_size = BULK_BUFFER_SIZE;
+
+ dev_dbg(&s->udev->dev,
+ "%s: all in all I will use %u bytes for streaming\n",
+ __func__, MAX_BULK_BUFS * BULK_BUFFER_SIZE);
+
+ for (s->buf_num = 0; s->buf_num < MAX_BULK_BUFS; s->buf_num++) {
+ s->buf_list[s->buf_num] = usb_alloc_coherent(s->udev,
+ BULK_BUFFER_SIZE, GFP_ATOMIC,
+ &s->dma_addr[s->buf_num]);
+ if (!s->buf_list[s->buf_num]) {
+ dev_dbg(&s->udev->dev, "%s: alloc buf=%d failed\n",
+ __func__, s->buf_num);
+ rtl2832_sdr_free_stream_bufs(s);
+ return -ENOMEM;
+ }
+
+ dev_dbg(&s->udev->dev, "%s: alloc buf=%d %p (dma %llu)\n",
+ __func__, s->buf_num,
+ s->buf_list[s->buf_num],
+ (long long)s->dma_addr[s->buf_num]);
+ s->flags |= USB_STATE_URB_BUF;
+ }
+
+ return 0;
+}
+
+static int rtl2832_sdr_free_urbs(struct rtl2832_sdr_state *s)
+{
+ int i;
+
+ rtl2832_sdr_kill_urbs(s);
+
+ for (i = s->urbs_initialized - 1; i >= 0; i--) {
+ if (s->urb_list[i]) {
+ dev_dbg(&s->udev->dev, "%s: free urb=%d\n",
+ __func__, i);
+ /* free the URBs */
+ usb_free_urb(s->urb_list[i]);
+ }
+ }
+ s->urbs_initialized = 0;
+
+ return 0;
+}
+
+static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_state *s)
+{
+ int i, j;
+
+ /* allocate the URBs */
+ for (i = 0; i < MAX_BULK_BUFS; i++) {
+ dev_dbg(&s->udev->dev, "%s: alloc urb=%d\n", __func__, i);
+ s->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!s->urb_list[i]) {
+ dev_dbg(&s->udev->dev, "%s: failed\n", __func__);
+ for (j = 0; j < i; j++)
+ usb_free_urb(s->urb_list[j]);
+ return -ENOMEM;
+ }
+ usb_fill_bulk_urb(s->urb_list[i],
+ s->udev,
+ usb_rcvbulkpipe(s->udev, 0x81),
+ s->buf_list[i],
+ BULK_BUFFER_SIZE,
+ rtl2832_sdr_urb_complete, s);
+
+ s->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ s->urb_list[i]->transfer_dma = s->dma_addr[i];
+ s->urbs_initialized++;
+ }
+
+ return 0;
+}
+
+/* Must be called with vb_queue_lock held */
+static void rtl2832_sdr_cleanup_queued_bufs(struct rtl2832_sdr_state *s)
+{
+ unsigned long flags = 0;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ spin_lock_irqsave(&s->queued_bufs_lock, flags);
+ while (!list_empty(&s->queued_bufs)) {
+ struct rtl2832_sdr_frame_buf *buf;
+ buf = list_entry(s->queued_bufs.next,
+ struct rtl2832_sdr_frame_buf, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
+}
+
+/* The user yanked out the cable... */
+static void rtl2832_sdr_release_sec(struct dvb_frontend *fe)
+{
+ struct rtl2832_sdr_state *s = fe->sec_priv;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ mutex_lock(&s->vb_queue_lock);
+ mutex_lock(&s->v4l2_lock);
+ /* No need to keep the urbs around after disconnection */
+ s->udev = NULL;
+
+ v4l2_device_disconnect(&s->v4l2_dev);
+ video_unregister_device(&s->vdev);
+ mutex_unlock(&s->v4l2_lock);
+ mutex_unlock(&s->vb_queue_lock);
+
+ v4l2_device_put(&s->v4l2_dev);
+
+ fe->sec_priv = NULL;
+}
+
+static int rtl2832_sdr_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct rtl2832_sdr_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strlcpy(cap->card, s->vdev.name, sizeof(cap->card));
+ usb_make_path(s->udev, cap->bus_info, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE | V4L2_CAP_TUNER;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+/* Videobuf2 operations */
+static int rtl2832_sdr_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct rtl2832_sdr_state *s = vb2_get_drv_priv(vq);
+ dev_dbg(&s->udev->dev, "%s: *nbuffers=%d\n", __func__, *nbuffers);
+
+ /* Need at least 8 buffers */
+ if (vq->num_buffers + *nbuffers < 8)
+ *nbuffers = 8 - vq->num_buffers;
+ *nplanes = 1;
+ /* 2 = max 16-bit sample returned */
+ sizes[0] = PAGE_ALIGN(BULK_BUFFER_SIZE * 2);
+ dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
+ __func__, *nbuffers, sizes[0]);
+ return 0;
+}
+
+static int rtl2832_sdr_buf_prepare(struct vb2_buffer *vb)
+{
+ struct rtl2832_sdr_state *s = vb2_get_drv_priv(vb->vb2_queue);
+
+ /* Don't allow queuing new buffers after device disconnection */
+ if (!s->udev)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void rtl2832_sdr_buf_queue(struct vb2_buffer *vb)
+{
+ struct rtl2832_sdr_state *s = vb2_get_drv_priv(vb->vb2_queue);
+ struct rtl2832_sdr_frame_buf *buf =
+ container_of(vb, struct rtl2832_sdr_frame_buf, vb);
+ unsigned long flags = 0;
+
+ /* Check the device has not disconnected between prep and queuing */
+ if (!s->udev) {
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ spin_lock_irqsave(&s->queued_bufs_lock, flags);
+ list_add_tail(&buf->list, &s->queued_bufs);
+ spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
+}
+
+static int rtl2832_sdr_set_adc(struct rtl2832_sdr_state *s)
+{
+ struct dvb_frontend *fe = s->fe;
+ int ret;
+ unsigned int f_sr, f_if;
+ u8 buf[4], u8tmp1, u8tmp2;
+ u64 u64tmp;
+ u32 u32tmp;
+ dev_dbg(&s->udev->dev, "%s: f_adc=%u\n", __func__, s->f_adc);
+
+ if (!test_bit(POWER_ON, &s->flags))
+ return 0;
+
+ if (s->f_adc == 0)
+ return 0;
+
+ f_sr = s->f_adc;
+
+ ret = rtl2832_sdr_wr_regs(s, 0x13e, "\x00\x00", 2);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_wr_regs(s, 0x115, "\x00\x00\x00\x00", 4);
+ if (ret)
+ goto err;
+
+ /* get IF from tuner */
+ if (fe->ops.tuner_ops.get_if_frequency)
+ ret = fe->ops.tuner_ops.get_if_frequency(fe, &f_if);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ goto err;
+
+ /* program IF */
+ u64tmp = f_if % s->cfg->xtal;
+ u64tmp *= 0x400000;
+ u64tmp = div_u64(u64tmp, s->cfg->xtal);
+ u64tmp = -u64tmp;
+ u32tmp = u64tmp & 0x3fffff;
+
+ dev_dbg(&s->udev->dev, "%s: f_if=%u if_ctl=%08x\n",
+ __func__, f_if, u32tmp);
+
+ buf[0] = (u32tmp >> 16) & 0xff;
+ buf[1] = (u32tmp >> 8) & 0xff;
+ buf[2] = (u32tmp >> 0) & 0xff;
+
+ ret = rtl2832_sdr_wr_regs(s, 0x119, buf, 3);
+ if (ret)
+ goto err;
+
+ /* BB / IF mode */
+ /* POR: 0x1b1=0x1f, 0x008=0x0d, 0x006=0x80 */
+ if (f_if) {
+ u8tmp1 = 0x1a; /* disable Zero-IF */
+ u8tmp2 = 0x8d; /* enable ADC I */
+ } else {
+ u8tmp1 = 0x1b; /* enable Zero-IF, DC, IQ */
+ u8tmp2 = 0xcd; /* enable ADC I, ADC Q */
+ }
+
+ ret = rtl2832_sdr_wr_reg(s, 0x1b1, u8tmp1);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_wr_reg(s, 0x008, u8tmp2);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_wr_reg(s, 0x006, 0x80);
+ if (ret)
+ goto err;
+
+ /* program sampling rate (resampling down) */
+ u32tmp = div_u64(s->cfg->xtal * 0x400000ULL, f_sr * 4U);
+ u32tmp <<= 2;
+ buf[0] = (u32tmp >> 24) & 0xff;
+ buf[1] = (u32tmp >> 16) & 0xff;
+ buf[2] = (u32tmp >> 8) & 0xff;
+ buf[3] = (u32tmp >> 0) & 0xff;
+ ret = rtl2832_sdr_wr_regs(s, 0x19f, buf, 4);
+ if (ret)
+ goto err;
+
+ /* low-pass filter */
+ ret = rtl2832_sdr_wr_regs(s, 0x11c,
+ "\xca\xdc\xd7\xd8\xe0\xf2\x0e\x35\x06\x50\x9c\x0d\x71\x11\x14\x71\x74\x19\x41\xa5",
+ 20);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_wr_regs(s, 0x017, "\x11\x10", 2);
+ if (ret)
+ goto err;
+
+ /* mode */
+ ret = rtl2832_sdr_wr_regs(s, 0x019, "\x05", 1);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_wr_regs(s, 0x01a, "\x1b\x16\x0d\x06\x01\xff", 6);
+ if (ret)
+ goto err;
+
+ /* FSM */
+ ret = rtl2832_sdr_wr_regs(s, 0x192, "\x00\xf0\x0f", 3);
+ if (ret)
+ goto err;
+
+ /* PID filter */
+ ret = rtl2832_sdr_wr_regs(s, 0x061, "\x60", 1);
+ if (ret)
+ goto err;
+
+ /* use RF tuner based settings */
+ switch (s->cfg->tuner) {
+ case RTL2832_TUNER_E4000:
+ ret = rtl2832_sdr_wr_regs(s, 0x112, "\x5a", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x102, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x103, "\x5a", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1c7, "\x30", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x104, "\xd0", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x105, "\xbe", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1c8, "\x18", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x106, "\x35", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1c9, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1ca, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1cb, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x107, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1cd, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1ce, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x108, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x109, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x10a, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x10b, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x011, "\xd4", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1e5, "\xf0", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1d9, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1db, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1dd, "\x14", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1de, "\xec", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1d8, "\x0c", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1e6, "\x02", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1d7, "\x09", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x00d, "\x83", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x010, "\x49", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x00d, "\x87", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x00d, "\x85", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x013, "\x02", 1);
+ break;
+ case RTL2832_TUNER_FC0012:
+ case RTL2832_TUNER_FC0013:
+ ret = rtl2832_sdr_wr_regs(s, 0x112, "\x5a", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x102, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x103, "\x5a", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1c7, "\x2c", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x104, "\xcc", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x105, "\xbe", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1c8, "\x16", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x106, "\x35", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1c9, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1ca, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1cb, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x107, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1cd, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1ce, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x108, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x109, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x10a, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x10b, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x011, "\xe9\xbf", 2);
+ ret = rtl2832_sdr_wr_regs(s, 0x1e5, "\xf0", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1d9, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1db, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1dd, "\x11", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1de, "\xef", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1d8, "\x0c", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1e6, "\x02", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1d7, "\x09", 1);
+ break;
+ case RTL2832_TUNER_R820T:
+ ret = rtl2832_sdr_wr_regs(s, 0x112, "\x5a", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x102, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x115, "\x01", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x103, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1c7, "\x24", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x104, "\xcc", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x105, "\xbe", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1c8, "\x14", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x106, "\x35", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1c9, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1ca, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1cb, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x107, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1cd, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x1ce, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x108, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x109, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x10a, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x10b, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(s, 0x011, "\xf4", 1);
+ break;
+ default:
+ dev_notice(&s->udev->dev, "Unsupported tuner\n");
+ }
+
+ /* software reset */
+ ret = rtl2832_sdr_wr_reg_mask(s, 0x101, 0x04, 0x04);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_wr_reg_mask(s, 0x101, 0x00, 0x04);
+ if (ret)
+ goto err;
+err:
+ return ret;
+};
+
+static void rtl2832_sdr_unset_adc(struct rtl2832_sdr_state *s)
+{
+ int ret;
+
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ /* PID filter */
+ ret = rtl2832_sdr_wr_regs(s, 0x061, "\xe0", 1);
+ if (ret)
+ goto err;
+
+ /* mode */
+ ret = rtl2832_sdr_wr_regs(s, 0x019, "\x20", 1);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_wr_regs(s, 0x017, "\x11\x10", 2);
+ if (ret)
+ goto err;
+
+ /* FSM */
+ ret = rtl2832_sdr_wr_regs(s, 0x192, "\x00\x0f\xff", 3);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_wr_regs(s, 0x13e, "\x40\x00", 2);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_wr_regs(s, 0x115, "\x06\x3f\xce\xcc", 4);
+ if (ret)
+ goto err;
+err:
+ return;
+};
+
+static int rtl2832_sdr_set_tuner_freq(struct rtl2832_sdr_state *s)
+{
+ struct dvb_frontend *fe = s->fe;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct v4l2_ctrl *bandwidth_auto;
+ struct v4l2_ctrl *bandwidth;
+
+ /*
+ * tuner RF (Hz)
+ */
+ if (s->f_tuner == 0)
+ return 0;
+
+ /*
+ * bandwidth (Hz)
+ */
+ bandwidth_auto = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO);
+ bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
+ if (v4l2_ctrl_g_ctrl(bandwidth_auto)) {
+ c->bandwidth_hz = s->f_adc;
+ v4l2_ctrl_s_ctrl(bandwidth, s->f_adc);
+ } else {
+ c->bandwidth_hz = v4l2_ctrl_g_ctrl(bandwidth);
+ }
+
+ c->frequency = s->f_tuner;
+ c->delivery_system = SYS_DVBT;
+
+ dev_dbg(&s->udev->dev, "%s: frequency=%u bandwidth=%d\n",
+ __func__, c->frequency, c->bandwidth_hz);
+
+ if (!test_bit(POWER_ON, &s->flags))
+ return 0;
+
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
+
+ return 0;
+};
+
+static int rtl2832_sdr_set_tuner(struct rtl2832_sdr_state *s)
+{
+ struct dvb_frontend *fe = s->fe;
+
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ if (fe->ops.tuner_ops.init)
+ fe->ops.tuner_ops.init(fe);
+
+ return 0;
+};
+
+static void rtl2832_sdr_unset_tuner(struct rtl2832_sdr_state *s)
+{
+ struct dvb_frontend *fe = s->fe;
+
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ if (fe->ops.tuner_ops.sleep)
+ fe->ops.tuner_ops.sleep(fe);
+
+ return;
+};
+
+static int rtl2832_sdr_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct rtl2832_sdr_state *s = vb2_get_drv_priv(vq);
+ int ret;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ if (!s->udev)
+ return -ENODEV;
+
+ if (mutex_lock_interruptible(&s->v4l2_lock))
+ return -ERESTARTSYS;
+
+ if (s->d->props->power_ctrl)
+ s->d->props->power_ctrl(s->d, 1);
+
+ set_bit(POWER_ON, &s->flags);
+
+ ret = rtl2832_sdr_set_tuner(s);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_set_tuner_freq(s);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_set_adc(s);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_alloc_stream_bufs(s);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_sdr_alloc_urbs(s);
+ if (ret)
+ goto err;
+
+ s->sequence = 0;
+
+ ret = rtl2832_sdr_submit_urbs(s);
+ if (ret)
+ goto err;
+
+err:
+ mutex_unlock(&s->v4l2_lock);
+
+ return ret;
+}
+
+static int rtl2832_sdr_stop_streaming(struct vb2_queue *vq)
+{
+ struct rtl2832_sdr_state *s = vb2_get_drv_priv(vq);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ if (mutex_lock_interruptible(&s->v4l2_lock))
+ return -ERESTARTSYS;
+
+ rtl2832_sdr_kill_urbs(s);
+ rtl2832_sdr_free_urbs(s);
+ rtl2832_sdr_free_stream_bufs(s);
+ rtl2832_sdr_cleanup_queued_bufs(s);
+ rtl2832_sdr_unset_adc(s);
+ rtl2832_sdr_unset_tuner(s);
+
+ clear_bit(POWER_ON, &s->flags);
+
+ if (s->d->props->power_ctrl)
+ s->d->props->power_ctrl(s->d, 0);
+
+ mutex_unlock(&s->v4l2_lock);
+
+ return 0;
+}
+
+static struct vb2_ops rtl2832_sdr_vb2_ops = {
+ .queue_setup = rtl2832_sdr_queue_setup,
+ .buf_prepare = rtl2832_sdr_buf_prepare,
+ .buf_queue = rtl2832_sdr_buf_queue,
+ .start_streaming = rtl2832_sdr_start_streaming,
+ .stop_streaming = rtl2832_sdr_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int rtl2832_sdr_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *v)
+{
+ struct rtl2832_sdr_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s: index=%d type=%d\n",
+ __func__, v->index, v->type);
+
+ if (v->index == 0) {
+ strlcpy(v->name, "ADC: Realtek RTL2832", sizeof(v->name));
+ v->type = V4L2_TUNER_ADC;
+ v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ v->rangelow = 300000;
+ v->rangehigh = 3200000;
+ } else if (v->index == 1) {
+ strlcpy(v->name, "RF: <unknown>", sizeof(v->name));
+ v->type = V4L2_TUNER_RF;
+ v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ v->rangelow = 50000000;
+ v->rangehigh = 2000000000;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtl2832_sdr_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *v)
+{
+ struct rtl2832_sdr_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ if (v->index > 1)
+ return -EINVAL;
+ return 0;
+}
+
+static int rtl2832_sdr_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
+{
+ struct rtl2832_sdr_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d index=%d\n",
+ __func__, band->tuner, band->type, band->index);
+
+ if (band->tuner == 0) {
+ if (band->index >= ARRAY_SIZE(bands_adc))
+ return -EINVAL;
+
+ *band = bands_adc[band->index];
+ } else if (band->tuner == 1) {
+ if (band->index >= ARRAY_SIZE(bands_fm))
+ return -EINVAL;
+
+ *band = bands_fm[band->index];
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtl2832_sdr_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct rtl2832_sdr_state *s = video_drvdata(file);
+ int ret = 0;
+ dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d\n",
+ __func__, f->tuner, f->type);
+
+ if (f->tuner == 0) {
+ f->frequency = s->f_adc;
+ f->type = V4L2_TUNER_ADC;
+ } else if (f->tuner == 1) {
+ f->frequency = s->f_tuner;
+ f->type = V4L2_TUNER_RF;
+ } else {
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int rtl2832_sdr_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
+{
+ struct rtl2832_sdr_state *s = video_drvdata(file);
+ int ret, band;
+
+ dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d frequency=%u\n",
+ __func__, f->tuner, f->type, f->frequency);
+
+ /* ADC band midpoints */
+ #define BAND_ADC_0 ((bands_adc[0].rangehigh + bands_adc[1].rangelow) / 2)
+ #define BAND_ADC_1 ((bands_adc[1].rangehigh + bands_adc[2].rangelow) / 2)
+
+ if (f->tuner == 0 && f->type == V4L2_TUNER_ADC) {
+ if (f->frequency < BAND_ADC_0)
+ band = 0;
+ else if (f->frequency < BAND_ADC_1)
+ band = 1;
+ else
+ band = 2;
+
+ s->f_adc = clamp_t(unsigned int, f->frequency,
+ bands_adc[band].rangelow,
+ bands_adc[band].rangehigh);
+
+ dev_dbg(&s->udev->dev, "%s: ADC frequency=%u Hz\n",
+ __func__, s->f_adc);
+ ret = rtl2832_sdr_set_adc(s);
+ } else if (f->tuner == 1) {
+ s->f_tuner = clamp_t(unsigned int, f->frequency,
+ bands_fm[0].rangelow,
+ bands_fm[0].rangehigh);
+ dev_dbg(&s->udev->dev, "%s: RF frequency=%u Hz\n",
+ __func__, f->frequency);
+
+ ret = rtl2832_sdr_set_tuner_freq(s);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int rtl2832_sdr_enum_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct rtl2832_sdr_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+
+ strlcpy(f->description, formats[f->index].name, sizeof(f->description));
+ f->pixelformat = formats[f->index].pixelformat;
+
+ return 0;
+}
+
+static int rtl2832_sdr_g_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rtl2832_sdr_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ f->fmt.sdr.pixelformat = s->pixelformat;
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+
+ return 0;
+}
+
+static int rtl2832_sdr_s_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rtl2832_sdr_state *s = video_drvdata(file);
+ struct vb2_queue *q = &s->vb_queue;
+ int i;
+ dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
+ (char *)&f->fmt.sdr.pixelformat);
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
+ s->pixelformat = f->fmt.sdr.pixelformat;
+ return 0;
+ }
+ }
+
+ f->fmt.sdr.pixelformat = formats[0].pixelformat;
+ s->pixelformat = formats[0].pixelformat;
+
+ return 0;
+}
+
+static int rtl2832_sdr_try_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rtl2832_sdr_state *s = video_drvdata(file);
+ int i;
+ dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
+ (char *)&f->fmt.sdr.pixelformat);
+
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat)
+ return 0;
+ }
+
+ f->fmt.sdr.pixelformat = formats[0].pixelformat;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops rtl2832_sdr_ioctl_ops = {
+ .vidioc_querycap = rtl2832_sdr_querycap,
+
+ .vidioc_enum_fmt_sdr_cap = rtl2832_sdr_enum_fmt_sdr_cap,
+ .vidioc_g_fmt_sdr_cap = rtl2832_sdr_g_fmt_sdr_cap,
+ .vidioc_s_fmt_sdr_cap = rtl2832_sdr_s_fmt_sdr_cap,
+ .vidioc_try_fmt_sdr_cap = rtl2832_sdr_try_fmt_sdr_cap,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_g_tuner = rtl2832_sdr_g_tuner,
+ .vidioc_s_tuner = rtl2832_sdr_s_tuner,
+
+ .vidioc_enum_freq_bands = rtl2832_sdr_enum_freq_bands,
+ .vidioc_g_frequency = rtl2832_sdr_g_frequency,
+ .vidioc_s_frequency = rtl2832_sdr_s_frequency,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+static const struct v4l2_file_operations rtl2832_sdr_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static struct video_device rtl2832_sdr_template = {
+ .name = "Realtek RTL2832 SDR",
+ .release = video_device_release_empty,
+ .fops = &rtl2832_sdr_fops,
+ .ioctl_ops = &rtl2832_sdr_ioctl_ops,
+};
+
+static int rtl2832_sdr_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rtl2832_sdr_state *s =
+ container_of(ctrl->handler, struct rtl2832_sdr_state,
+ hdl);
+ struct dvb_frontend *fe = s->fe;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ dev_dbg(&s->udev->dev,
+ "%s: id=%d name=%s val=%d min=%d max=%d step=%d\n",
+ __func__, ctrl->id, ctrl->name, ctrl->val,
+ ctrl->minimum, ctrl->maximum, ctrl->step);
+
+ switch (ctrl->id) {
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
+ case V4L2_CID_RF_TUNER_BANDWIDTH:
+ /* TODO: these controls should be moved to tuner drivers */
+ if (s->bandwidth_auto->val) {
+ /* Round towards the closest legal value */
+ s32 val = s->f_adc + s->bandwidth->step / 2;
+ u32 offset;
+ val = clamp(val, s->bandwidth->minimum, s->bandwidth->maximum);
+ offset = val - s->bandwidth->minimum;
+ offset = s->bandwidth->step * (offset / s->bandwidth->step);
+ s->bandwidth->val = s->bandwidth->minimum + offset;
+ }
+
+ c->bandwidth_hz = s->bandwidth->val;
+
+ if (!test_bit(POWER_ON, &s->flags))
+ return 0;
+
+ if (fe->ops.tuner_ops.set_params)
+ ret = fe->ops.tuner_ops.set_params(fe);
+ else
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops rtl2832_sdr_ctrl_ops = {
+ .s_ctrl = rtl2832_sdr_s_ctrl,
+};
+
+static void rtl2832_sdr_video_release(struct v4l2_device *v)
+{
+ struct rtl2832_sdr_state *s =
+ container_of(v, struct rtl2832_sdr_state, v4l2_dev);
+
+ v4l2_ctrl_handler_free(&s->hdl);
+ v4l2_device_unregister(&s->v4l2_dev);
+ kfree(s);
+}
+
+struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+ struct v4l2_subdev *sd)
+{
+ int ret;
+ struct rtl2832_sdr_state *s;
+ const struct v4l2_ctrl_ops *ops = &rtl2832_sdr_ctrl_ops;
+ struct dvb_usb_device *d = i2c_get_adapdata(i2c);
+
+ s = kzalloc(sizeof(struct rtl2832_sdr_state), GFP_KERNEL);
+ if (s == NULL) {
+ dev_err(&d->udev->dev,
+ "Could not allocate memory for rtl2832_sdr_state\n");
+ return NULL;
+ }
+
+ /* setup the state */
+ s->fe = fe;
+ s->d = d;
+ s->udev = d->udev;
+ s->i2c = i2c;
+ s->cfg = cfg;
+ s->f_adc = bands_adc[0].rangelow;
+ s->f_tuner = bands_fm[0].rangelow;
+ s->pixelformat = V4L2_SDR_FMT_CU8;
+
+ mutex_init(&s->v4l2_lock);
+ mutex_init(&s->vb_queue_lock);
+ spin_lock_init(&s->queued_bufs_lock);
+ INIT_LIST_HEAD(&s->queued_bufs);
+
+ /* Init videobuf2 queue structure */
+ s->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
+ s->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ s->vb_queue.drv_priv = s;
+ s->vb_queue.buf_struct_size = sizeof(struct rtl2832_sdr_frame_buf);
+ s->vb_queue.ops = &rtl2832_sdr_vb2_ops;
+ s->vb_queue.mem_ops = &vb2_vmalloc_memops;
+ s->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ ret = vb2_queue_init(&s->vb_queue);
+ if (ret) {
+ dev_err(&s->udev->dev, "Could not initialize vb2 queue\n");
+ goto err_free_mem;
+ }
+
+ /* Register controls */
+ switch (s->cfg->tuner) {
+ case RTL2832_TUNER_E4000:
+ v4l2_ctrl_handler_init(&s->hdl, 9);
+ if (sd)
+ v4l2_ctrl_add_handler(&s->hdl, sd->ctrl_handler, NULL);
+ break;
+ case RTL2832_TUNER_R820T:
+ v4l2_ctrl_handler_init(&s->hdl, 2);
+ s->bandwidth_auto = v4l2_ctrl_new_std(&s->hdl, ops, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 1, 1);
+ s->bandwidth = v4l2_ctrl_new_std(&s->hdl, ops, V4L2_CID_RF_TUNER_BANDWIDTH, 0, 8000000, 100000, 0);
+ v4l2_ctrl_auto_cluster(2, &s->bandwidth_auto, 0, false);
+ break;
+ case RTL2832_TUNER_FC0012:
+ case RTL2832_TUNER_FC0013:
+ v4l2_ctrl_handler_init(&s->hdl, 2);
+ s->bandwidth_auto = v4l2_ctrl_new_std(&s->hdl, ops, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 1, 1);
+ s->bandwidth = v4l2_ctrl_new_std(&s->hdl, ops, V4L2_CID_RF_TUNER_BANDWIDTH, 6000000, 8000000, 1000000, 6000000);
+ v4l2_ctrl_auto_cluster(2, &s->bandwidth_auto, 0, false);
+ break;
+ default:
+ v4l2_ctrl_handler_init(&s->hdl, 0);
+ dev_notice(&s->udev->dev, "%s: Unsupported tuner\n",
+ KBUILD_MODNAME);
+ goto err_free_controls;
+ }
+
+ if (s->hdl.error) {
+ ret = s->hdl.error;
+ dev_err(&s->udev->dev, "Could not initialize controls\n");
+ goto err_free_controls;
+ }
+
+ /* Init video_device structure */
+ s->vdev = rtl2832_sdr_template;
+ s->vdev.queue = &s->vb_queue;
+ s->vdev.queue->lock = &s->vb_queue_lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev.flags);
+ video_set_drvdata(&s->vdev, s);
+
+ /* Register the v4l2_device structure */
+ s->v4l2_dev.release = rtl2832_sdr_video_release;
+ ret = v4l2_device_register(&s->udev->dev, &s->v4l2_dev);
+ if (ret) {
+ dev_err(&s->udev->dev,
+ "Failed to register v4l2-device (%d)\n", ret);
+ goto err_free_controls;
+ }
+
+ s->v4l2_dev.ctrl_handler = &s->hdl;
+ s->vdev.v4l2_dev = &s->v4l2_dev;
+ s->vdev.lock = &s->v4l2_lock;
+ s->vdev.vfl_dir = VFL_DIR_RX;
+
+ ret = video_register_device(&s->vdev, VFL_TYPE_SDR, -1);
+ if (ret) {
+ dev_err(&s->udev->dev,
+ "Failed to register as video device (%d)\n",
+ ret);
+ goto err_unregister_v4l2_dev;
+ }
+ dev_info(&s->udev->dev, "Registered as %s\n",
+ video_device_node_name(&s->vdev));
+
+ fe->sec_priv = s;
+ fe->ops.release_sec = rtl2832_sdr_release_sec;
+
+ dev_info(&s->i2c->dev, "%s: Realtek RTL2832 SDR attached\n",
+ KBUILD_MODNAME);
+ return fe;
+
+err_unregister_v4l2_dev:
+ v4l2_device_unregister(&s->v4l2_dev);
+err_free_controls:
+ v4l2_ctrl_handler_free(&s->hdl);
+err_free_mem:
+ kfree(s);
+ return NULL;
+}
+EXPORT_SYMBOL(rtl2832_sdr_attach);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Realtek RTL2832 SDR driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/rtl2832u_sdr/rtl2832_sdr.h b/drivers/staging/media/rtl2832u_sdr/rtl2832_sdr.h
new file mode 100644
index 000000000000..b865fadf184f
--- /dev/null
+++ b/drivers/staging/media/rtl2832u_sdr/rtl2832_sdr.h
@@ -0,0 +1,54 @@
+/*
+ * Realtek RTL2832U SDR driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * GNU Radio plugin "gr-kernel" for device usage will be on:
+ * http://git.linuxtv.org/anttip/gr-kernel.git
+ *
+ * TODO:
+ * Help is very highly welcome for these + all the others you could imagine:
+ * - move controls to V4L2 API
+ * - use libv4l2 for stream format conversions
+ * - gr-kernel: switch to v4l2_mmap (current read eats a lot of cpu)
+ * - SDRSharp support
+ */
+
+#ifndef RTL2832_SDR_H
+#define RTL2832_SDR_H
+
+#include <linux/kconfig.h>
+#include <media/v4l2-subdev.h>
+
+/* for config struct */
+#include "rtl2832.h"
+
+#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR)
+extern struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+ struct v4l2_subdev *sd);
+#else
+static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+ struct v4l2_subdev *sd)
+{
+ dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* RTL2832_SDR_H */
diff --git a/drivers/staging/media/sn9c102/sn9c102_core.c b/drivers/staging/media/sn9c102/sn9c102_core.c
index 2cb44de2b92c..71f594f5aa71 100644
--- a/drivers/staging/media/sn9c102/sn9c102_core.c
+++ b/drivers/staging/media/sn9c102/sn9c102_core.c
@@ -158,8 +158,8 @@ sn9c102_request_buffers(struct sn9c102_device* cam, u32 count,
cam->nbuffers = count;
while (cam->nbuffers > 0) {
- if ((buff = vmalloc_32_user(cam->nbuffers *
- PAGE_ALIGN(imagesize))))
+ buff = vmalloc_32_user(cam->nbuffers * PAGE_ALIGN(imagesize));
+ if (buff)
break;
cam->nbuffers--;
}
@@ -1121,7 +1121,8 @@ static ssize_t sn9c102_show_val(struct device* cd,
return -ENODEV;
}
- if ((val = sn9c102_read_reg(cam, cam->sysfs.reg)) < 0) {
+ val = sn9c102_read_reg(cam, cam->sysfs.reg);
+ if (val < 0) {
mutex_unlock(&sn9c102_sysfs_lock);
return -EIO;
}
@@ -1256,7 +1257,8 @@ static ssize_t sn9c102_show_i2c_val(struct device* cd,
return -ENOSYS;
}
- if ((val = sn9c102_i2c_read(cam, cam->sysfs.i2c_reg)) < 0) {
+ val = sn9c102_i2c_read(cam, cam->sysfs.i2c_reg);
+ if (val < 0) {
mutex_unlock(&sn9c102_sysfs_lock);
return -EIO;
}
@@ -1440,27 +1442,35 @@ static int sn9c102_create_sysfs(struct sn9c102_device* cam)
struct device *dev = &(cam->v4ldev->dev);
int err = 0;
- if ((err = device_create_file(dev, &dev_attr_reg)))
+ err = device_create_file(dev, &dev_attr_reg);
+ if (err)
goto err_out;
- if ((err = device_create_file(dev, &dev_attr_val)))
+ err = device_create_file(dev, &dev_attr_val);
+ if (err)
goto err_reg;
- if ((err = device_create_file(dev, &dev_attr_frame_header)))
+ err = device_create_file(dev, &dev_attr_frame_header);
+ if (err)
goto err_val;
if (cam->sensor.sysfs_ops) {
- if ((err = device_create_file(dev, &dev_attr_i2c_reg)))
+ err = device_create_file(dev, &dev_attr_i2c_reg);
+ if (err)
goto err_frame_header;
- if ((err = device_create_file(dev, &dev_attr_i2c_val)))
+ err = device_create_file(dev, &dev_attr_i2c_val);
+ if (err)
goto err_i2c_reg;
}
if (cam->bridge == BRIDGE_SN9C101 || cam->bridge == BRIDGE_SN9C102) {
- if ((err = device_create_file(dev, &dev_attr_green)))
+ err = device_create_file(dev, &dev_attr_green);
+ if (err)
goto err_i2c_val;
} else {
- if ((err = device_create_file(dev, &dev_attr_blue)))
+ err = device_create_file(dev, &dev_attr_blue);
+ if (err)
goto err_i2c_val;
- if ((err = device_create_file(dev, &dev_attr_red)))
+ err = device_create_file(dev, &dev_attr_red);
+ if (err)
goto err_blue;
}
@@ -1684,11 +1694,13 @@ static int sn9c102_init(struct sn9c102_device* cam)
else
DBG(3, "Uncompressed video format is active");
- if (s->set_crop)
- if ((err = s->set_crop(cam, rect))) {
+ if (s->set_crop) {
+ err = s->set_crop(cam, rect);
+ if (err) {
DBG(3, "set_crop() failed");
return err;
}
+ }
if (s->set_ctrl) {
for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
@@ -1835,7 +1847,8 @@ static int sn9c102_open(struct file *filp)
cam->state &= ~DEV_MISCONFIGURED;
}
- if ((err = sn9c102_start_transfer(cam)))
+ err = sn9c102_start_transfer(cam);
+ if (err)
goto out;
filp->private_data = cam;
@@ -2308,7 +2321,8 @@ sn9c102_vidioc_s_ctrl(struct sn9c102_device* cam, void __user * arg)
}
if (i == ARRAY_SIZE(s->qctrl))
return -EINVAL;
- if ((err = s->set_ctrl(cam, &ctrl)))
+ err = s->set_ctrl(cam, &ctrl);
+ if (err)
return err;
s->_qctrl[i].default_value = ctrl.value;
@@ -2416,9 +2430,11 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
} else
scale = 1;
- if (cam->stream == STREAM_ON)
- if ((err = sn9c102_stream_interrupt(cam)))
+ if (cam->stream == STREAM_ON) {
+ err = sn9c102_stream_interrupt(cam);
+ if (err)
return err;
+ }
if (copy_to_user(arg, &crop, sizeof(crop))) {
cam->stream = stream;
@@ -2672,9 +2688,11 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
return -EBUSY;
}
- if (cam->stream == STREAM_ON)
- if ((err = sn9c102_stream_interrupt(cam)))
+ if (cam->stream == STREAM_ON) {
+ err = sn9c102_stream_interrupt(cam);
+ if (err)
return err;
+ }
if (copy_to_user(arg, &format, sizeof(format))) {
cam->stream = stream;
@@ -2746,9 +2764,11 @@ sn9c102_vidioc_s_jpegcomp(struct sn9c102_device* cam, void __user * arg)
if (jc.quality != 0 && jc.quality != 1)
return -EINVAL;
- if (cam->stream == STREAM_ON)
- if ((err = sn9c102_stream_interrupt(cam)))
+ if (cam->stream == STREAM_ON) {
+ err = sn9c102_stream_interrupt(cam);
+ if (err)
return err;
+ }
err += sn9c102_set_compression(cam, &jc);
if (err) { /* atomic, no rollback in ioctl() */
@@ -2794,9 +2814,11 @@ sn9c102_vidioc_reqbufs(struct sn9c102_device* cam, void __user * arg)
return -EBUSY;
}
- if (cam->stream == STREAM_ON)
- if ((err = sn9c102_stream_interrupt(cam)))
+ if (cam->stream == STREAM_ON) {
+ err = sn9c102_stream_interrupt(cam);
+ if (err)
return err;
+ }
sn9c102_empty_framequeues(cam);
@@ -2974,9 +2996,11 @@ sn9c102_vidioc_streamoff(struct sn9c102_device* cam, void __user * arg)
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
return -EINVAL;
- if (cam->stream == STREAM_ON)
- if ((err = sn9c102_stream_interrupt(cam)))
+ if (cam->stream == STREAM_ON) {
+ err = sn9c102_stream_interrupt(cam);
+ if (err)
return err;
+ }
sn9c102_empty_framequeues(cam);
@@ -3250,7 +3274,8 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
unsigned int i;
int err = 0, r;
- if (!(cam = kzalloc(sizeof(struct sn9c102_device), GFP_KERNEL)))
+ cam = kzalloc(sizeof(struct sn9c102_device), GFP_KERNEL);
+ if (!cam)
return -ENOMEM;
cam->usbdev = udev;
@@ -3262,13 +3287,15 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- if (!(cam->control_buffer = kzalloc(8, GFP_KERNEL))) {
+ cam->control_buffer = kzalloc(8, GFP_KERNEL);
+ if (!cam->control_buffer) {
DBG(1, "kzalloc() failed");
err = -ENOMEM;
goto fail;
}
- if (!(cam->v4ldev = video_device_alloc())) {
+ cam->v4ldev = video_device_alloc();
+ if (!cam->v4ldev) {
DBG(1, "video_device_alloc() failed");
err = -ENOMEM;
goto fail;
diff --git a/drivers/staging/media/sn9c102/sn9c102_hv7131d.c b/drivers/staging/media/sn9c102/sn9c102_hv7131d.c
index 2dce5c908c8e..468072176527 100644
--- a/drivers/staging/media/sn9c102/sn9c102_hv7131d.c
+++ b/drivers/staging/media/sn9c102/sn9c102_hv7131d.c
@@ -53,27 +53,32 @@ static int hv7131d_get_ctrl(struct sn9c102_device* cam,
}
return 0;
case V4L2_CID_RED_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x31)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x31);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = 0x3f - (ctrl->value & 0x3f);
return 0;
case V4L2_CID_BLUE_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x33)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x33);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = 0x3f - (ctrl->value & 0x3f);
return 0;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x32)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x32);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = 0x3f - (ctrl->value & 0x3f);
return 0;
case SN9C102_V4L2_CID_RESET_LEVEL:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x30)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x30);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x3f;
return 0;
case SN9C102_V4L2_CID_PIXEL_BIAS_VOLTAGE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x34)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x34);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x07;
return 0;
diff --git a/drivers/staging/media/sn9c102/sn9c102_hv7131r.c b/drivers/staging/media/sn9c102/sn9c102_hv7131r.c
index 4295887ff609..26a91115b831 100644
--- a/drivers/staging/media/sn9c102/sn9c102_hv7131r.c
+++ b/drivers/staging/media/sn9c102/sn9c102_hv7131r.c
@@ -142,26 +142,31 @@ static int hv7131r_get_ctrl(struct sn9c102_device* cam,
{
switch (ctrl->id) {
case V4L2_CID_GAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x30)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x30);
+ if (ctrl->value < 0)
return -EIO;
return 0;
case V4L2_CID_RED_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x31)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x31);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = ctrl->value & 0x3f;
return 0;
case V4L2_CID_BLUE_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x33)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x33);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = ctrl->value & 0x3f;
return 0;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x32)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x32);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = ctrl->value & 0x3f;
return 0;
case V4L2_CID_BLACK_LEVEL:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x01)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x01);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x08) ? 1 : 0;
return 0;
diff --git a/drivers/staging/media/sn9c102/sn9c102_ov7630.c b/drivers/staging/media/sn9c102/sn9c102_ov7630.c
index 803712c29f02..d3a1bd8d5648 100644
--- a/drivers/staging/media/sn9c102/sn9c102_ov7630.c
+++ b/drivers/staging/media/sn9c102/sn9c102_ov7630.c
@@ -260,7 +260,8 @@ static int ov7630_get_ctrl(struct sn9c102_device* cam,
switch (ctrl->id) {
case V4L2_CID_EXPOSURE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x10)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x10);
+ if (ctrl->value < 0)
return -EIO;
break;
case V4L2_CID_RED_BALANCE:
@@ -280,37 +281,44 @@ static int ov7630_get_ctrl(struct sn9c102_device* cam,
break;
break;
case V4L2_CID_GAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x00)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x00);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x3f;
break;
case V4L2_CID_DO_WHITE_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0c)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0c);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x3f;
break;
case V4L2_CID_WHITENESS:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0d)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0d);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x3f;
break;
case V4L2_CID_AUTOGAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x13)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x13);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x01;
break;
case V4L2_CID_VFLIP:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x75)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x75);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x80) ? 1 : 0;
break;
case SN9C102_V4L2_CID_GAMMA:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x14)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x14);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x02) ? 1 : 0;
break;
case SN9C102_V4L2_CID_BAND_FILTER:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x2d)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x2d);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x02) ? 1 : 0;
break;
diff --git a/drivers/staging/media/sn9c102/sn9c102_ov7660.c b/drivers/staging/media/sn9c102/sn9c102_ov7660.c
index 7977795d342b..530157a234e6 100644
--- a/drivers/staging/media/sn9c102/sn9c102_ov7660.c
+++ b/drivers/staging/media/sn9c102/sn9c102_ov7660.c
@@ -278,41 +278,49 @@ static int ov7660_get_ctrl(struct sn9c102_device* cam,
switch (ctrl->id) {
case V4L2_CID_EXPOSURE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x10)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x10);
+ if (ctrl->value < 0)
return -EIO;
break;
case V4L2_CID_DO_WHITE_BALANCE:
- if ((ctrl->value = sn9c102_read_reg(cam, 0x02)) < 0)
+ ctrl->value = sn9c102_read_reg(cam, 0x02);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x04) ? 1 : 0;
break;
case V4L2_CID_RED_BALANCE:
- if ((ctrl->value = sn9c102_read_reg(cam, 0x05)) < 0)
+ ctrl->value = sn9c102_read_reg(cam, 0x05);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x7f;
break;
case V4L2_CID_BLUE_BALANCE:
- if ((ctrl->value = sn9c102_read_reg(cam, 0x06)) < 0)
+ ctrl->value = sn9c102_read_reg(cam, 0x06);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x7f;
break;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- if ((ctrl->value = sn9c102_read_reg(cam, 0x07)) < 0)
+ ctrl->value = sn9c102_read_reg(cam, 0x07);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x7f;
break;
case SN9C102_V4L2_CID_BAND_FILTER:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x3b)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x3b);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x08;
break;
case V4L2_CID_GAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x00)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x00);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x1f;
break;
case V4L2_CID_AUTOGAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x13)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x13);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x01;
break;
diff --git a/drivers/staging/media/sn9c102/sn9c102_pas106b.c b/drivers/staging/media/sn9c102/sn9c102_pas106b.c
index 81cd969c1b7b..47bd82de80f9 100644
--- a/drivers/staging/media/sn9c102/sn9c102_pas106b.c
+++ b/drivers/staging/media/sn9c102/sn9c102_pas106b.c
@@ -62,32 +62,38 @@ static int pas106b_get_ctrl(struct sn9c102_device* cam,
}
return 0;
case V4L2_CID_RED_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0c)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0c);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x1f;
return 0;
case V4L2_CID_BLUE_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x09)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x09);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x1f;
return 0;
case V4L2_CID_GAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0e)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0e);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x1f;
return 0;
case V4L2_CID_CONTRAST:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0f)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0f);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x07;
return 0;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0a)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0a);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x1f) << 1;
return 0;
case SN9C102_V4L2_CID_DAC_MAGNITUDE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x08)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x08);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0xf8;
return 0;
diff --git a/drivers/staging/media/sn9c102/sn9c102_pas202bcb.c b/drivers/staging/media/sn9c102/sn9c102_pas202bcb.c
index 2e86fdc86989..cbfacc2dad84 100644
--- a/drivers/staging/media/sn9c102/sn9c102_pas202bcb.c
+++ b/drivers/staging/media/sn9c102/sn9c102_pas202bcb.c
@@ -92,27 +92,32 @@ static int pas202bcb_get_ctrl(struct sn9c102_device* cam,
}
return 0;
case V4L2_CID_RED_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x09)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x09);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x0f;
return 0;
case V4L2_CID_BLUE_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x07)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x07);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x0f;
return 0;
case V4L2_CID_GAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x10)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x10);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x1f;
return 0;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x08)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x08);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x0f;
return 0;
case SN9C102_V4L2_CID_DAC_MAGNITUDE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0c)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0c);
+ if (ctrl->value < 0)
return -EIO;
return 0;
default:
diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
index 480b7c4064cc..f67046955ef6 100644
--- a/drivers/staging/media/solo6x10/solo6x10-core.c
+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
@@ -40,7 +40,7 @@ MODULE_AUTHOR("Bluecherry <maintainers@bluecherrydvr.com>");
MODULE_VERSION(SOLO6X10_VERSION);
MODULE_LICENSE("GPL");
-unsigned video_nr = -1;
+static unsigned video_nr = -1;
module_param(video_nr, uint, 0644);
MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect (default)");
diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c
index 1db18c7972a0..74f037b6166c 100644
--- a/drivers/staging/media/solo6x10/solo6x10-g723.c
+++ b/drivers/staging/media/solo6x10/solo6x10-g723.c
@@ -366,8 +366,9 @@ int solo_g723_init(struct solo_dev *solo_dev)
/* Allows for easier mapping between video and audio */
sprintf(name, "Softlogic%d", solo_dev->vfd->num);
- ret = snd_card_create(SNDRV_DEFAULT_IDX1, name, THIS_MODULE, 0,
- &solo_dev->snd_card);
+ ret = snd_card_new(&solo_dev->pdev->dev,
+ SNDRV_DEFAULT_IDX1, name, THIS_MODULE, 0,
+ &solo_dev->snd_card);
if (ret < 0)
return ret;
@@ -377,7 +378,6 @@ int solo_g723_init(struct solo_dev *solo_dev)
strcpy(card->shortname, "SOLO-6x10 Audio");
sprintf(card->longname, "%s on %s IRQ %d", card->shortname,
pci_name(solo_dev->pdev), solo_dev->pdev->irq);
- snd_card_set_dev(card, &solo_dev->pdev->dev);
ret = snd_device_new(card, SNDRV_DEV_LOWLEVEL, solo_dev, &ops);
if (ret < 0)
diff --git a/drivers/staging/media/solo6x10/solo6x10-tw28.c b/drivers/staging/media/solo6x10/solo6x10-tw28.c
index af65ea655f15..36daa1720b54 100644
--- a/drivers/staging/media/solo6x10/solo6x10-tw28.c
+++ b/drivers/staging/media/solo6x10/solo6x10-tw28.c
@@ -516,7 +516,7 @@ static int tw2815_setup(struct solo_dev *solo_dev, u8 dev_addr)
static void saa712x_write_regs(struct solo_dev *dev, const uint8_t *vals,
int start, int n)
{
- for (;start < n; start++, vals++) {
+ for (; start < n; start++, vals++) {
/* Skip read-only registers */
switch (start) {
/* case 0x00 ... 0x25: */
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
index ce9e5aaf7fd4..2cbe088f1697 100644
--- a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
@@ -399,8 +399,8 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
if (solo_enc->desc_count <= 1)
return 0;
- return solo_p2m_dma_desc(solo_dev, solo_enc->desc_items, solo_enc->desc_dma,
- solo_enc->desc_count - 1);
+ return solo_p2m_dma_desc(solo_dev, solo_enc->desc_items,
+ solo_enc->desc_dma, solo_enc->desc_count - 1);
}
/* Extract values from VOP header - VE_STATUSxx */
@@ -472,8 +472,7 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
if (vb2_plane_size(vb, 0) < vop_jpeg_size(vh) + solo_enc->jpeg_len)
return -EIO;
- frame_size = (vop_jpeg_size(vh) + solo_enc->jpeg_len + (DMA_ALIGN - 1))
- & ~(DMA_ALIGN - 1);
+ frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN);
vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
/* may discard all previous data in vbuf->sgl */
@@ -506,21 +505,22 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
return -EIO;
/* If this is a key frame, add extra header */
- vb->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME);
+ vb->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME);
if (!vop_type(vh)) {
skip = solo_enc->vop_len;
vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
- vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) + solo_enc->vop_len);
+ vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) +
+ solo_enc->vop_len);
} else {
vb->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh));
}
/* Now get the actual mpeg payload */
- frame_off = (vop_mpeg_offset(vh) - SOLO_MP4E_EXT_ADDR(solo_dev) + sizeof(*vh))
- % SOLO_MP4E_EXT_SIZE(solo_dev);
- frame_size = (vop_mpeg_size(vh) + skip + (DMA_ALIGN - 1))
- & ~(DMA_ALIGN - 1);
+ frame_off = (vop_mpeg_offset(vh) - SOLO_MP4E_EXT_ADDR(solo_dev) +
+ sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
+ frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN);
/* may discard all previous data in vbuf->sgl */
dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
@@ -589,7 +589,8 @@ static void solo_enc_handle_one(struct solo_enc_dev *solo_enc,
spin_unlock_irqrestore(&solo_enc->av_lock, flags);
goto unlock;
}
- vb = list_first_entry(&solo_enc->vidq_active, struct solo_vb2_buf, list);
+ vb = list_first_entry(&solo_enc->vidq_active, struct solo_vb2_buf,
+ list);
list_del(&vb->list);
spin_unlock_irqrestore(&solo_enc->av_lock, flags);
@@ -645,7 +646,8 @@ static void solo_handle_ring(struct solo_dev *solo_dev)
enc_buf.vh = solo_dev->vh_buf;
/* Sanity check */
- if (vop_mpeg_offset(enc_buf.vh) != SOLO_MP4E_EXT_ADDR(solo_dev) + off)
+ if (vop_mpeg_offset(enc_buf.vh) !=
+ SOLO_MP4E_EXT_ADDR(solo_dev) + off)
continue;
if (solo_motion_detected(solo_enc))
@@ -680,9 +682,11 @@ static int solo_ring_thread(void *data)
return 0;
}
-static int solo_enc_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+static int solo_enc_queue_setup(struct vb2_queue *q,
+ const struct v4l2_format *fmt,
+ unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ void *alloc_ctxs[])
{
sizes[0] = FRAME_BUF_SIZE;
*num_planes = 1;
@@ -1112,14 +1116,15 @@ static int solo_s_ctrl(struct v4l2_ctrl *ctrl)
solo_enc->motion_thresh = ctrl->val;
if (!solo_enc->motion_global || !solo_enc->motion_enabled)
return 0;
- return solo_set_motion_threshold(solo_dev, solo_enc->ch, ctrl->val);
+ return solo_set_motion_threshold(solo_dev, solo_enc->ch,
+ ctrl->val);
case V4L2_CID_MOTION_MODE:
solo_enc->motion_global = ctrl->val == 1;
solo_enc->motion_enabled = ctrl->val > 0;
if (ctrl->val) {
if (solo_enc->motion_global)
- solo_set_motion_threshold(solo_dev, solo_enc->ch,
- solo_enc->motion_thresh);
+ solo_set_motion_threshold(solo_dev,
+ solo_enc->ch, solo_enc->motion_thresh);
else
solo_set_motion_block(solo_dev, solo_enc->ch,
&solo_enc->motion_thresholds);
@@ -1290,7 +1295,7 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev,
solo_enc->vidq.mem_ops = &vb2_dma_sg_memops;
solo_enc->vidq.drv_priv = solo_enc;
solo_enc->vidq.gfp_flags = __GFP_DMA32;
- solo_enc->vidq.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ solo_enc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
solo_enc->vidq.buf_struct_size = sizeof(struct solo_vb2_buf);
solo_enc->vidq.lock = &solo_enc->lock;
ret = vb2_queue_init(&solo_enc->vidq);
@@ -1307,7 +1312,8 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev,
solo_enc->desc_nelts = 32;
solo_enc->desc_items = pci_alloc_consistent(solo_dev->pdev,
sizeof(struct solo_p2m_desc) *
- solo_enc->desc_nelts, &solo_enc->desc_dma);
+ solo_enc->desc_nelts,
+ &solo_enc->desc_dma);
ret = -ENOMEM;
if (solo_enc->desc_items == NULL)
goto hdl_free;
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2.c b/drivers/staging/media/solo6x10/solo6x10-v4l2.c
index 47e72dac9b13..1815f765d033 100644
--- a/drivers/staging/media/solo6x10/solo6x10-v4l2.c
+++ b/drivers/staging/media/solo6x10/solo6x10-v4l2.c
@@ -676,7 +676,7 @@ int solo_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
solo_dev->vidq.ops = &solo_video_qops;
solo_dev->vidq.mem_ops = &vb2_dma_contig_memops;
solo_dev->vidq.drv_priv = solo_dev;
- solo_dev->vidq.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ solo_dev->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
solo_dev->vidq.gfp_flags = __GFP_DMA32;
solo_dev->vidq.buf_struct_size = sizeof(struct solo_vb2_buf);
solo_dev->vidq.lock = &solo_dev->lock;
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 31b269a5fff7..c83e3375104b 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -307,7 +307,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
}
static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ void *accel_priv,
+ select_queue_fallback_t fallback)
{
return (u16)smp_processor_id();
}
@@ -614,8 +615,6 @@ static void xlr_config_translate_table(struct xlr_net_priv *priv)
k = (k + 1) % j;
b2 = bkts[k];
k = (k + 1) % j;
- val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
- (c2 << 7) | (b2 << 1) | (use_bkt << 0));
val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
(c2 << 7) | (b2 << 1) | (use_bkt << 0));
diff --git a/drivers/staging/nokia_h4p/Kconfig b/drivers/staging/nokia_h4p/Kconfig
new file mode 100644
index 000000000000..4336c0ad065b
--- /dev/null
+++ b/drivers/staging/nokia_h4p/Kconfig
@@ -0,0 +1,9 @@
+config BT_NOKIA_H4P
+ tristate "HCI driver with H4 Nokia extensions"
+ depends on BT && ARCH_OMAP
+ help
+ Bluetooth HCI driver with H4 extensions. This driver provides
+ support for the H4+ Bluetooth chip with vendor-specific H4 extensions.
+
+ Say Y here to compile support for H4 extended devices into the kernel,
+ or say M to compile it as a module (btnokia_h4p).
diff --git a/drivers/staging/nokia_h4p/Makefile b/drivers/staging/nokia_h4p/Makefile
new file mode 100644
index 000000000000..9625db4a9af3
--- /dev/null
+++ b/drivers/staging/nokia_h4p/Makefile
@@ -0,0 +1,6 @@
+
+obj-$(CONFIG_BT_NOKIA_H4P) += btnokia_h4p.o
+btnokia_h4p-objs := nokia_core.o nokia_fw.o nokia_uart.o nokia_fw-csr.o \
+ nokia_fw-bcm.o nokia_fw-ti1273.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/nokia_h4p/TODO b/drivers/staging/nokia_h4p/TODO
new file mode 100644
index 000000000000..0ec5823e0ca8
--- /dev/null
+++ b/drivers/staging/nokia_h4p/TODO
@@ -0,0 +1,132 @@
+A few submission attempts have been made; the last round of review comments was received in:
+
+Date: Wed, 15 Jan 2014 19:01:51 -0800
+From: Marcel Holtmann <marcel@holtmann.org>
+Subject: Re: [PATCH v6] Bluetooth: Add hci_h4p driver
+
+Some code refactoring is still needed.
+
+TODO:
+
+> +++ b/drivers/bluetooth/hci_h4p.h
+
+can we please get the naming straight. File names do not start with
+hci_ anymore. We moved away from it since that term is too generic.
+
+> +struct hci_h4p_info {
+
+Can we please get rid of the hci_ prefix for everything. Copying from
+drivers that are over 10 years old is not a good idea. Please look at
+recent ones.
+
+> + struct timer_list lazy_release;
+
+Timer? Not delayed work?
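+
+(A minimal sketch of the delayed-work variant the reviewer is asking for;
+this is not driver code, the handler body just mirrors the current timer
+callback:)
+
+	/* in struct hci_h4p_info, replacing struct timer_list lazy_release */
+	struct delayed_work lazy_release;
+
+	static void hci_h4p_lazy_clock_release(struct work_struct *work)
+	{
+		struct hci_h4p_info *info =
+			container_of(work, struct hci_h4p_info,
+				     lazy_release.work);
+		unsigned long flags;
+
+		spin_lock_irqsave(&info->lock, flags);
+		if (!info->tx_enabled)
+			hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+		spin_unlock_irqrestore(&info->lock, flags);
+	}
+
+	/* probe: INIT_DELAYED_WORK(&info->lazy_release, hci_h4p_lazy_clock_release); */
+	/* and instead of mod_timer(..., jiffies + msecs_to_jiffies(100)): */
+	schedule_delayed_work(&info->lazy_release, msecs_to_jiffies(100));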
+
+> +void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val);
+> +u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset);
+> +void hci_h4p_set_rts(struct hci_h4p_info *info, int active);
+> +int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active, int timeout_ms);
+> +void __hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which);
+> +void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which);
+> +void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed);
+> +int hci_h4p_reset_uart(struct hci_h4p_info *info);
+> +void hci_h4p_init_uart(struct hci_h4p_info *info);
+> +void hci_h4p_enable_tx(struct hci_h4p_info *info);
+> +void hci_h4p_store_regs(struct hci_h4p_info *info);
+> +void hci_h4p_restore_regs(struct hci_h4p_info *info);
+> +void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable);
+
+These are a lot of public functions. Are they all really needed, or can
+the code be done smarter?
+
+> +static ssize_t hci_h4p_store_bdaddr(struct device *dev,
+> + struct device_attribute *attr,
+> + const char *buf, size_t count)
+> +{
+> + struct hci_h4p_info *info = dev_get_drvdata(dev);
+
+Since none of these devices can function without having a valid
+address, the way this should work is that we should not register the
+HCI device when probing the platform device.
+
+The HCI device should be registered once a valid address has been
+written into the sysfs file. I do not want to play the tricks with
+bringing up the device without a valid address.
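+
+(Roughly what that ordering could look like; parse_bdaddr() below is a
+placeholder, not an existing helper, and this is only a sketch of the
+suggested flow, not the submitted driver:)
+
+	static ssize_t bdaddr_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+	{
+		struct hci_h4p_info *info = dev_get_drvdata(dev);
+		u8 bdaddr[6];
+		int err;
+
+		err = parse_bdaddr(buf, bdaddr);	/* placeholder */
+		if (err)
+			return err;
+
+		memcpy(info->bd_addr, bdaddr, sizeof(bdaddr));
+
+		/* the controller becomes visible to the stack only now */
+		err = hci_register_dev(info->hdev);
+		if (err < 0)
+			return err;
+
+		return count;
+	}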
+
+> + hdev->close = hci_h4p_hci_close;
+> + hdev->flush = hci_h4p_hci_flush;
+> + hdev->send = hci_h4p_hci_send_frame;
+
+It needs to use hdev->setup to load the firmware. I assume the
+firmware only needs to be loaded once. That is exactly what
+hdev->setup does. It gets executed once.
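+
+(A sketch of the hdev->setup style, reusing the driver's existing
+hci_h4p_read_fw()/hci_h4p_send_fw() helpers; the function name and the
+exact call site are illustrative:)
+
+	static int h4p_setup(struct hci_dev *hdev)
+	{
+		struct hci_h4p_info *info = hci_get_drvdata(hdev);
+		struct sk_buff_head fw_queue;
+		int err;
+
+		skb_queue_head_init(&fw_queue);
+
+		err = hci_h4p_read_fw(info, &fw_queue);
+		if (err < 0)
+			return err;
+
+		return hci_h4p_send_fw(info, &fw_queue);
+	}
+
+	/* and at registration time, before hci_register_dev(): */
+	hdev->setup = h4p_setup;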
+
+> + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+Is this quirk really needed? Normally only Bluetooth 1.1 and early
+devices qualify for it.
+
+> +static int hci_h4p_bcm_set_bdaddr(struct hci_h4p_info *info, struct sk_buff *skb)
+> +{
+> + int i;
+> + static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
+> + int not_valid;
+
+Has it actually been confirmed that we can just randomly set an
+address out of the Nokia range? I do not think so. This is a pretty
+bad idea.
+
+I have no interest in merging a driver with such a hack.
+
+> + not_valid = 1;
+> + for (i = 0; i < 6; i++) {
+> + if (info->bd_addr[i] != 0x00) {
+> + not_valid = 0;
+> + break;
+> + }
+> + }
+
+Anybody ever heard of memcmp or bacmp and BDADDR_ANY?
+
+> + if (not_valid) {
+> + dev_info(info->dev, "Valid bluetooth address not found,"
+> + " setting some random\n");
+> + /* When address is not valid, use some random */
+> + memcpy(info->bd_addr, nokia_oui, 3);
+> + get_random_bytes(info->bd_addr + 3, 3);
+> + }
+
+
+And why does every single chip firmware do this differently? Seriously, this is a mess.
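+
+(In other words, the whole validity loop above collapses to one
+comparison; sketch only:)
+
+	if (!bacmp((bdaddr_t *)info->bd_addr, BDADDR_ANY)) {
+		dev_info(info->dev,
+			 "No valid address, generating one in the Nokia OUI\n");
+		memcpy(info->bd_addr, nokia_oui, 3);
+		get_random_bytes(info->bd_addr + 3, 3);
+	}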
+
+> +void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
+> +{
+> + switch (info->man_id) {
+> + case H4P_ID_CSR:
+> + hci_h4p_bc4_parse_fw_event(info, skb);
+> + break;
+...
+> +}
+
+We have proper HCI sync command handling in recent kernels. I really
+do not know why this is hand coded these days. Check how the Intel
+firmware loading inside btusb.c does it.
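+
+(For reference, the synchronous style looks roughly like this per firmware
+command; opcode, plen and param stand for a single vendor command taken
+from the firmware image:)
+
+	struct sk_buff *skb;
+
+	skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+	kfree_skb(skb);	/* command-complete parameters, if needed, are in skb->data */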
+
+> +inline u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset)
+> +{
+> + return __raw_readb(info->uart_base + (offset << 2));
+> +}
+
+Inline in a *.c file for a non-static function. Makes no sense to me.
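+
+(That is, either drop the inline or move it into hci_h4p.h as a static
+inline, e.g.:)
+
+	static inline u8 hci_h4p_inb(struct hci_h4p_info *info,
+				     unsigned int offset)
+	{
+		return __raw_readb(info->uart_base + (offset << 2));
+	}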
+
+> +/**
+> + * struct hci_h4p_platform data - hci_h4p Platform data structure
+> + */
+> +struct hci_h4p_platform_data {
+
+please have a proper name here. For example
+btnokia_h4p_platform_data.
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
+Pavel Machek <pavel@ucw.cz>
diff --git a/drivers/staging/nokia_h4p/hci_h4p.h b/drivers/staging/nokia_h4p/hci_h4p.h
new file mode 100644
index 000000000000..99c4da61a56c
--- /dev/null
+++ b/drivers/staging/nokia_h4p/hci_h4p.h
@@ -0,0 +1,222 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2005-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __DRIVERS_BLUETOOTH_HCI_H4P_H
+#define __DRIVERS_BLUETOOTH_HCI_H4P_H
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci.h>
+
+#define UART_SYSC_OMAP_RESET 0x03
+#define UART_SYSS_RESETDONE 0x01
+#define UART_OMAP_SCR_EMPTY_THR 0x08
+#define UART_OMAP_SCR_WAKEUP 0x10
+#define UART_OMAP_SSR_WAKEUP 0x02
+#define UART_OMAP_SSR_TXFULL 0x01
+
+#define UART_OMAP_SYSC_IDLEMODE 0x03
+#define UART_OMAP_SYSC_IDLEMASK (3 << UART_OMAP_SYSC_IDLEMODE)
+
+#define UART_OMAP_SYSC_FORCE_IDLE (0 << UART_OMAP_SYSC_IDLEMODE)
+#define UART_OMAP_SYSC_NO_IDLE (1 << UART_OMAP_SYSC_IDLEMODE)
+#define UART_OMAP_SYSC_SMART_IDLE (2 << UART_OMAP_SYSC_IDLEMODE)
+
+#define H4P_TRANSFER_MODE 1
+#define H4P_SCHED_TRANSFER_MODE 2
+#define H4P_ACTIVE_MODE 3
+
+struct hci_h4p_info {
+ struct timer_list lazy_release;
+ struct hci_dev *hdev;
+ spinlock_t lock;
+
+ void __iomem *uart_base;
+ unsigned long uart_phys_base;
+ int irq;
+ struct device *dev;
+ u8 chip_type;
+ u8 bt_wakeup_gpio;
+ u8 host_wakeup_gpio;
+ u8 reset_gpio;
+ u8 reset_gpio_shared;
+ u8 bt_sysclk;
+ u8 man_id;
+ u8 ver_id;
+
+ struct sk_buff_head fw_queue;
+ struct sk_buff *alive_cmd_skb;
+ struct completion init_completion;
+ struct completion fw_completion;
+ struct completion test_completion;
+ int fw_error;
+ int init_error;
+
+ struct sk_buff_head txq;
+
+ struct sk_buff *rx_skb;
+ long rx_count;
+ unsigned long rx_state;
+ unsigned long garbage_bytes;
+
+ u8 bd_addr[6];
+ struct sk_buff_head *fw_q;
+
+ int pm_enabled;
+ int tx_enabled;
+ int autorts;
+ int rx_enabled;
+ unsigned long pm_flags;
+
+ int tx_clocks_en;
+ int rx_clocks_en;
+ spinlock_t clocks_lock;
+ struct clk *uart_iclk;
+ struct clk *uart_fclk;
+ atomic_t clk_users;
+ u16 dll;
+ u16 dlh;
+ u16 ier;
+ u16 mdr1;
+ u16 efr;
+};
+
+struct hci_h4p_radio_hdr {
+ __u8 evt;
+ __u8 dlen;
+} __packed;
+
+struct hci_h4p_neg_hdr {
+ __u8 dlen;
+} __packed;
+#define H4P_NEG_HDR_SIZE 1
+
+#define H4P_NEG_REQ 0x00
+#define H4P_NEG_ACK 0x20
+#define H4P_NEG_NAK 0x40
+
+#define H4P_PROTO_PKT 0x44
+#define H4P_PROTO_BYTE 0x4c
+
+#define H4P_ID_CSR 0x02
+#define H4P_ID_BCM2048 0x04
+#define H4P_ID_TI1271 0x31
+
+struct hci_h4p_neg_cmd {
+ __u8 ack;
+ __u16 baud;
+ __u16 unused1;
+ __u8 proto;
+ __u16 sys_clk;
+ __u16 unused2;
+} __packed;
+
+struct hci_h4p_neg_evt {
+ __u8 ack;
+ __u16 baud;
+ __u16 unused1;
+ __u8 proto;
+ __u16 sys_clk;
+ __u16 unused2;
+ __u8 man_id;
+ __u8 ver_id;
+} __packed;
+
+#define H4P_ALIVE_REQ 0x55
+#define H4P_ALIVE_RESP 0xcc
+
+struct hci_h4p_alive_hdr {
+ __u8 dlen;
+} __packed;
+#define H4P_ALIVE_HDR_SIZE 1
+
+struct hci_h4p_alive_pkt {
+ __u8 mid;
+ __u8 unused;
+} __packed;
+
+#define MAX_BAUD_RATE 921600
+#define BC4_MAX_BAUD_RATE 3692300
+#define UART_CLOCK 48000000
+#define BT_INIT_DIVIDER 320
+#define BT_BAUDRATE_DIVIDER 384000000
+#define BT_SYSCLK_DIV 1000
+#define INIT_SPEED 120000
+
+#define H4_TYPE_SIZE 1
+#define H4_RADIO_HDR_SIZE 2
+
+/* H4+ packet types */
+#define H4_CMD_PKT 0x01
+#define H4_ACL_PKT 0x02
+#define H4_SCO_PKT 0x03
+#define H4_EVT_PKT 0x04
+#define H4_NEG_PKT 0x06
+#define H4_ALIVE_PKT 0x07
+#define H4_RADIO_PKT 0x08
+
+/* TX states */
+#define WAIT_FOR_PKT_TYPE 1
+#define WAIT_FOR_HEADER 2
+#define WAIT_FOR_DATA 3
+
+struct hci_fw_event {
+ struct hci_event_hdr hev;
+ struct hci_ev_cmd_complete cmd;
+ u8 status;
+} __packed;
+
+int hci_h4p_send_alive_packet(struct hci_h4p_info *info);
+
+void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info,
+ struct sk_buff *skb);
+int hci_h4p_bcm_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue);
+
+void hci_h4p_bc4_parse_fw_event(struct hci_h4p_info *info,
+ struct sk_buff *skb);
+int hci_h4p_bc4_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue);
+
+void hci_h4p_ti1273_parse_fw_event(struct hci_h4p_info *info,
+ struct sk_buff *skb);
+int hci_h4p_ti1273_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue);
+
+int hci_h4p_read_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue);
+int hci_h4p_send_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue);
+void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb);
+
+void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val);
+u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset);
+void hci_h4p_set_rts(struct hci_h4p_info *info, int active);
+int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active, int timeout_ms);
+void __hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which);
+void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which);
+void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed);
+int hci_h4p_reset_uart(struct hci_h4p_info *info);
+void hci_h4p_init_uart(struct hci_h4p_info *info);
+void hci_h4p_enable_tx(struct hci_h4p_info *info);
+void hci_h4p_store_regs(struct hci_h4p_info *info);
+void hci_h4p_restore_regs(struct hci_h4p_info *info);
+void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable);
+
+#endif /* __DRIVERS_BLUETOOTH_HCI_H4P_H */
diff --git a/drivers/staging/nokia_h4p/nokia_core.c b/drivers/staging/nokia_h4p/nokia_core.c
new file mode 100644
index 000000000000..5e19cd6ccda3
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_core.c
@@ -0,0 +1,1206 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2005-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * Thanks to all the Nokia people that helped with this driver,
+ * including Ville Tervo and Roger Quadros.
+ *
+ * Power saving functionality was removed from this driver to make
+ * merging easier.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/serial_reg.h>
+#include <linux/skbuff.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/timer.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+#include <linux/completion.h>
+#include <linux/sizes.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci.h>
+
+#include <linux/platform_data/bt-nokia-h4p.h>
+
+#include "hci_h4p.h"
+
+/* This should be used in functions that cannot release clocks */
+static void hci_h4p_set_clk(struct hci_h4p_info *info, int *clock, int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->clocks_lock, flags);
+ if (enable && !*clock) {
+ BT_DBG("Enabling %p", clock);
+ clk_prepare_enable(info->uart_fclk);
+ clk_prepare_enable(info->uart_iclk);
+ if (atomic_read(&info->clk_users) == 0)
+ hci_h4p_restore_regs(info);
+ atomic_inc(&info->clk_users);
+ }
+
+ if (!enable && *clock) {
+ BT_DBG("Disabling %p", clock);
+ if (atomic_dec_and_test(&info->clk_users))
+ hci_h4p_store_regs(info);
+ clk_disable_unprepare(info->uart_fclk);
+ clk_disable_unprepare(info->uart_iclk);
+ }
+
+ *clock = enable;
+ spin_unlock_irqrestore(&info->clocks_lock, flags);
+}
+
+static void hci_h4p_lazy_clock_release(unsigned long data)
+{
+ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+ if (!info->tx_enabled)
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+/* Power management functions */
+void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable)
+{
+ u8 v;
+
+ v = hci_h4p_inb(info, UART_OMAP_SYSC);
+ v &= ~(UART_OMAP_SYSC_IDLEMASK);
+
+ if (enable)
+ v |= UART_OMAP_SYSC_SMART_IDLE;
+ else
+ v |= UART_OMAP_SYSC_NO_IDLE;
+
+ hci_h4p_outb(info, UART_OMAP_SYSC, v);
+}
+
+static inline void h4p_schedule_pm(struct hci_h4p_info *info)
+{
+}
+
+static void hci_h4p_disable_tx(struct hci_h4p_info *info)
+{
+ if (!info->pm_enabled)
+ return;
+
+ /* Re-enable smart-idle */
+ hci_h4p_smart_idle(info, 1);
+
+ gpio_set_value(info->bt_wakeup_gpio, 0);
+ mod_timer(&info->lazy_release, jiffies + msecs_to_jiffies(100));
+ info->tx_enabled = 0;
+}
+
+void hci_h4p_enable_tx(struct hci_h4p_info *info)
+{
+ unsigned long flags;
+
+ if (!info->pm_enabled)
+ return;
+
+ h4p_schedule_pm(info);
+
+ spin_lock_irqsave(&info->lock, flags);
+ del_timer(&info->lazy_release);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
+ info->tx_enabled = 1;
+ gpio_set_value(info->bt_wakeup_gpio, 1);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ /*
+ * Disable smart-idle as UART TX interrupts
+ * are not wake-up capable
+ */
+ hci_h4p_smart_idle(info, 0);
+
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static void hci_h4p_disable_rx(struct hci_h4p_info *info)
+{
+ if (!info->pm_enabled)
+ return;
+
+ info->rx_enabled = 0;
+
+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR)
+ return;
+
+ if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT))
+ return;
+
+ __hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
+ info->autorts = 0;
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
+}
+
+static void hci_h4p_enable_rx(struct hci_h4p_info *info)
+{
+ if (!info->pm_enabled)
+ return;
+
+ h4p_schedule_pm(info);
+
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 1);
+ info->rx_enabled = 1;
+
+ if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT))
+ return;
+
+ __hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
+ info->autorts = 1;
+}
+
+/* Negotiation functions */
+int hci_h4p_send_alive_packet(struct hci_h4p_info *info)
+{
+ struct hci_h4p_alive_hdr *hdr;
+ struct hci_h4p_alive_pkt *pkt;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int len;
+
+ BT_DBG("Sending alive packet");
+
+ len = H4_TYPE_SIZE + sizeof(*hdr) + sizeof(*pkt);
+ skb = bt_skb_alloc(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(skb->data, 0x00, len);
+ *skb_put(skb, 1) = H4_ALIVE_PKT;
+ hdr = (struct hci_h4p_alive_hdr *)skb_put(skb, sizeof(*hdr));
+ hdr->dlen = sizeof(*pkt);
+ pkt = (struct hci_h4p_alive_pkt *)skb_put(skb, sizeof(*pkt));
+ pkt->mid = H4P_ALIVE_REQ;
+
+ skb_queue_tail(&info->txq, skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ BT_DBG("Alive packet sent");
+
+ return 0;
+}
+
+static void hci_h4p_alive_packet(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ struct hci_h4p_alive_hdr *hdr;
+ struct hci_h4p_alive_pkt *pkt;
+
+ BT_DBG("Received alive packet");
+ hdr = (struct hci_h4p_alive_hdr *)skb->data;
+ if (hdr->dlen != sizeof(*pkt)) {
+ dev_err(info->dev, "Corrupted alive message\n");
+ info->init_error = -EIO;
+ goto finish_alive;
+ }
+
+ pkt = (struct hci_h4p_alive_pkt *)skb_pull(skb, sizeof(*hdr));
+ if (pkt->mid != H4P_ALIVE_RESP) {
+ dev_err(info->dev, "Could not negotiate hci_h4p settings\n");
+ info->init_error = -EINVAL;
+ }
+
+finish_alive:
+ complete(&info->init_completion);
+ kfree_skb(skb);
+}
+
+static int hci_h4p_send_negotiation(struct hci_h4p_info *info)
+{
+ struct hci_h4p_neg_cmd *neg_cmd;
+ struct hci_h4p_neg_hdr *neg_hdr;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int err, len;
+ u16 sysclk;
+
+ BT_DBG("Sending negotiation..");
+
+ switch (info->bt_sysclk) {
+ case 1:
+ sysclk = 12000;
+ break;
+ case 2:
+ sysclk = 38400;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ len = sizeof(*neg_cmd) + sizeof(*neg_hdr) + H4_TYPE_SIZE;
+ skb = bt_skb_alloc(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(skb->data, 0x00, len);
+ *skb_put(skb, 1) = H4_NEG_PKT;
+ neg_hdr = (struct hci_h4p_neg_hdr *)skb_put(skb, sizeof(*neg_hdr));
+ neg_cmd = (struct hci_h4p_neg_cmd *)skb_put(skb, sizeof(*neg_cmd));
+
+ neg_hdr->dlen = sizeof(*neg_cmd);
+ neg_cmd->ack = H4P_NEG_REQ;
+ neg_cmd->baud = cpu_to_le16(BT_BAUDRATE_DIVIDER/MAX_BAUD_RATE);
+ neg_cmd->proto = H4P_PROTO_BYTE;
+ neg_cmd->sys_clk = cpu_to_le16(sysclk);
+
+ hci_h4p_change_speed(info, INIT_SPEED);
+
+ hci_h4p_set_rts(info, 1);
+ info->init_error = 0;
+ init_completion(&info->init_completion);
+ skb_queue_tail(&info->txq, skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ if (!wait_for_completion_interruptible_timeout(&info->init_completion,
+ msecs_to_jiffies(1000)))
+ return -ETIMEDOUT;
+
+ if (info->init_error < 0)
+ return info->init_error;
+
+ /* Change to operational settings */
+ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
+ hci_h4p_set_rts(info, 0);
+ hci_h4p_change_speed(info, MAX_BAUD_RATE);
+
+ err = hci_h4p_wait_for_cts(info, 1, 100);
+ if (err < 0)
+ return err;
+
+ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
+ init_completion(&info->init_completion);
+ err = hci_h4p_send_alive_packet(info);
+
+ if (err < 0)
+ return err;
+
+ if (!wait_for_completion_interruptible_timeout(&info->init_completion,
+ msecs_to_jiffies(1000)))
+ return -ETIMEDOUT;
+
+ if (info->init_error < 0)
+ return info->init_error;
+
+ BT_DBG("Negotiation successful");
+ return 0;
+}
+
+static void hci_h4p_negotiation_packet(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ struct hci_h4p_neg_hdr *hdr;
+ struct hci_h4p_neg_evt *evt;
+
+ hdr = (struct hci_h4p_neg_hdr *)skb->data;
+ if (hdr->dlen != sizeof(*evt)) {
+ info->init_error = -EIO;
+ goto finish_neg;
+ }
+
+ evt = (struct hci_h4p_neg_evt *)skb_pull(skb, sizeof(*hdr));
+
+ if (evt->ack != H4P_NEG_ACK) {
+ dev_err(info->dev, "Could not negotiate hci_h4p settings\n");
+ info->init_error = -EINVAL;
+ }
+
+ info->man_id = evt->man_id;
+ info->ver_id = evt->ver_id;
+
+finish_neg:
+
+ complete(&info->init_completion);
+ kfree_skb(skb);
+}
+
+/* H4 packet handling functions */
+static int hci_h4p_get_hdr_len(struct hci_h4p_info *info, u8 pkt_type)
+{
+ long retval;
+
+ switch (pkt_type) {
+ case H4_EVT_PKT:
+ retval = HCI_EVENT_HDR_SIZE;
+ break;
+ case H4_ACL_PKT:
+ retval = HCI_ACL_HDR_SIZE;
+ break;
+ case H4_SCO_PKT:
+ retval = HCI_SCO_HDR_SIZE;
+ break;
+ case H4_NEG_PKT:
+ retval = H4P_NEG_HDR_SIZE;
+ break;
+ case H4_ALIVE_PKT:
+ retval = H4P_ALIVE_HDR_SIZE;
+ break;
+ case H4_RADIO_PKT:
+ retval = H4_RADIO_HDR_SIZE;
+ break;
+ default:
+ dev_err(info->dev, "Unknown H4 packet type 0x%.2x\n", pkt_type);
+ retval = -1;
+ break;
+ }
+
+ return retval;
+}
+
+static unsigned int hci_h4p_get_data_len(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ long retval = -1;
+ struct hci_acl_hdr *acl_hdr;
+ struct hci_sco_hdr *sco_hdr;
+ struct hci_event_hdr *evt_hdr;
+ struct hci_h4p_neg_hdr *neg_hdr;
+ struct hci_h4p_alive_hdr *alive_hdr;
+ struct hci_h4p_radio_hdr *radio_hdr;
+
+ switch (bt_cb(skb)->pkt_type) {
+ case H4_EVT_PKT:
+ evt_hdr = (struct hci_event_hdr *)skb->data;
+ retval = evt_hdr->plen;
+ break;
+ case H4_ACL_PKT:
+ acl_hdr = (struct hci_acl_hdr *)skb->data;
+ retval = le16_to_cpu(acl_hdr->dlen);
+ break;
+ case H4_SCO_PKT:
+ sco_hdr = (struct hci_sco_hdr *)skb->data;
+ retval = sco_hdr->dlen;
+ break;
+ case H4_RADIO_PKT:
+ radio_hdr = (struct hci_h4p_radio_hdr *)skb->data;
+ retval = radio_hdr->dlen;
+ break;
+ case H4_NEG_PKT:
+ neg_hdr = (struct hci_h4p_neg_hdr *)skb->data;
+ retval = neg_hdr->dlen;
+ break;
+ case H4_ALIVE_PKT:
+ alive_hdr = (struct hci_h4p_alive_hdr *)skb->data;
+ retval = alive_hdr->dlen;
+ break;
+ }
+
+ return retval;
+}
+
+static inline void hci_h4p_recv_frame(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ if (unlikely(!test_bit(HCI_RUNNING, &info->hdev->flags))) {
+ switch (bt_cb(skb)->pkt_type) {
+ case H4_NEG_PKT:
+ hci_h4p_negotiation_packet(info, skb);
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ return;
+ case H4_ALIVE_PKT:
+ hci_h4p_alive_packet(info, skb);
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ return;
+ }
+
+ if (!test_bit(HCI_UP, &info->hdev->flags)) {
+ BT_DBG("fw_event");
+ hci_h4p_parse_fw_event(info, skb);
+ return;
+ }
+ }
+
+ hci_recv_frame(info->hdev, skb);
+ BT_DBG("Frame sent to upper layer");
+}
+
+static inline void hci_h4p_handle_byte(struct hci_h4p_info *info, u8 byte)
+{
+ switch (info->rx_state) {
+ case WAIT_FOR_PKT_TYPE:
+ bt_cb(info->rx_skb)->pkt_type = byte;
+ info->rx_count = hci_h4p_get_hdr_len(info, byte);
+ if (info->rx_count < 0) {
+ info->hdev->stat.err_rx++;
+ kfree_skb(info->rx_skb);
+ info->rx_skb = NULL;
+ } else {
+ info->rx_state = WAIT_FOR_HEADER;
+ }
+ break;
+ case WAIT_FOR_HEADER:
+ info->rx_count--;
+ *skb_put(info->rx_skb, 1) = byte;
+ if (info->rx_count != 0)
+ break;
+ info->rx_count = hci_h4p_get_data_len(info, info->rx_skb);
+ if (info->rx_count > skb_tailroom(info->rx_skb)) {
+ dev_err(info->dev, "frame too long\n");
+ info->garbage_bytes = info->rx_count
+ - skb_tailroom(info->rx_skb);
+ kfree_skb(info->rx_skb);
+ info->rx_skb = NULL;
+ break;
+ }
+ info->rx_state = WAIT_FOR_DATA;
+ break;
+ case WAIT_FOR_DATA:
+ info->rx_count--;
+ *skb_put(info->rx_skb, 1) = byte;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ if (info->rx_count == 0) {
+ /* H4+ devices should always send word aligned packets */
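+ /* The packet type byte is not stored in rx_skb, so an even
+ * skb->len means one trailing padding byte must be skipped. */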
+ if (!(info->rx_skb->len % 2))
+ info->garbage_bytes++;
+ hci_h4p_recv_frame(info, info->rx_skb);
+ info->rx_skb = NULL;
+ }
+}
+
+static void hci_h4p_rx_tasklet(unsigned long data)
+{
+ u8 byte;
+ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
+
+ BT_DBG("tasklet woke up");
+ BT_DBG("rx_tasklet woke up");
+
+ while (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR) {
+ byte = hci_h4p_inb(info, UART_RX);
+ if (info->garbage_bytes) {
+ info->garbage_bytes--;
+ continue;
+ }
+ if (info->rx_skb == NULL) {
+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE,
+ GFP_ATOMIC | GFP_DMA);
+ if (!info->rx_skb) {
+ dev_err(info->dev,
+ "No memory for new packet\n");
+ goto finish_rx;
+ }
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ info->rx_skb->dev = (void *)info->hdev;
+ }
+ info->hdev->stat.byte_rx++;
+ hci_h4p_handle_byte(info, byte);
+ }
+
+ if (!info->rx_enabled) {
+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT &&
+ info->autorts) {
+ __hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
+ info->autorts = 0;
+ }
+ /* Flush posted write to avoid spurious interrupts */
+ hci_h4p_inb(info, UART_OMAP_SCR);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
+ }
+
+finish_rx:
+ BT_DBG("rx_ended");
+}
+
+static void hci_h4p_tx_tasklet(unsigned long data)
+{
+ unsigned int sent = 0;
+ struct sk_buff *skb;
+ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
+
+ BT_DBG("tasklet woke up");
+ BT_DBG("tx_tasklet woke up");
+
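+ /* Keep automatic RTS in sync with the RX enable state; only
+ * change it once the transmitter is empty. */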
+ if (info->autorts != info->rx_enabled) {
+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) {
+ if (info->autorts && !info->rx_enabled) {
+ __hci_h4p_set_auto_ctsrts(info, 0,
+ UART_EFR_RTS);
+ info->autorts = 0;
+ }
+ if (!info->autorts && info->rx_enabled) {
+ __hci_h4p_set_auto_ctsrts(info, 1,
+ UART_EFR_RTS);
+ info->autorts = 1;
+ }
+ } else {
+ hci_h4p_outb(info, UART_OMAP_SCR,
+ hci_h4p_inb(info, UART_OMAP_SCR) |
+ UART_OMAP_SCR_EMPTY_THR);
+ goto finish_tx;
+ }
+ }
+
+ skb = skb_dequeue(&info->txq);
+ if (!skb) {
+ /* No data in buffer */
+ BT_DBG("skb ready");
+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) {
+ hci_h4p_outb(info, UART_IER,
+ hci_h4p_inb(info, UART_IER) &
+ ~UART_IER_THRI);
+ hci_h4p_inb(info, UART_OMAP_SCR);
+ hci_h4p_disable_tx(info);
+ return;
+ }
+ hci_h4p_outb(info, UART_OMAP_SCR,
+ hci_h4p_inb(info, UART_OMAP_SCR) |
+ UART_OMAP_SCR_EMPTY_THR);
+ goto finish_tx;
+ }
+
+ /* Copy data to tx fifo */
+ while (!(hci_h4p_inb(info, UART_OMAP_SSR) & UART_OMAP_SSR_TXFULL) &&
+ (sent < skb->len)) {
+ hci_h4p_outb(info, UART_TX, skb->data[sent]);
+ sent++;
+ }
+
+ info->hdev->stat.byte_tx += sent;
+ if (skb->len == sent) {
+ kfree_skb(skb);
+ } else {
+ skb_pull(skb, sent);
+ skb_queue_head(&info->txq, skb);
+ }
+
+ hci_h4p_outb(info, UART_OMAP_SCR, hci_h4p_inb(info, UART_OMAP_SCR) &
+ ~UART_OMAP_SCR_EMPTY_THR);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+
+finish_tx:
+ /* Flush posted write to avoid spurious interrupts */
+ hci_h4p_inb(info, UART_OMAP_SCR);
+}
+
+static irqreturn_t hci_h4p_interrupt(int irq, void *data)
+{
+ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
+ u8 iir, msr;
+ int ret;
+
+ ret = IRQ_NONE;
+
+ iir = hci_h4p_inb(info, UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ return IRQ_HANDLED;
+
+ BT_DBG("In interrupt handler iir 0x%.2x", iir);
+
+ iir &= UART_IIR_ID;
+
+ if (iir == UART_IIR_MSI) {
+ msr = hci_h4p_inb(info, UART_MSR);
+ ret = IRQ_HANDLED;
+ }
+ if (iir == UART_IIR_RLSI) {
+ hci_h4p_inb(info, UART_RX);
+ hci_h4p_inb(info, UART_LSR);
+ ret = IRQ_HANDLED;
+ }
+
+ if (iir == UART_IIR_RDI) {
+ hci_h4p_rx_tasklet((unsigned long)data);
+ ret = IRQ_HANDLED;
+ }
+
+ if (iir == UART_IIR_THRI) {
+ hci_h4p_tx_tasklet((unsigned long)data);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t hci_h4p_wakeup_interrupt(int irq, void *dev_inst)
+{
+ struct hci_h4p_info *info = dev_inst;
+ int should_wakeup;
+ struct hci_dev *hdev;
+
+ if (!info->hdev)
+ return IRQ_HANDLED;
+
+ should_wakeup = gpio_get_value(info->host_wakeup_gpio);
+ hdev = info->hdev;
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+ if (should_wakeup == 1)
+ complete_all(&info->test_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ BT_DBG("gpio interrupt %d", should_wakeup);
+
+ /* Check if we have missed some interrupts */
+ if (info->rx_enabled == should_wakeup)
+ return IRQ_HANDLED;
+
+ if (should_wakeup)
+ hci_h4p_enable_rx(info);
+ else
+ hci_h4p_disable_rx(info);
+
+ return IRQ_HANDLED;
+}
+
+static inline void hci_h4p_set_pm_limits(struct hci_h4p_info *info, bool set)
+{
+ struct hci_h4p_platform_data *bt_plat_data = info->dev->platform_data;
+ const char *sset = set ? "set" : "clear";
+
+ if (unlikely(!bt_plat_data || !bt_plat_data->set_pm_limits))
+ return;
+
+ if (set != !!test_bit(H4P_ACTIVE_MODE, &info->pm_flags)) {
+ bt_plat_data->set_pm_limits(info->dev, set);
+ if (set)
+ set_bit(H4P_ACTIVE_MODE, &info->pm_flags);
+ else
+ clear_bit(H4P_ACTIVE_MODE, &info->pm_flags);
+ BT_DBG("Change pm constraints to: %s", sset);
+ return;
+ }
+
+ BT_DBG("pm constraints remains: %s", sset);
+}
+
+static int hci_h4p_reset(struct hci_h4p_info *info)
+{
+ int err;
+
+ err = hci_h4p_reset_uart(info);
+ if (err < 0) {
+ dev_err(info->dev, "Uart reset failed\n");
+ return err;
+ }
+ hci_h4p_init_uart(info);
+ hci_h4p_set_rts(info, 0);
+
+ gpio_set_value(info->reset_gpio, 0);
+ gpio_set_value(info->bt_wakeup_gpio, 1);
+ msleep(10);
+
+ if (gpio_get_value(info->host_wakeup_gpio) == 1) {
+ dev_err(info->dev, "host_wakeup_gpio not low\n");
+ return -EPROTO;
+ }
+
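+ /* test_completion is completed from hci_h4p_wakeup_interrupt()
+ * once the chip drives host_wakeup high after leaving reset. */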
+ init_completion(&info->test_completion);
+ gpio_set_value(info->reset_gpio, 1);
+
+ if (!wait_for_completion_interruptible_timeout(&info->test_completion,
+ msecs_to_jiffies(100))) {
+ dev_err(info->dev, "wakeup test timed out\n");
+ complete_all(&info->test_completion);
+ return -EPROTO;
+ }
+
+ err = hci_h4p_wait_for_cts(info, 1, 100);
+ if (err < 0) {
+ dev_err(info->dev, "No cts from bt chip\n");
+ return err;
+ }
+
+ hci_h4p_set_rts(info, 1);
+
+ return 0;
+}
+
+/* hci callback functions */
+static int hci_h4p_hci_flush(struct hci_dev *hdev)
+{
+ struct hci_h4p_info *info = hci_get_drvdata(hdev);
+ skb_queue_purge(&info->txq);
+
+ return 0;
+}
+
+static int hci_h4p_bt_wakeup_test(struct hci_h4p_info *info)
+{
+ /*
+ * Test Sequence:
+ * Host de-asserts the BT_WAKE_UP line.
+ * Host polls the UART_CTS line, waiting for it to be de-asserted.
+ * Host asserts the BT_WAKE_UP line.
+ * Host polls the UART_CTS line, waiting for it to be asserted.
+ * Host de-asserts the BT_WAKE_UP line (allow the Bluetooth device to
+ * sleep).
+ * Host polls the UART_CTS line, waiting for it to be de-asserted.
+ */
+ int err;
+ int ret = -ECOMM;
+
+ if (!info)
+ return -EINVAL;
+
+ /* Disable wakeup interrupts */
+ disable_irq(gpio_to_irq(info->host_wakeup_gpio));
+
+ gpio_set_value(info->bt_wakeup_gpio, 0);
+ err = hci_h4p_wait_for_cts(info, 0, 100);
+ if (err) {
+ dev_warn(info->dev,
+ "bt_wakeup_test: fail: CTS low timed out: %d\n",
+ err);
+ goto out;
+ }
+
+ gpio_set_value(info->bt_wakeup_gpio, 1);
+ err = hci_h4p_wait_for_cts(info, 1, 100);
+ if (err) {
+ dev_warn(info->dev,
+ "bt_wakeup_test: fail: CTS high timed out: %d\n",
+ err);
+ goto out;
+ }
+
+ gpio_set_value(info->bt_wakeup_gpio, 0);
+ err = hci_h4p_wait_for_cts(info, 0, 100);
+ if (err) {
+ dev_warn(info->dev,
+ "bt_wakeup_test: fail: CTS re-low timed out: %d\n",
+ err);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+
+ /* Re-enable wakeup interrupts */
+ enable_irq(gpio_to_irq(info->host_wakeup_gpio));
+
+ return ret;
+}
+
+static int hci_h4p_hci_open(struct hci_dev *hdev)
+{
+ struct hci_h4p_info *info;
+ int err, retries = 0;
+ struct sk_buff_head fw_queue;
+ unsigned long flags;
+
+ info = hci_get_drvdata(hdev);
+
+ if (test_bit(HCI_RUNNING, &hdev->flags))
+ return 0;
+
+ /* The TI1271 has a HW bug and boot-up might fail. Retry up to three times. */
+again:
+
+ info->rx_enabled = 1;
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ info->rx_count = 0;
+ info->garbage_bytes = 0;
+ info->rx_skb = NULL;
+ info->pm_enabled = 0;
+ init_completion(&info->fw_completion);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 1);
+ skb_queue_head_init(&fw_queue);
+
+ err = hci_h4p_reset(info);
+ if (err < 0)
+ goto err_clean;
+
+ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_CTS | UART_EFR_RTS);
+ info->autorts = 1;
+
+ err = hci_h4p_send_negotiation(info);
+ if (err < 0) {
+ dev_err(info->dev, "Negotiation failed\n");
+ goto err_clean;
+ }
+
+ err = hci_h4p_read_fw(info, &fw_queue);
+ if (err < 0) {
+ dev_err(info->dev, "Cannot read firmware\n");
+ goto err_clean;
+ }
+
+ err = hci_h4p_send_fw(info, &fw_queue);
+ if (err < 0) {
+ dev_err(info->dev, "Sending firmware failed.\n");
+ goto err_clean;
+ }
+
+ info->pm_enabled = 1;
+
+ err = hci_h4p_bt_wakeup_test(info);
+ if (err < 0) {
+ dev_err(info->dev, "BT wakeup test failed.\n");
+ goto err_clean;
+ }
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->rx_enabled = gpio_get_value(info->host_wakeup_gpio);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, info->rx_enabled);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+
+ kfree_skb(info->alive_cmd_skb);
+ info->alive_cmd_skb = NULL;
+ set_bit(HCI_RUNNING, &hdev->flags);
+
+ BT_DBG("hci up and running");
+ return 0;
+
+err_clean:
+ hci_h4p_hci_flush(hdev);
+ hci_h4p_reset_uart(info);
+ del_timer_sync(&info->lazy_release);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
+ gpio_set_value(info->reset_gpio, 0);
+ gpio_set_value(info->bt_wakeup_gpio, 0);
+ skb_queue_purge(&fw_queue);
+ kfree_skb(info->alive_cmd_skb);
+ info->alive_cmd_skb = NULL;
+ kfree_skb(info->rx_skb);
+ info->rx_skb = NULL;
+
+ if (retries++ < 3) {
+ dev_err(info->dev, "FW loading try %d fail. Retry.\n", retries);
+ goto again;
+ }
+
+ return err;
+}
+
+static int hci_h4p_hci_close(struct hci_dev *hdev)
+{
+ struct hci_h4p_info *info = hci_get_drvdata(hdev);
+
+ if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
+ return 0;
+
+ hci_h4p_hci_flush(hdev);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 1);
+ hci_h4p_reset_uart(info);
+ del_timer_sync(&info->lazy_release);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
+ gpio_set_value(info->reset_gpio, 0);
+ gpio_set_value(info->bt_wakeup_gpio, 0);
+ kfree_skb(info->rx_skb);
+
+ return 0;
+}
+
+static int hci_h4p_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_h4p_info *info;
+ int err = 0;
+
+ BT_DBG("dev %p, skb %p", hdev, skb);
+
+ info = hci_get_drvdata(hdev);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+ dev_warn(info->dev, "Frame for non-running device\n");
+ return -EIO;
+ }
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ break;
+ }
+
+ /* Push frame type to skb */
+ *skb_push(skb, 1) = bt_cb(skb)->pkt_type;
+ /* We should always send word aligned data to h4+ devices */
+ if (skb->len % 2) {
+ err = skb_pad(skb, 1);
+ if (!err)
+ *skb_put(skb, 1) = 0x00;
+ }
+ if (err)
+ return err;
+
+ skb_queue_tail(&info->txq, skb);
+ hci_h4p_enable_tx(info);
+
+ return 0;
+}
+
+static ssize_t hci_h4p_store_bdaddr(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hci_h4p_info *info = dev_get_drvdata(dev);
+ unsigned int bdaddr[6];
+ int ret, i;
+
+ ret = sscanf(buf, "%2x:%2x:%2x:%2x:%2x:%2x\n",
+ &bdaddr[0], &bdaddr[1], &bdaddr[2],
+ &bdaddr[3], &bdaddr[4], &bdaddr[5]);
+
+ if (ret != 6)
+ return -EINVAL;
+
+ for (i = 0; i < 6; i++) {
+ if (bdaddr[i] > 0xff)
+ return -EINVAL;
+ info->bd_addr[i] = bdaddr[i] & 0xff;
+ }
+
+ return count;
+}
+
+static ssize_t hci_h4p_show_bdaddr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hci_h4p_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%pMR\n", info->bd_addr);
+}
+
+static DEVICE_ATTR(bdaddr, S_IRUGO | S_IWUSR, hci_h4p_show_bdaddr,
+ hci_h4p_store_bdaddr);
+
+static int hci_h4p_sysfs_create_files(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_bdaddr);
+}
+
+static void hci_h4p_sysfs_remove_files(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_bdaddr);
+}
+
+static int hci_h4p_register_hdev(struct hci_h4p_info *info)
+{
+ struct hci_dev *hdev;
+
+ /* Initialize and register HCI device */
+
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ dev_err(info->dev, "Can't allocate memory for device\n");
+ return -ENOMEM;
+ }
+ info->hdev = hdev;
+
+ hdev->bus = HCI_UART;
+ hci_set_drvdata(hdev, info);
+
+ hdev->open = hci_h4p_hci_open;
+ hdev->close = hci_h4p_hci_close;
+ hdev->flush = hci_h4p_hci_flush;
+ hdev->send = hci_h4p_hci_send_frame;
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+ SET_HCIDEV_DEV(hdev, info->dev);
+
+ if (hci_h4p_sysfs_create_files(info->dev) < 0) {
+ dev_err(info->dev, "failed to create sysfs files\n");
+ goto free;
+ }
+
+ if (hci_register_dev(hdev) >= 0)
+ return 0;
+
+ dev_err(info->dev, "hci_register failed %s.\n", hdev->name);
+ hci_h4p_sysfs_remove_files(info->dev);
+free:
+ hci_free_dev(info->hdev);
+ return -ENODEV;
+}
+
+static int hci_h4p_probe(struct platform_device *pdev)
+{
+ struct hci_h4p_platform_data *bt_plat_data;
+ struct hci_h4p_info *info;
+ int err;
+
+ dev_info(&pdev->dev, "Registering HCI H4P device\n");
+ info = devm_kzalloc(&pdev->dev, sizeof(struct hci_h4p_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &pdev->dev;
+ info->tx_enabled = 1;
+ info->rx_enabled = 1;
+ spin_lock_init(&info->lock);
+ spin_lock_init(&info->clocks_lock);
+ skb_queue_head_init(&info->txq);
+
+ if (pdev->dev.platform_data == NULL) {
+ dev_err(&pdev->dev, "Could not get Bluetooth config data\n");
+ return -ENODATA;
+ }
+
+ bt_plat_data = pdev->dev.platform_data;
+ info->chip_type = bt_plat_data->chip_type;
+ info->bt_wakeup_gpio = bt_plat_data->bt_wakeup_gpio;
+ info->host_wakeup_gpio = bt_plat_data->host_wakeup_gpio;
+ info->reset_gpio = bt_plat_data->reset_gpio;
+ info->reset_gpio_shared = bt_plat_data->reset_gpio_shared;
+ info->bt_sysclk = bt_plat_data->bt_sysclk;
+
+ BT_DBG("RESET gpio: %d", info->reset_gpio);
+ BT_DBG("BTWU gpio: %d", info->bt_wakeup_gpio);
+ BT_DBG("HOSTWU gpio: %d", info->host_wakeup_gpio);
+ BT_DBG("sysclk: %d", info->bt_sysclk);
+
+ init_completion(&info->test_completion);
+ complete_all(&info->test_completion);
+
+ if (!info->reset_gpio_shared) {
+ err = devm_gpio_request_one(&pdev->dev, info->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "bt_reset");
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot get GPIO line %d\n",
+ info->reset_gpio);
+ return err;
+ }
+ }
+
+ err = devm_gpio_request_one(&pdev->dev, info->bt_wakeup_gpio,
+ GPIOF_OUT_INIT_LOW, "bt_wakeup");
+
+ if (err < 0) {
+ dev_err(info->dev, "Cannot get GPIO line 0x%d",
+ info->bt_wakeup_gpio);
+ return err;
+ }
+
+ err = devm_gpio_request_one(&pdev->dev, info->host_wakeup_gpio,
+ GPIOF_DIR_IN, "host_wakeup");
+ if (err < 0) {
+ dev_err(info->dev, "Cannot get GPIO line %d",
+ info->host_wakeup_gpio);
+ return err;
+ }
+
+ info->irq = bt_plat_data->uart_irq;
+ info->uart_base = devm_ioremap(&pdev->dev, bt_plat_data->uart_base,
+ SZ_2K);
+ info->uart_iclk = devm_clk_get(&pdev->dev, bt_plat_data->uart_iclk);
+ info->uart_fclk = devm_clk_get(&pdev->dev, bt_plat_data->uart_fclk);
+
+ err = devm_request_irq(&pdev->dev, info->irq, hci_h4p_interrupt,
+ IRQF_DISABLED, "hci_h4p", info);
+ if (err < 0) {
+ dev_err(info->dev, "hci_h4p: unable to get IRQ %d\n",
+ info->irq);
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, gpio_to_irq(info->host_wakeup_gpio),
+ hci_h4p_wakeup_interrupt, IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING | IRQF_DISABLED,
+ "hci_h4p_wkup", info);
+ if (err < 0) {
+ dev_err(info->dev, "hci_h4p: unable to get wakeup IRQ %d\n",
+ gpio_to_irq(info->host_wakeup_gpio));
+ return err;
+ }
+
+ err = irq_set_irq_wake(gpio_to_irq(info->host_wakeup_gpio), 1);
+ if (err < 0) {
+ dev_err(info->dev, "hci_h4p: unable to set wakeup for IRQ %d\n",
+ gpio_to_irq(info->host_wakeup_gpio));
+ return err;
+ }
+
+ init_timer_deferrable(&info->lazy_release);
+ info->lazy_release.function = hci_h4p_lazy_clock_release;
+ info->lazy_release.data = (unsigned long)info;
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
+ err = hci_h4p_reset_uart(info);
+ if (err < 0)
+ return err;
+ gpio_set_value(info->reset_gpio, 0);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+
+ platform_set_drvdata(pdev, info);
+
+ if (hci_h4p_register_hdev(info) < 0) {
+ dev_err(info->dev, "failed to register hci_h4p hci device\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hci_h4p_remove(struct platform_device *pdev)
+{
+ struct hci_h4p_info *info;
+
+ info = platform_get_drvdata(pdev);
+
+ hci_h4p_sysfs_remove_files(info->dev);
+ hci_h4p_hci_close(info->hdev);
+ hci_unregister_dev(info->hdev);
+ hci_free_dev(info->hdev);
+
+ return 0;
+}
+
+static struct platform_driver hci_h4p_driver = {
+ .probe = hci_h4p_probe,
+ .remove = hci_h4p_remove,
+ .driver = {
+ .name = "hci_h4p",
+ },
+};
+
+module_platform_driver(hci_h4p_driver);
+
+MODULE_ALIAS("platform:hci_h4p");
+MODULE_DESCRIPTION("Bluetooth h4 driver with nokia extensions");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ville Tervo");
diff --git a/drivers/staging/nokia_h4p/nokia_fw-bcm.c b/drivers/staging/nokia_h4p/nokia_fw-bcm.c
new file mode 100644
index 000000000000..111ae94032d1
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_fw-bcm.c
@@ -0,0 +1,148 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2005-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/serial_reg.h>
+
+#include "hci_h4p.h"
+
+static int hci_h4p_bcm_set_bdaddr(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ int i;
+ static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
+ int not_valid;
+
+ not_valid = 1;
+ for (i = 0; i < 6; i++) {
+ if (info->bd_addr[i] != 0x00) {
+ not_valid = 0;
+ break;
+ }
+ }
+
+ if (not_valid) {
+ dev_info(info->dev, "Valid bluetooth address not found, setting some random\n");
+ /* When address is not valid, use some random but Nokia MAC */
+ memcpy(info->bd_addr, nokia_oui, 3);
+ get_random_bytes(info->bd_addr + 3, 3);
+ }
+
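+ /* Copy the address into the command payload in reversed byte order */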
+ for (i = 0; i < 6; i++)
+ skb->data[9 - i] = info->bd_addr[i];
+
+ return 0;
+}
+
+void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
+{
+ struct sk_buff *fw_skb;
+ int err;
+ unsigned long flags;
+
+ if (skb->data[5] != 0x00) {
+ dev_err(info->dev, "Firmware sending command failed 0x%.2x\n",
+ skb->data[5]);
+ info->fw_error = -EPROTO;
+ }
+
+ kfree_skb(skb);
+
+ fw_skb = skb_dequeue(info->fw_q);
+ if (fw_skb == NULL || info->fw_error) {
+ complete(&info->fw_completion);
+ return;
+ }
+
+ if (fw_skb->data[1] == 0x01 && fw_skb->data[2] == 0xfc &&
+ fw_skb->len >= 10) {
+ BT_DBG("Setting bluetooth address");
+ err = hci_h4p_bcm_set_bdaddr(info, fw_skb);
+ if (err < 0) {
+ kfree_skb(fw_skb);
+ info->fw_error = err;
+ complete(&info->fw_completion);
+ return;
+ }
+ }
+
+ skb_queue_tail(&info->txq, fw_skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+
+int hci_h4p_bcm_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue)
+{
+ struct sk_buff *skb;
+ unsigned long flags, time;
+
+ info->fw_error = 0;
+
+ BT_DBG("Sending firmware");
+
+ time = jiffies;
+
+ info->fw_q = fw_queue;
+ skb = skb_dequeue(fw_queue);
+ if (!skb)
+ return -ENODATA;
+
+ BT_DBG("Sending commands");
+
+ /*
+ * Disable smart-idle as UART TX interrupts
+ * are not wake-up capable
+ */
+ hci_h4p_smart_idle(info, 0);
+
+ /* Check if this is bd_address packet */
+ init_completion(&info->fw_completion);
+ skb_queue_tail(&info->txq, skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ if (!wait_for_completion_timeout(&info->fw_completion,
+ msecs_to_jiffies(2000))) {
+ dev_err(info->dev, "No reply to fw command\n");
+ return -ETIMEDOUT;
+ }
+
+ if (info->fw_error) {
+ dev_err(info->dev, "FW error\n");
+ return -EPROTO;
+ }
+
+ BT_DBG("Firmware sent in %d msecs",
+ jiffies_to_msecs(jiffies-time));
+
+ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
+ hci_h4p_set_rts(info, 0);
+ hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE);
+ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
+
+ return 0;
+}
diff --git a/drivers/staging/nokia_h4p/nokia_fw-csr.c b/drivers/staging/nokia_h4p/nokia_fw-csr.c
new file mode 100644
index 000000000000..fe6b704b3d97
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_fw-csr.c
@@ -0,0 +1,149 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2005-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/serial_reg.h>
+
+#include "hci_h4p.h"
+
+void hci_h4p_bc4_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
+{
+ /* Check if this is fw packet */
+ if (skb->data[0] != 0xff) {
+ hci_recv_frame(info->hdev, skb);
+ return;
+ }
+
+ if (skb->data[11] || skb->data[12]) {
+ dev_err(info->dev, "Firmware sending command failed\n");
+ info->fw_error = -EPROTO;
+ }
+
+ kfree_skb(skb);
+ complete(&info->fw_completion);
+}
+
+int hci_h4p_bc4_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue)
+{
+ static const u8 nokia_oui[3] = {0x00, 0x19, 0x4F};
+ struct sk_buff *skb;
+ unsigned int offset;
+ int retries, count, i, not_valid;
+ unsigned long flags;
+
+ info->fw_error = 0;
+
+ BT_DBG("Sending firmware");
+ skb = skb_dequeue(fw_queue);
+
+ if (!skb)
+ return -ENOMSG;
+
+ /* Check if this is bd_address packet */
+ if (skb->data[15] == 0x01 && skb->data[16] == 0x00) {
+ offset = 21;
+ skb->data[offset + 1] = 0x00;
+ skb->data[offset + 5] = 0x00;
+
+ not_valid = 1;
+ for (i = 0; i < 6; i++) {
+ if (info->bd_addr[i] != 0x00) {
+ not_valid = 0;
+ break;
+ }
+ }
+
+ if (not_valid) {
+ dev_info(info->dev, "Valid bluetooth address not found, setting some random\n");
+ /* When address is not valid, use some random */
+ memcpy(info->bd_addr, nokia_oui, 3);
+ get_random_bytes(info->bd_addr + 3, 3);
+ }
+
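+ /* Scatter the address bytes into the payload in the order the
+ * CSR firmware expects. */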
+ skb->data[offset + 7] = info->bd_addr[0];
+ skb->data[offset + 6] = info->bd_addr[1];
+ skb->data[offset + 4] = info->bd_addr[2];
+ skb->data[offset + 0] = info->bd_addr[3];
+ skb->data[offset + 3] = info->bd_addr[4];
+ skb->data[offset + 2] = info->bd_addr[5];
+ }
+
+ for (count = 1; ; count++) {
+ BT_DBG("Sending firmware command %d", count);
+ init_completion(&info->fw_completion);
+ skb_queue_tail(&info->txq, skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ skb = skb_dequeue(fw_queue);
+ if (!skb)
+ break;
+
+ if (!wait_for_completion_timeout(&info->fw_completion,
+ msecs_to_jiffies(1000))) {
+ dev_err(info->dev, "No reply to fw command\n");
+ return -ETIMEDOUT;
+ }
+
+ if (info->fw_error) {
+ dev_err(info->dev, "FW error\n");
+ return -EPROTO;
+ }
+ }
+
+ /* Wait for chip warm reset */
+ retries = 100;
+ while ((!skb_queue_empty(&info->txq) ||
+ !(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) &&
+ retries--) {
+ msleep(10);
+ }
+ if (retries < 0) {
+ dev_err(info->dev, "Transmitter not empty\n");
+ return -ETIMEDOUT;
+ }
+
+ hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE);
+
+ if (hci_h4p_wait_for_cts(info, 1, 100)) {
+ dev_err(info->dev, "cts didn't deassert after final speed\n");
+ return -ETIMEDOUT;
+ }
+
+ retries = 100;
+ do {
+ init_completion(&info->init_completion);
+ hci_h4p_send_alive_packet(info);
+ retries--;
+ } while (!wait_for_completion_timeout(&info->init_completion,
+ msecs_to_jiffies(100)) && retries > 0);
+
+ if (!retries) {
+ dev_err(info->dev, "No alive reply after speed change\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/nokia_h4p/nokia_fw-ti1273.c b/drivers/staging/nokia_h4p/nokia_fw-ti1273.c
new file mode 100644
index 000000000000..f5500f71c839
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_fw-ti1273.c
@@ -0,0 +1,110 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2009 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/serial_reg.h>
+
+#include "hci_h4p.h"
+
+static struct sk_buff_head *fw_q;
+
+void hci_h4p_ti1273_parse_fw_event(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ struct sk_buff *fw_skb;
+ unsigned long flags;
+
+ if (skb->data[5] != 0x00) {
+ dev_err(info->dev, "Firmware sending command failed 0x%.2x\n",
+ skb->data[5]);
+ info->fw_error = -EPROTO;
+ }
+
+ kfree_skb(skb);
+
+ fw_skb = skb_dequeue(fw_q);
+ if (fw_skb == NULL || info->fw_error) {
+ complete(&info->fw_completion);
+ return;
+ }
+
+ skb_queue_tail(&info->txq, fw_skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+
+int hci_h4p_ti1273_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue)
+{
+ struct sk_buff *skb;
+ unsigned long flags, time;
+
+ info->fw_error = 0;
+
+ BT_DBG("Sending firmware");
+
+ time = jiffies;
+
+ fw_q = fw_queue;
+ skb = skb_dequeue(fw_queue);
+ if (!skb)
+ return -ENODATA;
+
+ BT_DBG("Sending commands");
+ /* Check if this is bd_address packet */
+ init_completion(&info->fw_completion);
+ hci_h4p_smart_idle(info, 0);
+ skb_queue_tail(&info->txq, skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ if (!wait_for_completion_timeout(&info->fw_completion,
+ msecs_to_jiffies(2000))) {
+ dev_err(info->dev, "No reply to fw command\n");
+ return -ETIMEDOUT;
+ }
+
+ if (info->fw_error) {
+ dev_err(info->dev, "FW error\n");
+ return -EPROTO;
+ }
+
+ BT_DBG("Firmware sent in %d msecs",
+ jiffies_to_msecs(jiffies-time));
+
+ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
+ hci_h4p_set_rts(info, 0);
+ hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE);
+ if (hci_h4p_wait_for_cts(info, 1, 100)) {
+ dev_err(info->dev,
+ "cts didn't go down after final speed change\n");
+ return -ETIMEDOUT;
+ }
+ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
+
+ return 0;
+}
diff --git a/drivers/staging/nokia_h4p/nokia_fw.c b/drivers/staging/nokia_h4p/nokia_fw.c
new file mode 100644
index 000000000000..14ba2193efb6
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_fw.c
@@ -0,0 +1,208 @@
+/*
+ * This file is part of hci_h4p bluetooth driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation.
+ *
+ * Contact: Ville Tervo <ville.tervo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/clk.h>
+
+#include <net/bluetooth/bluetooth.h>
+
+#include "hci_h4p.h"
+
+#define FW_NAME_TI1271_PRELE "ti1273_prele.bin"
+#define FW_NAME_TI1271_LE "ti1273_le.bin"
+#define FW_NAME_TI1271 "ti1273.bin"
+#define FW_NAME_BCM2048 "bcmfw.bin"
+#define FW_NAME_CSR "bc4fw.bin"
+
+static int fw_pos;
+
+/* Firmware handling */
+static int hci_h4p_open_firmware(struct hci_h4p_info *info,
+ const struct firmware **fw_entry)
+{
+ int err;
+
+ fw_pos = 0;
+ BT_DBG("Opening firmware man_id 0x%.2x ver_id 0x%.2x",
+ info->man_id, info->ver_id);
+ switch (info->man_id) {
+ case H4P_ID_TI1271:
+ switch (info->ver_id) {
+ case 0xe1:
+ err = request_firmware(fw_entry, FW_NAME_TI1271_PRELE,
+ info->dev);
+ break;
+ case 0xd1:
+ case 0xf1:
+ err = request_firmware(fw_entry, FW_NAME_TI1271_LE,
+ info->dev);
+ break;
+ default:
+ err = request_firmware(fw_entry, FW_NAME_TI1271,
+ info->dev);
+ }
+ break;
+ case H4P_ID_CSR:
+ err = request_firmware(fw_entry, FW_NAME_CSR, info->dev);
+ break;
+ case H4P_ID_BCM2048:
+ err = request_firmware(fw_entry, FW_NAME_BCM2048, info->dev);
+ break;
+ default:
+ dev_err(info->dev, "Invalid chip type\n");
+ *fw_entry = NULL;
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static void hci_h4p_close_firmware(const struct firmware *fw_entry)
+{
+ release_firmware(fw_entry);
+}
+
+/* Read fw. Return length of the command. If no more commands in
+ * fw 0 is returned. In error case return value is negative.
+ */
+static int hci_h4p_read_fw_cmd(struct hci_h4p_info *info, struct sk_buff **skb,
+ const struct firmware *fw_entry, gfp_t how)
+{
+ unsigned int cmd_len;
+
+ if (fw_pos >= fw_entry->size)
+ return 0;
+
+ if (fw_pos + 2 > fw_entry->size) {
+ dev_err(info->dev, "Corrupted firmware image 1\n");
+ return -EMSGSIZE;
+ }
+
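+ /* Each firmware record is a 16-bit little-endian length followed
+ * by that many bytes of HCI command. */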
+ cmd_len = fw_entry->data[fw_pos++];
+ cmd_len += fw_entry->data[fw_pos++] << 8;
+ if (cmd_len == 0)
+ return 0;
+
+ if (fw_pos + cmd_len > fw_entry->size) {
+ dev_err(info->dev, "Corrupted firmware image 2\n");
+ return -EMSGSIZE;
+ }
+
+ *skb = bt_skb_alloc(cmd_len, how);
+ if (!*skb) {
+ dev_err(info->dev, "Cannot reserve memory for buffer\n");
+ return -ENOMEM;
+ }
+ memcpy(skb_put(*skb, cmd_len), &fw_entry->data[fw_pos], cmd_len);
+
+ fw_pos += cmd_len;
+
+ return (*skb)->len;
+}
+
+int hci_h4p_read_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue)
+{
+ const struct firmware *fw_entry = NULL;
+ struct sk_buff *skb = NULL;
+ int err;
+
+ err = hci_h4p_open_firmware(info, &fw_entry);
+ if (err < 0 || !fw_entry)
+ goto err_clean;
+
+ while ((err = hci_h4p_read_fw_cmd(info, &skb, fw_entry, GFP_KERNEL))) {
+ if (err < 0 || !skb)
+ goto err_clean;
+
+ skb_queue_tail(fw_queue, skb);
+ }
+
+ /*
+ * The chip detection code has already handled the negotiation and
+ * alive packets, so discard the first two skbs.
+ */
+ skb = skb_dequeue(fw_queue);
+ if (!skb) {
+ err = -EMSGSIZE;
+ goto err_clean;
+ }
+ kfree_skb(skb);
+ skb = skb_dequeue(fw_queue);
+ if (!skb) {
+ err = -EMSGSIZE;
+ goto err_clean;
+ }
+ kfree_skb(skb);
+
+err_clean:
+ hci_h4p_close_firmware(fw_entry);
+ return err;
+}
+
+int hci_h4p_send_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue)
+{
+ int err;
+
+ switch (info->man_id) {
+ case H4P_ID_CSR:
+ err = hci_h4p_bc4_send_fw(info, fw_queue);
+ break;
+ case H4P_ID_TI1271:
+ err = hci_h4p_ti1273_send_fw(info, fw_queue);
+ break;
+ case H4P_ID_BCM2048:
+ err = hci_h4p_bcm_send_fw(info, fw_queue);
+ break;
+ default:
+ dev_err(info->dev, "Don't know how to send firmware\n");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
+{
+ switch (info->man_id) {
+ case H4P_ID_CSR:
+ hci_h4p_bc4_parse_fw_event(info, skb);
+ break;
+ case H4P_ID_TI1271:
+ hci_h4p_ti1273_parse_fw_event(info, skb);
+ break;
+ case H4P_ID_BCM2048:
+ hci_h4p_bcm_parse_fw_event(info, skb);
+ break;
+ default:
+ dev_err(info->dev, "Don't know how to parse fw event\n");
+ info->fw_error = -EINVAL;
+ }
+}
+
+MODULE_FIRMWARE(FW_NAME_TI1271_PRELE);
+MODULE_FIRMWARE(FW_NAME_TI1271_LE);
+MODULE_FIRMWARE(FW_NAME_TI1271);
+MODULE_FIRMWARE(FW_NAME_BCM2048);
+MODULE_FIRMWARE(FW_NAME_CSR);
diff --git a/drivers/staging/nokia_h4p/nokia_uart.c b/drivers/staging/nokia_h4p/nokia_uart.c
new file mode 100644
index 000000000000..0fb57de4b750
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_uart.c
@@ -0,0 +1,199 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/serial_reg.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <linux/io.h>
+
+#include "hci_h4p.h"
+
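+/* The OMAP UART maps its registers on 32-bit boundaries, hence offset << 2 */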
+inline void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val)
+{
+ __raw_writeb(val, info->uart_base + (offset << 2));
+}
+
+inline u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset)
+{
+ return __raw_readb(info->uart_base + (offset << 2));
+}
+
+void hci_h4p_set_rts(struct hci_h4p_info *info, int active)
+{
+ u8 b;
+
+ b = hci_h4p_inb(info, UART_MCR);
+ if (active)
+ b |= UART_MCR_RTS;
+ else
+ b &= ~UART_MCR_RTS;
+ hci_h4p_outb(info, UART_MCR, b);
+}
+
+int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active,
+ int timeout_ms)
+{
+ unsigned long timeout;
+ int state;
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ for (;;) {
+ state = hci_h4p_inb(info, UART_MSR) & UART_MSR_CTS;
+ if (active) {
+ if (state)
+ return 0;
+ } else {
+ if (!state)
+ return 0;
+ }
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ msleep(1);
+ }
+}
+
+void __hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which)
+{
+ u8 lcr, b;
+
+ lcr = hci_h4p_inb(info, UART_LCR);
+ hci_h4p_outb(info, UART_LCR, 0xbf);
+ b = hci_h4p_inb(info, UART_EFR);
+ if (on)
+ b |= which;
+ else
+ b &= ~which;
+ hci_h4p_outb(info, UART_EFR, b);
+ hci_h4p_outb(info, UART_LCR, lcr);
+}
+
+void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+ __hci_h4p_set_auto_ctsrts(info, on, which);
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed)
+{
+ unsigned int divisor;
+ u8 lcr, mdr1;
+
+ BT_DBG("Setting speed %lu", speed);
+
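+ /* Rates of 460800 and above need the OMAP 13x divisor mode
+ * (MDR1 = 3); lower rates use the standard 16x mode (MDR1 = 0). */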
+ if (speed >= 460800) {
+ divisor = UART_CLOCK / 13 / speed;
+ mdr1 = 3;
+ } else {
+ divisor = UART_CLOCK / 16 / speed;
+ mdr1 = 0;
+ }
+
+ /* Make sure UART mode is disabled */
+ hci_h4p_outb(info, UART_OMAP_MDR1, 7);
+
+ lcr = hci_h4p_inb(info, UART_LCR);
+ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB); /* Set DLAB */
+ hci_h4p_outb(info, UART_DLL, divisor & 0xff); /* Set speed */
+ hci_h4p_outb(info, UART_DLM, divisor >> 8);
+ hci_h4p_outb(info, UART_LCR, lcr);
+
+ /* Make sure UART mode is enabled */
+ hci_h4p_outb(info, UART_OMAP_MDR1, mdr1);
+}
+
+int hci_h4p_reset_uart(struct hci_h4p_info *info)
+{
+ int count = 0;
+
+ /* Reset the UART */
+ hci_h4p_outb(info, UART_OMAP_SYSC, UART_SYSC_OMAP_RESET);
+ while (!(hci_h4p_inb(info, UART_OMAP_SYSS) & UART_SYSS_RESETDONE)) {
+ if (count++ > 100) {
+ dev_err(info->dev, "hci_h4p: UART reset timeout\n");
+ return -ENODEV;
+ }
+ udelay(1);
+ }
+
+ return 0;
+}
+
+void hci_h4p_store_regs(struct hci_h4p_info *info)
+{
+ u16 lcr = 0;
+
+ lcr = hci_h4p_inb(info, UART_LCR);
+ hci_h4p_outb(info, UART_LCR, 0xBF);
+ info->dll = hci_h4p_inb(info, UART_DLL);
+ info->dlh = hci_h4p_inb(info, UART_DLM);
+ info->efr = hci_h4p_inb(info, UART_EFR);
+ hci_h4p_outb(info, UART_LCR, lcr);
+ info->mdr1 = hci_h4p_inb(info, UART_OMAP_MDR1);
+ info->ier = hci_h4p_inb(info, UART_IER);
+}
+
+void hci_h4p_restore_regs(struct hci_h4p_info *info)
+{
+ u16 lcr = 0;
+
+ hci_h4p_init_uart(info);
+
+ hci_h4p_outb(info, UART_OMAP_MDR1, 7);
+ lcr = hci_h4p_inb(info, UART_LCR);
+ hci_h4p_outb(info, UART_LCR, 0xBF);
+ hci_h4p_outb(info, UART_DLL, info->dll); /* Set speed */
+ hci_h4p_outb(info, UART_DLM, info->dlh);
+ hci_h4p_outb(info, UART_EFR, info->efr);
+ hci_h4p_outb(info, UART_LCR, lcr);
+ hci_h4p_outb(info, UART_OMAP_MDR1, info->mdr1);
+ hci_h4p_outb(info, UART_IER, info->ier);
+}
+
+void hci_h4p_init_uart(struct hci_h4p_info *info)
+{
+ u8 mcr, efr;
+
+ /* Enable and setup FIFO */
+ hci_h4p_outb(info, UART_OMAP_MDR1, 0x00);
+
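+ /* LCR = 0xbf selects configuration mode B so that EFR (and, with
+ * ECB and MCR[TCRTLR], the TCR/TLR registers) can be accessed. */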
+ hci_h4p_outb(info, UART_LCR, 0xbf);
+ efr = hci_h4p_inb(info, UART_EFR);
+ hci_h4p_outb(info, UART_EFR, UART_EFR_ECB);
+ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB);
+ mcr = hci_h4p_inb(info, UART_MCR);
+ hci_h4p_outb(info, UART_MCR, UART_MCR_TCRTLR);
+ hci_h4p_outb(info, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT |
+ (3 << 6) | (0 << 4));
+ hci_h4p_outb(info, UART_LCR, 0xbf);
+ hci_h4p_outb(info, UART_TI752_TLR, 0xed);
+ hci_h4p_outb(info, UART_TI752_TCR, 0xef);
+ hci_h4p_outb(info, UART_EFR, efr);
+ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB);
+ hci_h4p_outb(info, UART_MCR, 0x00);
+ hci_h4p_outb(info, UART_LCR, UART_LCR_WLEN8);
+ hci_h4p_outb(info, UART_IER, UART_IER_RDI);
+ hci_h4p_outb(info, UART_OMAP_SYSC, (1 << 0) | (1 << 2) | (2 << 3));
+}
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 3ee0b1887a54..90f1c4d7fa89 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -678,8 +678,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
nvec->rx->data[nvec->rx->pos++] = received;
else
dev_err(nvec->dev,
- "RX buffer overflow on %p: "
- "Trying to write byte %u of %u\n",
+ "RX buffer overflow on %p: Trying to write byte %u of %u\n",
nvec->rx, nvec->rx ? nvec->rx->pos : 0,
NVEC_MSG_SIZE);
break;
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 06dbb02085a9..45b2f1308e01 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -106,7 +106,7 @@ static int nvec_mouse_probe(struct platform_device *pdev)
struct serio *ser_dev;
char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
- ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
if (ser_dev == NULL)
return -ENOMEM;
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 5a001d9b4252..8b8ce7293c52 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -465,6 +465,112 @@ struct octeon_hcd {
#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
/**
+ * struct octeon_temp_buffer - a bounce buffer for USB transfers
+ * @temp_buffer: the newly allocated temporary buffer (including meta-data)
+ * @orig_buffer: the original buffer passed by the USB stack
+ * @data: the newly allocated temporary buffer (excluding meta-data)
+ *
+ * Both the DMA engine and FIFO mode will always transfer full 32-bit words. If
+ * the buffer is too short, we need to allocate a temporary one, and this struct
+ * represents it.
+ */
+struct octeon_temp_buffer {
+ void *temp_buffer;
+ void *orig_buffer;
+ u8 data[0];
+};
+
+/**
+ * octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer
+ * (if needed)
+ * @urb: URB.
+ * @mem_flags: Memory allocation flags.
+ *
+ * This function allocates a temporary bounce buffer whenever it's needed
+ * due to HW limitations.
+ */
+static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ struct octeon_temp_buffer *temp;
+
+ if (urb->num_sgs || urb->sg ||
+ (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) ||
+ !(urb->transfer_buffer_length % sizeof(u32)))
+ return 0;
+
+ temp = kmalloc(ALIGN(urb->transfer_buffer_length, sizeof(u32)) +
+ sizeof(*temp), mem_flags);
+ if (!temp)
+ return -ENOMEM;
+
+ temp->temp_buffer = temp;
+ temp->orig_buffer = urb->transfer_buffer;
+ if (usb_urb_dir_out(urb))
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+}
+
+/**
+ * octeon_free_temp_buffer - free a temporary buffer used by USB transfers.
+ * @urb: URB.
+ *
+ * Frees a buffer allocated by octeon_alloc_temp_buffer().
+ */
+static void octeon_free_temp_buffer(struct urb *urb)
+{
+ struct octeon_temp_buffer *temp;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer,
+ data);
+ if (usb_urb_dir_in(urb))
+ memcpy(temp->orig_buffer, urb->transfer_buffer,
+ urb->actual_length);
+ urb->transfer_buffer = temp->orig_buffer;
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+ kfree(temp->temp_buffer);
+}
+
+/**
+ * octeon_map_urb_for_dma - Octeon-specific map_urb_for_dma().
+ * @hcd: USB HCD structure.
+ * @urb: URB.
+ * @mem_flags: Memory allocation flags.
+ */
+static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ ret = octeon_alloc_temp_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ octeon_free_temp_buffer(urb);
+
+ return ret;
+}
+
+/**
+ * octeon_unmap_urb_for_dma - Octeon-specific unmap_urb_for_dma()
+ * @hcd: USB HCD structure.
+ * @urb: URB.
+ */
+static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+ octeon_free_temp_buffer(urb);
+}
+
+/**
* Read a USB 32bit CSR. It performs the necessary address swizzle
* for 32bit CSRs and logs the value in a readable format if
* debugging is on.
@@ -605,7 +711,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
* 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
* USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
*/
- usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.u64 =
+ __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
usbn_clk_ctl.s.por = 1;
usbn_clk_ctl.s.hrst = 0;
usbn_clk_ctl.s.prst = 0;
@@ -691,7 +798,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
* USBP control and status register:
* USBN_USBP_CTL_STATUS[ATE_RESET] = 1
*/
- usbn_usbp_ctl_status.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index));
+ usbn_usbp_ctl_status.u64 = __cvmx_usb_read_csr64(usb,
+ CVMX_USBNX_USBP_CTL_STATUS(usb->index));
usbn_usbp_ctl_status.s.ate_reset = 1;
__cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
usbn_usbp_ctl_status.u64);
@@ -758,7 +866,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
if (OCTEON_IS_MODEL(OCTEON_CN31XX))
usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
usbcx_gahbcfg.u32 = 0;
- usbcx_gahbcfg.s.dmaen = !(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
+ usbcx_gahbcfg.s.dmaen = !(usb->init_flags &
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
/* Only use one channel with non DMA */
usb->idle_hardware_channels = 0x1;
@@ -783,7 +892,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
*/
{
union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
- usbcx_gusbcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
+ usbcx_gusbcfg.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GUSBCFG(usb->index));
usbcx_gusbcfg.s.toutcal = 0;
usbcx_gusbcfg.s.ddrsel = 0;
usbcx_gusbcfg.s.usbtrdtim = 0x5;
@@ -801,7 +911,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
union cvmx_usbcx_gintmsk usbcx_gintmsk;
int channel;
- usbcx_gintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
+ usbcx_gintmsk.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GINTMSK(usb->index));
usbcx_gintmsk.s.otgintmsk = 1;
usbcx_gintmsk.s.modemismsk = 1;
usbcx_gintmsk.s.hchintmsk = 1;
@@ -817,7 +928,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
* later.
*/
for (channel = 0; channel < 8; channel++)
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
}
{
@@ -827,26 +939,30 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
* 1. Program the host-port interrupt-mask field to unmask,
* USBC_GINTMSK[PRTINT] = 1
*/
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
- prtintmsk, 1);
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
- disconnintmsk, 1);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk, prtintmsk, 1);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk, disconnintmsk, 1);
/*
* 2. Program the USBC_HCFG register to select full-speed host
* or high-speed host.
*/
{
union cvmx_usbcx_hcfg usbcx_hcfg;
- usbcx_hcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
+ usbcx_hcfg.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCFG(usb->index));
usbcx_hcfg.s.fslssupp = 0;
usbcx_hcfg.s.fslspclksel = 0;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCFG(usb->index),
+ usbcx_hcfg.u32);
}
/*
* 3. Program the port power bit to drive VBUS on the USB,
* USBC_HPRT[PRTPWR] = 1
*/
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtpwr, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index),
+ union cvmx_usbcx_hprt, prtpwr, 1);
/*
* Steps 4-15 from the manual are done later in the port enable
@@ -879,7 +995,8 @@ static int cvmx_usb_shutdown(struct cvmx_usb_state *usb)
return -EBUSY;
/* Disable the clocks and put them in power on reset */
- usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb,
+ CVMX_USBNX_CLK_CTL(usb->index));
usbn_clk_ctl.s.enable = 1;
usbn_clk_ctl.s.por = 1;
usbn_clk_ctl.s.hclk_rst = 1;
@@ -903,7 +1020,8 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
{
union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
- usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPRT(usb->index));
/*
* If the port is already enabled the just return. We don't need to do
@@ -913,12 +1031,12 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
return 0;
/* If there is nothing plugged into the port then fail immediately */
- if (!usb->usbcx_hprt.s.prtconnsts) {
+ if (!usb->usbcx_hprt.s.prtconnsts)
return -ETIMEDOUT;
- }
/* Program the port reset bit to start the reset process */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
+ prtrst, 1);
/*
* Wait at least 50ms (high speed), or 10ms (full speed) for the reset
@@ -927,26 +1045,30 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
mdelay(50);
/* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 0);
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
+ prtrst, 0);
/* Wait for the USBC_HPRT[PRTENA]. */
- if (CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
- prtena, ==, 1, 100000))
+ if (CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index),
+ union cvmx_usbcx_hprt, prtena, ==, 1, 100000))
return -ETIMEDOUT;
/*
* Read the port speed field to get the enumerated speed,
* USBC_HPRT[PRTSPD].
*/
- usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- usbcx_ghwcfg3.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GHWCFG3(usb->index));
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPRT(usb->index));
+ usbcx_ghwcfg3.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GHWCFG3(usb->index));
/*
* 13. Program the USBC_GRXFSIZ register to select the size of the
* receive FIFO (25%).
*/
- USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), union cvmx_usbcx_grxfsiz,
- rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
+ USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index),
+ union cvmx_usbcx_grxfsiz, rxfdep,
+ usbcx_ghwcfg3.s.dfifodepth / 4);
/*
* 14. Program the USBC_GNPTXFSIZ register to select the size and the
* start address of the non- periodic transmit FIFO for nonperiodic
@@ -954,10 +1076,12 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
*/
{
union cvmx_usbcx_gnptxfsiz siz;
- siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
+ siz.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GNPTXFSIZ(usb->index));
siz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
siz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), siz.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index),
+ siz.u32);
}
/*
* 15. Program the USBC_HPTXFSIZ register to select the size and start
@@ -966,18 +1090,25 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
*/
{
union cvmx_usbcx_hptxfsiz siz;
- siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
+ siz.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPTXFSIZ(usb->index));
siz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
siz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), siz.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index),
+ siz.u32);
}
/* Flush all FIFOs */
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfnum, 0x10);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfflsh, 1);
- CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ union cvmx_usbcx_grstctl, txfnum, 0x10);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ union cvmx_usbcx_grstctl, txfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ union cvmx_usbcx_grstctl,
txfflsh, ==, 0, 100);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, rxfflsh, 1);
- CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ union cvmx_usbcx_grstctl, rxfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ union cvmx_usbcx_grstctl,
rxfflsh, ==, 0, 100);
return 0;
@@ -997,7 +1128,8 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
static int cvmx_usb_disable(struct cvmx_usb_state *usb)
{
/* Disable the port */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtena, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
+ prtena, 1);
return 0;
}
@@ -1013,20 +1145,23 @@ static int cvmx_usb_disable(struct cvmx_usb_state *usb)
*
* Returns: Port status information
*/
-static struct cvmx_usb_port_status cvmx_usb_get_status(struct cvmx_usb_state *usb)
+static struct cvmx_usb_port_status cvmx_usb_get_status(
+ struct cvmx_usb_state *usb)
{
union cvmx_usbcx_hprt usbc_hprt;
struct cvmx_usb_port_status result;
memset(&result, 0, sizeof(result));
- usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPRT(usb->index));
result.port_enabled = usbc_hprt.s.prtena;
result.port_over_current = usbc_hprt.s.prtovrcurract;
result.port_powered = usbc_hprt.s.prtpwr;
result.port_speed = usbc_hprt.s.prtspd;
result.connected = usbc_hprt.s.prtconnsts;
- result.connect_change = (result.connected != usb->port_status.connected);
+ result.connect_change =
+ (result.connected != usb->port_status.connected);
return result;
}
@@ -1121,7 +1256,8 @@ static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb,
if (unlikely((device_speed != CVMX_USB_SPEED_HIGH) &&
(multi_count != 0)))
return NULL;
- if (unlikely((hub_device_addr < 0) || (hub_device_addr > MAX_USB_ADDRESS)))
+ if (unlikely((hub_device_addr < 0) ||
+ (hub_device_addr > MAX_USB_ADDRESS)))
return NULL;
if (unlikely((hub_port < 0) || (hub_port > MAX_USB_HUB_PORT)))
return NULL;
@@ -1186,7 +1322,8 @@ static void __cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb)
uint64_t address;
uint32_t *ptr;
- rx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GRXSTSPH(usb->index));
+ rx_status.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GRXSTSPH(usb->index));
/* Only read data if IN data is there */
if (rx_status.s.pktsts != 2)
return;
@@ -1236,7 +1373,8 @@ static int __cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb,
while (available && (fifo->head != fifo->tail)) {
int i = fifo->tail;
const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
- uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, usb->index) ^ 4;
+ uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
+ usb->index) ^ 4;
int words = available;
/* Limit the amount of data to waht the SW fifo has */
@@ -1260,7 +1398,8 @@ static int __cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb,
cvmx_write64_uint32(csr_address, *ptr++);
cvmx_write64_uint32(csr_address, *ptr++);
cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ cvmx_read64_uint64(
+ CVMX_USBNX_DMA0_INB_CHN0(usb->index));
words -= 3;
}
cvmx_write64_uint32(csr_address, *ptr++);
@@ -1284,20 +1423,32 @@ static void __cvmx_usb_poll_tx_fifo(struct cvmx_usb_state *usb)
{
if (usb->periodic.head != usb->periodic.tail) {
union cvmx_usbcx_hptxsts tx_status;
- tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXSTS(usb->index));
- if (__cvmx_usb_fill_tx_hw(usb, &usb->periodic, tx_status.s.ptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 1);
+ tx_status.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->periodic,
+ tx_status.s.ptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk,
+ ptxfempmsk, 1);
else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 0);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk,
+ ptxfempmsk, 0);
}
if (usb->nonperiodic.head != usb->nonperiodic.tail) {
union cvmx_usbcx_gnptxsts tx_status;
- tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXSTS(usb->index));
- if (__cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, tx_status.s.nptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 1);
+ tx_status.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GNPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic,
+ tx_status.s.nptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk,
+ nptxfempmsk, 1);
else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 0);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk,
+ nptxfempmsk, 0);
}
return;
@@ -1318,12 +1469,14 @@ static void __cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
struct cvmx_usb_tx_fifo *fifo;
/* We only need to fill data on outbound channels */
- hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ hcchar.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index));
if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
return;
/* OUT Splits only have data on the start and not the complete */
- usbc_hcsplt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index));
+ usbc_hcsplt.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCSPLTX(channel, usb->index));
if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
return;
@@ -1331,7 +1484,8 @@ static void __cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
* Find out how many bytes we need to fill and convert it into 32bit
* words.
*/
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index));
if (!usbc_hctsiz.s.xfersize)
return;
@@ -1371,11 +1525,13 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
node);
union cvmx_usb_control_header *header =
cvmx_phys_to_ptr(transaction->control_header);
- int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+ int bytes_to_transfer = transaction->buffer_length -
+ transaction->actual_bytes;
int packets_to_transfer;
union cvmx_usbcx_hctsizx usbc_hctsiz;
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index));
switch (transaction->stage) {
case CVMX_USB_STAGE_NON_CONTROL:
@@ -1423,12 +1579,14 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
((header->s.request_type & 0x80) ?
CVMX_USB_DIRECTION_IN :
CVMX_USB_DIRECTION_OUT));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
+ union cvmx_usbcx_hcspltx, compsplt, 1);
break;
case CVMX_USB_STAGE_STATUS:
usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, epdir,
((header->s.request_type & 0x80) ?
CVMX_USB_DIRECTION_OUT :
CVMX_USB_DIRECTION_IN));
@@ -1436,11 +1594,13 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, epdir,
((header->s.request_type & 0x80) ?
CVMX_USB_DIRECTION_OUT :
CVMX_USB_DIRECTION_IN));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
+ union cvmx_usbcx_hcspltx, compsplt, 1);
break;
}
@@ -1458,10 +1618,12 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
* Calculate the number of packets to transfer. If the length is zero
* we still need to transfer one packet
*/
- packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) /
+ pipe->max_packet;
if (packets_to_transfer == 0)
packets_to_transfer = 1;
- else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ else if ((packets_to_transfer > 1) &&
+ (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
/*
* Limit to one packet when not using DMA. Channels must be
* restarted between every packet for IN transactions, so there
@@ -1481,7 +1643,8 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
usbc_hctsiz.s.xfersize = bytes_to_transfer;
usbc_hctsiz.s.pktcnt = packets_to_transfer;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index),
+ usbc_hctsiz.u32);
return;
}
@@ -1519,8 +1682,11 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
union cvmx_usbcx_haintmsk usbc_haintmsk;
/* Clear all channel status bits */
- usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index), usbc_hcint.u32);
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCINTX(channel, usb->index));
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTX(channel, usb->index),
+ usbc_hcint.u32);
usbc_hcintmsk.u32 = 0;
usbc_hcintmsk.s.chhltdmsk = 1;
@@ -1567,14 +1733,17 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
int packets_to_transfer;
- int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+ int bytes_to_transfer = transaction->buffer_length -
+ transaction->actual_bytes;
/*
* ISOCHRONOUS transactions store each individual transfer size
* in the packet structure, not the global buffer_length
*/
if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- bytes_to_transfer = transaction->iso_packets[0].length - transaction->actual_bytes;
+ bytes_to_transfer =
+ transaction->iso_packets[0].length -
+ transaction->actual_bytes;
/*
* We need to do split transactions when we are talking to non
@@ -1589,16 +1758,19 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
*/
if ((transaction->stage&1) == 0) {
if (transaction->type == CVMX_USB_TRANSFER_BULK)
- pipe->split_sc_frame = (usb->frame_number + 1) & 0x7f;
+ pipe->split_sc_frame =
+ (usb->frame_number + 1) & 0x7f;
else
- pipe->split_sc_frame = (usb->frame_number + 2) & 0x7f;
+ pipe->split_sc_frame =
+ (usb->frame_number + 2) & 0x7f;
} else
pipe->split_sc_frame = -1;
usbc_hcsplt.s.spltena = 1;
usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
usbc_hcsplt.s.prtaddr = pipe->hub_port;
- usbc_hcsplt.s.compsplt = (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
+ usbc_hcsplt.s.compsplt = (transaction->stage ==
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
/*
* SPLIT transactions can only ever transmit one data
@@ -1614,8 +1786,10 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
* begin/middle/end of the data or all
*/
if (!usbc_hcsplt.s.compsplt &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+ (pipe->transfer_dir ==
+ CVMX_USB_DIRECTION_OUT) &&
+ (pipe->transfer_type ==
+ CVMX_USB_TRANSFER_ISOCHRONOUS)) {
/*
* Clear the split complete frame number as
* there isn't going to be a split complete
@@ -1667,7 +1841,8 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
* Round MAX_TRANSFER_BYTES to a multiple of out packet
* size
*/
- bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
+ bytes_to_transfer = MAX_TRANSFER_BYTES /
+ pipe->max_packet;
bytes_to_transfer *= pipe->max_packet;
}
@@ -1675,10 +1850,14 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
* Calculate the number of packets to transfer. If the length is
* zero we still need to transfer one packet
*/
- packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ packets_to_transfer =
+ (bytes_to_transfer + pipe->max_packet - 1) /
+ pipe->max_packet;
if (packets_to_transfer == 0)
packets_to_transfer = 1;
- else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ else if ((packets_to_transfer > 1) &&
+ (usb->init_flags &
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
/*
* Limit to one packet when not using DMA. Channels must
* be restarted between every packet for IN
@@ -1686,14 +1865,16 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
* packets in a row
*/
packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ bytes_to_transfer = packets_to_transfer *
+ pipe->max_packet;
} else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
/*
* Limit the number of packet and data transferred to
* what the hardware can handle
*/
packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ bytes_to_transfer = packets_to_transfer *
+ pipe->max_packet;
}
usbc_hctsiz.s.xfersize = bytes_to_transfer;
@@ -1707,8 +1888,11 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
if (pipe->flags & __CVMX_USB_PIPE_FLAGS_NEED_PING)
usbc_hctsiz.s.dopng = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index), usbc_hcsplt.u32);
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCSPLTX(channel, usb->index),
+ usbc_hcsplt.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel,
+ usb->index), usbc_hctsiz.u32);
}
/* Setup the Host Channel Characteristics Register */
@@ -1739,11 +1923,14 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
/* Set the rest of the endpoint specific settings */
usbc_hcchar.s.devaddr = pipe->device_addr;
usbc_hcchar.s.eptype = transaction->type;
- usbc_hcchar.s.lspddev = (pipe->device_speed == CVMX_USB_SPEED_LOW);
+ usbc_hcchar.s.lspddev =
+ (pipe->device_speed == CVMX_USB_SPEED_LOW);
usbc_hcchar.s.epdir = pipe->transfer_dir;
usbc_hcchar.s.epnum = pipe->endpoint_num;
usbc_hcchar.s.mps = pipe->max_packet;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index),
+ usbc_hcchar.u32);
}
/* Do transaction type specific fixups as needed */
@@ -1762,22 +1949,33 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
*/
if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
if (pipe->multi_count < 2) /* Need DATA0 */
- USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 0);
+ USB_SET_FIELD32(
+ CVMX_USBCX_HCTSIZX(channel,
+ usb->index),
+ union cvmx_usbcx_hctsizx,
+ pid, 0);
else /* Need MDATA */
- USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 3);
+ USB_SET_FIELD32(
+ CVMX_USBCX_HCTSIZX(channel,
+ usb->index),
+ union cvmx_usbcx_hctsizx,
+ pid, 3);
}
}
break;
}
{
- union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index))};
+ union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 =
+ __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index))};
transaction->xfersize = usbc_hctsiz.s.xfersize;
transaction->pktcnt = usbc_hctsiz.s.pktcnt;
}
/* Remember when we start a split transaction */
if (__cvmx_usb_pipe_needs_split(usb, pipe))
usb->active_split = transaction;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, chena, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, chena, 1);
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
__cvmx_usb_fill_tx_fifo(usb, channel);
return;
@@ -1793,16 +1991,22 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
*
* Returns: Pipe or NULL if none are ready
*/
-static struct cvmx_usb_pipe *__cvmx_usb_find_ready_pipe(struct cvmx_usb_state *usb, struct list_head *list, uint64_t current_frame)
+static struct cvmx_usb_pipe *__cvmx_usb_find_ready_pipe(
+ struct cvmx_usb_state *usb,
+ struct list_head *list,
+ uint64_t current_frame)
{
struct cvmx_usb_pipe *pipe;
list_for_each_entry(pipe, list, node) {
struct cvmx_usb_transaction *t =
- list_first_entry(&pipe->transactions, typeof(*t), node);
+ list_first_entry(&pipe->transactions, typeof(*t),
+ node);
if (!(pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
(pipe->next_tx_frame <= current_frame) &&
- ((pipe->split_sc_frame == -1) || ((((int)current_frame - (int)pipe->split_sc_frame) & 0x7f) < 0x40)) &&
+ ((pipe->split_sc_frame == -1) ||
+ ((((int)current_frame - (int)pipe->split_sc_frame)
+ & 0x7f) < 0x40)) &&
(!usb->active_split || (usb->active_split == t))) {
CVMX_PREFETCH(pipe, 128);
CVMX_PREFETCH(t, 0);
@@ -1852,14 +2056,26 @@ static void __cvmx_usb_schedule(struct cvmx_usb_state *usb, int is_sof)
* way we are sure that the periodic data is sent in the
* beginning of the frame
*/
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_ISOCHRONOUS, usb->frame_number);
+ pipe = __cvmx_usb_find_ready_pipe(usb,
+ usb->active_pipes +
+ CVMX_USB_TRANSFER_ISOCHRONOUS,
+ usb->frame_number);
if (likely(!pipe))
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_INTERRUPT, usb->frame_number);
+ pipe = __cvmx_usb_find_ready_pipe(usb,
+ usb->active_pipes +
+ CVMX_USB_TRANSFER_INTERRUPT,
+ usb->frame_number);
}
if (likely(!pipe)) {
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_CONTROL, usb->frame_number);
+ pipe = __cvmx_usb_find_ready_pipe(usb,
+ usb->active_pipes +
+ CVMX_USB_TRANSFER_CONTROL,
+ usb->frame_number);
if (likely(!pipe))
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_BULK, usb->frame_number);
+ pipe = __cvmx_usb_find_ready_pipe(usb,
+ usb->active_pipes +
+ CVMX_USB_TRANSFER_BULK,
+ usb->frame_number);
}
if (!pipe)
break;
@@ -1873,7 +2089,8 @@ done:
* future that might need to be scheduled
*/
need_sof = 0;
- for (ttype = CVMX_USB_TRANSFER_CONTROL; ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
+ for (ttype = CVMX_USB_TRANSFER_CONTROL;
+ ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
if (pipe->next_tx_frame > usb->frame_number) {
need_sof = 1;
@@ -1881,7 +2098,8 @@ done:
}
}
}
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, sofmsk, need_sof);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk, sofmsk, need_sof);
return;
}
@@ -1932,10 +2150,13 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
/* Recalculate the transfer size by adding up each packet */
urb->actual_length = 0;
for (i = 0; i < urb->number_of_packets; i++) {
- if (iso_packet[i].status == CVMX_USB_COMPLETE_SUCCESS) {
+ if (iso_packet[i].status ==
+ CVMX_USB_COMPLETE_SUCCESS) {
urb->iso_frame_desc[i].status = 0;
- urb->iso_frame_desc[i].actual_length = iso_packet[i].length;
- urb->actual_length += urb->iso_frame_desc[i].actual_length;
+ urb->iso_frame_desc[i].actual_length =
+ iso_packet[i].length;
+ urb->actual_length +=
+ urb->iso_frame_desc[i].actual_length;
} else {
dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n",
i, urb->number_of_packets,
@@ -1997,10 +2218,11 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
* @complete_code:
* Completion code
*/
-static void __cvmx_usb_perform_complete(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- enum cvmx_usb_complete complete_code)
+static void __cvmx_usb_perform_complete(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ enum cvmx_usb_complete complete_code)
{
/* If this was a split then clear our split in progress marker */
if (usb->active_split == transaction)
@@ -2019,7 +2241,8 @@ static void __cvmx_usb_perform_complete(struct cvmx_usb_state *usb,
* If there are more ISOs pending and we succeeded, schedule the
* next one
*/
- if ((transaction->iso_number_packets > 1) && (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
+ if ((transaction->iso_number_packets > 1) &&
+ (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
/* No bytes transferred for this packet as of yet */
transaction->actual_bytes = 0;
/* One less ISO waiting to transfer */
@@ -2067,16 +2290,17 @@ done:
*
* Returns: Transaction or NULL on failure.
*/
-static struct cvmx_usb_transaction *__cvmx_usb_submit_transaction(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- enum cvmx_usb_transfer type,
- uint64_t buffer,
- int buffer_length,
- uint64_t control_header,
- int iso_start_frame,
- int iso_number_packets,
- struct cvmx_usb_iso_packet *iso_packets,
- struct urb *urb)
+static struct cvmx_usb_transaction *__cvmx_usb_submit_transaction(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ enum cvmx_usb_transfer type,
+ uint64_t buffer,
+ int buffer_length,
+ uint64_t control_header,
+ int iso_start_frame,
+ int iso_number_packets,
+ struct cvmx_usb_iso_packet *iso_packets,
+ struct urb *urb)
{
struct cvmx_usb_transaction *transaction;
@@ -2128,9 +2352,10 @@ static struct cvmx_usb_transaction *__cvmx_usb_submit_transaction(struct cvmx_us
*
* Returns: A submitted transaction or NULL on failure.
*/
-static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
+static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
{
return __cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK,
urb->transfer_dma,
@@ -2152,9 +2377,10 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(struct cvmx_usb_state *
*
* Returns: A submitted transaction or NULL on failure.
*/
-static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
+static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
{
return __cvmx_usb_submit_transaction(usb, pipe,
CVMX_USB_TRANSFER_INTERRUPT,
@@ -2177,9 +2403,10 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(struct cvmx_usb_st
*
* Returns: A submitted transaction or NULL on failure.
*/
-static struct cvmx_usb_transaction *cvmx_usb_submit_control(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
+static struct cvmx_usb_transaction *cvmx_usb_submit_control(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
{
int buffer_length = urb->transfer_buffer_length;
uint64_t control_header = urb->setup_dma;
@@ -2209,9 +2436,10 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_control(struct cvmx_usb_stat
*
* Returns: A submitted transaction or NULL on failure.
*/
-static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
+static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
{
struct cvmx_usb_iso_packet *packets;
@@ -2257,17 +2485,22 @@ static int cvmx_usb_cancel(struct cvmx_usb_state *usb,
CVMX_SYNCW;
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
/*
* If the channel isn't enabled then the transaction already
* completed.
*/
if (usbc_hcchar.s.chena) {
usbc_hcchar.s.chdis = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index), usbc_hcchar.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(pipe->channel,
+ usb->index),
+ usbc_hcchar.u32);
}
}
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_CANCEL);
+ __cvmx_usb_perform_complete(usb, pipe, transaction,
+ CVMX_USB_COMPLETE_CANCEL);
return 0;
}
@@ -2331,7 +2564,8 @@ static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb)
int frame_number;
union cvmx_usbcx_hfnum usbc_hfnum;
- usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+ usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HFNUM(usb->index));
frame_number = usbc_hfnum.s.frnum;
return frame_number;
@@ -2359,10 +2593,12 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
int buffer_space_left;
/* Read the interrupt status bits for the channel */
- usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCINTX(channel, usb->index));
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index));
if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
/*
@@ -2370,7 +2606,10 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* interrupt IN transfers to get stuck until we do a
* write of HCCHARX without changing things
*/
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel,
+ usb->index),
+ usbc_hcchar.u32);
return 0;
}
@@ -2384,9 +2623,15 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
/* Disable all interrupts except CHHLTD */
hcintmsk.u32 = 0;
hcintmsk.s.chhltdmsk = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), hcintmsk.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTMSKX(channel,
+ usb->index),
+ hcintmsk.u32);
usbc_hcchar.s.chdis = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel,
+ usb->index),
+ usbc_hcchar.u32);
return 0;
} else if (usbc_hcint.s.xfercompl) {
/*
@@ -2394,7 +2639,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* Channel halt isn't needed.
*/
} else {
- cvmx_dprintf("USB%d: Channel %d interrupt without halt\n", usb->index, channel);
+ cvmx_dprintf("USB%d: Channel %d interrupt without halt\n",
+ usb->index, channel);
return 0;
}
}
@@ -2417,7 +2663,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
CVMX_PREFETCH(pipe, 128);
if (!pipe)
return 0;
- transaction = list_first_entry(&pipe->transactions, typeof(*transaction),
+ transaction = list_first_entry(&pipe->transactions,
+ typeof(*transaction),
node);
CVMX_PREFETCH(transaction, 0);
@@ -2432,8 +2679,10 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* Read the channel config info so we can figure out how much data
* was transferred
*/
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index));
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index));
/*
* Calculating the number of bytes successfully transferred is dependent
@@ -2447,7 +2696,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* the current value of xfersize from its starting value and we
* know how many bytes were written to the buffer
*/
- bytes_this_transfer = transaction->xfersize - usbc_hctsiz.s.xfersize;
+ bytes_this_transfer = transaction->xfersize -
+ usbc_hctsiz.s.xfersize;
} else {
/*
* OUT transactions don't decrement xfersize. Instead pktcnt is
@@ -2465,7 +2715,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
}
/* Figure out how many bytes were in the last packet of the transfer */
if (packets_processed)
- bytes_in_last_packet = bytes_this_transfer - (packets_processed-1) * usbc_hcchar.s.mps;
+ bytes_in_last_packet = bytes_this_transfer -
+ (packets_processed - 1) * usbc_hcchar.s.mps;
else
bytes_in_last_packet = bytes_this_transfer;
@@ -2485,9 +2736,11 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
*/
transaction->actual_bytes += bytes_this_transfer;
if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- buffer_space_left = transaction->iso_packets[0].length - transaction->actual_bytes;
+ buffer_space_left = transaction->iso_packets[0].length -
+ transaction->actual_bytes;
else
- buffer_space_left = transaction->buffer_length - transaction->actual_bytes;
+ buffer_space_left = transaction->buffer_length -
+ transaction->actual_bytes;
/*
* We need to remember the PID toggle state for the next transaction.
@@ -2513,7 +2766,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* the actual bytes transferred
*/
pipe->pid_toggle = 0;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_STALL);
+ __cvmx_usb_perform_complete(usb, pipe, transaction,
+ CVMX_USB_COMPLETE_STALL);
} else if (usbc_hcint.s.xacterr) {
/*
* We know at least one packet worked if we get a ACK or NAK.
@@ -2528,7 +2782,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* something wrong with the transfer. For example, PID
* toggle errors cause these
*/
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_XACTERR);
+ __cvmx_usb_perform_complete(usb, pipe, transaction,
+ CVMX_USB_COMPLETE_XACTERR);
} else {
/*
* If this was a split then clear our split in progress
@@ -2544,12 +2799,15 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
pipe->split_sc_frame = -1;
pipe->next_tx_frame += pipe->interval;
if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number + pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ pipe->next_tx_frame =
+ usb->frame_number + pipe->interval -
+ (usb->frame_number -
+ pipe->next_tx_frame) % pipe->interval;
}
} else if (usbc_hcint.s.bblerr) {
/* Babble Error (BblErr) */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_BABBLEERR);
+ __cvmx_usb_perform_complete(usb, pipe, transaction,
+ CVMX_USB_COMPLETE_BABBLEERR);
} else if (usbc_hcint.s.datatglerr) {
/* We'll retry the exact same transaction again */
transaction->retries++;
@@ -2566,8 +2824,11 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* If there is more data to go then we need to try
* again. Otherwise this transaction is complete
*/
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet))
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet < pipe->max_packet))
+ __cvmx_usb_perform_complete(usb, pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
} else {
/*
* Split transactions retry the split complete 4 times
@@ -2605,12 +2866,14 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
case CVMX_USB_STAGE_NON_CONTROL:
case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
/* This should be impossible */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ __cvmx_usb_perform_complete(usb, pipe,
+ transaction, CVMX_USB_COMPLETE_ERROR);
break;
case CVMX_USB_STAGE_SETUP:
pipe->pid_toggle = 1;
if (__cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage = CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
+ transaction->stage =
+ CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
else {
union cvmx_usb_control_header *header =
cvmx_phys_to_ptr(transaction->control_header);
@@ -2632,7 +2895,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
break;
case CVMX_USB_STAGE_DATA:
if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
+ transaction->stage =
+ CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
/*
* For setup OUT data that are splits,
* the hardware doesn't appear to count
@@ -2641,31 +2905,45 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
*/
if (!usbc_hcchar.s.epdir) {
if (buffer_space_left < pipe->max_packet)
- transaction->actual_bytes += buffer_space_left;
+ transaction->actual_bytes +=
+ buffer_space_left;
else
- transaction->actual_bytes += pipe->max_packet;
+ transaction->actual_bytes +=
+ pipe->max_packet;
}
- } else if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ } else if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet <
+ pipe->max_packet)) {
pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS;
}
break;
case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet <
+ pipe->max_packet)) {
pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS;
} else {
- transaction->stage = CVMX_USB_STAGE_DATA;
+ transaction->stage =
+ CVMX_USB_STAGE_DATA;
}
break;
case CVMX_USB_STAGE_STATUS:
if (__cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage = CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
else
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ __cvmx_usb_perform_complete(usb, pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
break;
case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ __cvmx_usb_perform_complete(usb, pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
break;
}
break;
@@ -2678,27 +2956,49 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* data is needed
*/
if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ if (transaction->stage ==
+ CVMX_USB_STAGE_NON_CONTROL)
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
else {
- if (buffer_space_left && (bytes_in_last_packet == pipe->max_packet))
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ if (buffer_space_left &&
+ (bytes_in_last_packet ==
+ pipe->max_packet))
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL;
else {
- if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ if (transaction->type ==
+ CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame +=
+ pipe->interval;
+ __cvmx_usb_perform_complete(
+ usb,
+ pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
}
}
} else {
- if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ if ((pipe->device_speed ==
+ CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_type ==
+ CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir ==
+ CVMX_USB_DIRECTION_OUT) &&
(usbc_hcint.s.nak))
- pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
- if (!buffer_space_left || (bytes_in_last_packet < pipe->max_packet)) {
- if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ pipe->flags |=
+ __CVMX_USB_PIPE_FLAGS_NEED_PING;
+ if (!buffer_space_left ||
+ (bytes_in_last_packet <
+ pipe->max_packet)) {
+ if (transaction->type ==
+ CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame +=
+ pipe->interval;
+ __cvmx_usb_perform_complete(usb,
+ pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
}
}
break;
@@ -2719,28 +3019,45 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* complete. Otherwise start it again to
* send the next 188 bytes
*/
- if (!buffer_space_left || (bytes_this_transfer < 188)) {
+ if (!buffer_space_left ||
+ (bytes_this_transfer < 188)) {
pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ __cvmx_usb_perform_complete(
+ usb,
+ pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
}
} else {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
+ if (transaction->stage ==
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
/*
* We are in the incoming data
* phase. Keep getting data
* until we run out of space or
* get a small packet
*/
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet <
+ pipe->max_packet)) {
+ pipe->next_tx_frame +=
+ pipe->interval;
+ __cvmx_usb_perform_complete(
+ usb,
+ pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
}
} else
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
}
} else {
pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ __cvmx_usb_perform_complete(usb,
+ pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
}
break;
}
@@ -2760,8 +3077,10 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
transaction->stage &= ~1;
pipe->next_tx_frame += pipe->interval;
if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number + pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ pipe->next_tx_frame = usb->frame_number +
+ pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) %
+ pipe->interval;
} else {
struct cvmx_usb_port_status port;
port = cvmx_usb_get_status(usb);
@@ -2773,7 +3092,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* We get channel halted interrupts with no result bits
* set when the cable is unplugged
*/
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ __cvmx_usb_perform_complete(usb, pipe, transaction,
+ CVMX_USB_COMPLETE_ERROR);
}
}
return 0;
@@ -2856,9 +3176,11 @@ static int cvmx_usb_poll(struct cvmx_usb_state *usb)
*/
octeon_usb_port_callback(usb);
/* Clear the port change bits */
- usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPRT(usb->index));
usbc_hprt.s.prtena = 0;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), usbc_hprt.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index),
+ usbc_hprt.u32);
}
if (usbc_gintsts.s.hchint) {
/*
@@ -3002,13 +3324,15 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
}
pipe = cvmx_usb_open_pipe(&priv->usb, usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe), speed,
- le16_to_cpu(ep->desc.wMaxPacketSize) & 0x7ff,
+ le16_to_cpu(ep->desc.wMaxPacketSize)
+ & 0x7ff,
transfer_type,
usb_pipein(urb->pipe) ?
CVMX_USB_DIRECTION_IN :
CVMX_USB_DIRECTION_OUT,
urb->interval,
- (le16_to_cpu(ep->desc.wMaxPacketSize) >> 11) & 0x3,
+ (le16_to_cpu(ep->desc.wMaxPacketSize)
+ >> 11) & 0x3,
split_device, split_port);
if (!pipe) {
spin_unlock_irqrestore(&priv->lock, flags);
@@ -3023,7 +3347,8 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
switch (usb_pipetype(urb->pipe)) {
case PIPE_ISOCHRONOUS:
dev_dbg(dev, "Submit isochronous to %d.%d\n",
- usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
/*
* Allocate a structure to use for our private list of
* isochronous packets.
@@ -3035,9 +3360,12 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
int i;
/* Fill the list with the data from the URB */
for (i = 0; i < urb->number_of_packets; i++) {
- iso_packet[i].offset = urb->iso_frame_desc[i].offset;
- iso_packet[i].length = urb->iso_frame_desc[i].length;
- iso_packet[i].status = CVMX_USB_COMPLETE_ERROR;
+ iso_packet[i].offset =
+ urb->iso_frame_desc[i].offset;
+ iso_packet[i].length =
+ urb->iso_frame_desc[i].length;
+ iso_packet[i].status =
+ CVMX_USB_COMPLETE_ERROR;
}
/*
* Store a pointer to the list in the URB setup_packet
@@ -3059,17 +3387,20 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
break;
case PIPE_INTERRUPT:
dev_dbg(dev, "Submit interrupt to %d.%d\n",
- usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
transaction = cvmx_usb_submit_interrupt(&priv->usb, pipe, urb);
break;
case PIPE_CONTROL:
dev_dbg(dev, "Submit control to %d.%d\n",
- usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
transaction = cvmx_usb_submit_control(&priv->usb, pipe, urb);
break;
case PIPE_BULK:
dev_dbg(dev, "Submit bulk to %d.%d\n",
- usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
transaction = cvmx_usb_submit_bulk(&priv->usb, pipe, urb);
break;
}
@@ -3100,7 +3431,9 @@ static void octeon_usb_urb_dequeue_work(unsigned long arg)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static int octeon_usb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+static int octeon_usb_urb_dequeue(struct usb_hcd *hcd,
+ struct urb *urb,
+ int status)
{
struct octeon_hcd *priv = hcd_to_octeon(hcd);
unsigned long flags;
@@ -3120,7 +3453,8 @@ static int octeon_usb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int stat
return 0;
}
-static void octeon_usb_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+static void octeon_usb_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
{
struct device *dev = hcd->self.controller;
@@ -3203,7 +3537,8 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dev_dbg(dev, " C_CONNECTION\n");
/* Clears drivers internal connect status change flag */
spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
+ priv->usb.port_status =
+ cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
case USB_PORT_FEAT_C_RESET:
@@ -3212,7 +3547,8 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* Clears the driver's internal Port Reset Change flag.
*/
spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
+ priv->usb.port_status =
+ cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
case USB_PORT_FEAT_C_ENABLE:
@@ -3222,7 +3558,8 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* Change flag.
*/
spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
+ priv->usb.port_status =
+ cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
case USB_PORT_FEAT_C_SUSPEND:
@@ -3237,7 +3574,8 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dev_dbg(dev, " C_OVER_CURRENT\n");
/* Clears the driver's overcurrent Change flag */
spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
+ priv->usb.port_status =
+ cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
default:
@@ -3369,6 +3707,8 @@ static const struct hc_driver octeon_hc_driver = {
.get_frame_number = octeon_usb_get_frame_number,
.hub_status_data = octeon_usb_hub_status_data,
.hub_control = octeon_usb_hub_control,
+ .map_urb_for_dma = octeon_map_urb_for_dma,
+ .unmap_urb_for_dma = octeon_unmap_urb_for_dma,
};
static int octeon_usb_probe(struct platform_device *pdev)
@@ -3411,7 +3751,8 @@ static int octeon_usb_probe(struct platform_device *pdev)
initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
break;
default:
- dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n", clock_rate);
+ dev_err(dev, "Illegal USBN \"refclk-frequency\" %u\n",
+ clock_rate);
return -ENXIO;
}
@@ -3477,7 +3818,8 @@ static int octeon_usb_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
- tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
+ tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work,
+ (unsigned long)priv);
INIT_LIST_HEAD(&priv->dequeue_list);
status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags);
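For reference, the octeon-hcd.c hunks above repeatedly re-wrap the transfer clamping done in __cvmx_usb_start_channel(). A minimal standalone sketch of that logic, not part of the patch, with an illustrative helper name and parameters:

	/*
	 * Sketch only: clamp a host-channel transfer as the hunks above do.
	 * At least one packet is always sent, only one packet is sent per
	 * channel start when DMA is disabled, and the hardware limit on
	 * packets per transfer is respected.
	 */
	static void clamp_transfer(int *bytes_to_transfer, int *packets_to_transfer,
				   int max_packet, int max_packets, int no_dma)
	{
		*packets_to_transfer =
			(*bytes_to_transfer + max_packet - 1) / max_packet;
		if (*packets_to_transfer == 0) {
			*packets_to_transfer = 1;
		} else if (*packets_to_transfer > 1 && no_dma) {
			/* Channels restart between packets without DMA */
			*packets_to_transfer = 1;
			*bytes_to_transfer = *packets_to_transfer * max_packet;
		} else if (*packets_to_transfer > max_packets) {
			/* Limit to what the hardware can handle */
			*packets_to_transfer = max_packets;
			*bytes_to_transfer = *packets_to_transfer * max_packet;
		}
	}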
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index bdaec8d2ca0c..2a98a2153e16 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -33,10 +33,6 @@
* driver will use this memory instead of kernel memory for pools. This
* allows 32bit userspace applications to access the buffers, but also
* requires all received packets to be copied.
- * CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
- * This kernel config option allows the user to control the number of
- * packet and work queue buffers allocated by the driver. If this is zero,
- * the driver uses the default from below.
* USE_SKBUFFS_IN_HW
* Tells the driver to populate the packet buffers with kernel skbuffs.
* This allows the driver to receive packets without copying them. It also
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 83b103091cf2..3f067f189b3d 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -127,23 +127,21 @@ static void cvm_oct_adjust_link(struct net_device *dev)
link_info.s.link_up = priv->last_link ? 1 : 0;
link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
link_info.s.speed = priv->phydev->speed;
- cvmx_helper_link_set( priv->port, link_info);
+ cvmx_helper_link_set(priv->port, link_info);
if (priv->last_link) {
netif_carrier_on(dev);
if (priv->queue != -1)
printk_ratelimited("%s: %u Mbps %s duplex, "
- "port %2d, queue %2d\n",
- dev->name, priv->phydev->speed,
- priv->phydev->duplex ?
- "Full" : "Half",
- priv->port, priv->queue);
+ "port %2d, queue %2d\n", dev->name,
+ priv->phydev->speed,
+ priv->phydev->duplex ? "Full" : "Half",
+ priv->port, priv->queue);
else
printk_ratelimited("%s: %u Mbps %s duplex, "
- "port %2d, POW\n",
- dev->name, priv->phydev->speed,
- priv->phydev->duplex ?
- "Full" : "Half",
- priv->port);
+ "port %2d, POW\n", dev->name,
+ priv->phydev->speed,
+ priv->phydev->duplex ? "Full" : "Half",
+ priv->port);
} else {
netif_carrier_off(dev);
printk_ratelimited("%s: Link down\n", dev->name);
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 199059d64c9b..bf666b023190 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -30,6 +30,7 @@
#include <asm/octeon/octeon.h>
+#include "ethernet-mem.h"
#include "ethernet-defines.h"
#include <asm/octeon/cvmx-fpa.h>
@@ -79,10 +80,10 @@ static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
} while (memory);
if (elements < 0)
- pr_warning("Freeing of pool %u had too many skbuffs (%d)\n",
+ pr_warn("Freeing of pool %u had too many skbuffs (%d)\n",
pool, elements);
else if (elements > 0)
- pr_warning("Freeing of pool %u is missing %d skbuffs\n",
+ pr_warn("Freeing of pool %u is missing %d skbuffs\n",
pool, elements);
}
@@ -113,7 +114,7 @@ static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
*/
memory = kmalloc(size + 256, GFP_ATOMIC);
if (unlikely(memory == NULL)) {
- pr_warning("Unable to allocate %u bytes for FPA pool %d\n",
+ pr_warn("Unable to allocate %u bytes for FPA pool %d\n",
elements * size, pool);
break;
}
@@ -146,10 +147,10 @@ static void cvm_oct_free_hw_memory(int pool, int size, int elements)
} while (fpa);
if (elements < 0)
- pr_warning("Freeing of pool %u had too many buffers (%d)\n",
+ pr_warn("Freeing of pool %u had too many buffers (%d)\n",
pool, elements);
else if (elements > 0)
- pr_warning("Warning: Freeing of pool %u is missing %d buffers\n",
+ pr_warn("Warning: Freeing of pool %u is missing %d buffers\n",
pool, elements);
}
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index ea53af30dfa7..0ec0da328215 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -43,7 +43,7 @@
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
-DEFINE_SPINLOCK(global_register_lock);
+static DEFINE_SPINLOCK(global_register_lock);
static int number_rgmii_ports;
@@ -72,7 +72,8 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
* If the 10Mbps preamble workaround is supported and we're
* at 10Mbps we may need to do some special checking.
*/
- if (USE_10MBPS_PREAMBLE_WORKAROUND && (link_info.s.speed == 10)) {
+ if (USE_10MBPS_PREAMBLE_WORKAROUND &&
+ (link_info.s.speed == 10)) {
/*
* Read the GMXX_RXX_INT_REG[PCTERR] bit and
@@ -166,9 +167,8 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
if (use_global_register_lock)
spin_unlock_irqrestore(&global_register_lock, flags);
- else {
+ else
mutex_unlock(&priv->phydev->bus->mdio_lock);
- }
if (priv->phydev == NULL) {
/* Tell core. */
@@ -232,8 +232,10 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
(interface, index)];
struct octeon_ethernet *priv = netdev_priv(dev);
- if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
- queue_work(cvm_oct_poll_queue, &priv->port_work);
+ if (dev &&
+ !atomic_read(&cvm_oct_poll_queue_stopping))
+ queue_work(cvm_oct_poll_queue,
+ &priv->port_work);
gmx_rx_int_reg.u64 = 0;
gmx_rx_int_reg.s.phy_dupx = 1;
@@ -274,8 +276,10 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
(interface, index)];
struct octeon_ethernet *priv = netdev_priv(dev);
- if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
- queue_work(cvm_oct_poll_queue, &priv->port_work);
+ if (dev &&
+ !atomic_read(&cvm_oct_poll_queue_stopping))
+ queue_work(cvm_oct_poll_queue,
+ &priv->port_work);
gmx_rx_int_reg.u64 = 0;
gmx_rx_int_reg.s.phy_dupx = 1;
@@ -327,7 +331,8 @@ int cvm_oct_rgmii_stop(struct net_device *dev)
static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
{
- struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, port_work);
+ struct octeon_ethernet *priv =
+ container_of(work, struct octeon_ethernet, port_work);
cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 47541e1608f3..4e54d8540219 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -95,7 +95,7 @@ static void cvm_oct_kick_tx_poll_watchdog(void)
cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}
-void cvm_oct_free_tx_skbs(struct net_device *dev)
+static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
int32_t skb_to_free;
int qos, queues_per_port;
@@ -554,7 +554,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
printk_ratelimited("%s: Failed to allocate a work queue entry\n",
dev->name);
priv->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return 0;
}
@@ -565,7 +565,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
dev->name);
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
priv->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return 0;
}
@@ -682,7 +682,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
work->grp);
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return 0;
}
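The skb free conversions in the ethernet-tx.c hunks above follow the usual rule: dev_kfree_skb() must not run in hard-IRQ context, so the drop paths switch to dev_kfree_skb_any(), and the successful transmit path uses dev_consume_skb_any() so the packet is not traced as dropped. A minimal sketch of that pattern, with a hypothetical helper name:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Sketch only: pick the free routine the way the hunks above do. */
	static void example_tx_free(struct net_device *dev, struct sk_buff *skb,
				    bool transmitted)
	{
		if (transmitted) {
			/* Delivered to hardware: consumed, not dropped */
			dev_consume_skb_any(skb);
		} else {
			dev->stats.tx_dropped++;
			/* Safe from any context, including hard IRQ */
			dev_kfree_skb_any(skb);
		}
	}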
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 089dc4b9efd4..ff7214aac9dd 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -55,17 +55,11 @@
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>
-#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
- && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
-int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
-#else
-int num_packet_buffers = 1024;
-#endif
+static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
"\tNumber of packet buffers to allocate and store in the\n"
- "\tFPA. By default, 1024 packet buffers are used unless\n"
- "\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");
+ "\tFPA. By default, 1024 packet buffers are used.\n");
int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 9360e22e0739..4cf3884070fa 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -58,7 +58,7 @@ struct octeon_ethernet {
/* Last negotiated link state */
uint64_t link_info;
/* Called periodically to check link status */
- void (*poll) (struct net_device *dev);
+ void (*poll)(struct net_device *dev);
struct delayed_work port_periodic_work;
struct work_struct port_work; /* may be unused. */
struct device_node *of_node;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index e2663b189c66..aec98958f795 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -37,7 +37,7 @@
/* Load Delay Locked Loop (DLL) settings for clock delay */
#define MEM_DLL_CLOCK_DELAY (1<<0)
/* Memory controller power down function */
-#define MEM_POWER_DOWN (1<<8)
+#define MEM_POWER_DOWN (1<<8)
/* Memory controller software reset */
#define MEM_SOFT_RESET (1<<0)
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 5de5981b3bba..10c0a96ce8bb 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -217,7 +217,7 @@ static int oz_set_active_pd(const u8 *addr)
pd = oz_pd_find(addr);
if (pd) {
spin_lock_bh(&g_cdev.lock);
- memcpy(g_cdev.active_addr, addr, ETH_ALEN);
+ ether_addr_copy(g_cdev.active_addr, addr);
old_pd = g_cdev.active_pd;
g_cdev.active_pd = pd;
spin_unlock_bh(&g_cdev.lock);
@@ -283,7 +283,7 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
u8 addr[ETH_ALEN];
oz_dbg(ON, "OZ_IOCTL_GET_ACTIVE_PD\n");
spin_lock_bh(&g_cdev.lock);
- memcpy(addr, g_cdev.active_addr, ETH_ALEN);
+ ether_addr_copy(addr, g_cdev.active_addr);
spin_unlock_bh(&g_cdev.lock);
if (copy_to_user((void __user *)arg, addr, ETH_ALEN))
return -EFAULT;
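The ether_addr_copy() conversions above (and in ozpd.c below) are equivalent to memcpy(dst, src, ETH_ALEN), but the helper requires both addresses to be 16-bit aligned. A minimal sketch with hypothetical names, forcing the alignment explicitly:

	#include <linux/etherdevice.h>

	struct example_ctx {
		u8 active_addr[ETH_ALEN] __aligned(2);	/* ether_addr_copy() needs 2-byte alignment */
	};

	/* Sketch only: both arguments are assumed 16-bit aligned. */
	static void example_set_addr(struct example_ctx *ctx, const u8 *addr)
	{
		ether_addr_copy(ctx->active_addr, addr);
	}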
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index efaf26f734c3..b3d401ae41b4 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -354,7 +354,8 @@ static struct oz_endpoint *oz_ep_alloc(int buffer_size, gfp_t mem_flags)
* disabled.
* Context: softirq or process
*/
-static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb)
+static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd,
+ struct urb *urb)
{
struct oz_urb_link *urbl;
struct list_head *e;
@@ -1986,8 +1987,7 @@ static void oz_get_hub_descriptor(struct usb_hcd *hcd,
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
- desc->wHubCharacteristics = (__force __u16)
- __constant_cpu_to_le16(0x0001);
+ desc->wHubCharacteristics = (__force __u16)cpu_to_le16(0x0001);
desc->bNbrPorts = OZ_NB_PORTS;
}
@@ -2181,7 +2181,7 @@ static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
break;
case GetHubStatus:
oz_dbg(HUB, "GetHubStatus: req_type = 0x%x\n", req_type);
- put_unaligned(__constant_cpu_to_le32(0), (__le32 *)buf);
+ put_unaligned(cpu_to_le32(0), (__le32 *)buf);
break;
case GetPortStatus:
err = oz_get_port_status(hcd, windex, buf);
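The __constant_cpu_to_le16()/__constant_cpu_to_le32() removals above rely on cpu_to_le16() and cpu_to_le32() already folding to compile-time constants when given constant arguments. A minimal sketch of the resulting usage, with a hypothetical function name:

	#include <linux/types.h>
	#include <asm/byteorder.h>
	#include <asm/unaligned.h>

	/* Sketch only: store a little-endian constant into a possibly
	 * unaligned buffer, as the GetHubStatus hunk above now does. */
	static void example_clear_hub_status(__le32 *buf)
	{
		put_unaligned(cpu_to_le32(0), buf);
	}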
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index 743695077346..10f1b3ac8832 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -8,6 +8,7 @@
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/errno.h>
#include "ozdbg.h"
#include "ozprotocol.h"
@@ -173,7 +174,7 @@ struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
pd->last_rx_pkt_num = 0xffffffff;
oz_pd_set_state(pd, OZ_PD_S_IDLE);
pd->max_tx_size = OZ_MAX_TX_SIZE;
- memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(pd->mac_addr, mac_addr);
if (0 != oz_elt_buf_init(&pd->elt_buff)) {
kfree(pd);
pd = NULL;
@@ -284,11 +285,11 @@ int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
ai->app_id);
break;
}
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
pd->total_apps |= (1<<ai->app_id);
if (resume)
pd->paused_apps &= ~(1<<ai->app_id);
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
}
}
return rc;
@@ -304,14 +305,14 @@ void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
if (pause) {
pd->paused_apps |= (1<<ai->app_id);
} else {
pd->total_apps &= ~(1<<ai->app_id);
pd->paused_apps &= ~(1<<ai->app_id);
}
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
ai->stop(pd, pause);
}
}
@@ -349,17 +350,17 @@ void oz_pd_stop(struct oz_pd *pd)
oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
oz_pd_indicate_farewells(pd);
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
stop_apps = pd->total_apps;
pd->total_apps = 0;
pd->paused_apps = 0;
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
oz_services_stop(pd, stop_apps, 0);
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
oz_pd_set_state(pd, OZ_PD_S_STOPPED);
/* Remove from PD list.*/
list_del(&pd->link);
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
oz_pd_put(pd);
}
@@ -372,9 +373,9 @@ int oz_pd_sleep(struct oz_pd *pd)
int do_stop = 0;
u16 stop_apps;
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
return 0;
}
if (pd->keep_alive && pd->session_id)
@@ -383,7 +384,7 @@ int oz_pd_sleep(struct oz_pd *pd)
do_stop = 1;
stop_apps = pd->total_apps;
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
if (do_stop) {
oz_pd_stop(pd);
} else {
@@ -999,15 +1000,15 @@ void oz_pd_indicate_farewells(struct oz_pd *pd)
const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
while (1) {
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
if (list_empty(&pd->farewell_list)) {
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
break;
}
f = list_first_entry(&pd->farewell_list,
struct oz_farewell, link);
list_del(&f->link);
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
if (ai->farewell)
ai->farewell(pd, f->ep_num, f->report, f->len);
kfree(f);
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
index 12c712956888..ad5fe7a6e619 100644
--- a/drivers/staging/ozwpan/ozpd.h
+++ b/drivers/staging/ozwpan/ozpd.h
@@ -22,6 +22,11 @@
#define OZ_TIMER_HEARTBEAT 2
#define OZ_TIMER_STOP 3
+/*
+ * External spinlock variable
+ */
+extern spinlock_t g_polling_lock;
+
/* Data structure that hold information on a frame for transmisson. This is
* built when the frame is first transmitted and is used to rebuild the frame
* if a re-transmission is required.
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index 5d965cf06d59..f09acd0bb013 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -38,9 +38,13 @@ struct oz_binding {
};
/*
+ * External variable
+ */
+
+DEFINE_SPINLOCK(g_polling_lock);
+/*
* Static external variables.
*/
-static DEFINE_SPINLOCK(g_polling_lock);
static LIST_HEAD(g_pd_list);
static LIST_HEAD(g_binding);
static DEFINE_SPINLOCK(g_binding_lock);
@@ -664,32 +668,26 @@ void oz_binding_add(const char *net_dev)
{
struct oz_binding *binding;
- binding = kmalloc(sizeof(struct oz_binding), GFP_KERNEL);
- if (binding) {
- binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
- binding->ptype.func = oz_pkt_recv;
- if (net_dev && *net_dev) {
- memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
- oz_dbg(ON, "Adding binding: %s\n", net_dev);
- binding->ptype.dev =
- dev_get_by_name(&init_net, net_dev);
- if (binding->ptype.dev == NULL) {
- oz_dbg(ON, "Netdev %s not found\n", net_dev);
- kfree(binding);
- binding = NULL;
- }
- } else {
- oz_dbg(ON, "Binding to all netcards\n");
- memset(binding->name, 0, OZ_MAX_BINDING_LEN);
- binding->ptype.dev = NULL;
- }
- if (binding) {
- dev_add_pack(&binding->ptype);
- spin_lock_bh(&g_binding_lock);
- list_add_tail(&binding->link, &g_binding);
- spin_unlock_bh(&g_binding_lock);
+ binding = kzalloc(sizeof(struct oz_binding), GFP_KERNEL);
+ if (!binding)
+ return;
+
+ binding->ptype.type = htons(OZ_ETHERTYPE);
+ binding->ptype.func = oz_pkt_recv;
+ if (net_dev && *net_dev) {
+ memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
+ oz_dbg(ON, "Adding binding: %s\n", net_dev);
+ binding->ptype.dev = dev_get_by_name(&init_net, net_dev);
+ if (binding->ptype.dev == NULL) {
+ oz_dbg(ON, "Netdev %s not found\n", net_dev);
+ kfree(binding);
+ return;
}
}
+ dev_add_pack(&binding->ptype);
+ spin_lock_bh(&g_binding_lock);
+ list_add_tail(&binding->link, &g_binding);
+ spin_unlock_bh(&g_binding_lock);
}
/*
@@ -800,12 +798,3 @@ int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
return count;
}
-void oz_polling_lock_bh(void)
-{
- spin_lock_bh(&g_polling_lock);
-}
-
-void oz_polling_unlock_bh(void)
-{
- spin_unlock_bh(&g_polling_lock);
-}
diff --git a/drivers/staging/ozwpan/ozproto.h b/drivers/staging/ozwpan/ozproto.h
index 0c49c8a0e815..cb38e02c968e 100644
--- a/drivers/staging/ozwpan/ozproto.h
+++ b/drivers/staging/ozwpan/ozproto.h
@@ -59,8 +59,6 @@ void oz_binding_remove(const char *net_dev);
void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time);
void oz_timer_delete(struct oz_pd *pd, int type);
void oz_pd_request_heartbeat(struct oz_pd *pd);
-void oz_polling_lock_bh(void);
-void oz_polling_unlock_bh(void);
void oz_pd_heartbeat_handler(unsigned long data);
void oz_pd_timeout_handler(unsigned long data);
enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer);
diff --git a/drivers/staging/ozwpan/ozprotocol.h b/drivers/staging/ozwpan/ozprotocol.h
index 17b09b9a5b08..9bbb182f2776 100644
--- a/drivers/staging/ozwpan/ozprotocol.h
+++ b/drivers/staging/ozwpan/ozprotocol.h
@@ -192,7 +192,7 @@ struct oz_get_desc_req {
u16 size;
u8 req_type;
u8 desc_type;
- u16 w_index;
+ __le16 w_index;
u8 index;
} PACKED;
@@ -219,8 +219,8 @@ struct oz_get_desc_rsp {
u8 elt_seq_num;
u8 type;
u8 req_id;
- u16 offset;
- u16 total_size;
+ __le16 offset;
+ __le16 total_size;
u8 rcode;
u8 data[1];
} PACKED;
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
index 8531438d7586..4249fa374012 100644
--- a/drivers/staging/ozwpan/ozusbif.h
+++ b/drivers/staging/ozwpan/ozusbif.h
@@ -23,7 +23,7 @@ int oz_usb_stream_delete(void *hpd, u8 ep_num);
int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
const u8 *data, int data_len);
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
- u8 index, u16 windex, int offset, int len);
+ u8 index, __le16 windex, int offset, int len);
int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb);
void oz_usb_request_heartbeat(void *hpd);
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index 617f51cdaea7..f32d01427f77 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -54,7 +54,7 @@ static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
* Context: softirq
*/
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
- u8 index, u16 windex, int offset, int len)
+ u8 index, __le16 windex, int offset, int len)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
struct oz_pd *pd = usb_ctx->pd;
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index ec4b1fd14021..08f9a4896116 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -171,8 +171,8 @@ struct logical_input {
union {
struct { /* valid when type == INPUT_TYPE_STD */
- void (*press_fct) (int);
- void (*release_fct) (int);
+ void (*press_fct)(int);
+ void (*release_fct)(int);
int press_data;
int release_data;
} std;
@@ -417,9 +417,9 @@ static char lcd_must_clear;
static char lcd_left_shift;
static char init_in_progress;
-static void (*lcd_write_cmd) (int);
-static void (*lcd_write_data) (int);
-static void (*lcd_clear_fast) (void);
+static void (*lcd_write_cmd)(int);
+static void (*lcd_write_data)(int);
+static void (*lcd_clear_fast)(void);
static DEFINE_SPINLOCK(pprt_lock);
static struct timer_list scan_timer;
@@ -457,14 +457,12 @@ MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
static int lcd_type = -1;
module_param(lcd_type, int, 0000);
MODULE_PARM_DESC(lcd_type,
- "LCD type: 0=none, 1=old //, 2=serial ks0074, "
- "3=hantronix //, 4=nexcom //, 5=compiled-in");
+ "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
static int lcd_proto = -1;
module_param(lcd_proto, int, 0000);
MODULE_PARM_DESC(lcd_proto,
- "LCD communication: 0=parallel (//), 1=serial,"
- "2=TI LCD Interface");
+ "LCD communication: 0=parallel (//), 1=serial, 2=TI LCD Interface");
static int lcd_charset = -1;
module_param(lcd_charset, int, 0000);
@@ -473,8 +471,7 @@ MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074");
static int keypad_type = -1;
module_param(keypad_type, int, 0000);
MODULE_PARM_DESC(keypad_type,
- "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, "
- "3=nexcom 4 keys");
+ "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, 3=nexcom 4 keys");
static int profile = DEFAULT_PROFILE;
module_param(profile, int, 0000);
@@ -494,38 +491,32 @@ MODULE_PARM_DESC(profile,
static int lcd_e_pin = PIN_NOT_SET;
module_param(lcd_e_pin, int, 0000);
MODULE_PARM_DESC(lcd_e_pin,
- "# of the // port pin connected to LCD 'E' signal, "
- "with polarity (-17..17)");
+ "# of the // port pin connected to LCD 'E' signal, with polarity (-17..17)");
static int lcd_rs_pin = PIN_NOT_SET;
module_param(lcd_rs_pin, int, 0000);
MODULE_PARM_DESC(lcd_rs_pin,
- "# of the // port pin connected to LCD 'RS' signal, "
- "with polarity (-17..17)");
+ "# of the // port pin connected to LCD 'RS' signal, with polarity (-17..17)");
static int lcd_rw_pin = PIN_NOT_SET;
module_param(lcd_rw_pin, int, 0000);
MODULE_PARM_DESC(lcd_rw_pin,
- "# of the // port pin connected to LCD 'RW' signal, "
- "with polarity (-17..17)");
+ "# of the // port pin connected to LCD 'RW' signal, with polarity (-17..17)");
static int lcd_bl_pin = PIN_NOT_SET;
module_param(lcd_bl_pin, int, 0000);
MODULE_PARM_DESC(lcd_bl_pin,
- "# of the // port pin connected to LCD backlight, "
- "with polarity (-17..17)");
+ "# of the // port pin connected to LCD backlight, with polarity (-17..17)");
static int lcd_da_pin = PIN_NOT_SET;
module_param(lcd_da_pin, int, 0000);
MODULE_PARM_DESC(lcd_da_pin,
- "# of the // port pin connected to serial LCD 'SDA' "
- "signal, with polarity (-17..17)");
+ "# of the // port pin connected to serial LCD 'SDA' signal, with polarity (-17..17)");
static int lcd_cl_pin = PIN_NOT_SET;
module_param(lcd_cl_pin, int, 0000);
MODULE_PARM_DESC(lcd_cl_pin,
- "# of the // port pin connected to serial LCD 'SCL' "
- "signal, with polarity (-17..17)");
+ "# of the // port pin connected to serial LCD 'SCL' signal, with polarity (-17..17)");
static const unsigned char *lcd_char_conv;
@@ -2017,9 +2008,9 @@ static struct logical_input *panel_bind_key(const char *name, const char *press,
* be bound.
*/
static struct logical_input *panel_bind_callback(char *name,
- void (*press_fct) (int),
+ void (*press_fct)(int),
int press_data,
- void (*release_fct) (int),
+ void (*release_fct)(int),
int release_data)
{
struct logical_input *callback;
diff --git a/drivers/staging/rtl8187se/Kconfig b/drivers/staging/rtl8187se/Kconfig
index 3162aabbeb07..ff8d41ebca36 100644
--- a/drivers/staging/rtl8187se/Kconfig
+++ b/drivers/staging/rtl8187se/Kconfig
@@ -6,6 +6,5 @@ config R8187SE
select WEXT_PRIV
select EEPROM_93CX6
select CRYPTO
- default N
---help---
If built as a module, it will be called r8187se.ko.
diff --git a/drivers/staging/rtl8187se/Module.symvers b/drivers/staging/rtl8187se/Module.symvers
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/staging/rtl8187se/Module.symvers
diff --git a/drivers/staging/rtl8187se/ieee80211/dot11d.h b/drivers/staging/rtl8187se/ieee80211/dot11d.h
index 63f4f3c72f10..f996691307bf 100644
--- a/drivers/staging/rtl8187se/ieee80211/dot11d.h
+++ b/drivers/staging/rtl8187se/ieee80211/dot11d.h
@@ -11,13 +11,13 @@ typedef struct _CHNL_TXPOWER_TRIPLE {
u8 FirstChnl;
u8 NumChnls;
u8 MaxTxPowerInDbm;
-}CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
+} CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
typedef enum _DOT11D_STATE {
DOT11D_STATE_NONE = 0,
DOT11D_STATE_LEARNED,
DOT11D_STATE_DONE,
-}DOT11D_STATE;
+} DOT11D_STATE;
typedef struct _RT_DOT11D_INFO {
/* DECLARE_RT_OBJECT(RT_DOT12D_INFO); */
@@ -35,9 +35,10 @@ typedef struct _RT_DOT11D_INFO {
u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
DOT11D_STATE State;
-}RT_DOT11D_INFO, *PRT_DOT11D_INFO;
-#define eqMacAddr(a,b) ( ((a)[0]==(b)[0] && (a)[1]==(b)[1] && (a)[2]==(b)[2] && (a)[3]==(b)[3] && (a)[4]==(b)[4] && (a)[5]==(b)[5]) ? 1:0 )
-#define cpMacAddr(des,src) ((des)[0]=(src)[0],(des)[1]=(src)[1],(des)[2]=(src)[2],(des)[3]=(src)[3],(des)[4]=(src)[4],(des)[5]=(src)[5])
+} RT_DOT11D_INFO, *PRT_DOT11D_INFO;
+
+#define eqMacAddr(a, b) (((a)[0] == (b)[0] && (a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1:0)
+#define cpMacAddr(des, src) ((des)[0] = (src)[0], (des)[1] = (src)[1], (des)[2] = (src)[2], (des)[3] = (src)[3], (des)[4] = (src)[4], (des)[5] = (src)[5])
#define GET_DOT11D_INFO(__pIeeeDev) ((PRT_DOT11D_INFO)((__pIeeeDev)->pDot11dInfo))
#define IS_DOT11D_ENABLE(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->bEnabled
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 09ffd9bc8991..d1763b7b8f27 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -473,7 +473,7 @@ enum {
};
struct ieee80211_header_data {
- u16 frame_ctl;
+ __le16 frame_ctl;
u16 duration_id;
u8 addr1[6];
u8 addr2[6];
@@ -482,7 +482,7 @@ struct ieee80211_header_data {
};
struct ieee80211_hdr_4addr {
- u16 frame_ctl;
+ __le16 frame_ctl;
u16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
@@ -709,10 +709,10 @@ enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
#define MAX_IE_LEN 0xFF //+YJ,080625
-typedef struct _CHANNEL_LIST{
- u8 Channel[MAX_CHANNEL_NUMBER + 1];
- u8 Len;
-}CHANNEL_LIST, *PCHANNEL_LIST;
+struct rtl8187se_channel_list {
+ u8 channel[MAX_CHANNEL_NUMBER + 1];
+ u8 len;
+};
//by amy for ps
#define IEEE80211_WATCH_DOG_TIME 2000
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
index c8013d373a77..4fe253818630 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
@@ -58,7 +58,7 @@ struct ieee80211_ccmp_data {
u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
};
-void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
+static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
const u8 pt[16], u8 ct[16])
{
crypto_cipher_encrypt_one((void *)tfm, ct, pt);
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
index c5907968e1a7..6c1acc5dfba7 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
@@ -187,8 +187,7 @@ static inline u16 Mk16_le(u16 *v)
}
-static const u16 Sbox[256] =
-{
+static const u16 Sbox[256] = {
0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
@@ -307,7 +306,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
int len;
u8 *pos;
struct ieee80211_hdr_4addr *hdr;
- u8 rc4key[16],*icv;
+ u8 rc4key[16], *icv;
u32 crc;
struct scatterlist sg;
int ret;
@@ -348,7 +347,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[3] = crc >> 24;
crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, len + 4);
- ret= crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+ ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
tkey->tx_iv16++;
if (tkey->tx_iv16 == 0) {
@@ -537,9 +536,9 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
michael_mic_hdr(skb, tkey->tx_hdr);
- if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
+ if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
- }
+
pos = skb_put(skb, 8);
if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
@@ -583,9 +582,8 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
return -1;
michael_mic_hdr(skb, tkey->rx_hdr);
- if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
+ if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
- }
if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
index f114f9a33e17..f25367224941 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
@@ -11,7 +11,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-//#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
@@ -28,8 +27,6 @@ MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Host AP crypt: WEP");
MODULE_LICENSE("GPL");
-
-
struct prism2_wep_data {
u32 iv;
#define WEP_KEY_LEN 13
@@ -40,7 +37,6 @@ struct prism2_wep_data {
struct crypto_blkcipher *rx_tfm;
};
-
static void *prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
@@ -79,7 +75,6 @@ fail:
return NULL;
}
-
static void prism2_wep_deinit(void *priv)
{
struct prism2_wep_data *_priv = priv;
@@ -94,7 +89,6 @@ static void prism2_wep_deinit(void *priv)
kfree(priv);
}
-
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
* for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
* so the payload length increases with 8 bytes.
@@ -157,7 +151,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
}
-
/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
* the frame: IV (4 bytes), encrypted payload (including SNAP header),
* ICV (4 bytes). len includes both IV and ICV.
@@ -219,7 +212,6 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return 0;
}
-
static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
struct prism2_wep_data *wep = priv;
@@ -233,7 +225,6 @@ static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
return 0;
}
-
static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
struct prism2_wep_data *wep = priv;
@@ -246,7 +237,6 @@ static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
return wep->key_len;
}
-
static char *prism2_wep_print_stats(char *p, void *priv)
{
struct prism2_wep_data *wep = priv;
@@ -255,7 +245,6 @@ static char *prism2_wep_print_stats(char *p, void *priv)
return p;
}
-
static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
.name = "WEP",
.init = prism2_wep_init,
@@ -272,21 +261,17 @@ static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
.owner = THIS_MODULE,
};
-
int ieee80211_crypto_wep_init(void)
{
return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
}
-
void ieee80211_crypto_wep_exit(void)
{
ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}
-
void ieee80211_wep_null(void)
{
-// printk("============>%s()\n", __func__);
return;
}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index c27392d8b640..03eb164798cd 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -13,7 +13,6 @@
* released under the GPL
*/
-
#include "ieee80211.h"
#include <linux/random.h>
@@ -24,14 +23,6 @@
#include <linux/etherdevice.h>
#include "dot11d.h"
-u8 rsn_authen_cipher_suite[16][4] = {
- {0x00, 0x0F, 0xAC, 0x00}, //Use group key, //Reserved
- {0x00, 0x0F, 0xAC, 0x01}, //WEP-40 //RSNA default
- {0x00, 0x0F, 0xAC, 0x02}, //TKIP //NONE //{used just as default}
- {0x00, 0x0F, 0xAC, 0x03}, //WRAP-historical
- {0x00, 0x0F, 0xAC, 0x04}, //CCMP
- {0x00, 0x0F, 0xAC, 0x05}, //WEP-104
-};
short ieee80211_is_54g(const struct ieee80211_network *net)
{
@@ -62,14 +53,13 @@ static unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
}
/* place the MFIE rate, tag to the memory (double) poised.
- * Then it updates the pointer so that
- * it points after the new MFIE tag added.
+ * Then it updates the pointer so that it points after the new MFIE tag added.
*/
static void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
- if (ieee->modulation & IEEE80211_CCK_MODULATION){
+ if (ieee->modulation & IEEE80211_CCK_MODULATION) {
*tag++ = MFIE_TYPE_RATES;
*tag++ = 4;
*tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
@@ -86,8 +76,7 @@ static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
- if (ieee->modulation & IEEE80211_OFDM_MODULATION){
-
+ if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
*tag++ = MFIE_TYPE_RATES_EX;
*tag++ = 8;
*tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
@@ -100,22 +89,20 @@ static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
*tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
}
-
/* We may add an option for custom rates that specific HW might support */
*tag_p = tag;
}
-
static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
- *tag++ = MFIE_TYPE_GENERIC; //0
+ *tag++ = MFIE_TYPE_GENERIC; /* 0 */
*tag++ = 7;
*tag++ = 0x00;
*tag++ = 0x50;
*tag++ = 0xf2;
- *tag++ = 0x02;//5
+ *tag++ = 0x02; /* 5 */
*tag++ = 0x00;
*tag++ = 0x01;
#ifdef SUPPORT_USPD
@@ -148,32 +135,23 @@ static void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p)
static void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
{
int nh;
- nh = (ieee->mgmt_queue_head +1) % MGMT_QUEUE_NUM;
+ nh = (ieee->mgmt_queue_head + 1) % MGMT_QUEUE_NUM;
-/*
- * if the queue is full but we have newer frames then
- * just overwrites the oldest.
- *
- * if (nh == ieee->mgmt_queue_tail)
- * return -1;
- */
ieee->mgmt_queue_head = nh;
ieee->mgmt_queue_ring[nh] = skb;
-
- //return 0;
}
static struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
{
struct sk_buff *ret;
- if(ieee->mgmt_queue_tail == ieee->mgmt_queue_head)
+ if (ieee->mgmt_queue_tail == ieee->mgmt_queue_head)
return NULL;
ret = ieee->mgmt_queue_ring[ieee->mgmt_queue_tail];
ieee->mgmt_queue_tail =
- (ieee->mgmt_queue_tail+1) % MGMT_QUEUE_NUM;
+ (ieee->mgmt_queue_tail + 1) % MGMT_QUEUE_NUM;
return ret;
}
@@ -183,7 +161,6 @@ static void init_mgmt_queue(struct ieee80211_device *ieee)
ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
}
-
void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl);
inline void softmac_mgmt_xmit(struct sk_buff *skb,
@@ -191,20 +168,18 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb,
{
unsigned long flags;
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
- struct ieee80211_hdr_3addr *header=
+ struct ieee80211_hdr_3addr *header =
(struct ieee80211_hdr_3addr *) skb->data;
-
spin_lock_irqsave(&ieee->lock, flags);
/* called with 2nd param 0, no mgmt lock required */
- ieee80211_sta_wakeup(ieee,0);
+ ieee80211_sta_wakeup(ieee, 0);
- if(single){
- if(ieee->queue_stop){
-
- enqueue_mgmt(ieee,skb);
- }else{
+ if (single) {
+ if (ieee->queue_stop) {
+ enqueue_mgmt(ieee, skb);
+ } else {
header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -214,11 +189,11 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb,
/* avoid watchdog triggers */
ieee->dev->trans_start = jiffies;
- ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
}
spin_unlock_irqrestore(&ieee->lock, flags);
- }else{
+ } else {
spin_unlock_irqrestore(&ieee->lock, flags);
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
@@ -231,24 +206,20 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb,
/* avoid watchdog triggers */
ieee->dev->trans_start = jiffies;
- ieee->softmac_hard_start_xmit(skb,ieee->dev);
+ ieee->softmac_hard_start_xmit(skb, ieee->dev);
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags);
}
}
-
inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
struct ieee80211_device *ieee)
{
-
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
struct ieee80211_hdr_3addr *header =
(struct ieee80211_hdr_3addr *) skb->data;
-
- if(single){
-
+ if (single) {
header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -258,10 +229,8 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
/* avoid watchdog triggers */
ieee->dev->trans_start = jiffies;
- ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
-
- }else{
-
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
+ } else {
header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -271,12 +240,10 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
/* avoid watchdog triggers */
ieee->dev->trans_start = jiffies;
- ieee->softmac_hard_start_xmit(skb,ieee->dev);
-
+ ieee->softmac_hard_start_xmit(skb, ieee->dev);
}
-// dev_kfree_skb_any(skb);//edit by thomas
}
-//by amy for power save
+
inline struct sk_buff *
ieee80211_disassociate_skb(struct ieee80211_network *beacon,
struct ieee80211_device *ieee, u8 asRsn)
@@ -288,7 +255,7 @@ ieee80211_disassociate_skb(struct ieee80211_network *beacon,
if (!skb)
return NULL;
- disass = (struct ieee80211_disassoc_frame *) skb_put(skb,sizeof(struct ieee80211_disassoc_frame));
+ disass = (struct ieee80211_disassoc_frame *) skb_put(skb, sizeof(struct ieee80211_disassoc_frame));
disass->header.frame_control = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
disass->header.duration_id = 0;
@@ -299,21 +266,19 @@ ieee80211_disassociate_skb(struct ieee80211_network *beacon,
disass->reasoncode = asRsn;
return skb;
}
+
void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta, u8 asRsn)
{
- struct ieee80211_network *beacon = &ieee->current_network;
- struct sk_buff *skb;
- skb = ieee80211_disassociate_skb(beacon,ieee,asRsn);
- if (skb){
- softmac_mgmt_xmit(skb, ieee);
- //dev_kfree_skb_any(skb);//edit by thomas
- }
+ struct ieee80211_network *beacon = &ieee->current_network;
+ struct sk_buff *skb;
+ skb = ieee80211_disassociate_skb(beacon, ieee, asRsn);
+ if (skb)
+ softmac_mgmt_xmit(skb, ieee);
}
-//by amy for power save
inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
{
- unsigned int len,rate_len;
+ unsigned int len, rate_len;
u8 *tag;
struct sk_buff *skb;
struct ieee80211_probe_request *req;
@@ -327,75 +292,45 @@ inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
if (!skb)
return NULL;
- req = (struct ieee80211_probe_request *) skb_put(skb,sizeof(struct ieee80211_probe_request));
+ req = (struct ieee80211_probe_request *) skb_put(skb, sizeof(struct ieee80211_probe_request));
req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- req->header.duration_id = 0; //FIXME: is this OK ?
+ req->header.duration_id = 0; /* FIXME: is this OK ? */
memset(req->header.addr1, 0xff, ETH_ALEN);
memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memset(req->header.addr3, 0xff, ETH_ALEN);
- tag = (u8 *) skb_put(skb,len+2+rate_len);
+ tag = (u8 *) skb_put(skb, len + 2 + rate_len);
*tag++ = MFIE_TYPE_SSID;
*tag++ = len;
memcpy(tag, ieee->current_network.ssid, len);
tag += len;
- ieee80211_MFIE_Brate(ieee,&tag);
- ieee80211_MFIE_Grate(ieee,&tag);
+ ieee80211_MFIE_Brate(ieee, &tag);
+ ieee80211_MFIE_Grate(ieee, &tag);
return skb;
}
struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee);
-void ext_ieee80211_send_beacon_wq(struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
-
- //unsigned long flags;
-
- skb = ieee80211_get_beacon_(ieee);
-
- if (skb){
- softmac_mgmt_xmit(skb, ieee);
- ieee->softmac_stats.tx_beacons++;
- dev_kfree_skb_any(skb);//edit by thomas
- }
-
-
- //printk(KERN_WARNING "[1] beacon sending!\n");
- ieee->beacon_timer.expires = jiffies +
- (MSECS( ieee->current_network.beacon_interval -5));
-
- //spin_lock_irqsave(&ieee->beacon_lock,flags);
- if(ieee->beacon_txing)
- add_timer(&ieee->beacon_timer);
- //spin_unlock_irqrestore(&ieee->beacon_lock,flags);
-}
-
static void ieee80211_send_beacon(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
- //unsigned long flags;
-
skb = ieee80211_get_beacon_(ieee);
- if (skb){
+ if (skb) {
softmac_mgmt_xmit(skb, ieee);
ieee->softmac_stats.tx_beacons++;
- dev_kfree_skb_any(skb);//edit by thomas
+ dev_kfree_skb_any(skb);
}
- //printk(KERN_WARNING "[1] beacon sending!\n");
ieee->beacon_timer.expires = jiffies +
- (MSECS( ieee->current_network.beacon_interval -5));
+ (MSECS(ieee->current_network.beacon_interval - 5));
- //spin_lock_irqsave(&ieee->beacon_lock,flags);
- if(ieee->beacon_txing)
+ if (ieee->beacon_txing)
add_timer(&ieee->beacon_timer);
- //spin_unlock_irqrestore(&ieee->beacon_lock,flags);
}
@@ -415,16 +350,15 @@ static void ieee80211_send_probe(struct ieee80211_device *ieee)
struct sk_buff *skb;
skb = ieee80211_probe_req(ieee);
- if (skb){
+ if (skb) {
softmac_mgmt_xmit(skb, ieee);
ieee->softmac_stats.tx_probe_rq++;
- //dev_kfree_skb_any(skb);//edit by thomas
}
}
static void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
{
- if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)){
+ if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)) {
ieee80211_send_probe(ieee);
ieee80211_send_probe(ieee);
}
@@ -439,17 +373,14 @@ static void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
u8 channel_map[MAX_CHANNEL_NUMBER+1];
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
down(&ieee->scan_sem);
-// printk("==================> Sync scan\n");
- while(1)
- {
-
- do{
+ while (1) {
+ do {
ch++;
if (ch > MAX_CHANNEL_NUMBER)
goto out; /* scan completed */
- }while(!channel_map[ch]);
+ } while (!channel_map[ch]);
/* this function can be called in two situations
* 1- We have switched to ad-hoc mode and we are
* performing a complete syncro scan before conclude
@@ -473,101 +404,74 @@ static void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
goto out;
ieee->set_chan(ieee->dev, ch);
-// printk("=====>channel=%d ",ch);
- if(channel_map[ch] == 1)
- {
-// printk("====send probe request\n");
+ if (channel_map[ch] == 1)
ieee80211_send_probe_requests(ieee);
- }
+
/* this prevent excessive time wait when we
* need to wait for a syncro scan to end..
*/
if (ieee->sync_scan_hurryup)
goto out;
-
msleep_interruptible_rtl(IEEE80211_SOFTMAC_SCAN_TIME);
-
}
out:
ieee->sync_scan_hurryup = 0;
up(&ieee->scan_sem);
- if(IS_DOT11D_ENABLE(ieee))
+ if (IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
}
void ieee80211_softmac_ips_scan_syncro(struct ieee80211_device *ieee)
{
int ch;
- unsigned int watch_dog = 0;
+ unsigned int watch_dog = 0;
u8 channel_map[MAX_CHANNEL_NUMBER+1];
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
- down(&ieee->scan_sem);
+ down(&ieee->scan_sem);
ch = ieee->current_network.channel;
-// if(ieee->sync_scan_hurryup)
-// {
-
-// printk("stop scan sync\n");
-// goto out;
-// }
-// printk("=======hh===============>ips scan\n");
- while(1)
- {
- /* this function can be called in two situations
- * 1- We have switched to ad-hoc mode and we are
- * performing a complete syncro scan before conclude
- * there are no interesting cell and to create a
- * new one. In this case the link state is
- * IEEE80211_NOLINK until we found an interesting cell.
- * If so the ieee8021_new_net, called by the RX path
- * will set the state to IEEE80211_LINKED, so we stop
- * scanning
- * 2- We are linked and the root uses run iwlist scan.
- * So we switch to IEEE80211_LINKED_SCANNING to remember
- * that we are still logically linked (not interested in
- * new network events, despite for updating the net list,
- * but we are temporarily 'unlinked' as the driver shall
- * not filter RX frames and the channel is changing.
- * So the only situation in witch are interested is to check
- * if the state become LINKED because of the #1 situation
- */
+
+ while (1) {
+ /* this function can be called in two situations
+ * 1- We have switched to ad-hoc mode and we are
+ * performing a complete syncro scan before concluding
+ * there are no interesting cells and creating a
+ * new one. In this case the link state is
+ * IEEE80211_NOLINK until we find an interesting cell.
+ * If so the ieee8021_new_net, called by the RX path
+ * will set the state to IEEE80211_LINKED, so we stop
+ * scanning
+ * 2- We are linked and the root user runs iwlist scan.
+ * So we switch to IEEE80211_LINKED_SCANNING to remember
+ * that we are still logically linked (not interested in
+ * new network events, despite for updating the net list,
+ * but we are temporarily 'unlinked' as the driver shall
+ * not filter RX frames and the channel is changing.
+ * So the only situation in which we are interested is to check
+ * if the state becomes LINKED because of the #1 situation
+ */
if (ieee->state == IEEE80211_LINKED)
- {
goto out;
- }
- if(channel_map[ieee->current_network.channel] > 0)
- {
+
+ if (channel_map[ieee->current_network.channel] > 0)
ieee->set_chan(ieee->dev, ieee->current_network.channel);
-// printk("======>channel=%d ",ieee->current_network.channel);
- }
- if(channel_map[ieee->current_network.channel] == 1)
- {
-// printk("====send probe request\n");
+
+ if (channel_map[ieee->current_network.channel] == 1)
ieee80211_send_probe_requests(ieee);
- }
- /* this prevent excessive time wait when we
- * need to wait for a syncro scan to end..
- */
-// if (ieee->sync_scan_hurryup)
-// goto out;
msleep_interruptible_rtl(IEEE80211_SOFTMAC_SCAN_TIME);
- do{
+ do {
if (watch_dog++ >= MAX_CHANNEL_NUMBER)
- // if (++watch_dog >= 15);//MAX_CHANNEL_NUMBER) //YJ,modified,080630
goto out; /* scan completed */
ieee->current_network.channel = (ieee->current_network.channel + 1)%MAX_CHANNEL_NUMBER;
- }while(!channel_map[ieee->current_network.channel]);
- }
+ } while (!channel_map[ieee->current_network.channel]);
+ }
out:
- //ieee->sync_scan_hurryup = 0;
- //ieee->set_chan(ieee->dev, ch);
- //ieee->current_network.channel = ch;
ieee->actscanning = false;
up(&ieee->scan_sem);
- if(IS_DOT11D_ENABLE(ieee))
+ if (IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
}
@@ -575,29 +479,24 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
- static short watchdog = 0;
+ static short watchdog;
u8 channel_map[MAX_CHANNEL_NUMBER+1];
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
-// printk("ieee80211_softmac_scan_wq ENABLE_IPS\n");
-// printk("in %s\n",__func__);
down(&ieee->scan_sem);
- do{
+ do {
ieee->current_network.channel =
(ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER;
if (watchdog++ > MAX_CHANNEL_NUMBER)
goto out; /* no good chans */
+ } while (!channel_map[ieee->current_network.channel]);
- }while(!channel_map[ieee->current_network.channel]);
-
- //printk("current_network.channel:%d\n", ieee->current_network.channel);
- if (ieee->scanning == 0 )
- {
+ if (ieee->scanning == 0) {
printk("error out, scanning = 0\n");
goto out;
}
ieee->set_chan(ieee->dev, ieee->current_network.channel);
- if(channel_map[ieee->current_network.channel] == 1)
+ if (channel_map[ieee->current_network.channel] == 1)
ieee80211_send_probe_requests(ieee);
queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
@@ -609,7 +508,7 @@ out:
ieee->scanning = 0;
up(&ieee->scan_sem);
- if(IS_DOT11D_ENABLE(ieee))
+ if (IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
return;
}
@@ -618,62 +517,51 @@ static void ieee80211_beacons_start(struct ieee80211_device *ieee)
{
unsigned long flags;
- spin_lock_irqsave(&ieee->beacon_lock,flags);
+ spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 1;
ieee80211_send_beacon(ieee);
- spin_unlock_irqrestore(&ieee->beacon_lock,flags);
+ spin_unlock_irqrestore(&ieee->beacon_lock, flags);
}
static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
{
unsigned long flags;
- spin_lock_irqsave(&ieee->beacon_lock,flags);
+ spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 0;
- del_timer_sync(&ieee->beacon_timer);
-
- spin_unlock_irqrestore(&ieee->beacon_lock,flags);
+ del_timer_sync(&ieee->beacon_timer);
+ spin_unlock_irqrestore(&ieee->beacon_lock, flags);
}
-
void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
{
- if(ieee->stop_send_beacons)
+ if (ieee->stop_send_beacons)
ieee->stop_send_beacons(ieee->dev);
if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
ieee80211_beacons_stop(ieee);
}
-
void ieee80211_start_send_beacons(struct ieee80211_device *ieee)
{
- if(ieee->start_send_beacons)
+ if (ieee->start_send_beacons)
ieee->start_send_beacons(ieee->dev);
- if(ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
+ if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
ieee80211_beacons_start(ieee);
}
-
static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
{
-// unsigned long flags;
-
- //ieee->sync_scan_hurryup = 1;
-
down(&ieee->scan_sem);
-// spin_lock_irqsave(&ieee->lock, flags);
- if (ieee->scanning == 1){
+ if (ieee->scanning == 1) {
ieee->scanning = 0;
- //del_timer_sync(&ieee->scan_timer);
cancel_delayed_work(&ieee->softmac_scan_wq);
}
-// spin_unlock_irqrestore(&ieee->lock, flags);
up(&ieee->scan_sem);
}
@@ -688,38 +576,28 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
/* called with ieee->lock held */
void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
{
- if(IS_DOT11D_ENABLE(ieee) )
- {
- if(IS_COUNTRY_IE_VALID(ieee))
- {
+ if (IS_DOT11D_ENABLE(ieee)) {
+ if (IS_COUNTRY_IE_VALID(ieee))
RESET_CIE_WATCHDOG(ieee);
- }
}
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){
- if (ieee->scanning == 0)
- {
+
+ if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
+ if (ieee->scanning == 0) {
ieee->scanning = 1;
- //ieee80211_softmac_scan(ieee);
- // queue_work(ieee->wq, &ieee->softmac_scan_wq);
- //care this,1203,2007,by lawrence
#if 1
- queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq,0);
+ queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, 0);
#endif
}
}else
ieee->start_scan(ieee->dev);
-
}
/* called with wx_sem held */
void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
{
- if(IS_DOT11D_ENABLE(ieee) )
- {
- if(IS_COUNTRY_IE_VALID(ieee))
- {
+ if (IS_DOT11D_ENABLE(ieee)) {
+ if (IS_COUNTRY_IE_VALID(ieee))
RESET_CIE_WATCHDOG(ieee);
- }
}
ieee->sync_scan_hurryup = 0;
@@ -727,7 +605,6 @@ void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
ieee80211_softmac_scan_syncro(ieee);
else
ieee->scan_syncro(ieee->dev);
-
}
inline struct sk_buff *
@@ -739,15 +616,17 @@ ieee80211_authentication_req(struct ieee80211_network *beacon,
skb = dev_alloc_skb(sizeof(struct ieee80211_authentication) + challengelen);
- if (!skb) return NULL;
+ if (!skb)
+ return NULL;
auth = (struct ieee80211_authentication *)
skb_put(skb, sizeof(struct ieee80211_authentication));
auth->header.frame_ctl = IEEE80211_STYPE_AUTH;
- if (challengelen) auth->header.frame_ctl |= IEEE80211_FCTL_WEP;
+ if (challengelen)
+ auth->header.frame_ctl |= IEEE80211_FCTL_WEP;
- auth->header.duration_id = 0x013a; //FIXME
+ auth->header.duration_id = 0x013a; /* FIXME */
memcpy(auth->header.addr1, beacon->bssid, ETH_ALEN);
memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -761,7 +640,6 @@ ieee80211_authentication_req(struct ieee80211_network *beacon,
auth->status = cpu_to_le16(WLAN_STATUS_SUCCESS);
return skb;
-
}
static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee,
@@ -772,29 +650,30 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee,
struct ieee80211_probe_response *beacon_buf;
struct sk_buff *skb;
int encrypt;
- int atim_len,erp_len;
- struct ieee80211_crypt_data* crypt;
+ int atim_len, erp_len;
+ struct ieee80211_crypt_data *crypt;
char *ssid = ieee->current_network.ssid;
int ssid_len = ieee->current_network.ssid_len;
int rate_len = ieee->current_network.rates_len+2;
int rate_ex_len = ieee->current_network.rates_ex_len;
int wpa_ie_len = ieee->wpa_ie_len;
- if(rate_ex_len > 0) rate_ex_len+=2;
+ if (rate_ex_len > 0)
+ rate_ex_len += 2;
- if(ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
+ if (ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
atim_len = 4;
else
atim_len = 0;
- if(ieee80211_is_54g(&ieee->current_network))
+ if (ieee80211_is_54g(&ieee->current_network))
erp_len = 3;
else
erp_len = 0;
beacon_size = sizeof(struct ieee80211_probe_response)+
ssid_len
- +3 //channel
+ +3 /* channel */
+rate_len
+rate_ex_len
+atim_len
@@ -806,19 +685,19 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee,
if (!skb)
return NULL;
- beacon_buf = (struct ieee80211_probe_response*) skb_put(skb, beacon_size);
+ beacon_buf = (struct ieee80211_probe_response *) skb_put(skb, beacon_size);
- memcpy (beacon_buf->header.addr1, dest,ETH_ALEN);
- memcpy (beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy (beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
+ memcpy(beacon_buf->header.addr1, dest, ETH_ALEN);
+ memcpy(beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
- beacon_buf->header.duration_id = 0; //FIXME
+ beacon_buf->header.duration_id = 0; /* FIXME */
beacon_buf->beacon_interval =
cpu_to_le16(ieee->current_network.beacon_interval);
beacon_buf->capability =
cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_IBSS);
- if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
+ if (ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
crypt = ieee->crypt[ieee->tx_keyidx];
@@ -835,51 +714,52 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee,
beacon_buf->info_element.id = MFIE_TYPE_SSID;
beacon_buf->info_element.len = ssid_len;
- tag = (u8*) beacon_buf->info_element.data;
+ tag = (u8 *) beacon_buf->info_element.data;
memcpy(tag, ssid, ssid_len);
tag += ssid_len;
*(tag++) = MFIE_TYPE_RATES;
- *(tag++) = rate_len-2;
- memcpy(tag,ieee->current_network.rates,rate_len-2);
- tag+=rate_len-2;
+ *(tag++) = rate_len - 2;
+ memcpy(tag, ieee->current_network.rates, rate_len-2);
+ tag += rate_len - 2;
*(tag++) = MFIE_TYPE_DS_SET;
*(tag++) = 1;
*(tag++) = ieee->current_network.channel;
- if(atim_len){
+ if (atim_len) {
*(tag++) = MFIE_TYPE_IBSS_SET;
*(tag++) = 2;
- *((u16*)(tag)) = cpu_to_le16(ieee->current_network.atim_window);
- tag+=2;
+ *((u16 *)(tag)) = cpu_to_le16(ieee->current_network.atim_window);
+ tag += 2;
}
- if(erp_len){
+ if (erp_len) {
*(tag++) = MFIE_TYPE_ERP;
*(tag++) = 1;
*(tag++) = 0;
}
- if(rate_ex_len){
+ if (rate_ex_len) {
*(tag++) = MFIE_TYPE_RATES_EX;
*(tag++) = rate_ex_len-2;
- memcpy(tag,ieee->current_network.rates_ex,rate_ex_len-2);
- tag+=rate_ex_len-2;
+ memcpy(tag, ieee->current_network.rates_ex, rate_ex_len-2);
+ tag += rate_ex_len - 2;
}
- if (wpa_ie_len)
- {
- if (ieee->iw_mode == IW_MODE_ADHOC)
- {//as Windows will set pairwise key same as the group key which is not allowed in Linux, so set this for IOT issue. WB 2008.07.07
+ if (wpa_ie_len) {
+ if (ieee->iw_mode == IW_MODE_ADHOC) {
+ /* as Windows will set pairwise key same as the group
+ * key which is not allowed in Linux, so set this for
+ * IOT issue.
+ */
memcpy(&ieee->wpa_ie[14], &ieee->wpa_ie[8], 4);
}
memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
}
-
skb->dev = ieee->dev;
return skb;
}
@@ -888,9 +768,9 @@ static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
u8 *dest)
{
struct sk_buff *skb;
- u8* tag;
+ u8 *tag;
- struct ieee80211_crypt_data* crypt;
+ struct ieee80211_crypt_data *crypt;
struct ieee80211_assoc_response_frame *assoc;
short encrypt;
@@ -903,34 +783,36 @@ static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
return NULL;
assoc = (struct ieee80211_assoc_response_frame *)
- skb_put(skb,sizeof(struct ieee80211_assoc_response_frame));
+ skb_put(skb, sizeof(struct ieee80211_assoc_response_frame));
assoc->header.frame_control = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
- memcpy(assoc->header.addr1, dest,ETH_ALEN);
+ memcpy(assoc->header.addr1, dest, ETH_ALEN);
memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
assoc->capability = cpu_to_le16(ieee->iw_mode == IW_MODE_MASTER ?
WLAN_CAPABILITY_BSS : WLAN_CAPABILITY_IBSS);
-
- if(ieee->short_slot)
+ if (ieee->short_slot)
assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
if (ieee->host_encrypt)
crypt = ieee->crypt[ieee->tx_keyidx];
- else crypt = NULL;
+ else
+ crypt = NULL;
- encrypt = ( crypt && crypt->ops);
+ encrypt = (crypt && crypt->ops);
if (encrypt)
assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
assoc->status = 0;
assoc->aid = cpu_to_le16(ieee->assoc_id);
- if (ieee->assoc_id == 0x2007) ieee->assoc_id=0;
- else ieee->assoc_id++;
+ if (ieee->assoc_id == 0x2007)
+ ieee->assoc_id = 0;
+ else
+ ieee->assoc_id++;
- tag = (u8*) skb_put(skb, rate_len);
+ tag = (u8 *) skb_put(skb, rate_len);
ieee80211_MFIE_Brate(ieee, &tag);
ieee80211_MFIE_Grate(ieee, &tag);
@@ -962,21 +844,19 @@ static struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,
memcpy(auth->header.addr1, dest, ETH_ALEN);
auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH);
return skb;
-
-
}
static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee, short pwr)
{
struct sk_buff *skb;
- struct ieee80211_hdr_3addr* hdr;
+ struct ieee80211_hdr_3addr *hdr;
skb = dev_alloc_skb(sizeof(struct ieee80211_hdr_3addr));
if (!skb)
return NULL;
- hdr = (struct ieee80211_hdr_3addr*)skb_put(skb,sizeof(struct ieee80211_hdr_3addr));
+ hdr = (struct ieee80211_hdr_3addr *)skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
memcpy(hdr->addr1, ieee->current_network.bssid, ETH_ALEN);
memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -987,83 +867,64 @@ static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee, short
(pwr ? IEEE80211_FCTL_PM:0));
return skb;
-
-
}
-
static void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
{
struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);
- if (buf){
+ if (buf) {
softmac_mgmt_xmit(buf, ieee);
- dev_kfree_skb_any(buf);//edit by thomas
+ dev_kfree_skb_any(buf);
}
}
-
static void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8 *dest)
{
struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest);
- if (buf){
+ if (buf) {
softmac_mgmt_xmit(buf, ieee);
- dev_kfree_skb_any(buf);//edit by thomas
+ dev_kfree_skb_any(buf);
}
}
-
static void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
{
-
struct sk_buff *buf = ieee80211_probe_resp(ieee, dest);
if (buf) {
softmac_mgmt_xmit(buf, ieee);
- dev_kfree_skb_any(buf);//edit by thomas
+ dev_kfree_skb_any(buf);
}
}
-
inline struct sk_buff *
ieee80211_association_req(struct ieee80211_network *beacon,
struct ieee80211_device *ieee)
{
struct sk_buff *skb;
- //unsigned long flags;
struct ieee80211_assoc_request_frame *hdr;
u8 *tag;
- //short info_addr = 0;
- //int i;
- //u16 suite_count = 0;
- //u8 suit_select = 0;
unsigned int wpa_len = beacon->wpa_ie_len;
- //struct net_device *dev = ieee->dev;
- //union iwreq_data wrqu;
- //u8 *buff;
- //u8 *p;
#if 1
- // for testing purpose
+ /* for testing purpose */
unsigned int rsn_len = beacon->rsn_ie_len;
-#else
- unsigned int rsn_len = beacon->rsn_ie_len - 4;
#endif
unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
unsigned int wmm_info_len = beacon->QoS_Enable?9:0;
unsigned int turbo_info_len = beacon->Turbo_Enable?9:0;
u8 encry_proto = ieee->wpax_type_notify & 0xff;
- //u8 pairwise_type = (ieee->wpax_type_notify >> 8) & 0xff;
- //u8 authen_type = (ieee->wpax_type_notify >> 16) & 0xff;
int len = 0;
- //[0] Notify type of encryption: WPA/WPA2
- //[1] pair wise type
- //[2] authen type
- if(ieee->wpax_type_set) {
+ /* [0] Notify type of encryption: WPA/WPA2
+ * [1] pair wise type
+ * [2] authen type
+ */
+ if (ieee->wpax_type_set) {
if (IEEE_PROTO_WPA == encry_proto) {
rsn_len = 0;
} else if (IEEE_PROTO_RSN == encry_proto) {
@@ -1071,8 +932,8 @@ ieee80211_association_req(struct ieee80211_network *beacon,
}
}
len = sizeof(struct ieee80211_assoc_request_frame)+
- + beacon->ssid_len//essid tagged val
- + rate_len//rates tagged val
+ + beacon->ssid_len /* essid tagged val */
+ + rate_len /* rates tagged val */
+ wpa_len
+ rsn_len
+ wmm_info_len
@@ -1086,24 +947,23 @@ ieee80211_association_req(struct ieee80211_network *beacon,
hdr = (struct ieee80211_assoc_request_frame *)
skb_put(skb, sizeof(struct ieee80211_assoc_request_frame));
-
hdr->header.frame_control = IEEE80211_STYPE_ASSOC_REQ;
- hdr->header.duration_id= 37; //FIXME
+ hdr->header.duration_id = 37; /* FIXME */
memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(hdr->header.addr3, beacon->bssid, ETH_ALEN);
- memcpy(ieee->ap_mac_addr, beacon->bssid, ETH_ALEN);//for HW security, John
+ memcpy(ieee->ap_mac_addr, beacon->bssid, ETH_ALEN); /* for HW security */
hdr->capability = cpu_to_le16(WLAN_CAPABILITY_BSS);
- if (beacon->capability & WLAN_CAPABILITY_PRIVACY )
+ if (beacon->capability & WLAN_CAPABILITY_PRIVACY)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
if (beacon->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
- if(ieee->short_slot)
+ if (ieee->short_slot)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
- hdr->listen_interval = 0xa; //FIXME
+ hdr->listen_interval = 0xa; /* FIXME */
hdr->info_element.id = MFIE_TYPE_SSID;
@@ -1116,27 +976,26 @@ ieee80211_association_req(struct ieee80211_network *beacon,
ieee80211_MFIE_Brate(ieee, &tag);
ieee80211_MFIE_Grate(ieee, &tag);
- //add rsn==0 condition for ap's mix security mode(wpa+wpa2), john2007.8.9
- //choose AES encryption as default algorithm while using mixed mode
+ /* add rsn==0 condition for ap's mix security mode(wpa+wpa2)
+ * choose AES encryption as default algorithm while using mixed mode.
+ */
- tag = skb_put(skb,ieee->wpa_ie_len);
- memcpy(tag,ieee->wpa_ie,ieee->wpa_ie_len);
+ tag = skb_put(skb, ieee->wpa_ie_len);
+ memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
- tag = skb_put(skb,wmm_info_len);
- if(wmm_info_len) {
+ tag = skb_put(skb, wmm_info_len);
+ if (wmm_info_len)
ieee80211_WMM_Info(ieee, &tag);
- }
- tag = skb_put(skb,turbo_info_len);
- if(turbo_info_len) {
- ieee80211_TURBO_Info(ieee, &tag);
- }
+
+ tag = skb_put(skb, turbo_info_len);
+ if (turbo_info_len)
+ ieee80211_TURBO_Info(ieee, &tag);
return skb;
}
void ieee80211_associate_abort(struct ieee80211_device *ieee)
{
-
unsigned long flags;
spin_lock_irqsave(&ieee->lock, flags);
@@ -1148,17 +1007,17 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
* Here we will check if there are good nets to associate
* with, so we retry or just get back to NO_LINK and scanning
*/
- if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING){
+ if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING) {
IEEE80211_DEBUG_MGMT("Authentication failed\n");
ieee->softmac_stats.no_auth_rs++;
- }else{
+ } else {
IEEE80211_DEBUG_MGMT("Association failed\n");
ieee->softmac_stats.no_ass_rs++;
}
ieee->state = IEEE80211_ASSOCIATING_RETRY;
- queue_delayed_work(ieee->wq, &ieee->associate_retry_wq,IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
+ queue_delayed_work(ieee->wq, &ieee->associate_retry_wq, IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
spin_unlock_irqrestore(&ieee->lock, flags);
}
@@ -1168,7 +1027,6 @@ static void ieee80211_associate_abort_cb(unsigned long dev)
ieee80211_associate_abort((struct ieee80211_device *) dev);
}
-
static void ieee80211_associate_step1(struct ieee80211_device *ieee)
{
struct ieee80211_network *beacon = &ieee->current_network;
@@ -1176,26 +1034,24 @@ static void ieee80211_associate_step1(struct ieee80211_device *ieee)
IEEE80211_DEBUG_MGMT("Stopping scan\n");
ieee->softmac_stats.tx_auth_rq++;
- skb=ieee80211_authentication_req(beacon, ieee, 0);
- if (!skb){
-
+ skb = ieee80211_authentication_req(beacon, ieee, 0);
+ if (!skb) {
ieee80211_associate_abort(ieee);
- }
- else{
- ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING ;
+ } else {
+ ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING;
IEEE80211_DEBUG_MGMT("Sending authentication request\n");
- //printk("---Sending authentication request\n");
softmac_mgmt_xmit(skb, ieee);
- //BUGON when you try to add_timer twice, using mod_timer may be better, john0709
- if(!timer_pending(&ieee->associate_timer)){
+ /* add_timer twice triggers a BUG_ON; using mod_timer may
+ * be better.
+ */
+ if (!timer_pending(&ieee->associate_timer)) {
ieee->associate_timer.expires = jiffies + (HZ / 2);
add_timer(&ieee->associate_timer);
}
- //If call dev_kfree_skb_any,a warning will ocur....
- //KERNEL: assertion (!atomic_read(&skb->users)) failed at net/core/dev.c (1708)
- //So ... 1204 by lawrence.
- //printk("\nIn %s,line %d call kfree skb.",__func__,__LINE__);
- //dev_kfree_skb_any(skb);//edit by thomas
+ /* If dev_kfree_skb_any is called, a warning will occur:
+ * KERNEL: assertion (!atomic_read(&skb->users)) failed at
+ * net/core/dev.c (1708)
+ */
}
}
@@ -1205,7 +1061,6 @@ static void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *chal
u8 *c;
struct sk_buff *skb;
struct ieee80211_network *beacon = &ieee->current_network;
-// int hlen = sizeof(struct ieee80211_authentication);
del_timer_sync(&ieee->associate_timer);
ieee->associate_seq++;
ieee->softmac_stats.tx_auth_rq++;
@@ -1213,7 +1068,7 @@ static void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *chal
skb = ieee80211_authentication_req(beacon, ieee, chlen+2);
if (!skb)
ieee80211_associate_abort(ieee);
- else{
+ else {
c = skb_put(skb, chlen+2);
*(c++) = MFIE_TYPE_CHALLENGE;
*(c++) = chlen;
@@ -1221,38 +1076,36 @@ static void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *chal
IEEE80211_DEBUG_MGMT("Sending authentication challenge response\n");
- ieee80211_encrypt_fragment(ieee, skb, sizeof(struct ieee80211_hdr_3addr ));
+ ieee80211_encrypt_fragment(ieee, skb, sizeof(struct ieee80211_hdr_3addr));
softmac_mgmt_xmit(skb, ieee);
- if (!timer_pending(&ieee->associate_timer)){
- //printk("=========>add timer again, to crash\n");
+ if (!timer_pending(&ieee->associate_timer)) {
ieee->associate_timer.expires = jiffies + (HZ / 2);
add_timer(&ieee->associate_timer);
}
- dev_kfree_skb_any(skb);//edit by thomas
+ dev_kfree_skb_any(skb);
}
kfree(challenge);
}
static void ieee80211_associate_step2(struct ieee80211_device *ieee)
{
- struct sk_buff* skb;
+ struct sk_buff *skb;
struct ieee80211_network *beacon = &ieee->current_network;
del_timer_sync(&ieee->associate_timer);
IEEE80211_DEBUG_MGMT("Sending association request\n");
ieee->softmac_stats.tx_ass_rq++;
- skb=ieee80211_association_req(beacon, ieee);
+ skb = ieee80211_association_req(beacon, ieee);
if (!skb)
ieee80211_associate_abort(ieee);
- else{
+ else {
softmac_mgmt_xmit(skb, ieee);
- if (!timer_pending(&ieee->associate_timer)){
+ if (!timer_pending(&ieee->associate_timer)) {
ieee->associate_timer.expires = jiffies + (HZ / 2);
add_timer(&ieee->associate_timer);
}
- //dev_kfree_skb_any(skb);//edit by thomas
}
}
@@ -1261,12 +1114,11 @@ static void ieee80211_associate_complete_wq(struct work_struct *work)
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
printk(KERN_INFO "Associated successfully\n");
- if(ieee80211_is_54g(&ieee->current_network) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION)){
-
+ if (ieee80211_is_54g(&ieee->current_network) &&
+ (ieee->modulation & IEEE80211_OFDM_MODULATION)) {
ieee->rate = 540;
printk(KERN_INFO"Using G rates\n");
- }else{
+ } else {
ieee->rate = 110;
printk(KERN_INFO"Using B rates\n");
}
@@ -1279,12 +1131,8 @@ static void ieee80211_associate_complete_wq(struct work_struct *work)
static void ieee80211_associate_complete(struct ieee80211_device *ieee)
{
- int i;
del_timer_sync(&ieee->associate_timer);
- for(i = 0; i < 6; i++) {
- //ieee->seq_ctrl[i] = 0;
- }
ieee->state = IEEE80211_LINKED;
IEEE80211_DEBUG_MGMT("Successfully associated\n");
@@ -1316,7 +1164,7 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
u8 tmp_ssid[IW_ESSID_MAX_SIZE+1];
int tmp_ssid_len = 0;
- short apset,ssidset,ssidbroad,apmatch,ssidmatch;
+ short apset, ssidset, ssidbroad, apmatch, ssidmatch;
/* we are interested in new new only if we are not associated
* and we are not associating / authenticating
@@ -1330,74 +1178,70 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability & WLAN_CAPABILITY_IBSS))
return;
-
- if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC){
+ if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
/* if the user specified the AP MAC, we need also the essid
* This could be obtained by beacons or, if the network does not
* broadcast it, it can be put manually.
*/
- apset = ieee->wap_set;//(memcmp(ieee->current_network.bssid, zero,ETH_ALEN)!=0 );
- ssidset = ieee->ssid_set;//ieee->current_network.ssid[0] != '\0';
- ssidbroad = !(net->ssid_len == 0 || net->ssid[0]== '\0');
- apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN)==0);
+ apset = ieee->wap_set;
+ ssidset = ieee->ssid_set;
+ ssidbroad = !(net->ssid_len == 0 || net->ssid[0] == '\0');
+ apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN) == 0);
- if(ieee->current_network.ssid_len != net->ssid_len)
+ if (ieee->current_network.ssid_len != net->ssid_len)
ssidmatch = 0;
else
- ssidmatch = (0==strncmp(ieee->current_network.ssid, net->ssid, net->ssid_len));
-
- //printk("cur: %s, %d, net:%s, %d\n", ieee->current_network.ssid, ieee->current_network.ssid_len, net->ssid, net->ssid_len);
- //printk("apset=%d apmatch=%d ssidset=%d ssidbroad=%d ssidmatch=%d\n",apset,apmatch,ssidset,ssidbroad,ssidmatch);
-
- if ( /* if the user set the AP check if match.
- * if the network does not broadcast essid we check the user supplied ANY essid
- * if the network does broadcast and the user does not set essid it is OK
- * if the network does broadcast and the user did set essid chech if essid match
- */
- ( apset && apmatch &&
- ((ssidset && ssidbroad && ssidmatch) || (ssidbroad && !ssidset) || (!ssidbroad && ssidset)) ) ||
- /* if the ap is not set, check that the user set the bssid
- * and the network does broadcast and that those two bssid matches
- */
- (!apset && ssidset && ssidbroad && ssidmatch)
- ){
-
-
+ ssidmatch = (0 == strncmp(ieee->current_network.ssid, net->ssid, net->ssid_len));
+
+ /* if the user set the AP check if match.
+ * if the network does not broadcast essid we check the user
+ * supplied ANY essid
+ * if the network does broadcast and the user does not set essid
+ * it is OK
+ * if the network does broadcast and the user did set essid
+ * check if the essids match
+ * (apset && apmatch && ((ssidset && ssidbroad && ssidmatch) ||
+ * (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
+ * if the ap is not set, check that the user set the bssid and
+ * the network does broadcast and that those two bssids match
+ * (!apset && ssidset && ssidbroad && ssidmatch)
+ */
+ if ((apset && apmatch && ((ssidset && ssidbroad && ssidmatch) ||
+ (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
+ (!apset && ssidset && ssidbroad && ssidmatch)) {
/* if the essid is hidden replace it with the
* essid provided by the user.
*/
- if (!ssidbroad){
+ if (!ssidbroad) {
strncpy(tmp_ssid, ieee->current_network.ssid, IW_ESSID_MAX_SIZE);
tmp_ssid_len = ieee->current_network.ssid_len;
}
memcpy(&ieee->current_network, net, sizeof(struct ieee80211_network));
- if (!ssidbroad){
+ if (!ssidbroad) {
strncpy(ieee->current_network.ssid, tmp_ssid, IW_ESSID_MAX_SIZE);
ieee->current_network.ssid_len = tmp_ssid_len;
}
- printk(KERN_INFO"Linking with %s: channel is %d\n",ieee->current_network.ssid,ieee->current_network.channel);
+ printk(KERN_INFO"Linking with %s: channel is %d\n", ieee->current_network.ssid, ieee->current_network.channel);
- if (ieee->iw_mode == IW_MODE_INFRA){
+ if (ieee->iw_mode == IW_MODE_INFRA) {
ieee->state = IEEE80211_ASSOCIATING;
ieee->beinretry = false;
queue_work(ieee->wq, &ieee->associate_procedure_wq);
- }else{
- if(ieee80211_is_54g(&ieee->current_network) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION)){
+ } else {
+ if (ieee80211_is_54g(&ieee->current_network) &&
+ (ieee->modulation & IEEE80211_OFDM_MODULATION)) {
ieee->rate = 540;
printk(KERN_INFO"Using G rates\n");
- }else{
+ } else {
ieee->rate = 110;
printk(KERN_INFO"Using B rates\n");
}
ieee->state = IEEE80211_LINKED;
ieee->beinretry = false;
}
-
}
}
-
}
void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
@@ -1407,60 +1251,52 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
spin_lock_irqsave(&ieee->lock, flags);
list_for_each_entry(target, &ieee->network_list, list) {
-
/* if the state becomes different from NOLINK it means
* we have found what we are searching for
*/
-
if (ieee->state != IEEE80211_NOLINK)
break;
if (ieee->scan_age == 0 || time_after(target->last_scanned + ieee->scan_age, jiffies))
ieee80211_softmac_new_net(ieee, target);
}
-
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
-
static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
{
struct ieee80211_authentication *a;
u8 *t;
- if (skb->len < (sizeof(struct ieee80211_authentication)-sizeof(struct ieee80211_info_element))){
- IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n",skb->len);
+ if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
+ IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
return 0xcafe;
}
*challenge = NULL;
- a = (struct ieee80211_authentication*) skb->data;
- if(skb->len > (sizeof(struct ieee80211_authentication) +3)){
+ a = (struct ieee80211_authentication *) skb->data;
+ if (skb->len > (sizeof(struct ieee80211_authentication) + 3)) {
t = skb->data + sizeof(struct ieee80211_authentication);
- if(*(t++) == MFIE_TYPE_CHALLENGE){
+ if (*(t++) == MFIE_TYPE_CHALLENGE) {
*chlen = *(t++);
*challenge = kmemdup(t, *chlen, GFP_ATOMIC);
if (!*challenge)
return -ENOMEM;
}
}
-
return cpu_to_le16(a->status);
-
}
-
static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
{
struct ieee80211_authentication *a;
- if (skb->len < (sizeof(struct ieee80211_authentication)-sizeof(struct ieee80211_info_element))){
- IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n",skb->len);
+ if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
+ IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n", skb->len);
return -1;
}
- a = (struct ieee80211_authentication*) skb->data;
+ a = (struct ieee80211_authentication *) skb->data;
- memcpy(dest,a->header.addr2, ETH_ALEN);
+ memcpy(dest, a->header.addr2, ETH_ALEN);
if (le16_to_cpu(a->algorithm) != WLAN_AUTH_OPEN)
return WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
@@ -1473,23 +1309,23 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
{
u8 *tag;
u8 *skbend;
- u8 *ssid=NULL;
+ u8 *ssid = NULL;
u8 ssidlen = 0;
struct ieee80211_hdr_3addr *header =
(struct ieee80211_hdr_3addr *) skb->data;
- if (skb->len < sizeof (struct ieee80211_hdr_3addr ))
+ if (skb->len < sizeof(struct ieee80211_hdr_3addr))
return -1; /* corrupted */
- memcpy(src,header->addr2, ETH_ALEN);
+ memcpy(src, header->addr2, ETH_ALEN);
- skbend = (u8*)skb->data + skb->len;
+ skbend = (u8 *)skb->data + skb->len;
- tag = skb->data + sizeof (struct ieee80211_hdr_3addr );
+ tag = skb->data + sizeof(struct ieee80211_hdr_3addr);
- while (tag+1 < skbend){
- if (*tag == 0){
+ while (tag+1 < skbend) {
+ if (*tag == 0) {
ssid = tag+2;
ssidlen = *(tag+1);
break;
@@ -1499,10 +1335,12 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
tag++; /* point to the next tag */
}
- //IEEE80211DMESG("Card MAC address is "MACSTR, MAC2STR(src));
- if (ssidlen == 0) return 1;
+ if (ssidlen == 0)
+ return 1;
+
+ if (!ssid)
+ return 1; /* ssid not found in tagged param */
- if (!ssid) return 1; /* ssid not found in tagged param */
return (!strncmp(ssid, ieee->current_network.ssid, ssidlen));
}
@@ -1514,13 +1352,13 @@ static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
if (skb->len < (sizeof(struct ieee80211_assoc_request_frame) -
sizeof(struct ieee80211_info_element))) {
- IEEE80211_DEBUG_MGMT("invalid len in auth request:%d \n", skb->len);
+ IEEE80211_DEBUG_MGMT("invalid len in auth request:%d\n", skb->len);
return -1;
}
- a = (struct ieee80211_assoc_request_frame*) skb->data;
+ a = (struct ieee80211_assoc_request_frame *) skb->data;
- memcpy(dest,a->header.addr2,ETH_ALEN);
+ memcpy(dest, a->header.addr2, ETH_ALEN);
return 0;
}
@@ -1528,12 +1366,12 @@ static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
static inline u16 assoc_parse(struct sk_buff *skb, int *aid)
{
struct ieee80211_assoc_response_frame *a;
- if (skb->len < sizeof(struct ieee80211_assoc_response_frame)){
+ if (skb->len < sizeof(struct ieee80211_assoc_response_frame)) {
IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
return 0xcafe;
}
- a = (struct ieee80211_assoc_response_frame*) skb->data;
+ a = (struct ieee80211_assoc_response_frame *) skb->data;
*aid = le16_to_cpu(a->aid) & 0x3fff;
return le16_to_cpu(a->status);
}
@@ -1543,11 +1381,8 @@ static inline void ieee80211_rx_probe_rq(struct ieee80211_device *ieee,
{
u8 dest[ETH_ALEN];
- //IEEE80211DMESG("Rx probe");
ieee->softmac_stats.rx_probe_rq++;
- //DMESG("Dest is "MACSTR, MAC2STR(dest));
- if (probe_rq_parse(ieee, skb, dest)){
- //IEEE80211DMESG("Was for me!");
+ if (probe_rq_parse(ieee, skb, dest)) {
ieee->softmac_stats.tx_probe_rs++;
ieee80211_resp_to_probe(ieee, dest);
}
@@ -1558,115 +1393,92 @@ inline void ieee80211_rx_auth_rq(struct ieee80211_device *ieee,
{
u8 dest[ETH_ALEN];
int status;
- //IEEE80211DMESG("Rx probe");
ieee->softmac_stats.rx_auth_rq++;
status = auth_rq_parse(skb, dest);
- if (status != -1) {
+ if (status != -1)
ieee80211_resp_to_auth(ieee, status, dest);
- }
- //DMESG("Dest is "MACSTR, MAC2STR(dest));
-
}
- inline void
+inline void
ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
{
u8 dest[ETH_ALEN];
- //unsigned long flags;
ieee->softmac_stats.rx_ass_rq++;
- if (assoc_rq_parse(skb,dest) != -1){
+ if (assoc_rq_parse(skb, dest) != -1)
ieee80211_resp_to_assoc_rq(ieee, dest);
- }
+
printk(KERN_INFO"New client associated: %pM\n", dest);
}
-
-
void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr)
{
-
struct sk_buff *buf = ieee80211_null_func(ieee, pwr);
if (buf)
softmac_ps_mgmt_xmit(buf, ieee);
-
}
-
static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
u32 *time_l)
{
- int timeout = 0;
+ int timeout = 0;
u8 dtim;
- /*if(ieee->ps == IEEE80211_PS_DISABLED ||
- ieee->iw_mode != IW_MODE_INFRA ||
- ieee->state != IEEE80211_LINKED)
-
- return 0;
- */
dtim = ieee->current_network.dtim_data;
- //printk("DTIM\n");
- if(!(dtim & IEEE80211_DTIM_VALID))
+ if (!(dtim & IEEE80211_DTIM_VALID))
return 0;
- else
- timeout = ieee->current_network.beacon_interval;
+ else
+ timeout = ieee->current_network.beacon_interval;
- //printk("VALID\n");
ieee->current_network.dtim_data = IEEE80211_DTIM_INVALID;
- if(dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST)& ieee->ps))
+ if (dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST) & ieee->ps))
return 2;
- if(!time_after(jiffies, ieee->dev->trans_start + MSECS(timeout)))
+ if (!time_after(jiffies, ieee->dev->trans_start + MSECS(timeout)))
return 0;
- if(!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout)))
+ if (!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout)))
return 0;
- if((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE ) &&
+ if ((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) &&
(ieee->mgmt_queue_tail != ieee->mgmt_queue_head))
return 0;
- if(time_l){
+ if (time_l) {
*time_l = ieee->current_network.last_dtim_sta_time[0]
+ MSECS((ieee->current_network.beacon_interval));
- //* ieee->current_network.dtim_period));
- //printk("beacon_interval:%x, dtim_period:%x, totol to Msecs:%x, HZ:%x\n", ieee->current_network.beacon_interval, ieee->current_network.dtim_period, MSECS(((ieee->current_network.beacon_interval * ieee->current_network.dtim_period))), HZ);
}
- if(time_h){
+ if (time_h) {
*time_h = ieee->current_network.last_dtim_sta_time[1];
- if(time_l && *time_l < ieee->current_network.last_dtim_sta_time[0])
+ if (time_l && *time_l < ieee->current_network.last_dtim_sta_time[0])
*time_h += 1;
}
return 1;
-
-
}
static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
{
- u32 th,tl;
+ u32 th, tl;
short sleep;
- unsigned long flags,flags2;
+ unsigned long flags, flags2;
spin_lock_irqsave(&ieee->lock, flags);
- if((ieee->ps == IEEE80211_PS_DISABLED ||
-
+ if ((ieee->ps == IEEE80211_PS_DISABLED ||
ieee->iw_mode != IW_MODE_INFRA ||
- ieee->state != IEEE80211_LINKED)){
+ ieee->state != IEEE80211_LINKED)) {
- //#warning CHECK_LOCK_HERE
+ /* #warning CHECK_LOCK_HERE */
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
ieee80211_sta_wakeup(ieee, 1);
@@ -1674,71 +1486,57 @@ static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
}
- sleep = ieee80211_sta_ps_sleep(ieee,&th, &tl);
-// printk("===>%s,%d[2 wake, 1 sleep, 0 do nothing], ieee->sta_sleep = %d\n",__func__, sleep,ieee->sta_sleep);
+ sleep = ieee80211_sta_ps_sleep(ieee, &th, &tl);
/* 2 wake, 1 sleep, 0 do nothing */
- if(sleep == 0)
+ if (sleep == 0)
goto out;
- if(sleep == 1){
-
- if(ieee->sta_sleep == 1)
- ieee->enter_sleep_state(ieee->dev,th,tl);
+ if (sleep == 1) {
+ if (ieee->sta_sleep == 1)
+ ieee->enter_sleep_state(ieee->dev, th, tl);
- else if(ieee->sta_sleep == 0){
- // printk("send null 1\n");
+ else if (ieee->sta_sleep == 0) {
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
-
- if(ieee->ps_is_queue_empty(ieee->dev)){
-
-
+ if (ieee->ps_is_queue_empty(ieee->dev)) {
ieee->sta_sleep = 2;
ieee->ps_request_tx_ack(ieee->dev);
- ieee80211_sta_ps_send_null_frame(ieee,1);
+ ieee80211_sta_ps_send_null_frame(ieee, 1);
ieee->ps_th = th;
ieee->ps_tl = tl;
}
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
-
}
-
-
- }else if(sleep == 2){
-//#warning CHECK_LOCK_HERE
+ } else if (sleep == 2) {
+ /* #warning CHECK_LOCK_HERE */
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
- // printk("send wakeup packet\n");
- ieee80211_sta_wakeup(ieee,1);
+ ieee80211_sta_wakeup(ieee, 1);
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
}
-
out:
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl)
{
- if(ieee->sta_sleep == 0){
- if(nl){
- // printk("Warning: driver is probably failing to report TX ps error\n");
+ if (ieee->sta_sleep == 0) {
+ if (nl) {
ieee->ps_request_tx_ack(ieee->dev);
ieee80211_sta_ps_send_null_frame(ieee, 0);
}
return;
-
}
- if(ieee->sta_sleep == 1)
+ if (ieee->sta_sleep == 1)
ieee->sta_wake_up(ieee->dev);
ieee->sta_sleep = 0;
- if(nl){
+ if (nl) {
ieee->ps_request_tx_ack(ieee->dev);
ieee80211_sta_ps_send_null_frame(ieee, 0);
}
@@ -1746,25 +1544,20 @@ void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl)
void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
{
- unsigned long flags,flags2;
+ unsigned long flags, flags2;
spin_lock_irqsave(&ieee->lock, flags);
- if(ieee->sta_sleep == 2){
+ if (ieee->sta_sleep == 2) {
/* Null frame with PS bit set */
- if(success){
-
- // printk("==================> %s::enter sleep state\n",__func__);
+ if (success) {
ieee->sta_sleep = 1;
- ieee->enter_sleep_state(ieee->dev,ieee->ps_th,ieee->ps_tl);
+ ieee->enter_sleep_state(ieee->dev, ieee->ps_th, ieee->ps_tl);
}
/* if the card reports no success we can't be sure the AP
* has not RXed it, so we can't assume the AP believes we are awake
*/
- }
- /* 21112005 - tx again null without PS bit if lost */
- else {
-
- if((ieee->sta_sleep == 0) && !success){
+ } else {
+ if ((ieee->sta_sleep == 0) && !success) {
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
ieee80211_sta_ps_send_null_frame(ieee, 0);
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
@@ -1780,16 +1573,16 @@ inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
{
struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data;
u16 errcode;
- u8* challenge=NULL;
- int chlen=0;
- int aid=0;
+ u8 *challenge = NULL;
+ int chlen = 0;
+ int aid = 0;
struct ieee80211_assoc_response_frame *assoc_resp;
struct ieee80211_info_element *info_element;
- if(!ieee->proto_started)
+ if (!ieee->proto_started)
return 0;
- if(ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
+ if (ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
ieee->iw_mode == IW_MODE_INFRA &&
ieee->state == IEEE80211_LINKED))
@@ -1800,30 +1593,27 @@ inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
ieee->last_rx_ps_time = jiffies;
switch (WLAN_FC_GET_STYPE(header->frame_control)) {
-
case IEEE80211_STYPE_ASSOC_RESP:
case IEEE80211_STYPE_REASSOC_RESP:
-
IEEE80211_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
WLAN_FC_GET_STYPE(header->frame_ctl));
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
- ieee->iw_mode == IW_MODE_INFRA){
- if (0 == (errcode=assoc_parse(skb, &aid))){
+ ieee->iw_mode == IW_MODE_INFRA) {
+ errcode = assoc_parse(skb, &aid);
+ if (0 == errcode) {
u16 left;
- ieee->state=IEEE80211_LINKED;
+ ieee->state = IEEE80211_LINKED;
ieee->assoc_id = aid;
ieee->softmac_stats.rx_ass_ok++;
-
- //printk(KERN_WARNING "nic_type = %s", (rx_stats->nic_type == 1)?"rtl8187":"rtl8187B");
- if(1 == rx_stats->nic_type) //card type is 8187
- {
+ /* card type is 8187 */
+ if (1 == rx_stats->nic_type)
goto associate_complete;
- }
- assoc_resp = (struct ieee80211_assoc_response_frame*)skb->data;
- info_element = &assoc_resp->info_element;
- left = skb->len - ((void*)info_element - (void*)assoc_resp);
+
+ assoc_resp = (struct ieee80211_assoc_response_frame *)skb->data;
+ info_element = &assoc_resp->info_element;
+ left = skb->len - ((void *)info_element - (void *)assoc_resp);
while (left >= sizeof(struct ieee80211_info_element_hdr)) {
if (sizeof(struct ieee80211_info_element_hdr) + info_element->len > left) {
@@ -1832,32 +1622,33 @@ inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
}
switch (info_element->id) {
case MFIE_TYPE_GENERIC:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len);
+ IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len);
if (info_element->len >= 8 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x50 &&
info_element->data[2] == 0xf2 &&
info_element->data[3] == 0x02 &&
info_element->data[4] == 0x01) {
- // Not care about version at present.
- //WMM Parameter Element
- memcpy(ieee->current_network.wmm_param,(u8*)(info_element->data\
- + 8),(info_element->len - 8));
-
- if (((ieee->current_network.wmm_info^info_element->data[6])& \
- 0x0f)||(!ieee->init_wmmparam_flag)) {
- // refresh parameter element for current network
- // update the register parameter for hardware
- ieee->init_wmmparam_flag = 1;
- queue_work(ieee->wq, &ieee->wmm_param_update_wq);
-
- }
- //update info_element for current network
- ieee->current_network.wmm_info = info_element->data[6];
+ /* Don't care about the version at present.
+ * WMM Parameter Element.
+ */
+ memcpy(ieee->current_network.wmm_param,
+        (u8 *)(info_element->data + 8), (info_element->len - 8));
+
+ if (((ieee->current_network.wmm_info ^ info_element->data[6]) &
+      0x0f) || (!ieee->init_wmmparam_flag)) {
+ /* refresh the parameter element for the current network
+ * and update the register parameter for the hardware.
+ */
+ ieee->init_wmmparam_flag = 1;
+ queue_work(ieee->wq, &ieee->wmm_param_update_wq);
+ }
+ /* update info_element for current network */
+ ieee->current_network.wmm_info = info_element->data[6];
}
break;
default:
- //nothing to do at present!!!
+ /* nothing to do at present!!! */
break;
}
@@ -1866,14 +1657,14 @@ inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
info_element = (struct ieee80211_info_element *)
&info_element->data[info_element->len];
}
- if(!ieee->init_wmmparam_flag) //legacy AP, reset the AC_xx_param register
- {
- queue_work(ieee->wq,&ieee->wmm_param_update_wq);
- ieee->init_wmmparam_flag = 1;//indicate AC_xx_param upated since last associate
+ /* legacy AP, reset the AC_xx_param register */
+ if (!ieee->init_wmmparam_flag) {
+ queue_work(ieee->wq, &ieee->wmm_param_update_wq);
+ ieee->init_wmmparam_flag = 1; /* indicate AC_xx_param updated since the last association */
}
associate_complete:
ieee80211_associate_complete(ieee);
- }else{
+ } else {
ieee->softmac_stats.rx_ass_err++;
IEEE80211_DEBUG_MGMT(
"Association response status code 0x%x\n",
@@ -1882,47 +1673,41 @@ associate_complete:
}
}
break;
-
case IEEE80211_STYPE_ASSOC_REQ:
case IEEE80211_STYPE_REASSOC_REQ:
-
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
ieee->iw_mode == IW_MODE_MASTER)
ieee80211_rx_assoc_rq(ieee, skb);
break;
-
case IEEE80211_STYPE_AUTH:
-
- if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE){
+ if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) {
if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING &&
ieee->iw_mode == IW_MODE_INFRA){
-
IEEE80211_DEBUG_MGMT("Received authentication response");
- if (0 == (errcode=auth_parse(skb, &challenge, &chlen))){
- if(ieee->open_wep || !challenge){
+ errcode = auth_parse(skb, &challenge, &chlen);
+ if (0 == errcode) {
+ if (ieee->open_wep || !challenge) {
ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATED;
ieee->softmac_stats.rx_auth_rs_ok++;
ieee80211_associate_step2(ieee);
- }else{
+ } else {
ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
}
- }else{
+ } else {
ieee->softmac_stats.rx_auth_rs_err++;
- IEEE80211_DEBUG_MGMT("Authentication response status code 0x%x",errcode);
+ IEEE80211_DEBUG_MGMT("Authentication response status code 0x%x", errcode);
ieee80211_associate_abort(ieee);
}
- }else if (ieee->iw_mode == IW_MODE_MASTER){
+ } else if (ieee->iw_mode == IW_MODE_MASTER) {
ieee80211_rx_auth_rq(ieee, skb);
}
}
break;
-
case IEEE80211_STYPE_PROBE_REQ:
-
if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
((ieee->iw_mode == IW_MODE_ADHOC ||
ieee->iw_mode == IW_MODE_MASTER) &&
@@ -1930,36 +1715,28 @@ associate_complete:
ieee80211_rx_probe_rq(ieee, skb);
break;
-
case IEEE80211_STYPE_DISASSOC:
case IEEE80211_STYPE_DEAUTH:
/* FIXME for now repeat all the association procedure
- * both for disassociation and deauthentication
- */
+ * both for disassociation and deauthentication
+ */
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
(ieee->state == IEEE80211_LINKED) &&
(ieee->iw_mode == IW_MODE_INFRA) &&
- (!memcmp(header->addr2,ieee->current_network.bssid,ETH_ALEN))){
+ (!memcmp(header->addr2, ieee->current_network.bssid, ETH_ALEN))) {
ieee->state = IEEE80211_ASSOCIATING;
ieee->softmac_stats.reassoc++;
- //notify_wx_assoc_event(ieee); //YJ,del,080828, do not notify os here
queue_work(ieee->wq, &ieee->associate_procedure_wq);
}
-
break;
-
default:
return -1;
break;
}
-
- //dev_kfree_skb_any(skb);
return 0;
}
-
-
/* following are for a simpler TX queue management.
* Instead of using netif_[stop/wake]_queue the driver
* will use these two functions (plus a reset one), that
@@ -1982,27 +1759,23 @@ associate_complete:
void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
struct ieee80211_device *ieee)
{
-
-
unsigned long flags;
int i;
- spin_lock_irqsave(&ieee->lock,flags);
+ spin_lock_irqsave(&ieee->lock, flags);
/* called with 2nd parm 0, no tx mgmt lock required */
- ieee80211_sta_wakeup(ieee,0);
-
- for(i = 0; i < txb->nr_frags; i++) {
+ ieee80211_sta_wakeup(ieee, 0);
- if (ieee->queue_stop){
+ for (i = 0; i < txb->nr_frags; i++) {
+ if (ieee->queue_stop) {
ieee->tx_pending.txb = txb;
ieee->tx_pending.frag = i;
goto exit;
- }else{
+ } else {
ieee->softmac_data_hard_start_xmit(
txb->fragments[i],
- ieee->dev,ieee->rate);
- //(i+1)<txb->nr_frags);
+ ieee->dev, ieee->rate);
ieee->stats.tx_packets++;
ieee->stats.tx_bytes += txb->fragments[i]->len;
ieee->dev->trans_start = jiffies;
@@ -2012,66 +1785,59 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
ieee80211_txb_free(txb);
exit:
- spin_unlock_irqrestore(&ieee->lock,flags);
-
+ spin_unlock_irqrestore(&ieee->lock, flags);
}
/* called with ieee->lock acquired */
static void ieee80211_resume_tx(struct ieee80211_device *ieee)
{
int i;
- for(i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
+ for (i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
- if (ieee->queue_stop){
+ if (ieee->queue_stop) {
ieee->tx_pending.frag = i;
return;
- }else{
-
+ } else {
ieee->softmac_data_hard_start_xmit(
ieee->tx_pending.txb->fragments[i],
- ieee->dev,ieee->rate);
- //(i+1)<ieee->tx_pending.txb->nr_frags);
+ ieee->dev, ieee->rate);
ieee->stats.tx_packets++;
ieee->dev->trans_start = jiffies;
}
}
-
ieee80211_txb_free(ieee->tx_pending.txb);
ieee->tx_pending.txb = NULL;
}
-
void ieee80211_reset_queue(struct ieee80211_device *ieee)
{
unsigned long flags;
- spin_lock_irqsave(&ieee->lock,flags);
+ spin_lock_irqsave(&ieee->lock, flags);
init_mgmt_queue(ieee);
- if (ieee->tx_pending.txb){
+ if (ieee->tx_pending.txb) {
ieee80211_txb_free(ieee->tx_pending.txb);
ieee->tx_pending.txb = NULL;
}
ieee->queue_stop = 0;
- spin_unlock_irqrestore(&ieee->lock,flags);
-
+ spin_unlock_irqrestore(&ieee->lock, flags);
}
void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
{
-
unsigned long flags;
struct sk_buff *skb;
struct ieee80211_hdr_3addr *header;
- spin_lock_irqsave(&ieee->lock,flags);
- if (! ieee->queue_stop) goto exit;
+ spin_lock_irqsave(&ieee->lock, flags);
+ if (!ieee->queue_stop)
+ goto exit;
ieee->queue_stop = 0;
- if(ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE){
- while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))){
-
+ if (ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) {
+ while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))) {
header = (struct ieee80211_hdr_3addr *) skb->data;
header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
@@ -2081,42 +1847,32 @@ void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
else
ieee->seq_ctrl[0]++;
- //printk(KERN_ALERT "ieee80211_wake_queue \n");
- ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
- dev_kfree_skb_any(skb);//edit by thomas
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
+ dev_kfree_skb_any(skb);
}
}
if (!ieee->queue_stop && ieee->tx_pending.txb)
ieee80211_resume_tx(ieee);
- if (!ieee->queue_stop && netif_queue_stopped(ieee->dev)){
+ if (!ieee->queue_stop && netif_queue_stopped(ieee->dev)) {
ieee->softmac_stats.swtxawake++;
netif_wake_queue(ieee->dev);
}
-
-exit :
- spin_unlock_irqrestore(&ieee->lock,flags);
+exit:
+ spin_unlock_irqrestore(&ieee->lock, flags);
}
-
void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
{
- //unsigned long flags;
- //spin_lock_irqsave(&ieee->lock,flags);
-
- if (! netif_queue_stopped(ieee->dev)){
+ if (!netif_queue_stopped(ieee->dev)) {
netif_stop_queue(ieee->dev);
ieee->softmac_stats.swtxstop++;
}
ieee->queue_stop = 1;
- //spin_unlock_irqrestore(&ieee->lock,flags);
-
}
-
inline void ieee80211_randomize_cell(struct ieee80211_device *ieee)
{
-
random_ether_addr(ieee->current_network.bssid);
}
@@ -2125,7 +1881,7 @@ void ieee80211_start_master_bss(struct ieee80211_device *ieee)
{
ieee->assoc_id = 1;
- if (ieee->current_network.ssid_len == 0){
+ if (ieee->current_network.ssid_len == 0) {
strncpy(ieee->current_network.ssid,
IEEE80211_DEFAULT_TX_ESSID,
IW_ESSID_MAX_SIZE);
@@ -2149,7 +1905,7 @@ void ieee80211_start_master_bss(struct ieee80211_device *ieee)
static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
{
- if(ieee->raw_tx){
+ if (ieee->raw_tx) {
if (ieee->data_hard_resume)
ieee->data_hard_resume(ieee->dev);
@@ -2173,9 +1929,8 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
down(&ieee->wx_sem);
-
- if (ieee->current_network.ssid_len == 0){
- strcpy(ieee->current_network.ssid,IEEE80211_DEFAULT_TX_ESSID);
+ if (ieee->current_network.ssid_len == 0) {
+ strcpy(ieee->current_network.ssid, IEEE80211_DEFAULT_TX_ESSID);
ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID);
ieee->ssid_set = 1;
}
@@ -2183,7 +1938,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
/* check if we have this cell in our network list */
ieee80211_softmac_check_all_nets(ieee);
- if(ieee->state == IEEE80211_NOLINK)
+ if (ieee->state == IEEE80211_NOLINK)
ieee->current_network.channel = 10;
/* if not then the state is not linked. Maybe the user switched to
* ad-hoc mode just after being in monitor mode, or just after
@@ -2203,13 +1958,12 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
ieee80211_start_scan_syncro(ieee);
/* the network definitively is not here.. create a new cell */
- if (ieee->state == IEEE80211_NOLINK){
+ if (ieee->state == IEEE80211_NOLINK) {
printk("creating new IBSS cell\n");
- if(!ieee->wap_set)
+ if (!ieee->wap_set)
ieee80211_randomize_cell(ieee);
- if(ieee->modulation & IEEE80211_CCK_MODULATION){
-
+ if (ieee->modulation & IEEE80211_CCK_MODULATION) {
ieee->current_network.rates_len = 4;
ieee->current_network.rates[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
@@ -2217,10 +1971,10 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
ieee->current_network.rates[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
ieee->current_network.rates[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
- }else
+ } else
ieee->current_network.rates_len = 0;
- if(ieee->modulation & IEEE80211_OFDM_MODULATION){
+ if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
ieee->current_network.rates_ex_len = 8;
ieee->current_network.rates_ex[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
@@ -2233,19 +1987,18 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
ieee->current_network.rates_ex[7] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
ieee->rate = 540;
- }else{
+ } else {
ieee->current_network.rates_ex_len = 0;
ieee->rate = 110;
}
- // By default, WMM function will be disabled in IBSS mode
+ /* By default, WMM function will be disabled in IBSS mode */
ieee->current_network.QoS_Enable = 0;
ieee->current_network.atim_window = 0;
ieee->current_network.capability = WLAN_CAPABILITY_IBSS;
- if(ieee->short_slot)
+ if (ieee->short_slot)
ieee->current_network.capability |= WLAN_CAPABILITY_SHORT_SLOT;
-
}
ieee->state = IEEE80211_LINKED;
@@ -2264,6 +2017,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
up(&ieee->wx_sem);
}
+
inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
{
queue_delayed_work(ieee->wq, &ieee->start_ibss_wq, 100);
@@ -2273,19 +2027,15 @@ inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
void ieee80211_start_bss(struct ieee80211_device *ieee)
{
unsigned long flags;
- //
- // Ref: 802.11d 11.1.3.3
- // STA shall not start a BSS unless properly formed Beacon frame including a Country IE.
- //
- if(IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee))
- {
- if(! ieee->bGlobalDomain)
- {
+ /* Ref: 802.11d 11.1.3.3
+ * A STA shall not start a BSS without a properly formed Beacon frame
+ * including a Country IE.
+ */
+ if (IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee)) {
+ if (!ieee->bGlobalDomain)
return;
- }
}
- /* check if we have already found the net we
- * are interested in (if any).
+ /* check if we have already found the net we are interested in (if any).
* if not (we are disassociated and we are not
* in associating / authenticating phase) start the background scanning.
*/
@@ -2300,14 +2050,10 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
*/
spin_lock_irqsave(&ieee->lock, flags);
-//#ifdef ENABLE_IPS
-// printk("start bss ENABLE_IPS\n");
-//#else
- if (ieee->state == IEEE80211_NOLINK){
+ if (ieee->state == IEEE80211_NOLINK) {
ieee->actscanning = true;
ieee80211_rtl_start_scan(ieee);
}
-//#endif
spin_unlock_irqrestore(&ieee->lock, flags);
}
@@ -2322,7 +2068,7 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
if (ieee->data_hard_stop)
ieee->data_hard_stop(ieee->dev);
- if(IS_DOT11D_ENABLE(ieee))
+ if (IS_DOT11D_ENABLE(ieee))
Dot11d_Reset(ieee);
ieee->link_change(ieee->dev);
@@ -2337,9 +2083,9 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
unsigned long flags;
down(&ieee->wx_sem);
- if(!ieee->proto_started)
+ if (!ieee->proto_started)
goto exit;
- if(ieee->state != IEEE80211_ASSOCIATING_RETRY)
+ if (ieee->state != IEEE80211_ASSOCIATING_RETRY)
goto exit;
/* as long as we do not set the state to IEEE80211_NOLINK
* there is no possibility of someone else trying
@@ -2360,17 +2106,13 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
spin_lock_irqsave(&ieee->lock, flags);
- if(ieee->state == IEEE80211_NOLINK){
+ if (ieee->state == IEEE80211_NOLINK) {
ieee->beinretry = false;
ieee->actscanning = true;
ieee80211_rtl_start_scan(ieee);
}
- //YJ,add,080828, notify os here
- if(ieee->state == IEEE80211_NOLINK)
- {
+ if (ieee->state == IEEE80211_NOLINK)
notify_wx_assoc_event(ieee);
- }
- //YJ,add,080828,end
spin_unlock_irqrestore(&ieee->lock, flags);
exit:
@@ -2379,7 +2121,7 @@ exit:
struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
{
- u8 broadcast_addr[] = {0xff,0xff,0xff,0xff,0xff,0xff};
+ u8 broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct sk_buff *skb = NULL;
struct ieee80211_probe_response *b;
@@ -2392,7 +2134,6 @@ struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
b->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_BEACON);
return skb;
-
}
struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee)
@@ -2401,7 +2142,7 @@ struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee)
struct ieee80211_probe_response *b;
skb = ieee80211_get_beacon_(ieee);
- if(!skb)
+ if (!skb)
return NULL;
b = (struct ieee80211_probe_response *) skb->data;
@@ -2423,7 +2164,6 @@ void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee)
up(&ieee->wx_sem);
}
-
void ieee80211_stop_protocol(struct ieee80211_device *ieee)
{
if (!ieee->proto_started)
@@ -2432,9 +2172,9 @@ void ieee80211_stop_protocol(struct ieee80211_device *ieee)
ieee->proto_started = 0;
ieee80211_stop_send_beacons(ieee);
- if((ieee->iw_mode == IW_MODE_INFRA)&&(ieee->state == IEEE80211_LINKED)) {
- SendDisassociation(ieee,NULL,WLAN_REASON_DISASSOC_STA_HAS_LEFT);
- }
+ if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state == IEEE80211_LINKED))
+ SendDisassociation(ieee, NULL, WLAN_REASON_DISASSOC_STA_HAS_LEFT);
+
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
cancel_delayed_work(&ieee->start_ibss_wq);
@@ -2454,36 +2194,35 @@ void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee)
void ieee80211_start_protocol(struct ieee80211_device *ieee)
{
short ch = 0;
- int i = 0;
+ int i = 0;
if (ieee->proto_started)
return;
ieee->proto_started = 1;
- if (ieee->current_network.channel == 0){
- do{
+ if (ieee->current_network.channel == 0) {
+ do {
ch++;
if (ch > MAX_CHANNEL_NUMBER)
return; /* no channel found */
- }while(!GET_DOT11D_INFO(ieee)->channel_map[ch]);
+ } while (!GET_DOT11D_INFO(ieee)->channel_map[ch]);
ieee->current_network.channel = ch;
}
if (ieee->current_network.beacon_interval == 0)
ieee->current_network.beacon_interval = 100;
- ieee->set_chan(ieee->dev,ieee->current_network.channel);
+ ieee->set_chan(ieee->dev, ieee->current_network.channel);
- for(i = 0; i < 17; i++) {
- ieee->last_rxseq_num[i] = -1;
- ieee->last_rxfrag_num[i] = -1;
- ieee->last_packet_time[i] = 0;
+ for (i = 0; i < 17; i++) {
+ ieee->last_rxseq_num[i] = -1;
+ ieee->last_rxfrag_num[i] = -1;
+ ieee->last_packet_time[i] = 0;
}
- ieee->init_wmmparam_flag = 0;//reinitialize AC_xx_PARAM registers.
-
+ ieee->init_wmmparam_flag = 0; /* reinitialize AC_xx_PARAM registers. */
/* if the user set the MAC of the ad-hoc cell and then
* switch to managed mode, shall we make sure that association
@@ -2493,7 +2232,7 @@ void ieee80211_start_protocol(struct ieee80211_device *ieee)
switch (ieee->iw_mode) {
case IW_MODE_AUTO:
ieee->iw_mode = IW_MODE_INFRA;
- //not set break here intentionly
+ /* break intentionally omitted here; fall through */
case IW_MODE_INFRA:
ieee80211_start_bss(ieee);
break;
@@ -2517,7 +2256,6 @@ void ieee80211_start_protocol(struct ieee80211_device *ieee)
}
}
-
#define DRV_NAME "Ieee80211"
void ieee80211_softmac_init(struct ieee80211_device *ieee)
{
@@ -2526,36 +2264,29 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->state = IEEE80211_NOLINK;
ieee->sync_scan_hurryup = 0;
- for(i = 0; i < 5; i++) {
- ieee->seq_ctrl[i] = 0;
- }
+ for (i = 0; i < 5; i++)
+ ieee->seq_ctrl[i] = 0;
ieee->assoc_id = 0;
ieee->queue_stop = 0;
ieee->scanning = 0;
- ieee->softmac_features = 0; //so IEEE2100-like driver are happy
+ ieee->softmac_features = 0; /* so IEEE2100-like drivers are happy */
ieee->wap_set = 0;
ieee->ssid_set = 0;
ieee->proto_started = 0;
ieee->basic_rate = IEEE80211_DEFAULT_BASIC_RATE;
ieee->rate = 3;
-//#ifdef ENABLE_LPS
ieee->ps = IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST;
-//#else
-// ieee->ps = IEEE80211_PS_DISABLED;
-//#endif
ieee->sta_sleep = 0;
-//by amy
ieee->bInactivePs = false;
ieee->actscanning = false;
ieee->ListenInterval = 2;
- ieee->NumRxDataInPeriod = 0; //YJ,add,080828
- ieee->NumRxBcnInPeriod = 0; //YJ,add,080828
- ieee->NumRxOkTotal = 0;//+by amy 080312
- ieee->NumRxUnicast = 0;//YJ,add,080828,for keep alive
+ ieee->NumRxDataInPeriod = 0;
+ ieee->NumRxBcnInPeriod = 0;
+ ieee->NumRxOkTotal = 0;
+ ieee->NumRxUnicast = 0; /* for keep alive */
ieee->beinretry = false;
ieee->bHwRadioOff = false;
-//by amy
init_mgmt_queue(ieee);
@@ -2571,13 +2302,12 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->wq = create_workqueue(DRV_NAME);
- INIT_DELAYED_WORK(&ieee->start_ibss_wq,(void*) ieee80211_start_ibss_wq);
- INIT_WORK(&ieee->associate_complete_wq,(void*) ieee80211_associate_complete_wq);
- INIT_WORK(&ieee->associate_procedure_wq,(void*) ieee80211_associate_procedure_wq);
- INIT_DELAYED_WORK(&ieee->softmac_scan_wq,(void*) ieee80211_softmac_scan_wq);
- INIT_DELAYED_WORK(&ieee->associate_retry_wq,(void*) ieee80211_associate_retry_wq);
- INIT_WORK(&ieee->wx_sync_scan_wq,(void*) ieee80211_wx_sync_scan_wq);
-// INIT_WORK(&ieee->watch_dog_wq,(void*) ieee80211_watch_dog_wq);
+ INIT_DELAYED_WORK(&ieee->start_ibss_wq, (void *) ieee80211_start_ibss_wq);
+ INIT_WORK(&ieee->associate_complete_wq, (void *) ieee80211_associate_complete_wq);
+ INIT_WORK(&ieee->associate_procedure_wq, (void *) ieee80211_associate_procedure_wq);
+ INIT_DELAYED_WORK(&ieee->softmac_scan_wq, (void *) ieee80211_softmac_scan_wq);
+ INIT_DELAYED_WORK(&ieee->associate_retry_wq, (void *) ieee80211_associate_retry_wq);
+ INIT_WORK(&ieee->wx_sync_scan_wq, (void *) ieee80211_wx_sync_scan_wq);
sema_init(&ieee->wx_sem, 1);
sema_init(&ieee->scan_sem, 1);
@@ -2598,8 +2328,7 @@ void ieee80211_softmac_free(struct ieee80211_device *ieee)
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
-
- //add for RF power on power of by lizhaoming 080512
+ /* added for RF power on/power off */
cancel_delayed_work(&ieee->GPIOChangeRFWorkItem);
destroy_workqueue(ieee->wq);
@@ -2607,22 +2336,16 @@ void ieee80211_softmac_free(struct ieee80211_device *ieee)
up(&ieee->wx_sem);
}
-/********************************************************
- * Start of WPA code. *
- * this is stolen from the ipw2200 driver *
- ********************************************************/
-
-
+/* Start of WPA code. This is stolen from the ipw2200 driver */
static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value)
{
/* This is called when wpa_supplicant loads and closes the driver
* interface. */
- printk("%s WPA\n",value ? "enabling" : "disabling");
+ printk("%s WPA\n", value ? "enabling" : "disabling");
ieee->wpa_enabled = value;
return 0;
}
-
static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie,
int wpa_ie_len)
{
@@ -2632,16 +2355,14 @@ static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_i
ieee80211_disassociate(ieee);
}
-
static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command,
int reason)
{
-
int ret = 0;
switch (command) {
case IEEE_MLME_STA_DEAUTH:
- // silently ignore
+ /* silently ignore */
break;
case IEEE_MLME_STA_DISASSOC:
@@ -2656,7 +2377,6 @@ static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command,
return ret;
}
-
static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
struct ieee_param *param, int plen)
{
@@ -2690,7 +2410,6 @@ static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
{
-
struct ieee80211_security sec = {
.flags = SEC_AUTH_MODE,
};
@@ -2715,7 +2434,7 @@ static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name,
u32 value)
{
- int ret=0;
+ int ret = 0;
unsigned long flags;
switch (name) {
@@ -2724,7 +2443,7 @@ static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name,
break;
case IEEE_PARAM_TKIP_COUNTERMEASURES:
- ieee->tkip_countermeasures=value;
+ ieee->tkip_countermeasures = value;
break;
case IEEE_PARAM_DROP_UNENCRYPTED: {
@@ -2743,15 +2462,14 @@ static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name,
.flags = SEC_ENABLED,
.enabled = value,
};
- ieee->drop_unencrypted = value;
+ ieee->drop_unencrypted = value;
/* We only change SEC_LEVEL for open mode. Others
* are set by ipw_wpa_set_encryption.
*/
if (!value) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_0;
- }
- else {
+ } else {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
}
@@ -2761,27 +2479,22 @@ static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name,
}
case IEEE_PARAM_PRIVACY_INVOKED:
- ieee->privacy_invoked=value;
+ ieee->privacy_invoked = value;
break;
-
case IEEE_PARAM_AUTH_ALGS:
ret = ieee80211_wpa_set_auth_algs(ieee, value);
break;
-
case IEEE_PARAM_IEEE_802_1X:
- ieee->ieee802_1x=value;
+ ieee->ieee802_1x = value;
break;
case IEEE_PARAM_WPAX_SELECT:
- // added for WPA2 mixed mode
- //printk(KERN_WARNING "------------------------>wpax value = %x\n", value);
- spin_lock_irqsave(&ieee->wpax_suitlist_lock,flags);
+ spin_lock_irqsave(&ieee->wpax_suitlist_lock, flags);
ieee->wpax_type_set = 1;
ieee->wpax_type_notify = value;
- spin_unlock_irqrestore(&ieee->wpax_suitlist_lock,flags);
+ spin_unlock_irqrestore(&ieee->wpax_suitlist_lock, flags);
break;
-
default:
- printk("Unknown WPA param: %d\n",name);
+ printk("Unknown WPA param: %d\n", name);
ret = -EOPNOTSUPP;
}
@@ -2823,8 +2536,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
if (strcmp(param->u.crypt.alg, "none") == 0) {
if (crypt) {
sec.enabled = 0;
- // FIXME FIXME
- //sec.encrypt = 0;
+ /* FIXME FIXME */
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_ENABLED | SEC_LEVEL;
ieee80211_crypt_delayed_deinit(ieee, crypt);
@@ -2832,8 +2544,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
goto done;
}
sec.enabled = 1;
-// FIXME FIXME
-// sec.encrypt = 1;
+ /* FIXME FIXME */
sec.flags |= SEC_ENABLED;
/* IPW HW cannot build TKIP MIC, host decryption still needed. */
@@ -2942,12 +2653,11 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
struct iw_point *p)
{
struct ieee_param *param;
- int ret=0;
+ int ret = 0;
down(&ieee->wx_sem);
- //IEEE_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length);
- if (p->length < sizeof(struct ieee_param) || !p->pointer){
+ if (p->length < sizeof(struct ieee_param) || !p->pointer) {
ret = -EINVAL;
goto out;
}
@@ -2959,27 +2669,22 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
}
switch (param->cmd) {
-
case IEEE_CMD_SET_WPA_PARAM:
ret = ieee80211_wpa_set_param(ieee, param->u.wpa_param.name,
param->u.wpa_param.value);
break;
-
case IEEE_CMD_SET_WPA_IE:
ret = ieee80211_wpa_set_wpa_ie(ieee, param, p->length);
break;
-
case IEEE_CMD_SET_ENCRYPTION:
ret = ieee80211_wpa_set_encryption(ieee, param, p->length);
break;
-
case IEEE_CMD_MLME:
ret = ieee80211_wpa_mlme(ieee, param->u.mlme.command,
param->u.mlme.reason_code);
break;
-
default:
- printk("Unknown WPA supplicant request: %d\n",param->cmd);
+ printk("Unknown WPA supplicant request: %d\n", param->cmd);
ret = -EOPNOTSUPP;
break;
}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
index 3b7955f0ff98..07c3f715a6f5 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
@@ -1,38 +1,38 @@
-/******************************************************************************
-
- Copyright(c) 2004 Intel Corporation. All rights reserved.
-
- Portions of this file are based on the WEP enablement code provided by the
- Host AP project hostap-drivers v0.1.3
- Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- <jkmaline@cc.hut.fi>
- Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc., 59
- Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
- The full GNU General Public License is included in this distribution in the
- file called LICENSE.
-
- Contact Information:
- James P. Ketrenos <ipw2100-admin@linux.intel.com>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+/*
+ * Copyright(c) 2004 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are based on the WEP enablement code provided by the
+ * Host AP project hostap-drivers v0.1.3
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * James P. Ketrenos <ipw2100-admin@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
-******************************************************************************/
#include <linux/wireless.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/etherdevice.h>
#include "ieee80211.h"
static const char *ieee80211_modes[] = {
@@ -54,7 +54,7 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
/* First entry *MUST* be the AP MAC address */
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
+ ether_addr_copy(iwe.u.ap_addr.sa_data, network->bssid);
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
/* Remaining entries will be displayed in the order we provide them */
@@ -62,17 +62,13 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
/* Add the ESSID */
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
- //YJ,modified,080903,for hidden ap
- //if (network->flags & NETWORK_EMPTY_ESSID) {
if (network->ssid_len == 0) {
- //YJ,modified,080903,end
iwe.u.data.length = sizeof("<hidden>");
start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>");
} else {
- iwe.u.data.length = min(network->ssid_len, (u8)32);
+ iwe.u.data.length = min_t(u8, network->ssid_len, 32);
start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
}
- //printk("ESSID: %s\n",network->ssid);
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", ieee80211_modes[network->mode]);
@@ -92,8 +88,6 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
/* Add frequency/channel */
iwe.cmd = SIOCGIWFREQ;
-/* iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode);
- iwe.u.freq.e = 3; */
iwe.u.freq.m = network->channel;
iwe.u.freq.e = 0;
iwe.u.freq.i = 0;
@@ -145,10 +139,9 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
/* Add quality statistics */
/* TODO: Fix these values... */
if (network->stats.signal == 0 || network->stats.rssi == 0)
- printk("========>signal:%d, rssi:%d\n", network->stats.signal,
- network->stats.rssi);
+ netdev_info(ieee->dev, "========>signal:%d, rssi:%d\n",
+ network->stats.signal, network->stats.rssi);
iwe.cmd = IWEVQUAL;
-// printk("SIGNAL: %d,RSSI: %d,NOISE: %d\n",network->stats.signal,network->stats.rssi,network->stats.noise);
iwe.u.qual.qual = network->stats.signalstrength;
iwe.u.qual.level = network->stats.signal;
iwe.u.qual.noise = network->stats.noise;
@@ -171,7 +164,6 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
memset(&iwe, 0, sizeof(iwe));
if (network->wpa_ie_len) {
- // printk("wpa_ie_len:%d\n", network->wpa_ie_len);
char buf[MAX_WPA_IE_LEN];
memcpy(buf, network->wpa_ie, network->wpa_ie_len);
iwe.cmd = IWEVGENIE;
@@ -181,7 +173,6 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
memset(&iwe, 0, sizeof(iwe));
if (network->rsn_ie_len) {
- // printk("=====>rsn_ie_len:\n", network->rsn_ie_len);
char buf[MAX_WPA_IE_LEN];
memcpy(buf, network->rsn_ie, network->rsn_ie_len);
iwe.cmd = IWEVGENIE;
@@ -190,7 +181,8 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
}
/* Add EXTRA: Age to display seconds since last beacon/probe response
- * for given network. */
+ * for given network.
+ */
iwe.cmd = IWEVCUSTOM;
p = custom;
p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
@@ -210,8 +202,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
unsigned long flags;
int err = 0;
char *ev = extra;
- char *stop = ev + wrqu->data.length;//IW_SCAN_MAX_DATA;
- //char *stop = ev + IW_SCAN_MAX_DATA;
+ char *stop = ev + wrqu->data.length;
int i = 0;
IEEE80211_DEBUG_WX("Getting scan\n");
@@ -287,7 +278,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
IEEE80211_DEBUG_WX("Disabling encryption.\n");
/* Check all the keys to see if any are still configured,
- * and if no key index was provided, de-init them all */
+ * and if no key index was provided, de-init them all.
+ */
for (i = 0; i < WEP_KEYS; i++) {
if (ieee->crypt[i] != NULL) {
if (key_provided)
@@ -306,15 +298,14 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
goto done;
}
-
-
sec.enabled = 1;
sec.flags |= SEC_ENABLED;
if (*crypt != NULL && (*crypt)->ops != NULL &&
strcmp((*crypt)->ops->name, "WEP") != 0) {
/* changing to use WEP; deinit previously used algorithm
- * on this key */
+ * on this key.
+ */
ieee80211_crypt_delayed_deinit(ieee, crypt);
}
@@ -359,10 +350,11 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
(*crypt)->priv);
sec.flags |= (1 << key);
/* This ensures a key will be activated if no key is
- * explicitly set */
+ * explicitly set.
+ */
if (key == sec.active_key)
sec.flags |= SEC_ACTIVE_KEY;
- ieee->tx_keyidx = key;//by wb 080312
+ ieee->tx_keyidx = key;
} else {
len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
NULL, (*crypt)->priv);
@@ -395,7 +387,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
"OPEN" : "SHARED KEY");
/* For now we just support WEP, so only set that security level...
- * TODO: When WPA is added this is one place that needs to change */
+ * TODO: When WPA is added this is one place that needs to change
+ */
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
@@ -406,7 +399,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
* generate new IEEE 802.11 authentication which may end up in looping
* with IEEE 802.1X. If your hardware requires a reset after WEP
* configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack. */
+ * the callbacks structures used to initialize the 802.11 stack.
+ */
if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
@@ -448,7 +442,8 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
if (strcmp(crypt->ops->name, "WEP") != 0) {
/* only WEP is supported with wireless extensions, so just
- * report that encryption is used */
+ * report that encryption is used.
+ */
erq->length = 0;
erq->flags |= IW_ENCODE_ENABLED;
return 0;
@@ -483,7 +478,6 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
struct ieee80211_security sec = {
.flags = 0,
};
- //printk("======>encoding flag:%x,ext flag:%x, ext alg:%d\n", encoding->flags,ext->ext_flags, ext->alg);
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
if (idx < 1 || idx > WEP_KEYS)
@@ -497,7 +491,6 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
group_key = 1;
} else {
/* some Cisco APs use idx>0 for unicast in dynamic WEP */
- //printk("not group key, flags:%x, ext->alg:%d\n", ext->ext_flags, ext->alg);
if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
return -EINVAL;
if (ieee->iw_mode == IW_MODE_INFRA)
@@ -506,7 +499,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
return -EINVAL;
}
- sec.flags |= SEC_ENABLED;// | SEC_ENCRYPT;
+ sec.flags |= SEC_ENABLED;
if ((encoding->flags & IW_ENCODE_DISABLED) ||
ext->alg == IW_ENCODE_ALG_NONE) {
if (*crypt)
@@ -518,16 +511,13 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
if (i == WEP_KEYS) {
sec.enabled = 0;
- // sec.encrypt = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_LEVEL;
}
- //printk("disabled: flag:%x\n", encoding->flags);
goto done;
}
sec.enabled = 1;
- // sec.encrypt = 1;
switch (ext->alg) {
case IW_ENCODE_ALG_WEP:
@@ -545,7 +535,6 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
ret = -EINVAL;
goto done;
}
-// printk("8-09-08-9=====>%s, alg name:%s\n",__func__, alg);
ops = ieee80211_get_crypto_ops(alg);
if (ops == NULL)
@@ -553,7 +542,8 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
if (ops == NULL) {
IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
dev->name, ext->alg);
- printk("========>unknown crypto alg %d\n", ext->alg);
+ netdev_err(ieee->dev, "========>unknown crypto alg %d\n",
+ ext->alg);
ret = -EINVAL;
goto done;
}
@@ -584,13 +574,11 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
(*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
(*crypt)->priv) < 0) {
IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
- printk("key setting failed\n");
+ netdev_err(ieee->dev, "key setting failed\n");
ret = -EINVAL;
goto done;
}
#if 1
- //skip_host_crypt:
- //printk("skip_host_crypt:ext_flags:%x\n", ext->ext_flags);
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
ieee->tx_keyidx = idx;
sec.active_key = idx;
@@ -602,15 +590,12 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
sec.key_sizes[idx] = ext->key_len;
sec.flags |= (1 << idx);
if (ext->alg == IW_ENCODE_ALG_WEP) {
- // sec.encode_alg[idx] = SEC_ALG_WEP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
} else if (ext->alg == IW_ENCODE_ALG_TKIP) {
- // sec.encode_alg[idx] = SEC_ALG_TKIP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_2;
} else if (ext->alg == IW_ENCODE_ALG_CCMP) {
- // sec.encode_alg[idx] = SEC_ALG_CCMP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_3;
}
@@ -632,20 +617,19 @@ done:
return ret;
}
+
int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct iw_mlme *mlme = (struct iw_mlme *) extra;
-// printk("\ndkgadfslkdjgalskdf===============>%s(), cmd:%x\n", __func__, mlme->cmd);
#if 1
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
case IW_MLME_DISASSOC:
- // printk("disassoc now\n");
ieee80211_disassociate(ieee);
break;
- default:
+ default:
return -EOPNOTSUPP;
}
#endif
@@ -656,24 +640,16 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
struct iw_request_info *info,
struct iw_param *data, char *extra)
{
-/*
- struct ieee80211_security sec = {
- .flags = SEC_AUTH_MODE,
- }
-*/
- //printk("set auth:flag:%x, data value:%x\n", data->flags, data->value);
switch (data->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
- /*need to support wpa2 here*/
- //printk("wpa version:%x\n", data->value);
+ /* need to support wpa2 here */
break;
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_KEY_MGMT:
- /*
- * * Host AP driver does not use these parameters and allows
- * * wpa_supplicant to control them internally.
- * */
+ /* Host AP driver does not use these parameters and allows
+ * wpa_supplicant to control them internally.
+ */
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
ieee->tkip_countermeasures = data->value;
@@ -684,13 +660,11 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
case IW_AUTH_80211_AUTH_ALG:
ieee->open_wep = (data->value&IW_AUTH_ALG_OPEN_SYSTEM) ? 1 : 0;
- //printk("open_wep:%d\n", ieee->open_wep);
break;
#if 1
case IW_AUTH_WPA_ENABLED:
ieee->wpa_enabled = (data->value) ? 1 : 0;
- //printk("enable wpa:%d\n", ieee->wpa_enabled);
break;
#endif
@@ -712,13 +686,13 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
u8 *buf = NULL;
if (len > MAX_WPA_IE_LEN || (len && ie == NULL)) {
- printk("return error out, len:%zu\n", len);
+ netdev_err(ieee->dev, "return error out, len:%zu\n", len);
return -EINVAL;
}
if (len) {
if (len != ie[1]+2) {
- printk("len:%zu, ie:%d\n", len, ie[1]);
+ netdev_err(ieee->dev, "len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
buf = kmemdup(ie, len, GFP_KERNEL);
@@ -732,7 +706,6 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
ieee->wpa_ie = NULL;
ieee->wpa_ie_len = 0;
}
-// printk("<=====out %s()\n", __func__);
return 0;
diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h
index 8999ec62450d..9f931dba1d82 100644
--- a/drivers/staging/rtl8187se/r8180.h
+++ b/drivers/staging/rtl8187se/r8180.h
@@ -1,19 +1,18 @@
/*
- This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part of the
- official realtek driver
-
- Parts of this driver are based on the rtl8180 driver skeleton
- from Patric Schenke & Andres Salomon
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
-
- We want to thanks the Authors of those projects and the Ndiswrapper
- project Authors.
-*/
+ * This is part of rtl8180 OpenSource driver.
+ * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
+ * Released under the terms of GPL (General Public Licence)
+ *
+ * Parts of this driver are based on the GPL part of the official realtek driver
+ *
+ * Parts of this driver are based on the rtl8180 driver skeleton from Patric
+ * Schenke & Andres Salomon
+ *
+ * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
+ *
+ * We want to thank the Authors of those projects and the Ndiswrapper project
+ * Authors.
+ */
#ifndef R8180H
#define R8180H
@@ -21,13 +20,12 @@
#include <linux/interrupt.h>
#define RTL8180_MODULE_NAME "r8180"
-#define DMESG(x,a...) printk(KERN_INFO RTL8180_MODULE_NAME ": " x "\n", ## a)
-#define DMESGW(x,a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": WW:" x "\n", ## a)
-#define DMESGE(x,a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": EE:" x "\n", ## a)
+#define DMESG(x, a...) printk(KERN_INFO RTL8180_MODULE_NAME ": " x "\n", ## a)
+#define DMESGW(x, a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": WW:" x "\n", ## a)
+#define DMESGE(x, a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": EE:" x "\n", ## a)
#include <linux/module.h>
#include <linux/kernel.h>
-//#include <linux/config.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/types.h>
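For readers unfamiliar with these driver-local logging macros: they prepend the module name and a severity tag to an ordinary printk(). A hypothetical call site (not part of this patch) and its expansion:

    /* Illustrative only: DMESGW("invalid channel %d", ch) expands to
     * printk(KERN_WARNING "r8180" ": WW:" "invalid channel %d" "\n", ch),
     * i.e. a KERN_WARNING line reading "r8180: WW:invalid channel 15".
     */
    if (ch > 14 || ch < 1)
            DMESGW("invalid channel %d", ch);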
@@ -36,22 +34,21 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
-#include <linux/rtnetlink.h> //for rtnl_lock()
+#include <linux/rtnetlink.h> /* for rtnl_lock() */
#include <linux/wireless.h>
#include <linux/timer.h>
-#include <linux/proc_fs.h> // Necessary because we use the proc fs
+#include <linux/proc_fs.h> /* Necessary because we use the proc fs. */
#include <linux/if_arp.h>
#include "ieee80211/ieee80211.h"
#include <asm/io.h>
-//#include <asm/semaphore.h>
#define EPROM_93c46 0
#define EPROM_93c56 1
-#define RTL_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
+#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
#define DEFAULT_FRAG_THRESHOLD 2342U
-#define MIN_FRAG_THRESHOLD 256U
+#define MIN_FRAG_THRESHOLD 256U
#define DEFAULT_RTS_THRESHOLD 2342U
#define MIN_RTS_THRESHOLD 0U
#define MAX_RTS_THRESHOLD 2342U
@@ -60,132 +57,99 @@
#define DEFAULT_RETRY_RTS 7
#define DEFAULT_RETRY_DATA 7
-#define BEACON_QUEUE 6
+#define BEACON_QUEUE 6
-#define aSifsTime 10
+#define aSifsTime 10
-#define sCrcLng 4
-#define sAckCtsLng 112 // bits in ACK and CTS frames
-//+by amy 080312
-#define RATE_ADAPTIVE_TIMER_PERIOD 300
+#define sCrcLng 4
+#define sAckCtsLng 112 /* bits in ACK and CTS frames. */
+/* +by amy 080312. */
+#define RATE_ADAPTIVE_TIMER_PERIOD 300
-typedef enum _WIRELESS_MODE {
+enum wireless_mode {
WIRELESS_MODE_UNKNOWN = 0x00,
WIRELESS_MODE_A = 0x01,
WIRELESS_MODE_B = 0x02,
WIRELESS_MODE_G = 0x04,
WIRELESS_MODE_AUTO = 0x08,
-} WIRELESS_MODE;
-
-typedef struct ChnlAccessSetting {
- u16 SIFS_Timer;
- u16 DIFS_Timer;
- u16 SlotTimeTimer;
- u16 EIFS_Timer;
- u16 CWminIndex;
- u16 CWmaxIndex;
-}*PCHANNEL_ACCESS_SETTING,CHANNEL_ACCESS_SETTING;
-
-typedef enum{
- NIC_8185 = 1,
- NIC_8185B
- } nic_t;
+};
+
+struct chnl_access_setting {
+ u16 sifs_timer;
+ u16 difs_timer;
+ u16 slot_time_timer;
+ u16 eifs_timer;
+ u16 cwmin_index;
+ u16 cwmax_index;
+};
+
+enum nic_t {
+ NIC_8185 = 1,
+ NIC_8185B
+};
typedef u32 AC_CODING;
-#define AC0_BE 0 // ACI: 0x00 // Best Effort
-#define AC1_BK 1 // ACI: 0x01 // Background
-#define AC2_VI 2 // ACI: 0x10 // Video
-#define AC3_VO 3 // ACI: 0x11 // Voice
-#define AC_MAX 4 // Max: define total number; Should not to be used as a real enum.
-
-//
-// ECWmin/ECWmax field.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
-//
-typedef union _ECW{
- u8 charData;
- struct
- {
- u8 ECWmin:4;
- u8 ECWmax:4;
- }f; // Field
-}ECW, *PECW;
-
-//
-// ACI/AIFSN Field.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
-//
-typedef union _ACI_AIFSN{
- u8 charData;
-
- struct
- {
- u8 AIFSN:4;
- u8 ACM:1;
- u8 ACI:2;
- u8 Reserved:1;
- }f; // Field
-}ACI_AIFSN, *PACI_AIFSN;
-
-//
-// AC Parameters Record Format.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
-//
-typedef union _AC_PARAM{
- u32 longData;
- u8 charData[4];
-
- struct
- {
- ACI_AIFSN AciAifsn;
- ECW Ecw;
- u16 TXOPLimit;
- }f; // Field
-}AC_PARAM, *PAC_PARAM;
-
-/* it is a wrong definition. -xiong-2006-11-17
-typedef struct ThreeWireReg {
- u16 longData;
+#define AC0_BE 0 /* ACI: 0x00 */ /* Best Effort. */
+#define AC1_BK 1 /* ACI: 0x01 */ /* Background. */
+#define AC2_VI 2 /* ACI: 0x10 */ /* Video. */
+#define AC3_VO 3 /* ACI: 0x11 */ /* Voice. */
+#define AC_MAX 4 /* Max: defines the total number; should not be used as a real
+ * enum.
+ */
+
+/*
+ * ECWmin/ECWmax field.
+ * Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
+ */
+typedef union _ECW {
+ u8 charData;
+ struct {
+ u8 ECWmin:4;
+ u8 ECWmax:4;
+ } f; /* Field */
+} ECW, *PECW;
+
+/*
+ * ACI/AIFSN Field. Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
+ */
+typedef union _ACI_AIFSN {
+ u8 charData;
+
struct {
- u8 enableB;
- u8 data;
- u8 clk;
- u8 read_write;
- } struc;
-} ThreeWireReg;
-*/
-
-typedef union _ThreeWire{
- struct _ThreeWireStruc{
- u16 data:1;
- u16 clk:1;
- u16 enableB:1;
- u16 read_write:1;
- u16 resv1:12;
-// u2Byte resv2:14;
-// u2Byte ThreeWireEnable:1;
-// u2Byte resv3:1;
- }struc;
- u16 longData;
-}ThreeWireReg;
-
-
-typedef struct buffer
-{
+ u8 AIFSN:4;
+ u8 ACM:1;
+ u8 ACI:2;
+ u8 Reserved:1;
+ } f; /* Field */
+} ACI_AIFSN, *PACI_AIFSN;
+
+/*
+ * AC Parameters Record Format.
+ * Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
+ */
+typedef union _AC_PARAM {
+ u32 longData;
+ u8 charData[4];
+
+ struct {
+ ACI_AIFSN AciAifsn;
+ ECW Ecw;
+ u16 TXOPLimit;
+ } f; /* Field */
+} AC_PARAM, *PAC_PARAM;
+
+struct buffer {
struct buffer *next;
u32 *buf;
dma_addr_t dma;
-} buffer;
+};
-//YJ,modified,080828
-typedef struct Stats
-{
+/* YJ,modified,080828. */
+struct stats {
unsigned long txrdu;
unsigned long rxrdu;
unsigned long rxnolast;
unsigned long rxnodata;
-// unsigned long rxreset;
-// unsigned long rxwrkaround;
unsigned long rxnopointer;
unsigned long txnperr;
unsigned long txresumed;
@@ -207,126 +171,123 @@ typedef struct Stats
unsigned long txbeaconerr;
unsigned long txlpokint;
unsigned long txlperr;
- unsigned long txretry;//retry number tony 20060601
- unsigned long rxcrcerrmin;//crc error (0-500)
- unsigned long rxcrcerrmid;//crc error (500-1000)
- unsigned long rxcrcerrmax;//crc error (>1000)
- unsigned long rxicverr;//ICV error
-} Stats;
+ unsigned long txretry; /* retry number tony 20060601 */
+ unsigned long rxcrcerrmin; /* crc error (0-500) */
+ unsigned long rxcrcerrmid; /* crc error (500-1000) */
+ unsigned long rxcrcerrmax; /* crc error (>1000) */
+ unsigned long rxicverr; /* ICV error */
+};
#define MAX_LD_SLOT_NUM 10
-#define KEEP_ALIVE_INTERVAL 20 // in seconds.
-#define CHECK_FOR_HANG_PERIOD 2 //be equal to watchdog check time
-#define DEFAULT_KEEP_ALIVE_LEVEL 1
-#define DEFAULT_SLOT_NUM 2
-#define POWER_PROFILE_AC 0
-#define POWER_PROFILE_BATTERY 1
-
-typedef struct _link_detect_t
-{
- u32 RxFrameNum[MAX_LD_SLOT_NUM]; // number of Rx Frame / CheckForHang_period to determine link status
- u16 SlotNum; // number of CheckForHang period to determine link status, default is 2
- u16 SlotIndex;
-
- u32 NumTxOkInPeriod; //number of packet transmitted during CheckForHang
- u32 NumRxOkInPeriod; //number of packet received during CheckForHang
-
- u8 IdleCount; // (KEEP_ALIVE_INTERVAL / CHECK_FOR_HANG_PERIOD)
- u32 LastNumTxUnicast;
- u32 LastNumRxUnicast;
-
- bool bBusyTraffic; //when it is set to 1, UI cann't scan at will.
-}link_detect_t, *plink_detect_t;
-
-//YJ,modified,080828,end
-
-//by amy for led
-//================================================================================
-// LED customization.
-//================================================================================
-
-typedef enum _LED_STRATEGY_8185{
- SW_LED_MODE0, //
- SW_LED_MODE1, //
- HW_LED, // HW control 2 LEDs, LED0 and LED1 (there are 4 different control modes)
-}LED_STRATEGY_8185, *PLED_STRATEGY_8185;
-//by amy for led
-//by amy for power save
-typedef enum _LED_CTL_MODE{
- LED_CTL_POWER_ON = 1,
- LED_CTL_LINK = 2,
- LED_CTL_NO_LINK = 3,
- LED_CTL_TX = 4,
- LED_CTL_RX = 5,
- LED_CTL_SITE_SURVEY = 6,
- LED_CTL_POWER_OFF = 7
-}LED_CTL_MODE;
-
-typedef enum _RT_RF_POWER_STATE
-{
- eRfOn,
- eRfSleep,
- eRfOff
-}RT_RF_POWER_STATE;
-
-enum _ReasonCode{
- unspec_reason = 0x1,
- auth_not_valid = 0x2,
- deauth_lv_ss = 0x3,
- inactivity = 0x4,
- ap_overload = 0x5,
- class2_err = 0x6,
- class3_err = 0x7,
- disas_lv_ss = 0x8,
- asoc_not_auth = 0x9,
-
- //----MIC_CHECK
- mic_failure = 0xe,
- //----END MIC_CHECK
-
- // Reason code defined in 802.11i D10.0 p.28.
- invalid_IE = 0x0d,
- four_way_tmout = 0x0f,
- two_way_tmout = 0x10,
- IE_dismatch = 0x11,
+#define KEEP_ALIVE_INTERVAL 20 /* in seconds. */
+#define CHECK_FOR_HANG_PERIOD 2 /* equal to the watchdog check time. */
+#define DEFAULT_KEEP_ALIVE_LEVEL 1
+#define DEFAULT_SLOT_NUM 2
+#define POWER_PROFILE_AC 0
+#define POWER_PROFILE_BATTERY 1
+
+struct link_detect_t {
+ u32 rx_frame_num[MAX_LD_SLOT_NUM]; /* number of Rx Frames per
+ * CheckForHang period, used to
+ * determine link status.
+ */
+ u16 slot_num; /* number of CheckForHang periods used to determine
+ * link status, default is 2.
+ */
+ u16 slot_index;
+ u32 num_tx_ok_in_period; /* number of packets transmitted during
+ * CheckForHang.
+ */
+ u32 num_rx_ok_in_period; /* number of packets received during
+ * CheckForHang.
+ */
+ u8 idle_count; /* (KEEP_ALIVE_INTERVAL / CHECK_FOR_HANG_PERIOD) */
+ u32 last_num_tx_unicast;
+ u32 last_num_rx_unicast;
+
+ bool b_busy_traffic; /* when it is set to 1, UI can't scan at will. */
+};
+
+/* YJ,modified,080828,end */
+
+/* by amy for led
+ * ==========================================================================
+ * LED customization.
+ * ==========================================================================
+ */
+enum led_strategy_8185 {
+ SW_LED_MODE0,
+ SW_LED_MODE1,
+ HW_LED, /* HW controls 2 LEDs, LED0 and LED1 (there are 4 different
+ * control modes). */
+};
+
+enum rt_rf_power_state {
+ RF_ON,
+ RF_SLEEP,
+ RF_OFF
+};
+
+enum _ReasonCode {
+ unspec_reason = 0x1,
+ auth_not_valid = 0x2,
+ deauth_lv_ss = 0x3,
+ inactivity = 0x4,
+ ap_overload = 0x5,
+ class2_err = 0x6,
+ class3_err = 0x7,
+ disas_lv_ss = 0x8,
+ asoc_not_auth = 0x9,
+
+ /* ----MIC_CHECK */
+ mic_failure = 0xe,
+ /* ----END MIC_CHECK */
+
+ /* Reason code defined in 802.11i D10.0 p.28. */
+ invalid_IE = 0x0d,
+ four_way_tmout = 0x0f,
+ two_way_tmout = 0x10,
+ IE_dismatch = 0x11,
invalid_Gcipher = 0x12,
invalid_Pcipher = 0x13,
- invalid_AKMP = 0x14,
+ invalid_AKMP = 0x14,
unsup_RSNIEver = 0x15,
- invalid_RSNIE = 0x16,
- auth_802_1x_fail= 0x17,
- ciper_reject = 0x18,
-
- // Reason code defined in 7.3.1.7, 802.1e D13.0, p.42. Added by Annie, 2005-11-15.
- QoS_unspec = 0x20, // 32
- QAP_bandwidth = 0x21, // 33
- poor_condition = 0x22, // 34
- no_facility = 0x23, // 35
- // Where is 36???
- req_declined = 0x25, // 37
- invalid_param = 0x26, // 38
- req_not_honored= 0x27, // 39
- TS_not_created = 0x2F, // 47
- DL_not_allowed = 0x30, // 48
- dest_not_exist = 0x31, // 49
- dest_not_QSTA = 0x32, // 50
+ invalid_RSNIE = 0x16,
+ auth_802_1x_fail = 0x17,
+ ciper_reject = 0x18,
+
+ /* Reason code defined in 7.3.1.7, 802.1e D13.0, p.42. Added by Annie,
+ * 2005-11-15.
+ */
+ QoS_unspec = 0x20, /* 32 */
+ QAP_bandwidth = 0x21, /* 33 */
+ poor_condition = 0x22, /* 34 */
+ no_facility = 0x23, /* 35 */
+ /* Where is 36??? */
+ req_declined = 0x25, /* 37 */
+ invalid_param = 0x26, /* 38 */
+ req_not_honored = 0x27, /* 39 */
+ TS_not_created = 0x2F, /* 47 */
+ DL_not_allowed = 0x30, /* 48 */
+ dest_not_exist = 0x31, /* 49 */
+ dest_not_QSTA = 0x32, /* 50 */
+};
+
+enum rt_ps_mode {
+ ACTIVE, /* Active/Continuous access. */
+ MAX_PS, /* Max power save mode. */
+ FAST_PS /* Fast power save mode. */
};
-typedef enum _RT_PS_MODE
-{
- eActive, // Active/Continuous access.
- eMaxPs, // Max power save mode.
- eFastPs // Fast power save mode.
-}RT_PS_MODE;
-//by amy for power save
-typedef struct r8180_priv
-{
+
+/* by amy for power save. */
+struct r8180_priv {
struct pci_dev *pdev;
short epromtype;
int irq;
struct ieee80211_device *ieee80211;
- short plcp_preamble_mode; // 0:auto 1:short 2:long
+ short plcp_preamble_mode; /* 0:auto 1:short 2:long */
spinlock_t irq_th_lock;
spinlock_t tx_lock;
@@ -339,19 +300,15 @@ typedef struct r8180_priv
short chan;
short sens;
short max_sens;
- u8 chtxpwr[15]; //channels from 1 to 14, 0 not used
- u8 chtxpwr_ofdm[15]; //channels from 1 to 14, 0 not used
- //u8 challow[15]; //channels from 1 to 14, 0 not used
- u8 channel_plan; // it's the channel plan index
+ u8 chtxpwr[15]; /* channels from 1 to 14, 0 not used. */
+ u8 chtxpwr_ofdm[15]; /* channels from 1 to 14, 0 not used. */
+ u8 channel_plan; /* it's the channel plan index. */
short up;
- short crcmon; //if 1 allow bad crc frame reception in monitor mode
+ short crcmon; /* if 1 allow bad crc frame reception in monitor mode. */
struct timer_list scan_timer;
- /*short scanpending;
- short stopscan;*/
spinlock_t scan_lock;
u8 active_probe;
- //u8 active_scan_num;
struct semaphore wx_sem;
short hw_wep;
@@ -359,20 +316,20 @@ typedef struct r8180_priv
short antb;
short diversity;
u32 key0[4];
- short (*rf_set_sens)(struct net_device *dev,short sens);
- void (*rf_set_chan)(struct net_device *dev,short ch);
+ short (*rf_set_sens)(struct net_device *dev, short sens);
+ void (*rf_set_chan)(struct net_device *dev, short ch);
void (*rf_close)(struct net_device *dev);
void (*rf_init)(struct net_device *dev);
void (*rf_sleep)(struct net_device *dev);
void (*rf_wakeup)(struct net_device *dev);
- //short rate;
+ /* short rate; */
short promisc;
- /*stats*/
- struct Stats stats;
- struct _link_detect_t link_detect; //YJ,add,080828
+ /* stats */
+ struct stats stats;
+ struct link_detect_t link_detect; /* YJ,add,080828 */
struct iw_statistics wstats;
- /*RX stuff*/
+ /* RX stuff. */
u32 *rxring;
u32 *rxringtail;
dma_addr_t rxringdma;
@@ -387,27 +344,6 @@ typedef struct r8180_priv
u32 rx_prevlen;
- /*TX stuff*/
-/*
- u32 *txlpring;
- u32 *txhpring;
- u32 *txnpring;
- dma_addr_t txlpringdma;
- dma_addr_t txhpringdma;
- dma_addr_t txnpringdma;
- u32 *txlpringtail;
- u32 *txhpringtail;
- u32 *txnpringtail;
- u32 *txlpringhead;
- u32 *txhpringhead;
- u32 *txnpringhead;
- struct buffer *txlpbufs;
- struct buffer *txhpbufs;
- struct buffer *txnpbufs;
- struct buffer *txlpbufstail;
- struct buffer *txhpbufstail;
- struct buffer *txnpbufstail;
-*/
u32 *txmapring;
u32 *txbkpring;
u32 *txbepring;
@@ -447,54 +383,47 @@ typedef struct r8180_priv
int txringcount;
int txbuffsize;
- //struct tx_pendingbuf txnp_pending;
- //struct tasklet_struct irq_tx_tasklet;
struct tasklet_struct irq_rx_tasklet;
u8 dma_poll_mask;
- //short tx_suspend;
- /* adhoc/master mode stuff */
+ /* adhoc/master mode stuff. */
u32 *txbeaconringtail;
dma_addr_t txbeaconringdma;
u32 *txbeaconring;
int txbeaconcount;
struct buffer *txbeaconbufs;
struct buffer *txbeaconbufstail;
- //char *master_essid;
- //u16 master_beaconinterval;
- //u32 master_beaconsize;
- //u16 beacon_interval;
u8 retry_data;
u8 retry_rts;
u16 rts;
-//by amy for led
- LED_STRATEGY_8185 LedStrategy;
-//by amy for led
+ /* by amy for led. */
+ enum led_strategy_8185 led_strategy;
+ /* by amy for led. */
-//by amy for power save
+ /* by amy for power save. */
struct timer_list watch_dog_timer;
bool bInactivePs;
bool bSwRfProcessing;
- RT_RF_POWER_STATE eInactivePowerState;
- RT_RF_POWER_STATE eRFPowerState;
+ enum rt_rf_power_state eInactivePowerState;
+ enum rt_rf_power_state eRFPowerState;
u32 RfOffReason;
bool RFChangeInProgress;
bool SetRFPowerStateInProgress;
- u8 RFProgType;
+ u8 RFProgType;
bool bLeisurePs;
- RT_PS_MODE dot11PowerSaveMode;
- //u32 NumRxOkInPeriod; //YJ,del,080828
- //u32 NumTxOkInPeriod; //YJ,del,080828
- u8 TxPollingTimes;
+ enum rt_ps_mode dot11PowerSaveMode;
+ u8 TxPollingTimes;
- bool bApBufOurFrame;// TRUE if AP buffer our unicast data , we will keep eAwake until receive data or timeout.
- u8 WaitBufDataBcnCount;
- u8 WaitBufDataTimeOut;
+ bool bApBufOurFrame; /* TRUE if the AP buffers our unicast data; we
+ * keep awake until we receive data or time out.
+ */
+ u8 WaitBufDataBcnCount;
+ u8 WaitBufDataTimeOut;
-//by amy for power save
-//by amy for antenna
+ /* by amy for power save. */
+ /* by amy for antenna. */
u8 EEPROMSwAntennaDiversity;
bool EEPROMDefaultAntenna1;
u8 RegSwAntennaDiversityMechanism;
@@ -503,115 +432,128 @@ typedef struct r8180_priv
bool bDefaultAntenna1;
u8 SignalStrength;
long Stats_SignalStrength;
- long LastSignalStrengthInPercent; // In percentage, used for smoothing, e.g. Moving Average.
- u8 SignalQuality; // in 0-100 index.
+ long LastSignalStrengthInPercent; /* In percentage, used for smoothing,
+ * e.g. Moving Average.
+ */
+ u8 SignalQuality; /* in 0-100 index. */
long Stats_SignalQuality;
- long RecvSignalPower; // in dBm.
+ long RecvSignalPower; /* in dBm. */
long Stats_RecvSignalPower;
- u8 LastRxPktAntenna; // +by amy 080312 Antenna which received the lasted packet. 0: Aux, 1:Main. Added by Roger, 2008.01.25.
+ u8 LastRxPktAntenna; /* +by amy 080312 Antenna which received the last
+ * packet. 0: Aux, 1: Main. Added by Roger,
+ * 2008.01.25.
+ */
u32 AdRxOkCnt;
long AdRxSignalStrength;
- u8 CurrAntennaIndex; // Index to current Antenna (both Tx and Rx).
- u8 AdTickCount; // Times of SwAntennaDiversityTimer happened.
- u8 AdCheckPeriod; // # of period SwAntennaDiversityTimer to check Rx signal strength for SW Antenna Diversity.
- u8 AdMinCheckPeriod; // Min value of AdCheckPeriod.
- u8 AdMaxCheckPeriod; // Max value of AdCheckPeriod.
- long AdRxSsThreshold; // Signal strength threshold to switch antenna.
- long AdMaxRxSsThreshold; // Max value of AdRxSsThreshold.
- bool bAdSwitchedChecking; // TRUE if we shall shall check Rx signal strength for last time switching antenna.
- long AdRxSsBeforeSwitched; // Rx signal strength before we switched antenna.
+ u8 CurrAntennaIndex; /* Index to current Antenna (both Tx and Rx). */
+ u8 AdTickCount; /* Times of SwAntennaDiversityTimer happened. */
+ u8 AdCheckPeriod; /* # of period SwAntennaDiversityTimer to check Rx
+ * signal strength for SW Antenna Diversity.
+ */
+ u8 AdMinCheckPeriod; /* Min value of AdCheckPeriod. */
+ u8 AdMaxCheckPeriod; /* Max value of AdCheckPeriod. */
+ long AdRxSsThreshold; /* Signal strength threshold to switch antenna. */
+ long AdMaxRxSsThreshold; /* Max value of AdRxSsThreshold. */
+ bool bAdSwitchedChecking; /* TRUE if we shall check Rx signal
+ * strength after the last antenna switch.
+ */
+ long AdRxSsBeforeSwitched; /* Rx signal strength before we switched
+ * antenna.
+ */
struct timer_list SwAntennaDiversityTimer;
-//by amy for antenna
-//{by amy 080312
-//
- // Crystal calibration.
- // Added by Roger, 2007.12.11.
- //
- bool bXtalCalibration; // Crystal calibration.
- u8 XtalCal_Xin; // Crystal calibration for Xin. 0~7.5pF
- u8 XtalCal_Xout; // Crystal calibration for Xout. 0~7.5pF
- //
- // Tx power tracking with thermal meter indication.
- // Added by Roger, 2007.12.11.
- //
- bool bTxPowerTrack; // Tx Power tracking.
- u8 ThermalMeter; // Thermal meter reference indication.
- //
- // Dynamic Initial Gain Adjustment Mechanism. Added by Bruce, 2007-02-14.
- //
- bool bDigMechanism; // TRUE if DIG is enabled, FALSE ow.
- bool bRegHighPowerMechanism; // For High Power Mechanism. 061010, by rcnjko.
- u32 FalseAlarmRegValue;
- u8 RegDigOfdmFaUpTh; // Upper threshold of OFDM false alarm, which is used in DIG.
- u8 DIG_NumberFallbackVote;
- u8 DIG_NumberUpgradeVote;
- // For HW antenna diversity, added by Roger, 2008.01.30.
- u32 AdMainAntennaRxOkCnt; // Main antenna Rx OK count.
- u32 AdAuxAntennaRxOkCnt; // Aux antenna Rx OK count.
- bool bHWAdSwitched; // TRUE if we has switched default antenna by HW evaluation.
- // RF High Power upper/lower threshold.
- u8 RegHiPwrUpperTh;
- u8 RegHiPwrLowerTh;
- // RF RSSI High Power upper/lower Threshold.
- u8 RegRSSIHiPwrUpperTh;
- u8 RegRSSIHiPwrLowerTh;
- // Current CCK RSSI value to determine CCK high power, asked by SD3 DZ, by Bruce, 2007-04-12.
- u8 CurCCKRSSI;
- bool bCurCCKPkt;
- //
- // High Power Mechanism. Added by amy, 080312.
- //
- bool bToUpdateTxPwr;
- long UndecoratedSmoothedSS;
- long UndercorateSmoothedRxPower;
- u8 RSSI;
- char RxPower;
- u8 InitialGain;
- //For adjust Dig Threshold during Legacy/Leisure Power Save Mode
- u32 DozePeriodInPast2Sec;
- // Don't access BB/RF under disable PLL situation.
- u8 InitialGainBackUp;
- u8 RegBModeGainStage;
-//by amy for rate adaptive
- struct timer_list rateadapter_timer;
- u32 RateAdaptivePeriod;
- bool bEnhanceTxPwr;
- bool bUpdateARFR;
- int ForcedDataRate; // Force Data Rate. 0: Auto, 0x02: 1M ~ 0x6C: 54M.)
- u32 NumTxUnicast; //YJ,add,080828,for keep alive
- u8 keepAliveLevel; //YJ,add,080828,for KeepAlive
- unsigned long NumTxOkTotal;
- u16 LastRetryCnt;
- u16 LastRetryRate;
- unsigned long LastTxokCnt;
- unsigned long LastRxokCnt;
- u16 CurrRetryCnt;
- unsigned long LastTxOKBytes;
- unsigned long NumTxOkBytesTotal;
- u8 LastFailTxRate;
- long LastFailTxRateSS;
- u8 FailTxRateCount;
- u32 LastTxThroughput;
- //for up rate
- unsigned short bTryuping;
- u8 CurrTxRate; //the rate before up
- u16 CurrRetryRate;
- u16 TryupingCount;
- u8 TryDownCountLowData;
- u8 TryupingCountNoData;
-
- u8 CurrentOperaRate;
-//by amy for rate adaptive
-//by amy 080312}
-// short wq_hurryup;
-// struct workqueue_struct *workqueue;
+ /* by amy for antenna {by amy 080312 */
+
+ /* Crystal calibration. Added by Roger, 2007.12.11. */
+
+ bool bXtalCalibration; /* Crystal calibration. */
+ u8 XtalCal_Xin; /* Crystal calibration for Xin. 0~7.5pF */
+ u8 XtalCal_Xout; /* Crystal calibration for Xout. 0~7.5pF */
+
+ /* Tx power tracking with thermal meter indication.
+ * Added by Roger, 2007.12.11.
+ */
+
+ bool bTxPowerTrack; /* Tx Power tracking. */
+ u8 ThermalMeter; /* Thermal meter reference indication. */
+
+ /* Dynamic Initial Gain Adjustment Mechanism. Added by Bruce,
+ * 2007-02-14.
+ */
+ bool bDigMechanism; /* TRUE if DIG is enabled, FALSE ow. */
+ bool bRegHighPowerMechanism; /* For High Power Mechanism. 061010,
+ * by rcnjko.
+ */
+ u32 FalseAlarmRegValue;
+ u8 RegDigOfdmFaUpTh; /* Upper threshold of OFDM false alarm, which is
+ * used in DIG.
+ */
+ u8 DIG_NumberFallbackVote;
+ u8 DIG_NumberUpgradeVote;
+ /* For HW antenna diversity, added by Roger, 2008.01.30. */
+ u32 AdMainAntennaRxOkCnt; /* Main antenna Rx OK count. */
+ u32 AdAuxAntennaRxOkCnt; /* Aux antenna Rx OK count. */
+ bool bHWAdSwitched; /* TRUE if we have switched the default antenna by HW
+ * evaluation.
+ */
+ /* RF High Power upper/lower threshold. */
+ u8 RegHiPwrUpperTh;
+ u8 RegHiPwrLowerTh;
+ /* RF RSSI High Power upper/lower Threshold. */
+ u8 RegRSSIHiPwrUpperTh;
+ u8 RegRSSIHiPwrLowerTh;
+ /* Current CCK RSSI value to determine CCK high power, asked by SD3 DZ,
+ * by Bruce, 2007-04-12.
+ */
+ u8 CurCCKRSSI;
+ bool bCurCCKPkt;
+ /* High Power Mechanism. Added by amy, 080312. */
+ bool bToUpdateTxPwr;
+ long UndecoratedSmoothedSS;
+ long UndecoratedSmoothedRxPower;
+ u8 RSSI;
+ char RxPower;
+ u8 InitialGain;
+ /* For adjust Dig Threshold during Legacy/Leisure Power Save Mode. */
+ u32 DozePeriodInPast2Sec;
+ /* Don't access BB/RF under disable PLL situation. */
+ u8 InitialGainBackUp;
+ u8 RegBModeGainStage;
+ /* by amy for rate adaptive */
+ struct timer_list rateadapter_timer;
+ u32 RateAdaptivePeriod;
+ bool bEnhanceTxPwr;
+ bool bUpdateARFR;
+ int ForcedDataRate; /* Forced Data Rate. 0: Auto, 0x02: 1M ~ 0x6C: 54M.
+ */
+ u32 NumTxUnicast; /* YJ,add,080828,for keep alive. */
+ u8 keepAliveLevel; /* YJ,add,080828,for KeepAlive. */
+ unsigned long NumTxOkTotal;
+ u16 LastRetryCnt;
+ u16 LastRetryRate;
+ unsigned long LastTxokCnt;
+ unsigned long LastRxokCnt;
+ u16 CurrRetryCnt;
+ unsigned long LastTxOKBytes;
+ unsigned long NumTxOkBytesTotal;
+ u8 LastFailTxRate;
+ long LastFailTxRateSS;
+ u8 FailTxRateCount;
+ u32 LastTxThroughput;
+ /* for up rate. */
+ unsigned short bTryuping;
+ u8 CurrTxRate; /* the rate before up. */
+ u16 CurrRetryRate;
+ u16 TryupingCount;
+ u8 TryDownCountLowData;
+ u8 TryupingCountNoData;
+
+ u8 CurrentOperaRate;
struct work_struct reset_wq;
struct work_struct watch_dog_wq;
short ack_tx_to_ieee;
u8 dma_poll_stop_mask;
- //u8 RegThreeWireMode;
u16 ShortRetryLimit;
u16 LongRetryLimit;
u16 EarlyRxThreshold;
@@ -619,8 +561,8 @@ typedef struct r8180_priv
u32 ReceiveConfig;
u32 IntrMask;
- struct ChnlAccessSetting ChannelAccessSetting;
-}r8180_priv;
+ struct chnl_access_setting ChannelAccessSetting;
+};
#define MANAGE_PRIORITY 0
#define BK_PRIORITY 1
@@ -632,14 +574,14 @@ typedef struct r8180_priv
#define LOW_PRIORITY VI_PRIORITY
#define NORM_PRIORITY VO_PRIORITY
-//AC2Queue mapping
+/* AC2Queue mapping. */
#define AC2Q(_ac) (((_ac) == WME_AC_VO) ? VO_PRIORITY : \
((_ac) == WME_AC_VI) ? VI_PRIORITY : \
((_ac) == WME_AC_BK) ? BK_PRIORITY : \
BE_PRIORITY)
short rtl8180_tx(struct net_device *dev, u8 *skbuf, int len, int priority,
- short morefrag, short fragdesc, int rate);
+ bool morefrag, short fragdesc, int rate);
u8 read_nic_byte(struct net_device *dev, int x);
u32 read_nic_dword(struct net_device *dev, int x);
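As a reading aid for the AC2Q() mapping above: it folds the four WMM access categories onto the driver's TX queue priorities. A small sketch of a caller, assuming the WME_AC_* constants from the ieee80211 headers (the hard_data_xmit path does essentially this with skb->priority):

    /* Hypothetical illustration: pick a TX queue from a WMM access
     * category. WME_AC_VO -> VO_PRIORITY, WME_AC_VI -> VI_PRIORITY,
     * WME_AC_BK -> BK_PRIORITY, anything else -> BE_PRIORITY.
     */
    int priority = AC2Q(skb->priority);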
@@ -673,7 +615,6 @@ void UpdateInitialGain(struct net_device *dev);
bool SetAntennaConfig87SE(struct net_device *dev, u8 DefaultAnt,
bool bAntDiversity);
-//#ifdef CONFIG_RTL8185B
void rtl8185b_adapter_start(struct net_device *dev);
void rtl8185b_rx_enable(struct net_device *dev);
void rtl8185b_tx_enable(struct net_device *dev);
@@ -682,9 +623,8 @@ void rtl8185b_irq_enable(struct net_device *dev);
void fix_rx_fifo(struct net_device *dev);
void fix_tx_fifo(struct net_device *dev);
void rtl8225z2_SetTXPowerLevel(struct net_device *dev, short ch);
-void rtl8180_rate_adapter(struct work_struct * work);
-//#endif
-bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
+void rtl8180_rate_adapter(struct work_struct *work);
+bool MgntActSet_RF_State(struct net_device *dev, enum rt_rf_power_state StateToSet,
u32 ChangeSource);
#endif
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 6cafee22bec4..a6022d4e7573 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -1,31 +1,31 @@
/*
- This is part of rtl818x pci OpenSource driver - v 0.1
- Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
- Released under the terms of GPL (General Public License)
-
- Parts of this driver are based on the GPL part of the official
- Realtek driver.
-
- Parts of this driver are based on the rtl8180 driver skeleton
- from Patric Schenke & Andres Salomon.
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
-
- Parts of BB/RF code are derived from David Young rtl8180 netbsd driver.
-
- RSSI calc function from 'The Deuce'
-
- Some ideas borrowed from the 8139too.c driver included in linux kernel.
-
- We (I?) want to thanks the Authors of those projecs and also the
- Ndiswrapper's project Authors.
-
- A big big thanks goes also to Realtek corp. for their help in my attempt to
- add RTL8185 and RTL8225 support, and to David Young also.
-
- Power management interface routines.
- Written by Mariusz Matuszek.
-*/
+ * This is part of rtl818x pci OpenSource driver - v 0.1
+ * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
+ * Released under the terms of GPL (General Public License)
+ *
+ * Parts of this driver are based on the GPL part of the official
+ * Realtek driver.
+ *
+ * Parts of this driver are based on the rtl8180 driver skeleton
+ * from Patric Schenke & Andres Salomon.
+ *
+ * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
+ *
+ * Parts of BB/RF code are derived from David Young rtl8180 netbsd driver.
+ *
+ * RSSI calc function from 'The Deuce'
+ *
+ * Some ideas borrowed from the 8139too.c driver included in linux kernel.
+ *
+ * We (I?) want to thank the Authors of those projects and also the
+ * Ndiswrapper's project Authors.
+ *
+ * A big big thanks goes also to Realtek corp. for their help in my attempt to
+ * add RTL8185 and RTL8225 support, and to David Young also.
+ *
+ * Power management interface routines.
+ * Written by Mariusz Matuszek.
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -258,7 +258,9 @@ static int proc_get_stats_tx(struct seq_file *m, void *v)
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
unsigned long totalOK;
- totalOK = priv->stats.txnpokint+priv->stats.txhpokint+priv->stats.txlpokint;
+ totalOK = priv->stats.txnpokint + priv->stats.txhpokint +
+ priv->stats.txlpokint;
+
seq_printf(m,
"TX OK: %lu\n"
"TX Error: %lu\n"
@@ -347,9 +349,9 @@ static void rtl8180_proc_init_one(struct net_device *dev)
}
/*
- FIXME: check if we can use some standard already-existent
- data type+functions in kernel
-*/
+ * FIXME: check if we can use some standard, already existing
+ * data types + functions in the kernel.
+ */
static short buffer_add(struct buffer **buffer, u32 *buf, dma_addr_t dma,
struct buffer **bufferhead)
@@ -468,9 +470,11 @@ static short check_nic_enought_desc(struct net_device *dev, int priority)
{
struct r8180_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = netdev_priv(dev);
- int requiredbyte, required;
+ int requiredbyte;
+ int required;
- requiredbyte = priv->ieee80211->fts + sizeof(struct ieee80211_header_data);
+ requiredbyte = priv->ieee80211->fts +
+ sizeof(struct ieee80211_header_data);
if (ieee->current_network.QoS_Enable)
requiredbyte += 2;
@@ -484,7 +488,7 @@ static short check_nic_enought_desc(struct net_device *dev, int priority)
* between the tail and the head
*/
- return (required+2 < get_curr_tx_free_desc(dev, priority));
+ return required + 2 < get_curr_tx_free_desc(dev, priority);
}
void fix_tx_fifo(struct net_device *dev)
@@ -649,7 +653,7 @@ void rtl8180_set_chan(struct net_device *dev, short ch)
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
if ((ch > 14) || (ch < 1)) {
- printk("In %s: Invalid chnanel %d\n", __func__, ch);
+ netdev_err(dev, "In %s: Invalid channel %d\n", __func__, ch);
return;
}
@@ -742,43 +746,50 @@ static short alloc_tx_desc_ring(struct net_device *dev, int bufsize, int count,
switch (addr) {
case TX_MANAGEPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txmapbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txmapbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer NP");
return -ENOMEM;
}
break;
case TX_BKPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txbkpbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txbkpbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer LP");
return -ENOMEM;
}
break;
case TX_BEPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txbepbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txbepbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer NP");
return -ENOMEM;
}
break;
case TX_VIPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txvipbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txvipbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer LP");
return -ENOMEM;
}
break;
case TX_VOPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txvopbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txvopbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer NP");
return -ENOMEM;
}
break;
case TX_HIGHPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txhpbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txhpbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer HP");
return -ENOMEM;
}
break;
case TX_BEACON_RING_ADDR:
- if (-1 == buffer_add(&(priv->txbeaconbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txbeaconbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer BP");
return -ENOMEM;
}
@@ -897,8 +908,8 @@ static short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
return -1;
}
- desc = (u32 *)pci_alloc_consistent(pdev, sizeof(u32)*rx_desc_size*count+256,
- &dma_desc);
+ desc = (u32 *)pci_alloc_consistent(pdev,
+ sizeof(u32) * rx_desc_size * count + 256, &dma_desc);
if (dma_desc & 0xff)
/*
@@ -935,7 +946,8 @@ static short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
tmp = tmp+rx_desc_size;
}
- *(tmp-rx_desc_size) = *(tmp-rx_desc_size) | (1<<30); /* this is the last descriptor */
+ /* this is the last descriptor */
+ *(tmp - rx_desc_size) = *(tmp - rx_desc_size) | (1 << 30);
return 0;
}
@@ -1009,7 +1021,8 @@ inline u16 ieeerate2rtlrate(int rate)
}
}
-static u16 rtl_rate[] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540, 720};
+static u16 rtl_rate[] = {10, 20, 55, 110, 60,
+ 90, 120, 180, 240, 360, 480, 540, 720};
inline u16 rtl8180_rate2rate(short rate)
{
@@ -1143,23 +1156,30 @@ static long TranslateToDbm8185(u8 SignalStrengthIndex)
/*
* Perform signal smoothing for dynamic mechanism.
* This is different with PerformSignalSmoothing8185 in smoothing formula.
- * No dramatic adjustion is apply because dynamic mechanism need some degree
- * of correctness. Ported from 8187B.
+ * No dramatic adjustment is applied because the dynamic mechanism needs some
+ * degree of correctness. Ported from 8187B.
*/
static void PerformUndecoratedSignalSmoothing8185(struct r8180_priv *priv,
bool bCckRate)
{
- /* Determin the current packet is CCK rate. */
+ long smoothedSS;
+ long smoothedRx;
+
+ /* Determine whether the current packet is a CCK rate. */
priv->bCurCCKPkt = bCckRate;
+ smoothedSS = priv->SignalStrength * 10;
+
if (priv->UndecoratedSmoothedSS >= 0)
- priv->UndecoratedSmoothedSS = ((priv->UndecoratedSmoothedSS * 5) +
- (priv->SignalStrength * 10)) / 6;
- else
- priv->UndecoratedSmoothedSS = priv->SignalStrength * 10;
+ smoothedSS = ((priv->UndecoratedSmoothedSS * 5) +
+ smoothedSS) / 6;
- priv->UndercorateSmoothedRxPower = ((priv->UndercorateSmoothedRxPower * 50) +
- (priv->RxPower * 11)) / 60;
+ priv->UndecoratedSmoothedSS = smoothedSS;
+
+ smoothedRx = ((priv->UndecoratedSmoothedRxPower * 50) +
+ (priv->RxPower * 11)) / 60;
+
+ priv->UndecoratedSmoothedRxPower = smoothedRx;
if (bCckRate)
priv->CurCCKRSSI = priv->RSSI;
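The refactor above keeps the original smoothing formula: signal strength is a weighted moving average mixing the previous smoothed value and the new sample (scaled by 10) in a 5:1 ratio, and Rx power mixes old and new in a 50:11 ratio over a divisor of 60. A standalone sketch of the signal-strength update, with hypothetical names, for anyone verifying the refactor is behaviour-preserving:

    /* Sketch only: one update step of the weighted average used for
     * UndecoratedSmoothedSS; *smoothed < 0 means "no history yet".
     */
    static void smooth_ss(long *smoothed, u8 signal_strength)
    {
            long sample = signal_strength * 10;

            if (*smoothed >= 0)
                    *smoothed = (*smoothed * 5 + sample) / 6;
            else
                    *smoothed = sample;     /* first sample seeds the average */
    }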
@@ -1206,8 +1226,9 @@ static void rtl8180_rx(struct net_device *dev)
rx_desc_size = 8;
if ((*(priv->rxringtail)) & (1<<31)) {
- /* we have got an RX int, but the descriptor
- * we are pointing is empty */
+ /* we have got an RX int, but the descriptor we are
+ * pointing to is empty.
+ */
priv->stats.rxnodata++;
priv->ieee80211->stats.rx_errors++;
@@ -1216,7 +1237,8 @@ static void rtl8180_rx(struct net_device *dev)
tmp = priv->rxringtail;
do {
if (tmp == priv->rxring)
- tmp = priv->rxring + (priv->rxringcount - 1)*rx_desc_size;
+ tmp = priv->rxring + (priv->rxringcount - 1) *
+ rx_desc_size;
else
tmp -= rx_desc_size;
@@ -1237,7 +1259,6 @@ static void rtl8180_rx(struct net_device *dev)
if (*(priv->rxringtail) & (1<<27)) {
priv->stats.rxdmafail++;
- /* DMESG("EE: RX DMA FAILED at buffer pointed by descriptor %x",(u32)priv->rxringtail); */
goto drop;
}
@@ -1254,10 +1275,9 @@ static void rtl8180_rx(struct net_device *dev)
if (last) {
lastlen = ((*priv->rxringtail) & 0xfff);
- /* if the last descriptor (that should
- * tell us the total packet len) tell
- * us something less than the descriptors
- * len we had until now, then there is some
+ /* if the last descriptor (that should tell us the
+ * total packet len) tells us something less than the
+ * descriptors' len we had until now, then there is some
* problem..
* workaround to prevent kernel panic
*/
@@ -1293,31 +1313,36 @@ static void rtl8180_rx(struct net_device *dev)
priv->rx_prevlen += len;
if (priv->rx_prevlen > MAX_FRAG_THRESHOLD + 100) {
- /* HW is probably passing several buggy frames
- * without FD or LD flag set.
- * Throw this garbage away to prevent skb
- * memory exhausting
- */
+ /* HW is probably passing several buggy frames without
+ * FD or LD flag set.
+ * Throw this garbage away to prevent skb memory
+ * exhaustion.
+ */
if (!priv->rx_skb_complete)
dev_kfree_skb_any(priv->rx_skb);
priv->rx_skb_complete = 1;
}
- signal = (unsigned char)(((*(priv->rxringtail+3)) & (0x00ff0000))>>16);
+ signal = (unsigned char)((*(priv->rxringtail + 3) &
+ 0x00ff0000) >> 16);
signal = (signal & 0xfe) >> 1;
quality = (unsigned char)((*(priv->rxringtail+3)) & (0xff));
stats.mac_time[0] = *(priv->rxringtail+1);
stats.mac_time[1] = *(priv->rxringtail+2);
- rxpower = ((char)(((*(priv->rxringtail+4)) & (0x00ff0000))>>16))/2 - 42;
- RSSI = ((u8)(((*(priv->rxringtail+3)) & (0x0000ff00))>>8)) & (0x7f);
+
+ rxpower = ((char)((*(priv->rxringtail + 4) &
+ 0x00ff0000) >> 16)) / 2 - 42;
+
+ RSSI = ((u8)((*(priv->rxringtail + 3) &
+ 0x0000ff00) >> 8)) & 0x7f;
rate = ((*(priv->rxringtail)) &
((1<<23)|(1<<22)|(1<<21)|(1<<20)))>>20;
stats.rate = rtl8180_rate2rate(rate);
- Antenna = (((*(priv->rxringtail+3)) & (0x00008000)) == 0) ? 0 : 1;
+ Antenna = (*(priv->rxringtail + 3) & 0x00008000) == 0 ? 0 : 1;
if (!rtl8180_IsWirelessBMode(stats.rate)) { /* OFDM rate. */
RxAGC_dBm = rxpower+1; /* bias */
} else { /* CCK rate. */
@@ -1326,7 +1351,8 @@ static void rtl8180_rx(struct net_device *dev)
LNA = (u8) (RxAGC_dBm & 0x60) >> 5; /* bit 6~ bit 5 */
BB = (u8) (RxAGC_dBm & 0x1F); /* bit 4 ~ bit 0 */
- RxAGC_dBm = -(LNA_gain[LNA] + (BB*2)); /* Pin_11b=-(LNA_gain+BB_gain) (dBm) */
+ /* Pin_11b=-(LNA_gain+BB_gain) (dBm) */
+ RxAGC_dBm = -(LNA_gain[LNA] + (BB * 2));
RxAGC_dBm += 4; /* bias */
}
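To make the CCK branch above easier to follow: the AGC byte packs a 2-bit LNA index in bits 6:5 and a 5-bit baseband gain in bits 4:0, and the received power is reconstructed as -(LNA_gain[LNA] + 2*BB) dBm plus a +4 bias. A hedged restatement of that conversion (the helper name is hypothetical; LNA_gain[] is the table the driver already uses):

    /* Sketch of the CCK AGC-to-dBm conversion; mirrors the branch above. */
    static long cck_agc_to_dbm(u8 agc, const u8 *lna_gain)
    {
            u8 lna = (agc & 0x60) >> 5;     /* bits 6..5: LNA gain index */
            u8 bb = agc & 0x1f;             /* bits 4..0: baseband gain */

            return -(long)(lna_gain[lna] + bb * 2) + 4;     /* Pin_11b + bias */
    }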
@@ -1354,21 +1380,23 @@ static void rtl8180_rx(struct net_device *dev)
priv->RSSI = RSSI;
/* SQ translation formula is provided by SD3 DZ. 2006.06.27 */
if (quality >= 127)
- quality = 1; /*0; */ /* 0 will cause epc to show signal zero , walk around now; */
+ /* 0 causes epc to show signal zero; work around it for now */
+ quality = 1;
else if (quality < 27)
quality = 100;
else
quality = 127 - quality;
priv->SignalQuality = quality;
- stats.signal = (u8)quality; /*priv->wstats.qual.level = priv->SignalStrength; */
+ stats.signal = (u8) quality;
+
stats.signalstrength = RXAGC;
if (stats.signalstrength > 100)
stats.signalstrength = 100;
- stats.signalstrength = (stats.signalstrength * 70)/100 + 30;
- /* printk("==========================>rx : RXAGC is %d,signalstrength is %d\n",RXAGC,stats.signalstrength); */
+ stats.signalstrength = (stats.signalstrength * 70) / 100 + 30;
stats.rssi = priv->wstats.qual.qual = priv->SignalQuality;
- stats.noise = priv->wstats.qual.noise = 100 - priv->wstats.qual.qual;
+ stats.noise = priv->wstats.qual.noise =
+ 100 - priv->wstats.qual.qual;
bHwError = (((*(priv->rxringtail)) & (0x00000fff)) == 4080) |
(((*(priv->rxringtail)) & (0x04000000)) != 0) |
(((*(priv->rxringtail)) & (0x08000000)) != 0) |
@@ -1397,27 +1425,40 @@ static void rtl8180_rx(struct net_device *dev)
/* For good-looking singal strength. */
SignalStrengthIndex = NetgearSignalStrengthTranslate(
- priv->LastSignalStrengthInPercent,
- priv->SignalStrength);
+ priv->LastSignalStrengthInPercent,
+ priv->SignalStrength);
priv->LastSignalStrengthInPercent = SignalStrengthIndex;
- priv->Stats_SignalStrength = TranslateToDbm8185((u8)SignalStrengthIndex);
- /*
- * We need more correct power of received packets and the "SignalStrength" of RxStats is beautified,
- * so we record the correct power here.
- */
- priv->Stats_SignalQuality = (long)(priv->Stats_SignalQuality * 5 + (long)priv->SignalQuality + 5) / 6;
- priv->Stats_RecvSignalPower = (long)(priv->Stats_RecvSignalPower * 5 + priv->RecvSignalPower - 1) / 6;
+ priv->Stats_SignalStrength =
+ TranslateToDbm8185((u8)SignalStrengthIndex);
+
+ /*
+ * We need more correct power of received packets and
+ * the "SignalStrength" of RxStats is beautified, so we
+ * record the correct power here.
+ */
- /* Figure out which antenna that received the last packet. */
- priv->LastRxPktAntenna = Antenna ? 1 : 0; /* 0: aux, 1: main. */
+ priv->Stats_SignalQuality = (long)(
+ priv->Stats_SignalQuality * 5 +
+ (long)priv->SignalQuality + 5) / 6;
+
+ priv->Stats_RecvSignalPower = (long)(
+ priv->Stats_RecvSignalPower * 5 +
+ priv->RecvSignalPower - 1) / 6;
+
+ /*
+ * Figure out which antenna received the last packet.
+ * 0: aux, 1: main
+ */
+ priv->LastRxPktAntenna = Antenna ? 1 : 0;
SwAntennaDiversityRxOk8185(dev, priv->SignalStrength);
}
if (first) {
if (!priv->rx_skb_complete) {
/* seems that HW sometimes fails to receive and
- doesn't provide the last descriptor */
+ * doesn't provide the last descriptor.
+ */
dev_kfree_skb_any(priv->rx_skb);
priv->stats.rxnolast++;
}
@@ -1428,15 +1469,16 @@ static void rtl8180_rx(struct net_device *dev)
priv->rx_skb_complete = 0;
priv->rx_skb->dev = dev;
} else {
- /* if we are here we should have already RXed
- * the first frame.
- * If we get here and the skb is not allocated then
- * we have just throw out garbage (skb not allocated)
- * and we are still rxing garbage....
- */
+ /* if we are here we should have already RXed the first
+ * frame.
+ * If we get here and the skb is not allocated then
+ * we have just thrown out garbage (skb not allocated)
+ * and we are still rxing garbage....
+ */
if (!priv->rx_skb_complete) {
- tmp_skb = dev_alloc_skb(priv->rx_skb->len+len+2);
+ tmp_skb = dev_alloc_skb(
+ priv->rx_skb->len + len + 2);
if (!tmp_skb)
goto drop;
@@ -1454,13 +1496,8 @@ static void rtl8180_rx(struct net_device *dev)
}
if (!priv->rx_skb_complete) {
- if (padding) {
- memcpy(skb_put(priv->rx_skb, len),
- (((unsigned char *)priv->rxbuffer->buf) + 2), len);
- } else {
- memcpy(skb_put(priv->rx_skb, len),
- priv->rxbuffer->buf, len);
- }
+ memcpy(skb_put(priv->rx_skb, len), ((unsigned char *)
+ priv->rxbuffer->buf) + (padding ? 2 : 0), len);
}
if (last && !priv->rx_skb_complete) {
@@ -1538,8 +1575,8 @@ static void rtl8180_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
int mode;
- struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *) skb->data;
- short morefrag = (h->frame_control) & IEEE80211_FCTL_MOREFRAGS;
+ struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *)skb->data;
+ bool morefrag = le16_to_cpu(h->frame_control) & IEEE80211_FCTL_MOREFRAGS;
unsigned long flags;
int priority;
@@ -1547,11 +1584,10 @@ static void rtl8180_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
rate = ieeerate2rtlrate(rate);
/*
- * This function doesn't require lock because we make
- * sure it's called with the tx_lock already acquired.
- * this come from the kernel's hard_xmit callback (through
- * the ieee stack, or from the try_wake_queue (again through
- * the ieee stack.
+ * This function doesn't require lock because we make sure it's called
+ * with the tx_lock already acquired.
+ * This comes from the kernel's hard_xmit callback (through the ieee
+ * stack), or from try_wake_queue (again through the ieee stack).
*/
priority = AC2Q(skb->priority);
spin_lock_irqsave(&priv->tx_lock, flags);
@@ -1613,55 +1649,6 @@ static int rtl8180_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-/* longpre 144+48 shortpre 72+24 */
-u16 rtl8180_len2duration(u32 len, short rate, short *ext)
-{
- u16 duration;
- u16 drift;
- *ext = 0;
-
- switch (rate) {
- case 0: /* 1mbps */
- *ext = 0;
- duration = ((len+4)<<4) / 0x2;
- drift = ((len+4)<<4) % 0x2;
- if (drift == 0)
- break;
- duration++;
- break;
- case 1: /* 2mbps */
- *ext = 0;
- duration = ((len+4)<<4) / 0x4;
- drift = ((len+4)<<4) % 0x4;
- if (drift == 0)
- break;
- duration++;
- break;
- case 2: /* 5.5mbps */
- *ext = 0;
- duration = ((len+4)<<4) / 0xb;
- drift = ((len+4)<<4) % 0xb;
- if (drift == 0)
- break;
- duration++;
- break;
- default:
- case 3: /* 11mbps */
- *ext = 0;
- duration = ((len+4)<<4) / 0x16;
- drift = ((len+4)<<4) % 0x16;
- if (drift == 0)
- break;
- duration++;
- if (drift > 6)
- break;
- *ext = 1;
- break;
- }
-
- return duration;
-}
-
static void rtl8180_prepare_beacon(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
@@ -1669,7 +1656,10 @@ static void rtl8180_prepare_beacon(struct net_device *dev)
u16 word = read_nic_word(dev, BcnItv);
word &= ~BcnItv_BcnItv; /* clear Bcn_Itv */
- word |= cpu_to_le16(priv->ieee80211->current_network.beacon_interval); /* 0x64; */
+
+ /* word |= 0x64; */
+ word |= cpu_to_le16(priv->ieee80211->current_network.beacon_interval);
+
write_nic_word(dev, BcnItv, word);
skb = ieee80211_get_beacon(priv->ieee80211);
@@ -1681,12 +1671,12 @@ static void rtl8180_prepare_beacon(struct net_device *dev)
}
/*
- * This function do the real dirty work: it enqueues a TX command
- * descriptor in the ring buffer, copyes the frame in a TX buffer
- * and kicks the NIC to ensure it does the DMA transfer.
+ * This function does the real dirty work: it enqueues a TX command descriptor
+ * in the ring buffer, copies the frame into a TX buffer and kicks the NIC to
+ * ensure it does the DMA transfer.
*/
short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
- short morefrag, short descfrag, int rate)
+ bool morefrag, short descfrag, int rate)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u32 *tail, *temp_tail;
@@ -1697,16 +1687,17 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
int buflen;
int count;
struct buffer *buflist;
- struct ieee80211_hdr_3addr *frag_hdr = (struct ieee80211_hdr_3addr *)txbuf;
+ struct ieee80211_hdr_3addr *frag_hdr =
+ (struct ieee80211_hdr_3addr *)txbuf;
u8 dest[ETH_ALEN];
- u8 bUseShortPreamble = 0;
- u8 bCTSEnable = 0;
- u8 bRTSEnable = 0;
- u16 Duration = 0;
- u16 RtsDur = 0;
- u16 ThisFrameTime = 0;
- u16 TxDescDuration = 0;
- bool ownbit_flag = false;
+ u8 bUseShortPreamble = 0;
+ u8 bCTSEnable = 0;
+ u8 bRTSEnable = 0;
+ u16 Duration = 0;
+ u16 RtsDur = 0;
+ u16 ThisFrameTime = 0;
+ u16 TxDescDuration = 0;
+ bool ownbit_flag = false;
switch (priority) {
case MANAGE_PRIORITY:
@@ -1756,74 +1747,79 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
break;
}
- memcpy(&dest, frag_hdr->addr1, ETH_ALEN);
- if (is_multicast_ether_addr(dest)) {
- Duration = 0;
- RtsDur = 0;
- bRTSEnable = 0;
+ memcpy(&dest, frag_hdr->addr1, ETH_ALEN);
+ if (is_multicast_ether_addr(dest)) {
+ Duration = 0;
+ RtsDur = 0;
+ bRTSEnable = 0;
+ bCTSEnable = 0;
+
+ ThisFrameTime = ComputeTxTime(len + sCrcLng,
+ rtl8180_rate2rate(rate), 0, bUseShortPreamble);
+ TxDescDuration = ThisFrameTime;
+ } else { /* Unicast packet */
+ u16 AckTime;
+
+ /* for Keep alive */
+ priv->NumTxUnicast++;
+
+ /* Figure out ACK rate according to BSS basic rate
+ * and Tx rate.
+ * AckCTSLng = 14, sent at 1 Mbps
+ */
+ AckTime = ComputeTxTime(14, 10, 0, 0);
+
+ if (((len + sCrcLng) > priv->rts) && priv->rts) { /* RTS/CTS. */
+ u16 RtsTime, CtsTime;
+ bRTSEnable = 1;
bCTSEnable = 0;
- ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate),
- 0, bUseShortPreamble);
- TxDescDuration = ThisFrameTime;
- } else { /* Unicast packet */
- u16 AckTime;
-
- /* YJ,add,080828,for Keep alive */
- priv->NumTxUnicast++;
-
- /* Figure out ACK rate according to BSS basic rate
- * and Tx rate. */
- AckTime = ComputeTxTime(14, 10, 0, 0); /* AckCTSLng = 14 use 1M bps send */
-
- if (((len + sCrcLng) > priv->rts) && priv->rts) { /* RTS/CTS. */
- u16 RtsTime, CtsTime;
- /* u16 CtsRate; */
- bRTSEnable = 1;
- bCTSEnable = 0;
-
- /* Rate and time required for RTS. */
- RtsTime = ComputeTxTime(sAckCtsLng/8, priv->ieee80211->basic_rate, 0, 0);
- /* Rate and time required for CTS. */
- CtsTime = ComputeTxTime(14, 10, 0, 0); /* AckCTSLng = 14 use 1M bps send */
-
- /* Figure out time required to transmit this frame. */
- ThisFrameTime = ComputeTxTime(len + sCrcLng,
- rtl8180_rate2rate(rate),
- 0,
- bUseShortPreamble);
-
- /* RTS-CTS-ThisFrame-ACK. */
- RtsDur = CtsTime + ThisFrameTime + AckTime + 3*aSifsTime;
-
- TxDescDuration = RtsTime + RtsDur;
- } else { /* Normal case. */
- bCTSEnable = 0;
- bRTSEnable = 0;
- RtsDur = 0;
-
- ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate),
- 0, bUseShortPreamble);
- TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
- }
+ /* Rate and time required for RTS. */
+ RtsTime = ComputeTxTime(sAckCtsLng / 8,
+ priv->ieee80211->basic_rate, 0, 0);
- if (!(frag_hdr->frame_control & IEEE80211_FCTL_MOREFRAGS)) {
- /* ThisFrame-ACK. */
- Duration = aSifsTime + AckTime;
- } else { /* One or more fragments remained. */
- u16 NextFragTime;
- NextFragTime = ComputeTxTime(len + sCrcLng, /* pretend following packet length equal current packet */
- rtl8180_rate2rate(rate),
- 0,
- bUseShortPreamble);
-
- /* ThisFrag-ACk-NextFrag-ACK. */
- Duration = NextFragTime + 3*aSifsTime + 2*AckTime;
- }
+ /* Rate and time required for CTS.
+ * AckCTSLng = 14, sent at 1 Mbps
+ */
+ CtsTime = ComputeTxTime(14, 10, 0, 0);
+
+ /* Figure out time required to transmit this frame. */
+ ThisFrameTime = ComputeTxTime(len + sCrcLng,
+ rtl8180_rate2rate(rate), 0,
+ bUseShortPreamble);
+
+ /* RTS-CTS-ThisFrame-ACK. */
+ RtsDur = CtsTime + ThisFrameTime +
+ AckTime + 3 * aSifsTime;
+
+ TxDescDuration = RtsTime + RtsDur;
+ } else { /* Normal case. */
+ bCTSEnable = 0;
+ bRTSEnable = 0;
+ RtsDur = 0;
- } /* End of Unicast packet */
+ ThisFrameTime = ComputeTxTime(len + sCrcLng,
+ rtl8180_rate2rate(rate), 0, bUseShortPreamble);
+ TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
+ }
+
+ if (!(le16_to_cpu(frag_hdr->frame_control) & IEEE80211_FCTL_MOREFRAGS)) {
+ /* ThisFrame-ACK. */
+ Duration = aSifsTime + AckTime;
+ } else { /* One or more fragments remained. */
+ u16 NextFragTime;
+
+ /* pretend following packet length = current packet */
+ NextFragTime = ComputeTxTime(len + sCrcLng,
+ rtl8180_rate2rate(rate), 0, bUseShortPreamble);
+
+ /* ThisFrag-ACK-NextFrag-ACK. */
+ Duration = NextFragTime + 3 * aSifsTime + 2 * AckTime;
+ }
+
+ } /* End of Unicast packet */
- frag_hdr->duration_id = Duration;
+ frag_hdr->duration_id = Duration;
buflen = priv->txbuffsize;
remain = len;
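For reviewers checking the reindented duration math above: everything reduces to sums of airtimes obtained from ComputeTxTime(), separated by SIFS gaps (aSifsTime). A compact, hedged restatement with hypothetical local names:

    /* Sketch of the duration bookkeeping above (hypothetical locals;
     * frame_time, rts_time, cts_time, ack_time, next_frag_time are
     * ComputeTxTime() results).
     */
    u16 rts_dur, txdesc_dur, nav_dur;

    if (use_rts) {
            /* NAV set by RTS: CTS + frame + ACK plus three SIFS gaps. */
            rts_dur = cts_time + frame_time + ack_time + 3 * aSifsTime;
            txdesc_dur = rts_time + rts_dur;        /* whole exchange */
    } else {
            rts_dur = 0;
            txdesc_dur = frame_time + aSifsTime + ack_time;
    }

    if (more_frags)         /* ThisFrag-ACK-NextFrag-ACK */
            nav_dur = next_frag_time + 3 * aSifsTime + 2 * ack_time;
    else                    /* ThisFrame-ACK */
            nav_dur = aSifsTime + ack_time;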
@@ -1832,7 +1828,8 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
while (remain != 0) {
mb();
if (!buflist) {
- DMESGE("TX buffer error, cannot TX frames. pri %d.", priority);
+ DMESGE("TX buffer error, cannot TX frames. pri %d.",
+ priority);
return -1;
}
buf = buflist->buf;
@@ -1851,43 +1848,43 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
*(tail+6) = 0;
*(tail+7) = 0;
- /* FIXME: this should be triggered by HW encryption parameters.*/
+ /* FIXME: should be triggered by HW encryption parameters. */
*tail |= (1<<15); /* no encrypt */
if (remain == len && !descfrag) {
ownbit_flag = false;
- *tail = *tail | (1<<29); /* fist segment of the packet */
+ *tail = *tail | (1 << 29); /* first segment of packet */
*tail = *tail | (len);
} else {
ownbit_flag = true;
}
for (i = 0; i < buflen && remain > 0; i++, remain--) {
- ((u8 *)buf)[i] = txbuf[i]; /* copy data into descriptor pointed DMAble buffer */
+ /* copy data into the DMA-able buffer pointed to by the descriptor */
+ ((u8 *)buf)[i] = txbuf[i];
+
if (remain == 4 && i+4 >= buflen)
break;
/* ensure the last desc has at least 4 bytes payload */
-
}
txbuf = txbuf + i;
*(tail+3) = *(tail+3) & ~0xfff;
*(tail+3) = *(tail+3) | i; /* buffer length */
- /* Use short preamble or not */
- if (priv->ieee80211->current_network.capability&WLAN_CAPABILITY_SHORT_PREAMBLE)
- if (priv->plcp_preamble_mode == 1 && rate != 0) /* short mode now, not long! */
- ; /* *tail |= (1<<16); */ /* enable short preamble mode. */
if (bCTSEnable)
*tail |= (1<<18);
if (bRTSEnable) { /* rts enable */
- *tail |= ((ieeerate2rtlrate(priv->ieee80211->basic_rate))<<19); /* RTS RATE */
+ /* RTS RATE */
+ *tail |= (ieeerate2rtlrate(
+ priv->ieee80211->basic_rate) << 19);
+
*tail |= (1<<23); /* rts enable */
*(tail+1) |= (RtsDur&0xffff); /* RTS Duration */
}
*(tail+3) |= ((TxDescDuration&0xffff)<<16); /* DURATION */
- /* *(tail+3) |= (0xe6<<16); */
- *(tail+5) |= (11<<8); /* (priv->retry_data<<8); */ /* retry lim; */
+
+ *(tail + 5) |= (11 << 8); /* retry lim; */
*tail = *tail | ((rate&0xf) << 24);
@@ -1901,7 +1898,8 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
wmb();
if (ownbit_flag)
- *tail = *tail | (1<<31); /* descriptor ready to be txed */
+ /* descriptor ready to be txed */
+ *tail |= (1 << 31);
if ((tail - begin)/8 == count-1)
tail = begin;
@@ -1983,7 +1981,8 @@ static void rtl8180_rq_tx_ack(struct net_device *dev)
struct r8180_priv *priv = ieee80211_priv(dev);
- write_nic_byte(dev, CONFIG4, read_nic_byte(dev, CONFIG4) | CONFIG4_PWRMGT);
+ write_nic_byte(dev, CONFIG4,
+ read_nic_byte(dev, CONFIG4) | CONFIG4_PWRMGT);
priv->ack_tx_to_ieee = 1;
}
@@ -2031,7 +2030,8 @@ static void rtl8180_hw_wakeup(struct net_device *dev)
struct r8180_priv *priv = ieee80211_priv(dev);
spin_lock_irqsave(&priv->ps_lock, flags);
- write_nic_byte(dev, CONFIG4, read_nic_byte(dev, CONFIG4) & ~CONFIG4_PWRMGT);
+ write_nic_byte(dev, CONFIG4,
+ read_nic_byte(dev, CONFIG4) & ~CONFIG4_PWRMGT);
if (priv->rf_wakeup)
priv->rf_wakeup(dev);
spin_unlock_irqrestore(&priv->ps_lock, flags);
@@ -2063,13 +2063,13 @@ static void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
tl -= MSECS(4+16+7);
/*
- * If the interval in witch we are requested to sleep is too
+ * If the interval in which we are requested to sleep is too
* short then give up and remain awake
*/
if (((tl >= rb) && (tl-rb) <= MSECS(MIN_SLEEP_TIME))
|| ((rb > tl) && (rb-tl) < MSECS(MIN_SLEEP_TIME))) {
spin_unlock_irqrestore(&priv->ps_lock, flags);
- printk("too short to sleep\n");
+ netdev_warn(dev, "too short to sleep\n");
return;
}
@@ -2078,7 +2078,8 @@ static void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
priv->DozePeriodInPast2Sec += jiffies_to_msecs(tmp);
/* as tl may be less than rb */
- queue_delayed_work(priv->ieee80211->wq, &priv->ieee80211->hw_wakeup_wq, tmp);
+ queue_delayed_work(priv->ieee80211->wq,
+ &priv->ieee80211->hw_wakeup_wq, tmp);
}
/*
* If we suspect the TimerInt is gone beyond tl
@@ -2095,16 +2096,49 @@ static void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
spin_unlock_irqrestore(&priv->ps_lock, flags);
}
+static void rtl8180_wmm_single_param_update(struct net_device *dev,
+ u8 mode, AC_CODING eACI, PAC_PARAM param)
+{
+ u8 u1bAIFS;
+ u32 u4bAcParam;
+
+ /* Retrieve parameters to update. */
+ /* Mode G/A: slotTimeTimer = 9; Mode B: 20 */
+ u1bAIFS = param->f.AciAifsn.f.AIFSN * ((mode & IEEE_G) == IEEE_G ?
+ 9 : 20) + aSifsTime;
+ u4bAcParam = (((u32)param->f.TXOPLimit << AC_PARAM_TXOP_LIMIT_OFFSET) |
+ ((u32)param->f.Ecw.f.ECWmax << AC_PARAM_ECW_MAX_OFFSET) |
+ ((u32)param->f.Ecw.f.ECWmin << AC_PARAM_ECW_MIN_OFFSET) |
+ ((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
+
+ switch (eACI) {
+ case AC1_BK:
+ write_nic_dword(dev, AC_BK_PARAM, u4bAcParam);
+ return;
+ case AC0_BE:
+ write_nic_dword(dev, AC_BE_PARAM, u4bAcParam);
+ return;
+ case AC2_VI:
+ write_nic_dword(dev, AC_VI_PARAM, u4bAcParam);
+ return;
+ case AC3_VO:
+ write_nic_dword(dev, AC_VO_PARAM, u4bAcParam);
+ return;
+ default:
+ pr_warn("SetHwReg8185(): invalid ACI: %d!\n", eACI);
+ return;
+ }
+}
+
static void rtl8180_wmm_param_update(struct work_struct *work)
{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wmm_param_update_wq);
+ struct ieee80211_device *ieee = container_of(work,
+ struct ieee80211_device, wmm_param_update_wq);
struct net_device *dev = ieee->dev;
u8 *ac_param = (u8 *)(ieee->current_network.wmm_param);
u8 mode = ieee->current_network.mode;
- AC_CODING eACI;
- AC_PARAM AcParam;
- PAC_PARAM pAcParam;
- u8 i;
+ AC_CODING eACI;
+ AC_PARAM AcParam;
if (!ieee->current_network.QoS_Enable) {
/* legacy ac_xx_param update */
@@ -2114,83 +2148,26 @@ static void rtl8180_wmm_param_update(struct work_struct *work)
AcParam.f.Ecw.f.ECWmin = 3; /* Follow 802.11 CWmin. */
AcParam.f.Ecw.f.ECWmax = 7; /* Follow 802.11 CWmax. */
AcParam.f.TXOPLimit = 0;
+
for (eACI = 0; eACI < AC_MAX; eACI++) {
AcParam.f.AciAifsn.f.ACI = (u8)eACI;
- {
- u8 u1bAIFS;
- u32 u4bAcParam;
- pAcParam = (PAC_PARAM)(&AcParam);
- /* Retrieve parameters to update. */
- u1bAIFS = pAcParam->f.AciAifsn.f.AIFSN * (((mode&IEEE_G) == IEEE_G) ? 9 : 20) + aSifsTime;
- u4bAcParam = ((((u32)(pAcParam->f.TXOPLimit))<<AC_PARAM_TXOP_LIMIT_OFFSET)|
- (((u32)(pAcParam->f.Ecw.f.ECWmax))<<AC_PARAM_ECW_MAX_OFFSET)|
- (((u32)(pAcParam->f.Ecw.f.ECWmin))<<AC_PARAM_ECW_MIN_OFFSET)|
- (((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET));
- switch (eACI) {
- case AC1_BK:
- write_nic_dword(dev, AC_BK_PARAM, u4bAcParam);
- break;
- case AC0_BE:
- write_nic_dword(dev, AC_BE_PARAM, u4bAcParam);
- break;
- case AC2_VI:
- write_nic_dword(dev, AC_VI_PARAM, u4bAcParam);
- break;
- case AC3_VO:
- write_nic_dword(dev, AC_VO_PARAM, u4bAcParam);
- break;
- default:
- pr_warn("SetHwReg8185():invalid ACI: %d!\n",
- eACI);
- break;
- }
- }
+
+ rtl8180_wmm_single_param_update(dev, mode, eACI,
+ (PAC_PARAM)&AcParam);
}
return;
}
- for (i = 0; i < AC_MAX; i++) {
- /* AcParam.longData = 0; */
- pAcParam = (AC_PARAM *)ac_param;
- {
- AC_CODING eACI;
- u8 u1bAIFS;
- u32 u4bAcParam;
-
- /* Retrieve parameters to update. */
- eACI = pAcParam->f.AciAifsn.f.ACI;
- /* Mode G/A: slotTimeTimer = 9; Mode B: 20 */
- u1bAIFS = pAcParam->f.AciAifsn.f.AIFSN * (((mode&IEEE_G) == IEEE_G) ? 9 : 20) + aSifsTime;
- u4bAcParam = ((((u32)(pAcParam->f.TXOPLimit)) << AC_PARAM_TXOP_LIMIT_OFFSET) |
- (((u32)(pAcParam->f.Ecw.f.ECWmax)) << AC_PARAM_ECW_MAX_OFFSET) |
- (((u32)(pAcParam->f.Ecw.f.ECWmin)) << AC_PARAM_ECW_MIN_OFFSET) |
- (((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET));
-
- switch (eACI) {
- case AC1_BK:
- write_nic_dword(dev, AC_BK_PARAM, u4bAcParam);
- break;
- case AC0_BE:
- write_nic_dword(dev, AC_BE_PARAM, u4bAcParam);
- break;
- case AC2_VI:
- write_nic_dword(dev, AC_VI_PARAM, u4bAcParam);
- break;
- case AC3_VO:
- write_nic_dword(dev, AC_VO_PARAM, u4bAcParam);
- break;
- default:
- pr_warn("SetHwReg8185(): invalid ACI: %d !\n",
- eACI);
- break;
- }
- }
- ac_param += (sizeof(AC_PARAM));
+ for (eACI = 0; eACI < AC_MAX; eACI++) {
+ rtl8180_wmm_single_param_update(dev, mode,
+ ((PAC_PARAM)ac_param)->f.AciAifsn.f.ACI,
+ (PAC_PARAM)ac_param);
+
+ ac_param += sizeof(AC_PARAM);
}
}
void rtl8180_restart_wq(struct work_struct *work);
-/* void rtl8180_rq_tx_ack(struct work_struct *work); */
void rtl8180_watch_dog_wq(struct work_struct *work);
void rtl8180_hw_wakeup_wq(struct work_struct *work);
void rtl8180_hw_sleep_wq(struct work_struct *work);
@@ -2208,7 +2185,8 @@ static void watch_dog_adaptive(unsigned long data)
/* Tx High Power Mechanism. */
if (CheckHighPower((struct net_device *)data))
- queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->tx_pw_wq);
+ queue_work(priv->ieee80211->wq,
+ (void *)&priv->ieee80211->tx_pw_wq);
/* Tx Power Tracking on 87SE. */
if (CheckTxPwrTracking((struct net_device *)data))
@@ -2216,27 +2194,59 @@ static void watch_dog_adaptive(unsigned long data)
/* Perform DIG immediately. */
if (CheckDig((struct net_device *)data))
- queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->hw_dig_wq);
+ queue_work(priv->ieee80211->wq,
+ (void *)&priv->ieee80211->hw_dig_wq);
+
rtl8180_watch_dog((struct net_device *)data);
- queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->GPIOChangeRFWorkItem);
+ queue_work(priv->ieee80211->wq,
+ (void *)&priv->ieee80211->GPIOChangeRFWorkItem);
+
+ priv->watch_dog_timer.expires = jiffies +
+ MSECS(IEEE80211_WATCH_DOG_TIME);
- priv->watch_dog_timer.expires = jiffies + MSECS(IEEE80211_WATCH_DOG_TIME);
add_timer(&priv->watch_dog_timer);
}
-static CHANNEL_LIST ChannelPlan[] = {
- {{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64},19}, /* FCC */
- {{1,2,3,4,5,6,7,8,9,10,11},11}, /* IC */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* ETSI */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* Spain. Change to ETSI. */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* France. Change to ETSI. */
- {{14,36,40,44,48,52,56,60,64},9}, /* MKK */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,14, 36,40,44,48,52,56,60,64},22},/* MKK1 */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* Israel. */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,34,38,42,46},17}, /* For 11a , TELEC */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14}, /* For Global Domain. 1-11:active scan, 12-14 passive scan. //+YJ, 080626 */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13},13} /* world wide 13: ch1~ch11 active scan, ch12~13 passive //lzm add 080826 */
+static struct rtl8187se_channel_list channel_plan_list[] = {
+ /* FCC */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40,
+ 44, 48, 52, 56, 60, 64}, 19},
+
+ /* IC */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11},
+
+ /* ETSI */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
+ 44, 48, 52, 56, 60, 64}, 21},
+
+ /* Spain. Change to ETSI. */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
+ 44, 48, 52, 56, 60, 64}, 21},
+
+ /* France. Change to ETSI. */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
+ 44, 48, 52, 56, 60, 64}, 21},
+
+ /* MKK */
+ {{14, 36, 40, 44, 48, 52, 56, 60, 64}, 9},
+
+ /* MKK1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36,
+ 40, 44, 48, 52, 56, 60, 64}, 22},
+
+ /* Israel. */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
+ 44, 48, 52, 56, 60, 64}, 21},
+
+ /* For 11a, TELEC */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 34, 38, 42, 46}, 17},
+
+ /* For Global Domain. 1-11 active, 12-14 passive. */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14},
+
+ /* world wide 13: ch1~ch11 active, ch12~13 passive */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}
};
static void rtl8180_set_channel_map(u8 channel_plan,
@@ -2244,7 +2254,6 @@ static void rtl8180_set_channel_map(u8 channel_plan,
{
int i;
- /* lzm add 080826 */
ieee->MinPassiveChnlNum = MAX_CHANNEL_NUMBER+1;
ieee->IbssStartChnl = 0;
@@ -2261,13 +2270,13 @@ static void rtl8180_set_channel_map(u8 channel_plan,
{
Dot11d_Init(ieee);
ieee->bGlobalDomain = false;
- if (ChannelPlan[channel_plan].Len != 0) {
+ if (channel_plan_list[channel_plan].len != 0) {
/* Clear old channel map */
memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
/* Set new channel map */
- for (i = 0; i < ChannelPlan[channel_plan].Len; i++) {
- if (ChannelPlan[channel_plan].Channel[i] <= 14)
- GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
+ for (i = 0; i < channel_plan_list[channel_plan].len; i++) {
+ if (channel_plan_list[channel_plan].channel[i] <= 14)
+ GET_DOT11D_INFO(ieee)->channel_map[channel_plan_list[channel_plan].channel[i]] = 1;
}
}
break;
@@ -2279,7 +2288,7 @@ static void rtl8180_set_channel_map(u8 channel_plan,
ieee->bGlobalDomain = true;
break;
}
- case COUNTRY_CODE_WORLD_WIDE_13_INDEX:/* lzm add 080826 */
+ case COUNTRY_CODE_WORLD_WIDE_13_INDEX:
{
ieee->MinPassiveChnlNum = 12;
ieee->IbssStartChnl = 10;
@@ -2299,19 +2308,17 @@ static void rtl8180_set_channel_map(u8 channel_plan,
void GPIOChangeRFWorkItemCallBack(struct work_struct *work);
-/* YJ,add,080828 */
-static void rtl8180_statistics_init(struct Stats *pstats)
+static void rtl8180_statistics_init(struct stats *pstats)
{
- memset(pstats, 0, sizeof(struct Stats));
+ memset(pstats, 0, sizeof(struct stats));
}
-static void rtl8180_link_detect_init(plink_detect_t plink_detect)
+static void rtl8180_link_detect_init(struct link_detect_t *plink_detect)
{
- memset(plink_detect, 0, sizeof(link_detect_t));
- plink_detect->SlotNum = DEFAULT_SLOT_NUM;
+ memset(plink_detect, 0, sizeof(struct link_detect_t));
+ plink_detect->slot_num = DEFAULT_SLOT_NUM;
}
-/* YJ,add,080828,end */
static void rtl8187se_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
struct net_device *dev = eeprom->data;
@@ -2360,7 +2367,7 @@ static short rtl8180_init(struct net_device *dev)
eeprom_93cx6_read(&eeprom, EEPROM_COUNTRY_CODE>>1, &eeprom_val);
priv->channel_plan = eeprom_val & 0xFF;
if (priv->channel_plan > COUNTRY_CODE_GLOBAL_DOMAIN) {
- printk("rtl8180_init:Error channel plan! Set to default.\n");
+ netdev_err(dev, "rtl8180_init: Invalid channel plan! Set to default.\n");
priv->channel_plan = 0;
}
@@ -2385,7 +2392,8 @@ static short rtl8180_init(struct net_device *dev)
rtl8180_link_detect_init(&priv->link_detect);
priv->ack_tx_to_ieee = 0;
- priv->ieee80211->current_network.beacon_interval = DEFAULT_BEACONINTERVAL;
+ priv->ieee80211->current_network.beacon_interval =
+ DEFAULT_BEACONINTERVAL;
priv->ieee80211->iw_mode = IW_MODE_INFRA;
priv->ieee80211->softmac_features = IEEE_SOFTMAC_SCAN |
IEEE_SOFTMAC_ASSOCIATE | IEEE_SOFTMAC_PROBERQ |
@@ -2410,12 +2418,12 @@ static short rtl8180_init(struct net_device *dev)
priv->bInactivePs = true; /* false; */
priv->ieee80211->bInactivePs = priv->bInactivePs;
priv->bSwRfProcessing = false;
- priv->eRFPowerState = eRfOff;
+ priv->eRFPowerState = RF_OFF;
priv->RfOffReason = 0;
- priv->LedStrategy = SW_LED_MODE0;
- priv->TxPollingTimes = 0; /* lzm add 080826 */
+ priv->led_strategy = SW_LED_MODE0;
+ priv->TxPollingTimes = 0;
priv->bLeisurePs = true;
- priv->dot11PowerSaveMode = eActive;
+ priv->dot11PowerSaveMode = ACTIVE;
priv->AdMinCheckPeriod = 5;
priv->AdMaxCheckPeriod = 10;
priv->AdMaxRxSsThreshold = 30; /* 60->30 */
@@ -2431,7 +2439,8 @@ static short rtl8180_init(struct net_device *dev)
priv->AdRxSsBeforeSwitched = 0;
init_timer(&priv->SwAntennaDiversityTimer);
priv->SwAntennaDiversityTimer.data = (unsigned long)dev;
- priv->SwAntennaDiversityTimer.function = (void *)SwAntennaDiversityTimerCallback;
+ priv->SwAntennaDiversityTimer.function =
+ (void *)SwAntennaDiversityTimerCallback;
priv->bDigMechanism = true;
priv->InitialGain = 6;
priv->bXtalCalibration = false;
@@ -2440,7 +2449,8 @@ static short rtl8180_init(struct net_device *dev)
priv->bTxPowerTrack = false;
priv->ThermalMeter = 0;
priv->FalseAlarmRegValue = 0;
- priv->RegDigOfdmFaUpTh = 0xc; /* Upper threshold of OFDM false alarm, which is used in DIG. */
- priv->RegDigOfdmFaUpTh = 0xc; /* Upper threshold of OFDM false alarm, which is used in DIG. */
+ priv->RegDigOfdmFaUpTh = 0xc; /* Upper threshold of OFDM false alarm,
+ * which is used in DIG. */
priv->DIG_NumberFallbackVote = 0;
priv->DIG_NumberUpgradeVote = 0;
priv->LastSignalStrengthInPercent = 0;
@@ -2585,7 +2595,8 @@ static short rtl8180_init(struct net_device *dev)
priv->bSwAntennaDiverity = priv->EEPROMSwAntennaDiversity;
else
/* 1:disable antenna diversity, 2: enable antenna diversity. */
- priv->bSwAntennaDiverity = priv->RegSwAntennaDiversityMechanism == 2;
+ priv->bSwAntennaDiverity =
+ priv->RegSwAntennaDiversityMechanism == 2;
if (priv->RegDefaultAntenna == 0)
/* 0: default from EEPROM. */
@@ -2669,7 +2680,8 @@ static short rtl8180_init(struct net_device *dev)
TX_BEACON_RING_ADDR))
return -ENOMEM;
- if (request_irq(dev->irq, rtl8180_interrupt, IRQF_SHARED, dev->name, dev)) {
+ if (request_irq(dev->irq, rtl8180_interrupt,
+ IRQF_SHARED, dev->name, dev)) {
DMESGE("Error allocating IRQ %d", dev->irq);
return -1;
} else {
@@ -2718,8 +2730,6 @@ void rtl8180_set_hw_wep(struct net_device *dev)
void rtl8185_rf_pins_enable(struct net_device *dev)
{
- /* u16 tmp; */
- /* tmp = read_nic_word(dev, RFPinsEnable); */
write_nic_word(dev, RFPinsEnable, 0x1fff); /* | tmp); */
}
@@ -2768,16 +2778,11 @@ static void rtl8185_write_phy(struct net_device *dev, u8 adr, u32 data)
phyw = ((data<<8) | adr);
- /* Note that, we must write 0xff7c after 0x7d-0x7f to write BB register. */
+ /* Note: we must write 0xff7c after 0x7d-0x7f to write BB register. */
write_nic_byte(dev, 0x7f, ((phyw & 0xff000000) >> 24));
write_nic_byte(dev, 0x7e, ((phyw & 0x00ff0000) >> 16));
write_nic_byte(dev, 0x7d, ((phyw & 0x0000ff00) >> 8));
write_nic_byte(dev, 0x7c, ((phyw & 0x000000ff)));
-
- /* this is ok to fail when we write AGC table. check for AGC table might be
- * done by masking with 0x7f instead of 0xff
- */
- /* if (phyr != (data&0xff)) DMESGW("Phy write timeout %x %x %x", phyr, data, adr); */
}
inline void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data)
@@ -2812,9 +2817,9 @@ void rtl8180_start_tx_beacon(struct net_device *dev)
word = read_nic_word(dev, BintrItv);
word &= ~BintrItv_BintrItv;
word |= 1000; /* priv->ieee80211->current_network.beacon_interval *
- ((priv->txbeaconcount > 1)?(priv->txbeaconcount-1):1);
- // FIXME: check if correct ^^ worked with 0x3e8;
- */
+ * ((priv->txbeaconcount > 1)?(priv->txbeaconcount-1):1);
+ * FIXME: check if correct ^^ worked with 0x3e8;
+ */
write_nic_word(dev, BintrItv, word);
rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
@@ -2833,7 +2838,7 @@ static struct net_device_stats *rtl8180_stats(struct net_device *dev)
* Change current and default preamble mode.
*/
static bool MgntActSet_802_11_PowerSaveMode(struct r8180_priv *priv,
- RT_PS_MODE rtPsMode)
+ enum rt_ps_mode rtPsMode)
{
/* Currently, we do not change power save mode on IBSS mode. */
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
@@ -2846,25 +2851,26 @@ static bool MgntActSet_802_11_PowerSaveMode(struct r8180_priv *priv,
static void LeisurePSEnter(struct r8180_priv *priv)
{
- if (priv->bLeisurePs) {
+ if (priv->bLeisurePs)
if (priv->ieee80211->ps == IEEE80211_PS_DISABLED)
/* IEEE80211_PS_ENABLE */
- MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST);
- }
+ MgntActSet_802_11_PowerSaveMode(priv,
+ IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST);
}
static void LeisurePSLeave(struct r8180_priv *priv)
{
- if (priv->bLeisurePs) {
+ if (priv->bLeisurePs)
if (priv->ieee80211->ps != IEEE80211_PS_DISABLED)
- MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_DISABLED);
- }
+ MgntActSet_802_11_PowerSaveMode(
+ priv, IEEE80211_PS_DISABLED);
}
void rtl8180_hw_wakeup_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, hw_wakeup_wq);
+ struct ieee80211_device *ieee = container_of(
+ dwork, struct ieee80211_device, hw_wakeup_wq);
struct net_device *dev = ieee->dev;
rtl8180_hw_wakeup(dev);
@@ -2873,7 +2879,8 @@ void rtl8180_hw_wakeup_wq(struct work_struct *work)
void rtl8180_hw_sleep_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, hw_sleep_wq);
+ struct ieee80211_device *ieee = container_of(
+ dwork, struct ieee80211_device, hw_sleep_wq);
struct net_device *dev = ieee->dev;
rtl8180_hw_sleep_down(dev);
@@ -2890,23 +2897,30 @@ static void MgntLinkKeepAlive(struct r8180_priv *priv)
*/
if ((priv->keepAliveLevel == 2) ||
- (priv->link_detect.LastNumTxUnicast == priv->NumTxUnicast &&
- priv->link_detect.LastNumRxUnicast == priv->ieee80211->NumRxUnicast)
+ (priv->link_detect.last_num_tx_unicast ==
+ priv->NumTxUnicast &&
+ priv->link_detect.last_num_rx_unicast ==
+ priv->ieee80211->NumRxUnicast)
) {
- priv->link_detect.IdleCount++;
+ priv->link_detect.idle_count++;
/*
- * Send a Keep-Alive packet packet to AP if we had been idle for a while.
+ * Send a Keep-Alive packet to AP if we had
+ * been idle for a while.
*/
- if (priv->link_detect.IdleCount >= ((KEEP_ALIVE_INTERVAL / CHECK_FOR_HANG_PERIOD)-1)) {
- priv->link_detect.IdleCount = 0;
- ieee80211_sta_ps_send_null_frame(priv->ieee80211, false);
+ if (priv->link_detect.idle_count >=
+ KEEP_ALIVE_INTERVAL /
+ CHECK_FOR_HANG_PERIOD - 1) {
+ priv->link_detect.idle_count = 0;
+ ieee80211_sta_ps_send_null_frame(
+ priv->ieee80211, false);
}
} else {
- priv->link_detect.IdleCount = 0;
+ priv->link_detect.idle_count = 0;
}
- priv->link_detect.LastNumTxUnicast = priv->NumTxUnicast;
- priv->link_detect.LastNumRxUnicast = priv->ieee80211->NumRxUnicast;
+ priv->link_detect.last_num_tx_unicast = priv->NumTxUnicast;
+ priv->link_detect.last_num_rx_unicast =
+ priv->ieee80211->NumRxUnicast;
}
}
@@ -2922,36 +2936,42 @@ void rtl8180_watch_dog(struct net_device *dev)
if ((priv->ieee80211->iw_mode != IW_MODE_ADHOC) &&
(priv->ieee80211->state == IEEE80211_NOLINK) &&
(priv->ieee80211->beinretry == false) &&
- (priv->eRFPowerState == eRfOn))
+ (priv->eRFPowerState == RF_ON))
IPSEnter(dev);
}
- /* YJ,add,080828,for link state check */
- if ((priv->ieee80211->state == IEEE80211_LINKED) && (priv->ieee80211->iw_mode == IW_MODE_INFRA)) {
- SlotIndex = (priv->link_detect.SlotIndex++) % priv->link_detect.SlotNum;
- priv->link_detect.RxFrameNum[SlotIndex] = priv->ieee80211->NumRxDataInPeriod + priv->ieee80211->NumRxBcnInPeriod;
- for (i = 0; i < priv->link_detect.SlotNum; i++)
- TotalRxNum += priv->link_detect.RxFrameNum[i];
+ if ((priv->ieee80211->state == IEEE80211_LINKED) &&
+ (priv->ieee80211->iw_mode == IW_MODE_INFRA)) {
+ SlotIndex = (priv->link_detect.slot_index++) %
+ priv->link_detect.slot_num;
+
+ priv->link_detect.rx_frame_num[SlotIndex] =
+ priv->ieee80211->NumRxDataInPeriod +
+ priv->ieee80211->NumRxBcnInPeriod;
+
+ for (i = 0; i < priv->link_detect.slot_num; i++)
+ TotalRxNum += priv->link_detect.rx_frame_num[i];
if (TotalRxNum == 0) {
priv->ieee80211->state = IEEE80211_ASSOCIATING;
- queue_work(priv->ieee80211->wq, &priv->ieee80211->associate_procedure_wq);
+ queue_work(priv->ieee80211->wq,
+ &priv->ieee80211->associate_procedure_wq);
}
}
- /* YJ,add,080828,for KeepAlive */
MgntLinkKeepAlive(priv);
- /* YJ,add,080828,for LPS */
LeisurePSLeave(priv);
if (priv->ieee80211->state == IEEE80211_LINKED) {
- priv->link_detect.NumRxOkInPeriod = priv->ieee80211->NumRxDataInPeriod;
- if (priv->link_detect.NumRxOkInPeriod > 666 ||
- priv->link_detect.NumTxOkInPeriod > 666) {
+ priv->link_detect.num_rx_ok_in_period =
+ priv->ieee80211->NumRxDataInPeriod;
+ if (priv->link_detect.num_rx_ok_in_period > 666 ||
+ priv->link_detect.num_tx_ok_in_period > 666) {
bBusyTraffic = true;
}
- if (((priv->link_detect.NumRxOkInPeriod + priv->link_detect.NumTxOkInPeriod) > 8)
- || (priv->link_detect.NumRxOkInPeriod > 2)) {
+ if ((priv->link_detect.num_rx_ok_in_period +
+ priv->link_detect.num_tx_ok_in_period > 8)
+ || (priv->link_detect.num_rx_ok_in_period > 2)) {
bEnterPS = false;
} else
bEnterPS = true;
@@ -2962,9 +2982,9 @@ void rtl8180_watch_dog(struct net_device *dev)
LeisurePSLeave(priv);
} else
LeisurePSLeave(priv);
- priv->link_detect.bBusyTraffic = bBusyTraffic;
- priv->link_detect.NumRxOkInPeriod = 0;
- priv->link_detect.NumTxOkInPeriod = 0;
+ priv->link_detect.b_busy_traffic = bBusyTraffic;
+ priv->link_detect.num_rx_ok_in_period = 0;
+ priv->link_detect.num_tx_ok_in_period = 0;
priv->ieee80211->NumRxDataInPeriod = 0;
priv->ieee80211->NumRxBcnInPeriod = 0;
}
@@ -3047,15 +3067,17 @@ int rtl8180_down(struct net_device *dev)
cancel_delayed_work(&priv->ieee80211->hw_dig_wq);
cancel_delayed_work(&priv->ieee80211->tx_pw_wq);
del_timer_sync(&priv->SwAntennaDiversityTimer);
- SetZebraRFPowerState8185(dev, eRfOff);
- memset(&(priv->ieee80211->current_network), 0, sizeof(struct ieee80211_network));
+ SetZebraRFPowerState8185(dev, RF_OFF);
+ memset(&priv->ieee80211->current_network,
+ 0, sizeof(struct ieee80211_network));
priv->ieee80211->state = IEEE80211_NOLINK;
return 0;
}
void rtl8180_restart_wq(struct work_struct *work)
{
- struct r8180_priv *priv = container_of(work, struct r8180_priv, reset_wq);
+ struct r8180_priv *priv = container_of(
+ work, struct r8180_priv, reset_wq);
struct net_device *dev = priv->dev;
down(&priv->wx_sem);
@@ -3116,7 +3138,8 @@ static int r8180_set_mac_adr(struct net_device *dev, void *mac)
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
- memcpy(priv->ieee80211->current_network.bssid, dev->dev_addr, ETH_ALEN);
+ memcpy(priv->ieee80211->current_network.bssid,
+ dev->dev_addr, ETH_ALEN);
if (priv->up) {
rtl8180_down(dev);
@@ -3137,7 +3160,8 @@ static int rtl8180_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case RTL_IOCTL_WPA_SUPPLICANT:
- ret = ieee80211_wpa_supplicant_ioctl(priv->ieee80211, &wrq->u.data);
+ ret = ieee80211_wpa_supplicant_ioctl(
+ priv->ieee80211, &wrq->u.data);
return ret;
default:
return -EOPNOTSUPP;
@@ -3387,7 +3411,7 @@ static void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
int j, i;
int hd;
if (error)
- priv->stats.txretry++; /* tony 20060601 */
+ priv->stats.txretry++;
spin_lock_irqsave(&priv->tx_lock, flag);
switch (pri) {
case MANAGE_PRIORITY:
@@ -3540,7 +3564,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *netdev)
spin_lock_irqsave(&priv->irq_th_lock, flags);
/* ISR: 4bytes */
- inta = read_nic_dword(dev, ISR); /* & priv->IntrMask; */
+ inta = read_nic_dword(dev, ISR);
write_nic_dword(dev, ISR, inta); /* reset int situation */
priv->stats.shints++;
@@ -3586,7 +3610,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *netdev)
}
if (inta & ISR_THPDOK) { /* High priority tx ok */
- priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ priv->link_detect.num_tx_ok_in_period++;
priv->stats.txhpokint++;
rtl8180_tx_isr(dev, HI_PRIORITY, 0);
}
@@ -3649,14 +3673,14 @@ static irqreturn_t rtl8180_interrupt(int irq, void *netdev)
priv->stats.txoverflow++;
if (inta & ISR_TNPDOK) { /* Normal priority tx ok */
- priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ priv->link_detect.num_tx_ok_in_period++;
priv->stats.txnpokint++;
rtl8180_tx_isr(dev, NORM_PRIORITY, 0);
rtl8180_try_wake_queue(dev, NORM_PRIORITY);
}
if (inta & ISR_TLPDOK) { /* Low priority tx ok */
- priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ priv->link_detect.num_tx_ok_in_period++;
priv->stats.txlpokint++;
rtl8180_tx_isr(dev, LOW_PRIORITY, 0);
rtl8180_try_wake_queue(dev, LOW_PRIORITY);
@@ -3664,14 +3688,14 @@ static irqreturn_t rtl8180_interrupt(int irq, void *netdev)
if (inta & ISR_TBKDOK) { /* corresponding to BK_PRIORITY */
priv->stats.txbkpokint++;
- priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ priv->link_detect.num_tx_ok_in_period++;
rtl8180_tx_isr(dev, BK_PRIORITY, 0);
rtl8180_try_wake_queue(dev, BE_PRIORITY);
}
if (inta & ISR_TBEDOK) { /* corresponding to BE_PRIORITY */
priv->stats.txbeperr++;
- priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ priv->link_detect.num_tx_ok_in_period++;
rtl8180_tx_isr(dev, BE_PRIORITY, 0);
rtl8180_try_wake_queue(dev, BE_PRIORITY);
}
@@ -3688,17 +3712,19 @@ void rtl8180_irq_rx_tasklet(struct r8180_priv *priv)
void GPIOChangeRFWorkItemCallBack(struct work_struct *work)
{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, GPIOChangeRFWorkItem.work);
+ struct ieee80211_device *ieee = container_of(
+ work, struct ieee80211_device, GPIOChangeRFWorkItem.work);
struct net_device *dev = ieee->dev;
struct r8180_priv *priv = ieee80211_priv(dev);
u8 btPSR;
u8 btConfig0;
- RT_RF_POWER_STATE eRfPowerStateToSet;
+ enum rt_rf_power_state eRfPowerStateToSet;
bool bActuallySet = false;
char *argv[3];
static char *RadioPowerPath = "/etc/acpi/events/RadioPower.sh";
- static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", NULL};
+ static char *envp[] = {"HOME=/", "TERM=linux",
+ "PATH=/usr/bin:/bin", NULL};
static int readf_count;
readf_count = (readf_count+1)%0xffff;
@@ -3708,24 +3734,24 @@ void GPIOChangeRFWorkItemCallBack(struct work_struct *work)
btPSR = read_nic_byte(dev, PSR);
write_nic_byte(dev, PSR, (btPSR & ~BIT3));
- /* It need to delay 4us suggested by Jong, 2008-01-16 */
+ /* A 4 us delay is needed here, as suggested. */
udelay(4);
/* HW radio On/Off according to the value of FF51[4](config0) */
btConfig0 = btPSR = read_nic_byte(dev, CONFIG0);
- eRfPowerStateToSet = (btConfig0 & BIT4) ? eRfOn : eRfOff;
+ eRfPowerStateToSet = (btConfig0 & BIT4) ? RF_ON : RF_OFF;
/* Turn LED back on when radio enabled */
- if (eRfPowerStateToSet == eRfOn)
+ if (eRfPowerStateToSet == RF_ON)
write_nic_byte(dev, PSR, btPSR | BIT3);
if ((priv->ieee80211->bHwRadioOff == true) &&
- (eRfPowerStateToSet == eRfOn)) {
+ (eRfPowerStateToSet == RF_ON)) {
priv->ieee80211->bHwRadioOff = false;
bActuallySet = true;
} else if ((priv->ieee80211->bHwRadioOff == false) &&
- (eRfPowerStateToSet == eRfOff)) {
+ (eRfPowerStateToSet == RF_OFF)) {
priv->ieee80211->bHwRadioOff = true;
bActuallySet = true;
}
diff --git a/drivers/staging/rtl8187se/r8180_dm.c b/drivers/staging/rtl8187se/r8180_dm.c
index 2ccd2cb70fac..8c020e064869 100644
--- a/drivers/staging/rtl8187se/r8180_dm.c
+++ b/drivers/staging/rtl8187se/r8180_dm.c
@@ -1116,14 +1116,14 @@ bool CheckTxPwrTracking(struct net_device *dev)
void SwAntennaDiversityTimerCallback(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- RT_RF_POWER_STATE rtState;
+ enum rt_rf_power_state rtState;
/* We do NOT need to switch antenna while RF is off. */
rtState = priv->eRFPowerState;
do {
- if (rtState == eRfOff) {
+ if (rtState == RF_OFF) {
break;
- } else if (rtState == eRfSleep) {
+ } else if (rtState == RF_SLEEP) {
/* Don't access BB/RF under Disable PLL situation. */
break;
}
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225.h b/drivers/staging/rtl8187se/r8180_rtl8225.h
index de084f07a071..7df73927b3cc 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225.h
+++ b/drivers/staging/rtl8187se/r8180_rtl8225.h
@@ -1,14 +1,13 @@
/*
- This is part of the rtl8180-sa2400 driver
- released under the GPL (See file COPYING for details).
- Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
-
- This files contains programming code for the rtl8225
- radio frontend.
-
- *Many* thanks to Realtek Corp. for their great support!
-
-*/
+ * This is part of the rtl8180-sa2400 driver released under the GPL (See file
+ * COPYING for details).
+ *
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * This file contains the programming code for the rtl8225 radio frontend.
+ *
+ * *Many* thanks to Realtek Corp. for their great support!
+ */
#include "r8180.h"
@@ -29,7 +28,7 @@ u16 RF_ReadReg(struct net_device *dev, u8 offset);
void rtl8180_set_mode(struct net_device *dev, int mode);
void rtl8180_set_mode(struct net_device *dev, int mode);
bool SetZebraRFPowerState8185(struct net_device *dev,
- RT_RF_POWER_STATE eRFPowerState);
+ enum rt_rf_power_state eRFPowerState);
void rtl8225z4_rf_sleep(struct net_device *dev);
void rtl8225z4_rf_wakeup(struct net_device *dev);
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225z2.c b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
index 7c9a8bfe6d88..47104fa05c55 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225z2.c
+++ b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
@@ -279,8 +279,8 @@ void rtl8225z2_rf_close(struct net_device *dev)
* Map dBm into Tx power index according to current HW model, for example,
* RF and PA, and current wireless mode.
*/
-static s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode,
- s32 PowerInDbm)
+static s8 DbmToTxPwrIdx(struct r8180_priv *priv,
+ enum wireless_mode mode, s32 PowerInDbm)
{
bool bUseDefault = true;
s8 TxPwrIdx = 0;
@@ -291,7 +291,7 @@ static s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode,
*/
s32 tmp = 0;
- if (WirelessMode == WIRELESS_MODE_G) {
+ if (mode == WIRELESS_MODE_G) {
bUseDefault = false;
tmp = (2 * PowerInDbm);
@@ -301,7 +301,7 @@ static s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode,
TxPwrIdx = 40;
else
TxPwrIdx = (s8)tmp;
- } else if (WirelessMode == WIRELESS_MODE_B) {
+ } else if (mode == WIRELESS_MODE_B) {
bUseDefault = false;
tmp = (4 * PowerInDbm) - 52;
@@ -606,51 +606,12 @@ void rtl8225z2_rf_init(struct net_device *dev)
rtl8225z2_rf_set_chan(dev, priv->chan);
}
-void rtl8225z2_rf_set_mode(struct net_device *dev)
-{
- struct r8180_priv *priv = ieee80211_priv(dev);
-
- if (priv->ieee80211->mode == IEEE_A) {
- write_rtl8225(dev, 0x5, 0x1865);
- write_nic_dword(dev, RF_PARA, 0x10084);
- write_nic_dword(dev, RF_TIMING, 0xa8008);
- write_phy_ofdm(dev, 0x0, 0x0);
- write_phy_ofdm(dev, 0xa, 0x6);
- write_phy_ofdm(dev, 0xb, 0x99);
- write_phy_ofdm(dev, 0xf, 0x20);
- write_phy_ofdm(dev, 0x11, 0x7);
-
- rtl8225z2_set_gain(dev, 4);
-
- write_phy_ofdm(dev, 0x15, 0x40);
- write_phy_ofdm(dev, 0x17, 0x40);
-
- write_nic_dword(dev, 0x94, 0x10000000);
- } else {
- write_rtl8225(dev, 0x5, 0x1864);
- write_nic_dword(dev, RF_PARA, 0x10044);
- write_nic_dword(dev, RF_TIMING, 0xa8008);
- write_phy_ofdm(dev, 0x0, 0x1);
- write_phy_ofdm(dev, 0xa, 0x6);
- write_phy_ofdm(dev, 0xb, 0x99);
- write_phy_ofdm(dev, 0xf, 0x20);
- write_phy_ofdm(dev, 0x11, 0x7);
-
- rtl8225z2_set_gain(dev, 4);
-
- write_phy_ofdm(dev, 0x15, 0x40);
- write_phy_ofdm(dev, 0x17, 0x40);
-
- write_nic_dword(dev, 0x94, 0x04000002);
- }
-}
-
#define MAX_DOZE_WAITING_TIMES_85B 20
#define MAX_POLLING_24F_TIMES_87SE 10
#define LPS_MAX_SLEEP_WAITING_TIMES_87SE 5
bool SetZebraRFPowerState8185(struct net_device *dev,
- RT_RF_POWER_STATE eRFPowerState)
+ enum rt_rf_power_state eRFPowerState)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u8 btCR9346, btConfig3;
@@ -672,7 +633,7 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
write_nic_byte(dev, CONFIG3, (btConfig3 | CONFIG3_PARM_En));
switch (eRFPowerState) {
- case eRfOn:
+ case RF_ON:
write_nic_word(dev, 0x37C, 0x00EC);
/* turn on AFE */
@@ -697,7 +658,7 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
u1bTmp = read_nic_byte(dev, 0x24E);
write_nic_byte(dev, 0x24E, (u1bTmp & (~(BIT5 | BIT6))));
break;
- case eRfSleep:
+ case RF_SLEEP:
for (QueueID = 0, i = 0; QueueID < 6;) {
if (get_curr_tx_free_desc(dev, QueueID) ==
priv->txringcount) {
@@ -764,7 +725,7 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
}
}
break;
- case eRfOff:
+ case RF_OFF:
for (QueueID = 0, i = 0; QueueID < 6;) {
if (get_curr_tx_free_desc(dev, QueueID) ==
priv->txringcount) {
@@ -841,10 +802,10 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
void rtl8225z4_rf_sleep(struct net_device *dev)
{
- MgntActSet_RF_State(dev, eRfSleep, RF_CHANGE_BY_PS);
+ MgntActSet_RF_State(dev, RF_SLEEP, RF_CHANGE_BY_PS);
}
void rtl8225z4_rf_wakeup(struct net_device *dev)
{
- MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_PS);
+ MgntActSet_RF_State(dev, RF_ON, RF_CHANGE_BY_PS);
}
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index 9b676e027cad..b55249170f18 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -29,7 +29,7 @@ static u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
#define RATE_COUNT ARRAY_SIZE(rtl8180_rates)
-static CHANNEL_LIST DefaultChannelPlan[] = {
+static struct rtl8187se_channel_list default_channel_plan[] = {
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64}, 19}, /* FCC */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* IC */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* ETSI */
@@ -337,7 +337,7 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
} else {
/* prevent scan in BusyTraffic */
/* FIXME: Need to consider last scan time */
- if ((priv->link_detect.bBusyTraffic) && (true)) {
+ if ((priv->link_detect.b_busy_traffic) && (true)) {
ret = 0;
printk("Now traffic is busy, please try later!\n");
} else
@@ -1030,15 +1030,15 @@ static int r8180_wx_set_channelplan(struct net_device *dev,
/* unsigned long flags; */
down(&priv->wx_sem);
- if (DefaultChannelPlan[*val].Len != 0) {
+ if (default_channel_plan[*val].len != 0) {
priv->channel_plan = *val;
/* Clear old channel map 8 */
for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
GET_DOT11D_INFO(priv->ieee80211)->channel_map[i] = 0;
/* Set new channel map */
- for (i = 1; i <= DefaultChannelPlan[*val].Len; i++)
- GET_DOT11D_INFO(priv->ieee80211)->channel_map[DefaultChannelPlan[*val].Channel[i-1]] = 1;
+ for (i = 1; i <= default_channel_plan[*val].len; i++)
+ GET_DOT11D_INFO(priv->ieee80211)->channel_map[default_channel_plan[*val].channel[i-1]] = 1;
}
up(&priv->wx_sem);
diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c
index c8b9baff1dbc..cc6f100814f3 100644
--- a/drivers/staging/rtl8187se/r8185b_init.c
+++ b/drivers/staging/rtl8187se/r8185b_init.c
@@ -619,10 +619,10 @@ void UpdateInitialGain(struct net_device *dev)
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
/* lzm add 080826 */
- if (priv->eRFPowerState != eRfOn) {
+ if (priv->eRFPowerState != RF_ON) {
/* Don't access BB/RF under disable PLL situation.
* RT_TRACE(COMP_DIG, DBG_LOUD, ("UpdateInitialGain -
- * pHalData->eRFPowerState!=eRfOn\n"));
+ * pHalData->eRFPowerState!=RF_ON\n"));
* Back to the original state
*/
priv->InitialGain = priv->InitialGainBackUp;
@@ -872,8 +872,8 @@ static u8 GetSupportedWirelessMode8185(struct net_device *dev)
static void
ActUpdateChannelAccessSetting(struct net_device *dev,
- WIRELESS_MODE WirelessMode,
- PCHANNEL_ACCESS_SETTING ChnlAccessSetting)
+ enum wireless_mode mode,
+ struct chnl_access_setting *chnl_access_setting)
{
AC_CODING eACI;
@@ -890,25 +890,25 @@ ActUpdateChannelAccessSetting(struct net_device *dev,
*/
/* Suggested by Jong, 2005.12.08. */
- ChnlAccessSetting->SIFS_Timer = 0x22;
- ChnlAccessSetting->DIFS_Timer = 0x1C; /* 2006.06.02, by rcnjko. */
- ChnlAccessSetting->SlotTimeTimer = 9; /* 2006.06.02, by rcnjko. */
+ chnl_access_setting->sifs_timer = 0x22;
+ chnl_access_setting->difs_timer = 0x1C; /* 2006.06.02, by rcnjko. */
+ chnl_access_setting->slot_time_timer = 9; /* 2006.06.02, by rcnjko. */
/*
* Suggested by wcchu, it is the default value of EIFS register,
* 2005.12.08.
*/
- ChnlAccessSetting->EIFS_Timer = 0x5B;
- ChnlAccessSetting->CWminIndex = 3; /* 2006.06.02, by rcnjko. */
- ChnlAccessSetting->CWmaxIndex = 7; /* 2006.06.02, by rcnjko. */
+ chnl_access_setting->eifs_timer = 0x5B;
+ chnl_access_setting->cwmin_index = 3; /* 2006.06.02, by rcnjko. */
+ chnl_access_setting->cwmax_index = 7; /* 2006.06.02, by rcnjko. */
- write_nic_byte(dev, SIFS, ChnlAccessSetting->SIFS_Timer);
+ write_nic_byte(dev, SIFS, chnl_access_setting->sifs_timer);
/*
* Rewrited from directly use PlatformEFIOWrite1Byte(),
* by Annie, 2006-03-29.
*/
- write_nic_byte(dev, SLOT, ChnlAccessSetting->SlotTimeTimer);
+ write_nic_byte(dev, SLOT, chnl_access_setting->slot_time_timer);
- write_nic_byte(dev, EIFS, ChnlAccessSetting->EIFS_Timer);
+ write_nic_byte(dev, EIFS, chnl_access_setting->eifs_timer);
/*
* <RJ_EXPR_QOS> Suggested by wcchu, it is the default value of EIFS
@@ -959,7 +959,7 @@ static void ActSetWirelessMode8185(struct net_device *dev, u8 btWirelessMode)
* wireless mode if we switch to specified band successfully.
*/
- ieee->mode = (WIRELESS_MODE)btWirelessMode;
+ ieee->mode = (enum wireless_mode)btWirelessMode;
/* 3. Change related setting. */
if (ieee->mode == WIRELESS_MODE_A)
@@ -1085,7 +1085,7 @@ static bool MgntDisconnect(struct net_device *dev, u8 asRsn)
* PASSIVE LEVEL.
*/
static bool SetRFPowerState(struct net_device *dev,
- RT_RF_POWER_STATE eRFPowerState)
+ enum rt_rf_power_state eRFPowerState)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bResult = false;
@@ -1098,13 +1098,13 @@ static bool SetRFPowerState(struct net_device *dev,
return bResult;
}
-bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
+bool MgntActSet_RF_State(struct net_device *dev, enum rt_rf_power_state StateToSet,
u32 ChangeSource)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bActionAllowed = false;
bool bConnectBySSID = false;
- RT_RF_POWER_STATE rtState;
+ enum rt_rf_power_state rtState;
u16 RFWaitCounter = 0;
unsigned long flag;
/*
@@ -1140,7 +1140,7 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
rtState = priv->eRFPowerState;
switch (StateToSet) {
- case eRfOn:
+ case RF_ON:
/*
* Turn On RF no matter the IPS setting because we need to
* update the RF state to Ndis under Vista, or the Windows
@@ -1153,13 +1153,13 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
priv->RfOffReason = 0;
bActionAllowed = true;
- if (rtState == eRfOff &&
+ if (rtState == RF_OFF &&
ChangeSource >= RF_CHANGE_BY_HW)
bConnectBySSID = true;
}
break;
- case eRfOff:
+ case RF_OFF:
/* 070125, rcnjko: we always keep connected in AP mode. */
if (priv->RfOffReason > RF_CHANGE_BY_IPS) {
@@ -1182,7 +1182,7 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
priv->RfOffReason |= ChangeSource;
bActionAllowed = true;
break;
- case eRfSleep:
+ case RF_SLEEP:
priv->RfOffReason |= ChangeSource;
bActionAllowed = true;
break;
@@ -1233,7 +1233,7 @@ static void InactivePowerSave(struct net_device *dev)
void IPSEnter(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- RT_RF_POWER_STATE rtState;
+ enum rt_rf_power_state rtState;
if (priv->bInactivePs) {
rtState = priv->eRFPowerState;
@@ -1245,9 +1245,9 @@ void IPSEnter(struct net_device *dev)
* trigger IPS)(4) IBSS (send Beacon)
* (5) AP mode (send Beacon)
*/
- if (rtState == eRfOn && !priv->bSwRfProcessing
+ if (rtState == RF_ON && !priv->bSwRfProcessing
&& (priv->ieee80211->state != IEEE80211_LINKED)) {
- priv->eInactivePowerState = eRfOff;
+ priv->eInactivePowerState = RF_OFF;
InactivePowerSave(dev);
}
}
@@ -1255,13 +1255,13 @@ void IPSEnter(struct net_device *dev)
void IPSLeave(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- RT_RF_POWER_STATE rtState;
+ enum rt_rf_power_state rtState;
if (priv->bInactivePs) {
rtState = priv->eRFPowerState;
- if ((rtState == eRfOff || rtState == eRfSleep) &&
+ if ((rtState == RF_OFF || rtState == RF_SLEEP) &&
!priv->bSwRfProcessing
&& priv->RfOffReason <= RF_CHANGE_BY_IPS) {
- priv->eInactivePowerState = eRfOn;
+ priv->eInactivePowerState = RF_ON;
InactivePowerSave(dev);
}
}
@@ -1385,23 +1385,23 @@ void rtl8185b_adapter_start(struct net_device *dev)
/* Initialize RegWirelessMode if it is not a valid one. */
if (bInvalidWirelessMode)
- ieee->mode = (WIRELESS_MODE)InitWirelessMode;
+ ieee->mode = (enum wireless_mode)InitWirelessMode;
} else {
/* One of B, G, A. */
InitWirelessMode = ieee->mode;
}
- priv->eRFPowerState = eRfOff;
+ priv->eRFPowerState = RF_OFF;
priv->RfOffReason = 0;
{
- MgntActSet_RF_State(dev, eRfOn, 0);
+ MgntActSet_RF_State(dev, RF_ON, 0);
}
/*
* If inactive power mode is enabled, disable rf while in
* disconnected state.
*/
if (priv->bInactivePs)
- MgntActSet_RF_State(dev , eRfOff, RF_CHANGE_BY_IPS);
+ MgntActSet_RF_State(dev , RF_OFF, RF_CHANGE_BY_IPS);
ActSetWirelessMode8185(dev, (u8)(InitWirelessMode));
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
index c9c548f17494..e45c106c2162 100644
--- a/drivers/staging/rtl8188eu/Kconfig
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -3,7 +3,6 @@ config R8188EU
depends on WLAN && USB
select WIRELESS_EXT
select WEXT_PRIV
- default N
---help---
This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N.
If built as a module, it will be called r8188eu.
@@ -12,7 +11,7 @@ if R8188EU
config 88EU_AP_MODE
bool "Realtek RTL8188EU AP mode"
- default Y
+ default y
---help---
This option enables Access Point mode. Unless you know that your system
will never be used as an AP, or the target system has limited memory,
@@ -20,7 +19,7 @@ config 88EU_AP_MODE
config 88EU_P2P
bool "Realtek RTL8188EU Peer-to-peer mode"
- default Y
+ default y
---help---
This option enables peer-to-peer mode for the r8188eu driver. Unless you
know that peer-to-peer (P2P) mode will never be used, or the target system has
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
index 0a617b42cc99..6a138ff699e9 100644
--- a/drivers/staging/rtl8188eu/Makefile
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -34,7 +34,6 @@ r8188eu-y := \
hal/hal_com.o \
hal/odm.o \
hal/odm_debug.o \
- hal/odm_interface.o \
hal/odm_HWConfig.o \
hal/odm_RegConfig8188E.o\
hal/odm_RTL8188E.o \
diff --git a/drivers/staging/rtl8188eu/TODO b/drivers/staging/rtl8188eu/TODO
index f7f389c40e71..b574b235b340 100644
--- a/drivers/staging/rtl8188eu/TODO
+++ b/drivers/staging/rtl8188eu/TODO
@@ -9,6 +9,11 @@ TODO:
- merge Realtek's bugfixes and new features into the driver
- switch to use LIB80211
- switch to use MAC80211
+- figure out what to do with this code in rtw_recv_indicatepkt():
+ rcu_read_lock();
+ rcu_dereference(padapter->pnetdev->rx_handler_data);
+ rcu_read_unlock();
+ Perhaps delete it, perhaps assign to some local variable.
Please send any patches to Greg Kroah-Hartman <gregkh@linux.com>,
and Larry Finger <Larry.Finger@lwfinger.net>.
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 8ebe6bc40022..ff74d0d7efa3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -94,7 +94,7 @@ static void update_BCNTIM(struct adapter *padapter)
} else {
tim_ielen = 0;
- /* calucate head_len */
+ /* calculate head_len */
offset = _FIXED_IE_LENGTH_;
offset += pnetwork_mlmeext->Ssid.SsidLength + 2;
@@ -129,7 +129,7 @@ static void update_BCNTIM(struct adapter *padapter)
*dst_ie++ = tim_ielen;
*dst_ie++ = 0;/* DTIM count */
- *dst_ie++ = 1;/* DTIM peroid */
+ *dst_ie++ = 1;/* DTIM period */
if (pstapriv->tim_bitmap&BIT(0))/* for bc/mc frames */
*dst_ie++ = BIT(0);/* bitmap ctrl */
@@ -285,12 +285,12 @@ void expire_timeout_chk(struct adapter *padapter)
spin_lock_bh(&pstapriv->auth_list_lock);
phead = &pstapriv->auth_list;
- plist = get_next(phead);
+ plist = phead->next;
/* check auth_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, auth_list);
- plist = get_next(plist);
+ psta = container_of(plist, struct sta_info, auth_list);
+ plist = plist->next;
if (psta->expire_to > 0) {
psta->expire_to--;
@@ -319,12 +319,12 @@ void expire_timeout_chk(struct adapter *padapter)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* check asoc_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ psta = container_of(plist, struct sta_info, asoc_list);
+ plist = plist->next;
if (chk_sta_is_alive(psta) || !psta->expire_to) {
psta->expire_to = pstapriv->expire_to;
@@ -821,7 +821,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
/* update cur_wireless_mode */
update_wireless_mode(padapter);
- /* udpate capability after cur_wireless_mode updated */
+ /* update capability after cur_wireless_mode updated */
update_capinfo(padapter, rtw_get_capability((struct wlan_bssid_ex *)pnetwork));
/* let pnetwork_mlmeext == pnetwork_mlme. */
@@ -980,7 +980,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _SSN_IE_1_, &ie_len,
(pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if ((p) && (_rtw_memcmp(p+2, OUI1, 4))) {
+ if ((p) && (!memcmp(p+2, OUI1, 4))) {
if (rtw_parse_wpa_ie(p, ie_len+2, &group_cipher,
&pairwise_cipher, NULL) == _SUCCESS) {
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
@@ -1005,7 +1005,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len,
(pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if ((p) && _rtw_memcmp(p+2, WMM_PARA_IE, 6)) {
+ if ((p) && !memcmp(p+2, WMM_PARA_IE, 6)) {
pmlmepriv->qospriv.qos_option = 1;
*(p+8) |= BIT(7);/* QoS Info, support U-APSD */
@@ -1144,13 +1144,13 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
+ plist = phead->next;
while (!rtw_end_of_queue_search(phead, plist)) {
- paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+ plist = plist->next;
- if (_rtw_memcmp(paclnode->addr, addr, ETH_ALEN)) {
+ if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
if (paclnode->valid) {
added = true;
DBG_88E("%s, sta has been added\n", __func__);
@@ -1205,13 +1205,13 @@ int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
+ plist = phead->next;
while (!rtw_end_of_queue_search(phead, plist)) {
- paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+ plist = plist->next;
- if (_rtw_memcmp(paclnode->addr, addr, ETH_ALEN)) {
+ if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
if (paclnode->valid) {
paclnode->valid = false;
@@ -1339,8 +1339,7 @@ static void update_bcn_wps_ie(struct adapter *padapter)
pnetwork->IELength = wps_offset + (wps_ielen+2) + remainder_ielen;
}
- if (pbackup_remainder_ie)
- kfree(pbackup_remainder_ie);
+ kfree(pbackup_remainder_ie);
}
static void update_bcn_p2p_ie(struct adapter *padapter)
@@ -1351,13 +1350,13 @@ static void update_bcn_vendor_spec_ie(struct adapter *padapter, u8 *oui)
{
DBG_88E("%s\n", __func__);
- if (_rtw_memcmp(RTW_WPA_OUI, oui, 4))
+ if (!memcmp(RTW_WPA_OUI, oui, 4))
update_bcn_wpa_ie(padapter);
- else if (_rtw_memcmp(WMM_OUI, oui, 4))
+ else if (!memcmp(WMM_OUI, oui, 4))
update_bcn_wmm_ie(padapter);
- else if (_rtw_memcmp(WPS_OUI, oui, 4))
+ else if (!memcmp(WPS_OUI, oui, 4))
update_bcn_wps_ie(padapter);
- else if (_rtw_memcmp(P2P_OUI, oui, 4))
+ else if (!memcmp(P2P_OUI, oui, 4))
update_bcn_p2p_ie(padapter);
else
DBG_88E("unknown OUI type!\n");
@@ -1415,7 +1414,7 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
/*
op_mode
-Set to 0 (HT pure) under the followign conditions
+Set to 0 (HT pure) under the following conditions
- all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
- all STAs in the BSS are 20 MHz HT in 20 MHz BSS
Set to 1 (HT non-member protection) if there may be non-HT STAs
@@ -1494,7 +1493,7 @@ static int rtw_ht_operation_update(struct adapter *padapter)
void associated_clients_update(struct adapter *padapter, u8 updated)
{
- /* update associcated stations cap. */
+ /* update associated stations cap. */
if (updated) {
struct list_head *phead, *plist;
struct sta_info *psta = NULL;
@@ -1503,13 +1502,13 @@ void associated_clients_update(struct adapter *padapter, u8 updated)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* check asoc_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ plist = plist->next;
VCS_update(padapter, psta);
}
@@ -1647,7 +1646,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
}
- /* update associcated stations cap. */
+ /* update associated stations cap. */
associated_clients_update(padapter, beacon_updated);
DBG_88E("%s, updated =%d\n", __func__, beacon_updated);
@@ -1711,7 +1710,7 @@ u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
}
- /* update associcated stations cap. */
+ /* update associated stations cap. */
DBG_88E("%s, updated =%d\n", __func__, beacon_updated);
@@ -1777,12 +1776,12 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* for each sta in asoc_queue */
while (!rtw_end_of_queue_search(phead, plist)) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ psta = container_of(plist, struct sta_info, asoc_list);
+ plist = plist->next;
issue_action_spct_ch_switch(padapter, psta->hwaddr, new_ch, ch_offset);
psta->expire_to = ((pstapriv->expire_to * 2) > 5) ? 5 : (pstapriv->expire_to * 2);
@@ -1811,13 +1810,13 @@ int rtw_sta_flush(struct adapter *padapter)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* free sta asoc_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ plist = plist->next;
rtw_list_delete(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
@@ -1942,10 +1941,10 @@ void stop_ap_mode(struct adapter *padapter)
/* for ACL */
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
+ plist = phead->next;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+ plist = plist->next;
if (paclnode->valid) {
paclnode->valid = false;
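The rtw_ap.c hunks above replace the driver-private LIST_CONTAINOR()/get_next() wrappers with plain container_of() and ->next. A minimal user-space sketch of that list-walking idiom follows; the structure names are illustrative, not the driver's, and container_of() is re-defined here only so the sketch is self-contained.

/* Sketch of the container_of()-based walk used above; types are made up. */
#include <stddef.h>

struct list_node {
	struct list_node *next;
};

struct sta_entry {
	int aid;
	struct list_node asoc_list;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static int count_entries(struct list_node *head)
{
	struct list_node *plist = head->next;	/* first real entry */
	int n = 0;

	while (plist != head) {		/* end-of-list test, like the driver's */
		struct sta_entry *e =
			container_of(plist, struct sta_entry, asoc_list);

		(void)e;		/* a real walk would act on e here */
		plist = plist->next;
		n++;
	}
	return n;
}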
diff --git a/drivers/staging/rtl8188eu/core/rtw_br_ext.c b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
index 75e38d4ff4c3..e843c6bd4525 100644
--- a/drivers/staging/rtl8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
@@ -409,7 +409,7 @@ static void __nat25_db_network_insert(struct adapter *priv,
db = priv->nethash[hash];
while (db != NULL) {
if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
- memcpy(db->macAddr, macAddr, ETH_ALEN);
+ ether_addr_copy(db->macAddr, macAddr);
db->ageing_timer = jiffies;
spin_unlock_bh(&priv->br_ext_lock);
return;
@@ -422,7 +422,7 @@ static void __nat25_db_network_insert(struct adapter *priv,
return;
}
memcpy(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN);
- memcpy(db->macAddr, macAddr, ETH_ALEN);
+ ether_addr_copy(db->macAddr, macAddr);
atomic_set(&db->use_count, 1);
db->ageing_timer = jiffies;
@@ -543,13 +543,14 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
if (!__nat25_db_network_lookup_and_replace(priv, skb, networkAddr)) {
if (*((unsigned char *)&iph->daddr + 3) == 0xff) {
/* L2 is unicast but L3 is broadcast, make L2 become broadcast */
- DEBUG_INFO("NAT25: Set DA as boardcast\n");
+ DEBUG_INFO("NAT25: Set DA as broadcast\n");
memset(skb->data, 0xff, ETH_ALEN);
} else {
- /* forward unknow IP packet to upper TCP/IP */
+ /* forward unknown IP packet to upper TCP/IP */
DEBUG_INFO("NAT25: Replace DA with BR's MAC\n");
if ((*(u32 *)priv->br_mac) == 0 && (*(u16 *)(priv->br_mac+4)) == 0) {
- printk("Re-init netdev_br_init() due to br_mac == 0!\n");
+ netdev_info(skb->dev,
+ "Re-init netdev_br_init() due to br_mac == 0!\n");
netdev_br_init(priv->pnetdev);
}
memcpy(skb->data, priv->br_mac, ETH_ALEN);
@@ -932,7 +933,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
(ph->code == PADO_CODE ? "PADO" : "PADS"), skb->dev->name);
} else { /* not add relay tag */
if (!priv->pppoe_connection_in_progress) {
- DEBUG_ERR("Discard PPPoE packet due to no connection in progresss!\n");
+ DEBUG_ERR("Discard PPPoE packet due to no connection in progress!\n");
return -1;
}
memcpy(skb->data, priv->pppoe_addr, ETH_ALEN);
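The rtw_br_ext.c hunks switch MAC-address copies from memcpy() to ether_addr_copy() from <linux/etherdevice.h>. That helper copies exactly ETH_ALEN (6) bytes but requires both pointers to be 16-bit aligned, so it is only a drop-in replacement when the surrounding structures guarantee that alignment. A hedged kernel-style sketch of the call pattern (the helper name here is a placeholder, not a driver function):

/* Kernel-style sketch; assumes both buffers are u8[ETH_ALEN] and
 * 2-byte aligned, as ether_addr_copy() requires.
 */
#include <linux/etherdevice.h>

static void sketch_store_mac(u8 *dst, const u8 *src)
{
	ether_addr_copy(dst, src);	/* copies ETH_ALEN (6) bytes */
}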
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 82fe8c47a1de..c2fb050337d5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -32,11 +32,10 @@ Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
No irqsave is necessary.
*/
-int _rtw_init_cmd_priv (struct cmd_priv *pcmdpriv)
+int _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
int res = _SUCCESS;
-_func_enter_;
sema_init(&(pcmdpriv->cmd_queue_sema), 0);
/* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
@@ -71,7 +70,6 @@ _func_enter_;
pcmdpriv->cmd_done_cnt = 0;
pcmdpriv->rsp_cnt = 0;
exit:
-_func_exit_;
return res;
}
@@ -81,24 +79,21 @@ int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
{
int res = _SUCCESS;
-_func_enter_;
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
atomic_set(&pevtpriv->event_seq, 0);
pevtpriv->evt_done_cnt = 0;
- _init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL);
+ INIT_WORK(&pevtpriv->c2h_wk, c2h_wk_callback);
pevtpriv->c2h_wk_alive = false;
pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN+1);
-_func_exit_;
return res;
}
-void rtw_free_evt_priv(struct evt_priv *pevtpriv)
+void rtw_free_evt_priv(struct evt_priv *pevtpriv)
{
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+rtw_free_evt_priv\n"));
@@ -113,21 +108,15 @@ _func_enter_;
}
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("-rtw_free_evt_priv\n"));
-_func_exit_;
}
-void _rtw_free_cmd_priv (struct cmd_priv *pcmdpriv)
+void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
{
-_func_enter_;
if (pcmdpriv) {
- if (pcmdpriv->cmd_allocated_buf)
- kfree(pcmdpriv->cmd_allocated_buf);
-
- if (pcmdpriv->rsp_allocated_buf)
- kfree(pcmdpriv->rsp_allocated_buf);
+ kfree(pcmdpriv->cmd_allocated_buf);
+ kfree(pcmdpriv->rsp_allocated_buf);
}
-_func_exit_;
}
/*
@@ -140,11 +129,10 @@ ISR/Call-Back functions can't call this sub-function.
*/
-int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
+int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
{
unsigned long irqL;
-_func_enter_;
if (obj == NULL)
goto exit;
@@ -157,7 +145,6 @@ _func_enter_;
exit:
-_func_exit_;
return _SUCCESS;
}
@@ -167,47 +154,39 @@ struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
unsigned long irqL;
struct cmd_obj *obj;
-_func_enter_;
spin_lock_irqsave(&queue->lock, irqL);
if (rtw_is_list_empty(&(queue->queue))) {
obj = NULL;
} else {
- obj = LIST_CONTAINOR(get_next(&(queue->queue)), struct cmd_obj, list);
+ obj = container_of((&queue->queue)->next, struct cmd_obj, list);
rtw_list_delete(&obj->list);
}
spin_unlock_irqrestore(&queue->lock, irqL);
-_func_exit_;
return obj;
}
-u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
u32 res;
-_func_enter_;
- res = _rtw_init_cmd_priv (pcmdpriv);
-_func_exit_;
+ res = _rtw_init_cmd_priv(pcmdpriv);
return res;
}
-u32 rtw_init_evt_priv (struct evt_priv *pevtpriv)
+u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
{
- int res;
-_func_enter_;
+ int res;
res = _rtw_init_evt_priv(pevtpriv);
-_func_exit_;
return res;
}
-void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
+void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
{
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("rtw_free_cmd_priv\n"));
_rtw_free_cmd_priv(pcmdpriv);
-_func_exit_;
}
static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
@@ -238,7 +217,6 @@ u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
int res = _FAIL;
struct adapter *padapter = pcmdpriv->padapter;
-_func_enter_;
if (cmd_obj == NULL)
goto exit;
@@ -258,34 +236,28 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
-struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
+struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
{
struct cmd_obj *cmd_obj;
-_func_enter_;
cmd_obj = _rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
-_func_exit_;
return cmd_obj;
}
-void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv)
+void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv)
{
-_func_enter_;
pcmdpriv->cmd_done_cnt++;
/* up(&(pcmdpriv->cmd_done_sema)); */
-_func_exit_;
}
void rtw_free_cmd_obj(struct cmd_obj *pcmd)
{
-_func_enter_;
if ((pcmd->cmdcode != _JoinBss_CMD_) && (pcmd->cmdcode != _CreateBss_CMD_)) {
/* free parmbuf in cmd_obj */
@@ -302,7 +274,6 @@ _func_enter_;
/* free cmd_obj */
kfree(pcmd);
-_func_exit_;
}
int rtw_cmd_thread(void *context)
@@ -315,7 +286,6 @@ int rtw_cmd_thread(void *context)
struct adapter *padapter = (struct adapter *)context;
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
-_func_enter_;
thread_enter("RTW_CMD_THREAD");
@@ -410,7 +380,6 @@ post_process:
up(&pcmdpriv->terminate_cmdthread_sema);
-_func_exit_;
complete_and_exit(NULL, 0);
}
@@ -423,15 +392,14 @@ u8 rtw_setstandby_cmd(struct adapter *padapter, uint action)
u8 ret = _SUCCESS;
-_func_enter_;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
if (ph2c == NULL) {
ret = _FAIL;
goto exit;
}
- psetusbsuspend = (struct usb_suspend_parm *)rtw_zmalloc(sizeof(struct usb_suspend_parm));
+ psetusbsuspend = kzalloc(sizeof(struct usb_suspend_parm), GFP_KERNEL);
if (psetusbsuspend == NULL) {
kfree(ph2c);
ret = _FAIL;
@@ -446,7 +414,6 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
@@ -465,14 +432,11 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
- }
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
p2p_ps_wk_cmd(padapter, P2P_PS_SCAN, 1);
- }
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL)
@@ -500,9 +464,6 @@ _func_enter_;
if (ssid[i].SsidLength) {
memcpy(&psurveyPara->ssid[i], &ssid[i], sizeof(struct ndis_802_11_ssid));
psurveyPara->ssid_num++;
- if (0)
- DBG_88E(FUNC_ADPT_FMT" ssid:(%s, %d)\n", FUNC_ADPT_ARG(padapter),
- psurveyPara->ssid[i].Ssid, psurveyPara->ssid[i].SsidLength);
}
}
}
@@ -514,9 +475,6 @@ _func_enter_;
if (ch[i].hw_value && !(ch[i].flags & RTW_IEEE80211_CHAN_DISABLED)) {
memcpy(&psurveyPara->ch[i], &ch[i], sizeof(struct rtw_ieee80211_channel));
psurveyPara->ch_num++;
- if (0)
- DBG_88E(FUNC_ADPT_FMT" ch:%u\n", FUNC_ADPT_ARG(padapter),
- psurveyPara->ch[i].hw_value);
}
}
}
@@ -537,7 +495,6 @@ _func_enter_;
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
}
-_func_exit_;
return res;
}
@@ -549,7 +506,6 @@ u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -570,7 +526,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -582,7 +537,6 @@ u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -604,7 +558,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -624,7 +577,6 @@ u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -648,7 +600,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -659,7 +610,6 @@ u8 rtw_setbbreg_cmd(struct adapter *padapter, u8 offset, u8 val)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
@@ -680,7 +630,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -691,7 +640,6 @@ u8 rtw_getbbreg_cmd(struct adapter *padapter, u8 offset, u8 *pval)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
@@ -715,7 +663,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -725,7 +672,6 @@ u8 rtw_setrfreg_cmd(struct adapter *padapter, u8 offset, u32 val)
struct writeRF_parm *pwriterfparm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
@@ -746,7 +692,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -757,7 +702,6 @@ u8 rtw_getrfreg_cmd(struct adapter *padapter, u8 offset, u8 *pval)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -785,33 +729,28 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
- _func_enter_;
kfree(pcmd->parmbuf);
kfree(pcmd);
if (padapter->registrypriv.mp_mode == 1)
padapter->mppriv.workparam.bcompleted = true;
-_func_exit_;
}
void rtw_readtssi_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
- _func_enter_;
kfree(pcmd->parmbuf);
kfree(pcmd);
if (padapter->registrypriv.mp_mode == 1)
padapter->mppriv.workparam.bcompleted = true;
-_func_exit_;
}
u8 rtw_createbss_cmd(struct adapter *padapter)
@@ -822,7 +761,6 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
struct wlan_bssid_ex *pdev_network = &padapter->registrypriv.dev_network;
u8 res = _SUCCESS;
-_func_enter_;
rtw_led_control(padapter, LED_CTL_START_TO_LINK);
@@ -847,7 +785,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, pcmd);
exit:
-_func_exit_;
return res;
}
@@ -858,7 +795,6 @@ u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss, unsigned
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
@@ -877,7 +813,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -898,15 +833,13 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
-_func_enter_;
rtw_led_control(padapter, LED_CTL_START_TO_LINK);
- if (pmlmepriv->assoc_ssid.SsidLength == 0) {
+ if (pmlmepriv->assoc_ssid.SsidLength == 0)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
- } else {
+ else
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
- }
pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
@@ -952,11 +885,10 @@ _func_enter_;
psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->IELength;
- if ((psecnetwork->IELength-12) < (256-1)) {
+ if ((psecnetwork->IELength-12) < (256-1))
memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], psecnetwork->IELength-12);
- } else {
+ else
memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], (256-1));
- }
psecnetwork->IELength = 0;
/* Added by Albert 2009/02/18 */
@@ -987,9 +919,12 @@ _func_enter_;
phtpriv->ht_option = false;
if (pregistrypriv->ht_enable) {
- /* Added by Albert 2010/06/23 */
- /* For the WEP mode, we will use the bg mode to do the connection to avoid some IOT issue. */
- /* Especially for Realtek 8192u SoftAP. */
+ /*
+ * Added by Albert 2010/06/23
+ * For the WEP mode, we will use the bg mode to do
+ * the connection to avoid some IOT issue.
+ * Especially for Realtek 8192u SoftAP.
+ */
if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) &&
(padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
(padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
@@ -1020,7 +955,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
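
The reworded comment above documents why HT is only kept when the pairwise cipher is neither WEP nor TKIP. A hedged sketch of that gating logic, simplified and using made-up enum values rather than the driver's dot11PrivacyAlgrthm constants:

#include <linux/types.h>

enum demo_cipher { DEMO_WEP40, DEMO_WEP104, DEMO_TKIP, DEMO_CCMP, DEMO_NONE };

/* Simplified: HT (802.11n) rates are only enabled for non-WEP/TKIP ciphers,
 * mirroring the check in rtw_joinbss_cmd() above.
 */
static bool demo_ht_allowed(enum demo_cipher pairwise)
{
	switch (pairwise) {
	case DEMO_WEP40:
	case DEMO_WEP104:
	case DEMO_TKIP:
		return false;	/* fall back to 11bg to avoid IOT issues */
	default:
		return true;	/* CCMP or open: HT may be negotiated */
	}
}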
@@ -1032,7 +966,6 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
struct cmd_priv *cmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_disassoc_cmd\n"));
@@ -1063,7 +996,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1076,7 +1008,6 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra n
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1098,7 +1029,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1115,7 +1045,6 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
struct sta_info *sta = (struct sta_info *)psta;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1142,7 +1071,7 @@ _func_enter_;
ph2c->rsp = (u8 *)psetstakey_rsp;
ph2c->rspsz = sizeof(struct set_stakey_rsp);
- memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
+ ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
psetstakey_para->algorithm = (unsigned char) psecuritypriv->dot11PrivacyAlgrthm;
@@ -1161,7 +1090,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
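
The memcpy() -> ether_addr_copy() changes above use the helper from <linux/etherdevice.h>. A small sketch of its use with a hypothetical structure; note that ether_addr_copy() expects both buffers to be 2-byte aligned, which the fixed u8[ETH_ALEN] fields here are assumed to satisfy:

#include <linux/etherdevice.h>
#include <linux/if_ether.h>

struct demo_key_parm {			/* hypothetical, not the driver's struct */
	u8 addr[ETH_ALEN];
	u8 algorithm;
};

static void demo_fill_addr(struct demo_key_parm *parm, const u8 *hwaddr)
{
	/* copies exactly ETH_ALEN (6) bytes and states the intent more
	 * clearly than an open-coded memcpy(dst, src, ETH_ALEN)
	 */
	ether_addr_copy(parm->addr, hwaddr);
}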
@@ -1175,7 +1103,6 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
struct sta_info *sta = (struct sta_info *)psta;
u8 res = _SUCCESS;
-_func_enter_;
if (!enqueue) {
clear_cam_entry(padapter, entry);
@@ -1205,7 +1132,7 @@ _func_enter_;
ph2c->rsp = (u8 *)psetstakey_rsp;
ph2c->rspsz = sizeof(struct set_stakey_rsp);
- memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
+ ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
psetstakey_para->algorithm = _NO_PRIVACY_;
@@ -1215,7 +1142,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return res;
}
@@ -1226,7 +1152,6 @@ u8 rtw_setrttbl_cmd(struct adapter *padapter, struct setratable_parm *prate_tab
struct setratable_parm *psetrttblparm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1247,7 +1172,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -1257,7 +1181,6 @@ u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval)
struct getratable_parm *pgetrttblparm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1272,8 +1195,6 @@ _func_enter_;
goto exit;
}
-/* init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm, GEN_CMD_CODE(_SetRaTable)); */
-
_rtw_init_listhead(&ph2c->list);
ph2c->cmdcode = GEN_CMD_CODE(_GetRaTable);
ph2c->parmbuf = (unsigned char *)pgetrttblparm;
@@ -1285,7 +1206,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -1298,7 +1218,6 @@ u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr)
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1324,16 +1243,15 @@ _func_enter_;
ph2c->rsp = (u8 *)psetassocsta_rsp;
ph2c->rspsz = sizeof(struct set_assocsta_rsp);
- memcpy(psetassocsta_para->addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(psetassocsta_para->addr, mac_addr);
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
- }
+}
u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
{
@@ -1342,7 +1260,6 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
struct addBaReq_parm *paddbareq_parm;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1369,7 +1286,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1381,7 +1297,6 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1406,7 +1321,6 @@ _func_enter_;
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -1418,7 +1332,6 @@ u8 rtw_set_ch_cmd(struct adapter *padapter, u8 ch, u8 bw, u8 ch_offset, u8 enque
u8 res = _SUCCESS;
-_func_enter_;
DBG_88E(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n",
FUNC_NDEV_ARG(padapter->pnetdev), ch, bw, ch_offset);
@@ -1460,7 +1373,6 @@ exit:
DBG_88E(FUNC_NDEV_FMT" res:%u\n", FUNC_NDEV_ARG(padapter->pnetdev), res);
-_func_exit_;
return res;
}
@@ -1473,7 +1385,6 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
u8 res = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_set_chplan_cmd\n"));
@@ -1516,7 +1427,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1529,7 +1439,6 @@ u8 rtw_led_blink_cmd(struct adapter *padapter, struct LED_871x *pLed)
u8 res = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_led_blink_cmd\n"));
@@ -1553,7 +1462,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1566,7 +1474,6 @@ u8 rtw_set_csa_cmd(struct adapter *padapter, u8 new_ch_no)
u8 res = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_set_csa_cmd\n"));
@@ -1590,7 +1497,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1685,7 +1591,6 @@ static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
u8 mstatus;
-_func_enter_;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
@@ -1724,7 +1629,6 @@ _func_enter_;
break;
}
-_func_exit_;
}
u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
@@ -1735,11 +1639,6 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
/* struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; */
u8 res = _SUCCESS;
-_func_enter_;
-
- /* if (!pwrctrlpriv->bLeisurePs) */
- /* return res; */
-
if (enqueue) {
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1767,7 +1666,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1785,7 +1683,6 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
@@ -1806,7 +1703,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -1824,7 +1720,6 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
u8 support_ant_div;
u8 res = _SUCCESS;
-_func_enter_;
rtw_hal_get_def_var(padapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &support_ant_div);
if (!support_ant_div)
return res;
@@ -1854,7 +1749,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return res;
}
@@ -1873,7 +1767,6 @@ u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return res;
@@ -1892,8 +1785,8 @@ _func_enter_;
}
pdrvextra_cmd_parm->ec_id = P2P_PROTO_WK_CID;
- pdrvextra_cmd_parm->type_size = intCmdType; /* As the command tppe. */
- pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
+ pdrvextra_cmd_parm->type_size = intCmdType; /* As the command type. */
+ pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
@@ -1901,7 +1794,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1914,7 +1806,6 @@ u8 rtw_ps_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ppscmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ppscmd == NULL) {
@@ -1937,7 +1828,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -2084,12 +1974,16 @@ static void c2h_wk_callback(struct work_struct *work)
evtpriv->c2h_wk_alive = true;
while (!rtw_cbuf_empty(evtpriv->c2h_queue)) {
- if ((c2h_evt = (struct c2h_evt_hdr *)rtw_cbuf_pop(evtpriv->c2h_queue)) != NULL) {
+ c2h_evt = (struct c2h_evt_hdr *)
+ rtw_cbuf_pop(evtpriv->c2h_queue);
+ if (c2h_evt != NULL)
/* This C2H event is read, clear it */
c2h_evt_clear(adapter);
- } else if ((c2h_evt = (struct c2h_evt_hdr *)rtw_malloc(16)) != NULL) {
+ else {
+ c2h_evt = (struct c2h_evt_hdr *)rtw_malloc(16);
/* This C2H event is not read, read & clear now */
- if (c2h_evt_read(adapter, (u8 *)c2h_evt) != _SUCCESS)
+ if (c2h_evt != NULL &&
+ c2h_evt_read(adapter, (u8 *)c2h_evt) != _SUCCESS)
continue;
}
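
The rewritten loop above keeps the original behaviour: pop an event that was already read and merely clear it, otherwise allocate a small buffer and read-and-clear now. A hedged control-flow sketch with generic names; the demo_* helpers below are declaration-only stand-ins for the driver's rtw_cbuf_pop()/c2h_evt_read()/c2h_evt_clear(), not real APIs, and the error path frees the buffer rather than reproducing the original's behaviour exactly:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_evt { u8 id; u8 plen; u8 payload[14]; };	/* hypothetical 16-byte event */

bool demo_queue_empty(void);
struct demo_evt *demo_pop(void);
int demo_read(struct demo_evt *evt);
void demo_clear(void);
void demo_handle(struct demo_evt *evt);

static void demo_drain_events(void)
{
	struct demo_evt *evt;

	while (!demo_queue_empty()) {
		evt = demo_pop();
		if (evt) {
			/* event already read elsewhere, just clear the latch */
			demo_clear();
		} else {
			evt = kzalloc(sizeof(*evt), GFP_KERNEL);
			if (!evt)
				continue;
			/* not read yet: read & clear now, skip on failure */
			if (demo_read(evt) != 0) {
				kfree(evt);
				continue;
			}
		}
		demo_handle(evt);
		kfree(evt);
	}
}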
@@ -2147,8 +2041,10 @@ u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
p2p_ps_wk_hdl(padapter, pdrvextra_cmd->type_size);
break;
case P2P_PROTO_WK_CID:
- /* Commented by Albert 2011/07/01 */
- /* I used the type_size as the type command */
+ /*
+ * Commented by Albert 2011/07/01
+ * I used the type_size as the type command
+ */
p2p_protocol_wk_hdl(padapter, pdrvextra_cmd->type_size);
break;
#endif
@@ -2174,13 +2070,12 @@ void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
if (pcmd->res == H2C_DROPPED) {
/* TODO: cancel timer and do timeout handler directly... */
/* need to make timeout handlerOS independent */
_set_timer(&pmlmepriv->scan_to_timer, 1);
- } else if (pcmd->res != H2C_SUCCESS) {
+ } else if (pcmd->res != H2C_SUCCESS) {
_set_timer(&pmlmepriv->scan_to_timer, 1);
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: MgntActrtw_set_802_11_bssid_LIST_SCAN Fail ************\n\n."));
}
@@ -2188,13 +2083,11 @@ _func_enter_;
/* free cmd */
rtw_free_cmd_obj(pcmd);
-_func_exit_;
}
void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
if (pcmd->res != H2C_SUCCESS) {
spin_lock_bh(&pmlmepriv->lock);
@@ -2202,24 +2095,18 @@ _func_enter_;
spin_unlock_bh(&pmlmepriv->lock);
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ***Error: disconnect_cmd_callback Fail ***\n."));
-
- goto exit;
+ return;
} else /* clear bridge database */
nat25_db_cleanup(padapter);
/* free cmd */
rtw_free_cmd_obj(pcmd);
-
-exit:
-
-_func_exit_;
}
void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
if (pcmd->res == H2C_DROPPED) {
/* TODO: cancel timer and do timeout handler directly... */
@@ -2232,7 +2119,6 @@ _func_enter_;
rtw_free_cmd_obj(pcmd);
-_func_exit_;
}
void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
@@ -2244,9 +2130,8 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
-_func_enter_;
- if ((pcmd->res != H2C_SUCCESS)) {
+ if (pcmd->res != H2C_SUCCESS) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: rtw_createbss_cmd_callback Fail ************\n\n."));
_set_timer(&pmlmepriv->assoc_timer, 1);
}
@@ -2261,7 +2146,7 @@ _func_enter_;
psta = rtw_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress);
if (psta == NULL) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nCan't alloc sta_info when createbss_cmd_callback\n"));
- goto createbss_cmd_fail ;
+ goto createbss_cmd_fail;
}
}
@@ -2298,7 +2183,6 @@ createbss_cmd_fail:
rtw_free_cmd_obj(pcmd);
-_func_exit_;
}
void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
@@ -2307,7 +2191,6 @@ void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pc
struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)(pcmd->rsp);
struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
-_func_enter_;
if (psta == NULL) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: rtw_setstaKey_cmdrsp_callback => can't get sta_info\n\n"));
@@ -2315,7 +2198,6 @@ _func_enter_;
}
exit:
rtw_free_cmd_obj(pcmd);
-_func_exit_;
}
void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
@@ -2326,7 +2208,6 @@ void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *
struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *)(pcmd->rsp);
struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
-_func_enter_;
if (psta == NULL) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: setassocsta_cmdrsp_callbac => can't get sta_info\n\n"));
@@ -2347,5 +2228,4 @@ _func_enter_;
exit:
rtw_free_cmd_obj(pcmd);
-_func_exit_;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index af32041a1e97..2beb2695e0f2 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -233,7 +233,7 @@ int proc_get_rf_info(char *page, char **start,
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
int len = 0;
- len += snprintf(page + len, count - len, "cur_ch=%d, cur_bw=%d, cur_ch_offet=%d\n",
+ len += snprintf(page + len, count - len, "cur_ch=%d, cur_bw=%d, cur_ch_offset=%d\n",
pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
*eof = 1;
return len;
@@ -783,7 +783,7 @@ int proc_set_rx_stbc(struct file *file, const char __user *buffer,
if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
if (pregpriv) {
pregpriv->rx_stbc = mode;
- printk("rx_stbc=%d\n", mode);
+ netdev_info(dev, "rx_stbc=%d\n", mode);
}
}
return count;
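
The printk() -> netdev_info() change above is the usual move to device-aware logging. A minimal sketch, assuming the caller already holds the relevant struct net_device pointer:

#include <linux/netdevice.h>

static void demo_log_rx_stbc(struct net_device *dev, int mode)
{
	/* netdev_info() prefixes the message with the driver and interface
	 * name (e.g. "wlan0: ..."), unlike a bare printk()
	 */
	netdev_info(dev, "rx_stbc=%d\n", mode);
}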
@@ -820,7 +820,7 @@ int proc_set_rssi_disp(struct file *file, const char __user *buffer,
if (enable) {
DBG_88E("Turn On Rx RSSI Display Function\n");
- padapter->bRxRSSIDisplay = enable ;
+ padapter->bRxRSSIDisplay = enable;
} else {
DBG_88E("Turn Off Rx RSSI Display Function\n");
padapter->bRxRSSIDisplay = 0;
@@ -851,12 +851,12 @@ int proc_get_all_sta_info(char *page, char **start,
for (i = 0; i < NUM_STA; i++) {
phead = &(pstapriv->sta_hash[i]);
- plist = get_next(phead);
+ plist = phead->next;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ psta = container_of(plist, struct sta_info, hash_list);
- plist = get_next(plist);
+ plist = plist->next;
len += snprintf(page + len, count - len, "sta's macaddr: %pM\n", psta->hwaddr);
len += snprintf(page + len, count - len, "rtsen=%d, cts2slef=%d\n", psta->rtsen, psta->cts2self);
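
The get_next()/LIST_CONTAINOR() replacements above walk a plain struct list_head directly. A hedged sketch of the same traversal with generic types; the list_for_each_entry() form in the comment is the more idiomatic alternative:

#include <linux/kernel.h>
#include <linux/list.h>

struct demo_sta {			/* hypothetical stand-in for struct sta_info */
	struct list_head hash_list;
	int rssi;
};

static int demo_sum_rssi(struct list_head *head)
{
	struct list_head *pos;
	struct demo_sta *sta;
	int sum = 0;

	/* open-coded walk, as in the patch: advance via ->next and recover
	 * the containing structure with container_of()
	 */
	for (pos = head->next; pos != head; pos = pos->next) {
		sta = container_of(pos, struct demo_sta, hash_list);
		sum += sta->rssi;
	}

	/* equivalent, more idiomatic form:
	 * list_for_each_entry(sta, head, hash_list)
	 *	sum += sta->rssi;
	 */
	return sum;
}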
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 6149e3aaa011..40afe48a12ef 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -76,11 +76,10 @@ Efuse_Write1ByteToFakeContent(
{
if (Offset >= EFUSE_MAX_HW_SIZE)
return false;
- if (fakeEfuseBank == 0) {
+ if (fakeEfuseBank == 0)
fakeEfuseContent[Offset] = Value;
- } else {
+ else
fakeBTEfuseContent[fakeEfuseBank-1][Offset] = Value;
- }
return true;
}
@@ -156,17 +155,15 @@ Efuse_CalculateWordCnts(u8 word_en)
return word_cnts;
}
-/* */
-/* Description: */
-/* Execute E-Fuse read byte operation. */
-/* Referred from SD1 Richard. */
-/* */
-/* Assumption: */
-/* 1. Boot from E-Fuse and successfully auto-load. */
-/* 2. PASSIVE_LEVEL (USB interface) */
-/* */
-/* Created by Roger, 2008.10.21. */
-/* */
+/*
+ * Description:
+ * Execute E-Fuse read byte operation.
+ * Referred from SD1 Richard.
+ * Assumption:
+ * 1. Boot from E-Fuse and successfully auto-load.
+ * 2. PASSIVE_LEVEL (USB interface)
+ * Created by Roger, 2008.10.21.
+ */
void
ReadEFuseByte(
struct adapter *Adapter,
@@ -210,23 +207,21 @@ ReadEFuseByte(
*pbuf = (u8)(value32 & 0xff);
}
-/* */
-/* Description: */
-/* 1. Execute E-Fuse read byte operation according as map offset and */
-/* save to E-Fuse table. */
-/* 2. Referred from SD1 Richard. */
-/* */
-/* Assumption: */
-/* 1. Boot from E-Fuse and successfully auto-load. */
-/* 2. PASSIVE_LEVEL (USB interface) */
-/* */
-/* Created by Roger, 2008.10.21. */
-/* */
-/* 2008/12/12 MH 1. Reorganize code flow and reserve bytes. and add description. */
-/* 2. Add efuse utilization collect. */
-/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
-/* write addr must be after sec5. */
-/* */
+/* Description:
+ * 1. Execute E-Fuse read byte operation according as map offset and
+ * save to E-Fuse table.
+ * 2. Referred from SD1 Richard.
+ * Assumption:
+ * 1. Boot from E-Fuse and successfully auto-load.
+ * 2. PASSIVE_LEVEL (USB interface)
+ * Created by Roger, 2008.10.21.
+ * 2008/12/12 MH
+ * 1. Reorganize code flow and reserve bytes. and add description.
+ * 2. Add efuse utilization collect.
+ * 2008/12/22 MH
+ * Read Efuse must check if we write section 1 data again!!!
+ * Sec1 write addr must be after sec5.
+ */
static void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool pseudo)
{
@@ -450,7 +445,7 @@ u8 rtw_efuse_access(struct adapter *padapter, u8 write, u16 start_addr, u16 cnts
{
int i = 0;
u16 real_content_len = 0, max_available_size = 0;
- u8 res = _FAIL ;
+ u8 res = _FAIL;
u8 (*rw8)(struct adapter *, u16, u8*);
EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&real_content_len, false);
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index e6f98fb8f113..0552019d1cf7 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -147,7 +147,6 @@ u8 *rtw_set_ie
uint *frlen /* frame length */
)
{
-_func_enter_;
*pbuf = (u8)index;
*(pbuf + 1) = (u8)len;
@@ -157,7 +156,6 @@ _func_enter_;
*frlen = *frlen + (len + 2);
-_func_exit_;
return pbuf + len + 2;
}
@@ -221,11 +219,8 @@ u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit)
{
int tmp, i;
u8 *p;
-_func_enter_;
- if (limit < 1) {
- _func_exit_;
+ if (limit < 1)
return NULL;
- }
p = pbuf;
i = 0;
@@ -242,7 +237,6 @@ _func_enter_;
if (i >= limit)
break;
}
-_func_exit_;
return NULL;
}
@@ -273,7 +267,7 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u
cnt = 0;
while (cnt < in_len) {
- if (eid == in_ie[cnt] && (!oui || _rtw_memcmp(&in_ie[cnt+2], oui, oui_len))) {
+ if (eid == in_ie[cnt] && (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) {
target_ie = &in_ie[cnt];
if (ie)
@@ -339,7 +333,6 @@ exit:
void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
{
-_func_enter_;
_rtw_memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
@@ -361,13 +354,11 @@ _func_enter_;
memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
break;
}
-_func_exit_;
}
uint rtw_get_rateset_len(u8 *rateset)
{
uint i = 0;
-_func_enter_;
while (1) {
if ((rateset[i]) == 0)
break;
@@ -375,7 +366,6 @@ _func_enter_;
break;
i++;
}
-_func_exit_;
return i;
}
@@ -386,7 +376,6 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
u8 *ie = pdev_network->IEs;
-_func_enter_;
/* timestamp will be inserted by hardware */
sz += 8;
@@ -444,7 +433,6 @@ _func_enter_;
if (rateLen > 8)
ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz);
-_func_exit_;
return sz;
}
@@ -463,7 +451,7 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
if (pbuf) {
/* check if oui matches... */
- if (_rtw_memcmp((pbuf + 2), wpa_oui_type, sizeof (wpa_oui_type)) == false)
+ if (!memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type)) == false)
goto check_next_ie;
/* check version... */
@@ -497,15 +485,15 @@ unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit)
int rtw_get_wpa_cipher_suite(u8 *s)
{
- if (_rtw_memcmp(s, WPA_CIPHER_SUITE_NONE, WPA_SELECTOR_LEN) == true)
+ if (!memcmp(s, WPA_CIPHER_SUITE_NONE, WPA_SELECTOR_LEN))
return WPA_CIPHER_NONE;
- if (_rtw_memcmp(s, WPA_CIPHER_SUITE_WEP40, WPA_SELECTOR_LEN) == true)
+ if (!memcmp(s, WPA_CIPHER_SUITE_WEP40, WPA_SELECTOR_LEN))
return WPA_CIPHER_WEP40;
- if (_rtw_memcmp(s, WPA_CIPHER_SUITE_TKIP, WPA_SELECTOR_LEN) == true)
+ if (!memcmp(s, WPA_CIPHER_SUITE_TKIP, WPA_SELECTOR_LEN))
return WPA_CIPHER_TKIP;
- if (_rtw_memcmp(s, WPA_CIPHER_SUITE_CCMP, WPA_SELECTOR_LEN) == true)
+ if (!memcmp(s, WPA_CIPHER_SUITE_CCMP, WPA_SELECTOR_LEN))
return WPA_CIPHER_CCMP;
- if (_rtw_memcmp(s, WPA_CIPHER_SUITE_WEP104, WPA_SELECTOR_LEN) == true)
+ if (!memcmp(s, WPA_CIPHER_SUITE_WEP104, WPA_SELECTOR_LEN))
return WPA_CIPHER_WEP104;
return 0;
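
The _rtw_memcmp(a, b, n) == true -> !memcmp(a, b, n) conversions above rely on memcmp() returning 0 on equality, where the driver wrapper returned true. A small sketch of the suite-to-id mapping pattern; the OUI bytes below are illustrative, not the actual WPA selector constants:

#include <linux/string.h>
#include <linux/types.h>

#define DEMO_SEL_LEN 4

static const u8 demo_suite_a[DEMO_SEL_LEN] = { 0x00, 0x50, 0xf2, 0x01 };
static const u8 demo_suite_b[DEMO_SEL_LEN] = { 0x00, 0x50, 0xf2, 0x02 };

static int demo_suite_to_id(const u8 *s)
{
	/* memcmp() == 0 means "equal", hence the !memcmp() idiom */
	if (!memcmp(s, demo_suite_a, DEMO_SEL_LEN))
		return 1;
	if (!memcmp(s, demo_suite_b, DEMO_SEL_LEN))
		return 2;
	return 0;	/* unknown suite */
}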
@@ -513,15 +501,15 @@ int rtw_get_wpa_cipher_suite(u8 *s)
int rtw_get_wpa2_cipher_suite(u8 *s)
{
- if (_rtw_memcmp(s, RSN_CIPHER_SUITE_NONE, RSN_SELECTOR_LEN) == true)
+ if (!memcmp(s, RSN_CIPHER_SUITE_NONE, RSN_SELECTOR_LEN))
return WPA_CIPHER_NONE;
- if (_rtw_memcmp(s, RSN_CIPHER_SUITE_WEP40, RSN_SELECTOR_LEN) == true)
+ if (!memcmp(s, RSN_CIPHER_SUITE_WEP40, RSN_SELECTOR_LEN))
return WPA_CIPHER_WEP40;
- if (_rtw_memcmp(s, RSN_CIPHER_SUITE_TKIP, RSN_SELECTOR_LEN) == true)
+ if (!memcmp(s, RSN_CIPHER_SUITE_TKIP, RSN_SELECTOR_LEN))
return WPA_CIPHER_TKIP;
- if (_rtw_memcmp(s, RSN_CIPHER_SUITE_CCMP, RSN_SELECTOR_LEN) == true)
+ if (!memcmp(s, RSN_CIPHER_SUITE_CCMP, RSN_SELECTOR_LEN))
return WPA_CIPHER_CCMP;
- if (_rtw_memcmp(s, RSN_CIPHER_SUITE_WEP104, RSN_SELECTOR_LEN) == true)
+ if (!memcmp(s, RSN_CIPHER_SUITE_WEP104, RSN_SELECTOR_LEN))
return WPA_CIPHER_WEP104;
return 0;
@@ -542,7 +530,7 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie+1) != (u8)(wpa_ie_len - 2)) ||
- (_rtw_memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN) != true))
+ (memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN)))
return _FAIL;
pos = wpa_ie;
@@ -587,7 +575,7 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
if (is_8021x) {
if (left >= 6) {
pos += 2;
- if (_rtw_memcmp(pos, SUITE_1X, 4) == 1) {
+ if (!memcmp(pos, SUITE_1X, 4)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s : there has 802.1x auth\n", __func__));
*is_8021x = 1;
}
@@ -657,7 +645,7 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
if (is_8021x) {
if (left >= 6) {
pos += 2;
- if (_rtw_memcmp(pos, SUITE_1X, 4) == 1) {
+ if (!memcmp(pos, SUITE_1X, 4)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s (): there has 802.1x auth\n", __func__));
*is_8021x = 1;
}
@@ -672,7 +660,6 @@ int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie,
u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
uint cnt;
-_func_enter_;
/* Search required WPA or WPA2 IE and copy to sec_ie[] */
@@ -683,7 +670,7 @@ _func_enter_;
while (cnt < in_len) {
authmode = in_ie[cnt];
- if ((authmode == _WPA_IE_ID_) && (_rtw_memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) {
+ if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
("\n rtw_get_wpa_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n",
sec_idx, in_ie[cnt+1]+2));
@@ -726,7 +713,6 @@ _func_enter_;
}
}
-_func_exit_;
return *rsn_len + *wpa_len;
}
@@ -741,7 +727,7 @@ u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
eid = ie_ptr[0];
- if ((eid == _WPA_IE_ID_) && (_rtw_memcmp(&ie_ptr[2], wps_oui, 4))) {
+ if ((eid == _WPA_IE_ID_) && (!memcmp(&ie_ptr[2], wps_oui, 4))) {
*wps_ielen = ie_ptr[1]+2;
match = true;
}
@@ -774,7 +760,7 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
while (cnt < in_len) {
eid = in_ie[cnt];
- if ((eid == _WPA_IE_ID_) && (_rtw_memcmp(&in_ie[cnt+2], wps_oui, 4))) {
+ if ((eid == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], wps_oui, 4))) {
wpsie_ptr = &in_ie[cnt];
if (wps_ie)
@@ -813,7 +799,7 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id , u8 *buf_at
*len_attr = 0;
if ((wps_ie[0] != _VENDOR_SPECIFIC_IE_) ||
- (_rtw_memcmp(wps_ie + 2, wps_oui , 4) != true))
+ (memcmp(wps_ie + 2, wps_oui , 4)))
return attr_ptr;
/* 6 = 1(Element ID) + 1(Length) + 4(WPS OUI) */
@@ -1223,7 +1209,7 @@ u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen)
dump_stack();
return NULL;
}
- if ((eid == _VENDOR_SPECIFIC_IE_) && (_rtw_memcmp(&in_ie[cnt+2], p2p_oui, 4) == true)) {
+ if ((eid == _VENDOR_SPECIFIC_IE_) && (!memcmp(&in_ie[cnt+2], p2p_oui, 4))) {
p2p_ie_ptr = in_ie + cnt;
if (p2p_ie != NULL)
@@ -1258,7 +1244,7 @@ u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id , u8 *buf_att
*len_attr = 0;
if (!p2p_ie || (p2p_ie[0] != _VENDOR_SPECIFIC_IE_) ||
- (_rtw_memcmp(p2p_ie + 2, p2p_oui , 4) != true))
+ (memcmp(p2p_ie + 2, p2p_oui , 4)))
return attr_ptr;
/* 6 = 1(Element ID) + 1(Length) + 3 (OUI) + 1(OUI Type) */
diff --git a/drivers/staging/rtl8188eu/core/rtw_io.c b/drivers/staging/rtl8188eu/core/rtw_io.c
index ff0398fca52b..7530532b3ff0 100644
--- a/drivers/staging/rtl8188eu/core/rtw_io.c
+++ b/drivers/staging/rtl8188eu/core/rtw_io.c
@@ -59,10 +59,8 @@ u8 _rtw_read8(struct adapter *adapter, u32 addr)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
- _func_enter_;
_read8 = pintfhdl->io_ops._read8;
r_val = _read8(pintfhdl, addr);
- _func_exit_;
return r_val;
}
@@ -72,11 +70,9 @@ u16 _rtw_read16(struct adapter *adapter, u32 addr)
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
-_func_enter_;
_read16 = pintfhdl->io_ops._read16;
r_val = _read16(pintfhdl, addr);
-_func_exit_;
return r_val;
}
@@ -86,11 +82,9 @@ u32 _rtw_read32(struct adapter *adapter, u32 addr)
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
-_func_enter_;
_read32 = pintfhdl->io_ops._read32;
r_val = _read32(pintfhdl, addr);
-_func_exit_;
return r_val;
}
@@ -100,11 +94,9 @@ int _rtw_write8(struct adapter *adapter, u32 addr, u8 val)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
int (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
int ret;
- _func_enter_;
_write8 = pintfhdl->io_ops._write8;
ret = _write8(pintfhdl, addr, val);
- _func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -115,11 +107,9 @@ int _rtw_write16(struct adapter *adapter, u32 addr, u16 val)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
int (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
int ret;
- _func_enter_;
_write16 = pintfhdl->io_ops._write16;
ret = _write16(pintfhdl, addr, val);
- _func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -129,11 +119,9 @@ int _rtw_write32(struct adapter *adapter, u32 addr, u32 val)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
int (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
int ret;
- _func_enter_;
_write32 = pintfhdl->io_ops._write32;
ret = _write32(pintfhdl, addr, val);
- _func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -144,11 +132,9 @@ int _rtw_writeN(struct adapter *adapter, u32 addr , u32 length , u8 *pdata)
struct intf_hdl *pintfhdl = (struct intf_hdl *)(&(pio_priv->intf));
int (*_writeN)(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata);
int ret;
- _func_enter_;
_writeN = pintfhdl->io_ops._writeN;
ret = _writeN(pintfhdl, addr, length, pdata);
- _func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -158,11 +144,9 @@ int _rtw_write8_async(struct adapter *adapter, u32 addr, u8 val)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
int (*_write8_async)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
int ret;
- _func_enter_;
_write8_async = pintfhdl->io_ops._write8_async;
ret = _write8_async(pintfhdl, addr, val);
- _func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -174,10 +158,8 @@ int _rtw_write16_async(struct adapter *adapter, u32 addr, u16 val)
int (*_write16_async)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
int ret;
-_func_enter_;
_write16_async = pintfhdl->io_ops._write16_async;
ret = _write16_async(pintfhdl, addr, val);
-_func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -189,10 +171,8 @@ int _rtw_write32_async(struct adapter *adapter, u32 addr, u32 val)
int (*_write32_async)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
int ret;
-_func_enter_;
_write32_async = pintfhdl->io_ops._write32_async;
ret = _write32_async(pintfhdl, addr, val);
-_func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -203,7 +183,6 @@ void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
- _func_enter_;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
("rtw_read_mem:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
@@ -212,7 +191,6 @@ void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
}
_read_mem = pintfhdl->io_ops._read_mem;
_read_mem(pintfhdl, addr, cnt, pmem);
- _func_exit_;
}
void _rtw_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
@@ -221,13 +199,11 @@ void _rtw_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
- _func_enter_;
_write_mem = pintfhdl->io_ops._write_mem;
_write_mem(pintfhdl, addr, cnt, pmem);
- _func_exit_;
}
void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
@@ -236,7 +212,6 @@ void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
- _func_enter_;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
@@ -249,7 +224,6 @@ void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
_read_port(pintfhdl, addr, cnt, pmem);
- _func_exit_;
}
void _rtw_read_port_cancel(struct adapter *adapter)
@@ -271,13 +245,11 @@ u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
u32 ret = _SUCCESS;
- _func_enter_;
_write_port = pintfhdl->io_ops._write_port;
ret = _write_port(pintfhdl, addr, cnt, pmem);
- _func_exit_;
return ret;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index e25b39b97d9e..f1398ab01d7b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -42,7 +42,6 @@ u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid)
u8 i;
u8 ret = true;
-_func_enter_;
if (ssid->SsidLength > 32) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid length >32\n"));
@@ -61,8 +60,6 @@ _func_enter_;
exit:
-_func_exit_;
-
return ret;
}
@@ -74,11 +71,10 @@ u8 rtw_do_join(struct adapter *padapter)
struct __queue *queue = &(pmlmepriv->scanned_queue);
u8 ret = _SUCCESS;
-_func_enter_;
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("\n rtw_do_join: phead = %p; plist = %p\n\n\n", phead, plist));
@@ -170,7 +166,6 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
@@ -181,7 +176,6 @@ u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
u32 cur_time = 0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
DBG_88E_LEVEL(_drv_info_, "set bssid:%pM\n", bssid);
@@ -205,7 +199,7 @@ _func_enter_;
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
- if (_rtw_memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) {
+ if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) {
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)
goto release_mlme_lock;/* it means driver is in WIFI_ADHOC_MASTER_STATE, we needn't create bss again. */
} else {
@@ -257,7 +251,6 @@ exit:
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
("rtw_set_802_11_bssid: status=%d\n", status));
-_func_exit_;
return status;
}
@@ -270,7 +263,6 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *pnetwork = &pmlmepriv->cur_network;
-_func_enter_;
DBG_88E_LEVEL(_drv_info_, "set ssid [%s] fw_state=0x%08x\n",
ssid->Ssid, get_fwstate(pmlmepriv));
@@ -285,18 +277,17 @@ _func_enter_;
spin_lock_bh(&pmlmepriv->lock);
DBG_88E("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
goto handle_tkip_countermeasure;
- } else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
+ else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true)
goto release_mlme_lock;
- }
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
("set_ssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
if ((pmlmepriv->assoc_ssid.SsidLength == ssid->SsidLength) &&
- (_rtw_memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid, ssid->SsidLength))) {
+ (!memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid, ssid->SsidLength))) {
if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
("Set SSID is the same ssid, fw_state = 0x%08x\n",
@@ -357,11 +348,10 @@ handle_tkip_countermeasure:
memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct ndis_802_11_ssid));
pmlmepriv->assoc_by_bssid = false;
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
pmlmepriv->to_join = true;
- } else {
+ else
status = rtw_do_join(padapter);
- }
release_mlme_lock:
spin_unlock_bh(&pmlmepriv->lock);
@@ -369,7 +359,6 @@ release_mlme_lock:
exit:
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
("-rtw_set_802_11_ssid: status =%d\n", status));
-_func_exit_;
return status;
}
@@ -380,7 +369,6 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
struct wlan_network *cur_network = &pmlmepriv->cur_network;
enum ndis_802_11_network_infra *pold_state = &(cur_network->network.InfrastructureMode);
-_func_enter_;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_notice_,
("+rtw_set_802_11_infrastructure_mode: old =%d new =%d fw_state = 0x%08x\n",
@@ -411,7 +399,7 @@ _func_enter_;
if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) {
if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
- rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have chked whether issue dis-assoc_cmd or not */
+ rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have checked whether issue dis-assoc_cmd or not */
}
*pold_state = networktype;
@@ -438,7 +426,6 @@ _func_enter_;
spin_unlock_bh(&pmlmepriv->lock);
}
-_func_exit_;
return true;
}
@@ -448,7 +435,6 @@ u8 rtw_set_802_11_disassociate(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
spin_lock_bh(&pmlmepriv->lock);
@@ -464,7 +450,6 @@ _func_enter_;
spin_unlock_bh(&pmlmepriv->lock);
-_func_exit_;
return true;
}
@@ -474,7 +459,6 @@ u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_s
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 res = true;
-_func_enter_;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("+rtw_set_802_11_bssid_list_scan(), fw_state =%x\n", get_fwstate(pmlmepriv)));
@@ -494,11 +478,12 @@ _func_enter_;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_bssid_list_scan fail since fw_state = %x\n", get_fwstate(pmlmepriv)));
res = true;
- if (check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)) == true) {
+ if (check_fwstate(pmlmepriv,
+ (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)) == true)
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###_FW_UNDER_SURVEY|_FW_UNDER_LINKING\n\n"));
- } else {
+ else
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###pmlmepriv->sitesurveyctrl.traffic_busy == true\n\n"));
- }
+
} else {
if (rtw_is_scan_deny(padapter)) {
DBG_88E(FUNC_ADPT_FMT": scan deny\n", FUNC_ADPT_ARG(padapter));
@@ -514,7 +499,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return res;
}
@@ -525,7 +509,6 @@ u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum ndis_802_11
int res;
u8 ret;
-_func_enter_;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_802_11_auth.mode(): mode =%x\n", authmode));
@@ -545,7 +528,6 @@ _func_enter_;
else
ret = false;
-_func_exit_;
return ret;
}
@@ -556,7 +538,6 @@ u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
struct security_priv *psecuritypriv = &(padapter->securitypriv);
u8 ret = _SUCCESS;
-_func_enter_;
keyid = wep->KeyIndex & 0x3fffffff;
@@ -581,7 +562,7 @@ _func_enter_;
break;
}
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- ("rtw_set_802_11_add_wep:befor memcpy, wep->KeyLength = 0x%x wep->KeyIndex = 0x%x keyid =%x\n",
+ ("rtw_set_802_11_add_wep:before memcpy, wep->KeyLength = 0x%x wep->KeyIndex = 0x%x keyid =%x\n",
wep->KeyLength, wep->KeyIndex, keyid));
memcpy(&(psecuritypriv->dot11DefKey[keyid].skey[0]), &(wep->KeyMaterial), wep->KeyLength);
@@ -611,7 +592,6 @@ _func_enter_;
if (res == _FAIL)
ret = false;
exit:
-_func_exit_;
return ret;
}
@@ -619,7 +599,6 @@ u8 rtw_set_802_11_remove_wep(struct adapter *padapter, u32 keyindex)
{
u8 ret = _SUCCESS;
-_func_enter_;
if (keyindex >= 0x80000000 || padapter == NULL) {
ret = false;
goto exit;
@@ -638,7 +617,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return ret;
}
@@ -651,7 +629,6 @@ u8 rtw_set_802_11_add_key(struct adapter *padapter, struct ndis_802_11_key *key)
u8 bgrouptkey = false;/* can be removed later */
u8 ret = _SUCCESS;
-_func_enter_;
if (((key->KeyIndex & 0x80000000) == 0) && ((key->KeyIndex & 0x40000000) > 0)) {
/* It is invalid to clear bit 31 and set bit 30. If the miniport driver encounters this combination, */
@@ -992,7 +969,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return ret;
}
@@ -1004,7 +980,6 @@ u8 rtw_set_802_11_remove_key(struct adapter *padapter, struct ndis_802_11_remove
u8 keyIndex = (u8)key->KeyIndex & 0x03;
u8 ret = _SUCCESS;
-_func_enter_;
if ((key->KeyIndex & 0xbffffffc) > 0) {
ret = _FAIL;
@@ -1032,7 +1007,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return ret;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index afac53709843..42b41ab1bce1 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -34,7 +34,7 @@ void BlinkTimerCallback(void *data)
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
return;
- _set_workitem(&(pLed->BlinkWorkItem));
+ schedule_work(&(pLed->BlinkWorkItem));
}
/* */
@@ -80,7 +80,7 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed, enum LED_PIN_8
_init_timer(&(pLed->BlinkTimer), padapter->pnetdev, BlinkTimerCallback, pLed);
- _init_workitem(&(pLed->BlinkWorkItem), BlinkWorkItemCallback, pLed);
+ INIT_WORK(&(pLed->BlinkWorkItem), BlinkWorkItemCallback);
}
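
The _init_workitem()/_set_workitem() -> INIT_WORK()/schedule_work() changes above move the LED blink handling to the standard workqueue API, where the handler receives the work_struct itself and recovers its container. A hedged sketch with demo types; the driver's BlinkWorkItemCallback would need the same container_of() treatment:

#include <linux/printk.h>
#include <linux/workqueue.h>

struct demo_led {			/* hypothetical LED state */
	int pin;
	struct work_struct blink_work;
};

static void demo_blink_work_fn(struct work_struct *work)
{
	/* the handler gets the work_struct; recover the enclosing object */
	struct demo_led *led = container_of(work, struct demo_led, blink_work);

	pr_debug("blink LED on pin %d\n", led->pin);
}

static void demo_led_init(struct demo_led *led)
{
	INIT_WORK(&led->blink_work, demo_blink_work_fn);
}

static void demo_led_trigger(struct demo_led *led)
{
	/* queue onto the system workqueue, as schedule_work() does here */
	schedule_work(&led->blink_work);
}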
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index c7382303088f..769d4ddc6754 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -31,11 +31,12 @@
#include <wlan_bssdef.h>
#include <rtw_ioctl_set.h>
#include <usb_osintf.h>
+#include <linux/vmalloc.h>
extern unsigned char MCS_rate_2R[16];
extern unsigned char MCS_rate_1R[16];
-int _rtw_init_mlme_priv (struct adapter *padapter)
+int _rtw_init_mlme_priv(struct adapter *padapter)
{
int i;
u8 *pbuf;
@@ -43,9 +44,7 @@ int _rtw_init_mlme_priv (struct adapter *padapter)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int res = _SUCCESS;
-_func_enter_;
-
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
pmlmepriv->nic_hdl = (u8 *)padapter;
@@ -62,7 +61,7 @@ _func_enter_;
_rtw_memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
- pbuf = rtw_zvmalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
+ pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
if (pbuf == NULL) {
res = _FAIL;
@@ -87,13 +86,10 @@ _func_enter_;
rtw_init_mlme_timer(padapter);
exit:
-
-_func_exit_;
-
return res;
}
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
{
kfree(*ppie);
@@ -122,24 +118,18 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
}
#endif
-void _rtw_free_mlme_priv (struct mlme_priv *pmlmepriv)
+void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
-_func_enter_;
-
rtw_free_mlme_priv_ie_data(pmlmepriv);
if (pmlmepriv) {
- if (pmlmepriv->free_bss_buf) {
- rtw_vmfree(pmlmepriv->free_bss_buf, MAX_BSS_CNT * sizeof(struct wlan_network));
- }
+ if (pmlmepriv->free_bss_buf)
+ vfree(pmlmepriv->free_bss_buf);
}
-_func_exit_;
}
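
The rtw_zvmalloc()/rtw_vmfree() -> vzalloc()/vfree() changes above (together with the new <linux/vmalloc.h> include) use the generic virtually-contiguous allocator; vfree() takes no size argument and tolerates NULL. A minimal sketch with an illustrative element type and count:

#include <linux/vmalloc.h>

#define DEMO_MAX_BSS 128		/* illustrative, not MAX_BSS_CNT */

struct demo_network { int id; };	/* hypothetical element type */

static struct demo_network *demo_alloc_pool(void)
{
	/* vzalloc() suits large, zeroed, non-DMA buffers like this pool */
	return vzalloc(DEMO_MAX_BSS * sizeof(struct demo_network));
}

static void demo_free_pool(struct demo_network *pool)
{
	vfree(pool);	/* no size needed; vfree(NULL) is a no-op */
}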
int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork)
{
-_func_enter_;
-
if (pnetwork == NULL)
goto exit;
@@ -150,9 +140,6 @@ _func_enter_;
spin_unlock_bh(&queue->lock);
exit:
-
-_func_exit_;
-
return _SUCCESS;
}
@@ -160,22 +147,18 @@ struct wlan_network *_rtw_dequeue_network(struct __queue *queue)
{
struct wlan_network *pnetwork;
-_func_enter_;
-
spin_lock_bh(&queue->lock);
if (_rtw_queue_empty(queue)) {
pnetwork = NULL;
} else {
- pnetwork = LIST_CONTAINOR(get_next(&queue->queue), struct wlan_network, list);
+ pnetwork = container_of((&queue->queue)->next, struct wlan_network, list);
rtw_list_delete(&(pnetwork->list));
}
spin_unlock_bh(&queue->lock);
-_func_exit_;
-
return pnetwork;
}
@@ -185,17 +168,15 @@ struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *f
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
struct list_head *plist = NULL;
-_func_enter_;
-
spin_lock_bh(&free_queue->lock);
if (_rtw_queue_empty(free_queue) == true) {
pnetwork = NULL;
goto exit;
}
- plist = get_next(&(free_queue->queue));
+ plist = free_queue->queue.next;
- pnetwork = LIST_CONTAINOR(plist , struct wlan_network, list);
+ pnetwork = container_of(plist , struct wlan_network, list);
rtw_list_delete(&pnetwork->list);
@@ -211,8 +192,6 @@ _func_enter_;
exit:
spin_unlock_bh(&free_queue->lock);
-_func_exit_;
-
return pnetwork;
}
@@ -222,13 +201,11 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv , struct wlan_network *pnetwo
u32 lifetime = SCANQUEUE_LIFETIME;
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
-_func_enter_;
-
if (pnetwork == NULL)
- goto exit;
+ return;
if (pnetwork->fixed)
- goto exit;
+ return;
curr_time = jiffies;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
@@ -236,33 +213,26 @@ _func_enter_;
if (!isfreeall) {
delta_time = (curr_time - pnetwork->last_scanned)/HZ;
if (delta_time < lifetime)/* unit:sec */
- goto exit;
+ return;
}
spin_lock_bh(&free_queue->lock);
rtw_list_delete(&(pnetwork->list));
rtw_list_insert_tail(&(pnetwork->list), &(free_queue->queue));
pmlmepriv->num_of_scanned--;
spin_unlock_bh(&free_queue->lock);
-
-exit:
-_func_exit_;
}
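
The _rtw_free_network() path above ages scan entries by converting a jiffies delta to seconds before comparing against the lifetime. A small sketch of that age test; the lifetime value is illustrative, not SCANQUEUE_LIFETIME:

#include <linux/jiffies.h>
#include <linux/types.h>

#define DEMO_LIFETIME_SEC 20	/* illustrative lifetime in seconds */

static bool demo_entry_expired(unsigned long last_scanned)
{
	/* unsigned jiffies arithmetic handles wrap-around */
	unsigned long delta_sec = (jiffies - last_scanned) / HZ;

	/* entries younger than the lifetime are kept */
	return delta_sec >= DEMO_LIFETIME_SEC;
}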
void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork)
{
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
-_func_enter_;
if (pnetwork == NULL)
- goto exit;
+ return;
if (pnetwork->fixed)
- goto exit;
+ return;
rtw_list_delete(&(pnetwork->list));
rtw_list_insert_tail(&(pnetwork->list), get_list_head(free_queue));
pmlmepriv->num_of_scanned--;
-exit:
-
-_func_exit_;
}
/*
@@ -276,24 +246,22 @@ struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
struct wlan_network *pnetwork = NULL;
u8 zero_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
-_func_enter_;
- if (_rtw_memcmp(zero_addr, addr, ETH_ALEN)) {
+ if (!memcmp(zero_addr, addr, ETH_ALEN)) {
pnetwork = NULL;
goto exit;
}
phead = get_list_head(scanned_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (plist != phead) {
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network , list);
- if (_rtw_memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN) == true)
+ pnetwork = container_of(plist, struct wlan_network , list);
+ if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
break;
- plist = get_next(plist);
+ plist = plist->next;
}
if (plist == phead)
pnetwork = NULL;
exit:
-_func_exit_;
return pnetwork;
}
@@ -305,29 +273,24 @@ void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *scanned_queue = &pmlmepriv->scanned_queue;
-_func_enter_;
-
-
spin_lock_bh(&scanned_queue->lock);
phead = get_list_head(scanned_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (rtw_end_of_queue_search(phead, plist) == false) {
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
- plist = get_next(plist);
+ plist = plist->next;
_rtw_free_network(pmlmepriv, pnetwork, isfreeall);
}
spin_unlock_bh(&scanned_queue->lock);
-_func_exit_;
}
int rtw_if_up(struct adapter *padapter)
{
int res;
-_func_enter_;
if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
(check_fwstate(&padapter->mlmepriv, _FW_LINKED) == false)) {
@@ -338,8 +301,6 @@ _func_enter_;
} else {
res = true;
}
-
-_func_exit_;
return res;
}
@@ -347,14 +308,12 @@ void rtw_generate_random_ibss(u8 *pibss)
{
u32 curtime = jiffies;
-_func_enter_;
pibss[0] = 0x02; /* in ad-hoc mode bit1 must set to 1 */
pibss[1] = 0x11;
pibss[2] = 0x87;
pibss[3] = (u8)(curtime & 0xff);/* p[0]; */
pibss[4] = (u8)((curtime>>8) & 0xff);/* p[1]; */
pibss[5] = (u8)((curtime>>16) & 0xff);/* p[2]; */
-_func_exit_;
return;
}
@@ -367,11 +326,9 @@ u8 *rtw_get_capability_from_ie(u8 *ie)
u16 rtw_get_capability(struct wlan_bssid_ex *bss)
{
__le16 val;
-_func_enter_;
memcpy((u8 *)&val, rtw_get_capability_from_ie(bss->IEs), 2);
-_func_exit_;
return le16_to_cpu(val);
}
@@ -385,46 +342,34 @@ u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
return ie + 8;
}
-int rtw_init_mlme_priv (struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */
+int rtw_init_mlme_priv(struct adapter *padapter)
{
int res;
-_func_enter_;
res = _rtw_init_mlme_priv(padapter);/* (pmlmepriv); */
-_func_exit_;
return res;
}
-void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv)
+void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_mlme_priv\n"));
- _rtw_free_mlme_priv (pmlmepriv);
-_func_exit_;
+ _rtw_free_mlme_priv(pmlmepriv);
}
static struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
{
- struct wlan_network *pnetwork;
-_func_enter_;
- pnetwork = _rtw_alloc_network(pmlmepriv);
-_func_exit_;
- return pnetwork;
+ return _rtw_alloc_network(pmlmepriv);
}
static void rtw_free_network_nolock(struct mlme_priv *pmlmepriv,
struct wlan_network *pnetwork)
{
-_func_enter_;
_rtw_free_network_nolock(pmlmepriv, pnetwork);
-_func_exit_;
}
void rtw_free_network_queue(struct adapter *dev, u8 isfreeall)
{
-_func_enter_;
_rtw_free_network_queue(dev, isfreeall);
-_func_exit_;
}
/*
@@ -458,7 +403,7 @@ int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork)
static int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b)
{
return (a->Ssid.SsidLength == b->Ssid.SsidLength) &&
- _rtw_memcmp(a->Ssid.Ssid, b->Ssid.Ssid, a->Ssid.SsidLength);
+ !memcmp(a->Ssid.Ssid, b->Ssid.Ssid, a->Ssid.SsidLength);
}
int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
@@ -466,7 +411,6 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
u16 s_cap, d_cap;
__le16 le_scap, le_dcap;
-_func_enter_;
memcpy((u8 *)&le_scap, rtw_get_capability_from_ie(src->IEs), 2);
memcpy((u8 *)&le_dcap, rtw_get_capability_from_ie(dst->IEs), 2);
@@ -474,11 +418,9 @@ _func_enter_;
s_cap = le16_to_cpu(le_scap);
d_cap = le16_to_cpu(le_dcap);
-_func_exit_;
-
return ((src->Ssid.SsidLength == dst->Ssid.SsidLength) &&
- ((_rtw_memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN)) == true) &&
- ((_rtw_memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength)) == true) &&
+ ((!memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN)) == true) &&
+ ((!memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength)) == true) &&
((s_cap & WLAN_CAPABILITY_IBSS) ==
(d_cap & WLAN_CAPABILITY_IBSS)) &&
((s_cap & WLAN_CAPABILITY_BSS) ==
@@ -491,25 +433,23 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
struct wlan_network *pwlan = NULL;
struct wlan_network *oldest = NULL;
-_func_enter_;
phead = get_list_head(scanned_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pwlan = container_of(plist, struct wlan_network, list);
if (!pwlan->fixed) {
if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned))
oldest = pwlan;
}
- plist = get_next(plist);
+ plist = plist->next;
}
-_func_exit_;
return oldest;
}
@@ -522,7 +462,6 @@ void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
u8 sq_final;
long rssi_final;
-_func_enter_;
rtw_hal_antdiv_rssi_compared(padapter, dst, src); /* this will update src.Rssi, need consider again */
/* The rule below is 1/5 for sample value, 4/5 for history value */
@@ -553,22 +492,18 @@ _func_enter_;
dst->PhyInfo.SignalQuality = sq_final;
dst->Rssi = rssi_final;
-_func_exit_;
}
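
The update_network() hunk above keeps the smoothing rule its comment describes: each new sample contributes 1/5 and the stored history 4/5. A worked sketch of that running average; the dBm values in the comment are only an example:

/* Weighted moving average, 1/5 new sample + 4/5 history (integer division).
 * E.g. history = -60 dBm, sample = -50 dBm  ->  (-50 + 4 * -60) / 5 = -58.
 */
static long demo_smooth_rssi(long history, long sample)
{
	return (sample + 4 * history) / 5;
}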
static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex *pnetwork)
{
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
-_func_enter_;
-
if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) &&
(is_same_network(&(pmlmepriv->cur_network.network), pnetwork))) {
update_network(&(pmlmepriv->cur_network.network), pnetwork, adapter, true);
rtw_update_protection(adapter, (pmlmepriv->cur_network.network.IEs) + sizeof(struct ndis_802_11_fixed_ie),
pmlmepriv->cur_network.network.IELength);
}
-_func_exit_;
}
/*
@@ -583,24 +518,22 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
struct wlan_network *pnetwork = NULL;
struct wlan_network *oldest = NULL;
-_func_enter_;
-
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (is_same_network(&(pnetwork->network), target))
break;
if ((oldest == ((struct wlan_network *)0)) ||
time_after(oldest->last_scanned, pnetwork->last_scanned))
oldest = pnetwork;
- plist = get_next(plist);
+ plist = plist->next;
}
/* If we didn't find a match, then get a new network slot to initialize
* with this beacon's information */
@@ -663,27 +596,26 @@ _func_enter_;
exit:
spin_unlock_bh(&queue->lock);
-_func_exit_;
}
static void rtw_add_network(struct adapter *adapter,
struct wlan_bssid_ex *pnetwork)
{
-_func_enter_;
#if defined(CONFIG_88EU_P2P)
rtw_wlan_bssid_ex_remove_p2p_attr(pnetwork, P2P_ATTR_GROUP_INFO);
#endif
update_current_network(adapter, pnetwork);
rtw_update_scanned_network(adapter, pnetwork);
-_func_exit_;
}
-/* select the desired network based on the capability of the (i)bss. */
-/* check items: (1) security */
-/* (2) network_type */
-/* (3) WMM */
-/* (4) HT */
-/* (5) others */
+/*
+ * select the desired network based on the capability of the (i)bss.
+ * check items: (1) security
+ * (2) network_type
+ * (3) WMM
+ * (4) HT
+ * (5) others
+ */
static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwork)
{
struct security_priv *psecuritypriv = &adapter->securitypriv;
@@ -728,9 +660,7 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *
/* TODO: Perry: For Power Management */
void rtw_atimdone_event_callback(struct adapter *adapter , u8 *pbuf)
{
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_evet\n"));
-_func_exit_;
return;
}
@@ -741,8 +671,6 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
struct wlan_bssid_ex *pnetwork;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
-_func_enter_;
-
pnetwork = (struct wlan_bssid_ex *)pbuf;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_survey_event_callback, ssid=%s\n", pnetwork->Ssid.Ssid));
@@ -756,7 +684,7 @@ _func_enter_;
/* update IBSS_network 's timestamp */
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) {
- if (_rtw_memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) {
+ if (!memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) {
struct wlan_network *ibss_wlan = NULL;
memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8);
@@ -781,20 +709,14 @@ _func_enter_;
exit:
spin_unlock_bh(&pmlmepriv->lock);
-
-_func_exit_;
-
return;
}
-
-
void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
{
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
struct mlme_ext_priv *pmlmeext;
-_func_enter_;
spin_lock_bh(&pmlmepriv->lock);
if (pmlmepriv->wps_probe_req_ie) {
@@ -884,7 +806,6 @@ _func_enter_;
pmlmeext = &adapter->mlmeextpriv;
if (pmlmeext->sitesurvey_res.bss_cnt == 0)
rtw_hal_sreset_reset(adapter);
-_func_exit_;
}
void rtw_dummy_event_callback(struct adapter *adapter , u8 *pbuf)
@@ -901,17 +822,15 @@ static void free_scanqueue(struct mlme_priv *pmlmepriv)
struct __queue *scan_queue = &pmlmepriv->scanned_queue;
struct list_head *plist, *phead, *ptemp;
-_func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+free_scanqueue\n"));
spin_lock_bh(&scan_queue->lock);
spin_lock_bh(&free_queue->lock);
phead = get_list_head(scan_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (plist != phead) {
- ptemp = get_next(plist);
+ ptemp = plist->next;
rtw_list_delete(plist);
rtw_list_insert_tail(plist, &free_queue->queue);
plist = ptemp;
@@ -920,8 +839,6 @@ _func_enter_;
spin_unlock_bh(&free_queue->lock);
spin_unlock_bh(&scan_queue->lock);
-
-_func_exit_;
}
/*
@@ -934,8 +851,6 @@ void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
struct sta_priv *pstapriv = &adapter->stapriv;
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
-_func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_free_assoc_resources\n"));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
("tgt_network->network.MacAddress=%pM ssid=%s\n",
@@ -979,7 +894,6 @@ _func_enter_;
if (lock_scanned_queue)
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
pmlmepriv->key_mask = 0;
-_func_exit_;
}
/*
@@ -989,8 +903,6 @@ void rtw_indicate_connect(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_connect\n"));
pmlmepriv->to_join = false;
@@ -1008,7 +920,6 @@ _func_enter_;
rtw_set_scan_deny(padapter, 3000);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("-rtw_indicate_connect: fw_state=0x%08x\n", get_fwstate(pmlmepriv)));
-_func_exit_;
}
/*
@@ -1018,7 +929,6 @@ void rtw_indicate_disconnect(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_disconnect\n"));
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING | WIFI_UNDER_WPS);
@@ -1038,8 +948,6 @@ _func_enter_;
p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_DISCONNECT, 1);
-
-_func_exit_;
}
inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted)
@@ -1100,9 +1008,12 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
_rtw_memset((u8 *)&psta->dot11txpn, 0, sizeof(union pn48));
_rtw_memset((u8 *)&psta->dot11rxpn, 0, sizeof(union pn48));
}
- /* Commented by Albert 2012/07/21 */
- /* When doing the WPS, the wps_ie_len won't equal to 0 */
- /* And the Wi-Fi driver shouldn't allow the data packet to be tramsmitted. */
+ /*
+ * Commented by Albert 2012/07/21
+ * When doing WPS, the wps_ie_len won't be equal to 0,
+ * and the Wi-Fi driver shouldn't allow the data
+ * packet to be transmitted.
+ */
if (padapter->securitypriv.wps_ie_len != 0) {
psta->ieee8021x_blocked = true;
padapter->securitypriv.wps_ie_len = 0;
@@ -1206,8 +1117,6 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
struct wlan_network *pcur_wlan = NULL, *ptarget_wlan = NULL;
unsigned int the_same_macaddr = false;
-_func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("joinbss event call back received with res=%d\n", pnetwork->join_res));
rtw_get_encrypt_decrypt_from_registrypriv(adapter);
@@ -1218,12 +1127,12 @@ _func_enter_;
else
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
- the_same_macaddr = _rtw_memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
+ the_same_macaddr = !memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
pnetwork->network.Length = get_wlan_bssid_ex_sz(&pnetwork->network);
if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n ***joinbss_evt_callback return a wrong bss ***\n\n"));
- goto ignore_nolock;
+ return;
}
spin_lock_bh(&pmlmepriv->lock);
@@ -1319,27 +1228,21 @@ _func_enter_;
ignore_joinbss_callback:
spin_unlock_bh(&pmlmepriv->lock);
-ignore_nolock:
-_func_exit_;
}
void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
{
struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
-_func_enter_;
-
mlmeext_joinbss_event_callback(adapter, pnetwork->join_res);
rtw_os_xmit_schedule(adapter);
-
-_func_exit_;
}
static u8 search_max_mac_id(struct adapter *padapter)
{
u8 mac_id;
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
u8 aid;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1347,7 +1250,7 @@ static u8 search_max_mac_id(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
for (aid = (pstapriv->max_num_sta); aid > 0; aid--) {
if (pstapriv->sta_aid[aid-1] != NULL)
@@ -1388,19 +1291,17 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
struct wlan_network *cur_network = &(pmlmepriv->cur_network);
struct wlan_network *ptarget_wlan = NULL;
-_func_enter_;
-
if (rtw_access_ctrl(adapter, pstassoc->macaddr) == false)
return;
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
if (psta) {
ap_sta_info_defer_update(adapter, psta);
rtw_stassoc_hw_rpt(adapter, psta);
}
- goto exit;
+ return;
}
#endif
/* for AD-HOC mode */
@@ -1408,12 +1309,12 @@ _func_enter_;
if (psta != NULL) {
/* the sta have been in sta_info_queue => do nothing */
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error: rtw_stassoc_event_callback: sta has been in sta_hash_queue\n"));
- goto exit; /* between drv has received this event before and fw have not yet to set key to CAM_ENTRY) */
+ return; /* the driver has already received this event, but the fw has not yet set the key to CAM_ENTRY */
}
psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
if (psta == NULL) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't alloc sta_info when rtw_stassoc_event_callback\n"));
- goto exit;
+ return;
}
/* to do: init sta_info variable */
psta->qos_option = 0;
@@ -1440,8 +1341,6 @@ _func_enter_;
}
spin_unlock_bh(&pmlmepriv->lock);
mlmeext_sta_add_event_callback(adapter, psta);
-exit:
-_func_exit_;
}
void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
@@ -1456,8 +1355,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
struct sta_priv *pstapriv = &adapter->stapriv;
struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
-_func_enter_;
-
psta = rtw_get_stainfo(&adapter->stapriv, pstadel->macaddr);
if (psta)
mac_id = psta->mac_id;
@@ -1541,14 +1438,11 @@ _func_enter_;
}
}
spin_unlock_bh(&pmlmepriv->lock);
-_func_exit_;
}
void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
{
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_cpwm_event_callback !!!\n"));
-_func_exit_;
}
/*
@@ -1560,8 +1454,6 @@ void _rtw_join_timeout_handler (struct adapter *adapter)
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
int do_join_r;
-_func_enter_;
-
DBG_88E("%s, fw_state=%x\n", __func__, get_fwstate(pmlmepriv));
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
@@ -1592,7 +1484,6 @@ _func_enter_;
free_scanqueue(pmlmepriv);/* */
}
spin_unlock_bh(&pmlmepriv->lock);
-_func_exit_;
}
/*
@@ -1658,14 +1549,12 @@ void rtw_dynamic_check_timer_handlder(struct adapter *adapter)
/* expire NAT2.5 entry */
nat25_db_expire(adapter);
- if (adapter->pppoe_connection_in_progress > 0) {
+ if (adapter->pppoe_connection_in_progress > 0)
adapter->pppoe_connection_in_progress--;
- }
/* due to rtw_dynamic_check_timer_handlder() is called every 2 seconds */
- if (adapter->pppoe_connection_in_progress > 0) {
+ if (adapter->pppoe_connection_in_progress > 0)
adapter->pppoe_connection_in_progress--;
- }
}
rcu_read_unlock();
@@ -1687,14 +1576,14 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
/* check bssid, if needed */
if (pmlmepriv->assoc_by_bssid) {
- if (!_rtw_memcmp(competitor->network.MacAddress, pmlmepriv->assoc_bssid, ETH_ALEN))
+ if (memcmp(competitor->network.MacAddress, pmlmepriv->assoc_bssid, ETH_ALEN))
goto exit;
}
/* check ssid, if needed */
- if (pmlmepriv->assoc_ssid.Ssid && pmlmepriv->assoc_ssid.SsidLength) {
+ if (pmlmepriv->assoc_ssid.SsidLength) {
if (competitor->network.Ssid.SsidLength != pmlmepriv->assoc_ssid.SsidLength ||
- _rtw_memcmp(competitor->network.Ssid.Ssid, pmlmepriv->assoc_ssid.Ssid, pmlmepriv->assoc_ssid.SsidLength) == false)
+ memcmp(competitor->network.Ssid.Ssid, pmlmepriv->assoc_ssid.Ssid, pmlmepriv->assoc_ssid.SsidLength))
goto exit;
}
@@ -1742,20 +1631,18 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
struct wlan_network *candidate = NULL;
u8 supp_ant_div = false;
-_func_enter_;
-
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
adapter = (struct adapter *)pmlmepriv->nic_hdl;
- pmlmepriv->pscanned = get_next(phead);
+ pmlmepriv->pscanned = phead->next;
while (!rtw_end_of_queue_search(phead, pmlmepriv->pscanned)) {
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
if (pnetwork == NULL) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork==NULL)\n", __func__));
ret = _FAIL;
goto exit;
}
- pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+ pmlmepriv->pscanned = pmlmepriv->pscanned->next;
rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
}
if (candidate == NULL) {
@@ -1792,9 +1679,6 @@ _func_enter_;
exit:
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
-_func_exit_;
-
return ret;
}
@@ -1805,8 +1689,6 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
int res = _SUCCESS;
-_func_enter_;
-
pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL; /* try again */
@@ -1832,7 +1714,6 @@ _func_enter_;
psecuritypriv->dot11AuthAlgrthm));
res = rtw_enqueue_cmd(pcmdpriv, pcmd);
exit:
-_func_exit_;
return res;
}
@@ -1845,7 +1726,6 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
int res = _SUCCESS;
-_func_enter_;
pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL; /* try again */
@@ -1914,7 +1794,6 @@ _func_enter_;
_rtw_init_listhead(&pcmd->list);
res = rtw_enqueue_cmd(pcmdpriv, pcmd);
exit:
-_func_exit_;
return res;
}
@@ -1946,17 +1825,15 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
return ielength;
}
-/* */
-/* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
-/* Added by Annie, 2006-05-07. */
-/* */
-/* Search by BSSID, */
-/* Return Value: */
-/* -1 :if there is no pre-auth key in the table */
-/* >= 0 :if there is pre-auth key, and return the entry id */
-/* */
-/* */
-
+/*
+ * Ported from 8185: IsInPreAuthKeyList().
+ * (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.)
+ * Added by Annie, 2006-05-07.
+ * Search by BSSID.
+ * Return Value:
+ *  -1: if there is no pre-auth key in the table
+ * >= 0: the entry id of the matching pre-auth key
+ */
static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
{
struct security_priv *psecuritypriv = &Adapter->securitypriv;
@@ -1964,7 +1841,7 @@ static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
do {
if ((psecuritypriv->PMKIDList[i].bUsed) &&
- (_rtw_memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN) == true)) {
+ (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) {
break;
} else {
i++;
@@ -1973,11 +1850,9 @@ static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
} while (i < NUM_PMKID_CACHE);
- if (i == NUM_PMKID_CACHE) {
+ if (i == NUM_PMKID_CACHE)
i = -1;/* Could not find. */
- } else {
- /* There is one Pre-Authentication Key for the specific BSSID. */
- }
+
return i;
}
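For reference, the return contract above maps onto the rtw_restruct_sec_ie() caller visible in the following hunks: a negative index means no PMKID is cached for the BSSID, a non-negative one is the cache slot whose PMKID gets appended. A simplified caller-side sketch (condensed from that caller, not a verbatim excerpt):

    int iEntry = SecIsInPMKIDList(adapter, bssid);

    if (iEntry < 0) {
        /* no pre-auth key cached for this BSSID: leave the IE untouched */
    } else if (authmode == _WPA2_IE_ID_) {
        /* append the cached PMKID from slot iEntry to the RSN IE */
        ielength = rtw_append_pmkid(adapter, iEntry, out_ie, ielength);
    }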
@@ -2018,8 +1893,6 @@ int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
uint ndisauthmode = psecuritypriv->ndisauthtype;
uint ndissecuritytype = psecuritypriv->ndisencryptstatus;
-_func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
("+rtw_restruct_sec_ie: ndisauthmode=%d ndissecuritytype=%d\n",
ndisauthmode, ndissecuritytype));
@@ -2052,9 +1925,6 @@ _func_enter_;
if (authmode == _WPA2_IE_ID_)
ielength = rtw_append_pmkid(adapter, iEntry, out_ie, ielength);
}
-
-_func_exit_;
-
return ielength;
}
@@ -2065,8 +1935,6 @@ void rtw_init_registrypriv_dev_network(struct adapter *adapter)
struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
u8 *myhwaddr = myid(peepriv);
-_func_enter_;
-
memcpy(pdev_network->MacAddress, myhwaddr, ETH_ALEN);
memcpy(&pdev_network->Ssid, &pregistrypriv->ssid, sizeof(struct ndis_802_11_ssid));
@@ -2077,8 +1945,6 @@ _func_enter_;
pdev_network->Configuration.FHConfig.HopPattern = 0;
pdev_network->Configuration.FHConfig.HopSet = 0;
pdev_network->Configuration.FHConfig.DwellTime = 0;
-
-_func_exit_;
}
void rtw_update_registrypriv_dev_network(struct adapter *adapter)
@@ -2089,8 +1955,6 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter)
struct security_priv *psecuritypriv = &adapter->securitypriv;
struct wlan_network *cur_network = &adapter->mlmepriv.cur_network;
-_func_enter_;
-
pdev_network->Privacy = (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0); /* adhoc no 802.1x */
pdev_network->Rssi = 0;
@@ -2140,13 +2004,10 @@ _func_enter_;
/* notes: translate IELength & Length after assign the Length to cmdsz in createbss_cmd(); */
/* pdev_network->IELength = cpu_to_le32(sz); */
-_func_exit_;
}
void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter)
{
-_func_enter_;
-_func_exit_;
}
/* the function is at passive_level */
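Most of the mechanical churn in rtw_mlme.c follows a single rule: the driver's _rtw_memcmp() returned true when two buffers matched, while the standard memcmp() returns 0 on a match, so every converted call site inverts its test. A minimal sketch of that rule, assuming the old helper was a thin boolean wrapper (illustrative definition, not the driver's exact one):

    #include <linux/string.h>   /* memcmp() */
    #include <linux/types.h>    /* bool, size_t */

    /* Illustrative stand-in for the removed helper: true on match. */
    static inline bool old_rtw_memcmp(const void *a, const void *b, size_t n)
    {
        return memcmp(a, b, n) == 0;
    }

    /* Conversion applied at every call site in this patch:
     *   _rtw_memcmp(a, b, n)   ->  !memcmp(a, b, n)   (test for "equal")
     *   !_rtw_memcmp(a, b, n)  ->   memcmp(a, b, n)   (test for "different")
     */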
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 6f7e415ecb6c..3ed5941bedc3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -171,7 +171,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
{0x03}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */
};
-static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the conbination for max channel numbers */
+static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the combination for max channel numbers */
/*
* Search the @param channel_num in given @param channel_set
@@ -414,21 +414,21 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
}
}
-static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, union recv_frame *precv_frame)
+static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, struct recv_frame *precv_frame)
{
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
if (ptable->func) {
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
- if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- !_rtw_memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
+ memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
return;
ptable->func(padapter, precv_frame);
}
}
-void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
+void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
{
int index;
struct mlme_handler *ptable;
@@ -436,7 +436,7 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
#endif /* CONFIG_88EU_AP_MODE */
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
@@ -449,8 +449,8 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
}
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
- if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- !_rtw_memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
+ memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
return;
ptable = mlme_sta_tbl;
@@ -465,13 +465,15 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
if (psta != NULL) {
if (GetRetry(pframe)) {
- if (precv_frame->u.hdr.attrib.seq_num == psta->RxMgmtFrameSeqNum) {
+ if (precv_frame->attrib.seq_num ==
+ psta->RxMgmtFrameSeqNum) {
/* drop the duplicate management frame */
- DBG_88E("Drop duplicate management frame with seq_num=%d.\n", precv_frame->u.hdr.attrib.seq_num);
+ DBG_88E("Drop duplicate management frame with seq_num=%d.\n",
+ precv_frame->attrib.seq_num);
return;
}
}
- psta->RxMgmtFrameSeqNum = precv_frame->u.hdr.attrib.seq_num;
+ psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num;
}
#ifdef CONFIG_88EU_AP_MODE
@@ -532,7 +534,7 @@ Following are the callback functions for each subtype of the management frames
*****************************************************************************/
-unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame)
{
unsigned int ielen;
unsigned char *p;
@@ -540,8 +542,8 @@ unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint len = precv_frame->len;
u8 is_valid_p2p_probereq = false;
#ifdef CONFIG_88EU_P2P
@@ -596,7 +598,7 @@ _continue:
if (is_valid_p2p_probereq)
goto _issue_probersp;
- if ((ielen != 0 && !_rtw_memcmp((void *)(p+2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength)) ||
+ if ((ielen != 0 && memcmp((void *)(p+2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength)) ||
(ielen == 0 && pmlmeinfo->hidden_ssid_mode))
return _SUCCESS;
@@ -609,18 +611,18 @@ _issue_probersp:
return _SUCCESS;
}
-unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnProbeRsp(struct adapter *padapter, struct recv_frame *precv_frame)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
#endif
#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
if (pwdinfo->tx_prov_disc_info.benable) {
- if (_rtw_memcmp(pwdinfo->tx_prov_disc_info.peerIFAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ if (!memcmp(pwdinfo->tx_prov_disc_info.peerIFAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
pwdinfo->tx_prov_disc_info.benable = false;
issue_p2p_provision_request(padapter,
@@ -638,7 +640,7 @@ unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
} else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
if (pwdinfo->nego_req_info.benable) {
DBG_88E("[%s] P2P State is GONEGO ING!\n", __func__);
- if (_rtw_memcmp(pwdinfo->nego_req_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ if (!memcmp(pwdinfo->nego_req_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
pwdinfo->nego_req_info.benable = false;
issue_p2p_GO_request(padapter, pwdinfo->nego_req_info.peerDevAddr);
}
@@ -646,7 +648,7 @@ unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
} else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_INVITE_REQ)) {
if (pwdinfo->invitereq_info.benable) {
DBG_88E("[%s] P2P_STATE_TX_INVITE_REQ!\n", __func__);
- if (_rtw_memcmp(pwdinfo->invitereq_info.peer_macaddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ if (!memcmp(pwdinfo->invitereq_info.peer_macaddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
pwdinfo->invitereq_info.benable = false;
issue_p2p_invitation_request(padapter, pwdinfo->invitereq_info.peer_macaddr);
}
@@ -663,7 +665,7 @@ unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
}
-unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame)
{
int cam_idx;
struct sta_info *psta;
@@ -671,8 +673,8 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint len = precv_frame->len;
struct wlan_bssid_ex *pbss;
int ret = _SUCCESS;
@@ -681,7 +683,7 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
}
- if (_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) {
+ if (!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) {
if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
/* we should update current network before auth, or some IE is wrong */
pbss = (struct wlan_bssid_ex *)rtw_malloc(sizeof(struct wlan_bssid_ex));
@@ -753,7 +755,7 @@ _END_ONBEACON_:
return _SUCCESS;
}
-unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAuth(struct adapter *padapter, struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
unsigned int auth_mode, ie_len;
@@ -767,8 +769,8 @@ unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint len = precv_frame->len;
if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
return _FAIL;
@@ -878,7 +880,7 @@ unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
goto auth_fail;
}
- if (_rtw_memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
+ if (!memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
pstat->state &= (~WIFI_FW_AUTH_STATE);
pstat->state |= WIFI_FW_AUTH_SUCCESS;
/* challenging txt is correct... */
@@ -926,20 +928,20 @@ auth_fail:
return _FAIL;
}
-unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAuthClient(struct adapter *padapter, struct recv_frame *precv_frame)
{
unsigned int seq, len, status, offset;
unsigned char *p;
unsigned int go2asoc = 0;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint pkt_len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint pkt_len = precv_frame->len;
DBG_88E("%s\n", __func__);
/* check A1 matches or not */
- if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
+ if (memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
return _SUCCESS;
if (!(pmlmeinfo->state & WIFI_FW_AUTH_STATE))
@@ -1001,7 +1003,7 @@ authclnt_fail:
return _FAIL;
}
-unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
u16 capab_info;
@@ -1020,8 +1022,8 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint pkt_len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint pkt_len = precv_frame->len;
#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
u8 p2p_status_code = P2P_STATUS_SUCCESS;
@@ -1097,7 +1099,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
status = _STATS_FAILURE_;
} else {
/* check if ssid match */
- if (!_rtw_memcmp((void *)(p+2), cur->Ssid.Ssid, cur->Ssid.SsidLength))
+ if (memcmp((void *)(p+2), cur->Ssid.Ssid, cur->Ssid.SsidLength))
status = _STATS_FAILURE_;
if (ie_len != cur->Ssid.SsidLength)
@@ -1270,7 +1272,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
for (;;) {
p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
if (p != NULL) {
- if (_rtw_memcmp(p+2, WMM_IE, 6)) {
+ if (!memcmp(p+2, WMM_IE, 6)) {
pstat->flags |= WLAN_STA_WME;
pstat->qos_option = 1;
@@ -1470,7 +1472,7 @@ OnAssocReqFail:
return _FAIL;
}
-unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame)
{
uint i;
int res;
@@ -1480,13 +1482,13 @@ unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
/* struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); */
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint pkt_len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint pkt_len = precv_frame->len;
DBG_88E("%s\n", __func__);
/* check A1 matches or not */
- if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
+ if (memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
return _SUCCESS;
if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)))
@@ -1524,7 +1526,7 @@ unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if (_rtw_memcmp(pIE->data, WMM_PARA_OUI, 6)) /* WMM */
+ if (!memcmp(pIE->data, WMM_PARA_OUI, 6)) /* WMM */
WMM_param_handler(padapter, pIE);
break;
case _HT_CAPABILITY_IE_: /* HT caps */
@@ -1560,19 +1562,20 @@ report_assoc_result:
return _SUCCESS;
}
-unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
{
unsigned short reason;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
#endif /* CONFIG_88EU_P2P */
/* check A3 */
- if (!(_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network),
+ ETH_ALEN))
return _SUCCESS;
#ifdef CONFIG_88EU_P2P
@@ -1623,19 +1626,20 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
}
-unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame)
{
u16 reason;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
#endif /* CONFIG_88EU_P2P */
/* check A3 */
- if (!(_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network),
+ ETH_ALEN))
return _SUCCESS;
#ifdef CONFIG_88EU_P2P
@@ -1685,18 +1689,18 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
}
-unsigned int OnAtim(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAtim(struct adapter *padapter, struct recv_frame *precv_frame)
{
DBG_88E("%s\n", __func__);
return _SUCCESS;
}
-unsigned int on_action_spct(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int on_action_spct(struct adapter *padapter, struct recv_frame *precv_frame)
{
unsigned int ret = _FAIL;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
u8 *frame_body = (u8 *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
u8 category;
u8 action;
@@ -1729,17 +1733,17 @@ exit:
return ret;
}
-unsigned int OnAction_qos(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_qos(struct adapter *padapter, struct recv_frame *precv_frame)
{
return _SUCCESS;
}
-unsigned int OnAction_dls(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_dls(struct adapter *padapter, struct recv_frame *precv_frame)
{
return _SUCCESS;
}
-unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_back(struct adapter *padapter, struct recv_frame *precv_frame)
{
u8 *addr;
struct sta_info *psta = NULL;
@@ -1749,10 +1753,11 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
unsigned short tid, status, reason_code = 0;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
struct sta_priv *pstapriv = &padapter->stapriv;
/* check RA matches or not */
- if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+ if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe),
+ ETH_ALEN))/* for if1, sta/ap mode */
return _SUCCESS;
DBG_88E("%s\n", __func__);
@@ -1937,7 +1942,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
/* Commented by Albert 20110306 */
- /* According to the P2P Specification, the group negoitation request frame should contain 9 P2P attributes */
+ /* According to the P2P Specification, the group negotiation request frame should contain 9 P2P attributes */
/* 1. P2P Capability */
/* 2. Group Owner Intent */
/* 3. Configuration Timeout */
@@ -2280,7 +2285,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
/* Commented by Kurt 20120113 */
/* If some device wants to do p2p handshake without sending prov_disc_req */
/* We have to get peer_req_cm from here. */
- if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
+ if (!memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
@@ -2302,7 +2307,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
/* Commented by Albert 20100908 */
- /* According to the P2P Specification, the group negoitation response frame should contain 9 P2P attributes */
+ /* According to the P2P Specification, the group negotiation response frame should contain 9 P2P attributes */
/* 1. Status */
/* 2. P2P Capability */
/* 3. Group Owner Intent */
@@ -2604,7 +2609,7 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
/* Commented by Albert 20110306 */
- /* According to the P2P Specification, the group negoitation request frame should contain 5 P2P attributes */
+ /* According to the P2P Specification, the group negotiation request frame should contain 5 P2P attributes */
/* 1. Status */
/* 2. P2P Capability */
/* 3. Operating Channel */
@@ -2825,7 +2830,8 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
/* Channel Number */
p2pie[p2pielen++] = pwdinfo->invitereq_info.operating_ch; /* operating channel number */
- if (_rtw_memcmp(myid(&padapter->eeprompriv), pwdinfo->invitereq_info.go_bssid, ETH_ALEN)) {
+ if (!memcmp(myid(&padapter->eeprompriv),
+ pwdinfo->invitereq_info.go_bssid, ETH_ALEN)) {
/* P2P Group BSSID */
/* Type: */
p2pie[p2pielen++] = P2P_ATTR_GROUP_BSSID;
@@ -3260,7 +3266,7 @@ static u8 is_matched_in_profilelist(u8 *peermacaddr, struct profile_info *profil
for (i = 0; i < P2P_MAX_PERSISTENT_GROUP_NUM; i++, profileinfo++) {
DBG_88E("[%s] profileinfo_mac=%.2X %.2X %.2X %.2X %.2X %.2X\n", __func__,
profileinfo->peermac[0], profileinfo->peermac[1], profileinfo->peermac[2], profileinfo->peermac[3], profileinfo->peermac[4], profileinfo->peermac[5]);
- if (_rtw_memcmp(peermacaddr, profileinfo->peermac, ETH_ALEN)) {
+ if (!memcmp(peermacaddr, profileinfo->peermac, ETH_ALEN)) {
match_result = 1;
DBG_88E("[%s] Match!\n", __func__);
break;
@@ -3853,13 +3859,13 @@ exit:
#endif /* CONFIG_88EU_P2P */
-static s32 rtw_action_public_decache(union recv_frame *recv_frame, s32 token)
+static s32 rtw_action_public_decache(struct recv_frame *recv_frame, s32 token)
{
- struct adapter *adapter = recv_frame->u.hdr.adapter;
+ struct adapter *adapter = recv_frame->adapter;
struct mlme_ext_priv *mlmeext = &(adapter->mlmeextpriv);
- u8 *frame = recv_frame->u.hdr.rx_data;
- u16 seq_ctrl = ((recv_frame->u.hdr.attrib.seq_num&0xffff) << 4) |
- (recv_frame->u.hdr.attrib.frag_num & 0xf);
+ u8 *frame = recv_frame->rx_data;
+ u16 seq_ctrl = ((recv_frame->attrib.seq_num&0xffff) << 4) |
+ (recv_frame->attrib.frag_num & 0xf);
if (GetRetry(frame)) {
if (token >= 0) {
@@ -3885,14 +3891,14 @@ static s32 rtw_action_public_decache(union recv_frame *recv_frame, s32 token)
return _SUCCESS;
}
-static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
+static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
{
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
u8 *frame_body;
u8 dialogToken = 0;
#ifdef CONFIG_88EU_P2P
- struct adapter *padapter = precv_frame->u.hdr.adapter;
- uint len = precv_frame->u.hdr.len;
+ struct adapter *padapter = precv_frame->adapter;
+ uint len = precv_frame->len;
u8 *p2p_ie;
u32 p2p_ielen;
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
@@ -3939,7 +3945,8 @@ static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
/* Commented by Kurt 20120113 */
/* Get peer_dev_addr here if peer doesn't issue prov_disc frame. */
- if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.peerDevAddr, empty_addr, ETH_ALEN))
+ if (!memcmp(pwdinfo->rx_prov_disc_info.peerDevAddr, empty_addr,
+ ETH_ALEN))
memcpy(pwdinfo->rx_prov_disc_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN);
result = process_p2p_group_negotation_req(pwdinfo, frame_body, len);
@@ -4021,7 +4028,7 @@ static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
_rtw_memset(&group_id, 0x00, sizeof(struct group_id_info));
rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8 *)&group_id, &attr_contentlen);
if (attr_contentlen) {
- if (_rtw_memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ if (!memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
/* The p2p device sending this p2p invitation request wants this Wi-Fi device to be the persistent GO. */
rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_GO);
rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
@@ -4069,7 +4076,7 @@ static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
_rtw_memset(&group_id, 0x00, sizeof(struct group_id_info));
rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8 *)&group_id, &attr_contentlen);
if (attr_contentlen) {
- if (_rtw_memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ if (!memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
/* In this case, the GO can't be myself. */
rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
@@ -4116,7 +4123,7 @@ static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
pwdinfo->invitereq_info.benable = false;
if (attr_content == P2P_STATUS_SUCCESS) {
- if (_rtw_memcmp(pwdinfo->invitereq_info.go_bssid, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ if (!memcmp(pwdinfo->invitereq_info.go_bssid, myid(&padapter->eeprompriv), ETH_ALEN)) {
rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
} else {
rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
@@ -4175,23 +4182,22 @@ static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
return _SUCCESS;
}
-static unsigned int on_action_public_vendor(union recv_frame *precv_frame)
+static unsigned int on_action_public_vendor(struct recv_frame *precv_frame)
{
unsigned int ret = _FAIL;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
- if (_rtw_memcmp(frame_body + 2, P2P_OUI, 4) == true) {
+ if (!memcmp(frame_body + 2, P2P_OUI, 4))
ret = on_action_public_p2p(precv_frame);
- }
return ret;
}
-static unsigned int on_action_public_default(union recv_frame *precv_frame, u8 action)
+static unsigned int on_action_public_default(struct recv_frame *precv_frame, u8 action)
{
unsigned int ret = _FAIL;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
u8 token;
@@ -4206,15 +4212,15 @@ exit:
return ret;
}
-unsigned int on_action_public(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int on_action_public(struct adapter *padapter, struct recv_frame *precv_frame)
{
unsigned int ret = _FAIL;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
u8 category, action;
/* check RA matches or not */
- if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))
+ if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))
goto exit;
category = frame_body[0];
@@ -4235,30 +4241,30 @@ exit:
return ret;
}
-unsigned int OnAction_ht(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_ht(struct adapter *padapter, struct recv_frame *precv_frame)
{
return _SUCCESS;
}
-unsigned int OnAction_wmm(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_wmm(struct adapter *padapter, struct recv_frame *precv_frame)
{
return _SUCCESS;
}
-unsigned int OnAction_p2p(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_P2P
u8 *frame_body;
u8 category, OUI_Subtype;
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint len = precv_frame->len;
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
DBG_88E("%s\n", __func__);
/* check RA matches or not */
- if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+ if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
return _SUCCESS;
frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
@@ -4290,13 +4296,13 @@ unsigned int OnAction_p2p(struct adapter *padapter, union recv_frame *precv_fram
return _SUCCESS;
}
-unsigned int OnAction(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction(struct adapter *padapter, struct recv_frame *precv_frame)
{
int i;
unsigned char category;
struct action_handler *ptable;
unsigned char *frame_body;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
@@ -4310,7 +4316,7 @@ unsigned int OnAction(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
}
-unsigned int DoReserved(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int DoReserved(struct adapter *padapter, struct recv_frame *precv_frame)
{
return _SUCCESS;
}
@@ -4341,7 +4347,7 @@ struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
/****************************************************************************
-Following are some TX fuctions for WiFi MLME
+Following are some TX functions for WiFi MLME
*****************************************************************************/
@@ -4432,7 +4438,7 @@ s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmg
}
pxmitpriv->ack_tx = false;
- _exit_critical_mutex(&pxmitpriv->ack_tx_mutex, NULL);
+ mutex_unlock(&pxmitpriv->ack_tx_mutex);
return ret;
}
@@ -4995,7 +5001,7 @@ exit:
return ret;
}
-/* if psta == NULL, indiate we are station(client) now... */
+/* if psta == NULL, indicate we are station(client) now... */
void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status)
{
struct xmit_frame *pmgntframe;
@@ -5234,7 +5240,7 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
for (pbuf = ie + _BEACON_IE_OFFSET_;; pbuf += (ie_len + 2)) {
pbuf = rtw_get_ie(pbuf, _VENDOR_SPECIFIC_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if (pbuf && _rtw_memcmp(pbuf+2, WMM_PARA_IE, 6)) {
+ if (pbuf && !memcmp(pbuf+2, WMM_PARA_IE, 6)) {
memcpy(pframe, pbuf, ie_len+2);
pframe += (ie_len+2);
pattrib->pktlen += (ie_len+2);
@@ -5439,14 +5445,14 @@ void issue_assocreq(struct adapter *padapter)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if ((_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4)) ||
- (_rtw_memcmp(pIE->data, WMM_OUI, 4)) ||
- (_rtw_memcmp(pIE->data, WPS_OUI, 4))) {
+ if ((!memcmp(pIE->data, RTW_WPA_OUI, 4)) ||
+ (!memcmp(pIE->data, WMM_OUI, 4)) ||
+ (!memcmp(pIE->data, WPS_OUI, 4))) {
if (!padapter->registrypriv.wifi_spec) {
/* Commented by Kurt 20110629 */
/* In some older APs, WPS handshake */
/* would be fail if we append vender extensions informations to AP */
- if (_rtw_memcmp(pIE->data, WPS_OUI, 4))
+ if (!memcmp(pIE->data, WPS_OUI, 4))
pIE->Length = 14;
}
pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, pIE->Length, pIE->data, &(pattrib->pktlen));
@@ -5606,7 +5612,7 @@ exit:
return;
}
-/* when wait_ack is ture, this function shoule be called at process context */
+/* when wait_ack is true, this function should be called at process context */
static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int wait_ack)
{
int ret = _FAIL;
@@ -5676,7 +5682,7 @@ exit:
}
-/* when wait_ms > 0 , this function shoule be called at process context */
+/* when wait_ms > 0 , this function should be called at process context */
/* da == NULL for station mode */
int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
{
@@ -5686,7 +5692,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- /* da == NULL, assum it's null data for sta to ap*/
+ /* da == NULL, assume it's null data for sta to ap*/
if (da == NULL)
da = get_my_bssid(&(pmlmeinfo->network));
@@ -5721,7 +5727,7 @@ exit:
return ret;
}
-/* when wait_ack is ture, this function shoule be called at process context */
+/* when wait_ack is true, this function should be called at process context */
static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int wait_ack)
{
int ret = _FAIL;
@@ -5799,7 +5805,7 @@ exit:
return ret;
}
-/* when wait_ms > 0 , this function shoule be called at process context */
+/* when wait_ms > 0 , this function should be called at process context */
/* da == NULL for station mode */
int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
{
@@ -5809,7 +5815,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- /* da == NULL, assum it's null data for sta to ap*/
+ /* da == NULL, assume it's null data for sta to ap*/
if (da == NULL)
da = get_my_bssid(&(pmlmeinfo->network));
@@ -6103,17 +6109,26 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
case 1: /* ADDBA rsp */
pframe = rtw_set_fixed_ie(pframe, 1, &(pmlmeinfo->ADDBA_req.dialog_token), &(pattrib->pktlen));
pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&status), &(pattrib->pktlen));
+
+ BA_para_set = le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f;
rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
- if (MAX_AMPDU_FACTOR_64K == max_rx_ampdu_factor)
- BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
- else if (MAX_AMPDU_FACTOR_32K == max_rx_ampdu_factor)
- BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0800); /* 32 buffer size */
- else if (MAX_AMPDU_FACTOR_16K == max_rx_ampdu_factor)
- BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0400); /* 16 buffer size */
- else if (MAX_AMPDU_FACTOR_8K == max_rx_ampdu_factor)
- BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0200); /* 8 buffer size */
- else
- BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
+ switch (max_rx_ampdu_factor) {
+ case MAX_AMPDU_FACTOR_64K:
+ BA_para_set |= 0x1000; /* 64 buffer size */
+ break;
+ case MAX_AMPDU_FACTOR_32K:
+ BA_para_set |= 0x0800; /* 32 buffer size */
+ break;
+ case MAX_AMPDU_FACTOR_16K:
+ BA_para_set |= 0x0400; /* 16 buffer size */
+ break;
+ case MAX_AMPDU_FACTOR_8K:
+ BA_para_set |= 0x0200; /* 8 buffer size */
+ break;
+ default:
+ BA_para_set |= 0x1000; /* 64 buffer size */
+ break;
+ }
if (pregpriv->ampdu_amsdu == 0)/* disabled */
BA_para_set = BA_para_set & ~BIT(0);
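The switch above is a behavior-preserving rewrite of the old if/else ladder: BA_para_set is first masked to its low six bits (now after a single le16_to_cpu()), then one of the ADDBA buffer-size bit patterns is ORed in, with 64 buffers as the fallback. Expressed as a hypothetical helper (name and shape are illustrative only, not part of the patch):

    #include <linux/types.h>    /* u8, u16 */

    /* Hypothetical helper mirroring the switch above: map the reported
     * RX AMPDU factor onto the ADDBA response buffer-size bits. */
    static u16 ampdu_factor_to_ba_bits(u8 max_rx_ampdu_factor)
    {
        switch (max_rx_ampdu_factor) {
        case MAX_AMPDU_FACTOR_8K:
            return 0x0200;  /*  8 buffers */
        case MAX_AMPDU_FACTOR_16K:
            return 0x0400;  /* 16 buffers */
        case MAX_AMPDU_FACTOR_32K:
            return 0x0800;  /* 32 buffers */
        case MAX_AMPDU_FACTOR_64K:
        default:
            return 0x1000;  /* 64 buffers */
        }
    }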
@@ -6222,7 +6237,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
int len;
@@ -6232,9 +6247,9 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
if (rtw_end_of_queue_search(phead, plist))
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
- plist = get_next(plist);
+ plist = plist->next;
pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
@@ -6355,7 +6370,7 @@ unsigned int send_beacon(struct adapter *padapter)
/****************************************************************************
-Following are some utitity fuctions for WiFi MLME
+Following are some utility functions for WiFi MLME
*****************************************************************************/
@@ -6468,7 +6483,7 @@ void site_survey(struct adapter *padapter)
{
/* 20100721:Interrupt scan operation here. */
/* For SW antenna diversity before link, it needs to switch to another antenna and scan again. */
- /* It compares the scan result and select beter one to do connection. */
+ /* It compares the scan results and selects the better one for the connection. */
if (rtw_hal_antdiv_before_linked(padapter)) {
pmlmeext->sitesurvey_res.bss_cnt = 0;
pmlmeext->sitesurvey_res.channel_idx = -1;
@@ -6526,14 +6541,14 @@ void site_survey(struct adapter *padapter)
}
/* collect bss info from Beacon and Probe request/response frames. */
-u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, struct wlan_bssid_ex *bssid)
+u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, struct wlan_bssid_ex *bssid)
{
int i;
u32 len;
u8 *p;
u16 val16, subtype;
- u8 *pframe = precv_frame->u.hdr.rx_data;
- u32 packet_len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ u32 packet_len = precv_frame->len;
u8 ie_offset;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -6572,10 +6587,10 @@ u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, str
bssid->IELength = len;
memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
- /* get the signal strength */
- bssid->Rssi = precv_frame->u.hdr.attrib.phy_info.recvpower; /* in dBM.raw data */
- bssid->PhyInfo.SignalQuality = precv_frame->u.hdr.attrib.phy_info.SignalQuality;/* in percentage */
- bssid->PhyInfo.SignalStrength = precv_frame->u.hdr.attrib.phy_info.SignalStrength;/* in percentage */
+ /* get the signal strength in dBm (raw data) */
+ bssid->Rssi = precv_frame->attrib.phy_info.recvpower;
+ bssid->PhyInfo.SignalQuality = precv_frame->attrib.phy_info.SignalQuality;/* in percentage */
+ bssid->PhyInfo.SignalStrength = precv_frame->attrib.phy_info.SignalStrength;/* in percentage */
rtw_hal_get_def_var(padapter, HAL_DEF_CURRENT_ANTENNA, &bssid->PhyInfo.Optimum_antenna);
/* checking SSID */
@@ -6707,7 +6722,7 @@ void start_create_ibss(struct adapter *padapter)
/* update wireless mode */
update_wireless_mode(padapter);
- /* udpate capability */
+ /* update capability */
caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
update_capinfo(padapter, caps);
if (caps&cap_IBSS) {/* adhoc master */
@@ -6759,7 +6774,7 @@ void start_clnt_join(struct adapter *padapter)
/* update wireless mode */
update_wireless_mode(padapter);
- /* udpate capability */
+ /* update capability */
caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
update_capinfo(padapter, caps);
if (caps&cap_ESS) {
@@ -6851,7 +6866,7 @@ unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
/* check A3 */
- if (!(_rtw_memcmp(MacAddr, get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ if (memcmp(MacAddr, get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
return _SUCCESS;
DBG_88E("%s\n", __func__);
@@ -7028,7 +7043,8 @@ Following are the functions to report events
*****************************************************************************/
-void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame)
+void report_survey_event(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
struct cmd_obj *pcmd_obj;
u8 *pevtcmd;
@@ -7037,8 +7053,6 @@ void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame
struct C2HEvent_Header *pc2h_evt_hdr;
struct mlme_ext_priv *pmlmeext;
struct cmd_priv *pcmdpriv;
- /* u8 *pframe = precv_frame->u.hdr.rx_data; */
- /* uint len = precv_frame->u.hdr.len; */
if (!padapter)
return;
@@ -7373,7 +7387,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* turn on dynamic functions */
Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
- /* update IOT-releated issue */
+ /* update IOT-related issue */
update_IOT_info(padapter);
rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
@@ -7381,7 +7395,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* BCN interval */
rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));
- /* udpate capability */
+ /* update capability */
update_capinfo(padapter, pmlmeinfo->capability);
/* WMM, Update EDCA param */
@@ -7902,7 +7916,7 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:/* Get WMM IE. */
- if (_rtw_memcmp(pIE->data, WMM_OUI, 4))
+ if (!memcmp(pIE->data, WMM_OUI, 4))
pmlmeinfo->WMM_enable = 1;
break;
case _HT_CAPABILITY_IE_: /* Get HT Cap IE. */
@@ -8016,7 +8030,7 @@ static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_c
set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, in[i].hw_value);
if (in[i].hw_value && !(in[i].flags & RTW_IEEE80211_CHAN_DISABLED) &&
set_idx >= 0) {
- memcpy(&out[j], &in[i], sizeof(struct rtw_ieee80211_channel));
+ out[j] = in[i];
if (pmlmeext->channel_set[set_idx].ScanType == SCAN_PASSIVE)
out[j].flags &= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
@@ -8261,7 +8275,6 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
int len_diff = 0;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -8290,7 +8303,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -8368,12 +8380,12 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
spin_lock_bh(&psta_bmc->sleep_q.lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
rtw_list_delete(&pxmitframe->list);
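Two patterns account for most of the rtw_mlme_ext.c delta: the recv_frame container loses its union wrapper, so precv_frame->u.hdr.field becomes precv_frame->field throughout, and the queue walks drop the get_next()/LIST_CONTAINOR() wrappers in favour of the plain list_head idiom. A sketch of the post-patch traversal, with field names mirroring the code above (fragment for illustration, assuming a populated driver queue):

    struct list_head *phead = get_list_head(queue);
    struct list_head *plist = phead->next;

    while (plist != phead) {
        struct wlan_network *pnetwork =
            container_of(plist, struct wlan_network, list);

        plist = plist->next;    /* advance before the entry is used or removed */
        /* ... examine pnetwork->network ... */
    }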
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
index 6451efdfb132..705f666bca6b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -23,6 +23,7 @@
#include "odm_precomp.h"
#include "rtl8188e_hal.h"
+#include <linux/vmalloc.h>
u32 read_macreg(struct adapter *padapter, u32 addr, u32 sz)
{
@@ -406,7 +407,7 @@ s32 mp_start_test(struct adapter *padapter)
goto end_of_mp_start_test;
}
- /* 3 3. join psudo AdHoc */
+ /* 3 3. join pseudo AdHoc */
tgt_network->join_res = 1;
tgt_network->aid = 1;
psta->aid = 1;
@@ -442,7 +443,7 @@ void mp_stop_test(struct adapter *padapter)
if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == false)
goto end_of_mp_stop_test;
- /* 3 1. disconnect psudo AdHoc */
+ /* 3 1. disconnect pseudo AdHoc */
rtw_indicate_disconnect(padapter);
/* 3 2. clear psta used in mp test mode. */
@@ -882,7 +883,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
{
u32 i, psd_pts = 0, psd_start = 0, psd_stop = 0;
u32 psd_data = 0;
-
+ int ret;
if (!netif_running(pAdapter->pnetdev)) {
RT_TRACE(_module_mp_, _drv_warning_, ("mp_query_psd: Fail! interface not opened!\n"));
@@ -899,7 +900,10 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
psd_start = 64;
psd_stop = 128;
} else {
- sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
+ ret = sscanf(data, "pts =%d, start =%d, stop =%d",
+ &psd_pts, &psd_start, &psd_stop);
+ if (ret != 3)
+ return 0;
}
_rtw_memset(data, '\0', sizeof(*data));
@@ -943,7 +947,7 @@ void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv)
}
if (pxmitpriv->pallocated_xmit_extbuf)
- rtw_vmfree(pxmitpriv->pallocated_xmit_extbuf, num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+ vfree(pxmitpriv->pallocated_xmit_extbuf);
if (padapter->registrypriv.mp_mode == 0) {
max_xmit_extbuf_size = 20000;
@@ -956,7 +960,7 @@ void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv)
/* Init xmit extension buff */
_rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
- pxmitpriv->pallocated_xmit_extbuf = rtw_zvmalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+ pxmitpriv->pallocated_xmit_extbuf = vzalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n"));
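This file's conversion replaces the rtw_zvmalloc()/rtw_vmfree() wrappers with vzalloc()/vfree(); note that vfree() takes only the pointer, so the size argument the old wrapper carried disappears. A minimal kernel-module sketch of the pairing (buffer size arbitrary, for illustration only):

#include <linux/module.h>
#include <linux/vmalloc.h>

static void *ext_buf;

static int __init vz_example_init(void)
{
	ext_buf = vzalloc(32 * 1024);	/* zeroed, virtually contiguous */
	if (!ext_buf)
		return -ENOMEM;
	return 0;
}

static void __exit vz_example_exit(void)
{
	vfree(ext_buf);			/* size is tracked internally */
}

module_init(vz_example_init);
module_exit(vz_example_exit);
MODULE_LICENSE("GPL");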
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
index edcd8a5042be..e783968b29ea 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
@@ -33,7 +33,6 @@ int rtl8188eu_oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->information_buf_len < sizeof(u8))
return NDIS_STATUS_INVALID_LENGTH;
@@ -48,7 +47,6 @@ _func_enter_;
status = NDIS_STATUS_NOT_ACCEPTED;
}
-_func_exit_;
return status;
}
@@ -61,7 +59,6 @@ int rtl8188eu_oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_write_bb_reg_hdl\n"));
@@ -87,7 +84,6 @@ _func_enter_;
write_bbreg(Adapter, offset, 0xFFFFFFFF, value);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -100,7 +96,6 @@ int rtl8188eu_oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_read_bb_reg_hdl\n"));
@@ -126,7 +121,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_notice_,
("-rtl8188eu_oid_rt_pro_read_bb_reg_hdl: offset=0x%03X value:0x%08X\n",
offset, value));
-_func_exit_;
return status;
}
@@ -140,7 +134,6 @@ int rtl8188eu_oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_write_rf_reg_hdl\n"));
@@ -171,7 +164,6 @@ _func_enter_;
write_rfreg(Adapter, path, offset, value);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -185,7 +177,6 @@ int rtl8188eu_oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
int status = NDIS_STATUS_SUCCESS;
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_read_rf_reg_hdl\n"));
@@ -217,7 +208,6 @@ _func_enter_;
("-rtl8188eu_oid_rt_pro_read_rf_reg_hdl: path=%d offset=0x%02X value=0x%05X\n",
path, offset, value));
-_func_exit_;
return status;
}
@@ -232,7 +222,6 @@ int rtl8188eu_oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_,
("+rtl8188eu_oid_rt_pro_set_data_rate_hdl\n"));
@@ -255,7 +244,6 @@ _func_enter_;
SetDataRate(Adapter);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -266,7 +254,6 @@ int rtl8188eu_oid_rt_pro_start_test_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_start_test_hdl\n"));
@@ -293,7 +280,6 @@ exit:
RT_TRACE(_module_mp_, _drv_notice_, ("-rtl8188eu_oid_rt_pro_start_test_hdl: mp_mode=%d\n", Adapter->mppriv.mode));
-_func_exit_;
return status;
}
@@ -303,7 +289,6 @@ int rtl8188eu_oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+Set OID_RT_PRO_STOP_TEST\n"));
@@ -316,7 +301,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("-Set OID_RT_PRO_STOP_TEST\n"));
-_func_exit_;
return status;
}
@@ -327,7 +311,6 @@ int rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_p
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl\n"));
@@ -352,7 +335,6 @@ _func_enter_;
SetChannel(Adapter);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -364,7 +346,6 @@ int rtl8188eu_oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *padapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_,
("+rtl8188eu_oid_rt_set_bandwidth_hdl\n"));
@@ -391,7 +372,6 @@ _func_enter_;
("-rtl8188eu_oid_rt_set_bandwidth_hdl: bandwidth=%d channel_offset=%d\n",
bandwidth, channel_offset));
-_func_exit_;
return status;
}
@@ -402,7 +382,6 @@ int rtl8188eu_oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_antenna_bb_hdl\n"));
@@ -426,7 +405,6 @@ _func_enter_;
*(u32 *)poid_par_priv->information_buf = antenna;
}
-_func_exit_;
return status;
}
@@ -437,7 +415,6 @@ int rtl8188eu_oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_pro_set_tx_power_control_hdl\n"));
@@ -461,7 +438,6 @@ _func_enter_;
SetTxPower(Adapter);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -474,7 +450,6 @@ int rtl8188eu_oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -488,7 +463,6 @@ _func_enter_;
status = NDIS_STATUS_INVALID_LENGTH;
}
-_func_exit_;
return status;
}
@@ -498,7 +472,6 @@ int rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -513,7 +486,6 @@ _func_enter_;
status = NDIS_STATUS_INVALID_LENGTH;
}
-_func_exit_;
return status;
}
@@ -523,7 +495,6 @@ int rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *po
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -538,7 +509,6 @@ _func_enter_;
status = NDIS_STATUS_INVALID_LENGTH;
}
-_func_exit_;
return status;
}
@@ -549,7 +519,6 @@ int rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -559,7 +528,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_alert_, ("===> rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl.\n"));
Adapter->mppriv.tx_pktcount = 0;
-_func_exit_;
return status;
}
@@ -569,7 +537,6 @@ int rtl8188eu_oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *poid_
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -583,7 +550,6 @@ _func_enter_;
status = NDIS_STATUS_INVALID_LENGTH;
}
-_func_exit_;
return status;
}
@@ -593,7 +559,6 @@ int rtl8188eu_oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -604,7 +569,6 @@ _func_enter_;
ResetPhyRxPktCount(Adapter);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -614,7 +578,6 @@ int rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_pa
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl\n"));
@@ -632,7 +595,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("-rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl: recv_ok=%d\n", *(u32 *)poid_par_priv->information_buf));
-_func_exit_;
return status;
}
@@ -642,7 +604,6 @@ int rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *poid
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl\n"));
@@ -663,7 +624,6 @@ _func_enter_;
("-rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl: recv_err =%d\n",
*(u32 *)poid_par_priv->information_buf));
-_func_exit_;
return status;
}
@@ -674,7 +634,6 @@ int rtl8188eu_oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_pri
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_continuous_tx_hdl\n"));
@@ -698,7 +657,6 @@ _func_enter_;
}
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -709,7 +667,6 @@ int rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_alert_, ("+rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl\n"));
@@ -733,7 +690,6 @@ _func_enter_;
}
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -744,7 +700,6 @@ int rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *poi
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl\n"));
@@ -768,7 +723,6 @@ _func_enter_;
}
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -779,7 +733,6 @@ int rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_pr
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_alert_, ("+rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl\n"));
@@ -792,7 +745,6 @@ _func_enter_;
SetSingleToneTx(Adapter, (u8)bStartTest);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -806,7 +758,6 @@ int rtl8188eu_oid_rt_pro_trigger_gpio_hdl(struct oid_par_priv *poid_par_priv)
{
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
int status = NDIS_STATUS_SUCCESS;
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID)
return NDIS_STATUS_NOT_ACCEPTED;
@@ -815,7 +766,6 @@ _func_enter_;
rtw_hal_set_hwreg(Adapter, HW_VAR_TRIGGER_GPIO_0, NULL);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -833,7 +783,6 @@ int rtl8188eu_oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_,
("+rtl8188eu_oid_rt_pro_read_register_hdl\n"));
@@ -870,7 +819,6 @@ _func_enter_;
*poid_par_priv->bytes_rw = width;
-_func_exit_;
return status;
}
@@ -882,7 +830,6 @@ int rtl8188eu_oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *padapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_,
("+rtl8188eu_oid_rt_pro_write_register_hdl\n"));
@@ -929,7 +876,6 @@ _func_enter_;
("-rtl8188eu_oid_rt_pro_write_register_hdl: offset=0x%08X width=%d value=0x%X\n",
offset, width, value));
-_func_exit_;
return status;
}
@@ -1002,7 +948,6 @@ int rtl8188eu_oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv *poid_par_priv
int status = NDIS_STATUS_SUCCESS;
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+OID_RT_PRO_SET_DATA_RATE_EX\n"));
@@ -1016,7 +961,6 @@ _func_enter_;
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -1027,7 +971,6 @@ int rtl8188eu_oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv)
u8 thermal = 0;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_get_thermal_meter_hdl\n"));
@@ -1044,7 +987,6 @@ _func_enter_;
*(u32 *)poid_par_priv->information_buf = (u32)thermal;
*poid_par_priv->bytes_rw = sizeof(u32);
-_func_exit_;
return status;
}
@@ -1060,7 +1002,6 @@ int rtl8188eu_oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv *poid_par_pr
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->information_buf_len < sizeof(u8))
return NDIS_STATUS_INVALID_LENGTH;
@@ -1079,7 +1020,6 @@ _func_enter_;
}
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -1143,7 +1083,6 @@ int rtl8188eu_oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID)
return NDIS_STATUS_NOT_ACCEPTED;
@@ -1176,7 +1115,6 @@ _func_enter_;
}
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -1190,7 +1128,6 @@ int rtl8188eu_oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv)
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID)
return NDIS_STATUS_NOT_ACCEPTED;
@@ -1216,7 +1153,6 @@ _func_enter_;
status = NDIS_STATUS_FAILURE;
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -1227,7 +1163,6 @@ int rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
*poid_par_priv->bytes_rw = 0;
@@ -1267,7 +1202,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_info_,
("-rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl: status=0x%08X\n", status));
-_func_exit_;
return status;
}
@@ -1279,7 +1213,6 @@ int rtl8188eu_oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_pr
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID)
return NDIS_STATUS_NOT_ACCEPTED;
@@ -1296,7 +1229,6 @@ _func_enter_;
} else {
status = NDIS_STATUS_FAILURE;
}
-_func_exit_;
return status;
}
@@ -1306,7 +1238,6 @@ int rtl8188eu_oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID)
return NDIS_STATUS_NOT_ACCEPTED;
@@ -1321,7 +1252,6 @@ _func_enter_;
("-rtl8188eu_oid_rt_get_efuse_max_size_hdl: size=%d status=0x%08X\n",
*(int *)poid_par_priv->information_buf, status));
-_func_exit_;
return status;
}
@@ -1330,7 +1260,6 @@ int rtl8188eu_oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv)
{
int status;
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_pro_efuse_hdl\n"));
@@ -1341,7 +1270,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_info_, ("-rtl8188eu_oid_rt_pro_efuse_hdl: status=0x%08X\n", status));
-_func_exit_;
return status;
}
@@ -1353,7 +1281,6 @@ int rtl8188eu_oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv)
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
u16 maplen = 0;
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_efuse_map_hdl\n"));
@@ -1398,7 +1325,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_info_,
("-rtl8188eu_oid_rt_pro_efuse_map_hdl: status=0x%08X\n", status));
-_func_exit_;
return status;
}
@@ -1414,7 +1340,6 @@ int rtl8188eu_oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
u8 rx_pkt_type;
int status = NDIS_STATUS_SUCCESS;
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_set_rx_packet_type_hdl\n"));
@@ -1427,7 +1352,6 @@ _func_enter_;
rx_pkt_type = *((u8 *)poid_par_priv->information_buf);/* 4 */
RT_TRACE(_module_mp_, _drv_info_, ("rx_pkt_type: %x\n", rx_pkt_type));
-_func_exit_;
return status;
}
@@ -1482,7 +1406,6 @@ int rtl8188eu_oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv)
{
int status = NDIS_STATUS_SUCCESS;
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -1497,7 +1420,6 @@ _func_enter_;
/* CALL the power_down function */
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
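The rtw_mp_ioctl.c hunks strip the _func_enter_/_func_exit_ tracing macros, leaving the bare OID-handler shape: check the OID type, check the buffer length, do the work, return an NDIS-style status. A userspace-buildable sketch of that shape follows; the types and constants are simplified stand-ins, not the driver's real ones:

#include <stddef.h>
#include <stdio.h>

enum { STATUS_SUCCESS = 0, STATUS_NOT_ACCEPTED = 1, STATUS_INVALID_LENGTH = 2 };
enum { QUERY_OID = 0, SET_OID = 1 };

struct oid_params {
	int type_of_oid;
	void *information_buf;
	size_t information_buf_len;
};

/* query-style handler: validate, copy the value out, report status */
static int example_query_counter_hdl(struct oid_params *p, unsigned int counter)
{
	if (p->type_of_oid != QUERY_OID)
		return STATUS_NOT_ACCEPTED;
	if (p->information_buf_len < sizeof(unsigned int))
		return STATUS_INVALID_LENGTH;

	*(unsigned int *)p->information_buf = counter;
	return STATUS_SUCCESS;
}

int main(void)
{
	unsigned int out = 0;
	struct oid_params p = { QUERY_OID, &out, sizeof(out) };

	printf("status=%d value=%u\n", example_query_counter_hdl(&p, 42), out);
	return 0;
}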
diff --git a/drivers/staging/rtl8188eu/core/rtw_p2p.c b/drivers/staging/rtl8188eu/core/rtw_p2p.c
index 6e8c06e840b3..9425c4991ccd 100644
--- a/drivers/staging/rtl8188eu/core/rtw_p2p.c
+++ b/drivers/staging/rtl8188eu/core/rtw_p2p.c
@@ -57,13 +57,13 @@ static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* look up sta asoc_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ plist = plist->next;
if (psta->is_p2p_device) {
@@ -824,7 +824,7 @@ u32 process_probe_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint l
if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
p2pie = rtw_get_p2p_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_ , len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_ , NULL, &p2pielen);
if (p2pie) {
- if ((p != NULL) && _rtw_memcmp((void *)(p+2), (void *)pwdinfo->p2p_wildcard_ssid , 7)) {
+ if ((p != NULL) && !memcmp((void *)(p+2), (void *)pwdinfo->p2p_wildcard_ssid , 7)) {
/* todo: */
/* Check Requested Device Type attributes in WSC IE. */
/* Check Device ID attribute in P2P IE */
@@ -972,24 +972,24 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
u32 attr_contentlen = 0;
if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen)) {
- if (_rtw_memcmp(pwdinfo->device_addr, groupid, ETH_ALEN) &&
- _rtw_memcmp(pwdinfo->p2p_group_ssid, groupid+ETH_ALEN, pwdinfo->p2p_group_ssid_len)) {
+ if (!memcmp(pwdinfo->device_addr, groupid, ETH_ALEN) &&
+ !memcmp(pwdinfo->p2p_group_ssid, groupid+ETH_ALEN, pwdinfo->p2p_group_ssid_len)) {
attr_contentlen = 0;
if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_ID, dev_addr, &attr_contentlen)) {
struct list_head *phead, *plist;
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* look up sta asoc_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ plist = plist->next;
if (psta->is_p2p_device && (psta->dev_cap&P2P_DEVCAP_CLIENT_DISCOVERABILITY) &&
- _rtw_memcmp(psta->dev_addr, dev_addr, ETH_ALEN)) {
+ !memcmp(psta->dev_addr, dev_addr, ETH_ALEN)) {
/* issue GO Discoverability Request */
issue_group_disc_req(pwdinfo, psta->hwaddr);
status = P2P_STATUS_SUCCESS;
@@ -1118,7 +1118,7 @@ u8 process_p2p_group_negotation_req(struct wifidirect_info *pwdinfo, u8 *pframe,
/* Commented by Kurt 20120113 */
/* If some device wants to do p2p handshake without sending prov_disc_req */
/* We have to get peer_req_cm from here. */
- if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
+ if (!memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
rtw_get_wps_attr_content(wpsie, wps_ielen, WPS_ATTR_DEVICE_PWID, (u8 *)&be_tmp, &wps_devicepassword_id_len);
wps_devicepassword_id = be16_to_cpu(be_tmp);
@@ -1498,7 +1498,6 @@ static void find_phase_handler(struct adapter *padapter)
struct ndis_802_11_ssid ssid;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
_rtw_memset((unsigned char *)&ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN);
@@ -1509,7 +1508,6 @@ _func_enter_;
spin_lock_bh(&pmlmepriv->lock);
rtw_sitesurvey_cmd(padapter, &ssid, 1, NULL, 0);
spin_unlock_bh(&pmlmepriv->lock);
-_func_exit_;
}
void p2p_concurrent_handler(struct adapter *padapter);
@@ -1518,7 +1516,6 @@ static void restore_p2p_state_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-_func_enter_;
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL))
rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
@@ -1528,54 +1525,46 @@ _func_enter_;
/* because this P2P client should stay at the operating channel of P2P GO. */
set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
}
-_func_exit_;
}
static void pre_tx_invitereq_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 val8 = 1;
-_func_enter_;
set_channel_bwmode(padapter, pwdinfo->invitereq_info.peer_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
-_func_exit_;
}
static void pre_tx_provdisc_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 val8 = 1;
-_func_enter_;
set_channel_bwmode(padapter, pwdinfo->tx_prov_disc_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
-_func_exit_;
}
static void pre_tx_negoreq_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 val8 = 1;
-_func_enter_;
set_channel_bwmode(padapter, pwdinfo->nego_req_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
-_func_exit_;
}
void p2p_protocol_wk_hdl(struct adapter *padapter, int intCmdType)
{
-_func_enter_;
switch (intCmdType) {
case P2P_FIND_PHASE_WK:
find_phase_handler(padapter);
@@ -1594,7 +1583,6 @@ _func_enter_;
break;
}
-_func_exit_;
}
void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength)
@@ -1610,7 +1598,6 @@ void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength)
u8 find_p2p = false, find_p2p_ps = false;
u8 noa_offset, noa_num, noa_index;
-_func_enter_;
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return;
@@ -1683,7 +1670,6 @@ _func_enter_;
p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
}
-_func_exit_;
}
void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
@@ -1691,7 +1677,6 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
-_func_enter_;
/* Pre action for p2p state */
switch (p2p_ps_state) {
@@ -1738,7 +1723,6 @@ _func_enter_;
break;
}
-_func_exit_;
}
u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue)
@@ -1749,7 +1733,6 @@ u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return res;
@@ -1781,7 +1764,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -2024,11 +2006,11 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
/* Disable P2P function */
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
- _cancel_timer_ex(&pwdinfo->find_phase_timer);
- _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
- _cancel_timer_ex(&pwdinfo->pre_tx_scan_timer);
- _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
- _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey2);
+ del_timer_sync(&pwdinfo->find_phase_timer);
+ del_timer_sync(&pwdinfo->restore_p2p_state_timer);
+ del_timer_sync(&pwdinfo->pre_tx_scan_timer);
+ del_timer_sync(&pwdinfo->reset_ch_sitesurvey);
+ del_timer_sync(&pwdinfo->reset_ch_sitesurvey2);
reset_ch_sitesurvey_timer_process(padapter);
reset_ch_sitesurvey_timer_process2(padapter);
rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE);
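The hunk above replaces the driver's _cancel_timer_ex() wrapper with del_timer_sync(), which, unlike a bare del_timer(), also waits for a handler already running on another CPU. A minimal kernel-module sketch of that teardown using the standard timer API (timer_setup() is the current interface; the driver's own _init_timer() wrapper predates it):

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_timer_fn(struct timer_list *t)
{
	pr_info("demo timer fired\n");
}

static int __init timer_demo_init(void)
{
	timer_setup(&demo_timer, demo_timer_fn, 0);
	mod_timer(&demo_timer, jiffies + msecs_to_jiffies(500));
	return 0;
}

static void __exit timer_demo_exit(void)
{
	/* safe teardown: cancel the timer and wait for a running callback */
	del_timer_sync(&demo_timer);
}

module_init(timer_demo_init);
module_exit(timer_demo_exit);
MODULE_LICENSE("GPL");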
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index b5db22cc81ed..f6583734aefa 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -226,11 +226,8 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
u8 rpwm;
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-_func_enter_;
-
pslv = PS_STATE(pslv);
-
if (pwrpriv->btcoex_rfon) {
if (pslv < PS_STATE_S4)
pslv = PS_STATE_S3;
@@ -274,8 +271,6 @@ _func_enter_;
pwrpriv->tog += 0x80;
pwrpriv->cpwm = pslv;
-
-_func_exit_;
}
static u8 PS_RDY_CHECK(struct adapter *padapter)
@@ -313,8 +308,6 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
#endif /* CONFIG_88EU_P2P */
-_func_enter_;
-
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
("%s: PowerMode=%d Smart_PS=%d\n",
__func__, ps_mode, smart_ps));
@@ -362,8 +355,6 @@ _func_enter_;
rtw_set_rpwm(padapter, PS_STATE_S2);
}
}
-
-_func_exit_;
}
/*
@@ -410,8 +401,6 @@ void LPS_Enter(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-_func_enter_;
-
if (PS_RDY_CHECK(padapter) == false)
return;
@@ -428,8 +417,6 @@ _func_enter_;
pwrpriv->LpsIdleCount++;
}
}
-
-_func_exit_;
}
#define LPS_LEAVE_TIMEOUT_MS 100
@@ -440,8 +427,6 @@ void LPS_Leave(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-_func_enter_;
-
if (pwrpriv->bLeisurePs) {
if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
rtw_set_ps_mode(padapter, PS_MODE_ACTIVE, 0, 0);
@@ -452,8 +437,6 @@ _func_enter_;
}
pwrpriv->bpower_saving = false;
-
-_func_exit_;
}
/* */
@@ -465,23 +448,17 @@ void LeaveAllPowerSaveMode(struct adapter *Adapter)
struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
u8 enqueue = 0;
-_func_enter_;
-
if (check_fwstate(pmlmepriv, _FW_LINKED)) { /* connect */
p2p_ps_wk_cmd(Adapter, P2P_PS_DISABLE, enqueue);
rtw_lps_ctrl_wk_cmd(Adapter, LPS_CTRL_LEAVE, enqueue);
}
-
-_func_exit_;
}
void rtw_init_pwrctrl_priv(struct adapter *padapter)
{
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-_func_enter_;
-
_init_pwrlock(&pwrctrlpriv->lock);
pwrctrlpriv->rf_pwrstate = rf_on;
pwrctrlpriv->ips_enter_cnts = 0;
@@ -518,8 +495,6 @@ _func_enter_;
pwrctrlpriv->btcoex_rfon = false;
_init_timer(&(pwrctrlpriv->pwr_state_check_timer), padapter->pnetdev, pwr_state_check_handler, (u8 *)padapter);
-
-_func_exit_;
}
u8 rtw_interface_ps_func(struct adapter *padapter, enum hal_intf_ps_func efunc_id, u8 *val)
@@ -540,7 +515,7 @@ inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms)
/*
* rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
* @adapter: pointer to struct adapter structure
-* @ips_deffer_ms: the ms wiil prevent from falling into IPS after wakeup
+* @ips_deffer_ms: time (in ms) for which re-entering IPS is prevented after wakeup
* Return _SUCCESS or _FAIL
*/
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index c9c180649c12..636ec553ae83 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -23,11 +23,12 @@
#include <drv_types.h>
#include <recv_osdep.h>
#include <mlme_osdep.h>
-#include <ip.h>
-#include <if_ether.h>
-#include <ethernet.h>
#include <usb_ops.h>
#include <wifi.h>
+#include <linux/vmalloc.h>
+
+#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
+#define LLC_HEADER_SIZE 6 /* LLC Header Length */
static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
@@ -45,7 +46,6 @@ void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
-_func_enter_;
_rtw_memset((u8 *)psta_recvpriv, 0, sizeof (struct sta_recv_priv));
@@ -53,18 +53,16 @@ _func_enter_;
_rtw_init_queue(&psta_recvpriv->defrag_q);
-_func_exit_;
}
int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
{
int i;
- union recv_frame *precvframe;
+ struct recv_frame *precvframe;
int res = _SUCCESS;
-_func_enter_;
spin_lock_init(&precvpriv->lock);
_rtw_init_queue(&precvpriv->free_recv_queue);
@@ -77,7 +75,7 @@ _func_enter_;
rtw_os_recv_resource_init(precvpriv, padapter);
- precvpriv->pallocated_frame_buf = rtw_zvmalloc(NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
+ precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ);
if (precvpriv->pallocated_frame_buf == NULL) {
res = _FAIL;
@@ -86,18 +84,19 @@ _func_enter_;
precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
- precvframe = (union recv_frame *)precvpriv->precv_frame_buf;
+ precvframe = (struct recv_frame *)precvpriv->precv_frame_buf;
for (i = 0; i < NR_RECVFRAME; i++) {
- _rtw_init_listhead(&(precvframe->u.list));
+ _rtw_init_listhead(&(precvframe->list));
- rtw_list_insert_tail(&(precvframe->u.list), &(precvpriv->free_recv_queue.queue));
+ rtw_list_insert_tail(&(precvframe->list),
+ &(precvpriv->free_recv_queue.queue));
res = rtw_os_recv_resource_alloc(padapter, precvframe);
- precvframe->u.hdr.len = 0;
+ precvframe->len = 0;
- precvframe->u.hdr.adapter = padapter;
+ precvframe->adapter = padapter;
precvframe++;
}
precvpriv->rx_pending_cnt = 1;
@@ -113,7 +112,6 @@ _func_enter_;
rtw_set_signal_stat_timer(precvpriv);
exit:
-_func_exit_;
return res;
}
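The rtw_recv.c changes flatten the old union recv_frame, where every field was reached through ->u.hdr.*, into a plain struct recv_frame with direct members. A simplified before/after sketch (field set illustrative only; the driver's real union exposes more views than shown here):

#include <stdio.h>

struct rx_attrib_ex { int priority; };

struct recv_frame_hdr_ex {
	struct rx_attrib_ex attrib;
	unsigned int len;
};

/* old shape: members reached through precvframe->u.hdr.* */
union recv_frame_old {
	union {
		struct recv_frame_hdr_ex hdr;
	} u;
};

/* new shape: the same members live directly in the struct */
struct recv_frame_new {
	struct rx_attrib_ex attrib;
	unsigned int len;
};

int main(void)
{
	union recv_frame_old o = { .u.hdr = { .attrib = { .priority = 3 }, .len = 100 } };
	struct recv_frame_new n = { .attrib = { .priority = 3 }, .len = 100 };

	/* precvframe->u.hdr.len becomes precvframe->len */
	printf("old len=%u new len=%u\n", o.u.hdr.len, n.len);
	return 0;
}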
@@ -122,40 +120,37 @@ void _rtw_free_recv_priv (struct recv_priv *precvpriv)
{
struct adapter *padapter = precvpriv->adapter;
-_func_enter_;
rtw_free_uc_swdec_pending_queue(padapter);
rtw_os_recv_resource_free(precvpriv);
if (precvpriv->pallocated_frame_buf) {
- rtw_vmfree(precvpriv->pallocated_frame_buf, NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
+ vfree(precvpriv->pallocated_frame_buf);
}
rtw_hal_free_recv_priv(padapter);
-_func_exit_;
}
-union recv_frame *_rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
+struct recv_frame *_rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
{
- union recv_frame *precvframe;
+ struct recv_frame *hdr;
struct list_head *plist, *phead;
struct adapter *padapter;
struct recv_priv *precvpriv;
-_func_enter_;
if (_rtw_queue_empty(pfree_recv_queue)) {
- precvframe = NULL;
+ hdr = NULL;
} else {
phead = get_list_head(pfree_recv_queue);
- plist = get_next(phead);
+ plist = phead->next;
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ hdr = container_of(plist, struct recv_frame, list);
- rtw_list_delete(&precvframe->u.hdr.list);
- padapter = precvframe->u.hdr.adapter;
+ rtw_list_delete(&hdr->list);
+ padapter = hdr->adapter;
if (padapter != NULL) {
precvpriv = &padapter->recvpriv;
if (pfree_recv_queue == &precvpriv->free_recv_queue)
@@ -163,14 +158,13 @@ _func_enter_;
}
}
-_func_exit_;
- return precvframe;
+ return (struct recv_frame *)hdr;
}
-union recv_frame *rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
+struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
{
- union recv_frame *precvframe;
+ struct recv_frame *precvframe;
spin_lock_bh(&pfree_recv_queue->lock);
@@ -181,36 +175,36 @@ union recv_frame *rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
return precvframe;
}
-void rtw_init_recvframe(union recv_frame *precvframe, struct recv_priv *precvpriv)
+void rtw_init_recvframe(struct recv_frame *precvframe, struct recv_priv *precvpriv)
{
/* Perry: This can be removed */
- _rtw_init_listhead(&precvframe->u.hdr.list);
+ _rtw_init_listhead(&precvframe->list);
- precvframe->u.hdr.len = 0;
+ precvframe->len = 0;
}
-int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue)
+int rtw_free_recvframe(struct recv_frame *precvframe,
+ struct __queue *pfree_recv_queue)
{
struct adapter *padapter;
struct recv_priv *precvpriv;
-_func_enter_;
if (!precvframe)
return _FAIL;
- padapter = precvframe->u.hdr.adapter;
+ padapter = precvframe->adapter;
precvpriv = &padapter->recvpriv;
- if (precvframe->u.hdr.pkt) {
- dev_kfree_skb_any(precvframe->u.hdr.pkt);/* free skb by driver */
- precvframe->u.hdr.pkt = NULL;
+ if (precvframe->pkt) {
+ dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */
+ precvframe->pkt = NULL;
}
spin_lock_bh(&pfree_recv_queue->lock);
- rtw_list_delete(&(precvframe->u.hdr.list));
+ rtw_list_delete(&(precvframe->list));
- precvframe->u.hdr.len = 0;
+ precvframe->len = 0;
- rtw_list_insert_tail(&(precvframe->u.hdr.list), get_list_head(pfree_recv_queue));
+ rtw_list_insert_tail(&(precvframe->list), get_list_head(pfree_recv_queue));
if (padapter != NULL) {
if (pfree_recv_queue == &precvpriv->free_recv_queue)
@@ -219,32 +213,29 @@ _func_enter_;
spin_unlock_bh(&pfree_recv_queue->lock);
-_func_exit_;
return _SUCCESS;
}
-int _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
+int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
{
- struct adapter *padapter = precvframe->u.hdr.adapter;
+ struct adapter *padapter = precvframe->adapter;
struct recv_priv *precvpriv = &padapter->recvpriv;
-_func_enter_;
- rtw_list_delete(&(precvframe->u.hdr.list));
- rtw_list_insert_tail(&(precvframe->u.hdr.list), get_list_head(queue));
+ rtw_list_delete(&(precvframe->list));
+ rtw_list_insert_tail(&(precvframe->list), get_list_head(queue));
if (padapter != NULL) {
if (queue == &precvpriv->free_recv_queue)
precvpriv->free_recvframe_cnt++;
}
-_func_exit_;
return _SUCCESS;
}
-int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
+int rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
{
int ret;
@@ -265,32 +256,30 @@ using spinlock to protect
void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue)
{
- union recv_frame *precvframe;
+ struct recv_frame *hdr;
struct list_head *plist, *phead;
-_func_enter_;
spin_lock(&pframequeue->lock);
phead = get_list_head(pframequeue);
- plist = get_next(phead);
+ plist = phead->next;
while (rtw_end_of_queue_search(phead, plist) == false) {
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ hdr = container_of(plist, struct recv_frame, list);
- plist = get_next(plist);
+ plist = plist->next;
- rtw_free_recvframe(precvframe, pfree_recv_queue);
+ rtw_free_recvframe((struct recv_frame *)hdr, pfree_recv_queue);
}
spin_unlock(&pframequeue->lock);
-_func_exit_;
}
u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
{
u32 cnt = 0;
- union recv_frame *pending_frame;
+ struct recv_frame *pending_frame;
while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
DBG_88E("%s: dequeue uc_swdec_pending_queue\n", __func__);
@@ -337,9 +326,9 @@ struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue)
} else {
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
- precvbuf = LIST_CONTAINOR(plist, struct recv_buf, list);
+ precvbuf = container_of(plist, struct recv_buf, list);
rtw_list_delete(&precvbuf->list);
}
@@ -349,7 +338,8 @@ struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue)
return precvbuf;
}
-static int recvframe_chkmic(struct adapter *adapter, union recv_frame *precvframe)
+static int recvframe_chkmic(struct adapter *adapter,
+ struct recv_frame *precvframe)
{
int i, res = _SUCCESS;
u32 datalen;
@@ -358,12 +348,11 @@ static int recvframe_chkmic(struct adapter *adapter, union recv_frame *precvfra
u8 *pframe, *payload, *pframemic;
u8 *mickey;
struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &precvframe->u.hdr.attrib;
+ struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
struct security_priv *psecuritypriv = &adapter->securitypriv;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
-_func_enter_;
stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
@@ -375,23 +364,24 @@ _func_enter_;
/* calculate mic code */
if (stainfo != NULL) {
if (IS_MCAST(prxattrib->ra)) {
- mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic: bcmc key\n"));
-
if (!psecuritypriv) {
res = _FAIL;
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n"));
DBG_88E("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n");
goto exit;
}
+ mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic: bcmc key\n"));
} else {
mickey = &stainfo->dot11tkiprxmickey.skey[0];
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic: unicast key\n"));
}
- datalen = precvframe->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len-prxattrib->icv_len-8;/* icv_len included the mic code */
- pframe = precvframe->u.hdr.rx_data;
+ /* icv_len included the mic code */
+ datalen = precvframe->len-prxattrib->hdrlen -
+ prxattrib->iv_len-prxattrib->icv_len-8;
+ pframe = precvframe->rx_data;
payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
@@ -424,16 +414,30 @@ _func_enter_;
*(pframemic-10), *(pframemic-9)));
{
uint i;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n ======demp packet (len=%d)======\n", precvframe->u.hdr.len));
- for (i = 0; i < precvframe->u.hdr.len; i = i+8) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
- *(precvframe->u.hdr.rx_data+i), *(precvframe->u.hdr.rx_data+i+1),
- *(precvframe->u.hdr.rx_data+i+2), *(precvframe->u.hdr.rx_data+i+3),
- *(precvframe->u.hdr.rx_data+i+4), *(precvframe->u.hdr.rx_data+i+5),
- *(precvframe->u.hdr.rx_data+i+6), *(precvframe->u.hdr.rx_data+i+7)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n ======demp packet (len=%d)======\n",
+ precvframe->len));
+ for (i = 0; i < precvframe->len; i += 8) {
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
+ *(precvframe->rx_data+i),
+ *(precvframe->rx_data+i+1),
+ *(precvframe->rx_data+i+2),
+ *(precvframe->rx_data+i+3),
+ *(precvframe->rx_data+i+4),
+ *(precvframe->rx_data+i+5),
+ *(precvframe->rx_data+i+6),
+ *(precvframe->rx_data+i+7)));
}
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n ====== demp packet end [len=%d]======\n", precvframe->u.hdr.len));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n hrdlen=%d,\n", prxattrib->hdrlen));
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("\n ====== demp packet end [len=%d]======\n",
+ precvframe->len));
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("\n hrdlen=%d,\n",
+ prxattrib->hdrlen));
}
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
@@ -471,24 +475,23 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
/* decrypt and set the ivlen, icvlen of the recv_frame */
-static union recv_frame *decryptor(struct adapter *padapter, union recv_frame *precv_frame)
+static struct recv_frame *decryptor(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
- struct rx_pkt_attrib *prxattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *prxattrib = &precv_frame->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- union recv_frame *return_packet = precv_frame;
+ struct recv_frame *return_packet = precv_frame;
u32 res = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("prxstat->decrypted=%x prxattrib->encrypt=0x%03x\n", prxattrib->bdecrypted, prxattrib->encrypt));
if (prxattrib->encrypt > 0) {
- u8 *iv = precv_frame->u.hdr.rx_data+prxattrib->hdrlen;
+ u8 *iv = precv_frame->rx_data+prxattrib->hdrlen;
prxattrib->key_index = (((iv[3])>>6)&0x3);
if (prxattrib->key_index > WEP_KEYS) {
@@ -534,34 +537,33 @@ _func_enter_;
return_packet = NULL;
}
-_func_exit_;
return return_packet;
}
/* set the security information in the recv_frame */
-static union recv_frame *portctrl(struct adapter *adapter, union recv_frame *precv_frame)
+static struct recv_frame *portctrl(struct adapter *adapter,
+ struct recv_frame *precv_frame)
{
u8 *psta_addr = NULL, *ptr;
uint auth_alg;
- struct recv_frame_hdr *pfhdr;
+ struct recv_frame *pfhdr;
struct sta_info *psta;
struct sta_priv *pstapriv;
- union recv_frame *prtnframe;
+ struct recv_frame *prtnframe;
u16 ether_type = 0;
u16 eapol_type = 0x888e;/* for Funia BD's WPA issue */
struct rx_pkt_attrib *pattrib;
__be16 be_tmp;
-_func_enter_;
pstapriv = &adapter->stapriv;
psta = rtw_get_stainfo(pstapriv, psta_addr);
auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
- ptr = get_recvframe_data(precv_frame);
- pfhdr = &precv_frame->u.hdr;
+ ptr = precv_frame->rx_data;
+ pfhdr = precv_frame;
pattrib = &pfhdr->attrib;
psta_addr = pattrib->ta;
@@ -593,7 +595,9 @@ _func_enter_;
/* allowed */
/* check decryption status, and decrypt the frame if needed */
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:psta->ieee8021x_blocked==0\n"));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("portctrl:precv_frame->hdr.attrib.privacy=%x\n", precv_frame->u.hdr.attrib.privacy));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("portctrl:precv_frame->hdr.attrib.privacy=%x\n",
+ precv_frame->attrib.privacy));
if (pattrib->bdecrypted == 0)
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("portctrl:prxstat->decrypted=%x\n", pattrib->bdecrypted));
@@ -613,19 +617,18 @@ _func_enter_;
prtnframe = precv_frame;
}
-_func_exit_;
return prtnframe;
}
-static int recv_decache(union recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache)
+static int recv_decache(struct recv_frame *precv_frame, u8 bretry,
+ struct stainfo_rxcache *prxcache)
{
- int tid = precv_frame->u.hdr.attrib.priority;
+ int tid = precv_frame->attrib.priority;
- u16 seq_ctrl = ((precv_frame->u.hdr.attrib.seq_num&0xffff) << 4) |
- (precv_frame->u.hdr.attrib.frag_num & 0xf);
+ u16 seq_ctrl = ((precv_frame->attrib.seq_num&0xffff) << 4) |
+ (precv_frame->attrib.frag_num & 0xf);
-_func_enter_;
if (tid > 15) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, (tid>15)! seq_ctrl=0x%x, tid=0x%x\n", seq_ctrl, tid));
@@ -643,18 +646,17 @@ _func_enter_;
prxcache->tid_rxseq[tid] = seq_ctrl;
-_func_exit_;
return _SUCCESS;
}
-void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame);
-void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame)
+void process_pwrbit_data(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
unsigned char pwrbit;
- u8 *ptr = precv_frame->u.hdr.rx_data;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ u8 *ptr = precv_frame->rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta = NULL;
@@ -675,10 +677,11 @@ void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame
#endif
}
-static void process_wmmps_data(struct adapter *padapter, union recv_frame *precv_frame)
+static void process_wmmps_data(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta = NULL;
@@ -730,15 +733,17 @@ static void process_wmmps_data(struct adapter *padapter, union recv_frame *precv
#endif
}
-static void count_rx_stats(struct adapter *padapter, union recv_frame *prframe, struct sta_info *sta)
+static void count_rx_stats(struct adapter *padapter,
+ struct recv_frame *prframe,
+ struct sta_info *sta)
{
int sz;
struct sta_info *psta = NULL;
struct stainfo_stats *pstats = NULL;
- struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &prframe->attrib;
struct recv_priv *precvpriv = &padapter->recvpriv;
- sz = get_recvframe_len(prframe);
+ sz = prframe->len;
precvpriv->rx_bytes += sz;
padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
@@ -749,7 +754,7 @@ static void count_rx_stats(struct adapter *padapter, union recv_frame *prframe,
if (sta)
psta = sta;
else
- psta = prframe->u.hdr.psta;
+ psta = prframe->psta;
if (psta) {
pstats = &psta->sta_stats;
@@ -761,15 +766,16 @@ static void count_rx_stats(struct adapter *padapter, union recv_frame *prframe,
int sta2sta_data_frame(
struct adapter *adapter,
- union recv_frame *precv_frame,
+ struct recv_frame *precv_frame,
struct sta_info **psta
);
-int sta2sta_data_frame(struct adapter *adapter, union recv_frame *precv_frame, struct sta_info **psta)
+int sta2sta_data_frame(struct adapter *adapter, struct recv_frame *precv_frame,
+ struct sta_info **psta)
{
- u8 *ptr = precv_frame->u.hdr.rx_data;
+ u8 *ptr = precv_frame->rx_data;
int ret = _SUCCESS;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8 *mybssid = get_bssid(pmlmepriv);
@@ -777,25 +783,24 @@ int sta2sta_data_frame(struct adapter *adapter, union recv_frame *precv_frame, s
u8 *sta_addr = NULL;
int bmcast = IS_MCAST(pattrib->dst);
-_func_enter_;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
/* filter packets that SA is myself or multicast or broadcast */
- if (_rtw_memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
+ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA==myself\n"));
ret = _FAIL;
goto exit;
}
- if ((!_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
+ if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
ret = _FAIL;
goto exit;
}
- if (_rtw_memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- _rtw_memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- !_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
+ if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
ret = _FAIL;
goto exit;
}
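The conversions above (and throughout this file) follow one rule: _rtw_memcmp() was a boolean "are these equal?" helper, while memcmp() returns 0 on equality, so _rtw_memcmp(a, b, n) becomes !memcmp(a, b, n) and a negated call becomes a bare memcmp(). A tiny sketch of the rule:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char mac_a[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char mac_b[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char mac_c[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };

	if (!memcmp(mac_a, mac_b, sizeof(mac_a)))	/* equal */
		printf("a == b\n");
	if (memcmp(mac_a, mac_c, sizeof(mac_a)))	/* different */
		printf("a != c\n");
	return 0;
}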
@@ -803,7 +808,7 @@ _func_enter_;
sta_addr = pattrib->src;
} else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
/* For Station mode, sa and bssid should always be BSSID, and DA is my mac-address */
- if (!_rtw_memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) {
+ if (memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("bssid!=TA under STATION_MODE; drop pkt\n"));
ret = _FAIL;
goto exit;
@@ -818,7 +823,7 @@ _func_enter_;
}
} else { /* not mc-frame */
/* For AP mode, if DA is non-MCAST, then it must be BSSID, and bssid == BSSID */
- if (!_rtw_memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) {
+ if (memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) {
ret = _FAIL;
goto exit;
}
@@ -853,17 +858,16 @@ _func_enter_;
}
exit:
-_func_exit_;
return ret;
}
static int ap2sta_data_frame (
struct adapter *adapter,
- union recv_frame *precv_frame,
+ struct recv_frame *precv_frame,
struct sta_info **psta)
{
- u8 *ptr = precv_frame->u.hdr.rx_data;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ u8 *ptr = precv_frame->rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
int ret = _SUCCESS;
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
@@ -871,20 +875,19 @@ static int ap2sta_data_frame (
u8 *myhwaddr = myid(&adapter->eeprompriv);
int bmcast = IS_MCAST(pattrib->dst);
-_func_enter_;
if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) &&
(check_fwstate(pmlmepriv, _FW_LINKED) == true ||
check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) {
/* filter packets that SA is myself or multicast or broadcast */
- if (_rtw_memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
+ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA==myself\n"));
ret = _FAIL;
goto exit;
}
/* da should be for me */
- if ((!_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
+ if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
(" ap2sta_data_frame: compare DA fail; DA=%pM\n", (pattrib->dst)));
ret = _FAIL;
@@ -892,9 +895,9 @@ _func_enter_;
}
/* check BSSID */
- if (_rtw_memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- _rtw_memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- (!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
+ if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
(" ap2sta_data_frame: compare BSSID fail ; BSSID=%pM\n", (pattrib->bssid)));
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("mybssid=%pM\n", (mybssid)));
@@ -950,7 +953,7 @@ _func_enter_;
ret = RTW_RX_HANDLED;
goto exit;
} else {
- if (_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN) && (!bmcast)) {
+ if (!memcmp(myhwaddr, pattrib->dst, ETH_ALEN) && (!bmcast)) {
*psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
if (*psta == NULL) {
DBG_88E("issue_deauth to the ap =%pM for the reason(7)\n", (pattrib->bssid));
@@ -964,27 +967,25 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
static int sta2ap_data_frame(struct adapter *adapter,
- union recv_frame *precv_frame,
+ struct recv_frame *precv_frame,
struct sta_info **psta)
{
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- u8 *ptr = precv_frame->u.hdr.rx_data;
+ u8 *ptr = precv_frame->rx_data;
unsigned char *mybssid = get_bssid(pmlmepriv);
int ret = _SUCCESS;
-_func_enter_;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
/* For AP mode, RA = BSSID, TX = STA(SRC_ADDR), A3 = DST_ADDR */
- if (!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
+ if (memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
ret = _FAIL;
goto exit;
}
@@ -1014,7 +1015,7 @@ _func_enter_;
}
} else {
u8 *myhwaddr = myid(&adapter->eeprompriv);
- if (!_rtw_memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
+ if (memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
ret = RTW_RX_HANDLED;
goto exit;
}
@@ -1026,25 +1027,23 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
static int validate_recv_ctrl_frame(struct adapter *padapter,
- union recv_frame *precv_frame)
+ struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->u.hdr.rx_data;
- /* uint len = precv_frame->u.hdr.len; */
+ u8 *pframe = precv_frame->rx_data;
if (GetFrameType(pframe) != WIFI_CTRL_TYPE)
return _FAIL;
/* receive the frames that ra(a1) is my address */
- if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
+ if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
return _FAIL;
/* only handle ps-poll */
@@ -1098,12 +1097,12 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
if ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == false) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
rtw_list_delete(&pxmitframe->list);
@@ -1124,7 +1123,7 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
if (psta->sleepq_len == 0) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
/* update_BCNTIM(padapter); */
update_beacon(padapter, _TIM_IE_, NULL, false);
}
@@ -1142,7 +1141,7 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
pstapriv->tim_bitmap &= ~BIT(psta->aid);
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
/* update_BCNTIM(padapter); */
update_beacon(padapter, _TIM_IE_, NULL, false);
}
@@ -1157,10 +1156,11 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
return _FAIL;
}
-union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union recv_frame *precv_frame);
+struct recv_frame *recvframe_chk_defrag(struct adapter *padapter,
+ struct recv_frame *precv_frame);
static int validate_recv_mgnt_frame(struct adapter *padapter,
- union recv_frame *precv_frame)
+ struct recv_frame *precv_frame)
{
struct sta_info *psta;
@@ -1173,18 +1173,20 @@ static int validate_recv_mgnt_frame(struct adapter *padapter,
}
/* for rx pkt statistics */
- psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->u.hdr.rx_data));
+ psta = rtw_get_stainfo(&padapter->stapriv,
+ GetAddr2Ptr(precv_frame->rx_data));
if (psta) {
psta->sta_stats.rx_mgnt_pkts++;
- if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_BEACON) {
+ if (GetFrameSubType(precv_frame->rx_data) == WIFI_BEACON) {
psta->sta_stats.rx_beacon_pkts++;
- } else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBEREQ) {
+ } else if (GetFrameSubType(precv_frame->rx_data) == WIFI_PROBEREQ) {
psta->sta_stats.rx_probereq_pkts++;
- } else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBERSP) {
- if (_rtw_memcmp(padapter->eeprompriv.mac_addr, GetAddr1Ptr(precv_frame->u.hdr.rx_data), ETH_ALEN) == true)
+ } else if (GetFrameSubType(precv_frame->rx_data) == WIFI_PROBERSP) {
+ if (!memcmp(padapter->eeprompriv.mac_addr,
+ GetAddr1Ptr(precv_frame->rx_data), ETH_ALEN))
psta->sta_stats.rx_probersp_pkts++;
- else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)) ||
- is_multicast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)))
+ else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->rx_data)) ||
+ is_multicast_mac_addr(GetAddr1Ptr(precv_frame->rx_data)))
psta->sta_stats.rx_probersp_bm_pkts++;
else
psta->sta_stats.rx_probersp_uo_pkts++;
@@ -1197,17 +1199,16 @@ static int validate_recv_mgnt_frame(struct adapter *padapter,
}
static int validate_recv_data_frame(struct adapter *adapter,
- union recv_frame *precv_frame)
+ struct recv_frame *precv_frame)
{
u8 bretry;
u8 *psa, *pda, *pbssid;
struct sta_info *psta = NULL;
- u8 *ptr = precv_frame->u.hdr.rx_data;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ u8 *ptr = precv_frame->rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct security_priv *psecuritypriv = &adapter->securitypriv;
int ret = _SUCCESS;
-_func_enter_;
bretry = GetRetry(ptr);
pda = get_da(ptr);
@@ -1265,7 +1266,7 @@ _func_enter_;
/* psta->rssi = prxcmd->rssi; */
/* psta->signal_quality = prxcmd->sq; */
- precv_frame->u.hdr.psta = psta;
+ precv_frame->psta = psta;
pattrib->amsdu = 0;
pattrib->ack_policy = 0;
@@ -1286,7 +1287,7 @@ _func_enter_;
if (pattrib->order)/* HT-CTRL 11n */
pattrib->hdrlen += 4;
- precv_frame->u.hdr.preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
+ precv_frame->preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
/* decache, drop duplicate recv packets */
if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) {
@@ -1312,12 +1313,12 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
-static int validate_recv_frame(struct adapter *adapter, union recv_frame *precv_frame)
+static int validate_recv_frame(struct adapter *adapter,
+ struct recv_frame *precv_frame)
{
/* shall check frame subtype, to / from ds, da, bssid */
@@ -1327,12 +1328,11 @@ static int validate_recv_frame(struct adapter *adapter, union recv_frame *precv_
u8 subtype;
int retval = _SUCCESS;
u8 bDumpRxPkt;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
- u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ u8 *ptr = precv_frame->rx_data;
u8 ver = (unsigned char) (*ptr)&0x3;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
-_func_enter_;
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
int ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, rtw_get_oper_ch(adapter));
@@ -1422,14 +1422,13 @@ _func_enter_;
exit:
-_func_exit_;
return retval;
}
/* remove the wlanhdr and add the eth_hdr */
-static int wlanhdr_to_ethhdr (union recv_frame *precvframe)
+static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
{
int rmv_len;
u16 eth_type, len;
@@ -1439,13 +1438,10 @@ static int wlanhdr_to_ethhdr (union recv_frame *precvframe)
struct ieee80211_snap_hdr *psnap;
int ret = _SUCCESS;
- struct adapter *adapter = precvframe->u.hdr.adapter;
+ struct adapter *adapter = precvframe->adapter;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-
- u8 *ptr = get_recvframe_data(precvframe); /* point to frame_ctrl field */
- struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
-
-_func_enter_;
+ u8 *ptr = precvframe->rx_data;
+ struct rx_pkt_attrib *pattrib = &precvframe->attrib;
if (pattrib->encrypt)
recvframe_pull_tail(precvframe, pattrib->icv_len);
@@ -1453,10 +1449,10 @@ _func_enter_;
psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
/* convert hdr + possible LLC headers into Ethernet header */
- if ((_rtw_memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
- (_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) &&
- (_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2) == false)) ||
- _rtw_memcmp(psnap, rtw_bridge_tunnel_header, SNAP_SIZE)) {
+ if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
+ memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) &&
+ memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2)) ||
+ !memcmp(psnap, rtw_bridge_tunnel_header, SNAP_SIZE)) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
bsnaphdr = true;
} else {
@@ -1465,7 +1461,7 @@ _func_enter_;
}
rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0);
- len = precvframe->u.hdr.len - rmv_len;
+ len = precvframe->len - rmv_len;
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
("\n===pattrib->hdrlen: %x, pattrib->iv_len:%x===\n\n", pattrib->hdrlen, pattrib->iv_len));
@@ -1496,30 +1492,29 @@ _func_enter_;
memcpy(ptr+12, &be_tmp, 2);
}
-_func_exit_;
return ret;
}
/* perform defrag */
-static union recv_frame *recvframe_defrag(struct adapter *adapter, struct __queue *defrag_q)
+static struct recv_frame *recvframe_defrag(struct adapter *adapter,
+ struct __queue *defrag_q)
{
struct list_head *plist, *phead;
u8 wlanhdr_offset;
u8 curfragnum;
- struct recv_frame_hdr *pfhdr, *pnfhdr;
- union recv_frame *prframe, *pnextrframe;
+ struct recv_frame *pfhdr, *pnfhdr;
+ struct recv_frame *prframe, *pnextrframe;
struct __queue *pfree_recv_queue;
-_func_enter_;
curfragnum = 0;
pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
phead = get_list_head(defrag_q);
- plist = get_next(phead);
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
- pfhdr = &prframe->u.hdr;
- rtw_list_delete(&(prframe->u.list));
+ plist = phead->next;
+ pfhdr = container_of(plist, struct recv_frame, list);
+ prframe = (struct recv_frame *)pfhdr;
+ rtw_list_delete(&(prframe->list));
if (curfragnum != pfhdr->attrib.frag_num) {
/* the first fragment number must be 0 */
@@ -1534,11 +1529,11 @@ _func_enter_;
plist = get_list_head(defrag_q);
- plist = get_next(plist);
+ plist = plist->next;
while (rtw_end_of_queue_search(phead, plist) == false) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame , u);
- pnfhdr = &pnextrframe->u.hdr;
+ pnfhdr = container_of(plist, struct recv_frame, list);
+ pnextrframe = (struct recv_frame *)pnfhdr;
/* check the fragment sequence (2nd ~n fragment frame) */
@@ -1568,7 +1563,7 @@ _func_enter_;
recvframe_put(prframe, pnfhdr->len);
pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
- plist = get_next(plist);
+ plist = plist->next;
}
/* free the defrag_q queue and return the prframe */
@@ -1576,29 +1571,28 @@ _func_enter_;
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Performance defrag!!!!!\n"));
-_func_exit_;
return prframe;
}
/* check if need to defrag, if needed queue the frame to defrag_q */
-union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union recv_frame *precv_frame)
+struct recv_frame *recvframe_chk_defrag(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
u8 ismfrag;
u8 fragnum;
u8 *psta_addr;
- struct recv_frame_hdr *pfhdr;
+ struct recv_frame *pfhdr;
struct sta_info *psta;
struct sta_priv *pstapriv;
struct list_head *phead;
- union recv_frame *prtnframe = NULL;
+ struct recv_frame *prtnframe = NULL;
struct __queue *pfree_recv_queue, *pdefrag_q;
-_func_enter_;
pstapriv = &padapter->stapriv;
- pfhdr = &precv_frame->u.hdr;
+ pfhdr = precv_frame;
pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
@@ -1670,7 +1664,7 @@ _func_enter_;
}
}
- if ((prtnframe != NULL) && (prtnframe->u.hdr.attrib.privacy)) {
+ if ((prtnframe != NULL) && (prtnframe->attrib.privacy)) {
/* after defrag we must check tkip mic code */
if (recvframe_chkmic(padapter, prtnframe) == _FAIL) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic(padapter, prtnframe)==_FAIL\n"));
@@ -1679,12 +1673,11 @@ _func_enter_;
}
}
-_func_exit_;
return prtnframe;
}
-static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
+static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
{
int a_len, padding_len;
u16 eth_type, nSubframe_Length;
@@ -1698,16 +1691,16 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
int ret = _SUCCESS;
nr_subframes = 0;
- pattrib = &prframe->u.hdr.attrib;
+ pattrib = &prframe->attrib;
- recvframe_pull(prframe, prframe->u.hdr.attrib.hdrlen);
+ recvframe_pull(prframe, prframe->attrib.hdrlen);
- if (prframe->u.hdr.attrib.iv_len > 0)
- recvframe_pull(prframe, prframe->u.hdr.attrib.iv_len);
+ if (prframe->attrib.iv_len > 0)
+ recvframe_pull(prframe, prframe->attrib.iv_len);
- a_len = prframe->u.hdr.len;
+ a_len = prframe->len;
- pdata = prframe->u.hdr.rx_data;
+ pdata = prframe->rx_data;
while (a_len > ETH_HLEN) {
/* Offset 12 denote 2 mac address */
@@ -1729,7 +1722,7 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
memcpy(data_ptr, pdata, nSubframe_Length);
} else {
- sub_skb = skb_clone(prframe->u.hdr.pkt, GFP_ATOMIC);
+ sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
if (sub_skb) {
sub_skb->data = pdata;
sub_skb->len = nSubframe_Length;
@@ -1768,9 +1761,9 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
/* convert hdr + possible LLC headers into Ethernet header */
eth_type = RTW_GET_BE16(&sub_skb->data[6]);
if (sub_skb->len >= 8 &&
- ((_rtw_memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
+ ((!memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
- _rtw_memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) {
+ !memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
skb_pull(sub_skb, SNAP_SIZE);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
@@ -1796,7 +1789,7 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
exit:
- prframe->u.hdr.len = 0;
+ prframe->len = 0;
rtw_free_recvframe(prframe, pfree_recv_queue);/* free this recv_frame */
return ret;
@@ -1832,70 +1825,72 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_n
return true;
}
-int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe);
-int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe)
+int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
+ struct recv_frame *prframe)
{
- struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &prframe->attrib;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
struct list_head *phead, *plist;
- union recv_frame *pnextrframe;
+ struct recv_frame *hdr;
struct rx_pkt_attrib *pnextattrib;
phead = get_list_head(ppending_recvframe_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (rtw_end_of_queue_search(phead, plist) == false) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
- pnextattrib = &pnextrframe->u.hdr.attrib;
+ hdr = container_of(plist, struct recv_frame, list);
+ pnextattrib = &hdr->attrib;
if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
- plist = get_next(plist);
+ plist = plist->next;
else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
return false;
else
break;
}
- rtw_list_delete(&(prframe->u.hdr.list));
+ rtw_list_delete(&(prframe->list));
- rtw_list_insert_tail(&(prframe->u.hdr.list), plist);
+ rtw_list_insert_tail(&(prframe->list), plist);
return true;
}
static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced)
{
struct list_head *phead, *plist;
- union recv_frame *prframe;
+ struct recv_frame *prframe;
+ struct recv_frame *prhdr;
struct rx_pkt_attrib *pattrib;
int bPktInBuf = false;
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
phead = get_list_head(ppending_recvframe_queue);
- plist = get_next(phead);
+ plist = phead->next;
/* Handling some condition for forced indicate case. */
if (bforced) {
if (rtw_is_list_empty(phead))
return true;
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
- pattrib = &prframe->u.hdr.attrib;
+ prhdr = container_of(plist, struct recv_frame, list);
+ pattrib = &prhdr->attrib;
preorder_ctrl->indicate_seq = pattrib->seq_num;
}
/* Prepare indication list and indication. */
/* Check if there is any packet need indicate. */
while (!rtw_is_list_empty(phead)) {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
- pattrib = &prframe->u.hdr.attrib;
+ prhdr = container_of(plist, struct recv_frame, list);
+ prframe = (struct recv_frame *)prhdr;
+ pattrib = &prframe->attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
("recv_indicatepkts_in_order: indicate=%d seq=%d amsdu=%d\n",
preorder_ctrl->indicate_seq, pattrib->seq_num, pattrib->amsdu));
- plist = get_next(plist);
- rtw_list_delete(&(prframe->u.hdr.list));
+ plist = plist->next;
+ rtw_list_delete(&(prframe->list));
if (SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num))
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
@@ -1924,11 +1919,12 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
return bPktInBuf;
}
-static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *prframe)
+static int recv_indicatepkt_reorder(struct adapter *padapter,
+ struct recv_frame *prframe)
{
int retval = _SUCCESS;
- struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
- struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl;
+ struct rx_pkt_attrib *pattrib = &prframe->attrib;
+ struct recv_reorder_ctrl *preorder_ctrl = prframe->preorder_ctrl;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
if (!pattrib->amsdu) {
@@ -2001,7 +1997,7 @@ static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *
spin_unlock_bh(&ppending_recvframe_queue->lock);
} else {
spin_unlock_bh(&ppending_recvframe_queue->lock);
- _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
}
_success_exit:
@@ -2032,17 +2028,14 @@ void rtw_reordering_ctrl_timeout_handler(void *pcontext)
spin_unlock_bh(&ppending_recvframe_queue->lock);
}
-static int process_recv_indicatepkts(struct adapter *padapter, union recv_frame *prframe)
+static int process_recv_indicatepkts(struct adapter *padapter,
+ struct recv_frame *prframe)
{
int retval = _SUCCESS;
- /* struct recv_priv *precvpriv = &padapter->recvpriv; */
- /* struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib; */
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
if (phtpriv->ht_option) { /* B/G/N Mode */
- /* prframe->u.hdr.preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority]; */
-
if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) {
/* including perform A-MPDU Rx Ordering Buffer Control */
if ((!padapter->bDriverStopped) &&
@@ -2075,10 +2068,11 @@ static int process_recv_indicatepkts(struct adapter *padapter, union recv_frame
return retval;
}
-static int recv_func_prehandle(struct adapter *padapter, union recv_frame *rframe)
+static int recv_func_prehandle(struct adapter *padapter,
+ struct recv_frame *rframe)
{
int ret = _SUCCESS;
- struct rx_pkt_attrib *pattrib = &rframe->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &rframe->attrib;
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -2110,10 +2104,11 @@ exit:
return ret;
}
-static int recv_func_posthandle(struct adapter *padapter, union recv_frame *prframe)
+static int recv_func_posthandle(struct adapter *padapter,
+ struct recv_frame *prframe)
{
int ret = _SUCCESS;
- union recv_frame *orig_prframe = prframe;
+ struct recv_frame *orig_prframe = prframe;
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
@@ -2155,16 +2150,16 @@ _recv_data_drop:
return ret;
}
-static int recv_func(struct adapter *padapter, union recv_frame *rframe)
+static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
{
int ret;
- struct rx_pkt_attrib *prxattrib = &rframe->u.hdr.attrib;
+ struct rx_pkt_attrib *prxattrib = &rframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_priv *mlmepriv = &padapter->mlmepriv;
/* check if need to handle uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
- union recv_frame *pending_frame;
+ struct recv_frame *pending_frame;
while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) {
if (recv_func_posthandle(padapter, pending_frame) == _SUCCESS)
@@ -2193,15 +2188,14 @@ exit:
return ret;
}
-s32 rtw_recv_entry(union recv_frame *precvframe)
+s32 rtw_recv_entry(struct recv_frame *precvframe)
{
struct adapter *padapter;
struct recv_priv *precvpriv;
s32 ret = _SUCCESS;
-_func_enter_;
- padapter = precvframe->u.hdr.adapter;
+ padapter = precvframe->adapter;
precvpriv = &padapter->recvpriv;
@@ -2213,7 +2207,6 @@ _func_enter_;
precvpriv->rx_pkts++;
-_func_exit_;
return ret;
@@ -2222,7 +2215,6 @@ _recv_entry_drop:
if (padapter->registrypriv.mp_mode == 1)
padapter->mppriv.rx_pktloss = precvpriv->rx_drop;
-_func_exit_;
return ret;
}
@@ -2244,13 +2236,13 @@ void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS)
} else {
if (recvpriv->signal_strength_data.update_req == 0) {/* update_req is clear, means we got rx */
avg_signal_strength = recvpriv->signal_strength_data.avg_val;
- /* after avg_vals are accquired, we can re-stat the signal values */
+ /* after avg_vals are acquired, we can re-stat the signal values */
recvpriv->signal_strength_data.update_req = 1;
}
if (recvpriv->signal_qual_data.update_req == 0) {/* update_req is clear, means we got rx */
avg_signal_qual = recvpriv->signal_qual_data.avg_val;
- /* after avg_vals are accquired, we can re-stat the signal values */
+ /* after avg_vals are acquired, we can re-stat the signal values */
recvpriv->signal_qual_data.update_req = 1;
}
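The receive-path hunks above replace the old union-based frame bookkeeping (LIST_CONTAINOR() on the union member plus get_next()) with a plain struct recv_frame that embeds a struct list_head, walked with ->next and container_of(). The following self-contained sketch is illustrative only — the struct, helper and field names below are simplified stand-ins, not the driver's real headers — and shows the converted iteration idiom in isolation:

/*
 * Illustrative stand-ins for the list-walk idiom used after the
 * conversion: container_of() on an embedded struct list_head.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

struct recv_frame {			/* simplified stand-in */
	struct list_head list;		/* embedded node, was the union member */
	unsigned int len;
};

static void list_insert_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct recv_frame f1 = { .len = 64 }, f2 = { .len = 128 };

	list_insert_tail(&f1.list, &head);
	list_insert_tail(&f2.list, &head);

	/* converted iteration pattern: phead->next plus container_of() */
	for (struct list_head *plist = head.next; plist != &head;
	     plist = plist->next) {
		struct recv_frame *prframe =
			container_of(plist, struct recv_frame, list);
		printf("frame len=%u\n", prframe->len);
	}
	return 0;
}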
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index e08845729772..c4b16ea6348a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -41,7 +41,6 @@ static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len)
u32 stateindex;
u8 *state;
u32 counter;
-_func_enter_;
state = parc4ctx->state;
parc4ctx->x = 0;
parc4ctx->y = 0;
@@ -58,7 +57,6 @@ _func_enter_;
if (++keyindex >= key_len)
keyindex = 0;
}
-_func_exit_;
}
static u32 arcfour_byte(struct arc4context *parc4ctx)
@@ -67,7 +65,6 @@ static u32 arcfour_byte(struct arc4context *parc4ctx)
u32 y;
u32 sx, sy;
u8 *state;
-_func_enter_;
state = parc4ctx->state;
x = (parc4ctx->x + 1) & 0xff;
sx = state[x];
@@ -77,17 +74,14 @@ _func_enter_;
parc4ctx->y = y;
state[y] = (u8)sx;
state[x] = (u8)sy;
-_func_exit_;
return state[(sx + sy) & 0xff];
}
static void arcfour_encrypt(struct arc4context *parc4ctx, u8 *dest, u8 *src, u32 len)
{
u32 i;
-_func_enter_;
for (i = 0; i < len; i++)
dest[i] = src[i] ^ (unsigned char)arcfour_byte(parc4ctx);
-_func_exit_;
}
static int bcrc32initialized;
@@ -102,9 +96,8 @@ static u8 crc32_reverseBit(u8 data)
static void crc32_init(void)
{
-_func_enter_;
if (bcrc32initialized == 1) {
- goto exit;
+ return;
} else {
int i, j;
u32 c;
@@ -126,15 +119,12 @@ _func_enter_;
}
bcrc32initialized = 1;
}
-exit:
-_func_exit_;
}
static __le32 getcrc32(u8 *buf, int len)
{
u8 *p;
u32 crc;
-_func_enter_;
if (bcrc32initialized == 0)
crc32_init();
@@ -142,7 +132,6 @@ _func_enter_;
for (p = buf; len > 0; ++p, --len)
crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
-_func_exit_;
return cpu_to_le32(~crc); /* transmit complement, per CRC-32 spec */
}
@@ -165,7 +154,6 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-_func_enter_;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
return;
@@ -206,7 +194,6 @@ _func_enter_;
}
}
-_func_exit_;
}
void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
@@ -218,12 +205,11 @@ void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
u32 keylength;
u8 *pframe, *payload, *iv, wepkey[16];
u8 keyindex;
- struct rx_pkt_attrib *prxattrib = &(((union recv_frame *)precvframe)->u.hdr.attrib);
+ struct rx_pkt_attrib *prxattrib = &(((struct recv_frame *)precvframe)->attrib);
struct security_priv *psecuritypriv = &padapter->securitypriv;
-_func_enter_;
- pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+ pframe = (unsigned char *)((struct recv_frame *)precvframe)->rx_data;
/* start to decrypt recvframe */
if ((prxattrib->encrypt == _WEP40_) || (prxattrib->encrypt == _WEP104_)) {
@@ -232,7 +218,7 @@ _func_enter_;
keylength = psecuritypriv->dot11DefKeylen[keyindex];
memcpy(&wepkey[0], iv, 3);
memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[keyindex].skey[0], keylength);
- length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+ length = ((struct recv_frame *)precvframe)->len-prxattrib->hdrlen-prxattrib->iv_len;
payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
@@ -252,7 +238,6 @@ _func_enter_;
&crc, &payload[length-4]));
}
}
-_func_exit_;
return;
}
@@ -263,10 +248,8 @@ static u32 secmicgetuint32(u8 *p)
{
s32 i;
u32 res = 0;
-_func_enter_;
for (i = 0; i < 4; i++)
res |= ((u32)(*p++)) << (8*i);
-_func_exit_;
return res;
}
@@ -274,39 +257,32 @@ static void secmicputuint32(u8 *p, u32 val)
/* Convert from Us3232 to Byte[] in a portable way */
{
long i;
-_func_enter_;
for (i = 0; i < 4; i++) {
*p++ = (u8) (val & 0xff);
val >>= 8;
}
-_func_exit_;
}
static void secmicclear(struct mic_data *pmicdata)
{
/* Reset the state to the empty message. */
-_func_enter_;
pmicdata->L = pmicdata->K0;
pmicdata->R = pmicdata->K1;
pmicdata->nBytesInM = 0;
pmicdata->M = 0;
-_func_exit_;
}
void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key)
{
/* Set the key */
-_func_enter_;
pmicdata->K0 = secmicgetuint32(key);
pmicdata->K1 = secmicgetuint32(key + 4);
/* and reset the message */
secmicclear(pmicdata);
-_func_exit_;
}
void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b)
{
-_func_enter_;
/* Append the byte to our word-sized buffer */
pmicdata->M |= ((unsigned long)b) << (8*pmicdata->nBytesInM);
pmicdata->nBytesInM++;
@@ -325,23 +301,19 @@ _func_enter_;
pmicdata->M = 0;
pmicdata->nBytesInM = 0;
}
-_func_exit_;
}
void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nbytes)
{
-_func_enter_;
/* This is simple */
while (nbytes > 0) {
rtw_secmicappendbyte(pmicdata, *src++);
nbytes--;
}
-_func_exit_;
}
void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst)
{
-_func_enter_;
/* Append the minimum padding */
rtw_secmicappendbyte(pmicdata, 0x5a);
rtw_secmicappendbyte(pmicdata, 0);
@@ -356,14 +328,12 @@ _func_enter_;
secmicputuint32(dst+4, pmicdata->R);
/* Reset to the empty message. */
secmicclear(pmicdata);
-_func_exit_;
}
void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_code, u8 pri)
{
struct mic_data micdata;
u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
-_func_enter_;
rtw_secmicsetkey(&micdata, key);
priority[0] = pri;
@@ -386,7 +356,6 @@ _func_enter_;
rtw_secmicappend(&micdata, data, data_len);
rtw_secgetmic(&micdata, mic_code);
-_func_exit_;
}
@@ -505,7 +474,6 @@ static const unsigned short Sbox1[2][256] = { /* Sbox for hash (can be in ROM)
static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
{
int i;
-_func_enter_;
/* Initialize the 80 bits of P1K[] from IV32 and TA[0..5] */
p1k[0] = Lo16(iv32);
p1k[1] = Hi16(iv32);
@@ -523,7 +491,6 @@ _func_enter_;
p1k[4] += _S_(p1k[3] ^ TK16((i&1)+0));
p1k[4] += (unsigned short)i; /* avoid "slide attacks" */
}
-_func_exit_;
}
/*
@@ -553,7 +520,6 @@ static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
{
int i;
u16 PPK[6]; /* temporary key for mixing */
-_func_enter_;
/* Note: all adds in the PPK[] equations below are mod 2**16 */
for (i = 0; i < 5; i++)
PPK[i] = p1k[i]; /* first, copy P1K to PPK */
@@ -590,7 +556,6 @@ _func_enter_;
rc4key[4+2*i] = Lo8(PPK[i]);
rc4key[5+2*i] = Hi8(PPK[i]);
}
-_func_exit_;
}
/* The hlen isn't include the IV */
@@ -612,7 +577,6 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
u32 res = _SUCCESS;
-_func_enter_;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
return _FAIL;
@@ -672,7 +636,6 @@ _func_enter_;
res = _FAIL;
}
}
-_func_exit_;
return res;
}
@@ -690,13 +653,12 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
+ struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
u32 res = _SUCCESS;
-_func_enter_;
- pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+ pframe = (unsigned char *)((struct recv_frame *)precvframe)->rx_data;
/* 4 start to decrypt recvframe */
if (prxattrib->encrypt == _TKIP_) {
@@ -716,7 +678,7 @@ _func_enter_;
iv = pframe+prxattrib->hdrlen;
payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
- length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+ length = ((struct recv_frame *)precvframe)->len-prxattrib->hdrlen-prxattrib->iv_len;
GET_TKIP_PN(iv, dot11txpn);
@@ -747,7 +709,6 @@ _func_enter_;
res = _FAIL;
}
}
-_func_exit_;
exit:
return res;
}
@@ -821,19 +782,15 @@ static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext);
static void xor_128(u8 *a, u8 *b, u8 *out)
{
int i;
-_func_enter_;
for (i = 0; i < 16; i++)
out[i] = a[i] ^ b[i];
-_func_exit_;
}
static void xor_32(u8 *a, u8 *b, u8 *out)
{
int i;
-_func_enter_;
for (i = 0; i < 4; i++)
out[i] = a[i] ^ b[i];
-_func_exit_;
}
static u8 sbox(u8 a)
@@ -849,7 +806,6 @@ static void next_key(u8 *key, int round)
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
0x1b, 0x36, 0x36, 0x36
};
-_func_enter_;
sbox_key[0] = sbox(key[13]);
sbox_key[1] = sbox(key[14]);
sbox_key[2] = sbox(key[15]);
@@ -863,21 +819,17 @@ _func_enter_;
xor_32(&key[4], &key[0], &key[4]);
xor_32(&key[8], &key[4], &key[8]);
xor_32(&key[12], &key[8], &key[12]);
-_func_exit_;
}
static void byte_sub(u8 *in, u8 *out)
{
int i;
-_func_enter_;
for (i = 0; i < 16; i++)
out[i] = sbox(in[i]);
-_func_exit_;
}
static void shift_row(u8 *in, u8 *out)
{
-_func_enter_;
out[0] = in[0];
out[1] = in[5];
out[2] = in[10];
@@ -894,7 +846,6 @@ _func_enter_;
out[13] = in[1];
out[14] = in[6];
out[15] = in[11];
-_func_exit_;
}
static void mix_column(u8 *in, u8 *out)
@@ -908,7 +859,6 @@ static void mix_column(u8 *in, u8 *out)
u8 rotr[4];
u8 temp[4];
u8 tempb[4];
-_func_enter_;
for (i = 0 ; i < 4; i++) {
if ((in[i] & 0x80) == 0x80)
add1b[i] = 0x1b;
@@ -952,7 +902,6 @@ _func_enter_;
xor_32(add1bf7, rotr, temp);
xor_32(swap_halfs, rotl, tempb);
xor_32(temp, tempb, out);
-_func_exit_;
}
static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
@@ -962,7 +911,6 @@ static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
u8 intermediatea[16];
u8 intermediateb[16];
u8 round_key[16];
-_func_enter_;
for (i = 0; i < 16; i++)
round_key[i] = key[i];
for (round = 0; round < 11; round++) {
@@ -984,7 +932,6 @@ _func_enter_;
next_key(round_key, round);
}
}
-_func_exit_;
}
/************************************************/
@@ -995,7 +942,6 @@ static void construct_mic_iv(u8 *mic_iv, int qc_exists, int a4_exists, u8 *mpdu,
uint payload_length, u8 *pn_vector)
{
int i;
-_func_enter_;
mic_iv[0] = 0x59;
if (qc_exists && a4_exists)
mic_iv[1] = mpdu[30] & 0x0f; /* QoS_TC */
@@ -1009,7 +955,6 @@ _func_enter_;
mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
mic_iv[14] = (unsigned char) (payload_length / 256);
mic_iv[15] = (unsigned char) (payload_length % 256);
-_func_exit_;
}
/************************************************/
@@ -1019,7 +964,6 @@ _func_exit_;
/************************************************/
static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu)
{
-_func_enter_;
mic_header1[0] = (u8)((header_length - 2) / 256);
mic_header1[1] = (u8)((header_length - 2) % 256);
mic_header1[2] = mpdu[0] & 0xcf; /* Mute CF poll & CF ack bits */
@@ -1036,7 +980,6 @@ _func_enter_;
mic_header1[13] = mpdu[13];
mic_header1[14] = mpdu[14];
mic_header1[15] = mpdu[15];
-_func_exit_;
}
/************************************************/
@@ -1047,7 +990,6 @@ _func_exit_;
static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists, int qc_exists)
{
int i;
-_func_enter_;
for (i = 0; i < 16; i++)
mic_header2[i] = 0x00;
@@ -1079,7 +1021,6 @@ _func_enter_;
mic_header2[15] = mpdu[31] & 0x00;
}
-_func_exit_;
}
/************************************************/
@@ -1090,7 +1031,6 @@ _func_exit_;
static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c)
{
int i;
-_func_enter_;
for (i = 0; i < 16; i++)
ctr_preload[i] = 0x00;
i = 0;
@@ -1107,7 +1047,6 @@ _func_enter_;
ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */
ctr_preload[15] = (unsigned char) (c % 256);
-_func_exit_;
}
/************************************/
@@ -1117,10 +1056,8 @@ _func_exit_;
static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
{
int i;
-_func_enter_;
for (i = 0; i < 16; i++)
out[i] = ina[i] ^ inb[i];
-_func_exit_;
}
static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
@@ -1142,7 +1079,6 @@ static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
uint frtype = GetFrameType(pframe);
uint frsubtype = GetFrameSubType(pframe);
-_func_enter_;
frsubtype = frsubtype>>4;
_rtw_memset((void *)mic_iv, 0, 16);
@@ -1253,7 +1189,6 @@ _func_enter_;
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < 8; j++)
pframe[payload_index++] = chain_buffer[j];
-_func_exit_;
return _SUCCESS;
}
@@ -1274,7 +1209,6 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* uint offset = 0; */
u32 res = _SUCCESS;
-_func_enter_;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
return _FAIL;
@@ -1318,7 +1252,6 @@ _func_enter_;
}
-_func_exit_;
return res;
}
@@ -1344,7 +1277,6 @@ static int aes_decipher(u8 *key, uint hdrlen,
/* uint offset = 0; */
uint frtype = GetFrameType(pframe);
uint frsubtype = GetFrameSubType(pframe);
-_func_enter_;
frsubtype = frsubtype>>4;
_rtw_memset((void *)mic_iv, 0, 16);
@@ -1514,7 +1446,6 @@ _func_enter_;
res = _FAIL;
}
}
-_func_exit_;
return res;
}
@@ -1524,11 +1455,10 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
int length;
u8 *pframe, *prwskey; /* *payload,*iv */
struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
+ struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
u32 res = _SUCCESS;
-_func_enter_;
- pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+ pframe = (unsigned char *)((struct recv_frame *)precvframe)->rx_data;
/* 4 start to encrypt each fragment */
if ((prxattrib->encrypt == _AES_)) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
@@ -1552,14 +1482,13 @@ _func_enter_;
} else {
prwskey = &stainfo->dot118021x_UncstKey.skey[0];
}
- length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+ length = ((struct recv_frame *)precvframe)->len-prxattrib->hdrlen-prxattrib->iv_len;
res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
} else {
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_encrypt: stainfo==NULL!!!\n"));
res = _FAIL;
}
}
-_func_exit_;
exit:
return res;
}
@@ -1767,7 +1696,6 @@ void rtw_use_tkipkey_handler(void *FunctionContext)
{
struct adapter *padapter = (struct adapter *)FunctionContext;
-_func_enter_;
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("^^^rtw_use_tkipkey_handler ^^^\n"));
@@ -1775,5 +1703,4 @@ _func_enter_;
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("^^^rtw_use_tkipkey_handler padapter->securitypriv.busetkipkey=%d^^^\n", padapter->securitypriv.busetkipkey));
-_func_exit_;
}
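The rtw_security.c hunks are mostly removals of the _func_enter_/_func_exit_ trace macros; the only control-flow change is in crc32_init(), where the goto to an empty exit label becomes an early return. Below is a hedged, standalone sketch of that one-time-init-then-table-lookup CRC-32 pattern; it uses the standard reflected polynomial, and the constants and helper names are illustrative rather than copied from the driver:

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_table[256];
static int bcrc32initialized;

static void crc32_init(void)
{
	if (bcrc32initialized)
		return;				/* was "goto exit" */

	for (uint32_t i = 0; i < 256; i++) {
		uint32_t c = i;
		for (int j = 0; j < 8; j++)
			c = (c & 1) ? (c >> 1) ^ 0xEDB88320u : c >> 1;
		crc32_table[i] = c;
	}
	bcrc32initialized = 1;
}

static uint32_t getcrc32(const uint8_t *buf, int len)
{
	uint32_t crc = 0xFFFFFFFFu;

	crc32_init();
	for (const uint8_t *p = buf; len > 0; ++p, --len)
		crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
	return ~crc;				/* transmit complement, per CRC-32 */
}

int main(void)
{
	const uint8_t msg[] = "123456789";

	printf("crc32 = 0x%08x\n", getcrc32(msg, 9));	/* expect 0xCBF43926 */
	return 0;
}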
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 02e1e1f8b3ea..2d0b60686a01 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -25,10 +25,10 @@
#include <xmit_osdep.h>
#include <mlme_osdep.h>
#include <sta_info.h>
+#include <linux/vmalloc.h>
static void _rtw_init_stainfo(struct sta_info *psta)
{
-_func_enter_;
_rtw_memset((u8 *)psta, 0, sizeof (struct sta_info));
spin_lock_init(&psta->lock);
@@ -69,7 +69,6 @@ _func_enter_;
#endif /* CONFIG_88EU_AP_MODE */
-_func_exit_;
}
u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
@@ -77,9 +76,8 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
struct sta_info *psta;
s32 i;
-_func_enter_;
- pstapriv->pallocated_stainfo_buf = rtw_zvmalloc(sizeof(struct sta_info) * NUM_STA + 4);
+ pstapriv->pallocated_stainfo_buf = vzalloc(sizeof(struct sta_info) * NUM_STA + 4);
if (!pstapriv->pallocated_stainfo_buf)
return _FAIL;
@@ -125,7 +123,6 @@ _func_enter_;
pstapriv->max_num_sta = NUM_STA;
#endif
-_func_exit_;
return _SUCCESS;
}
@@ -154,21 +151,19 @@ static void rtw_mfree_all_stainfo(struct sta_priv *pstapriv)
struct list_head *plist, *phead;
struct sta_info *psta = NULL;
-_func_enter_;
spin_lock_bh(&pstapriv->sta_hash_lock);
phead = get_list_head(&pstapriv->free_sta_queue);
- plist = get_next(phead);
+ plist = phead->next;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info , list);
- plist = get_next(plist);
+ psta = container_of(plist, struct sta_info , list);
+ plist = plist->next;
}
spin_unlock_bh(&pstapriv->sta_hash_lock);
-_func_exit_;
}
static void rtw_mfree_sta_priv_lock(struct sta_priv *pstapriv)
@@ -183,22 +178,21 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
struct recv_reorder_ctrl *preorder_ctrl;
int index;
-_func_enter_;
if (pstapriv) {
/* delete all reordering_ctrl_timer */
spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
phead = &(pstapriv->sta_hash[index]);
- plist = get_next(phead);
+ plist = phead->next;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
int i;
- psta = LIST_CONTAINOR(plist, struct sta_info , hash_list);
- plist = get_next(plist);
+ psta = container_of(plist, struct sta_info , hash_list);
+ plist = plist->next;
for (i = 0; i < 16; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
- _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
}
}
}
@@ -208,10 +202,9 @@ _func_enter_;
rtw_mfree_sta_priv_lock(pstapriv);
if (pstapriv->pallocated_stainfo_buf)
- rtw_vmfree(pstapriv->pallocated_stainfo_buf, sizeof(struct sta_info)*NUM_STA+4);
+ vfree(pstapriv->pallocated_stainfo_buf);
}
-_func_exit_;
return _SUCCESS;
}
@@ -225,7 +218,6 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
int i = 0;
u16 wRxSeqInitialValue = 0xffff;
-_func_enter_;
pfree_sta_queue = &pstapriv->free_sta_queue;
@@ -235,7 +227,7 @@ _func_enter_;
spin_unlock_bh(&pfree_sta_queue->lock);
psta = NULL;
} else {
- psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue), struct sta_info, list);
+ psta = container_of((&pfree_sta_queue->queue)->next, struct sta_info, list);
rtw_list_delete(&(psta->list));
spin_unlock_bh(&pfree_sta_queue->lock);
_rtw_init_stainfo(psta);
@@ -297,9 +289,6 @@ _func_enter_;
}
exit:
-
-_func_exit_;
-
return psta;
}
@@ -313,7 +302,6 @@ u32 rtw_free_stainfo(struct adapter *padapter , struct sta_info *psta)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
-_func_enter_;
if (psta == NULL)
goto exit;
@@ -353,32 +341,34 @@ _func_enter_;
_rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
_rtw_init_sta_recv_priv(&psta->sta_recvpriv);
- _cancel_timer_ex(&psta->addba_retry_timer);
+ del_timer_sync(&psta->addba_retry_timer);
/* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
for (i = 0; i < 16; i++) {
struct list_head *phead, *plist;
- union recv_frame *prframe;
+ struct recv_frame *prhdr;
+ struct recv_frame *prframe;
struct __queue *ppending_recvframe_queue;
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
preorder_ctrl = &psta->recvreorder_ctrl[i];
- _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
spin_lock_bh(&ppending_recvframe_queue->lock);
phead = get_list_head(ppending_recvframe_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (!rtw_is_list_empty(phead)) {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prhdr = container_of(plist, struct recv_frame, list);
+ prframe = (struct recv_frame *)prhdr;
- plist = get_next(plist);
+ plist = plist->next;
- rtw_list_delete(&(prframe->u.hdr.list));
+ rtw_list_delete(&(prframe->list));
rtw_free_recvframe(prframe, pfree_recv_queue);
}
@@ -428,7 +418,6 @@ _func_enter_;
exit:
-_func_exit_;
return _SUCCESS;
}
@@ -442,32 +431,26 @@ void rtw_free_all_stainfo(struct adapter *padapter)
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter);
-_func_enter_;
if (pstapriv->asoc_sta_count == 1)
- goto exit;
+ return;
spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
phead = &(pstapriv->sta_hash[index]);
- plist = get_next(phead);
+ plist = phead->next;
while ((!rtw_end_of_queue_search(phead, plist))) {
- psta = LIST_CONTAINOR(plist, struct sta_info , hash_list);
+ psta = container_of(plist, struct sta_info , hash_list);
- plist = get_next(plist);
+ plist = plist->next;
if (pbcmc_stainfo != psta)
rtw_free_stainfo(padapter , psta);
}
}
-
spin_unlock_bh(&pstapriv->sta_hash_lock);
-
-exit:
-
-_func_exit_;
}
/* any station allocated can be searched by hash list */
@@ -479,7 +462,6 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
u8 *addr;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-_func_enter_;
if (hwaddr == NULL)
return NULL;
@@ -494,21 +476,20 @@ _func_enter_;
spin_lock_bh(&pstapriv->sta_hash_lock);
phead = &(pstapriv->sta_hash[index]);
- plist = get_next(phead);
+ plist = phead->next;
while ((!rtw_end_of_queue_search(phead, plist))) {
- psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ psta = container_of(plist, struct sta_info, hash_list);
- if ((_rtw_memcmp(psta->hwaddr, addr, ETH_ALEN)) == true) {
+ if (!memcmp(psta->hwaddr, addr, ETH_ALEN)) {
/* if found the matched address */
break;
}
psta = NULL;
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pstapriv->sta_hash_lock);
-_func_exit_;
return psta;
}
@@ -519,7 +500,6 @@ u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
unsigned char bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct sta_priv *pstapriv = &padapter->stapriv;
-_func_enter_;
psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
@@ -533,7 +513,6 @@ _func_enter_;
psta->mac_id = 1;
exit:
-_func_exit_;
return res;
}
@@ -542,9 +521,7 @@ struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-_func_enter_;
psta = rtw_get_stainfo(pstapriv, bc_addr);
-_func_exit_;
return psta;
}
@@ -561,12 +538,12 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
+ plist = phead->next;
while ((!rtw_end_of_queue_search(phead, plist))) {
- paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+ plist = plist->next;
- if (_rtw_memcmp(paclnode->addr, mac_addr, ETH_ALEN)) {
+ if (!memcmp(paclnode->addr, mac_addr, ETH_ALEN)) {
if (paclnode->valid) {
match = true;
break;
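The rtw_sta_mgt.c hunks above swap the driver's private wrappers for the core kernel APIs: rtw_zvmalloc()/rtw_vmfree() become vzalloc()/vfree() (hence the new #include <linux/vmalloc.h>, and no size argument on free), and _cancel_timer_ex() becomes del_timer_sync(). A minimal kernel-style sketch of the allocation half, assuming a hypothetical buffer and helper names that exist only for illustration:

#include <linux/vmalloc.h>
#include <linux/errno.h>

static void *stainfo_buf;	/* hypothetical buffer, for illustration only */

static int stainfo_buf_alloc(unsigned long size)
{
	stainfo_buf = vzalloc(size);	/* already zeroed; was rtw_zvmalloc(size) */
	if (!stainfo_buf)
		return -ENOMEM;
	return 0;
}

static void stainfo_buf_free(void)
{
	vfree(stainfo_buf);		/* no size argument; was rtw_vmfree(buf, size) */
	stainfo_buf = NULL;
}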
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 96df62f95b6b..3dd90599fd4b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -929,7 +929,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
return _FAIL;
}
- if (_rtw_memcmp(cur_network->network.MacAddress, pbssid, 6) == false) {
+ if (memcmp(cur_network->network.MacAddress, pbssid, 6)) {
DBG_88E("Oops: rtw_check_network_encrypt linked but recv other bssid bcn\n%pM %pM\n",
(pbssid), (cur_network->network.MacAddress));
return true;
@@ -1014,7 +1014,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
bssid->Ssid.SsidLength, cur_network->network.Ssid.Ssid,
cur_network->network.Ssid.SsidLength));
- if (!_rtw_memcmp(bssid->Ssid.Ssid, cur_network->network.Ssid.Ssid, 32) ||
+ if (memcmp(bssid->Ssid.Ssid, cur_network->network.Ssid.Ssid, 32) ||
bssid->Ssid.SsidLength != cur_network->network.Ssid.SsidLength) {
if (bssid->Ssid.Ssid[0] != '\0' && bssid->Ssid.SsidLength != 0) { /* not hidden ssid */
DBG_88E("%s(), SSID is not match return FAIL\n", __func__);
@@ -1050,7 +1050,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
if (cur_network->BcnInfo.encryp_protocol != encryp_protocol) {
- DBG_88E("%s(): enctyp is not match , return FAIL\n", __func__);
+ DBG_88E("%s(): encryption protocol is not match , return FAIL\n", __func__);
goto _mismatch;
}
@@ -1090,12 +1090,10 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
kfree(bssid);
- _func_exit_;
return _SUCCESS;
_mismatch:
kfree(bssid);
- _func_exit_;
return _FAIL;
}
@@ -1141,11 +1139,11 @@ unsigned int is_ap_in_tkip(struct adapter *padapter)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if ((_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4)) && (_rtw_memcmp((pIE->data + 12), WPA_TKIP_CIPHER, 4)))
+ if ((!memcmp(pIE->data, RTW_WPA_OUI, 4)) && (!memcmp((pIE->data + 12), WPA_TKIP_CIPHER, 4)))
return true;
break;
case _RSN_IE_2_:
- if (_rtw_memcmp((pIE->data + 8), RSN_TKIP_CIPHER, 4))
+ if (!memcmp((pIE->data + 8), RSN_TKIP_CIPHER, 4))
return true;
default:
break;
@@ -1172,14 +1170,14 @@ unsigned int should_forbid_n_rate(struct adapter *padapter)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if (_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4) &&
- ((_rtw_memcmp((pIE->data + 12), WPA_CIPHER_SUITE_CCMP, 4)) ||
- (_rtw_memcmp((pIE->data + 16), WPA_CIPHER_SUITE_CCMP, 4))))
+ if (!memcmp(pIE->data, RTW_WPA_OUI, 4) &&
+ ((!memcmp((pIE->data + 12), WPA_CIPHER_SUITE_CCMP, 4)) ||
+ (!memcmp((pIE->data + 16), WPA_CIPHER_SUITE_CCMP, 4))))
return false;
break;
case _RSN_IE_2_:
- if ((_rtw_memcmp((pIE->data + 8), RSN_CIPHER_SUITE_CCMP, 4)) ||
- (_rtw_memcmp((pIE->data + 12), RSN_CIPHER_SUITE_CCMP, 4)))
+ if ((!memcmp((pIE->data + 8), RSN_CIPHER_SUITE_CCMP, 4)) ||
+ (!memcmp((pIE->data + 12), RSN_CIPHER_SUITE_CCMP, 4)))
return false;
default:
break;
@@ -1208,7 +1206,7 @@ unsigned int is_ap_in_wep(struct adapter *padapter)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if (_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4))
+ if (!memcmp(pIE->data, RTW_WPA_OUI, 4))
return false;
break;
case _RSN_IE_2_:
@@ -1400,35 +1398,35 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if ((_rtw_memcmp(pIE->data, ARTHEROS_OUI1, 3)) ||
- (_rtw_memcmp(pIE->data, ARTHEROS_OUI2, 3))) {
+ if ((!memcmp(pIE->data, ARTHEROS_OUI1, 3)) ||
+ (!memcmp(pIE->data, ARTHEROS_OUI2, 3))) {
DBG_88E("link to Artheros AP\n");
return HT_IOT_PEER_ATHEROS;
- } else if ((_rtw_memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
- (_rtw_memcmp(pIE->data, BROADCOM_OUI2, 3)) ||
- (_rtw_memcmp(pIE->data, BROADCOM_OUI2, 3))) {
+ } else if ((!memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
+ (!memcmp(pIE->data, BROADCOM_OUI2, 3)) ||
+ (!memcmp(pIE->data, BROADCOM_OUI2, 3))) {
DBG_88E("link to Broadcom AP\n");
return HT_IOT_PEER_BROADCOM;
- } else if (_rtw_memcmp(pIE->data, MARVELL_OUI, 3)) {
+ } else if (!memcmp(pIE->data, MARVELL_OUI, 3)) {
DBG_88E("link to Marvell AP\n");
return HT_IOT_PEER_MARVELL;
- } else if (_rtw_memcmp(pIE->data, RALINK_OUI, 3)) {
+ } else if (!memcmp(pIE->data, RALINK_OUI, 3)) {
if (!ralink_vendor_flag) {
ralink_vendor_flag = 1;
} else {
DBG_88E("link to Ralink AP\n");
return HT_IOT_PEER_RALINK;
}
- } else if (_rtw_memcmp(pIE->data, CISCO_OUI, 3)) {
+ } else if (!memcmp(pIE->data, CISCO_OUI, 3)) {
DBG_88E("link to Cisco AP\n");
return HT_IOT_PEER_CISCO;
- } else if (_rtw_memcmp(pIE->data, REALTEK_OUI, 3)) {
+ } else if (!memcmp(pIE->data, REALTEK_OUI, 3)) {
DBG_88E("link to Realtek 96B\n");
return HT_IOT_PEER_REALTEK;
- } else if (_rtw_memcmp(pIE->data, AIRGOCAP_OUI, 3)) {
+ } else if (!memcmp(pIE->data, AIRGOCAP_OUI, 3)) {
DBG_88E("link to Airgo Cap\n");
return HT_IOT_PEER_AIRGO;
- } else if (_rtw_memcmp(pIE->data, EPIGRAM_OUI, 3)) {
+ } else if (!memcmp(pIE->data, EPIGRAM_OUI, 3)) {
epigram_vendor_flag = 1;
if (ralink_vendor_flag) {
DBG_88E("link to Tenda W311R AP\n");
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 24182fbc6a71..8d4265fb486d 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -23,25 +23,22 @@
#include <drv_types.h>
#include <wifi.h>
#include <osdep_intf.h>
-#include <ip.h>
#include <usb_ops.h>
#include <usb_osintf.h>
+#include <linux/vmalloc.h>
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
static void _init_txservq(struct tx_servq *ptxservq)
{
-_func_enter_;
_rtw_init_listhead(&ptxservq->tx_pending);
_rtw_init_queue(&ptxservq->sta_pending);
ptxservq->qcnt = 0;
-_func_exit_;
}
void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
{
-_func_enter_;
_rtw_memset((unsigned char *)psta_xmitpriv, 0, sizeof (struct sta_xmit_priv));
spin_lock_init(&psta_xmitpriv->lock);
_init_txservq(&psta_xmitpriv->be_q);
@@ -51,7 +48,6 @@ _func_enter_;
_rtw_init_listhead(&psta_xmitpriv->legacy_dz);
_rtw_init_listhead(&psta_xmitpriv->apsd);
-_func_exit_;
}
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
@@ -63,9 +59,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
-_func_enter_;
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
spin_lock_init(&pxmitpriv->lock);
sema_init(&pxmitpriv->xmit_sema, 0);
@@ -91,7 +86,7 @@ _func_enter_;
Please also apply free_txobj to link_up all the xmit_frames...
*/
- pxmitpriv->pallocated_frame_buf = rtw_zvmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
+ pxmitpriv->pallocated_frame_buf = vzalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
if (pxmitpriv->pallocated_frame_buf == NULL) {
pxmitpriv->pxmit_frame_buf = NULL;
@@ -129,7 +124,7 @@ _func_enter_;
_rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
_rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
- pxmitpriv->pallocated_xmitbuf = rtw_zvmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
+ pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
if (pxmitpriv->pallocated_xmitbuf == NULL) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_buf fail!\n"));
@@ -171,7 +166,7 @@ _func_enter_;
/* Init xmit extension buff */
_rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
- pxmitpriv->pallocated_xmit_extbuf = rtw_zvmalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+ pxmitpriv->pallocated_xmit_extbuf = vzalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n"));
@@ -226,7 +221,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -240,12 +234,11 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
- _func_enter_;
rtw_hal_free_xmit_priv(padapter);
if (pxmitpriv->pxmit_frame_buf == NULL)
- goto out;
+ return;
for (i = 0; i < NR_XMITFRAME; i++) {
rtw_os_xmit_complete(padapter, pxmitframe);
@@ -259,10 +252,10 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
}
if (pxmitpriv->pallocated_frame_buf)
- rtw_vmfree(pxmitpriv->pallocated_frame_buf, NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
+ vfree(pxmitpriv->pallocated_frame_buf);
if (pxmitpriv->pallocated_xmitbuf)
- rtw_vmfree(pxmitpriv->pallocated_xmitbuf, NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
+ vfree(pxmitpriv->pallocated_xmitbuf);
/* free xmit extension buff */
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
@@ -272,16 +265,12 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
}
if (pxmitpriv->pallocated_xmit_extbuf) {
- rtw_vmfree(pxmitpriv->pallocated_xmit_extbuf, num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+ vfree(pxmitpriv->pallocated_xmit_extbuf);
}
rtw_free_hwxmits(padapter);
mutex_destroy(&pxmitpriv->ack_tx_mutex);
-
-out:
-
-_func_exit_;
}
static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -455,7 +444,6 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
int res = _SUCCESS;
- _func_enter_;
_rtw_open_pktfile(pkt, &pktfile);
_rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
@@ -639,7 +627,6 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
exit:
-_func_exit_;
return res;
}
@@ -662,7 +649,6 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
else
stainfo = rtw_get_stainfo(&padapter->stapriv , &pattrib->ra[0]);
-_func_enter_;
hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
@@ -676,12 +662,12 @@ _func_enter_;
pframe = pxmitframe->buf_addr + hw_hdr_offset;
if (bmcst) {
- if (_rtw_memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16))
+ if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16))
return _FAIL;
/* start to calculate the mic code */
rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
} else {
- if (_rtw_memcmp(&stainfo->dot11tkiptxmickey.skey[0], null_key, 16) == true) {
+ if (!memcmp(&stainfo->dot11tkiptxmickey.skey[0], null_key, 16)) {
/* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
/* msleep(10); */
return _FAIL;
@@ -760,7 +746,6 @@ _func_enter_;
}
}
-_func_exit_;
return _SUCCESS;
}
@@ -769,7 +754,6 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
{
struct pkt_attrib *pattrib = &pxmitframe->attrib;
-_func_enter_;
if (pattrib->bswenc) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("### xmitframe_swencrypt\n"));
@@ -791,7 +775,6 @@ _func_enter_;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("### xmitframe_hwencrypt\n"));
}
-_func_exit_;
return _SUCCESS;
}
@@ -812,7 +795,6 @@ s32 rtw_make_wlanhdr (struct adapter *padapter , u8 *hdr, struct pkt_attrib *pat
int bmcst = IS_MCAST(pattrib->ra);
-_func_enter_;
if (pattrib->psta) {
psta = pattrib->psta;
@@ -918,7 +900,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return res;
}
@@ -1007,7 +988,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
s32 bmcst = IS_MCAST(pattrib->ra);
s32 res = _SUCCESS;
-_func_enter_;
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
@@ -1145,7 +1125,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1162,7 +1141,6 @@ s32 rtw_put_snap(u8 *data, u16 h_proto)
struct ieee80211_snap_hdr *snap;
u8 *oui;
-_func_enter_;
snap = (struct ieee80211_snap_hdr *)data;
snap->dsap = 0xaa;
@@ -1180,7 +1158,6 @@ _func_enter_;
*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
-_func_exit_;
return SNAP_SIZE + sizeof(u16);
}
@@ -1193,7 +1170,6 @@ void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
-_func_enter_;
switch (pxmitpriv->vcs_setting) {
case DISABLE_VCS:
@@ -1220,7 +1196,6 @@ _func_enter_;
break;
}
-_func_exit_;
}
void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz)
@@ -1250,7 +1225,6 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
struct list_head *plist, *phead;
struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
-_func_enter_;
spin_lock_irqsave(&pfree_queue->lock, irql);
@@ -1259,9 +1233,9 @@ _func_enter_;
} else {
phead = get_list_head(pfree_queue);
- plist = get_next(phead);
+ plist = phead->next;
- pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+ pxmitbuf = container_of(plist, struct xmit_buf, list);
rtw_list_delete(&(pxmitbuf->list));
}
@@ -1280,7 +1254,6 @@ _func_enter_;
spin_unlock_irqrestore(&pfree_queue->lock, irql);
-_func_exit_;
return pxmitbuf;
}
@@ -1290,7 +1263,6 @@ s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
unsigned long irql;
struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
-_func_enter_;
if (pxmitbuf == NULL)
return _FAIL;
@@ -1304,7 +1276,6 @@ _func_enter_;
spin_unlock_irqrestore(&pfree_queue->lock, irql);
-_func_exit_;
return _SUCCESS;
}
@@ -1316,7 +1287,6 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
struct list_head *plist, *phead;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
-_func_enter_;
/* DBG_88E("+rtw_alloc_xmitbuf\n"); */
@@ -1327,9 +1297,9 @@ _func_enter_;
} else {
phead = get_list_head(pfree_xmitbuf_queue);
- plist = get_next(phead);
+ plist = phead->next;
- pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+ pxmitbuf = container_of(plist, struct xmit_buf, list);
rtw_list_delete(&(pxmitbuf->list));
}
@@ -1344,7 +1314,6 @@ _func_enter_;
}
spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
-_func_exit_;
return pxmitbuf;
}
@@ -1354,7 +1323,6 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
unsigned long irql;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
-_func_enter_;
if (pxmitbuf == NULL)
return _FAIL;
@@ -1376,7 +1344,6 @@ _func_enter_;
spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
}
-_func_exit_;
return _SUCCESS;
}
@@ -1405,7 +1372,6 @@ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pf
struct list_head *plist, *phead;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
-_func_enter_;
spin_lock_bh(&pfree_xmit_queue->lock);
@@ -1415,9 +1381,9 @@ _func_enter_;
} else {
phead = get_list_head(pfree_xmit_queue);
- plist = get_next(phead);
+ plist = phead->next;
- pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+ pxframe = container_of(plist, struct xmit_frame, list);
rtw_list_delete(&(pxframe->list));
}
@@ -1444,7 +1410,6 @@ _func_enter_;
spin_unlock_bh(&pfree_xmit_queue->lock);
-_func_exit_;
return pxframe;
}
@@ -1455,7 +1420,6 @@ s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitfram
struct adapter *padapter = pxmitpriv->adapter;
struct sk_buff *pndis_pkt = NULL;
-_func_enter_;
if (pxmitframe == NULL) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====== rtw_free_xmitframe():pxmitframe == NULL!!!!!!!!!!\n"));
@@ -1483,7 +1447,6 @@ _func_enter_;
exit:
-_func_exit_;
return _SUCCESS;
}
@@ -1493,23 +1456,21 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
struct list_head *plist, *phead;
struct xmit_frame *pxmitframe;
-_func_enter_;
spin_lock_bh(&(pframequeue->lock));
phead = get_list_head(pframequeue);
- plist = get_next(phead);
+ plist = phead->next;
while (!rtw_end_of_queue_search(phead, plist)) {
- pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+ pxmitframe = container_of(plist, struct xmit_frame, list);
- plist = get_next(plist);
+ plist = plist->next;
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
spin_unlock_bh(&(pframequeue->lock));
-_func_exit_;
}
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -1530,12 +1491,12 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, str
struct xmit_frame *pxmitframe = NULL;
xmitframe_phead = get_list_head(pframe_queue);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
if (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
rtw_list_delete(&pxmitframe->list);
@@ -1555,7 +1516,6 @@ struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmi
struct registry_priv *pregpriv = &padapter->registrypriv;
int i, inx[4];
-_func_enter_;
inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
@@ -1572,10 +1532,10 @@ _func_enter_;
phwxmit = phwxmit_i + inx[i];
sta_phead = get_list_head(phwxmit->sta_queue);
- sta_plist = get_next(sta_phead);
+ sta_plist = sta_phead->next;
while (!rtw_end_of_queue_search(sta_phead, sta_plist)) {
- ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq, tx_pending);
+ ptxservq = container_of(sta_plist, struct tx_servq, tx_pending);
pframe_queue = &ptxservq->sta_pending;
@@ -1590,12 +1550,11 @@ _func_enter_;
goto exit;
}
- sta_plist = get_next(sta_plist);
+ sta_plist = sta_plist->next;
}
}
exit:
spin_unlock_bh(&pxmitpriv->lock);
-_func_exit_;
return pxmitframe;
}
@@ -1603,7 +1562,6 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
{
struct tx_servq *ptxservq;
-_func_enter_;
switch (up) {
case 1:
case 2:
@@ -1632,7 +1590,6 @@ _func_enter_;
break;
}
-_func_exit_;
return ptxservq;
}
@@ -1651,7 +1608,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
int res = _SUCCESS;
-_func_enter_;
if (pattrib->psta) {
psta = pattrib->psta;
@@ -1676,7 +1632,6 @@ _func_enter_;
phwxmits[ac_index].accnt++;
exit:
-_func_exit_;
return res;
}
@@ -1719,10 +1674,8 @@ void rtw_free_hwxmits(struct adapter *padapter)
void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry)
{
int i;
-_func_enter_;
for (i = 0; i < entry; i++, phwxmit++)
phwxmit->accnt = 0;
-_func_exit_;
}
static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
@@ -1997,7 +1950,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
pstapriv->tim_bitmap |= BIT(0);/* */
pstapriv->sta_dz_bitmap |= BIT(0);
- update_beacon(padapter, _TIM_IE_, NULL, false);/* tx bc/mc packets after upate bcn */
+ update_beacon(padapter, _TIM_IE_, NULL, false);/* tx bc/mc packets after update bcn */
ret = true;
}
@@ -2047,7 +2000,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
pstapriv->tim_bitmap |= BIT(psta->aid);
if (psta->sleepq_len == 1) {
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
update_beacon(padapter, _TIM_IE_, NULL, false);
}
}
@@ -2070,12 +2023,12 @@ static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struc
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
phead = get_list_head(pframequeue);
- plist = get_next(phead);
+ plist = phead->next;
while (!rtw_end_of_queue_search(phead, plist)) {
- pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+ pxmitframe = container_of(plist, struct xmit_frame, list);
- plist = get_next(plist);
+ plist = plist->next;
xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe);
@@ -2137,12 +2090,12 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
rtw_list_delete(&pxmitframe->list);
@@ -2218,12 +2171,12 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
spin_lock_bh(&psta_bmc->sleep_q.lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
rtw_list_delete(&pxmitframe->list);
@@ -2265,12 +2218,12 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
switch (pxmitframe->attrib.priority) {
case 1:
@@ -2316,7 +2269,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
update_beacon(padapter, _TIM_IE_, NULL, false);
}
}
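
The hunks above replace the driver's private get_next() and LIST_CONTAINOR() helpers with plain ->next walks and the kernel's container_of(). A minimal sketch of the resulting traversal pattern, with struct xmit_frame reduced to the one field the loop needs; the real code keeps rtw_end_of_queue_search() as its end test and returns frames to a pool rather than freeing them, and list_for_each_entry_safe() is the usual kernel spelling of the same loop:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>

struct xmit_frame {			/* reduced for illustration */
	struct list_head list;
	/* ... payload omitted ... */
};

static void drain_frames(struct list_head *phead)
{
	struct list_head *plist = phead->next;

	while (plist != phead) {	/* end-of-queue test */
		struct xmit_frame *f =
			container_of(plist, struct xmit_frame, list);

		plist = plist->next;	/* advance before the node is unlinked */
		list_del(&f->list);
		kfree(f);		/* the driver hands frames back to its pool instead */
	}
}
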
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index 3df33bc7197a..dea220b507ad 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -331,6 +331,7 @@ static void odm_RateDecision_8188E(struct odm_dm_struct *dm_odm,
static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_info *pRaInfo)
{ /* Wilson 2011/10/26 */
+ struct adapter *adapt = dm_odm->Adapter;
u32 MaskFromReg;
s8 i;
@@ -357,19 +358,19 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf
pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0000000d;
break;
case 12:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR0);
+ MaskFromReg = rtw_read32(adapt, REG_ARFR0);
pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
break;
case 13:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR1);
+ MaskFromReg = rtw_read32(adapt, REG_ARFR1);
pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
break;
case 14:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR2);
+ MaskFromReg = rtw_read32(adapt, REG_ARFR2);
pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
break;
case 15:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR3);
+ MaskFromReg = rtw_read32(adapt, REG_ARFR3);
pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
break;
default:
@@ -667,7 +668,9 @@ void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rssi)
void ODM_RA_Set_TxRPT_Time(struct odm_dm_struct *dm_odm, u16 minRptTime)
{
- ODM_Write2Byte(dm_odm, REG_TX_RPT_TIME, minRptTime);
+ struct adapter *adapt = dm_odm->Adapter;
+
+ rtw_write16(adapt, REG_TX_RPT_TIME, minRptTime);
}
void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16 TxRPT_Len, u32 macid_entry0, u32 macid_entry1)
diff --git a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
index 15e8e3f62198..056052ddd82e 100644
--- a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
+++ b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
@@ -217,7 +217,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
for (i = 0; i < CCK_TABLE_SIZE; i++) {
if (dm_odm->RFCalibrateInfo.bCCKinCH14) {
- if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4) == 0) {
+ if (memcmp(&TempCCk, &CCKSwingTable_Ch14[i][2], 4)) {
CCK_index_old = (u8)i;
dm_odm->BbSwingIdxCckBase = (u8)i;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
@@ -229,7 +229,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("RegA24: 0x%X, CCKSwingTable_Ch1_Ch13[%d][2]: CCKSwingTable_Ch1_Ch13[i][2]: 0x%X\n",
TempCCk, i, CCKSwingTable_Ch1_Ch13[i][2]));
- if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4) == 0) {
+ if (memcmp(&TempCCk, &CCKSwingTable_Ch1_Ch13[i][2], 4)) {
CCK_index_old = (u8)i;
dm_odm->BbSwingIdxCckBase = (u8)i;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
@@ -839,9 +839,9 @@ static void _PHY_SaveMACRegisters(
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save MAC parameters.\n"));
for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
- MACBackup[i] = ODM_Read1Byte(dm_odm, MACReg[i]);
+ MACBackup[i] = rtw_read8(adapt, MACReg[i]);
}
- MACBackup[i] = ODM_Read4Byte(dm_odm, MACReg[i]);
+ MACBackup[i] = rtw_read32(adapt, MACReg[i]);
}
static void reload_adda_reg(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegiesterNum)
@@ -868,9 +868,9 @@ _PHY_ReloadMACRegisters(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload MAC parameters !\n"));
for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)MACBackup[i]);
+ rtw_write8(adapt, MACReg[i], (u8)MACBackup[i]);
}
- ODM_Write4Byte(dm_odm, MACReg[i], MACBackup[i]);
+ rtw_write32(adapt, MACReg[i], MACBackup[i]);
}
void
@@ -912,12 +912,12 @@ _PHY_MACSettingCalibration(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("MAC settings for Calibration.\n"));
- ODM_Write1Byte(dm_odm, MACReg[i], 0x3F);
+ rtw_write8(adapt, MACReg[i], 0x3F);
for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) {
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i]&(~BIT3)));
+ rtw_write8(adapt, MACReg[i], (u8)(MACBackup[i]&(~BIT3)));
}
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i]&(~BIT5)));
+ rtw_write8(adapt, MACReg[i], (u8)(MACBackup[i]&(~BIT5)));
}
void
@@ -1223,16 +1223,14 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
{
u8 tmpreg;
u32 RF_Amode = 0, RF_Bmode = 0, LC_Cal;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
/* Check continuous TX and Packet TX */
- tmpreg = ODM_Read1Byte(dm_odm, 0xd03);
+ tmpreg = rtw_read8(adapt, 0xd03);
if ((tmpreg&0x70) != 0) /* Deal with continuous TX case */
- ODM_Write1Byte(dm_odm, 0xd03, tmpreg&0x8F); /* disable all continuous TX */
+ rtw_write8(adapt, 0xd03, tmpreg&0x8F); /* disable all continuous TX */
else /* Deal with Packet TX case */
- ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0xFF); /* block all queues */
+ rtw_write8(adapt, REG_TXPAUSE, 0xFF); /* block all queues */
if ((tmpreg&0x70) != 0) {
/* 1. Read original RF mode */
@@ -1264,7 +1262,7 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
if ((tmpreg&0x70) != 0) {
/* Deal with continuous TX case */
/* Path-A */
- ODM_Write1Byte(dm_odm, 0xd03, tmpreg);
+ rtw_write8(adapt, 0xd03, tmpreg);
PHY_SetRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
/* Path-B */
@@ -1272,7 +1270,7 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
PHY_SetRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
} else {
/* Deal with Packet TX case */
- ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0x00);
+ rtw_write8(adapt, REG_TXPAUSE, 0x00);
}
}
@@ -1468,13 +1466,10 @@ void PHY_LCCalibrate_8188E(struct adapter *adapt)
static void phy_setrfpathswitch_8188e(struct adapter *adapt, bool main, bool is2t)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
if (!adapt->hw_init_completed) {
u8 u1btmp;
- u1btmp = ODM_Read1Byte(dm_odm, REG_LEDCFG2) | BIT7;
- ODM_Write1Byte(dm_odm, REG_LEDCFG2, u1btmp);
+ u1btmp = rtw_read8(adapt, REG_LEDCFG2) | BIT7;
+ rtw_write8(adapt, REG_LEDCFG2, u1btmp);
PHY_SetBBReg(adapt, rFPGA0_XAB_RFParameter, BIT13, 0x01);
}
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 598140464f05..d75ca7a27c86 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -116,8 +116,6 @@ uint rtw_hal_deinit(struct adapter *adapt)
{
uint status = _SUCCESS;
-_func_enter_;
-
status = adapt->HalFunc.hal_deinit(adapt);
if (status == _SUCCESS)
@@ -125,8 +123,6 @@ _func_enter_;
else
DBG_88E("\n rtw_hal_deinit: hal_init fail\n");
-_func_exit_;
-
return status;
}
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 3555ffaa4e06..89a26e3e217f 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -887,9 +887,10 @@ void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres)
{
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct adapter *adapt = pDM_Odm->Adapter;
if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres) /* modify by Guo.Mingzhi 2012-01-03 */
- ODM_Write1Byte(pDM_Odm, ODM_REG(CCK_CCA, pDM_Odm), CurCCK_CCAThres);
+ rtw_write8(adapt, ODM_REG(CCK_CCA, pDM_Odm), CurCCK_CCAThres);
pDM_DigTable->PreCCK_CCAThres = pDM_DigTable->CurCCK_CCAThres;
pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
}
@@ -1301,8 +1302,8 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
psta = pDM_Odm->pODM_StaInfo[i];
if (IS_STA_VALID(psta) &&
(psta->state & WIFI_ASOC_STATE) &&
- !_rtw_memcmp(psta->hwaddr, bcast_addr, ETH_ALEN) &&
- !_rtw_memcmp(psta->hwaddr, myid(&Adapter->eeprompriv), ETH_ALEN)) {
+ memcmp(psta->hwaddr, bcast_addr, ETH_ALEN) &&
+ memcmp(psta->hwaddr, myid(&Adapter->eeprompriv), ETH_ALEN)) {
if (psta->rssi_stat.UndecoratedSmoothedPWDB < tmpEntryMinPWDB)
tmpEntryMinPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
@@ -1433,10 +1434,10 @@ void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
Adapter->recvpriv.bIsAnyNonBEPkts = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial VO PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_VO_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial VI PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_VI_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial BE PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_BE_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial BK PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_BK_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial VO PARAM: 0x%x\n", rtw_read32(Adapter, ODM_EDCA_VO_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial VI PARAM: 0x%x\n", rtw_read32(Adapter, ODM_EDCA_VI_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial BE PARAM: 0x%x\n", rtw_read32(Adapter, ODM_EDCA_BE_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial BK PARAM: 0x%x\n", rtw_read32(Adapter, ODM_EDCA_BK_PARAM)));
} /* ODM_InitEdcaTurbo */
void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
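
One detail worth spelling out for the memcmp() conversions in this file and in HalPhyRf_8188e.c above: as the direction of the conversions implies, the removed _rtw_memcmp() wrapper reported equality as true, while memcmp() returns 0 on equality, so the sense flips at every call site. A short sketch of the mapping; rtw_memcmp_like() is a stand-in with the wrapper's semantics, not a function from the driver:

#include <linux/string.h>
#include <linux/types.h>

/* Stand-in with the old wrapper's semantics: true when the buffers match. */
static bool rtw_memcmp_like(const void *a, const void *b, size_t n)
{
	return memcmp(a, b, n) == 0;
}

/*
 * Call-site mapping used by the patch:
 *   if (_rtw_memcmp(a, b, n))   ->   if (!memcmp(a, b, n))   // buffers equal
 *   if (!_rtw_memcmp(a, b, n))  ->   if (memcmp(a, b, n))    // buffers differ
 */
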
diff --git a/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
index 6193d9fafb98..a9886122b459 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
@@ -66,7 +66,9 @@ void odm_ConfigRF_RadioB_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data
void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
{
- ODM_Write1Byte(pDM_Odm, Addr, Data);
+ struct adapter *adapt = pDM_Odm->Adapter;
+
+ rtw_write8(adapt, Addr, Data);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigMACWithHeaderFile: [MAC_REG] %08X %08X\n", Addr, Data));
}
diff --git a/drivers/staging/rtl8188eu/hal/odm_interface.c b/drivers/staging/rtl8188eu/hal/odm_interface.c
deleted file mode 100644
index 3cd68212afd1..000000000000
--- a/drivers/staging/rtl8188eu/hal/odm_interface.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-
-#include "odm_precomp.h"
-/* ODM IO Relative API. */
-
-u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return rtw_read8(Adapter, RegAddr);
-}
-
-u16 ODM_Read2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return rtw_read16(Adapter, RegAddr);
-}
-
-u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return rtw_read32(Adapter, RegAddr);
-}
-
-void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write8(Adapter, RegAddr, Data);
-}
-
-void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write16(Adapter, RegAddr, Data);
-}
-
-void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write32(Adapter, RegAddr, Data);
-}
-
-/* ODM Memory relative API. */
-void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length)
-{
- *pPtr = rtw_zvmalloc(length);
-}
-
-/* length could be ignored, used to detect memory leakage. */
-void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length)
-{
- rtw_vmfree(pPtr, length);
-}
-
-s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2, u32 length)
-{
- return _rtw_memcmp(pBuf1, pBuf2, length);
-}
-
-void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer, u32 msDelay)
-{
- _set_timer(pTimer, msDelay); /* ms */
-}
-
-void ODM_InitializeTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer,
- void *CallBackFunc, void *pContext,
- const char *szID)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- _init_timer(pTimer, Adapter->pnetdev, CallBackFunc, pDM_Odm);
-}
-
-void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
-{
- _cancel_timer_ex(pTimer);
-}
-
-/* ODM FW relative API. */
-u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
- u32 *pElementID, u32 *pCmdLen,
- u8 **pCmbBuffer, u8 *CmdStartSeq)
-{
- return true;
-}
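
With odm_interface.c deleted, the thin ODM I/O wrappers are gone and call sites reach the hardware through the adapter directly, fetching it from dm_odm->Adapter where it is not already at hand. A sketch of the converted call shape, assuming the driver's headers are on the include path; EXAMPLE_REG reuses the 0xd03 offset touched in phy_LCCalibrate_8188E() above purely for illustration:

#include <drv_types.h>		/* struct adapter, struct odm_dm_struct, rtw_write8() */

#define EXAMPLE_REG 0xd03	/* illustrative offset, see phy_LCCalibrate_8188E() above */

static void write_example_reg(struct odm_dm_struct *dm_odm, u8 val)
{
	struct adapter *adapt = dm_odm->Adapter;	/* lookup the wrapper used to do */

	/* was: ODM_Write1Byte(dm_odm, EXAMPLE_REG, val); */
	rtw_write8(adapt, EXAMPLE_REG, val);
}
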
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index ca0a7085445f..021e5879abcf 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -72,7 +72,6 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
u32 h2c_cmd_ex = 0;
s32 ret = _FAIL;
-_func_enter_;
if (!adapt->bFWReady) {
DBG_88E("FillH2CCmd_88E(): return H2C cmd because fw is not ready\n");
@@ -125,7 +124,6 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
@@ -134,7 +132,6 @@ u8 rtl8188e_set_rssi_cmd(struct adapter *adapt, u8 *param)
{
u8 res = _SUCCESS;
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-_func_enter_;
if (haldata->fw_ractrl) {
;
@@ -143,7 +140,6 @@ _func_enter_;
res = _FAIL;
}
-_func_exit_;
return res;
}
@@ -154,7 +150,6 @@ u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
u8 res = _SUCCESS;
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-_func_enter_;
if (haldata->fw_ractrl) {
__le32 lmask;
@@ -168,7 +163,6 @@ _func_enter_;
res = _FAIL;
}
-_func_exit_;
return res;
}
@@ -215,7 +209,6 @@ void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode)
struct setpwrmode_parm H2CSetPwrMode;
struct pwrctrl_priv *pwrpriv = &adapt->pwrctrlpriv;
u8 RLBM = 0; /* 0:Min, 1:Max, 2:User define */
-_func_enter_;
DBG_88E("%s: Mode=%d SmartPS=%d UAPSD=%d\n", __func__,
Mode, pwrpriv->smart_ps, adapt->registrypriv.uapsd_enable);
@@ -256,7 +249,6 @@ _func_enter_;
FillH2CCmd_88E(adapt, H2C_PS_PWR_MODE, sizeof(H2CSetPwrMode), (u8 *)&H2CSetPwrMode);
-_func_exit_;
}
void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
@@ -617,7 +609,6 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
u8 DLBcnCount = 0;
u32 poll = 0;
-_func_enter_;
DBG_88E("%s mstatus(%x)\n", __func__, mstatus);
@@ -701,7 +692,6 @@ _func_enter_;
haldata->RegCR_1 &= (~BIT0);
rtw_write8(adapt, REG_CR+1, haldata->RegCR_1);
}
-_func_exit_;
}
void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state)
@@ -712,7 +702,6 @@ void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state)
struct P2P_PS_Offload_t *p2p_ps_offload = &haldata->p2p_ps_offload;
u8 i;
-_func_enter_;
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
@@ -775,5 +764,4 @@ _func_enter_;
FillH2CCmd_88E(adapt, H2C_PS_P2P_OFFLOAD, 1, (u8 *)p2p_ps_offload);
#endif
-_func_exit_;
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 4c934e2e0911..cf88bf247c85 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -160,7 +160,6 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
u8 hw_init_completed = false;
struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
- _func_enter_;
hw_init_completed = Adapter->hw_init_completed;
if (!hw_init_completed)
@@ -178,7 +177,6 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
/* Calculate Tx/Rx statistics. */
dm_CheckStatistics(Adapter);
- _func_exit_;
}
/* ODM */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 5921db86547f..f9d5558431e0 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -20,6 +20,7 @@
#define _HAL_INIT_C_
#include <linux/firmware.h>
+#include <linux/vmalloc.h>
#include <drv_types.h>
#include <rtw_efuse.h>
@@ -365,7 +366,7 @@ void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int data_len)
u32 fifo_data, reg_140;
u32 addr, rstatus, loop = 0;
u16 data_cnts = (data_len/8)+1;
- u8 *pbuf = rtw_zvmalloc(data_len+10);
+ u8 *pbuf = vzalloc(data_len+10);
DBG_88E("###### %s ######\n", __func__);
rtw_write8(Adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
@@ -387,7 +388,7 @@ void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int data_len)
} while (!rstatus && (loop++ < 10));
}
rtw_IOL_cmd_buf_dump(Adapter, data_len, pbuf);
- rtw_vmfree(pbuf, data_len+10);
+ vfree(pbuf);
}
DBG_88E("###### %s ######\n", __func__);
}
@@ -583,59 +584,70 @@ static s32 _FWFreeToGo(struct adapter *padapter)
#define IS_FW_81xxC(padapter) (((GET_HAL_DATA(padapter))->FirmwareSignature & 0xFFF0) == 0x88C0)
-s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
+static int load_firmware(struct rt_firmware *pFirmware, struct device *device)
{
- s32 rtStatus = _SUCCESS;
- u8 writeFW_retry = 0;
- u32 fwdl_start_time;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
- struct device *device = dvobj_to_dev(dvobj);
- struct rt_firmware *pFirmware = NULL;
+ int rtstatus = _SUCCESS;
const struct firmware *fw;
- struct rt_firmware_hdr *pFwHdr = NULL;
- u8 *pFirmwareBuf;
- u32 FirmwareLen;
- char fw_name[] = "rtlwifi/rtl8188eufw.bin";
- static int log_version;
-
- RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
- pFirmware = (struct rt_firmware *)rtw_zmalloc(sizeof(struct rt_firmware));
- if (!pFirmware) {
- rtStatus = _FAIL;
- goto Exit;
- }
+ const char fw_name[] = "rtlwifi/rtl8188eufw.bin";
if (request_firmware(&fw, fw_name, device)) {
- rtStatus = _FAIL;
- goto Exit;
+ rtstatus = _FAIL;
+ goto exit;
}
if (!fw) {
pr_err("Firmware %s not available\n", fw_name);
- rtStatus = _FAIL;
- goto Exit;
+ rtstatus = _FAIL;
+ goto exit;
}
if (fw->size > FW_8188E_SIZE) {
- rtStatus = _FAIL;
- RT_TRACE(_module_hal_init_c_, _drv_err_, ("Firmware size exceed 0x%X. Check it.\n", FW_8188E_SIZE));
- goto Exit;
+ rtstatus = _FAIL;
+ RT_TRACE(_module_hal_init_c_, _drv_err_,
+ ("Firmware size exceed 0x%X. Check it.\n",
+ FW_8188E_SIZE));
+ goto exit;
}
pFirmware->szFwBuffer = kzalloc(FW_8188E_SIZE, GFP_KERNEL);
if (!pFirmware->szFwBuffer) {
- rtStatus = _FAIL;
- goto Exit;
+ rtstatus = _FAIL;
+ goto exit;
}
memcpy(pFirmware->szFwBuffer, fw->data, fw->size);
pFirmware->ulFwLength = fw->size;
- pFirmwareBuf = pFirmware->szFwBuffer;
- FirmwareLen = pFirmware->ulFwLength;
release_firmware(fw);
- DBG_88E_LEVEL(_drv_info_, "+%s: !bUsedWoWLANFw, FmrmwareLen:%d+\n", __func__, FirmwareLen);
+ DBG_88E_LEVEL(_drv_info_,
+ "+%s: !bUsedWoWLANFw, FmrmwareLen:%d+\n", __func__,
+ pFirmware->ulFwLength);
+exit:
+ return rtstatus;
+}
+
+s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
+{
+ s32 rtStatus = _SUCCESS;
+ u8 writeFW_retry = 0;
+ u32 fwdl_start_time;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
+ struct device *device = dvobj_to_dev(dvobj);
+ struct rt_firmware_hdr *pFwHdr = NULL;
+ u8 *pFirmwareBuf;
+ u32 FirmwareLen;
+ static int log_version;
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
+ if (!dvobj->firmware.szFwBuffer)
+ rtStatus = load_firmware(&dvobj->firmware, device);
+ if (rtStatus == _FAIL) {
+ dvobj->firmware.szFwBuffer = NULL;
+ goto Exit;
+ }
+ pFirmwareBuf = dvobj->firmware.szFwBuffer;
+ FirmwareLen = dvobj->firmware.ulFwLength;
/* To Check Fw header. Added by tynli. 2009.12.04. */
- pFwHdr = (struct rt_firmware_hdr *)pFirmware->szFwBuffer;
+ pFwHdr = (struct rt_firmware_hdr *)dvobj->firmware.szFwBuffer;
pHalData->FirmwareVersion = le16_to_cpu(pFwHdr->Version);
pHalData->FirmwareSubVersion = pFwHdr->Subversion;
@@ -687,10 +699,7 @@ s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
goto Exit;
}
RT_TRACE(_module_hal_init_c_, _drv_info_, ("Firmware is ready to run!\n"));
- kfree(pFirmware->szFwBuffer);
Exit:
-
- kfree(pFirmware);
return rtStatus;
}
@@ -707,10 +716,8 @@ void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
static void rtl8188e_free_hal_data(struct adapter *padapter)
{
-_func_enter_;
kfree(padapter->HalData);
padapter->HalData = NULL;
-_func_exit_;
}
/* */
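
The firmware refactor above splits the request_firmware() path into load_firmware() and caches the blob in dvobj->firmware, so a later call to rtl8188e_FirmwareDownload() reuses the copy instead of requesting the file again (which is why the kfree() of the buffer also disappears). A reduced sketch of the cache-once check, with error handling trimmed; load_firmware() is the static helper introduced in the hunk above, assumed visible here:

static int get_cached_fw(struct dvobj_priv *dvobj, struct device *dev)
{
	if (dvobj->firmware.szFwBuffer)		/* already loaded once, reuse it */
		return _SUCCESS;

	/* the real caller also clears szFwBuffer again if this fails */
	return load_firmware(&dvobj->firmware, dev);
}
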
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
index 3d0e6c9e0310..a4d057cf7db2 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
@@ -115,14 +115,12 @@ void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14)
/* Write 0xa24 ~ 0xa27 */
- TempVal2 = 0;
TempVal2 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][2] +
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][3]<<8) +
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][4]<<16)+
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][5]<<24);
/* Write 0xa28 0xa29 */
- TempVal3 = 0;
TempVal3 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][6] +
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][7]<<8);
} else {
@@ -139,14 +137,12 @@ void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14)
(CCKSwingTable_Ch14[CCKSwingIndex][1]<<8);
/* Write 0xa24 ~ 0xa27 */
- TempVal2 = 0;
TempVal2 = CCKSwingTable_Ch14[CCKSwingIndex][2] +
(CCKSwingTable_Ch14[CCKSwingIndex][3]<<8) +
(CCKSwingTable_Ch14[CCKSwingIndex][4]<<16)+
(CCKSwingTable_Ch14[CCKSwingIndex][5]<<24);
/* Write 0xa28 0xa29 */
- TempVal3 = 0;
TempVal3 = CCKSwingTable_Ch14[CCKSwingIndex][6] +
(CCKSwingTable_Ch14[CCKSwingIndex][7]<<8);
}
@@ -184,12 +180,12 @@ void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven)
TempCCk = read_bbreg(pAdapter, rCCK0_TxFilter2, bMaskDWord) & bMaskCCK;
for (i = 0; i < CCK_TABLE_SIZE; i++) {
if (pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
- if (_rtw_memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4)) {
+ if (!memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4)) {
CCK_index_old = (u8)i;
break;
}
} else {
- if (_rtw_memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4)) {
+ if (!memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4)) {
CCK_index_old = (u8)i;
break;
}
@@ -201,6 +197,8 @@ void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven)
else
CCK_index = CCK_index_old + 1;
+ if (CCK_index > 32)
+ CCK_index = 32;
/* Adjust CCK according to gain index */
if (!pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
rtw_write8(pAdapter, 0xa22, CCKSwingTable_Ch1_Ch13[CCK_index][0]);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index 511f61cbb9e0..43eb960e4e0b 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -23,9 +23,9 @@
#include <drv_types.h>
#include <rtl8188e_hal.h>
-static void process_rssi(struct adapter *padapter, union recv_frame *prframe)
+static void process_rssi(struct adapter *padapter, struct recv_frame *prframe)
{
- struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &prframe->attrib;
struct signal_stat *signal_stat = &padapter->recvpriv.signal_strength_data;
if (signal_stat->update_req) {
@@ -39,7 +39,8 @@ static void process_rssi(struct adapter *padapter, union recv_frame *prframe)
signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
} /* Process_UI_RSSI_8192C */
-static void process_link_qual(struct adapter *padapter, union recv_frame *prframe)
+static void process_link_qual(struct adapter *padapter,
+ struct recv_frame *prframe)
{
struct rx_pkt_attrib *pattrib;
struct signal_stat *signal_stat;
@@ -47,7 +48,7 @@ static void process_link_qual(struct adapter *padapter, union recv_frame *prfram
if (prframe == NULL || padapter == NULL)
return;
- pattrib = &prframe->u.hdr.attrib;
+ pattrib = &prframe->attrib;
signal_stat = &padapter->recvpriv.signal_qual_data;
if (signal_stat->update_req) {
@@ -63,7 +64,7 @@ static void process_link_qual(struct adapter *padapter, union recv_frame *prfram
void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
{
- union recv_frame *precvframe = (union recv_frame *)prframe;
+ struct recv_frame *precvframe = (struct recv_frame *)prframe;
/* Check RSSI */
process_rssi(padapter, precvframe);
@@ -71,7 +72,8 @@ void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
process_link_qual(padapter, precvframe);
}
-void update_recvframe_attrib_88e(union recv_frame *precvframe, struct recv_stat *prxstat)
+void update_recvframe_attrib_88e(struct recv_frame *precvframe,
+ struct recv_stat *prxstat)
{
struct rx_pkt_attrib *pattrib;
struct recv_stat report;
@@ -83,7 +85,7 @@ void update_recvframe_attrib_88e(union recv_frame *precvframe, struct recv_stat
report.rxdw4 = prxstat->rxdw4;
report.rxdw5 = prxstat->rxdw5;
- pattrib = &precvframe->u.hdr.attrib;
+ pattrib = &precvframe->attrib;
_rtw_memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
pattrib->crc_err = (u8)((le32_to_cpu(report.rxdw0) >> 14) & 0x1);/* u8)prxreport->crc32; */
@@ -136,12 +138,13 @@ void update_recvframe_attrib_88e(union recv_frame *precvframe, struct recv_stat
/*
* Notice:
* Before calling this function,
- * precvframe->u.hdr.rx_data should be ready!
+ * precvframe->rx_data should be ready!
*/
-void update_recvframe_phyinfo_88e(union recv_frame *precvframe, struct phy_stat *pphy_status)
+void update_recvframe_phyinfo_88e(struct recv_frame *precvframe,
+ struct phy_stat *pphy_status)
{
- struct adapter *padapter = precvframe->u.hdr.adapter;
- struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
+ struct adapter *padapter = precvframe->adapter;
+ struct rx_pkt_attrib *pattrib = &precvframe->attrib;
struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
struct odm_phy_status_info *pPHYInfo = (struct odm_phy_status_info *)(&pattrib->phy_info);
u8 *wlanhdr;
@@ -154,15 +157,15 @@ void update_recvframe_phyinfo_88e(union recv_frame *precvframe, struct phy_stat
pkt_info.bPacketToSelf = false;
pkt_info.bPacketBeacon = false;
- wlanhdr = get_recvframe_data(precvframe);
+ wlanhdr = precvframe->rx_data;
pkt_info.bPacketMatchBSSID = ((!IsFrameTypeCtrl(wlanhdr)) &&
!pattrib->icv_err && !pattrib->crc_err &&
- _rtw_memcmp(get_hdr_bssid(wlanhdr),
+ !memcmp(get_hdr_bssid(wlanhdr),
get_bssid(&padapter->mlmepriv), ETH_ALEN));
pkt_info.bPacketToSelf = pkt_info.bPacketMatchBSSID &&
- (_rtw_memcmp(get_da(wlanhdr),
+ (!memcmp(get_da(wlanhdr),
myid(&padapter->eeprompriv), ETH_ALEN));
pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
@@ -185,17 +188,17 @@ void update_recvframe_phyinfo_88e(union recv_frame *precvframe, struct phy_stat
ODM_PhyStatusQuery(&pHalData->odmpriv, pPHYInfo, (u8 *)pphy_status, &(pkt_info));
- precvframe->u.hdr.psta = NULL;
+ precvframe->psta = NULL;
if (pkt_info.bPacketMatchBSSID &&
(check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE))) {
if (psta) {
- precvframe->u.hdr.psta = psta;
+ precvframe->psta = psta;
rtl8188e_process_phy_info(padapter, precvframe);
}
} else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) {
if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) {
if (psta)
- precvframe->u.hdr.psta = psta;
+ precvframe->psta = psta;
}
rtl8188e_process_phy_info(padapter, precvframe);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index 17c94f4cc477..b1b1584af1cd 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -22,9 +22,6 @@
#include <drv_types.h>
#include <recv_osdep.h>
#include <mlme_osdep.h>
-#include <ip.h>
-#include <if_ether.h>
-#include <ethernet.h>
#include <usb_ops.h>
#include <wifi.h>
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index 6fb6a46f04fe..3476f8898330 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -537,11 +537,11 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&ptxservq->sta_pending);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
+ xmitframe_plist = xmitframe_plist->next;
pxmitframe->agg_num = 0; /* not first frame of aggregation */
pxmitframe->pkt_offset = 0; /* not first frame of aggregation, no need to reserve offset */
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index b24ad495062c..c92067f0ef15 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -709,7 +709,6 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
#define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
-_func_enter_;
HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
@@ -967,7 +966,6 @@ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
DBG_88E("%s in %dms\n", __func__, rtw_get_passing_time_ms(init_start_time));
-_func_exit_;
return status;
}
@@ -1084,7 +1082,6 @@ static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
struct recv_priv *precvpriv = &(Adapter->recvpriv);
u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
-_func_enter_;
_read_port = pintfhdl->io_ops._read_port;
@@ -1112,7 +1109,6 @@ exit:
RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("<=== usb_inirp_init\n"));
-_func_exit_;
return status;
}
@@ -1402,7 +1398,6 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
struct dm_priv *pdmpriv = &haldata->dmpriv;
struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-_func_enter_;
switch (variable) {
case HW_VAR_MEDIA_STATUS:
@@ -1921,14 +1916,12 @@ _func_enter_;
default:
break;
}
-_func_exit_;
}
static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-_func_enter_;
switch (variable) {
case HW_VAR_BASIC_RATE:
@@ -1980,7 +1973,6 @@ _func_enter_;
break;
}
-_func_exit_;
}
/* */
@@ -2302,7 +2294,6 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
{
struct hal_ops *halfunc = &adapt->HalFunc;
-_func_enter_;
adapt->HalData = rtw_zmalloc(sizeof(struct hal_data_8188e));
if (adapt->HalData == NULL)
@@ -2342,5 +2333,4 @@ _func_enter_;
halfunc->interface_ps_func = &rtl8188eu_ps_func;
rtl8188e_set_hal_ops(halfunc);
-_func_exit_;
}
diff --git a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
index 31ae21a54c92..1fa5370f1da6 100644
--- a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
@@ -111,7 +111,7 @@ static int usbctrl_vendorreq(struct intf_hdl *pintfhdl, u8 request, u16 value, u
break;
}
release_mutex:
- _exit_critical_mutex(&dvobjpriv->usb_vendor_req_mutex, NULL);
+ mutex_unlock(&dvobjpriv->usb_vendor_req_mutex);
exit:
return status;
}
@@ -125,7 +125,6 @@ static u8 usb_read8(struct intf_hdl *pintfhdl, u32 addr)
u16 len;
u8 data = 0;
- _func_enter_;
request = 0x05;
requesttype = 0x01;/* read_in */
@@ -136,7 +135,6 @@ static u8 usb_read8(struct intf_hdl *pintfhdl, u32 addr)
usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
- _func_exit_;
return data;
@@ -151,14 +149,12 @@ static u16 usb_read16(struct intf_hdl *pintfhdl, u32 addr)
u16 len;
__le32 data;
-_func_enter_;
request = 0x05;
requesttype = 0x01;/* read_in */
index = 0;/* n/a */
wvalue = (u16)(addr&0x0000ffff);
len = 2;
usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
-_func_exit_;
return (u16)(le32_to_cpu(data)&0xffff);
}
@@ -172,7 +168,6 @@ static u32 usb_read32(struct intf_hdl *pintfhdl, u32 addr)
u16 len;
__le32 data;
-_func_enter_;
request = 0x05;
requesttype = 0x01;/* read_in */
@@ -183,7 +178,6 @@ _func_enter_;
usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
-_func_exit_;
return le32_to_cpu(data);
}
@@ -198,7 +192,6 @@ static int usb_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val)
u8 data;
int ret;
- _func_enter_;
request = 0x05;
requesttype = 0x00;/* write_out */
index = 0;/* n/a */
@@ -206,7 +199,6 @@ static int usb_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val)
len = 1;
data = val;
ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
- _func_exit_;
return ret;
}
@@ -220,7 +212,6 @@ static int usb_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val)
__le32 data;
int ret;
- _func_enter_;
request = 0x05;
requesttype = 0x00;/* write_out */
@@ -233,7 +224,6 @@ static int usb_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val)
ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
- _func_exit_;
return ret;
}
@@ -248,7 +238,6 @@ static int usb_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val)
__le32 data;
int ret;
- _func_enter_;
request = 0x05;
requesttype = 0x00;/* write_out */
@@ -260,7 +249,6 @@ static int usb_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val)
ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
- _func_exit_;
return ret;
}
@@ -275,7 +263,6 @@ static int usb_writeN(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata
u8 buf[VENDOR_CMD_MAX_DATA_LEN] = {0};
int ret;
- _func_enter_;
request = 0x05;
requesttype = 0x00;/* write_out */
@@ -287,7 +274,6 @@ static int usb_writeN(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata
ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, buf, len, requesttype);
- _func_exit_;
return ret;
}
@@ -320,7 +306,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
struct recv_stat *prxstat;
struct phy_stat *pphy_status = NULL;
struct sk_buff *pkt_copy = NULL;
- union recv_frame *precvframe = NULL;
+ struct recv_frame *precvframe = NULL;
struct rx_pkt_attrib *pattrib = NULL;
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
struct recv_priv *precvpriv = &adapt->recvpriv;
@@ -346,13 +332,13 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
goto _exit_recvbuf2recvframe;
}
- _rtw_init_listhead(&precvframe->u.hdr.list);
- precvframe->u.hdr.precvbuf = NULL; /* can't access the precvbuf for new arch. */
- precvframe->u.hdr.len = 0;
+ _rtw_init_listhead(&precvframe->list);
+ precvframe->precvbuf = NULL; /* can't access the precvbuf for new arch. */
+ precvframe->len = 0;
update_recvframe_attrib_88e(precvframe, prxstat);
- pattrib = &precvframe->u.hdr.attrib;
+ pattrib = &precvframe->attrib;
if ((pattrib->crc_err) || (pattrib->icv_err)) {
DBG_88E("%s: RX Warning! crc_err=%d icv_err=%d, skip!\n", __func__, pattrib->crc_err, pattrib->icv_err);
@@ -399,26 +385,26 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
pkt_copy = netdev_alloc_skb(adapt->pnetdev, alloc_sz);
if (pkt_copy) {
pkt_copy->dev = adapt->pnetdev;
- precvframe->u.hdr.pkt = pkt_copy;
- precvframe->u.hdr.rx_head = pkt_copy->data;
- precvframe->u.hdr.rx_end = pkt_copy->data + alloc_sz;
+ precvframe->pkt = pkt_copy;
+ precvframe->rx_head = pkt_copy->data;
+ precvframe->rx_end = pkt_copy->data + alloc_sz;
skb_reserve(pkt_copy, 8 - ((size_t)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
memcpy(pkt_copy->data, (pbuf + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len);
- precvframe->u.hdr.rx_tail = pkt_copy->data;
- precvframe->u.hdr.rx_data = pkt_copy->data;
+ precvframe->rx_tail = pkt_copy->data;
+ precvframe->rx_data = pkt_copy->data;
} else {
if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
DBG_88E("recvbuf2recvframe: alloc_skb fail , drop frag frame\n");
rtw_free_recvframe(precvframe, pfree_recv_queue);
goto _exit_recvbuf2recvframe;
}
- precvframe->u.hdr.pkt = skb_clone(pskb, GFP_ATOMIC);
- if (precvframe->u.hdr.pkt) {
- precvframe->u.hdr.rx_tail = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE;
- precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_tail;
- precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail;
- precvframe->u.hdr.rx_end = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE + alloc_sz;
+ precvframe->pkt = skb_clone(pskb, GFP_ATOMIC);
+ if (precvframe->pkt) {
+ precvframe->rx_tail = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE;
+ precvframe->rx_head = precvframe->rx_tail;
+ precvframe->rx_data = precvframe->rx_tail;
+ precvframe->rx_end = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE + alloc_sz;
} else {
DBG_88E("recvbuf2recvframe: skb_clone fail\n");
rtw_free_recvframe(precvframe, pfree_recv_queue);
@@ -451,17 +437,17 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
/* enqueue recvframe to txrtp queue */
if (pattrib->pkt_rpt_type == TX_REPORT1) {
/* CCX-TXRPT ack for xmit mgmt frames. */
- handle_txrpt_ccx_88e(adapt, precvframe->u.hdr.rx_data);
+ handle_txrpt_ccx_88e(adapt, precvframe->rx_data);
} else if (pattrib->pkt_rpt_type == TX_REPORT2) {
ODM_RA_TxRPT2Handle_8188E(
&haldata->odmpriv,
- precvframe->u.hdr.rx_data,
+ precvframe->rx_data,
pattrib->pkt_len,
pattrib->MacIDValidEntry[0],
pattrib->MacIDValidEntry[1]
);
} else if (pattrib->pkt_rpt_type == HIS_REPORT) {
- interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->u.hdr.rx_data);
+ interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data);
}
rtw_free_recvframe(precvframe, pfree_recv_queue);
}
@@ -519,7 +505,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
DBG_88E("%s() RX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) bReadPortCancel(%d)\n",
__func__, adapt->bDriverStopped,
adapt->bSurpriseRemoved, adapt->bReadPortCancel);
- goto exit;
+ return;
}
if (purb->status == 0) { /* SUCCESS */
@@ -579,9 +565,6 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
break;
}
}
-
-exit:
-_func_exit_;
}
static u32 usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
@@ -598,7 +581,6 @@ static u32 usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
size_t alignment = 0;
u32 ret = _SUCCESS;
-_func_enter_;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
adapter->pwrctrlpriv.pnp_bstop_trx) {
@@ -672,7 +654,6 @@ _func_enter_;
ret = _FAIL;
}
-_func_exit_;
return ret;
}
@@ -702,7 +683,6 @@ void rtl8188eu_xmit_tasklet(void *priv)
void rtl8188eu_set_intf_ops(struct _io_ops *pops)
{
- _func_enter_;
_rtw_memset((u8 *)pops, 0, sizeof(struct _io_ops));
pops->_read8 = &usb_read8;
pops->_read16 = &usb_read16;
@@ -717,7 +697,6 @@ void rtl8188eu_set_intf_ops(struct _io_ops *pops)
pops->_write_port = &usb_write_port;
pops->_read_port_cancel = &usb_read_port_cancel;
pops->_write_port_cancel = &usb_write_port_cancel;
- _func_exit_;
}
void rtl8188eu_set_hw_type(struct adapter *adapt)
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index a492a1c547ae..936c196699af 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -159,9 +159,15 @@ struct registry_priv {
#define MAX_CONTINUAL_URB_ERR 4
+struct rt_firmware {
+ u8 *szFwBuffer;
+ u32 ulFwLength;
+};
+
struct dvobj_priv {
struct adapter *if1;
struct adapter *if2;
+ struct rt_firmware firmware;
/* For 92D, DMDP have 2 interface. */
u8 InterfaceNumber;
diff --git a/drivers/staging/rtl8188eu/include/ethernet.h b/drivers/staging/rtl8188eu/include/ethernet.h
deleted file mode 100644
index a59f9120cd7e..000000000000
--- a/drivers/staging/rtl8188eu/include/ethernet.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-/*! \file */
-#ifndef __INC_ETHERNET_H
-#define __INC_ETHERNET_H
-
-#define ETHERNET_ADDRESS_LENGTH 6 /* Ethernet Address Length */
-#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
-#define LLC_HEADER_SIZE 6 /* LLC Header Length */
-#define TYPE_LENGTH_FIELD_SIZE 2 /* Type/Length Size */
-#define MINIMUM_ETHERNET_PACKET_SIZE 60 /* Min Ethernet Packet Size */
-#define MAXIMUM_ETHERNET_PACKET_SIZE 1514 /* Max Ethernet Packet Size */
-
-/* Is Multicast Address? */
-#define RT_ETH_IS_MULTICAST(_addr) ((((u8 *)(_addr))[0]&0x01) != 0)
-#define RT_ETH_IS_BROADCAST(_addr) ( \
- ((u8 *)(_addr))[0] == 0xff && \
- ((u8 *)(_addr))[1] == 0xff && \
- ((u8 *)(_addr))[2] == 0xff && \
- ((u8 *)(_addr))[3] == 0xff && \
- ((u8 *)(_addr))[4] == 0xff && \
- ((u8 *)(_addr))[5] == 0xff) /* Is Broadcast Address? */
-
-
-#endif /* #ifndef __INC_ETHERNET_H */
diff --git a/drivers/staging/rtl8188eu/include/if_ether.h b/drivers/staging/rtl8188eu/include/if_ether.h
deleted file mode 100644
index db157712a203..000000000000
--- a/drivers/staging/rtl8188eu/include/if_ether.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-
-#ifndef _LINUX_IF_ETHER_H
-#define _LINUX_IF_ETHER_H
-
-/*
- * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
- * and FCS/CRC (frame check sequence).
- */
-
-#define ETH_ALEN 6 /* Octets in one ethernet addr */
-#define ETH_HLEN 14 /* Total octets in header. */
-#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
-#define ETH_DATA_LEN 1500 /* Max. octets in payload */
-#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
-
-/*
- * These are the defined Ethernet Protocol ID's.
- */
-
-#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
-#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
-#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
-#define ETH_P_IP 0x0800 /* Internet Protocol packet */
-#define ETH_P_X25 0x0805 /* CCITT X.25 */
-#define ETH_P_ARP 0x0806 /* Address Resolution packet */
-#define ETH_P_BPQ 0x08FF /* G8BPQ AX.25 Ethernet Packet */
-#define ETH_P_IEEEPUP 0x0a00 /* Xerox IEEE802.3 PUP packet */
-#define ETH_P_IEEEPUPAT 0x0a01 /* Xerox IEEE802.3 PUP */
-#define ETH_P_DEC 0x6000 /* DEC Assigned proto */
-#define ETH_P_DNA_DL 0x6001 /* DEC DNA Dump/Load */
-#define ETH_P_DNA_RC 0x6002 /* DEC DNA Remote Console */
-#define ETH_P_DNA_RT 0x6003 /* DEC DNA Routing */
-#define ETH_P_LAT 0x6004 /* DEC LAT */
-#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */
-#define ETH_P_CUST 0x6006 /* DEC Customer use */
-#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */
-#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
-#define ETH_P_ATALK 0x809B /* Appletalk DDP */
-#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
-#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */
-#define ETH_P_IPX 0x8137 /* IPX over DIX */
-#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
-#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */
-#define ETH_P_PPP_SES 0x8864 /* PPPoE session messages */
-#define ETH_P_ATMMPOA 0x884c /* MultiProtocol Over ATM */
-#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport
- * over Ethernet
- */
-
-/*
- * Non DIX types. Won't clash for 1500 types.
- */
-
-#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */
-#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */
-#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */
-#define ETH_P_802_2 0x0004 /* 802.2 frames */
-#define ETH_P_SNAP 0x0005 /* Internal only */
-#define ETH_P_DDCMP 0x0006 /* DEC DDCMP: Internal only */
-#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/
-#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */
-#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
-#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
-#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
-#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */
-#define ETH_P_CONTROL 0x0016 /* Card specific control frames */
-#define ETH_P_IRDA 0x0017 /* Linux-IrDA */
-#define ETH_P_ECONET 0x0018 /* Acorn Econet */
-
-/*
- * This is an Ethernet frame header.
- */
-
-struct ethhdr {
- unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
- unsigned char h_source[ETH_ALEN]; /* source ether addr */
- unsigned short h_proto; /* packet type ID field */
-};
-
-struct _vlan {
- unsigned short h_vlan_TCI; /* Encap prio and VLAN ID */
- unsigned short h_vlan_encapsulated_proto;
-};
-
-#define get_vlan_id(pvlan) \
- ((ntohs((unsigned short)pvlan->h_vlan_TCI)) & 0xfff)
-#define get_vlan_priority(pvlan) \
- ((ntohs((unsigned short)pvlan->h_vlan_TCI))>>13)
-#define get_vlan_encap_proto(pvlan) \
- (ntohs((unsigned short)pvlan->h_vlan_encapsulated_proto))
-
-#endif /* _LINUX_IF_ETHER_H */
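
The two private headers deleted above duplicated constants and helpers the kernel already ships in <linux/if_ether.h> and <linux/etherdevice.h>; their includes were dropped from rtl8188eu_recv.c earlier in this patch. An illustrative use of the stock equivalents, not a call site taken from the patch:

#include <linux/etherdevice.h>	/* is_multicast_ether_addr(), is_broadcast_ether_addr() */
#include <linux/if_ether.h>	/* ETH_ALEN, ETH_HLEN, struct ethhdr */

/*
 * Mapping from the deleted private macros to the standard helpers:
 *   RT_ETH_IS_MULTICAST(addr)  ->  is_multicast_ether_addr(addr)
 *   RT_ETH_IS_BROADCAST(addr)  ->  is_broadcast_ether_addr(addr)
 *   ETHERNET_HEADER_SIZE       ->  ETH_HLEN
 */
static bool addr_is_group(const u8 *addr)
{
	return is_multicast_ether_addr(addr);	/* broadcast is a multicast address too */
}
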
diff --git a/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h b/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h
deleted file mode 100644
index 037e9a5e5af9..000000000000
--- a/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-#ifndef __IOCTL_CFG80211_H__
-#define __IOCTL_CFG80211_H__
-
-struct rtw_wdev_invit_info {
- u8 token;
- u8 flags;
- u8 status;
- u8 req_op_ch;
- u8 rsp_op_ch;
-};
-
-#define rtw_wdev_invit_info_init(invit_info) \
- do { \
- (invit_info)->token = 0; \
- (invit_info)->flags = 0x00; \
- (invit_info)->status = 0xff; \
- (invit_info)->req_op_ch = 0; \
- (invit_info)->rsp_op_ch = 0; \
- } while (0)
-
-struct rtw_wdev_priv {
- struct wireless_dev *rtw_wdev;
-
- struct adapter *padapter;
-
- struct cfg80211_scan_request *scan_request;
- spinlock_t scan_req_lock;
-
- struct net_device *pmon_ndev;/* for monitor interface */
- char ifname_mon[IFNAMSIZ + 1]; /* name of monitor interface */
-
- u8 p2p_enabled;
-
- u8 provdisc_req_issued;
-
- struct rtw_wdev_invit_info invit_info;
-
- u8 bandroid_scan;
- bool block;
- bool power_mgmt;
-};
-
-#define wdev_to_priv(w) ((struct rtw_wdev_priv *)(wdev_priv(w)))
-
-#define wiphy_to_wdev(x) \
-((struct wireless_dev *)(((struct rtw_wdev_priv *)wiphy_priv(x))->rtw_wdev))
-
-int rtw_wdev_alloc(struct adapter *padapter, struct device *dev);
-void rtw_wdev_free(struct wireless_dev *wdev);
-void rtw_wdev_unregister(struct wireless_dev *wdev);
-
-void rtw_cfg80211_init_wiphy(struct adapter *padapter);
-
-void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter);
-
-void rtw_cfg80211_indicate_connect(struct adapter *padapter);
-void rtw_cfg80211_indicate_disconnect(struct adapter *padapter);
-void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
- bool aborted);
-
-#ifdef CONFIG_88EU_AP_MODE
-void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter,
- u8 *pmgmt_frame, uint frame_len);
-void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter,
- unsigned char *da,
- unsigned short reason);
-#endif /* CONFIG_88EU_AP_MODE */
-
-void rtw_cfg80211_issue_p2p_provision_request(struct adapter *padapter,
- const u8 *buf, size_t len);
-void rtw_cfg80211_rx_p2p_action_public(struct adapter *padapter,
- u8 *pmgmt_frame, uint frame_len);
-void rtw_cfg80211_rx_action_p2p(struct adapter *padapter, u8 *pmgmt_frame,
- uint frame_len);
-void rtw_cfg80211_rx_action(struct adapter *adapter, u8 *frame,
- uint frame_len, const char *msg);
-
-int rtw_cfg80211_set_mgnt_wpsp2pie(struct net_device *net,
- char *buf, int len, int type);
-
-bool rtw_cfg80211_pwr_mgmt(struct adapter *adapter);
-
-#define rtw_cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp) \
- cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp)
-#define rtw_cfg80211_send_rx_assoc(dev, bss, buf, len) \
- cfg80211_send_rx_assoc(dev, bss, buf, len)
-
-#endif /* __IOCTL_CFG80211_H__ */
diff --git a/drivers/staging/rtl8188eu/include/ip.h b/drivers/staging/rtl8188eu/include/ip.h
deleted file mode 100644
index 9fdac6d42d17..000000000000
--- a/drivers/staging/rtl8188eu/include/ip.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-#ifndef _LINUX_IP_H
-#define _LINUX_IP_H
-
-/* SOL_IP socket options */
-
-#define IPTOS_TOS_MASK 0x1E
-#define IPTOS_TOS(tos) ((tos)&IPTOS_TOS_MASK)
-#define IPTOS_LOWDELAY 0x10
-#define IPTOS_THROUGHPUT 0x08
-#define IPTOS_RELIABILITY 0x04
-#define IPTOS_MINCOST 0x02
-
-#define IPTOS_PREC_MASK 0xE0
-#define IPTOS_PREC(tos) ((tos)&IPTOS_PREC_MASK)
-#define IPTOS_PREC_NETCONTROL 0xe0
-#define IPTOS_PREC_INTERNETCONTROL 0xc0
-#define IPTOS_PREC_CRITIC_ECP 0xa0
-#define IPTOS_PREC_FLASHOVERRIDE 0x80
-#define IPTOS_PREC_FLASH 0x60
-#define IPTOS_PREC_IMMEDIATE 0x40
-#define IPTOS_PREC_PRIORITY 0x20
-#define IPTOS_PREC_ROUTINE 0x00
-
-
-/* IP options */
-#define IPOPT_COPY 0x80
-#define IPOPT_CLASS_MASK 0x60
-#define IPOPT_NUMBER_MASK 0x1f
-
-#define IPOPT_COPIED(o) ((o)&IPOPT_COPY)
-#define IPOPT_CLASS(o) ((o)&IPOPT_CLASS_MASK)
-#define IPOPT_NUMBER(o) ((o)&IPOPT_NUMBER_MASK)
-
-#define IPOPT_CONTROL 0x00
-#define IPOPT_RESERVED1 0x20
-#define IPOPT_MEASUREMENT 0x40
-#define IPOPT_RESERVED2 0x60
-
-#define IPOPT_END (0 | IPOPT_CONTROL)
-#define IPOPT_NOOP (1 | IPOPT_CONTROL)
-#define IPOPT_SEC (2 | IPOPT_CONTROL | IPOPT_COPY)
-#define IPOPT_LSRR (3 | IPOPT_CONTROL | IPOPT_COPY)
-#define IPOPT_TIMESTAMP (4 | IPOPT_MEASUREMENT)
-#define IPOPT_RR (7 | IPOPT_CONTROL)
-#define IPOPT_SID (8 | IPOPT_CONTROL | IPOPT_COPY)
-#define IPOPT_SSRR (9 | IPOPT_CONTROL | IPOPT_COPY)
-#define IPOPT_RA (20 | IPOPT_CONTROL | IPOPT_COPY)
-
-#define IPVERSION 4
-#define MAXTTL 255
-#define IPDEFTTL 64
-#define IPOPT_OPTVAL 0
-#define IPOPT_OLEN 1
-#define IPOPT_OFFSET 2
-#define IPOPT_MINOFF 4
-#define MAX_IPOPTLEN 40
-#define IPOPT_NOP IPOPT_NOOP
-#define IPOPT_EOL IPOPT_END
-#define IPOPT_TS IPOPT_TIMESTAMP
-
-#define IPOPT_TS_TSONLY 0 /* timestamps only */
-#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */
-#define IPOPT_TS_PRESPEC 3 /* specified modules only */
-
-struct ip_options {
- __u32 faddr; /* Saved first hop address */
- unsigned char optlen;
- unsigned char srr;
- unsigned char rr;
- unsigned char ts;
- unsigned char is_setbyuser:1, /* Set by setsockopt? */
- is_data:1, /* Options in __data, rather than skb*/
- is_strictroute:1,/* Strict source route */
- srr_is_hit:1, /* Packet destn addr was ours */
- is_changed:1, /* IP checksum more not valid */
- rr_needaddr:1, /* Need to record addr of out dev*/
- ts_needtime:1, /* Need to record timestamp */
- ts_needaddr:1; /* Need to record addr of out dev */
- unsigned char router_alert;
- unsigned char __pad1;
- unsigned char __pad2;
- unsigned char __data[0];
-};
-
-#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)
-
-struct iphdr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 ihl:4,
- version:4;
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u8 version:4,
- ihl:4;
-#endif
- __u8 tos;
- __u16 tot_len;
- __u16 id;
- __u16 frag_off;
- __u8 ttl;
- __u8 protocol;
- __u16 check;
- __u32 saddr;
- __u32 daddr;
- /*The options start here. */
-};
-
-#endif /* _LINUX_IP_H */
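The deleted ip.h duplicated definitions that mainline already provides (struct iphdr, the IPTOS_*/IPOPT_* constants), so nothing in the driver should need a private copy. A minimal sketch of the standard replacement, assuming the caller already knows the skb carries IPv4 (the helper name is illustrative, not from this driver):

	#include <linux/in.h>
	#include <linux/ip.h>
	#include <linux/skbuff.h>

	static bool skb_is_ipv4_tcp(const struct sk_buff *skb)
	{
		/* ip_hdr() returns the IPv4 header at skb->network_header */
		const struct iphdr *iph = ip_hdr(skb);

		return iph->version == 4 && iph->protocol == IPPROTO_TCP;
	}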
diff --git a/drivers/staging/rtl8188eu/include/nic_spec.h b/drivers/staging/rtl8188eu/include/nic_spec.h
deleted file mode 100644
index d42244788ca9..000000000000
--- a/drivers/staging/rtl8188eu/include/nic_spec.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-
-
-#ifndef __NIC_SPEC_H__
-#define __NIC_SPEC_H__
-
-#define RTL8711_MCTRL_ (0x20000)
-#define RTL8711_UART_ (0x30000)
-#define RTL8711_TIMER_ (0x40000)
-#define RTL8711_FINT_ (0x50000)
-#define RTL8711_HINT_ (0x50000)
-#define RTL8711_GPIO_ (0x60000)
-#define RTL8711_WLANCTRL_ (0x200000)
-#define RTL8711_WLANFF_ (0xe00000)
-#define RTL8711_HCICTRL_ (0x600000)
-#define RTL8711_SYSCFG_ (0x620000)
-#define RTL8711_SYSCTRL_ (0x620000)
-#define RTL8711_MCCTRL_ (0x020000)
-
-
-#include <rtl8711_regdef.h>
-
-#include <rtl8711_bitdef.h>
-
-
-#endif /* __RTL8711_SPEC_H__ */
diff --git a/drivers/staging/rtl8188eu/include/odm_interface.h b/drivers/staging/rtl8188eu/include/odm_interface.h
index a50eae3ec68e..548a309db199 100644
--- a/drivers/staging/rtl8188eu/include/odm_interface.h
+++ b/drivers/staging/rtl8188eu/include/odm_interface.h
@@ -77,32 +77,9 @@ typedef void (*RT_WORKITEM_CALL_BACK)(void *pContext);
/* =========== EXtern Function Prototype */
-u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-
-u16 ODM_Read2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-
-u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-
-void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data);
-
-void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data);
-
-void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data);
-
/* Memory Relative Function. */
-void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length);
-void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length);
-
-s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2,
- u32 length);
/* ODM Timer relative API. */
-void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer,
- u32 msDelay);
-
-void ODM_InitializeTimer(struct odm_dm_struct *pDM_Odm,
- struct timer_list *pTimer, void *CallBackFunc,
- void *pContext, const char *szID);
void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
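The dropped ODM_Read*/ODM_Write* and memory/timer wrappers were one-line shims over the driver's own register accessors, so the surviving ODM code can call those directly. A sketch of the direct form, assuming odm_dm_struct keeps its Adapter back-pointer and with REG_LEDCFG2 only as a stand-in register:

	static void example_direct_io(struct odm_dm_struct *dm)
	{
		u8 v;

		v = rtw_read8(dm->Adapter, REG_LEDCFG2);          /* was ODM_Read1Byte()  */
		rtw_write8(dm->Adapter, REG_LEDCFG2, v | BIT(3)); /* was ODM_Write1Byte() */
	}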
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 7956f0cdb96b..5889f58ed7d3 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -62,20 +62,11 @@ struct __queue {
spinlock_t lock;
};
-static inline struct list_head *get_next(struct list_head *list)
-{
- return list->next;
-}
-
static inline struct list_head *get_list_head(struct __queue *queue)
{
return &(queue->queue);
}
-
-#define LIST_CONTAINOR(ptr, type, member) \
- ((type *)((char *)(ptr)-(size_t)(&((type *)0)->member)))
-
static inline int _enter_critical_mutex(struct mutex *pmutex,
unsigned long *pirqL)
{
@@ -85,13 +76,6 @@ static inline int _enter_critical_mutex(struct mutex *pmutex,
return ret;
}
-
-static inline void _exit_critical_mutex(struct mutex *pmutex,
- unsigned long *pirqL)
-{
- mutex_unlock(pmutex);
-}
-
static inline void rtw_list_delete(struct list_head *plist)
{
list_del_init(plist);
@@ -122,17 +106,6 @@ static inline void _cancel_timer(struct timer_list *ptimer, u8 *bcancelled)
#define RTW_DECLARE_TIMER_HDL(name) \
void RTW_TIMER_HDL_NAME(name)(RTW_TIMER_HDL_ARGS)
-static inline void _init_workitem(struct work_struct *pwork, void *pfunc,
- void *cntx)
-{
- INIT_WORK(pwork, pfunc);
-}
-
-static inline void _set_workitem(struct work_struct *pwork)
-{
- schedule_work(pwork);
-}
-
static inline void _cancel_workitem_sync(struct work_struct *pwork)
{
cancel_work_sync(pwork);
@@ -230,15 +203,9 @@ extern unsigned char WPA_TKIP_CIPHER[4];
extern unsigned char RSN_TKIP_CIPHER[4];
#define rtw_update_mem_stat(flag, sz) do {} while (0)
-u8 *_rtw_vmalloc(u32 sz);
-u8 *_rtw_zvmalloc(u32 sz);
-void _rtw_vmfree(u8 *pbuf, u32 sz);
u8 *_rtw_zmalloc(u32 sz);
u8 *_rtw_malloc(u32 sz);
void _rtw_mfree(u8 *pbuf, u32 sz);
-#define rtw_vmalloc(sz) _rtw_vmalloc((sz))
-#define rtw_zvmalloc(sz) _rtw_zvmalloc((sz))
-#define rtw_vmfree(pbuf, sz) _rtw_vmfree((pbuf), (sz))
#define rtw_malloc(sz) _rtw_malloc((sz))
#define rtw_zmalloc(sz) _rtw_zmalloc((sz))
#define rtw_mfree(pbuf, sz) _rtw_mfree((pbuf), (sz))
@@ -247,7 +214,6 @@ void *rtw_malloc2d(int h, int w, int size);
void rtw_mfree2d(void *pbuf, int h, int w, int size);
void _rtw_memcpy(void *dec, void *sour, u32 sz);
-int _rtw_memcmp(void *dst, void *src, u32 sz);
void _rtw_memset(void *pbuf, int c, u32 sz);
void _rtw_init_listhead(struct list_head *list);
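Each helper removed from osdep_service.h has a bare-kernel equivalent, which the rest of this patch substitutes at the call sites (list->next for get_next(), container_of() for LIST_CONTAINOR(), vmalloc()/vfree() for the vmalloc wrappers, and so on). A sketch of the locking/work pattern written directly, on the assumption that _enter_critical_mutex() wraps mutex_lock_interruptible(), which its int return suggests:

	static int example_locked_kick(struct mutex *lock, struct work_struct *work)
	{
		int ret;

		ret = mutex_lock_interruptible(lock);	/* body of _enter_critical_mutex() */
		if (ret)
			return ret;
		schedule_work(work);			/* was _set_workitem()        */
		mutex_unlock(lock);			/* was _exit_critical_mutex() */
		return 0;
	}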
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index 691238078075..d76cc2db5028 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -28,18 +28,20 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
void _rtw_free_recv_priv(struct recv_priv *precvpriv);
-s32 rtw_recv_entry(union recv_frame *precv_frame);
-int rtw_recv_indicatepkt(struct adapter *adapter, union recv_frame *recv_frame);
+s32 rtw_recv_entry(struct recv_frame *precv_frame);
+int rtw_recv_indicatepkt(struct adapter *adapter,
+ struct recv_frame *recv_frame);
void rtw_recv_returnpacket(struct net_device *cnxt, struct sk_buff *retpkt);
-void rtw_hostapd_mlme_rx(struct adapter *padapter, union recv_frame *recv_fr);
+void rtw_hostapd_mlme_rx(struct adapter *padapter, struct recv_frame *recv_fr);
void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
void rtw_free_recv_priv(struct recv_priv *precvpriv);
int rtw_os_recv_resource_init(struct recv_priv *recvpr, struct adapter *adapt);
-int rtw_os_recv_resource_alloc(struct adapter *adapt, union recv_frame *recvfr);
+int rtw_os_recv_resource_alloc(struct adapter *adapt,
+ struct recv_frame *recvfr);
void rtw_os_recv_resource_free(struct recv_priv *precvpriv);
int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 161f1e5af9e6..75e41c4aeb27 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -76,17 +76,6 @@
(le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x2300 || \
(le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x88E0)
-enum firmware_source {
- FW_SOURCE_IMG_FILE = 0,
- FW_SOURCE_HEADER_FILE = 1, /* from header file */
-};
-
-struct rt_firmware {
- enum firmware_source eFWSource;
- u8 *szFwBuffer;
- u32 ulFwLength;
-};
-
/* This structure must be careful with byte-ordering */
struct rt_firmware_hdr {
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index a8facf00eac0..07e5f5227336 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -61,9 +61,10 @@ s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
void rtl8188eu_free_recv_priv(struct adapter *padapter);
void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
void rtl8188eu_recv_tasklet(void *priv);
-void rtl8188e_query_rx_phy_status(union recv_frame *fr, struct phy_stat *phy);
+void rtl8188e_query_rx_phy_status(struct recv_frame *fr, struct phy_stat *phy);
void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe);
-void update_recvframe_phyinfo_88e(union recv_frame *fra, struct phy_stat *phy);
-void update_recvframe_attrib_88e(union recv_frame *fra, struct recv_stat *stat);
+void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
+void update_recvframe_attrib_88e(struct recv_frame *fra,
+ struct recv_stat *stat);
#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index c6b193a2e795..ae05141f5ddf 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -99,20 +99,6 @@ extern u32 GlobalDebugLevel;
} \
} while (0)
-#define _func_enter_ \
- do { \
- if (GlobalDebugLevel >= _drv_debug_) \
- pr_info("%s : %s enters at %d\n", \
- DRIVER_PREFIX, __func__, __LINE__); \
- } while (0)
-
-#define _func_exit_ \
- do { \
- if (GlobalDebugLevel >= _drv_debug_) \
- pr_info("%s : %s exits at %d\n", \
- DRIVER_PREFIX, __func__, __LINE__); \
- } while (0)
-
#define RT_PRINT_DATA(_comp, _level, _titlestring, _hexdata, _hexdatalen)\
do { \
if (_level <= GlobalDebugLevel) { \
@@ -277,14 +263,4 @@ int proc_get_rssi_disp(char *page, char **start,
int proc_set_rssi_disp(struct file *file, const char __user *buffer,
unsigned long count, void *data);
-#ifdef CONFIG_BT_COEXIST
-int proc_get_btcoex_dbg(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_btcoex_dbg(struct file *file, const char *buffer,
- signed long count, void *data);
-
-#endif /* CONFIG_BT_COEXIST */
-
#endif /* __RTW_DEBUG_H__ */
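With the _func_enter_/_func_exit_ macros gone (their call sites are stripped later in this patch), the same entry/exit visibility is available from the kernel's function tracer without any driver code; where a single function genuinely needs a marker, a dynamic-debug print is enough:

	pr_debug("%s: enter\n", __func__);	/* compiled in only with DEBUG/dynamic debug */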
diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
index 3d1dfcc1b603..e8790f8f913e 100644
--- a/drivers/staging/rtl8188eu/include/rtw_io.h
+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
@@ -99,7 +99,6 @@
struct intf_priv;
struct intf_hdl;
-struct io_queue;
struct _io_ops {
u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
@@ -117,7 +116,6 @@ struct _io_ops {
u8 *pmem);
void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
u8 *pmem);
- void (*_sync_irp_protocol_rw)(struct io_queue *pio_q);
u32 (*_read_interrupt)(struct intf_hdl *pintfhdl, u32 addr);
u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
u8 *pmem);
@@ -237,34 +235,11 @@ struct reg_protocol_wt {
Below is the data structure used by _io_handler
*/
-struct io_queue {
- spinlock_t lock;
- struct list_head free_ioreqs;
- struct list_head pending; /* The io_req list that will be served
- * in the single protocol read/write.*/
- struct list_head processing;
- u8 *free_ioreqs_buf; /* 4-byte aligned */
- u8 *pallocated_free_ioreqs_buf;
- struct intf_hdl intf;
-};
-
struct io_priv {
struct adapter *padapter;
struct intf_hdl intf;
};
-uint ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-void sync_ioreq_enqueue(struct io_req *preq, struct io_queue *ioqueue);
-uint sync_ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
-struct io_req *alloc_ioreq(struct io_queue *pio_q);
-
-uint register_intf_hdl(u8 *dev, struct intf_hdl *pintfhdl);
-void unregister_intf_hdl(struct intf_hdl *pintfhdl);
-
-void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-
u8 _rtw_read8(struct adapter *adapter, u32 addr);
u16 _rtw_read16(struct adapter *adapter, u32 addr);
u32 _rtw_read32(struct adapter *adapter, u32 addr);
@@ -363,25 +338,6 @@ void async_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
int rtw_init_io_priv(struct adapter *padapter,
void (*set_intf_ops)(struct _io_ops *pops));
-uint alloc_io_queue(struct adapter *adapter);
-void free_io_queue(struct adapter *adapter);
-void async_bus_io(struct io_queue *pio_q);
-void bus_sync_io(struct io_queue *pio_q);
-u32 _ioreq2rwmem(struct io_queue *pio_q);
void dev_power_down(struct adapter *Adapter, u8 bpwrup);
-#define PlatformEFIOWrite1Byte(_a, _b, _c) \
- rtw_write8(_a, _b, _c)
-#define PlatformEFIOWrite2Byte(_a, _b, _c) \
- rtw_write16(_a, _b, _c)
-#define PlatformEFIOWrite4Byte(_a, _b, _c) \
- rtw_write32(_a, _b, _c)
-
-#define PlatformEFIORead1Byte(_a, _b) \
- rtw_read8(_a, _b)
-#define PlatformEFIORead2Byte(_a, _b) \
- rtw_read16(_a, _b)
-#define PlatformEFIORead4Byte(_a, _b) \
- rtw_read32(_a, _b)
-
#endif /* _RTL8711_IO_H_ */
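The PlatformEFIO* macros deleted above were literal aliases for rtw_read8/16/32 and rtw_write8/16/32, so any remaining caller can use the rtw_* accessors directly; the io_queue machinery beside them had no users left. Illustrative only, with REG_CR standing in for whichever register the caller touches:

	static void example_cr_poke(struct adapter *adapter)
	{
		u16 v;

		rtw_write8(adapter, REG_CR, 0x01);	/* was PlatformEFIOWrite1Byte() */
		v = rtw_read16(adapter, REG_CR);	/* was PlatformEFIORead2Byte()  */
		(void)v;
	}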
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
index 8772d1d178c6..f3aa924f2029 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
@@ -102,8 +102,6 @@ struct oid_obj_priv {
#if defined(_RTW_MP_IOCTL_C_)
static int oid_null_function(struct oid_par_priv *poid_par_priv) {
- _func_enter_;
- _func_exit_;
return NDIS_STATUS_SUCCESS;
}
#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 6cd988f867da..45c22efe93fe 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -106,13 +106,6 @@ SHALL not lock up more than one lock at a time!
#define traffic_threshold 10
#define traffic_scan_period 500
-struct sitesurvey_ctrl {
- u64 last_tx_pkts;
- uint last_rx_pkts;
- int traffic_busy;
- struct timer_list sitesurvey_ctrl_timer;
-};
-
struct rt_link_detect {
u32 NumTxOkInPeriod;
u32 NumRxOkInPeriod;
@@ -304,31 +297,6 @@ struct wifidirect_info {
u32 noa_start_time[P2P_MAX_NOA_NUM];
};
-struct tdls_ss_record { /* signal strength record */
- u8 macaddr[ETH_ALEN];
- u8 RxPWDBAll;
- u8 is_tdls_sta; /* true: direct link sta, false: else */
-};
-
-struct tdls_info {
- u8 ap_prohibited;
- uint setup_state;
- u8 sta_cnt;
- u8 sta_maximum; /* 1:tdls sta is equal (NUM_STA-1), reach max direct link number; 0: else; */
- struct tdls_ss_record ss_record;
- u8 macid_index; /* macid entry that is ready to write */
- u8 clear_cam; /* cam entry that is trying to clear, using it in direct link teardown */
- u8 ch_sensing;
- u8 cur_channel;
- u8 candidate_ch;
- u8 collect_pkt_num[MAX_CHANNEL_NUM];
- spinlock_t cmd_lock;
- spinlock_t hdl_lock;
- u8 watchdog_count;
- u8 dev_discovered; /* WFD_TDLS: for sigma test */
- u8 enable;
-};
-
struct mlme_priv {
spinlock_t lock;
int fw_state; /* shall we protect this variable? maybe not necessarily... */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index f0c982d6d5f2..09e2a3980ea7 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -236,13 +236,13 @@ enum SCAN_STATE {
struct mlme_handler {
unsigned int num;
char *str;
- unsigned int (*func)(struct adapter *adapt, union recv_frame *frame);
+ unsigned int (*func)(struct adapter *adapt, struct recv_frame *frame);
};
struct action_handler {
unsigned int num;
char *str;
- unsigned int (*func)(struct adapter *adapt, union recv_frame *frame);
+ unsigned int (*func)(struct adapter *adapt, struct recv_frame *frame);
};
struct ss_res {
@@ -490,7 +490,7 @@ int allocate_fw_sta_entry(struct adapter *padapter);
void flush_all_cam_entry(struct adapter *padapter);
void site_survey(struct adapter *padapter);
-u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame,
+u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame,
struct wlan_bssid_ex *bssid);
void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
struct adapter *adapter, bool update_ie);
@@ -544,7 +544,8 @@ unsigned int is_ap_in_wep(struct adapter *padapter);
unsigned int should_forbid_n_rate(struct adapter *padapter);
void report_join_res(struct adapter *padapter, int res);
-void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame);
+void report_survey_event(struct adapter *padapter,
+ struct recv_frame *precv_frame);
void report_surveydone_event(struct adapter *padapter);
void report_del_sta_event(struct adapter *padapter,
unsigned char *addr, unsigned short reason);
@@ -609,46 +610,46 @@ void start_clnt_join(struct adapter *padapter);
void start_create_ibss(struct adapter *padapter);
unsigned int OnAssocReq(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAssocRsp(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnProbeReq(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnProbeRsp(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int DoReserved(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnBeacon(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAtim(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnDisassoc(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAuth(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAuthClient(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnDeAuth(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int on_action_spct(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_qos(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_dls(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_back(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int on_action_public(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_ht(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_wmm(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_p2p(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res);
void mlmeext_sta_del_event_callback(struct adapter *padapter);
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index 4a0e9ff3d479..9a42859d612b 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -206,10 +206,6 @@ struct pwrctrl_priv {
u8 bInternalAutoSuspend;
u8 bInSuspend;
-#ifdef CONFIG_BT_COEXIST
- u8 bAutoResume;
- u8 autopm_cnt;
-#endif
u8 bSupportRemoteWakeup;
struct timer_list pwr_state_check_timer;
int pwr_state_check_interval;
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index be9c30c57419..bcbce46cec85 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -270,7 +270,7 @@ struct recv_buf {
len = (unsigned int )(tail - data);
*/
-struct recv_frame_hdr {
+struct recv_frame {
struct list_head list;
struct sk_buff *pkt;
struct sk_buff *pkt_newalloc;
@@ -289,23 +289,16 @@ struct recv_frame_hdr {
struct recv_reorder_ctrl *preorder_ctrl;
};
-union recv_frame {
- union {
- struct list_head list;
- struct recv_frame_hdr hdr;
- uint mem[RECVFRAME_HDR_ALIGN>>2];
- } u;
-};
-
-union recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
-union recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
-void rtw_init_recvframe(union recv_frame *precvframe,
+struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
+struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
+void rtw_init_recvframe(struct recv_frame *precvframe,
struct recv_priv *precvpriv);
-int rtw_free_recvframe(union recv_frame *precvframe,
+int rtw_free_recvframe(struct recv_frame *precvframe,
struct __queue *pfree_recv_queue);
#define rtw_dequeue_recvframe(queue) rtw_alloc_recvframe(queue)
-int _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
-int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
+int _rtw_enqueue_recvframe(struct recv_frame *precvframe,
+ struct __queue *queue);
+int rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue);
void rtw_free_recvframe_queue(struct __queue *pframequeue,
struct __queue *pfree_recv_queue);
u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter);
@@ -315,29 +308,20 @@ struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue);
void rtw_reordering_ctrl_timeout_handler(void *pcontext);
-static inline u8 *get_rxmem(union recv_frame *precvframe)
+static inline u8 *get_rxmem(struct recv_frame *precvframe)
{
/* always return rx_head... */
if (precvframe == NULL)
return NULL;
- return precvframe->u.hdr.rx_head;
+ return precvframe->rx_head;
}
-static inline u8 *get_rx_status(union recv_frame *precvframe)
+static inline u8 *get_rx_status(struct recv_frame *precvframe)
{
return get_rxmem(precvframe);
}
-static inline u8 *get_recvframe_data(union recv_frame *precvframe)
-{
- /* always return rx_data */
- if (precvframe == NULL)
- return NULL;
-
- return precvframe->u.hdr.rx_data;
-}
-
-static inline u8 *recvframe_push(union recv_frame *precvframe, int sz)
+static inline u8 *recvframe_push(struct recv_frame *precvframe, int sz)
{
/* append data before rx_data */
@@ -348,16 +332,16 @@ static inline u8 *recvframe_push(union recv_frame *precvframe, int sz)
*/
if (precvframe == NULL)
return NULL;
- precvframe->u.hdr.rx_data -= sz ;
- if (precvframe->u.hdr.rx_data < precvframe->u.hdr.rx_head) {
- precvframe->u.hdr.rx_data += sz;
+ precvframe->rx_data -= sz;
+ if (precvframe->rx_data < precvframe->rx_head) {
+ precvframe->rx_data += sz;
return NULL;
}
- precvframe->u.hdr.len += sz;
- return precvframe->u.hdr.rx_data;
+ precvframe->len += sz;
+ return precvframe->rx_data;
}
-static inline u8 *recvframe_pull(union recv_frame *precvframe, int sz)
+static inline u8 *recvframe_pull(struct recv_frame *precvframe, int sz)
{
/* rx_data += sz; move rx_data sz bytes hereafter */
@@ -366,16 +350,16 @@ static inline u8 *recvframe_pull(union recv_frame *precvframe, int sz)
if (precvframe == NULL)
return NULL;
- precvframe->u.hdr.rx_data += sz;
- if (precvframe->u.hdr.rx_data > precvframe->u.hdr.rx_tail) {
- precvframe->u.hdr.rx_data -= sz;
+ precvframe->rx_data += sz;
+ if (precvframe->rx_data > precvframe->rx_tail) {
+ precvframe->rx_data -= sz;
return NULL;
}
- precvframe->u.hdr.len -= sz;
- return precvframe->u.hdr.rx_data;
+ precvframe->len -= sz;
+ return precvframe->rx_data;
}
-static inline u8 *recvframe_put(union recv_frame *precvframe, int sz)
+static inline u8 *recvframe_put(struct recv_frame *precvframe, int sz)
{
/* used for append sz bytes from ptr to rx_tail, update rx_tail
* and return the updated rx_tail to the caller */
@@ -384,17 +368,17 @@ static inline u8 *recvframe_put(union recv_frame *precvframe, int sz)
if (precvframe == NULL)
return NULL;
- precvframe->u.hdr.rx_tail += sz;
+ precvframe->rx_tail += sz;
- if (precvframe->u.hdr.rx_tail > precvframe->u.hdr.rx_end) {
- precvframe->u.hdr.rx_tail -= sz;
+ if (precvframe->rx_tail > precvframe->rx_end) {
+ precvframe->rx_tail -= sz;
return NULL;
}
- precvframe->u.hdr.len += sz;
- return precvframe->u.hdr.rx_tail;
+ precvframe->len += sz;
+ return precvframe->rx_tail;
}
-static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, int sz)
+static inline u8 *recvframe_pull_tail(struct recv_frame *precvframe, int sz)
{
/* rmv data from rx_tail (by yitsen) */
@@ -404,64 +388,13 @@ static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, int sz)
if (precvframe == NULL)
return NULL;
- precvframe->u.hdr.rx_tail -= sz;
- if (precvframe->u.hdr.rx_tail < precvframe->u.hdr.rx_data) {
- precvframe->u.hdr.rx_tail += sz;
+ precvframe->rx_tail -= sz;
+ if (precvframe->rx_tail < precvframe->rx_data) {
+ precvframe->rx_tail += sz;
return NULL;
}
- precvframe->u.hdr.len -= sz;
- return precvframe->u.hdr.rx_tail;
-}
-
-static inline unsigned char *get_rxbuf_desc(union recv_frame *precvframe)
-{
- unsigned char *buf_desc;
-
- if (precvframe == NULL)
- return NULL;
- return buf_desc;
-}
-
-static inline union recv_frame *rxmem_to_recvframe(u8 *rxmem)
-{
- /* due to the design of 2048 bytes alignment of recv_frame,
- * we can reference the union recv_frame */
- /* from any given member of recv_frame. */
- /* rxmem indicates the any member/address in recv_frame */
-
- return (union recv_frame *)(((size_t)rxmem >> RXFRAME_ALIGN) << RXFRAME_ALIGN);
-}
-
-static inline union recv_frame *pkt_to_recvframe(struct sk_buff *pkt)
-{
- u8 *buf_star;
- union recv_frame *precv_frame;
- precv_frame = rxmem_to_recvframe((unsigned char *)buf_star);
-
- return precv_frame;
-}
-
-static inline u8 *pkt_to_recvmem(struct sk_buff *pkt)
-{
- /* return the rx_head */
-
- union recv_frame *precv_frame = pkt_to_recvframe(pkt);
-
- return precv_frame->u.hdr.rx_head;
-}
-
-static inline u8 *pkt_to_recvdata(struct sk_buff *pkt)
-{
- /* return the rx_data */
-
- union recv_frame *precv_frame = pkt_to_recvframe(pkt);
-
- return precv_frame->u.hdr.rx_data;
-}
-
-static inline int get_recvframe_len(union recv_frame *precvframe)
-{
- return precvframe->u.hdr.len;
+ precvframe->len -= sz;
+ return precvframe->rx_tail;
}
static inline s32 translate_percentage_to_dbm(u32 sig_stren_index)
@@ -480,6 +413,6 @@ struct sta_info;
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv);
-void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame);
+void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame);
#endif
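Collapsing union recv_frame into struct recv_frame removes one level of indirection; the u.hdr spelling and the alignment-based pointer tricks (rxmem_to_recvframe() and friends) that justified the union are deleted above. Call sites change mechanically:

	/* before */
	len  = precvframe->u.hdr.len;
	data = precvframe->u.hdr.rx_data;

	/* after */
	len  = precvframe->len;
	data = precvframe->rx_data;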
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index 1ac1dd31db68..62f5db169523 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -105,11 +105,6 @@ struct tx_desc {
__le32 txdw7;
};
-union txdesc {
- struct tx_desc txdesc;
- unsigned int value[TXDESC_SIZE>>2];
-};
-
struct hw_xmit {
struct __queue *sta_queue;
int accnt;
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 2e7307f259b6..a88ebf41bba1 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -694,7 +694,7 @@ struct WMM_para_element {
struct ADDBA_request {
unsigned char dialog_token;
- unsigned short BA_para_set;
+ __le16 BA_para_set;
unsigned short BA_timeout_value;
unsigned short BA_starting_seqctrl;
} __packed;
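BA_para_set comes straight off the air in little-endian order, so typing it __le16 lets sparse flag any access that skips the byte-order conversion. Host-order reads go through the usual helpers; a sketch (the function name is illustrative):

	static u16 addba_get_param(const struct ADDBA_request *req)
	{
		/* convert exactly once, at the frame boundary */
		return le16_to_cpu(req->BA_para_set);
	}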
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 4ad80ae1067f..2636e7f3dbb8 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -35,6 +35,7 @@
#include <rtw_mp.h>
#include <rtw_iol.h>
+#include <linux/vmalloc.h>
#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
@@ -472,8 +473,6 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
#endif /* CONFIG_88EU_P2P */
-_func_enter_;
-
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
@@ -614,9 +613,6 @@ _func_enter_;
exit:
kfree(pwep);
-
-_func_exit_;
-
return ret;
}
@@ -770,8 +766,6 @@ static int rtw_wx_get_name(struct net_device *dev,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("cmd_code =%x\n", info->cmd));
- _func_enter_;
-
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) {
/* parsing HT_CAP_IE */
p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
@@ -806,9 +800,6 @@ static int rtw_wx_get_name(struct net_device *dev,
} else {
snprintf(wrqu->name, IFNAMSIZ, "unassociated");
}
-
- _func_exit_;
-
return 0;
}
@@ -816,12 +807,7 @@ static int rtw_wx_set_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- _func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_wx_set_freq\n"));
-
- _func_exit_;
-
return 0;
}
@@ -854,8 +840,6 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
enum ndis_802_11_network_infra networkType;
int ret = 0;
- _func_enter_;
-
if (_FAIL == rtw_pwr_wakeup(padapter)) {
ret = -EPERM;
goto exit;
@@ -894,7 +878,6 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
}
rtw_setopmode_cmd(padapter, networkType);
exit:
- _func_exit_;
return ret;
}
@@ -906,8 +889,6 @@ static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_get_mode\n"));
- _func_enter_;
-
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
wrqu->mode = IW_MODE_INFRA;
else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
@@ -918,8 +899,6 @@ static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
else
wrqu->mode = IW_MODE_AUTO;
- _func_exit_;
-
return 0;
}
@@ -1011,8 +990,6 @@ static int rtw_wx_get_range(struct net_device *dev,
u16 val;
int i;
- _func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_range. cmd_code =%x\n", info->cmd));
wrqu->data.length = sizeof(*range);
@@ -1093,8 +1070,6 @@ static int rtw_wx_get_range(struct net_device *dev,
range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE |
IW_SCAN_CAPA_BSSID | IW_SCAN_CAPA_CHANNEL |
IW_SCAN_CAPA_MODE | IW_SCAN_CAPA_RATE;
- _func_exit_;
-
return 0;
}
@@ -1118,8 +1093,6 @@ static int rtw_wx_set_wap(struct net_device *dev,
struct wlan_network *pnetwork = NULL;
enum ndis_802_11_auth_mode authmode;
- _func_enter_;
-
if (_FAIL == rtw_pwr_wakeup(padapter)) {
ret = -1;
goto exit;
@@ -1138,15 +1111,15 @@ static int rtw_wx_set_wap(struct net_device *dev,
authmode = padapter->securitypriv.ndisauthtype;
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- pmlmepriv->pscanned = get_next(phead);
+ pmlmepriv->pscanned = phead->next;
while (1) {
if ((rtw_end_of_queue_search(phead, pmlmepriv->pscanned)) == true)
break;
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
- pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+ pmlmepriv->pscanned = pmlmepriv->pscanned->next;
dst_bssid = pnetwork->network.MacAddress;
@@ -1173,8 +1146,6 @@ static int rtw_wx_set_wap(struct net_device *dev,
exit:
- _func_exit_;
-
return ret;
}
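This hunk and the many like it below replace the driver-private get_next()/LIST_CONTAINOR() pair with plain ->next and container_of(). If the open-coded while(1)/rtw_end_of_queue_search() walk is ever cleaned up as well, the whole traversal collapses to list_for_each_entry(); a sketch under that assumption (not part of this patch, and bssid is a placeholder for whatever is being matched):

	struct wlan_network *pnetwork, *found = NULL;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(pnetwork, get_list_head(queue), list) {
		if (!memcmp(pnetwork->network.MacAddress, bssid, ETH_ALEN)) {
			found = pnetwork;
			break;
		}
	}
	spin_unlock_bh(&queue->lock);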
@@ -1192,17 +1163,12 @@ static int rtw_wx_get_wap(struct net_device *dev,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_wap\n"));
- _func_enter_;
-
if (((check_fwstate(pmlmepriv, _FW_LINKED)) == true) ||
((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) ||
((check_fwstate(pmlmepriv, WIFI_AP_STATE)) == true))
memcpy(wrqu->ap_addr.sa_data, pcur_bss->MacAddress, ETH_ALEN);
else
_rtw_memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
-
- _func_exit_;
-
return 0;
}
@@ -1252,7 +1218,6 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
#endif /* CONFIG_88EU_P2P */
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_set_scan\n"));
-_func_enter_;
if (padapter->registrypriv.mp_mode == 1) {
if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
ret = -1;
@@ -1383,7 +1348,6 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
@@ -1407,8 +1371,6 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan\n"));
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, (" Start of Query SIOCGIWSCAN .\n"));
- _func_enter_;
-
if (padapter->pwrctrlpriv.brfoffbyhw && padapter->bDriverStopped) {
ret = -EINVAL;
goto exit;
@@ -1440,7 +1402,7 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist))
@@ -1451,13 +1413,13 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
break;
}
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
/* report network only if the current channel set contains the channel to which this network belongs */
if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) >= 0)
ev = translate_scan(padapter, a, pnetwork, ev, stop);
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -1466,7 +1428,6 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
wrqu->data.flags = 0;
exit:
- _func_exit_;
return ret;
}
@@ -1490,7 +1451,6 @@ static int rtw_wx_set_essid(struct net_device *dev,
uint ret = 0, len;
- _func_enter_;
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
("+rtw_wx_set_essid: fw_state = 0x%08x\n", get_fwstate(pmlmepriv)));
@@ -1530,7 +1490,7 @@ static int rtw_wx_set_essid(struct net_device *dev,
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: ssid =[%s]\n", src_ssid));
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- pmlmepriv->pscanned = get_next(phead);
+ pmlmepriv->pscanned = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, pmlmepriv->pscanned) == true) {
@@ -1540,9 +1500,9 @@ static int rtw_wx_set_essid(struct net_device *dev,
break;
}
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
- pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+ pmlmepriv->pscanned = pmlmepriv->pscanned->next;
dst_ssid = pnetwork->network.Ssid.Ssid;
@@ -1583,7 +1543,6 @@ exit:
DBG_88E("<=%s, ret %d\n", __func__, ret);
- _func_exit_;
return ret;
}
@@ -1599,7 +1558,6 @@ static int rtw_wx_get_essid(struct net_device *dev,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_essid\n"));
- _func_enter_;
if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
@@ -1617,7 +1575,6 @@ static int rtw_wx_get_essid(struct net_device *dev,
exit:
- _func_exit_;
return ret;
}
@@ -1634,7 +1591,6 @@ static int rtw_wx_set_rate(struct net_device *dev,
u32 ratevalue = 0;
u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_set_rate\n"));
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("target_rate = %d, fixed = %d\n", target_rate, fixed));
@@ -1706,7 +1662,6 @@ set_rate:
ret = -1;
}
-_func_exit_;
return ret;
}
@@ -1734,7 +1689,6 @@ static int rtw_wx_set_rts(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- _func_enter_;
if (wrqu->rts.disabled) {
padapter->registrypriv.rts_thresh = 2347;
@@ -1748,7 +1702,6 @@ static int rtw_wx_set_rts(struct net_device *dev,
DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
- _func_exit_;
return 0;
}
@@ -1759,7 +1712,6 @@ static int rtw_wx_get_rts(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- _func_enter_;
DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
@@ -1767,7 +1719,6 @@ static int rtw_wx_get_rts(struct net_device *dev,
wrqu->rts.fixed = 0; /* no auto select */
/* wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); */
- _func_exit_;
return 0;
}
@@ -1778,7 +1729,6 @@ static int rtw_wx_set_frag(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- _func_enter_;
if (wrqu->frag.disabled) {
padapter->xmitpriv.frag_len = MAX_FRAG_THRESHOLD;
@@ -1792,7 +1742,6 @@ static int rtw_wx_set_frag(struct net_device *dev,
DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
- _func_exit_;
return 0;
}
@@ -1803,14 +1752,12 @@ static int rtw_wx_get_frag(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- _func_enter_;
DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
wrqu->frag.value = padapter->xmitpriv.frag_len;
wrqu->frag.fixed = 0; /* no auto select */
- _func_exit_;
return 0;
}
@@ -1844,7 +1791,6 @@ static int rtw_wx_set_enc(struct net_device *dev,
key = erq->flags & IW_ENCODE_INDEX;
- _func_enter_;
if (erq->flags & IW_ENCODE_DISABLED) {
DBG_88E("EncryptionDisabled\n");
@@ -1939,7 +1885,6 @@ static int rtw_wx_set_enc(struct net_device *dev,
exit:
- _func_exit_;
return ret;
}
@@ -1953,7 +1898,6 @@ static int rtw_wx_get_enc(struct net_device *dev,
struct iw_point *erq = &(wrqu->encoding);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- _func_enter_;
if (check_fwstate(pmlmepriv, _FW_LINKED) != true) {
if (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
@@ -2007,7 +1951,6 @@ static int rtw_wx_get_enc(struct net_device *dev,
erq->flags |= IW_ENCODE_DISABLED;
break;
}
- _func_exit_;
return ret;
}
@@ -2210,6 +2153,7 @@ static int rtw_wx_read32(struct net_device *dev,
u32 data32;
u32 bytes;
u8 *ptmp;
+ int rv;
padapter = (struct adapter *)rtw_netdev_priv(dev);
p = &wrqu->data;
@@ -2225,7 +2169,11 @@ static int rtw_wx_read32(struct net_device *dev,
bytes = 0;
addr = 0;
- sscanf(ptmp, "%d,%x", &bytes, &addr);
+ rv = sscanf(ptmp, "%d,%x", &bytes, &addr);
+ if (rv != 2) {
+ kfree(ptmp);
+ return -EINVAL;
+ }
switch (bytes) {
case 1:
@@ -2255,6 +2203,7 @@ static int rtw_wx_write32(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int rv;
u32 addr;
u32 data32;
@@ -2263,7 +2212,9 @@ static int rtw_wx_write32(struct net_device *dev,
bytes = 0;
addr = 0;
data32 = 0;
- sscanf(extra, "%d,%x,%x", &bytes, &addr, &data32);
+ rv = sscanf(extra, "%d,%x,%x", &bytes, &addr, &data32);
+ if (rv != 3)
+ return -EINVAL;
switch (bytes) {
case 1:
@@ -2607,13 +2558,13 @@ static int rtw_get_ap_info(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (hwaddr_aton_i(data, bssid)) {
DBG_88E("Invalid BSSID '%s'.\n", (u8 *)data);
@@ -2638,7 +2589,7 @@ static int rtw_get_ap_info(struct net_device *dev,
}
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -2690,13 +2641,13 @@ static int rtw_wps_start(struct net_device *dev,
struct iw_point *pdata = &wrqu->data;
u32 u32wps_start = 0;
- ret = copy_from_user((void *)&u32wps_start, pdata->pointer, 4);
- if (ret) {
+ if ((padapter->bDriverStopped) || (pdata == NULL)) {
ret = -EINVAL;
goto exit;
}
- if ((padapter->bDriverStopped) || (pdata == NULL)) {
+ ret = copy_from_user((void *)&u32wps_start, pdata->pointer, 4);
+ if (ret) {
ret = -EINVAL;
goto exit;
}
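The reorder above makes rtw_wps_start() check the cheap driver-state and pointer conditions before touching user memory. The hunk keeps the existing -EINVAL on a failed copy; the usual convention is -EFAULT, and sizing the copy from the destination avoids the magic 4. A sketch of that shape, reusing the names from the hunk:

	if (padapter->bDriverStopped || !pdata || !pdata->pointer)
		return -EINVAL;

	if (copy_from_user(&u32wps_start, pdata->pointer, sizeof(u32wps_start)))
		return -EFAULT;	/* fault while reading userspace */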
@@ -3110,13 +3061,13 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
u8 *wpsie;
uint wpsie_len = 0;
@@ -3134,7 +3085,7 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
}
break;
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3180,13 +3131,13 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
/* Commented by Albert 2011/05/18 */
/* Match the device address located in the P2P IE */
@@ -3215,7 +3166,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
}
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3264,13 +3215,13 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
u8 *wpsie;
uint wpsie_len = 0;
@@ -3295,7 +3246,7 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
break;
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3343,13 +3294,13 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
u8 *wpsie;
uint wpsie_len = 0;
@@ -3366,7 +3317,7 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
break;
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3414,13 +3365,13 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
/* Commented by Albert 20121226 */
/* Match the device address located in the P2P IE */
@@ -3440,7 +3391,7 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
}
}
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3496,19 +3447,19 @@ static int rtw_p2p_connect(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
uintPeerChannel = pnetwork->network.Configuration.DSConfig;
break;
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3591,13 +3542,13 @@ static int rtw_p2p_invite_req(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
/* Commented by Albert 2011/05/18 */
/* Match the device address located in the P2P IE */
@@ -3622,7 +3573,7 @@ static int rtw_p2p_invite_req(struct net_device *dev,
}
}
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3741,7 +3692,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
@@ -3750,7 +3701,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
if (uintPeerChannel != 0)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
/* Commented by Albert 2011/05/18 */
/* Match the device address located in the P2P IE */
@@ -3781,7 +3732,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
}
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3914,24 +3865,33 @@ static int rtw_p2p_get(struct net_device *dev,
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
if (padapter->bShowGetP2PState)
- DBG_88E("[%s] extra = %s\n", __func__, (char *)wrqu->data.pointer);
- if (!memcmp(wrqu->data.pointer, "status", 6)) {
+ DBG_88E("[%s] extra = %s\n", __func__,
+ (char __user *)wrqu->data.pointer);
+ if (!memcmp((__force const char *)wrqu->data.pointer,
+ "status", 6)) {
rtw_p2p_get_status(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "role", 4)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "role", 4)) {
rtw_p2p_get_role(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_ifa", 8)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "peer_ifa", 8)) {
rtw_p2p_get_peer_ifaddr(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "req_cm", 6)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "req_cm", 6)) {
rtw_p2p_get_req_cm(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_deva", 9)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "peer_deva", 9)) {
/* Get the P2P device address when receiving the provision discovery request frame. */
rtw_p2p_get_peer_devaddr(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "group_id", 8)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "group_id", 8)) {
rtw_p2p_get_groupid(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_deva_inv", 9)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "peer_deva_inv", 9)) {
/* Get the P2P device address when receiving the P2P Invitation request frame. */
rtw_p2p_get_peer_devaddr_by_invitation(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "op_ch", 5)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "op_ch", 5)) {
rtw_p2p_get_op_ch(dev, info, wrqu, extra);
}
#endif /* CONFIG_88EU_P2P */
@@ -3945,7 +3905,8 @@ static int rtw_p2p_get2(struct net_device *dev,
int ret = 0;
#ifdef CONFIG_88EU_P2P
- DBG_88E("[%s] extra = %s\n", __func__, (char *)wrqu->data.pointer);
+ DBG_88E("[%s] extra = %s\n", __func__,
+ (char __user *)wrqu->data.pointer);
if (!memcmp(extra, "wpsCM =", 6)) {
wrqu->data.length -= 6;
rtw_p2p_get_wps_configmethod(dev, info, wrqu, &extra[6]);
@@ -4436,12 +4397,12 @@ static int rtw_dbg_port(struct net_device *dev,
for (i = 0; i < NUM_STA; i++) {
phead = &(pstapriv->sta_hash[i]);
- plist = get_next(phead);
+ plist = phead->next;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ psta = container_of(plist, struct sta_info, hash_list);
- plist = get_next(plist);
+ plist = plist->next;
if (extra_arg == psta->aid) {
DBG_88E("sta's macaddr:%pM\n", (psta->hwaddr));
@@ -4507,11 +4468,9 @@ static int rtw_dbg_port(struct net_device *dev,
struct registry_priv *pregpriv = &padapter->registrypriv;
/* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, 0x3: enable both 2.4g and 5g */
/* default is set to enable 2.4GHZ for IOT issue with bufflao's AP at 5GHZ */
- if (pregpriv &&
- (extra_arg == 0 ||
- extra_arg == 1 ||
- extra_arg == 2 ||
- extra_arg == 3)) {
+ if (!pregpriv)
+ break;
+ if (extra_arg >= 0 && extra_arg <= 3) {
pregpriv->rx_stbc = extra_arg;
DBG_88E("set rx_stbc =%d\n", pregpriv->rx_stbc);
} else {
@@ -4523,7 +4482,9 @@ static int rtw_dbg_port(struct net_device *dev,
{
struct registry_priv *pregpriv = &padapter->registrypriv;
/* 0: disable, 0x1:enable (but wifi_spec should be 0), 0x2: force enable (don't care wifi_spec) */
- if (pregpriv && extra_arg >= 0 && extra_arg < 3) {
+ if (!pregpriv)
+ break;
+ if (extra_arg >= 0 && extra_arg < 3) {
pregpriv->ampdu_enable = extra_arg;
DBG_88E("set ampdu_enable =%d\n", pregpriv->ampdu_enable);
} else {
@@ -5648,12 +5609,12 @@ static int rtw_wx_set_priv(struct net_device *dev,
return -EFAULT;
len = dwrq->length;
- ext = rtw_vmalloc(len);
+ ext = vmalloc(len);
if (!ext)
return -ENOMEM;
if (copy_from_user(ext, dwrq->pointer, len)) {
- rtw_vmfree(ext, len);
+ vfree(ext);
return -EFAULT;
}
@@ -5693,7 +5654,7 @@ static int rtw_wx_set_priv(struct net_device *dev,
FREE_EXT:
- rtw_vmfree(ext, len);
+ vfree(ext);
return ret;
}
@@ -5709,10 +5670,14 @@ static int rtw_pm_set(struct net_device *dev,
DBG_88E("[%s] extra = %s\n", __func__, extra);
if (!memcmp(extra, "lps =", 4)) {
- sscanf(extra+4, "%u", &mode);
+ ret = sscanf(extra+4, "%u", &mode);
+ if (ret != 1)
+ return -EINVAL;
ret = rtw_pm_set_lps(padapter, mode);
} else if (!memcmp(extra, "ips =", 4)) {
- sscanf(extra+4, "%u", &mode);
+ ret = sscanf(extra+4, "%u", &mode);
+ if (ret != 1)
+ return -EINVAL;
ret = rtw_pm_set_ips(padapter, mode);
} else {
ret = -EINVAL;
@@ -6812,8 +6777,11 @@ static int rtw_mp_bandwidth(struct net_device *dev,
{
u32 bandwidth = 0, sg = 0;
struct adapter *padapter = rtw_netdev_priv(dev);
+ int rv;
- sscanf(extra, "40M =%d, shortGI =%d", &bandwidth, &sg);
+ rv = sscanf(extra, "40M =%d, shortGI =%d", &bandwidth, &sg);
+ if (rv != 2)
+ return -EINVAL;
if (bandwidth != HT_CHANNEL_WIDTH_40)
bandwidth = HT_CHANNEL_WIDTH_20;
@@ -6833,6 +6801,7 @@ static int rtw_mp_txpower(struct net_device *dev,
u32 idx_a = 0, idx_b = 0;
char *input = kmalloc(wrqu->length, GFP_KERNEL);
struct adapter *padapter = rtw_netdev_priv(dev);
+ int rv;
if (!input)
return -ENOMEM;
@@ -6840,7 +6809,11 @@ static int rtw_mp_txpower(struct net_device *dev,
kfree(input);
return -EFAULT;
}
- sscanf(input, "patha =%d, pathb =%d", &idx_a, &idx_b);
+ rv = sscanf(input, "patha =%d, pathb =%d", &idx_a, &idx_b);
+ if (rv != 2) {
+ kfree(input);
+ return -EINVAL;
+ }
sprintf(extra, "Set power level path_A:%d path_B:%d", idx_a, idx_b);
padapter->mppriv.txpoweridx = (u8)idx_a;
@@ -6934,6 +6907,7 @@ static int rtw_mp_ctx(struct net_device *dev,
u32 pkTx = 1, countPkTx = 1, cotuTx = 1, CarrSprTx = 1, scTx = 1, sgleTx = 1, stop = 1;
u32 bStartTest = 1;
u32 count = 0;
+ int rv;
struct mp_priv *pmp_priv;
struct pkt_attrib *pattrib;
@@ -6953,7 +6927,9 @@ static int rtw_mp_ctx(struct net_device *dev,
sgleTx = strncmp(extra, "background, stone", 20);
pkTx = strncmp(extra, "background, pkt", 20);
stop = strncmp(extra, "stop", 4);
- sscanf(extra, "count =%d, pkt", &count);
+ rv = sscanf(extra, "count =%d, pkt", &count);
+ if (rv != 2)
+ return -EINVAL;
_rtw_memset(extra, '\0', sizeof(*extra));
@@ -7312,6 +7288,7 @@ static int rtw_mp_phypara(struct net_device *dev,
{
char *input = kmalloc(wrqu->length, GFP_KERNEL);
u32 valxcap;
+ int rv;
if (!input)
return -ENOMEM;
@@ -7322,7 +7299,11 @@ static int rtw_mp_phypara(struct net_device *dev,
DBG_88E("%s:iwpriv in =%s\n", __func__, input);
- sscanf(input, "xcap =%d", &valxcap);
+ rv = sscanf(input, "xcap =%d", &valxcap);
+ if (rv != 1) {
+ kfree(input);
+ return -EINVAL;
+ }
kfree(input);
return 0;
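The pattern added throughout this file, capture sscanf()'s return value and bail out unless every conversion matched, closes a set of use-of-uninitialized-value paths when the iwpriv string is malformed. The value compared against must equal the number of conversion specifiers in the format (hence the single-%d check in the rtw_mp_ctx hunk above). Generic shape:

	int matched = sscanf(input, "xcap =%d", &valxcap);

	if (matched != 1) {	/* one %d in the format, so expect exactly 1 */
		kfree(input);
		return -EINVAL;
	}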
@@ -7888,6 +7869,7 @@ static int rtw_ioctl_wext_private(struct net_device *dev, union iwreq_data *wrq_
s32 len;
u8 *extra = NULL;
u32 extra_size = 0;
+ int rv;
s32 k;
const iw_handler *priv; /* Private ioctl */
@@ -7913,7 +7895,11 @@ static int rtw_ioctl_wext_private(struct net_device *dev, union iwreq_data *wrq_
ptr = input;
len = input_len;
- sscanf(ptr, "%16s", cmdname);
+ rv = sscanf(ptr, "%16s", cmdname);
+ if (rv != 1) {
+ err = -EINVAL;
+ goto exit;
+ }
cmdlen = strlen(cmdname);
DBG_88E("%s: cmd =%s\n", __func__, cmdname);
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index 57d1ff750d52..0624378efd6f 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -61,12 +61,10 @@ void rtw_init_mlme_timer(struct adapter *padapter)
void rtw_os_indicate_connect(struct adapter *adapter)
{
-_func_enter_;
rtw_indicate_wx_assoc_event(adapter);
netif_carrier_on(adapter->pnetdev);
if (adapter->pid[2] != 0)
rtw_signal_process(adapter->pid[2], SIGALRM);
-_func_exit_;
}
void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted)
@@ -119,11 +117,9 @@ void rtw_reset_securitypriv(struct adapter *adapter)
void rtw_os_indicate_disconnect(struct adapter *adapter)
{
-_func_enter_;
netif_carrier_off(adapter->pnetdev); /* Do it first for tx broadcast pkt after disconnection issue! */
rtw_indicate_wx_disassoc_event(adapter);
rtw_reset_securitypriv(adapter);
-_func_exit_;
}
void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
@@ -132,7 +128,6 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
u8 *buff, *p, i;
union iwreq_data wrqu;
-_func_enter_;
RT_TRACE(_module_mlme_osdep_c_, _drv_info_,
("+rtw_report_sec_ie, authmode=%d\n", authmode));
buff = NULL;
@@ -141,7 +136,7 @@ _func_enter_;
("rtw_report_sec_ie, authmode=%d\n", authmode));
buff = rtw_malloc(IW_CUSTOM_MAX);
if (!buff)
- goto exit;
+ return;
_rtw_memset(buff, 0, IW_CUSTOM_MAX);
p = buff;
p += sprintf(p, "ASSOCINFO(ReqIEs =");
@@ -157,8 +152,6 @@ _func_enter_;
wireless_send_event(adapter->pnetdev, IWEVCUSTOM, &wrqu, buff);
kfree(buff);
}
-exit:
-_func_exit_;
}
static void _survey_timer_hdl(void *FunctionContext)
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 7c9ee58f47bb..b225d1c07210 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -520,7 +520,6 @@ static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
uint status = _SUCCESS;
struct registry_priv *registry_par = &padapter->registrypriv;
-_func_enter_;
GlobalDebugLevel = rtw_debug;
registry_par->chip_version = (u8)rtw_chip_version;
@@ -588,7 +587,6 @@ _func_enter_;
snprintf(registry_par->ifname, 16, "%s", ifname);
snprintf(registry_par->if2name, 16, "%s", if2name);
registry_par->notch_filter = (u8)rtw_notch_filter;
-_func_exit_;
return status;
}
@@ -855,7 +853,6 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
{
u8 ret8 = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw\n"));
@@ -930,7 +927,6 @@ _func_enter_;
exit:
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n"));
- _func_exit_;
return ret8;
}
@@ -1115,7 +1111,7 @@ int netdev_open(struct net_device *pnetdev)
_enter_critical_mutex(padapter->hw_init_mutex, NULL);
ret = _netdev_open(pnetdev);
- _exit_critical_mutex(padapter->hw_init_mutex, NULL);
+ mutex_unlock(padapter->hw_init_mutex);
return ret;
}
@@ -1208,6 +1204,7 @@ int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
int netdev_close(struct net_device *pnetdev)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - drv_close\n"));
@@ -1246,6 +1243,9 @@ int netdev_close(struct net_device *pnetdev)
rtw_p2p_enable(padapter, P2P_ROLE_DISABLE);
#endif /* CONFIG_88EU_P2P */
+ kfree(dvobj->firmware.szFwBuffer);
+ dvobj->firmware.szFwBuffer = NULL;
+
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
DBG_88E("-88eu_drv - drv_close, bup =%d\n", padapter->bup);
return 0;
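Note: the netdev_open() change above drops the driver's _exit_critical_mutex() wrapper in favour of the plain kernel mutex API. A sketch of the standard lock/unlock pairing (struct and field names are hypothetical):

#include <linux/mutex.h>
#include <linux/types.h>

struct demo_dev {
	struct mutex hw_init_mutex;
	bool up;
};

static int demo_open(struct demo_dev *dev)
{
	mutex_lock(&dev->hw_init_mutex);	/* wrapper-free locking */
	dev->up = true;
	mutex_unlock(&dev->hw_init_mutex);	/* replaces _exit_critical_mutex() */
	return 0;
}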
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 8c3b077448af..2579a404a766 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -55,27 +55,6 @@ u32 rtw_atoi(u8 *s)
return num;
}
-inline u8 *_rtw_vmalloc(u32 sz)
-{
- u8 *pbuf;
- pbuf = vmalloc(sz);
- return pbuf;
-}
-
-inline u8 *_rtw_zvmalloc(u32 sz)
-{
- u8 *pbuf;
- pbuf = _rtw_vmalloc(sz);
- if (pbuf != NULL)
- memset(pbuf, 0, sz);
- return pbuf;
-}
-
-inline void _rtw_vmfree(u8 *pbuf, u32 sz)
-{
- vfree(pbuf);
-}
-
u8 *_rtw_malloc(u32 sz)
{
u8 *pbuf = NULL;
@@ -114,16 +93,6 @@ void rtw_mfree2d(void *pbuf, int h, int w, int size)
kfree(pbuf);
}
-int _rtw_memcmp(void *dst, void *src, u32 sz)
-{
-/* under Linux/GNU/GLibc, the return value of memcmp for two same
- * mem. chunk is 0 */
- if (!(memcmp(dst, src, sz)))
- return true;
- else
- return false;
-}
-
void _rtw_memset(void *pbuf, int c, u32 sz)
{
memset(pbuf, c, sz);
@@ -252,7 +221,7 @@ struct net_device *rtw_alloc_etherdev(int sizeof_priv)
pnpi = netdev_priv(pnetdev);
- pnpi->priv = rtw_zvmalloc(sizeof_priv);
+ pnpi->priv = vzalloc(sizeof_priv);
if (!pnpi->priv) {
free_netdev(pnetdev);
pnetdev = NULL;
@@ -276,7 +245,7 @@ void rtw_free_netdev(struct net_device *netdev)
if (!pnpi->priv)
goto RETURN;
- rtw_vmfree(pnpi->priv, pnpi->sizeof_priv);
+ vfree(pnpi->priv);
free_netdev(netdev);
RETURN:
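Note: osdep_service.c loses its _rtw_vmalloc()/_rtw_zvmalloc()/_rtw_vmfree() wrappers because vzalloc() and vfree() already provide zeroed allocation and size-free freeing. A minimal sketch with a hypothetical private structure:

#include <linux/vmalloc.h>
#include <linux/types.h>

struct demo_priv {
	u8 scratch[64 * 1024];	/* large enough to favour vmalloc space */
};

static struct demo_priv *demo_alloc(void)
{
	return vzalloc(sizeof(struct demo_priv));	/* zeroed on return */
}

static void demo_free(struct demo_priv *priv)
{
	vfree(priv);	/* takes no size argument, unlike rtw_vmfree() */
}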
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 2a18b3208a00..da397e4c6773 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -26,7 +26,6 @@
#include <recv_osdep.h>
#include <osdep_intf.h>
-#include <ethernet.h>
#include <usb_ops.h>
/* init os related resource in struct recv_priv */
@@ -36,16 +35,16 @@ int rtw_os_recv_resource_init(struct recv_priv *precvpriv,
return _SUCCESS;
}
-/* alloc os related resource in union recv_frame */
+/* alloc os related resource in struct recv_frame */
int rtw_os_recv_resource_alloc(struct adapter *padapter,
- union recv_frame *precvframe)
+ struct recv_frame *precvframe)
{
- precvframe->u.hdr.pkt_newalloc = NULL;
- precvframe->u.hdr.pkt = NULL;
+ precvframe->pkt_newalloc = NULL;
+ precvframe->pkt = NULL;
return _SUCCESS;
}
-/* free os related resource in union recv_frame */
+/* free os related resource in struct recv_frame */
void rtw_os_recv_resource_free(struct recv_priv *precvpriv)
{
}
@@ -118,24 +117,23 @@ void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
}
void rtw_hostapd_mlme_rx(struct adapter *padapter,
- union recv_frame *precv_frame)
+ struct recv_frame *precv_frame)
{
}
int rtw_recv_indicatepkt(struct adapter *padapter,
- union recv_frame *precv_frame)
+ struct recv_frame *precv_frame)
{
struct recv_priv *precvpriv;
struct __queue *pfree_recv_queue;
struct sk_buff *skb;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
precvpriv = &(padapter->recvpriv);
pfree_recv_queue = &(precvpriv->free_recv_queue);
- skb = precv_frame->u.hdr.pkt;
+ skb = precv_frame->pkt;
if (skb == NULL) {
RT_TRACE(_module_recv_osdep_c_, _drv_err_,
("rtw_recv_indicatepkt():skb == NULL something wrong!!!!\n"));
@@ -145,18 +143,18 @@ _func_enter_;
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("rtw_recv_indicatepkt():skb != NULL !!!\n"));
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
- ("rtw_recv_indicatepkt():precv_frame->u.hdr.rx_head =%p precv_frame->hdr.rx_data =%p\n",
- precv_frame->u.hdr.rx_head, precv_frame->u.hdr.rx_data));
+ ("rtw_recv_indicatepkt():precv_frame->rx_head =%p precv_frame->hdr.rx_data =%p\n",
+ precv_frame->rx_head, precv_frame->rx_data));
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
- ("precv_frame->hdr.rx_tail =%p precv_frame->u.hdr.rx_end =%p precv_frame->hdr.len =%d\n",
- precv_frame->u.hdr.rx_tail, precv_frame->u.hdr.rx_end,
- precv_frame->u.hdr.len));
+ ("precv_frame->hdr.rx_tail =%p precv_frame->rx_end =%p precv_frame->hdr.len =%d\n",
+ precv_frame->rx_tail, precv_frame->rx_end,
+ precv_frame->len));
- skb->data = precv_frame->u.hdr.rx_data;
+ skb->data = precv_frame->rx_data;
- skb_set_tail_pointer(skb, precv_frame->u.hdr.len);
+ skb_set_tail_pointer(skb, precv_frame->len);
- skb->len = precv_frame->u.hdr.len;
+ skb->len = precv_frame->len;
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n",
@@ -167,11 +165,11 @@ _func_enter_;
struct sk_buff *pskb2 = NULL;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
int bmcast = IS_MCAST(pattrib->dst);
- if (!_rtw_memcmp(pattrib->dst, myid(&padapter->eeprompriv),
- ETH_ALEN)) {
+ if (memcmp(pattrib->dst, myid(&padapter->eeprompriv),
+ ETH_ALEN)) {
if (bmcast) {
psta = rtw_get_bcmc_stainfo(padapter);
pskb2 = skb_clone(skb, GFP_ATOMIC);
@@ -209,14 +207,13 @@ _func_enter_;
_recv_indicatepkt_end:
/* pointers to NULL before rtw_free_recvframe() */
- precv_frame->u.hdr.pkt = NULL;
+ precv_frame->pkt = NULL;
rtw_free_recvframe(precv_frame, pfree_recv_queue);
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("\n rtw_recv_indicatepkt :after netif_rx!!!!\n"));
-_func_exit_;
return _SUCCESS;
@@ -225,7 +222,6 @@ _recv_indicatepkt_drop:
/* enqueue back to free_recv_queue */
rtw_free_recvframe(precv_frame, pfree_recv_queue);
-_func_exit_;
return _FAIL;
}
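Note the inverted test in rtw_recv_indicatepkt(): the deleted _rtw_memcmp() returned true for matching buffers, whereas memcmp() returns 0, so "!_rtw_memcmp(...)" becomes a bare "memcmp(...)" with the same meaning ("destination is not our address"). A small sketch of the resulting check (helper name is illustrative); for MAC addresses the kernel also offers ether_addr_equal(), though this patch keeps plain memcmp():

#include <linux/string.h>
#include <linux/types.h>
#include <linux/if_ether.h>

static bool frame_is_for_us(const u8 *dst, const u8 *own_addr)
{
	/* memcmp() == 0 means the two addresses match */
	return memcmp(dst, own_addr, ETH_ALEN) == 0;
}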
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index a3c2bc5922e4..ca2736d60b62 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -24,7 +24,6 @@
#include <rtw_android.h>
#include <osdep_service.h>
#include <rtw_debug.h>
-#include <ioctl_cfg80211.h>
#include <rtw_ioctl_set.h>
static const char *android_wifi_cmd_str[ANDROID_WIFI_CMD_MAX] = {
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 2f40ff5901d6..2e49cd583212 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -26,6 +26,7 @@
#include <hal_intf.h>
#include <rtw_version.h>
#include <linux/usb.h>
+#include <linux/vmalloc.h>
#include <osdep_intf.h>
#include <usb_vendor_req.h>
@@ -162,7 +163,6 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
struct usb_endpoint_descriptor *pendp_desc;
struct usb_device *pusbd;
-_func_enter_;
pdvobjpriv = (struct dvobj_priv *)rtw_zmalloc(sizeof(*pdvobjpriv));
if (pdvobjpriv == NULL)
@@ -255,7 +255,6 @@ free_dvobj:
pdvobjpriv = NULL;
}
exit:
-_func_exit_;
return pdvobjpriv;
}
@@ -263,7 +262,6 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
{
struct dvobj_priv *dvobj = usb_get_intfdata(usb_intf);
-_func_enter_;
usb_set_intfdata(usb_intf, NULL);
if (dvobj) {
@@ -288,7 +286,6 @@ _func_enter_;
usb_put_dev(interface_to_usbdev(usb_intf));
-_func_exit_;
}
static void chip_by_usb_id(struct adapter *padapter,
@@ -390,7 +387,6 @@ int rtw_hw_suspend(struct adapter *padapter)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct net_device *pnetdev = padapter->pnetdev;
- _func_enter_;
if ((!padapter->bup) || (padapter->bDriverStopped) ||
(padapter->bSurpriseRemoved)) {
@@ -443,7 +439,6 @@ int rtw_hw_suspend(struct adapter *padapter)
} else {
goto error_exit;
}
- _func_exit_;
return 0;
error_exit:
@@ -456,7 +451,6 @@ int rtw_hw_resume(struct adapter *padapter)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct net_device *pnetdev = padapter->pnetdev;
- _func_enter_;
if (padapter) { /* system resume */
DBG_88E("==> rtw_hw_resume\n");
@@ -488,7 +482,6 @@ int rtw_hw_resume(struct adapter *padapter)
goto error_exit;
}
- _func_exit_;
return 0;
error_exit:
@@ -507,7 +500,6 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
int ret = 0;
u32 start_time = jiffies;
- _func_enter_;
DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
@@ -564,7 +556,6 @@ exit:
DBG_88E("<=== %s return %d.............. in %dms\n", __func__
, ret, rtw_get_passing_time_ms(start_time));
- _func_exit_;
return ret;
}
@@ -588,7 +579,6 @@ int rtw_resume_process(struct adapter *padapter)
struct pwrctrl_priv *pwrpriv = NULL;
int ret = -1;
u32 start_time = jiffies;
- _func_enter_;
DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
@@ -627,7 +617,6 @@ exit:
DBG_88E("<=== %s return %d.............. in %dms\n", __func__,
ret, rtw_get_passing_time_ms(start_time));
- _func_exit_;
return ret;
}
@@ -647,7 +636,7 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
struct net_device *pnetdev = NULL;
int status = _FAIL;
- padapter = (struct adapter *)rtw_zvmalloc(sizeof(*padapter));
+ padapter = (struct adapter *)vzalloc(sizeof(*padapter));
if (padapter == NULL)
goto exit;
padapter->dvobj = dvobj;
@@ -747,7 +736,7 @@ free_adapter:
if (pnetdev)
rtw_free_netdev(pnetdev);
else if (padapter)
- rtw_vmfree((u8 *)padapter, sizeof(*padapter));
+ vfree(padapter);
padapter = NULL;
}
exit:
@@ -836,7 +825,6 @@ static void rtw_dev_remove(struct usb_interface *pusb_intf)
struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
struct adapter *padapter = dvobj->if1;
-_func_enter_;
DBG_88E("+rtw_dev_remove\n");
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+dev_remove()\n"));
@@ -855,7 +843,6 @@ _func_enter_;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-dev_remove()\n"));
DBG_88E("-r871xu_dev_remove, done\n");
-_func_exit_;
return;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index 7e3f2fadd5bf..fb0bba85373b 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -80,7 +80,6 @@ static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct hal_data_8188e *haldata;
-_func_enter_;
switch (pxmitbuf->flags) {
case VO_QUEUE_INX:
@@ -156,8 +155,6 @@ check_completion:
rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
-
-_func_exit_;
}
u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
@@ -174,7 +171,6 @@ u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
struct usb_device *pusbd = pdvobj->pusbdev;
-_func_enter_;
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("+usb_write_port\n"));
@@ -255,7 +251,6 @@ _func_enter_;
exit:
if (ret != _SUCCESS)
rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
-_func_exit_;
return ret;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 9005971084b7..2c8e3f72dec7 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -23,8 +23,6 @@
#include <osdep_service.h>
#include <drv_types.h>
-#include <if_ether.h>
-#include <ip.h>
#include <wifi.h>
#include <mlme_osdep.h>
#include <xmit_osdep.h>
@@ -39,7 +37,6 @@ uint rtw_remainder_len(struct pkt_file *pfile)
void _rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
{
-_func_enter_;
pfile->pkt = pktptr;
pfile->cur_addr = pktptr->data;
@@ -49,14 +46,12 @@ _func_enter_;
pfile->cur_buffer = pfile->buf_start;
-_func_exit_;
}
uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen)
{
uint len = 0;
-_func_enter_;
len = rtw_remainder_len(pfile);
len = (rlen > len) ? len : rlen;
@@ -67,21 +62,17 @@ _func_enter_;
pfile->cur_addr += len;
pfile->pkt_len -= len;
-_func_exit_;
return len;
}
int rtw_endofpktfile(struct pkt_file *pfile)
{
-_func_enter_;
if (pfile->pkt_len == 0) {
- _func_exit_;
return true;
}
-_func_exit_;
return false;
}
@@ -200,13 +191,13 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* free sta asoc_queue */
while (!rtw_end_of_queue_search(phead, plist)) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ plist = plist->next;
/* avoid come from STA1 and send back STA1 */
if (!memcmp(psta->hwaddr, &skb->data[6], 6))
@@ -246,7 +237,6 @@ int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
s32 res = 0;
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("+xmit_enry\n"));
@@ -282,7 +272,6 @@ drop_packet:
exit:
-_func_exit_;
return 0;
}
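Note: rtw_mlcst2unicst() above swaps the driver-private LIST_CONTAINOR()/get_next() helpers for container_of() and plain ->next pointers. The same walk expressed with the kernel's usual list_for_each() form, using an illustrative entry type:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/if_ether.h>

struct demo_sta {
	struct list_head asoc_list;	/* embedded list node */
	u8 hwaddr[ETH_ALEN];
};

static void walk_assoc_list(struct list_head *head)
{
	struct list_head *pos;
	struct demo_sta *sta;

	list_for_each(pos, head) {
		/* container_of() recovers the enclosing structure from the
		 * embedded list_head, which is all LIST_CONTAINOR() did.
		 */
		sta = container_of(pos, struct demo_sta, asoc_list);
		/* ... operate on sta ... */
	}
}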
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index eb33c517fcc8..53da61072992 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -133,14 +133,12 @@ void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
pTriple = (struct chnl_txpow_triple *)(pCoutryIe + 3);
for (i = 0; i < NumTriples; i++) {
if (MaxChnlNum >= pTriple->FirstChnl) {
- printk(KERN_INFO "Dot11d_UpdateCountryIe(): Invalid"
- " country IE, skip it........1\n");
+ printk(KERN_INFO "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
return;
}
if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl +
pTriple->NumChnls)) {
- printk(KERN_INFO "Dot11d_UpdateCountryIe(): Invalid "
- "country IE, skip it........2\n");
+ printk(KERN_INFO "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
return;
}
@@ -167,8 +165,7 @@ u8 DOT11D_GetMaxTxPwrInDbm(struct rtllib_device *dev, u8 Channel)
u8 MaxTxPwrInDbm = 255;
if (MAX_CHANNEL_NUMBER < Channel) {
- printk(KERN_INFO "DOT11D_GetMaxTxPwrInDbm(): Invalid "
- "Channel\n");
+ printk(KERN_INFO "DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
return MaxTxPwrInDbm;
}
if (pDot11dInfo->channel_map[Channel])
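Note: the dot11d.c hunks re-join log messages that had been split across source lines; kernel style keeps user-visible strings on a single line so they stay greppable, even when that exceeds 80 columns. A trivial sketch (message text is illustrative):

#include <linux/kernel.h>

static void report_bad_country_ie(int idx)
{
	/* one unsplit, greppable message */
	printk(KERN_INFO "demo: invalid country IE, skip triple %d\n", idx);
}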
diff --git a/drivers/staging/rtl8192e/rtl8192e/Kconfig b/drivers/staging/rtl8192e/rtl8192e/Kconfig
index 50e0d91a409a..ad82bc348a75 100644
--- a/drivers/staging/rtl8192e/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/rtl8192e/Kconfig
@@ -5,5 +5,4 @@ config RTL8192E
select WIRELESS_EXT
select WEXT_PRIV
select CRYPTO
- default N
---help---
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
index 356aec437963..4b94653c50d9 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
@@ -28,31 +28,6 @@
#include <linux/types.h>
#include <linux/pci.h>
-static inline void NdisRawWritePortUlong(u32 port, u32 val)
-{
- outl(val, port);
-}
-
-static inline void NdisRawWritePortUchar(u32 port, u8 val)
-{
- outb(val, port);
-}
-
-static inline void NdisRawReadPortUchar(u32 port, u8 *pval)
-{
- *pval = inb(port);
-}
-
-static inline void NdisRawReadPortUshort(u32 port, u16 *pval)
-{
- *pval = inw(port);
-}
-
-static inline void NdisRawReadPortUlong(u32 port, u32 *pval)
-{
- *pval = inl(port);
-}
-
struct mp_adapter {
u8 LinkCtrlReg;
@@ -70,33 +45,6 @@ struct mp_adapter {
u8 PciBridgeLinkCtrlReg;
};
-struct rt_pci_capab_header {
- unsigned char CapabilityID;
- unsigned char Next;
-};
-
-#define PCI_MAX_BRIDGE_NUMBER 255
-#define PCI_MAX_DEVICES 32
-#define PCI_MAX_FUNCTION 8
-
-#define PCI_CONF_ADDRESS 0x0CF8
-#define PCI_CONF_DATA 0x0CFC
-
-#define PCI_CLASS_BRIDGE_DEV 0x06
-#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04
-
-#define U1DONTCARE 0xFF
-#define U2DONTCARE 0xFFFF
-#define U4DONTCARE 0xFFFFFFFF
-
-#define INTEL_VENDOR_ID 0x8086
-#define SIS_VENDOR_ID 0x1039
-#define ATI_VENDOR_ID 0x1002
-#define ATI_DEVICE_ID 0x7914
-#define AMD_VENDOR_ID 0x1022
-
-#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10
-
struct net_device;
bool rtl8192_pci_findadapter(struct pci_dev *pdev, struct net_device *dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 6202358c2984..498995d833e7 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -609,7 +609,7 @@ static int r8192_wx_set_nick(struct net_device *dev,
if (wrqu->data.length > IW_ESSID_MAX_SIZE)
return -E2BIG;
down(&priv->wx_sem);
- wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
+ wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, wrqu->data.length);
up(&priv->wx_sem);
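Note: the r8192_wx_set_nick() change replaces a hand-written cast with min_t(), which casts both operands to the named type before comparing. A short sketch (buffer names are illustrative):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static void copy_nick(char *dst, size_t dst_sz, const char *src, u16 src_len)
{
	size_t n = min_t(size_t, src_len, dst_sz - 1);

	memcpy(dst, src, n);
	dst[n] = '\0';
}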
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 77964885b3f2..11d0a9d8ee59 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -939,12 +939,12 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
if (txb) {
if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
dev->stats.tx_packets++;
- dev->stats.tx_bytes += txb->payload_size;
+ dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
rtllib_softmac_xmit(txb, ieee);
} else {
if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
stats->tx_packets++;
- stats->tx_bytes += txb->payload_size;
+ stats->tx_bytes += le16_to_cpu(txb->payload_size);
return 0;
}
rtllib_txb_free(txb);
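Note: payload_size is stored little-endian (__le16) in the tx buffer descriptor, so it must pass through le16_to_cpu() before being added to a CPU-order byte counter; on big-endian hosts the raw value would be byte-swapped. A sketch of the conversion (struct name is illustrative):

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_txb {
	__le16 payload_size;	/* kept in wire/little-endian order */
};

static void account_tx_bytes(unsigned long *tx_bytes,
			     const struct demo_txb *txb)
{
	*tx_bytes += le16_to_cpu(txb->payload_size);
}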
diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig
index 3f055091b35f..3ee9d0d00fb6 100644
--- a/drivers/staging/rtl8192u/Kconfig
+++ b/drivers/staging/rtl8192u/Kconfig
@@ -5,5 +5,4 @@ config RTL8192U
select WIRELESS_EXT
select WEXT_PRIV
select CRYPTO
- default N
---help---
diff --git a/drivers/staging/rtl8192u/ieee80211/EndianFree.h b/drivers/staging/rtl8192u/ieee80211/EndianFree.h
deleted file mode 100644
index dc85fb913dc2..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/EndianFree.h
+++ /dev/null
@@ -1,194 +0,0 @@
-#ifndef __INC_ENDIANFREE_H
-#define __INC_ENDIANFREE_H
-
-/*
- * Call endian free function when
- * 1. Read/write packet content.
- * 2. Before write integer to IO.
- * 3. After read integer from IO.
- */
-
-#define __MACHINE_LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
-#define __MACHINE_BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net, ppc */
-
-#define BYTE_ORDER __MACHINE_LITTLE_ENDIAN
-
-#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
-// Convert data
-#define EF1Byte(_val) ((u8)(_val))
-#define EF2Byte(_val) ((u16)(_val))
-#define EF4Byte(_val) ((u32)(_val))
-
-#else
-// Convert data
-#define EF1Byte(_val) ((u8)(_val))
-#define EF2Byte(_val) (((((u16)(_val))&0x00ff)<<8)|((((u16)(_val))&0xff00)>>8))
-#define EF4Byte(_val) (((((u32)(_val))&0x000000ff)<<24)|\
- ((((u32)(_val))&0x0000ff00)<<8)|\
- ((((u32)(_val))&0x00ff0000)>>8)|\
- ((((u32)(_val))&0xff000000)>>24))
-#endif
-
-// Read data from memory
-#define ReadEF1Byte(_ptr) EF1Byte(*((u8 *)(_ptr)))
-#define ReadEF2Byte(_ptr) EF2Byte(*((u16 *)(_ptr)))
-#define ReadEF4Byte(_ptr) EF4Byte(*((u32 *)(_ptr)))
-
-// Write data to memory
-#define WriteEF1Byte(_ptr, _val) (*((u8 *)(_ptr)))=EF1Byte(_val)
-#define WriteEF2Byte(_ptr, _val) (*((u16 *)(_ptr)))=EF2Byte(_val)
-#define WriteEF4Byte(_ptr, _val) (*((u32 *)(_ptr)))=EF4Byte(_val)
-// Convert Host system specific byte ording (litten or big endia) to Network byte ording (big endian).
-// 2006.05.07, by rcnjko.
-#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
-#define H2N1BYTE(_val) ((u8)(_val))
-#define H2N2BYTE(_val) (((((u16)(_val))&0x00ff)<<8)|\
- ((((u16)(_val))&0xff00)>>8))
-#define H2N4BYTE(_val) (((((u32)(_val))&0x000000ff)<<24)|\
- ((((u32)(_val))&0x0000ff00)<<8) |\
- ((((u32)(_val))&0x00ff0000)>>8) |\
- ((((u32)(_val))&0xff000000)>>24))
-#else
-#define H2N1BYTE(_val) ((u8)(_val))
-#define H2N2BYTE(_val) ((u16)(_val))
-#define H2N4BYTE(_val) ((u32)(_val))
-#endif
-
-// Convert from Network byte ording (big endian) to Host system specific byte ording (litten or big endia).
-// 2006.05.07, by rcnjko.
-#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
-#define N2H1BYTE(_val) ((u8)(_val))
-#define N2H2BYTE(_val) (((((u16)(_val))&0x00ff)<<8)|\
- ((((u16)(_val))&0xff00)>>8))
-#define N2H4BYTE(_val) (((((u32)(_val))&0x000000ff)<<24)|\
- ((((u32)(_val))&0x0000ff00)<<8) |\
- ((((u32)(_val))&0x00ff0000)>>8) |\
- ((((u32)(_val))&0xff000000)>>24))
-#else
-#define N2H1BYTE(_val) ((u8)(_val))
-#define N2H2BYTE(_val) ((u16)(_val))
-#define N2H4BYTE(_val) ((u32)(_val))
-#endif
-
-//
-// Example:
-// BIT_LEN_MASK_32(0) => 0x00000000
-// BIT_LEN_MASK_32(1) => 0x00000001
-// BIT_LEN_MASK_32(2) => 0x00000003
-// BIT_LEN_MASK_32(32) => 0xFFFFFFFF
-//
-#define BIT_LEN_MASK_32(__BitLen) (0xFFFFFFFF >> (32 - (__BitLen)))
-//
-// Example:
-// BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
-// BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
-//
-#define BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen) (BIT_LEN_MASK_32(__BitLen) << (__BitOffset))
-
-//
-// Description:
-// Return 4-byte value in host byte ordering from
-// 4-byte pointer in litten-endian system.
-//
-#define LE_P4BYTE_TO_HOST_4BYTE(__pStart) (EF4Byte(*((u32 *)(__pStart))))
-
-//
-// Description:
-// Translate subfield (continuous bits in little-endian) of 4-byte value in litten byte to
-// 4-byte value in host byte ordering.
-//
-#define LE_BITS_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- ( LE_P4BYTE_TO_HOST_4BYTE(__pStart) >> (__BitOffset) ) \
- & \
- BIT_LEN_MASK_32(__BitLen) \
- )
-
-//
-// Description:
-// Mask subfield (continuous bits in little-endian) of 4-byte value in litten byte oredering
-// and return the result in 4-byte value in host byte ordering.
-//
-#define LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- LE_P4BYTE_TO_HOST_4BYTE(__pStart) \
- & \
- ( ~BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen) ) \
- )
-
-//
-// Description:
-// Set subfield of little-endian 4-byte value to specified value.
-//
-#define SET_BITS_TO_LE_4BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u32 *)(__pStart)) = \
- EF4Byte( \
- LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
- | \
- ( (((u32)__Value) & BIT_LEN_MASK_32(__BitLen)) << (__BitOffset) ) \
- );
-
-
-#define BIT_LEN_MASK_16(__BitLen) \
- (0xFFFF >> (16 - (__BitLen)))
-
-#define BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen) \
- (BIT_LEN_MASK_16(__BitLen) << (__BitOffset))
-
-#define LE_P2BYTE_TO_HOST_2BYTE(__pStart) \
- (EF2Byte(*((u16 *)(__pStart))))
-
-#define LE_BITS_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- ( LE_P2BYTE_TO_HOST_2BYTE(__pStart) >> (__BitOffset) ) \
- & \
- BIT_LEN_MASK_16(__BitLen) \
- )
-
-#define LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- LE_P2BYTE_TO_HOST_2BYTE(__pStart) \
- & \
- ( ~BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen) ) \
- )
-
-#define SET_BITS_TO_LE_2BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u16 *)(__pStart)) = \
- EF2Byte( \
- LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
- | \
- ( (((u16)__Value) & BIT_LEN_MASK_16(__BitLen)) << (__BitOffset) ) \
- );
-
-#define BIT_LEN_MASK_8(__BitLen) \
- (0xFF >> (8 - (__BitLen)))
-
-#define BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen) \
- (BIT_LEN_MASK_8(__BitLen) << (__BitOffset))
-
-#define LE_P1BYTE_TO_HOST_1BYTE(__pStart) \
- (EF1Byte(*((u8 *)(__pStart))))
-
-#define LE_BITS_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- ( LE_P1BYTE_TO_HOST_1BYTE(__pStart) >> (__BitOffset) ) \
- & \
- BIT_LEN_MASK_8(__BitLen) \
- )
-
-#define LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- LE_P1BYTE_TO_HOST_1BYTE(__pStart) \
- & \
- ( ~BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen) ) \
- )
-
-#define SET_BITS_TO_LE_1BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u8 *)(__pStart)) = \
- EF1Byte( \
- LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
- | \
- ( (((u8)__Value) & BIT_LEN_MASK_8(__BitLen)) << (__BitOffset) ) \
- );
-
-#endif // #ifndef __INC_ENDIANFREE_H
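Note: EndianFree.h duplicated byte-order and bit-field helpers that the kernel already provides. A rough equivalent of the deleted LE_BITS_TO_4BYTE() using get_unaligned_le32() — a sketch, not a drop-in for every caller, and bit_len is assumed to be in 1..32:

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 le_bits_to_u32(const void *p, unsigned int bit_off,
			  unsigned int bit_len)
{
	/* read 32 bits of little-endian data without alignment traps,
	 * then extract the requested bit field
	 */
	u32 v = get_unaligned_le32(p);

	return (v >> bit_off) & (0xFFFFFFFFu >> (32 - bit_len));
}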
diff --git a/drivers/staging/rtl8192u/ieee80211/aes.c b/drivers/staging/rtl8192u/ieee80211/aes.c
deleted file mode 100644
index abc1023cef65..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/aes.c
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
- * Cryptographic API.
- *
- * AES Cipher Algorithm.
- *
- * Based on Brian Gladman's code.
- *
- * Linux developers:
- * Alexander Kjeldaas <astor@fast.no>
- * Herbert Valerio Riedel <hvr@hvrlab.org>
- * Kyle McMartin <kyle@debian.org>
- * Adam J. Richter <adam@yggdrasil.com> (conversion to 2.5 API).
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * ---------------------------------------------------------------------------
- * Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
- * All rights reserved.
- *
- * LICENSE TERMS
- *
- * The free distribution and use of this software in both source and binary
- * form is allowed (with or without changes) provided that:
- *
- * 1. distributions of this source code include the above copyright
- * notice, this list of conditions and the following disclaimer;
- *
- * 2. distributions in binary form include the above copyright
- * notice, this list of conditions and the following disclaimer
- * in the documentation and/or other associated materials;
- *
- * 3. the copyright holder's name is not used to endorse products
- * built using this software without specific written permission.
- *
- * ALTERNATIVELY, provided that this notice is retained in full, this product
- * may be distributed under the terms of the GNU General Public License (GPL),
- * in which case the provisions of the GPL apply INSTEAD OF those given above.
- *
- * DISCLAIMER
- *
- * This software is provided 'as is' with no explicit or implied warranties
- * in respect of its properties, including, but not limited to, correctness
- * and/or fitness for purpose.
- * ---------------------------------------------------------------------------
- */
-
-/* Some changes from the Gladman version:
- s/RIJNDAEL(e_key)/E_KEY/g
- s/RIJNDAEL(d_key)/D_KEY/g
-*/
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <asm/byteorder.h>
-
-#define AES_MIN_KEY_SIZE 16
-#define AES_MAX_KEY_SIZE 32
-
-#define AES_BLOCK_SIZE 16
-
-static inline
-u32 generic_rotr32 (const u32 x, const unsigned bits)
-{
- const unsigned n = bits % 32;
- return (x >> n) | (x << (32 - n));
-}
-
-static inline
-u32 generic_rotl32 (const u32 x, const unsigned bits)
-{
- const unsigned n = bits % 32;
- return (x << n) | (x >> (32 - n));
-}
-
-#define rotl generic_rotl32
-#define rotr generic_rotr32
-
-/*
- * #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
- */
-inline static u8
-byte(const u32 x, const unsigned n)
-{
- return x >> (n << 3);
-}
-
-#define u32_in(x) le32_to_cpu(*(const u32 *)(x))
-#define u32_out(to, from) (*(u32 *)(to) = cpu_to_le32(from))
-
-struct aes_ctx {
- int key_length;
- u32 E[60];
- u32 D[60];
-};
-
-#define E_KEY ctx->E
-#define D_KEY ctx->D
-
-static u8 pow_tab[256] __initdata;
-static u8 log_tab[256] __initdata;
-static u8 sbx_tab[256] __initdata;
-static u8 isb_tab[256] __initdata;
-static u32 rco_tab[10];
-static u32 ft_tab[4][256];
-static u32 it_tab[4][256];
-
-static u32 fl_tab[4][256];
-static u32 il_tab[4][256];
-
-static inline u8 __init
-f_mult (u8 a, u8 b)
-{
- u8 aa = log_tab[a], cc = aa + log_tab[b];
-
- return pow_tab[cc + (cc < aa ? 1 : 0)];
-}
-
-#define ff_mult(a,b) (a && b ? f_mult(a, b) : 0)
-
-#define f_rn(bo, bi, n, k) \
- bo[n] = ft_tab[0][byte(bi[n],0)] ^ \
- ft_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
- ft_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
- ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
-
-#define i_rn(bo, bi, n, k) \
- bo[n] = it_tab[0][byte(bi[n],0)] ^ \
- it_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
- it_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
- it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
-
-#define ls_box(x) \
- ( fl_tab[0][byte(x, 0)] ^ \
- fl_tab[1][byte(x, 1)] ^ \
- fl_tab[2][byte(x, 2)] ^ \
- fl_tab[3][byte(x, 3)] )
-
-#define f_rl(bo, bi, n, k) \
- bo[n] = fl_tab[0][byte(bi[n],0)] ^ \
- fl_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
- fl_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
- fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
-
-#define i_rl(bo, bi, n, k) \
- bo[n] = il_tab[0][byte(bi[n],0)] ^ \
- il_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
- il_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
- il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
-
-static void __init
-gen_tabs (void)
-{
- u32 i, t;
- u8 p, q;
-
- /* log and power tables for GF(2**8) finite field with
- 0x011b as modular polynomial - the simplest primitive
- root is 0x03, used here to generate the tables */
-
- for (i = 0, p = 1; i < 256; ++i) {
- pow_tab[i] = (u8) p;
- log_tab[p] = (u8) i;
-
- p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
- }
-
- log_tab[1] = 0;
-
- for (i = 0, p = 1; i < 10; ++i) {
- rco_tab[i] = p;
-
- p = (p << 1) ^ (p & 0x80 ? 0x01b : 0);
- }
-
- for (i = 0; i < 256; ++i) {
- p = (i ? pow_tab[255 - log_tab[i]] : 0);
- q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
- p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
- sbx_tab[i] = p;
- isb_tab[p] = (u8) i;
- }
-
- for (i = 0; i < 256; ++i) {
- p = sbx_tab[i];
-
- t = p;
- fl_tab[0][i] = t;
- fl_tab[1][i] = rotl (t, 8);
- fl_tab[2][i] = rotl (t, 16);
- fl_tab[3][i] = rotl (t, 24);
-
- t = ((u32) ff_mult (2, p)) |
- ((u32) p << 8) |
- ((u32) p << 16) | ((u32) ff_mult (3, p) << 24);
-
- ft_tab[0][i] = t;
- ft_tab[1][i] = rotl (t, 8);
- ft_tab[2][i] = rotl (t, 16);
- ft_tab[3][i] = rotl (t, 24);
-
- p = isb_tab[i];
-
- t = p;
- il_tab[0][i] = t;
- il_tab[1][i] = rotl (t, 8);
- il_tab[2][i] = rotl (t, 16);
- il_tab[3][i] = rotl (t, 24);
-
- t = ((u32) ff_mult (14, p)) |
- ((u32) ff_mult (9, p) << 8) |
- ((u32) ff_mult (13, p) << 16) |
- ((u32) ff_mult (11, p) << 24);
-
- it_tab[0][i] = t;
- it_tab[1][i] = rotl (t, 8);
- it_tab[2][i] = rotl (t, 16);
- it_tab[3][i] = rotl (t, 24);
- }
-}
-
-#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
-
-#define imix_col(y,x) \
- u = star_x(x); \
- v = star_x(u); \
- w = star_x(v); \
- t = w ^ (x); \
- (y) = u ^ v ^ w; \
- (y) ^= rotr(u ^ t, 8) ^ \
- rotr(v ^ t, 16) ^ \
- rotr(t,24)
-
-/* initialise the key schedule from the user supplied key */
-
-#define loop4(i) \
-{ t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i]; \
- t ^= E_KEY[4 * i]; E_KEY[4 * i + 4] = t; \
- t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t; \
- t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t; \
- t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t; \
-}
-
-#define loop6(i) \
-{ t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i]; \
- t ^= E_KEY[6 * i]; E_KEY[6 * i + 6] = t; \
- t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t; \
- t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t; \
- t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t; \
- t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t; \
- t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t; \
-}
-
-#define loop8(i) \
-{ t = rotr(t, 8); ; t = ls_box(t) ^ rco_tab[i]; \
- t ^= E_KEY[8 * i]; E_KEY[8 * i + 8] = t; \
- t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t; \
- t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t; \
- t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t; \
- t = E_KEY[8 * i + 4] ^ ls_box(t); \
- E_KEY[8 * i + 12] = t; \
- t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t; \
- t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t; \
- t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
-}
-
-static int
-aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
-{
- struct aes_ctx *ctx = ctx_arg;
- u32 i, t, u, v, w;
-
- if (key_len != 16 && key_len != 24 && key_len != 32) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
-
- ctx->key_length = key_len;
-
- E_KEY[0] = u32_in (in_key);
- E_KEY[1] = u32_in (in_key + 4);
- E_KEY[2] = u32_in (in_key + 8);
- E_KEY[3] = u32_in (in_key + 12);
-
- switch (key_len) {
- case 16:
- t = E_KEY[3];
- for (i = 0; i < 10; ++i)
- loop4 (i);
- break;
-
- case 24:
- E_KEY[4] = u32_in (in_key + 16);
- t = E_KEY[5] = u32_in (in_key + 20);
- for (i = 0; i < 8; ++i)
- loop6 (i);
- break;
-
- case 32:
- E_KEY[4] = u32_in (in_key + 16);
- E_KEY[5] = u32_in (in_key + 20);
- E_KEY[6] = u32_in (in_key + 24);
- t = E_KEY[7] = u32_in (in_key + 28);
- for (i = 0; i < 7; ++i)
- loop8 (i);
- break;
- }
-
- D_KEY[0] = E_KEY[0];
- D_KEY[1] = E_KEY[1];
- D_KEY[2] = E_KEY[2];
- D_KEY[3] = E_KEY[3];
-
- for (i = 4; i < key_len + 24; ++i) {
- imix_col (D_KEY[i], E_KEY[i]);
- }
-
- return 0;
-}
-
-/* encrypt a block of text */
-
-#define f_nround(bo, bi, k) \
- f_rn(bo, bi, 0, k); \
- f_rn(bo, bi, 1, k); \
- f_rn(bo, bi, 2, k); \
- f_rn(bo, bi, 3, k); \
- k += 4
-
-#define f_lround(bo, bi, k) \
- f_rl(bo, bi, 0, k); \
- f_rl(bo, bi, 1, k); \
- f_rl(bo, bi, 2, k); \
- f_rl(bo, bi, 3, k)
-
-static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
-{
- const struct aes_ctx *ctx = ctx_arg;
- u32 b0[4], b1[4];
- const u32 *kp = E_KEY + 4;
-
- b0[0] = u32_in (in) ^ E_KEY[0];
- b0[1] = u32_in (in + 4) ^ E_KEY[1];
- b0[2] = u32_in (in + 8) ^ E_KEY[2];
- b0[3] = u32_in (in + 12) ^ E_KEY[3];
-
- if (ctx->key_length > 24) {
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- }
-
- if (ctx->key_length > 16) {
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- }
-
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- f_nround (b1, b0, kp);
- f_lround (b0, b1, kp);
-
- u32_out (out, b0[0]);
- u32_out (out + 4, b0[1]);
- u32_out (out + 8, b0[2]);
- u32_out (out + 12, b0[3]);
-}
-
-/* decrypt a block of text */
-
-#define i_nround(bo, bi, k) \
- i_rn(bo, bi, 0, k); \
- i_rn(bo, bi, 1, k); \
- i_rn(bo, bi, 2, k); \
- i_rn(bo, bi, 3, k); \
- k -= 4
-
-#define i_lround(bo, bi, k) \
- i_rl(bo, bi, 0, k); \
- i_rl(bo, bi, 1, k); \
- i_rl(bo, bi, 2, k); \
- i_rl(bo, bi, 3, k)
-
-static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in)
-{
- const struct aes_ctx *ctx = ctx_arg;
- u32 b0[4], b1[4];
- const int key_len = ctx->key_length;
- const u32 *kp = D_KEY + key_len + 20;
-
- b0[0] = u32_in (in) ^ E_KEY[key_len + 24];
- b0[1] = u32_in (in + 4) ^ E_KEY[key_len + 25];
- b0[2] = u32_in (in + 8) ^ E_KEY[key_len + 26];
- b0[3] = u32_in (in + 12) ^ E_KEY[key_len + 27];
-
- if (key_len > 24) {
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- }
-
- if (key_len > 16) {
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- }
-
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- i_nround (b1, b0, kp);
- i_lround (b0, b1, kp);
-
- u32_out (out, b0[0]);
- u32_out (out + 4, b0[1]);
- u32_out (out + 8, b0[2]);
- u32_out (out + 12, b0[3]);
-}
-
-
-static struct crypto_alg aes_alg = {
- .cra_name = "aes",
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
- .cra_u = {
- .cipher = {
- .cia_min_keysize = AES_MIN_KEY_SIZE,
- .cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
- .cia_encrypt = aes_encrypt,
- .cia_decrypt = aes_decrypt
- }
- }
-};
-
-static int __init aes_init(void)
-{
- gen_tabs();
- return crypto_register_alg(&aes_alg);
-}
-
-static void __exit aes_fini(void)
-{
- crypto_unregister_alg(&aes_alg);
-}
-
-module_init(aes_init);
-module_exit(aes_fini);
-
-MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/staging/rtl8192u/ieee80211/arc4.c b/drivers/staging/rtl8192u/ieee80211/arc4.c
deleted file mode 100644
index b790e9ad104c..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/arc4.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Cryptographic API
- *
- * ARC4 Cipher Algorithm
- *
- * Jon Oberheide <jon@oberheide.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include "rtl_crypto.h"
-
-#define ARC4_MIN_KEY_SIZE 1
-#define ARC4_MAX_KEY_SIZE 256
-#define ARC4_BLOCK_SIZE 1
-
-struct arc4_ctx {
- u8 S[256];
- u8 x, y;
-};
-
-static int arc4_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
-{
- struct arc4_ctx *ctx = ctx_arg;
- int i, j = 0, k = 0;
-
- ctx->x = 1;
- ctx->y = 0;
-
- for(i = 0; i < 256; i++)
- ctx->S[i] = i;
-
- for(i = 0; i < 256; i++)
- {
- u8 a = ctx->S[i];
- j = (j + in_key[k] + a) & 0xff;
- ctx->S[i] = ctx->S[j];
- ctx->S[j] = a;
- if((unsigned int)++k >= key_len)
- k = 0;
- }
-
- return 0;
-}
-
-static void arc4_crypt(void *ctx_arg, u8 *out, const u8 *in)
-{
- struct arc4_ctx *ctx = ctx_arg;
-
- u8 *const S = ctx->S;
- u8 x = ctx->x;
- u8 y = ctx->y;
- u8 a, b;
-
- a = S[x];
- y = (y + a) & 0xff;
- b = S[y];
- S[x] = b;
- S[y] = a;
- x = (x + 1) & 0xff;
- *out++ = *in ^ S[(a + b) & 0xff];
-
- ctx->x = x;
- ctx->y = y;
-}
-
-static struct crypto_alg arc4_alg = {
- .cra_name = "arc4",
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = ARC4_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct arc4_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(arc4_alg.cra_list),
- .cra_u = { .cipher = {
- .cia_min_keysize = ARC4_MIN_KEY_SIZE,
- .cia_max_keysize = ARC4_MAX_KEY_SIZE,
- .cia_setkey = arc4_set_key,
- .cia_encrypt = arc4_crypt,
- .cia_decrypt = arc4_crypt } }
-};
-
-static int __init arc4_init(void)
-{
- return crypto_register_alg(&arc4_alg);
-}
-
-
-static void __exit arc4_exit(void)
-{
- crypto_unregister_alg(&arc4_alg);
-}
-
-module_init(arc4_init);
-module_exit(arc4_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
-MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
diff --git a/drivers/staging/rtl8192u/ieee80211/autoload.c b/drivers/staging/rtl8192u/ieee80211/autoload.c
deleted file mode 100644
index c97756f3b2ea..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/autoload.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Algorithm autoloader.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include "kmap_types.h"
-
-#include <linux/kernel.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/string.h>
-#include <linux/kmod.h>
-#include "internal.h"
-
-/*
- * A far more intelligent version of this is planned. For now, just
- * try an exact match on the name of the algorithm.
- */
-void crypto_alg_autoload(const char *name)
-{
- request_module(name);
-}
-
-struct crypto_alg *crypto_alg_mod_lookup(const char *name)
-{
- struct crypto_alg *alg = crypto_alg_lookup(name);
- if (alg == NULL) {
- crypto_alg_autoload(name);
- alg = crypto_alg_lookup(name);
- }
- return alg;
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/cipher.c b/drivers/staging/rtl8192u/ieee80211/cipher.c
deleted file mode 100644
index d47345c4adcf..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/cipher.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Cipher operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include <linux/kernel.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <asm/scatterlist.h>
-#include "internal.h"
-#include "scatterwalk.h"
-
-typedef void (cryptfn_t)(void *, u8 *, const u8 *);
-typedef void (procfn_t)(struct crypto_tfm *, u8 *,
- u8*, cryptfn_t, int enc, void *, int);
-
-static inline void xor_64(u8 *a, const u8 *b)
-{
- ((u32 *)a)[0] ^= ((u32 *)b)[0];
- ((u32 *)a)[1] ^= ((u32 *)b)[1];
-}
-
-static inline void xor_128(u8 *a, const u8 *b)
-{
- ((u32 *)a)[0] ^= ((u32 *)b)[0];
- ((u32 *)a)[1] ^= ((u32 *)b)[1];
- ((u32 *)a)[2] ^= ((u32 *)b)[2];
- ((u32 *)a)[3] ^= ((u32 *)b)[3];
-}
-
-
-/*
- * Generic encrypt/decrypt wrapper for ciphers, handles operations across
- * multiple page boundaries by using temporary blocks. In user context,
- * the kernel is given a chance to schedule us once per block.
- */
-static int crypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, cryptfn_t crfn,
- procfn_t prfn, int enc, void *info)
-{
- struct scatter_walk walk_in, walk_out;
- const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
- u8 tmp_src[bsize];
- u8 tmp_dst[bsize];
-
- if (!nbytes)
- return 0;
-
- if (nbytes % bsize) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
- return -EINVAL;
- }
-
- scatterwalk_start(&walk_in, src);
- scatterwalk_start(&walk_out, dst);
-
- for(;;) {
- u8 *src_p, *dst_p;
- int in_place;
-
- scatterwalk_map(&walk_in);
- scatterwalk_map(&walk_out);
- src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
- dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
- in_place = scatterwalk_samebuf(&walk_in, &walk_out,
- src_p, dst_p);
-
- nbytes -= bsize;
-
- scatterwalk_copychunks(src_p, &walk_in, bsize, 0);
-
- prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);
-
- scatterwalk_done(&walk_in, nbytes);
-
- scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
- scatterwalk_done(&walk_out, nbytes);
-
- if (!nbytes)
- return 0;
-
- crypto_yield(tfm);
- }
-}
-
-static void cbc_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
- cryptfn_t fn, int enc, void *info, int in_place)
-{
- u8 *iv = info;
-
- /* Null encryption */
- if (!iv)
- return;
-
- if (enc) {
- tfm->crt_u.cipher.cit_xor_block(iv, src);
- fn(crypto_tfm_ctx(tfm), dst, iv);
- memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
- } else {
- u8 stack[in_place ? crypto_tfm_alg_blocksize(tfm) : 0];
- u8 *buf = in_place ? stack : dst;
-
- fn(crypto_tfm_ctx(tfm), buf, src);
- tfm->crt_u.cipher.cit_xor_block(buf, iv);
- memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
- if (buf != dst)
- memcpy(dst, buf, crypto_tfm_alg_blocksize(tfm));
- }
-}
-
-static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
- cryptfn_t fn, int enc, void *info, int in_place)
-{
- fn(crypto_tfm_ctx(tfm), dst, src);
-}
-
-static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
-{
- struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
-
- if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- } else
- return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
- &tfm->crt_flags);
-}
-
-static int ecb_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_encrypt,
- ecb_process, 1, NULL);
-}
-
-static int ecb_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_decrypt,
- ecb_process, 1, NULL);
-}
-
-static int cbc_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_encrypt,
- cbc_process, 1, tfm->crt_cipher.cit_iv);
-}
-
-static int cbc_encrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_encrypt,
- cbc_process, 1, iv);
-}
-
-static int cbc_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_decrypt,
- cbc_process, 0, tfm->crt_cipher.cit_iv);
-}
-
-static int cbc_decrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_decrypt,
- cbc_process, 0, iv);
-}
-
-static int nocrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return -ENOSYS;
-}
-
-static int nocrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- return -ENOSYS;
-}
-
-int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
-{
- u32 mode = flags & CRYPTO_TFM_MODE_MASK;
-
- tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
- if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
- tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;
-
- return 0;
-}
-
-int crypto_init_cipher_ops(struct crypto_tfm *tfm)
-{
- int ret = 0;
- struct cipher_tfm *ops = &tfm->crt_cipher;
-
- ops->cit_setkey = setkey;
-
- switch (tfm->crt_cipher.cit_mode) {
- case CRYPTO_TFM_MODE_ECB:
- ops->cit_encrypt = ecb_encrypt;
- ops->cit_decrypt = ecb_decrypt;
- break;
-
- case CRYPTO_TFM_MODE_CBC:
- ops->cit_encrypt = cbc_encrypt;
- ops->cit_decrypt = cbc_decrypt;
- ops->cit_encrypt_iv = cbc_encrypt_iv;
- ops->cit_decrypt_iv = cbc_decrypt_iv;
- break;
-
- case CRYPTO_TFM_MODE_CFB:
- ops->cit_encrypt = nocrypt;
- ops->cit_decrypt = nocrypt;
- ops->cit_encrypt_iv = nocrypt_iv;
- ops->cit_decrypt_iv = nocrypt_iv;
- break;
-
- case CRYPTO_TFM_MODE_CTR:
- ops->cit_encrypt = nocrypt;
- ops->cit_decrypt = nocrypt;
- ops->cit_encrypt_iv = nocrypt_iv;
- ops->cit_decrypt_iv = nocrypt_iv;
- break;
-
- default:
- BUG();
- }
-
- if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
-
- switch (crypto_tfm_alg_blocksize(tfm)) {
- case 8:
- ops->cit_xor_block = xor_64;
- break;
-
- case 16:
- ops->cit_xor_block = xor_128;
- break;
-
- default:
- printk(KERN_WARNING "%s: block size %u not supported\n",
- crypto_tfm_alg_name(tfm),
- crypto_tfm_alg_blocksize(tfm));
- ret = -EINVAL;
- goto out;
- }
-
- ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
- ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
- if (ops->cit_iv == NULL)
- ret = -ENOMEM;
- }
-
-out:
- return ret;
-}
-
-void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
-{
- kfree(tfm->crt_cipher.cit_iv);
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/compress.c b/drivers/staging/rtl8192u/ieee80211/compress.c
deleted file mode 100644
index 5416ab63a738..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/compress.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Compression operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include <linux/types.h>
-/*#include <linux/crypto.h>*/
-#include "rtl_crypto.h"
-#include <linux/errno.h>
-#include <linux/scatterlist.h>
-#include <linux/string.h>
-#include "internal.h"
-
-static int crypto_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return tfm->__crt_alg->cra_compress.coa_compress(crypto_tfm_ctx(tfm),
- src, slen, dst,
- dlen);
-}
-
-static int crypto_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return tfm->__crt_alg->cra_compress.coa_decompress(crypto_tfm_ctx(tfm),
- src, slen, dst,
- dlen);
-}
-
-int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags)
-{
- return flags ? -EINVAL : 0;
-}
-
-int crypto_init_compress_ops(struct crypto_tfm *tfm)
-{
- int ret = 0;
- struct compress_tfm *ops = &tfm->crt_compress;
-
- ret = tfm->__crt_alg->cra_compress.coa_init(crypto_tfm_ctx(tfm));
- if (ret)
- goto out;
-
- ops->cot_compress = crypto_compress;
- ops->cot_decompress = crypto_decompress;
-
-out:
- return ret;
-}
-
-void crypto_exit_compress_ops(struct crypto_tfm *tfm)
-{
- tfm->__crt_alg->cra_compress.coa_exit(crypto_tfm_ctx(tfm));
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/crypto_compat.h b/drivers/staging/rtl8192u/ieee80211/crypto_compat.h
deleted file mode 100644
index 2ba374a64178..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/crypto_compat.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Header file to maintain compatibility among different kernel versions.
- *
- * Copyright (c) 2004-2006 <lawrence_wang@realsil.com.cn>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- */
-
-#include <linux/crypto.h>
-
-static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
-}
-
-
-static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
-}
-
- struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
-{
- struct crypto_tfm *tfm = NULL;
- int err;
- printk("call crypto_alloc_tfm!!!\n");
- do {
- struct crypto_alg *alg;
-
- alg = crypto_alg_mod_lookup(name, 0, CRYPTO_ALG_ASYNC);
- err = PTR_ERR(alg);
- if (IS_ERR(alg))
- continue;
-
- tfm = __crypto_alloc_tfm(alg, flags);
- err = 0;
- if (IS_ERR(tfm)) {
- crypto_mod_put(alg);
- err = PTR_ERR(tfm);
- tfm = NULL;
- }
- } while (err == -EAGAIN && !signal_pending(current));
-
- return tfm;
-}
-//EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
-//EXPORT_SYMBOL_GPL(crypto_free_tfm);
diff --git a/drivers/staging/rtl8192u/ieee80211/digest.c b/drivers/staging/rtl8192u/ieee80211/digest.c
deleted file mode 100644
index 05e7497fd106..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/digest.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Digest operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/highmem.h>
-#include <asm/scatterlist.h>
-#include "internal.h"
-
-static void init(struct crypto_tfm *tfm)
-{
- tfm->__crt_alg->cra_digest.dia_init(crypto_tfm_ctx(tfm));
-}
-
-static void update(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg)
-{
- unsigned int i;
-
- for (i = 0; i < nsg; i++) {
-
- struct page *pg = sg[i].page;
- unsigned int offset = sg[i].offset;
- unsigned int l = sg[i].length;
-
- do {
- unsigned int bytes_from_page = min(l, ((unsigned int)
- (PAGE_SIZE)) -
- offset);
- char *p = kmap_atomic(pg) + offset;
-
- tfm->__crt_alg->cra_digest.dia_update
- (crypto_tfm_ctx(tfm), p,
- bytes_from_page);
- kunmap_atomic(p);
- crypto_yield(tfm);
- offset = 0;
- pg++;
- l -= bytes_from_page;
- } while (l > 0);
- }
-}
-
-static void final(struct crypto_tfm *tfm, u8 *out)
-{
- tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), out);
-}
-
-static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
-{
- u32 flags;
- if (tfm->__crt_alg->cra_digest.dia_setkey == NULL)
- return -ENOSYS;
- return tfm->__crt_alg->cra_digest.dia_setkey(crypto_tfm_ctx(tfm),
- key, keylen, &flags);
-}
-
-static void digest(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg, u8 *out)
-{
- unsigned int i;
-
- tfm->crt_digest.dit_init(tfm);
-
- for (i = 0; i < nsg; i++) {
- char *p = kmap_atomic(sg[i].page) + sg[i].offset;
- tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
- p, sg[i].length);
- kunmap_atomic(p);
- crypto_yield(tfm);
- }
- crypto_digest_final(tfm, out);
-}
-
-int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
-{
- return flags ? -EINVAL : 0;
-}
-
-int crypto_init_digest_ops(struct crypto_tfm *tfm)
-{
- struct digest_tfm *ops = &tfm->crt_digest;
-
- ops->dit_init = init;
- ops->dit_update = update;
- ops->dit_final = final;
- ops->dit_digest = digest;
- ops->dit_setkey = setkey;
-
- return crypto_alloc_hmac_block(tfm);
-}
-
-void crypto_exit_digest_ops(struct crypto_tfm *tfm)
-{
- crypto_free_hmac_block(tfm);
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index 34edcfab96be..90ace791f2d7 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -1,16 +1,8 @@
-//-----------------------------------------------------------------------------
-// File:
-// Dot11d.c
-//
-// Description:
-// Implement 802.11d.
-//
-//-----------------------------------------------------------------------------
+/* Implement 802.11d. */
#include "dot11d.h"
-void
-Dot11d_Init(struct ieee80211_device *ieee)
+void Dot11d_Init(struct ieee80211_device *ieee)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
@@ -22,55 +14,42 @@ Dot11d_Init(struct ieee80211_device *ieee)
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
RESET_CIE_WATCHDOG(ieee);
- printk("Dot11d_Init()\n");
+ netdev_info(ieee->dev, "Dot11d_Init()\n");
}
+EXPORT_SYMBOL(Dot11d_Init);
-//
-// Description:
-// Reset to the state as we are just entering a regulatory domain.
-//
-void
-Dot11d_Reset(struct ieee80211_device *ieee)
+/* Reset to the state as we are just entering a regulatory domain. */
+void Dot11d_Reset(struct ieee80211_device *ieee)
{
u32 i;
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
- // Clear old channel map
+ /* Clear old channel map */
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
- // Set new channel map
- for (i=1; i<=11; i++) {
+ /* Set new channel map */
+ for (i = 1; i <= 11; i++)
(pDot11dInfo->channel_map)[i] = 1;
- }
- for (i=12; i<=14; i++) {
+
+ for (i = 12; i <= 14; i++)
(pDot11dInfo->channel_map)[i] = 2;
- }
pDot11dInfo->State = DOT11D_STATE_NONE;
pDot11dInfo->CountryIeLen = 0;
RESET_CIE_WATCHDOG(ieee);
-
- //printk("Dot11d_Reset()\n");
}
+EXPORT_SYMBOL(Dot11d_Reset);
-//
-// Description:
-// Update country IE from Beacon or Probe Resopnse
-// and configure PHY for operation in the regulatory domain.
-//
-// TODO:
-// Configure Tx power.
-//
-// Assumption:
-// 1. IS_DOT11D_ENABLE() is TRUE.
-// 2. Input IE is an valid one.
-//
-void
-Dot11d_UpdateCountryIe(
- struct ieee80211_device *dev,
- u8 *pTaddr,
- u16 CoutryIeLen,
- u8 *pCoutryIe
- )
+/*
+ * Update country IE from Beacon or Probe Response and configure PHY for
+ * operation in the regulatory domain.
+ *
+ * TODO: Configure Tx power.
+ * Assumption:
+ * 1. IS_DOT11D_ENABLE() is TRUE.
+ * 2. Input IE is a valid one.
+ */
+void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
+ u16 CoutryIeLen, u8 *pCoutryIe)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 i, j, NumTriples, MaxChnlNum;
@@ -79,23 +58,25 @@ Dot11d_UpdateCountryIe(
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
MaxChnlNum = 0;
- NumTriples = (CoutryIeLen - 3) / 3; // skip 3-byte country string.
+ NumTriples = (CoutryIeLen - 3) / 3; /* skip 3-byte country string. */
pTriple = (PCHNL_TXPOWER_TRIPLE)(pCoutryIe + 3);
- for(i = 0; i < NumTriples; i++)
- {
- if(MaxChnlNum >= pTriple->FirstChnl)
- { // It is not in a monotonically increasing order, so stop processing.
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
+ for (i = 0; i < NumTriples; i++) {
+ if (MaxChnlNum >= pTriple->FirstChnl) {
+ /* It is not in a monotonically increasing order, so
+ * stop processing.
+ */
+ netdev_err(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
return;
}
- if(MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls))
- { // It is not a valid set of channel id, so stop processing.
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
+ if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls)) {
+ /* It is not a valid set of channel id, so stop
+ * processing.
+ */
+ netdev_err(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
return;
}
- for(j = 0 ; j < pTriple->NumChnls; j++)
- {
+ for (j = 0; j < pTriple->NumChnls; j++) {
pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] = pTriple->MaxTxPowerInDbm;
MaxChnlNum = pTriple->FirstChnl + j;
@@ -103,60 +84,48 @@ Dot11d_UpdateCountryIe(
pTriple = (PCHNL_TXPOWER_TRIPLE)((u8 *)pTriple + 3);
}
- //printk("Dot11d_UpdateCountryIe(): Channel List:\n");
- printk("Channel List:");
- for(i=1; i<= MAX_CHANNEL_NUMBER; i++)
- if(pDot11dInfo->channel_map[i] > 0)
- printk(" %d", i);
- printk("\n");
+ netdev_info(dev->dev, "Channel List:");
+ for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
+ if (pDot11dInfo->channel_map[i] > 0)
+ netdev_info(dev->dev, " %d", i);
+ netdev_info(dev->dev, "\n");
UPDATE_CIE_SRC(dev, pTaddr);
pDot11dInfo->CountryIeLen = CoutryIeLen;
- memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe,CoutryIeLen);
+ memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe, CoutryIeLen);
pDot11dInfo->State = DOT11D_STATE_LEARNED;
}
+EXPORT_SYMBOL(Dot11d_UpdateCountryIe);
-
-u8
-DOT11D_GetMaxTxPwrInDbm(
- struct ieee80211_device *dev,
- u8 Channel
- )
+u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 MaxTxPwrInDbm = 255;
- if(MAX_CHANNEL_NUMBER < Channel)
- {
- printk("DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
+ if (MAX_CHANNEL_NUMBER < Channel) {
+ netdev_err(dev->dev, "DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
return MaxTxPwrInDbm;
}
- if(pDot11dInfo->channel_map[Channel])
- {
+ if (pDot11dInfo->channel_map[Channel])
MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
- }
return MaxTxPwrInDbm;
}
+EXPORT_SYMBOL(DOT11D_GetMaxTxPwrInDbm);
-
-void
-DOT11D_ScanComplete(
- struct ieee80211_device *dev
- )
+void DOT11D_ScanComplete(struct ieee80211_device *dev)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
- switch (pDot11dInfo->State)
- {
+ switch (pDot11dInfo->State) {
case DOT11D_STATE_LEARNED:
pDot11dInfo->State = DOT11D_STATE_DONE;
break;
case DOT11D_STATE_DONE:
- if( GET_CIE_WATCHDOG(dev) == 0 )
- { // Reset country IE if previous one is gone.
+ if (GET_CIE_WATCHDOG(dev) == 0) {
+ /* Reset country IE if previous one is gone. */
Dot11d_Reset(dev);
}
break;
@@ -164,57 +133,43 @@ DOT11D_ScanComplete(
break;
}
}
+EXPORT_SYMBOL(DOT11D_ScanComplete);
-int IsLegalChannel(
- struct ieee80211_device *dev,
- u8 channel
-)
+int IsLegalChannel(struct ieee80211_device *dev, u8 channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
- if(MAX_CHANNEL_NUMBER < channel)
- {
- printk("IsLegalChannel(): Invalid Channel\n");
+ if (MAX_CHANNEL_NUMBER < channel) {
+ netdev_err(dev->dev, "IsLegalChannel(): Invalid Channel\n");
return 0;
}
- if(pDot11dInfo->channel_map[channel] > 0)
+ if (pDot11dInfo->channel_map[channel] > 0)
return 1;
return 0;
}
+EXPORT_SYMBOL(IsLegalChannel);
-int ToLegalChannel(
- struct ieee80211_device *dev,
- u8 channel
-)
+int ToLegalChannel(struct ieee80211_device *dev, u8 channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 default_chn = 0;
u32 i = 0;
- for (i=1; i<= MAX_CHANNEL_NUMBER; i++)
- {
- if(pDot11dInfo->channel_map[i] > 0)
- {
+ for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) {
+ if (pDot11dInfo->channel_map[i] > 0) {
default_chn = i;
break;
}
}
- if(MAX_CHANNEL_NUMBER < channel)
- {
- printk("IsLegalChannel(): Invalid Channel\n");
+ if (MAX_CHANNEL_NUMBER < channel) {
+ netdev_err(dev->dev, "IsLegalChannel(): Invalid Channel\n");
return default_chn;
}
- if(pDot11dInfo->channel_map[channel] > 0)
+ if (pDot11dInfo->channel_map[channel] > 0)
return channel;
return default_chn;
}
-EXPORT_SYMBOL(Dot11d_Init);
-EXPORT_SYMBOL(Dot11d_Reset);
-EXPORT_SYMBOL(Dot11d_UpdateCountryIe);
-EXPORT_SYMBOL(DOT11D_GetMaxTxPwrInDbm);
-EXPORT_SYMBOL(DOT11D_ScanComplete);
-EXPORT_SYMBOL(IsLegalChannel);
EXPORT_SYMBOL(ToLegalChannel);
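One behavioural note on the conversion above, not a correction to the diff: printk() continuations let the old code build the channel list on a single line, whereas every netdev_info() call emits its own prefixed log line, so the list now prints one channel per line. A hedged sketch of formatting into a local buffer first, assuming dot11d.h is included; the buffer size and helper name are illustrative:

static void dot11d_print_channel_map(struct ieee80211_device *dev)
{
	PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
	char buf[4 * MAX_CHANNEL_NUMBER + 1] = "";
	int len = 0;
	u8 i;

	/* Collect all legal channels into one string, then log once. */
	for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
		if (pDot11dInfo->channel_map[i] > 0)
			len += scnprintf(buf + len, sizeof(buf) - len, " %d", i);

	netdev_info(dev->dev, "Channel List:%s\n", buf);
}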
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index bc64f05a7e6a..cac2056b7f82 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1152,7 +1152,7 @@ struct ieee80211_probe_request {
struct ieee80211_probe_response {
struct ieee80211_hdr_3addr header;
- u32 time_stamp[2];
+ __le32 time_stamp[2];
__le16 beacon_interval;
__le16 capability;
/* SSID, supported rates, FH params, DS params,
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index e730ed64c0fe..a98414a72bf5 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -517,11 +517,8 @@ drop:
return 1;
}
-bool
-AddReorderEntry(
- PRX_TS_RECORD pTS,
- PRX_REORDER_ENTRY pReorderEntry
- )
+
+static bool AddReorderEntry(PRX_TS_RECORD pTS, PRX_REORDER_ENTRY pReorderEntry)
{
struct list_head *pList = &pTS->RxPendingPktList;
while(pList->next != &pTS->RxPendingPktList)
@@ -601,10 +598,9 @@ void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_
}
-void RxReorderIndicatePacket( struct ieee80211_device *ieee,
- struct ieee80211_rxb *prxb,
- PRX_TS_RECORD pTS,
- u16 SeqNum)
+static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
+ struct ieee80211_rxb *prxb,
+ PRX_TS_RECORD pTS, u16 SeqNum)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
PRX_REORDER_ENTRY pReorderEntry = NULL;
@@ -771,9 +767,9 @@ void RxReorderIndicatePacket( struct ieee80211_device *ieee,
}
}
-u8 parse_subframe(struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats,
- struct ieee80211_rxb *rxb,u8 *src,u8 *dst)
+static u8 parse_subframe(struct sk_buff *skb,
+ struct ieee80211_rx_stats *rx_stats,
+ struct ieee80211_rxb *rxb,u8 *src,u8 *dst)
{
struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)skb->data;
u16 fc = le16_to_cpu(hdr->frame_ctl);
@@ -2200,7 +2196,7 @@ static inline int ieee80211_network_init(
network->last_scanned = jiffies;
network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
- network->beacon_interval = le32_to_cpu(beacon->beacon_interval);
+ network->beacon_interval = le16_to_cpu(beacon->beacon_interval);
/* Where to pull this? beacon->listen_interval;*/
network->listen_interval = 0x0A;
network->rates_len = network->rates_ex_len = 0;
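A note on the two related changes above, beyond what the diff itself states: beacon_interval is a 16-bit little-endian wire field, so the old le32_to_cpu() read the wrong width, and annotating time_stamp as __le32 lets sparse (make C=1) flag such mismatches. A small hedged illustration with a made-up struct name:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical struct, for illustration only. */
struct example_beacon_frame {
	__le32 time_stamp[2];
	__le16 beacon_interval;
	__le16 capability;
} __packed;

static inline u16 example_beacon_interval(const struct example_beacon_frame *b)
{
	/*
	 * le16_to_cpu() matches the declared width; applying le32_to_cpu()
	 * here would read four bytes and is exactly the kind of mismatch
	 * sparse reports once the fields carry __le16/__le32 annotations.
	 */
	return le16_to_cpu(b->beacon_interval);
}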
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 662c7e41cd5c..2131912113d7 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -24,15 +24,6 @@
#include "dot11d.h"
-u8 rsn_authen_cipher_suite[16][4] = {
- {0x00,0x0F,0xAC,0x00}, //Use group key, //Reserved
- {0x00,0x0F,0xAC,0x01}, //WEP-40 //RSNA default
- {0x00,0x0F,0xAC,0x02}, //TKIP //NONE //{used just as default}
- {0x00,0x0F,0xAC,0x03}, //WRAP-historical
- {0x00,0x0F,0xAC,0x04}, //CCMP
- {0x00,0x0F,0xAC,0x05}, //WEP-104
-};
-
short ieee80211_is_54g(const struct ieee80211_network *net)
{
return (net->rates_ex_len > 0) || (net->rates_len > 4);
@@ -47,7 +38,7 @@ short ieee80211_is_shortslot(const struct ieee80211_network *net)
* tag and the EXTENDED RATE MFIE tag if needed.
 * It includes two bytes per tag for the tag itself and its len
*/
-unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
+static unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
{
unsigned int rate_len = 0;
@@ -65,7 +56,7 @@ unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
* Then it updates the pointer so that
* it points after the new MFIE tag added.
*/
-void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
+static void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -82,7 +73,7 @@ void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
*tag_p = tag;
}
-void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
+static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -106,7 +97,8 @@ void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
}
-void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p) {
+static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p)
+{
u8 *tag = *tag_p;
*tag++ = MFIE_TYPE_GENERIC; //0
@@ -148,7 +140,7 @@ void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p) {
}
#endif
-void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
+static void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
{
int nh;
nh = (ieee->mgmt_queue_head +1) % MGMT_QUEUE_NUM;
@@ -166,7 +158,7 @@ void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
//return 0;
}
-struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
+static struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
{
struct sk_buff *ret;
@@ -181,12 +173,12 @@ struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
return ret;
}
-void init_mgmt_queue(struct ieee80211_device *ieee)
+static void init_mgmt_queue(struct ieee80211_device *ieee)
{
ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
}
-u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
+static u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
u8 rate;
@@ -365,7 +357,8 @@ inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
}
struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee);
-void ieee80211_send_beacon(struct ieee80211_device *ieee)
+
+static void ieee80211_send_beacon(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
if(!ieee->ieee_up)
@@ -391,7 +384,7 @@ void ieee80211_send_beacon(struct ieee80211_device *ieee)
}
-void ieee80211_send_beacon_cb(unsigned long _ieee)
+static void ieee80211_send_beacon_cb(unsigned long _ieee)
{
struct ieee80211_device *ieee =
(struct ieee80211_device *) _ieee;
@@ -403,7 +396,7 @@ void ieee80211_send_beacon_cb(unsigned long _ieee)
}
-void ieee80211_send_probe(struct ieee80211_device *ieee)
+static void ieee80211_send_probe(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
@@ -494,7 +487,7 @@ out:
}
-void ieee80211_softmac_scan_wq(struct work_struct *work)
+static void ieee80211_softmac_scan_wq(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
@@ -538,7 +531,7 @@ out:
-void ieee80211_beacons_start(struct ieee80211_device *ieee)
+static void ieee80211_beacons_start(struct ieee80211_device *ieee)
{
unsigned long flags;
spin_lock_irqsave(&ieee->beacon_lock,flags);
@@ -549,7 +542,7 @@ void ieee80211_beacons_start(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->beacon_lock,flags);
}
-void ieee80211_beacons_stop(struct ieee80211_device *ieee)
+static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -581,7 +574,7 @@ void ieee80211_start_send_beacons(struct ieee80211_device *ieee)
}
-void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
+static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
{
// unsigned long flags;
@@ -609,7 +602,7 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
}
/* called with ieee->lock held */
-void ieee80211_start_scan(struct ieee80211_device *ieee)
+static void ieee80211_start_scan(struct ieee80211_device *ieee)
{
if(IS_DOT11D_ENABLE(ieee) )
{
@@ -841,7 +834,8 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
}
-struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
+static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
+ u8 *dest)
{
struct sk_buff *skb;
u8 *tag;
@@ -896,7 +890,8 @@ struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
return skb;
}
-struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8 *dest)
+static struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,
+ int status, u8 *dest)
{
struct sk_buff *skb;
struct ieee80211_authentication *auth;
@@ -924,7 +919,8 @@ struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8
}
-struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
+static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee,
+ short pwr)
{
struct sk_buff *skb;
struct ieee80211_hdr_3addr *hdr;
@@ -950,7 +946,7 @@ struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
}
-void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
+static void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
{
struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);
@@ -959,7 +955,8 @@ void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
}
-void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8 *dest)
+static void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s,
+ u8 *dest)
{
struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest);
@@ -968,7 +965,7 @@ void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8 *dest)
}
-void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
+static void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
{
@@ -1250,13 +1247,13 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-void ieee80211_associate_abort_cb(unsigned long dev)
+static void ieee80211_associate_abort_cb(unsigned long dev)
{
ieee80211_associate_abort((struct ieee80211_device *) dev);
}
-void ieee80211_associate_step1(struct ieee80211_device *ieee)
+static void ieee80211_associate_step1(struct ieee80211_device *ieee)
{
struct ieee80211_network *beacon = &ieee->current_network;
struct sk_buff *skb;
@@ -1282,7 +1279,9 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
}
}
-void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
+static void ieee80211_auth_challenge(struct ieee80211_device *ieee,
+ u8 *challenge,
+ int chlen)
{
u8 *c;
struct sk_buff *skb;
@@ -1312,7 +1311,7 @@ void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int
kfree(challenge);
}
-void ieee80211_associate_step2(struct ieee80211_device *ieee)
+static void ieee80211_associate_step2(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
struct ieee80211_network *beacon = &ieee->current_network;
@@ -1331,7 +1330,7 @@ void ieee80211_associate_step2(struct ieee80211_device *ieee)
//dev_kfree_skb_any(skb);//edit by thomas
}
}
-void ieee80211_associate_complete_wq(struct work_struct *work)
+static void ieee80211_associate_complete_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
printk(KERN_INFO "Associated successfully\n");
@@ -1378,7 +1377,7 @@ void ieee80211_associate_complete_wq(struct work_struct *work)
netif_carrier_on(ieee->dev);
}
-void ieee80211_associate_complete(struct ieee80211_device *ieee)
+static void ieee80211_associate_complete(struct ieee80211_device *ieee)
{
// int i;
// struct net_device* dev = ieee->dev;
@@ -1389,7 +1388,7 @@ void ieee80211_associate_complete(struct ieee80211_device *ieee)
queue_work(ieee->wq, &ieee->associate_complete_wq);
}
-void ieee80211_associate_procedure_wq(struct work_struct *work)
+static void ieee80211_associate_procedure_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
ieee->sync_scan_hurryup = 1;
@@ -1562,7 +1561,7 @@ static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
}
-int auth_rq_parse(struct sk_buff *skb,u8 *dest)
+static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
{
struct ieee80211_authentication *a;
@@ -1618,7 +1617,7 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
}
-int assoc_rq_parse(struct sk_buff *skb,u8 *dest)
+static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
{
struct ieee80211_assoc_request_frame *a;
@@ -1712,7 +1711,8 @@ ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
-void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr)
+static void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee,
+ short pwr)
{
struct sk_buff *buf = ieee80211_null_func(ieee, pwr);
@@ -1723,7 +1723,8 @@ void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr)
}
-short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *time_l)
+static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
+ u32 *time_l)
{
int timeout = ieee->ps_timeout;
u8 dtim;
@@ -1771,7 +1772,7 @@ short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *ti
}
-inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
+static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
{
u32 th,tl;
@@ -1888,7 +1889,8 @@ void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
}
spin_unlock_irqrestore(&ieee->lock, flags);
}
-void ieee80211_process_action(struct ieee80211_device *ieee, struct sk_buff *skb)
+static void ieee80211_process_action(struct ieee80211_device *ieee,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *header = (struct ieee80211_hdr *)skb->data;
u8 *act = ieee80211_get_payload(header);
@@ -1959,7 +1961,8 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_network network_resp;
struct ieee80211_network *network = &network_resp;
- if (0 == (errcode=assoc_parse(ieee,skb, &aid))){
+ errcode = assoc_parse(ieee, skb, &aid);
+ if (!errcode) {
ieee->state=IEEE80211_LINKED;
ieee->assoc_id = aid;
ieee->softmac_stats.rx_ass_ok++;
@@ -2017,7 +2020,8 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
IEEE80211_DEBUG_MGMT("Received authentication response");
- if (0 == (errcode=auth_parse(skb, &challenge, &chlen))){
+ errcode = auth_parse(skb, &challenge, &chlen);
+ if (!errcode) {
if(ieee->open_wep || !challenge){
ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATED;
ieee->softmac_stats.rx_auth_rs_ok++;
@@ -2189,7 +2193,7 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
}
/* called with ieee->lock acquired */
-void ieee80211_resume_tx(struct ieee80211_device *ieee)
+static void ieee80211_resume_tx(struct ieee80211_device *ieee)
{
int i;
for(i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
@@ -2318,7 +2322,7 @@ void ieee80211_start_master_bss(struct ieee80211_device *ieee)
netif_carrier_on(ieee->dev);
}
-void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
+static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
{
if(ieee->raw_tx){
@@ -2328,7 +2332,7 @@ void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
netif_carrier_on(ieee->dev);
}
}
-void ieee80211_start_ibss_wq(struct work_struct *work)
+static void ieee80211_start_ibss_wq(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
@@ -2501,7 +2505,7 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
notify_wx_assoc_event(ieee);
}
-void ieee80211_associate_retry_wq(struct work_struct *work)
+static void ieee80211_associate_retry_wq(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
@@ -2772,7 +2776,8 @@ static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value)
}
-void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie, int wpa_ie_len)
+static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee,
+ char *wpa_ie, int wpa_ie_len)
{
/* make sure WPA is enabled */
ieee80211_wpa_enable(ieee, 1);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 157b2d7466e5..6779bdd561d7 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -378,7 +378,8 @@ FORCED_AGG_SETTING:
return;
}
-extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
+static void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee,
+ cb_desc *tcb_desc)
{
tcb_desc->bUseShortPreamble = false;
if (tcb_desc->data_rate == 2)
@@ -391,7 +392,7 @@ extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee, cb_
}
return;
}
-extern void
+static void
ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index e1fe54acb4b8..bdf67ec5df21 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -39,7 +39,7 @@ struct modes_unit {
char *mode_string;
int mode_size;
};
-struct modes_unit ieee80211_modes[] = {
+static struct modes_unit ieee80211_modes[] = {
{"a",1},
{"b",1},
{"g",1},
diff --git a/drivers/staging/rtl8192u/ieee80211/internal.h b/drivers/staging/rtl8192u/ieee80211/internal.h
deleted file mode 100644
index 6f54cfe8a467..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/internal.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#ifndef _CRYPTO_INTERNAL_H
-#define _CRYPTO_INTERNAL_H
-
-
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/init.h>
-#include <asm/hardirq.h>
-#include <asm/softirq.h>
-#include <asm/kmap_types.h>
-
-
-static inline void crypto_yield(struct crypto_tfm *tfm)
-{
- if (!in_softirq())
- cond_resched();
-}
-
-static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
-{
- return (void *)&tfm[1];
-}
-
-struct crypto_alg *crypto_alg_lookup(const char *name);
-
-#ifdef CONFIG_KMOD
-void crypto_alg_autoload(const char *name);
-struct crypto_alg *crypto_alg_mod_lookup(const char *name);
-#else
-static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name)
-{
- return crypto_alg_lookup(name);
-}
-#endif
-
-#ifdef CONFIG_CRYPTO_HMAC
-int crypto_alloc_hmac_block(struct crypto_tfm *tfm);
-void crypto_free_hmac_block(struct crypto_tfm *tfm);
-#else
-static inline int crypto_alloc_hmac_block(struct crypto_tfm *tfm)
-{
- return 0;
-}
-
-static inline void crypto_free_hmac_block(struct crypto_tfm *tfm)
-{ }
-#endif
-
-#ifdef CONFIG_PROC_FS
-void __init crypto_init_proc(void);
-#else
-static inline void crypto_init_proc(void)
-{ }
-#endif
-
-int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags);
-int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags);
-int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags);
-
-int crypto_init_digest_ops(struct crypto_tfm *tfm);
-int crypto_init_cipher_ops(struct crypto_tfm *tfm);
-int crypto_init_compress_ops(struct crypto_tfm *tfm);
-
-void crypto_exit_digest_ops(struct crypto_tfm *tfm);
-void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
-void crypto_exit_compress_ops(struct crypto_tfm *tfm);
-
-#endif /* _CRYPTO_INTERNAL_H */
diff --git a/drivers/staging/rtl8192u/ieee80211/michael_mic.c b/drivers/staging/rtl8192u/ieee80211/michael_mic.c
deleted file mode 100644
index df256e487c20..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/michael_mic.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Cryptographic API
- *
- * Michael MIC (IEEE 802.11i/TKIP) keyed digest
- *
- * Copyright (c) 2004 Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/string.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-
-
-struct michael_mic_ctx {
- u8 pending[4];
- size_t pending_len;
-
- u32 l, r;
-};
-
-
-static inline u32 rotl(u32 val, int bits)
-{
- return (val << bits) | (val >> (32 - bits));
-}
-
-
-static inline u32 rotr(u32 val, int bits)
-{
- return (val >> bits) | (val << (32 - bits));
-}
-
-
-static inline u32 xswap(u32 val)
-{
- return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
-}
-
-
-#define michael_block(l, r) \
-do { \
- r ^= rotl(l, 17); \
- l += r; \
- r ^= xswap(l); \
- l += r; \
- r ^= rotl(l, 3); \
- l += r; \
- r ^= rotr(l, 2); \
- l += r; \
-} while (0)
-
-
-static inline u32 get_le32(const u8 *p)
-{
- return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
-}
-
-
-static inline void put_le32(u8 *p, u32 v)
-{
- p[0] = v;
- p[1] = v >> 8;
- p[2] = v >> 16;
- p[3] = v >> 24;
-}
-
-
-static void michael_init(void *ctx)
-{
- struct michael_mic_ctx *mctx = ctx;
- mctx->pending_len = 0;
-}
-
-
-static void michael_update(void *ctx, const u8 *data, unsigned int len)
-{
- struct michael_mic_ctx *mctx = ctx;
-
- if (mctx->pending_len) {
- int flen = 4 - mctx->pending_len;
- if (flen > len)
- flen = len;
- memcpy(&mctx->pending[mctx->pending_len], data, flen);
- mctx->pending_len += flen;
- data += flen;
- len -= flen;
-
- if (mctx->pending_len < 4)
- return;
-
- mctx->l ^= get_le32(mctx->pending);
- michael_block(mctx->l, mctx->r);
- mctx->pending_len = 0;
- }
-
- while (len >= 4) {
- mctx->l ^= get_le32(data);
- michael_block(mctx->l, mctx->r);
- data += 4;
- len -= 4;
- }
-
- if (len > 0) {
- mctx->pending_len = len;
- memcpy(mctx->pending, data, len);
- }
-}
-
-
-static void michael_final(void *ctx, u8 *out)
-{
- struct michael_mic_ctx *mctx = ctx;
- u8 *data = mctx->pending;
-
- /* Last block and padding (0x5a, 4..7 x 0) */
- switch (mctx->pending_len) {
- case 0:
- mctx->l ^= 0x5a;
- break;
- case 1:
- mctx->l ^= data[0] | 0x5a00;
- break;
- case 2:
- mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000;
- break;
- case 3:
- mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) |
- 0x5a000000;
- break;
- }
- michael_block(mctx->l, mctx->r);
- /* l ^= 0; */
- michael_block(mctx->l, mctx->r);
-
- put_le32(out, mctx->l);
- put_le32(out + 4, mctx->r);
-}
-
-
-static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen,
- u32 *flags)
-{
- struct michael_mic_ctx *mctx = ctx;
- if (keylen != 8) {
- if (flags)
- *flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
- mctx->l = get_le32(key);
- mctx->r = get_le32(key + 4);
- return 0;
-}
-
-
-static struct crypto_alg michael_mic_alg = {
- .cra_name = "michael_mic",
- .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
- .cra_blocksize = 8,
- .cra_ctxsize = sizeof(struct michael_mic_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list),
- .cra_u = { .digest = {
- .dia_digestsize = 8,
- .dia_init = michael_init,
- .dia_update = michael_update,
- .dia_final = michael_final,
- .dia_setkey = michael_setkey } }
-};
-
-
-static int __init michael_mic_init(void)
-{
- return crypto_register_alg(&michael_mic_alg);
-}
-
-
-static void __exit michael_mic_exit(void)
-{
- crypto_unregister_alg(&michael_mic_alg);
-}
-
-
-module_init(michael_mic_init);
-module_exit(michael_mic_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Michael MIC");
-MODULE_AUTHOR("Jouni Malinen <jkmaline@cc.hut.fi>");
diff --git a/drivers/staging/rtl8192u/ieee80211/proc.c b/drivers/staging/rtl8192u/ieee80211/proc.c
deleted file mode 100644
index c426dfdd9fdd..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/proc.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Scatterlist Cryptographic API.
- *
- * Procfs information.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include <linux/init.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/rwsem.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include "internal.h"
-
-extern struct list_head crypto_alg_list;
-extern struct rw_semaphore crypto_alg_sem;
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- struct list_head *v;
- loff_t n = *pos;
-
- down_read(&crypto_alg_sem);
- list_for_each(v, &crypto_alg_list)
- if (!n--)
- return list_entry(v, struct crypto_alg, cra_list);
- return NULL;
-}
-
-static void *c_next(struct seq_file *m, void *p, loff_t *pos)
-{
- struct list_head *v = p;
-
- (*pos)++;
- v = v->next;
- return (v == &crypto_alg_list) ?
- NULL : list_entry(v, struct crypto_alg, cra_list);
-}
-
-static void c_stop(struct seq_file *m, void *p)
-{
- up_read(&crypto_alg_sem);
-}
-
-static int c_show(struct seq_file *m, void *p)
-{
- struct crypto_alg *alg = (struct crypto_alg *)p;
-
- seq_printf(m, "name : %s\n", alg->cra_name);
- seq_printf(m, "module : %s\n",
- (alg->cra_module ?
- alg->cra_module->name :
- "kernel"));
-
- switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_CIPHER:
- seq_printf(m, "type : cipher\n");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "min keysize : %u\n",
- alg->cra_cipher.cia_min_keysize);
- seq_printf(m, "max keysize : %u\n",
- alg->cra_cipher.cia_max_keysize);
- break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- seq_printf(m, "type : digest\n");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n",
- alg->cra_digest.dia_digestsize);
- break;
- case CRYPTO_ALG_TYPE_COMPRESS:
- seq_printf(m, "type : compression\n");
- break;
- default:
- seq_printf(m, "type : unknown\n");
- break;
- }
-
- seq_putc(m, '\n');
- return 0;
-}
-
-static struct seq_operations crypto_seq_ops = {
- .start = c_start,
- .next = c_next,
- .stop = c_stop,
- .show = c_show
-};
-
-static int crypto_info_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &crypto_seq_ops);
-}
-
-static const struct file_operations proc_crypto_ops = {
- .open = crypto_info_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
-
-void __init crypto_init_proc(void)
-{
- proc_create("crypto", 0, NULL, &proc_crypto_ops);
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 3684da340bd4..7bf55e393554 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -13,7 +13,7 @@
* u16 Time //indicate time delay.
* output: none
********************************************************************************************************************/
-void ActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA, u16 Time)
+static void ActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA, u16 Time)
{
pBA->bValid = true;
if(Time != 0)
@@ -25,7 +25,7 @@ void ActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA, u16 Time)
* input: PBA_RECORD pBA //BA entry to be disabled
* output: none
********************************************************************************************************************/
-void DeActivateBAEntry( struct ieee80211_device *ieee, PBA_RECORD pBA)
+static void DeActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA)
{
pBA->bValid = false;
del_timer_sync(&pBA->Timer);
@@ -37,7 +37,7 @@ void DeActivateBAEntry( struct ieee80211_device *ieee, PBA_RECORD pBA)
* output: none
* notice: As PTX_TS_RECORD structure will be defined in QOS, so wait to be merged. //FIXME
********************************************************************************************************************/
-u8 TxTsDeleteBA( struct ieee80211_device *ieee, PTX_TS_RECORD pTxTs)
+static u8 TxTsDeleteBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTs)
{
PBA_RECORD pAdmittedBa = &pTxTs->TxAdmittedBARecord; //These two BA entries must exist in TS structure
PBA_RECORD pPendingBa = &pTxTs->TxPendingBARecord;
@@ -67,7 +67,7 @@ u8 TxTsDeleteBA( struct ieee80211_device *ieee, PTX_TS_RECORD pTxTs)
* output: none
* notice: As PRX_TS_RECORD structure will be defined in QOS, so wait to be merged. //FIXME, same with above
********************************************************************************************************************/
-u8 RxTsDeleteBA( struct ieee80211_device *ieee, PRX_TS_RECORD pRxTs)
+static u8 RxTsDeleteBA(struct ieee80211_device *ieee, PRX_TS_RECORD pRxTs)
{
PBA_RECORD pBa = &pRxTs->RxAdmittedBARecord;
u8 bSendDELBA = false;
@@ -307,7 +307,9 @@ static void ieee80211_send_ADDBARsp(struct ieee80211_device *ieee, u8 *dst,
* notice: If any possible, please hide pBA in ieee. And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
********************************************************************************************************************/
-void ieee80211_send_DELBA(struct ieee80211_device *ieee, u8 *dst, PBA_RECORD pBA, TR_SELECT TxRxSelect, u16 ReasonCode)
+static void ieee80211_send_DELBA(struct ieee80211_device *ieee, u8 *dst,
+ PBA_RECORD pBA, TR_SELECT TxRxSelect,
+ u16 ReasonCode)
{
struct sk_buff *skb = NULL;
skb = ieee80211_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode); //construct ACT_ADDBARSP frames
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index e956da5a2d76..53ec2d435ffe 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -219,7 +219,7 @@ void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString)
/*
* Return: true if station in half n mode and AP supports 40 bw
*/
-bool IsHTHalfNmode40Bandwidth(struct ieee80211_device *ieee)
+static bool IsHTHalfNmode40Bandwidth(struct ieee80211_device *ieee)
{
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -238,7 +238,7 @@ bool IsHTHalfNmode40Bandwidth(struct ieee80211_device *ieee)
return retValue;
}
-bool IsHTHalfNmodeSGI(struct ieee80211_device *ieee, bool is40MHz)
+static bool IsHTHalfNmodeSGI(struct ieee80211_device *ieee, bool is40MHz)
{
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -376,7 +376,7 @@ bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee)
* return:
* notice:
* *****************************************************************************************************************/
-void HTIOTPeerDetermine(struct ieee80211_device *ieee)
+static void HTIOTPeerDetermine(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
struct ieee80211_network *net = &ieee->current_network;
@@ -413,7 +413,7 @@ void HTIOTPeerDetermine(struct ieee80211_device *ieee)
* output: none
* return: return 1 if driver should declare MCS13 only(otherwise return 0)
* *****************************************************************************************************************/
-u8 HTIOTActIsDisableMCS14(struct ieee80211_device *ieee, u8 *PeerMacAddr)
+static u8 HTIOTActIsDisableMCS14(struct ieee80211_device *ieee, u8 *PeerMacAddr)
{
u8 ret = 0;
return ret;
@@ -432,7 +432,7 @@ u8 HTIOTActIsDisableMCS14(struct ieee80211_device *ieee, u8 *PeerMacAddr)
* Return: true if driver should disable MCS15
* 2008.04.15 Emily
*/
-bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)
+static bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)
{
bool retValue = false;
@@ -469,7 +469,8 @@ bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)
* Return: true if driver should disable all two spatial stream packet
* 2008.04.21 Emily
*/
-bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device *ieee, u8 *PeerMacAddr)
+static bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device *ieee,
+ u8 *PeerMacAddr)
{
bool retValue = false;
@@ -486,7 +487,8 @@ bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device *ieee, u8 *Pee
* output: none
* return: return 1 if driver should disable EDCA turbo mode(otherwise return 0)
* *****************************************************************************************************************/
-u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device *ieee, u8 *PeerMacAddr)
+static u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device *ieee,
+ u8 *PeerMacAddr)
{
u8 retValue = false; // default enable EDCA Turbo mode.
// Set specific EDCA parameter for different AP in DM handler.
@@ -500,7 +502,7 @@ u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device *ieee, u8 *PeerMacAddr)
* output: none
* return: return 1 if true
* *****************************************************************************************************************/
-u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
+static u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
{
u8 retValue = 0;
@@ -515,7 +517,7 @@ u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
return retValue;
}
-u8 HTIOTActIsCCDFsync(u8 *PeerMacAddr)
+static u8 HTIOTActIsCCDFsync(u8 *PeerMacAddr)
{
u8 retValue = 0;
if( (memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0) ||
@@ -792,7 +794,7 @@ void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg,
* return: always we return true
* notice:
* *****************************************************************************************************************/
-u8 HT_PickMCSRate(struct ieee80211_device *ieee, u8 *pOperateMCS)
+static u8 HT_PickMCSRate(struct ieee80211_device *ieee, u8 *pOperateMCS)
{
u8 i;
if (pOperateMCS == NULL)
@@ -907,7 +909,8 @@ u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSF
**
** \pHTSupportedCap: the connected STA's supported rate Capability element
*/
-u8 HTFilterMCSRate( struct ieee80211_device *ieee, u8 *pSupportMCS, u8 *pOperateMCS)
+static u8 HTFilterMCSRate(struct ieee80211_device *ieee, u8 *pSupportMCS,
+ u8 *pOperateMCS)
{
u8 i=0;
@@ -1317,51 +1320,6 @@ void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_
}
}
-void HTUseDefaultSetting(struct ieee80211_device *ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-// u8 regBwOpMode;
-
- if(pHTInfo->bEnableHT)
- {
- pHTInfo->bCurrentHTSupport = true;
-
- pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK;
-
- pHTInfo->bCurBW40MHz = pHTInfo->bRegBW40MHz;
-
- pHTInfo->bCurShortGI20MHz= pHTInfo->bRegShortGI20MHz;
-
- pHTInfo->bCurShortGI40MHz= pHTInfo->bRegShortGI40MHz;
-
- pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
-
- pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
-
- pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
-
- pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
-
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
-
- // Set BWOpMode register
-
- //update RATR index0
- HTFilterMCSRate(ieee, ieee->Regdot11HTOperationalRateSet, ieee->dot11HTOperationalRateSet);
- //function below is not implemented at all. WB
-#ifdef TODO
- Adapter->HalFunc.InitHalRATRTableHandler( Adapter, &pMgntInfo->dot11OperationalRateSet, pMgntInfo->dot11HTOperationalRateSet);
-#endif
- ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, MCS_FILTER_ALL);
- ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
-
- }
- else
- {
- pHTInfo->bCurrentHTSupport = false;
- }
- return;
-}
/********************************************************************************************************************
*function: check whether HT control field exists
* input: struct ieee80211_device *ieee
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 3058120a3243..426f223e6a21 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -3,13 +3,13 @@
#include <linux/slab.h>
#include "rtl819x_TS.h"
-void TsSetupTimeOut(unsigned long data)
+static void TsSetupTimeOut(unsigned long data)
{
// Not implement yet
// This is used for WMMSA and ACM , that would send ADDTSReq frame.
}
-void TsInactTimeout(unsigned long data)
+static void TsInactTimeout(unsigned long data)
{
// Not implement yet
// This is used for WMMSA and ACM.
@@ -22,7 +22,7 @@ void TsInactTimeout(unsigned long data)
* return: NULL
* notice:
********************************************************************************************************************/
-void RxPktPendingTimeout(unsigned long data)
+static void RxPktPendingTimeout(unsigned long data)
{
PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data;
struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
@@ -99,7 +99,7 @@ void RxPktPendingTimeout(unsigned long data)
* return: NULL
* notice:
********************************************************************************************************************/
-void TsAddBaProcess(unsigned long data)
+static void TsAddBaProcess(unsigned long data)
{
PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data;
u8 num = pTxTs->num;
@@ -110,7 +110,7 @@ void TsAddBaProcess(unsigned long data)
}
-void ResetTsCommonInfo(PTS_COMMON_INFO pTsCommonInfo)
+static void ResetTsCommonInfo(PTS_COMMON_INFO pTsCommonInfo)
{
memset(pTsCommonInfo->Addr, 0, 6);
memset(&pTsCommonInfo->TSpec, 0, sizeof(TSPEC_BODY));
@@ -119,7 +119,7 @@ void ResetTsCommonInfo(PTS_COMMON_INFO pTsCommonInfo)
pTsCommonInfo->TClasNum = 0;
}
-void ResetTxTsEntry(PTX_TS_RECORD pTS)
+static void ResetTxTsEntry(PTX_TS_RECORD pTS)
{
ResetTsCommonInfo(&pTS->TsCommonInfo);
pTS->TxCurSeq = 0;
@@ -130,7 +130,7 @@ void ResetTxTsEntry(PTX_TS_RECORD pTS)
ResetBaEntry(&pTS->TxPendingBARecord);
}
-void ResetRxTsEntry(PRX_TS_RECORD pTS)
+static void ResetRxTsEntry(PRX_TS_RECORD pTS)
{
ResetTsCommonInfo(&pTS->TsCommonInfo);
pTS->RxIndicateSeq = 0xffff; // This indicate the RxIndicateSeq is not used now!!
@@ -224,7 +224,8 @@ void TSInitialize(struct ieee80211_device *ieee)
}
-void AdmitTS(struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, u32 InactTime)
+static void AdmitTS(struct ieee80211_device *ieee,
+ PTS_COMMON_INFO pTsCommonInfo, u32 InactTime)
{
del_timer_sync(&pTsCommonInfo->SetupTimer);
del_timer_sync(&pTsCommonInfo->InactTimer);
@@ -234,7 +235,9 @@ void AdmitTS(struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, u32 I
}
-PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8 *Addr, u8 TID, TR_SELECT TxRxSelect)
+static PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee,
+ u8 *Addr, u8 TID,
+ TR_SELECT TxRxSelect)
{
//DIRECTION_VALUE dir;
u8 dir;
@@ -309,14 +312,9 @@ PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8 *Addr, u8
return NULL;
}
-void MakeTSEntry(
- PTS_COMMON_INFO pTsCommonInfo,
- u8 *Addr,
- PTSPEC_BODY pTSPEC,
- PQOS_TCLAS pTCLAS,
- u8 TCLAS_Num,
- u8 TCLAS_Proc
- )
+static void MakeTSEntry(PTS_COMMON_INFO pTsCommonInfo, u8 *Addr,
+ PTSPEC_BODY pTSPEC, PQOS_TCLAS pTCLAS, u8 TCLAS_Num,
+ u8 TCLAS_Proc)
{
u8 count;
@@ -472,11 +470,8 @@ bool GetTs(
}
}
-void RemoveTsEntry(
- struct ieee80211_device *ieee,
- PTS_COMMON_INFO pTs,
- TR_SELECT TxRxSelect
- )
+static void RemoveTsEntry(struct ieee80211_device *ieee, PTS_COMMON_INFO pTs,
+ TR_SELECT TxRxSelect)
{
//u32 flags = 0;
unsigned long flags = 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl_crypto.h b/drivers/staging/rtl8192u/ieee80211/rtl_crypto.h
deleted file mode 100644
index c3c87108ee9e..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/rtl_crypto.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Scatterlist Cryptographic API.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2002 David S. Miller (davem@redhat.com)
- *
- * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
- * and Nettle, by Niels Möller.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#ifndef _LINUX_CRYPTO_H
-#define _LINUX_CRYPTO_H
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/string.h>
-#include <asm/page.h>
-#include <asm/errno.h>
-
-#define crypto_register_alg crypto_register_alg_rsl
-#define crypto_unregister_alg crypto_unregister_alg_rsl
-#define crypto_alloc_tfm crypto_alloc_tfm_rsl
-#define crypto_free_tfm crypto_free_tfm_rsl
-#define crypto_alg_available crypto_alg_available_rsl
-
-/*
- * Algorithm masks and types.
- */
-#define CRYPTO_ALG_TYPE_MASK 0x000000ff
-#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
-#define CRYPTO_ALG_TYPE_DIGEST 0x00000002
-#define CRYPTO_ALG_TYPE_COMPRESS 0x00000004
-
-/*
- * Transform masks and values (for crt_flags).
- */
-#define CRYPTO_TFM_MODE_MASK 0x000000ff
-#define CRYPTO_TFM_REQ_MASK 0x000fff00
-#define CRYPTO_TFM_RES_MASK 0xfff00000
-
-#define CRYPTO_TFM_MODE_ECB 0x00000001
-#define CRYPTO_TFM_MODE_CBC 0x00000002
-#define CRYPTO_TFM_MODE_CFB 0x00000004
-#define CRYPTO_TFM_MODE_CTR 0x00000008
-
-#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
-#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
-#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
-#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
-#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
-#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
-
-/*
- * Miscellaneous stuff.
- */
-#define CRYPTO_UNSPEC 0
-#define CRYPTO_MAX_ALG_NAME 64
-
-struct scatterlist;
-
-/*
- * Algorithms: modular crypto algorithm implementations, managed
- * via crypto_register_alg() and crypto_unregister_alg().
- */
-struct cipher_alg {
- unsigned int cia_min_keysize;
- unsigned int cia_max_keysize;
- int (*cia_setkey)(void *ctx, const u8 *key,
- unsigned int keylen, u32 *flags);
- void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src);
- void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src);
-};
-
-struct digest_alg {
- unsigned int dia_digestsize;
- void (*dia_init)(void *ctx);
- void (*dia_update)(void *ctx, const u8 *data, unsigned int len);
- void (*dia_final)(void *ctx, u8 *out);
- int (*dia_setkey)(void *ctx, const u8 *key,
- unsigned int keylen, u32 *flags);
-};
-
-struct compress_alg {
- int (*coa_init)(void *ctx);
- void (*coa_exit)(void *ctx);
- int (*coa_compress)(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*coa_decompress)(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
-
-#define cra_cipher cra_u.cipher
-#define cra_digest cra_u.digest
-#define cra_compress cra_u.compress
-
-struct crypto_alg {
- struct list_head cra_list;
- u32 cra_flags;
- unsigned int cra_blocksize;
- unsigned int cra_ctxsize;
- const char cra_name[CRYPTO_MAX_ALG_NAME];
-
- union {
- struct cipher_alg cipher;
- struct digest_alg digest;
- struct compress_alg compress;
- } cra_u;
-
- struct module *cra_module;
-};
-
-/*
- * Algorithm registration interface.
- */
-int crypto_register_alg(struct crypto_alg *alg);
-int crypto_unregister_alg(struct crypto_alg *alg);
-
-/*
- * Algorithm query interface.
- */
-int crypto_alg_available(const char *name, u32 flags);
-
-/*
- * Transforms: user-instantiated objects which encapsulate algorithms
- * and core processing logic. Managed via crypto_alloc_tfm() and
- * crypto_free_tfm(), as well as the various helpers below.
- */
-struct crypto_tfm;
-
-struct cipher_tfm {
- void *cit_iv;
- unsigned int cit_ivsize;
- u32 cit_mode;
- int (*cit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
- int (*cit_encrypt)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes);
- int (*cit_encrypt_iv)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv);
- int (*cit_decrypt)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes);
- int (*cit_decrypt_iv)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv);
- void (*cit_xor_block)(u8 *dst, const u8 *src);
-};
-
-struct digest_tfm {
- void (*dit_init)(struct crypto_tfm *tfm);
- void (*dit_update)(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg);
- void (*dit_final)(struct crypto_tfm *tfm, u8 *out);
- void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg,
- unsigned int nsg, u8 *out);
- int (*dit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
-#ifdef CONFIG_CRYPTO_HMAC
- void *dit_hmac_block;
-#endif
-};
-
-struct compress_tfm {
- int (*cot_compress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*cot_decompress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
-
-#define crt_cipher crt_u.cipher
-#define crt_digest crt_u.digest
-#define crt_compress crt_u.compress
-
-struct crypto_tfm {
-
- u32 crt_flags;
-
- union {
- struct cipher_tfm cipher;
- struct digest_tfm digest;
- struct compress_tfm compress;
- } crt_u;
-
- struct crypto_alg *__crt_alg;
-};
-
-/*
- * Transform user interface.
- */
-
-/*
- * crypto_alloc_tfm() will first attempt to locate an already loaded algorithm.
- * If that fails and the kernel supports dynamically loadable modules, it
- * will then attempt to load a module of the same name or alias. A refcount
- * is grabbed on the algorithm which is then associated with the new transform.
- *
- * crypto_free_tfm() frees up the transform and any associated resources,
- * then drops the refcount on the associated algorithm.
- */
-struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
-void crypto_free_tfm(struct crypto_tfm *tfm);
-
-/*
- * Transform helpers which query the underlying algorithm.
- */
-static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_name;
-}
-
-static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
-
- if (alg->cra_module)
- return alg->cra_module->name;
- else
- return NULL;
-}
-
-static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
-}
-
-static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->__crt_alg->cra_cipher.cia_min_keysize;
-}
-
-static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->__crt_alg->cra_cipher.cia_max_keysize;
-}
-
-static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_ivsize;
-}
-
-static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_blocksize;
-}
-
-static inline unsigned int crypto_tfm_alg_digestsize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- return tfm->__crt_alg->cra_digest.dia_digestsize;
-}
-
-/*
- * API wrappers.
- */
-static inline void crypto_digest_init(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_init(tfm);
-}
-
-static inline void crypto_digest_update(struct crypto_tfm *tfm,
- struct scatterlist *sg,
- unsigned int nsg)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_update(tfm, sg, nsg);
-}
-
-static inline void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_final(tfm, out);
-}
-
-static inline void crypto_digest_digest(struct crypto_tfm *tfm,
- struct scatterlist *sg,
- unsigned int nsg, u8 *out)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_digest(tfm, sg, nsg, out);
-}
-
-static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- if (tfm->crt_digest.dit_setkey == NULL)
- return -ENOSYS;
- return tfm->crt_digest.dit_setkey(tfm, key, keylen);
-}
-
-static inline int crypto_cipher_setkey(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_setkey(tfm, key, keylen);
-}
-
-static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
-}
-
-static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
- return tfm->crt_cipher.cit_encrypt_iv(tfm, dst, src, nbytes, iv);
-}
-
-static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
-}
-
-static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
- return tfm->crt_cipher.cit_decrypt_iv(tfm, dst, src, nbytes, iv);
-}
-
-static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
- const u8 *src, unsigned int len)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- memcpy(tfm->crt_cipher.cit_iv, src, len);
-}
-
-static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm,
- u8 *dst, unsigned int len)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- memcpy(dst, tfm->crt_cipher.cit_iv, len);
-}
-
-static inline int crypto_comp_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
- return tfm->crt_compress.cot_compress(tfm, src, slen, dst, dlen);
-}
-
-static inline int crypto_comp_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
- return tfm->crt_compress.cot_decompress(tfm, src, slen, dst, dlen);
-}
-
-/*
- * HMAC support.
- */
-#ifdef CONFIG_CRYPTO_HMAC
-void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen);
-void crypto_hmac_update(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg);
-void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
- unsigned int *keylen, u8 *out);
-void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen,
- struct scatterlist *sg, unsigned int nsg, u8 *out);
-#endif /* CONFIG_CRYPTO_HMAC */
-
-#endif /* _LINUX_CRYPTO_H */
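For reference, a minimal sketch of how callers used the one-shot digest interface declared in the header removed above. It uses only the entry points shown there (crypto_alloc_tfm, crypto_digest_init/update/final, crypto_free_tfm); the scatterlist helper sg_init_one and the "md5" algorithm name are assumptions, and this interface was long ago superseded in mainline, so the sketch is illustrative only and will not build against a current tree.

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Illustrative sketch of the legacy digest API declared in the removed header. */
static int legacy_md5_digest(const void *data, unsigned int len, u8 *out)
{
	struct scatterlist sg;
	struct crypto_tfm *tfm;

	tfm = crypto_alloc_tfm("md5", 0);	/* may trigger loading of an "md5" module */
	if (!tfm)
		return -ENOMEM;

	sg_init_one(&sg, data, len);		/* data must not live on the stack */

	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &sg, 1);
	crypto_digest_final(tfm, out);		/* out needs crypto_tfm_alg_digestsize(tfm) bytes */

	crypto_free_tfm(tfm);			/* drops the refcount on the algorithm */
	return 0;
}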
diff --git a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c b/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
deleted file mode 100644
index 8b73f6cefcf9..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Cipher operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * 2002 Adam J. Richter <adam@yggdrasil.com>
- * 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/highmem.h>
-#include <asm/scatterlist.h>
-#include "internal.h"
-#include "scatterwalk.h"
-
-void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
-{
- if (nbytes <= walk->len_this_page &&
- (((unsigned long)walk->data) & (PAGE_CACHE_SIZE - 1)) + nbytes <=
- PAGE_CACHE_SIZE)
- return walk->data;
- else
- return scratch;
-}
-
-static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
-{
- if (out)
- memcpy(sgdata, buf, nbytes);
- else
- memcpy(buf, sgdata, nbytes);
-}
-
-void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
-{
- unsigned int rest_of_page;
-
- walk->sg = sg;
-
- walk->page = sg->page;
- walk->len_this_segment = sg->length;
-
- rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
- walk->len_this_page = min(sg->length, rest_of_page);
- walk->offset = sg->offset;
-}
-
-void scatterwalk_map(struct scatter_walk *walk)
-{
- walk->data = kmap_atomic(walk->page) + walk->offset;
-}
-
-static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
- unsigned int more)
-{
- /* walk->data may be pointing the first byte of the next page;
- however, we know we transferred at least one byte. So,
- walk->data - 1 will be a virtual address in the mapped page. */
-
- if (out)
- flush_dcache_page(walk->page);
-
- if (more) {
- walk->len_this_segment -= walk->len_this_page;
-
- if (walk->len_this_segment) {
- walk->page++;
- walk->len_this_page = min(walk->len_this_segment,
- (unsigned)PAGE_CACHE_SIZE);
- walk->offset = 0;
- }
- else
- scatterwalk_start(walk, sg_next(walk->sg));
- }
-}
-
-void scatterwalk_done(struct scatter_walk *walk, int out, int more)
-{
- crypto_kunmap(walk->data, out);
- if (walk->len_this_page == 0 || !more)
- scatterwalk_pagedone(walk, out, more);
-}
-
-/*
- * Do not call this unless the total length of all of the fragments
- * has been verified as multiple of the block size.
- */
-int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
- size_t nbytes)
-{
- if (buf != walk->data) {
- while (nbytes > walk->len_this_page) {
- memcpy_dir(buf, walk->data, walk->len_this_page, out);
- buf += walk->len_this_page;
- nbytes -= walk->len_this_page;
-
- kunmap_atomic(walk->data);
- scatterwalk_pagedone(walk, out, 1);
- scatterwalk_map(walk);
- }
-
- memcpy_dir(buf, walk->data, nbytes, out);
- }
-
- walk->offset += nbytes;
- walk->len_this_page -= nbytes;
- walk->len_this_segment -= nbytes;
- return 0;
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/scatterwalk.h b/drivers/staging/rtl8192u/ieee80211/scatterwalk.h
deleted file mode 100644
index b16446519017..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/scatterwalk.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
- * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#ifndef _CRYPTO_SCATTERWALK_H
-#define _CRYPTO_SCATTERWALK_H
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-
-struct scatter_walk {
- struct scatterlist *sg;
- struct page *page;
- void *data;
- unsigned int len_this_page;
- unsigned int len_this_segment;
- unsigned int offset;
-};
-
-/* Define sg_next is an inline routine now in case we want to change
- scatterlist to a linked list later. */
-static inline struct scatterlist *sg_next(struct scatterlist *sg)
-{
- return sg + 1;
-}
-
-static inline int scatterwalk_samebuf(struct scatter_walk *walk_in,
- struct scatter_walk *walk_out,
- void *src_p, void *dst_p)
-{
- return walk_in->page == walk_out->page &&
- walk_in->offset == walk_out->offset &&
- walk_in->data == src_p && walk_out->data == dst_p;
-}
-
-void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch);
-void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
-int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out);
-void scatterwalk_map(struct scatter_walk *walk, int out);
-void scatterwalk_done(struct scatter_walk *walk, int out, int more);
-
-#endif /* _CRYPTO_SCATTERWALK_H */
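For reference, a minimal sketch of how a caller drives this walker, following the prototypes in the header removed above (the deleted scatterwalk.c had already drifted from them: its scatterwalk_map() takes no out flag and its scatterwalk_copychunks() uses an out variable it never declares, which fits with this being dead, uncompiled code). The helper copies the first nbytes of a scatterlist into a flat buffer; it assumes the scatterlist describes at least nbytes of data and is illustrative only.

#include <linux/scatterlist.h>
#include "scatterwalk.h"

/* Illustrative only: copy the first nbytes described by sg into buf. */
static void scatterlist_copy_out(struct scatterlist *sg, void *buf,
				 unsigned int nbytes)
{
	struct scatter_walk walk;

	scatterwalk_start(&walk, sg);
	scatterwalk_map(&walk, 0);			/* 0: we read from the scatterlist */
	scatterwalk_copychunks(buf, &walk, nbytes, 0);	/* crosses page/segment boundaries */
	scatterwalk_done(&walk, 0, 0);			/* unmap; no further data expected */
}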
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c
index c61729b727ef..cd06054fbedb 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.c
+++ b/drivers/staging/rtl8192u/r8180_93cx6.c
@@ -20,7 +20,7 @@
#include "r8180_93cx6.h"
-void eprom_cs(struct net_device *dev, short bit)
+static void eprom_cs(struct net_device *dev, short bit)
{
u8 cmdreg;
@@ -37,7 +37,7 @@ void eprom_cs(struct net_device *dev, short bit)
}
-void eprom_ck_cycle(struct net_device *dev)
+static void eprom_ck_cycle(struct net_device *dev)
{
u8 cmdreg;
@@ -53,7 +53,7 @@ void eprom_ck_cycle(struct net_device *dev)
}
-void eprom_w(struct net_device *dev,short bit)
+static void eprom_w(struct net_device *dev,short bit)
{
u8 cmdreg;
@@ -68,7 +68,7 @@ void eprom_w(struct net_device *dev,short bit)
}
-short eprom_r(struct net_device *dev)
+static short eprom_r(struct net_device *dev)
{
u8 bit;
@@ -82,7 +82,7 @@ short eprom_r(struct net_device *dev)
}
-void eprom_send_bits_string(struct net_device *dev, short b[], int len)
+static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
{
int i;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index c2bcbe230ed3..1bb6143cdf0f 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -238,7 +238,7 @@ static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv *priv)
-void CamResetAllEntry(struct net_device *dev)
+static void CamResetAllEntry(struct net_device *dev)
{
u32 ulcommand = 0;
//2004/02/11 In static WEP, OID_ADD_KEY or OID_ADD_WEP are set before STA associate to AP.
@@ -591,12 +591,6 @@ static void rtl8192_proc_module_init(void)
rtl8192_proc = proc_mkdir(RTL819xU_MODULE_NAME, init_net.proc_net);
}
-
-void rtl8192_proc_module_remove(void)
-{
- remove_proc_entry(RTL819xU_MODULE_NAME, init_net.proc_net);
-}
-
/*
* seq_file wrappers for procfile show routines.
*/
@@ -673,7 +667,7 @@ short check_nic_enough_desc(struct net_device *dev, int queue_index)
return (used < MAX_TX_URB);
}
-void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -689,24 +683,6 @@ void dump_eprom(struct net_device *dev)
RT_TRACE(COMP_EPROM, "EEPROM addr %x : %x", i, eprom_read(dev, i));
}
-
-/****************************************************************************
- ------------------------------HW STUFF---------------------------
-*****************************************************************************/
-
-
-void rtl8192_set_mode(struct net_device *dev, int mode)
-{
- u8 ecmd;
- read_nic_byte(dev, EPROM_CMD, &ecmd);
- ecmd = ecmd & ~EPROM_CMD_OPERATING_MODE_MASK;
- ecmd = ecmd | (mode<<EPROM_CMD_OPERATING_MODE_SHIFT);
- ecmd = ecmd & ~EPROM_CS_BIT;
- ecmd = ecmd & ~EPROM_CK_BIT;
- write_nic_byte(dev, EPROM_CMD, ecmd);
-}
-
-
void rtl8192_update_msr(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -903,12 +879,6 @@ void rtl8192_rtx_disable(struct net_device *dev)
return;
}
-
-int alloc_tx_beacon_desc_ring(struct net_device *dev, int count)
-{
- return 0;
-}
-
inline u16 ieeerate2rtlrate(int rate)
{
switch (rate) {
@@ -1011,13 +981,13 @@ static u32 rtl819xusb_rx_command_packet(struct net_device *dev,
}
-void rtl8192_data_hard_stop(struct net_device *dev)
+static void rtl8192_data_hard_stop(struct net_device *dev)
{
//FIXME !!
}
-void rtl8192_data_hard_resume(struct net_device *dev)
+static void rtl8192_data_hard_resume(struct net_device *dev)
{
// FIXME !!
}
@@ -1025,7 +995,7 @@ void rtl8192_data_hard_resume(struct net_device *dev)
/* This function transmits data frames when the ieee80211 stack requires it.
 * It also checks whether the ieee tx queue needs to be stopped and, if so, stops it
*/
-void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rate)
+static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rate)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
int ret;
@@ -1053,7 +1023,7 @@ void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rat
* If the ring is full, packets are dropped (for data frames the queue
* is stopped before this can happen).
*/
-int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
int ret;
@@ -1318,7 +1288,8 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
/* Don't send data frame during scanning.*/
if ((skb_queue_len(&priv->ieee80211->skb_waitQ[queue_index]) != 0) &&
(!(priv->ieee80211->queue_stop))) {
- if (NULL != (skb = skb_dequeue(&(priv->ieee80211->skb_waitQ[queue_index]))))
+ skb = skb_dequeue(&(priv->ieee80211->skb_waitQ[queue_index]));
+ if (skb)
priv->ieee80211->softmac_hard_start_xmit(skb, dev);
return; //modified by david to avoid further processing AMSDU
@@ -1358,25 +1329,7 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
}
-void rtl8192_beacon_stop(struct net_device *dev)
-{
- u8 msr, msrm, msr2;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- read_nic_byte(dev, MSR, &msr);
- msrm = msr & MSR_LINK_MASK;
- msr2 = msr & ~MSR_LINK_MASK;
-
- if (NIC_8192U == priv->card_8192)
- usb_kill_urb(priv->rx_urb[MAX_RX_URB]);
- if ((msrm == (MSR_LINK_ADHOC<<MSR_LINK_SHIFT) ||
- (msrm == (MSR_LINK_MASTER<<MSR_LINK_SHIFT)))) {
- write_nic_byte(dev, MSR, msr2 | MSR_LINK_NONE);
- write_nic_byte(dev, MSR, msr);
- }
-}
-
-void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
+static void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_network *net;
@@ -1423,7 +1376,7 @@ void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
-void rtl8192_update_cap(struct net_device *dev, u16 cap)
+static void rtl8192_update_cap(struct net_device *dev, u16 cap)
{
u32 tmp = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1445,7 +1398,7 @@ void rtl8192_update_cap(struct net_device *dev, u16 cap)
}
}
-void rtl8192_net_update(struct net_device *dev)
+static void rtl8192_net_update(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1560,11 +1513,6 @@ u16 N_DBPSOfRate(u16 DataRate)
return N_DBPS;
}
-void rtl819xU_cmd_isr(struct urb *tx_cmd_urb, struct pt_regs *regs)
-{
- usb_free_urb(tx_cmd_urb);
-}
-
unsigned int txqueue2outpipe(struct r8192_priv *priv, unsigned int tx_queue)
{
if (tx_queue >= 9) {
@@ -1673,7 +1621,7 @@ static u8 MapHwQueueToFirmwareQueue(u8 QueueID)
return QueueSelect;
}
-u8 MRateToHwRate8190Pci(u8 rate)
+static u8 MRateToHwRate8190Pci(u8 rate)
{
u8 ret = DESC90_RATE1M;
@@ -2039,7 +1987,7 @@ void rtl8192_usb_deleteendpoints(struct net_device *dev)
#endif
extern void rtl8192_update_ratr_table(struct net_device *dev);
-void rtl8192_link_change(struct net_device *dev)
+static void rtl8192_link_change(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
@@ -2071,7 +2019,7 @@ static struct ieee80211_qos_parameters def_qos_parameters = {
};
-void rtl8192_update_beacon(struct work_struct *work)
+static void rtl8192_update_beacon(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, update_beacon_wq.work);
struct net_device *dev = priv->ieee80211->dev;
@@ -2086,8 +2034,8 @@ void rtl8192_update_beacon(struct work_struct *work)
/*
* background support to run QoS activate functionality
*/
-int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI, EDCAPARA_VO};
-void rtl8192_qos_activate(struct work_struct *work)
+static int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI, EDCAPARA_VO};
+static void rtl8192_qos_activate(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, qos_activate);
struct net_device *dev = priv->ieee80211->dev;
@@ -2315,7 +2263,7 @@ static bool GetNmodeSupportBySecCfg8192(struct net_device *dev)
return true;
}
-bool GetHalfNmodeSupportByAPs819xUsb(struct net_device *dev)
+static bool GetHalfNmodeSupportByAPs819xUsb(struct net_device *dev)
{
bool Reval;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2329,7 +2277,7 @@ bool GetHalfNmodeSupportByAPs819xUsb(struct net_device *dev)
return Reval;
}
-void rtl8192_refresh_supportrate(struct r8192_priv *priv)
+static void rtl8192_refresh_supportrate(struct r8192_priv *priv)
{
struct ieee80211_device *ieee = priv->ieee80211;
//we do not consider set support rate for ABG mode, only HT MCS rate is set here.
@@ -2340,7 +2288,7 @@ void rtl8192_refresh_supportrate(struct r8192_priv *priv)
return;
}
-u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
+static u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 ret = 0;
@@ -2359,7 +2307,7 @@ u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
}
return ret;
}
-void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
+static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 bSupportMode = rtl8192_getSupportedWireleeMode(dev);
@@ -2779,7 +2727,7 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
return;
}
-short rtl8192_get_channel_map(struct net_device *dev)
+static short rtl8192_get_channel_map(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
if (priv->ChannelPlan > COUNTRY_CODE_GLOBAL_DOMAIN) {
@@ -2792,7 +2740,7 @@ short rtl8192_get_channel_map(struct net_device *dev)
return 0;
}
-short rtl8192_init(struct net_device *dev)
+static short rtl8192_init(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2840,7 +2788,7 @@ short rtl8192_init(struct net_device *dev)
* return: none
* notice: This part needs to be modified according to the rate set we filtered
* ****************************************************************************/
-void rtl8192_hwconfig(struct net_device *dev)
+static void rtl8192_hwconfig(struct net_device *dev)
{
u32 regRATR = 0, regRRSR = 0;
u8 regBwOpMode = 0, regTmp = 0;
@@ -2923,7 +2871,7 @@ void rtl8192_hwconfig(struct net_device *dev)
//InitializeAdapter and PhyCfg
-bool rtl8192_adapter_start(struct net_device *dev)
+static bool rtl8192_adapter_start(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 dwRegRead = 0;
@@ -3179,7 +3127,7 @@ static bool HalTxCheckStuck819xUsb(struct net_device *dev)
* <Assumption: RT_TX_SPINLOCK is acquired.>
* First added: 2006.11.19 by emily
*/
-RESET_TYPE TxCheckStuck(struct net_device *dev)
+static RESET_TYPE TxCheckStuck(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 QueueID;
@@ -3282,7 +3230,7 @@ static RESET_TYPE RxCheckStuck(struct net_device *dev)
*
* 8185 and 8185b do not implement this function. This was added by Emily on 2006.11.24
*/
-RESET_TYPE rtl819x_ifcheck_resetornot(struct net_device *dev)
+static RESET_TYPE rtl819x_ifcheck_resetornot(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
RESET_TYPE TxResetType = RESET_TYPE_NORESET;
@@ -3321,7 +3269,7 @@ int rtl8192_close(struct net_device *dev);
-void CamRestoreAllEntry(struct net_device *dev)
+static void CamRestoreAllEntry(struct net_device *dev)
{
u8 EntryId = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3397,7 +3345,7 @@ void CamRestoreAllEntry(struct net_device *dev)
// The method checking Tx/Rx stuck of this function is supported by FW,
// which reports Tx and Rx counter to register 0x128 and 0x130.
//////////////////////////////////////////////////////////////
-void rtl819x_ifsilentreset(struct net_device *dev)
+static void rtl819x_ifsilentreset(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 reset_times = 0;
@@ -3517,7 +3465,7 @@ void CAM_read_entry(struct net_device *dev, u32 iIndex)
printk("\n");
}
-void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
+static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
u32 *TotalRxDataNum)
{
u16 SlotIndex;
@@ -3536,7 +3484,7 @@ void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
}
-extern void rtl819x_watchdog_wqcallback(struct work_struct *work)
+void rtl819x_watchdog_wqcallback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
struct r8192_priv *priv = container_of(dwork, struct r8192_priv, watch_dog_wq);
@@ -3634,7 +3582,7 @@ int _rtl8192_up(struct net_device *dev)
}
-int rtl8192_open(struct net_device *dev)
+static int rtl8192_open(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
@@ -3754,7 +3702,7 @@ static void r8192_set_multicast(struct net_device *dev)
}
-int r8192_set_mac_adr(struct net_device *dev, void *mac)
+static int r8192_set_mac_adr(struct net_device *dev, void *mac)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct sockaddr *addr = mac;
@@ -3770,7 +3718,7 @@ int r8192_set_mac_adr(struct net_device *dev, void *mac)
}
/* based on ipw2200 driver */
-int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct iwreq *wrq = (struct iwreq *)rq;
@@ -3861,7 +3809,7 @@ out:
return ret;
}
-u8 HwRateToMRate90(bool bIsHT, u8 rate)
+static u8 HwRateToMRate90(bool bIsHT, u8 rate)
{
u8 ret_rate = 0xff;
@@ -3947,7 +3895,7 @@ static void UpdateRxPktTimeStamp8190(struct net_device *dev,
//by amy 080606
-long rtl819x_translate_todbm(u8 signal_strength_index)// 0-100 index.
+static long rtl819x_translate_todbm(u8 signal_strength_index)// 0-100 index.
{
long signal_power; // in dBm.
@@ -3963,7 +3911,9 @@ long rtl819x_translate_todbm(u8 signal_strength_index)// 0-100 index.
be a local static. Otherwise, it may increase when we return from S3/S4. The
value will be kept in memory or disk. Declare the value in the adaptor
and it will be reinitialized when returned from S3/S4. */
-void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer, struct ieee80211_rx_stats *pprevious_stats, struct ieee80211_rx_stats *pcurrent_stats)
+static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
+ struct ieee80211_rx_stats *pprevious_stats,
+ struct ieee80211_rx_stats *pcurrent_stats)
{
bool bcheck = false;
u8 rfpath;
@@ -4449,8 +4399,8 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
}
} /* QueryRxPhyStatus8190Pci */
-void rtl8192_record_rxdesc_forlateruse(struct ieee80211_rx_stats *psrc_stats,
- struct ieee80211_rx_stats *ptarget_stats)
+static void rtl8192_record_rxdesc_forlateruse(struct ieee80211_rx_stats *psrc_stats,
+ struct ieee80211_rx_stats *ptarget_stats)
{
ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
@@ -4721,7 +4671,7 @@ u32 GetRxPacketShiftBytes819xUsb(struct ieee80211_rx_stats *Status, bool bIsRxA
+ Status->RxBufShift);
}
-void rtl8192_rx_nomal(struct sk_buff *skb)
+static void rtl8192_rx_nomal(struct sk_buff *skb)
{
rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev = info->dev;
@@ -4871,8 +4821,8 @@ void rtl8192_rx_nomal(struct sk_buff *skb)
}
-void rtl819xusb_process_received_packet(struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
+static void rtl819xusb_process_received_packet(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats)
{
u8 *frame;
u16 frame_len = 0;
@@ -4932,7 +4882,7 @@ static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
}
-void rtl8192_rx_cmd(struct sk_buff *skb)
+static void rtl8192_rx_cmd(struct sk_buff *skb)
{
struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev = info->dev;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 41fb67b7337d..d97ad7b909bc 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -50,34 +50,7 @@ DRxPathSel DM_RxPathSelTable;
/*--------------------Define export function prototype-----------------------*/
-extern void init_hal_dm(struct net_device *dev);
-extern void deinit_hal_dm(struct net_device *dev);
-
-extern void hal_dm_watchdog(struct net_device *dev);
-
-
-extern void init_rate_adaptive(struct net_device *dev);
-extern void dm_txpower_trackingcallback(struct work_struct *work);
-
-extern void dm_cck_txpower_adjust(struct net_device *dev,bool binch14);
-extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
- u32 dm_type,
- u32 dm_value);
-extern void DM_ChangeFsyncSetting(struct net_device *dev,
- s32 DM_Type,
- s32 DM_Value);
-extern void dm_force_tx_fw_info(struct net_device *dev,
- u32 force_type,
- u32 force_value);
-extern void dm_init_edca_turbo(struct net_device *dev);
-extern void dm_rf_operation_test_callback(unsigned long data);
-extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-extern void dm_fsync_timer_callback(unsigned long data);
extern void dm_check_fsync(struct net_device *dev);
-extern void dm_shadow_init(struct net_device *dev);
-
/*--------------------Define export function prototype-----------------------*/
@@ -155,8 +128,7 @@ static void dm_ctstoself(struct net_device *dev);
// This function is invoked only once, at driver initialization.
//
//
-extern void
-init_hal_dm(struct net_device *dev)
+void init_hal_dm(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -176,7 +148,7 @@ init_hal_dm(struct net_device *dev)
} // InitHalDm
-extern void deinit_hal_dm(struct net_device *dev)
+void deinit_hal_dm(struct net_device *dev)
{
dm_deInit_fsync(dev);
@@ -242,7 +214,7 @@ void dm_CheckRxAggregation(struct net_device *dev) {
-extern void hal_dm_watchdog(struct net_device *dev)
+void hal_dm_watchdog(struct net_device *dev)
{
//struct r8192_priv *priv = ieee80211_priv(dev);
@@ -275,7 +247,7 @@ extern void hal_dm_watchdog(struct net_device *dev)
* 01/16/2008 MHC RF_Type is assigned in ReadAdapterInfo(). We must call
* the function after making sure RF_Type.
*/
-extern void init_rate_adaptive(struct net_device *dev)
+void init_rate_adaptive(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -875,7 +847,7 @@ static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
priv->txpower_count = 0;
}
-extern void dm_txpower_trackingcallback(struct work_struct *work)
+void dm_txpower_trackingcallback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work,struct delayed_work,work);
struct r8192_priv *priv = container_of(dwork,struct r8192_priv,txpower_tracking_wq);
@@ -1606,10 +1578,7 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
-extern void dm_cck_txpower_adjust(
- struct net_device *dev,
- bool binch14
-)
+void dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
{ // dm_CCKTxPowerAdjust
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1642,7 +1611,7 @@ static void dm_txpower_reset_recovery(
} // dm_TXPowerResetRecovery
-extern void dm_restore_dynamic_mechanism_state(struct net_device *dev)
+void dm_restore_dynamic_mechanism_state(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 reg_ratr = priv->rate_adaptive.last_ratr;
@@ -1718,7 +1687,7 @@ static void dm_bb_initialgain_restore(struct net_device *dev)
} // dm_BBInitialGainRestore
-extern void dm_backup_dynamic_mechanism_state(struct net_device *dev)
+void dm_backup_dynamic_mechanism_state(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1773,9 +1742,9 @@ static void dm_bb_initialgain_backup(struct net_device *dev)
* 05/29/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
- u32 dm_type,
- u32 dm_value)
+
+void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type,
+ u32 dm_value)
{
if (dm_type == DIG_TYPE_THRESH_HIGH)
{
@@ -1842,24 +1811,8 @@ extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
dm_digtable.rx_gain_range_max = (u8)dm_value;
}
} /* DM_ChangeDynamicInitGainThresh */
-extern void
-dm_change_fsync_setting(
- struct net_device *dev,
- s32 DM_Type,
- s32 DM_Value)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- if (DM_Type == 0) // monitor 0xc38 register
- {
- if(DM_Value > 1)
- DM_Value = 1;
- priv->framesyncMonitor = (u8)DM_Value;
- //DbgPrint("pHalData->framesyncMonitor = %d", pHalData->framesyncMonitor);
- }
-}
-
-extern void
+void
dm_change_rxpath_selection_setting(
struct net_device *dev,
s32 DM_Type,
@@ -2540,7 +2493,7 @@ static void dm_cs_ratio(
}
}
-extern void dm_init_edca_turbo(struct net_device *dev)
+void dm_init_edca_turbo(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2660,26 +2613,6 @@ dm_CheckEdcaTurbo_EXIT:
lastRxOkCnt = priv->stats.rxbytesunicast;
} // dm_CheckEdcaTurbo
-extern void DM_CTSToSelfSetting(struct net_device *dev,u32 DM_Type, u32 DM_Value)
-{
- struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
-
- if (DM_Type == 0) // CTS to self disable/enable
- {
- if(DM_Value > 1)
- DM_Value = 1;
- priv->ieee80211->bCTSToSelfEnable = (bool)DM_Value;
- //DbgPrint("pMgntInfo->bCTSToSelfEnable = %d\n", pMgntInfo->bCTSToSelfEnable);
- }
- else if(DM_Type == 1) //CTS to self Th
- {
- if(DM_Value >= 50)
- DM_Value = 50;
- priv->ieee80211->CTSToSelfTH = (u8)DM_Value;
- //DbgPrint("pMgntInfo->CTSToSelfTH = %d\n", pMgntInfo->CTSToSelfTH);
- }
-}
-
static void dm_init_ctstoself(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
@@ -2779,7 +2712,7 @@ static void dm_check_pbc_gpio(struct net_device *dev)
* 01/30/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
-extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
+void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work,struct delayed_work,work);
struct r8192_priv *priv = container_of(dwork,struct r8192_priv,rfpath_check_wq);
@@ -3139,7 +3072,7 @@ static void dm_deInit_fsync(struct net_device *dev)
del_timer_sync(&priv->fsync_timer);
}
-extern void dm_fsync_timer_callback(unsigned long data)
+void dm_fsync_timer_callback(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct r8192_priv *priv = ieee80211_priv((struct net_device *)data);
@@ -3478,7 +3411,7 @@ void dm_check_fsync(struct net_device *dev)
* 05/29/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-extern void dm_shadow_init(struct net_device *dev)
+void dm_shadow_init(struct net_device *dev)
{
u8 page;
u16 offset;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index ae550520311b..3008f91ad4cf 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -200,9 +200,9 @@ typedef struct tag_Tx_Config_Cmd_Format {
/*------------------------Export global variable----------------------------*/
-extern dig_t dm_digtable;
-extern u8 dm_shadow[16][256];
-extern DRxPathSel DM_RxPathSelTable;
+extern dig_t dm_digtable;
+extern u8 dm_shadow[16][256];
+extern DRxPathSel DM_RxPathSelTable;
/*------------------------Export global variable----------------------------*/
@@ -212,25 +212,23 @@ extern DRxPathSel DM_RxPathSelTable;
/*--------------------------Exported Function prototype---------------------*/
-extern void init_hal_dm(struct net_device *dev);
-extern void deinit_hal_dm(struct net_device *dev);
-
+extern void init_hal_dm(struct net_device *dev);
+extern void deinit_hal_dm(struct net_device *dev);
extern void hal_dm_watchdog(struct net_device *dev);
-
-extern void init_rate_adaptive(struct net_device *dev);
-extern void dm_txpower_trackingcallback(struct work_struct *work);
-extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
- u32 dm_type, u32 dm_value);
-extern void dm_force_tx_fw_info(struct net_device *dev,
- u32 force_type, u32 force_value);
-extern void dm_init_edca_turbo(struct net_device *dev);
-extern void dm_rf_operation_test_callback(unsigned long data);
-extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-extern void dm_fsync_timer_callback(unsigned long data);
-extern void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
-extern void dm_shadow_init(struct net_device *dev);
+extern void init_rate_adaptive(struct net_device *dev);
+extern void dm_txpower_trackingcallback(struct work_struct *work);
+extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
+extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
+extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
+ u32 dm_type, u32 dm_value);
+extern void dm_force_tx_fw_info(struct net_device *dev,
+ u32 force_type, u32 force_value);
+extern void dm_init_edca_turbo(struct net_device *dev);
+extern void dm_rf_operation_test_callback(unsigned long data);
+extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
+extern void dm_fsync_timer_callback(unsigned long data);
+extern void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
+extern void dm_shadow_init(struct net_device *dev);
extern void dm_initialize_txpower_tracking(struct net_device *dev);
/*--------------------------Exported Function prototype---------------------*/
diff --git a/drivers/staging/rtl8192u/r819xU_HTGen.h b/drivers/staging/rtl8192u/r819xU_HTGen.h
deleted file mode 100644
index 6a4678f7da5f..000000000000
--- a/drivers/staging/rtl8192u/r819xU_HTGen.h
+++ /dev/null
@@ -1,12 +0,0 @@
-//
-// IOT Action for different AP
-//
-typedef enum _HT_IOT_ACTION{
- HT_IOT_ACT_TX_USE_AMSDU_4K = 0x00000001,
- HT_IOT_ACT_TX_USE_AMSDU_8K = 0x00000002,
- HT_IOT_ACT_DECLARE_MCS13 = 0x00000004,
- HT_IOT_ACT_DISABLE_EDCA_TURBO = 0x00000008,
- HT_IOT_ACT_MGNT_USE_CCK_6M = 0x00000010,
- HT_IOT_ACT_CDD_FSYNC = 0x00000020,
- HT_IOT_ACT_PURE_N_MODE = 0x00000040,
-}HT_IOT_ACTION_E, *PHT_IOT_ACTION_E;
diff --git a/drivers/staging/rtl8192u/r819xU_HTType.h b/drivers/staging/rtl8192u/r819xU_HTType.h
deleted file mode 100644
index 2cbb8e6584f8..000000000000
--- a/drivers/staging/rtl8192u/r819xU_HTType.h
+++ /dev/null
@@ -1,379 +0,0 @@
-#ifndef _R819XU_HTTYPE_H_
-#define _R819XU_HTTYPE_H_
-
-
-/*----------------------------------------------------------------------
- * The HT Capability element is present in beacons, association request,
- * reassociation request and probe response frames
- *----------------------------------------------------------------------*/
-
-/* Operation mode value */
-#define HT_OPMODE_NO_PROTECT 0
-#define HT_OPMODE_OPTIONAL 1
-#define HT_OPMODE_40MHZ_PROTECT 2
-#define HT_OPMODE_MIXED 3
-
-/* MIMO Power Save Settings */
-#define MIMO_PS_STATIC 0
-#define MIMO_PS_DYNAMIC 1
-#define MIMO_PS_NOLIMIT 3
-
-
-/* There should be 128 bits to cover all of the MCS rates. However, since
- * 8190 does not support too much rates, one integer is quite enough. */
-
-#define sHTCLng 4
-
-
-#define HT_SUPPORTED_MCS_1SS_BITMAP 0x000000ff
-#define HT_SUPPORTED_MCS_2SS_BITMAP 0x0000ff00
-#define HT_SUPPORTED_MCS_1SS_2SS_BITMAP \
- (HT_MCS_1SS_BITMAP | HT_MCS_1SS_2SS_BITMAP)
-
-
-typedef enum _HT_MCS_RATE {
- HT_MCS0 = 0x00000001,
- HT_MCS1 = 0x00000002,
- HT_MCS2 = 0x00000004,
- HT_MCS3 = 0x00000008,
- HT_MCS4 = 0x00000010,
- HT_MCS5 = 0x00000020,
- HT_MCS6 = 0x00000040,
- HT_MCS7 = 0x00000080,
- HT_MCS8 = 0x00000100,
- HT_MCS9 = 0x00000200,
- HT_MCS10 = 0x00000400,
- HT_MCS11 = 0x00000800,
- HT_MCS12 = 0x00001000,
- HT_MCS13 = 0x00002000,
- HT_MCS14 = 0x00004000,
- HT_MCS15 = 0x00008000,
- /* Do not define MCS32 here although 8190 support MCS32 */
-} HT_MCS_RATE, *PHT_MCS_RATE;
-
-/* Represent Channel Width in HT Capabilities */
-typedef enum _HT_CHANNEL_WIDTH {
- HT_CHANNEL_WIDTH_20 = 0,
- HT_CHANNEL_WIDTH_20_40 = 1,
-} HT_CHANNEL_WIDTH, *PHT_CHANNEL_WIDTH;
-
-/* Represent Extension Channel Offset in HT Capabilities
- * This is available only in 40Mhz mode. */
-typedef enum _HT_EXTCHNL_OFFSET {
- HT_EXTCHNL_OFFSET_NO_EXT = 0,
- HT_EXTCHNL_OFFSET_UPPER = 1,
- HT_EXTCHNL_OFFSET_NO_DEF = 2,
- HT_EXTCHNL_OFFSET_LOWER = 3,
-} HT_EXTCHNL_OFFSET, *PHT_EXTCHNL_OFFSET;
-
-typedef enum _CHNLOP {
- CHNLOP_NONE = 0, /* No Action now */
- CHNLOP_SCAN = 1, /* Scan in progress */
- CHNLOP_SWBW = 2, /* Bandwidth switching in progress */
- CHNLOP_SWCHNL = 3, /* Software Channel switching in progress */
-} CHNLOP, *PCHNLOP;
-
-/* Determine if the Channel Operation is in progress */
-#define CHHLOP_IN_PROGRESS(_pHTInfo) \
- (((_pHTInfo)->ChnlOp > CHNLOP_NONE) ? TRUE : FALSE)
-
-
-typedef enum _HT_ACTION {
- ACT_RECOMMAND_WIDTH = 0,
- ACT_MIMO_PWR_SAVE = 1,
- ACT_PSMP = 2,
- ACT_SET_PCO_PHASE = 3,
- ACT_MIMO_CHL_MEASURE = 4,
- ACT_RECIPROCITY_CORRECT = 5,
- ACT_MIMO_CSI_MATRICS = 6,
- ACT_MIMO_NOCOMPR_STEER = 7,
- ACT_MIMO_COMPR_STEER = 8,
- ACT_ANTENNA_SELECT = 9,
-} HT_ACTION, *PHT_ACTION;
-
-
-/* Define sub-carrier mode for 40MHZ. */
-typedef enum _HT_Bandwidth_40MHZ_Sub_Carrier {
- SC_MODE_DUPLICATE = 0,
- SC_MODE_LOWER = 1,
- SC_MODE_UPPER = 2,
- SC_MODE_FULL40MHZ = 3,
-} HT_BW40_SC_E;
-
-typedef struct _HT_CAPABILITY_ELE {
-
- /* HT capability info */
- u8 AdvCoding:1;
- u8 ChlWidth:1;
- u8 MimoPwrSave:2;
- u8 GreenField:1;
- u8 ShortGI20Mhz:1;
- u8 ShortGI40Mhz:1;
- u8 TxSTBC:1;
- u8 RxSTBC:2;
- u8 DelayBA:1;
- u8 MaxAMSDUSize:1;
- u8 DssCCk:1;
- u8 PSMP:1;
- u8 Rsvd1:1;
- u8 LSigTxopProtect:1;
-
- /* MAC HT parameters info */
- u8 MaxRxAMPDUFactor:2;
- u8 MPDUDensity:3;
- u8 Rsvd2:3;
-
- /* Supported MCS set */
- u8 MCS[16];
-
-
- /* Extended HT Capability Info */
- u16 ExtHTCapInfo;
-
- /* TXBF Capabilities */
- u8 TxBFCap[4];
-
- /* Antenna Selection Capabilities */
- u8 ASCap;
-
-} __packed HT_CAPABILITY_ELE, *PHT_CAPABILITY_ELE;
-
-/*------------------------------------------------------------
- * The HT Information element is present in beacons
- * Only AP is required to include this element
- *------------------------------------------------------------*/
-
-typedef struct _HT_INFORMATION_ELE {
- u8 ControlChl;
-
- u8 ExtChlOffset:2;
- u8 RecommemdedTxWidth:1;
- u8 RIFS:1;
- u8 PSMPAccessOnly:1;
- u8 SrvIntGranularity:3;
-
- u8 OptMode:2;
- u8 NonGFDevPresent:1;
- u8 Revd1:5;
- u8 Revd2:8;
-
- u8 Rsvd3:6;
- u8 DualBeacon:1;
- u8 DualCTSProtect:1;
-
- u8 SecondaryBeacon:1;
- u8 LSigTxopProtectFull:1;
- u8 PcoActive:1;
- u8 PcoPhase:1;
- u8 Rsvd4:4;
-
- u8 BasicMSC[16];
-} __packed HT_INFORMATION_ELE, *PHT_INFORMATION_ELE;
-
-/* MIMO Power Save control field.
- * This is appear in MIMO Power Save Action Frame */
-typedef struct _MIMOPS_CTRL {
- u8 MimoPsEnable:1;
- u8 MimoPsMode:1;
- u8 Reserved:6;
-} MIMOPS_CTRL, *PMIMOPS_CTRL;
-
-typedef enum _HT_SPEC_VER {
- HT_SPEC_VER_IEEE = 0,
- HT_SPEC_VER_EWC = 1,
-} HT_SPEC_VER, *PHT_SPEC_VER;
-
-typedef enum _HT_AGGRE_MODE_E {
- HT_AGG_AUTO = 0,
- HT_AGG_FORCE_ENABLE = 1,
- HT_AGG_FORCE_DISABLE = 2,
-} HT_AGGRE_MODE_E, *PHT_AGGRE_MODE_E;
-
-/*----------------------------------------------------------------------------
- * The Data structure is used to keep HT related variables when card is
- * configured as non-AP STA mode.
- * **Note** Current_xxx should be set to default value in HTInitializeHTInfo()
- *----------------------------------------------------------------------------*/
-
-typedef struct _RT_HIGH_THROUGHPUT {
- u8 bEnableHT;
- u8 bCurrentHTSupport;
- /* Tx 40MHz channel capability */
- u8 bRegBW40MHz;
- u8 bCurBW40MHz;
- /* Tx Short GI for 40Mhz */
- u8 bRegShortGI40MHz;
- u8 bCurShortGI40MHz;
- /* Tx Short GI for 20MHz */
- u8 bRegShortGI20MHz;
- u8 bCurShortGI20MHz;
- /* Tx CCK rate capability */
- u8 bRegSuppCCK;
- u8 bCurSuppCCK;
-
- /* 802.11n spec version for "peer" */
- HT_SPEC_VER ePeerHTSpecVer;
-
-
- /* HT related information for "Self" */
- /* This is HT cap element sent to peer STA, which also indicate
- * HT Rx capabilities. */
- HT_CAPABILITY_ELE SelfHTCap;
- HT_INFORMATION_ELE SelfHTInfo;
-
- /* HT related information for "Peer" */
- u8 PeerHTCapBuf[32];
- u8 PeerHTInfoBuf[32];
-
-
- /* A-MSDU related */
- /* This indicates Tx A-MSDU capability */
- u8 bAMSDU_Support;
- u16 nAMSDU_MaxSize;
- u8 bCurrent_AMSDU_Support;
- u16 nCurrent_AMSDU_MaxSize;
-
-
- /* A-MPDU related */
- /* This indicate Tx A-MPDU capability */
- u8 bAMPDUEnable;
- u8 bCurrentAMPDUEnable;
- u8 AMPDU_Factor;
- u8 CurrentAMPDUFactor;
- u8 MPDU_Density;
- u8 CurrentMPDUDensity;
-
- /* Forced A-MPDU enable */
- HT_AGGRE_MODE_E ForcedAMPDUMode;
- u8 ForcedAMPDUFactor;
- u8 ForcedMPDUDensity;
-
- /* Forced A-MSDU enable */
- HT_AGGRE_MODE_E ForcedAMSDUMode;
- u16 ForcedAMSDUMaxSize;
-
- u8 bForcedShortGI;
-
- u8 CurrentOpMode;
-
- /* MIMO PS related */
- u8 SelfMimoPs;
- u8 PeerMimoPs;
-
- /* 40MHz Channel Offset settings. */
- HT_EXTCHNL_OFFSET CurSTAExtChnlOffset;
- u8 bCurTxBW40MHz; /* If we use 40 MHz to Tx */
- u8 PeerBandwidth;
-
- /* For Bandwidth Switching */
- u8 bSwBwInProgress;
- CHNLOP ChnlOp; /* sw switching channel in progress. */
- u8 SwBwStep;
- struct timer_list SwBwTimer;
-
- /* For Realtek proprietary A-MPDU factor for aggregation */
- u8 bRegRT2RTAggregation;
- u8 bCurrentRT2RTAggregation;
- u8 bCurrentRT2RTLongSlotTime;
- u8 szRT2RTAggBuffer[10];
-
- /* Rx Reorder control */
- u8 bRegRxReorderEnable;
- u8 bCurRxReorderEnable;
- u8 RxReorderWinSize;
- u8 RxReorderPendingTime;
- u16 RxReorderDropCounter;
-
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- u8 UsbTxAggrNum;
-#endif
-#ifdef USB_RX_AGGREGATION_SUPPORT
- u8 UsbRxFwAggrEn;
- u8 UsbRxFwAggrPageNum;
- u8 UsbRxFwAggrPacketNum;
- u8 UsbRxFwAggrTimeout;
-#endif
-
- /* Add for Broadcom(Linksys) IOT. */
- u8 bIsPeerBcm;
-
- /* For IOT issue. */
- u32 IOTAction;
-} RT_HIGH_THROUGHPUT, *PRT_HIGH_THROUGHPUT;
-
-
-/*----------------------------------------------------------------------
- * The Data structure is used to keep HT related variable for "each Sta"
- * when card is configured as "AP mode"
- *----------------------------------------------------------------------*/
-
-typedef struct _RT_HTINFO_STA_ENTRY {
- u8 bEnableHT;
-
- u8 bSupportCck;
-
- u16 AMSDU_MaxSize;
-
- u8 AMPDU_Factor;
- u8 MPDU_Density;
-
- u8 HTHighestOperaRate;
-
- u8 bBw40MHz;
-
- u8 MimoPs;
-
- u8 McsRateSet[16];
-
-
-} RT_HTINFO_STA_ENTRY, *PRT_HTINFO_STA_ENTRY;
-
-
-
-
-
-/*---------------------------------------------------------------------
- * The Data structure is used to keep HT related variable for "each AP"
- * when card is configured as "STA mode"
- *---------------------------------------------------------------------*/
-
-typedef struct _BSS_HT {
-
- u8 bdSupportHT;
-
- /* HT related elements */
- u8 bdHTCapBuf[32];
- u16 bdHTCapLen;
- u8 bdHTInfoBuf[32];
- u16 bdHTInfoLen;
-
- HT_SPEC_VER bdHTSpecVer;
-
- u8 bdRT2RTAggregation;
- u8 bdRT2RTLongSlotTime;
-} BSS_HT, *PBSS_HT;
-
-typedef struct _MIMO_RSSI {
- u32 EnableAntenna;
- u32 AntennaA;
- u32 AntennaB;
- u32 AntennaC;
- u32 AntennaD;
- u32 Average;
-} MIMO_RSSI, *PMIMO_RSSI;
-
-typedef struct _MIMO_EVM {
- u32 EVM1;
- u32 EVM2;
-} MIMO_EVM, *PMIMO_EVM;
-
-typedef struct _FALSE_ALARM_STATISTICS {
- u32 Cnt_Parity_Fail;
- u32 Cnt_Rate_Illegal;
- u32 Cnt_Crc8_fail;
- u32 Cnt_all;
-} FALSE_ALARM_STATISTICS, *PFALSE_ALARM_STATISTICS;
-
-
-
-#endif
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 7bdcbd39a3b2..723c8630e9e2 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -182,7 +182,7 @@ static void cmpk_handle_tx_feedback(struct net_device *dev, u8 *pmsg)
}
-void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
+static void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tx_rate;
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index d6a6de3a64f5..ecfb66538eb3 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -17,7 +17,8 @@
#include "r819xU_firmware_img.h"
#include "r819xU_firmware.h"
#include <linux/firmware.h>
-void firmware_init_param(struct net_device *dev)
+
+static void firmware_init_param(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
rt_firmware *pfirmware = priv->pFirmware;
@@ -29,7 +30,8 @@ void firmware_init_param(struct net_device *dev)
* segment the img and use the ptr and length to remember info on each segment
*
*/
-bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buffer_len)
+static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
+ u32 buffer_len)
{
struct r8192_priv *priv = ieee80211_priv(dev);
bool rt_status = true;
@@ -103,46 +105,6 @@ bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buff
}
-bool
-fwSendNullPacket(
- struct net_device *dev,
- u32 Length
-)
-{
- bool rtStatus = true;
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct sk_buff *skb;
- cb_desc *tcb_desc;
- unsigned char *ptr_buf;
- bool bLastInitPacket = false;
-
- //PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK);
-
- //Get TCB and local buffer from common pool. (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
- skb = dev_alloc_skb(Length+ 4);
- memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
- tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->queue_index = TXCMD_QUEUE;
- tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
- tcb_desc->bLastIniPkt = bLastInitPacket;
- ptr_buf = skb_put(skb, Length);
- memset(ptr_buf,0,Length);
- tcb_desc->txbuf_size= (u16)Length;
-
- if (!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
- (priv->ieee80211->queue_stop) ) {
- RT_TRACE(COMP_FIRMWARE,"===================NULL packet==================================> tx full!\n");
- skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
- } else {
- priv->ieee80211->softmac_hard_start_xmit(skb,dev);
- }
-
- //PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
- return rtStatus;
-}
-
-
//-----------------------------------------------------------------------------
// Procedure: Check whether the main code is downloaded OK. If OK, turn on CPU
//
@@ -156,7 +118,7 @@ fwSendNullPacket(
// NDIS_STATUS_FAILURE - the following initialization process should be terminated
// NDIS_STATUS_SUCCESS - if firmware initialization process success
//-----------------------------------------------------------------------------
-bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
+static bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
{
bool rt_status = true;
int check_putcodeOK_time = 200000, check_bootOk_time = 200000;
@@ -205,7 +167,7 @@ CPUCheckMainCodeOKAndTurnOnCPU_Fail:
return rt_status;
}
-bool CPUcheck_firmware_ready(struct net_device *dev)
+static bool CPUcheck_firmware_ready(struct net_device *dev)
{
bool rt_status = true;
diff --git a/drivers/staging/rtl8192u/r819xU_firmware_img.c b/drivers/staging/rtl8192u/r819xU_firmware_img.c
index df0f9d1648ec..0785de72a5ea 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware_img.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware_img.c
@@ -1,5 +1,6 @@
/*Created on 2008/ 7/16, 5:31*/
#include <linux/types.h>
+#include "r819xU_firmware_img.h"
u32 Rtl8192UsbPHY_REGArray[] = {
0x0, };
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index 39cd426bc5e5..b9f35313c7ab 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -44,7 +44,7 @@ static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
* output: none
* return: u32 return the shift bit position of the mask
******************************************************************************/
-u32 rtl8192_CalculateBitShift(u32 bitmask)
+static u32 rtl8192_CalculateBitShift(u32 bitmask)
{
u32 i;
@@ -144,8 +144,8 @@ static void phy_FwRFSerialWrite(struct net_device *dev,
* Driver here need to implement (1) and (2)
* ---need more spec for this information.
******************************************************************************/
-u32 rtl8192_phy_RFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
- u32 offset)
+static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath, u32 offset)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 ret = 0;
@@ -229,8 +229,9 @@ u32 rtl8192_phy_RFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
* Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf)
* ---------------------------------------------------------------------------
*****************************************************************************/
-void rtl8192_phy_RFSerialWrite(struct net_device *dev,
- RF90_RADIO_PATH_E eRFPath, u32 offset, u32 data)
+static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath, u32 offset,
+ u32 data)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 DataAndAddr = 0, new_offset = 0;
@@ -571,7 +572,7 @@ void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType)
* notice: Initialization value here is constant and it should never
* be changed
*****************************************************************************/
-void rtl8192_InitBBRFRegDef(struct net_device *dev)
+static void rtl8192_InitBBRFRegDef(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -780,7 +781,7 @@ u8 rtl8192_phy_checkBBAndRF(struct net_device *dev, HW90_BLOCK_E CheckBlock,
* notice: Initialization value may change all the time, so please make
* sure it has been synced with the newest.
******************************************************************************/
-void rtl8192_BB_Config_ParaFile(struct net_device *dev)
+static void rtl8192_BB_Config_ParaFile(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 reg_u8 = 0, eCheckItem = 0, status = 0;
@@ -1070,7 +1071,7 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
* return: none
* notice:
******************************************************************************/
-void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
+static void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 powerlevel = priv->TxPowerLevelCCK[channel-1];
@@ -1239,9 +1240,9 @@ bool rtl8192_SetRFPowerState(struct net_device *dev,
* return: true if finished, false otherwise
* notice:
******************************************************************************/
-u8 rtl8192_phy_SetSwChnlCmdArray(SwChnlCmd *CmdTable, u32 CmdTableIdx,
- u32 CmdTableSz, SwChnlCmdID CmdID, u32 Para1,
- u32 Para2, u32 msDelay)
+static u8 rtl8192_phy_SetSwChnlCmdArray(SwChnlCmd *CmdTable, u32 CmdTableIdx,
+ u32 CmdTableSz, SwChnlCmdID CmdID,
+ u32 Para1, u32 Para2, u32 msDelay)
{
SwChnlCmd *pCmd;
@@ -1276,8 +1277,8 @@ u8 rtl8192_phy_SetSwChnlCmdArray(SwChnlCmd *CmdTable, u32 CmdTableIdx,
* return: true if finished, false otherwise
* notice: Wait for simpler function to replace it
*****************************************************************************/
-u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8 *stage,
- u8 *step, u32 *delay)
+static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
+ u8 *stage, u8 *step, u32 *delay)
{
struct r8192_priv *priv = ieee80211_priv(dev);
SwChnlCmd PreCommonCmd[MAX_PRECMD_CNT];
@@ -1433,7 +1434,7 @@ u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8 *stage,
* return: none
* notice: We should not call this function directly
*****************************************************************************/
-void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel)
+static void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 delay = 0;
diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig
index 6a43312380e0..f160eee52f09 100644
--- a/drivers/staging/rtl8712/Kconfig
+++ b/drivers/staging/rtl8712/Kconfig
@@ -4,7 +4,6 @@ config R8712U
select WIRELESS_EXT
select WEXT_PRIV
select FW_LOADER
- default N
---help---
This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130.
If built as a module, it will be called r8712u.
@@ -12,7 +11,6 @@ config R8712U
config R8712_TX_AGGR
bool "Realtek RTL8712U Transmit Aggregation code"
depends on R8712U && BROKEN
- default N
---help---
This option provides transmit aggregation for the Realtek RTL8712 USB device.
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index a074fe810169..3362e5e32bcc 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -168,7 +168,7 @@ struct _adapter {
struct task_struct *xmitThread;
pid_t recvThread;
uint(*dvobj_init)(struct _adapter *adapter);
- void (*dvobj_deinit)(struct _adapter *adapter);
+ void (*dvobj_deinit)(struct _adapter *adapter);
struct net_device *pnetdev;
int bup;
struct net_device_stats stats;
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index cc68d9748edb..57fef70ad984 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -78,9 +78,9 @@ uint r8712_is_cckrates_included(u8 *rate)
if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
(((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
return true;
- i++;
- }
- return false;
+ i++;
+ }
+ return false;
}
uint r8712_is_cckratesonly_included(u8 *rate)
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 37fe33005c02..6bd08213cb70 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -239,7 +239,7 @@ static u32 start_drv_threads(struct _adapter *padapter)
{
padapter->cmdThread = kthread_run(r8712_cmd_thread, padapter, "%s",
padapter->pnetdev->name);
- if (IS_ERR(padapter->cmdThread) < 0)
+ if (IS_ERR(padapter->cmdThread))
return _FAIL;
return _SUCCESS;
}
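The corrected test above is the usual kthread_run() error-handling idiom: on failure kthread_run() returns an ERR_PTR-encoded error rather than NULL, and IS_ERR() returns 0 or 1, so the old IS_ERR(...) < 0 comparison could never be true. A minimal sketch of the standard pattern follows, with hypothetical names (my_thread_fn, start_worker) used purely for illustration:

#include <linux/kthread.h>
#include <linux/err.h>

/* Hypothetical worker function, shown only to illustrate the idiom. */
static int my_thread_fn(void *data)
{
	/* ... loop until kthread_should_stop() ... */
	return 0;
}

static int start_worker(void *data)
{
	struct task_struct *task;

	task = kthread_run(my_thread_fn, data, "my_worker");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* ERR_PTR-encoded error, e.g. -ENOMEM */

	return 0;			/* thread is valid and already running */
}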
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index f1ccc7ebbda7..566235a14a80 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -147,7 +147,8 @@ static inline u32 _queue_empty(struct __queue *pqueue)
return is_list_empty(&(pqueue->queue));
}
-static inline u32 end_of_queue_search(struct list_head *head, struct list_head *plist)
+static inline u32 end_of_queue_search(struct list_head *head,
+ struct list_head *plist)
{
if (head == plist)
return true;
@@ -164,7 +165,7 @@ static inline void sleep_schedulable(int ms)
delta = 1;/* 1 ms */
set_current_state(TASK_INTERRUPTIBLE);
if (schedule_timeout(delta) != 0)
- return ;
+ return;
}
static inline u8 *_malloc(u32 sz)
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index ea965370d1ac..0723b2f73aad 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -90,7 +90,6 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
pskb = netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ +
RECVBUFF_ALIGN_SZ);
if (pskb) {
- pskb->dev = padapter->pnetdev;
tmpaddr = (addr_t)pskb->data;
alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
@@ -1083,7 +1082,6 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
alloc_sz += 6;
pkt_copy = netdev_alloc_skb(padapter->pnetdev, alloc_sz);
if (pkt_copy) {
- pkt_copy->dev = padapter->pnetdev;
precvframe->u.hdr.pkt = pkt_copy;
skb_reserve(pkt_copy, 4 - ((addr_t)(pkt_copy->data)
% 4));
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index c71c7e55bb36..a67185db392b 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -468,10 +468,9 @@ u8 r8712_createbss_cmd(struct _adapter *padapter)
pcmd->rsp = NULL;
pcmd->rspsz = 0;
/* notes: translate IELength & Length after assign to cmdsz; */
- pdev_network->Length = cpu_to_le32(pcmd->cmdsz);
- pdev_network->IELength = cpu_to_le32(pdev_network->IELength);
- pdev_network->Ssid.SsidLength = cpu_to_le32(
- pdev_network->Ssid.SsidLength);
+ pdev_network->Length = pcmd->cmdsz;
+ pdev_network->IELength = pdev_network->IELength;
+ pdev_network->Ssid.SsidLength = pdev_network->Ssid.SsidLength;
r8712_enqueue_cmd(pcmdpriv, pcmd);
return _SUCCESS;
}
@@ -911,7 +910,7 @@ void r8712_joinbss_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if ((pcmd->res != H2C_SUCCESS))
+ if (pcmd->res != H2C_SUCCESS)
_set_timer(&pmlmepriv->assoc_timer, 1);
r8712_free_cmd_obj(pcmd);
}
@@ -928,7 +927,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
pcmd->parmbuf;
struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
- if ((pcmd->res != H2C_SUCCESS))
+ if (pcmd->res != H2C_SUCCESS)
_set_timer(&pmlmepriv->assoc_timer, 1);
_cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
#ifdef __BIG_ENDIAN
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index aae5125a2e7e..5ffc489e9501 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -1244,17 +1244,18 @@ static sint aes_decipher(u8 *key, uint hdrlen,
(frtype == WIFI_DATA_CFPOLL) ||
(frtype == WIFI_DATA_CFACKPOLL)) {
qc_exists = 1;
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
hdrlen += 2;
- } else if ((frsubtype == 0x08) ||
+ } else if ((frsubtype == 0x08) ||
(frsubtype == 0x09) ||
(frsubtype == 0x0a) ||
(frsubtype == 0x0b)) {
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
- hdrlen += 2;
- qc_exists = 1;
- } else
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+ qc_exists = 1;
+ } else {
qc_exists = 0;
+ }
/* now, decrypt pframe with hdrlen offset and plen long */
payload_index = hdrlen + 8; /* 8 is for extiv */
for (i = 0; i < num_blocks; i++) {
diff --git a/drivers/staging/rtl8723au/Kconfig b/drivers/staging/rtl8723au/Kconfig
new file mode 100644
index 000000000000..07fb5e4e50fa
--- /dev/null
+++ b/drivers/staging/rtl8723au/Kconfig
@@ -0,0 +1,38 @@
+config R8723AU
+ tristate "Realtek RTL8723AU Wireless LAN NIC driver"
+ depends on USB && WLAN && RFKILL
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select CFG80211
+ default n
+ ---help---
+	  This option adds support for the Realtek RTL8723AU USB device, such as the
+	  one found in the Lenovo Yoga 13 tablet. If built as a module, it will be called r8723au.
+
+if R8723AU
+
+config 8723AU_AP_MODE
+ bool "Realtek RTL8723AU AP mode"
+ default y
+ ---help---
+ This option enables Access Point mode. Unless you know that your system
+ will never be used as an AP, or the target system has limited memory,
+ "Y" should be selected.
+
+config 8723AU_P2P
+ bool "Realtek RTL8723AU Peer-to-peer mode"
+ default y
+ ---help---
+ This option enables peer-to-peer mode for the r8723au driver. Unless you
+ know that peer-to-peer (P2P) mode will never be used, or the target system has
+ limited memory, "Y" should be selected.
+
+config 8723AU_BT_COEXIST
+ bool "Realtek RTL8723AU BlueTooth Coexistence"
+ default y
+ ---help---
+	  This option enables coexistence with Bluetooth communications for the r8723au driver.
+	  Unless you know that this driver will never be used with BT, or the target system has
+	  limited memory, "Y" should be selected.
+
+endif
diff --git a/drivers/staging/rtl8723au/Makefile b/drivers/staging/rtl8723au/Makefile
new file mode 100644
index 000000000000..11c6dd486462
--- /dev/null
+++ b/drivers/staging/rtl8723au/Makefile
@@ -0,0 +1,58 @@
+r8723au-y := \
+ core/rtw_ap.o \
+ core/rtw_cmd.o \
+ core/rtw_efuse.o \
+ core/rtw_io.o \
+ core/rtw_ioctl_set.o \
+ core/rtw_ieee80211.o \
+ core/rtw_led.o \
+ core/rtw_mlme.o \
+ core/rtw_mlme_ext.o \
+ core/rtw_p2p.o \
+ core/rtw_pwrctrl.o \
+ core/rtw_recv.o \
+ core/rtw_security.o \
+ core/rtw_sreset.o \
+ core/rtw_sta_mgt.o \
+ core/rtw_xmit.o \
+ core/rtw_wlan_util.o \
+ hal/hal_com.o \
+ hal/hal_intf.o \
+ hal/Hal8723PwrSeq.o \
+ hal/Hal8723UHWImg_CE.o \
+ hal/HalDMOutSrc8723A_CE.o \
+ hal/HalHWImg8723A_BB.o \
+ hal/HalHWImg8723A_MAC.o \
+ hal/HalHWImg8723A_RF.o \
+ hal/HalPwrSeqCmd.o \
+ hal/odm_RegConfig8723A.o \
+ hal/rtl8723a_bt-coexist.o \
+ hal/odm_debug.o \
+ hal/odm_interface.o \
+ hal/odm_HWConfig.o \
+ hal/odm.o \
+ hal/rtl8723a_cmd.o \
+ hal/rtl8723a_dm.o \
+ hal/rtl8723a_hal_init.o \
+ hal/rtl8723a_phycfg.o \
+ hal/rtl8723a_rf6052.o \
+ hal/rtl8723a_rxdesc.o \
+ hal/rtl8723a_sreset.o \
+ hal/rtl8723a_xmit.o \
+ hal/rtl8723au_led.o \
+ hal/rtl8723au_recv.o \
+ hal/rtl8723au_xmit.o \
+ hal/usb_halinit.o \
+ hal/usb_ops_linux.o \
+ os_dep/ioctl_cfg80211.o \
+ os_dep/mlme_linux.o \
+ os_dep/osdep_service.o \
+ os_dep/os_intfs.o \
+ os_dep/recv_linux.o \
+ os_dep/usb_intf.o \
+ os_dep/usb_ops_linux.o \
+ os_dep/xmit_linux.o
+
+obj-$(CONFIG_R8723AU) := r8723au.o
+
+ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/include
diff --git a/drivers/staging/rtl8723au/TODO b/drivers/staging/rtl8723au/TODO
new file mode 100644
index 000000000000..175a0ceb7421
--- /dev/null
+++ b/drivers/staging/rtl8723au/TODO
@@ -0,0 +1,13 @@
+TODO:
+- find and remove code valid only for 5 GHz. Many of the obvious
+ ones have been removed, but things like channel > 14 still exist.
+- find and remove any code for other chips that is left over
+- convert any remaining unusual variable types
+- find code that can use %pM and %Nph formatting
+- checkpatch.pl fixes - most of the remaining ones are lines too long. Many
+ of them will require refactoring
+- merge Realtek's bugfixes and new features into the driver
+- switch to use MAC80211
+
+Please send any patches to Greg Kroah-Hartman <gregkh@linux.com>,
+Jes Sorensen <Jes.Sorensen@redhat.com>, and Larry Finger <Larry.Finger@lwfinger.net>.
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c
new file mode 100644
index 000000000000..a357e98cb83e
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_ap.c
@@ -0,0 +1,2087 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_AP_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <linux/ieee80211.h>
+#include <wifi.h>
+
+#ifdef CONFIG_8723AU_AP_MODE
+
+extern unsigned char RTW_WPA_OUI23A[];
+extern unsigned char WMM_OUI23A[];
+extern unsigned char WPS_OUI23A[];
+extern unsigned char P2P_OUI23A[];
+extern unsigned char WFD_OUI23A[];
+
+void init_mlme_ap_info23a(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+ spin_lock_init(&pmlmepriv->bcn_update_lock);
+
+ /* for ACL */
+ _rtw_init_queue23a(&pacl_list->acl_node_q);
+
+ start_ap_mode23a(padapter);
+}
+
+void free_mlme_ap_info23a(struct rtw_adapter *padapter)
+{
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ pmlmepriv->update_bcn = false;
+ pmlmeext->bstart_bss = false;
+
+ rtw_sta_flush23a(padapter);
+
+ pmlmeinfo->state = _HW_STATE_NOLINK_;
+
+ /* free_assoc_sta_resources */
+ rtw_free_all_stainfo23a(padapter);
+
+ /* free bc/mc sta_info */
+ psta = rtw_get_bcmc_stainfo23a(padapter);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ rtw_free_stainfo23a(padapter, psta);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+}
+
+static void update_BCNTIM(struct rtw_adapter *padapter)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
+ unsigned char *pie = pnetwork_mlmeext->IEs;
+ u8 *p, *dst_ie, *premainder_ie = NULL, *pbackup_remainder_ie = NULL;
+ u16 tim_bitmap_le;
+ uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;
+
+ tim_bitmap_le = cpu_to_le16(pstapriv->tim_bitmap);
+
+ p = rtw_get_ie23a(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen, pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_);
+ if (p != NULL && tim_ielen>0) {
+ tim_ielen += 2;
+
+ premainder_ie = p+tim_ielen;
+
+ tim_ie_offset = (int)(p -pie);
+
+ remainder_ielen = pnetwork_mlmeext->IELength - tim_ie_offset - tim_ielen;
+
+ /* append TIM IE from dst_ie offset */
+ dst_ie = p;
+ } else {
+ tim_ielen = 0;
+
+ /* calculate head_len */
+ offset = _FIXED_IE_LENGTH_;
+
+ /* get ssid_ie len */
+ p = rtw_get_ie23a(pie + _BEACON_IE_OFFSET_, _SSID_IE_, &tmp_len, (pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_));
+ if (p != NULL)
+ offset += tmp_len+2;
+
+ /* get supported rates len */
+ p = rtw_get_ie23a(pie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &tmp_len, (pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_));
+ if (p != NULL)
+ offset += tmp_len+2;
+
+ /* DS Parameter Set IE, len = 3 */
+ offset += 3;
+
+ premainder_ie = pie + offset;
+
+ remainder_ielen = pnetwork_mlmeext->IELength - offset - tim_ielen;
+
+ /* append TIM IE from offset */
+ dst_ie = pie + offset;
+ }
+
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = kmalloc(remainder_ielen, GFP_ATOMIC);
+ if (pbackup_remainder_ie && premainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
+
+ *dst_ie++= _TIM_IE_;
+
+ if ((pstapriv->tim_bitmap&0xff00) && (pstapriv->tim_bitmap&0x00fc))
+ tim_ielen = 5;
+ else
+ tim_ielen = 4;
+
+ *dst_ie++= tim_ielen;
+
+ *dst_ie++ = 0; /* DTIM count */
+ *dst_ie++ = 1; /* DTIM period */
+
+ if (pstapriv->tim_bitmap & BIT(0))/* for bc/mc frames */
+ *dst_ie++ = BIT(0);/* bitmap ctrl */
+ else
+ *dst_ie++ = 0;
+
+ if (tim_ielen == 4) {
+ *dst_ie++ = *(u8*)&tim_bitmap_le;
+ } else if (tim_ielen == 5) {
+ memcpy(dst_ie, &tim_bitmap_le, 2);
+ dst_ie+= 2;
+ }
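+ /*
+  * Illustrative example (not part of the original code): with
+  * pstapriv->tim_bitmap == 0x0005 the check above picks tim_ielen = 4, so
+  * the TIM element written here is _TIM_IE_, 4 (len), 0 (DTIM count),
+  * 1 (DTIM period), 0x01 (bitmap control, bc/mc bit set) and
+  * 0x05 (partial virtual bitmap).
+  */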
+
+ /* copy remainder IE */
+ if (pbackup_remainder_ie) {
+ memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
+
+ kfree(pbackup_remainder_ie);
+ }
+
+ offset = (uint)(dst_ie - pie);
+ pnetwork_mlmeext->IELength = offset + remainder_ielen;
+
+ set_tx_beacon_cmd23a(padapter);
+}
+
+static u8 chk_sta_is_alive(struct sta_info *psta)
+{
+ u8 ret = false;
+
+ if ((psta->sta_stats.last_rx_data_pkts +
+ psta->sta_stats.last_rx_ctrl_pkts) !=
+ (psta->sta_stats.rx_data_pkts + psta->sta_stats.rx_ctrl_pkts))
+ ret = true;
+
+ sta_update_last_rx_pkts(psta);
+
+ return ret;
+}
+
+void expire_timeout_chk23a(struct rtw_adapter *padapter)
+{
+ struct list_head *phead, *plist, *ptmp;
+ u8 updated = 0;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 chk_alive_num = 0;
+ char chk_alive_list[NUM_STA];
+ int i;
+
+ spin_lock_bh(&pstapriv->auth_list_lock);
+
+ phead = &pstapriv->auth_list;
+
+ /* check auth_queue */
+ list_for_each_safe(plist, ptmp, phead) {
+ psta = container_of(plist, struct sta_info, auth_list);
+
+ if (psta->expire_to>0) {
+ psta->expire_to--;
+ if (psta->expire_to == 0) {
+ list_del_init(&psta->auth_list);
+ pstapriv->auth_list_cnt--;
+
+ DBG_8723A("auth expire %pM\n", psta->hwaddr);
+
+ spin_unlock_bh(&pstapriv->auth_list_lock);
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ rtw_free_stainfo23a(padapter, psta);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+
+ spin_lock_bh(&pstapriv->auth_list_lock);
+ }
+ }
+
+ }
+
+ spin_unlock_bh(&pstapriv->auth_list_lock);
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+
+ phead = &pstapriv->asoc_list;
+
+ /* check asoc_queue */
+ list_for_each_safe(plist, ptmp, phead) {
+ psta = container_of(plist, struct sta_info, asoc_list);
+
+ if (chk_sta_is_alive(psta) || !psta->expire_to) {
+ psta->expire_to = pstapriv->expire_to;
+ psta->keep_alive_trycnt = 0;
+ } else {
+ psta->expire_to--;
+ }
+
+ if (psta->expire_to <= 0)
+ {
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (padapter->registrypriv.wifi_spec == 1)
+ {
+ psta->expire_to = pstapriv->expire_to;
+ continue;
+ }
+
+ if (psta->state & WIFI_SLEEP_STATE) {
+ if (!(psta->state & WIFI_STA_ALIVE_CHK_STATE)) {
+ /* check if alive by other methods if the station is in PS mode. */
+ psta->expire_to = pstapriv->expire_to;
+ psta->state |= WIFI_STA_ALIVE_CHK_STATE;
+
+ /* to update bcn with tim_bitmap for this station */
+ pstapriv->tim_bitmap |= CHKBIT(psta->aid);
+ update_beacon23a(padapter, _TIM_IE_, NULL, false);
+
+ if (!pmlmeext->active_keep_alive_check)
+ continue;
+ }
+ }
+
+ if (pmlmeext->active_keep_alive_check) {
+ int stainfo_offset;
+
+ stainfo_offset = rtw_stainfo_offset23a(pstapriv, psta);
+ if (stainfo_offset_valid(stainfo_offset)) {
+ chk_alive_list[chk_alive_num++] = stainfo_offset;
+ }
+
+ continue;
+ }
+
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+
+ DBG_8723A("asoc expire "MAC_FMT", state = 0x%x\n", MAC_ARG(psta->hwaddr), psta->state);
+ updated = ap_free_sta23a(padapter, psta, false, WLAN_REASON_DEAUTH_LEAVING);
+ } else {
+ /* TODO: Aging mechanism to digest frames in sleep_q to avoid running out of xmitframe */
+ if (psta->sleepq_len > (NR_XMITFRAME/pstapriv->asoc_list_cnt)
+ && padapter->xmitpriv.free_xmitframe_cnt < ((NR_XMITFRAME/pstapriv->asoc_list_cnt)/2)
+ ) {
+ DBG_8723A("%s sta:"MAC_FMT", sleepq_len:%u, free_xmitframe_cnt:%u, asoc_list_cnt:%u, clear sleep_q\n", __func__,
+ MAC_ARG(psta->hwaddr),
+ psta->sleepq_len,
+ padapter->xmitpriv.free_xmitframe_cnt,
+ pstapriv->asoc_list_cnt);
+ wakeup_sta_to_xmit23a(padapter, psta);
+ }
+ }
+ }
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ if (chk_alive_num) {
+
+ u8 backup_oper_channel = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ /* switch to the correct channel of the current network before issuing keep-alive frames */
+ if (rtw_get_oper_ch23a(padapter) != pmlmeext->cur_channel) {
+ backup_oper_channel = rtw_get_oper_ch23a(padapter);
+ SelectChannel23a(padapter, pmlmeext->cur_channel);
+ }
+
+ /* issue null data to check sta alive*/
+ for (i = 0; i < chk_alive_num; i++) {
+
+ int ret = _FAIL;
+
+ psta = rtw_get_stainfo23a_by_offset23a(pstapriv, chk_alive_list[i]);
+ if (!(psta->state &_FW_LINKED))
+ continue;
+
+ if (psta->state & WIFI_SLEEP_STATE)
+ ret = issue_nulldata23a(padapter, psta->hwaddr, 0, 1, 50);
+ else
+ ret = issue_nulldata23a(padapter, psta->hwaddr, 0, 3, 50);
+
+ psta->keep_alive_trycnt++;
+ if (ret == _SUCCESS)
+ {
+ DBG_8723A("asoc check, sta(" MAC_FMT ") is alive\n", MAC_ARG(psta->hwaddr));
+ psta->expire_to = pstapriv->expire_to;
+ psta->keep_alive_trycnt = 0;
+ continue;
+ }
+ else if (psta->keep_alive_trycnt <= 3)
+ {
+ DBG_8723A("ack check for asoc expire, keep_alive_trycnt =%d\n", psta->keep_alive_trycnt);
+ psta->expire_to = 1;
+ continue;
+ }
+
+ psta->keep_alive_trycnt = 0;
+
+ DBG_8723A("asoc expire "MAC_FMT", state = 0x%x\n", MAC_ARG(psta->hwaddr), psta->state);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ if (!list_empty(&psta->asoc_list)) {
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta23a(padapter, psta, false, WLAN_REASON_DEAUTH_LEAVING);
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ }
+
+ if (backup_oper_channel>0) /* back to the original operation channel */
+ SelectChannel23a(padapter, backup_oper_channel);
+}
+
+ associated_clients_update23a(padapter, updated);
+}
+
+void add_RATid23a(struct rtw_adapter *padapter, struct sta_info *psta, u8 rssi_level)
+{
+ int i;
+ u8 rf_type;
+ u32 init_rate = 0;
+ unsigned char sta_band = 0, raid, shortGIrate = false;
+ unsigned char limit;
+ unsigned int tx_ra_bitmap = 0;
+ struct ht_priv *psta_ht = NULL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+
+ if (psta)
+ psta_ht = &psta->htpriv;
+ else
+ return;
+
+ if (!(psta->state & _FW_LINKED))
+ return;
+
+ /* b/g mode ra_bitmap */
+ for (i = 0; i<sizeof(psta->bssrateset); i++)
+ {
+ if (psta->bssrateset[i])
+ tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value23a(psta->bssrateset[i]&0x7f);
+ }
+ /* n mode ra_bitmap */
+ if (psta_ht->ht_option)
+ {
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ if (rf_type == RF_2T2R)
+ limit = 16;/* 2R */
+ else
+ limit = 8;/* 1R */
+
+ for (i = 0; i<limit; i++) {
+ if (psta_ht->ht_cap.mcs.rx_mask[i/8] & BIT(i%8))
+ tx_ra_bitmap |= CHKBIT(i+12);
+ }
+
+ /* max short GI rate */
+ shortGIrate = psta_ht->sgi;
+ }
+
+ if (pcur_network->Configuration.DSConfig > 14) {
+ /* 5G band */
+ if (tx_ra_bitmap & 0xffff000)
+ sta_band |= WIRELESS_11_5N | WIRELESS_11A;
+ else
+ sta_band |= WIRELESS_11A;
+ } else {
+ if (tx_ra_bitmap & 0xffff000)
+ sta_band |= WIRELESS_11_24N | WIRELESS_11G | WIRELESS_11B;
+ else if (tx_ra_bitmap & 0xff0)
+ sta_band |= WIRELESS_11G |WIRELESS_11B;
+ else
+ sta_band |= WIRELESS_11B;
+ }
+
+ psta->wireless_mode = sta_band;
+
+ raid = networktype_to_raid23a(sta_band);
+ init_rate = get_highest_rate_idx23a(tx_ra_bitmap&0x0fffffff)&0x3f;
+
+ if (psta->aid < NUM_STA)
+ {
+ u8 arg = 0;
+
+ arg = psta->mac_id&0x1f;
+
+ arg |= BIT(7);/* support entry 2~31 */
+
+ if (shortGIrate == true)
+ arg |= BIT(5);
+
+ tx_ra_bitmap |= ((raid<<28)&0xf0000000);
+
+ DBG_8723A("%s => mac_id:%d , raid:%d , bitmap = 0x%x, arg = "
+ "0x%x\n",
+ __func__, psta->mac_id, raid, tx_ra_bitmap, arg);
+
+ /* bitmap[0:27] = tx_rate_bitmap */
+ /* bitmap[28:31]= Rate Adaptive id */
+ /* arg[0:4] = macid */
+ /* arg[5] = Short GI */
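+ /*
+  * Illustrative example (not part of the original code): for mac_id 5 with
+  * short GI and raid 4, arg = (5 & 0x1f) | BIT(7) | BIT(5) = 0xa5, and the
+  * top nibble of tx_ra_bitmap carries the raid, i.e. (4 << 28) & 0xf0000000.
+  */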
+ rtw_hal_add_ra_tid23a(padapter, tx_ra_bitmap, arg, rssi_level);
+
+ if (shortGIrate == true)
+ init_rate |= BIT(6);
+
+ /* set ra_id, init_rate */
+ psta->raid = raid;
+ psta->init_rate = init_rate;
+
+ }
+ else
+ {
+ DBG_8723A("station aid %d exceeds the max number\n", psta->aid);
+ }
+}
+
+static void update_bmc_sta(struct rtw_adapter *padapter)
+{
+ u32 init_rate = 0;
+ unsigned char network_type, raid;
+ int i, supportRateNum = 0;
+ unsigned int tx_ra_bitmap = 0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct sta_info *psta = rtw_get_bcmc_stainfo23a(padapter);
+
+ if (psta)
+ {
+ psta->aid = 0;/* default set to 0 */
+ psta->mac_id = psta->aid + 1;
+
+ psta->qos_option = 0;
+ psta->htpriv.ht_option = false;
+
+ psta->ieee8021x_blocked = 0;
+
+ memset((void*)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+
+ /* prepare for add_RATid23a */
+ supportRateNum = rtw_get_rateset_len23a((u8*)&pcur_network->SupportedRates);
+ network_type = rtw_check_network_type23a((u8*)&pcur_network->SupportedRates, supportRateNum, 1);
+
+ memcpy(psta->bssrateset, &pcur_network->SupportedRates, supportRateNum);
+ psta->bssratelen = supportRateNum;
+
+ /* b/g mode ra_bitmap */
+ for (i = 0; i<supportRateNum; i++)
+ {
+ if (psta->bssrateset[i])
+ tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value23a(psta->bssrateset[i]&0x7f);
+ }
+
+ if (pcur_network->Configuration.DSConfig > 14) {
+ /* force to A mode. 5G doesn't support CCK rates */
+ network_type = WIRELESS_11A;
+ tx_ra_bitmap = 0x150; /* 6, 12, 24 Mbps */
+ } else {
+ /* force to b mode */
+ network_type = WIRELESS_11B;
+ tx_ra_bitmap = 0xf;
+ }
+
+ raid = networktype_to_raid23a(network_type);
+ init_rate = get_highest_rate_idx23a(tx_ra_bitmap&0x0fffffff)&0x3f;
+
+ /* ap mode */
+ rtw_hal_set_odm_var23a(padapter, HAL_ODM_STA_INFO, psta, true);
+
+ {
+ u8 arg = 0;
+
+ arg = psta->mac_id&0x1f;
+
+ arg |= BIT(7);
+
+ tx_ra_bitmap |= ((raid<<28)&0xf0000000);
+
+ DBG_8723A("update_bmc_sta, mask = 0x%x, arg = 0x%x\n", tx_ra_bitmap, arg);
+
+ /* bitmap[0:27] = tx_rate_bitmap */
+ /* bitmap[28:31]= Rate Adaptive id */
+ /* arg[0:4] = macid */
+ /* arg[5] = Short GI */
+ rtw_hal_add_ra_tid23a(padapter, tx_ra_bitmap, arg, 0);
+
+ }
+
+ /* set ra_id, init_rate */
+ psta->raid = raid;
+ psta->init_rate = init_rate;
+
+ rtw_stassoc_hw_rpt23a(padapter, psta);
+
+ spin_lock_bh(&psta->lock);
+ psta->state = _FW_LINKED;
+ spin_unlock_bh(&psta->lock);
+
+ }
+ else
+ {
+ DBG_8723A("add_RATid23a_bmc_sta error!\n");
+ }
+}
+
+/* notes: */
+/* AID: 1~MAX for sta and 0 for bc/mc in ap/adhoc mode */
+/* MAC_ID = AID+1 for sta in ap/adhoc mode */
+/* MAC_ID = 1 for bc/mc for sta/ap/adhoc */
+/* MAC_ID = 0 for bssid for sta/ap/adhoc */
+/* CAM_ID = 0~3 for default key, cmd_id = macid + 3, macid = aid+1; */
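+/* Illustrative example (not part of the original code): a station with AID 5 */
+/* therefore uses MAC_ID 6 and, per the cmd_id rule above, CAM cmd_id 6 + 3 = 9, */
+/* while the bc/mc entry uses AID 0 and MAC_ID 1. */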
+
+void update_sta_info23a_apmode23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
+ struct ht_priv *phtpriv_sta = &psta->htpriv;
+ /* set intf_tag to if1 */
+
+ psta->mac_id = psta->aid+1;
+ DBG_8723A("%s\n", __func__);
+
+ /* ap mode */
+ rtw_hal_set_odm_var23a(padapter, HAL_ODM_STA_INFO, psta, true);
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
+ psta->ieee8021x_blocked = true;
+ else
+ psta->ieee8021x_blocked = false;
+
+ /* update sta's cap */
+
+ /* ERP */
+ VCS_update23a(padapter, psta);
+ /* HT related cap */
+ if (phtpriv_sta->ht_option)
+ {
+ /* check if sta supports rx ampdu */
+ phtpriv_sta->ampdu_enable = phtpriv_ap->ampdu_enable;
+
+ /* check if sta supports Short GI */
+ if ((phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info) & cpu_to_le16(IEEE80211_HT_CAP_SGI_20|IEEE80211_HT_CAP_SGI_40))
+ phtpriv_sta->sgi = true;
+
+ /* bwmode */
+ if ((phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info) & cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40)) {
+ /* phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_40; */
+ phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
+ phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
+
+ }
+
+ psta->qos_option = true;
+
+ }
+ else
+ {
+ phtpriv_sta->ampdu_enable = false;
+
+ phtpriv_sta->sgi = false;
+ phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_20;
+ phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+
+ /* Rx AMPDU */
+ send_delba23a(padapter, 0, psta->hwaddr);/* recipient */
+
+ /* TX AMPDU */
+ send_delba23a(padapter, 1, psta->hwaddr);/* originator */
+ phtpriv_sta->agg_enable_bitmap = 0x0;/* reset */
+ phtpriv_sta->candidate_tid_bitmap = 0x0;/* reset */
+
+ /* todo: init other variables */
+
+ memset((void*)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+
+ spin_lock_bh(&psta->lock);
+ psta->state |= _FW_LINKED;
+ spin_unlock_bh(&psta->lock);
+}
+
+static void update_hw_ht_param(struct rtw_adapter *padapter)
+{
+ unsigned char max_AMPDU_len;
+ unsigned char min_MPDU_spacing;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ DBG_8723A("%s\n", __func__);
+
+ /* handle A-MPDU parameter field */
+ /*
+ AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ AMPDU_para [4:2]:Min MPDU Start Spacing
+ */
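+ /*
+  * Illustrative example (not part of the original code): AMPDU_para = 0x17
+  * gives max_AMPDU_len = 0x17 & 0x03 = 3 (64k) and
+  * min_MPDU_spacing = (0x17 & 0x1c) >> 2 = 5.
+  */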
+ max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
+
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+
+ /* Config SM Power Save setting */
+ pmlmeinfo->SM_PS = (pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info & 0x0C) >> 2;
+ if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
+ DBG_8723A("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
+}
+
+static void start_bss_network(struct rtw_adapter *padapter, u8 *pbuf)
+{
+ u8 *p;
+ u8 val8, cur_channel, cur_bwmode, cur_ch_offset;
+ u16 bcn_interval;
+ u32 acparm;
+ int ie_len;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv* psecuritypriv = &padapter->securitypriv;
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
+ struct HT_info_element *pht_info = NULL;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_8723AU_P2P */
+
+ bcn_interval = (u16)pnetwork->Configuration.BeaconPeriod;
+ cur_channel = pnetwork->Configuration.DSConfig;
+ cur_bwmode = HT_CHANNEL_WIDTH_20;
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ /* check if there is a WPS IE; */
+ /* if there is a WPS IE in the beacon, hostapd will update the beacon twice when starting, */
+ /* and the first time the security IE (RSN/WPA IE) will not be included in the beacon. */
+ if (NULL == rtw_get_wps_ie23a(pnetwork->IEs+_FIXED_IE_LENGTH_, pnetwork->IELength-_FIXED_IE_LENGTH_, NULL, NULL))
+ pmlmeext->bstart_bss = true;
+
+ /* todo: update wmm, ht cap */
+ /* pmlmeinfo->WMM_enable; */
+ /* pmlmeinfo->HT_enable; */
+ if (pmlmepriv->qospriv.qos_option)
+ pmlmeinfo->WMM_enable = true;
+ if (pmlmepriv->htpriv.ht_option) {
+ pmlmeinfo->WMM_enable = true;
+ pmlmeinfo->HT_enable = true;
+
+ update_hw_ht_param(padapter);
+ }
+
+ if (pmlmepriv->cur_network.join_res != true) {
+ /* setting only at first time */
+ /* WEP Key will be set before this function, do not clear CAM. */
+ if ((psecuritypriv->dot11PrivacyAlgrthm != _WEP40_) && (psecuritypriv->dot11PrivacyAlgrthm != _WEP104_))
+ flush_all_cam_entry23a(padapter); /* clear CAM */
+ }
+
+ /* set MSR to AP_Mode */
+ Set_MSR23a(padapter, _HW_STATE_AP_);
+
+ /* Set BSSID REG */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BSSID, pnetwork->MacAddress);
+
+ /* Set EDCA param reg */
+ acparm = 0x002F3217; /* VO */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acparm));
+ acparm = 0x005E4317; /* VI */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acparm));
+ acparm = 0x005ea42b;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acparm));
+ acparm = 0x0000A444; /* BK */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acparm));
+
+ /* Set Security */
+ val8 = (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)? 0xcc: 0xcf;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* Beacon Control related register */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&bcn_interval));
+
+ UpdateBrateTbl23a(padapter, pnetwork->SupportedRates);
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BASIC_RATE, pnetwork->SupportedRates);
+
+ if (!pmlmepriv->cur_network.join_res) {
+ /* setting only at first time */
+
+ /* disable dynamic functions, such as high power, DIG */
+
+ /* turn on all dynamic functions */
+ Switch_DM_Func23a(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
+ }
+ /* set channel, bwmode */
+ p = rtw_get_ie23a((pnetwork->IEs + sizeof(struct ndis_802_11_fixed_ies)),
+ _HT_ADD_INFO_IE_, &ie_len, (pnetwork->IELength -
+ sizeof(struct ndis_802_11_fixed_ies)));
+ if (p && ie_len) {
+ pht_info = (struct HT_info_element *)(p+2);
+
+ if ((pregpriv->cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
+ /* switch to 40 MHz mode */
+ cur_bwmode = HT_CHANNEL_WIDTH_40;
+ switch (pht_info->infos[0] & 0x3) {
+ case 1:
+ /* pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER; */
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+ case 3:
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+ default:
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+ }
+ }
+ /* TODO: need to judge the phy parameters on concurrent mode for single phy */
+ set_channel_bwmode23a(padapter, cur_channel, cur_ch_offset, cur_bwmode);
+
+ DBG_8723A("CH =%d, BW =%d, offset =%d\n", cur_channel, cur_bwmode,
+ cur_ch_offset);
+
+ pmlmeext->cur_channel = cur_channel;
+ pmlmeext->cur_bwmode = cur_bwmode;
+ pmlmeext->cur_ch_offset = cur_ch_offset;
+ pmlmeext->cur_wireless_mode = pmlmepriv->cur_network.network_type;
+
+ /* update cur_wireless_mode */
+ update_wireless_mode23a(padapter);
+
+ /* update capability after cur_wireless_mode updated */
+ update_capinfo23a(padapter, rtw_get_capability23a((struct wlan_bssid_ex *)pnetwork));
+
+ /* let pnetwork_mlmeext == pnetwork_mlme. */
+ memcpy(pnetwork_mlmeext, pnetwork, pnetwork->Length);
+
+#ifdef CONFIG_8723AU_P2P
+ memcpy(pwdinfo->p2p_group_ssid, pnetwork->Ssid.ssid,
+ pnetwork->Ssid.ssid_len);
+ pwdinfo->p2p_group_ssid_len = pnetwork->Ssid.ssid_len;
+#endif /* CONFIG_8723AU_P2P */
+
+ if (pmlmeext->bstart_bss) {
+ update_beacon23a(padapter, _TIM_IE_, NULL, false);
+
+ /* issue beacon frame */
+ if (send_beacon23a(padapter) == _FAIL)
+ DBG_8723A("issue_beacon23a, fail!\n");
+ }
+
+ /* update bc/mc sta_info */
+ update_bmc_sta(padapter);
+}
+
+int rtw_check_beacon_data23a(struct rtw_adapter *padapter, u8 *pbuf, int len)
+{
+ int ret = _SUCCESS;
+ u8 *p;
+ u8 *pHT_caps_ie = NULL;
+ u8 *pHT_info_ie = NULL;
+ struct sta_info *psta = NULL;
+ u16 cap, ht_cap = false;
+ uint ie_len = 0;
+ int group_cipher, pairwise_cipher;
+ u8 channel, network_type, supportRate[NDIS_802_11_LENGTH_RATES_EX];
+ int supportRateNum = 0;
+ u8 OUI1[] = {0x00, 0x50, 0xf2, 0x01};
+ u8 WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *pbss_network = &pmlmepriv->cur_network.network;
+ u8 *ie = pbss_network->IEs;
+
+ /* SSID */
+ /* Supported rates */
+ /* DS Params */
+ /* WLAN_EID_COUNTRY */
+ /* ERP Information element */
+ /* Extended supported rates */
+ /* WPA/WPA2 */
+ /* Wi-Fi Wireless Multimedia Extensions */
+ /* ht_capab, ht_oper */
+ /* WPS IE */
+
+ DBG_8723A("%s, len =%d\n", __func__, len);
+
+ if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ return _FAIL;
+
+ if (len>MAX_IE_SZ)
+ return _FAIL;
+
+ pbss_network->IELength = len;
+
+ memset(ie, 0, MAX_IE_SZ);
+
+ memcpy(ie, pbuf, pbss_network->IELength);
+
+ if (pbss_network->InfrastructureMode!= Ndis802_11APMode)
+ return _FAIL;
+
+ pbss_network->Rssi = 0;
+
+ memcpy(pbss_network->MacAddress, myid(&padapter->eeprompriv), ETH_ALEN);
+
+ /* beacon interval */
+ /* ie + 8; 8: TimeStamp, 2: Beacon Interval 2:Capability */
+ p = rtw_get_beacon_interval23a_from_ie(ie);
+ pbss_network->Configuration.BeaconPeriod = get_unaligned_le16(p);
+
+ /* capability */
+ cap = get_unaligned_le16(ie);
+
+ /* SSID */
+ p = rtw_get_ie23a(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len,
+ (pbss_network->IELength -_BEACON_IE_OFFSET_));
+ if (p && ie_len > 0) {
+ memset(&pbss_network->Ssid, 0, sizeof(struct cfg80211_ssid));
+ memcpy(pbss_network->Ssid.ssid, (p + 2), ie_len);
+ pbss_network->Ssid.ssid_len = ie_len;
+ }
+
+ /* channel */
+ channel = 0;
+ pbss_network->Configuration.Length = 0;
+ p = rtw_get_ie23a(ie + _BEACON_IE_OFFSET_, _DSSET_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0)
+ channel = *(p + 2);
+
+ pbss_network->Configuration.DSConfig = channel;
+
+ memset(supportRate, 0, NDIS_802_11_LENGTH_RATES_EX);
+ /* get supported rates */
+ p = rtw_get_ie23a(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p) {
+ memcpy(supportRate, p+2, ie_len);
+ supportRateNum = ie_len;
+ }
+
+ /* get ext_supported rates */
+ p = rtw_get_ie23a(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_,
+ &ie_len, pbss_network->IELength - _BEACON_IE_OFFSET_);
+ if (p) {
+ memcpy(supportRate+supportRateNum, p+2, ie_len);
+ supportRateNum += ie_len;
+ }
+
+ network_type = rtw_check_network_type23a(supportRate,
+ supportRateNum, channel);
+
+ rtw_set_supported_rate23a(pbss_network->SupportedRates, network_type);
+
+ /* parsing ERP_IE */
+ p = rtw_get_ie23a(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0)
+ ERP_IE_handler23a(padapter, (struct ndis_802_11_var_ies *)p);
+
+ /* update privacy/security */
+ if (cap & BIT(4))
+ pbss_network->Privacy = 1;
+ else
+ pbss_network->Privacy = 0;
+
+ psecuritypriv->wpa_psk = 0;
+
+ /* wpa2 */
+ group_cipher = 0; pairwise_cipher = 0;
+ psecuritypriv->wpa2_group_cipher = _NO_PRIVACY_;
+ psecuritypriv->wpa2_pairwise_cipher = _NO_PRIVACY_;
+ p = rtw_get_ie23a(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0) {
+ if (rtw_parse_wpa2_ie23a(p, ie_len+2, &group_cipher,
+ &pairwise_cipher, NULL) == _SUCCESS) {
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ psecuritypriv->dot8021xalg = 1;/* psk, todo:802.1x */
+ psecuritypriv->wpa_psk |= BIT(1);
+
+ psecuritypriv->wpa2_group_cipher = group_cipher;
+ psecuritypriv->wpa2_pairwise_cipher = pairwise_cipher;
+ }
+ }
+
+ /* wpa */
+ ie_len = 0;
+ group_cipher = 0;
+ pairwise_cipher = 0;
+ psecuritypriv->wpa_group_cipher = _NO_PRIVACY_;
+ psecuritypriv->wpa_pairwise_cipher = _NO_PRIVACY_;
+ for (p = ie + _BEACON_IE_OFFSET_; ;p += (ie_len + 2)) {
+ p = rtw_get_ie23a(p, _SSN_IE_1_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_ -
+ (ie_len + 2)));
+ if ((p) && (!memcmp(p+2, OUI1, 4))) {
+ if (rtw_parse_wpa_ie23a(p, ie_len+2, &group_cipher,
+ &pairwise_cipher, NULL) == _SUCCESS) {
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ /* psk, todo:802.1x */
+ psecuritypriv->dot8021xalg = 1;
+
+ psecuritypriv->wpa_psk |= BIT(0);
+
+ psecuritypriv->wpa_group_cipher = group_cipher;
+ psecuritypriv->wpa_pairwise_cipher = pairwise_cipher;
+ }
+ break;
+ }
+
+ if ((p == NULL) || (ie_len == 0))
+ break;
+ }
+
+ /* wmm */
+ ie_len = 0;
+ pmlmepriv->qospriv.qos_option = 0;
+ if (pregistrypriv->wmm_enable) {
+ for (p = ie + _BEACON_IE_OFFSET_; ;p += (ie_len + 2)) {
+ p = rtw_get_ie23a(p, _VENDOR_SPECIFIC_IE_, &ie_len,
+ (pbss_network->IELength -
+ _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ if ((p) && !memcmp(p+2, WMM_PARA_IE, 6)) {
+ pmlmepriv->qospriv.qos_option = 1;
+
+ *(p+8) |= BIT(7);/* QoS Info, support U-APSD */
+
+ /* disable all ACM bits since the WMM admission
+ * control is not supported
+ */
+ *(p + 10) &= ~BIT(4); /* BE */
+ *(p + 14) &= ~BIT(4); /* BK */
+ *(p + 18) &= ~BIT(4); /* VI */
+ *(p + 22) &= ~BIT(4); /* VO */
+ break;
+ }
+ if ((p == NULL) || (ie_len == 0))
+ break;
+ }
+ }
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie23a(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0) {
+ u8 rf_type;
+
+ struct ieee80211_ht_cap *pht_cap = (struct ieee80211_ht_cap *)(p+2);
+
+ pHT_caps_ie = p;
+
+ ht_cap = true;
+ network_type |= WIRELESS_11_24N;
+
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
+ (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP))
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_DENSITY & (0x07<<2));
+ else
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_DENSITY&0x00);
+
+ /* set Max Rx AMPDU size to 64K */
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_FACTOR & 0x03);
+
+ if (rf_type == RF_1T1R) {
+ pht_cap->mcs.rx_mask[0] = 0xff;
+ pht_cap->mcs.rx_mask[1] = 0x0;
+ }
+
+ memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len);
+ }
+
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie23a(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0)
+ pHT_info_ie = p;
+
+ switch (network_type) {
+ case WIRELESS_11B:
+ pbss_network->NetworkTypeInUse = Ndis802_11DS;
+ break;
+ case WIRELESS_11G:
+ case WIRELESS_11BG:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11BG_24N:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
+ break;
+ case WIRELESS_11A:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM5;
+ break;
+ default:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
+ break;
+ }
+
+ pmlmepriv->cur_network.network_type = network_type;
+
+ pmlmepriv->htpriv.ht_option = false;
+
+ /* ht_cap */
+ if (pregistrypriv->ht_enable && ht_cap) {
+ pmlmepriv->htpriv.ht_option = true;
+ pmlmepriv->qospriv.qos_option = 1;
+
+ if (pregistrypriv->ampdu_enable == 1)
+ pmlmepriv->htpriv.ampdu_enable = true;
+
+ HT_caps_handler23a(padapter, (struct ndis_802_11_var_ies *)pHT_caps_ie);
+
+ HT_info_handler23a(padapter, (struct ndis_802_11_var_ies *)pHT_info_ie);
+ }
+
+ pbss_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pbss_network);
+
+ /* issue beacon to start bss network */
+ start_bss_network(padapter, (u8*)pbss_network);
+
+ /* alloc sta_info for ap itself */
+ psta = rtw_get_stainfo23a(&padapter->stapriv, pbss_network->MacAddress);
+ if (!psta) {
+ psta = rtw_alloc_stainfo23a(&padapter->stapriv, pbss_network->MacAddress);
+ if (!psta)
+ return _FAIL;
+ }
+ /* fix bug of flush_cam_entry at STOP AP mode */
+ psta->state |= WIFI_AP_STATE;
+ rtw_indicate_connect23a(padapter);
+
+ /* used to check whether the beacon has already been set */
+ pmlmepriv->cur_network.join_res = true;
+
+ return ret;
+}
+
+void rtw_set_macaddr_acl23a(struct rtw_adapter *padapter, int mode)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+ DBG_8723A("%s, mode =%d\n", __func__, mode);
+
+ pacl_list->mode = mode;
+}
+
+int rtw_acl_add_sta23a(struct rtw_adapter *padapter, u8 *addr)
+{
+ struct list_head *plist, *phead;
+ u8 added = false;
+ int i, ret = 0;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ DBG_8723A("%s(acl_num =%d) =" MAC_FMT "\n", __func__, pacl_list->num, MAC_ARG(addr));
+
+ if ((NUM_ACL-1) < pacl_list->num)
+ return -1;
+
+ spin_lock_bh(&pacl_node_q->lock);
+
+ phead = get_list_head(pacl_node_q);
+
+ list_for_each(plist, phead) {
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+
+ if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
+ if (paclnode->valid == true) {
+ added = true;
+ DBG_8723A("%s, sta has been added\n", __func__);
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&pacl_node_q->lock);
+
+ if (added)
+ return ret;
+
+ spin_lock_bh(&pacl_node_q->lock);
+
+ for (i = 0; i < NUM_ACL; i++) {
+ paclnode = &pacl_list->aclnode[i];
+
+ if (!paclnode->valid) {
+ INIT_LIST_HEAD(&paclnode->list);
+
+ memcpy(paclnode->addr, addr, ETH_ALEN);
+
+ paclnode->valid = true;
+
+ list_add_tail(&paclnode->list, get_list_head(pacl_node_q));
+
+ pacl_list->num++;
+
+ break;
+ }
+ }
+
+ DBG_8723A("%s, acl_num =%d\n", __func__, pacl_list->num);
+
+ spin_unlock_bh(&pacl_node_q->lock);
+ return ret;
+}
+
+int rtw_acl_remove_sta23a(struct rtw_adapter *padapter, u8 *addr)
+{
+ struct list_head *plist, *phead, *ptmp;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q;
+ int ret = 0;
+
+ DBG_8723A("%s(acl_num =%d) = %pM\n", __func__, pacl_list->num, addr);
+
+ spin_lock_bh(&pacl_node_q->lock);
+
+ phead = get_list_head(pacl_node_q);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+
+ if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
+ if (paclnode->valid) {
+ paclnode->valid = false;
+
+ list_del_init(&paclnode->list);
+
+ pacl_list->num--;
+ }
+ }
+ }
+
+ spin_unlock_bh(&pacl_node_q->lock);
+
+ DBG_8723A("%s, acl_num =%d\n", __func__, pacl_list->num);
+
+ return ret;
+}
+
+static void update_bcn_fixed_ie(struct rtw_adapter *padapter)
+{
+ DBG_8723A("%s\n", __func__);
+}
+
+static void update_bcn_erpinfo_ie(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
+ unsigned char *p, *ie = pnetwork->IEs;
+ u32 len = 0;
+
+ DBG_8723A("%s, ERP_enable =%d\n", __func__, pmlmeinfo->ERP_enable);
+
+ if (!pmlmeinfo->ERP_enable)
+ return;
+
+ /* parsing ERP_IE */
+ p = rtw_get_ie23a(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &len, (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ if (p && len>0)
+ {
+ struct ndis_802_11_var_ies * pIE = (struct ndis_802_11_var_ies *)p;
+
+ if (pmlmepriv->num_sta_non_erp == 1)
+ pIE->data[0] |= WLAN_ERP_NON_ERP_PRESENT |
+ WLAN_ERP_USE_PROTECTION;
+ else
+ pIE->data[0] &= ~(WLAN_ERP_NON_ERP_PRESENT |
+ WLAN_ERP_USE_PROTECTION);
+
+ if (pmlmepriv->num_sta_no_short_preamble > 0)
+ pIE->data[0] |= WLAN_ERP_BARKER_PREAMBLE;
+ else
+ pIE->data[0] &= ~(WLAN_ERP_BARKER_PREAMBLE);
+
+ ERP_IE_handler23a(padapter, pIE);
+ }
+}
+
+static void update_bcn_htcap_ie(struct rtw_adapter *padapter)
+{
+ DBG_8723A("%s\n", __func__);
+}
+
+static void update_bcn_htinfo_ie(struct rtw_adapter *padapter)
+{
+ DBG_8723A("%s\n", __func__);
+}
+
+static void update_bcn_rsn_ie(struct rtw_adapter *padapter)
+{
+ DBG_8723A("%s\n", __func__);
+}
+
+static void update_bcn_wpa_ie(struct rtw_adapter *padapter)
+{
+ DBG_8723A("%s\n", __func__);
+}
+
+static void update_bcn_wmm_ie(struct rtw_adapter *padapter)
+{
+ DBG_8723A("%s\n", __func__);
+}
+
+static void update_bcn_wps_ie(struct rtw_adapter *padapter)
+{
+ u8 *pwps_ie = NULL, *pwps_ie_src, *premainder_ie, *pbackup_remainder_ie = NULL;
+ uint wps_ielen = 0, wps_offset, remainder_ielen;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
+ unsigned char *ie = pnetwork->IEs;
+ u32 ielen = pnetwork->IELength;
+
+ DBG_8723A("%s\n", __func__);
+
+ pwps_ie = rtw_get_wps_ie23a(ie+_FIXED_IE_LENGTH_, ielen-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+
+ if (pwps_ie == NULL || wps_ielen == 0)
+ return;
+
+ wps_offset = (uint)(pwps_ie-ie);
+
+ premainder_ie = pwps_ie + wps_ielen;
+
+ remainder_ielen = ielen - wps_offset - wps_ielen;
+
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = kmalloc(remainder_ielen, GFP_ATOMIC);
+ if (pbackup_remainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie,
+ remainder_ielen);
+ }
+
+ pwps_ie_src = pmlmepriv->wps_beacon_ie;
+ if (pwps_ie_src == NULL)
+ return;
+
+ wps_ielen = (uint)pwps_ie_src[1];/* to get ie data len */
+ if ((wps_offset+wps_ielen+2+remainder_ielen)<= MAX_IE_SZ)
+ {
+ memcpy(pwps_ie, pwps_ie_src, wps_ielen+2);
+ pwps_ie += (wps_ielen+2);
+
+ if (pbackup_remainder_ie)
+ memcpy(pwps_ie, pbackup_remainder_ie, remainder_ielen);
+
+ /* update IELength */
+ pnetwork->IELength = wps_offset + (wps_ielen+2) + remainder_ielen;
+ }
+
+ if (pbackup_remainder_ie)
+ kfree(pbackup_remainder_ie);
+}
+
+static void update_bcn_p2p_ie(struct rtw_adapter *padapter)
+{
+}
+
+static void update_bcn_vendor_spec_ie(struct rtw_adapter *padapter, u8 *oui)
+{
+ DBG_8723A("%s\n", __func__);
+
+ if (!memcmp(RTW_WPA_OUI23A, oui, 4))
+ {
+ update_bcn_wpa_ie(padapter);
+ }
+ else if (!memcmp(WMM_OUI23A, oui, 4))
+ {
+ update_bcn_wmm_ie(padapter);
+ }
+ else if (!memcmp(WPS_OUI23A, oui, 4))
+ {
+ update_bcn_wps_ie(padapter);
+ }
+ else if (!memcmp(P2P_OUI23A, oui, 4))
+ {
+ update_bcn_p2p_ie(padapter);
+ }
+ else
+ {
+ DBG_8723A("unknown OUI type!\n");
+ }
+}
+
+void update_beacon23a(struct rtw_adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
+{
+ struct mlme_priv *pmlmepriv;
+ struct mlme_ext_priv *pmlmeext;
+ /* struct mlme_ext_info *pmlmeinfo; */
+
+ /* DBG_8723A("%s\n", __func__); */
+
+ if (!padapter)
+ return;
+
+ pmlmepriv = &padapter->mlmepriv;
+ pmlmeext = &padapter->mlmeextpriv;
+ /* pmlmeinfo = &pmlmeext->mlmext_info; */
+
+ if (false == pmlmeext->bstart_bss)
+ return;
+
+ spin_lock_bh(&pmlmepriv->bcn_update_lock);
+
+ switch (ie_id)
+ {
+ case 0xFF:
+
+ update_bcn_fixed_ie(padapter);/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
+
+ break;
+
+ case _TIM_IE_:
+
+ update_BCNTIM(padapter);
+
+ break;
+
+ case _ERPINFO_IE_:
+
+ update_bcn_erpinfo_ie(padapter);
+
+ break;
+
+ case _HT_CAPABILITY_IE_:
+
+ update_bcn_htcap_ie(padapter);
+
+ break;
+
+ case _RSN_IE_2_:
+
+ update_bcn_rsn_ie(padapter);
+
+ break;
+
+ case _HT_ADD_INFO_IE_:
+
+ update_bcn_htinfo_ie(padapter);
+
+ break;
+
+ case _VENDOR_SPECIFIC_IE_:
+
+ update_bcn_vendor_spec_ie(padapter, oui);
+
+ break;
+
+ default:
+ break;
+ }
+
+ pmlmepriv->update_bcn = true;
+
+ spin_unlock_bh(&pmlmepriv->bcn_update_lock);
+
+ if (tx)
+ set_tx_beacon_cmd23a(padapter);
+}
+
+/*
+op_mode
+Set to 0 (HT pure) under the following conditions
+ - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
+ - all STAs in the BSS are 20 MHz HT in 20 MHz BSS
+Set to 1 (HT non-member protection) if there may be non-HT STAs
+ in both the primary and the secondary channel
+Set to 2 if only HT STAs are associated in the BSS
+ and at least one 20 MHz HT STA is associated
+Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
+ (currently non-GF HT station is considered as non-HT STA also)
+*/
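+/*
+ * Illustrative example (not part of the original code): if a legacy (non-HT)
+ * station associates while HT stations are present, rtw_ht_operation_update()
+ * below sets the non-HT-STA-present bit and moves op_mode to 3 (HT mixed mode).
+ */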
+static int rtw_ht_operation_update(struct rtw_adapter *padapter)
+{
+ u16 cur_op_mode, new_op_mode;
+ int op_mode_changes = 0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
+
+ if (pmlmepriv->htpriv.ht_option == true)
+ return 0;
+
+ /* if (!iface->conf->ieee80211n || iface->conf->ht_op_mode_fixed) */
+ /* return 0; */
+
+ DBG_8723A("%s current operation mode = 0x%X\n",
+ __func__, pmlmepriv->ht_op_mode);
+
+ if (!(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT)
+ && pmlmepriv->num_sta_ht_no_gf) {
+ pmlmepriv->ht_op_mode |=
+ HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT;
+ op_mode_changes++;
+ } else if ((pmlmepriv->ht_op_mode &
+ HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT) &&
+ pmlmepriv->num_sta_ht_no_gf == 0) {
+ pmlmepriv->ht_op_mode &=
+ ~HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT;
+ op_mode_changes++;
+ }
+
+ if (!(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT) &&
+ (pmlmepriv->num_sta_no_ht || pmlmepriv->olbc_ht)) {
+ pmlmepriv->ht_op_mode |= HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT;
+ op_mode_changes++;
+ } else if ((pmlmepriv->ht_op_mode &
+ HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT) &&
+ (pmlmepriv->num_sta_no_ht == 0 && !pmlmepriv->olbc_ht)) {
+ pmlmepriv->ht_op_mode &=
+ ~HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT;
+ op_mode_changes++;
+ }
+
+ /* Note: currently we switch to the MIXED op mode if HT non-greenfield
+ * station is associated. Probably it's a theoretical case, since
+ * it looks like all known HT STAs support greenfield.
+ */
+ new_op_mode = 0;
+ if (pmlmepriv->num_sta_no_ht ||
+ (pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT))
+ new_op_mode = OP_MODE_MIXED;
+ else if ((phtpriv_ap->ht_cap.cap_info & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ && pmlmepriv->num_sta_ht_20mhz)
+ new_op_mode = OP_MODE_20MHZ_HT_STA_ASSOCED;
+ else if (pmlmepriv->olbc_ht)
+ new_op_mode = OP_MODE_MAY_BE_LEGACY_STAS;
+ else
+ new_op_mode = OP_MODE_PURE;
+
+ cur_op_mode = pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_OP_MODE_MASK;
+ if (cur_op_mode != new_op_mode) {
+ pmlmepriv->ht_op_mode &= ~HT_INFO_OPERATION_MODE_OP_MODE_MASK;
+ pmlmepriv->ht_op_mode |= new_op_mode;
+ op_mode_changes++;
+ }
+
+ DBG_8723A("%s new operation mode = 0x%X changes =%d\n",
+ __func__, pmlmepriv->ht_op_mode, op_mode_changes);
+
+ return op_mode_changes;
+}
+
+void associated_clients_update23a(struct rtw_adapter *padapter, u8 updated)
+{
+ /* update associated stations' cap. */
+ if (updated == true)
+ {
+ struct list_head *phead, *plist, *ptmp;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+
+ phead = &pstapriv->asoc_list;
+
+ list_for_each_safe(plist, ptmp, phead) {
+ psta = container_of(plist, struct sta_info, asoc_list);
+
+ VCS_update23a(padapter, psta);
+ }
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+ }
+}
+
+/* called > TSR LEVEL for USB or SDIO Interface*/
+void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ u8 beacon_updated = false;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE))
+ {
+ if (!psta->no_short_preamble_set)
+ {
+ psta->no_short_preamble_set = 1;
+
+ pmlmepriv->num_sta_no_short_preamble++;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_preamble == 1))
+ {
+ beacon_updated = true;
+ update_beacon23a(padapter, 0xFF, NULL, true);
+ }
+
+ }
+ }
+ else
+ {
+ if (psta->no_short_preamble_set)
+ {
+ psta->no_short_preamble_set = 0;
+
+ pmlmepriv->num_sta_no_short_preamble--;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_preamble == 0))
+ {
+ beacon_updated = true;
+ update_beacon23a(padapter, 0xFF, NULL, true);
+ }
+
+ }
+ }
+
+ if (psta->flags & WLAN_STA_NONERP)
+ {
+ if (!psta->nonerp_set)
+ {
+ psta->nonerp_set = 1;
+
+ pmlmepriv->num_sta_non_erp++;
+
+ if (pmlmepriv->num_sta_non_erp == 1)
+ {
+ beacon_updated = true;
+ update_beacon23a(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+
+ }
+ else
+ {
+ if (psta->nonerp_set)
+ {
+ psta->nonerp_set = 0;
+
+ pmlmepriv->num_sta_non_erp--;
+
+ if (pmlmepriv->num_sta_non_erp == 0)
+ {
+ beacon_updated = true;
+ update_beacon23a(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+
+ }
+
+ if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME))
+ {
+ if (!psta->no_short_slot_time_set)
+ {
+ psta->no_short_slot_time_set = 1;
+
+ pmlmepriv->num_sta_no_short_slot_time++;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_slot_time == 1))
+ {
+ beacon_updated = true;
+ update_beacon23a(padapter, 0xFF, NULL, true);
+ }
+
+ }
+ }
+ else
+ {
+ if (psta->no_short_slot_time_set)
+ {
+ psta->no_short_slot_time_set = 0;
+
+ pmlmepriv->num_sta_no_short_slot_time--;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_slot_time == 0))
+ {
+ beacon_updated = true;
+ update_beacon23a(padapter, 0xFF, NULL, true);
+ }
+ }
+ }
+
+ if (psta->flags & WLAN_STA_HT)
+ {
+ u16 ht_capab = le16_to_cpu(psta->htpriv.ht_cap.cap_info);
+
+ DBG_8723A("HT: STA " MAC_FMT " HT Capabilities "
+ "Info: 0x%04x\n", MAC_ARG(psta->hwaddr), ht_capab);
+
+ if (psta->no_ht_set) {
+ psta->no_ht_set = 0;
+ pmlmepriv->num_sta_no_ht--;
+ }
+
+ if ((ht_capab & IEEE80211_HT_CAP_GRN_FLD) == 0) {
+ if (!psta->no_ht_gf_set) {
+ psta->no_ht_gf_set = 1;
+ pmlmepriv->num_sta_ht_no_gf++;
+ }
+ DBG_8723A("%s STA " MAC_FMT " - no "
+ "greenfield, num of non-gf stations %d\n",
+ __func__, MAC_ARG(psta->hwaddr),
+ pmlmepriv->num_sta_ht_no_gf);
+ }
+
+ if ((ht_capab & IEEE80211_HT_CAP_SUP_WIDTH_20_40) == 0) {
+ if (!psta->ht_20mhz_set) {
+ psta->ht_20mhz_set = 1;
+ pmlmepriv->num_sta_ht_20mhz++;
+ }
+ DBG_8723A("%s STA " MAC_FMT " - 20 MHz HT, "
+ "num of 20MHz HT STAs %d\n",
+ __func__, MAC_ARG(psta->hwaddr),
+ pmlmepriv->num_sta_ht_20mhz);
+ }
+
+ }
+ else
+ {
+ if (!psta->no_ht_set) {
+ psta->no_ht_set = 1;
+ pmlmepriv->num_sta_no_ht++;
+ }
+ if (pmlmepriv->htpriv.ht_option == true) {
+ DBG_8723A("%s STA " MAC_FMT
+ " - no HT, num of non-HT stations %d\n",
+ __func__, MAC_ARG(psta->hwaddr),
+ pmlmepriv->num_sta_no_ht);
+ }
+ }
+
+ if (rtw_ht_operation_update(padapter) > 0)
+ {
+ update_beacon23a(padapter, _HT_CAPABILITY_IE_, NULL, false);
+ update_beacon23a(padapter, _HT_ADD_INFO_IE_, NULL, true);
+ }
+
+ /* update associated stations' cap. */
+ associated_clients_update23a(padapter, beacon_updated);
+
+ DBG_8723A("%s, updated =%d\n", __func__, beacon_updated);
+}
+
+u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ u8 beacon_updated = false;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (!psta)
+ return beacon_updated;
+
+ if (psta->no_short_preamble_set) {
+ psta->no_short_preamble_set = 0;
+ pmlmepriv->num_sta_no_short_preamble--;
+ if (pmlmeext->cur_wireless_mode > WIRELESS_11B
+ && pmlmepriv->num_sta_no_short_preamble == 0)
+ {
+ beacon_updated = true;
+ update_beacon23a(padapter, 0xFF, NULL, true);
+ }
+ }
+
+ if (psta->nonerp_set) {
+ psta->nonerp_set = 0;
+ pmlmepriv->num_sta_non_erp--;
+ if (pmlmepriv->num_sta_non_erp == 0)
+ {
+ beacon_updated = true;
+ update_beacon23a(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+
+ if (psta->no_short_slot_time_set) {
+ psta->no_short_slot_time_set = 0;
+ pmlmepriv->num_sta_no_short_slot_time--;
+ if (pmlmeext->cur_wireless_mode > WIRELESS_11B
+ && pmlmepriv->num_sta_no_short_slot_time == 0)
+ {
+ beacon_updated = true;
+ update_beacon23a(padapter, 0xFF, NULL, true);
+ }
+ }
+
+ if (psta->no_ht_gf_set) {
+ psta->no_ht_gf_set = 0;
+ pmlmepriv->num_sta_ht_no_gf--;
+ }
+
+ if (psta->no_ht_set) {
+ psta->no_ht_set = 0;
+ pmlmepriv->num_sta_no_ht--;
+ }
+
+ if (psta->ht_20mhz_set) {
+ psta->ht_20mhz_set = 0;
+ pmlmepriv->num_sta_ht_20mhz--;
+ }
+
+ if (rtw_ht_operation_update(padapter) > 0)
+ {
+ update_beacon23a(padapter, _HT_CAPABILITY_IE_, NULL, false);
+ update_beacon23a(padapter, _HT_ADD_INFO_IE_, NULL, true);
+ }
+
+ /* update associated stations' cap. */
+
+ DBG_8723A("%s, updated =%d\n", __func__, beacon_updated);
+
+ return beacon_updated;
+}
+
+u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool active, u16 reason)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 beacon_updated = false;
+
+ if (!psta)
+ return beacon_updated;
+
+ if (active == true)
+ {
+ /* tear down Rx AMPDU */
+ send_delba23a(padapter, 0, psta->hwaddr);/* recipient */
+
+ /* tear down TX AMPDU */
+ send_delba23a(padapter, 1, psta->hwaddr);/* originator */
+
+ issue_deauth23a(padapter, psta->hwaddr, reason);
+ }
+
+ psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
+ psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
+
+ /* report_del_sta_event23a(padapter, psta->hwaddr, reason); */
+
+ /* clear cam entry / key */
+ /* clear_cam_entry23a(padapter, (psta->mac_id + 3)); */
+ rtw_clearstakey_cmd23a(padapter, (u8*)psta, (u8)(psta->mac_id + 3), true);
+
+ spin_lock_bh(&psta->lock);
+ psta->state &= ~_FW_LINKED;
+ spin_unlock_bh(&psta->lock);
+
+ rtw_cfg80211_indicate_sta_disassoc(padapter, psta->hwaddr, reason);
+
+ report_del_sta_event23a(padapter, psta->hwaddr, reason);
+
+ beacon_updated = bss_cap_update_on_sta_leave23a(padapter, psta);
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ rtw_free_stainfo23a(padapter, psta);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+
+ return beacon_updated;
+}
+
+int rtw_ap_inform_ch_switch23a (struct rtw_adapter *padapter, u8 new_ch, u8 ch_offset)
+{
+ struct list_head *phead, *plist;
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return ret;
+
+ DBG_8723A(FUNC_NDEV_FMT" with ch:%u, offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev), new_ch, ch_offset);
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ phead = &pstapriv->asoc_list;
+
+ list_for_each(plist, phead) {
+ psta = container_of(plist, struct sta_info, asoc_list);
+
+ issue_action_spct_ch_switch23a (padapter, psta->hwaddr, new_ch, ch_offset);
+ psta->expire_to = ((pstapriv->expire_to * 2) > 5) ? 5 : (pstapriv->expire_to * 2);
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ issue_action_spct_ch_switch23a (padapter, bc_addr, new_ch, ch_offset);
+
+ return ret;
+}
+
+int rtw_sta_flush23a(struct rtw_adapter *padapter)
+{
+ struct list_head *phead, *plist, *ptmp;
+ int ret = 0;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 chk_alive_num = 0;
+ char chk_alive_list[NUM_STA];
+ int i;
+
+ DBG_8723A(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return ret;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ phead = &pstapriv->asoc_list;
+
+ list_for_each_safe(plist, ptmp, phead) {
+ int stainfo_offset;
+
+ psta = container_of(plist, struct sta_info, asoc_list);
+
+ /* Remove sta from asoc_list */
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+
+ /* Keep sta for ap_free_sta23a() beyond this asoc_list loop */
+ stainfo_offset = rtw_stainfo_offset23a(pstapriv, psta);
+ if (stainfo_offset_valid(stainfo_offset)) {
+ chk_alive_list[chk_alive_num++] = stainfo_offset;
+ }
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ /* For each sta in chk_alive_list, call ap_free_sta23a */
+ for (i = 0; i < chk_alive_num; i++) {
+ psta = rtw_get_stainfo23a_by_offset23a(pstapriv, chk_alive_list[i]);
+ ap_free_sta23a(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ }
+
+ issue_deauth23a(padapter, bc_addr, WLAN_REASON_DEAUTH_LEAVING);
+
+ associated_clients_update23a(padapter, true);
+
+ return ret;
+}
+
+/* called > TSR LEVEL for USB or SDIO interface */
+void sta_info_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ int flags = psta->flags;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ /* update wmm cap. */
+ if (WLAN_STA_WME & flags)
+ psta->qos_option = 1;
+ else
+ psta->qos_option = 0;
+
+ if (pmlmepriv->qospriv.qos_option == 0)
+ psta->qos_option = 0;
+
+ /* update 802.11n ht cap. */
+ if (WLAN_STA_HT & flags) {
+ psta->htpriv.ht_option = true;
+ psta->qos_option = 1;
+ } else {
+ psta->htpriv.ht_option = false;
+ }
+
+ if (!pmlmepriv->htpriv.ht_option)
+ psta->htpriv.ht_option = false;
+
+ update_sta_info23a_apmode23a(padapter, psta);
+}
+
+/* called >= TSR LEVEL for USB or SDIO interface */
+void ap_sta_info_defer_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ if (psta->state & _FW_LINKED) {
+ /* add ratid */
+ add_RATid23a(padapter, psta, 0); /* DM_RATR_STA_INIT */
+ }
+}
+
+/* restore hw setting from sw data structures */
+void rtw_ap_restore_network(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *mlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct list_head *phead, *plist, *ptmp;
+ u8 chk_alive_num = 0;
+ char chk_alive_list[NUM_STA];
+ int i;
+
+ rtw_setopmode_cmd23a(padapter, Ndis802_11APMode);
+
+ set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ start_bss_network(padapter, (u8 *)&mlmepriv->cur_network.network);
+
+ if ((padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_) ||
+ (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+ /* restore the group key; WEP keys are restored in ips_leave23a() */
+ rtw_set_key23a(padapter, psecuritypriv, psecuritypriv->dot118021XGrpKeyid, 0);
+ }
+
+ /* per sta pairwise key and settings */
+ if ((padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_) &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm != _AES_)) {
+ return;
+ }
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+
+ phead = &pstapriv->asoc_list;
+
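+ /* Collect station offsets under asoc_list_lock; the per-station
+ rate/key updates below run after the lock is dropped */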
+ list_for_each_safe(plist, ptmp, phead) {
+ int stainfo_offset;
+
+ psta = container_of(plist, struct sta_info, asoc_list);
+
+ stainfo_offset = rtw_stainfo_offset23a(pstapriv, psta);
+ if (stainfo_offset_valid(stainfo_offset))
+ chk_alive_list[chk_alive_num++] = stainfo_offset;
+ }
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ for (i = 0; i < chk_alive_num; i++) {
+ psta = rtw_get_stainfo23a_by_offset23a(pstapriv, chk_alive_list[i]);
+
+ if (!psta) {
+ DBG_8723A(FUNC_ADPT_FMT" sta_info is null\n", FUNC_ADPT_ARG(padapter));
+ } else if (psta->state & _FW_LINKED) {
+ Update_RA_Entry23a(padapter, psta);
+ /* pairwise key */
+ rtw_setstakey_cmd23a(padapter, (unsigned char *)psta, true);
+ }
+ }
+}
+
+void start_ap_mode23a(struct rtw_adapter *padapter)
+{
+ int i;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+ pmlmepriv->update_bcn = false;
+
+ /* init_mlme_ap_info23a(padapter); */
+ pmlmeext->bstart_bss = false;
+
+ pmlmepriv->num_sta_non_erp = 0;
+
+ pmlmepriv->num_sta_no_short_slot_time = 0;
+
+ pmlmepriv->num_sta_no_short_preamble = 0;
+
+ pmlmepriv->num_sta_ht_no_gf = 0;
+ pmlmepriv->num_sta_no_ht = 0;
+ pmlmepriv->num_sta_ht_20mhz = 0;
+
+ pmlmepriv->olbc = false;
+
+ pmlmepriv->olbc_ht = false;
+
+ pmlmepriv->ht_op_mode = 0;
+
+ for (i = 0; i < NUM_STA; i++)
+ pstapriv->sta_aid[i] = NULL;
+
+ pmlmepriv->wps_beacon_ie = NULL;
+ pmlmepriv->wps_probe_resp_ie = NULL;
+ pmlmepriv->wps_assoc_resp_ie = NULL;
+
+ pmlmepriv->p2p_beacon_ie = NULL;
+ pmlmepriv->p2p_probe_resp_ie = NULL;
+
+ /* for ACL */
+ INIT_LIST_HEAD(&pacl_list->acl_node_q.queue);
+ pacl_list->num = 0;
+ pacl_list->mode = 0;
+ for (i = 0; i < NUM_ACL; i++) {
+ INIT_LIST_HEAD(&pacl_list->aclnode[i].list);
+ pacl_list->aclnode[i].valid = false;
+ }
+}
+
+void stop_ap_mode23a(struct rtw_adapter *padapter)
+{
+ struct list_head *phead, *plist, *ptmp;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ pmlmepriv->update_bcn = false;
+ pmlmeext->bstart_bss = false;
+
+ /* reset and init the security priv; this could be refined with rtw_reset_securitypriv23a */
+ memset((unsigned char *)&padapter->securitypriv, 0, sizeof(struct security_priv));
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
+
+ /* for ACL */
+ spin_lock_bh(&pacl_node_q->lock);
+ phead = get_list_head(pacl_node_q);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+
+ if (paclnode->valid) {
+ paclnode->valid = false;
+
+ list_del_init(&paclnode->list);
+
+ pacl_list->num--;
+ }
+ }
+ spin_unlock_bh(&pacl_node_q->lock);
+
+ DBG_8723A("%s, free acl_node_queue, num =%d\n", __func__, pacl_list->num);
+
+ rtw_sta_flush23a(padapter);
+
+ /* free_assoc_sta_resources */
+ rtw_free_all_stainfo23a(padapter);
+
+ psta = rtw_get_bcmc_stainfo23a(padapter);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ rtw_free_stainfo23a(padapter, psta);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+
+ rtw_init_bcmc_stainfo23a(padapter);
+
+ rtw23a_free_mlme_priv_ie_data(pmlmepriv);
+}
+
+#endif /* CONFIG_8723AU_AP_MODE */
diff --git a/drivers/staging/rtl8723au/core/rtw_cmd.c b/drivers/staging/rtl8723au/core/rtw_cmd.c
new file mode 100644
index 000000000000..5e3088a01800
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_cmd.c
@@ -0,0 +1,1876 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_CMD_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <cmd_osdep.h>
+#include <mlme_osdep.h>
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+#include <rtl8723a_hal.h>
+#endif /* CONFIG_8723AU_BT_COEXIST */
+
+static struct cmd_hdl wlancmds[] = {
+ GEN_DRV_CMD_HANDLER(0, NULL) /*0*/
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*10*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct wlan_bssid_ex), join_cmd_hdl23a) /*14*/
+ GEN_MLME_EXT_HANDLER(sizeof (struct disconnect_parm), disconnect_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof (struct wlan_bssid_ex), createbss_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setopmode_parm), setopmode_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof (struct sitesurvey_parm), sitesurvey_cmd_hdl23a) /*18*/
+ GEN_MLME_EXT_HANDLER(sizeof (struct setauth_parm), setauth_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setkey_parm), setkey_hdl23a) /*20*/
+ GEN_MLME_EXT_HANDLER(sizeof (struct set_stakey_parm), set_stakey_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof (struct set_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct del_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setstapwrstate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct getbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct getdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setphyinfo_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct getphyinfo_parm), NULL) /*30*/
+ GEN_MLME_EXT_HANDLER(sizeof (struct setphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct getphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*40*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct addBaReq_parm), add_ba_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_ch_parm), set_ch_hdl23a) /* 46 */
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*50*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct Tx_Beacon_param), tx_beacon_hdl23a) /*55*/
+
+ GEN_MLME_EXT_HANDLER(0, mlme_evt_hdl23a) /*56*/
+ GEN_MLME_EXT_HANDLER(0, rtw_drvextra_cmd_hdl23a) /*57*/
+
+ GEN_MLME_EXT_HANDLER(0, h2c_msg_hdl23a) /*58*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelPlan_param), set_chplan_hdl23a) /*59*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct LedBlink_param), led_blink_hdl23a) /*60*/
+
+ GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelSwitch_param), set_csa_hdl23a) /*61*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct TDLSoption_param), tdls_hdl23a) /*62*/
+};
+
+struct _cmd_callback rtw_cmd_callback[] = {
+ {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
+ {GEN_CMD_CODE(_Write_MACREG), NULL},
+ {GEN_CMD_CODE(_Read_BBREG), &rtw_getbbrfreg_cmdrsp_callback23a},
+ {GEN_CMD_CODE(_Write_BBREG), NULL},
+ {GEN_CMD_CODE(_Read_RFREG), &rtw_getbbrfreg_cmdrsp_callback23a},
+ {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
+ {GEN_CMD_CODE(_Read_EEPROM), NULL},
+ {GEN_CMD_CODE(_Write_EEPROM), NULL},
+ {GEN_CMD_CODE(_Read_EFUSE), NULL},
+ {GEN_CMD_CODE(_Write_EFUSE), NULL},
+
+ {GEN_CMD_CODE(_Read_CAM), NULL}, /*10*/
+ {GEN_CMD_CODE(_Write_CAM), NULL},
+ {GEN_CMD_CODE(_setBCNITV), NULL},
+ {GEN_CMD_CODE(_setMBIDCFG), NULL},
+ {GEN_CMD_CODE(_JoinBss), &rtw_joinbss_cmd23a_callback}, /*14*/
+ {GEN_CMD_CODE(_DisConnect), &rtw_disassoc_cmd23a_callback}, /*15*/
+ {GEN_CMD_CODE(_CreateBss), &rtw_createbss_cmd23a_callback},
+ {GEN_CMD_CODE(_SetOpMode), NULL},
+ {GEN_CMD_CODE(_SiteSurvey), &rtw_survey_cmd_callback23a}, /*18*/
+ {GEN_CMD_CODE(_SetAuth), NULL},
+
+ {GEN_CMD_CODE(_SetKey), NULL}, /*20*/
+ {GEN_CMD_CODE(_SetStaKey), &rtw_setstaKey_cmdrsp_callback23a},
+ {GEN_CMD_CODE(_SetAssocSta), &rtw_setassocsta_cmdrsp_callback23a},
+ {GEN_CMD_CODE(_DelAssocSta), NULL},
+ {GEN_CMD_CODE(_SetStaPwrState), NULL},
+ {GEN_CMD_CODE(_SetBasicRate), NULL}, /*25*/
+ {GEN_CMD_CODE(_GetBasicRate), NULL},
+ {GEN_CMD_CODE(_SetDataRate), NULL},
+ {GEN_CMD_CODE(_GetDataRate), NULL},
+ {GEN_CMD_CODE(_SetPhyInfo), NULL},
+
+ {GEN_CMD_CODE(_GetPhyInfo), NULL}, /*30*/
+ {GEN_CMD_CODE(_SetPhy), NULL},
+ {GEN_CMD_CODE(_GetPhy), NULL},
+ {GEN_CMD_CODE(_readRssi), NULL},
+ {GEN_CMD_CODE(_readGain), NULL},
+ {GEN_CMD_CODE(_SetAtim), NULL}, /*35*/
+ {GEN_CMD_CODE(_SetPwrMode), NULL},
+ {GEN_CMD_CODE(_JoinbssRpt), NULL},
+ {GEN_CMD_CODE(_SetRaTable), NULL},
+ {GEN_CMD_CODE(_GetRaTable), NULL},
+
+ {GEN_CMD_CODE(_GetCCXReport), NULL}, /*40*/
+ {GEN_CMD_CODE(_GetDTMReport), NULL},
+ {GEN_CMD_CODE(_GetTXRateStatistics), NULL},
+ {GEN_CMD_CODE(_SetUsbSuspend), NULL},
+ {GEN_CMD_CODE(_SetH2cLbk), NULL},
+ {GEN_CMD_CODE(_AddBAReq), NULL}, /*45*/
+ {GEN_CMD_CODE(_SetChannel), NULL}, /*46*/
+ {GEN_CMD_CODE(_SetTxPower), NULL},
+ {GEN_CMD_CODE(_SwitchAntenna), NULL},
+ {GEN_CMD_CODE(_SetCrystalCap), NULL},
+ {GEN_CMD_CODE(_SetSingleCarrierTx), NULL}, /*50*/
+
+ {GEN_CMD_CODE(_SetSingleToneTx), NULL}, /*51*/
+ {GEN_CMD_CODE(_SetCarrierSuppressionTx), NULL},
+ {GEN_CMD_CODE(_SetContinuousTx), NULL},
+ {GEN_CMD_CODE(_SwitchBandwidth), NULL}, /*54*/
+ {GEN_CMD_CODE(_TX_Beacon), NULL},/*55*/
+
+ {GEN_CMD_CODE(_Set_MLME_EVT), NULL},/*56*/
+ {GEN_CMD_CODE(_Set_Drv_Extra), NULL},/*57*/
+ {GEN_CMD_CODE(_Set_H2C_MSG), NULL},/*58*/
+ {GEN_CMD_CODE(_SetChannelPlan), NULL},/*59*/
+ {GEN_CMD_CODE(_LedBlink), NULL},/*60*/
+
+ {GEN_CMD_CODE(_SetChannelSwitch), NULL},/*61*/
+ {GEN_CMD_CODE(_TDLS), NULL},/*62*/
+};
+
+/*
+ * The caller and rtw_cmd_thread23a protect cmd_q with a spinlock,
+ * so no irqsave is necessary.
+ */
+
+int rtw_init_cmd_priv23a(struct cmd_priv *pcmdpriv)
+{
+ int res = _SUCCESS;
+
+ sema_init(&pcmdpriv->cmd_queue_sema, 0);
+ sema_init(&pcmdpriv->terminate_cmdthread_sema, 0);
+
+ _rtw_init_queue23a(&pcmdpriv->cmd_queue);
+
+ pcmdpriv->cmd_seq = 1;
+
+ pcmdpriv->cmd_allocated_buf = kzalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ,
+ GFP_KERNEL);
+
+ if (pcmdpriv->cmd_allocated_buf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
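+ /* align cmd_buf to a CMDBUFF_ALIGN_SZ boundary inside the
+ over-allocated buffer */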
+ pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ -
+ ((unsigned long)(pcmdpriv->cmd_allocated_buf) &
+ (CMDBUFF_ALIGN_SZ - 1));
+
+ pcmdpriv->rsp_allocated_buf = kzalloc(MAX_RSPSZ + 4, GFP_KERNEL);
+
+ if (!pcmdpriv->rsp_allocated_buf) {
+ res = _FAIL;
+ goto exit;
+ }
+
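+ /* align rsp_buf to a 4-byte boundary inside the over-allocated buffer */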
+ pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 -
+ ((unsigned long)(pcmdpriv->rsp_allocated_buf) & 3);
+
+ pcmdpriv->cmd_issued_cnt = 0;
+ pcmdpriv->cmd_done_cnt = 0;
+ pcmdpriv->rsp_cnt = 0;
+
+exit:
+
+ return res;
+}
+
+/* forward declaration */
+
+static void c2h_wk_callback(struct work_struct *work);
+int _rtw_init_evt_priv23a(struct evt_priv *pevtpriv)
+{
+ int res = _SUCCESS;
+
+ /* initialize the event sequence counter and the C2H work/queue */
+ atomic_set(&pevtpriv->event_seq, 0);
+ pevtpriv->evt_done_cnt = 0;
+
+ INIT_WORK(&pevtpriv->c2h_wk, c2h_wk_callback);
+ pevtpriv->c2h_wk_alive = false;
+ pevtpriv->c2h_queue = rtw_cbuf_alloc23a(C2H_QUEUE_MAX_LEN + 1);
+
+ return res;
+}
+
+void _rtw_free_evt_priv23a(struct evt_priv *pevtpriv)
+{
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ ("+_rtw_free_evt_priv23a\n"));
+ cancel_work_sync(&pevtpriv->c2h_wk);
+ while (pevtpriv->c2h_wk_alive)
+ msleep(10);
+
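+ /* drain any queued c2h events; the evtpriv pointer itself is used as
+ a sentinel entry and must not be freed */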
+ while (!rtw_cbuf_empty23a(pevtpriv->c2h_queue)) {
+ void *c2h;
+
+ c2h = rtw_cbuf_pop23a(pevtpriv->c2h_queue);
+ if (c2h && c2h != (void *)pevtpriv)
+ kfree(c2h);
+ }
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ ("-_rtw_free_evt_priv23a\n"));
+}
+
+void _rtw_free_cmd_priv23a(struct cmd_priv *pcmdpriv)
+{
+ if (pcmdpriv) {
+ kfree(pcmdpriv->cmd_allocated_buf);
+ kfree(pcmdpriv->rsp_allocated_buf);
+ }
+}
+
+/*
+ * Calling Context:
+ * rtw_enqueue_cmd23a can only be called from kernel thread context,
+ * since only spin_lock is used.
+ *
+ * ISR/callback functions must not call this sub-function.
+ */
+
+int _rtw_enqueue_cmd23a(struct rtw_queue *queue, struct cmd_obj *obj)
+{
+ unsigned long irqL;
+
+ if (obj == NULL)
+ goto exit;
+
+ spin_lock_irqsave(&queue->lock, irqL);
+
+ list_add_tail(&obj->list, &queue->queue);
+
+ spin_unlock_irqrestore(&queue->lock, irqL);
+
+exit:
+
+ return _SUCCESS;
+}
+
+u32 rtw_init_evt_priv23a(struct evt_priv *pevtpriv)
+{
+ int res;
+
+ res = _rtw_init_evt_priv23a(pevtpriv);
+
+ return res;
+}
+
+void rtw_free_evt_priv23a(struct evt_priv *pevtpriv)
+{
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ ("rtw_free_evt_priv23a\n"));
+ _rtw_free_evt_priv23a(pevtpriv);
+}
+
+void rtw_free_cmd_priv23a(struct cmd_priv *pcmdpriv)
+{
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ ("rtw_free_cmd_priv23a\n"));
+ _rtw_free_cmd_priv23a(pcmdpriv);
+}
+
+static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
+{
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ /* set to true to allow enqueuing cmd when hw_init_completed is false */
+ u8 bAllow = false;
+
+ /* To decide allow or not */
+ if (pcmdpriv->padapter->pwrctrlpriv.bHWPwrPindetect &&
+ !pcmdpriv->padapter->registrypriv.usbss_enable) {
+ if (cmd_obj->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) {
+ pdrvextra_cmd_parm =
+ (struct drvextra_cmd_parm *)cmd_obj->parmbuf;
+ if (pdrvextra_cmd_parm->ec_id ==
+ POWER_SAVING_CTRL_WK_CID)
+ bAllow = true;
+ }
+ }
+
+ if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan))
+ bAllow = true;
+
+ if ((!pcmdpriv->padapter->hw_init_completed && !bAllow) ||
+ !pcmdpriv->cmdthd_running)
+ return _FAIL;
+ return _SUCCESS;
+}
+
+u32 rtw_enqueue_cmd23a(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
+{
+ int res = _FAIL;
+ struct rtw_adapter *padapter = pcmdpriv->padapter;
+
+ if (!cmd_obj)
+ goto exit;
+
+ cmd_obj->padapter = padapter;
+
+ res = rtw_cmd_filter(pcmdpriv, cmd_obj);
+ if (res == _FAIL) {
+ rtw_free_cmd_obj23a(cmd_obj);
+ goto exit;
+ }
+
+ res = _rtw_enqueue_cmd23a(&pcmdpriv->cmd_queue, cmd_obj);
+
+ if (res == _SUCCESS)
+ up(&pcmdpriv->cmd_queue_sema);
+
+exit:
+ return res;
+}
+
+static struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
+{
+ struct cmd_obj *obj;
+ struct rtw_queue *queue = &pcmdpriv->cmd_queue;
+ unsigned long irqL;
+
+ spin_lock_irqsave(&queue->lock, irqL);
+ if (list_empty(&queue->queue))
+ obj = NULL;
+ else {
+ obj = container_of((&queue->queue)->next, struct cmd_obj, list);
+ list_del_init(&obj->list);
+ }
+
+ spin_unlock_irqrestore(&queue->lock, irqL);
+
+ return obj;
+}
+
+void rtw_cmd_clr_isr23a(struct cmd_priv *pcmdpriv)
+{
+ pcmdpriv->cmd_done_cnt++;
+}
+
+void rtw_free_cmd_obj23a(struct cmd_obj *pcmd)
+{
+
+ if (pcmd->cmdcode != _JoinBss_CMD_ &&
+ pcmd->cmdcode != _CreateBss_CMD_) {
+ /* free parmbuf in cmd_obj */
+ kfree(pcmd->parmbuf);
+ }
+
+ if (pcmd->rsp && pcmd->rspsz != 0) {
+ /* free rsp in cmd_obj */
+ kfree(pcmd->rsp);
+ }
+
+ kfree(pcmd);
+}
+
+int rtw_cmd_thread23a(void *context)
+{
+ u8 ret;
+ struct cmd_obj *pcmd;
+ u8 *pcmdbuf, *prspbuf;
+ u8 (*cmd_hdl)(struct rtw_adapter *padapter, u8 *pbuf);
+ void (*pcmd_callback)(struct rtw_adapter *dev, struct cmd_obj *pcmd);
+ struct rtw_adapter *padapter = (struct rtw_adapter *)context;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ allow_signal(SIGTERM);
+
+ pcmdbuf = pcmdpriv->cmd_buf;
+ prspbuf = pcmdpriv->rsp_buf;
+
+ pcmdpriv->cmdthd_running = true;
+ up(&pcmdpriv->terminate_cmdthread_sema);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ ("start r871x rtw_cmd_thread23a !!!!\n"));
+
+ while (1) {
+ if (down_interruptible(&pcmdpriv->cmd_queue_sema))
+ break;
+_next:
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
+ DBG_8723A("%s: DriverStopped(%d) SurpriseRemoved(%d) "
+ "break at line %d\n", __func__,
+ padapter->bDriverStopped,
+ padapter->bSurpriseRemoved, __LINE__);
+ break;
+ }
+
+ pcmd = rtw_dequeue_cmd(pcmdpriv);
+ if (!pcmd)
+ continue;
+
+ if (rtw_cmd_filter(pcmdpriv, pcmd) == _FAIL) {
+ pcmd->res = H2C_DROPPED;
+ goto post_process;
+ }
+
+ pcmdpriv->cmd_issued_cnt++;
+
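+ /* stage the parameters in the pre-aligned command buffer, rounding
+ the command size up to a multiple of 4 bytes */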
+ pcmd->cmdsz = ALIGN(pcmd->cmdsz, 4);
+
+ memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
+
+ if (pcmd->cmdcode < (sizeof(wlancmds)/sizeof(struct cmd_hdl))) {
+ cmd_hdl = wlancmds[pcmd->cmdcode].h2cfuns;
+
+ if (cmd_hdl) {
+ ret = cmd_hdl(pcmd->padapter, pcmdbuf);
+ pcmd->res = ret;
+ }
+
+ pcmdpriv->cmd_seq++;
+ } else
+ pcmd->res = H2C_PARAMETERS_ERROR;
+
+ cmd_hdl = NULL;
+
+post_process:
+ /* call the callback function for post-processing */
+ if (pcmd->cmdcode < (sizeof(rtw_cmd_callback) /
+ sizeof(struct _cmd_callback))) {
+ pcmd_callback =
+ rtw_cmd_callback[pcmd->cmdcode].callback;
+ if (!pcmd_callback) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ ("mlme_cmd_hdl(): pcmd_callback = "
+ "0x%p, cmdcode = 0x%x\n",
+ pcmd_callback, pcmd->cmdcode));
+ rtw_free_cmd_obj23a(pcmd);
+ } else {
+ /* todo: !!! fill rsp_buf to pcmd->rsp
+ if (pcmd->rsp != NULL) */
+ /* need to consider that cmd_obj may be
+ freed in rtw_cmd_callback */
+ pcmd_callback(pcmd->padapter, pcmd);
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("%s: cmdcode = 0x%x callback not defined!\n",
+ __func__, pcmd->cmdcode));
+ rtw_free_cmd_obj23a(pcmd);
+ }
+
+ if (signal_pending(current))
+ flush_signals(current);
+
+ goto _next;
+
+ }
+ pcmdpriv->cmdthd_running = false;
+
+ /* free all cmd_obj resources */
+ do {
+ pcmd = rtw_dequeue_cmd(pcmdpriv);
+ if (!pcmd)
+ break;
+
+ rtw_free_cmd_obj23a(pcmd);
+ } while (1);
+
+ up(&pcmdpriv->terminate_cmdthread_sema);
+
+ complete_and_exit(NULL, 0);
+}
+
+u8 rtw_sitesurvey_cmd23a(struct rtw_adapter *padapter,
+ struct cfg80211_ssid *ssid, int ssid_num,
+ struct rtw_ieee80211_channel *ch, int ch_num)
+{
+ u8 res = _FAIL;
+ struct cmd_obj *ph2c;
+ struct sitesurvey_parm *psurveyPara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_SCAN, 1);
+
+#ifdef CONFIG_8723AU_P2P
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ p2p_ps_wk_cmd23a(padapter, P2P_PS_SCAN, 1);
+#endif /* CONFIG_8723AU_P2P */
+
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!ph2c)
+ return _FAIL;
+
+ psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
+ if (!psurveyPara) {
+ kfree(ph2c);
+ return _FAIL;
+ }
+
+ rtw_free_network_queue23a(padapter, false);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ ("%s: flush network queue\n", __func__));
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara,
+ GEN_CMD_CODE(_SiteSurvey));
+
+ /* psurveyPara->bsslimit = 48; */
+ psurveyPara->scan_mode = pmlmepriv->scan_mode;
+
+ /* prepare ssid list */
+ if (ssid) {
+ int i;
+ for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
+ if (ssid[i].ssid_len) {
+ memcpy(&psurveyPara->ssid[i], &ssid[i],
+ sizeof(struct cfg80211_ssid));
+ psurveyPara->ssid_num++;
+ if (0)
+ DBG_8723A(FUNC_ADPT_FMT" ssid:(%s, %d)\n",
+ FUNC_ADPT_ARG(padapter),
+ psurveyPara->ssid[i].ssid,
+ psurveyPara->ssid[i].ssid_len);
+ }
+ }
+ }
+
+ /* prepare channel list */
+ if (ch) {
+ int i;
+ for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
+ if (ch[i].hw_value &&
+ !(ch[i].flags & IEEE80211_CHAN_DISABLED)) {
+ memcpy(&psurveyPara->ch[i], &ch[i],
+ sizeof(struct rtw_ieee80211_channel));
+ psurveyPara->ch_num++;
+ if (0)
+ DBG_8723A(FUNC_ADPT_FMT" ch:%u\n",
+ FUNC_ADPT_ARG(padapter),
+ psurveyPara->ch[i].hw_value);
+ }
+ }
+ }
+
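+ /* mark the scan as in progress before enqueuing; it is cleared
+ again below if the enqueue fails */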
+ set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+
+ if (res == _SUCCESS) {
+ mod_timer(&pmlmepriv->scan_to_timer, jiffies +
+ msecs_to_jiffies(SCANNING_TIMEOUT));
+
+ rtw_led_control(padapter, LED_CTL_SITE_SURVEY);
+
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+ } else
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+
+ return res;
+}
+
+void rtw_getbbrfreg_cmdrsp_callback23a(struct rtw_adapter *padapter,
+ struct cmd_obj *pcmd)
+{
+ kfree(pcmd->parmbuf);
+ kfree(pcmd);
+}
+
+u8 rtw_createbss_cmd23a(struct rtw_adapter *padapter)
+{
+ struct cmd_obj *pcmd;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *pdev_network;
+ u8 res = _SUCCESS;
+
+ pdev_network = &padapter->registrypriv.dev_network;
+
+ rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+
+ if (pmlmepriv->assoc_ssid.ssid_len == 0) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ (" createbss for Any SSid:%s\n",
+ pmlmepriv->assoc_ssid.ssid));
+ } else {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ (" createbss for SSid:%s\n",
+ pmlmepriv->assoc_ssid.ssid));
+ }
+
+ pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!pcmd) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ INIT_LIST_HEAD(&pcmd->list);
+ pcmd->cmdcode = _CreateBss_CMD_;
+ pcmd->parmbuf = (unsigned char *)pdev_network;
+ pcmd->cmdsz = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ pdev_network->Length = pcmd->cmdsz;
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, pcmd);
+
+exit:
+
+ return res;
+}
+
+u8 rtw_joinbss_cmd23a(struct rtw_adapter *padapter,
+ struct wlan_network * pnetwork)
+{
+ u8 *auth, res = _SUCCESS;
+ uint t_len = 0;
+ struct wlan_bssid_ex *psecnetwork;
+ struct cmd_obj *pcmd;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ enum ndis_802_11_net_infra ndis_network_mode;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ ndis_network_mode = pnetwork->network.InfrastructureMode;
+
+ rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+
+ if (pmlmepriv->assoc_ssid.ssid_len == 0) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ ("+Join cmd: Any SSid\n"));
+ } else {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_,
+ ("+Join cmd: SSid =[%s]\n",
+ pmlmepriv->assoc_ssid.ssid));
+ }
+
+ pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!pcmd) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("rtw_joinbss_cmd23a: memory allocate for cmd_obj "
+ "fail!!!\n"));
+ goto exit;
+ }
+ /* the IEs use a fixed-size buffer */
+ t_len = sizeof(struct wlan_bssid_ex);
+
+ /* for hidden ap to set fw_state here */
+ if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE)) {
+ switch (ndis_network_mode) {
+ case Ndis802_11IBSS:
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ break;
+ case Ndis802_11Infrastructure:
+ set_fwstate(pmlmepriv, WIFI_STATION_STATE);
+ break;
+ case Ndis802_11APMode:
+ case Ndis802_11AutoUnknown:
+ case Ndis802_11InfrastructureMax:
+ break;
+ }
+ }
+
+ psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
+ if (!psecnetwork) {
+ kfree(pcmd);
+
+ res = _FAIL;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("rtw_joinbss_cmd23a :psecnetwork == NULL!!!\n"));
+
+ goto exit;
+ }
+
+ memset(psecnetwork, 0, t_len);
+
+ memcpy(psecnetwork, &pnetwork->network,
+ get_wlan_bssid_ex_sz(&pnetwork->network));
+
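+ /* cache the IEs (beyond the 12 fixed bytes) for the authenticator;
+ byte 0 records the original IE length */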
+ auth = &psecuritypriv->authenticator_ie[0];
+ psecuritypriv->authenticator_ie[0] =
+ (unsigned char)psecnetwork->IELength;
+
+ if ((psecnetwork->IELength - 12) < (256 - 1)) {
+ memcpy(&psecuritypriv->authenticator_ie[1],
+ &psecnetwork->IEs[12], psecnetwork->IELength - 12);
+ } else {
+ memcpy(&psecuritypriv->authenticator_ie[1],
+ &psecnetwork->IEs[12], 256 - 1);
+ }
+
+ psecnetwork->IELength = 0;
+ /* Added by Albert 2009/02/18 */
+ /* If the driver is not connecting by bssid, copy the target AP's
+ * MAC address into assoc_bssid so that the bssid information is
+ * still available for PMKID list searching. */
+
+ if (!pmlmepriv->assoc_by_bssid)
+ ether_addr_copy(&pmlmepriv->assoc_bssid[0],
+ &pnetwork->network.MacAddress[0]);
+
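+ /* rebuild the security IEs for the join request from the local
+ security settings */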
+ psecnetwork->IELength =
+ rtw_restruct_sec_ie23a(padapter, &pnetwork->network.IEs[0],
+ &psecnetwork->IEs[0],
+ pnetwork->network.IELength);
+
+ pqospriv->qos_option = 0;
+
+ if (pregistrypriv->wmm_enable) {
+ u32 tmp_len;
+
+ tmp_len = rtw_restruct_wmm_ie23a(padapter,
+ &pnetwork->network.IEs[0],
+ &psecnetwork->IEs[0],
+ pnetwork->network.IELength,
+ psecnetwork->IELength);
+
+ if (psecnetwork->IELength != tmp_len) {
+ psecnetwork->IELength = tmp_len;
+ /* There is WMM IE in this corresp. beacon */
+ pqospriv->qos_option = 1;
+ } else {
+ /* There is no WMM IE in this corresp. beacon */
+ pqospriv->qos_option = 0;
+ }
+ }
+
+ phtpriv->ht_option = false;
+ if (pregistrypriv->ht_enable) {
+ /* Added by Albert 2010/06/23 */
+ /* For the WEP mode, we will use the bg mode to do
+ the connection to avoid some IOT issue. */
+ /* Especially for Realtek 8192u SoftAP. */
+ if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
+ /* rtw_restructure_ht_ie23a */
+ rtw_restructure_ht_ie23a(padapter,
+ &pnetwork->network.IEs[0],
+ &psecnetwork->IEs[0],
+ pnetwork->network.IELength,
+ &psecnetwork->IELength);
+ }
+ }
+
+ pmlmeinfo->assoc_AP_vendor =
+ check_assoc_AP23a(pnetwork->network.IEs,
+ pnetwork->network.IELength);
+
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_TENDA)
+ padapter->pwrctrlpriv.smart_ps = 0;
+ else
+ padapter->pwrctrlpriv.smart_ps =
+ padapter->registrypriv.smart_ps;
+
+ DBG_8723A("%s: smart_ps =%d\n", __func__,
+ padapter->pwrctrlpriv.smart_ps);
+
+ /* get cmdsz before endian conversion */
+ pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);
+
+ INIT_LIST_HEAD(&pcmd->list);
+ pcmd->cmdcode = _JoinBss_CMD_;/* GEN_CMD_CODE(_JoinBss) */
+ pcmd->parmbuf = (unsigned char *)psecnetwork;
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, pcmd);
+exit:
+
+ return res;
+}
+
+u8 rtw_disassoc_cmd23a(struct rtw_adapter *padapter, u32 deauth_timeout_ms,
+ bool enqueue)
+{
+ struct cmd_obj *cmdobj = NULL;
+ struct disconnect_parm *param = NULL;
+ struct cmd_priv *cmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_,
+ ("+rtw_disassoc_cmd23a\n"));
+
+ /* prepare cmd parameter */
+ param = kzalloc(sizeof(*param), GFP_ATOMIC);
+ if (!param) {
+ res = _FAIL;
+ goto exit;
+ }
+ param->deauth_timeout_ms = deauth_timeout_ms;
+
+ if (enqueue) {
+ /* need enqueue, prepare cmd_obj and enqueue */
+ cmdobj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!cmdobj) {
+ res = _FAIL;
+ kfree(param);
+ goto exit;
+ }
+ init_h2fwcmd_w_parm_no_rsp(cmdobj, param, _DisConnect_CMD_);
+ res = rtw_enqueue_cmd23a(cmdpriv, cmdobj);
+ } else {
+ /* no need to enqueue, do the cmd hdl directly and
+ free cmd parameter */
+ if (disconnect_hdl23a(padapter, (u8 *)param) != H2C_SUCCESS)
+ res = _FAIL;
+ kfree(param);
+ }
+
+exit:
+ return res;
+}
+
+u8 rtw_setopmode_cmd23a(struct rtw_adapter *padapter,
+ enum ndis_802_11_net_infra networktype)
+{
+ struct cmd_obj *ph2c;
+ struct setopmode_parm *psetop;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ if (!ph2c) {
+ res = _FAIL;
+ goto exit;
+ }
+ psetop = kzalloc(sizeof(struct setopmode_parm), GFP_KERNEL);
+
+ if (!psetop) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetop, _SetOpMode_CMD_);
+ psetop->mode = (u8)networktype;
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+exit:
+ return res;
+}
+
+u8 rtw_setstakey_cmd23a(struct rtw_adapter *padapter, u8 *psta, u8 unicast_key)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct set_stakey_rsp *psetstakey_rsp = NULL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct sta_info *sta = (struct sta_info *)psta;
+ u8 res = _SUCCESS;
+
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ if (!ph2c) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
+ if (!psetstakey_para) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp), GFP_KERNEL);
+ if (!psetstakey_rsp) {
+ kfree(ph2c);
+ kfree(psetstakey_para);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
+ ph2c->rsp = (u8 *) psetstakey_rsp;
+ ph2c->rspsz = sizeof(struct set_stakey_rsp);
+
+ ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ psetstakey_para->algorithm =
+ (unsigned char)psecuritypriv->dot11PrivacyAlgrthm;
+ } else {
+ GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm,
+ false);
+ }
+
+ if (unicast_key) {
+ memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
+ } else {
+ int idx = psecuritypriv->dot118021XGrpKeyid;
+ memcpy(&psetstakey_para->key,
+ &psecuritypriv->dot118021XGrpKey[idx].skey, 16);
+ }
+
+ /* jeff: set this because at least the sw key is ready */
+ padapter->securitypriv.busetkipkey = true;
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+}
+
+u8 rtw_clearstakey_cmd23a(struct rtw_adapter *padapter, u8 *psta, u8 entry,
+ u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct set_stakey_rsp *psetstakey_rsp = NULL;
+ struct sta_info *sta = (struct sta_info *)psta;
+ u8 res = _SUCCESS;
+
+ if (!enqueue) {
+ clear_cam_entry23a(padapter, entry);
+ } else {
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ if (!ph2c) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_para = kzalloc(sizeof(struct set_stakey_parm),
+ GFP_KERNEL);
+ if (!psetstakey_para) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp),
+ GFP_KERNEL);
+ if (!psetstakey_rsp) {
+ kfree(ph2c);
+ kfree(psetstakey_para);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para,
+ _SetStaKey_CMD_);
+ ph2c->rsp = (u8 *) psetstakey_rsp;
+ ph2c->rspsz = sizeof(struct set_stakey_rsp);
+
+ ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
+
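+ /* the key is cleared by programming the entry with no privacy
+ algorithm */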
+ psetstakey_para->algorithm = _NO_PRIVACY_;
+
+ psetstakey_para->id = entry;
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+ }
+exit:
+ return res;
+}
+
+u8 rtw_addbareq_cmd23a(struct rtw_adapter *padapter, u8 tid, u8 *addr)
+{
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct cmd_obj *ph2c;
+ struct addBaReq_parm *paddbareq_parm;
+ u8 res = _SUCCESS;
+
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!ph2c) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_ATOMIC);
+ if (!paddbareq_parm) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ paddbareq_parm->tid = tid;
+ ether_addr_copy(paddbareq_parm->addr, addr);
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm,
+ GEN_CMD_CODE(_AddBAReq));
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+exit:
+ return res;
+}
+
+u8 rtw_dynamic_chk_wk_cmd23a(struct rtw_adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!ph2c) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
+ if (!pdrvextra_cmd_parm) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = DYNAMIC_CHK_WK_CID;
+ pdrvextra_cmd_parm->type_size = 0;
+ pdrvextra_cmd_parm->pbuf = (u8 *)padapter;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm,
+ GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+exit:
+
+ return res;
+}
+
+/*
+ * This is only ever called from on_action_spct23a_ch_switch(), which is
+ * itself currently unused.
+ */
+u8 rtw_set_ch_cmd23a(struct rtw_adapter *padapter, u8 ch, u8 bw, u8 ch_offset,
+ u8 enqueue)
+{
+ struct cmd_obj *pcmdobj;
+ struct set_ch_parm *set_ch_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+ DBG_8723A(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev), ch, bw, ch_offset);
+
+ /* check input parameter */
+
+ /* prepare cmd parameter */
+ set_ch_parm = kzalloc(sizeof(*set_ch_parm), GFP_KERNEL);
+ if (!set_ch_parm) {
+ res = _FAIL;
+ goto exit;
+ }
+ set_ch_parm->ch = ch;
+ set_ch_parm->bw = bw;
+ set_ch_parm->ch_offset = ch_offset;
+
+ if (enqueue) {
+ /* need enqueue, prepare cmd_obj and enqueue */
+ pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ if (!pcmdobj) {
+ kfree(set_ch_parm);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(pcmdobj, set_ch_parm,
+ GEN_CMD_CODE(_SetChannel));
+ res = rtw_enqueue_cmd23a(pcmdpriv, pcmdobj);
+ } else {
+ /* no need to enqueue, do the cmd hdl directly and
+ free cmd parameter */
+ if (set_ch_hdl23a(padapter, (u8 *)set_ch_parm) != H2C_SUCCESS)
+ res = _FAIL;
+
+ kfree(set_ch_parm);
+ }
+
+ /* do something based on res... */
+exit:
+
+ DBG_8723A(FUNC_NDEV_FMT" res:%u\n", FUNC_NDEV_ARG(padapter->pnetdev),
+ res);
+
+ return res;
+}
+
+static void traffic_status_watchdog(struct rtw_adapter *padapter)
+{
+ u8 bEnterPS;
+ u8 bBusyTraffic = false, bTxBusyTraffic = false, bRxBusyTraffic = false;
+ u8 bHigherBusyTraffic = false, bHigherBusyRxTraffic = false;
+ u8 bHigherBusyTxTraffic = false;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+#ifndef CONFIG_8723AU_BT_COEXIST
+ int BusyThreshold = 100;
+#endif
+ /* Determine if our traffic is busy now */
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+#ifdef CONFIG_8723AU_BT_COEXIST
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 50 ||
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 50)
+#else /* !CONFIG_8723AU_BT_COEXIST */
+ /* if we raised bBusyTraffic in the last watchdog period,
+ use a lower threshold */
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
+ BusyThreshold = 75;
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > BusyThreshold ||
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > BusyThreshold)
+#endif /* !CONFIG_8723AU_BT_COEXIST */
+ {
+ bBusyTraffic = true;
+
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod >
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ bRxBusyTraffic = true;
+ else
+ bTxBusyTraffic = true;
+ }
+
+ /* Higher Tx/Rx data. */
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 4000) {
+ bHigherBusyTraffic = true;
+
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod >
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ bHigherBusyRxTraffic = true;
+ else
+ bHigherBusyTxTraffic = true;
+ }
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ if (!BT_1Ant(padapter))
+#endif
+ {
+ /* check traffic for powersaving. */
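+ /* stay out of LPS if more than 8 tx + unicast-rx frames, or
+ more than 2 unicast-rx frames, were seen in the last period */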
+ if (((pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod +
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
+ (pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod > 2))
+ bEnterPS = false;
+ else
+ bEnterPS = true;
+
+ /* LeisurePS only work in infra mode. */
+ if (bEnterPS)
+ LPS_Enter23a(padapter);
+ else
+ LPS_Leave23a(padapter);
+ }
+ } else
+ LPS_Leave23a(padapter);
+
+ pmlmepriv->LinkDetectInfo.NumRxOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bTxBusyTraffic = bTxBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bRxBusyTraffic = bRxBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic;
+}
+
+void dynamic_chk_wk_hdl(struct rtw_adapter *padapter, u8 *pbuf, int sz)
+{
+ struct mlme_priv *pmlmepriv;
+
+ padapter = (struct rtw_adapter *)pbuf;
+ pmlmepriv = &padapter->mlmepriv;
+
+#ifdef CONFIG_8723AU_AP_MODE
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ expire_timeout_chk23a(padapter);
+#endif
+
+ rtw_hal_sreset_xmit_status_check23a(padapter);
+
+ linked_status_chk23a(padapter);
+ traffic_status_watchdog(padapter);
+
+ rtw_hal_dm_watchdog23a(padapter);
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ /* BT-Coexist */
+ BT_CoexistMechanism(padapter);
+#endif
+}
+
+void lps_ctrl_wk_hdl(struct rtw_adapter *padapter, u8 lps_ctrl_type)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 mstatus;
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))
+ return;
+
+ switch (lps_ctrl_type) {
+ case LPS_CTRL_SCAN:
+#ifdef CONFIG_8723AU_BT_COEXIST
+ BT_WifiScanNotify(padapter, true);
+ if (!BT_1Ant(padapter))
+#endif
+ {
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ LPS_Leave23a(padapter);
+ }
+ break;
+ case LPS_CTRL_JOINBSS:
+ LPS_Leave23a(padapter);
+ break;
+ case LPS_CTRL_CONNECT:
+ mstatus = 1;/* connect */
+ /* Reset LPS Setting */
+ padapter->pwrctrlpriv.LpsIdleCount = 0;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_H2C_FW_JOINBSSRPT,
+ (u8 *)&mstatus);
+#ifdef CONFIG_8723AU_BT_COEXIST
+ BT_WifiMediaStatusNotify(padapter, mstatus);
+#endif
+ break;
+ case LPS_CTRL_DISCONNECT:
+ mstatus = 0;/* disconnect */
+#ifdef CONFIG_8723AU_BT_COEXIST
+ BT_WifiMediaStatusNotify(padapter, mstatus);
+ if (!BT_1Ant(padapter))
+#endif
+ {
+ LPS_Leave23a(padapter);
+ }
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_H2C_FW_JOINBSSRPT,
+ (u8 *)&mstatus);
+ break;
+ case LPS_CTRL_SPECIAL_PACKET:
+ pwrpriv->DelayLPSLastTimeStamp = jiffies;
+#ifdef CONFIG_8723AU_BT_COEXIST
+ BT_SpecialPacketNotify(padapter);
+ if (!BT_1Ant(padapter))
+#endif
+ {
+ LPS_Leave23a(padapter);
+ }
+ break;
+ case LPS_CTRL_LEAVE:
+#ifdef CONFIG_8723AU_BT_COEXIST
+ BT_LpsLeave(padapter);
+ if (!BT_1Ant(padapter))
+#endif
+ {
+ LPS_Leave23a(padapter);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+u8 rtw_lps_ctrl_wk_cmd23a(struct rtw_adapter *padapter,
+ u8 lps_ctrl_type, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ if (enqueue) {
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!ph2c) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ GFP_ATOMIC);
+ if (!pdrvextra_cmd_parm) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = LPS_CTRL_WK_CID;
+ pdrvextra_cmd_parm->type_size = lps_ctrl_type;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm,
+ GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+ } else
+ lps_ctrl_wk_hdl(padapter, lps_ctrl_type);
+exit:
+
+ return res;
+}
+
+static void power_saving_wk_hdl(struct rtw_adapter *padapter, u8 *pbuf, int sz)
+{
+ rtw_ps_processor23a(padapter);
+}
+
+#ifdef CONFIG_8723AU_P2P
+u8 p2p_protocol_wk_cmd23a(struct rtw_adapter *padapter, int intCmdType)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return res;
+
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!ph2c) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ GFP_ATOMIC);
+ if (!pdrvextra_cmd_parm) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = P2P_PROTO_WK_CID;
+ pdrvextra_cmd_parm->type_size = intCmdType; /* used as the command type */
+ pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm,
+ GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+exit:
+
+ return res;
+}
+#endif /* CONFIG_8723AU_P2P */
+
+u8 rtw_ps_cmd23a(struct rtw_adapter *padapter)
+{
+ struct cmd_obj *ppscmd;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+ ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!ppscmd) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ GFP_ATOMIC);
+ if (!pdrvextra_cmd_parm) {
+ kfree(ppscmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = POWER_SAVING_CTRL_WK_CID;
+ pdrvextra_cmd_parm->pbuf = NULL;
+ init_h2fwcmd_w_parm_no_rsp(ppscmd, pdrvextra_cmd_parm,
+ GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ppscmd);
+exit:
+
+ return res;
+}
+
+#ifdef CONFIG_8723AU_AP_MODE
+
+static void rtw_chk_hi_queue_hdl(struct rtw_adapter *padapter)
+{
+ int cnt = 0;
+ struct sta_info *psta_bmc;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
+ if (!psta_bmc)
+ return;
+
+ if (psta_bmc->sleepq_len == 0) {
+ u8 val = 0;
+
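+ /* poll the hardware high queue for up to ~1 second; once it is
+ empty, clear the bc/mc TIM bit and update the beacon, otherwise
+ re-queue this check */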
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+
+ while (!val) {
+ msleep(100);
+
+ cnt++;
+
+ if (cnt > 10)
+ break;
+
+ rtw23a_hal_get_hwreg(padapter,
+ HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+ }
+
+ if (cnt <= 10) {
+ pstapriv->tim_bitmap &= ~BIT(0);
+ pstapriv->sta_dz_bitmap &= ~BIT(0);
+
+ update_beacon23a(padapter, _TIM_IE_, NULL, false);
+ } else /* re check again */
+ rtw_chk_hi_queue_cmd23a(padapter);
+ }
+}
+
+u8 rtw_chk_hi_queue_cmd23a(struct rtw_adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!ph2c) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ GFP_ATOMIC);
+ if (!pdrvextra_cmd_parm) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = CHECK_HIQ_WK_CID;
+ pdrvextra_cmd_parm->type_size = 0;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm,
+ GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+exit:
+
+ return res;
+}
+#endif
+
+u8 rtw_c2h_wk_cmd23a(struct rtw_adapter *padapter, u8 *c2h_evt)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!ph2c) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ GFP_ATOMIC);
+ if (!pdrvextra_cmd_parm) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = C2H_WK_CID;
+ pdrvextra_cmd_parm->type_size = c2h_evt ? 16 : 0;
+ pdrvextra_cmd_parm->pbuf = c2h_evt;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm,
+ GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+}
+
+s32 c2h_evt_hdl(struct rtw_adapter *adapter, struct c2h_evt_hdr *c2h_evt,
+ c2h_id_filter filter)
+{
+ s32 ret = _FAIL;
+ u8 buf[16];
+
+ if (!c2h_evt) {
+ /* No c2h event in cmd_obj, read c2h event before handling*/
+ if (c2h_evt_read23a(adapter, buf) == _SUCCESS) {
+ c2h_evt = (struct c2h_evt_hdr *)buf;
+
+ if (filter && !filter(c2h_evt->id))
+ goto exit;
+
+ ret = rtw_hal_c2h_handler23a(adapter, c2h_evt);
+ }
+ } else {
+
+ if (filter && !filter(c2h_evt->id))
+ goto exit;
+
+ ret = rtw_hal_c2h_handler23a(adapter, c2h_evt);
+ }
+exit:
+ return ret;
+}
+
+static void c2h_wk_callback(struct work_struct *work)
+{
+ struct evt_priv *evtpriv;
+ struct rtw_adapter *adapter;
+ struct c2h_evt_hdr *c2h_evt;
+ c2h_id_filter ccx_id_filter;
+
+ evtpriv = container_of(work, struct evt_priv, c2h_wk);
+ adapter = container_of(evtpriv, struct rtw_adapter, evtpriv);
+ ccx_id_filter = rtw_hal_c2h_id_filter_ccx23a(adapter);
+
+ evtpriv->c2h_wk_alive = true;
+
+ while (!rtw_cbuf_empty23a(evtpriv->c2h_queue)) {
+ c2h_evt = (struct c2h_evt_hdr *)
+ rtw_cbuf_pop23a(evtpriv->c2h_queue);
+ if (c2h_evt) {
+ /* This C2H event is read, clear it */
+ c2h_evt_clear23a(adapter);
+ } else {
+ /* This C2H event is not read, read & clear now */
+ c2h_evt = kmalloc(16, GFP_ATOMIC);
+ if (!c2h_evt)
+ continue;
+ if (c2h_evt_read23a(adapter, (u8 *)c2h_evt) != _SUCCESS) {
+ kfree(c2h_evt);
+ continue;
+ }
+ }
+
+ /* Special pointer to trigger c2h_evt_clear23a only */
+ if ((void *)c2h_evt == (void *)evtpriv)
+ continue;
+
+ if (!c2h_evt_exist(c2h_evt)) {
+ kfree(c2h_evt);
+ continue;
+ }
+
+ if (ccx_id_filter(c2h_evt->id)) {
+ /* Handle CCX report here */
+ rtw_hal_c2h_handler23a(adapter, c2h_evt);
+ kfree(c2h_evt);
+ } else {
+ /* Enqueue into cmd_thread for others */
+ rtw_c2h_wk_cmd23a(adapter, (u8 *)c2h_evt);
+ }
+ }
+
+ evtpriv->c2h_wk_alive = false;
+}
+
+u8 rtw_drvextra_cmd_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf)
+{
+ struct drvextra_cmd_parm *pdrvextra_cmd;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ pdrvextra_cmd = (struct drvextra_cmd_parm *)pbuf;
+
+ switch (pdrvextra_cmd->ec_id) {
+ case DYNAMIC_CHK_WK_CID:
+ dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf,
+ pdrvextra_cmd->type_size);
+ break;
+ case POWER_SAVING_CTRL_WK_CID:
+ power_saving_wk_hdl(padapter, pdrvextra_cmd->pbuf,
+ pdrvextra_cmd->type_size);
+ break;
+ case LPS_CTRL_WK_CID:
+ lps_ctrl_wk_hdl(padapter, (u8)pdrvextra_cmd->type_size);
+ break;
+#ifdef CONFIG_8723AU_P2P
+ case P2P_PS_WK_CID:
+ p2p_ps_wk_hdl23a(padapter, pdrvextra_cmd->type_size);
+ break;
+ case P2P_PROTO_WK_CID:
+ /* Commented by Albert 2011/07/01 */
+ /* I used the type_size as the type command */
+ p2p_protocol_wk_hdl23a(padapter, pdrvextra_cmd->type_size);
+ break;
+#endif /* CONFIG_8723AU_P2P */
+#ifdef CONFIG_8723AU_AP_MODE
+ case CHECK_HIQ_WK_CID:
+ rtw_chk_hi_queue_hdl(padapter);
+ break;
+#endif /* CONFIG_8723AU_AP_MODE */
+ case C2H_WK_CID:
+ c2h_evt_hdl(padapter,
+ (struct c2h_evt_hdr *)pdrvextra_cmd->pbuf, NULL);
+ break;
+
+ default:
+ break;
+ }
+
+ if (pdrvextra_cmd->pbuf && (pdrvextra_cmd->type_size > 0)) {
+ kfree(pdrvextra_cmd->pbuf);
+ pdrvextra_cmd->pbuf = NULL;
+ }
+
+ return H2C_SUCCESS;
+}
+
+void rtw_survey_cmd_callback23a(struct rtw_adapter *padapter,
+ struct cmd_obj *pcmd)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (pcmd->res == H2C_DROPPED) {
+ /* TODO: cancel timer and do timeout handler directly... */
+ /* need to make the timeout handler OS independent */
+ mod_timer(&pmlmepriv->scan_to_timer,
+ jiffies + msecs_to_jiffies(1));
+ } else if (pcmd->res != H2C_SUCCESS) {
+ mod_timer(&pmlmepriv->scan_to_timer,
+ jiffies + msecs_to_jiffies(1));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\n ********Error: MgntActrtw_set_802_11_bssid23a_"
+ "LIST_SCAN Fail ************\n\n."));
+ }
+
+ /* free cmd */
+ rtw_free_cmd_obj23a(pcmd);
+}
+
+void rtw_disassoc_cmd23a_callback(struct rtw_adapter *padapter,
+ struct cmd_obj *pcmd)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (pcmd->res != H2C_SUCCESS) {
+ spin_lock_bh(&pmlmepriv->lock);
+ set_fwstate(pmlmepriv, _FW_LINKED);
+ spin_unlock_bh(&pmlmepriv->lock);
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\n ***Error: disconnect_cmd_callback Fail ***\n."));
+ return;
+ }
+
+ /* free cmd */
+ rtw_free_cmd_obj23a(pcmd);
+}
+
+void rtw_joinbss_cmd23a_callback(struct rtw_adapter *padapter,
+ struct cmd_obj *pcmd)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (pcmd->res == H2C_DROPPED) {
+ /* TODO: cancel timer and do timeout handler directly... */
+ /* need to make the timeout handler OS independent */
+ mod_timer(&pmlmepriv->assoc_timer,
+ jiffies + msecs_to_jiffies(1));
+ } else if (pcmd->res != H2C_SUCCESS) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("********Error:rtw_select_and_join_from_scanned_"
+ "queue Wait Sema Fail ************\n"));
+ mod_timer(&pmlmepriv->assoc_timer,
+ jiffies + msecs_to_jiffies(1));
+ }
+
+ rtw_free_cmd_obj23a(pcmd);
+}
+
+void rtw_createbss_cmd23a_callback(struct rtw_adapter *padapter,
+ struct cmd_obj *pcmd)
+{
+ struct sta_info *psta;
+ struct wlan_network *pwlan;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
+
+ if (pcmd->res != H2C_SUCCESS) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\n ********Error: rtw_createbss_cmd23a_callback "
+ "Fail ************\n\n."));
+ mod_timer(&pmlmepriv->assoc_timer,
+ jiffies + msecs_to_jiffies(1));
+ }
+
+ del_timer_sync(&pmlmepriv->assoc_timer);
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ psta = rtw_get_stainfo23a(&padapter->stapriv,
+ pnetwork->MacAddress);
+ if (!psta) {
+ psta = rtw_alloc_stainfo23a(&padapter->stapriv,
+ pnetwork->MacAddress);
+ if (!psta) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\nCan't alloc sta_info when "
+ "createbss_cmd_callback\n"));
+ goto createbss_cmd_fail;
+ }
+ }
+
+ rtw_indicate_connect23a(padapter);
+ } else {
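+ /* non-AP (IBSS) case: keep a copy of the created network in the
+ scanned queue and mirror it into cur_network */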
+ pwlan = rtw_alloc_network(pmlmepriv);
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+ if (!pwlan) {
+ pwlan = rtw_get_oldest_wlan_network23a(&pmlmepriv->scanned_queue);
+ if (!pwlan) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\n Error: can't get pwlan in "
+ "rtw23a_joinbss_event_cb\n"));
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ goto createbss_cmd_fail;
+ }
+ pwlan->last_scanned = jiffies;
+ } else {
+ list_add_tail(&pwlan->list,
+ &pmlmepriv->scanned_queue.queue);
+ }
+
+ pnetwork->Length = get_wlan_bssid_ex_sz(pnetwork);
+ memcpy(&pwlan->network, pnetwork, pnetwork->Length);
+ /* pwlan->fixed = true; */
+
+ /* list_add_tail(&pwlan->list,
+ &pmlmepriv->scanned_queue.queue); */
+
+ /* copy pdev_network information to
+ pmlmepriv->cur_network */
+ memcpy(&tgt_network->network, pnetwork,
+ get_wlan_bssid_ex_sz(pnetwork));
+
+ /* reset DSConfig */
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ /* we will set _FW_LINKED when one more sta joins
+ us (rtw_stassoc_event_callback23a) */
+ }
+
+createbss_cmd_fail:
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ rtw_free_cmd_obj23a(pcmd);
+}
+
+void rtw_setstaKey_cmdrsp_callback23a(struct rtw_adapter *padapter,
+ struct cmd_obj *pcmd)
+{
+ struct sta_priv *pstapriv;
+ struct set_stakey_rsp *psetstakey_rsp;
+ struct sta_info *psta;
+
+ pstapriv = &padapter->stapriv;
+ psetstakey_rsp = (struct set_stakey_rsp *)pcmd->rsp;
+ psta = rtw_get_stainfo23a(pstapriv, psetstakey_rsp->addr);
+
+ if (!psta) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\nERROR: rtw_setstaKey_cmdrsp_callback23a => "
+ "can't get sta_info\n\n"));
+ goto exit;
+ }
+
+exit:
+
+ rtw_free_cmd_obj23a(pcmd);
+}
+
+void rtw_setassocsta_cmdrsp_callback23a(struct rtw_adapter *padapter,
+ struct cmd_obj *pcmd)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct set_assocsta_parm *passocsta_parm;
+ struct set_assocsta_rsp *passocsta_rsp;
+ struct sta_info *psta;
+
+ passocsta_parm = (struct set_assocsta_parm *)pcmd->parmbuf;
+ passocsta_rsp = (struct set_assocsta_rsp *)pcmd->rsp;
+ psta = rtw_get_stainfo23a(pstapriv, passocsta_parm->addr);
+
+ if (!psta) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\nERROR: setassocsta_cmdrsp_callbac => can't "
+ "get sta_info\n\n"));
+ goto exit;
+ }
+
+ psta->aid = psta->mac_id = passocsta_rsp->cam_id;
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE) &&
+ check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ set_fwstate(pmlmepriv, _FW_LINKED);
+ spin_unlock_bh(&pmlmepriv->lock);
+
+exit:
+ rtw_free_cmd_obj23a(pcmd);
+}
+
+void rtw_getrttbl_cmd_cmdrsp_callback(struct rtw_adapter *padapter,
+ struct cmd_obj *pcmd)
+{
+ rtw_free_cmd_obj23a(pcmd);
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_efuse.c b/drivers/staging/rtl8723au/core/rtw_efuse.c
new file mode 100644
index 000000000000..35b177fd0510
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_efuse.c
@@ -0,0 +1,716 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_EFUSE_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <rtw_efuse.h>
+
+/*------------------------Define local variable------------------------------*/
+
+/* */
+#define REG_EFUSE_CTRL 0x0030
+#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
+/* */
+
+/*-----------------------------------------------------------------------------
+ * Function: Efuse_PowerSwitch23a
+ *
+ * Overview:	Switch the e-fuse power state.  Before a write operation
+ *		the e-fuse must be in the power-on state; when writing is
+ *		finished, switch back to 500k mode and disable the 2.5V LDO.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/17/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void
+Efuse_PowerSwitch23a(
+ struct rtw_adapter * pAdapter,
+ u8 bWrite,
+ u8 PwrState)
+{
+ pAdapter->HalFunc.EfusePowerSwitch(pAdapter, bWrite, PwrState);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: efuse_GetCurrentSize23a
+ *
+ * Overview:	Get the current efuse size.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/16/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+u16
+Efuse_GetCurrentSize23a(struct rtw_adapter *pAdapter, u8 efuseType)
+{
+ u16 ret = 0;
+
+ ret = pAdapter->HalFunc.EfuseGetCurrentSize(pAdapter, efuseType);
+
+ return ret;
+}
+
+/* 11/16/2008 MH Add description. Count the enabled words in the word_en bitmap. */
+u8
+Efuse_CalculateWordCnts23a(u8 word_en)
+{
+ u8 word_cnts = 0;
+ if (!(word_en & BIT(0))) word_cnts++; /* 0 : write enable */
+ if (!(word_en & BIT(1))) word_cnts++;
+ if (!(word_en & BIT(2))) word_cnts++;
+ if (!(word_en & BIT(3))) word_cnts++;
+ return word_cnts;
+}
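+
+/*
+ * Worked example (illustrative sketch, not taken from the vendor code):
+ * word_en is an active-low bitmap, so a cleared bit enables the
+ * corresponding 2-byte word.
+ *
+ *	word_en = 0x9 (binary 1001)  ->  words 1 and 2 enabled
+ *	Efuse_CalculateWordCnts23a(0x9) returns 2
+ *	i.e. 2 * 2 = 4 data bytes follow the packet header
+ */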
+
+/* */
+/* Description: */
+/* Execute E-Fuse read byte operation. */
+/*	Referred from SD1 Richard. */
+/* */
+/* Assumption: */
+/* 1. Boot from E-Fuse and successfully auto-load. */
+/* 2. PASSIVE_LEVEL (USB interface) */
+/* */
+/* Created by Roger, 2008.10.21. */
+/* */
+void
+ReadEFuseByte23a(struct rtw_adapter *Adapter, u16 _offset, u8 *pbuf)
+{
+ u32 value32;
+ u8 readbyte;
+ u16 retry;
+
+ /* Write Address */
+ rtw_write8(Adapter, EFUSE_CTRL+1, (_offset & 0xff));
+ readbyte = rtw_read8(Adapter, EFUSE_CTRL+2);
+ rtw_write8(Adapter, EFUSE_CTRL+2, ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
+
+	/* Clear bit 31 to start the read */
+ readbyte = rtw_read8(Adapter, EFUSE_CTRL+3);
+ rtw_write8(Adapter, EFUSE_CTRL+3, (readbyte & 0x7f));
+
+	/* Poll bit 31 for read-ready */
+ retry = 0;
+ value32 = rtw_read32(Adapter, EFUSE_CTRL);
+ /* while(!(((value32 >> 24) & 0xff) & 0x80) && (retry<10)) */
+ while(!(((value32 >> 24) & 0xff) & 0x80) && (retry<10000))
+ {
+ value32 = rtw_read32(Adapter, EFUSE_CTRL);
+ retry++;
+ }
+
+ /* 20100205 Joseph: Add delay suggested by SD1 Victor. */
+ /* This fix the problem that Efuse read error in high temperature condition. */
+ /* Designer says that there shall be some delay after ready bit is set, or the */
+ /* result will always stay on last data we read. */
+ udelay(50);
+ value32 = rtw_read32(Adapter, EFUSE_CTRL);
+
+ *pbuf = (u8)(value32 & 0xff);
+}
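+
+/*
+ * EFUSE_CTRL (0x0030) register usage assumed by the routine above, as
+ * inferred from this file (a sketch, not a datasheet description):
+ *
+ *	bits  7:0	data byte read back from the e-fuse cell
+ *	bits 15:8	e-fuse address, low 8 bits
+ *	bits 17:16	e-fuse address, high 2 bits
+ *	bit  31		written 0 to start a read, set by hardware when
+ *			the data byte is ready
+ */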
+
+/* */
+/* Description: */
+/*	1. Execute E-Fuse read byte operation according to the map offset and */
+/* save to E-Fuse table. */
+/*	2. Referred from SD1 Richard. */
+/* */
+/* Assumption: */
+/* 1. Boot from E-Fuse and successfully auto-load. */
+/* 2. PASSIVE_LEVEL (USB interface) */
+/* */
+/* Created by Roger, 2008.10.21. */
+/* */
+/* 2008/12/12 MH 1. Reorganize code flow and reserve bytes. and add description. */
+/* 2. Add efuse utilization collect. */
+/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
+/* write addr must be after sec5. */
+/* */
+
+void
+efuse_ReadEFuse(struct rtw_adapter *Adapter, u8 efuseType,
+ u16 _offset, u16 _size_byte, u8 *pbuf);
+void
+efuse_ReadEFuse(struct rtw_adapter *Adapter, u8 efuseType,
+ u16 _offset, u16 _size_byte, u8 *pbuf)
+{
+ Adapter->HalFunc.ReadEFuse(Adapter, efuseType, _offset,
+ _size_byte, pbuf);
+}
+
+void
+EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
+ u8 type, void *pOut)
+{
+ pAdapter->HalFunc.EFUSEGetEfuseDefinition(pAdapter, efuseType,
+ type, pOut);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_Read1Byte23a
+ *
+ * Overview:	Copy from WMAC for EFUSE read 1 byte.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 09/23/2008 MHC Copy from WMAC.
+ *
+ *---------------------------------------------------------------------------*/
+u8
+EFUSE_Read1Byte23a(struct rtw_adapter *Adapter, u16 Address)
+{
+ u8 data;
+ u8 Bytetemp = {0x00};
+ u8 temp = {0x00};
+ u32 k = 0;
+ u16 contentLen = 0;
+
+ EFUSE_GetEfuseDefinition23a(Adapter, EFUSE_WIFI,
+ TYPE_EFUSE_REAL_CONTENT_LEN,
+ (void *)&contentLen);
+
+ if (Address < contentLen) /* E-fuse 512Byte */
+ {
+ /* Write E-fuse Register address bit0~7 */
+ temp = Address & 0xFF;
+ rtw_write8(Adapter, EFUSE_CTRL+1, temp);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+2);
+ /* Write E-fuse Register address bit8~9 */
+ temp = ((Address >> 8) & 0x03) | (Bytetemp & 0xFC);
+ rtw_write8(Adapter, EFUSE_CTRL+2, temp);
+
+ /* Write 0x30[31]= 0 */
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ temp = Bytetemp & 0x7F;
+ rtw_write8(Adapter, EFUSE_CTRL+3, temp);
+
+ /* Wait Write-ready (0x30[31]= 1) */
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ while(!(Bytetemp & 0x80))
+ {
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ k++;
+ if (k == 1000)
+ {
+ k = 0;
+ break;
+ }
+ }
+ data = rtw_read8(Adapter, EFUSE_CTRL);
+ return data;
+ }
+ else
+ return 0xFF;
+}/* EFUSE_Read1Byte23a */
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_Write1Byte
+ *
+ * Overview:	Copy from WMAC for EFUSE write 1 byte.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 09/23/2008 MHC Copy from WMAC.
+ *
+ *---------------------------------------------------------------------------*/
+
+void
+EFUSE_Write1Byte(
+ struct rtw_adapter * Adapter,
+ u16 Address,
+ u8 Value);
+void
+EFUSE_Write1Byte(
+ struct rtw_adapter * Adapter,
+ u16 Address,
+ u8 Value)
+{
+ u8 Bytetemp = {0x00};
+ u8 temp = {0x00};
+ u32 k = 0;
+ u16 contentLen = 0;
+
+ /* RT_TRACE(COMP_EFUSE, DBG_LOUD, ("Addr =%x Data =%x\n", Address, Value)); */
+ EFUSE_GetEfuseDefinition23a(Adapter, EFUSE_WIFI,
+ TYPE_EFUSE_REAL_CONTENT_LEN,
+ (void *)&contentLen);
+
+ if (Address < contentLen) /* E-fuse 512Byte */
+ {
+ rtw_write8(Adapter, EFUSE_CTRL, Value);
+
+ /* Write E-fuse Register address bit0~7 */
+ temp = Address & 0xFF;
+ rtw_write8(Adapter, EFUSE_CTRL+1, temp);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+2);
+
+ /* Write E-fuse Register address bit8~9 */
+ temp = ((Address >> 8) & 0x03) | (Bytetemp & 0xFC);
+ rtw_write8(Adapter, EFUSE_CTRL+2, temp);
+
+ /* Write 0x30[31]= 1 */
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ temp = Bytetemp | 0x80;
+ rtw_write8(Adapter, EFUSE_CTRL+3, temp);
+
+ /* Wait Write-ready (0x30[31]= 0) */
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ while(Bytetemp & 0x80)
+ {
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ k++;
+ if (k == 100)
+ {
+ k = 0;
+ break;
+ }
+ }
+ }
+}/* EFUSE_Write1Byte */
+
+/* 11/16/2008 MH Read one byte from real Efuse. */
+u8
+efuse_OneByteRead23a(struct rtw_adapter *pAdapter, u16 addr, u8 *data)
+{
+ u8 tmpidx = 0;
+ u8 bResult;
+
+ /* -----------------e-fuse reg ctrl --------------------------------- */
+ /* address */
+ rtw_write8(pAdapter, EFUSE_CTRL+1, (u8)(addr&0xff));
+ rtw_write8(pAdapter, EFUSE_CTRL+2, ((u8)((addr>>8) &0x03)) |
+ (rtw_read8(pAdapter, EFUSE_CTRL+2)&0xFC));
+
+ rtw_write8(pAdapter, EFUSE_CTRL+3, 0x72);/* read cmd */
+
+ while(!(0x80 &rtw_read8(pAdapter, EFUSE_CTRL+3)) && (tmpidx<100))
+ tmpidx++;
+ if (tmpidx < 100) {
+ *data = rtw_read8(pAdapter, EFUSE_CTRL);
+ bResult = true;
+ } else {
+ *data = 0xff;
+ bResult = false;
+ }
+ return bResult;
+}
+
+/* 11/16/2008 MH Write one byte to real Efuse. */
+u8
+efuse_OneByteWrite23a(struct rtw_adapter *pAdapter, u16 addr, u8 data)
+{
+ u8 tmpidx = 0;
+ u8 bResult;
+
+ /* RT_TRACE(COMP_EFUSE, DBG_LOUD, ("Addr = %x Data =%x\n", addr, data)); */
+
+ /* return 0; */
+
+ /* -----------------e-fuse reg ctrl --------------------------------- */
+ /* address */
+ rtw_write8(pAdapter, EFUSE_CTRL+1, (u8)(addr&0xff));
+ rtw_write8(pAdapter, EFUSE_CTRL+2,
+ (rtw_read8(pAdapter, EFUSE_CTRL+2)&0xFC)|(u8)((addr>>8)&0x03));
+ rtw_write8(pAdapter, EFUSE_CTRL, data);/* data */
+
+ rtw_write8(pAdapter, EFUSE_CTRL+3, 0xF2);/* write cmd */
+
+ while((0x80 & rtw_read8(pAdapter, EFUSE_CTRL+3)) && (tmpidx<100)) {
+ tmpidx++;
+ }
+
+ if (tmpidx<100)
+ {
+ bResult = true;
+ }
+ else
+ {
+ bResult = false;
+ }
+
+ return bResult;
+}
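+
+/*
+ * Usage sketch (illustrative only, not an existing code path): a
+ * read-modify-write of one raw e-fuse byte, assuming a valid adapter
+ * pointer (padapter) and an address inside the real content length.
+ *
+ *	u8 val;
+ *
+ *	Efuse_PowerSwitch23a(padapter, true, true);
+ *	if (efuse_OneByteRead23a(padapter, 0x10, &val) && val == 0xff)
+ *		efuse_OneByteWrite23a(padapter, 0x10, 0x5a);
+ *	Efuse_PowerSwitch23a(padapter, true, false);
+ */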
+
+int
+Efuse_PgPacketRead23a(struct rtw_adapter *pAdapter, u8 offset, u8 *data)
+{
+ int ret = 0;
+
+ ret = pAdapter->HalFunc.Efuse_PgPacketRead23a(pAdapter, offset, data);
+
+ return ret;
+}
+
+int
+Efuse_PgPacketWrite23a(struct rtw_adapter *pAdapter, u8 offset,
+ u8 word_en, u8 *data)
+{
+ int ret;
+
+ ret = pAdapter->HalFunc.Efuse_PgPacketWrite23a(pAdapter, offset,
+ word_en, data);
+
+ return ret;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: efuse_WordEnableDataRead23a
+ *
+ * Overview:	Read the enabled words from the current efuse section data.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/16/2008 MHC Create Version 0.
+ * 11/21/2008 MHC Fix Write bug when we only enable late word.
+ *
+ *---------------------------------------------------------------------------*/
+void
+efuse_WordEnableDataRead23a(u8 word_en,
+ u8 *sourdata,
+ u8 *targetdata)
+{
+ if (!(word_en&BIT(0)))
+ {
+ targetdata[0] = sourdata[0];
+ targetdata[1] = sourdata[1];
+ }
+ if (!(word_en&BIT(1)))
+ {
+ targetdata[2] = sourdata[2];
+ targetdata[3] = sourdata[3];
+ }
+ if (!(word_en&BIT(2)))
+ {
+ targetdata[4] = sourdata[4];
+ targetdata[5] = sourdata[5];
+ }
+ if (!(word_en&BIT(3)))
+ {
+ targetdata[6] = sourdata[6];
+ targetdata[7] = sourdata[7];
+ }
+}
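+
+/*
+ * Worked example for the copy above (illustrative): with word_en = 0x6
+ * (binary 0110) only bits 0 and 3 are cleared, so bytes 0-1 (word 0)
+ * and bytes 6-7 (word 3) of targetdata are updated from sourdata;
+ * words 1 and 2 are left untouched.
+ */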
+
+u8
+Efuse_WordEnableDataWrite23a(struct rtw_adapter *pAdapter, u16 efuse_addr,
+ u8 word_en, u8 *data)
+{
+ u8 ret = 0;
+
+ ret = pAdapter->HalFunc.Efuse_WordEnableDataWrite23a(pAdapter, efuse_addr,
+ word_en, data);
+
+ return ret;
+}
+
+static u8 efuse_read8(struct rtw_adapter *padapter, u16 address, u8 *value)
+{
+ return efuse_OneByteRead23a(padapter, address, value);
+}
+
+static u8 efuse_write8(struct rtw_adapter *padapter, u16 address, u8 *value)
+{
+ return efuse_OneByteWrite23a(padapter, address, *value);
+}
+
+/*
+ * read/write raw efuse data
+ */
+u8 rtw_efuse_access23a(struct rtw_adapter *padapter, u8 bWrite, u16 start_addr,
+ u16 cnts, u8 *data)
+{
+ int i = 0;
+ u16 real_content_len = 0, max_available_size = 0;
+ u8 res = _FAIL ;
+ u8 (*rw8)(struct rtw_adapter *, u16, u8*);
+
+ EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI,
+ TYPE_EFUSE_REAL_CONTENT_LEN,
+ (void *)&real_content_len);
+ EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI,
+ TYPE_AVAILABLE_EFUSE_BYTES_TOTAL,
+ (void *)&max_available_size);
+
+ if (start_addr > real_content_len)
+ return _FAIL;
+
+ if (true == bWrite) {
+ if ((start_addr + cnts) > max_available_size)
+ return _FAIL;
+ rw8 = &efuse_write8;
+ } else
+ rw8 = &efuse_read8;
+
+ Efuse_PowerSwitch23a(padapter, bWrite, true);
+
+ /* e-fuse one byte read / write */
+ for (i = 0; i < cnts; i++) {
+ if (start_addr >= real_content_len) {
+ res = _FAIL;
+ break;
+ }
+
+ res = rw8(padapter, start_addr++, data++);
+ if (_FAIL == res) break;
+ }
+
+ Efuse_PowerSwitch23a(padapter, bWrite, false);
+
+ return res;
+}
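+
+/*
+ * Usage sketch (illustrative only): dump the first 16 raw e-fuse bytes
+ * through the helper above, assuming a valid adapter pointer.
+ *
+ *	u8 buf[16];
+ *
+ *	if (rtw_efuse_access23a(padapter, false, 0, sizeof(buf), buf) !=
+ *	    _FAIL)
+ *		print_hex_dump_bytes("efuse: ", DUMP_PREFIX_OFFSET,
+ *				     buf, sizeof(buf));
+ */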
+/* */
+u16 efuse_GetMaxSize23a(struct rtw_adapter *padapter)
+{
+ u16 max_size;
+ EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI,
+ TYPE_AVAILABLE_EFUSE_BYTES_TOTAL,
+ (void *)&max_size);
+ return max_size;
+}
+/* */
+u8 efuse_GetCurrentSize23a(struct rtw_adapter *padapter, u16 *size)
+{
+ Efuse_PowerSwitch23a(padapter, false, true);
+ *size = Efuse_GetCurrentSize23a(padapter, EFUSE_WIFI);
+ Efuse_PowerSwitch23a(padapter, false, false);
+
+ return _SUCCESS;
+}
+/* */
+u8 rtw_efuse_map_read23a(struct rtw_adapter *padapter, u16 addr, u16 cnts, u8 *data)
+{
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI,
+ TYPE_EFUSE_MAP_LEN, (void *)&mapLen);
+
+ if ((addr + cnts) > mapLen)
+ return _FAIL;
+
+ Efuse_PowerSwitch23a(padapter, false, true);
+
+ efuse_ReadEFuse(padapter, EFUSE_WIFI, addr, cnts, data);
+
+ Efuse_PowerSwitch23a(padapter, false, false);
+
+ return _SUCCESS;
+}
+
+u8 rtw_BT_efuse_map_read23a(struct rtw_adapter *padapter, u16 addr, u16 cnts, u8 *data)
+{
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition23a(padapter, EFUSE_BT,
+ TYPE_EFUSE_MAP_LEN, (void *)&mapLen);
+
+ if ((addr + cnts) > mapLen)
+ return _FAIL;
+
+ Efuse_PowerSwitch23a(padapter, false, true);
+
+ efuse_ReadEFuse(padapter, EFUSE_BT, addr, cnts, data);
+
+ Efuse_PowerSwitch23a(padapter, false, false);
+
+ return _SUCCESS;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: Efuse_ReadAllMap
+ *
+ * Overview: Read All Efuse content
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/11/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void
+Efuse_ReadAllMap(struct rtw_adapter *pAdapter, u8 efuseType, u8 *Efuse);
+void
+Efuse_ReadAllMap(struct rtw_adapter *pAdapter, u8 efuseType, u8 *Efuse)
+{
+ u16 mapLen = 0;
+
+ Efuse_PowerSwitch23a(pAdapter, false, true);
+
+ EFUSE_GetEfuseDefinition23a(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN,
+ (void *)&mapLen);
+
+ efuse_ReadEFuse(pAdapter, efuseType, 0, mapLen, Efuse);
+
+ Efuse_PowerSwitch23a(pAdapter, false, false);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: efuse_ShadowRead1Byte
+ * efuse_ShadowRead2Byte
+ * efuse_ShadowRead4Byte
+ *
+ * Overview:	Read one/two/four bytes from the efuse init (shadow) map.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+static void
+efuse_ShadowRead1Byte(
+ struct rtw_adapter * pAdapter,
+ u16 Offset,
+ u8 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+} /* efuse_ShadowRead1Byte */
+
+/* Read Two Bytes */
+static void
+efuse_ShadowRead2Byte(
+ struct rtw_adapter * pAdapter,
+ u16 Offset,
+ u16 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
+} /* efuse_ShadowRead2Byte */
+
+/* Read Four Bytes */
+static void
+efuse_ShadowRead4Byte(
+ struct rtw_adapter * pAdapter,
+ u16 Offset,
+ u32 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+2]<<16;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+3]<<24;
+} /* efuse_ShadowRead4Byte */
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_ShadowMapUpdate23a
+ *
+ * Overview: Transfer current EFUSE content to shadow init and modify map.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/13/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void EFUSE_ShadowMapUpdate23a(struct rtw_adapter *pAdapter, u8 efuseType)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition23a(pAdapter, efuseType,
+ TYPE_EFUSE_MAP_LEN, (void *)&mapLen);
+
+ if (pEEPROM->bautoload_fail_flag == true)
+ memset(pEEPROM->efuse_eeprom_data, 0xFF, mapLen);
+ else
+ Efuse_ReadAllMap(pAdapter, efuseType,
+ pEEPROM->efuse_eeprom_data);
+
+}/* EFUSE_ShadowMapUpdate23a */
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_ShadowRead23a
+ *
+ * Overview:	Read from the efuse init (shadow) map.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void
+EFUSE_ShadowRead23a(
+ struct rtw_adapter * pAdapter,
+ u8 Type,
+ u16 Offset,
+ u32 *Value )
+{
+ if (Type == 1)
+ efuse_ShadowRead1Byte(pAdapter, Offset, (u8 *)Value);
+ else if (Type == 2)
+ efuse_ShadowRead2Byte(pAdapter, Offset, (u16 *)Value);
+ else if (Type == 4)
+ efuse_ShadowRead4Byte(pAdapter, Offset, (u32 *)Value);
+} /* EFUSE_ShadowRead23a */
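+
+/*
+ * Usage sketch (illustrative; the offset is only a placeholder, not a
+ * real map field): read a 16-bit value from the shadow map after it
+ * has been filled by EFUSE_ShadowMapUpdate23a().
+ *
+ *	u32 val = 0;
+ *
+ *	EFUSE_ShadowMapUpdate23a(padapter, EFUSE_WIFI);
+ *	EFUSE_ShadowRead23a(padapter, 2, 0x0A, &val);
+ */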
diff --git a/drivers/staging/rtl8723au/core/rtw_ieee80211.c b/drivers/staging/rtl8723au/core/rtw_ieee80211.c
new file mode 100644
index 000000000000..780631fd3b6d
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_ieee80211.c
@@ -0,0 +1,1861 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _IEEE80211_C
+
+#include <drv_types.h>
+#include <linux/ieee80211.h>
+#include <ieee80211.h>
+#include <wifi.h>
+#include <osdep_service.h>
+#include <wlan_bssdef.h>
+
+u8 RTW_WPA_OUI23A_TYPE[] = { 0x00, 0x50, 0xf2, 1 };
+u16 RTW_WPA_VERSION23A = 1;
+u8 WPA_AUTH_KEY_MGMT_NONE23A[] = { 0x00, 0x50, 0xf2, 0 };
+u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X23A[] = { 0x00, 0x50, 0xf2, 1 };
+u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X23A[] = { 0x00, 0x50, 0xf2, 2 };
+u8 WPA_CIPHER_SUITE_NONE23A[] = { 0x00, 0x50, 0xf2, 0 };
+u8 WPA_CIPHER_SUITE_WEP4023A[] = { 0x00, 0x50, 0xf2, 1 };
+u8 WPA_CIPHER_SUITE_TKIP23A[] = { 0x00, 0x50, 0xf2, 2 };
+u8 WPA_CIPHER_SUITE_WRAP23A[] = { 0x00, 0x50, 0xf2, 3 };
+u8 WPA_CIPHER_SUITE_CCMP23A[] = { 0x00, 0x50, 0xf2, 4 };
+u8 WPA_CIPHER_SUITE_WEP10423A[] = { 0x00, 0x50, 0xf2, 5 };
+
+u16 RSN_VERSION_BSD23A = 1;
+u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X23A[] = { 0x00, 0x0f, 0xac, 1 };
+u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X23A[] = { 0x00, 0x0f, 0xac, 2 };
+u8 RSN_CIPHER_SUITE_NONE23A[] = { 0x00, 0x0f, 0xac, 0 };
+u8 RSN_CIPHER_SUITE_WEP4023A[] = { 0x00, 0x0f, 0xac, 1 };
+u8 RSN_CIPHER_SUITE_TKIP23A[] = { 0x00, 0x0f, 0xac, 2 };
+u8 RSN_CIPHER_SUITE_WRAP23A[] = { 0x00, 0x0f, 0xac, 3 };
+u8 RSN_CIPHER_SUITE_CCMP23A[] = { 0x00, 0x0f, 0xac, 4 };
+u8 RSN_CIPHER_SUITE_WEP10423A[] = { 0x00, 0x0f, 0xac, 5 };
+/* */
+/* for the adhoc master to generate IEs and provide supported rates to the fw */
+/* */
+
+static u8 WIFI_CCKRATES[] =
+{(IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK)};
+
+static u8 WIFI_OFDMRATES[] =
+{(IEEE80211_OFDM_RATE_6MB),
+ (IEEE80211_OFDM_RATE_9MB),
+ (IEEE80211_OFDM_RATE_12MB),
+ (IEEE80211_OFDM_RATE_18MB),
+ (IEEE80211_OFDM_RATE_24MB),
+ IEEE80211_OFDM_RATE_36MB,
+ IEEE80211_OFDM_RATE_48MB,
+ IEEE80211_OFDM_RATE_54MB};
+
+int rtw_get_bit_value_from_ieee_value23a(u8 val)
+{
+ unsigned char dot11_rate_table[]=
+ {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0};
+
+ int i = 0;
+ while (dot11_rate_table[i] != 0) {
+ if (dot11_rate_table[i] == val)
+ return BIT(i);
+ i++;
+ }
+ return 0;
+}
+
+uint rtw_is_cckrates_included23a(u8 *rate)
+{
+ u32 i = 0;
+
+ while (rate[i] != 0) {
+ if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
+ (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
+ return true;
+ i++;
+ }
+
+ return false;
+}
+
+uint rtw_is_cckratesonly_included23a(u8 *rate)
+{
+ u32 i = 0;
+
+ while (rate[i] != 0) {
+ if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
+ (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
+ return false;
+
+ i++;
+ }
+
+ return true;
+}
+
+int rtw_check_network_type23a(unsigned char *rate, int ratelen, int channel)
+{
+ if (channel > 14) {
+ if ((rtw_is_cckrates_included23a(rate)) == true)
+ return WIRELESS_INVALID;
+ else
+ return WIRELESS_11A;
+ } else { /* could be pure B, pure G, or B/G */
+ if ((rtw_is_cckratesonly_included23a(rate)) == true)
+ return WIRELESS_11B;
+ else if ((rtw_is_cckrates_included23a(rate)) == true)
+ return WIRELESS_11BG;
+ else
+ return WIRELESS_11G;
+ }
+}
+
+u8 *rtw_set_fixed_ie23a(unsigned char *pbuf, unsigned int len,
+ unsigned char *source, unsigned int *frlen)
+{
+ memcpy((void *)pbuf, (void *)source, len);
+ *frlen = *frlen + len;
+ return pbuf + len;
+}
+
+/* rtw_set_ie23a will update frame length */
+u8 *rtw_set_ie23a(u8 *pbuf, int index, uint len, u8 *source, uint *frlen)
+{
+
+ *pbuf = (u8)index;
+
+ *(pbuf + 1) = (u8)len;
+
+ if (len > 0)
+ memcpy((void *)(pbuf + 2), (void *)source, len);
+
+ *frlen = *frlen + (len + 2);
+
+
+ return pbuf + len + 2;
+}
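+
+/*
+ * Usage sketch (illustrative): each call appends one
+ * | ID (1) | LEN (1) | LEN data bytes | element and advances the write
+ * pointer while adding to the running frame length.
+ *
+ *	u8 buf[64];
+ *	u8 ssid[4] = { 't', 'e', 's', 't' };
+ *	uint frlen = 0;
+ *	u8 *p = buf;
+ *
+ *	p = rtw_set_ie23a(p, WLAN_EID_SSID, 4, ssid, &frlen);
+ *	   buf now holds 00 04 74 65 73 74 and frlen == 6
+ */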
+
+inline u8 *rtw_set_ie23a_ch_switch (u8 *buf, u32 *buf_len, u8 ch_switch_mode,
+ u8 new_ch, u8 ch_switch_cnt)
+{
+ u8 ie_data[3];
+
+ ie_data[0] = ch_switch_mode;
+ ie_data[1] = new_ch;
+ ie_data[2] = ch_switch_cnt;
+ return rtw_set_ie23a(buf, WLAN_EID_CHANNEL_SWITCH, 3, ie_data, buf_len);
+}
+
+inline u8 secondary_ch_offset_to_hal_ch_offset23a(u8 ch_offset)
+{
+ if (ch_offset == SCN)
+ return HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ else if (ch_offset == SCA)
+ return HAL_PRIME_CHNL_OFFSET_UPPER;
+ else if (ch_offset == SCB)
+ return HAL_PRIME_CHNL_OFFSET_LOWER;
+
+ return HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+}
+
+inline u8 hal_ch_offset_to_secondary_ch_offset23a(u8 ch_offset)
+{
+ if (ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
+ return SCN;
+ else if (ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ return SCB;
+ else if (ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
+ return SCA;
+
+ return SCN;
+}
+
+inline u8 *rtw_set_ie23a_secondary_ch_offset(u8 *buf, u32 *buf_len,
+ u8 secondary_ch_offset)
+{
+ return rtw_set_ie23a(buf, WLAN_EID_SECONDARY_CHANNEL_OFFSET,
+ 1, &secondary_ch_offset, buf_len);
+}
+
+inline u8 *rtw_set_ie23a_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl,
+ u8 flags, u16 reason, u16 precedence)
+{
+ u8 ie_data[6];
+
+ ie_data[0] = ttl;
+ ie_data[1] = flags;
+ put_unaligned_le16(reason, (u8*)&ie_data[2]);
+ put_unaligned_le16(precedence, (u8*)&ie_data[4]);
+
+ return rtw_set_ie23a(buf, 0x118, 6, ie_data, buf_len);
+}
+
+/*----------------------------------------------------------------------------
+index: the information element ID to search for; limit: the maximum number of bytes to scan
+-----------------------------------------------------------------------------*/
+u8 *rtw_get_ie23a(u8 *pbuf, int index, int *len, int limit)
+{
+ int tmp, i;
+ u8 *p;
+
+ if (limit < 1) {
+
+ return NULL;
+ }
+
+ p = pbuf;
+ i = 0;
+ *len = 0;
+ while (1) {
+ if (*p == index) {
+ *len = *(p + 1);
+ return p;
+ } else {
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
+ }
+ if (i >= limit)
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * rtw_get_ie23a_ex - Search specific IE from a series of IEs
+ * @in_ie: Address of IEs to search
+ * @in_len: Length limit from in_ie
+ * @eid: Element ID to match
+ * @oui: OUI to match
+ * @oui_len: OUI length
+ * @ie: If not NULL and the specific IE is found, the IE will be copied
+ * to the buf starting from the specific IE
+ * @ielen: If not NULL and the specific IE is found, will set to the length
+ * of the entire IE
+ *
+ * Returns: The address of the specific IE found, or NULL
+ */
+u8 *rtw_get_ie23a_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len,
+ u8 *ie, uint *ielen)
+{
+ uint cnt;
+ u8 *target_ie = NULL;
+
+ if (ielen)
+ *ielen = 0;
+
+ if (!in_ie || in_len <= 0)
+ return target_ie;
+
+ cnt = 0;
+
+ while (cnt < in_len) {
+ if (eid == in_ie[cnt] &&
+ (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) {
+ target_ie = &in_ie[cnt];
+
+ if (ie)
+ memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ if (ielen)
+ *ielen = in_ie[cnt+1]+2;
+ break;
+ } else {
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
+ }
+ }
+
+ return target_ie;
+}
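+
+/*
+ * Usage sketch (illustrative; ies/ies_len are assumed inputs): locate
+ * the WPA vendor IE by element ID plus OUI/type without copying it.
+ *
+ *	uint ielen;
+ *	u8 *p = rtw_get_ie23a_ex(ies, ies_len, WLAN_EID_VENDOR_SPECIFIC,
+ *				 RTW_WPA_OUI23A_TYPE, 4, NULL, &ielen);
+ *
+ * On success p points at the element header and ielen covers the whole
+ * IE including the two header bytes.
+ */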
+
+/**
+ * rtw_ies_remove_ie23a - Find matching IEs and remove
+ * @ies: Address of IEs to search
+ * @ies_len: Pointer of length of ies, will update to new length
+ * @offset: The offset at which to start the search
+ * @eid: Element ID to match
+ * @oui: OUI to match
+ * @oui_len: OUI length
+ *
+ * Returns: _SUCCESS: ies is updated, _FAIL: not updated
+ */
+int rtw_ies_remove_ie23a(u8 *ies, uint *ies_len, uint offset, u8 eid,
+ u8 *oui, u8 oui_len)
+{
+ int ret = _FAIL;
+ u8 *target_ie;
+ u32 target_ielen;
+ u8 *start;
+ uint search_len;
+
+ if (!ies || !ies_len || *ies_len <= offset)
+ goto exit;
+
+ start = ies + offset;
+ search_len = *ies_len - offset;
+
+ while (1) {
+ target_ie = rtw_get_ie23a_ex(start, search_len, eid, oui, oui_len,
+ NULL, &target_ielen);
+ if (target_ie && target_ielen) {
+ u8 buf[MAX_IE_SZ] = {0};
+ u8 *remain_ies = target_ie + target_ielen;
+ uint remain_len = search_len - (remain_ies - start);
+
+ memcpy(buf, remain_ies, remain_len);
+ memcpy(target_ie, buf, remain_len);
+ *ies_len = *ies_len - target_ielen;
+ ret = _SUCCESS;
+
+ start = target_ie;
+ search_len = remain_len;
+ } else {
+ break;
+ }
+ }
+exit:
+ return ret;
+}
+
+void rtw_set_supported_rate23a(u8* SupportedRates, uint mode)
+{
+
+
+ memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
+
+ switch (mode)
+ {
+ case WIRELESS_11B:
+ memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
+ break;
+
+ case WIRELESS_11G:
+ case WIRELESS_11A:
+ case WIRELESS_11_5N:
+ case WIRELESS_11A_5N:/* Todo: no basic rate for ofdm ? */
+ memcpy(SupportedRates, WIFI_OFDMRATES,
+ IEEE80211_NUM_OFDM_RATESLEN);
+ break;
+
+ case WIRELESS_11BG:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11_24N:
+ case WIRELESS_11BG_24N:
+ memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
+ memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES,
+ IEEE80211_NUM_OFDM_RATESLEN);
+ break;
+ }
+
+}
+
+uint rtw_get_rateset_len23a(u8 *rateset)
+{
+ uint i = 0;
+
+ while(1) {
+ if ((rateset[i]) == 0)
+ break;
+
+ if (i > 12)
+ break;
+
+ i++;
+ }
+
+ return i;
+}
+
+int rtw_generate_ie23a(struct registry_priv *pregistrypriv)
+{
+ u8 wireless_mode;
+ int sz = 0, rateLen;
+ struct wlan_bssid_ex* pdev_network = &pregistrypriv->dev_network;
+ u8* ie = pdev_network->IEs;
+
+
+
+ /* timestamp will be inserted by hardware */
+ sz += 8;
+ ie += sz;
+
+ /* beacon interval : 2bytes */
+ /* BCN_INTERVAL; */
+ *(u16*)ie = cpu_to_le16((u16)pdev_network->Configuration.BeaconPeriod);
+ sz += 2;
+ ie += 2;
+
+ /* capability info */
+ *(u16*)ie = 0;
+
+ *(u16*)ie |= cpu_to_le16(cap_IBSS);
+
+ if (pregistrypriv->preamble == PREAMBLE_SHORT)
+ *(u16*)ie |= cpu_to_le16(cap_ShortPremble);
+
+ if (pdev_network->Privacy)
+ *(u16*)ie |= cpu_to_le16(cap_Privacy);
+
+ sz += 2;
+ ie += 2;
+
+ /* SSID */
+ ie = rtw_set_ie23a(ie, _SSID_IE_, pdev_network->Ssid.ssid_len,
+ pdev_network->Ssid.ssid, &sz);
+
+ /* supported rates */
+ if (pregistrypriv->wireless_mode == WIRELESS_11ABGN) {
+ if (pdev_network->Configuration.DSConfig > 14)
+ wireless_mode = WIRELESS_11A_5N;
+ else
+ wireless_mode = WIRELESS_11BG_24N;
+ } else {
+ wireless_mode = pregistrypriv->wireless_mode;
+ }
+
+	rtw_set_supported_rate23a(pdev_network->SupportedRates, wireless_mode);
+
+ rateLen = rtw_get_rateset_len23a(pdev_network->SupportedRates);
+
+ if (rateLen > 8) {
+ ie = rtw_set_ie23a(ie, _SUPPORTEDRATES_IE_, 8,
+ pdev_network->SupportedRates, &sz);
+ /* ie = rtw_set_ie23a(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz); */
+ } else {
+ ie = rtw_set_ie23a(ie, _SUPPORTEDRATES_IE_, rateLen,
+ pdev_network->SupportedRates, &sz);
+ }
+
+ /* DS parameter set */
+ ie = rtw_set_ie23a(ie, _DSSET_IE_, 1,
+ (u8 *)&pdev_network->Configuration.DSConfig, &sz);
+
+ /* IBSS Parameter Set */
+
+ ie = rtw_set_ie23a(ie, _IBSS_PARA_IE_, 2,
+ (u8 *)&pdev_network->Configuration.ATIMWindow, &sz);
+
+ if (rateLen > 8) {
+ ie = rtw_set_ie23a(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8),
+ (pdev_network->SupportedRates + 8), &sz);
+ }
+
+
+
+ /* return _SUCCESS; */
+
+ return sz;
+}
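+
+/*
+ * Layout of the buffer produced above (fixed fields first, then TLVs):
+ *
+ *	timestamp(8) | beacon interval(2) | capability(2) |
+ *	SSID IE | supported-rates IE | DS-parameter IE | IBSS-parameter IE
+ *	[ | extended supported-rates IE, only when more than 8 rates ]
+ *
+ * The return value is the total number of bytes written to
+ * pdev_network->IEs.
+ */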
+
+unsigned char *rtw_get_wpa_ie23a(unsigned char *pie, int *wpa_ie_len, int limit)
+{
+ int len;
+ u16 val16;
+ unsigned char wpa_oui_type[] = {0x00, 0x50, 0xf2, 0x01};
+ u8 *pbuf = pie;
+ int limit_new = limit;
+
+ while(1) {
+ pbuf = rtw_get_ie23a(pbuf, _WPA_IE_ID_, &len, limit_new);
+
+ if (pbuf) {
+ /* check if oui matches... */
+ if (memcmp((pbuf + 2), wpa_oui_type,
+ sizeof(wpa_oui_type))) {
+ goto check_next_ie;
+ }
+
+ /* check version... */
+ memcpy((u8 *)&val16, (pbuf + 6), sizeof(val16));
+
+ val16 = le16_to_cpu(val16);
+ if (val16 != 0x0001)
+ goto check_next_ie;
+
+ *wpa_ie_len = *(pbuf + 1);
+
+ return pbuf;
+ } else {
+ *wpa_ie_len = 0;
+ return NULL;
+ }
+
+check_next_ie:
+
+ limit_new = limit - (pbuf - pie) - 2 - len;
+
+ if (limit_new <= 0)
+ break;
+
+ pbuf += (2 + len);
+ }
+
+ *wpa_ie_len = 0;
+
+ return NULL;
+}
+
+unsigned char *rtw_get_wpa2_ie23a(unsigned char *pie, int *rsn_ie_len, int limit)
+{
+ return rtw_get_ie23a(pie, _WPA2_IE_ID_, rsn_ie_len, limit);
+}
+
+int rtw_get_wpa_cipher_suite23a(u8 *s)
+{
+ if (!memcmp(s, WPA_CIPHER_SUITE_NONE23A, WPA_SELECTOR_LEN))
+ return WPA_CIPHER_NONE;
+ if (!memcmp(s, WPA_CIPHER_SUITE_WEP4023A, WPA_SELECTOR_LEN))
+ return WPA_CIPHER_WEP40;
+ if (!memcmp(s, WPA_CIPHER_SUITE_TKIP23A, WPA_SELECTOR_LEN))
+ return WPA_CIPHER_TKIP;
+ if (!memcmp(s, WPA_CIPHER_SUITE_CCMP23A, WPA_SELECTOR_LEN))
+ return WPA_CIPHER_CCMP;
+ if (!memcmp(s, WPA_CIPHER_SUITE_WEP10423A, WPA_SELECTOR_LEN))
+ return WPA_CIPHER_WEP104;
+
+ return 0;
+}
+
+int rtw_get_wpa2_cipher_suite23a(u8 *s)
+{
+ if (!memcmp(s, RSN_CIPHER_SUITE_NONE23A, RSN_SELECTOR_LEN))
+ return WPA_CIPHER_NONE;
+ if (!memcmp(s, RSN_CIPHER_SUITE_WEP4023A, RSN_SELECTOR_LEN))
+ return WPA_CIPHER_WEP40;
+ if (!memcmp(s, RSN_CIPHER_SUITE_TKIP23A, RSN_SELECTOR_LEN))
+ return WPA_CIPHER_TKIP;
+ if (!memcmp(s, RSN_CIPHER_SUITE_CCMP23A, RSN_SELECTOR_LEN))
+ return WPA_CIPHER_CCMP;
+ if (!memcmp(s, RSN_CIPHER_SUITE_WEP10423A, RSN_SELECTOR_LEN))
+ return WPA_CIPHER_WEP104;
+
+ return 0;
+}
+
+int rtw_parse_wpa_ie23a(u8* wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
+{
+ int i, ret = _SUCCESS;
+ int left, count;
+ u8 *pos;
+ u8 SUITE_1X[4] = {0x00, 0x50, 0xf2, 1};
+
+ if (wpa_ie_len <= 0) {
+ /* No WPA IE - fail silently */
+ return _FAIL;
+ }
+
+ if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie+1) != (u8)(wpa_ie_len - 2)) ||
+ memcmp(wpa_ie + 2, RTW_WPA_OUI23A_TYPE, WPA_SELECTOR_LEN)) {
+ return _FAIL;
+ }
+
+ pos = wpa_ie;
+
+ pos += 8;
+ left = wpa_ie_len - 8;
+
+ /* group_cipher */
+ if (left >= WPA_SELECTOR_LEN) {
+
+ *group_cipher = rtw_get_wpa_cipher_suite23a(pos);
+
+ pos += WPA_SELECTOR_LEN;
+ left -= WPA_SELECTOR_LEN;
+ } else if (left > 0) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("%s: ie length mismatch, %u too much",
+ __func__, left));
+
+ return _FAIL;
+ }
+
+ /* pairwise_cipher */
+ if (left >= 2) {
+ /* count = le16_to_cpu(*(u16*)pos); */
+ count = get_unaligned_le16(pos);
+ pos += 2;
+ left -= 2;
+
+ if (count == 0 || left < count * WPA_SELECTOR_LEN) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("%s: ie count botch (pairwise), "
+ "count %u left %u", __func__,
+ count, left));
+ return _FAIL;
+ }
+
+ for (i = 0; i < count; i++) {
+ *pairwise_cipher |= rtw_get_wpa_cipher_suite23a(pos);
+
+ pos += WPA_SELECTOR_LEN;
+ left -= WPA_SELECTOR_LEN;
+ }
+ } else if (left == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("%s: ie too short (for key mgmt)", __func__));
+ return _FAIL;
+ }
+
+ if (is_8021x) {
+ if (left >= 6) {
+ pos += 2;
+ if (!memcmp(pos, SUITE_1X, 4)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s : there has 802.1x auth\n",
+ __func__));
+ *is_8021x = 1;
+ }
+ }
+ }
+
+ return ret;
+}
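+
+/*
+ * WPA IE layout consumed by the parser above (a sketch of what the
+ * code expects, not a full spec quotation):
+ *
+ *	| 0xdd | len | 00 50 f2 01 | version(2, LE) | group suite(4) |
+ *	| pairwise count(2, LE) | pairwise suites(4 * count) |
+ *	| AKM count(2, LE) | AKM suites(4 * count) | ...
+ *
+ * The first 8 bytes (ID, length, OUI/type, version) are skipped, then
+ * the group cipher, the pairwise cipher list and, when is_8021x is
+ * supplied, the 802.1X key-management selector are extracted.
+ */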
+
+int rtw_parse_wpa2_ie23a(u8* rsn_ie, int rsn_ie_len, int *group_cipher,
+ int *pairwise_cipher, int *is_8021x)
+{
+ int i, ret = _SUCCESS;
+ int left, count;
+ u8 *pos;
+ u8 SUITE_1X[4] = {0x00, 0x0f, 0xac, 0x01};
+
+ if (rsn_ie_len <= 0) {
+ /* No RSN IE - fail silently */
+ return _FAIL;
+ }
+
+	if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2))) {
+ return _FAIL;
+ }
+
+ pos = rsn_ie;
+ pos += 4;
+ left = rsn_ie_len - 4;
+
+ /* group_cipher */
+ if (left >= RSN_SELECTOR_LEN) {
+ *group_cipher = rtw_get_wpa2_cipher_suite23a(pos);
+
+ pos += RSN_SELECTOR_LEN;
+ left -= RSN_SELECTOR_LEN;
+ } else if (left > 0) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("%s: ie length mismatch, %u too much",
+ __func__, left));
+ return _FAIL;
+ }
+
+ /* pairwise_cipher */
+ if (left >= 2) {
+ /* count = le16_to_cpu(*(u16*)pos); */
+ count = get_unaligned_le16(pos);
+ pos += 2;
+ left -= 2;
+
+ if (count == 0 || left < count * RSN_SELECTOR_LEN) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("%s: ie count botch (pairwise), "
+ "count %u left %u",
+ __func__, count, left));
+ return _FAIL;
+ }
+
+ for (i = 0; i < count; i++) {
+ *pairwise_cipher |= rtw_get_wpa2_cipher_suite23a(pos);
+
+ pos += RSN_SELECTOR_LEN;
+ left -= RSN_SELECTOR_LEN;
+ }
+ } else if (left == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("%s: ie too short (for key mgmt)", __func__));
+
+ return _FAIL;
+ }
+
+ if (is_8021x) {
+ if (left >= 6) {
+ pos += 2;
+ if (!memcmp(pos, SUITE_1X, 4)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s (): there has 802.1x auth\n",
+ __func__));
+ *is_8021x = 1;
+ }
+ }
+ }
+
+ return ret;
+}
+
+int rtw_get_sec_ie23a(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
+ u8 *wpa_ie, u16 *wpa_len)
+{
+ u8 authmode, sec_idx, i;
+ u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
+ uint cnt;
+
+
+
+ /* Search required WPA or WPA2 IE and copy to sec_ie[ ] */
+
+ cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_);
+
+ sec_idx = 0;
+
+ while(cnt < in_len) {
+ authmode = in_ie[cnt];
+
+ if ((authmode == _WPA_IE_ID_) &&
+ !memcmp(&in_ie[cnt+2], &wpa_oui[0], 4)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\n rtw_get_wpa_ie23a: sec_idx =%d "
+ "in_ie[cnt+1]+2 =%d\n",
+ sec_idx, in_ie[cnt + 1] + 2));
+
+ if (wpa_ie) {
+ memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ for (i = 0; i < (in_ie[cnt + 1] + 2); i = i + 8) {
+ RT_TRACE(_module_rtl871x_mlme_c_,
+ _drv_info_,
+ ("\n %2x,%2x,%2x,%2x,%2x,%2x,"
+ "%2x,%2x\n", wpa_ie[i],
+ wpa_ie[i + 1], wpa_ie[i + 2],
+ wpa_ie[i + 3], wpa_ie[i + 4],
+ wpa_ie[i + 5], wpa_ie[i + 6],
+ wpa_ie[i + 7]));
+ }
+ }
+
+ *wpa_len = in_ie[cnt + 1] + 2;
+ cnt += in_ie[cnt + 1] + 2; /* get next */
+ } else {
+ if (authmode == _WPA2_IE_ID_) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\n get_rsn_ie: sec_idx =%d in_ie"
+ "[cnt+1]+2 =%d\n", sec_idx,
+ in_ie[cnt + 1] + 2));
+
+ if (rsn_ie) {
+ memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
+
+ for (i = 0; i < (in_ie[cnt + 1] + 2); i = i + 8) {
+ RT_TRACE(_module_rtl871x_mlme_c_,
+ _drv_info_,
+ ("\n %2x,%2x,%2x,%2x,%2x,%2x,"
+ "%2x,%2x\n", rsn_ie[i],
+ rsn_ie[i + 1], rsn_ie[i + 2],
+ rsn_ie[i + 3], rsn_ie[i + 4],
+ rsn_ie[i + 5], rsn_ie[i + 6],
+ rsn_ie[i + 7]));
+ }
+ }
+
+ *rsn_len = in_ie[cnt + 1] + 2;
+ cnt += in_ie[cnt + 1] + 2; /* get next */
+ } else {
+ cnt += in_ie[cnt + 1] + 2; /* get next */
+ }
+ }
+ }
+
+
+
+ return *rsn_len + *wpa_len;
+}
+
+u8 rtw_is_wps_ie23a(u8 *ie_ptr, uint *wps_ielen)
+{
+ u8 match = false;
+ u8 eid, wps_oui[4]= {0x0, 0x50, 0xf2, 0x04};
+
+ if (!ie_ptr)
+ return match;
+
+ eid = ie_ptr[0];
+
+ if ((eid == _WPA_IE_ID_) && !memcmp(&ie_ptr[2], wps_oui, 4)) {
+ /* DBG_8723A("==> found WPS_IE.....\n"); */
+ *wps_ielen = ie_ptr[1] + 2;
+ match = true;
+ }
+ return match;
+}
+
+/**
+ * rtw_get_wps_ie23a - Search WPS IE from a series of IEs
+ * @in_ie: Address of IEs to search
+ * @in_len: Length limit from in_ie
+ * @wps_ie: If not NULL and WPS IE is found, WPS IE will be copied to the
+ * buf starting from wps_ie
+ * @wps_ielen: If not NULL and WPS IE is found, will set to the length of
+ * the entire WPS IE
+ *
+ * Returns: The address of the WPS IE found, or NULL
+ */
+u8 *rtw_get_wps_ie23a(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
+{
+ uint cnt;
+ u8 *wpsie_ptr = NULL;
+ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ if (wps_ielen)
+ *wps_ielen = 0;
+
+ if (!in_ie || in_len <= 0)
+ return wpsie_ptr;
+
+ cnt = 0;
+
+ while (cnt < in_len) {
+ eid = in_ie[cnt];
+
+ if ((eid == _WPA_IE_ID_) && !memcmp(&in_ie[cnt+2], wps_oui, 4)) {
+ wpsie_ptr = &in_ie[cnt];
+
+ if (wps_ie)
+ memcpy(wps_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
+
+ if (wps_ielen)
+ *wps_ielen = in_ie[cnt + 1] + 2;
+
+ cnt += in_ie[cnt + 1] + 2;
+
+ break;
+ } else {
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
+ }
+ }
+
+ return wpsie_ptr;
+}
+
+/**
+ * rtw_get_wps_attr23a - Search a specific WPS attribute from a given WPS IE
+ * @wps_ie: Address of WPS IE to search
+ * @wps_ielen: Length limit from wps_ie
+ * @target_attr_id: The attribute ID of WPS attribute to search
+ * @buf_attr: If not NULL and the WPS attribute is found, WPS attribute
+ * will be copied to the buf starting from buf_attr
+ * @len_attr: If not NULL and the WPS attribute is found, will set to the
+ * length of the entire WPS attribute
+ *
+ * Returns: the address of the specific WPS attribute found, or NULL
+ */
+u8 *rtw_get_wps_attr23a(u8 *wps_ie, uint wps_ielen, u16 target_attr_id,
+ u8 *buf_attr, u32 *len_attr)
+{
+ u8 *attr_ptr = NULL;
+ u8 * target_attr_ptr = NULL;
+ u8 wps_oui[4] = {0x00, 0x50, 0xF2, 0x04};
+
+ if (len_attr)
+ *len_attr = 0;
+
+ if ((wps_ie[0] != _VENDOR_SPECIFIC_IE_) ||
+ memcmp(wps_ie + 2, wps_oui, 4)) {
+ return attr_ptr;
+ }
+
+ /* 6 = 1(Element ID) + 1(Length) + 4(WPS OUI) */
+ attr_ptr = wps_ie + 6; /* goto first attr */
+
+ while (attr_ptr - wps_ie < wps_ielen) {
+ /* 4 = 2(Attribute ID) + 2(Length) */
+ u16 attr_id = get_unaligned_be16(attr_ptr);
+ u16 attr_data_len = get_unaligned_be16(attr_ptr + 2);
+ u16 attr_len = attr_data_len + 4;
+
+ /* DBG_8723A("%s attr_ptr:%p, id:%u, length:%u\n", __func__, attr_ptr, attr_id, attr_data_len); */
+ if (attr_id == target_attr_id) {
+ target_attr_ptr = attr_ptr;
+
+ if (buf_attr)
+ memcpy(buf_attr, attr_ptr, attr_len);
+
+ if (len_attr)
+ *len_attr = attr_len;
+
+ break;
+ } else {
+ attr_ptr += attr_len; /* goto next */
+ }
+ }
+
+ return target_attr_ptr;
+}
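+
+/*
+ * WPS attribute layout walked above: big-endian TLVs of
+ * | ID (2, BE) | LEN (2, BE) | LEN data bytes |, starting 6 bytes into
+ * the IE (after element ID, element length and the 00:50:F2:04
+ * OUI/type).  Usage sketch (illustrative; the attribute ID below is
+ * only a placeholder):
+ *
+ *	u32 alen;
+ *	u8 *attr = rtw_get_wps_attr23a(wps_ie, wps_ielen, 0x1011,
+ *				       NULL, &alen);
+ */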
+
+/**
+ * rtw_get_wps_attr_content23a - Search a specific WPS attribute content
+ * from a given WPS IE
+ * @wps_ie: Address of WPS IE to search
+ * @wps_ielen: Length limit from wps_ie
+ * @target_attr_id: The attribute ID of WPS attribute to search
+ * @buf_content: If not NULL and the WPS attribute is found, WPS attribute
+ * content will be copied to the buf starting from buf_content
+ * @len_content: If not NULL and the WPS attribute is found, will set to the
+ * length of the WPS attribute content
+ *
+ * Returns: the address of the specific WPS attribute content found, or NULL
+ */
+u8 *rtw_get_wps_attr_content23a(u8 *wps_ie, uint wps_ielen, u16 target_attr_id,
+ u8 *buf_content, uint *len_content)
+{
+ u8 *attr_ptr;
+ u32 attr_len;
+
+ if (len_content)
+ *len_content = 0;
+
+ attr_ptr = rtw_get_wps_attr23a(wps_ie, wps_ielen, target_attr_id,
+ NULL, &attr_len);
+
+ if (attr_ptr && attr_len) {
+ if (buf_content)
+ memcpy(buf_content, attr_ptr + 4, attr_len - 4);
+
+ if (len_content)
+ *len_content = attr_len - 4;
+
+ return attr_ptr + 4;
+ }
+
+ return NULL;
+}
+
+static int
+rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors)
+{
+ unsigned int oui;
+
+ /* first 3 bytes in vendor specific information element are the IEEE
+	 * OUI of the vendor. The following byte is used as a vendor-specific
+	 * sub-type. */
+ if (elen < 4) {
+ if (show_errors) {
+ DBG_8723A("short vendor specific "
+ "information element ignored (len =%lu)\n",
+ (unsigned long) elen);
+ }
+ return -1;
+ }
+
+ oui = RTW_GET_BE24(pos);
+ switch (oui) {
+ case WLAN_OUI_MICROSOFT:
+ /* Microsoft/Wi-Fi information elements are further typed and
+ * subtyped */
+ switch (pos[3]) {
+ case 1:
+ /* Microsoft OUI (00:50:F2) with OUI Type 1:
+ * real WPA information element */
+ elems->wpa_ie = pos;
+ elems->wpa_ie_len = elen;
+ break;
+ case WME_OUI_TYPE: /* this is a Wi-Fi WME info. element */
+ if (elen < 5) {
+ DBG_8723A("short WME "
+ "information element ignored "
+ "(len =%lu)\n",
+ (unsigned long) elen);
+ return -1;
+ }
+ switch (pos[4]) {
+ case WME_OUI_SUBTYPE_INFORMATION_ELEMENT:
+ case WME_OUI_SUBTYPE_PARAMETER_ELEMENT:
+ elems->wme = pos;
+ elems->wme_len = elen;
+ break;
+ case WME_OUI_SUBTYPE_TSPEC_ELEMENT:
+ elems->wme_tspec = pos;
+ elems->wme_tspec_len = elen;
+ break;
+ default:
+ DBG_8723A("unknown WME "
+ "information element ignored "
+ "(subtype =%d len =%lu)\n",
+ pos[4], (unsigned long) elen);
+ return -1;
+ }
+ break;
+ case 4:
+ /* Wi-Fi Protected Setup (WPS) IE */
+ elems->wps_ie = pos;
+ elems->wps_ie_len = elen;
+ break;
+ default:
+ DBG_8723A("Unknown Microsoft "
+ "information element ignored "
+ "(type =%d len =%lu)\n",
+ pos[3], (unsigned long) elen);
+ return -1;
+ }
+ break;
+
+ case OUI_BROADCOM:
+ switch (pos[3]) {
+ case VENDOR_HT_CAPAB_OUI_TYPE:
+ elems->vendor_ht_cap = pos;
+ elems->vendor_ht_cap_len = elen;
+ break;
+ default:
+ DBG_8723A("Unknown Broadcom "
+ "information element ignored "
+ "(type =%d len =%lu)\n",
+ pos[3], (unsigned long) elen);
+ return -1;
+ }
+ break;
+
+ default:
+ DBG_8723A("unknown vendor specific information "
+ "element ignored (vendor OUI %02x:%02x:%02x "
+ "len =%lu)\n",
+ pos[0], pos[1], pos[2], (unsigned long) elen);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * ieee802_11_parse_elems - Parse information elements in management frames
+ * @start: Pointer to the start of IEs
+ * @len: Length of IE buffer in octets
+ * @elems: Data structure for parsed elements
+ * @show_errors: Whether to show parsing errors in debug log
+ * Returns: Parsing result
+ */
+enum parse_res rtw_ieee802_11_parse_elems23a(u8 *start, uint len,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors)
+{
+ uint left = len;
+ u8 *pos = start;
+ int unknown = 0;
+
+ memset(elems, 0, sizeof(*elems));
+
+ while (left >= 2) {
+ u8 id, elen;
+
+ id = *pos++;
+ elen = *pos++;
+ left -= 2;
+
+ if (elen > left) {
+ if (show_errors) {
+ DBG_8723A("IEEE 802.11 element "
+ "parse failed (id =%d elen =%d "
+ "left =%lu)\n",
+ id, elen, (unsigned long) left);
+ }
+ return ParseFailed;
+ }
+
+ switch (id) {
+ case WLAN_EID_SSID:
+ elems->ssid = pos;
+ elems->ssid_len = elen;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ elems->supp_rates = pos;
+ elems->supp_rates_len = elen;
+ break;
+ case WLAN_EID_FH_PARAMS:
+ elems->fh_params = pos;
+ elems->fh_params_len = elen;
+ break;
+ case WLAN_EID_DS_PARAMS:
+ elems->ds_params = pos;
+ elems->ds_params_len = elen;
+ break;
+ case WLAN_EID_CF_PARAMS:
+ elems->cf_params = pos;
+ elems->cf_params_len = elen;
+ break;
+ case WLAN_EID_TIM:
+ elems->tim = pos;
+ elems->tim_len = elen;
+ break;
+ case WLAN_EID_IBSS_PARAMS:
+ elems->ibss_params = pos;
+ elems->ibss_params_len = elen;
+ break;
+ case WLAN_EID_CHALLENGE:
+ elems->challenge = pos;
+ elems->challenge_len = elen;
+ break;
+ case WLAN_EID_ERP_INFO:
+ elems->erp_info = pos;
+ elems->erp_info_len = elen;
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ elems->ext_supp_rates = pos;
+ elems->ext_supp_rates_len = elen;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (rtw_ieee802_11_parse_vendor_specific(pos, elen,
+ elems,
+ show_errors))
+ unknown++;
+ break;
+ case WLAN_EID_RSN:
+ elems->rsn_ie = pos;
+ elems->rsn_ie_len = elen;
+ break;
+ case WLAN_EID_PWR_CAPABILITY:
+ elems->power_cap = pos;
+ elems->power_cap_len = elen;
+ break;
+ case WLAN_EID_SUPPORTED_CHANNELS:
+ elems->supp_channels = pos;
+ elems->supp_channels_len = elen;
+ break;
+ case WLAN_EID_MOBILITY_DOMAIN:
+ elems->mdie = pos;
+ elems->mdie_len = elen;
+ break;
+ case WLAN_EID_FAST_BSS_TRANSITION:
+ elems->ftie = pos;
+ elems->ftie_len = elen;
+ break;
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ elems->timeout_int = pos;
+ elems->timeout_int_len = elen;
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ elems->ht_capabilities = pos;
+ elems->ht_capabilities_len = elen;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ elems->ht_operation = pos;
+ elems->ht_operation_len = elen;
+ break;
+ default:
+ unknown++;
+ if (!show_errors)
+ break;
+ DBG_8723A("IEEE 802.11 element parse "
+ "ignored unknown element (id =%d elen =%d)\n",
+ id, elen);
+ break;
+ }
+
+ left -= elen;
+ pos += elen;
+ }
+
+ if (left)
+ return ParseFailed;
+
+ return unknown ? ParseUnknown : ParseOK;
+}
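+
+/*
+ * Usage sketch (illustrative; ies/ies_len are assumed to point past
+ * the fixed beacon fields): parse the tagged elements of a received
+ * beacon or probe response and pick out the SSID.
+ *
+ *	struct rtw_ieee802_11_elems elems;
+ *
+ *	if (rtw_ieee802_11_parse_elems23a(ies, ies_len, &elems, 1) !=
+ *	    ParseFailed && elems.ssid)
+ *		DBG_8723A("ssid len %d\n", (int)elems.ssid_len);
+ */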
+
+static u8 key_char2num(u8 ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ else if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ else if ((ch >= 'A') && (ch <= 'F'))
+ return ch - 'A' + 10;
+ else
+ return 0xff;
+}
+
+u8 str_2char2num23a(u8 hch, u8 lch)
+{
+ return (key_char2num(hch) * 10) + key_char2num(lch);
+}
+
+u8 key_2char2num23a(u8 hch, u8 lch)
+{
+ return (key_char2num(hch) << 4) | key_char2num(lch);
+}
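+
+/*
+ * Worked examples for the helpers above:
+ *
+ *	key_2char2num23a('1', 'f')  ->  0x1f  (hex pair to one byte)
+ *	str_2char2num23a('1', '2')  ->  12    (decimal pair to one value)
+ *
+ * Any character outside 0-9/a-f/A-F makes key_char2num() return 0xff,
+ * so callers are expected to pass pre-validated key strings.
+ */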
+
+void rtw_macaddr_cfg23a(u8 *mac_addr)
+{
+ u8 mac[ETH_ALEN];
+ if (!mac_addr)
+ return;
+
+ memcpy(mac, mac_addr, ETH_ALEN);
+
+ if (is_broadcast_ether_addr(mac) || is_zero_ether_addr(mac)) {
+ mac[0] = 0x00;
+ mac[1] = 0xe0;
+ mac[2] = 0x4c;
+ mac[3] = 0x87;
+ mac[4] = 0x00;
+ mac[5] = 0x00;
+		/* use default mac address */
+ memcpy(mac_addr, mac, ETH_ALEN);
+ DBG_8723A("MAC Address from efuse error, assign default "
+ "one !!!\n");
+ }
+ DBG_8723A("rtw_macaddr_cfg23a MAC Address = "MAC_FMT"\n",
+ MAC_ARG(mac_addr));
+}
+
+void dump_ies23a(u8 *buf, u32 buf_len)
+{
+ u8* pos = (u8*)buf;
+ u8 id, len;
+
+ while (pos-buf <= buf_len) {
+ id = *pos;
+ len = *(pos + 1);
+
+ DBG_8723A("%s ID:%u, LEN:%u\n", __func__, id, len);
+#ifdef CONFIG_8723AU_P2P
+ dump_p2p_ie23a(pos, len);
+#endif
+ dump_wps_ie23a(pos, len);
+
+ pos += (2 + len);
+ }
+}
+
+void dump_wps_ie23a(u8 *ie, u32 ie_len)
+{
+ u8* pos = (u8*)ie;
+ u16 id;
+ u16 len;
+
+ u8 *wps_ie;
+ uint wps_ielen;
+
+ wps_ie = rtw_get_wps_ie23a(ie, ie_len, NULL, &wps_ielen);
+ if (wps_ie != ie || wps_ielen == 0)
+ return;
+
+ pos+= 6;
+ while (pos-ie < ie_len) {
+ id = get_unaligned_be16(pos);
+ len = get_unaligned_be16(pos + 2);
+
+ DBG_8723A("%s ID:0x%04x, LEN:%u\n", __func__, id, len);
+
+ pos += (4 + len);
+ }
+}
+
+#ifdef CONFIG_8723AU_P2P
+void dump_p2p_ie23a(u8 *ie, u32 ie_len)
+{
+ u8* pos = (u8*)ie;
+ u8 id;
+ u16 len;
+
+ u8 *p2p_ie;
+ uint p2p_ielen;
+
+ p2p_ie = rtw_get_p2p_ie23a(ie, ie_len, NULL, &p2p_ielen);
+ if (p2p_ie != ie || p2p_ielen == 0)
+ return;
+
+ pos += 6;
+ while (pos-ie < ie_len) {
+ id = *pos;
+ len = get_unaligned_le16(pos+1);
+
+ DBG_8723A("%s ID:%u, LEN:%u\n", __func__, id, len);
+
+ pos+= (3+len);
+ }
+}
+
+/**
+ * rtw_get_p2p_ie23a - Search P2P IE from a series of IEs
+ * @in_ie: Address of IEs to search
+ * @in_len: Length limit from in_ie
+ * @p2p_ie: If not NULL and P2P IE is found, P2P IE will be copied to the
+ * buf starting from p2p_ie
+ * @p2p_ielen: If not NULL and P2P IE is found, will set to the length of
+ * the entire P2P IE
+ *
+ * Returns: The address of the P2P IE found, or NULL
+ */
+u8 *rtw_get_p2p_ie23a(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen)
+{
+ uint cnt = 0;
+ u8 *p2p_ie_ptr;
+ u8 eid, p2p_oui[4]={0x50, 0x6F, 0x9A, 0x09};
+
+ if (p2p_ielen)
+ *p2p_ielen = 0;
+
+ while (cnt<in_len) {
+ eid = in_ie[cnt];
+ if ((in_len < 0) || (cnt > MAX_IE_SZ)) {
+ dump_stack();
+ return NULL;
+ }
+ if ((eid == _VENDOR_SPECIFIC_IE_) &&
+ !memcmp(&in_ie[cnt + 2], p2p_oui, 4)) {
+ p2p_ie_ptr = in_ie + cnt;
+
+ if (p2p_ie != NULL) {
+ memcpy(p2p_ie, &in_ie[cnt],
+ in_ie[cnt + 1] + 2);
+ }
+
+ if (p2p_ielen != NULL) {
+ *p2p_ielen = in_ie[cnt + 1] + 2;
+ }
+
+ return p2p_ie_ptr;
+
+ break;
+ } else {
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * rtw_get_p2p_attr23a - Search a specific P2P attribute from a given P2P IE
+ * @p2p_ie: Address of P2P IE to search
+ * @p2p_ielen: Length limit from p2p_ie
+ * @target_attr_id: The attribute ID of P2P attribute to search
+ * @buf_attr: If not NULL and the P2P attribute is found, P2P attribute will
+ * be copied to the buf starting from buf_attr
+ * @len_attr: If not NULL and the P2P attribute is found, will set to the
+ * length of the entire P2P attribute
+ *
+ * Returns: the address of the specific P2P attribute found, or NULL
+ */
+u8 *rtw_get_p2p_attr23a(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id,
+ u8 *buf_attr, u32 *len_attr)
+{
+ u8 *attr_ptr = NULL;
+ u8 *target_attr_ptr = NULL;
+ u8 p2p_oui[4]={0x50, 0x6F, 0x9A, 0x09};
+
+ if (len_attr)
+ *len_attr = 0;
+
+ if (!p2p_ie || (p2p_ie[0] != _VENDOR_SPECIFIC_IE_) ||
+ memcmp(p2p_ie + 2, p2p_oui, 4)) {
+ return attr_ptr;
+ }
+
+ /* 6 = 1(Element ID) + 1(Length) + 3 (OUI) + 1(OUI Type) */
+ attr_ptr = p2p_ie + 6; /* goto first attr */
+
+ while (attr_ptr - p2p_ie < p2p_ielen) {
+ /* 3 = 1(Attribute ID) + 2(Length) */
+ u8 attr_id = *attr_ptr;
+ u16 attr_data_len = get_unaligned_le16(attr_ptr + 1);
+ u16 attr_len = attr_data_len + 3;
+
+ /* DBG_8723A("%s attr_ptr:%p, id:%u, length:%u\n", __func__, attr_ptr, attr_id, attr_data_len); */
+ if (attr_id == target_attr_id) {
+ target_attr_ptr = attr_ptr;
+
+ if (buf_attr)
+ memcpy(buf_attr, attr_ptr, attr_len);
+
+ if (len_attr)
+ *len_attr = attr_len;
+
+ break;
+ } else {
+ attr_ptr += attr_len; /* goto next */
+ }
+ }
+
+ return target_attr_ptr;
+}
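+
+/*
+ * P2P attribute layout walked above: TLVs of
+ * | ID (1) | LEN (2, LE) | LEN data bytes |, starting 6 bytes into the
+ * IE (after element ID, element length and the 50:6F:9A:09 OUI/type).
+ * Note the contrast with WPS attributes, which use 2-byte big-endian
+ * IDs and lengths.
+ */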
+
+/**
+ * rtw_get_p2p_attr23a_content - Search a specific P2P attribute content from
+ * a given P2P IE
+ * @p2p_ie: Address of P2P IE to search
+ * @p2p_ielen: Length limit from p2p_ie
+ * @target_attr_id: The attribute ID of P2P attribute to search
+ * @buf_content: If not NULL and the P2P attribute is found, P2P attribute
+ * content will be copied to the buf starting from buf_content
+ * @len_content: If not NULL and the P2P attribute is found, will set to the
+ * length of the P2P attribute content
+ *
+ * Returns: the address of the specific P2P attribute content found, or NULL
+ */
+u8 *rtw_get_p2p_attr23a_content(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id,
+ u8 *buf_content, uint *len_content)
+{
+ u8 *attr_ptr;
+ u32 attr_len;
+
+ if (len_content)
+ *len_content = 0;
+
+ attr_ptr = rtw_get_p2p_attr23a(p2p_ie, p2p_ielen, target_attr_id,
+ NULL, &attr_len);
+
+ if (attr_ptr && attr_len) {
+ if (buf_content)
+ memcpy(buf_content, attr_ptr + 3, attr_len - 3);
+
+ if (len_content)
+ *len_content = attr_len - 3;
+
+ return attr_ptr+3;
+ }
+
+ return NULL;
+}
+
+u32 rtw_set_p2p_attr_content23a(u8 *pbuf, u8 attr_id, u16 attr_len, u8 *pdata_attr)
+{
+ u32 a_len;
+
+ *pbuf = attr_id;
+
+ /* u16*)(pbuf + 1) = cpu_to_le16(attr_len); */
+ put_unaligned_le16(attr_len, pbuf + 1);
+
+ if (pdata_attr)
+ memcpy(pbuf + 3, pdata_attr, attr_len);
+
+ a_len = attr_len + 3;
+
+ return a_len;
+}
+
+static uint rtw_p2p_attr_remove(u8 *ie, uint ielen_ori, u8 attr_id)
+{
+ u8 *target_attr;
+ u32 target_attr_len;
+ uint ielen = ielen_ori;
+
+ while(1) {
+ target_attr = rtw_get_p2p_attr23a(ie, ielen, attr_id, NULL,
+ &target_attr_len);
+ if (target_attr && target_attr_len) {
+ u8 *next_attr = target_attr+target_attr_len;
+ uint remain_len = ielen-(next_attr-ie);
+ /* dump_ies23a(ie, ielen); */
+
+ memset(target_attr, 0, target_attr_len);
+ memcpy(target_attr, next_attr, remain_len);
+ memset(target_attr+remain_len, 0, target_attr_len);
+ *(ie + 1) -= target_attr_len;
+ ielen -= target_attr_len;
+ } else {
+ /* if (index>0) */
+ /* dump_ies23a(ie, ielen); */
+ break;
+ }
+ }
+
+ return ielen;
+}
+
+void rtw_wlan_bssid_ex_remove_p2p_attr23a(struct wlan_bssid_ex *bss_ex, u8 attr_id)
+{
+ u8 *p2p_ie;
+ uint p2p_ielen, p2p_ielen_ori;
+
+ if ((p2p_ie = rtw_get_p2p_ie23a(bss_ex->IEs + _FIXED_IE_LENGTH_,
+ bss_ex->IELength - _FIXED_IE_LENGTH_,
+ NULL, &p2p_ielen_ori))) {
+ p2p_ielen = rtw_p2p_attr_remove(p2p_ie, p2p_ielen_ori, attr_id);
+ if (p2p_ielen != p2p_ielen_ori) {
+ u8 *next_ie_ori = p2p_ie+p2p_ielen_ori;
+ u8 *next_ie = p2p_ie+p2p_ielen;
+ uint remain_len;
+ remain_len = bss_ex->IELength-(next_ie_ori-bss_ex->IEs);
+
+ memcpy(next_ie, next_ie_ori, remain_len);
+ memset(next_ie+remain_len, 0, p2p_ielen_ori-p2p_ielen);
+ bss_ex->IELength -= p2p_ielen_ori-p2p_ielen;
+ }
+ }
+}
+
+#endif /* CONFIG_8723AU_P2P */
+
+#ifdef CONFIG_8723AU_P2P
+int rtw_get_wfd_ie(u8 *in_ie, int in_len, u8 *wfd_ie, uint *wfd_ielen)
+{
+ int match;
+ uint cnt = 0;
+ u8 eid, wfd_oui[4] = {0x50, 0x6F, 0x9A, 0x0A};
+
+ match = false;
+
+ if (in_len < 0) {
+ return match;
+ }
+
+ while (cnt < in_len)
+ {
+ eid = in_ie[cnt];
+
+ if ((eid == _VENDOR_SPECIFIC_IE_) &&
+ !memcmp(&in_ie[cnt+2], wfd_oui, 4)) {
+ if (wfd_ie != NULL) {
+ memcpy(wfd_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
+
+ } else {
+ if (wfd_ielen != NULL) {
+ *wfd_ielen = 0;
+ }
+ }
+
+ if (wfd_ielen != NULL) {
+ *wfd_ielen = in_ie[cnt + 1] + 2;
+ }
+
+ cnt += in_ie[cnt + 1] + 2;
+
+ match = true;
+ break;
+ } else {
+ cnt += in_ie[cnt + 1] +2; /* goto next */
+ }
+ }
+
+ if (match == true) {
+ match = cnt;
+ }
+
+ return match;
+}
+
+/* attr_content: The output buffer, contains the "body field" of
+ WFD attribute. */
+/* attr_contentlen: The data length of the "body field" of WFD
+ attribute. */
+int rtw_get_wfd_attr_content(u8 *wfd_ie, uint wfd_ielen, u8 target_attr_id,
+ u8 *attr_content, uint *attr_contentlen)
+{
+ int match;
+ uint cnt = 0;
+ u8 attr_id, wfd_oui[4] = {0x50, 0x6F, 0x9A, 0x0A};
+
+ match = false;
+
+ if ((wfd_ie[0] != _VENDOR_SPECIFIC_IE_) ||
+ memcmp(wfd_ie + 2, wfd_oui, 4)) {
+ return match;
+ }
+
+ /* 1 (WFD IE) + 1 (Length) + 3 (OUI) + 1 (OUI Type) */
+ cnt = 6;
+ while (cnt < wfd_ielen) {
+ u16 attrlen = get_unaligned_be16(wfd_ie + cnt + 1);
+
+ attr_id = wfd_ie[cnt];
+ if (attr_id == target_attr_id) {
+ /* 3 -> 1 byte for attribute ID field, 2
+ bytes for length field */
+ if (attr_content)
+ memcpy(attr_content, &wfd_ie[cnt + 3], attrlen);
+
+ if (attr_contentlen)
+ *attr_contentlen = attrlen;
+
+ cnt += attrlen + 3;
+
+ match = true;
+ break;
+ } else {
+ cnt += attrlen + 3; /* goto next */
+ }
+ }
+
+ return match;
+}
+#endif /* CONFIG_8723AU_P2P */
+
+/* Baron adds to avoid FreeBSD warning */
+int ieee80211_is_empty_essid23a(const char *essid, int essid_len)
+{
+ /* Single white space is for Linksys APs */
+ if (essid_len == 1 && essid[0] == ' ')
+ return 1;
+
+ /* Otherwise, if the entire essid is 0, we assume it is hidden */
+ while (essid_len) {
+ essid_len--;
+ if (essid[essid_len] != '\0')
+ return 0;
+ }
+
+ return 1;
+}
+
+static int rtw_get_cipher_info(struct wlan_network *pnetwork)
+{
+ u32 wpa_ielen;
+ unsigned char *pbuf;
+ int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
+ int ret = _FAIL;
+ int r;
+ pbuf = rtw_get_wpa_ie23a(&pnetwork->network.IEs[12], &wpa_ielen,
+ pnetwork->network.IELength - 12);
+
+ if (pbuf && (wpa_ielen > 0)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("rtw_get_cipher_info: wpa_ielen: %d", wpa_ielen));
+ r = rtw_parse_wpa_ie23a(pbuf, wpa_ielen + 2, &group_cipher,
+ &pairwise_cipher, &is8021x);
+ if (r == _SUCCESS) {
+ pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
+ pnetwork->BcnInfo.group_cipher = group_cipher;
+ pnetwork->BcnInfo.is_8021x = is8021x;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s: pnetwork->pairwise_cipher: %d, is_"
+ "8021x is %d", __func__,
+ pnetwork->BcnInfo.pairwise_cipher,
+ pnetwork->BcnInfo.is_8021x));
+ ret = _SUCCESS;
+ }
+ } else {
+ pbuf = rtw_get_wpa2_ie23a(&pnetwork->network.IEs[12], &wpa_ielen,
+ pnetwork->network.IELength - 12);
+
+ if (pbuf && (wpa_ielen > 0)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("get RSN IE\n"));
+ r = rtw_parse_wpa2_ie23a(pbuf, wpa_ielen + 2,
+ &group_cipher, &pairwise_cipher,
+ &is8021x);
+ if (r == _SUCCESS) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("get RSN IE OK!!!\n"));
+ pnetwork->BcnInfo.pairwise_cipher =
+ pairwise_cipher;
+ pnetwork->BcnInfo.group_cipher = group_cipher;
+ pnetwork->BcnInfo.is_8021x = is8021x;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s: pnetwork->pairwise_cipher: %d,"
+ "pnetwork->group_cipher is %d, "
+ "is_8021x is %d", __func__,
+ pnetwork->BcnInfo.pairwise_cipher,
+ pnetwork->BcnInfo.group_cipher,
+ pnetwork->BcnInfo.is_8021x));
+ ret = _SUCCESS;
+ }
+ }
+ }
+
+ return ret;
+}
+
+void rtw_get_bcn_info23a(struct wlan_network *pnetwork)
+{
+ unsigned short cap = 0;
+ u8 bencrypt = 0;
+ /* u8 wpa_ie[255], rsn_ie[255]; */
+ u16 wpa_len = 0, rsn_len = 0;
+ struct HT_info_element *pht_info = NULL;
+ struct ieee80211_ht_cap *pht_cap = NULL;
+ unsigned int len;
+ unsigned char *p;
+
+ memcpy(&cap, rtw_get_capability23a_from_ie(pnetwork->network.IEs), 2);
+ cap = le16_to_cpu(cap);
+ if (cap & WLAN_CAPABILITY_PRIVACY) {
+ bencrypt = 1;
+ pnetwork->network.Privacy = 1;
+ } else {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_OPENSYS;
+ }
+ rtw_get_sec_ie23a(pnetwork->network.IEs, pnetwork->network.IELength,
+ NULL, &rsn_len, NULL, &wpa_len);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("rtw_get_bcn_info23a: ssid =%s\n", pnetwork->network.Ssid.ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("rtw_get_bcn_info23a: wpa_len =%d rsn_len =%d\n",
+ wpa_len, rsn_len));
+
+ if (rsn_len > 0) {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA2;
+ } else if (wpa_len > 0) {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA;
+ } else {
+ if (bencrypt)
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WEP;
+ }
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("rtw_get_bcn_info23a: pnetwork->encryp_protocol is %x\n",
+ pnetwork->BcnInfo.encryp_protocol));
+ rtw_get_cipher_info(pnetwork);
+
+ /* get bwmode and ch_offset */
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie23a(pnetwork->network.IEs + _FIXED_IE_LENGTH_,
+ _HT_CAPABILITY_IE_, &len,
+ pnetwork->network.IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_cap = (struct ieee80211_ht_cap *)(p + 2);
+ pnetwork->BcnInfo.ht_cap_info = pht_cap->cap_info;
+ } else {
+ pnetwork->BcnInfo.ht_cap_info = 0;
+ }
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie23a(pnetwork->network.IEs + _FIXED_IE_LENGTH_,
+ _HT_ADD_INFO_IE_, &len,
+ pnetwork->network.IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_info = (struct HT_info_element *)(p + 2);
+ pnetwork->BcnInfo.ht_info_infos_0 = pht_info->infos[0];
+ } else {
+ pnetwork->BcnInfo.ht_info_infos_0 = 0;
+ }
+}
+
+/* show MCS rate, unit: 100Kbps */
+u16 rtw_mcs_rate23a(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40,
+ unsigned char *MCS_rate)
+{
+ u16 max_rate = 0;
+
+ if (rf_type == RF_1T1R) {
+ if (MCS_rate[0] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?1500:1350):
+ ((short_GI_20)?722:650);
+ else if (MCS_rate[0] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?1350:1215):
+ ((short_GI_20)?650:585);
+ else if (MCS_rate[0] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?1200:1080):
+ ((short_GI_20)?578:520);
+ else if (MCS_rate[0] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?900:810):
+ ((short_GI_20)?433:390);
+ else if (MCS_rate[0] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?600:540):
+ ((short_GI_20)?289:260);
+ else if (MCS_rate[0] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?450:405):
+ ((short_GI_20)?217:195);
+ else if (MCS_rate[0] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?300:270):
+ ((short_GI_20)?144:130);
+ else if (MCS_rate[0] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?150:135):
+ ((short_GI_20)?72:65);
+ } else {
+ if (MCS_rate[1]) {
+ if (MCS_rate[1] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?3000:2700):((short_GI_20)?1444:1300);
+ else if (MCS_rate[1] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?2700:2430):((short_GI_20)?1300:1170);
+ else if (MCS_rate[1] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?2400:2160):((short_GI_20)?1156:1040);
+ else if (MCS_rate[1] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?1800:1620):((short_GI_20)?867:780);
+ else if (MCS_rate[1] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?1200:1080):((short_GI_20)?578:520);
+ else if (MCS_rate[1] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?900:810):((short_GI_20)?433:390);
+ else if (MCS_rate[1] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?600:540):((short_GI_20)?289:260);
+ else if (MCS_rate[1] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?300:270):((short_GI_20)?144:130);
+ } else {
+ if (MCS_rate[0] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?1500:1350):((short_GI_20)?722:650);
+ else if (MCS_rate[0] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?1350:1215):((short_GI_20)?650:585);
+ else if (MCS_rate[0] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?1200:1080):((short_GI_20)?578:520);
+ else if (MCS_rate[0] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?900:810):((short_GI_20)?433:390);
+ else if (MCS_rate[0] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?600:540):((short_GI_20)?289:260);
+ else if (MCS_rate[0] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?450:405):((short_GI_20)?217:195);
+ else if (MCS_rate[0] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?300:270):((short_GI_20)?144:130);
+ else if (MCS_rate[0] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI_40)?150:135):((short_GI_20)?72:65);
+ }
+ }
+ return max_rate;
+}
+
+int rtw_action_frame_parse23a(const u8 *frame, u32 frame_len, u8* category,
+ u8 *action)
+{
+ const u8 *frame_body = frame + sizeof(struct ieee80211_hdr_3addr);
+ u16 fc;
+ u8 c, a = 0;
+
+ fc = le16_to_cpu(((struct ieee80211_hdr_3addr *)frame)->frame_control);
+
+ if ((fc & (IEEE80211_FCTL_FTYPE|IEEE80211_FCTL_STYPE)) !=
+ (IEEE80211_FTYPE_MGMT|IEEE80211_STYPE_ACTION)) {
+ return false;
+ }
+
+ c = frame_body[0];
+
+ switch (c) {
+ case WLAN_CATEGORY_VENDOR_SPECIFIC: /* vendor-specific */
+ break;
+ default:
+ a = frame_body[1];
+ }
+
+ if (category)
+ *category = c;
+ if (action)
+ *action = a;
+
+ return true;
+}
+
+static const char *_action_public_str23a[] = {
+ "ACT_PUB_BSSCOEXIST",
+ "ACT_PUB_DSE_ENABLE",
+ "ACT_PUB_DSE_DEENABLE",
+ "ACT_PUB_DSE_REG_LOCATION",
+ "ACT_PUB_EXT_CHL_SWITCH",
+ "ACT_PUB_DSE_MSR_REQ",
+ "ACT_PUB_DSE_MSR_RPRT",
+ "ACT_PUB_MP",
+ "ACT_PUB_DSE_PWR_CONSTRAINT",
+ "ACT_PUB_VENDOR",
+ "ACT_PUB_GAS_INITIAL_REQ",
+ "ACT_PUB_GAS_INITIAL_RSP",
+ "ACT_PUB_GAS_COMEBACK_REQ",
+ "ACT_PUB_GAS_COMEBACK_RSP",
+ "ACT_PUB_TDLS_DISCOVERY_RSP",
+ "ACT_PUB_LOCATION_TRACK",
+ "ACT_PUB_RSVD",
+};
+
+const char *action_public_str23a(u8 action)
+{
+ action = (action >= ACT_PUBLIC_MAX) ? ACT_PUBLIC_MAX : action;
+ return _action_public_str23a[action];
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_io.c b/drivers/staging/rtl8723au/core/rtw_io.c
new file mode 100644
index 000000000000..1cae8d7659b9
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_io.c
@@ -0,0 +1,266 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/*
+
+The purpose of rtw_io.c
+
+a. provides the API
+
+b. provides the protocol engine
+
+c. provides the software interface between caller and the hardware interface
+
+Compiler Flag Option:
+
+1. For USB:
+ a. USE_ASYNC_IRP: Both sync/async operations are provided.
+
+Only sync read/rtw_write_mem operations are provided.
+
+jackson@realtek.com.tw
+
+*/
+
+#define _RTW_IO_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_io.h>
+#include <osdep_intf.h>
+
+#include <usb_ops.h>
+
+u8 _rtw_read823a(struct rtw_adapter *adapter, u32 addr)
+{
+ u8 r_val;
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+
+ r_val = pintfhdl->io_ops._read8(pintfhdl, addr);
+
+ return r_val;
+}
+
+u16 _rtw_read1623a(struct rtw_adapter *adapter, u32 addr)
+{
+ u16 r_val;
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+
+ r_val = pintfhdl->io_ops._read16(pintfhdl, addr);
+
+ return le16_to_cpu(r_val);
+}
+
+u32 _rtw_read3223a(struct rtw_adapter *adapter, u32 addr)
+{
+ u32 r_val;
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+
+ r_val = pintfhdl->io_ops._read32(pintfhdl, addr);
+
+ return le32_to_cpu(r_val);
+}
+
+int _rtw_write823a(struct rtw_adapter *adapter, u32 addr, u8 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+ int ret;
+
+ ret = pintfhdl->io_ops._write8(pintfhdl, addr, val);
+
+ return RTW_STATUS_CODE23a(ret);
+}
+
+int _rtw_write1623a(struct rtw_adapter *adapter, u32 addr, u16 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+ int ret;
+
+ val = cpu_to_le16(val);
+ ret = pintfhdl->io_ops._write16(pintfhdl, addr, val);
+
+ return RTW_STATUS_CODE23a(ret);
+}
+
+int _rtw_write3223a(struct rtw_adapter *adapter, u32 addr, u32 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+ int ret;
+
+ val = cpu_to_le32(val);
+ ret = pintfhdl->io_ops._write32(pintfhdl, addr, val);
+
+ return RTW_STATUS_CODE23a(ret);
+}
+
+int _rtw_writeN23a(struct rtw_adapter *adapter, u32 addr, u32 length, u8 *pdata)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+ int ret;
+
+ ret = pintfhdl->io_ops._writeN(pintfhdl, addr, length, pdata);
+
+ return RTW_STATUS_CODE23a(ret);
+}
+
+int _rtw_write823a_async23a(struct rtw_adapter *adapter, u32 addr, u8 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+ int ret;
+
+ ret = pintfhdl->io_ops._write8_async(pintfhdl, addr, val);
+
+ return RTW_STATUS_CODE23a(ret);
+}
+
+int _rtw_write1623a_async(struct rtw_adapter *adapter, u32 addr, u16 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+ int ret;
+
+ val = cpu_to_le16(val);
+ ret = pintfhdl->io_ops._write16_async(pintfhdl, addr, val);
+
+ return RTW_STATUS_CODE23a(ret);
+}
+
+int _rtw_write3223a_async23a(struct rtw_adapter *adapter, u32 addr, u32 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+ int ret;
+
+ val = cpu_to_le32(val);
+ ret = pintfhdl->io_ops._write32_async(pintfhdl, addr, val);
+
+ return RTW_STATUS_CODE23a(ret);
+}
+
+void _rtw_read_mem23a(struct rtw_adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+
+ if ((adapter->bDriverStopped == true) ||
+ (adapter->bSurpriseRemoved == true)) {
+ RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
+ ("rtw_read_mem:bDriverStopped(%d) OR "
+ "bSurpriseRemoved(%d)", adapter->bDriverStopped,
+ adapter->bSurpriseRemoved));
+ return;
+ }
+
+ pintfhdl->io_ops._read_mem(pintfhdl, addr, cnt, pmem);
+}
+
+void _rtw_write_mem23a(struct rtw_adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+
+ pintfhdl->io_ops._write_mem(pintfhdl, addr, cnt, pmem);
+}
+
+void _rtw_read_port23a(struct rtw_adapter *adapter, u32 addr, u32 cnt,
+ struct recv_buf *rbuf)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+
+ if ((adapter->bDriverStopped == true) ||
+ (adapter->bSurpriseRemoved == true)) {
+ RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
+ ("rtw_read_port:bDriverStopped(%d) OR "
+ "bSurpriseRemoved(%d)", adapter->bDriverStopped,
+ adapter->bSurpriseRemoved));
+ return;
+ }
+
+ pintfhdl->io_ops._read_port(pintfhdl, addr, cnt, rbuf);
+}
+
+void _rtw_read_port23a_cancel(struct rtw_adapter *adapter)
+{
+ void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+
+ _read_port_cancel = pintfhdl->io_ops._read_port_cancel;
+
+ if (_read_port_cancel)
+ _read_port_cancel(pintfhdl);
+}
+
+u32 _rtw_write_port23a(struct rtw_adapter *adapter, u32 addr, u32 cnt,
+ struct xmit_buf *xbuf)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+ u32 ret = _SUCCESS;
+
+ ret = pintfhdl->io_ops._write_port(pintfhdl, addr, cnt, xbuf);
+
+ return ret;
+}
+
+u32 _rtw_write_port23a_and_wait23a(struct rtw_adapter *adapter, u32 addr, u32 cnt,
+ struct xmit_buf *pxmitbuf, int timeout_ms)
+{
+ int ret = _SUCCESS;
+ struct submit_ctx sctx;
+
+ rtw_sctx_init23a(&sctx, timeout_ms);
+ pxmitbuf->sctx = &sctx;
+
+ ret = _rtw_write_port23a(adapter, addr, cnt, pxmitbuf);
+
+ if (ret == _SUCCESS)
+ ret = rtw_sctx_wait23a(&sctx);
+
+ return ret;
+}
+
+void _rtw_write_port23a_cancel(struct rtw_adapter *adapter)
+{
+ void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &pio_priv->intf;
+
+ _write_port_cancel = pintfhdl->io_ops._write_port_cancel;
+
+ if (_write_port_cancel)
+ _write_port_cancel(pintfhdl);
+}
+
+int rtw_init_io_priv23a(struct rtw_adapter *padapter,
+ void (*set_intf_ops)(struct _io_ops *pops))
+{
+ struct io_priv *piopriv = &padapter->iopriv;
+ struct intf_hdl *pintf = &piopriv->intf;
+
+ if (set_intf_ops == NULL)
+ return _FAIL;
+
+ piopriv->padapter = padapter;
+ pintf->padapter = padapter;
+ pintf->pintf_dev = adapter_to_dvobj(padapter);
+
+ set_intf_ops(&pintf->io_ops);
+
+ return _SUCCESS;
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_ioctl_set.c b/drivers/staging/rtl8723au/core/rtw_ioctl_set.c
new file mode 100644
index 000000000000..30d7185e5637
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_ioctl_set.c
@@ -0,0 +1,601 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_IOCTL_SET_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_ioctl_set.h>
+#include <hal_intf.h>
+
+#include <usb_osintf.h>
+#include <usb_ops.h>
+#include <linux/ieee80211.h>
+
+u8 rtw_do_join23a(struct rtw_adapter *padapter)
+{
+ struct list_head *plist, *phead;
+ u8 *pibss = NULL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct rtw_queue *queue = &pmlmepriv->scanned_queue;
+ u8 ret = _SUCCESS;
+
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+ phead = get_list_head(queue);
+ plist = phead->next;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("\n rtw_do_join23a: phead = %p; plist = %p\n\n\n",
+ phead, plist));
+
+ pmlmepriv->cur_network.join_res = -2;
+
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+
+ pmlmepriv->to_join = true;
+
+ if (_rtw_queue_empty23a(queue) == true) {
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ /* when set_ssid/set_bssid for rtw_do_join23a(), but
+ scanning queue is empty */
+ /* we try to issue a sitesurvey first */
+
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic == false ||
+ rtw_to_roaming(padapter) > 0) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_do_join23a(): site survey if scanned_queue "
+ "is empty\n."));
+ /* submit site_survey23a_cmd */
+ ret = rtw_sitesurvey_cmd23a(padapter,
+ &pmlmepriv->assoc_ssid, 1,
+ NULL, 0);
+ if (ret != _SUCCESS) {
+ pmlmepriv->to_join = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_do_join23a(): site survey return "
+ "error\n."));
+ }
+ } else {
+ pmlmepriv->to_join = false;
+ ret = _FAIL;
+ }
+
+ goto exit;
+ } else {
+ int select_ret;
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ select_ret = rtw_select_and_join_from_scanned_queue23a(pmlmepriv);
+ if (select_ret == _SUCCESS) {
+ pmlmepriv->to_join = false;
+ mod_timer(&pmlmepriv->assoc_timer,
+ jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
+ } else {
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) {
+ struct wlan_bssid_ex *pdev_network;
+ /* submit createbss_cmd to change to a
+ ADHOC_MASTER */
+
+ /* pmlmepriv->lock has been acquired by
+ caller... */
+ pdev_network =
+ &padapter->registrypriv.dev_network;
+
+ pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
+
+ pibss = padapter->registrypriv.dev_network.MacAddress;
+
+ memcpy(&pdev_network->Ssid,
+ &pmlmepriv->assoc_ssid,
+ sizeof(struct cfg80211_ssid));
+
+ rtw_update_registrypriv_dev_network23a(padapter);
+
+ rtw_generate_random_ibss23a(pibss);
+
+ if (rtw_createbss_cmd23a(padapter) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_,
+ _drv_err_,
+ ("***Error =>do_goin: rtw_creat"
+ "ebss_cmd status FAIL***\n"));
+ ret = false;
+ goto exit;
+ }
+
+ pmlmepriv->to_join = false;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_,
+ _drv_info_,
+ ("***Error => rtw_select_and_join_from"
+ "_scanned_queue FAIL under STA_Mode"
+ "***\n "));
+ } else {
+ /* can't associate ; reset under-linking */
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ /* when set_ssid/set_bssid for rtw_do_join23a(),
+ but there are no desired bss in scanning
+ queue */
+ /* we try to issue a sitesurvey first */
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic ==
+ false || rtw_to_roaming(padapter) > 0) {
+ /* DBG_8723A("rtw_do_join23a() when no "
+ "desired bss in scanning queue\n");
+ */
+ ret = rtw_sitesurvey_cmd23a(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
+ if (ret != _SUCCESS) {
+ pmlmepriv->to_join = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("do_join(): site survey return error\n."));
+ }
+ } else {
+ ret = _FAIL;
+ pmlmepriv->to_join = false;
+ }
+ }
+ }
+ }
+
+exit:
+
+ return ret;
+}
+
+u8 rtw_set_802_11_ssid23a(struct rtw_adapter *padapter, struct cfg80211_ssid *ssid)
+{
+ u8 status = _SUCCESS;
+ u32 cur_time = 0;
+
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *pnetwork = &pmlmepriv->cur_network;
+
+
+
+ DBG_8723A_LEVEL(_drv_always_, "set ssid [%s] fw_state = 0x%08x\n",
+ ssid->ssid, get_fwstate(pmlmepriv));
+
+ if (padapter->hw_init_completed == false) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("set_ssid: hw_init_completed == false =>exit!!!\n"));
+ status = _FAIL;
+ goto exit;
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ DBG_8723A("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ goto handle_tkip_countermeasure;
+ } else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
+ goto release_mlme_lock;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true)
+ {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("set_ssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
+
+ if ((pmlmepriv->assoc_ssid.ssid_len == ssid->ssid_len) &&
+ !memcmp(&pmlmepriv->assoc_ssid.ssid, ssid->ssid,
+ ssid->ssid_len)) {
+ if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false))
+ {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("Set SSID is the same ssid, fw_state = 0x%08x\n",
+ get_fwstate(pmlmepriv)));
+
+ if (rtw_is_same_ibss23a(padapter, pnetwork) == false)
+ {
+ /* if in WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE, create bss or rejoin again */
+ rtw_disassoc_cmd23a(padapter, 0, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect23a(padapter);
+
+ rtw_free_assoc_resources23a(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+ } else {
+ goto release_mlme_lock;/* it means driver is in WIFI_ADHOC_MASTER_STATE, we needn't create bss again. */
+ }
+ } else {
+ rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_JOINBSS, 1);
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("Set SSID not the same ssid\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("set_ssid =[%s] len = 0x%x\n", ssid->ssid,
+ (unsigned int)ssid->ssid_len));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("assoc_ssid =[%s] len = 0x%x\n",
+ pmlmepriv->assoc_ssid.ssid,
+ (unsigned int)pmlmepriv->assoc_ssid.ssid_len));
+
+ rtw_disassoc_cmd23a(padapter, 0, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect23a(padapter);
+
+ rtw_free_assoc_resources23a(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+ }
+ }
+
+handle_tkip_countermeasure:
+
+ if (padapter->securitypriv.btkip_countermeasure == true) {
+ cur_time = jiffies;
+
+ if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ)
+ {
+ padapter->securitypriv.btkip_countermeasure = false;
+ padapter->securitypriv.btkip_countermeasure_time = 0;
+ }
+ else
+ {
+ status = _FAIL;
+ goto release_mlme_lock;
+ }
+ }
+
+ memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct cfg80211_ssid));
+ pmlmepriv->assoc_by_bssid = false;
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ pmlmepriv->to_join = true;
+ }
+ else {
+ status = rtw_do_join23a(padapter);
+ }
+
+release_mlme_lock:
+ spin_unlock_bh(&pmlmepriv->lock);
+
+exit:
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("-rtw_set_802_11_ssid23a: status =%d\n", status));
+
+
+
+ return status;
+}
+
+u8 rtw_set_802_11_infrastructure_mode23a(struct rtw_adapter *padapter,
+ enum ndis_802_11_net_infra networktype)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ enum ndis_802_11_net_infra *pold_state = &cur_network->network.InfrastructureMode;
+
+
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_notice_,
+ ("+rtw_set_802_11_infrastructure_mode23a: old =%d new =%d fw_state = 0x%08x\n",
+ *pold_state, networktype, get_fwstate(pmlmepriv)));
+
+ if (*pold_state != networktype)
+ {
+ spin_lock_bh(&pmlmepriv->lock);
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, (" change mode!"));
+ /* DBG_8723A("change mode, old_mode =%d, new_mode =%d, fw_state = 0x%x\n", *pold_state, networktype, get_fwstate(pmlmepriv)); */
+
+ if (*pold_state == Ndis802_11APMode)
+ {
+ /* change to other mode from Ndis802_11APMode */
+ cur_network->join_res = -1;
+
+#ifdef CONFIG_8723AU_AP_MODE
+ stop_ap_mode23a(padapter);
+#endif
+ }
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) ||(*pold_state == Ndis802_11IBSS))
+ rtw_disassoc_cmd23a(padapter, 0, true);
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true))
+ rtw_free_assoc_resources23a(padapter, 1);
+
+ if ((*pold_state == Ndis802_11Infrastructure) ||(*pold_state == Ndis802_11IBSS))
+ {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ rtw_indicate_disconnect23a(padapter); /* will clear Linked_state; before this function, we must have checked whether to issue dis-assoc_cmd or not */
+ }
+ }
+
+ *pold_state = networktype;
+
+ _clr_fwstate_(pmlmepriv, ~WIFI_NULL_STATE);
+
+ switch (networktype)
+ {
+ case Ndis802_11IBSS:
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ break;
+
+ case Ndis802_11Infrastructure:
+ set_fwstate(pmlmepriv, WIFI_STATION_STATE);
+ break;
+
+ case Ndis802_11APMode:
+ set_fwstate(pmlmepriv, WIFI_AP_STATE);
+#ifdef CONFIG_8723AU_AP_MODE
+ start_ap_mode23a(padapter);
+ /* rtw_indicate_connect23a(padapter); */
+#endif
+
+ break;
+
+ case Ndis802_11AutoUnknown:
+ case Ndis802_11InfrastructureMax:
+ break;
+ }
+
+ /* SecClearAllKeys(adapter); */
+
+ /* RT_TRACE(COMP_OID_SET, DBG_LOUD, ("set_infrastructure: fw_state:%x after changing mode\n", */
+ /* get_fwstate(pmlmepriv))); */
+
+ spin_unlock_bh(&pmlmepriv->lock);
+ }
+
+
+
+ return true;
+}
+
+u8 rtw_set_802_11_bssid23a_list_scan(struct rtw_adapter *padapter,
+ struct cfg80211_ssid *pssid, int ssid_max_num)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 res = true;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("+rtw_set_802_11_bssid23a_list_scan(), fw_state =%x\n",
+ get_fwstate(pmlmepriv)));
+
+ if (!padapter) {
+ res = false;
+ goto exit;
+ }
+ if (padapter->hw_init_completed == false) {
+ res = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("\n === rtw_set_802_11_bssid23a_list_scan:"
+ "hw_init_completed == false ===\n"));
+ goto exit;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING) ||
+ (pmlmepriv->LinkDetectInfo.bBusyTraffic == true)) {
+ /* Scan or linking is in progress, do nothing. */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_bssid23a_list_scan fail since fw_state "
+ "= %x\n", get_fwstate(pmlmepriv)));
+ res = true;
+
+ if (check_fwstate(pmlmepriv,
+ (_FW_UNDER_SURVEY|_FW_UNDER_LINKING))) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("\n###_FW_UNDER_SURVEY|_FW_UNDER_LINKING\n"));
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("\n###pmlmepriv->sitesurveyctrl.traffic_"
+ "busy == true\n"));
+ }
+ } else {
+ if (rtw_is_scan_deny(padapter)) {
+ DBG_8723A(FUNC_ADPT_FMT": scan deny\n",
+ FUNC_ADPT_ARG(padapter));
+ return _SUCCESS;
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ res = rtw_sitesurvey_cmd23a(padapter, pssid, ssid_max_num,
+ NULL, 0);
+
+ spin_unlock_bh(&pmlmepriv->lock);
+ }
+exit:
+ return res;
+}
+
+u8 rtw_set_802_11_authentication_mode23a(struct rtw_adapter *padapter,
+ enum ndis_802_11_auth_mode authmode)
+{
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ int res;
+ u8 ret;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("set_802_11_auth.mode(): mode =%x\n", authmode));
+
+ psecuritypriv->ndisauthtype = authmode;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_authentication_mode23a:"
+ "psecuritypriv->ndisauthtype =%d",
+ psecuritypriv->ndisauthtype));
+
+ if (psecuritypriv->ndisauthtype > 3)
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ res = rtw_set_auth23a(padapter, psecuritypriv);
+
+ if (res == _SUCCESS)
+ ret = true;
+ else
+ ret = false;
+
+ return ret;
+}
+
+u8 rtw_set_802_11_add_wep23a(struct rtw_adapter *padapter,
+ struct ndis_802_11_wep *wep)
+{
+ u8 bdefaultkey;
+ u8 btransmitkey;
+ int keyid, res;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u8 ret = _SUCCESS;
+
+ bdefaultkey = (wep->KeyIndex & 0x40000000) > 0 ? false : true;
+ btransmitkey = (wep->KeyIndex & 0x80000000) > 0 ? true : false;
+ keyid = wep->KeyIndex & 0x3fffffff;
+
+ if (keyid >= 4) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("MgntActrtw_set_802_11_add_wep23a:keyid>4 =>fail\n"));
+ ret = false;
+ goto exit;
+ }
+
+ switch (wep->KeyLength)
+ {
+ case 5:
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("MgntActrtw_set_802_11_add_wep23a:wep->KeyLength = 5\n"));
+ break;
+ case 13:
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("MgntActrtw_set_802_11_add_wep23a:wep->KeyLength = 13\n"));
+ break;
+ default:
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("MgntActrtw_set_802_11_add_wep23a:wep->KeyLength!= 5 "
+ "or 13\n"));
+ break;
+ }
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_wep23a:befor memcpy, wep->KeyLength = 0x%x "
+ "wep->KeyIndex = 0x%x keyid =%x\n",
+ wep->KeyLength, wep->KeyIndex, keyid));
+
+ memcpy(&psecuritypriv->dot11DefKey[keyid].skey[0],
+ &wep->KeyMaterial, wep->KeyLength);
+
+ psecuritypriv->dot11DefKeylen[keyid] = wep->KeyLength;
+
+ psecuritypriv->dot11PrivacyKeyIndex = keyid;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_wep23a:security key material : %x %x %x %x "
+ "%x %x %x %x %x %x %x %x %x\n",
+ psecuritypriv->dot11DefKey[keyid].skey[0],
+ psecuritypriv->dot11DefKey[keyid].skey[1],
+ psecuritypriv->dot11DefKey[keyid].skey[2],
+ psecuritypriv->dot11DefKey[keyid].skey[3],
+ psecuritypriv->dot11DefKey[keyid].skey[4],
+ psecuritypriv->dot11DefKey[keyid].skey[5],
+ psecuritypriv->dot11DefKey[keyid].skey[6],
+ psecuritypriv->dot11DefKey[keyid].skey[7],
+ psecuritypriv->dot11DefKey[keyid].skey[8],
+ psecuritypriv->dot11DefKey[keyid].skey[9],
+ psecuritypriv->dot11DefKey[keyid].skey[10],
+ psecuritypriv->dot11DefKey[keyid].skey[11],
+ psecuritypriv->dot11DefKey[keyid].skey[12]));
+
+ res = rtw_set_key23a(padapter, psecuritypriv, keyid, 1);
+
+ if (res == _FAIL)
+ ret = false;
+exit:
+
+ return ret;
+}
+
+/*
+ * rtw_get_cur_max_rate23a -
+ * @adapter: pointer to struct rtw_adapter
+ *
+ * Returns the current maximum rate in units of 100Kbps, or 0 if not linked.
+ */
+u16 rtw_get_cur_max_rate23a(struct rtw_adapter *adapter)
+{
+ int i = 0;
+ u8 *p;
+ u16 rate = 0, max_rate = 0;
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ struct ieee80211_ht_cap *pht_capie;
+ u8 rf_type = 0;
+ u8 bw_40MHz = 0, short_GI_20 = 0, short_GI_40 = 0;
+ u16 mcs_rate = 0;
+ u32 ht_ielen = 0;
+
+ if (!check_fwstate(pmlmepriv, _FW_LINKED) &&
+ !check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))
+ return 0;
+
+ if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N|WIRELESS_11_5N)) {
+ p = rtw_get_ie23a(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_,
+ &ht_ielen, pcur_bss->IELength - 12);
+ if (p && ht_ielen > 0) {
+ pht_capie = (struct ieee80211_ht_cap *)(p + 2);
+
+ memcpy(&mcs_rate, &pht_capie->mcs, 2);
+
+ /* bw_40MHz = (pht_capie->cap_info&
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1:0; */
+ /* cur_bwmod is updated by beacon, pmlmeinfo is
+ updated by association response */
+ bw_40MHz = (pmlmeext->cur_bwmode &&
+ (HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH &
+ pmlmeinfo->HT_info.infos[0])) ? 1:0;
+
+ /* short_GI = (pht_capie->cap_info & (IEEE80211_HT_CAP
+ _SGI_20|IEEE80211_HT_CAP_SGI_40)) ? 1 : 0; */
+ short_GI_20 = (pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info&IEEE80211_HT_CAP_SGI_20) ? 1:0;
+ short_GI_40 = (pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info&IEEE80211_HT_CAP_SGI_40) ? 1:0;
+
+ rtw23a_hal_get_hwreg(adapter, HW_VAR_RF_TYPE,
+ (u8 *)(&rf_type));
+ max_rate = rtw_mcs_rate23a(rf_type, bw_40MHz &
+ pregistrypriv->cbw40_enable,
+ short_GI_20, short_GI_40,
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate
+ );
+ }
+ } else {
+ while ((pcur_bss->SupportedRates[i] != 0) &&
+ (pcur_bss->SupportedRates[i] != 0xFF)) {
+ rate = pcur_bss->SupportedRates[i] & 0x7F;
+ if (rate > max_rate)
+ max_rate = rate;
+ i++;
+ }
+
+ max_rate = max_rate * 10 / 2;
+ }
+
+ return max_rate;
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_led.c b/drivers/staging/rtl8723au/core/rtw_led.c
new file mode 100644
index 000000000000..68532a3b2c14
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_led.c
@@ -0,0 +1,1899 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include <drv_types.h>
+#include <rtl8723a_led.h>
+
+/* */
+/* Description: */
+/* Callback function of LED BlinkTimer, */
+/* it just schedules to corresponding BlinkWorkItem/led_blink_hdl23a */
+/* */
+static void BlinkTimerCallback(unsigned long data)
+{
+ struct led_8723a *pLed = (struct led_8723a *)data;
+ struct rtw_adapter *padapter = pLed->padapter;
+
+ /* DBG_8723A("%s\n", __func__); */
+
+ if ((padapter->bSurpriseRemoved == true) || (padapter->bDriverStopped == true))
+ {
+ /* DBG_8723A("%s bSurpriseRemoved:%d, bDriverStopped:%d\n", __func__, padapter->bSurpriseRemoved, padapter->bDriverStopped); */
+ return;
+ }
+ schedule_work(&pLed->BlinkWorkItem);
+}
+
+/* */
+/* Description: */
+/* Callback function of LED BlinkWorkItem. */
+/* We dispatch the actual LED blink action according to LedStrategy. */
+/* */
+void BlinkWorkItemCallback23a(struct work_struct *work)
+{
+ struct led_8723a *pLed = container_of(work, struct led_8723a, BlinkWorkItem);
+ BlinkHandler23a(pLed);
+}
+
+/* */
+/* Description: */
+/* Reset status of led_8723a object. */
+/* */
+void ResetLedStatus23a(struct led_8723a *pLed)
+{
+ pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */
+ pLed->bLedOn = false; /* true if LED is ON, false if LED is OFF. */
+
+ pLed->bLedBlinkInProgress = false; /* true if it is blinking, false otherwise. */
+ pLed->bLedWPSBlinkInProgress = false;
+
+ pLed->BlinkTimes = 0; /* Number of times to toggle led state for blinking. */
+ pLed->BlinkingLedState = LED_UNKNOWN; /* Next state for blinking, either RTW_LED_ON or RTW_LED_OFF. */
+
+ pLed->bLedNoLinkBlinkInProgress = false;
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedStartToLinkBlinkInProgress = false;
+ pLed->bLedScanBlinkInProgress = false;
+}
+
+/* */
+/* Description: */
+/* Initialize an led_8723a object. */
+/* */
+void
+InitLed871x23a(struct rtw_adapter *padapter, struct led_8723a *pLed, enum led_pin_8723a LedPin)
+{
+ pLed->padapter = padapter;
+ pLed->LedPin = LedPin;
+
+ ResetLedStatus23a(pLed);
+
+ setup_timer(&pLed->BlinkTimer, BlinkTimerCallback, (unsigned long)pLed);
+
+ INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback23a);
+}
+
+/* */
+/* Description: */
+/* DeInitialize an led_8723a object. */
+/* */
+void
+DeInitLed871x23a(struct led_8723a *pLed)
+{
+ cancel_work_sync(&pLed->BlinkWorkItem);
+ del_timer_sync(&pLed->BlinkTimer);
+ ResetLedStatus23a(pLed);
+}
+
+/* Description: */
+/* Implementation of LED blinking behavior. */
+/* It toggles the LED on/off and schedules the corresponding timer if necessary. */
+
+static void SwLedBlink(struct led_8723a *pLed)
+{
+ struct rtw_adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ /* Determine if we shall change LED state again. */
+ pLed->BlinkTimes--;
+ switch (pLed->CurrLedState) {
+
+ case LED_BLINK_NORMAL:
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ break;
+ case LED_BLINK_StartToBlink:
+ if (check_fwstate(pmlmepriv, _FW_LINKED) &&
+ check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ bStopBlinking = true;
+ if (check_fwstate(pmlmepriv, _FW_LINKED) &&
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
+ bStopBlinking = true;
+ else if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ break;
+ case LED_BLINK_WPS:
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ break;
+ default:
+ bStopBlinking = true;
+ break;
+ }
+
+ if (bStopBlinking) {
+ if ((check_fwstate(pmlmepriv, _FW_LINKED)) && !pLed->bLedOn)
+ SwLedOn23a(padapter, pLed);
+ else if ((check_fwstate(pmlmepriv, _FW_LINKED)) && pLed->bLedOn)
+ SwLedOff23a(padapter, pLed);
+
+ pLed->BlinkTimes = 0;
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ /* Assign LED state to toggle. */
+ if (pLed->BlinkingLedState == RTW_LED_ON)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ /* Schedule a timer to toggle LED state. */
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_NORMAL:
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
+ break;
+ case LED_BLINK_SLOWLY:
+ case LED_BLINK_StartToBlink:
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
+ break;
+ case LED_BLINK_WPS:
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_LONG_INTERVAL));
+ break;
+ default:
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
+ break;
+ }
+ }
+}
+
+static void SwLedBlink1(struct led_8723a *pLed)
+{
+ struct rtw_adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long delay = 0;
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff23a(padapter, pLed);
+ ResetLedStatus23a(pLed);
+ return;
+ }
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SLOWLY:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
+ break;
+ case LED_BLINK_NORMAL:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_LINK_INTERVAL_ALPHA;
+ break;
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_LINK_INTERVAL_ALPHA;
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ } else if (check_fwstate(pmlmepriv, _FW_LINKED) == false) {
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_LINK_INTERVAL_ALPHA;
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ } else if (check_fwstate(pmlmepriv,
+ _FW_LINKED) == false) {
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->BlinkTimes = 0;
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
+ }
+ break;
+ case LED_BLINK_WPS:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
+ break;
+ case LED_BLINK_WPS_STOP: /* WPS success */
+ if (pLed->BlinkingLedState == RTW_LED_ON)
+ bStopBlinking = false;
+ else
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_LINK_INTERVAL_ALPHA;
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+
+ pLed->bLedWPSBlinkInProgress = false;
+ } else {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ delay = LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA;
+ }
+ break;
+ default:
+ break;
+ }
+ if (delay)
+ mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
+}
+
+static void SwLedBlink2(struct led_8723a *pLed)
+{
+ struct rtw_adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff23a(padapter, pLed);
+ } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ SwLedOn23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("stop scan blink CurrLedState %d\n",
+ pLed->CurrLedState));
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ SwLedOff23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("stop scan blink CurrLedState %d\n",
+ pLed->CurrLedState));
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff23a(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer,
+ jiffies + msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ }
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff23a(padapter, pLed);
+ } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ SwLedOn23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("stop CurrLedState %d\n", pLed->CurrLedState));
+
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ SwLedOff23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("stop CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff23a(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer,
+ jiffies + msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void SwLedBlink3(struct led_8723a *pLed)
+{
+ struct rtw_adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON)
+ {
+ SwLedOn23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ }
+ else
+ {
+ if (pLed->CurrLedState != LED_BLINK_WPS_STOP)
+ SwLedOff23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ switch (pLed->CurrLedState)
+ {
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ {
+ bStopBlinking = true;
+ }
+
+ if (bStopBlinking)
+ {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
+ {
+ SwLedOff23a(padapter, pLed);
+ }
+ else if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (!pLed->bLedOn)
+ SwLedOn23a(padapter, pLed);
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ else if (check_fwstate(pmlmepriv, _FW_LINKED) == false)
+ {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedOn)
+ SwLedOff23a(padapter, pLed);
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ else
+ {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
+ {
+ SwLedOff23a(padapter, pLed);
+ }
+ else
+ {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer,
+ jiffies + msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ {
+ bStopBlinking = true;
+ }
+ if (bStopBlinking)
+ {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
+ {
+ SwLedOff23a(padapter, pLed);
+ }
+ else if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ if (!pLed->bLedOn)
+ SwLedOn23a(padapter, pLed);
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ else if (check_fwstate(pmlmepriv, _FW_LINKED) == false)
+ {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+
+ if (pLed->bLedOn)
+ SwLedOff23a(padapter, pLed);
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedBlinkInProgress = false;
+ }
+ else
+ {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
+ {
+ SwLedOff23a(padapter, pLed);
+ }
+ else
+ {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer,
+ jiffies + msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+ case LED_BLINK_WPS:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ break;
+
+ case LED_BLINK_WPS_STOP: /* WPS success */
+ if (pLed->BlinkingLedState == RTW_LED_ON)
+ {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA));
+ bStopBlinking = false;
+ } else {
+ bStopBlinking = true;
+ }
+
+ if (bStopBlinking)
+ {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
+ {
+ SwLedOff23a(padapter, pLed);
+ }
+ else
+ {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ SwLedOn23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void SwLedBlink4(struct led_8723a *pLed)
+{
+ struct rtw_adapter *padapter = pLed->padapter;
+ struct led_priv *ledpriv = &padapter->ledpriv;
+ struct led_8723a *pLed1 = &ledpriv->SwLed1;
+ u8 bStopBlinking = false;
+ unsigned long delay = 0;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON)
+ {
+ SwLedOn23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ if (!pLed1->bLedWPSBlinkInProgress && pLed1->BlinkingLedState == LED_UNKNOWN)
+ {
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+ SwLedOff23a(padapter, pLed1);
+ }
+
+ switch (pLed->CurrLedState)
+ {
+ case LED_BLINK_SLOWLY:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
+ break;
+
+ case LED_BLINK_StartToBlink:
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ delay = LED_BLINK_SLOWLY_INTERVAL;
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_NORMAL_INTERVAL;
+ }
+ break;
+
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0) {
+ bStopBlinking = true;
+ }
+
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff23a(padapter, pLed);
+ } else {
+ pLed->bLedNoLinkBlinkInProgress = false;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff23a(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
+ }
+ }
+ break;
+
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0) {
+ bStopBlinking = true;
+ }
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff23a(padapter, pLed);
+ } else {
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
+ }
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff23a(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
+ }
+ }
+ break;
+
+ case LED_BLINK_WPS:
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ delay = LED_BLINK_SLOWLY_INTERVAL;
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_NORMAL_INTERVAL;
+ }
+ break;
+
+ case LED_BLINK_WPS_STOP: /* WPS authentication fail */
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ delay = LED_BLINK_NORMAL_INTERVAL;
+ break;
+
+ case LED_BLINK_WPS_STOP_OVERLAP: /* WPS session overlap */
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0) {
+ if (pLed->bLedOn) {
+ pLed->BlinkTimes = 1;
+ } else {
+ bStopBlinking = true;
+ }
+ }
+
+ if (bStopBlinking) {
+ pLed->BlinkTimes = 10;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_LINK_INTERVAL_ALPHA;
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ delay = LED_BLINK_NORMAL_INTERVAL;
+ }
+ break;
+
+ default:
+ break;
+ }
+ if (delay)
+ mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("SwLedBlink4 CurrLedState %d\n", pLed->CurrLedState));
+}
+
+static void SwLedBlink5(struct led_8723a *pLed)
+{
+ struct rtw_adapter *padapter = pLed->padapter;
+ u8 bStopBlinking = false;
+ unsigned long delay = 0;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ switch (pLed->CurrLedState)
+ {
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0) {
+ bStopBlinking = true;
+ }
+
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedOn)
+ SwLedOff23a(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (!pLed->bLedOn)
+ delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
+ }
+
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff23a(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
+ }
+ }
+ break;
+
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0) {
+ bStopBlinking = true;
+ }
+
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedOn)
+ SwLedOff23a(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (!pLed->bLedOn)
+ delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
+ }
+
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff23a(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (delay)
+ mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("SwLedBlink5 CurrLedState %d\n", pLed->CurrLedState));
+}
+
+static void SwLedBlink6(struct led_8723a *pLed)
+{
+ struct rtw_adapter *padapter = pLed->padapter;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff23a(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("<==== blink6\n"));
+}
+
+/* ALPHA, added by chiyoko, 20090106 */
+static void
+SwLedControlMode1(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
+{
+ struct led_priv *ledpriv = &padapter->ledpriv;
+ struct led_8723a *pLed = &ledpriv->SwLed0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ long delay = -1;
+
+ switch (LedAction)
+ {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if (pLed->bLedNoLinkBlinkInProgress == false) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN ||
+ IS_LED_WPS_BLINKING(pLed)) {
+ return;
+ }
+ if (pLed->bLedLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
+ }
+ break;
+
+ case LED_CTL_LINK:
+ if (pLed->bLedLinkBlinkInProgress == false) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN ||
+ IS_LED_WPS_BLINKING(pLed)) {
+ return;
+ }
+ if (pLed->bLedNoLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_LINK_INTERVAL_ALPHA;
+ }
+ break;
+
+ case LED_CTL_SITE_SURVEY:
+ if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) &&
+ (check_fwstate(pmlmepriv, _FW_LINKED) == true))
+ ;
+ else if (pLed->bLedScanBlinkInProgress == false) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if (pLed->bLedNoLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
+ }
+ break;
+
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if (pLed->bLedBlinkInProgress == false) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN ||
+ IS_LED_WPS_BLINKING(pLed)) {
+ return;
+ }
+ if (pLed->bLedNoLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
+ }
+ break;
+
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (pLed->bLedWPSBlinkInProgress == false) {
+ if (pLed->bLedNoLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
+ }
+ break;
+
+ case LED_CTL_STOP_WPS:
+ if (pLed->bLedNoLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ } else {
+ pLed->bLedWPSBlinkInProgress = true;
+ }
+
+ pLed->CurrLedState = LED_BLINK_WPS_STOP;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ delay = LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA;
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = 0;
+ }
+ break;
+
+ case LED_CTL_STOP_WPS_FAIL:
+ if (pLed->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
+ break;
+
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedScanBlinkInProgress = false;
+ }
+
+ SwLedOff23a(padapter, pLed);
+ break;
+
+ default:
+ break;
+
+ }
+
+ if (delay != -1)
+ mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
+}
+
+ /* Arcadyan/Sitecom, added by chiyoko, 20090216 */
+static void
+SwLedControlMode2(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
+{
+ struct led_priv *ledpriv = &padapter->ledpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct led_8723a *pLed = &ledpriv->SwLed0;
+ long delay = -1;
+
+ switch (LedAction) {
+ case LED_CTL_SITE_SURVEY:
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
+ ;
+ else if (pLed->bLedScanBlinkInProgress == false) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
+ }
+ break;
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if ((pLed->bLedBlinkInProgress == false) &&
+ (check_fwstate(pmlmepriv, _FW_LINKED) == true)) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN ||
+ IS_LED_WPS_BLINKING(pLed)) {
+ return;
+ }
+
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
+ }
+ break;
+ case LED_CTL_LINK:
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (pLed->bLedBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedScanBlinkInProgress = false;
+ }
+
+ delay = 0;
+ break;
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (pLed->bLedWPSBlinkInProgress == false) {
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = 0;
+ }
+ break;
+ case LED_CTL_STOP_WPS:
+ pLed->bLedWPSBlinkInProgress = false;
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff23a(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = 0;
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ break;
+ case LED_CTL_STOP_WPS_FAIL:
+ pLed->bLedWPSBlinkInProgress = false;
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff23a(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ delay = 0;
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ break;
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if (!IS_LED_BLINKING(pLed)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ delay = 0;
+ }
+ break;
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ delay = 0;
+ break;
+ default:
+ break;
+
+ }
+
+ if (delay != -1)
+ mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+}
+
+ /* COREGA, added by chiyoko, 20090316 */
+static void
+SwLedControlMode3(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
+{
+ struct led_priv *ledpriv = &padapter->ledpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct led_8723a *pLed = &ledpriv->SwLed0;
+ long delay = -1;
+
+ switch (LedAction) {
+ case LED_CTL_SITE_SURVEY:
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
+ ;
+ else if (pLed->bLedScanBlinkInProgress == false) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
+ }
+ break;
+
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if ((pLed->bLedBlinkInProgress == false) &&
+ (check_fwstate(pmlmepriv, _FW_LINKED) == true)) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN ||
+ IS_LED_WPS_BLINKING(pLed)) {
+ return;
+ }
+
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
+ }
+ break;
+
+ case LED_CTL_LINK:
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (pLed->bLedBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedScanBlinkInProgress = false;
+ }
+
+ delay = 0;
+ break;
+
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (pLed->bLedWPSBlinkInProgress == false) {
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
+ }
+ break;
+
+ case LED_CTL_STOP_WPS:
+ if (pLed->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedWPSBlinkInProgress = false;
+ } else {
+ pLed->bLedWPSBlinkInProgress = true;
+ }
+
+ pLed->CurrLedState = LED_BLINK_WPS_STOP;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ delay = LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA;
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ delay = 0;
+ }
+
+ break;
+
+ case LED_CTL_STOP_WPS_FAIL:
+ if (pLed->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ delay = 0;
+ break;
+
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if (!IS_LED_BLINKING(pLed)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ delay = 0;
+ }
+ break;
+
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ delay = 0;
+ break;
+
+ default:
+ break;
+
+ }
+
+ if (delay != -1)
+ mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+}
+
+ /* Edimax-Belkin, added by chiyoko, 20090413 */
+static void
+SwLedControlMode4(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
+{
+ struct led_priv *ledpriv = &padapter->ledpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct led_8723a *pLed = &ledpriv->SwLed0;
+ struct led_8723a *pLed1 = &ledpriv->SwLed1;
+
+ switch (LedAction) {
+ case LED_CTL_START_TO_LINK:
+ if (pLed1->bLedWPSBlinkInProgress) {
+ pLed1->bLedWPSBlinkInProgress = false;
+ del_timer_sync(&pLed1->BlinkTimer);
+
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+
+ if (pLed1->bLedOn)
+ mod_timer(&pLed1->BlinkTimer, jiffies);
+ }
+
+ if (pLed->bLedStartToLinkBlinkInProgress == false) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN ||
+ IS_LED_WPS_BLINKING(pLed)) {
+ return;
+ }
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedNoLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+
+ pLed->bLedStartToLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_StartToBlink;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ mod_timer(&pLed->BlinkTimer,
+ jiffies + msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer,
+ jiffies + msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
+ }
+ }
+ break;
+
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ /* LED1 settings */
+ if (LedAction == LED_CTL_LINK) {
+ if (pLed1->bLedWPSBlinkInProgress) {
+ pLed1->bLedWPSBlinkInProgress = false;
+ del_timer_sync(&pLed1->BlinkTimer);
+
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+
+ if (pLed1->bLedOn)
+ mod_timer(&pLed1->BlinkTimer, jiffies);
+ }
+ }
+
+ if (pLed->bLedNoLinkBlinkInProgress == false) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN ||
+ IS_LED_WPS_BLINKING(pLed)) {
+ return;
+ }
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_SITE_SURVEY:
+ if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) &&
+ (check_fwstate(pmlmepriv, _FW_LINKED) == true))
+ ;
+ else if (pLed->bLedScanBlinkInProgress == false) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if (pLed->bLedNoLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if (pLed->bLedBlinkInProgress == false) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN ||
+ IS_LED_WPS_BLINKING(pLed)) {
+ return;
+ }
+ if (pLed->bLedNoLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (pLed1->bLedWPSBlinkInProgress) {
+ pLed1->bLedWPSBlinkInProgress = false;
+ del_timer_sync(&pLed1->BlinkTimer);
+
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+
+ if (pLed1->bLedOn)
+ mod_timer(&pLed1->BlinkTimer, jiffies);
+ }
+
+ if (pLed->bLedWPSBlinkInProgress == false) {
+ if (pLed->bLedNoLinkBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
+ }
+ }
+ break;
+
+ case LED_CTL_STOP_WPS: /* WPS connect success */
+ if (pLed->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+ break;
+
+ case LED_CTL_STOP_WPS_FAIL: /* WPS authentication fail */
+ if (pLed->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+
+ /* LED1 settings */
+ if (pLed1->bLedWPSBlinkInProgress)
+ del_timer_sync(&pLed1->BlinkTimer);
+ else
+ pLed1->bLedWPSBlinkInProgress = true;
+
+ pLed1->CurrLedState = LED_BLINK_WPS_STOP;
+ if (pLed1->bLedOn)
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed1->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed1->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
+
+ break;
+
+ case LED_CTL_STOP_WPS_FAIL_OVERLAP: /* WPS session overlap */
+ if (pLed->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+
+ /* LED1 settings */
+ if (pLed1->bLedWPSBlinkInProgress)
+ del_timer_sync(&pLed1->BlinkTimer);
+ else
+ pLed1->bLedWPSBlinkInProgress = true;
+
+ pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
+ pLed1->BlinkTimes = 10;
+ if (pLed1->bLedOn)
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed1->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed1->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
+
+ break;
+
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedStartToLinkBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedStartToLinkBlinkInProgress = false;
+ }
+
+ if (pLed1->bLedWPSBlinkInProgress) {
+ del_timer_sync(&pLed1->BlinkTimer);
+ pLed1->bLedWPSBlinkInProgress = false;
+ }
+
+ pLed1->BlinkingLedState = LED_UNKNOWN;
+ SwLedOff23a(padapter, pLed);
+ SwLedOff23a(padapter, pLed1);
+ break;
+
+ default:
+ break;
+
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
+}
+
+ /* Sercomm-Belkin, added by chiyoko, 20090415 */
+static void
+SwLedControlMode5(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
+{
+ struct led_priv *ledpriv = &padapter->ledpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct led_8723a *pLed = &ledpriv->SwLed0;
+
+ switch (LedAction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_NO_LINK:
+ case LED_CTL_LINK: /* solid blue */
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ mod_timer(&pLed->BlinkTimer, jiffies);
+ break;
+
+ case LED_CTL_SITE_SURVEY:
+ if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED) == true))
+ ;
+ else if (pLed->bLedScanBlinkInProgress == false) {
+ if (pLed->bLedBlinkInProgress == true) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if (pLed->bLedBlinkInProgress == false) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN) {
+ return;
+ }
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed->BlinkTimer, jiffies +
+ msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+
+ if (pLed->bLedBlinkInProgress) {
+ del_timer_sync(&pLed->BlinkTimer);
+ pLed->bLedBlinkInProgress = false;
+ }
+
+ SwLedOff23a(padapter, pLed);
+ break;
+
+ default:
+ break;
+
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
+}
+
+ /* WNC-Corega, added by chiyoko, 20090902 */
+static void SwLedControlMode6(struct rtw_adapter *padapter,
+ enum led_ctl_mode LedAction)
+{
+ struct led_priv *ledpriv = &padapter->ledpriv;
+ struct led_8723a *pLed0 = &ledpriv->SwLed0;
+
+ switch (LedAction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ del_timer_sync(&pLed0->BlinkTimer);
+ pLed0->CurrLedState = RTW_LED_ON;
+ pLed0->BlinkingLedState = RTW_LED_ON;
+ mod_timer(&pLed0->BlinkTimer, jiffies);
+ break;
+ case LED_CTL_POWER_OFF:
+ SwLedOff23a(padapter, pLed0);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("ledcontrol 6 Led %d\n", pLed0->CurrLedState));
+}
+
+/* */
+/* Description: */
+/* Handler function of LED Blinking. */
+/* We dispatch the actual LED blink action according to LedStrategy. */
+/* */
+void BlinkHandler23a(struct led_8723a *pLed)
+{
+ struct rtw_adapter *padapter = pLed->padapter;
+ struct led_priv *ledpriv = &padapter->ledpriv;
+
+ /* DBG_8723A("%s (%s:%d)\n", __func__, current->comm, current->pid); */
+
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
+ return;
+
+ switch (ledpriv->LedStrategy) {
+ case SW_LED_MODE0:
+ SwLedBlink(pLed);
+ break;
+ case SW_LED_MODE1:
+ SwLedBlink1(pLed);
+ break;
+ case SW_LED_MODE2:
+ SwLedBlink2(pLed);
+ break;
+ case SW_LED_MODE3:
+ SwLedBlink3(pLed);
+ break;
+ case SW_LED_MODE4:
+ SwLedBlink4(pLed);
+ break;
+ case SW_LED_MODE5:
+ SwLedBlink5(pLed);
+ break;
+ case SW_LED_MODE6:
+ SwLedBlink6(pLed);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+LedControl871x23a(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
+{
+ struct led_priv *ledpriv = &padapter->ledpriv;
+
+ if ((padapter->bSurpriseRemoved == true) ||
+ (padapter->bDriverStopped == true) ||
+ (padapter->hw_init_completed == false)) {
+ return;
+ }
+
+ if (ledpriv->bRegUseLed == false)
+ return;
+
+ /* if (!priv->up) */
+ /* return; */
+
+ /* if (priv->bInHctTest) */
+ /* return; */
+
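+ /*
+ * If the RF is off for a reason beyond power save, skip LED actions
+ * that would only serve to light the LED again (TX/RX, scan, link
+ * state changes, power on); LED_CTL_POWER_OFF still passes through.
+ */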
+ if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on &&
+ padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) &&
+ (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
+ LedAction == LED_CTL_SITE_SURVEY ||
+ LedAction == LED_CTL_LINK ||
+ LedAction == LED_CTL_NO_LINK ||
+ LedAction == LED_CTL_POWER_ON)) {
+ return;
+ }
+
+ switch (ledpriv->LedStrategy) {
+ case SW_LED_MODE0:
+ break;
+ case SW_LED_MODE1:
+ SwLedControlMode1(padapter, LedAction);
+ break;
+ case SW_LED_MODE2:
+ SwLedControlMode2(padapter, LedAction);
+ break;
+ case SW_LED_MODE3:
+ SwLedControlMode3(padapter, LedAction);
+ break;
+ case SW_LED_MODE4:
+ SwLedControlMode4(padapter, LedAction);
+ break;
+ case SW_LED_MODE5:
+ SwLedControlMode5(padapter, LedAction);
+ break;
+ case SW_LED_MODE6:
+ SwLedControlMode6(padapter, LedAction);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("LedStrategy:%d, LedAction %d\n", ledpriv->LedStrategy, LedAction));
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme.c b/drivers/staging/rtl8723au/core/rtw_mlme.c
new file mode 100644
index 000000000000..6cee78785bdc
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_mlme.c
@@ -0,0 +1,2500 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_MLME_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <xmit_osdep.h>
+#include <hal_intf.h>
+#include <mlme_osdep.h>
+#include <sta_info.h>
+#include <linux/ieee80211.h>
+#include <wifi.h>
+#include <wlan_bssdef.h>
+#include <rtw_ioctl_set.h>
+
+extern u8 rtw_do_join23a(struct rtw_adapter *padapter);
+
+static void rtw_init_mlme_timer(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ setup_timer(&pmlmepriv->assoc_timer, rtw23a_join_to_handler,
+ (unsigned long)padapter);
+
+ setup_timer(&pmlmepriv->scan_to_timer, rtw_scan_timeout_handler23a,
+ (unsigned long)padapter);
+
+ setup_timer(&pmlmepriv->dynamic_chk_timer,
+ rtw_dynamic_check_timer_handler, (unsigned long)padapter);
+
+ setup_timer(&pmlmepriv->set_scan_deny_timer,
+ rtw_set_scan_deny_timer_hdl, (unsigned long)padapter);
+}
+
+int _rtw_init_mlme_priv23a(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int res = _SUCCESS;
+
+ pmlmepriv->nic_hdl = padapter;
+
+ pmlmepriv->fw_state = 0;
+ pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
+ pmlmepriv->scan_mode = SCAN_ACTIVE; /* 1: active, 0: passive. Maybe someday we should rename this variable to "active_mode" (Jeff) */
+
+ spin_lock_init(&pmlmepriv->lock);
+ _rtw_init_queue23a(&pmlmepriv->scanned_queue);
+
+ memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct cfg80211_ssid));
+
+ /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
+
+ rtw_clear_scan_deny(padapter);
+
+ rtw_init_mlme_timer(padapter);
+ return res;
+}
+
+#ifdef CONFIG_8723AU_AP_MODE
+static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
+{
+ if (*ppie) {
+ kfree(*ppie);
+ *plen = 0;
+ *ppie = NULL;
+ }
+}
+#endif
+
+void rtw23a_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
+{
+#ifdef CONFIG_8723AU_AP_MODE
+ kfree(pmlmepriv->assoc_req);
+ kfree(pmlmepriv->assoc_rsp);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_beacon_ie, &pmlmepriv->wps_beacon_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie, &pmlmepriv->wps_probe_req_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_resp_ie, &pmlmepriv->wps_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_assoc_resp_ie, &pmlmepriv->wps_assoc_resp_ie_len);
+
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_beacon_ie, &pmlmepriv->p2p_beacon_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_req_ie, &pmlmepriv->p2p_probe_req_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_resp_ie, &pmlmepriv->p2p_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_go_probe_resp_ie, &pmlmepriv->p2p_go_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_assoc_req_ie, &pmlmepriv->p2p_assoc_req_ie_len);
+
+ rtw_free_mlme_ie_data(&pmlmepriv->wfd_beacon_ie, &pmlmepriv->wfd_beacon_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wfd_probe_req_ie, &pmlmepriv->wfd_probe_req_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wfd_probe_resp_ie, &pmlmepriv->wfd_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wfd_go_probe_resp_ie, &pmlmepriv->wfd_go_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wfd_assoc_req_ie, &pmlmepriv->wfd_assoc_req_ie_len);
+#endif
+}
+
+void _rtw_free_mlme_priv23a(struct mlme_priv *pmlmepriv)
+{
+
+ rtw23a_free_mlme_priv_ie_data(pmlmepriv);
+
+}
+
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
+{
+ struct wlan_network *pnetwork;
+
+ pnetwork = kzalloc(sizeof(struct wlan_network), GFP_ATOMIC);
+ if (pnetwork) {
+ INIT_LIST_HEAD(&pnetwork->list);
+ pnetwork->network_type = 0;
+ pnetwork->fixed = false;
+ pnetwork->last_scanned = jiffies;
+ pnetwork->aid = 0;
+ pnetwork->join_res = 0;
+ }
+
+ return pnetwork;
+}
+
+void _rtw_free_network23a(struct mlme_priv *pmlmepriv,
+ struct wlan_network *pnetwork, u8 isfreeall)
+{
+ u32 lifetime = SCANQUEUE_LIFETIME;
+
+ if (!pnetwork)
+ return;
+
+ if (pnetwork->fixed == true)
+ return;
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
+ lifetime = 1;
+
+ list_del_init(&pnetwork->list);
+
+ kfree(pnetwork);
+}
+
+void _rtw_free_network23a_nolock23a(struct mlme_priv *pmlmepriv,
+ struct wlan_network *pnetwork)
+{
+
+ if (pnetwork == NULL)
+ return;
+
+ if (pnetwork->fixed == true)
+ return;
+
+ list_del_init(&pnetwork->list);
+
+ kfree(pnetwork);
+}
+
+/*
+ return the wlan_network with the matching addr
+
+ Shall be called under atomic context... to avoid a possible race condition...
+*/
+struct wlan_network *
+_rtw_find_network23a(struct rtw_queue *scanned_queue, u8 *addr)
+{
+ struct list_head *phead, *plist;
+ struct wlan_network *pnetwork = NULL;
+
+ if (is_zero_ether_addr(addr)) {
+ pnetwork = NULL;
+ goto exit;
+ }
+
+ /* spin_lock_bh(&scanned_queue->lock); */
+
+ phead = get_list_head(scanned_queue);
+ plist = phead->next;
+
+ while (plist != phead) {
+ pnetwork = container_of(plist, struct wlan_network, list);
+
+ if (ether_addr_equal(addr, pnetwork->network.MacAddress))
+ break;
+
+ plist = plist->next;
+ }
+
+ if (plist == phead)
+ pnetwork = NULL;
+
+ /* spin_unlock_bh(&scanned_queue->lock); */
+
+exit:
+
+ return pnetwork;
+}
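+
+/*
+ * Illustrative only (not part of the driver flow): callers are expected to
+ * hold the scanned_queue lock around the lookup, e.g. (using the same local
+ * names as the callers further down in this file):
+ *
+ * spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+ * pwlan = _rtw_find_network23a(&pmlmepriv->scanned_queue, addr);
+ * spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ */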
+
+void _rtw_free_network23a_queue23a(struct rtw_adapter *padapter, u8 isfreeall)
+{
+ struct list_head *phead, *plist, *ptmp;
+ struct wlan_network *pnetwork;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct rtw_queue *scanned_queue = &pmlmepriv->scanned_queue;
+
+ spin_lock_bh(&scanned_queue->lock);
+
+ phead = get_list_head(scanned_queue);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ pnetwork = container_of(plist, struct wlan_network, list);
+
+ _rtw_free_network23a(pmlmepriv, pnetwork, isfreeall);
+ }
+
+ spin_unlock_bh(&scanned_queue->lock);
+
+}
+
+int rtw_if_up23a(struct rtw_adapter *padapter)
+{
+ int res;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
+ check_fwstate(&padapter->mlmepriv, _FW_LINKED) == false) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_if_up23a:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved));
+ res = false;
+ } else {
+ res = true;
+ }
+
+ return res;
+}
+
+void rtw_generate_random_ibss23a(u8 *pibss)
+{
+ unsigned long curtime = jiffies;
+
+ pibss[0] = 0x02; /* in ad-hoc mode, bit 1 of the first octet must be set to 1 */
+ pibss[1] = 0x11;
+ pibss[2] = 0x87;
+ pibss[3] = (u8)(curtime & 0xff); /* p[0]; */
+ pibss[4] = (u8)((curtime >> 8) & 0xff); /* p[1]; */
+ pibss[5] = (u8)((curtime >> 16) & 0xff); /* p[2]; */
+}
+
+u8 *rtw_get_capability23a_from_ie(u8 *ie)
+{
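+ /* the capability field follows the 8-byte timestamp and the 2-byte beacon interval */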
+ return ie + 8 + 2;
+}
+
+u16 rtw_get_capability23a(struct wlan_bssid_ex *bss)
+{
+ u16 val;
+
+ memcpy((u8 *)&val, rtw_get_capability23a_from_ie(bss->IEs), 2);
+
+ return le16_to_cpu(val);
+}
+
+u8 *rtw_get_timestampe_from_ie23a(u8 *ie)
+{
+ return ie + 0;
+}
+
+u8 *rtw_get_beacon_interval23a_from_ie(u8 *ie)
+{
+ return ie + 8;
+}
+
+int rtw_init_mlme_priv23a(struct rtw_adapter *padapter)/* struct mlme_priv *pmlmepriv) */
+{
+ int res;
+
+ res = _rtw_init_mlme_priv23a(padapter);/* (pmlmepriv); */
+
+ return res;
+}
+
+void rtw_free_mlme_priv23a(struct mlme_priv *pmlmepriv)
+{
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_mlme_priv23a\n"));
+ _rtw_free_mlme_priv23a(pmlmepriv);
+
+}
+
+void rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 is_freeall);
+void rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 is_freeall)/* struct wlan_network *pnetwork, _queue *free_queue) */
+{
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("rtw_free_network ==> ssid = %s\n\n" ,
+ pnetwork->network.Ssid.ssid));
+ _rtw_free_network23a(pmlmepriv, pnetwork, is_freeall);
+
+}
+
+void rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork);
+void rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork)
+{
+
+ /* RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_network ==> ssid = %s\n\n" , pnetwork->network.Ssid.ssid)); */
+ _rtw_free_network23a_nolock23a(pmlmepriv, pnetwork);
+
+}
+
+void rtw_free_network_queue23a(struct rtw_adapter* dev, u8 isfreeall)
+{
+
+ _rtw_free_network23a_queue23a(dev, isfreeall);
+
+}
+
+/*
+ return the wlan_network with the matching addr
+
+ Shall be called under atomic context... to avoid a possible race condition...
+*/
+struct wlan_network *
+rtw_find_network23a(struct rtw_queue *scanned_queue, u8 *addr)
+{
+ struct wlan_network *pnetwork;
+
+ pnetwork = _rtw_find_network23a(scanned_queue, addr);
+
+ return pnetwork;
+}
+
+int rtw_is_same_ibss23a(struct rtw_adapter *adapter, struct wlan_network *pnetwork)
+{
+ int ret = true;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+
+ if (psecuritypriv->dot11PrivacyAlgrthm != _NO_PRIVACY_ &&
+ pnetwork->network.Privacy == 0)
+ ret = false;
+ else if (psecuritypriv->dot11PrivacyAlgrthm == _NO_PRIVACY_ &&
+ pnetwork->network.Privacy == 1)
+ ret = false;
+ else
+ ret = true;
+
+ return ret;
+}
+
+inline int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b);
+inline int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b)
+{
+ /* RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("(%s,%d)(%s,%d)\n", */
+ /* a->Ssid.Ssid, a->Ssid.SsidLength, b->Ssid.Ssid, b->Ssid.SsidLength)); */
+ return (a->Ssid.ssid_len == b->Ssid.ssid_len) &&
+ !memcmp(a->Ssid.ssid, b->Ssid.ssid, a->Ssid.ssid_len);
+}
+
+int is_same_network23a(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
+{
+ u16 s_cap, d_cap;
+
+ memcpy((u8 *)&s_cap, rtw_get_capability23a_from_ie(src->IEs), 2);
+ memcpy((u8 *)&d_cap, rtw_get_capability23a_from_ie(dst->IEs), 2);
+
+ s_cap = le16_to_cpu(s_cap);
+ d_cap = le16_to_cpu(d_cap);
+
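+ /* same network: SSID length, BSSID, SSID bytes and the IBSS/ESS capability bits all match */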
+ return ((src->Ssid.ssid_len == dst->Ssid.ssid_len) &&
+ /* (src->Configuration.DSConfig == dst->Configuration.DSConfig) && */
+ ether_addr_equal(src->MacAddress, dst->MacAddress) &&
+ ((!memcmp(src->Ssid.ssid, dst->Ssid.ssid, src->Ssid.ssid_len))) &&
+ ((s_cap & WLAN_CAPABILITY_IBSS) ==
+ (d_cap & WLAN_CAPABILITY_IBSS)) &&
+ ((s_cap & WLAN_CAPABILITY_ESS) ==
+ (d_cap & WLAN_CAPABILITY_ESS)));
+}
+
+struct wlan_network *rtw_get_oldest_wlan_network23a(struct rtw_queue *scanned_queue)
+{
+ struct list_head *plist, *phead;
+
+ struct wlan_network *pwlan;
+ struct wlan_network *oldest = NULL;
+
+ phead = get_list_head(scanned_queue);
+
+ list_for_each(plist, phead) {
+ pwlan = container_of(plist, struct wlan_network, list);
+
+ if (pwlan->fixed != true) {
+ if (!oldest || time_after(oldest->last_scanned,
+ pwlan->last_scanned))
+ oldest = pwlan;
+ }
+ }
+
+ return oldest;
+}
+
+void update_network23a(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
+ struct rtw_adapter *padapter, bool update_ie)
+{
+ u8 ss_ori = dst->PhyInfo.SignalStrength;
+ u8 sq_ori = dst->PhyInfo.SignalQuality;
+ long rssi_ori = dst->Rssi;
+
+ u8 ss_smp = src->PhyInfo.SignalStrength;
+ u8 sq_smp = src->PhyInfo.SignalQuality;
+ long rssi_smp = src->Rssi;
+
+ u8 ss_final;
+ u8 sq_final;
+ long rssi_final;
+
+ DBG_8723A("%s %s(%pM, ch%u) ss_ori:%3u, sq_ori:%3u, rssi_ori:%3ld, ss_smp:%3u, sq_smp:%3u, rssi_smp:%3ld\n",
+ __func__, src->Ssid.ssid, src->MacAddress,
+ src->Configuration.DSConfig, ss_ori, sq_ori, rssi_ori,
+ ss_smp, sq_smp, rssi_smp
+ );
+
+ /* The rule below is 1/5 for sample value, 4/5 for history value */
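+ /* e.g. a history value of 80 and a new sample of 60 yield (60 + 80*4)/5 = 76 */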
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) && is_same_network23a(&padapter->mlmepriv.cur_network.network, src)) {
+ /* Take the recvpriv's value for the connected AP*/
+ ss_final = padapter->recvpriv.signal_strength;
+ sq_final = padapter->recvpriv.signal_qual;
+ /* the rssi value here is undecorated, and will be used for antenna diversity */
+ if (sq_smp != 101) /* from the right channel */
+ rssi_final = (src->Rssi+dst->Rssi*4)/5;
+ else
+ rssi_final = rssi_ori;
+ } else {
+ if (sq_smp != 101) { /* from the right channel */
+ ss_final = ((u32)(src->PhyInfo.SignalStrength)+(u32)(dst->PhyInfo.SignalStrength)*4)/5;
+ sq_final = ((u32)(src->PhyInfo.SignalQuality)+(u32)(dst->PhyInfo.SignalQuality)*4)/5;
+ rssi_final = (src->Rssi+dst->Rssi*4)/5;
+ } else {
+ /* BSS info was not received on the right channel; keep the original RX signal info */
+ ss_final = dst->PhyInfo.SignalStrength;
+ sq_final = dst->PhyInfo.SignalQuality;
+ rssi_final = dst->Rssi;
+ }
+
+ }
+
+ if (update_ie)
+ memcpy((u8 *)dst, (u8 *)src, get_wlan_bssid_ex_sz(src));
+
+ dst->PhyInfo.SignalStrength = ss_final;
+ dst->PhyInfo.SignalQuality = sq_final;
+ dst->Rssi = rssi_final;
+
+ DBG_8723A("%s %s(%pM), SignalStrength:%u, SignalQuality:%u, RawRSSI:%ld\n",
+ __func__, dst->Ssid.ssid, dst->MacAddress,
+ dst->PhyInfo.SignalStrength,
+ dst->PhyInfo.SignalQuality, dst->Rssi);
+
+}
+
+static void update_current_network(struct rtw_adapter *adapter, struct wlan_bssid_ex *pnetwork)
+{
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
+ is_same_network23a(&pmlmepriv->cur_network.network, pnetwork)) {
+ /* RT_TRACE(_module_rtl871x_mlme_c_,_drv_err_,"Same Network\n"); */
+
+ /* if(pmlmepriv->cur_network.network.IELength<= pnetwork->IELength) */
+ update_network23a(&pmlmepriv->cur_network.network, pnetwork, adapter, true);
+ rtw_update_protection23a(adapter, pmlmepriv->cur_network.network.IEs + sizeof(struct ndis_802_11_fixed_ies),
+ pmlmepriv->cur_network.network.IELength);
+ }
+
+}
+
+/*
+ * Caller must hold pmlmepriv->lock first.
+ */
+void rtw_update_scanned_network23a(struct rtw_adapter *adapter, struct wlan_bssid_ex *target)
+{
+ struct list_head *plist, *phead;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct wlan_network *pnetwork = NULL;
+ struct wlan_network *oldest = NULL;
+ struct rtw_queue *queue = &pmlmepriv->scanned_queue;
+ u32 bssid_ex_sz;
+ int found = 0;
+
+ spin_lock_bh(&queue->lock);
+ phead = get_list_head(queue);
+
+ list_for_each(plist, phead) {
+ pnetwork = container_of(plist, struct wlan_network, list);
+
+ if (is_same_network23a(&pnetwork->network, target)) {
+ found = 1;
+ break;
+ }
+ if (!oldest || time_after(oldest->last_scanned,
+ pnetwork->last_scanned))
+ oldest = pnetwork;
+ }
+
+ /* If we didn't find a match, then get a new network slot to initialize
+ * with this beacon's information */
+ if (!found) {
+ pnetwork = rtw_alloc_network(pmlmepriv);
+ if (!pnetwork) {
+ if (!oldest) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n\n\nsomething wrong here\n\n\n"));
+ goto exit;
+ }
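+ /* allocation failed: recycle the oldest scanned entry in place */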
+ pnetwork = oldest;
+ } else
+ list_add_tail(&pnetwork->list, &queue->queue);
+
+ bssid_ex_sz = get_wlan_bssid_ex_sz(target);
+ target->Length = bssid_ex_sz;
+ memcpy(&pnetwork->network, target, bssid_ex_sz);
+
+ /* variable initialize */
+ pnetwork->fixed = false;
+ pnetwork->last_scanned = jiffies;
+
+ pnetwork->network_type = 0;
+ pnetwork->aid = 0;
+ pnetwork->join_res = 0;
+
+ /* BSS info was not received on the right channel */
+ if (pnetwork->network.PhyInfo.SignalQuality == 101)
+ pnetwork->network.PhyInfo.SignalQuality = 0;
+ } else {
+ /*
+ * we have an entry and we are going to update it. But
+ * this entry may be already expired. In this case we
+ * do the same as we found a new net and call the
+ * new_net handler
+ */
+ bool update_ie = true;
+
+ pnetwork->last_scanned = jiffies;
+
+ /* target->reserved == 1 means the scanned network came
+ * from a beacon frame. */
+ if (pnetwork->network.IELength > target->IELength &&
+ target->reserved == 1)
+ update_ie = false;
+
+ update_network23a(&pnetwork->network, target, adapter, update_ie);
+ }
+
+exit:
+ spin_unlock_bh(&queue->lock);
+
+}
+
+void rtw_add_network(struct rtw_adapter *adapter, struct wlan_bssid_ex *pnetwork)
+{
+ update_current_network(adapter, pnetwork);
+ rtw_update_scanned_network23a(adapter, pnetwork);
+}
+
+/* select the desired network based on the capability of the (i)bss. */
+/* check items: (1) security */
+/* (2) network_type */
+/* (3) WMM */
+/* (4) HT */
+/* (5) others */
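+/* Note: of the items above, only (1) security and (2) network_type are */
+/* actually evaluated below (plus the WPS IE while under WPS); WMM and */
+/* HT are not examined by this function. */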
+int rtw_is_desired_network(struct rtw_adapter *adapter, struct wlan_network *pnetwork)
+{
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u32 desired_encmode;
+ u32 privacy;
+
+ /* u8 wps_ie[512]; */
+ uint wps_ielen;
+
+ int bselected = true;
+
+ desired_encmode = psecuritypriv->ndisencryptstatus;
+ privacy = pnetwork->network.Privacy;
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
+ if (rtw_get_wps_ie23a(pnetwork->network.IEs + _FIXED_IE_LENGTH_, pnetwork->network.IELength - _FIXED_IE_LENGTH_, NULL, &wps_ielen) != NULL)
+ return true;
+ else
+ return false;
+ }
+ if (adapter->registrypriv.wifi_spec == 1) { /* for correct flow of 8021X to do.... */
+ if ((desired_encmode == Ndis802_11EncryptionDisabled) && (privacy != 0))
+ bselected = false;
+ }
+
+ if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0)) {
+ DBG_8723A("desired_encmode: %d, privacy: %d\n", desired_encmode, privacy);
+ bselected = false;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) {
+ if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
+ bselected = false;
+ }
+
+ return bselected;
+}
+
+/* TODO: Perry : For Power Management */
+void rtw_atimdone_event_callback23a(struct rtw_adapter *adapter, u8 *pbuf)
+{
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_event\n"));
+}
+
+void rtw_survey_event_cb23a(struct rtw_adapter *adapter, u8 *pbuf)
+{
+ u32 len;
+ struct wlan_bssid_ex *pnetwork;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ pnetwork = (struct wlan_bssid_ex *)pbuf;
+
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("rtw_survey_event_cb23a, ssid=%s\n", pnetwork->Ssid.ssid));
+
+ len = get_wlan_bssid_ex_sz(pnetwork);
+ if (len > sizeof(struct wlan_bssid_ex)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n ****rtw_survey_event_cb23a: return a wrong bss ***\n"));
+ return;
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ /* update IBSS_network 's timestamp */
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
+ /* RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,"rtw_survey_event_cb23a : WIFI_ADHOC_MASTER_STATE\n\n"); */
+ if (ether_addr_equal(pmlmepriv->cur_network.network.MacAddress,
+ pnetwork->MacAddress)) {
+ struct wlan_network* ibss_wlan = NULL;
+
+ memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8);
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+ ibss_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, pnetwork->MacAddress);
+ if (ibss_wlan) {
+ memcpy(ibss_wlan->network.IEs, pnetwork->IEs, 8);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ goto exit;
+ }
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ }
+ }
+
+ /* lock pmlmepriv->lock when you accessing network_q */
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == false) {
+ if (pnetwork->Ssid.ssid[0] == 0)
+ pnetwork->Ssid.ssid_len = 0;
+
+ rtw_add_network(adapter, pnetwork);
+ }
+
+exit:
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ return;
+}
+
+void rtw_surveydone_event_callback23a(struct rtw_adapter *adapter, u8 *pbuf)
+{
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if (pmlmepriv->wps_probe_req_ie) {
+ pmlmepriv->wps_probe_req_ie_len = 0;
+ kfree(pmlmepriv->wps_probe_req_ie);
+ pmlmepriv->wps_probe_req_ie = NULL;
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_surveydone_event_callback23a: fw_state:%x\n\n", get_fwstate(pmlmepriv)));
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ del_timer_sync(&pmlmepriv->scan_to_timer);
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+ } else {
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("nic status =%x, survey done event comes too late!\n", get_fwstate(pmlmepriv)));
+ }
+
+ rtw_set_signal_stat_timer(&adapter->recvpriv);
+
+ if (pmlmepriv->to_join == true) {
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == false) {
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+
+ if (rtw_select_and_join_from_scanned_queue23a(pmlmepriv) == _SUCCESS) {
+ mod_timer(&pmlmepriv->assoc_timer,
+ jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
+ } else {
+ struct wlan_bssid_ex *pdev_network = &adapter->registrypriv.dev_network;
+ u8 *pibss = adapter->registrypriv.dev_network.MacAddress;
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("switching to adhoc master\n"));
+
+ memset(&pdev_network->Ssid, 0, sizeof(struct cfg80211_ssid));
+ memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct cfg80211_ssid));
+
+ rtw_update_registrypriv_dev_network23a(adapter);
+ rtw_generate_random_ibss23a(pibss);
+
+ pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
+
+ if (rtw_createbss_cmd23a(adapter) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error =>rtw_createbss_cmd23a status FAIL\n"));
+ }
+
+ pmlmepriv->to_join = false;
+ }
+ }
+ } else {
+ int ret;
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+ pmlmepriv->to_join = false;
+ ret = rtw_select_and_join_from_scanned_queue23a(pmlmepriv);
+ if (ret == _SUCCESS) {
+ unsigned long e;
+ e = msecs_to_jiffies(MAX_JOIN_TIMEOUT);
+ mod_timer(&pmlmepriv->assoc_timer, jiffies + e);
+ } else if (ret == 2) { /* there is no need to wait for join */
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ rtw_indicate_connect23a(adapter);
+ } else {
+ DBG_8723A("try_to_join, but select scanning queue fail, to_roaming:%d\n", rtw_to_roaming(adapter));
+ if (rtw_to_roaming(adapter) != 0) {
+ if (--pmlmepriv->to_roaming == 0 ||
+ rtw_sitesurvey_cmd23a(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0) != _SUCCESS) {
+ rtw_set_roaming(adapter, 0);
+ rtw_free_assoc_resources23a(adapter, 1);
+ rtw_indicate_disconnect23a(adapter);
+ } else {
+ pmlmepriv->to_join = true;
+ }
+ }
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ }
+ }
+ }
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+#ifdef CONFIG_8723AU_P2P
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ p2p_ps_wk_cmd23a(adapter, P2P_PS_SCAN_DONE, 0);
+#endif /* CONFIG_8723AU_P2P */
+
+ rtw_os_xmit_schedule23a(adapter);
+
+ if (pmlmeext->sitesurvey_res.bss_cnt == 0)
+ rtw_hal_sreset_reset23a(adapter);
+
+ rtw_cfg80211_surveydone_event_callback(adapter);
+
+}
+
+void rtw_dummy_event_callback23a(struct rtw_adapter *adapter , u8 *pbuf)
+{
+}
+
+void rtw23a_fwdbg_event_callback(struct rtw_adapter *adapter , u8 *pbuf)
+{
+}
+
+static void free_scanqueue(struct mlme_priv *pmlmepriv)
+{
+ struct wlan_network *pnetwork;
+ struct rtw_queue *scan_queue = &pmlmepriv->scanned_queue;
+ struct list_head *plist, *phead, *ptemp;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+free_scanqueue\n"));
+ spin_lock_bh(&scan_queue->lock);
+
+ phead = get_list_head(scan_queue);
+
+ list_for_each_safe(plist, ptemp, phead) {
+ list_del_init(plist);
+ pnetwork = container_of(plist, struct wlan_network, list);
+ kfree(pnetwork);
+ }
+
+ spin_unlock_bh(&scan_queue->lock);
+
+}
+
+/*
+*rtw_free_assoc_resources23a: the caller has to lock pmlmepriv->lock
+*/
+void rtw_free_assoc_resources23a(struct rtw_adapter *adapter, int lock_scanned_queue)
+{
+ struct wlan_network* pwlan = NULL;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_free_assoc_resources23a\n"));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("tgt_network->network.MacAddress="MAC_FMT" ssid=%s\n",
+ MAC_ARG(tgt_network->network.MacAddress), tgt_network->network.Ssid.ssid));
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_AP_STATE)) {
+ struct sta_info* psta;
+
+ psta = rtw_get_stainfo23a(&adapter->stapriv, tgt_network->network.MacAddress);
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ rtw_free_stainfo23a(adapter, psta);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE)) {
+ struct sta_info* psta;
+
+ rtw_free_all_stainfo23a(adapter);
+
+ psta = rtw_get_bcmc_stainfo23a(adapter);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ rtw_free_stainfo23a(adapter, psta);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+
+ rtw_init_bcmc_stainfo23a(adapter);
+ }
+
+ if (lock_scanned_queue)
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+
+ pwlan = rtw_find_network23a(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan)
+ pwlan->fixed = false;
+ else
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_assoc_resources23a: pwlan == NULL\n\n"));
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) && (adapter->stapriv.asoc_sta_count == 1))
+ rtw_free_network_nolock(pmlmepriv, pwlan);
+
+ if (lock_scanned_queue)
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+
+ pmlmepriv->key_mask = 0;
+
+}
+
+/*
+*rtw_indicate_connect23a: the caller has to lock pmlmepriv->lock
+*/
+void rtw_indicate_connect23a(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_connect23a\n"));
+
+ pmlmepriv->to_join = false;
+
+ if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+ set_fwstate(pmlmepriv, _FW_LINKED);
+
+ rtw_led_control(padapter, LED_CTL_LINK);
+
+ rtw_os_indicate_connect23a(padapter);
+ }
+
+ rtw_set_roaming(padapter, 0);
+
+ rtw_set_scan_deny(padapter, 3000);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("-rtw_indicate_connect23a: fw_state=0x%08x\n", get_fwstate(pmlmepriv)));
+
+}
+
+/*
+*rtw_indicate_disconnect23a: the caller has to lock pmlmepriv->lock
+*/
+void rtw_indicate_disconnect23a(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_disconnect23a\n"));
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING|WIFI_UNDER_WPS);
+
+ /* DBG_8723A("clear wps when %s\n", __func__); */
+
+ if (rtw_to_roaming(padapter) > 0)
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) ||
+ (rtw_to_roaming(padapter) <= 0)) {
+ rtw_os_indicate_disconnect23a(padapter);
+
+ /* set ips_deny_time to avoid enter IPS before LPS leave */
+ padapter->pwrctrlpriv.ips_deny_time =
+ jiffies + msecs_to_jiffies(3000);
+
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+
+ rtw_clear_scan_deny(padapter);
+
+ }
+
+#ifdef CONFIG_8723AU_P2P
+ p2p_ps_wk_cmd23a(padapter, P2P_PS_DISABLE, 1);
+#endif /* CONFIG_8723AU_P2P */
+
+ rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_DISCONNECT, 1);
+
+}
+
+inline void rtw_indicate_scan_done23a(struct rtw_adapter *padapter, bool aborted)
+{
+ rtw_os_indicate_scan_done23a(padapter, aborted);
+}
+
+void rtw_scan_abort23a(struct rtw_adapter *adapter)
+{
+ unsigned long start;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+
+ start = jiffies;
+ pmlmeext->scan_abort = true;
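+ /* poll every 20 ms, for at most ~200 ms, for the survey state to clear */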
+ while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) &&
+ jiffies_to_msecs(jiffies - start) <= 200) {
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ break;
+
+ DBG_8723A(FUNC_NDEV_FMT"fw_state = _FW_UNDER_SURVEY!\n", FUNC_NDEV_ARG(adapter->pnetdev));
+ msleep(20);
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ if (!adapter->bDriverStopped && !adapter->bSurpriseRemoved)
+ DBG_8723A(FUNC_NDEV_FMT"waiting for scan_abort time out!\n", FUNC_NDEV_ARG(adapter->pnetdev));
+ rtw_indicate_scan_done23a(adapter, true);
+ }
+ pmlmeext->scan_abort = false;
+}
+
+static struct sta_info *rtw_joinbss_update_stainfo(struct rtw_adapter *padapter, struct wlan_network *pnetwork)
+{
+ int i;
+ struct sta_info *bmc_sta, *psta = NULL;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ psta = rtw_get_stainfo23a(pstapriv, pnetwork->network.MacAddress);
+ if (psta == NULL) {
+ psta = rtw_alloc_stainfo23a(pstapriv, pnetwork->network.MacAddress);
+ }
+
+ if (psta) { /* update ptarget_sta */
+ DBG_8723A("%s\n", __func__);
+
+ psta->aid = pnetwork->join_res;
+ psta->mac_id = 0;
+
+ /* sta mode */
+ rtw_hal_set_odm_var23a(padapter, HAL_ODM_STA_INFO, psta, true);
+
+ /* security related */
+ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ padapter->securitypriv.binstallGrpkey = false;
+ padapter->securitypriv.busetkipkey = false;
+ padapter->securitypriv.bgrpkey_handshake = false;
+
+ psta->ieee8021x_blocked = true;
+ psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+
+ memset((u8 *)&psta->dot118021x_UncstKey, 0, sizeof (union Keytype));
+
+ memset((u8 *)&psta->dot11tkiprxmickey, 0, sizeof (union Keytype));
+ memset((u8 *)&psta->dot11tkiptxmickey, 0, sizeof (union Keytype));
+
+ memset((u8 *)&psta->dot11txpn, 0, sizeof (union pn48));
+ memset((u8 *)&psta->dot11rxpn, 0, sizeof (union pn48));
+ }
+
+ /* Commented by Albert 2012/07/21 */
+ /* When doing WPS, wps_ie_len won't equal 0, and the Wi-Fi driver */
+ /* shouldn't allow data packets to be transmitted. */
+ if (padapter->securitypriv.wps_ie_len != 0) {
+ psta->ieee8021x_blocked = true;
+ padapter->securitypriv.wps_ie_len = 0;
+ }
+
+ /* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info */
+ /* if A-MPDU Rx is enabled, reset rx_ordering_ctrl wstart_b (indicate_seq) to the default value 0xffff */
+ /* todo: check if AP can send A-MPDU packets */
+ for (i = 0; i < 16 ; i++)
+ {
+ /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */
+ }
+
+ bmc_sta = rtw_get_bcmc_stainfo23a(padapter);
+ if (bmc_sta)
+ {
+ for (i = 0; i < 16 ; i++)
+ {
+ /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
+ preorder_ctrl = &bmc_sta->recvreorder_ctrl[i];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */
+ }
+ }
+
+ /* misc. */
+ update_sta_info23a(padapter, psta);
+
+ }
+
+ return psta;
+}
+
+/* pnetwork : returns from rtw23a_joinbss_event_cb */
+/* ptarget_wlan: found from scanned_queue */
+static void rtw_joinbss_update_network23a(struct rtw_adapter *padapter, struct wlan_network *ptarget_wlan, struct wlan_network *pnetwork)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+
+ DBG_8723A("%s\n", __func__);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nfw_state:%x, BSSID:"MAC_FMT"\n"
+ , get_fwstate(pmlmepriv), MAC_ARG(pnetwork->network.MacAddress)));
+
+ /* why not use ptarget_wlan?? */
+ memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length);
+ /* some IEs in pnetwork is wrong, so we should use ptarget_wlan IEs */
+ cur_network->network.IELength = ptarget_wlan->network.IELength;
+ memcpy(&cur_network->network.IEs[0], &ptarget_wlan->network.IEs[0], MAX_IE_SZ);
+
+ cur_network->aid = pnetwork->join_res;
+
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+ padapter->recvpriv.signal_strength = ptarget_wlan->network.PhyInfo.SignalStrength;
+ padapter->recvpriv.signal_qual = ptarget_wlan->network.PhyInfo.SignalQuality;
+ /* ptarget_wlan->network.Rssi is raw data, so we use ptarget_wlan->network.PhyInfo.SignalStrength instead (already scaled) */
+ padapter->recvpriv.rssi = translate_percentage_to_dbm(ptarget_wlan->network.PhyInfo.SignalStrength);
+ DBG_8723A("%s signal_strength:%3u, rssi:%3d, signal_qual:%3u\n",
+ __func__, padapter->recvpriv.signal_strength,
+ padapter->recvpriv.rssi, padapter->recvpriv.signal_qual);
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+
+ /* update fw_state will clr _FW_UNDER_LINKING here indirectly */
+ switch (pnetwork->network.InfrastructureMode) {
+ case Ndis802_11Infrastructure:
+ if (pmlmepriv->fw_state&WIFI_UNDER_WPS)
+ pmlmepriv->fw_state = WIFI_STATION_STATE|WIFI_UNDER_WPS;
+ else
+ pmlmepriv->fw_state = WIFI_STATION_STATE;
+ break;
+ case Ndis802_11IBSS:
+ pmlmepriv->fw_state = WIFI_ADHOC_STATE;
+ break;
+ default:
+ pmlmepriv->fw_state = WIFI_NULL_STATE;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Invalid network_mode\n"));
+ break;
+ }
+
+ rtw_update_protection23a(padapter, (cur_network->network.IEs) + sizeof (struct ndis_802_11_fixed_ies),
+ (cur_network->network.IELength));
+
+ rtw_update_ht_cap23a(padapter, cur_network->network.IEs, cur_network->network.IELength);
+}
+
+/* Notes: the function could be > passive_level (the same context as Rx tasklet) */
+/* pnetwork : returns from rtw23a_joinbss_event_cb */
+/* ptarget_wlan: found from scanned_queue */
+/* if join_res > 0, for (fw_state==WIFI_STATION_STATE), we check if "ptarget_sta" & "ptarget_wlan" exist. */
+/* if join_res > 0, for (fw_state==WIFI_ADHOC_STATE), we only check if "ptarget_wlan" exist. */
+/* if join_res > 0, update "cur_network->network" from "pnetwork->network" if (ptarget_wlan !=NULL). */
+
+void rtw_joinbss_event_prehandle23a(struct rtw_adapter *adapter, u8 *pbuf)
+{
+ static u8 retry=0;
+ struct sta_info *ptarget_sta= NULL, *pcur_sta = NULL;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ struct wlan_network *pcur_wlan = NULL, *ptarget_wlan = NULL;
+ unsigned int the_same_macaddr = false;
+
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("joinbss event call back received with res=%d\n", pnetwork->join_res));
+
+ rtw_get_encrypt_decrypt_from_registrypriv23a(adapter);
+
+ if (pmlmepriv->assoc_ssid.ssid_len == 0) {
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_err_,("@@@@@ joinbss event call back for Any SSid\n"));
+ } else {
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_err_,
+ ("@@@@@ rtw23a_joinbss_event_cb for SSid:%s\n",
+ pmlmepriv->assoc_ssid.ssid));
+ }
+
+ if (ether_addr_equal(pnetwork->network.MacAddress,
+ cur_network->network.MacAddress))
+ the_same_macaddr = true;
+ else
+ the_same_macaddr = false;
+
+ pnetwork->network.Length = get_wlan_bssid_ex_sz(&pnetwork->network);
+ if(pnetwork->network.Length > sizeof(struct wlan_bssid_ex))
+ {
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_err_,("\n\n ***joinbss_evt_callback return a wrong bss ***\n\n"));
+ return;
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("\n rtw23a_joinbss_event_cb !! _enter_critical\n"));
+
+ if(pnetwork->join_res > 0)
+ {
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+ retry = 0;
+ if (check_fwstate(pmlmepriv,_FW_UNDER_LINKING))
+ {
+ /* s1. find ptarget_wlan */
+ if(check_fwstate(pmlmepriv, _FW_LINKED))
+ {
+ if(the_same_macaddr == true)
+ {
+ ptarget_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ }
+ else
+ {
+ pcur_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ if(pcur_wlan) pcur_wlan->fixed = false;
+
+ pcur_sta = rtw_get_stainfo23a(pstapriv, cur_network->network.MacAddress);
+ if(pcur_sta) {
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ rtw_free_stainfo23a(adapter, pcur_sta);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+ }
+
+ ptarget_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
+ if(check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ if(ptarget_wlan) ptarget_wlan->fixed = true;
+ }
+ }
+
+ }
+ else
+ {
+ ptarget_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
+ if(check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ if(ptarget_wlan) ptarget_wlan->fixed = true;
+ }
+ }
+
+ /* s2. update cur_network */
+ if(ptarget_wlan)
+ {
+ rtw_joinbss_update_network23a(adapter, ptarget_wlan, pnetwork);
+ }
+ else
+ {
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_err_,("Can't find ptarget_wlan when joinbss_event callback\n"));
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ goto ignore_joinbss_callback;
+ }
+
+ /* s3. find ptarget_sta & update ptarget_sta after update cur_network only for station mode */
+ if(check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)
+ {
+ ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
+ if(ptarget_sta==NULL)
+ {
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_err_,("Can't update stainfo when joinbss_event callback\n"));
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ goto ignore_joinbss_callback;
+ }
+ }
+
+ /* s4. indicate connect */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)
+ {
+ rtw_indicate_connect23a(adapter);
+ } else {
+ /* adhoc mode will call rtw_indicate_connect23a in rtw_stassoc_event_callback23a */
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("adhoc mode, fw_state:%x", get_fwstate(pmlmepriv)));
+ }
+
+ /* s5. Cancel assoc_timer */
+ del_timer_sync(&pmlmepriv->assoc_timer);
+
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("Cancel assoc_timer\n"));
+ } else {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("rtw23a_joinbss_event_cb err: fw_state:%x",
+ get_fwstate(pmlmepriv)));
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ goto ignore_joinbss_callback;
+ }
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ } else if(pnetwork->join_res == -4) {
+ rtw_reset_securitypriv23a(adapter);
+ mod_timer(&pmlmepriv->assoc_timer,
+ jiffies + msecs_to_jiffies(1));
+
+ /* rtw_free_assoc_resources23a(adapter, 1); */
+
+ if((check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("fail! clear _FW_UNDER_LINKING ^^^fw_state=%x\n",
+ get_fwstate(pmlmepriv)));
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ }
+
+ } else {
+ /* if join_res < 0 (join fails), then try again */
+ mod_timer(&pmlmepriv->assoc_timer,
+ jiffies + msecs_to_jiffies(1));
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ }
+
+ignore_joinbss_callback:
+
+ spin_unlock_bh(&pmlmepriv->lock);
+}
+
+void rtw23a_joinbss_event_cb(struct rtw_adapter *adapter, u8 *pbuf)
+{
+ struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
+
+ mlmeext_joinbss_event_callback23a(adapter, pnetwork->join_res);
+
+ rtw_os_xmit_schedule23a(adapter);
+
+}
+
+/* FOR AP , AD-HOC mode */
+void rtw_stassoc_hw_rpt23a(struct rtw_adapter *adapter, struct sta_info *psta)
+{
+ u16 media_status;
+
+ if (psta == NULL) return;
+
+ media_status = (psta->mac_id<<8)|1; /* MACID|OPMODE:1 connect */
+ rtw_hal_set_hwreg23a(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+}
+
+void rtw_stassoc_event_callback23a(struct rtw_adapter *adapter, u8 *pbuf)
+{
+ struct sta_info *psta;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct stassoc_event *pstassoc = (struct stassoc_event*)pbuf;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ struct wlan_network *ptarget_wlan = NULL;
+
+ if(rtw_access_ctrl23a(adapter, pstassoc->macaddr) == false)
+ return;
+
+#ifdef CONFIG_8723AU_AP_MODE
+ if(check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ {
+ psta = rtw_get_stainfo23a(&adapter->stapriv, pstassoc->macaddr);
+ if (psta) {
+ /* bss_cap_update_on_sta_join23a(adapter, psta); */
+ /* sta_info_update23a(adapter, psta); */
+ ap_sta_info_defer_update23a(adapter, psta);
+
+ rtw_stassoc_hw_rpt23a(adapter,psta);
+ }
+ return;
+ }
+#endif
+ /* for AD-HOC mode */
+ psta = rtw_get_stainfo23a(&adapter->stapriv, pstassoc->macaddr);
+ if (psta != NULL) {
+ /* the sta has already been added to sta_info_queue => do nothing */
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_err_,("Error: rtw_stassoc_event_callback23a: sta has been in sta_hash_queue\n"));
+ return; /* the driver already received this event and the fw has not yet set the key in the CAM entry */
+ }
+
+ psta = rtw_alloc_stainfo23a(&adapter->stapriv, pstassoc->macaddr);
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_err_,("Can't alloc sta_info when rtw_stassoc_event_callback23a\n"));
+ return;
+ }
+
+ /* to do : init sta_info variable */
+ psta->qos_option = 0;
+ psta->mac_id = (uint)pstassoc->cam_id;
+ /* psta->aid = (uint)pstassoc->cam_id; */
+ DBG_8723A("%s\n",__func__);
+ /* for ad-hoc mode */
+ rtw_hal_set_odm_var23a(adapter,HAL_ODM_STA_INFO,psta,true);
+
+ rtw_stassoc_hw_rpt23a(adapter,psta);
+
+ if(adapter->securitypriv.dot11AuthAlgrthm==dot11AuthAlgrthm_8021X)
+ psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
+
+ psta->ieee8021x_blocked = false;
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if ( (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)==true ) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)==true ) )
+ {
+ if(adapter->stapriv.asoc_sta_count== 2)
+ {
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+ ptarget_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ if(ptarget_wlan) ptarget_wlan->fixed = true;
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
+ rtw_indicate_connect23a(adapter);
+ }
+ }
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ mlmeext_sta_add_event_callback23a(adapter, psta);
+}
+
+void rtw_stadel_event_callback23a(struct rtw_adapter *adapter, u8 *pbuf)
+{
+ int mac_id=-1;
+ struct sta_info *psta;
+ struct wlan_network* pwlan = NULL;
+ struct wlan_bssid_ex *pdev_network=NULL;
+ u8* pibss = NULL;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct stadel_event *pstadel = (struct stadel_event*)pbuf;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
+
+ psta = rtw_get_stainfo23a(&adapter->stapriv, pstadel->macaddr);
+ if(psta)
+ mac_id = psta->mac_id;
+ else
+ mac_id = pstadel->mac_id;
+
+ DBG_8723A("%s(mac_id=%d)=" MAC_FMT "\n", __func__, mac_id, MAC_ARG(pstadel->macaddr));
+
+ if(mac_id>=0) {
+ u16 media_status;
+ media_status = (mac_id<<8)|0; /* MACID|OPMODE:0 means disconnect */
+ /* for STA, AP, ADHOC mode, report disconnect status to FW */
+ rtw_hal_set_hwreg23a(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ {
+ return;
+ }
+
+ mlmeext_sta_del_event_callback23a(adapter);
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ {
+ if (rtw_to_roaming(adapter) > 0)
+ pmlmepriv->to_roaming--; /* this stadel_event is caused by roaming, decrease to_roaming */
+ else if (rtw_to_roaming(adapter) == 0)
+ rtw_set_roaming(adapter, adapter->registrypriv.max_roaming_times);
+ if (*((unsigned short *)(pstadel->rsvd)) != WLAN_REASON_EXPIRATION_CHK)
+ rtw_set_roaming(adapter, 0); /* don't roam */
+
+ rtw_free_uc_swdec_pending_queue23a(adapter);
+
+ rtw_free_assoc_resources23a(adapter, 1);
+ rtw_indicate_disconnect23a(adapter);
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+ /* remove the network entry in scanned_queue */
+ pwlan = rtw_find_network23a(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan) {
+ pwlan->fixed = false;
+ rtw_free_network_nolock(pmlmepriv, pwlan);
+ }
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+
+ _rtw23a_roaming(adapter, tgt_network);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))
+ {
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ rtw_free_stainfo23a(adapter, psta);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+
+ if (adapter->stapriv.asoc_sta_count == 1) /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
+ {
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+ /* free old ibss network */
+ /* pwlan = rtw_find_network23a(&pmlmepriv->scanned_queue, pstadel->macaddr); */
+ pwlan = rtw_find_network23a(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan)
+ {
+ pwlan->fixed = false;
+ rtw_free_network_nolock(pmlmepriv, pwlan);
+ }
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ /* re-create ibss */
+ pdev_network = &adapter->registrypriv.dev_network;
+ pibss = adapter->registrypriv.dev_network.MacAddress;
+
+ memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network));
+
+ memset(&pdev_network->Ssid, 0,
+ sizeof(struct cfg80211_ssid));
+ memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid,
+ sizeof(struct cfg80211_ssid));
+
+ rtw_update_registrypriv_dev_network23a(adapter);
+
+ rtw_generate_random_ibss23a(pibss);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))
+ {
+ set_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+
+ if (rtw_createbss_cmd23a(adapter)!= _SUCCESS)
+ {
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error =>stadel_event_callback: rtw_createbss_cmd23a status FAIL***\n "));
+
+ }
+
+ }
+
+ }
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+}
+
+void rtw_cpwm_event_callback23a(struct rtw_adapter *padapter, u8 *pbuf)
+{
+
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_err_,("+rtw_cpwm_event_callback23a !!!\n"));
+
+}
+
+/*
+* rtw23a_join_to_handler - Timeout/failure handler for CMD JoinBss
+* @data: address of the struct rtw_adapter, passed as the timer argument
+*/
+void rtw23a_join_to_handler(unsigned long data)
+{
+ struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ int do_join_r;
+
+ DBG_8723A("%s, fw_state=%x\n", __func__, get_fwstate(pmlmepriv));
+
+ if(adapter->bDriverStopped ||adapter->bSurpriseRemoved)
+ return;
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if (rtw_to_roaming(adapter) > 0) { /* join timeout caused by roaming */
+ while(1) {
+ pmlmepriv->to_roaming--;
+ if (rtw_to_roaming(adapter) != 0) { /* try another */
+ DBG_8723A("%s try another roaming\n", __func__);
+ if (_SUCCESS!= (do_join_r = rtw_do_join23a(adapter))) {
+ DBG_8723A("%s roaming do_join return %d\n", __func__ , do_join_r);
+ continue;
+ }
+ break;
+ } else {
+ DBG_8723A("%s We've tried roaming but failed\n", __func__);
+ rtw_indicate_disconnect23a(adapter);
+ break;
+ }
+ }
+ } else {
+ rtw_indicate_disconnect23a(adapter);
+ free_scanqueue(pmlmepriv);
+
+ /* indicate disconnect for the case that join_timeout and check_fwstate != FW_LINKED */
+ rtw_cfg80211_indicate_disconnect(adapter);
+ }
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+}
+
+/*
+* rtw_scan_timeout_handler23a - Timeout/failure handler for CMD SiteSurvey
+* @data: address of the struct rtw_adapter, passed as the timer argument
+*/
+void rtw_scan_timeout_handler23a(unsigned long data)
+{
+ struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ DBG_8723A(FUNC_ADPT_FMT" fw_state =%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ rtw_indicate_scan_done23a(adapter, true);
+}
+
+static void rtw_auto_scan_handler(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ /* auto site survey per 60sec */
+ if (pmlmepriv->scan_interval > 0) {
+ pmlmepriv->scan_interval--;
+ if (pmlmepriv->scan_interval == 0) {
+ DBG_8723A("%s\n", __func__);
+ rtw_set_802_11_bssid23a_list_scan(padapter, NULL, 0);
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+ }
+ }
+}
+
+void rtw_dynamic_check_timer_handler(unsigned long data)
+{
+ struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+
+ if (adapter->hw_init_completed == false)
+ goto out;
+
+ if ((adapter->bDriverStopped == true)||(adapter->bSurpriseRemoved == true))
+ goto out;
+
+ if (adapter->net_closed == true)
+ goto out;
+
+ rtw_dynamic_chk_wk_cmd23a(adapter);
+
+ if (pregistrypriv->wifi_spec == 1)
+ {
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+#endif
+ {
+ /* auto site survey */
+ rtw_auto_scan_handler(adapter);
+ }
+ }
+out:
+ mod_timer(&adapter->mlmepriv.dynamic_chk_timer,
+ jiffies + msecs_to_jiffies(2000));
+}
+
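+/*
+ * Scan-deny window: rtw_set_scan_deny() sets the atomic set_scan_deny flag
+ * and arms set_scan_deny_timer to clear it after the requested number of
+ * milliseconds; rtw_is_scan_deny() reports whether the window is still open.
+ */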
+inline bool rtw_is_scan_deny(struct rtw_adapter *adapter)
+{
+ struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+ return (atomic_read(&mlmepriv->set_scan_deny) != 0) ? true : false;
+}
+
+void rtw_clear_scan_deny(struct rtw_adapter *adapter)
+{
+ struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+ atomic_set(&mlmepriv->set_scan_deny, 0);
+ if (0)
+ DBG_8723A(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(adapter));
+}
+
+void rtw_set_scan_deny_timer_hdl(unsigned long data)
+{
+ struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+ rtw_clear_scan_deny(adapter);
+}
+
+void rtw_set_scan_deny(struct rtw_adapter *adapter, u32 ms)
+{
+ struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+
+ if (0)
+ DBG_8723A(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(adapter));
+ atomic_set(&mlmepriv->set_scan_deny, 1);
+ mod_timer(&mlmepriv->set_scan_deny_timer,
+ jiffies + msecs_to_jiffies(ms));
+
+}
+
+#if defined(IEEE80211_SCAN_RESULT_EXPIRE)
+#define RTW_SCAN_RESULT_EXPIRE (IEEE80211_SCAN_RESULT_EXPIRE / HZ * 1000 - 1000) /* 3000 - 1000 */
+#else
+#define RTW_SCAN_RESULT_EXPIRE 2000
+#endif
+
+/*
+* Select a new join candidate from the original @param candidate and @param competitor;
+* the competitor wins when it matches the requested BSSID/SSID and has a higher RSSI.
+* @return true: candidate is updated
+* @return false: candidate is not updated
+*/
+static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
+ , struct wlan_network **candidate, struct wlan_network *competitor)
+{
+ int updated = false;
+ struct rtw_adapter *adapter = container_of(pmlmepriv, struct rtw_adapter, mlmepriv);
+
+ /* check bssid, if needed */
+ if (pmlmepriv->assoc_by_bssid == true) {
+ if (!ether_addr_equal(competitor->network.MacAddress,
+ pmlmepriv->assoc_bssid))
+ goto exit;
+ }
+
+ /* check ssid, if needed */
+ if (pmlmepriv->assoc_ssid.ssid_len) {
+ if (competitor->network.Ssid.ssid_len !=
+ pmlmepriv->assoc_ssid.ssid_len ||
+ memcmp(competitor->network.Ssid.ssid,
+ pmlmepriv->assoc_ssid.ssid,
+ pmlmepriv->assoc_ssid.ssid_len))
+ goto exit;
+ }
+
+ if (rtw_is_desired_network(adapter, competitor) == false)
+ goto exit;
+
+ if (rtw_to_roaming(adapter) > 0) {
+ unsigned int passed;
+
+ passed = jiffies_to_msecs(jiffies - competitor->last_scanned);
+ if (passed >= RTW_SCAN_RESULT_EXPIRE ||
+ is_same_ess(&competitor->network,
+ &pmlmepriv->cur_network.network) == false)
+ goto exit;
+ }
+
+ if (*candidate == NULL || (*candidate)->network.Rssi < competitor->network.Rssi) {
+ *candidate = competitor;
+ updated = true;
+ }
+
+ if (updated) {
+ DBG_8723A("[by_bssid:%u][assoc_ssid:%s][to_roaming:%u] new candidate: %s("MAC_FMT") rssi:%d\n",
+ pmlmepriv->assoc_by_bssid,
+ pmlmepriv->assoc_ssid.ssid,
+ rtw_to_roaming(adapter),
+ (*candidate)->network.Ssid.ssid,
+ MAC_ARG((*candidate)->network.MacAddress),
+ (int)(*candidate)->network.Rssi);
+ }
+
+exit:
+ return updated;
+}
+
+/*
+Calling context:
+The caller of the sub-routine will be in critical section...
+
+The caller must hold the following spinlock
+
+pmlmepriv->lock
+
+*/
+
+int rtw_select_and_join_from_scanned_queue23a(struct mlme_priv *pmlmepriv)
+{
+ int ret;
+ struct list_head *phead, *plist, *ptmp;
+ struct rtw_adapter *adapter;
+ struct rtw_queue *queue = &pmlmepriv->scanned_queue;
+ struct wlan_network *pnetwork = NULL;
+ struct wlan_network *candidate = NULL;
+
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+ phead = get_list_head(queue);
+ adapter = pmlmepriv->nic_hdl;
+
+ list_for_each_safe(plist, ptmp, phead) {
+ pnetwork = container_of(plist, struct wlan_network, list);
+ if (!pnetwork) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("%s return _FAIL:(pnetwork == NULL)\n",
+ __func__));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
+ }
+
+ if (!candidate) {
+ DBG_8723A("%s: return _FAIL(candidate == NULL)\n", __func__);
+ ret = _FAIL;
+ goto exit;
+ } else {
+ DBG_8723A("%s: candidate: %s("MAC_FMT", ch:%u)\n", __func__,
+ candidate->network.Ssid.ssid,
+ MAC_ARG(candidate->network.MacAddress),
+ candidate->network.Configuration.DSConfig);
+ }
+
+ /* check for situation of _FW_LINKED */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ DBG_8723A("%s: _FW_LINKED while ask_for_joinbss!!!\n",
+ __func__);
+
+ rtw_disassoc_cmd23a(adapter, 0, true);
+ rtw_indicate_disconnect23a(adapter);
+ rtw_free_assoc_resources23a(adapter, 0);
+ }
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+ ret = rtw_joinbss_cmd23a(adapter, candidate);
+
+exit:
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+
+ return ret;
+}
+
+int rtw_set_auth23a(struct rtw_adapter * adapter,
+ struct security_priv *psecuritypriv)
+{
+ struct cmd_obj* pcmd;
+ struct setauth_parm *psetauthparm;
+ struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
+ int res = _SUCCESS;
+
+ pcmd = (struct cmd_obj *)kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ if (!pcmd) {
+ res = _FAIL; /* try again */
+ goto exit;
+ }
+
+ psetauthparm = (struct setauth_parm*)
+ kzalloc(sizeof(struct setauth_parm), GFP_KERNEL);
+ if (!psetauthparm) {
+ kfree(pcmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetauthparm->mode = (unsigned char)psecuritypriv->dot11AuthAlgrthm;
+
+ pcmd->cmdcode = _SetAuth_CMD_;
+ pcmd->parmbuf = (unsigned char *)psetauthparm;
+ pcmd->cmdsz = (sizeof(struct setauth_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ INIT_LIST_HEAD(&pcmd->list);
+
+ RT_TRACE(_module_rtl871x_mlme_c_,_drv_err_,
+ ("after enqueue set_auth_cmd, auth_mode=%x\n",
+ psecuritypriv->dot11AuthAlgrthm));
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, pcmd);
+
+exit:
+
+ return res;
+}
+
+int rtw_set_key23a(struct rtw_adapter *adapter,
+ struct security_priv *psecuritypriv, int keyid, u8 set_tx)
+{
+ u8 keylen;
+ struct cmd_obj *pcmd;
+ struct setkey_parm *psetkeyparm;
+ struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ int res = _SUCCESS;
+
+ pcmd = (struct cmd_obj *)kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ if (!pcmd) {
+ res = _FAIL; /* try again */
+ goto exit;
+ }
+ psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
+ if (!psetkeyparm) {
+ kfree(pcmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ psetkeyparm->algorithm = (unsigned char)
+ psecuritypriv->dot118021XGrpPrivacy;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key23a: psetkeyparm->algorithm = (unsigned "
+ "char)psecuritypriv->dot118021XGrpPrivacy =%d\n",
+ psetkeyparm->algorithm));
+ } else {
+ psetkeyparm->algorithm = (u8)psecuritypriv->dot11PrivacyAlgrthm;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key23a: psetkeyparm->algorithm = (u8)"
+ "psecuritypriv->dot11PrivacyAlgrthm =%d\n",
+ psetkeyparm->algorithm));
+ }
+ psetkeyparm->keyid = (u8)keyid;/* 0~3 */
+ psetkeyparm->set_tx = set_tx;
+ if (is_wep_enc(psetkeyparm->algorithm))
+ pmlmepriv->key_mask |= CHKBIT(psetkeyparm->keyid);
+
+ DBG_8723A("==> rtw_set_key23a algorithm(%x), keyid(%x), key_mask(%x)\n",
+ psetkeyparm->algorithm, psetkeyparm->keyid,
+ pmlmepriv->key_mask);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key23a: psetkeyparm->algorithm =%d psetkeyparm->"
+ "keyid = (u8)keyid =%d\n", psetkeyparm->algorithm, keyid));
+
+ switch (psetkeyparm->algorithm) {
+ case _WEP40_:
+ keylen = 5;
+ memcpy(&psetkeyparm->key[0],
+ &psecuritypriv->dot11DefKey[keyid].skey[0], keylen);
+ break;
+ case _WEP104_:
+ keylen = 13;
+ memcpy(&psetkeyparm->key[0],
+ &psecuritypriv->dot11DefKey[keyid].skey[0], keylen);
+ break;
+ case _TKIP_:
+ keylen = 16;
+ memcpy(&psetkeyparm->key,
+ &psecuritypriv->dot118021XGrpKey[keyid], keylen);
+ psetkeyparm->grpkey = 1;
+ break;
+ case _AES_:
+ keylen = 16;
+ memcpy(&psetkeyparm->key,
+ &psecuritypriv->dot118021XGrpKey[keyid], keylen);
+ psetkeyparm->grpkey = 1;
+ break;
+ default:
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key23a:psecuritypriv->dot11PrivacyAlgrthm = "
+ "%x (must be 1 or 2 or 4 or 5)\n",
+ psecuritypriv->dot11PrivacyAlgrthm));
+ res = _FAIL;
+ kfree(pcmd);
+ kfree(psetkeyparm);
+ goto exit;
+ }
+
+ pcmd->cmdcode = _SetKey_CMD_;
+ pcmd->parmbuf = (u8 *)psetkeyparm;
+ pcmd->cmdsz = (sizeof(struct setkey_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ INIT_LIST_HEAD(&pcmd->list);
+
+ /* sema_init(&pcmd->cmd_sem, 0); */
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, pcmd);
+
+exit:
+
+ return res;
+}
+
+/* adjust IEs for rtw_joinbss_cmd23a in WMM */
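+/*
+ * Scan in_ie (starting after the 12 fixed bytes) for the WMM vendor IE
+ * (0xDD, OUI 00:50:F2, type 0x02), copy its first 9 bytes to
+ * out_ie[initial_out_len] and then force the length, OUI subtype and
+ * QoS-info bytes (7, 0x00 and 0x00) so that a plain WMM information
+ * element with a cleared QoS-info field is sent.
+ */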
+int rtw_restruct_wmm_ie23a(struct rtw_adapter *adapter, u8 *in_ie,
+ u8 *out_ie, uint in_len, uint initial_out_len)
+{
+ unsigned int ielength = 0;
+ unsigned int i, j;
+
+ i = 12; /* after the fixed IE */
+ while(i < in_len) {
+ ielength = initial_out_len;
+
+ /* WMM element ID and OUI */
+ if (i + 5 < in_len && in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 &&
+ in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 &&
+ in_ie[i + 5] == 0x02) {
+
+ /* Append WMM IE to the last index of out_ie */
+ for (j = i; j < i + 9; j++) {
+ out_ie[ielength] = in_ie[j];
+ ielength++;
+ }
+ out_ie[initial_out_len + 1] = 0x07;
+ out_ie[initial_out_len + 6] = 0x00;
+ out_ie[initial_out_len + 8] = 0x00;
+
+ break;
+ }
+
+ i += (in_ie[i + 1] + 2); /* to the next IE element */
+ }
+
+ return ielength;
+}
+
+/* */
+/* Ported from 8185: IsInPreAuthKeyList().
+ (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
+/* Added by Annie, 2006-05-07. */
+/* */
+/* Search by BSSID, */
+/* Return Value: */
+/* -1 : no pre-auth key for this BSSID in the table */
+/* >= 0 : index of the matching pre-auth key entry */
+/* */
+/* */
+
+static int SecIsInPMKIDList(struct rtw_adapter *Adapter, u8 *bssid)
+{
+ struct security_priv *psecuritypriv = &Adapter->securitypriv;
+ int i = 0;
+
+ do {
+ if (psecuritypriv->PMKIDList[i].bUsed &&
+ ether_addr_equal(psecuritypriv->PMKIDList[i].Bssid, bssid)) {
+ break;
+ } else {
+ i++;
+ /* continue; */
+ }
+ } while(i < NUM_PMKID_CACHE);
+
+ if (i == NUM_PMKID_CACHE) {
+ i = -1;/* Could not find. */
+ } else {
+ /* There is one Pre-Authentication Key for
+ the specific BSSID. */
+ }
+
+ return i;
+}
+
+/* */
+/* Check the RSN IE length */
+/* If the RSN IE length is <= 20, the RSN IE doesn't include
+ the PMKID information */
+/* ie[0..11] are the fixed fields */
+/* ie[12] is the RSN element ID */
+/* ie[13] is the RSN element length */
+/* */
+
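+/*
+ * Suffix written by rtw_append_pmkid() when the PMKID is missing (a sketch
+ * of what the code below does):
+ *   ie[ie_len + 0] = 0x01      PMKID count, low byte
+ *   ie[ie_len + 1] = 0x00      PMKID count, high byte (count = 1, LE)
+ *   ie[ie_len + 2..17]         16-byte PMKID from PMKIDList[iEntry]
+ * and ie[13] (the RSN element length) grows by 2 + 16 = 18.
+ */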
+static int rtw_append_pmkid(struct rtw_adapter *Adapter, int iEntry,
+ u8 *ie, uint ie_len)
+{
+ struct security_priv *psecuritypriv = &Adapter->securitypriv;
+
+ if (ie[13] <= 20) {
+ /* The RSN IE didn't include the PMK ID,
+ append the PMK information */
+ ie[ie_len] = 1;
+ ie_len++;
+ ie[ie_len] = 0; /* PMKID count = 0x0100 */
+ ie_len++;
+ memcpy(&ie[ie_len],
+ &psecuritypriv->PMKIDList[iEntry].PMKID, 16);
+
+ ie_len += 16;
+ ie[13] += 18;/* PMKID length = 2+16 */
+ }
+ return ie_len;
+}
+
+int rtw_restruct_sec_ie23a(struct rtw_adapter *adapter, u8 *in_ie, u8 *out_ie,
+ uint in_len)
+{
+ u8 authmode = 0;
+ uint ielength;
+ int iEntry;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ uint ndisauthmode = psecuritypriv->ndisauthtype;
+ uint ndissecuritytype = psecuritypriv->ndisencryptstatus;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("+rtw_restruct_sec_ie23a: ndisauthmode=%d ndissecuritytype=%d\n",
+ ndisauthmode, ndissecuritytype));
+
+ /* copy fixed ie only */
+ memcpy(out_ie, in_ie, 12);
+ ielength = 12;
+ if (ndisauthmode == Ndis802_11AuthModeWPA ||
+ ndisauthmode == Ndis802_11AuthModeWPAPSK)
+ authmode = _WPA_IE_ID_;
+ if (ndisauthmode == Ndis802_11AuthModeWPA2 ||
+ ndisauthmode == Ndis802_11AuthModeWPA2PSK)
+ authmode = _WPA2_IE_ID_;
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
+ memcpy(out_ie + ielength, psecuritypriv->wps_ie,
+ psecuritypriv->wps_ie_len);
+
+ ielength += psecuritypriv->wps_ie_len;
+ } else if ((authmode==_WPA_IE_ID_) || (authmode==_WPA2_IE_ID_)) {
+ /* copy RSN or SSN */
+ memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0],
+ psecuritypriv->supplicant_ie[1] + 2);
+ ielength += psecuritypriv->supplicant_ie[1] + 2;
+ rtw_report_sec_ie23a(adapter, authmode,
+ psecuritypriv->supplicant_ie);
+ }
+
+ iEntry = SecIsInPMKIDList(adapter, pmlmepriv->assoc_bssid);
+ if (iEntry < 0)
+ return ielength;
+
+ if (authmode == _WPA2_IE_ID_)
+ ielength = rtw_append_pmkid(adapter, iEntry,
+ out_ie, ielength);
+
+ return ielength;
+}
+
+void rtw_init_registrypriv_dev_network23a(struct rtw_adapter* adapter)
+{
+ struct registry_priv* pregistrypriv = &adapter->registrypriv;
+ struct eeprom_priv* peepriv = &adapter->eeprompriv;
+ struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
+ u8 *myhwaddr = myid(peepriv);
+
+ ether_addr_copy(pdev_network->MacAddress, myhwaddr);
+
+ memcpy(&pdev_network->Ssid, &pregistrypriv->ssid,
+ sizeof(struct cfg80211_ssid));
+
+ pdev_network->Configuration.Length=sizeof(struct ndis_802_11_config);
+ pdev_network->Configuration.BeaconPeriod = 100;
+ pdev_network->Configuration.FHConfig.Length = 0;
+ pdev_network->Configuration.FHConfig.HopPattern = 0;
+ pdev_network->Configuration.FHConfig.HopSet = 0;
+ pdev_network->Configuration.FHConfig.DwellTime = 0;
+
+}
+
+void rtw_update_registrypriv_dev_network23a(struct rtw_adapter* adapter)
+{
+ int sz = 0;
+ struct registry_priv* pregistrypriv = &adapter->registrypriv;
+ struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ struct wlan_network *cur_network = &adapter->mlmepriv.cur_network;
+ /* struct xmit_priv *pxmitpriv = &adapter->xmitpriv; */
+
+ pdev_network->Privacy =
+ (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0);
+
+ pdev_network->Rssi = 0;
+
+ switch (pregistrypriv->wireless_mode)
+ {
+ case WIRELESS_11B:
+ pdev_network->NetworkTypeInUse = Ndis802_11DS;
+ break;
+ case WIRELESS_11G:
+ case WIRELESS_11BG:
+ case WIRELESS_11_24N:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11BG_24N:
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM24;
+ break;
+ case WIRELESS_11A:
+ case WIRELESS_11A_5N:
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM5;
+ break;
+ case WIRELESS_11ABGN:
+ if (pregistrypriv->channel > 14)
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM5;
+ else
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM24;
+ break;
+ default :
+ /* TODO */
+ break;
+ }
+
+ pdev_network->Configuration.DSConfig = pregistrypriv->channel;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("pregistrypriv->channel =%d, pdev_network->Configuration."
+ "DSConfig = 0x%x\n", pregistrypriv->channel,
+ pdev_network->Configuration.DSConfig));
+
+ if (cur_network->network.InfrastructureMode == Ndis802_11IBSS)
+ pdev_network->Configuration.ATIMWindow = 0;
+
+ pdev_network->InfrastructureMode =
+ cur_network->network.InfrastructureMode;
+
+ /* 1. Supported rates */
+ /* 2. IE */
+
+ sz = rtw_generate_ie23a(pregistrypriv);
+
+ pdev_network->IELength = sz;
+
+ pdev_network->Length =
+ get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
+
+ /* note: translate IELength & Length after assigning
+ Length to cmdsz in createbss_cmd() */
+ /* pdev_network->IELength = cpu_to_le32(sz); */
+
+}
+
+void rtw_get_encrypt_decrypt_from_registrypriv23a(struct rtw_adapter* adapter)
+{
+
+}
+
+/* the function is at passive_level */
+void rtw_joinbss_reset23a(struct rtw_adapter *padapter)
+{
+ u8 threshold;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ /* todo: if you want to do something io/reg/hw setting
+ before join_bss, please add code here */
+
+ pmlmepriv->num_FortyMHzIntolerant = 0;
+
+ pmlmepriv->num_sta_no_ht = 0;
+
+ phtpriv->ampdu_enable = false;/* reset to disabled */
+
+ /* TH = 1 => invalidate (disable) usb rx aggregation */
+ /* TH = 0 => validate (enable) usb rx aggregation, use init value. */
+ if (phtpriv->ht_option) {
+ if (padapter->registrypriv.wifi_spec == 1)
+ threshold = 1;
+ else
+ threshold = 0;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_RXDMA_AGG_PG_TH,
+ (u8 *)(&threshold));
+ } else {
+ threshold = 1;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_RXDMA_AGG_PG_TH,
+ (u8 *)(&threshold));
+ }
+}
+
+/* the function is >= passive_level */
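+/*
+ * If the target's IEs (in_ie) carry an HT capability element, append a WMM
+ * information element (when QoS is not enabled yet), our own HT capability
+ * element and, when present, the HT additional-info element to out_ie, and
+ * set phtpriv->ht_option. Returns phtpriv->ht_option.
+ */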
+unsigned int rtw_restructure_ht_ie23a(struct rtw_adapter *padapter, u8 *in_ie,
+ u8 *out_ie, uint in_len, uint *pout_len)
+{
+ u32 ielen, out_len;
+ int max_rx_ampdu_factor;
+ unsigned char *p, *pframe;
+ struct ieee80211_ht_cap ht_capie;
+ unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ phtpriv->ht_option = false;
+
+ p = rtw_get_ie23a(in_ie + 12, _HT_CAPABILITY_IE_, &ielen, in_len - 12);
+
+ if (p && ielen > 0) {
+ u32 rx_packet_offset, max_recvbuf_sz;
+ if (pqospriv->qos_option == 0) {
+ out_len = *pout_len;
+ pframe = rtw_set_ie23a(out_ie + out_len,
+ _VENDOR_SPECIFIC_IE_,
+ _WMM_IE_Length_, WMM_IE, pout_len);
+
+ pqospriv->qos_option = 1;
+ }
+
+ out_len = *pout_len;
+
+ memset(&ht_capie, 0, sizeof(struct ieee80211_ht_cap));
+
+ ht_capie.cap_info = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_DSSSCCK40;
+
+ rtw_hal_get_def_var23a(padapter, HAL_DEF_RX_PACKET_OFFSET,
+ &rx_packet_offset);
+ rtw_hal_get_def_var23a(padapter, HAL_DEF_MAX_RECVBUF_SZ,
+ &max_recvbuf_sz);
+
+ rtw_hal_get_def_var23a(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR,
+ &max_rx_ampdu_factor);
+ ht_capie.ampdu_params_info = max_rx_ampdu_factor & 0x03;
+
+ if (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
+ ht_capie.ampdu_params_info |=
+ (IEEE80211_HT_AMPDU_PARM_DENSITY& (0x07 << 2));
+ else
+ ht_capie.ampdu_params_info |=
+ (IEEE80211_HT_AMPDU_PARM_DENSITY & 0x00);
+
+ pframe = rtw_set_ie23a(out_ie + out_len, _HT_CAPABILITY_IE_,
+ sizeof(struct ieee80211_ht_cap),
+ (unsigned char*)&ht_capie, pout_len);
+
+ phtpriv->ht_option = true;
+
+ p = rtw_get_ie23a(in_ie + 12, _HT_ADD_INFO_IE_, &ielen, in_len-12);
+ if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
+ out_len = *pout_len;
+ pframe = rtw_set_ie23a(out_ie + out_len, _HT_ADD_INFO_IE_,
+ ielen, p + 2 , pout_len);
+ }
+ }
+
+ return phtpriv->ht_option;
+}
+
+/* the function is > passive_level (in critical_section) */
+void rtw_update_ht_cap23a(struct rtw_adapter *padapter, u8 *pie, uint ie_len)
+{
+ u8 *p, max_ampdu_sz;
+ int len;
+ /* struct sta_info *bmc_sta, *psta; */
+ struct ieee80211_ht_cap *pht_capie;
+ struct ieee80211_ht_addt_info *pht_addtinfo;
+ /* struct recv_reorder_ctrl *preorder_ctrl; */
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ /* struct recv_priv *precvpriv = &padapter->recvpriv; */
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ /* struct wlan_network *pcur_network = &pmlmepriv->cur_network;; */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if (!phtpriv->ht_option)
+ return;
+
+ if ((!pmlmeinfo->HT_info_enable) || (!pmlmeinfo->HT_caps_enable))
+ return;
+
+ DBG_8723A("+rtw_update_ht_cap23a()\n");
+
+ /* maybe we need to check whether the AP supports rx ampdu. */
+ if ((phtpriv->ampdu_enable == false) && (pregistrypriv->ampdu_enable == 1)) {
+ if (pregistrypriv->wifi_spec == 1)
+ phtpriv->ampdu_enable = false;
+ else
+ phtpriv->ampdu_enable = true;
+ } else if (pregistrypriv->ampdu_enable == 2) {
+ phtpriv->ampdu_enable = true;
+ }
+
+ /* check Max Rx A-MPDU Size */
+ len = 0;
+ p = rtw_get_ie23a(pie+sizeof (struct ndis_802_11_fixed_ies), _HT_CAPABILITY_IE_, &len, ie_len-sizeof (struct ndis_802_11_fixed_ies));
+ if (p && len > 0) {
+ pht_capie = (struct ieee80211_ht_cap *)(p+2);
+ max_ampdu_sz = (pht_capie->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_FACTOR);
+ max_ampdu_sz = 1 << (max_ampdu_sz+3); /* max_ampdu_sz (kbytes); */
+
+ /* DBG_8723A("rtw_update_ht_cap23a(): max_ampdu_sz =%d\n", max_ampdu_sz); */
+ phtpriv->rx_ampdu_maxlen = max_ampdu_sz;
+
+ }
+
+ len = 0;
+ p = rtw_get_ie23a(pie+sizeof (struct ndis_802_11_fixed_ies), _HT_ADD_INFO_IE_, &len, ie_len-sizeof (struct ndis_802_11_fixed_ies));
+ if (p && len>0)
+ {
+ pht_addtinfo = (struct ieee80211_ht_addt_info *)(p+2);
+ /* todo: */
+ }
+
+ /* update cur_bwmode & cur_ch_offset */
+ if ((pregistrypriv->cbw40_enable) &&
+ (pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info & BIT(1)) &&
+ (pmlmeinfo->HT_info.infos[0] & BIT(2)))
+ {
+ int i;
+ u8 rf_type;
+
+ padapter->HalFunc.GetHwRegHandler(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ /* update the MCS rates */
+ for (i = 0; i < 16; i++)
+ {
+ if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R23A[i];
+ else
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R23A[i];
+ }
+ /* switch to 40MHz mode according to the AP */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
+ switch ((pmlmeinfo->HT_info.infos[0] & 0x3))
+ {
+ case HT_EXTCHNL_OFFSET_UPPER:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+
+ case HT_EXTCHNL_OFFSET_LOWER:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+
+ default:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+ }
+
+ /* */
+ /* Config SM Power Save setting */
+ /* */
+ pmlmeinfo->SM_PS = (pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info & 0x0C) >> 2;
+ if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
+ DBG_8723A("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
+
+ /* */
+ /* Config current HT Protection mode. */
+ /* */
+ pmlmeinfo->HT_protection = pmlmeinfo->HT_info.infos[1] & 0x3;
+}
+
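+/*
+ * Issue an ADDBA request for the frame's priority/TID when HT A-MPDU is
+ * enabled for the destination STA, no request has been issued for that TID
+ * yet, and at least 100 frames were transmitted OK in the last check period;
+ * broadcast/multicast frames are ignored.
+ */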
+void rtw_issue_addbareq_cmd23a(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ u8 issued;
+ int priority;
+ struct sta_info *psta = NULL;
+ struct ht_priv *phtpriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ s32 bmcst = is_multicast_ether_addr(pattrib->ra);
+
+ if (bmcst || (padapter->mlmepriv.LinkDetectInfo.NumTxOkInPeriod<100))
+ return;
+
+ priority = pattrib->priority;
+
+ if (pattrib->psta)
+ psta = pattrib->psta;
+ else
+ {
+ DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
+ psta = rtw_get_stainfo23a(&padapter->stapriv, pattrib->ra);
+ }
+
+ if (psta == NULL)
+ {
+ DBG_8723A("%s, psta == NULL\n", __func__);
+ return;
+ }
+
+ if (!(psta->state &_FW_LINKED))
+ {
+ DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
+ return;
+ }
+
+ phtpriv = &psta->htpriv;
+
+ if ((phtpriv->ht_option == true) && (phtpriv->ampdu_enable == true))
+ {
+ issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
+ issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
+
+ if (0 == issued)
+ {
+ DBG_8723A("rtw_issue_addbareq_cmd23a, p =%d\n", priority);
+ psta->htpriv.candidate_tid_bitmap |= CHKBIT((u8)priority);
+ rtw_addbareq_cmd23a(padapter, (u8) priority, pattrib->ra);
+ }
+ }
+}
+
+inline void rtw_set_roaming(struct rtw_adapter *adapter, u8 to_roaming)
+{
+ if (to_roaming == 0)
+ adapter->mlmepriv.to_join = false;
+ adapter->mlmepriv.to_roaming = to_roaming;
+}
+
+inline u8 rtw_to_roaming(struct rtw_adapter *adapter)
+{
+ return adapter->mlmepriv.to_roaming;
+}
+
+void rtw23a_roaming(struct rtw_adapter *padapter, struct wlan_network *tgt_network)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ spin_lock_bh(&pmlmepriv->lock);
+ _rtw23a_roaming(padapter, tgt_network);
+ spin_unlock_bh(&pmlmepriv->lock);
+}
+
+void _rtw23a_roaming(struct rtw_adapter *padapter, struct wlan_network *tgt_network)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *pnetwork;
+ int do_join_r;
+
+ if (tgt_network != NULL)
+ pnetwork = tgt_network;
+ else
+ pnetwork = &pmlmepriv->cur_network;
+
+ if (0 < rtw_to_roaming(padapter)) {
+ DBG_8723A("roaming from %s("MAC_FMT"), length:%d\n",
+ pnetwork->network.Ssid.ssid,
+ MAC_ARG(pnetwork->network.MacAddress),
+ pnetwork->network.Ssid.ssid_len);
+ memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.Ssid,
+ sizeof(struct cfg80211_ssid));
+
+ pmlmepriv->assoc_by_bssid = false;
+
+ while(1) {
+ if (_SUCCESS == (do_join_r = rtw_do_join23a(padapter))) {
+ break;
+ } else {
+ DBG_8723A("roaming do_join return %d\n", do_join_r);
+ pmlmepriv->to_roaming--;
+
+ if (0 < rtw_to_roaming(padapter)) {
+ continue;
+ } else {
+ DBG_8723A("%s(%d) -to roaming fail, indicate_disconnect\n", __func__, __LINE__);
+ rtw_indicate_disconnect23a(padapter);
+ break;
+ }
+ }
+ }
+ }
+}
+
+int rtw_linked_check(struct rtw_adapter *padapter)
+{
+ if ((check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE)) ||
+ (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE))) {
+ if (padapter->stapriv.asoc_sta_count > 2)
+ return true;
+ } else { /* Station mode */
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) == true)
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
new file mode 100644
index 000000000000..4c753639ea5a
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
@@ -0,0 +1,9990 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_MLME_EXT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <rtw_mlme_ext.h>
+#include <wlan_bssdef.h>
+#include <mlme_osdep.h>
+#include <recv_osdep.h>
+#include <ethernet.h>
+#include <linux/ieee80211.h>
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+#include <rtl8723a_hal.h>
+#endif
+
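+/*
+ * mlme_sta_tbl is laid out in IEEE 802.11 management frame subtype order
+ * (0 assoc req ... 5 probe rsp, 6/7 reserved, 8 beacon, 9 ATIM, 10 disassoc,
+ * 11 auth, 12 deauth, 13 action), presumably so the dispatcher can index it
+ * directly by subtype.
+ */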
+static struct mlme_handler mlme_sta_tbl[]={
+ {"OnAssocReq23a", &OnAssocReq23a},
+ {"OnAssocRsp23a", &OnAssocRsp23a},
+ {"OnReAssocReq", &OnAssocReq23a},
+ {"OnReAssocRsp", &OnAssocRsp23a},
+ {"OnProbeReq23a", &OnProbeReq23a},
+ {"OnProbeRsp23a", &OnProbeRsp23a},
+
+ /*----------------------------------------------------------
+ below 2 are reserved
+ -----------------------------------------------------------*/
+ {"DoReserved23a", &DoReserved23a},
+ {"DoReserved23a", &DoReserved23a},
+ {"OnBeacon23a", &OnBeacon23a},
+ {"OnATIM", &OnAtim23a},
+ {"OnDisassoc23a", &OnDisassoc23a},
+ {"OnAuth23a", &OnAuth23aClient23a},
+ {"OnDeAuth23a", &OnDeAuth23a},
+ {"OnAction23a", &OnAction23a},
+};
+
+static struct action_handler OnAction23a_tbl[]={
+ {WLAN_CATEGORY_SPECTRUM_MGMT, "ACTION_SPECTRUM_MGMT", on_action_spct23a},
+ {WLAN_CATEGORY_QOS, "ACTION_QOS", &OnAction23a_qos},
+ {WLAN_CATEGORY_DLS, "ACTION_DLS", &OnAction23a_dls},
+ {WLAN_CATEGORY_BACK, "ACTION_BACK", &OnAction23a_back23a},
+ {WLAN_CATEGORY_PUBLIC, "ACTION_PUBLIC", on_action_public23a},
+ {WLAN_CATEGORY_HT, "ACTION_HT", &OnAction23a_ht},
+ {WLAN_CATEGORY_SA_QUERY, "ACTION_SA_QUERY", &DoReserved23a},
+ {WLAN_CATEGORY_WMM, "ACTION_WMM", &OnAction23a_wmm},
+ {WLAN_CATEGORY_VENDOR_SPECIFIC, "ACTION_P2P", &OnAction23a_p2p},
+};
+
+static u8 null_addr[ETH_ALEN]= {0, 0, 0, 0, 0, 0};
+
+/**************************************************
+OUI definitions for the vendor specific IE
+***************************************************/
+unsigned char RTW_WPA_OUI23A[] = {0x00, 0x50, 0xf2, 0x01};
+unsigned char WMM_OUI23A[] = {0x00, 0x50, 0xf2, 0x02};
+unsigned char WPS_OUI23A[] = {0x00, 0x50, 0xf2, 0x04};
+unsigned char P2P_OUI23A[] = {0x50, 0x6F, 0x9A, 0x09};
+unsigned char WFD_OUI23A[] = {0x50, 0x6F, 0x9A, 0x0A};
+
+unsigned char WMM_INFO_OUI23A[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
+unsigned char WMM_PARA_OUI23A[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
+
+unsigned char WPA_TKIP_CIPHER23A[4] = {0x00, 0x50, 0xf2, 0x02};
+unsigned char RSN_TKIP_CIPHER23A[4] = {0x00, 0x0f, 0xac, 0x02};
+
+
+/********************************************************
+MCS rate definitions
+*********************************************************/
+unsigned char MCS_rate_2R23A[16] = {
+ 0xff, 0xff, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+unsigned char MCS_rate_1R23A[16] = {
+ 0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+
+/********************************************************
+ChannelPlan definitions
+*********************************************************/
+
+static struct rt_channel_plan_2g RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x00, RT_CHANNEL_DOMAIN_2G_WORLD , Passive scan CH 12, 13 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x01, RT_CHANNEL_DOMAIN_2G_ETSI1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* 0x02, RT_CHANNEL_DOMAIN_2G_FCC1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}, /* 0x03, RT_CHANNEL_DOMAIN_2G_MIKK1 */
+ {{10, 11, 12, 13}, 4}, /* 0x04, RT_CHANNEL_DOMAIN_2G_ETSI2 */
+ {{}, 0}, /* 0x05, RT_CHANNEL_DOMAIN_2G_NULL */
+};
+
+static struct rt_channel_plan_5g RTW_ChannelPlan5G[RT_CHANNEL_DOMAIN_5G_MAX] = {
+ {{}, 0}, /* 0x00, RT_CHANNEL_DOMAIN_5G_NULL */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 19}, /* 0x01, RT_CHANNEL_DOMAIN_5G_ETSI1 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165}, 24}, /* 0x02, RT_CHANNEL_DOMAIN_5G_ETSI2 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 149, 153, 157, 161, 165}, 22}, /* 0x03, RT_CHANNEL_DOMAIN_5G_ETSI3 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165}, 24}, /* 0x04, RT_CHANNEL_DOMAIN_5G_FCC1 */
+ {{36, 40, 44, 48, 149, 153, 157, 161, 165}, 9}, /* 0x05, RT_CHANNEL_DOMAIN_5G_FCC2 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 149, 153, 157, 161, 165}, 13}, /* 0x06, RT_CHANNEL_DOMAIN_5G_FCC3 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 149, 153, 157, 161}, 12}, /* 0x07, RT_CHANNEL_DOMAIN_5G_FCC4 */
+ {{149, 153, 157, 161, 165}, 5}, /* 0x08, RT_CHANNEL_DOMAIN_5G_FCC5 */
+ {{36, 40, 44, 48, 52, 56, 60, 64}, 8}, /* 0x09, RT_CHANNEL_DOMAIN_5G_FCC6 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 136, 140, 149, 153, 157, 161, 165}, 20}, /* 0x0A, RT_CHANNEL_DOMAIN_5G_FCC7_IC1 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 149, 153, 157, 161, 165}, 20}, /* 0x0B, RT_CHANNEL_DOMAIN_5G_KCC1 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 19}, /* 0x0C, RT_CHANNEL_DOMAIN_5G_MKK1 */
+ {{36, 40, 44, 48, 52, 56, 60, 64}, 8}, /* 0x0D, RT_CHANNEL_DOMAIN_5G_MKK2 */
+ {{100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 11}, /* 0x0E, RT_CHANNEL_DOMAIN_5G_MKK3 */
+ {{56, 60, 64, 100, 104, 108, 112, 116, 136, 140, 149, 153, 157, 161, 165}, 15}, /* 0x0F, RT_CHANNEL_DOMAIN_5G_NCC1 */
+ {{56, 60, 64, 149, 153, 157, 161, 165}, 8}, /* 0x10, RT_CHANNEL_DOMAIN_5G_NCC2 */
+
+ /* Driver self-defined entries for old channel plan compatibility; remember to update these when new channel plan definitions are added ===== */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 132, 136, 140, 149, 153, 157, 161, 165}, 21}, /* 0x11, RT_CHANNEL_DOMAIN_5G_FCC */
+ {{36, 40, 44, 48}, 4}, /* 0x12, RT_CHANNEL_DOMAIN_5G_JAPAN_NO_DFS */
+ {{36, 40, 44, 48, 149, 153, 157, 161}, 8}, /* 0x13, RT_CHANNEL_DOMAIN_5G_FCC4_NO_DFS */
+};
+
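+/*
+ * Each entry pairs an index into RTW_ChannelPlan2G (first value) with an
+ * index into RTW_ChannelPlan5G (second value); the entry's position in this
+ * table is the channel plan ID.
+ */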
+static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
+ /* 0x00 ~ 0x1F , Old Define ===== */
+ {0x02, 0x11}, /* 0x00, RT_CHANNEL_DOMAIN_FCC */
+ {0x02, 0x0A}, /* 0x01, RT_CHANNEL_DOMAIN_IC */
+ {0x01, 0x01}, /* 0x02, RT_CHANNEL_DOMAIN_ETSI */
+ {0x01, 0x00}, /* 0x03, RT_CHANNEL_DOMAIN_SPAIN */
+ {0x01, 0x00}, /* 0x04, RT_CHANNEL_DOMAIN_FRANCE */
+ {0x03, 0x00}, /* 0x05, RT_CHANNEL_DOMAIN_MKK */
+ {0x03, 0x00}, /* 0x06, RT_CHANNEL_DOMAIN_MKK1 */
+ {0x01, 0x09}, /* 0x07, RT_CHANNEL_DOMAIN_ISRAEL */
+ {0x03, 0x09}, /* 0x08, RT_CHANNEL_DOMAIN_TELEC */
+ {0x03, 0x00}, /* 0x09, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN */
+ {0x00, 0x00}, /* 0x0A, RT_CHANNEL_DOMAIN_WORLD_WIDE_13 */
+ {0x02, 0x0F}, /* 0x0B, RT_CHANNEL_DOMAIN_TAIWAN */
+ {0x01, 0x08}, /* 0x0C, RT_CHANNEL_DOMAIN_CHINA */
+ {0x02, 0x06}, /* 0x0D, RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO */
+ {0x02, 0x0B}, /* 0x0E, RT_CHANNEL_DOMAIN_KOREA */
+ {0x02, 0x09}, /* 0x0F, RT_CHANNEL_DOMAIN_TURKEY */
+ {0x01, 0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
+ {0x02, 0x05}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
+ {0x01, 0x12}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x00, 0x04}, /* 0x13, RT_CHANNEL_DOMAIN_WORLD_WIDE_5G */
+ {0x02, 0x10}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
+ {0x00, 0x12}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
+ {0x00, 0x13}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
+ {0x03, 0x12}, /* 0x17, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x05, 0x08}, /* 0x18, RT_CHANNEL_DOMAIN_PAKISTAN_NO_DFS */
+ {0x02, 0x08}, /* 0x19, RT_CHANNEL_DOMAIN_TAIWAN2_NO_DFS */
+ {0x00, 0x00}, /* 0x1A, */
+ {0x00, 0x00}, /* 0x1B, */
+ {0x00, 0x00}, /* 0x1C, */
+ {0x00, 0x00}, /* 0x1D, */
+ {0x00, 0x00}, /* 0x1E, */
+ {0x05, 0x04}, /* 0x1F, RT_CHANNEL_DOMAIN_WORLD_WIDE_ONLY_5G */
+ /* 0x20 ~ 0x7F , New Define ===== */
+ {0x00, 0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
+ {0x01, 0x00}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
+ {0x02, 0x00}, /* 0x22, RT_CHANNEL_DOMAIN_FCC1_NULL */
+ {0x03, 0x00}, /* 0x23, RT_CHANNEL_DOMAIN_MKK1_NULL */
+ {0x04, 0x00}, /* 0x24, RT_CHANNEL_DOMAIN_ETSI2_NULL */
+ {0x02, 0x04}, /* 0x25, RT_CHANNEL_DOMAIN_FCC1_FCC1 */
+ {0x00, 0x01}, /* 0x26, RT_CHANNEL_DOMAIN_WORLD_ETSI1 */
+ {0x03, 0x0C}, /* 0x27, RT_CHANNEL_DOMAIN_MKK1_MKK1 */
+ {0x00, 0x0B}, /* 0x28, RT_CHANNEL_DOMAIN_WORLD_KCC1 */
+ {0x00, 0x05}, /* 0x29, RT_CHANNEL_DOMAIN_WORLD_FCC2 */
+ {0x00, 0x00}, /* 0x2A, */
+ {0x00, 0x00}, /* 0x2B, */
+ {0x00, 0x00}, /* 0x2C, */
+ {0x00, 0x00}, /* 0x2D, */
+ {0x00, 0x00}, /* 0x2E, */
+ {0x00, 0x00}, /* 0x2F, */
+ {0x00, 0x06}, /* 0x30, RT_CHANNEL_DOMAIN_WORLD_FCC3 */
+ {0x00, 0x07}, /* 0x31, RT_CHANNEL_DOMAIN_WORLD_FCC4 */
+ {0x00, 0x08}, /* 0x32, RT_CHANNEL_DOMAIN_WORLD_FCC5 */
+ {0x00, 0x09}, /* 0x33, RT_CHANNEL_DOMAIN_WORLD_FCC6 */
+ {0x02, 0x0A}, /* 0x34, RT_CHANNEL_DOMAIN_FCC1_FCC7 */
+ {0x00, 0x02}, /* 0x35, RT_CHANNEL_DOMAIN_WORLD_ETSI2 */
+ {0x00, 0x03}, /* 0x36, RT_CHANNEL_DOMAIN_WORLD_ETSI3 */
+ {0x03, 0x0D}, /* 0x37, RT_CHANNEL_DOMAIN_MKK1_MKK2 */
+ {0x03, 0x0E}, /* 0x38, RT_CHANNEL_DOMAIN_MKK1_MKK3 */
+ {0x02, 0x0F}, /* 0x39, RT_CHANNEL_DOMAIN_FCC1_NCC1 */
+ {0x00, 0x00}, /* 0x3A, */
+ {0x00, 0x00}, /* 0x3B, */
+ {0x00, 0x00}, /* 0x3C, */
+ {0x00, 0x00}, /* 0x3D, */
+ {0x00, 0x00}, /* 0x3E, */
+ {0x00, 0x00}, /* 0x3F, */
+ {0x02, 0x10}, /* 0x40, RT_CHANNEL_DOMAIN_FCC1_NCC2 */
+ {0x03, 0x00}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */
+};
+
+static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03, 0x02}; /* use the combination with the maximum number of channels */
+
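+/*
+ * wlanevents is indexed by the C2H event code reported by the firmware;
+ * the first field appears to be the expected event payload size (0 when
+ * unused/unchecked) and the second the handler to call.
+ */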
+static struct fwevent wlanevents[] =
+{
+ {0, rtw_dummy_event_callback23a}, /*0*/
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, &rtw_survey_event_cb23a}, /*8*/
+ {sizeof (struct surveydone_event), &rtw_surveydone_event_callback23a}, /*9*/
+
+ {0, &rtw23a_joinbss_event_cb}, /*10*/
+ {sizeof(struct stassoc_event), &rtw_stassoc_event_callback23a},
+ {sizeof(struct stadel_event), &rtw_stadel_event_callback23a},
+ {0, &rtw_atimdone_event_callback23a},
+ {0, rtw_dummy_event_callback23a},
+ {0, NULL}, /*15*/
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, rtw23a_fwdbg_event_callback},
+ {0, NULL}, /*20*/
+ {0, NULL},
+ {0, NULL},
+ {0, &rtw_cpwm_event_callback23a},
+ {0, NULL},
+};
+
+
+/*
+ * Search for the given channel @ch in the given channel set @ch_set
+ * @ch_set: the given channel set
+ * @ch: the given channel number
+ *
+ * return the index of @ch in @ch_set, -1 if not found
+ */
+int rtw_ch_set_search_ch23a(struct rt_channel_info *ch_set, const u32 ch)
+{
+ int i;
+
+ for (i = 0; ch_set[i].ChannelNum != 0; i++) {
+ if (ch == ch_set[i].ChannelNum)
+ break;
+ }
+
+ /* the channel set is zero-terminated: ChannelNum == 0 here means not found */
+ if (ch_set[i].ChannelNum == 0)
+ return -1;
+ return i;
+}
+
+/****************************************************************************
+
+Following are the initialization functions for WiFi MLME
+
+*****************************************************************************/
+
+int init_hw_mlme_ext23a(struct rtw_adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ set_channel_bwmode23a(padapter, pmlmeext->cur_channel,
+ pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+ return _SUCCESS;
+}
+
+static void init_mlme_ext_priv23a_value(struct rtw_adapter* padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ unsigned char mixed_datarate[NumRates] = {
+ _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
+ _9M_RATE_, _12M_RATE_, _18M_RATE_, _24M_RATE_, _36M_RATE_,
+ _48M_RATE_, _54M_RATE_, 0xff};
+ unsigned char mixed_basicrate[NumRates] = {
+ _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
+ _12M_RATE_, _24M_RATE_, 0xff,};
+
+ atomic_set(&pmlmeext->event_seq, 0);
+ /* reset to zero when disconnect at client mode */
+ pmlmeext->mgnt_seq = 0;
+
+ pmlmeext->cur_channel = padapter->registrypriv.channel;
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ pmlmeext->retry = 0;
+
+ pmlmeext->cur_wireless_mode = padapter->registrypriv.wireless_mode;
+
+ memcpy(pmlmeext->datarate, mixed_datarate, NumRates);
+ memcpy(pmlmeext->basicrate, mixed_basicrate, NumRates);
+
+ if (pmlmeext->cur_channel > 14)
+ pmlmeext->tx_rate = IEEE80211_OFDM_RATE_6MB;
+ else
+ pmlmeext->tx_rate = IEEE80211_CCK_RATE_1MB;
+
+ pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
+ pmlmeext->sitesurvey_res.channel_idx = 0;
+ pmlmeext->sitesurvey_res.bss_cnt = 0;
+ pmlmeext->scan_abort = false;
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ pmlmeinfo->reauth_count = 0;
+ pmlmeinfo->reassoc_count = 0;
+ pmlmeinfo->link_count = 0;
+ pmlmeinfo->auth_seq = 0;
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
+ pmlmeinfo->key_index = 0;
+ pmlmeinfo->iv = 0;
+
+ pmlmeinfo->enc_algo = _NO_PRIVACY_;
+ pmlmeinfo->authModeToggle = 0;
+
+ memset(pmlmeinfo->chg_txt, 0, 128);
+
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ pmlmeinfo->preamble_mode = PREAMBLE_AUTO;
+
+ pmlmeinfo->dialogToken = 0;
+
+ pmlmeext->action_public_rxseq = 0xffff;
+ pmlmeext->action_public_dialog_token = 0xff;
+}
+
+static int has_channel(struct rt_channel_info *channel_set,
+ u8 chanset_size, u8 chan)
+{
+ int i;
+
+ for (i = 0; i < chanset_size; i++) {
+ if (channel_set[i].ChannelNum == chan)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void init_channel_list(struct rtw_adapter *padapter,
+ struct rt_channel_info *channel_set,
+ u8 chanset_size,
+ struct p2p_channels *channel_list)
+{
+
+ struct p2p_oper_class_map op_class[] = {
+ { IEEE80211G, 81, 1, 13, 1, BW20 },
+ { IEEE80211G, 82, 14, 14, 1, BW20 },
+ { IEEE80211A, 115, 36, 48, 4, BW20 },
+ { IEEE80211A, 116, 36, 44, 8, BW40PLUS },
+ { IEEE80211A, 117, 40, 48, 8, BW40MINUS },
+ { IEEE80211A, 124, 149, 161, 4, BW20 },
+ { IEEE80211A, 125, 149, 169, 4, BW20 },
+ { IEEE80211A, 126, 149, 157, 8, BW40PLUS },
+ { IEEE80211A, 127, 153, 161, 8, BW40MINUS },
+ { -1, 0, 0, 0, 0, BW20 }
+ };
+
+ int cla, op;
+
+ cla = 0;
+
+ for (op = 0; op_class[op].op_class; op++) {
+ u8 ch;
+ struct p2p_oper_class_map *o = &op_class[op];
+ struct p2p_reg_class *reg = NULL;
+
+ for (ch = o->min_chan; ch <= o->max_chan; ch += o->inc) {
+ if (!has_channel(channel_set, chanset_size, ch))
+ continue;
+
+ if ((0 == padapter->registrypriv.ht_enable) &&
+ (o->inc == 8))
+ continue;
+
+ if ((0 == (padapter->registrypriv.cbw40_enable & BIT(1))) &&
+ ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+ continue;
+
+ if (reg == NULL) {
+ reg = &channel_list->reg_class[cla];
+ cla++;
+ reg->reg_class = o->op_class;
+ reg->channels = 0;
+ }
+ reg->channel[reg->channels] = ch;
+ reg->channels++;
+ }
+ }
+ channel_list->reg_classes = cla;
+}
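+
+/*
+ * Worked example (assumed typical values, for illustration): for a
+ * 2.4 GHz-only channel plan covering channels 1-13 with 40 MHz support
+ * disabled, only operating class 81 survives the filters above, so the
+ * resulting channel_list has reg_classes = 1, reg_class[0].reg_class = 81
+ * and reg_class[0].channels = 13 (channels 1..13).
+ */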
+
+static u8 init_channel_set(struct rtw_adapter *padapter, u8 ChannelPlan,
+ struct rt_channel_info *channel_set)
+{
+ u8 index, chanset_size = 0;
+ u8 b5GBand = false, b2_4GBand = false;
+ u8 Index2G = 0, Index5G = 0;
+
+ memset(channel_set, 0, sizeof(struct rt_channel_info)*MAX_CHANNEL_NUM);
+
+ if (ChannelPlan >= RT_CHANNEL_DOMAIN_MAX &&
+ ChannelPlan != RT_CHANNEL_DOMAIN_REALTEK_DEFINE) {
+ DBG_8723A("ChannelPlan ID %x error !!!!!\n", ChannelPlan);
+ return chanset_size;
+ }
+
+ if (padapter->registrypriv.wireless_mode & WIRELESS_11G) {
+ b2_4GBand = true;
+ if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+ Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
+ else
+ Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
+ }
+
+ if (padapter->registrypriv.wireless_mode & WIRELESS_11A) {
+ b5GBand = true;
+ if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+ Index5G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index5G;
+ else
+ Index5G = RTW_ChannelPlanMap[ChannelPlan].Index5G;
+ }
+
+ if (b2_4GBand) {
+ for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
+ channel_set[chanset_size].ChannelNum =
+ RTW_ChannelPlan2G[Index2G].Channel[index];
+
+ if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) ||
+ /* Channel 1~11 is active, and 12~14 is passive */
+ (RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G == ChannelPlan)){
+ if (channel_set[chanset_size].ChannelNum >= 1 &&
+ channel_set[chanset_size].ChannelNum <= 11)
+ channel_set[chanset_size].ScanType =
+ SCAN_ACTIVE;
+ else if ((channel_set[chanset_size].ChannelNum >= 12 &&
+ channel_set[chanset_size].ChannelNum <= 14))
+ channel_set[chanset_size].ScanType =
+ SCAN_PASSIVE;
+ } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 ==
+ ChannelPlan ||
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_5G ==
+ ChannelPlan ||
+ RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) {
+ /* channel 12~13, passive scan */
+ if (channel_set[chanset_size].ChannelNum <= 11)
+ channel_set[chanset_size].ScanType =
+ SCAN_ACTIVE;
+ else
+ channel_set[chanset_size].ScanType =
+ SCAN_PASSIVE;
+ } else
+ channel_set[chanset_size].ScanType =
+ SCAN_ACTIVE;
+
+ chanset_size++;
+ }
+ }
+
+ if (b5GBand) {
+ for (index = 0; index < RTW_ChannelPlan5G[Index5G].Len; index++) {
+ if (RTW_ChannelPlan5G[Index5G].Channel[index] <= 48 ||
+ RTW_ChannelPlan5G[Index5G].Channel[index] >= 149) {
+ channel_set[chanset_size].ChannelNum =
+ RTW_ChannelPlan5G[Index5G].Channel[index];
+ if (RT_CHANNEL_DOMAIN_WORLD_WIDE_5G ==
+ ChannelPlan) {
+ /* passive scan for all 5G channels */
+ channel_set[chanset_size].ScanType =
+ SCAN_PASSIVE;
+ } else
+ channel_set[chanset_size].ScanType =
+ SCAN_ACTIVE;
+ DBG_8723A("%s(): channel_set[%d].ChannelNum = "
+ "%d\n", __func__, chanset_size,
+ channel_set[chanset_size].ChannelNum);
+ chanset_size++;
+ }
+ }
+ }
+
+ return chanset_size;
+}
+
+int init_mlme_ext_priv23a(struct rtw_adapter *padapter)
+{
+ int res = _SUCCESS;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ pmlmeext->padapter = padapter;
+
+ init_mlme_ext_priv23a_value(padapter);
+ pmlmeinfo->bAcceptAddbaReq = pregistrypriv->bAcceptAddbaReq;
+
+ init_mlme_ext_timer23a(padapter);
+
+#ifdef CONFIG_8723AU_AP_MODE
+ init_mlme_ap_info23a(padapter);
+#endif
+
+ pmlmeext->max_chan_nums = init_channel_set(padapter,
+ pmlmepriv->ChannelPlan,
+ pmlmeext->channel_set);
+ init_channel_list(padapter, pmlmeext->channel_set,
+ pmlmeext->max_chan_nums, &pmlmeext->channel_list);
+
+ pmlmeext->chan_scan_time = SURVEY_TO;
+ pmlmeext->mlmeext_init = true;
+
+ pmlmeext->active_keep_alive_check = true;
+ return res;
+}
+
+void free_mlme_ext_priv23a(struct mlme_ext_priv *pmlmeext)
+{
+ struct rtw_adapter *padapter = pmlmeext->padapter;
+
+ if (!padapter)
+ return;
+
+ if (padapter->bDriverStopped == true) {
+ del_timer_sync(&pmlmeext->survey_timer);
+ del_timer_sync(&pmlmeext->link_timer);
+ /* del_timer_sync(&pmlmeext->ADDBA_timer); */
+ }
+}
+
+static void
+_mgt_dispatcher23a(struct rtw_adapter *padapter, struct mlme_handler *ptable,
+ struct recv_frame *precv_frame)
+{
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (ptable->func) {
+ /* receive the frames that ra(a1) is my address
+ or ra(a1) is bc address. */
+ if (!ether_addr_equal(hdr->addr1, myid(&padapter->eeprompriv))&&
+ !is_broadcast_ether_addr(hdr->addr1))
+ return;
+
+ ptable->func(padapter, precv_frame);
+ }
+}
+
+void mgt_dispatcher23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ int index;
+ struct mlme_handler *ptable;
+#ifdef CONFIG_8723AU_AP_MODE
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+#endif /* CONFIG_8723AU_AP_MODE */
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u16 stype;
+ struct sta_info *psta;
+
+ if (!ieee80211_is_mgmt(hdr->frame_control))
+ return;
+
+ /* only accept frames whose RA (addr1) is our address or the
+ broadcast address */
+ if (!ether_addr_equal(hdr->addr1, myid(&padapter->eeprompriv)) &&
+ !is_broadcast_ether_addr(hdr->addr1))
+ return;
+
+ ptable = mlme_sta_tbl;
+
+ stype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
+ index = stype >> 4;
+
+ if (index > 13) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("Currently we do not support reserved sub-fr-type ="
+ "%d\n", index));
+ return;
+ }
+ ptable += index;
+
+ psta = rtw_get_stainfo23a(&padapter->stapriv, hdr->addr2);
+
+ if (psta) {
+ if (ieee80211_has_retry(hdr->frame_control)) {
+ if (precv_frame->attrib.seq_num ==
+ psta->RxMgmtFrameSeqNum) {
+ /* drop the duplicate management frame */
+ DBG_8723A("Drop duplicate management frame "
+ "with seq_num = %d.\n",
+ precv_frame->attrib.seq_num);
+ return;
+ }
+ }
+ psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num;
+ }
+
+#ifdef CONFIG_8723AU_AP_MODE
+ switch (stype) {
+ case IEEE80211_STYPE_AUTH:
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ ptable->func = &OnAuth23a;
+ else
+ ptable->func = &OnAuth23aClient23a;
+ /* fall through */
+ case IEEE80211_STYPE_ASSOC_REQ:
+ case IEEE80211_STYPE_REASSOC_REQ:
+ _mgt_dispatcher23a(padapter, ptable, precv_frame);
+ break;
+ case IEEE80211_STYPE_PROBE_REQ:
+ /* AP and non-AP cases currently use the same handler */
+ _mgt_dispatcher23a(padapter, ptable, precv_frame);
+ break;
+ case IEEE80211_STYPE_BEACON:
+ _mgt_dispatcher23a(padapter, ptable, precv_frame);
+ break;
+ case IEEE80211_STYPE_ACTION:
+ /* if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) */
+ _mgt_dispatcher23a(padapter, ptable, precv_frame);
+ break;
+ default:
+ _mgt_dispatcher23a(padapter, ptable, precv_frame);
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ rtw_hostapd_mlme_rx23a(padapter, precv_frame);
+ break;
+ }
+#else
+ _mgt_dispatcher23a(padapter, ptable, precv_frame);
+#endif
+}
+
+#ifdef CONFIG_8723AU_P2P
+static u32 p2p_listen_state_process(struct rtw_adapter *padapter,
+ unsigned char *da)
+{
+ bool response = true;
+
+ if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled == false ||
+ padapter->mlmepriv.wps_probe_resp_ie == NULL ||
+ padapter->mlmepriv.p2p_probe_resp_ie == NULL) {
+ DBG_8723A("DON'T issue_probersp23a_p2p23a: p2p_enabled:%d, "
+ "wps_probe_resp_ie:%p, p2p_probe_resp_ie:%p\n",
+ wdev_to_priv(padapter->rtw_wdev)->p2p_enabled,
+ padapter->mlmepriv.wps_probe_resp_ie,
+ padapter->mlmepriv.p2p_probe_resp_ie);
+ response = false;
+ }
+
+ if (response == true)
+ issue_probersp23a_p2p23a(padapter, da);
+
+ return _SUCCESS;
+}
+#endif /* CONFIG_8723AU_P2P */
+
+/****************************************************************************
+
+Following are the callback functions for each subtype of the management frames
+
+*****************************************************************************/
+
+unsigned int OnProbeReq23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ unsigned int ielen;
+ unsigned char *p;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur = &pmlmeinfo->network;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+ uint len = skb->len;
+ u8 is_valid_p2p_probereq = false;
+
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 wifi_test_chk_rate = 1;
+
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) &&
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE) &&
+ !rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT) &&
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH) &&
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN)) {
+ /* mcs_rate = 0 -> CCK 1M rate */
+ /* mcs_rate = 1 -> CCK 2M rate */
+ /* mcs_rate = 2 -> CCK 5.5M rate */
+ /* mcs_rate = 3 -> CCK 11M rate */
+ /* In the P2P mode, the driver should not support
+ the CCK rate */
+
+ /* IOT issue: Google Nexus7 use 1M rate to send
+ p2p_probe_req after GO nego completed and Nexus7
+ is client */
+ if (wifi_test_chk_rate == 1) {
+ if ((is_valid_p2p_probereq =
+ process_probe_req_p2p_ie23a(pwdinfo, pframe,
+ len)) == true) {
+ if (rtw_p2p_chk_role(pwdinfo,
+ P2P_ROLE_DEVICE)) {
+ u8 *sa = ieee80211_get_SA(hdr);
+ p2p_listen_state_process(padapter, sa);
+ return _SUCCESS;
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ goto _continue;
+ }
+ }
+ }
+ }
+
+_continue:
+#endif /* CONFIG_8723AU_P2P */
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ return _SUCCESS;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == false &&
+ check_fwstate(pmlmepriv,
+ WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE) == false) {
+ return _SUCCESS;
+ }
+
+ p = rtw_get_ie23a(pframe + sizeof(struct ieee80211_hdr_3addr) +
+ _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ielen,
+ len - sizeof(struct ieee80211_hdr_3addr) -
+ _PROBEREQ_IE_OFFSET_);
+
+ /* check (wildcard) SSID */
+ if (p) {
+ if (is_valid_p2p_probereq == true) {
+ goto _issue_probersp23a;
+ }
+
+ if ((ielen != 0 &&
+ memcmp((void *)(p+2), cur->Ssid.ssid,
+ cur->Ssid.ssid_len)) ||
+ (ielen == 0 && pmlmeinfo->hidden_ssid_mode)) {
+ return _SUCCESS;
+ }
+
+_issue_probersp23a:
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
+ pmlmepriv->cur_network.join_res == true) {
+ /* DBG_8723A("+issue_probersp23a during ap mode\n"); */
+ issue_probersp23a(padapter, ieee80211_get_SA(hdr),
+ is_valid_p2p_probereq);
+ }
+ }
+
+ return _SUCCESS;
+}
+
+unsigned int OnProbeRsp23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+#ifdef CONFIG_8723AU_P2P
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif
+
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
+ if (pwdinfo->tx_prov_disc_info.benable == true) {
+ if (ether_addr_equal(pwdinfo->tx_prov_disc_info.peerIFAddr,
+ hdr->addr2)) {
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ pwdinfo->tx_prov_disc_info.benable = false;
+ issue_p2p_provision_request23a(padapter,
+ pwdinfo->tx_prov_disc_info.ssid.ssid,
+ pwdinfo->tx_prov_disc_info.ssid.ssid_len,
+ pwdinfo->tx_prov_disc_info.peerDevAddr);
+ }
+ else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ {
+ pwdinfo->tx_prov_disc_info.benable = false;
+ issue_p2p_provision_request23a(padapter,
+ NULL,
+ 0,
+ pwdinfo->tx_prov_disc_info.peerDevAddr);
+ }
+ }
+ }
+ return _SUCCESS;
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
+ if (pwdinfo->nego_req_info.benable == true) {
+ DBG_8723A("[%s] P2P State is GONEGO ING!\n", __func__);
+ if (ether_addr_equal(pwdinfo->nego_req_info.peerDevAddr,
+ hdr->addr2)) {
+ pwdinfo->nego_req_info.benable = false;
+ issue_p2p_GO_request23a(padapter, pwdinfo->nego_req_info.peerDevAddr);
+ }
+ }
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_INVITE_REQ)) {
+ if (pwdinfo->invitereq_info.benable == true) {
+ DBG_8723A("[%s] P2P_STATE_TX_INVITE_REQ!\n", __func__);
+ if (ether_addr_equal(
+ pwdinfo->invitereq_info.peer_macaddr,
+ hdr->addr2)) {
+ pwdinfo->invitereq_info.benable = false;
+ issue_p2p_invitation_request23a(padapter, pwdinfo->invitereq_info.peer_macaddr);
+ }
+ }
+ }
+#endif
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ report_survey_event23a(padapter, precv_frame);
+ return _SUCCESS;
+ }
+
+ return _SUCCESS;
+}
+
+unsigned int OnBeacon23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ int cam_idx;
+ struct sta_info *psta;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+ uint len = skb->len;
+ struct wlan_bssid_ex *pbss;
+ int ret = _SUCCESS;
+ u8 *p = NULL;
+ u32 ielen = 0;
+
+ p = rtw_get_ie23a(pframe + sizeof(struct ieee80211_hdr_3addr) +
+ _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ielen,
+ len - sizeof(struct ieee80211_hdr_3addr) -
+ _BEACON_IE_OFFSET_);
+ if ((p != NULL) && (ielen > 0)) {
+ if ((*(p + 1 + ielen) == 0x2D) && (*(p + 2 + ielen) != 0x2D)) {
+ /* Invalid value 0x2D is detected in Extended Supported
+ * Rates (ESR) IE. Try to fix the IE length to avoid
+ * failed Beacon parsing.
+ */
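+ /*
+ * Layout note (added, based on the check above): p points at the
+ * ESR IE header, so p[1] is its length and p[1 + ielen] its last
+ * data byte; a trailing 0x2D (the HT Capabilities element ID)
+ * not followed by another 0x2D suggests the next IE's header bled
+ * into this one, so the length is trimmed by one byte below.
+ */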
+ DBG_8723A("[WIFIDBG] Error in ESR IE is detected in "
+ "Beacon of BSSID: %pM. Fix the length of "
+ "ESR IE to avoid failed Beacon parsing.\n",
+ hdr->addr3);
+ *(p + 1) = ielen - 1;
+ }
+ }
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ report_survey_event23a(padapter, precv_frame);
+ return _SUCCESS;
+ }
+
+ if (ether_addr_equal(hdr->addr3, get_my_bssid23a(&pmlmeinfo->network))){
+ if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
+ /* we should update current network before auth,
+ or some IE is wrong */
+ pbss = (struct wlan_bssid_ex *)
+ kmalloc(sizeof(struct wlan_bssid_ex),
+ GFP_ATOMIC);
+ if (pbss) {
+ if (collect_bss_info23a(padapter, precv_frame,
+ pbss) == _SUCCESS) {
+ update_network23a(&pmlmepriv->cur_network.network, pbss, padapter, true);
+ rtw_get_bcn_info23a(&pmlmepriv->cur_network);
+ }
+ kfree(pbss);
+ }
+
+ /* check the vendor of the assoc AP */
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP23a(pframe + sizeof(struct ieee80211_hdr_3addr), len-sizeof(struct ieee80211_hdr_3addr));
+
+ /* update TSF Value */
+ update_TSF23a(pmlmeext, pframe, len);
+
+ /* start auth */
+ start_clnt_auth23a(padapter);
+
+ return _SUCCESS;
+ }
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) &&
+ (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
+ psta = rtw_get_stainfo23a(pstapriv, hdr->addr2);
+ if (psta) {
+ ret = rtw_check_bcn_info23a(padapter, pframe,
+ len);
+ if (!ret) {
+ DBG_8723A_LEVEL(_drv_always_,
+ "ap has changed, "
+ "disconnect now\n");
+ receive_disconnect23a(padapter, pmlmeinfo->network.MacAddress, 65535);
+ return _SUCCESS;
+ }
+ /* update WMM, ERP in the beacon */
+ /* todo: the timer is used instead of
+ the number of the beacon received */
+ if ((sta_rx_pkts(psta) & 0xf) == 0) {
+ /* DBG_8723A("update_bcn_info\n"); */
+ update_beacon23a_info(padapter, pframe,
+ len, psta);
+ }
+
+#ifdef CONFIG_8723AU_P2P
+ process_p2p_ps_ie23a(padapter, (pframe + sizeof(struct ieee80211_hdr_3addr)), (len - sizeof(struct ieee80211_hdr_3addr)));
+#endif /* CONFIG_8723AU_P2P */
+ }
+ } else if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ psta = rtw_get_stainfo23a(pstapriv, hdr->addr2);
+ if (psta) {
+ /* update WMM, ERP in the beacon */
+ /* todo: the timer is used instead of the
+ number of the beacon received */
+ if ((sta_rx_pkts(psta) & 0xf) == 0) {
+ /* DBG_8723A("update_bcn_info\n"); */
+ update_beacon23a_info(padapter, pframe,
+ len, psta);
+ }
+ } else {
+ /* allocate a new CAM entry for IBSS station */
+ cam_idx = allocate_fw_sta_entry23a(padapter);
+ if (cam_idx == NUM_STA)
+ goto _END_ONBEACON_;
+
+ /* get supported rate */
+ if (update_sta_support_rate23a(padapter, (pframe + sizeof(struct ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_), (len - sizeof(struct ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_), cam_idx) == _FAIL) {
+ pmlmeinfo->FW_sta_info[cam_idx].status = 0;
+ goto _END_ONBEACON_;
+ }
+
+ /* update TSF Value */
+ update_TSF23a(pmlmeext, pframe, len);
+
+ /* report sta add event */
+ report_add_sta_event23a(padapter, hdr->addr2,
+ cam_idx);
+ }
+ }
+ }
+
+_END_ONBEACON_:
+
+ return _SUCCESS;
+}
+
+unsigned int OnAuth23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+#ifdef CONFIG_8723AU_AP_MODE
+ unsigned int auth_mode, seq, ie_len;
+ unsigned char *sa, *p;
+ u16 algorithm;
+ int status;
+ static struct sta_info stat;
+ struct sta_info *pstat = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+ uint len = skb->len;
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return _FAIL;
+
+ DBG_8723A("+OnAuth23a\n");
+
+ sa = hdr->addr2;
+
+ auth_mode = psecuritypriv->dot11AuthAlgrthm;
+ seq = get_unaligned_le16(pframe +
+ sizeof(struct ieee80211_hdr_3addr) + 2);
+ algorithm = get_unaligned_le16(pframe +
+ sizeof(struct ieee80211_hdr_3addr));
+
+ DBG_8723A("auth alg =%x, seq =%X\n", algorithm, seq);
+
+ if (auth_mode == 2 &&
+ psecuritypriv->dot11PrivacyAlgrthm != _WEP40_ &&
+ psecuritypriv->dot11PrivacyAlgrthm != _WEP104_)
+ auth_mode = 0;
+
+ /* rx a shared-key auth but shared-key is not enabled, or */
+ /* rx an open-system auth but shared-key is enabled */
+ if ((algorithm > 0 && auth_mode == 0) ||
+ (algorithm == 0 && auth_mode == 1)) {
+ DBG_8723A("auth rejected due to bad alg [alg =%d, auth_mib "
+ "=%d] %02X%02X%02X%02X%02X%02X\n",
+ algorithm, auth_mode,
+ sa[0], sa[1], sa[2], sa[3], sa[4], sa[5]);
+
+ status = WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
+
+ goto auth_fail;
+ }
+
+ if (rtw_access_ctrl23a(padapter, sa) == false) {
+ status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
+ goto auth_fail;
+ }
+
+ pstat = rtw_get_stainfo23a(pstapriv, sa);
+ if (!pstat) {
+ /* allocate a new one */
+ DBG_8723A("going to alloc stainfo for sa ="MAC_FMT"\n",
+ MAC_ARG(sa));
+ pstat = rtw_alloc_stainfo23a(pstapriv, sa);
+ if (!pstat) {
+ DBG_8723A(" Exceed the upper limit of supported "
+ "clients...\n");
+ status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
+ goto auth_fail;
+ }
+
+ pstat->state = WIFI_FW_AUTH_NULL;
+ pstat->auth_seq = 0;
+
+ /* pstat->flags = 0; */
+ /* pstat->capability = 0; */
+ } else {
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ if (!list_empty(&pstat->asoc_list)) {
+ list_del_init(&pstat->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ if (pstat->expire_to > 0)
+ {
+ /* TODO: STA re_auth within expire_to */
+ }
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ if (seq == 1) {
+ /* TODO: STA re_auth and auth timeout */
+ }
+ }
+
+ spin_lock_bh(&pstapriv->auth_list_lock);
+ if (list_empty(&pstat->auth_list)) {
+ list_add_tail(&pstat->auth_list, &pstapriv->auth_list);
+ pstapriv->auth_list_cnt++;
+ }
+ spin_unlock_bh(&pstapriv->auth_list_lock);
+
+ if (pstat->auth_seq == 0)
+ pstat->expire_to = pstapriv->auth_to;
+
+ if ((pstat->auth_seq + 1) != seq) {
+ DBG_8723A("(1)auth rejected because out of seq [rx_seq =%d, "
+ "exp_seq =%d]!\n", seq, pstat->auth_seq+1);
+ status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
+ goto auth_fail;
+ }
+
+ if (algorithm == 0 && (auth_mode == 0 || auth_mode == 2)) {
+ if (seq == 1) {
+ pstat->state &= ~WIFI_FW_AUTH_NULL;
+ pstat->state |= WIFI_FW_AUTH_SUCCESS;
+ pstat->expire_to = pstapriv->assoc_to;
+ pstat->authalg = algorithm;
+ } else {
+ DBG_8723A("(2)auth rejected because out of seq "
+ "[rx_seq =%d, exp_seq =%d]!\n",
+ seq, pstat->auth_seq+1);
+ status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
+ goto auth_fail;
+ }
+ } else { /* shared system or auto authentication */
+ if (seq == 1) {
+ /* prepare for the challenging txt... */
+ pstat->state &= ~WIFI_FW_AUTH_NULL;
+ pstat->state |= WIFI_FW_AUTH_STATE;
+ pstat->authalg = algorithm;
+ pstat->auth_seq = 2;
+ } else if (seq == 3) {
+ /* checking for challenging txt... */
+ DBG_8723A("checking for challenging txt...\n");
+
+ p = rtw_get_ie23a(pframe +
+ sizeof(struct ieee80211_hdr_3addr) +
+ 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_,
+ (int *)&ie_len, len -
+ sizeof(struct ieee80211_hdr_3addr) -
+ _AUTH_IE_OFFSET_ - 4);
+
+ if ((p == NULL) || (ie_len <= 0)) {
+ DBG_8723A("auth rejected because challenge "
+ "failure!(1)\n");
+ status = WLAN_STATUS_CHALLENGE_FAIL;
+ goto auth_fail;
+ }
+
+ if (!memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
+ pstat->state &= (~WIFI_FW_AUTH_STATE);
+ pstat->state |= WIFI_FW_AUTH_SUCCESS;
+ /* challenging txt is correct... */
+ pstat->expire_to = pstapriv->assoc_to;
+ } else {
+ DBG_8723A("auth rejected because challenge "
+ "failure!\n");
+ status = WLAN_STATUS_CHALLENGE_FAIL;
+ goto auth_fail;
+ }
+ } else {
+ DBG_8723A("(3)auth rejected because out of seq "
+ "[rx_seq =%d, exp_seq =%d]!\n",
+ seq, pstat->auth_seq+1);
+ status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
+ goto auth_fail;
+ }
+ }
+
+ /* Now, we are going to issue_auth23a... */
+ pstat->auth_seq = seq + 1;
+
+ issue_auth23a(padapter, pstat, (unsigned short)WLAN_STATUS_SUCCESS);
+
+ if (pstat->state & WIFI_FW_AUTH_SUCCESS)
+ pstat->auth_seq = 0;
+
+ return _SUCCESS;
+
+auth_fail:
+
+ if (pstat)
+ rtw_free_stainfo23a(padapter, pstat);
+
+ pstat = &stat;
+ memset((char *)pstat, '\0', sizeof(stat));
+ pstat->auth_seq = 2;
+ memcpy(pstat->hwaddr, sa, 6);
+
+ issue_auth23a(padapter, pstat, (unsigned short)status);
+
+#endif
+ return _FAIL;
+}
+
+unsigned int OnAuth23aClient23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ unsigned int seq, len, status, algthm, offset;
+ unsigned char *p;
+ unsigned int go2asoc = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+ uint pkt_len = skb->len;
+
+ DBG_8723A("%s\n", __func__);
+
+ /* check A1 matches or not */
+ if (!ether_addr_equal(myid(&padapter->eeprompriv),
+ ieee80211_get_DA(hdr)))
+ return _SUCCESS;
+
+ if (!(pmlmeinfo->state & WIFI_FW_AUTH_STATE))
+ return _SUCCESS;
+
+ offset = ieee80211_has_protected(hdr->frame_control) ? 4 : 0;
+
+ algthm = get_unaligned_le16(pframe +
+ sizeof(struct ieee80211_hdr_3addr) + offset);
+ seq = get_unaligned_le16(pframe +
+ sizeof(struct ieee80211_hdr_3addr) + offset + 2);
+ status = get_unaligned_le16(pframe +
+ sizeof(struct ieee80211_hdr_3addr) + offset + 4);
+
+ if (status != 0) {
+ DBG_8723A("clnt auth fail, status: %d\n", status);
+ if (status == 13) { /* pmlmeinfo->auth_algo == dot11AuthAlgrthm_Auto */
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
+ else
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared;
+ /* pmlmeinfo->reauth_count = 0; */
+ }
+
+ set_link_timer(pmlmeext, 1);
+ goto authclnt_fail;
+ }
+
+ if (seq == 2) {
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
+ /* legacy shared-key: fetch the challenge text */
+ p = rtw_get_ie23a(pframe + sizeof(struct ieee80211_hdr_3addr) + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&len,
+ pkt_len - sizeof(struct ieee80211_hdr_3addr) - _AUTH_IE_OFFSET_);
+
+ if (p == NULL) {
+ /* DBG_8723A("marc: no challenge text?\n"); */
+ goto authclnt_fail;
+ }
+
+ memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len);
+ pmlmeinfo->auth_seq = 3;
+ issue_auth23a(padapter, NULL, 0);
+ set_link_timer(pmlmeext, REAUTH_TO);
+
+ return _SUCCESS;
+ } else {
+ /* open system */
+ go2asoc = 1;
+ }
+ } else if (seq == 4) {
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
+ go2asoc = 1;
+ else
+ goto authclnt_fail;
+ } else {
+ /* this is also illegal */
+ /* DBG_8723A("marc: clnt auth failed due to illegal seq =%x\n", seq); */
+ goto authclnt_fail;
+ }
+
+ if (go2asoc) {
+ DBG_8723A_LEVEL(_drv_always_, "auth success, start assoc\n");
+ start_clnt_assoc23a(padapter);
+ return _SUCCESS;
+ }
+
+authclnt_fail:
+
+ /* pmlmeinfo->state &= ~(WIFI_FW_AUTH_STATE); */
+
+ return _FAIL;
+}
+
+unsigned int OnAssocReq23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
+{
+#ifdef CONFIG_8723AU_AP_MODE
+ u16 capab_info, listen_interval;
+ struct rtw_ieee802_11_elems elems;
+ struct sta_info *pstat;
+ unsigned char reassoc, *p, *pos, *wpa_ie;
+ unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
+ int i, ie_len, wpa_ie_len, left;
+ unsigned char supportRate[16];
+ int supportRateNum;
+ unsigned short status = WLAN_STATUS_SUCCESS;
+ unsigned short ie_offset;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur = &pmlmeinfo->network;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sk_buff *skb = precv_frame->pkt;
+ u8 *pframe = skb->data;
+ uint pkt_len = skb->len;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u16 frame_control;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 p2p_status_code = P2P_STATUS_SUCCESS;
+ u8 *p2pie;
+ u32 p2pielen = 0;
+ u8 wfd_ie[ 128 ] = { 0x00 };
+ u32 wfd_ielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return _FAIL;
+
+ frame_control = hdr->frame_control;
+ if (ieee80211_is_assoc_req(frame_control)) {
+ reassoc = 0;
+ ie_offset = _ASOCREQ_IE_OFFSET_;
+ } else { /* WIFI_REASSOCREQ */
+ reassoc = 1;
+ ie_offset = _REASOCREQ_IE_OFFSET_;
+ }
+
+ if (pkt_len < sizeof(struct ieee80211_hdr_3addr) + ie_offset) {
+ DBG_8723A("handle_assoc(reassoc =%d) - too short payload (len =%lu)"
+ "\n", reassoc, (unsigned long)pkt_len);
+ return _FAIL;
+ }
+
+ pstat = rtw_get_stainfo23a(pstapriv, hdr->addr2);
+ if (!pstat) {
+ status = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;
+ goto asoc_class2_error;
+ }
+
+ capab_info = get_unaligned_le16(pframe + sizeof(struct ieee80211_hdr_3addr));
+ /* capab_info = le16_to_cpu(*(unsigned short *)(pframe + sizeof(struct ieee80211_hdr_3addr))); */
+ /* listen_interval = le16_to_cpu(*(unsigned short *)(pframe + sizeof(struct ieee80211_hdr_3addr)+2)); */
+ listen_interval = get_unaligned_le16(pframe + sizeof(struct ieee80211_hdr_3addr)+2);
+
+ left = pkt_len - (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
+ pos = pframe + (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
+
+ DBG_8723A("%s\n", __func__);
+
+ /* check if this sta has been successfully authenticated/associated */
+ if (!((pstat->state) & WIFI_FW_AUTH_SUCCESS)) {
+ if (!((pstat->state) & WIFI_FW_ASSOC_SUCCESS)) {
+ status = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;
+ goto asoc_class2_error;
+ } else {
+ pstat->state &= (~WIFI_FW_ASSOC_SUCCESS);
+ pstat->state |= WIFI_FW_ASSOC_STATE;
+ }
+ } else {
+ pstat->state &= (~WIFI_FW_AUTH_SUCCESS);
+ pstat->state |= WIFI_FW_ASSOC_STATE;
+ }
+
+ pstat->capability = capab_info;
+
+ /* now parse all ieee802_11 ie to point to elems */
+ if (rtw_ieee802_11_parse_elems23a(pos, left, &elems, 1) == ParseFailed ||
+ !elems.ssid) {
+ DBG_8723A("STA " MAC_FMT " sent invalid association request\n",
+ MAC_ARG(pstat->hwaddr));
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto OnAssocReq23aFail;
+ }
+
+ /* now we should check all the fields... */
+ /* checking SSID */
+ p = rtw_get_ie23a(pframe + sizeof(struct ieee80211_hdr_3addr) + ie_offset, _SSID_IE_, &ie_len,
+ pkt_len - sizeof(struct ieee80211_hdr_3addr) - ie_offset);
+ if (p == NULL || ie_len == 0) {
+ /* no SSID IE, or a broadcast (zero-length) SSID, which is
+ not allowed in an assoc request */
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ } else {
+ /* check if the ssid matches */
+ if (memcmp((void *)(p+2), cur->Ssid.ssid, cur->Ssid.ssid_len))
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+
+ if (ie_len != cur->Ssid.ssid_len)
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ }
+
+ if (WLAN_STATUS_SUCCESS != status)
+ goto OnAssocReq23aFail;
+
+ /* check if the supported rate is ok */
+ p = rtw_get_ie23a(pframe + sizeof(struct ieee80211_hdr_3addr) + ie_offset, _SUPPORTEDRATES_IE_, &ie_len, pkt_len - sizeof(struct ieee80211_hdr_3addr) - ie_offset);
+ if (p == NULL) {
+ DBG_8723A("Rx a sta assoc-req whose supported-rates IE is empty!\n");
+ /* use our own rate set as the station's rate set */
+ /* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
+ /* supportRateNum = AP_BSSRATE_LEN; */
+
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto OnAssocReq23aFail;
+ } else {
+ memcpy(supportRate, p+2, ie_len);
+ supportRateNum = ie_len;
+
+ p = rtw_get_ie23a(pframe + sizeof(struct ieee80211_hdr_3addr) + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
+ pkt_len - sizeof(struct ieee80211_hdr_3addr) - ie_offset);
+ if (p != NULL) {
+ if (supportRateNum + ie_len <= sizeof(supportRate)) {
+ memcpy(supportRate+supportRateNum, p+2, ie_len);
+ supportRateNum += ie_len;
+ }
+ }
+ }
+
+ /* todo: mask supportRate between AP & STA -> move to update raid */
+ /* get_matched_rate(pmlmeext, supportRate, &supportRateNum, 0); */
+
+ /* update station supportRate */
+ pstat->bssratelen = supportRateNum;
+ memcpy(pstat->bssrateset, supportRate, supportRateNum);
+ Update23aTblForSoftAP(pstat->bssrateset, pstat->bssratelen);
+
+ /* check RSN/WPA/WPS */
+ pstat->dot8021xalg = 0;
+ pstat->wpa_psk = 0;
+ pstat->wpa_group_cipher = 0;
+ pstat->wpa2_group_cipher = 0;
+ pstat->wpa_pairwise_cipher = 0;
+ pstat->wpa2_pairwise_cipher = 0;
+ memset(pstat->wpa_ie, 0, sizeof(pstat->wpa_ie));
+ if ((psecuritypriv->wpa_psk & BIT(1)) && elems.rsn_ie) {
+
+ int group_cipher = 0, pairwise_cipher = 0;
+
+ wpa_ie = elems.rsn_ie;
+ wpa_ie_len = elems.rsn_ie_len;
+
+ if (rtw_parse_wpa2_ie23a(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ pstat->dot8021xalg = 1;/* psk, todo:802.1x */
+ pstat->wpa_psk |= BIT(1);
+
+ pstat->wpa2_group_cipher = group_cipher&psecuritypriv->wpa2_group_cipher;
+ pstat->wpa2_pairwise_cipher = pairwise_cipher&psecuritypriv->wpa2_pairwise_cipher;
+
+ if (!pstat->wpa2_group_cipher)
+ status = WLAN_REASON_INVALID_GROUP_CIPHER;
+
+ if (!pstat->wpa2_pairwise_cipher)
+ status = WLAN_REASON_INVALID_PAIRWISE_CIPHER;
+ } else {
+ status = WLAN_STATUS_INVALID_IE;
+ }
+
+ } else if ((psecuritypriv->wpa_psk & BIT(0)) && elems.wpa_ie) {
+
+ int group_cipher = 0, pairwise_cipher = 0;
+
+ wpa_ie = elems.wpa_ie;
+ wpa_ie_len = elems.wpa_ie_len;
+
+ if (rtw_parse_wpa_ie23a(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ pstat->dot8021xalg = 1;/* psk, todo:802.1x */
+ pstat->wpa_psk |= BIT(0);
+
+ pstat->wpa_group_cipher = group_cipher&psecuritypriv->wpa_group_cipher;
+ pstat->wpa_pairwise_cipher = pairwise_cipher&psecuritypriv->wpa_pairwise_cipher;
+
+ if (!pstat->wpa_group_cipher)
+ status = WLAN_STATUS_INVALID_GROUP_CIPHER;
+
+ if (!pstat->wpa_pairwise_cipher)
+ status = WLAN_STATUS_INVALID_PAIRWISE_CIPHER;
+
+ } else {
+ status = WLAN_STATUS_INVALID_IE;
+ }
+
+ } else {
+ wpa_ie = NULL;
+ wpa_ie_len = 0;
+ }
+
+ if (WLAN_STATUS_SUCCESS != status)
+ goto OnAssocReq23aFail;
+
+ pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
+ if (wpa_ie == NULL) {
+ if (elems.wps_ie) {
+ DBG_8723A("STA included WPS IE in "
+ "(Re)Association Request - assume WPS is "
+ "used\n");
+ pstat->flags |= WLAN_STA_WPS;
+ } else {
+ DBG_8723A("STA did not include WPA/RSN IE "
+ "in (Re)Association Request - possible WPS "
+ "use\n");
+ pstat->flags |= WLAN_STA_MAYBE_WPS;
+ }
+
+ /* The AP supports WPA/RSN and the STA is going to do WPS, but the
+ AP is not ready, i.e. its selected registrar flag is false */
+ if ((psecuritypriv->wpa_psk > 0) &&
+ (pstat->flags & (WLAN_STA_WPS|WLAN_STA_MAYBE_WPS))) {
+ if (pmlmepriv->wps_beacon_ie) {
+ u8 selected_registrar = 0;
+
+ rtw_get_wps_attr_content23a(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len,
+ WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL);
+
+ if (!selected_registrar) {
+ DBG_8723A("selected_registrar is false , or AP is not ready to do WPS\n");
+
+ status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
+
+ goto OnAssocReq23aFail;
+ }
+ }
+ }
+ } else {
+ int copy_len;
+
+ if (psecuritypriv->wpa_psk == 0) {
+ DBG_8723A("STA " MAC_FMT ": WPA/RSN IE in association "
+ "request, but AP don't support WPA/RSN\n", MAC_ARG(pstat->hwaddr));
+
+ status = WLAN_STATUS_INVALID_IE;
+
+ goto OnAssocReq23aFail;
+ }
+
+ if (elems.wps_ie) {
+ DBG_8723A("STA included WPS IE in "
+ "(Re)Association Request - WPS is "
+ "used\n");
+ pstat->flags |= WLAN_STA_WPS;
+ copy_len = 0;
+ } else {
+ copy_len = ((wpa_ie_len+2) > sizeof(pstat->wpa_ie)) ? (sizeof(pstat->wpa_ie)):(wpa_ie_len+2);
+ }
+
+ if (copy_len>0)
+ memcpy(pstat->wpa_ie, wpa_ie-2, copy_len);
+
+ }
+
+ /* check if there is a WMM IE & WMM-PS support */
+ pstat->flags &= ~WLAN_STA_WME;
+ pstat->qos_option = 0;
+ pstat->qos_info = 0;
+ pstat->has_legacy_ac = true;
+ pstat->uapsd_vo = 0;
+ pstat->uapsd_vi = 0;
+ pstat->uapsd_be = 0;
+ pstat->uapsd_bk = 0;
+ if (pmlmepriv->qospriv.qos_option)
+ {
+ p = pframe + sizeof(struct ieee80211_hdr_3addr) + ie_offset; ie_len = 0;
+ for (;;)
+ {
+ p = rtw_get_ie23a(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - sizeof(struct ieee80211_hdr_3addr) - ie_offset);
+ if (p != NULL) {
+ if (!memcmp(p+2, WMM_IE, 6)) {
+
+ pstat->flags |= WLAN_STA_WME;
+
+ pstat->qos_option = 1;
+ pstat->qos_info = *(p+8);
+
+ pstat->max_sp_len = (pstat->qos_info>>5)&0x3;
+
+ if ((pstat->qos_info&0xf) != 0xf)
+ pstat->has_legacy_ac = true;
+ else
+ pstat->has_legacy_ac = false;
+
+ if (pstat->qos_info&0xf)
+ {
+ if (pstat->qos_info&BIT(0))
+ pstat->uapsd_vo = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_vo = 0;
+
+ if (pstat->qos_info&BIT(1))
+ pstat->uapsd_vi = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_vi = 0;
+
+ if (pstat->qos_info&BIT(2))
+ pstat->uapsd_bk = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_bk = 0;
+
+ if (pstat->qos_info&BIT(3))
+ pstat->uapsd_be = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_be = 0;
+
+ }
+
+ break;
+ }
+ }
+ else {
+ break;
+ }
+ p = p + ie_len + 2;
+ }
+ }
+
+ /* save HT capabilities in the sta object */
+ memset(&pstat->htpriv.ht_cap, 0, sizeof(struct ieee80211_ht_cap));
+ if (elems.ht_capabilities && elems.ht_capabilities_len >= sizeof(struct ieee80211_ht_cap))
+ {
+ pstat->flags |= WLAN_STA_HT;
+
+ pstat->flags |= WLAN_STA_WME;
+
+ memcpy(&pstat->htpriv.ht_cap, elems.ht_capabilities, sizeof(struct ieee80211_ht_cap));
+
+ } else
+ pstat->flags &= ~WLAN_STA_HT;
+
+ if ((pmlmepriv->htpriv.ht_option == false) && (pstat->flags&WLAN_STA_HT))
+ {
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto OnAssocReq23aFail;
+ }
+
+ if ((pstat->flags & WLAN_STA_HT) &&
+ ((pstat->wpa2_pairwise_cipher&WPA_CIPHER_TKIP) ||
+ (pstat->wpa_pairwise_cipher&WPA_CIPHER_TKIP)))
+ {
+ DBG_8723A("HT: " MAC_FMT " tried to "
+ "use TKIP with HT association\n", MAC_ARG(pstat->hwaddr));
+
+ /* status = WLAN_STATUS_CIPHER_REJECTED_PER_POLICY; */
+ /* goto OnAssocReq23aFail; */
+ }
+
+ /* assume non-ERP until a rate above 22 (11 Mb/s in 500 kb/s units)
+ is found in the station's rate set */
+ pstat->flags |= WLAN_STA_NONERP;
+ for (i = 0; i < pstat->bssratelen; i++) {
+ if ((pstat->bssrateset[i] & 0x7f) > 22) {
+ pstat->flags &= ~WLAN_STA_NONERP;
+ break;
+ }
+ }
+
+ if (pstat->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ pstat->flags |= WLAN_STA_SHORT_PREAMBLE;
+ else
+ pstat->flags &= ~WLAN_STA_SHORT_PREAMBLE;
+
+ if (status != WLAN_STATUS_SUCCESS)
+ goto OnAssocReq23aFail;
+
+#ifdef CONFIG_8723AU_P2P
+ pstat->is_p2p_device = false;
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ {
+ if ((p2pie = rtw_get_p2p_ie23a(pframe + sizeof(struct ieee80211_hdr_3addr) + ie_offset, pkt_len - sizeof(struct ieee80211_hdr_3addr) - ie_offset, NULL, &p2pielen)))
+ {
+ pstat->is_p2p_device = true;
+ if ((p2p_status_code = (u8)process_assoc_req_p2p_ie23a(pwdinfo, pframe, pkt_len, pstat))>0)
+ {
+ pstat->p2p_status_code = p2p_status_code;
+ status = WLAN_STATUS_CAPS_UNSUPPORTED;
+ goto OnAssocReq23aFail;
+ }
+ }
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_get_wfd_ie(pframe + sizeof(struct ieee80211_hdr_3addr) + ie_offset, pkt_len - sizeof(struct ieee80211_hdr_3addr) - ie_offset, wfd_ie, &wfd_ielen))
+ {
+ u8 attr_content[ 10 ] = { 0x00 };
+ u32 attr_contentlen = 0;
+
+ DBG_8723A("[%s] WFD IE Found!!\n", __func__);
+ rtw_get_wfd_attr_content(wfd_ie, wfd_ielen, WFD_ATTR_DEVICE_INFO, attr_content, &attr_contentlen);
+ if (attr_contentlen)
+ {
+ pwdinfo->wfd_info->peer_rtsp_ctrlport = get_unaligned_be16(attr_content + 2);
+ DBG_8723A("[%s] Peer PORT NUM = %d\n", __func__, pwdinfo->wfd_info->peer_rtsp_ctrlport);
+ }
+ }
+#endif
+ }
+ pstat->p2p_status_code = p2p_status_code;
+#endif /* CONFIG_8723AU_P2P */
+
+ /* TODO: identify_proprietary_vendor_ie(); */
+ /* Realtek proprietary IE */
+ /* identify if this is Broadcom sta */
+ /* identify if this is ralink sta */
+ /* Customer proprietary IE */
+
+ /* get a unique AID */
+ if (pstat->aid > 0) {
+ DBG_8723A(" old AID %d\n", pstat->aid);
+ } else {
+ for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
+ if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
+ break;
+
+ if (pstat->aid > NUM_STA)
+ pstat->aid = NUM_STA;
+ if (pstat->aid > pstapriv->max_num_sta) {
+
+ pstat->aid = 0;
+
+ DBG_8723A(" no room for more AIDs\n");
+
+ status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
+
+ goto OnAssocReq23aFail;
+
+ } else {
+ pstapriv->sta_aid[pstat->aid - 1] = pstat;
+ DBG_8723A("allocate new AID = (%d)\n", pstat->aid);
+ }
+ }
+
+ pstat->state &= (~WIFI_FW_ASSOC_STATE);
+ pstat->state |= WIFI_FW_ASSOC_SUCCESS;
+
+ spin_lock_bh(&pstapriv->auth_list_lock);
+ if (!list_empty(&pstat->auth_list)) {
+ list_del_init(&pstat->auth_list);
+ pstapriv->auth_list_cnt--;
+ }
+ spin_unlock_bh(&pstapriv->auth_list_lock);
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ if (list_empty(&pstat->asoc_list)) {
+ pstat->expire_to = pstapriv->expire_to;
+ list_add_tail(&pstat->asoc_list, &pstapriv->asoc_list);
+ pstapriv->asoc_list_cnt++;
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ /* now the station is qualified to join our BSS... */
+ if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) &&
+ (WLAN_STATUS_SUCCESS == status)) {
+#ifdef CONFIG_8723AU_AP_MODE
+ /* 1 bss_cap_update & sta_info_update23a */
+ bss_cap_update_on_sta_join23a(padapter, pstat);
+ sta_info_update23a(padapter, pstat);
+
+ /* issue assoc rsp before notify station join event. */
+ if (ieee80211_is_assoc_req(frame_control))
+ issue_asocrsp23a(padapter, status, pstat, WIFI_ASSOCRSP);
+ else
+ issue_asocrsp23a(padapter, status, pstat, WIFI_REASSOCRSP);
+
+ /* 2 - report to upper layer */
+ DBG_8723A("indicate_sta_join_event to upper layer - hostapd\n");
+ rtw_cfg80211_indicate_sta_assoc(padapter, pframe, pkt_len);
+
+ /* 3-(1) report sta add event */
+ report_add_sta_event23a(padapter, pstat->hwaddr, pstat->aid);
+#endif
+ }
+
+ return _SUCCESS;
+
+asoc_class2_error:
+
+#ifdef CONFIG_8723AU_AP_MODE
+ issue_deauth23a(padapter, hdr->addr2, status);
+#endif
+
+ return _FAIL;
+
+OnAssocReq23aFail:
+
+#ifdef CONFIG_8723AU_AP_MODE
+ pstat->aid = 0;
+ if (ieee80211_is_assoc_req(frame_control))
+ issue_asocrsp23a(padapter, status, pstat, WIFI_ASSOCRSP);
+ else
+ issue_asocrsp23a(padapter, status, pstat, WIFI_REASSOCRSP);
+#endif
+
+#endif /* CONFIG_8723AU_AP_MODE */
+
+ return _FAIL;
+}
+
+unsigned int OnAssocRsp23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
+{
+ uint i;
+ int res;
+ unsigned short status;
+ struct ndis_802_11_var_ies *pIE;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+ uint pkt_len = skb->len;
+
+ DBG_8723A("%s\n", __func__);
+
+ /* check A1 matches or not */
+ if (!ether_addr_equal(myid(&padapter->eeprompriv),
+ ieee80211_get_DA(hdr)))
+ return _SUCCESS;
+
+ if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)))
+ return _SUCCESS;
+
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
+ return _SUCCESS;
+
+ del_timer_sync(&pmlmeext->link_timer);
+
+ /* status */
+ status = get_unaligned_le16(pframe +
+ sizeof(struct ieee80211_hdr_3addr) + 2);
+ if (status > 0) {
+ DBG_8723A("assoc reject, status code: %d\n", status);
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ res = -4;
+ goto report_assoc_result;
+ }
+
+ /* get capabilities */
+ pmlmeinfo->capability = get_unaligned_le16(pframe +
+ sizeof(struct ieee80211_hdr_3addr));
+
+ /* set slot time */
+ pmlmeinfo->slotTime = (pmlmeinfo->capability & BIT(10)) ? 9 : 20;
+
+ /* AID */
+ res = pmlmeinfo->aid = get_unaligned_le16(pframe +
+ sizeof(struct ieee80211_hdr_3addr) + 4) & 0x3fff;
+
+ /* following are moved to join event callback function */
+ /* to handle HT, WMM, rate adaptive, update MAC reg */
+ /* for not to handle the synchronous IO in the tasklet */
+ for (i = (6 + sizeof(struct ieee80211_hdr_3addr)); i < pkt_len;) {
+ pIE = (struct ndis_802_11_var_ies *)(pframe + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if (!memcmp(pIE->data, WMM_PARA_OUI23A, 6))/* WMM */
+ WMM_param_handler23a(padapter, pIE);
+#if defined(CONFIG_8723AU_P2P)
+ else if (!memcmp(pIE->data, WFD_OUI23A, 4)) { /* WFD */
+ DBG_8723A("[%s] Found WFD IE\n", __func__);
+ WFD_info_handler(padapter, pIE);
+ }
+#endif
+ break;
+
+ case _HT_CAPABILITY_IE_: /* HT caps */
+ HT_caps_handler23a(padapter, pIE);
+ break;
+
+ case _HT_EXTRA_INFO_IE_: /* HT info */
+ HT_info_handler23a(padapter, pIE);
+ break;
+
+ case _ERPINFO_IE_:
+ ERP_IE_handler23a(padapter, pIE);
+ break;
+
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ pmlmeinfo->state &= (~WIFI_FW_ASSOC_STATE);
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+
+ /* Update Basic Rate Table for spec, 2010-12-28 , by thomas */
+ UpdateBrateTbl23a(padapter, pmlmeinfo->network.SupportedRates);
+
+report_assoc_result:
+ pmlmepriv->assoc_rsp_len = 0;
+ if (res > 0) {
+ kfree(pmlmepriv->assoc_rsp);
+ pmlmepriv->assoc_rsp = kmalloc(pkt_len, GFP_ATOMIC);
+ if (pmlmepriv->assoc_rsp) {
+ memcpy(pmlmepriv->assoc_rsp, pframe, pkt_len);
+ pmlmepriv->assoc_rsp_len = pkt_len;
+ }
+ } else
+ kfree(pmlmepriv->assoc_rsp);
+
+ report_join_res23a(padapter, res);
+
+ return _SUCCESS;
+}
+
+unsigned int OnDeAuth23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ unsigned short reason;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_8723AU_P2P */
+
+ /* check A3 */
+ if (!ether_addr_equal(hdr->addr3, get_my_bssid23a(&pmlmeinfo->network)))
+ return _SUCCESS;
+
+#ifdef CONFIG_8723AU_P2P
+ if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
+ mod_timer(&pwdinfo->reset_ch_sitesurvey,
+ jiffies + msecs_to_jiffies(10));
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ reason = get_unaligned_le16(pframe +
+ sizeof(struct ieee80211_hdr_3addr));
+
+ DBG_8723A("%s Reason code(%d)\n", __func__, reason);
+
+#ifdef CONFIG_8723AU_AP_MODE
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_8723A_LEVEL(_drv_always_, "ap recv deauth reason code(%d) "
+ "sta:%pM\n", reason, hdr->addr2);
+
+ psta = rtw_get_stainfo23a(pstapriv, hdr->addr2);
+ if (psta) {
+ u8 updated = 0;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ if (!list_empty(&psta->asoc_list)) {
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta23a(padapter, psta,
+ false, reason);
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ associated_clients_update23a(padapter, updated);
+ }
+
+ return _SUCCESS;
+ }
+ else
+#endif
+ {
+ DBG_8723A_LEVEL(_drv_always_, "sta recv deauth reason code(%d) "
+ "sta:%pM\n", reason, hdr->addr3);
+
+ receive_disconnect23a(padapter, hdr->addr3, reason);
+ }
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+ return _SUCCESS;
+}
+
+unsigned int OnDisassoc23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
+{
+ unsigned short reason;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_8723AU_P2P */
+
+ /* check A3 */
+ if (!ether_addr_equal(hdr->addr3, get_my_bssid23a(&pmlmeinfo->network)))
+ return _SUCCESS;
+
+#ifdef CONFIG_8723AU_P2P
+ if (pwdinfo->rx_invitereq_info.scan_op_ch_only)
+ {
+ mod_timer(&pwdinfo->reset_ch_sitesurvey,
+ jiffies + msecs_to_jiffies(10));
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ reason = get_unaligned_le16(pframe +
+ sizeof(struct ieee80211_hdr_3addr));
+
+ DBG_8723A("%s Reason code(%d)\n", __func__, reason);
+
+#ifdef CONFIG_8723AU_AP_MODE
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_8723A_LEVEL(_drv_always_, "ap recv disassoc reason code(%d)"
+ " sta:%pM\n", reason, hdr->addr2);
+
+ psta = rtw_get_stainfo23a(pstapriv, hdr->addr2);
+ if (psta) {
+ u8 updated = 0;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ if (!list_empty(&psta->asoc_list)) {
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta23a(padapter, psta,
+ false, reason);
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ associated_clients_update23a(padapter, updated);
+ }
+
+ return _SUCCESS;
+ }
+ else
+#endif
+ {
+ DBG_8723A_LEVEL(_drv_always_, "sta recv disassoc reason "
+ "code(%d) sta:%pM\n", reason, hdr->addr3);
+
+ receive_disconnect23a(padapter, hdr->addr3, reason);
+ }
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+ return _SUCCESS;
+}
+
+unsigned int OnAtim23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
+{
+ DBG_8723A("%s\n", __func__);
+ return _SUCCESS;
+}
+
+unsigned int on_action_spct23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
+{
+ return _FAIL;
+}
+
+unsigned int OnAction23a_qos(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction23a_dls(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction23a_back23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
+{
+ u8 *addr;
+ struct sta_info *psta = NULL;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ unsigned char *frame_body;
+ unsigned char category, action;
+ unsigned short tid, status, reason_code = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* check RA matches or not */
+ if (!ether_addr_equal(myid(&padapter->eeprompriv), hdr->addr1))
+ return _SUCCESS;
+
+ DBG_8723A("%s\n", __func__);
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
+ return _SUCCESS;
+
+ addr = hdr->addr2;
+ psta = rtw_get_stainfo23a(pstapriv, addr);
+
+ if (!psta)
+ return _SUCCESS;
+
+ frame_body = (unsigned char *)
+ (pframe + sizeof(struct ieee80211_hdr_3addr));
+
+ category = frame_body[0];
+ if (category == WLAN_CATEGORY_BACK) { /* representing Block Ack */
+ if (!pmlmeinfo->HT_enable)
+ return _SUCCESS;
+ action = frame_body[1];
+ DBG_8723A("%s, action =%d\n", __func__, action);
+ switch (action) {
+ case WLAN_ACTION_ADDBA_REQ: /* ADDBA request */
+ memcpy(&pmlmeinfo->ADDBA_req, &frame_body[2],
+ sizeof(struct ADDBA_request));
+ process_addba_req23a(padapter,
+ (u8 *)&pmlmeinfo->ADDBA_req, addr);
+ if (pmlmeinfo->bAcceptAddbaReq == true)
+ issue_action_BA23a(padapter, addr,
+ WLAN_ACTION_ADDBA_RESP, 0);
+ else {
+ /* reject ADDBA Req */
+ issue_action_BA23a(padapter, addr,
+ WLAN_ACTION_ADDBA_RESP, 37);
+ }
+ break;
+ case WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
+ status = get_unaligned_le16(&frame_body[3]);
+ tid = ((frame_body[5] >> 2) & 0x7);
+ if (status == 0) { /* successful */
+ DBG_8723A("agg_enable for TID =%d\n", tid);
+ psta->htpriv.agg_enable_bitmap |= 1 << tid;
+ psta->htpriv.candidate_tid_bitmap &=
+ ~CHKBIT(tid);
+ } else
+ psta->htpriv.agg_enable_bitmap &= ~CHKBIT(tid);
+ break;
+
+ case WLAN_ACTION_DELBA: /* DELBA */
+ if ((frame_body[3] & BIT(3)) == 0) {
+ psta->htpriv.agg_enable_bitmap &=
+ ~(1 << ((frame_body[3] >> 4) & 0xf));
+ psta->htpriv.candidate_tid_bitmap &=
+ ~(1 << ((frame_body[3] >> 4) & 0xf));
+
+ /* reason_code = frame_body[4] | (frame_body[5] << 8); */
+ reason_code = get_unaligned_le16(&frame_body[4]);
+ } else if ((frame_body[3] & BIT(3)) == BIT(3)) {
+ tid = (frame_body[3] >> 4) & 0x0F;
+
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ }
+
+ DBG_8723A("%s(): DELBA: %x(%x)\n", __func__,
+ pmlmeinfo->agg_enable_bitmap, reason_code);
+ /* todo: how to notify the host while receiving
+ DELETE BA */
+ break;
+ default:
+ break;
+ }
+ }
+ return _SUCCESS;
+}
+
+#ifdef CONFIG_8723AU_P2P
+
+static int get_reg_classes_full_count(struct p2p_channels channel_list)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < channel_list.reg_classes; i++)
+ cnt += channel_list.reg_class[i].channels;
+
+ return cnt;
+}
+
+void issue_p2p_GO_request23a(struct rtw_adapter *padapter, u8 *raddr)
+{
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ u32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_NEGO_REQ;
+ u8 wpsie[ 255 ] = { 0x00 }, p2pie[ 255 ] = { 0x00 };
+ u8 wpsielen = 0, p2pielen = 0;
+ u16 len_channellist_attr = 0;
+#ifdef CONFIG_8723AU_P2P
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ return;
+
+ DBG_8723A("[%s] In\n", __func__);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ ether_addr_copy(pwlanhdr->addr1, raddr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, myid(&padapter->eeprompriv));
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 4, (unsigned char *)&p2poui,
+ &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &oui_subtype, &pattrib->pktlen);
+ pwdinfo->negotiation_dialog_token = 1; /*Initialize the dialog value*/
+ pframe = rtw_set_fixed_ie23a(pframe, 1,
+ &pwdinfo->negotiation_dialog_token,
+ &pattrib->pktlen);
+
+ /* WPS Section */
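+ /* Note (added): WPS/WSC attributes are big-endian on the wire, hence
+ the cpu_to_be16()/cpu_to_be32() conversions below, while the P2P
+ attributes later in this frame carry little-endian lengths and use
+ cpu_to_le16(). */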
+ wpsielen = 0;
+ /* WPS OUI */
+ *(u32*) (wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* Device Password ID */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+
+ if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PEER_DISPLAY_PIN)
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_DPID_USER_SPEC);
+ else if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_SELF_DISPLAY_PIN)
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_DPID_REGISTRAR_SPEC);
+ else if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PBC)
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_DPID_PBC);
+
+ wpsielen += 2;
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *) wpsie, &pattrib->pktlen);
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20110306 */
+ /* According to the P2P Specification, the group negotiation request frame should contain 9 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Group Owner Intent */
+ /* 3. Configuration Timeout */
+ /* 4. Listen Channel */
+ /* 5. Extended Listen Timing */
+ /* 6. Intended P2P Interface Address */
+ /* 7. Channel List */
+ /* 8. P2P Device Info */
+ /* 9. Operating Channel */
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN |
+ P2P_GRPCAP_PERSISTENT_GROUP;
+ else
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
+
+ /* Group Owner Intent */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GO_INTENT;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Todo the tie breaker bit. */
+ p2pie[p2pielen++] = ((pwdinfo->intent << 1) | BIT(0));
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
+
+ /* Listen Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_LISTEN_CH;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->listen_channel; /* listening channel number */
+
+ /* Extended Listen Timing ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0004);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Intended P2P Interface Address */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INTENTED_IF_ADDR;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Length: */
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
+ /* + number of channels in all classes */
+	len_channellist_attr = 3 +
+		(1 + 1) * (u16)pmlmeext->channel_list.reg_classes +
+		get_reg_classes_full_count(pmlmeext->channel_list);
+
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+
+ {
+ int i, j;
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+ }
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+	/* This field should be big endian, per the P2P specification. */
+
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name,
+ pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+	if (pwdinfo->operating_channel <= 14) {
+		/* Operating Class */
+		p2pie[p2pielen++] = 0x51;
+	} else if ((pwdinfo->operating_channel >= 36) &&
+		   (pwdinfo->operating_channel <= 48)) {
+		/* Operating Class */
+		p2pie[p2pielen++] = 0x73;
+	} else {
+		/* Operating Class */
+		p2pie[p2pielen++] = 0x7c;
+	}
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
+
+	pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, p2pielen,
+			       (unsigned char *)p2pie, &pattrib->pktlen);
+
+#ifdef CONFIG_8723AU_P2P
+ wfdielen = build_nego_req_wfd_ie(pwdinfo, pframe);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+
+ return;
+}
+
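+/* Build and transmit the P2P GO negotiation response frame to raddr.
+ * 'result' is the P2P status code carried in the P2P Status attribute.
+ */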
+static void issue_p2p_GO_response(struct rtw_adapter *padapter, u8 *raddr,
+				  u8 *frame_body, uint len, u8 result)
+{
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ u32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_NEGO_RESP;
+ u8 wpsie[255] = { 0x00 }, p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+ uint wpsielen = 0;
+ u16 wps_devicepassword_id = 0x0000;
+ uint wps_devicepassword_id_len = 0;
+ u16 len_channellist_attr = 0;
+ int i, j;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#ifdef CONFIG_8723AU_P2P
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ return;
+
+ DBG_8723A("[%s] In, result = %d\n", __func__, result);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ ether_addr_copy(pwlanhdr->addr1, raddr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, myid(&padapter->eeprompriv));
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 4, (unsigned char *) &p2poui,
+ &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &oui_subtype, &pattrib->pktlen);
+	/* The Dialog Token of the group negotiation request frame. */
+ pwdinfo->negotiation_dialog_token = frame_body[7];
+ pframe = rtw_set_fixed_ie23a(pframe, 1,
+ &pwdinfo->negotiation_dialog_token,
+ &pattrib->pktlen);
+
+ /* Commented by Albert 20110328 */
+ /* Try to get the device password ID from the WPS IE of group
+ negotiation request frame */
+ /* WiFi Direct test plan 5.1.15 */
+ rtw_get_wps_ie23a(frame_body + _PUBLIC_ACTION_IE_OFFSET_,
+ len - _PUBLIC_ACTION_IE_OFFSET_, wpsie, &wpsielen);
+ rtw_get_wps_attr_content23a(wpsie, wpsielen, WPS_ATTR_DEVICE_PWID,
+ (u8 *)&wps_devicepassword_id,
+ &wps_devicepassword_id_len);
+ wps_devicepassword_id = be16_to_cpu(wps_devicepassword_id);
+
+ memset(wpsie, 0x00, 255);
+ wpsielen = 0;
+
+ /* WPS Section */
+ wpsielen = 0;
+ /* WPS OUI */
+ *(u32*) (wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* Device Password ID */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ if (wps_devicepassword_id == WPS_DPID_USER_SPEC) {
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(WPS_DPID_REGISTRAR_SPEC);
+ } else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC) {
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_DPID_USER_SPEC);
+ } else {
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_DPID_PBC);
+ }
+ wpsielen += 2;
+
+	/* Commented by Kurt 20120113 */
+	/* If a device wants to do the P2P handshake without sending a
+	   prov_disc_req, we have to get peer_req_cm from here. */
+ if (!memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
+ if (wps_devicepassword_id == WPS_DPID_USER_SPEC) {
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
+ } else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC) {
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pad", 3);
+ } else {
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pbc", 3);
+ }
+ }
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, wpsielen,
+ (unsigned char *) wpsie, &pattrib->pktlen);
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+	/* Commented by Albert 20100908 */
+	/* According to the P2P Specification, the group negotiation
+	   response frame should contain 9 P2P attributes */
+ /* 1. Status */
+ /* 2. P2P Capability */
+ /* 3. Group Owner Intent */
+ /* 4. Configuration Timeout */
+ /* 5. Operating Channel */
+ /* 6. Intended P2P Interface Address */
+ /* 7. Channel List */
+ /* 8. Device Info */
+ /* 9. Group ID (Only GO) */
+
+ /* ToDo: */
+
+ /* P2P Status */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_STATUS;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = result;
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+		/* Commented by Albert 2011/03/08 */
+		/* According to the P2P specification, if the sending
+		   device will be the client, the P2P Capability should
+		   be reserved in the group negotiation response frame */
+ p2pie[p2pielen++] = 0;
+ } else {
+ /* Be group owner or meet the error case */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+ }
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported) {
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN |
+ P2P_GRPCAP_PERSISTENT_GROUP;
+ } else {
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
+ }
+
+ /* Group Owner Intent */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GO_INTENT;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ if (pwdinfo->peer_intent & 0x01) {
+ /* Peer's tie breaker bit is 1, our tie breaker
+ bit should be 0 */
+ p2pie[p2pielen++] = (pwdinfo->intent << 1);
+ } else {
+ /* Peer's tie breaker bit is 0, our tie breaker bit
+ should be 1 */
+ p2pie[p2pielen++] = ((pwdinfo->intent << 1) | BIT(0));
+ }
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200;
+ /* 2 seconds needed to be the P2P Client */
+ p2pie[p2pielen++] = 200;
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ if (pwdinfo->operating_channel <= 14) {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+ } else if ((pwdinfo->operating_channel >= 36) &&
+ (pwdinfo->operating_channel <= 48)) {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x73;
+ } else {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x7c;
+ }
+
+ /* Channel Number */
+ /* operating channel number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel;
+
+ /* Intended P2P Interface Address */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INTENTED_IF_ADDR;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) *
+ Operation Classes (?) */
+ /* + number of channels in all classes */
+ len_channellist_attr = 3 +
+ (1 + 1) * (u16)pmlmeext->channel_list.reg_classes +
+ get_reg_classes_full_count(pmlmeext->channel_list);
+
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] =
+ pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] =
+ pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0;
+ i < pmlmeext->channel_list.reg_class[j].channels; i++) {
+ p2pie[p2pielen++] =
+ pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) +
+ Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field
+ (2bytes) + WPS Device Name Len field (2bytes) */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+	/* This field should be big endian, per the P2P specification. */
+
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name,
+ pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+	if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Group ID Attribute */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) =
+ cpu_to_le16(ETH_ALEN + pwdinfo->nego_ssidlen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* p2P Device Address */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* SSID */
+ memcpy(p2pie + p2pielen, pwdinfo->nego_ssid,
+ pwdinfo->nego_ssidlen);
+ p2pielen += pwdinfo->nego_ssidlen;
+
+ }
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, p2pielen,
+ (unsigned char *) p2pie, &pattrib->pktlen);
+
+#ifdef CONFIG_8723AU_P2P
+ wfdielen = build_nego_resp_wfd_ie(pwdinfo, pframe);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+
+ return;
+}
+
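+/* Build and transmit the P2P GO negotiation confirmation frame to raddr,
+ * carrying the negotiation result in the P2P Status attribute.
+ */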
+static void issue_p2p_GO_confirm(struct rtw_adapter *padapter, u8 *raddr,
+				 u8 result)
+{
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ u32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_NEGO_CONF;
+	u8 p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#ifdef CONFIG_8723AU_P2P
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ return;
+
+ DBG_8723A("[%s] In\n", __func__);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ ether_addr_copy(pwlanhdr->addr1, raddr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, myid(&padapter->eeprompriv));
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 4, (unsigned char *)&p2poui,
+ &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &oui_subtype, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1,
+ &pwdinfo->negotiation_dialog_token,
+ &pattrib->pktlen);
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+	/* Commented by Albert 20110306 */
+	/* According to the P2P Specification, the group negotiation
+	   confirmation frame should contain 5 P2P attributes */
+ /* 1. Status */
+ /* 2. P2P Capability */
+ /* 3. Operating Channel */
+ /* 4. Channel List */
+ /* 5. Group ID (if this WiFi is GO) */
+
+ /* P2P Status */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_STATUS;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = result;
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported) {
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN |
+ P2P_GRPCAP_PERSISTENT_GROUP;
+ } else {
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
+ }
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ if (pwdinfo->peer_operating_ch <= 14) {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+ } else if ((pwdinfo->peer_operating_ch >= 36) &&
+ (pwdinfo->peer_operating_ch <= 48)) {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x73;
+ } else {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x7c;
+ }
+
+ p2pie[p2pielen++] = pwdinfo->peer_operating_ch;
+ } else {
+ if (pwdinfo->operating_channel <= 14) {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+		} else if ((pwdinfo->operating_channel >= 36) &&
+			   (pwdinfo->operating_channel <= 48)) {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x73;
+ } else {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x7c;
+ }
+
+ /* Channel Number */
+ /* Use the listen channel as the operating channel */
+ p2pie[p2pielen++] = pwdinfo->operating_channel;
+ }
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) =
+ cpu_to_le16(pwdinfo->channel_list_attr_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->channel_list_attr,
+ pwdinfo->channel_list_attr_len);
+ p2pielen += pwdinfo->channel_list_attr_len;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Group ID Attribute */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) =
+ cpu_to_le16(ETH_ALEN + pwdinfo->nego_ssidlen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* p2P Device Address */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* SSID */
+ memcpy(p2pie + p2pielen, pwdinfo->nego_ssid,
+ pwdinfo->nego_ssidlen);
+ p2pielen += pwdinfo->nego_ssidlen;
+ }
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, p2pielen,
+ (unsigned char *)p2pie, &pattrib->pktlen);
+
+#ifdef CONFIG_8723AU_P2P
+ wfdielen = build_nego_confirm_wfd_ie(pwdinfo, pframe);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+
+ return;
+}
+
+void issue_p2p_invitation_request23a(struct rtw_adapter *padapter, u8 *raddr)
+{
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ u32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_INVIT_REQ;
+	u8 p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+ u8 dialogToken = 3;
+ u16 len_channellist_attr = 0;
+#ifdef CONFIG_8723AU_P2P
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+ int i, j;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ ether_addr_copy(pwlanhdr->addr1, raddr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, raddr);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 4, (unsigned char *) &p2poui,
+ &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &oui_subtype, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &dialogToken, &pattrib->pktlen);
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20101011 */
+ /* According to the P2P Specification, the P2P Invitation
+ request frame should contain 7 P2P attributes */
+ /* 1. Configuration Timeout */
+ /* 2. Invitation Flags */
+ /* 3. Operating Channel (Only GO) */
+ /* 4. P2P Group BSSID (Should be included if I am the GO) */
+ /* 5. Channel List */
+ /* 6. P2P Group ID */
+ /* 7. P2P Device Info */
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200;
+ /* 2 seconds needed to be the P2P Client */
+ p2pie[p2pielen++] = 200;
+
+ /* Invitation Flags */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INVITATION_FLAGS;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = P2P_INVITATION_FLAGS_PERSISTENT;
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ if (pwdinfo->invitereq_info.operating_ch <= 14)
+ p2pie[p2pielen++] = 0x51;
+ else if ((pwdinfo->invitereq_info.operating_ch >= 36) &&
+ (pwdinfo->invitereq_info.operating_ch <= 48))
+ p2pie[p2pielen++] = 0x73;
+ else
+ p2pie[p2pielen++] = 0x7c;
+
+ /* Channel Number */
+ /* operating channel number */
+ p2pie[p2pielen++] = pwdinfo->invitereq_info.operating_ch;
+
+ if (ether_addr_equal(myid(&padapter->eeprompriv),
+ pwdinfo->invitereq_info.go_bssid)) {
+ /* P2P Group BSSID */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_BSSID;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address for GO */
+ memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_bssid,
+ ETH_ALEN);
+ p2pielen += ETH_ALEN;
+ }
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Length: */
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) *
+ Operation Classes (?) */
+ /* + number of channels in all classes */
+ len_channellist_attr = 3 +
+ (1 + 1) * (u16)pmlmeext->channel_list.reg_classes +
+ get_reg_classes_full_count(pmlmeext->channel_list);
+
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] =
+ pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] =
+ pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0;
+ i < pmlmeext->channel_list.reg_class[j].channels; i++) {
+ p2pie[p2pielen++] =
+ pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+
+ /* P2P Group ID */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) =
+ cpu_to_le16(6 + pwdinfo->invitereq_info.ssidlen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address for GO */
+ memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_bssid, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* SSID */
+ memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_ssid,
+ pwdinfo->invitereq_info.ssidlen);
+ p2pielen += pwdinfo->invitereq_info.ssidlen;
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) +
+ Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field
+ (2bytes) + WPS Device Name Len field (2bytes) */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+	/* This field should be big endian, per the P2P specification. */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY);
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name,
+ pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, p2pielen,
+ (unsigned char *) p2pie, &pattrib->pktlen);
+
+#ifdef CONFIG_8723AU_P2P
+ wfdielen = build_invitation_req_wfd_ie(pwdinfo, pframe);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+
+ return;
+}
+
+void issue_p2p_invitation_response23a(struct rtw_adapter *padapter, u8 *raddr,
+ u8 dialogToken, u8 status_code)
+{
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ u32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_INVIT_RESP;
+	u8 p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+ u16 len_channellist_attr = 0;
+#ifdef CONFIG_8723AU_P2P
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+ int i, j;
+
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ ether_addr_copy(pwlanhdr->addr1, raddr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, raddr);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 4, (unsigned char *)&p2poui,
+ &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &oui_subtype, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &dialogToken, &pattrib->pktlen);
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20101005 */
+ /* According to the P2P Specification, the P2P Invitation
+ response frame should contain 5 P2P attributes */
+ /* 1. Status */
+ /* 2. Configuration Timeout */
+ /* 3. Operating Channel (Only GO) */
+ /* 4. P2P Group BSSID (Only GO) */
+ /* 5. Channel List */
+
+ /* P2P Status */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_STATUS;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+	/* Value: */
+	/* When the status code is P2P_STATUS_FAIL_INFO_UNAVAILABLE: */
+	/* the event for the received P2P Invitation Req frame is sent
+	   to the DMP UI. */
+	/* DMP has to compare the MAC address to find out the profile. */
+	/* So, the WiFi driver will send P2P_STATUS_FAIL_INFO_UNAVAILABLE
+	   to the NB. */
+	/* If the UI finds the corresponding profile, the WiFi driver
+	   sends the P2P Invitation Req */
+	/* to the NB to rebuild the persistent group. */
+ p2pie[p2pielen++] = status_code;
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200;
+ /* 2 seconds needed to be the P2P Client */
+ p2pie[p2pielen++] = 200;
+
+ if (status_code == P2P_STATUS_SUCCESS) {
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* The P2P Invitation request frame asks this
+ Wi-Fi device to be the P2P GO */
+ /* In this case, the P2P Invitation response
+ frame should carry the two more P2P attributes. */
+ /* First one is operating channel attribute. */
+ /* Second one is P2P Group BSSID attribute. */
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute"
+ section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ /* Copy from SD7 */
+ p2pie[p2pielen++] = 0x51;
+
+ /* Channel Number */
+ /* operating channel number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel;
+
+ /* P2P Group BSSID */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_BSSID;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address for GO */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv),
+ ETH_ALEN);
+ p2pielen += ETH_ALEN;
+ }
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Length: */
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) *
+ Operation Classes (?) */
+ /* + number of channels in all classes */
+ len_channellist_attr = 3 +
+ (1 + 1) * (u16)pmlmeext->channel_list.reg_classes +
+ get_reg_classes_full_count(pmlmeext->channel_list);
+
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] =
+ pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] =
+ pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0;
+ i < pmlmeext->channel_list.reg_class[j].channels;
+ i++) {
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+ }
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, p2pielen,
+ (unsigned char *)p2pie, &pattrib->pktlen);
+
+#ifdef CONFIG_8723AU_P2P
+ wfdielen = build_invitation_resp_wfd_ie(pwdinfo, pframe);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+
+ return;
+}
+
+void issue_p2p_provision_request23a(struct rtw_adapter *padapter, u8 *pssid,
+ u8 ussidlen, u8 *pdev_raddr)
+{
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ u8 dialogToken = 1;
+ u32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_PROVISION_DISC_REQ;
+ u8 wpsie[100] = { 0x00 };
+ u8 wpsielen = 0;
+ u32 p2pielen = 0;
+#ifdef CONFIG_8723AU_P2P
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ return;
+
+ DBG_8723A("[%s] In\n", __func__);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ ether_addr_copy(pwlanhdr->addr1, pdev_raddr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, pdev_raddr);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 4, (unsigned char *)&p2poui,
+ &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &oui_subtype, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &dialogToken, &pattrib->pktlen);
+
+ p2pielen = build_prov_disc_request_p2p_ie23a(pwdinfo, pframe, pssid,
+ ussidlen, pdev_raddr);
+
+ pframe += p2pielen;
+ pattrib->pktlen += p2pielen;
+
+ wpsielen = 0;
+ /* WPS OUI */
+ *(u32*) (wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* Config Method */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(pwdinfo->tx_prov_disc_info.wps_config_method_request);
+ wpsielen += 2;
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, wpsielen,
+ (unsigned char *) wpsie, &pattrib->pktlen);
+
+#ifdef CONFIG_8723AU_P2P
+ wfdielen = build_provdisc_req_wfd_ie(pwdinfo, pframe);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+
+ return;
+}
+
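+/* Return 1 if peermacaddr matches an entry in the persistent group
+ * profile list, 0 otherwise.
+ */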
+static u8 is_matched_in_profilelist(u8 *peermacaddr,
+ struct profile_info *profileinfo)
+{
+ u8 i, match_result = 0;
+
+ DBG_8723A("[%s] peermac = %.2X %.2X %.2X %.2X %.2X %.2X\n", __func__,
+ peermacaddr[0], peermacaddr[1], peermacaddr[2],
+ peermacaddr[3], peermacaddr[4], peermacaddr[5]);
+
+ for (i = 0; i < P2P_MAX_PERSISTENT_GROUP_NUM; i++, profileinfo++) {
+ DBG_8723A("[%s] profileinfo_mac = %.2X %.2X %.2X %.2X %.2X "
+ "%.2X\n", __func__, profileinfo->peermac[0],
+ profileinfo->peermac[1], profileinfo->peermac[2],
+ profileinfo->peermac[3], profileinfo->peermac[4],
+ profileinfo->peermac[5]);
+ if (ether_addr_equal(peermacaddr, profileinfo->peermac)) {
+ match_result = 1;
+ DBG_8723A("[%s] Match!\n", __func__);
+ break;
+ }
+ }
+
+ return match_result;
+}
+
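+/* Build and transmit a P2P probe response frame to da. */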
+void issue_probersp23a_p2p23a(struct rtw_adapter *padapter, unsigned char *da)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u16 beacon_interval = 100;
+ u16 capInfo = 0;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 wpsie[255] = { 0x00 };
+ u32 wpsielen = 0, p2pielen = 0;
+#ifdef CONFIG_8723AU_P2P
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+ struct cfg80211_wifidirect_info *pcfg80211_wdinfo =
+ &padapter->cfg80211_wdinfo;
+ struct ieee80211_channel *ieee_ch =
+ &pcfg80211_wdinfo->remain_on_ch_channel;
+ u8 listen_channel =
+ (u8)ieee80211_frequency_to_channel(ieee_ch->center_freq);
+
+ /* DBG_8723A("%s\n", __func__); */
+
+	if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+		return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ mac = myid(&padapter->eeprompriv);
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr2, mac);
+
+ /* Use the device address for BSSID field. */
+ ether_addr_copy(pwlanhdr->addr3, mac);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(fctrl, WIFI_PROBERSP);
+
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = pattrib->hdrlen;
+ pframe += pattrib->hdrlen;
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pattrib->pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+ memcpy(pframe, (unsigned char *) &beacon_interval, 2);
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* capability info: 2 bytes */
+	/* ESS and IBSS bits must be 0 (defined in section 3.1.2.1.1 of
+	   the WiFi Direct Spec) */
+ capInfo |= cap_ShortPremble;
+ capInfo |= cap_ShortSlot;
+
+ memcpy(pframe, (unsigned char *) &capInfo, 2);
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* SSID */
+ pframe = rtw_set_ie23a(pframe, _SSID_IE_, 7, pwdinfo->p2p_wildcard_ssid,
+ &pattrib->pktlen);
+
+ /* supported rates... */
+ /* Use the OFDM rate in the P2P probe response frame.
+ (6(B), 9(B), 12, 18, 24, 36, 48, 54) */
+ pframe = rtw_set_ie23a(pframe, _SUPPORTEDRATES_IE_, 8,
+ pwdinfo->support_rate, &pattrib->pktlen);
+
+ /* DS parameter set */
+ if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled &&
+ listen_channel != 0) {
+ pframe = rtw_set_ie23a(pframe, _DSSET_IE_, 1, (unsigned char *)
+ &listen_channel, &pattrib->pktlen);
+ } else {
+ pframe = rtw_set_ie23a(pframe, _DSSET_IE_, 1, (unsigned char *)
+ &pwdinfo->listen_channel,
+ &pattrib->pktlen);
+ }
+
+ if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled) {
+ if (pmlmepriv->wps_probe_resp_ie &&
+ pmlmepriv->p2p_probe_resp_ie) {
+ /* WPS IE */
+ memcpy(pframe, pmlmepriv->wps_probe_resp_ie,
+ pmlmepriv->wps_probe_resp_ie_len);
+ pattrib->pktlen += pmlmepriv->wps_probe_resp_ie_len;
+ pframe += pmlmepriv->wps_probe_resp_ie_len;
+
+ /* P2P IE */
+ memcpy(pframe, pmlmepriv->p2p_probe_resp_ie,
+ pmlmepriv->p2p_probe_resp_ie_len);
+ pattrib->pktlen += pmlmepriv->p2p_probe_resp_ie_len;
+ pframe += pmlmepriv->p2p_probe_resp_ie_len;
+ }
+ } else {
+
+ /* Todo: WPS IE */
+ /* Noted by Albert 20100907 */
+		/* According to the WPS specification, all WPS
+		   attributes are presented in big endian. */
+
+ wpsielen = 0;
+ /* WPS OUI */
+ *(u32*) (wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* WiFi Simple Config State */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(WPS_ATTR_SIMPLE_CONF_STATE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_WSC_STATE_NOT_CONFIG;
+
+ /* Response Type */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_RESP_TYPE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_RESPONSE_TYPE_8021X;
+
+ /* UUID-E */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_UUID_E);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0010);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ wpsielen += 0x10;
+
+ /* Manufacturer */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MANUFACTURER);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0007);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, "Realtek", 7);
+ wpsielen += 7;
+
+ /* Model Name */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MODEL_NAME);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0006);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, "8192CU", 6);
+ wpsielen += 6;
+
+ /* Model Number */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MODEL_NUMBER);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+		wpsie[wpsielen++] = 0x31; /* character 1 */
+
+ /* Serial Number */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(WPS_ATTR_SERIAL_NUMBER);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(ETH_ALEN);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, "123456", ETH_ALEN);
+ wpsielen += ETH_ALEN;
+
+ /* Primary Device Type */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0008);
+ wpsielen += 2;
+
+ /* Value: */
+ /* Category ID */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ wpsielen += 2;
+
+ /* OUI */
+ *(u32*) (wpsie + wpsielen) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* Sub Category ID */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ wpsielen += 2;
+
+ /* Device Name */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(pwdinfo->device_name_len);
+ wpsielen += 2;
+
+ /* Value: */
+ if (pwdinfo->device_name_len) {
+ memcpy(wpsie + wpsielen, pwdinfo->device_name,
+ pwdinfo->device_name_len);
+ wpsielen += pwdinfo->device_name_len;
+ }
+
+ /* Config Method */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(pwdinfo->supported_wps_cm);
+ wpsielen += 2;
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, wpsielen,
+ (unsigned char *)wpsie,
+ &pattrib->pktlen);
+
+ p2pielen = build_probe_resp_p2p_ie23a(pwdinfo, pframe);
+ pframe += p2pielen;
+ pattrib->pktlen += p2pielen;
+ }
+
+#ifdef CONFIG_8723AU_P2P
+ if (pwdinfo->wfd_info->wfd_enable) {
+ wfdielen = build_probe_resp_wfd_ie(pwdinfo, pframe, 0);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+ } else if (pmlmepriv->wfd_probe_resp_ie &&
+ pmlmepriv->wfd_probe_resp_ie_len > 0) {
+ /* WFD IE */
+ memcpy(pframe, pmlmepriv->wfd_probe_resp_ie,
+ pmlmepriv->wfd_probe_resp_ie_len);
+ pattrib->pktlen += pmlmepriv->wfd_probe_resp_ie_len;
+ pframe += pmlmepriv->wfd_probe_resp_ie_len;
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+
+ return;
+}
+
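+/* Build and transmit a P2P probe request; when wait_ack is set, wait
+ * for the TX ack and return _SUCCESS or _FAIL accordingly.
+ */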
+static int _issue23a_probereq_p2p(struct rtw_adapter *padapter, u8 *da,
+ int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 wpsie[255] = {0x00}, p2pie[255] = {0x00};
+ u16 wpsielen = 0, p2pielen = 0;
+#ifdef CONFIG_8723AU_P2P
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ mac = myid(&padapter->eeprompriv);
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ if (da) {
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr3, da);
+ } else {
+ if ((pwdinfo->p2p_info.scan_op_ch_only) ||
+ (pwdinfo->rx_invitereq_info.scan_op_ch_only)) {
+			/* These two flags are set only when the device is
+			   in P2P client mode. */
+ ether_addr_copy(pwlanhdr->addr1,
+ pwdinfo->p2p_peer_interface_addr);
+ ether_addr_copy(pwlanhdr->addr3,
+ pwdinfo->p2p_peer_interface_addr);
+ } else {
+ /* broadcast probe request frame */
+ ether_addr_copy(pwlanhdr->addr1, bc_addr);
+ ether_addr_copy(pwlanhdr->addr3, bc_addr);
+ }
+ }
+ ether_addr_copy(pwlanhdr->addr2, mac);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_PROBEREQ);
+
+	pframe += sizeof(struct ieee80211_hdr_3addr);
+	pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
+ pframe = rtw_set_ie23a(pframe, _SSID_IE_,
+ pwdinfo->tx_prov_disc_info.ssid.ssid_len,
+ pwdinfo->tx_prov_disc_info.ssid.ssid,
+ &pattrib->pktlen);
+ } else {
+ pframe = rtw_set_ie23a(pframe, _SSID_IE_,
+ P2P_WILDCARD_SSID_LEN,
+ pwdinfo->p2p_wildcard_ssid,
+ &pattrib->pktlen);
+ }
+ /* Use the OFDM rate in the P2P probe request frame.
+ (6(B), 9(B), 12(B), 24(B), 36, 48, 54) */
+ pframe = rtw_set_ie23a(pframe, _SUPPORTEDRATES_IE_, 8,
+ pwdinfo->support_rate, &pattrib->pktlen);
+
+ if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled) {
+ if (pmlmepriv->wps_probe_req_ie &&
+ pmlmepriv->p2p_probe_req_ie) {
+ /* WPS IE */
+ memcpy(pframe, pmlmepriv->wps_probe_req_ie,
+ pmlmepriv->wps_probe_req_ie_len);
+ pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
+ pframe += pmlmepriv->wps_probe_req_ie_len;
+
+ /* P2P IE */
+ memcpy(pframe, pmlmepriv->p2p_probe_req_ie,
+ pmlmepriv->p2p_probe_req_ie_len);
+ pattrib->pktlen += pmlmepriv->p2p_probe_req_ie_len;
+ pframe += pmlmepriv->p2p_probe_req_ie_len;
+ }
+ } else {
+
+ /* WPS IE */
+ /* Noted by Albert 20110221 */
+		/* According to the WPS specification, all WPS
+		   attributes are presented in big endian. */
+
+ wpsielen = 0;
+ /* WPS OUI */
+ *(u32*) (wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ if (pmlmepriv->wps_probe_req_ie == NULL) {
+ /* UUID-E */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(WPS_ATTR_UUID_E);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0010);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, myid(&padapter->eeprompriv),
+ ETH_ALEN);
+ wpsielen += 0x10;
+
+ /* Config Method */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(pwdinfo->supported_wps_cm);
+ wpsielen += 2;
+ }
+
+ /* Device Name */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(pwdinfo->device_name_len);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, pwdinfo->device_name,
+ pwdinfo->device_name_len);
+ wpsielen += pwdinfo->device_name_len;
+
+ /* Primary Device Type */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0008);
+ wpsielen += 2;
+
+ /* Value: */
+ /* Category ID */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_PDT_CID_RTK_WIDI);
+ wpsielen += 2;
+
+ /* OUI */
+ *(u32*) (wpsie + wpsielen) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* Sub Category ID */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_PDT_SCID_RTK_DMP);
+ wpsielen += 2;
+
+ /* Device Password ID */
+ /* Type: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16*) (wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ /* Registrar-specified */
+ *(u16*) (wpsie + wpsielen) =
+ cpu_to_be16(WPS_DPID_REGISTRAR_SPEC);
+ wpsielen += 2;
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, wpsielen,
+ (unsigned char *)wpsie,
+ &pattrib->pktlen);
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20110221 */
+ /* According to the P2P Specification, the probe request
+ frame should contain 5 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. P2P Device ID if this probe request wants to
+ find the specific P2P device */
+ /* 3. Listen Channel */
+ /* 4. Extended Listen Timing */
+ /* 5. Operating Channel if this WiFi is working as
+ the group owner now */
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP |
+ DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
+
+ /* Listen Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_LISTEN_CH;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
+
+ /* Channel Number */
+ /* listen channel */
+ p2pie[p2pielen++] = pwdinfo->listen_channel;
+
+ /* Extended Listen Timing */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0004);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Operating Channel (if this WiFi is working as
+ the group owner now) */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute"
+ section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
+
+ /* Channel Number */
+ /* operating channel number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel;
+ }
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, p2pielen,
+ (unsigned char *)p2pie,
+ &pattrib->pktlen);
+
+ if (pmlmepriv->wps_probe_req_ie) {
+ /* WPS IE */
+ memcpy(pframe, pmlmepriv->wps_probe_req_ie,
+ pmlmepriv->wps_probe_req_ie_len);
+ pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
+ pframe += pmlmepriv->wps_probe_req_ie_len;
+ }
+ }
+
+#ifdef CONFIG_8723AU_P2P
+ if (pwdinfo->wfd_info->wfd_enable) {
+ wfdielen = build_probe_req_wfd_ie(pwdinfo, pframe);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+ } else if (pmlmepriv->wfd_probe_req_ie &&
+ pmlmepriv->wfd_probe_req_ie_len>0) {
+ /* WFD IE */
+ memcpy(pframe, pmlmepriv->wfd_probe_req_ie,
+ pmlmepriv->wfd_probe_req_ie_len);
+ pattrib->pktlen += pmlmepriv->wfd_probe_req_ie_len;
+ pframe += pmlmepriv->wfd_probe_req_ie_len;
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("issuing probe_req, tx_len =%d\n", pattrib->last_txcmdsz));
+
+ if (wait_ack) {
+ ret = dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe);
+ } else {
+ dump_mgntframe23a(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+inline void issue23a_probereq_p2p(struct rtw_adapter *adapter, u8 *da)
+{
+ _issue23a_probereq_p2p(adapter, da, false);
+}
+
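+/* Retry wrapper around _issue23a_probereq_p2p(): resend the P2P probe request
+ * up to try_cnt times.  With wait_ms > 0 each attempt waits for a TX ack and
+ * sleeps wait_ms between failed attempts; with wait_ms == 0 the frame is sent
+ * try_cnt times without waiting for an ack.
+ */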
+int issue23a_probereq_p2p_ex(struct rtw_adapter *adapter, u8 *da,
+ int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ unsigned long start = jiffies;
+
+ do {
+ ret = _issue23a_probereq_p2p(adapter, da,
+ wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ msleep(wait_ms);
+
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_8723A(FUNC_ADPT_FMT" to "MAC_FMT", ch:%u%s, %d/%d "
+ "in %u ms\n", FUNC_ADPT_ARG(adapter),
+ MAC_ARG(da), rtw_get_oper_ch23a(adapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt,
+ jiffies_to_msecs(jiffies - start));
+ else
+ DBG_8723A(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(adapter),
+ rtw_get_oper_ch23a(adapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt,
+ jiffies_to_msecs(jiffies - start));
+ }
+exit:
+ return ret;
+}
+
+#endif /* CONFIG_8723AU_P2P */
+
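+/* Filter out retransmitted public action frames: if the retry bit is set and
+ * the sequence control (and, when token >= 0, the dialog token) match the
+ * last frame accepted, return _FAIL so the caller drops the duplicate.
+ */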
+static s32 rtw_action_public_decache(struct recv_frame *recv_frame, s32 token)
+{
+ struct rtw_adapter *adapter = recv_frame->adapter;
+ struct mlme_ext_priv *mlmeext = &adapter->mlmeextpriv;
+ struct sk_buff *skb = recv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u16 seq_ctrl;
+
+ seq_ctrl = ((recv_frame->attrib.seq_num&0xffff) << 4) |
+ (recv_frame->attrib.frag_num & 0xf);
+
+ if (ieee80211_has_retry(hdr->frame_control)) {
+ if (token >= 0) {
+ if ((seq_ctrl == mlmeext->action_public_rxseq) &&
+ (token == mlmeext->action_public_dialog_token)) {
+ DBG_8723A(FUNC_ADPT_FMT" seq_ctrl = 0x%x, "
+ "rxseq = 0x%x, token:%d\n",
+ FUNC_ADPT_ARG(adapter), seq_ctrl,
+ mlmeext->action_public_rxseq, token);
+ return _FAIL;
+ }
+ } else {
+ if (seq_ctrl == mlmeext->action_public_rxseq) {
+ DBG_8723A(FUNC_ADPT_FMT" seq_ctrl = 0x%x, "
+ "rxseq = 0x%x\n",
+ FUNC_ADPT_ARG(adapter), seq_ctrl,
+ mlmeext->action_public_rxseq);
+ return _FAIL;
+ }
+ }
+ }
+
+ mlmeext->action_public_rxseq = seq_ctrl;
+
+ if (token >= 0)
+ mlmeext->action_public_dialog_token = token;
+
+ return _SUCCESS;
+}
+
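+/* Handle P2P public action frames (GO negotiation, invitation, device
+ * discoverability and provision discovery).  If cfg80211 P2P is enabled the
+ * raw frame is forwarded to user space; otherwise the in-driver P2P state
+ * machine below processes it.
+ */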
+static unsigned int on_action_public23a_p2p(struct recv_frame *precv_frame)
+{
+ struct sk_buff *skb = precv_frame->pkt;
+ u8 *pframe = skb->data;
+ u8 *frame_body;
+ u8 dialogToken = 0;
+#ifdef CONFIG_8723AU_P2P
+ struct rtw_adapter *padapter = precv_frame->adapter;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ uint len = skb->len;
+ u8 *p2p_ie;
+ u32 p2p_ielen;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 result = P2P_STATUS_SUCCESS;
+#endif /* CONFIG_8723AU_P2P */
+
+ frame_body = (unsigned char *)
+ (pframe + sizeof(struct ieee80211_hdr_3addr));
+
+ dialogToken = frame_body[7];
+
+ if (rtw_action_public_decache(precv_frame, dialogToken) == _FAIL)
+ return _FAIL;
+
+#ifdef CONFIG_8723AU_P2P
+ del_timer_sync(&pwdinfo->reset_ch_sitesurvey);
+ if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled) {
+ rtw_cfg80211_rx_p2p_action_public(padapter, pframe, len);
+ } else {
+		/* Do nothing if the driver hasn't enabled the P2P function. */
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE))
+ return _SUCCESS;
+
+ len -= sizeof(struct ieee80211_hdr_3addr);
+
+		switch (frame_body[6]) { /* OUI Subtype */
+ case P2P_GO_NEGO_REQ:
+ DBG_8723A("[%s] Got GO Nego Req Frame\n", __func__);
+ memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ))
+ {
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+ }
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL))
+ {
+				/* Commented by Albert 20110526 */
+				/* The previous negotiation failure has not been reset yet. */
+ del_timer_sync(&pwdinfo->restore_p2p_state_timer);
+ /* Restore the previous p2p state */
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+ DBG_8723A("[%s] Restore the previous p2p state to %d\n", __func__, rtw_p2p_state(pwdinfo));
+ }
+
+			/* Commented by Kurt 20110902 */
+			/* Guard against a duplicate request overwriting pre_p2p_state. */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING))
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+
+ /* Commented by Kurt 20120113 */
+ /* Get peer_dev_addr here if peer doesn't issue prov_disc frame. */
+ if (is_zero_ether_addr(pwdinfo->rx_prov_disc_info.peerDevAddr))
+ ether_addr_copy(pwdinfo->rx_prov_disc_info.peerDevAddr, hdr->addr2);
+
+ result = process_p2p_group_negotation_req23a(pwdinfo, frame_body, len);
+ issue_p2p_GO_response(padapter, hdr->addr2,
+ frame_body, len, result);
+
+			/* Commented by Albert 20110718 */
+			/* Whether the negotiation is still in progress or has failed, the driver should arm the restore-P2P-state timer. */
+ mod_timer(&pwdinfo->restore_p2p_state_timer,
+ jiffies + msecs_to_jiffies(5000));
+ break;
+
+ case P2P_GO_NEGO_RESP:
+ DBG_8723A("[%s] Got GO Nego Resp Frame\n", __func__);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING))
+ {
+				/* Commented by Albert 20110425 */
+				/* The restore timer was armed when the negotiation request frame was issued in rtw_p2p_connect. */
+ del_timer_sync(&pwdinfo->restore_p2p_state_timer);
+ pwdinfo->nego_req_info.benable = false;
+ result = process_p2p_group_negotation_resp23a(pwdinfo, frame_body, len);
+ issue_p2p_GO_confirm(pwdinfo->padapter,
+ hdr->addr2,
+ result);
+ if (result == P2P_STATUS_SUCCESS) {
+ if (rtw_p2p_role(pwdinfo) ==
+ P2P_ROLE_CLIENT) {
+ pwdinfo->p2p_info.operation_ch[ 0 ] = pwdinfo->peer_operating_ch;
+ pwdinfo->p2p_info.scan_op_ch_only = 1;
+ mod_timer(&pwdinfo->reset_ch_sitesurvey2, jiffies + msecs_to_jiffies(P2P_RESET_SCAN_CH));
+ }
+ }
+
+ /* Reset the dialog token for group negotiation frames. */
+ pwdinfo->negotiation_dialog_token = 1;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL))
+ {
+ mod_timer(&pwdinfo->restore_p2p_state_timer, jiffies + msecs_to_jiffies(5000));
+ }
+ } else {
+ DBG_8723A("[%s] Skipped GO Nego Resp Frame (p2p_state != P2P_STATE_GONEGO_ING)\n", __func__);
+ }
+
+ break;
+
+ case P2P_GO_NEGO_CONF:
+
+ DBG_8723A("[%s] Got GO Nego Confirm Frame\n", __func__);
+ result = process_p2p_group_negotation_confirm23a(pwdinfo, frame_body, len);
+ if (P2P_STATUS_SUCCESS == result)
+ {
+ if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT)
+ {
+ pwdinfo->p2p_info.operation_ch[ 0 ] = pwdinfo->peer_operating_ch;
+ pwdinfo->p2p_info.scan_op_ch_only = 1;
+ mod_timer(&pwdinfo->reset_ch_sitesurvey2, jiffies + msecs_to_jiffies(P2P_RESET_SCAN_CH));
+ }
+ }
+ break;
+
+ case P2P_INVIT_REQ:
+ /* Added by Albert 2010/10/05 */
+ /* Received the P2P Invite Request frame. */
+
+ DBG_8723A("[%s] Got invite request frame!\n", __func__);
+ if ((p2p_ie = rtw_get_p2p_ie23a(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &p2p_ielen)))
+ {
+				/* Parse the necessary information from the P2P Invitation Request frame, */
+				/* e.g. the MAC address of the device that sent it. */
+ u32 attr_contentlen = 0;
+ u8 status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ struct group_id_info group_id;
+ u8 invitation_flag = 0;
+
+ rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_INVITATION_FLAGS, &invitation_flag, &attr_contentlen);
+ if (attr_contentlen)
+ {
+
+ rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_BSSID, pwdinfo->p2p_peer_interface_addr, &attr_contentlen);
+					/* Commented by Albert 20120510 */
+					/* Copy the address into pwdinfo->p2p_peer_interface_addr so that the WFD UI (or Sigma) can query the peer interface address with: */
+					/* #> iwpriv wlan0 p2p_get peer_ifa */
+					/* With the peer interface address, Sigma can then find the correct conf file for wpa_supplicant. */
+
+ if (attr_contentlen)
+ {
+ DBG_8723A("[%s] GO's BSSID = %.2X %.2X %.2X %.2X %.2X %.2X\n", __func__,
+ pwdinfo->p2p_peer_interface_addr[0], pwdinfo->p2p_peer_interface_addr[1],
+ pwdinfo->p2p_peer_interface_addr[2], pwdinfo->p2p_peer_interface_addr[3],
+ pwdinfo->p2p_peer_interface_addr[4], pwdinfo->p2p_peer_interface_addr[5]);
+ }
+
+ if (invitation_flag & P2P_INVITATION_FLAGS_PERSISTENT)
+ {
+ /* Re-invoke the persistent group. */
+
+ memset(&group_id, 0x00, sizeof(struct group_id_info));
+ rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8*) &group_id, &attr_contentlen);
+ if (attr_contentlen) {
+ if (ether_addr_equal(group_id.go_device_addr, myid(&padapter->eeprompriv))) {
+ /* The p2p device sending this p2p invitation request wants this Wi-Fi device to be the persistent GO. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_GO);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ status_code = P2P_STATUS_SUCCESS;
+ }
+ else
+ {
+ /* The p2p device sending this p2p invitation request wants to be the persistent GO. */
+ if (is_matched_in_profilelist(pwdinfo->p2p_peer_interface_addr, &pwdinfo->profileinfo[ 0 ]))
+ {
+ u8 operatingch_info[5] = { 0x00 };
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen))
+ {
+ if (rtw_ch_set_search_ch23a(padapter->mlmeextpriv.channel_set, (u32)operatingch_info[4]))
+ {
+ /* The operating channel is acceptable for this device. */
+ pwdinfo->rx_invitereq_info.operation_ch[0]= operatingch_info[4];
+ pwdinfo->rx_invitereq_info.scan_op_ch_only = 1;
+ mod_timer(&pwdinfo->reset_ch_sitesurvey, jiffies + msecs_to_jiffies(P2P_RESET_SCAN_CH));
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_MATCH);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ status_code = P2P_STATUS_SUCCESS;
+ }
+ else
+ {
+ /* The operating channel isn't supported by this device. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ status_code = P2P_STATUS_FAIL_NO_COMMON_CH;
+ mod_timer(&pwdinfo->restore_p2p_state_timer, jiffies + msecs_to_jiffies(3000));
+ }
+ }
+ else {
+ /* Commented by Albert 20121130 */
+							/* Intel stores the operating channel information in a different P2P IE. */
+ /* Workaround for Intel WiDi 3.5 */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_MATCH);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ status_code = P2P_STATUS_SUCCESS;
+ }
+ }
+ else
+ {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
+
+ status_code = P2P_STATUS_FAIL_UNKNOWN_P2PGROUP;
+ }
+ }
+ }
+ else
+ {
+ DBG_8723A("[%s] P2P Group ID Attribute NOT FOUND!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+ }
+ else
+ {
+ /* Received the invitation to join a P2P group. */
+
+ memset(&group_id, 0x00, sizeof(struct group_id_info));
+ rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8*) &group_id, &attr_contentlen);
+ if (attr_contentlen)
+ {
+ if (ether_addr_equal(group_id.go_device_addr, myid(&padapter->eeprompriv))) {
+ /* In this case, the GO can't be myself. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+ else
+ {
+ /* The p2p device sending this p2p invitation request wants to join an existing P2P group */
+ /* Commented by Albert 2012/06/28 */
+ /* In this case, this Wi-Fi device should use the iwpriv command to get the peer device address. */
+ /* The peer device address should be the destination address for the provisioning discovery request. */
+ /* Then, this Wi-Fi device should use the iwpriv command to get the peer interface address. */
+						/* The peer interface address is then used as the WPS MAC address. */
+ ether_addr_copy(pwdinfo->p2p_peer_device_addr, group_id.go_device_addr);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_JOIN);
+ status_code = P2P_STATUS_SUCCESS;
+ }
+ }
+ else
+ {
+ DBG_8723A("[%s] P2P Group ID Attribute NOT FOUND!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+ }
+ }
+ else
+ {
+ DBG_8723A("[%s] P2P Invitation Flags Attribute NOT FOUND!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+
+ DBG_8723A("[%s] status_code = %d\n", __func__, status_code);
+
+ pwdinfo->inviteresp_info.token = frame_body[ 7 ];
+ issue_p2p_invitation_response23a(padapter, hdr->addr2, pwdinfo->inviteresp_info.token, status_code);
+ }
+ break;
+
+ case P2P_INVIT_RESP:
+ {
+ u8 attr_content = 0x00;
+ u32 attr_contentlen = 0;
+
+ DBG_8723A("[%s] Got invite response frame!\n", __func__);
+ del_timer_sync(&pwdinfo->restore_p2p_state_timer);
+ if ((p2p_ie = rtw_get_p2p_ie23a(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &p2p_ielen)))
+ {
+ rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
+
+ if (attr_contentlen == 1)
+ {
+ DBG_8723A("[%s] Status = %d\n", __func__, attr_content);
+ pwdinfo->invitereq_info.benable = false;
+
+ if (attr_content == P2P_STATUS_SUCCESS)
+ {
+ if (ether_addr_equal(pwdinfo->invitereq_info.go_bssid, myid(&padapter->eeprompriv))) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ else
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_OK);
+ }
+ else
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
+ }
+ }
+ else
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
+ }
+ }
+ else
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
+ }
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL)) {
+ mod_timer(&pwdinfo->restore_p2p_state_timer, jiffies + msecs_to_jiffies(5000));
+ }
+ break;
+ }
+ case P2P_DEVDISC_REQ:
+
+ process_p2p_devdisc_req23a(pwdinfo, pframe, len);
+
+ break;
+
+ case P2P_DEVDISC_RESP:
+
+ process_p2p_devdisc_resp23a(pwdinfo, pframe, len);
+
+ break;
+
+ case P2P_PROVISION_DISC_REQ:
+ DBG_8723A("[%s] Got Provisioning Discovery Request Frame\n", __func__);
+ process_p2p_provdisc_req23a(pwdinfo, pframe, len);
+ ether_addr_copy(pwdinfo->rx_prov_disc_info.peerDevAddr, hdr->addr2);
+
+ /* 20110902 Kurt */
+			/* Guard against a duplicate prov disc req overwriting pre_p2p_state. */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ))
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ);
+ mod_timer(&pwdinfo->restore_p2p_state_timer,
+ jiffies + msecs_to_jiffies(P2P_PROVISION_TIMEOUT));
+ break;
+
+ case P2P_PROVISION_DISC_RESP:
+ /* Commented by Albert 20110707 */
+ /* Should we check the pwdinfo->tx_prov_disc_info.bsent flag here?? */
+ DBG_8723A("[%s] Got Provisioning Discovery Response Frame\n", __func__);
+ /* Commented by Albert 20110426 */
+			/* The restore timer was armed when the provisioning request frame was issued in rtw_p2p_prov_disc. */
+ del_timer_sync(&pwdinfo->restore_p2p_state_timer);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_RSP);
+ process_p2p_provdisc_resp23a(pwdinfo, pframe);
+ mod_timer(&pwdinfo->restore_p2p_state_timer,
+ jiffies + msecs_to_jiffies(P2P_PROVISION_TIMEOUT));
+ break;
+
+ }
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ return _SUCCESS;
+}
+
+static unsigned int on_action_public23a_vendor(struct recv_frame *precv_frame)
+{
+ unsigned int ret = _FAIL;
+ struct sk_buff *skb = precv_frame->pkt;
+ u8 *pframe = skb->data;
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
+
+ if (!memcmp(frame_body + 2, P2P_OUI23A, 4)) {
+ ret = on_action_public23a_p2p(precv_frame);
+ }
+
+ return ret;
+}
+
+static unsigned int
+on_action_public23a_default(struct recv_frame *precv_frame, u8 action)
+{
+ unsigned int ret = _FAIL;
+ struct sk_buff *skb = precv_frame->pkt;
+ u8 *pframe = skb->data;
+ uint frame_len = skb->len;
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
+ u8 token;
+ struct rtw_adapter *adapter = precv_frame->adapter;
+ int cnt = 0;
+ char msg[64];
+
+ token = frame_body[2];
+
+ if (rtw_action_public_decache(precv_frame, token) == _FAIL)
+ goto exit;
+
+ cnt += sprintf((msg+cnt), "%s(token:%u)",
+ action_public_str23a(action), token);
+ rtw_cfg80211_rx_action(adapter, pframe, frame_len, msg);
+
+ ret = _SUCCESS;
+
+exit:
+ return ret;
+}
+
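+/* Dispatch public action frames addressed to this interface: vendor-specific
+ * frames carrying the P2P OUI go to the P2P handler, anything else is
+ * de-duplicated and reported to cfg80211.
+ */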
+unsigned int on_action_public23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ unsigned int ret = _FAIL;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
+ u8 category, action;
+
+ /* check RA matches or not */
+ if (!ether_addr_equal(myid(&padapter->eeprompriv), hdr->addr1))
+ goto exit;
+
+ category = frame_body[0];
+ if (category != WLAN_CATEGORY_PUBLIC)
+ goto exit;
+
+ action = frame_body[1];
+ switch (action) {
+ case ACT_PUBLIC_VENDOR:
+ ret = on_action_public23a_vendor(precv_frame);
+ break;
+ default:
+ ret = on_action_public23a_default(precv_frame, action);
+ break;
+ }
+
+exit:
+ return ret;
+}
+
+unsigned int OnAction23a_ht(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction23a_wmm(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction23a_p2p(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+#ifdef CONFIG_8723AU_P2P
+ u8 *frame_body;
+ u8 category, OUI_Subtype, dialogToken = 0;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+ uint len = skb->len;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ DBG_8723A("%s\n", __func__);
+
+ /* check RA matches or not */
+ if (!ether_addr_equal(myid(&padapter->eeprompriv), hdr->addr1))
+ return _SUCCESS;
+
+ frame_body = (unsigned char *)
+ (pframe + sizeof(struct ieee80211_hdr_3addr));
+
+ category = frame_body[0];
+ if (category != WLAN_CATEGORY_VENDOR_SPECIFIC)
+ return _SUCCESS;
+
+ if (cpu_to_be32(*((u32*) (frame_body + 1))) != P2POUI)
+ return _SUCCESS;
+
+ if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled) {
+ rtw_cfg80211_rx_action_p2p(padapter, pframe, len);
+ return _SUCCESS;
+ } else {
+ len -= sizeof(struct ieee80211_hdr_3addr);
+ OUI_Subtype = frame_body[5];
+ dialogToken = frame_body[6];
+
+		switch (OUI_Subtype) {
+ case P2P_NOTICE_OF_ABSENCE:
+ break;
+
+ case P2P_PRESENCE_REQUEST:
+ process_p2p_presence_req23a(pwdinfo, pframe, len);
+ break;
+
+ case P2P_PRESENCE_RESPONSE:
+ break;
+
+ case P2P_GO_DISC_REQUEST:
+ break;
+
+ default:
+ break;
+ }
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ return _SUCCESS;
+}
+
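+/* Generic action frame dispatcher: look up the frame's category in
+ * OnAction23a_tbl and invoke every handler registered for that category.
+ */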
+unsigned int OnAction23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ int i;
+ unsigned char category;
+ struct action_handler *ptable;
+ unsigned char *frame_body;
+ struct sk_buff *skb = precv_frame->pkt;
+ u8 *pframe = skb->data;
+
+ frame_body = (unsigned char *)
+ (pframe + sizeof(struct ieee80211_hdr_3addr));
+
+ category = frame_body[0];
+
+ for (i = 0;
+ i < sizeof(OnAction23a_tbl) / sizeof(struct action_handler); i++) {
+ ptable = &OnAction23a_tbl[i];
+
+ if (category == ptable->num)
+ ptable->func(padapter, precv_frame);
+ }
+
+ return _SUCCESS;
+}
+
+unsigned int DoReserved23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
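+/* Allocate a management xmit frame together with an extended xmit buffer and
+ * link the two; returns NULL (freeing the frame again) if the buffer
+ * allocation fails.
+ */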
+struct xmit_frame *alloc_mgtxmitframe23a(struct xmit_priv *pxmitpriv)
+{
+ struct xmit_frame *pmgntframe;
+ struct xmit_buf *pxmitbuf;
+
+ pmgntframe = rtw_alloc_xmitframe23a_ext(pxmitpriv);
+
+ if (!pmgntframe) {
+ DBG_8723A(FUNC_ADPT_FMT" alloc xmitframe fail\n",
+ FUNC_ADPT_ARG(pxmitpriv->adapter));
+ goto exit;
+ }
+
+ pxmitbuf = rtw_alloc_xmitbuf23a_ext(pxmitpriv);
+ if (!pxmitbuf) {
+ DBG_8723A(FUNC_ADPT_FMT" alloc xmitbuf fail\n",
+ FUNC_ADPT_ARG(pxmitpriv->adapter));
+ rtw_free_xmitframe23a(pxmitpriv, pmgntframe);
+ pmgntframe = NULL;
+ goto exit;
+ }
+
+ pmgntframe->frame_tag = MGNT_FRAMETAG;
+ pmgntframe->pxmitbuf = pxmitbuf;
+ pmgntframe->buf_addr = pxmitbuf->pbuf;
+ pxmitbuf->priv_data = pmgntframe;
+
+exit:
+ return pmgntframe;
+}
+
+/****************************************************************************
+
+Following are some TX functions for WiFi MLME
+
+*****************************************************************************/
+
+void update_mgnt_tx_rate23a(struct rtw_adapter *padapter, u8 rate)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ pmlmeext->tx_rate = rate;
+ DBG_8723A("%s(): rate = %x\n", __func__, rate);
+}
+
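+/* Fill in the default TX attributes for a management frame: a single
+ * unencrypted fragment on the management queue (qsel 0x12), 20MHz, no QoS/HT,
+ * with the basic rate chosen from the current wireless mode (raid 6 for 11b,
+ * raid 5 for a/g).
+ */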
+void update_mgntframe_attrib23a(struct rtw_adapter *padapter,
+ struct pkt_attrib *pattrib)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ memset((u8 *)pattrib, 0, sizeof(struct pkt_attrib));
+
+ pattrib->hdrlen = 24;
+ pattrib->nr_frags = 1;
+ pattrib->priority = 7;
+ pattrib->mac_id = 0;
+ pattrib->qsel = 0x12;
+
+ pattrib->pktlen = 0;
+
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
+ pattrib->raid = 6;/* b mode */
+ else
+ pattrib->raid = 5;/* a/g mode */
+
+ pattrib->encrypt = _NO_PRIVACY_;
+ pattrib->bswenc = false;
+
+ pattrib->qos_en = false;
+ pattrib->ht_en = false;
+ pattrib->bwmode = HT_CHANNEL_WIDTH_20;
+ pattrib->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pattrib->sgi = false;
+
+ pattrib->seqnum = pmlmeext->mgnt_seq;
+
+ pattrib->retry_ctrl = true;
+}
+
+void dump_mgntframe23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pmgntframe)
+{
+ if (padapter->bSurpriseRemoved == true ||
+ padapter->bDriverStopped == true)
+ return;
+
+ rtw_hal_mgnt_xmit23a(padapter, pmgntframe);
+}
+
+s32 dump_mgntframe23a_and_wait(struct rtw_adapter *padapter,
+ struct xmit_frame *pmgntframe, int timeout_ms)
+{
+ s32 ret = _FAIL;
+ unsigned long irqL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct xmit_buf *pxmitbuf = pmgntframe->pxmitbuf;
+ struct submit_ctx sctx;
+
+ if (padapter->bSurpriseRemoved == true ||
+ padapter->bDriverStopped == true)
+ return ret;
+
+ rtw_sctx_init23a(&sctx, timeout_ms);
+ pxmitbuf->sctx = &sctx;
+
+ ret = rtw_hal_mgnt_xmit23a(padapter, pmgntframe);
+
+ if (ret == _SUCCESS)
+ ret = rtw_sctx_wait23a(&sctx);
+
+ spin_lock_irqsave(&pxmitpriv->lock_sctx, irqL);
+ pxmitbuf->sctx = NULL;
+ spin_unlock_irqrestore(&pxmitpriv->lock_sctx, irqL);
+
+ return ret;
+}
+
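+/* Transmit a management frame and block for the TX ack report (up to 500ms),
+ * serialized by ack_tx_mutex.  Returns the ack-wait result, or -1 if the
+ * adapter is already stopped or removed.
+ */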
+s32 dump_mgntframe23a_and_wait_ack23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pmgntframe)
+{
+ s32 ret = _FAIL;
+ u32 timeout_ms = 500;/* 500ms */
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ if (padapter->bSurpriseRemoved == true ||
+ padapter->bDriverStopped == true)
+ return -1;
+
+ mutex_lock(&pxmitpriv->ack_tx_mutex);
+ pxmitpriv->ack_tx = true;
+
+ pmgntframe->ack_report = 1;
+ if (rtw_hal_mgnt_xmit23a(padapter, pmgntframe) == _SUCCESS) {
+ ret = rtw_ack_tx_wait23a(pxmitpriv, timeout_ms);
+ }
+
+ pxmitpriv->ack_tx = false;
+ mutex_unlock(&pxmitpriv->ack_tx_mutex);
+
+ return ret;
+}
+
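+/* Rewrite the SSID IE in place for hidden-SSID operation.  Mode 1 shrinks the
+ * IE to zero length and shifts the following IEs up, returning the (negative)
+ * length delta; mode 2 keeps the length but zeroes the SSID bytes.
+ */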
+static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
+{
+ u8 *ssid_ie;
+ int ssid_len_ori;
+ int len_diff = 0;
+ u8 *next_ie;
+ u32 remain_len;
+
+ ssid_ie = rtw_get_ie23a(ies, WLAN_EID_SSID, &ssid_len_ori, ies_len);
+
+ /* DBG_8723A("%s hidden_ssid_mode:%u, ssid_ie:%p, ssid_len_ori:%d\n",
+ __func__, hidden_ssid_mode, ssid_ie, ssid_len_ori); */
+
+ if (ssid_ie && ssid_len_ori > 0) {
+		switch (hidden_ssid_mode) {
+ case 1:
+ next_ie = ssid_ie + 2 + ssid_len_ori;
+ remain_len = 0;
+
+ remain_len = ies_len -(next_ie-ies);
+
+ ssid_ie[1] = 0;
+ memcpy(ssid_ie+2, next_ie, remain_len);
+ len_diff -= ssid_len_ori;
+
+ break;
+ case 2:
+ memset(&ssid_ie[2], 0, ssid_len_ori);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return len_diff;
+}
+
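+/* Build and transmit a beacon.  In AP mode the stored beacon IEs are copied,
+ * with WPS/P2P/WFD additions and hidden-SSID handling applied; in ad-hoc mode
+ * a minimal beacon is constructed from the current network parameters.  With
+ * timeout_ms > 0 the call waits for the TX completion report.
+ */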
+void issue_beacon23a(struct rtw_adapter *padapter, int timeout_ms)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned int rate_len;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_8723AU_P2P */
+ u8 *wps_ie;
+ u32 wps_ielen;
+ u8 sr = 0;
+ int len_diff;
+
+ /* DBG_8723A("%s\n", __func__); */
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL) {
+ DBG_8723A("%s, alloc mgnt frame fail\n", __func__);
+ return;
+ }
+#ifdef CONFIG_8723AU_AP_MODE
+ spin_lock_bh(&pmlmepriv->bcn_update_lock);
+#endif
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+ pattrib->qsel = 0x10;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ ether_addr_copy(pwlanhdr->addr1, bc_addr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, get_my_bssid23a(cur_network));
+
+ SetSeqNum(pwlanhdr, 0 /*pmlmeext->mgnt_seq*/);
+ /* pmlmeext->mgnt_seq++; */
+ SetFrameSubType(pframe, WIFI_BEACON);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ /* DBG_8723A("ie len =%d\n", cur_network->IELength); */
+#ifdef CONFIG_8723AU_P2P
+ /* for P2P : Primary Device Type & Device Name */
+ u32 insert_len = 0;
+ wps_ie = rtw_get_wps_ie23a(cur_network->IEs + _FIXED_IE_LENGTH_,
+ cur_network->IELength -
+ _FIXED_IE_LENGTH_, NULL, &wps_ielen);
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && wps_ie &&
+ wps_ielen > 0) {
+ uint wps_offset, remainder_ielen;
+ u8 *premainder_ie, *pframe_wscie;
+
+ wps_offset = (uint)(wps_ie - cur_network->IEs);
+
+ premainder_ie = wps_ie + wps_ielen;
+
+ remainder_ielen = cur_network->IELength - wps_offset -
+ wps_ielen;
+
+ if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled) {
+ if (pmlmepriv->wps_beacon_ie &&
+ pmlmepriv->wps_beacon_ie_len>0) {
+ memcpy(pframe, cur_network->IEs,
+ wps_offset);
+ pframe += wps_offset;
+ pattrib->pktlen += wps_offset;
+
+ memcpy(pframe, pmlmepriv->wps_beacon_ie,
+ pmlmepriv->wps_beacon_ie_len);
+ pframe += pmlmepriv->wps_beacon_ie_len;
+ pattrib->pktlen +=
+ pmlmepriv->wps_beacon_ie_len;
+
+ /* copy remainder_ie to pframe */
+ memcpy(pframe, premainder_ie,
+ remainder_ielen);
+ pframe += remainder_ielen;
+ pattrib->pktlen += remainder_ielen;
+ } else {
+ memcpy(pframe, cur_network->IEs,
+ cur_network->IELength);
+ pframe += cur_network->IELength;
+ pattrib->pktlen +=
+ cur_network->IELength;
+ }
+ } else {
+ pframe_wscie = pframe + wps_offset;
+ memcpy(pframe, cur_network->IEs,
+ wps_offset + wps_ielen);
+ pframe += (wps_offset + wps_ielen);
+ pattrib->pktlen += (wps_offset + wps_ielen);
+
+				/* pframe now points to the end of the WSC IE; insert
+				   Primary Device Type & Device Name */
+ /* Primary Device Type */
+ /* Type: */
+ *(u16*) (pframe + insert_len) =
+ cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
+ insert_len += 2;
+
+ /* Length: */
+ *(u16*) (pframe + insert_len) =
+ cpu_to_be16(0x0008);
+ insert_len += 2;
+
+ /* Value: */
+ /* Category ID */
+ *(u16*) (pframe + insert_len) =
+ cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ insert_len += 2;
+
+ /* OUI */
+ *(u32*) (pframe + insert_len) =
+ cpu_to_be32(WPSOUI);
+ insert_len += 4;
+
+ /* Sub Category ID */
+ *(u16*) (pframe + insert_len) =
+ cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ insert_len += 2;
+
+ /* Device Name */
+ /* Type: */
+ *(u16*) (pframe + insert_len) =
+ cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ insert_len += 2;
+
+ /* Length: */
+ *(u16*) (pframe + insert_len) =
+ cpu_to_be16(pwdinfo->device_name_len);
+ insert_len += 2;
+
+ /* Value: */
+ memcpy(pframe + insert_len,
+ pwdinfo->device_name,
+ pwdinfo->device_name_len);
+ insert_len += pwdinfo->device_name_len;
+
+ /* update wsc ie length */
+ *(pframe_wscie+1) = (wps_ielen -2) + insert_len;
+
+ /* pframe move to end */
+ pframe+= insert_len;
+ pattrib->pktlen += insert_len;
+
+ /* copy remainder_ie to pframe */
+ memcpy(pframe, premainder_ie, remainder_ielen);
+ pframe += remainder_ielen;
+ pattrib->pktlen += remainder_ielen;
+ }
+ } else
+#endif /* CONFIG_8723AU_P2P */
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ len_diff = update_hidden_ssid(pframe + _BEACON_IE_OFFSET_,
+ cur_network->IELength -
+ _BEACON_IE_OFFSET_,
+ pmlmeinfo->hidden_ssid_mode);
+ pframe += (cur_network->IELength+len_diff);
+ pattrib->pktlen += (cur_network->IELength+len_diff);
+
+ wps_ie = rtw_get_wps_ie23a(pmgntframe->buf_addr + TXDESC_OFFSET+
+ sizeof (struct ieee80211_hdr_3addr) +
+ _BEACON_IE_OFFSET_, pattrib->pktlen -
+ sizeof (struct ieee80211_hdr_3addr) -
+ _BEACON_IE_OFFSET_, NULL,
+ &wps_ielen);
+ if (wps_ie && wps_ielen > 0) {
+ rtw_get_wps_attr_content23a(wps_ie, wps_ielen,
+ WPS_ATTR_SELECTED_REGISTRAR,
+ (u8*)&sr, NULL);
+ }
+ if (sr != 0)
+ set_fwstate(pmlmepriv, WIFI_UNDER_WPS);
+ else
+ _clr_fwstate_(pmlmepriv, WIFI_UNDER_WPS);
+
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ u32 len;
+ if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled) {
+ len = pmlmepriv->p2p_beacon_ie_len;
+ if (pmlmepriv->p2p_beacon_ie && len > 0)
+ memcpy(pframe,
+ pmlmepriv->p2p_beacon_ie, len);
+ } else
+ len = build_beacon_p2p_ie23a(pwdinfo, pframe);
+
+ pframe += len;
+ pattrib->pktlen += len;
+
+ if (true == pwdinfo->wfd_info->wfd_enable) {
+ len = build_beacon_wfd_ie(pwdinfo, pframe);
+ } else {
+ len = 0;
+ if (pmlmepriv->wfd_beacon_ie &&
+ pmlmepriv->wfd_beacon_ie_len>0) {
+ len = pmlmepriv->wfd_beacon_ie_len;
+ memcpy(pframe,
+ pmlmepriv->wfd_beacon_ie, len);
+ }
+ }
+ pframe += len;
+ pattrib->pktlen += len;
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ goto _issue_bcn;
+ }
+
+ /* below for ad-hoc mode */
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pattrib->pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)
+ rtw_get_beacon_interval23a_from_ie(cur_network->IEs), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* capability info: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)
+ rtw_get_capability23a_from_ie(cur_network->IEs), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* SSID */
+ pframe = rtw_set_ie23a(pframe, _SSID_IE_, cur_network->Ssid.ssid_len,
+ cur_network->Ssid.ssid, &pattrib->pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len23a(cur_network->SupportedRates);
+ pframe = rtw_set_ie23a(pframe, _SUPPORTEDRATES_IE_,
+ ((rate_len > 8)? 8: rate_len),
+ cur_network->SupportedRates, &pattrib->pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie23a(pframe, _DSSET_IE_, 1, (unsigned char *)
+ &cur_network->Configuration.DSConfig,
+ &pattrib->pktlen);
+
+ /* if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) */
+ {
+ u8 erpinfo = 0;
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ /* ATIMWindow = cur->Configuration.ATIMWindow; */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie23a(pframe, _IBSS_PARA_IE_, 2,
+ (unsigned char *)&ATIMWindow,
+ &pattrib->pktlen);
+
+ /* ERP IE */
+ pframe = rtw_set_ie23a(pframe, _ERPINFO_IE_, 1,
+ &erpinfo, &pattrib->pktlen);
+ }
+
+	/* EXTENDED SUPPORTED RATES */
+ if (rate_len > 8)
+ pframe = rtw_set_ie23a(pframe, _EXT_SUPPORTEDRATES_IE_,
+ rate_len - 8,
+ cur_network->SupportedRates + 8,
+ &pattrib->pktlen);
+
+ /* todo:HT for adhoc */
+
+_issue_bcn:
+
+#ifdef CONFIG_8723AU_AP_MODE
+ pmlmepriv->update_bcn = false;
+
+ spin_unlock_bh(&pmlmepriv->bcn_update_lock);
+#endif
+
+ if ((pattrib->pktlen + TXDESC_SIZE) > 512) {
+ DBG_8723A("beacon frame too large\n");
+ return;
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ /* DBG_8723A("issue bcn_sz =%d\n", pattrib->last_txcmdsz); */
+ if (timeout_ms > 0)
+ dump_mgntframe23a_and_wait(padapter, pmgntframe, timeout_ms);
+ else
+ dump_mgntframe23a(padapter, pmgntframe);
+}
+
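+/* Build and transmit a probe response to da.  In AP mode the beacon IEs are
+ * reused, splicing in the configured WPS probe-response IE and restoring the
+ * real SSID if the beacon hides it; a P2P GO additionally appends its P2P and
+ * WFD IEs when the probe request was a valid P2P one.
+ */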
+void issue_probersp23a(struct rtw_adapter *padapter, unsigned char *da,
+ u8 is_valid_p2p_probereq)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac, *bssid;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+#ifdef CONFIG_8723AU_AP_MODE
+ u8 *pwps_ie;
+ uint wps_ielen;
+ u8 *ssid_ie;
+ int ssid_ielen;
+ int ssid_ielen_diff;
+ u8 buf[MAX_IE_SZ];
+ u8 *ies;
+#endif
+#if defined(CONFIG_8723AU_AP_MODE) || defined(CONFIG_8723AU_P2P)
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+#endif
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+ unsigned int rate_len;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_8723AU_P2P */
+
+ /* DBG_8723A("%s\n", __func__); */
+
+	if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL) {
+ DBG_8723A("%s, alloc mgnt frame fail\n", __func__);
+ return;
+ }
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ mac = myid(&padapter->eeprompriv);
+ bssid = cur_network->MacAddress;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr2, mac);
+ ether_addr_copy(pwlanhdr->addr3, bssid);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(fctrl, WIFI_PROBERSP);
+
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = pattrib->hdrlen;
+ pframe += pattrib->hdrlen;
+
+ if (cur_network->IELength > MAX_IE_SZ)
+ return;
+
+#ifdef CONFIG_8723AU_AP_MODE
+ if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
+ pwps_ie = rtw_get_wps_ie23a(cur_network->IEs +
+ _FIXED_IE_LENGTH_,
+ cur_network->IELength -
+ _FIXED_IE_LENGTH_, NULL,
+ &wps_ielen);
+
+		/* insert & update wps_probe_resp_ie */
+ if ((pmlmepriv->wps_probe_resp_ie != NULL) && pwps_ie &&
+ (wps_ielen > 0)) {
+ uint wps_offset, remainder_ielen;
+ u8 *premainder_ie;
+
+ wps_offset = (uint)(pwps_ie - cur_network->IEs);
+
+ premainder_ie = pwps_ie + wps_ielen;
+
+ remainder_ielen = cur_network->IELength - wps_offset -
+ wps_ielen;
+
+ memcpy(pframe, cur_network->IEs, wps_offset);
+ pframe += wps_offset;
+ pattrib->pktlen += wps_offset;
+
+ /* to get ie data len */
+ wps_ielen = (uint)pmlmepriv->wps_probe_resp_ie[1];
+ if ((wps_offset+wps_ielen+2)<= MAX_IE_SZ) {
+ memcpy(pframe, pmlmepriv->wps_probe_resp_ie,
+ wps_ielen+2);
+ pframe += wps_ielen+2;
+ pattrib->pktlen += wps_ielen+2;
+ }
+
+ if ((wps_offset+wps_ielen+2+remainder_ielen) <=
+ MAX_IE_SZ) {
+ memcpy(pframe, premainder_ie, remainder_ielen);
+ pframe += remainder_ielen;
+ pattrib->pktlen += remainder_ielen;
+ }
+ } else {
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ pframe += cur_network->IELength;
+ pattrib->pktlen += cur_network->IELength;
+ }
+
+ /* retrieve SSID IE from cur_network->Ssid */
+ ies = pmgntframe->buf_addr + TXDESC_OFFSET +
+ sizeof(struct ieee80211_hdr_3addr);
+
+ ssid_ie = rtw_get_ie23a(ies+_FIXED_IE_LENGTH_, _SSID_IE_,
+ &ssid_ielen,
+ (pframe-ies)-_FIXED_IE_LENGTH_);
+
+ ssid_ielen_diff = cur_network->Ssid.ssid_len - ssid_ielen;
+
+ if (ssid_ie && cur_network->Ssid.ssid_len) {
+ uint remainder_ielen;
+ u8 *remainder_ie;
+ remainder_ie = ssid_ie + 2;
+ remainder_ielen = (pframe-remainder_ie);
+
+			if (remainder_ielen > MAX_IE_SZ) {
+				DBG_8723A_LEVEL(_drv_warning_, FUNC_ADPT_FMT
+						" remainder_ielen > MAX_IE_SZ\n",
+						FUNC_ADPT_ARG(padapter));
+				remainder_ielen = MAX_IE_SZ;
+			}
+
+ memcpy(buf, remainder_ie, remainder_ielen);
+ memcpy(remainder_ie+ssid_ielen_diff, buf,
+ remainder_ielen);
+ *(ssid_ie+1) = cur_network->Ssid.ssid_len;
+ memcpy(ssid_ie+2, cur_network->Ssid.ssid,
+ cur_network->Ssid.ssid_len);
+
+ pframe += ssid_ielen_diff;
+ pattrib->pktlen += ssid_ielen_diff;
+ }
+ } else
+#endif
+ {
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pattrib->pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)
+ rtw_get_beacon_interval23a_from_ie(cur_network->IEs), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* capability info: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)
+ rtw_get_capability23a_from_ie(cur_network->IEs), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* below for ad-hoc mode */
+
+ /* SSID */
+ pframe = rtw_set_ie23a(pframe, _SSID_IE_,
+ cur_network->Ssid.ssid_len,
+ cur_network->Ssid.ssid, &pattrib->pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len23a(cur_network->SupportedRates);
+ pframe = rtw_set_ie23a(pframe, _SUPPORTEDRATES_IE_,
+ ((rate_len > 8)? 8: rate_len),
+ cur_network->SupportedRates,
+ &pattrib->pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie23a(pframe, _DSSET_IE_, 1, (unsigned char *)
+ &cur_network->Configuration.DSConfig,
+ &pattrib->pktlen);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ u8 erpinfo = 0;
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ /* ATIMWindow = cur->Configuration.ATIMWindow; */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie23a(pframe, _IBSS_PARA_IE_, 2,
+ (unsigned char *)&ATIMWindow,
+ &pattrib->pktlen);
+
+ /* ERP IE */
+ pframe = rtw_set_ie23a(pframe, _ERPINFO_IE_, 1,
+ &erpinfo, &pattrib->pktlen);
+ }
+
+		/* EXTENDED SUPPORTED RATES */
+ if (rate_len > 8)
+ pframe = rtw_set_ie23a(pframe, _EXT_SUPPORTEDRATES_IE_,
+ rate_len - 8,
+ cur_network->SupportedRates + 8,
+ &pattrib->pktlen);
+
+ /* todo:HT for adhoc */
+ }
+
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && is_valid_p2p_probereq) {
+ u32 len;
+ if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled) {
+			/* if pwdinfo->role == P2P_ROLE_DEVICE,
+			   issue_probersp23a_p2p23a() is called instead */
+ len = pmlmepriv->p2p_go_probe_resp_ie_len;
+ if (pmlmepriv->p2p_go_probe_resp_ie && len>0)
+ memcpy(pframe, pmlmepriv->p2p_go_probe_resp_ie,
+ len);
+ } else
+ len = build_probe_resp_p2p_ie23a(pwdinfo, pframe);
+
+ pframe += len;
+ pattrib->pktlen += len;
+
+ if (true == pwdinfo->wfd_info->wfd_enable) {
+ len = build_probe_resp_wfd_ie(pwdinfo, pframe, 0);
+ } else {
+ len = 0;
+ if (pmlmepriv->wfd_probe_resp_ie &&
+ pmlmepriv->wfd_probe_resp_ie_len > 0) {
+ len = pmlmepriv->wfd_probe_resp_ie_len;
+ memcpy(pframe, pmlmepriv->wfd_probe_resp_ie,
+ len);
+ }
+ }
+ pframe += len;
+ pattrib->pktlen += len;
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+
+ return;
+}
+
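+/* Build and send a probe request: unicast or broadcast depending on da, with
+ * an optional SSID, supported rates split into basic/extended IEs at eight
+ * entries, and any user-supplied WPS probe request IE appended.
+ */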
+static int _issue_probereq23a(struct rtw_adapter *padapter,
+ struct cfg80211_ssid *pssid, u8 *da, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac;
+ unsigned char bssrate[NumRates];
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ int bssrate_len = 0;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("+issue_probereq23a\n"));
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ mac = myid(&padapter->eeprompriv);
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ if (da) {
+ /* unicast probe request frame */
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr3, da);
+ } else {
+ /* broadcast probe request frame */
+ ether_addr_copy(pwlanhdr->addr1, bc_addr);
+ ether_addr_copy(pwlanhdr->addr3, bc_addr);
+ }
+
+ ether_addr_copy(pwlanhdr->addr2, mac);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_PROBEREQ);
+
+ pframe += sizeof (struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof (struct ieee80211_hdr_3addr);
+
+ if (pssid)
+ pframe = rtw_set_ie23a(pframe, _SSID_IE_, pssid->ssid_len,
+ pssid->ssid, &pattrib->pktlen);
+ else
+ pframe = rtw_set_ie23a(pframe, _SSID_IE_, 0, NULL,
+ &pattrib->pktlen);
+
+ get_rate_set23a(padapter, bssrate, &bssrate_len);
+
+ if (bssrate_len > 8) {
+ pframe = rtw_set_ie23a(pframe, _SUPPORTEDRATES_IE_, 8,
+ bssrate, &pattrib->pktlen);
+ pframe = rtw_set_ie23a(pframe, _EXT_SUPPORTEDRATES_IE_,
+ (bssrate_len - 8), (bssrate + 8),
+ &pattrib->pktlen);
+ } else {
+ pframe = rtw_set_ie23a(pframe, _SUPPORTEDRATES_IE_,
+ bssrate_len, bssrate, &pattrib->pktlen);
+ }
+
+ /* add wps_ie for wps2.0 */
+ if (pmlmepriv->wps_probe_req_ie_len>0 && pmlmepriv->wps_probe_req_ie) {
+ memcpy(pframe, pmlmepriv->wps_probe_req_ie,
+ pmlmepriv->wps_probe_req_ie_len);
+ pframe += pmlmepriv->wps_probe_req_ie_len;
+ pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("issuing probe_req, tx_len =%d\n", pattrib->last_txcmdsz));
+
+ if (wait_ack) {
+ ret = dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe);
+ } else {
+ dump_mgntframe23a(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+inline void issue_probereq23a(struct rtw_adapter *padapter,
+ struct cfg80211_ssid *pssid, u8 *da)
+{
+ _issue_probereq23a(padapter, pssid, da, false);
+}
+
+int issue_probereq23a_ex23a(struct rtw_adapter *padapter,
+ struct cfg80211_ssid *pssid, u8 *da,
+ int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ unsigned long start = jiffies;
+
+ do {
+ ret = _issue_probereq23a(padapter, pssid, da,
+ wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ msleep(wait_ms);
+
+	} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_8723A(FUNC_ADPT_FMT" to "MAC_FMT", ch:%u%s, %d/%d "
+ "in %u ms\n", FUNC_ADPT_ARG(padapter),
+ MAC_ARG(da), rtw_get_oper_ch23a(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt,
+ jiffies_to_msecs(jiffies - start));
+ else
+ DBG_8723A(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter),
+ rtw_get_oper_ch23a(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt,
+ jiffies_to_msecs(jiffies - start));
+ }
+exit:
+ return ret;
+}
+
+/* if psta == NULL, it indicates that we are the station (client) now... */
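+/* For shared-key authentication, frame #3 carries the challenge text: the WEP
+ * IV/key-index header is prepended, the privacy bit is set and the frame is
+ * WEP-encrypted via rtw_wep_encrypt23a() before transmission.
+ */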
+void issue_auth23a(struct rtw_adapter *padapter, struct sta_info *psta,
+ unsigned short status)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned int val32;
+ unsigned short val16;
+ int use_shared_key = 0;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_AUTH);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ if (psta) { /* for AP mode */
+#ifdef CONFIG_8723AU_AP_MODE
+
+ ether_addr_copy(pwlanhdr->addr1, psta->hwaddr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, myid(&padapter->eeprompriv));
+
+ /* setting auth algo number */
+ val16 = (u16)psta->authalg;
+
+ if (status != WLAN_STATUS_SUCCESS)
+ val16 = 0;
+
+ if (val16) {
+ val16 = cpu_to_le16(val16);
+ use_shared_key = 1;
+ }
+
+ pframe = rtw_set_fixed_ie23a(pframe, _AUTH_ALGM_NUM_,
+ (unsigned char *)&val16,
+ &pattrib->pktlen);
+
+ /* setting auth seq number */
+ val16 = (u16)psta->auth_seq;
+ val16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie23a(pframe, _AUTH_SEQ_NUM_,
+ (unsigned char *)&val16,
+ &pattrib->pktlen);
+
+ /* setting status code... */
+ val16 = status;
+ val16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie23a(pframe, _STATUS_CODE_,
+ (unsigned char *)&val16,
+ &pattrib->pktlen);
+
+ /* added challenging text... */
+ if ((psta->auth_seq == 2) &&
+ (psta->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1))
+ pframe = rtw_set_ie23a(pframe, _CHLGETXT_IE_, 128,
+ psta->chg_txt, &pattrib->pktlen);
+#endif
+ } else {
+ ether_addr_copy(pwlanhdr->addr1,
+ get_my_bssid23a(&pmlmeinfo->network));
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3,
+ get_my_bssid23a(&pmlmeinfo->network));
+
+ /* setting auth algo number */
+ /* 0:OPEN System, 1:Shared key */
+ val16 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)? 1: 0;
+ if (val16) {
+ val16 = cpu_to_le16(val16);
+ use_shared_key = 1;
+ }
+ /* DBG_8723A("%s auth_algo = %s auth_seq =%d\n", __func__,
+ (pmlmeinfo->auth_algo == 0)?"OPEN":"SHARED",
+ pmlmeinfo->auth_seq); */
+
+ /* setting IV for auth seq #3 */
+ if ((pmlmeinfo->auth_seq == 3) &&
+ (pmlmeinfo->state & WIFI_FW_AUTH_STATE) &&
+ (use_shared_key == 1)) {
+ /* DBG_8723A("==> iv(%d), key_index(%d)\n",
+ pmlmeinfo->iv, pmlmeinfo->key_index); */
+ val32 = ((pmlmeinfo->iv++) |
+ (pmlmeinfo->key_index << 30));
+ val32 = cpu_to_le32(val32);
+ pframe = rtw_set_fixed_ie23a(pframe, 4,
+ (unsigned char *)&val32,
+ &pattrib->pktlen);
+
+ pattrib->iv_len = 4;
+ }
+
+ pframe = rtw_set_fixed_ie23a(pframe, _AUTH_ALGM_NUM_,
+ (unsigned char *)&val16,
+ &pattrib->pktlen);
+
+ /* setting auth seq number */
+ val16 = pmlmeinfo->auth_seq;
+ val16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie23a(pframe, _AUTH_SEQ_NUM_,
+ (unsigned char *)&val16,
+ &pattrib->pktlen);
+
+ /* setting status code... */
+ val16 = status;
+ val16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie23a(pframe, _STATUS_CODE_,
+ (unsigned char *)&val16,
+ &pattrib->pktlen);
+
+ /* then checking to see if sending challenging text... */
+ if ((pmlmeinfo->auth_seq == 3) &&
+ (pmlmeinfo->state & WIFI_FW_AUTH_STATE) &&
+ (use_shared_key == 1)) {
+ pframe = rtw_set_ie23a(pframe, _CHLGETXT_IE_, 128,
+ pmlmeinfo->chg_txt,
+ &pattrib->pktlen);
+
+ SetPrivacy(fctrl);
+
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pattrib->encrypt = _WEP40_;
+
+ pattrib->icv_len = 4;
+
+ pattrib->pktlen += pattrib->icv_len;
+ }
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ rtw_wep_encrypt23a(padapter, pmgntframe);
+ DBG_8723A("%s\n", __func__);
+ dump_mgntframe23a(padapter, pmgntframe);
+
+ return;
+}
+
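+/* AP mode: build an (re)association response echoing our capability, the
+ * given status code and the station's AID, then copy the HT, WMM, Realtek
+ * vendor, WPS and (optionally) WFD IEs into the frame.
+ */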
+void issue_asocrsp23a(struct rtw_adapter *padapter, unsigned short status,
+ struct sta_info *pstat, int pkt_type)
+{
+#ifdef CONFIG_8723AU_AP_MODE
+ struct xmit_frame *pmgntframe;
+ struct ieee80211_hdr *pwlanhdr;
+ struct pkt_attrib *pattrib;
+ unsigned char *pbuf, *pframe;
+ unsigned short val;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
+ u8 *ie = pnetwork->IEs;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+
+ DBG_8723A("%s\n", __func__);
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ pwlanhdr->frame_control = 0;
+
+ ether_addr_copy(pwlanhdr->addr1, pstat->hwaddr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, get_my_bssid23a(&pmlmeinfo->network));
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ if ((pkt_type == WIFI_ASSOCRSP) || (pkt_type == WIFI_REASSOCRSP))
+ SetFrameSubType(pwlanhdr, pkt_type);
+ else
+ return;
+
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen += pattrib->hdrlen;
+ pframe += pattrib->hdrlen;
+
+ /* capability */
+ val = *(unsigned short *)rtw_get_capability23a_from_ie(ie);
+
+ pframe = rtw_set_fixed_ie23a(pframe, _CAPABILITY_,
+ (unsigned char *)&val, &pattrib->pktlen);
+
+ status = cpu_to_le16(status);
+ pframe = rtw_set_fixed_ie23a(pframe, _STATUS_CODE_,
+ (unsigned char *)&status,
+ &pattrib->pktlen);
+
+ val = cpu_to_le16(pstat->aid | BIT(14) | BIT(15));
+ pframe = rtw_set_fixed_ie23a(pframe, _ASOC_ID_, (unsigned char *)&val,
+ &pattrib->pktlen);
+
+ if (pstat->bssratelen <= 8) {
+ pframe = rtw_set_ie23a(pframe, _SUPPORTEDRATES_IE_,
+ pstat->bssratelen, pstat->bssrateset,
+ &pattrib->pktlen);
+ } else {
+ pframe = rtw_set_ie23a(pframe, _SUPPORTEDRATES_IE_, 8,
+ pstat->bssrateset, &pattrib->pktlen);
+ pframe = rtw_set_ie23a(pframe, _EXT_SUPPORTEDRATES_IE_,
+ pstat->bssratelen - 8,
+ pstat->bssrateset + 8, &pattrib->pktlen);
+ }
+
+ if ((pstat->flags & WLAN_STA_HT) && (pmlmepriv->htpriv.ht_option)) {
+ uint ie_len = 0;
+
+ /* FILL HT CAP INFO IE */
+ /* p = hostapd_eid_ht_capabilities_info(hapd, p); */
+ pbuf = rtw_get_ie23a(ie + _BEACON_IE_OFFSET_,
+ _HT_CAPABILITY_IE_, &ie_len,
+ pnetwork->IELength - _BEACON_IE_OFFSET_);
+ if (pbuf && ie_len>0) {
+ memcpy(pframe, pbuf, ie_len + 2);
+ pframe += (ie_len + 2);
+ pattrib->pktlen += (ie_len + 2);
+ }
+
+ /* FILL HT ADD INFO IE */
+ /* p = hostapd_eid_ht_operation(hapd, p); */
+ pbuf = rtw_get_ie23a(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_,
+ &ie_len,
+ pnetwork->IELength - _BEACON_IE_OFFSET_);
+ if (pbuf && ie_len > 0) {
+ memcpy(pframe, pbuf, ie_len + 2);
+ pframe += (ie_len + 2);
+ pattrib->pktlen += (ie_len + 2);
+ }
+ }
+
+ /* FILL WMM IE */
+ if ((pstat->flags & WLAN_STA_WME) && pmlmepriv->qospriv.qos_option) {
+ uint ie_len = 0;
+ unsigned char WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02,
+ 0x01, 0x01};
+
+ for (pbuf = ie + _BEACON_IE_OFFSET_; ; pbuf += (ie_len + 2)) {
+ pbuf = rtw_get_ie23a(pbuf, _VENDOR_SPECIFIC_IE_,
+ &ie_len, (pnetwork->IELength -
+ _BEACON_IE_OFFSET_ -
+ (ie_len + 2)));
+ if (pbuf && !memcmp(pbuf + 2, WMM_PARA_IE, 6)) {
+ memcpy(pframe, pbuf, ie_len + 2);
+ pframe += (ie_len + 2);
+ pattrib->pktlen += (ie_len + 2);
+
+ break;
+ }
+
+ if ((!pbuf) || (ie_len == 0))
+ break;
+ }
+ }
+
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK) {
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, 6,
+ REALTEK_96B_IE23A, &pattrib->pktlen);
+ }
+
+ /* add WPS IE ie for wps 2.0 */
+ if (pmlmepriv->wps_assoc_resp_ie &&
+ pmlmepriv->wps_assoc_resp_ie_len > 0) {
+ memcpy(pframe, pmlmepriv->wps_assoc_resp_ie,
+ pmlmepriv->wps_assoc_resp_ie_len);
+
+ pframe += pmlmepriv->wps_assoc_resp_ie_len;
+ pattrib->pktlen += pmlmepriv->wps_assoc_resp_ie_len;
+ }
+
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) &&
+ pwdinfo->wfd_info->wfd_enable) {
+ wfdielen = build_assoc_resp_wfd_ie(pwdinfo, pframe);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+#endif
+}
+
+void issue_assocreq23a(struct rtw_adapter *padapter)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe, *p;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned short val16;
+ unsigned int i, j, ie_len, index = 0;
+ unsigned char rf_type, bssrate[NumRates], sta_bssrate[NumRates];
+ struct ndis_802_11_var_ies *pIE;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ int bssrate_len = 0, sta_bssrate_len = 0;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 p2pie[255] = { 0x00 };
+ u16 p2pielen = 0;
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+ ether_addr_copy(pwlanhdr->addr1, get_my_bssid23a(&pmlmeinfo->network));
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, get_my_bssid23a(&pmlmeinfo->network));
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ASSOCREQ);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ /* caps */
+ memcpy(pframe, rtw_get_capability23a_from_ie(pmlmeinfo->network.IEs),
+ 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* listen interval */
+ /* todo: listen interval for power saving */
+ val16 = cpu_to_le16(3);
+ memcpy(pframe, (unsigned char *)&val16, 2);
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* SSID */
+ pframe = rtw_set_ie23a(pframe, _SSID_IE_,
+ pmlmeinfo->network.Ssid.ssid_len,
+ pmlmeinfo->network.Ssid.ssid, &pattrib->pktlen);
+
+ /* supported rate & extended supported rate */
+
+ get_rate_set23a(padapter, sta_bssrate, &sta_bssrate_len);
+ /* DBG_8723A("sta_bssrate_len =%d\n", sta_bssrate_len); */
+
+	/* for JAPAN, channel 14 can only use B mode (CCK) */
+ if (pmlmeext->cur_channel == 14)
+ sta_bssrate_len = 4;
+
+ /* for (i = 0; i < sta_bssrate_len; i++) { */
+ /* DBG_8723A("sta_bssrate[%d]=%02X\n", i, sta_bssrate[i]); */
+ /* */
+
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ if (pmlmeinfo->network.SupportedRates[i] == 0)
+ break;
+ DBG_8723A("network.SupportedRates[%d]=%02X\n", i,
+ pmlmeinfo->network.SupportedRates[i]);
+ }
+
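+ /* Build the joint rate set: keep only the AP's advertised rates
+ that the STA also supports; both sides are OR'd with
+ IEEE80211_BASIC_RATE_MASK so the basic-rate flag does not
+ affect the comparison. */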
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ if (pmlmeinfo->network.SupportedRates[i] == 0)
+ break;
+
+ /* Check if the AP's supported rates are also
+ supported by STA. */
+ for (j = 0; j < sta_bssrate_len; j++) {
+ /* Avoid the proprietary data rate (22Mbps) of
+ Handlink WSG-4000 AP */
+ if ((pmlmeinfo->network.SupportedRates[i] |
+ IEEE80211_BASIC_RATE_MASK) ==
+ (sta_bssrate[j]|IEEE80211_BASIC_RATE_MASK)) {
+ /* DBG_8723A("match i = %d, j =%d\n", i, j); */
+ break;
+ }
+ }
+
+ if (j == sta_bssrate_len) {
+ /* the rate is not supported by STA */
+ DBG_8723A("%s(): the rate[%d]=%02X is not supported by "
+ "STA!\n", __func__, i,
+ pmlmeinfo->network.SupportedRates[i]);
+ } else {
+ /* the rate is supported by STA */
+ bssrate[index++] = pmlmeinfo->network.SupportedRates[i];
+ }
+ }
+
+ bssrate_len = index;
+ DBG_8723A("bssrate_len = %d\n", bssrate_len);
+
+ if (bssrate_len == 0) {
+ rtw_free_xmitbuf23a(pxmitpriv, pmgntframe->pxmitbuf);
+ rtw_free_xmitframe23a(pxmitpriv, pmgntframe);
+ goto exit; /* don't connect to AP if no joint supported rate */
+ }
+
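+ /* The Supported Rates IE carries at most 8 rates; any remaining
+ rates go into the Extended Supported Rates IE. */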
+ if (bssrate_len > 8) {
+ pframe = rtw_set_ie23a(pframe, _SUPPORTEDRATES_IE_, 8,
+ bssrate, &pattrib->pktlen);
+ pframe = rtw_set_ie23a(pframe, _EXT_SUPPORTEDRATES_IE_,
+ (bssrate_len - 8), (bssrate + 8),
+ &pattrib->pktlen);
+ } else
+ pframe = rtw_set_ie23a(pframe, _SUPPORTEDRATES_IE_,
+ bssrate_len, bssrate, &pattrib->pktlen);
+
+ /* RSN */
+ p = rtw_get_ie23a((pmlmeinfo->network.IEs +
+ sizeof(struct ndis_802_11_fixed_ies)), _RSN_IE_2_,
+ &ie_len, (pmlmeinfo->network.IELength -
+ sizeof(struct ndis_802_11_fixed_ies)));
+ if (p)
+ pframe = rtw_set_ie23a(pframe, _RSN_IE_2_, ie_len, (p + 2),
+ &pattrib->pktlen);
+
+ /* HT caps */
+ if (padapter->mlmepriv.htpriv.ht_option == true) {
+ p = rtw_get_ie23a((pmlmeinfo->network.IEs +
+ sizeof(struct ndis_802_11_fixed_ies)),
+ _HT_CAPABILITY_IE_, &ie_len,
+ (pmlmeinfo->network.IELength -
+ sizeof(struct ndis_802_11_fixed_ies)));
+ if ((p != NULL) && (!(is_ap_in_tkip23a(padapter)))) {
+ memcpy(&pmlmeinfo->HT_caps, (p + 2),
+ sizeof(struct HT_caps_element));
+
+ /* disable 40 MHz support while gd_bw_40MHz_en = 0 */
+ if (pregpriv->cbw40_enable == 0) {
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info &= (~(BIT(6) | BIT(1)));
+ } else {
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= BIT(1);
+ }
+
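+ /* Bits 2-3 of the HT caps info are the SM Power Save field;
+ OR-ing in 0x000c below marks it as disabled. */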
+ /* todo: disable SM power save mode */
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |=
+ 0x000c;
+
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_RF_TYPE,
+ (u8 *)(&rf_type));
+ /* switch (pregpriv->rf_config) */
+ switch (rf_type) {
+ case RF_1T1R:
+
+ if (pregpriv->rx_stbc)
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
+
+ memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_1R23A, 16);
+ break;
+
+ case RF_2T2R:
+ case RF_1T2R:
+ default:
+
+ /* enable for 2.4/5 GHz */
+ if ((pregpriv->rx_stbc == 0x3) ||
+ ((pmlmeext->cur_wireless_mode &
+ WIRELESS_11_24N) &&
+ /* enable for 2.4GHz */
+ (pregpriv->rx_stbc == 0x1)) ||
+ ((pmlmeext->cur_wireless_mode &
+ WIRELESS_11_5N) &&
+ (pregpriv->rx_stbc == 0x2)) ||
+ /* enable for 5GHz */
+ (pregpriv->wifi_spec == 1)) {
+ DBG_8723A("declare supporting RX "
+ "STBC\n");
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0200);/* RX STBC two spatial stream */
+ }
+ memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_2R23A, 16);
+ break;
+ }
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info =
+ cpu_to_le16(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info);
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ if (BT_1Ant(padapter) == true) {
+ /* set to 8K */
+ pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para &= (u8)~IEEE80211_HT_AMPDU_PARM_FACTOR;
+/* pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para |= MAX_AMPDU_FACTOR_8K */
+ }
+#endif
+
+ pframe = rtw_set_ie23a(pframe, _HT_CAPABILITY_IE_,
+ ie_len,
+ (u8 *)&pmlmeinfo->HT_caps,
+ &pattrib->pktlen);
+ }
+ }
+
+ /* vendor specific IE, such as WPA, WMM, WPS */
+ for (i = sizeof(struct ndis_802_11_fixed_ies);
+ i < pmlmeinfo->network.IELength;) {
+ pIE = (struct ndis_802_11_var_ies *)
+ (pmlmeinfo->network.IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if (!memcmp(pIE->data, RTW_WPA_OUI23A, 4) ||
+ !memcmp(pIE->data, WMM_OUI23A, 4) ||
+ !memcmp(pIE->data, WPS_OUI23A, 4)) {
+ if (!padapter->registrypriv.wifi_spec) {
+ /* Commented by Kurt 20110629 */
+ /* With some older APs, the WPS handshake
+ fails if we append vendor extension
+ information, so trim the WPS IE. */
+ if (!memcmp(pIE->data, WPS_OUI23A, 4))
+ pIE->Length = 14;
+ }
+ pframe = rtw_set_ie23a(pframe,
+ _VENDOR_SPECIFIC_IE_,
+ pIE->Length, pIE->data,
+ &pattrib->pktlen);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, 6,
+ REALTEK_96B_IE23A, &pattrib->pktlen);
+
+#ifdef CONFIG_8723AU_P2P
+
+ if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled) {
+ if (pmlmepriv->p2p_assoc_req_ie &&
+ pmlmepriv->p2p_assoc_req_ie_len>0) {
+ memcpy(pframe, pmlmepriv->p2p_assoc_req_ie,
+ pmlmepriv->p2p_assoc_req_ie_len);
+ pframe += pmlmepriv->p2p_assoc_req_ie_len;
+ pattrib->pktlen += pmlmepriv->p2p_assoc_req_ie_len;
+ }
+ } else {
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) &&
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE)) {
+ /* Should add the P2P IE in the association
+ request frame. */
+ /* P2P OUI */
+
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20101109 */
+ /* According to the P2P Specification, the
+ association request frame should contain
+ 3 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Extended Listen Timing */
+ /* 3. Device Info */
+ /* Commented by Albert 20110516 */
+ /* 4. P2P Interface */
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] =
+ P2P_GRPCAP_PERSISTENT_GROUP |
+ DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
+
+ /* Extended Listen Timing */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x0004);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config
+ Methods (2bytes) + Primary Device
+ Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device
+ Name ID field (2bytes) + WPS Device Name
+ Len field (2bytes) */
+ *(u16*) (p2pie + p2pielen) =
+ cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen,
+ myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+ /* This field is big endian, as noted in the
+ P2P specification. */
+ if ((pwdinfo->ui_got_wps_info ==
+ P2P_GOT_WPSINFO_PEER_DISPLAY_PIN) ||
+ (pwdinfo->ui_got_wps_info ==
+ P2P_GOT_WPSINFO_SELF_DISPLAY_PIN))
+ *(u16*) (p2pie + p2pielen) =
+ cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY);
+ else
+ *(u16*) (p2pie + p2pielen) =
+ cpu_to_be16(WPS_CONFIG_METHOD_PBC);
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(u16*) (p2pie + p2pielen) =
+ cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(u16*) (p2pie + p2pielen) =
+ cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ /* No Secondary Device Type List */
+ p2pie[p2pielen++] = 0x00;
+
+ /* Device Name */
+ /* Type: */
+ *(u16*) (p2pie + p2pielen) =
+ cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) =
+ cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name,
+ pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ /* P2P Interface */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INTERFACE;
+
+ /* Length: */
+ *(u16*) (p2pie + p2pielen) = cpu_to_le16(0x000D);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr,
+ ETH_ALEN); /* P2P Device Address */
+ p2pielen += ETH_ALEN;
+
+ /* P2P Interface Address Count */
+ p2pie[p2pielen++] = 1;
+
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr,
+ ETH_ALEN); /* P2P Interface Address List */
+ p2pielen += ETH_ALEN;
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_,
+ p2pielen, (unsigned char *)p2pie,
+ &pattrib->pktlen);
+
+ /* wfdielen = build_assoc_req_wfd_ie(pwdinfo, pframe);*/
+ /* pframe += wfdielen; */
+ /* pattrib->pktlen += wfdielen; */
+ }
+ }
+
+ if (true == pwdinfo->wfd_info->wfd_enable) {
+ wfdielen = build_assoc_req_wfd_ie(pwdinfo, pframe);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+ } else if (pmlmepriv->wfd_assoc_req_ie != NULL &&
+ pmlmepriv->wfd_assoc_req_ie_len > 0) {
+ /* WFD IE */
+ memcpy(pframe, pmlmepriv->wfd_assoc_req_ie,
+ pmlmepriv->wfd_assoc_req_ie_len);
+ pattrib->pktlen += pmlmepriv->wfd_assoc_req_ie_len;
+ pframe += pmlmepriv->wfd_assoc_req_ie_len;
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+ dump_mgntframe23a(padapter, pmgntframe);
+
+ ret = _SUCCESS;
+
+exit:
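+ /* On success, cache a copy of the transmitted association request
+ (from the 802.11 header onward), presumably so it can be handed
+ to upper layers later; on failure just drop any stale copy. */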
+ pmlmepriv->assoc_req_len = 0;
+ if (ret == _SUCCESS) {
+ kfree(pmlmepriv->assoc_req);
+ pmlmepriv->assoc_req = kmalloc(pattrib->pktlen, GFP_ATOMIC);
+ if (pmlmepriv->assoc_req) {
+ memcpy(pmlmepriv->assoc_req, pwlanhdr,
+ pattrib->pktlen);
+ pmlmepriv->assoc_req_len = pattrib->pktlen;
+ }
+ } else
+ kfree(pmlmepriv->assoc_req);
+
+ return;
+}
+
+/* when wait_ack is true, this function should be called in process context */
+static int _issue_nulldata23a(struct rtw_adapter *padapter, unsigned char *da,
+ unsigned int power_mode, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+
+ /* DBG_8723A("%s:%d\n", __func__, power_mode); */
+
+ if (!padapter)
+ goto exit;
+
+ pxmitpriv = &padapter->xmitpriv;
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
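+ /* Direction bits: an AP sends null data with FromDS set,
+ a station sends it with ToDS set. */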
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ SetFrDs(fctrl);
+ else if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ SetToDs(fctrl);
+
+ if (power_mode)
+ SetPwrMgt(fctrl);
+
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, get_my_bssid23a(&pmlmeinfo->network));
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_DATA_NULL);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ if (wait_ack)
+ ret = dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe);
+ else {
+ dump_mgntframe23a(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+/* when wait_ms > 0, this function should be called in process context */
+/* da == NULL for station mode */
+int issue_nulldata23a(struct rtw_adapter *padapter, unsigned char *da,
+ unsigned int power_mode, int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ unsigned long start = jiffies;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ /* da == NULL: assume it is null data destined for the AP (STA mode) */
+ if (da == NULL)
+ da = get_my_bssid23a(&pmlmeinfo->network);
+
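+ /* When wait_ms > 0 each attempt waits for the unicast ACK and the
+ loop retries on failure, up to try_cnt times. With wait_ms == 0
+ the frame is sent without waiting, so the loop simply issues it
+ try_cnt times. */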
+ do {
+ ret = _issue_nulldata23a(padapter, da, power_mode,
+ wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ msleep(wait_ms);
+
+ } while((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_8723A(FUNC_ADPT_FMT" to "MAC_FMT", ch:%u%s, %d/%d "
+ "in %u ms\n", FUNC_ADPT_ARG(padapter),
+ MAC_ARG(da), rtw_get_oper_ch23a(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt,
+ jiffies_to_msecs(jiffies - start));
+ else
+ DBG_8723A(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter),
+ rtw_get_oper_ch23a(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt,
+ jiffies_to_msecs(jiffies - start));
+ }
+exit:
+ return ret;
+}
+
+/* when wait_ack is true, this function should be called in process context */
+static int _issue_qos_nulldata23a(struct rtw_adapter *padapter,
+ unsigned char *da, u16 tid, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl, *qc;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ DBG_8723A("%s\n", __func__);
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ pattrib->hdrlen += 2;
+ pattrib->qos_en = true;
+ pattrib->eosp = 1;
+ pattrib->ack_policy = 0;
+ pattrib->mdata = 0;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ SetFrDs(fctrl);
+ else if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ SetToDs(fctrl);
+
+ if (pattrib->mdata)
+ SetMData(fctrl);
+
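+ /* The QoS Control field occupies the last two bytes of the QoS
+ header; fill in TID, EOSP and ACK policy before sending. */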
+ qc = (unsigned short *)(pframe + pattrib->hdrlen - 2);
+
+ SetPriority(qc, tid);
+
+ SetEOSP(qc, pattrib->eosp);
+
+ SetAckpolicy(qc, pattrib->ack_policy);
+
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, get_my_bssid23a(&pmlmeinfo->network));
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
+
+ pframe += sizeof(struct ieee80211_qos_hdr);
+ pattrib->pktlen = sizeof(struct ieee80211_qos_hdr);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ if (wait_ack)
+ ret = dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe);
+ else {
+ dump_mgntframe23a(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+/* when wait_ms > 0, this function should be called in process context */
+/* da == NULL for station mode */
+int issue_qos_nulldata23a(struct rtw_adapter *padapter, unsigned char *da,
+ u16 tid, int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ unsigned long start = jiffies;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ /* da == NULL: assume it is null data destined for the AP (STA mode) */
+ if (da == NULL)
+ da = get_my_bssid23a(&pmlmeinfo->network);
+
+ do {
+ ret = _issue_qos_nulldata23a(padapter, da, tid,
+ wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ msleep(wait_ms);
+ } while((i < try_cnt) && ((ret == _FAIL)||(wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_8723A(FUNC_ADPT_FMT" to "MAC_FMT", ch:%u%s, %d/%d "
+ "in %u ms\n", FUNC_ADPT_ARG(padapter),
+ MAC_ARG(da), rtw_get_oper_ch23a(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt,
+ jiffies_to_msecs(jiffies - start));
+ else
+ DBG_8723A(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter),
+ rtw_get_oper_ch23a(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt,
+ jiffies_to_msecs(jiffies - start));
+ }
+exit:
+ return ret;
+}
+
+static int _issue_deauth23a(struct rtw_adapter *padapter, unsigned char *da,
+ unsigned short reason, u8 wait_ack)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ int ret = _FAIL;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_8723AU_P2P */
+
+ /* DBG_8723A("%s to "MAC_FMT"\n", __func__, MAC_ARG(da)); */
+
+#ifdef CONFIG_8723AU_P2P
+ if (!(rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) &&
+ (pwdinfo->rx_invitereq_info.scan_op_ch_only)) {
+ mod_timer(&pwdinfo->reset_ch_sitesurvey,
+ jiffies + msecs_to_jiffies(10));
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, get_my_bssid23a(&pmlmeinfo->network));
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_DEAUTH);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ reason = cpu_to_le16(reason);
+ /* the Reason Code fixed field is 2 bytes long */
+ pframe = rtw_set_fixed_ie23a(pframe, 2,
+ (unsigned char *)&reason,
+ &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ if (wait_ack)
+ ret = dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe);
+ else {
+ dump_mgntframe23a(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+int issue_deauth23a(struct rtw_adapter *padapter, unsigned char *da,
+ unsigned short reason)
+{
+ DBG_8723A("%s to "MAC_FMT"\n", __func__, MAC_ARG(da));
+ return _issue_deauth23a(padapter, da, reason, false);
+}
+
+int issue_deauth23a_ex23a(struct rtw_adapter *padapter, u8 *da,
+ unsigned short reason, int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ unsigned long start = jiffies;
+
+ do {
+ ret = _issue_deauth23a(padapter, da, reason,
+ wait_ms >0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ msleep(wait_ms);
+
+ } while((i < try_cnt) && ((ret == _FAIL)||(wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_8723A(FUNC_ADPT_FMT" to "MAC_FMT", ch:%u%s, %d/%d "
+ "in %u ms\n", FUNC_ADPT_ARG(padapter),
+ MAC_ARG(da), rtw_get_oper_ch23a(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt,
+ jiffies_to_msecs(jiffies - start));
+ else
+ DBG_8723A(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter),
+ rtw_get_oper_ch23a(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt,
+ jiffies_to_msecs(jiffies - start));
+ }
+exit:
+ return ret;
+}
+
+void issue_action_spct_ch_switch23a(struct rtw_adapter *padapter,
+ u8 *ra, u8 new_ch, u8 ch_offset)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ u8 category, action;
+
+ DBG_8723A(FUNC_NDEV_FMT" ra ="MAC_FMT", ch:%u, offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev), MAC_ARG(ra),
+ new_ch, ch_offset);
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ ether_addr_copy(pwlanhdr->addr1, ra); /* RA */
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv)); /* TA */
+ ether_addr_copy(pwlanhdr->addr3, ra); /* DA = RA */
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ /* category, action */
+ category = WLAN_CATEGORY_SPECTRUM_MGMT;
+ action = WLAN_ACTION_SPCT_CHL_SWITCH;
+
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+
+ pframe = rtw_set_ie23a_ch_switch (pframe, &pattrib->pktlen, 0,
+ new_ch, 0);
+ pframe = rtw_set_ie23a_secondary_ch_offset(pframe, &pattrib->pktlen,
+ hal_ch_offset_to_secondary_ch_offset23a(ch_offset));
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+}
+
+void issue_action_BA23a(struct rtw_adapter *padapter, unsigned char *raddr,
+ unsigned char action, unsigned short status)
+{
+ u8 category = WLAN_CATEGORY_BACK;
+ u16 start_seq;
+ u16 BA_para_set;
+ u16 reason_code;
+ u16 BA_timeout_value;
+ u16 BA_starting_seqctrl = 0; /* avoid an uninitialized value if the STA lookup fails */
+ int max_rx_ampdu_factor;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ u8 *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+#ifdef CONFIG_8723AU_BT_COEXIST
+ u8 tendaAPMac[] = {0xC8, 0x3A, 0x35};
+#endif
+
+ DBG_8723A("%s, category =%d, action =%d, status =%d\n",
+ __func__, category, action, status);
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ /* memcpy(pwlanhdr->addr1, get_my_bssid23a(&pmlmeinfo->network), ETH_ALEN); */
+ ether_addr_copy(pwlanhdr->addr1, raddr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, get_my_bssid23a(&pmlmeinfo->network));
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+
+ status = cpu_to_le16(status);
+
+ if (category != 3)
+ goto out;
+
+ switch (action) {
+ case 0: /* ADDBA req */
+ do {
+ pmlmeinfo->dialogToken++;
+ } while (pmlmeinfo->dialogToken == 0);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &pmlmeinfo->dialogToken,
+ &pattrib->pktlen);
+
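+ /* BA Parameter Set layout: bit 0 A-MSDU supported, bit 1 BA
+ policy (1 = immediate), bits 2-5 TID, bits 6-15 buffer size.
+ The 0x1002 default below therefore requests immediate block ack
+ with a 64-MSDU reorder buffer. */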
+#ifdef CONFIG_8723AU_BT_COEXIST
+ if ((BT_1Ant(padapter) == true) &&
+ ((pmlmeinfo->assoc_AP_vendor != broadcomAP) ||
+ memcmp(raddr, tendaAPMac, 3))) {
+ /* A-MSDU NOT Supported */
+ BA_para_set = 0;
+ /* immediate Block Ack */
+ BA_para_set |= (1 << 1) &
+ IEEE80211_ADDBA_PARAM_POLICY_MASK;
+ /* TID */
+ BA_para_set |= (status << 2) &
+ IEEE80211_ADDBA_PARAM_TID_MASK;
+ /* max buffer size is 8 MSDU */
+ BA_para_set |= (8 << 6) &
+ IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
+ } else
+#endif
+ {
+ /* immediate ack & 64 buffer size */
+ BA_para_set = (0x1002 | ((status & 0xf) << 2));
+ }
+ BA_para_set = cpu_to_le16(BA_para_set);
+ pframe = rtw_set_fixed_ie23a(pframe, 2,
+ (unsigned char *)&BA_para_set,
+ &pattrib->pktlen);
+
+ BA_timeout_value = 5000; /* 5000 TUs */
+ BA_timeout_value = cpu_to_le16(BA_timeout_value);
+ pframe = rtw_set_fixed_ie23a(pframe, 2, (unsigned char *)
+ &BA_timeout_value,
+ &pattrib->pktlen);
+
+ /* if ((psta = rtw_get_stainfo23a(pstapriv,
+ pmlmeinfo->network.MacAddress)) != NULL) */
+ if ((psta = rtw_get_stainfo23a(pstapriv, raddr))) {
+ start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07]&0xfff) + 1;
+
+ DBG_8723A("BA_starting_seqctrl = %d for TID =%d\n",
+ start_seq, status & 0x07);
+
+ psta->BA_starting_seqctrl[status & 0x07] = start_seq;
+
+ BA_starting_seqctrl = start_seq << 4;
+ }
+
+ BA_starting_seqctrl = cpu_to_le16(BA_starting_seqctrl);
+ pframe = rtw_set_fixed_ie23a(pframe, 2, (unsigned char *)&BA_starting_seqctrl, &pattrib->pktlen);
+ break;
+
+ case 1: /* ADDBA rsp */
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &pmlmeinfo->ADDBA_req.dialog_token, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 2,
+ (unsigned char *)&status,
+ &pattrib->pktlen);
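+ /* Advertise a reorder buffer size matching the local maximum RX
+ A-MPDU factor (64/32/16/8 MSDUs) while keeping bits 0-5
+ (A-MSDU, policy and TID) from the requester's parameter set. */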
+ rtw_hal_get_def_var23a(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR,
+ &max_rx_ampdu_factor);
+ if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_64K)
+ BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
+ else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_32K)
+ BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0800); /* 32 buffer size */
+ else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_16K)
+ BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0400); /* 16 buffer size */
+ else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_8K)
+ BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0200); /* 8 buffer size */
+ else
+ BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ if ((BT_1Ant(padapter) == true) &&
+ ((pmlmeinfo->assoc_AP_vendor != broadcomAP) ||
+ memcmp(raddr, tendaAPMac, 3))) {
+ /* max buffer size is 8 MSDU */
+ BA_para_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
+ BA_para_set |= (8 << 6) &
+ IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
+ }
+#endif
+
+ if (pregpriv->ampdu_amsdu == 0)/* disabled */
+ BA_para_set = cpu_to_le16(BA_para_set & ~BIT(0));
+ else if (pregpriv->ampdu_amsdu == 1)/* enabled */
+ BA_para_set = cpu_to_le16(BA_para_set | BIT(0));
+ else /* auto */
+ BA_para_set = cpu_to_le16(BA_para_set);
+
+ pframe = rtw_set_fixed_ie23a(pframe, 2,
+ (unsigned char *)&BA_para_set,
+ &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 2, (unsigned char *)&pmlmeinfo->ADDBA_req.BA_timeout_value, &pattrib->pktlen);
+ break;
+ case 2:/* DELBA */
+ BA_para_set = (status & 0x1F) << 3;
+ BA_para_set = cpu_to_le16(BA_para_set);
+ pframe = rtw_set_fixed_ie23a(pframe, 2,
+ (unsigned char *)&BA_para_set,
+ &pattrib->pktlen);
+
+ reason_code = 37;/* Requested from peer STA as it does not
+ want to use the mechanism */
+ reason_code = cpu_to_le16(reason_code);
+ pframe = rtw_set_fixed_ie23a(pframe, 2,
+ (unsigned char *)&reason_code,
+ &pattrib->pktlen);
+ break;
+ default:
+ break;
+ }
+
+out:
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+}
+
+static void issue_action_BSSCoexistPacket(struct rtw_adapter *padapter)
+{
+ struct list_head *plist, *phead, *ptmp;
+ unsigned char category, action;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct wlan_network *pnetwork = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct rtw_queue *queue = &pmlmepriv->scanned_queue;
+ u8 InfoContent[16] = {0};
+ u8 ICS[8][15];
+
+ if ((pmlmepriv->num_FortyMHzIntolerant == 0) || (pmlmepriv->num_sta_no_ht == 0))
+ return;
+
+ if (true == pmlmeinfo->bwmode_updated)
+ return;
+
+ DBG_8723A("%s\n", __func__);
+
+ category = WLAN_CATEGORY_PUBLIC;
+ action = ACT_PUBLIC_BSSCOEXIST;
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ {
+ return;
+ }
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ ether_addr_copy(pwlanhdr->addr1, get_my_bssid23a(&pmlmeinfo->network));
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, get_my_bssid23a(&pmlmeinfo->network));
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+
+ /* */
+ if (pmlmepriv->num_FortyMHzIntolerant>0)
+ {
+ u8 iedata = 0;
+
+ iedata |= BIT(2);/* 20 MHz BSS Width Request */
+
+ pframe = rtw_set_ie23a(pframe, EID_BSSCoexistence, 1, &iedata, &pattrib->pktlen);
+
+ }
+
+ /* */
+ memset(ICS, 0, sizeof(ICS));
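+ /* Walk the scan results and mark, per 2.4 GHz channel, where a
+ non-HT BSS was seen (column 0 of a row doubles as a "row in
+ use" flag); each marked row is then reported in a 20/40 BSS
+ Intolerant Channel Report IE. */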
+ if (pmlmepriv->num_sta_no_ht>0)
+ {
+ int i;
+
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+
+ phead = get_list_head(queue);
+ plist = phead->next;
+
+ list_for_each_safe(plist, ptmp, phead) {
+ int len;
+ u8 *p;
+ struct wlan_bssid_ex *pbss_network;
+
+ pnetwork = container_of(plist, struct wlan_network,
+ list);
+
+ pbss_network = &pnetwork->network;
+
+ p = rtw_get_ie23a(pbss_network->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->IELength - _FIXED_IE_LENGTH_);
+ if ((p == NULL) || (len == 0))/* non-HT */
+ {
+ if ((pbss_network->Configuration.DSConfig<= 0) || (pbss_network->Configuration.DSConfig>14))
+ continue;
+
+ ICS[0][pbss_network->Configuration.DSConfig]= 1;
+
+ if (ICS[0][0] == 0)
+ ICS[0][0] = 1;
+ }
+
+ }
+
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+
+ for (i = 0;i<8;i++)
+ {
+ if (ICS[i][0] == 1)
+ {
+ int j, k = 0;
+
+ InfoContent[k] = i;
+ /* SET_BSS_INTOLERANT_ELE_REG_CLASS(InfoContent, i); */
+ k++;
+
+ for (j = 1;j<= 14;j++)
+ {
+ if (ICS[i][j]== 1)
+ {
+ if (k<16)
+ {
+ InfoContent[k] = j; /* channel number */
+ /* SET_BSS_INTOLERANT_ELE_CHANNEL(InfoContent+k, j); */
+ k++;
+ }
+ }
+ }
+
+ pframe = rtw_set_ie23a(pframe, EID_BSSIntolerantChlReport, k, InfoContent, &pattrib->pktlen);
+
+ }
+
+ }
+
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+}
+
+unsigned int send_delba23a(struct rtw_adapter *padapter, u8 initiator, u8 *addr)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+ /* struct recv_reorder_ctrl *preorder_ctrl; */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u16 tid;
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
+ return _SUCCESS;
+
+ psta = rtw_get_stainfo23a(pstapriv, addr);
+ if (psta == NULL)
+ return _SUCCESS;
+
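+ /* initiator == 0: we are the recipient, tear down the RX reorder
+ state; initiator == 1: we are the originator, clear the TX
+ aggregation bitmaps. A DELBA action frame is sent for each
+ active TID. */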
+ if (initiator == 0) { /* recipient */
+ for (tid = 0; tid < MAXTID; tid++) {
+ if (psta->recvreorder_ctrl[tid].enable == true) {
+ DBG_8723A("rx agg disable tid(%d)\n", tid);
+ issue_action_BA23a(padapter, addr, WLAN_ACTION_DELBA, (((tid <<1) |initiator)&0x1F));
+ psta->recvreorder_ctrl[tid].enable = false;
+ psta->recvreorder_ctrl[tid].indicate_seq = 0xffff;
+ }
+ }
+ } else if (initiator == 1) { /* originator */
+ for (tid = 0; tid < MAXTID; tid++) {
+ if (psta->htpriv.agg_enable_bitmap & BIT(tid)) {
+ DBG_8723A("tx agg disable tid(%d)\n", tid);
+ issue_action_BA23a(padapter, addr, WLAN_ACTION_DELBA, (((tid <<1) |initiator)&0x1F));
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+
+ }
+ }
+ }
+ return _SUCCESS;
+}
+
+unsigned int send_beacon23a(struct rtw_adapter *padapter)
+{
+ u8 bxmitok = false;
+ int issue = 0;
+ int poll = 0;
+ unsigned long start = jiffies;
+ unsigned int passing_time;
+
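+ /* Clear the BCN_VALID flag, then repeatedly download the beacon
+ and poll the flag (yielding between polls) until the hardware
+ reports it valid, giving up after 100 issues or when the
+ adapter is stopped or removed. */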
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BCN_VALID, NULL);
+ do {
+ issue_beacon23a(padapter, 100);
+ issue++;
+ do {
+ yield();
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
+ poll++;
+ } while ((poll%10)!= 0 && false == bxmitok &&
+ !padapter->bSurpriseRemoved &&
+ !padapter->bDriverStopped);
+
+ } while (!bxmitok && issue<100 && !padapter->bSurpriseRemoved &&
+ !padapter->bDriverStopped);
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return _FAIL;
+
+ passing_time = jiffies_to_msecs(jiffies - start);
+
+ if (!bxmitok) {
+ DBG_8723A("%s fail! %u ms\n", __func__, passing_time);
+ return _FAIL;
+ } else {
+
+ if (passing_time > 100 || issue > 3)
+ DBG_8723A("%s success, issue:%d, poll:%d, %u ms\n",
+ __func__, issue, poll, passing_time);
+ return _SUCCESS;
+ }
+}
+
+/****************************************************************************
+
+Following are some utility functions for WiFi MLME
+
+*****************************************************************************/
+
+bool IsLegal5GChannel(struct rtw_adapter *Adapter, u8 channel)
+{
+
+ int i = 0;
+ u8 Channel_5G[45] = {36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
+ 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122,
+ 124, 126, 128, 130, 132, 134, 136, 138, 140, 149, 151, 153, 155, 157, 159,
+ 161, 163, 165};
+ for (i = 0; i < sizeof(Channel_5G); i++)
+ if (channel == Channel_5G[i])
+ return true;
+ return false;
+}
+
+void site_survey23a(struct rtw_adapter *padapter)
+{
+ unsigned char survey_channel = 0, val8;
+ enum rt_scan_type ScanType = SCAN_PASSIVE;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u32 initialgain = 0;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) ||
+ (pwdinfo->p2p_info.scan_op_ch_only)) {
+ if (pwdinfo->rx_invitereq_info.scan_op_ch_only)
+ survey_channel = pwdinfo->rx_invitereq_info.operation_ch[pmlmeext->sitesurvey_res.channel_idx];
+ else
+ survey_channel = pwdinfo->p2p_info.operation_ch[pmlmeext->sitesurvey_res.channel_idx];
+ ScanType = SCAN_ACTIVE;
+ } else if (rtw_p2p_findphase_ex_is_social(pwdinfo)) {
+ /* The driver is in the find phase; it should scan the social channels. */
+ int ch_set_idx;
+ survey_channel = pwdinfo->social_chan[pmlmeext->sitesurvey_res.channel_idx];
+ ch_set_idx = rtw_ch_set_search_ch23a(pmlmeext->channel_set, survey_channel);
+ if (ch_set_idx >= 0)
+ ScanType = pmlmeext->channel_set[ch_set_idx].ScanType;
+ else
+ ScanType = SCAN_ACTIVE;
+ } else
+#endif /* CONFIG_8723AU_P2P */
+ {
+ struct rtw_ieee80211_channel *ch;
+ if (pmlmeext->sitesurvey_res.channel_idx < pmlmeext->sitesurvey_res.ch_num) {
+ ch = &pmlmeext->sitesurvey_res.ch[pmlmeext->sitesurvey_res.channel_idx];
+ survey_channel = ch->hw_value;
+ ScanType = (ch->flags & IEEE80211_CHAN_NO_IR) ? SCAN_PASSIVE : SCAN_ACTIVE;
+ }
+ }
+
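+ /* A non-zero survey_channel means there is another channel to
+ scan: switch to it, optionally send probe requests (active
+ scan) and re-arm the survey timer. Otherwise the scan is done
+ and the original channel, MSR and gain settings are restored
+ below. */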
+ if (survey_channel != 0) {
+ /* PAUSE 4-AC Queue when site_survey23a */
+ /* rtw23a_hal_get_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+ /* val8 |= 0x0f; */
+ /* rtw_hal_set_hwreg23a(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+ if (pmlmeext->sitesurvey_res.channel_idx == 0)
+ set_channel_bwmode23a(padapter, survey_channel,
+ HAL_PRIME_CHNL_OFFSET_DONT_CARE,
+ HT_CHANNEL_WIDTH_20);
+ else
+ SelectChannel23a(padapter, survey_channel);
+
+ if (ScanType == SCAN_ACTIVE) /* obey the channel plan setting... */
+ {
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) ||
+ rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH)
+ )
+ {
+ issue23a_probereq_p2p(padapter, NULL);
+ issue23a_probereq_p2p(padapter, NULL);
+ issue23a_probereq_p2p(padapter, NULL);
+ }
+ else
+#endif /* CONFIG_8723AU_P2P */
+ {
+ int i;
+ for (i = 0;i<RTW_SSID_SCAN_AMOUNT;i++) {
+ if (pmlmeext->sitesurvey_res.ssid[i].ssid_len) {
+ /* todo: to issue two probe req??? */
+ issue_probereq23a(padapter, &pmlmeext->sitesurvey_res.ssid[i], NULL);
+ /* msleep(SURVEY_TO>>1); */
+ issue_probereq23a(padapter, &pmlmeext->sitesurvey_res.ssid[i], NULL);
+ }
+ }
+
+ if (pmlmeext->sitesurvey_res.scan_mode == SCAN_ACTIVE) {
+ /* todo: to issue two probe req??? */
+ issue_probereq23a(padapter, NULL, NULL);
+ /* msleep(SURVEY_TO>>1); */
+ issue_probereq23a(padapter, NULL, NULL);
+ }
+ }
+ }
+
+ set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
+ } else {
+
+ /* channel number is 0 or this channel is not valid. */
+
+
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH))
+ {
+ if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only))
+ {
+ /* Set the find_phase_state_exchange_cnt to P2P_FINDPHASE_EX_MAX */
+ /* so that the following flow runs the end-of-scan handling. */
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_MAX);
+ }
+ }
+
+ if (rtw_p2p_findphase_ex_is_needed(pwdinfo))
+ {
+ /* Set the P2P State to the listen state of find phase and set the current channel to the listen channel */
+ set_channel_bwmode23a(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_LISTEN);
+ pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
+
+ initialgain = 0xff; /* restore RX GAIN */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ /* turn on dynamic functions */
+ Restore_DM_Func_Flag23a(padapter);
+ /* Switch_DM_Func23a(padapter, DYNAMIC_FUNC_DIG|DYNAMIC_FUNC_HP|DYNAMIC_FUNC_SS, true); */
+
+ mod_timer(&pwdinfo->find_phase_timer, jiffies +
+ msecs_to_jiffies(pwdinfo->listen_dwell * 100));
+ } else
+#endif /* CONFIG_8723AU_P2P */
+ {
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH))
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
+#endif /* CONFIG_8723AU_P2P */
+
+ pmlmeext->sitesurvey_res.state = SCAN_COMPLETE;
+
+ /* switch back to the original channel */
+
+ set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ /* flush 4-AC Queue after site_survey23a */
+ /* val8 = 0; */
+ /* rtw_hal_set_hwreg23a(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+
+ /* config MSR */
+ Set_MSR23a(padapter, (pmlmeinfo->state & 0x3));
+
+ initialgain = 0xff; /* restore RX GAIN */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ /* turn on dynamic functions */
+ Restore_DM_Func_Flag23a(padapter);
+ /* Switch_DM_Func23a(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */
+
+ if (is_client_associated_to_ap23a(padapter) == true)
+ {
+ issue_nulldata23a(padapter, NULL, 0, 3, 500);
+
+ }
+
+ val8 = 0; /* survey done */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+
+ report_surveydone_event23a(padapter);
+
+ pmlmeext->chan_scan_time = SURVEY_TO;
+ pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
+
+ issue_action_BSSCoexistPacket(padapter);
+ issue_action_BSSCoexistPacket(padapter);
+ issue_action_BSSCoexistPacket(padapter);
+
+ }
+ }
+
+ return;
+}
+
+/* collect bss info from Beacon and Probe request/response frames. */
+u8 collect_bss_info23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame, struct wlan_bssid_ex *bssid)
+{
+ int i;
+ u32 len;
+ u8 *p;
+ u16 val16;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+ u32 packet_len = skb->len;
+ u8 ie_offset;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ len = packet_len - sizeof(struct ieee80211_hdr_3addr);
+
+ if (len > MAX_IE_SZ)
+ {
+ /* DBG_8723A("IE too long for survey event\n"); */
+ return _FAIL;
+ }
+
+ memset(bssid, 0, sizeof(struct wlan_bssid_ex));
+
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ bssid->reserved = 1;
+ ie_offset = _BEACON_IE_OFFSET_;
+ } else {
+ /* FIXME : more type */
+ if (ieee80211_is_probe_req(hdr->frame_control)) {
+ ie_offset = _PROBEREQ_IE_OFFSET_;
+ bssid->reserved = 2;
+ } else if (ieee80211_is_probe_resp(hdr->frame_control)) {
+ ie_offset = _PROBERSP_IE_OFFSET_;
+ bssid->reserved = 3;
+ } else {
+ bssid->reserved = 0;
+ ie_offset = _FIXED_IE_LENGTH_;
+ }
+ }
+
+ bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
+
+ /* below is to copy the information element */
+ bssid->IELength = len;
+ memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
+
+ /* get the signal strength */
+ bssid->Rssi = precv_frame->attrib.phy_info.RecvSignalPower; /* in dBm, raw data */
+ bssid->PhyInfo.SignalQuality = precv_frame->attrib.phy_info.SignalQuality;/* in percentage */
+ bssid->PhyInfo.SignalStrength = precv_frame->attrib.phy_info.SignalStrength;/* in percentage */
+
+ /* checking SSID */
+ if ((p = rtw_get_ie23a(bssid->IEs + ie_offset, _SSID_IE_, &len, bssid->IELength - ie_offset)) == NULL)
+ {
+ DBG_8723A("marc: cannot find SSID for survey event\n");
+ return _FAIL;
+ }
+
+ if (*(p + 1)) {
+ if (len > IEEE80211_MAX_SSID_LEN) {
+ DBG_8723A("%s()-%d: IE too long (%d) for survey "
+ "event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->Ssid.ssid, (p + 2), *(p + 1));
+ bssid->Ssid.ssid_len = *(p + 1);
+ } else {
+ bssid->Ssid.ssid_len = 0;
+ }
+
+ memset(bssid->SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
+
+ /* checking rate info... */
+ i = 0;
+ p = rtw_get_ie23a(bssid->IEs + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
+ if (p != NULL)
+ {
+ if (len > NDIS_802_11_LENGTH_RATES_EX)
+ {
+ DBG_8723A("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->SupportedRates, (p + 2), len);
+ i = len;
+ }
+
+ p = rtw_get_ie23a(bssid->IEs + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
+ if (p != NULL)
+ {
+ if (len > (NDIS_802_11_LENGTH_RATES_EX-i))
+ {
+ DBG_8723A("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->SupportedRates + i, (p + 2), len);
+ }
+
+ /* todo: */
+ {
+ bssid->NetworkTypeInUse = Ndis802_11OFDM24;
+ }
+
+ if (bssid->IELength < 12)
+ return _FAIL;
+
+ /* Checking for DSConfig */
+ p = rtw_get_ie23a(bssid->IEs + ie_offset, _DSSET_IE_, &len, bssid->IELength - ie_offset);
+
+ bssid->Configuration.DSConfig = 0;
+ bssid->Configuration.Length = 0;
+
+ if (p)
+ {
+ bssid->Configuration.DSConfig = *(p + 2);
+ }
+ else
+ {/* In 5G, some ap do not have DSSET IE */
+ /* checking HT info for channel */
+ p = rtw_get_ie23a(bssid->IEs + ie_offset, _HT_ADD_INFO_IE_, &len, bssid->IELength - ie_offset);
+ if (p)
+ {
+ struct HT_info_element *HT_info = (struct HT_info_element *)(p + 2);
+ bssid->Configuration.DSConfig = HT_info->primary_channel;
+ }
+ else
+ { /* use current channel */
+ bssid->Configuration.DSConfig = rtw_get_oper_ch23a(padapter);
+ }
+ }
+
+ if (ieee80211_is_probe_req(hdr->frame_control)) {
+ /* FIXME */
+ bssid->InfrastructureMode = Ndis802_11Infrastructure;
+ ether_addr_copy(bssid->MacAddress, hdr->addr2);
+ bssid->Privacy = 1;
+ return _SUCCESS;
+ }
+
+ memcpy(&bssid->Configuration.BeaconPeriod, rtw_get_beacon_interval23a_from_ie(bssid->IEs), 2);
+ bssid->Configuration.BeaconPeriod = le32_to_cpu(bssid->Configuration.BeaconPeriod);
+
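+ /* In the capability field, bit 0 (ESS) distinguishes an
+ infrastructure BSS from an IBSS and bit 4 is the Privacy flag. */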
+ val16 = rtw_get_capability23a(bssid);
+
+ if (val16 & BIT(0)) {
+ bssid->InfrastructureMode = Ndis802_11Infrastructure;
+ ether_addr_copy(bssid->MacAddress, hdr->addr2);
+ } else {
+ bssid->InfrastructureMode = Ndis802_11IBSS;
+ ether_addr_copy(bssid->MacAddress, hdr->addr3);
+ }
+
+ if (val16 & BIT(4))
+ bssid->Privacy = 1;
+ else
+ bssid->Privacy = 0;
+
+ bssid->Configuration.ATIMWindow = 0;
+
+ /* 20/40 BSS Coexistence check */
+ if ((pregistrypriv->wifi_spec == 1) && (false == pmlmeinfo->bwmode_updated))
+ {
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ p = rtw_get_ie23a(bssid->IEs + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->IELength - ie_offset);
+ if (p && len > 0) {
+ struct HT_caps_element *pHT_caps;
+ pHT_caps = (struct HT_caps_element *)(p + 2);
+
+ if (pHT_caps->u.HT_cap_element.HT_caps_info & BIT(14))
+ pmlmepriv->num_FortyMHzIntolerant++;
+ } else
+ {
+ pmlmepriv->num_sta_no_ht++;
+ }
+ }
+
+
+ /* mark bss info received on a nearby channel with SignalQuality 101 */
+ if (bssid->Configuration.DSConfig != rtw_get_oper_ch23a(padapter))
+ bssid->PhyInfo.SignalQuality = 101;
+
+ return _SUCCESS;
+}
+
+void start_create_ibss23a(struct rtw_adapter* padapter)
+{
+ unsigned short caps;
+ u8 val8;
+ u8 join_type;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
+ pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
+ pmlmeinfo->bcn_interval = get_beacon_interval23a(pnetwork);
+
+ /* update wireless mode */
+ update_wireless_mode23a(padapter);
+
+ /* update capability */
+ caps = rtw_get_capability23a(pnetwork);
+ update_capinfo23a(padapter, caps);
+ if (caps&cap_IBSS)/* adhoc master */
+ {
+ val8 = 0xcf;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* switch channel */
+ /* SelectChannel23a(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE); */
+ set_channel_bwmode23a(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+
+ beacon_timing_control23a(padapter);
+
+ /* set msr to WIFI_FW_ADHOC_STATE */
+ pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
+ Set_MSR23a(padapter, (pmlmeinfo->state & 0x3));
+
+ /* issue beacon */
+ if (send_beacon23a(padapter) == _FAIL)
+ {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("issuing beacon frame fail....\n"));
+
+ report_join_res23a(padapter, -1);
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ }
+ else
+ {
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BSSID, padapter->registrypriv.dev_network.MacAddress);
+ join_type = 0;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+
+ report_join_res23a(padapter, 1);
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+ }
+ }
+ else
+ {
+ DBG_8723A("start_create_ibss23a, invalid cap:%x\n", caps);
+ return;
+ }
+}
+
+void start_clnt_join23a(struct rtw_adapter* padapter)
+{
+ unsigned short caps;
+ u8 val8;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
+ int beacon_timeout;
+
+ pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
+ pmlmeinfo->bcn_interval = get_beacon_interval23a(pnetwork);
+
+ /* update wireless mode */
+ update_wireless_mode23a(padapter);
+
+ /* update capability */
+ caps = rtw_get_capability23a(pnetwork);
+ update_capinfo23a(padapter, caps);
+ if (caps&cap_ESS) {
+ /* switch channel */
+ set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ Set_MSR23a(padapter, WIFI_FW_STATION_STATE);
+
+ val8 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_8021X)? 0xcc: 0xcf;
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* switch channel */
+ /* set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
+
+ /* wait here to receive a beacon before starting auth, */
+ /* and arm the link timer */
+ beacon_timeout = decide_wait_for_beacon_timeout23a(pmlmeinfo->bcn_interval);
+ set_link_timer(pmlmeext, beacon_timeout);
+ mod_timer(&padapter->mlmepriv.assoc_timer, jiffies +
+ msecs_to_jiffies((REAUTH_TO * REAUTH_LIMIT) + (REASSOC_TO*REASSOC_LIMIT) + beacon_timeout));
+ pmlmeinfo->state = WIFI_FW_AUTH_NULL | WIFI_FW_STATION_STATE;
+ }
+ else if (caps&cap_IBSS) /* adhoc client */
+ {
+ Set_MSR23a(padapter, WIFI_FW_ADHOC_STATE);
+
+ val8 = 0xcf;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* switch channel */
+ set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ beacon_timing_control23a(padapter);
+
+ pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
+
+ report_join_res23a(padapter, 1);
+ }
+ else
+ {
+ /* DBG_8723A("marc: invalid cap:%x\n", caps); */
+ return;
+ }
+}
+
+void start_clnt_auth23a(struct rtw_adapter* padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ del_timer_sync(&pmlmeext->link_timer);
+
+ pmlmeinfo->state &= (~WIFI_FW_AUTH_NULL);
+ pmlmeinfo->state |= WIFI_FW_AUTH_STATE;
+
+ pmlmeinfo->auth_seq = 1;
+ pmlmeinfo->reauth_count = 0;
+ pmlmeinfo->reassoc_count = 0;
+ pmlmeinfo->link_count = 0;
+ pmlmeext->retry = 0;
+
+ /* If the AP did not receive our earlier deauth, it may either */
+ /* not respond to the auth or deauth us after the link completes. */
+ /* Issue a deauth before issuing auth to deal with this situation. */
+ /* Commented by Albert 2012/07/21 */
+ /* For the Win8 P2P connection, it will be hard to connect */
+ /* successfully if this device does not send the deauth first. */
+ issue_deauth23a(padapter, (&pmlmeinfo->network)->MacAddress, WLAN_REASON_DEAUTH_LEAVING);
+
+ DBG_8723A_LEVEL(_drv_always_, "start auth\n");
+ issue_auth23a(padapter, NULL, 0);
+
+ set_link_timer(pmlmeext, REAUTH_TO);
+}
+
+void start_clnt_assoc23a(struct rtw_adapter* padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ del_timer_sync(&pmlmeext->link_timer);
+
+ pmlmeinfo->state &= (~(WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE));
+ pmlmeinfo->state |= (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE);
+
+ issue_assocreq23a(padapter);
+
+ set_link_timer(pmlmeext, REASSOC_TO);
+}
+
+unsigned int receive_disconnect23a(struct rtw_adapter *padapter, unsigned char *MacAddr, unsigned short reason)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ /* check A3 */
+ if (!ether_addr_equal(MacAddr, get_my_bssid23a(&pmlmeinfo->network)))
+ return _SUCCESS;
+
+ DBG_8723A("%s\n", __func__);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
+ {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_del_sta_event23a(padapter, MacAddr, reason);
+
+ }
+ else if (pmlmeinfo->state & WIFI_FW_LINKING_STATE)
+ {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res23a(padapter, -2);
+ }
+ }
+
+ return _SUCCESS;
+}
+
+static void process_80211d(struct rtw_adapter *padapter, struct wlan_bssid_ex *bssid)
+{
+ struct registry_priv *pregistrypriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct rt_channel_info *chplan_new;
+ u8 channel;
+ u8 i;
+
+ pregistrypriv = &padapter->registrypriv;
+ pmlmeext = &padapter->mlmeextpriv;
+
+ /* Adjust channel plan by AP Country IE */
+ if (pregistrypriv->enable80211d &&
+ (!pmlmeext->update_channel_plan_by_ap_done))
+ {
+ u8 *ie, *p;
+ u32 len;
+ struct rt_channel_plan chplan_ap;
+ struct rt_channel_info chplan_sta[MAX_CHANNEL_NUM];
+ u8 country[4];
+ u8 fcn; /* first channel number */
+ u8 noc; /* number of channel */
+ u8 j, k;
+
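+ /* Parse the Country IE (first channel / number of channels / max
+ tx power triplets) into chplan_ap, then merge it with the STA
+ channel plan: channels known to both sides are scanned
+ actively, STA-only channels fall back to passive scan and
+ AP-only channels are appended as active. This is done
+ separately for 2.4 GHz and 5 GHz. */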
+ ie = rtw_get_ie23a(bssid->IEs + _FIXED_IE_LENGTH_, _COUNTRY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (!ie)
+ return;
+ if (len < 6)
+ return;
+
+ ie += 2;
+ p = ie;
+ ie += len;
+
+ memset(country, 0, 4);
+ memcpy(country, p, 3);
+ p += 3;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("%s: 802.11d country =%s\n", __func__, country));
+
+ i = 0;
+ while ((ie - p) >= 3)
+ {
+ fcn = *(p++);
+ noc = *(p++);
+ p++;
+
+ for (j = 0; j < noc; j++)
+ {
+ if (fcn <= 14)
+ channel = fcn + j; /* 2.4 GHz */
+ else
+ channel = fcn + j * 4; /* 5 GHz */
+
+ chplan_ap.Channel[i++] = channel;
+ }
+ }
+ chplan_ap.Len = i;
+
+ memcpy(chplan_sta, pmlmeext->channel_set, sizeof(chplan_sta));
+ memset(pmlmeext->channel_set, 0, sizeof(pmlmeext->channel_set));
+ chplan_new = pmlmeext->channel_set;
+
+ i = j = k = 0;
+ if (pregistrypriv->wireless_mode & WIRELESS_11G) {
+ do {
+ if ((i == MAX_CHANNEL_NUM) ||
+ (chplan_sta[i].ChannelNum == 0) ||
+ (chplan_sta[i].ChannelNum > 14))
+ break;
+
+ if ((j == chplan_ap.Len) || (chplan_ap.Channel[j] > 14))
+ break;
+
+ if (chplan_sta[i].ChannelNum == chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ i++;
+ j++;
+ k++;
+ } else if (chplan_sta[i].ChannelNum < chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ } else if (chplan_sta[i].ChannelNum > chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } while (1);
+
+ /* change channels the AP does not support to passive scan */
+ while ((i < MAX_CHANNEL_NUM) &&
+ (chplan_sta[i].ChannelNum != 0) &&
+ (chplan_sta[i].ChannelNum <= 14)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ }
+
+ /* add channel AP supported */
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14)) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } else {
+ /* keep original STA 2.4G channel plan */
+ while ((i < MAX_CHANNEL_NUM) &&
+ (chplan_sta[i].ChannelNum != 0) &&
+ (chplan_sta[i].ChannelNum <= 14)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = chplan_sta[i].ScanType;
+ i++;
+ k++;
+ }
+
+ /* skip AP 2.4G channel plan */
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14)) {
+ j++;
+ }
+ }
+
+ if (pregistrypriv->wireless_mode & WIRELESS_11A) {
+ do {
+ if ((i == MAX_CHANNEL_NUM) ||
+ (chplan_sta[i].ChannelNum == 0))
+ break;
+
+ if ((j == chplan_ap.Len) || (chplan_ap.Channel[j] == 0))
+ break;
+
+ if (chplan_sta[i].ChannelNum == chplan_ap.Channel[j])
+ {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ i++;
+ j++;
+ k++;
+ }
+ else if (chplan_sta[i].ChannelNum < chplan_ap.Channel[j])
+ {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+/* chplan_new[k].ScanType = chplan_sta[i].ScanType; */
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ }
+ else if (chplan_sta[i].ChannelNum > chplan_ap.Channel[j])
+ {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } while (1);
+
+ /* change channels the AP does not support to passive scan */
+ while ((i < MAX_CHANNEL_NUM) && (chplan_sta[i].ChannelNum != 0)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ }
+
+ /* add channel AP supported */
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] != 0)) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } else {
+ /* keep original STA 5G channel plan */
+ while ((i < MAX_CHANNEL_NUM) && (chplan_sta[i].ChannelNum != 0)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = chplan_sta[i].ScanType;
+ i++;
+ k++;
+ }
+ }
+ pmlmeext->update_channel_plan_by_ap_done = 1;
+ }
+
+ /* If channel is used by AP, set channel scan type to active */
+ channel = bssid->Configuration.DSConfig;
+ chplan_new = pmlmeext->channel_set;
+ i = 0;
+ while ((i < MAX_CHANNEL_NUM) && (chplan_new[i].ChannelNum != 0)) {
+ if (chplan_new[i].ChannelNum == channel)
+ {
+ if (chplan_new[i].ScanType == SCAN_PASSIVE) {
+				/* 5G Band 2, 3 (DFS) channels do not change to active scan */
+ if (channel >= 52 && channel <= 144)
+ break;
+
+ chplan_new[i].ScanType = SCAN_ACTIVE;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("%s: change channel %d scan type from passive to active\n",
+ __func__, channel));
+ }
+ break;
+ }
+ i++;
+ }
+}
+
+/****************************************************************************
+
+Following are the functions to report events
+
+*****************************************************************************/
+
+void report_survey_event23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct survey_event *psurvey_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext;
+ struct cmd_priv *pcmdpriv;
+
+ if (!padapter)
+ return;
+
+ pmlmeext = &padapter->mlmeextpriv;
+ pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)kzalloc(sizeof(struct cmd_obj),
+ GFP_ATOMIC);
+ if (!pcmd_obj)
+ return;
+
+ cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
+ if (!pevtcmd) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ INIT_LIST_HEAD(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header*)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct survey_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
+
+ psurvey_evt = (struct survey_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+
+ if (collect_bss_info23a(padapter, precv_frame, &psurvey_evt->bss) == _FAIL) {
+ kfree(pcmd_obj);
+ kfree(pevtcmd);
+ return;
+ }
+
+ process_80211d(padapter, &psurvey_evt->bss);
+
+ rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
+
+ pmlmeext->sitesurvey_res.bss_cnt++;
+
+ return;
+}
+
+void report_surveydone_event23a(struct rtw_adapter *padapter)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct surveydone_event *psurveydone_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)kzalloc(sizeof(struct cmd_obj),
+ GFP_ATOMIC);
+ if (!pcmd_obj)
+ return;
+
+ cmdsz = (sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
+ if (!pevtcmd) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ INIT_LIST_HEAD(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header*)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct surveydone_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
+
+ psurveydone_evt = (struct surveydone_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+ psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
+
+ DBG_8723A("survey done event(%x)\n", psurveydone_evt->bss_cnt);
+
+ rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+void report_join_res23a(struct rtw_adapter *padapter, int res)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct joinbss_event *pjoinbss_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)kzalloc(sizeof(struct cmd_obj),
+ GFP_ATOMIC);
+ if (!pcmd_obj)
+ return;
+
+ cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
+ if (!pevtcmd) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ INIT_LIST_HEAD(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header*)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct joinbss_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
+
+ pjoinbss_evt = (struct joinbss_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)&pjoinbss_evt->network.network,
+ &pmlmeinfo->network, sizeof(struct wlan_bssid_ex));
+ pjoinbss_evt->network.join_res = pjoinbss_evt->network.aid = res;
+
+ DBG_8723A("report_join_res23a(%d)\n", res);
+
+ rtw_joinbss_event_prehandle23a(padapter, (u8 *)&pjoinbss_evt->network);
+
+ rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+void report_del_sta_event23a(struct rtw_adapter *padapter, unsigned char* MacAddr, unsigned short reason)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct sta_info *psta;
+ int mac_id;
+ struct stadel_event *pdel_sta_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)kzalloc(sizeof(struct cmd_obj),
+ GFP_ATOMIC);
+ if (!pcmd_obj)
+ return;
+
+ cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
+ if (!pevtcmd) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ INIT_LIST_HEAD(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header*)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stadel_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
+
+ pdel_sta_evt = (struct stadel_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+ ether_addr_copy((unsigned char *)&pdel_sta_evt->macaddr, MacAddr);
+ memcpy((unsigned char *)pdel_sta_evt->rsvd, (unsigned char *)&reason,
+ 2);
+
+ psta = rtw_get_stainfo23a(&padapter->stapriv, MacAddr);
+ if (psta)
+ mac_id = (int)psta->mac_id;
+ else
+ mac_id = (-1);
+
+ pdel_sta_evt->mac_id = mac_id;
+
+ DBG_8723A("report_del_sta_event23a: delete STA, mac_id =%d\n", mac_id);
+
+ rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+void report_add_sta_event23a(struct rtw_adapter *padapter, unsigned char* MacAddr, int cam_idx)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct stassoc_event *padd_sta_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)kzalloc(sizeof(struct cmd_obj),
+ GFP_ATOMIC);
+ if (!pcmd_obj)
+ return;
+
+ cmdsz = (sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
+ if (!pevtcmd) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ INIT_LIST_HEAD(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header*)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stassoc_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
+
+ padd_sta_evt = (struct stassoc_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+ ether_addr_copy((unsigned char *)&padd_sta_evt->macaddr, MacAddr);
+ padd_sta_evt->cam_id = cam_idx;
+
+ DBG_8723A("report_add_sta_event23a: add STA\n");
+
+ rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+/****************************************************************************
+
+Following are the event callback functions
+
+*****************************************************************************/
+
+/* for sta/adhoc mode */
+void update_sta_info23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ /* ERP */
+ VCS_update23a(padapter, psta);
+
+ /* HT */
+ if (pmlmepriv->htpriv.ht_option)
+ {
+ psta->htpriv.ht_option = true;
+
+ psta->htpriv.ampdu_enable = pmlmepriv->htpriv.ampdu_enable;
+
+ if (support_short_GI23a(padapter, &pmlmeinfo->HT_caps))
+ psta->htpriv.sgi = true;
+
+ psta->qos_option = true;
+
+ }
+ else
+ {
+ psta->htpriv.ht_option = false;
+
+ psta->htpriv.ampdu_enable = false;
+
+ psta->htpriv.sgi = false;
+ psta->qos_option = false;
+
+ }
+ psta->htpriv.bwmode = pmlmeext->cur_bwmode;
+ psta->htpriv.ch_offset = pmlmeext->cur_ch_offset;
+
+ psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
+ psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
+
+ /* QoS */
+ if (pmlmepriv->qospriv.qos_option)
+ psta->qos_option = true;
+
+ psta->state = _FW_LINKED;
+}
+
+void mlmeext_joinbss_event_callback23a(struct rtw_adapter *padapter, int join_res)
+{
+ struct sta_info *psta, *psta_bmc;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 join_type;
+ u16 media_status;
+
+ if (join_res < 0)
+ {
+ join_type = 1;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BSSID, null_addr);
+
+ /* restore to initial setting. */
+ update_tx_basic_rate23a(padapter, padapter->registrypriv.wireless_mode);
+
+ goto exit_mlmeext_joinbss_event_callback23a;
+ }
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)
+ {
+ /* for bc/mc */
+ psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
+ if (psta_bmc)
+ {
+ pmlmeinfo->FW_sta_info[psta_bmc->mac_id].psta = psta_bmc;
+ update_bmc_sta_support_rate23a(padapter, psta_bmc->mac_id);
+ Update_RA_Entry23a(padapter, psta_bmc);
+ }
+ }
+
+ /* turn on dynamic functions */
+ Switch_DM_Func23a(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
+
+	/* update IOT-related issues */
+ update_IOT_info23a(padapter);
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
+
+ /* BCN interval */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));
+
+	/* update capability */
+ update_capinfo23a(padapter, pmlmeinfo->capability);
+
+ /* WMM, Update EDCA param */
+ WMMOnAssocRsp23a(padapter);
+
+ /* HT */
+ HTOnAssocRsp23a(padapter);
+
+ /* Set cur_channel&cur_bwmode&cur_ch_offset */
+ set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ psta = rtw_get_stainfo23a(pstapriv, cur_network->MacAddress);
+ if (psta) /* only for infra. mode */
+ {
+ pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
+
+ /* DBG_8723A("set_sta_rate23a\n"); */
+
+ psta->wireless_mode = pmlmeext->cur_wireless_mode;
+
+ /* set per sta rate after updating HT cap. */
+ set_sta_rate23a(padapter, psta);
+
+ media_status = (psta->mac_id<<8)|1; /* MACID|OPMODE: 1 means connect */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+ }
+
+ join_type = 2;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ {
+ /* correcting TSF */
+ correct_TSF23a(padapter, pmlmeext);
+
+ /* set_link_timer(pmlmeext, DISCONNECT_TO); */
+ }
+
+ rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_CONNECT, 0);
+
+exit_mlmeext_joinbss_event_callback23a:
+ DBG_8723A("=>%s\n", __func__);
+}
+
+void mlmeext_sta_add_event_callback23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u8 join_type;
+
+ DBG_8723A("%s\n", __func__);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)
+ {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)/* adhoc master or sta_count>1 */
+ {
+ /* nothing to do */
+ }
+ else/* adhoc client */
+ {
+ /* update TSF Value */
+ /* update_TSF23a(pmlmeext, pframe, len); */
+
+ /* correcting TSF */
+ correct_TSF23a(padapter, pmlmeext);
+
+ /* start beacon */
+ if (send_beacon23a(padapter) == _FAIL)
+ {
+ pmlmeinfo->FW_sta_info[psta->mac_id].status = 0;
+
+ pmlmeinfo->state ^= WIFI_FW_ADHOC_STATE;
+
+ return;
+ }
+
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+
+ }
+
+ join_type = 2;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ }
+
+ pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
+
+	/* rate adaptive */
+ Update_RA_Entry23a(padapter, psta);
+
+ /* update adhoc sta_info */
+ update_sta_info23a(padapter, psta);
+}
+
+void mlmeext_sta_del_event_callback23a(struct rtw_adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if (is_client_associated_to_ap23a(padapter) || is_IBSS_empty23a(padapter))
+ {
+ /* set_opmode_cmd(padapter, infra_client_with_mlme); */
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BSSID, null_addr);
+
+ /* restore to initial setting. */
+ update_tx_basic_rate23a(padapter, padapter->registrypriv.wireless_mode);
+
+		/* switch to the 20 MHz mode after disconnect */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ /* SelectChannel23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset); */
+ set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ flush_all_cam_entry23a(padapter);
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+ /* set MSR to no link state -> infra. mode */
+ Set_MSR23a(padapter, _HW_STATE_STATION_);
+
+ del_timer_sync(&pmlmeext->link_timer);
+ }
+}
+
+/****************************************************************************
+
+Following are the functions for the timer handlers
+
+*****************************************************************************/
+void linked23a_rx_sig_stren_disp(struct rtw_adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+	u8 mac_id = 0;
+	int UndecoratedSmoothedPWDB;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ mac_id = 0;
+ else if ((pmlmeinfo->state&0x03) == _HW_STATE_AP_)
+ mac_id = 2;
+
+ rtw_hal_get_def_var23a(padapter, HW_DEF_RA_INFO_DUMP,&mac_id);
+
+ rtw_hal_get_def_var23a(padapter, HAL_DEF_UNDERCORATEDSMOOTHEDPWDB, &UndecoratedSmoothedPWDB);
+ DBG_8723A("UndecoratedSmoothedPWDB:%d\n", UndecoratedSmoothedPWDB);
+}
+
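+/* The AP is considered alive if any data, beacon or probe-response frames
+ * have been received from it since the previous check. */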
+static u8 chk_ap_is_alive(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ u8 ret = false;
+
+ if ((sta_rx_data_pkts(psta) == sta_last_rx_data_pkts(psta)) &&
+ sta_rx_beacon_pkts(psta) == sta_last_rx_beacon_pkts(psta) &&
+ sta_rx_probersp_pkts(psta) == sta_last_rx_probersp_pkts(psta))
+ ret = false;
+ else
+ ret = true;
+
+ sta_update_last_rx_pkts(psta);
+ return ret;
+}
+
+void linked_status_chk23a(struct rtw_adapter *padapter)
+{
+ u32 i;
+ struct sta_info *psta;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (padapter->bRxRSSIDisplay)
+ linked23a_rx_sig_stren_disp(padapter);
+
+ rtw_hal_sreset_linked_status_check23a(padapter);
+
+ if (is_client_associated_to_ap23a(padapter))
+ {
+ /* linked infrastructure client mode */
+
+ int tx_chk = _SUCCESS, rx_chk = _SUCCESS;
+ int rx_chk_limit;
+
+ rx_chk_limit = 4;
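+		/* rx_chk_limit is the number of consecutive keep-alive check
+		 * failures tolerated before the link is treated as lost and a
+		 * disconnect is reported (see the retry handling below). */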
+
+ if ((psta = rtw_get_stainfo23a(pstapriv, pmlmeinfo->network.MacAddress)) != NULL)
+ {
+ bool is_p2p_enable = false;
+#ifdef CONFIG_8723AU_P2P
+ is_p2p_enable = !rtw_p2p_chk_state(&padapter->wdinfo, P2P_STATE_NONE);
+#endif
+
+ if (chk_ap_is_alive(padapter, psta) == false)
+ rx_chk = _FAIL;
+
+ if (pxmitpriv->last_tx_pkts == pxmitpriv->tx_pkts)
+ tx_chk = _FAIL;
+
+ if (pmlmeext->active_keep_alive_check && (rx_chk == _FAIL || tx_chk == _FAIL)) {
+ u8 backup_oper_channel = 0;
+
+				/* switch to the correct channel of the current network before issuing keep-alive frames */
+ if (rtw_get_oper_ch23a(padapter) != pmlmeext->cur_channel) {
+ backup_oper_channel = rtw_get_oper_ch23a(padapter);
+ SelectChannel23a(padapter, pmlmeext->cur_channel);
+ }
+
+ if (rx_chk != _SUCCESS)
+ issue_probereq23a_ex23a(padapter, &pmlmeinfo->network.Ssid, psta->hwaddr, 3, 1);
+
+ if ((tx_chk != _SUCCESS && pmlmeinfo->link_count++ == 0xf) || rx_chk != _SUCCESS) {
+ tx_chk = issue_nulldata23a(padapter, psta->hwaddr, 0, 3, 1);
+ /* if tx acked and p2p disabled, set rx_chk _SUCCESS to reset retry count */
+ if (tx_chk == _SUCCESS && !is_p2p_enable)
+ rx_chk = _SUCCESS;
+ }
+
+ /* back to the original operation channel */
+ if (backup_oper_channel>0)
+ SelectChannel23a(padapter, backup_oper_channel);
+
+ } else {
+ if (rx_chk != _SUCCESS) {
+ if (pmlmeext->retry == 0) {
+ issue_probereq23a(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
+ issue_probereq23a(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
+ issue_probereq23a(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
+ }
+ }
+
+ if (tx_chk != _SUCCESS && pmlmeinfo->link_count++ == 0xf)
+ tx_chk = issue_nulldata23a(padapter, NULL, 0, 1, 0);
+ }
+
+ if (rx_chk == _FAIL) {
+ pmlmeext->retry++;
+ if (pmlmeext->retry > rx_chk_limit) {
+ DBG_8723A_LEVEL(_drv_always_, FUNC_ADPT_FMT" disconnect or roaming\n",
+ FUNC_ADPT_ARG(padapter));
+ receive_disconnect23a(padapter, pmlmeinfo->network.MacAddress,
+ WLAN_REASON_EXPIRATION_CHK);
+ return;
+ }
+ } else {
+ pmlmeext->retry = 0;
+ }
+
+ if (tx_chk == _FAIL) {
+ pmlmeinfo->link_count &= 0xf;
+ } else {
+ pxmitpriv->last_tx_pkts = pxmitpriv->tx_pkts;
+ pmlmeinfo->link_count = 0;
+ }
+
+		} /* end of if ((psta = rtw_get_stainfo23a(pstapriv, pmlmeinfo->network.MacAddress)) != NULL) */
+ }
+ else if (is_client_associated_to_ibss23a(padapter))
+ {
+ /* linked IBSS mode */
+		/* check the rx pkt counter for each assoc list entry */
+ for (i = IBSS_START_MAC_ID; i < NUM_STA; i++)
+ {
+ if (pmlmeinfo->FW_sta_info[i].status == 1)
+ {
+ psta = pmlmeinfo->FW_sta_info[i].psta;
+
+				if (!psta)
+					continue;
+
+ if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta))
+ {
+
+ if (pmlmeinfo->FW_sta_info[i].retry<3)
+ {
+ pmlmeinfo->FW_sta_info[i].retry++;
+ }
+ else
+ {
+ pmlmeinfo->FW_sta_info[i].retry = 0;
+ pmlmeinfo->FW_sta_info[i].status = 0;
+ report_del_sta_event23a(padapter, psta->hwaddr,
+ 65535/* indicate disconnect caused by no rx */
+ );
+ }
+ }
+ else
+ {
+ pmlmeinfo->FW_sta_info[i].retry = 0;
+ pmlmeinfo->FW_sta_info[i].rx_pkt = (u32)sta_rx_pkts(psta);
+ }
+ }
+ }
+
+ /* set_link_timer(pmlmeext, DISCONNECT_TO); */
+
+ }
+}
+
+static void survey_timer_hdl(unsigned long data)
+{
+ struct rtw_adapter *padapter = (struct rtw_adapter *)data;
+ struct cmd_obj *ph2c;
+ struct sitesurvey_parm *psurveyPara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif
+
+ /* issue rtw_sitesurvey_cmd23a */
+ if (pmlmeext->sitesurvey_res.state > SCAN_START) {
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS)
+ pmlmeext->sitesurvey_res.channel_idx++;
+
+ if (pmlmeext->scan_abort == true)
+ {
+#ifdef CONFIG_8723AU_P2P
+ if (!rtw_p2p_chk_state(&padapter->wdinfo, P2P_STATE_NONE))
+ {
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_MAX);
+ pmlmeext->sitesurvey_res.channel_idx = 3;
+ DBG_8723A("%s idx:%d, cnt:%u\n", __func__,
+ pmlmeext->sitesurvey_res.channel_idx,
+ pwdinfo->find_phase_state_exchange_cnt);
+ } else
+ #endif
+ {
+ pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
+ DBG_8723A("%s idx:%d\n", __func__,
+ pmlmeext->sitesurvey_res.channel_idx);
+ }
+
+ pmlmeext->scan_abort = false;/* reset */
+ }
+
+ ph2c = (struct cmd_obj *)kzalloc(sizeof(struct cmd_obj),
+ GFP_ATOMIC);
+ if (!ph2c)
+ goto exit_survey_timer_hdl;
+
+ psurveyPara = (struct sitesurvey_parm*)
+ kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
+ if (!psurveyPara) {
+ kfree(ph2c);
+ goto exit_survey_timer_hdl;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
+ rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+ }
+
+exit_survey_timer_hdl:
+ return;
+}
+
+static void link_timer_hdl(unsigned long data)
+{
+ struct rtw_adapter *padapter = (struct rtw_adapter *)data;
+ /* static unsigned int rx_pkt = 0; */
+ /* static u64 tx_cnt = 0; */
+ /* struct xmit_priv *pxmitpriv = &padapter->xmitpriv; */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ /* struct sta_priv *pstapriv = &padapter->stapriv; */
+
+ if (pmlmeinfo->state & WIFI_FW_AUTH_NULL)
+ {
+ DBG_8723A("link_timer_hdl:no beacon while connecting\n");
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res23a(padapter, -3);
+ }
+ else if (pmlmeinfo->state & WIFI_FW_AUTH_STATE)
+ {
+ /* re-auth timer */
+ if (++pmlmeinfo->reauth_count > REAUTH_LIMIT)
+ {
+ /* if (pmlmeinfo->auth_algo != dot11AuthAlgrthm_Auto) */
+ /* */
+ pmlmeinfo->state = 0;
+ report_join_res23a(padapter, -1);
+ return;
+ /* */
+ /* else */
+ /* */
+ /* pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared; */
+ /* pmlmeinfo->reauth_count = 0; */
+ /* */
+ }
+
+ DBG_8723A("link_timer_hdl: auth timeout and try again\n");
+ pmlmeinfo->auth_seq = 1;
+ issue_auth23a(padapter, NULL, 0);
+ set_link_timer(pmlmeext, REAUTH_TO);
+ }
+ else if (pmlmeinfo->state & WIFI_FW_ASSOC_STATE)
+ {
+ /* re-assoc timer */
+ if (++pmlmeinfo->reassoc_count > REASSOC_LIMIT)
+ {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res23a(padapter, -2);
+ return;
+ }
+
+ DBG_8723A("link_timer_hdl: assoc timeout and try again\n");
+ issue_assocreq23a(padapter);
+ set_link_timer(pmlmeext, REASSOC_TO);
+ }
+
+ return;
+}
+
+static void addba_timer_hdl(unsigned long data)
+{
+ struct sta_info *psta = (struct sta_info *)data;
+ struct ht_priv *phtpriv;
+
+ if (!psta)
+ return;
+
+ phtpriv = &psta->htpriv;
+
+ if ((phtpriv->ht_option == true) && (phtpriv->ampdu_enable == true))
+ {
+ if (phtpriv->candidate_tid_bitmap)
+ phtpriv->candidate_tid_bitmap = 0x0;
+
+ }
+}
+
+void init_addba_retry_timer23a(struct sta_info *psta)
+{
+ setup_timer(&psta->addba_retry_timer, addba_timer_hdl,
+ (unsigned long)psta);
+}
+
+void init_mlme_ext_timer23a(struct rtw_adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ setup_timer(&pmlmeext->survey_timer, survey_timer_hdl,
+ (unsigned long)padapter);
+
+ setup_timer(&pmlmeext->link_timer, link_timer_hdl,
+ (unsigned long)padapter);
+}
+
+u8 NULL_hdl23a(struct rtw_adapter *padapter, u8 *pbuf)
+{
+ return H2C_SUCCESS;
+}
+
+u8 setopmode_hdl23a(struct rtw_adapter *padapter, u8 *pbuf)
+{
+ u8 type;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct setopmode_parm *psetop = (struct setopmode_parm *)pbuf;
+
+ if (psetop->mode == Ndis802_11APMode)
+ {
+ pmlmeinfo->state = WIFI_FW_AP_STATE;
+ type = _HW_STATE_AP_;
+ }
+ else if (psetop->mode == Ndis802_11Infrastructure)
+ {
+ pmlmeinfo->state &= ~(BIT(0)|BIT(1));/* clear state */
+ pmlmeinfo->state |= WIFI_FW_STATION_STATE;/* set to STATION_STATE */
+ type = _HW_STATE_STATION_;
+ }
+ else if (psetop->mode == Ndis802_11IBSS)
+ {
+ type = _HW_STATE_ADHOC_;
+ }
+ else
+ {
+ type = _HW_STATE_NOLINK_;
+ }
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_SET_OPMODE, (u8 *)(&type));
+ /* Set_NETYPE0_MSR(padapter, type); */
+
+ return H2C_SUCCESS;
+}
+
+u8 createbss_hdl23a(struct rtw_adapter *padapter, u8 *pbuf)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
+ struct wlan_bssid_ex *pparm = (struct wlan_bssid_ex *)pbuf;
+ /* u32 initialgain; */
+
+ if (pparm->InfrastructureMode == Ndis802_11APMode) {
+#ifdef CONFIG_8723AU_AP_MODE
+
+ if (pmlmeinfo->state == WIFI_FW_AP_STATE)
+ {
+ /* todo: */
+ return H2C_SUCCESS;
+ }
+#endif
+ }
+
+ /* below is for ad-hoc master */
+ if (pparm->InfrastructureMode == Ndis802_11IBSS) {
+ rtw_joinbss_reset23a(padapter);
+
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmlmeinfo->ERP_enable = 0;
+ pmlmeinfo->WMM_enable = 0;
+ pmlmeinfo->HT_enable = 0;
+ pmlmeinfo->HT_caps_enable = 0;
+ pmlmeinfo->HT_info_enable = 0;
+ pmlmeinfo->agg_enable_bitmap = 0;
+ pmlmeinfo->candidate_tid_bitmap = 0;
+
+ /* disable dynamic functions, such as high power, DIG */
+ Save_DM_Func_Flag23a(padapter);
+ Switch_DM_Func23a(padapter, DYNAMIC_FUNC_DISABLE, false);
+
+ /* config the initial gain under linking, need to write the BB registers */
+ /* initialgain = 0x1E; */
+ /* rtw_hal_set_hwreg23a(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */
+
+ /* cancel link timer */
+ del_timer_sync(&pmlmeext->link_timer);
+
+ /* clear CAM */
+ flush_all_cam_entry23a(padapter);
+
+ if (pparm->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
+ return H2C_PARAMETERS_ERROR;
+
+ memcpy(pnetwork, pparm, sizeof(struct wlan_bssid_ex));
+
+ start_create_ibss23a(padapter);
+ }
+
+ return H2C_SUCCESS;
+}
+
+u8 join_cmd_hdl23a(struct rtw_adapter *padapter, u8 *pbuf)
+{
+ u8 join_type;
+ struct ndis_802_11_var_ies * pIE;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
+ struct wlan_bssid_ex *pparm = (struct wlan_bssid_ex *)pbuf;
+ struct HT_info_element *pht_info;
+ u32 i;
+ /* u32 initialgain; */
+ /* u32 acparm; */
+
+	/* check whether we are already connected to an AP */
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
+ {
+ if (pmlmeinfo->state & WIFI_FW_STATION_STATE)
+ issue_deauth23a_ex23a(padapter, pnetwork->MacAddress,
+ WLAN_REASON_DEAUTH_LEAVING, 5, 100);
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+ /* clear CAM */
+ flush_all_cam_entry23a(padapter);
+
+ del_timer_sync(&pmlmeext->link_timer);
+
+ /* set MSR to nolink -> infra. mode */
+ /* Set_MSR23a(padapter, _HW_STATE_NOLINK_); */
+ Set_MSR23a(padapter, _HW_STATE_STATION_);
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ }
+
+ rtw_joinbss_reset23a(padapter);
+
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmlmeinfo->ERP_enable = 0;
+ pmlmeinfo->WMM_enable = 0;
+ pmlmeinfo->HT_enable = 0;
+ pmlmeinfo->HT_caps_enable = 0;
+ pmlmeinfo->HT_info_enable = 0;
+ pmlmeinfo->agg_enable_bitmap = 0;
+ pmlmeinfo->candidate_tid_bitmap = 0;
+ pmlmeinfo->bwmode_updated = false;
+ /* pmlmeinfo->assoc_AP_vendor = HT_IOT_PEER_MAX; */
+
+ if (pparm->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
+ return H2C_PARAMETERS_ERROR;
+
+ memcpy(pnetwork, pbuf, sizeof(struct wlan_bssid_ex));
+
+ /* Check AP vendor to move rtw_joinbss_cmd23a() */
+ /* pmlmeinfo->assoc_AP_vendor = check_assoc_AP23a(pnetwork->IEs,
+ pnetwork->IELength); */
+
+ for (i = sizeof(struct ndis_802_11_fixed_ies); i < pnetwork->IELength;)
+ {
+ pIE = (struct ndis_802_11_var_ies *)(pnetwork->IEs + i);
+
+ switch (pIE->ElementID)
+ {
+ case _VENDOR_SPECIFIC_IE_:/* Get WMM IE. */
+ if (!memcmp(pIE->data, WMM_OUI23A, 4))
+ pmlmeinfo->WMM_enable = 1;
+ break;
+
+ case _HT_CAPABILITY_IE_: /* Get HT Cap IE. */
+ pmlmeinfo->HT_caps_enable = 1;
+ break;
+
+ case _HT_EXTRA_INFO_IE_: /* Get HT Info IE. */
+ pmlmeinfo->HT_info_enable = 1;
+
+			/* special case only for Cisco APs, because Cisco APs
+			 * issue the assoc rsp using an MCS rate at 40MHz or 20MHz */
+ pht_info = (struct HT_info_element *)(pIE->data);
+
+ if ((pregpriv->cbw40_enable) &&
+ (pht_info->infos[0] & BIT(2))) {
+				/* switch to the 40 MHz mode according to the AP */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
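+				/* secondary channel offset field: 1 = secondary
+				 * above (primary is the lower channel), 3 =
+				 * secondary below (primary is the upper channel) */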
+ switch (pht_info->infos[0] & 0x3)
+ {
+ case 1:
+ pmlmeext->cur_ch_offset =
+ HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+
+ case 3:
+ pmlmeext->cur_ch_offset =
+ HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+
+ default:
+ pmlmeext->cur_ch_offset =
+ HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+
+ DBG_8723A("set ch/bw before connected\n");
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+ /* disable dynamic functions, such as high power, DIG */
+ /* Switch_DM_Func23a(padapter, DYNAMIC_FUNC_DISABLE, false); */
+
+ /* config the initial gain under linking, need to write the BB
+ registers */
+ /* initialgain = 0x1E; */
+ /* rtw_hal_set_hwreg23a(padapter, HW_VAR_INITIAL_GAIN,
+ (u8 *)(&initialgain)); */
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BSSID,
+ pmlmeinfo->network.MacAddress);
+ join_type = 0;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+
+ /* cancel link timer */
+ del_timer_sync(&pmlmeext->link_timer);
+
+ start_clnt_join23a(padapter);
+
+ return H2C_SUCCESS;
+}
+
+u8 disconnect_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf)
+{
+ struct disconnect_parm *param = (struct disconnect_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
+ u8 val8;
+
+ if (is_client_associated_to_ap23a(padapter))
+ {
+ issue_deauth23a_ex23a(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100);
+ }
+
+ /* set_opmode_cmd(padapter, infra_client_with_mlme); */
+
+ /* pmlmeinfo->state = WIFI_FW_NULL_STATE; */
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BSSID, null_addr);
+
+ /* restore to initial setting. */
+ update_tx_basic_rate23a(padapter, padapter->registrypriv.wireless_mode);
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE))
+ {
+ /* Stop BCN */
+ val8 = 0;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BCN_FUNC, (u8 *)(&val8));
+ }
+
+ /* set MSR to no link state -> infra. mode */
+ Set_MSR23a(padapter, _HW_STATE_STATION_);
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+	/* switch to the 20 MHz mode after disconnect */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ flush_all_cam_entry23a(padapter);
+
+ del_timer_sync(&pmlmeext->link_timer);
+
+ rtw_free_uc_swdec_pending_queue23a(padapter);
+
+ return H2C_SUCCESS;
+}
+
+static int rtw_scan_ch_decision(struct rtw_adapter *padapter, struct rtw_ieee80211_channel *out,
+ u32 out_num, struct rtw_ieee80211_channel *in, u32 in_num)
+{
+ int i, j;
+ int scan_ch_num = 0;
+ int set_idx;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ /* clear out first */
+ memset(out, 0, sizeof(struct rtw_ieee80211_channel)*out_num);
+
+ /* acquire channels from in */
+ j = 0;
+	for (i = 0; i < in_num; i++) {
+ if (0)
+ DBG_8723A(FUNC_ADPT_FMT" "CHAN_FMT"\n", FUNC_ADPT_ARG(padapter), CHAN_ARG(&in[i]));
+ if (in[i].hw_value && !(in[i].flags & IEEE80211_CHAN_DISABLED)
+ && (set_idx = rtw_ch_set_search_ch23a(pmlmeext->channel_set, in[i].hw_value)) >= 0
+ )
+ {
+ memcpy(&out[j], &in[i], sizeof(struct rtw_ieee80211_channel));
+
+ if (pmlmeext->channel_set[set_idx].ScanType == SCAN_PASSIVE)
+				out[j].flags |= IEEE80211_CHAN_NO_IR;
+
+ j++;
+ }
+		if (j >= out_num)
+ break;
+ }
+
+ /* if out is empty, use channel_set as default */
+ if (j == 0) {
+		for (i = 0; i < pmlmeext->max_chan_nums; i++) {
+ out[i].hw_value = pmlmeext->channel_set[i].ChannelNum;
+
+ if (pmlmeext->channel_set[i].ScanType == SCAN_PASSIVE)
+				out[i].flags |= IEEE80211_CHAN_NO_IR;
+
+ j++;
+ }
+ }
+
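+	/* Filter by the configured band: hw_value 1-14 are 2.4 GHz channels,
+	 * anything above 35 (i.e. 36 and up) is treated as 5 GHz. */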
+ if (padapter->setband == GHZ_24) { /* 2.4G */
+ for (i = 0; i < j ; i++) {
+ if (out[i].hw_value > 35)
+ memset(&out[i], 0,
+ sizeof(struct rtw_ieee80211_channel));
+ else
+ scan_ch_num++;
+ }
+ j = scan_ch_num;
+ } else if (padapter->setband == GHZ_50) { /* 5G */
+ for (i = 0; i < j ; i++) {
+ if (out[i].hw_value > 35) {
+ memcpy(&out[scan_ch_num++], &out[i], sizeof(struct rtw_ieee80211_channel));
+ }
+ }
+ j = scan_ch_num;
+	}
+
+ return j;
+}
+
+u8 sitesurvey_cmd_hdl23a(struct rtw_adapter *padapter, u8 *pbuf)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct sitesurvey_parm *pparm = (struct sitesurvey_parm *)pbuf;
+ u8 bdelayscan = false;
+ u8 val8;
+ u32 initialgain;
+ u32 i;
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_DISABLE) {
+ /* for first time sitesurvey_cmd */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_CHECK_TXBUF, NULL);
+
+ pmlmeext->sitesurvey_res.state = SCAN_START;
+ pmlmeext->sitesurvey_res.bss_cnt = 0;
+ pmlmeext->sitesurvey_res.channel_idx = 0;
+
+ for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
+ if (pparm->ssid[i].ssid_len) {
+ memcpy(pmlmeext->sitesurvey_res.ssid[i].ssid,
+ pparm->ssid[i].ssid, IW_ESSID_MAX_SIZE);
+ pmlmeext->sitesurvey_res.ssid[i].ssid_len =
+ pparm->ssid[i].ssid_len;
+ } else {
+ pmlmeext->sitesurvey_res.ssid[i].ssid_len = 0;
+ }
+ }
+
+ pmlmeext->sitesurvey_res.ch_num =
+ rtw_scan_ch_decision(padapter,
+ pmlmeext->sitesurvey_res.ch,
+ RTW_CHANNEL_SCAN_AMOUNT,
+ pparm->ch, pparm->ch_num);
+
+ pmlmeext->sitesurvey_res.scan_mode = pparm->scan_mode;
+
+		/* issue null data if associated with the AP */
+ if (is_client_associated_to_ap23a(padapter)) {
+ pmlmeext->sitesurvey_res.state = SCAN_TXNULL;
+
+			/* switch to the correct channel of the current network
+			   before issuing keep-alive frames */
+ if (rtw_get_oper_ch23a(padapter) != pmlmeext->cur_channel)
+ SelectChannel23a(padapter, pmlmeext->cur_channel);
+
+ issue_nulldata23a(padapter, NULL, 1, 3, 500);
+
+ bdelayscan = true;
+ }
+
+ if (bdelayscan) {
+ /* delay 50ms to protect nulldata(1). */
+ set_survey_timer(pmlmeext, 50);
+ return H2C_SUCCESS;
+ }
+ }
+
+ if ((pmlmeext->sitesurvey_res.state == SCAN_START) ||
+ (pmlmeext->sitesurvey_res.state == SCAN_TXNULL)) {
+ /* disable dynamic functions, such as high power, DIG */
+ Save_DM_Func_Flag23a(padapter);
+ Switch_DM_Func23a(padapter, DYNAMIC_FUNC_DISABLE, false);
+
+		/* config the initial gain under scanning, need to
+		   write the BB registers */
+		if ((wdev_to_priv(padapter->rtw_wdev))->p2p_enabled == true)
+			initialgain = 0x30;
+		else
+			initialgain = 0x1E;
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_INITIAL_GAIN,
+ (u8 *)(&initialgain));
+
+ /* set MSR to no link state */
+ Set_MSR23a(padapter, _HW_STATE_NOLINK_);
+
+ val8 = 1; /* under site survey */
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_SITESURVEY,
+ (u8 *)(&val8));
+
+ pmlmeext->sitesurvey_res.state = SCAN_PROCESS;
+ }
+
+ site_survey23a(padapter);
+
+ return H2C_SUCCESS;
+}
+
+u8 setauth_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf)
+{
+ struct setauth_parm *pparm = (struct setauth_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if (pparm->mode < 4)
+ {
+ pmlmeinfo->auth_algo = pparm->mode;
+ }
+
+ return H2C_SUCCESS;
+}
+
+u8 setkey_hdl23a(struct rtw_adapter *padapter, u8 *pbuf)
+{
+ unsigned short ctrl;
+ struct setkey_parm *pparm = (struct setkey_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ /* main tx key for wep. */
+ if (pparm->set_tx)
+ pmlmeinfo->key_index = pparm->keyid;
+
+ /* write cam */
+ ctrl = BIT(15) | ((pparm->algorithm) << 2) | pparm->keyid;
+
+ DBG_8723A_LEVEL(_drv_always_, "set group key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) "
+ "keyid:%d\n", pparm->algorithm, pparm->keyid);
+ write_cam23a(padapter, pparm->keyid, ctrl, null_sta, pparm->key);
+
+ /* allow multicast packets to driver */
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_ON_RCR_AM, null_addr);
+
+ return H2C_SUCCESS;
+}
+
+u8 set_stakey_hdl23a(struct rtw_adapter *padapter, u8 *pbuf)
+{
+ u16 ctrl = 0;
+ u8 cam_id;/* cam_entry */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct set_stakey_parm *pparm = (struct set_stakey_parm *)pbuf;
+
+ /* cam_entry: */
+ /* 0~3 for default key */
+
+ /* for concurrent mode (ap+sta): */
+	/* default key is disabled, using SW encrypt/decrypt */
+ /* cam_entry = 4 for sta mode (macid = 0) */
+ /* cam_entry(macid+3) = 5 ~ N for ap mode (aid = 1~N, macid = 2 ~N) */
+
+ /* for concurrent mode (sta+sta): */
+	/* default key is disabled, using SW encrypt/decrypt */
+ /* cam_entry = 4 mapping to macid = 0 */
+ /* cam_entry = 5 mapping to macid = 2 */
+
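+	/* Illustration, assuming the mapping described above: in AP mode a
+	 * station with aid 1 has macid 2 and therefore uses cam entry 5
+	 * (macid + 3), while in STA mode the pairwise key always goes to the
+	 * default cam entry 4 set below. */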
+ cam_id = 4;
+
+ DBG_8723A_LEVEL(_drv_always_, "set pairwise key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) camid:%d\n",
+ pparm->algorithm, cam_id);
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ {
+
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (pparm->algorithm == _NO_PRIVACY_) /* clear cam entry */
+ {
+ clear_cam_entry23a(padapter, pparm->id);
+ return H2C_SUCCESS_RSP;
+ }
+
+ psta = rtw_get_stainfo23a(pstapriv, pparm->addr);
+ if (psta)
+ {
+ ctrl = (BIT(15) | ((pparm->algorithm) << 2));
+
+ DBG_8723A("r871x_set_stakey_hdl23a(): enc_algorithm =%d\n", pparm->algorithm);
+
+ if ((psta->mac_id<1) || (psta->mac_id>(NUM_STA-4)))
+ {
+ DBG_8723A("r871x_set_stakey_hdl23a():set_stakey failed, mac_id(aid) =%d\n", psta->mac_id);
+ return H2C_REJECTED;
+ }
+
+ cam_id = (psta->mac_id + 3);/* 0~3 for default key, cmd_id = macid + 3, macid = aid+1; */
+
+ DBG_8723A("Write CAM, mac_addr =%x:%x:%x:%x:%x:%x, cam_entry =%d\n", pparm->addr[0],
+ pparm->addr[1], pparm->addr[2], pparm->addr[3], pparm->addr[4],
+ pparm->addr[5], cam_id);
+
+ write_cam23a(padapter, cam_id, ctrl, pparm->addr, pparm->key);
+
+ return H2C_SUCCESS_RSP;
+
+ }
+ else
+ {
+ DBG_8723A("r871x_set_stakey_hdl23a(): sta has been free\n");
+ return H2C_REJECTED;
+ }
+
+ }
+
+ /* below for sta mode */
+
+ if (pparm->algorithm == _NO_PRIVACY_) /* clear cam entry */
+ {
+ clear_cam_entry23a(padapter, pparm->id);
+ return H2C_SUCCESS;
+ }
+
+ ctrl = BIT(15) | ((pparm->algorithm) << 2);
+
+ write_cam23a(padapter, cam_id, ctrl, pparm->addr, pparm->key);
+
+ pmlmeinfo->enc_algo = pparm->algorithm;
+
+ return H2C_SUCCESS;
+}
+
+u8 add_ba_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf)
+{
+ struct addBaReq_parm *pparm = (struct addBaReq_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ struct sta_info *psta = rtw_get_stainfo23a(&padapter->stapriv, pparm->addr);
+
+ if (!psta)
+ return H2C_SUCCESS;
+
+ if (((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) &&
+ (pmlmeinfo->HT_enable)) ||
+ ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) {
+ issue_action_BA23a(padapter, pparm->addr,
+ WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid);
+ mod_timer(&psta->addba_retry_timer,
+ jiffies + msecs_to_jiffies(ADDBA_TO));
+ } else {
+ psta->htpriv.candidate_tid_bitmap &= ~CHKBIT(pparm->tid);
+ }
+ return H2C_SUCCESS;
+}
+
+u8 set_tx_beacon_cmd23a(struct rtw_adapter* padapter)
+{
+ struct cmd_obj *ph2c;
+ struct Tx_Beacon_param *ptxBeacon_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u8 res = _SUCCESS;
+ int len_diff = 0;
+
+ ph2c = (struct cmd_obj *)kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!ph2c) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ ptxBeacon_parm = (struct Tx_Beacon_param *)
+ kzalloc(sizeof(struct Tx_Beacon_param), GFP_ATOMIC);
+ if (!ptxBeacon_parm) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ memcpy(&ptxBeacon_parm->network, &pmlmeinfo->network,
+ sizeof(struct wlan_bssid_ex));
+
+ len_diff = update_hidden_ssid(
+ ptxBeacon_parm->network.IEs+_BEACON_IE_OFFSET_,
+ ptxBeacon_parm->network.IELength-_BEACON_IE_OFFSET_,
+ pmlmeinfo->hidden_ssid_mode);
+ ptxBeacon_parm->network.IELength += len_diff;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, ptxBeacon_parm, GEN_CMD_CODE(_TX_Beacon));
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+}
+
+u8 mlme_evt_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf)
+{
+ u8 evt_code, evt_seq;
+ u16 evt_sz;
+ uint *peventbuf;
+ void (*event_callback)(struct rtw_adapter *dev, u8 *pbuf);
+ struct evt_priv *pevt_priv = &padapter->evtpriv;
+
+ peventbuf = (uint*)pbuf;
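+	/* The first 32-bit word of the event buffer packs the C2H header:
+	 * bits 0-15 hold the payload size, bits 16-23 the event code and
+	 * bits 24-30 the sequence number; the payload itself starts two
+	 * 32-bit words in (hence peventbuf += 2 below). */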
+ evt_sz = (u16)(*peventbuf&0xffff);
+ evt_seq = (u8)((*peventbuf>>24)&0x7f);
+ evt_code = (u8)((*peventbuf>>16)&0xff);
+
+ /* checking if event code is valid */
+ if (evt_code >= MAX_C2HEVT) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nEvent Code(%d) mismatch!\n", evt_code));
+ goto _abort_event_;
+ }
+
+ /* checking if event size match the event parm size */
+ if ((wlanevents[evt_code].parmsize != 0) &&
+ (wlanevents[evt_code].parmsize != evt_sz)) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nEvent(%d) Parm Size mismatch (%d vs %d)!\n",
+ evt_code, wlanevents[evt_code].parmsize, evt_sz));
+ goto _abort_event_;
+ }
+
+ atomic_inc(&pevt_priv->event_seq);
+
+ peventbuf += 2;
+
+ if (peventbuf) {
+ event_callback = wlanevents[evt_code].event_callback;
+ event_callback(padapter, (u8*)peventbuf);
+
+ pevt_priv->evt_done_cnt++;
+ }
+
+_abort_event_:
+
+ return H2C_SUCCESS;
+}
+
+u8 h2c_msg_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf)
+{
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ return H2C_SUCCESS;
+}
+
+u8 tx_beacon_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf)
+{
+ if (send_beacon23a(padapter) == _FAIL)
+ {
+ DBG_8723A("issue_beacon23a, fail!\n");
+ return H2C_PARAMETERS_ERROR;
+ }
+#ifdef CONFIG_8723AU_AP_MODE
+	else /* tx bc/mc frames after updating TIM */
+ {
+ struct sta_info *psta_bmc;
+ struct list_head *plist, *phead, *ptmp;
+ struct xmit_frame *pxmitframe;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* for BC/MC Frames */
+ psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
+ if (!psta_bmc)
+ return H2C_SUCCESS;
+
+ if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len>0))
+ {
+			msleep(10);/* 10ms, ATIM(HIQ) window */
+ /* spin_lock_bh(&psta_bmc->sleep_q.lock); */
+ spin_lock_bh(&pxmitpriv->lock);
+
+ phead = get_list_head(&psta_bmc->sleep_q);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ pxmitframe = container_of(plist,
+ struct xmit_frame,
+ list);
+
+ list_del_init(&pxmitframe->list);
+
+ psta_bmc->sleepq_len--;
+ if (psta_bmc->sleepq_len>0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ pxmitframe->attrib.triggered = 1;
+
+ pxmitframe->attrib.qsel = 0x11;/* HIQ */
+
+ rtw_hal_xmit23aframe_enqueue(padapter, pxmitframe);
+ }
+
+ /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
+ }
+
+ }
+#endif
+
+ return H2C_SUCCESS;
+}
+
+u8 set_ch_hdl23a(struct rtw_adapter *padapter, u8 *pbuf)
+{
+ struct set_ch_parm *set_ch_parm;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ set_ch_parm = (struct set_ch_parm *)pbuf;
+
+ DBG_8723A(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev),
+ set_ch_parm->ch, set_ch_parm->bw, set_ch_parm->ch_offset);
+
+ pmlmeext->cur_channel = set_ch_parm->ch;
+ pmlmeext->cur_ch_offset = set_ch_parm->ch_offset;
+ pmlmeext->cur_bwmode = set_ch_parm->bw;
+
+ set_channel_bwmode23a(padapter, set_ch_parm->ch, set_ch_parm->ch_offset, set_ch_parm->bw);
+
+ return H2C_SUCCESS;
+}
+
+u8 set_chplan_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf)
+{
+ struct SetChannelPlan_param *setChannelPlan_param;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ setChannelPlan_param = (struct SetChannelPlan_param *)pbuf;
+
+ pmlmeext->max_chan_nums = init_channel_set(padapter, setChannelPlan_param->channel_plan, pmlmeext->channel_set);
+ init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);
+
+ return H2C_SUCCESS;
+}
+
+u8 led_blink_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf)
+{
+ struct LedBlink_param *ledBlink_param;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ ledBlink_param = (struct LedBlink_param *)pbuf;
+
+ return H2C_SUCCESS;
+}
+
+u8 set_csa_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf)
+{
+ return H2C_REJECTED;
+}
+
+/* TDLS_WRCR : write RCR DATA BIT */
+/* TDLS_SD_PTI : issue peer traffic indication */
+/* TDLS_CS_OFF : go back to the channel linked with the AP, terminating the channel switch procedure */
+/* TDLS_INIT_CH_SEN : init channel sensing, receive all data and mgnt frames */
+/* TDLS_DONE_CH_SEN : channel sensing done, report candidate channels */
+/* TDLS_OFF_CH : first time the channel is set to the off channel */
+/* TDLS_BASE_CH : go back to the channel linked with the AP when the base channel is set as the target channel */
+/* TDLS_P_OFF_CH : periodically go to the off channel */
+/* TDLS_P_BASE_CH : periodically go back to the base channel */
+/* TDLS_RS_RCR : restore RCR */
+/* TDLS_CKALV_PH1 : check alive timer, phase 1 */
+/* TDLS_CKALV_PH2 : check alive timer, phase 2 */
+/* TDLS_FREE_STA : free the TDLS sta */
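+/* TDLS is not supported by this driver, so the handler below simply
+ * rejects any TDLS command. */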
+u8 tdls_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf)
+{
+ return H2C_REJECTED;
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_p2p.c b/drivers/staging/rtl8723au/core/rtw_p2p.c
new file mode 100644
index 000000000000..27a6cc76973d
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_p2p.c
@@ -0,0 +1,4001 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_P2P_C_
+
+#include <drv_types.h>
+#include <rtw_p2p.h>
+#include <wifi.h>
+
+#ifdef CONFIG_8723AU_P2P
+
+static int rtw_p2p_is_channel_list_ok(u8 desired_ch, u8 *ch_list, u8 ch_cnt)
+{
+	int found = 0, i = 0;
+
+	for (i = 0; i < ch_cnt; i++) {
+		if (ch_list[i] == desired_ch) {
+			found = 1;
+			break;
+		}
+	}
+	return found;
+}
+
+static int is_any_client_associated(struct rtw_adapter *padapter)
+{
+ return padapter->stapriv.asoc_list_cnt ? true : false;
+}
+
+static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ struct list_head *phead, *plist;
+ u32 len = 0;
+ u16 attr_len = 0;
+ u8 tmplen, *pdata_attr, *pstart, *pcur;
+ struct sta_info *psta;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_8723A("%s\n", __func__);
+
+	pdata_attr = kzalloc(MAX_P2P_IE_LEN, GFP_ATOMIC);
+	if (!pdata_attr)
+		return 0;
+
+ pstart = pdata_attr;
+ pcur = pdata_attr;
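+	/* Walk the association list and append one client info descriptor per
+	 * P2P client: a length byte, the device and interface addresses, the
+	 * device capability, config methods, primary and secondary device
+	 * types and, when present, the device name. */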
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ phead = &pstapriv->asoc_list;
+
+ list_for_each(plist, phead) {
+ psta = container_of(plist, struct sta_info, asoc_list);
+
+ if (psta->is_p2p_device)
+ {
+ tmplen = 0;
+
+ pcur++;
+
+ /* P2P device address */
+ memcpy(pcur, psta->dev_addr, ETH_ALEN);
+ pcur += ETH_ALEN;
+
+ /* P2P interface address */
+ memcpy(pcur, psta->hwaddr, ETH_ALEN);
+ pcur += ETH_ALEN;
+
+ *pcur = psta->dev_cap;
+ pcur++;
+
+ /* u16*)(pcur) = cpu_to_be16(psta->config_methods); */
+ put_unaligned_be16(psta->config_methods, pcur);
+ pcur += 2;
+
+ memcpy(pcur, psta->primary_dev_type, 8);
+ pcur += 8;
+
+ *pcur = psta->num_of_secdev_type;
+ pcur++;
+
+ memcpy(pcur, psta->secdev_types_list, psta->num_of_secdev_type*8);
+ pcur += psta->num_of_secdev_type*8;
+
+ if (psta->dev_name_len>0)
+ {
+ /* u16*)(pcur) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
+ put_unaligned_be16(WPS_ATTR_DEVICE_NAME, pcur);
+ pcur += 2;
+
+ /* u16*)(pcur) = cpu_to_be16(psta->dev_name_len); */
+ put_unaligned_be16(psta->dev_name_len, pcur);
+ pcur += 2;
+
+ memcpy(pcur, psta->dev_name, psta->dev_name_len);
+ pcur += psta->dev_name_len;
+ }
+
+ tmplen = (u8)(pcur-pstart);
+
+ *pstart = (tmplen-1);
+
+ attr_len += tmplen;
+
+ /* pstart += tmplen; */
+ pstart = pcur;
+
+ }
+
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ if (attr_len>0)
+ {
+ len = rtw_set_p2p_attr_content23a(pbuf, P2P_ATTR_GROUP_INFO, attr_len, pdata_attr);
+ }
+
+ kfree(pdata_attr);
+
+ return len;
+}
+
+static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ unsigned char category = WLAN_CATEGORY_VENDOR_SPECIFIC;/* P2P action frame */
+ u32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_DISC_REQUEST;
+ u8 dialogToken = 0;
+
+ DBG_8723A("[%s]\n", __func__);
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ {
+ return;
+ }
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pwdinfo->interface_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->interface_addr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ /* Build P2P action frame header */
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 4, (unsigned char *)&p2poui, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &oui_subtype, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &dialogToken, &pattrib->pktlen);
+
+ /* there is no IE in this P2P action frame */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+}
+
+static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 status, u8 dialogToken)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ u32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_DEVDISC_RESP;
+ u8 p2pie[8] = { 0x00 };
+ u32 p2pielen = 0;
+
+ DBG_8723A("[%s]\n", __func__);
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ {
+ return;
+ }
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pwdinfo->device_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->device_addr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ /* Build P2P public action frame header */
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 4, (unsigned char *) &p2poui,
+ &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &oui_subtype, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &dialogToken, &pattrib->pktlen);
+
+ /* Build P2P IE */
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[ p2pielen++ ] = 0x50;
+ p2pie[ p2pielen++ ] = 0x6F;
+ p2pie[ p2pielen++ ] = 0x9A;
+ p2pie[ p2pielen++ ] = 0x09; /* WFA P2P v1.0 */
+
+ /* P2P_ATTR_STATUS */
+ p2pielen += rtw_set_p2p_attr_content23a(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status);
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, p2pie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+}
+
+static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8* raddr, u8* frame_body, u16 config_method)
+{
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+	u8 dialogToken = frame_body[7];	/* The Dialog Token of the provisioning discovery request frame. */
+ u32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_PROVISION_DISC_RESP;
+ u8 wpsie[ 100 ] = { 0x00 };
+ u8 wpsielen = 0;
+#ifdef CONFIG_8723AU_P2P
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if ((pmgntframe = alloc_mgtxmitframe23a(pxmitpriv)) == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&padapter->eeprompriv), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 4, (unsigned char *) &p2poui,
+ &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &oui_subtype, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &dialogToken, &pattrib->pktlen);
+
+ wpsielen = 0;
+ /* WPS OUI */
+ /* u32*) (wpsie) = cpu_to_be32(WPSOUI); */
+ put_unaligned_be32(WPSOUI, wpsie);
+ wpsielen += 4;
+
+ /* Config Method */
+ /* Type: */
+ /* u16*) (wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD); */
+ put_unaligned_be16(WPS_ATTR_CONF_METHOD, wpsie + wpsielen);
+ wpsielen += 2;
+
+ /* Length: */
+ /* u16*) (wpsie + wpsielen) = cpu_to_be16(0x0002); */
+ put_unaligned_be16(0x0002, wpsie + wpsielen);
+ wpsielen += 2;
+
+ /* Value: */
+ /* u16*) (wpsie + wpsielen) = cpu_to_be16(config_method); */
+ put_unaligned_be16(config_method, wpsie + wpsielen);
+ wpsielen += 2;
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *) wpsie, &pattrib->pktlen);
+
+#ifdef CONFIG_8723AU_P2P
+ wfdielen = build_provdisc_resp_wfd_ie(pwdinfo, pframe);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+
+ return;
+}
+
+static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 status, u8 dialogToken)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ unsigned char category = WLAN_CATEGORY_VENDOR_SPECIFIC;/* P2P action frame */
+ u32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_PRESENCE_RESPONSE;
+ u8 p2pie[ MAX_P2P_IE_LEN] = { 0x00 };
+ u8 noa_attr_content[32] = { 0x00 };
+ u32 p2pielen = 0;
+
+ DBG_8723A("[%s]\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
+ if (!pmgntframe)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pwdinfo->interface_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->interface_addr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ /* Build P2P action frame header */
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 4, (unsigned char *) &p2poui,
+ &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &oui_subtype, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &dialogToken, &pattrib->pktlen);
+
+ /* Add P2P IE header */
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[ p2pielen++ ] = 0x50;
+ p2pie[ p2pielen++ ] = 0x6F;
+ p2pie[ p2pielen++ ] = 0x9A;
+ p2pie[ p2pielen++ ] = 0x09; /* WFA P2P v1.0 */
+
+ /* Add Status attribute in P2P IE */
+ p2pielen += rtw_set_p2p_attr_content23a(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status);
+
+ /* Add NoA attribute in P2P IE */
+ noa_attr_content[0] = 0x1;/* index */
+ noa_attr_content[1] = 0x0;/* CTWindow and OppPS Parameters */
+
+ /* todo: Notice of Absence Descriptor(s) */
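+ /* With no NoA descriptors appended, the 2-byte value carries only the */
+ /* 1-byte index and a zero CTWindow/OppPS parameters byte (OppPS off). */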
+
+ p2pielen += rtw_set_p2p_attr_content23a(&p2pie[p2pielen], P2P_ATTR_NOA, 2, noa_attr_content);
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, p2pie,
+ &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+}
+
+u32 build_beacon_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 p2pie[ MAX_P2P_IE_LEN] = { 0x00 };
+ u16 capability = 0;
+ u32 len = 0, p2pielen = 0;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[ p2pielen++ ] = 0x50;
+ p2pie[ p2pielen++ ] = 0x6F;
+ p2pie[ p2pielen++ ] = 0x9A;
+ p2pie[ p2pielen++ ] = 0x09; /* WFA P2P v1.0 */
+
+ /* According to the P2P Specification, the beacon frame should contain 3 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. P2P Device ID */
+ /* 3. Notice of Absence (NOA) */
+
+ /* P2P Capability ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ /* Be able to participate in additional P2P Groups and */
+ /* support the P2P Invitation Procedure */
+ /* Group Capability Bitmap, 1 byte */
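+ /* The two bitmaps are packed into one 16-bit attribute value: device */
+ /* capability in the low byte, group capability in the high byte */
+ /* (hence the << 8 shifts below), transmitted little endian. */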
+ capability = P2P_DEVCAP_INVITATION_PROC|P2P_DEVCAP_CLIENT_DISCOVERABILITY;
+ capability |= ((P2P_GRPCAP_GO | P2P_GRPCAP_INTRABSS) << 8);
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
+ capability |= (P2P_GRPCAP_GROUP_FORMATION<<8);
+
+ capability = cpu_to_le16(capability);
+
+ p2pielen += rtw_set_p2p_attr_content23a(&p2pie[p2pielen], P2P_ATTR_CAPABILITY, 2, (u8*)&capability);
+
+ /* P2P Device ID ATTR */
+ p2pielen += rtw_set_p2p_attr_content23a(&p2pie[p2pielen], P2P_ATTR_DEVICE_ID, ETH_ALEN, pwdinfo->device_addr);
+
+ /* Notice of Absence ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+
+ /* go_add_noa_attr(pwdinfo); */
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *) p2pie, &len);
+
+ return len;
+}
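+
+/*
+ * Note on usage: the build_*_ie helpers in this file generally append a
+ * single vendor-specific IE to the supplied buffer and return the number
+ * of bytes written (IE header included), so a typical caller advances its
+ * frame pointer and pktlen accounting by the returned length.
+ */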
+
+#ifdef CONFIG_8723AU_P2P
+u32 build_beacon_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wifi_display_info* pwfd_info = padapter->wdinfo.wfd_info;
+
+ /* WFD OUI */
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110812 */
+ /* According to the WFD Specification, the beacon frame should contain the following WFD attributes: */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID */
+ /* 3. Coupled Sink Information */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
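+ /* The 6-byte value is: device information bitmap (2 bytes), RTSP */
+ /* session management control port (2 bytes) and maximum throughput */
+ /* in Mbps (2 bytes), all big endian. */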
+ /* Value1: */
+ /* WFD device information */
+
+ if (P2P_ROLE_GO == pwdinfo->role)
+ {
+ if (is_any_client_associated(pwdinfo->padapter))
+ {
+ /* WFD primary sink + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_WSD, wfdie + wfdielen);
+ }
+ else
+ {
+ /* WFD primary sink + available for WFD session + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL |
+ WFD_DEVINFO_WSD, wfdie + wfdielen);
+ }
+
+ }
+ else
+ {
+ /* WFD primary sink + available for WFD session + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL |
+ WFD_DEVINFO_WSD, wfdie + wfdielen);
+ }
+
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
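+ /* The 7-byte value is a 1-byte coupled sink status followed by the */
+ /* 6-byte MAC address of the coupled sink (all zero when not coupled). */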
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+u32 build_probe_req_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wifi_display_info* pwfd_info = padapter->wdinfo.wfd_info;
+
+ /* WFD OUI */
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110812 */
+ /* According to the WFD Specification, the probe request frame should contain the following WFD attributes: */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID */
+ /* 3. Coupled Sink Information */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value1: */
+ /* WFD device information */
+
+ if (1 == pwdinfo->wfd_tdls_enable)
+ {
+ /* WFD primary sink + available for WFD session + WiFi TDLS mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL |
+ WFD_DEVINFO_WSD |
+ WFD_DEVINFO_PC_TDLS, wfdie + wfdielen);
+ }
+ else
+ {
+ /* WFD primary sink + available for WFD session + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL |
+ WFD_DEVINFO_WSD, wfdie + wfdielen);
+ }
+
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+u32 build_probe_resp_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf, u8 tunneled)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wifi_display_info* pwfd_info = padapter->wdinfo.wfd_info;
+
+ /* WFD OUI */
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110812 */
+ /* According to the WFD Specification, the probe response frame should contain 4 WFD attributes */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID */
+ /* 3. Coupled Sink Information */
+ /* 4. WFD Session Information */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value1: */
+ /* WFD device information */
+ /* WFD primary sink + available for WFD session + WiFi Direct mode */
+
+ if (true == pwdinfo->session_available)
+ {
+ if (P2P_ROLE_GO == pwdinfo->role)
+ {
+ if (is_any_client_associated(pwdinfo->padapter))
+ {
+ if (pwdinfo->wfd_tdls_enable)
+ {
+ /* TDLS mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type | WFD_DEVINFO_WSD | WFD_DEVINFO_PC_TDLS | WFD_DEVINFO_HDCP_SUPPORT, wfdie + wfdielen);
+ }
+ else
+ {
+ /* WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type | WFD_DEVINFO_WSD | WFD_DEVINFO_HDCP_SUPPORT, wfdie + wfdielen);
+ }
+ }
+ else
+ {
+ if (pwdinfo->wfd_tdls_enable)
+ {
+ /* available for WFD session + TDLS mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type | WFD_DEVINFO_SESSION_AVAIL | WFD_DEVINFO_WSD | WFD_DEVINFO_PC_TDLS | WFD_DEVINFO_HDCP_SUPPORT, wfdie + wfdielen);
+ }
+ else
+ {
+ /* available for WFD session + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type | WFD_DEVINFO_SESSION_AVAIL | WFD_DEVINFO_WSD | WFD_DEVINFO_HDCP_SUPPORT, wfdie + wfdielen);
+ }
+ }
+ }
+ else
+ {
+ if (pwdinfo->wfd_tdls_enable)
+ {
+ /* available for WFD session + WiFi TDLS mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL |
+ WFD_DEVINFO_WSD |
+ WFD_DEVINFO_PC_TDLS |
+ WFD_DEVINFO_HDCP_SUPPORT,
+ wfdie + wfdielen);
+ }
+ else
+ {
+ /* available for WFD session + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL |
+ WFD_DEVINFO_WSD |
+ WFD_DEVINFO_HDCP_SUPPORT,
+ wfdie + wfdielen);
+ }
+ }
+ }
+ else
+ {
+ if (pwdinfo->wfd_tdls_enable)
+ {
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_WSD |
+ WFD_DEVINFO_PC_TDLS |
+ WFD_DEVINFO_HDCP_SUPPORT,
+ wfdie + wfdielen);
+ }
+ else
+ {
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_WSD |
+ WFD_DEVINFO_HDCP_SUPPORT,
+ wfdie + wfdielen);
+ }
+
+ }
+
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ {
+ /* WFD Session Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_SESSION_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0000, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Todo: to add the list of WFD device info descriptor in WFD group. */
+
+ }
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+u32 build_assoc_req_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = NULL;
+ struct mlme_priv *pmlmepriv = NULL;
+ struct wifi_display_info *pwfd_info = NULL;
+
+ /* WFD OUI */
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE))
+ {
+ return 0;
+ }
+
+ padapter = pwdinfo->padapter;
+ pmlmepriv = &padapter->mlmepriv;
+ pwfd_info = padapter->wdinfo.wfd_info;
+
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110812 */
+ /* According to the WFD Specification, the association request frame should contain the following WFD attributes: */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID */
+ /* 3. Coupled Sink Information */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value1: */
+ /* WFD device information */
+ /* WFD primary sink + available for WFD session + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL |
+ WFD_DEVINFO_WSD, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+u32 build_assoc_resp_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wifi_display_info* pwfd_info = padapter->wdinfo.wfd_info;
+
+ /* WFD OUI */
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110812 */
+ /* According to the WFD Specification, the association response frame should contain the following WFD attributes: */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID */
+ /* 3. Coupled Sink Information */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value1: */
+ /* WFD device information */
+ /* WFD primary sink + available for WFD session + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL |
+ WFD_DEVINFO_WSD, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+u32 build_nego_req_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wifi_display_info* pwfd_info = padapter->wdinfo.wfd_info;
+
+ /* WFD OUI */
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110825 */
+ /* According to the WFD Specification, the negotiation request frame should contain 3 WFD attributes */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID (Optional) */
+ /* 3. Local IP Address (Optional) */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value1: */
+ /* WFD device information */
+ /* WFD primary sink + WiFi Direct mode + WSD (WFD Service Discovery) + WFD Session Available */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_WSD | WFD_DEVINFO_SESSION_AVAIL,
+ wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+u32 build_nego_resp_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wifi_display_info* pwfd_info = padapter->wdinfo.wfd_info;
+
+ /* WFD OUI */
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110825 */
+ /* According to the WFD Specification, the negotiation response frame should contain 3 WFD attributes */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID (Optional) */
+ /* 3. Local IP Address (Optional) */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value1: */
+ /* WFD device information */
+ /* WFD primary sink + WiFi Direct mode + WSD (WFD Service Discovery) + WFD Session Available */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_WSD | WFD_DEVINFO_SESSION_AVAIL,
+ wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+u32 build_nego_confirm_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wifi_display_info* pwfd_info = padapter->wdinfo.wfd_info;
+
+ /* WFD OUI */
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110825 */
+ /* According to the WFD Specification, the negotiation confirm frame should contain 3 WFD attributes */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID (Optional) */
+ /* 3. Local IP Address (Optional) */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value1: */
+ /* WFD device information */
+ /* WFD primary sink + WiFi Direct mode + WSD (WFD Service Discovery) + WFD Session Available */
+ put_unaligned_be16(pwfd_info->wfd_device_type | WFD_DEVINFO_WSD |
+ WFD_DEVINFO_SESSION_AVAIL, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+u32 build_invitation_req_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wifi_display_info* pwfd_info = padapter->wdinfo.wfd_info;
+
+ /* WFD OUI */
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110825 */
+ /* According to the WFD Specification, the invitation request frame should contain 3 WFD attributes */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID (Optional) */
+ /* 3. Local IP Address (Optional) */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value1: */
+ /* WFD device information */
+ /* WFD primary sink + available for WFD session + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL | WFD_DEVINFO_WSD,
+ wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ if (P2P_ROLE_GO == pwdinfo->role)
+ {
+ /* WFD Session Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_SESSION_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0000, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Todo: to add the list of WFD device info descriptor in WFD group. */
+
+ }
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+u32 build_invitation_resp_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wifi_display_info* pwfd_info = padapter->wdinfo.wfd_info;
+
+ /* WFD OUI */
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110825 */
+ /* According to the WFD Specification, the invitation response frame should contain 3 WFD attributes */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID (Optional) */
+ /* 3. Local IP Address (Optional) */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value1: */
+ /* WFD device information */
+ /* WFD primary sink + available for WFD session + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL | WFD_DEVINFO_WSD,
+ wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ if (P2P_ROLE_GO == pwdinfo->role)
+ {
+ /* WFD Session Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_SESSION_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0000, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Todo: to add the list of WFD device info descriptor in WFD group. */
+
+ }
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+u32 build_provdisc_req_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wifi_display_info* pwfd_info = padapter->wdinfo.wfd_info;
+
+ /* WFD OUI */
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110825 */
+ /* According to the WFD Specification, the provision discovery request frame should contain 3 WFD attributes */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID (Optional) */
+ /* 3. Local IP Address (Optional) */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value1: */
+ /* WFD device information */
+ /* WFD primary sink + available for WFD session + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL | WFD_DEVINFO_WSD,
+ wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+u32 build_provdisc_resp_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 wfdie[ MAX_WFD_IE_LEN] = { 0x00 };
+ u32 len = 0, wfdielen = 0;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wifi_display_info* pwfd_info = padapter->wdinfo.wfd_info;
+
+ /* WFD OUI */
+ wfdielen = 0;
+ wfdie[ wfdielen++ ] = 0x50;
+ wfdie[ wfdielen++ ] = 0x6F;
+ wfdie[ wfdielen++ ] = 0x9A;
+ wfdie[ wfdielen++ ] = 0x0A; /* WFA WFD v1.0 */
+
+ /* Commented by Albert 20110825 */
+ /* According to the WFD Specification, the provision discovery response frame should contain 3 WFD attributes */
+ /* 1. WFD Device Information */
+ /* 2. Associated BSSID (Optional) */
+ /* 3. Local IP Address (Optional) */
+
+ /* WFD Device Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value1: */
+ /* WFD device information */
+ /* WFD primary sink + available for WFD session + WiFi Direct mode + WSD (WFD Service Discovery) */
+ put_unaligned_be16(pwfd_info->wfd_device_type |
+ WFD_DEVINFO_SESSION_AVAIL | WFD_DEVINFO_WSD,
+ wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value2: */
+ /* Session Management Control Port */
+ /* Default TCP port for RTSP messages is 554 */
+ put_unaligned_be16(pwfd_info->rtsp_ctrlport, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value3: */
+ /* WFD Device Maximum Throughput */
+ /* 300Mbps is the maximum throughput */
+ put_unaligned_be16(300, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Associated BSSID ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_ASSOC_BSSID;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0006, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Associated BSSID */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ memcpy(wfdie + wfdielen, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ }
+ else
+ {
+ memset(wfdie + wfdielen, 0x00, ETH_ALEN);
+ }
+
+ wfdielen += ETH_ALEN;
+
+ /* Coupled Sink Information ATTR */
+ /* Type: */
+ wfdie[ wfdielen++ ] = WFD_ATTR_COUPLED_SINK_INFO;
+
+ /* Length: */
+ /* Note: In the WFD specification, the size of length field is 2. */
+ put_unaligned_be16(0x0007, wfdie + wfdielen);
+ wfdielen += 2;
+
+ /* Value: */
+ /* Coupled Sink Status bitmap */
+ /* Not coupled/available for Coupling */
+ wfdie[ wfdielen++ ] = 0;
+ /* MAC Addr. */
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+ wfdie[ wfdielen++ ] = 0;
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, wfdielen, (unsigned char *) wfdie, &len);
+
+ return len;
+}
+
+#endif /* CONFIG_8723AU_P2P */
+
+u32 build_probe_resp_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 p2pie[ MAX_P2P_IE_LEN] = { 0x00 };
+ u32 len = 0, p2pielen = 0;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[ p2pielen++ ] = 0x50;
+ p2pie[ p2pielen++ ] = 0x6F;
+ p2pie[ p2pielen++ ] = 0x9A;
+ p2pie[ p2pielen++ ] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20100907 */
+ /* According to the P2P Specification, the probe response frame should contain 5 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Extended Listen Timing */
+ /* 3. Notice of Absence (NOA) (Only GO needs this) */
+ /* 4. Device Info */
+ /* 5. Group Info (Only GO needs this) */
+
+ /* P2P Capability ATTR */
+ /* Type: */
+ p2pie[ p2pielen++ ] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002); */
+ put_unaligned_le16(0x0002, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[ p2pielen++ ] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ {
+ p2pie[ p2pielen ] = (P2P_GRPCAP_GO | P2P_GRPCAP_INTRABSS);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
+ p2pie[ p2pielen ] |= P2P_GRPCAP_GROUP_FORMATION;
+
+ p2pielen++;
+ }
+ else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE))
+ {
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[ p2pielen++ ] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[ p2pielen++ ] = DMP_P2P_GRPCAP_SUPPORT;
+ }
+
+ /* Extended Listen Timing ATTR */
+ /* Type: */
+ p2pie[ p2pielen++ ] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0004); */
+ put_unaligned_le16(0x0004, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF); */
+ put_unaligned_le16(0xFFFF, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF); */
+ put_unaligned_le16(0xFFFF, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Notice of Absence ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ {
+ /* go_add_noa_attr(pwdinfo); */
+ }
+
+ /* Device Info ATTR */
+ /* Type: */
+ p2pie[ p2pielen++ ] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len); */
+ put_unaligned_le16(21 + pwdinfo->device_name_len, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+ /* This field should be big endian. Noted by P2P specification. */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm); */
+ put_unaligned_be16(pwdinfo->supported_wps_cm, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA); */
+ put_unaligned_be16(WPS_PDT_CID_MULIT_MEDIA, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* OUI */
+ /* u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI); */
+ put_unaligned_be32(WPSOUI, p2pie + p2pielen);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER); */
+ put_unaligned_be16(WPS_PDT_SCID_MEDIA_SERVER, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[ p2pielen++ ] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
+ put_unaligned_be16(WPS_ATTR_DEVICE_NAME, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len); */
+ put_unaligned_be16(pwdinfo->device_name_len, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ /* Group Info ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ {
+ p2pielen += go_add_group_info_attr(pwdinfo, p2pie + p2pielen);
+ }
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *) p2pie, &len);
+
+ return len;
+}
+
+u32 build_prov_disc_request_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pbuf, u8* pssid, u8 ussidlen, u8* pdev_raddr)
+{
+ u8 p2pie[ MAX_P2P_IE_LEN] = { 0x00 };
+ u32 len = 0, p2pielen = 0;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[ p2pielen++ ] = 0x50;
+ p2pie[ p2pielen++ ] = 0x6F;
+ p2pie[ p2pielen++ ] = 0x9A;
+ p2pie[ p2pielen++ ] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20110301 */
+ /* According to the P2P Specification, the provision discovery request frame should contain 3 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Device Info */
+ /* 3. Group ID (When joining an operating P2P Group) */
+
+ /* P2P Capability ATTR */
+ /* Type: */
+ p2pie[ p2pielen++ ] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002); */
+ put_unaligned_le16(0x0002, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[ p2pielen++ ] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[ p2pielen++ ] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[ p2pielen++ ] = DMP_P2P_GRPCAP_SUPPORT;
+
+ /* Device Info ATTR */
+ /* Type: */
+ p2pie[ p2pielen++ ] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len); */
+ put_unaligned_le16(21 + pwdinfo->device_name_len, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+ /* This field should be big endian. Noted by P2P specification. */
+ if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PBC)
+ {
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_PBC); */
+ put_unaligned_be16(WPS_CONFIG_METHOD_PBC, p2pie + p2pielen);
+ }
+ else
+ {
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY); */
+ put_unaligned_be16(WPS_CONFIG_METHOD_DISPLAY, p2pie + p2pielen);
+ }
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA); */
+ put_unaligned_be16(WPS_PDT_CID_MULIT_MEDIA, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* OUI */
+ /* u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI); */
+ put_unaligned_be32(WPSOUI, p2pie + p2pielen);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER); */
+ put_unaligned_be16(WPS_PDT_SCID_MEDIA_SERVER, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[ p2pielen++ ] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
+ put_unaligned_be16(WPS_ATTR_DEVICE_NAME, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len); */
+ put_unaligned_be16(pwdinfo->device_name_len, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT))
+ {
+ /* Added by Albert 2011/05/19 */
+ /* In this case, the pdev_raddr is the device address of the group owner. */
+
+ /* P2P Group ID ATTR */
+ /* Type: */
+ p2pie[ p2pielen++ ] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(ETH_ALEN + ussidlen); */
+ put_unaligned_le16(ETH_ALEN + ussidlen, p2pie + p2pielen);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pdev_raddr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ memcpy(p2pie + p2pielen, pssid, ussidlen);
+ p2pielen += ussidlen;
+
+ }
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *) p2pie, &len);
+
+ return len;
+}
+
+u32 build_assoc_resp_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pbuf, u8 status_code)
+{
+ u8 p2pie[ MAX_P2P_IE_LEN] = { 0x00 };
+ u32 len = 0, p2pielen = 0;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[ p2pielen++ ] = 0x50;
+ p2pie[ p2pielen++ ] = 0x6F;
+ p2pie[ p2pielen++ ] = 0x9A;
+ p2pie[ p2pielen++ ] = 0x09; /* WFA P2P v1.0 */
+
+ /* According to the P2P Specification, the Association response frame should contain 2 P2P attributes */
+ /* 1. Status */
+ /* 2. Extended Listen Timing (optional) */
+
+ /* Status ATTR */
+ p2pielen += rtw_set_p2p_attr_content23a(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status_code);
+
+ /* Extended Listen Timing ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+
+ pbuf = rtw_set_ie23a(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *) p2pie, &len);
+
+ return len;
+}
+
+u32 build_deauth_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u32 len = 0;
+
+ return len;
+}
+
+u32 process_probe_req_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *p;
+ u32 ret = false;
+ u8 *p2pie;
+ u32 p2pielen = 0;
+ int ssid_len = 0, rate_cnt = 0;
+
+ p = rtw_get_ie23a(pframe + sizeof(struct ieee80211_hdr_3addr) + _PROBEREQ_IE_OFFSET_, _SUPPORTEDRATES_IE_, (int *)&rate_cnt,
+ len - sizeof(struct ieee80211_hdr_3addr) - _PROBEREQ_IE_OFFSET_);
+
+ if (rate_cnt <= 4)
+ {
+ int i, g_rate = 0;
+
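+ /* 0x02, 0x04, 0x0B and 0x16 are the CCK rates 1/2/5.5/11 Mbps in */
+ /* 500 kbps units; any other value is treated as an OFDM (11g) rate. */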
+ for (i = 0; i < rate_cnt; i++)
+ {
+ if (((*(p + 2 + i) & 0xff) != 0x02) &&
+ ((*(p + 2 + i) & 0xff) != 0x04) &&
+ ((*(p + 2 + i) & 0xff) != 0x0B) &&
+ ((*(p + 2 + i) & 0xff) != 0x16))
+ {
+ g_rate = 1;
+ }
+ }
+
+ if (g_rate == 0)
+ {
+ /* There is no OFDM rate in the Supported Rates IE of this probe request, */
+ /* so it comes from a legacy (11b-only) station; do not handle it as a P2P probe request. */
+ return ret;
+ }
+ }
+ else
+ {
+ /* rate_cnt > 4 means the Supported Rates IE contains at least one OFDM rate, since there are only four CCK rates. */
+ /* Proceed with the checks below for this probe request. */
+ }
+
+ /* Added comments by Albert 20100906 */
+ /* There are several items we should check here. */
+ /* 1. This probe request frame must contain the P2P IE. (Done) */
+ /* 2. This probe request frame must contain the wildcard SSID. (Done) */
+ /* 3. Wildcard BSSID. (Todo) */
+ /* 4. Destination Address. (Done in mgt_dispatcher23a function) */
+ /* 5. Requested Device Type in WSC IE. (Todo) */
+ /* 6. Device ID attribute in P2P IE. (Todo) */
+
+ p = rtw_get_ie23a(pframe + sizeof(struct ieee80211_hdr_3addr) + _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ssid_len,
+ len - sizeof(struct ieee80211_hdr_3addr) - _PROBEREQ_IE_OFFSET_);
+
+ ssid_len &= 0xff; /* Only the low byte is valid as the SSID length of the probe request */
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ {
+ if ((p2pie = rtw_get_p2p_ie23a(pframe + sizeof(struct ieee80211_hdr_3addr) + _PROBEREQ_IE_OFFSET_, len - sizeof(struct ieee80211_hdr_3addr) - _PROBEREQ_IE_OFFSET_, NULL, &p2pielen)))
+ {
+ if ((p) && !memcmp((void *)(p+2), (void *)pwdinfo->p2p_wildcard_ssid, 7))
+ {
+ /* todo: */
+ /* Check Requested Device Type attributes in WSC IE. */
+ /* Check Device ID attribute in P2P IE */
+
+ ret = true;
+ }
+ else if ((p != NULL) && (ssid_len == 0))
+ {
+ ret = true;
+ }
+ }
+ else
+ {
+ /* non-P2P device */
+ }
+
+ }
+
+ return ret;
+}
+
+u32 process_assoc_req_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pframe, uint len, struct sta_info *psta)
+{
+ u8 status_code = P2P_STATUS_SUCCESS;
+ u8 *pbuf, *pattr_content = NULL;
+ u32 attr_contentlen = 0;
+ u16 cap_attr = 0;
+ unsigned short ie_offset;
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)pframe;
+
+ if (!rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ return P2P_STATUS_FAIL_REQUEST_UNABLE;
+
+ if (ieee80211_is_assoc_req(hdr->frame_control))
+ ie_offset = _ASOCREQ_IE_OFFSET_;
+ else /* WIFI_REASSOCREQ */
+ ie_offset = _REASOCREQ_IE_OFFSET_;
+
+ ies = pframe + sizeof(struct ieee80211_hdr_3addr) + ie_offset;
+ ies_len = len - sizeof(struct ieee80211_hdr_3addr) - ie_offset;
+
+ p2p_ie = rtw_get_p2p_ie23a(ies, ies_len, NULL, &p2p_ielen);
+
+ if (!p2p_ie)
+ {
+ DBG_8723A("[%s] P2P IE not Found!!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INVALID_PARAM;
+ }
+ else
+ {
+ DBG_8723A("[%s] P2P IE Found!!\n", __func__);
+ }
+
+ while (p2p_ie)
+ {
+ /* Check P2P Capability ATTR */
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_CAPABILITY, (u8*)&cap_attr, (uint*) &attr_contentlen))
+ {
+ DBG_8723A("[%s] Got P2P Capability Attr!!\n", __func__);
+ cap_attr = le16_to_cpu(cap_attr);
+ psta->dev_cap = cap_attr&0xff;
+ }
+
+ /* Check Extended Listen Timing ATTR */
+
+ /* Check P2P Device Info ATTR */
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_INFO, NULL, (uint*)&attr_contentlen))
+ {
+ DBG_8723A("[%s] Got P2P DEVICE INFO Attr!!\n", __func__);
+ pattr_content = pbuf = kzalloc(attr_contentlen,
+ GFP_ATOMIC);
+ if (pattr_content) {
+ u8 num_of_secdev_type;
+ u16 dev_name_len;
+
+ rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_INFO, pattr_content, (uint*)&attr_contentlen);
+
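+ /* Walk the Device Info value, which mirrors the layout built */
+ /* above: P2P Device Address (6), Config Methods (2, big endian), */
+ /* Primary Device Type (8), number of secondary device types (1), */
+ /* the secondary type list (n * 8) and the WPS Device Name TLV. */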
+ memcpy(psta->dev_addr, pattr_content, ETH_ALEN);/* P2P Device Address */
+
+ pattr_content += ETH_ALEN;
+
+ memcpy(&psta->config_methods, pattr_content, 2);/* Config Methods */
+ psta->config_methods = be16_to_cpu(psta->config_methods);
+
+ pattr_content += 2;
+
+ memcpy(psta->primary_dev_type, pattr_content, 8);
+
+ pattr_content += 8;
+
+ num_of_secdev_type = *pattr_content;
+ pattr_content += 1;
+
+ if (num_of_secdev_type == 0)
+ {
+ psta->num_of_secdev_type = 0;
+ }
+ else
+ {
+ u32 len;
+
+ psta->num_of_secdev_type = num_of_secdev_type;
+
+ len = min_t(u32, sizeof(psta->secdev_types_list), num_of_secdev_type * 8);
+
+ memcpy(psta->secdev_types_list, pattr_content, len);
+
+ pattr_content += (num_of_secdev_type*8);
+ }
+
+ /* dev_name_len = attr_contentlen - ETH_ALEN - 2 - 8 - 1 - (num_of_secdev_type*8); */
+ psta->dev_name_len = 0;
+ if (WPS_ATTR_DEVICE_NAME == be16_to_cpu(*(u16*)pattr_content))
+ {
+ dev_name_len = be16_to_cpu(*(u16*)(pattr_content+2));
+
+ psta->dev_name_len = (sizeof(psta->dev_name)<dev_name_len) ? sizeof(psta->dev_name):dev_name_len;
+
+ memcpy(psta->dev_name, pattr_content+4, psta->dev_name_len);
+ }
+
+ kfree(pbuf);
+
+ }
+
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie23a(p2p_ie+p2p_ielen, ies_len -(p2p_ie -ies + p2p_ielen), NULL, &p2p_ielen);
+
+ }
+
+ return status_code;
+}
+
+u32 process_p2p_devdisc_req23a(struct wifidirect_info *pwdinfo, u8 *pframe,
+ uint len)
+{
+ u8 *frame_body;
+ u8 status, dialogToken;
+ struct sta_info *psta = NULL;
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) pframe;
+
+ frame_body = (unsigned char *)
+ (pframe + sizeof(struct ieee80211_hdr_3addr));
+
+ dialogToken = frame_body[7];
+ status = P2P_STATUS_FAIL_UNKNOWN_P2PGROUP;
+
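+ /*
+  * A Device Discoverability Request is only honoured when its Group
+  * ID matches this GO's device address and group SSID.  The target
+  * client is then looked up by its P2P Device ID in the association
+  * list and, if it advertises client discoverability, a GO
+  * Discoverability Request is issued to wake it before the Device
+  * Discoverability Response is sent back.
+  */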
+ if ((p2p_ie = rtw_get_p2p_ie23a(frame_body + _PUBLIC_ACTION_IE_OFFSET_,
+ len - _PUBLIC_ACTION_IE_OFFSET_, NULL,
+ &p2p_ielen))) {
+ u8 groupid[38] = { 0x00 };
+ u8 dev_addr[ETH_ALEN] = { 0x00 };
+ u32 attr_contentlen = 0;
+
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen,
+ P2P_ATTR_GROUP_ID, groupid,
+ &attr_contentlen)) {
+ if (!memcmp(pwdinfo->device_addr, groupid, ETH_ALEN) &&
+ !memcmp(pwdinfo->p2p_group_ssid, groupid + ETH_ALEN,
+ pwdinfo->p2p_group_ssid_len)) {
+ attr_contentlen = 0;
+
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen,
+ P2P_ATTR_DEVICE_ID,
+ dev_addr,
+ &attr_contentlen)) {
+ struct list_head *phead, *plist, *ptmp;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ phead = &pstapriv->asoc_list;
+
+ list_for_each_safe(plist, ptmp, phead) {
+ psta = container_of(plist, struct sta_info, asoc_list);
+
+ if (psta->is_p2p_device && (psta->dev_cap&P2P_DEVCAP_CLIENT_DISCOVERABILITY) &&
+ !memcmp(psta->dev_addr, dev_addr, ETH_ALEN))
+ {
+ /* spin_unlock_bh(&pstapriv->asoc_list_lock); */
+ /* issue GO Discoverability Request */
+ issue_group_disc_req(pwdinfo, psta->hwaddr);
+ /* spin_lock_bh(&pstapriv->asoc_list_lock); */
+ status = P2P_STATUS_SUCCESS;
+ break;
+ } else {
+ status = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+ } else {
+ status = P2P_STATUS_FAIL_INVALID_PARAM;
+ }
+ } else {
+ status = P2P_STATUS_FAIL_INVALID_PARAM;
+ }
+ }
+ }
+
+ /* issue Device Discoverability Response */
+ issue_p2p_devdisc_resp(pwdinfo, hdr->addr2, status, dialogToken);
+
+ return (status == P2P_STATUS_SUCCESS) ? true:false;
+}
+
+u32 process_p2p_devdisc_resp23a(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ return true;
+}
+
+u8 process_p2p_provdisc_req23a(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len)
+{
+ u8 *frame_body;
+ u8 *wpsie;
+ u8 *ptr = NULL;
+ uint wps_ielen = 0, attr_contentlen = 0;
+ u16 uconfig_method = 0;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)pframe;
+
+ frame_body = (pframe + sizeof(struct ieee80211_hdr_3addr));
+
+ wpsie = rtw_get_wps_ie23a(frame_body + _PUBLIC_ACTION_IE_OFFSET_,
+ len - _PUBLIC_ACTION_IE_OFFSET_, NULL,
+ &wps_ielen);
+ if (!wpsie)
+ goto out;
+
+ if (!rtw_get_wps_attr_content23a(wpsie, wps_ielen, WPS_ATTR_CONF_METHOD,
+ (u8 *)&uconfig_method, &attr_contentlen))
+ goto out;
+
+ uconfig_method = be16_to_cpu(uconfig_method);
+ ptr = pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req;
+
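+ /*
+  * Record the peer's requested WSC config method as a short token.
+  * process_p2p_group_negotation_req23a() falls back to this string
+  * (still "000" when no provision discovery was exchanged) to derive
+  * the config method from the WPS Device Password ID instead.
+  */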
+ switch (uconfig_method)
+ {
+ case WPS_CM_DISPLYA:
+ memcpy(ptr, "dis", 3);
+ break;
+
+ case WPS_CM_LABEL:
+ memcpy(ptr, "lab", 3);
+ break;
+
+ case WPS_CM_PUSH_BUTTON:
+ memcpy(ptr, "pbc", 3);
+ break;
+
+ case WPS_CM_KEYPAD:
+ memcpy(ptr, "pad", 3);
+ break;
+ }
+ issue_p2p_provision_resp(pwdinfo, hdr->addr2, frame_body,
+ uconfig_method);
+
+ DBG_8723A("[%s] config method = %s\n", __func__, ptr);
+
+out:
+ return true;
+}
+
+u8 process_p2p_provdisc_resp23a(struct wifidirect_info *pwdinfo, u8 *pframe)
+{
+
+ return true;
+}
+
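+/*
+ * Walk a P2P Channel List attribute: a 3 byte country string followed by
+ * one or more entries of the form
+ * { operating class (1), number of channels (1), channel list (n) }.
+ * All channel numbers are collected into peer_ch_list and their count is
+ * returned.
+ */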
+static u8 rtw_p2p_get_peer_ch_list(struct wifidirect_info *pwdinfo, u8 *ch_content, u8 ch_cnt, u8 *peer_ch_list)
+{
+ u8 i = 0, j = 0;
+ u8 temp = 0;
+ u8 ch_no = 0;
+ ch_content += 3;
+ ch_cnt -= 3;
+
+ while (ch_cnt > 0)
+ {
+ ch_content += 1;
+ ch_cnt -= 1;
+ temp = *ch_content;
+ for (i = 0 ; i < temp ; i++, j++)
+ {
+ peer_ch_list[j] = *(ch_content + 1 + i);
+ }
+ ch_content += (temp + 1);
+ ch_cnt -= (temp + 1);
+ ch_no += temp ;
+ }
+
+ return ch_no;
+}
+
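+/*
+ * Intersect the peer's channel list with our own channel set and return
+ * the number of common channels written to ch_list_inclusioned.
+ */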
+static u8 rtw_p2p_ch_inclusion(struct mlme_ext_priv *pmlmeext, u8 *peer_ch_list, u8 peer_ch_num, u8 *ch_list_inclusioned)
+{
+ int i = 0, j = 0, temp = 0;
+ u8 ch_no = 0;
+
+ for (i = 0; i < peer_ch_num; i++)
+ {
+ for (j = temp; j < pmlmeext->max_chan_nums; j++)
+ {
+ if (*(peer_ch_list + i) == pmlmeext->channel_set[ j ].ChannelNum)
+ {
+ ch_list_inclusioned[ ch_no++ ] = *(peer_ch_list + i);
+ temp = j;
+ break;
+ }
+ }
+ }
+
+ return ch_no;
+}
+
+u8 process_p2p_group_negotation_req23a(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ u8 result = P2P_STATUS_SUCCESS;
+ u32 p2p_ielen = 0, wps_ielen = 0;
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u8 *wpsie;
+ u16 wps_devicepassword_id = 0x0000;
+ uint wps_devicepassword_id_len = 0;
+#ifdef CONFIG_8723AU_P2P
+ u8 wfd_ie[ 128 ] = { 0x00 };
+ u32 wfd_ielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+
+ if ((wpsie = rtw_get_wps_ie23a(pframe + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &wps_ielen)))
+ {
+ /* Commented by Kurt 20120113 */
+ /* If some device wants to do p2p handshake without sending prov_disc_req */
+ /* We have to get peer_req_cm from here. */
+ if (!memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3))
+ {
+ rtw_get_wps_attr_content23a(wpsie, wps_ielen, WPS_ATTR_DEVICE_PWID, (u8*) &wps_devicepassword_id, &wps_devicepassword_id_len);
+ wps_devicepassword_id = be16_to_cpu(wps_devicepassword_id);
+
+ if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
+ {
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
+ }
+ else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
+ {
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pad", 3);
+ }
+ else
+ {
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pbc", 3);
+ }
+ }
+ }
+ else
+ {
+ DBG_8723A("[%s] WPS IE not Found!!\n", __func__);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ return result;
+ }
+
+ if (pwdinfo->ui_got_wps_info == P2P_NO_WPSINFO)
+ {
+ result = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_INFOR_NOREADY);
+ return result;
+ }
+
+ ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
+ ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
+
+ p2p_ie = rtw_get_p2p_ie23a(ies, ies_len, NULL, &p2p_ielen);
+
+ if (!p2p_ie)
+ {
+ DBG_8723A("[%s] P2P IE not Found!!\n", __func__);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ }
+
+ while (p2p_ie)
+ {
+ u8 attr_content = 0x00;
+ u32 attr_contentlen = 0;
+ u8 ch_content[50] = { 0x00 };
+ uint ch_cnt = 0;
+ u8 peer_ch_list[50] = { 0x00 };
+ u8 peer_ch_num = 0;
+ u8 ch_list_inclusioned[50] = { 0x00 };
+ u8 ch_num_inclusioned = 0;
+ u16 cap_attr;
+
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_ING);
+
+ /* Check P2P Capability ATTR */
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_CAPABILITY, (u8*)&cap_attr, (uint*)&attr_contentlen))
+ cap_attr = le16_to_cpu(cap_attr);
+
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_GO_INTENT, &attr_content, &attr_contentlen))
+ {
+ DBG_8723A("[%s] GO Intent = %d, tie = %d\n", __func__, attr_content >> 1, attr_content & 0x01);
+ pwdinfo->peer_intent = attr_content; /* include both intent and tie breaker values. */
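+ /*
+  * GO negotiation rule: bits 7:1 carry the peer's GO Intent and
+  * bit 0 its tie breaker.  The side with the higher intent becomes
+  * GO; equal maximum intents (15) fail the negotiation, and equal
+  * lower intents are resolved by the peer's tie breaker bit below.
+  */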
+
+ if (pwdinfo->intent == (pwdinfo->peer_intent >> 1))
+ {
+ /* Try to match the tie breaker value */
+ if (pwdinfo->intent == P2P_MAX_INTENT)
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ result = P2P_STATUS_FAIL_BOTH_GOINTENT_15;
+ }
+ else
+ {
+ if (attr_content & 0x01)
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+ else
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ }
+ }
+ else if (pwdinfo->intent > (pwdinfo->peer_intent >> 1))
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ else
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ {
+ /* Store the group id information. */
+ memcpy(pwdinfo->groupid_info.go_device_addr, pwdinfo->device_addr, ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
+ }
+ }
+
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_INTENTED_IF_ADDR, pwdinfo->p2p_peer_interface_addr, &attr_contentlen))
+ {
+ if (attr_contentlen != ETH_ALEN)
+ {
+ memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
+ }
+ }
+
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_CH_LIST, ch_content, &ch_cnt))
+ {
+ peer_ch_num = rtw_p2p_get_peer_ch_list(pwdinfo, ch_content, ch_cnt, peer_ch_list);
+ ch_num_inclusioned = rtw_p2p_ch_inclusion(&padapter->mlmeextpriv, peer_ch_list, peer_ch_num, ch_list_inclusioned);
+
+ if (ch_num_inclusioned == 0)
+ {
+ DBG_8723A("[%s] No common channel in channel list!\n", __func__);
+ result = P2P_STATUS_FAIL_NO_COMMON_CH;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ break;
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ {
+ if (!rtw_p2p_is_channel_list_ok(pwdinfo->operating_channel,
+ ch_list_inclusioned, ch_num_inclusioned))
+ {
+ {
+ u8 operatingch_info[5] = { 0x00 }, peer_operating_ch = 0;
+ attr_contentlen = 0;
+
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen))
+ {
+ peer_operating_ch = operatingch_info[4];
+ }
+
+ if (rtw_p2p_is_channel_list_ok(peer_operating_ch,
+ ch_list_inclusioned, ch_num_inclusioned))
+ {
+ /**
+ * Change our operating channel as peer's for compatibility.
+ */
+ pwdinfo->operating_channel = peer_operating_ch;
+ DBG_8723A("[%s] Change op ch to %02x as peer's\n", __func__, pwdinfo->operating_channel);
+ }
+ else
+ {
+ /* Take first channel of ch_list_inclusioned as operating channel */
+ pwdinfo->operating_channel = ch_list_inclusioned[0];
+ DBG_8723A("[%s] Change op ch to %02x\n", __func__, pwdinfo->operating_channel);
+ }
+ }
+
+ }
+ }
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie23a(p2p_ie+p2p_ielen, ies_len -(p2p_ie -ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+
+#ifdef CONFIG_8723AU_P2P
+ /* Added by Albert 20110823 */
+ /* Try to get the TCP port information when receiving the negotiation request. */
+ if (rtw_get_wfd_ie(pframe + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, wfd_ie, &wfd_ielen))
+ {
+ u8 attr_content[ 10 ] = { 0x00 };
+ u32 attr_contentlen = 0;
+
+ DBG_8723A("[%s] WFD IE Found!!\n", __func__);
+ rtw_get_wfd_attr_content(wfd_ie, wfd_ielen, WFD_ATTR_DEVICE_INFO, attr_content, &attr_contentlen);
+ if (attr_contentlen)
+ {
+ pwdinfo->wfd_info->peer_rtsp_ctrlport = get_unaligned_be16(attr_content + 2);
+ DBG_8723A("[%s] Peer PORT NUM = %d\n", __func__, pwdinfo->wfd_info->peer_rtsp_ctrlport);
+ }
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ return result;
+}
+
+u8 process_p2p_group_negotation_resp23a(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ struct rtw_adapter *padapter = pwdinfo->padapter;
+ u8 result = P2P_STATUS_SUCCESS;
+ u32 p2p_ielen, wps_ielen;
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+#ifdef CONFIG_8723AU_P2P
+ u8 wfd_ie[ 128 ] = { 0x00 };
+ u32 wfd_ielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+
+ ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
+ ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
+
+ /* Be able to know which one is the P2P GO and which one is P2P client. */
+
+ if (!rtw_get_wps_ie23a(ies, ies_len, NULL, &wps_ielen)) {
+ DBG_8723A("[%s] WPS IE not Found!!\n", __func__);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ }
+
+ p2p_ie = rtw_get_p2p_ie23a(ies, ies_len, NULL, &p2p_ielen);
+ if (!p2p_ie)
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ }
+ else
+ {
+
+ u8 attr_content = 0x00;
+ u32 attr_contentlen = 0;
+ u8 operatingch_info[5] = { 0x00 };
+ u8 groupid[ 38 ];
+ u16 cap_attr;
+ u8 peer_ch_list[50] = { 0x00 };
+ u8 peer_ch_num = 0;
+ u8 ch_list_inclusioned[50] = { 0x00 };
+ u8 ch_num_inclusioned = 0;
+
+ while (p2p_ie) /* Found the P2P IE. */
+ {
+
+ /* Check P2P Capability ATTR */
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_CAPABILITY, (u8*)&cap_attr, (uint*)&attr_contentlen))
+ cap_attr = le16_to_cpu(cap_attr);
+
+ rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
+ if (attr_contentlen == 1)
+ {
+ DBG_8723A("[%s] Status = %d\n", __func__, attr_content);
+ if (attr_content == P2P_STATUS_SUCCESS)
+ {
+ /* Do nothing. */
+ }
+ else
+ {
+ if (P2P_STATUS_FAIL_INFO_UNAVAILABLE == attr_content) {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INFOR_NOREADY);
+ } else {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ }
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ result = attr_content;
+ break;
+ }
+ }
+
+ /* Try to get the peer's interface address */
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_INTENTED_IF_ADDR, pwdinfo->p2p_peer_interface_addr, &attr_contentlen))
+ {
+ if (attr_contentlen != ETH_ALEN)
+ {
+ memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
+ }
+ }
+
+ /* Try to get the peer's intent and tie breaker value. */
+ attr_content = 0x00;
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_GO_INTENT, &attr_content, &attr_contentlen))
+ {
+ DBG_8723A("[%s] GO Intent = %d, tie = %d\n", __func__, attr_content >> 1, attr_content & 0x01);
+ pwdinfo->peer_intent = attr_content; /* include both intent and tie breaker values. */
+
+ if (pwdinfo->intent == (pwdinfo->peer_intent >> 1))
+ {
+ /* Try to match the tie breaker value */
+ if (pwdinfo->intent == P2P_MAX_INTENT)
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ result = P2P_STATUS_FAIL_BOTH_GOINTENT_15;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ }
+ else
+ {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ if (attr_content & 0x01)
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+ else
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ }
+ }
+ else if (pwdinfo->intent > (pwdinfo->peer_intent >> 1))
+ {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ else
+ {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ {
+ /* Store the group id information. */
+ memcpy(pwdinfo->groupid_info.go_device_addr, pwdinfo->device_addr, ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
+
+ }
+ }
+
+ /* Try to get the operation channel information */
+
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen))
+ {
+ DBG_8723A("[%s] Peer's operating channel = %d\n", __func__, operatingch_info[4]);
+ pwdinfo->peer_operating_ch = operatingch_info[4];
+ }
+
+ /* Try to get the channel list information */
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_CH_LIST, pwdinfo->channel_list_attr, &pwdinfo->channel_list_attr_len))
+ {
+ DBG_8723A("[%s] channel list attribute found, len = %d\n", __func__, pwdinfo->channel_list_attr_len);
+
+ peer_ch_num = rtw_p2p_get_peer_ch_list(pwdinfo, pwdinfo->channel_list_attr, pwdinfo->channel_list_attr_len, peer_ch_list);
+ ch_num_inclusioned = rtw_p2p_ch_inclusion(&padapter->mlmeextpriv, peer_ch_list, peer_ch_num, ch_list_inclusioned);
+
+ if (ch_num_inclusioned == 0)
+ {
+ DBG_8723A("[%s] No common channel in channel list!\n", __func__);
+ result = P2P_STATUS_FAIL_NO_COMMON_CH;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ break;
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ {
+ if (!rtw_p2p_is_channel_list_ok(pwdinfo->operating_channel,
+ ch_list_inclusioned, ch_num_inclusioned))
+ {
+ {
+ u8 operatingch_info[5] = { 0x00 }, peer_operating_ch = 0;
+ attr_contentlen = 0;
+
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen))
+ {
+ peer_operating_ch = operatingch_info[4];
+ }
+
+ if (rtw_p2p_is_channel_list_ok(peer_operating_ch,
+ ch_list_inclusioned, ch_num_inclusioned))
+ {
+ /**
+ * Change our operating channel as peer's for compatibility.
+ */
+ pwdinfo->operating_channel = peer_operating_ch;
+ DBG_8723A("[%s] Change op ch to %02x as peer's\n", __func__, pwdinfo->operating_channel);
+ }
+ else
+ {
+ /* Take first channel of ch_list_inclusioned as operating channel */
+ pwdinfo->operating_channel = ch_list_inclusioned[0];
+ DBG_8723A("[%s] Change op ch to %02x\n", __func__, pwdinfo->operating_channel);
+ }
+ }
+
+ }
+ }
+
+ }
+ else
+ {
+ DBG_8723A("[%s] channel list attribute not found!\n", __func__);
+ }
+
+ /* Try to get the group id information if peer is GO */
+ attr_contentlen = 0;
+ memset(groupid, 0x00, 38);
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen))
+ {
+ memcpy(pwdinfo->groupid_info.go_device_addr, &groupid[0], ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, &groupid[6], attr_contentlen - ETH_ALEN);
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie23a(p2p_ie+p2p_ielen, ies_len -(p2p_ie -ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+
+ }
+
+#ifdef CONFIG_8723AU_P2P
+ /* Added by Albert 20111122 */
+ /* Try to get the TCP port information when receiving the negotiation response. */
+ if (rtw_get_wfd_ie(pframe + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, wfd_ie, &wfd_ielen))
+ {
+ u8 attr_content[ 10 ] = { 0x00 };
+ u32 attr_contentlen = 0;
+
+ DBG_8723A("[%s] WFD IE Found!!\n", __func__);
+ rtw_get_wfd_attr_content(wfd_ie, wfd_ielen, WFD_ATTR_DEVICE_INFO, attr_content, &attr_contentlen);
+ if (attr_contentlen)
+ {
+ pwdinfo->wfd_info->peer_rtsp_ctrlport = get_unaligned_be16(attr_content + 2);
+ DBG_8723A("[%s] Peer PORT NUM = %d\n", __func__, pwdinfo->wfd_info->peer_rtsp_ctrlport);
+ }
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ return result;
+}
+
+u8 process_p2p_group_negotation_confirm23a(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+ u8 result = P2P_STATUS_SUCCESS;
+ ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
+ ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
+
+ p2p_ie = rtw_get_p2p_ie23a(ies, ies_len, NULL, &p2p_ielen);
+ while (p2p_ie) /* Found the P2P IE. */
+ {
+ u8 attr_content = 0x00, operatingch_info[5] = { 0x00 };
+ u8 groupid[ 38 ] = { 0x00 };
+ u32 attr_contentlen = 0;
+
+ pwdinfo->negotiation_dialog_token = 1;
+ rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
+ if (attr_contentlen == 1)
+ {
+ DBG_8723A("[%s] Status = %d\n", __func__, attr_content);
+ result = attr_content;
+
+ if (attr_content == P2P_STATUS_SUCCESS)
+ {
+ del_timer_sync(&pwdinfo->restore_p2p_state_timer);
+
+ /* Commented by Albert 20100911 */
+ /* Todo: Need to handle the case which both Intents are the same. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ if ((pwdinfo->intent) > (pwdinfo->peer_intent >> 1))
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ else if ((pwdinfo->intent) < (pwdinfo->peer_intent >> 1))
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+ else
+ {
+ /* Have to compare the Tie Breaker */
+ if (pwdinfo->peer_intent & 0x01)
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+ else
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ }
+ }
+ else
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ break;
+ }
+ }
+
+ /* Try to get the group id information */
+ attr_contentlen = 0;
+ memset(groupid, 0x00, 38);
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen))
+ {
+ DBG_8723A("[%s] Ssid = %s, ssidlen = %zu\n", __func__, &groupid[ETH_ALEN], strlen(&groupid[ETH_ALEN]));
+ memcpy(pwdinfo->groupid_info.go_device_addr, &groupid[0], ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, &groupid[6], attr_contentlen - ETH_ALEN);
+ }
+
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen))
+ {
+ DBG_8723A("[%s] Peer's operating channel = %d\n", __func__, operatingch_info[4]);
+ pwdinfo->peer_operating_ch = operatingch_info[4];
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie23a(p2p_ie+p2p_ielen, ies_len -(p2p_ie -ies + p2p_ielen), NULL, &p2p_ielen);
+
+ }
+
+ return result;
+}
+
+u8 process_p2p_presence_req23a(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *frame_body;
+ u8 dialogToken = 0;
+ u8 status = P2P_STATUS_SUCCESS;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) pframe;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
+
+ dialogToken = frame_body[6];
+
+ /* todo: check NoA attribute */
+
+ issue_p2p_presence_resp(pwdinfo, hdr->addr2, status, dialogToken);
+
+ return true;
+}
+
+static void find_phase_handler(struct rtw_adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct cfg80211_ssid ssid;
+ u8 _status = 0;
+
+
+
+ memset((unsigned char*)&ssid, 0, sizeof(struct cfg80211_ssid));
+ memcpy(ssid.ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN);
+ ssid.ssid_len = P2P_WILDCARD_SSID_LEN;
+
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
+
+ spin_lock_bh(&pmlmepriv->lock);
+ _status = rtw_sitesurvey_cmd23a(padapter, &ssid, 1, NULL, 0);
+ spin_unlock_bh(&pmlmepriv->lock);
+
+
+}
+
+void p2p_concurrent_handler(struct rtw_adapter *padapter);
+
+static void restore_p2p_state_handler(struct rtw_adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL))
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
+ /* In the P2P client mode, the driver should not switch back to its listen channel */
+ /* because this P2P client should stay at the operating channel of P2P GO. */
+ set_channel_bwmode23a(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ }
+}
+
+static void pre_tx_invitereq_handler(struct rtw_adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 val8 = 1;
+
+ set_channel_bwmode23a(padapter, pwdinfo->invitereq_info.peer_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ issue23a_probereq_p2p(padapter, NULL);
+ mod_timer(&pwdinfo->pre_tx_scan_timer,
+ jiffies + msecs_to_jiffies(P2P_TX_PRESCAN_TIMEOUT));
+
+
+}
+
+static void pre_tx_provdisc_handler(struct rtw_adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 val8 = 1;
+
+
+ set_channel_bwmode23a(padapter, pwdinfo->tx_prov_disc_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ issue23a_probereq_p2p(padapter, NULL);
+ mod_timer(&pwdinfo->pre_tx_scan_timer,
+ jiffies + msecs_to_jiffies(P2P_TX_PRESCAN_TIMEOUT));
+
+
+}
+
+static void pre_tx_negoreq_handler(struct rtw_adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 val8 = 1;
+
+
+ set_channel_bwmode23a(padapter, pwdinfo->nego_req_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ issue23a_probereq_p2p(padapter, NULL);
+ mod_timer(&pwdinfo->pre_tx_scan_timer,
+ jiffies + msecs_to_jiffies(P2P_TX_PRESCAN_TIMEOUT));
+
+
+}
+
+static void ro_ch_handler(struct rtw_adapter *padapter)
+{
+ struct cfg80211_wifidirect_info *pcfg80211_wdinfo = &padapter->cfg80211_wdinfo;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (pcfg80211_wdinfo->restore_channel != pmlmeext->cur_channel) {
+ if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED))
+ pmlmeext->cur_channel = pcfg80211_wdinfo->restore_channel;
+
+ set_channel_bwmode23a(padapter, pmlmeext->cur_channel,
+ HAL_PRIME_CHNL_OFFSET_DONT_CARE,
+ HT_CHANNEL_WIDTH_20);
+ }
+
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+
+ pcfg80211_wdinfo->is_ro_ch = false;
+
+ DBG_8723A("cfg80211_remain_on_channel_expired\n");
+
+ rtw_cfg80211_remain_on_channel_expired(padapter,
+ pcfg80211_wdinfo->remain_on_ch_cookie,
+ &pcfg80211_wdinfo->remain_on_ch_channel,
+ pcfg80211_wdinfo->remain_on_ch_type, GFP_KERNEL);
+}
+
+static void ro_ch_timer_process(unsigned long data)
+{
+ struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+
+ p2p_protocol_wk_cmd23a(adapter, P2P_RO_CH_WK);
+}
+
+#ifdef CONFIG_8723AU_P2P
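+/*
+ * Append the Wi-Fi Display (WFD) IE that matches an outgoing public action
+ * frame.  The P2P OUI subtype selects which WFD IE builder is called and
+ * *len is advanced by the number of bytes appended.
+ */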
+void rtw_append_wfd_ie(struct rtw_adapter *padapter, u8 *buf, u32* len)
+{
+ unsigned char *frame_body;
+ u8 category, action, OUI_Subtype, dialogToken = 0;
+ u32 wfdielen = 0;
+
+ frame_body = (unsigned char *)(buf + sizeof(struct ieee80211_hdr_3addr));
+ category = frame_body[0];
+
+ if (category == WLAN_CATEGORY_PUBLIC) {
+ action = frame_body[1];
+ if (action == ACT_PUBLIC_VENDOR &&
+ !memcmp(frame_body+2, P2P_OUI23A, 4)) {
+ OUI_Subtype = frame_body[6];
+ dialogToken = frame_body[7];
+ switch (OUI_Subtype) {
+ case P2P_GO_NEGO_REQ:
+ wfdielen = build_nego_req_wfd_ie(&padapter->wdinfo, buf + (*len));
+ (*len) += wfdielen;
+ break;
+ case P2P_GO_NEGO_RESP:
+ wfdielen = build_nego_resp_wfd_ie(&padapter->wdinfo, buf + (*len));
+ (*len) += wfdielen;
+ break;
+ case P2P_GO_NEGO_CONF:
+ wfdielen = build_nego_confirm_wfd_ie(&padapter->wdinfo, buf + (*len));
+ (*len) += wfdielen;
+ break;
+ case P2P_INVIT_REQ:
+ wfdielen = build_invitation_req_wfd_ie(&padapter->wdinfo, buf + (*len));
+ (*len) += wfdielen;
+ break;
+ case P2P_INVIT_RESP:
+ wfdielen = build_invitation_resp_wfd_ie(&padapter->wdinfo, buf + (*len));
+ (*len) += wfdielen;
+ break;
+ case P2P_DEVDISC_REQ:
+ break;
+ case P2P_DEVDISC_RESP:
+ break;
+ case P2P_PROVISION_DISC_REQ:
+ wfdielen = build_provdisc_req_wfd_ie(&padapter->wdinfo, buf + (*len));
+ (*len) += wfdielen;
+ break;
+ case P2P_PROVISION_DISC_RESP:
+ wfdielen = build_provdisc_resp_wfd_ie(&padapter->wdinfo, buf + (*len));
+ (*len) += wfdielen;
+ break;
+ default:
+ break;
+ }
+ }
+ } else if (category == WLAN_CATEGORY_VENDOR_SPECIFIC) {
+ OUI_Subtype = frame_body[5];
+ dialogToken = frame_body[6];
+ } else {
+ DBG_8723A("%s, action frame category =%d\n", __func__, category);
+ }
+}
+#endif
+
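+/*
+ * Classify a transmitted or received P2P action frame for debugging and
+ * bookkeeping.  Returns the P2P OUI subtype, or -1 for non-P2P frames, and
+ * updates the invitation/provision discovery state kept in the wdev private
+ * data.
+ */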
+int rtw_p2p_check_frames(struct rtw_adapter *padapter, const u8 *buf, u32 len, u8 tx)
+{
+ int is_p2p_frame = (-1);
+ unsigned char *frame_body;
+ u8 category, action, OUI_Subtype, dialogToken = 0;
+ u8 *p2p_ie = NULL;
+ uint p2p_ielen = 0;
+ struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);
+
+ frame_body = (unsigned char *)(buf + sizeof(struct ieee80211_hdr_3addr));
+ category = frame_body[0];
+ /* just for check */
+ if (category == WLAN_CATEGORY_PUBLIC)
+ {
+ action = frame_body[1];
+ if (action == ACT_PUBLIC_VENDOR &&
+ !memcmp(frame_body+2, P2P_OUI23A, 4)) {
+ OUI_Subtype = frame_body[6];
+ dialogToken = frame_body[7];
+ is_p2p_frame = OUI_Subtype;
+ p2p_ie = rtw_get_p2p_ie23a(
+ (u8 *)buf+sizeof(struct ieee80211_hdr_3addr)+_PUBLIC_ACTION_IE_OFFSET_,
+ len-sizeof(struct ieee80211_hdr_3addr)-_PUBLIC_ACTION_IE_OFFSET_,
+ NULL, &p2p_ielen);
+
+ switch (OUI_Subtype) {
+ u8 *cont;
+ uint cont_len;
+ case P2P_GO_NEGO_REQ:
+ DBG_8723A("RTW_%s:P2P_GO_NEGO_REQ, dialogToken =%d\n", (tx == true)?"Tx":"Rx", dialogToken);
+ break;
+ case P2P_GO_NEGO_RESP:
+ cont = rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, NULL, &cont_len);
+ DBG_8723A("RTW_%s:P2P_GO_NEGO_RESP, dialogToken =%d, status:%d\n", (tx == true)?"Tx":"Rx", dialogToken, cont?*cont:-1);
+
+ if (!tx)
+ pwdev_priv->provdisc_req_issued = false;
+ break;
+ case P2P_GO_NEGO_CONF:
+ cont = rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, NULL, &cont_len);
+ DBG_8723A("RTW_%s:P2P_GO_NEGO_CONF, dialogToken =%d, status:%d\n",
+ (tx == true)?"Tx":"Rx", dialogToken, cont?*cont:-1);
+ break;
+ case P2P_INVIT_REQ:
+ {
+ struct rtw_wdev_invit_info* invit_info = &pwdev_priv->invit_info;
+ int flags = -1;
+ int op_ch = 0;
+
+ if ((cont = rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_INVITATION_FLAGS, NULL, &cont_len)))
+ flags = *cont;
+ if ((cont = rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, NULL, &cont_len)))
+ op_ch = *(cont+4);
+
+ if (invit_info->token != dialogToken)
+ rtw_wdev_invit_info_init(invit_info);
+
+ invit_info->token = dialogToken;
+ invit_info->flags = (flags == -1) ? 0x0 : flags;
+ invit_info->req_op_ch = op_ch;
+
+ DBG_8723A("RTW_%s:P2P_INVIT_REQ, dialogToken =%d, flags:0x%02x, op_ch:%d\n",
+ (tx) ? "Tx" : "Rx", dialogToken, flags, op_ch);
+ break;
+ }
+ case P2P_INVIT_RESP:
+ {
+ struct rtw_wdev_invit_info* invit_info = &pwdev_priv->invit_info;
+ int status = -1;
+ int op_ch = 0;
+
+ if ((cont = rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, NULL, &cont_len)))
+ status = *cont;
+ if ((cont = rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, NULL, &cont_len)))
+ op_ch = *(cont+4);
+
+ if (invit_info->token != dialogToken) {
+ rtw_wdev_invit_info_init(invit_info);
+ } else {
+ invit_info->token = 0;
+ invit_info->status = (status == -1) ? 0xff : status;
+ invit_info->rsp_op_ch = op_ch;
+ }
+
+ DBG_8723A("RTW_%s:P2P_INVIT_RESP, dialogToken =%d, status:%d, op_ch:%d\n",
+ (tx == true)?"Tx":"Rx", dialogToken, status, op_ch);
+ break;
+ }
+ case P2P_DEVDISC_REQ:
+ DBG_8723A("RTW_%s:P2P_DEVDISC_REQ, dialogToken =%d\n", (tx == true)?"Tx":"Rx", dialogToken);
+ break;
+ case P2P_DEVDISC_RESP:
+ cont = rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, NULL, &cont_len);
+ DBG_8723A("RTW_%s:P2P_DEVDISC_RESP, dialogToken =%d, status:%d\n", (tx == true)?"Tx":"Rx", dialogToken, cont?*cont:-1);
+ break;
+ case P2P_PROVISION_DISC_REQ:
+ {
+ size_t frame_body_len = len - sizeof(struct ieee80211_hdr_3addr);
+ u8 *p2p_ie;
+ uint p2p_ielen = 0;
+ uint contentlen = 0;
+
+ DBG_8723A("RTW_%s:P2P_PROVISION_DISC_REQ, dialogToken =%d\n", (tx == true)?"Tx":"Rx", dialogToken);
+
+ pwdev_priv->provdisc_req_issued = false;
+
+ p2p_ie = rtw_get_p2p_ie23a(frame_body + _PUBLIC_ACTION_IE_OFFSET_,
+ frame_body_len - _PUBLIC_ACTION_IE_OFFSET_,
+ NULL, &p2p_ielen);
+ if (p2p_ie) {
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, NULL, &contentlen))
+ pwdev_priv->provdisc_req_issued = false;/* case: p2p_client join p2p GO */
+ else
+ pwdev_priv->provdisc_req_issued = true;/* case: p2p_devices connection before Nego req. */
+ }
+ }
+ break;
+ case P2P_PROVISION_DISC_RESP:
+ DBG_8723A("RTW_%s:P2P_PROVISION_DISC_RESP, dialogToken =%d\n", (tx == true)?"Tx":"Rx", dialogToken);
+ break;
+ default:
+ DBG_8723A("RTW_%s:OUI_Subtype =%d, dialogToken =%d\n", (tx == true)?"Tx":"Rx", OUI_Subtype, dialogToken);
+ break;
+ }
+
+ }
+
+ }
+ else if (category == WLAN_CATEGORY_VENDOR_SPECIFIC)
+ {
+ OUI_Subtype = frame_body[5];
+ dialogToken = frame_body[6];
+
+ is_p2p_frame = OUI_Subtype;
+
+ switch (OUI_Subtype) {
+ case P2P_NOTICE_OF_ABSENCE:
+ DBG_8723A("RTW_%s:P2P_NOTICE_OF_ABSENCE, dialogToken =%d\n", (tx == true)?"TX":"RX", dialogToken);
+ break;
+ case P2P_PRESENCE_REQUEST:
+ DBG_8723A("RTW_%s:P2P_PRESENCE_REQUEST, dialogToken =%d\n", (tx == true)?"TX":"RX", dialogToken);
+ break;
+ case P2P_PRESENCE_RESPONSE:
+ DBG_8723A("RTW_%s:P2P_PRESENCE_RESPONSE, dialogToken =%d\n", (tx == true)?"TX":"RX", dialogToken);
+ break;
+ case P2P_GO_DISC_REQUEST:
+ DBG_8723A("RTW_%s:P2P_GO_DISC_REQUEST, dialogToken =%d\n", (tx == true)?"TX":"RX", dialogToken);
+ break;
+ default:
+ DBG_8723A("RTW_%s:OUI_Subtype =%d, dialogToken =%d\n", (tx == true)?"TX":"RX", OUI_Subtype, dialogToken);
+ break;
+ }
+
+ } else {
+ DBG_8723A("RTW_%s:action frame category =%d\n", (tx == true)?"TX":"RX", category);
+ }
+ return is_p2p_frame;
+}
+
+void rtw_init_cfg80211_wifidirect_info(struct rtw_adapter *padapter)
+{
+ struct cfg80211_wifidirect_info *pcfg80211_wdinfo = &padapter->cfg80211_wdinfo;
+
+ memset(pcfg80211_wdinfo, 0x00, sizeof(struct cfg80211_wifidirect_info));
+
+ setup_timer(&pcfg80211_wdinfo->remain_on_ch_timer,
+ ro_ch_timer_process, (unsigned long)padapter);
+}
+
+void p2p_protocol_wk_hdl23a(struct rtw_adapter *padapter, int intCmdType)
+{
+ switch (intCmdType) {
+ case P2P_FIND_PHASE_WK:
+ find_phase_handler(padapter);
+ break;
+ case P2P_RESTORE_STATE_WK:
+ restore_p2p_state_handler(padapter);
+ break;
+ case P2P_PRE_TX_PROVDISC_PROCESS_WK:
+ pre_tx_provdisc_handler(padapter);
+ break;
+ case P2P_PRE_TX_INVITEREQ_PROCESS_WK:
+ pre_tx_invitereq_handler(padapter);
+ break;
+ case P2P_PRE_TX_NEGOREQ_PROCESS_WK:
+ pre_tx_negoreq_handler(padapter);
+ break;
+ case P2P_RO_CH_WK:
+ ro_ch_handler(padapter);
+ break;
+ }
+}
+
+#ifdef CONFIG_8723AU_P2P
+void process_p2p_ps_ie23a(struct rtw_adapter *padapter, u8 *IEs, u32 IELength)
+{
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+ u8 noa_attr[MAX_P2P_IE_LEN] = { 0x00 };/* NoA length should be n*(13) + 2 */
+ u32 attr_contentlen = 0;
+
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 find_p2p = false, find_p2p_ps = false;
+ u8 noa_offset, noa_num, noa_index;
+
+
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+ if (IELength <= _BEACON_IE_OFFSET_)
+ return;
+
+ ies = IEs + _BEACON_IE_OFFSET_;
+ ies_len = IELength - _BEACON_IE_OFFSET_;
+
+ p2p_ie = rtw_get_p2p_ie23a(ies, ies_len, NULL, &p2p_ielen);
+
+ while (p2p_ie)
+ {
+ find_p2p = true;
+ /* Get Notice of Absence IE. */
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_NOA, noa_attr, &attr_contentlen))
+ {
+ find_p2p_ps = true;
+ noa_index = noa_attr[0];
+
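+ /*
+  * NoA attribute layout: index (1), CTWindow/OppPS (1), then zero
+  * or more 13 byte descriptors of
+  * { count (1), duration (4), interval (4), start time (4) }.
+  */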
+ if ((pwdinfo->p2p_ps_mode == P2P_PS_NONE) ||
+ (noa_index != pwdinfo->noa_index))/* if index change, driver should reconfigure related setting. */
+ {
+ pwdinfo->noa_index = noa_index;
+ pwdinfo->opp_ps = noa_attr[1] >> 7;
+ pwdinfo->ctwindow = noa_attr[1] & 0x7F;
+
+ noa_offset = 2;
+ noa_num = 0;
+ /* NoA length should be n*(13) + 2 */
+ if (attr_contentlen > 2)
+ {
+ while (noa_offset < attr_contentlen)
+ {
+ /* memcpy(&wifidirect_info->noa_count[noa_num], &noa_attr[noa_offset], 1); */
+ pwdinfo->noa_count[noa_num] = noa_attr[noa_offset];
+ noa_offset += 1;
+
+ memcpy(&pwdinfo->noa_duration[noa_num], &noa_attr[noa_offset], 4);
+ noa_offset += 4;
+
+ memcpy(&pwdinfo->noa_interval[noa_num], &noa_attr[noa_offset], 4);
+ noa_offset += 4;
+
+ memcpy(&pwdinfo->noa_start_time[noa_num], &noa_attr[noa_offset], 4);
+ noa_offset += 4;
+
+ noa_num++;
+ }
+ }
+ pwdinfo->noa_num = noa_num;
+
+ if (pwdinfo->opp_ps == 1)
+ {
+ pwdinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
+ /* driver should wait LPS for entering CTWindow */
+ if (padapter->pwrctrlpriv.bFwCurrentInPSMode == true)
+ {
+ p2p_ps_wk_cmd23a(padapter, P2P_PS_ENABLE, 1);
+ }
+ }
+ else if (pwdinfo->noa_num > 0)
+ {
+ pwdinfo->p2p_ps_mode = P2P_PS_NOA;
+ p2p_ps_wk_cmd23a(padapter, P2P_PS_ENABLE, 1);
+ }
+ else if (pwdinfo->p2p_ps_mode > P2P_PS_NONE)
+ {
+ p2p_ps_wk_cmd23a(padapter, P2P_PS_DISABLE, 1);
+ }
+ }
+
+ break; /* find target, just break. */
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie23a(p2p_ie+p2p_ielen, ies_len -(p2p_ie -ies + p2p_ielen), NULL, &p2p_ielen);
+
+ }
+
+ if (find_p2p) {
+ if (pwdinfo->p2p_ps_mode > P2P_PS_NONE && !find_p2p_ps)
+ p2p_ps_wk_cmd23a(padapter, P2P_PS_DISABLE, 1);
+ }
+
+
+}
+
+void p2p_ps_wk_hdl23a(struct rtw_adapter *padapter, u8 p2p_ps_state)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+
+
+ /* Pre action for p2p state */
+ switch (p2p_ps_state)
+ {
+ case P2P_PS_DISABLE:
+ pwdinfo->p2p_ps_state = p2p_ps_state;
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+
+ pwdinfo->noa_index = 0;
+ pwdinfo->ctwindow = 0;
+ pwdinfo->opp_ps = 0;
+ pwdinfo->noa_num = 0;
+ pwdinfo->p2p_ps_mode = P2P_PS_NONE;
+ if (padapter->pwrctrlpriv.bFwCurrentInPSMode == true)
+ {
+ if (pwrpriv->smart_ps == 0)
+ {
+ pwrpriv->smart_ps = 2;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)&padapter->pwrctrlpriv.pwr_mode);
+ }
+ }
+ break;
+ case P2P_PS_ENABLE:
+ if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
+ pwdinfo->p2p_ps_state = p2p_ps_state;
+
+ if (pwdinfo->ctwindow > 0)
+ {
+ if (pwrpriv->smart_ps != 0)
+ {
+ pwrpriv->smart_ps = 0;
+ DBG_8723A("%s(): Enter CTW, change SmartPS\n", __func__);
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)&padapter->pwrctrlpriv.pwr_mode);
+ }
+ }
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ }
+ break;
+ case P2P_PS_SCAN:
+ case P2P_PS_SCAN_DONE:
+ case P2P_PS_ALLSTASLEEP:
+ if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
+ pwdinfo->p2p_ps_state = p2p_ps_state;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ }
+ break;
+ default:
+ break;
+ }
+
+
+}
+
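+/*
+ * Queue a P2P power save state change to the command thread (enqueue != 0)
+ * or apply it directly in the caller's context.  Does nothing while P2P is
+ * disabled.
+ */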
+u8 p2p_ps_wk_cmd23a(struct rtw_adapter *padapter, u8 p2p_ps_state, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return res;
+
+ if (enqueue) {
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ if (!ph2c) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ GFP_ATOMIC);
+ if (!pdrvextra_cmd_parm) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = P2P_PS_WK_CID;
+ pdrvextra_cmd_parm->type_size = p2p_ps_state;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+ }
+ else
+ {
+ p2p_ps_wk_hdl23a(padapter, p2p_ps_state);
+ }
+
+exit:
+
+
+
+ return res;
+}
+#endif /* CONFIG_8723AU_P2P */
+
+static void reset_ch_sitesurvey_timer_process(unsigned long data)
+{
+ struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ DBG_8723A("[%s] In\n", __func__);
+ /* Reset the operation channel information */
+ pwdinfo->rx_invitereq_info.operation_ch[0] = 0;
+ pwdinfo->rx_invitereq_info.scan_op_ch_only = 0;
+}
+
+static void reset_ch_sitesurvey_timer_process2(unsigned long data)
+{
+ struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ DBG_8723A("[%s] In\n", __func__);
+ /* Reset the operation channel information */
+ pwdinfo->p2p_info.operation_ch[0] = 0;
+ pwdinfo->p2p_info.scan_op_ch_only = 0;
+}
+
+static void restore_p2p_state_timer_process(unsigned long data)
+{
+ struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ p2p_protocol_wk_cmd23a(adapter, P2P_RESTORE_STATE_WK);
+}
+
+static void pre_tx_scan_timer_process(unsigned long data)
+{
+ struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
+ /* has a provision discovery request been triggered for transmission? */
+ if (pwdinfo->tx_prov_disc_info.benable) {
+ p2p_protocol_wk_cmd23a(adapter, P2P_PRE_TX_PROVDISC_PROCESS_WK);
+ /* issue23a_probereq_p2p(adapter, NULL); */
+ /* _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT); */
+ }
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
+ if (pwdinfo->nego_req_info.benable)
+ p2p_protocol_wk_cmd23a(adapter, P2P_PRE_TX_NEGOREQ_PROCESS_WK);
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_INVITE_REQ)) {
+ if (pwdinfo->invitereq_info.benable)
+ p2p_protocol_wk_cmd23a(adapter, P2P_PRE_TX_INVITEREQ_PROCESS_WK);
+ } else {
+ DBG_8723A("[%s] p2p_state is %d, ignore!!\n", __func__, rtw_p2p_state(pwdinfo));
+ }
+
+ spin_unlock_bh(&pmlmepriv->lock);
+}
+
+static void find_phase_timer_process(unsigned long data)
+{
+ struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ adapter->wdinfo.find_phase_state_exchange_cnt++;
+
+ p2p_protocol_wk_cmd23a(adapter, P2P_FIND_PHASE_WK);
+}
+
+void reset_global_wifidirect_info23a(struct rtw_adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo;
+
+ pwdinfo = &padapter->wdinfo;
+ pwdinfo->persistent_supported = 0;
+ pwdinfo->session_available = true;
+ pwdinfo->wfd_tdls_enable = 0;
+ pwdinfo->wfd_tdls_weaksec = 0;
+}
+
+#ifdef CONFIG_8723AU_P2P
+int rtw_init_wifi_display_info(struct rtw_adapter *padapter)
+{
+ int res = _SUCCESS;
+ struct wifi_display_info *pwfd_info = &padapter->wfd_info;
+
+ /* Used in P2P and TDLS */
+ pwfd_info->rtsp_ctrlport = 554;
+ pwfd_info->peer_rtsp_ctrlport = 0; /* Reset to 0 */
+ pwfd_info->wfd_enable = false;
+ pwfd_info->wfd_device_type = WFD_DEVINFO_PSINK;
+ pwfd_info->scan_result_type = SCAN_RESULT_P2P_ONLY;
+
+ /* Used in P2P */
+ pwfd_info->peer_session_avail = true;
+ pwfd_info->wfd_pc = false;
+
+ /* Used in TDLS */
+ memset(pwfd_info->ip_address, 0x00, 4);
+ memset(pwfd_info->peer_ip_address, 0x00, 4);
+ return res;
+}
+#endif /* CONFIG_8723AU_P2P */
+
+void rtw_init_wifidirect_timers23a(struct rtw_adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ setup_timer(&pwdinfo->find_phase_timer, find_phase_timer_process,
+ (unsigned long)padapter);
+ setup_timer(&pwdinfo->restore_p2p_state_timer,
+ restore_p2p_state_timer_process, (unsigned long)padapter);
+ setup_timer(&pwdinfo->pre_tx_scan_timer, pre_tx_scan_timer_process,
+ (unsigned long)padapter);
+ setup_timer(&pwdinfo->reset_ch_sitesurvey,
+ reset_ch_sitesurvey_timer_process, (unsigned long)padapter);
+ setup_timer(&pwdinfo->reset_ch_sitesurvey2,
+ reset_ch_sitesurvey_timer_process2,
+ (unsigned long)padapter);
+}
+
+void rtw_init_wifidirect_addrs23a(struct rtw_adapter *padapter, u8 *dev_addr, u8 *iface_addr)
+{
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ /*init device&interface address */
+ if (dev_addr) {
+ memcpy(pwdinfo->device_addr, dev_addr, ETH_ALEN);
+ }
+ if (iface_addr) {
+ memcpy(pwdinfo->interface_addr, iface_addr, ETH_ALEN);
+ }
+#endif
+}
+
+void init_wifidirect_info23a(struct rtw_adapter *padapter, enum P2P_ROLE role)
+{
+ struct wifidirect_info *pwdinfo;
+#ifdef CONFIG_8723AU_P2P
+ struct wifi_display_info *pwfd_info = &padapter->wfd_info;
+#endif
+
+ pwdinfo = &padapter->wdinfo;
+
+ pwdinfo->padapter = padapter;
+
+ /* 1, 6, 11 are the social channel defined in the WiFi Direct specification. */
+ pwdinfo->social_chan[0] = 1;
+ pwdinfo->social_chan[1] = 6;
+ pwdinfo->social_chan[2] = 11;
+ pwdinfo->social_chan[3] = 0; /* channel 0 for scanning ending in site survey function. */
+
+ /* Use the channel 11 as the listen channel */
+ pwdinfo->listen_channel = 11;
+
+ if (role == P2P_ROLE_DEVICE)
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_LISTEN);
+ pwdinfo->intent = 1;
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_LISTEN);
+ }
+ else if (role == P2P_ROLE_CLIENT)
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ pwdinfo->intent = 1;
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ }
+ else if (role == P2P_ROLE_GO)
+ {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ pwdinfo->intent = 15;
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ }
+
+/* Use the OFDM rate in the P2P probe response frame. (6(B), 9(B), 12, 18, 24, 36, 48, 54) */
+ pwdinfo->support_rate[0] = 0x8c; /* 6(B) */
+ pwdinfo->support_rate[1] = 0x92; /* 9(B) */
+ pwdinfo->support_rate[2] = 0x18; /* 12 */
+ pwdinfo->support_rate[3] = 0x24; /* 18 */
+ pwdinfo->support_rate[4] = 0x30; /* 24 */
+ pwdinfo->support_rate[5] = 0x48; /* 36 */
+ pwdinfo->support_rate[6] = 0x60; /* 48 */
+ pwdinfo->support_rate[7] = 0x6c; /* 54 */
+
+ memcpy(pwdinfo->p2p_wildcard_ssid, "DIRECT-", 7);
+
+ memset(pwdinfo->device_name, 0x00, WPS_MAX_DEVICE_NAME_LEN);
+ pwdinfo->device_name_len = 0;
+
+ memset(&pwdinfo->invitereq_info, 0x00, sizeof(struct tx_invite_req_info));
+ pwdinfo->invitereq_info.token = 3; /* Token used for P2P invitation request frame. */
+
+ memset(&pwdinfo->inviteresp_info, 0x00, sizeof(struct tx_invite_resp_info));
+ pwdinfo->inviteresp_info.token = 0;
+
+ pwdinfo->profileindex = 0;
+ memset(&pwdinfo->profileinfo[ 0 ], 0x00, sizeof(struct profile_info) * P2P_MAX_PERSISTENT_GROUP_NUM);
+
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
+
+ pwdinfo->listen_dwell = (u8) ((jiffies % 3) + 1);
+ /* DBG_8723A("[%s] listen_dwell time is %d00ms\n", __func__, pwdinfo->listen_dwell); */
+
+ memset(&pwdinfo->tx_prov_disc_info, 0x00, sizeof(struct tx_provdisc_req_info));
+ pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_NONE;
+
+ memset(&pwdinfo->nego_req_info, 0x00, sizeof(struct tx_nego_req_info));
+
+ pwdinfo->device_password_id_for_nego = WPS_DPID_PBC;
+ pwdinfo->negotiation_dialog_token = 1;
+
+ memset(pwdinfo->nego_ssid, 0x00, IEEE80211_MAX_SSID_LEN);
+ pwdinfo->nego_ssidlen = 0;
+
+ pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
+#ifdef CONFIG_8723AU_P2P
+ pwdinfo->supported_wps_cm = WPS_CONFIG_METHOD_DISPLAY | WPS_CONFIG_METHOD_PBC;
+ pwdinfo->wfd_info = pwfd_info;
+#else
+ pwdinfo->supported_wps_cm = WPS_CONFIG_METHOD_DISPLAY | WPS_CONFIG_METHOD_PBC | WPS_CONFIG_METHOD_KEYPAD;
+#endif /* CONFIG_8723AU_P2P */
+ pwdinfo->channel_list_attr_len = 0;
+ memset(pwdinfo->channel_list_attr, 0x00, 100);
+
+ memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, 0x00, 4);
+ memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, '0', 3);
+ memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
+ pwdinfo->wfd_tdls_enable = 0;
+ memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
+ memset(pwdinfo->p2p_peer_device_addr, 0x00, ETH_ALEN);
+
+ pwdinfo->rx_invitereq_info.operation_ch[0] = 0;
+ pwdinfo->rx_invitereq_info.operation_ch[1] = 0; /* Used to indicate the scan end in site survey function */
+ pwdinfo->rx_invitereq_info.scan_op_ch_only = 0;
+ pwdinfo->p2p_info.operation_ch[0] = 0;
+ pwdinfo->p2p_info.operation_ch[1] = 0; /* Used to indicate the scan end in site survey function */
+ pwdinfo->p2p_info.scan_op_ch_only = 0;
+}
+
+int rtw_p2p_enable23a(struct rtw_adapter *padapter, enum P2P_ROLE role)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ int ret = _SUCCESS;
+
+ if (role == P2P_ROLE_DEVICE || role == P2P_ROLE_CLIENT ||
+ role == P2P_ROLE_GO) {
+ /* leave IPS/Autosuspend */
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Added by Albert 2011/03/22 */
+ /* In the P2P mode, the driver should not support the b mode. */
+ /* So, the Tx packet shouldn't use the CCK rate */
+ update_tx_basic_rate23a(padapter, WIRELESS_11AGN);
+
+ /* Enable P2P function */
+ init_wifidirect_info23a(padapter, role);
+
+ rtw_hal_set_odm_var23a(padapter, HAL_ODM_P2P_STATE, NULL, true);
+ #ifdef CONFIG_8723AU_P2P
+ rtw_hal_set_odm_var23a(padapter, HAL_ODM_WIFI_DISPLAY_STATE, NULL, true);
+ #endif
+
+ }
+ else if (role == P2P_ROLE_DISABLE)
+ {
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Disable P2P function */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ {
+ del_timer_sync(&pwdinfo->find_phase_timer);
+ del_timer_sync(&pwdinfo->restore_p2p_state_timer);
+ del_timer_sync(&pwdinfo->pre_tx_scan_timer);
+ del_timer_sync(&pwdinfo->reset_ch_sitesurvey);
+ del_timer_sync(&pwdinfo->reset_ch_sitesurvey2);
+ reset_ch_sitesurvey_timer_process((unsigned long)padapter);
+ reset_ch_sitesurvey_timer_process2((unsigned long)padapter);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DISABLE);
+ memset(&pwdinfo->rx_prov_disc_info, 0x00, sizeof(struct rx_provdisc_req_info));
+ }
+
+ rtw_hal_set_odm_var23a(padapter, HAL_ODM_P2P_STATE, NULL, false);
+ #ifdef CONFIG_8723AU_P2P
+ rtw_hal_set_odm_var23a(padapter, HAL_ODM_WIFI_DISPLAY_STATE, NULL, false);
+ #endif
+
+ /* Restore to initial setting. */
+ update_tx_basic_rate23a(padapter, padapter->registrypriv.wireless_mode);
+ }
+
+exit:
+ return ret;
+}
+
+#endif /* CONFIG_8723AU_P2P */
diff --git a/drivers/staging/rtl8723au/core/rtw_pwrctrl.c b/drivers/staging/rtl8723au/core/rtw_pwrctrl.c
new file mode 100644
index 000000000000..8ddd67f194ba
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_pwrctrl.c
@@ -0,0 +1,689 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_PWRCTRL_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <osdep_intf.h>
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+#include <rtl8723a_hal.h>
+#endif
+
+void ips_enter23a(struct rtw_adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+ down(&pwrpriv->lock);
+
+ pwrpriv->bips_processing = true;
+
+ /* syn ips_mode with request */
+ pwrpriv->ips_mode = pwrpriv->ips_mode_req;
+
+ pwrpriv->ips_enter23a_cnts++;
+ DBG_8723A("==>ips_enter23a cnts:%d\n", pwrpriv->ips_enter23a_cnts);
+#ifdef CONFIG_8723AU_BT_COEXIST
+ BTDM_TurnOffBtCoexistBeforeEnterIPS(padapter);
+#endif
+ if (pwrpriv->change_rfpwrstate == rf_off) {
+ pwrpriv->bpower_saving = true;
+ DBG_8723A_LEVEL(_drv_always_, "nolinked power save enter\n");
+
+ if (pwrpriv->ips_mode == IPS_LEVEL_2)
+ pwrpriv->bkeepfwalive = true;
+
+ rtw_ips_pwr_down23a(padapter);
+ pwrpriv->rf_pwrstate = rf_off;
+ }
+ pwrpriv->bips_processing = false;
+
+ up(&pwrpriv->lock);
+}
+
+int ips_leave23a(struct rtw_adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int result = _SUCCESS;
+ int keyid;
+
+ down(&pwrpriv->lock);
+
+ if (pwrpriv->rf_pwrstate == rf_off && !pwrpriv->bips_processing) {
+ pwrpriv->bips_processing = true;
+ pwrpriv->change_rfpwrstate = rf_on;
+ pwrpriv->ips_leave23a_cnts++;
+ DBG_8723A("==>ips_leave23a cnts:%d\n", pwrpriv->ips_leave23a_cnts);
+
+ result = rtw_ips_pwr_up23a(padapter);
+ if (result == _SUCCESS)
+ pwrpriv->rf_pwrstate = rf_on;
+ DBG_8723A_LEVEL(_drv_always_, "nolinked power save leave\n");
+
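+ /*
+  * IPS powered the hardware down, so any WEP keys recorded in
+  * key_mask have to be written back before traffic can resume.
+  */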
+ if (psecuritypriv->dot11PrivacyAlgrthm == _WEP40_ ||
+ psecuritypriv->dot11PrivacyAlgrthm == _WEP104_) {
+ DBG_8723A("==>%s, channel(%d), processing(%x)\n", __func__, padapter->mlmeextpriv.cur_channel, pwrpriv->bips_processing);
+ set_channel_bwmode23a(padapter, padapter->mlmeextpriv.cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ for (keyid = 0; keyid < 4; keyid++) {
+ if (pmlmepriv->key_mask & CHKBIT(keyid)) {
+ if (keyid == psecuritypriv->dot11PrivacyKeyIndex)
+ result = rtw_set_key23a(padapter, psecuritypriv, keyid, 1);
+ else
+ result = rtw_set_key23a(padapter, psecuritypriv, keyid, 0);
+ }
+ }
+ }
+
+ DBG_8723A("==> ips_leave23a.....LED(0x%08x)...\n", rtw_read32(padapter, 0x4c));
+ pwrpriv->bips_processing = false;
+
+ pwrpriv->bkeepfwalive = false;
+ pwrpriv->bpower_saving = false;
+ }
+
+ up(&pwrpriv->lock);
+
+ return result;
+}
+
+
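+/*
+ * Return true only when neither this adapter nor its buddy interface is
+ * associated, scanning, linking, in AP/ad-hoc mode or active as P2P, and no
+ * transmit buffers are outstanding, i.e. it is safe to enter inactive power
+ * save.
+ */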
+static bool rtw_pwr_unassociated_idle(struct rtw_adapter *adapter)
+{
+ struct rtw_adapter *buddy = adapter->pbuddy_adapter;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct xmit_priv *pxmit_priv = &adapter->xmitpriv;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ bool ret = false;
+
+ if (time_after_eq(adapter->pwrctrlpriv.ips_deny_time, jiffies))
+ goto exit;
+
+ if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR)
+ || check_fwstate(pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS)
+ || check_fwstate(pmlmepriv, WIFI_AP_STATE)
+ || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE)
+ || !rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)
+ ) {
+ goto exit;
+ }
+
+ /* consider buddy, if exist */
+ if (buddy) {
+ struct mlme_priv *b_pmlmepriv = &buddy->mlmepriv;
+ struct wifidirect_info *b_pwdinfo = &buddy->wdinfo;
+
+ if (check_fwstate(b_pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR)
+ || check_fwstate(b_pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS)
+ || check_fwstate(b_pmlmepriv, WIFI_AP_STATE)
+ || check_fwstate(b_pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE)
+ || !rtw_p2p_chk_state(b_pwdinfo, P2P_STATE_NONE)
+ ) {
+ goto exit;
+ }
+ }
+
+ if (pxmit_priv->free_xmitbuf_cnt != NR_XMITBUFF ||
+ pxmit_priv->free_xmit_extbuf_cnt != NR_XMIT_EXTBUFF) {
+ DBG_8723A_LEVEL(_drv_always_, "There are some pkts to transmit\n");
+ DBG_8723A_LEVEL(_drv_info_, "free_xmitbuf_cnt: %d, free_xmit_extbuf_cnt: %d\n",
+ pxmit_priv->free_xmitbuf_cnt, pxmit_priv->free_xmit_extbuf_cnt);
+ goto exit;
+ }
+
+ ret = true;
+
+exit:
+ return ret;
+}
+
+void rtw_ps_processor23a(struct rtw_adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ enum rt_rf_power_state rfpwrstate;
+
+ pwrpriv->ps_processing = true;
+
+ if (pwrpriv->bips_processing == true)
+ goto exit;
+
+ if (padapter->pwrctrlpriv.bHWPwrPindetect) {
+ rfpwrstate = RfOnOffDetect23a(padapter);
+ DBG_8723A("@@@@- #2 %s ==> rfstate:%s\n", __func__, (rfpwrstate == rf_on)?"rf_on":"rf_off");
+
+ if (rfpwrstate != pwrpriv->rf_pwrstate) {
+ if (rfpwrstate == rf_off) {
+ pwrpriv->change_rfpwrstate = rf_off;
+ pwrpriv->brfoffbyhw = true;
+ padapter->bCardDisableWOHSM = true;
+ rtw_hw_suspend23a(padapter);
+ } else {
+ pwrpriv->change_rfpwrstate = rf_on;
+ rtw_hw_resume23a(padapter);
+ }
+ DBG_8723A("current rf_pwrstate(%s)\n", (pwrpriv->rf_pwrstate == rf_off)?"rf_off":"rf_on");
+ }
+ pwrpriv->pwr_state_check_cnts++;
+ }
+
+ if (pwrpriv->ips_mode_req == IPS_NONE)
+ goto exit;
+
+ if (!rtw_pwr_unassociated_idle(padapter))
+ goto exit;
+
+ if (pwrpriv->rf_pwrstate == rf_on &&
+ (pwrpriv->pwr_state_check_cnts % 4) == 0) {
+ DBG_8723A("==>%s .fw_state(%x)\n", __func__, get_fwstate(pmlmepriv));
+ pwrpriv->change_rfpwrstate = rf_off;
+ ips_enter23a(padapter);
+ }
+exit:
+ rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
+ pwrpriv->ps_processing = false;
+}
+
+static void pwr_state_check_handler(unsigned long data)
+{
+ struct rtw_adapter *padapter = (struct rtw_adapter *)data;
+ rtw_ps_cmd23a(padapter);
+}
+
+/*
+ *
+ * Parameters
+ * padapter
+ * pslv power state level, only could be PS_STATE_S0 ~ PS_STATE_S4
+ *
+ */
+void rtw_set_rpwm23a(struct rtw_adapter *padapter, u8 pslv)
+{
+ u8 rpwm;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+
+
+ pslv = PS_STATE(pslv);
+
+ if (pwrpriv->btcoex_rfon) {
+ if (pslv < PS_STATE_S4)
+ pslv = PS_STATE_S3;
+ }
+
+ if (pwrpriv->rpwm == pslv) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: Already set rpwm[0x%02X], new = 0x%02X!\n", __func__, pwrpriv->rpwm, pslv));
+ return;
+ }
+
+ if ((padapter->bSurpriseRemoved == true) ||
+ (padapter->hw_init_completed == false)) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: SurpriseRemoved(%d) hw_init_completed(%d)\n",
+ __func__, padapter->bSurpriseRemoved, padapter->hw_init_completed));
+
+ pwrpriv->cpwm = PS_STATE_S4;
+
+ return;
+ }
+
+ if (padapter->bDriverStopped == true) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: change power state(0x%02X) when DriverStopped\n", __func__, pslv));
+
+ if (pslv < PS_STATE_S2) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: Reject to enter PS_STATE(0x%02X) lower than S2 when DriverStopped!!\n", __func__, pslv));
+ return;
+ }
+ }
+
+ rpwm = pslv | pwrpriv->tog;
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("rtw_set_rpwm23a: rpwm = 0x%02x cpwm = 0x%02x\n", rpwm, pwrpriv->cpwm));
+
+ pwrpriv->rpwm = pslv;
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_SET_RPWM, (u8 *)(&rpwm));
+
+ pwrpriv->tog += 0x80;
+ pwrpriv->cpwm = pslv;
+
+
+}
+
+u8 PS_RDY_CHECK(struct rtw_adapter *padapter)
+{
+ unsigned long delta_time;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ delta_time = jiffies - pwrpriv->DelayLPSLastTimeStamp;
+
+ if (delta_time < LPS_DELAY_TIME)
+ return false;
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == false) ||
+ (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
+ return false;
+ if (pwrpriv->bInSuspend)
+ return false;
+ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X &&
+     !padapter->securitypriv.binstallGrpkey) {
+ DBG_8723A("Group handshake still in progress !!!\n");
+ return false;
+ }
+ if (!rtw_cfg80211_pwr_mgmt(padapter))
+ return false;
+
+ return true;
+}
+
+void rtw_set_ps_mode23a(struct rtw_adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_ant_mode)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_8723AU_P2P */
+
+
+
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("%s: PowerMode =%d Smart_PS =%d\n",
+ __func__, ps_mode, smart_ps));
+
+ if (ps_mode > PM_Card_Disable) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_, ("ps_mode:%d error\n", ps_mode));
+ return;
+ }
+
+ if (pwrpriv->pwr_mode == ps_mode) {
+ if (ps_mode == PS_MODE_ACTIVE)
+ return;
+
+ if (pwrpriv->smart_ps == smart_ps &&
+     pwrpriv->bcn_ant_mode == bcn_ant_mode)
+ return;
+ }
+
+ if (ps_mode == PS_MODE_ACTIVE) {
+#ifdef CONFIG_8723AU_P2P
+ if (pwdinfo->opp_ps == 0)
+#endif /* CONFIG_8723AU_P2P */
+ {
+ DBG_8723A("rtw_set_ps_mode23a: Leave 802.11 power save\n");
+
+ pwrpriv->pwr_mode = ps_mode;
+ rtw_set_rpwm23a(padapter, PS_STATE_S4);
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+ pwrpriv->bFwCurrentInPSMode = false;
+ }
+ } else {
+ if (PS_RDY_CHECK(padapter)
+#ifdef CONFIG_8723AU_BT_COEXIST
+     || BT_1Ant(padapter)
+#endif
+     ) {
+ DBG_8723A("%s: Enter 802.11 power save\n", __func__);
+
+ pwrpriv->bFwCurrentInPSMode = true;
+ pwrpriv->pwr_mode = ps_mode;
+ pwrpriv->smart_ps = smart_ps;
+ pwrpriv->bcn_ant_mode = bcn_ant_mode;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+
+#ifdef CONFIG_8723AU_P2P
+ /* Set CTWindow after LPS */
+ if (pwdinfo->opp_ps == 1)
+ p2p_ps_wk_cmd23a(padapter, P2P_PS_ENABLE, 0);
+#endif /* CONFIG_8723AU_P2P */
+
+ rtw_set_rpwm23a(padapter, PS_STATE_S2);
+ }
+ }
+
+
+}
+
+/*
+ * Return:
+ * 0: Leave OK
+ * -1: Timeout
+ * -2: Other error
+ */
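+/*
+ * Polls HW_VAR_FWLPS_RF_ON roughly every 100us until the firmware reports
+ * the RF as on, the delay_ms budget expires, or the device is surprise
+ * removed. Typical use (see LPS_Leave23a() below):
+ *	LPS_RF_ON_check23a(padapter, LPS_LEAVE_TIMEOUT_MS);
+ */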
+s32 LPS_RF_ON_check23a(struct rtw_adapter *padapter, u32 delay_ms)
+{
+ unsigned long start_time, end_time;
+ u8 bAwake = false;
+ s32 err = 0;
+
+ start_time = jiffies;
+ end_time = start_time + msecs_to_jiffies(delay_ms);
+
+ while (1) {
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
+ if (bAwake)
+ break;
+
+ if (padapter->bSurpriseRemoved) {
+ err = -2;
+ DBG_8723A("%s: device surprise removed!!\n", __func__);
+ break;
+ }
+
+ if (time_after(jiffies, end_time)) {
+ err = -1;
+ DBG_8723A("%s: Wait for FW LPS leave more than %u ms!!!\n", __func__, delay_ms);
+ break;
+ }
+ udelay(100);
+ }
+
+ return err;
+}
+
+/* Description: */
+/* Enter the leisure power save mode. */
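+/* LPS is only entered once PS_RDY_CHECK() passes and the adapter has been
+ * idle for two consecutive checks (LpsIdleCount >= 2, i.e. roughly 4 s). */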
+void LPS_Enter23a(struct rtw_adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+ if (!PS_RDY_CHECK(padapter))
+ return;
+
+ if (pwrpriv->bLeisurePs) {
+ /* Idle for a while if we connect to AP a while ago. */
+ if (pwrpriv->LpsIdleCount >= 2) { /* 4 Sec */
+ if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) {
+ pwrpriv->bpower_saving = true;
+ DBG_8723A("%s smart_ps:%d\n", __func__, pwrpriv->smart_ps);
+ /* For Tenda W311R IOT issue */
+ rtw_set_ps_mode23a(padapter, pwrpriv->power_mgnt, pwrpriv->smart_ps, 0);
+ }
+ } else {
+ pwrpriv->LpsIdleCount++;
+ }
+ }
+}
+
+/* Description: */
+/* Leave the leisure power save mode. */
+void LPS_Leave23a(struct rtw_adapter *padapter)
+{
+#define LPS_LEAVE_TIMEOUT_MS 100
+
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+ if (pwrpriv->bLeisurePs) {
+ if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
+ rtw_set_ps_mode23a(padapter, PS_MODE_ACTIVE, 0, 0);
+
+ if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
+ LPS_RF_ON_check23a(padapter, LPS_LEAVE_TIMEOUT_MS);
+ }
+ }
+
+ pwrpriv->bpower_saving = false;
+}
+
+/* Description: Leave all power save mode: LPS, FwLPS, IPS if needed. */
+/* Move code to function by tynli. 2010.03.26. */
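+/* Only acts while associated (_FW_LINKED): it disables P2P power save (when
+ * CONFIG_8723AU_P2P is set) and queues an LPS_CTRL_LEAVE control work item.
+ * Waking up from IPS appears to be handled separately, see
+ * _rtw_pwr_wakeup23a() below. */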
+void LeaveAllPowerSaveMode23a(struct rtw_adapter *Adapter)
+{
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ u8 enqueue = 0;
+
+
+
+ /* DBG_8723A("%s.....\n", __func__); */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ { /* connect */
+#ifdef CONFIG_8723AU_P2P
+ p2p_ps_wk_cmd23a(Adapter, P2P_PS_DISABLE, enqueue);
+#endif /* CONFIG_8723AU_P2P */
+
+ rtw_lps_ctrl_wk_cmd23a(Adapter, LPS_CTRL_LEAVE, enqueue);
+ }
+
+
+}
+
+void rtw_init_pwrctrl_priv23a(struct rtw_adapter *padapter)
+{
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+ sema_init(&pwrctrlpriv->lock, 1);
+ pwrctrlpriv->rf_pwrstate = rf_on;
+ pwrctrlpriv->ips_enter23a_cnts = 0;
+ pwrctrlpriv->ips_leave23a_cnts = 0;
+ pwrctrlpriv->bips_processing = false;
+
+ pwrctrlpriv->ips_mode = padapter->registrypriv.ips_mode;
+ pwrctrlpriv->ips_mode_req = padapter->registrypriv.ips_mode;
+
+ pwrctrlpriv->pwr_state_check_interval = RTW_PWR_STATE_CHK_INTERVAL;
+ pwrctrlpriv->pwr_state_check_cnts = 0;
+ pwrctrlpriv->bInternalAutoSuspend = false;
+ pwrctrlpriv->bInSuspend = false;
+ pwrctrlpriv->bkeepfwalive = false;
+
+ pwrctrlpriv->LpsIdleCount = 0;
+ pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt; /* PS_MODE_MIN; */
+ pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
+
+ pwrctrlpriv->bFwCurrentInPSMode = false;
+
+ pwrctrlpriv->rpwm = 0;
+ pwrctrlpriv->cpwm = PS_STATE_S4;
+
+ pwrctrlpriv->pwr_mode = PS_MODE_ACTIVE;
+ pwrctrlpriv->smart_ps = padapter->registrypriv.smart_ps;
+ pwrctrlpriv->bcn_ant_mode = 0;
+
+ pwrctrlpriv->tog = 0x80;
+
+ pwrctrlpriv->btcoex_rfon = false;
+
+ setup_timer(&pwrctrlpriv->pwr_state_check_timer,
+ pwr_state_check_handler, (unsigned long)padapter);
+
+
+}
+
+void rtw_free_pwrctrl_priv(struct rtw_adapter *adapter)
+{
+}
+
+u8 rtw_interface_ps_func23a(struct rtw_adapter *padapter, enum hal_intf_ps_func efunc_id, u8 *val)
+{
+ u8 bResult = true;
+ rtw_hal_intf_ps_func23a(padapter, efunc_id, val);
+
+ return bResult;
+}
+
+inline void rtw_set_ips_deny23a(struct rtw_adapter *padapter, u32 ms)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ pwrpriv->ips_deny_time = jiffies + msecs_to_jiffies(ms);
+}
+
+/*
+ * rtw_pwr_wakeup - Wake the NIC up from: 1) IPS, 2) USB autosuspend
+ * @adapter: pointer to the rtw_adapter structure
+ * @ips_deffer_ms: number of ms during which falling back into IPS is prevented after wakeup
+ * Return _SUCCESS or _FAIL
+ */
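+/*
+ * The IPS deny window (pwrpriv->ips_deny_time) is pushed out by ips_deffer_ms
+ * both on entry and again at the exit label, so IPS is held off for at least
+ * that long after this call, whatever the outcome.
+ */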
+
+int _rtw_pwr_wakeup23a(struct rtw_adapter *padapter, u32 ips_deffer_ms, const char *caller)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int ret = _SUCCESS;
+ unsigned long start = jiffies;
+ unsigned long new_deny_time;
+
+ new_deny_time = jiffies + msecs_to_jiffies(ips_deffer_ms);
+
+ if (time_before(pwrpriv->ips_deny_time, new_deny_time))
+ pwrpriv->ips_deny_time = new_deny_time;
+
+ if (pwrpriv->ps_processing) {
+ DBG_8723A("%s wait ps_processing...\n", __func__);
+ while (pwrpriv->ps_processing &&
+ jiffies_to_msecs(jiffies - start) <= 3000)
+ msleep(10);
+ if (pwrpriv->ps_processing)
+ DBG_8723A("%s wait ps_processing timeout\n", __func__);
+ else
+ DBG_8723A("%s wait ps_processing done\n", __func__);
+ }
+
+ if (rtw_hal_sreset_inprogress(padapter)) {
+ DBG_8723A("%s wait sreset_inprogress...\n", __func__);
+ while (rtw_hal_sreset_inprogress(padapter) &&
+ jiffies_to_msecs(jiffies - start) <= 4000)
+ msleep(10);
+ if (rtw_hal_sreset_inprogress(padapter))
+ DBG_8723A("%s wait sreset_inprogress timeout\n", __func__);
+ else
+ DBG_8723A("%s wait sreset_inprogress done\n", __func__);
+ }
+
+ if (pwrpriv->bInternalAutoSuspend == false && pwrpriv->bInSuspend) {
+ DBG_8723A("%s wait bInSuspend...\n", __func__);
+ while (pwrpriv->bInSuspend &&
+ (jiffies_to_msecs(jiffies - start) <= 3000)) {
+ msleep(10);
+ }
+ if (pwrpriv->bInSuspend)
+ DBG_8723A("%s wait bInSuspend timeout\n", __func__);
+ else
+ DBG_8723A("%s wait bInSuspend done\n", __func__);
+ }
+
+ /* System suspend is not allowed to wakeup */
+ if (!pwrpriv->bInternalAutoSuspend && pwrpriv->bInSuspend) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* block??? */
+ if (pwrpriv->bInternalAutoSuspend && padapter->net_closed) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* I think this should be checked in the IPS, LPS, autosuspend functions... */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (pwrpriv->rf_pwrstate == rf_off) {
+ DBG_8723A("%s call ips_leave23a....\n", __func__);
+ if (ips_leave23a(padapter) == _FAIL) {
+ DBG_8723A("======> ips_leave23a fail.............\n");
+ ret = _FAIL;
+ goto exit;
+ }
+ }
+
+ /* TODO: the following checking need to be merged... */
+ if (padapter->bDriverStopped || !padapter->bup ||
+ !padapter->hw_init_completed) {
+ DBG_8723A("%s: bDriverStopped =%d, bup =%d, hw_init_completed "
+ "=%u\n", caller, padapter->bDriverStopped,
+ padapter->bup, padapter->hw_init_completed);
+ ret = _FAIL;
+ goto exit;
+ }
+
+exit:
+ new_deny_time = jiffies + msecs_to_jiffies(ips_deffer_ms);
+ if (time_before(pwrpriv->ips_deny_time, new_deny_time))
+ pwrpriv->ips_deny_time = new_deny_time;
+ return ret;
+}
+
+int rtw_pm_set_lps23a(struct rtw_adapter *padapter, u8 mode)
+{
+ int ret = 0;
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+ if (mode < PS_MODE_NUM) {
+ if (pwrctrlpriv->power_mgnt != mode) {
+ if (mode == PS_MODE_ACTIVE)
+ LeaveAllPowerSaveMode23a(padapter);
+ else
+ pwrctrlpriv->LpsIdleCount = 2;
+ pwrctrlpriv->power_mgnt = mode;
+ pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int rtw_pm_set_ips23a(struct rtw_adapter *padapter, u8 mode)
+{
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+ if (mode == IPS_NORMAL || mode == IPS_LEVEL_2) {
+ rtw_ips_mode_req(pwrctrlpriv, mode);
+ DBG_8723A("%s %s\n", __func__, mode == IPS_NORMAL ? "IPS_NORMAL" : "IPS_LEVEL_2");
+ return 0;
+ } else if (mode == IPS_NONE) {
+ rtw_ips_mode_req(pwrctrlpriv, mode);
+ DBG_8723A("%s %s\n", __func__, "IPS_NONE");
+ if (!padapter->bSurpriseRemoved && rtw_pwr_wakeup(padapter) == _FAIL)
+ return -EFAULT;
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
new file mode 100644
index 000000000000..0b2455e4f5b6
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_recv.c
@@ -0,0 +1,2471 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_RECV_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <mlme_osdep.h>
+#include <linux/ip.h>
+#include <linux/if_ether.h>
+#include <ethernet.h>
+#include <usb_ops.h>
+#include <linux/ieee80211.h>
+#include <wifi.h>
+
+void rtw_signal_stat_timer_hdl23a(unsigned long data);
+
+void _rtw_init_sta_recv_priv23a(struct sta_recv_priv *psta_recvpriv)
+{
+
+
+
+ spin_lock_init(&psta_recvpriv->lock);
+
+ /* for (i = 0; i<MAX_RX_NUMBLKS; i++) */
+ /* _rtw_init_queue23a(&psta_recvpriv->blk_strms[i]); */
+
+ _rtw_init_queue23a(&psta_recvpriv->defrag_q);
+
+
+}
+
+int _rtw_init_recv_priv23a(struct recv_priv *precvpriv,
+ struct rtw_adapter *padapter)
+{
+ struct recv_frame *precvframe;
+ int i;
+ int res = _SUCCESS;
+
+
+
+ /* We don't need to memset padapter->XXX to zero, because
+ adapter is allocated by rtw_zvmalloc(). */
+ /* memset((unsigned char *)precvpriv, 0, sizeof (struct recv_priv)); */
+
+ spin_lock_init(&precvpriv->lock);
+
+ _rtw_init_queue23a(&precvpriv->free_recv_queue);
+ _rtw_init_queue23a(&precvpriv->recv_pending_queue);
+ _rtw_init_queue23a(&precvpriv->uc_swdec_pending_queue);
+
+ precvpriv->adapter = padapter;
+
+ precvpriv->free_recvframe_cnt = NR_RECVFRAME;
+
+ precvpriv->pallocated_frame_buf =
+ rtw_zvmalloc(NR_RECVFRAME * sizeof(struct recv_frame));
+
+ if (precvpriv->pallocated_frame_buf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ precvframe = precvpriv->pallocated_frame_buf;
+
+ for (i = 0; i < NR_RECVFRAME ; i++) {
+ INIT_LIST_HEAD(&precvframe->list);
+
+ list_add_tail(&precvframe->list,
+ &precvpriv->free_recv_queue.queue);
+
+ res = rtw_os_recv_resource_alloc23a(padapter, precvframe);
+
+ precvframe->adapter = padapter;
+ precvframe++;
+ }
+
+ precvpriv->rx_pending_cnt = 1;
+
+ sema_init(&precvpriv->allrxreturnevt, 0);
+
+ res = rtw_hal_init23a_recv_priv(padapter);
+
+ setup_timer(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl23a,
+ (unsigned long)padapter);
+
+ precvpriv->signal_stat_sampling_interval = 1000; /* ms */
+
+ rtw_set_signal_stat_timer(precvpriv);
+
+exit:
+
+
+
+ return res;
+}
+
+void _rtw_free_recv_priv23a(struct recv_priv *precvpriv)
+{
+ struct rtw_adapter *padapter = precvpriv->adapter;
+
+
+
+ rtw_free_uc_swdec_pending_queue23a(padapter);
+
+ if (precvpriv->pallocated_frame_buf) {
+ rtw_vmfree(precvpriv->pallocated_frame_buf,
+ NR_RECVFRAME * sizeof(struct recv_frame));
+ }
+
+ rtw_hal_free_recv_priv23a(padapter);
+
+
+}
+
+struct recv_frame *rtw_alloc_recvframe23a(struct rtw_queue *pfree_recv_queue)
+{
+ struct recv_frame *pframe;
+ struct list_head *plist, *phead;
+ struct rtw_adapter *padapter;
+ struct recv_priv *precvpriv;
+
+ spin_lock_bh(&pfree_recv_queue->lock);
+
+ if (_rtw_queue_empty23a(pfree_recv_queue) == true)
+ pframe = NULL;
+ else {
+ phead = get_list_head(pfree_recv_queue);
+
+ plist = phead->next;
+
+ pframe = container_of(plist, struct recv_frame, list);
+
+ list_del_init(&pframe->list);
+ padapter = pframe->adapter;
+ if (padapter) {
+ precvpriv = &padapter->recvpriv;
+ if (pfree_recv_queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt--;
+ }
+ }
+
+ spin_unlock_bh(&pfree_recv_queue->lock);
+
+ return pframe;
+}
+
+int rtw_free_recvframe23a(struct recv_frame *precvframe, struct rtw_queue *pfree_recv_queue)
+{
+ struct rtw_adapter *padapter = precvframe->adapter;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+
+
+ if (precvframe->pkt) {
+ dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */
+ precvframe->pkt = NULL;
+ }
+
+ spin_lock_bh(&pfree_recv_queue->lock);
+
+ list_del_init(&precvframe->list);
+
+ list_add_tail(&precvframe->list, get_list_head(pfree_recv_queue));
+
+ if (padapter) {
+ if (pfree_recv_queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt++;
+ }
+
+ spin_unlock_bh(&pfree_recv_queue->lock);
+
+
+
+ return _SUCCESS;
+}
+
+int rtw_enqueue_recvframe23a(struct recv_frame *precvframe, struct rtw_queue *queue)
+{
+ struct rtw_adapter *padapter = precvframe->adapter;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ spin_lock_bh(&queue->lock);
+
+ list_del_init(&precvframe->list);
+
+ list_add_tail(&precvframe->list, get_list_head(queue));
+
+ if (padapter) {
+ if (queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt++;
+ }
+
+ spin_unlock_bh(&queue->lock);
+
+ return _SUCCESS;
+}
+
+/*
+ * Callers: recvframe_defrag() and recvframe_chk_defrag23a(), both running in
+ * the recv_thread (passive).
+ * pframequeue: the defrag_queue, also accessed from the recv_thread (passive).
+ *
+ * The queues are protected by their spinlocks.
+ */
+
+void rtw_free_recvframe23a_queue(struct rtw_queue *pframequeue, struct rtw_queue *pfree_recv_queue)
+{
+ struct recv_frame *hdr;
+ struct list_head *plist, *phead, *ptmp;
+
+
+ spin_lock(&pframequeue->lock);
+
+ phead = get_list_head(pframequeue);
+ plist = phead->next;
+
+ list_for_each_safe(plist, ptmp, phead) {
+ hdr = container_of(plist, struct recv_frame, list);
+ rtw_free_recvframe23a(hdr, pfree_recv_queue);
+ }
+
+ spin_unlock(&pframequeue->lock);
+
+
+}
+
+u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter)
+{
+ u32 cnt = 0;
+ struct recv_frame *pending_frame;
+ while ((pending_frame = rtw_alloc_recvframe23a(&adapter->recvpriv.uc_swdec_pending_queue))) {
+ rtw_free_recvframe23a(pending_frame,
+ &adapter->recvpriv.free_recv_queue);
+ DBG_8723A("%s: dequeue uc_swdec_pending_queue\n", __func__);
+ cnt++;
+ }
+
+ return cnt;
+}
+
+int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *queue)
+{
+ spin_lock_bh(&queue->lock);
+
+ list_del_init(&precvbuf->list);
+ list_add(&precvbuf->list, get_list_head(queue));
+
+ spin_unlock_bh(&queue->lock);
+
+ return _SUCCESS;
+}
+
+int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue)
+{
+ unsigned long irqL;
+ spin_lock_irqsave(&queue->lock, irqL);
+
+ list_del_init(&precvbuf->list);
+
+ list_add_tail(&precvbuf->list, get_list_head(queue));
+ spin_unlock_irqrestore(&queue->lock, irqL);
+ return _SUCCESS;
+}
+
+struct recv_buf *rtw_dequeue_recvbuf23a(struct rtw_queue *queue)
+{
+ unsigned long irqL;
+ struct recv_buf *precvbuf;
+ struct list_head *plist, *phead;
+
+ spin_lock_irqsave(&queue->lock, irqL);
+
+ if (_rtw_queue_empty23a(queue) == true) {
+ precvbuf = NULL;
+ } else {
+ phead = get_list_head(queue);
+
+ plist = phead->next;
+
+ precvbuf = container_of(plist, struct recv_buf, list);
+
+ list_del_init(&precvbuf->list);
+ }
+
+ spin_unlock_irqrestore(&queue->lock, irqL);
+
+ return precvbuf;
+}
+
+int recvframe_chkmic(struct rtw_adapter *adapter,
+ struct recv_frame *precvframe);
+int recvframe_chkmic(struct rtw_adapter *adapter,
+		     struct recv_frame *precvframe)
+{
+ int i, res = _SUCCESS;
+ u32 datalen;
+ u8 miccode[8];
+ u8 bmic_err = false, brpt_micerror = true;
+ u8 *pframe, *payload, *pframemic;
+ u8 *mickey;
+ /* u8 *iv, rxdata_key_idx = 0; */
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+
+ stainfo = rtw_get_stainfo23a(&adapter->stapriv, &prxattrib->ta[0]);
+
+ if (prxattrib->encrypt == _TKIP_) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n recvframe_chkmic:prxattrib->encrypt == _TKIP_\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n recvframe_chkmic:da = 0x%02x:0x%02x:0x%02x:0x%02x:"
+ "0x%02x:0x%02x\n", prxattrib->ra[0],
+ prxattrib->ra[1], prxattrib->ra[2], prxattrib->ra[3],
+ prxattrib->ra[4], prxattrib->ra[5]));
+
+ /* calculate mic code */
+ if (stainfo != NULL) {
+ if (is_multicast_ether_addr(prxattrib->ra)) {
+ mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n recvframe_chkmic: bcmc key\n"));
+
+ if (psecuritypriv->binstallGrpkey == false) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("\n recvframe_chkmic:didn't "
+ "install group key!!!!!!\n"));
+ DBG_8723A("\n recvframe_chkmic:didn't "
+ "install group key!!!!!!\n");
+ goto exit;
+ }
+ } else {
+ mickey = &stainfo->dot11tkiprxmickey.skey[0];
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n recvframe_chkmic: unicast "
+ "key\n"));
+ }
+
+ /* icv_len included the mic code */
+ datalen = precvframe->pkt->len - prxattrib->hdrlen -
+ prxattrib->iv_len - prxattrib->icv_len - 8;
+ pframe = precvframe->pkt->data;
+ payload = pframe + prxattrib->hdrlen +
+ prxattrib->iv_len;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n prxattrib->iv_len =%d prxattrib->icv_len ="
+ "%d\n", prxattrib->iv_len,
+ prxattrib->icv_len));
+
+ /* care the length of the data */
+ rtw_seccalctkipmic23a(mickey, pframe, payload,
+ datalen, &miccode[0],
+ (unsigned char)prxattrib->priority);
+
+ pframemic = payload + datalen;
+
+ bmic_err = false;
+
+ for (i = 0; i < 8; i++) {
+ if (miccode[i] != *(pframemic + i)) {
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("recvframe_chkmic:miccode"
+ "[%d](%02x) != *(pframemic+"
+ "%d)(%02x) ", i, miccode[i],
+ i, *(pframemic + i)));
+ bmic_err = true;
+ }
+ }
+
+ if (bmic_err == true) {
+ int i;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n *(pframemic-8)-*(pframemic-1) ="
+ "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:"
+ "0x%02x:0x%02x:0x%02x\n",
+ *(pframemic - 8), *(pframemic - 7),
+ *(pframemic - 6), *(pframemic - 5),
+ *(pframemic - 4), *(pframemic - 3),
+ *(pframemic - 2), *(pframemic - 1)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n *(pframemic-16)-*(pframemic-9) ="
+ "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:"
+ "0x%02x:0x%02x:0x%02x\n",
+ *(pframemic - 16), *(pframemic - 15),
+ *(pframemic - 14), *(pframemic - 13),
+ *(pframemic - 12), *(pframemic - 11),
+ *(pframemic - 10), *(pframemic - 9)));
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n ====== demp packet (len =%d) ======"
+ "\n", precvframe->pkt->len));
+ for (i = 0; i < precvframe->pkt->len; i = i + 8) {
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_, ("0x%02x:0x%02x:0x"
+ "%02x:0x%02x:0x%0"
+ "2x:0x%02x:0x%02x"
+ ":0x%02x",
+ *(precvframe->pkt->data+i),*(precvframe->pkt->data+i+1),
+ *(precvframe->pkt->data+i+2),*(precvframe->pkt->data+i+3),
+ *(precvframe->pkt->data+i+4),*(precvframe->pkt->data+i+5),
+ *(precvframe->pkt->data+i+6),*(precvframe->pkt->data+i+7)));
+ }
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n ====== demp packet end [len =%d]"
+ "======\n", precvframe->pkt->len));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n hrdlen =%d,\n",
+ prxattrib->hdrlen));
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("ra = 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%."
+ "2x 0x%.2x psecuritypriv->"
+ "binstallGrpkey =%d ",
+ prxattrib->ra[0], prxattrib->ra[1],
+ prxattrib->ra[2], prxattrib->ra[3],
+ prxattrib->ra[4], prxattrib->ra[5],
+ psecuritypriv->binstallGrpkey));
+
+ /* Double-check key_index because of a possible timing
+    issue; comparing against
+    psecuritypriv->dot118021XGrpKeyid can also hit the
+    same timing issue. */
+ if ((is_multicast_ether_addr(prxattrib->ra)) &&
+ (prxattrib->key_index !=
+ pmlmeinfo->key_index))
+ brpt_micerror = false;
+
+ if ((prxattrib->bdecrypted == true) &&
+ (brpt_micerror == true)) {
+ rtw_handle_tkip_mic_err23a(adapter, (u8)is_multicast_ether_addr(prxattrib->ra));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted =%d ", prxattrib->bdecrypted));
+ DBG_8723A(" mic error :prxattrib->"
+ "bdecrypted =%d\n",
+ prxattrib->bdecrypted);
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ (" mic error :prxattrib->"
+ "bdecrypted =%d ",
+ prxattrib->bdecrypted));
+ DBG_8723A(" mic error :prxattrib->"
+ "bdecrypted =%d\n",
+ prxattrib->bdecrypted);
+ }
+
+ res = _FAIL;
+ } else {
+ /* mic checked ok */
+ if ((psecuritypriv->bcheck_grpkey == false) &&
+ (is_multicast_ether_addr(prxattrib->ra))) {
+ psecuritypriv->bcheck_grpkey = true;
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("psecuritypriv->bcheck_grp"
+ "key = true"));
+ }
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("recvframe_chkmic: rtw_get_stainfo23a =="
+ "NULL!!!\n"));
+ }
+
+ skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
+ }
+
+exit:
+
+
+
+ return res;
+}
+
+/* decrypt and set the ivlen, icvlen of the recv_frame */
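+/*
+ * The key index is taken from bits 6-7 of the fourth IV byte; if it exceeds
+ * WEP_KEYS it is replaced by the default WEP key index or the 802.1X group
+ * key id, depending on the cipher. Software decryption is only attempted
+ * when the frame was not already decrypted by hardware.
+ */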
+struct recv_frame *decryptor(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame);
+struct recv_frame *decryptor(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ struct rx_pkt_attrib *prxattrib = &precv_frame->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct recv_frame *return_packet = precv_frame;
+ u32 res = _SUCCESS;
+
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("prxstat->decrypted =%x prxattrib->encrypt = 0x%03x\n",
+ prxattrib->bdecrypted, prxattrib->encrypt));
+
+ if (prxattrib->encrypt > 0) {
+ u8 *iv = precv_frame->pkt->data + prxattrib->hdrlen;
+ prxattrib->key_index = (((iv[3]) >> 6) & 0x3);
+
+ if (prxattrib->key_index > WEP_KEYS) {
+ DBG_8723A("prxattrib->key_index(%d) > WEP_KEYS\n",
+ prxattrib->key_index);
+
+ switch (prxattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ prxattrib->key_index =
+ psecuritypriv->dot11PrivacyKeyIndex;
+ break;
+ case _TKIP_:
+ case _AES_:
+ default:
+ prxattrib->key_index =
+ psecuritypriv->dot118021XGrpKeyid;
+ break;
+ }
+ }
+ }
+
+ if ((prxattrib->encrypt > 0) && ((prxattrib->bdecrypted == 0))) {
+ psecuritypriv->hw_decrypted = false;
+ switch (prxattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ rtw_wep_decrypt23a(padapter, precv_frame);
+ break;
+ case _TKIP_:
+ res = rtw_tkip_decrypt23a(padapter, precv_frame);
+ break;
+ case _AES_:
+ res = rtw_aes_decrypt23a(padapter, precv_frame);
+ break;
+ default:
+ break;
+ }
+ } else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 &&
+ (psecuritypriv->busetkipkey == 1 ||
+ prxattrib->encrypt != _TKIP_)) {
+ psecuritypriv->hw_decrypted = true;
+ }
+
+ if (res == _FAIL) {
+ rtw_free_recvframe23a(return_packet,
+ &padapter->recvpriv.free_recv_queue);
+ return_packet = NULL;
+ }
+
+
+
+ return return_packet;
+}
+
+/* set the security information in the recv_frame */
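+/*
+ * 802.1X port control: with dot11AuthAlgrthm == 2 and the station still
+ * ieee8021x_blocked, only EAPOL (ether type 0x888e) frames are passed up;
+ * anything else is freed. Once the port is open, frames pass through
+ * unchanged.
+ */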
+static struct recv_frame *portctrl(struct rtw_adapter *adapter,
+ struct recv_frame *precv_frame)
+{
+ u8 *psta_addr = NULL, *ptr;
+ uint auth_alg;
+ struct recv_frame *pfhdr;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv;
+ struct recv_frame *prtnframe;
+ u16 ether_type = 0;
+ u16 eapol_type = 0x888e; /* for Funia BD's WPA issue */
+ struct rx_pkt_attrib *pattrib;
+
+ pstapriv = &adapter->stapriv;
+
+ auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
+
+ ptr = precv_frame->pkt->data;
+ pfhdr = precv_frame;
+ pattrib = &pfhdr->attrib;
+ psta_addr = pattrib->ta;
+ /* look up the station entry only after psta_addr has been set to the
+  * frame's transmitter address */
+ psta = rtw_get_stainfo23a(pstapriv, psta_addr);
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("########portctrl:adapter->securitypriv.dot11AuthAlgrthm ="
+ "%d\n", adapter->securitypriv.dot11AuthAlgrthm));
+
+ if (auth_alg == 2) {
+ if ((psta != NULL) && (psta->ieee8021x_blocked)) {
+ /* blocked */
+ /* only accept EAPOL frame */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("########portctrl:psta->ieee8021x_blocked =="
+ "1\n"));
+
+ prtnframe = precv_frame;
+
+ /* get ether_type */
+ ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
+ memcpy(&ether_type, ptr, 2);
+ ether_type = ntohs((unsigned short)ether_type);
+
+ if (ether_type == eapol_type) {
+ prtnframe = precv_frame;
+ } else {
+ /* free this frame */
+ rtw_free_recvframe23a(precv_frame,
+ &adapter->recvpriv.free_recv_queue);
+ prtnframe = NULL;
+ }
+ } else {
+ /* allowed */
+ /* check decryption status, and decrypt the frame if needed */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("########portctrl:psta->ieee8021x_blocked =="
+ "0\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("portctrl:precv_frame->hdr.attrib.privacy ="
+ "%x\n", precv_frame->attrib.privacy));
+
+ if (pattrib->bdecrypted == 0) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("portctrl:prxstat->decrypted =%x\n",
+ pattrib->bdecrypted));
+ }
+
+ prtnframe = precv_frame;
+ /* check is the EAPOL frame or not (Rekey) */
+ if (ether_type == eapol_type) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("########portctrl:ether_type == "
+ "0x888e\n"));
+ /* check Rekey */
+
+ prtnframe = precv_frame;
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("########portctrl:ether_type = 0x%04x"
+ "\n", ether_type));
+ }
+ }
+ } else {
+ prtnframe = precv_frame;
+ }
+
+
+
+ return prtnframe;
+}
+
+int recv_decache(struct recv_frame *precv_frame, u8 bretry,
+ struct stainfo_rxcache *prxcache);
+int recv_decache(struct recv_frame *precv_frame, u8 bretry,
+ struct stainfo_rxcache *prxcache)
+{
+ int tid = precv_frame->attrib.priority;
+
+ u16 seq_ctrl = ((precv_frame->attrib.seq_num & 0xffff) << 4) |
+ (precv_frame->attrib.frag_num & 0xf);
+
+
+
+ if (tid > 15) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("recv_decache, (tid>15)! seq_ctrl = 0x%x, tid = 0x%x\n",
+ seq_ctrl, tid));
+
+ return _FAIL;
+ }
+
+ if (1) { /* if (bretry) */
+ if (seq_ctrl == prxcache->tid_rxseq[tid]) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("recv_decache, seq_ctrl = 0x%x, tid = 0x%x, "
+ "tid_rxseq = 0x%x\n",
+ seq_ctrl, tid, prxcache->tid_rxseq[tid]));
+
+ return _FAIL;
+ }
+ }
+
+ prxcache->tid_rxseq[tid] = seq_ctrl;
+
+
+
+ return _SUCCESS;
+}
+
+void process23a_pwrbit_data(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame);
+void process23a_pwrbit_data(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+#ifdef CONFIG_8723AU_AP_MODE
+ unsigned char pwrbit;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+
+ psta = rtw_get_stainfo23a(pstapriv, pattrib->src);
+
+ if (psta) {
+ pwrbit = ieee80211_has_pm(hdr->frame_control);
+
+ if (pwrbit) {
+ if (!(psta->state & WIFI_SLEEP_STATE))
+ stop_sta_xmit23a(padapter, psta);
+ } else {
+ if (psta->state & WIFI_SLEEP_STATE)
+ wakeup_sta_to_xmit23a(padapter, psta);
+ }
+ }
+
+#endif
+}
+
+void process_wmmps_data(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame);
+void process_wmmps_data(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+#ifdef CONFIG_8723AU_AP_MODE
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+
+ psta = rtw_get_stainfo23a(pstapriv, pattrib->src);
+
+ if (!psta)
+ return;
+
+
+ if (!psta->qos_option)
+ return;
+
+ if (!(psta->qos_info & 0xf))
+ return;
+
+ if (psta->state & WIFI_SLEEP_STATE) {
+ u8 wmmps_ac = 0;
+
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk & BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi & BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo & BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be & BIT(1);
+ break;
+ }
+
+ if (wmmps_ac) {
+ if (psta->sleepq_ac_len > 0) {
+ /* process received triggered frame */
+ xmit_delivery_enabled_frames23a(padapter, psta);
+ } else {
+ /* issue one qos null frame with More data bit = 0 and the EOSP bit set (= 1) */
+ issue_qos_nulldata23a(padapter, psta->hwaddr,
+ (u16)pattrib->priority,
+ 0, 0);
+ }
+ }
+ }
+
+#endif
+}
+
+static void count_rx_stats(struct rtw_adapter *padapter,
+ struct recv_frame *prframe, struct sta_info *sta)
+{
+ int sz;
+ struct sta_info *psta = NULL;
+ struct stainfo_stats *pstats = NULL;
+ struct rx_pkt_attrib *pattrib = &prframe->attrib;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ sz = prframe->pkt->len;
+ precvpriv->rx_bytes += sz;
+
+ padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
+
+ if ((!is_broadcast_ether_addr(pattrib->dst)) &&
+ (!is_multicast_ether_addr(pattrib->dst)))
+ padapter->mlmepriv.LinkDetectInfo.NumRxUnicastOkInPeriod++;
+
+ if (sta)
+ psta = sta;
+ else
+ psta = prframe->psta;
+
+ if (psta) {
+ pstats = &psta->sta_stats;
+
+ pstats->rx_data_pkts++;
+ pstats->rx_bytes += sz;
+ }
+}
+
+static int sta2sta_data_frame(struct rtw_adapter *adapter,
+ struct recv_frame *precv_frame,
+ struct sta_info **psta)
+{
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ int ret = _SUCCESS;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u8 *mybssid = get_bssid(pmlmepriv);
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ u8 *sta_addr = NULL;
+ int bmcast = is_multicast_ether_addr(pattrib->dst);
+
+
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
+
+ /* filter packets that SA is myself or multicast or broadcast */
+ if (ether_addr_equal(myhwaddr, pattrib->src)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ (" SA == myself\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (!ether_addr_equal(myhwaddr, pattrib->dst) && !bmcast) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (ether_addr_equal(pattrib->bssid, "\x0\x0\x0\x0\x0\x0") ||
+ ether_addr_equal(mybssid, "\x0\x0\x0\x0\x0\x0") ||
+ !ether_addr_equal(pattrib->bssid, mybssid)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ sta_addr = pattrib->src;
+ } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ /* For Station mode, sa and bssid should always be BSSID,
+ and DA is my mac-address */
+ if (!ether_addr_equal(pattrib->bssid, pattrib->src)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("bssid != TA under STATION_MODE; drop "
+ "pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ sta_addr = pattrib->bssid;
+
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ if (bmcast) {
+ /* For AP mode, if DA == MCAST, then BSSID should be also MCAST */
+ if (!is_multicast_ether_addr(pattrib->bssid)) {
+ ret = _FAIL;
+ goto exit;
+ }
+ } else { /* not mc-frame */
+ /* For AP mode, if DA is non-MCAST, then it must
+ be BSSID, and bssid == BSSID */
+ if (!ether_addr_equal(pattrib->bssid, pattrib->dst)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ sta_addr = pattrib->src;
+ }
+ } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) {
+ ether_addr_copy(pattrib->dst, hdr->addr1);
+ ether_addr_copy(pattrib->src, hdr->addr2);
+ ether_addr_copy(pattrib->bssid, hdr->addr3);
+ ether_addr_copy(pattrib->ra, pattrib->dst);
+ ether_addr_copy(pattrib->ta, pattrib->src);
+
+ sta_addr = mybssid;
+ } else {
+ ret = _FAIL;
+ }
+
+ if (bmcast)
+ *psta = rtw_get_bcmc_stainfo23a(adapter);
+ else
+ *psta = rtw_get_stainfo23a(pstapriv, sta_addr); /* get ap_info */
+
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under sta2sta_data_frame ; drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+exit:
+
+ return ret;
+}
+
+int ap2sta_data_frame(struct rtw_adapter *adapter,
+ struct recv_frame *precv_frame,
+ struct sta_info **psta);
+int ap2sta_data_frame(struct rtw_adapter *adapter,
+ struct recv_frame *precv_frame,
+ struct sta_info **psta)
+{
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ int ret = _SUCCESS;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u8 *mybssid = get_bssid(pmlmepriv);
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ int bmcast = is_multicast_ether_addr(pattrib->dst);
+
+
+
+ if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) &&
+ (check_fwstate(pmlmepriv, _FW_LINKED) == true ||
+ check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true)) {
+
+ /* filter packets that SA is myself or multicast or broadcast */
+ if (ether_addr_equal(myhwaddr, pattrib->src)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ (" SA == myself\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* da should be for me */
+ if (!ether_addr_equal(myhwaddr, pattrib->dst) && !bmcast) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ (" ap2sta_data_frame: compare DA fail; DA ="
+ MAC_FMT"\n", MAC_ARG(pattrib->dst)));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* check BSSID */
+ if (ether_addr_equal(pattrib->bssid, "\x0\x0\x0\x0\x0\x0") ||
+ ether_addr_equal(mybssid, "\x0\x0\x0\x0\x0\x0") ||
+ !ether_addr_equal(pattrib->bssid, mybssid)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ (" ap2sta_data_frame: compare BSSID fail ; "
+ "BSSID ="MAC_FMT"\n", MAC_ARG(pattrib->bssid)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("mybssid ="MAC_FMT"\n", MAC_ARG(mybssid)));
+
+ if (!bmcast) {
+ DBG_8723A("issue_deauth23a to the nonassociated "
+ "ap =" MAC_FMT " for the reason(7)\n",
+ MAC_ARG(pattrib->bssid));
+ issue_deauth23a(adapter, pattrib->bssid,
+ WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ }
+
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (bmcast)
+ *psta = rtw_get_bcmc_stainfo23a(adapter);
+ else
+ /* get ap_info */
+ *psta = rtw_get_stainfo23a(pstapriv, pattrib->bssid);
+
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("ap2sta: can't get psta under STATION_MODE ;"
+ " drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (ieee80211_is_nullfunc(hdr->frame_control)) {
+ /* No data, will not indicate to upper layer,
+ temporarily count it here */
+ count_rx_stats(adapter, precv_frame, *psta);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+
+ } else if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) &&
+ (check_fwstate(pmlmepriv, _FW_LINKED) == true)) {
+ ether_addr_copy(pattrib->dst, hdr->addr1);
+ ether_addr_copy(pattrib->src, hdr->addr2);
+ ether_addr_copy(pattrib->bssid, hdr->addr3);
+ ether_addr_copy(pattrib->ra, pattrib->dst);
+ ether_addr_copy(pattrib->ta, pattrib->src);
+
+ /* */
+ ether_addr_copy(pattrib->bssid, mybssid);
+
+ /* get sta_info */
+ *psta = rtw_get_stainfo23a(pstapriv, pattrib->bssid);
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("can't get psta under MP_MODE ; drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ /* Special case */
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ } else {
+ if (ether_addr_equal(myhwaddr, pattrib->dst) && !bmcast) {
+ *psta = rtw_get_stainfo23a(pstapriv, pattrib->bssid);
+ if (*psta == NULL) {
+ DBG_8723A("issue_deauth23a to the ap =" MAC_FMT
+ " for the reason(7)\n",
+ MAC_ARG(pattrib->bssid));
+
+ issue_deauth23a(adapter, pattrib->bssid,
+ WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ }
+ }
+
+ ret = _FAIL;
+ }
+
+exit:
+
+
+
+ return ret;
+}
+
+int sta2ap_data_frame(struct rtw_adapter *adapter,
+ struct recv_frame *precv_frame,
+ struct sta_info **psta);
+int sta2ap_data_frame(struct rtw_adapter *adapter,
+ struct recv_frame *precv_frame,
+ struct sta_info **psta)
+{
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ unsigned char *mybssid = get_bssid(pmlmepriv);
+ int ret = _SUCCESS;
+
+
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ /* For AP mode, RA = BSSID, TX = STA(SRC_ADDR), A3 = DST_ADDR */
+ if (!ether_addr_equal(pattrib->bssid, mybssid)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ *psta = rtw_get_stainfo23a(pstapriv, pattrib->src);
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("can't get psta under AP_MODE; drop pkt\n"));
+ DBG_8723A("issue_deauth23a to sta =" MAC_FMT
+ " for the reason(7)\n",
+ MAC_ARG(pattrib->src));
+
+ issue_deauth23a(adapter, pattrib->src,
+ WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+
+ process23a_pwrbit_data(adapter, precv_frame);
+
+ /* We only get here if it's a data frame, so no need to
+ * confirm data frame type first */
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ process_wmmps_data(adapter, precv_frame);
+
+ if (ieee80211_is_nullfunc(hdr->frame_control)) {
+ /* No data, will not indicate to upper layer,
+ temporarily count it here */
+ count_rx_stats(adapter, precv_frame, *psta);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+ } else {
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ if (!ether_addr_equal(pattrib->ra, myhwaddr)) {
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+ DBG_8723A("issue_deauth23a to sta =" MAC_FMT " for the reason(7)\n",
+ MAC_ARG(pattrib->src));
+ issue_deauth23a(adapter, pattrib->src,
+ WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+
+exit:
+
+
+
+ return ret;
+}
+
+int validate_recv_ctrl_frame(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame);
+int validate_recv_ctrl_frame(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+#ifdef CONFIG_8723AU_AP_MODE
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *pframe = skb->data;
+
+ if (!ieee80211_is_ctl(hdr->frame_control))
+ return _FAIL;
+
+ /* receive the frames that ra(a1) is my address */
+ if (!ether_addr_equal(hdr->addr1, myid(&padapter->eeprompriv)))
+ return _FAIL;
+
+ /* only handle ps-poll */
+ if (ieee80211_is_pspoll(hdr->frame_control)) {
+ u16 aid;
+ u8 wmmps_ac = 0;
+ struct sta_info *psta = NULL;
+
+ aid = GetAid(pframe);
+ psta = rtw_get_stainfo23a(pstapriv, hdr->addr2);
+
+ if ((!psta) || (psta->aid != aid))
+ return _FAIL;
+
+ /* for rx pkt statistics */
+ psta->sta_stats.rx_ctrl_pkts++;
+
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk & BIT(0);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi & BIT(0);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo & BIT(0);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be & BIT(0);
+ break;
+ }
+
+ if (wmmps_ac)
+ return _FAIL;
+
+ if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+ DBG_8723A("%s alive check-rx ps-poll\n", __func__);
+ psta->expire_to = pstapriv->expire_to;
+ psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+ }
+
+ if ((psta->state & WIFI_SLEEP_STATE) &&
+ (pstapriv->sta_dz_bitmap & CHKBIT(psta->aid))) {
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ spin_lock_bh(&pxmitpriv->lock);
+
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = xmitframe_phead->next;
+
+ if (!list_empty(xmitframe_phead)) {
+ pxmitframe = container_of(xmitframe_plist,
+ struct xmit_frame,
+ list);
+
+ xmitframe_plist = xmitframe_plist->next;
+
+ list_del_init(&pxmitframe->list);
+
+ psta->sleepq_len--;
+
+ if (psta->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ pxmitframe->attrib.triggered = 1;
+
+ /* DBG_8723A("handling ps-poll, q_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
+
+ rtw_hal_xmit23aframe_enqueue(padapter, pxmitframe);
+
+ if (psta->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
+
+ /* DBG_8723A("after handling ps-poll, tim =%x\n", pstapriv->tim_bitmap); */
+
+ /* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon23a(padapter, _TIM_IE_,
+ NULL, false);
+ }
+
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ } else {
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ /* DBG_8723A("no buffered packets to xmit\n"); */
+ if (pstapriv->tim_bitmap & CHKBIT(psta->aid)) {
+ if (psta->sleepq_len == 0) {
+ DBG_8723A("no buffered packets "
+ "to xmit\n");
+
+ /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
+ issue_nulldata23a(padapter,
+ psta->hwaddr,
+ 0, 0, 0);
+ } else {
+ DBG_8723A("error!psta->sleepq"
+ "_len =%d\n",
+ psta->sleepq_len);
+ psta->sleepq_len = 0;
+ }
+
+ pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
+
+ /* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon23a(padapter, _TIM_IE_,
+ NULL, false);
+ }
+ }
+ }
+ }
+
+#endif
+ return _FAIL;
+}
+
+struct recv_frame* recvframe_chk_defrag23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame);
+int validate_recv_mgnt_frame(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame);
+int validate_recv_mgnt_frame(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ struct sta_info *psta;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ /* struct mlme_priv *pmlmepriv = &adapter->mlmepriv; */
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("+validate_recv_mgnt_frame\n"));
+
+ precv_frame = recvframe_chk_defrag23a(padapter, precv_frame);
+ if (precv_frame == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("%s: fragment packet\n", __func__));
+ return _SUCCESS;
+ }
+
+ skb = precv_frame->pkt;
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ /* for rx pkt statistics */
+ psta = rtw_get_stainfo23a(&padapter->stapriv, hdr->addr2);
+ if (psta) {
+ psta->sta_stats.rx_mgnt_pkts++;
+
+ if (ieee80211_is_beacon(hdr->frame_control))
+ psta->sta_stats.rx_beacon_pkts++;
+ else if (ieee80211_is_probe_req(hdr->frame_control))
+ psta->sta_stats.rx_probereq_pkts++;
+ else if (ieee80211_is_probe_resp(hdr->frame_control)) {
+ if (ether_addr_equal(padapter->eeprompriv.mac_addr,
+ hdr->addr1))
+ psta->sta_stats.rx_probersp_pkts++;
+ else if (is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1))
+ psta->sta_stats.rx_probersp_bm_pkts++;
+ else
+ psta->sta_stats.rx_probersp_uo_pkts++;
+ }
+ }
+
+ mgt_dispatcher23a(padapter, precv_frame);
+
+ return _SUCCESS;
+}
+
+int validate_recv_data_frame(struct rtw_adapter *adapter,
+ struct recv_frame *precv_frame);
+int validate_recv_data_frame(struct rtw_adapter *adapter,
+ struct recv_frame *precv_frame)
+{
+ u8 bretry;
+ u8 *psa, *pda, *pbssid;
+ struct sta_info *psta = NULL;
+ u8 *ptr = precv_frame->pkt->data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ int ret = _SUCCESS;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+
+
+ bretry = ieee80211_has_retry(hdr->frame_control);
+ pda = ieee80211_get_DA(hdr);
+ psa = ieee80211_get_SA(hdr);
+ pbssid = get_hdr_bssid(ptr);
+
+ if (pbssid == NULL) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ ether_addr_copy(pattrib->dst, pda);
+ ether_addr_copy(pattrib->src, psa);
+
+ ether_addr_copy(pattrib->bssid, pbssid);
+
+ switch (pattrib->to_fr_ds) {
+ case 0:
+ ether_addr_copy(pattrib->ra, pda);
+ ether_addr_copy(pattrib->ta, psa);
+ ret = sta2sta_data_frame(adapter, precv_frame, &psta);
+ break;
+
+ case 1:
+ ether_addr_copy(pattrib->ra, pda);
+ ether_addr_copy(pattrib->ta, pbssid);
+ ret = ap2sta_data_frame(adapter, precv_frame, &psta);
+ break;
+
+ case 2:
+ ether_addr_copy(pattrib->ra, pbssid);
+ ether_addr_copy(pattrib->ta, psa);
+ ret = sta2ap_data_frame(adapter, precv_frame, &psta);
+ break;
+
+ case 3:
+ ether_addr_copy(pattrib->ra, hdr->addr1);
+ ether_addr_copy(pattrib->ta, hdr->addr2);
+ ret = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" case 3\n"));
+ break;
+
+ default:
+ ret = _FAIL;
+ break;
+ }
+
+ if ((ret == _FAIL) || (ret == RTW_RX_HANDLED))
+ goto exit;
+
+ if (!psta) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ (" after to_fr_ds_chk; psta == NULL\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* psta->rssi = prxcmd->rssi; */
+ /* psta->signal_quality = prxcmd->sq; */
+ precv_frame->psta = psta;
+
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
+ if (ieee80211_has_a4(hdr->frame_control))
+ pattrib->hdrlen += ETH_ALEN;
+
+ /* parsing QC field */
+ if (pattrib->qos == 1) {
+ __le16 *qptr = (__le16 *)ieee80211_get_qos_ctl(hdr);
+ u16 qos_ctrl = le16_to_cpu(*qptr);
+
+ pattrib->priority = qos_ctrl & IEEE80211_QOS_CTL_TID_MASK;
+ pattrib->ack_policy = (qos_ctrl >> 5) & 3;
+ pattrib->amsdu =
+ (qos_ctrl & IEEE80211_QOS_CTL_A_MSDU_PRESENT) >> 7;
+ pattrib->hdrlen += IEEE80211_QOS_CTL_LEN;
+
+ if (pattrib->priority != 0 && pattrib->priority != 3) {
+ adapter->recvpriv.bIsAnyNonBEPkts = true;
+ }
+ } else {
+ pattrib->priority = 0;
+ pattrib->ack_policy = 0;
+ pattrib->amsdu = 0;
+ }
+
+ if (pattrib->order) { /* HT-CTRL 11n */
+ pattrib->hdrlen += 4;
+ }
+
+ precv_frame->preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
+
+ /* decache, drop duplicate recv packets */
+ if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) ==
+ _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("decache : drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (pattrib->privacy) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("validate_recv_data_frame:pattrib->privacy =%x\n",
+ pattrib->privacy));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n ^^^^^^^^^^^is_multicast_ether_addr"
+ "(pattrib->ra(0x%02x)) =%d^^^^^^^^^^^^^^^6\n",
+ pattrib->ra[0],
+ is_multicast_ether_addr(pattrib->ra)));
+
+ GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt,
+ is_multicast_ether_addr(pattrib->ra));
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n pattrib->encrypt =%d\n", pattrib->encrypt));
+
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ pattrib->iv_len = 4;
+ pattrib->icv_len = 4;
+ break;
+ case _TKIP_:
+ pattrib->iv_len = 8;
+ pattrib->icv_len = 4;
+ break;
+ case _AES_:
+ pattrib->iv_len = 8;
+ pattrib->icv_len = 8;
+ break;
+ case _SMS4_:
+ pattrib->iv_len = 18;
+ pattrib->icv_len = 16;
+ break;
+ default:
+ pattrib->iv_len = 0;
+ pattrib->icv_len = 0;
+ break;
+ }
+ } else {
+ pattrib->encrypt = 0;
+ pattrib->iv_len = 0;
+ pattrib->icv_len = 0;
+ }
+
+exit:
+
+
+
+ return ret;
+}
+
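+/*
+ * Debug helper: dumps the first 64 bytes of a frame when the
+ * HAL_DEF_DBG_DUMP_RXPKT level asks for it (1 = every frame,
+ * 2 = management frames only, 3 = data frames only).
+ */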
+static void dump_rx_pkt(struct sk_buff *skb, u16 type, int level)
+{
+ int i;
+ u8 *ptr;
+
+ if ((level == 1) ||
+ ((level == 2) && (type == IEEE80211_FTYPE_MGMT)) ||
+ ((level == 3) && (type == IEEE80211_FTYPE_DATA))) {
+
+ ptr = skb->data;
+
+ DBG_8723A("#############################\n");
+
+ for (i = 0; i < 64; i = i + 8)
+ DBG_8723A("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n",
+ *(ptr + i), *(ptr + i + 1), *(ptr + i + 2),
+ *(ptr + i + 3), *(ptr + i + 4),
+ *(ptr + i + 5), *(ptr + i + 6),
+ *(ptr + i + 7));
+ DBG_8723A("#############################\n");
+ }
+}
+
+static int validate_recv_frame(struct rtw_adapter *adapter,
+ struct recv_frame *precv_frame)
+{
+ /* shall check frame subtype, to / from ds, da, bssid */
+
+ /* then call check if rx seq/frag. duplicated. */
+ u8 type;
+ u8 subtype;
+ int retval = _SUCCESS;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ struct sk_buff *skb = precv_frame->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 ver;
+ u8 bDumpRxPkt;
+ u16 seq_ctrl, fctl;
+
+ fctl = le16_to_cpu(hdr->frame_control);
+ ver = fctl & IEEE80211_FCTL_VERS;
+ type = fctl & IEEE80211_FCTL_FTYPE;
+ subtype = fctl & IEEE80211_FCTL_STYPE;
+
+ /* add version chk */
+ if (ver != 0) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("validate_recv_data_frame fail! (ver!= 0)\n"));
+ retval = _FAIL;
+ goto exit;
+ }
+
+ pattrib->to_fr_ds = get_tofr_ds(hdr->frame_control);
+
+ seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
+ pattrib->frag_num = seq_ctrl & IEEE80211_SCTL_FRAG;
+ pattrib->seq_num = seq_ctrl >> 4;
+
+ pattrib->pw_save = ieee80211_has_pm(hdr->frame_control);
+ pattrib->mfrag = ieee80211_has_morefrags(hdr->frame_control);
+ pattrib->mdata = ieee80211_has_moredata(hdr->frame_control);
+ pattrib->privacy = ieee80211_has_protected(hdr->frame_control);
+ pattrib->order = ieee80211_has_order(hdr->frame_control);
+
+ rtw_hal_get_def_var23a(adapter, HAL_DEF_DBG_DUMP_RXPKT, &bDumpRxPkt);
+
+ if (unlikely(bDumpRxPkt == 1))
+ dump_rx_pkt(skb, type, bDumpRxPkt);
+
+ switch (type) {
+ case IEEE80211_FTYPE_MGMT:
+ retval = validate_recv_mgnt_frame(adapter, precv_frame);
+ if (retval == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("validate_recv_mgnt_frame fail\n"));
+ }
+ retval = _FAIL; /* only data frame return _SUCCESS */
+ break;
+ case IEEE80211_FTYPE_CTL:
+ retval = validate_recv_ctrl_frame(adapter, precv_frame);
+ if (retval == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("validate_recv_ctrl_frame fail\n"));
+ }
+ retval = _FAIL; /* only data frame return _SUCCESS */
+ break;
+ case IEEE80211_FTYPE_DATA:
+ rtw_led_control(adapter, LED_CTL_RX);
+ pattrib->qos = (subtype & IEEE80211_STYPE_QOS_DATA) ? 1 : 0;
+ retval = validate_recv_data_frame(adapter, precv_frame);
+ if (retval == _FAIL) {
+ struct recv_priv *precvpriv = &adapter->recvpriv;
+ /* RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_data_frame fail\n")); */
+ precvpriv->rx_drop++;
+ }
+ break;
+ default:
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("validate_recv_data_frame fail! type = 0x%x\n", type));
+ retval = _FAIL;
+ break;
+ }
+
+exit:
+ return retval;
+}
+
+/* remove the wlanhdr and add the eth_hdr */
+
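+/*
+ * If the payload starts with an RFC1042 or Bridge-Tunnel SNAP header (and is
+ * not AARP/IPX), that header is stripped and the encapsulated EtherType is
+ * kept; otherwise an 802.3 length field is rebuilt from the remaining payload
+ * length. In MP mode a 0x8712 pseudo EtherType plus 24 bytes of rx status are
+ * prepended instead.
+ */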
+static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
+{
+ u16 eth_type, len, hdrlen;
+ u8 bsnaphdr;
+ u8 *psnap;
+
+ int ret = _SUCCESS;
+ struct rtw_adapter *adapter = precvframe->adapter;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ struct sk_buff *skb = precvframe->pkt;
+ u8 *ptr;
+ struct rx_pkt_attrib *pattrib = &precvframe->attrib;
+
+
+
+ ptr = skb->data;
+ hdrlen = pattrib->hdrlen;
+ psnap = ptr + hdrlen;
+ eth_type = (psnap[6] << 8) | psnap[7];
+ /* convert hdr + possible LLC headers into Ethernet header */
+ /* eth_type = (psnap_type[0] << 8) | psnap_type[1]; */
+ if ((ether_addr_equal(psnap, rfc1042_header) &&
+ eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
+ ether_addr_equal(psnap, bridge_tunnel_header)) {
+ /* remove RFC1042 or Bridge-Tunnel encapsulation
+ and replace EtherType */
+ bsnaphdr = true;
+ hdrlen += SNAP_SIZE;
+ } else {
+ /* Leave Ethernet header part of hdr and full payload */
+ bsnaphdr = false;
+ eth_type = (psnap[0] << 8) | psnap[1];
+ }
+
+ len = skb->len - hdrlen;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n === pattrib->hdrlen: %x, pattrib->iv_len:%x ===\n\n",
+ pattrib->hdrlen, pattrib->iv_len));
+
+ pattrib->eth_type = eth_type;
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) {
+ ptr += hdrlen;
+ *ptr = 0x87;
+ *(ptr + 1) = 0x12;
+
+ eth_type = 0x8712;
+ /* append rx status for mp test packets */
+
+ ptr = skb_pull(skb, (hdrlen - sizeof(struct ethhdr) + 2) - 24);
+ memcpy(ptr, skb->head, 24);
+ ptr += 24;
+ } else {
+ ptr = skb_pull(skb, (hdrlen - sizeof(struct ethhdr) +
+ (bsnaphdr ? 2:0)));
+ }
+
+ ether_addr_copy(ptr, pattrib->dst);
+ ether_addr_copy(ptr + ETH_ALEN, pattrib->src);
+
+ if (!bsnaphdr) {
+ len = htons(len);
+ memcpy(ptr + 12, &len, 2);
+ }
+
+
+ return ret;
+}
+
+/* perform defrag */
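+/*
+ * The defrag_q is expected to hold the fragments in order: the first entry
+ * must carry frag_num 0 and every following entry must increase it by one,
+ * otherwise the whole queue is dropped. For each matching fragment the
+ * 802.11 header and IV are pulled and the payload is appended to the first
+ * fragment's skb; the ICV length of the last fragment is kept.
+ */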
+struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
+ struct rtw_queue *defrag_q);
+struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
+ struct rtw_queue *defrag_q)
+{
+ struct list_head *plist, *phead, *ptmp;
+ u8 *data, wlanhdr_offset;
+ u8 curfragnum;
+ struct recv_frame *pnfhdr;
+ struct recv_frame *prframe, *pnextrframe;
+ struct rtw_queue *pfree_recv_queue;
+ struct sk_buff *skb;
+
+
+
+ curfragnum = 0;
+ pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
+
+ phead = get_list_head(defrag_q);
+ plist = phead->next;
+ prframe = container_of(plist, struct recv_frame, list);
+ list_del_init(&prframe->list);
+ skb = prframe->pkt;
+
+ if (curfragnum != prframe->attrib.frag_num) {
+ /* the first fragment number must be 0 */
+ /* free the whole queue */
+ rtw_free_recvframe23a(prframe, pfree_recv_queue);
+ rtw_free_recvframe23a_queue(defrag_q, pfree_recv_queue);
+
+ return NULL;
+ }
+
+ curfragnum++;
+
+ phead = get_list_head(defrag_q);
+
+ data = prframe->pkt->data;
+
+ list_for_each_safe(plist, ptmp, phead) {
+ pnfhdr = container_of(plist, struct recv_frame, list);
+ pnextrframe = (struct recv_frame *)pnfhdr;
+ /* check the fragment sequence (2nd ~n fragment frame) */
+
+ if (curfragnum != pnfhdr->attrib.frag_num) {
+ /* the fragment number must be increasing
+ (after decache) */
+ /* release the defrag_q & prframe */
+ rtw_free_recvframe23a(prframe, pfree_recv_queue);
+ rtw_free_recvframe23a_queue(defrag_q, pfree_recv_queue);
+ return NULL;
+ }
+
+ curfragnum++;
+
+ /* copy the 2nd~n fragment frame's payload to the
+ first fragment */
+ /* get the 2nd~last fragment frame's payload */
+
+ wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
+
+ skb_pull(pnfhdr->pkt, wlanhdr_offset);
+
+ /* append to first fragment frame's tail
+ (if privacy frame, pull the ICV) */
+
+ skb_trim(skb, skb->len - prframe->attrib.icv_len);
+
+ memcpy(skb_tail_pointer(skb), pnfhdr->pkt->data,
+ pnfhdr->pkt->len);
+
+ skb_put(skb, pnfhdr->pkt->len);
+
+ prframe->attrib.icv_len = pnfhdr->attrib.icv_len;
+ }
+
+ /* free the defrag_q queue and return the prframe */
+ rtw_free_recvframe23a_queue(defrag_q, pfree_recv_queue);
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("Perform defrag\n"));
+
+
+
+ return prframe;
+}
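+
+/*
+ * Sketch of the reassembly above: fragment 0 keeps its full header and
+ * becomes prframe; for each following fragment only the payload after
+ * wlanhdr_offset (hdrlen + iv_len) is appended with memcpy()/skb_put(),
+ * after the per-fragment ICV has been trimmed from the tail of the first
+ * fragment's skb.
+ */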
+
+/* check if need to defrag, if needed queue the frame to defrag_q */
+struct recv_frame *recvframe_chk_defrag23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ u8 ismfrag;
+ u8 fragnum;
+ u8 *psta_addr;
+ struct recv_frame *pfhdr;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv;
+ struct list_head *phead;
+ struct recv_frame *prtnframe = NULL;
+ struct rtw_queue *pfree_recv_queue, *pdefrag_q;
+
+
+
+ pstapriv = &padapter->stapriv;
+
+ pfhdr = precv_frame;
+
+ pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ /* need to define struct of wlan header frame ctrl */
+ ismfrag = pfhdr->attrib.mfrag;
+ fragnum = pfhdr->attrib.frag_num;
+
+ psta_addr = pfhdr->attrib.ta;
+ psta = rtw_get_stainfo23a(pstapriv, psta_addr);
+ if (!psta) {
+ struct ieee80211_hdr *hdr =
+ (struct ieee80211_hdr *) pfhdr->pkt->data;
+ if (!ieee80211_is_data(hdr->frame_control)) {
+ psta = rtw_get_bcmc_stainfo23a(padapter);
+ pdefrag_q = &psta->sta_recvpriv.defrag_q;
+ } else
+ pdefrag_q = NULL;
+ } else
+ pdefrag_q = &psta->sta_recvpriv.defrag_q;
+
+ if ((ismfrag == 0) && (fragnum == 0)) {
+ prtnframe = precv_frame;/* isn't a fragment frame */
+ }
+
+ if (ismfrag == 1) {
+ /* 0~(n-1) fragment frame */
+ /* enqueue to defrag_q */
+ if (pdefrag_q != NULL) {
+ if (fragnum == 0) {
+ /* the first fragment */
+ if (_rtw_queue_empty23a(pdefrag_q) == false) {
+ /* free current defrag_q */
+ rtw_free_recvframe23a_queue(pdefrag_q, pfree_recv_queue);
+ }
+ }
+
+ /* Then enqueue the 0~(n-1) fragment into the
+ defrag_q */
+
+ /* spin_lock(&pdefrag_q->lock); */
+ phead = get_list_head(pdefrag_q);
+ list_add_tail(&pfhdr->list, phead);
+ /* spin_unlock(&pdefrag_q->lock); */
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("Enqueuq: ismfrag = %d, fragnum = %d\n",
+ ismfrag, fragnum));
+
+ prtnframe = NULL;
+
+ } else {
+ /* can't find this ta's defrag_queue,
+ so free this recv_frame */
+ rtw_free_recvframe23a(precv_frame, pfree_recv_queue);
+ prtnframe = NULL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("Free because pdefrag_q == NULL: ismfrag = "
+ "%d, fragnum = %d\n", ismfrag, fragnum));
+ }
+ }
+
+ if ((ismfrag == 0) && (fragnum != 0)) {
+ /* the last fragment frame */
+ /* enqueue the last fragment */
+ if (pdefrag_q != NULL) {
+ /* spin_lock(&pdefrag_q->lock); */
+ phead = get_list_head(pdefrag_q);
+ list_add_tail(&pfhdr->list, phead);
+ /* spin_unlock(&pdefrag_q->lock); */
+
+ /* call recvframe_defrag to defrag */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("defrag: ismfrag = %d, fragnum = %d\n",
+ ismfrag, fragnum));
+ precv_frame = recvframe_defrag(padapter, pdefrag_q);
+ prtnframe = precv_frame;
+ } else {
+ /* can't find this ta's defrag_queue,
+ so free this recv_frame */
+ rtw_free_recvframe23a(precv_frame, pfree_recv_queue);
+ prtnframe = NULL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("Free because pdefrag_q == NULL: ismfrag = "
+ "%d, fragnum = %d\n", ismfrag, fragnum));
+ }
+
+ }
+
+ if ((prtnframe != NULL) && (prtnframe->attrib.privacy)) {
+ /* after defrag we must check tkip mic code */
+ if (recvframe_chkmic(padapter, prtnframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("recvframe_chkmic(padapter, prtnframe) =="
+ "_FAIL\n"));
+ rtw_free_recvframe23a(prtnframe, pfree_recv_queue);
+ prtnframe = NULL;
+ }
+ }
+
+
+
+ return prtnframe;
+}
+
+int amsdu_to_msdu(struct rtw_adapter *padapter, struct recv_frame *prframe);
+int amsdu_to_msdu(struct rtw_adapter *padapter, struct recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib;
+ struct sk_buff *skb, *sub_skb;
+ struct sk_buff_head skb_list;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct rtw_queue *pfree_recv_queue = &precvpriv->free_recv_queue;
+
+ pattrib = &prframe->attrib;
+
+ skb = prframe->pkt;
+ skb_pull(skb, prframe->attrib.hdrlen);
+ __skb_queue_head_init(&skb_list);
+
+ ieee80211_amsdu_to_8023s(skb, &skb_list, NULL, 0, 0, false);
+
+ while (!skb_queue_empty(&skb_list)) {
+ sub_skb = __skb_dequeue(&skb_list);
+
+ sub_skb->protocol = eth_type_trans(sub_skb, padapter->pnetdev);
+ sub_skb->dev = padapter->pnetdev;
+
+ sub_skb->ip_summed = CHECKSUM_NONE;
+
+ netif_rx(sub_skb);
+ }
+
+ prframe->pkt = NULL;
+ rtw_free_recvframe23a(prframe, pfree_recv_queue);
+ return _SUCCESS;
+}
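+
+/*
+ * Note on the helper used above: ieee80211_amsdu_to_8023s() is the cfg80211
+ * routine that splits the A-MSDU payload into individual 802.3 subframe
+ * skbs; each one is handed straight to the stack with netif_rx(), so the
+ * original recv_frame is freed without being re-queued.
+ */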
+
+int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num);
+int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num)
+{
+ u8 wsize = preorder_ctrl->wsize_b;
+ u16 wend = (preorder_ctrl->indicate_seq + wsize -1) & 0xFFF;
+
+ /* Rx Reorder initialize condition. */
+ if (preorder_ctrl->indicate_seq == 0xFFFF)
+ preorder_ctrl->indicate_seq = seq_num;
+
+ /* Drop out the packet which SeqNum is smaller than WinStart */
+ if (SN_LESS(seq_num, preorder_ctrl->indicate_seq))
+ return false;
+
+ /* */
+ /* Sliding window manipulation. Conditions include: */
+ /* 1. Incoming SeqNum is equal to WinStart =>Window shift 1 */
+ /* 2. Incoming SeqNum is larger than the WinEnd => Window shift N */
+ /* */
+ if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq)) {
+ preorder_ctrl->indicate_seq =
+ (preorder_ctrl->indicate_seq + 1) & 0xFFF;
+ } else if (SN_LESS(wend, seq_num)) {
+ /* boundary situation, when seq_num crosses 0xFFF */
+ if (seq_num >= (wsize - 1))
+ preorder_ctrl->indicate_seq = seq_num + 1 - wsize;
+ else
+ preorder_ctrl->indicate_seq = 0xFFF - (wsize - (seq_num + 1)) + 1;
+ }
+ return true;
+}
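+
+/*
+ * Worked example for the window update above (numbers chosen for
+ * illustration, assuming SN_LESS/SN_EQUAL compare mod-4096 sequence
+ * numbers): with wsize_b = 64 and indicate_seq = 100, wend is 163.
+ * seq_num = 90 is below the window and the caller drops the frame;
+ * seq_num = 100 advances indicate_seq to 101 (shift by 1);
+ * seq_num = 200 lies beyond wend, so indicate_seq jumps to
+ * 200 + 1 - 64 = 137 (shift by N).
+ */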
+
+int enqueue_reorder_recvframe23a(struct recv_reorder_ctrl *preorder_ctrl,
+ struct recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib = &prframe->attrib;
+ struct rtw_queue *ppending_recvframe_queue;
+ struct list_head *phead, *plist, *ptmp;
+ struct recv_frame *hdr;
+ struct rx_pkt_attrib *pnextattrib;
+
+ ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+ /* DbgPrint("+enqueue_reorder_recvframe23a()\n"); */
+
+ /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */
+ /* spin_lock_ex(&ppending_recvframe_queue->lock); */
+
+ phead = get_list_head(ppending_recvframe_queue);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ hdr = container_of(plist, struct recv_frame, list);
+ pnextattrib = &hdr->attrib;
+
+ if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num)) {
+ continue;
+ } else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num)) {
+ /* Duplicate entry is found!! Do not insert current entry. */
+ /* RT_TRACE(COMP_RX_REORDER, DBG_TRACE, ("InsertRxReorderList(): Duplicate packet is dropped!! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum)); */
+
+ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */
+ return false;
+ } else {
+ break;
+ }
+
+ /* DbgPrint("enqueue_reorder_recvframe23a():while\n"); */
+ }
+
+ /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */
+ /* spin_lock_ex(&ppending_recvframe_queue->lock); */
+
+ list_del_init(&prframe->list);
+
+ list_add_tail(&prframe->list, plist);
+
+ /* spin_unlock_ex(&ppending_recvframe_queue->lock); */
+ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */
+
+ /* RT_TRACE(COMP_RX_REORDER, DBG_TRACE, ("InsertRxReorderList(): Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum)); */
+ return true;
+}
+
+int recv_indicatepkts_in_order(struct rtw_adapter *padapter,
+ struct recv_reorder_ctrl *preorder_ctrl,
+ int bforced);
+int recv_indicatepkts_in_order(struct rtw_adapter *padapter,
+ struct recv_reorder_ctrl *preorder_ctrl,
+ int bforced)
+{
+ /* u8 bcancelled; */
+ struct list_head *phead, *plist;
+ struct recv_frame *prframe;
+ struct rx_pkt_attrib *pattrib;
+ /* u8 index = 0; */
+ int bPktInBuf = false;
+ struct recv_priv *precvpriv;
+ struct rtw_queue *ppending_recvframe_queue;
+
+ precvpriv = &padapter->recvpriv;
+ ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+ /* DbgPrint("+recv_indicatepkts_in_order\n"); */
+
+ /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */
+ /* spin_lock_ex(&ppending_recvframe_queue->lock); */
+
+ phead = get_list_head(ppending_recvframe_queue);
+ plist = phead->next;
+
+ /* Handling some condition for forced indicate case. */
+ if (bforced) {
+ if (list_empty(phead)) {
+ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */
+ /* spin_unlock_ex(&ppending_recvframe_queue->lock); */
+ return true;
+ }
+
+ prframe = container_of(plist, struct recv_frame, list);
+ pattrib = &prframe->attrib;
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+ }
+
+ /* Prepare indication list and indication. */
+ /* Check if there is any packet need indicate. */
+ while (!list_empty(phead)) {
+
+ prframe = container_of(plist, struct recv_frame, list);
+ pattrib = &prframe->attrib;
+
+ if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("recv_indicatepkts_in_order: indicate =%d "
+ "seq =%d amsdu =%d\n",
+ preorder_ctrl->indicate_seq,
+ pattrib->seq_num, pattrib->amsdu));
+
+ plist = plist->next;
+ list_del_init(&prframe->list);
+
+ if (SN_EQUAL(preorder_ctrl->indicate_seq,
+ pattrib->seq_num)) {
+ preorder_ctrl->indicate_seq =
+ (preorder_ctrl->indicate_seq + 1)&0xFFF;
+ }
+
+ if (!pattrib->amsdu) {
+ if ((padapter->bDriverStopped == false) &&
+ (padapter->bSurpriseRemoved == false)) {
+ rtw_recv_indicatepkt23a(padapter, prframe);
+ }
+ } else {
+ if (amsdu_to_msdu(padapter, prframe) !=
+ _SUCCESS) {
+ rtw_free_recvframe23a(prframe,
+ &precvpriv->free_recv_queue);
+ }
+ }
+
+ /* Update local variables. */
+ bPktInBuf = false;
+
+ } else {
+ bPktInBuf = true;
+ break;
+ }
+
+ /* DbgPrint("recv_indicatepkts_in_order():while\n"); */
+ }
+
+ /* spin_unlock_ex(&ppending_recvframe_queue->lock); */
+ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */
+
+ return bPktInBuf;
+}
+
+int recv_indicatepkt_reorder(struct rtw_adapter *padapter,
+ struct recv_frame *prframe);
+int recv_indicatepkt_reorder(struct rtw_adapter *padapter,
+ struct recv_frame *prframe)
+{
+ int retval = _SUCCESS;
+ struct rx_pkt_attrib *pattrib;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct rtw_queue *ppending_recvframe_queue;
+
+ pattrib = &prframe->attrib;
+ preorder_ctrl = prframe->preorder_ctrl;
+ ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ if (!pattrib->amsdu) {
+ /* s1. */
+ wlanhdr_to_ethhdr(prframe);
+
+ if ((pattrib->qos!= 1) || (pattrib->eth_type == 0x0806) ||
+ (pattrib->ack_policy != 0)) {
+ if ((padapter->bDriverStopped == false) &&
+ (padapter->bSurpriseRemoved == false)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("@@@@ recv_indicatepkt_reorder -"
+ "recv_func recv_indicatepkt\n"));
+
+ rtw_recv_indicatepkt23a(padapter, prframe);
+ return _SUCCESS;
+ }
+
+ return _FAIL;
+ }
+
+ if (preorder_ctrl->enable == false) {
+ /* indicate this recv_frame */
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+ rtw_recv_indicatepkt23a(padapter, prframe);
+
+ preorder_ctrl->indicate_seq =
+ (preorder_ctrl->indicate_seq + 1) % 4096;
+ return _SUCCESS;
+ }
+ } else {
+ /* temp filter -> means didn't support A-MSDUs in a A-MPDU */
+ if (preorder_ctrl->enable == false) {
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+ retval = amsdu_to_msdu(padapter, prframe);
+
+ preorder_ctrl->indicate_seq =
+ (preorder_ctrl->indicate_seq + 1) % 4096;
+ return retval;
+ }
+ }
+
+ spin_lock_bh(&ppending_recvframe_queue->lock);
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("recv_indicatepkt_reorder: indicate =%d seq =%d\n",
+ preorder_ctrl->indicate_seq, pattrib->seq_num));
+
+ /* s2. check if winstart_b (indicate_seq) needs to be updated */
+ if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num)) {
+ goto _err_exit;
+ }
+
+ /* s3. Insert all packet into Reorder Queue to maintain its ordering. */
+ if (!enqueue_reorder_recvframe23a(preorder_ctrl, prframe)) {
+ goto _err_exit;
+ }
+
+ /* s4. */
+ /* Indication process. */
+ /* After Packet dropping and Sliding Window shifting as above,
+ we can now just indicate the packets */
+ /* with the SeqNum smaller than latest WinStart and buffer
+ other packets. */
+ /* */
+ /* For Rx Reorder condition: */
+ /* 1. All packets with SeqNum smaller than WinStart => Indicate */
+ /* 2. All packets with SeqNum larger than or equal to WinStart =>
+ Buffer it. */
+ /* */
+
+ if (recv_indicatepkts_in_order(padapter, preorder_ctrl, false) == true) {
+ mod_timer(&preorder_ctrl->reordering_ctrl_timer,
+ jiffies + msecs_to_jiffies(REORDER_WAIT_TIME));
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
+ } else {
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
+ }
+ return _SUCCESS;
+
+_err_exit:
+
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
+ return _FAIL;
+}
+
+void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext)
+{
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct rtw_adapter *padapter;
+ struct rtw_queue *ppending_recvframe_queue;
+
+ preorder_ctrl = (struct recv_reorder_ctrl *)pcontext;
+ padapter = preorder_ctrl->padapter;
+ ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
+ return;
+ }
+
+ /* DBG_8723A("+rtw_reordering_ctrl_timeout_handler23a() =>\n"); */
+
+ spin_lock_bh(&ppending_recvframe_queue->lock);
+
+ if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true) == true) {
+ mod_timer(&preorder_ctrl->reordering_ctrl_timer,
+ jiffies + msecs_to_jiffies(REORDER_WAIT_TIME));
+ }
+
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
+}
+
+int process_recv_indicatepkts(struct rtw_adapter *padapter,
+ struct recv_frame *prframe);
+int process_recv_indicatepkts(struct rtw_adapter *padapter,
+ struct recv_frame *prframe)
+{
+ int retval = _SUCCESS;
+ /* struct recv_priv *precvpriv = &padapter->recvpriv; */
+ /* struct rx_pkt_attrib *pattrib = &prframe->attrib; */
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ if (phtpriv->ht_option == true) { /* B/G/N Mode */
+ /* prframe->preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority]; */
+
+ /* including perform A-MPDU Rx Ordering Buffer Control */
+ if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) {
+ if ((padapter->bDriverStopped == false) &&
+ (padapter->bSurpriseRemoved == false)) {
+ retval = _FAIL;
+ return retval;
+ }
+ }
+ } else /* B/G mode */
+ {
+ retval = wlanhdr_to_ethhdr(prframe);
+ if (retval != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("wlanhdr_to_ethhdr: drop pkt\n"));
+ return retval;
+ }
+
+ if ((padapter->bDriverStopped == false) &&
+ (padapter->bSurpriseRemoved == false)) {
+ /* indicate this recv_frame */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("@@@@ process_recv_indicatepkts- "
+ "recv_func recv_indicatepkt\n"));
+ rtw_recv_indicatepkt23a(padapter, prframe);
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("@@@@ process_recv_indicatepkts- "
+ "recv_func free_indicatepkt\n"));
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("recv_func:bDriverStopped(%d) OR "
+ "bSurpriseRemoved(%d)",
+ padapter->bDriverStopped,
+ padapter->bSurpriseRemoved));
+ retval = _FAIL;
+ return retval;
+ }
+
+ }
+
+ return retval;
+}
+
+static int recv_func_prehandle(struct rtw_adapter *padapter,
+ struct recv_frame *rframe)
+{
+ struct rtw_queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+ int ret = _SUCCESS;
+
+ /* check the frame ctrl field and decache */
+ ret = validate_recv_frame(padapter, rframe);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("recv_func: validate_recv_frame fail! drop pkt\n"));
+ rtw_free_recvframe23a(rframe, pfree_recv_queue);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int recv_func_posthandle(struct rtw_adapter *padapter,
+ struct recv_frame *prframe)
+{
+ int ret = _SUCCESS;
+ struct recv_frame *orig_prframe = prframe;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct rtw_queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ /* DATA FRAME */
+ rtw_led_control(padapter, LED_CTL_RX);
+
+ prframe = decryptor(padapter, prframe);
+ if (prframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("decryptor: drop pkt\n"));
+ ret = _FAIL;
+ goto _recv_data_drop;
+ }
+
+ prframe = recvframe_chk_defrag23a(padapter, prframe);
+ if (!prframe) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("recvframe_chk_defrag23a: drop pkt\n"));
+ goto _recv_data_drop;
+ }
+
+ /*
+ * Pull off crypto headers
+ */
+ if (prframe->attrib.iv_len > 0) {
+ skb_pull(prframe->pkt, prframe->attrib.iv_len);
+ }
+
+ if (prframe->attrib.icv_len > 0) {
+ skb_trim(prframe->pkt,
+ prframe->pkt->len - prframe->attrib.icv_len);
+ }
+
+ prframe = portctrl(padapter, prframe);
+ if (!prframe) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("portctrl: drop pkt\n"));
+ ret = _FAIL;
+ goto _recv_data_drop;
+ }
+
+ count_rx_stats(padapter, prframe, NULL);
+
+ ret = process_recv_indicatepkts(padapter, prframe);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("recv_func: process_recv_indicatepkts fail!\n"));
+ rtw_free_recvframe23a(orig_prframe, pfree_recv_queue);/* free this recv_frame */
+ goto _recv_data_drop;
+ }
+ return ret;
+
+_recv_data_drop:
+ precvpriv->rx_drop++;
+ return ret;
+}
+
+int rtw_recv_entry23a(struct recv_frame *rframe)
+{
+ int ret, r;
+ struct rtw_adapter *padapter = rframe->adapter;
+ struct rx_pkt_attrib *prxattrib = &rframe->attrib;
+ struct recv_priv *recvpriv = &padapter->recvpriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *mlmepriv = &padapter->mlmepriv;
+
+ /* check if need to handle uc_swdec_pending_queue*/
+ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
+ psecuritypriv->busetkipkey) {
+ struct recv_frame *pending_frame;
+
+ while ((pending_frame = rtw_alloc_recvframe23a(&padapter->recvpriv.uc_swdec_pending_queue))) {
+ r = recv_func_posthandle(padapter, pending_frame);
+ if (r == _SUCCESS)
+ DBG_8723A("%s: dequeue uc_swdec_pending_queue\n", __func__);
+ }
+ }
+
+ ret = recv_func_prehandle(padapter, rframe);
+
+ if (ret == _SUCCESS) {
+ /* check if need to enqueue into uc_swdec_pending_queue*/
+ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
+ !is_multicast_ether_addr(prxattrib->ra) &&
+ prxattrib->encrypt > 0 &&
+ (prxattrib->bdecrypted == 0) &&
+ !is_wep_enc(psecuritypriv->dot11PrivacyAlgrthm) &&
+ !psecuritypriv->busetkipkey) {
+ rtw_enqueue_recvframe23a(rframe, &padapter->recvpriv.uc_swdec_pending_queue);
+ DBG_8723A("%s: no key, enqueue uc_swdec_pending_queue\n", __func__);
+ goto exit;
+ }
+
+ ret = recv_func_posthandle(padapter, rframe);
+
+ recvpriv->rx_pkts++;
+ }
+
+exit:
+ return ret;
+}
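+
+/*
+ * Flow summary (from the code above): validate first; if the frame is a
+ * unicast, encrypted, not-yet-decrypted data frame and the TKIP key is not
+ * installed yet, it is parked on uc_swdec_pending_queue and replayed
+ * through recv_func_posthandle() once busetkipkey is set.
+ */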
+
+void rtw_signal_stat_timer_hdl23a(unsigned long data)
+{
+ struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+ struct recv_priv *recvpriv = &adapter->recvpriv;
+
+ u32 tmp_s, tmp_q;
+ u8 avg_signal_strength = 0;
+ u8 avg_signal_qual = 0;
+ u32 num_signal_strength = 0;
+ u32 num_signal_qual = 0;
+ u8 _alpha = 3; /* this value is based on converging_constant = 5000 */
+ /* and sampling_interval = 1000 */
+
+ if (adapter->recvpriv.is_signal_dbg) {
+ /* update the user specific value, signal_strength_dbg, */
+ /* to signal_strength, rssi */
+ adapter->recvpriv.signal_strength =
+ adapter->recvpriv.signal_strength_dbg;
+ adapter->recvpriv.rssi =
+ (s8)translate_percentage_to_dbm((u8)adapter->recvpriv.signal_strength_dbg);
+ } else {
+ if (recvpriv->signal_strength_data.update_req == 0) {
+ /* update_req is clear, means we got rx */
+ avg_signal_strength =
+ recvpriv->signal_strength_data.avg_val;
+ num_signal_strength =
+ recvpriv->signal_strength_data.total_num;
+ /* after avg_vals are acquired, we can re-stat */
+ /* the signal values */
+ recvpriv->signal_strength_data.update_req = 1;
+ }
+
+ if (recvpriv->signal_qual_data.update_req == 0) {
+ /* update_req is clear, means we got rx */
+ avg_signal_qual = recvpriv->signal_qual_data.avg_val;
+ num_signal_qual = recvpriv->signal_qual_data.total_num;
+ /* after avg_vals are acquired, we can re-stat */
+ /* the signal values */
+ recvpriv->signal_qual_data.update_req = 1;
+ }
+
+ /* update value of signal_strength, rssi, signal_qual */
+ if (check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY) ==
+ false) {
+ tmp_s = (avg_signal_strength + (_alpha - 1) *
+ recvpriv->signal_strength);
+ if (tmp_s % _alpha)
+ tmp_s = tmp_s / _alpha + 1;
+ else
+ tmp_s = tmp_s / _alpha;
+ if (tmp_s > 100)
+ tmp_s = 100;
+
+ tmp_q = (avg_signal_qual + (_alpha - 1) *
+ recvpriv->signal_qual);
+ if (tmp_q % _alpha)
+ tmp_q = tmp_q / _alpha + 1;
+ else
+ tmp_q = tmp_q / _alpha;
+ if (tmp_q > 100)
+ tmp_q = 100;
+
+ recvpriv->signal_strength = tmp_s;
+ recvpriv->rssi = (s8)translate_percentage_to_dbm(tmp_s);
+ recvpriv->signal_qual = tmp_q;
+
+ DBG_8723A("%s signal_strength:%3u, rssi:%3d, "
+ "signal_qual:%3u, num_signal_strength:%u, "
+ "num_signal_qual:%u\n",
+ __func__, recvpriv->signal_strength,
+ recvpriv->rssi, recvpriv->signal_qual,
+ num_signal_strength, num_signal_qual
+ );
+ }
+ }
+ rtw_set_signal_stat_timer(recvpriv);
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_security.c b/drivers/staging/rtl8723au/core/rtw_security.c
new file mode 100644
index 000000000000..fd43e71bf6d6
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_security.c
@@ -0,0 +1,1652 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_SECURITY_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <osdep_intf.h>
+
+/* WEP related ===== */
+
+#define CRC32_POLY 0x04c11db7
+
+struct arc4context {
+ u32 x;
+ u32 y;
+ u8 state[256];
+};
+
+static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len)
+{
+ u32 t, u;
+ u32 keyindex;
+ u32 stateindex;
+ u8 *state;
+ u32 counter;
+
+ state = parc4ctx->state;
+ parc4ctx->x = 0;
+ parc4ctx->y = 0;
+ for (counter = 0; counter < 256; counter++)
+ state[counter] = (u8)counter;
+ keyindex = 0;
+ stateindex = 0;
+ for (counter = 0; counter < 256; counter++)
+ {
+ t = state[counter];
+ stateindex = (stateindex + key[keyindex] + t) & 0xff;
+ u = state[stateindex];
+ state[stateindex] = (u8)t;
+ state[counter] = (u8)u;
+ if (++keyindex >= key_len)
+ keyindex = 0;
+ }
+
+}
+
+static u32 arcfour_byte(struct arc4context *parc4ctx)
+{
+ u32 x;
+ u32 y;
+ u32 sx, sy;
+ u8 *state;
+
+ state = parc4ctx->state;
+ x = (parc4ctx->x + 1) & 0xff;
+ sx = state[x];
+ y = (sx + parc4ctx->y) & 0xff;
+ sy = state[y];
+ parc4ctx->x = x;
+ parc4ctx->y = y;
+ state[y] = (u8)sx;
+ state[x] = (u8)sy;
+
+ return state[(sx + sy) & 0xff];
+}
+
+static void arcfour_encrypt(struct arc4context *parc4ctx,
+ u8 *dest, u8 *src, u32 len)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ dest[i] = src[i] ^ (unsigned char)arcfour_byte(parc4ctx);
+
+}
+
+static int bcrc32initialized = 0;
+static u32 crc32_table[256];
+
+static u8 crc32_reverseBit(u8 data)
+{
+ u8 retval = ((data << 7) & 0x80) | ((data << 5) & 0x40) |
+ ((data << 3) & 0x20) | ((data << 1) & 0x10) |
+ ((data >> 1) & 0x08) | ((data >> 3) & 0x04) |
+ ((data >> 5) & 0x02) | ((data >> 7) & 0x01);
+ return retval;
+}
+
+static void crc32_init(void)
+{
+ int i, j;
+ u32 c;
+ u8 *p = (u8 *)&c, *p1;
+ u8 k;
+
+ if (bcrc32initialized == 1)
+ return;
+
+ for (i = 0; i < 256; ++i) {
+ k = crc32_reverseBit((u8)i);
+ for (c = ((u32)k) << 24, j = 8; j > 0; --j)
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ p1 = (u8 *)&crc32_table[i];
+
+ p1[0] = crc32_reverseBit(p[3]);
+ p1[1] = crc32_reverseBit(p[2]);
+ p1[2] = crc32_reverseBit(p[1]);
+ p1[3] = crc32_reverseBit(p[0]);
+ }
+ bcrc32initialized = 1;
+}
+
+static u32 getcrc32(u8 *buf, int len)
+{
+ u8 *p;
+ u32 crc;
+
+ if (bcrc32initialized == 0)
+ crc32_init();
+
+ crc = 0xffffffff; /* preload shift register, per CRC-32 spec */
+
+ for (p = buf; len > 0; ++p, --len)
+ crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
+
+ return ~crc; /* transmit complement, per CRC-32 spec */
+}
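+
+/*
+ * Usage sketch (mirroring rtw_wep_encrypt23a below): the WEP ICV is the
+ * CRC-32 of the plaintext payload, appended little-endian, and the ARC4
+ * keystream covers payload plus ICV:
+ *
+ *   *((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));
+ *   arcfour_init(&ctx, wepkey, 3 + keylength);   (key = IV[3] || default key)
+ *   arcfour_encrypt(&ctx, payload, payload, length);
+ *   arcfour_encrypt(&ctx, payload + length, crc, 4);
+ */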
+
+/* Need to consider the fragment situation */
+void rtw_wep_encrypt23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe)
+{
+ /* exclude ICV */
+ unsigned char crc[4];
+ struct arc4context mycontext;
+ int curfragnum, length, index;
+ u32 keylength;
+ u8 *pframe, *payload, *iv; /* wepkey */
+ u8 wepkey[16];
+ u8 hw_hdr_offset = 0;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ if (!pxmitframe->buf_addr)
+ return;
+
+ hw_hdr_offset = TXDESC_OFFSET;
+
+ pframe = pxmitframe->buf_addr + hw_hdr_offset;
+
+ /* start to encrypt each fragment */
+ if ((pattrib->encrypt != _WEP40_) && (pattrib->encrypt != _WEP104_))
+ return;
+
+ index = psecuritypriv->dot11PrivacyKeyIndex;
+ keylength = psecuritypriv->dot11DefKeylen[index];
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags ; curfragnum++) {
+ iv = pframe + pattrib->hdrlen;
+ memcpy(&wepkey[0], iv, 3);
+ memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[index].skey[0],
+ keylength);
+ payload = pframe + pattrib->iv_len + pattrib->hdrlen;
+
+ if ((curfragnum + 1) == pattrib->nr_frags) {
+ /* the last fragment */
+ length = pattrib->last_txcmdsz - pattrib->hdrlen -
+ pattrib->iv_len- pattrib->icv_len;
+
+ *((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));
+
+ arcfour_init(&mycontext, wepkey, 3 + keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload + length, crc, 4);
+ } else {
+ length = pxmitpriv->frag_len - pattrib->hdrlen -
+ pattrib->iv_len - pattrib->icv_len;
+ *((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));
+ arcfour_init(&mycontext, wepkey, 3 + keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload + length, crc, 4);
+
+ pframe += pxmitpriv->frag_len;
+ pframe = PTR_ALIGN(pframe, 4);
+ }
+ }
+
+}
+
+void rtw_wep_decrypt23a(struct rtw_adapter *padapter,
+ struct recv_frame *precvframe)
+{
+ /* exclude ICV */
+ u8 crc[4];
+ struct arc4context mycontext;
+ int length;
+ u32 keylength;
+ u8 *pframe, *payload, *iv, wepkey[16];
+ u8 keyindex;
+ struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct sk_buff * skb = precvframe->pkt;
+
+ pframe = skb->data;
+
+ /* start to decrypt recvframe */
+ if ((prxattrib->encrypt != _WEP40_) && (prxattrib->encrypt != _WEP104_))
+ return;
+
+ iv = pframe + prxattrib->hdrlen;
+ /* keyindex = (iv[3]&0x3); */
+ keyindex = prxattrib->key_index;
+ keylength = psecuritypriv->dot11DefKeylen[keyindex];
+ memcpy(&wepkey[0], iv, 3);
+ /* memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[psecuritypriv->dot11PrivacyKeyIndex].skey[0], keylength); */
+ memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[keyindex].skey[0],
+ keylength);
+ length = skb->len - prxattrib->hdrlen - prxattrib->iv_len;
+
+ payload = pframe + prxattrib->iv_len + prxattrib->hdrlen;
+
+ /* decrypt payload include icv */
+ arcfour_init(&mycontext, wepkey, 3 + keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+
+ /* calculate icv and compare the icv */
+ *((u32 *)crc) = le32_to_cpu(getcrc32(payload, length - 4));
+
+ if (crc[3] != payload[length - 1] || crc[2] != payload[length - 2] ||
+ crc[1] != payload[length - 3] || crc[0] != payload[length - 4]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_wep_decrypt23a:icv error crc[3](%x)!= payload"
+ "[length-1](%x) || crc[2](%x)!= payload[length-2](%x)"
+ " || crc[1](%x)!= payload[length-3](%x) || crc[0](%x)"
+ "!= payload[length-4](%x)\n",
+ crc[3], payload[length - 1],
+ crc[2], payload[length - 2],
+ crc[1], payload[length - 3],
+ crc[0], payload[length - 4]));
+ }
+}
+
+/* 3 ===== TKIP related ===== */
+
+/* Convert from Byte[] to u32 in a portable way */
+static u32 secmicgetuint32(u8 *p)
+{
+ s32 i;
+ u32 res = 0;
+
+ for (i = 0; i < 4; i++)
+ res |= ((u32)(*p++)) << (8 * i);
+
+ return res;
+}
+
+/* Convert from long to Byte[] in a portable way */
+static void secmicputuint32(u8 *p, u32 val)
+{
+ long i;
+
+ for (i = 0; i < 4; i++) {
+ *p++ = (u8)(val & 0xff);
+ val >>= 8;
+ }
+}
+
+static void secmicclear(struct mic_data *pmicdata)
+{
+/* Reset the state to the empty message. */
+
+ pmicdata->L = pmicdata->K0;
+ pmicdata->R = pmicdata->K1;
+ pmicdata->nBytesInM = 0;
+ pmicdata->M = 0;
+
+}
+
+void rtw_secmicsetkey23a(struct mic_data *pmicdata, u8 *key)
+{
+ /* Set the key */
+
+ pmicdata->K0 = secmicgetuint32(key);
+ pmicdata->K1 = secmicgetuint32(key + 4);
+ /* and reset the message */
+ secmicclear(pmicdata);
+
+}
+
+void rtw_secmicappend23abyte23a(struct mic_data *pmicdata, u8 b)
+{
+
+ /* Append the byte to our word-sized buffer */
+ pmicdata->M |= ((unsigned long)b) << (8*pmicdata->nBytesInM);
+ pmicdata->nBytesInM++;
+ /* Process the word if it is full. */
+ if (pmicdata->nBytesInM >= 4)
+ {
+ pmicdata->L ^= pmicdata->M;
+ pmicdata->R ^= ROL32(pmicdata->L, 17);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ((pmicdata->L & 0xff00ff00) >> 8) | ((pmicdata->L & 0x00ff00ff) << 8);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ROL32(pmicdata->L, 3);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ROR32(pmicdata->L, 2);
+ pmicdata->L += pmicdata->R;
+ /* Clear the buffer */
+ pmicdata->M = 0;
+ pmicdata->nBytesInM = 0;
+ }
+
+}
+
+void rtw_secmicappend23a(struct mic_data *pmicdata, u8 *src, u32 nbytes)
+{
+ /* This is simple */
+ while (nbytes > 0) {
+ rtw_secmicappend23abyte23a(pmicdata, *src++);
+ nbytes--;
+ }
+}
+
+void rtw_secgetmic23a(struct mic_data *pmicdata, u8 *dst)
+{
+
+ /* Append the minimum padding */
+ rtw_secmicappend23abyte23a(pmicdata, 0x5a);
+ rtw_secmicappend23abyte23a(pmicdata, 0);
+ rtw_secmicappend23abyte23a(pmicdata, 0);
+ rtw_secmicappend23abyte23a(pmicdata, 0);
+ rtw_secmicappend23abyte23a(pmicdata, 0);
+ /* and then zeroes until the length is a multiple of 4 */
+ while (pmicdata->nBytesInM != 0)
+ rtw_secmicappend23abyte23a(pmicdata, 0);
+ /* The appendByte function has already computed the result. */
+ secmicputuint32(dst, pmicdata->L);
+ secmicputuint32(dst+4, pmicdata->R);
+ /* Reset to the empty message. */
+ secmicclear(pmicdata);
+
+}
+
+void rtw_seccalctkipmic23a(u8 *key, u8 *header, u8 *data, u32 data_len,
+ u8 *mic_code, u8 pri)
+{
+
+ struct mic_data micdata;
+ u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
+
+ rtw_secmicsetkey23a(&micdata, key);
+ priority[0] = pri;
+
+ /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
+ if (header[1]&1) { /* ToDS == 1 */
+ rtw_secmicappend23a(&micdata, &header[16], 6); /* DA */
+ if (header[1]&2) /* From Ds == 1 */
+ rtw_secmicappend23a(&micdata, &header[24], 6);
+ else
+ rtw_secmicappend23a(&micdata, &header[10], 6);
+ }
+ else{ /* ToDS == 0 */
+ rtw_secmicappend23a(&micdata, &header[4], 6); /* DA */
+ if (header[1]&2) /* From Ds == 1 */
+ rtw_secmicappend23a(&micdata, &header[16], 6);
+ else
+ rtw_secmicappend23a(&micdata, &header[10], 6);
+
+ }
+ rtw_secmicappend23a(&micdata, &priority[0], 4);
+
+ rtw_secmicappend23a(&micdata, data, data_len);
+
+ rtw_secgetmic23a(&micdata, mic_code);
+
+}
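+
+/*
+ * The Michael pseudo-header built above is DA | SA | priority | 0 | 0 | 0,
+ * with DA/SA picked according to the ToDS/FromDS bits of the frame control
+ * field; this pseudo-header plus the MSDU payload then feed the L/R
+ * rotate-and-add rounds implemented in rtw_secmicappend23abyte23a().
+ */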
+
+/* macros for extraction/creation of unsigned char/unsigned short values */
+#define RotR1(v16) ((((v16) >> 1) & 0x7FFF) ^ (((v16) & 1) << 15))
+#define Lo8(v16) ((u8)((v16) & 0x00FF))
+#define Hi8(v16) ((u8)(((v16) >> 8) & 0x00FF))
+#define Lo16(v32) ((u16)((v32) & 0xFFFF))
+#define Hi16(v32) ((u16)(((v32) >>16) & 0xFFFF))
+#define Mk16(hi, lo) ((lo) ^ (((u16)(hi)) << 8))
+
+/* select the Nth 16-bit word of the temporal key unsigned char array TK[] */
+#define TK16(N) Mk16(tk[2*(N)+1], tk[2*(N)])
+
+/* S-box lookup: 16 bits --> 16 bits */
+#define _S_(v16) (Sbox1[0][Lo8(v16)] ^ Sbox1[1][Hi8(v16)])
+
+/* fixed algorithm "parameters" */
+#define PHASE1_LOOP_CNT 8 /* this needs to be "big enough" */
+#define TA_SIZE 6 /* 48-bit transmitter address */
+#define TK_SIZE 16 /* 128-bit temporal key */
+#define P1K_SIZE 10 /* 80-bit Phase1 key */
+#define RC4_KEY_SIZE 16 /* 128-bit RC4KEY (104 bits unknown) */
+
+/* 2-unsigned char by 2-unsigned char subset of the full AES S-box table */
+static const unsigned short Sbox1[2][256]= /* Sbox for hash (can be in ROM) */
+{ {
+ 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
+ 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
+ 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
+ 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
+ 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
+ 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
+ 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
+ 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
+ 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
+ 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
+ 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
+ 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
+ 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
+ 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
+ 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
+ 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
+ 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
+ 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
+ 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
+ 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
+ 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
+ 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
+ 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
+ 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
+ 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
+ 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
+ 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
+ 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
+ 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
+ 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
+ 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
+ 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
+ },
+
+ { /* second half of table is unsigned char-reversed version of first! */
+ 0xA5C6, 0x84F8, 0x99EE, 0x8DF6, 0x0DFF, 0xBDD6, 0xB1DE, 0x5491,
+ 0x5060, 0x0302, 0xA9CE, 0x7D56, 0x19E7, 0x62B5, 0xE64D, 0x9AEC,
+ 0x458F, 0x9D1F, 0x4089, 0x87FA, 0x15EF, 0xEBB2, 0xC98E, 0x0BFB,
+ 0xEC41, 0x67B3, 0xFD5F, 0xEA45, 0xBF23, 0xF753, 0x96E4, 0x5B9B,
+ 0xC275, 0x1CE1, 0xAE3D, 0x6A4C, 0x5A6C, 0x417E, 0x02F5, 0x4F83,
+ 0x5C68, 0xF451, 0x34D1, 0x08F9, 0x93E2, 0x73AB, 0x5362, 0x3F2A,
+ 0x0C08, 0x5295, 0x6546, 0x5E9D, 0x2830, 0xA137, 0x0F0A, 0xB52F,
+ 0x090E, 0x3624, 0x9B1B, 0x3DDF, 0x26CD, 0x694E, 0xCD7F, 0x9FEA,
+ 0x1B12, 0x9E1D, 0x7458, 0x2E34, 0x2D36, 0xB2DC, 0xEEB4, 0xFB5B,
+ 0xF6A4, 0x4D76, 0x61B7, 0xCE7D, 0x7B52, 0x3EDD, 0x715E, 0x9713,
+ 0xF5A6, 0x68B9, 0x0000, 0x2CC1, 0x6040, 0x1FE3, 0xC879, 0xEDB6,
+ 0xBED4, 0x468D, 0xD967, 0x4B72, 0xDE94, 0xD498, 0xE8B0, 0x4A85,
+ 0x6BBB, 0x2AC5, 0xE54F, 0x16ED, 0xC586, 0xD79A, 0x5566, 0x9411,
+ 0xCF8A, 0x10E9, 0x0604, 0x81FE, 0xF0A0, 0x4478, 0xBA25, 0xE34B,
+ 0xF3A2, 0xFE5D, 0xC080, 0x8A05, 0xAD3F, 0xBC21, 0x4870, 0x04F1,
+ 0xDF63, 0xC177, 0x75AF, 0x6342, 0x3020, 0x1AE5, 0x0EFD, 0x6DBF,
+ 0x4C81, 0x1418, 0x3526, 0x2FC3, 0xE1BE, 0xA235, 0xCC88, 0x392E,
+ 0x5793, 0xF255, 0x82FC, 0x477A, 0xACC8, 0xE7BA, 0x2B32, 0x95E6,
+ 0xA0C0, 0x9819, 0xD19E, 0x7FA3, 0x6644, 0x7E54, 0xAB3B, 0x830B,
+ 0xCA8C, 0x29C7, 0xD36B, 0x3C28, 0x79A7, 0xE2BC, 0x1D16, 0x76AD,
+ 0x3BDB, 0x5664, 0x4E74, 0x1E14, 0xDB92, 0x0A0C, 0x6C48, 0xE4B8,
+ 0x5D9F, 0x6EBD, 0xEF43, 0xA6C4, 0xA839, 0xA431, 0x37D3, 0x8BF2,
+ 0x32D5, 0x438B, 0x596E, 0xB7DA, 0x8C01, 0x64B1, 0xD29C, 0xE049,
+ 0xB4D8, 0xFAAC, 0x07F3, 0x25CF, 0xAFCA, 0x8EF4, 0xE947, 0x1810,
+ 0xD56F, 0x88F0, 0x6F4A, 0x725C, 0x2438, 0xF157, 0xC773, 0x5197,
+ 0x23CB, 0x7CA1, 0x9CE8, 0x213E, 0xDD96, 0xDC61, 0x860D, 0x850F,
+ 0x90E0, 0x427C, 0xC471, 0xAACC, 0xD890, 0x0506, 0x01F7, 0x121C,
+ 0xA3C2, 0x5F6A, 0xF9AE, 0xD069, 0x9117, 0x5899, 0x273A, 0xB927,
+ 0x38D9, 0x13EB, 0xB32B, 0x3322, 0xBBD2, 0x70A9, 0x8907, 0xA733,
+ 0xB62D, 0x223C, 0x9215, 0x20C9, 0x4987, 0xFFAA, 0x7850, 0x7AA5,
+ 0x8F03, 0xF859, 0x8009, 0x171A, 0xDA65, 0x31D7, 0xC684, 0xB8D0,
+ 0xC382, 0xB029, 0x775A, 0x111E, 0xCB7B, 0xFCA8, 0xD66D, 0x3A2C,
+ }
+};
+
+ /*
+**********************************************************************
+* Routine: Phase 1 -- generate P1K, given TA, TK, IV32
+*
+* Inputs:
+* tk[] = temporal key [128 bits]
+* ta[] = transmitter's MAC address [ 48 bits]
+* iv32 = upper 32 bits of IV [ 32 bits]
+* Output:
+* p1k[] = Phase 1 key [ 80 bits]
+*
+* Note:
+* This function only needs to be called every 2**16 packets,
+* although in theory it could be called every packet.
+*
+**********************************************************************
+*/
+static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
+{
+ int i;
+
+ /* Initialize the 80 bits of P1K[] from IV32 and TA[0..5] */
+ p1k[0] = Lo16(iv32);
+ p1k[1] = Hi16(iv32);
+ p1k[2] = Mk16(ta[1], ta[0]); /* use TA[] as little-endian */
+ p1k[3] = Mk16(ta[3], ta[2]);
+ p1k[4] = Mk16(ta[5], ta[4]);
+
+ /* Now compute an unbalanced Feistel cipher with 80-bit block */
+ /* size on the 80-bit block P1K[], using the 128-bit key TK[] */
+ for (i = 0; i < PHASE1_LOOP_CNT ;i++)
+ { /* Each add operation here is mod 2**16 */
+ p1k[0] += _S_(p1k[4] ^ TK16((i&1)+0));
+ p1k[1] += _S_(p1k[0] ^ TK16((i&1)+2));
+ p1k[2] += _S_(p1k[1] ^ TK16((i&1)+4));
+ p1k[3] += _S_(p1k[2] ^ TK16((i&1)+6));
+ p1k[4] += _S_(p1k[3] ^ TK16((i&1)+0));
+ p1k[4] += (unsigned short)i; /* avoid "slide attacks" */
+ }
+
+}
+
+/*
+**********************************************************************
+* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
+*
+* Inputs:
+* tk[] = Temporal key [128 bits]
+* p1k[] = Phase 1 output key [ 80 bits]
+* iv16 = low 16 bits of IV counter [ 16 bits]
+* Output:
+* rc4key[] = the key used to encrypt the packet [128 bits]
+*
+* Note:
+* The value {TA, IV32, IV16} for Phase1/Phase2 must be unique
+* across all packets using the same key TK value. Then, for a
+* given value of TK[], this TKIP48 construction guarantees that
+* the final RC4KEY value is unique across all packets.
+*
+* Suggested implementation optimization: if PPK[] is "overlaid"
+* appropriately on RC4KEY[], there is no need for the final
+* for loop below that copies the PPK[] result into RC4KEY[].
+*
+**********************************************************************
+*/
+static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
+{
+ int i;
+ u16 PPK[6]; /* temporary key for mixing */
+
+ /* Note: all adds in the PPK[] equations below are mod 2**16 */
+ for (i = 0;i<5;i++) PPK[i]= p1k[i]; /* first, copy P1K to PPK */
+ PPK[5] = p1k[4] +iv16; /* next, add in IV16 */
+
+ /* Bijective non-linear mixing of the 96 bits of PPK[0..5] */
+ PPK[0] += _S_(PPK[5] ^ TK16(0)); /* Mix key in each "round" */
+ PPK[1] += _S_(PPK[0] ^ TK16(1));
+ PPK[2] += _S_(PPK[1] ^ TK16(2));
+ PPK[3] += _S_(PPK[2] ^ TK16(3));
+ PPK[4] += _S_(PPK[3] ^ TK16(4));
+ PPK[5] += _S_(PPK[4] ^ TK16(5)); /* Total # S-box lookups == 6 */
+
+ /* Final sweep: bijective, "linear". Rotates kill LSB correlations */
+ PPK[0] += RotR1(PPK[5] ^ TK16(6));
+ PPK[1] += RotR1(PPK[0] ^ TK16(7)); /* Use all of TK[] in Phase2 */
+ PPK[2] += RotR1(PPK[1]);
+ PPK[3] += RotR1(PPK[2]);
+ PPK[4] += RotR1(PPK[3]);
+ PPK[5] += RotR1(PPK[4]);
+ /* Note: At this point, for a given key TK[0..15], the 96-bit output */
+ /* value PPK[0..5] is guaranteed to be unique, as a function */
+ /* of the 96-bit "input" value {TA, IV32, IV16}. That is, P1K */
+ /* is now a keyed permutation of {TA, IV32, IV16}. */
+
+ /* Set RC4KEY[0..3], which includes "cleartext" portion of RC4 key */
+ rc4key[0] = Hi8(iv16); /* RC4KEY[0..2] is the WEP IV */
+ rc4key[1] = (Hi8(iv16) | 0x20) & 0x7F; /* Help avoid weak (FMS) keys */
+ rc4key[2] = Lo8(iv16);
+ rc4key[3] = Lo8((PPK[5] ^ TK16(0)) >> 1);
+
+ /* Copy 96 bits of PPK[0..5] to RC4KEY[4..15] (little-endian) */
+ for (i = 0;i<6;i++)
+ {
+ rc4key[4+2*i] = Lo8(PPK[i]);
+ rc4key[5+2*i] = Hi8(PPK[i]);
+ }
+
+}
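+
+/*
+ * Usage sketch (as in rtw_tkip_encrypt23a/rtw_tkip_decrypt23a below): the
+ * 48-bit TSC recovered from the IV is split into IV32/IV16 and mixed with
+ * the temporal key and the transmitter address to give the per-packet RC4
+ * key:
+ *
+ *   GET_TKIP_PN(iv, dot11txpn);
+ *   pnl = (u16)dot11txpn.val;               (IV16)
+ *   pnh = (u32)(dot11txpn.val >> 16);       (IV32)
+ *   phase1((u16 *)ttkey, prwskey, ta, pnh);
+ *   phase2(rc4key, prwskey, (u16 *)ttkey, pnl);
+ *   arcfour_init(&ctx, rc4key, 16);
+ */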
+
+/* The hlen doesn't include the IV */
+u32 rtw_tkip_encrypt23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe)
+{ /* exclude ICV */
+ u16 pnl;
+ u32 pnh;
+ u8 rc4key[16];
+ u8 ttkey[16];
+ u8 crc[4];
+ u8 hw_hdr_offset = 0;
+ struct arc4context mycontext;
+ int curfragnum, length;
+ u32 prwskeylen;
+
+ u8 *pframe, *payload, *iv, *prwskey;
+ union pn48 dot11txpn;
+ struct sta_info *stainfo;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u32 res = _SUCCESS;
+
+ if (!pxmitframe->buf_addr)
+ return _FAIL;
+
+ hw_hdr_offset = TXDESC_OFFSET;
+
+ pframe = pxmitframe->buf_addr + hw_hdr_offset;
+ /* 4 start to encrypt each fragment */
+ if (pattrib->encrypt == _TKIP_) {
+
+ if (pattrib->psta)
+ {
+ stainfo = pattrib->psta;
+ }
+ else
+ {
+ DBG_8723A("%s, call rtw_get_stainfo()\n", __func__);
+ stainfo = rtw_get_stainfo23a(&padapter->stapriv,
+ &pattrib->ra[0]);
+ }
+
+ if (stainfo!= NULL) {
+
+ if (!(stainfo->state &_FW_LINKED))
+ {
+ DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
+ return _FAIL;
+ }
+
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt23a: stainfo!= NULL!!!\n"));
+
+ if (is_multicast_ether_addr(pattrib->ra))
+ prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
+ else
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+
+ prwskeylen = 16;
+
+ for (curfragnum = 0;curfragnum<pattrib->nr_frags;curfragnum++) {
+ iv = pframe+pattrib->hdrlen;
+ payload = pframe+pattrib->iv_len+pattrib->hdrlen;
+
+ GET_TKIP_PN(iv, dot11txpn);
+
+ pnl = (u16)(dot11txpn.val);
+ pnh = (u32)(dot11txpn.val>>16);
+
+ phase1((u16 *)&ttkey[0], prwskey,&pattrib->ta[0], pnh);
+
+ phase2(&rc4key[0], prwskey, (u16 *)&ttkey[0], pnl);
+
+ if ((curfragnum+1) == pattrib->nr_frags) { /* 4 the last fragment */
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len- pattrib->icv_len;
+ RT_TRACE(_module_rtl871x_security_c_, _drv_info_, ("pattrib->iv_len =%x, pattrib->icv_len =%x\n", pattrib->iv_len, pattrib->icv_len));
+ *((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));/* modified by Amy*/
+
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+
+ } else {
+ length = pxmitpriv->frag_len - pattrib->hdrlen -
+ pattrib->iv_len - pattrib->icv_len;
+ *((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));/* modified by Amy*/
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+
+ pframe+= pxmitpriv->frag_len;
+ pframe = PTR_ALIGN(pframe, 4);
+ }
+ }
+
+ } else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_tkip_encrypt23a: stainfo == NULL!!!\n"));
+ DBG_8723A("%s, psta == NULL\n", __func__);
+ res = _FAIL;
+ }
+
+ }
+
+ return res;
+}
+
+/* The hlen doesn't include the IV */
+u32 rtw_tkip_decrypt23a(struct rtw_adapter *padapter,
+ struct recv_frame *precvframe)
+{
+ /* exclude ICV */
+ u16 pnl;
+ u32 pnh;
+ u8 rc4key[16];
+ u8 ttkey[16];
+ u8 crc[4];
+ struct arc4context mycontext;
+ int length;
+ u32 prwskeylen;
+ u8 *pframe, *payload, *iv, *prwskey;
+ union pn48 dot11txpn;
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct sk_buff * skb = precvframe->pkt;
+ u32 res = _SUCCESS;
+
+ pframe = skb->data;
+
+ /* 4 start to decrypt recvframe */
+ if (prxattrib->encrypt == _TKIP_) {
+
+ stainfo = rtw_get_stainfo23a(&padapter->stapriv,
+ &prxattrib->ta[0]);
+ if (stainfo != NULL) {
+
+ if (is_multicast_ether_addr(prxattrib->ra)) {
+ if (psecuritypriv->binstallGrpkey == false) {
+ res = _FAIL;
+ DBG_8723A("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
+ goto exit;
+ }
+ prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
+ prwskeylen = 16;
+ } else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt23a: stainfo!= NULL!!!\n"));
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+ prwskeylen = 16;
+ }
+
+ iv = pframe+prxattrib->hdrlen;
+ payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+ length = skb->len - prxattrib->hdrlen-prxattrib->iv_len;
+
+ GET_TKIP_PN(iv, dot11txpn);
+
+ pnl = (u16)(dot11txpn.val);
+ pnh = (u32)(dot11txpn.val >> 16);
+
+ phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
+ phase2(&rc4key[0], prwskey, (u16 *)&ttkey[0], pnl);
+
+ /* 4 decrypt payload include icv */
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+
+ *((u32 *)crc) = le32_to_cpu(getcrc32(payload, length - 4));
+
+ if (crc[3] != payload[length - 1] ||
+ crc[2] != payload[length - 2] ||
+ crc[1] != payload[length - 3] ||
+ crc[0] != payload[length - 4]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_tkip_decrypt23a:icv error crc[3](%x)!= payload[length-1](%x) || crc[2](%x)!= payload[length-2](%x) || crc[1](%x)!= payload[length-3](%x) || crc[0](%x)!= payload[length-4](%x)\n",
+ crc[3], payload[length - 1], crc[2], payload[length - 2],
+ crc[1], payload[length - 3], crc[0], payload[length - 4]));
+ res = _FAIL;
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt23a: stainfo == NULL!!!\n"));
+ res = _FAIL;
+ }
+ }
+exit:
+ return res;
+}
+
+/* 3 ===== AES related ===== */
+
+#define MAX_MSG_SIZE 2048
+/*****************************/
+/******** SBOX Table *********/
+/*****************************/
+
+static u8 sbox_table[256] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
+ 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
+ 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
+ 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
+ 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
+ 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
+ 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
+ 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
+ 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
+ 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
+ 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
+ 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+};
+
+/*****************************/
+/**** Function Prototypes ****/
+/*****************************/
+
+static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists,
+ int qc_exists);
+
+static void xor_128(u8 *a, u8 *b, u8 *out)
+{
+ int i;
+
+ for (i = 0;i<16; i++)
+ out[i] = a[i] ^ b[i];
+}
+
+static void xor_32(u8 *a, u8 *b, u8 *out)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ out[i] = a[i] ^ b[i];
+}
+
+static u8 sbox(u8 a)
+{
+ return sbox_table[(int)a];
+}
+
+static void next_key(u8 *key, int round)
+{
+ u8 rcon;
+ u8 sbox_key[4];
+ static const u8 rcon_table[12] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+ 0x1b, 0x36, 0x36, 0x36
+ };
+
+ sbox_key[0] = sbox(key[13]);
+ sbox_key[1] = sbox(key[14]);
+ sbox_key[2] = sbox(key[15]);
+ sbox_key[3] = sbox(key[12]);
+
+ rcon = rcon_table[round];
+
+ xor_32(&key[0], sbox_key, &key[0]);
+ key[0] = key[0] ^ rcon;
+
+ xor_32(&key[4], &key[0], &key[4]);
+ xor_32(&key[8], &key[4], &key[8]);
+ xor_32(&key[12], &key[8], &key[12]);
+
+}
+
+static void byte_sub(u8 *in, u8 *out)
+{
+ int i;
+
+ for (i = 0; i< 16; i++)
+ {
+ out[i] = sbox(in[i]);
+ }
+
+}
+
+static void shift_row(u8 *in, u8 *out)
+{
+
+ out[0] = in[0];
+ out[1] = in[5];
+ out[2] = in[10];
+ out[3] = in[15];
+ out[4] = in[4];
+ out[5] = in[9];
+ out[6] = in[14];
+ out[7] = in[3];
+ out[8] = in[8];
+ out[9] = in[13];
+ out[10] = in[2];
+ out[11] = in[7];
+ out[12] = in[12];
+ out[13] = in[1];
+ out[14] = in[6];
+ out[15] = in[11];
+
+}
+
+static void mix_column(u8 *in, u8 *out)
+{
+ int i;
+ u8 add1b[4];
+ u8 add1bf7[4];
+ u8 rotl[4];
+ u8 swap_halfs[4];
+ u8 andf7[4];
+ u8 rotr[4];
+ u8 temp[4];
+ u8 tempb[4];
+
+ for (i = 0 ; i<4; i++)
+ {
+ if ((in[i] & 0x80) == 0x80)
+ add1b[i] = 0x1b;
+ else
+ add1b[i] = 0x00;
+ }
+
+ swap_halfs[0] = in[2]; /* Swap halves */
+ swap_halfs[1] = in[3];
+ swap_halfs[2] = in[0];
+ swap_halfs[3] = in[1];
+
+ rotl[0] = in[3]; /* Rotate left 8 bits */
+ rotl[1] = in[0];
+ rotl[2] = in[1];
+ rotl[3] = in[2];
+
+ andf7[0] = in[0] & 0x7f;
+ andf7[1] = in[1] & 0x7f;
+ andf7[2] = in[2] & 0x7f;
+ andf7[3] = in[3] & 0x7f;
+
+ for (i = 3; i>0; i--) /* logical shift left 1 bit */
+ {
+ andf7[i] = andf7[i] << 1;
+ if ((andf7[i-1] & 0x80) == 0x80)
+ {
+ andf7[i] = (andf7[i] | 0x01);
+ }
+ }
+ andf7[0] = andf7[0] << 1;
+ andf7[0] = andf7[0] & 0xfe;
+
+ xor_32(add1b, andf7, add1bf7);
+
+ xor_32(in, add1bf7, rotr);
+
+ temp[0] = rotr[0]; /* Rotate right 8 bits */
+ rotr[0] = rotr[1];
+ rotr[1] = rotr[2];
+ rotr[2] = rotr[3];
+ rotr[3] = temp[0];
+
+ xor_32(add1bf7, rotr, temp);
+ xor_32(swap_halfs, rotl, tempb);
+ xor_32(temp, tempb, out);
+
+}
+
+static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
+{
+ int round;
+ int i;
+ u8 intermediatea[16];
+ u8 intermediateb[16];
+ u8 round_key[16];
+
+ for (i = 0; i<16; i++) round_key[i] = key[i];
+
+ for (round = 0; round < 11; round++)
+ {
+ if (round == 0)
+ {
+ xor_128(round_key, data, ciphertext);
+ next_key(round_key, round);
+ } else if (round == 10) {
+ byte_sub(ciphertext, intermediatea);
+ shift_row(intermediatea, intermediateb);
+ xor_128(intermediateb, round_key, ciphertext);
+ } else { /* rounds 1 - 9 */
+ byte_sub(ciphertext, intermediatea);
+ shift_row(intermediatea, intermediateb);
+ mix_column(&intermediateb[0], &intermediatea[0]);
+ mix_column(&intermediateb[4], &intermediatea[4]);
+ mix_column(&intermediateb[8], &intermediatea[8]);
+ mix_column(&intermediateb[12], &intermediatea[12]);
+ xor_128(intermediatea, round_key, ciphertext);
+ next_key(round_key, round);
+ }
+ }
+
+}
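+
+/*
+ * aes128k128d() is the software AES-128 block primitive used below:
+ * aes_cipher() chains it CBC-MAC style over the MIC IV and the two header
+ * blocks to produce the 8-byte MIC, and then runs it in counter mode (via
+ * construct_ctr_preload()) over the payload and MIC, i.e. in effect a
+ * software CCMP (AES-CCM) implementation.
+ */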
+
+/************************************************/
+/* construct_mic_iv() */
+/* Builds the MIC IV from header fields and PN */
+/************************************************/
+static void construct_mic_iv(u8 *mic_iv, int qc_exists, int a4_exists, u8 *mpdu,
+ uint payload_length, u8 *pn_vector)
+{
+ int i;
+
+ mic_iv[0] = 0x59;
+ if (qc_exists && a4_exists)
+ mic_iv[1] = mpdu[30] & 0x0f; /* QoS_TC */
+ if (qc_exists && !a4_exists)
+ mic_iv[1] = mpdu[24] & 0x0f; /* mute bits 7-4 */
+ if (!qc_exists)
+ mic_iv[1] = 0x00;
+ for (i = 2; i < 8; i++)
+ mic_iv[i] = mpdu[i + 8]; /* mic_iv[2:7] = A2[0:5] = mpdu[10:15] */
+ for (i = 8; i < 14; i++)
+ mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
+ mic_iv[14] = (unsigned char)(payload_length / 256);
+ mic_iv[15] = (unsigned char)(payload_length % 256);
+}
+
+/************************************************/
+/* construct_mic_header1() */
+/* Builds the first MIC header block from */
+/* header fields. */
+/************************************************/
+static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu)
+{
+ mic_header1[0] = (u8)((header_length - 2) / 256);
+ mic_header1[1] = (u8)((header_length - 2) % 256);
+ mic_header1[2] = mpdu[0] & 0xcf; /* Mute CF poll & CF ack bits */
+ mic_header1[3] = mpdu[1] & 0xc7; /* Mute retry, more data and pwr mgt bits */
+ mic_header1[4] = mpdu[4]; /* A1 */
+ mic_header1[5] = mpdu[5];
+ mic_header1[6] = mpdu[6];
+ mic_header1[7] = mpdu[7];
+ mic_header1[8] = mpdu[8];
+ mic_header1[9] = mpdu[9];
+ mic_header1[10] = mpdu[10]; /* A2 */
+ mic_header1[11] = mpdu[11];
+ mic_header1[12] = mpdu[12];
+ mic_header1[13] = mpdu[13];
+ mic_header1[14] = mpdu[14];
+ mic_header1[15] = mpdu[15];
+
+}
+
+/************************************************/
+ /* construct_mic_header2() */
+/* Builds the last MIC header block from */
+/* header fields. */
+/************************************************/
+static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists,
+ int qc_exists)
+{
+ int i;
+
+ for (i = 0; i<16; i++) mic_header2[i]= 0x00;
+
+ mic_header2[0] = mpdu[16]; /* A3 */
+ mic_header2[1] = mpdu[17];
+ mic_header2[2] = mpdu[18];
+ mic_header2[3] = mpdu[19];
+ mic_header2[4] = mpdu[20];
+ mic_header2[5] = mpdu[21];
+
+ mic_header2[6] = 0x00;
+ mic_header2[7] = 0x00; /* mpdu[23]; */
+
+ if (!qc_exists && a4_exists)
+ {
+ for (i = 0;i<6;i++) mic_header2[8+i] = mpdu[24+i]; /* A4 */
+
+ }
+
+ if (qc_exists && !a4_exists)
+ {
+ mic_header2[8] = mpdu[24] & 0x0f; /* mute bits 15 - 4 */
+ mic_header2[9] = mpdu[25] & 0x00;
+ }
+
+ if (qc_exists && a4_exists)
+ {
+ for (i = 0;i<6;i++) mic_header2[8+i] = mpdu[24+i]; /* A4 */
+
+ mic_header2[14] = mpdu[30] & 0x0f;
+ mic_header2[15] = mpdu[31] & 0x00;
+ }
+
+}
+
+/************************************************/
+/* construct_ctr_preload() */
+/* Builds the CTR-mode counter preload block */
+/* from header fields and the packet number. */
+/************************************************/
+static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists,
+ u8 *mpdu, u8 *pn_vector, int c)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ ctr_preload[i] = 0x00;
+
+ ctr_preload[0] = 0x01; /* flag */
+ if (qc_exists && a4_exists)
+ ctr_preload[1] = mpdu[30] & 0x0f; /* QoC_Control */
+ if (qc_exists && !a4_exists)
+ ctr_preload[1] = mpdu[24] & 0x0f;
+
+ for (i = 2; i < 8; i++)
+ ctr_preload[i] = mpdu[i + 8]; /* ctr_preload[2:7] = A2[0:5] = mpdu[10:15] */
+ for (i = 8; i < 14; i++)
+ ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
+ ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */
+ ctr_preload[15] = (unsigned char) (c % 256);
+
+}
+
+/************************************/
+/* bitwise_xor() */
+/* A 128 bit, bitwise exclusive or */
+/************************************/
+static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ out[i] = ina[i] ^ inb[i];
+}
+
+static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
+{
+ uint qc_exists, a4_exists, i, j, payload_remainder,
+ num_blocks, payload_index;
+ u8 pn_vector[6];
+ u8 mic_iv[16];
+ u8 mic_header1[16];
+ u8 mic_header2[16];
+ u8 ctr_preload[16];
+ /* Intermediate Buffers */
+ u8 chain_buffer[16];
+ u8 aes_out[16];
+ u8 padded_buffer[16];
+ u8 mic[8];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)pframe;
+ u16 frsubtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
+
+ memset((void *)mic_iv, 0, 16);
+ memset((void *)mic_header1, 0, 16);
+ memset((void *)mic_header2, 0, 16);
+ memset((void *)ctr_preload, 0, 16);
+ memset((void *)chain_buffer, 0, 16);
+ memset((void *)aes_out, 0, 16);
+ memset((void *)padded_buffer, 0, 16);
+
+ if ((hdrlen == sizeof(struct ieee80211_hdr_3addr) ||
+ (hdrlen == sizeof(struct ieee80211_qos_hdr))))
+ a4_exists = 0;
+ else
+ a4_exists = 1;
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ if ((frsubtype == IEEE80211_STYPE_DATA_CFACK) ||
+ (frsubtype == IEEE80211_STYPE_DATA_CFPOLL) ||
+ (frsubtype == IEEE80211_STYPE_DATA_CFACKPOLL)) {
+ qc_exists = 1;
+ if (hdrlen != sizeof(struct ieee80211_qos_hdr))
+ hdrlen += 2;
+ } else if ((frsubtype == IEEE80211_STYPE_QOS_DATA) ||
+ (frsubtype == IEEE80211_STYPE_QOS_DATA_CFACK) ||
+ (frsubtype == IEEE80211_STYPE_QOS_DATA_CFPOLL) ||
+ (frsubtype == IEEE80211_STYPE_QOS_DATA_CFACKPOLL)) {
+ if (hdrlen != sizeof(struct ieee80211_qos_hdr))
+ hdrlen += 2;
+ qc_exists = 1;
+ } else {
+ qc_exists = 0;
+ }
+ } else {
+ qc_exists = 0;
+ }
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+
+ construct_mic_iv(mic_iv, qc_exists, a4_exists, pframe, plen, pn_vector);
+
+ construct_mic_header1(mic_header1, hdrlen, pframe);
+ construct_mic_header2(mic_header2, pframe, a4_exists, qc_exists);
+
+ payload_remainder = plen % 16;
+ num_blocks = plen / 16;
+
+ /* Find start of payload */
+ payload_index = (hdrlen + 8);
+
+ /* Calculate MIC */
+ aes128k128d(key, mic_iv, aes_out);
+ bitwise_xor(aes_out, mic_header1, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ bitwise_xor(aes_out, mic_header2, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+
+ for (i = 0; i < num_blocks; i++) {
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
+
+ payload_index += 16;
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ /* Add on the final payload block if it needs padding */
+ if (payload_remainder > 0) {
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = pframe[payload_index++];
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ for (j = 0; j < 8; j++)
+ mic[j] = aes_out[j];
+
+ /* Insert MIC into payload */
+ for (j = 0; j < 8; j++)
+ pframe[payload_index+j] = mic[j];
+
+ payload_index = hdrlen + 8;
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
+ pframe, pn_vector, i+1);
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
+ for (j = 0; j < 16; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ if (payload_remainder > 0) {
+ /* If there is a short final block, then pad it,
+ * encrypt it and copy the unpadded part back
+ */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe,
+ pn_vector, num_blocks+1);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = pframe[payload_index+j];
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ /* Encrypt the MIC */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe,
+ pn_vector, 0);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < 8; j++)
+ padded_buffer[j] = pframe[j+hdrlen+8+plen];
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < 8; j++)
+ pframe[payload_index++] = chain_buffer[j];
+
+ return _SUCCESS;
+}
+
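+/*
+ * Encrypt every fragment of the xmit frame in place with aes_cipher(),
+ * using the group key for broadcast/multicast frames and the per-station
+ * unicast key otherwise. The length handed down excludes the 802.11
+ * header, the IV and the ICV.
+ */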
+u32 rtw_aes_encrypt23a(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
+{ /* exclude ICV */
+ /* Intermediate Buffers */
+ int curfragnum, length;
+ u32 prwskeylen;
+ u8 *pframe, *prwskey; /* *payload,*iv */
+ u8 hw_hdr_offset = 0;
+ struct sta_info *stainfo;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u32 res = _SUCCESS;
+
+ if (!pxmitframe->buf_addr)
+ return _FAIL;
+
+ hw_hdr_offset = TXDESC_OFFSET;
+
+ pframe = pxmitframe->buf_addr + hw_hdr_offset;
+
+ /* 4 start to encrypt each fragment */
+ if (pattrib->encrypt != _AES_)
+ return _FAIL;
+
+ if (pattrib->psta) {
+ stainfo = pattrib->psta;
+ } else {
+ DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
+ stainfo = rtw_get_stainfo23a(&padapter->stapriv, &pattrib->ra[0]);
+ }
+
+ if (!stainfo) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_aes_encrypt23a: stainfo == NULL!!!\n"));
+ DBG_8723A("%s, psta == NUL\n", __func__);
+ res = _FAIL;
+ goto out;
+ }
+ if (!(stainfo->state & _FW_LINKED)) {
+ DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n",
+ __func__, stainfo->state);
+ return _FAIL;
+ }
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_aes_encrypt23a: stainfo!= NULL!!!\n"));
+
+ if (is_multicast_ether_addr(pattrib->ra))
+ prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
+ else
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+
+ prwskeylen = 16;
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ /* 4 the last fragment */
+ if ((curfragnum + 1) == pattrib->nr_frags) {
+ length = pattrib->last_txcmdsz -
+ pattrib->hdrlen-pattrib->iv_len -
+ pattrib->icv_len;
+
+ aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
+ } else {
+ length = pxmitpriv->frag_len-pattrib->hdrlen -
+ pattrib->iv_len - pattrib->icv_len;
+
+ aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
+ pframe += pxmitpriv->frag_len;
+ pframe = PTR_ALIGN(pframe, 4);
+ }
+ }
+out:
+ return res;
+}
+
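+/*
+ * Decrypt the payload and MIC in counter mode, then recompute the
+ * CBC-MAC over a scratch copy of the frame and compare it with the
+ * received MIC; any mismatching byte makes the function return _FAIL.
+ */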
+static int aes_decipher(u8 *key, uint hdrlen,
+ u8 *pframe, uint plen)
+{
+ static u8 message[MAX_MSG_SIZE];
+ uint qc_exists, a4_exists, i, j, payload_remainder,
+ num_blocks, payload_index;
+ int res = _SUCCESS;
+ u8 pn_vector[6];
+ u8 mic_iv[16];
+ u8 mic_header1[16];
+ u8 mic_header2[16];
+ u8 ctr_preload[16];
+ /* Intermediate Buffers */
+ u8 chain_buffer[16];
+ u8 aes_out[16];
+ u8 padded_buffer[16];
+ u8 mic[8];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)pframe;
+ u16 frsubtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
+
+ memset((void *)mic_iv, 0, 16);
+ memset((void *)mic_header1, 0, 16);
+ memset((void *)mic_header2, 0, 16);
+ memset((void *)ctr_preload, 0, 16);
+ memset((void *)chain_buffer, 0, 16);
+ memset((void *)aes_out, 0, 16);
+ memset((void *)padded_buffer, 0, 16);
+
+ /* start to decrypt the payload */
+
+ num_blocks = (plen-8) / 16; /* plen includes llc, payload_length and mic */
+
+ payload_remainder = (plen-8) % 16;
+
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+
+ if ((hdrlen == sizeof(struct ieee80211_hdr_3addr) ||
+ (hdrlen == sizeof(struct ieee80211_qos_hdr))))
+ a4_exists = 0;
+ else
+ a4_exists = 1;
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ if ((frsubtype == IEEE80211_STYPE_DATA_CFACK) ||
+ (frsubtype == IEEE80211_STYPE_DATA_CFPOLL) ||
+ (frsubtype == IEEE80211_STYPE_DATA_CFACKPOLL)) {
+ qc_exists = 1;
+ if (hdrlen != sizeof(struct ieee80211_hdr_3addr))
+ hdrlen += 2;
+ } else if ((frsubtype == IEEE80211_STYPE_QOS_DATA) ||
+ (frsubtype == IEEE80211_STYPE_QOS_DATA_CFACK) ||
+ (frsubtype == IEEE80211_STYPE_QOS_DATA_CFPOLL) ||
+ (frsubtype == IEEE80211_STYPE_QOS_DATA_CFACKPOLL)) {
+ if (hdrlen != sizeof(struct ieee80211_hdr_3addr))
+ hdrlen += 2;
+ qc_exists = 1;
+ } else {
+ qc_exists = 0;
+ }
+ } else {
+ qc_exists = 0;
+ }
+
+ /* now, decrypt pframe with hdrlen offset and plen long */
+
+ payload_index = hdrlen + 8; /* 8 is for extiv */
+
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
+ pframe, pn_vector, i+1);
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
+
+ for (j = 0; j < 16; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ if (payload_remainder > 0) {
+ /* If there is a short final block, then pad it,
+ * encrypt it and copy the unpadded part back
+ */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe,
+ pn_vector, num_blocks+1);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = pframe[payload_index+j];
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ /* start to calculate the mic */
+ if ((hdrlen + plen + 8) <= MAX_MSG_SIZE)
+ memcpy(message, pframe, (hdrlen+plen+8)); /* 8 is for ext iv len */
+
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+
+ construct_mic_iv(mic_iv, qc_exists, a4_exists, message,
+ plen-8, pn_vector);
+
+ construct_mic_header1(mic_header1, hdrlen, message);
+ construct_mic_header2(mic_header2, message, a4_exists, qc_exists);
+
+ payload_remainder = (plen-8) % 16;
+ num_blocks = (plen-8) / 16;
+
+ /* Find start of payload */
+ payload_index = (hdrlen + 8);
+
+ /* Calculate MIC */
+ aes128k128d(key, mic_iv, aes_out);
+ bitwise_xor(aes_out, mic_header1, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ bitwise_xor(aes_out, mic_header2, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+
+ for (i = 0; i < num_blocks; i++) {
+ bitwise_xor(aes_out, &message[payload_index], chain_buffer);
+
+ payload_index += 16;
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ /* Add on the final payload block if it needs padding */
+ if (payload_remainder > 0) {
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = message[payload_index++];
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ for (j = 0 ; j < 8; j++)
+ mic[j] = aes_out[j];
+
+ /* Insert MIC into payload */
+ for (j = 0; j < 8; j++)
+ message[payload_index+j] = mic[j];
+
+ payload_index = hdrlen + 8;
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
+ message, pn_vector, i+1);
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &message[payload_index], chain_buffer);
+ for (j = 0; j < 16; j++)
+ message[payload_index++] = chain_buffer[j];
+ }
+
+ if (payload_remainder > 0) {
+ /* If there is a short final block, then pad it,
+ * encrypt it and copy the unpadded part back
+ */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
+ message, pn_vector, num_blocks+1);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = message[payload_index+j];
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ message[payload_index++] = chain_buffer[j];
+ }
+
+ /* Encrypt the MIC */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message,
+ pn_vector, 0);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < 8; j++)
+ padded_buffer[j] = message[j+hdrlen+8+plen-8];
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < 8; j++)
+ message[payload_index++] = chain_buffer[j];
+
+ /* compare the mic */
+ for (i = 0; i < 8; i++) {
+ if (pframe[hdrlen+8+plen-8+i] != message[hdrlen+8+plen-8+i]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("aes_decipher:mic check error mic[%d]: pframe(%x) != message(%x)\n",
+ i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]));
+ DBG_8723A("aes_decipher:mic check error mic[%d]: pframe(%x) != message(%x)\n",
+ i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]);
+ res = _FAIL;
+ }
+ }
+ return res;
+}
+
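+/*
+ * Pick the group key for broadcast/multicast frames (after checking that
+ * it is installed and that the key index matches) or the pairwise key
+ * otherwise, then hand the frame to aes_decipher().
+ */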
+u32 rtw_aes_decrypt23a(struct rtw_adapter *padapter, struct recv_frame *precvframe)
+{ /* exclude ICV */
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct sk_buff *skb = precvframe->pkt;
+ int length;
+ u8 *pframe, *prwskey; /* *payload,*iv */
+ u32 res = _SUCCESS;
+
+ pframe = skb->data;
+ /* 4 start to encrypt each fragment */
+ if (prxattrib->encrypt != _AES_)
+ return _FAIL;
+
+ stainfo = rtw_get_stainfo23a(&padapter->stapriv, &prxattrib->ta[0]);
+ if (!stainfo) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_aes_encrypt23a: stainfo == NULL!!!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_aes_decrypt23a: stainfo!= NULL!!!\n"));
+
+ if (is_multicast_ether_addr(prxattrib->ra)) {
+ /* in concurrent mode the group key must be decrypted in
+ * software, so this message was removed
+ */
+ if (!psecuritypriv->binstallGrpkey) {
+ res = _FAIL;
+ DBG_8723A("%s:rx bc/mc packets, but didn't install "
+ "group key!!!!!!!!!!\n", __func__);
+ goto exit;
+ }
+ prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
+ if (psecuritypriv->dot118021XGrpKeyid != prxattrib->key_index) {
+ DBG_8723A("not match packet_index =%d, install_index ="
+ "%d\n", prxattrib->key_index,
+ psecuritypriv->dot118021XGrpKeyid);
+ res = _FAIL;
+ goto exit;
+ }
+ } else {
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+ }
+
+ length = skb->len - prxattrib->hdrlen - prxattrib->iv_len;
+
+ res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
+exit:
+ return res;
+}
+
+void rtw_use_tkipkey_handler23a(void *FunctionContext)
+{
+ struct rtw_adapter *padapter = (struct rtw_adapter *)FunctionContext;
+
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("^^^rtw_use_tkipkey_handler23a ^^^\n"));
+ padapter->securitypriv.busetkipkey = true;
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("^^^rtw_use_tkipkey_handler23a padapter->securitypriv.busetkipkey =%d^^^\n",
+ padapter->securitypriv.busetkipkey));
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_sreset.c b/drivers/staging/rtl8723au/core/rtw_sreset.c
new file mode 100644
index 000000000000..4f7459203390
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_sreset.c
@@ -0,0 +1,255 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include <rtw_sreset.h>
+
+void sreset_init_value23a(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ mutex_init(&psrtpriv->silentreset_mutex);
+ psrtpriv->silent_reset_inprogress = false;
+ psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
+ psrtpriv->last_tx_time = 0;
+ psrtpriv->last_tx_complete_time = 0;
+}
+
+void sreset_reset_value23a(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ psrtpriv->silent_reset_inprogress = false;
+ psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
+ psrtpriv->last_tx_time = 0;
+ psrtpriv->last_tx_complete_time = 0;
+}
+
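+/*
+ * Sample REG_TXDMA_STATUS to detect TXDMA errors (0xeaeaeaea means the
+ * device is gone), report the accumulated error status with the USB
+ * read/write failure bits masked out, and then clear it.
+ */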
+u8 sreset_get_wifi_status23a(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+ u8 status = WIFI_STATUS_SUCCESS;
+ u32 val32 = 0;
+
+ if (psrtpriv->silent_reset_inprogress)
+ return status;
+ val32 = rtw_read32(padapter, REG_TXDMA_STATUS);
+ if (val32 == 0xeaeaeaea) {
+ psrtpriv->Wifi_Error_Status = WIFI_IF_NOT_EXIST;
+ } else if (val32 != 0) {
+ DBG_8723A("txdmastatu(%x)\n", val32);
+ psrtpriv->Wifi_Error_Status = WIFI_MAC_TXDMA_ERROR;
+ }
+
+ if (psrtpriv->Wifi_Error_Status != WIFI_STATUS_SUCCESS) {
+ DBG_8723A("==>%s error_status(0x%x)\n", __func__,
+ psrtpriv->Wifi_Error_Status);
+ status = (psrtpriv->Wifi_Error_Status &
+ ~(USB_READ_PORT_FAIL | USB_WRITE_PORT_FAIL));
+ }
+ DBG_8723A("==> %s wifi_status(0x%x)\n", __func__, status);
+
+ /* status restore */
+ psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
+
+ return status;
+}
+
+void sreset_set_wifi_error_status23a(struct rtw_adapter *padapter, u32 status)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->srestpriv.Wifi_Error_Status = status;
+}
+
+void sreset_set_trigger_point(struct rtw_adapter *padapter, s32 tgp)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->srestpriv.dbg_trigger_point = tgp;
+}
+
+bool sreset_inprogress(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ return pHalData->srestpriv.silent_reset_inprogress;
+}
+
+static void sreset_restore_security_station(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *mlmepriv = &padapter->mlmepriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta;
+ struct mlme_ext_info *pmlmeinfo = &padapter->mlmeextpriv.mlmext_info;
+ u8 val8;
+
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_8021X)
+ val8 = 0xcc;
+ else
+ val8 = 0xcf;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ if ((padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_) ||
+ (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+ psta = rtw_get_stainfo23a(pstapriv, get_bssid(mlmepriv));
+ if (psta == NULL) {
+ /* DEBUG_ERR(("Set wpa_set_encryption: Obtain Sta_info fail\n")); */
+ } else {
+ /* pairwise key */
+ rtw_setstakey_cmd23a(padapter, (unsigned char *)psta, true);
+ /* group key */
+ rtw_set_key23a(padapter, &padapter->securitypriv,
+ padapter->securitypriv.dot118021XGrpKeyid, 0);
+ }
+ }
+}
+
+static void sreset_restore_network_station(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *mlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u8 threshold;
+
+ rtw_setopmode_cmd23a(padapter, Ndis802_11Infrastructure);
+
+ /* TH = 1 => disable USB rx aggregation */
+ /* TH = 0 => enable USB rx aggregation, use init value */
+ if (mlmepriv->htpriv.ht_option) {
+ if (padapter->registrypriv.wifi_spec == 1)
+ threshold = 1;
+ else
+ threshold = 0;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
+ } else {
+ threshold = 1;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
+ }
+
+ set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ /* disable dynamic functions, such as high power, DIG */
+ /* Switch_DM_Func23a(padapter, DYNAMIC_FUNC_DISABLE, false); */
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BSSID, pmlmeinfo->network.MacAddress);
+
+ {
+ u8 join_type = 0;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ }
+
+ Set_MSR23a(padapter, (pmlmeinfo->state & 0x3));
+
+ mlmeext_joinbss_event_callback23a(padapter, 1);
+ /* restore Sequence No. */
+ rtw_write8(padapter, 0x4dc, padapter->xmitpriv.nqos_ssn);
+
+ sreset_restore_security_station(padapter);
+}
+
+static void sreset_restore_network_status(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *mlmepriv = &padapter->mlmepriv;
+
+ if (check_fwstate(mlmepriv, WIFI_STATION_STATE)) {
+ DBG_8723A(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_STATION_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(mlmepriv));
+ sreset_restore_network_station(padapter);
+#ifdef CONFIG_8723AU_AP_MODE
+ } else if (check_fwstate(mlmepriv, WIFI_AP_STATE)) {
+ DBG_8723A(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_AP_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(mlmepriv));
+ rtw_ap_restore_network(padapter);
+#endif
+ } else if (check_fwstate(mlmepriv, WIFI_ADHOC_STATE)) {
+ DBG_8723A(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_ADHOC_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(mlmepriv));
+ } else {
+ DBG_8723A(FUNC_ADPT_FMT" fwstate:0x%08x - ???\n", FUNC_ADPT_ARG(padapter), get_fwstate(mlmepriv));
+ }
+}
+
+static void sreset_stop_adapter(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ if (padapter == NULL)
+ return;
+
+ DBG_8723A(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+
+ if (!rtw_netif_queue_stopped(padapter->pnetdev))
+ netif_tx_stop_all_queues(padapter->pnetdev);
+
+ rtw_cancel_all_timer23a(padapter);
+
+ /* TODO: OS and HCI independent */
+ tasklet_kill(&pxmitpriv->xmit_tasklet);
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
+ rtw_scan_abort23a(padapter);
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
+ rtw23a_join_to_handler((unsigned long)padapter);
+}
+
+static void sreset_start_adapter(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ if (padapter == NULL)
+ return;
+
+ DBG_8723A(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ sreset_restore_network_status(padapter);
+
+ /* TODO: OS and HCI independent */
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+
+ mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
+ jiffies + msecs_to_jiffies(2000));
+
+ if (rtw_netif_queue_stopped(padapter->pnetdev))
+ netif_tx_wake_all_queues(padapter->pnetdev);
+}
+
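+/*
+ * The silent reset itself: stop the adapter (queues, timers, pending
+ * scan/join), cycle through IPS to reinitialize the hardware, then
+ * restart the adapter and restore the previous network state.
+ */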
+void sreset_reset(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ unsigned long start = jiffies;
+
+ DBG_8723A("%s\n", __func__);
+
+ psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
+
+ mutex_lock(&psrtpriv->silentreset_mutex);
+ psrtpriv->silent_reset_inprogress = true;
+ pwrpriv->change_rfpwrstate = rf_off;
+
+ sreset_stop_adapter(padapter);
+
+ ips_enter23a(padapter);
+ ips_leave23a(padapter);
+
+ sreset_start_adapter(padapter);
+ psrtpriv->silent_reset_inprogress = false;
+ mutex_unlock(&psrtpriv->silentreset_mutex);
+
+ DBG_8723A("%s done in %d ms\n", __func__,
+ jiffies_to_msecs(jiffies - start));
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_sta_mgt.c b/drivers/staging/rtl8723au/core/rtw_sta_mgt.c
new file mode 100644
index 000000000000..451b58f47287
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_sta_mgt.c
@@ -0,0 +1,509 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_STA_MGT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <xmit_osdep.h>
+#include <mlme_osdep.h>
+#include <sta_info.h>
+
+void _rtw_init_stainfo(struct sta_info *psta)
+{
+ memset((u8 *)psta, 0, sizeof(struct sta_info));
+ spin_lock_init(&psta->lock);
+ INIT_LIST_HEAD(&psta->list);
+ INIT_LIST_HEAD(&psta->hash_list);
+ _rtw_init_queue23a(&psta->sleep_q);
+ psta->sleepq_len = 0;
+ _rtw_init_sta_xmit_priv23a(&psta->sta_xmitpriv);
+ _rtw_init_sta_recv_priv23a(&psta->sta_recvpriv);
+#ifdef CONFIG_8723AU_AP_MODE
+ INIT_LIST_HEAD(&psta->asoc_list);
+ INIT_LIST_HEAD(&psta->auth_list);
+ psta->expire_to = 0;
+ psta->flags = 0;
+ psta->capability = 0;
+ psta->bpairwise_key_installed = false;
+ psta->nonerp_set = 0;
+ psta->no_short_slot_time_set = 0;
+ psta->no_short_preamble_set = 0;
+ psta->no_ht_gf_set = 0;
+ psta->no_ht_set = 0;
+ psta->ht_20mhz_set = 0;
+ psta->keep_alive_trycnt = 0;
+#endif /* CONFIG_8723AU_AP_MODE */
+}
+
+u32 _rtw_init_sta_priv23a(struct sta_priv *pstapriv)
+{
+ struct sta_info *psta;
+ s32 i;
+
+ pstapriv->pallocated_stainfo_buf = rtw_zvmalloc(sizeof(struct sta_info) * NUM_STA + 4);
+
+ if (!pstapriv->pallocated_stainfo_buf)
+ return _FAIL;
+
+ pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
+ ((unsigned long)(pstapriv->pallocated_stainfo_buf) & 3);
+ _rtw_init_queue23a(&pstapriv->free_sta_queue);
+ spin_lock_init(&pstapriv->sta_hash_lock);
+ pstapriv->asoc_sta_count = 0;
+ _rtw_init_queue23a(&pstapriv->sleep_q);
+ _rtw_init_queue23a(&pstapriv->wakeup_q);
+ psta = (struct sta_info *)(pstapriv->pstainfo_buf);
+
+ for (i = 0; i < NUM_STA; i++) {
+ _rtw_init_stainfo(psta);
+ INIT_LIST_HEAD(&pstapriv->sta_hash[i]);
+ list_add_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue));
+ psta++;
+ }
+#ifdef CONFIG_8723AU_AP_MODE
+ pstapriv->sta_dz_bitmap = 0;
+ pstapriv->tim_bitmap = 0;
+ INIT_LIST_HEAD(&pstapriv->asoc_list);
+ INIT_LIST_HEAD(&pstapriv->auth_list);
+ spin_lock_init(&pstapriv->asoc_list_lock);
+ spin_lock_init(&pstapriv->auth_list_lock);
+ pstapriv->asoc_list_cnt = 0;
+ pstapriv->auth_list_cnt = 0;
+ pstapriv->auth_to = 3; /* 3*2 = 6 sec */
+ pstapriv->assoc_to = 3;
+ /* pstapriv->expire_to = 900; 900*2 = 1800 sec = 30 min, expire after no any traffic. */
+ /* pstapriv->expire_to = 30; 30*2 = 60 sec = 1 min, expire after no any traffic. */
+ pstapriv->expire_to = 3; /* 3*2 = 6 sec */
+ pstapriv->max_num_sta = NUM_STA;
+#endif
+ return _SUCCESS;
+}
+
+inline int rtw_stainfo_offset23a(struct sta_priv *stapriv, struct sta_info *sta)
+{
+ int offset = (((u8 *)sta) - stapriv->pstainfo_buf) / sizeof(struct sta_info);
+
+ if (!stainfo_offset_valid(offset))
+ DBG_8723A("%s invalid offset(%d), out of range!!!", __func__, offset);
+ return offset;
+}
+
+inline struct sta_info *rtw_get_stainfo23a_by_offset23a(struct sta_priv *stapriv, int offset)
+{
+ if (!stainfo_offset_valid(offset))
+ DBG_8723A("%s invalid offset(%d), out of range!!!", __func__, offset);
+ return (struct sta_info *)(stapriv->pstainfo_buf + offset * sizeof(struct sta_info));
+}
+
+/* this function is used to free the memory of lock || sema for all stainfos */
+void rtw_mfree_all_stainfo(struct sta_priv *pstapriv)
+{
+ struct list_head *plist, *phead;
+ struct sta_info *psta;
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+
+ phead = get_list_head(&pstapriv->free_sta_queue);
+
+ /* nothing per-sta needs to be released here; just walk the free list */
+ list_for_each(plist, phead)
+ psta = container_of(plist, struct sta_info, list);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+}
+
+void rtw_mfree_sta_priv_lock(struct sta_priv *pstapriv)
+{
+ rtw_mfree_all_stainfo(pstapriv); /* be done before free sta_hash_lock */
+}
+
+u32 _rtw_free_sta_priv23a(struct sta_priv *pstapriv)
+{
+ struct list_head *phead, *plist, *ptmp;
+ struct sta_info *psta;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ int index;
+
+ if (pstapriv) {
+ /* delete all reordering_ctrl_timer */
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ for (index = 0; index < NUM_STA; index++) {
+ phead = &pstapriv->sta_hash[index];
+
+ list_for_each_safe(plist, ptmp, phead) {
+ int i;
+ psta = container_of(plist, struct sta_info,
+ hash_list);
+ for (i = 0; i < 16 ; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
+ }
+ }
+ }
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+ /*===============================*/
+
+ rtw_mfree_sta_priv_lock(pstapriv);
+
+ if (pstapriv->pallocated_stainfo_buf)
+ rtw_vmfree(pstapriv->pallocated_stainfo_buf,
+ sizeof(struct sta_info) * NUM_STA + 4);
+ }
+ return _SUCCESS;
+}
+
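+/*
+ * Take a free sta_info entry from free_sta_queue, initialize it and
+ * insert it into the hash bucket selected by wifi_mac_hash(hwaddr).
+ * The per-TID rx sequence caches are seeded with 0xffff and the 16
+ * A-MPDU reorder controls are (re)initialized.
+ */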
+struct sta_info *rtw_alloc_stainfo23a(struct sta_priv *pstapriv, u8 *hwaddr)
+{
+ struct list_head *phash_list;
+ struct sta_info *psta;
+ struct rtw_queue *pfree_sta_queue;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ uint tmp_aid;
+ s32 index;
+ int i = 0;
+ u16 wRxSeqInitialValue = 0xffff;
+
+ pfree_sta_queue = &pstapriv->free_sta_queue;
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+
+ if (_rtw_queue_empty23a(pfree_sta_queue)) {
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+ return NULL;
+ }
+ psta = container_of((&pfree_sta_queue->queue)->next, struct sta_info, list);
+
+ list_del_init(&psta->list);
+
+ tmp_aid = psta->aid;
+
+ _rtw_init_stainfo(psta);
+
+ psta->padapter = pstapriv->padapter;
+
+ memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
+
+ index = wifi_mac_hash(hwaddr);
+
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
+ ("rtw_alloc_stainfo23a: index = %x", index));
+ if (index >= NUM_STA) {
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
+ ("ERROR => rtw_alloc_stainfo23a: index >= NUM_STA"));
+ psta = NULL;
+ goto exit;
+ }
+ phash_list = &pstapriv->sta_hash[index];
+
+ list_add_tail(&psta->hash_list, phash_list);
+
+ pstapriv->asoc_sta_count++;
+
+ /*
+ * For the SMC router, the sequence number of the first packet of the
+ * WPS handshake will be 0. Such a packet would be dropped by
+ * recv_decache if 0x00 were used as the default tid_rxseq value, so
+ * initialize tid_rxseq to 0xffff instead.
+ */
+
+ for (i = 0; i < 16; i++)
+ memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i], &wRxSeqInitialValue, 2);
+
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
+ ("alloc number_%d stainfo with hwaddr = %pM\n",
+ pstapriv->asoc_sta_count, hwaddr));
+
+ init_addba_retry_timer23a(psta);
+
+ /* for A-MPDU Rx reordering buffer control */
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+
+ preorder_ctrl->padapter = pstapriv->padapter;
+
+ preorder_ctrl->enable = false;
+
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ /* preorder_ctrl->wsize_b = (NR_RECVBUFF-2); */
+ preorder_ctrl->wsize_b = 64;
+
+ _rtw_init_queue23a(&preorder_ctrl->pending_recvframe_queue);
+
+ rtw_init_recv_timer23a(preorder_ctrl);
+ }
+ /* init for DM */
+ psta->rssi_stat.UndecoratedSmoothedPWDB = (-1);
+ psta->rssi_stat.UndecoratedSmoothedCCK = (-1);
+
+ /* init for the sequence number of received management frame */
+ psta->RxMgmtFrameSeqNum = 0xffff;
+exit:
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+ return psta;
+}
+
+/* using pstapriv->sta_hash_lock to protect */
+u32 rtw_free_stainfo23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ struct rtw_queue *pfree_sta_queue;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_xmit_priv *pstaxmitpriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct hw_xmit *phwxmit;
+ int i;
+
+ if (psta == NULL)
+ goto exit;
+
+ spin_lock_bh(&psta->lock);
+ psta->state &= ~_FW_LINKED;
+ spin_unlock_bh(&psta->lock);
+
+ pfree_sta_queue = &pstapriv->free_sta_queue;
+
+ pstaxmitpriv = &psta->sta_xmitpriv;
+
+ spin_lock_bh(&pxmitpriv->lock);
+
+ rtw_free_xmitframe_queue23a(pxmitpriv, &psta->sleep_q);
+ psta->sleepq_len = 0;
+
+ /* vo */
+ rtw_free_xmitframe_queue23a(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
+ list_del_init(&pstaxmitpriv->vo_q.tx_pending);
+ phwxmit = pxmitpriv->hwxmits;
+ phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt;
+ pstaxmitpriv->vo_q.qcnt = 0;
+
+ /* vi */
+ rtw_free_xmitframe_queue23a(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
+ list_del_init(&pstaxmitpriv->vi_q.tx_pending);
+ phwxmit = pxmitpriv->hwxmits+1;
+ phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt;
+ pstaxmitpriv->vi_q.qcnt = 0;
+
+ /* be */
+ rtw_free_xmitframe_queue23a(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
+ phwxmit = pxmitpriv->hwxmits+2;
+ phwxmit->accnt -= pstaxmitpriv->be_q.qcnt;
+ pstaxmitpriv->be_q.qcnt = 0;
+
+ /* bk */
+ rtw_free_xmitframe_queue23a(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
+ list_del_init(&pstaxmitpriv->bk_q.tx_pending);
+ phwxmit = pxmitpriv->hwxmits+3;
+ phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt;
+ pstaxmitpriv->bk_q.qcnt = 0;
+
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ list_del_init(&psta->hash_list);
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
+ ("\n free number_%d stainfo with hwaddr = %pM\n",
+ pstapriv->asoc_sta_count, psta->hwaddr));
+ pstapriv->asoc_sta_count--;
+
+ /* re-init sta_info; 20061114 will be init in alloc_stainfo */
+ /* _rtw_init_sta_xmit_priv23a(&psta->sta_xmitpriv); */
+ /* _rtw_init_sta_recv_priv23a(&psta->sta_recvpriv); */
+
+ del_timer_sync(&psta->addba_retry_timer);
+
+ /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
+ for (i = 0; i < 16; i++) {
+ struct list_head *phead, *plist;
+ struct recv_frame *prframe;
+ struct rtw_queue *ppending_recvframe_queue;
+ struct rtw_queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
+
+ ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ spin_lock_bh(&ppending_recvframe_queue->lock);
+ phead = get_list_head(ppending_recvframe_queue);
+ plist = phead->next;
+
+ while (!list_empty(phead)) {
+ prframe = container_of(plist, struct recv_frame, list);
+ plist = plist->next;
+ list_del_init(&prframe->list);
+ rtw_free_recvframe23a(prframe, pfree_recv_queue);
+ }
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
+ }
+ if (!(psta->state & WIFI_AP_STATE))
+ rtw_hal_set_odm_var23a(padapter, HAL_ODM_STA_INFO, psta, false);
+#ifdef CONFIG_8723AU_AP_MODE
+ spin_lock_bh(&pstapriv->auth_list_lock);
+ if (!list_empty(&psta->auth_list)) {
+ list_del_init(&psta->auth_list);
+ pstapriv->auth_list_cnt--;
+ }
+ spin_unlock_bh(&pstapriv->auth_list_lock);
+
+ psta->expire_to = 0;
+
+ psta->sleepq_ac_len = 0;
+ psta->qos_info = 0;
+
+ psta->max_sp_len = 0;
+ psta->uapsd_bk = 0;
+ psta->uapsd_be = 0;
+ psta->uapsd_vi = 0;
+ psta->uapsd_vo = 0;
+
+ psta->has_legacy_ac = 0;
+
+ pstapriv->sta_dz_bitmap &= ~CHKBIT(psta->aid);
+ pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
+
+ if ((psta->aid > 0) && (pstapriv->sta_aid[psta->aid - 1] == psta)) {
+ pstapriv->sta_aid[psta->aid - 1] = NULL;
+ psta->aid = 0;
+ }
+#endif /* CONFIG_8723AU_AP_MODE */
+ list_add_tail(&psta->list, get_list_head(pfree_sta_queue));
+exit:
+ return _SUCCESS;
+}
+
+/* free all stainfo which in sta_hash[all] */
+void rtw_free_all_stainfo23a(struct rtw_adapter *padapter)
+{
+ struct list_head *plist, *phead, *ptmp;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo23a(padapter);
+ s32 index;
+
+ if (pstapriv->asoc_sta_count == 1)
+ return;
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+
+ for (index = 0; index < NUM_STA; index++) {
+ phead = &pstapriv->sta_hash[index];
+
+ list_for_each_safe(plist, ptmp, phead) {
+ psta = container_of(plist, struct sta_info, hash_list);
+
+ if (pbcmc_stainfo != psta)
+ rtw_free_stainfo23a(padapter, psta);
+ }
+ }
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+}
+
+/* any station allocated can be searched by hash list */
+struct sta_info *rtw_get_stainfo23a(struct sta_priv *pstapriv, u8 *hwaddr)
+{
+ struct list_head *plist, *phead;
+ struct sta_info *psta = NULL;
+ u32 index;
+ u8 *addr;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ if (hwaddr == NULL)
+ return NULL;
+
+ if (is_multicast_ether_addr(hwaddr))
+ addr = bc_addr;
+ else
+ addr = hwaddr;
+
+ index = wifi_mac_hash(addr);
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+
+ phead = &pstapriv->sta_hash[index];
+
+ list_for_each(plist, phead) {
+ psta = container_of(plist, struct sta_info, hash_list);
+
+ if (!memcmp(psta->hwaddr, addr, ETH_ALEN)) {
+ /* if found the matched address */
+ break;
+ }
+ psta = NULL;
+ }
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+ return psta;
+}
+
+u32 rtw_init_bcmc_stainfo23a(struct rtw_adapter *padapter)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta;
+ struct tx_servq *ptxservq;
+ u32 res = _SUCCESS;
+ unsigned char bcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ psta = rtw_alloc_stainfo23a(pstapriv, bcast_addr);
+ if (psta == NULL) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
+ ("rtw_alloc_stainfo23a fail"));
+ return res;
+ }
+ /* default broadcast & multicast use macid 1 */
+ psta->mac_id = 1;
+
+ ptxservq = &psta->sta_xmitpriv.be_q;
+ return _SUCCESS;
+}
+
+struct sta_info *rtw_get_bcmc_stainfo23a(struct rtw_adapter *padapter)
+{
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ psta = rtw_get_stainfo23a(pstapriv, bc_addr);
+ return psta;
+}
+
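+/*
+ * Check mac_addr against the AP-mode ACL: mode 1 accepts unless the
+ * address is listed (deny list), mode 2 denies unless the address is
+ * listed (accept list), any other mode accepts everything.
+ */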
+u8 rtw_access_ctrl23a(struct rtw_adapter *padapter, u8 *mac_addr)
+{
+ u8 res = true;
+#ifdef CONFIG_8723AU_AP_MODE
+ struct list_head *plist, *phead;
+ struct rtw_wlan_acl_node *paclnode;
+ u8 match = false;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ spin_lock_bh(&pacl_node_q->lock);
+ phead = get_list_head(pacl_node_q);
+
+ list_for_each(plist, phead) {
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+
+ if (!memcmp(paclnode->addr, mac_addr, ETH_ALEN)) {
+ if (paclnode->valid) {
+ match = true;
+ break;
+ }
+ }
+ }
+ spin_unlock_bh(&pacl_node_q->lock);
+
+ if (pacl_list->mode == 1)/* accept unless in deny list */
+ res = (match) ? false : true;
+ else if (pacl_list->mode == 2)/* deny unless in accept list */
+ res = (match) ? true : false;
+ else
+ res = true;
+#endif
+ return res;
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_wlan_util.c b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
new file mode 100644
index 000000000000..0dfcfbce3b52
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
@@ -0,0 +1,1760 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_WLAN_UTIL_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <linux/ieee80211.h>
+#include <wifi.h>
+
+static unsigned char ARTHEROS_OUI1[] = {0x00, 0x03, 0x7f};
+static unsigned char ARTHEROS_OUI2[] = {0x00, 0x13, 0x74};
+
+static unsigned char BROADCOM_OUI1[] = {0x00, 0x10, 0x18};
+static unsigned char BROADCOM_OUI2[] = {0x00, 0x0a, 0xf7};
+
+static unsigned char CISCO_OUI[] = {0x00, 0x40, 0x96};
+static unsigned char MARVELL_OUI[] = {0x00, 0x50, 0x43};
+static unsigned char RALINK_OUI[] = {0x00, 0x0c, 0x43};
+static unsigned char REALTEK_OUI[] = {0x00, 0xe0, 0x4c};
+static unsigned char AIRGOCAP_OUI[] = {0x00, 0x0a, 0xf5};
+static unsigned char EPIGRAM_OUI[] = {0x00, 0x90, 0x4c};
+
+unsigned char REALTEK_96B_IE23A[] = {0x00, 0xe0, 0x4c, 0x02, 0x01, 0x20};
+
+#define R2T_PHY_DELAY (0)
+
+/* define WAIT_FOR_BCN_TO_MIN (3000) */
+#define WAIT_FOR_BCN_TO_MIN (6000)
+#define WAIT_FOR_BCN_TO_MAX (20000)
+
+static u8 rtw_basic_rate_cck[4] = {
+ IEEE80211_CCK_RATE_1MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_5MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB|IEEE80211_BASIC_RATE_MASK
+};
+
+static u8 rtw_basic_rate_ofdm[3] = {
+ IEEE80211_OFDM_RATE_6MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_24MB|IEEE80211_BASIC_RATE_MASK
+};
+
+static u8 rtw_basic_rate_mix[7] = {
+ IEEE80211_CCK_RATE_1MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_5MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_6MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_24MB|IEEE80211_BASIC_RATE_MASK
+};
+
+int cckrates_included23a(unsigned char *rate, int ratelen)
+{
+ int i;
+
+ for (i = 0; i < ratelen; i++) {
+ if ((rate[i] & 0x7f) == 2 || (rate[i] & 0x7f) == 4 ||
+ (rate[i] & 0x7f) == 11 || (rate[i] & 0x7f) == 22)
+ return true;
+ }
+
+ return false;
+}
+
+int cckratesonly_included23a(unsigned char *rate, int ratelen)
+{
+ int i;
+
+ for (i = 0; i < ratelen; i++) {
+ if ((rate[i] & 0x7f) != 2 && (rate[i] & 0x7f) != 4 &&
+ (rate[i] & 0x7f) != 11 && (rate[i] & 0x7f) != 22)
+ return false;
+ }
+
+ return true;
+}
+
+unsigned char networktype_to_raid23a(unsigned char network_type)
+{
+ unsigned char raid;
+
+ switch (network_type) {
+ case WIRELESS_11B:
+ raid = RATR_INX_WIRELESS_B;
+ break;
+ case WIRELESS_11A:
+ case WIRELESS_11G:
+ raid = RATR_INX_WIRELESS_G;
+ break;
+ case WIRELESS_11BG:
+ raid = RATR_INX_WIRELESS_GB;
+ break;
+ case WIRELESS_11_24N:
+ case WIRELESS_11_5N:
+ raid = RATR_INX_WIRELESS_N;
+ break;
+ case WIRELESS_11A_5N:
+ case WIRELESS_11G_24N:
+ raid = RATR_INX_WIRELESS_NG;
+ break;
+ case WIRELESS_11BG_24N:
+ raid = RATR_INX_WIRELESS_NGB;
+ break;
+ default:
+ raid = RATR_INX_WIRELESS_GB;
+ break;
+ }
+ return raid;
+}
+
+u8 judge_network_type23a(struct rtw_adapter *padapter, unsigned char *rate, int ratelen)
+{
+ u8 network_type = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if (pmlmeext->cur_channel > 14) {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_5N;
+ network_type |= WIRELESS_11A;
+ } else {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_24N;
+
+ if (cckratesonly_included23a(rate, ratelen))
+ network_type |= WIRELESS_11B;
+ else if (cckrates_included23a(rate, ratelen))
+ network_type |= WIRELESS_11BG;
+ else
+ network_type |= WIRELESS_11G;
+ }
+ return network_type;
+}
+
+unsigned char ratetbl_val_2wifirate(unsigned char rate)
+{
+ unsigned char val = 0;
+
+ switch (rate & 0x7f) {
+ case 0:
+ val = IEEE80211_CCK_RATE_1MB;
+ break;
+ case 1:
+ val = IEEE80211_CCK_RATE_2MB;
+ break;
+ case 2:
+ val = IEEE80211_CCK_RATE_5MB;
+ break;
+ case 3:
+ val = IEEE80211_CCK_RATE_11MB;
+ break;
+ case 4:
+ val = IEEE80211_OFDM_RATE_6MB;
+ break;
+ case 5:
+ val = IEEE80211_OFDM_RATE_9MB;
+ break;
+ case 6:
+ val = IEEE80211_OFDM_RATE_12MB;
+ break;
+ case 7:
+ val = IEEE80211_OFDM_RATE_18MB;
+ break;
+ case 8:
+ val = IEEE80211_OFDM_RATE_24MB;
+ break;
+ case 9:
+ val = IEEE80211_OFDM_RATE_36MB;
+ break;
+ case 10:
+ val = IEEE80211_OFDM_RATE_48MB;
+ break;
+ case 11:
+ val = IEEE80211_OFDM_RATE_54MB;
+ break;
+ }
+ return val;
+}
+
+int is_basicrate(struct rtw_adapter *padapter, unsigned char rate)
+{
+ int i;
+ unsigned char val;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ for (i = 0; i < NumRates; i++) {
+ val = pmlmeext->basicrate[i];
+
+ if ((val != 0xff) && (val != 0xfe)) {
+ if (rate == ratetbl_val_2wifirate(val))
+ return true;
+ }
+ }
+
+ return false;
+}
+
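+/*
+ * Convert the driver rate table into an 802.11 supported-rates set:
+ * 0xff terminates the table, 0xfe marks an unused slot, and rates that
+ * are also basic rates get the basic-rate bit set.
+ */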
+unsigned int ratetbl2rateset(struct rtw_adapter *padapter, unsigned char *rateset)
+{
+ int i;
+ unsigned char rate;
+ unsigned int len = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ for (i = 0; i < NumRates; i++) {
+ rate = pmlmeext->datarate[i];
+
+ switch (rate) {
+ case 0xff:
+ return len;
+ case 0xfe:
+ continue;
+ default:
+ rate = ratetbl_val_2wifirate(rate);
+
+ if (is_basicrate(padapter, rate))
+ rate |= IEEE80211_BASIC_RATE_MASK;
+
+ rateset[len] = rate;
+ len++;
+ break;
+ }
+ }
+ return len;
+}
+
+void get_rate_set23a(struct rtw_adapter *padapter, unsigned char *pbssrate, int *bssrate_len)
+{
+ unsigned char supportedrates[NumRates];
+
+ memset(supportedrates, 0, NumRates);
+ *bssrate_len = ratetbl2rateset(padapter, supportedrates);
+ memcpy(pbssrate, supportedrates, *bssrate_len);
+}
+
+void UpdateBrateTbl23a(struct rtw_adapter *Adapter, u8 *mBratesOS)
+{
+ u8 i;
+ u8 rate;
+
+ /* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ rate = mBratesOS[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ case IEEE80211_OFDM_RATE_6MB:
+ case IEEE80211_OFDM_RATE_12MB:
+ case IEEE80211_OFDM_RATE_24MB:
+ mBratesOS[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void Update23aTblForSoftAP(u8 *bssrateset, u32 bssratelen)
+{
+ u8 i;
+ u8 rate;
+
+ for (i = 0; i < bssratelen; i++) {
+ rate = bssrateset[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ }
+ }
+}
+
+void Save_DM_Func_Flag23a(struct rtw_adapter *padapter)
+{
+ u8 bSaveFlag = true;
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&bSaveFlag));
+}
+
+void Restore_DM_Func_Flag23a(struct rtw_adapter *padapter)
+{
+ u8 bSaveFlag = false;
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&bSaveFlag));
+}
+
+void Switch_DM_Func23a(struct rtw_adapter *padapter, unsigned long mode, u8 enable)
+{
+ if (enable)
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_DM_FUNC_SET, (u8 *)(&mode));
+ else
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_DM_FUNC_CLR, (u8 *)(&mode));
+}
+
+static void Set_NETYPE0_MSR(struct rtw_adapter *padapter, u8 type)
+{
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_MEDIA_STATUS, (u8 *)(&type));
+}
+
+void Set_MSR23a(struct rtw_adapter *padapter, u8 type)
+{
+ Set_NETYPE0_MSR(padapter, type);
+}
+
+inline u8 rtw_get_oper_ch23a(struct rtw_adapter *adapter)
+{
+ return adapter_to_dvobj(adapter)->oper_channel;
+}
+
+inline void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch)
+{
+ adapter_to_dvobj(adapter)->oper_channel = ch;
+}
+
+inline u8 rtw_get_oper_bw23a(struct rtw_adapter *adapter)
+{
+ return adapter_to_dvobj(adapter)->oper_bwmode;
+}
+
+inline void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw)
+{
+ adapter_to_dvobj(adapter)->oper_bwmode = bw;
+}
+
+inline u8 rtw_get_oper_ch23aoffset(struct rtw_adapter *adapter)
+{
+ return adapter_to_dvobj(adapter)->oper_ch_offset;
+}
+
+inline void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset)
+{
+ adapter_to_dvobj(adapter)->oper_ch_offset = offset;
+}
+
+void SelectChannel23a(struct rtw_adapter *padapter, unsigned char channel)
+{
+ mutex_lock(&adapter_to_dvobj(padapter)->setch_mutex);
+
+ /* saved channel info */
+ rtw_set_oper_ch23a(padapter, channel);
+
+ rtw_hal_set_chan23a(padapter, channel);
+
+ mutex_unlock(&adapter_to_dvobj(padapter)->setch_mutex);
+}
+
+void SetBWMode23a(struct rtw_adapter *padapter, unsigned short bwmode, unsigned char channel_offset)
+{
+ mutex_lock(&adapter_to_dvobj(padapter)->setbw_mutex);
+
+ /* saved bw info */
+ rtw_set_oper_bw23a(padapter, bwmode);
+ rtw_set_oper_ch23aoffset23a(padapter, channel_offset);
+
+ rtw_hal_set_bwmode23a(padapter, (enum ht_channel_width)bwmode,
+ channel_offset);
+
+ mutex_unlock(&adapter_to_dvobj(padapter)->setbw_mutex);
+}
+
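+/*
+ * Save the requested channel/bandwidth/offset and program the hardware.
+ * For 40 MHz the center channel is the primary channel +2 (lower
+ * offset) or -2 (upper offset); for 20 MHz it is the primary channel
+ * itself.
+ */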
+void set_channel_bwmode23a(struct rtw_adapter *padapter, unsigned char channel,
+ unsigned char channel_offset, unsigned short bwmode)
+{
+ u8 center_ch;
+
+ if (padapter->bNotifyChannelChange)
+ DBG_8723A("[%s] ch = %d, offset = %d, bwmode = %d\n", __func__, channel, channel_offset, bwmode);
+
+ if ((bwmode == HT_CHANNEL_WIDTH_20) ||
+ (channel_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)) {
+ /* SelectChannel23a(padapter, channel); */
+ center_ch = channel;
+ } else {
+ /* switch to the proper channel */
+ if (channel_offset == HAL_PRIME_CHNL_OFFSET_LOWER) {
+ /* SelectChannel23a(padapter, channel + 2); */
+ center_ch = channel + 2;
+ } else {
+ /* SelectChannel23a(padapter, channel - 2); */
+ center_ch = channel - 2;
+ }
+ }
+
+ /* set Channel */
+ mutex_lock(&adapter_to_dvobj(padapter)->setch_mutex);
+
+ /* saved channel/bw info */
+ rtw_set_oper_ch23a(padapter, channel);
+ rtw_set_oper_bw23a(padapter, bwmode);
+ rtw_set_oper_ch23aoffset23a(padapter, channel_offset);
+
+ rtw_hal_set_chan23a(padapter, center_ch); /* set center channel */
+
+ mutex_unlock(&adapter_to_dvobj(padapter)->setch_mutex);
+
+ SetBWMode23a(padapter, bwmode, channel_offset);
+}
+
+int get_bsstype23a(unsigned short capability)
+{
+ if (capability & BIT(0))
+ return WIFI_FW_AP_STATE;
+ else if (capability & BIT(1))
+ return WIFI_FW_ADHOC_STATE;
+ return 0;
+}
+
+inline u8 *get_my_bssid23a(struct wlan_bssid_ex *pnetwork)
+{
+ return pnetwork->MacAddress;
+}
+
+u16 get_beacon_interval23a(struct wlan_bssid_ex *bss)
+{
+ unsigned short val;
+
+ memcpy((unsigned char *)&val,
+ rtw_get_beacon_interval23a_from_ie(bss->IEs), 2);
+
+ return le16_to_cpu(val);
+}
+
+int is_client_associated_to_ap23a(struct rtw_adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+
+ if (!padapter)
+ return _FAIL;
+
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) &&
+ ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE))
+ return true;
+ else
+ return _FAIL;
+}
+
+int is_client_associated_to_ibss23a(struct rtw_adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) &&
+ ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE))
+ return true;
+ else
+ return _FAIL;
+}
+
+int is_IBSS_empty23a(struct rtw_adapter *padapter)
+{
+ unsigned int i;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
+ if (pmlmeinfo->FW_sta_info[i].status == 1)
+ return _FAIL;
+ }
+
+ return true;
+}
+
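+/*
+ * Wait up to four beacon intervals for a beacon, clamped between
+ * WAIT_FOR_BCN_TO_MIN and WAIT_FOR_BCN_TO_MAX.
+ */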
+unsigned int decide_wait_for_beacon_timeout23a(unsigned int bcn_interval)
+{
+ if ((bcn_interval << 2) < WAIT_FOR_BCN_TO_MIN)
+ return WAIT_FOR_BCN_TO_MIN;
+ else if ((bcn_interval << 2) > WAIT_FOR_BCN_TO_MAX)
+ return WAIT_FOR_BCN_TO_MAX;
+ else
+ return bcn_interval << 2;
+}
+
+void CAM_empty_entry23a(struct rtw_adapter *Adapter, u8 ucIndex)
+{
+ rtw_hal_set_hwreg23a(Adapter, HW_VAR_CAM_EMPTY_ENTRY, (u8 *)(&ucIndex));
+}
+
+void invalidate_cam_all23a(struct rtw_adapter *padapter)
+{
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
+}
+
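+/*
+ * Program one 8-dword CAM entry, written high-to-low through
+ * HW_VAR_CAM_WRITE: dword 0 holds the control field and the first two
+ * MAC address bytes, dword 1 the remaining four MAC bytes, and dwords
+ * 2-5 the 16-byte key.
+ */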
+void write_cam23a(struct rtw_adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key)
+{
+ unsigned int i, val, addr;
+ int j;
+ u32 cam_val[2];
+
+ addr = entry << 3;
+
+ for (j = 5; j >= 0; j--) {
+ switch (j) {
+ case 0:
+ val = (ctrl | (mac[0] << 16) | (mac[1] << 24));
+ break;
+ case 1:
+ val = (mac[2] | (mac[3] << 8) | (mac[4] << 16) | (mac[5] << 24));
+ break;
+ default:
+ i = (j - 2) << 2;
+ val = (key[i] | (key[i+1] << 8) | (key[i+2] << 16) | (key[i+3] << 24));
+ break;
+ }
+
+ cam_val[0] = val;
+ cam_val[1] = addr + (unsigned int)j;
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_CAM_WRITE, (u8 *)cam_val);
+
+ /* rtw_write32(padapter, WCAMI, val); */
+
+ /* cmd = CAM_POLLINIG | CAM_WRITE | (addr + j); */
+ /* rtw_write32(padapter, RWCAM, cmd); */
+
+ /* DBG_8723A("%s => cam write: %x, %x\n", __func__, cmd, val); */
+
+ }
+}
+
+void clear_cam_entry23a(struct rtw_adapter *padapter, u8 entry)
+{
+ unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ unsigned char null_key[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ write_cam23a(padapter, entry, 0, null_sta, null_key);
+}
+
+int allocate_fw_sta_entry23a(struct rtw_adapter *padapter)
+{
+ unsigned int mac_id;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ for (mac_id = IBSS_START_MAC_ID; mac_id < NUM_STA; mac_id++) {
+ if (pmlmeinfo->FW_sta_info[mac_id].status == 0) {
+ pmlmeinfo->FW_sta_info[mac_id].status = 1;
+ pmlmeinfo->FW_sta_info[mac_id].retry = 0;
+ break;
+ }
+ }
+
+ return mac_id;
+}
+
+void flush_all_cam_entry23a(struct rtw_adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
+
+ memset((u8 *)(pmlmeinfo->FW_sta_info), 0, sizeof(pmlmeinfo->FW_sta_info));
+}
+
+#ifdef CONFIG_8723AU_P2P
+int WFD_info_handler(struct rtw_adapter *padapter, struct ndis_802_11_var_ies *pIE)
+{
+ struct wifidirect_info *pwdinfo;
+ u8 wfd_ie[128] = {0x00};
+ u32 wfd_ielen = 0;
+
+ pwdinfo = &padapter->wdinfo;
+ if (rtw_get_wfd_ie((u8 *) pIE, pIE->Length, wfd_ie, &wfd_ielen)) {
+ u8 attr_content[10] = { 0x00 };
+ u32 attr_contentlen = 0;
+
+ DBG_8723A("[%s] Found WFD IE\n", __func__);
+ rtw_get_wfd_attr_content(wfd_ie, wfd_ielen, WFD_ATTR_DEVICE_INFO, attr_content, &attr_contentlen);
+ if (attr_contentlen) {
+ pwdinfo->wfd_info->peer_rtsp_ctrlport = get_unaligned_be16(attr_content + 2);
+ DBG_8723A("[%s] Peer PORT NUM = %d\n", __func__, pwdinfo->wfd_info->peer_rtsp_ctrlport);
+ return true;
+ }
+ } else {
+ DBG_8723A("[%s] NO WFD IE\n", __func__);
+ }
+ return _FAIL;
+}
+#endif
+
+int WMM_param_handler23a(struct rtw_adapter *padapter, struct ndis_802_11_var_ies *pIE)
+{
+ /* struct registry_priv *pregpriv = &padapter->registrypriv; */
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if (pmlmepriv->qospriv.qos_option == 0) {
+ pmlmeinfo->WMM_enable = 0;
+ return _FAIL;
+ }
+
+ pmlmeinfo->WMM_enable = 1;
+ memcpy(&pmlmeinfo->WMM_param, (pIE->data + 6),
+ sizeof(struct WMM_para_element));
+ return true;
+}
+
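+/*
+ * Unpack the stored per-AC WMM parameters and write each one to the
+ * hardware as a packed 32-bit value: AIFS in bits 0-7, ECWmin in 8-11,
+ * ECWmax in 12-15 and the TXOP limit in bits 16-31. When wifi_spec is
+ * set, the ACs are additionally sorted into wmm_para_seq[] by ascending
+ * CW/AIFS, a larger TXOP winning ties.
+ */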
+void WMMOnAssocRsp23a(struct rtw_adapter *padapter)
+{
+ u8 ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
+ u8 acm_mask;
+ u16 TXOP;
+ u32 acParm, i;
+ u32 edca[4], inx[4];
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ if (pmlmeinfo->WMM_enable == 0) {
+ padapter->mlmepriv.acm_mask = 0;
+ return;
+ }
+
+ acm_mask = 0;
+
+ if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
+ aSifsTime = 10;
+ else
+ aSifsTime = 16;
+
+ for (i = 0; i < 4; i++) {
+ ACI = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 5) & 0x03;
+ ACM = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 4) & 0x01;
+
+ /* AIFS = AIFSN * slot time + SIFS - r2t phy delay */
+ AIFS = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN & 0x0f) * pmlmeinfo->slotTime + aSifsTime;
+
+ ECWMin = (pmlmeinfo->WMM_param.ac_param[i].CW & 0x0f);
+ ECWMax = (pmlmeinfo->WMM_param.ac_param[i].CW & 0xf0) >> 4;
+ TXOP = le16_to_cpu(pmlmeinfo->WMM_param.ac_param[i].TXOP_limit);
+
+ acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);
+
+ switch (ACI) {
+ case 0x0:
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
+ acm_mask |= (ACM ? BIT(1) : 0);
+ edca[XMIT_BE_QUEUE] = acParm;
+ break;
+ case 0x1:
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acParm));
+ /* acm_mask |= (ACM? BIT(0):0); */
+ edca[XMIT_BK_QUEUE] = acParm;
+ break;
+ case 0x2:
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acParm));
+			acm_mask |= (ACM ? BIT(2) : 0);
+ edca[XMIT_VI_QUEUE] = acParm;
+ break;
+ case 0x3:
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acParm));
+			acm_mask |= (ACM ? BIT(3) : 0);
+ edca[XMIT_VO_QUEUE] = acParm;
+ break;
+ }
+
+ DBG_8723A("WMM(%x): %x, %x\n", ACI, ACM, acParm);
+ }
+
+ if (padapter->registrypriv.acm_method == 1)
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_ACM_CTRL, (u8 *)(&acm_mask));
+ else
+ padapter->mlmepriv.acm_mask = acm_mask;
+
+ inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
+
+ if (pregpriv->wifi_spec == 1) {
+		u32 j, tmp, change_inx = false;
+
+ /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
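+		/* Sort the four ACs by ascending AIFS/CW (low 16 bits of edca[]), breaking ties in favour of the larger TXOP, so wmm_para_seq[] lists the queues in priority order. */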
+ for (i = 0; i < 4; i++) {
+ for (j = i+1; j < 4; j++) {
+ /* compare CW and AIFS */
+ if ((edca[j] & 0xFFFF) < (edca[i] & 0xFFFF)) {
+ change_inx = true;
+ } else if ((edca[j] & 0xFFFF) == (edca[i] & 0xFFFF)) {
+ /* compare TXOP */
+ if ((edca[j] >> 16) > (edca[i] >> 16))
+ change_inx = true;
+ }
+
+ if (change_inx) {
+ tmp = edca[i];
+ edca[i] = edca[j];
+ edca[j] = tmp;
+
+ tmp = inx[i];
+ inx[i] = inx[j];
+ inx[j] = tmp;
+
+ change_inx = false;
+ }
+ }
+ }
+ }
+
+	for (i = 0; i < 4; i++) {
+ pxmitpriv->wmm_para_seq[i] = inx[i];
+ DBG_8723A("wmm_para_seq(%d): %d\n", i, pxmitpriv->wmm_para_seq[i]);
+ }
+
+ return;
+}
+
+static void bwmode_update_check(struct rtw_adapter *padapter, struct ndis_802_11_var_ies * pIE)
+{
+ struct HT_info_element *pHT_info;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ unsigned char new_bwmode;
+ unsigned char new_ch_offset;
+
+ if (!pIE)
+ return;
+ if (!phtpriv->ht_option)
+ return;
+ if (pIE->Length > sizeof(struct HT_info_element))
+ return;
+
+ pHT_info = (struct HT_info_element *)pIE->data;
+
+ if ((pHT_info->infos[0] & BIT(2)) && pregistrypriv->cbw40_enable) {
+ new_bwmode = HT_CHANNEL_WIDTH_40;
+
+ switch (pHT_info->infos[0] & 0x3) {
+ case 1:
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+ case 3:
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+ default:
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+ } else {
+ new_bwmode = HT_CHANNEL_WIDTH_20;
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+
+	if (new_bwmode != pmlmeext->cur_bwmode ||
+	    new_ch_offset != pmlmeext->cur_ch_offset) {
+ pmlmeinfo->bwmode_updated = true;
+
+ pmlmeext->cur_bwmode = new_bwmode;
+ pmlmeext->cur_ch_offset = new_ch_offset;
+
+ /* update HT info also */
+ HT_info_handler23a(padapter, pIE);
+ } else {
+ pmlmeinfo->bwmode_updated = false;
+ }
+
+ if (pmlmeinfo->bwmode_updated) {
+ struct sta_info *psta;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
+
+ /* update ap's stainfo */
+ psta = rtw_get_stainfo23a(pstapriv, cur_network->MacAddress);
+ if (psta) {
+ struct ht_priv *phtpriv_sta = &psta->htpriv;
+
+ if (phtpriv_sta->ht_option) {
+ /* bwmode */
+ phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
+ phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
+ } else {
+ phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_20;
+ phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+
+ }
+ }
+}
+
+void HT_caps_handler23a(struct rtw_adapter *padapter, struct ndis_802_11_var_ies * pIE)
+{
+ unsigned int i;
+ u8 rf_type;
+ u8 max_AMPDU_len, min_MPDU_spacing;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+	if (!pIE)
+		return;
+
+	if (!phtpriv->ht_option)
+		return;
+
+ pmlmeinfo->HT_caps_enable = 1;
+
+ for (i = 0; i < (pIE->Length); i++) {
+ if (i != 2) {
+ /* Commented by Albert 2010/07/12 */
+ /* Got the endian issue here. */
+ pmlmeinfo->HT_caps.u.HT_cap[i] &= (pIE->data[i]);
+ } else {
+ /* modify from fw by Thomas 2010/11/17 */
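+			/* Byte 2 is the A-MPDU parameters field: keep the smaller maximum A-MPDU length exponent (bits 0-1) and the larger minimum MPDU start spacing (bits 2-4) of the local and peer values. */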
+ if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3) > (pIE->data[i] & 0x3))
+ max_AMPDU_len = (pIE->data[i] & 0x3);
+ else
+ max_AMPDU_len = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3);
+
+ if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) > (pIE->data[i] & 0x1c))
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c);
+ else
+ min_MPDU_spacing = (pIE->data[i] & 0x1c);
+
+ pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para = max_AMPDU_len | min_MPDU_spacing;
+ }
+ }
+
+ /* Commented by Albert 2010/07/12 */
+ /* Have to handle the endian issue after copying. */
+	/* HT_ext_caps is not used yet. */
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info = le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info);
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_ext_caps = le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_ext_caps);
+
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ /* update the MCS rates */
+ for (i = 0; i < 16; i++) {
+ if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R23A[i];
+ else
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R23A[i];
+ }
+ return;
+}
+
+void HT_info_handler23a(struct rtw_adapter *padapter, struct ndis_802_11_var_ies * pIE)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+	if (!pIE)
+		return;
+
+	if (!phtpriv->ht_option)
+		return;
+
+ if (pIE->Length > sizeof(struct HT_info_element))
+ return;
+
+ pmlmeinfo->HT_info_enable = 1;
+ memcpy(&pmlmeinfo->HT_info, pIE->data, pIE->Length);
+ return;
+}
+
+void HTOnAssocRsp23a(struct rtw_adapter *padapter)
+{
+ unsigned char max_AMPDU_len;
+ unsigned char min_MPDU_spacing;
+ /* struct registry_priv *pregpriv = &padapter->registrypriv; */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ DBG_8723A("%s\n", __func__);
+
+ if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable)) {
+ pmlmeinfo->HT_enable = 1;
+ } else {
+ pmlmeinfo->HT_enable = 0;
+ /* set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
+ return;
+ }
+
+ /* handle A-MPDU parameter field */
+ /*
+ AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ AMPDU_para [4:2]:Min MPDU Start Spacing
+ */
+ max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
+
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+}
+
+void ERP_IE_handler23a(struct rtw_adapter *padapter, struct ndis_802_11_var_ies * pIE)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+	if (pIE->Length > 1)
+ return;
+
+ pmlmeinfo->ERP_enable = 1;
+ memcpy(&pmlmeinfo->ERP_IE, pIE->data, pIE->Length);
+}
+
+void VCS_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ switch (pregpriv->vrtl_carrier_sense) { /* 0:off 1:on 2:auto */
+ case 0: /* off */
+ psta->rtsen = 0;
+ psta->cts2self = 0;
+ break;
+ case 1: /* on */
+ if (pregpriv->vcs_type == 1) { /* 1:RTS/CTS 2:CTS to self */
+ psta->rtsen = 1;
+ psta->cts2self = 0;
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 1;
+ }
+ break;
+ case 2: /* auto */
+ default:
+ if ((pmlmeinfo->ERP_enable) && (pmlmeinfo->ERP_IE & BIT(1))) {
+ if (pregpriv->vcs_type == 1) {
+ psta->rtsen = 1;
+ psta->cts2self = 0;
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 1;
+ }
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 0;
+ }
+ break;
+ }
+}
+
+int rtw_check_bcn_info23a(struct rtw_adapter *Adapter, u8 *pframe, u32 packet_len)
+{
+ unsigned int len;
+ unsigned char *p;
+ unsigned short val16;
+ struct wlan_network *cur_network = &Adapter->mlmepriv.cur_network;
+ u16 wpa_len = 0, rsn_len = 0;
+ u8 encryp_protocol = 0;
+ struct wlan_bssid_ex *bssid;
+ int group_cipher = 0, pairwise_cipher = 0, is_8021x = 0;
+ unsigned char *pbuf;
+ u32 wpa_ielen = 0;
+ u32 hidden_ssid = 0;
+ struct HT_info_element *pht_info = NULL;
+ struct ieee80211_ht_cap *pht_cap = NULL;
+ u32 bcn_channel;
+ unsigned short ht_cap_info;
+ unsigned char ht_info_infos_0;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) pframe;
+ u8 *pbssid = hdr->addr3;
+
+ if (is_client_associated_to_ap23a(Adapter) == false)
+ return true;
+
+ len = packet_len - sizeof(struct ieee80211_hdr_3addr);
+
+ if (len > MAX_IE_SZ) {
+ DBG_8723A("%s IE too long for survey event\n", __func__);
+ return _FAIL;
+ }
+
+ if (memcmp(cur_network->network.MacAddress, pbssid, 6)) {
+ DBG_8723A("Oops: rtw_check_network_encrypt linked but recv other bssid bcn\n" MAC_FMT MAC_FMT,
+ MAC_ARG(pbssid), MAC_ARG(cur_network->network.MacAddress));
+ return true;
+ }
+
+	bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
+	if (!bssid)
+		return _FAIL;
+
+	if (ieee80211_is_beacon(hdr->frame_control))
+		bssid->reserved = 1;
+
+ bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
+
+ /* below is to copy the information element */
+ bssid->IELength = len;
+ memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
+
+ /* check bw and channel offset */
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie23a(bssid->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+	if (p && len > 0) {
+ pht_cap = (struct ieee80211_ht_cap *)(p + 2);
+ ht_cap_info = pht_cap->cap_info;
+ } else {
+ ht_cap_info = 0;
+ }
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie23a(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+	if (p && len > 0) {
+ pht_info = (struct HT_info_element *)(p + 2);
+ ht_info_infos_0 = pht_info->infos[0];
+ } else {
+ ht_info_infos_0 = 0;
+ }
+ if (ht_cap_info != cur_network->BcnInfo.ht_cap_info ||
+	    ((ht_info_infos_0 & 0x03) != (cur_network->BcnInfo.ht_info_infos_0 & 0x03))) {
+ DBG_8723A("%s bcn now: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
+ ht_cap_info, ht_info_infos_0);
+ DBG_8723A("%s bcn link: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
+ cur_network->BcnInfo.ht_cap_info, cur_network->BcnInfo.ht_info_infos_0);
+ DBG_8723A("%s bw mode change, disconnect\n", __func__);
+ /* bcn_info_update */
+ cur_network->BcnInfo.ht_cap_info = ht_cap_info;
+ cur_network->BcnInfo.ht_info_infos_0 = ht_info_infos_0;
+ /* to do : need to check that whether modify related register of BB or not */
+ }
+
+ /* Checking for channel */
+ p = rtw_get_ie23a(bssid->IEs + _FIXED_IE_LENGTH_, _DSSET_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p) {
+ bcn_channel = *(p + 2);
+ } else {/* In 5G, some ap do not have DSSET IE checking HT info for channel */
+ p = rtw_get_ie23a(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (pht_info) {
+ bcn_channel = pht_info->primary_channel;
+ } else { /* we don't find channel IE, so don't check it */
+ DBG_8723A("Oops: %s we don't find channel IE, so don't check it\n", __func__);
+ bcn_channel = Adapter->mlmeextpriv.cur_channel;
+ }
+ }
+ if (bcn_channel != Adapter->mlmeextpriv.cur_channel) {
+ DBG_8723A("%s beacon channel:%d cur channel:%d disconnect\n", __func__,
+ bcn_channel, Adapter->mlmeextpriv.cur_channel);
+ goto _mismatch;
+ }
+
+ /* checking SSID */
+	p = rtw_get_ie23a(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+	if (!p) {
+ DBG_8723A("%s marc: cannot find SSID for survey event\n", __func__);
+ hidden_ssid = true;
+ } else {
+ hidden_ssid = false;
+ }
+
+	if (p && !hidden_ssid && *(p + 1)) {
+ memcpy(bssid->Ssid.ssid, (p + 2), *(p + 1));
+ bssid->Ssid.ssid_len = *(p + 1);
+ } else {
+ bssid->Ssid.ssid_len = 0;
+ bssid->Ssid.ssid[0] = '\0';
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
+ "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__,
+ bssid->Ssid.ssid, bssid->Ssid.ssid_len,
+ cur_network->network.Ssid.ssid,
+ cur_network->network.Ssid.ssid_len));
+
+ if (memcmp(bssid->Ssid.ssid, cur_network->network.Ssid.ssid, 32) ||
+ bssid->Ssid.ssid_len != cur_network->network.Ssid.ssid_len) {
+ if (bssid->Ssid.ssid[0] != '\0' &&
+ bssid->Ssid.ssid_len != 0) { /* not hidden ssid */
+ DBG_8723A("%s(), SSID is not match return FAIL\n",
+ __func__);
+ goto _mismatch;
+ }
+ }
+
+ /* check encryption info */
+ val16 = rtw_get_capability23a((struct wlan_bssid_ex *)bssid);
+
+ if (val16 & BIT(4))
+ bssid->Privacy = 1;
+ else
+ bssid->Privacy = 0;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s(): cur_network->network.Privacy is %d, bssid.Privacy is %d\n",
+ __func__, cur_network->network.Privacy, bssid->Privacy));
+ if (cur_network->network.Privacy != bssid->Privacy) {
+ DBG_8723A("%s(), privacy is not match return FAIL\n", __func__);
+ goto _mismatch;
+ }
+
+	rtw_get_sec_ie23a(bssid->IEs, bssid->IELength, NULL, &rsn_len, NULL, &wpa_len);
+
+ if (rsn_len > 0) {
+ encryp_protocol = ENCRYP_PROTOCOL_WPA2;
+ } else if (wpa_len > 0) {
+ encryp_protocol = ENCRYP_PROTOCOL_WPA;
+ } else {
+ if (bssid->Privacy)
+ encryp_protocol = ENCRYP_PROTOCOL_WEP;
+ }
+
+ if (cur_network->BcnInfo.encryp_protocol != encryp_protocol) {
+ DBG_8723A("%s(): enctyp is not match , return FAIL\n", __func__);
+ goto _mismatch;
+ }
+
+ if (encryp_protocol == ENCRYP_PROTOCOL_WPA || encryp_protocol == ENCRYP_PROTOCOL_WPA2) {
+		pbuf = rtw_get_wpa_ie23a(&bssid->IEs[12], &wpa_ielen, bssid->IELength - 12);
+		if (pbuf && wpa_ielen > 0) {
+			if (rtw_parse_wpa_ie23a(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is_8021x) == _SUCCESS) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s pnetwork->pairwise_cipher: %d, group_cipher is %d, is_8021x is %d\n", __func__,
+ pairwise_cipher, group_cipher, is_8021x));
+ }
+ } else {
+			pbuf = rtw_get_wpa2_ie23a(&bssid->IEs[12], &wpa_ielen, bssid->IELength - 12);
+
+			if (pbuf && wpa_ielen > 0) {
+				if (rtw_parse_wpa2_ie23a(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is_8021x) == _SUCCESS) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s pnetwork->pairwise_cipher: %d, pnetwork->group_cipher is %d, is_802x is %d\n",
+ __func__, pairwise_cipher, group_cipher, is_8021x));
+ }
+ }
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("%s cur_network->group_cipher is %d: %d\n", __func__, cur_network->BcnInfo.group_cipher, group_cipher));
+ if (pairwise_cipher != cur_network->BcnInfo.pairwise_cipher || group_cipher != cur_network->BcnInfo.group_cipher) {
+ DBG_8723A("%s pairwise_cipher(%x:%x) or group_cipher(%x:%x) is not match , return FAIL\n", __func__,
+ pairwise_cipher, cur_network->BcnInfo.pairwise_cipher,
+ group_cipher, cur_network->BcnInfo.group_cipher);
+ goto _mismatch;
+ }
+
+ if (is_8021x != cur_network->BcnInfo.is_8021x) {
+ DBG_8723A("%s authentication is not match , return FAIL\n", __func__);
+ goto _mismatch;
+ }
+ }
+
+ kfree(bssid);
+ return _SUCCESS;
+
+_mismatch:
+ kfree(bssid);
+
+ return _FAIL;
+}
+
+void update_beacon23a_info(struct rtw_adapter *padapter, u8 *pframe, uint pkt_len, struct sta_info *psta)
+{
+ unsigned int i;
+ unsigned int len;
+ struct ndis_802_11_var_ies * pIE;
+
+ len = pkt_len -
+ (_BEACON_IE_OFFSET_ + sizeof(struct ieee80211_hdr_3addr));
+
+ for (i = 0; i < len;) {
+ pIE = (struct ndis_802_11_var_ies *)(pframe + (_BEACON_IE_OFFSET_ + sizeof(struct ieee80211_hdr_3addr)) + i);
+
+ switch (pIE->ElementID) {
+ case _HT_EXTRA_INFO_IE_: /* HT info */
+ /* HT_info_handler23a(padapter, pIE); */
+ bwmode_update_check(padapter, pIE);
+ break;
+ case _ERPINFO_IE_:
+ ERP_IE_handler23a(padapter, pIE);
+ VCS_update23a(padapter, psta);
+ break;
+ default:
+ break;
+ }
+ i += (pIE->Length + 2);
+ }
+}
+
+unsigned int is_ap_in_tkip23a(struct rtw_adapter *padapter)
+{
+ u32 i;
+ struct ndis_802_11_var_ies * pIE;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+
+ if (rtw_get_capability23a((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
+ for (i = sizeof(struct ndis_802_11_fixed_ies); i < pmlmeinfo->network.IELength;) {
+ pIE = (struct ndis_802_11_var_ies *)(pmlmeinfo->network.IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if ((!memcmp(pIE->data, RTW_WPA_OUI23A, 4)) && (!memcmp((pIE->data + 12), WPA_TKIP_CIPHER23A, 4)))
+ return true;
+ break;
+ case _RSN_IE_2_:
+ if (!memcmp((pIE->data + 8), RSN_TKIP_CIPHER23A, 4))
+ return true;
+ break;
+ default:
+ break;
+ }
+ i += (pIE->Length + 2);
+ }
+ return false;
+ } else {
+ return false;
+ }
+}
+
+unsigned int should_forbid_n_rate23a(struct rtw_adapter * padapter)
+{
+ u32 i;
+ struct ndis_802_11_var_ies * pIE;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *cur_network = &pmlmepriv->cur_network.network;
+
+ if (rtw_get_capability23a((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
+ for (i = sizeof(struct ndis_802_11_fixed_ies); i < cur_network->IELength;) {
+ pIE = (struct ndis_802_11_var_ies *)(cur_network->IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if (!memcmp(pIE->data, RTW_WPA_OUI23A, 4) &&
+ ((!memcmp((pIE->data + 12), WPA_CIPHER_SUITE_CCMP23A, 4)) ||
+ (!memcmp((pIE->data + 16), WPA_CIPHER_SUITE_CCMP23A, 4))))
+ return false;
+ break;
+ case _RSN_IE_2_:
+ if ((!memcmp((pIE->data + 8), RSN_CIPHER_SUITE_CCMP23A, 4)) ||
+ (!memcmp((pIE->data + 12), RSN_CIPHER_SUITE_CCMP23A, 4)))
+					return false;
+				break;
+			default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+unsigned int is_ap_in_wep23a(struct rtw_adapter *padapter)
+{
+ u32 i;
+ struct ndis_802_11_var_ies * pIE;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+
+ if (rtw_get_capability23a((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
+ for (i = sizeof(struct ndis_802_11_fixed_ies); i < pmlmeinfo->network.IELength;) {
+ pIE = (struct ndis_802_11_var_ies *)(pmlmeinfo->network.IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if (!memcmp(pIE->data, RTW_WPA_OUI23A, 4))
+ return false;
+ break;
+ case _RSN_IE_2_:
+ return false;
+
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ return true;
+ } else {
+ return false;
+ }
+}
+
+int wifirate2_ratetbl_inx23a(unsigned char rate)
+{
+	int inx = 0;
+
+	rate = rate & 0x7f;
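+	/* Supported-rate values are in units of 0.5 Mbps; the basic-rate flag (bit 7) is masked off above. Indices 0-3 map to the CCK rates, 4-11 to the OFDM rates. */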
+
+ switch (rate) {
+ case 54*2:
+ inx = 11;
+ break;
+ case 48*2:
+ inx = 10;
+ break;
+ case 36*2:
+ inx = 9;
+ break;
+ case 24*2:
+ inx = 8;
+ break;
+ case 18*2:
+ inx = 7;
+ break;
+ case 12*2:
+ inx = 6;
+ break;
+ case 9*2:
+ inx = 5;
+ break;
+ case 6*2:
+ inx = 4;
+ break;
+ case 11*2:
+ inx = 3;
+ break;
+ case 11:
+ inx = 2;
+ break;
+ case 2*2:
+ inx = 1;
+ break;
+ case 1*2:
+ inx = 0;
+ break;
+ }
+ return inx;
+}
+
+unsigned int update_basic_rate23a(unsigned char *ptn, unsigned int ptn_sz)
+{
+ unsigned int i, num_of_rate;
+ unsigned int mask = 0;
+
+	num_of_rate = (ptn_sz > NumRates) ? NumRates : ptn_sz;
+
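+	/* Only rates flagged as basic (bit 0x80 set in the rate octet) contribute to the basic-rate bitmap. */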
+ for (i = 0; i < num_of_rate; i++) {
+ if ((*(ptn + i)) & 0x80)
+ mask |= 0x1 << wifirate2_ratetbl_inx23a(*(ptn + i));
+ }
+ return mask;
+}
+
+unsigned int update_supported_rate23a(unsigned char *ptn, unsigned int ptn_sz)
+{
+ unsigned int i, num_of_rate;
+ unsigned int mask = 0;
+
+	num_of_rate = (ptn_sz > NumRates) ? NumRates : ptn_sz;
+
+ for (i = 0; i < num_of_rate; i++)
+ mask |= 0x1 << wifirate2_ratetbl_inx23a(*(ptn + i));
+ return mask;
+}
+
+unsigned int update_MSC_rate23a(struct HT_caps_element *pHT_caps)
+{
+ unsigned int mask = 0;
+
+ mask = ((pHT_caps->u.HT_cap_element.MCS_rate[0] << 12) | (pHT_caps->u.HT_cap_element.MCS_rate[1] << 20));
+
+ return mask;
+}
+
+int support_short_GI23a(struct rtw_adapter *padapter,
+ struct HT_caps_element *pHT_caps)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ unsigned char bit_offset;
+
+	if (!pmlmeinfo->HT_enable)
+		return _FAIL;
+
+	if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK)
+		return _FAIL;
+
+	bit_offset = (pmlmeext->cur_bwmode & HT_CHANNEL_WIDTH_40) ? 6 : 5;
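+	/* HT Capabilities Info: bit 5 is short GI for 20 MHz, bit 6 is short GI for 40 MHz. */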
+
+ if (pHT_caps->u.HT_cap_element.HT_caps_info & (0x1 << bit_offset))
+ return _SUCCESS;
+ else
+ return _FAIL;
+}
+
+unsigned char get_highest_rate_idx23a(u32 mask)
+{
+ int i;
+ unsigned char rate_idx = 0;
+
+ for (i = 27; i >= 0; i--) {
+ if (mask & BIT(i)) {
+ rate_idx = i;
+ break;
+ }
+ }
+ return rate_idx;
+}
+
+unsigned char get_highest_mcs_rate(struct HT_caps_element *pHT_caps)
+{
+ int i, mcs_rate;
+
+ mcs_rate = (pHT_caps->u.HT_cap_element.MCS_rate[0] | (pHT_caps->u.HT_cap_element.MCS_rate[1] << 8));
+
+ for (i = 15; i >= 0; i--) {
+ if (mcs_rate & (0x1 << i))
+ break;
+ }
+ return i;
+}
+
+void Update_RA_Entry23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ rtw_hal_update_ra_mask23a(psta, 0);
+}
+
+void enable_rate_adaptive(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ Update_RA_Entry23a(padapter, psta);
+}
+
+void set_sta_rate23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ /* rate adaptive */
+ enable_rate_adaptive(padapter, psta);
+}
+
+/* Update RRSR and Rate for USERATE */
+void update_tx_basic_rate23a(struct rtw_adapter *padapter, u8 wirelessmode)
+{
+ unsigned char supported_rates[NDIS_802_11_LENGTH_RATES_EX];
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info* pwdinfo = &padapter->wdinfo;
+
+ /* Added by Albert 2011/03/22 */
+ /* In the P2P mode, the driver should not support the b mode. */
+ /* So, the Tx packet shouldn't use the CCK rate */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+#endif /* CONFIG_8723AU_P2P */
+
+ memset(supported_rates, 0, NDIS_802_11_LENGTH_RATES_EX);
+
+	if (wirelessmode == WIRELESS_11B) {
+ memcpy(supported_rates, rtw_basic_rate_cck, 4);
+ } else if (wirelessmode & WIRELESS_11B) {
+ memcpy(supported_rates, rtw_basic_rate_mix, 7);
+ } else {
+ memcpy(supported_rates, rtw_basic_rate_ofdm, 3);
+ }
+
+ if (wirelessmode & WIRELESS_11B)
+ update_mgnt_tx_rate23a(padapter, IEEE80211_CCK_RATE_1MB);
+ else
+ update_mgnt_tx_rate23a(padapter, IEEE80211_OFDM_RATE_6MB);
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BASIC_RATE, supported_rates);
+}
+
+unsigned char check_assoc_AP23a(u8 *pframe, uint len)
+{
+ unsigned int i;
+ struct ndis_802_11_var_ies * pIE;
+ u8 epigram_vendor_flag;
+ u8 ralink_vendor_flag;
+ epigram_vendor_flag = 0;
+ ralink_vendor_flag = 0;
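+	/* Identify the AP vendor from its vendor-specific IEs; a Tenda W311R is recognised by seeing both the Ralink and the Epigram OUI. */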
+
+ for (i = sizeof(struct ndis_802_11_fixed_ies); i < len;) {
+ pIE = (struct ndis_802_11_var_ies *)(pframe + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+			if (!memcmp(pIE->data, ARTHEROS_OUI1, 3) ||
+			    !memcmp(pIE->data, ARTHEROS_OUI2, 3)) {
+				DBG_8723A("link to Atheros AP\n");
+				return HT_IOT_PEER_ATHEROS;
+			} else if (!memcmp(pIE->data, BROADCOM_OUI1, 3) ||
+				   !memcmp(pIE->data, BROADCOM_OUI2, 3)) {
+ DBG_8723A("link to Broadcom AP\n");
+ return HT_IOT_PEER_BROADCOM;
+ } else if (!memcmp(pIE->data, MARVELL_OUI, 3)) {
+ DBG_8723A("link to Marvell AP\n");
+ return HT_IOT_PEER_MARVELL;
+ } else if (!memcmp(pIE->data, RALINK_OUI, 3)) {
+ if (!ralink_vendor_flag) {
+ ralink_vendor_flag = 1;
+ } else {
+ DBG_8723A("link to Ralink AP\n");
+ return HT_IOT_PEER_RALINK;
+ }
+ } else if (!memcmp(pIE->data, CISCO_OUI, 3)) {
+ DBG_8723A("link to Cisco AP\n");
+ return HT_IOT_PEER_CISCO;
+ } else if (!memcmp(pIE->data, REALTEK_OUI, 3)) {
+ DBG_8723A("link to Realtek 96B\n");
+ return HT_IOT_PEER_REALTEK;
+ } else if (!memcmp(pIE->data, AIRGOCAP_OUI, 3)) {
+ DBG_8723A("link to Airgo Cap\n");
+ return HT_IOT_PEER_AIRGO;
+ } else if (!memcmp(pIE->data, EPIGRAM_OUI, 3)) {
+ epigram_vendor_flag = 1;
+ if (ralink_vendor_flag) {
+ DBG_8723A("link to Tenda W311R AP\n");
+ return HT_IOT_PEER_TENDA;
+ } else {
+ DBG_8723A("Capture EPIGRAM_OUI\n");
+ }
+ } else {
+ break;
+ }
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ if (ralink_vendor_flag && !epigram_vendor_flag) {
+ DBG_8723A("link to Ralink AP\n");
+ return HT_IOT_PEER_RALINK;
+ } else if (ralink_vendor_flag && epigram_vendor_flag) {
+ DBG_8723A("link to Tenda W311R AP\n");
+ return HT_IOT_PEER_TENDA;
+ } else {
+ DBG_8723A("link to new AP\n");
+ return HT_IOT_PEER_UNKNOWN;
+ }
+}
+
+void update_IOT_info23a(struct rtw_adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ switch (pmlmeinfo->assoc_AP_vendor) {
+ case HT_IOT_PEER_MARVELL:
+ pmlmeinfo->turboMode_cts2self = 1;
+ pmlmeinfo->turboMode_rtsen = 0;
+ break;
+ case HT_IOT_PEER_RALINK:
+ pmlmeinfo->turboMode_cts2self = 0;
+ pmlmeinfo->turboMode_rtsen = 1;
+ /* disable high power */
+ Switch_DM_Func23a(padapter, ~DYNAMIC_BB_DYNAMIC_TXPWR,
+ false);
+ break;
+ case HT_IOT_PEER_REALTEK:
+ /* rtw_write16(padapter, 0x4cc, 0xffff); */
+ /* rtw_write16(padapter, 0x546, 0x01c0); */
+ /* disable high power */
+ Switch_DM_Func23a(padapter, ~DYNAMIC_BB_DYNAMIC_TXPWR,
+ false);
+ break;
+ default:
+ pmlmeinfo->turboMode_cts2self = 0;
+ pmlmeinfo->turboMode_rtsen = 1;
+ break;
+ }
+}
+
+void update_capinfo23a(struct rtw_adapter *Adapter, u16 updateCap)
+{
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ bool ShortPreamble;
+
+ if (updateCap & cShortPreamble) {
+ /* Short Preamble */
+ if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) {
+ /* PREAMBLE_LONG or PREAMBLE_AUTO */
+ ShortPreamble = true;
+ pmlmeinfo->preamble_mode = PREAMBLE_SHORT;
+ rtw_hal_set_hwreg23a(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ }
+ } else { /* Long Preamble */
+ if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) {
+ /* PREAMBLE_SHORT or PREAMBLE_AUTO */
+ ShortPreamble = false;
+ pmlmeinfo->preamble_mode = PREAMBLE_LONG;
+ rtw_hal_set_hwreg23a(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ }
+ }
+ if (updateCap & cIBSS) {
+ /* Filen: See 802.11-2007 p.91 */
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ } else {
+ /* Filen: See 802.11-2007 p.90 */
+ if (pmlmeext->cur_wireless_mode & (WIRELESS_11G | WIRELESS_11_24N)) {
+ if (updateCap & cShortSlotTime) { /* Short Slot Time */
+ if (pmlmeinfo->slotTime != SHORT_SLOT_TIME)
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ } else { /* Long Slot Time */
+ if (pmlmeinfo->slotTime != NON_SHORT_SLOT_TIME)
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ }
+ } else if (pmlmeext->cur_wireless_mode & (WIRELESS_11A | WIRELESS_11_5N)) {
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ } else {
+ /* B Mode */
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ }
+ }
+ rtw_hal_set_hwreg23a(Adapter, HW_VAR_SLOT_TIME, &pmlmeinfo->slotTime);
+}
+
+void update_wireless_mode23a(struct rtw_adapter *padapter)
+{
+ int ratelen, network_type = 0;
+ u32 SIFS_Timer;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+ unsigned char *rate = cur_network->SupportedRates;
+
+ ratelen = rtw_get_rateset_len23a(cur_network->SupportedRates);
+
+ if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable))
+ pmlmeinfo->HT_enable = 1;
+
+ if (pmlmeext->cur_channel > 14) {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_5N;
+ network_type |= WIRELESS_11A;
+ } else {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_24N;
+
+ if ((cckratesonly_included23a(rate, ratelen)) == true)
+ network_type |= WIRELESS_11B;
+ else if ((cckrates_included23a(rate, ratelen)) == true)
+ network_type |= WIRELESS_11BG;
+ else
+ network_type |= WIRELESS_11G;
+ }
+
+ pmlmeext->cur_wireless_mode = network_type & padapter->registrypriv.wireless_mode;
+
+ SIFS_Timer = 0x0a0a0808; /* 0x0808 -> for CCK, 0x0a0a -> for OFDM */
+ /* change this value if having IOT issues. */
+
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer);
+
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
+ update_mgnt_tx_rate23a(padapter, IEEE80211_CCK_RATE_1MB);
+ else
+ update_mgnt_tx_rate23a(padapter, IEEE80211_OFDM_RATE_6MB);
+}
+
+void update_bmc_sta_support_rate23a(struct rtw_adapter *padapter, u32 mac_id)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B) {
+ /* Only B, B/G, and B/G/N AP could use CCK rate */
+ memcpy((pmlmeinfo->FW_sta_info[mac_id].SupportedRates), rtw_basic_rate_cck, 4);
+ } else {
+ memcpy((pmlmeinfo->FW_sta_info[mac_id].SupportedRates), rtw_basic_rate_ofdm, 4);
+ }
+}
+
+int update_sta_support_rate23a(struct rtw_adapter *padapter, u8 *pvar_ie, uint var_ie_len, int cam_idx)
+{
+ unsigned int ie_len;
+ struct ndis_802_11_var_ies *pIE;
+ int supportRateNum = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ pIE = (struct ndis_802_11_var_ies *)rtw_get_ie23a(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
+ if (pIE == NULL)
+ return _FAIL;
+
+ memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
+ supportRateNum = ie_len;
+
+ pIE = (struct ndis_802_11_var_ies *)rtw_get_ie23a(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
+ if (pIE)
+ memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
+ return _SUCCESS;
+}
+
+void process_addba_req23a(struct rtw_adapter *padapter, u8 *paddba_req, u8 *addr)
+{
+ struct sta_info *psta;
+ u16 tid, start_seq, param;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct ADDBA_request *preq = (struct ADDBA_request*)paddba_req;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ psta = rtw_get_stainfo23a(pstapriv, addr);
+
+ if (psta) {
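+		/* BA starting sequence control: starting sequence number in bits 4-15; BA parameter set: TID in bits 2-5. */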
+ start_seq = le16_to_cpu(preq->BA_starting_seqctrl) >> 4;
+
+ param = le16_to_cpu(preq->BA_para_set);
+ tid = (param>>2)&0x0f;
+
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+
+ preorder_ctrl->indicate_seq = 0xffff;
+
+		preorder_ctrl->enable = pmlmeinfo->bAcceptAddbaReq;
+ }
+}
+
+void update_TSF23a(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
+{
+ u8 *pIE;
+ u32 *pbuf;
+
+ pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
+ pbuf = (u32 *)pIE;
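+	/* The beacon timestamp is the first 8 bytes of the frame body; the second little-endian 32-bit word is the high half of the 64-bit TSF. */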
+
+ pmlmeext->TSFValue = le32_to_cpu(*(pbuf+1));
+
+ pmlmeext->TSFValue = pmlmeext->TSFValue << 32;
+
+ pmlmeext->TSFValue |= le32_to_cpu(*pbuf);
+}
+
+void correct_TSF23a(struct rtw_adapter *padapter, struct mlme_ext_priv *pmlmeext)
+{
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_CORRECT_TSF, NULL);
+}
+
+void beacon_timing_control23a(struct rtw_adapter *padapter)
+{
+ rtw_hal_bcn_related_reg_setting23a(padapter);
+}
+
+static struct rtw_adapter *pbuddy_padapter;
+
+int rtw_handle_dualmac23a(struct rtw_adapter *adapter, bool init)
+{
+ int status = _SUCCESS;
+
+ if (init) {
+ if (pbuddy_padapter == NULL) {
+ pbuddy_padapter = adapter;
+ DBG_8723A("%s(): pbuddy_padapter == NULL, Set pbuddy_padapter\n", __func__);
+ } else {
+ adapter->pbuddy_adapter = pbuddy_padapter;
+ pbuddy_padapter->pbuddy_adapter = adapter;
+ /* clear global value */
+ pbuddy_padapter = NULL;
+ DBG_8723A("%s(): pbuddy_padapter exist, Exchange Information\n", __func__);
+ }
+ } else {
+ pbuddy_padapter = NULL;
+ }
+ return status;
+}
diff --git a/drivers/staging/rtl8723au/core/rtw_xmit.c b/drivers/staging/rtl8723au/core/rtw_xmit.c
new file mode 100644
index 000000000000..0f10cfa10d39
--- /dev/null
+++ b/drivers/staging/rtl8723au/core/rtw_xmit.c
@@ -0,0 +1,2460 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_XMIT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <osdep_intf.h>
+#include <linux/ip.h>
+#include <usb_ops.h>
+
+static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
+static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
+
+static void _init_txservq(struct tx_servq *ptxservq)
+{
+
+ INIT_LIST_HEAD(&ptxservq->tx_pending);
+ _rtw_init_queue23a(&ptxservq->sta_pending);
+ ptxservq->qcnt = 0;
+
+}
+
+void _rtw_init_sta_xmit_priv23a(struct sta_xmit_priv *psta_xmitpriv)
+{
+
+ spin_lock_init(&psta_xmitpriv->lock);
+
+ /* for (i = 0 ; i < MAX_NUMBLKS; i++) */
+ /* _init_txservq(&psta_xmitpriv->blk_q[i]); */
+
+ _init_txservq(&psta_xmitpriv->be_q);
+ _init_txservq(&psta_xmitpriv->bk_q);
+ _init_txservq(&psta_xmitpriv->vi_q);
+ _init_txservq(&psta_xmitpriv->vo_q);
+ INIT_LIST_HEAD(&psta_xmitpriv->legacy_dz);
+ INIT_LIST_HEAD(&psta_xmitpriv->apsd);
+
+}
+
+s32 _rtw_init_xmit_priv23a(struct xmit_priv *pxmitpriv, struct rtw_adapter *padapter)
+{
+ int i;
+ struct xmit_buf *pxmitbuf;
+ struct xmit_frame *pxframe;
+ int res = _SUCCESS;
+ u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
+ u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
+
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
+ /* memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); */
+
+ spin_lock_init(&pxmitpriv->lock);
+ spin_lock_init(&pxmitpriv->lock_sctx);
+ sema_init(&pxmitpriv->xmit_sema, 0);
+ sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
+
+ /*
+	Please insert all the queue initialization using _rtw_init_queue23a below
+ */
+
+ pxmitpriv->adapter = padapter;
+
+ _rtw_init_queue23a(&pxmitpriv->be_pending);
+ _rtw_init_queue23a(&pxmitpriv->bk_pending);
+ _rtw_init_queue23a(&pxmitpriv->vi_pending);
+ _rtw_init_queue23a(&pxmitpriv->vo_pending);
+ _rtw_init_queue23a(&pxmitpriv->bm_pending);
+
+ _rtw_init_queue23a(&pxmitpriv->free_xmit_queue);
+
+	/*
+	Allocate memory of size sizeof(struct xmit_frame) * NR_XMITFRAME
+	and initialize free_xmit_frame below.
+	Also use free_txobj to link up all the xmit_frames...
+	*/
+
+ pxmitpriv->pallocated_frame_buf = rtw_zvmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
+
+ if (pxmitpriv->pallocated_frame_buf == NULL) {
+ pxmitpriv->pxmit_frame_buf = NULL;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_frame fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
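+	/* The buffer is allocated 4 bytes larger than needed so the frame array can be aligned to a 4-byte boundary. */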
+ pxmitpriv->pxmit_frame_buf = PTR_ALIGN(pxmitpriv->pallocated_frame_buf, 4);
+
+	pxframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
+
+ for (i = 0; i < NR_XMITFRAME; i++) {
+ INIT_LIST_HEAD(&pxframe->list);
+
+ pxframe->padapter = padapter;
+ pxframe->frame_tag = NULL_FRAMETAG;
+
+ pxframe->pkt = NULL;
+
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
+
+ list_add_tail(&pxframe->list,
+ &pxmitpriv->free_xmit_queue.queue);
+
+ pxframe++;
+ }
+
+ pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
+
+ pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
+
+ /* init xmit_buf */
+ _rtw_init_queue23a(&pxmitpriv->free_xmitbuf_queue);
+ INIT_LIST_HEAD(&pxmitpriv->xmitbuf_list);
+ _rtw_init_queue23a(&pxmitpriv->pending_xmitbuf_queue);
+
+ for (i = 0; i < NR_XMITBUFF; i++) {
+ pxmitbuf = kzalloc(sizeof(struct xmit_buf), GFP_KERNEL);
+ if (!pxmitbuf)
+ goto fail;
+ INIT_LIST_HEAD(&pxmitbuf->list);
+ INIT_LIST_HEAD(&pxmitbuf->list2);
+
+ pxmitbuf->padapter = padapter;
+
+ /* Tx buf allocation may fail sometimes, so sleep and retry. */
+ res = rtw_os_xmit_resource_alloc23a(padapter, pxmitbuf,
+ (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ if (res == _FAIL) {
+ goto fail;
+ }
+
+ list_add_tail(&pxmitbuf->list,
+ &pxmitpriv->free_xmitbuf_queue.queue);
+ list_add_tail(&pxmitbuf->list2,
+ &pxmitpriv->xmitbuf_list);
+ }
+
+ pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
+
+ /* init xframe_ext queue, the same count as extbuf */
+ _rtw_init_queue23a(&pxmitpriv->free_xframe_ext_queue);
+
+ pxmitpriv->xframe_ext_alloc_addr = rtw_zvmalloc(num_xmit_extbuf * sizeof(struct xmit_frame) + 4);
+
+ if (pxmitpriv->xframe_ext_alloc_addr == NULL) {
+ pxmitpriv->xframe_ext = NULL;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xframe_ext fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+ pxmitpriv->xframe_ext = PTR_ALIGN(pxmitpriv->xframe_ext_alloc_addr, 4);
+ pxframe = (struct xmit_frame*)pxmitpriv->xframe_ext;
+
+ for (i = 0; i < num_xmit_extbuf; i++) {
+ INIT_LIST_HEAD(&pxframe->list);
+
+ pxframe->padapter = padapter;
+ pxframe->frame_tag = NULL_FRAMETAG;
+
+ pxframe->pkt = NULL;
+
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
+
+ pxframe->ext_tag = 1;
+
+ list_add_tail(&pxframe->list,
+ &pxmitpriv->free_xframe_ext_queue.queue);
+
+ pxframe++;
+ }
+ pxmitpriv->free_xframe_ext_cnt = num_xmit_extbuf;
+
+ /* Init xmit extension buff */
+ _rtw_init_queue23a(&pxmitpriv->free_xmit_extbuf_queue);
+ INIT_LIST_HEAD(&pxmitpriv->xmitextbuf_list);
+
+ for (i = 0; i < num_xmit_extbuf; i++) {
+ pxmitbuf = kzalloc(sizeof(struct xmit_buf), GFP_KERNEL);
+ if (!pxmitbuf)
+ goto fail;
+ INIT_LIST_HEAD(&pxmitbuf->list);
+ INIT_LIST_HEAD(&pxmitbuf->list2);
+
+ pxmitbuf->padapter = padapter;
+
+ /* Tx buf allocation may fail sometimes, so sleep and retry. */
+ res = rtw_os_xmit_resource_alloc23a(padapter, pxmitbuf,
+ max_xmit_extbuf_size + XMITBUF_ALIGN_SZ);
+ if (res == _FAIL) {
+ goto exit;
+ }
+
+ list_add_tail(&pxmitbuf->list,
+ &pxmitpriv->free_xmit_extbuf_queue.queue);
+ list_add_tail(&pxmitbuf->list2,
+ &pxmitpriv->xmitextbuf_list);
+ }
+
+ pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
+
+ rtw_alloc_hwxmits23a(padapter);
+ rtw_init_hwxmits23a(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
+
+	for (i = 0; i < 4; i++)
+ pxmitpriv->wmm_para_seq[i] = i;
+
+ pxmitpriv->txirp_cnt = 1;
+
+ sema_init(&pxmitpriv->tx_retevt, 0);
+
+ /* per AC pending irp */
+ pxmitpriv->beq_cnt = 0;
+ pxmitpriv->bkq_cnt = 0;
+ pxmitpriv->viq_cnt = 0;
+ pxmitpriv->voq_cnt = 0;
+
+ pxmitpriv->ack_tx = false;
+ mutex_init(&pxmitpriv->ack_tx_mutex);
+ rtw_sctx_init23a(&pxmitpriv->ack_tx_ops, 0);
+ rtw_hal_init23a_xmit_priv(padapter);
+
+exit:
+
+ return res;
+fail:
+ goto exit;
+}
+
+void _rtw_free_xmit_priv23a(struct xmit_priv *pxmitpriv)
+{
+ struct rtw_adapter *padapter = pxmitpriv->adapter;
+	struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
+ struct xmit_buf *pxmitbuf;
+ struct list_head *plist, *ptmp;
+ u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
+ int i;
+
+ rtw_hal_free_xmit_priv23a(padapter);
+
+ if (pxmitpriv->pxmit_frame_buf == NULL)
+ return;
+ for (i = 0; i < NR_XMITFRAME; i++) {
+ rtw_os_xmit_complete23a(padapter, pxmitframe);
+ pxmitframe++;
+ }
+
+ list_for_each_safe(plist, ptmp, &pxmitpriv->xmitbuf_list) {
+ pxmitbuf = container_of(plist, struct xmit_buf, list2);
+ list_del_init(&pxmitbuf->list2);
+ rtw_os_xmit_resource_free23a(padapter, pxmitbuf);
+ kfree(pxmitbuf);
+ }
+
+ if (pxmitpriv->pallocated_frame_buf) {
+ rtw_vmfree(pxmitpriv->pallocated_frame_buf, NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
+ }
+
+ /* free xframe_ext queue, the same count as extbuf */
+	pxmitframe = (struct xmit_frame *)pxmitpriv->xframe_ext;
+	if (pxmitframe) {
+		for (i = 0; i < num_xmit_extbuf; i++) {
+ rtw_os_xmit_complete23a(padapter, pxmitframe);
+ pxmitframe++;
+ }
+ }
+ if (pxmitpriv->xframe_ext_alloc_addr)
+ rtw_vmfree(pxmitpriv->xframe_ext_alloc_addr, num_xmit_extbuf * sizeof(struct xmit_frame) + 4);
+
+ /* free xmit extension buff */
+ list_for_each_safe(plist, ptmp, &pxmitpriv->xmitextbuf_list) {
+ pxmitbuf = container_of(plist, struct xmit_buf, list2);
+ list_del_init(&pxmitbuf->list2);
+ rtw_os_xmit_resource_free23a(padapter, pxmitbuf);
+ kfree(pxmitbuf);
+ }
+
+ rtw_free_hwxmits23a(padapter);
+ mutex_destroy(&pxmitpriv->ack_tx_mutex);
+}
+
+static void update_attrib_vcs_info(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ u32 sz;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct sta_info *psta = pattrib->psta;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if (pattrib->psta) {
+ psta = pattrib->psta;
+ } else {
+ DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
+ psta = rtw_get_stainfo23a(&padapter->stapriv, &pattrib->ra[0]);
+ }
+
+ if (psta == NULL) {
+ DBG_8723A("%s, psta == NUL\n", __func__);
+ return;
+ }
+
+ if (!(psta->state &_FW_LINKED)) {
+ DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
+ return;
+ }
+
+ if (pattrib->nr_frags != 1)
+ sz = padapter->xmitpriv.frag_len;
+ else /* no frag */
+ sz = pattrib->last_txcmdsz;
+
+ /* (1) RTS_Threshold is compared to the MPDU, not MSDU. */
+ /* (2) If there are more than one frag in this MSDU, only the first frag uses protection frame. */
+ /* Other fragments are protected by previous fragment. */
+ /* So we only need to check the length of first fragment. */
+ if (pmlmeext->cur_wireless_mode < WIRELESS_11_24N || padapter->registrypriv.wifi_spec) {
+ if (sz > padapter->registrypriv.rts_thresh) {
+ pattrib->vcs_mode = RTS_CTS;
+ } else {
+ if (psta->rtsen)
+ pattrib->vcs_mode = RTS_CTS;
+ else if (psta->cts2self)
+ pattrib->vcs_mode = CTS_TO_SELF;
+ else
+ pattrib->vcs_mode = NONE_VCS;
+ }
+ } else {
+ while (true) {
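+			/* Decide the protection mode in priority order: Atheros A-MPDU/AES IOT workaround, ERP protection (RTS or CTS-to-self), HT operation mode 2/3, RTS threshold, then A-MPDU TXOP protection; otherwise no protection. */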
+ /* IOT action */
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS) &&
+ (pattrib->ampdu_en) &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+ pattrib->vcs_mode = CTS_TO_SELF;
+ break;
+ }
+
+ /* check ERP protection */
+ if (psta->rtsen || psta->cts2self) {
+ if (psta->rtsen)
+ pattrib->vcs_mode = RTS_CTS;
+ else if (psta->cts2self)
+ pattrib->vcs_mode = CTS_TO_SELF;
+
+ break;
+ }
+
+ /* check HT op mode */
+ if (pattrib->ht_en) {
+ u8 HTOpMode = pmlmeinfo->HT_protection;
+ if ((pmlmeext->cur_bwmode && (HTOpMode == 2 || HTOpMode == 3)) ||
+ (!pmlmeext->cur_bwmode && HTOpMode == 3)) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+ }
+
+ /* check rts */
+ if (sz > padapter->registrypriv.rts_thresh) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+
+ /* to do list: check MIMO power save condition. */
+
+ /* check AMPDU aggregation for TXOP */
+ if (pattrib->ampdu_en) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+
+ pattrib->vcs_mode = NONE_VCS;
+ break;
+ }
+ }
+}
+
+static void update_attrib_phy_info(struct pkt_attrib *pattrib, struct sta_info *psta)
+{
+ /*if (psta->rtsen)
+ pattrib->vcs_mode = RTS_CTS;
+ else if (psta->cts2self)
+ pattrib->vcs_mode = CTS_TO_SELF;
+ else
+ pattrib->vcs_mode = NONE_VCS;*/
+
+ pattrib->mdata = 0;
+ pattrib->eosp = 0;
+ pattrib->triggered = 0;
+
+ /* qos_en, ht_en, init rate, , bw, ch_offset, sgi */
+ pattrib->qos_en = psta->qos_option;
+
+ pattrib->raid = psta->raid;
+ pattrib->ht_en = psta->htpriv.ht_option;
+ pattrib->bwmode = psta->htpriv.bwmode;
+ pattrib->ch_offset = psta->htpriv.ch_offset;
+ pattrib->sgi = psta->htpriv.sgi;
+ pattrib->ampdu_en = false;
+
+ pattrib->retry_ctrl = false;
+}
+
+u8 qos_acm23a(u8 acm_mask, u8 priority)
+{
+ u8 change_priority = priority;
+
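+	/* If admission control is mandatory (ACM bit set) for the access category this user priority maps to, downgrade the priority to a lower category. */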
+ switch (priority) {
+ case 0:
+ case 3:
+ if (acm_mask & BIT(1))
+ change_priority = 1;
+ break;
+ case 1:
+ case 2:
+ break;
+ case 4:
+ case 5:
+ if (acm_mask & BIT(2))
+ change_priority = 0;
+ break;
+ case 6:
+ case 7:
+ if (acm_mask & BIT(3))
+ change_priority = 5;
+ break;
+ default:
+ DBG_8723A("qos_acm23a(): invalid pattrib->priority: %d!!!\n",
+ priority);
+ break;
+ }
+
+ return change_priority;
+}
+
+static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
+{
+ struct ethhdr etherhdr;
+ struct iphdr ip_hdr;
+ s32 UserPriority = 0;
+
+ _rtw_open_pktfile23a(ppktfile->pkt, ppktfile);
+ _rtw_pktfile_read23a(ppktfile, (unsigned char*)&etherhdr, ETH_HLEN);
+
+ /* get UserPriority from IP hdr */
+ if (pattrib->ether_type == 0x0800) {
+ _rtw_pktfile_read23a(ppktfile, (u8*)&ip_hdr, sizeof(ip_hdr));
+/* UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
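+		/* The top three bits of the IPv4 TOS byte (IP precedence) are used directly as the 802.1d user priority. */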
+ UserPriority = ip_hdr.tos >> 5;
+ } else if (pattrib->ether_type == 0x888e) {
+ /* "When priority processing of data frames is supported, */
+ /* a STA's SME should send EAPOL-Key frames at the highest
+ priority." */
+ UserPriority = 7;
+ }
+
+ pattrib->priority = UserPriority;
+ pattrib->hdrlen = sizeof(struct ieee80211_qos_hdr);
+ pattrib->subtype = WIFI_QOS_DATA_TYPE;
+}
+
+static s32 update_attrib(struct rtw_adapter *padapter,
+ struct sk_buff *pkt, struct pkt_attrib *pattrib)
+{
+ uint i;
+ struct pkt_file pktfile;
+ struct sta_info *psta = NULL;
+ struct ethhdr etherhdr;
+
+ int bmcast;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ int res = _SUCCESS;
+
+ _rtw_open_pktfile23a(pkt, &pktfile);
+ i = _rtw_pktfile_read23a(&pktfile, (u8*)&etherhdr, ETH_HLEN);
+
+ pattrib->ether_type = ntohs(etherhdr.h_proto);
+
+ memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
+ memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);
+
+ pattrib->pctrl = 0;
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+ }
+ else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ memcpy(pattrib->ra, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+ }
+ else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, get_bssid(pmlmepriv), ETH_ALEN);
+ }
+
+ pattrib->pktlen = pktfile.pkt_len;
+
+ if (pattrib->ether_type == ETH_P_IP) {
+ /* The following is for DHCP and ARP packet, we use cck1M
+ to tx these packets and let LPS awake some time */
+ /* to prevent DHCP protocol fail */
+ u8 tmp[24];
+ _rtw_pktfile_read23a(&pktfile, &tmp[0], 24);
+ pattrib->dhcp_pkt = 0;
+		if (pktfile.pkt_len > 282) { /* MINIMUM_DHCP_PACKET_SIZE */
+			if (pattrib->ether_type == ETH_P_IP) { /* IP header */
+ if (((tmp[21] == 68) && (tmp[23] == 67)) ||
+ ((tmp[21] == 67) && (tmp[23] == 68))) {
+ /* 68 : UDP BOOTP client */
+ /* 67 : UDP BOOTP server */
+ RT_TRACE(_module_rtl871x_xmit_c_,
+ _drv_err_,
+ ("======================"
+ "update_attrib: get DHCP "
+ "Packet\n"));
+ pattrib->dhcp_pkt = 1;
+ }
+ }
+ }
+	} else if (pattrib->ether_type == 0x888e) {
+ DBG_8723A_LEVEL(_drv_always_, "send eapol packet\n");
+ }
+
+ if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1)) {
+ rtw_set_scan_deny(padapter, 3000);
+ }
+
+ /* If EAPOL , ARP , OR DHCP packet, driver must be in active mode. */
+ if ((pattrib->ether_type == 0x0806) ||
+ (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1)) {
+ rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_SPECIAL_PACKET, 1);
+ }
+
+ bmcast = is_multicast_ether_addr(pattrib->ra);
+
+ /* get sta_info */
+ if (bmcast) {
+ psta = rtw_get_bcmc_stainfo23a(padapter);
+ } else {
+ psta = rtw_get_stainfo23a(pstapriv, pattrib->ra);
+		if (psta == NULL) { /* if we cannot get psta => drop the pkt */
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_,
+ ("\nupdate_attrib => get sta_info fail, ra:"
+ MAC_FMT"\n", MAC_ARG(pattrib->ra)));
+ res = _FAIL;
+ goto exit;
+ } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) &&
+ (!(psta->state & _FW_LINKED))) {
+ res = _FAIL;
+ goto exit;
+ }
+ }
+
+ if (psta) {
+ pattrib->mac_id = psta->mac_id;
+ /* DBG_8723A("%s ==> mac_id(%d)\n", __func__, pattrib->mac_id); */
+ pattrib->psta = psta;
+ } else {
+ /* if we cannot get psta => drop the pkt */
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_,
+ ("\nupdate_attrib => get sta_info fail, ra:" MAC_FMT
+ "\n", MAC_ARG(pattrib->ra)));
+ res = _FAIL;
+ goto exit;
+ }
+
+ pattrib->ack_policy = 0;
+ /* get ether_hdr_len */
+
+ /* pattrib->ether_type == 0x8100) ? (14 + 4): 14; vlan tag */
+ pattrib->pkt_hdrlen = ETH_HLEN;
+
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
+ pattrib->subtype = WIFI_DATA_TYPE;
+ pattrib->priority = 0;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE | WIFI_ADHOC_STATE |
+ WIFI_ADHOC_MASTER_STATE)) {
+ if (psta->qos_option)
+ set_qos(&pktfile, pattrib);
+ } else {
+ if (pqospriv->qos_option) {
+ set_qos(&pktfile, pattrib);
+
+ if (pmlmepriv->acm_mask != 0) {
+ pattrib->priority = qos_acm23a(pmlmepriv->acm_mask,
+ pattrib->priority);
+ }
+ }
+ }
+
+ if (psta->ieee8021x_blocked == true) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("\n psta->ieee8021x_blocked == true\n"));
+
+ pattrib->encrypt = 0;
+
+ if ((pattrib->ether_type != 0x888e) &&
+ (check_fwstate(pmlmepriv, WIFI_MP_STATE) == false)) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("\npsta->ieee8021x_blocked == true, "
+ "pattrib->ether_type(%.4x) != 0x888e\n",
+ pattrib->ether_type));
+ res = _FAIL;
+ goto exit;
+ }
+ } else {
+ GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast);
+
+ switch (psecuritypriv->dot11AuthAlgrthm) {
+ case dot11AuthAlgrthm_Open:
+ case dot11AuthAlgrthm_Shared:
+ case dot11AuthAlgrthm_Auto:
+ pattrib->key_idx =
+ (u8)psecuritypriv->dot11PrivacyKeyIndex;
+ break;
+ case dot11AuthAlgrthm_8021X:
+ if (bmcast)
+ pattrib->key_idx =
+ (u8)psecuritypriv->dot118021XGrpKeyid;
+ else
+ pattrib->key_idx = 0;
+ break;
+ default:
+ pattrib->key_idx = 0;
+ break;
+ }
+
+ }
+
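+	/* Per-algorithm IV/ICV overhead: WEP 4 + 4, TKIP 8 (IV + extended IV) + 4, CCMP 8 (PN header) + 8 (MIC). */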
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ pattrib->iv_len = 4;
+ pattrib->icv_len = 4;
+ break;
+
+ case _TKIP_:
+ pattrib->iv_len = 8;
+ pattrib->icv_len = 4;
+
+ if (padapter->securitypriv.busetkipkey == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("\npadapter->securitypriv.busetkip"
+ "key(%d) == _FAIL drop packet\n",
+ padapter->securitypriv.busetkipkey));
+ res = _FAIL;
+ goto exit;
+ }
+
+ break;
+ case _AES_:
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("pattrib->encrypt =%d (_AES_)\n", pattrib->encrypt));
+ pattrib->iv_len = 8;
+ pattrib->icv_len = 8;
+ break;
+
+ default:
+ pattrib->iv_len = 0;
+ pattrib->icv_len = 0;
+ break;
+ }
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("update_attrib: encrypt =%d\n", pattrib->encrypt));
+
+ if (pattrib->encrypt && psecuritypriv->hw_decrypted == false) {
+ pattrib->bswenc = true;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("update_attrib: encrypt =%d bswenc = true\n",
+ pattrib->encrypt));
+ } else {
+ pattrib->bswenc = false;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("update_attrib: bswenc = false\n"));
+ }
+ update_attrib_phy_info(pattrib, psta);
+
+exit:
+
+ return res;
+}
+
+static s32 xmitframe_addmic(struct rtw_adapter *padapter,
+			    struct xmit_frame *pxmitframe)
+{
+ struct mic_data micdata;
+ struct sta_info *stainfo;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ int curfragnum, length;
+ u8 *pframe, *payload, mic[8];
+	u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
+ u8 hw_hdr_offset = 0;
+ int bmcst = is_multicast_ether_addr(pattrib->ra);
+
+ if (pattrib->psta) {
+ stainfo = pattrib->psta;
+ } else {
+ DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
+ stainfo = rtw_get_stainfo23a(&padapter->stapriv, &pattrib->ra[0]);
+ }
+
+ if (!stainfo) {
+ DBG_8723A("%s, psta == NUL\n", __func__);
+ return _FAIL;
+ }
+
+ if (!(stainfo->state &_FW_LINKED)) {
+ DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n",
+ __func__, stainfo->state);
+ return _FAIL;
+ }
+
+ hw_hdr_offset = TXDESC_OFFSET;
+
+ if (pattrib->encrypt == _TKIP_) {
+ /* encode mic code */
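+		/* TKIP: compute the Michael MIC over DA, SA, priority and the plaintext payload of each fragment, then append the 8-byte MIC to the last fragment. */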
+ if (stainfo) {
+			u8 null_key[16] = {0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0};
+
+ pframe = pxmitframe->buf_addr + hw_hdr_offset;
+
+ if (bmcst) {
+ if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16)) {
+ return _FAIL;
+ }
+ /* start to calculate the mic code */
+ rtw_secmicsetkey23a(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
+ } else {
+ if (!memcmp(&stainfo->dot11tkiptxmickey.skey[0],
+ null_key, 16)) {
+ return _FAIL;
+ }
+ /* start to calculate the mic code */
+ rtw_secmicsetkey23a(&micdata, &stainfo->dot11tkiptxmickey.skey[0]);
+ }
+
+ if (pframe[1] & 1) { /* ToDS == 1 */
+ /* DA */
+ rtw_secmicappend23a(&micdata, &pframe[16], 6);
+ if (pframe[1] & 2) /* From Ds == 1 */
+ rtw_secmicappend23a(&micdata,
+ &pframe[24], 6);
+ else
+ rtw_secmicappend23a(&micdata,
+ &pframe[10], 6);
+ } else { /* ToDS == 0 */
+ /* DA */
+ rtw_secmicappend23a(&micdata, &pframe[4], 6);
+ if (pframe[1] & 2) /* From Ds == 1 */
+ rtw_secmicappend23a(&micdata,
+ &pframe[16], 6);
+ else
+ rtw_secmicappend23a(&micdata,
+ &pframe[10], 6);
+ }
+
+ /* if (pqospriv->qos_option == 1) */
+ if (pattrib->qos_en)
+ priority[0] = (u8)pxmitframe->attrib.priority;
+
+ rtw_secmicappend23a(&micdata, &priority[0], 4);
+
+ payload = pframe;
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags;
+ curfragnum++) {
+ payload = PTR_ALIGN(payload, 4);
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("=== curfragnum =%d, pframe = 0x%.2x, "
+ "0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x"
+ "%.2x, 0x%.2x, 0x%.2x,!!!\n",
+ curfragnum, *payload, *(payload + 1),
+ *(payload + 2), *(payload + 3),
+ *(payload + 4), *(payload + 5),
+ *(payload + 6), *(payload + 7)));
+
+ payload = payload + pattrib->hdrlen +
+ pattrib->iv_len;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("curfragnum =%d pattrib->hdrlen =%d "
+ "pattrib->iv_len =%d", curfragnum,
+ pattrib->hdrlen, pattrib->iv_len));
+ if ((curfragnum + 1) == pattrib->nr_frags) {
+ length = pattrib->last_txcmdsz -
+ pattrib->hdrlen -
+ pattrib->iv_len -
+ ((pattrib->bswenc) ?
+ pattrib->icv_len : 0);
+ rtw_secmicappend23a(&micdata, payload,
+ length);
+ payload = payload + length;
+ } else {
+ length = pxmitpriv->frag_len -
+ pattrib->hdrlen -
+ pattrib->iv_len -
+ ((pattrib->bswenc) ?
+ pattrib->icv_len : 0);
+ rtw_secmicappend23a(&micdata, payload,
+ length);
+ payload = payload + length +
+ pattrib->icv_len;
+ RT_TRACE(_module_rtl871x_xmit_c_,
+ _drv_err_,
+ ("curfragnum =%d length =%d "
+ "pattrib->icv_len =%d",
+ curfragnum, length,
+ pattrib->icv_len));
+ }
+ }
+ rtw_secgetmic23a(&micdata, &mic[0]);
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("xmitframe_addmic: before add mic code!!\n"));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("xmitframe_addmic: pattrib->last_txcmdsz ="
+ "%d!!!\n", pattrib->last_txcmdsz));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("xmitframe_addmic: mic[0]= 0x%.2x , mic[1]="
+ "0x%.2x , mic[2]= 0x%.2x , mic[3]= 0x%.2x\n"
+ "mic[4]= 0x%.2x , mic[5]= 0x%.2x , mic[6]= 0x%.2x "
+ ", mic[7]= 0x%.2x !!!!\n", mic[0], mic[1],
+ mic[2], mic[3], mic[4], mic[5], mic[6],
+ mic[7]));
+ /* add mic code and add the mic code length
+ in last_txcmdsz */
+
+ memcpy(payload, &mic[0], 8);
+ pattrib->last_txcmdsz += 8;
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("\n ======== last pkt ========\n"));
+ payload = payload - pattrib->last_txcmdsz + 8;
+ for (curfragnum = 0; curfragnum < pattrib->last_txcmdsz;
+ curfragnum = curfragnum + 8)
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ (" %.2x, %.2x, %.2x, %.2x, %.2x, "
+ " %.2x, %.2x, %.2x ",
+ *(payload + curfragnum),
+ *(payload + curfragnum + 1),
+ *(payload + curfragnum + 2),
+ *(payload + curfragnum + 3),
+ *(payload + curfragnum + 4),
+ *(payload + curfragnum + 5),
+ *(payload + curfragnum + 6),
+ *(payload + curfragnum + 7)));
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("xmitframe_addmic: rtw_get_stainfo23a =="
+ "NULL!!!\n"));
+ }
+ }
+
+ return _SUCCESS;
+}
+
+static s32 xmitframe_swencrypt(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe)
+{
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+
+ /* if ((psecuritypriv->sw_encrypt)||(pattrib->bswenc)) */
+ if (pattrib->bswenc) {
+ /* DBG_8723A("start xmitframe_swencrypt\n"); */
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_,
+ ("### xmitframe_swencrypt\n"));
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ rtw_wep_encrypt23a(padapter, pxmitframe);
+ break;
+ case _TKIP_:
+ rtw_tkip_encrypt23a(padapter, pxmitframe);
+ break;
+ case _AES_:
+ rtw_aes_encrypt23a(padapter, pxmitframe);
+ break;
+ default:
+ break;
+ }
+
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_,
+ ("### xmitframe_hwencrypt\n"));
+ }
+
+ return _SUCCESS;
+}
+
+s32 rtw_make_wlanhdr23a(struct rtw_adapter *padapter, u8 *hdr,
+ struct pkt_attrib *pattrib)
+{
+ u16 *qc;
+
+ struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ u8 qos_option = false;
+ int res = _SUCCESS;
+ u16 *fctrl = &pwlanhdr->frame_control;
+
+ struct sta_info *psta;
+
+ int bmcst = is_multicast_ether_addr(pattrib->ra);
+
+ if (pattrib->psta) {
+ psta = pattrib->psta;
+ } else {
+ DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
+ if (bmcst) {
+ psta = rtw_get_bcmc_stainfo23a(padapter);
+ } else {
+ psta = rtw_get_stainfo23a(&padapter->stapriv, pattrib->ra);
+ }
+ }
+
+ if (psta == NULL) {
+ DBG_8723A("%s, psta == NUL\n", __func__);
+ return _FAIL;
+ }
+
+ if (!(psta->state & _FW_LINKED)) {
+ DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
+ return _FAIL;
+ }
+
+ memset(hdr, 0, WLANHDR_OFFSET);
+
+ SetFrameSubType(fctrl, pattrib->subtype);
+
+ if (pattrib->subtype & WIFI_DATA_TYPE) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ /* to_ds = 1, fr_ds = 0; */
+ /* Data transfer to AP */
+ SetToDs(fctrl);
+ memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
+
+ if (pqospriv->qos_option)
+ qos_option = true;
+
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ /* to_ds = 0, fr_ds = 1; */
+ SetFrDs(fctrl);
+ memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
+
+ if (psta->qos_option)
+ qos_option = true;
+ } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
+
+ if (psta->qos_option)
+ qos_option = true;
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
+ res = _FAIL;
+ goto exit;
+ }
+ if (pattrib->mdata)
+ SetMData(fctrl);
+ if (pattrib->encrypt)
+ SetPrivacy(fctrl);
+ if (qos_option) {
+ qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
+ if (pattrib->priority)
+ SetPriority(qc, pattrib->priority);
+ SetEOSP(qc, pattrib->eosp);
+ SetAckpolicy(qc, pattrib->ack_policy);
+ }
+ /* TODO: fill HT Control Field */
+
+ /* Updating the Seq Num will be handled by f/w */
+ if (psta) {
+ psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
+ psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
+ pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
+ SetSeqNum(hdr, pattrib->seqnum);
+ /* check whether AMPDU is enabled */
+ if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
+ if (psta->htpriv.agg_enable_bitmap & CHKBIT(pattrib->priority))
+ pattrib->ampdu_en = true;
+ }
+ /* re-check the AMPDU enable against BA_starting_seqctrl */
+ if (pattrib->ampdu_en) {
+ u16 tx_seq;
+
+ tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f];
+
+ /* check BA_starting_seqctrl */
+ if (SN_LESS(pattrib->seqnum, tx_seq)) {
+ /* DBG_8723A("tx ampdu seqnum(%d) < tx_seq(%d)\n", pattrib->seqnum, tx_seq); */
+ pattrib->ampdu_en = false;/* AGG BK */
+ } else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq+1)&0xfff;
+ pattrib->ampdu_en = true;/* AGG EN */
+ } else {
+ /* DBG_8723A("tx ampdu over run\n"); */
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum+1)&0xfff;
+ pattrib->ampdu_en = true;/* AGG EN */
+ }
+ }
+ }
+ }
+exit:
+ return res;
+}
+
+s32 rtw_txframes_pending23a(struct rtw_adapter *padapter)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ return (!_rtw_queue_empty23a(&pxmitpriv->be_pending)) ||
+ (!_rtw_queue_empty23a(&pxmitpriv->bk_pending)) ||
+ (!_rtw_queue_empty23a(&pxmitpriv->vi_pending)) ||
+ (!_rtw_queue_empty23a(&pxmitpriv->vo_pending));
+}
+
+s32 rtw_txframes_sta_ac_pending23a(struct rtw_adapter *padapter,
+ struct pkt_attrib *pattrib)
+{
+ struct sta_info *psta;
+ struct tx_servq *ptxservq;
+ int priority = pattrib->priority;
+
+ if (pattrib->psta) {
+ psta = pattrib->psta;
+ } else {
+ DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
+ psta = rtw_get_stainfo23a(&padapter->stapriv, &pattrib->ra[0]);
+ }
+ if (psta == NULL) {
+ DBG_8723A("%s, psta == NUL\n", __func__);
+ return 0;
+ }
+ if (!(psta->state & _FW_LINKED)) {
+ DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__,
+ psta->state);
+ return 0;
+ }
+ switch (priority) {
+ case 1:
+ case 2:
+ ptxservq = &psta->sta_xmitpriv.bk_q;
+ break;
+ case 4:
+ case 5:
+ ptxservq = &psta->sta_xmitpriv.vi_q;
+ break;
+ case 6:
+ case 7:
+ ptxservq = &psta->sta_xmitpriv.vo_q;
+ break;
+ case 0:
+ case 3:
+ default:
+ ptxservq = &psta->sta_xmitpriv.be_q;
+ break;
+ }
+ return ptxservq->qcnt;
+}
+
+/*
+ * Calculate the maximum wlan 802.11 packet size from pkt_attrib.
+ * This function does not consider the fragmentation case.
+ */
+u32 rtw_calculate_wlan_pkt_size_by_attribue23a(struct pkt_attrib *pattrib)
+{
+ u32 len = 0;
+
+ len = pattrib->hdrlen + pattrib->iv_len; /* WLAN Header and IV */
+ len += SNAP_SIZE + sizeof(u16); /* LLC */
+ len += pattrib->pktlen;
+ if (pattrib->encrypt == _TKIP_)
+ len += 8; /* MIC */
+ len += ((pattrib->bswenc) ? pattrib->icv_len : 0); /* ICV */
+
+ return len;
+}
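+
+/*
+ * Worked example (illustrative; the attribute values are assumed, not
+ * taken from the driver): for a QoS data frame with hdrlen = 26 and
+ * software TKIP (iv_len = 8, icv_len = 4, bswenc = 1) carrying a
+ * 1500-byte payload:
+ *
+ * len = 26 + 8 (header + IV)
+ * + 6 + 2 (SNAP_SIZE + 2-byte type, per rtw_put_snap23a below)
+ * + 1500 (pktlen)
+ * + 8 (TKIP MIC)
+ * + 4 (ICV, software encryption)
+ * = 1554 octets
+ */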
+
+/*
+ * This sub-routine performs all of the following:
+ *
+ * 1. remove the 802.3 header.
+ * 2. create the wlan header, based on the info in pxmitframe
+ * 3. append the sta's iv/ext-iv
+ * 4. append the LLC
+ * 5. move the frag chunk from pframe to pxmitframe->mem
+ * 6. apply sw encryption, if necessary.
+ */
+s32 rtw_xmitframe_coalesce23a(struct rtw_adapter *padapter, struct sk_buff *pkt,
+ struct xmit_frame *pxmitframe)
+{
+ struct pkt_file pktfile;
+ struct sta_info *psta;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
+ u8 *pframe, *mem_start;
+ u8 hw_hdr_offset;
+ u8 *pbuf_start;
+
+ s32 bmcst = is_multicast_ether_addr(pattrib->ra);
+ s32 res = _SUCCESS;
+
+ if (pattrib->psta) {
+ psta = pattrib->psta;
+ } else {
+ DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
+ psta = rtw_get_stainfo23a(&padapter->stapriv, pattrib->ra);
+ }
+
+ if (psta == NULL) {
+ DBG_8723A("%s, psta == NUL\n", __func__);
+ return _FAIL;
+ }
+
+ if (!(psta->state & _FW_LINKED)) {
+ DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
+ return _FAIL;
+ }
+
+ if (pxmitframe->buf_addr == NULL) {
+ DBG_8723A("==> %s buf_addr == NULL\n", __func__);
+ return _FAIL;
+ }
+
+ pbuf_start = pxmitframe->buf_addr;
+
+ hw_hdr_offset = TXDESC_OFFSET;
+
+ mem_start = pbuf_start + hw_hdr_offset;
+
+ if (rtw_make_wlanhdr23a(padapter, mem_start, pattrib) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("rtw_xmitframe_coalesce23a: rtw_make_wlanhdr23a "
+ "fail; drop pkt\n"));
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_open_pktfile23a(pkt, &pktfile);
+ _rtw_pktfile_read23a(&pktfile, NULL, pattrib->pkt_hdrlen);
+
+ frg_inx = 0;
+ frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */
+
+ while (1) {
+ llc_sz = 0;
+
+ mpdu_len = frg_len;
+
+ pframe = mem_start;
+
+ SetMFrag(mem_start);
+
+ pframe += pattrib->hdrlen;
+ mpdu_len -= pattrib->hdrlen;
+
+ /* add the IV, if necessary... */
+ if (pattrib->iv_len) {
+ if (psta != NULL) {
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ break;
+ case _TKIP_:
+ if (bmcst)
+ TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
+ break;
+ case _AES_:
+ if (bmcst)
+ AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ AES_IV(pattrib->iv, psta->dot11txpn, 0);
+ break;
+ }
+ }
+
+ memcpy(pframe, pattrib->iv, pattrib->iv_len);
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_,
+ ("rtw_xmiaframe_coalesce23a: keyid =%d pattrib"
+ "->iv[3]=%.2x pframe =%.2x %.2x %.2x %.2x\n",
+ padapter->securitypriv.dot11PrivacyKeyIndex,
+ pattrib->iv[3], *pframe, *(pframe+1),
+ *(pframe+2), *(pframe+3)));
+ pframe += pattrib->iv_len;
+ mpdu_len -= pattrib->iv_len;
+ }
+ if (frg_inx == 0) {
+ llc_sz = rtw_put_snap23a(pframe, pattrib->ether_type);
+ pframe += llc_sz;
+ mpdu_len -= llc_sz;
+ }
+
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc))
+ mpdu_len -= pattrib->icv_len;
+
+ if (bmcst) {
+ /* do not fragment broadcast/multicast packets */
+ mem_sz = _rtw_pktfile_read23a(&pktfile, pframe, pattrib->pktlen);
+ } else {
+ mem_sz = _rtw_pktfile_read23a(&pktfile, pframe, mpdu_len);
+ }
+ pframe += mem_sz;
+
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ memcpy(pframe, pattrib->icv, pattrib->icv_len);
+ pframe += pattrib->icv_len;
+ }
+
+ frg_inx++;
+
+ if (bmcst || (rtw_endofpktfile23a(&pktfile))) {
+ pattrib->nr_frags = frg_inx;
+
+ pattrib->last_txcmdsz = pattrib->hdrlen +
+ pattrib->iv_len +
+ ((pattrib->nr_frags == 1) ?
+ llc_sz : 0) +
+ ((pattrib->bswenc) ?
+ pattrib->icv_len : 0) + mem_sz;
+
+ ClearMFrag(mem_start);
+
+ break;
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There're still something in packet!\n", __func__));
+ }
+
+ mem_start = PTR_ALIGN(pframe, 4) + hw_hdr_offset;
+ memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
+
+ }
+
+ if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n"));
+ DBG_8723A("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n");
+ res = _FAIL;
+ goto exit;
+ }
+
+ xmitframe_swencrypt(padapter, pxmitframe);
+
+ if (bmcst == false)
+ update_attrib_vcs_info(padapter, pxmitframe);
+ else
+ pattrib->vcs_mode = NONE_VCS;
+
+exit:
+ return res;
+}
+
+/* Logical Link Control (LLC) SubNetwork Attachment Point (SNAP) header
+ * The IEEE LLC/SNAP header contains 8 octets.
+ * The first 3 octets comprise the LLC portion.
+ * The SNAP portion, 5 octets, is divided into two fields:
+ * the Organizationally Unique Identifier (OUI), 3 octets, and
+ * the type, defined by that organization, 2 octets.
+ */
+s32 rtw_put_snap23a(u8 *data, u16 h_proto)
+{
+ struct ieee80211_snap_hdr *snap;
+ u8 *oui;
+
+ snap = (struct ieee80211_snap_hdr *)data;
+ snap->dsap = 0xaa;
+ snap->ssap = 0xaa;
+ snap->ctrl = 0x03;
+
+ if (h_proto == 0x8137 || h_proto == 0x80f3)
+ oui = P802_1H_OUI;
+ else
+ oui = RFC1042_OUI;
+ snap->oui[0] = oui[0];
+ snap->oui[1] = oui[1];
+ snap->oui[2] = oui[2];
+ *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
+ return SNAP_SIZE + sizeof(u16);
+}
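+
+/*
+ * Example (illustrative, assuming the conventional OUI definitions:
+ * RFC1042_OUI = 00-00-00 and P802_1H_OUI = 00-00-F8): for an IPv4
+ * payload (h_proto = 0x0800) the eight octets written are
+ * AA AA 03 00 00 00 08 00; IPX (0x8137) and AppleTalk AARP (0x80f3)
+ * use the 802.1H bridge-tunnel OUI instead.
+ */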
+
+void rtw_update_protection23a(struct rtw_adapter *padapter, u8 *ie, uint ie_len)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ uint protection;
+ u8 *perp;
+ int erp_len;
+
+ switch (pxmitpriv->vcs_setting) {
+ case DISABLE_VCS:
+ pxmitpriv->vcs = NONE_VCS;
+ break;
+ case ENABLE_VCS:
+ break;
+ case AUTO_VCS:
+ default:
+ perp = rtw_get_ie23a(ie, _ERPINFO_IE_, &erp_len, ie_len);
+ if (perp == NULL) {
+ pxmitpriv->vcs = NONE_VCS;
+ } else {
+ protection = (*(perp + 2)) & BIT(1);
+ if (protection) {
+ if (pregistrypriv->vcs_type == RTS_CTS)
+ pxmitpriv->vcs = RTS_CTS;
+ else
+ pxmitpriv->vcs = CTS_TO_SELF;
+ } else {
+ pxmitpriv->vcs = NONE_VCS;
+ }
+ }
+ break;
+ }
+}
+
+void rtw_count_tx_stats23a(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe, int sz)
+{
+ struct sta_info *psta = NULL;
+ struct stainfo_stats *pstats = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ pxmitpriv->tx_bytes += sz;
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod++;
+
+ psta = pxmitframe->attrib.psta;
+ if (psta) {
+ pstats = &psta->sta_stats;
+ pstats->tx_pkts++;
+ pstats->tx_bytes += sz;
+ }
+ }
+}
+
+struct xmit_buf *rtw_alloc_xmitbuf23a_ext(struct xmit_priv *pxmitpriv)
+{
+ unsigned long irqL;
+ struct xmit_buf *pxmitbuf = NULL;
+ struct list_head *phead;
+ struct rtw_queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
+
+ spin_lock_irqsave(&pfree_queue->lock, irqL);
+
+ phead = get_list_head(pfree_queue);
+
+ if (!list_empty(phead)) {
+ pxmitbuf = list_first_entry(phead, struct xmit_buf, list);
+
+ list_del_init(&pxmitbuf->list);
+
+ pxmitpriv->free_xmit_extbuf_cnt--;
+ pxmitbuf->priv_data = NULL;
+ pxmitbuf->ext_tag = true;
+
+ if (pxmitbuf->sctx) {
+ DBG_8723A("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
+ }
+ }
+
+ spin_unlock_irqrestore(&pfree_queue->lock, irqL);
+
+ return pxmitbuf;
+}
+
+s32 rtw_free_xmitbuf_ext23a(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+{
+ unsigned long irqL;
+ struct rtw_queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
+
+ if (pxmitbuf == NULL)
+ return _FAIL;
+
+ spin_lock_irqsave(&pfree_queue->lock, irqL);
+
+ list_del_init(&pxmitbuf->list);
+
+ list_add_tail(&pxmitbuf->list, get_list_head(pfree_queue));
+ pxmitpriv->free_xmit_extbuf_cnt++;
+
+ spin_unlock_irqrestore(&pfree_queue->lock, irqL);
+
+ return _SUCCESS;
+}
+
+struct xmit_buf *rtw_alloc_xmitbuf23a(struct xmit_priv *pxmitpriv)
+{
+ unsigned long irqL;
+ struct xmit_buf *pxmitbuf = NULL;
+ struct list_head *phead;
+ struct rtw_queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
+
+ /* DBG_8723A("+rtw_alloc_xmitbuf23a\n"); */
+
+ spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
+
+ phead = get_list_head(pfree_xmitbuf_queue);
+
+ if (!list_empty(phead)) {
+ pxmitbuf = list_first_entry(phead, struct xmit_buf, list);
+
+ list_del_init(&pxmitbuf->list);
+
+ pxmitpriv->free_xmitbuf_cnt--;
+ pxmitbuf->priv_data = NULL;
+ pxmitbuf->ext_tag = false;
+ pxmitbuf->flags = XMIT_VO_QUEUE;
+
+ if (pxmitbuf->sctx) {
+ DBG_8723A("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
+ }
+ }
+
+ spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL);
+
+ return pxmitbuf;
+}
+
+s32 rtw_free_xmitbuf23a(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+{
+ unsigned long irqL;
+ struct rtw_queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
+
+ /* DBG_8723A("+rtw_free_xmitbuf23a\n"); */
+
+ if (pxmitbuf == NULL)
+ return _FAIL;
+
+ if (pxmitbuf->sctx) {
+ DBG_8723A("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE);
+ }
+
+ if (pxmitbuf->ext_tag) {
+ rtw_free_xmitbuf_ext23a(pxmitpriv, pxmitbuf);
+ } else {
+ spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
+
+ list_del_init(&pxmitbuf->list);
+
+ list_add_tail(&pxmitbuf->list,
+ get_list_head(pfree_xmitbuf_queue));
+
+ pxmitpriv->free_xmitbuf_cnt++;
+ spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL);
+ }
+
+ return _SUCCESS;
+}
+
+static void rtw_init_xmitframe(struct xmit_frame *pxframe)
+{
+ if (pxframe != NULL) {
+ /* default value setting */
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
+
+ memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
+ /* pxframe->attrib.psta = NULL; */
+
+ pxframe->frame_tag = DATA_FRAMETAG;
+
+ pxframe->pkt = NULL;
+ pxframe->pkt_offset = 1;/* default use pkt_offset to fill tx desc */
+
+ pxframe->ack_report = 0;
+ }
+}
+
+/*
+ * Calling context:
+ * 1. OS_TXENTRY
+ * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
+ *
+ * If USE_RXTHREAD is turned on, there is no need for a critical section.
+ * Otherwise, _enter/_exit critical must be used to protect free_xmit_queue.
+ *
+ * Be very cautious here.
+ */
+struct xmit_frame *rtw_alloc_xmitframe23a(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */
+{
+ /*
+ * Remember to use the osdep_service api, and lock/unlock or
+ * _enter/_exit critical, to protect pfree_xmit_queue.
+ */
+
+ struct xmit_frame *pxframe = NULL;
+ struct list_head *plist, *phead;
+ struct rtw_queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
+
+ spin_lock_bh(&pfree_xmit_queue->lock);
+
+ if (_rtw_queue_empty23a(pfree_xmit_queue) == true) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe23a:%d\n", pxmitpriv->free_xmitframe_cnt));
+ pxframe = NULL;
+ } else {
+ phead = get_list_head(pfree_xmit_queue);
+
+ plist = phead->next;
+
+ pxframe = container_of(plist, struct xmit_frame, list);
+
+ list_del_init(&pxframe->list);
+ pxmitpriv->free_xmitframe_cnt--;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe23a():free_xmitframe_cnt =%d\n", pxmitpriv->free_xmitframe_cnt));
+ }
+
+ spin_unlock_bh(&pfree_xmit_queue->lock);
+
+ rtw_init_xmitframe(pxframe);
+
+ return pxframe;
+}
+
+struct xmit_frame *rtw_alloc_xmitframe23a_ext(struct xmit_priv *pxmitpriv)
+{
+ struct xmit_frame *pxframe = NULL;
+ struct list_head *plist, *phead;
+ struct rtw_queue *queue = &pxmitpriv->free_xframe_ext_queue;
+
+ spin_lock_bh(&queue->lock);
+
+ if (_rtw_queue_empty23a(queue) == true) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe23a_ext:%d\n", pxmitpriv->free_xframe_ext_cnt));
+ pxframe = NULL;
+ } else {
+ phead = get_list_head(queue);
+ plist = phead->next;
+ pxframe = container_of(plist, struct xmit_frame, list);
+
+ list_del_init(&pxframe->list);
+ pxmitpriv->free_xframe_ext_cnt--;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe23a_ext():free_xmitframe_cnt =%d\n", pxmitpriv->free_xframe_ext_cnt));
+ }
+
+ spin_unlock_bh(&queue->lock);
+
+ rtw_init_xmitframe(pxframe);
+
+ return pxframe;
+}
+
+s32 rtw_free_xmitframe23a(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe)
+{
+ struct rtw_queue *queue = NULL;
+ struct rtw_adapter *padapter = pxmitpriv->adapter;
+ struct sk_buff *pndis_pkt = NULL;
+
+ if (pxmitframe == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====== rtw_free_xmitframe23a():pxmitframe == NULL!!!!!!!!!!\n"));
+ goto exit;
+ }
+
+ if (pxmitframe->pkt) {
+ pndis_pkt = pxmitframe->pkt;
+ pxmitframe->pkt = NULL;
+ }
+
+ if (pxmitframe->ext_tag == 0)
+ queue = &pxmitpriv->free_xmit_queue;
+ else if (pxmitframe->ext_tag == 1)
+ queue = &pxmitpriv->free_xframe_ext_queue;
+
+ if (!queue)
+ goto check_pkt_complete;
+ spin_lock_bh(&queue->lock);
+
+ list_del_init(&pxmitframe->list);
+ list_add_tail(&pxmitframe->list, get_list_head(queue));
+ if (pxmitframe->ext_tag == 0) {
+ pxmitpriv->free_xmitframe_cnt++;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("rtw_free_xmitframe23a():free_xmitframe_cnt =%d\n", pxmitpriv->free_xmitframe_cnt));
+ } else if (pxmitframe->ext_tag == 1) {
+ pxmitpriv->free_xframe_ext_cnt++;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("rtw_free_xmitframe23a():free_xframe_ext_cnt =%d\n", pxmitpriv->free_xframe_ext_cnt));
+ }
+
+ spin_unlock_bh(&queue->lock);
+
+check_pkt_complete:
+
+ if (pndis_pkt)
+ rtw_os_pkt_complete23a(padapter, pndis_pkt);
+
+exit:
+
+ return _SUCCESS;
+}
+
+void rtw_free_xmitframe_queue23a(struct xmit_priv *pxmitpriv,
+ struct rtw_queue *pframequeue)
+{
+ struct list_head *plist, *phead, *ptmp;
+ struct xmit_frame *pxmitframe;
+
+ spin_lock_bh(&pframequeue->lock);
+
+ phead = get_list_head(pframequeue);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ pxmitframe = container_of(plist, struct xmit_frame, list);
+
+ rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
+ }
+ spin_unlock_bh(&pframequeue->lock);
+
+}
+
+s32 rtw_xmitframe_enqueue23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe)
+{
+ if (rtw_xmit23a_classifier(padapter, pxmitframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("rtw_xmitframe_enqueue23a: drop xmit pkt for "
+ "classifier fail\n"));
+ return _FAIL;
+ }
+
+ return _SUCCESS;
+}
+
+static struct xmit_frame *
+dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit,
+ struct tx_servq *ptxservq, struct rtw_queue *pframe_queue)
+{
+ struct list_head *phead;
+ struct xmit_frame *pxmitframe = NULL;
+
+ phead = get_list_head(pframe_queue);
+
+ if (!list_empty(phead)) {
+ pxmitframe = list_first_entry(phead, struct xmit_frame, list);
+ list_del_init(&pxmitframe->list);
+ ptxservq->qcnt--;
+ }
+ return pxmitframe;
+}
+
+struct xmit_frame *
+rtw_dequeue_xframe23a(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i,
+ int entry)
+{
+ struct list_head *sta_plist, *sta_phead, *ptmp;
+ struct hw_xmit *phwxmit;
+ struct tx_servq *ptxservq = NULL;
+ struct rtw_queue *pframe_queue = NULL;
+ struct xmit_frame *pxmitframe = NULL;
+ struct rtw_adapter *padapter = pxmitpriv->adapter;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ int i, inx[4];
+
+ inx[0] = 0;
+ inx[1] = 1;
+ inx[2] = 2;
+ inx[3] = 3;
+ if (pregpriv->wifi_spec == 1) {
+ int j;
+
+ for (j = 0; j < 4; j++)
+ inx[j] = pxmitpriv->wmm_para_seq[j];
+ }
+
+ spin_lock_bh(&pxmitpriv->lock);
+
+ for (i = 0; i < entry; i++) {
+ phwxmit = phwxmit_i + inx[i];
+
+ sta_phead = get_list_head(phwxmit->sta_queue);
+
+ list_for_each_safe(sta_plist, ptmp, sta_phead) {
+ ptxservq = container_of(sta_plist, struct tx_servq,
+ tx_pending);
+
+ pframe_queue = &ptxservq->sta_pending;
+
+ pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue);
+
+ if (pxmitframe) {
+ phwxmit->accnt--;
+
+ /* Remove the sta node when there are no pending packets. */
+ if (_rtw_queue_empty23a(pframe_queue)) /* must be done after get_next and before break */
+ list_del_init(&ptxservq->tx_pending);
+ goto exit;
+ }
+ }
+ }
+exit:
+ spin_unlock_bh(&pxmitpriv->lock);
+ return pxmitframe;
+}
+
+struct tx_servq *rtw_get_sta_pending23a(struct rtw_adapter *padapter, struct sta_info *psta, int up, u8 *ac)
+{
+ struct tx_servq *ptxservq = NULL;
+
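+ /*
+ * Map the 802.11e user priority (UP) to the WMM access category
+ * queue: UP 1/2 -> BK, UP 0/3 -> BE, UP 4/5 -> VI, UP 6/7 -> VO.
+ */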
+ switch (up) {
+ case 1:
+ case 2:
+ ptxservq = &psta->sta_xmitpriv.bk_q;
+ *(ac) = 3;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending23a : BK\n"));
+ break;
+ case 4:
+ case 5:
+ ptxservq = &psta->sta_xmitpriv.vi_q;
+ *(ac) = 1;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending23a : VI\n"));
+ break;
+ case 6:
+ case 7:
+ ptxservq = &psta->sta_xmitpriv.vo_q;
+ *(ac) = 0;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending23a : VO\n"));
+ break;
+ case 0:
+ case 3:
+ default:
+ ptxservq = &psta->sta_xmitpriv.be_q;
+ *(ac) = 2;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending23a : BE\n"));
+ break;
+ }
+ return ptxservq;
+}
+
+/*
+ * Enqueue pxmitframe on the proper per-sta service queue and link that
+ * queue onto the corresponding xx_pending list.
+ */
+s32 rtw_xmit23a_classifier(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe)
+{
+ struct sta_info *psta;
+ struct tx_servq *ptxservq;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
+ u8 ac_index;
+ int res = _SUCCESS;
+
+ if (pattrib->psta) {
+ psta = pattrib->psta;
+ } else {
+ DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
+ psta = rtw_get_stainfo23a(pstapriv, pattrib->ra);
+ }
+ if (psta == NULL) {
+ res = _FAIL;
+ DBG_8723A("rtw_xmit23a_classifier: psta == NULL\n");
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("rtw_xmit23a_classifier: psta == NULL\n"));
+ goto exit;
+ }
+ if (!(psta->state & _FW_LINKED)) {
+ DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__,
+ psta->state);
+ return _FAIL;
+ }
+ ptxservq = rtw_get_sta_pending23a(padapter, psta, pattrib->priority,
+ (u8 *)(&ac_index));
+
+ if (list_empty(&ptxservq->tx_pending)) {
+ list_add_tail(&ptxservq->tx_pending,
+ get_list_head(phwxmits[ac_index].sta_queue));
+ }
+
+ list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
+ ptxservq->qcnt++;
+ phwxmits[ac_index].accnt++;
+exit:
+ return res;
+}
+
+void rtw_alloc_hwxmits23a(struct rtw_adapter *padapter)
+{
+ struct hw_xmit *hwxmits;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ int size;
+
+ pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
+
+ size = sizeof(struct hw_xmit) * (pxmitpriv->hwxmit_entry + 1);
+ pxmitpriv->hwxmits = kzalloc(size, GFP_KERNEL);
+
+ hwxmits = pxmitpriv->hwxmits;
+
+ if (pxmitpriv->hwxmit_entry == 5) {
+ /* pxmitpriv->bmc_txqueue.head = 0; */
+ /* hwxmits[0] .phwtxqueue = &pxmitpriv->bmc_txqueue; */
+ hwxmits[0].sta_queue = &pxmitpriv->bm_pending;
+
+ /* pxmitpriv->vo_txqueue.head = 0; */
+ /* hwxmits[1] .phwtxqueue = &pxmitpriv->vo_txqueue; */
+ hwxmits[1].sta_queue = &pxmitpriv->vo_pending;
+
+ /* pxmitpriv->vi_txqueue.head = 0; */
+ /* hwxmits[2] .phwtxqueue = &pxmitpriv->vi_txqueue; */
+ hwxmits[2].sta_queue = &pxmitpriv->vi_pending;
+
+ /* pxmitpriv->bk_txqueue.head = 0; */
+ /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
+ hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
+
+ /* pxmitpriv->be_txqueue.head = 0; */
+ /* hwxmits[4] .phwtxqueue = &pxmitpriv->be_txqueue; */
+ hwxmits[4].sta_queue = &pxmitpriv->be_pending;
+
+ } else if (pxmitpriv->hwxmit_entry == 4) {
+
+ /* pxmitpriv->vo_txqueue.head = 0; */
+ /* hwxmits[0] .phwtxqueue = &pxmitpriv->vo_txqueue; */
+ hwxmits[0].sta_queue = &pxmitpriv->vo_pending;
+
+ /* pxmitpriv->vi_txqueue.head = 0; */
+ /* hwxmits[1] .phwtxqueue = &pxmitpriv->vi_txqueue; */
+ hwxmits[1].sta_queue = &pxmitpriv->vi_pending;
+
+ /* pxmitpriv->be_txqueue.head = 0; */
+ /* hwxmits[2] .phwtxqueue = &pxmitpriv->be_txqueue; */
+ hwxmits[2].sta_queue = &pxmitpriv->be_pending;
+
+ /* pxmitpriv->bk_txqueue.head = 0; */
+ /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
+ hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
+ } else {
+
+ }
+}
+
+void rtw_free_hwxmits23a(struct rtw_adapter *padapter)
+{
+ struct hw_xmit *hwxmits;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ hwxmits = pxmitpriv->hwxmits;
+ kfree(hwxmits);
+}
+
+void rtw_init_hwxmits23a(struct hw_xmit *phwxmit, int entry)
+{
+ int i;
+
+ for (i = 0; i < entry; i++, phwxmit++)
+ phwxmit->accnt = 0;
+}
+
+u32 rtw_get_ff_hwaddr23a(struct xmit_frame *pxmitframe)
+{
+ u32 addr;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+
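+ /*
+ * Map qsel to the hardware queue index: the four data ACs map to
+ * BE/BK/VI/VO, while 0x10-0x12 select the beacon, high (BC/MC in
+ * power save) and management queues.
+ */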
+ switch (pattrib->qsel) {
+ case 0:
+ case 3:
+ addr = BE_QUEUE_INX;
+ break;
+ case 1:
+ case 2:
+ addr = BK_QUEUE_INX;
+ break;
+ case 4:
+ case 5:
+ addr = VI_QUEUE_INX;
+ break;
+ case 6:
+ case 7:
+ addr = VO_QUEUE_INX;
+ break;
+ case 0x10:
+ addr = BCN_QUEUE_INX;
+ break;
+ case 0x11:/* BC/MC in PS (HIQ) */
+ addr = HIGH_QUEUE_INX;
+ break;
+ case 0x12:
+ default:
+ addr = MGT_QUEUE_INX;
+ break;
+ }
+
+ return addr;
+}
+
+static void do_queue_select(struct rtw_adapter *padapter, struct pkt_attrib *pattrib)
+{
+ u8 qsel;
+
+ qsel = pattrib->priority;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("### do_queue_select priority =%d , qsel = %d\n",
+ pattrib->priority, qsel));
+
+ pattrib->qsel = qsel;
+}
+
+/*
+ * The main transmit(tx) entry
+ *
+ * Return
+ * 1 enqueue
+ * 0 success, hardware will handle this xmit frame(packet)
+ * <0 fail
+ */
+int rtw_xmit23a(struct rtw_adapter *padapter, struct sk_buff *skb)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct xmit_frame *pxmitframe = NULL;
+ s32 res;
+
+ pxmitframe = rtw_alloc_xmitframe23a(pxmitpriv);
+
+ if (pxmitframe == NULL) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_,
+ ("rtw_xmit23a: no more pxmitframe\n"));
+ return -1;
+ }
+
+ res = update_attrib(padapter, skb, &pxmitframe->attrib);
+
+ if (res == _FAIL) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit23a: update attrib fail\n"));
+ rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
+ return -1;
+ }
+ pxmitframe->pkt = skb;
+
+ rtw_led_control(padapter, LED_CTL_TX);
+
+ do_queue_select(padapter, &pxmitframe->attrib);
+
+#ifdef CONFIG_8723AU_AP_MODE
+ spin_lock_bh(&pxmitpriv->lock);
+ if (xmitframe_enqueue_for_sleeping_sta23a(padapter, pxmitframe)) {
+ spin_unlock_bh(&pxmitpriv->lock);
+ return 1;
+ }
+ spin_unlock_bh(&pxmitpriv->lock);
+#endif
+
+ if (rtw_hal_xmit23a(padapter, pxmitframe) == false)
+ return 1;
+
+ return 0;
+}
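+
+/*
+ * Illustrative caller sketch, not part of this driver: it only shows how
+ * the return values documented above would typically be consumed by a
+ * netdev xmit hook. The handler name and the way the adapter is looked
+ * up are assumptions.
+ *
+ * static netdev_tx_t example_start_xmit(struct sk_buff *skb,
+ * struct net_device *ndev)
+ * {
+ * struct rtw_adapter *padapter = netdev_priv(ndev);
+ * int ret = rtw_xmit23a(padapter, skb);
+ *
+ * if (ret < 0)
+ * dev_kfree_skb_any(skb); // alloc/attrib failed, drop
+ * // ret == 0: hardware handles the frame; ret == 1: it was enqueued
+ * return NETDEV_TX_OK;
+ * }
+ */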
+
+#if defined(CONFIG_8723AU_AP_MODE)
+
+int xmitframe_enqueue_for_sleeping_sta23a(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ int ret = false;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int bmcst = is_multicast_ether_addr(pattrib->ra);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == false)
+ return ret;
+
+ if (pattrib->psta) {
+ psta = pattrib->psta;
+ } else {
+ DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
+ psta = rtw_get_stainfo23a(pstapriv, pattrib->ra);
+ }
+
+ if (psta == NULL) {
+ DBG_8723A("%s, psta == NUL\n", __func__);
+ return false;
+ }
+
+ if (!(psta->state & _FW_LINKED)) {
+ DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__,
+ psta->state);
+ return false;
+ }
+
+ if (pattrib->triggered == 1) {
+ if (bmcst)
+ pattrib->qsel = 0x11;/* HIQ */
+ return ret;
+ }
+
+ if (bmcst) {
+ spin_lock_bh(&psta->sleep_q.lock);
+
+ if (pstapriv->sta_dz_bitmap) {
+ /* if any sta is in ps mode */
+ list_del_init(&pxmitframe->list);
+
+ /* spin_lock_bh(&psta->sleep_q.lock); */
+
+ list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
+
+ psta->sleepq_len++;
+
+ pstapriv->tim_bitmap |= BIT(0);
+ pstapriv->sta_dz_bitmap |= BIT(0);
+
+ /* DBG_8723A("enqueue, sq_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
+
+ update_beacon23a(padapter, _TIM_IE_, NULL, false); /* tx bc/mc packets after update bcn */
+
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+
+ ret = true;
+
+ }
+
+ spin_unlock_bh(&psta->sleep_q.lock);
+
+ return ret;
+
+ }
+
+ spin_lock_bh(&psta->sleep_q.lock);
+
+ if (psta->state&WIFI_SLEEP_STATE) {
+ u8 wmmps_ac = 0;
+
+ if (pstapriv->sta_dz_bitmap & CHKBIT(psta->aid)) {
+ list_del_init(&pxmitframe->list);
+
+ /* spin_lock_bh(&psta->sleep_q.lock); */
+
+ list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
+
+ psta->sleepq_len++;
+
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk & BIT(0);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi & BIT(0);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo & BIT(0);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be & BIT(0);
+ break;
+ }
+
+ if (wmmps_ac)
+ psta->sleepq_ac_len++;
+
+ if (((psta->has_legacy_ac) && (!wmmps_ac)) ||
+ ((!psta->has_legacy_ac) && (wmmps_ac))) {
+ pstapriv->tim_bitmap |= CHKBIT(psta->aid);
+
+ if (psta->sleepq_len == 1) {
+ /* update BCN for TIM IE */
+ update_beacon23a(padapter, _TIM_IE_, NULL, false);
+ }
+ }
+
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+
+ /* if (psta->sleepq_len > (NR_XMITFRAME>>3)) */
+ /* */
+ /* wakeup_sta_to_xmit23a(padapter, psta); */
+ /* */
+
+ ret = true;
+
+ }
+
+ }
+
+ spin_unlock_bh(&psta->sleep_q.lock);
+
+ return ret;
+}
+
+static void
+dequeue_xmitframes_to_sleeping_queue(struct rtw_adapter *padapter,
+ struct sta_info *psta,
+ struct rtw_queue *pframequeue)
+{
+ int ret;
+ struct list_head *plist, *phead, *ptmp;
+ u8 ac_index;
+ struct tx_servq *ptxservq;
+ struct pkt_attrib *pattrib;
+ struct xmit_frame *pxmitframe;
+ struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
+
+ phead = get_list_head(pframequeue);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ pxmitframe = container_of(plist, struct xmit_frame, list);
+
+ ret = xmitframe_enqueue_for_sleeping_sta23a(padapter, pxmitframe);
+
+ if (ret == true) {
+ pattrib = &pxmitframe->attrib;
+
+ ptxservq = rtw_get_sta_pending23a(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
+
+ ptxservq->qcnt--;
+ phwxmits[ac_index].accnt--;
+ } else {
+ /* DBG_8723A("xmitframe_enqueue_for_sleeping_sta23a return false\n"); */
+ }
+ }
+}
+
+void stop_sta_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ struct sta_info *psta_bmc;
+ struct sta_xmit_priv *pstaxmitpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ pstaxmitpriv = &psta->sta_xmitpriv;
+
+ /* for BC/MC Frames */
+ psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
+
+ spin_lock_bh(&pxmitpriv->lock);
+
+ psta->state |= WIFI_SLEEP_STATE;
+
+ pstapriv->sta_dz_bitmap |= CHKBIT(psta->aid);
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
+ list_del_init(&pstaxmitpriv->vo_q.tx_pending);
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
+ list_del_init(&pstaxmitpriv->vi_q.tx_pending);
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta,
+ &pstaxmitpriv->be_q.sta_pending);
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta,
+ &pstaxmitpriv->bk_q.sta_pending);
+ list_del_init(&pstaxmitpriv->bk_q.tx_pending);
+
+ /* for BC/MC Frames */
+ pstaxmitpriv = &psta_bmc->sta_xmitpriv;
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc,
+ &pstaxmitpriv->be_q.sta_pending);
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
+
+ spin_unlock_bh(&pxmitpriv->lock);
+}
+
+void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ u8 update_mask = 0, wmmps_ac = 0;
+ struct sta_info *psta_bmc;
+ struct list_head *plist, *phead, *ptmp;
+ struct xmit_frame *pxmitframe = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ spin_lock_bh(&pxmitpriv->lock);
+
+ phead = get_list_head(&psta->sleep_q);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ pxmitframe = container_of(plist, struct xmit_frame, list);
+ list_del_init(&pxmitframe->list);
+
+ switch (pxmitframe->attrib.priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk & BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi & BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo & BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be & BIT(1);
+ break;
+ }
+
+ psta->sleepq_len--;
+ if (psta->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ if (wmmps_ac) {
+ psta->sleepq_ac_len--;
+ if (psta->sleepq_ac_len > 0) {
+ pxmitframe->attrib.mdata = 1;
+ pxmitframe->attrib.eosp = 0;
+ } else {
+ pxmitframe->attrib.mdata = 0;
+ pxmitframe->attrib.eosp = 1;
+ }
+ }
+
+ pxmitframe->attrib.triggered = 1;
+ rtw_hal_xmit23aframe_enqueue(padapter, pxmitframe);
+ }
+
+ if (psta->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
+
+ /* update BCN for TIM IE */
+ update_mask = BIT(0);
+
+ if (psta->state&WIFI_SLEEP_STATE)
+ psta->state ^= WIFI_SLEEP_STATE;
+
+ if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+ psta->expire_to = pstapriv->expire_to;
+ psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+ }
+
+ pstapriv->sta_dz_bitmap &= ~CHKBIT(psta->aid);
+ }
+
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ /* for BC/MC Frames */
+ psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
+ if (!psta_bmc)
+ return;
+
+ if ((pstapriv->sta_dz_bitmap & 0xfffe) == 0x0) {
+ /* no sta is in ps mode */
+ spin_lock_bh(&pxmitpriv->lock);
+
+ phead = get_list_head(&psta_bmc->sleep_q);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ pxmitframe = container_of(plist, struct xmit_frame,
+ list);
+
+ list_del_init(&pxmitframe->list);
+
+ psta_bmc->sleepq_len--;
+ if (psta_bmc->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ pxmitframe->attrib.triggered = 1;
+ rtw_hal_xmit23aframe_enqueue(padapter, pxmitframe);
+ }
+ if (psta_bmc->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~BIT(0);
+ pstapriv->sta_dz_bitmap &= ~BIT(0);
+
+ /* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_mask |= BIT(1);
+ }
+
+ /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
+ }
+
+ if (update_mask)
+ update_beacon23a(padapter, _TIM_IE_, NULL, false);
+}
+
+void xmit_delivery_enabled_frames23a(struct rtw_adapter *padapter,
+ struct sta_info *psta)
+{
+ u8 wmmps_ac = 0;
+ struct list_head *plist, *phead, *ptmp;
+ struct xmit_frame *pxmitframe;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ /* spin_lock_bh(&psta->sleep_q.lock); */
+ spin_lock_bh(&pxmitpriv->lock);
+
+ phead = get_list_head(&psta->sleep_q);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ pxmitframe = container_of(plist, struct xmit_frame, list);
+
+ switch (pxmitframe->attrib.priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk & BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi & BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo & BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be & BIT(1);
+ break;
+ }
+
+ if (!wmmps_ac)
+ continue;
+
+ list_del_init(&pxmitframe->list);
+
+ psta->sleepq_len--;
+ psta->sleepq_ac_len--;
+
+ if (psta->sleepq_ac_len > 0) {
+ pxmitframe->attrib.mdata = 1;
+ pxmitframe->attrib.eosp = 0;
+ } else {
+ pxmitframe->attrib.mdata = 0;
+ pxmitframe->attrib.eosp = 1;
+ }
+
+ pxmitframe->attrib.triggered = 1;
+
+ rtw_hal_xmit23aframe_enqueue(padapter, pxmitframe);
+
+ if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) &&
+ (wmmps_ac)) {
+ pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
+
+ /* update BCN for TIM IE */
+ update_beacon23a(padapter, _TIM_IE_, NULL, false);
+ }
+ }
+ spin_unlock_bh(&pxmitpriv->lock);
+}
+
+#endif
+
+void rtw_sctx_init23a(struct submit_ctx *sctx, int timeout_ms)
+{
+ sctx->timeout_ms = timeout_ms;
+ init_completion(&sctx->done);
+ sctx->status = RTW_SCTX_SUBMITTED;
+}
+
+int rtw_sctx_wait23a(struct submit_ctx *sctx)
+{
+ int ret = _FAIL;
+ unsigned long expire;
+ int status = 0;
+
+ expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) :
+ MAX_SCHEDULE_TIMEOUT;
+ if (!wait_for_completion_timeout(&sctx->done, expire)) {
+ /* timeout, do something?? */
+ status = RTW_SCTX_DONE_TIMEOUT;
+ DBG_8723A("%s timeout\n", __func__);
+ } else {
+ status = sctx->status;
+ }
+
+ if (status == RTW_SCTX_DONE_SUCCESS)
+ ret = _SUCCESS;
+
+ return ret;
+}
+
+static bool rtw_sctx_chk_waring_status(int status)
+{
+ switch (status) {
+ case RTW_SCTX_DONE_UNKNOWN:
+ case RTW_SCTX_DONE_BUF_ALLOC:
+ case RTW_SCTX_DONE_BUF_FREE:
+ case RTW_SCTX_DONE_DRV_STOP:
+ case RTW_SCTX_DONE_DEV_REMOVE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void rtw23a_sctx_done_err(struct submit_ctx **sctx, int status)
+{
+ if (*sctx) {
+ if (rtw_sctx_chk_waring_status(status))
+ DBG_8723A("%s status:%d\n", __func__, status);
+ (*sctx)->status = status;
+ complete(&(*sctx)->done);
+ *sctx = NULL;
+ }
+}
+
+void rtw_sctx_done23a(struct submit_ctx **sctx)
+{
+ rtw23a_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS);
+}
+
+int rtw_ack_tx_wait23a(struct xmit_priv *pxmitpriv, u32 timeout_ms)
+{
+ struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
+
+ pack_tx_ops->timeout_ms = timeout_ms;
+ pack_tx_ops->status = RTW_SCTX_SUBMITTED;
+
+ return rtw_sctx_wait23a(pack_tx_ops);
+}
+
+void rtw_ack_tx_done23a(struct xmit_priv *pxmitpriv, int status)
+{
+ struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
+
+ if (pxmitpriv->ack_tx)
+ rtw23a_sctx_done_err(&pack_tx_ops, status);
+ else
+ DBG_8723A("%s ack_tx not set\n", __func__);
+}
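+
+/*
+ * Illustrative use of the submit_ctx helpers above (a sketch, not code
+ * from this driver; example_issue_io() is hypothetical). A submitter
+ * initializes the context and waits; the completion path reports status:
+ *
+ * struct submit_ctx sctx, *psctx = &sctx;
+ *
+ * rtw_sctx_init23a(&sctx, 2000); // 2000 ms timeout
+ * example_issue_io(padapter, psctx); // completer keeps the pointer
+ * if (rtw_sctx_wait23a(&sctx) != _SUCCESS)
+ * DBG_8723A("io did not complete successfully\n");
+ *
+ * // ...and in the completion (or error) path:
+ * rtw23a_sctx_done_err(&psctx, RTW_SCTX_DONE_SUCCESS);
+ * // or simply: rtw_sctx_done23a(&psctx);
+ */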
diff --git a/drivers/staging/rtl8723au/hal/Hal8723PwrSeq.c b/drivers/staging/rtl8723au/hal/Hal8723PwrSeq.c
new file mode 100644
index 000000000000..747f86cddeb9
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/Hal8723PwrSeq.c
@@ -0,0 +1,80 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "Hal8723PwrSeq.h"
+
+/*
+ * Drivers should parse the arrays below and perform the corresponding actions.
+ */
+/* 3 Power on Array */
+struct wlan_pwr_cfg rtl8723AU_power_on_flow[RTL8723A_TRANS_CARDEMU_TO_ACT_STEPS+RTL8723A_TRANS_END_STEPS] = {
+ RTL8723A_TRANS_CARDEMU_TO_ACT
+ RTL8723A_TRANS_END
+};
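+
+/*
+ * Sketch of how a driver might walk one of these tables (illustrative
+ * only; the wlan_pwr_cfg field names and PWR_CMD_* values used below are
+ * assumptions about the header, not taken from this file):
+ *
+ * const struct wlan_pwr_cfg *step = rtl8723AU_power_on_flow;
+ *
+ * for (; step->cmd != PWR_CMD_END; step++) {
+ * if (step->cmd == PWR_CMD_WRITE) {
+ * u8 v = rtw_read8(padapter, step->offset);
+ * v = (v & ~step->msk) | (step->value & step->msk);
+ * rtw_write8(padapter, step->offset, v);
+ * } // polling and delay commands would be handled similarly
+ * }
+ */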
+
+/* 3 Radio off GPIO Array */
+struct wlan_pwr_cfg rtl8723AU_radio_off_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_END_STEPS] = {
+ RTL8723A_TRANS_ACT_TO_CARDEMU
+ RTL8723A_TRANS_END
+};
+
+/* 3 Card Disable Array */
+struct wlan_pwr_cfg rtl8723AU_card_disable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS] = {
+ RTL8723A_TRANS_ACT_TO_CARDEMU
+ RTL8723A_TRANS_CARDEMU_TO_CARDDIS
+ RTL8723A_TRANS_END
+};
+
+/* 3 Card Enable Array */
+struct wlan_pwr_cfg rtl8723AU_card_enable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS] = {
+ RTL8723A_TRANS_CARDDIS_TO_CARDEMU
+ RTL8723A_TRANS_CARDEMU_TO_ACT
+ RTL8723A_TRANS_END
+};
+
+/* 3 Suspend Array */
+struct wlan_pwr_cfg rtl8723AU_suspend_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS+RTL8723A_TRANS_END_STEPS] = {
+ RTL8723A_TRANS_ACT_TO_CARDEMU
+ RTL8723A_TRANS_CARDEMU_TO_SUS
+ RTL8723A_TRANS_END
+};
+
+/* 3 Resume Array */
+struct wlan_pwr_cfg rtl8723AU_resume_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS+RTL8723A_TRANS_END_STEPS] = {
+ RTL8723A_TRANS_SUS_TO_CARDEMU
+ RTL8723A_TRANS_CARDEMU_TO_ACT
+ RTL8723A_TRANS_END
+};
+
+/* 3 HWPDN Array */
+struct wlan_pwr_cfg rtl8723AU_hwpdn_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS] = {
+ RTL8723A_TRANS_ACT_TO_CARDEMU
+ RTL8723A_TRANS_CARDEMU_TO_PDN
+ RTL8723A_TRANS_END
+};
+
+/* 3 Enter LPS */
+struct wlan_pwr_cfg rtl8723AU_enter_lps_flow[RTL8723A_TRANS_ACT_TO_LPS_STEPS+RTL8723A_TRANS_END_STEPS] = {
+ /* FW behavior */
+ RTL8723A_TRANS_ACT_TO_LPS
+ RTL8723A_TRANS_END
+};
+
+/* 3 Leave LPS */
+struct wlan_pwr_cfg rtl8723AU_leave_lps_flow[RTL8723A_TRANS_LPS_TO_ACT_STEPS+RTL8723A_TRANS_END_STEPS] = {
+ /* FW behavior */
+ RTL8723A_TRANS_LPS_TO_ACT
+ RTL8723A_TRANS_END
+};
diff --git a/drivers/staging/rtl8723au/hal/Hal8723UHWImg_CE.c b/drivers/staging/rtl8723au/hal/Hal8723UHWImg_CE.c
new file mode 100644
index 000000000000..56833da63ced
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/Hal8723UHWImg_CE.c
@@ -0,0 +1,136 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+/* Created on 2013/01/14, 15:51 */
+#include "odm_precomp.h"
+
+u32 Rtl8723UPHY_REG_Array_PG[Rtl8723UPHY_REG_Array_PGLength] = {
+ 0xe00, 0xffffffff, 0x0a0c0c0c,
+ 0xe04, 0xffffffff, 0x02040608,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x0a0c0d0e,
+ 0xe14, 0xffffffff, 0x02040608,
+ 0xe18, 0xffffffff, 0x0a0c0d0e,
+ 0xe1c, 0xffffffff, 0x02040608,
+ 0x830, 0xffffffff, 0x0a0c0c0c,
+ 0x834, 0xffffffff, 0x02040608,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x0a0c0d0e,
+ 0x848, 0xffffffff, 0x02040608,
+ 0x84c, 0xffffffff, 0x0a0c0d0e,
+ 0x868, 0xffffffff, 0x02040608,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x06060606,
+ 0xe14, 0xffffffff, 0x00020406,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x06060606,
+ 0x848, 0xffffffff, 0x00020406,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ };
+
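+/*
+ * The table above is laid out as (register address, bitmask, value)
+ * triplets. A consumer would typically apply it with a masked BB register
+ * write, e.g. (illustrative sketch; whether PHY_SetBBReg is the routine
+ * actually used for this table is an assumption):
+ *
+ * u32 i;
+ *
+ * for (i = 0; i < Rtl8723UPHY_REG_Array_PGLength; i += 3)
+ * PHY_SetBBReg(Adapter, Rtl8723UPHY_REG_Array_PG[i],
+ * Rtl8723UPHY_REG_Array_PG[i + 1],
+ * Rtl8723UPHY_REG_Array_PG[i + 2]);
+ */
+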
+u32 Rtl8723UMACPHY_Array_PG[Rtl8723UMACPHY_Array_PGLength] = {
+ 0x0,
+};
diff --git a/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c b/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c
new file mode 100644
index 000000000000..9796f2e5c68f
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c
@@ -0,0 +1,1063 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/* Description: */
+/* This file is for 92CE/92CU dynamic mechanism only */
+
+/* include files */
+
+#include "odm_precomp.h"
+
+#define DPK_DELTA_MAPPING_NUM 13
+#define index_mapping_HP_NUM 15
+/* 091212 chiyokolin */
+static void
+odm_TXPowerTrackingCallback_ThermalMeter_92C(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ u8 ThermalValue = 0, delta, delta_LCK, delta_IQK, delta_HP;
+ int ele_A, ele_D, TempCCk, X, value32;
+ int Y, ele_C;
+ s8 OFDM_index[2], CCK_index = 0, OFDM_index_old[2] = {0};
+ s8 CCK_index_old = 0;
+ int i = 0;
+ bool is2T = IS_92C_SERIAL(pHalData->VersionID);
+ u8 OFDM_min_index = 6, rf; /* OFDM BB Swing should be less than +3.0dB */
+ u8 ThermalValue_HP_count = 0;
+ u32 ThermalValue_HP = 0;
+ s32 index_mapping_HP[index_mapping_HP_NUM] = {
+ 0, 1, 3, 4, 6,
+ 7, 9, 10, 12, 13,
+ 15, 16, 18, 19, 21
+ };
+ s8 index_HP;
+
+ pdmpriv->TXPowerTrackingCallbackCnt++; /* cosa add for debug */
+ pdmpriv->bTXPowerTrackingInit = true;
+
+ if (pHalData->CurrentChannel == 14 && !pdmpriv->bCCKinCH14)
+ pdmpriv->bCCKinCH14 = true;
+ else if (pHalData->CurrentChannel != 14 && pdmpriv->bCCKinCH14)
+ pdmpriv->bCCKinCH14 = false;
+
+ ThermalValue = (u8)PHY_QueryRFReg(Adapter, RF_PATH_A, RF_T_METER,
+ 0x1f);/* 0x24: RF Reg[4:0] */
+
+ rtl8723a_phy_ap_calibrate(Adapter, (ThermalValue -
+ pHalData->EEPROMThermalMeter));
+
+ if (is2T)
+ rf = 2;
+ else
+ rf = 1;
+
+ if (ThermalValue) {
+ /* Query OFDM path A default setting */
+ ele_D = PHY_QueryBBReg(Adapter, rOFDM0_XATxIQImbalance,
+ bMaskDWord)&bMaskOFDM_D;
+ for (i = 0; i < OFDM_TABLE_SIZE_92C; i++) {
+ /* find the index */
+ if (ele_D == (OFDMSwingTable23A[i]&bMaskOFDM_D)) {
+ OFDM_index_old[0] = (u8)i;
+ break;
+ }
+ }
+
+ /* Query OFDM path B default setting */
+ if (is2T) {
+ ele_D = PHY_QueryBBReg(Adapter, rOFDM0_XBTxIQImbalance,
+ bMaskDWord)&bMaskOFDM_D;
+ for (i = 0; i < OFDM_TABLE_SIZE_92C; i++) { /* find the index */
+ if (ele_D == (OFDMSwingTable23A[i]&bMaskOFDM_D)) {
+ OFDM_index_old[1] = (u8)i;
+ break;
+ }
+ }
+ }
+
+ /* Query CCK default setting From 0xa24 */
+ TempCCk = PHY_QueryBBReg(Adapter, rCCK0_TxFilter2,
+ bMaskDWord)&bMaskCCK;
+ for (i = 0 ; i < CCK_TABLE_SIZE ; i++) {
+ if (pdmpriv->bCCKinCH14) {
+ if (!memcmp(&TempCCk,
+ &CCKSwingTable_Ch1423A[i][2], 4)) {
+ CCK_index_old = (u8)i;
+ break;
+ }
+ } else {
+ if (!memcmp(&TempCCk,
+ &CCKSwingTable_Ch1_Ch1323A[i][2], 4)) {
+ CCK_index_old = (u8)i;
+ break;
+ }
+ }
+ }
+
+ if (!pdmpriv->ThermalValue) {
+ pdmpriv->ThermalValue = pHalData->EEPROMThermalMeter;
+ pdmpriv->ThermalValue_LCK = ThermalValue;
+ pdmpriv->ThermalValue_IQK = ThermalValue;
+ pdmpriv->ThermalValue_DPK = pHalData->EEPROMThermalMeter;
+
+ for (i = 0; i < rf; i++) {
+ pdmpriv->OFDM_index_HP[i] = OFDM_index_old[i];
+ pdmpriv->OFDM_index[i] = OFDM_index_old[i];
+ }
+ pdmpriv->CCK_index_HP = CCK_index_old;
+ pdmpriv->CCK_index = CCK_index_old;
+ }
+
+ if (pHalData->BoardType == BOARD_USB_High_PA) {
+ pdmpriv->ThermalValue_HP[pdmpriv->ThermalValue_HP_index] = ThermalValue;
+ pdmpriv->ThermalValue_HP_index++;
+ if (pdmpriv->ThermalValue_HP_index == HP_THERMAL_NUM)
+ pdmpriv->ThermalValue_HP_index = 0;
+
+ for (i = 0; i < HP_THERMAL_NUM; i++) {
+ if (pdmpriv->ThermalValue_HP[i]) {
+ ThermalValue_HP += pdmpriv->ThermalValue_HP[i];
+ ThermalValue_HP_count++;
+ }
+ }
+
+ if (ThermalValue_HP_count)
+ ThermalValue = (u8)(ThermalValue_HP / ThermalValue_HP_count);
+ }
+
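+ /* absolute thermal drift relative to the last tracked/calibrated values */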
+ delta = (ThermalValue > pdmpriv->ThermalValue) ?
+ (ThermalValue - pdmpriv->ThermalValue) :
+ (pdmpriv->ThermalValue - ThermalValue);
+ if (pHalData->BoardType == BOARD_USB_High_PA) {
+ if (pdmpriv->bDoneTxpower)
+ delta_HP = (ThermalValue > pdmpriv->ThermalValue) ?
+ (ThermalValue - pdmpriv->ThermalValue) :
+ (pdmpriv->ThermalValue - ThermalValue);
+ else
+ delta_HP = ThermalValue > pHalData->EEPROMThermalMeter ?
+ (ThermalValue - pHalData->EEPROMThermalMeter) :
+ (pHalData->EEPROMThermalMeter - ThermalValue);
+ } else {
+ delta_HP = 0;
+ }
+ delta_LCK = (ThermalValue > pdmpriv->ThermalValue_LCK) ?
+ (ThermalValue - pdmpriv->ThermalValue_LCK) :
+ (pdmpriv->ThermalValue_LCK - ThermalValue);
+ delta_IQK = (ThermalValue > pdmpriv->ThermalValue_IQK) ?
+ (ThermalValue - pdmpriv->ThermalValue_IQK) :
+ (pdmpriv->ThermalValue_IQK - ThermalValue);
+
+ if (delta_LCK > 1) {
+ pdmpriv->ThermalValue_LCK = ThermalValue;
+ rtl8723a_phy_lc_calibrate(Adapter);
+ }
+
+ if ((delta > 0 || delta_HP > 0) && pdmpriv->TxPowerTrackControl) {
+ if (pHalData->BoardType == BOARD_USB_High_PA) {
+ pdmpriv->bDoneTxpower = true;
+ delta_HP = ThermalValue > pHalData->EEPROMThermalMeter ?
+ (ThermalValue - pHalData->EEPROMThermalMeter) :
+ (pHalData->EEPROMThermalMeter - ThermalValue);
+
+ if (delta_HP > index_mapping_HP_NUM-1)
+ index_HP = index_mapping_HP[index_mapping_HP_NUM-1];
+ else
+ index_HP = index_mapping_HP[delta_HP];
+
+ if (ThermalValue > pHalData->EEPROMThermalMeter) {
+ /* set larger Tx power */
+ for (i = 0; i < rf; i++)
+ OFDM_index[i] = pdmpriv->OFDM_index_HP[i] - index_HP;
+ CCK_index = pdmpriv->CCK_index_HP - index_HP;
+ } else {
+ for (i = 0; i < rf; i++)
+ OFDM_index[i] = pdmpriv->OFDM_index_HP[i] + index_HP;
+ CCK_index = pdmpriv->CCK_index_HP + index_HP;
+ }
+
+ delta_HP = (ThermalValue > pdmpriv->ThermalValue) ?
+ (ThermalValue - pdmpriv->ThermalValue) :
+ (pdmpriv->ThermalValue - ThermalValue);
+ } else {
+ if (ThermalValue > pdmpriv->ThermalValue) {
+ for (i = 0; i < rf; i++)
+ pdmpriv->OFDM_index[i] -= delta;
+ pdmpriv->CCK_index -= delta;
+ } else {
+ for (i = 0; i < rf; i++)
+ pdmpriv->OFDM_index[i] += delta;
+ pdmpriv->CCK_index += delta;
+ }
+ }
+
+ /* no adjust */
+ if (pHalData->BoardType != BOARD_USB_High_PA) {
+ if (ThermalValue > pHalData->EEPROMThermalMeter) {
+ for (i = 0; i < rf; i++)
+ OFDM_index[i] = pdmpriv->OFDM_index[i]+1;
+ CCK_index = pdmpriv->CCK_index+1;
+ } else {
+ for (i = 0; i < rf; i++)
+ OFDM_index[i] = pdmpriv->OFDM_index[i];
+ CCK_index = pdmpriv->CCK_index;
+ }
+ }
+ for (i = 0; i < rf; i++) {
+ if (OFDM_index[i] > (OFDM_TABLE_SIZE_92C-1))
+ OFDM_index[i] = (OFDM_TABLE_SIZE_92C-1);
+ else if (OFDM_index[i] < OFDM_min_index)
+ OFDM_index[i] = OFDM_min_index;
+ }
+
+ if (CCK_index > (CCK_TABLE_SIZE-1))
+ CCK_index = (CCK_TABLE_SIZE-1);
+ else if (CCK_index < 0)
+ CCK_index = 0;
+ }
+
+ if (pdmpriv->TxPowerTrackControl && (delta != 0 || delta_HP != 0)) {
+ /* Adjust OFDM Ant_A according to IQK result */
+ ele_D = (OFDMSwingTable23A[OFDM_index[0]] & 0xFFC00000)>>22;
+ X = pdmpriv->RegE94;
+ Y = pdmpriv->RegE9C;
+
+ if (X != 0) {
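+ /* X and Y are 10-bit signed IQK results; sign-extend before multiplying */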
+ if ((X & 0x00000200) != 0)
+ X = X | 0xFFFFFC00;
+ ele_A = ((X * ele_D)>>8)&0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+ ele_C = ((Y * ele_D)>>8)&0x000003FF;
+
+ /* write new elements A, C, D to regC80 and regC94, element B is always 0 */
+ value32 = (ele_D<<22)|((ele_C&0x3F)<<16)|ele_A;
+ PHY_SetBBReg(Adapter, rOFDM0_XATxIQImbalance, bMaskDWord, value32);
+
+ value32 = (ele_C&0x000003C0)>>6;
+ PHY_SetBBReg(Adapter, rOFDM0_XCTxAFE, bMaskH4Bits, value32);
+
+ value32 = ((X * ele_D)>>7)&0x01;
+ PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT31, value32);
+
+ value32 = ((Y * ele_D)>>7)&0x01;
+ PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT29, value32);
+ } else {
+ PHY_SetBBReg(Adapter, rOFDM0_XATxIQImbalance, bMaskDWord, OFDMSwingTable23A[OFDM_index[0]]);
+ PHY_SetBBReg(Adapter, rOFDM0_XCTxAFE, bMaskH4Bits, 0x00);
+ PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT31|BIT29, 0x00);
+ }
+
+ /* Adjust CCK according to IQK result */
+ if (!pdmpriv->bCCKinCH14) {
+ rtw_write8(Adapter, 0xa22, CCKSwingTable_Ch1_Ch1323A[CCK_index][0]);
+ rtw_write8(Adapter, 0xa23, CCKSwingTable_Ch1_Ch1323A[CCK_index][1]);
+ rtw_write8(Adapter, 0xa24, CCKSwingTable_Ch1_Ch1323A[CCK_index][2]);
+ rtw_write8(Adapter, 0xa25, CCKSwingTable_Ch1_Ch1323A[CCK_index][3]);
+ rtw_write8(Adapter, 0xa26, CCKSwingTable_Ch1_Ch1323A[CCK_index][4]);
+ rtw_write8(Adapter, 0xa27, CCKSwingTable_Ch1_Ch1323A[CCK_index][5]);
+ rtw_write8(Adapter, 0xa28, CCKSwingTable_Ch1_Ch1323A[CCK_index][6]);
+ rtw_write8(Adapter, 0xa29, CCKSwingTable_Ch1_Ch1323A[CCK_index][7]);
+ } else {
+ rtw_write8(Adapter, 0xa22, CCKSwingTable_Ch1423A[CCK_index][0]);
+ rtw_write8(Adapter, 0xa23, CCKSwingTable_Ch1423A[CCK_index][1]);
+ rtw_write8(Adapter, 0xa24, CCKSwingTable_Ch1423A[CCK_index][2]);
+ rtw_write8(Adapter, 0xa25, CCKSwingTable_Ch1423A[CCK_index][3]);
+ rtw_write8(Adapter, 0xa26, CCKSwingTable_Ch1423A[CCK_index][4]);
+ rtw_write8(Adapter, 0xa27, CCKSwingTable_Ch1423A[CCK_index][5]);
+ rtw_write8(Adapter, 0xa28, CCKSwingTable_Ch1423A[CCK_index][6]);
+ rtw_write8(Adapter, 0xa29, CCKSwingTable_Ch1423A[CCK_index][7]);
+ }
+
+ if (is2T) {
+ ele_D = (OFDMSwingTable23A[(u8)OFDM_index[1]] & 0xFFC00000)>>22;
+
+ /* new element A = element D x X */
+ X = pdmpriv->RegEB4;
+ Y = pdmpriv->RegEBC;
+
+ if (X != 0) {
+ if ((X & 0x00000200) != 0) /* consider minus */
+ X = X | 0xFFFFFC00;
+ ele_A = ((X * ele_D)>>8)&0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+ ele_C = ((Y * ele_D)>>8)&0x000003FF;
+
+ /* write new elements A, C, D to regC88 and regC9C, element B is always 0 */
+ value32 = (ele_D<<22)|((ele_C&0x3F)<<16) | ele_A;
+ PHY_SetBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
+
+ value32 = (ele_C&0x000003C0)>>6;
+ PHY_SetBBReg(Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
+
+ value32 = ((X * ele_D)>>7)&0x01;
+ PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT27, value32);
+
+ value32 = ((Y * ele_D)>>7)&0x01;
+ PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT25, value32);
+ } else {
+ PHY_SetBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable23A[OFDM_index[1]]);
+ PHY_SetBBReg(Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
+ PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT27|BIT25, 0x00);
+ }
+ }
+
+ }
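+ /* Re-do IQ calibration when the thermal value has drifted by more than 3 since the last IQK */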
+ if (delta_IQK > 3) {
+ pdmpriv->ThermalValue_IQK = ThermalValue;
+ rtl8723a_phy_iq_calibrate(Adapter, false);
+ }
+
+ /* update thermal meter value */
+ if (pdmpriv->TxPowerTrackControl)
+ pdmpriv->ThermalValue = ThermalValue;
+ }
+ pdmpriv->TXPowercount = 0;
+}
+
+/* Description: */
+/* - Dispatch the TxPower Tracking call directly, ONLY for 92s. */
+/* - We must NOT schedule a workitem within PASSIVE_LEVEL, which would cause */
+/* system resource leakage on some platforms. */
+/* Assumption: */
+/* PASSIVE_LEVEL when this routine is called. */
+static void ODM_TXPowerTracking92CDirectCall(struct rtw_adapter *Adapter)
+{
+ odm_TXPowerTrackingCallback_ThermalMeter_92C(Adapter);
+}
+
+static void odm_CheckTXPowerTracking_ThermalMeter(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct dm_odm_t *podmpriv = &pHalData->odmpriv;
+
+ if (!(podmpriv->SupportAbility & ODM_RF_TX_PWR_TRACK))
+ return;
+
+ if (!pdmpriv->TM_Trigger) { /* at least delay 1 sec */
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_T_METER, bRFRegOffsetMask, 0x60);
+
+ pdmpriv->TM_Trigger = 1;
+ return;
+ } else {
+ ODM_TXPowerTracking92CDirectCall(Adapter);
+ pdmpriv->TM_Trigger = 0;
+ }
+}
+
+void rtl8723a_odm_check_tx_power_tracking(struct rtw_adapter *Adapter)
+{
+ odm_CheckTXPowerTracking_ThermalMeter(Adapter);
+}
+
+/* IQK */
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 1 /* ms */
+
+static u8 _PHY_PathA_IQK(struct rtw_adapter *pAdapter, bool configPathB)
+{
+ u32 regEAC, regE94, regE9C, regEA4;
+ u8 result = 0x00;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
+
+ /* path-A IQK setting */
+ PHY_SetBBReg(pAdapter, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1f);
+ PHY_SetBBReg(pAdapter, rRx_IQK_Tone_A, bMaskDWord, 0x10008c1f);
+ PHY_SetBBReg(pAdapter, rTx_IQK_PI_A, bMaskDWord, 0x82140102);
+
+ PHY_SetBBReg(pAdapter, rRx_IQK_PI_A, bMaskDWord, configPathB ? 0x28160202 :
+ IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID) ? 0x28160202 : 0x28160502);
+
+ /* path-B IQK setting */
+ if (configPathB) {
+ PHY_SetBBReg(pAdapter, rTx_IQK_Tone_B, bMaskDWord, 0x10008c22);
+ PHY_SetBBReg(pAdapter, rRx_IQK_Tone_B, bMaskDWord, 0x10008c22);
+ PHY_SetBBReg(pAdapter, rTx_IQK_PI_B, bMaskDWord, 0x82140102);
+ PHY_SetBBReg(pAdapter, rRx_IQK_PI_B, bMaskDWord, 0x28160202);
+ }
+
+ /* LO calibration setting */
+ PHY_SetBBReg(pAdapter, rIQK_AGC_Rsp, bMaskDWord, 0x001028d1);
+
+ /* One shot, path A LOK & IQK */
+ PHY_SetBBReg(pAdapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(pAdapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+ udelay(IQK_DELAY_TIME*1000);
+
+ /* Check failed */
+ regEAC = PHY_QueryBBReg(pAdapter, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regE94 = PHY_QueryBBReg(pAdapter, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE9C = PHY_QueryBBReg(pAdapter, rTx_Power_After_IQK_A, bMaskDWord);
+ regEA4 = PHY_QueryBBReg(pAdapter, rRx_Power_Before_IQK_A_2, bMaskDWord);
+
+ if (!(regEAC & BIT28) &&
+ (((regE94 & 0x03FF0000)>>16) != 0x142) &&
+ (((regE9C & 0x03FF0000)>>16) != 0x42))
+ result |= 0x01;
+ else /* if Tx not OK, ignore Rx */
+ return result;
+
+ if (!(regEAC & BIT27) && /* if Tx is OK, check whether Rx is OK */
+ (((regEA4 & 0x03FF0000)>>16) != 0x132) &&
+ (((regEAC & 0x03FF0000)>>16) != 0x36))
+ result |= 0x02;
+ else
+ DBG_8723A("Path A Rx IQK fail!!\n");
+ return result;
+}
+
+static u8 _PHY_PathB_IQK(struct rtw_adapter *pAdapter)
+{
+ u32 regEAC, regEB4, regEBC, regEC4, regECC;
+ u8 result = 0x00;
+
+ /* One shot, path B LOK & IQK */
+ PHY_SetBBReg(pAdapter, rIQK_AGC_Cont, bMaskDWord, 0x00000002);
+ PHY_SetBBReg(pAdapter, rIQK_AGC_Cont, bMaskDWord, 0x00000000);
+
+ /* delay x ms */
+ udelay(IQK_DELAY_TIME*1000);
+
+ /* Check failed */
+ regEAC = PHY_QueryBBReg(pAdapter, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regEB4 = PHY_QueryBBReg(pAdapter, rTx_Power_Before_IQK_B, bMaskDWord);
+ regEBC = PHY_QueryBBReg(pAdapter, rTx_Power_After_IQK_B, bMaskDWord);
+ regEC4 = PHY_QueryBBReg(pAdapter, rRx_Power_Before_IQK_B_2, bMaskDWord);
+ regECC = PHY_QueryBBReg(pAdapter, rRx_Power_After_IQK_B_2, bMaskDWord);
+
+ if (!(regEAC & BIT31) &&
+ (((regEB4 & 0x03FF0000)>>16) != 0x142) &&
+ (((regEBC & 0x03FF0000)>>16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ if (!(regEAC & BIT30) &&
+ (((regEC4 & 0x03FF0000)>>16) != 0x132) &&
+ (((regECC & 0x03FF0000)>>16) != 0x36))
+ result |= 0x02;
+ else
+ DBG_8723A("Path B Rx IQK fail!!\n");
+ return result;
+}
+
+static void _PHY_PathAFillIQKMatrix(struct rtw_adapter *pAdapter,
+ bool bIQKOK,
+ int result[][8],
+ u8 final_candidate,
+ bool bTxOnly
+ )
+{
+ u32 Oldval_0, X, TX0_A, reg;
+ s32 Y, TX0_C;
+
+ DBG_8723A("Path A IQ Calibration %s !\n", (bIQKOK)?"Success":"Failed");
+
+ if (final_candidate == 0xFF) {
+ return;
+ } else if (bIQKOK) {
+ Oldval_0 = (PHY_QueryBBReg(pAdapter, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+
+ X = result[final_candidate][0];
+ if ((X & 0x00000200) != 0)
+ X = X | 0xFFFFFC00;
+ TX0_A = (X * Oldval_0) >> 8;
+ PHY_SetBBReg(pAdapter, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
+ PHY_SetBBReg(pAdapter, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0>>7) & 0x1));
+
+ Y = result[final_candidate][1];
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+ TX0_C = (Y * Oldval_0) >> 8;
+ PHY_SetBBReg(pAdapter, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C&0x3C0)>>6));
+ PHY_SetBBReg(pAdapter, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C&0x3F));
+ PHY_SetBBReg(pAdapter, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0>>7) & 0x1));
+
+ if (bTxOnly) {
+ DBG_8723A("_PHY_PathAFillIQKMatrix only Tx OK\n");
+ return;
+ }
+
+ reg = result[final_candidate][2];
+ PHY_SetBBReg(pAdapter, rOFDM0_XARxIQImbalance, 0x3FF, reg);
+
+ reg = result[final_candidate][3] & 0x3F;
+ PHY_SetBBReg(pAdapter, rOFDM0_XARxIQImbalance, 0xFC00, reg);
+
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ PHY_SetBBReg(pAdapter, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
+ }
+}
+
+static void _PHY_PathBFillIQKMatrix(struct rtw_adapter *pAdapter, bool bIQKOK, int result[][8], u8 final_candidate, bool bTxOnly)
+{
+ u32 Oldval_1, X, TX1_A, reg;
+ s32 Y, TX1_C;
+
+ DBG_8723A("Path B IQ Calibration %s !\n", (bIQKOK)?"Success":"Failed");
+
+ if (final_candidate == 0xFF) {
+ return;
+ } else if (bIQKOK) {
+ Oldval_1 = (PHY_QueryBBReg(pAdapter, rOFDM0_XBTxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+
+ X = result[final_candidate][4];
+ if ((X & 0x00000200) != 0)
+ X = X | 0xFFFFFC00;
+ TX1_A = (X * Oldval_1) >> 8;
+ PHY_SetBBReg(pAdapter, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A);
+ PHY_SetBBReg(pAdapter, rOFDM0_ECCAThreshold, BIT(27), ((X * Oldval_1>>7) & 0x1));
+
+ Y = result[final_candidate][5];
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+ TX1_C = (Y * Oldval_1) >> 8;
+ PHY_SetBBReg(pAdapter, rOFDM0_XDTxAFE, 0xF0000000, ((TX1_C&0x3C0)>>6));
+ PHY_SetBBReg(pAdapter, rOFDM0_XBTxIQImbalance, 0x003F0000, (TX1_C&0x3F));
+ PHY_SetBBReg(pAdapter, rOFDM0_ECCAThreshold, BIT(25), ((Y * Oldval_1>>7) & 0x1));
+
+ if (bTxOnly)
+ return;
+
+ reg = result[final_candidate][6];
+ PHY_SetBBReg(pAdapter, rOFDM0_XBRxIQImbalance, 0x3FF, reg);
+
+ reg = result[final_candidate][7] & 0x3F;
+ PHY_SetBBReg(pAdapter, rOFDM0_XBRxIQImbalance, 0xFC00, reg);
+
+ reg = (result[final_candidate][7] >> 6) & 0xF;
+ PHY_SetBBReg(pAdapter, rOFDM0_AGCRSSITable, 0x0000F000, reg);
+ }
+}
+
+static void _PHY_SaveADDARegisters(struct rtw_adapter *pAdapter, u32 *ADDAReg, u32 *ADDABackup, u32 RegisterNum)
+{
+ u32 i;
+
+ for (i = 0 ; i < RegisterNum ; i++) {
+ ADDABackup[i] = PHY_QueryBBReg(pAdapter, ADDAReg[i], bMaskDWord);
+ }
+}
+
+static void _PHY_SaveMACRegisters(struct rtw_adapter *pAdapter, u32 *MACReg, u32 *MACBackup)
+{
+ u32 i;
+
+ for (i = 0 ; i < (IQK_MAC_REG_NUM - 1); i++) {
+ MACBackup[i] = rtw_read8(pAdapter, MACReg[i]);
+ }
+ MACBackup[i] = rtw_read32(pAdapter, MACReg[i]);
+}
+
+static void _PHY_ReloadADDARegisters(struct rtw_adapter *pAdapter, u32 *ADDAReg, u32 *ADDABackup, u32 RegisterNum)
+{
+ u32 i;
+
+ for (i = 0 ; i < RegisterNum ; i++) {
+ PHY_SetBBReg(pAdapter, ADDAReg[i], bMaskDWord, ADDABackup[i]);
+ }
+}
+
+static void _PHY_ReloadMACRegisters(struct rtw_adapter *pAdapter, u32 *MACReg, u32 *MACBackup)
+{
+ u32 i;
+
+ for (i = 0 ; i < (IQK_MAC_REG_NUM - 1); i++) {
+ rtw_write8(pAdapter, MACReg[i], (u8)MACBackup[i]);
+ }
+ rtw_write32(pAdapter, MACReg[i], MACBackup[i]);
+}
+
+static void _PHY_PathADDAOn(struct rtw_adapter *pAdapter, u32 *ADDAReg, bool isPathAOn, bool is2T)
+{
+ u32 pathOn;
+ u32 i;
+
+ pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4;
+ if (!is2T) {
+ pathOn = 0x0bdb25a0;
+ PHY_SetBBReg(pAdapter, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
+ } else {
+ PHY_SetBBReg(pAdapter, ADDAReg[0], bMaskDWord, pathOn);
+ }
+
+ for (i = 1 ; i < IQK_ADDA_REG_NUM ; i++)
+ PHY_SetBBReg(pAdapter, ADDAReg[i], bMaskDWord, pathOn);
+}
+
+static void _PHY_MACSettingCalibration(struct rtw_adapter *pAdapter, u32 *MACReg, u32 *MACBackup)
+{
+ u32 i = 0;
+
+ rtw_write8(pAdapter, MACReg[i], 0x3F);
+
+ for (i = 1 ; i < (IQK_MAC_REG_NUM - 1); i++) {
+ rtw_write8(pAdapter, MACReg[i], (u8)(MACBackup[i]&(~BIT3)));
+ }
+ rtw_write8(pAdapter, MACReg[i], (u8)(MACBackup[i]&(~BIT5)));
+}
+
+static void _PHY_PathAStandBy(struct rtw_adapter *pAdapter)
+{
+ PHY_SetBBReg(pAdapter, rFPGA0_IQK, bMaskDWord, 0x0);
+ PHY_SetBBReg(pAdapter, 0x840, bMaskDWord, 0x00010000);
+ PHY_SetBBReg(pAdapter, rFPGA0_IQK, bMaskDWord, 0x80800000);
+}
+
+static void _PHY_PIModeSwitch(struct rtw_adapter *pAdapter, bool PIMode)
+{
+ u32 mode;
+
+ mode = PIMode ? 0x01000100 : 0x01000000;
+ PHY_SetBBReg(pAdapter, 0x820, bMaskDWord, mode);
+ PHY_SetBBReg(pAdapter, 0x828, bMaskDWord, mode);
+}
+
+/* return false => do IQK again */
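+/*
+ * Compare IQK runs c1 and c2 element by element (four values per path);
+ * differences within MAX_TOLERANCE count as similar, mismatches are
+ * recorded per element in SimularityBitMap.
+ */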
+static bool _PHY_SimularityCompare(struct rtw_adapter *pAdapter, int result[][8], u8 c1, u8 c2)
+{
+ u32 i, j, diff, SimularityBitMap, bound = 0;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
+ u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */
+ bool bResult = true, is2T = IS_92C_SERIAL(pHalData->VersionID);
+
+ if (is2T)
+ bound = 8;
+ else
+ bound = 4;
+
+ SimularityBitMap = 0;
+
+ for (i = 0; i < bound; i++) {
+ diff = (result[c1][i] > result[c2][i]) ? (result[c1][i] - result[c2][i]) : (result[c2][i] - result[c1][i]);
+ if (diff > MAX_TOLERANCE) {
+ if ((i == 2 || i == 6) && !SimularityBitMap) {
+ if (result[c1][i]+result[c1][i+1] == 0)
+ final_candidate[(i/4)] = c2;
+ else if (result[c2][i]+result[c2][i+1] == 0)
+ final_candidate[(i/4)] = c1;
+ else
+ SimularityBitMap = SimularityBitMap|(1<<i);
+ } else {
+ SimularityBitMap = SimularityBitMap|(1<<i);
+ }
+ }
+ }
+
+ if (SimularityBitMap == 0) {
+ for (i = 0; i < (bound/4); i++) {
+ if (final_candidate[i] != 0xFF) {
+ for (j = i*4; j < (i+1)*4-2; j++)
+ result[3][j] = result[final_candidate[i]][j];
+ bResult = false;
+ }
+ }
+ return bResult;
+ } else if (!(SimularityBitMap & 0x0F)) {
+ /* path A OK */
+ for (i = 0; i < 4; i++)
+ result[3][i] = result[c1][i];
+ return false;
+ } else if (!(SimularityBitMap & 0xF0) && is2T) {
+ /* path B OK */
+ for (i = 4; i < 8; i++)
+ result[3][i] = result[c1][i];
+ return false;
+ } else {
+ return false;
+ }
+}
+
+static void _PHY_IQCalibrate(struct rtw_adapter *pAdapter, int result[][8], u8 t, bool is2T)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ u32 i;
+ u8 PathAOK, PathBOK;
+ u32 ADDA_REG[IQK_ADDA_REG_NUM] = {
+ rFPGA0_XCD_SwitchControl, rBlue_Tooth,
+ rRx_Wait_CCA, rTx_CCK_RFON,
+ rTx_CCK_BBON, rTx_OFDM_RFON,
+ rTx_OFDM_BBON, rTx_To_Rx,
+ rTx_To_Tx, rRx_CCK,
+ rRx_OFDM, rRx_Wait_RIFS,
+ rRx_TO_Rx, rStandby,
+ rSleep, rPMPD_ANAEN
+ };
+
+ u32 IQK_MAC_REG[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE, REG_BCN_CTRL,
+ REG_BCN_CTRL_1, REG_GPIO_MUXCFG
+ };
+
+ u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
+ rOFDM0_TRxPathEnable, rOFDM0_TRMuxPar,
+ rFPGA0_XCD_RFInterfaceSW, rConfig_AntA, rConfig_AntB,
+ rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE,
+ rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD
+ };
+
+ const u32 retryCount = 2;
+
+ /* Note: IQ calibration must be performed after loading */
+ /* PHY_REG.txt, radio_a.txt and radio_b.txt */
+
+ u32 bbvalue;
+
+ if (t == 0) {
+ bbvalue = PHY_QueryBBReg(pAdapter, rFPGA0_RFMOD, bMaskDWord);
+
+ /* Save ADDA parameters, turn Path A ADDA on */
+ _PHY_SaveADDARegisters(pAdapter, ADDA_REG, pdmpriv->ADDA_backup, IQK_ADDA_REG_NUM);
+ _PHY_SaveMACRegisters(pAdapter, IQK_MAC_REG, pdmpriv->IQK_MAC_backup);
+ _PHY_SaveADDARegisters(pAdapter, IQK_BB_REG_92C, pdmpriv->IQK_BB_backup, IQK_BB_REG_NUM);
+ }
+ _PHY_PathADDAOn(pAdapter, ADDA_REG, true, is2T);
+
+ if (t == 0)
+ pdmpriv->bRfPiEnable = (u8)PHY_QueryBBReg(pAdapter, rFPGA0_XA_HSSIParameter1, BIT(8));
+
+ if (!pdmpriv->bRfPiEnable) {
+ /* Switch BB to PI mode to do IQ Calibration. */
+ _PHY_PIModeSwitch(pAdapter, true);
+ }
+
+ PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, BIT24, 0x00);
+ PHY_SetBBReg(pAdapter, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
+ PHY_SetBBReg(pAdapter, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
+ PHY_SetBBReg(pAdapter, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT10, 0x01);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT26, 0x01);
+ PHY_SetBBReg(pAdapter, rFPGA0_XA_RFInterfaceOE, BIT10, 0x00);
+ PHY_SetBBReg(pAdapter, rFPGA0_XB_RFInterfaceOE, BIT10, 0x00);
+
+ if (is2T) {
+ PHY_SetBBReg(pAdapter, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00010000);
+ PHY_SetBBReg(pAdapter, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00010000);
+ }
+
+ /* MAC settings */
+ _PHY_MACSettingCalibration(pAdapter, IQK_MAC_REG, pdmpriv->IQK_MAC_backup);
+
+ /* Page B init */
+ PHY_SetBBReg(pAdapter, rConfig_AntA, bMaskDWord, 0x00080000);
+
+ if (is2T)
+ PHY_SetBBReg(pAdapter, rConfig_AntB, bMaskDWord, 0x00080000);
+
+ /* IQ calibration setting */
+ PHY_SetBBReg(pAdapter, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ PHY_SetBBReg(pAdapter, rTx_IQK, bMaskDWord, 0x01007c00);
+ PHY_SetBBReg(pAdapter, rRx_IQK, bMaskDWord, 0x01004800);
+
+ for (i = 0 ; i < retryCount ; i++) {
+ PathAOK = _PHY_PathA_IQK(pAdapter, is2T);
+ if (PathAOK == 0x03) {
+ DBG_8723A("Path A IQK Success!!\n");
+ result[t][0] = (PHY_QueryBBReg(pAdapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ result[t][1] = (PHY_QueryBBReg(pAdapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ result[t][2] = (PHY_QueryBBReg(pAdapter, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][3] = (PHY_QueryBBReg(pAdapter, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ } else if (i == (retryCount-1) && PathAOK == 0x01) {
+ /* Tx IQK OK */
+ DBG_8723A("Path A IQK Only Tx Success!!\n");
+
+ result[t][0] = (PHY_QueryBBReg(pAdapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ result[t][1] = (PHY_QueryBBReg(pAdapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ }
+ }
+
+ if (PathAOK == 0x00)
+ DBG_8723A("Path A IQK failed!!\n");
+
+ if (is2T) {
+ _PHY_PathAStandBy(pAdapter);
+
+ /* Turn Path B ADDA on */
+ _PHY_PathADDAOn(pAdapter, ADDA_REG, false, is2T);
+
+ for (i = 0 ; i < retryCount ; i++) {
+ PathBOK = _PHY_PathB_IQK(pAdapter);
+ if (PathBOK == 0x03) {
+ DBG_8723A("Path B IQK Success!!\n");
+ result[t][4] = (PHY_QueryBBReg(pAdapter, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][5] = (PHY_QueryBBReg(pAdapter, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][6] = (PHY_QueryBBReg(pAdapter, rRx_Power_Before_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][7] = (PHY_QueryBBReg(pAdapter, rRx_Power_After_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ } else if (i == (retryCount - 1) && PathBOK == 0x01) {
+ /* Tx IQK OK */
+ DBG_8723A("Path B Only Tx IQK Success!!\n");
+ result[t][4] = (PHY_QueryBBReg(pAdapter, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][5] = (PHY_QueryBBReg(pAdapter, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ }
+ }
+
+ if (PathBOK == 0x00)
+ DBG_8723A("Path B IQK failed!!\n");
+ }
+
+ /* Back to BB mode, load original value */
+ PHY_SetBBReg(pAdapter, rFPGA0_IQK, bMaskDWord, 0);
+
+ if (t != 0) {
+ if (!pdmpriv->bRfPiEnable) {
+ /* Switch back BB to SI mode after finish IQ Calibration. */
+ _PHY_PIModeSwitch(pAdapter, false);
+ }
+
+ /* Reload ADDA power saving parameters */
+ _PHY_ReloadADDARegisters(pAdapter, ADDA_REG, pdmpriv->ADDA_backup, IQK_ADDA_REG_NUM);
+
+ /* Reload MAC parameters */
+ _PHY_ReloadMACRegisters(pAdapter, IQK_MAC_REG, pdmpriv->IQK_MAC_backup);
+
+ /* Reload BB parameters */
+ _PHY_ReloadADDARegisters(pAdapter, IQK_BB_REG_92C, pdmpriv->IQK_BB_backup, IQK_BB_REG_NUM);
+
+ /* Restore RX initial gain */
+ PHY_SetBBReg(pAdapter, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
+ if (is2T) {
+ PHY_SetBBReg(pAdapter, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00032ed3);
+ }
+
+ /* load 0xe30 IQC default value */
+ PHY_SetBBReg(pAdapter, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ PHY_SetBBReg(pAdapter, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+
+ }
+}
+
+static void _PHY_LCCalibrate(struct rtw_adapter *pAdapter, bool is2T)
+{
+ u8 tmpReg;
+ u32 RF_Amode = 0, RF_Bmode = 0, LC_Cal;
+
+ /* Check continuous TX and Packet TX */
+ tmpReg = rtw_read8(pAdapter, 0xd03);
+
+ if ((tmpReg&0x70) != 0) /* Deal with contisuous TX case */
+ rtw_write8(pAdapter, 0xd03, tmpReg&0x8F); /* disable all continuous TX */
+ else /* Deal with Packet TX case */
+ rtw_write8(pAdapter, REG_TXPAUSE, 0xFF); /* block all queues */
+
+ if ((tmpReg&0x70) != 0) {
+ /* 1. Read original RF mode */
+ /* Path-A */
+ RF_Amode = PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_AC, bMask12Bits);
+
+ /* Path-B */
+ if (is2T)
+ RF_Bmode = PHY_QueryRFReg(pAdapter, RF_PATH_B, RF_AC, bMask12Bits);
+
+ /* 2. Set RF mode = standby mode */
+ /* Path-A */
+ PHY_SetRFReg(pAdapter, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode&0x8FFFF)|0x10000);
+
+ /* Path-B */
+ if (is2T)
+ PHY_SetRFReg(pAdapter, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode&0x8FFFF)|0x10000);
+ }
+
+ /* 3. Read RF reg18 */
+ LC_Cal = PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_CHNLBW, bMask12Bits);
+
+ /* 4. Set LC calibration begin */
+ PHY_SetRFReg(pAdapter, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal|0x08000);
+
+ msleep(100);
+
+ /* Restore original situation */
+ if ((tmpReg&0x70) != 0) { /* Deal with contuous TX case */
+ /* Path-A */
+ rtw_write8(pAdapter, 0xd03, tmpReg);
+ PHY_SetRFReg(pAdapter, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
+
+ /* Path-B */
+ if (is2T)
+ PHY_SetRFReg(pAdapter, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
+ } else { /* Deal with Packet TX case */
+ rtw_write8(pAdapter, REG_TXPAUSE, 0x00);
+ }
+}
+
+/* Analog Pre-distortion calibration */
+#define APK_BB_REG_NUM 8
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 2
+
+void rtl8723a_phy_iq_calibrate(struct rtw_adapter *pAdapter, bool bReCovery)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ s32 result[4][8]; /* last is final result */
+ u8 i, final_candidate;
+ bool bPathAOK, bPathBOK;
+ s32 RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4;
+ s32 RegECC, RegTmp = 0;
+ bool is12simular, is13simular, is23simular;
+ bool bStartContTx = false, bSingleTone = false;
+ bool bCarrierSuppression = false;
+ u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
+ rOFDM0_XARxIQImbalance, rOFDM0_XBRxIQImbalance,
+ rOFDM0_ECCAThreshold, rOFDM0_AGCRSSITable,
+ rOFDM0_XATxIQImbalance, rOFDM0_XBTxIQImbalance,
+ rOFDM0_XCTxAFE, rOFDM0_XDTxAFE,
+ rOFDM0_RxIQExtAnta
+ };
+
+ /* ignore IQK when continuous Tx */
+ if (bStartContTx || bSingleTone || bCarrierSuppression)
+ return;
+
+ if (bReCovery) {
+ _PHY_ReloadADDARegisters(pAdapter, IQK_BB_REG_92C, pdmpriv->IQK_BB_backup_recover, 9);
+ return;
+ }
+ DBG_8723A("IQK:Start!!!\n");
+
+ for (i = 0; i < 8; i++) {
+ result[0][i] = 0;
+ result[1][i] = 0;
+ result[2][i] = 0;
+ result[3][i] = 0;
+ }
+ final_candidate = 0xff;
+ bPathAOK = false;
+ bPathBOK = false;
+ is12simular = false;
+ is23simular = false;
+ is13simular = false;
+
+ for (i = 0; i < 3; i++) {
+ if (IS_92C_SERIAL(pHalData->VersionID)) {
+ _PHY_IQCalibrate(pAdapter, result, i, true);
+ } else {
+ /* For 88C 1T1R */
+ _PHY_IQCalibrate(pAdapter, result, i, false);
+ }
+
+ if (i == 1) {
+ is12simular = _PHY_SimularityCompare(pAdapter, result, 0, 1);
+ if (is12simular) {
+ final_candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ is13simular = _PHY_SimularityCompare(pAdapter, result, 0, 2);
+ if (is13simular) {
+ final_candidate = 0;
+ break;
+ }
+
+ is23simular = _PHY_SimularityCompare(pAdapter, result, 1, 2);
+ if (is23simular) {
+ final_candidate = 1;
+ } else {
+ for (i = 0; i < 8; i++)
+ RegTmp += result[3][i];
+
+ if (RegTmp != 0)
+ final_candidate = 3;
+ else
+ final_candidate = 0xFF;
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ RegE94 = result[i][0];
+ RegE9C = result[i][1];
+ RegEA4 = result[i][2];
+ RegEAC = result[i][3];
+ RegEB4 = result[i][4];
+ RegEBC = result[i][5];
+ RegEC4 = result[i][6];
+ RegECC = result[i][7];
+ }
+
+ if (final_candidate != 0xff) {
+ RegE94 = result[final_candidate][0];
+ pdmpriv->RegE94 = RegE94;
+ RegE9C = result[final_candidate][1];
+ pdmpriv->RegE9C = RegE9C;
+ RegEA4 = result[final_candidate][2];
+ RegEAC = result[final_candidate][3];
+ RegEB4 = result[final_candidate][4];
+ pdmpriv->RegEB4 = RegEB4;
+ RegEBC = result[final_candidate][5];
+ pdmpriv->RegEBC = RegEBC;
+ RegEC4 = result[final_candidate][6];
+ RegECC = result[final_candidate][7];
+ DBG_8723A("IQK: final_candidate is %x\n", final_candidate);
+ DBG_8723A("IQK: RegE94 =%x RegE9C =%x RegEA4 =%x RegEAC =%x RegEB4 =%x RegEBC =%x RegEC4 =%x RegECC =%x\n ",
+ RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC);
+ bPathAOK = bPathBOK = true;
+ } else {
+ RegE94 = RegEB4 = pdmpriv->RegE94 = pdmpriv->RegEB4 = 0x100; /* X default value */
+ RegE9C = RegEBC = pdmpriv->RegE9C = pdmpriv->RegEBC = 0x0; /* Y default value */
+ }
+
+ if ((RegE94 != 0)/*&&(RegEA4 != 0)*/)
+ _PHY_PathAFillIQKMatrix(pAdapter, bPathAOK, result, final_candidate, (RegEA4 == 0));
+
+ if (IS_92C_SERIAL(pHalData->VersionID)) {
+ if ((RegEB4 != 0)/*&&(RegEC4 != 0)*/)
+ _PHY_PathBFillIQKMatrix(pAdapter, bPathBOK, result, final_candidate, (RegEC4 == 0));
+ }
+
+ _PHY_SaveADDARegisters(pAdapter, IQK_BB_REG_92C, pdmpriv->IQK_BB_backup_recover, 9);
+}
+
+void rtl8723a_phy_lc_calibrate(struct rtw_adapter *pAdapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
+ struct mlme_ext_priv *pmlmeext = &pAdapter->mlmeextpriv;
+ bool bStartContTx = false, bSingleTone = false, bCarrierSuppression = false;
+
+ /* ignore IQK when continuous Tx */
+ if (bStartContTx || bSingleTone || bCarrierSuppression)
+ return;
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS)
+ return;
+
+ if (IS_92C_SERIAL(pHalData->VersionID)) {
+ _PHY_LCCalibrate(pAdapter, true);
+ } else {
+ /* For 88C 1T1R */
+ _PHY_LCCalibrate(pAdapter, false);
+ }
+}
+
+void
+rtl8723a_phy_ap_calibrate(struct rtw_adapter *pAdapter, char delta)
+{
+}
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c
new file mode 100644
index 000000000000..294e6a6c60db
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c
@@ -0,0 +1,726 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+******************************************************************************/
+
+#include "odm_precomp.h"
+
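+/*
+ * Condition/Hex encoding used by CheckCondition(): bits 0-7 carry the board
+ * type, bits 8-15 the interface mask and bits 16-23 the platform mask.
+ * A condition value of 0xCDCDCDCD always matches.
+ */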
+static bool CheckCondition(const u32 Condition, const u32 Hex)
+{
+ u32 _board = (Hex & 0x000000FF);
+ u32 _interface = (Hex & 0x0000FF00) >> 8;
+ u32 _platform = (Hex & 0x00FF0000) >> 16;
+ u32 cond = Condition;
+
+ if (Condition == 0xCDCDCDCD)
+ return true;
+
+ cond = Condition & 0x000000FF;
+ if ((_board == cond) && cond != 0x00)
+ return false;
+
+ cond = Condition & 0x0000FF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = Condition & 0x00FF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+/******************************************************************************
+* AGC_TAB_1T.TXT
+******************************************************************************/
+
+static u32 Array_AGC_TAB_1T_8723A[] = {
+ 0xC78, 0x7B000001,
+ 0xC78, 0x7B010001,
+ 0xC78, 0x7B020001,
+ 0xC78, 0x7B030001,
+ 0xC78, 0x7B040001,
+ 0xC78, 0x7B050001,
+ 0xC78, 0x7A060001,
+ 0xC78, 0x79070001,
+ 0xC78, 0x78080001,
+ 0xC78, 0x77090001,
+ 0xC78, 0x760A0001,
+ 0xC78, 0x750B0001,
+ 0xC78, 0x740C0001,
+ 0xC78, 0x730D0001,
+ 0xC78, 0x720E0001,
+ 0xC78, 0x710F0001,
+ 0xC78, 0x70100001,
+ 0xC78, 0x6F110001,
+ 0xC78, 0x6E120001,
+ 0xC78, 0x6D130001,
+ 0xC78, 0x6C140001,
+ 0xC78, 0x6B150001,
+ 0xC78, 0x6A160001,
+ 0xC78, 0x69170001,
+ 0xC78, 0x68180001,
+ 0xC78, 0x67190001,
+ 0xC78, 0x661A0001,
+ 0xC78, 0x651B0001,
+ 0xC78, 0x641C0001,
+ 0xC78, 0x631D0001,
+ 0xC78, 0x621E0001,
+ 0xC78, 0x611F0001,
+ 0xC78, 0x60200001,
+ 0xC78, 0x49210001,
+ 0xC78, 0x48220001,
+ 0xC78, 0x47230001,
+ 0xC78, 0x46240001,
+ 0xC78, 0x45250001,
+ 0xC78, 0x44260001,
+ 0xC78, 0x43270001,
+ 0xC78, 0x42280001,
+ 0xC78, 0x41290001,
+ 0xC78, 0x402A0001,
+ 0xC78, 0x262B0001,
+ 0xC78, 0x252C0001,
+ 0xC78, 0x242D0001,
+ 0xC78, 0x232E0001,
+ 0xC78, 0x222F0001,
+ 0xC78, 0x21300001,
+ 0xC78, 0x20310001,
+ 0xC78, 0x06320001,
+ 0xC78, 0x05330001,
+ 0xC78, 0x04340001,
+ 0xC78, 0x03350001,
+ 0xC78, 0x02360001,
+ 0xC78, 0x01370001,
+ 0xC78, 0x00380001,
+ 0xC78, 0x00390001,
+ 0xC78, 0x003A0001,
+ 0xC78, 0x003B0001,
+ 0xC78, 0x003C0001,
+ 0xC78, 0x003D0001,
+ 0xC78, 0x003E0001,
+ 0xC78, 0x003F0001,
+ 0xC78, 0x7B400001,
+ 0xC78, 0x7B410001,
+ 0xC78, 0x7B420001,
+ 0xC78, 0x7B430001,
+ 0xC78, 0x7B440001,
+ 0xC78, 0x7B450001,
+ 0xC78, 0x7A460001,
+ 0xC78, 0x79470001,
+ 0xC78, 0x78480001,
+ 0xC78, 0x77490001,
+ 0xC78, 0x764A0001,
+ 0xC78, 0x754B0001,
+ 0xC78, 0x744C0001,
+ 0xC78, 0x734D0001,
+ 0xC78, 0x724E0001,
+ 0xC78, 0x714F0001,
+ 0xC78, 0x70500001,
+ 0xC78, 0x6F510001,
+ 0xC78, 0x6E520001,
+ 0xC78, 0x6D530001,
+ 0xC78, 0x6C540001,
+ 0xC78, 0x6B550001,
+ 0xC78, 0x6A560001,
+ 0xC78, 0x69570001,
+ 0xC78, 0x68580001,
+ 0xC78, 0x67590001,
+ 0xC78, 0x665A0001,
+ 0xC78, 0x655B0001,
+ 0xC78, 0x645C0001,
+ 0xC78, 0x635D0001,
+ 0xC78, 0x625E0001,
+ 0xC78, 0x615F0001,
+ 0xC78, 0x60600001,
+ 0xC78, 0x49610001,
+ 0xC78, 0x48620001,
+ 0xC78, 0x47630001,
+ 0xC78, 0x46640001,
+ 0xC78, 0x45650001,
+ 0xC78, 0x44660001,
+ 0xC78, 0x43670001,
+ 0xC78, 0x42680001,
+ 0xC78, 0x41690001,
+ 0xC78, 0x406A0001,
+ 0xC78, 0x266B0001,
+ 0xC78, 0x256C0001,
+ 0xC78, 0x246D0001,
+ 0xC78, 0x236E0001,
+ 0xC78, 0x226F0001,
+ 0xC78, 0x21700001,
+ 0xC78, 0x20710001,
+ 0xC78, 0x06720001,
+ 0xC78, 0x05730001,
+ 0xC78, 0x04740001,
+ 0xC78, 0x03750001,
+ 0xC78, 0x02760001,
+ 0xC78, 0x01770001,
+ 0xC78, 0x00780001,
+ 0xC78, 0x00790001,
+ 0xC78, 0x007A0001,
+ 0xC78, 0x007B0001,
+ 0xC78, 0x007C0001,
+ 0xC78, 0x007D0001,
+ 0xC78, 0x007E0001,
+ 0xC78, 0x007F0001,
+ 0xC78, 0x3800001E,
+ 0xC78, 0x3801001E,
+ 0xC78, 0x3802001E,
+ 0xC78, 0x3803001E,
+ 0xC78, 0x3804001E,
+ 0xC78, 0x3805001E,
+ 0xC78, 0x3806001E,
+ 0xC78, 0x3807001E,
+ 0xC78, 0x3808001E,
+ 0xC78, 0x3C09001E,
+ 0xC78, 0x3E0A001E,
+ 0xC78, 0x400B001E,
+ 0xC78, 0x440C001E,
+ 0xC78, 0x480D001E,
+ 0xC78, 0x4C0E001E,
+ 0xC78, 0x500F001E,
+ 0xC78, 0x5210001E,
+ 0xC78, 0x5611001E,
+ 0xC78, 0x5A12001E,
+ 0xC78, 0x5E13001E,
+ 0xC78, 0x6014001E,
+ 0xC78, 0x6015001E,
+ 0xC78, 0x6016001E,
+ 0xC78, 0x6217001E,
+ 0xC78, 0x6218001E,
+ 0xC78, 0x6219001E,
+ 0xC78, 0x621A001E,
+ 0xC78, 0x621B001E,
+ 0xC78, 0x621C001E,
+ 0xC78, 0x621D001E,
+ 0xC78, 0x621E001E,
+ 0xC78, 0x621F001E,
+};
+
+#define READ_NEXT_PAIR(v1, v2, i) \
+ do { \
+ i += 2; v1 = Array[i]; v2 = Array[i+1]; \
+ } while (0)
+
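+/*
+ * Each table is a flat array of (offset, value) pairs. A first value of
+ * 0xCDCDCDCD or above opens a conditional block, whose applicability is
+ * decided by CheckCondition(); the 0xCDCD/0xCDEF/0xDEAD data markers
+ * delimit the alternative branches and the end of the block.
+ */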
+void ODM_ReadAndConfig_AGC_TAB_1T_8723A(struct dm_odm_t *pDM_Odm)
+{
+
+ u32 hex;
+ u32 i;
+ u8 platform = 0x04;
+ u8 interfaceValue = pDM_Odm->SupportInterface;
+ u8 board = pDM_Odm->BoardType;
+ u32 ArrayLen = sizeof(Array_AGC_TAB_1T_8723A)/sizeof(u32);
+ u32 *Array = Array_AGC_TAB_1T_8723A;
+
+ hex = board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+ for (i = 0; i < ArrayLen; i += 2) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ odm_ConfigBB_AGC_8723A(pDM_Odm, v1, bMaskDWord, v2);
+ continue;
+ } else {
+ if (!CheckCondition(Array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ i -= 2; /* prevent from for-loop += 2 */
+ } else {
+ /* Configure matched pairs and skip to end of if-else. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2) {
+ odm_ConfigBB_AGC_8723A(pDM_Odm, v1, bMaskDWord, v2);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ while (v2 != 0xDEAD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ }
+}
+
+/******************************************************************************
+* PHY_REG_1T.TXT
+******************************************************************************/
+
+static u32 Array_PHY_REG_1T_8723A[] = {
+ 0x800, 0x80040000,
+ 0x804, 0x00000003,
+ 0x808, 0x0000FC00,
+ 0x80C, 0x0000000A,
+ 0x810, 0x10001331,
+ 0x814, 0x020C3D10,
+ 0x818, 0x02200385,
+ 0x81C, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390004,
+ 0x828, 0x00000000,
+ 0x82C, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83C, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84C, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569A569A,
+ 0x85C, 0x001B25A4,
+ 0x860, 0x66F60110,
+ 0x864, 0x061F0130,
+ 0x868, 0x00000000,
+ 0x86C, 0x32323200,
+ 0x870, 0x07000760,
+ 0x874, 0x22004000,
+ 0x878, 0x00000808,
+ 0x87C, 0x00000000,
+ 0x880, 0xC0083070,
+ 0x884, 0x000004D5,
+ 0x888, 0x00000000,
+ 0x88C, 0xCCC000C0,
+ 0x890, 0x00000800,
+ 0x894, 0xFFFFFFFE,
+ 0x898, 0x40302010,
+ 0x89C, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90C, 0x81121111,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x80FF000C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E68120F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x11144028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0000,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00D30000,
+ 0xA70, 0x101FBF00,
+ 0xA74, 0x00000007,
+ 0xA78, 0x00000900,
+ 0xC00, 0x48071D40,
+ 0xC04, 0x03A05611,
+ 0xC08, 0x000000E4,
+ 0xC0C, 0x6C6C6C6C,
+ 0xC10, 0x08800000,
+ 0xC14, 0x40000100,
+ 0xC18, 0x08800000,
+ 0xC1C, 0x40000100,
+ 0xC20, 0x00000000,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x69E9AC44,
+ 0xFF0F011F, 0xABCD,
+ 0xC34, 0x469652CF,
+ 0xCDCDCDCD, 0xCDCD,
+ 0xC34, 0x469652AF,
+ 0xFF0F011F, 0xDEAD,
+ 0xC38, 0x49795994,
+ 0xC3C, 0x0A97971C,
+ 0xC40, 0x1F7C403F,
+ 0xC44, 0x000100B7,
+ 0xC48, 0xEC020107,
+ 0xC4C, 0x007F037F,
+ 0xC50, 0x69543420,
+ 0xC54, 0x43BC0094,
+ 0xC58, 0x69543420,
+ 0xC5C, 0x433C0094,
+ 0xC60, 0x00000000,
+ 0xFF0F011F, 0xABCD,
+ 0xC64, 0x7116848B,
+ 0xCDCDCDCD, 0xCDCD,
+ 0xC64, 0x7112848B,
+ 0xFF0F011F, 0xDEAD,
+ 0xC68, 0x47C00BFF,
+ 0xC6C, 0x00000036,
+ 0xC70, 0x2C7F000D,
+ 0xC74, 0x018610DB,
+ 0xC78, 0x0000001F,
+ 0xC7C, 0x00B91612,
+ 0xC80, 0x40000100,
+ 0xC84, 0x20F60000,
+ 0xC88, 0x40000100,
+ 0xC8C, 0x20200000,
+ 0xC90, 0x00121820,
+ 0xC94, 0x00000000,
+ 0xC98, 0x00121820,
+ 0xC9C, 0x00007F7F,
+ 0xCA0, 0x00000000,
+ 0xCA4, 0x00000080,
+ 0xCA8, 0x00000000,
+ 0xCAC, 0x00000000,
+ 0xCB0, 0x00000000,
+ 0xCB4, 0x00000000,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x28000000,
+ 0xCC0, 0x00000000,
+ 0xCC4, 0x00000000,
+ 0xCC8, 0x00000000,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x00000000,
+ 0xCD4, 0x00000000,
+ 0xCD8, 0x64B22427,
+ 0xCDC, 0x00766932,
+ 0xCE0, 0x00222222,
+ 0xCE4, 0x00000000,
+ 0xCE8, 0x37644302,
+ 0xCEC, 0x2F97D40C,
+ 0xD00, 0x00080740,
+ 0xD04, 0x00020401,
+ 0xD08, 0x0000907F,
+ 0xD0C, 0x20010201,
+ 0xD10, 0xA0633333,
+ 0xD14, 0x3333BC43,
+ 0xD18, 0x7A8F5B6B,
+ 0xD2C, 0xCC979975,
+ 0xD30, 0x00000000,
+ 0xD34, 0x80608000,
+ 0xD38, 0x00000000,
+ 0xD3C, 0x00027293,
+ 0xD40, 0x00000000,
+ 0xD44, 0x00000000,
+ 0xD48, 0x00000000,
+ 0xD4C, 0x00000000,
+ 0xD50, 0x6437140A,
+ 0xD54, 0x00000000,
+ 0xD58, 0x00000000,
+ 0xD5C, 0x30032064,
+ 0xD60, 0x4653DE68,
+ 0xD64, 0x04518A3C,
+ 0xD68, 0x00002101,
+ 0xD6C, 0x2A201C16,
+ 0xD70, 0x1812362E,
+ 0xD74, 0x322C2220,
+ 0xD78, 0x000E3C24,
+ 0xE00, 0x2A2A2A2A,
+ 0xE04, 0x2A2A2A2A,
+ 0xE08, 0x03902A2A,
+ 0xE10, 0x2A2A2A2A,
+ 0xE14, 0x2A2A2A2A,
+ 0xE18, 0x2A2A2A2A,
+ 0xE1C, 0x2A2A2A2A,
+ 0xE28, 0x00000000,
+ 0xE30, 0x1000DC1F,
+ 0xE34, 0x10008C1F,
+ 0xE38, 0x02140102,
+ 0xE3C, 0x681604C2,
+ 0xE40, 0x01007C00,
+ 0xE44, 0x01004800,
+ 0xE48, 0xFB000000,
+ 0xE4C, 0x000028D1,
+ 0xE50, 0x1000DC1F,
+ 0xE54, 0x10008C1F,
+ 0xE58, 0x02140102,
+ 0xE5C, 0x28160D05,
+ 0xE60, 0x00000008,
+ 0xE68, 0x001B25A4,
+ 0xE6C, 0x631B25A0,
+ 0xE70, 0x631B25A0,
+ 0xE74, 0x081B25A0,
+ 0xE78, 0x081B25A0,
+ 0xE7C, 0x081B25A0,
+ 0xE80, 0x081B25A0,
+ 0xE84, 0x631B25A0,
+ 0xE88, 0x081B25A0,
+ 0xE8C, 0x631B25A0,
+ 0xED0, 0x631B25A0,
+ 0xED4, 0x631B25A0,
+ 0xED8, 0x631B25A0,
+ 0xEDC, 0x001B25A0,
+ 0xEE0, 0x001B25A0,
+ 0xEEC, 0x6B1B25A0,
+ 0xF14, 0x00000003,
+ 0xF4C, 0x00000000,
+ 0xF00, 0x00000300,
+};
+
+void ODM_ReadAndConfig_PHY_REG_1T_8723A(struct dm_odm_t *pDM_Odm)
+{
+ u32 hex = 0;
+ u32 i = 0;
+ u8 platform = 0x04;
+ u8 interfaceValue = pDM_Odm->SupportInterface;
+ u8 board = pDM_Odm->BoardType;
+ u32 ArrayLen = sizeof(Array_PHY_REG_1T_8723A)/sizeof(u32);
+ u32 *Array = Array_PHY_REG_1T_8723A;
+
+ hex += board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+ for (i = 0; i < ArrayLen; i += 2) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ odm_ConfigBB_PHY_8723A(pDM_Odm, v1, bMaskDWord, v2);
+ continue;
+ } else {
+ if (!CheckCondition(Array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ i -= 2; /* prevent from for-loop += 2 */
+ } else {
+ /* Configure matched pairs and skip to end of if-else. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2) {
+ odm_ConfigBB_PHY_8723A(pDM_Odm, v1, bMaskDWord, v2);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ while (v2 != 0xDEAD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ }
+}
+
+/******************************************************************************
+* PHY_REG_MP.TXT
+******************************************************************************/
+
+static u32 Array_PHY_REG_MP_8723A[] = {
+ 0xC30, 0x69E9AC4A,
+ 0xC3C, 0x0A979718,
+};
+
+void ODM_ReadAndConfig_PHY_REG_MP_8723A(struct dm_odm_t *pDM_Odm)
+{
+ u32 hex = 0;
+ u32 i = 0;
+ u8 platform = 0x04;
+ u8 interfaceValue = pDM_Odm->SupportInterface;
+ u8 board = pDM_Odm->BoardType;
+ u32 ArrayLen = sizeof(Array_PHY_REG_MP_8723A)/sizeof(u32);
+ u32 *Array = Array_PHY_REG_MP_8723A;
+
+ hex += board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+ for (i = 0; i < ArrayLen; i += 2) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ odm_ConfigBB_PHY_8723A(pDM_Odm, v1, bMaskDWord, v2);
+ continue;
+ } else {
+ if (!CheckCondition(Array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ i -= 2; /* prevent from for-loop += 2 */
+ } else {
+ /* Configure matched pairs and skip to end of if-else. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2) {
+ odm_ConfigBB_PHY_8723A(pDM_Odm, v1, bMaskDWord, v2);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ while (v2 != 0xDEAD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ }
+}
+
+/******************************************************************************
+* PHY_REG_PG.TXT
+******************************************************************************/
+
+static u32 Array_PHY_REG_PG_8723A[] = {
+ 0xE00, 0xFFFFFFFF, 0x0A0C0C0C,
+ 0xE04, 0xFFFFFFFF, 0x02040608,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x0A0C0D0E,
+ 0xE14, 0xFFFFFFFF, 0x02040608,
+ 0xE18, 0xFFFFFFFF, 0x0A0C0D0E,
+ 0xE1C, 0xFFFFFFFF, 0x02040608,
+ 0x830, 0xFFFFFFFF, 0x0A0C0C0C,
+ 0x834, 0xFFFFFFFF, 0x02040608,
+ 0x838, 0xFFFFFF00, 0x00000000,
+ 0x86C, 0x000000FF, 0x00000000,
+ 0x83C, 0xFFFFFFFF, 0x0A0C0D0E,
+ 0x848, 0xFFFFFFFF, 0x02040608,
+ 0x84C, 0xFFFFFFFF, 0x0A0C0D0E,
+ 0x868, 0xFFFFFFFF, 0x02040608,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0x830, 0xFFFFFFFF, 0x00000000,
+ 0x834, 0xFFFFFFFF, 0x00000000,
+ 0x838, 0xFFFFFF00, 0x00000000,
+ 0x86C, 0x000000FF, 0x00000000,
+ 0x83C, 0xFFFFFFFF, 0x00000000,
+ 0x848, 0xFFFFFFFF, 0x00000000,
+ 0x84C, 0xFFFFFFFF, 0x00000000,
+ 0x868, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x04040404,
+ 0xE04, 0xFFFFFFFF, 0x00020204,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x06060606,
+ 0xE14, 0xFFFFFFFF, 0x00020406,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0x830, 0xFFFFFFFF, 0x04040404,
+ 0x834, 0xFFFFFFFF, 0x00020204,
+ 0x838, 0xFFFFFF00, 0x00000000,
+ 0x86C, 0x000000FF, 0x00000000,
+ 0x83C, 0xFFFFFFFF, 0x06060606,
+ 0x848, 0xFFFFFFFF, 0x00020406,
+ 0x84C, 0xFFFFFFFF, 0x00000000,
+ 0x868, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0x830, 0xFFFFFFFF, 0x00000000,
+ 0x834, 0xFFFFFFFF, 0x00000000,
+ 0x838, 0xFFFFFF00, 0x00000000,
+ 0x86C, 0x000000FF, 0x00000000,
+ 0x83C, 0xFFFFFFFF, 0x00000000,
+ 0x848, 0xFFFFFFFF, 0x00000000,
+ 0x84C, 0xFFFFFFFF, 0x00000000,
+ 0x868, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0x830, 0xFFFFFFFF, 0x00000000,
+ 0x834, 0xFFFFFFFF, 0x00000000,
+ 0x838, 0xFFFFFF00, 0x00000000,
+ 0x86C, 0x000000FF, 0x00000000,
+ 0x83C, 0xFFFFFFFF, 0x00000000,
+ 0x848, 0xFFFFFFFF, 0x00000000,
+ 0x84C, 0xFFFFFFFF, 0x00000000,
+ 0x868, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x04040404,
+ 0xE04, 0xFFFFFFFF, 0x00020204,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0x830, 0xFFFFFFFF, 0x04040404,
+ 0x834, 0xFFFFFFFF, 0x00020204,
+ 0x838, 0xFFFFFF00, 0x00000000,
+ 0x86C, 0x000000FF, 0x00000000,
+ 0x83C, 0xFFFFFFFF, 0x00000000,
+ 0x848, 0xFFFFFFFF, 0x00000000,
+ 0x84C, 0xFFFFFFFF, 0x00000000,
+ 0x868, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0x830, 0xFFFFFFFF, 0x00000000,
+ 0x834, 0xFFFFFFFF, 0x00000000,
+ 0x838, 0xFFFFFF00, 0x00000000,
+ 0x86C, 0x000000FF, 0x00000000,
+ 0x83C, 0xFFFFFFFF, 0x00000000,
+ 0x848, 0xFFFFFFFF, 0x00000000,
+ 0x84C, 0xFFFFFFFF, 0x00000000,
+ 0x868, 0xFFFFFFFF, 0x00000000,
+};
+
+void ODM_ReadAndConfig_PHY_REG_PG_8723A(struct dm_odm_t *pDM_Odm)
+{
+ u32 hex = 0;
+ u32 i = 0;
+ u8 platform = 0x04;
+ u8 interfaceValue = pDM_Odm->SupportInterface;
+ u8 board = pDM_Odm->BoardType;
+ u32 ArrayLen = sizeof(Array_PHY_REG_PG_8723A)/sizeof(u32);
+ u32 *Array = Array_PHY_REG_PG_8723A;
+
+ hex += board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
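+ /* PHY_REG_PG entries are (offset, bitmask, data) triples, hence the stride of 3 */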
+ for (i = 0; i < ArrayLen; i += 3) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+ u32 v3 = Array[i+2];
+
+ /* This is a plain (offset, mask, data) entry. */
+ if (v1 < 0xCDCDCDCD) {
+ odm_ConfigBB_PHY_REG_PG_8723A(pDM_Odm, v1, v2, v3);
+ continue;
+ } else { /* This entry starts a conditional branch. */
+ if (!CheckCondition(Array[i], hex)) {
+ /* Condition not met: skip this branch's entries. */
+ i += 2; /* skip the condition pair */
+ v1 = Array[i];
+ v2 = Array[i+1];
+ v3 = Array[i+2];
+ while (v2 != 0xDEAD) {
+ i += 3;
+ v1 = Array[i];
+ v2 = Array[i+1];
+ v3 = Array[i+2];
+ }
+ }
+ }
+ }
+}
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c
new file mode 100644
index 000000000000..12071453be97
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c
@@ -0,0 +1,188 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+******************************************************************************/
+
+#include "odm_precomp.h"
+
+static bool CheckCondition(const u32 Condition, const u32 Hex)
+{
+ u32 _board = (Hex & 0x000000FF);
+ u32 _interface = (Hex & 0x0000FF00) >> 8;
+ u32 _platform = (Hex & 0x00FF0000) >> 16;
+ u32 cond = Condition;
+
+ if (Condition == 0xCDCDCDCD)
+ return true;
+
+ cond = Condition & 0x000000FF;
+ if ((_board == cond) && cond != 0x00)
+ return false;
+
+ cond = Condition & 0x0000FF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = Condition & 0x00FF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+/******************************************************************************
+* MAC_REG.TXT
+******************************************************************************/
+
+static u32 Array_MAC_REG_8723A[] = {
+ 0x420, 0x00000080,
+ 0x423, 0x00000000,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000006,
+ 0x437, 0x00000007,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43A, 0x00000000,
+ 0x43B, 0x00000001,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000006,
+ 0x43F, 0x00000007,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000015,
+ 0x445, 0x000000F0,
+ 0x446, 0x0000000F,
+ 0x447, 0x00000000,
+ 0x458, 0x00000041,
+ 0x459, 0x000000A8,
+ 0x45A, 0x00000072,
+ 0x45B, 0x000000B9,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x462, 0x00000008,
+ 0x463, 0x00000003,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x515, 0x00000010,
+ 0x516, 0x0000000A,
+ 0x517, 0x00000010,
+ 0x51A, 0x00000016,
+ 0x524, 0x0000000F,
+ 0x525, 0x0000004F,
+ 0x546, 0x00000040,
+ 0x547, 0x00000000,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55A, 0x00000002,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x652, 0x00000020,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+};
+
+void ODM_ReadAndConfig_MAC_REG_8723A(struct dm_odm_t *pDM_Odm)
+{
+ #define READ_NEXT_PAIR(v1, v2, i) \
+ do { \
+ i += 2; v1 = Array[i]; v2 = Array[i+1]; \
+ } while (0)
+
+ u32 hex = 0;
+ u32 i = 0;
+ u8 platform = 0x04;
+ u8 interfaceValue = pDM_Odm->SupportInterface;
+ u8 board = pDM_Odm->BoardType;
+ u32 ArrayLen = sizeof(Array_MAC_REG_8723A)/sizeof(u32);
+ u32 *Array = Array_MAC_REG_8723A;
+
+ hex += board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+ for (i = 0; i < ArrayLen; i += 2) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ odm_ConfigMAC_8723A(pDM_Odm, v1, (u8)v2);
+ continue;
+ } else {
+ if (!CheckCondition(Array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ i -= 2; /* prevent from for-loop += 2 */
+ } else {
+ /* Configure matched pairs and skip to end of if-else. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2) {
+ odm_ConfigMAC_8723A(pDM_Odm, v1, (u8)v2);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ }
+}
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c
new file mode 100644
index 000000000000..0f2ae05c8eae
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c
@@ -0,0 +1,259 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+******************************************************************************/
+
+#include "odm_precomp.h"
+
+static bool CheckCondition(const u32 Condition, const u32 Hex)
+{
+ u32 _board = (Hex & 0x000000FF);
+ u32 _interface = (Hex & 0x0000FF00) >> 8;
+ u32 _platform = (Hex & 0x00FF0000) >> 16;
+ u32 cond = Condition;
+
+ if (Condition == 0xCDCDCDCD)
+ return true;
+
+ cond = Condition & 0x000000FF;
+ if ((_board == cond) && cond != 0x00)
+ return false;
+
+ cond = Condition & 0x0000FF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = Condition & 0x00FF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+/******************************************************************************
+* RadioA_1T.TXT
+******************************************************************************/
+
+static u32 Array_RadioA_1T_8723A[] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0xFF0F011F, 0xABCD,
+ 0x003, 0x00018C63,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x003, 0x00039C63,
+ 0xFF0F011F, 0xDEAD,
+ 0x004, 0x000210E7,
+ 0x009, 0x0002044F,
+ 0x00A, 0x0001A3F1,
+ 0x00B, 0x00014787,
+ 0x00C, 0x000896FE,
+ 0x00D, 0x0000E02C,
+ 0x00E, 0x00039CE7,
+ 0x00F, 0x00000451,
+ 0x019, 0x00000000,
+ 0x01A, 0x00030355,
+ 0x01B, 0x00060A00,
+ 0x01C, 0x000FC378,
+ 0x01D, 0x000A1250,
+ 0x01E, 0x0000024F,
+ 0x01F, 0x00000000,
+ 0x020, 0x0000B614,
+ 0x021, 0x0006C000,
+ 0x022, 0x00000000,
+ 0x023, 0x00001558,
+ 0x024, 0x00000060,
+ 0x025, 0x00000483,
+ 0x026, 0x0004F000,
+ 0x027, 0x000EC7D9,
+ 0x028, 0x00057730,
+ 0x029, 0x00004783,
+ 0x02A, 0x00000001,
+ 0x02B, 0x00021334,
+ 0x02A, 0x00000000,
+ 0x02B, 0x00000054,
+ 0x02A, 0x00000001,
+ 0x02B, 0x00000808,
+ 0x02B, 0x00053333,
+ 0x02C, 0x0000000C,
+ 0x02A, 0x00000002,
+ 0x02B, 0x00000808,
+ 0x02B, 0x0005B333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x00000003,
+ 0x02B, 0x00000808,
+ 0x02B, 0x00063333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x00000004,
+ 0x02B, 0x00000808,
+ 0x02B, 0x0006B333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x00000005,
+ 0x02B, 0x00000808,
+ 0x02B, 0x00073333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x00000006,
+ 0x02B, 0x00000709,
+ 0x02B, 0x0005B333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x00000007,
+ 0x02B, 0x00000709,
+ 0x02B, 0x00063333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x00000008,
+ 0x02B, 0x0000060A,
+ 0x02B, 0x0004B333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x00000009,
+ 0x02B, 0x0000060A,
+ 0x02B, 0x00053333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x0000000A,
+ 0x02B, 0x0000060A,
+ 0x02B, 0x0005B333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x0000000B,
+ 0x02B, 0x0000060A,
+ 0x02B, 0x00063333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x0000000C,
+ 0x02B, 0x0000060A,
+ 0x02B, 0x0006B333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x0000000D,
+ 0x02B, 0x0000060A,
+ 0x02B, 0x00073333,
+ 0x02C, 0x0000000D,
+ 0x02A, 0x0000000E,
+ 0x02B, 0x0000050B,
+ 0x02B, 0x00066666,
+ 0x02C, 0x0000001A,
+ 0x02A, 0x000E0000,
+ 0x010, 0x0004000F,
+ 0x011, 0x000E31FC,
+ 0x010, 0x0006000F,
+ 0x011, 0x000FF9F8,
+ 0x010, 0x0002000F,
+ 0x011, 0x000203F9,
+ 0x010, 0x0003000F,
+ 0x011, 0x000FF500,
+ 0x010, 0x00000000,
+ 0x011, 0x00000000,
+ 0x010, 0x0008000F,
+ 0x011, 0x0003F100,
+ 0x010, 0x0009000F,
+ 0x011, 0x00023100,
+ 0x012, 0x00032000,
+ 0x012, 0x00071000,
+ 0x012, 0x000B0000,
+ 0x012, 0x000FC000,
+ 0x013, 0x000287B3,
+ 0x013, 0x000244B7,
+ 0x013, 0x000204AB,
+ 0x013, 0x0001C49F,
+ 0x013, 0x00018493,
+ 0x013, 0x0001429B,
+ 0x013, 0x00010299,
+ 0x013, 0x0000C29C,
+ 0x013, 0x000081A0,
+ 0x013, 0x000040AC,
+ 0x013, 0x00000020,
+ 0x014, 0x0001944C,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944C,
+ 0x014, 0x000D9444,
+ 0xFF0F011F, 0xABCD,
+ 0x015, 0x0000F424,
+ 0x015, 0x0004F424,
+ 0x015, 0x0008F424,
+ 0x015, 0x000CF424,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x015, 0x0000F474,
+ 0x015, 0x0004F477,
+ 0x015, 0x0008F455,
+ 0x015, 0x000CF455,
+ 0xFF0F011F, 0xDEAD,
+ 0x016, 0x00000339,
+ 0x016, 0x00040339,
+ 0x016, 0x00080339,
+ 0xFF0F011F, 0xABCD,
+ 0x016, 0x000C0356,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x016, 0x000C0366,
+ 0xFF0F011F, 0xDEAD,
+ 0x000, 0x00010159,
+ 0x018, 0x0000F401,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x01F, 0x00000003,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x01E, 0x00000247,
+ 0x01F, 0x00000000,
+ 0x000, 0x00030159,
+};
+
+void ODM_ReadAndConfig_RadioA_1T_8723A(struct dm_odm_t *pDM_Odm)
+{
+ #define READ_NEXT_PAIR(v1, v2, i) \
+ do { \
+ i += 2; v1 = Array[i]; v2 = Array[i+1];\
+ } while (0)
+
+ u32 hex = 0;
+ u32 i = 0;
+ u8 platform = 0x04;
+ u8 interfaceValue = pDM_Odm->SupportInterface;
+ u8 board = pDM_Odm->BoardType;
+ u32 ArrayLen = sizeof(Array_RadioA_1T_8723A)/sizeof(u32);
+ u32 *Array = Array_RadioA_1T_8723A;
+
+ hex += board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+
+ for (i = 0; i < ArrayLen; i += 2) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ odm_ConfigRF_RadioA_8723A(pDM_Odm, v1, v2);
+ continue;
+ } else {
+ if (!CheckCondition(Array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ i -= 2; /* prevent from for-loop += 2 */
+ } else {
+ /* Configure matched pairs and skip to end of if-else. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2) {
+ odm_ConfigRF_RadioA_8723A(pDM_Odm, v1, v2);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ }
+}
diff --git a/drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c
new file mode 100644
index 000000000000..4f6b4b72f922
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c
@@ -0,0 +1,163 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/*++
+Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+
+Module Name:
+ HalPwrSeqCmd.c
+
+Abstract:
+ Implement HW Power sequence configuration CMD handling routine for
+ Realtek devices.
+
+Major Change History:
+ When Who What
+ ---------- --------------- -------------------------------
+ 2011-10-26 Lucas Modify to be compatible with SD4-CE driver.
+ 2011-07-07 Roger Create.
+
+--*/
+#include <HalPwrSeqCmd.h>
+
+/* */
+/* Description: */
+/* This routine deals with parsing the Power Configuration CMDs
+ for the RTL8723/RTL8188E series ICs. */
+/* */
+/* Assumption: */
+/* We should follow the specific format released by
+ HW SD. */
+/* */
+/* 2011.07.07, added by Roger. */
+/* */
+u8 HalPwrSeqCmdParsing23a(struct rtw_adapter *padapter, u8 CutVersion,
+ u8 FabVersion, u8 InterfaceType,
+ struct wlan_pwr_cfg PwrSeqCmd[])
+{
+ struct wlan_pwr_cfg PwrCfgCmd = { 0 };
+ u8 bPollingBit = false;
+ u32 AryIdx = 0;
+ u8 value = 0;
+ u32 offset = 0;
+ u32 pollingCount = 0; /* polling autoload done. */
+ u32 maxPollingCnt = 5000;
+
+ do {
+ PwrCfgCmd = PwrSeqCmd[AryIdx];
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("HalPwrSeqCmdParsing23a: offset(%#x) cut_msk(%#x) "
+ "fab_msk(%#x) interface_msk(%#x) base(%#x) cmd(%#x) "
+ "msk(%#x) value(%#x)\n",
+ GET_PWR_CFG_OFFSET(PwrCfgCmd),
+ GET_PWR_CFG_CUT_MASK(PwrCfgCmd),
+ GET_PWR_CFG_FAB_MASK(PwrCfgCmd),
+ GET_PWR_CFG_INTF_MASK(PwrCfgCmd),
+ GET_PWR_CFG_BASE(PwrCfgCmd),
+ GET_PWR_CFG_CMD(PwrCfgCmd),
+ GET_PWR_CFG_MASK(PwrCfgCmd),
+ GET_PWR_CFG_VALUE(PwrCfgCmd)));
+
+ /* 2 Only Handle the command whose FAB, CUT, and Interface are
+ matched */
+ if ((GET_PWR_CFG_FAB_MASK(PwrCfgCmd) & FabVersion) &&
+ (GET_PWR_CFG_CUT_MASK(PwrCfgCmd) & CutVersion) &&
+ (GET_PWR_CFG_INTF_MASK(PwrCfgCmd) & InterfaceType)) {
+ switch (GET_PWR_CFG_CMD(PwrCfgCmd)) {
+ case PWR_CMD_READ:
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("HalPwrSeqCmdParsing23a: "
+ "PWR_CMD_READ\n"));
+ break;
+
+ case PWR_CMD_WRITE:
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("HalPwrSeqCmdParsing23a: "
+ "PWR_CMD_WRITE\n"));
+ offset = GET_PWR_CFG_OFFSET(PwrCfgCmd);
+
+ /* Read the value from system register */
+ value = rtw_read8(padapter, offset);
+
+ value &= ~(GET_PWR_CFG_MASK(PwrCfgCmd));
+ value |= (GET_PWR_CFG_VALUE(PwrCfgCmd) &
+ GET_PWR_CFG_MASK(PwrCfgCmd));
+
+ /* Write the value back to system register */
+ rtw_write8(padapter, offset, value);
+ break;
+
+ case PWR_CMD_POLLING:
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("HalPwrSeqCmdParsing23a: "
+ "PWR_CMD_POLLING\n"));
+
+ bPollingBit = false;
+ offset = GET_PWR_CFG_OFFSET(PwrCfgCmd);
+ do {
+ value = rtw_read8(padapter, offset);
+
+ value &= GET_PWR_CFG_MASK(PwrCfgCmd);
+ if (value ==
+ (GET_PWR_CFG_VALUE(PwrCfgCmd) &
+ GET_PWR_CFG_MASK(PwrCfgCmd)))
+ bPollingBit = true;
+ else
+ udelay(10);
+
+ if (pollingCount++ > maxPollingCnt) {
+ DBG_8723A("Fail to polling "
+ "Offset[%#x]\n",
+ offset);
+ return false;
+ }
+ } while (!bPollingBit);
+
+ break;
+
+ case PWR_CMD_DELAY:
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("HalPwrSeqCmdParsing23a: "
+ "PWR_CMD_DELAY\n"));
+ if (GET_PWR_CFG_VALUE(PwrCfgCmd) ==
+ PWRSEQ_DELAY_US)
+ udelay(GET_PWR_CFG_OFFSET(PwrCfgCmd));
+ else
+ udelay(GET_PWR_CFG_OFFSET(PwrCfgCmd) *
+ 1000);
+ break;
+
+ case PWR_CMD_END:
+ /* When this command is parsed, end
+ the process */
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("HalPwrSeqCmdParsing23a: "
+ "PWR_CMD_END\n"));
+ return true;
+ break;
+
+ default:
+ RT_TRACE(_module_hal_init_c_, _drv_err_,
+ ("HalPwrSeqCmdParsing23a: "
+ "Unknown CMD!!\n"));
+ break;
+ }
+ }
+
+ AryIdx++; /* Add Array Index */
+ } while (1);
+
+ return true;
+}
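[Editor's note: the PWR_CMD_WRITE branch above is a masked read-modify-write on a single byte register. The same pattern as a standalone sketch; pwr_seq_rmw_sketch() is a hypothetical helper, not part of this driver, while rtw_read8()/rtw_write8() are the accessors already used above.]

    /* Sketch of the PWR_CMD_WRITE read-modify-write pattern. */
    static void pwr_seq_rmw_sketch(struct rtw_adapter *padapter, u32 offset,
    			       u8 mask, u8 value)
    {
    	u8 tmp = rtw_read8(padapter, offset);

    	tmp &= ~mask;		/* clear the bits owned by this command */
    	tmp |= value & mask;	/* apply the requested value under mask */
    	rtw_write8(padapter, offset, tmp);
    }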
diff --git a/drivers/staging/rtl8723au/hal/hal_com.c b/drivers/staging/rtl8723au/hal/hal_com.c
new file mode 100644
index 000000000000..0640f3522136
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/hal_com.c
@@ -0,0 +1,881 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <hal_intf.h>
+#include <hal_com.h>
+#include <rtl8723a_hal.h>
+
+#define _HAL_INIT_C_
+
+void dump_chip_info23a(struct hal_version ChipVersion)
+{
+ int cnt = 0;
+ u8 buf[128];
+
+ cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8723A_");
+
+ cnt += sprintf((buf + cnt), "%s_", IS_NORMAL_CHIP(ChipVersion) ?
+ "Normal_Chip" : "Test_Chip");
+ cnt += sprintf((buf + cnt), "%s_",
+ IS_CHIP_VENDOR_TSMC(ChipVersion) ? "TSMC" : "UMC");
+ if (IS_A_CUT(ChipVersion))
+ cnt += sprintf((buf + cnt), "A_CUT_");
+ else if (IS_B_CUT(ChipVersion))
+ cnt += sprintf((buf + cnt), "B_CUT_");
+ else if (IS_C_CUT(ChipVersion))
+ cnt += sprintf((buf + cnt), "C_CUT_");
+ else if (IS_D_CUT(ChipVersion))
+ cnt += sprintf((buf + cnt), "D_CUT_");
+ else if (IS_E_CUT(ChipVersion))
+ cnt += sprintf((buf + cnt), "E_CUT_");
+ else
+ cnt += sprintf((buf + cnt), "UNKNOWN_CUT(%d)_",
+ ChipVersion.CUTVersion);
+
+ if (IS_1T1R(ChipVersion))
+ cnt += sprintf((buf + cnt), "1T1R_");
+ else if (IS_1T2R(ChipVersion))
+ cnt += sprintf((buf + cnt), "1T2R_");
+ else if (IS_2T2R(ChipVersion))
+ cnt += sprintf((buf + cnt), "2T2R_");
+ else
+ cnt += sprintf((buf + cnt), "UNKNOWN_RFTYPE(%d)_",
+ ChipVersion.RFType);
+
+ cnt += sprintf((buf + cnt), "RomVer(%d)\n", ChipVersion.ROMVer);
+
+ DBG_8723A("%s", buf);
+}
+
+#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+
+/* return the final channel plan decision */
+/* hw_channel_plan: channel plan from HW (efuse/eeprom) */
+/* sw_channel_plan: channel plan from SW (registry/module param) */
+/* def_channel_plan: channel plan used when the former two are invalid */
+u8 hal_com_get_channel_plan23a(struct rtw_adapter *padapter, u8 hw_channel_plan,
+ u8 sw_channel_plan, u8 def_channel_plan,
+ bool AutoLoadFail)
+{
+ u8 swConfig;
+ u8 chnlPlan;
+
+ swConfig = true;
+ if (!AutoLoadFail) {
+ if (!rtw_is_channel_plan_valid(sw_channel_plan))
+ swConfig = false;
+ if (hw_channel_plan & EEPROM_CHANNEL_PLAN_BY_HW_MASK)
+ swConfig = false;
+ }
+
+ if (swConfig == true)
+ chnlPlan = sw_channel_plan;
+ else
+ chnlPlan = hw_channel_plan & (~EEPROM_CHANNEL_PLAN_BY_HW_MASK);
+
+ if (!rtw_is_channel_plan_valid(chnlPlan))
+ chnlPlan = def_channel_plan;
+
+ return chnlPlan;
+}
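[Editor's note: a hypothetical call site, tracing the precedence above: the SW plan is used unless autoload failed or the HW plan carries the by-HW bit, and the default plan is the last resort when the chosen plan is invalid. The plan IDs below are made up for illustration.]

    /* Illustration only; 0x20, 0x7f and 0x2A are made-up plan IDs. */
    u8 plan = hal_com_get_channel_plan23a(padapter,
    				      0x80 | 0x20, /* HW plan, by-HW bit */
    				      0x7f,        /* SW plan            */
    				      0x2A,        /* default plan       */
    				      false);      /* autoload succeeded */
    /* The by-HW bit clears swConfig, so the HW plan 0x20 is chosen if
     * rtw_is_channel_plan_valid(0x20), otherwise the default 0x2A. */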
+
+u8 MRateToHwRate23a(u8 rate)
+{
+ u8 ret = DESC_RATE1M;
+
+ switch (rate) {
+ /* CCK and OFDM non-HT rates */
+ case IEEE80211_CCK_RATE_1MB:
+ ret = DESC_RATE1M;
+ break;
+ case IEEE80211_CCK_RATE_2MB:
+ ret = DESC_RATE2M;
+ break;
+ case IEEE80211_CCK_RATE_5MB:
+ ret = DESC_RATE5_5M;
+ break;
+ case IEEE80211_CCK_RATE_11MB:
+ ret = DESC_RATE11M;
+ break;
+ case IEEE80211_OFDM_RATE_6MB:
+ ret = DESC_RATE6M;
+ break;
+ case IEEE80211_OFDM_RATE_9MB:
+ ret = DESC_RATE9M;
+ break;
+ case IEEE80211_OFDM_RATE_12MB:
+ ret = DESC_RATE12M;
+ break;
+ case IEEE80211_OFDM_RATE_18MB:
+ ret = DESC_RATE18M;
+ break;
+ case IEEE80211_OFDM_RATE_24MB:
+ ret = DESC_RATE24M;
+ break;
+ case IEEE80211_OFDM_RATE_36MB:
+ ret = DESC_RATE36M;
+ break;
+ case IEEE80211_OFDM_RATE_48MB:
+ ret = DESC_RATE48M;
+ break;
+ case IEEE80211_OFDM_RATE_54MB:
+ ret = DESC_RATE54M;
+ break;
+
+ /* HT rates since here */
+ /* case MGN_MCS0: ret = DESC_RATEMCS0; break; */
+ /* case MGN_MCS1: ret = DESC_RATEMCS1; break; */
+ /* case MGN_MCS2: ret = DESC_RATEMCS2; break; */
+ /* case MGN_MCS3: ret = DESC_RATEMCS3; break; */
+ /* case MGN_MCS4: ret = DESC_RATEMCS4; break; */
+ /* case MGN_MCS5: ret = DESC_RATEMCS5; break; */
+ /* case MGN_MCS6: ret = DESC_RATEMCS6; break; */
+ /* case MGN_MCS7: ret = DESC_RATEMCS7; break; */
+
+ default:
+ break;
+ }
+ return ret;
+}
+
+void HalSetBrateCfg23a(struct rtw_adapter *padapter, u8 *mBratesOS)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 i, is_brate, brate;
+ u16 brate_cfg = 0;
+ u8 rate_index;
+
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ is_brate = mBratesOS[i] & IEEE80211_BASIC_RATE_MASK;
+ brate = mBratesOS[i] & 0x7f;
+
+ if (is_brate) {
+ switch (brate) {
+ case IEEE80211_CCK_RATE_1MB:
+ brate_cfg |= RATE_1M;
+ break;
+ case IEEE80211_CCK_RATE_2MB:
+ brate_cfg |= RATE_2M;
+ break;
+ case IEEE80211_CCK_RATE_5MB:
+ brate_cfg |= RATE_5_5M;
+ break;
+ case IEEE80211_CCK_RATE_11MB:
+ brate_cfg |= RATE_11M;
+ break;
+ case IEEE80211_OFDM_RATE_6MB:
+ brate_cfg |= RATE_6M;
+ break;
+ case IEEE80211_OFDM_RATE_9MB:
+ brate_cfg |= RATE_9M;
+ break;
+ case IEEE80211_OFDM_RATE_12MB:
+ brate_cfg |= RATE_12M;
+ break;
+ case IEEE80211_OFDM_RATE_18MB:
+ brate_cfg |= RATE_18M;
+ break;
+ case IEEE80211_OFDM_RATE_24MB:
+ brate_cfg |= RATE_24M;
+ break;
+ case IEEE80211_OFDM_RATE_36MB:
+ brate_cfg |= RATE_36M;
+ break;
+ case IEEE80211_OFDM_RATE_48MB:
+ brate_cfg |= RATE_48M;
+ break;
+ case IEEE80211_OFDM_RATE_54MB:
+ brate_cfg |= RATE_54M;
+ break;
+ }
+ }
+ }
+
+ /* 2007.01.16, by Emily */
+ /* Select RRSR (in Legacy-OFDM and CCK) */
+ /* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M, 2M,
+ and 1M from the Basic rate. */
+ /* We do not use other rates. */
+ /* 2011.03.30 add by Luke Lee */
+ /* CCK 2M ACK should be disabled for some BCM and Atheros AP IOT */
+ /* because CCK 2M has poor TXEVM */
+ /* CCK 5.5M & 11M ACK should be enabled for better
+ performance */
+
+ brate_cfg = (brate_cfg | 0xd) & 0x15d;
+ pHalData->BasicRateSet = brate_cfg;
+ brate_cfg |= 0x01; /* default enable 1M ACK rate */
+ DBG_8723A("HW_VAR_BASIC_RATE: BrateCfg(%#x)\n", brate_cfg);
+
+ /* Set RRSR rate table. */
+ rtw_write8(padapter, REG_RRSR, brate_cfg & 0xff);
+ rtw_write8(padapter, REG_RRSR + 1, (brate_cfg >> 8) & 0xff);
+ rtw_write8(padapter, REG_RRSR + 2,
+ rtw_read8(padapter, REG_RRSR + 2) & 0xf0);
+
+ rate_index = 0;
+ /* Set RTS initial rate */
+ while (brate_cfg > 0x1) {
+ brate_cfg = (brate_cfg >> 1);
+ rate_index++;
+ }
+ /* Ziv - Check */
+ rtw_write8(padapter, REG_INIRTS_RATE_SEL, rate_index);
+
+ return;
+}
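[Editor's note: the RTS initial rate loop above simply finds the bit index of the highest enabled basic rate. A worked trace as a hypothetical helper, using the maximum value that the (brate_cfg | 0xd) & 0x15d masking can produce.]

    /* Worked example: find the index of the highest set bit, as above. */
    static u8 highest_basic_rate_idx_sketch(u16 brate_cfg)
    {
    	u8 idx = 0;

    	while (brate_cfg > 0x1) {
    		brate_cfg >>= 1;	/* drop the lowest bit each pass */
    		idx++;
    	}
    	return idx;	/* brate_cfg = 0x15d yields 8 */
    }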
+
+static void _OneOutPipeMapping(struct rtw_adapter *pAdapter)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(pAdapter);
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0]; /* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[0]; /* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0]; /* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD */
+}
+
+static void _TwoOutPipeMapping(struct rtw_adapter *pAdapter, bool bWIFICfg)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(pAdapter);
+
+ if (bWIFICfg) { /* WMM */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 0, 1, 0, 1, 0, 0, 0, 0, 0 }; */
+ /* 0:H, 1:L */
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1]; /* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0]; /* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1]; /* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0]; /* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/
+ } else { /* typical setting */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 1, 1, 0, 0, 0, 0, 0, 0, 0 }; */
+ /* 0:H, 1:L */
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0]; /* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1]; /* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1]; /* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/
+ }
+}
+
+static void _ThreeOutPipeMapping(struct rtw_adapter *pAdapter, bool bWIFICfg)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(pAdapter);
+
+ if (bWIFICfg) { /* for WMM */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 1, 2, 1, 0, 0, 0, 0, 0, 0 }; */
+ /* 0:H, 1:N, 2:L */
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1]; /* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2]; /* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1]; /* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/
+ } else { /* typical setting */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 2, 2, 1, 0, 0, 0, 0, 0, 0 }; */
+ /* 0:H, 1:N, 2:L */
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1]; /* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2]; /* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[2]; /* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/
+ }
+}
+
+bool Hal_MappingOutPipe23a(struct rtw_adapter *pAdapter, u8 NumOutPipe)
+{
+ struct registry_priv *pregistrypriv = &pAdapter->registrypriv;
+ bool bWIFICfg = (pregistrypriv->wifi_spec) ? true : false;
+ bool result = true;
+
+ switch (NumOutPipe) {
+ case 2:
+ _TwoOutPipeMapping(pAdapter, bWIFICfg);
+ break;
+ case 3:
+ _ThreeOutPipeMapping(pAdapter, bWIFICfg);
+ break;
+ case 1:
+ _OneOutPipeMapping(pAdapter);
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+void hal_init_macaddr23a(struct rtw_adapter *adapter)
+{
+ rtw_hal_set_hwreg23a(adapter, HW_VAR_MAC_ADDR,
+ adapter->eeprompriv.mac_addr);
+}
+
+/*
+* C2H event format:
+* Field TRIGGER CONTENT CMD_SEQ CMD_LEN CMD_ID
+* BITS [127:120] [119:16] [15:8] [7:4] [3:0]
+*/
+
+void c2h_evt_clear23a(struct rtw_adapter *adapter)
+{
+ rtw_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
+}
+
+s32 c2h_evt_read23a(struct rtw_adapter *adapter, u8 *buf)
+{
+ s32 ret = _FAIL;
+ struct c2h_evt_hdr *c2h_evt;
+ int i;
+ u8 trigger;
+
+ if (buf == NULL)
+ goto exit;
+
+ trigger = rtw_read8(adapter, REG_C2HEVT_CLEAR);
+
+ if (trigger == C2H_EVT_HOST_CLOSE)
+ goto exit; /* Not ready */
+ else if (trigger != C2H_EVT_FW_CLOSE)
+ goto clear_evt; /* Not a valid value */
+
+ c2h_evt = (struct c2h_evt_hdr *)buf;
+
+ memset(c2h_evt, 0, 16);
+
+ *buf = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL);
+ *(buf + 1) = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_info_, "c2h_evt_read23a(): ",
+ &c2h_evt, sizeof(c2h_evt));
+
+ if (0) {
+ DBG_8723A("%s id:%u, len:%u, seq:%u, trigger:0x%02x\n",
+ __func__, c2h_evt->id, c2h_evt->plen, c2h_evt->seq,
+ trigger);
+ }
+
+ /* Read the content */
+ for (i = 0; i < c2h_evt->plen; i++)
+ c2h_evt->payload[i] = rtw_read8(adapter,
+ REG_C2HEVT_MSG_NORMAL +
+ sizeof(*c2h_evt) + i);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_info_,
+ "c2h_evt_read23a(): Command Content:\n", c2h_evt->payload,
+ c2h_evt->plen);
+
+ ret = _SUCCESS;
+
+clear_evt:
+ /*
+ * Clear event to notify FW we have read the command.
+ * If this field isn't clear, the FW won't update the
+ * next command message.
+ */
+ c2h_evt_clear23a(adapter);
+exit:
+ return ret;
+}
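[Editor's note: per the C2H layout documented above (CMD_ID in bits [3:0], CMD_LEN in bits [7:4], CMD_SEQ in bits [15:8]), the two header bytes read from REG_C2HEVT_MSG_NORMAL decode as below. This is a sketch of the field extraction only, independent of how struct c2h_evt_hdr lays out its members; the helper name is hypothetical.]

    /* Sketch: manual decode of the two C2H header bytes. */
    static void c2h_hdr_decode_sketch(struct rtw_adapter *adapter)
    {
    	u8 hdr0 = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL);
    	u8 hdr1 = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1);
    	u8 cmd_id  = hdr0 & 0x0f;	/* CMD_ID,  bits [3:0]  */
    	u8 cmd_len = hdr0 >> 4;		/* CMD_LEN, bits [7:4]  */
    	u8 cmd_seq = hdr1;		/* CMD_SEQ, bits [15:8] */

    	DBG_8723A("c2h sketch: id %u len %u seq %u\n",
    		  cmd_id, cmd_len, cmd_seq);
    }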
+
+void
+rtl8723a_set_ampdu_min_space(struct rtw_adapter *padapter, u8 MinSpacingToSet)
+{
+ u8 SecMinSpace;
+
+ if (MinSpacingToSet <= 7) {
+ switch (padapter->securitypriv.dot11PrivacyAlgrthm) {
+ case _NO_PRIVACY_:
+ case _AES_:
+ SecMinSpace = 0;
+ break;
+
+ case _WEP40_:
+ case _WEP104_:
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ SecMinSpace = 6;
+ break;
+ default:
+ SecMinSpace = 7;
+ break;
+ }
+
+ if (MinSpacingToSet < SecMinSpace)
+ MinSpacingToSet = SecMinSpace;
+
+ /* RT_TRACE(COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ padapter->MgntInfo.MinSpaceCfg)); */
+ MinSpacingToSet |=
+ rtw_read8(padapter, REG_AMPDU_MIN_SPACE) & 0xf8;
+ rtw_write8(padapter, REG_AMPDU_MIN_SPACE,
+ MinSpacingToSet);
+ }
+}
+
+void rtl8723a_set_ampdu_factor(struct rtw_adapter *padapter, u8 FactorToSet)
+{
+ u8 RegToSet_Normal[4] = { 0x41, 0xa8, 0x72, 0xb9 };
+ u8 MaxAggNum;
+ u8 *pRegToSet;
+ u8 index = 0;
+
+ pRegToSet = RegToSet_Normal; /* 0xb972a841; */
+#ifdef CONFIG_8723AU_BT_COEXIST
+ if ((BT_IsBtDisabled(padapter) == false) &&
+ (BT_1Ant(padapter) == true)) {
+ MaxAggNum = 0x8;
+ } else
+#endif /* CONFIG_8723AU_BT_COEXIST */
+ {
+ MaxAggNum = 0xF;
+ }
+
+ if (FactorToSet <= 3) {
+ FactorToSet = (1 << (FactorToSet + 2));
+ if (FactorToSet > MaxAggNum)
+ FactorToSet = MaxAggNum;
+
+ for (index = 0; index < 4; index++) {
+ if ((pRegToSet[index] & 0xf0) > (FactorToSet << 4))
+ pRegToSet[index] = (pRegToSet[index] & 0x0f) |
+ (FactorToSet << 4);
+
+ if ((pRegToSet[index] & 0x0f) > FactorToSet)
+ pRegToSet[index] = (pRegToSet[index] & 0xf0) |
+ FactorToSet;
+
+ rtw_write8(padapter, REG_AGGLEN_LMT + index,
+ pRegToSet[index]);
+ }
+
+ /* RT_TRACE(COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_FACTOR: %#x\n", FactorToSet)); */
+ }
+}
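[Editor's note: the handling above expands the HT AMPDU factor (0..3) to an aggregation limit of 4/8/16/32 subframes, caps it at MaxAggNum, and then lowers every nibble of the REG_AGGLEN_LMT bytes that exceeds it. A small worked trace as a hypothetical helper, assuming no BT 1-antenna limit.]

    /* Illustration of the factor expansion and capping. */
    static u8 ampdu_agg_limit_sketch(u8 factor, u8 max_agg)
    {
    	u8 agg = 1 << (factor + 2);	/* 0..3 -> 4/8/16/32 subframes */

    	if (agg > max_agg)
    		agg = max_agg;		/* e.g. capped to 0xF or 0x8 */
    	return agg;
    }
    /* ampdu_agg_limit_sketch(3, 0xF) == 0xF; each REG_AGGLEN_LMT nibble
     * larger than this value is then lowered to it by the loop above. */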
+
+void rtl8723a_set_acm_ctrl(struct rtw_adapter *padapter, u8 ctrl)
+{
+ u8 hwctrl = 0;
+
+ if (ctrl != 0) {
+ hwctrl |= AcmHw_HwEn;
+
+ if (ctrl & BIT(1)) /* BE */
+ hwctrl |= AcmHw_BeqEn;
+
+ if (ctrl & BIT(2)) /* VI */
+ hwctrl |= AcmHw_ViqEn;
+
+ if (ctrl & BIT(3)) /* VO */
+ hwctrl |= AcmHw_VoqEn;
+ }
+
+ DBG_8723A("[HW_VAR_ACM_CTRL] Write 0x%02X\n", hwctrl);
+ rtw_write8(padapter, REG_ACMHWCTRL, hwctrl);
+}
+
+void rtl8723a_set_media_status(struct rtw_adapter *padapter, u8 status)
+{
+ u8 val8;
+
+ val8 = rtw_read8(padapter, MSR) & 0x0c;
+ val8 |= status;
+ rtw_write8(padapter, MSR, val8);
+}
+
+void rtl8723a_set_media_status1(struct rtw_adapter *padapter, u8 status)
+{
+ u8 val8;
+
+ val8 = rtw_read8(padapter, MSR) & 0x03;
+ val8 |= status << 2;
+ rtw_write8(padapter, MSR, val8);
+}
+
+void rtl8723a_set_bcn_func(struct rtw_adapter *padapter, u8 val)
+{
+ if (val)
+ SetBcnCtrlReg23a(padapter, EN_BCN_FUNCTION | EN_TXBCN_RPT, 0);
+ else
+ SetBcnCtrlReg23a(padapter, 0, EN_BCN_FUNCTION | EN_TXBCN_RPT);
+}
+
+void rtl8723a_check_bssid(struct rtw_adapter *padapter, u8 val)
+{
+ u32 val32;
+ val32 = rtw_read32(padapter, REG_RCR);
+ if (val)
+ val32 |= RCR_CBSSID_DATA | RCR_CBSSID_BCN;
+ else
+ val32 &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ rtw_write32(padapter, REG_RCR, val32);
+}
+
+void rtl8723a_mlme_sitesurvey(struct rtw_adapter *padapter, u8 flag)
+{
+ if (flag) { /* under sitesurvey */
+ u32 v32;
+
+ /* config RCR to receive different BSSID & not
+ to receive data frame */
+ v32 = rtw_read32(padapter, REG_RCR);
+ v32 &= ~(RCR_CBSSID_BCN);
+ rtw_write32(padapter, REG_RCR, v32);
+ /* reject all data frame */
+ rtw_write16(padapter, REG_RXFLTMAP2, 0);
+
+ /* disable update TSF */
+ SetBcnCtrlReg23a(padapter, DIS_TSF_UDT, 0);
+ } else { /* sitesurvey done */
+
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo;
+ u32 v32;
+
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if ((is_client_associated_to_ap23a(padapter) == true) ||
+ ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) ||
+ ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) {
+ /* enable to rx data frame */
+ rtw_write16(padapter, REG_RXFLTMAP2, 0xFFFF);
+
+ /* enable update TSF */
+ SetBcnCtrlReg23a(padapter, 0, DIS_TSF_UDT);
+ }
+
+ v32 = rtw_read32(padapter, REG_RCR);
+ v32 |= RCR_CBSSID_BCN;
+ rtw_write32(padapter, REG_RCR, v32);
+ }
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ BT_WifiScanNotify(padapter, flag ? true : false);
+#endif
+}
+
+void rtl8723a_on_rcr_am(struct rtw_adapter *padapter)
+{
+ rtw_write32(padapter, REG_RCR, rtw_read32(padapter, REG_RCR) | RCR_AM);
+ DBG_8723A("%s, %d, RCR = %x \n", __FUNCTION__, __LINE__,
+ rtw_read32(padapter, REG_RCR));
+}
+
+void rtl8723a_off_rcr_am(struct rtw_adapter *padapter)
+{
+ rtw_write32(padapter, REG_RCR,
+ rtw_read32(padapter, REG_RCR) & (~RCR_AM));
+ DBG_8723A("%s, %d, RCR = %x \n", __FUNCTION__, __LINE__,
+ rtw_read32(padapter, REG_RCR));
+}
+
+void rtl8723a_set_slot_time(struct rtw_adapter *padapter, u8 slottime)
+{
+ u8 u1bAIFS, aSifsTime;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ rtw_write8(padapter, REG_SLOT, slottime);
+
+ if (pmlmeinfo->WMM_enable == 0) {
+ if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
+ aSifsTime = 10;
+ else
+ aSifsTime = 16;
+
+ u1bAIFS = aSifsTime + (2 * pmlmeinfo->slotTime);
+
+ /* <Roger_EXP> Temporary removed, 2008.06.20. */
+ rtw_write8(padapter, REG_EDCA_VO_PARAM, u1bAIFS);
+ rtw_write8(padapter, REG_EDCA_VI_PARAM, u1bAIFS);
+ rtw_write8(padapter, REG_EDCA_BE_PARAM, u1bAIFS);
+ rtw_write8(padapter, REG_EDCA_BK_PARAM, u1bAIFS);
+ }
+}
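[Editor's note: when WMM is disabled, the AIFS written to all four EDCA parameter registers is derived as SIFS plus twice the slot time. A worked example as a hypothetical helper, for a non-11B network with a 9 us slot assumed for illustration.]

    /* Worked example: non-11B SIFS of 16 us and a 9 us slot. */
    static u8 aifs_sketch(u8 sifs, u8 slot)
    {
    	return sifs + 2 * slot;		/* 16 + 2 * 9 = 34 */
    }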
+
+void rtl8723a_ack_preamble(struct rtw_adapter *padapter, u8 bShortPreamble)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 regTmp;
+
+ /* Joseph marked out for Netgear 3500 TKIP
+ channel 7 issue.(Temporarily) */
+ regTmp = (pHalData->nCur40MhzPrimeSC) << 5;
+ /* regTmp = 0; */
+ if (bShortPreamble)
+ regTmp |= 0x80;
+ rtw_write8(padapter, REG_RRSR + 2, regTmp);
+}
+
+void rtl8723a_set_sec_cfg(struct rtw_adapter *padapter, u8 sec)
+{
+ rtw_write8(padapter, REG_SECCFG, sec);
+}
+
+void rtl8723a_cam_empty_entry(struct rtw_adapter *padapter, u8 ucIndex)
+{
+ u8 i;
+ u32 ulCommand = 0;
+ u32 ulContent = 0;
+ u32 ulEncAlgo = CAM_AES;
+
+ for (i = 0; i < CAM_CONTENT_COUNT; i++) {
+ /* filled id in CAM config 2 byte */
+ if (i == 0) {
+ ulContent |= (ucIndex & 0x03) |
+ ((u16) (ulEncAlgo) << 2);
+ /* ulContent |= CAM_VALID; */
+ } else {
+ ulContent = 0;
+ }
+ /* polling bit, and No Write enable, and address */
+ ulCommand = CAM_CONTENT_COUNT * ucIndex + i;
+ ulCommand = ulCommand | CAM_POLLINIG | CAM_WRITE;
+ /* writing content 0 is equal to marking the entry invalid */
+ /* delay_ms(40); */
+ rtw_write32(padapter, WCAMI, ulContent);
+ /* RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("CAM_empty_entry23a(): WRITE A4: %lx \n", ulContent));*/
+ /* delay_ms(40); */
+ rtw_write32(padapter, RWCAM, ulCommand);
+ /* RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("CAM_empty_entry23a(): WRITE A0: %lx \n", ulCommand));*/
+ }
+}
+
+void rtl8723a_cam_invalid_all(struct rtw_adapter *padapter)
+{
+ rtw_write32(padapter, RWCAM, BIT(31) | BIT(30));
+}
+
+void rtl8723a_cam_write(struct rtw_adapter *padapter, u32 val1, u32 val2)
+{
+ u32 cmd;
+
+ rtw_write32(padapter, WCAMI, val1);
+
+ cmd = CAM_POLLINIG | CAM_WRITE | val2;
+ rtw_write32(padapter, RWCAM, cmd);
+}
+
+void rtl8723a_fifo_cleanup(struct rtw_adapter *padapter)
+{
+#define RW_RELEASE_EN BIT(18)
+#define RXDMA_IDLE BIT(17)
+
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ u8 trycnt = 100;
+
+ /* pause tx */
+ rtw_write8(padapter, REG_TXPAUSE, 0xff);
+
+ /* keep sn */
+ padapter->xmitpriv.nqos_ssn = rtw_read16(padapter, REG_NQOS_SEQ);
+
+ if (pwrpriv->bkeepfwalive != true) {
+ u32 v32;
+
+ /* RX DMA stop */
+ v32 = rtw_read32(padapter, REG_RXPKT_NUM);
+ v32 |= RW_RELEASE_EN;
+ rtw_write32(padapter, REG_RXPKT_NUM, v32);
+ do {
+ v32 = rtw_read32(padapter, REG_RXPKT_NUM) & RXDMA_IDLE;
+ if (!v32)
+ break;
+ } while (trycnt--);
+ if (trycnt == 0) {
+ DBG_8723A("Stop RX DMA failed......\n");
+ }
+
+ /* RQPN Load 0 */
+ rtw_write16(padapter, REG_RQPN_NPQ, 0);
+ rtw_write32(padapter, REG_RQPN, 0x80000000);
+ mdelay(10);
+ }
+}
+
+void rtl8723a_set_apfm_on_mac(struct rtw_adapter *padapter, u8 val)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->bMacPwrCtrlOn = val;
+ DBG_8723A("%s: bMacPwrCtrlOn =%d\n", __func__, pHalData->bMacPwrCtrlOn);
+}
+
+void rtl8723a_bcn_valid(struct rtw_adapter *padapter)
+{
+ /* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL + 2;
+ write 1 to clear, cleared by SW */
+ rtw_write8(padapter, REG_TDECTRL + 2,
+ rtw_read8(padapter, REG_TDECTRL + 2) | BIT0);
+}
+
+void rtl8723a_set_tx_pause(struct rtw_adapter *padapter, u8 pause)
+{
+ rtw_write8(padapter, REG_TXPAUSE, pause);
+}
+
+void rtl8723a_set_beacon_interval(struct rtw_adapter *padapter, u16 interval)
+{
+ rtw_write16(padapter, REG_BCN_INTERVAL, interval);
+}
+
+void rtl8723a_set_resp_sifs(struct rtw_adapter *padapter,
+ u8 r2t1, u8 r2t2, u8 t2t1, u8 t2t2)
+{
+ /* SIFS_Timer = 0x0a0a0808; */
+ /* RESP_SIFS for CCK */
+ /* SIFS_T2T_CCK (0x08) */
+ rtw_write8(padapter, REG_R2T_SIFS, r2t1);
+ /* SIFS_R2T_CCK(0x08) */
+ rtw_write8(padapter, REG_R2T_SIFS + 1, r2t2);
+ /* RESP_SIFS for OFDM */
+ /* SIFS_T2T_OFDM (0x0a) */
+ rtw_write8(padapter, REG_T2T_SIFS, t2t1);
+ /* SIFS_R2T_OFDM(0x0a) */
+ rtw_write8(padapter, REG_T2T_SIFS + 1, t2t2);
+}
+
+void rtl8723a_set_ac_param_vo(struct rtw_adapter *padapter, u32 vo)
+{
+ rtw_write32(padapter, REG_EDCA_VO_PARAM, vo);
+}
+
+void rtl8723a_set_ac_param_vi(struct rtw_adapter *padapter, u32 vi)
+{
+ rtw_write32(padapter, REG_EDCA_VI_PARAM, vi);
+}
+
+void rtl8723a_set_ac_param_be(struct rtw_adapter *padapter, u32 be)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->AcParam_BE = be;
+ rtw_write32(padapter, REG_EDCA_BE_PARAM, be);
+}
+
+void rtl8723a_set_ac_param_bk(struct rtw_adapter *padapter, u32 bk)
+{
+ rtw_write32(padapter, REG_EDCA_BK_PARAM, bk);
+}
+
+void rtl8723a_set_rxdma_agg_pg_th(struct rtw_adapter *padapter, u8 val)
+{
+ rtw_write8(padapter, REG_RXDMA_AGG_PG_TH, val);
+}
+
+void rtl8723a_set_nav_upper(struct rtw_adapter *padapter, u32 usNavUpper)
+{
+ if (usNavUpper > HAL_8723A_NAV_UPPER_UNIT * 0xFF) {
+ RT_TRACE(_module_hal_init_c_, _drv_notice_,
+ ("The setting value (0x%08X us) of NAV_UPPER "
+ "is larger than (%d * 0xFF)!!!\n",
+ usNavUpper, HAL_8723A_NAV_UPPER_UNIT));
+ return;
+ }
+
+ /* ((usNavUpper + HAL_8723A_NAV_UPPER_UNIT - 1) /
+ HAL_8723A_NAV_UPPER_UNIT) rounds usNavUpper up to the next whole unit. */
+ usNavUpper = (usNavUpper + HAL_8723A_NAV_UPPER_UNIT - 1) /
+ HAL_8723A_NAV_UPPER_UNIT;
+ rtw_write8(padapter, REG_NAV_UPPER, (u8) usNavUpper);
+}
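[Editor's note: the expression above is the usual round-up-to-whole-units idiom; in general form as a sketch, with a 128 us NAV unit assumed purely for the example.]

    /* Round n up to whole 'unit's (same idiom as above). */
    static inline u32 ceil_div_sketch(u32 n, u32 unit)
    {
    	return (n + unit - 1) / unit;
    }
    /* e.g. with a hypothetical 128 us NAV unit, 300 us maps to 3 units. */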
+
+void rtl8723a_set_initial_gain(struct rtw_adapter *padapter, u32 rx_gain)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct dig_t *pDigTable = &pHalData->odmpriv.DM_DigTable;
+
+ if (rx_gain == 0xff) /* restore rx gain */
+ ODM_Write_DIG23a(&pHalData->odmpriv, pDigTable->BackupIGValue);
+ else {
+ pDigTable->BackupIGValue = pDigTable->CurIGValue;
+ ODM_Write_DIG23a(&pHalData->odmpriv, rx_gain);
+ }
+}
+
+void rtl8723a_odm_support_ability_write(struct rtw_adapter *padapter, u32 val)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->odmpriv.SupportAbility = val;
+}
+
+void rtl8723a_odm_support_ability_backup(struct rtw_adapter *padapter, u8 val)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (val) /* save dm flag */
+ pHalData->odmpriv.BK_SupportAbility =
+ pHalData->odmpriv.SupportAbility;
+ else /* restore dm flag */
+ pHalData->odmpriv.SupportAbility =
+ pHalData->odmpriv.BK_SupportAbility;
+}
+
+void rtl8723a_odm_support_ability_set(struct rtw_adapter *padapter, u32 val)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (val == DYNAMIC_ALL_FUNC_ENABLE) {
+ pHalData->dmpriv.DMFlag = pHalData->dmpriv.InitDMFlag;
+ pHalData->odmpriv.SupportAbility = pHalData->dmpriv.InitODMFlag;
+ } else {
+ pHalData->odmpriv.SupportAbility |= val;
+ }
+}
+
+void rtl8723a_odm_support_ability_clr(struct rtw_adapter *padapter, u32 val)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->odmpriv.SupportAbility &= val;
+}
+
+void rtl8723a_set_rpwm(struct rtw_adapter *padapter, u8 val)
+{
+ rtw_write8(padapter, REG_USB_HRPWM, val);
+}
diff --git a/drivers/staging/rtl8723au/hal/hal_intf.c b/drivers/staging/rtl8723au/hal/hal_intf.c
new file mode 100644
index 000000000000..de3608b4010a
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/hal_intf.c
@@ -0,0 +1,420 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#define _HAL_INTF_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <hal_intf.h>
+
+#include <usb_hal.h>
+
+void rtw_hal_chip_configure23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.intf_chip_configure)
+ padapter->HalFunc.intf_chip_configure(padapter);
+}
+
+void rtw_hal_read_chip_info23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.read_adapter_info)
+ padapter->HalFunc.read_adapter_info(padapter);
+}
+
+void rtw_hal_read_chip_version23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.read_chip_version)
+ padapter->HalFunc.read_chip_version(padapter);
+}
+
+void rtw_hal_def_value_init23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.init_default_value)
+ padapter->HalFunc.init_default_value(padapter);
+}
+void rtw_hal_free_data23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.free_hal_data)
+ padapter->HalFunc.free_hal_data(padapter);
+}
+void rtw_hal_dm_init23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.dm_init)
+ padapter->HalFunc.dm_init(padapter);
+}
+void rtw_hal_dm_deinit23a(struct rtw_adapter *padapter)
+{
+ /* cancel dm timer */
+ if (padapter->HalFunc.dm_deinit)
+ padapter->HalFunc.dm_deinit(padapter);
+}
+void rtw_hal_sw_led_init23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.InitSwLeds)
+ padapter->HalFunc.InitSwLeds(padapter);
+}
+
+void rtw_hal_sw_led_deinit23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.DeInitSwLeds)
+ padapter->HalFunc.DeInitSwLeds(padapter);
+}
+
+u32 rtw_hal_power_on23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.hal_power_on)
+ return padapter->HalFunc.hal_power_on(padapter);
+ return _FAIL;
+}
+
+uint rtw_hal_init23a(struct rtw_adapter *padapter)
+{
+ uint status = _SUCCESS;
+
+ padapter->hw_init_completed = false;
+
+ status = padapter->HalFunc.hal_init(padapter);
+
+ if (status == _SUCCESS) {
+ padapter->hw_init_completed = true;
+
+ if (padapter->registrypriv.notch_filter == 1)
+ rtw_hal_notch_filter23a(padapter, 1);
+
+ rtw_hal_reset_security_engine23a(padapter);
+ } else {
+ padapter->hw_init_completed = false;
+ DBG_8723A("rtw_hal_init23a: hal__init fail\n");
+ }
+
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("-rtl871x_hal_init:status = 0x%x\n", status));
+
+ return status;
+}
+
+uint rtw_hal_deinit23a(struct rtw_adapter *padapter)
+{
+ uint status = _SUCCESS;
+
+ status = padapter->HalFunc.hal_deinit(padapter);
+
+ if (status == _SUCCESS)
+ padapter->hw_init_completed = false;
+ else
+ DBG_8723A("\n rtw_hal_deinit23a: hal_init fail\n");
+ return status;
+}
+
+void rtw_hal_set_hwreg23a(struct rtw_adapter *padapter, u8 variable, u8 *val)
+{
+ if (padapter->HalFunc.SetHwRegHandler)
+ padapter->HalFunc.SetHwRegHandler(padapter, variable, val);
+}
+
+void rtw23a_hal_get_hwreg(struct rtw_adapter *padapter, u8 variable, u8 *val)
+{
+ if (padapter->HalFunc.GetHwRegHandler)
+ padapter->HalFunc.GetHwRegHandler(padapter, variable, val);
+}
+
+u8 rtw_hal_set_def_var23a(struct rtw_adapter *padapter, enum hal_def_variable eVariable, void *pValue)
+{
+ if (padapter->HalFunc.SetHalDefVarHandler)
+ return padapter->HalFunc.SetHalDefVarHandler(padapter, eVariable, pValue);
+ return _FAIL;
+}
+u8 rtw_hal_get_def_var23a(struct rtw_adapter *padapter, enum hal_def_variable eVariable, void *pValue)
+{
+ if (padapter->HalFunc.GetHalDefVarHandler)
+ return padapter->HalFunc.GetHalDefVarHandler(padapter, eVariable, pValue);
+ return _FAIL;
+}
+
+void rtw_hal_set_odm_var23a(struct rtw_adapter *padapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
+{
+ if (padapter->HalFunc.SetHalODMVarHandler)
+ padapter->HalFunc.SetHalODMVarHandler(padapter, eVariable, pValue1, bSet);
+}
+void rtw_hal_get_odm_var23a(struct rtw_adapter *padapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
+{
+ if (padapter->HalFunc.GetHalODMVarHandler)
+ padapter->HalFunc.GetHalODMVarHandler(padapter, eVariable, pValue1, bSet);
+}
+
+void rtw_hal_enable_interrupt23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.enable_interrupt)
+ padapter->HalFunc.enable_interrupt(padapter);
+ else
+ DBG_8723A("%s: HalFunc.enable_interrupt is NULL!\n", __FUNCTION__);
+
+}
+void rtw_hal_disable_interrupt23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.disable_interrupt)
+ padapter->HalFunc.disable_interrupt(padapter);
+ else
+ DBG_8723A("%s: HalFunc.disable_interrupt is NULL!\n", __FUNCTION__);
+
+}
+
+u32 rtw_hal_inirp_init23a(struct rtw_adapter *padapter)
+{
+ u32 rst = _FAIL;
+ if (padapter->HalFunc.inirp_init)
+ rst = padapter->HalFunc.inirp_init(padapter);
+ else
+ DBG_8723A(" %s HalFunc.inirp_init is NULL!!!\n", __FUNCTION__);
+ return rst;
+}
+
+u32 rtw_hal_inirp_deinit23a(struct rtw_adapter *padapter)
+{
+
+ if (padapter->HalFunc.inirp_deinit)
+ return padapter->HalFunc.inirp_deinit(padapter);
+
+ return _FAIL;
+
+}
+
+u8 rtw_hal_intf_ps_func23a(struct rtw_adapter *padapter, enum hal_intf_ps_func efunc_id, u8 *val)
+{
+ if (padapter->HalFunc.interface_ps_func)
+ return padapter->HalFunc.interface_ps_func(padapter, efunc_id, val);
+ return _FAIL;
+}
+
+s32 rtw_hal_xmit23aframe_enqueue(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ if (padapter->HalFunc.hal_xmitframe_enqueue)
+ return padapter->HalFunc.hal_xmitframe_enqueue(padapter, pxmitframe);
+
+ return false;
+}
+
+s32 rtw_hal_xmit23a(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ if (padapter->HalFunc.hal_xmit)
+ return padapter->HalFunc.hal_xmit(padapter, pxmitframe);
+
+ return false;
+}
+
+s32 rtw_hal_mgnt_xmit23a(struct rtw_adapter *padapter, struct xmit_frame *pmgntframe)
+{
+ s32 ret = _FAIL;
+ if (padapter->HalFunc.mgnt_xmit)
+ ret = padapter->HalFunc.mgnt_xmit(padapter, pmgntframe);
+ return ret;
+}
+
+s32 rtw_hal_init23a_xmit_priv(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.init_xmit_priv != NULL)
+ return padapter->HalFunc.init_xmit_priv(padapter);
+ return _FAIL;
+}
+void rtw_hal_free_xmit_priv23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.free_xmit_priv != NULL)
+ padapter->HalFunc.free_xmit_priv(padapter);
+}
+
+s32 rtw_hal_init23a_recv_priv(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.init_recv_priv)
+ return padapter->HalFunc.init_recv_priv(padapter);
+
+ return _FAIL;
+}
+void rtw_hal_free_recv_priv23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.free_recv_priv)
+ padapter->HalFunc.free_recv_priv(padapter);
+}
+
+void rtw_hal_update_ra_mask23a(struct sta_info *psta, u8 rssi_level)
+{
+ struct rtw_adapter *padapter;
+ struct mlme_priv *pmlmepriv;
+
+ if (!psta)
+ return;
+
+ padapter = psta->padapter;
+
+ pmlmepriv = &padapter->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+#ifdef CONFIG_8723AU_AP_MODE
+ add_RATid23a(padapter, psta, rssi_level);
+#endif
+ } else {
+ if (padapter->HalFunc.UpdateRAMaskHandler)
+ padapter->HalFunc.UpdateRAMaskHandler(padapter, psta->mac_id, rssi_level);
+ }
+}
+
+void rtw_hal_add_ra_tid23a(struct rtw_adapter *padapter, u32 bitmap, u8 arg, u8 rssi_level)
+{
+ if (padapter->HalFunc.Add_RateATid)
+ padapter->HalFunc.Add_RateATid(padapter, bitmap, arg, rssi_level);
+}
+
+/* Start specific interface thread */
+void rtw_hal_start_thread23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.run_thread)
+ padapter->HalFunc.run_thread(padapter);
+}
+/* Stop specific interface thread */
+void rtw_hal_stop_thread23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.cancel_thread)
+ padapter->HalFunc.cancel_thread(padapter);
+}
+
+u32 rtw_hal_read_bbreg23a(struct rtw_adapter *padapter, u32 RegAddr, u32 BitMask)
+{
+ u32 data = 0;
+ if (padapter->HalFunc.read_bbreg)
+ data = padapter->HalFunc.read_bbreg(padapter, RegAddr, BitMask);
+ return data;
+}
+void rtw_hal_write_bbreg23a(struct rtw_adapter *padapter, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ if (padapter->HalFunc.write_bbreg)
+ padapter->HalFunc.write_bbreg(padapter, RegAddr, BitMask, Data);
+}
+
+u32 rtw_hal_read_rfreg23a(struct rtw_adapter *padapter, u32 eRFPath, u32 RegAddr, u32 BitMask)
+{
+ u32 data = 0;
+ if (padapter->HalFunc.read_rfreg)
+ data = padapter->HalFunc.read_rfreg(padapter, eRFPath, RegAddr, BitMask);
+ return data;
+}
+void rtw_hal_write_rfreg23a(struct rtw_adapter *padapter, u32 eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ if (padapter->HalFunc.write_rfreg)
+ padapter->HalFunc.write_rfreg(padapter, eRFPath, RegAddr, BitMask, Data);
+}
+
+s32 rtw_hal_interrupt_handler23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.interrupt_handler)
+ return padapter->HalFunc.interrupt_handler(padapter);
+ return _FAIL;
+}
+
+void rtw_hal_set_bwmode23a(struct rtw_adapter *padapter,
+ enum ht_channel_width Bandwidth, u8 offset)
+{
+ if (padapter->HalFunc.set_bwmode_handler)
+ padapter->HalFunc.set_bwmode_handler(padapter, Bandwidth,
+ offset);
+}
+
+void rtw_hal_set_chan23a(struct rtw_adapter *padapter, u8 channel)
+{
+ if (padapter->HalFunc.set_channel_handler)
+ padapter->HalFunc.set_channel_handler(padapter, channel);
+}
+
+void rtw_hal_dm_watchdog23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.hal_dm_watchdog)
+ padapter->HalFunc.hal_dm_watchdog(padapter);
+}
+
+void rtw_hal_bcn_related_reg_setting23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.SetBeaconRelatedRegistersHandler)
+ padapter->HalFunc.SetBeaconRelatedRegistersHandler(padapter);
+}
+
+void rtw_hal_sreset_init23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.sreset_init_value23a)
+ padapter->HalFunc.sreset_init_value23a(padapter);
+}
+void rtw_hal_sreset_reset23a(struct rtw_adapter *padapter)
+{
+ padapter = GET_PRIMARY_ADAPTER(padapter);
+
+ if (padapter->HalFunc.silentreset)
+ padapter->HalFunc.silentreset(padapter);
+}
+
+void rtw_hal_sreset_reset23a_value23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.sreset_reset_value23a)
+ padapter->HalFunc.sreset_reset_value23a(padapter);
+}
+
+void rtw_hal_sreset_xmit_status_check23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.sreset_xmit_status_check)
+ padapter->HalFunc.sreset_xmit_status_check(padapter);
+}
+void rtw_hal_sreset_linked_status_check23a(struct rtw_adapter *padapter)
+{
+ if (padapter->HalFunc.sreset_linked_status_check)
+ padapter->HalFunc.sreset_linked_status_check(padapter);
+}
+u8 rtw_hal_sreset_get_wifi_status23a(struct rtw_adapter *padapter)
+{
+ u8 status = 0;
+ if (padapter->HalFunc.sreset_get_wifi_status23a)
+ status = padapter->HalFunc.sreset_get_wifi_status23a(padapter);
+ return status;
+}
+
+bool rtw_hal_sreset_inprogress(struct rtw_adapter *padapter)
+{
+ bool inprogress = false;
+
+ padapter = GET_PRIMARY_ADAPTER(padapter);
+
+ if (padapter->HalFunc.sreset_inprogress)
+ inprogress = padapter->HalFunc.sreset_inprogress(padapter);
+ return inprogress;
+}
+
+void rtw_hal_notch_filter23a(struct rtw_adapter *adapter, bool enable)
+{
+ if (adapter->HalFunc.hal_notch_filter)
+ adapter->HalFunc.hal_notch_filter(adapter, enable);
+}
+
+void rtw_hal_reset_security_engine23a(struct rtw_adapter *adapter)
+{
+ if (adapter->HalFunc.hal_reset_security_engine)
+ adapter->HalFunc.hal_reset_security_engine(adapter);
+}
+
+s32 rtw_hal_c2h_handler23a(struct rtw_adapter *adapter, struct c2h_evt_hdr *c2h_evt)
+{
+ s32 ret = _FAIL;
+ if (adapter->HalFunc.c2h_handler)
+ ret = adapter->HalFunc.c2h_handler(adapter, c2h_evt);
+ return ret;
+}
+
+c2h_id_filter rtw_hal_c2h_id_filter_ccx23a(struct rtw_adapter *adapter)
+{
+ return adapter->HalFunc.c2h_id_filter_ccx;
+}
diff --git a/drivers/staging/rtl8723au/hal/odm.c b/drivers/staging/rtl8723au/hal/odm.c
new file mode 100644
index 000000000000..584a74ed2943
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/odm.c
@@ -0,0 +1,2090 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+static const u16 dB_Invert_Table[8][12] = {
+ {1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4},
+ {4, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16},
+ {18, 20, 22, 25, 28, 32, 35, 40, 45, 50, 56, 63},
+ {71, 79, 89, 100, 112, 126, 141, 158, 178, 200, 224, 251},
+ {282, 316, 355, 398, 447, 501, 562, 631, 708, 794, 891, 1000},
+ {1122, 1259, 1413, 1585, 1778, 1995, 2239, 2512, 2818, 3162, 3548, 3981},
+ {4467, 5012, 5623, 6310, 7079, 7943, 8913, 10000, 11220, 12589, 14125, 15849},
+ {17783, 19953, 22387, 25119, 28184, 31623, 35481, 39811, 44668, 50119, 56234, 65535}
+};
+
+static u32 EDCAParam[HT_IOT_PEER_MAX][3] = { /* UL DL */
+ {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 0:unknown AP */
+ {0xa44f, 0x5ea44f, 0x5e431c}, /* 1:realtek AP */
+ {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 2:unknown AP => realtek_92SE */
+ {0x5ea32b, 0x5ea42b, 0x5e4322}, /* 3:broadcom AP */
+ {0x5ea422, 0x00a44f, 0x00a44f}, /* 4:ralink AP */
+ {0x5ea322, 0x00a630, 0x00a44f}, /* 5:atheros AP */
+ {0x5e4322, 0x5e4322, 0x5e4322},/* 6:cisco AP */
+ {0x5ea44f, 0x00a44f, 0x5ea42b}, /* 8:marvell AP */
+ {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 10:unknown AP => 92U AP */
+ {0x5ea42b, 0xa630, 0x5e431c}, /* 11:airgocap AP */
+};
+
+/* EDCA Parameter for AP/ADSL by Mingzhi 2011-11-22 */
+
+/* Global var */
+u32 OFDMSwingTable23A[OFDM_TABLE_SIZE_92D] = {
+ 0x7f8001fe, /* 0, +6.0dB */
+ 0x788001e2, /* 1, +5.5dB */
+ 0x71c001c7, /* 2, +5.0dB */
+ 0x6b8001ae, /* 3, +4.5dB */
+ 0x65400195, /* 4, +4.0dB */
+ 0x5fc0017f, /* 5, +3.5dB */
+ 0x5a400169, /* 6, +3.0dB */
+ 0x55400155, /* 7, +2.5dB */
+ 0x50800142, /* 8, +2.0dB */
+ 0x4c000130, /* 9, +1.5dB */
+ 0x47c0011f, /* 10, +1.0dB */
+ 0x43c0010f, /* 11, +0.5dB */
+ 0x40000100, /* 12, +0dB */
+ 0x3c8000f2, /* 13, -0.5dB */
+ 0x390000e4, /* 14, -1.0dB */
+ 0x35c000d7, /* 15, -1.5dB */
+ 0x32c000cb, /* 16, -2.0dB */
+ 0x300000c0, /* 17, -2.5dB */
+ 0x2d4000b5, /* 18, -3.0dB */
+ 0x2ac000ab, /* 19, -3.5dB */
+ 0x288000a2, /* 20, -4.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x24000090, /* 22, -5.0dB */
+ 0x22000088, /* 23, -5.5dB */
+ 0x20000080, /* 24, -6.0dB */
+ 0x1e400079, /* 25, -6.5dB */
+ 0x1c800072, /* 26, -7.0dB */
+ 0x1b00006c, /* 27. -7.5dB */
+ 0x19800066, /* 28, -8.0dB */
+ 0x18000060, /* 29, -8.5dB */
+ 0x16c0005b, /* 30, -9.0dB */
+ 0x15800056, /* 31, -9.5dB */
+ 0x14400051, /* 32, -10.0dB */
+ 0x1300004c, /* 33, -10.5dB */
+ 0x12000048, /* 34, -11.0dB */
+ 0x11000044, /* 35, -11.5dB */
+ 0x10000040, /* 36, -12.0dB */
+ 0x0f00003c,/* 37, -12.5dB */
+ 0x0e400039,/* 38, -13.0dB */
+ 0x0d800036,/* 39, -13.5dB */
+ 0x0cc00033,/* 40, -14.0dB */
+ 0x0c000030,/* 41, -14.5dB */
+ 0x0b40002d,/* 42, -15.0dB */
+};
+
+u8 CCKSwingTable_Ch1_Ch1323A[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
+};
+
+u8 CCKSwingTable_Ch1423A[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
+};
+
+/* Local function prototypes. */
+
+/* START------------COMMON INFO RELATED--------------- */
+void odm_CommonInfoSelfInit23a(struct dm_odm_t *pDM_Odm);
+
+void odm_CommonInfoSelfUpdate23a(struct dm_odm_t *pDM_Odm);
+
+void odm_CmnInfoInit_Debug23a(struct dm_odm_t *pDM_Odm);
+
+void odm_CmnInfoHook_Debug23a(struct dm_odm_t *pDM_Odm);
+
+void odm_CmnInfoUpdate_Debug23a(struct dm_odm_t *pDM_Odm);
+
+/* START---------------DIG--------------------------- */
+void odm_FalseAlarmCounterStatistics23a(struct dm_odm_t *pDM_Odm);
+
+void odm_DIG23aInit(struct dm_odm_t *pDM_Odm);
+
+void odm_DIG23a(struct dm_odm_t *pDM_Odm);
+
+void odm_CCKPacketDetectionThresh23a(struct dm_odm_t *pDM_Odm);
+/* END---------------DIG--------------------------- */
+
+/* START-------BB POWER SAVE----------------------- */
+void odm23a_DynBBPSInit(struct dm_odm_t *pDM_Odm);
+
+void odm_DynamicBBPowerSaving23a(struct dm_odm_t *pDM_Odm);
+
+void odm_1R_CCA23a(struct dm_odm_t *pDM_Odm);
+/* END---------BB POWER SAVE----------------------- */
+
+void odm_RefreshRateAdaptiveMask23aMP23a(struct dm_odm_t *pDM_Odm);
+
+void odm_RefreshRateAdaptiveMask23aCE23a(struct dm_odm_t *pDM_Odm);
+
+void odm_RefreshRateAdaptiveMask23aAPADSL23a(struct dm_odm_t *pDM_Odm);
+
+void odm_DynamicTxPower23aInit(struct dm_odm_t *pDM_Odm);
+
+void odm_DynamicTxPower23aRestorePowerIndex(struct dm_odm_t *pDM_Odm);
+
+void odm_DynamicTxPower23aSavePowerIndex(struct dm_odm_t *pDM_Odm);
+
+void odm_DynamicTxPower23aWritePowerIndex(struct dm_odm_t *pDM_Odm,
+ u8 Value);
+
+void odm_DynamicTxPower23a_92C(struct dm_odm_t *pDM_Odm);
+
+void odm_DynamicTxPower23a_92D(struct dm_odm_t *pDM_Odm);
+
+void odm_RSSIMonitorInit(struct dm_odm_t *pDM_Odm);
+
+void odm_RSSIMonitorCheck23aMP(struct dm_odm_t *pDM_Odm);
+
+void odm_RSSIMonitorCheck23aCE(struct dm_odm_t *pDM_Odm);
+void odm_RSSIMonitorCheck23aAP(struct dm_odm_t *pDM_Odm);
+
+void odm_RSSIMonitorCheck23a(struct dm_odm_t *pDM_Odm);
+void odm_DynamicTxPower23a(struct dm_odm_t *pDM_Odm);
+
+void odm_SwAntDivInit(struct dm_odm_t *pDM_Odm);
+
+void odm_SwAntDivInit_NIC(struct dm_odm_t *pDM_Odm);
+
+void odm_SwAntDivChkAntSwitch(struct dm_odm_t *pDM_Odm, u8 Step);
+
+void odm_SwAntDivChkAntSwitchNIC(struct dm_odm_t *pDM_Odm, u8 Step);
+
+void odm_SwAntDivChkAntSwitchCallback23a(unsigned long data);
+
+void odm_GlobalAdapterCheck(void);
+
+void odm_RefreshRateAdaptiveMask23a(struct dm_odm_t *pDM_Odm);
+
+void ODM_TXPowerTrackingCheck23a(struct dm_odm_t *pDM_Odm);
+
+void odm_TXPowerTrackingCheckAP(struct dm_odm_t *pDM_Odm);
+
+void odm_RateAdaptiveMaskInit23a(struct dm_odm_t *pDM_Odm);
+
+void odm_TXPowerTrackingThermalMeterInit23a(struct dm_odm_t *pDM_Odm);
+
+void odm_TXPowerTrackingInit23a(struct dm_odm_t *pDM_Odm);
+
+void odm_TXPowerTrackingCheckMP(struct dm_odm_t *pDM_Odm);
+
+void odm_TXPowerTrackingCheckCE23a(struct dm_odm_t *pDM_Odm);
+
+void odm_EdcaTurboCheck23a(struct dm_odm_t *pDM_Odm);
+void ODM_EdcaTurboInit23a(struct dm_odm_t *pDM_Odm);
+
+void odm_EdcaTurboCheck23aCE23a(struct dm_odm_t *pDM_Odm);
+
+#define RxDefaultAnt1 0x65a9
+#define RxDefaultAnt2 0x569a
+
+void odm_InitHybridAntDiv23a(struct dm_odm_t *pDM_Odm);
+
+bool odm_StaDefAntSel(struct dm_odm_t *pDM_Odm, u32 OFDM_Ant1_Cnt,
+ u32 OFDM_Ant2_Cnt, u32 CCK_Ant1_Cnt,
+ u32 CCK_Ant2_Cnt, u8 *pDefAnt);
+
+void odm_SetRxIdleAnt(struct dm_odm_t *pDM_Odm, u8 Ant, bool bDualPath);
+
+void odm_HwAntDiv23a(struct dm_odm_t *pDM_Odm);
+
+/* 3 Export Interface */
+
+/* 2011/09/21 MH Added to describe the resources that different teams need to allocate. */
+void ODM23a_DMInit(struct dm_odm_t *pDM_Odm)
+{
+ /* For all IC series */
+ odm_CommonInfoSelfInit23a(pDM_Odm);
+ odm_CmnInfoInit_Debug23a(pDM_Odm);
+ odm_DIG23aInit(pDM_Odm);
+ odm_RateAdaptiveMaskInit23a(pDM_Odm);
+
+ if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
+ odm23a_DynBBPSInit(pDM_Odm);
+ odm_DynamicTxPower23aInit(pDM_Odm);
+ odm_TXPowerTrackingInit23a(pDM_Odm);
+ ODM_EdcaTurboInit23a(pDM_Odm);
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_InitHybridAntDiv23a(pDM_Odm);
+ else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
+ odm_SwAntDivInit(pDM_Odm);
+ }
+}
+
+/* 2011/09/20 MH This is the entry point for all teams to execute the HW outsourced DM. */
+/* You cannot add any dummy functions here; be careful, you may only use the DM structure */
+/* to perform any new ODM_DM. */
+void ODM_DMWatchdog23a(struct dm_odm_t *pDM_Odm)
+{
+ /* 2012.05.03 Luke: For all IC series */
+ odm_GlobalAdapterCheck();
+ odm_CmnInfoHook_Debug23a(pDM_Odm);
+ odm_CmnInfoUpdate_Debug23a(pDM_Odm);
+ odm_CommonInfoSelfUpdate23a(pDM_Odm);
+ odm_FalseAlarmCounterStatistics23a(pDM_Odm);
+ odm_RSSIMonitorCheck23a(pDM_Odm);
+
+ /* 8723A or 8189ES platform */
+ /* NeilChen--2012--08--24-- */
+ /* Fix Leave LPS issue */
+ if ((pDM_Odm->Adapter->pwrctrlpriv.pwr_mode != PS_MODE_ACTIVE) &&/* in LPS mode */
+ (pDM_Odm->SupportICType & (ODM_RTL8723A))) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("----Step1: odm_DIG23a is in LPS mode\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("---Step2: 8723AS is in LPS mode\n"));
+ odm_DIG23abyRSSI_LPS(pDM_Odm);
+ } else {
+ odm_DIG23a(pDM_Odm);
+ }
+
+ odm_CCKPacketDetectionThresh23a(pDM_Odm);
+
+ if (*(pDM_Odm->pbPowerSaving))
+ return;
+
+ odm_RefreshRateAdaptiveMask23a(pDM_Odm);
+
+ odm_DynamicBBPowerSaving23a(pDM_Odm);
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_HwAntDiv23a(pDM_Odm);
+ else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
+ odm_SwAntDivChkAntSwitch(pDM_Odm, SWAW_STEP_PEAK);
+
+ if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
+ ODM_TXPowerTrackingCheck23a(pDM_Odm);
+ odm_EdcaTurboCheck23a(pDM_Odm);
+ odm_DynamicTxPower23a(pDM_Odm);
+ }
+
+ odm_dtc(pDM_Odm);
+}
+
+/* */
+/* Init /.. Fixed HW value. Only init time. */
+/* */
+void ODM_CmnInfoInit23a(struct dm_odm_t *pDM_Odm, enum odm_cmninfo CmnInfo,
+ u32 Value)
+{
+ /* ODM_RT_TRACE(pDM_Odm,); */
+
+ /* */
+ /* This section is used for init value */
+ /* */
+ switch (CmnInfo) {
+ /* Fixed ODM value. */
+ case ODM_CMNINFO_ABILITY:
+ pDM_Odm->SupportAbility = (u32)Value;
+ break;
+ case ODM_CMNINFO_PLATFORM:
+ break;
+ case ODM_CMNINFO_INTERFACE:
+ pDM_Odm->SupportInterface = (u8)Value;
+ break;
+ case ODM_CMNINFO_MP_TEST_CHIP:
+ pDM_Odm->bIsMPChip = (u8)Value;
+ break;
+ case ODM_CMNINFO_IC_TYPE:
+ pDM_Odm->SupportICType = Value;
+ break;
+ case ODM_CMNINFO_CUT_VER:
+ pDM_Odm->CutVersion = (u8)Value;
+ break;
+ case ODM_CMNINFO_FAB_VER:
+ pDM_Odm->FabVersion = (u8)Value;
+ break;
+ case ODM_CMNINFO_RF_TYPE:
+ pDM_Odm->RFType = (u8)Value;
+ break;
+ case ODM_CMNINFO_RF_ANTENNA_TYPE:
+ pDM_Odm->AntDivType = (u8)Value;
+ break;
+ case ODM_CMNINFO_BOARD_TYPE:
+ pDM_Odm->BoardType = (u8)Value;
+ break;
+ case ODM_CMNINFO_EXT_LNA:
+ pDM_Odm->ExtLNA = (u8)Value;
+ break;
+ case ODM_CMNINFO_EXT_PA:
+ pDM_Odm->ExtPA = (u8)Value;
+ break;
+ case ODM_CMNINFO_EXT_TRSW:
+ pDM_Odm->ExtTRSW = (u8)Value;
+ break;
+ case ODM_CMNINFO_PATCH_ID:
+ pDM_Odm->PatchID = (u8)Value;
+ break;
+ case ODM_CMNINFO_BINHCT_TEST:
+ pDM_Odm->bInHctTest = (bool)Value;
+ break;
+ case ODM_CMNINFO_BWIFI_TEST:
+ pDM_Odm->bWIFITest = (bool)Value;
+ break;
+ case ODM_CMNINFO_SMART_CONCURRENT:
+ pDM_Odm->bDualMacSmartConcurrent = (bool)Value;
+ break;
+ /* To remove the compiler warning, an empty default statement must be added to handle the other values. */
+ default:
+ /* do nothing */
+ break;
+ }
+
+ /* */
+ /* Tx power tracking BB swing table. */
+ /* The base index is 12. An index n below 12 raises tx power by ((12-n)/2) dB; an index of 13 or above lowers it by ((n-12)/2) dB. */
+ /* */
+ pDM_Odm->BbSwingIdxOfdm = 12; /* Set defalut value as index 12. */
+ pDM_Odm->BbSwingIdxOfdmCurrent = 12;
+ pDM_Odm->BbSwingFlagOfdm = false;
+
+}
+
+void ODM23a_CmnInfoHook(struct dm_odm_t *pDM_Odm, enum odm_cmninfo CmnInfo,
+ void *pValue)
+{
+ /* Hook call by reference pointer. */
+ switch (CmnInfo) {
+ /* Dynamic call by reference pointer. */
+ case ODM_CMNINFO_MAC_PHY_MODE:
+ pDM_Odm->pMacPhyMode = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_TX_UNI:
+ pDM_Odm->pNumTxBytesUnicast = (u64 *)pValue;
+ break;
+ case ODM_CMNINFO_RX_UNI:
+ pDM_Odm->pNumRxBytesUnicast = (u64 *)pValue;
+ break;
+ case ODM_CMNINFO_WM_MODE:
+ pDM_Odm->pWirelessMode = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_BAND:
+ pDM_Odm->pBandType = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_SEC_CHNL_OFFSET:
+ pDM_Odm->pSecChOffset = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_SEC_MODE:
+ pDM_Odm->pSecurity = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_BW:
+ pDM_Odm->pBandWidth = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_CHNL:
+ pDM_Odm->pChannel = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_DMSP_GET_VALUE:
+ pDM_Odm->pbGetValueFromOtherMac = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_BUDDY_ADAPTOR:
+ pDM_Odm->pBuddyAdapter = (struct rtw_adapter **)pValue;
+ break;
+ case ODM_CMNINFO_DMSP_IS_MASTER:
+ pDM_Odm->pbMasterOfDMSP = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_SCAN:
+ pDM_Odm->pbScanInProcess = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_POWER_SAVING:
+ pDM_Odm->pbPowerSaving = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_ONE_PATH_CCA:
+ pDM_Odm->pOnePathCCA = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_DRV_STOP:
+ pDM_Odm->pbDriverStopped = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_PNP_IN:
+ pDM_Odm->pbDriverIsGoingToPnpSetPowerSleep = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_INIT_ON:
+ pDM_Odm->pinit_adpt_in_progress = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_ANT_TEST:
+ pDM_Odm->pAntennaTest = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_NET_CLOSED:
+ pDM_Odm->pbNet_closed = (bool *)pValue;
+ break;
+ /* An empty default case is needed to avoid a compiler warning about unhandled enum values. */
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
+void ODM_CmnInfoPtrArrayHook23a(struct dm_odm_t *pDM_Odm, enum odm_cmninfo CmnInfo,
+ u16 Index, void *pValue)
+{
+ /* Hook call by reference pointer. */
+ switch (CmnInfo) {
+ /* Dynamic call by reference pointer. */
+ case ODM_CMNINFO_STA_STATUS:
+ pDM_Odm->pODM_StaInfo[Index] = (struct sta_info *)pValue;
+ break;
+ /* An empty default case is needed to avoid a compiler warning about unhandled enum values. */
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
+/* Update Band/Channel/... These values are dynamic but not per-packet. */
+void ODM_CmnInfoUpdate23a(struct dm_odm_t *pDM_Odm, u32 CmnInfo, u64 Value)
+{
+ /* This init variable may be changed in run time. */
+ switch (CmnInfo) {
+ case ODM_CMNINFO_ABILITY:
+ pDM_Odm->SupportAbility = (u32)Value;
+ break;
+ case ODM_CMNINFO_RF_TYPE:
+ pDM_Odm->RFType = (u8)Value;
+ break;
+ case ODM_CMNINFO_WIFI_DIRECT:
+ pDM_Odm->bWIFI_Direct = (bool)Value;
+ break;
+ case ODM_CMNINFO_WIFI_DISPLAY:
+ pDM_Odm->bWIFI_Display = (bool)Value;
+ break;
+ case ODM_CMNINFO_LINK:
+ pDM_Odm->bLinked = (bool)Value;
+ break;
+ case ODM_CMNINFO_RSSI_MIN:
+ pDM_Odm->RSSI_Min = (u8)Value;
+ break;
+ case ODM_CMNINFO_DBG_COMP:
+ pDM_Odm->DebugComponents = Value;
+ break;
+ case ODM_CMNINFO_DBG_LEVEL:
+ pDM_Odm->DebugLevel = (u32)Value;
+ break;
+ case ODM_CMNINFO_RA_THRESHOLD_HIGH:
+ pDM_Odm->RateAdaptive.HighRSSIThresh = (u8)Value;
+ break;
+ case ODM_CMNINFO_RA_THRESHOLD_LOW:
+ pDM_Odm->RateAdaptive.LowRSSIThresh = (u8)Value;
+ break;
+ }
+
+}
+
+void odm_CommonInfoSelfInit23a(struct dm_odm_t *pDM_Odm)
+{
+ pDM_Odm->bCckHighPower = (bool) ODM_GetBBReg(pDM_Odm, 0x824, BIT9);
+ pDM_Odm->RFPathRxEnable = (u8) ODM_GetBBReg(pDM_Odm, 0xc04, 0x0F);
+ if (pDM_Odm->SupportICType & (ODM_RTL8723A))
+ pDM_Odm->AntDivType = CGCS_RX_SW_ANTDIV;
+
+ ODM_InitDebugSetting23a(pDM_Odm);
+}
+
+void odm_CommonInfoSelfUpdate23a(struct dm_odm_t *pDM_Odm)
+{
+ u8 EntryCnt = 0;
+ u8 i;
+ struct sta_info *pEntry;
+
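+	/* In 40 MHz operation the control (primary 20 MHz) channel sits two */
+	/* channel numbers away from the centre channel; the secondary channel */
+	/* offset decides on which side. */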
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M) {
+ if (*(pDM_Odm->pSecChOffset) == 1)
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel) - 2;
+ else if (*(pDM_Odm->pSecChOffset) == 2)
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel) + 2;
+ } else {
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel);
+ }
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ pEntry = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(pEntry))
+ EntryCnt++;
+ }
+ if (EntryCnt == 1)
+ pDM_Odm->bOneEntryOnly = true;
+ else
+ pDM_Odm->bOneEntryOnly = false;
+}
+
+void odm_CmnInfoInit_Debug23a(struct dm_odm_t *pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoInit_Debug23a ==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportAbility = 0x%x\n", pDM_Odm->SupportAbility));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportInterface =%d\n", pDM_Odm->SupportInterface));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportICType = 0x%x\n", pDM_Odm->SupportICType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("CutVersion =%d\n", pDM_Odm->CutVersion));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("FabVersion =%d\n", pDM_Odm->FabVersion));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RFType =%d\n", pDM_Odm->RFType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("BoardType =%d\n", pDM_Odm->BoardType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtLNA =%d\n", pDM_Odm->ExtLNA));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtPA =%d\n", pDM_Odm->ExtPA));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtTRSW =%d\n", pDM_Odm->ExtTRSW));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("PatchID =%d\n", pDM_Odm->PatchID));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bInHctTest =%d\n", pDM_Odm->bInHctTest));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFITest =%d\n", pDM_Odm->bWIFITest));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bDualMacSmartConcurrent =%d\n", pDM_Odm->bDualMacSmartConcurrent));
+
+}
+
+void odm_CmnInfoHook_Debug23a(struct dm_odm_t *pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoHook_Debug23a ==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumTxBytesUnicast =%llu\n", *(pDM_Odm->pNumTxBytesUnicast)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumRxBytesUnicast =%llu\n", *(pDM_Odm->pNumRxBytesUnicast)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pWirelessMode = 0x%x\n", *(pDM_Odm->pWirelessMode)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecChOffset =%d\n", *(pDM_Odm->pSecChOffset)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecurity =%d\n", *(pDM_Odm->pSecurity)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pBandWidth =%d\n", *(pDM_Odm->pBandWidth)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pChannel =%d\n", *(pDM_Odm->pChannel)));
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbScanInProcess =%d\n", *(pDM_Odm->pbScanInProcess)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbPowerSaving =%d\n", *(pDM_Odm->pbPowerSaving)));
+}
+
+void odm_CmnInfoUpdate_Debug23a(struct dm_odm_t *pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoUpdate_Debug23a ==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Direct =%d\n", pDM_Odm->bWIFI_Direct));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Display =%d\n", pDM_Odm->bWIFI_Display));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bLinked =%d\n", pDM_Odm->bLinked));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RSSI_Min =%d\n", pDM_Odm->RSSI_Min));
+}
+
+void ODM_Write_DIG23a(struct dm_odm_t *pDM_Odm, u8 CurrentIGI)
+{
+ struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("ODM_REG(IGI_A, pDM_Odm) = 0x%x, ODM_BIT(IGI, pDM_Odm) = 0x%x \n",
+ ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm)));
+
+ if (pDM_DigTable->CurIGValue != CurrentIGI) {
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("CurrentIGI(0x%02x). \n", CurrentIGI));
+ pDM_DigTable->CurIGValue = CurrentIGI;
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("ODM_Write_DIG23a():CurrentIGI = 0x%x \n", CurrentIGI));
+}
+
+/* Need LPS mode for CE platform --2012--08--24--- */
+/* 8723AS/8189ES */
+void odm_DIG23abyRSSI_LPS(struct dm_odm_t *pDM_Odm)
+{
+ struct rtw_adapter *pAdapter = pDM_Odm->Adapter;
+ struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+ u8 RSSI_Lower = DM_DIG_MIN_NIC; /* 0x1E or 0x1C */
+ u8 bFwCurrentInPSMode = false;
+ u8 CurrentIGI = pDM_Odm->RSSI_Min;
+
+ if (!(pDM_Odm->SupportICType & (ODM_RTL8723A)))
+ return;
+
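+	/* Start from the minimum RSSI plus a fixed DIG offset, then nudge the */
+	/* IGI up or down by the false-alarm count while the FW is in LPS. */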
+ CurrentIGI = CurrentIGI+RSSI_OFFSET_DIG;
+ bFwCurrentInPSMode = pAdapter->pwrctrlpriv.bFwCurrentInPSMode;
+
+ /* ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG_LPS, ODM_DBG_LOUD, ("odm_DIG23a() ==>\n")); */
+
+ /* Use the FW power-save state to decide the IGI */
+ if (bFwCurrentInPSMode) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("---Neil---odm_DIG23a is in LPS mode\n"));
+ /* Adjust by FA in LPS MODE */
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_LPS)
+ CurrentIGI = CurrentIGI+2;
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1_LPS)
+ CurrentIGI = CurrentIGI+1;
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0_LPS)
+ CurrentIGI = CurrentIGI-1;
+ } else {
+ CurrentIGI = RSSI_Lower;
+ }
+
+ /* Lower bound checking */
+
+ /* RSSI Lower bound check */
+ if ((pDM_Odm->RSSI_Min-10) > DM_DIG_MIN_NIC)
+ RSSI_Lower = (pDM_Odm->RSSI_Min-10);
+ else
+ RSSI_Lower = DM_DIG_MIN_NIC;
+
+ /* Upper and Lower Bound checking */
+ if (CurrentIGI > DM_DIG_MAX_NIC)
+ CurrentIGI = DM_DIG_MAX_NIC;
+ else if (CurrentIGI < RSSI_Lower)
+ CurrentIGI = RSSI_Lower;
+
+ ODM_Write_DIG23a(pDM_Odm, CurrentIGI);/* ODM_Write_DIG23a(pDM_Odm, pDM_DigTable->CurIGValue); */
+
+}
+
+void odm_DIG23aInit(struct dm_odm_t *pDM_Odm)
+{
+ struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ pDM_DigTable->CurIGValue = (u8) ODM_GetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm));
+ pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
+ pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
+ pDM_DigTable->FALowThresh = DM_FALSEALARM_THRESH_LOW;
+ pDM_DigTable->FAHighThresh = DM_FALSEALARM_THRESH_HIGH;
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
+ pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
+ pDM_DigTable->BackoffVal = DM_DIG_BACKOFF_DEFAULT;
+ pDM_DigTable->BackoffVal_range_max = DM_DIG_BACKOFF_MAX;
+ pDM_DigTable->BackoffVal_range_min = DM_DIG_BACKOFF_MIN;
+ pDM_DigTable->PreCCK_CCAThres = 0xFF;
+ pDM_DigTable->CurCCK_CCAThres = 0x83;
+ pDM_DigTable->ForbiddenIGI = DM_DIG_MIN_NIC;
+ pDM_DigTable->LargeFAHit = 0;
+ pDM_DigTable->Recover_cnt = 0;
+ pDM_DigTable->DIG_Dynamic_MIN_0 = DM_DIG_MIN_NIC;
+ pDM_DigTable->DIG_Dynamic_MIN_1 = DM_DIG_MIN_NIC;
+ pDM_DigTable->bMediaConnect_0 = false;
+ pDM_DigTable->bMediaConnect_1 = false;
+
+ /* Enable initial gain control by default; it is presumably cleared while PSD is in progress (see the check in odm_DIG23a) to avoid DIG errors */
+ pDM_Odm->bDMInitialGainEnable = true;
+
+}
+
+void odm_DIG23a(struct dm_odm_t *pDM_Odm)
+{
+
+ struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+ u8 DIG_Dynamic_MIN;
+ u8 DIG_MaxOfMin;
+ bool FirstConnect, FirstDisConnect;
+ u8 dm_dig_max, dm_dig_min;
+ u8 CurrentIGI = pDM_DigTable->CurIGValue;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a() ==>\n"));
+ /* if (!(pDM_Odm->SupportAbility & (ODM_BB_DIG|ODM_BB_FA_CNT))) */
+ if ((!(pDM_Odm->SupportAbility&ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility&ODM_BB_FA_CNT))) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG23a() Return: SupportAbility ODM_BB_DIG or ODM_BB_FA_CNT is disabled\n"));
+ return;
+ }
+
+ if (*(pDM_Odm->pbScanInProcess)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a() Return: In Scan Progress \n"));
+ return;
+ }
+
+ /* Added by Neil Chen: skip DIG while PSD is processing */
+ if (!pDM_Odm->bDMInitialGainEnable) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a() Return: PSD is Processing \n"));
+ return;
+ }
+
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
+
+ /* 1 Boundary Decision */
+ if ((pDM_Odm->SupportICType & (ODM_RTL8723A)) &&
+ ((pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) || pDM_Odm->ExtLNA)) {
+ dm_dig_max = DM_DIG_MAX_NIC_HP;
+ dm_dig_min = DM_DIG_MIN_NIC_HP;
+ DIG_MaxOfMin = DM_DIG_MAX_AP_HP;
+ } else {
+ dm_dig_max = DM_DIG_MAX_NIC;
+ dm_dig_min = DM_DIG_MIN_NIC;
+ DIG_MaxOfMin = DM_DIG_MAX_AP;
+ }
+
+ if (pDM_Odm->bLinked) {
+ /* 2 8723A Series, offset need to be 10 */
+ if (pDM_Odm->SupportICType == (ODM_RTL8723A)) {
+ /* 2 Upper Bound */
+ if ((pDM_Odm->RSSI_Min + 10) > DM_DIG_MAX_NIC)
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
+ else if ((pDM_Odm->RSSI_Min + 10) < DM_DIG_MIN_NIC)
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MIN_NIC;
+ else
+ pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 10;
+
+ /* 2 If BT is Concurrent, need to set Lower Bound */
+ DIG_Dynamic_MIN = DM_DIG_MIN_NIC;
+ } else {
+ /* 2 Modify DIG upper bound */
+ if ((pDM_Odm->RSSI_Min + 20) > dm_dig_max)
+ pDM_DigTable->rx_gain_range_max = dm_dig_max;
+ else if ((pDM_Odm->RSSI_Min + 20) < dm_dig_min)
+ pDM_DigTable->rx_gain_range_max = dm_dig_min;
+ else
+ pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 20;
+
+ /* 2 Modify DIG lower bound */
+ if (pDM_Odm->bOneEntryOnly) {
+ if (pDM_Odm->RSSI_Min < dm_dig_min)
+ DIG_Dynamic_MIN = dm_dig_min;
+ else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
+ DIG_Dynamic_MIN = DIG_MaxOfMin;
+ else
+ DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG23a() : bOneEntryOnly = true, DIG_Dynamic_MIN = 0x%x\n",
+ DIG_Dynamic_MIN));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG23a() : pDM_Odm->RSSI_Min =%d\n",
+ pDM_Odm->RSSI_Min));
+ } else {
+ DIG_Dynamic_MIN = dm_dig_min;
+ }
+ }
+ } else {
+ pDM_DigTable->rx_gain_range_max = dm_dig_max;
+ DIG_Dynamic_MIN = dm_dig_min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a() : No Link\n"));
+ }
+
+ /* 1 Modify DIG lower bound, deal with abnormally large false alarm */
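+	/* When false alarms exceed 10000, remember the current IGI as a */
+	/* "forbidden" floor; after three hits in a row the rx gain lower bound */
+	/* is raised above it and only relaxed again via Recover_cnt. */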
+ if (pFalseAlmCnt->Cnt_all > 10000) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("dm_DIG(): Abnornally false alarm case. \n"));
+
+ if (pDM_DigTable->LargeFAHit != 3)
+ pDM_DigTable->LargeFAHit++;
+ if (pDM_DigTable->ForbiddenIGI < CurrentIGI) {
+ pDM_DigTable->ForbiddenIGI = CurrentIGI;
+ pDM_DigTable->LargeFAHit = 1;
+ }
+
+ if (pDM_DigTable->LargeFAHit >= 3) {
+ if ((pDM_DigTable->ForbiddenIGI+1) > pDM_DigTable->rx_gain_range_max)
+ pDM_DigTable->rx_gain_range_min = pDM_DigTable->rx_gain_range_max;
+ else
+ pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
+ pDM_DigTable->Recover_cnt = 3600; /* 3600 = 2hr */
+ }
+ } else {
+ /* Recovery mechanism for IGI lower bound */
+ if (pDM_DigTable->Recover_cnt != 0) {
+ pDM_DigTable->Recover_cnt--;
+ } else {
+ if (pDM_DigTable->LargeFAHit < 3) {
+ if ((pDM_DigTable->ForbiddenIGI - 1) < DIG_Dynamic_MIN) {
+ pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
+ pDM_DigTable->rx_gain_range_min = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG23a(): Normal Case: At Lower Bound\n"));
+ } else {
+ pDM_DigTable->ForbiddenIGI--;
+ pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG23a(): Normal Case: Approach Lower Bound\n"));
+ }
+ } else {
+ pDM_DigTable->LargeFAHit = 0;
+ }
+ }
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): pDM_DigTable->LargeFAHit =%d\n", pDM_DigTable->LargeFAHit));
+
+ /* 1 Adjust initial gain by false alarm */
+ if (pDM_Odm->bLinked) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): DIG AfterLink\n"));
+ if (FirstConnect) {
+ CurrentIGI = pDM_Odm->RSSI_Min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("DIG: First Connect\n"));
+ } else {
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
+ CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
+ CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue-1; */
+ }
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): DIG BeforeLink\n"));
+ if (FirstDisConnect) {
+ CurrentIGI = pDM_DigTable->rx_gain_range_min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): First DisConnect \n"));
+ } else {
+ /* 2012.03.30 LukeLee: enable DIG before link but with very high thresholds */
+ if (pFalseAlmCnt->Cnt_all > 10000)
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ else if (pFalseAlmCnt->Cnt_all > 8000)
+ CurrentIGI = CurrentIGI + 1;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ else if (pFalseAlmCnt->Cnt_all < 500)
+ CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue-1; */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): England DIG \n"));
+ }
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): DIG End Adjust IGI\n"));
+ /* 1 Check initial gain by upper/lower bound */
+ if (CurrentIGI > pDM_DigTable->rx_gain_range_max)
+ CurrentIGI = pDM_DigTable->rx_gain_range_max;
+ if (CurrentIGI < pDM_DigTable->rx_gain_range_min)
+ CurrentIGI = pDM_DigTable->rx_gain_range_min;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): rx_gain_range_max = 0x%x, rx_gain_range_min = 0x%x\n",
+ pDM_DigTable->rx_gain_range_max, pDM_DigTable->rx_gain_range_min));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): TotalFA =%d\n", pFalseAlmCnt->Cnt_all));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): CurIGValue = 0x%x\n", CurrentIGI));
+
+ /* 2 High power RSSI threshold */
+
+ ODM_Write_DIG23a(pDM_Odm, CurrentIGI);/* ODM_Write_DIG23a(pDM_Odm, pDM_DigTable->CurIGValue); */
+ pDM_DigTable->bMediaConnect_0 = pDM_Odm->bLinked;
+ pDM_DigTable->DIG_Dynamic_MIN_0 = DIG_Dynamic_MIN;
+}
+
+/* 3 ============================================================ */
+/* 3 FALSE ALARM CHECK */
+/* 3 ============================================================ */
+
+void odm_FalseAlarmCounterStatistics23a(struct dm_odm_t *pDM_Odm)
+{
+ u32 ret_value;
+ struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
+ return;
+
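+	/* Freeze the hardware counters, read the per-type OFDM and CCK failure */
+	/* counts, sum them into Cnt_all, then reset and release the counters so */
+	/* the next watchdog period starts from zero. */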
+ if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
+ /* hold ofdm counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 1); /* hold page C counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 1); /* hold page D counter */
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Fast_Fsync = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value&0xffff0000)>>16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_OFDM_CCA = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Parity_Fail = ((ret_value&0xffff0000)>>16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Rate_Illegal = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Crc8_fail = ((ret_value&0xffff0000)>>16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Mcs_fail = (ret_value&0xffff);
+
+ FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail +
+ FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail +
+ FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Fast_Fsync +
+ FalseAlmCnt->Cnt_SB_Search_fail;
+ /* hold cck counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT12, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT14, 1);
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
+ FalseAlmCnt->Cnt_Cck_fail = ret_value;
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
+ FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff) << 8;
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_CCK_CCA = ((ret_value&0xFF)<<8) | ((ret_value&0xFF00)>>8);
+
+ FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
+ FalseAlmCnt->Cnt_SB_Search_fail +
+ FalseAlmCnt->Cnt_Parity_Fail +
+ FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail +
+ FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Cck_fail);
+
+ FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
+
+ if (pDM_Odm->SupportICType >= ODM_RTL8723A) {
+ /* reset false alarm counter registers */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT31, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT31, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT27, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT27, 0);
+ /* update ofdm counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 0); /* update page C counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 0); /* update page D counter */
+
+ /* reset CCK CCA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT13|BIT12, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT13|BIT12, 2);
+ /* reset CCK FA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT15|BIT14, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT15|BIT14, 2);
+ }
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Enter odm_FalseAlarmCounterStatistics23a\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Fast_Fsync =%d, Cnt_SB_Search_fail =%d\n",
+ FalseAlmCnt->Cnt_Fast_Fsync, FalseAlmCnt->Cnt_SB_Search_fail));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Parity_Fail =%d, Cnt_Rate_Illegal =%d\n",
+ FalseAlmCnt->Cnt_Parity_Fail, FalseAlmCnt->Cnt_Rate_Illegal));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Crc8_fail =%d, Cnt_Mcs_fail =%d\n",
+ FalseAlmCnt->Cnt_Crc8_fail, FalseAlmCnt->Cnt_Mcs_fail));
+ } else { /* FOR ODM_IC_11AC_SERIES */
+ /* read OFDM FA counter */
+ FalseAlmCnt->Cnt_Ofdm_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_11AC, bMaskLWord);
+ FalseAlmCnt->Cnt_Cck_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_11AC, bMaskLWord);
+ FalseAlmCnt->Cnt_all = FalseAlmCnt->Cnt_Ofdm_fail + FalseAlmCnt->Cnt_Cck_fail;
+
+ /* reset OFDM FA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT17, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT17, 0);
+ /* reset CCK FA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT15, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT15, 1);
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Cck_fail =%d\n", FalseAlmCnt->Cnt_Cck_fail));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Ofdm_fail =%d\n", FalseAlmCnt->Cnt_Ofdm_fail));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Total False Alarm =%d\n", FalseAlmCnt->Cnt_all));
+}
+
+/* 3 ============================================================ */
+/* 3 CCK Packet Detect Threshold */
+/* 3 ============================================================ */
+
+void odm_CCKPacketDetectionThresh23a(struct dm_odm_t *pDM_Odm)
+{
+ struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+ u8 CurCCK_CCAThres;
+
+ if (!(pDM_Odm->SupportAbility & (ODM_BB_CCK_PD|ODM_BB_FA_CNT)))
+ return;
+
+ if (pDM_Odm->ExtLNA)
+ return;
+
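+	/* Pick the CCK CCA threshold from RSSI when linked (0xcd for strong */
+	/* signals, 0x83 for moderate ones); with a weak signal or no link, use */
+	/* 0x83 or 0x40 depending on the CCK false-alarm count. */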
+ if (pDM_Odm->bLinked) {
+ if (pDM_Odm->RSSI_Min > 25) {
+ CurCCK_CCAThres = 0xcd;
+ } else if ((pDM_Odm->RSSI_Min <= 25) && (pDM_Odm->RSSI_Min > 10)) {
+ CurCCK_CCAThres = 0x83;
+ } else {
+ if (FalseAlmCnt->Cnt_Cck_fail > 1000)
+ CurCCK_CCAThres = 0x83;
+ else
+ CurCCK_CCAThres = 0x40;
+ }
+ } else {
+ if (FalseAlmCnt->Cnt_Cck_fail > 1000)
+ CurCCK_CCAThres = 0x83;
+ else
+ CurCCK_CCAThres = 0x40;
+ }
+
+ ODM_Write_CCK_CCA_Thres23a(pDM_Odm, CurCCK_CCAThres);
+}
+
+void ODM_Write_CCK_CCA_Thres23a(struct dm_odm_t *pDM_Odm, u8 CurCCK_CCAThres)
+{
+ struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres)
+ ODM_Write1Byte(pDM_Odm, ODM_REG(CCK_CCA, pDM_Odm), CurCCK_CCAThres);
+ pDM_DigTable->PreCCK_CCAThres = pDM_DigTable->CurCCK_CCAThres;
+ pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
+
+}
+
+/* 3 ============================================================ */
+/* 3 BB Power Save */
+/* 3 ============================================================ */
+void odm23a_DynBBPSInit(struct dm_odm_t *pDM_Odm)
+{
+ struct dynamic_pwr_sav *pDM_PSTable = &pDM_Odm->DM_PSTable;
+
+ pDM_PSTable->PreCCAState = CCA_MAX;
+ pDM_PSTable->CurCCAState = CCA_MAX;
+ pDM_PSTable->PreRFState = RF_MAX;
+ pDM_PSTable->CurRFState = RF_MAX;
+ pDM_PSTable->Rssi_val_min = 0;
+ pDM_PSTable->initialize = 0;
+}
+
+void odm_DynamicBBPowerSaving23a(struct dm_odm_t *pDM_Odm)
+{
+ return;
+}
+
+void odm_1R_CCA23a(struct dm_odm_t *pDM_Odm)
+{
+ struct dynamic_pwr_sav *pDM_PSTable = &pDM_Odm->DM_PSTable;
+
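+	/* Switch to single-receive-path CCA when RSSI_Min is comfortably high */
+	/* (>= 35) and back to two paths once it drops to 30 or below; the gap */
+	/* avoids toggling at the boundary. */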
+ if (pDM_Odm->RSSI_Min != 0xFF) {
+ if (pDM_PSTable->PreCCAState == CCA_2R) {
+ if (pDM_Odm->RSSI_Min >= 35)
+ pDM_PSTable->CurCCAState = CCA_1R;
+ else
+ pDM_PSTable->CurCCAState = CCA_2R;
+ } else {
+ if (pDM_Odm->RSSI_Min <= 30)
+ pDM_PSTable->CurCCAState = CCA_2R;
+ else
+ pDM_PSTable->CurCCAState = CCA_1R;
+ }
+ } else {
+ pDM_PSTable->CurCCAState = CCA_MAX;
+ }
+
+ if (pDM_PSTable->PreCCAState != pDM_PSTable->CurCCAState) {
+ if (pDM_PSTable->CurCCAState == CCA_1R) {
+ if (pDM_Odm->RFType == ODM_2T2R)
+ ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x13);
+ else
+ ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x23);
+ } else {
+ ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x33);
+ /* PHY_SetBBReg(pAdapter, 0xe70, bMaskByte3, 0x63); */
+ }
+ pDM_PSTable->PreCCAState = pDM_PSTable->CurCCAState;
+ }
+}
+
+void ODM_RF_Saving23a(struct dm_odm_t *pDM_Odm, u8 bForceInNormal)
+{
+ struct dynamic_pwr_sav *pDM_PSTable = &pDM_Odm->DM_PSTable;
+ u8 Rssi_Up_bound = 30;
+ u8 Rssi_Low_bound = 25;
+
+ if (pDM_Odm->PatchID == 40) { /* RT_CID_819x_FUNAI_TV */
+ Rssi_Up_bound = 50;
+ Rssi_Low_bound = 45;
+ }
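+	/* On first use, snapshot the BB registers that the power-saving mode */
+	/* overwrites so they can be restored when returning to normal mode. */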
+ if (pDM_PSTable->initialize == 0) {
+
+ pDM_PSTable->Reg874 = (ODM_GetBBReg(pDM_Odm, 0x874, bMaskDWord)&0x1CC000)>>14;
+ pDM_PSTable->RegC70 = (ODM_GetBBReg(pDM_Odm, 0xc70, bMaskDWord)&BIT3)>>3;
+ pDM_PSTable->Reg85C = (ODM_GetBBReg(pDM_Odm, 0x85c, bMaskDWord)&0xFF000000)>>24;
+ pDM_PSTable->RegA74 = (ODM_GetBBReg(pDM_Odm, 0xa74, bMaskDWord)&0xF000)>>12;
+ /* Reg818 = PHY_QueryBBReg(pAdapter, 0x818, bMaskDWord); */
+ pDM_PSTable->initialize = 1;
+ }
+
+ if (!bForceInNormal) {
+ if (pDM_Odm->RSSI_Min != 0xFF) {
+ if (pDM_PSTable->PreRFState == RF_Normal) {
+ if (pDM_Odm->RSSI_Min >= Rssi_Up_bound)
+ pDM_PSTable->CurRFState = RF_Save;
+ else
+ pDM_PSTable->CurRFState = RF_Normal;
+ } else {
+ if (pDM_Odm->RSSI_Min <= Rssi_Low_bound)
+ pDM_PSTable->CurRFState = RF_Normal;
+ else
+ pDM_PSTable->CurRFState = RF_Save;
+ }
+ } else {
+ pDM_PSTable->CurRFState = RF_MAX;
+ }
+ } else {
+ pDM_PSTable->CurRFState = RF_Normal;
+ }
+
+ if (pDM_PSTable->PreRFState != pDM_PSTable->CurRFState) {
+ if (pDM_PSTable->CurRFState == RF_Save) {
+ /* <tynli_note> The 8723 RSSI report will be wrong; set 0x874[5] = 1 when entering BB power saving mode. */
+ /* Suggested by SD3 Yu-Nan. 2011.01.20. */
+ if (pDM_Odm->SupportICType == ODM_RTL8723A)
+ ODM_SetBBReg(pDM_Odm, 0x874, BIT5, 0x1); /* Reg874[5]= 1b'1 */
+ ODM_SetBBReg(pDM_Odm, 0x874, 0x1C0000, 0x2); /* Reg874[20:18]= 3'b010 */
+ ODM_SetBBReg(pDM_Odm, 0xc70, BIT3, 0); /* RegC70[3]= 1'b0 */
+ ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]= 0x63 */
+ ODM_SetBBReg(pDM_Odm, 0x874, 0xC000, 0x2); /* Reg874[15:14]= 2'b10 */
+ ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, 0x3); /* RegA75[7:4]= 0x3 */
+ ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x0); /* Reg818[28]= 1'b0 */
+ ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x1); /* Reg818[28]= 1'b1 */
+ } else {
+ ODM_SetBBReg(pDM_Odm, 0x874, 0x1CC000, pDM_PSTable->Reg874);
+ ODM_SetBBReg(pDM_Odm, 0xc70, BIT3, pDM_PSTable->RegC70);
+ ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, pDM_PSTable->Reg85C);
+ ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, pDM_PSTable->RegA74);
+ ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x0);
+
+ if (pDM_Odm->SupportICType == ODM_RTL8723A)
+ ODM_SetBBReg(pDM_Odm, 0x874, BIT5, 0x0); /* Reg874[5]= 1b'0 */
+ }
+ pDM_PSTable->PreRFState = pDM_PSTable->CurRFState;
+ }
+}
+
+/* 3 ============================================================ */
+/* 3 RATR MASK */
+/* 3 ============================================================ */
+/* 3 ============================================================ */
+/* 3 Rate Adaptive */
+/* 3 ============================================================ */
+
+void odm_RateAdaptiveMaskInit23a(struct dm_odm_t *pDM_Odm)
+{
+ struct odm_rate_adapt *pOdmRA = &pDM_Odm->RateAdaptive;
+
+ pOdmRA->Type = DM_Type_ByDriver;
+ if (pOdmRA->Type == DM_Type_ByDriver)
+ pDM_Odm->bUseRAMask = true;
+ else
+ pDM_Odm->bUseRAMask = false;
+
+ pOdmRA->RATRState = DM_RATR_STA_INIT;
+ pOdmRA->HighRSSIThresh = 50;
+ pOdmRA->LowRSSIThresh = 20;
+}
+
+u32 ODM_Get_Rate_Bitmap23a(struct dm_odm_t *pDM_Odm,
+ u32 macid,
+ u32 ra_mask,
+ u8 rssi_level)
+{
+ struct sta_info *pEntry;
+ u32 rate_bitmap = 0x0fffffff;
+ u8 WirelessMode;
+ /* u8 WirelessMode =*(pDM_Odm->pWirelessMode); */
+
+ pEntry = pDM_Odm->pODM_StaInfo[macid];
+ if (!IS_STA_VALID(pEntry))
+ return ra_mask;
+
+ WirelessMode = pEntry->wireless_mode;
+
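+	/* Trim the rate mask to the peer's wireless mode and RSSI level. In */
+	/* this bitmap the low four bits appear to be the CCK rates, the next */
+	/* eight the OFDM rates, and the upper bits MCS0-15. */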
+ switch (WirelessMode) {
+ case ODM_WM_B:
+ if (ra_mask & 0x0000000c) /* 11M or 5.5M enable */
+ rate_bitmap = 0x0000000d;
+ else
+ rate_bitmap = 0x0000000f;
+ break;
+ case (ODM_WM_A|ODM_WM_G):
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x00000f00;
+ else
+ rate_bitmap = 0x00000ff0;
+ break;
+ case (ODM_WM_B|ODM_WM_G):
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x00000f00;
+ else if (rssi_level == DM_RATR_STA_MIDDLE)
+ rate_bitmap = 0x00000ff0;
+ else
+ rate_bitmap = 0x00000ff5;
+ break;
+ case (ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
+ case (ODM_WM_A|ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
+ if (pDM_Odm->RFType == ODM_1T2R || pDM_Odm->RFType == ODM_1T1R) {
+ if (rssi_level == DM_RATR_STA_HIGH) {
+ rate_bitmap = 0x000f0000;
+ } else if (rssi_level == DM_RATR_STA_MIDDLE) {
+ rate_bitmap = 0x000ff000;
+ } else {
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
+ rate_bitmap = 0x000ff015;
+ else
+ rate_bitmap = 0x000ff005;
+ }
+ } else {
+ if (rssi_level == DM_RATR_STA_HIGH) {
+ rate_bitmap = 0x0f8f0000;
+ } else if (rssi_level == DM_RATR_STA_MIDDLE) {
+ rate_bitmap = 0x0f8ff000;
+ } else {
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
+ rate_bitmap = 0x0f8ff015;
+ else
+ rate_bitmap = 0x0f8ff005;
+ }
+ }
+ break;
+ default:
+ /* case WIRELESS_11_24N: */
+ /* case WIRELESS_11_5N: */
+ if (pDM_Odm->RFType == RF_1T2R)
+ rate_bitmap = 0x000fffff;
+ else
+ rate_bitmap = 0x0fffffff;
+ break;
+ }
+
+ /* printk("%s ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x \n", __FUNCTION__, rssi_level, WirelessMode, rate_bitmap); */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, (" ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x \n", rssi_level, WirelessMode, rate_bitmap));
+
+ return rate_bitmap;
+
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: odm_RefreshRateAdaptiveMask23a()
+ *
+ * Overview: Update rate table mask according to rssi
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ *When Who Remark
+ *05/27/2009 hpfan Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void odm_RefreshRateAdaptiveMask23a(struct dm_odm_t *pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_RA_MASK))
+ return;
+ /* */
+ /* 2011/09/29 MH: In the first HW integration stage we provide 4 different handlers operating */
+ /* at the same time. In stage 2/3 we need to provide a universal interface and merge all */
+ /* HW dynamic mechanisms. */
+ /* */
+ odm_RefreshRateAdaptiveMask23aCE23a(pDM_Odm);
+}
+
+void odm_RefreshRateAdaptiveMask23aMP23a(struct dm_odm_t *pDM_Odm)
+{
+}
+
+void odm_RefreshRateAdaptiveMask23aCE23a(struct dm_odm_t *pDM_Odm)
+{
+ u8 i;
+ struct rtw_adapter *pAdapter = pDM_Odm->Adapter;
+
+ if (pAdapter->bDriverStopped) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_TRACE,
+ ("<---- odm_RefreshRateAdaptiveMask23a(): driver is going to unload\n"));
+ return;
+ }
+
+ if (!pDM_Odm->bUseRAMask) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
+ ("<---- odm_RefreshRateAdaptiveMask23a(): driver does not control rate adaptive mask\n"));
+ return;
+ }
+
+ /* printk("==> %s \n", __FUNCTION__); */
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ struct sta_info *pstat = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(pstat)) {
+ if (ODM_RAStateCheck23a(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
+ ("RSSI:%d, RSSI_LEVEL:%d\n",
+ pstat->rssi_stat.UndecoratedSmoothedPWDB,
+ pstat->rssi_level));
+ rtw_hal_update_ra_mask23a(pstat, pstat->rssi_level);
+ }
+
+ }
+ }
+
+}
+
+void odm_RefreshRateAdaptiveMask23aAPADSL23a(struct dm_odm_t *pDM_Odm)
+{
+}
+
+/* Return Value: bool */
+/* - true: RATRState is changed. */
+bool ODM_RAStateCheck23a(struct dm_odm_t *pDM_Odm, s32 RSSI, bool bForceUpdate,
+ u8 *pRATRState)
+{
+ struct odm_rate_adapt *pRA = &pDM_Odm->RateAdaptive;
+ const u8 GoUpGap = 5;
+ u8 HighRSSIThreshForRA = pRA->HighRSSIThresh;
+ u8 LowRSSIThreshForRA = pRA->LowRSSIThresh;
+ u8 RATRState;
+
+ /* Threshold Adjustment: */
+ /* when RSSI state trends to go up one or two levels, make sure RSSI is high enough. */
+ /* Here GoUpGap is added to solve the boundary's level alternation issue. */
+ switch (*pRATRState) {
+ case DM_RATR_STA_INIT:
+ case DM_RATR_STA_HIGH:
+ break;
+ case DM_RATR_STA_MIDDLE:
+ HighRSSIThreshForRA += GoUpGap;
+ break;
+ case DM_RATR_STA_LOW:
+ HighRSSIThreshForRA += GoUpGap;
+ LowRSSIThreshForRA += GoUpGap;
+ break;
+ default:
+ ODM_RT_ASSERT(pDM_Odm, false, ("wrong rssi level setting %d !", *pRATRState));
+ break;
+ }
+
+ /* Decide RATRState by RSSI. */
+ if (RSSI > HighRSSIThreshForRA)
+ RATRState = DM_RATR_STA_HIGH;
+ else if (RSSI > LowRSSIThreshForRA)
+ RATRState = DM_RATR_STA_MIDDLE;
+ else
+ RATRState = DM_RATR_STA_LOW;
+
+ if (*pRATRState != RATRState || bForceUpdate) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
+ ("RSSI Level %d -> %d\n", *pRATRState, RATRState));
+ *pRATRState = RATRState;
+ return true;
+ }
+ return false;
+}
+
+/* 3 ============================================================ */
+/* 3 Dynamic Tx Power */
+/* 3 ============================================================ */
+
+void odm_DynamicTxPower23aInit(struct dm_odm_t *pDM_Odm)
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+
+ pdmpriv->bDynamicTxPowerEnable = false;
+
+ pdmpriv->LastDTPLvl = TxHighPwrLevel_Normal;
+ pdmpriv->DynamicTxHighPowerLvl = TxHighPwrLevel_Normal;
+}
+
+void odm_DynamicTxPower23aSavePowerIndex(struct dm_odm_t *pDM_Odm)
+{
+ u8 index;
+ u32 Power_Index_REG[6] = {0xc90, 0xc91, 0xc92, 0xc98, 0xc99, 0xc9a};
+
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ for (index = 0; index < 6; index++)
+ pdmpriv->PowerIndex_backup[index] = rtw_read8(Adapter, Power_Index_REG[index]);
+}
+
+void odm_DynamicTxPower23aRestorePowerIndex(struct dm_odm_t *pDM_Odm)
+{
+ u8 index;
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u32 Power_Index_REG[6] = {0xc90, 0xc91, 0xc92, 0xc98, 0xc99, 0xc9a};
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ for (index = 0; index < 6; index++)
+ rtw_write8(Adapter, Power_Index_REG[index], pdmpriv->PowerIndex_backup[index]);
+}
+
+void odm_DynamicTxPower23aWritePowerIndex(struct dm_odm_t *pDM_Odm,
+ u8 Value)
+{
+
+ u8 index;
+ u32 Power_Index_REG[6] = {0xc90, 0xc91, 0xc92, 0xc98, 0xc99, 0xc9a};
+
+ for (index = 0; index < 6; index++)
+ ODM_Write1Byte(pDM_Odm, Power_Index_REG[index], Value);
+
+}
+
+void odm_DynamicTxPower23a(struct dm_odm_t *pDM_Odm)
+{
+}
+
+void odm_DynamicTxPower23a_92C(struct dm_odm_t *pDM_Odm)
+{
+}
+
+void odm_DynamicTxPower23a_92D(struct dm_odm_t *pDM_Odm)
+{
+}
+
+/* 3 ============================================================ */
+/* 3 RSSI Monitor */
+/* 3 ============================================================ */
+
+void odm_RSSIMonitorInit(struct dm_odm_t *pDM_Odm)
+{
+}
+
+void odm_RSSIMonitorCheck23a(struct dm_odm_t *pDM_Odm)
+{
+ /* For AP/ADSL use struct rtl8723a_priv * */
+ /* For CE/NIC use struct rtw_adapter * */
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_RSSI_MONITOR))
+ return;
+
+ /* 2011/09/29 MH: In the first HW integration stage we provide 4 different handlers operating */
+ /* at the same time. In stage 2/3 we need to provide a universal interface and merge all */
+ /* HW dynamic mechanisms. */
+ odm_RSSIMonitorCheck23aCE(pDM_Odm);
+} /* odm_RSSIMonitorCheck23a */
+
+void odm_RSSIMonitorCheck23aMP(struct dm_odm_t *pDM_Odm)
+{
+}
+
+static void FindMinimumRSSI(struct rtw_adapter *pAdapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
+
+ /* 1 1.Determine the minimum RSSI */
+
+ if ((!pDM_Odm->bLinked) &&
+ (pdmpriv->EntryMinUndecoratedSmoothedPWDB == 0))
+ pdmpriv->MinUndecoratedPWDBForDM = 0;
+ else
+ pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
+}
+
+void odm_RSSIMonitorCheck23aCE(struct dm_odm_t *pDM_Odm)
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ int i;
+ int tmpEntryMaxPWDB = 0, tmpEntryMinPWDB = 0xff;
+ u8 sta_cnt = 0;
+ u32 PWDB_rssi[NUM_STA] = {0}; /* [0~15]: MACID, [16~31]: PWDB_rssi */
+ struct sta_info *psta;
+
+ if (!pDM_Odm->bLinked)
+ return;
+
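+	/* Walk the association table to find the minimum and maximum smoothed */
+	/* PWDB and, when the firmware does rate control, report each station's */
+	/* RSSI to it (MAC id in the low 16 bits, PWDB in bits 16-31). */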
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ psta = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(psta)) {
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB < tmpEntryMinPWDB)
+ tmpEntryMinPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
+
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB > tmpEntryMaxPWDB)
+ tmpEntryMaxPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
+
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB != (-1))
+ PWDB_rssi[sta_cnt++] = (psta->mac_id | (psta->rssi_stat.UndecoratedSmoothedPWDB<<16));
+ }
+ }
+
+ for (i = 0; i < sta_cnt; i++) {
+ if (PWDB_rssi[i] != (0)) {
+ if (pHalData->fw_ractrl) /* Report every sta's RSSI to FW */
+ rtl8723a_set_rssi_cmd(Adapter, (u8 *)&PWDB_rssi[i]);
+ }
+ }
+
+ if (tmpEntryMaxPWDB != 0) /* If associated entry is found */
+ pdmpriv->EntryMaxUndecoratedSmoothedPWDB = tmpEntryMaxPWDB;
+ else
+ pdmpriv->EntryMaxUndecoratedSmoothedPWDB = 0;
+
+ if (tmpEntryMinPWDB != 0xff) /* If associated entry is found */
+ pdmpriv->EntryMinUndecoratedSmoothedPWDB = tmpEntryMinPWDB;
+ else
+ pdmpriv->EntryMinUndecoratedSmoothedPWDB = 0;
+
+ FindMinimumRSSI(Adapter);/* get pdmpriv->MinUndecoratedPWDBForDM */
+
+ ODM_CmnInfoUpdate23a(&pHalData->odmpriv, ODM_CMNINFO_RSSI_MIN, pdmpriv->MinUndecoratedPWDBForDM);
+}
+
+void odm_RSSIMonitorCheck23aAP(struct dm_odm_t *pDM_Odm)
+{
+}
+
+void ODM_InitAllTimers(struct dm_odm_t *pDM_Odm)
+{
+ setup_timer(&pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer,
+ odm_SwAntDivChkAntSwitchCallback23a, (unsigned long)pDM_Odm);
+}
+
+void ODM_CancelAllTimers(struct dm_odm_t *pDM_Odm)
+{
+ del_timer_sync(&pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
+}
+
+void ODM_ReleaseAllTimers(struct dm_odm_t *pDM_Odm)
+{
+ ODM_ReleaseTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
+}
+
+/* endif */
+/* 3 ============================================================ */
+/* 3 Tx Power Tracking */
+/* 3 ============================================================ */
+
+void odm_TXPowerTrackingInit23a(struct dm_odm_t *pDM_Odm)
+{
+ odm_TXPowerTrackingThermalMeterInit23a(pDM_Odm);
+}
+
+void odm_TXPowerTrackingThermalMeterInit23a(struct dm_odm_t *pDM_Odm)
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+
+ pdmpriv->bTXPowerTracking = true;
+ pdmpriv->TXPowercount = 0;
+ pdmpriv->bTXPowerTrackingInit = false;
+ pdmpriv->TxPowerTrackControl = true;
+ MSG_8723A("pdmpriv->TxPowerTrackControl = %d\n", pdmpriv->TxPowerTrackControl);
+
+ pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
+}
+
+void ODM_TXPowerTrackingCheck23a(struct dm_odm_t *pDM_Odm)
+{
+ /* For AP/ADSL use struct rtl8723a_priv * */
+ /* For CE/NIC use struct rtw_adapter * */
+
+ /* 2011/09/29 MH: In the first HW integration stage we provide 4 different handlers operating */
+ /* at the same time. In stage 2/3 we need to provide a universal interface and merge all */
+ /* HW dynamic mechanisms. */
+ odm_TXPowerTrackingCheckCE23a(pDM_Odm);
+}
+
+void odm_TXPowerTrackingCheckCE23a(struct dm_odm_t *pDM_Odm)
+{
+}
+
+void odm_TXPowerTrackingCheckMP(struct dm_odm_t *pDM_Odm)
+{
+}
+
+void odm_TXPowerTrackingCheckAP(struct dm_odm_t *pDM_Odm)
+{
+}
+
+/* antenna mapping info */
+/* 1: right-side antenna */
+/* 2/0: left-side antenna */
+/* pDM_SWAT_Table->CCK_Ant1_Cnt / OFDM_Ant1_Cnt: right-side antenna (Ant:1, RxDefaultAnt1) */
+/* pDM_SWAT_Table->CCK_Ant2_Cnt / OFDM_Ant2_Cnt: left-side antenna (Ant:0, RxDefaultAnt2) */
+/* The left antenna is selected as the default antenna during initialization; modify it as needed */
+/* */
+
+/* 3 ============================================================ */
+/* 3 SW Antenna Diversity */
+/* 3 ============================================================ */
+void odm_SwAntDivInit(struct dm_odm_t *pDM_Odm)
+{
+}
+
+void ODM_SwAntDivChkPerPktRssi(struct dm_odm_t *pDM_Odm, u8 StationID, struct odm_phy_info *pPhyInfo)
+{
+}
+
+void odm_SwAntDivChkAntSwitch(struct dm_odm_t *pDM_Odm, u8 Step)
+{
+}
+
+void ODM_SwAntDivRestAfterLink(struct dm_odm_t *pDM_Odm)
+{
+}
+
+void odm_SwAntDivChkAntSwitchCallback23a(unsigned long data)
+{
+}
+
+/* 3 ============================================================ */
+/* 3 SW Antenna Diversity */
+/* 3 ============================================================ */
+
+void odm_InitHybridAntDiv23a(struct dm_odm_t *pDM_Odm)
+{
+}
+
+void odm_HwAntDiv23a(struct dm_odm_t *pDM_Odm)
+{
+}
+
+/* EDCA Turbo */
+void ODM_EdcaTurboInit23a(struct dm_odm_t *pDM_Odm)
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
+ pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
+ Adapter->recvpriv.bIsAnyNonBEPkts = false;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial VO PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_VO_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial VI PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_VI_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial BE PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_BE_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial BK PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_BK_PARAM)));
+
+} /* ODM_InitEdcaTurbo */
+
+void odm_EdcaTurboCheck23a(struct dm_odm_t *pDM_Odm)
+{
+ /* For AP/ADSL use struct rtl8723a_priv * */
+ /* For CE/NIC use struct rtw_adapter * */
+
+ /* 2011/09/29 MH: In the first HW integration stage we provide 4 different handlers operating */
+ /* at the same time. In stage 2/3 we need to provide a universal interface and merge all */
+ /* HW dynamic mechanisms. */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("odm_EdcaTurboCheck23a ========================>\n"));
+
+ if (!(pDM_Odm->SupportAbility & ODM_MAC_EDCA_TURBO))
+ return;
+
+ odm_EdcaTurboCheck23aCE23a(pDM_Odm);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("<======================== odm_EdcaTurboCheck23a\n"));
+
+} /* odm_CheckEdcaTurbo */
+
+void odm_EdcaTurboCheck23aCE23a(struct dm_odm_t *pDM_Odm)
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ u32 trafficIndex;
+ u32 edca_param;
+ u64 cur_tx_bytes = 0;
+ u64 cur_rx_bytes = 0;
+ u8 bbtchange = false;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct xmit_priv *pxmitpriv = &Adapter->xmitpriv;
+ struct recv_priv *precvpriv = &Adapter->recvpriv;
+ struct registry_priv *pregpriv = &Adapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if (pregpriv->wifi_spec == 1) /* (pmlmeinfo->HT_enable == 0) */
+ goto dm_CheckEdcaTurbo_EXIT;
+
+ if (pmlmeinfo->assoc_AP_vendor >= HT_IOT_PEER_MAX)
+ goto dm_CheckEdcaTurbo_EXIT;
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ if (BT_DisableEDCATurbo(Adapter))
+ goto dm_CheckEdcaTurbo_EXIT;
+#endif
+
+ /* Check if the status needs to be changed. */
+ if ((bbtchange) || (!precvpriv->bIsAnyNonBEPkts)) {
+ cur_tx_bytes = pxmitpriv->tx_bytes - pxmitpriv->last_tx_bytes;
+ cur_rx_bytes = precvpriv->rx_bytes - precvpriv->last_rx_bytes;
+
+ /* traffic, TX or RX */
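+		/* A 4:1 ratio between the TX and RX byte counts decides the direction; */
+		/* Ralink and Atheros APs default to the downlink parameters when the */
+		/* traffic is balanced, other vendors to the uplink ones. */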
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK) ||
+ (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS)) {
+ if (cur_tx_bytes > (cur_rx_bytes << 2)) {
+ /* Uplink TP is present. */
+ trafficIndex = UP_LINK;
+ } else { /* Balance TP is present. */
+ trafficIndex = DOWN_LINK;
+ }
+ } else {
+ if (cur_rx_bytes > (cur_tx_bytes << 2)) {
+ /* Downlink TP is present. */
+ trafficIndex = DOWN_LINK;
+ } else { /* Balance TP is present. */
+ trafficIndex = UP_LINK;
+ }
+ }
+
+ if ((pDM_Odm->DM_EDCA_Table.prv_traffic_idx != trafficIndex) ||
+ (!pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA)) {
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_CISCO) &&
+ (pmlmeext->cur_wireless_mode & WIRELESS_11_24N))
+ edca_param = EDCAParam[pmlmeinfo->assoc_AP_vendor][trafficIndex];
+ else
+ edca_param = EDCAParam[HT_IOT_PEER_UNKNOWN][trafficIndex];
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, edca_param);
+
+ pDM_Odm->DM_EDCA_Table.prv_traffic_idx = trafficIndex;
+ }
+
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = true;
+ } else {
+ /* Turn Off EDCA turbo here. */
+ /* Restore original EDCA according to the declaration of AP. */
+ if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, pHalData->AcParam_BE);
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
+ }
+ }
+
+dm_CheckEdcaTurbo_EXIT:
+ /* Set variables for next time. */
+ precvpriv->bIsAnyNonBEPkts = false;
+ pxmitpriv->last_tx_bytes = pxmitpriv->tx_bytes;
+ precvpriv->last_rx_bytes = precvpriv->rx_bytes;
+}
+
+u32 GetPSDData(struct dm_odm_t *pDM_Odm, unsigned int point, u8 initial_gain_psd)
+{
+ u32 psd_report;
+
+ /* Set DCO frequency index, offset = (40MHz/SamplePts)*point */
+ ODM_SetBBReg(pDM_Odm, 0x808, 0x3FF, point);
+
+ /* Start PSD calculation, Reg808[22]= 0->1 */
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT22, 1);
+ /* Need to wait for HW PSD report */
+ udelay(30);
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT22, 0);
+ /* Read PSD report, Reg8B4[15:0] */
+ psd_report = ODM_GetBBReg(pDM_Odm, 0x8B4, bMaskDWord) & 0x0000FFFF;
+
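+	/* Convert the raw 16-bit report to dB and offset it by the initial */
+	/* gain; 0x1c appears to act as the reference gain here. */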
+ psd_report = (u32)(ConvertTo_dB23a(psd_report))+(u32)(initial_gain_psd-0x1c);
+
+ return psd_report;
+}
+
+u32 ConvertTo_dB23a(u32 Value)
+{
+ u8 i;
+ u8 j;
+ u32 dB;
+
+ Value = Value & 0xFFFF;
+
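+	/* dB_Invert_Table is laid out as 8 rows of 12 entries, each row */
+	/* covering 12 consecutive dB values: find the row (i), then the */
+	/* column (j); the result is i*12 + j + 1 dB, capped at 96 dB. */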
+ for (i = 0; i < 8; i++) {
+ if (Value <= dB_Invert_Table[i][11])
+ break;
+ }
+
+ if (i >= 8)
+ return 96; /* maximum 96 dB */
+
+ for (j = 0; j < 12; j++) {
+ if (Value <= dB_Invert_Table[i][j])
+ break;
+ }
+
+ dB = i*12 + j + 1;
+
+ return dB;
+}
+
+/* */
+/* 2011/09/22 MH Add for 92D global spin lock utilization. */
+/* */
+void odm_GlobalAdapterCheck(void)
+{
+} /* odm_GlobalAdapterCheck */
+
+/* */
+/* Description: */
+/*Set Single/Dual Antenna default setting for products that do not do detection in advance. */
+/* */
+/* Added by Joseph, 2012.03.22 */
+/* */
+void ODM_SingleDualAntennaDefaultSetting(struct dm_odm_t *pDM_Odm)
+{
+ struct sw_ant_sw *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+ pDM_SWAT_Table->ANTA_ON = true;
+ pDM_SWAT_Table->ANTB_ON = true;
+}
+
+/* 2 8723A ANT DETECT */
+
+static void odm_PHY_SaveAFERegisters(struct dm_odm_t *pDM_Odm, u32 *AFEReg,
+ u32 *AFEBackup, u32 RegisterNum)
+{
+ u32 i;
+
+ /* RTPRINT(FINIT, INIT_IQK, ("Save ADDA parameters.\n")); */
+ for (i = 0; i < RegisterNum; i++)
+ AFEBackup[i] = ODM_GetBBReg(pDM_Odm, AFEReg[i], bMaskDWord);
+}
+
+static void odm_PHY_ReloadAFERegisters(struct dm_odm_t *pDM_Odm, u32 *AFEReg,
+ u32 *AFEBackup, u32 RegisterNum)
+{
+ u32 i;
+
+ for (i = 0; i < RegisterNum; i++)
+ ODM_SetBBReg(pDM_Odm, AFEReg[i], bMaskDWord, AFEBackup[i]);
+}
+
+/* 2 8723A ANT DETECT */
+/* Description: */
+/* Implement IQK single tone for RF DPK loopback and BB PSD scanning. */
+/* This function is cooperated with BB team Neil. */
+bool ODM_SingleDualAntennaDetection(struct dm_odm_t *pDM_Odm, u8 mode)
+{
+ struct sw_ant_sw *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+ u32 CurrentChannel, RfLoopReg;
+ u8 n;
+ u32 Reg88c, Regc08, Reg874, Regc50;
+ u8 initial_gain = 0x5a;
+ u32 PSD_report_tmp;
+ u32 AntA_report = 0x0, AntB_report = 0x0, AntO_report = 0x0;
+ bool bResult = true;
+ u32 AFE_Backup[16];
+ u32 AFE_REG_8723A[16] = {
+ rRx_Wait_CCA, rTx_CCK_RFON,
+ rTx_CCK_BBON, rTx_OFDM_RFON,
+ rTx_OFDM_BBON, rTx_To_Rx,
+ rTx_To_Tx, rRx_CCK,
+ rRx_OFDM, rRx_Wait_RIFS,
+ rRx_TO_Rx, rStandby,
+ rSleep, rPMPD_ANAEN,
+ rFPGA0_XCD_SwitchControl, rBlue_Tooth};
+
+ if (!(pDM_Odm->SupportICType & (ODM_RTL8723A)))
+ return bResult;
+
+ if (!(pDM_Odm->SupportAbility&ODM_BB_ANT_DIV))
+ return bResult;
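+	/* Drive a single IQK tone and compare the PSD power measured with */
+	/* antenna A selected, antenna B selected, and the DPDT switch open, */
+	/* to decide which antennas are actually populated. */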
+ /* 1 Backup Current RF/BB Settings */
+
+ CurrentChannel = ODM_GetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask);
+ RfLoopReg = ODM_GetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, ODM_DPDT, Antenna_A); /* change to Antenna A */
+ /* Step 1: Use IQK to transmit a single tone */
+
+ udelay(10);
+
+ /* Store A Path Register 88c, c08, 874, c50 */
+ Reg88c = ODM_GetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord);
+ Regc08 = ODM_GetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord);
+ Reg874 = ODM_GetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord);
+ Regc50 = ODM_GetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord);
+
+ /* Store AFE Registers */
+ odm_PHY_SaveAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
+
+ /* Set PSD 128 pts */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_PSDFunction, BIT14|BIT15, 0x0); /* 128 pts */
+
+ /* To SET CH1 to do */
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask, 0x01); /* Channel 1 */
+
+ /* AFE all on step */
+ ODM_SetBBReg(pDM_Odm, rRx_Wait_CCA, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_CCK_RFON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_CCK_BBON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_OFDM_RFON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_OFDM_BBON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_To_Rx, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_To_Tx, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_CCK, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_OFDM, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_Wait_RIFS, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_TO_Rx, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rStandby, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rSleep, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rPMPD_ANAEN, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_SwitchControl, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rBlue_Tooth, bMaskDWord, 0x6FDB25A4);
+
+ /* 3 wire Disable */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, 0xCCF000C0);
+
+ /* BB IQK Setting */
+ ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800E4);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22208000);
+
+ /* IQK setting: tone @ 4.34 MHz */
+ ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008C1C);
+ ODM_SetBBReg(pDM_Odm, rTx_IQK, bMaskDWord, 0x01007c00);
+
+ /* Page B init */
+ ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x00080000);
+ ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x0f600000);
+ ODM_SetBBReg(pDM_Odm, rRx_IQK, bMaskDWord, 0x01004800);
+ ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_A, bMaskDWord, 0x10008c1f);
+ ODM_SetBBReg(pDM_Odm, rTx_IQK_PI_A, bMaskDWord, 0x82150008);
+ ODM_SetBBReg(pDM_Odm, rRx_IQK_PI_A, bMaskDWord, 0x28150008);
+ ODM_SetBBReg(pDM_Odm, rIQK_AGC_Rsp, bMaskDWord, 0x001028d0);
+
+ /* RF loop Setting */
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x0, 0xFFFFF, 0x50008);
+
+ /* IQK Single tone start */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ udelay(1000);
+ PSD_report_tmp = 0x0;
+
+ for (n = 0; n < 2; n++) {
+ PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
+ if (PSD_report_tmp > AntA_report)
+ AntA_report = PSD_report_tmp;
+ }
+
+ PSD_report_tmp = 0x0;
+
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_B); /* change to Antenna B */
+ udelay(10);
+
+ for (n = 0; n < 2; n++) {
+ PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
+ if (PSD_report_tmp > AntB_report)
+ AntB_report = PSD_report_tmp;
+ }
+
+ /* change to open case */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, 0); /* change to Ant A and B all open case */
+ udelay(10);
+
+ for (n = 0; n < 2; n++) {
+ PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
+ if (PSD_report_tmp > AntO_report)
+ AntO_report = PSD_report_tmp;
+ }
+
+ /* Close IQK Single Tone function */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ PSD_report_tmp = 0x0;
+
+ /* 1 Return to antenna A */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_A);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, Reg88c);
+ ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, Regc08);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, Reg874);
+ ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, 0x7F, 0x40);
+ ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord, Regc50);
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, CurrentChannel);
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask, RfLoopReg);
+
+ /* Reload AFE Registers */
+ odm_PHY_ReloadAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_A[%d]= %d \n", 2416, AntA_report));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_B[%d]= %d \n", 2416, AntB_report));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_O[%d]= %d \n", 2416, AntO_report));
+
+ /* 2 Test Ant B, assuming Ant A is ON */
+ if (mode == ANTTESTB) {
+ if (AntA_report >= 100) {
+ if (AntB_report > (AntA_report+1)) {
+ pDM_SWAT_Table->ANTB_ON = false;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna A\n"));
+ } else {
+ pDM_SWAT_Table->ANTB_ON = true;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Dual Antenna is A and B\n"));
+ }
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
+ pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
+ bResult = false;
+ }
+ } else if (mode == ANTTESTALL) {
+ /* 2 Test Ant A and B based on DPDT Open */
+ if ((AntO_report >= 100) && (AntO_report < 118)) {
+ if (AntA_report > (AntO_report+1)) {
+ pDM_SWAT_Table->ANTA_ON = false;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant A is OFF"));
+ } else {
+ pDM_SWAT_Table->ANTA_ON = true;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant A is ON"));
+ }
+
+ if (AntB_report > (AntO_report+2)) {
+ pDM_SWAT_Table->ANTB_ON = false;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant B is OFF"));
+ } else {
+ pDM_SWAT_Table->ANTB_ON = true;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant B is ON"));
+ }
+ }
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
+ pDM_SWAT_Table->ANTA_ON = true; /* Set Antenna A on as default */
+ pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
+ bResult = false;
+ }
+ return bResult;
+}
+
+/* Justin: Adjust the response frame TX power according to the current RSSI, 2012/11/05 */
+void odm_dtc(struct dm_odm_t *pDM_Odm)
+{
+}
diff --git a/drivers/staging/rtl8723au/hal/odm_HWConfig.c b/drivers/staging/rtl8723au/hal/odm_HWConfig.c
new file mode 100644
index 000000000000..72441709697e
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/odm_HWConfig.c
@@ -0,0 +1,481 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+/* */
+/* include files */
+/* */
+
+#include "odm_precomp.h"
+
+#define READ_AND_CONFIG READ_AND_CONFIG_MP
+
+#define READ_AND_CONFIG_MP(ic, txt) (ODM_ReadAndConfig##txt##ic(pDM_Odm))
+#define READ_AND_CONFIG_TC(ic, txt) (ODM_ReadAndConfig_TC##txt##ic(pDM_Odm))
+
+static u8 odm_QueryRxPwrPercentage(s8 AntPower)
+{
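+ /* Map antenna power in dBm to a percentage: <= -100 dBm (or bogus values >= 20 dBm) gives 0%, */
+ /* >= 0 dBm gives 100%, and anything in between scales linearly as 100 + dBm. */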
+ if ((AntPower <= -100) || (AntPower >= 20))
+ return 0;
+ else if (AntPower >= 0)
+ return 100;
+ else
+ return 100 + AntPower;
+}
+
+static s32 odm_SignalScaleMapping_92CSeries(struct dm_odm_t *pDM_Odm, s32 CurrSig)
+{
+ s32 RetSig = 0;
+
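+ /* Piecewise-linear mapping of the raw signal index onto the 0~100 UI scale for USB/SDIO */
+ /* interfaces; for other interfaces RetSig stays 0. */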
+ if ((pDM_Odm->SupportInterface == ODM_ITRF_USB) || (pDM_Odm->SupportInterface == ODM_ITRF_SDIO)) {
+ if (CurrSig >= 51 && CurrSig <= 100)
+ RetSig = 100;
+ else if (CurrSig >= 41 && CurrSig <= 50)
+ RetSig = 80 + ((CurrSig - 40)*2);
+ else if (CurrSig >= 31 && CurrSig <= 40)
+ RetSig = 66 + (CurrSig - 30);
+ else if (CurrSig >= 21 && CurrSig <= 30)
+ RetSig = 54 + (CurrSig - 20);
+ else if (CurrSig >= 10 && CurrSig <= 20)
+ RetSig = 42 + (((CurrSig - 10) * 2) / 3);
+ else if (CurrSig >= 5 && CurrSig <= 9)
+ RetSig = 22 + (((CurrSig - 5) * 3) / 2);
+ else if (CurrSig >= 1 && CurrSig <= 4)
+ RetSig = 6 + (((CurrSig - 1) * 3) / 2);
+ else
+ RetSig = CurrSig;
+ }
+ return RetSig;
+}
+
+static s32 odm_SignalScaleMapping(struct dm_odm_t *pDM_Odm, s32 CurrSig)
+{
+ return odm_SignalScaleMapping_92CSeries(pDM_Odm, CurrSig);
+}
+
+static u8
+odm_EVMdbToPercentage(
+ s8 Value
+ )
+{
+ /* */
+ /* -33dB~0dB to 0%~99% */
+ /* */
+ s8 ret_val;
+
+ ret_val = Value;
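+ /* Clamp the EVM to the [-33, 0] dB range, negate and scale by 3 to get 0~99%, then round 99 up to 100. */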
+
+ if (ret_val >= 0)
+ ret_val = 0;
+ if (ret_val <= -33)
+ ret_val = -33;
+
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+
+ if (ret_val == 99)
+ ret_val = 100;
+
+ return ret_val;
+}
+
+static void odm_RxPhyStatus92CSeries_Parsing(struct dm_odm_t *pDM_Odm,
+ struct odm_phy_info *pPhyInfo,
+ u8 *pPhyStatus,
+ struct odm_packet_info *pPktinfo)
+{
+ struct phy_status_rpt *pPhyStaRpt = (struct phy_status_rpt *)pPhyStatus;
+ u8 i, Max_spatial_stream;
+ s8 rx_pwr[4], rx_pwr_all = 0;
+ u8 EVM, PWDB_ALL = 0, PWDB_ALL_BT;
+ u8 RSSI, total_rssi = 0;
+ u8 isCCKrate = 0;
+ u8 rf_rx_num = 0;
+ u8 cck_highpwr = 0;
+
+ isCCKrate = (pPktinfo->Rate <= DESC92C_RATE11M) ? true : false;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_A] = -1;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_B] = -1;
+
+ if (isCCKrate) {
+ u8 report;
+ u8 cck_agc_rpt;
+
+ pDM_Odm->PhyDbgInfo.NumQryPhyStatusCCK++;
+ /* (1)Hardware does not provide RSSI for CCK */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
+
+ cck_highpwr = pDM_Odm->bCckHighPower;
+
+ cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a;
+
+ /* The RSSI formula should be modified according to the gain table */
+ if (!cck_highpwr) {
+ report = (cck_agc_rpt & 0xc0)>>6;
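+ /* The top two bits of the CCK AGC report select one of four gain ranges; the bits */
+ /* masked with 0x3e are subtracted from a per-range base to give the rx power in dBm. */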
+ switch (report) {
+ /* Modify the RF LNA gain value to -40, -20, -2, 14 by Jenyu's suggestion */
+ /* Note: different RF chips have different LNA gains. */
+ case 0x3:
+ rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+ break;
+ }
+ } else {
+ report = (cck_agc_rpt & 0x60)>>5;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ }
+ }
+
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+
+ /* Modification for ext-LNA board */
+ if (pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) {
+ if ((cck_agc_rpt>>7) == 0) {
+ PWDB_ALL = (PWDB_ALL > 94) ? 100 : (PWDB_ALL+6);
+ } else {
+ if (PWDB_ALL > 38)
+ PWDB_ALL -= 16;
+ else
+ PWDB_ALL = (PWDB_ALL <= 16) ? (PWDB_ALL>>2) : (PWDB_ALL-12);
+ }
+
+ /* CCK modification */
+ if (PWDB_ALL > 25 && PWDB_ALL <= 60)
+ PWDB_ALL += 6;
+ } else { /* Modification for int-LNA board */
+ if (PWDB_ALL > 99)
+ PWDB_ALL -= 8;
+ else if (PWDB_ALL > 50 && PWDB_ALL <= 68)
+ PWDB_ALL += 4;
+ }
+ pPhyInfo->RxPWDBAll = PWDB_ALL;
+ pPhyInfo->BTRxRSSIPercentage = PWDB_ALL;
+ pPhyInfo->RecvSignalPower = rx_pwr_all;
+ /* (3) Get Signal Quality (EVM) */
+ if (pPktinfo->bPacketMatchBSSID) {
+ u8 SQ, SQ_rpt;
+
+ SQ_rpt = pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all;
+
+ if (SQ_rpt > 64)
+ SQ = 0;
+ else if (SQ_rpt < 20)
+ SQ = 100;
+ else
+ SQ = ((64-SQ_rpt) * 100) / 44;
+
+ pPhyInfo->SignalQuality = SQ;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_A] = SQ;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_B] = -1;
+ }
+ } else { /* is OFDM rate */
+ pDM_Odm->PhyDbgInfo.NumQryPhyStatusOFDM++;
+
+ /* (1)Get RSSI for HT rate */
+
+ for (i = RF_PATH_A; i < RF_PATH_MAX; i++) {
+ /* 2008/01/30 MH we will judge RF RX path now. */
+ if (pDM_Odm->RFPathRxEnable & BIT(i))
+ rf_rx_num++;
+
+ rx_pwr[i] = ((pPhyStaRpt->path_agc[i].gain & 0x3F)*2) - 110;
+
+ pPhyInfo->RxPwr[i] = rx_pwr[i];
+
+ /* Translate DBM to percentage. */
+ RSSI = odm_QueryRxPwrPercentage(rx_pwr[i]);
+ total_rssi += RSSI;
+
+ /* Modification for ext-LNA board */
+ if (pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) {
+ if ((pPhyStaRpt->path_agc[i].trsw) == 1)
+ RSSI = (RSSI > 94) ? 100 : (RSSI+6);
+ else
+ RSSI = (RSSI <= 16) ? (RSSI>>3) : (RSSI-16);
+
+ if ((RSSI <= 34) && (RSSI >= 4))
+ RSSI -= 4;
+ }
+
+ pPhyInfo->RxMIMOSignalStrength[i] = (u8) RSSI;
+
+ /* Get Rx snr value in DB */
+ pPhyInfo->RxSNR[i] = pDM_Odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
+ }
+
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
+ rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f)-110;
+
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ PWDB_ALL_BT = PWDB_ALL;
+
+ pPhyInfo->RxPWDBAll = PWDB_ALL;
+ pPhyInfo->BTRxRSSIPercentage = PWDB_ALL_BT;
+ pPhyInfo->RxPower = rx_pwr_all;
+ pPhyInfo->RecvSignalPower = rx_pwr_all;
+
+ /* (3)EVM of HT rate */
+ if (pPktinfo->Rate >= DESC92C_RATEMCS8 && pPktinfo->Rate <= DESC92C_RATEMCS15)
+ Max_spatial_stream = 2; /* both spatial stream make sense */
+ else
+ Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
+
+ for (i = 0; i < Max_spatial_stream; i++) {
+ /* Do not use a shift operation like "rx_evmX >>= 1", because the compiler in the free build */
+ /* environment fills the most significant bit with zero when shifting, which may turn a negative */
+ /* value into a positive one, so the dBm value (which should be negative) would no longer be correct. */
+ EVM = odm_EVMdbToPercentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
+
+ if (pPktinfo->bPacketMatchBSSID) {
+ if (i == RF_PATH_A) {
+ /* Fill value in RFD, Get the first spatial stream only */
+ pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
+ }
+ pPhyInfo->RxMIMOSignalQuality[i] = (u8)(EVM & 0xff);
+ }
+ }
+ }
+ /* UI BSS list signal strength (in percentage): map it onto a nicer-looking 0~100 scale. */
+ /* It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp(). */
+ if (isCCKrate) {
+ pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(pDM_Odm, PWDB_ALL));/* PWDB_ALL; */
+ } else {
+ if (rf_rx_num != 0)
+ pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(pDM_Odm, total_rssi /= rf_rx_num));
+ }
+}
+
+void odm_Init_RSSIForDM23a(struct dm_odm_t *pDM_Odm)
+{
+}
+
+static void odm_Process_RSSIForDM(struct dm_odm_t *pDM_Odm,
+ struct odm_phy_info *pPhyInfo,
+ struct odm_packet_info *pPktinfo)
+{
+ s32 UndecoratedSmoothedPWDB, UndecoratedSmoothedCCK;
+ s32 UndecoratedSmoothedOFDM, RSSI_Ave;
+ u8 isCCKrate = 0;
+ u8 RSSI_max, RSSI_min, i;
+ u32 OFDM_pkt = 0;
+ u32 Weighting = 0;
+ struct sta_info *pEntry;
+
+ if (pPktinfo->StationID == 0xFF)
+ return;
+
+ pEntry = pDM_Odm->pODM_StaInfo[pPktinfo->StationID];
+ if (!IS_STA_VALID(pEntry))
+ return;
+ if (!pPktinfo->bPacketMatchBSSID)
+ return;
+
+ isCCKrate = (pPktinfo->Rate <= DESC92C_RATE11M) ? true : false;
+
+ /* Smart Antenna Debug Message------------------*/
+
+ UndecoratedSmoothedCCK = pEntry->rssi_stat.UndecoratedSmoothedCCK;
+ UndecoratedSmoothedOFDM = pEntry->rssi_stat.UndecoratedSmoothedOFDM;
+ UndecoratedSmoothedPWDB = pEntry->rssi_stat.UndecoratedSmoothedPWDB;
+
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+ if (!isCCKrate) { /* ofdm rate */
+ if (pPhyInfo->RxMIMOSignalStrength[RF_PATH_B] == 0) {
+ RSSI_Ave = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
+ } else {
+ if (pPhyInfo->RxMIMOSignalStrength[RF_PATH_A] > pPhyInfo->RxMIMOSignalStrength[RF_PATH_B]) {
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[RF_PATH_B];
+ } else {
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[RF_PATH_B];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
+ }
+ if ((RSSI_max - RSSI_min) < 3)
+ RSSI_Ave = RSSI_max;
+ else if ((RSSI_max - RSSI_min) < 6)
+ RSSI_Ave = RSSI_max - 1;
+ else if ((RSSI_max - RSSI_min) < 10)
+ RSSI_Ave = RSSI_max - 2;
+ else
+ RSSI_Ave = RSSI_max - 3;
+ }
+
+ /* 1 Process OFDM RSSI */
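+ /* Exponential smoothing: new = (old * (Rx_Smooth_Factor - 1) + sample) / Rx_Smooth_Factor; */
+ /* add 1 on a rising sample so integer truncation cannot keep the average from climbing. */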
+ if (UndecoratedSmoothedOFDM <= 0) {
+ /* initialize */
+ UndecoratedSmoothedOFDM = pPhyInfo->RxPWDBAll;
+ } else {
+ if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedOFDM) {
+ UndecoratedSmoothedOFDM =
+ (((UndecoratedSmoothedOFDM)*(Rx_Smooth_Factor-1)) +
+ (RSSI_Ave)) / (Rx_Smooth_Factor);
+ UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM + 1;
+ } else {
+ UndecoratedSmoothedOFDM =
+ (((UndecoratedSmoothedOFDM)*(Rx_Smooth_Factor-1)) +
+ (RSSI_Ave)) / (Rx_Smooth_Factor);
+ }
+ }
+ pEntry->rssi_stat.PacketMap = (pEntry->rssi_stat.PacketMap<<1) | BIT0;
+ } else {
+ RSSI_Ave = pPhyInfo->RxPWDBAll;
+
+ /* 1 Process CCK RSSI */
+ if (UndecoratedSmoothedCCK <= 0) {
+ /* initialize */
+ UndecoratedSmoothedCCK = pPhyInfo->RxPWDBAll;
+ } else {
+ if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedCCK) {
+ UndecoratedSmoothedCCK =
+ (((UndecoratedSmoothedCCK)*(Rx_Smooth_Factor-1)) +
+ (pPhyInfo->RxPWDBAll)) / (Rx_Smooth_Factor);
+ UndecoratedSmoothedCCK = UndecoratedSmoothedCCK + 1;
+ } else {
+ UndecoratedSmoothedCCK =
+ (((UndecoratedSmoothedCCK)*(Rx_Smooth_Factor-1)) +
+ (pPhyInfo->RxPWDBAll)) / (Rx_Smooth_Factor);
+ }
+ }
+ pEntry->rssi_stat.PacketMap = pEntry->rssi_stat.PacketMap<<1;
+ }
+
+ /* 2011.07.28 LukeLee: modified to prevent unstable CCK RSSI */
+ if (pEntry->rssi_stat.ValidBit >= 64)
+ pEntry->rssi_stat.ValidBit = 64;
+ else
+ pEntry->rssi_stat.ValidBit++;
+
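+ /* PacketMap is a sliding window over the last 64 packets (bit set = OFDM, clear = CCK); */
+ /* blend the OFDM and CCK smoothed RSSI weighted by the fraction of OFDM packets in the window. */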
+ for (i = 0; i < pEntry->rssi_stat.ValidBit; i++)
+ OFDM_pkt += (u8)(pEntry->rssi_stat.PacketMap>>i)&BIT0;
+
+ if (pEntry->rssi_stat.ValidBit == 64) {
+ Weighting = ((OFDM_pkt<<4) > 64)?64:(OFDM_pkt<<4);
+ UndecoratedSmoothedPWDB = (Weighting*UndecoratedSmoothedOFDM+(64-Weighting)*UndecoratedSmoothedCCK)>>6;
+ } else {
+ if (pEntry->rssi_stat.ValidBit != 0)
+ UndecoratedSmoothedPWDB = (OFDM_pkt*UndecoratedSmoothedOFDM+(pEntry->rssi_stat.ValidBit-OFDM_pkt)*UndecoratedSmoothedCCK)/pEntry->rssi_stat.ValidBit;
+ else
+ UndecoratedSmoothedPWDB = 0;
+ }
+ pEntry->rssi_stat.UndecoratedSmoothedCCK = UndecoratedSmoothedCCK;
+ pEntry->rssi_stat.UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM;
+ pEntry->rssi_stat.UndecoratedSmoothedPWDB = UndecoratedSmoothedPWDB;
+ }
+}
+
+/* Endianness must be handled before calling this API */
+static void ODM_PhyStatusQuery23a_92CSeries(struct dm_odm_t *pDM_Odm,
+ struct odm_phy_info *pPhyInfo,
+ u8 *pPhyStatus,
+ struct odm_packet_info *pPktinfo)
+{
+ odm_RxPhyStatus92CSeries_Parsing(pDM_Odm, pPhyInfo,
+ pPhyStatus, pPktinfo);
+ if (pDM_Odm->RSSI_test) {
+ /* Select the packets to do RSSI checking for antenna switching. */
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon)
+ ODM_SwAntDivChkPerPktRssi(pDM_Odm, pPktinfo->StationID, pPhyInfo);
+ } else {
+ odm_Process_RSSIForDM(pDM_Odm, pPhyInfo, pPktinfo);
+ }
+}
+
+void ODM_PhyStatusQuery23a(struct dm_odm_t *pDM_Odm, struct odm_phy_info *pPhyInfo,
+ u8 *pPhyStatus, struct odm_packet_info *pPktinfo)
+{
+ ODM_PhyStatusQuery23a_92CSeries(pDM_Odm, pPhyInfo, pPhyStatus, pPktinfo);
+}
+
+/* For future use. */
+void ODM_MacStatusQuery23a(struct dm_odm_t *pDM_Odm, u8 *pMacStatus, u8 MacID,
+ bool bPacketMatchBSSID, bool bPacketToSelf,
+ bool bPacketBeacon)
+{
+ /* 2011/10/19 Driver team will handle in the future. */
+
+}
+
+enum hal_status
+ODM_ConfigRFWithHeaderFile23a(
+ struct dm_odm_t *pDM_Odm,
+ enum RF_RADIO_PATH Content,
+ enum RF_RADIO_PATH eRFPath
+ )
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===>ODM_ConfigRFWithHeaderFile23a\n"));
+ if (pDM_Odm->SupportICType == ODM_RTL8723A) {
+ if (eRFPath == RF_PATH_A)
+ READ_AND_CONFIG_MP(8723A, _RadioA_1T_);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ (" ===> ODM_ConfigRFWithHeaderFile23a() Radio_A:Rtl8723RadioA_1TArray\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ (" ===> ODM_ConfigRFWithHeaderFile23a() Radio_B:Rtl8723RadioB_1TArray\n"));
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
+ ("ODM_ConfigRFWithHeaderFile23a: Radio No %x\n", eRFPath));
+ return HAL_STATUS_SUCCESS;
+}
+
+enum hal_status
+ODM_ConfigBBWithHeaderFile23a(
+ struct dm_odm_t *pDM_Odm,
+ enum odm_bb_config_type ConfigType
+ )
+{
+ if (pDM_Odm->SupportICType == ODM_RTL8723A) {
+ if (ConfigType == CONFIG_BB_PHY_REG)
+ READ_AND_CONFIG_MP(8723A, _PHY_REG_1T_);
+ else if (ConfigType == CONFIG_BB_AGC_TAB)
+ READ_AND_CONFIG_MP(8723A, _AGC_TAB_1T_);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ (" ===> phy_ConfigBBWithHeaderFile() phy:Rtl8723AGCTAB_1TArray\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ (" ===> phy_ConfigBBWithHeaderFile() agc:Rtl8723PHY_REG_1TArray\n"));
+ }
+ return HAL_STATUS_SUCCESS;
+}
+
+enum hal_status
+ODM_ConfigMACWithHeaderFile23a(
+ struct dm_odm_t *pDM_Odm
+ )
+{
+ u8 result = HAL_STATUS_SUCCESS;
+
+ if (pDM_Odm->SupportICType == ODM_RTL8723A)
+ READ_AND_CONFIG_MP(8723A, _MAC_REG_);
+ return result;
+}
diff --git a/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c b/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c
new file mode 100644
index 000000000000..d076e14f36b9
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c
@@ -0,0 +1,162 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+void
+odm_ConfigRFReg_8723A(
+ struct dm_odm_t *pDM_Odm,
+ u32 Addr,
+ u32 Data,
+ enum RF_RADIO_PATH RF_PATH,
+ u32 RegAddr
+ )
+{
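+ /* Addresses 0xfe..0xf9 in the radio init table are delay markers rather than real RF registers; */
+ /* any other address is written to the RF register, followed by a short settle delay. */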
+ if (Addr == 0xfe) {
+ msleep(50);
+ } else if (Addr == 0xfd) {
+ mdelay(5);
+ } else if (Addr == 0xfc) {
+ mdelay(1);
+ } else if (Addr == 0xfb) {
+ udelay(50);
+ } else if (Addr == 0xfa) {
+ udelay(5);
+ } else if (Addr == 0xf9) {
+ udelay(1);
+ } else {
+ ODM_SetRFReg(pDM_Odm, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+ }
+}
+
+void odm_ConfigRF_RadioA_8723A(struct dm_odm_t *pDM_Odm,
+ u32 Addr,
+ u32 Data
+ )
+{
+ u32 content = 0x1000; /* RF_Content: radioa_txt */
+ u32 maskforPhySet = (u32)(content&0xE000);
+
+ odm_ConfigRFReg_8723A(pDM_Odm, Addr, Data, RF_PATH_A,
+ Addr|maskforPhySet);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===> ODM_ConfigRFWithHeaderFile23a: [RadioA] %08X %08X\n",
+ Addr, Data));
+}
+
+void odm_ConfigRF_RadioB_8723A(struct dm_odm_t *pDM_Odm,
+ u32 Addr,
+ u32 Data
+ )
+{
+ u32 content = 0x1001; /* RF_Content: radiob_txt */
+ u32 maskforPhySet = (u32)(content&0xE000);
+
+ odm_ConfigRFReg_8723A(pDM_Odm, Addr, Data, RF_PATH_B,
+ Addr|maskforPhySet);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===> ODM_ConfigRFWithHeaderFile23a: [RadioB] %08X %08X\n",
+ Addr, Data));
+}
+
+void odm_ConfigMAC_8723A(struct dm_odm_t *pDM_Odm,
+ u32 Addr,
+ u8 Data
+ )
+{
+ ODM_Write1Byte(pDM_Odm, Addr, Data);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===> ODM_ConfigMACWithHeaderFile23a: [MAC_REG] %08X %08X\n",
+ Addr, Data));
+}
+
+void
+odm_ConfigBB_AGC_8723A(
+ struct dm_odm_t *pDM_Odm,
+ u32 Addr,
+ u32 Bitmask,
+ u32 Data
+ )
+{
+ ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===> ODM_ConfigBBWithHeaderFile23a: [AGC_TAB] %08X %08X\n",
+ Addr, Data));
+}
+
+void
+odm_ConfigBB_PHY_REG_PG_8723A(
+ struct dm_odm_t *pDM_Odm,
+ u32 Addr,
+ u32 Bitmask,
+ u32 Data
+ )
+{
+ if (Addr == 0xfe)
+ msleep(50);
+ else if (Addr == 0xfd)
+ mdelay(5);
+ else if (Addr == 0xfc)
+ mdelay(1);
+ else if (Addr == 0xfb)
+ udelay(50);
+ else if (Addr == 0xfa)
+ udelay(5);
+ else if (Addr == 0xf9)
+ udelay(1);
+ /* TODO: ODM_StorePwrIndexDiffRateOffset(...) */
+ /* storePwrIndexDiffRateOffset(Adapter, Addr, Bitmask, Data); */
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===> ODM_ConfigBBWithHeaderFile23a: [PHY_REG] %08X %08X %08X\n",
+ Addr, Bitmask, Data));
+}
+
+void
+odm_ConfigBB_PHY_8723A(
+ struct dm_odm_t *pDM_Odm,
+ u32 Addr,
+ u32 Bitmask,
+ u32 Data
+ )
+{
+ if (Addr == 0xfe)
+ msleep(50);
+ else if (Addr == 0xfd)
+ mdelay(5);
+ else if (Addr == 0xfc)
+ mdelay(1);
+ else if (Addr == 0xfb)
+ udelay(50);
+ else if (Addr == 0xfa)
+ udelay(5);
+ else if (Addr == 0xf9)
+ udelay(1);
+ else if (Addr == 0xa24)
+ pDM_Odm->RFCalibrateInfo.RegA24 = Data;
+ ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===> ODM_ConfigBBWithHeaderFile23a: [PHY_REG] %08X %08X\n",
+ Addr, Data));
+}
diff --git a/drivers/staging/rtl8723au/hal/odm_debug.c b/drivers/staging/rtl8723au/hal/odm_debug.c
new file mode 100644
index 000000000000..c912ab89bc3e
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/odm_debug.c
@@ -0,0 +1,24 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+void ODM_InitDebugSetting23a(struct dm_odm_t *pDM_Odm)
+{
+ pDM_Odm->DebugLevel = ODM_DBG_TRACE;
+ pDM_Odm->DebugComponents = 0;
+}
+
+u32 GlobalDebugLevel23A;
diff --git a/drivers/staging/rtl8723au/hal/odm_interface.c b/drivers/staging/rtl8723au/hal/odm_interface.c
new file mode 100644
index 000000000000..bef1269749d0
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/odm_interface.c
@@ -0,0 +1,236 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+/* */
+/* include files */
+/* */
+
+#include "odm_precomp.h"
+/* */
+/* ODM IO related API. */
+/* */
+
+u8 ODM_Read1Byte(struct dm_odm_t *pDM_Odm,
+ u32 RegAddr
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ return rtw_read8(Adapter, RegAddr);
+}
+
+u16 ODM_Read2Byte(struct dm_odm_t *pDM_Odm,
+ u32 RegAddr
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ return rtw_read16(Adapter, RegAddr);
+}
+
+u32 ODM_Read4Byte(struct dm_odm_t *pDM_Odm,
+ u32 RegAddr
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ return rtw_read32(Adapter, RegAddr);
+}
+
+void ODM_Write1Byte(
+ struct dm_odm_t *pDM_Odm,
+ u32 RegAddr,
+ u8 Data
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ rtw_write8(Adapter, RegAddr, Data);
+}
+
+void ODM_Write2Byte(
+ struct dm_odm_t *pDM_Odm,
+ u32 RegAddr,
+ u16 Data
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ rtw_write16(Adapter, RegAddr, Data);
+}
+
+void ODM_Write4Byte(
+ struct dm_odm_t *pDM_Odm,
+ u32 RegAddr,
+ u32 Data
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ rtw_write32(Adapter, RegAddr, Data);
+
+}
+
+void ODM_SetMACReg(
+ struct dm_odm_t *pDM_Odm,
+ u32 RegAddr,
+ u32 BitMask,
+ u32 Data
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
+}
+
+u32 ODM_GetMACReg(
+ struct dm_odm_t *pDM_Odm,
+ u32 RegAddr,
+ u32 BitMask
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
+}
+
+void ODM_SetBBReg(
+ struct dm_odm_t *pDM_Odm,
+ u32 RegAddr,
+ u32 BitMask,
+ u32 Data
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
+}
+
+u32 ODM_GetBBReg(
+ struct dm_odm_t *pDM_Odm,
+ u32 RegAddr,
+ u32 BitMask
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
+}
+
+void ODM_SetRFReg(
+ struct dm_odm_t *pDM_Odm,
+ enum RF_RADIO_PATH eRFPath,
+ u32 RegAddr,
+ u32 BitMask,
+ u32 Data
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ PHY_SetRFReg(Adapter, eRFPath, RegAddr, BitMask, Data);
+}
+
+u32 ODM_GetRFReg(
+ struct dm_odm_t *pDM_Odm,
+ enum RF_RADIO_PATH eRFPath,
+ u32 RegAddr,
+ u32 BitMask
+ )
+{
+ struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
+ return PHY_QueryRFReg(Adapter, eRFPath, RegAddr, BitMask);
+}
+
+/* */
+/* ODM Memory related API. */
+/* */
+void ODM_AllocateMemory(
+ struct dm_odm_t *pDM_Odm,
+ void **pPtr,
+ u32 length
+ )
+{
+ *pPtr = rtw_zvmalloc(length);
+}
+
+/* length could be ignored, used to detect memory leakage. */
+void ODM_FreeMemory(
+ struct dm_odm_t *pDM_Odm,
+ void *pPtr,
+ u32 length
+ )
+{
+ rtw_vmfree(pPtr, length);
+}
+
+/* */
+/* ODM MISC related API. */
+/* */
+void
+ODM_AcquireSpinLock(
+ struct dm_odm_t *pDM_Odm,
+ enum rt_spinlock_type type
+ )
+{
+}
+
+void ODM_ReleaseSpinLock(
+ struct dm_odm_t *pDM_Odm,
+ enum rt_spinlock_type type
+ )
+{
+}
+
+/* */
+/* Work item related API. For MP driver only! */
+/* */
+void ODM_InitializeWorkItem(
+ struct dm_odm_t *pDM_Odm,
+ void *pRtWorkItem,
+ RT_WORKITEM_CALL_BACK RtWorkItemCallback,
+ void *pContext,
+ const char *szID
+ )
+{
+}
+
+/* */
+/* ODM Timer related API. */
+/* */
+void ODM_SetTimer(struct dm_odm_t *pDM_Odm, struct timer_list *pTimer, u32 msDelay)
+{
+ mod_timer(pTimer, jiffies + msecs_to_jiffies(msDelay)); /* ms */
+}
+
+void ODM_ReleaseTimer(struct dm_odm_t *pDM_Odm, struct timer_list *pTimer)
+{
+}
+
+/* */
+/* ODM FW related API. */
+/* */
+u32 ODM_FillH2CCmd(
+ u8 *pH2CBuffer,
+ u32 H2CBufferLen,
+ u32 CmdNum,
+ u32 *pElementID,
+ u32 *pCmdLen,
+ u8 **pCmbBuffer,
+ u8 *CmdStartSeq
+ )
+{
+ return true;
+}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
new file mode 100644
index 000000000000..9d738d79de4b
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
@@ -0,0 +1,11304 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#include <drv_types.h>
+#include <rtl8723a_hal.h>
+#include <rtw_ioctl_set.h>
+
+#define DIS_PS_RX_BCN
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+
+u32 BTCoexDbgLevel = _bt_dbg_off_;
+
+#define RTPRINT(_Comp, _Level, Fmt)\
+do {\
+ if ((BTCoexDbgLevel == _bt_dbg_on_)) {\
+ printk Fmt;\
+ } \
+} while (0)
+
+#define RTPRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr)\
+if ((BTCoexDbgLevel == _bt_dbg_on_)) {\
+ u32 __i; \
+ u8 *ptr = (u8 *)_Ptr; \
+ printk printstr; \
+ printk(" "); \
+ for (__i = 0; __i < 6; __i++) \
+ printk("%02X%s", ptr[__i], (__i == 5)?"":"-"); \
+ printk("\n"); \
+}
+#define RTPRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)\
+if ((BTCoexDbgLevel == _bt_dbg_on_)) {\
+ u32 __i; \
+ u8 *ptr = (u8 *)_HexData; \
+ printk(_TitleString); \
+ for (__i = 0; __i < (u32)_HexDataLen; __i++) { \
+ printk("%02X%s", ptr[__i], (((__i + 1) % 4) == 0)?" ":" ");\
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ printk("\n"); \
+}
+/* Added by Annie, 2005-11-22. */
+#define MAX_STR_LEN 64
+/* I want to see ASCII 33 to 126 only. Otherwise, I print '?'. */
+#define PRINTABLE(_ch) (_ch >= ' ' && _ch <= '~')
+#define RT_PRINT_STR(_Comp, _Level, _TitleString, _Ptr, _Len) \
+ { \
+ u32 __i; \
+ u8 buffer[MAX_STR_LEN]; \
+ u32 length = (_Len < MAX_STR_LEN) ? _Len : (MAX_STR_LEN-1);\
+ memset(buffer, 0, MAX_STR_LEN); \
+ memcpy(buffer, (u8 *)_Ptr, length); \
+ for (__i = 0; __i < length; __i++) { \
+ if (!PRINTABLE(buffer[__i])) \
+ buffer[__i] = '?'; \
+ } \
+ buffer[length] = '\0'; \
+ printk(_TitleString); \
+ printk(": %d, <%s>\n", _Len, buffer); \
+ }
+#endif
+
+#define DCMD_Printf(...)
+#define RT_ASSERT(...)
+
+#define rsprintf snprintf
+
+#define GetDefaultAdapter(padapter) padapter
+
+#define PlatformZeroMemory(ptr, sz) memset(ptr, 0, sz)
+
+#define PlatformProcessHCICommands(...)
+#define PlatformTxBTQueuedPackets(...)
+#define PlatformIndicateBTACLData(...) (RT_STATUS_SUCCESS)
+#define PlatformAcquireSpinLock(padapter, type)
+#define PlatformReleaseSpinLock(padapter, type)
+
+#define GET_UNDECORATED_AVERAGE_RSSI(padapter) \
+ (GET_HAL_DATA(padapter)->dmpriv.EntryMinUndecoratedSmoothedPWDB)
+#define RT_RF_CHANGE_SOURCE u32
+
+enum {
+ RT_JOIN_INFRA = 1,
+ RT_JOIN_IBSS = 2,
+ RT_START_IBSS = 3,
+ RT_NO_ACTION = 4,
+};
+
+/* power saving */
+
+#ifdef __BT_C__ /* COMMOM/BT.c */
+/* ===== Below this line is sync from SD7 driver COMMOM/BT.c ===== */
+
+static u8 BT_Operation(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->BtOperationOn)
+ return true;
+ else
+ return false;
+}
+
+static u8 BT_IsLegalChannel(struct rtw_adapter *padapter, u8 channel)
+{
+ struct rt_channel_info *pChanneList = NULL;
+ u8 channelLen, i;
+
+ pChanneList = padapter->mlmeextpriv.channel_set;
+ channelLen = padapter->mlmeextpriv.max_chan_nums;
+
+ for (i = 0; i < channelLen; i++) {
+ RTPRINT(FIOCTL, IOCTL_STATE,
+ ("Check if chnl(%d) in channel plan contains bt target chnl(%d) for BT connection\n",
+ pChanneList[i].ChannelNum, channel));
+ if ((channel == pChanneList[i].ChannelNum) ||
+ (channel == pChanneList[i].ChannelNum + 2))
+ return channel;
+ }
+ return 0;
+}
+
+void BT_SignalCompensation(struct rtw_adapter *padapter, u8 *rssi_wifi, u8 *rssi_bt)
+{
+ BTDM_SignalCompensation(padapter, rssi_wifi, rssi_bt);
+}
+
+void BT_WifiScanNotify(struct rtw_adapter *padapter, u8 scanType)
+{
+ BTHCI_WifiScanNotify(padapter, scanType);
+ BTDM_CheckAntSelMode(padapter);
+ BTDM_WifiScanNotify(padapter, scanType);
+}
+
+void BT_WifiAssociateNotify(struct rtw_adapter *padapter, u8 action)
+{
+ /* action : */
+ /* true = associate start */
+ /* false = associate finished */
+ if (action)
+ BTDM_CheckAntSelMode(padapter);
+
+ BTDM_WifiAssociateNotify(padapter, action);
+}
+
+void BT_WifiMediaStatusNotify(struct rtw_adapter *padapter, enum rt_media_status mstatus)
+{
+ BTDM_MediaStatusNotify(padapter, mstatus);
+}
+
+void BT_SpecialPacketNotify(struct rtw_adapter *padapter)
+{
+ BTDM_ForDhcp(padapter);
+}
+
+void BT_HaltProcess(struct rtw_adapter *padapter)
+{
+ BTDM_ForHalt(padapter);
+}
+
+void BT_LpsLeave(struct rtw_adapter *padapter)
+{
+ BTDM_LpsLeave(padapter);
+}
+
+/* ===== End of sync from SD7 driver COMMOM/BT.c ===== */
+#endif
+
+#ifdef __BT_HANDLEPACKET_C__ /* COMMOM/bt_handlepacket.c */
+/* ===== Below this line is sync from SD7 driver COMMOM/bt_handlepacket.c ===== */
+
+/* ===== End of sync from SD7 driver COMMOM/bt_handlepacket.c ===== */
+#endif
+
+#ifdef __BT_HCI_C__ /* COMMOM/bt_hci.c */
+
+#define i64fmt "ll"
+#define UINT64_C(v) (v)
+
+#define FillOctetString(_os, _octet, _len) \
+ (_os).Octet = (u8 *)(_octet); \
+ (_os).Length = (_len);
+
+static enum rt_status PlatformIndicateBTEvent(
+ struct rtw_adapter *padapter,
+ void *pEvntData,
+ u32 dataLen
+ )
+{
+ enum rt_status rt_status = RT_STATUS_FAILURE;
+
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT_DETAIL, ("BT event start, %d bytes data to Transferred!!\n", dataLen));
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_EVENT_DETAIL, "To transfer Hex Data :\n",
+ pEvntData, dataLen);
+
+ BT_EventParse(padapter, pEvntData, dataLen);
+
+ printk(KERN_WARNING "%s: Linux has no way to report BT event!!\n", __func__);
+
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT_DETAIL, ("BT event end, %s\n",
+ (rt_status == RT_STATUS_SUCCESS) ? "SUCCESS" : "FAIL"));
+
+ return rt_status;
+}
+
+/* ===== Below this line is sync from SD7 driver COMMOM/bt_hci.c ===== */
+
+static u8 bthci_GetLocalChannel(struct rtw_adapter *padapter)
+{
+ return padapter->mlmeextpriv.cur_channel;
+}
+
+static u8 bthci_GetCurrentEntryNum(struct rtw_adapter *padapter, u8 PhyHandle)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ u8 i;
+
+ for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
+ if ((pBTInfo->BtAsocEntry[i].bUsed) &&
+ (pBTInfo->BtAsocEntry[i].PhyLinkCmdData.BtPhyLinkhandle == PhyHandle))
+ return i;
+ }
+
+ return 0xFF;
+}
+
+static void bthci_DecideBTChannel(struct rtw_adapter *padapter, u8 EntryNum)
+{
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct mlme_priv *pmlmepriv;
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+ struct bt_hci_info *pBtHciInfo;
+ struct chnl_txpower_triple *pTriple_subband = NULL;
+ struct common_triple *pTriple;
+ u8 i, j, localchnl, firstRemoteLegalChnlInTriplet = 0;
+ u8 regulatory_skipLen = 0;
+ u8 subbandTripletCnt = 0;
+
+ pmlmepriv = &padapter->mlmepriv;
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+ pBtHciInfo = &pBTInfo->BtHciInfo;
+
+ pBtMgnt->CheckChnlIsSuit = true;
+ localchnl = bthci_GetLocalChannel(padapter);
+
+ pTriple = (struct common_triple *)
+ &pBtHciInfo->BTPreChnllist[COUNTRY_STR_LEN];
+
+ /* contains country string, len is 3 */
+ for (i = 0; i < (pBtHciInfo->BtPreChnlListLen-COUNTRY_STR_LEN); i += 3, pTriple++) {
+ /* */
+ /* Check every triplet; a triplet may be a */
+ /* regulatory extension identifier or a sub-band triplet. */
+ /* */
+ if (pTriple->byte_1st == 0xc9) {
+ /* Regulatory Extension Identifier, skip it */
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO),
+ ("Find Regulatory ID, regulatory class = %d\n", pTriple->byte_2nd));
+ regulatory_skipLen += 3;
+ pTriple_subband = NULL;
+ continue;
+ } else { /* Sub-band triplet */
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Find Sub-band triplet \n"));
+ subbandTripletCnt++;
+ pTriple_subband = (struct chnl_txpower_triple *)pTriple;
+ /* If the first remote legal channel has not been found yet, find the first remote channel */
+ /* that is legal for our channel plan. */
+
+ /* search the sub-band triplet and find if remote channel is legal to our channel plan. */
+ for (j = pTriple_subband->FirstChnl; j < (pTriple_subband->FirstChnl+pTriple_subband->NumChnls); j++) {
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), (" Check if chnl(%d) is legal\n", j));
+ if (BT_IsLegalChannel(padapter, j)) {
+ /* remote channel is legal for our channel plan. */
+ firstRemoteLegalChnlInTriplet = j;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO),
+ ("Find first remote legal channel : %d\n",
+ firstRemoteLegalChnlInTriplet));
+
+ /* If we find a remote legal channel in the sub-band triplet */
+ /* and only BT connection is established(local not connect to any AP or IBSS), */
+ /* then we just switch channel to remote channel. */
+ if (!(check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_ADHOC_STATE|WIFI_AP_STATE) ||
+ BTHCI_HsConnectionEstablished(padapter))) {
+ pBtMgnt->BTChannel = firstRemoteLegalChnlInTriplet;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Remote legal channel (%d) is selected, Local not connect to any!!\n", pBtMgnt->BTChannel));
+ return;
+ } else {
+ if ((localchnl >= firstRemoteLegalChnlInTriplet) &&
+ (localchnl < (pTriple_subband->FirstChnl+pTriple_subband->NumChnls))) {
+ pBtMgnt->BTChannel = localchnl;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Local channel (%d) is selected, wifi or BT connection exists\n", pBtMgnt->BTChannel));
+ return;
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ if (subbandTripletCnt) {
+ /* if any preferred channel triplet exists */
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("There are %d sub-band triplets, ", subbandTripletCnt));
+ if (firstRemoteLegalChnlInTriplet == 0) {
+ /* no legal channel is found, reject the connection. */
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("no legal channel is found!!\n"));
+ } else {
+ /* A remote legal channel is found but does not match the local one */
+ /* (a wifi connection exists), so reject the connection. */
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO),
+ ("Remote Legal channel is found but not match to local(wifi connection exists)!!\n"));
+ }
+ pBtMgnt->CheckChnlIsSuit = false;
+ } else {
+ /* No preferred channel triplet exists. */
+ /* Use current legal channel as the bt channel. */
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("No sub band triplet exists!!\n"));
+ }
+ pBtMgnt->BTChannel = localchnl;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Local channel (%d) is selected!!\n", pBtMgnt->BTChannel));
+}
+
+/* Success:return true */
+/* Fail:return false */
+static u8 bthci_GetAssocInfo(struct rtw_adapter *padapter, u8 EntryNum)
+{
+ struct bt_30info *pBTInfo;
+ struct bt_hci_info *pBtHciInfo;
+ u8 tempBuf[256];
+ u8 i = 0;
+ u8 BaseMemoryShift = 0;
+ u16 TotalLen = 0;
+ struct amp_assoc_structure *pAmpAsoc;
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("GetAssocInfo start\n"));
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtHciInfo = &pBTInfo->BtHciInfo;
+
+ if (pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar == 0) {
+ if (pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocRemLen < (MAX_AMP_ASSOC_FRAG_LEN))
+ TotalLen = pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocRemLen;
+ else if (pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocRemLen == (MAX_AMP_ASSOC_FRAG_LEN))
+ TotalLen = MAX_AMP_ASSOC_FRAG_LEN;
+ } else if (pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar > 0)
+ TotalLen = pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar;
+
+ while ((pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar >= BaseMemoryShift) || TotalLen > BaseMemoryShift) {
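+ /* Each AMP assoc element is a TLV: TypeID (1 byte), Length (2 bytes, little endian), then Data; */
+ /* the cursor therefore advances by 3 + Length after every element. */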
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("GetAssocInfo, TotalLen =%d, BaseMemoryShift =%d\n", TotalLen, BaseMemoryShift));
+ memcpy(tempBuf,
+ (u8 *)pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocfragment+BaseMemoryShift,
+ TotalLen-BaseMemoryShift);
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_DETAIL, "GetAssocInfo :\n",
+ tempBuf, TotalLen-BaseMemoryShift);
+
+ pAmpAsoc = (struct amp_assoc_structure *)tempBuf;
+ pAmpAsoc->Length = le16_to_cpu(pAmpAsoc->Length);
+ BaseMemoryShift += 3 + pAmpAsoc->Length;
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("TypeID = 0x%x, ", pAmpAsoc->TypeID));
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD, "Hex Data: \n", pAmpAsoc->Data, pAmpAsoc->Length);
+ switch (pAmpAsoc->TypeID) {
+ case AMP_MAC_ADDR:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("==> AMP_MAC_ADDR\n"));
+ if (pAmpAsoc->Length > 6)
+ return false;
+ memcpy(pBTInfo->BtAsocEntry[EntryNum].BTRemoteMACAddr, pAmpAsoc->Data, 6);
+ RTPRINT_ADDR(FIOCTL, IOCTL_BT_HCICMD, ("Remote Mac address \n"), pBTInfo->BtAsocEntry[EntryNum].BTRemoteMACAddr);
+ break;
+ case AMP_PREFERRED_CHANNEL_LIST:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("==> AMP_PREFERRED_CHANNEL_LIST\n"));
+ pBtHciInfo->BtPreChnlListLen = pAmpAsoc->Length;
+ memcpy(pBtHciInfo->BTPreChnllist,
+ pAmpAsoc->Data,
+ pBtHciInfo->BtPreChnlListLen);
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD, "Preferred channel list : \n", pBtHciInfo->BTPreChnllist, pBtHciInfo->BtPreChnlListLen);
+ bthci_DecideBTChannel(padapter, EntryNum);
+ break;
+ case AMP_CONNECTED_CHANNEL:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("==> AMP_CONNECTED_CHANNEL\n"));
+ pBtHciInfo->BTConnectChnlListLen = pAmpAsoc->Length;
+ memcpy(pBtHciInfo->BTConnectChnllist,
+ pAmpAsoc->Data,
+ pBtHciInfo->BTConnectChnlListLen);
+ break;
+ case AMP_80211_PAL_CAP_LIST:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("==> AMP_80211_PAL_CAP_LIST\n"));
+ pBTInfo->BtAsocEntry[EntryNum].BTCapability = *(u32 *)(pAmpAsoc->Data);
+ if (pBTInfo->BtAsocEntry[EntryNum].BTCapability & 0x00000001) {
+ /* TODO: */
+
+ /* Signifies PAL capable of utilizing received activity reports. */
+ }
+ if (pBTInfo->BtAsocEntry[EntryNum].BTCapability & 0x00000002) {
+ /* TODO: */
+ /* Signifies PAL is capable of utilizing scheduling information received in an activity reports. */
+ }
+ break;
+ case AMP_80211_PAL_VISION:
+ pBtHciInfo->BTPalVersion = *(u8 *)(pAmpAsoc->Data);
+ pBtHciInfo->BTPalCompanyID = *(u16 *)(((u8 *)(pAmpAsoc->Data))+1);
+ pBtHciInfo->BTPalsubversion = *(u16 *)(((u8 *)(pAmpAsoc->Data))+3);
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("==> AMP_80211_PAL_VISION PalVersion 0x%x, PalCompanyID 0x%x, Palsubversion 0x%x\n",
+ pBtHciInfo->BTPalVersion,
+ pBtHciInfo->BTPalCompanyID,
+ pBtHciInfo->BTPalsubversion));
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("==> Unsupported TypeID!!\n"));
+ break;
+ }
+ i++;
+ }
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("GetAssocInfo end\n"));
+
+ return true;
+}
+
+static u8 bthci_AddEntry(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+ u8 i;
+
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+
+ for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
+ if (pBTInfo->BtAsocEntry[i].bUsed == false) {
+ pBTInfo->BtAsocEntry[i].bUsed = true;
+ pBtMgnt->CurrentConnectEntryNum = i;
+ break;
+ }
+ }
+
+ if (i == MAX_BT_ASOC_ENTRY_NUM) {
+ RTPRINT(FIOCTL, IOCTL_STATE, ("bthci_AddEntry(), Add entry fail!!\n"));
+ return false;
+ }
+ return true;
+}
+
+static u8 bthci_DiscardTxPackets(struct rtw_adapter *padapter, u16 LLH)
+{
+ return false;
+}
+
+static u8
+bthci_CheckLogLinkBehavior(
+ struct rtw_adapter *padapter,
+ struct hci_flow_spec TxFlowSpec
+ )
+{
+ u8 ID = TxFlowSpec.Identifier;
+ u8 ServiceType = TxFlowSpec.ServiceType;
+ u16 MaxSDUSize = TxFlowSpec.MaximumSDUSize;
+ u32 SDUInterArrivatime = TxFlowSpec.SDUInterArrivalTime;
+ u8 match = false;
+
+ switch (ID) {
+ case 1:
+ if (ServiceType == BT_LL_BE) {
+ match = true;
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = TX best effort flowspec\n"));
+ } else if ((ServiceType == BT_LL_GU) && (MaxSDUSize == 0xffff)) {
+ match = true;
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = RX guaranteed latency flowspec\n"));
+ } else if ((ServiceType == BT_LL_GU) && (MaxSDUSize == 2500)) {
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = RX guaranteed Large latency flowspec\n"));
+ }
+ break;
+ case 2:
+ if (ServiceType == BT_LL_BE) {
+ match = true;
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = RX best effort flowspec\n"));
+
+ }
+ break;
+ case 3:
+ if ((ServiceType == BT_LL_GU) && (MaxSDUSize == 1492)) {
+ match = true;
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = TX guaranteed latency flowspec\n"));
+ } else if ((ServiceType == BT_LL_GU) && (MaxSDUSize == 2500)) {
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = TX guaranteed Large latency flowspec\n"));
+ }
+ break;
+ case 4:
+ if (ServiceType == BT_LL_BE) {
+ if ((SDUInterArrivatime == 0xffffffff) && (ServiceType == BT_LL_BE) && (MaxSDUSize == 1492)) {
+ match = true;
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = TX/RX aggregated best effort flowspec\n"));
+ }
+ } else if (ServiceType == BT_LL_GU) {
+ if (SDUInterArrivatime == 100) {
+ match = true;
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = TX/RX guaranteed bandwidth flowspec\n"));
+ }
+ }
+ break;
+ default:
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = Unknown Type !!!!!!!!\n"));
+ break;
+ }
+
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO),
+ ("ID = 0x%x, ServiceType = 0x%x, MaximumSDUSize = 0x%x, SDUInterArrivalTime = 0x%x, AccessLatency = 0x%x, FlushTimeout = 0x%x\n",
+ TxFlowSpec.Identifier, TxFlowSpec.ServiceType, MaxSDUSize,
+ SDUInterArrivatime, TxFlowSpec.AccessLatency, TxFlowSpec.FlushTimeout));
+ return match;
+}
+
+static u16 bthci_AssocMACAddr(struct rtw_adapter *padapter, void *pbuf)
+{
+ struct amp_assoc_structure *pAssoStrc = (struct amp_assoc_structure *)pbuf;
+ pAssoStrc->TypeID = AMP_MAC_ADDR;
+ pAssoStrc->Length = 0x06;
+ memcpy(&pAssoStrc->Data[0], padapter->eeprompriv.mac_addr, 6);
+ RTPRINT_DATA(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO),
+ ("AssocMACAddr : \n"), pAssoStrc, pAssoStrc->Length+3);
+
+ return pAssoStrc->Length + 3;
+}
+
+static u16
+bthci_PALCapabilities(
+ struct rtw_adapter *padapter,
+ void *pbuf
+ )
+{
+ struct amp_assoc_structure *pAssoStrc = (struct amp_assoc_structure *)pbuf;
+
+ pAssoStrc->TypeID = AMP_80211_PAL_CAP_LIST;
+ pAssoStrc->Length = 0x04;
+
+ pAssoStrc->Data[0] = 0x00;
+ pAssoStrc->Data[1] = 0x00;
+
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("PALCapabilities:\n"), pAssoStrc, pAssoStrc->Length+3);
+ RTPRINT(FIOCTL, IOCTL_BT_LOGO, ("PALCapabilities \n"));
+
+ RTPRINT(FIOCTL, IOCTL_BT_LOGO, (" TypeID = 0x%x,\n Length = 0x%x,\n Content = 0x0000\n",
+ pAssoStrc->TypeID,
+ pAssoStrc->Length));
+
+ return pAssoStrc->Length + 3;
+}
+
+static u16 bthci_AssocPreferredChannelList(struct rtw_adapter *padapter,
+ void *pbuf, u8 EntryNum)
+{
+ struct bt_30info *pBTInfo;
+ struct amp_assoc_structure *pAssoStrc;
+ struct amp_pref_chnl_regulatory *pReg;
+ struct chnl_txpower_triple *pTriple;
+ char ctrString[3] = {'X', 'X', 'X'};
+ u32 len = 0;
+ u8 preferredChnl;
+
+ pBTInfo = GET_BT_INFO(padapter);
+ pAssoStrc = (struct amp_assoc_structure *)pbuf;
+ pReg = (struct amp_pref_chnl_regulatory *)&pAssoStrc->Data[3];
+
+ preferredChnl = bthci_GetLocalChannel(padapter);
+ pAssoStrc->TypeID = AMP_PREFERRED_CHANNEL_LIST;
+
+ /* locale unknown */
+ memcpy(&pAssoStrc->Data[0], &ctrString[0], 3);
+ pReg->reXId = 201;
+ pReg->regulatoryClass = 254;
+ pReg->coverageClass = 0;
+ len += 6;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD | IOCTL_BT_LOGO), ("PREFERRED_CHNL_LIST\n"));
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD | IOCTL_BT_LOGO), ("XXX, 201, 254, 0\n"));
+ /* at the following, chnl 1~11 should be contained */
+ pTriple = (struct chnl_txpower_triple *)&pAssoStrc->Data[len];
+
+ /* (1) if any wifi or bt HS connection exists */
+ if ((pBTInfo->BtAsocEntry[EntryNum].AMPRole == AMP_BTAP_CREATOR) ||
+ (check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE |
+ WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE |
+ WIFI_AP_STATE)) ||
+ BTHCI_HsConnectionEstablished(padapter)) {
+ pTriple->FirstChnl = preferredChnl;
+ pTriple->NumChnls = 1;
+ pTriple->MaxTxPowerInDbm = 20;
+ len += 3;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD | IOCTL_BT_LOGO), ("First Channel = %d, Channel Num = %d, MaxDbm = %d\n",
+ pTriple->FirstChnl,
+ pTriple->NumChnls,
+ pTriple->MaxTxPowerInDbm));
+ }
+
+ pAssoStrc->Length = (u16)len;
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD, ("AssocPreferredChannelList : \n"), pAssoStrc, pAssoStrc->Length+3);
+
+ return pAssoStrc->Length + 3;
+}
+
+static u16 bthci_AssocPALVer(struct rtw_adapter *padapter, void *pbuf)
+{
+ struct amp_assoc_structure *pAssoStrc = (struct amp_assoc_structure *)pbuf;
+ u8 *pu1Tmp;
+ u16 *pu2Tmp;
+
+ pAssoStrc->TypeID = AMP_80211_PAL_VISION;
+ pAssoStrc->Length = 0x5;
+ pu1Tmp = &pAssoStrc->Data[0];
+ *pu1Tmp = 0x1; /* PAL Version */
+ pu2Tmp = (u16 *)&pAssoStrc->Data[1];
+ *pu2Tmp = 0x5D; /* SIG Company identifier of 802.11 PAL vendor */
+ pu2Tmp = (u16 *)&pAssoStrc->Data[3];
+ *pu2Tmp = 0x1; /* PAL Sub-version specifier */
+
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("AssocPALVer : \n"), pAssoStrc, pAssoStrc->Length+3);
+ RTPRINT(FIOCTL, IOCTL_BT_LOGO, ("AssocPALVer \n"));
+
+ RTPRINT(FIOCTL, IOCTL_BT_LOGO, (" TypeID = 0x%x,\n Length = 0x%x,\n PAL Version = 0x01,\n PAL vendor = 0x01,\n PAL Sub-version specifier = 0x01\n",
+ pAssoStrc->TypeID,
+ pAssoStrc->Length));
+ return pAssoStrc->Length + 3;
+}
+
+static u8 bthci_CheckRfStateBeforeConnect(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo;
+ enum rt_rf_power_state RfState;
+
+ pBTInfo = GET_BT_INFO(padapter);
+
+ RfState = padapter->pwrctrlpriv.rf_pwrstate;
+
+ if (RfState != rf_on) {
+ mod_timer(&pBTInfo->BTPsDisableTimer,
+ jiffies + msecs_to_jiffies(50));
+ return false;
+ }
+ return true;
+}
+
+static void bthci_ResponderStartToScan(struct rtw_adapter *padapter)
+{
+}
+
+static u8 bthci_PhyLinkConnectionInProgress(struct rtw_adapter *padapter, u8 PhyLinkHandle)
+{
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->bPhyLinkInProgress &&
+ (pBtMgnt->BtCurrentPhyLinkhandle == PhyLinkHandle))
+ return true;
+ return false;
+}
+
+static void bthci_ResetFlowSpec(struct rtw_adapter *padapter, u8 EntryNum, u8 index)
+{
+ struct bt_30info *pBTinfo;
+
+ pBTinfo = GET_BT_INFO(padapter);
+
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].BtLogLinkhandle = 0;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].BtPhyLinkhandle = 0;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].bLLCompleteEventIsSet = false;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].bLLCancelCMDIsSetandComplete = false;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].BtTxFlowSpecID = 0;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].TxPacketCount = 0;
+
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.Identifier = 0x01;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.ServiceType = SERVICE_BEST_EFFORT;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.MaximumSDUSize = 0xffff;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.SDUInterArrivalTime = 0xffffffff;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.AccessLatency = 0xffffffff;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.FlushTimeout = 0xffffffff;
+
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.Identifier = 0x01;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.ServiceType = SERVICE_BEST_EFFORT;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.MaximumSDUSize = 0xffff;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.SDUInterArrivalTime = 0xffffffff;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.AccessLatency = 0xffffffff;
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.FlushTimeout = 0xffffffff;
+}
+
+static void bthci_ResetEntry(struct rtw_adapter *padapter, u8 EntryNum)
+{
+ struct bt_30info *pBTinfo;
+ struct bt_mgnt *pBtMgnt;
+ u8 j;
+
+ pBTinfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTinfo->BtMgnt;
+
+ pBTinfo->BtAsocEntry[EntryNum].bUsed = false;
+ pBTinfo->BtAsocEntry[EntryNum].BtCurrentState = HCI_STATE_DISCONNECTED;
+ pBTinfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_DISCONNECTED;
+
+ pBTinfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocRemLen = 0;
+ pBTinfo->BtAsocEntry[EntryNum].AmpAsocCmdData.BtPhyLinkhandle = 0;
+ if (pBTinfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocfragment != NULL)
+ memset(pBTinfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocfragment, 0, TOTAL_ALLOCIATE_ASSOC_LEN);
+ pBTinfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar = 0;
+
+ pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyType = 0;
+ pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle = 0;
+ memset(pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKey, 0,
+ pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen);
+ pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen = 0;
+
+ /* 0x640; 0.625ms*1600 = 1000ms, 0.625ms*16000 = 10000ms */
+ pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.LinkSuperversionTimeout = 0x3e80;
+
+ pBTinfo->BtAsocEntry[EntryNum].AMPRole = AMP_BTAP_NONE;
+
+ pBTinfo->BtAsocEntry[EntryNum].mAssoc = false;
+ pBTinfo->BtAsocEntry[EntryNum].b4waySuccess = false;
+
+ /* Reset BT WPA */
+ pBTinfo->BtAsocEntry[EntryNum].KeyReplayCounter = 0;
+ pBTinfo->BtAsocEntry[EntryNum].BTWPAAuthState = STATE_WPA_AUTH_UNINITIALIZED;
+
+ pBTinfo->BtAsocEntry[EntryNum].bSendSupervisionPacket = false;
+ pBTinfo->BtAsocEntry[EntryNum].NoRxPktCnt = 0;
+ pBTinfo->BtAsocEntry[EntryNum].ShortRangeMode = 0;
+ pBTinfo->BtAsocEntry[EntryNum].rxSuvpPktCnt = 0;
+
+ for (j = 0; j < MAX_LOGICAL_LINK_NUM; j++)
+ bthci_ResetFlowSpec(padapter, EntryNum, j);
+
+ pBtMgnt->BTAuthCount = 0;
+ pBtMgnt->BTAsocCount = 0;
+ pBtMgnt->BTCurrentConnectType = BT_DISCONNECT;
+ pBtMgnt->BTReceiveConnectPkt = BT_DISCONNECT;
+
+ HALBT_RemoveKey(padapter, EntryNum);
+}
+
+static void bthci_RemoveEntryByEntryNum(struct rtw_adapter *padapter, u8 EntryNum)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ bthci_ResetEntry(padapter, EntryNum);
+
+ if (pBtMgnt->CurrentBTConnectionCnt > 0)
+ pBtMgnt->CurrentBTConnectionCnt--;
+
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], CurrentBTConnectionCnt = %d!!\n",
+ pBtMgnt->CurrentBTConnectionCnt));
+
+ if (pBtMgnt->CurrentBTConnectionCnt > 0) {
+ pBtMgnt->BtOperationOn = true;
+ } else {
+ pBtMgnt->BtOperationOn = false;
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], Bt Operation OFF!!\n"));
+ }
+
+ if (!pBtMgnt->BtOperationOn) {
+ del_timer_sync(&pBTInfo->BTHCIDiscardAclDataTimer);
+ del_timer_sync(&pBTInfo->BTBeaconTimer);
+ pBtMgnt->bStartSendSupervisionPkt = false;
+ }
+}
+
+static u8
+bthci_CommandCompleteHeader(
+ u8 *pbuf,
+ u16 OGF,
+ u16 OCF,
+ enum hci_status status
+ )
+{
+ struct packet_irp_hcievent_data *PPacketIrpEvent = (struct packet_irp_hcievent_data *)pbuf;
+ u8 NumHCI_Comm = 0x1;
+
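+ /* Build the 3-byte Command Complete parameter header: number of allowed HCI command packets */
+ /* followed by the command opcode packed from OCF/OGF, low byte first. */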
+ PPacketIrpEvent->EventCode = HCI_EVENT_COMMAND_COMPLETE;
+ PPacketIrpEvent->Data[0] = NumHCI_Comm; /* packet # */
+ PPacketIrpEvent->Data[1] = HCIOPCODELOW(OCF, OGF);
+ PPacketIrpEvent->Data[2] = HCIOPCODEHIGHT(OCF, OGF);
+
+ if (OGF == OGF_EXTENSION) {
+ if (OCF == HCI_SET_RSSI_VALUE) {
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT_PERIODICAL),
+ ("[BT event], CommandComplete, Num_HCI_Comm = 0x%x, Opcode = 0x%02x%02x, status = 0x%x, OGF = 0x%x, OCF = 0x%x\n",
+ NumHCI_Comm, (HCIOPCODEHIGHT(OCF, OGF)), (HCIOPCODELOW(OCF, OGF)), status, OGF, OCF));
+ } else {
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_EXT),
+ ("[BT event], CommandComplete, Num_HCI_Comm = 0x%x, Opcode = 0x%02x%02x, status = 0x%x, OGF = 0x%x, OCF = 0x%x\n",
+ NumHCI_Comm, (HCIOPCODEHIGHT(OCF, OGF)), (HCIOPCODELOW(OCF, OGF)), status, OGF, OCF));
+ }
+ } else {
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO),
+ ("[BT event], CommandComplete, Num_HCI_Comm = 0x%x, Opcode = 0x%02x%02x, status = 0x%x, OGF = 0x%x, OCF = 0x%x\n",
+ NumHCI_Comm, (HCIOPCODEHIGHT(OCF, OGF)), (HCIOPCODELOW(OCF, OGF)), status, OGF, OCF));
+ }
+ return 3;
+}
+
+static u8 bthci_ExtensionEventHeaderRtk(u8 *pbuf, u8 extensionEvent)
+{
+ struct packet_irp_hcievent_data *PPacketIrpEvent = (struct packet_irp_hcievent_data *)pbuf;
+ PPacketIrpEvent->EventCode = HCI_EVENT_EXTENSION_RTK;
+ PPacketIrpEvent->Data[0] = extensionEvent; /* extension event code */
+
+ return 1;
+}
+
+static enum rt_status
+bthci_IndicateEvent(
+ struct rtw_adapter *padapter,
+ void *pEvntData,
+ u32 dataLen
+ )
+{
+ enum rt_status rt_status;
+
+ rt_status = PlatformIndicateBTEvent(padapter, pEvntData, dataLen);
+
+ return rt_status;
+}
+
+static void
+bthci_EventWriteRemoteAmpAssoc(
+ struct rtw_adapter *padapter,
+ enum hci_status status,
+ u8 PLHandle
+ )
+{
+ u8 localBuf[TmpLocalBufSize] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_STATUS_PARAMETERS,
+ HCI_WRITE_REMOTE_AMP_ASSOC,
+ status);
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("PhyLinkHandle = 0x%x, status = %d\n", PLHandle, status));
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pRetPar[1] = PLHandle;
+ len += 2;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+}
+
+static void
+bthci_EventEnhancedFlushComplete(
+ struct rtw_adapter *padapter,
+ u16 LLH
+ )
+{
+ u8 localBuf[4] = "";
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("EventEnhancedFlushComplete, LLH = 0x%x\n", LLH));
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_ENHANCED_FLUSH_COMPLETE;
+ PPacketIrpEvent->Length = 2;
+ /* Logical link handle */
+ PPacketIrpEvent->Data[0] = TWOBYTE_LOWBYTE(LLH);
+ PPacketIrpEvent->Data[1] = TWOBYTE_HIGHTBYTE(LLH);
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
+}
+
+static void
+bthci_EventShortRangeModeChangeComplete(
+ struct rtw_adapter *padapter,
+ enum hci_status HciStatus,
+ u8 ShortRangeState,
+ u8 EntryNum
+ )
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[5] = "";
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_SHORT_RANGE_MODE_CHANGE_COMPLETE)) {
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT,
+ ("[BT event], Short Range Mode Change Complete, Ignore to send this event due to event mask page 2\n"));
+ return;
+ }
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Short Range Mode Change Complete, Status = %d\n , PLH = 0x%x\n, Short_Range_Mode_State = 0x%x\n",
+ HciStatus, pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle, ShortRangeState));
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_SHORT_RANGE_MODE_CHANGE_COMPLETE;
+ PPacketIrpEvent->Length = 3;
+ PPacketIrpEvent->Data[0] = HciStatus;
+ PPacketIrpEvent->Data[1] = pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
+ PPacketIrpEvent->Data[2] = ShortRangeState;
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 5);
+}
+
+static void bthci_EventSendFlowSpecModifyComplete(struct rtw_adapter *padapter,
+ enum hci_status HciStatus,
+ u16 logicHandle)
+{
+ u8 localBuf[5] = "";
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+
+ if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_FLOW_SPEC_MODIFY_COMPLETE)) {
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO),
+ ("[BT event], Flow Spec Modify Complete, Ignore to send this event due to event mask page 2\n"));
+ return;
+ }
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO),
+ ("[BT event], Flow Spec Modify Complete, status = 0x%x, LLH = 0x%x\n", HciStatus, logicHandle));
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_FLOW_SPEC_MODIFY_COMPLETE;
+ PPacketIrpEvent->Length = 3;
+
+ PPacketIrpEvent->Data[0] = HciStatus;
+ /* Logical link handle */
+ PPacketIrpEvent->Data[1] = TWOBYTE_LOWBYTE(logicHandle);
+ PPacketIrpEvent->Data[2] = TWOBYTE_HIGHTBYTE(logicHandle);
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 5);
+}
+
+static void
+bthci_EventExtWifiScanNotify(
+ struct rtw_adapter *padapter,
+ u8 scanType
+ )
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ u8 len = 0;
+ u8 localBuf[7] = "";
+ u8 *pRetPar;
+ u8 *pu1Temp;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ if (!pBtMgnt->BtOperationOn)
+ return;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_ExtensionEventHeaderRtk(&localBuf[0], HCI_EVENT_EXT_WIFI_SCAN_NOTIFY);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pu1Temp = (u8 *)&pRetPar[0];
+ *pu1Temp = scanType;
+ len += 1;
+
+ PPacketIrpEvent->Length = len;
+
+ if (bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2) == RT_STATUS_SUCCESS) {
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Wifi scan notify, scan type = %d\n",
+ scanType));
+ }
+}
+
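+/*
+ * AMP Receiver Report event, sent only while an AMP test has asked for
+ * reports: controller type, reason code, 32-bit event type, then the
+ * frame/error-frame counters and bit/error-bit counters, written with
+ * direct u16/u32 stores (host byte order).
+ */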
+static void
+bthci_EventAMPReceiverReport(
+ struct rtw_adapter *padapter,
+ u8 Reason
+ )
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+
+ if (pBtHciInfo->bTestNeedReport) {
+ u8 localBuf[20] = "";
+ u32 *pu4Temp;
+ u16 *pu2Temp;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), (" HCI_EVENT_AMP_RECEIVER_REPORT\n"));
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_AMP_RECEIVER_REPORT;
+ PPacketIrpEvent->Length = 2;
+
+ PPacketIrpEvent->Data[0] = pBtHciInfo->TestCtrType;
+
+ PPacketIrpEvent->Data[1] = Reason;
+
+ pu4Temp = (u32 *)&PPacketIrpEvent->Data[2];
+ *pu4Temp = pBtHciInfo->TestEventType;
+
+ pu2Temp = (u16 *)&PPacketIrpEvent->Data[6];
+ *pu2Temp = pBtHciInfo->TestNumOfFrame;
+
+ pu2Temp = (u16 *)&PPacketIrpEvent->Data[8];
+ *pu2Temp = pBtHciInfo->TestNumOfErrFrame;
+
+ pu4Temp = (u32 *)&PPacketIrpEvent->Data[10];
+ *pu4Temp = pBtHciInfo->TestNumOfBits;
+
+ pu4Temp = (u32 *)&PPacketIrpEvent->Data[14];
+ *pu4Temp = pBtHciInfo->TestNumOfErrBits;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 20);
+
+		/* Return to Idle state with RX and TX off. */
+
+ }
+
+ pBtHciInfo->TestNumOfFrame = 0x00;
+}
+
+static void
+bthci_EventChannelSelected(
+ struct rtw_adapter *padapter,
+ u8 EntryNum
+ )
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[3] = "";
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_CHANNEL_SELECT)) {
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT,
+ ("[BT event], Channel Selected, Ignore to send this event due to event mask page 2\n"));
+ return;
+ }
+
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT|IOCTL_STATE,
+ ("[BT event], Channel Selected, PhyLinkHandle %d\n",
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle));
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_CHANNEL_SELECT;
+ PPacketIrpEvent->Length = 1;
+ PPacketIrpEvent->Data[0] = pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 3);
+}
+
+static void
+bthci_EventDisconnectPhyLinkComplete(
+ struct rtw_adapter *padapter,
+ enum hci_status HciStatus,
+ enum hci_status Reason,
+ u8 EntryNum
+ )
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[5] = "";
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_DISCONNECT_PHY_LINK_COMPLETE)) {
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT,
+ ("[BT event], Disconnect Physical Link Complete, Ignore to send this event due to event mask page 2\n"));
+ return;
+ }
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT,
+ ("[BT event], Disconnect Physical Link Complete, Status = 0x%x, PLH = 0x%x Reason = 0x%x\n",
+ HciStatus, pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle, Reason));
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_DISCONNECT_PHY_LINK_COMPLETE;
+ PPacketIrpEvent->Length = 3;
+ PPacketIrpEvent->Data[0] = HciStatus;
+ PPacketIrpEvent->Data[1] = pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
+ PPacketIrpEvent->Data[2] = Reason;
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 5);
+}
+
+static void
+bthci_EventPhysicalLinkComplete(
+ struct rtw_adapter *padapter,
+ enum hci_status HciStatus,
+ u8 EntryNum,
+ u8 PLHandle
+ )
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+ u8 localBuf[4] = "";
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u8 PL_handle;
+
+ pBtMgnt->bPhyLinkInProgress = false;
+ pBtDbg->dbgHciInfo.hciCmdPhyLinkStatus = HciStatus;
+ if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_PHY_LINK_COMPLETE)) {
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT,
+ ("[BT event], Physical Link Complete, Ignore to send this event due to event mask page 2\n"));
+ return;
+ }
+
+ if (EntryNum == 0xff) {
+		/* connection not started yet, just use the input physical link handle in the response. */
+ PL_handle = PLHandle;
+ } else {
+		/* connection is in progress, use the phy link handle we recorded. */
+ PL_handle = pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
+ pBTInfo->BtAsocEntry[EntryNum].bNeedPhysLinkCompleteEvent = false;
+ }
+
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Physical Link Complete, Status = 0x%x PhyLinkHandle = 0x%x\n", HciStatus,
+ PL_handle));
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_PHY_LINK_COMPLETE;
+ PPacketIrpEvent->Length = 2;
+
+ PPacketIrpEvent->Data[0] = HciStatus;
+ PPacketIrpEvent->Data[1] = PL_handle;
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
+}
+
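+/*
+ * Send a Command Status event: status, the number of HCI command packets
+ * the host may send (fixed at 1 here) and the 16-bit opcode rebuilt from
+ * OGF/OCF via the HCIOPCODELOW/HCIOPCODEHIGHT macros.
+ */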
+static void
+bthci_EventCommandStatus(
+ struct rtw_adapter *padapter,
+ u8 OGF,
+ u16 OCF,
+ enum hci_status HciStatus
+ )
+{
+	u8 localBuf[6] = "";
+	struct packet_irp_hcievent_data *PPacketIrpEvent;
+	u8 Num_Hci_Comm = 0x1;
+
+	RTPRINT(FIOCTL, IOCTL_BT_EVENT,
+ ("[BT event], CommandStatus, Opcode = 0x%02x%02x, OGF = 0x%x, OCF = 0x%x, Status = 0x%x, Num_HCI_COMM = 0x%x\n",
+ (HCIOPCODEHIGHT(OCF, OGF)), (HCIOPCODELOW(OCF, OGF)), OGF, OCF, HciStatus, Num_Hci_Comm));
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_COMMAND_STATUS;
+ PPacketIrpEvent->Length = 4;
+ PPacketIrpEvent->Data[0] = HciStatus; /* current pending */
+ PPacketIrpEvent->Data[1] = Num_Hci_Comm; /* packet # */
+ PPacketIrpEvent->Data[2] = HCIOPCODELOW(OCF, OGF);
+ PPacketIrpEvent->Data[3] = HCIOPCODEHIGHT(OCF, OGF);
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 6);
+}
+
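+/*
+ * Logical Link Complete event payload: status, 16-bit logical link handle,
+ * physical link handle and the Tx flow spec identifier (zero when the link
+ * setup failed).
+ */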
+static void
+bthci_EventLogicalLinkComplete(
+ struct rtw_adapter *padapter,
+ enum hci_status HciStatus,
+ u8 PhyLinkHandle,
+ u16 LogLinkHandle,
+ u8 LogLinkIndex,
+ u8 EntryNum
+ )
+{
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[7] = "";
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_LOGICAL_LINK_COMPLETE)) {
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT,
+ ("[BT event], Logical Link Complete, Ignore to send this event due to event mask page 2\n"));
+ return;
+ }
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Logical Link Complete, PhyLinkHandle = 0x%x, LogLinkHandle = 0x%x, Status = 0x%x\n",
+ PhyLinkHandle, LogLinkHandle, HciStatus));
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_LOGICAL_LINK_COMPLETE;
+ PPacketIrpEvent->Length = 5;
+
+ PPacketIrpEvent->Data[0] = HciStatus;/* status code */
+ /* Logical link handle */
+ PPacketIrpEvent->Data[1] = TWOBYTE_LOWBYTE(LogLinkHandle);
+ PPacketIrpEvent->Data[2] = TWOBYTE_HIGHTBYTE(LogLinkHandle);
+ /* Physical link handle */
+ PPacketIrpEvent->Data[3] = TWOBYTE_LOWBYTE(PhyLinkHandle);
+ /* corresponding Tx flow spec ID */
+ if (HciStatus == HCI_STATUS_SUCCESS) {
+ PPacketIrpEvent->Data[4] =
+ pBTInfo->BtAsocEntry[EntryNum].LogLinkCmdData[LogLinkIndex].Tx_Flow_Spec.Identifier;
+ } else {
+ PPacketIrpEvent->Data[4] = 0x0;
+ }
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 7);
+}
+
+static void
+bthci_EventDisconnectLogicalLinkComplete(
+ struct rtw_adapter *padapter,
+ enum hci_status HciStatus,
+ u16 LogLinkHandle,
+ enum hci_status Reason
+ )
+{
+ u8 localBuf[6] = "";
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_DISCONNECT_LOGICAL_LINK_COMPLETE)) {
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Disconnect Logical Link Complete, Ignore to send this event due to event mask page 2\n"));
+ return;
+ }
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Disconnect Logical Link Complete, Status = 0x%x, LLH = 0x%x Reason = 0x%x\n", HciStatus, LogLinkHandle, Reason));
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_DISCONNECT_LOGICAL_LINK_COMPLETE;
+ PPacketIrpEvent->Length = 4;
+
+ PPacketIrpEvent->Data[0] = HciStatus;
+ /* Logical link handle */
+ PPacketIrpEvent->Data[1] = TWOBYTE_LOWBYTE(LogLinkHandle);
+ PPacketIrpEvent->Data[2] = TWOBYTE_HIGHTBYTE(LogLinkHandle);
+ /* Disconnect reason */
+ PPacketIrpEvent->Data[3] = Reason;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 6);
+}
+
+static void
+bthci_EventFlushOccurred(
+ struct rtw_adapter *padapter,
+ u16 LogLinkHandle
+ )
+{
+ u8 localBuf[4] = "";
+	struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+	RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("bthci_EventFlushOccurred(), LLH = 0x%x\n", LogLinkHandle));
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_FLUSH_OCCRUED;
+ PPacketIrpEvent->Length = 2;
+ /* Logical link handle */
+ PPacketIrpEvent->Data[0] = TWOBYTE_LOWBYTE(LogLinkHandle);
+ PPacketIrpEvent->Data[1] = TWOBYTE_HIGHTBYTE(LogLinkHandle);
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
+}
+
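+/*
+ * Common handler for HCI_CREATE_PHYSICAL_LINK and HCI_ACCEPT_PHYSICAL_LINK:
+ * ack the command with a Command Status event, allocate an association
+ * entry, record the AMP key carried in the command payload and then kick
+ * the connection state machine for the creator or acceptor role.
+ */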
+static enum hci_status
+bthci_BuildPhysicalLink(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd,
+ u16 OCF
+)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ u8 EntryNum, PLH;
+
+ /* Send HCI Command status event to AMP. */
+ bthci_EventCommandStatus(padapter,
+ LINK_CONTROL_COMMANDS,
+ OCF,
+ HCI_STATUS_SUCCESS);
+
+ PLH = *((u8 *)pHciCmd->Data);
+
+	/* Reject the link creation if we are out of resources or a BT connection is already in progress. */
+ if (!bthci_AddEntry(padapter)) {
+ status = HCI_STATUS_CONNECT_RJT_LIMIT_RESOURCE;
+ bthci_EventPhysicalLinkComplete(padapter, status, INVALID_ENTRY_NUM, PLH);
+ return status;
+ }
+
+ EntryNum = pBtMgnt->CurrentConnectEntryNum;
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle = PLH;
+ pBtMgnt->BtCurrentPhyLinkhandle = PLH;
+
+ if (pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocfragment == NULL) {
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Create/Accept PhysicalLink, AMP controller is busy\n"));
+ status = HCI_STATUS_CONTROLLER_BUSY;
+ bthci_EventPhysicalLinkComplete(padapter, status, INVALID_ENTRY_NUM, PLH);
+ return status;
+ }
+
+ /* Record Key and the info */
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen = (*((u8 *)pHciCmd->Data+1));
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyType = (*((u8 *)pHciCmd->Data+2));
+ memcpy(pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKey,
+ (((u8 *)pHciCmd->Data+3)), pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen);
+ memcpy(pBTInfo->BtAsocEntry[EntryNum].PMK, pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKey, PMK_LEN);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("BuildPhysicalLink, EntryNum = %d, PLH = 0x%x KeyLen = 0x%x, KeyType = 0x%x\n",
+ EntryNum, pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle,
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen,
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyType));
+ RTPRINT_DATA(FIOCTL, (IOCTL_BT_LOGO|IOCTL_BT_HCICMD), ("BtAMPKey\n"), pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKey,
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen);
+ RTPRINT_DATA(FIOCTL, (IOCTL_BT_LOGO|IOCTL_BT_HCICMD), ("PMK\n"), pBTInfo->BtAsocEntry[EntryNum].PMK,
+ PMK_LEN);
+
+ if (OCF == HCI_CREATE_PHYSICAL_LINK) {
+ /* These macros require braces */
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTED, STATE_CMD_CREATE_PHY_LINK, EntryNum);
+ } else if (OCF == HCI_ACCEPT_PHYSICAL_LINK) {
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTED, STATE_CMD_ACCEPT_PHY_LINK, EntryNum);
+ }
+
+ return status;
+}
+
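+/*
+ * Common handler for the Create/Accept Logical Link commands: validate the
+ * physical link handle, estimate the requested bandwidth from the Tx/Rx
+ * flow specs, assign a logical link handle from a static counter and report
+ * the outcome through Command Status and Logical Link Complete events.
+ */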
+static void
+bthci_BuildLogicalLink(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd,
+ u16 OCF
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTinfo->BtMgnt;
+ u8 PhyLinkHandle, EntryNum;
+ static u16 AssignLogHandle = 1;
+
+ struct hci_flow_spec TxFlowSpec;
+ struct hci_flow_spec RxFlowSpec;
+ u32 MaxSDUSize, ArriveTime, Bandwidth;
+
+ PhyLinkHandle = *((u8 *)pHciCmd->Data);
+
+ EntryNum = bthci_GetCurrentEntryNum(padapter, PhyLinkHandle);
+
+ memcpy(&TxFlowSpec,
+ &pHciCmd->Data[1], sizeof(struct hci_flow_spec));
+ memcpy(&RxFlowSpec,
+ &pHciCmd->Data[17], sizeof(struct hci_flow_spec));
+
+ MaxSDUSize = TxFlowSpec.MaximumSDUSize;
+ ArriveTime = TxFlowSpec.SDUInterArrivalTime;
+
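+	/*
+	 * Derive the requested bandwidth from the Tx flow spec.  If both
+	 * flow specs pass bthci_CheckLogLinkBehavior() or are left
+	 * unspecified (0xffff/0xffffffff), the full BTTOTALBANDWIDTH is
+	 * assumed; otherwise MaxSDUSize*8 converts octets to bits and the
+	 * division by the SDU inter-arrival time (presumably microseconds,
+	 * padded by 244) with the *1000 factor scales the result to the
+	 * same unit as BTTOTALBANDWIDTH.
+	 */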
+ if (bthci_CheckLogLinkBehavior(padapter, TxFlowSpec) && bthci_CheckLogLinkBehavior(padapter, RxFlowSpec))
+ Bandwidth = BTTOTALBANDWIDTH;
+ else if (MaxSDUSize == 0xffff && ArriveTime == 0xffffffff)
+ Bandwidth = BTTOTALBANDWIDTH;
+ else
+ Bandwidth = MaxSDUSize*8*1000/(ArriveTime+244);
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD,
+ ("BuildLogicalLink, PhyLinkHandle = 0x%x, MaximumSDUSize = 0x%x, SDUInterArrivalTime = 0x%x, Bandwidth = 0x%x\n",
+ PhyLinkHandle, MaxSDUSize, ArriveTime, Bandwidth));
+
+ if (EntryNum == 0xff) {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Invalid Physical Link handle = 0x%x, status = HCI_STATUS_UNKNOW_CONNECT_ID, return\n", PhyLinkHandle));
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+
+ /* When we receive Create/Accept logical link command, we should send command status event first. */
+ bthci_EventCommandStatus(padapter,
+ LINK_CONTROL_COMMANDS,
+ OCF,
+ status);
+ return;
+ }
+
+ if (!pBtMgnt->bLogLinkInProgress) {
+ if (bthci_PhyLinkConnectionInProgress(padapter, PhyLinkHandle)) {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Physical link connection in progress, status = HCI_STATUS_CMD_DISALLOW, return\n"));
+ status = HCI_STATUS_CMD_DISALLOW;
+
+ pBtMgnt->bPhyLinkInProgressStartLL = true;
+ /* When we receive Create/Accept logical link command, we should send command status event first. */
+ bthci_EventCommandStatus(padapter,
+ LINK_CONTROL_COMMANDS,
+ OCF,
+ status);
+
+ return;
+ }
+
+ if (Bandwidth > BTTOTALBANDWIDTH) {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("status = HCI_STATUS_QOS_REJECT, Bandwidth = 0x%x, return\n", Bandwidth));
+ status = HCI_STATUS_QOS_REJECT;
+
+ /* When we receive Create/Accept logical link command, we should send command status event first. */
+ bthci_EventCommandStatus(padapter,
+ LINK_CONTROL_COMMANDS,
+ OCF,
+ status);
+ } else {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("status = HCI_STATUS_SUCCESS\n"));
+ status = HCI_STATUS_SUCCESS;
+
+ /* When we receive Create/Accept logical link command, we should send command status event first. */
+ bthci_EventCommandStatus(padapter,
+ LINK_CONTROL_COMMANDS,
+ OCF,
+ status);
+
+ }
+
+ if (pBTinfo->BtAsocEntry[EntryNum].BtCurrentState != HCI_STATE_CONNECTED) {
+ bthci_EventLogicalLinkComplete(padapter,
+ HCI_STATUS_CMD_DISALLOW, 0, 0, 0, EntryNum);
+ } else {
+ u8 i, find = 0;
+
+ pBtMgnt->bLogLinkInProgress = true;
+
+ /* find an unused logical link index and copy the data */
+ for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
+ if (pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtLogLinkhandle == 0) {
+ enum hci_status LogCompEventstatus = HCI_STATUS_SUCCESS;
+
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtPhyLinkhandle = *((u8 *)pHciCmd->Data);
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtLogLinkhandle = AssignLogHandle;
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("BuildLogicalLink, EntryNum = %d, physical link handle = 0x%x, logical link handle = 0x%x\n",
+ EntryNum, pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle,
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtLogLinkhandle));
+ memcpy(&pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].Tx_Flow_Spec,
+ &TxFlowSpec, sizeof(struct hci_flow_spec));
+ memcpy(&pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].Rx_Flow_Spec,
+ &RxFlowSpec, sizeof(struct hci_flow_spec));
+
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].bLLCompleteEventIsSet = false;
+
+ if (pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].bLLCancelCMDIsSetandComplete)
+ LogCompEventstatus = HCI_STATUS_UNKNOW_CONNECT_ID;
+ bthci_EventLogicalLinkComplete(padapter,
+ LogCompEventstatus,
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtPhyLinkhandle,
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtLogLinkhandle, i, EntryNum);
+
+ pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].bLLCompleteEventIsSet = true;
+
+ find = 1;
+ pBtMgnt->BtCurrentLogLinkhandle = AssignLogHandle;
+ AssignLogHandle++;
+ break;
+ }
+ }
+
+ if (!find) {
+ bthci_EventLogicalLinkComplete(padapter,
+ HCI_STATUS_CONNECT_RJT_LIMIT_RESOURCE, 0, 0, 0, EntryNum);
+ }
+ pBtMgnt->bLogLinkInProgress = false;
+ }
+ } else {
+ bthci_EventLogicalLinkComplete(padapter,
+ HCI_STATUS_CONTROLLER_BUSY, 0, 0, 0, EntryNum);
+ }
+}
+
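+/*
+ * Bring the AMP link up on the WLAN side: build an "AMP-xx-xx-xx-xx-xx-xx"
+ * SSID from the local MAC (creator) or the remote MAC (joiner), then start
+ * the beacon or the connect sequence, guarded so it only runs once per
+ * connection attempt.
+ */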
+static void
+bthci_StartBeaconAndConnect(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd,
+ u8 CurrentAssocNum
+ )
+{
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("StartBeaconAndConnect, CurrentAssocNum =%d, AMPRole =%d\n",
+ CurrentAssocNum,
+ pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole));
+
+ if (!pBtMgnt->CheckChnlIsSuit) {
+ bthci_EventPhysicalLinkComplete(padapter, HCI_STATUS_CONNECT_REJ_NOT_SUIT_CHNL_FOUND, CurrentAssocNum, INVALID_PL_HANDLE);
+ bthci_RemoveEntryByEntryNum(padapter, CurrentAssocNum);
+ return;
+ }
+
+ if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_CREATOR) {
+ rsprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32, "AMP-%02x-%02x-%02x-%02x-%02x-%02x",
+ padapter->eeprompriv.mac_addr[0],
+ padapter->eeprompriv.mac_addr[1],
+ padapter->eeprompriv.mac_addr[2],
+ padapter->eeprompriv.mac_addr[3],
+ padapter->eeprompriv.mac_addr[4],
+ padapter->eeprompriv.mac_addr[5]);
+ } else if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_JOINER) {
+ rsprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32, "AMP-%02x-%02x-%02x-%02x-%02x-%02x",
+ pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[0],
+ pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[1],
+ pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[2],
+ pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[3],
+ pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[4],
+ pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[5]);
+ }
+
+ FillOctetString(pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsid, pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 21);
+ pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsid.Length = 21;
+
+	/* Avoid starting the AP or connecting twice, otherwise the original connection would be disconnected. */
+ if (!pBtMgnt->bBTConnectInProgress) {
+ pBtMgnt->bBTConnectInProgress = true;
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], BT Connect in progress ON!!\n"));
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_STARTING, STATE_CMD_MAC_START_COMPLETE, CurrentAssocNum);
+
+ /* 20100325 Joseph: Check RF ON/OFF. */
+		/* If RF is OFF, the connect operation is rescheduled after 50 ms. */
+ if (!bthci_CheckRfStateBeforeConnect(padapter))
+ return;
+
+ if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_CREATOR) {
+ /* These macros need braces */
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_CONNECTING, STATE_CMD_MAC_CONNECT_COMPLETE, CurrentAssocNum);
+ } else if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_JOINER) {
+ bthci_ResponderStartToScan(padapter);
+ }
+ }
+ RT_PRINT_STR(_module_rtl871x_mlme_c_, _drv_notice_,
+ "StartBeaconAndConnect, SSID:\n",
+ pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].BTSsid.Octet,
+ pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].BTSsid.Length);
+}
+
+static void bthci_ResetBtMgnt(struct bt_mgnt *pBtMgnt)
+{
+ pBtMgnt->BtOperationOn = false;
+ pBtMgnt->bBTConnectInProgress = false;
+ pBtMgnt->bLogLinkInProgress = false;
+ pBtMgnt->bPhyLinkInProgress = false;
+ pBtMgnt->bPhyLinkInProgressStartLL = false;
+ pBtMgnt->DisconnectEntryNum = 0xff;
+ pBtMgnt->bStartSendSupervisionPkt = false;
+ pBtMgnt->JoinerNeedSendAuth = false;
+ pBtMgnt->CurrentBTConnectionCnt = 0;
+ pBtMgnt->BTCurrentConnectType = BT_DISCONNECT;
+ pBtMgnt->BTReceiveConnectPkt = BT_DISCONNECT;
+ pBtMgnt->BTAuthCount = 0;
+ pBtMgnt->btLogoTest = 0;
+}
+
+static void bthci_ResetBtHciInfo(struct bt_hci_info *pBtHciInfo)
+{
+ pBtHciInfo->BTEventMask = 0;
+ pBtHciInfo->BTEventMaskPage2 = 0;
+ pBtHciInfo->ConnAcceptTimeout = 10000;
+ pBtHciInfo->PageTimeout = 0x30;
+ pBtHciInfo->LocationDomainAware = 0x0;
+ pBtHciInfo->LocationDomain = 0x5858;
+ pBtHciInfo->LocationDomainOptions = 0x58;
+ pBtHciInfo->LocationOptions = 0x0;
+	pBtHciInfo->FlowControlMode = 0x1;	/* 0: packet-based data flow control (BR/EDR), 1: data-block-based data flow control (AMP). */
+
+ pBtHciInfo->enFlush_LLH = 0;
+ pBtHciInfo->FLTO_LLH = 0;
+
+ /* Test command only */
+ pBtHciInfo->bTestIsEnd = true;
+ pBtHciInfo->bInTestMode = false;
+ pBtHciInfo->bTestNeedReport = false;
+ pBtHciInfo->TestScenario = 0xff;
+ pBtHciInfo->TestReportInterval = 0x01;
+ pBtHciInfo->TestCtrType = 0x5d;
+ pBtHciInfo->TestEventType = 0x00;
+ pBtHciInfo->TestNumOfFrame = 0;
+ pBtHciInfo->TestNumOfErrFrame = 0;
+ pBtHciInfo->TestNumOfBits = 0;
+ pBtHciInfo->TestNumOfErrBits = 0;
+}
+
+static void bthci_ResetBtSec(struct rtw_adapter *padapter, struct bt_security *pBtSec)
+{
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+
+	/* Select HW or SW encryption for BT. */
+ if (GET_HAL_DATA(padapter)->bBTMode)
+ pBtSec->bUsedHwEncrypt = true;
+ else
+ pBtSec->bUsedHwEncrypt = false;
+ RT_TRACE(_module_rtl871x_security_c_, _drv_info_, ("%s: bUsedHwEncrypt =%d\n", __func__, pBtSec->bUsedHwEncrypt));
+
+ pBtSec->RSNIE.Octet = pBtSec->RSNIEBuf;
+}
+
+static void bthci_ResetBtExtInfo(struct bt_mgnt *pBtMgnt)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
+ pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle = 0;
+ pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode = 0;
+ pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode = 0;
+ pBtMgnt->ExtConfig.linkInfo[i].BTProfile = BT_PROFILE_NONE;
+ pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec = BT_SPEC_2_1_EDR;
+ pBtMgnt->ExtConfig.linkInfo[i].BT_RSSI = 0;
+ pBtMgnt->ExtConfig.linkInfo[i].TrafficProfile = BT_PROFILE_NONE;
+ pBtMgnt->ExtConfig.linkInfo[i].linkRole = BT_LINK_MASTER;
+ }
+
+ pBtMgnt->ExtConfig.CurrentConnectHandle = 0;
+ pBtMgnt->ExtConfig.CurrentIncomingTrafficMode = 0;
+ pBtMgnt->ExtConfig.CurrentOutgoingTrafficMode = 0;
+ pBtMgnt->ExtConfig.MIN_BT_RSSI = 0;
+ pBtMgnt->ExtConfig.NumberOfHandle = 0;
+ pBtMgnt->ExtConfig.NumberOfSCO = 0;
+ pBtMgnt->ExtConfig.CurrentBTStatus = 0;
+ pBtMgnt->ExtConfig.HCIExtensionVer = 0;
+
+ pBtMgnt->ExtConfig.bManualControl = false;
+ pBtMgnt->ExtConfig.bBTBusy = false;
+ pBtMgnt->ExtConfig.bBTA2DPBusy = false;
+}
+
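+/*
+ * HCI_RESET handler: reset every association entry plus the management,
+ * HCI, security and extension state, stop the beacon and ACL-discard
+ * timers and, when requested, return a Command Complete event carrying
+ * the status.
+ */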
+static enum hci_status bthci_CmdReset(struct rtw_adapter *_padapter, u8 bNeedSendEvent)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct rtw_adapter *padapter;
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+ struct bt_hci_info *pBtHciInfo;
+ struct bt_security *pBtSec;
+ struct bt_dgb *pBtDbg;
+ u8 i;
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("bthci_CmdReset()\n"));
+
+ padapter = GetDefaultAdapter(_padapter);
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+ pBtHciInfo = &pBTInfo->BtHciInfo;
+ pBtSec = &pBTInfo->BtSec;
+ pBtDbg = &pBTInfo->BtDbg;
+
+ pBTInfo->padapter = padapter;
+
+ for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++)
+ bthci_ResetEntry(padapter, i);
+
+ bthci_ResetBtMgnt(pBtMgnt);
+ bthci_ResetBtHciInfo(pBtHciInfo);
+ bthci_ResetBtSec(padapter, pBtSec);
+
+ pBtMgnt->BTChannel = BT_Default_Chnl;
+ pBtMgnt->CheckChnlIsSuit = true;
+
+ pBTInfo->BTBeaconTmrOn = false;
+
+ pBtMgnt->bCreateSpportQos = true;
+
+ del_timer_sync(&pBTInfo->BTHCIDiscardAclDataTimer);
+ del_timer_sync(&pBTInfo->BTBeaconTimer);
+
+ HALBT_SetRtsCtsNoLenLimit(padapter);
+	/* Maybe we need to handle the Group cipher != AES case; */
+	/* for now both pairwise and group ciphers use AES. */
+
+ bthci_ResetBtExtInfo(pBtMgnt);
+
+ /* send command complete event here when all data are received. */
+ if (bNeedSendEvent) {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_RESET,
+ status);
+
+		/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
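+/*
+ * HCI_WRITE_REMOTE_AMP_ASSOC handler: the command carries the physical
+ * link handle, Length_So_Far, Remaining_Length and an AMP_ASSOC fragment.
+ * Fragments are accumulated in the entry's reassembly buffer; once the
+ * final (short) fragment arrives it is parsed, the write is acknowledged
+ * and the beacon/connect sequence is started.
+ */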
+static enum hci_status
+bthci_CmdWriteRemoteAMPAssoc(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+ u8 CurrentAssocNum;
+ u8 PhyLinkHandle;
+
+ pBtDbg->dbgHciInfo.hciCmdCntWriteRemoteAmpAssoc++;
+ PhyLinkHandle = *((u8 *)pHciCmd->Data);
+ CurrentAssocNum = bthci_GetCurrentEntryNum(padapter, PhyLinkHandle);
+
+ if (CurrentAssocNum == 0xff) {
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("WriteRemoteAMPAssoc, No such Handle in the Entry\n"));
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+ bthci_EventWriteRemoteAmpAssoc(padapter, status, PhyLinkHandle);
+ return status;
+ }
+
+ if (pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocfragment == NULL) {
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("WriteRemoteAMPAssoc, AMP controller is busy\n"));
+ status = HCI_STATUS_CONTROLLER_BUSY;
+ bthci_EventWriteRemoteAmpAssoc(padapter, status, PhyLinkHandle);
+ return status;
+ }
+
+	pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.BtPhyLinkhandle = PhyLinkHandle;
+ pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.LenSoFar = *((u16 *)((u8 *)pHciCmd->Data+1));
+ pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen = *((u16 *)((u8 *)pHciCmd->Data+3));
+
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("WriteRemoteAMPAssoc, LenSoFar = 0x%x, AssocRemLen = 0x%x\n",
+ pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.LenSoFar,
+ pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen));
+
+ RTPRINT_DATA(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO),
+ ("WriteRemoteAMPAssoc fragment \n"),
+ pHciCmd->Data,
+ pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen+5);
+ if ((pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen) > MAX_AMP_ASSOC_FRAG_LEN) {
+ memcpy(((u8 *)pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocfragment+(pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.LenSoFar*(sizeof(u8)))),
+ (u8 *)pHciCmd->Data+5,
+ MAX_AMP_ASSOC_FRAG_LEN);
+ } else {
+ memcpy((u8 *)(pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocfragment)+(pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.LenSoFar*(sizeof(u8))),
+ ((u8 *)pHciCmd->Data+5),
+ (pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen));
+
+ RTPRINT_DATA(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), "WriteRemoteAMPAssoc :\n",
+ pHciCmd->Data+5, pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen);
+
+ if (!bthci_GetAssocInfo(padapter, CurrentAssocNum))
+ status = HCI_STATUS_INVALID_HCI_CMD_PARA_VALUE;
+
+ bthci_EventWriteRemoteAmpAssoc(padapter, status, PhyLinkHandle);
+
+ bthci_StartBeaconAndConnect(padapter, pHciCmd, CurrentAssocNum);
+ }
+
+ return status;
+}
+
+/* 7.3.13 */
+static enum hci_status bthci_CmdReadConnectionAcceptTimeout(struct rtw_adapter *padapter)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[8] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 *pu2Temp;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_READ_CONNECTION_ACCEPT_TIMEOUT,
+ status);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pu2Temp = (u16 *)&pRetPar[1]; /* Conn_Accept_Timeout */
+ *pu2Temp = pBtHciInfo->ConnAcceptTimeout;
+ len += 3;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+/* 7.3.3 */
+static enum hci_status
+bthci_CmdSetEventFilter(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+
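+	/* Event filtering is not implemented; the command is simply accepted. */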
+ return status;
+}
+
+/* 7.3.14 */
+static enum hci_status
+bthci_CmdWriteConnectionAcceptTimeout(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u16 *pu2Temp;
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ pu2Temp = (u16 *)&pHciCmd->Data[0];
+ pBtHciInfo->ConnAcceptTimeout = *pu2Temp;
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("ConnAcceptTimeout = 0x%x",
+ pBtHciInfo->ConnAcceptTimeout));
+
+ /* send command complete event here when all data are received. */
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT,
+ status);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdReadPageTimeout(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[8] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 *pu2Temp;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_READ_PAGE_TIMEOUT,
+ status);
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Read PageTimeout = 0x%x\n", pBtHciInfo->PageTimeout));
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pu2Temp = (u16 *)&pRetPar[1]; /* Page_Timeout */
+ *pu2Temp = pBtHciInfo->PageTimeout;
+ len += 3;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdWritePageTimeout(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u16 *pu2Temp;
+
+ pu2Temp = (u16 *)&pHciCmd->Data[0];
+ pBtHciInfo->PageTimeout = *pu2Temp;
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Write PageTimeout = 0x%x\n",
+ pBtHciInfo->PageTimeout));
+
+ /* send command complete event here when all data are received. */
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_WRITE_PAGE_TIMEOUT,
+ status);
+
+		/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdReadLinkSupervisionTimeout(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
+ u8 physicalLinkHandle, EntryNum;
+
+ physicalLinkHandle = *((u8 *)pHciCmd->Data);
+
+ EntryNum = bthci_GetCurrentEntryNum(padapter, physicalLinkHandle);
+
+ if (EntryNum == 0xff) {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("ReadLinkSupervisionTimeout, No such Handle in the Entry\n"));
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+ return status;
+ }
+
+ if (pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle != physicalLinkHandle)
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+
+ {
+ u8 localBuf[10] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 *pu2Temp;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_READ_LINK_SUPERVISION_TIMEOUT,
+ status);
+
+		/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status;
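+		/* 16-bit Connection_Handle: physical link handle in the low byte, high byte zero. */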
+ pRetPar[1] = pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
+ pRetPar[2] = 0;
+		pu2Temp = (u16 *)&pRetPar[3];	/* Link_Supervision_Timeout */
+ *pu2Temp = pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.LinkSuperversionTimeout;
+ len += 5;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdWriteLinkSupervisionTimeout(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
+ u8 physicalLinkHandle, EntryNum;
+
+ physicalLinkHandle = *((u8 *)pHciCmd->Data);
+
+ EntryNum = bthci_GetCurrentEntryNum(padapter, physicalLinkHandle);
+
+ if (EntryNum == 0xff) {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("WriteLinkSupervisionTimeout, No such Handle in the Entry\n"));
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+ } else {
+ if (pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle != physicalLinkHandle) {
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+ } else {
+ pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.LinkSuperversionTimeout = *((u16 *)(((u8 *)pHciCmd->Data)+2));
+ RTPRINT(FIOCTL, IOCTL_STATE, ("BT Write LinkSuperversionTimeout[%d] = 0x%x\n",
+ EntryNum, pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.LinkSuperversionTimeout));
+ }
+ }
+
+ {
+ u8 localBuf[8] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_WRITE_LINK_SUPERVISION_TIMEOUT,
+ status);
+
+		/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status;
+ pRetPar[1] = pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
+ pRetPar[2] = 0;
+ len += 3;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
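+/*
+ * HCI_ENHANCED_FLUSH handler: only packet type 0 (automatically flushable
+ * packets) is accepted.  Pending Tx packets on the logical link are
+ * discarded (raising a Flush Occurred event), and an Enhanced Flush
+ * Complete event follows the Command Status event.
+ */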
+static enum hci_status
+bthci_CmdEnhancedFlush(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTinfo->BtHciInfo;
+ u16 logicHandle;
+ u8 Packet_Type;
+
+ logicHandle = *((u16 *)&pHciCmd->Data[0]);
+ Packet_Type = pHciCmd->Data[2];
+
+ if (Packet_Type != 0)
+ status = HCI_STATUS_INVALID_HCI_CMD_PARA_VALUE;
+ else
+ pBtHciInfo->enFlush_LLH = logicHandle;
+
+ if (bthci_DiscardTxPackets(padapter, pBtHciInfo->enFlush_LLH))
+ bthci_EventFlushOccurred(padapter, pBtHciInfo->enFlush_LLH);
+
+ /* should send command status event */
+ bthci_EventCommandStatus(padapter,
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_ENHANCED_FLUSH,
+ status);
+
+ if (pBtHciInfo->enFlush_LLH) {
+ bthci_EventEnhancedFlushComplete(padapter, pBtHciInfo->enFlush_LLH);
+ pBtHciInfo->enFlush_LLH = 0;
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdReadLogicalLinkAcceptTimeout(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[8] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 *pu2Temp;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_READ_LOGICAL_LINK_ACCEPT_TIMEOUT,
+ status);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status;
+
+	pu2Temp = (u16 *)&pRetPar[1];	/* Logical_Link_Accept_Timeout */
+ *pu2Temp = pBtHciInfo->LogicalAcceptTimeout;
+ len += 3;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdWriteLogicalLinkAcceptTimeout(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ pBtHciInfo->LogicalAcceptTimeout = *((u16 *)pHciCmd->Data);
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_WRITE_LOGICAL_LINK_ACCEPT_TIMEOUT,
+ status);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status;
+
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ return status;
+}
+
+static enum hci_status
+bthci_CmdSetEventMask(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 *pu8Temp;
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ pu8Temp = (u8 *)&pHciCmd->Data[0];
+ pBtHciInfo->BTEventMask = *pu8Temp;
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("BTEventMask = 0x%"i64fmt"x\n",
+ pBtHciInfo->BTEventMask));
+
+ /* send command complete event here when all data are received. */
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_SET_EVENT_MASK,
+ status);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+/* 7.3.69 */
+static enum hci_status
+bthci_CmdSetEventMaskPage2(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 *pu8Temp;
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ pu8Temp = (u8 *)&pHciCmd->Data[0];
+ pBtHciInfo->BTEventMaskPage2 = *pu8Temp;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("BTEventMaskPage2 = 0x%"i64fmt"x\n",
+ pBtHciInfo->BTEventMaskPage2));
+
+ /* send command complete event here when all data are received. */
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_SET_EVENT_MASK_PAGE_2,
+ status);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdReadLocationData(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[12] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 *pu2Temp;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_READ_LOCATION_DATA,
+ status);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DomainAware = 0x%x\n", pBtHciInfo->LocationDomainAware));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Domain = 0x%x\n", pBtHciInfo->LocationDomain));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DomainOptions = 0x%x\n", pBtHciInfo->LocationDomainOptions));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Options = 0x%x\n", pBtHciInfo->LocationOptions));
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status;
+
+ pRetPar[1] = pBtHciInfo->LocationDomainAware; /* 0x0; Location_Domain_Aware */
+ pu2Temp = (u16 *)&pRetPar[2]; /* Location_Domain */
+ *pu2Temp = pBtHciInfo->LocationDomain; /* 0x5858; */
+ pRetPar[4] = pBtHciInfo->LocationDomainOptions; /* 0x58; Location_Domain_Options */
+ pRetPar[5] = pBtHciInfo->LocationOptions; /* 0x0; Location_Options */
+ len += 6;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ return status;
+}
+
+static enum hci_status
+bthci_CmdWriteLocationData(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u16 *pu2Temp;
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ pBtHciInfo->LocationDomainAware = pHciCmd->Data[0];
+ pu2Temp = (u16 *)&pHciCmd->Data[1];
+ pBtHciInfo->LocationDomain = *pu2Temp;
+ pBtHciInfo->LocationDomainOptions = pHciCmd->Data[3];
+ pBtHciInfo->LocationOptions = pHciCmd->Data[4];
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DomainAware = 0x%x\n", pBtHciInfo->LocationDomainAware));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Domain = 0x%x\n", pBtHciInfo->LocationDomain));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DomainOptions = 0x%x\n", pBtHciInfo->LocationDomainOptions));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Options = 0x%x\n", pBtHciInfo->LocationOptions));
+
+ /* send command complete event here when all data are received. */
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_WRITE_LOCATION_DATA,
+ status);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdReadFlowControlMode(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[7] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_READ_FLOW_CONTROL_MODE,
+ status);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status;
+ pRetPar[1] = pBtHciInfo->FlowControlMode; /* Flow Control Mode */
+ len += 2;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ return status;
+}
+
+static enum hci_status
+bthci_CmdWriteFlowControlMode(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ pBtHciInfo->FlowControlMode = pHciCmd->Data[0];
+
+ /* send command complete event here when all data are received. */
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_WRITE_FLOW_CONTROL_MODE,
+ status);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdReadBestEffortFlushTimeout(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
+ u16 i, j, logicHandle;
+ u32 BestEffortFlushTimeout = 0xffffffff;
+ u8 find = 0;
+
+ logicHandle = *((u16 *)pHciCmd->Data);
+	/* find a matching logical link index and copy the data */
+ for (j = 0; j < MAX_BT_ASOC_ENTRY_NUM; j++) {
+ for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
+ if (pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle == logicHandle) {
+ BestEffortFlushTimeout = pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BestEffortFlushTimeout;
+ find = 1;
+ break;
+ }
+ }
+ }
+
+ if (!find)
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+
+ {
+ u8 localBuf[10] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u32 *pu4Temp;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_READ_BEST_EFFORT_FLUSH_TIMEOUT,
+ status);
+
+		/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status;
+ pu4Temp = (u32 *)&pRetPar[1]; /* Best_Effort_Flush_Timeout */
+ *pu4Temp = BestEffortFlushTimeout;
+ len += 5;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+ return status;
+}
+
+static enum hci_status
+bthci_CmdWriteBestEffortFlushTimeout(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
+ u16 i, j, logicHandle;
+ u32 BestEffortFlushTimeout = 0xffffffff;
+ u8 find = 0;
+
+ logicHandle = *((u16 *)pHciCmd->Data);
+ BestEffortFlushTimeout = *((u32 *)(pHciCmd->Data+1));
+
+	/* find a matching logical link index and copy the data */
+ for (j = 0; j < MAX_BT_ASOC_ENTRY_NUM; j++) {
+ for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
+ if (pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle == logicHandle) {
+ pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BestEffortFlushTimeout = BestEffortFlushTimeout;
+ find = 1;
+ break;
+ }
+ }
+ }
+
+ if (!find)
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_WRITE_BEST_EFFORT_FLUSH_TIMEOUT,
+ status);
+
+		/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status;
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+ return status;
+}
+
+static enum hci_status
+bthci_CmdShortRangeMode(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ u8 PhyLinkHandle, EntryNum, ShortRangeMode;
+
+ PhyLinkHandle = pHciCmd->Data[0];
+ ShortRangeMode = pHciCmd->Data[1];
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("PLH = 0x%x, Short_Range_Mode = 0x%x\n", PhyLinkHandle, ShortRangeMode));
+
+ EntryNum = bthci_GetCurrentEntryNum(padapter, PhyLinkHandle);
+ if (EntryNum != 0xff) {
+ pBTInfo->BtAsocEntry[EntryNum].ShortRangeMode = ShortRangeMode;
+ } else {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("No such PLH(0x%x)\n", PhyLinkHandle));
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+ }
+
+ bthci_EventCommandStatus(padapter,
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_SHORT_RANGE_MODE,
+ status);
+
+ bthci_EventShortRangeModeChangeComplete(padapter, status, ShortRangeMode, EntryNum);
+
+ return status;
+}
+
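+/*
+ * HCI_READ_LOCAL_SUPPORTED_COMMANDS handler: return the 64-octet Supported
+ * Commands bitmask.  Only the octet/bit positions for the commands this
+ * AMP controller implements are set; the layout follows the Bluetooth
+ * "Supported Commands" table.
+ */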
+static enum hci_status bthci_CmdReadLocalSupportedCommands(struct rtw_adapter *padapter)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ u8 localBuf[TmpLocalBufSize] = "";
+ u8 *pRetPar, *pSupportedCmds;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ /* send command complete event here when all data are received. */
+ PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_INFORMATIONAL_PARAMETERS,
+ HCI_READ_LOCAL_SUPPORTED_COMMANDS,
+ status);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ pSupportedCmds = &pRetPar[1];
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[5]= 0xc0\nBit [6]= Set Event Mask, [7]= Reset\n"));
+ pSupportedCmds[5] = 0xc0;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[6]= 0x01\nBit [0]= Set Event Filter\n"));
+ pSupportedCmds[6] = 0x01;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[7]= 0x0c\nBit [2]= Read Connection Accept Timeout, [3]= Write Connection Accept Timeout\n"));
+ pSupportedCmds[7] = 0x0c;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[10]= 0x80\nBit [7]= Host Number Of Completed Packets\n"));
+ pSupportedCmds[10] = 0x80;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[11]= 0x03\nBit [0]= Read Link Supervision Timeout, [1]= Write Link Supervision Timeout\n"));
+ pSupportedCmds[11] = 0x03;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[14]= 0xa8\nBit [3]= Read Local Version Information, [5]= Read Local Supported Features, [7]= Read Buffer Size\n"));
+ pSupportedCmds[14] = 0xa8;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[15]= 0x1c\nBit [2]= Read Failed Contact Count, [3]= Reset Failed Contact Count, [4]= Get Link Quality\n"));
+ pSupportedCmds[15] = 0x1c;
+ /* pSupportedCmds[16] = 0x04; */
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[19]= 0x40\nBit [6]= Enhanced Flush\n"));
+ pSupportedCmds[19] = 0x40;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[21]= 0xff\nBit [0]= Create Physical Link, [1]= Accept Physical Link, [2]= Disconnect Physical Link, [3]= Create Logical Link\n"));
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), (" [4]= Accept Logical Link, [5]= Disconnect Logical Link, [6]= Logical Link Cancel, [7]= Flow Spec Modify\n"));
+ pSupportedCmds[21] = 0xff;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[22]= 0xff\nBit [0]= Read Logical Link Accept Timeout, [1]= Write Logical Link Accept Timeout, [2]= Set Event Mask Page 2, [3]= Read Location Data\n"));
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), (" [4]= Write Location Data, [5]= Read Local AMP Info, [6]= Read Local AMP_ASSOC, [7]= Write Remote AMP_ASSOC\n"));
+ pSupportedCmds[22] = 0xff;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[23]= 0x07\nBit [0]= Read Flow Control Mode, [1]= Write Flow Control Mode, [2]= Read Data Block Size\n"));
+ pSupportedCmds[23] = 0x07;
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[24]= 0x1c\nBit [2]= Read Best Effort Flush Timeout, [3]= Write Best Effort Flush Timeout, [4]= Short Range Mode\n"));
+ pSupportedCmds[24] = 0x1c;
+ len += 64;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+static enum hci_status bthci_CmdReadLocalSupportedFeatures(struct rtw_adapter *padapter)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ u8 localBuf[TmpLocalBufSize] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ /* send command complete event here when all data are received. */
+ PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_INFORMATIONAL_PARAMETERS,
+ HCI_READ_LOCAL_SUPPORTED_FEATURES,
+ status);
+
+	/* Return parameters start from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 9;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ return status;
+}
+
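+/*
+ * HCI_READ_LOCAL_AMP_ASSOC handler: reply with a Command Complete event
+ * whose payload is the local AMP_ASSOC block, assembled from the MAC
+ * address, preferred channel list, PAL capabilities and PAL version TLVs,
+ * with AMP_ASSOC_Remaining_Length filled in afterwards.
+ */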
+static enum hci_status bthci_CmdReadLocalAMPAssoc(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+ u8 PhyLinkHandle, EntryNum;
+
+ pBtDbg->dbgHciInfo.hciCmdCntReadLocalAmpAssoc++;
+ PhyLinkHandle = *((u8 *)pHciCmd->Data);
+ EntryNum = bthci_GetCurrentEntryNum(padapter, PhyLinkHandle);
+
+ if ((EntryNum == 0xff) && PhyLinkHandle != 0) {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("ReadLocalAMPAssoc, EntryNum = %d !!!!!, physical link handle = 0x%x\n",
+ EntryNum, PhyLinkHandle));
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+ } else if (pBtMgnt->bPhyLinkInProgressStartLL) {
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+ pBtMgnt->bPhyLinkInProgressStartLL = false;
+ } else {
+ pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.BtPhyLinkhandle = *((u8 *)pHciCmd->Data);
+ pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar = *((u16 *)((u8 *)pHciCmd->Data+1));
+ pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.MaxRemoteASSOCLen = *((u16 *)((u8 *)pHciCmd->Data+3));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("ReadLocalAMPAssoc, LenSoFar =%d, MaxRemoteASSOCLen =%d\n",
+ pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar,
+ pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.MaxRemoteASSOCLen));
+ }
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("ReadLocalAMPAssoc, EntryNum = %d !!!!!, physical link handle = 0x%x, LengthSoFar = %x \n",
+ EntryNum, PhyLinkHandle, pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar));
+
+ /* send command complete event here when all data are received. */
+ {
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ /* PVOID buffer = padapter->IrpHCILocalbuf.Ptr; */
+ u8 localBuf[TmpLocalBufSize] = "";
+ u16 *pRemainLen;
+ u32 totalLen = 0;
+ u16 typeLen = 0, remainLen = 0, ret_index = 0;
+ u8 *pRetPar;
+
+ PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
+ /* PPacketIrpEvent = (struct packet_irp_hcievent_data *)(buffer); */
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ totalLen += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_STATUS_PARAMETERS,
+ HCI_READ_LOCAL_AMP_ASSOC,
+ status);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("ReadLocalAMPAssoc, Remaining_Len =%d \n", remainLen));
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[totalLen];
+ pRetPar[0] = status; /* status */
+ pRetPar[1] = *((u8 *)pHciCmd->Data);
+ pRemainLen = (u16 *)&pRetPar[2]; /* AMP_ASSOC_Remaining_Length */
+		totalLen += 4; /* [0]~[3] */
+ ret_index = 4;
+
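+		/* Append the AMP_ASSOC fragment TLVs: MAC address, preferred channel list, PAL capabilities and PAL version. */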
+ typeLen = bthci_AssocMACAddr(padapter, &pRetPar[ret_index]);
+ totalLen += typeLen;
+ remainLen += typeLen;
+ ret_index += typeLen;
+ typeLen = bthci_AssocPreferredChannelList(padapter, &pRetPar[ret_index], EntryNum);
+ totalLen += typeLen;
+ remainLen += typeLen;
+ ret_index += typeLen;
+ typeLen = bthci_PALCapabilities(padapter, &pRetPar[ret_index]);
+ totalLen += typeLen;
+ remainLen += typeLen;
+ ret_index += typeLen;
+ typeLen = bthci_AssocPALVer(padapter, &pRetPar[ret_index]);
+ totalLen += typeLen;
+ remainLen += typeLen;
+ PPacketIrpEvent->Length = (u8)totalLen;
+ *pRemainLen = remainLen; /* AMP_ASSOC_Remaining_Length */
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("ReadLocalAMPAssoc, Remaining_Len =%d \n", remainLen));
+ RTPRINT_DATA(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("AMP_ASSOC_fragment : \n"), PPacketIrpEvent->Data, totalLen);
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, totalLen+2);
+ }
+
+ return status;
+}
+
+static enum hci_status bthci_CmdReadFailedContactCounter(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[TmpLocalBufSize] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 handle;
+
+ handle = *((u16 *)pHciCmd->Data);
+ /* send command complete event here when all data are received. */
+ PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_STATUS_PARAMETERS,
+ HCI_READ_FAILED_CONTACT_COUNTER,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pRetPar[1] = TWOBYTE_LOWBYTE(handle);
+ pRetPar[2] = TWOBYTE_HIGHTBYTE(handle);
+ pRetPar[3] = TWOBYTE_LOWBYTE(pBtHciInfo->FailContactCount);
+ pRetPar[4] = TWOBYTE_HIGHTBYTE(pBtHciInfo->FailContactCount);
+ len += 5;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdResetFailedContactCounter(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u16 handle;
+ u8 localBuf[TmpLocalBufSize] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ handle = *((u16 *)pHciCmd->Data);
+ pBtHciInfo->FailContactCount = 0;
+
+ /* send command complete event here when all data are received. */
+ PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
+ /* PPacketIrpEvent = (struct packet_irp_hcievent_data *)(buffer); */
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_STATUS_PARAMETERS,
+ HCI_RESET_FAILED_CONTACT_COUNTER,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pRetPar[1] = TWOBYTE_LOWBYTE(handle);
+ pRetPar[2] = TWOBYTE_HIGHTBYTE(handle);
+ len += 3;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ return status;
+}
+
+/* BT 3.0+HS [Vol 2] 7.4.1 */
+static enum hci_status
+bthci_CmdReadLocalVersionInformation(
+ struct rtw_adapter *padapter
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ /* send command complete event here when all data are received. */
+ u8 localBuf[TmpLocalBufSize] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 *pu2Temp;
+
+ PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_INFORMATIONAL_PARAMETERS,
+ HCI_READ_LOCAL_VERSION_INFORMATION,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pRetPar[1] = 0x05; /* HCI_Version */
+ pu2Temp = (u16 *)&pRetPar[2]; /* HCI_Revision */
+ *pu2Temp = 0x0001;
+ pRetPar[4] = 0x05; /* LMP/PAL_Version */
+ pu2Temp = (u16 *)&pRetPar[5]; /* Manufacturer_Name */
+ *pu2Temp = 0x005d;
+ pu2Temp = (u16 *)&pRetPar[7]; /* LMP/PAL_Subversion */
+ *pu2Temp = 0x0001;
+ len += 9;
+ PPacketIrpEvent->Length = len;
+
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("LOCAL_VERSION_INFORMATION\n"));
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("Status %x\n", status));
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("HCI_Version = 0x05\n"));
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("HCI_Revision = 0x0001\n"));
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("LMP/PAL_Version = 0x05\n"));
+	RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("Manufacturer_Name = 0x005d\n"));
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("LMP/PAL_Subversion = 0x0001\n"));
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+/* 7.4.7 */
+static enum hci_status bthci_CmdReadDataBlockSize(struct rtw_adapter *padapter)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ u8 localBuf[TmpLocalBufSize] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 *pu2Temp;
+
+ PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_INFORMATIONAL_PARAMETERS,
+ HCI_READ_DATA_BLOCK_SIZE,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = HCI_STATUS_SUCCESS; /* status */
+ pu2Temp = (u16 *)&pRetPar[1]; /* Max_ACL_Data_Packet_Length */
+ *pu2Temp = Max80211PALPDUSize;
+
+ pu2Temp = (u16 *)&pRetPar[3]; /* Data_Block_Length */
+ *pu2Temp = Max80211PALPDUSize;
+ pu2Temp = (u16 *)&pRetPar[5]; /* Total_Num_Data_Blocks */
+ *pu2Temp = BTTotalDataBlockNum;
+ len += 7;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+/* 7.4.5 */
+static enum hci_status bthci_CmdReadBufferSize(struct rtw_adapter *padapter)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ u8 localBuf[TmpLocalBufSize] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 *pu2Temp;
+
+ PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
+ /* PPacketIrpEvent = (struct packet_irp_hcievent_data *)(buffer); */
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_INFORMATIONAL_PARAMETERS,
+ HCI_READ_BUFFER_SIZE,
+ status);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Synchronous_Data_Packet_Length = 0x%x\n", BTSynDataPacketLength));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Total_Num_ACL_Data_Packets = 0x%x\n", BTTotalDataBlockNum));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Total_Num_Synchronous_Data_Packets = 0x%x\n", BTTotalDataBlockNum));
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pu2Temp = (u16 *)&pRetPar[1]; /* HC_ACL_Data_Packet_Length */
+ *pu2Temp = Max80211PALPDUSize;
+
+ pRetPar[3] = BTSynDataPacketLength; /* HC_Synchronous_Data_Packet_Length */
+ pu2Temp = (u16 *)&pRetPar[4]; /* HC_Total_Num_ACL_Data_Packets */
+ *pu2Temp = BTTotalDataBlockNum;
+ pu2Temp = (u16 *)&pRetPar[6]; /* HC_Total_Num_Synchronous_Data_Packets */
+ *pu2Temp = BTTotalDataBlockNum;
+ len += 8;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+static enum hci_status bthci_CmdReadLocalAMPInfo(struct rtw_adapter *padapter)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct pwrctrl_priv *ppwrctrl = &padapter->pwrctrlpriv;
+ u8 localBuf[TmpLocalBufSize] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 *pu2Temp;
+ u32 *pu4Temp;
+ u32 TotalBandwidth = BTTOTALBANDWIDTH, MaxBandGUBandwidth = BTMAXBANDGUBANDWIDTH;
+ u8 ControlType = 0x01, AmpStatus = 0x01;
+ u32 MaxFlushTimeout = 10000, BestEffortFlushTimeout = 5000;
+ u16 MaxPDUSize = Max80211PALPDUSize, PalCap = 0x1, AmpAssocLen = Max80211AMPASSOCLen, MinLatency = 20;
+
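+	/* If the radio has been switched off (by HW or SW), report that the AMP has no capacity for BT. */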
+ if ((ppwrctrl->rfoff_reason & RF_CHANGE_BY_HW) ||
+ (ppwrctrl->rfoff_reason & RF_CHANGE_BY_SW)) {
+ AmpStatus = AMP_STATUS_NO_CAPACITY_FOR_BT;
+ }
+
+ PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
+ /* PPacketIrpEvent = (struct packet_irp_hcievent_data *)(buffer); */
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_STATUS_PARAMETERS,
+ HCI_READ_LOCAL_AMP_INFO,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pRetPar[1] = AmpStatus; /* AMP_Status */
+ pu4Temp = (u32 *)&pRetPar[2]; /* Total_Bandwidth */
+ *pu4Temp = TotalBandwidth; /* 0x19bfcc00;0x7530; */
+ pu4Temp = (u32 *)&pRetPar[6]; /* Max_Guaranteed_Bandwidth */
+ *pu4Temp = MaxBandGUBandwidth; /* 0x19bfcc00;0x4e20; */
+ pu4Temp = (u32 *)&pRetPar[10]; /* Min_Latency */
+ *pu4Temp = MinLatency; /* 150; */
+ pu4Temp = (u32 *)&pRetPar[14]; /* Max_PDU_Size */
+ *pu4Temp = MaxPDUSize;
+ pRetPar[18] = ControlType; /* Controller_Type */
+ pu2Temp = (u16 *)&pRetPar[19]; /* PAL_Capabilities */
+ *pu2Temp = PalCap;
+ pu2Temp = (u16 *)&pRetPar[21]; /* AMP_ASSOC_Length */
+ *pu2Temp = AmpAssocLen;
+ pu4Temp = (u32 *)&pRetPar[23]; /* Max_Flush_Timeout */
+ *pu4Temp = MaxFlushTimeout;
+ pu4Temp = (u32 *)&pRetPar[27]; /* Best_Effort_Flush_Timeout */
+ *pu4Temp = BestEffortFlushTimeout;
+ len += 31;
+ PPacketIrpEvent->Length = len;
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("AmpStatus = 0x%x\n",
+ AmpStatus));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("TotalBandwidth = 0x%x, MaxBandGUBandwidth = 0x%x, MinLatency = 0x%x, \n MaxPDUSize = 0x%x, ControlType = 0x%x\n",
+ TotalBandwidth, MaxBandGUBandwidth, MinLatency, MaxPDUSize, ControlType));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("PalCap = 0x%x, AmpAssocLen = 0x%x, MaxFlushTimeout = 0x%x, BestEffortFlushTimeout = 0x%x\n",
+ PalCap, AmpAssocLen, MaxFlushTimeout, BestEffortFlushTimeout));
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ return status;
+}
+
+static enum hci_status
+bthci_CmdCreatePhysicalLink(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+
+ pBtDbg->dbgHciInfo.hciCmdCntCreatePhyLink++;
+
+ status = bthci_BuildPhysicalLink(padapter,
+ pHciCmd, HCI_CREATE_PHYSICAL_LINK);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdReadLinkQuality(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ u16 PLH;
+ u8 EntryNum, LinkQuality = 0x55;
+
+ PLH = *((u16 *)&pHciCmd->Data[0]);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("PLH = 0x%x\n", PLH));
+
+ EntryNum = bthci_GetCurrentEntryNum(padapter, (u8)PLH);
+ if (EntryNum == 0xff) {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("No such PLH(0x%x)\n", PLH));
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+ }
+
+ {
+ u8 localBuf[11] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_STATUS_PARAMETERS,
+ HCI_READ_LINK_QUALITY,
+ status);
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, (" PLH = 0x%x\n Link Quality = 0x%x\n", PLH, LinkQuality));
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ *((u16 *)&pRetPar[1]) = pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle; /* Handle */
+		pRetPar[3] = LinkQuality; /* Link Quality */
+ len += 4;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status bthci_CmdReadRSSI(struct rtw_adapter *padapter)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ return status;
+}
+
+static enum hci_status
+bthci_CmdCreateLogicalLink(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+
+ pBtDbg->dbgHciInfo.hciCmdCntCreateLogLink++;
+
+ bthci_BuildLogicalLink(padapter, pHciCmd,
+ HCI_CREATE_LOGICAL_LINK);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdAcceptLogicalLink(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+
+ pBtDbg->dbgHciInfo.hciCmdCntAcceptLogLink++;
+
+ bthci_BuildLogicalLink(padapter, pHciCmd,
+ HCI_ACCEPT_LOGICAL_LINK);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdDisconnectLogicalLink(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTinfo->BtMgnt;
+ struct bt_dgb *pBtDbg = &pBTinfo->BtDbg;
+ u16 logicHandle;
+ u8 i, j, find = 0, LogLinkCount = 0;
+
+ pBtDbg->dbgHciInfo.hciCmdCntDisconnectLogLink++;
+
+ logicHandle = *((u16 *)pHciCmd->Data);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DisconnectLogicalLink, logicHandle = 0x%x\n", logicHandle));
+
+	/* find a created logical link index and clear the data */
+ for (j = 0; j < MAX_BT_ASOC_ENTRY_NUM; j++) {
+ for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
+ if (pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle == logicHandle) {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DisconnectLogicalLink, logicHandle is matched 0x%x\n", logicHandle));
+ bthci_ResetFlowSpec(padapter, j, i);
+ find = 1;
+ pBtMgnt->DisconnectEntryNum = j;
+ break;
+ }
+ }
+ }
+
+ if (!find)
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+
+	/* Count the logical links still active on this entry */
+ for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
+ if (pBTinfo->BtAsocEntry[pBtMgnt->DisconnectEntryNum].LogLinkCmdData[i].BtLogLinkhandle != 0)
+ LogLinkCount++;
+ }
+
+	/* When we receive the Disconnect Logical Link command, we should send a command status event first. */
+ bthci_EventCommandStatus(padapter,
+ LINK_CONTROL_COMMANDS,
+ HCI_DISCONNECT_LOGICAL_LINK,
+ status);
+	/* When the logical link has been disconnected, we should send the disconnection complete event. */
+ if (status == HCI_STATUS_SUCCESS) {
+ bthci_EventDisconnectLogicalLinkComplete(padapter, status,
+ logicHandle, HCI_STATUS_CONNECT_TERMINATE_LOCAL_HOST);
+ }
+
+ if (LogLinkCount == 0)
+ mod_timer(&pBTinfo->BTDisconnectPhyLinkTimer,
+ jiffies + msecs_to_jiffies(100));
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdLogicalLinkCancel(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTinfo->BtMgnt;
+ u8 CurrentEntryNum, CurrentLogEntryNum;
+
+ u8 physicalLinkHandle, TxFlowSpecID, i;
+ u16 CurrentLogicalHandle;
+
+ physicalLinkHandle = *((u8 *)pHciCmd->Data);
+ TxFlowSpecID = *(((u8 *)pHciCmd->Data)+1);
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("LogicalLinkCancel, physicalLinkHandle = 0x%x, TxFlowSpecID = 0x%x\n",
+ physicalLinkHandle, TxFlowSpecID));
+
+ CurrentEntryNum = pBtMgnt->CurrentConnectEntryNum;
+ CurrentLogicalHandle = pBtMgnt->BtCurrentLogLinkhandle;
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("CurrentEntryNum = 0x%x, CurrentLogicalHandle = 0x%x\n",
+ CurrentEntryNum, CurrentLogicalHandle));
+
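+	/* Look up the logical link entry matching both the current logical handle and the requested physical link handle. */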
+ CurrentLogEntryNum = 0xff;
+ for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
+ if ((CurrentLogicalHandle == pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[i].BtLogLinkhandle) &&
+ (physicalLinkHandle == pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[i].BtPhyLinkhandle)) {
+ CurrentLogEntryNum = i;
+ break;
+ }
+ }
+
+ if (CurrentLogEntryNum == 0xff) {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("LogicalLinkCancel, CurrentLogEntryNum == 0xff !!!!\n"));
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+ return status;
+ } else {
+ if (pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[CurrentLogEntryNum].bLLCompleteEventIsSet) {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("LogicalLinkCancel, LLCompleteEventIsSet!!!!\n"));
+ status = HCI_STATUS_ACL_CONNECT_EXISTS;
+ }
+ }
+
+ {
+ u8 localBuf[8] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ LINK_CONTROL_COMMANDS,
+ HCI_LOGICAL_LINK_CANCEL,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pRetPar[1] = pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[CurrentLogEntryNum].BtPhyLinkhandle;
+ pRetPar[2] = pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[CurrentLogEntryNum].BtTxFlowSpecID;
+ len += 3;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[CurrentLogEntryNum].bLLCancelCMDIsSetandComplete = true;
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdFlowSpecModify(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
+ u8 i, j, find = 0;
+ u16 logicHandle;
+
+ logicHandle = *((u16 *)pHciCmd->Data);
+	/* find a matched logical link index and copy the data */
+ for (j = 0; j < MAX_BT_ASOC_ENTRY_NUM; j++) {
+ for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
+ if (pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle == logicHandle) {
+ memcpy(&pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].Tx_Flow_Spec,
+ &pHciCmd->Data[2], sizeof(struct hci_flow_spec));
+ memcpy(&pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].Rx_Flow_Spec,
+ &pHciCmd->Data[18], sizeof(struct hci_flow_spec));
+
+ bthci_CheckLogLinkBehavior(padapter, pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].Tx_Flow_Spec);
+ find = 1;
+ break;
+ }
+ }
+ }
+ RTPRINT(FIOCTL, IOCTL_BT_LOGO, ("FlowSpecModify, LLH = 0x%x, \n", logicHandle));
+
+ /* When we receive Flow Spec Modify command, we should send command status event first. */
+ bthci_EventCommandStatus(padapter,
+ LINK_CONTROL_COMMANDS,
+ HCI_FLOW_SPEC_MODIFY,
+ HCI_STATUS_SUCCESS);
+
+ if (!find)
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+
+ bthci_EventSendFlowSpecModifyComplete(padapter, status, logicHandle);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdAcceptPhysicalLink(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+
+ pBtDbg->dbgHciInfo.hciCmdCntAcceptPhyLink++;
+
+ status = bthci_BuildPhysicalLink(padapter,
+ pHciCmd, HCI_ACCEPT_PHYSICAL_LINK);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdDisconnectPhysicalLink(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+ u8 PLH, CurrentEntryNum, PhysLinkDisconnectReason;
+
+ pBtDbg->dbgHciInfo.hciCmdCntDisconnectPhyLink++;
+
+ PLH = *((u8 *)pHciCmd->Data);
+ PhysLinkDisconnectReason = (*((u8 *)pHciCmd->Data+1));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_DISCONNECT_PHYSICAL_LINK PhyHandle = 0x%x, Reason = 0x%x\n",
+ PLH, PhysLinkDisconnectReason));
+
+ CurrentEntryNum = bthci_GetCurrentEntryNum(padapter, PLH);
+
+ if (CurrentEntryNum == 0xff) {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD,
+ ("DisconnectPhysicalLink, No such Handle in the Entry\n"));
+ status = HCI_STATUS_UNKNOW_CONNECT_ID;
+ } else {
+ pBTInfo->BtAsocEntry[CurrentEntryNum].PhyLinkDisconnectReason =
+ (enum hci_status)PhysLinkDisconnectReason;
+ }
+ /* Send HCI Command status event to AMP. */
+ bthci_EventCommandStatus(padapter, LINK_CONTROL_COMMANDS,
+ HCI_DISCONNECT_PHYSICAL_LINK, status);
+
+ if (status != HCI_STATUS_SUCCESS)
+ return status;
+
+ /* The macros below require { and } in the if statement */
+ if (pBTInfo->BtAsocEntry[CurrentEntryNum].BtCurrentState == HCI_STATE_DISCONNECTED) {
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTED, STATE_CMD_DISCONNECT_PHY_LINK, CurrentEntryNum);
+ } else {
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTING, STATE_CMD_DISCONNECT_PHY_LINK, CurrentEntryNum);
+ }
+ return status;
+}
+
+static enum hci_status
+bthci_CmdSetACLLinkDataFlowMode(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ u8 localBuf[8] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 *pu2Temp;
+
+ pBtMgnt->ExtConfig.CurrentConnectHandle = *((u16 *)pHciCmd->Data);
+	pBtMgnt->ExtConfig.CurrentIncomingTrafficMode = *((u8 *)pHciCmd->Data+2);
+	pBtMgnt->ExtConfig.CurrentOutgoingTrafficMode = *((u8 *)pHciCmd->Data+3);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("Connection Handle = 0x%x, Incoming Traffic mode = 0x%x, Outgoing Traffic mode = 0x%x",
+ pBtMgnt->ExtConfig.CurrentConnectHandle,
+ pBtMgnt->ExtConfig.CurrentIncomingTrafficMode,
+ pBtMgnt->ExtConfig.CurrentOutgoingTrafficMode));
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_SET_ACL_LINK_DATA_FLOW_MODE,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+
+ pu2Temp = (u16 *)&pRetPar[1];
+ *pu2Temp = pBtMgnt->ExtConfig.CurrentConnectHandle;
+ len += 3;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ return status;
+}
+
+static enum hci_status
+bthci_CmdSetACLLinkStatus(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+ u8 i;
+ u8 *pTriple;
+
+ pBtDbg->dbgHciInfo.hciCmdCntSetAclLinkStatus++;
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_EXT, "SetACLLinkStatus, Hex Data :\n",
+ &pHciCmd->Data[0], pHciCmd->Length);
+
+	/* Only Core Stack v251 and later versions support this command. */
+ pBtMgnt->bSupportProfile = true;
+
+ pBtMgnt->ExtConfig.NumberOfHandle = *((u8 *)pHciCmd->Data);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("NumberOfHandle = 0x%x\n", pBtMgnt->ExtConfig.NumberOfHandle));
+
+ pTriple = &pHciCmd->Data[1];
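+	/* Each 4-byte entry: connection handle (2 bytes), incoming traffic mode (1), outgoing traffic mode (1). */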
+ for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
+ pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle = *((u16 *)&pTriple[0]);
+ pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode = pTriple[2];
+ pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode = pTriple[3];
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT,
+ ("Connection_Handle = 0x%x, Incoming Traffic mode = 0x%x, Outgoing Traffic Mode = 0x%x\n",
+ pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle,
+ pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode,
+ pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode));
+ pTriple += 4;
+ }
+
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_SET_ACL_LINK_STATUS,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdSetSCOLinkStatus(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+
+ pBtDbg->dbgHciInfo.hciCmdCntSetScoLinkStatus++;
+ pBtMgnt->ExtConfig.NumberOfSCO = *((u8 *)pHciCmd->Data);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("NumberOfSCO = 0x%x\n",
+ pBtMgnt->ExtConfig.NumberOfSCO));
+
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_SET_SCO_LINK_STATUS,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdSetRSSIValue(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ s8 min_bt_rssi = 0;
+ u8 i;
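+
+	/* Update the RSSI of the link matching the given connection handle and track the minimum RSSI across all links. */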
+ for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
+ if (pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle == *((u16 *)&pHciCmd->Data[0])) {
+ pBtMgnt->ExtConfig.linkInfo[i].BT_RSSI = (s8)(pHciCmd->Data[2]);
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT_PERIODICAL,
+ ("Connection_Handle = 0x%x, RSSI = %d \n",
+ pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle,
+ pBtMgnt->ExtConfig.linkInfo[i].BT_RSSI));
+ }
+ /* get the minimum bt rssi value */
+ if (pBtMgnt->ExtConfig.linkInfo[i].BT_RSSI <= min_bt_rssi)
+ min_bt_rssi = pBtMgnt->ExtConfig.linkInfo[i].BT_RSSI;
+ }
+
+ pBtMgnt->ExtConfig.MIN_BT_RSSI = min_bt_rssi;
+ RTPRINT(FBT, BT_TRACE, ("[bt rssi], the min rssi is %d\n", min_bt_rssi));
+
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_SET_RSSI_VALUE,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdSetCurrentBluetoothStatus(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ pBtMgnt->ExtConfig.CurrentBTStatus = *((u8 *)&pHciCmd->Data[0]);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("SetCurrentBluetoothStatus, CurrentBTStatus = 0x%x\n",
+ pBtMgnt->ExtConfig.CurrentBTStatus));
+
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_SET_CURRENT_BLUETOOTH_STATUS,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdExtensionVersionNotify(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+
+ pBtDbg->dbgHciInfo.hciCmdCntExtensionVersionNotify++;
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_EXT, "ExtensionVersionNotify, Hex Data :\n",
+ &pHciCmd->Data[0], pHciCmd->Length);
+
+ pBtMgnt->ExtConfig.HCIExtensionVer = *((u16 *)&pHciCmd->Data[0]);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCIExtensionVer = 0x%x\n", pBtMgnt->ExtConfig.HCIExtensionVer));
+
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_EXTENSION_VERSION_NOTIFY,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdLinkStatusNotify(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+ u8 i;
+ u8 *pTriple;
+
+ pBtDbg->dbgHciInfo.hciCmdCntLinkStatusNotify++;
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_EXT, "LinkStatusNotify, Hex Data :\n",
+ &pHciCmd->Data[0], pHciCmd->Length);
+
+	/* Currently only the RTL8723 supports this command. */
+ pBtMgnt->bSupportProfile = true;
+
+ pBtMgnt->ExtConfig.NumberOfHandle = *((u8 *)pHciCmd->Data);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("NumberOfHandle = 0x%x\n", pBtMgnt->ExtConfig.NumberOfHandle));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCIExtensionVer = %d\n", pBtMgnt->ExtConfig.HCIExtensionVer));
+
+ pTriple = &pHciCmd->Data[1];
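+	/* Each entry is 4 bytes (handle, profile, core spec) for extension version 0, or 5 bytes including the link role for version >= 1. */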
+ for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
+ if (pBtMgnt->ExtConfig.HCIExtensionVer < 1) {
+ pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle = *((u16 *)&pTriple[0]);
+ pBtMgnt->ExtConfig.linkInfo[i].BTProfile = pTriple[2];
+ pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec = pTriple[3];
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT,
+ ("Connection_Handle = 0x%x, BTProfile =%d, BTSpec =%d\n",
+ pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle,
+ pBtMgnt->ExtConfig.linkInfo[i].BTProfile,
+ pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec));
+ pTriple += 4;
+ } else if (pBtMgnt->ExtConfig.HCIExtensionVer >= 1) {
+ pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle = *((u16 *)&pTriple[0]);
+ pBtMgnt->ExtConfig.linkInfo[i].BTProfile = pTriple[2];
+ pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec = pTriple[3];
+ pBtMgnt->ExtConfig.linkInfo[i].linkRole = pTriple[4];
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT,
+ ("Connection_Handle = 0x%x, BTProfile =%d, BTSpec =%d, LinkRole =%d\n",
+ pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle,
+ pBtMgnt->ExtConfig.linkInfo[i].BTProfile,
+ pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec,
+ pBtMgnt->ExtConfig.linkInfo[i].linkRole));
+ pTriple += 5;
+ }
+
+ }
+ BTHCI_UpdateBTProfileRTKToMoto(padapter);
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_LINK_STATUS_NOTIFY,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdBtOperationNotify(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_EXT, "Bt Operation notify, Hex Data :\n",
+ &pHciCmd->Data[0], pHciCmd->Length);
+
+ pBtMgnt->ExtConfig.btOperationCode = *((u8 *)pHciCmd->Data);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("btOperationCode = 0x%x\n", pBtMgnt->ExtConfig.btOperationCode));
+ switch (pBtMgnt->ExtConfig.btOperationCode) {
+ case HCI_BT_OP_NONE:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Operation None!!\n"));
+ break;
+ case HCI_BT_OP_INQUIRY_START:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Inquire start!!\n"));
+ break;
+ case HCI_BT_OP_INQUIRY_FINISH:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Inquire finished!!\n"));
+ break;
+ case HCI_BT_OP_PAGING_START:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Paging is started!!\n"));
+ break;
+ case HCI_BT_OP_PAGING_SUCCESS:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Paging complete successfully!!\n"));
+ break;
+ case HCI_BT_OP_PAGING_UNSUCCESS:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Paging complete unsuccessfully!!\n"));
+ break;
+ case HCI_BT_OP_PAIRING_START:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Pairing start!!\n"));
+ break;
+ case HCI_BT_OP_PAIRING_FINISH:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Pairing finished!!\n"));
+ break;
+ case HCI_BT_OP_BT_DEV_ENABLE:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : BT Device is enabled!!\n"));
+ break;
+ case HCI_BT_OP_BT_DEV_DISABLE:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : BT Device is disabled!!\n"));
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Unknown, error!!\n"));
+ break;
+ }
+ BTDM_AdjustForBtOperation(padapter);
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_BT_OPERATION_NOTIFY,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdEnableWifiScanNotify(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_EXT, "Enable Wifi scan notify, Hex Data :\n",
+ &pHciCmd->Data[0], pHciCmd->Length);
+
+ pBtMgnt->ExtConfig.bEnableWifiScanNotify = *((u8 *)pHciCmd->Data);
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("bEnableWifiScanNotify = %d\n", pBtMgnt->ExtConfig.bEnableWifiScanNotify));
+
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_ENABLE_WIFI_SCAN_NOTIFY,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdWIFICurrentChannel(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ u8 chnl = pmlmeext->cur_channel;
+
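+	/* For 40 MHz bandwidth, shift the reported channel by 2 according to the primary channel offset. */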
+ if (pmlmeext->cur_bwmode == HT_CHANNEL_WIDTH_40) {
+ if (pmlmeext->cur_ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
+ chnl += 2;
+ else if (pmlmeext->cur_ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ chnl -= 2;
+ }
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("Current Channel = 0x%x\n", chnl));
+
+ {
+ u8 localBuf[8] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_WIFI_CURRENT_CHANNEL,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pRetPar[1] = chnl; /* current channel */
+ len += 2;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdWIFICurrentBandwidth(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ enum ht_channel_width bw;
+ u8 CurrentBW = 0;
+
+ bw = padapter->mlmeextpriv.cur_bwmode;
+
+ if (bw == HT_CHANNEL_WIDTH_20)
+ CurrentBW = 0;
+ else if (bw == HT_CHANNEL_WIDTH_40)
+ CurrentBW = 1;
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("Current BW = 0x%x\n",
+ CurrentBW));
+
+ {
+ u8 localBuf[8] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_WIFI_CURRENT_BANDWIDTH,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pRetPar[1] = CurrentBW; /* current BW */
+ len += 2;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdWIFIConnectionStatus(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ u8 connectStatus = HCI_WIFI_NOT_CONNECTED;
+
+ if (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE)) {
+ if (padapter->stapriv.asoc_sta_count >= 3)
+ connectStatus = HCI_WIFI_CONNECTED;
+ else
+ connectStatus = HCI_WIFI_NOT_CONNECTED;
+ } else if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE|WIFI_ASOC_STATE)) {
+ connectStatus = HCI_WIFI_CONNECTED;
+ } else if (check_fwstate(&padapter->mlmepriv, WIFI_UNDER_LINKING)) {
+ connectStatus = HCI_WIFI_CONNECT_IN_PROGRESS;
+ } else {
+ connectStatus = HCI_WIFI_NOT_CONNECTED;
+ }
+
+ {
+ u8 localBuf[8] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_EXTENSION,
+ HCI_WIFI_CONNECTION_STATUS,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ pRetPar[1] = connectStatus; /* connect status */
+ len += 2;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdEnableDeviceUnderTestMode(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+
+ pBtHciInfo->bInTestMode = true;
+ pBtHciInfo->bTestIsEnd = false;
+
+ /* send command complete event here when all data are received. */
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_TESTING_COMMANDS,
+ HCI_ENABLE_DEVICE_UNDER_TEST_MODE,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdAMPTestEnd(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 bFilterOutNonAssociatedBSSID = true;
+
+ if (!pBtHciInfo->bInTestMode) {
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Not in Test mode, return status = HCI_STATUS_CMD_DISALLOW\n"));
+ status = HCI_STATUS_CMD_DISALLOW;
+ return status;
+ }
+
+ pBtHciInfo->bTestIsEnd = true;
+
+ del_timer_sync(&pBTInfo->BTTestSendPacketTimer);
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_CHECK_BSSID, (u8 *)(&bFilterOutNonAssociatedBSSID));
+
+ /* send command complete event here when all data are received. */
+ {
+ u8 localBuf[4] = "";
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("AMP Test End Event \n"));
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_AMP_TEST_END;
+ PPacketIrpEvent->Length = 2;
+
+ PPacketIrpEvent->Data[0] = status;
+ PPacketIrpEvent->Data[1] = pBtHciInfo->TestScenario;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
+ }
+
+ bthci_EventAMPReceiverReport(padapter, 0x01);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdAMPTestCommand(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+
+ if (!pBtHciInfo->bInTestMode) {
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Not in Test mode, return status = HCI_STATUS_CMD_DISALLOW\n"));
+ status = HCI_STATUS_CMD_DISALLOW;
+ return status;
+ }
+
+ pBtHciInfo->TestScenario = *((u8 *)pHciCmd->Data);
+
+ if (pBtHciInfo->TestScenario == 0x01)
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("TX Single Test \n"));
+ else if (pBtHciInfo->TestScenario == 0x02)
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Receive Frame Test \n"));
+ else
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("No Such Test !!!!!!!!!!!!!!!!!! \n"));
+
+ if (pBtHciInfo->bTestIsEnd) {
+ u8 localBuf[5] = "";
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("AMP Test End Event \n"));
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_AMP_TEST_END;
+ PPacketIrpEvent->Length = 2;
+
+ PPacketIrpEvent->Data[0] = status;
+		PPacketIrpEvent->Data[1] = pBtHciInfo->TestScenario;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
+
+		/* Return to Idle state with RX and TX off. */
+
+ return status;
+ }
+
+ /* should send command status event */
+ bthci_EventCommandStatus(padapter,
+ OGF_TESTING_COMMANDS,
+ HCI_AMP_TEST_COMMAND,
+ status);
+
+ /* The HCI_AMP_Start Test Event shall be generated when the */
+ /* HCI_AMP_Test_Command has completed and the first data is ready to be sent */
+ /* or received. */
+
+ {
+ u8 localBuf[5] = "";
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), (" HCI_AMP_Start Test Event \n"));
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ PPacketIrpEvent->EventCode = HCI_EVENT_AMP_START_TEST;
+ PPacketIrpEvent->Length = 2;
+
+ PPacketIrpEvent->Data[0] = status;
+		PPacketIrpEvent->Data[1] = pBtHciInfo->TestScenario;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
+
+		/* Return to Idle state with RX and TX off. */
+ }
+
+ if (pBtHciInfo->TestScenario == 0x01) {
+ /*
+		When in a transmitter test scenario and the frames/bursts count has been
+		transmitted, the HCI_AMP_Test_End event shall be sent.
+ */
+ mod_timer(&pBTInfo->BTTestSendPacketTimer,
+ jiffies + msecs_to_jiffies(50));
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("TX Single Test \n"));
+ } else if (pBtHciInfo->TestScenario == 0x02) {
+ u8 bFilterOutNonAssociatedBSSID = false;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_CHECK_BSSID, (u8 *)(&bFilterOutNonAssociatedBSSID));
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Receive Frame Test \n"));
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdEnableAMPReceiverReports(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+
+ if (!pBtHciInfo->bInTestMode) {
+ status = HCI_STATUS_CMD_DISALLOW;
+ /* send command complete event here when all data are received. */
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_TESTING_COMMANDS,
+ HCI_ENABLE_AMP_RECEIVER_REPORTS,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+ return status;
+ }
+
+ pBtHciInfo->bTestNeedReport = *((u8 *)pHciCmd->Data);
+ pBtHciInfo->TestReportInterval = (*((u8 *)pHciCmd->Data+2));
+
+ bthci_EventAMPReceiverReport(padapter, 0x00);
+
+ /* send command complete event here when all data are received. */
+ {
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_TESTING_COMMANDS,
+ HCI_ENABLE_AMP_RECEIVER_REPORTS,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+ }
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdHostBufferSize(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ u8 localBuf[6] = "";
+ u8 *pRetPar;
+ u8 len = 0;
+
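+	/* Host_Buffer_Size parameters: ACL packet length (2 bytes), synchronous packet length (1), total ACL packets (2), total synchronous packets (2). */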
+ pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].ACLPacketsData.ACLDataPacketLen = *((u16 *)pHciCmd->Data);
+ pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].SyncDataPacketLen = *((u8 *)(pHciCmd->Data+2));
+ pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].TotalNumACLDataPackets = *((u16 *)(pHciCmd->Data+3));
+ pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].TotalSyncNumDataPackets = *((u16 *)(pHciCmd->Data+5));
+
+ /* send command complete event here when all data are received. */
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ len += bthci_CommandCompleteHeader(&localBuf[0],
+ OGF_SET_EVENT_MASK_COMMAND,
+ HCI_HOST_BUFFER_SIZE,
+ status);
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[len];
+ pRetPar[0] = status; /* status */
+ len += 1;
+ PPacketIrpEvent->Length = len;
+
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+
+ return status;
+}
+
+static enum hci_status
+bthci_CmdHostNumberOfCompletedPackets(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+
+ return status;
+}
+
+static enum hci_status
+bthci_UnknownCMD(struct rtw_adapter *padapter, struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_UNKNOW_HCI_CMD;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+
+ pBtDbg->dbgHciInfo.hciCmdCntUnknown++;
+ bthci_EventCommandStatus(padapter,
+ (u8)pHciCmd->OGF,
+ pHciCmd->OCF,
+ status);
+
+ return status;
+}
+
+static enum hci_status
+bthci_HandleOGFInformationalParameters(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+
+ switch (pHciCmd->OCF) {
+ case HCI_READ_LOCAL_VERSION_INFORMATION:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_VERSION_INFORMATION\n"));
+ status = bthci_CmdReadLocalVersionInformation(padapter);
+ break;
+ case HCI_READ_LOCAL_SUPPORTED_COMMANDS:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_SUPPORTED_COMMANDS\n"));
+ status = bthci_CmdReadLocalSupportedCommands(padapter);
+ break;
+ case HCI_READ_LOCAL_SUPPORTED_FEATURES:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_SUPPORTED_FEATURES\n"));
+ status = bthci_CmdReadLocalSupportedFeatures(padapter);
+ break;
+ case HCI_READ_BUFFER_SIZE:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_BUFFER_SIZE\n"));
+ status = bthci_CmdReadBufferSize(padapter);
+ break;
+ case HCI_READ_DATA_BLOCK_SIZE:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_DATA_BLOCK_SIZE\n"));
+ status = bthci_CmdReadDataBlockSize(padapter);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("bthci_HandleOGFInformationalParameters(), Unknown case = 0x%x\n", pHciCmd->OCF));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
+ status = bthci_UnknownCMD(padapter, pHciCmd);
+ break;
+ }
+ return status;
+}
+
+static enum hci_status
+bthci_HandleOGFSetEventMaskCMD(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+
+ switch (pHciCmd->OCF) {
+ case HCI_SET_EVENT_MASK:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_SET_EVENT_MASK\n"));
+ status = bthci_CmdSetEventMask(padapter, pHciCmd);
+ break;
+ case HCI_RESET:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_RESET\n"));
+ status = bthci_CmdReset(padapter, true);
+ break;
+ case HCI_READ_CONNECTION_ACCEPT_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_CONNECTION_ACCEPT_TIMEOUT\n"));
+ status = bthci_CmdReadConnectionAcceptTimeout(padapter);
+ break;
+ case HCI_SET_EVENT_FILTER:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_SET_EVENT_FILTER\n"));
+ status = bthci_CmdSetEventFilter(padapter, pHciCmd);
+ break;
+ case HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT\n"));
+ status = bthci_CmdWriteConnectionAcceptTimeout(padapter, pHciCmd);
+ break;
+ case HCI_READ_PAGE_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_PAGE_TIMEOUT\n"));
+ status = bthci_CmdReadPageTimeout(padapter, pHciCmd);
+ break;
+ case HCI_WRITE_PAGE_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_PAGE_TIMEOUT\n"));
+ status = bthci_CmdWritePageTimeout(padapter, pHciCmd);
+ break;
+ case HCI_HOST_NUMBER_OF_COMPLETED_PACKETS:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_HOST_NUMBER_OF_COMPLETED_PACKETS\n"));
+ status = bthci_CmdHostNumberOfCompletedPackets(padapter, pHciCmd);
+ break;
+ case HCI_READ_LINK_SUPERVISION_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LINK_SUPERVISION_TIMEOUT\n"));
+ status = bthci_CmdReadLinkSupervisionTimeout(padapter, pHciCmd);
+ break;
+ case HCI_WRITE_LINK_SUPERVISION_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_LINK_SUPERVISION_TIMEOUT\n"));
+ status = bthci_CmdWriteLinkSupervisionTimeout(padapter, pHciCmd);
+ break;
+ case HCI_ENHANCED_FLUSH:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_ENHANCED_FLUSH\n"));
+ status = bthci_CmdEnhancedFlush(padapter, pHciCmd);
+ break;
+ case HCI_READ_LOGICAL_LINK_ACCEPT_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOGICAL_LINK_ACCEPT_TIMEOUT\n"));
+ status = bthci_CmdReadLogicalLinkAcceptTimeout(padapter, pHciCmd);
+ break;
+ case HCI_WRITE_LOGICAL_LINK_ACCEPT_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_LOGICAL_LINK_ACCEPT_TIMEOUT\n"));
+ status = bthci_CmdWriteLogicalLinkAcceptTimeout(padapter, pHciCmd);
+ break;
+ case HCI_SET_EVENT_MASK_PAGE_2:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_SET_EVENT_MASK_PAGE_2\n"));
+ status = bthci_CmdSetEventMaskPage2(padapter, pHciCmd);
+ break;
+ case HCI_READ_LOCATION_DATA:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCATION_DATA\n"));
+ status = bthci_CmdReadLocationData(padapter, pHciCmd);
+ break;
+ case HCI_WRITE_LOCATION_DATA:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_LOCATION_DATA\n"));
+ status = bthci_CmdWriteLocationData(padapter, pHciCmd);
+ break;
+ case HCI_READ_FLOW_CONTROL_MODE:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_FLOW_CONTROL_MODE\n"));
+ status = bthci_CmdReadFlowControlMode(padapter, pHciCmd);
+ break;
+ case HCI_WRITE_FLOW_CONTROL_MODE:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_FLOW_CONTROL_MODE\n"));
+ status = bthci_CmdWriteFlowControlMode(padapter, pHciCmd);
+ break;
+ case HCI_READ_BEST_EFFORT_FLUSH_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_BEST_EFFORT_FLUSH_TIMEOUT\n"));
+ status = bthci_CmdReadBestEffortFlushTimeout(padapter, pHciCmd);
+ break;
+ case HCI_WRITE_BEST_EFFORT_FLUSH_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_BEST_EFFORT_FLUSH_TIMEOUT\n"));
+ status = bthci_CmdWriteBestEffortFlushTimeout(padapter, pHciCmd);
+ break;
+ case HCI_SHORT_RANGE_MODE:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_SHORT_RANGE_MODE\n"));
+ status = bthci_CmdShortRangeMode(padapter, pHciCmd);
+ break;
+ case HCI_HOST_BUFFER_SIZE:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_HOST_BUFFER_SIZE\n"));
+ status = bthci_CmdHostBufferSize(padapter, pHciCmd);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("bthci_HandleOGFSetEventMaskCMD(), Unknown case = 0x%x\n", pHciCmd->OCF));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
+ status = bthci_UnknownCMD(padapter, pHciCmd);
+ break;
+ }
+ return status;
+}
+
+static enum hci_status
+bthci_HandleOGFStatusParameters(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+
+ switch (pHciCmd->OCF) {
+ case HCI_READ_FAILED_CONTACT_COUNTER:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_FAILED_CONTACT_COUNTER\n"));
+ status = bthci_CmdReadFailedContactCounter(padapter, pHciCmd);
+ break;
+ case HCI_RESET_FAILED_CONTACT_COUNTER:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_RESET_FAILED_CONTACT_COUNTER\n"));
+ status = bthci_CmdResetFailedContactCounter(padapter, pHciCmd);
+ break;
+ case HCI_READ_LINK_QUALITY:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LINK_QUALITY\n"));
+ status = bthci_CmdReadLinkQuality(padapter, pHciCmd);
+ break;
+ case HCI_READ_RSSI:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_RSSI\n"));
+ status = bthci_CmdReadRSSI(padapter);
+ break;
+ case HCI_READ_LOCAL_AMP_INFO:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_AMP_INFO\n"));
+ status = bthci_CmdReadLocalAMPInfo(padapter);
+ break;
+ case HCI_READ_LOCAL_AMP_ASSOC:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_AMP_ASSOC\n"));
+ status = bthci_CmdReadLocalAMPAssoc(padapter, pHciCmd);
+ break;
+ case HCI_WRITE_REMOTE_AMP_ASSOC:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_REMOTE_AMP_ASSOC\n"));
+ status = bthci_CmdWriteRemoteAMPAssoc(padapter, pHciCmd);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("bthci_HandleOGFStatusParameters(), Unknown case = 0x%x\n", pHciCmd->OCF));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
+ status = bthci_UnknownCMD(padapter, pHciCmd);
+ break;
+ }
+ return status;
+}
+
+static enum hci_status
+bthci_HandleOGFLinkControlCMD(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+
+ switch (pHciCmd->OCF) {
+ case HCI_CREATE_PHYSICAL_LINK:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_CREATE_PHYSICAL_LINK\n"));
+ status = bthci_CmdCreatePhysicalLink(padapter, pHciCmd);
+ break;
+ case HCI_ACCEPT_PHYSICAL_LINK:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_ACCEPT_PHYSICAL_LINK\n"));
+ status = bthci_CmdAcceptPhysicalLink(padapter, pHciCmd);
+ break;
+ case HCI_DISCONNECT_PHYSICAL_LINK:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_DISCONNECT_PHYSICAL_LINK\n"));
+ status = bthci_CmdDisconnectPhysicalLink(padapter, pHciCmd);
+ break;
+ case HCI_CREATE_LOGICAL_LINK:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_CREATE_LOGICAL_LINK\n"));
+ status = bthci_CmdCreateLogicalLink(padapter, pHciCmd);
+ break;
+ case HCI_ACCEPT_LOGICAL_LINK:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_ACCEPT_LOGICAL_LINK\n"));
+ status = bthci_CmdAcceptLogicalLink(padapter, pHciCmd);
+ break;
+ case HCI_DISCONNECT_LOGICAL_LINK:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_DISCONNECT_LOGICAL_LINK\n"));
+ status = bthci_CmdDisconnectLogicalLink(padapter, pHciCmd);
+ break;
+ case HCI_LOGICAL_LINK_CANCEL:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_LOGICAL_LINK_CANCEL\n"));
+ status = bthci_CmdLogicalLinkCancel(padapter, pHciCmd);
+ break;
+ case HCI_FLOW_SPEC_MODIFY:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_FLOW_SPEC_MODIFY\n"));
+ status = bthci_CmdFlowSpecModify(padapter, pHciCmd);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("bthci_HandleOGFLinkControlCMD(), Unknown case = 0x%x\n", pHciCmd->OCF));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
+ status = bthci_UnknownCMD(padapter, pHciCmd);
+ break;
+ }
+ return status;
+}
+
+static enum hci_status
+bthci_HandleOGFTestingCMD(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+	enum hci_status status = HCI_STATUS_SUCCESS;
+
+	switch (pHciCmd->OCF) {
+ case HCI_ENABLE_DEVICE_UNDER_TEST_MODE:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_ENABLE_DEVICE_UNDER_TEST_MODE\n"));
+ bthci_CmdEnableDeviceUnderTestMode(padapter, pHciCmd);
+ break;
+ case HCI_AMP_TEST_END:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_AMP_TEST_END\n"));
+ bthci_CmdAMPTestEnd(padapter, pHciCmd);
+ break;
+ case HCI_AMP_TEST_COMMAND:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_AMP_TEST_COMMAND\n"));
+ bthci_CmdAMPTestCommand(padapter, pHciCmd);
+ break;
+ case HCI_ENABLE_AMP_RECEIVER_REPORTS:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_ENABLE_AMP_RECEIVER_REPORTS\n"));
+ bthci_CmdEnableAMPReceiverReports(padapter, pHciCmd);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
+ status = bthci_UnknownCMD(padapter, pHciCmd);
+ break;
+ }
+ return status;
+}
+
+static enum hci_status
+bthci_HandleOGFExtension(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd)
+{
+	enum hci_status status = HCI_STATUS_SUCCESS;
+
+	switch (pHciCmd->OCF) {
+ case HCI_SET_ACL_LINK_DATA_FLOW_MODE:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_SET_ACL_LINK_DATA_FLOW_MODE\n"));
+ status = bthci_CmdSetACLLinkDataFlowMode(padapter, pHciCmd);
+ break;
+ case HCI_SET_ACL_LINK_STATUS:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_SET_ACL_LINK_STATUS\n"));
+ status = bthci_CmdSetACLLinkStatus(padapter, pHciCmd);
+ break;
+ case HCI_SET_SCO_LINK_STATUS:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_SET_SCO_LINK_STATUS\n"));
+ status = bthci_CmdSetSCOLinkStatus(padapter, pHciCmd);
+ break;
+ case HCI_SET_RSSI_VALUE:
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT_PERIODICAL, ("HCI_SET_RSSI_VALUE\n"));
+ status = bthci_CmdSetRSSIValue(padapter, pHciCmd);
+ break;
+ case HCI_SET_CURRENT_BLUETOOTH_STATUS:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_SET_CURRENT_BLUETOOTH_STATUS\n"));
+ status = bthci_CmdSetCurrentBluetoothStatus(padapter, pHciCmd);
+ break;
+ /* The following is for RTK8723 */
+
+ case HCI_EXTENSION_VERSION_NOTIFY:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_EXTENSION_VERSION_NOTIFY\n"));
+ status = bthci_CmdExtensionVersionNotify(padapter, pHciCmd);
+ break;
+ case HCI_LINK_STATUS_NOTIFY:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_LINK_STATUS_NOTIFY\n"));
+ status = bthci_CmdLinkStatusNotify(padapter, pHciCmd);
+ break;
+ case HCI_BT_OPERATION_NOTIFY:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_BT_OPERATION_NOTIFY\n"));
+ status = bthci_CmdBtOperationNotify(padapter, pHciCmd);
+ break;
+ case HCI_ENABLE_WIFI_SCAN_NOTIFY:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_ENABLE_WIFI_SCAN_NOTIFY\n"));
+ status = bthci_CmdEnableWifiScanNotify(padapter, pHciCmd);
+ break;
+
+ /* The following is for IVT */
+ case HCI_WIFI_CURRENT_CHANNEL:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_WIFI_CURRENT_CHANNEL\n"));
+ status = bthci_CmdWIFICurrentChannel(padapter, pHciCmd);
+ break;
+ case HCI_WIFI_CURRENT_BANDWIDTH:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_WIFI_CURRENT_BANDWIDTH\n"));
+ status = bthci_CmdWIFICurrentBandwidth(padapter, pHciCmd);
+ break;
+ case HCI_WIFI_CONNECTION_STATUS:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_WIFI_CONNECTION_STATUS\n"));
+ status = bthci_CmdWIFIConnectionStatus(padapter, pHciCmd);
+ break;
+
+ default:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_UNKNOWN_COMMAND\n"));
+ status = bthci_UnknownCMD(padapter, pHciCmd);
+ break;
+ }
+ return status;
+}
+
+static void
+bthci_StateStarting(struct rtw_adapter *padapter,
+ enum hci_state_with_cmd StateCmd, u8 EntryNum)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Starting], "));
+ switch (StateCmd) {
+ case STATE_CMD_CONNECT_ACCEPT_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_CONNECT_ACCEPT_TIMEOUT\n"));
+ pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_CONNECT_ACCEPT_TIMEOUT;
+ pBtMgnt->bNeedNotifyAMPNoCap = true;
+ BTHCI_DisconnectPeer(padapter, EntryNum);
+ break;
+ case STATE_CMD_DISCONNECT_PHY_LINK:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
+
+ bthci_EventDisconnectPhyLinkComplete(padapter,
+ HCI_STATUS_SUCCESS,
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
+ EntryNum);
+
+ del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
+
+ pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_UNKNOW_CONNECT_ID;
+
+ BTHCI_DisconnectPeer(padapter, EntryNum);
+ break;
+ case STATE_CMD_MAC_START_COMPLETE:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_MAC_START_COMPLETE\n"));
+ if (pBTInfo->BtAsocEntry[EntryNum].AMPRole == AMP_BTAP_CREATOR)
+ bthci_EventChannelSelected(padapter, EntryNum);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
+ break;
+ }
+}
+
+static void
+bthci_StateConnecting(struct rtw_adapter *padapter,
+ enum hci_state_with_cmd StateCmd, u8 EntryNum)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Connecting], "));
+ switch (StateCmd) {
+ case STATE_CMD_CONNECT_ACCEPT_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_CONNECT_ACCEPT_TIMEOUT\n"));
+ pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_CONNECT_ACCEPT_TIMEOUT;
+ pBtMgnt->bNeedNotifyAMPNoCap = true;
+ BTHCI_DisconnectPeer(padapter, EntryNum);
+ break;
+ case STATE_CMD_MAC_CONNECT_COMPLETE:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_MAC_CONNECT_COMPLETE\n"));
+
+ if (pBTInfo->BtAsocEntry[EntryNum].AMPRole == AMP_BTAP_JOINER) {
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_info_, ("StateConnecting \n"));
+ }
+ break;
+ case STATE_CMD_DISCONNECT_PHY_LINK:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
+
+ bthci_EventDisconnectPhyLinkComplete(padapter,
+ HCI_STATUS_SUCCESS,
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
+ EntryNum);
+
+ pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_UNKNOW_CONNECT_ID;
+
+ del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
+
+ BTHCI_DisconnectPeer(padapter, EntryNum);
+
+ break;
+ case STATE_CMD_MAC_CONNECT_CANCEL_INDICATE:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_MAC_CONNECT_CANCEL_INDICATE\n"));
+ pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_CONTROLLER_BUSY;
+ /* Because this state cmd is caused by the BTHCI_EventAMPStatusChange(), */
+ /* we don't need to send event in the following BTHCI_DisconnectPeer() again. */
+ pBtMgnt->bNeedNotifyAMPNoCap = false;
+ BTHCI_DisconnectPeer(padapter, EntryNum);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
+ break;
+ }
+}
+
+static void
+bthci_StateConnected(struct rtw_adapter *padapter,
+ enum hci_state_with_cmd StateCmd, u8 EntryNum)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ u8 i;
+ u16 logicHandle = 0;
+
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Connected], "));
+ switch (StateCmd) {
+ case STATE_CMD_DISCONNECT_PHY_LINK:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
+
+		/* When we are trying to disconnect the phy link, we should disconnect the logical links first. */
+		for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
+			if (pBTInfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtLogLinkhandle != 0) {
+				logicHandle = pBTInfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtLogLinkhandle;
+
+				bthci_EventDisconnectLogicalLinkComplete(padapter, HCI_STATUS_SUCCESS,
+					logicHandle, pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason);
+
+				pBTInfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtLogLinkhandle = 0;
+			}
+ }
+
+ bthci_EventDisconnectPhyLinkComplete(padapter,
+ HCI_STATUS_SUCCESS,
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
+ EntryNum);
+
+ del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
+
+ BTHCI_DisconnectPeer(padapter, EntryNum);
+ break;
+
+ case STATE_CMD_MAC_DISCONNECT_INDICATE:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_MAC_DISCONNECT_INDICATE\n"));
+
+ bthci_EventDisconnectPhyLinkComplete(padapter,
+ HCI_STATUS_SUCCESS,
+ /* TODO: Remote Host not local host */
+ HCI_STATUS_CONNECT_TERMINATE_LOCAL_HOST,
+ EntryNum);
+ BTHCI_DisconnectPeer(padapter, EntryNum);
+
+ break;
+ case STATE_CMD_ENTER_STATE:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_ENTER_STATE\n"));
+
+ if (pBtMgnt->bBTConnectInProgress) {
+ pBtMgnt->bBTConnectInProgress = false;
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], BT Connect in progress OFF!!\n"));
+ }
+ pBTInfo->BtAsocEntry[EntryNum].BtCurrentState = HCI_STATE_CONNECTED;
+ pBTInfo->BtAsocEntry[EntryNum].b4waySuccess = true;
+ pBtMgnt->bStartSendSupervisionPkt = true;
+
+ /* for rate adaptive */
+
+ if (padapter->HalFunc.UpdateRAMaskHandler)
+ padapter->HalFunc.UpdateRAMaskHandler(padapter, MAX_FW_SUPPORT_MACID_NUM-1-EntryNum, 0);
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_BASIC_RATE, padapter->mlmepriv.cur_network.network.SupportedRates);
+ BTDM_SetFwChnlInfo(padapter, RT_MEDIA_CONNECT);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
+ break;
+ }
+}
+
+static void
+bthci_StateAuth(struct rtw_adapter *padapter, enum hci_state_with_cmd StateCmd,
+ u8 EntryNum)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Authenticating], "));
+ switch (StateCmd) {
+ case STATE_CMD_CONNECT_ACCEPT_TIMEOUT:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_CONNECT_ACCEPT_TIMEOUT\n"));
+ pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_CONNECT_ACCEPT_TIMEOUT;
+ pBtMgnt->bNeedNotifyAMPNoCap = true;
+ BTHCI_DisconnectPeer(padapter, EntryNum);
+ break;
+ case STATE_CMD_DISCONNECT_PHY_LINK:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
+ bthci_EventDisconnectPhyLinkComplete(padapter,
+ HCI_STATUS_SUCCESS,
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
+ EntryNum);
+
+ pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_UNKNOW_CONNECT_ID;
+
+ del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
+
+ BTHCI_DisconnectPeer(padapter, EntryNum);
+ break;
+ case STATE_CMD_4WAY_FAILED:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_4WAY_FAILED\n"));
+
+ pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_AUTH_FAIL;
+ pBtMgnt->bNeedNotifyAMPNoCap = true;
+
+ BTHCI_DisconnectPeer(padapter, EntryNum);
+
+ del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
+ break;
+ case STATE_CMD_4WAY_SUCCESSED:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_4WAY_SUCCESSED\n"));
+
+ bthci_EventPhysicalLinkComplete(padapter, HCI_STATUS_SUCCESS, EntryNum, INVALID_PL_HANDLE);
+
+ del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
+
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_CONNECTED, STATE_CMD_ENTER_STATE, EntryNum);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
+ break;
+ }
+}
+
+static void
+bthci_StateDisconnecting(struct rtw_adapter *padapter,
+ enum hci_state_with_cmd StateCmd, u8 EntryNum)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Disconnecting], "));
+ switch (StateCmd) {
+ case STATE_CMD_MAC_CONNECT_CANCEL_INDICATE:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_MAC_CONNECT_CANCEL_INDICATE\n"));
+ if (pBTInfo->BtAsocEntry[EntryNum].bNeedPhysLinkCompleteEvent) {
+ bthci_EventPhysicalLinkComplete(padapter,
+ pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus,
+ EntryNum, INVALID_PL_HANDLE);
+ }
+
+ if (pBtMgnt->bBTConnectInProgress) {
+ pBtMgnt->bBTConnectInProgress = false;
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], BT Connect in progress OFF!!\n"));
+ }
+
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTED, STATE_CMD_ENTER_STATE, EntryNum);
+ break;
+ case STATE_CMD_DISCONNECT_PHY_LINK:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
+
+ bthci_EventDisconnectPhyLinkComplete(padapter,
+ HCI_STATUS_SUCCESS,
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
+ EntryNum);
+
+ del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
+
+ BTHCI_DisconnectPeer(padapter, EntryNum);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
+ break;
+ }
+}
+
+static void
+bthci_StateDisconnected(struct rtw_adapter *padapter,
+ enum hci_state_with_cmd StateCmd, u8 EntryNum)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Disconnected], "));
+ switch (StateCmd) {
+ case STATE_CMD_CREATE_PHY_LINK:
+ case STATE_CMD_ACCEPT_PHY_LINK:
+ if (StateCmd == STATE_CMD_CREATE_PHY_LINK)
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_CREATE_PHY_LINK\n"));
+ else
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_ACCEPT_PHY_LINK\n"));
+
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT PS], Disable IPS and LPS\n"));
+ ips_leave23a(padapter);
+ LPS_Leave23a(padapter);
+
+ pBtMgnt->bPhyLinkInProgress = true;
+ pBtMgnt->BTCurrentConnectType = BT_DISCONNECT;
+ pBtMgnt->CurrentBTConnectionCnt++;
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], CurrentBTConnectionCnt = %d\n",
+ pBtMgnt->CurrentBTConnectionCnt));
+ pBtMgnt->BtOperationOn = true;
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], Bt Operation ON!! CurrentConnectEntryNum = %d\n",
+ pBtMgnt->CurrentConnectEntryNum));
+
+ if (pBtMgnt->bBTConnectInProgress) {
+ bthci_EventPhysicalLinkComplete(padapter, HCI_STATUS_CONTROLLER_BUSY, INVALID_ENTRY_NUM, pBtMgnt->BtCurrentPhyLinkhandle);
+ bthci_RemoveEntryByEntryNum(padapter, EntryNum);
+ return;
+ }
+
+ if (StateCmd == STATE_CMD_CREATE_PHY_LINK)
+ pBTInfo->BtAsocEntry[EntryNum].AMPRole = AMP_BTAP_CREATOR;
+ else
+ pBTInfo->BtAsocEntry[EntryNum].AMPRole = AMP_BTAP_JOINER;
+
+ /* 1. MAC not yet in selected channel */
+ while (check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR)) {
+ RTPRINT(FIOCTL, IOCTL_STATE, ("Scan/Roaming/Wifi Link is in Progress, wait 200 ms\n"));
+ mdelay(200);
+ }
+ /* 2. MAC already in selected channel */
+ RTPRINT(FIOCTL, IOCTL_STATE, ("Channel is Ready\n"));
+ mod_timer(&pBTInfo->BTHCIJoinTimeoutTimer,
+ jiffies + msecs_to_jiffies(pBtHciInfo->ConnAcceptTimeout));
+
+ pBTInfo->BtAsocEntry[EntryNum].bNeedPhysLinkCompleteEvent = true;
+ break;
+ case STATE_CMD_DISCONNECT_PHY_LINK:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
+
+ del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
+
+ bthci_EventDisconnectPhyLinkComplete(padapter,
+ HCI_STATUS_SUCCESS,
+ pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
+ EntryNum);
+
+ if (pBTInfo->BtAsocEntry[EntryNum].bNeedPhysLinkCompleteEvent) {
+ bthci_EventPhysicalLinkComplete(padapter,
+ HCI_STATUS_UNKNOW_CONNECT_ID,
+ EntryNum, INVALID_PL_HANDLE);
+ }
+
+ if (pBtMgnt->bBTConnectInProgress) {
+ pBtMgnt->bBTConnectInProgress = false;
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], BT Connect in progress OFF!!\n"));
+ }
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTED, STATE_CMD_ENTER_STATE, EntryNum);
+ bthci_RemoveEntryByEntryNum(padapter, EntryNum);
+ break;
+ case STATE_CMD_ENTER_STATE:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_ENTER_STATE\n"));
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
+ break;
+ }
+}
+
+void BTHCI_EventParse(struct rtw_adapter *padapter, void *pEvntData, u32 dataLen)
+{
+}
+
+u8 BTHCI_HsConnectionEstablished(struct rtw_adapter *padapter)
+{
+ u8 bBtConnectionExist = false;
+ struct bt_30info *pBtinfo = GET_BT_INFO(padapter);
+ u8 i;
+
+ for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
+ if (pBtinfo->BtAsocEntry[i].b4waySuccess) {
+ bBtConnectionExist = true;
+ break;
+ }
+ }
+
+/*RTPRINT(FIOCTL, IOCTL_STATE, (" BTHCI_HsConnectionEstablished(), connection exist = %d\n", bBtConnectionExist)); */
+
+ return bBtConnectionExist;
+}
+
+static u8
+BTHCI_CheckProfileExist(struct rtw_adapter *padapter,
+ enum bt_traffic_mode_profile Profile)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+	u8 IsProfile = false;
+	u8 i = 0;
+
+	for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
+		if (pBtMgnt->ExtConfig.linkInfo[i].TrafficProfile == Profile) {
+			IsProfile = true;
+			break;
+		}
+	}
+
+	return IsProfile;
+}
+
+void BTHCI_UpdateBTProfileRTKToMoto(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ u8 i = 0;
+
+ pBtMgnt->ExtConfig.NumberOfSCO = 0;
+
+ for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
+ pBtMgnt->ExtConfig.linkInfo[i].TrafficProfile = BT_PROFILE_NONE;
+
+ if (pBtMgnt->ExtConfig.linkInfo[i].BTProfile == BT_PROFILE_SCO)
+ pBtMgnt->ExtConfig.NumberOfSCO++;
+
+ pBtMgnt->ExtConfig.linkInfo[i].TrafficProfile = pBtMgnt->ExtConfig.linkInfo[i].BTProfile;
+ switch (pBtMgnt->ExtConfig.linkInfo[i].TrafficProfile) {
+ case BT_PROFILE_SCO:
+ break;
+ case BT_PROFILE_PAN:
+ pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode = BT_MOTOR_EXT_BE;
+ pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode = BT_MOTOR_EXT_BE;
+ break;
+ case BT_PROFILE_A2DP:
+ pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode = BT_MOTOR_EXT_GULB;
+ pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode = BT_MOTOR_EXT_GULB;
+ break;
+ case BT_PROFILE_HID:
+ pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode = BT_MOTOR_EXT_GUL;
+ pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode = BT_MOTOR_EXT_BE;
+ break;
+ default:
+ break;
+ }
+ }
+
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RTK, NumberOfHandle = %d, NumberOfSCO = %d\n",
+ pBtMgnt->ExtConfig.NumberOfHandle, pBtMgnt->ExtConfig.NumberOfSCO));
+}
+
+void BTHCI_WifiScanNotify(struct rtw_adapter *padapter, u8 scanType)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->ExtConfig.bEnableWifiScanNotify)
+ bthci_EventExtWifiScanNotify(padapter, scanType);
+}
+
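+
+/*
+ * Drive the BT HCI connection state machine for one association entry.
+ * The transition is only taken if StateToEnter is present in the entry's
+ * BtNextState mask; the handler for the new state then updates BtNextState
+ * with the set of states that may follow.
+ */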
+void
+BTHCI_StateMachine(
+ struct rtw_adapter *padapter,
+ u8 StateToEnter,
+ enum hci_state_with_cmd StateCmd,
+ u8 EntryNum
+ )
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (EntryNum == 0xff) {
+ RTPRINT(FIOCTL, IOCTL_STATE, (" StateMachine, error EntryNum = 0x%x \n", EntryNum));
+ return;
+ }
+ RTPRINT(FIOCTL, IOCTL_STATE, (" StateMachine, EntryNum = 0x%x, CurrentState = 0x%x, BtNextState = 0x%x, StateCmd = 0x%x , StateToEnter = 0x%x\n",
+ EntryNum, pBTInfo->BtAsocEntry[EntryNum].BtCurrentState, pBTInfo->BtAsocEntry[EntryNum].BtNextState, StateCmd, StateToEnter));
+
+ if (pBTInfo->BtAsocEntry[EntryNum].BtNextState & StateToEnter) {
+ pBTInfo->BtAsocEntry[EntryNum].BtCurrentState = StateToEnter;
+
+ switch (StateToEnter) {
+ case HCI_STATE_STARTING:
+ pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_DISCONNECTING | HCI_STATE_CONNECTING;
+ bthci_StateStarting(padapter, StateCmd, EntryNum);
+ break;
+ case HCI_STATE_CONNECTING:
+ pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_CONNECTING | HCI_STATE_DISCONNECTING | HCI_STATE_AUTHENTICATING;
+ bthci_StateConnecting(padapter, StateCmd, EntryNum);
+ break;
+ case HCI_STATE_AUTHENTICATING:
+ pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_DISCONNECTING | HCI_STATE_CONNECTED;
+ bthci_StateAuth(padapter, StateCmd, EntryNum);
+ break;
+ case HCI_STATE_CONNECTED:
+ pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_CONNECTED | HCI_STATE_DISCONNECTING;
+ bthci_StateConnected(padapter, StateCmd, EntryNum);
+ break;
+ case HCI_STATE_DISCONNECTING:
+ pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_DISCONNECTED | HCI_STATE_DISCONNECTING;
+ bthci_StateDisconnecting(padapter, StateCmd, EntryNum);
+ break;
+ case HCI_STATE_DISCONNECTED:
+ pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_DISCONNECTED | HCI_STATE_STARTING | HCI_STATE_CONNECTING;
+ bthci_StateDisconnected(padapter, StateCmd, EntryNum);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_STATE, (" StateMachine, Unknown state to enter!!!\n"));
+ break;
+ }
+ } else {
+ RTPRINT(FIOCTL, IOCTL_STATE, (" StateMachine, Wrong state to enter\n"));
+ }
+
+ /* 20100325 Joseph: Disable/Enable IPS/LPS according to BT status. */
+ if (!pBtMgnt->bBTConnectInProgress && !pBtMgnt->BtOperationOn) {
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT PS], ips_enter23a()\n"));
+ ips_enter23a(padapter);
+ }
+}
+
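+
+/*
+ * Tear down the connection to a peer: run the DISCONNECTING state command,
+ * clear the connect-in-progress flag, free the association entry and, if
+ * required, report AMP_STATUS_NO_CAPACITY_FOR_BT to the host.
+ */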
+void BTHCI_DisconnectPeer(struct rtw_adapter *padapter, u8 EntryNum)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, (" BTHCI_DisconnectPeer()\n"));
+
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTING, STATE_CMD_MAC_CONNECT_CANCEL_INDICATE, EntryNum);
+
+ if (pBTInfo->BtAsocEntry[EntryNum].bUsed) {
+/*BTPKT_SendDeauthentication(padapter, pBTInfo->BtAsocEntry[EntryNum].BTRemoteMACAddr, unspec_reason); not porting yet */
+ }
+
+ if (pBtMgnt->bBTConnectInProgress) {
+ pBtMgnt->bBTConnectInProgress = false;
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], BT Connect in progress OFF!!\n"));
+ }
+
+ bthci_RemoveEntryByEntryNum(padapter, EntryNum);
+
+ if (pBtMgnt->bNeedNotifyAMPNoCap) {
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT AMPStatus], set to invalid in BTHCI_DisconnectPeer()\n"));
+ BTHCI_EventAMPStatusChange(padapter, AMP_STATUS_NO_CAPACITY_FOR_BT);
+ }
+}
+
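+
+/*
+ * Build and send the HCI Number Of Completed Data Blocks event. One
+ * handle/packets/blocks triple is added for every active logical link;
+ * the event is only indicated if event mask page 2 allows it and at
+ * least one link completed packets since the last report.
+ */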
+void BTHCI_EventNumOfCompletedDataBlocks(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
+ u8 localBuf[TmpLocalBufSize] = "";
+ u8 *pRetPar, *pTriple;
+ u8 len = 0, i, j, handleNum = 0;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u16 *pu2Temp, *pPackets, *pHandle, *pDblocks;
+ u8 sent = 0;
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+
+ if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_NUM_OF_COMPLETE_DATA_BLOCKS)) {
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Num Of Completed DataBlocks, Ignore to send NumOfCompletedDataBlocksEvent due to event mask page 2\n"));
+ return;
+ }
+
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[0];
+ pTriple = &pRetPar[3];
+ for (j = 0; j < MAX_BT_ASOC_ENTRY_NUM; j++) {
+
+ for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
+ if (pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle) {
+ handleNum++;
+ pHandle = (u16 *)&pTriple[0]; /* Handle[i] */
+ pPackets = (u16 *)&pTriple[2]; /* Num_Of_Completed_Packets[i] */
+ pDblocks = (u16 *)&pTriple[4]; /* Num_Of_Completed_Blocks[i] */
+ *pHandle = pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle;
+ *pPackets = (u16)pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].TxPacketCount;
+ *pDblocks = (u16)pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].TxPacketCount;
+ if (pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].TxPacketCount) {
+ sent = 1;
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT_DETAIL,
+ ("[BT event], Num Of Completed DataBlocks, Handle = 0x%x, Num_Of_Completed_Packets = 0x%x, Num_Of_Completed_Blocks = 0x%x\n",
+ *pHandle, *pPackets, *pDblocks));
+ }
+ pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].TxPacketCount = 0;
+				len += 6;
+				pTriple += 6;
+ }
+ }
+ }
+
+ pRetPar[2] = handleNum; /* Number_of_Handles */
+ len += 1;
+ pu2Temp = (u16 *)&pRetPar[0];
+ *pu2Temp = BTTotalDataBlockNum;
+ len += 2;
+
+ PPacketIrpEvent->EventCode = HCI_EVENT_NUM_OF_COMPLETE_DATA_BLOCKS;
+ PPacketIrpEvent->Length = len;
+ if (handleNum && sent)
+ bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
+}
+
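+
+/*
+ * Report an AMP status change to the host. Losing capacity for BT also
+ * tears down all existing BT connections via BTHCI_DisconnectAll().
+ */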
+void BTHCI_EventAMPStatusChange(struct rtw_adapter *padapter, u8 AMP_Status)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct packet_irp_hcievent_data *PPacketIrpEvent;
+ u8 len = 0;
+ u8 localBuf[7] = "";
+ u8 *pRetPar;
+
+ if (AMP_Status == AMP_STATUS_NO_CAPACITY_FOR_BT) {
+ pBtMgnt->BTNeedAMPStatusChg = true;
+ pBtMgnt->bNeedNotifyAMPNoCap = false;
+
+ BTHCI_DisconnectAll(padapter);
+ } else if (AMP_Status == AMP_STATUS_FULL_CAPACITY_FOR_BT) {
+ pBtMgnt->BTNeedAMPStatusChg = false;
+ }
+
+ PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
+ /* Return parameters starts from here */
+ pRetPar = &PPacketIrpEvent->Data[0];
+
+ pRetPar[0] = 0; /* Status */
+ len += 1;
+ pRetPar[1] = AMP_Status; /* AMP_Status */
+ len += 1;
+
+ PPacketIrpEvent->EventCode = HCI_EVENT_AMP_STATUS_CHANGE;
+ PPacketIrpEvent->Length = len;
+ if (bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2) == RT_STATUS_SUCCESS)
+ RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_STATE), ("[BT event], AMP Status Change, AMP_Status = %d\n", AMP_Status));
+}
+
+void BTHCI_DisconnectAll(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ u8 i;
+
+ RTPRINT(FIOCTL, IOCTL_STATE, (" DisconnectALL()\n"));
+
+ for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
+ if (pBTInfo->BtAsocEntry[i].b4waySuccess) {
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_CONNECTED, STATE_CMD_DISCONNECT_PHY_LINK, i);
+ } else if (pBTInfo->BtAsocEntry[i].bUsed) {
+ if (pBTInfo->BtAsocEntry[i].BtCurrentState == HCI_STATE_CONNECTING) {
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_CONNECTING, STATE_CMD_MAC_CONNECT_CANCEL_INDICATE, i);
+ } else if (pBTInfo->BtAsocEntry[i].BtCurrentState == HCI_STATE_DISCONNECTING) {
+ BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTING, STATE_CMD_MAC_CONNECT_CANCEL_INDICATE, i);
+ }
+ }
+ }
+}
+
+enum hci_status
+BTHCI_HandleHCICMD(
+ struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd
+ )
+{
+ enum hci_status status = HCI_STATUS_SUCCESS;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("\n"));
+ RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("HCI Command start, OGF = 0x%x, OCF = 0x%x, Length = 0x%x\n",
+ pHciCmd->OGF, pHciCmd->OCF, pHciCmd->Length));
+ if (pHciCmd->Length) {
+ RTPRINT_DATA(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), "HCI Command, Hex Data :\n",
+ &pHciCmd->Data[0], pHciCmd->Length);
+ }
+ if (pHciCmd->OGF == OGF_EXTENSION) {
+ if (pHciCmd->OCF == HCI_SET_RSSI_VALUE)
+ RTPRINT(FIOCTL, IOCTL_BT_EVENT_PERIODICAL, ("[BT cmd], "));
+ else
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[BT cmd], "));
+ } else {
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("[BT cmd], "));
+ }
+
+ pBtDbg->dbgHciInfo.hciCmdCnt++;
+
+ switch (pHciCmd->OGF) {
+ case LINK_CONTROL_COMMANDS:
+ status = bthci_HandleOGFLinkControlCMD(padapter, pHciCmd);
+ break;
+ case HOLD_MODE_COMMAND:
+ break;
+ case OGF_SET_EVENT_MASK_COMMAND:
+ status = bthci_HandleOGFSetEventMaskCMD(padapter, pHciCmd);
+ break;
+ case OGF_INFORMATIONAL_PARAMETERS:
+ status = bthci_HandleOGFInformationalParameters(padapter, pHciCmd);
+ break;
+ case OGF_STATUS_PARAMETERS:
+ status = bthci_HandleOGFStatusParameters(padapter, pHciCmd);
+ break;
+ case OGF_TESTING_COMMANDS:
+ status = bthci_HandleOGFTestingCMD(padapter, pHciCmd);
+ break;
+ case OGF_EXTENSION:
+ status = bthci_HandleOGFExtension(padapter, pHciCmd);
+ break;
+ default:
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI Command(), Unknown OGF = 0x%x\n", pHciCmd->OGF));
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
+ status = bthci_UnknownCMD(padapter, pHciCmd);
+ break;
+ }
+ RTPRINT(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("HCI Command execution end!!\n"));
+
+ return status;
+}
+
+/* ===== End of sync from SD7 driver COMMOM/bt_hci.c ===== */
+#endif
+
+#ifdef __HALBTC87231ANT_C__ /* HAL/BTCoexist/HalBtc87231Ant.c */
+
+static const char *const BtStateString[] = {
+ "BT_DISABLED",
+ "BT_NO_CONNECTION",
+ "BT_CONNECT_IDLE",
+ "BT_INQ_OR_PAG",
+ "BT_ACL_ONLY_BUSY",
+ "BT_SCO_ONLY_BUSY",
+ "BT_ACL_SCO_BUSY",
+ "BT_ACL_INQ_OR_PAG",
+ "BT_STATE_NOT_DEFINED"
+};
+
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc87231Ant.c ===== */
+
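+
+/* Tell BT firmware (H2C BT_IGNORE_WLAN_ACT_EID) whether to ignore WLAN_Act. */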
+static void btdm_SetFwIgnoreWlanAct(struct rtw_adapter *padapter, u8 bEnable)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 H2C_Parameter[1] = {0};
+
+ if (bEnable) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT Ignore Wlan_Act !!\n"));
+ H2C_Parameter[0] |= BIT(0); /* function enable */
+ pHalData->bt_coexist.bFWCoexistAllOff = false;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT don't ignore Wlan_Act !!\n"));
+ }
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25 = 0x%02x\n",
+ H2C_Parameter[0]));
+
+ FillH2CCmd(padapter, BT_IGNORE_WLAN_ACT_EID, 1, H2C_Parameter);
+}
+
+static void btdm_NotifyFwScan(struct rtw_adapter *padapter, u8 scanType)
+{
+ u8 H2C_Parameter[1] = {0};
+
+	if (scanType)
+ H2C_Parameter[0] = 0x1;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Notify FW for wifi scan, write 0x3b = 0x%02x\n",
+ H2C_Parameter[0]));
+
+ FillH2CCmd(padapter, 0x3b, 1, H2C_Parameter);
+}
+
+static void btdm_1AntSetPSMode(struct rtw_adapter *padapter,
+ u8 enable, u8 smartps, u8 mode)
+{
+ struct pwrctrl_priv *pwrctrl;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Current LPS(%s, %d), smartps =%d\n", enable == true?"ON":"OFF", mode, smartps));
+
+ pwrctrl = &padapter->pwrctrlpriv;
+
+	if (enable) {
+ rtw_set_ps_mode23a(padapter, PS_MODE_MIN, smartps, mode);
+ } else {
+ rtw_set_ps_mode23a(padapter, PS_MODE_ACTIVE, 0, 0);
+ LPS_RF_ON_check23a(padapter, 100);
+ }
+}
+
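+
+/* Enable/disable the beacon function (EN_BCN_FUNCTION in reg 0x550), writing only on change. */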
+static void btdm_1AntTSFSwitch(struct rtw_adapter *padapter, u8 enable)
+{
+ u8 oldVal, newVal;
+
+ oldVal = rtw_read8(padapter, 0x550);
+
+ if (enable)
+ newVal = oldVal | EN_BCN_FUNCTION;
+ else
+ newVal = oldVal & ~EN_BCN_FUNCTION;
+
+ if (oldVal != newVal)
+ rtw_write8(padapter, 0x550, newVal);
+}
+
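+
+/* Return true if the requested PS-TDMA on/off state or type differs from the last one programmed. */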
+static u8 btdm_Is1AntPsTdmaStateChange(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_1ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
+
+ if ((pBtdm8723->bPrePsTdmaOn != pBtdm8723->bCurPsTdmaOn) ||
+ (pBtdm8723->prePsTdma != pBtdm8723->curPsTdma))
+ return true;
+ else
+ return false;
+}
+
+/* Before entering TDMA, make sure Power Saving is enabled! */
+static void
+btdm_1AntPsTdma(
+ struct rtw_adapter *padapter,
+ u8 bTurnOn,
+ u8 type
+ )
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_1ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
+
+ pBtdm8723->bCurPsTdmaOn = bTurnOn;
+ pBtdm8723->curPsTdma = type;
+ if (bTurnOn) {
+ switch (type) {
+ case 1: /* A2DP Level-1 or FTP/OPP */
+ default:
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ /* wide duration for WiFi */
+ BTDM_SetFw3a(padapter, 0xd3, 0x1a, 0x1a, 0x0, 0x58);
+ }
+ break;
+ case 2: /* A2DP Level-2 */
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ /* normal duration for WiFi */
+ BTDM_SetFw3a(padapter, 0xd3, 0x12, 0x12, 0x0, 0x58);
+ }
+ break;
+ case 3: /* BT FTP/OPP */
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ /* normal duration for WiFi */
+ BTDM_SetFw3a(padapter, 0xd3, 0x30, 0x03, 0x10, 0x58);
+ }
+ break;
+ case 4: /* for wifi scan & BT is connected */
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ /* protect 3 beacons in 3-beacon period & no Tx pause at BT slot */
+ BTDM_SetFw3a(padapter, 0x93, 0x15, 0x03, 0x14, 0x0);
+ }
+ break;
+ case 5: /* for WiFi connected-busy & BT is Non-Connected-Idle */
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ /* SCO mode, Ant fixed at WiFi, WLAN_Act toggle */
+ BTDM_SetFw3a(padapter, 0x61, 0x15, 0x03, 0x31, 0x00);
+ }
+ break;
+ case 9: /* ACL high-retry type - 2 */
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ /* narrow duration for WiFi */
+				BTDM_SetFw3a(padapter, 0xd3, 0xa, 0xa, 0x0, 0x58);
+ }
+ break;
+ case 10: /* for WiFi connect idle & BT ACL busy or WiFi Connected-Busy & BT is Inquiry */
+ if (btdm_Is1AntPsTdmaStateChange(padapter))
+ BTDM_SetFw3a(padapter, 0x13, 0xa, 0xa, 0x0, 0x40);
+ break;
+ case 11: /* ACL high-retry type - 3 */
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ /* narrow duration for WiFi */
+ BTDM_SetFw3a(padapter, 0xd3, 0x05, 0x05, 0x00, 0x58);
+ }
+ break;
+ case 12: /* for WiFi Connected-Busy & BT is Connected-Idle */
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ /* Allow High-Pri BT */
+ BTDM_SetFw3a(padapter, 0xeb, 0x0a, 0x03, 0x31, 0x18);
+ }
+ break;
+		case 20: /* WiFi only busy, TDMA mode for power saving */
+ if (btdm_Is1AntPsTdmaStateChange(padapter))
+ BTDM_SetFw3a(padapter, 0x13, 0x25, 0x25, 0x00, 0x00);
+ break;
+ case 27: /* WiFi DHCP/Site Survey & BT SCO busy */
+ if (btdm_Is1AntPsTdmaStateChange(padapter))
+ BTDM_SetFw3a(padapter, 0xa3, 0x25, 0x03, 0x31, 0x98);
+ break;
+ case 28: /* WiFi DHCP/Site Survey & BT idle */
+ if (btdm_Is1AntPsTdmaStateChange(padapter))
+ BTDM_SetFw3a(padapter, 0x69, 0x25, 0x03, 0x31, 0x00);
+ break;
+ case 29: /* WiFi DHCP/Site Survey & BT ACL busy */
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ BTDM_SetFw3a(padapter, 0xeb, 0x1a, 0x1a, 0x01, 0x18);
+ rtw_write32(padapter, 0x6c0, 0x5afa5afa);
+ rtw_write32(padapter, 0x6c4, 0x5afa5afa);
+ }
+ break;
+ case 30: /* WiFi idle & BT Inquiry */
+ if (btdm_Is1AntPsTdmaStateChange(padapter))
+ BTDM_SetFw3a(padapter, 0x93, 0x15, 0x03, 0x14, 0x00);
+ break;
+ case 31: /* BT HID */
+ if (btdm_Is1AntPsTdmaStateChange(padapter))
+ BTDM_SetFw3a(padapter, 0xd3, 0x1a, 0x1a, 0x00, 0x58);
+ break;
+ case 32: /* BT SCO & Inquiry */
+ if (btdm_Is1AntPsTdmaStateChange(padapter))
+ BTDM_SetFw3a(padapter, 0xab, 0x0a, 0x03, 0x11, 0x98);
+ break;
+ case 33: /* BT SCO & WiFi site survey */
+ if (btdm_Is1AntPsTdmaStateChange(padapter))
+ BTDM_SetFw3a(padapter, 0xa3, 0x25, 0x03, 0x30, 0x98);
+ break;
+ case 34: /* BT HID & WiFi site survey */
+ if (btdm_Is1AntPsTdmaStateChange(padapter))
+ BTDM_SetFw3a(padapter, 0xd3, 0x1a, 0x1a, 0x00, 0x18);
+ break;
+ case 35: /* BT HID & WiFi Connecting */
+ if (btdm_Is1AntPsTdmaStateChange(padapter))
+ BTDM_SetFw3a(padapter, 0xe3, 0x1a, 0x1a, 0x00, 0x18);
+ break;
+ }
+ } else {
+ /* disable PS-TDMA */
+ switch (type) {
+ case 8:
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ /* Antenna control by PTA, 0x870 = 0x310 */
+ BTDM_SetFw3a(padapter, 0x8, 0x0, 0x0, 0x0, 0x0);
+ }
+ break;
+ case 0:
+ default:
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ /* Antenna control by PTA, 0x870 = 0x310 */
+ BTDM_SetFw3a(padapter, 0x0, 0x0, 0x0, 0x8, 0x0);
+ }
+ rtw_write16(padapter, 0x860, 0x210); /* Switch Antenna to BT */
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], 0x860 = 0x210, Switch Antenna to BT\n"));
+ break;
+ case 9:
+ if (btdm_Is1AntPsTdmaStateChange(padapter)) {
+ /* Antenna control by PTA, 0x870 = 0x310 */
+ BTDM_SetFw3a(padapter, 0x0, 0x0, 0x0, 0x8, 0x0);
+ }
+ rtw_write16(padapter, 0x860, 0x110); /* Switch Antenna to WiFi */
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], 0x860 = 0x110, Switch Antenna to WiFi\n"));
+ break;
+ }
+ }
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Current TDMA(%s, %d)\n",
+ pBtdm8723->bCurPsTdmaOn?"ON":"OFF", pBtdm8723->curPsTdma));
+
+ /* update pre state */
+ pBtdm8723->bPrePsTdmaOn = pBtdm8723->bCurPsTdmaOn;
+ pBtdm8723->prePsTdma = pBtdm8723->curPsTdma;
+}
+
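+
+/*
+ * Program LPS and PS-TDMA together. If the power-save mode has to change,
+ * the current TDMA is first turned off (falling back to type 9 or 0
+ * depending on the BT state), LPS is switched, and then the requested
+ * TDMA setting is applied.
+ */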
+static void
+_btdm_1AntSetPSTDMA(struct rtw_adapter *padapter, u8 bPSEn, u8 smartps,
+ u8 psOption, u8 bTDMAOn, u8 tdmaType)
+{
+ struct pwrctrl_priv *pwrctrl;
+ struct hal_data_8723a *pHalData;
+ struct btdm_8723a_1ant *pBtdm8723;
+ u8 psMode;
+ u8 bSwitchPS;
+
+ if (!check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE) &&
+ (get_fwstate(&padapter->mlmepriv) != WIFI_NULL_STATE)) {
+ btdm_1AntPsTdma(padapter, bTDMAOn, tdmaType);
+ return;
+ }
+ psOption &= ~BIT(0);
+
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], Set LPS(%s, %d) TDMA(%s, %d)\n",
+ bPSEn == true?"ON":"OFF", psOption,
+ bTDMAOn == true?"ON":"OFF", tdmaType));
+
+ pwrctrl = &padapter->pwrctrlpriv;
+ pHalData = GET_HAL_DATA(padapter);
+ pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
+
+ if (bPSEn) {
+ if (pBtdm8723->bWiFiHalt) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Enable PS Fail, WiFi in Halt!!\n"));
+ return;
+ }
+
+ if (pwrctrl->bInSuspend) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Enable PS Fail, WiFi in Suspend!!\n"));
+ return;
+ }
+
+ if (padapter->bDriverStopped) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Enable PS Fail, WiFi driver stopped!!\n"));
+ return;
+ }
+
+ if (padapter->bSurpriseRemoved) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Enable PS Fail, WiFi Surprise Removed!!\n"));
+ return;
+ }
+
+ psMode = PS_MODE_MIN;
+ } else {
+ psMode = PS_MODE_ACTIVE;
+ psOption = 0;
+ }
+
+ if (psMode != pwrctrl->pwr_mode) {
+ bSwitchPS = true;
+ } else if (psMode != PS_MODE_ACTIVE) {
+ if (psOption != pwrctrl->bcn_ant_mode)
+ bSwitchPS = true;
+ else if (smartps != pwrctrl->smart_ps)
+ bSwitchPS = true;
+ else
+ bSwitchPS = false;
+ } else {
+ bSwitchPS = false;
+ }
+
+ if (bSwitchPS) {
+ /* disable TDMA */
+ if (pBtdm8723->bCurPsTdmaOn) {
+ if (!bTDMAOn) {
+ btdm_1AntPsTdma(padapter, false, tdmaType);
+ } else {
+ if ((BT_IsBtDisabled(padapter)) ||
+ (pHalData->bt_coexist.halCoex8723.c2hBtInfo == BT_INFO_STATE_NO_CONNECTION) ||
+ (pHalData->bt_coexist.halCoex8723.c2hBtInfo == BT_INFO_STATE_CONNECT_IDLE) ||
+ (tdmaType == 29))
+ btdm_1AntPsTdma(padapter, false, 9);
+ else
+ btdm_1AntPsTdma(padapter, false, 0);
+ }
+ }
+
+ /* change Power Save State */
+ btdm_1AntSetPSMode(padapter, bPSEn, smartps, psOption);
+ }
+
+ btdm_1AntPsTdma(padapter, bTDMAOn, tdmaType);
+}
+
+static void
+btdm_1AntSetPSTDMA(struct rtw_adapter *padapter, u8 bPSEn,
+ u8 psOption, u8 bTDMAOn, u8 tdmaType)
+{
+ _btdm_1AntSetPSTDMA(padapter, bPSEn, 0, psOption, bTDMAOn, tdmaType);
+}
+
+static void btdm_1AntWifiParaAdjust(struct rtw_adapter *padapter, u8 bEnable)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_1ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
+
+ if (bEnable) {
+ pBtdm8723->curWifiPara = 1;
+ if (pBtdm8723->preWifiPara != pBtdm8723->curWifiPara)
+ BTDM_SetSwPenaltyTxRateAdaptive(padapter, BT_TX_RATE_ADAPTIVE_LOW_PENALTY);
+ } else {
+ pBtdm8723->curWifiPara = 2;
+ if (pBtdm8723->preWifiPara != pBtdm8723->curWifiPara)
+ BTDM_SetSwPenaltyTxRateAdaptive(padapter, BT_TX_RATE_ADAPTIVE_NORMAL);
+ }
+
+}
+
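+
+/* Reload the PTA, antenna-switch and coex-table register defaults for 1-antenna coexistence. */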
+static void btdm_1AntPtaParaReload(struct rtw_adapter *padapter)
+{
+ /* PTA parameter */
+ rtw_write8(padapter, 0x6cc, 0x0); /* 1-Ant coex */
+ rtw_write32(padapter, 0x6c8, 0xffff); /* wifi break table */
+ rtw_write32(padapter, 0x6c4, 0x55555555); /* coex table */
+
+ /* Antenna switch control parameter */
+ rtw_write32(padapter, 0x858, 0xaaaaaaaa);
+ if (IS_8723A_A_CUT(GET_HAL_DATA(padapter)->VersionID)) {
+ rtw_write32(padapter, 0x870, 0x0); /* SPDT(connected with TRSW) control by hardware PTA */
+ rtw_write8(padapter, 0x40, 0x24);
+ } else {
+ rtw_write8(padapter, 0x40, 0x20);
+ rtw_write16(padapter, 0x860, 0x210); /* set antenna at bt side if ANTSW is software control */
+ rtw_write32(padapter, 0x870, 0x300); /* SPDT(connected with TRSW) control by hardware PTA */
+ rtw_write32(padapter, 0x874, 0x22804000); /* ANTSW keep by GNT_BT */
+ }
+
+ /* coexistence parameters */
+ rtw_write8(padapter, 0x778, 0x1); /* enable RTK mode PTA */
+
+	/* Make sure BT does not ignore WLAN_Act */
+ btdm_SetFwIgnoreWlanAct(padapter, false);
+}
+
+/*
+ * Return
+ *  1: upgrade (add WiFi duration time)
+ *  0: keep
+ * -1: downgrade (add BT duration time)
+ */
+static s8 btdm_1AntTdmaJudgement(struct rtw_adapter *padapter, u8 retry)
+{
+ struct hal_data_8723a *pHalData;
+ struct btdm_8723a_1ant *pBtdm8723;
+ static s8 up, dn, m = 1, n = 3, WaitCount;
+ s8 ret;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
+ ret = 0;
+
+ if (pBtdm8723->psTdmaMonitorCnt == 0) {
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ WaitCount = 0;
+ } else {
+ WaitCount++;
+ }
+
+ if (retry == 0) {
+ /* no retry in the last 2-second duration */
+ up++;
+ dn--;
+ if (dn < 0)
+ dn = 0;
+ if (up >= 3*m) {
+ /* retry = 0 in consecutive 3m*(2s), add WiFi duration */
+ ret = 1;
+
+ n = 3;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ }
+ } else if (retry <= 3) {
+ /* retry<= 3 in the last 2-second duration */
+ up--;
+ dn++;
+ if (up < 0)
+ up = 0;
+
+ if (dn == 2) {
+ /* retry<= 3 in consecutive 2*(2s), minus WiFi duration (add BT duration) */
+ ret = -1;
+
+			/* record how many times the WiFi duration has been downgraded */
+ if (WaitCount <= 2)
+ m++;
+ else
+ m = 1;
+			/* the maximum value of m is 20, */
+			/* so the longest wait before the WiFi duration can be upgraded again is 20*3*2s = 120s */
+ if (m >= 20)
+ m = 20;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ }
+ } else {
+ /* retry count > 3 */
+ /* retry>3, minus WiFi duration (add BT duration) */
+ ret = -1;
+
+		/* record how many times the WiFi duration has been downgraded */
+ if (WaitCount == 1)
+ m++;
+ else
+ m = 1;
+ if (m >= 20)
+ m = 20;
+
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ }
+ return ret;
+}
+
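+
+/*
+ * Adjust the WiFi/BT TDMA duration for ACL traffic. Based on the verdict of
+ * btdm_1AntTdmaJudgement() the PS-TDMA type is stepped among 1/2/9/11
+ * (longer or shorter WiFi slots), starting from type 2.
+ */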
+static void btdm_1AntTdmaDurationAdjustForACL(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_1ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
+
+ if (pBtdm8723->psTdmaGlobalCnt != pBtdm8723->psTdmaMonitorCnt) {
+ pBtdm8723->psTdmaMonitorCnt = 0;
+ pBtdm8723->psTdmaGlobalCnt = 0;
+ }
+ if (pBtdm8723->psTdmaMonitorCnt == 0) {
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 2);
+ pBtdm8723->psTdmaDuAdjType = 2;
+ } else {
+		/* Now we only have 4 levels of PS TDMA; */
+		/* if the current type is not one of those 4 (it may have been changed by wifi scan, dhcp, ...), */
+		/* then adjust it back to the previously recorded one. */
+ if ((pBtdm8723->curPsTdma != 1) &&
+ (pBtdm8723->curPsTdma != 2) &&
+ (pBtdm8723->curPsTdma != 9) &&
+ (pBtdm8723->curPsTdma != 11)) {
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, pBtdm8723->psTdmaDuAdjType);
+ } else {
+ s32 judge = 0;
+
+ judge = btdm_1AntTdmaJudgement(padapter, pHalData->bt_coexist.halCoex8723.btRetryCnt);
+ if (judge == -1) {
+ if (pBtdm8723->curPsTdma == 1) {
+ /* Decrease WiFi duration for high BT retry */
+ if (pHalData->bt_coexist.halCoex8723.btInfoExt)
+ pBtdm8723->psTdmaDuAdjType = 9;
+ else
+ pBtdm8723->psTdmaDuAdjType = 2;
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, pBtdm8723->psTdmaDuAdjType);
+ } else if (pBtdm8723->curPsTdma == 2) {
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 9);
+ pBtdm8723->psTdmaDuAdjType = 9;
+ } else if (pBtdm8723->curPsTdma == 9) {
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ }
+ } else if (judge == 1) {
+ if (pBtdm8723->curPsTdma == 11) {
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 9);
+ pBtdm8723->psTdmaDuAdjType = 9;
+ } else if (pBtdm8723->curPsTdma == 9) {
+ if (pHalData->bt_coexist.halCoex8723.btInfoExt)
+ pBtdm8723->psTdmaDuAdjType = 9;
+ else
+ pBtdm8723->psTdmaDuAdjType = 2;
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, pBtdm8723->psTdmaDuAdjType);
+ } else if (pBtdm8723->curPsTdma == 2) {
+ if (pHalData->bt_coexist.halCoex8723.btInfoExt)
+ pBtdm8723->psTdmaDuAdjType = 9;
+ else
+ pBtdm8723->psTdmaDuAdjType = 1;
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, pBtdm8723->psTdmaDuAdjType);
+ }
+ }
+ }
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], ACL current TDMA(%s, %d)\n",
+ (pBtdm8723->bCurPsTdmaOn ? "ON" : "OFF"), pBtdm8723->curPsTdma));
+ }
+ pBtdm8723->psTdmaMonitorCnt++;
+}
+
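+
+/*
+ * Select the PS/TDMA policy while WiFi is connected, based on whether WiFi
+ * is busy and on the BT state/profile reported via C2H.
+ */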
+static void btdm_1AntCoexProcessForWifiConnect(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv;
+ struct hal_data_8723a *pHalData;
+ struct bt_coexist_8723a *pBtCoex;
+ struct btdm_8723a_1ant *pBtdm8723;
+ u8 BtState;
+
+ pmlmepriv = &padapter->mlmepriv;
+ pHalData = GET_HAL_DATA(padapter);
+ pBtCoex = &pHalData->bt_coexist.halCoex8723;
+ pBtdm8723 = &pBtCoex->btdm1Ant;
+ BtState = pBtCoex->c2hBtInfo;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], WiFi is %s\n", BTDM_IsWifiBusy(padapter)?"Busy":"IDLE"));
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT is %s\n", BtStateString[BtState]));
+
+ padapter->pwrctrlpriv.btcoex_rfon = false;
+
+ if ((!BTDM_IsWifiBusy(padapter)) && (!check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE)) &&
+ ((BtState == BT_INFO_STATE_NO_CONNECTION) || (BtState == BT_INFO_STATE_CONNECT_IDLE))) {
+ switch (BtState) {
+ case BT_INFO_STATE_NO_CONNECTION:
+ _btdm_1AntSetPSTDMA(padapter, true, 2, 0x26, false, 9);
+ break;
+ case BT_INFO_STATE_CONNECT_IDLE:
+ _btdm_1AntSetPSTDMA(padapter, true, 2, 0x26, false, 0);
+ break;
+ }
+ } else {
+ switch (BtState) {
+ case BT_INFO_STATE_NO_CONNECTION:
+ case BT_INFO_STATE_CONNECT_IDLE:
+ /* WiFi is Busy */
+ btdm_1AntSetPSTDMA(padapter, false, 0, true, 5);
+ rtw_write32(padapter, 0x6c0, 0x5a5a5a5a);
+ rtw_write32(padapter, 0x6c4, 0x5a5a5a5a);
+ break;
+ case BT_INFO_STATE_ACL_INQ_OR_PAG:
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT PROFILE is BT_INFO_STATE_ACL_INQ_OR_PAG\n"));
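+			/* fall through */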
+ case BT_INFO_STATE_INQ_OR_PAG:
+ padapter->pwrctrlpriv.btcoex_rfon = true;
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 30);
+ break;
+ case BT_INFO_STATE_SCO_ONLY_BUSY:
+ case BT_INFO_STATE_ACL_SCO_BUSY:
+			if (pBtCoex->bC2hBtInquiryPage) {
+ btdm_1AntSetPSTDMA(padapter, false, 0, true, 32);
+ } else {
+#ifdef BTCOEX_CMCC_TEST
+ btdm_1AntSetPSTDMA(padapter, false, 0, true, 23);
+#else /* !BTCOEX_CMCC_TEST */
+ btdm_1AntSetPSTDMA(padapter, false, 0, false, 8);
+ rtw_write32(padapter, 0x6c0, 0x5a5a5a5a);
+ rtw_write32(padapter, 0x6c4, 0x5a5a5a5a);
+#endif /* !BTCOEX_CMCC_TEST */
+ }
+ break;
+ case BT_INFO_STATE_ACL_ONLY_BUSY:
+ padapter->pwrctrlpriv.btcoex_rfon = true;
+ if (pBtCoex->c2hBtProfile == BT_INFO_HID) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT PROFILE is HID\n"));
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 31);
+ } else if (pBtCoex->c2hBtProfile == BT_INFO_FTP) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT PROFILE is FTP/OPP\n"));
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 3);
+ } else if (pBtCoex->c2hBtProfile == (BT_INFO_A2DP|BT_INFO_FTP)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT PROFILE is A2DP_FTP\n"));
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 11);
+ } else {
+ if (pBtCoex->c2hBtProfile == BT_INFO_A2DP)
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT PROFILE is A2DP\n"));
+ else
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT PROFILE is UNKNOWN(0x%02X)! Use A2DP Profile\n", pBtCoex->c2hBtProfile));
+ btdm_1AntTdmaDurationAdjustForACL(padapter);
+ }
+ break;
+ }
+ }
+
+ pBtdm8723->psTdmaGlobalCnt++;
+}
+
+static void btdm_1AntUpdateHalRAMask(struct rtw_adapter *padapter, u32 mac_id, u32 filter)
+{
+ u8 init_rate = 0;
+ u8 raid;
+ u32 mask;
+ u8 shortGIrate = false;
+ int supportRateNum = 0;
+ struct sta_info *psta;
+ struct hal_data_8723a *pHalData;
+ struct dm_priv *pdmpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ struct wlan_bssid_ex *cur_network;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], %s, MACID =%d, filter = 0x%08x!!\n", __func__, mac_id, filter));
+
+ pHalData = GET_HAL_DATA(padapter);
+ pdmpriv = &pHalData->dmpriv;
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+ cur_network = &pmlmeinfo->network;
+
+ if (mac_id >= NUM_STA) { /* CAM_SIZE */
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], %s, MACID =%d illegal!!\n", __func__, mac_id));
+ return;
+ }
+
+ psta = pmlmeinfo->FW_sta_info[mac_id].psta;
+ if (psta == NULL) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], %s, Can't find station!!\n", __func__));
+ return;
+ }
+
+ raid = psta->raid;
+
+ switch (mac_id) {
+ case 0:/* for infra mode */
+ supportRateNum = rtw_get_rateset_len23a(cur_network->SupportedRates);
+ mask = update_supported_rate23a(cur_network->SupportedRates, supportRateNum);
+ mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate23a(&pmlmeinfo->HT_caps):0;
+ if (support_short_GI23a(padapter, &pmlmeinfo->HT_caps))
+ shortGIrate = true;
+ break;
+ case 1:/* for broadcast/multicast */
+ supportRateNum = rtw_get_rateset_len23a(pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ mask = update_basic_rate23a(cur_network->SupportedRates, supportRateNum);
+ break;
+ default: /* for each sta in IBSS */
+ supportRateNum = rtw_get_rateset_len23a(pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ mask = update_supported_rate23a(cur_network->SupportedRates, supportRateNum);
+ break;
+ }
+ mask |= ((raid<<28)&0xf0000000);
+ mask &= 0xffffffff;
+ mask &= ~filter;
+ init_rate = get_highest_rate_idx23a(mask)&0x3f;
+
+ if (pHalData->fw_ractrl) {
+ u8 arg = 0;
+
+ arg = mac_id&0x1f;/* MACID */
+ arg |= BIT(7);
+		if (shortGIrate)
+ arg |= BIT(5);
+
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], Update FW RAID entry, MASK = 0x%08x, arg = 0x%02x\n",
+ mask, arg));
+
+ rtl8723a_set_raid_cmd(padapter, mask, arg);
+ } else {
+ if (shortGIrate)
+ init_rate |= BIT(6);
+
+ rtw_write8(padapter, (REG_INIDATA_RATE_SEL+mac_id), init_rate);
+ }
+
+ psta->init_rate = init_rate;
+ pdmpriv->INIDATA_RATE[mac_id] = init_rate;
+}
+
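+
+/*
+ * While BT SCO traffic is busy, restrict the rate-adaptive mask of the
+ * associated station (filter out 1/2/5.5/11M CCK and 6/9M OFDM rates).
+ * bRAChanged remembers that the mask was overridden so it can be restored.
+ */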
+static void btdm_1AntUpdateHalRAMaskForSCO(struct rtw_adapter *padapter, u8 forceUpdate)
+{
+ struct btdm_8723a_1ant *pBtdm8723;
+ struct sta_priv *pstapriv;
+ struct wlan_bssid_ex *cur_network;
+ struct sta_info *psta;
+ u32 macid;
+ u32 filter = 0;
+
+ pBtdm8723 = &GET_HAL_DATA(padapter)->bt_coexist.halCoex8723.btdm1Ant;
+
+	if (pBtdm8723->bRAChanged && !forceUpdate)
+ return;
+
+ pstapriv = &padapter->stapriv;
+ cur_network = &padapter->mlmeextpriv.mlmext_info.network;
+ psta = rtw_get_stainfo23a(pstapriv, cur_network->MacAddress);
+ macid = psta->mac_id;
+
+ filter |= BIT(_1M_RATE_);
+ filter |= BIT(_2M_RATE_);
+ filter |= BIT(_5M_RATE_);
+ filter |= BIT(_11M_RATE_);
+ filter |= BIT(_6M_RATE_);
+ filter |= BIT(_9M_RATE_);
+
+ btdm_1AntUpdateHalRAMask(padapter, macid, filter);
+
+ pBtdm8723->bRAChanged = true;
+}
+
+static void btdm_1AntRecoverHalRAMask(struct rtw_adapter *padapter)
+{
+ struct btdm_8723a_1ant *pBtdm8723;
+ struct sta_priv *pstapriv;
+ struct wlan_bssid_ex *cur_network;
+ struct sta_info *psta;
+
+ pBtdm8723 = &GET_HAL_DATA(padapter)->bt_coexist.halCoex8723.btdm1Ant;
+
+	if (!pBtdm8723->bRAChanged)
+ return;
+
+ pstapriv = &padapter->stapriv;
+ cur_network = &padapter->mlmeextpriv.mlmext_info.network;
+ psta = rtw_get_stainfo23a(pstapriv, cur_network->MacAddress);
+
+ Update_RA_Entry23a(padapter, psta);
+
+ pBtdm8723->bRAChanged = false;
+}
+
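+
+/*
+ * React to a change of the BT C2H state: keep WLAN_Act honoured while BT is
+ * active, switch the RA mask in and out of SCO mode, reset the TDMA monitor
+ * counters, and enable/disable the RF/AGC/BB coexistence tweaks when BT
+ * moves between connected and idle/disabled.
+ */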
+static void
+btdm_1AntBTStateChangeHandler(struct rtw_adapter *padapter,
+ enum bt_state_1ant oldState, enum bt_state_1ant newState)
+{
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT state change, %s => %s\n", BtStateString[oldState], BtStateString[newState]));
+
+	/* By default BT ignores wlan active; */
+	/* WiFi MUST disable this when BT is enabled */
+ if (newState > BT_INFO_STATE_DISABLED)
+ btdm_SetFwIgnoreWlanAct(padapter, false);
+
+ if ((check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE)) &&
+ (BTDM_IsWifiConnectionExist(padapter))) {
+ if ((newState == BT_INFO_STATE_SCO_ONLY_BUSY) ||
+ (newState == BT_INFO_STATE_ACL_SCO_BUSY)) {
+ btdm_1AntUpdateHalRAMaskForSCO(padapter, false);
+ } else {
+ /* Recover original RA setting */
+ btdm_1AntRecoverHalRAMask(padapter);
+ }
+ } else {
+ GET_HAL_DATA(padapter)->bt_coexist.halCoex8723.btdm1Ant.bRAChanged = false;
+ }
+
+ if (oldState == newState)
+ return;
+
+ if (oldState == BT_INFO_STATE_ACL_ONLY_BUSY) {
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ pHalData->bt_coexist.halCoex8723.btdm1Ant.psTdmaMonitorCnt = 0;
+ pHalData->bt_coexist.halCoex8723.btdm1Ant.psTdmaMonitorCntForSCO = 0;
+ }
+
+ if ((oldState == BT_INFO_STATE_SCO_ONLY_BUSY) ||
+ (oldState == BT_INFO_STATE_ACL_SCO_BUSY)) {
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ pHalData->bt_coexist.halCoex8723.btdm1Ant.psTdmaMonitorCntForSCO = 0;
+ }
+
+ /* Active 2Ant mechanism when BT Connected */
+ if ((oldState == BT_INFO_STATE_DISABLED) ||
+ (oldState == BT_INFO_STATE_NO_CONNECTION)) {
+ if ((newState != BT_INFO_STATE_DISABLED) &&
+ (newState != BT_INFO_STATE_NO_CONNECTION)) {
+ BTDM_SetSwRfRxLpfCorner(padapter, BT_RF_RX_LPF_CORNER_SHRINK);
+ BTDM_AGCTable(padapter, BT_AGCTABLE_ON);
+ BTDM_BBBackOffLevel(padapter, BT_BB_BACKOFF_ON);
+ }
+ } else {
+ if ((newState == BT_INFO_STATE_DISABLED) ||
+ (newState == BT_INFO_STATE_NO_CONNECTION)) {
+ BTDM_SetSwRfRxLpfCorner(padapter, BT_RF_RX_LPF_CORNER_RESUME);
+ BTDM_AGCTable(padapter, BT_AGCTABLE_OFF);
+ BTDM_BBBackOffLevel(padapter, BT_BB_BACKOFF_OFF);
+ }
+ }
+}
+
+static void btdm_1AntBtCoexistHandler(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct bt_coexist_8723a *pBtCoex8723;
+ struct btdm_8723a_1ant *pBtdm8723;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBtCoex8723 = &pHalData->bt_coexist.halCoex8723;
+ pBtdm8723 = &pBtCoex8723->btdm1Ant;
+ padapter->pwrctrlpriv.btcoex_rfon = false;
+ if (BT_IsBtDisabled(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT is disabled\n"));
+
+ if (BTDM_IsWifiConnectionExist(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], wifi is connected\n"));
+
+ if (BTDM_IsWifiBusy(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Wifi is busy\n"));
+ btdm_1AntSetPSTDMA(padapter, false, 0, false, 9);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Wifi is idle\n"));
+ _btdm_1AntSetPSTDMA(padapter, true, 2, 1, false, 9);
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], wifi is disconnected\n"));
+
+ btdm_1AntSetPSTDMA(padapter, false, 0, false, 9);
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT is enabled\n"));
+
+ if (BTDM_IsWifiConnectionExist(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], wifi is connected\n"));
+
+ btdm_1AntWifiParaAdjust(padapter, true);
+ btdm_1AntCoexProcessForWifiConnect(padapter);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], wifi is disconnected\n"));
+
+ /* Antenna switch at BT side(0x870 = 0x300, 0x860 = 0x210) after PSTDMA off */
+ btdm_1AntWifiParaAdjust(padapter, false);
+ btdm_1AntSetPSTDMA(padapter, false, 0, false, 0);
+ }
+ }
+
+ btdm_1AntBTStateChangeHandler(padapter, pBtCoex8723->prec2hBtInfo, pBtCoex8723->c2hBtInfo);
+ pBtCoex8723->prec2hBtInfo = pBtCoex8723->c2hBtInfo;
+}
+
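+
+/*
+ * Compensate the reported WiFi RSSI according to the current PS-TDMA type:
+ * the larger the share of the period spent on the BT side of the antenna,
+ * the larger the compensation added.
+ */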
+void BTDM_1AntSignalCompensation(struct rtw_adapter *padapter, u8 *rssi_wifi, u8 *rssi_bt)
+{
+ struct hal_data_8723a *pHalData;
+ struct btdm_8723a_1ant *pBtdm8723;
+ u8 RSSI_WiFi_Cmpnstn, RSSI_BT_Cmpnstn;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
+ RSSI_WiFi_Cmpnstn = 0;
+ RSSI_BT_Cmpnstn = 0;
+
+ switch (pBtdm8723->curPsTdma) {
+ case 1: /* WiFi 52ms */
+ RSSI_WiFi_Cmpnstn = 11; /* 22*0.48 */
+ break;
+ case 2: /* WiFi 36ms */
+ RSSI_WiFi_Cmpnstn = 14; /* 22*0.64 */
+ break;
+ case 9: /* WiFi 20ms */
+ RSSI_WiFi_Cmpnstn = 18; /* 22*0.80 */
+ break;
+ case 11: /* WiFi 10ms */
+ RSSI_WiFi_Cmpnstn = 20; /* 22*0.90 */
+ break;
+ case 4: /* WiFi 21ms */
+ RSSI_WiFi_Cmpnstn = 17; /* 22*0.79 */
+ break;
+ case 16: /* WiFi 24ms */
+ RSSI_WiFi_Cmpnstn = 18; /* 22*0.76 */
+ break;
+ case 18: /* WiFi 37ms */
+ RSSI_WiFi_Cmpnstn = 14; /* 22*0.64 */
+ break;
+ case 23: /* Level-1, Antenna switch to BT at all time */
+ case 24: /* Level-2, Antenna switch to BT at all time */
+ case 25: /* Level-3a, Antenna switch to BT at all time */
+ case 26: /* Level-3b, Antenna switch to BT at all time */
+ case 27: /* Level-3b, Antenna switch to BT at all time */
+ case 33: /* BT SCO & WiFi site survey */
+ RSSI_WiFi_Cmpnstn = 22;
+ break;
+ default:
+ break;
+ }
+
+ if (rssi_wifi && RSSI_WiFi_Cmpnstn) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1AntSgnlCmpnstn, case %d, WiFiCmpnstn =%d(%d => %d)\n",
+ pBtdm8723->curPsTdma, RSSI_WiFi_Cmpnstn, *rssi_wifi, *rssi_wifi+RSSI_WiFi_Cmpnstn));
+ *rssi_wifi += RSSI_WiFi_Cmpnstn;
+ }
+
+ if (rssi_bt && RSSI_BT_Cmpnstn) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1AntSgnlCmpnstn, case %d, BTCmpnstn =%d(%d => %d)\n",
+ pBtdm8723->curPsTdma, RSSI_BT_Cmpnstn, *rssi_bt, *rssi_bt+RSSI_BT_Cmpnstn));
+ *rssi_bt += RSSI_BT_Cmpnstn;
+ }
+}
+
+static void BTDM_1AntParaInit(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct bt_coexist_8723a *pBtCoex;
+ struct btdm_8723a_1ant *pBtdm8723;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBtCoex = &pHalData->bt_coexist.halCoex8723;
+ pBtdm8723 = &pBtCoex->btdm1Ant;
+
+ /* Enable counter statistics */
+ rtw_write8(padapter, 0x76e, 0x4);
+ btdm_1AntPtaParaReload(padapter);
+
+ pBtdm8723->wifiRssiThresh = 48;
+
+ pBtdm8723->bWiFiHalt = false;
+ pBtdm8723->bRAChanged = false;
+
+ if ((pBtCoex->c2hBtInfo != BT_INFO_STATE_DISABLED) &&
+ (pBtCoex->c2hBtInfo != BT_INFO_STATE_NO_CONNECTION)) {
+ BTDM_SetSwRfRxLpfCorner(padapter, BT_RF_RX_LPF_CORNER_SHRINK);
+ BTDM_AGCTable(padapter, BT_AGCTABLE_ON);
+ BTDM_BBBackOffLevel(padapter, BT_BB_BACKOFF_ON);
+ }
+}
+
+static void BTDM_1AntForHalt(struct rtw_adapter *padapter)
+{
+ RTPRINT(FBT, BT_TRACE, ("\n[BTCoex], 1Ant for halt\n"));
+
+ GET_HAL_DATA(padapter)->bt_coexist.halCoex8723.btdm1Ant.bWiFiHalt = true;
+
+ btdm_1AntWifiParaAdjust(padapter, false);
+
+ /* don't use btdm_1AntSetPSTDMA() here */
+ /* it will call rtw_set_ps_mode23a() and request pwrpriv->lock. */
+ /* This will lead to deadlock, if this function is called in IPS */
+ /* Lucas@20130205 */
+ btdm_1AntPsTdma(padapter, false, 0);
+
+ btdm_SetFwIgnoreWlanAct(padapter, true);
+}
+
+static void BTDM_1AntLpsLeave(struct rtw_adapter *padapter)
+{
+ RTPRINT(FBT, BT_TRACE, ("\n[BTCoex], 1Ant for LPS Leave\n"));
+
+ /* Prevent from entering LPS again */
+ GET_HAL_DATA(padapter)->bt_coexist.halCoex8723.btdm1Ant.bWiFiHalt = true;
+
+ btdm_1AntSetPSTDMA(padapter, false, 0, false, 8);
+/*btdm_1AntPsTdma(padapter, false, 8); */
+}
+
+static void BTDM_1AntWifiAssociateNotify(struct rtw_adapter *padapter, u8 type)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ RTPRINT(FBT, BT_TRACE, ("\n[BTCoex], 1Ant for associate, type =%d\n", type));
+
+ if (type) {
+ rtl8723a_CheckAntenna_Selection(padapter);
+ if (BT_IsBtDisabled(padapter)) {
+ btdm_1AntSetPSTDMA(padapter, false, 0, false, 9);
+ } else {
+ struct bt_coexist_8723a *pBtCoex;
+ u8 BtState;
+
+ pBtCoex = &pHalData->bt_coexist.halCoex8723;
+ BtState = pBtCoex->c2hBtInfo;
+
+ btdm_1AntTSFSwitch(padapter, true);
+
+ if ((BtState == BT_INFO_STATE_NO_CONNECTION) ||
+ (BtState == BT_INFO_STATE_CONNECT_IDLE)) {
+ btdm_1AntSetPSTDMA(padapter, false, 0, true, 28);
+ } else if ((BtState == BT_INFO_STATE_SCO_ONLY_BUSY) ||
+ (BtState == BT_INFO_STATE_ACL_SCO_BUSY)) {
+ btdm_1AntSetPSTDMA(padapter, false, 0, false, 8);
+ rtw_write32(padapter, 0x6c0, 0x5a5a5a5a);
+ rtw_write32(padapter, 0x6c4, 0x5a5a5a5a);
+ } else if ((BtState == BT_INFO_STATE_ACL_ONLY_BUSY) ||
+ (BtState == BT_INFO_STATE_ACL_INQ_OR_PAG)) {
+ if (pBtCoex->c2hBtProfile == BT_INFO_HID)
+ btdm_1AntSetPSTDMA(padapter, false, 0, true, 35);
+ else
+ btdm_1AntSetPSTDMA(padapter, false, 0, true, 29);
+ }
+ }
+ } else {
+ if (BT_IsBtDisabled(padapter)) {
+ if (!BTDM_IsWifiConnectionExist(padapter)) {
+ btdm_1AntPsTdma(padapter, false, 0);
+ btdm_1AntTSFSwitch(padapter, false);
+ }
+ }
+
+ btdm_1AntBtCoexistHandler(padapter);
+ }
+}
+
+static void
+BTDM_1AntMediaStatusNotify(struct rtw_adapter *padapter,
+ enum rt_media_status mstatus)
+{
+ struct bt_coexist_8723a *pBtCoex;
+
+ pBtCoex = &GET_HAL_DATA(padapter)->bt_coexist.halCoex8723;
+
+ RTPRINT(FBT, BT_TRACE, ("\n\n[BTCoex]******************************\n"));
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], MediaStatus, WiFi %s !!\n",
+		mstatus == RT_MEDIA_CONNECT ? "CONNECT" : "DISCONNECT"));
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex]******************************\n"));
+
+ if (RT_MEDIA_CONNECT == mstatus) {
+ if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE)) {
+ if ((pBtCoex->c2hBtInfo == BT_INFO_STATE_SCO_ONLY_BUSY) ||
+ (pBtCoex->c2hBtInfo == BT_INFO_STATE_ACL_SCO_BUSY))
+ btdm_1AntUpdateHalRAMaskForSCO(padapter, true);
+ }
+
+ padapter->pwrctrlpriv.DelayLPSLastTimeStamp = jiffies;
+ BTDM_1AntForDhcp(padapter);
+ } else {
+ /* DBG_8723A("%s rtl8723a_DeinitAntenna_Selection\n", __func__); */
+ rtl8723a_DeinitAntenna_Selection(padapter);
+ btdm_1AntBtCoexistHandler(padapter);
+ pBtCoex->btdm1Ant.bRAChanged = false;
+ }
+}
+
+void BTDM_1AntForDhcp(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ u8 BtState;
+ struct bt_coexist_8723a *pBtCoex;
+ struct btdm_8723a_1ant *pBtdm8723;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBtCoex = &pHalData->bt_coexist.halCoex8723;
+ BtState = pBtCoex->c2hBtInfo;
+ pBtdm8723 = &pBtCoex->btdm1Ant;
+
+ RTPRINT(FBT, BT_TRACE, ("\n[BTCoex], 1Ant for DHCP\n"));
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1Ant for DHCP, WiFi is %s\n", BTDM_IsWifiBusy(padapter)?"Busy":"IDLE"));
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1Ant for DHCP, %s\n", BtStateString[BtState]));
+
+ BTDM_1AntWifiAssociateNotify(padapter, true);
+}
+
+static void BTDM_1AntWifiScanNotify(struct rtw_adapter *padapter, u8 scanType)
+{
+ struct hal_data_8723a *pHalData;
+ u8 BtState;
+ struct bt_coexist_8723a *pBtCoex;
+ struct btdm_8723a_1ant *pBtdm8723;
+
+ pHalData = GET_HAL_DATA(padapter);
+ BtState = pHalData->bt_coexist.halCoex8723.c2hBtInfo;
+ pBtCoex = &pHalData->bt_coexist.halCoex8723;
+ pBtdm8723 = &pBtCoex->btdm1Ant;
+
+ RTPRINT(FBT, BT_TRACE, ("\n[BTCoex], 1Ant for wifi scan =%d!!\n", scanType));
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1Ant for wifi scan, WiFi is %s\n", BTDM_IsWifiBusy(padapter)?"Busy":"IDLE"));
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1Ant for wifi scan, %s\n", BtStateString[BtState]));
+
+ if (scanType) {
+ rtl8723a_CheckAntenna_Selection(padapter);
+ if (BT_IsBtDisabled(padapter)) {
+ btdm_1AntSetPSTDMA(padapter, false, 0, false, 9);
+ } else if (BTDM_IsWifiConnectionExist(padapter) == false) {
+ BTDM_1AntWifiAssociateNotify(padapter, true);
+ } else {
+ if ((BtState == BT_INFO_STATE_SCO_ONLY_BUSY) ||
+ (BtState == BT_INFO_STATE_ACL_SCO_BUSY)) {
+ if (pBtCoex->bC2hBtInquiryPage) {
+ btdm_1AntSetPSTDMA(padapter, false, 0, true, 32);
+ } else {
+ padapter->pwrctrlpriv.btcoex_rfon = true;
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 33);
+ }
+			} else if (pBtCoex->bC2hBtInquiryPage) {
+ padapter->pwrctrlpriv.btcoex_rfon = true;
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 30);
+ } else if (BtState == BT_INFO_STATE_ACL_ONLY_BUSY) {
+ padapter->pwrctrlpriv.btcoex_rfon = true;
+ if (pBtCoex->c2hBtProfile == BT_INFO_HID)
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 34);
+ else
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 4);
+ } else {
+ padapter->pwrctrlpriv.btcoex_rfon = true;
+ btdm_1AntSetPSTDMA(padapter, true, 0, true, 5);
+ }
+ }
+
+ btdm_NotifyFwScan(padapter, 1);
+ } else {
+ /* WiFi_Finish_Scan */
+ btdm_NotifyFwScan(padapter, 0);
+ btdm_1AntBtCoexistHandler(padapter);
+ }
+}
+
+static void BTDM_1AntFwC2hBtInfo8723A(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+ struct bt_coexist_8723a *pBtCoex;
+ u8 u1tmp, btState;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+ pBtCoex = &pHalData->bt_coexist.halCoex8723;
+
+ u1tmp = pBtCoex->c2hBtInfoOriginal;
+ /* sco BUSY bit is not used on voice over PCM platform */
+ btState = u1tmp & 0xF;
+ pBtCoex->c2hBtProfile = u1tmp & 0xE0;
+
+ /* default set bt to idle state. */
+ pBtMgnt->ExtConfig.bBTBusy = false;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_IDLE;
+
+ /* check BIT2 first ==> check if bt is under inquiry or page scan */
+ if (btState & BIT(2))
+ pBtCoex->bC2hBtInquiryPage = true;
+ else
+ pBtCoex->bC2hBtInquiryPage = false;
+ btState &= ~BIT(2);
+
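+	/* Descriptive note: the remaining bits map to the connection states decoded below: 0x1 connected idle, */
+	/* 0x3 SCO busy, 0x9 ACL busy (or inquiry/page), 0xb ACL+SCO busy; any other non-zero value is left as "max"/unknown. */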
+ if (!(btState & BIT(0))) {
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_NO_CONNECTION;
+ } else {
+ if (btState == 0x1) {
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_CONNECT_IDLE;
+ } else if (btState == 0x9) {
+			if (pBtCoex->bC2hBtInquiryPage)
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_ACL_INQ_OR_PAG;
+ else
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_ACL_ONLY_BUSY;
+ pBtMgnt->ExtConfig.bBTBusy = true;
+ } else if (btState == 0x3) {
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_SCO_ONLY_BUSY;
+ pBtMgnt->ExtConfig.bBTBusy = true;
+ } else if (btState == 0xb) {
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_ACL_SCO_BUSY;
+ pBtMgnt->ExtConfig.bBTBusy = true;
+ } else {
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_MAX;
+ }
+ if (pBtMgnt->ExtConfig.bBTBusy)
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_IDLE;
+ }
+
+ if ((BT_INFO_STATE_NO_CONNECTION == pBtCoex->c2hBtInfo) ||
+ (BT_INFO_STATE_CONNECT_IDLE == pBtCoex->c2hBtInfo)) {
+ if (pBtCoex->bC2hBtInquiryPage)
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_INQ_OR_PAG;
+ }
+
+ RTPRINT(FBT, BT_TRACE, ("[BTC2H], %s(%d)\n",
+ BtStateString[pBtCoex->c2hBtInfo], pBtCoex->c2hBtInfo));
+
+ if (pBtCoex->c2hBtProfile != BT_INFO_HID)
+ pBtCoex->c2hBtProfile &= ~BT_INFO_HID;
+}
+
+void BTDM_1AntBtCoexist8723A(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv;
+ struct hal_data_8723a *pHalData;
+ unsigned long delta_time;
+
+ pmlmepriv = &padapter->mlmepriv;
+ pHalData = GET_HAL_DATA(padapter);
+
+ if (check_fwstate(pmlmepriv, WIFI_SITE_MONITOR)) {
+ /* already done in BTDM_1AntForScan() */
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], wifi is under scan progress!!\n"));
+ return;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_LINKING)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], wifi is under link progress!!\n"));
+ return;
+ }
+
+ /* under DHCP(Special packet) */
+ delta_time = jiffies - padapter->pwrctrlpriv.DelayLPSLastTimeStamp;
+ delta_time = jiffies_to_msecs(delta_time);
+ if (delta_time < 500) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], wifi is under DHCP "
+ "progress(%li ms)!!\n", delta_time));
+ return;
+ }
+
+ BTDM_CheckWiFiState(padapter);
+
+ btdm_1AntBtCoexistHandler(padapter);
+}
+
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc87231Ant.c ===== */
+#endif
+
+#ifdef __HALBTC87232ANT_C__ /* HAL/BTCoexist/HalBtc87232Ant.c */
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc87232Ant.c ===== */
+
+/* local function start with btdm_ */
+static u8 btdm_ActionAlgorithm(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+ u8 bScoExist = false, bBtLinkExist = false, bBtHsModeExist = false;
+ u8 algorithm = BT_2ANT_COEX_ALGO_UNDEFINED;
+
+ if (pBtMgnt->ExtConfig.NumberOfHandle)
+ bBtLinkExist = true;
+ if (pBtMgnt->ExtConfig.NumberOfSCO)
+ bScoExist = true;
+ if (BT_HsConnectionEstablished(padapter))
+ bBtHsModeExist = true;
+
+ /* here we get BT status first */
+ /* 1) initialize */
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_IDLE;
+
+ if ((bScoExist) || (bBtHsModeExist) ||
+ (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID))) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO or HID or HS exists, set BT non-idle !!!\n"));
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_NON_IDLE;
+ } else {
+ /* A2dp profile */
+ if ((pBtMgnt->ExtConfig.NumberOfHandle == 1) &&
+ (BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP))) {
+ if (BTDM_BtTxRxCounterL(padapter) < 100) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP, low priority tx+rx < 100, set BT connected-idle!!!\n"));
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP, low priority tx+rx >= 100, set BT non-idle!!!\n"));
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_NON_IDLE;
+ }
+ }
+ /* Pan profile */
+ if ((pBtMgnt->ExtConfig.NumberOfHandle == 1) &&
+ (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN))) {
+ if (BTDM_BtTxRxCounterL(padapter) < 600) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN, low priority tx+rx < 600, set BT connected-idle!!!\n"));
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
+ } else {
+ if (pHalData->bt_coexist.halCoex8723.lowPriorityTx) {
+ if ((pHalData->bt_coexist.halCoex8723.lowPriorityRx /
+ pHalData->bt_coexist.halCoex8723.lowPriorityTx) > 9) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN, low priority rx/tx > 9, set BT connected-idle!!!\n"));
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ }
+ }
+ if (BT_2ANT_BT_STATUS_CONNECTED_IDLE != pBtdm8723->btStatus) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN, set BT non-idle!!!\n"));
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_NON_IDLE;
+ }
+ }
+ /* Pan+A2dp profile */
+ if ((pBtMgnt->ExtConfig.NumberOfHandle == 2) &&
+ (BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) &&
+ (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN))) {
+ if (BTDM_BtTxRxCounterL(padapter) < 600) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN+A2DP, low priority tx+rx < 600, set BT connected-idle!!!\n"));
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
+ } else {
+ if (pHalData->bt_coexist.halCoex8723.lowPriorityTx) {
+ if ((pHalData->bt_coexist.halCoex8723.lowPriorityRx /
+ pHalData->bt_coexist.halCoex8723.lowPriorityTx) > 9) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN+A2DP, low priority rx/tx > 9, set BT connected-idle!!!\n"));
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ }
+ }
+ if (BT_2ANT_BT_STATUS_CONNECTED_IDLE != pBtdm8723->btStatus) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN+A2DP, set BT non-idle!!!\n"));
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_NON_IDLE;
+ }
+ }
+ }
+ if (BT_2ANT_BT_STATUS_IDLE != pBtdm8723->btStatus)
+ pBtMgnt->ExtConfig.bBTBusy = true;
+ else
+ pBtMgnt->ExtConfig.bBTBusy = false;
+
+ if (!bBtLinkExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], No profile exists!!!\n"));
+ return algorithm;
+ }
+
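+	/* Descriptive note: map the ACL handle count and the active profile combination to a 2-ant coexistence algorithm. */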
+ if (pBtMgnt->ExtConfig.NumberOfHandle == 1) {
+ if (bScoExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO only\n"));
+ algorithm = BT_2ANT_COEX_ALGO_SCO;
+ } else {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID only\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID;
+ } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP only\n"));
+ algorithm = BT_2ANT_COEX_ALGO_A2DP;
+ } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) {
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN(HS) only\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANHS;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN(EDR) only\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANEDR;
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! NO matched profile for NumberOfHandle =%d \n",
+ pBtMgnt->ExtConfig.NumberOfHandle));
+ }
+ }
+ } else if (pBtMgnt->ExtConfig.NumberOfHandle == 2) {
+ if (bScoExist) {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + HID\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID;
+ } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + A2DP\n"));
+ } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) {
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_SCO;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO exists but why NO matched ACL profile for NumberOfHandle =%d\n",
+ pBtMgnt->ExtConfig.NumberOfHandle));
+ }
+ } else {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
+ } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) {
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_A2DP;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! NO matched profile for NumberOfHandle =%d\n",
+ pBtMgnt->ExtConfig.NumberOfHandle));
+ }
+ }
+ } else if (pBtMgnt->ExtConfig.NumberOfHandle == 3) {
+ if (bScoExist) {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP\n"));
+ } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) {
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + A2DP + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_SCO;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + A2DP + PAN(EDR)\n"));
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO exists but why NO matched profile for NumberOfHandle =%d\n",
+ pBtMgnt->ExtConfig.NumberOfHandle));
+ }
+ } else {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP_PANHS;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! NO matched profile for NumberOfHandle =%d\n",
+ pBtMgnt->ExtConfig.NumberOfHandle));
+ }
+ }
+ } else if (pBtMgnt->ExtConfig.NumberOfHandle >= 3) {
+ if (bScoExist) {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
+ if (bBtHsModeExist)
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
+ else
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(EDR)\n"));
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO exists but why NO matched profile for NumberOfHandle =%d\n",
+ pBtMgnt->ExtConfig.NumberOfHandle));
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! NO matched profile for NumberOfHandle =%d\n",
+ pBtMgnt->ExtConfig.NumberOfHandle));
+ }
+ }
+ return algorithm;
+}
+
+static u8 btdm_NeedToDecBtPwr(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 bRet = false;
+
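+	/* Descriptive note: BT power is reduced only when WiFi can use the margin - smoothed PWDB (RSSI) above 47 */
+	/* while BT HS operation is active, or whenever a normal WiFi connection exists. */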
+ if (BT_Operation(padapter)) {
+ if (pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB > 47) {
+ RTPRINT(FBT, BT_TRACE, ("Need to decrease bt power for HS mode!!\n"));
+ bRet = true;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("NO Need to decrease bt power for HS mode!!\n"));
+ }
+ } else {
+ if (BTDM_IsWifiConnectionExist(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("Need to decrease bt power for Wifi is connected!!\n"));
+ bRet = true;
+ }
+ }
+ return bRet;
+}
+
+static void
+btdm_SetCoexTable(struct rtw_adapter *padapter, u32 val0x6c0,
+ u32 val0x6c8, u8 val0x6cc)
+{
+ RTPRINT(FBT, BT_TRACE, ("set coex table, set 0x6c0 = 0x%x\n", val0x6c0));
+ rtw_write32(padapter, 0x6c0, val0x6c0);
+
+ RTPRINT(FBT, BT_TRACE, ("set coex table, set 0x6c8 = 0x%x\n", val0x6c8));
+ rtw_write32(padapter, 0x6c8, val0x6c8);
+
+ RTPRINT(FBT, BT_TRACE, ("set coex table, set 0x6cc = 0x%x\n", val0x6cc));
+ rtw_write8(padapter, 0x6cc, val0x6cc);
+}
+
+static void
+btdm_SetSwFullTimeDacSwing(struct rtw_adapter *padapter, u8 bSwDacSwingOn,
+ u32 swDacSwingLvl)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
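+	/* Descriptive note: PHY reg 0x880[31:24] holds the software DAC swing level; 0xc0 below is the value written when swing control is off. */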
+ if (bSwDacSwingOn) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SwDacSwing = 0x%x\n", swDacSwingLvl));
+ PHY_SetBBReg(padapter, 0x880, 0xff000000, swDacSwingLvl);
+ pHalData->bt_coexist.bSWCoexistAllOff = false;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SwDacSwing Off!\n"));
+ PHY_SetBBReg(padapter, 0x880, 0xff000000, 0xc0);
+ }
+}
+
+static void
+btdm_SetFwDacSwingLevel(struct rtw_adapter *padapter, u8 dacSwingLvl)
+{
+ u8 H2C_Parameter[1] = {0};
+
+ H2C_Parameter[0] = dacSwingLvl;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Set Dac Swing Level = 0x%x\n", dacSwingLvl));
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], write 0x29 = 0x%x\n", H2C_Parameter[0]));
+
+ FillH2CCmd(padapter, 0x29, 1, H2C_Parameter);
+}
+
+static void btdm_2AntDecBtPwr(struct rtw_adapter *padapter, u8 bDecBtPwr)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], Dec BT power = %s\n",
+ ((bDecBtPwr) ? "ON" : "OFF")));
+ pBtdm8723->bCurDecBtPwr = bDecBtPwr;
+
+ if (pBtdm8723->bPreDecBtPwr == pBtdm8723->bCurDecBtPwr)
+ return;
+
+ BTDM_SetFwDecBtPwr(padapter, pBtdm8723->bCurDecBtPwr);
+
+ pBtdm8723->bPreDecBtPwr = pBtdm8723->bCurDecBtPwr;
+}
+
+static void
+btdm_2AntFwDacSwingLvl(struct rtw_adapter *padapter, u8 fwDacSwingLvl)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], set FW Dac Swing level = %d\n", fwDacSwingLvl));
+ pBtdm8723->curFwDacSwingLvl = fwDacSwingLvl;
+
+ /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], preFwDacSwingLvl =%d, curFwDacSwingLvl =%d\n", */
+ /*pBtdm8723->preFwDacSwingLvl, pBtdm8723->curFwDacSwingLvl)); */
+
+ if (pBtdm8723->preFwDacSwingLvl == pBtdm8723->curFwDacSwingLvl)
+ return;
+
+ btdm_SetFwDacSwingLevel(padapter, pBtdm8723->curFwDacSwingLvl);
+
+ pBtdm8723->preFwDacSwingLvl = pBtdm8723->curFwDacSwingLvl;
+}
+
+static void
+btdm_2AntRfShrink(struct rtw_adapter *padapter, u8 bRxRfShrinkOn)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], turn Rx RF Shrink = %s\n",
+ ((bRxRfShrinkOn) ? "ON" : "OFF")));
+ pBtdm8723->bCurRfRxLpfShrink = bRxRfShrinkOn;
+
+ /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], bPreRfRxLpfShrink =%d, bCurRfRxLpfShrink =%d\n", */
+ /*pBtdm8723->bPreRfRxLpfShrink, pBtdm8723->bCurRfRxLpfShrink)); */
+
+ if (pBtdm8723->bPreRfRxLpfShrink == pBtdm8723->bCurRfRxLpfShrink)
+ return;
+
+ BTDM_SetSwRfRxLpfCorner(padapter, (u8)pBtdm8723->bCurRfRxLpfShrink);
+
+ pBtdm8723->bPreRfRxLpfShrink = pBtdm8723->bCurRfRxLpfShrink;
+}
+
+static void
+btdm_2AntLowPenaltyRa(struct rtw_adapter *padapter, u8 bLowPenaltyRa)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], turn LowPenaltyRA = %s\n",
+ ((bLowPenaltyRa) ? "ON" : "OFF")));
+ pBtdm8723->bCurLowPenaltyRa = bLowPenaltyRa;
+
+ /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], bPreLowPenaltyRa =%d, bCurLowPenaltyRa =%d\n", */
+ /*pBtdm8723->bPreLowPenaltyRa, pBtdm8723->bCurLowPenaltyRa)); */
+
+ if (pBtdm8723->bPreLowPenaltyRa == pBtdm8723->bCurLowPenaltyRa)
+ return;
+
+ BTDM_SetSwPenaltyTxRateAdaptive(padapter, (u8)pBtdm8723->bCurLowPenaltyRa);
+
+ pBtdm8723->bPreLowPenaltyRa = pBtdm8723->bCurLowPenaltyRa;
+}
+
+static void
+btdm_2AntDacSwing(struct rtw_adapter *padapter,
+ u8 bDacSwingOn, u32 dacSwingLvl)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], turn DacSwing =%s, dacSwingLvl = 0x%x\n",
+ (bDacSwingOn ? "ON" : "OFF"), dacSwingLvl));
+ pBtdm8723->bCurDacSwingOn = bDacSwingOn;
+ pBtdm8723->curDacSwingLvl = dacSwingLvl;
+
+ if ((pBtdm8723->bPreDacSwingOn == pBtdm8723->bCurDacSwingOn) &&
+ (pBtdm8723->preDacSwingLvl == pBtdm8723->curDacSwingLvl))
+ return;
+
+ mdelay(30);
+ btdm_SetSwFullTimeDacSwing(padapter, bDacSwingOn, dacSwingLvl);
+
+ pBtdm8723->bPreDacSwingOn = pBtdm8723->bCurDacSwingOn;
+ pBtdm8723->preDacSwingLvl = pBtdm8723->curDacSwingLvl;
+}
+
+static void btdm_2AntAdcBackOff(struct rtw_adapter *padapter, u8 bAdcBackOff)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], turn AdcBackOff = %s\n",
+ ((bAdcBackOff) ? "ON" : "OFF")));
+ pBtdm8723->bCurAdcBackOff = bAdcBackOff;
+
+ if (pBtdm8723->bPreAdcBackOff == pBtdm8723->bCurAdcBackOff)
+ return;
+
+ BTDM_BBBackOffLevel(padapter, (u8)pBtdm8723->bCurAdcBackOff);
+
+ pBtdm8723->bPreAdcBackOff = pBtdm8723->bCurAdcBackOff;
+}
+
+static void btdm_2AntAgcTable(struct rtw_adapter *padapter, u8 bAgcTableEn)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], %s Agc Table\n", ((bAgcTableEn) ? "Enable" : "Disable")));
+ pBtdm8723->bCurAgcTableEn = bAgcTableEn;
+
+ /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], bPreAgcTableEn =%d, bCurAgcTableEn =%d\n", */
+ /*pBtdm8723->bPreAgcTableEn, pBtdm8723->bCurAgcTableEn)); */
+
+ if (pBtdm8723->bPreAgcTableEn == pBtdm8723->bCurAgcTableEn)
+ return;
+
+ BTDM_AGCTable(padapter, (u8)bAgcTableEn);
+
+ pBtdm8723->bPreAgcTableEn = pBtdm8723->bCurAgcTableEn;
+}
+
+static void
+btdm_2AntCoexTable(struct rtw_adapter *padapter,
+ u32 val0x6c0, u32 val0x6c8, u8 val0x6cc)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], write Coex Table 0x6c0 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ val0x6c0, val0x6c8, val0x6cc));
+ pBtdm8723->curVal0x6c0 = val0x6c0;
+ pBtdm8723->curVal0x6c8 = val0x6c8;
+ pBtdm8723->curVal0x6cc = val0x6cc;
+
+ /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n", */
+ /*pBtdm8723->preVal0x6c0, pBtdm8723->preVal0x6c8, pBtdm8723->preVal0x6cc)); */
+ /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n", */
+ /*pBtdm8723->curVal0x6c0, pBtdm8723->curVal0x6c8, pBtdm8723->curVal0x6cc)); */
+
+ if ((pBtdm8723->preVal0x6c0 == pBtdm8723->curVal0x6c0) &&
+ (pBtdm8723->preVal0x6c8 == pBtdm8723->curVal0x6c8) &&
+ (pBtdm8723->preVal0x6cc == pBtdm8723->curVal0x6cc))
+ return;
+
+ btdm_SetCoexTable(padapter, val0x6c0, val0x6c8, val0x6cc);
+
+ pBtdm8723->preVal0x6c0 = pBtdm8723->curVal0x6c0;
+ pBtdm8723->preVal0x6c8 = pBtdm8723->curVal0x6c8;
+ pBtdm8723->preVal0x6cc = pBtdm8723->curVal0x6cc;
+}
+
+static void btdm_2AntIgnoreWlanAct(struct rtw_adapter *padapter, u8 bEnable)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], turn Ignore WlanAct %s\n", (bEnable ? "ON" : "OFF")));
+ pBtdm8723->bCurIgnoreWlanAct = bEnable;
+
+ if (pBtdm8723->bPreIgnoreWlanAct == pBtdm8723->bCurIgnoreWlanAct)
+ return;
+
+ btdm_SetFwIgnoreWlanAct(padapter, bEnable);
+ pBtdm8723->bPreIgnoreWlanAct = pBtdm8723->bCurIgnoreWlanAct;
+}
+
+static void
+btdm_2AntSetFw3a(struct rtw_adapter *padapter, u8 byte1, u8 byte2,
+ u8 byte3, u8 byte4, u8 byte5)
+{
+ u8 H2C_Parameter[5] = {0};
+
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+	/* for 2-ant BT coexist, a non-zero byte1 (byte1[1:0] != 0) enables PS-TDMA */
+ if (byte1)
+ pHalData->bt_coexist.bFWCoexistAllOff = false;
+ H2C_Parameter[0] = byte1;
+ H2C_Parameter[1] = byte2;
+ H2C_Parameter[2] = byte3;
+ H2C_Parameter[3] = byte4;
+ H2C_Parameter[4] = byte5;
+
+ pHalData->bt_coexist.fw3aVal[0] = byte1;
+ pHalData->bt_coexist.fw3aVal[1] = byte2;
+ pHalData->bt_coexist.fw3aVal[2] = byte3;
+ pHalData->bt_coexist.fw3aVal[3] = byte4;
+ pHalData->bt_coexist.fw3aVal[4] = byte5;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], FW write 0x3a(5bytes) = 0x%x%08x\n",
+ H2C_Parameter[0],
+ H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
+
+ FillH2CCmd(padapter, 0x3a, 5, H2C_Parameter);
+}
+
+static void btdm_2AntPsTdma(struct rtw_adapter *padapter, u8 bTurnOn, u8 type)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+ u32 btTxRxCnt = 0;
+ u8 bTurnOnByCnt = false;
+ u8 psTdmaTypeByCnt = 0;
+
+ btTxRxCnt = BTDM_BtTxRxCounterH(padapter)+BTDM_BtTxRxCounterL(padapter);
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT TxRx Counters = %d\n", btTxRxCnt));
+ if (btTxRxCnt > 3000) {
+ bTurnOnByCnt = true;
+ psTdmaTypeByCnt = 8;
+
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], For BTTxRxCounters, turn %s PS TDMA, type =%d\n",
+ (bTurnOnByCnt ? "ON" : "OFF"), psTdmaTypeByCnt));
+ pBtdm8723->bCurPsTdmaOn = bTurnOnByCnt;
+ pBtdm8723->curPsTdma = psTdmaTypeByCnt;
+ } else {
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], turn %s PS TDMA, type =%d\n",
+ (bTurnOn ? "ON" : "OFF"), type));
+ pBtdm8723->bCurPsTdmaOn = bTurnOn;
+ pBtdm8723->curPsTdma = type;
+ }
+
+ if ((pBtdm8723->bPrePsTdmaOn == pBtdm8723->bCurPsTdmaOn) &&
+ (pBtdm8723->prePsTdma == pBtdm8723->curPsTdma))
+ return;
+
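+	/* Descriptive note: each type below is a firmware 0x3a preset; byte1 enables PS-TDMA (0xe3/0xa3) or disables */
+	/* it (0x0), and the remaining bytes appear to encode the slot timing and control options for that case. */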
+ if (bTurnOn) {
+ switch (type) {
+ case 1:
+ default:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x1a, 0x1a, 0xa1, 0x98);
+ break;
+ case 2:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x12, 0x12, 0xa1, 0x98);
+ break;
+ case 3:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0xa, 0xa, 0xa1, 0x98);
+ break;
+ case 4:
+ btdm_2AntSetFw3a(padapter, 0xa3, 0x5, 0x5, 0xa1, 0x80);
+ break;
+ case 5:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x1a, 0x1a, 0x20, 0x98);
+ break;
+ case 6:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x12, 0x12, 0x20, 0x98);
+ break;
+ case 7:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0xa, 0xa, 0x20, 0x98);
+ break;
+ case 8:
+ btdm_2AntSetFw3a(padapter, 0xa3, 0x5, 0x5, 0x20, 0x80);
+ break;
+ case 9:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x1a, 0x1a, 0xa1, 0x98);
+ break;
+ case 10:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x12, 0x12, 0xa1, 0x98);
+ break;
+ case 11:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0xa, 0xa, 0xa1, 0x98);
+ break;
+ case 12:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x5, 0x5, 0xa1, 0x98);
+ break;
+ case 13:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x1a, 0x1a, 0x20, 0x98);
+ break;
+ case 14:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x12, 0x12, 0x20, 0x98);
+ break;
+ case 15:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0xa, 0xa, 0x20, 0x98);
+ break;
+ case 16:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x5, 0x5, 0x20, 0x98);
+ break;
+ case 17:
+ btdm_2AntSetFw3a(padapter, 0xa3, 0x2f, 0x2f, 0x20, 0x80);
+ break;
+ case 18:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x5, 0x5, 0xa1, 0x98);
+ break;
+ case 19:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x25, 0x25, 0xa1, 0x98);
+ break;
+ case 20:
+ btdm_2AntSetFw3a(padapter, 0xe3, 0x25, 0x25, 0x20, 0x98);
+ break;
+ }
+ } else {
+ /* disable PS tdma */
+ switch (type) {
+ case 0:
+ btdm_2AntSetFw3a(padapter, 0x0, 0x0, 0x0, 0x8, 0x0);
+ break;
+ case 1:
+ btdm_2AntSetFw3a(padapter, 0x0, 0x0, 0x0, 0x0, 0x0);
+ break;
+ default:
+ btdm_2AntSetFw3a(padapter, 0x0, 0x0, 0x0, 0x8, 0x0);
+ break;
+ }
+ }
+
+ /* update pre state */
+ pBtdm8723->bPrePsTdmaOn = pBtdm8723->bCurPsTdmaOn;
+ pBtdm8723->prePsTdma = pBtdm8723->curPsTdma;
+}
+
+static void btdm_2AntBtInquiryPage(struct rtw_adapter *padapter)
+{
+ btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
+ btdm_2AntIgnoreWlanAct(padapter, false);
+ btdm_2AntPsTdma(padapter, true, 8);
+}
+
+static u8 btdm_HoldForBtInqPage(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u32 curTime = jiffies;
+
+ if (pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage) {
+ /* bt inquiry or page is started. */
+ if (pHalData->bt_coexist.halCoex8723.btInqPageStartTime == 0) {
+ pHalData->bt_coexist.halCoex8723.btInqPageStartTime = curTime;
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT Inquiry/page is started at time : 0x%lx \n",
+ pHalData->bt_coexist.halCoex8723.btInqPageStartTime));
+ }
+ }
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT Inquiry/page started time : 0x%lx, curTime : 0x%x \n",
+ pHalData->bt_coexist.halCoex8723.btInqPageStartTime, curTime));
+
+ if (pHalData->bt_coexist.halCoex8723.btInqPageStartTime) {
+ if (((curTime - pHalData->bt_coexist.halCoex8723.btInqPageStartTime)/1000000) >= 10) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT Inquiry/page >= 10sec!!!"));
+ pHalData->bt_coexist.halCoex8723.btInqPageStartTime = 0;
+ }
+ }
+
+ if (pHalData->bt_coexist.halCoex8723.btInqPageStartTime) {
+ btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
+ btdm_2AntIgnoreWlanAct(padapter, false);
+ btdm_2AntPsTdma(padapter, true, 8);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static u8 btdm_Is2Ant8723ACommonAction(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+ u8 bCommon = false;
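+	/* Descriptive note: apply a fixed parameter set for the "common" WiFi/BT idle combinations; returns true when */
+	/* one of them was handled here, false when both WiFi and BT are non-idle. */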
+
+ RTPRINT(FBT, BT_TRACE, ("%s :BTDM_IsWifiConnectionExist =%x check_fwstate =%x pmlmepriv->fw_state = 0x%x\n", __func__, BTDM_IsWifiConnectionExist(padapter), check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)), padapter->mlmepriv.fw_state));
+
+ if ((!BTDM_IsWifiConnectionExist(padapter)) &&
+ (!check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING))) &&
+ (BT_2ANT_BT_STATUS_IDLE == pBtdm8723->btStatus)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi idle + Bt idle!!\n"));
+
+ btdm_2AntLowPenaltyRa(padapter, false);
+ btdm_2AntRfShrink(padapter, false);
+ btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
+
+ btdm_2AntIgnoreWlanAct(padapter, false);
+ btdm_2AntPsTdma(padapter, false, 0);
+ btdm_2AntFwDacSwingLvl(padapter, 0x20);
+ btdm_2AntDecBtPwr(padapter, false);
+
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+
+ bCommon = true;
+ } else if (((BTDM_IsWifiConnectionExist(padapter)) ||
+ (check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)))) &&
+ (BT_2ANT_BT_STATUS_IDLE == pBtdm8723->btStatus)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi non-idle + BT idle!!\n"));
+
+ btdm_2AntLowPenaltyRa(padapter, true);
+ btdm_2AntRfShrink(padapter, false);
+ btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
+
+ btdm_2AntIgnoreWlanAct(padapter, false);
+ btdm_2AntPsTdma(padapter, false, 0);
+ btdm_2AntFwDacSwingLvl(padapter, 0x20);
+ btdm_2AntDecBtPwr(padapter, true);
+
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+
+ bCommon = true;
+ } else if ((!BTDM_IsWifiConnectionExist(padapter)) &&
+ (!check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING))) &&
+ (BT_2ANT_BT_STATUS_CONNECTED_IDLE == pBtdm8723->btStatus)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi idle + Bt connected idle!!\n"));
+
+ btdm_2AntLowPenaltyRa(padapter, true);
+ btdm_2AntRfShrink(padapter, true);
+ btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
+
+ btdm_2AntIgnoreWlanAct(padapter, false);
+ btdm_2AntPsTdma(padapter, false, 0);
+ btdm_2AntFwDacSwingLvl(padapter, 0x20);
+ btdm_2AntDecBtPwr(padapter, false);
+
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+
+ bCommon = true;
+ } else if (((BTDM_IsWifiConnectionExist(padapter)) ||
+ (check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)))) &&
+ (BT_2ANT_BT_STATUS_CONNECTED_IDLE == pBtdm8723->btStatus)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi non-idle + Bt connected idle!!\n"));
+
+ btdm_2AntLowPenaltyRa(padapter, true);
+ btdm_2AntRfShrink(padapter, true);
+ btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
+
+ btdm_2AntIgnoreWlanAct(padapter, false);
+ btdm_2AntPsTdma(padapter, false, 0);
+ btdm_2AntFwDacSwingLvl(padapter, 0x20);
+ btdm_2AntDecBtPwr(padapter, true);
+
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+
+ bCommon = true;
+ } else if ((!BTDM_IsWifiConnectionExist(padapter)) &&
+ (!check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING))) &&
+ (BT_2ANT_BT_STATUS_NON_IDLE == pBtdm8723->btStatus)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi idle + BT non-idle!!\n"));
+
+ btdm_2AntLowPenaltyRa(padapter, true);
+ btdm_2AntRfShrink(padapter, true);
+ btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
+
+ btdm_2AntIgnoreWlanAct(padapter, false);
+ btdm_2AntPsTdma(padapter, false, 0);
+ btdm_2AntFwDacSwingLvl(padapter, 0x20);
+ btdm_2AntDecBtPwr(padapter, false);
+
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+
+ bCommon = true;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi non-idle + BT non-idle!!\n"));
+ btdm_2AntLowPenaltyRa(padapter, true);
+ btdm_2AntRfShrink(padapter, true);
+ btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
+ btdm_2AntIgnoreWlanAct(padapter, false);
+ btdm_2AntFwDacSwingLvl(padapter, 0x20);
+
+ bCommon = false;
+ }
+ return bCommon;
+}
+
+static void
+btdm_2AntTdmaDurationAdjust(struct rtw_adapter *padapter, u8 bScoHid,
+ u8 bTxPause, u8 maxInterval)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+ static s32 up, dn, m, n, WaitCount;
+ s32 result; /* 0: no change, +1: increase WiFi duration, -1: decrease WiFi duration */
+ u8 retryCount = 0;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], TdmaDurationAdjust()\n"));
+
+ if (pBtdm8723->bResetTdmaAdjust) {
+ pBtdm8723->bResetTdmaAdjust = false;
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+ if (bScoHid) {
+ if (bTxPause) {
+ if (maxInterval == 1) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (maxInterval == 2) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (maxInterval == 3) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ }
+ } else {
+ if (maxInterval == 1) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (maxInterval == 2) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (maxInterval == 3) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ }
+ }
+ } else {
+ if (bTxPause) {
+ if (maxInterval == 1) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (maxInterval == 2) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (maxInterval == 3) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ }
+ } else {
+ if (maxInterval == 1) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (maxInterval == 2) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (maxInterval == 3) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ }
+ }
+ }
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ WaitCount = 0;
+ } else {
+		/* acquire the BT TRx retry count from BT_Info byte2 */
+ retryCount = pHalData->bt_coexist.halCoex8723.btRetryCnt;
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], retryCount = %d\n", retryCount));
+ result = 0;
+ WaitCount++;
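+		/* Descriptive note: adaptive adjustment - "up" accumulates 2-second windows with zero BT retries, "dn" windows */
+		/* with retries; m and n stretch the recheck interval so the WiFi duration does not oscillate between levels. */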
+
+ if (retryCount == 0) { /* no retry in the last 2-second duration */
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+			if (up >= n) { /* if the retry count is 0 for n consecutive 2-second windows, widen the WiFi duration */
+ WaitCount = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Increase wifi duration!!\n"));
+ }
+ } else if (retryCount <= 3) { /* <= 3 retry in the last 2-second duration */
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+			if (dn == 2) { /* if the retry count is < 3 for 2 consecutive 2-second windows, narrow the WiFi duration */
+ if (WaitCount <= 2)
+					m++; /* avoid bouncing back and forth between two levels */
+ else
+ m = 1;
+
+				if (m >= 20) /* cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds */
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+ }
+		} else { /* a single 2-second window with retry count > 3 is enough to narrow the WiFi duration */
+ if (WaitCount == 1)
+				m++; /* avoid bouncing back and forth between two levels */
+ else
+ m = 1;
+
+			if (m >= 20) /* cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds */
+ m = 20;
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+ }
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], max Interval = %d\n", maxInterval));
+ if (maxInterval == 1) {
+ if (bTxPause) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 1\n"));
+ if (pBtdm8723->curPsTdma == 1) {
+ btdm_2AntPsTdma(padapter, true, 5);
+ pBtdm8723->psTdmaDuAdjType = 5;
+ } else if (pBtdm8723->curPsTdma == 2) {
+ btdm_2AntPsTdma(padapter, true, 6);
+ pBtdm8723->psTdmaDuAdjType = 6;
+ } else if (pBtdm8723->curPsTdma == 3) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 4) {
+ btdm_2AntPsTdma(padapter, true, 8);
+ pBtdm8723->psTdmaDuAdjType = 8;
+ }
+ if (pBtdm8723->curPsTdma == 9) {
+ btdm_2AntPsTdma(padapter, true, 13);
+ pBtdm8723->psTdmaDuAdjType = 13;
+ } else if (pBtdm8723->curPsTdma == 10) {
+ btdm_2AntPsTdma(padapter, true, 14);
+ pBtdm8723->psTdmaDuAdjType = 14;
+ } else if (pBtdm8723->curPsTdma == 11) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 12) {
+ btdm_2AntPsTdma(padapter, true, 16);
+ pBtdm8723->psTdmaDuAdjType = 16;
+ }
+
+ if (result == -1) {
+ if (pBtdm8723->curPsTdma == 5) {
+ btdm_2AntPsTdma(padapter, true, 6);
+ pBtdm8723->psTdmaDuAdjType = 6;
+ } else if (pBtdm8723->curPsTdma == 6) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 7) {
+ btdm_2AntPsTdma(padapter, true, 8);
+ pBtdm8723->psTdmaDuAdjType = 8;
+ } else if (pBtdm8723->curPsTdma == 13) {
+ btdm_2AntPsTdma(padapter, true, 14);
+ pBtdm8723->psTdmaDuAdjType = 14;
+ } else if (pBtdm8723->curPsTdma == 14) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 15) {
+ btdm_2AntPsTdma(padapter, true, 16);
+ pBtdm8723->psTdmaDuAdjType = 16;
+ }
+ } else if (result == 1) {
+ if (pBtdm8723->curPsTdma == 8) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 7) {
+ btdm_2AntPsTdma(padapter, true, 6);
+ pBtdm8723->psTdmaDuAdjType = 6;
+ } else if (pBtdm8723->curPsTdma == 6) {
+ btdm_2AntPsTdma(padapter, true, 5);
+ pBtdm8723->psTdmaDuAdjType = 5;
+ } else if (pBtdm8723->curPsTdma == 16) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 15) {
+ btdm_2AntPsTdma(padapter, true, 14);
+ pBtdm8723->psTdmaDuAdjType = 14;
+ } else if (pBtdm8723->curPsTdma == 14) {
+ btdm_2AntPsTdma(padapter, true, 13);
+ pBtdm8723->psTdmaDuAdjType = 13;
+ }
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 0\n"));
+ if (pBtdm8723->curPsTdma == 5) {
+ btdm_2AntPsTdma(padapter, true, 1);
+ pBtdm8723->psTdmaDuAdjType = 1;
+ } else if (pBtdm8723->curPsTdma == 6) {
+ btdm_2AntPsTdma(padapter, true, 2);
+ pBtdm8723->psTdmaDuAdjType = 2;
+ } else if (pBtdm8723->curPsTdma == 7) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 8) {
+ btdm_2AntPsTdma(padapter, true, 4);
+ pBtdm8723->psTdmaDuAdjType = 4;
+ }
+ if (pBtdm8723->curPsTdma == 13) {
+ btdm_2AntPsTdma(padapter, true, 9);
+ pBtdm8723->psTdmaDuAdjType = 9;
+ } else if (pBtdm8723->curPsTdma == 14) {
+ btdm_2AntPsTdma(padapter, true, 10);
+ pBtdm8723->psTdmaDuAdjType = 10;
+ } else if (pBtdm8723->curPsTdma == 15) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 16) {
+ btdm_2AntPsTdma(padapter, true, 12);
+ pBtdm8723->psTdmaDuAdjType = 12;
+ }
+
+ if (result == -1) {
+ if (pBtdm8723->curPsTdma == 1) {
+ btdm_2AntPsTdma(padapter, true, 2);
+ pBtdm8723->psTdmaDuAdjType = 2;
+ } else if (pBtdm8723->curPsTdma == 2) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 3) {
+ btdm_2AntPsTdma(padapter, true, 4);
+ pBtdm8723->psTdmaDuAdjType = 4;
+ } else if (pBtdm8723->curPsTdma == 9) {
+ btdm_2AntPsTdma(padapter, true, 10);
+ pBtdm8723->psTdmaDuAdjType = 10;
+ } else if (pBtdm8723->curPsTdma == 10) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 11) {
+ btdm_2AntPsTdma(padapter, true, 12);
+ pBtdm8723->psTdmaDuAdjType = 12;
+ }
+ } else if (result == 1) {
+ if (pBtdm8723->curPsTdma == 4) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 3) {
+ btdm_2AntPsTdma(padapter, true, 2);
+ pBtdm8723->psTdmaDuAdjType = 2;
+ } else if (pBtdm8723->curPsTdma == 2) {
+ btdm_2AntPsTdma(padapter, true, 1);
+ pBtdm8723->psTdmaDuAdjType = 1;
+ } else if (pBtdm8723->curPsTdma == 12) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 11) {
+ btdm_2AntPsTdma(padapter, true, 10);
+ pBtdm8723->psTdmaDuAdjType = 10;
+ } else if (pBtdm8723->curPsTdma == 10) {
+ btdm_2AntPsTdma(padapter, true, 9);
+ pBtdm8723->psTdmaDuAdjType = 9;
+ }
+ }
+ }
+ } else if (maxInterval == 2) {
+ if (bTxPause) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 1\n"));
+ if (pBtdm8723->curPsTdma == 1) {
+ btdm_2AntPsTdma(padapter, true, 6);
+ pBtdm8723->psTdmaDuAdjType = 6;
+ } else if (pBtdm8723->curPsTdma == 2) {
+ btdm_2AntPsTdma(padapter, true, 6);
+ pBtdm8723->psTdmaDuAdjType = 6;
+ } else if (pBtdm8723->curPsTdma == 3) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 4) {
+ btdm_2AntPsTdma(padapter, true, 8);
+ pBtdm8723->psTdmaDuAdjType = 8;
+ }
+ if (pBtdm8723->curPsTdma == 9) {
+ btdm_2AntPsTdma(padapter, true, 14);
+ pBtdm8723->psTdmaDuAdjType = 14;
+ } else if (pBtdm8723->curPsTdma == 10) {
+ btdm_2AntPsTdma(padapter, true, 14);
+ pBtdm8723->psTdmaDuAdjType = 14;
+ } else if (pBtdm8723->curPsTdma == 11) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 12) {
+ btdm_2AntPsTdma(padapter, true, 16);
+ pBtdm8723->psTdmaDuAdjType = 16;
+ }
+ if (result == -1) {
+ if (pBtdm8723->curPsTdma == 5) {
+ btdm_2AntPsTdma(padapter, true, 6);
+ pBtdm8723->psTdmaDuAdjType = 6;
+ } else if (pBtdm8723->curPsTdma == 6) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 7) {
+ btdm_2AntPsTdma(padapter, true, 8);
+ pBtdm8723->psTdmaDuAdjType = 8;
+ } else if (pBtdm8723->curPsTdma == 13) {
+ btdm_2AntPsTdma(padapter, true, 14);
+ pBtdm8723->psTdmaDuAdjType = 14;
+ } else if (pBtdm8723->curPsTdma == 14) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 15) {
+ btdm_2AntPsTdma(padapter, true, 16);
+ pBtdm8723->psTdmaDuAdjType = 16;
+ }
+ } else if (result == 1) {
+ if (pBtdm8723->curPsTdma == 8) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 7) {
+ btdm_2AntPsTdma(padapter, true, 6);
+ pBtdm8723->psTdmaDuAdjType = 6;
+ } else if (pBtdm8723->curPsTdma == 6) {
+ btdm_2AntPsTdma(padapter, true, 6);
+ pBtdm8723->psTdmaDuAdjType = 6;
+ } else if (pBtdm8723->curPsTdma == 16) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 15) {
+ btdm_2AntPsTdma(padapter, true, 14);
+ pBtdm8723->psTdmaDuAdjType = 14;
+ } else if (pBtdm8723->curPsTdma == 14) {
+ btdm_2AntPsTdma(padapter, true, 14);
+ pBtdm8723->psTdmaDuAdjType = 14;
+ }
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 0\n"));
+ if (pBtdm8723->curPsTdma == 5) {
+ btdm_2AntPsTdma(padapter, true, 2);
+ pBtdm8723->psTdmaDuAdjType = 2;
+ } else if (pBtdm8723->curPsTdma == 6) {
+ btdm_2AntPsTdma(padapter, true, 2);
+ pBtdm8723->psTdmaDuAdjType = 2;
+ } else if (pBtdm8723->curPsTdma == 7) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 8) {
+ btdm_2AntPsTdma(padapter, true, 4);
+ pBtdm8723->psTdmaDuAdjType = 4;
+ }
+ if (pBtdm8723->curPsTdma == 13) {
+ btdm_2AntPsTdma(padapter, true, 10);
+ pBtdm8723->psTdmaDuAdjType = 10;
+ } else if (pBtdm8723->curPsTdma == 14) {
+ btdm_2AntPsTdma(padapter, true, 10);
+ pBtdm8723->psTdmaDuAdjType = 10;
+ } else if (pBtdm8723->curPsTdma == 15) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 16) {
+ btdm_2AntPsTdma(padapter, true, 12);
+ pBtdm8723->psTdmaDuAdjType = 12;
+ }
+ if (result == -1) {
+ if (pBtdm8723->curPsTdma == 1) {
+ btdm_2AntPsTdma(padapter, true, 2);
+ pBtdm8723->psTdmaDuAdjType = 2;
+ } else if (pBtdm8723->curPsTdma == 2) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 3) {
+ btdm_2AntPsTdma(padapter, true, 4);
+ pBtdm8723->psTdmaDuAdjType = 4;
+ } else if (pBtdm8723->curPsTdma == 9) {
+ btdm_2AntPsTdma(padapter, true, 10);
+ pBtdm8723->psTdmaDuAdjType = 10;
+ } else if (pBtdm8723->curPsTdma == 10) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 11) {
+ btdm_2AntPsTdma(padapter, true, 12);
+ pBtdm8723->psTdmaDuAdjType = 12;
+ }
+ } else if (result == 1) {
+ if (pBtdm8723->curPsTdma == 4) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 3) {
+ btdm_2AntPsTdma(padapter, true, 2);
+ pBtdm8723->psTdmaDuAdjType = 2;
+ } else if (pBtdm8723->curPsTdma == 2) {
+ btdm_2AntPsTdma(padapter, true, 2);
+ pBtdm8723->psTdmaDuAdjType = 2;
+ } else if (pBtdm8723->curPsTdma == 12) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 11) {
+ btdm_2AntPsTdma(padapter, true, 10);
+ pBtdm8723->psTdmaDuAdjType = 10;
+ } else if (pBtdm8723->curPsTdma == 10) {
+ btdm_2AntPsTdma(padapter, true, 10);
+ pBtdm8723->psTdmaDuAdjType = 10;
+ }
+ }
+ }
+ } else if (maxInterval == 3) {
+ if (bTxPause) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 1\n"));
+ if (pBtdm8723->curPsTdma == 1) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 2) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 3) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 4) {
+ btdm_2AntPsTdma(padapter, true, 8);
+ pBtdm8723->psTdmaDuAdjType = 8;
+ }
+ if (pBtdm8723->curPsTdma == 9) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 10) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 11) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 12) {
+ btdm_2AntPsTdma(padapter, true, 16);
+ pBtdm8723->psTdmaDuAdjType = 16;
+ }
+ if (result == -1) {
+ if (pBtdm8723->curPsTdma == 5) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 6) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 7) {
+ btdm_2AntPsTdma(padapter, true, 8);
+ pBtdm8723->psTdmaDuAdjType = 8;
+ } else if (pBtdm8723->curPsTdma == 13) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 14) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 15) {
+ btdm_2AntPsTdma(padapter, true, 16);
+ pBtdm8723->psTdmaDuAdjType = 16;
+ }
+ } else if (result == 1) {
+ if (pBtdm8723->curPsTdma == 8) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 7) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 6) {
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
+ } else if (pBtdm8723->curPsTdma == 16) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 15) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ } else if (pBtdm8723->curPsTdma == 14) {
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
+ }
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 0\n"));
+ if (pBtdm8723->curPsTdma == 5) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 6) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 7) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 8) {
+ btdm_2AntPsTdma(padapter, true, 4);
+ pBtdm8723->psTdmaDuAdjType = 4;
+ }
+ if (pBtdm8723->curPsTdma == 13) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 14) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 15) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 16) {
+ btdm_2AntPsTdma(padapter, true, 12);
+ pBtdm8723->psTdmaDuAdjType = 12;
+ }
+ if (result == -1) {
+ if (pBtdm8723->curPsTdma == 1) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 2) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 3) {
+ btdm_2AntPsTdma(padapter, true, 4);
+ pBtdm8723->psTdmaDuAdjType = 4;
+ } else if (pBtdm8723->curPsTdma == 9) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 10) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 11) {
+ btdm_2AntPsTdma(padapter, true, 12);
+ pBtdm8723->psTdmaDuAdjType = 12;
+ }
+ } else if (result == 1) {
+ if (pBtdm8723->curPsTdma == 4) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 3) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 2) {
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
+ } else if (pBtdm8723->curPsTdma == 12) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 11) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ } else if (pBtdm8723->curPsTdma == 10) {
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
+ }
+ }
+ }
+ }
+ }
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], PsTdma type : recordPsTdma =%d\n", pBtdm8723->psTdmaDuAdjType));
+ /* if current PsTdma not match with the recorded one (when scan, dhcp...), */
+ /* then we have to adjust it back to the previous record one. */
+ if (pBtdm8723->curPsTdma != pBtdm8723->psTdmaDuAdjType) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], PsTdma type dismatch!!!, curPsTdma =%d, recordPsTdma =%d\n",
+ pBtdm8723->curPsTdma, pBtdm8723->psTdmaDuAdjType));
+
+ if (!check_fwstate(&padapter->mlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING))
+ btdm_2AntPsTdma(padapter, true, pBtdm8723->psTdmaDuAdjType);
+ else
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"));
+ }
+}
+
+/* default Action */
+/* SCO only or SCO+PAN(HS) */
+static void btdm_2Ant8723ASCOAction(struct rtw_adapter *padapter)
+{
+ u8 btRssiState, btRssiState1;
+
+ if (btdm_NeedToDecBtPwr(padapter))
+ btdm_2AntDecBtPwr(padapter, true);
+ else
+ btdm_2AntDecBtPwr(padapter, false);
+
+ if (BTDM_IsHT40(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("HT40\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ /* fw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+			RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ btdm_2AntPsTdma(padapter, true, 11);
+ } else {
+			RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntPsTdma(padapter, true, 15);
+ }
+
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
+
+ /* fw mechanism */
+ if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
+ (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ btdm_2AntPsTdma(padapter, true, 11);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
+ btdm_2AntPsTdma(padapter, true, 15);
+ }
+
+ /* sw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ btdm_2AntAgcTable(padapter, true);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ }
+ }
+}
+
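+/* HID: PS TDMA case 9 when the WiFi RSSI state is high, case 13 when low */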
+static void btdm_2Ant8723AHIDAction(struct rtw_adapter *padapter)
+{
+ u8 btRssiState, btRssiState1;
+
+ if (btdm_NeedToDecBtPwr(padapter))
+ btdm_2AntDecBtPwr(padapter, true);
+ else
+ btdm_2AntDecBtPwr(padapter, false);
+
+ if (BTDM_IsHT40(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("HT40\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ /* fw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+			RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ btdm_2AntPsTdma(padapter, true, 9);
+ } else {
+			RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntPsTdma(padapter, true, 13);
+ }
+
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
+
+ /* fw mechanism */
+ if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
+ (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ btdm_2AntPsTdma(padapter, true, 9);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
+ btdm_2AntPsTdma(padapter, true, 13);
+ }
+
+ /* sw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ btdm_2AntAgcTable(padapter, true);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ }
+ }
+}
+
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+static void btdm_2Ant8723AA2DPAction(struct rtw_adapter *padapter)
+{
+ u8 btRssiState, btRssiState1;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
+
+ if (btdm_NeedToDecBtPwr(padapter))
+ btdm_2AntDecBtPwr(padapter, true);
+ else
+ btdm_2AntDecBtPwr(padapter, false);
+
+ if (BTDM_IsHT40(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("HT40\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+
+ /* fw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, false, false, 3);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, false, false, 1);
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, false, true, 3);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, false, true, 1);
+ }
+ }
+
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
+
+ /* fw mechanism */
+ if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
+ (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, false, false, 3);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, false, false, 1);
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, false, true, 3);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, false, true, 1);
+ }
+ }
+
+ /* sw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ btdm_2AntAgcTable(padapter, true);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ }
+ }
+}
+
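+/* PAN(EDR): PS TDMA case 2 when the WiFi RSSI state is high, case 6 when low */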
+static void btdm_2Ant8723APANEDRAction(struct rtw_adapter *padapter)
+{
+ u8 btRssiState, btRssiState1;
+
+ if (btdm_NeedToDecBtPwr(padapter))
+ btdm_2AntDecBtPwr(padapter, true);
+ else
+ btdm_2AntDecBtPwr(padapter, false);
+
+ if (BTDM_IsHT40(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("HT40\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+
+ /* fw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ btdm_2AntPsTdma(padapter, true, 2);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntPsTdma(padapter, true, 6);
+ }
+
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
+
+ /* fw mechanism */
+ if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
+ (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ btdm_2AntPsTdma(padapter, true, 2);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
+ btdm_2AntPsTdma(padapter, true, 6);
+ }
+
+ /* sw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ btdm_2AntAgcTable(padapter, true);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ }
+ }
+}
+
+/* PAN(HS) only */
+static void btdm_2Ant8723APANHSAction(struct rtw_adapter *padapter)
+{
+ u8 btRssiState;
+
+ if (BTDM_IsHT40(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("HT40\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 47, 0);
+ /* fw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ btdm_2AntDecBtPwr(padapter, true);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntDecBtPwr(padapter, false);
+ }
+ btdm_2AntPsTdma(padapter, false, 0);
+
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 47, 0);
+
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high\n"));
+ /* fw mechanism */
+ btdm_2AntDecBtPwr(padapter, true);
+ btdm_2AntPsTdma(padapter, false, 0);
+
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, true);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low\n"));
+ /* fw mechanism */
+ btdm_2AntDecBtPwr(padapter, false);
+ btdm_2AntPsTdma(padapter, false, 0);
+
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ }
+ }
+}
+
+/* PAN(EDR)+A2DP */
+static void btdm_2Ant8723APANEDRA2DPAction(struct rtw_adapter *padapter)
+{
+ u8 btRssiState, btRssiState1, btInfoExt;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
+
+ if (btdm_NeedToDecBtPwr(padapter))
+ btdm_2AntDecBtPwr(padapter, true);
+ else
+ btdm_2AntDecBtPwr(padapter, false);
+
+ if (BTDM_IsHT40(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("HT40\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ /* fw mechanism */
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntPsTdma(padapter, true, 4);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntPsTdma(padapter, true, 2);
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ /* fw mechanism */
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntPsTdma(padapter, true, 8);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntPsTdma(padapter, true, 6);
+ }
+ }
+
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
+
+ if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
+ (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ /* fw mechanism */
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntPsTdma(padapter, true, 4);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntPsTdma(padapter, true, 2);
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
+ /* fw mechanism */
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntPsTdma(padapter, true, 8);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntPsTdma(padapter, true, 6);
+ }
+ }
+
+ /* sw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ btdm_2AntAgcTable(padapter, true);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ }
+ }
+}
+
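+/* PAN(EDR)+HID: PS TDMA case 10 when the WiFi RSSI state is high, case 14 when low */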
+static void btdm_2Ant8723APANEDRHIDAction(struct rtw_adapter *padapter)
+{
+ u8 btRssiState, btRssiState1;
+
+ if (btdm_NeedToDecBtPwr(padapter))
+ btdm_2AntDecBtPwr(padapter, true);
+ else
+ btdm_2AntDecBtPwr(padapter, false);
+
+ if (BTDM_IsHT40(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("HT40\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ /* fw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ btdm_2AntPsTdma(padapter, true, 10);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntPsTdma(padapter, true, 14);
+ }
+
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
+
+ /* fw mechanism */
+ if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
+ (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ btdm_2AntPsTdma(padapter, true, 10);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
+ btdm_2AntPsTdma(padapter, true, 14);
+ }
+
+ /* sw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ btdm_2AntAgcTable(padapter, true);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ }
+ }
+}
+
+/* HID+A2DP+PAN(EDR) */
+static void btdm_2Ant8723AHIDA2DPPANEDRAction(struct rtw_adapter *padapter)
+{
+ u8 btRssiState, btRssiState1, btInfoExt;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
+
+ if (btdm_NeedToDecBtPwr(padapter))
+ btdm_2AntDecBtPwr(padapter, true);
+ else
+ btdm_2AntDecBtPwr(padapter, false);
+
+ if (BTDM_IsHT40(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("HT40\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntPsTdma(padapter, true, 12);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntPsTdma(padapter, true, 10);
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntPsTdma(padapter, true, 16);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntPsTdma(padapter, true, 14);
+ }
+ }
+
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
+ btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 37, 0);
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 27, 0);
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntPsTdma(padapter, true, 12);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntPsTdma(padapter, true, 10);
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntPsTdma(padapter, true, 16);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntPsTdma(padapter, true, 14);
+ }
+ }
+
+ /* sw mechanism */
+ if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
+ (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
+ btdm_2AntAgcTable(padapter, true);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+			RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ }
+ }
+}
+
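+/* HID+A2DP: TDMA duration adjustment, parameter 3 for A2DP basic rate, 1 for EDR rate */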
+static void btdm_2Ant8723AHIDA2DPAction(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 btRssiState, btRssiState1, btInfoExt;
+
+ btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
+
+ if (btdm_NeedToDecBtPwr(padapter))
+ btdm_2AntDecBtPwr(padapter, true);
+ else
+ btdm_2AntDecBtPwr(padapter, false);
+
+ if (BTDM_IsHT40(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("HT40\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, true, false, 3);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, true, false, 1);
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, true, true, 3);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, true, true, 1);
+ }
+ }
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ btRssiState1 = BTDM_CheckCoexRSSIState(padapter, 2, 27, 0);
+
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, true, false, 3);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, true, false, 1);
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
+ RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, true, true, 3);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, true, true, 1);
+ }
+ }
+ if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
+ (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, true);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ }
+ }
+}
+
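+/* A2DP handling used on the fw-reported bt_info path; also sets the coex table and the ignore-WLAN-act state before the fw/sw mechanisms */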
+static void btdm_2Ant8723AA2dp(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 btRssiState, btRssiState1, btInfoExt;
+
+ btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
+
+ if (btdm_NeedToDecBtPwr(padapter))
+ btdm_2AntDecBtPwr(padapter, true);
+ else
+ btdm_2AntDecBtPwr(padapter, false);
+ /* coex table */
+ btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
+ btdm_2AntIgnoreWlanAct(padapter, false);
+
+ if (BTDM_IsHT40(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("HT40\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
+ /* fw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+			RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ btdm_2AntTdmaDurationAdjust(padapter, false, false, 1);
+ } else {
+			RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, false, true, 1);
+ }
+
+ /* sw mechanism */
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
+ btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 47, 0);
+ btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
+
+ /* fw mechanism */
+ if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
+ (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
+ PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
+ btdm_2AntTdmaDurationAdjust(padapter, false, false, 1);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
+ btdm_2AntTdmaDurationAdjust(padapter, false, true, 1);
+ }
+
+ /* sw mechanism */
+ if ((btRssiState == BT_RSSI_STATE_HIGH) ||
+ (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
+ btdm_2AntAgcTable(padapter, true);
+ btdm_2AntAdcBackOff(padapter, true);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+ }
+ }
+}
+
+/* extern functions start with BTDM_ */
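+/* 2-Ant parameter init: enable BT counter statistics and force all coexistence settings back to their defaults */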
+static void BTDM_2AntParaInit(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], 2Ant Parameter Init!!\n"));
+
+ /* Enable counter statistics */
+ rtw_write8(padapter, 0x76e, 0x4);
+ rtw_write8(padapter, 0x778, 0x3);
+ rtw_write8(padapter, 0x40, 0x20);
+
+ /* force to reset coex mechanism */
+ pBtdm8723->preVal0x6c0 = 0x0;
+ btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
+
+ pBtdm8723->bPrePsTdmaOn = true;
+ btdm_2AntPsTdma(padapter, false, 0);
+
+ pBtdm8723->preFwDacSwingLvl = 0x10;
+ btdm_2AntFwDacSwingLvl(padapter, 0x20);
+
+ pBtdm8723->bPreDecBtPwr = true;
+ btdm_2AntDecBtPwr(padapter, false);
+
+ pBtdm8723->bPreAgcTableEn = true;
+ btdm_2AntAgcTable(padapter, false);
+
+ pBtdm8723->bPreAdcBackOff = true;
+ btdm_2AntAdcBackOff(padapter, false);
+
+ pBtdm8723->bPreLowPenaltyRa = true;
+ btdm_2AntLowPenaltyRa(padapter, false);
+
+ pBtdm8723->bPreRfRxLpfShrink = true;
+ btdm_2AntRfShrink(padapter, false);
+
+ pBtdm8723->bPreDacSwingOn = true;
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+
+ pBtdm8723->bPreIgnoreWlanAct = true;
+ btdm_2AntIgnoreWlanAct(padapter, false);
+}
+
+static void BTDM_2AntHwCoexAllOff8723A(struct rtw_adapter *padapter)
+{
+ btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
+}
+
+static void BTDM_2AntFwCoexAllOff8723A(struct rtw_adapter *padapter)
+{
+ btdm_2AntIgnoreWlanAct(padapter, false);
+ btdm_2AntPsTdma(padapter, false, 0);
+ btdm_2AntFwDacSwingLvl(padapter, 0x20);
+ btdm_2AntDecBtPwr(padapter, false);
+}
+
+static void BTDM_2AntSwCoexAllOff8723A(struct rtw_adapter *padapter)
+{
+ btdm_2AntAgcTable(padapter, false);
+ btdm_2AntAdcBackOff(padapter, false);
+ btdm_2AntLowPenaltyRa(padapter, false);
+ btdm_2AntRfShrink(padapter, false);
+ btdm_2AntDacSwing(padapter, false, 0xc0);
+}
+
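+/* Parse the fw C2H bt_info byte: handle BT inquiry/page first, then map the reported profiles (SCO/HID/A2DP/FTP) to a 2-Ant coexistence algorithm */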
+static void BTDM_2AntFwC2hBtInfo8723A(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+ u8 btInfo = 0;
+ u8 algorithm = BT_2ANT_COEX_ALGO_UNDEFINED;
+ u8 bBtLinkExist = false, bBtHsModeExist = false;
+
+ btInfo = pHalData->bt_coexist.halCoex8723.c2hBtInfoOriginal;
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_IDLE;
+
+ /* check BIT2 first ==> check if bt is under inquiry or page scan */
+ if (btInfo & BIT(2)) {
+ if (!pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage) {
+ pBtMgnt->ExtConfig.bHoldForBtOperation = true;
+ pBtMgnt->ExtConfig.bHoldPeriodCnt = 1;
+ btdm_2AntBtInquiryPage(padapter);
+ } else {
+ pBtMgnt->ExtConfig.bHoldPeriodCnt++;
+ btdm_HoldForBtInqPage(padapter);
+ }
+ pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage = true;
+ } else {
+ pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage = false;
+ pBtMgnt->ExtConfig.bHoldForBtOperation = false;
+ pBtMgnt->ExtConfig.bHoldPeriodCnt = 0;
+ }
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTC2H], pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage =%x pBtMgnt->ExtConfig.bHoldPeriodCnt =%x pBtMgnt->ExtConfig.bHoldForBtOperation =%x\n",
+ pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage,
+ pBtMgnt->ExtConfig.bHoldPeriodCnt,
+ pBtMgnt->ExtConfig.bHoldForBtOperation));
+
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTC2H], btInfo =%x pHalData->bt_coexist.halCoex8723.c2hBtInfoOriginal =%x\n",
+ btInfo, pHalData->bt_coexist.halCoex8723.c2hBtInfoOriginal));
+ if (btInfo&BT_INFO_ACL) {
+ RTPRINT(FBT, BT_TRACE, ("[BTC2H], BTInfo: bConnect = true btInfo =%x\n", btInfo));
+ bBtLinkExist = true;
+ if (((btInfo&(BT_INFO_FTP|BT_INFO_A2DP|BT_INFO_HID|BT_INFO_SCO_BUSY)) != 0) ||
+ pHalData->bt_coexist.halCoex8723.btRetryCnt > 0) {
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_NON_IDLE;
+ } else {
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+
+ if (btInfo&BT_INFO_SCO || btInfo&BT_INFO_SCO_BUSY) {
+ if (btInfo&BT_INFO_FTP || btInfo&BT_INFO_A2DP || btInfo&BT_INFO_HID) {
+ switch (btInfo&0xe0) {
+ case BT_INFO_HID:
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + HID\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID;
+ break;
+ case BT_INFO_A2DP:
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + A2DP\n"));
+ break;
+ case BT_INFO_FTP:
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_SCO;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ break;
+ case (BT_INFO_HID | BT_INFO_A2DP):
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
+ break;
+ case (BT_INFO_HID | BT_INFO_FTP):
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ break;
+ case (BT_INFO_A2DP | BT_INFO_FTP):
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_A2DP;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ break;
+ case (BT_INFO_HID | BT_INFO_A2DP | BT_INFO_FTP):
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ break;
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO only\n"));
+ algorithm = BT_2ANT_COEX_ALGO_SCO;
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], non SCO\n"));
+ switch (btInfo&0xe0) {
+ case BT_INFO_HID:
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID;
+ break;
+ case BT_INFO_A2DP:
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP\n"));
+ algorithm = BT_2ANT_COEX_ALGO_A2DP;
+ break;
+ case BT_INFO_FTP:
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
+ break;
+ case (BT_INFO_HID | BT_INFO_A2DP):
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
+ break;
+ case (BT_INFO_HID|BT_INFO_FTP):
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ break;
+ case (BT_INFO_A2DP|BT_INFO_FTP):
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_A2DP;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ break;
+ case (BT_INFO_HID|BT_INFO_A2DP|BT_INFO_FTP):
+ if (bBtHsModeExist) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+ algorithm = BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ break;
+ }
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTC2H], BTInfo: bConnect = false\n"));
+ pBtdm8723->btStatus = BT_2ANT_BT_STATUS_IDLE;
+ }
+
+ pBtdm8723->curAlgorithm = algorithm;
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Algorithm = %d \n", pBtdm8723->curAlgorithm));
+
+/* From */
+ BTDM_CheckWiFiState(padapter);
+ if (pBtMgnt->ExtConfig.bManualControl) {
+ RTPRINT(FBT, BT_TRACE, ("Action Manual control, won't execute bt coexist mechanism!!\n"));
+ return;
+ }
+}
+
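+/* Top-level 2-Ant coexistence: use BT stack profile information when available, otherwise the fw-reported bt_info, then dispatch to the per-algorithm action routine */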
+void BTDM_2AntBtCoexist8723A(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
+ u8 btInfoOriginal = 0;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
+
+ if (BTDM_BtProfileSupport(padapter)) {
+ if (pBtMgnt->ExtConfig.bHoldForBtOperation) {
+ RTPRINT(FBT, BT_TRACE, ("Action for BT Operation adjust!!\n"));
+ return;
+ }
+ if (pBtMgnt->ExtConfig.bHoldPeriodCnt) {
+ RTPRINT(FBT, BT_TRACE, ("Hold BT inquiry/page scan setting (cnt = %d)!!\n",
+ pBtMgnt->ExtConfig.bHoldPeriodCnt));
+ if (pBtMgnt->ExtConfig.bHoldPeriodCnt >= 11) {
+ pBtMgnt->ExtConfig.bHoldPeriodCnt = 0;
+ /* next time the coexist parameters should be reset again. */
+ } else {
+ pBtMgnt->ExtConfig.bHoldPeriodCnt++;
+ }
+ return;
+ }
+
+ if (pBtDbg->dbgCtrl)
+ RTPRINT(FBT, BT_TRACE, ("[Dbg control], "));
+
+ pBtdm8723->curAlgorithm = btdm_ActionAlgorithm(padapter);
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Algorithm = %d \n", pBtdm8723->curAlgorithm));
+
+ if (btdm_Is2Ant8723ACommonAction(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant common.\n"));
+ pBtdm8723->bResetTdmaAdjust = true;
+ } else {
+ if (pBtdm8723->curAlgorithm != pBtdm8723->preAlgorithm) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], preAlgorithm =%d, curAlgorithm =%d\n",
+ pBtdm8723->preAlgorithm, pBtdm8723->curAlgorithm));
+ pBtdm8723->bResetTdmaAdjust = true;
+ }
+ switch (pBtdm8723->curAlgorithm) {
+ case BT_2ANT_COEX_ALGO_SCO:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = SCO.\n"));
+ btdm_2Ant8723ASCOAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_HID:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID.\n"));
+ btdm_2Ant8723AHIDAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_A2DP:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = A2DP.\n"));
+ btdm_2Ant8723AA2DPAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_PANEDR:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN(EDR).\n"));
+ btdm_2Ant8723APANEDRAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_PANHS:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HS mode.\n"));
+ btdm_2Ant8723APANHSAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_PANEDR_A2DP:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN+A2DP.\n"));
+ btdm_2Ant8723APANEDRA2DPAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_PANEDR_HID:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN(EDR)+HID.\n"));
+ btdm_2Ant8723APANEDRHIDAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID+A2DP+PAN.\n"));
+ btdm_2Ant8723AHIDA2DPPANEDRAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_HID_A2DP:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID+A2DP.\n"));
+ btdm_2Ant8723AHIDA2DPAction(padapter);
+ break;
+ default:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = 0.\n"));
+ btdm_2Ant8723AA2DPAction(padapter);
+ break;
+ }
+ pBtdm8723->preAlgorithm = pBtdm8723->curAlgorithm;
+ }
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex] Get bt info by fw!!\n"));
+		/* this message shows whether the c2h response for bt_info has been received. */
+ if (pHalData->bt_coexist.halCoex8723.bC2hBtInfoReqSent)
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex] c2h for btInfo not rcvd yet!!\n"));
+
+ btInfoOriginal = pHalData->bt_coexist.halCoex8723.c2hBtInfoOriginal;
+
+ if (pBtMgnt->ExtConfig.bHoldForBtOperation) {
+ RTPRINT(FBT, BT_TRACE, ("Action for BT Operation adjust!!\n"));
+ return;
+ }
+ if (pBtMgnt->ExtConfig.bHoldPeriodCnt) {
+ RTPRINT(FBT, BT_TRACE,
+ ("Hold BT inquiry/page scan setting (cnt = %d)!!\n",
+ pBtMgnt->ExtConfig.bHoldPeriodCnt));
+ if (pBtMgnt->ExtConfig.bHoldPeriodCnt >= 11) {
+ pBtMgnt->ExtConfig.bHoldPeriodCnt = 0;
+ /* next time the coexist parameters should be reset again. */
+ } else {
+ pBtMgnt->ExtConfig.bHoldPeriodCnt++;
+ }
+ return;
+ }
+
+ if (pBtDbg->dbgCtrl)
+ RTPRINT(FBT, BT_TRACE, ("[Dbg control], "));
+ if (btdm_Is2Ant8723ACommonAction(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant common.\n"));
+ pBtdm8723->bResetTdmaAdjust = true;
+ } else {
+ if (pBtdm8723->curAlgorithm != pBtdm8723->preAlgorithm) {
+ RTPRINT(FBT, BT_TRACE,
+ ("[BTCoex], preAlgorithm =%d, curAlgorithm =%d\n",
+ pBtdm8723->preAlgorithm,
+ pBtdm8723->curAlgorithm));
+ pBtdm8723->bResetTdmaAdjust = true;
+ }
+ switch (pBtdm8723->curAlgorithm) {
+ case BT_2ANT_COEX_ALGO_SCO:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = SCO.\n"));
+ btdm_2Ant8723ASCOAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_HID:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID.\n"));
+ btdm_2Ant8723AHIDAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_A2DP:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = A2DP.\n"));
+ btdm_2Ant8723AA2dp(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_PANEDR:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN(EDR).\n"));
+ btdm_2Ant8723APANEDRAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_PANHS:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HS mode.\n"));
+ btdm_2Ant8723APANHSAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_PANEDR_A2DP:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN+A2DP.\n"));
+ btdm_2Ant8723APANEDRA2DPAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_PANEDR_HID:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN(EDR)+HID.\n"));
+ btdm_2Ant8723APANEDRHIDAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID+A2DP+PAN.\n"));
+ btdm_2Ant8723AHIDA2DPPANEDRAction(padapter);
+ break;
+ case BT_2ANT_COEX_ALGO_HID_A2DP:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID+A2DP.\n"));
+ btdm_2Ant8723AHIDA2DPAction(padapter);
+ break;
+ default:
+ RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = 0.\n"));
+ btdm_2Ant8723AA2DPAction(padapter);
+ break;
+ }
+ pBtdm8723->preAlgorithm = pBtdm8723->curAlgorithm;
+ }
+ }
+}
+
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc87232Ant.c ===== */
+#endif
+
+#ifdef __HALBTC8723_C__ /* HAL/BTCoexist/HalBtc8723.c */
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc8723.c ===== */
+
+static u8 btCoexDbgBuf[BT_TMP_BUF_SIZE];
+
+static const char *const BtProfileString[] = {
+ "NONE",
+ "A2DP",
+ "PAN",
+ "HID",
+ "SCO",
+};
+
+static const char *const BtSpecString[] = {
+ "1.0b",
+ "1.1",
+ "1.2",
+ "2.0+EDR",
+ "2.1+EDR",
+ "3.0+HS",
+ "4.0",
+};
+
+static const char *const BtLinkRoleString[] = {
+ "Master",
+ "Slave",
+};
+
+static u8 btdm_BtWifiAntNum(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct bt_coexist_8723a *pBtCoex = &pHalData->bt_coexist.halCoex8723;
+
+ if (Ant_x2 == pHalData->bt_coexist.BT_Ant_Num) {
+ if (Ant_x2 == pBtCoex->TotalAntNum)
+ return Ant_x2;
+ else
+ return Ant_x1;
+ } else {
+ return Ant_x1;
+ }
+ return Ant_x2;
+}
+
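+/* Latch the high/low priority BT Tx/Rx counters from hardware, then reset the hardware counters */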
+static void btdm_BtHwCountersMonitor(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u32 regHPTxRx, regLPTxRx, u4Tmp;
+ u32 regHPTx = 0, regHPRx = 0, regLPTx = 0, regLPRx = 0;
+
+ regHPTxRx = REG_HIGH_PRIORITY_TXRX;
+ regLPTxRx = REG_LOW_PRIORITY_TXRX;
+
+ u4Tmp = rtw_read32(padapter, regHPTxRx);
+ regHPTx = u4Tmp & bMaskLWord;
+ regHPRx = (u4Tmp & bMaskHWord)>>16;
+
+ u4Tmp = rtw_read32(padapter, regLPTxRx);
+ regLPTx = u4Tmp & bMaskLWord;
+ regLPRx = (u4Tmp & bMaskHWord)>>16;
+
+ pHalData->bt_coexist.halCoex8723.highPriorityTx = regHPTx;
+ pHalData->bt_coexist.halCoex8723.highPriorityRx = regHPRx;
+ pHalData->bt_coexist.halCoex8723.lowPriorityTx = regLPTx;
+ pHalData->bt_coexist.halCoex8723.lowPriorityRx = regLPRx;
+
+ RTPRINT(FBT, BT_TRACE, ("High Priority Tx/Rx = %d / %d\n", regHPTx, regHPRx));
+ RTPRINT(FBT, BT_TRACE, ("Low Priority Tx/Rx = %d / %d\n", regLPTx, regLPRx));
+
+ /* reset counter */
+ rtw_write8(padapter, 0x76e, 0xc);
+}
+
+/* This function checks whether the 8723A BT side is disabled */
+static void btdm_BtEnableDisableCheck8723A(struct rtw_adapter *padapter)
+{
+ u8 btAlife = true;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+#ifdef CHECK_BT_EXIST_FROM_REG
+ u8 val8;
+
+	/* 0x68[28] = 1 => BT enabled; otherwise disabled */
+ val8 = rtw_read8(padapter, 0x6B);
+ if (!(val8 & BIT(4)))
+ btAlife = false;
+
+ if (btAlife)
+ pHalData->bt_coexist.bCurBtDisabled = false;
+ else
+ pHalData->bt_coexist.bCurBtDisabled = true;
+#else
+ if (pHalData->bt_coexist.halCoex8723.highPriorityTx == 0 &&
+ pHalData->bt_coexist.halCoex8723.highPriorityRx == 0 &&
+ pHalData->bt_coexist.halCoex8723.lowPriorityTx == 0 &&
+ pHalData->bt_coexist.halCoex8723.lowPriorityRx == 0)
+ btAlife = false;
+ if (pHalData->bt_coexist.halCoex8723.highPriorityTx == 0xeaea &&
+ pHalData->bt_coexist.halCoex8723.highPriorityRx == 0xeaea &&
+ pHalData->bt_coexist.halCoex8723.lowPriorityTx == 0xeaea &&
+ pHalData->bt_coexist.halCoex8723.lowPriorityRx == 0xeaea)
+ btAlife = false;
+ if (pHalData->bt_coexist.halCoex8723.highPriorityTx == 0xffff &&
+ pHalData->bt_coexist.halCoex8723.highPriorityRx == 0xffff &&
+ pHalData->bt_coexist.halCoex8723.lowPriorityTx == 0xffff &&
+ pHalData->bt_coexist.halCoex8723.lowPriorityRx == 0xffff)
+ btAlife = false;
+ if (btAlife) {
+ pHalData->bt_coexist.btActiveZeroCnt = 0;
+ pHalData->bt_coexist.bCurBtDisabled = false;
+ RTPRINT(FBT, BT_TRACE, ("8723A BT is enabled !!\n"));
+ } else {
+ pHalData->bt_coexist.btActiveZeroCnt++;
+ RTPRINT(FBT, BT_TRACE, ("8723A bt all counters = 0, %d times!!\n",
+ pHalData->bt_coexist.btActiveZeroCnt));
+ if (pHalData->bt_coexist.btActiveZeroCnt >= 2) {
+ pHalData->bt_coexist.bCurBtDisabled = true;
+ RTPRINT(FBT, BT_TRACE, ("8723A BT is disabled !!\n"));
+ }
+ }
+#endif
+
+ if (!pHalData->bt_coexist.bCurBtDisabled) {
+ if (BTDM_IsWifiConnectionExist(padapter))
+ BTDM_SetFwChnlInfo(padapter, RT_MEDIA_CONNECT);
+ else
+ BTDM_SetFwChnlInfo(padapter, RT_MEDIA_DISCONNECT);
+ }
+
+ if (pHalData->bt_coexist.bPreBtDisabled !=
+ pHalData->bt_coexist.bCurBtDisabled) {
+ RTPRINT(FBT, BT_TRACE, ("8723A BT is from %s to %s!!\n",
+ (pHalData->bt_coexist.bPreBtDisabled ? "disabled":"enabled"),
+ (pHalData->bt_coexist.bCurBtDisabled ? "disabled":"enabled")));
+ pHalData->bt_coexist.bPreBtDisabled = pHalData->bt_coexist.bCurBtDisabled;
+ }
+}
+
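+/* Run the 1-Ant or 2-Ant coexistence mechanism depending on the antenna configuration, and log any coexist state change */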
+static void btdm_BTCoexist8723AHandler(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ if (btdm_BtWifiAntNum(padapter) == Ant_x2) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], 2 Ant mechanism\n"));
+ BTDM_2AntBtCoexist8723A(padapter);
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1 Ant mechanism\n"));
+ BTDM_1AntBtCoexist8723A(padapter);
+ }
+
+ if (!BTDM_IsSameCoexistState(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Coexist State[bitMap] change from 0x%"i64fmt"x to 0x%"i64fmt"x\n",
+ pHalData->bt_coexist.PreviousState,
+ pHalData->bt_coexist.CurrentState));
+ pHalData->bt_coexist.PreviousState = pHalData->bt_coexist.CurrentState;
+
+ RTPRINT(FBT, BT_TRACE, ("["));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_BT30)
+ RTPRINT(FBT, BT_TRACE, ("BT 3.0, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_HT20)
+ RTPRINT(FBT, BT_TRACE, ("HT20, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_HT40)
+ RTPRINT(FBT, BT_TRACE, ("HT40, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_LEGACY)
+ RTPRINT(FBT, BT_TRACE, ("Legacy, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_RSSI_LOW)
+ RTPRINT(FBT, BT_TRACE, ("Rssi_Low, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_RSSI_MEDIUM)
+ RTPRINT(FBT, BT_TRACE, ("Rssi_Mid, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_RSSI_HIGH)
+ RTPRINT(FBT, BT_TRACE, ("Rssi_High, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_IDLE)
+ RTPRINT(FBT, BT_TRACE, ("Wifi_Idle, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_UPLINK)
+ RTPRINT(FBT, BT_TRACE, ("Wifi_Uplink, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_DOWNLINK)
+ RTPRINT(FBT, BT_TRACE, ("Wifi_Downlink, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_BT_IDLE)
+ RTPRINT(FBT, BT_TRACE, ("BT_idle, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_PROFILE_HID)
+ RTPRINT(FBT, BT_TRACE, ("PRO_HID, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_PROFILE_A2DP)
+ RTPRINT(FBT, BT_TRACE, ("PRO_A2DP, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_PROFILE_PAN)
+ RTPRINT(FBT, BT_TRACE, ("PRO_PAN, "));
+ if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_PROFILE_SCO)
+ RTPRINT(FBT, BT_TRACE, ("PRO_SCO, "));
+ RTPRINT(FBT, BT_TRACE, ("]\n"));
+ }
+}
+
+/* extern functions start with BTDM_ */
+u32 BTDM_BtTxRxCounterH(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u32 counters = 0;
+
+ counters = pHalData->bt_coexist.halCoex8723.highPriorityTx+
+ pHalData->bt_coexist.halCoex8723.highPriorityRx;
+ return counters;
+}
+
+u32 BTDM_BtTxRxCounterL(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u32 counters = 0;
+
+ counters = pHalData->bt_coexist.halCoex8723.lowPriorityTx+
+		pHalData->bt_coexist.halCoex8723.lowPriorityRx;
+ return counters;
+}
+
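+/* H2C 0x19: report the WiFi connection state, the channel (adjusted for HT40 when associated) and 0x30/0x20 for HT40/other bandwidth */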
+void BTDM_SetFwChnlInfo(struct rtw_adapter *padapter, enum rt_media_status mstatus)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ u8 H2C_Parameter[3] = {0};
+ u8 chnl;
+
+ /* opMode */
+ if (RT_MEDIA_CONNECT == mstatus)
+ H2C_Parameter[0] = 0x1; /* 0: disconnected, 1:connected */
+
+ if (check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE)) {
+ /* channel */
+ chnl = pmlmeext->cur_channel;
+ if (BTDM_IsHT40(padapter)) {
+ if (pmlmeext->cur_ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
+ chnl -= 2;
+ else if (pmlmeext->cur_ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ chnl += 2;
+ }
+ H2C_Parameter[1] = chnl;
+	} else { /* check if an HS link exists */
+ /* channel */
+ if (BT_Operation(padapter))
+ H2C_Parameter[1] = pBtMgnt->BTChannel;
+ else
+ H2C_Parameter[1] = pmlmeext->cur_channel;
+ }
+
+ if (BTDM_IsHT40(padapter))
+ H2C_Parameter[2] = 0x30;
+ else
+ H2C_Parameter[2] = 0x20;
+
+ FillH2CCmd(padapter, 0x19, 3, H2C_Parameter);
+}
+
+u8 BTDM_IsWifiConnectionExist(struct rtw_adapter *padapter)
+{
+ u8 bRet = false;
+
+ if (BTHCI_HsConnectionEstablished(padapter))
+ bRet = true;
+
+ if (check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE) == true)
+ bRet = true;
+
+ return bRet;
+}
+
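+/* Build and send the 5-byte PS TDMA command (H2C 0x3a); in 1-Ant non-station modes the bytes are adjusted while BT has an active connection */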
+void BTDM_SetFw3a(
+ struct rtw_adapter *padapter,
+ u8 byte1,
+ u8 byte2,
+ u8 byte3,
+ u8 byte4,
+ u8 byte5
+ )
+{
+ u8 H2C_Parameter[5] = {0};
+
+ if (BTDM_1Ant8723A(padapter)) {
+ if ((!check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE)) &&
+ (get_fwstate(&padapter->mlmepriv) != WIFI_NULL_STATE)) {
+ /* for softap mode */
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct bt_coexist_8723a *pBtCoex = &pHalData->bt_coexist.halCoex8723;
+ u8 BtState = pBtCoex->c2hBtInfo;
+
+ if ((BtState != BT_INFO_STATE_NO_CONNECTION) &&
+ (BtState != BT_INFO_STATE_CONNECT_IDLE)) {
+ if (byte1 & BIT(4)) {
+ byte1 &= ~BIT(4);
+ byte1 |= BIT(5);
+ }
+
+ byte5 |= BIT(5);
+ if (byte5 & BIT(6))
+ byte5 &= ~BIT(6);
+ }
+ }
+ }
+
+ H2C_Parameter[0] = byte1;
+ H2C_Parameter[1] = byte2;
+ H2C_Parameter[2] = byte3;
+ H2C_Parameter[3] = byte4;
+ H2C_Parameter[4] = byte5;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], FW write 0x3a(5bytes) = 0x%02x%08x\n",
+ H2C_Parameter[0],
+ H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
+
+ FillH2CCmd(padapter, 0x3a, 5, H2C_Parameter);
+}
+
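+/* Trigger a bt_info report from firmware via H2C 0x38; skipped when BT is disabled */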
+void BTDM_QueryBtInformation(struct rtw_adapter *padapter)
+{
+ u8 H2C_Parameter[1] = {0};
+ struct hal_data_8723a *pHalData;
+ struct bt_coexist_8723a *pBtCoex;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBtCoex = &pHalData->bt_coexist.halCoex8723;
+
+ if (BT_IsBtDisabled(padapter)) {
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_DISABLED;
+ pBtCoex->bC2hBtInfoReqSent = false;
+ return;
+ }
+
+ if (pBtCoex->c2hBtInfo == BT_INFO_STATE_DISABLED)
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_NO_CONNECTION;
+
+ if (pBtCoex->bC2hBtInfoReqSent == true)
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], didn't recv previous BtInfo report!\n"));
+ else
+ pBtCoex->bC2hBtInfoReqSent = true;
+
+ H2C_Parameter[0] |= BIT(0); /* trigger */
+
+/*RTPRINT(FBT, BT_TRACE, ("[BTCoex], Query Bt information, write 0x38 = 0x%x\n", */
+/*H2C_Parameter[0])); */
+
+ FillH2CCmd(padapter, 0x38, 1, H2C_Parameter);
+}
+
+void BTDM_SetSwRfRxLpfCorner(struct rtw_adapter *padapter, u8 type)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (BT_RF_RX_LPF_CORNER_SHRINK == type) {
+ /* Shrink RF Rx LPF corner */
+ RTPRINT(FBT, BT_TRACE, ("Shrink RF Rx LPF corner!!\n"));
+ PHY_SetRFReg(padapter, PathA, 0x1e, bRFRegOffsetMask, 0xf0ff7);
+ pHalData->bt_coexist.bSWCoexistAllOff = false;
+ } else if (BT_RF_RX_LPF_CORNER_RESUME == type) {
+ /* Resume RF Rx LPF corner */
+ RTPRINT(FBT, BT_TRACE, ("Resume RF Rx LPF corner!!\n"));
+ PHY_SetRFReg(padapter, PathA, 0x1e, bRFRegOffsetMask, pHalData->bt_coexist.BtRfRegOrigin1E);
+ }
+}
+
+void
+BTDM_SetSwPenaltyTxRateAdaptive(
+ struct rtw_adapter *padapter,
+ u8 raType
+ )
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 tmpU1;
+
+ tmpU1 = rtw_read8(padapter, 0x4fd);
+ tmpU1 |= BIT(0);
+ if (BT_TX_RATE_ADAPTIVE_LOW_PENALTY == raType) {
+ tmpU1 &= ~BIT(2);
+ pHalData->bt_coexist.bSWCoexistAllOff = false;
+ } else if (BT_TX_RATE_ADAPTIVE_NORMAL == raType) {
+ tmpU1 |= BIT(2);
+ }
+
+ rtw_write8(padapter, 0x4fd, tmpU1);
+}
+
+void BTDM_SetFwDecBtPwr(struct rtw_adapter *padapter, u8 bDecBtPwr)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 H2C_Parameter[1] = {0};
+
+ H2C_Parameter[0] = 0;
+
+ if (bDecBtPwr) {
+ H2C_Parameter[0] |= BIT(1);
+ pHalData->bt_coexist.bFWCoexistAllOff = false;
+ }
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], decrease Bt Power : %s, write 0x21 = 0x%x\n",
+ (bDecBtPwr ? "Yes!!" : "No!!"), H2C_Parameter[0]));
+
+ FillH2CCmd(padapter, 0x21, 1, H2C_Parameter);
+}
+
+u8 BTDM_BtProfileSupport(struct rtw_adapter *padapter)
+{
+ u8 bRet = false;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (pBtMgnt->bSupportProfile &&
+ !pHalData->bt_coexist.halCoex8723.bForceFwBtInfo)
+ bRet = true;
+
+ return bRet;
+}
+
+static void BTDM_AdjustForBtOperation8723A(struct rtw_adapter *padapter)
+{
+ /* BTDM_2AntAdjustForBtOperation8723(padapter); */
+}
+
+static void BTDM_FwC2hBtRssi8723A(struct rtw_adapter *padapter, u8 *tmpBuf)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 percent = 0, u1tmp = 0;
+
+ u1tmp = tmpBuf[0];
+ percent = u1tmp*2+10;
+
+ pHalData->bt_coexist.halCoex8723.btRssi = percent;
+/*RTPRINT(FBT, BT_TRACE, ("[BTC2H], BT RSSI =%d\n", percent)); */
+}
+
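+/* C2H bt_info handler: byte 0 = status, byte 1 = retry count, byte 2 = RSSI, byte 3 bit 0 = A2DP rate; then re-run the coexistence handler unless manual control is set */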
+static void
+BTDM_FwC2hBtInfo8723A(struct rtw_adapter *padapter, u8 *tmpBuf, u8 length)
+{
+ struct hal_data_8723a *pHalData;
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+ struct bt_coexist_8723a *pBtCoex;
+ u8 i;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+ pBtCoex = &pHalData->bt_coexist.halCoex8723;
+
+ pBtCoex->bC2hBtInfoReqSent = false;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTC2H], BT info[%d]=[", length));
+
+ pBtCoex->btRetryCnt = 0;
+ for (i = 0; i < length; i++) {
+ switch (i) {
+ case 0:
+ pBtCoex->c2hBtInfoOriginal = tmpBuf[i];
+ break;
+ case 1:
+ pBtCoex->btRetryCnt = tmpBuf[i];
+ break;
+ case 2:
+ BTDM_FwC2hBtRssi8723A(padapter, &tmpBuf[i]);
+ break;
+ case 3:
+ pBtCoex->btInfoExt = tmpBuf[i]&BIT(0);
+ break;
+ }
+
+ if (i == length-1)
+ RTPRINT(FBT, BT_TRACE, ("0x%02x]\n", tmpBuf[i]));
+ else
+ RTPRINT(FBT, BT_TRACE, ("0x%02x, ", tmpBuf[i]));
+ }
+ RTPRINT(FBT, BT_TRACE, ("[BTC2H], BT RSSI =%d\n", pBtCoex->btRssi));
+ if (pBtCoex->btInfoExt)
+ RTPRINT(FBT, BT_TRACE, ("[BTC2H], pBtCoex->btInfoExt =%x\n", pBtCoex->btInfoExt));
+
+ if (btdm_BtWifiAntNum(padapter) == Ant_x1)
+ BTDM_1AntFwC2hBtInfo8723A(padapter);
+ else
+ BTDM_2AntFwC2hBtInfo8723A(padapter);
+
+ if (pBtMgnt->ExtConfig.bManualControl) {
+ RTPRINT(FBT, BT_TRACE, ("%s: Action Manual control!!\n", __func__));
+ return;
+ }
+
+ btdm_BTCoexist8723AHandler(padapter);
+}
+
+static void BTDM_Display8723ABtCoexInfo(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct bt_coexist_8723a *pBtCoex = &pHalData->bt_coexist.halCoex8723;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ u8 u1Tmp, u1Tmp1, u1Tmp2, i, btInfoExt, psTdmaCase = 0;
+ u32 u4Tmp[4];
+ u8 antNum = Ant_x2;
+
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+ DCMD_Printf(btCoexDbgBuf);
+
+ if (!pHalData->bt_coexist.BluetoothCoexist) {
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ DCMD_Printf(btCoexDbgBuf);
+ return;
+ }
+
+ antNum = btdm_BtWifiAntNum(padapter);
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/%d ", "Ant mechanism PG/Now run :", \
+ ((pHalData->bt_coexist.BT_Ant_Num == Ant_x2) ? 2 : 1), ((antNum == Ant_x2) ? 2 : 1));
+ DCMD_Printf(btCoexDbgBuf);
+
+ if (pBtMgnt->ExtConfig.bManualControl) {
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "[Action Manual control]!!");
+ DCMD_Printf(btCoexDbgBuf);
+ } else {
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ ((pBtMgnt->bSupportProfile) ? "Yes" : "No"), pBtMgnt->ExtConfig.HCIExtensionVer);
+ DCMD_Printf(btCoexDbgBuf);
+ }
+
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\n %-35s = / %d", "Dot11 channel / BT channel", \
+ pBtMgnt->BTChannel);
+ DCMD_Printf(btCoexDbgBuf);
+
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\n %-35s = %d / %d / %d", "Wifi/BT/HS rssi", \
+ BTDM_GetRxSS(padapter),
+ pHalData->bt_coexist.halCoex8723.btRssi,
+ pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB);
+ DCMD_Printf(btCoexDbgBuf);
+
+ if (!pBtMgnt->ExtConfig.bManualControl) {
+		rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\n %-35s = %s / %s ", "Wifi status",
+ ((BTDM_Legacy(padapter)) ? "Legacy" : (((BTDM_IsHT40(padapter)) ? "HT40" : "HT20"))),
+ ((!BTDM_IsWifiBusy(padapter)) ? "idle" : ((BTDM_IsWifiUplink(padapter)) ? "uplink" : "downlink")));
+ DCMD_Printf(btCoexDbgBuf);
+
+ if (pBtMgnt->bSupportProfile) {
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+ ((BTHCI_CheckProfileExist(padapter, BT_PROFILE_SCO)) ? 1 : 0),
+ ((BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID)) ? 1 : 0),
+ ((BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) ? 1 : 0),
+ ((BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) ? 1 : 0));
+ DCMD_Printf(btCoexDbgBuf);
+
+ for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
+ if (pBtMgnt->ExtConfig.HCIExtensionVer >= 1) {
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/ %s/ %s", "Bt link type/spec/role",
+ BtProfileString[pBtMgnt->ExtConfig.linkInfo[i].BTProfile],
+ BtSpecString[pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec],
+ BtLinkRoleString[pBtMgnt->ExtConfig.linkInfo[i].linkRole]);
+ DCMD_Printf(btCoexDbgBuf);
+
+ btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "A2DP rate", \
+ (btInfoExt&BIT0) ? "Basic rate" : "EDR rate");
+ DCMD_Printf(btCoexDbgBuf);
+ } else {
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/ %s", "Bt link type/spec", \
+ BtProfileString[pBtMgnt->ExtConfig.linkInfo[i].BTProfile],
+ BtSpecString[pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec]);
+ DCMD_Printf(btCoexDbgBuf);
+ }
+ }
+ }
+ }
+
+ /* Sw mechanism */
+ if (!pBtMgnt->ExtConfig.bManualControl) {
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw BT Coex mechanism]============");
+ DCMD_Printf(btCoexDbgBuf);
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "AGC Table", \
+ pBtCoex->btdm2Ant.bCurAgcTableEn);
+ DCMD_Printf(btCoexDbgBuf);
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "ADC Backoff", \
+ pBtCoex->btdm2Ant.bCurAdcBackOff);
+ DCMD_Printf(btCoexDbgBuf);
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "Low penalty RA", \
+ pBtCoex->btdm2Ant.bCurLowPenaltyRa);
+ DCMD_Printf(btCoexDbgBuf);
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "RF Rx LPF Shrink", \
+ pBtCoex->btdm2Ant.bCurRfRxLpfShrink);
+ DCMD_Printf(btCoexDbgBuf);
+ }
+ u4Tmp[0] = PHY_QueryRFReg(padapter, PathA, 0x1e, 0xff0);
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "RF-A, 0x1e[11:4]/original val", \
+ u4Tmp[0], pHalData->bt_coexist.BtRfRegOrigin1E);
+ DCMD_Printf(btCoexDbgBuf);
+
+ /* Fw mechanism */
+ if (!pBtMgnt->ExtConfig.bManualControl) {
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw BT Coex mechanism]============");
+ DCMD_Printf(btCoexDbgBuf);
+ }
+ if (!pBtMgnt->ExtConfig.bManualControl) {
+ if (btdm_BtWifiAntNum(padapter) == Ant_x1)
+ psTdmaCase = pHalData->bt_coexist.halCoex8723.btdm1Ant.curPsTdma;
+ else
+ psTdmaCase = pHalData->bt_coexist.halCoex8723.btdm2Ant.curPsTdma;
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA(0x3a)", \
+ pHalData->bt_coexist.fw3aVal[0], pHalData->bt_coexist.fw3aVal[1],
+ pHalData->bt_coexist.fw3aVal[2], pHalData->bt_coexist.fw3aVal[3],
+ pHalData->bt_coexist.fw3aVal[4], psTdmaCase);
+ DCMD_Printf(btCoexDbgBuf);
+
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "Decrease Bt Power", \
+ pBtCoex->btdm2Ant.bCurDecBtPwr);
+ DCMD_Printf(btCoexDbgBuf);
+ }
+ u1Tmp = rtw_read8(padapter, 0x778);
+ u1Tmp1 = rtw_read8(padapter, 0x783);
+ u1Tmp2 = rtw_read8(padapter, 0x796);
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/ 0x783/ 0x796", \
+ u1Tmp, u1Tmp1, u1Tmp2);
+ DCMD_Printf(btCoexDbgBuf);
+
+ if (!pBtMgnt->ExtConfig.bManualControl) {
+		rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x / 0x%x", "Sw DacSwing Ctrl/Val",
+ pBtCoex->btdm2Ant.bCurDacSwingOn, pBtCoex->btdm2Ant.curDacSwingLvl);
+ DCMD_Printf(btCoexDbgBuf);
+ }
+ u4Tmp[0] = rtw_read32(padapter, 0x880);
+	rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x880",
+ u4Tmp[0]);
+ DCMD_Printf(btCoexDbgBuf);
+
+ /* Hw mechanism */
+ if (!pBtMgnt->ExtConfig.bManualControl) {
+ rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw BT Coex mechanism]============");
+ DCMD_Printf(btCoexDbgBuf);
+ }
+
+ u1Tmp = rtw_read8(padapter, 0x40);
+	rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x40",
+ u1Tmp);
+ DCMD_Printf(btCoexDbgBuf);
+
+ u4Tmp[0] = rtw_read32(padapter, 0x550);
+ u1Tmp = rtw_read8(padapter, 0x522);
+	rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x", "0x550(bcn contrl)/0x522",
+ u4Tmp[0], u1Tmp);
+ DCMD_Printf(btCoexDbgBuf);
+
+ u4Tmp[0] = rtw_read32(padapter, 0x484);
+	rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x484(rate adaptive)",
+ u4Tmp[0]);
+ DCMD_Printf(btCoexDbgBuf);
+
+	u4Tmp[0] = rtw_read32(padapter, 0xc50);
+	rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)",
+ u4Tmp[0]);
+ DCMD_Printf(btCoexDbgBuf);
+
+ u4Tmp[0] = rtw_read32(padapter, 0xda0);
+ u4Tmp[1] = rtw_read32(padapter, 0xda4);
+ u4Tmp[2] = rtw_read32(padapter, 0xda8);
+ u4Tmp[3] = rtw_read32(padapter, 0xdac);
+	rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0xda0/0xda4/0xda8/0xdac(FA cnt)",
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u4Tmp[3]);
+ DCMD_Printf(btCoexDbgBuf);
+
+ u4Tmp[0] = rtw_read32(padapter, 0x6c0);
+ u4Tmp[1] = rtw_read32(padapter, 0x6c4);
+ u4Tmp[2] = rtw_read32(padapter, 0x6c8);
+ u1Tmp = rtw_read8(padapter, 0x6cc);
+	rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp);
+ DCMD_Printf(btCoexDbgBuf);
+
+ /* u4Tmp = rtw_read32(padapter, 0x770); */
+	rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d", "0x770(Hi pri Rx[31:16]/Tx[15:0])",
+ pHalData->bt_coexist.halCoex8723.highPriorityRx,
+ pHalData->bt_coexist.halCoex8723.highPriorityTx);
+ DCMD_Printf(btCoexDbgBuf);
+ /* u4Tmp = rtw_read32(padapter, 0x774); */
+	rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d", "0x774(Lo pri Rx[31:16]/Tx[15:0])",
+ pHalData->bt_coexist.halCoex8723.lowPriorityRx,
+ pHalData->bt_coexist.halCoex8723.lowPriorityTx);
+ DCMD_Printf(btCoexDbgBuf);
+
+	/* Check whether the Tx mgnt queue is hung: 0x41b should read 0xf; a value such as 0xd means it is hung */
+ u1Tmp = rtw_read8(padapter, 0x41b);
+	rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x41b (hang chk == 0xf)",
+		 u1Tmp);
+	DCMD_Printf(btCoexDbgBuf);
+	rsprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "lastHMEBoxNum",
+		 pHalData->LastHMEBoxNum);
+ DCMD_Printf(btCoexDbgBuf);
+}
+
+static void
+BTDM_8723ASignalCompensation(struct rtw_adapter *padapter,
+ u8 *rssi_wifi, u8 *rssi_bt)
+{
+ if (btdm_BtWifiAntNum(padapter) == Ant_x1)
+ BTDM_1AntSignalCompensation(padapter, rssi_wifi, rssi_bt);
+}
+
+static void BTDM_8723AInit(struct rtw_adapter *padapter)
+{
+ if (btdm_BtWifiAntNum(padapter) == Ant_x2)
+ BTDM_2AntParaInit(padapter);
+ else
+ BTDM_1AntParaInit(padapter);
+}
+
+static void BTDM_HWCoexAllOff8723A(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->ExtConfig.bManualControl)
+ return;
+
+ if (btdm_BtWifiAntNum(padapter) == Ant_x2)
+ BTDM_2AntHwCoexAllOff8723A(padapter);
+}
+
+static void BTDM_FWCoexAllOff8723A(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->ExtConfig.bManualControl)
+ return;
+
+ if (btdm_BtWifiAntNum(padapter) == Ant_x2)
+ BTDM_2AntFwCoexAllOff8723A(padapter);
+}
+
+static void BTDM_SWCoexAllOff8723A(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->ExtConfig.bManualControl)
+ return;
+
+ if (btdm_BtWifiAntNum(padapter) == Ant_x2)
+ BTDM_2AntSwCoexAllOff8723A(padapter);
+}
+
+static void
+BTDM_Set8723ABtCoexCurrAntNum(struct rtw_adapter *padapter, u8 antNum)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct bt_coexist_8723a *pBtCoex = &pHalData->bt_coexist.halCoex8723;
+
+ if (antNum == 1)
+ pBtCoex->TotalAntNum = Ant_x1;
+ else if (antNum == 2)
+ pBtCoex->TotalAntNum = Ant_x2;
+}
+
+void BTDM_LpsLeave(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->ExtConfig.bManualControl)
+ return;
+
+ if (btdm_BtWifiAntNum(padapter) == Ant_x1)
+ BTDM_1AntLpsLeave(padapter);
+}
+
+static void BTDM_ForHalt8723A(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->ExtConfig.bManualControl)
+ return;
+
+ if (btdm_BtWifiAntNum(padapter) == Ant_x1)
+ BTDM_1AntForHalt(padapter);
+}
+
+static void BTDM_WifiScanNotify8723A(struct rtw_adapter *padapter, u8 scanType)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->ExtConfig.bManualControl)
+ return;
+
+ if (btdm_BtWifiAntNum(padapter) == Ant_x1)
+ BTDM_1AntWifiScanNotify(padapter, scanType);
+}
+
+static void
+BTDM_WifiAssociateNotify8723A(struct rtw_adapter *padapter, u8 action)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->ExtConfig.bManualControl)
+ return;
+
+ if (btdm_BtWifiAntNum(padapter) == Ant_x1)
+ BTDM_1AntWifiAssociateNotify(padapter, action);
+}
+
+static void
+BTDM_MediaStatusNotify8723A(struct rtw_adapter *padapter,
+ enum rt_media_status mstatus)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], MediaStatusNotify, %s\n",
+ mstatus?"connect":"disconnect"));
+
+ BTDM_SetFwChnlInfo(padapter, mstatus);
+
+ if (pBtMgnt->ExtConfig.bManualControl)
+ return;
+
+ if (btdm_BtWifiAntNum(padapter) == Ant_x1)
+ BTDM_1AntMediaStatusNotify(padapter, mstatus);
+}
+
+static void BTDM_ForDhcp8723A(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->ExtConfig.bManualControl)
+ return;
+
+ if (btdm_BtWifiAntNum(padapter) == Ant_x1)
+ BTDM_1AntForDhcp(padapter);
+}
+
+u8 BTDM_1Ant8723A(struct rtw_adapter *padapter)
+{
+ if (btdm_BtWifiAntNum(padapter) == Ant_x1)
+ return true;
+ else
+ return false;
+}
+
+static void BTDM_BTCoexist8723A(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+ struct bt_coexist_8723a *pBtCoex;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+ pBtCoex = &pHalData->bt_coexist.halCoex8723;
+
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], beacon RSSI = 0x%x(%d)\n",
+ pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB,
+ pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB));
+
+ btdm_BtHwCountersMonitor(padapter);
+ btdm_BtEnableDisableCheck8723A(padapter);
+
+ if (pBtMgnt->ExtConfig.bManualControl) {
+ RTPRINT(FBT, BT_TRACE, ("%s: Action Manual control!!\n", __func__));
+ return;
+ }
+
+ if (pBtCoex->bC2hBtInfoReqSent) {
+ if (BT_IsBtDisabled(padapter)) {
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_DISABLED;
+ } else {
+ if (pBtCoex->c2hBtInfo == BT_INFO_STATE_DISABLED)
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_NO_CONNECTION;
+ }
+
+ btdm_BTCoexist8723AHandler(padapter);
+	} else if (BT_IsBtDisabled(padapter)) {
+ pBtCoex->c2hBtInfo = BT_INFO_STATE_DISABLED;
+ btdm_BTCoexist8723AHandler(padapter);
+ }
+
+ BTDM_QueryBtInformation(padapter);
+}
+
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc8723.c ===== */
+#endif
+
+#ifdef __HALBTCCSR1ANT_C__ /* HAL/BTCoexist/HalBtcCsr1Ant.c */
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtcCsr1Ant.c ===== */
+
+/* local function start with btdm_ */
+/* extern function start with BTDM_ */
+
+static void BTDM_SetAntenna(struct rtw_adapter *padapter, u8 who)
+{
+}
+
+void
+BTDM_SingleAnt(
+ struct rtw_adapter *padapter,
+ u8 bSingleAntOn,
+ u8 bInterruptOn,
+ u8 bMultiNAVOn
+ )
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 H2C_Parameter[3] = {0};
+
+ if (pHalData->bt_coexist.BT_Ant_Num != Ant_x1)
+ return;
+
+ H2C_Parameter[2] = 0;
+ H2C_Parameter[1] = 0;
+ H2C_Parameter[0] = 0;
+
+ if (bInterruptOn) {
+ H2C_Parameter[2] |= 0x02; /* BIT1 */
+ pHalData->bt_coexist.bFWCoexistAllOff = false;
+ }
+ pHalData->bt_coexist.bInterruptOn = bInterruptOn;
+
+ if (bSingleAntOn) {
+ H2C_Parameter[2] |= 0x10; /* BIT4 */
+ pHalData->bt_coexist.bFWCoexistAllOff = false;
+ }
+ pHalData->bt_coexist.bSingleAntOn = bSingleAntOn;
+
+ if (bMultiNAVOn) {
+ H2C_Parameter[2] |= 0x20; /* BIT5 */
+ pHalData->bt_coexist.bFWCoexistAllOff = false;
+ }
+ pHalData->bt_coexist.bMultiNAVOn = bMultiNAVOn;
+
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], SingleAntenna =[%s:%s:%s], write 0xe = 0x%x\n",
+ bSingleAntOn?"ON":"OFF", bInterruptOn?"ON":"OFF", bMultiNAVOn?"ON":"OFF",
+ H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
+}
+
+void BTDM_CheckBTIdleChange1Ant(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ u8 stateChange = false;
+ u32 BT_Polling, Ratio_Act, Ratio_STA;
+ u32 BT_Active, BT_State;
+ u32 regBTActive = 0, regBTState = 0, regBTPolling = 0;
+
+ if (!pHalData->bt_coexist.BluetoothCoexist)
+ return;
+ if (pBtMgnt->ExtConfig.bManualControl)
+ return;
+ if (pHalData->bt_coexist.BT_CoexistType != BT_CSR_BC8)
+ return;
+ if (pHalData->bt_coexist.BT_Ant_Num != Ant_x1)
+ return;
+
+	/* The following only applies to CSR BC8, and the fw version should be >= 62 */
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], FirmwareVersion = 0x%x(%d)\n",
+ pHalData->FirmwareVersion, pHalData->FirmwareVersion));
+ regBTActive = REG_BT_ACTIVE;
+ regBTState = REG_BT_STATE;
+ if (pHalData->FirmwareVersion >= FW_VER_BT_REG1)
+ regBTPolling = REG_BT_POLLING1;
+ else
+ regBTPolling = REG_BT_POLLING;
+
+ BT_Active = rtw_read32(padapter, regBTActive);
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT_Active(0x%x) =%x\n", regBTActive, BT_Active));
+ BT_Active = BT_Active & 0x00ffffff;
+
+ BT_State = rtw_read32(padapter, regBTState);
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT_State(0x%x) =%x\n", regBTState, BT_State));
+ BT_State = BT_State & 0x00ffffff;
+
+ BT_Polling = rtw_read32(padapter, regBTPolling);
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT_Polling(0x%x) =%x\n", regBTPolling, BT_Polling));
+
+ if (BT_Active == 0xffffffff && BT_State == 0xffffffff && BT_Polling == 0xffffffff)
+ return;
+ if (BT_Polling == 0)
+ return;
+
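+	/*
+	 * Normalize the BT active/state counters to the polling counter
+	 * (roughly a per-mille duty cycle); these ratios drive the PAN
+	 * idle/uplink/downlink classification below.
+	 */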
+ Ratio_Act = BT_Active*1000/BT_Polling;
+ Ratio_STA = BT_State*1000/BT_Polling;
+
+ pHalData->bt_coexist.Ratio_Tx = Ratio_Act;
+ pHalData->bt_coexist.Ratio_PRI = Ratio_STA;
+
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], Ratio_Act =%d\n", Ratio_Act));
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], Ratio_STA =%d\n", Ratio_STA));
+
+ if (Ratio_STA < 60 && Ratio_Act < 500) { /* BT PAN idle */
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_PAN_IDLE;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_DOWNLINK;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_UPLINK;
+ } else {
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_IDLE;
+
+ if (Ratio_STA) {
+ /* Check if BT PAN (under BT 2.1) is uplink or downlink */
+ if ((Ratio_Act/Ratio_STA) < 2) {
+ /* BT PAN Uplink */
+ pHalData->bt_coexist.BT21TrafficStatistics.bTxBusyTraffic = true;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_PAN_UPLINK;
+ pHalData->bt_coexist.BT21TrafficStatistics.bRxBusyTraffic = false;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_DOWNLINK;
+ } else {
+ /* BT PAN downlink */
+ pHalData->bt_coexist.BT21TrafficStatistics.bTxBusyTraffic = false;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_UPLINK;
+ pHalData->bt_coexist.BT21TrafficStatistics.bRxBusyTraffic = true;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_PAN_DOWNLINK;
+ }
+ } else {
+ /* BT PAN downlink */
+ pHalData->bt_coexist.BT21TrafficStatistics.bTxBusyTraffic = false;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_UPLINK;
+ pHalData->bt_coexist.BT21TrafficStatistics.bRxBusyTraffic = true;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_PAN_DOWNLINK;
+ }
+ }
+
+ /* Check BT is idle or not */
+ if (pBtMgnt->ExtConfig.NumberOfHandle == 0 &&
+ pBtMgnt->ExtConfig.NumberOfSCO == 0) {
+ pBtMgnt->ExtConfig.bBTBusy = false;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_IDLE;
+ } else {
+ if (Ratio_STA < 60) {
+ pBtMgnt->ExtConfig.bBTBusy = false;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_IDLE;
+ } else {
+ pBtMgnt->ExtConfig.bBTBusy = true;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_IDLE;
+ }
+ }
+
+ if (pBtMgnt->ExtConfig.NumberOfHandle == 0 &&
+ pBtMgnt->ExtConfig.NumberOfSCO == 0) {
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_RSSI_LOW;
+ pBtMgnt->ExtConfig.MIN_BT_RSSI = 0;
+ BTDM_SetAntenna(padapter, BTDM_ANT_BT_IDLE);
+ } else {
+ if (pBtMgnt->ExtConfig.MIN_BT_RSSI <= -5) {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_RSSI_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], core stack notify bt rssi Low\n"));
+ } else {
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_RSSI_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], core stack notify bt rssi Normal\n"));
+ }
+ }
+
+ if (pHalData->bt_coexist.bBTBusyTraffic != pBtMgnt->ExtConfig.bBTBusy) {
+ /* BT idle or BT non-idle */
+ pHalData->bt_coexist.bBTBusyTraffic = pBtMgnt->ExtConfig.bBTBusy;
+ stateChange = true;
+ }
+
+ if (stateChange) {
+ if (!pBtMgnt->ExtConfig.bBTBusy)
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT is idle or disable\n"));
+ else
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT is non-idle\n"));
+ }
+ if (!pBtMgnt->ExtConfig.bBTBusy) {
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT is idle or disable\n"));
+		if (check_fwstate(&padapter->mlmepriv, WIFI_UNDER_LINKING | WIFI_SITE_MONITOR))
+ BTDM_SetAntenna(padapter, BTDM_ANT_WIFI);
+ }
+}
+
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtcCsr1Ant.c ===== */
+#endif
+
+#ifdef __HALBTCCSR2ANT_C__ /* HAL/BTCoexist/HalBtcCsr2Ant.c */
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtcCsr2Ant.c ===== */
+
+/* local function start with btdm_ */
+
+/* Note: */
+/* In the following, the FW mechanisms must be applied before the SW ones, */
+/* i.e. BTDM_Balance(), BTDM_DiminishWiFi() and BT_NAV() should be done */
+/* before BTDM_AGCTable(), BTDM_BBBackOffLevel() and btdm_DacSwing(). */
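+/* A purely illustrative ordering (the argument values below are placeholders, */
+/* not values taken from this driver): */
+/*	BTDM_Balance(padapter, true, 0xa, 0xa);			FW mechanism first */
+/*	BTDM_AGCTable(padapter, BT_AGCTABLE_ON);		then the SW/BB side */
+/*	BTDM_BBBackOffLevel(padapter, BT_BB_BACKOFF_ON); */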
+
+/* extern function start with BTDM_ */
+
+void
+BTDM_DiminishWiFi(
+ struct rtw_adapter *padapter,
+ u8 bDACOn,
+ u8 bInterruptOn,
+ u8 DACSwingLevel,
+ u8 bNAVOn
+ )
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 H2C_Parameter[3] = {0};
+
+ if (pHalData->bt_coexist.BT_Ant_Num != Ant_x2)
+ return;
+
+ if ((pHalData->bt_coexist.CurrentState & BT_COEX_STATE_BT_RSSI_LOW) &&
+ (DACSwingLevel == 0x20)) {
+ RTPRINT(FBT, BT_TRACE, ("[BT]DiminishWiFi 0x20 original, but set 0x18 for Low RSSI!\n"));
+ DACSwingLevel = 0x18;
+ }
+
+ H2C_Parameter[2] = 0;
+ H2C_Parameter[1] = DACSwingLevel;
+ H2C_Parameter[0] = 0;
+ if (bDACOn) {
+ H2C_Parameter[2] |= 0x01; /* BIT0 */
+ if (bInterruptOn)
+ H2C_Parameter[2] |= 0x02; /* BIT1 */
+ pHalData->bt_coexist.bFWCoexistAllOff = false;
+ }
+ if (bNAVOn) {
+ H2C_Parameter[2] |= 0x08; /* BIT3 */
+ pHalData->bt_coexist.bFWCoexistAllOff = false;
+ }
+
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], bDACOn = %s, bInterruptOn = %s, write 0xe = 0x%x\n",
+ bDACOn?"ON":"OFF", bInterruptOn?"ON":"OFF",
+ H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], bNAVOn = %s\n",
+ bNAVOn?"ON":"OFF"));
+}
+
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtcCsr2Ant.c ===== */
+#endif
+
+#ifdef __HALBTCOEXIST_C__ /* HAL/BTCoexist/HalBtCoexist.c */
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtCoexist.c ===== */
+
+/* local function */
+static void btdm_ResetFWCoexState(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->bt_coexist.CurrentState = 0;
+ pHalData->bt_coexist.PreviousState = 0;
+}
+
+static void btdm_InitBtCoexistDM(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+	/* 20100415 Joseph: Save RF registers 0x1E and 0x1F so they can be restored later. */
+ pHalData->bt_coexist.BtRfRegOrigin1E = PHY_QueryRFReg(padapter, PathA, RF_RCK1, bRFRegOffsetMask);
+ pHalData->bt_coexist.BtRfRegOrigin1F = PHY_QueryRFReg(padapter, PathA, RF_RCK2, 0xf0);
+
+ pHalData->bt_coexist.CurrentState = 0;
+ pHalData->bt_coexist.PreviousState = 0;
+
+ BTDM_8723AInit(padapter);
+ pHalData->bt_coexist.bInitlized = true;
+}
+
+/* */
+/* extern function */
+/* */
+void BTDM_CheckAntSelMode(struct rtw_adapter *padapter)
+{
+}
+
+void BTDM_FwC2hBtRssi(struct rtw_adapter *padapter, u8 *tmpBuf)
+{
+ BTDM_FwC2hBtRssi8723A(padapter, tmpBuf);
+}
+
+void BTDM_FwC2hBtInfo(struct rtw_adapter *padapter, u8 *tmpBuf, u8 length)
+{
+ BTDM_FwC2hBtInfo8723A(padapter, tmpBuf, length);
+}
+
+void BTDM_DisplayBtCoexInfo(struct rtw_adapter *padapter)
+{
+ BTDM_Display8723ABtCoexInfo(padapter);
+}
+
+void BTDM_RejectAPAggregatedPacket(struct rtw_adapter *padapter, u8 bReject)
+{
+}
+
+u8 BTDM_IsHT40(struct rtw_adapter *padapter)
+{
+ u8 isht40 = true;
+ enum ht_channel_width bw;
+
+ bw = padapter->mlmeextpriv.cur_bwmode;
+
+ if (bw == HT_CHANNEL_WIDTH_20)
+ isht40 = false;
+ else if (bw == HT_CHANNEL_WIDTH_40)
+ isht40 = true;
+
+ return isht40;
+}
+
+u8 BTDM_Legacy(struct rtw_adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext;
+ u8 isLegacy = false;
+
+ pmlmeext = &padapter->mlmeextpriv;
+ if ((pmlmeext->cur_wireless_mode == WIRELESS_11B) ||
+ (pmlmeext->cur_wireless_mode == WIRELESS_11G) ||
+ (pmlmeext->cur_wireless_mode == WIRELESS_11BG))
+ isLegacy = true;
+
+ return isLegacy;
+}
+
+void BTDM_CheckWiFiState(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct mlme_priv *pmlmepriv;
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pmlmepriv = &padapter->mlmepriv;
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_IDLE;
+
+ if (pmlmepriv->LinkDetectInfo.bTxBusyTraffic)
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_UPLINK;
+ else
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_UPLINK;
+
+ if (pmlmepriv->LinkDetectInfo.bRxBusyTraffic)
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_DOWNLINK;
+ else
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_DOWNLINK;
+ } else {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_IDLE;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_UPLINK;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_DOWNLINK;
+ }
+
+ if (BTDM_Legacy(padapter)) {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_LEGACY;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_HT20;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_HT40;
+ } else {
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_LEGACY;
+ if (BTDM_IsHT40(padapter)) {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_HT40;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_HT20;
+ } else {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_HT20;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_HT40;
+ }
+ }
+
+ if (pBtMgnt->BtOperationOn)
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT30;
+ else
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT30;
+}
+
+s32 BTDM_GetRxSS(struct rtw_adapter *padapter)
+{
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct mlme_priv *pmlmepriv;
+ struct hal_data_8723a *pHalData;
+ s32 UndecoratedSmoothedPWDB = 0;
+
+ pmlmepriv = &padapter->mlmepriv;
+ pHalData = GET_HAL_DATA(padapter);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ UndecoratedSmoothedPWDB = GET_UNDECORATED_AVERAGE_RSSI(padapter);
+ } else { /* associated entry pwdb */
+ UndecoratedSmoothedPWDB = pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB;
+ /* pHalData->BT_EntryMinUndecoratedSmoothedPWDB */
+ }
+ RTPRINT(FBT, BT_TRACE, ("BTDM_GetRxSS() = %d\n", UndecoratedSmoothedPWDB));
+ return UndecoratedSmoothedPWDB;
+}
+
+static s32 BTDM_GetRxBeaconSS(struct rtw_adapter *padapter)
+{
+/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
+ struct mlme_priv *pmlmepriv;
+ struct hal_data_8723a *pHalData;
+ s32 pwdbBeacon = 0;
+
+ pmlmepriv = &padapter->mlmepriv;
+ pHalData = GET_HAL_DATA(padapter);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ /* pwdbBeacon = pHalData->dmpriv.UndecoratedSmoothedBeacon; */
+ pwdbBeacon = pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB;
+ }
+ RTPRINT(FBT, BT_TRACE, ("BTDM_GetRxBeaconSS() = %d\n", pwdbBeacon));
+ return pwdbBeacon;
+}
+
+/* Get beacon rssi state */
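+/*
+ * With levelNum == 2 a single threshold splits the beacon RSSI into Low/High;
+ * with levelNum == 3 two thresholds give Low/Medium/High.  BT_FW_COEX_THRESH_TOL
+ * is added on upward transitions to provide hysteresis, so the reported state
+ * does not flap when the RSSI hovers around a threshold.
+ */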
+u8 BTDM_CheckCoexBcnRssiState(struct rtw_adapter *padapter, u8 levelNum,
+ u8 RssiThresh, u8 RssiThresh1)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ s32 pwdbBeacon = 0;
+ u8 bcnRssiState = 0;
+
+ pwdbBeacon = BTDM_GetRxBeaconSS(padapter);
+
+ if (levelNum == 2) {
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM;
+
+ if ((pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_LOW) ||
+ (pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_STAY_LOW)) {
+ if (pwdbBeacon >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
+ bcnRssiState = BT_RSSI_STATE_HIGH;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to High\n"));
+ } else {
+ bcnRssiState = BT_RSSI_STATE_STAY_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state stay at Low\n"));
+ }
+ } else {
+ if (pwdbBeacon < RssiThresh) {
+ bcnRssiState = BT_RSSI_STATE_LOW;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to Low\n"));
+ } else {
+ bcnRssiState = BT_RSSI_STATE_STAY_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state stay at High\n"));
+ }
+ }
+ } else if (levelNum == 3) {
+ if (RssiThresh > RssiThresh1) {
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON thresh error!!\n"));
+ return pHalData->bt_coexist.preRssiStateBeacon;
+ }
+
+ if ((pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_LOW) ||
+ (pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_STAY_LOW)) {
+ if (pwdbBeacon >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
+ bcnRssiState = BT_RSSI_STATE_MEDIUM;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to Medium\n"));
+ } else {
+ bcnRssiState = BT_RSSI_STATE_STAY_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state stay at Low\n"));
+ }
+ } else if ((pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_MEDIUM) ||
+ (pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_STAY_MEDIUM)) {
+ if (pwdbBeacon >= (RssiThresh1+BT_FW_COEX_THRESH_TOL)) {
+ bcnRssiState = BT_RSSI_STATE_HIGH;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to High\n"));
+ } else if (pwdbBeacon < RssiThresh) {
+ bcnRssiState = BT_RSSI_STATE_LOW;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to Low\n"));
+ } else {
+ bcnRssiState = BT_RSSI_STATE_STAY_MEDIUM;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state stay at Medium\n"));
+ }
+ } else {
+ if (pwdbBeacon < RssiThresh1) {
+ bcnRssiState = BT_RSSI_STATE_MEDIUM;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to Medium\n"));
+ } else {
+ bcnRssiState = BT_RSSI_STATE_STAY_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state stay at High\n"));
+ }
+ }
+ }
+
+ pHalData->bt_coexist.preRssiStateBeacon = bcnRssiState;
+
+ return bcnRssiState;
+}
+
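+/*
+ * Same 2/3-level RSSI state machine as BTDM_CheckCoexBcnRssiState(), but fed
+ * by BTDM_GetRxSS() instead of the beacon RSSI and tracked separately in
+ * preRssiState1 and the BT_COEX_STATE_WIFI_RSSI_1_* bits.
+ */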
+u8 BTDM_CheckCoexRSSIState1(struct rtw_adapter *padapter, u8 levelNum,
+ u8 RssiThresh, u8 RssiThresh1)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ s32 UndecoratedSmoothedPWDB = 0;
+ u8 btRssiState = 0;
+
+ UndecoratedSmoothedPWDB = BTDM_GetRxSS(padapter);
+
+ if (levelNum == 2) {
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+
+ if ((pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_LOW) ||
+ (pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_STAY_LOW)) {
+ if (UndecoratedSmoothedPWDB >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
+ btRssiState = BT_RSSI_STATE_HIGH;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to High\n"));
+ } else {
+ btRssiState = BT_RSSI_STATE_STAY_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n"));
+ }
+ } else {
+ if (UndecoratedSmoothedPWDB < RssiThresh) {
+ btRssiState = BT_RSSI_STATE_LOW;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n"));
+ } else {
+ btRssiState = BT_RSSI_STATE_STAY_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state stay at High\n"));
+ }
+ }
+ } else if (levelNum == 3) {
+ if (RssiThresh > RssiThresh1) {
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 thresh error!!\n"));
+ return pHalData->bt_coexist.preRssiState1;
+ }
+
+ if ((pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_LOW) ||
+ (pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_STAY_LOW)) {
+ if (UndecoratedSmoothedPWDB >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
+ btRssiState = BT_RSSI_STATE_MEDIUM;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to Medium\n"));
+ } else {
+ btRssiState = BT_RSSI_STATE_STAY_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n"));
+ }
+ } else if ((pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_MEDIUM) ||
+ (pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_STAY_MEDIUM)) {
+ if (UndecoratedSmoothedPWDB >= (RssiThresh1+BT_FW_COEX_THRESH_TOL)) {
+ btRssiState = BT_RSSI_STATE_HIGH;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to High\n"));
+ } else if (UndecoratedSmoothedPWDB < RssiThresh) {
+ btRssiState = BT_RSSI_STATE_LOW;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n"));
+ } else {
+ btRssiState = BT_RSSI_STATE_STAY_MEDIUM;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state stay at Medium\n"));
+ }
+ } else {
+ if (UndecoratedSmoothedPWDB < RssiThresh1) {
+ btRssiState = BT_RSSI_STATE_MEDIUM;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to Medium\n"));
+ } else {
+ btRssiState = BT_RSSI_STATE_STAY_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state stay at High\n"));
+ }
+ }
+ }
+
+ pHalData->bt_coexist.preRssiState1 = btRssiState;
+
+ return btRssiState;
+}
+
+u8 BTDM_CheckCoexRSSIState(struct rtw_adapter *padapter, u8 levelNum,
+ u8 RssiThresh, u8 RssiThresh1)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ s32 UndecoratedSmoothedPWDB = 0;
+ u8 btRssiState = 0;
+
+ UndecoratedSmoothedPWDB = BTDM_GetRxSS(padapter);
+
+ if (levelNum == 2) {
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+
+ if ((pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_LOW) ||
+ (pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_STAY_LOW)) {
+ if (UndecoratedSmoothedPWDB >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
+ btRssiState = BT_RSSI_STATE_HIGH;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to High\n"));
+ } else {
+ btRssiState = BT_RSSI_STATE_STAY_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state stay at Low\n"));
+ }
+ } else {
+ if (UndecoratedSmoothedPWDB < RssiThresh) {
+ btRssiState = BT_RSSI_STATE_LOW;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to Low\n"));
+ } else {
+ btRssiState = BT_RSSI_STATE_STAY_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state stay at High\n"));
+ }
+ }
+ } else if (levelNum == 3) {
+ if (RssiThresh > RssiThresh1) {
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI thresh error!!\n"));
+ return pHalData->bt_coexist.preRssiState;
+ }
+
+ if ((pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_LOW) ||
+ (pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_STAY_LOW)) {
+ if (UndecoratedSmoothedPWDB >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
+ btRssiState = BT_RSSI_STATE_MEDIUM;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to Medium\n"));
+ } else {
+ btRssiState = BT_RSSI_STATE_STAY_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state stay at Low\n"));
+ }
+ } else if ((pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_MEDIUM) ||
+ (pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_STAY_MEDIUM)) {
+ if (UndecoratedSmoothedPWDB >= (RssiThresh1+BT_FW_COEX_THRESH_TOL)) {
+ btRssiState = BT_RSSI_STATE_HIGH;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to High\n"));
+ } else if (UndecoratedSmoothedPWDB < RssiThresh) {
+ btRssiState = BT_RSSI_STATE_LOW;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_LOW;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to Low\n"));
+ } else {
+ btRssiState = BT_RSSI_STATE_STAY_MEDIUM;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state stay at Medium\n"));
+ }
+ } else {
+ if (UndecoratedSmoothedPWDB < RssiThresh1) {
+ btRssiState = BT_RSSI_STATE_MEDIUM;
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to Medium\n"));
+ } else {
+ btRssiState = BT_RSSI_STATE_STAY_HIGH;
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state stay at High\n"));
+ }
+ }
+ }
+
+ pHalData->bt_coexist.preRssiState = btRssiState;
+
+ return btRssiState;
+}
+
+u8 BTDM_DisableEDCATurbo(struct rtw_adapter *padapter)
+{
+ struct bt_mgnt *pBtMgnt;
+ struct hal_data_8723a *pHalData;
+ u8 bBtChangeEDCA = false;
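+	/* BE EDCA parameter set used while BT is busy (written to REG_EDCA_BE_PARAM below) */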
+ u32 EDCA_BT_BE = 0x5ea42b, cur_EDCA_reg;
+ u8 bRet = false;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBtMgnt = &pHalData->BtInfo.BtMgnt;
+
+ if (!pHalData->bt_coexist.BluetoothCoexist) {
+ bRet = false;
+ pHalData->bt_coexist.lastBtEdca = 0;
+ return bRet;
+ }
+ if (!((pBtMgnt->bSupportProfile) ||
+ (pHalData->bt_coexist.BT_CoexistType == BT_CSR_BC8))) {
+ bRet = false;
+ pHalData->bt_coexist.lastBtEdca = 0;
+ return bRet;
+ }
+
+ if (BT_1Ant(padapter)) {
+ bRet = false;
+ pHalData->bt_coexist.lastBtEdca = 0;
+ return bRet;
+ }
+
+ if (pHalData->bt_coexist.exec_cnt < 3)
+ pHalData->bt_coexist.exec_cnt++;
+ else
+ pHalData->bt_coexist.bEDCAInitialized = true;
+
+ /* When BT is non idle */
+ if (!(pHalData->bt_coexist.CurrentState & BT_COEX_STATE_BT_IDLE)) {
+ RTPRINT(FBT, BT_TRACE, ("BT state non idle, set bt EDCA\n"));
+
+ /* aggr_num = 0x0909; */
+ if (pHalData->odmpriv.DM_EDCA_Table.bCurrentTurboEDCA) {
+ bBtChangeEDCA = true;
+ pHalData->odmpriv.DM_EDCA_Table.bCurrentTurboEDCA = false;
+ pHalData->dmpriv.prv_traffic_idx = 3;
+ }
+ cur_EDCA_reg = rtw_read32(padapter, REG_EDCA_BE_PARAM);
+
+ if (cur_EDCA_reg != EDCA_BT_BE)
+ bBtChangeEDCA = true;
+ if (bBtChangeEDCA || !pHalData->bt_coexist.bEDCAInitialized) {
+ rtw_write32(padapter, REG_EDCA_BE_PARAM, EDCA_BT_BE);
+ pHalData->bt_coexist.lastBtEdca = EDCA_BT_BE;
+ }
+ bRet = true;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("BT state idle, set original EDCA\n"));
+ pHalData->bt_coexist.lastBtEdca = 0;
+ bRet = false;
+ }
+ return bRet;
+}
+
+void
+BTDM_Balance(
+ struct rtw_adapter *padapter,
+ u8 bBalanceOn,
+ u8 ms0,
+ u8 ms1
+ )
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 H2C_Parameter[3] = {0};
+
+ if (bBalanceOn) {
+ H2C_Parameter[2] = 1;
+ H2C_Parameter[1] = ms1;
+ H2C_Parameter[0] = ms0;
+ pHalData->bt_coexist.bFWCoexistAllOff = false;
+ } else {
+ H2C_Parameter[2] = 0;
+ H2C_Parameter[1] = 0;
+ H2C_Parameter[0] = 0;
+ }
+ pHalData->bt_coexist.bBalanceOn = bBalanceOn;
+
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], Balance =[%s:%dms:%dms], write 0xc = 0x%x\n",
+ bBalanceOn?"ON":"OFF", ms0, ms1,
+ H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
+
+ FillH2CCmd(padapter, 0xc, 3, H2C_Parameter);
+}
+
+void BTDM_AGCTable(struct rtw_adapter *padapter, u8 type)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ if (type == BT_AGCTABLE_OFF) {
+ RTPRINT(FBT, BT_TRACE, ("[BT]AGCTable Off!\n"));
+ rtw_write32(padapter, 0xc78, 0x641c0001);
+ rtw_write32(padapter, 0xc78, 0x631d0001);
+ rtw_write32(padapter, 0xc78, 0x621e0001);
+ rtw_write32(padapter, 0xc78, 0x611f0001);
+ rtw_write32(padapter, 0xc78, 0x60200001);
+
+ PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0x32000);
+ PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0x71000);
+ PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0xb0000);
+ PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0xfc000);
+ PHY_SetRFReg(padapter, PathA, RF_RX_G1, bRFRegOffsetMask, 0x30355);
+
+ pHalData->bt_coexist.b8723aAgcTableOn = false;
+ } else if (type == BT_AGCTABLE_ON) {
+ RTPRINT(FBT, BT_TRACE, ("[BT]AGCTable On!\n"));
+ rtw_write32(padapter, 0xc78, 0x4e1c0001);
+ rtw_write32(padapter, 0xc78, 0x4d1d0001);
+ rtw_write32(padapter, 0xc78, 0x4c1e0001);
+ rtw_write32(padapter, 0xc78, 0x4b1f0001);
+ rtw_write32(padapter, 0xc78, 0x4a200001);
+
+ PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0xdc000);
+ PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0x90000);
+ PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0x51000);
+ PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0x12000);
+ PHY_SetRFReg(padapter, PathA, RF_RX_G1, bRFRegOffsetMask, 0x00355);
+
+ pHalData->bt_coexist.b8723aAgcTableOn = true;
+
+ pHalData->bt_coexist.bSWCoexistAllOff = false;
+ }
+}
+
+void BTDM_BBBackOffLevel(struct rtw_adapter *padapter, u8 type)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (type == BT_BB_BACKOFF_OFF) {
+ RTPRINT(FBT, BT_TRACE, ("[BT]BBBackOffLevel Off!\n"));
+ rtw_write32(padapter, 0xc04, 0x3a05611);
+ } else if (type == BT_BB_BACKOFF_ON) {
+ RTPRINT(FBT, BT_TRACE, ("[BT]BBBackOffLevel On!\n"));
+ rtw_write32(padapter, 0xc04, 0x3a07611);
+ pHalData->bt_coexist.bSWCoexistAllOff = false;
+ }
+}
+
+void BTDM_FWCoexAllOff(struct rtw_adapter *padapter)
+{
+	struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ RTPRINT(FBT, BT_TRACE, ("BTDM_FWCoexAllOff()\n"));
+ if (pHalData->bt_coexist.bFWCoexistAllOff)
+ return;
+ RTPRINT(FBT, BT_TRACE, ("BTDM_FWCoexAllOff(), real Do\n"));
+
+ BTDM_FWCoexAllOff8723A(padapter);
+
+ pHalData->bt_coexist.bFWCoexistAllOff = true;
+}
+
+void BTDM_SWCoexAllOff(struct rtw_adapter *padapter)
+{
+	struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ RTPRINT(FBT, BT_TRACE, ("BTDM_SWCoexAllOff()\n"));
+ if (pHalData->bt_coexist.bSWCoexistAllOff)
+ return;
+ RTPRINT(FBT, BT_TRACE, ("BTDM_SWCoexAllOff(), real Do\n"));
+ BTDM_SWCoexAllOff8723A(padapter);
+
+ pHalData->bt_coexist.bSWCoexistAllOff = true;
+}
+
+void BTDM_HWCoexAllOff(struct rtw_adapter *padapter)
+{
+	struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ RTPRINT(FBT, BT_TRACE, ("BTDM_HWCoexAllOff()\n"));
+ if (pHalData->bt_coexist.bHWCoexistAllOff)
+ return;
+ RTPRINT(FBT, BT_TRACE, ("BTDM_HWCoexAllOff(), real Do\n"));
+
+ BTDM_HWCoexAllOff8723A(padapter);
+
+ pHalData->bt_coexist.bHWCoexistAllOff = true;
+}
+
+void BTDM_CoexAllOff(struct rtw_adapter *padapter)
+{
+ BTDM_FWCoexAllOff(padapter);
+ BTDM_SWCoexAllOff(padapter);
+ BTDM_HWCoexAllOff(padapter);
+}
+
+void BTDM_TurnOffBtCoexistBeforeEnterIPS(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct pwrctrl_priv *ppwrctrl = &padapter->pwrctrlpriv;
+
+ if (!pHalData->bt_coexist.BluetoothCoexist)
+ return;
+
+ /* 8723 1Ant doesn't need to turn off bt coexist mechanism. */
+ if (BTDM_1Ant8723A(padapter))
+ return;
+
+ /* Before enter IPS, turn off FW BT Co-exist mechanism */
+ if (ppwrctrl->reg_rfoff == rf_on) {
+ RTPRINT(FBT, BT_TRACE, ("[BT][DM], Before enter IPS, turn off all Coexist DM\n"));
+ btdm_ResetFWCoexState(padapter);
+ BTDM_CoexAllOff(padapter);
+ BTDM_SetAntenna(padapter, BTDM_ANT_BT);
+ }
+}
+
+void BTDM_SignalCompensation(struct rtw_adapter *padapter, u8 *rssi_wifi, u8 *rssi_bt)
+{
+ BTDM_8723ASignalCompensation(padapter, rssi_wifi, rssi_bt);
+}
+
+void BTDM_Coexist(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (!pHalData->bt_coexist.BluetoothCoexist) {
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT not exists!!\n"));
+ return;
+ }
+
+ if (!pHalData->bt_coexist.bInitlized) {
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], btdm_InitBtCoexistDM()\n"));
+ btdm_InitBtCoexistDM(padapter);
+ }
+
+ RTPRINT(FBT, BT_TRACE, ("\n\n[DM][BT], BTDM start!!\n"));
+
+ BTDM_PWDBMonitor(padapter);
+
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], HW type is 8723\n"));
+ BTDM_BTCoexist8723A(padapter);
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], BTDM end!!\n\n"));
+}
+
+void BTDM_UpdateCoexState(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (!BTDM_IsSameCoexistState(padapter)) {
+ RTPRINT(FBT, BT_TRACE, ("[BTCoex], Coexist State[bitMap] change from 0x%"i64fmt"x to 0x%"i64fmt"x, changeBits = 0x%"i64fmt"x\n",
+ pHalData->bt_coexist.PreviousState,
+ pHalData->bt_coexist.CurrentState,
+ (pHalData->bt_coexist.PreviousState^pHalData->bt_coexist.CurrentState)));
+ pHalData->bt_coexist.PreviousState = pHalData->bt_coexist.CurrentState;
+ }
+}
+
+u8 BTDM_IsSameCoexistState(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (pHalData->bt_coexist.PreviousState == pHalData->bt_coexist.CurrentState) {
+ return true;
+ } else {
+ RTPRINT(FBT, BT_TRACE, ("[DM][BT], Coexist state changed!!\n"));
+ return false;
+ }
+}
+
+void BTDM_PWDBMonitor(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(GetDefaultAdapter(padapter));
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 H2C_Parameter[3] = {0};
+ s32 tmpBTEntryMaxPWDB = 0, tmpBTEntryMinPWDB = 0xff;
+ u8 i;
+
+ if (pBtMgnt->BtOperationOn) {
+ for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
+ if (pBTInfo->BtAsocEntry[i].bUsed) {
+ if (pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB < tmpBTEntryMinPWDB)
+ tmpBTEntryMinPWDB = pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB;
+ if (pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB > tmpBTEntryMaxPWDB)
+ tmpBTEntryMaxPWDB = pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB;
+ /* Report every BT connection (HS mode) RSSI to FW */
+ H2C_Parameter[2] = (u8)(pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB & 0xFF);
+ H2C_Parameter[0] = (MAX_FW_SUPPORT_MACID_NUM-1-i);
+ RTPRINT(FDM, DM_BT30, ("RSSI report for BT[%d], H2C_Par = 0x%x\n", i, H2C_Parameter[0]));
+ FillH2CCmd(padapter, RSSI_SETTING_EID, 3, H2C_Parameter);
+ RTPRINT_ADDR(FDM, (DM_PWDB|DM_BT30), ("BT_Entry Mac :"),
+				     pBTInfo->BtAsocEntry[i].BTRemoteMACAddr);
+ RTPRINT(FDM, (DM_PWDB|DM_BT30),
+ ("BT rx pwdb[%d] = 0x%x(%d)\n", i,
+ pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB,
+ pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB));
+ }
+ }
+ if (tmpBTEntryMaxPWDB != 0) { /* If associated entry is found */
+ pHalData->dmpriv.BT_EntryMaxUndecoratedSmoothedPWDB = tmpBTEntryMaxPWDB;
+ RTPRINT(FDM, (DM_PWDB|DM_BT30), ("BT_EntryMaxPWDB = 0x%x(%d)\n",
+ tmpBTEntryMaxPWDB, tmpBTEntryMaxPWDB));
+ } else {
+ pHalData->dmpriv.BT_EntryMaxUndecoratedSmoothedPWDB = 0;
+ }
+ if (tmpBTEntryMinPWDB != 0xff) { /* If associated entry is found */
+ pHalData->dmpriv.BT_EntryMinUndecoratedSmoothedPWDB = tmpBTEntryMinPWDB;
+ RTPRINT(FDM, (DM_PWDB|DM_BT30), ("BT_EntryMinPWDB = 0x%x(%d)\n",
+ tmpBTEntryMinPWDB, tmpBTEntryMinPWDB));
+ } else {
+ pHalData->dmpriv.BT_EntryMinUndecoratedSmoothedPWDB = 0;
+ }
+ }
+}
+
+u8 BTDM_IsBTBusy(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
+
+ if (pBtMgnt->ExtConfig.bBTBusy)
+ return true;
+ else
+ return false;
+}
+
+u8 BTDM_IsWifiBusy(struct rtw_adapter *padapter)
+{
+/*PMGNT_INFO pMgntInfo = &GetDefaultAdapter(padapter)->MgntInfo; */
+ struct mlme_priv *pmlmepriv = &GetDefaultAdapter(padapter)->mlmepriv;
+ struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
+ struct bt_traffic *pBtTraffic = &pBTInfo->BtTraffic;
+
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic ||
+ pBtTraffic->Bt30TrafficStatistics.bTxBusyTraffic ||
+ pBtTraffic->Bt30TrafficStatistics.bRxBusyTraffic)
+ return true;
+ else
+ return false;
+}
+
+u8 BTDM_IsCoexistStateChanged(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (pHalData->bt_coexist.PreviousState == pHalData->bt_coexist.CurrentState)
+ return false;
+ else
+ return true;
+}
+
+u8 BTDM_IsWifiUplink(struct rtw_adapter *padapter)
+{
+/*PMGNT_INFO pMgntInfo = &GetDefaultAdapter(padapter)->MgntInfo; */
+ struct mlme_priv *pmlmepriv;
+ struct bt_30info *pBTInfo;
+ struct bt_traffic *pBtTraffic;
+
+ pmlmepriv = &padapter->mlmepriv;
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtTraffic = &pBTInfo->BtTraffic;
+
+ if ((pmlmepriv->LinkDetectInfo.bTxBusyTraffic) ||
+ (pBtTraffic->Bt30TrafficStatistics.bTxBusyTraffic))
+ return true;
+ else
+ return false;
+}
+
+u8 BTDM_IsWifiDownlink(struct rtw_adapter *padapter)
+{
+/*PMGNT_INFO pMgntInfo = &GetDefaultAdapter(padapter)->MgntInfo; */
+ struct mlme_priv *pmlmepriv;
+ struct bt_30info *pBTInfo;
+ struct bt_traffic *pBtTraffic;
+
+ pmlmepriv = &padapter->mlmepriv;
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtTraffic = &pBTInfo->BtTraffic;
+
+ if ((pmlmepriv->LinkDetectInfo.bRxBusyTraffic) ||
+ (pBtTraffic->Bt30TrafficStatistics.bRxBusyTraffic))
+ return true;
+ else
+ return false;
+}
+
+u8 BTDM_IsBTHSMode(struct rtw_adapter *padapter)
+{
+/*PMGNT_INFO pMgntInfo = &GetDefaultAdapter(padapter)->MgntInfo; */
+ struct hal_data_8723a *pHalData;
+ struct bt_mgnt *pBtMgnt;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBtMgnt = &pHalData->BtInfo.BtMgnt;
+
+ if (pBtMgnt->BtOperationOn)
+ return true;
+ else
+ return false;
+}
+
+u8 BTDM_IsBTUplink(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (pHalData->bt_coexist.BT21TrafficStatistics.bTxBusyTraffic)
+ return true;
+ else
+ return false;
+}
+
+u8 BTDM_IsBTDownlink(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (pHalData->bt_coexist.BT21TrafficStatistics.bRxBusyTraffic)
+ return true;
+ else
+ return false;
+}
+
+void BTDM_AdjustForBtOperation(struct rtw_adapter *padapter)
+{
+ RTPRINT(FBT, BT_TRACE, ("[BT][DM], BTDM_AdjustForBtOperation()\n"));
+ BTDM_AdjustForBtOperation8723A(padapter);
+}
+
+void BTDM_SetBtCoexCurrAntNum(struct rtw_adapter *padapter, u8 antNum)
+{
+ BTDM_Set8723ABtCoexCurrAntNum(padapter, antNum);
+}
+
+void BTDM_ForHalt(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (!pHalData->bt_coexist.BluetoothCoexist)
+ return;
+
+ BTDM_ForHalt8723A(padapter);
+ GET_HAL_DATA(padapter)->bt_coexist.bInitlized = false;
+}
+
+void BTDM_WifiScanNotify(struct rtw_adapter *padapter, u8 scanType)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (!pHalData->bt_coexist.BluetoothCoexist)
+ return;
+
+ BTDM_WifiScanNotify8723A(padapter, scanType);
+}
+
+void BTDM_WifiAssociateNotify(struct rtw_adapter *padapter, u8 action)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (!pHalData->bt_coexist.BluetoothCoexist)
+ return;
+
+ BTDM_WifiAssociateNotify8723A(padapter, action);
+}
+
+void BTDM_MediaStatusNotify(struct rtw_adapter *padapter, enum rt_media_status mstatus)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (!pHalData->bt_coexist.BluetoothCoexist)
+ return;
+
+ BTDM_MediaStatusNotify8723A(padapter, mstatus);
+}
+
+void BTDM_ForDhcp(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (!pHalData->bt_coexist.BluetoothCoexist)
+ return;
+
+ BTDM_ForDhcp8723A(padapter);
+}
+
+void BTDM_ResetActionProfileState(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+	pHalData->bt_coexist.CurrentState &=
+		~(BT_COEX_STATE_PROFILE_HID | BT_COEX_STATE_PROFILE_A2DP |
+		  BT_COEX_STATE_PROFILE_PAN | BT_COEX_STATE_PROFILE_SCO);
+}
+
+u8 BTDM_IsActionSCO(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+ struct bt_dgb *pBtDbg;
+ u8 bRet;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+ pBtDbg = &pBTInfo->BtDbg;
+ bRet = false;
+
+ if (pBtDbg->dbgCtrl) {
+ if (pBtDbg->dbgProfile == BT_DBG_PROFILE_SCO) {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_SCO;
+ bRet = true;
+ }
+ } else {
+ if (pBtMgnt->ExtConfig.NumberOfSCO > 0) {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_SCO;
+ bRet = true;
+ }
+ }
+ return bRet;
+}
+
+u8 BTDM_IsActionHID(struct rtw_adapter *padapter)
+{
+ struct bt_30info *pBTInfo;
+ struct hal_data_8723a *pHalData;
+ struct bt_mgnt *pBtMgnt;
+ struct bt_dgb *pBtDbg;
+ u8 bRet;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+ pBtDbg = &pBTInfo->BtDbg;
+ bRet = false;
+
+ if (pBtDbg->dbgCtrl) {
+ if (pBtDbg->dbgProfile == BT_DBG_PROFILE_HID) {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_HID;
+ bRet = true;
+ }
+ } else {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
+ pBtMgnt->ExtConfig.NumberOfHandle == 1) {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_HID;
+ bRet = true;
+ }
+ }
+ return bRet;
+}
+
+u8 BTDM_IsActionA2DP(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+ struct bt_dgb *pBtDbg;
+ u8 bRet;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+ pBtDbg = &pBTInfo->BtDbg;
+ bRet = false;
+
+ if (pBtDbg->dbgCtrl) {
+ if (pBtDbg->dbgProfile == BT_DBG_PROFILE_A2DP) {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_A2DP;
+ bRet = true;
+ }
+ } else {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP) &&
+ pBtMgnt->ExtConfig.NumberOfHandle == 1) {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_A2DP;
+ bRet = true;
+ }
+ }
+ return bRet;
+}
+
+u8 BTDM_IsActionPAN(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+ struct bt_dgb *pBtDbg;
+ u8 bRet;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+ pBtDbg = &pBTInfo->BtDbg;
+ bRet = false;
+
+ if (pBtDbg->dbgCtrl) {
+ if (pBtDbg->dbgProfile == BT_DBG_PROFILE_PAN) {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_PAN;
+ bRet = true;
+ }
+ } else {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) &&
+ pBtMgnt->ExtConfig.NumberOfHandle == 1) {
+ pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_PAN;
+ bRet = true;
+ }
+ }
+ return bRet;
+}
+
+u8 BTDM_IsActionHIDA2DP(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct bt_30info *pBTInfo;
+ struct bt_mgnt *pBtMgnt;
+ struct bt_dgb *pBtDbg;
+ u8 bRet;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtMgnt = &pBTInfo->BtMgnt;
+ pBtDbg = &pBTInfo->BtDbg;
+ bRet = false;
+
+ if (pBtDbg->dbgCtrl) {
+ if (pBtDbg->dbgProfile == BT_DBG_PROFILE_HID_A2DP) {
+ pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_HID|BT_COEX_STATE_PROFILE_A2DP);
+ bRet = true;
+ }
+ } else {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
+ pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_HID|BT_COEX_STATE_PROFILE_A2DP);
+ bRet = true;
+ }
+ }
+ return bRet;
+}
+
+u8 BTDM_IsActionHIDPAN(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct bt_30info *pBTInfo;
+ struct bt_dgb *pBtDbg;
+ u8 bRet;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtDbg = &pBTInfo->BtDbg;
+ bRet = false;
+
+ if (pBtDbg->dbgCtrl) {
+ if (pBtDbg->dbgProfile == BT_DBG_PROFILE_HID_PAN) {
+ pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_HID|BT_COEX_STATE_PROFILE_PAN);
+ bRet = true;
+ }
+ } else {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
+ BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) {
+ pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_HID|BT_COEX_STATE_PROFILE_PAN);
+ bRet = true;
+ }
+ }
+ return bRet;
+}
+
+u8 BTDM_IsActionPANA2DP(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct bt_30info *pBTInfo;
+ struct bt_dgb *pBtDbg;
+ u8 bRet;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pBTInfo = GET_BT_INFO(padapter);
+ pBtDbg = &pBTInfo->BtDbg;
+ bRet = false;
+
+ if (pBtDbg->dbgCtrl) {
+ if (pBtDbg->dbgProfile == BT_DBG_PROFILE_PAN_A2DP) {
+ pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_PAN|BT_COEX_STATE_PROFILE_A2DP);
+ bRet = true;
+ }
+ } else {
+ if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) && BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
+ pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_PAN|BT_COEX_STATE_PROFILE_A2DP);
+ bRet = true;
+ }
+ }
+ return bRet;
+}
+
+u8 BTDM_IsBtDisabled(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (pHalData->bt_coexist.bCurBtDisabled)
+ return true;
+ else
+ return false;
+}
+
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtCoexist.c ===== */
+#endif
+
+#ifdef __HALBT_C__ /* HAL/HalBT.c */
+/* ===== Below this line is sync from SD7 driver HAL/HalBT.c ===== */
+
+/* */
+/*local function */
+/* */
+
+static void halbt_InitHwConfig8723A(struct rtw_adapter *padapter)
+{
+}
+
+/* */
+/*extern function */
+/* */
+u8 HALBT_GetPGAntNum(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ return pHalData->bt_coexist.BT_Ant_Num;
+}
+
+void HALBT_SetKey(struct rtw_adapter *padapter, u8 EntryNum)
+{
+ struct bt_30info *pBTinfo;
+ struct bt_asoc_entry *pBtAssocEntry;
+ u16 usConfig = 0;
+
+ pBTinfo = GET_BT_INFO(padapter);
+ pBtAssocEntry = &pBTinfo->BtAsocEntry[EntryNum];
+
+ pBtAssocEntry->HwCAMIndex = BT_HWCAM_STAR + EntryNum;
+
+ usConfig = CAM_VALID | (CAM_AES << 2);
+ write_cam23a(padapter, pBtAssocEntry->HwCAMIndex, usConfig, pBtAssocEntry->BTRemoteMACAddr, pBtAssocEntry->PTK + TKIP_ENC_KEY_POS);
+}
+
+void HALBT_RemoveKey(struct rtw_adapter *padapter, u8 EntryNum)
+{
+ struct bt_30info *pBTinfo;
+ struct bt_asoc_entry *pBtAssocEntry;
+
+ pBTinfo = GET_BT_INFO(padapter);
+ pBtAssocEntry = &pBTinfo->BtAsocEntry[EntryNum];
+
+ if (pBTinfo->BtAsocEntry[EntryNum].HwCAMIndex != 0) {
+ /* ToDo : add New HALBT_RemoveKey function !! */
+ if (pBtAssocEntry->HwCAMIndex >= BT_HWCAM_STAR && pBtAssocEntry->HwCAMIndex < HALF_CAM_ENTRY)
+ CAM_empty_entry23a(padapter, pBtAssocEntry->HwCAMIndex);
+ pBTinfo->BtAsocEntry[EntryNum].HwCAMIndex = 0;
+ }
+}
+
+void HALBT_InitBTVars8723A(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->bt_coexist.BluetoothCoexist = pHalData->EEPROMBluetoothCoexist;
+ pHalData->bt_coexist.BT_Ant_Num = pHalData->EEPROMBluetoothAntNum;
+ pHalData->bt_coexist.BT_CoexistType = pHalData->EEPROMBluetoothType;
+ pHalData->bt_coexist.BT_Ant_isolation = pHalData->EEPROMBluetoothAntIsolation;
+ pHalData->bt_coexist.bt_radiosharedtype = pHalData->EEPROMBluetoothRadioShared;
+
+	RT_TRACE(_module_hal_init_c_, _drv_info_, ("BT Coexistence = 0x%x\n", pHalData->bt_coexist.BluetoothCoexist));
+ if (pHalData->bt_coexist.BluetoothCoexist) {
+ if (pHalData->bt_coexist.BT_Ant_Num == Ant_x2) {
+ BTDM_SetBtCoexCurrAntNum(padapter, 2);
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("BlueTooth BT_Ant_Num = Antx2\n"));
+ } else if (pHalData->bt_coexist.BT_Ant_Num == Ant_x1) {
+ BTDM_SetBtCoexCurrAntNum(padapter, 1);
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("BlueTooth BT_Ant_Num = Antx1\n"));
+ }
+ pHalData->bt_coexist.bBTBusyTraffic = false;
+ pHalData->bt_coexist.bBTTrafficModeSet = false;
+ pHalData->bt_coexist.bBTNonTrafficModeSet = false;
+ pHalData->bt_coexist.CurrentState = 0;
+ pHalData->bt_coexist.PreviousState = 0;
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("bt_radiosharedType = 0x%x\n",
+ pHalData->bt_coexist.bt_radiosharedtype));
+ }
+}
+
+u8 HALBT_IsBTExist(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (pHalData->bt_coexist.BluetoothCoexist)
+ return true;
+ else
+ return false;
+}
+
+u8 HALBT_BTChipType(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ return pHalData->bt_coexist.BT_CoexistType;
+}
+
+void HALBT_InitHwConfig(struct rtw_adapter *padapter)
+{
+ halbt_InitHwConfig8723A(padapter);
+ BTDM_Coexist(padapter);
+}
+
+void HALBT_SetRtsCtsNoLenLimit(struct rtw_adapter *padapter)
+{
+}
+
+/* ===== End of sync from SD7 driver HAL/HalBT.c ===== */
+#endif
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
new file mode 100644
index 000000000000..0b205e1204fc
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
@@ -0,0 +1,845 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8723A_CMD_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <cmd_osdep.h>
+#include <mlme_osdep.h>
+#include <rtw_ioctl_set.h>
+#include <rtl8723a_hal.h>
+
+#define RTL92C_MAX_H2C_BOX_NUMS 4
+#define RTL92C_MAX_CMD_LEN 5
+#define MESSAGE_BOX_SIZE 4
+#define EX_MESSAGE_BOX_SIZE 2
+
+static u8 _is_fw_read_cmd_down(struct rtw_adapter *padapter, u8 msgbox_num)
+{
+ u8 read_down = false;
+ int retry_cnts = 100;
+ u8 valid;
+
+ do {
+ valid = rtw_read8(padapter, REG_HMETFR) & BIT(msgbox_num);
+ if (0 == valid)
+ read_down = true;
+ } while ((!read_down) && (retry_cnts--));
+
+ return read_down;
+}
+
+/*****************************************
+* H2C Msg format :
+*| 31 - 8 |7 | 6 - 0 |
+*| h2c_msg |Ext_bit |CMD_ID |
+*
+******************************************/
+s32 FillH2CCmd(struct rtw_adapter *padapter, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer)
+{
+ u8 bcmd_down = false;
+ s32 retry_cnts = 100;
+ u8 h2c_box_num;
+ u32 msgbox_addr;
+ u32 msgbox_ex_addr;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u32 h2c_cmd = 0;
+ u16 h2c_cmd_ex = 0;
+ s32 ret = _FAIL;
+
+ padapter = GET_PRIMARY_ADAPTER(padapter);
+ pHalData = GET_HAL_DATA(padapter);
+
+ mutex_lock(&adapter_to_dvobj(padapter)->h2c_fwcmd_mutex);
+
+ if (!pCmdBuffer)
+ goto exit;
+ if (CmdLen > RTL92C_MAX_CMD_LEN)
+ goto exit;
+ if (padapter->bSurpriseRemoved == true)
+ goto exit;
+
+ /* Watch out for a race condition in the H2C cmd setting. */
+ do {
+ h2c_box_num = pHalData->LastHMEBoxNum;
+
+ if (!_is_fw_read_cmd_down(padapter, h2c_box_num)) {
+ DBG_8723A(" fw read cmd failed...\n");
+ goto exit;
+ }
+
+ if (CmdLen <= 3) {
+ memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, CmdLen);
+ } else {
+ memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer, EX_MESSAGE_BOX_SIZE);
+ memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer+2, (CmdLen-EX_MESSAGE_BOX_SIZE));
+ *(u8 *)(&h2c_cmd) |= BIT(7);
+ }
+
+ *(u8 *)(&h2c_cmd) |= ElementID;
+
+ if (h2c_cmd & BIT(7)) {
+ msgbox_ex_addr = REG_HMEBOX_EXT_0 + (h2c_box_num * EX_MESSAGE_BOX_SIZE);
+ h2c_cmd_ex = le16_to_cpu(h2c_cmd_ex);
+ rtw_write16(padapter, msgbox_ex_addr, h2c_cmd_ex);
+ }
+ msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * MESSAGE_BOX_SIZE);
+ h2c_cmd = le32_to_cpu(h2c_cmd);
+ rtw_write32(padapter, msgbox_addr, h2c_cmd);
+
+ bcmd_down = true;
+
+ pHalData->LastHMEBoxNum = (h2c_box_num+1) % RTL92C_MAX_H2C_BOX_NUMS;
+
+ } while ((!bcmd_down) && (retry_cnts--));
+
+ ret = _SUCCESS;
+
+exit:
+ mutex_unlock(&adapter_to_dvobj(padapter)->h2c_fwcmd_mutex);
+ return ret;
+}
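+
+/*
+ * Packing example for FillH2CCmd() above, on a little-endian host and
+ * with made-up payload bytes: a short command (CmdLen <= 3), such as the
+ * one issued by rtl8723a_set_rssi_cmd() below, ends up as
+ *
+ *	u32 h2c_cmd = 0;
+ *	memcpy((u8 *)&h2c_cmd + 1, payload, 3);    payload -> bits 8..31
+ *	h2c_cmd |= RSSI_SETTING_EID;               CMD_ID  -> bits 6..0
+ *
+ * so a payload of {0x11, 0x22, 0x33} gives 0x332211xx with xx = CMD_ID.
+ * Commands longer than 3 bytes set BIT(7) (the Ext_bit) and put their
+ * first two payload bytes into REG_HMEBOX_EXT_x instead.
+ */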
+
+u8 rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u8 *param)
+{
+ u8 res = _SUCCESS;
+
+ *((u32 *)param) = cpu_to_le32(*((u32 *)param));
+
+ FillH2CCmd(padapter, RSSI_SETTING_EID, 3, param);
+
+ return res;
+}
+
+u8 rtl8723a_set_raid_cmd(struct rtw_adapter *padapter, u32 mask, u8 arg)
+{
+ u8 buf[5];
+ u8 res = _SUCCESS;
+
+ memset(buf, 0, 5);
+ mask = cpu_to_le32(mask);
+ memcpy(buf, &mask, 4);
+ buf[4] = arg;
+
+ FillH2CCmd(padapter, MACID_CONFIG_EID, 5, buf);
+
+ return res;
+
+}
+
+/* bitmap[0:27] = tx_rate_bitmap */
+/* bitmap[28:31]= Rate Adaptive id */
+/* arg[0:4] = macid */
+/* arg[5] = Short GI */
+void rtl8723a_add_rateatid(struct rtw_adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_level)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
+ u8 macid = arg&0x1f;
+ u8 raid = (bitmap>>28) & 0x0f;
+
+ bitmap &= 0x0fffffff;
+ if (rssi_level != DM_RATR_STA_INIT)
+ bitmap = ODM_Get_Rate_Bitmap23a(&pHalData->odmpriv, macid, bitmap, rssi_level);
+
+ bitmap |= ((raid<<28)&0xf0000000);
+
+ if (pHalData->fw_ractrl == true) {
+ rtl8723a_set_raid_cmd(pAdapter, bitmap, arg);
+ } else {
+ u8 init_rate, shortGIrate = false;
+
+ init_rate = get_highest_rate_idx23a(bitmap&0x0fffffff)&0x3f;
+
+ shortGIrate = (arg&BIT(5)) ? true:false;
+
+ if (shortGIrate == true)
+ init_rate |= BIT(6);
+
+ rtw_write8(pAdapter, (REG_INIDATA_RATE_SEL+macid), (u8)init_rate);
+ }
+}
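+
+/*
+ * Illustrative caller-side packing for rtl8723a_add_rateatid() above;
+ * the concrete numbers are made up:
+ *
+ *	bitmap = (2 << 28) | 0x000ffff5;    RA id 2, tx_rate_bitmap 0xffff5
+ *	arg    = 5 | BIT(5);                macid 5, short GI enabled
+ *	rtl8723a_add_rateatid(pAdapter, bitmap, arg, DM_RATR_STA_INIT);
+ *
+ * With DM_RATR_STA_INIT the rate bitmap is passed through unchanged;
+ * any other rssi_level lets ODM_Get_Rate_Bitmap23a() trim it first.
+ */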
+
+void rtl8723a_set_FwPwrMode_cmd(struct rtw_adapter *padapter, u8 Mode)
+{
+ struct setpwrmode_parm H2CSetPwrMode;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ DBG_8723A("%s: Mode =%d SmartPS =%d UAPSD =%d BcnMode = 0x%02x\n", __FUNCTION__,
+ Mode, pwrpriv->smart_ps, padapter->registrypriv.uapsd_enable, pwrpriv->bcn_ant_mode);
+
+ /* Force leaving RF low power mode for 1T1R to prevent a
+ conflicting setting in the Fw power saving sequence. */
+ /* 2010.06.07. Added by tynli. Suggested by SD3 yschang. */
+ if ((Mode != PS_MODE_ACTIVE) &&
+ (!IS_92C_SERIAL(pHalData->VersionID))) {
+ ODM_RF_Saving23a(&pHalData->odmpriv, true);
+ }
+
+ H2CSetPwrMode.Mode = Mode;
+ H2CSetPwrMode.SmartPS = pwrpriv->smart_ps;
+ H2CSetPwrMode.AwakeInterval = 1;
+ H2CSetPwrMode.bAllQueueUAPSD = padapter->registrypriv.uapsd_enable;
+ H2CSetPwrMode.BcnAntMode = pwrpriv->bcn_ant_mode;
+
+ FillH2CCmd(padapter, SET_PWRMODE_EID, sizeof(H2CSetPwrMode), (u8 *)&H2CSetPwrMode);
+
+}
+
+static void ConstructBeacon(struct rtw_adapter *padapter, u8 *pframe, u32 *pLength)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ u32 rate_len, pktlen;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ /* DBG_8723A("%s\n", __FUNCTION__); */
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid23a(cur_network), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
+ /* pmlmeext->mgnt_seq++; */
+ SetFrameSubType(pframe, WIFI_BEACON);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pktlen = sizeof (struct ieee80211_hdr_3addr);
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval23a_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pktlen += 2;
+
+ /* capability info: 2 bytes */
+ memcpy(pframe, (unsigned char *)(rtw_get_capability23a_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pktlen += 2;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ /* DBG_8723A("ie len =%d\n", cur_network->IELength); */
+ pktlen += cur_network->IELength - sizeof(struct ndis_802_11_fixed_ies);
+ memcpy(pframe, cur_network->IEs+sizeof(struct ndis_802_11_fixed_ies), pktlen);
+
+ goto _ConstructBeacon;
+ }
+
+ /* below for ad-hoc mode */
+
+ /* SSID */
+ pframe = rtw_set_ie23a(pframe, _SSID_IE_, cur_network->Ssid.ssid_len,
+ cur_network->Ssid.ssid, &pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len23a(cur_network->SupportedRates);
+ pframe = rtw_set_ie23a(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ?
+ 8 : rate_len), cur_network->SupportedRates, &pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie23a(pframe, _DSSET_IE_, 1, (unsigned char *)&cur_network->Configuration.DSConfig, &pktlen);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ /* ATIMWindow = cur->Configuration.ATIMWindow; */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie23a(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pktlen);
+ }
+
+ /* todo: ERP IE */
+
+ /* EXTERNDED SUPPORTED RATE */
+ if (rate_len > 8)
+ pframe = rtw_set_ie23a(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pktlen);
+
+ /* todo:HT for adhoc */
+
+_ConstructBeacon:
+
+ if ((pktlen + TXDESC_SIZE) > 512) {
+ DBG_8723A("beacon frame too large\n");
+ return;
+ }
+
+ *pLength = pktlen;
+
+ /* DBG_8723A("%s bcn_sz =%d\n", __FUNCTION__, pktlen); */
+
+}
+
+static void ConstructPSPoll(struct rtw_adapter *padapter, u8 *pframe, u32 *pLength)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ /* Frame control. */
+ fctrl = &pwlanhdr->frame_control;
+ *(fctrl) = 0;
+ SetPwrMgt(fctrl);
+ SetFrameSubType(pframe, WIFI_PSPOLL);
+
+ /* AID. */
+ SetDuration(pframe, (pmlmeinfo->aid | 0xc000));
+
+ /* BSSID. */
+ memcpy(pwlanhdr->addr1, get_my_bssid23a(&pmlmeinfo->network), ETH_ALEN);
+
+ /* TA. */
+ memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
+
+ *pLength = 16;
+}
+
+static void ConstructNullFunctionData(
+ struct rtw_adapter *padapter,
+ u8 *pframe,
+ u32 *pLength,
+ u8 *StaAddr,
+ u8 bQoS,
+ u8 AC,
+ u8 bEosp,
+ u8 bForcePowerSave)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ u32 pktlen;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *(fctrl) = 0;
+ if (bForcePowerSave)
+ SetPwrMgt(fctrl);
+
+ switch (cur_network->network.InfrastructureMode) {
+ case Ndis802_11Infrastructure:
+ SetToDs(fctrl);
+ memcpy(pwlanhdr->addr1,
+ get_my_bssid23a(&pmlmeinfo->network), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv),
+ ETH_ALEN);
+ memcpy(pwlanhdr->addr3, StaAddr, ETH_ALEN);
+ break;
+ case Ndis802_11APMode:
+ SetFrDs(fctrl);
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2,
+ get_my_bssid23a(&pmlmeinfo->network), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&padapter->eeprompriv),
+ ETH_ALEN);
+ break;
+ case Ndis802_11IBSS:
+ default:
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3,
+ get_my_bssid23a(&pmlmeinfo->network), ETH_ALEN);
+ break;
+ }
+
+ SetSeqNum(pwlanhdr, 0);
+
+ if (bQoS == true) {
+ struct ieee80211_qos_hdr *pwlanqoshdr;
+
+ SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
+
+ pwlanqoshdr = (struct ieee80211_qos_hdr *)pframe;
+ SetPriority(&pwlanqoshdr->qos_ctrl, AC);
+ SetEOSP(&pwlanqoshdr->qos_ctrl, bEosp);
+
+ pktlen = sizeof(struct ieee80211_qos_hdr);
+ } else {
+ SetFrameSubType(pframe, WIFI_DATA_NULL);
+
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
+ }
+
+ *pLength = pktlen;
+}
+
+static void ConstructProbeRsp(struct rtw_adapter *padapter, u8 *pframe, u32 *pLength, u8 *StaAddr, bool bHideSSID)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ u8 *mac, *bssid;
+ u32 pktlen;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+
+ /* DBG_8723A("%s\n", __FUNCTION__); */
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ mac = myid(&padapter->eeprompriv);
+ bssid = cur_network->MacAddress;
+
+ fctrl = &pwlanhdr->frame_control;
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0);
+ SetFrameSubType(fctrl, WIFI_PROBERSP);
+
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
+ pframe += pktlen;
+
+ if (cur_network->IELength > MAX_IE_SZ)
+ return;
+
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ pframe += cur_network->IELength;
+ pktlen += cur_network->IELength;
+
+ *pLength = pktlen;
+}
+
+/* To check if the reserved page content is destroyed by the beacon because the beacon is too large. */
+void CheckFwRsvdPageContent23a(struct rtw_adapter *Adapter)
+{
+}
+
+/* */
+/* Description: Fill the reserved packets that FW will use into the RSVD page. */
+/* Now we just send 4 types of packets to the rsvd page: */
+/* (1)Beacon, (2)Ps-poll, (3)Null data, (4)ProbeRsp. */
+/* Input: */
+/* bDLFinished - false: The first time, we send all the packets as one large packet to Hw, */
+/* so we need to set the packet length to the total length. */
+/* true: The second time, we should send the first packet (default: beacon) */
+/* to Hw again and set the length in the descriptor to the real beacon length. */
+/* 2009.10.15 by tynli. */
+static void SetFwRsvdPagePkt(struct rtw_adapter *padapter, bool bDLFinished)
+{
+ struct hal_data_8723a *pHalData;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ struct xmit_priv *pxmitpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ u32 BeaconLength = 0, ProbeRspLength = 0, PSPollLength;
+ u32 NullDataLength, QosNullLength, BTQosNullLength;
+ u8 *ReservedPagePacket;
+ u8 PageNum, PageNeed, TxDescLen;
+ u16 BufIndex;
+ u32 TotalPacketLen;
+ struct rsvdpage_loc RsvdPageLoc;
+
+ DBG_8723A("%s\n", __FUNCTION__);
+
+ ReservedPagePacket = kzalloc(1000, GFP_KERNEL);
+ if (ReservedPagePacket == NULL) {
+ DBG_8723A("%s: alloc ReservedPagePacket fail!\n", __FUNCTION__);
+ return;
+ }
+
+ pHalData = GET_HAL_DATA(padapter);
+ pxmitpriv = &padapter->xmitpriv;
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
+ TxDescLen = TXDESC_SIZE;
+ PageNum = 0;
+
+ /* 3 (1) beacon */
+ BufIndex = TXDESC_OFFSET;
+ ConstructBeacon(padapter, &ReservedPagePacket[BufIndex], &BeaconLength);
+
+ /* When we count the first page size, we need to reserve the descriptor size for the RSVD */
+ /* packet; it will be filled in front of the packet in TXPKTBUF. */
+ PageNeed = (u8)PageNum_128(TxDescLen + BeaconLength);
+ /* Reserve 2 pages for the beacon buffer. 2010.06.24. */
+ if (PageNeed == 1)
+ PageNeed += 1;
+ PageNum += PageNeed;
+ pHalData->FwRsvdPageStartOffset = PageNum;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (2) ps-poll */
+ RsvdPageLoc.LocPsPoll = PageNum;
+ ConstructPSPoll(padapter, &ReservedPagePacket[BufIndex], &PSPollLength);
+ rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], PSPollLength, true, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + PSPollLength);
+ PageNum += PageNeed;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (3) null data */
+ RsvdPageLoc.LocNullData = PageNum;
+ ConstructNullFunctionData(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &NullDataLength,
+ get_my_bssid23a(&pmlmeinfo->network),
+ false, 0, 0, false);
+ rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], NullDataLength, false, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + NullDataLength);
+ PageNum += PageNeed;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (4) probe response */
+ RsvdPageLoc.LocProbeRsp = PageNum;
+ ConstructProbeRsp(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &ProbeRspLength,
+ get_my_bssid23a(&pmlmeinfo->network),
+ false);
+ rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], ProbeRspLength, false, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + ProbeRspLength);
+ PageNum += PageNeed;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (5) Qos null data */
+ RsvdPageLoc.LocQosNull = PageNum;
+ ConstructNullFunctionData(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &QosNullLength,
+ get_my_bssid23a(&pmlmeinfo->network),
+ true, 0, 0, false);
+ rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], QosNullLength, false, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + QosNullLength);
+ PageNum += PageNeed;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (6) BT Qos null data */
+ RsvdPageLoc.LocBTQosNull = PageNum;
+ ConstructNullFunctionData(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &BTQosNullLength,
+ get_my_bssid23a(&pmlmeinfo->network),
+ true, 0, 0, false);
+ rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], BTQosNullLength, false, true);
+
+ TotalPacketLen = BufIndex + BTQosNullLength;
+
+ pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+ pattrib->qsel = 0x10;
+ pattrib->pktlen = pattrib->last_txcmdsz = TotalPacketLen - TXDESC_OFFSET;
+ memcpy(pmgntframe->buf_addr, ReservedPagePacket, TotalPacketLen);
+
+ rtw_hal_mgnt_xmit23a(padapter, pmgntframe);
+
+ DBG_8723A("%s: Set RSVD page location to Fw\n", __FUNCTION__);
+ FillH2CCmd(padapter, RSVD_PAGE_EID, sizeof(RsvdPageLoc), (u8 *)&RsvdPageLoc);
+
+exit:
+ kfree(ReservedPagePacket);
+}
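+
+/*
+ * Page accounting sketch for SetFwRsvdPagePkt() above, assuming
+ * PageNum_128() rounds up to whole 128-byte TXPKTBUF pages and using a
+ * hypothetical 40-byte tx descriptor and 80-byte beacon:
+ *
+ *	beacon : PageNum_128(40 + 80) = 1 page, bumped to 2 reserved pages
+ *	ps-poll: PageNum_128(40 + 16) = 1 page
+ *
+ * Each subsequent packet is placed at BufIndex = PageNum * 128 +
+ * TXDESC_OFFSET, and its page number is reported to the firmware via
+ * the rsvdpage_loc H2C command (RSVD_PAGE_EID).
+ */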
+
+void rtl8723a_set_FwJoinBssReport_cmd(struct rtw_adapter *padapter, u8 mstatus)
+{
+ struct joinbssrpt_parm JoinBssRptParm;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ DBG_8723A("%s mstatus(%x)\n", __FUNCTION__, mstatus);
+
+ if (mstatus == 1) {
+ bool bRecover = false;
+ u8 v8;
+
+ /* We should set AID, correct TSF, and HW seq enable before setting JoinBssReport to Fw in 88/92C. */
+ /* Suggested by filen. Added by tynli. */
+ rtw_write16(padapter, REG_BCN_PSR_RPT, (0xC000|pmlmeinfo->aid));
+ /* Do not set TSF again here or vWiFi beacon DMA INT will not work. */
+ /* correct_TSF23a(padapter, pmlmeext); */
+ /* Hw sequence enable by default. 2010.06.23. by tynli. */
+ /* rtw_write16(padapter, REG_NQOS_SEQ, ((pmlmeext->mgnt_seq+100)&0xFFF)); */
+ /* rtw_write8(padapter, REG_HWSEQ_CTRL, 0xFF); */
+
+ /* set REG_CR bit 8 */
+ v8 = rtw_read8(padapter, REG_CR+1);
+ v8 |= BIT(0); /* ENSWBCN */
+ rtw_write8(padapter, REG_CR+1, v8);
+
+ /* Disable Hw protection for a time which is reserved for Hw sending the beacon. */
+ /* Fix reserved page packet download failures caused by access collisions with the protection time. */
+ /* 2010.05.11. Added by tynli. */
+/* SetBcnCtrlReg23a(padapter, 0, BIT(3)); */
+/* SetBcnCtrlReg23a(padapter, BIT(4), 0); */
+ SetBcnCtrlReg23a(padapter, BIT(4), BIT(3));
+
+ /* Set FWHW_TXQ_CTRL 0x422[6]= 0 to tell Hw the packet is not a real beacon frame. */
+ if (pHalData->RegFwHwTxQCtrl & BIT(6))
+ bRecover = true;
+
+ /* To tell Hw the packet is not a real beacon frame. */
+ /* U1bTmp = rtw_read8(padapter, REG_FWHW_TXQ_CTRL+2); */
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL+2, pHalData->RegFwHwTxQCtrl & ~BIT(6));
+ pHalData->RegFwHwTxQCtrl &= ~BIT(6);
+ SetFwRsvdPagePkt(padapter, 0);
+
+ /* 2010.05.11. Added by tynli. */
+ SetBcnCtrlReg23a(padapter, BIT(3), BIT(4));
+
+ /* Check whether there exists an adapter which would like to send beacons. */
+ /* If so, the original value of 0x422[6] will be 1; we must check this to */
+ /* avoid leaving 0x422[6] set to 0 after downloading the reserved page, or */
+ /* the beacon cannot be sent by HW. */
+ /* 2010.06.23. Added by tynli. */
+ if (bRecover) {
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL+2, pHalData->RegFwHwTxQCtrl | BIT(6));
+ pHalData->RegFwHwTxQCtrl |= BIT(6);
+ }
+
+ /* Clear CR[8] or the beacon packet will not be sent to TxBuf anymore. */
+ v8 = rtw_read8(padapter, REG_CR+1);
+ v8 &= ~BIT(0); /* ~ENSWBCN */
+ rtw_write8(padapter, REG_CR+1, v8);
+ }
+
+ JoinBssRptParm.OpMode = mstatus;
+
+ FillH2CCmd(padapter, JOINBSS_RPT_EID, sizeof(JoinBssRptParm), (u8 *)&JoinBssRptParm);
+
+}
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+static void SetFwRsvdPagePkt_BTCoex(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ struct xmit_priv *pxmitpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ u8 fakemac[6] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x00};
+ u32 NullDataLength, BTQosNullLength;
+ u8 *ReservedPagePacket;
+ u8 PageNum, PageNeed, TxDescLen;
+ u16 BufIndex;
+ u32 TotalPacketLen;
+ struct rsvdpage_loc RsvdPageLoc;
+
+ DBG_8723A("+%s\n", __FUNCTION__);
+
+ ReservedPagePacket = kzalloc(1024, GFP_KERNEL);
+ if (ReservedPagePacket == NULL) {
+ DBG_8723A("%s: alloc ReservedPagePacket fail!\n", __FUNCTION__);
+ return;
+ }
+
+ pHalData = GET_HAL_DATA(padapter);
+ pxmitpriv = &padapter->xmitpriv;
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
+ TxDescLen = TXDESC_SIZE;
+ PageNum = 0;
+
+ /* 3 (1) beacon */
+ BufIndex = TXDESC_OFFSET;
+ /* skip Beacon Packet */
+ PageNeed = 3;
+
+ PageNum += PageNeed;
+ pHalData->FwRsvdPageStartOffset = PageNum;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (3) null data */
+ RsvdPageLoc.LocNullData = PageNum;
+ ConstructNullFunctionData(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &NullDataLength,
+ fakemac,
+ false, 0, 0, false);
+ rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], NullDataLength, false, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + NullDataLength);
+ PageNum += PageNeed;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (6) BT Qos null data */
+ RsvdPageLoc.LocBTQosNull = PageNum;
+ ConstructNullFunctionData(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &BTQosNullLength,
+ fakemac,
+ true, 0, 0, false);
+ rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], BTQosNullLength, false, true);
+
+ TotalPacketLen = BufIndex + BTQosNullLength;
+
+ pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+ pattrib->qsel = 0x10;
+ pattrib->pktlen = pattrib->last_txcmdsz = TotalPacketLen - TXDESC_OFFSET;
+ memcpy(pmgntframe->buf_addr, ReservedPagePacket, TotalPacketLen);
+
+ rtw_hal_mgnt_xmit23a(padapter, pmgntframe);
+
+ DBG_8723A("%s: Set RSVD page location to Fw\n", __FUNCTION__);
+ FillH2CCmd(padapter, RSVD_PAGE_EID, sizeof(RsvdPageLoc), (u8 *)&RsvdPageLoc);
+
+exit:
+ kfree(ReservedPagePacket);
+}
+
+void rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ u8 bRecover = false;
+
+ DBG_8723A("+%s\n", __FUNCTION__);
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ /* Set FWHW_TXQ_CTRL 0x422[6]= 0 to tell Hw the packet is not a real beacon frame. */
+ if (pHalData->RegFwHwTxQCtrl & BIT(6))
+ bRecover = true;
+
+ /* To tell Hw the packet is not a real beacon frame. */
+ pHalData->RegFwHwTxQCtrl &= ~BIT(6);
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL+2, pHalData->RegFwHwTxQCtrl);
+ SetFwRsvdPagePkt_BTCoex(padapter);
+
+ /* Check whether there exists an adapter which would like to send beacons. */
+ /* If so, the original value of 0x422[6] will be 1; we must check this to */
+ /* avoid leaving 0x422[6] set to 0 after downloading the reserved page, or */
+ /* the beacon cannot be sent by HW. */
+ /* 2010.06.23. Added by tynli. */
+ if (bRecover) {
+ pHalData->RegFwHwTxQCtrl |= BIT(6);
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL+2, pHalData->RegFwHwTxQCtrl);
+ }
+}
+#endif
+
+#ifdef CONFIG_8723AU_P2P
+void rtl8723a_set_p2p_ps_offload_cmd(struct rtw_adapter *padapter, u8 p2p_ps_state)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ struct P2P_PS_Offload_t *p2p_ps_offload = &pHalData->p2p_ps_offload;
+ u8 i;
+
+ switch (p2p_ps_state) {
+ case P2P_PS_DISABLE:
+ DBG_8723A("P2P_PS_DISABLE \n");
+ memset(p2p_ps_offload, 0, 1);
+ break;
+ case P2P_PS_ENABLE:
+ DBG_8723A("P2P_PS_ENABLE \n");
+ /* update CTWindow value. */
+ if (pwdinfo->ctwindow > 0) {
+ p2p_ps_offload->CTWindow_En = 1;
+ rtw_write8(padapter, REG_P2P_CTWIN, pwdinfo->ctwindow);
+ }
+
+ /* hw only support 2 set of NoA */
+ for (i = 0; i < pwdinfo->noa_num; i++) {
+ /* To control the register setting for which NOA */
+ rtw_write8(padapter, REG_NOA_DESC_SEL, (i << 4));
+ if (i == 0)
+ p2p_ps_offload->NoA0_En = 1;
+ else
+ p2p_ps_offload->NoA1_En = 1;
+
+ /* config P2P NoA Descriptor Register */
+ rtw_write32(padapter, REG_NOA_DESC_DURATION, pwdinfo->noa_duration[i]);
+
+ rtw_write32(padapter, REG_NOA_DESC_INTERVAL, pwdinfo->noa_interval[i]);
+
+ rtw_write32(padapter, REG_NOA_DESC_START, pwdinfo->noa_start_time[i]);
+
+ rtw_write8(padapter, REG_NOA_DESC_COUNT, pwdinfo->noa_count[i]);
+ }
+
+ if ((pwdinfo->opp_ps == 1) || (pwdinfo->noa_num > 0)) {
+ /* rst p2p circuit */
+ rtw_write8(padapter, REG_DUAL_TSF_RST, BIT(4));
+
+ p2p_ps_offload->Offload_En = 1;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ p2p_ps_offload->role = 1;
+ p2p_ps_offload->AllStaSleep = 0;
+ } else {
+ p2p_ps_offload->role = 0;
+ }
+
+ p2p_ps_offload->discovery = 0;
+ }
+ break;
+ case P2P_PS_SCAN:
+ DBG_8723A("P2P_PS_SCAN \n");
+ p2p_ps_offload->discovery = 1;
+ break;
+ case P2P_PS_SCAN_DONE:
+ DBG_8723A("P2P_PS_SCAN_DONE \n");
+ p2p_ps_offload->discovery = 0;
+ pwdinfo->p2p_ps_state = P2P_PS_ENABLE;
+ break;
+ default:
+ break;
+ }
+
+ FillH2CCmd(padapter, P2P_PS_OFFLOAD_EID, 1, (u8 *)p2p_ps_offload);
+}
+#endif /* CONFIG_8723AU_P2P */
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_dm.c b/drivers/staging/rtl8723au/hal/rtl8723a_dm.c
new file mode 100644
index 000000000000..f204ab1714e7
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_dm.c
@@ -0,0 +1,273 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/* */
+/* Description: */
+/* */
+/* This file is for 92CE/92CU dynamic mechanism only */
+/* */
+/* */
+/* */
+#define _RTL8723A_DM_C_
+
+/* */
+/* include files */
+/* */
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <rtl8723a_hal.h>
+
+/* */
+/* Global var */
+/* */
+
+static void dm_CheckStatistics(struct rtw_adapter *Adapter)
+{
+}
+
+static void dm_CheckPbcGPIO(struct rtw_adapter *padapter)
+{
+ u8 tmp1byte;
+ u8 bPbcPressed = false;
+
+ if (!padapter->registrypriv.hw_wps_pbc)
+ return;
+
+ tmp1byte = rtw_read8(padapter, GPIO_IO_SEL);
+ tmp1byte |= (HAL_8192C_HW_GPIO_WPS_BIT);
+ rtw_write8(padapter, GPIO_IO_SEL, tmp1byte); /* enable GPIO[2] as output mode */
+
+ tmp1byte &= ~(HAL_8192C_HW_GPIO_WPS_BIT);
+ rtw_write8(padapter, GPIO_IN, tmp1byte); /* reset the floating voltage level */
+
+ tmp1byte = rtw_read8(padapter, GPIO_IO_SEL);
+ tmp1byte &= ~(HAL_8192C_HW_GPIO_WPS_BIT);
+ rtw_write8(padapter, GPIO_IO_SEL, tmp1byte); /* enable GPIO[2] as input mode */
+
+ tmp1byte = rtw_read8(padapter, GPIO_IN);
+
+ if (tmp1byte == 0xff)
+ return;
+
+ if (tmp1byte&HAL_8192C_HW_GPIO_WPS_BIT)
+ bPbcPressed = true;
+
+ if (bPbcPressed) {
+ /* Here we only set bPbcPressed to true */
+ /* After the PBC is triggered, the variable will be set to false */
+ DBG_8723A("CheckPbcGPIO - PBC is pressed\n");
+
+ if (padapter->pid[0] == 0) {
+ /* 0 is the default value and it means the application
+ * monitoring the HW PBC hasn't provided its pid to the driver.
+ */
+ return;
+ }
+
+ rtw_signal_process(padapter->pid[0], SIGUSR1);
+ }
+}
+
+/* Initialize GPIO setting registers */
+/* functions */
+static void Init_ODM_ComInfo_8723a(struct rtw_adapter *Adapter)
+{
+
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
+ u8 cut_ver, fab_ver;
+
+ /* */
+ /* Init Value */
+ /* */
+ memset(pDM_Odm, 0, sizeof(*pDM_Odm));
+
+ pDM_Odm->Adapter = Adapter;
+ ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_PLATFORM, 0x04);
+ ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_INTERFACE, RTW_USB);/* RTL871X_HCI_TYPE */
+
+ ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8723A);
+
+ if (IS_8723A_A_CUT(pHalData->VersionID)) {
+ fab_ver = ODM_UMC;
+ cut_ver = ODM_CUT_A;
+ } else if (IS_8723A_B_CUT(pHalData->VersionID)) {
+ fab_ver = ODM_UMC;
+ cut_ver = ODM_CUT_B;
+ } else {
+ fab_ver = ODM_TSMC;
+ cut_ver = ODM_CUT_A;
+ }
+ ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_FAB_VER, fab_ver);
+ ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_CUT_VER, cut_ver);
+ ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_MP_TEST_CHIP, IS_NORMAL_CHIP(pHalData->VersionID));
+
+ ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_BOARD_TYPE, pHalData->BoardType);
+
+ if (pHalData->BoardType == BOARD_USB_High_PA) {
+ ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_EXT_LNA, true);
+ ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_EXT_PA, true);
+ }
+ ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_PATCH_ID, pHalData->CustomerID);
+ ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_BWIFI_TEST, Adapter->registrypriv.wifi_spec);
+
+ if (pHalData->rf_type == RF_1T1R)
+ ODM_CmnInfoUpdate23a(pDM_Odm, ODM_CMNINFO_RF_TYPE, ODM_1T1R);
+ else if (pHalData->rf_type == RF_2T2R)
+ ODM_CmnInfoUpdate23a(pDM_Odm, ODM_CMNINFO_RF_TYPE, ODM_2T2R);
+ else if (pHalData->rf_type == RF_1T2R)
+ ODM_CmnInfoUpdate23a(pDM_Odm, ODM_CMNINFO_RF_TYPE, ODM_1T2R);
+}
+
+static void Update_ODM_ComInfo_8723a(struct rtw_adapter *Adapter)
+{
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ int i;
+ pdmpriv->InitODMFlag = ODM_BB_DIG |
+ ODM_BB_RA_MASK |
+ ODM_BB_DYNAMIC_TXPWR |
+ ODM_BB_FA_CNT |
+ ODM_BB_RSSI_MONITOR |
+ ODM_BB_CCK_PD |
+ ODM_BB_PWR_SAVE |
+ ODM_MAC_EDCA_TURBO |
+ ODM_RF_TX_PWR_TRACK |
+ ODM_RF_CALIBRATION;
+ /* Pointer reference */
+
+ ODM_CmnInfoUpdate23a(pDM_Odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
+
+ ODM23a_CmnInfoHook(pDM_Odm, ODM_CMNINFO_TX_UNI,
+ &Adapter->xmitpriv.tx_bytes);
+ ODM23a_CmnInfoHook(pDM_Odm, ODM_CMNINFO_RX_UNI,
+ &Adapter->recvpriv.rx_bytes);
+ ODM23a_CmnInfoHook(pDM_Odm, ODM_CMNINFO_WM_MODE,
+ &pmlmeext->cur_wireless_mode);
+ ODM23a_CmnInfoHook(pDM_Odm, ODM_CMNINFO_SEC_CHNL_OFFSET,
+ &pHalData->nCur40MhzPrimeSC);
+ ODM23a_CmnInfoHook(pDM_Odm, ODM_CMNINFO_SEC_MODE,
+ &Adapter->securitypriv.dot11PrivacyAlgrthm);
+ ODM23a_CmnInfoHook(pDM_Odm, ODM_CMNINFO_BW,
+ &pHalData->CurrentChannelBW);
+ ODM23a_CmnInfoHook(pDM_Odm, ODM_CMNINFO_CHNL,
+ &pHalData->CurrentChannel);
+ ODM23a_CmnInfoHook(pDM_Odm, ODM_CMNINFO_NET_CLOSED, &Adapter->net_closed);
+
+ ODM23a_CmnInfoHook(pDM_Odm, ODM_CMNINFO_SCAN, &pmlmepriv->bScanInProcess);
+ ODM23a_CmnInfoHook(pDM_Odm, ODM_CMNINFO_POWER_SAVING,
+ &pwrctrlpriv->bpower_saving);
+
+ for (i = 0; i < NUM_STA; i++)
+ ODM_CmnInfoPtrArrayHook23a(pDM_Odm, ODM_CMNINFO_STA_STATUS, i, NULL);
+}
+
+void rtl8723a_InitHalDm(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
+ u8 i;
+
+ pdmpriv->DM_Type = DM_Type_ByDriver;
+ pdmpriv->DMFlag = DYNAMIC_FUNC_DISABLE;
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ pdmpriv->DMFlag |= DYNAMIC_FUNC_BT;
+#endif
+ pdmpriv->InitDMFlag = pdmpriv->DMFlag;
+
+ Update_ODM_ComInfo_8723a(Adapter);
+ ODM23a_DMInit(pDM_Odm);
+ /* Save REG_INIDATA_RATE_SEL value for TXDESC. */
+ for (i = 0; i < 32; i++)
+ pdmpriv->INIDATA_RATE[i] = rtw_read8(Adapter, REG_INIDATA_RATE_SEL+i) & 0x3f;
+}
+
+void
+rtl8723a_HalDmWatchDog(
+ struct rtw_adapter *Adapter
+ )
+{
+ bool bFwCurrentInPSMode = false;
+ bool bFwPSAwake = true;
+ u8 hw_init_completed = false;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+
+ hw_init_completed = Adapter->hw_init_completed;
+
+ if (hw_init_completed == false)
+ goto skip_dm;
+
+ bFwCurrentInPSMode = Adapter->pwrctrlpriv.bFwCurrentInPSMode;
+ rtw23a_hal_get_hwreg(Adapter, HW_VAR_FWLPS_RF_ON, (u8 *)(&bFwPSAwake));
+
+#ifdef CONFIG_8723AU_P2P
+ /* Fw is in p2p power-saving mode; the driver should stop the dynamic mechanism. */
+ /* modified by thomas. 2011.06.11. */
+ if (Adapter->wdinfo.p2p_ps_mode)
+ bFwPSAwake = false;
+#endif /* CONFIG_8723AU_P2P */
+
+ if ((hw_init_completed) && ((!bFwCurrentInPSMode) && bFwPSAwake)) {
+ /* Calculate Tx/Rx statistics. */
+ dm_CheckStatistics(Adapter);
+
+ /* Read REG_INIDATA_RATE_SEL value for TXDESC. */
+ if (check_fwstate(&Adapter->mlmepriv, WIFI_STATION_STATE)) {
+ pdmpriv->INIDATA_RATE[0] = rtw_read8(Adapter, REG_INIDATA_RATE_SEL) & 0x3f;
+ } else {
+ u8 i;
+ for (i = 1 ; i < (Adapter->stapriv.asoc_sta_count + 1); i++)
+ pdmpriv->INIDATA_RATE[i] = rtw_read8(Adapter, (REG_INIDATA_RATE_SEL+i)) & 0x3f;
+ }
+ }
+
+ /* ODM */
+ if (hw_init_completed == true) {
+ u8 bLinked = false;
+
+ if (rtw_linked_check(Adapter))
+ bLinked = true;
+
+ ODM_CmnInfoUpdate23a(&pHalData->odmpriv, ODM_CMNINFO_LINK,
+ bLinked);
+ ODM_DMWatchdog23a(&pHalData->odmpriv);
+ }
+
+skip_dm:
+
+ /* Check GPIO to determine current RF on/off and Pbc status. */
+ /* Check Hardware Radio ON/OFF or not */
+ dm_CheckPbcGPIO(Adapter);
+}
+
+void rtl8723a_init_dm_priv(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+
+ memset(pdmpriv, 0, sizeof(struct dm_priv));
+ Init_ODM_ComInfo_8723a(Adapter);
+}
+
+void rtl8723a_deinit_dm_priv(struct rtw_adapter *Adapter)
+{
+}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
new file mode 100644
index 000000000000..0982b0a4ab9b
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -0,0 +1,3452 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _HAL_INIT_C_
+
+#include <linux/firmware.h>
+#include <drv_types.h>
+#include <rtw_efuse.h>
+
+#include <rtl8723a_hal.h>
+
+static void _FWDownloadEnable(struct rtw_adapter *padapter, bool enable)
+{
+ u8 tmp;
+
+ if (enable) {
+ /* 8051 enable */
+ tmp = rtw_read8(padapter, REG_SYS_FUNC_EN + 1);
+ rtw_write8(padapter, REG_SYS_FUNC_EN + 1, tmp | 0x04);
+
+ /* MCU firmware download enable. */
+ tmp = rtw_read8(padapter, REG_MCUFWDL);
+ rtw_write8(padapter, REG_MCUFWDL, tmp | 0x01);
+
+ /* 8051 reset */
+ tmp = rtw_read8(padapter, REG_MCUFWDL + 2);
+ rtw_write8(padapter, REG_MCUFWDL + 2, tmp & 0xf7);
+ } else {
+ /* MCU firmware download disable. */
+ tmp = rtw_read8(padapter, REG_MCUFWDL);
+ rtw_write8(padapter, REG_MCUFWDL, tmp & 0xfe);
+
+ /* Reserved for fw extension. */
+ rtw_write8(padapter, REG_MCUFWDL + 1, 0x00);
+ }
+}
+
+static int _BlockWrite(struct rtw_adapter *padapter, void *buffer, u32 buffSize)
+{
+ int ret = _SUCCESS;
+ /* (Default) Phase #1 : PCI must use 4-byte writes to download FW */
+ u32 blockSize_p1 = 4;
+ /* Phase #2 : Use 8-byte writes if Phase #1 used a larger block size. */
+ u32 blockSize_p2 = 8;
+ /* Phase #3 : Use 1-byte, the remnant of FW image. */
+ u32 blockSize_p3 = 1;
+ u32 blockCount_p1 = 0, blockCount_p2 = 0, blockCount_p3 = 0;
+ u32 remainSize_p1 = 0, remainSize_p2 = 0;
+ u8 *bufferPtr = (u8 *) buffer;
+ u32 i = 0, offset = 0;
+
+ blockSize_p1 = 254;
+
+ /* 3 Phase #1 */
+ blockCount_p1 = buffSize / blockSize_p1;
+ remainSize_p1 = buffSize % blockSize_p1;
+
+ if (blockCount_p1) {
+ RT_TRACE(_module_hal_init_c_, _drv_notice_,
+ ("_BlockWrite: [P1] buffSize(%d) blockSize_p1(%d) "
+ "blockCount_p1(%d) remainSize_p1(%d)\n",
+ buffSize, blockSize_p1, blockCount_p1,
+ remainSize_p1));
+ }
+
+ for (i = 0; i < blockCount_p1; i++) {
+ ret = rtw_writeN(padapter,
+ (FW_8723A_START_ADDRESS + i * blockSize_p1),
+ blockSize_p1, (bufferPtr + i * blockSize_p1));
+ if (ret == _FAIL)
+ goto exit;
+ }
+
+ /* 3 Phase #2 */
+ if (remainSize_p1) {
+ offset = blockCount_p1 * blockSize_p1;
+
+ blockCount_p2 = remainSize_p1 / blockSize_p2;
+ remainSize_p2 = remainSize_p1 % blockSize_p2;
+
+ if (blockCount_p2) {
+ RT_TRACE(_module_hal_init_c_, _drv_notice_,
+ ("_BlockWrite: [P2] buffSize_p2(%d) "
+ "blockSize_p2(%d) blockCount_p2(%d) "
+ "remainSize_p2(%d)\n",
+ (buffSize - offset), blockSize_p2,
+ blockCount_p2, remainSize_p2));
+ }
+
+ for (i = 0; i < blockCount_p2; i++) {
+ ret = rtw_writeN(padapter,
+ (FW_8723A_START_ADDRESS + offset +
+ i * blockSize_p2), blockSize_p2,
+ (bufferPtr + offset +
+ i * blockSize_p2));
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ }
+
+ /* 3 Phase #3 */
+ if (remainSize_p2) {
+ offset = (blockCount_p1 * blockSize_p1) +
+ (blockCount_p2 * blockSize_p2);
+
+ blockCount_p3 = remainSize_p2 / blockSize_p3;
+
+ RT_TRACE(_module_hal_init_c_, _drv_notice_,
+ ("_BlockWrite: [P3] buffSize_p3(%d) blockSize_p3(%d) "
+ "blockCount_p3(%d)\n",
+ (buffSize - offset), blockSize_p3, blockCount_p3));
+
+ for (i = 0; i < blockCount_p3; i++) {
+ ret = rtw_write8(padapter,
+ (FW_8723A_START_ADDRESS + offset + i),
+ *(bufferPtr + offset + i));
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ }
+
+exit:
+ return ret;
+}
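+
+/*
+ * Worked example of the split done by _BlockWrite() above for a
+ * hypothetical 1000-byte firmware chunk (blockSize_p1 is forced to 254
+ * for the USB path):
+ *
+ *	phase 1: 1000 / 254 = 3 writes of 254 bytes (762 bytes), 238 left
+ *	phase 2:  238 /   8 = 29 writes of 8 bytes  (232 bytes),   6 left
+ *	phase 3:    6 /   1 = 6 single-byte writes
+ */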
+
+static int
+_PageWrite(struct rtw_adapter *padapter, u32 page, void *buffer, u32 size)
+{
+ u8 value8;
+ u8 u8Page = (u8) (page & 0x07);
+
+ value8 = (rtw_read8(padapter, REG_MCUFWDL + 2) & 0xF8) | u8Page;
+ rtw_write8(padapter, REG_MCUFWDL + 2, value8);
+
+ return _BlockWrite(padapter, buffer, size);
+}
+
+static int _WriteFW(struct rtw_adapter *padapter, void *buffer, u32 size)
+{
+ /* Since we need to dynamically decide the method of downloading the
+ fw, we call this function to get the chip version. */
+ /* We can remove _ReadChipVersion from ReadpadapterInfo8192C later. */
+ int ret = _SUCCESS;
+ u32 pageNums, remainSize;
+ u32 page, offset;
+ u8 *bufferPtr = (u8 *) buffer;
+
+ pageNums = size / MAX_PAGE_SIZE;
+ /* RT_ASSERT((pageNums <= 4),
+ ("Page numbers should not be greater than 4\n")); */
+ remainSize = size % MAX_PAGE_SIZE;
+
+ for (page = 0; page < pageNums; page++) {
+ offset = page * MAX_PAGE_SIZE;
+ ret = _PageWrite(padapter, page, bufferPtr + offset,
+ MAX_PAGE_SIZE);
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ if (remainSize) {
+ offset = pageNums * MAX_PAGE_SIZE;
+ page = pageNums;
+ ret = _PageWrite(padapter, page, bufferPtr + offset,
+ remainSize);
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("_WriteFW Done- for Normal chip.\n"));
+
+exit:
+ return ret;
+}
+
+static s32 _FWFreeToGo(struct rtw_adapter *padapter)
+{
+ u32 counter = 0;
+ u32 value32;
+
+ /* polling CheckSum report */
+ do {
+ value32 = rtw_read32(padapter, REG_MCUFWDL);
+ if (value32 & FWDL_ChkSum_rpt)
+ break;
+ } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
+
+ if (counter >= POLLING_READY_TIMEOUT_COUNT) {
+ RT_TRACE(_module_hal_init_c_, _drv_err_,
+ ("%s: chksum report fail! REG_MCUFWDL:0x%08x\n",
+ __func__, value32));
+ return _FAIL;
+ }
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("%s: Checksum report OK! REG_MCUFWDL:0x%08x\n", __func__,
+ value32));
+
+ value32 = rtw_read32(padapter, REG_MCUFWDL);
+ value32 |= MCUFWDL_RDY;
+ value32 &= ~WINTINI_RDY;
+ rtw_write32(padapter, REG_MCUFWDL, value32);
+
+ /* polling for FW ready */
+ counter = 0;
+ do {
+ value32 = rtw_read32(padapter, REG_MCUFWDL);
+ if (value32 & WINTINI_RDY) {
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("%s: Polling FW ready success!! "
+ "REG_MCUFWDL:0x%08x\n",
+ __func__, value32));
+ return _SUCCESS;
+ }
+ udelay(5);
+ } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
+
+ RT_TRACE(_module_hal_init_c_, _drv_err_,
+ ("%s: Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
+ __func__, value32));
+ return _FAIL;
+}
+
+#define IS_FW_81xxC(padapter) (((GET_HAL_DATA(padapter))->FirmwareSignature & 0xFFF0) == 0x88C0)
+
+void rtl8723a_FirmwareSelfReset(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 u1bTmp;
+ u8 Delay = 100;
+
+ if (!(IS_FW_81xxC(padapter) &&
+ ((pHalData->FirmwareVersion < 0x21) ||
+ (pHalData->FirmwareVersion == 0x21 &&
+ pHalData->FirmwareSubVersion < 0x01)))) {
+ /* after 88C Fw v33.1 */
+ /* 0x1cf = 0x20. Inform 8051 to reset. 2009.12.25. tynli_test */
+ rtw_write8(padapter, REG_HMETFR + 3, 0x20);
+
+ u1bTmp = rtw_read8(padapter, REG_SYS_FUNC_EN + 1);
+ while (u1bTmp & BIT2) {
+ Delay--;
+ if (Delay == 0)
+ break;
+ udelay(50);
+ u1bTmp = rtw_read8(padapter, REG_SYS_FUNC_EN + 1);
+ }
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("-%s: 8051 reset success (%d)\n", __func__,
+ Delay));
+
+ if ((Delay == 0)) {
+ /* force firmware reset */
+ u1bTmp = rtw_read8(padapter, REG_SYS_FUNC_EN + 1);
+ rtw_write8(padapter, REG_SYS_FUNC_EN + 1,
+ u1bTmp & (~BIT2));
+ }
+ }
+}
+
+/* */
+/* Description: */
+/* Download 8192C firmware code. */
+/* */
+/* */
+s32 rtl8723a_FirmwareDownload(struct rtw_adapter *padapter)
+{
+ s32 rtStatus = _SUCCESS;
+ u8 writeFW_retry = 0;
+ unsigned long fwdl_start_time;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
+ struct device *device = dvobj_to_dev(dvobj);
+ struct rt_8723a_firmware_hdr *pFwHdr = NULL;
+ const struct firmware *fw;
+ char *fw_name;
+ u8 *firmware_buf = NULL;
+ u8 *buf;
+ int fw_size;
+ static int log_version;
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
+
+ if (IS_8723A_A_CUT(pHalData->VersionID)) {
+ fw_name = "rtlwifi/rtl8723aufw.bin";
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("rtl8723a_FirmwareDownload: R8723FwImageArray_UMC "
+ "for RTL8723A A CUT\n"));
+ } else if (IS_8723A_B_CUT(pHalData->VersionID)) {
+ /* WLAN Fw. */
+ if (padapter->registrypriv.wifi_spec == 1) {
+ fw_name = "rtlwifi/rtl8723aufw_B_NoBT.bin";
+ DBG_8723A(" Rtl8723_FwUMCBCutImageArrayWithoutBT for "
+ "RTL8723A B CUT\n");
+ } else {
+#ifdef CONFIG_8723AU_BT_COEXIST
+ fw_name = "rtlwifi/rtl8723aufw_B.bin";
+ DBG_8723A(" Rtl8723_FwUMCBCutImageArrayWithBT for "
+ "RTL8723A B CUT\n");
+#else
+ fw_name = "rtlwifi/rtl8723aufw_B_NoBT.bin";
+ DBG_8723A(" Rtl8723_FwUMCBCutImageArrayWithoutBT for "
+ "RTL8723A B CUT\n");
+#endif
+ }
+ } else {
+ /* <Roger_TODO> We should download proper RAM Code here
+ to match the ROM code. */
+ RT_TRACE(_module_hal_init_c_, _drv_err_,
+ ("%s: unknow version!\n", __func__));
+ rtStatus = _FAIL;
+ goto Exit;
+ }
+
+ pr_info("rtl8723au: Loading firmware %s\n", fw_name);
+ if (request_firmware(&fw, fw_name, device)) {
+ pr_err("rtl8723au: request_firmware load failed\n");
+ rtStatus = _FAIL;
+ goto Exit;
+ }
+ if (!fw) {
+ pr_err("rtl8723au: Firmware %s not available\n", fw_name);
+ rtStatus = _FAIL;
+ goto Exit;
+ }
+ firmware_buf = kzalloc(fw->size, GFP_KERNEL);
+ if (!firmware_buf) {
+ rtStatus = _FAIL;
+ goto Exit;
+ }
+ memcpy(firmware_buf, fw->data, fw->size);
+ buf = firmware_buf;
+ fw_size = fw->size;
+ release_firmware(fw);
+
+ /* To Check Fw header. Added by tynli. 2009.12.04. */
+ pFwHdr = (struct rt_8723a_firmware_hdr *)firmware_buf;
+
+ pHalData->FirmwareVersion = le16_to_cpu(pFwHdr->Version);
+ pHalData->FirmwareSubVersion = pFwHdr->Subversion;
+ pHalData->FirmwareSignature = le16_to_cpu(pFwHdr->Signature);
+
+ DBG_8723A("%s: fw_ver =%d fw_subver =%d sig = 0x%x\n",
+ __func__, pHalData->FirmwareVersion,
+ pHalData->FirmwareSubVersion, pHalData->FirmwareSignature);
+
+ if (!log_version++)
+ pr_info("%sFirmware Version %d, SubVersion %d, Signature "
+ "0x%x\n", DRIVER_PREFIX, pHalData->FirmwareVersion,
+ pHalData->FirmwareSubVersion,
+ pHalData->FirmwareSignature);
+
+ if (IS_FW_HEADER_EXIST(pFwHdr)) {
+ /* Shift 32 bytes for FW header */
+ buf = buf + 32;
+ fw_size = fw_size - 32;
+ }
+
+ /* Suggested by Filen. If the 8051 is running in RAM code, the driver
+ should inform the Fw to reset itself, */
+ /* or the Fw download will fail. 2010.02.01. by tynli. */
+ if (rtw_read8(padapter, REG_MCUFWDL) & RAM_DL_SEL) {
+ /* 8051 RAM code */
+ rtl8723a_FirmwareSelfReset(padapter);
+ rtw_write8(padapter, REG_MCUFWDL, 0x00);
+ }
+
+ _FWDownloadEnable(padapter, true);
+ fwdl_start_time = jiffies;
+ while (1) {
+ /* reset the FWDL chksum */
+ rtw_write8(padapter, REG_MCUFWDL,
+ rtw_read8(padapter, REG_MCUFWDL) | FWDL_ChkSum_rpt);
+
+ rtStatus = _WriteFW(padapter, buf, fw_size);
+
+ if (rtStatus == _SUCCESS ||
+ (jiffies_to_msecs(jiffies - fwdl_start_time) > 500 &&
+ writeFW_retry++ >= 3))
+ break;
+
+ DBG_8723A("%s writeFW_retry:%u, time after fwdl_start_time:"
+ "%ums\n", __func__, writeFW_retry,
+ jiffies_to_msecs(jiffies - fwdl_start_time));
+ }
+ _FWDownloadEnable(padapter, false);
+ if (_SUCCESS != rtStatus) {
+ DBG_8723A("DL Firmware failed!\n");
+ goto Exit;
+ }
+
+ rtStatus = _FWFreeToGo(padapter);
+ if (_SUCCESS != rtStatus) {
+ RT_TRACE(_module_hal_init_c_, _drv_err_,
+ ("DL Firmware failed!\n"));
+ goto Exit;
+ }
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("Firmware is ready to run!\n"));
+
+Exit:
+ kfree(firmware_buf);
+ return rtStatus;
+}
+
+void rtl8723a_InitializeFirmwareVars(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ /* Init Fw LPS related. */
+ padapter->pwrctrlpriv.bFwCurrentInPSMode = false;
+
+ /* Init H2C counter. by tynli. 2009.12.09. */
+ pHalData->LastHMEBoxNum = 0;
+}
+
+static void rtl8723a_free_hal_data(struct rtw_adapter *padapter)
+{
+
+ kfree(padapter->HalData);
+ padapter->HalData = NULL;
+
+}
+
+/* */
+/* Efuse related code */
+/* */
+static u8
+hal_EfuseSwitchToBank(struct rtw_adapter *padapter, u8 bank)
+{
+ u8 bRet = false;
+ u32 value32 = 0;
+
+ DBG_8723A("%s: Efuse switch bank to %d\n", __func__, bank);
+ value32 = rtw_read32(padapter, EFUSE_TEST);
+ bRet = true;
+ switch (bank) {
+ case 0:
+ value32 = (value32 & ~EFUSE_SEL_MASK) |
+ EFUSE_SEL(EFUSE_WIFI_SEL_0);
+ break;
+ case 1:
+ value32 = (value32 & ~EFUSE_SEL_MASK) |
+ EFUSE_SEL(EFUSE_BT_SEL_0);
+ break;
+ case 2:
+ value32 = (value32 & ~EFUSE_SEL_MASK) |
+ EFUSE_SEL(EFUSE_BT_SEL_1);
+ break;
+ case 3:
+ value32 = (value32 & ~EFUSE_SEL_MASK) |
+ EFUSE_SEL(EFUSE_BT_SEL_2);
+ break;
+ default:
+ value32 = (value32 & ~EFUSE_SEL_MASK) |
+ EFUSE_SEL(EFUSE_WIFI_SEL_0);
+ bRet = false;
+ break;
+ }
+ rtw_write32(padapter, EFUSE_TEST, value32);
+
+ return bRet;
+}
+
+static void
+Hal_GetEfuseDefinition(struct rtw_adapter *padapter,
+ u8 efuseType, u8 type, void *pOut)
+{
+ u8 *pu1Tmp;
+ u16 *pu2Tmp;
+ u8 *pMax_section;
+
+ switch (type) {
+ case TYPE_EFUSE_MAX_SECTION:
+ pMax_section = (u8 *) pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pMax_section = EFUSE_MAX_SECTION_8723A;
+ else
+ *pMax_section = EFUSE_BT_MAX_SECTION;
+ break;
+
+ case TYPE_EFUSE_REAL_CONTENT_LEN:
+ pu2Tmp = (u16 *) pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723A;
+ else
+ *pu2Tmp = EFUSE_BT_REAL_CONTENT_LEN;
+ break;
+
+ case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
+ pu2Tmp = (u16 *) pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723A -
+ EFUSE_OOB_PROTECT_BYTES);
+ else
+ *pu2Tmp = (EFUSE_BT_REAL_BANK_CONTENT_LEN -
+ EFUSE_PROTECT_BYTES_BANK);
+ break;
+
+ case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
+ pu2Tmp = (u16 *) pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723A -
+ EFUSE_OOB_PROTECT_BYTES);
+ else
+ *pu2Tmp = (EFUSE_BT_REAL_CONTENT_LEN -
+ (EFUSE_PROTECT_BYTES_BANK * 3));
+ break;
+
+ case TYPE_EFUSE_MAP_LEN:
+ pu2Tmp = (u16 *) pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu2Tmp = EFUSE_MAP_LEN_8723A;
+ else
+ *pu2Tmp = EFUSE_BT_MAP_LEN;
+ break;
+
+ case TYPE_EFUSE_PROTECT_BYTES_BANK:
+ pu1Tmp = (u8 *) pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu1Tmp = EFUSE_OOB_PROTECT_BYTES;
+ else
+ *pu1Tmp = EFUSE_PROTECT_BYTES_BANK;
+ break;
+
+ case TYPE_EFUSE_CONTENT_LEN_BANK:
+ pu2Tmp = (u16 *) pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723A;
+ else
+ *pu2Tmp = EFUSE_BT_REAL_BANK_CONTENT_LEN;
+ break;
+
+ default:
+ pu1Tmp = (u8 *) pOut;
+ *pu1Tmp = 0;
+ break;
+ }
+}
+
+#define VOLTAGE_V25 0x03
+#define LDOE25_SHIFT 28
+
+static void
+Hal_EfusePowerSwitch(struct rtw_adapter *padapter, u8 bWrite, u8 PwrState)
+{
+ u8 tempval;
+ u16 tmpV16;
+
+ if (PwrState == true) {
+ rtw_write8(padapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
+
+ /* 1.2V Power: From VDDON with Power
+ Cut(0x0000h[15]), default valid */
+ tmpV16 = rtw_read16(padapter, REG_SYS_ISO_CTRL);
+ if (!(tmpV16 & PWC_EV12V)) {
+ tmpV16 |= PWC_EV12V;
+ rtw_write16(padapter, REG_SYS_ISO_CTRL, tmpV16);
+ }
+ /* Reset: 0x0000h[28], default valid */
+ tmpV16 = rtw_read16(padapter, REG_SYS_FUNC_EN);
+ if (!(tmpV16 & FEN_ELDR)) {
+ tmpV16 |= FEN_ELDR;
+ rtw_write16(padapter, REG_SYS_FUNC_EN, tmpV16);
+ }
+
+ /* Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock
+ from ANA, default valid */
+ tmpV16 = rtw_read16(padapter, REG_SYS_CLKR);
+ if ((!(tmpV16 & LOADER_CLK_EN)) || (!(tmpV16 & ANA8M))) {
+ tmpV16 |= (LOADER_CLK_EN | ANA8M);
+ rtw_write16(padapter, REG_SYS_CLKR, tmpV16);
+ }
+
+ if (bWrite == true) {
+ /* Enable LDO 2.5V before read/write action */
+ tempval = rtw_read8(padapter, EFUSE_TEST + 3);
+ tempval &= 0x0F;
+ tempval |= (VOLTAGE_V25 << 4);
+ rtw_write8(padapter, EFUSE_TEST + 3, (tempval | 0x80));
+ }
+ } else {
+ rtw_write8(padapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
+
+ if (bWrite == true) {
+ /* Disable LDO 2.5V after read/write action */
+ tempval = rtw_read8(padapter, EFUSE_TEST + 3);
+ rtw_write8(padapter, EFUSE_TEST + 3, (tempval & 0x7F));
+ }
+ }
+}
+
+static void
+hal_ReadEFuse_WiFi(struct rtw_adapter *padapter,
+ u16 _offset, u16 _size_byte, u8 *pbuf)
+{
+ u8 *efuseTbl = NULL;
+ u16 eFuse_Addr = 0;
+ u8 offset, wden;
+ u8 efuseHeader, efuseExtHdr, efuseData;
+ u16 i, total, used;
+
+ /* Do NOT exceed the total size of the EFuse table.
+ Added by Roger, 2008.11.10. */
+ if ((_offset + _size_byte) > EFUSE_MAP_LEN_8723A) {
+ DBG_8723A("%s: Invalid offset(%#x) with read bytes(%#x)!!\n",
+ __func__, _offset, _size_byte);
+ return;
+ }
+
+ efuseTbl = kmalloc(EFUSE_MAP_LEN_8723A, GFP_KERNEL);
+ if (efuseTbl == NULL) {
+ DBG_8723A("%s: alloc efuseTbl fail!\n", __func__);
+ return;
+ }
+ /* 0xff will be efuse default value instead of 0x00. */
+ memset(efuseTbl, 0xFF, EFUSE_MAP_LEN_8723A);
+
+ /* switch bank back to bank 0 for later BT and wifi use. */
+ hal_EfuseSwitchToBank(padapter, 0);
+
+ while (AVAILABLE_EFUSE_ADDR(eFuse_Addr)) {
+ ReadEFuseByte23a(padapter, eFuse_Addr++, &efuseHeader);
+ if (efuseHeader == 0xFF) {
+ DBG_8723A("%s: data end at address =%#x\n", __func__,
+ eFuse_Addr);
+ break;
+ }
+
+ /* Check PG header for section num. */
+ if (EXT_HEADER(efuseHeader)) { /* extended header */
+ offset = GET_HDR_OFFSET_2_0(efuseHeader);
+
+ ReadEFuseByte23a(padapter, eFuse_Addr++, &efuseExtHdr);
+ if (ALL_WORDS_DISABLED(efuseExtHdr)) {
+ continue;
+ }
+
+ offset |= ((efuseExtHdr & 0xF0) >> 1);
+ wden = (efuseExtHdr & 0x0F);
+ } else {
+ offset = ((efuseHeader >> 4) & 0x0f);
+ wden = (efuseHeader & 0x0f);
+ }
+
+ if (offset < EFUSE_MAX_SECTION_8723A) {
+ u16 addr;
+ /* Get word enable value from PG header */
+
+ addr = offset * PGPKT_DATA_SIZE;
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in the section */
+ if (!(wden & (0x01 << i))) {
+ ReadEFuseByte23a(padapter, eFuse_Addr++,
+ &efuseData);
+ efuseTbl[addr] = efuseData;
+
+ ReadEFuseByte23a(padapter, eFuse_Addr++,
+ &efuseData);
+ efuseTbl[addr + 1] = efuseData;
+ }
+ addr += 2;
+ }
+ } else {
+ DBG_8723A(KERN_ERR "%s: offset(%d) is illegal!!\n",
+ __func__, offset);
+ eFuse_Addr += Efuse_CalculateWordCnts23a(wden) * 2;
+ }
+ }
+
+ /* Copy from Efuse map to output pointer memory!!! */
+ for (i = 0; i < _size_byte; i++)
+ pbuf[i] = efuseTbl[_offset + i];
+
+ /* Calculate Efuse utilization */
+ EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI,
+ TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &total);
+ used = eFuse_Addr - 1;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_EFUSE_BYTES, (u8 *)&used);
+
+ kfree(efuseTbl);
+}
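+
+/*
+ * PG header decoding as used in hal_ReadEFuse_WiFi() above, for a
+ * hypothetical one-byte header of 0x3c:
+ *
+ *	offset = (0x3c >> 4) & 0x0f = 3      map section 3
+ *	wden   =  0x3c       & 0x0f = 0xc    words 0 and 1 follow
+ *
+ * A cleared bit in wden means the corresponding 2-byte word was
+ * programmed, so this packet carries 4 data bytes for map offsets
+ * 24..27 (section 3 * PGPKT_DATA_SIZE, with 8-byte sections). An
+ * extended header supplies the upper offset bits from a second byte:
+ * offset |= (ext_hdr & 0xf0) >> 1.
+ */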
+
+static void
+hal_ReadEFuse_BT(struct rtw_adapter *padapter,
+ u16 _offset, u16 _size_byte, u8 *pbuf)
+{
+ u8 *efuseTbl;
+ u8 bank;
+ u16 eFuse_Addr;
+ u8 efuseHeader, efuseExtHdr, efuseData;
+ u8 offset, wden;
+ u16 i, total, used;
+
+ /* Do NOT exceed the total size of the EFuse table.
+ Added by Roger, 2008.11.10. */
+ if ((_offset + _size_byte) > EFUSE_BT_MAP_LEN) {
+ DBG_8723A("%s: Invalid offset(%#x) with read bytes(%#x)!!\n",
+ __func__, _offset, _size_byte);
+ return;
+ }
+
+ efuseTbl = kmalloc(EFUSE_BT_MAP_LEN, GFP_KERNEL);
+ if (efuseTbl == NULL) {
+ DBG_8723A("%s: efuseTbl malloc fail!\n", __func__);
+ return;
+ }
+ /* 0xff will be efuse default value instead of 0x00. */
+ memset(efuseTbl, 0xFF, EFUSE_BT_MAP_LEN);
+
+ EFUSE_GetEfuseDefinition23a(padapter, EFUSE_BT,
+ TYPE_AVAILABLE_EFUSE_BYTES_BANK, &total);
+
+ for (bank = 1; bank < EFUSE_MAX_BANK; bank++) {
+ if (hal_EfuseSwitchToBank(padapter, bank) == false) {
+ DBG_8723A("%s: hal_EfuseSwitchToBank Fail!!\n",
+ __func__);
+ goto exit;
+ }
+
+ eFuse_Addr = 0;
+
+ while (AVAILABLE_EFUSE_ADDR(eFuse_Addr)) {
+ ReadEFuseByte23a(padapter, eFuse_Addr++, &efuseHeader);
+ if (efuseHeader == 0xFF)
+ break;
+
+ /* Check PG header for section num. */
+ if (EXT_HEADER(efuseHeader)) { /* extended header */
+ offset = GET_HDR_OFFSET_2_0(efuseHeader);
+
+ ReadEFuseByte23a(padapter, eFuse_Addr++,
+ &efuseExtHdr);
+ if (ALL_WORDS_DISABLED(efuseExtHdr)) {
+ continue;
+ }
+
+ offset |= ((efuseExtHdr & 0xF0) >> 1);
+ wden = (efuseExtHdr & 0x0F);
+ } else {
+ offset = ((efuseHeader >> 4) & 0x0f);
+ wden = (efuseHeader & 0x0f);
+ }
+
+ if (offset < EFUSE_BT_MAX_SECTION) {
+ u16 addr;
+
+ /* Get word enable value from PG header */
+
+ addr = offset * PGPKT_DATA_SIZE;
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in
+ the section */
+ if (!(wden & (0x01 << i))) {
+ ReadEFuseByte23a(padapter,
+ eFuse_Addr++,
+ &efuseData);
+ efuseTbl[addr] = efuseData;
+
+ ReadEFuseByte23a(padapter,
+ eFuse_Addr++,
+ &efuseData);
+ efuseTbl[addr + 1] = efuseData;
+ }
+ addr += 2;
+ }
+ } else {
+ DBG_8723A(KERN_ERR
+ "%s: offset(%d) is illegal!!\n",
+ __func__, offset);
+ eFuse_Addr += Efuse_CalculateWordCnts23a(wden) * 2;
+ }
+ }
+
+ if ((eFuse_Addr - 1) < total) {
+ DBG_8723A("%s: bank(%d) data end at %#x\n",
+ __func__, bank, eFuse_Addr - 1);
+ break;
+ }
+ }
+
+ /* switch bank back to bank 0 for later BT and wifi use. */
+ hal_EfuseSwitchToBank(padapter, 0);
+
+ /* Copy from Efuse map to output pointer memory!!! */
+ for (i = 0; i < _size_byte; i++)
+ pbuf[i] = efuseTbl[_offset + i];
+
+ /* */
+ /* Calculate Efuse utilization. */
+ /* */
+ EFUSE_GetEfuseDefinition23a(padapter, EFUSE_BT,
+ TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &total);
+ used = (EFUSE_BT_REAL_BANK_CONTENT_LEN * (bank - 1)) + eFuse_Addr - 1;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_EFUSE_BT_BYTES, (u8 *) &used);
+
+exit:
+ kfree(efuseTbl);
+}
+
+static void
+Hal_ReadEFuse(struct rtw_adapter *padapter,
+ u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf)
+{
+ if (efuseType == EFUSE_WIFI)
+ hal_ReadEFuse_WiFi(padapter, _offset, _size_byte, pbuf);
+ else
+ hal_ReadEFuse_BT(padapter, _offset, _size_byte, pbuf);
+}
+
+static u16
+hal_EfuseGetCurrentSize_WiFi(struct rtw_adapter *padapter)
+{
+ u16 efuse_addr = 0;
+ u8 hoffset = 0, hworden = 0;
+ u8 efuse_data, word_cnts = 0;
+
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_EFUSE_BYTES, (u8 *) &efuse_addr);
+
+ DBG_8723A("%s: start_efuse_addr = 0x%X\n", __func__, efuse_addr);
+
+ /* switch bank back to bank 0 for later BT and wifi use. */
+ hal_EfuseSwitchToBank(padapter, 0);
+
+ while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ if (efuse_OneByteRead23a(padapter, efuse_addr, &efuse_data) ==
+ false) {
+ DBG_8723A(KERN_ERR "%s: efuse_OneByteRead23a Fail! "
+ "addr = 0x%X !!\n", __func__, efuse_addr);
+ break;
+ }
+
+ if (efuse_data == 0xFF)
+ break;
+
+ if (EXT_HEADER(efuse_data)) {
+ hoffset = GET_HDR_OFFSET_2_0(efuse_data);
+ efuse_addr++;
+ efuse_OneByteRead23a(padapter, efuse_addr, &efuse_data);
+ if (ALL_WORDS_DISABLED(efuse_data)) {
+ continue;
+ }
+
+ hoffset |= ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
+ } else {
+ hoffset = (efuse_data >> 4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ }
+
+ word_cnts = Efuse_CalculateWordCnts23a(hworden);
+ efuse_addr += (word_cnts * 2) + 1;
+ }
+
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_EFUSE_BYTES, (u8 *) &efuse_addr);
+
+ DBG_8723A("%s: CurrentSize =%d\n", __func__, efuse_addr);
+
+ return efuse_addr;
+}
+
+static u16
+hal_EfuseGetCurrentSize_BT(struct rtw_adapter *padapter)
+{
+ u16 btusedbytes;
+ u16 efuse_addr;
+ u8 bank, startBank;
+ u8 hoffset = 0, hworden = 0;
+ u8 efuse_data, word_cnts = 0;
+ u16 retU2 = 0;
+
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_EFUSE_BT_BYTES, (u8 *) &btusedbytes);
+
+ efuse_addr = (u16) ((btusedbytes % EFUSE_BT_REAL_BANK_CONTENT_LEN));
+ startBank = (u8) (1 + (btusedbytes / EFUSE_BT_REAL_BANK_CONTENT_LEN));
+
+ DBG_8723A("%s: start from bank =%d addr = 0x%X\n", __func__, startBank,
+ efuse_addr);
+
+ EFUSE_GetEfuseDefinition23a(padapter, EFUSE_BT,
+ TYPE_AVAILABLE_EFUSE_BYTES_BANK, &retU2);
+
+ for (bank = startBank; bank < EFUSE_MAX_BANK; bank++) {
+ if (hal_EfuseSwitchToBank(padapter, bank) == false) {
+ DBG_8723A(KERN_ERR "%s: switch bank(%d) Fail!!\n",
+ __func__, bank);
+ bank = EFUSE_MAX_BANK;
+ break;
+ }
+
+		/* Only when the bank is switched do we have to reset
+		   efuse_addr. */
+ if (bank != startBank)
+ efuse_addr = 0;
+
+ while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ if (efuse_OneByteRead23a(padapter, efuse_addr,
+ &efuse_data) == false) {
+ DBG_8723A(KERN_ERR "%s: efuse_OneByteRead23a Fail!"
+ " addr = 0x%X !!\n",
+ __func__, efuse_addr);
+ bank = EFUSE_MAX_BANK;
+ break;
+ }
+
+ if (efuse_data == 0xFF)
+ break;
+
+ if (EXT_HEADER(efuse_data)) {
+ hoffset = GET_HDR_OFFSET_2_0(efuse_data);
+ efuse_addr++;
+ efuse_OneByteRead23a(padapter, efuse_addr,
+ &efuse_data);
+ if (ALL_WORDS_DISABLED(efuse_data)) {
+ efuse_addr++;
+ continue;
+ }
+
+ hoffset |= ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
+ } else {
+ hoffset = (efuse_data >> 4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ }
+ word_cnts = Efuse_CalculateWordCnts23a(hworden);
+ /* read next header */
+ efuse_addr += (word_cnts * 2) + 1;
+ }
+
+ /* Check if we need to check next bank efuse */
+ if (efuse_addr < retU2) {
+ break; /* don't need to check next bank. */
+ }
+ }
+
+ retU2 = ((bank - 1) * EFUSE_BT_REAL_BANK_CONTENT_LEN) + efuse_addr;
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_EFUSE_BT_BYTES, (u8 *)&retU2);
+
+ DBG_8723A("%s: CurrentSize =%d\n", __func__, retU2);
+ return retU2;
+}
+
+static u16
+Hal_EfuseGetCurrentSize(struct rtw_adapter *pAdapter, u8 efuseType)
+{
+ u16 ret = 0;
+
+ if (efuseType == EFUSE_WIFI)
+ ret = hal_EfuseGetCurrentSize_WiFi(pAdapter);
+ else
+ ret = hal_EfuseGetCurrentSize_BT(pAdapter);
+
+ return ret;
+}
+
+static u8
+Hal_EfuseWordEnableDataWrite(struct rtw_adapter *padapter,
+ u16 efuse_addr, u8 word_en, u8 *data)
+{
+ u16 tmpaddr = 0;
+ u16 start_addr = efuse_addr;
+ u8 badworden = 0x0F;
+ u8 tmpdata[PGPKT_DATA_SIZE];
+
+ memset(tmpdata, 0xFF, PGPKT_DATA_SIZE);
+
+ if (!(word_en & BIT(0))) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite23a(padapter, start_addr++, data[0]);
+ efuse_OneByteWrite23a(padapter, start_addr++, data[1]);
+
+ efuse_OneByteRead23a(padapter, tmpaddr, &tmpdata[0]);
+ efuse_OneByteRead23a(padapter, tmpaddr + 1, &tmpdata[1]);
+ if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1])) {
+ badworden &= (~BIT(0));
+ }
+ }
+ if (!(word_en & BIT(1))) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite23a(padapter, start_addr++, data[2]);
+ efuse_OneByteWrite23a(padapter, start_addr++, data[3]);
+
+ efuse_OneByteRead23a(padapter, tmpaddr, &tmpdata[2]);
+ efuse_OneByteRead23a(padapter, tmpaddr + 1, &tmpdata[3]);
+ if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3])) {
+ badworden &= (~BIT(1));
+ }
+ }
+ if (!(word_en & BIT(2))) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite23a(padapter, start_addr++, data[4]);
+ efuse_OneByteWrite23a(padapter, start_addr++, data[5]);
+
+ efuse_OneByteRead23a(padapter, tmpaddr, &tmpdata[4]);
+ efuse_OneByteRead23a(padapter, tmpaddr + 1, &tmpdata[5]);
+ if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5])) {
+ badworden &= (~BIT(2));
+ }
+ }
+ if (!(word_en & BIT(3))) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite23a(padapter, start_addr++, data[6]);
+ efuse_OneByteWrite23a(padapter, start_addr++, data[7]);
+
+ efuse_OneByteRead23a(padapter, tmpaddr, &tmpdata[6]);
+ efuse_OneByteRead23a(padapter, tmpaddr + 1, &tmpdata[7]);
+ if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7])) {
+ badworden &= (~BIT(3));
+ }
+ }
+
+ return badworden;
+}
+
+static s32
+Hal_EfusePgPacketRead(struct rtw_adapter *padapter, u8 offset, u8 *data)
+{
+ u8 efuse_data, word_cnts = 0;
+ u16 efuse_addr = 0;
+ u8 hoffset = 0, hworden = 0;
+ u8 i;
+ u8 max_section = 0;
+ s32 ret;
+
+ if (data == NULL)
+ return false;
+
+ EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION,
+ &max_section);
+ if (offset > max_section) {
+ DBG_8723A("%s: Packet offset(%d) is illegal(>%d)!\n",
+ __func__, offset, max_section);
+ return false;
+ }
+
+ memset(data, 0xFF, PGPKT_DATA_SIZE);
+ ret = true;
+
+ /* */
+	/* <Roger_TODO> The Efuse has been pre-programmed with 5 dummy bytes
+	   at the end by CP. */
+	/* Skip the dummy parts to prevent reading unexpected data from the Efuse. */
+	/* Bypass for now. 2009.02.19. */
+ /* */
+ while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ if (efuse_OneByteRead23a(padapter, efuse_addr++, &efuse_data) ==
+ false) {
+ ret = false;
+ break;
+ }
+
+ if (efuse_data == 0xFF)
+ break;
+
+ if (EXT_HEADER(efuse_data)) {
+ hoffset = GET_HDR_OFFSET_2_0(efuse_data);
+ efuse_OneByteRead23a(padapter, efuse_addr++, &efuse_data);
+ if (ALL_WORDS_DISABLED(efuse_data)) {
+ DBG_8723A("%s: Error!! All words disabled!\n",
+ __func__);
+ continue;
+ }
+
+ hoffset |= ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
+ } else {
+ hoffset = (efuse_data >> 4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ }
+
+ if (hoffset == offset) {
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in the section */
+ if (!(hworden & (0x01 << i))) {
+ ReadEFuseByte23a(padapter, efuse_addr++,
+ &efuse_data);
+ data[i * 2] = efuse_data;
+
+ ReadEFuseByte23a(padapter, efuse_addr++,
+ &efuse_data);
+ data[(i * 2) + 1] = efuse_data;
+ }
+ }
+ } else {
+ word_cnts = Efuse_CalculateWordCnts23a(hworden);
+ efuse_addr += word_cnts * 2;
+ }
+ }
+
+ return ret;
+}
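+
+/*
+ * PG packet header layout, as parsed above and as written by the PG-packet
+ * write helpers further down in this file: a plain header packs the section
+ * offset in bits [7:4] and word_en in bits [3:0]; an extended header spans
+ * two bytes and is recombined as
+ *
+ *	hoffset = GET_HDR_OFFSET_2_0(byte0) | ((byte1 & 0xF0) >> 1);
+ *	hworden = byte1 & 0x0F;
+ *
+ * A word_en bit that is 0 means the corresponding 2-byte word follows in
+ * the packet data.
+ */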
+
+static u8
+hal_EfusePgCheckAvailableAddr(struct rtw_adapter *pAdapter, u8 efuseType)
+{
+ u16 max_available = 0;
+ u16 current_size;
+
+ EFUSE_GetEfuseDefinition23a(pAdapter, efuseType,
+ TYPE_AVAILABLE_EFUSE_BYTES_TOTAL,
+ &max_available);
+
+ current_size = Efuse_GetCurrentSize23a(pAdapter, efuseType);
+ if (current_size >= max_available) {
+ DBG_8723A("%s: Error!! current_size(%d)>max_available(%d)\n",
+ __func__, current_size, max_available);
+ return false;
+ }
+ return true;
+}
+
+static void
+hal_EfuseConstructPGPkt(u8 offset, u8 word_en, u8 *pData,
+ struct pg_pkt_struct *pTargetPkt)
+{
+ memset(pTargetPkt->data, 0xFF, PGPKT_DATA_SIZE);
+ pTargetPkt->offset = offset;
+ pTargetPkt->word_en = word_en;
+ efuse_WordEnableDataRead23a(word_en, pData, pTargetPkt->data);
+ pTargetPkt->word_cnts = Efuse_CalculateWordCnts23a(pTargetPkt->word_en);
+}
+
+static u8
+hal_EfusePartialWriteCheck(struct rtw_adapter *padapter, u8 efuseType,
+ u16 *pAddr, struct pg_pkt_struct *pTargetPkt)
+{
+ u8 bRet = false;
+ u16 startAddr = 0, efuse_max_available_len = 0, efuse_max = 0;
+ u8 efuse_data = 0;
+
+ EFUSE_GetEfuseDefinition23a(padapter, efuseType,
+ TYPE_AVAILABLE_EFUSE_BYTES_TOTAL,
+ &efuse_max_available_len);
+ EFUSE_GetEfuseDefinition23a(padapter, efuseType,
+ TYPE_EFUSE_CONTENT_LEN_BANK, &efuse_max);
+
+ if (efuseType == EFUSE_WIFI) {
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_EFUSE_BYTES,
+ (u8 *) &startAddr);
+ } else {
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_EFUSE_BT_BYTES,
+ (u8 *) &startAddr);
+ }
+ startAddr %= efuse_max;
+
+ while (1) {
+ if (startAddr >= efuse_max_available_len) {
+ bRet = false;
+ DBG_8723A("%s: startAddr(%d) >= efuse_max_available_"
+ "len(%d)\n", __func__, startAddr,
+ efuse_max_available_len);
+ break;
+ }
+
+ if (efuse_OneByteRead23a(padapter, startAddr, &efuse_data) &&
+ (efuse_data != 0xFF)) {
+ bRet = false;
+			DBG_8723A("%s: Something Wrong! last byte(%#X = 0x%02X) "
+ "is not 0xFF\n", __func__,
+ startAddr, efuse_data);
+ break;
+ } else {
+ /* not used header, 0xff */
+ *pAddr = startAddr;
+ bRet = true;
+ break;
+ }
+ }
+
+ return bRet;
+}
+
+static u8
+hal_EfusePgPacketWrite1ByteHeader(struct rtw_adapter *pAdapter, u8 efuseType,
+ u16 *pAddr, struct pg_pkt_struct *pTargetPkt)
+{
+ u8 pg_header = 0, tmp_header = 0;
+ u16 efuse_addr = *pAddr;
+ u8 repeatcnt = 0;
+
+ pg_header = ((pTargetPkt->offset << 4) & 0xf0) | pTargetPkt->word_en;
+
+ do {
+ efuse_OneByteWrite23a(pAdapter, efuse_addr, pg_header);
+ efuse_OneByteRead23a(pAdapter, efuse_addr, &tmp_header);
+ if (tmp_header != 0xFF)
+ break;
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
+ DBG_8723A("%s: Repeat over limit for pg_header!!\n",
+ __func__);
+ return false;
+ }
+ } while (1);
+
+ if (tmp_header != pg_header) {
+ DBG_8723A(KERN_ERR "%s: PG Header Fail!!(pg = 0x%02X "
+ "read = 0x%02X)\n", __func__,
+ pg_header, tmp_header);
+ return false;
+ }
+
+ *pAddr = efuse_addr;
+
+ return true;
+}
+
+static u8
+hal_EfusePgPacketWrite2ByteHeader(struct rtw_adapter *padapter, u8 efuseType,
+ u16 *pAddr, struct pg_pkt_struct *pTargetPkt)
+{
+ u16 efuse_addr, efuse_max_available_len = 0;
+ u8 pg_header = 0, tmp_header = 0;
+ u8 repeatcnt = 0;
+
+ EFUSE_GetEfuseDefinition23a(padapter, efuseType,
+ TYPE_AVAILABLE_EFUSE_BYTES_BANK,
+ &efuse_max_available_len);
+
+ efuse_addr = *pAddr;
+ if (efuse_addr >= efuse_max_available_len) {
+		DBG_8723A("%s: addr(%d) over available(%d)!!\n", __func__,
+ efuse_addr, efuse_max_available_len);
+ return false;
+ }
+
+ pg_header = ((pTargetPkt->offset & 0x07) << 5) | 0x0F;
+
+ do {
+ efuse_OneByteWrite23a(padapter, efuse_addr, pg_header);
+ efuse_OneByteRead23a(padapter, efuse_addr, &tmp_header);
+ if (tmp_header != 0xFF)
+ break;
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
+ DBG_8723A("%s: Repeat over limit for pg_header!!\n",
+ __func__);
+ return false;
+ }
+ } while (1);
+
+ if (tmp_header != pg_header) {
+ DBG_8723A(KERN_ERR
+ "%s: PG Header Fail!!(pg = 0x%02X read = 0x%02X)\n",
+ __func__, pg_header, tmp_header);
+ return false;
+ }
+
+ /* to write ext_header */
+ efuse_addr++;
+ pg_header = ((pTargetPkt->offset & 0x78) << 1) | pTargetPkt->word_en;
+
+ do {
+ efuse_OneByteWrite23a(padapter, efuse_addr, pg_header);
+ efuse_OneByteRead23a(padapter, efuse_addr, &tmp_header);
+ if (tmp_header != 0xFF)
+ break;
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
+ DBG_8723A("%s: Repeat over limit for ext_header!!\n",
+ __func__);
+ return false;
+ }
+ } while (1);
+
+ if (tmp_header != pg_header) { /* offset PG fail */
+ DBG_8723A(KERN_ERR
+ "%s: PG EXT Header Fail!!(pg = 0x%02X read = 0x%02X)\n",
+ __func__, pg_header, tmp_header);
+ return false;
+ }
+
+ *pAddr = efuse_addr;
+
+ return true;
+}
+
+static u8
+hal_EfusePgPacketWriteHeader(struct rtw_adapter *padapter, u8 efuseType,
+ u16 *pAddr, struct pg_pkt_struct *pTargetPkt)
+{
+ u8 bRet = false;
+
+ if (pTargetPkt->offset >= EFUSE_MAX_SECTION_BASE) {
+ bRet = hal_EfusePgPacketWrite2ByteHeader(padapter, efuseType,
+ pAddr, pTargetPkt);
+ } else {
+ bRet = hal_EfusePgPacketWrite1ByteHeader(padapter, efuseType,
+ pAddr, pTargetPkt);
+ }
+
+ return bRet;
+}
+
+static u8
+hal_EfusePgPacketWriteData(struct rtw_adapter *pAdapter, u8 efuseType,
+ u16 *pAddr, struct pg_pkt_struct *pTargetPkt)
+{
+ u16 efuse_addr;
+ u8 badworden;
+
+ efuse_addr = *pAddr;
+ badworden =
+ Efuse_WordEnableDataWrite23a(pAdapter, efuse_addr + 1,
+ pTargetPkt->word_en, pTargetPkt->data);
+ if (badworden != 0x0F) {
+ DBG_8723A("%s: Fail!!\n", __func__);
+ return false;
+ }
+
+ return true;
+}
+
+static s32
+Hal_EfusePgPacketWrite(struct rtw_adapter *padapter,
+ u8 offset, u8 word_en, u8 *pData)
+{
+ struct pg_pkt_struct targetPkt;
+ u16 startAddr = 0;
+ u8 efuseType = EFUSE_WIFI;
+
+ if (!hal_EfusePgCheckAvailableAddr(padapter, efuseType))
+ return false;
+
+ hal_EfuseConstructPGPkt(offset, word_en, pData, &targetPkt);
+
+ if (!hal_EfusePartialWriteCheck(padapter, efuseType,
+ &startAddr, &targetPkt))
+ return false;
+
+ if (!hal_EfusePgPacketWriteHeader(padapter, efuseType,
+ &startAddr, &targetPkt))
+ return false;
+
+ if (!hal_EfusePgPacketWriteData(padapter, efuseType,
+ &startAddr, &targetPkt))
+ return false;
+
+ return true;
+}
+
+static bool
+Hal_EfusePgPacketWrite_BT(struct rtw_adapter *pAdapter,
+ u8 offset, u8 word_en, u8 *pData)
+{
+ struct pg_pkt_struct targetPkt;
+ u16 startAddr = 0;
+ u8 efuseType = EFUSE_BT;
+
+ if (!hal_EfusePgCheckAvailableAddr(pAdapter, efuseType))
+ return false;
+
+ hal_EfuseConstructPGPkt(offset, word_en, pData, &targetPkt);
+
+ if (!hal_EfusePartialWriteCheck(pAdapter, efuseType,
+ &startAddr, &targetPkt))
+ return false;
+
+ if (!hal_EfusePgPacketWriteHeader(pAdapter, efuseType,
+ &startAddr, &targetPkt))
+ return false;
+
+ if (!hal_EfusePgPacketWriteData(pAdapter, efuseType,
+ &startAddr, &targetPkt))
+ return false;
+
+ return true;
+}
+
+static struct hal_version ReadChipVersion8723A(struct rtw_adapter *padapter)
+{
+ u32 value32;
+ struct hal_version ChipVersion;
+ struct hal_data_8723a *pHalData;
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ value32 = rtw_read32(padapter, REG_SYS_CFG);
+ ChipVersion.ICType = CHIP_8723A;
+ ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
+ ChipVersion.RFType = RF_TYPE_1T1R;
+ ChipVersion.VendorType =
+ ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
+ ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
+
+ /* For regulator mode. by tynli. 2011.01.14 */
+ pHalData->RegulatorMode = ((value32 & SPS_SEL) ?
+ RT_LDO_REGULATOR : RT_SWITCHING_REGULATOR);
+
+ value32 = rtw_read32(padapter, REG_GPIO_OUTSTS);
+ /* ROM code version. */
+ ChipVersion.ROMVer = ((value32 & RF_RL_ID) >> 20);
+
+ /* For multi-function consideration. Added by Roger, 2010.10.06. */
+ pHalData->MultiFunc = RT_MULTI_FUNC_NONE;
+ value32 = rtw_read32(padapter, REG_MULTI_FUNC_CTRL);
+ pHalData->MultiFunc |=
+ ((value32 & WL_FUNC_EN) ? RT_MULTI_FUNC_WIFI : 0);
+ pHalData->MultiFunc |= ((value32 & BT_FUNC_EN) ? RT_MULTI_FUNC_BT : 0);
+ pHalData->MultiFunc |=
+ ((value32 & GPS_FUNC_EN) ? RT_MULTI_FUNC_GPS : 0);
+ pHalData->PolarityCtl =
+ ((value32 & WL_HWPDN_SL) ? RT_POLARITY_HIGH_ACT :
+ RT_POLARITY_LOW_ACT);
+ dump_chip_info23a(ChipVersion);
+ pHalData->VersionID = ChipVersion;
+
+ if (IS_1T2R(ChipVersion))
+ pHalData->rf_type = RF_1T2R;
+ else if (IS_2T2R(ChipVersion))
+ pHalData->rf_type = RF_2T2R;
+ else
+ pHalData->rf_type = RF_1T1R;
+
+ MSG_8723A("RF_Type is %x!!\n", pHalData->rf_type);
+
+ return ChipVersion;
+}
+
+static void rtl8723a_read_chip_version(struct rtw_adapter *padapter)
+{
+ ReadChipVersion8723A(padapter);
+}
+
+/* */
+/* */
+/* 20100209 Joseph: */
+/* This function is used only for 92C to set the REG_BCN_CTRL(0x550) register. */
+/* We just keep the value of the register in the variable
+   pHalData->RegBcnCtrlVal and then operate on */
+/* the value of the register via an atomic operation. */
+/* This prevents a race condition when setting this register. */
+/* The value of pHalData->RegBcnCtrlVal is initialized in the
+   HwConfigureRTL8192CE() function. */
+/* */
+void SetBcnCtrlReg23a(struct rtw_adapter *padapter, u8 SetBits, u8 ClearBits)
+{
+ struct hal_data_8723a *pHalData;
+ u32 addr;
+ u8 *pRegBcnCtrlVal;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pRegBcnCtrlVal = (u8 *)&pHalData->RegBcnCtrlVal;
+
+ addr = REG_BCN_CTRL;
+
+ *pRegBcnCtrlVal = rtw_read8(padapter, addr);
+ *pRegBcnCtrlVal |= SetBits;
+ *pRegBcnCtrlVal &= ~ClearBits;
+
+ rtw_write8(padapter, addr, *pRegBcnCtrlVal);
+}
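+
+/*
+ * Usage sketch: callers pass a set-mask and a clear-mask instead of writing
+ * REG_BCN_CTRL directly, e.g. in hw_var_set_opmode() below:
+ *
+ *	val8 = DIS_TSF_UDT | EN_BCN_FUNCTION | DIS_ATIM;
+ *	SetBcnCtrlReg23a(padapter, val8, ~val8);
+ *
+ * After each call pHalData->RegBcnCtrlVal holds the value last written back.
+ */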
+
+void rtl8723a_InitBeaconParameters(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ rtw_write16(padapter, REG_BCN_CTRL, 0x1010);
+ pHalData->RegBcnCtrlVal = 0x1010;
+
+	/* TODO: Remove these magic numbers */
+ rtw_write16(padapter, REG_TBTT_PROHIBIT, 0x6404); /* ms */
+ /* Firmware will control REG_DRVERLYINT when power saving is enable, */
+ /* so don't set this register on STA mode. */
+ if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE) == false)
+ rtw_write8(padapter, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);
+ /* 2ms */
+ rtw_write8(padapter, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
+
+	/* Suggested by designer timchen. Change beacon AIFS to the
+	   largest number because the test chip does not do contention
+	   before sending a beacon. by tynli. 2009.11.03 */
+ rtw_write16(padapter, REG_BCNTCFG, 0x660F);
+}
+
+static void ResumeTxBeacon(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+	/* 2010.03.01. Marked by tynli. No need to call a workitem because
+	   we record the value, */
+	/* which should be read from the register, in a global variable. */
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("+ResumeTxBeacon\n"));
+
+ pHalData->RegFwHwTxQCtrl |= BIT(6);
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL + 2, pHalData->RegFwHwTxQCtrl);
+ rtw_write8(padapter, REG_TBTT_PROHIBIT + 1, 0xff);
+ pHalData->RegReg542 |= BIT(0);
+ rtw_write8(padapter, REG_TBTT_PROHIBIT + 2, pHalData->RegReg542);
+}
+
+static void StopTxBeacon(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+	/* 2010.03.01. Marked by tynli. No need to call a workitem because
+	   we record the value, */
+	/* which should be read from the register, in a global variable. */
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("+StopTxBeacon\n"));
+
+ pHalData->RegFwHwTxQCtrl &= ~BIT(6);
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL + 2, pHalData->RegFwHwTxQCtrl);
+ rtw_write8(padapter, REG_TBTT_PROHIBIT + 1, 0x64);
+ pHalData->RegReg542 &= ~BIT(0);
+ rtw_write8(padapter, REG_TBTT_PROHIBIT + 2, pHalData->RegReg542);
+
+ CheckFwRsvdPageContent23a(padapter); /* 2010.06.23. Added by tynli. */
+}
+
+static void _BeaconFunctionEnable(struct rtw_adapter *padapter, u8 Enable,
+ u8 Linked)
+{
+ SetBcnCtrlReg23a(padapter, DIS_TSF_UDT | EN_BCN_FUNCTION | DIS_BCNQ_SUB,
+ 0);
+ rtw_write8(padapter, REG_RD_CTRL + 1, 0x6F);
+}
+
+static void rtl8723a_SetBeaconRelatedRegisters(struct rtw_adapter *padapter)
+{
+ u32 value32;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ /* reset TSF, enable update TSF, correcting TSF On Beacon */
+
+ /* REG_BCN_INTERVAL */
+ /* REG_BCNDMATIM */
+ /* REG_ATIMWND */
+ /* REG_TBTT_PROHIBIT */
+ /* REG_DRVERLYINT */
+ /* REG_BCN_MAX_ERR */
+ /* REG_BCNTCFG (0x510) */
+ /* REG_DUAL_TSF_RST */
+ /* REG_BCN_CTRL (0x550) */
+
+ /* */
+ /* ATIM window */
+ /* */
+ rtw_write16(padapter, REG_ATIMWND, 2);
+
+ /* */
+ /* Beacon interval (in unit of TU). */
+ /* */
+ rtw_write16(padapter, REG_BCN_INTERVAL, pmlmeinfo->bcn_interval);
+
+ rtl8723a_InitBeaconParameters(padapter);
+
+ rtw_write8(padapter, REG_SLOT, 0x09);
+
+ /* */
+ /* Reset TSF Timer to zero, added by Roger. 2008.06.24 */
+ /* */
+ value32 = rtw_read32(padapter, REG_TCR);
+ value32 &= ~TSFRST;
+ rtw_write32(padapter, REG_TCR, value32);
+
+ value32 |= TSFRST;
+ rtw_write32(padapter, REG_TCR, value32);
+
+	/* NOTE: Fix test chip's bug (about contention window's randomness) */
+ if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE |
+ WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE) == true) {
+ rtw_write8(padapter, REG_RXTSF_OFFSET_CCK, 0x50);
+ rtw_write8(padapter, REG_RXTSF_OFFSET_OFDM, 0x50);
+ }
+
+ _BeaconFunctionEnable(padapter, true, true);
+
+ ResumeTxBeacon(padapter);
+ SetBcnCtrlReg23a(padapter, DIS_BCNQ_SUB, 0);
+}
+
+static void rtl8723a_GetHalODMVar(struct rtw_adapter *Adapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet)
+{
+ switch (eVariable) {
+ case HAL_ODM_STA_INFO:
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtl8723a_SetHalODMVar(struct rtw_adapter *Adapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_odm_t *podmpriv = &pHalData->odmpriv;
+ switch (eVariable) {
+ case HAL_ODM_STA_INFO:
+ {
+ struct sta_info *psta = (struct sta_info *)pValue1;
+
+ if (bSet) {
+ DBG_8723A("Set STA_(%d) info\n", psta->mac_id);
+ ODM_CmnInfoPtrArrayHook23a(podmpriv,
+ ODM_CMNINFO_STA_STATUS,
+ psta->mac_id, psta);
+ } else {
+ DBG_8723A("Clean STA_(%d) info\n", psta->mac_id);
+ ODM_CmnInfoPtrArrayHook23a(podmpriv,
+ ODM_CMNINFO_STA_STATUS,
+ psta->mac_id, NULL);
+ }
+ }
+ break;
+ case HAL_ODM_P2P_STATE:
+ ODM_CmnInfoUpdate23a(podmpriv, ODM_CMNINFO_WIFI_DIRECT, bSet);
+ break;
+ case HAL_ODM_WIFI_DISPLAY_STATE:
+ ODM_CmnInfoUpdate23a(podmpriv, ODM_CMNINFO_WIFI_DISPLAY, bSet);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hal_notch_filter_8723a(struct rtw_adapter *adapter, bool enable)
+{
+ if (enable) {
+ DBG_8723A("Enable notch filter\n");
+ rtw_write8(adapter, rOFDM0_RxDSP + 1,
+ rtw_read8(adapter, rOFDM0_RxDSP + 1) | BIT1);
+ } else {
+ DBG_8723A("Disable notch filter\n");
+ rtw_write8(adapter, rOFDM0_RxDSP + 1,
+ rtw_read8(adapter, rOFDM0_RxDSP + 1) & ~BIT1);
+ }
+}
+
+s32 c2h_id_filter_ccx_8723a(u8 id)
+{
+ s32 ret = false;
+ if (id == C2H_CCX_TX_RPT)
+ ret = true;
+
+ return ret;
+}
+
+static s32 c2h_handler_8723a(struct rtw_adapter *padapter,
+ struct c2h_evt_hdr *c2h_evt)
+{
+ s32 ret = _SUCCESS;
+ u8 i = 0;
+
+ if (c2h_evt == NULL) {
+ DBG_8723A("%s c2h_evt is NULL\n", __func__);
+ ret = _FAIL;
+ goto exit;
+ }
+
+ switch (c2h_evt->id) {
+ case C2H_DBG:
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("C2HCommandHandler: %s\n", c2h_evt->payload));
+ break;
+
+ case C2H_CCX_TX_RPT:
+ handle_txrpt_ccx_8723a(padapter, c2h_evt->payload);
+ break;
+ case C2H_EXT_RA_RPT:
+ break;
+ case C2H_HW_INFO_EXCH:
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("[BT], C2H_HW_INFO_EXCH\n"));
+ for (i = 0; i < c2h_evt->plen; i++) {
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("[BT], tmpBuf[%d]= 0x%x\n", i,
+ c2h_evt->payload[i]));
+ }
+ break;
+
+ case C2H_C2H_H2C_TEST:
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("[BT], C2H_H2C_TEST\n"));
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("[BT], tmpBuf[0]/[1]/[2]/[3]/[4]= 0x%x/ 0x%x/ "
+ "0x%x/ 0x%x/ 0x%x\n", c2h_evt->payload[0],
+ c2h_evt->payload[1], c2h_evt->payload[2],
+ c2h_evt->payload[3], c2h_evt->payload[4]));
+ break;
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ case C2H_BT_INFO:
+ DBG_8723A("%s , Got C2H_BT_INFO \n", __func__);
+ BT_FwC2hBtInfo(padapter, c2h_evt->payload, c2h_evt->plen);
+ break;
+#endif
+
+ default:
+ ret = _FAIL;
+ break;
+ }
+
+exit:
+ return ret;
+}
+
+void rtl8723a_set_hal_ops(struct hal_ops *pHalFunc)
+{
+ pHalFunc->free_hal_data = &rtl8723a_free_hal_data;
+
+ pHalFunc->dm_init = &rtl8723a_init_dm_priv;
+ pHalFunc->dm_deinit = &rtl8723a_deinit_dm_priv;
+
+ pHalFunc->read_chip_version = &rtl8723a_read_chip_version;
+
+ pHalFunc->set_bwmode_handler = &PHY_SetBWMode23a8723A;
+ pHalFunc->set_channel_handler = &PHY_SwChnl8723A;
+
+ pHalFunc->hal_dm_watchdog = &rtl8723a_HalDmWatchDog;
+
+ pHalFunc->SetBeaconRelatedRegistersHandler =
+ &rtl8723a_SetBeaconRelatedRegisters;
+
+ pHalFunc->Add_RateATid = &rtl8723a_add_rateatid;
+ pHalFunc->run_thread = &rtl8723a_start_thread;
+ pHalFunc->cancel_thread = &rtl8723a_stop_thread;
+
+ pHalFunc->read_bbreg = &PHY_QueryBBReg;
+ pHalFunc->write_bbreg = &PHY_SetBBReg;
+ pHalFunc->read_rfreg = &PHY_QueryRFReg;
+ pHalFunc->write_rfreg = &PHY_SetRFReg;
+
+ /* Efuse related function */
+ pHalFunc->EfusePowerSwitch = &Hal_EfusePowerSwitch;
+ pHalFunc->ReadEFuse = &Hal_ReadEFuse;
+ pHalFunc->EFUSEGetEfuseDefinition = &Hal_GetEfuseDefinition;
+ pHalFunc->EfuseGetCurrentSize = &Hal_EfuseGetCurrentSize;
+ pHalFunc->Efuse_PgPacketRead23a = &Hal_EfusePgPacketRead;
+ pHalFunc->Efuse_PgPacketWrite23a = &Hal_EfusePgPacketWrite;
+ pHalFunc->Efuse_WordEnableDataWrite23a = &Hal_EfuseWordEnableDataWrite;
+ pHalFunc->Efuse_PgPacketWrite23a_BT = &Hal_EfusePgPacketWrite_BT;
+
+ pHalFunc->sreset_init_value23a = &sreset_init_value23a;
+ pHalFunc->sreset_reset_value23a = &sreset_reset_value23a;
+ pHalFunc->silentreset = &sreset_reset;
+ pHalFunc->sreset_xmit_status_check = &rtl8723a_sreset_xmit_status_check;
+ pHalFunc->sreset_linked_status_check =
+ &rtl8723a_sreset_linked_status_check;
+ pHalFunc->sreset_get_wifi_status23a = &sreset_get_wifi_status23a;
+ pHalFunc->sreset_inprogress = &sreset_inprogress;
+ pHalFunc->GetHalODMVarHandler = &rtl8723a_GetHalODMVar;
+ pHalFunc->SetHalODMVarHandler = &rtl8723a_SetHalODMVar;
+
+ pHalFunc->hal_notch_filter = &hal_notch_filter_8723a;
+
+ pHalFunc->c2h_handler = c2h_handler_8723a;
+ pHalFunc->c2h_id_filter_ccx = c2h_id_filter_ccx_8723a;
+}
+
+void rtl8723a_InitAntenna_Selection(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ u8 val;
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ val = rtw_read8(padapter, REG_LEDCFG2);
+	/* Let the 8051 take control of the antenna setting */
+ val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
+ rtw_write8(padapter, REG_LEDCFG2, val);
+}
+
+void rtl8723a_CheckAntenna_Selection(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ u8 val;
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ val = rtw_read8(padapter, REG_LEDCFG2);
+	/* Let the 8051 take control of the antenna setting */
+ if (!(val & BIT(7))) {
+ val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
+ rtw_write8(padapter, REG_LEDCFG2, val);
+ }
+}
+
+void rtl8723a_DeinitAntenna_Selection(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ u8 val;
+
+ pHalData = GET_HAL_DATA(padapter);
+ val = rtw_read8(padapter, REG_LEDCFG2);
+	/* Let the 8051 take control of the antenna setting */
+ val &= ~BIT(7); /* DPDT_SEL_EN, clear 0x4C[23] */
+ rtw_write8(padapter, REG_LEDCFG2, val);
+}
+
+void rtl8723a_init_default_value(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct dm_priv *pdmpriv;
+ u8 i;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pdmpriv = &pHalData->dmpriv;
+
+ /* init default value */
+ pHalData->fw_ractrl = false;
+ pHalData->bIQKInitialized = false;
+ if (!padapter->pwrctrlpriv.bkeepfwalive)
+ pHalData->LastHMEBoxNum = 0;
+
+ /* init dm default value */
+ pdmpriv->TM_Trigger = 0; /* for IQK */
+/* pdmpriv->binitialized = false; */
+/* pdmpriv->prv_traffic_idx = 3; */
+/* pdmpriv->initialize = 0; */
+
+ pdmpriv->ThermalValue_HP_index = 0;
+ for (i = 0; i < HP_THERMAL_NUM; i++)
+ pdmpriv->ThermalValue_HP[i] = 0;
+
+ /* init Efuse variables */
+ pHalData->EfuseUsedBytes = 0;
+ pHalData->BTEfuseUsedBytes = 0;
+}
+
+u8 GetEEPROMSize8723A(struct rtw_adapter *padapter)
+{
+ u8 size = 0;
+ u32 cr;
+
+ cr = rtw_read16(padapter, REG_9346CR);
+ /* 6: EEPROM used is 93C46, 4: boot from E-Fuse. */
+ size = (cr & BOOT_FROM_EEPROM) ? 6 : 4;
+
+ MSG_8723A("EEPROM type is %s\n", size == 4 ? "E-FUSE" : "93C46");
+
+ return size;
+}
+
+/* */
+/* */
+/* LLT R/W/Init function */
+/* */
+/* */
+static s32 _LLTWrite(struct rtw_adapter *padapter, u32 address, u32 data)
+{
+ s32 status = _SUCCESS;
+ s32 count = 0;
+ u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) |
+ _LLT_OP(_LLT_WRITE_ACCESS);
+ u16 LLTReg = REG_LLT_INIT;
+
+ rtw_write32(padapter, LLTReg, value);
+
+ /* polling */
+ do {
+ value = rtw_read32(padapter, LLTReg);
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) {
+ break;
+ }
+
+ if (count > POLLING_LLT_THRESHOLD) {
+ RT_TRACE(_module_hal_init_c_, _drv_err_,
+ ("Failed to polling write LLT done at "
+ "address %d!\n", address));
+ status = _FAIL;
+ break;
+ }
+	} while (++count);	/* pre-increment so the poll loop keeps running */
+
+ return status;
+}
+
+s32 InitLLTTable23a(struct rtw_adapter *padapter, u32 boundary)
+{
+ s32 status = _SUCCESS;
+ u32 i;
+ u32 txpktbuf_bndy = boundary;
+ u32 Last_Entry_Of_TxPktBuf = LAST_ENTRY_OF_TX_PKT_BUFFER;
+
+ for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+ status = _LLTWrite(padapter, i, i + 1);
+ if (_SUCCESS != status) {
+ return status;
+ }
+ }
+
+ /* end of list */
+ status = _LLTWrite(padapter, (txpktbuf_bndy - 1), 0xFF);
+ if (_SUCCESS != status) {
+ return status;
+ }
+
+	/* Make the other pages into a ring buffer. */
+	/* This ring buffer is used as the beacon buffer if we configure this
+	   MAC for two-MAC transfer. */
+	/* Otherwise it is used as the local loopback buffer. */
+ for (i = txpktbuf_bndy; i < Last_Entry_Of_TxPktBuf; i++) {
+ status = _LLTWrite(padapter, i, (i + 1));
+ if (_SUCCESS != status) {
+ return status;
+ }
+ }
+
+ /* Let last entry point to the start entry of ring buffer */
+ status = _LLTWrite(padapter, Last_Entry_Of_TxPktBuf, txpktbuf_bndy);
+ if (_SUCCESS != status) {
+ return status;
+ }
+
+ return status;
+}
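+
+/*
+ * Resulting LLT layout (from the writes above): entries 0 .. boundary-2
+ * each point to the next entry, entry boundary-1 is terminated with 0xFF,
+ * and entries boundary .. LAST_ENTRY_OF_TX_PKT_BUFFER form a ring that
+ * wraps back to the boundary entry (beacon or local loopback buffer).
+ */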
+
+static void _DisableGPIO(struct rtw_adapter *padapter)
+{
+/***************************************
+j. GPIO_PIN_CTRL 0x44[31:0]= 0x000
+k.Value = GPIO_PIN_CTRL[7:0]
+l. GPIO_PIN_CTRL 0x44[31:0] = 0x00FF0000 | (value <<8); write external PIN level
+m. GPIO_MUXCFG 0x42 [15:0] = 0x0780
+n. LEDCFG 0x4C[15:0] = 0x8080
+***************************************/
+ u32 value32;
+ u32 u4bTmp;
+
+ /* 1. Disable GPIO[7:0] */
+ rtw_write16(padapter, REG_GPIO_PIN_CTRL + 2, 0x0000);
+ value32 = rtw_read32(padapter, REG_GPIO_PIN_CTRL) & 0xFFFF00FF;
+ u4bTmp = value32 & 0x000000FF;
+ value32 |= ((u4bTmp << 8) | 0x00FF0000);
+ rtw_write32(padapter, REG_GPIO_PIN_CTRL, value32);
+
+ /* */
+	/* <Roger_Notes> For the RTL8723u multi-function configuration, which
+	   was autoloaded from Efuse offsets 0x0a and 0x0b, */
+ /* WLAN HW GPIO[9], GPS HW GPIO[10] and BT HW GPIO[11]. */
+ /* Added by Roger, 2010.10.07. */
+ /* */
+ /* 2. Disable GPIO[8] and GPIO[12] */
+
+ /* Configure all pins as input mode. */
+ rtw_write16(padapter, REG_GPIO_IO_SEL_2, 0x0000);
+ value32 = rtw_read32(padapter, REG_GPIO_PIN_CTRL_2) & 0xFFFF001F;
+ u4bTmp = value32 & 0x0000001F;
+ /* Set pin 8, 10, 11 and pin 12 to output mode. */
+ value32 |= ((u4bTmp << 8) | 0x001D0000);
+ rtw_write32(padapter, REG_GPIO_PIN_CTRL_2, value32);
+
+ /* 3. Disable LED0 & 1 */
+ rtw_write16(padapter, REG_LEDCFG0, 0x8080);
+} /* end of _DisableGPIO() */
+
+static void _DisableRFAFEAndResetBB8192C(struct rtw_adapter *padapter)
+{
+/**************************************
+a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue
+b. RF path 0 offset 0x00 = 0x00 disable RF
+c. APSD_CTRL 0x600[7:0] = 0x40
+d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine
+e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
+***************************************/
+ u8 eRFPath = 0, value8 = 0;
+
+ rtw_write8(padapter, REG_TXPAUSE, 0xFF);
+
+ PHY_SetRFReg(padapter, (enum RF_RADIO_PATH) eRFPath, 0x0, bMaskByte0, 0x0);
+
+ value8 |= APSDOFF;
+ rtw_write8(padapter, REG_APSD_CTRL, value8); /* 0x40 */
+
+ /* Set BB reset at first */
+ value8 = 0;
+ value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn);
+ rtw_write8(padapter, REG_SYS_FUNC_EN, value8); /* 0x16 */
+
+ /* Set global reset. */
+ value8 &= ~FEN_BB_GLB_RSTn;
+ rtw_write8(padapter, REG_SYS_FUNC_EN, value8); /* 0x14 */
+
+	/* 2010/08/12 MH We need to set BB/GLOBAL reset to save power
+	   for SS mode. */
+
+/* RT_TRACE(COMP_INIT, DBG_LOUD, ("======> RF off and reset BB.\n")); */
+}
+
+static void _DisableRFAFEAndResetBB(struct rtw_adapter *padapter)
+{
+ _DisableRFAFEAndResetBB8192C(padapter);
+}
+
+static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter,
+ bool bWithoutHWSM)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (IS_FW_81xxC(padapter) && (pHalData->FirmwareVersion <= 0x20)) {
+ /*****************************
+ f. MCUFWDL 0x80[7:0]= 0 reset MCU ready status
+ g. SYS_FUNC_EN 0x02[10]= 0 reset MCU register, (8051 reset)
+ h. SYS_FUNC_EN 0x02[15-12]= 5 reset MAC register, DCORE
+ i. SYS_FUNC_EN 0x02[10]= 1 enable MCU register,
+ (8051 enable)
+ ******************************/
+ u16 valu16 = 0;
+ rtw_write8(padapter, REG_MCUFWDL, 0);
+
+ valu16 = rtw_read16(padapter, REG_SYS_FUNC_EN);
+ /* reset MCU , 8051 */
+ rtw_write16(padapter, REG_SYS_FUNC_EN, (valu16 & (~FEN_CPUEN)));
+
+ valu16 = rtw_read16(padapter, REG_SYS_FUNC_EN) & 0x0FFF;
+ rtw_write16(padapter, REG_SYS_FUNC_EN,
+ (valu16 | (FEN_HWPDN | FEN_ELDR))); /* reset MAC */
+
+ valu16 = rtw_read16(padapter, REG_SYS_FUNC_EN);
+ /* enable MCU , 8051 */
+ rtw_write16(padapter, REG_SYS_FUNC_EN, (valu16 | FEN_CPUEN));
+ } else {
+ u8 retry_cnts = 0;
+
+			/* 2010/08/12 MH For USB SS, we cannot stop the 8051 when we
+			   are trying to enter IPS/HW&SW radio off. For
+			   S3/S4/S5/Disable, we can stop the 8051 because */
+			/* we will re-init the FW when powering on again. */
+ /* if (!pDevice->RegUsbSS) */
+ /* If we want to SS mode, we can not reset 8051. */
+ if (rtw_read8(padapter, REG_MCUFWDL) & BIT1) {
+ /* IF fw in RAM code, do reset */
+ if (padapter->bFWReady) {
+				/* 2010/08/25 MH According to RD alfred's
+				   suggestion, we need to disable the other */
+				/* HRCV INTs so they do not influence the 8051 reset. */
+ rtw_write8(padapter, REG_FWIMR, 0x20);
+ /* 2011/02/15 MH According to Alex's
+ suggestion, close mask to prevent
+ incorrect FW write operation. */
+ rtw_write8(padapter, REG_FTIMR, 0x00);
+ rtw_write8(padapter, REG_FSIMR, 0x00);
+
+ /* 8051 reset by self */
+ rtw_write8(padapter, REG_HMETFR + 3, 0x20);
+
+ while ((retry_cnts++ < 100) &&
+ (FEN_CPUEN &
+ rtw_read16(padapter, REG_SYS_FUNC_EN))) {
+ udelay(50); /* us */
+ }
+
+ if (retry_cnts >= 100) {
+ /* Reset MAC and Enable 8051 */
+ rtw_write8(padapter,
+ REG_SYS_FUNC_EN + 1, 0x50);
+ mdelay(10);
+ }
+ }
+ }
+ /* Reset MAC and Enable 8051 */
+ rtw_write8(padapter, REG_SYS_FUNC_EN + 1, 0x54);
+ rtw_write8(padapter, REG_MCUFWDL, 0);
+ }
+
+ if (bWithoutHWSM) {
+ /*****************************
+ Without HW auto state machine
+ g. SYS_CLKR 0x08[15:0] = 0x30A3 disable MAC clock
+ h. AFE_PLL_CTRL 0x28[7:0] = 0x80 disable AFE PLL
+ i. AFE_XTAL_CTRL 0x24[15:0] = 0x880F gated AFE DIG_CLOCK
+ j. SYS_ISO_CTRL 0x00[7:0] = 0xF9 isolated digital to PON
+ ******************************/
+ /* modify to 0x70A3 by Scott. */
+ rtw_write16(padapter, REG_SYS_CLKR, 0x70A3);
+ rtw_write8(padapter, REG_AFE_PLL_CTRL, 0x80);
+ rtw_write16(padapter, REG_AFE_XTAL_CTRL, 0x880F);
+ rtw_write8(padapter, REG_SYS_ISO_CTRL, 0xF9);
+ } else {
+ /* Disable all RF/BB power */
+ rtw_write8(padapter, REG_RF_CTRL, 0x00);
+ }
+}
+
+static void _ResetDigitalProcedure1(struct rtw_adapter *padapter,
+ bool bWithoutHWSM)
+{
+ _ResetDigitalProcedure1_92C(padapter, bWithoutHWSM);
+}
+
+static void _ResetDigitalProcedure2(struct rtw_adapter *padapter)
+{
+/*****************************
+k. SYS_FUNC_EN 0x03[7:0] = 0x44 disable ELDR function
+l. SYS_CLKR 0x08[15:0] = 0x3083 disable ELDR clock
+m. SYS_ISO_CTRL 0x01[7:0] = 0x83 isolated ELDR to PON
+******************************/
+ /* modify to 0x70a3 by Scott. */
+ rtw_write16(padapter, REG_SYS_CLKR, 0x70a3);
+ /* modify to 0x82 by Scott. */
+ rtw_write8(padapter, REG_SYS_ISO_CTRL + 1, 0x82);
+}
+
+static void _DisableAnalog(struct rtw_adapter *padapter, bool bWithoutHWSM)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u16 value16 = 0;
+ u8 value8 = 0;
+
+ if (bWithoutHWSM) {
+ /*****************************
+ n. LDOA15_CTRL 0x20[7:0] = 0x04 disable A15 power
+ o. LDOV12D_CTRL 0x21[7:0] = 0x54 disable digital core power
+	 r. When the driver calls disable, the ASIC will turn off the
+	    remaining clocks automatically
+ ******************************/
+
+ rtw_write8(padapter, REG_LDOA15_CTRL, 0x04);
+ /* rtw_write8(padapter, REG_LDOV12D_CTRL, 0x54); */
+
+ value8 = rtw_read8(padapter, REG_LDOV12D_CTRL);
+ value8 &= (~LDV12_EN);
+ rtw_write8(padapter, REG_LDOV12D_CTRL, value8);
+/* RT_TRACE(COMP_INIT, DBG_LOUD,
+ (" REG_LDOV12D_CTRL Reg0x21:0x%02x.\n", value8)); */
+ }
+
+ /*****************************
+ h. SPS0_CTRL 0x11[7:0] = 0x23 enter PFM mode
+ i. APS_FSMCO 0x04[15:0] = 0x4802 set USB suspend
+ ******************************/
+ value8 = 0x23;
+ if (IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID))
+ value8 |= BIT3;
+
+ rtw_write8(padapter, REG_SPS0_CTRL, value8);
+
+ if (bWithoutHWSM) {
+ /* value16 |= (APDM_HOST | FSM_HSUS |/PFM_ALDN); */
+		/* 2010/08/31 According to Filen's description, we need to
+		   use HW to shut down the 8051 automatically. */
+		/* Because the suspend operation needs the assistance of the
+		   8051 to wait for 3 ms. */
+ value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
+ } else {
+ value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
+ }
+
+ rtw_write16(padapter, REG_APS_FSMCO, value16); /* 0x4802 */
+
+ rtw_write8(padapter, REG_RSV_CTRL, 0x0e);
+}
+
+/* HW Auto state machine */
+s32 CardDisableHWSM(struct rtw_adapter *padapter, u8 resetMCU)
+{
+ int rtStatus = _SUCCESS;
+
+ if (padapter->bSurpriseRemoved) {
+ return rtStatus;
+ }
+ /* RF Off Sequence ==== */
+ _DisableRFAFEAndResetBB(padapter);
+
+ /* ==== Reset digital sequence ====== */
+ _ResetDigitalProcedure1(padapter, false);
+
+ /* ==== Pull GPIO PIN to balance level and LED control ====== */
+ _DisableGPIO(padapter);
+
+ /* ==== Disable analog sequence === */
+ _DisableAnalog(padapter, false);
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("======> Card disable finished.\n"));
+
+ return rtStatus;
+}
+
+/* without HW Auto state machine */
+s32 CardDisableWithoutHWSM(struct rtw_adapter *padapter)
+{
+ s32 rtStatus = _SUCCESS;
+
+ /* RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("======> Card Disable Without HWSM .\n")); */
+ if (padapter->bSurpriseRemoved) {
+ return rtStatus;
+ }
+
+ /* RF Off Sequence ==== */
+ _DisableRFAFEAndResetBB(padapter);
+
+ /* ==== Reset digital sequence ====== */
+ _ResetDigitalProcedure1(padapter, true);
+
+ /* ==== Pull GPIO PIN to balance level and LED control ====== */
+ _DisableGPIO(padapter);
+
+ /* ==== Reset digital sequence ====== */
+ _ResetDigitalProcedure2(padapter);
+
+ /* ==== Disable analog sequence === */
+ _DisableAnalog(padapter, true);
+
+ /* RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("<====== Card Disable Without HWSM .\n")); */
+ return rtStatus;
+}
+
+void Hal_InitPGData(struct rtw_adapter *padapter, u8 *PROMContent)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ if (false == pEEPROM->bautoload_fail_flag) { /* autoload OK. */
+ if (!pEEPROM->EepromOrEfuse) {
+ /* Read EFUSE real map to shadow. */
+ EFUSE_ShadowMapUpdate23a(padapter, EFUSE_WIFI);
+ memcpy((void *)PROMContent,
+ (void *)pEEPROM->efuse_eeprom_data,
+ HWSET_MAX_SIZE);
+ }
+ } else { /* autoload fail */
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
+ ("AutoLoad Fail reported from CR9346!!\n"));
+/* pHalData->AutoloadFailFlag = true; */
+ /* update to default value 0xFF */
+ if (false == pEEPROM->EepromOrEfuse)
+ EFUSE_ShadowMapUpdate23a(padapter, EFUSE_WIFI);
+ memcpy((void *)PROMContent, (void *)pEEPROM->efuse_eeprom_data,
+ HWSET_MAX_SIZE);
+ }
+}
+
+void Hal_EfuseParseIDCode(struct rtw_adapter *padapter, u8 *hwinfo)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+/* struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); */
+ u16 EEPROMId;
+
+	/* Check 0x8129 again to make sure of the autoload status!! */
+ EEPROMId = le16_to_cpu(*((u16 *) hwinfo));
+ if (EEPROMId != RTL_EEPROM_ID) {
+ DBG_8723A("EEPROM ID(%#x) is invalid!!\n", EEPROMId);
+ pEEPROM->bautoload_fail_flag = true;
+ } else {
+ pEEPROM->bautoload_fail_flag = false;
+ }
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("EEPROM ID = 0x%04x\n", EEPROMId));
+}
+
+static void Hal_EEValueCheck(u8 EEType, void *pInValue, void *pOutValue)
+{
+ switch (EEType) {
+ case EETYPE_TX_PWR:
+ {
+ u8 *pIn, *pOut;
+ pIn = (u8 *) pInValue;
+ pOut = (u8 *) pOutValue;
+		if (*pIn <= 63) {	/* *pIn is a u8, so it is always >= 0 */
+ *pOut = *pIn;
+ } else {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
+ ("EETYPE_TX_PWR, value =%d is invalid, set "
+ "to default = 0x%x\n",
+ *pIn, EEPROM_Default_TxPowerLevel));
+ *pOut = EEPROM_Default_TxPowerLevel;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+Hal_ReadPowerValueFromPROM_8723A(struct txpowerinfo *pwrInfo,
+ u8 *PROMContent, bool AutoLoadFail)
+{
+ u32 rfPath, eeAddr, group, rfPathMax = 1;
+
+ memset(pwrInfo, 0, sizeof(*pwrInfo));
+
+ if (AutoLoadFail) {
+ for (group = 0; group < MAX_CHNL_GROUP; group++) {
+ for (rfPath = 0; rfPath < rfPathMax; rfPath++) {
+ pwrInfo->CCKIndex[rfPath][group] =
+ EEPROM_Default_TxPowerLevel;
+ pwrInfo->HT40_1SIndex[rfPath][group] =
+ EEPROM_Default_TxPowerLevel;
+ pwrInfo->HT40_2SIndexDiff[rfPath][group] =
+ EEPROM_Default_HT40_2SDiff;
+ pwrInfo->HT20IndexDiff[rfPath][group] =
+ EEPROM_Default_HT20_Diff;
+ pwrInfo->OFDMIndexDiff[rfPath][group] =
+ EEPROM_Default_LegacyHTTxPowerDiff;
+ pwrInfo->HT40MaxOffset[rfPath][group] =
+ EEPROM_Default_HT40_PwrMaxOffset;
+ pwrInfo->HT20MaxOffset[rfPath][group] =
+ EEPROM_Default_HT20_PwrMaxOffset;
+ }
+ }
+ pwrInfo->TSSI_A[0] = EEPROM_Default_TSSI;
+ return;
+ }
+
+ for (rfPath = 0; rfPath < rfPathMax; rfPath++) {
+ for (group = 0; group < MAX_CHNL_GROUP; group++) {
+ eeAddr =
+ EEPROM_CCK_TX_PWR_INX_8723A + (rfPath * 3) + group;
+ /* pwrInfo->CCKIndex[rfPath][group] =
+ PROMContent[eeAddr]; */
+ Hal_EEValueCheck(EETYPE_TX_PWR, &PROMContent[eeAddr],
+ &pwrInfo->CCKIndex[rfPath][group]);
+ eeAddr = EEPROM_HT40_1S_TX_PWR_INX_8723A +
+ (rfPath * 3) + group;
+ /* pwrInfo->HT40_1SIndex[rfPath][group] =
+ PROMContent[eeAddr]; */
+ Hal_EEValueCheck(EETYPE_TX_PWR, &PROMContent[eeAddr],
+ &pwrInfo->HT40_1SIndex[rfPath][group]);
+ }
+ }
+
+ for (group = 0; group < MAX_CHNL_GROUP; group++) {
+ for (rfPath = 0; rfPath < rfPathMax; rfPath++) {
+ pwrInfo->HT40_2SIndexDiff[rfPath][group] = 0;
+ pwrInfo->HT20IndexDiff[rfPath][group] =
+ (PROMContent
+ [EEPROM_HT20_TX_PWR_INX_DIFF_8723A +
+ group] >> (rfPath * 4)) & 0xF;
+			/* sign-extend the 4-bit value to an 8-bit value */
+ if (pwrInfo->HT20IndexDiff[rfPath][group] & BIT3)
+ pwrInfo->HT20IndexDiff[rfPath][group] |= 0xF0;
+
+ pwrInfo->OFDMIndexDiff[rfPath][group] =
+ (PROMContent[EEPROM_OFDM_TX_PWR_INX_DIFF_8723A +
+ group] >> (rfPath * 4)) & 0xF;
+
+ pwrInfo->HT40MaxOffset[rfPath][group] =
+ (PROMContent[EEPROM_HT40_MAX_PWR_OFFSET_8723A +
+ group] >> (rfPath * 4)) & 0xF;
+
+ pwrInfo->HT20MaxOffset[rfPath][group] =
+ (PROMContent[EEPROM_HT20_MAX_PWR_OFFSET_8723A +
+ group] >> (rfPath * 4)) & 0xF;
+ }
+ }
+
+ pwrInfo->TSSI_A[0] = PROMContent[EEPROM_TSSI_A_8723A];
+}
+
+static u8 Hal_GetChnlGroup(u8 chnl)
+{
+ u8 group = 0;
+
+	if (chnl < 3)	/* Channel 1-3 */
+ group = 0;
+ else if (chnl < 9) /* Channel 4-9 */
+ group = 1;
+ else /* Channel 10-14 */
+ group = 2;
+
+ return group;
+}
+
+void
+Hal_EfuseParsetxpowerinfo_8723A(struct rtw_adapter *padapter,
+ u8 *PROMContent, bool AutoLoadFail)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct txpowerinfo pwrInfo;
+ u8 rfPath, ch, group, rfPathMax = 1;
+ u8 pwr, diff;
+
+ Hal_ReadPowerValueFromPROM_8723A(&pwrInfo, PROMContent, AutoLoadFail);
+ for (rfPath = 0; rfPath < rfPathMax; rfPath++) {
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
+ group = Hal_GetChnlGroup(ch);
+
+ pHalData->TxPwrLevelCck[rfPath][ch] =
+ pwrInfo.CCKIndex[rfPath][group];
+ pHalData->TxPwrLevelHT40_1S[rfPath][ch] =
+ pwrInfo.HT40_1SIndex[rfPath][group];
+
+ pHalData->TxPwrHt20Diff[rfPath][ch] =
+ pwrInfo.HT20IndexDiff[rfPath][group];
+ pHalData->TxPwrLegacyHtDiff[rfPath][ch] =
+ pwrInfo.OFDMIndexDiff[rfPath][group];
+ pHalData->PwrGroupHT20[rfPath][ch] =
+ pwrInfo.HT20MaxOffset[rfPath][group];
+ pHalData->PwrGroupHT40[rfPath][ch] =
+ pwrInfo.HT40MaxOffset[rfPath][group];
+
+ pwr = pwrInfo.HT40_1SIndex[rfPath][group];
+ diff = pwrInfo.HT40_2SIndexDiff[rfPath][group];
+
+ pHalData->TxPwrLevelHT40_2S[rfPath][ch] =
+ (pwr > diff) ? (pwr - diff) : 0;
+ }
+ }
+ for (rfPath = 0; rfPath < RF_PATH_MAX; rfPath++) {
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("RF(%u)-Ch(%u) [CCK / HT40_1S / HT40_2S] = "
+ "[0x%x / 0x%x / 0x%x]\n",
+ rfPath, ch,
+ pHalData->TxPwrLevelCck[rfPath][ch],
+ pHalData->TxPwrLevelHT40_1S[rfPath][ch],
+ pHalData->TxPwrLevelHT40_2S[rfPath][ch]));
+
+ }
+ }
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("RF-A Ht20 to HT40 Diff[%u] = 0x%x(%d)\n", ch,
+ pHalData->TxPwrHt20Diff[RF_PATH_A][ch],
+ pHalData->TxPwrHt20Diff[RF_PATH_A][ch]));
+ }
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++)
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("RF-A Legacy to Ht40 Diff[%u] = 0x%x\n", ch,
+ pHalData->TxPwrLegacyHtDiff[RF_PATH_A][ch]));
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("RF-B Ht20 to HT40 Diff[%u] = 0x%x(%d)\n", ch,
+ pHalData->TxPwrHt20Diff[RF_PATH_B][ch],
+ pHalData->TxPwrHt20Diff[RF_PATH_B][ch]));
+ }
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++)
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("RF-B Legacy to HT40 Diff[%u] = 0x%x\n", ch,
+ pHalData->TxPwrLegacyHtDiff[RF_PATH_B][ch]));
+ if (!AutoLoadFail) {
+ struct registry_priv *registry_par = &padapter->registrypriv;
+ if (registry_par->regulatory_tid == 0xff) {
+ if (PROMContent[RF_OPTION1_8723A] == 0xff)
+ pHalData->EEPROMRegulatory = 0;
+ else
+ pHalData->EEPROMRegulatory =
+ PROMContent[RF_OPTION1_8723A] & 0x7;
+ } else {
+ pHalData->EEPROMRegulatory =
+ registry_par->regulatory_tid;
+ }
+ } else {
+ pHalData->EEPROMRegulatory = 0;
+ }
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("EEPROMRegulatory = 0x%x\n", pHalData->EEPROMRegulatory));
+
+ if (!AutoLoadFail)
+ pHalData->bTXPowerDataReadFromEEPORM = true;
+}
+
+void
+Hal_EfuseParseBTCoexistInfo_8723A(struct rtw_adapter *padapter,
+ u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u8 tempval;
+ u32 tmpu4;
+
+ if (!AutoLoadFail) {
+ tmpu4 = rtw_read32(padapter, REG_MULTI_FUNC_CTRL);
+ if (tmpu4 & BT_FUNC_EN)
+ pHalData->EEPROMBluetoothCoexist = 1;
+ else
+ pHalData->EEPROMBluetoothCoexist = 0;
+ pHalData->EEPROMBluetoothType = BT_RTL8723A;
+
+		/* The following needs to be checked against a newer version */
+		/* of the eeprom spec. */
+ tempval = hwinfo[RF_OPTION4_8723A];
+ pHalData->EEPROMBluetoothAntNum = (tempval & 0x1);
+ pHalData->EEPROMBluetoothAntIsolation = ((tempval & 0x10) >> 4);
+ pHalData->EEPROMBluetoothRadioShared = ((tempval & 0x20) >> 5);
+ } else {
+ pHalData->EEPROMBluetoothCoexist = 0;
+ pHalData->EEPROMBluetoothType = BT_RTL8723A;
+ pHalData->EEPROMBluetoothAntNum = Ant_x2;
+ pHalData->EEPROMBluetoothAntIsolation = 0;
+ pHalData->EEPROMBluetoothRadioShared = BT_Radio_Shared;
+ }
+#ifdef CONFIG_8723AU_BT_COEXIST
+ BT_InitHalVars(padapter);
+#endif
+}
+
+void
+Hal_EfuseParseEEPROMVer(struct rtw_adapter *padapter,
+ u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (!AutoLoadFail)
+ pHalData->EEPROMVersion = hwinfo[EEPROM_VERSION_8723A];
+ else
+ pHalData->EEPROMVersion = 1;
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("Hal_EfuseParseEEPROMVer(), EEVer = %d\n",
+ pHalData->EEPROMVersion));
+}
+
+void
+rtl8723a_EfuseParseChnlPlan(struct rtw_adapter *padapter,
+ u8 *hwinfo, bool AutoLoadFail)
+{
+ padapter->mlmepriv.ChannelPlan =
+ hal_com_get_channel_plan23a(padapter, hwinfo ?
+ hwinfo[EEPROM_ChannelPlan_8723A]:0xFF,
+ padapter->registrypriv.channel_plan,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_13,
+ AutoLoadFail);
+
+ DBG_8723A("mlmepriv.ChannelPlan = 0x%02x\n",
+ padapter->mlmepriv.ChannelPlan);
+}
+
+void
+Hal_EfuseParseCustomerID(struct rtw_adapter *padapter,
+ u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ if (!AutoLoadFail) {
+ pHalData->EEPROMCustomerID = hwinfo[EEPROM_CustomID_8723A];
+ pHalData->EEPROMSubCustomerID =
+ hwinfo[EEPROM_SubCustomID_8723A];
+ } else {
+ pHalData->EEPROMCustomerID = 0;
+ pHalData->EEPROMSubCustomerID = 0;
+ }
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("EEPROM Customer ID: 0x%2x\n", pHalData->EEPROMCustomerID));
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("EEPROM SubCustomer ID: 0x%02x\n",
+ pHalData->EEPROMSubCustomerID));
+}
+
+void
+Hal_EfuseParseAntennaDiversity(struct rtw_adapter *padapter,
+ u8 *hwinfo, bool AutoLoadFail)
+{
+}
+
+void
+Hal_EfuseParseRateIndicationOption(struct rtw_adapter *padapter,
+ u8 *hwinfo, bool AutoLoadFail)
+{
+}
+
+void
+Hal_EfuseParseXtal_8723A(struct rtw_adapter *pAdapter,
+ u8 *hwinfo, u8 AutoLoadFail)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
+
+ if (!AutoLoadFail) {
+ pHalData->CrystalCap = hwinfo[EEPROM_XTAL_K_8723A];
+ if (pHalData->CrystalCap == 0xFF)
+ pHalData->CrystalCap = EEPROM_Default_CrystalCap_8723A;
+ } else {
+ pHalData->CrystalCap = EEPROM_Default_CrystalCap_8723A;
+ }
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("%s: CrystalCap = 0x%2x\n", __func__,
+ pHalData->CrystalCap));
+}
+
+void
+Hal_EfuseParseThermalMeter_8723A(struct rtw_adapter *padapter,
+ u8 *PROMContent, u8 AutoloadFail)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ /* */
+ /* ThermalMeter from EEPROM */
+ /* */
+ if (false == AutoloadFail)
+ pHalData->EEPROMThermalMeter =
+ PROMContent[EEPROM_THERMAL_METER_8723A];
+ else
+ pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
+
+ if ((pHalData->EEPROMThermalMeter == 0xff) || (true == AutoloadFail)) {
+ pHalData->bAPKThermalMeterIgnore = true;
+ pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
+ }
+
+ DBG_8723A("%s: ThermalMeter = 0x%x\n", __func__,
+ pHalData->EEPROMThermalMeter);
+}
+
+void Hal_InitChannelPlan23a(struct rtw_adapter *padapter)
+{
+}
+
+static void rtl8723a_cal_txdesc_chksum(struct tx_desc *ptxdesc)
+{
+ u16 *usPtr = (u16 *) ptxdesc;
+ u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
+ u32 index;
+ u16 checksum = 0;
+
+ /* Clear first */
+ ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
+
+ for (index = 0; index < count; index++) {
+ checksum ^= le16_to_cpu(*(usPtr + index));
+ }
+
+ ptxdesc->txdw7 |= cpu_to_le32(checksum & 0x0000ffff);
+}
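+
+/*
+ * The stored value is the XOR of the 16 little-endian 16-bit words of the
+ * 32-byte descriptor, with the low 16 bits of txdw7 cleared first, i.e.
+ *
+ *	chksum = le16(w[0]) ^ le16(w[1]) ^ ... ^ le16(w[15]);
+ *
+ * so it must be recomputed after any later change to the descriptor, which
+ * is why rtl8723a_fill_fake_txdesc() below calls this function last.
+ */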
+
+static void fill_txdesc_sectype(struct pkt_attrib *pattrib,
+ struct txdesc_8723a *ptxdesc)
+{
+ if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
+ switch (pattrib->encrypt) {
+ /* SEC_TYPE */
+ case _WEP40_:
+ case _WEP104_:
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ ptxdesc->sectype = 1;
+ break;
+
+ case _AES_:
+ ptxdesc->sectype = 3;
+ break;
+
+ case _NO_PRIVACY_:
+ default:
+ break;
+ }
+ }
+}
+
+static void fill_txdesc_vcs(struct pkt_attrib *pattrib,
+ struct txdesc_8723a *ptxdesc)
+{
+ /* DBG_8723A("cvs_mode =%d\n", pattrib->vcs_mode); */
+
+ switch (pattrib->vcs_mode) {
+ case RTS_CTS:
+ ptxdesc->rtsen = 1;
+ break;
+
+ case CTS_TO_SELF:
+ ptxdesc->cts2self = 1;
+ break;
+
+ case NONE_VCS:
+ default:
+ break;
+ }
+
+ if (pattrib->vcs_mode) {
+ ptxdesc->hw_rts_en = 1; /* ENABLE HW RTS */
+
+ /* Set RTS BW */
+ if (pattrib->ht_en) {
+ if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
+ ptxdesc->rts_bw = 1;
+
+ switch (pattrib->ch_offset) {
+ case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
+ ptxdesc->rts_sc = 0;
+ break;
+
+ case HAL_PRIME_CHNL_OFFSET_LOWER:
+ ptxdesc->rts_sc = 1;
+ break;
+
+ case HAL_PRIME_CHNL_OFFSET_UPPER:
+ ptxdesc->rts_sc = 2;
+ break;
+
+ default:
+ ptxdesc->rts_sc = 3; /* Duplicate */
+ break;
+ }
+ }
+ }
+}
+
+static void fill_txdesc_phy(struct pkt_attrib *pattrib,
+ struct txdesc_8723a *ptxdesc)
+{
+ if (pattrib->ht_en) {
+ if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
+ ptxdesc->data_bw = 1;
+
+ switch (pattrib->ch_offset) {
+ case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
+ ptxdesc->data_sc = 0;
+ break;
+
+ case HAL_PRIME_CHNL_OFFSET_LOWER:
+ ptxdesc->data_sc = 1;
+ break;
+
+ case HAL_PRIME_CHNL_OFFSET_UPPER:
+ ptxdesc->data_sc = 2;
+ break;
+
+ default:
+ ptxdesc->data_sc = 3; /* Duplicate */
+ break;
+ }
+ }
+}
+
+static void rtl8723a_fill_default_txdesc(struct xmit_frame *pxmitframe,
+ u8 *pbuf)
+{
+ struct rtw_adapter *padapter;
+ struct hal_data_8723a *pHalData;
+ struct dm_priv *pdmpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ struct pkt_attrib *pattrib;
+ struct txdesc_8723a *ptxdesc;
+ s32 bmcst;
+
+ padapter = pxmitframe->padapter;
+ pHalData = GET_HAL_DATA(padapter);
+ pdmpriv = &pHalData->dmpriv;
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
+ pattrib = &pxmitframe->attrib;
+ bmcst = is_multicast_ether_addr(pattrib->ra);
+
+ ptxdesc = (struct txdesc_8723a *)pbuf;
+
+ if (pxmitframe->frame_tag == DATA_FRAMETAG) {
+ ptxdesc->macid = pattrib->mac_id; /* CAM_ID(MAC_ID) */
+
+ if (pattrib->ampdu_en == true)
+ ptxdesc->agg_en = 1; /* AGG EN */
+ else
+ ptxdesc->bk = 1; /* AGG BK */
+
+ ptxdesc->qsel = pattrib->qsel;
+ ptxdesc->rate_id = pattrib->raid;
+
+ fill_txdesc_sectype(pattrib, ptxdesc);
+
+ ptxdesc->seq = pattrib->seqnum;
+
+ if ((pattrib->ether_type != 0x888e) &&
+ (pattrib->ether_type != 0x0806) &&
+ (pattrib->dhcp_pkt != 1)) {
+ /* Non EAP & ARP & DHCP type data packet */
+
+ fill_txdesc_vcs(pattrib, ptxdesc);
+ fill_txdesc_phy(pattrib, ptxdesc);
+
+ ptxdesc->rtsrate = 8; /* RTS Rate = 24M */
+ ptxdesc->data_ratefb_lmt = 0x1F;
+ ptxdesc->rts_ratefb_lmt = 0xF;
+
+ /* use REG_INIDATA_RATE_SEL value */
+ ptxdesc->datarate =
+ pdmpriv->INIDATA_RATE[pattrib->mac_id];
+
+ } else {
+ /* EAP data packet and ARP packet. */
+ /* Use the 1M data rate to send the EAP/ARP packet. */
+			/* This may make the handshake smoother. */
+
+ ptxdesc->bk = 1; /* AGG BK */
+ ptxdesc->userate = 1; /* driver uses rate */
+ if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
+ ptxdesc->data_short = 1;
+ ptxdesc->datarate = MRateToHwRate23a(pmlmeext->tx_rate);
+ }
+ } else if (pxmitframe->frame_tag == MGNT_FRAMETAG) {
+/* RT_TRACE(_module_hal_xmit_c_, _drv_notice_,
+ ("%s: MGNT_FRAMETAG\n", __func__)); */
+
+ ptxdesc->macid = pattrib->mac_id; /* CAM_ID(MAC_ID) */
+ ptxdesc->qsel = pattrib->qsel;
+ ptxdesc->rate_id = pattrib->raid; /* Rate ID */
+ ptxdesc->seq = pattrib->seqnum;
+ ptxdesc->userate = 1; /* driver uses rate, 1M */
+ ptxdesc->rty_lmt_en = 1; /* retry limit enable */
+ ptxdesc->data_rt_lmt = 6; /* retry limit = 6 */
+
+ /* CCX-TXRPT ack for xmit mgmt frames. */
+ if (pxmitframe->ack_report)
+ ptxdesc->ccx = 1;
+
+ ptxdesc->datarate = MRateToHwRate23a(pmlmeext->tx_rate);
+ } else if (pxmitframe->frame_tag == TXAGG_FRAMETAG) {
+ RT_TRACE(_module_hal_xmit_c_, _drv_warning_,
+ ("%s: TXAGG_FRAMETAG\n", __func__));
+ } else {
+ RT_TRACE(_module_hal_xmit_c_, _drv_warning_,
+ ("%s: frame_tag = 0x%x\n", __func__,
+ pxmitframe->frame_tag));
+
+ ptxdesc->macid = 4; /* CAM_ID(MAC_ID) */
+ ptxdesc->rate_id = 6; /* Rate ID */
+ ptxdesc->seq = pattrib->seqnum;
+ ptxdesc->userate = 1; /* driver uses rate */
+ ptxdesc->datarate = MRateToHwRate23a(pmlmeext->tx_rate);
+ }
+
+ ptxdesc->pktlen = pattrib->last_txcmdsz;
+ ptxdesc->offset = TXDESC_SIZE + OFFSET_SZ;
+ if (bmcst)
+ ptxdesc->bmc = 1;
+ ptxdesc->ls = 1;
+ ptxdesc->fs = 1;
+ ptxdesc->own = 1;
+
+ /* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
+ /* (1) The sequence number of each non-Qos frame / broadcast /
+	 * multicast / mgnt frame should be controlled by Hw because Fw
+	 * will also send null data, which we cannot control, when Fw LPS is
+	 * enabled. --> enable non-Qos data sequence numbers by default.
+ 2010.06.23. by tynli. */
+ /* (2) Enable HW SEQ control for beacon packet,
+ * because we use Hw beacon. */
+ /* (3) Use HW Qos SEQ to control the seq num of Ext port
+ * non-Qos packets. */
+ /* 2010.06.23. Added by tynli. */
+ if (!pattrib->qos_en) {
+ /* Hw set sequence number */
+ ptxdesc->hwseq_en = 1; /* HWSEQ_EN */
+ ptxdesc->hwseq_sel = 0; /* HWSEQ_SEL */
+ }
+}
+
+/*
+ * Description:
+ *	Fill the default TX descriptor for the given xmit frame, convert the
+ *	descriptor words to little endian and append the descriptor checksum.
+ *
+ * Parameters:
+ *	pxmitframe	xmitframe
+ *	pbuf	where to fill tx desc
+ */
+void rtl8723a_update_txdesc(struct xmit_frame *pxmitframe, u8 *pbuf)
+{
+ struct tx_desc *pdesc;
+
+ pdesc = (struct tx_desc *)pbuf;
+ memset(pdesc, 0, sizeof(struct tx_desc));
+
+ rtl8723a_fill_default_txdesc(pxmitframe, pbuf);
+
+ pdesc->txdw0 = cpu_to_le32(pdesc->txdw0);
+ pdesc->txdw1 = cpu_to_le32(pdesc->txdw1);
+ pdesc->txdw2 = cpu_to_le32(pdesc->txdw2);
+ pdesc->txdw3 = cpu_to_le32(pdesc->txdw3);
+ pdesc->txdw4 = cpu_to_le32(pdesc->txdw4);
+ pdesc->txdw5 = cpu_to_le32(pdesc->txdw5);
+ pdesc->txdw6 = cpu_to_le32(pdesc->txdw6);
+ pdesc->txdw7 = cpu_to_le32(pdesc->txdw7);
+ rtl8723a_cal_txdesc_chksum(pdesc);
+}
+
+/*
+ * Description: On the normal chip, we should send some packets to the Hw,
+ * which will be used by the Fw in FW LPS mode. This function fills the Tx
+ * descriptors of these packets, so that
+ */
+/* the Fw can tell the Hw to send these packets directly. */
+/* Added by tynli. 2009.10.15. */
+/* */
+void rtl8723a_fill_fake_txdesc(struct rtw_adapter *padapter, u8 *pDesc,
+ u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull)
+{
+ struct tx_desc *ptxdesc;
+
+ /* Clear all status */
+ ptxdesc = (struct tx_desc *)pDesc;
+ memset(pDesc, 0, TXDESC_SIZE);
+
+ /* offset 0 */
+ /* own, bFirstSeg, bLastSeg; */
+ ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
+
+ /* 32 bytes for TX Desc */
+ ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) <<
+ OFFSET_SHT) & 0x00ff0000);
+
+ /* Buffer size + command header */
+ ptxdesc->txdw0 |= cpu_to_le32(BufferLen & 0x0000ffff);
+
+ /* offset 4 */
+ /* Fixed queue of Mgnt queue */
+ ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT << QSEL_SHT) & 0x00001f00);
+
+ /* Set NAVUSEHDR to prevent the Ps-poll AID field from being
+ changed to an erroneous value by Hw. */
+ if (IsPsPoll) {
+ ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
+ } else {
+ /* Hw set sequence number */
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(7));
+ /* set bit3 to 1. Suggested by TimChen. 2009.12.29. */
+ ptxdesc->txdw3 |= cpu_to_le32((8 << 28));
+ }
+
+ if (IsBTQosNull)
+ ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /* BT NULL */
+
+ /* offset 16 */
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(8)); /* driver uses rate */
+
+ /* The USB interface drops the packet if the descriptor checksum
+ isn't correct. */
+ /* Using this checksum lets the hardware recover from bulk-out
+ errors (e.g. Cancel URC, bulk out error). */
+ rtl8723a_cal_txdesc_chksum(ptxdesc);
+}
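The comments above lean on the descriptor checksum to keep the USB path from dropping these frames, but the helper itself is not part of this hunk. The sketch below only illustrates the usual scheme for such descriptors (XOR of the first 16 little-endian half-words into the low half of txdw7); it is an assumption, not the patch's actual rtl8723a_cal_txdesc_chksum().

	/* Sketch only -- assumed scheme, not the helper added by this patch. */
	static void example_txdesc_chksum(struct tx_desc *ptxdesc)
	{
		__le16 *half = (__le16 *)ptxdesc;
		u16 checksum = 0;
		int i;

		/* Clear the old checksum, then XOR the first 32 bytes of the
		 * descriptor viewed as 16 little-endian half-words. */
		ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
		for (i = 0; i < 16; i++)
			checksum ^= le16_to_cpu(half[i]);
		ptxdesc->txdw7 |= cpu_to_le32(checksum);
	}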
+
+static void hw_var_set_opmode(struct rtw_adapter *padapter, u8 mode)
+{
+ u8 val8;
+
+ if ((mode == _HW_STATE_STATION_) || (mode == _HW_STATE_NOLINK_)) {
+ StopTxBeacon(padapter);
+
+ /* disable atim wnd */
+ val8 = DIS_TSF_UDT | EN_BCN_FUNCTION | DIS_ATIM;
+ SetBcnCtrlReg23a(padapter, val8, ~val8);
+ } else if ((mode == _HW_STATE_ADHOC_) /*|| (mode == _HW_STATE_AP_) */) {
+ ResumeTxBeacon(padapter);
+
+ val8 = DIS_TSF_UDT | EN_BCN_FUNCTION | DIS_BCNQ_SUB;
+ SetBcnCtrlReg23a(padapter, val8, ~val8);
+ } else if (mode == _HW_STATE_AP_) {
+#ifdef CONFIG_8723AU_BT_COEXIST
+ /* add NULL Data and BT NULL Data Packets to FW RSVD Page */
+ rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(padapter);
+#endif
+
+ ResumeTxBeacon(padapter);
+
+ val8 = DIS_TSF_UDT | DIS_BCNQ_SUB;
+ SetBcnCtrlReg23a(padapter, val8, ~val8);
+
+ /* Set RCR */
+ /* rtw_write32(padapter, REG_RCR, 0x70002a8e); */
+ /* CBSSID_DATA must be set to 0 */
+ rtw_write32(padapter, REG_RCR, 0x7000228e);
+ /* enable to rx data frame */
+ rtw_write16(padapter, REG_RXFLTMAP2, 0xFFFF);
+ /* enable to rx ps-poll */
+ rtw_write16(padapter, REG_RXFLTMAP1, 0x0400);
+
+ /* Beacon Control related register for first time */
+ rtw_write8(padapter, REG_BCNDMATIM, 0x02); /* 2ms */
+ rtw_write8(padapter, REG_DRVERLYINT, 0x05); /* 5ms */
+ rtw_write8(padapter, REG_ATIMWND, 0x0a); /* 10ms for port0 */
+ rtw_write16(padapter, REG_BCNTCFG, 0x00);
+ rtw_write16(padapter, REG_TBTT_PROHIBIT, 0xff04);
+ /* +32767 (~32ms) */
+ rtw_write16(padapter, REG_TSFTR_SYN_OFFSET, 0x7fff);
+
+ /* reset TSF */
+ rtw_write8(padapter, REG_DUAL_TSF_RST, BIT(0));
+
+ /* enable BCN Function */
+ /* don't enable TSF update (the TSF would otherwise be updated
+ whenever a beacon/probe rsp is received) */
+ val8 = DIS_TSF_UDT | EN_BCN_FUNCTION |
+ EN_TXBCN_RPT | DIS_BCNQ_SUB;
+ SetBcnCtrlReg23a(padapter, val8, ~val8);
+ }
+
+ val8 = rtw_read8(padapter, MSR);
+ val8 = (val8 & 0xC) | mode;
+ rtw_write8(padapter, MSR, val8);
+}
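As a quick illustration of the final MSR read-modify-write above (layout assumed from the 0xC mask: bits 1:0 carry this port's mode, bits 3:2 belong to the other port):

	/*
	 * Worked example (hypothetical values):
	 *   old MSR          = 0x0D (0b1101)
	 *   old & 0xC        = 0x0C  -> bits 3:2 are preserved
	 *   0x0C | mode(0x2) = 0x0E  -> only bits 1:0 take the new mode
	 */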
+
+static void hw_var_set_macaddr(struct rtw_adapter *padapter, u8 *val)
+{
+ u8 idx = 0;
+ u32 reg_macid;
+
+ reg_macid = REG_MACID;
+
+ for (idx = 0; idx < 6; idx++)
+ rtw_write8(padapter, (reg_macid + idx), val[idx]);
+}
+
+static void hw_var_set_bssid(struct rtw_adapter *padapter, u8 *val)
+{
+ u8 idx = 0;
+ u32 reg_bssid;
+
+ reg_bssid = REG_BSSID;
+
+ for (idx = 0; idx < 6; idx++)
+ rtw_write8(padapter, (reg_bssid + idx), val[idx]);
+}
+
+static void hw_var_set_correct_tsf(struct rtw_adapter *padapter)
+{
+ u64 tsf;
+ u32 reg_tsftr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ /* tsf = pmlmeext->TSFValue - ((u32)pmlmeext->TSFValue %
+ (pmlmeinfo->bcn_interval*1024)) - 1024; us */
+ tsf = pmlmeext->TSFValue -
+ rtw_modular6423a(pmlmeext->TSFValue,
+ (pmlmeinfo->bcn_interval * 1024)) - 1024; /* us */
+
+ if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) ||
+ ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) {
+ /* pHalData->RegTxPause |= STOP_BCNQ;BIT(6) */
+ /* rtw_write8(padapter, REG_TXPAUSE,
+ (rtw_read8(Adapter, REG_TXPAUSE)|BIT(6))); */
+ StopTxBeacon(padapter);
+ }
+
+ reg_tsftr = REG_TSFTR;
+
+ /* disable related TSF function */
+ SetBcnCtrlReg23a(padapter, 0, EN_BCN_FUNCTION);
+
+ rtw_write32(padapter, reg_tsftr, tsf);
+ rtw_write32(padapter, reg_tsftr + 4, tsf >> 32);
+
+ /* enable related TSF function */
+ SetBcnCtrlReg23a(padapter, EN_BCN_FUNCTION, 0);
+
+ if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) ||
+ ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE))
+ ResumeTxBeacon(padapter);
+}
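The TSF written above is the driver's TSFValue rounded down to the previous beacon boundary and then backed off by one more TU (1024 us). A worked example with hypothetical numbers:

	/*
	 * bcn_interval = 100 TU  -> 100 * 1024 = 102400 us per beacon
	 * TSFValue     = 1234567 us
	 * remainder    = 1234567 % 102400 = 5767 us
	 * programmed   = 1234567 - 5767 - 1024 = 1227776 us
	 *              = 1228800 - 1024, i.e. 1024 us before the last TBTT
	 */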
+
+static void hw_var_set_mlme_disconnect(struct rtw_adapter *padapter)
+{
+ /* reject all data frames */
+ rtw_write16(padapter, REG_RXFLTMAP2, 0);
+
+ /* reset TSF */
+ rtw_write8(padapter, REG_DUAL_TSF_RST, BIT(0));
+
+ /* disable update TSF */
+ SetBcnCtrlReg23a(padapter, DIS_TSF_UDT, 0);
+}
+
+static void hw_var_set_mlme_join(struct rtw_adapter *padapter, u8 type)
+{
+ u8 RetryLimit = 0x30;
+
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (type == 0) { /* prepare to join */
+ u32 v32;
+
+ /* enable to rx data frames. Accept all data frames */
+ /* rtw_write32(padapter, REG_RCR,
+ rtw_read32(padapter, REG_RCR)|RCR_ADF); */
+ rtw_write16(padapter, REG_RXFLTMAP2, 0xFFFF);
+
+ v32 = rtw_read32(padapter, REG_RCR);
+ v32 |= RCR_CBSSID_DATA | RCR_CBSSID_BCN;
+ rtw_write32(padapter, REG_RCR, v32);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)
+ RetryLimit =
+ (pHalData->CustomerID == RT_CID_CCX) ? 7 : 48;
+ else /* Ad-hoc Mode */
+ RetryLimit = 0x7;
+ } else if (type == 1) { /* joinbss_event callback when join res < 0 */
+ /* config RCR to receive different BSSIDs & not to
+ receive data frames during linking */
+ rtw_write16(padapter, REG_RXFLTMAP2, 0);
+ } else if (type == 2) { /* sta add event callback */
+ /* enable update TSF */
+ SetBcnCtrlReg23a(padapter, 0, DIS_TSF_UDT);
+
+ if (check_fwstate(pmlmepriv,
+ WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) {
+ /* fix beacon issue for 8191su */
+ rtw_write8(padapter, 0x542, 0x02);
+ RetryLimit = 0x7;
+ }
+ }
+
+ rtw_write16(padapter, REG_RL,
+ RetryLimit << RETRY_LIMIT_SHORT_SHIFT | RetryLimit <<
+ RETRY_LIMIT_LONG_SHIFT);
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ switch (type) {
+ case 0:
+ /* prepare to join */
+ BT_WifiAssociateNotify(padapter, true);
+ break;
+ case 1:
+ /* joinbss_event callback when join res < 0 */
+ BT_WifiAssociateNotify(padapter, false);
+ break;
+ case 2:
+ /* sta add event callback */
+/* BT_WifiMediaStatusNotify(padapter, RT_MEDIA_CONNECT); */
+ break;
+ }
+#endif
+}
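The REG_RL write above programs the same limit into both the short and the long retry-count fields. The shift constants are defined elsewhere in the driver; assuming the short field sits at bits 15:8 and the long field at bits 7:0, the default case works out to:

	/*
	 * Assumed layout (RETRY_LIMIT_SHORT_SHIFT = 8, RETRY_LIMIT_LONG_SHIFT = 0):
	 *   RetryLimit = 0x30
	 *   REG_RL     = (0x30 << 8) | 0x30 = 0x3030
	 * i.e. 48 retries for both short and long frames outside the
	 * special station / ad-hoc cases handled above.
	 */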
+
+void SetHwReg8723A(struct rtw_adapter *padapter, u8 variable, u8 *val)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ u32 *val32 = (u32 *)val;
+
+ switch (variable) {
+ case HW_VAR_MEDIA_STATUS:
+ rtl8723a_set_media_status(padapter, *val);
+ break;
+
+ case HW_VAR_MEDIA_STATUS1:
+ rtl8723a_set_media_status1(padapter, *val);
+ break;
+
+ case HW_VAR_SET_OPMODE:
+ hw_var_set_opmode(padapter, *val);
+ break;
+
+ case HW_VAR_MAC_ADDR:
+ hw_var_set_macaddr(padapter, val);
+ break;
+
+ case HW_VAR_BSSID:
+ hw_var_set_bssid(padapter, val);
+ break;
+
+ case HW_VAR_BASIC_RATE:
+ HalSetBrateCfg23a(padapter, val);
+ break;
+
+ case HW_VAR_TXPAUSE:
+ rtl8723a_set_tx_pause(padapter, *val);
+ break;
+
+ case HW_VAR_BCN_FUNC:
+ rtl8723a_set_bcn_func(padapter, *val);
+ break;
+
+ case HW_VAR_CORRECT_TSF:
+ hw_var_set_correct_tsf(padapter);
+ break;
+
+ case HW_VAR_CHECK_BSSID:
+ rtl8723a_check_bssid(padapter, *val);
+ break;
+
+ case HW_VAR_MLME_DISCONNECT:
+ hw_var_set_mlme_disconnect(padapter);
+ break;
+
+ case HW_VAR_MLME_SITESURVEY:
+ rtl8723a_mlme_sitesurvey(padapter, *val);
+ break;
+
+ case HW_VAR_MLME_JOIN:
+ hw_var_set_mlme_join(padapter, *val);
+ break;
+
+ case HW_VAR_ON_RCR_AM:
+ rtl8723a_on_rcr_am(padapter);
+ break;
+
+ case HW_VAR_OFF_RCR_AM:
+ rtl8723a_off_rcr_am(padapter);
+ break;
+
+ case HW_VAR_BEACON_INTERVAL:
+ rtl8723a_set_beacon_interval(padapter, *((u16 *) val));
+ break;
+
+ case HW_VAR_SLOT_TIME:
+ rtl8723a_set_slot_time(padapter, *val);
+ break;
+
+ case HW_VAR_RESP_SIFS:
+ rtl8723a_set_resp_sifs(padapter, val[0], val[1],
+ val[2], val[3]);
+ break;
+
+ case HW_VAR_ACK_PREAMBLE:
+ rtl8723a_ack_preamble(padapter, *val);
+ break;
+
+ case HW_VAR_SEC_CFG:
+ rtl8723a_set_sec_cfg(padapter, *val);
+ break;
+
+ case HW_VAR_DM_FLAG:
+ rtl8723a_odm_support_ability_write(padapter, *val32);
+ break;
+ case HW_VAR_DM_FUNC_OP:
+ rtl8723a_odm_support_ability_backup(padapter, *val);
+ break;
+ case HW_VAR_DM_FUNC_SET:
+ rtl8723a_odm_support_ability_set(padapter, *val32);
+ break;
+
+ case HW_VAR_DM_FUNC_CLR:
+ rtl8723a_odm_support_ability_clr(padapter, *val32);
+ break;
+
+ case HW_VAR_CAM_EMPTY_ENTRY:
+ rtl8723a_cam_empty_entry(padapter, *val);
+ break;
+
+ case HW_VAR_CAM_INVALID_ALL:
+ rtl8723a_cam_invalid_all(padapter);
+ break;
+
+ case HW_VAR_CAM_WRITE:
+ rtl8723a_cam_write(padapter, val32[0], val32[1]);
+ break;
+
+ case HW_VAR_AC_PARAM_VO:
+ rtl8723a_set_ac_param_vo(padapter, *val32);
+ break;
+
+ case HW_VAR_AC_PARAM_VI:
+ rtl8723a_set_ac_param_vi(padapter, *val32);
+ break;
+
+ case HW_VAR_AC_PARAM_BE:
+ rtl8723a_set_ac_param_be(padapter, *val32);
+ break;
+
+ case HW_VAR_AC_PARAM_BK:
+ rtl8723a_set_ac_param_bk(padapter, *val32);
+ break;
+
+ case HW_VAR_ACM_CTRL:
+ rtl8723a_set_acm_ctrl(padapter, *val);
+ break;
+
+ case HW_VAR_AMPDU_MIN_SPACE:
+ rtl8723a_set_ampdu_min_space(padapter, *val);
+ break;
+
+ case HW_VAR_AMPDU_FACTOR:
+ rtl8723a_set_ampdu_factor(padapter, *val);
+ break;
+
+ case HW_VAR_RXDMA_AGG_PG_TH:
+ rtl8723a_set_rxdma_agg_pg_th(padapter, *val);
+ break;
+
+ case HW_VAR_H2C_FW_PWRMODE:
+ rtl8723a_set_FwPwrMode_cmd(padapter, *val);
+ break;
+
+ case HW_VAR_H2C_FW_JOINBSSRPT:
+ rtl8723a_set_FwJoinBssReport_cmd(padapter, *val);
+ break;
+
+#ifdef CONFIG_8723AU_P2P
+ case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
+ rtl8723a_set_p2p_ps_offload_cmd(padapter, *val);
+ break;
+#endif /* CONFIG_8723AU_P2P */
+
+ case HW_VAR_INITIAL_GAIN:
+ rtl8723a_set_initial_gain(padapter, *val32);
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ pHalData->EfuseUsedBytes = *((u16 *) val);
+ break;
+ case HW_VAR_EFUSE_BT_BYTES:
+ pHalData->BTEfuseUsedBytes = *((u16 *) val);
+ break;
+ case HW_VAR_FIFO_CLEARN_UP:
+ rtl8723a_fifo_cleanup(padapter);
+ break;
+ case HW_VAR_CHECK_TXBUF:
+ break;
+ case HW_VAR_APFM_ON_MAC:
+ rtl8723a_set_apfm_on_mac(padapter, *val);
+ break;
+
+ case HW_VAR_NAV_UPPER:
+ rtl8723a_set_nav_upper(padapter, *val32);
+ break;
+ case HW_VAR_BCN_VALID:
+ rtl8723a_bcn_valid(padapter);
+ break;
+ default:
+ break;
+ }
+
+}
+
+void GetHwReg8723A(struct rtw_adapter *padapter, u8 variable, u8 *val)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+
+ switch (variable) {
+ case HW_VAR_BASIC_RATE:
+ *((u16 *) val) = pHalData->BasicRateSet;
+ break;
+
+ case HW_VAR_TXPAUSE:
+ *val = rtw_read8(padapter, REG_TXPAUSE);
+ break;
+
+ case HW_VAR_BCN_VALID:
+ /* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2 */
+ val[0] = (BIT0 & rtw_read8(padapter, REG_TDECTRL + 2)) ? true :
+ false;
+ break;
+
+ case HW_VAR_RF_TYPE:
+ *val = pHalData->rf_type;
+ break;
+
+ case HW_VAR_DM_FLAG:
+ {
+ struct dm_odm_t *podmpriv = &pHalData->odmpriv;
+ *((u32 *) val) = podmpriv->SupportAbility;
+ }
+ break;
+
+ case HW_VAR_FWLPS_RF_ON:
+ {
+ /* When we halt the NIC, we should check whether FW LPS has been left. */
+ u32 valRCR;
+
+ if ((padapter->bSurpriseRemoved == true) ||
+ (padapter->pwrctrlpriv.rf_pwrstate == rf_off)) {
+ /* If it is in HW/SW Radio OFF or IPS state, we do
+ not check Fw LPS Leave, because the Fw is unloaded. */
+ *val = true;
+ } else {
+ valRCR = rtw_read32(padapter, REG_RCR);
+ valRCR &= 0x00070000;
+ if (valRCR)
+ *val = false;
+ else
+ *val = true;
+ }
+ }
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ *((u16 *) val) = pHalData->EfuseUsedBytes;
+ break;
+
+ case HW_VAR_EFUSE_BT_BYTES:
+ *((u16 *) val) = pHalData->BTEfuseUsedBytes;
+ break;
+
+ case HW_VAR_APFM_ON_MAC:
+ *val = pHalData->bMacPwrCtrlOn;
+ break;
+ case HW_VAR_CHK_HI_QUEUE_EMPTY:
+ *val =
+ ((rtw_read32(padapter, REG_HGQ_INFORMATION) & 0x0000ff00) ==
+ 0) ? true : false;
+ break;
+ }
+}
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+
+void rtl8723a_SingleDualAntennaDetection(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData;
+ struct dm_odm_t *pDM_Odm;
+ struct sw_ant_sw *pDM_SWAT_Table;
+ u8 i;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pDM_Odm = &pHalData->odmpriv;
+ pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+
+ /* */
+ /* <Roger_Notes> RTL8723A Single and Dual antenna dynamic detection
+ mechanism when RF power state is on. */
+ /* We should take power tracking, IQK, LCK, RCK RF read/write
+ operations into consideration. */
+ /* 2011.12.15. */
+ /* */
+ if (!pHalData->bAntennaDetected) {
+ u8 btAntNum = BT_GetPGAntNum(padapter);
+
+ /* Set default antenna B status */
+ if (btAntNum == Ant_x2)
+ pDM_SWAT_Table->ANTB_ON = true;
+ else if (btAntNum == Ant_x1)
+ pDM_SWAT_Table->ANTB_ON = false;
+ else
+ pDM_SWAT_Table->ANTB_ON = true;
+
+ if (pHalData->CustomerID != RT_CID_TOSHIBA) {
+ for (i = 0; i < MAX_ANTENNA_DETECTION_CNT; i++) {
+ if (ODM_SingleDualAntennaDetection
+ (&pHalData->odmpriv, ANTTESTALL) == true)
+ break;
+ }
+
+ /* Set default antenna number for BT coexistence */
+ if (btAntNum == Ant_x2)
+ BT_SetBtCoexCurrAntNum(padapter,
+ pDM_SWAT_Table->
+ ANTB_ON ? 2 : 1);
+ }
+ pHalData->bAntennaDetected = true;
+ }
+}
+#endif /* CONFIG_8723AU_BT_COEXIST */
+
+void rtl8723a_clone_haldata(struct rtw_adapter *dst_adapter,
+ struct rtw_adapter *src_adapter)
+{
+ memcpy(dst_adapter->HalData, src_adapter->HalData,
+ dst_adapter->hal_data_sz);
+}
+
+void rtl8723a_start_thread(struct rtw_adapter *padapter)
+{
+}
+
+void rtl8723a_stop_thread(struct rtw_adapter *padapter)
+{
+}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c b/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
new file mode 100644
index 000000000000..8400e6e2fca8
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
@@ -0,0 +1,1162 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8723A_PHYCFG_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <rtl8723a_hal.h>
+
+/*---------------------------Define Local Constant---------------------------*/
+/* Channel switch: the size of the command tables for channel switching */
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+/*---------------------------Define Local Constant---------------------------*/
+
+/*------------------------Define global variable-----------------------------*/
+
+/*------------------------Define local variable------------------------------*/
+
+/*--------------------Define export function prototype-----------------------*/
+/* Please refer to header file */
+/*--------------------Define export function prototype-----------------------*/
+
+/*----------------------------Function Body----------------------------------*/
+/* */
+/* 1. BB register R/W API */
+/* */
+
+/**
+* Function: phy_CalculateBitShift
+*
+* OverView: Get shifted position of the BitMask
+*
+* Input:
+* u32 BitMask,
+*
+* Output: none
+* Return: u32 The bit position of the lowest set bit in the mask
+*/
+static u32 phy_CalculateBitShift(u32 BitMask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((BitMask>>i) & 0x1) == 1)
+ break;
+ }
+
+ return i;
+}
+
+/**
+* Function: PHY_QueryBBReg
+*
+* OverView: Read "specific bits" from BB register
+*
+* Input:
+* struct rtw_adapter * Adapter,
+* u32 RegAddr, Target address to be read back
+* u32 BitMask Target bit position in the
+* target address to be read back
+* Output:
+* None
+* Return:
+* u32 Data The readback register value
+* Note:
+* This function is equal to "GetRegSetting" in PHY programming guide
+*/
+u32
+PHY_QueryBBReg(struct rtw_adapter *Adapter, u32 RegAddr, u32 BitMask)
+{
+ u32 ReturnValue = 0, OriginalValue, BitShift;
+
+ OriginalValue = rtw_read32(Adapter, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ ReturnValue = (OriginalValue & BitMask) >> BitShift;
+ return ReturnValue;
+}
+
+/**
+* Function: PHY_SetBBReg
+*
+* OverView: Write "Specific bits" to BB register (page 8~)
+*
+* Input:
+* struct rtw_adapter * Adapter,
+* u32 RegAddr, Target address to be modified
+* u32 BitMask Target bit position in the
+* target address to be modified
+* u32 Data The new register value in the
+* target bit position of the
+* target address
+*
+* Output:
+* None
+* Return:
+* None
+* Note:
+* This function is equal to "PutRegSetting" in PHY programming guide
+*/
+
+void
+PHY_SetBBReg(struct rtw_adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ u32 OriginalValue, BitShift;
+
+ /* RT_TRACE(COMP_RF, DBG_TRACE, ("--->PHY_SetBBReg(): RegAddr(%#lx), BitMask(%#lx), Data(%#lx)\n", RegAddr, BitMask, Data)); */
+
+ if (BitMask != bMaskDWord) {/* if not "double word" write */
+ OriginalValue = rtw_read32(Adapter, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ Data = ((OriginalValue & (~BitMask)) | (Data << BitShift));
+ }
+
+ rtw_write32(Adapter, RegAddr, Data);
+
+ /* RTPRINT(FPHY, PHY_BBW, ("BBW MASK = 0x%lx Addr[0x%lx]= 0x%lx\n", BitMask, RegAddr, Data)); */
+ /* RT_TRACE(COMP_RF, DBG_TRACE, ("<---PHY_SetBBReg(): RegAddr(%#lx), BitMask(%#lx), Data(%#lx)\n", RegAddr, BitMask, Data)); */
+}
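A brief usage sketch of the two helpers above; 0x800 is only a sample BB address and example_bb_field_update() is not part of the patch. phy_CalculateBitShift(0x0000F000) evaluates to 12, so both helpers move the 4-bit field at bits [15:12] up and down automatically:

	/* Illustrative sketch only, assuming a writable field at bits [15:12]
	 * of BB register 0x800. */
	static void example_bb_field_update(struct rtw_adapter *Adapter)
	{
		u32 before, after;

		before = PHY_QueryBBReg(Adapter, 0x800, 0x0000F000);
		/* reg = (reg & ~0xF000) | (0x5 << 12) */
		PHY_SetBBReg(Adapter, 0x800, 0x0000F000, 0x5);
		after = PHY_QueryBBReg(Adapter, 0x800, 0x0000F000); /* == 0x5 */
		DBG_8723A("BB field changed from 0x%x to 0x%x\n", before, after);
	}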
+
+/* */
+/* 2. RF register R/W API */
+/* */
+
+/**
+* Function: phy_RFSerialRead
+*
+* OverView: Read register from RF chips
+*
+* Input:
+* struct rtw_adapter * Adapter,
+* enum RF_RADIO_PATH eRFPath, Radio path of A/B/C/D
+* u32 Offset, The target address to be read
+*
+* Output: None
+* Return: u32 reback value
+* Note: Threre are three types of serial operations:
+* 1. Software serial write
+* 2. Hardware LSSI-Low Speed Serial Interface
+* 3. Hardware HSSI-High speed
+* serial write. Driver need to implement (1) and (2).
+* This function is equal to the combination of RF_ReadReg() and
+* RFLSSIRead()
+*/
+static u32
+phy_RFSerialRead(struct rtw_adapter *Adapter, enum RF_RADIO_PATH eRFPath,
+ u32 Offset)
+{
+ u32 retValue = 0;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct bb_reg_define *pPhyReg = &pHalData->PHYRegDef[eRFPath];
+ u32 NewOffset;
+ u32 tmplong, tmplong2;
+ u8 RfPiEnable = 0;
+ /* */
+ /* Make sure RF register offset is correct */
+ /* */
+ Offset &= 0x3f;
+
+ /* */
+ /* Switch page for 8256 RF IC */
+ /* */
+ NewOffset = Offset;
+
+ /* 2009/06/17 MH We cannot execute IO for power save or
+ other accident mode. */
+ /* if (RT_CANNOT_IO(Adapter)) */
+ /* */
+ /* RTPRINT(FPHY, PHY_RFR, ("phy_RFSerialRead return all one\n")); */
+ /* return 0xFFFFFFFF; */
+ /* */
+
+ /* For 92S LSSI Read RFLSSIRead */
+ /* For RF A/B write 0x824/0x82c (does not work in the future) */
+ /* We must use 0x824 for RF A and B to execute read trigger */
+ tmplong = PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord);
+ if (eRFPath == RF_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = PHY_QueryBBReg(Adapter, pPhyReg->rfHSSIPara2,
+ bMaskDWord);
+
+ tmplong2 = (tmplong2 & ~bLSSIReadAddress) |
+ (NewOffset << 23) | bLSSIReadEdge; /* T65 RF */
+
+ PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2,
+ bMaskDWord, tmplong & (~bLSSIReadEdge));
+ udelay(10);/* PlatformStallExecution(10); */
+
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord, tmplong2);
+ udelay(100);/* PlatformStallExecution(100); */
+
+ PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord,
+ tmplong | bLSSIReadEdge);
+ udelay(10);/* PlatformStallExecution(10); */
+
+ if (eRFPath == RF_PATH_A)
+ RfPiEnable = (u8)PHY_QueryBBReg(Adapter,
+ rFPGA0_XA_HSSIParameter1, BIT8);
+ else if (eRFPath == RF_PATH_B)
+ RfPiEnable = (u8)PHY_QueryBBReg(Adapter,
+ rFPGA0_XB_HSSIParameter1, BIT8);
+
+ if (RfPiEnable) {
+ /* Read from BBreg8b8, 12 bits for 8190, 20bits for T65 RF */
+ retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBackPi,
+ bLSSIReadBackData);
+ /* DBG_8723A("Readback from RF-PI : 0x%x\n", retValue); */
+ } else {
+ /* Read from BBreg8a0, 12 bits for 8190, 20 bits for T65 RF */
+ retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBack,
+ bLSSIReadBackData);
+ /* DBG_8723A("Readback from RF-SI : 0x%x\n", retValue); */
+ }
+ /* DBG_8723A("RFR-%d Addr[0x%x]= 0x%x\n", eRFPath, pPhyReg->rfLSSIReadBack, retValue); */
+
+ return retValue;
+}
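For reference, the read-address packing above places the (masked) offset at bit 23 before the read edge is toggled. With a hypothetical offset of 0x18:

	/*
	 *   NewOffset << 23 = 0x18 << 23 = 0x0C000000
	 *   tmplong2        = (old & ~bLSSIReadAddress) | 0x0C000000 | bLSSIReadEdge
	 * The edge bit is dropped and re-asserted on rFPGA0_XA_HSSIParameter2
	 * to trigger the LSSI read; the result is then taken from the PI or SI
	 * readback register depending on RfPiEnable.
	 */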
+
+/**
+* Function: phy_RFSerialWrite
+*
+* OverView: Write data to RF register (page 8~)
+*
+* Input:
+* struct rtw_adapter * Adapter,
+* enum RF_RADIO_PATH eRFPath, Radio path of A/B/C/D
+* u32 Offset, The target address to be written
+* u32 Data The new register Data in the target
+* bit position of the target to be written
+*
+* Output:
+* None
+* Return:
+* None
+* Note:
+* There are three types of serial operations:
+* 1. Software serial write
+* 2. Hardware LSSI-Low Speed Serial Interface
+* 3. Hardware HSSI-High Speed Serial Interface write.
+* The driver needs to implement (1) and (2).
+* This function is equal to the combination of RF_ReadReg() and
+* RFLSSIRead()
+*
+* Note: For RF8256 only
+* The total count of RTL8256 (Zebra4) registers is around 36, but it only
+* employs a 4-bit RF address. RTL8256 uses "register mode control bits"
+* (Reg00[12], Reg00[10]) to access register addresses bigger than 0xf.
+* See "Appendix-4 in PHY Configuration programming guide" for more details.
+* Thus, we define a sub-function for RTL8256 register address conversion
+* ===========================================================
+* Register Mode: RegCTL[1] RegCTL[0] Note
+* (Reg00[12]) (Reg00[10])
+* ===========================================================
+* Reg_Mode0 0 x Reg 0 ~15(0x0 ~ 0xf)
+* ------------------------------------------------------------------
+* Reg_Mode1 1 0 Reg 16 ~30(0x1 ~ 0xf)
+* ------------------------------------------------------------------
+* Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf)
+* ------------------------------------------------------------------
+*
+* 2008/09/02 MH Add 92S RF definition
+*/
+static void
+phy_RFSerialWrite(struct rtw_adapter *Adapter, enum RF_RADIO_PATH eRFPath,
+ u32 Offset, u32 Data)
+{
+ u32 DataAndAddr = 0;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct bb_reg_define *pPhyReg = &pHalData->PHYRegDef[eRFPath];
+ u32 NewOffset;
+
+ /* 2009/06/17 MH We cannot execute IO for power save or
+ other accident mode. */
+ /* if (RT_CANNOT_IO(Adapter)) */
+ /* */
+ /* RTPRINT(FPHY, PHY_RFW, ("phy_RFSerialWrite stop\n")); */
+ /* return; */
+ /* */
+
+ Offset &= 0x3f;
+
+ /* */
+ /* Shadow Update */
+ /* */
+ /* PHY_RFShadowWrite(Adapter, eRFPath, Offset, Data); */
+
+ /* */
+ /* Switch page for 8256 RF IC */
+ /* */
+ NewOffset = Offset;
+
+ /* */
+ /* Put write addr in [5:0] and write data in [31:16] */
+ /* */
+ /* DataAndAddr = (Data<<16) | (NewOffset&0x3f); */
+ /* T65 RF */
+ DataAndAddr = ((NewOffset<<20) | (Data&0x000fffff)) & 0x0fffffff;
+
+ /* */
+ /* Write Operation */
+ /* */
+ PHY_SetBBReg(Adapter, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
+ /* RTPRINT(FPHY, PHY_RFW, ("RFW-%d Addr[0x%lx]= 0x%lx\n", eRFPath, pPhyReg->rf3wireOffset, DataAndAddr)); */
+
+}
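The T65 packing used above puts the 6-bit offset at bits [25:20] and the 20-bit data below it. A worked example with hypothetical values:

	/*
	 *   Offset = 0x18, Data = 0x12345
	 *   DataAndAddr = ((0x18 << 20) | (0x12345 & 0x000fffff)) & 0x0fffffff
	 *               = 0x01812345
	 * One 32-bit write to pPhyReg->rf3wireOffset then clocks both the
	 * address and the data out over the 3-wire interface.
	 */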
+
+/**
+* Function: PHY_QueryRFReg
+*
+* OverView: Query "Specific bits" from RF register (page 8~)
+*
+* Input:
+* struct rtw_adapter * Adapter,
+* enum RF_RADIO_PATH eRFPath, Radio path of A/B/C/D
+* u32 RegAddr, The target address to be read
+* u32 BitMask The target bit position in the target
+* address to be read
+*
+* Output:
+* None
+* Return:
+* u32 Readback value
+* Note:
+* This function is equal to "GetRFRegSetting" in PHY programming guide
+*/
+u32
+PHY_QueryRFReg(struct rtw_adapter *Adapter, enum RF_RADIO_PATH eRFPath,
+ u32 RegAddr, u32 BitMask)
+{
+ u32 Original_Value, Readback_Value, BitShift;
+ /* struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); */
+ /* u8 RFWaitCounter = 0; */
+ /* _irqL irqL; */
+
+ Original_Value = phy_RFSerialRead(Adapter, eRFPath, RegAddr);
+
+ BitShift = phy_CalculateBitShift(BitMask);
+ Readback_Value = (Original_Value & BitMask) >> BitShift;
+
+ return Readback_Value;
+}
+
+/**
+* Function: PHY_SetRFReg
+*
+* OverView: Write "Specific bits" to RF register (page 8~)
+*
+* Input:
+* struct rtw_adapter * Adapter,
+* enum RF_RADIO_PATH eRFPath, Radio path of A/B/C/D
+* u32 RegAddr, The target address to be modified
+* u32 BitMask The target bit position in the target
+* address to be modified
+* u32 Data The new register Data in the target
+* bit position of the target address
+*
+* Output:
+* None
+* Return:
+* None
+* Note: This function is equal to "PutRFRegSetting" in PHY programming guide
+*/
+void
+PHY_SetRFReg(struct rtw_adapter *Adapter, enum RF_RADIO_PATH eRFPath,
+ u32 RegAddr, u32 BitMask, u32 Data)
+{
+ /* struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); */
+ /* u8 RFWaitCounter = 0; */
+ u32 Original_Value, BitShift;
+
+ /* RF data is 12 bits only */
+ if (BitMask != bRFRegOffsetMask) {
+ Original_Value = phy_RFSerialRead(Adapter, eRFPath, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ Data = ((Original_Value & (~BitMask)) | (Data << BitShift));
+ }
+
+ phy_RFSerialWrite(Adapter, eRFPath, RegAddr, Data);
+}
+
+/* 3. Initial MAC/BB/RF config by reading MAC/BB/RF txt. */
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_MACConfig8723A
+ *
+ * Overview: Config MAC by header file or parameter file.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 08/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+s32 PHY_MACConfig8723A(struct rtw_adapter *Adapter)
+{
+ int rtStatus = _SUCCESS;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ s8 *pszMACRegFile;
+ s8 sz8723MACRegFile[] = RTL8723_PHY_MACREG;
+ bool is92C = IS_92C_SERIAL(pHalData->VersionID);
+
+ pszMACRegFile = sz8723MACRegFile;
+
+ /* */
+ /* Config MAC */
+ /* */
+ if (HAL_STATUS_FAILURE ==
+ ODM_ConfigMACWithHeaderFile23a(&pHalData->odmpriv))
+ rtStatus = _FAIL;
+
+ /* 2010.07.13 AMPDU aggregation number 9 */
+ /* rtw_write16(Adapter, REG_MAX_AGGR_NUM, MAX_AGGR_NUM); */
+ rtw_write8(Adapter, REG_MAX_AGGR_NUM, 0x0A); /* By tynli. 2010.11.18. */
+ if (is92C && (BOARD_USB_DONGLE == pHalData->BoardType))
+ rtw_write8(Adapter, 0x40, 0x04);
+
+ return rtStatus;
+}
+
+/**
+* Function: phy_InitBBRFRegisterDefinition
+*
+* OverView: Initialize Register definition offset for Radio Path A/B/C/D
+*
+* Input:
+* struct rtw_adapter * Adapter,
+*
+* Output: None
+* Return: None
+* Note:
+* The initialization value is constant and it should never be changed
+*/
+static void
+phy_InitBBRFRegisterDefinition(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+
+ /* RF Interface Software Control */
+ /* 16 LSBs if read 32-bit from 0x870 */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW;
+ /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW;
+
+ /* RF Interface Readback Value */
+ /* 16 LSBs if read 32-bit from 0x8E0 */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB;
+ /* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;
+
+ /* RF Interface Output (and Enable) */
+ /* 16 LSBs if read 32-bit from 0x860 */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE;
+ /* 16 LSBs if read 32-bit from 0x864 */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE;
+
+ /* RF Interface (Output and) Enable */
+ /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE;
+ /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE;
+
+ /* Addr of LSSI. Write RF registers by driver */
+ pHalData->PHYRegDef[RF_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter;
+ pHalData->PHYRegDef[RF_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
+
+ /* RF parameter */
+ /* BB Band Select */
+ pHalData->PHYRegDef[RF_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter;
+ pHalData->PHYRegDef[RF_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
+
+ /* Tx AGC Gain Stage (same for all path. Should we remove this?) */
+ pHalData->PHYRegDef[RF_PATH_A].rfTxGainStage = rFPGA0_TxGainStage;
+ pHalData->PHYRegDef[RF_PATH_B].rfTxGainStage = rFPGA0_TxGainStage;
+
+ /* Transceiver A~D HSSI Parameter-1 */
+ /* wire control parameter1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1;
+ /* wire control parameter1 */
+ pHalData->PHYRegDef[RF_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1;
+
+ /* Transceiver A~D HSSI Parameter-2 */
+ /* wire control parameter2 */
+ pHalData->PHYRegDef[RF_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2;
+ /* wire control parameter2 */
+ pHalData->PHYRegDef[RF_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2;
+
+ /* RF switch Control */
+ pHalData->PHYRegDef[RF_PATH_A].rfSwitchControl =
+ rFPGA0_XAB_SwitchControl; /* TR/Ant switch control */
+ pHalData->PHYRegDef[RF_PATH_B].rfSwitchControl =
+ rFPGA0_XAB_SwitchControl;
+
+ /* AGC control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
+ pHalData->PHYRegDef[RF_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
+
+ /* AGC control 2 */
+ pHalData->PHYRegDef[RF_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
+ pHalData->PHYRegDef[RF_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
+
+ /* RX AFE control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
+
+ /* RX AFE control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
+ pHalData->PHYRegDef[RF_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
+
+ /* Tx AFE control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
+
+ /* Tx AFE control 2 */
+ pHalData->PHYRegDef[RF_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
+ pHalData->PHYRegDef[RF_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
+
+ /* Transceiver LSSI Readback SI mode */
+ pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
+ pHalData->PHYRegDef[RF_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
+
+ /* Transceiver LSSI Readback PI mode */
+ pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBackPi =
+ TransceiverA_HSPI_Readback;
+ pHalData->PHYRegDef[RF_PATH_B].rfLSSIReadBackPi =
+ TransceiverB_HSPI_Readback;
+}
+
+/* The following is for High Power PA */
+static void
+storePwrIndexDiffRateOffset(struct rtw_adapter *Adapter, u32 RegAddr,
+ u32 BitMask, u32 Data)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+
+ if (RegAddr == rTxAGC_A_Rate18_06) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][0] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][0])); */
+ }
+ if (RegAddr == rTxAGC_A_Rate54_24) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][1] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][1])); */
+ }
+ if (RegAddr == rTxAGC_A_CCK1_Mcs32) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][6] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][6])); */
+ }
+ if (RegAddr == rTxAGC_B_CCK11_A_CCK2_11 && BitMask == 0xffffff00) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][7] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][7])); */
+ }
+ if (RegAddr == rTxAGC_A_Mcs03_Mcs00) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][2] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][2])); */
+ }
+ if (RegAddr == rTxAGC_A_Mcs07_Mcs04) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][3] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][3])); */
+ }
+ if (RegAddr == rTxAGC_A_Mcs11_Mcs08) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][4] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][4])); */
+ }
+ if (RegAddr == rTxAGC_A_Mcs15_Mcs12) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][5] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][5])); */
+ }
+ if (RegAddr == rTxAGC_B_Rate18_06) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][8] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][8])); */
+ }
+ if (RegAddr == rTxAGC_B_Rate54_24) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][9] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][9])); */
+ }
+ if (RegAddr == rTxAGC_B_CCK1_55_Mcs32) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][14] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][14])); */
+ }
+ if (RegAddr == rTxAGC_B_CCK11_A_CCK2_11 && BitMask == 0x000000ff) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][15] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][15])); */
+ }
+ if (RegAddr == rTxAGC_B_Mcs03_Mcs00) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][10] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][10])); */
+ }
+ if (RegAddr == rTxAGC_B_Mcs07_Mcs04) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][11] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][11])); */
+ }
+ if (RegAddr == rTxAGC_B_Mcs11_Mcs08) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][12] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][12])); */
+ }
+ if (RegAddr == rTxAGC_B_Mcs15_Mcs12) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][13] = Data;
+ /* RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%lx\n",
+ pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[
+ pHalData->pwrGroupCnt][13])); */
+ pHalData->pwrGroupCnt++;
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: phy_ConfigBBWithPgHeaderFile
+ *
+ * Overview: Config PHY_REG_PG array
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/06/2008 MHC Add later!!!!!!.. Please modify for new files!!!!
+ * 11/10/2008 tynli Modify to new files.
+ *---------------------------------------------------------------------------*/
+static int
+phy_ConfigBBWithPgHeaderFile(struct rtw_adapter *Adapter, u8 ConfigType)
+{
+ int i;
+ u32 *Rtl819XPHY_REGArray_Table_PG;
+ u16 PHY_REGArrayPGLen;
+
+ PHY_REGArrayPGLen = Rtl8723_PHY_REG_Array_PGLength;
+ Rtl819XPHY_REGArray_Table_PG = (u32 *)Rtl8723_PHY_REG_Array_PG;
+
+ if (ConfigType == BaseBand_Config_PHY_REG) {
+ for (i = 0; i < PHY_REGArrayPGLen; i = i + 3) {
+ storePwrIndexDiffRateOffset(Adapter,
+ Rtl819XPHY_REGArray_Table_PG[i],
+ Rtl819XPHY_REGArray_Table_PG[i+1],
+ Rtl819XPHY_REGArray_Table_PG[i+2]);
+ }
+ }
+
+ return _SUCCESS;
+} /* phy_ConfigBBWithPgHeaderFile */
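phy_ConfigBBWithPgHeaderFile() walks the PG array as flat (RegAddr, BitMask, Data) triplets and lets storePwrIndexDiffRateOffset() route each Data word into MCSTxPowerLevelOriginalOffset[][]. The sketch below only illustrates that layout; the register names come from the checks above, while the mask/data values are hypothetical and example_phy_reg_pg is not the table shipped as Rtl8723_PHY_REG_Array_PG.

	/* Layout sketch only -- hypothetical mask/data values. */
	static const u32 example_phy_reg_pg[] = {
		rTxAGC_A_Rate18_06,   0xffffffff, 0x02020202, /* -> [grp][0] */
		rTxAGC_A_Rate54_24,   0xffffffff, 0x02020202, /* -> [grp][1] */
		rTxAGC_B_Mcs15_Mcs12, 0xffffffff, 0x02020202, /* -> [grp][13], grp++ */
	};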
+
+static void
+phy_BB8192C_Config_1T(struct rtw_adapter *Adapter)
+{
+ /* for path - B */
+ PHY_SetBBReg(Adapter, rFPGA0_TxInfo, 0x3, 0x2);
+ PHY_SetBBReg(Adapter, rFPGA1_TxInfo, 0x300033, 0x200022);
+
+ /* 20100519 Joseph: Add for 1T2R config. Suggested by Kevin,
+ Jenyu and Yunan. */
+ PHY_SetBBReg(Adapter, rCCK0_AFESetting, bMaskByte3, 0x45);
+ PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, bMaskByte0, 0x23);
+ /* B path first AGC */
+ PHY_SetBBReg(Adapter, rOFDM0_AGCParameter1, 0x30, 0x1);
+
+ PHY_SetBBReg(Adapter, 0xe74, 0x0c000000, 0x2);
+ PHY_SetBBReg(Adapter, 0xe78, 0x0c000000, 0x2);
+ PHY_SetBBReg(Adapter, 0xe7c, 0x0c000000, 0x2);
+ PHY_SetBBReg(Adapter, 0xe80, 0x0c000000, 0x2);
+ PHY_SetBBReg(Adapter, 0xe88, 0x0c000000, 0x2);
+}
+
+static int
+phy_BB8723a_Config_ParaFile(struct rtw_adapter *Adapter)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(Adapter);
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ int rtStatus = _SUCCESS;
+
+ u8 sz8723BBRegFile[] = RTL8723_PHY_REG;
+ u8 sz8723AGCTableFile[] = RTL8723_AGC_TAB;
+ u8 sz8723BBRegPgFile[] = RTL8723_PHY_REG_PG;
+ u8 sz8723BBRegMpFile[] = RTL8723_PHY_REG_MP;
+
+ u8 *pszBBRegFile = NULL, *pszAGCTableFile = NULL;
+ u8 *pszBBRegPgFile = NULL, *pszBBRegMpFile = NULL;
+
+ /* RT_TRACE(COMP_INIT, DBG_TRACE, ("==>phy_BB8192S_Config_ParaFile\n")); */
+
+ pszBBRegFile = sz8723BBRegFile;
+ pszAGCTableFile = sz8723AGCTableFile;
+ pszBBRegPgFile = sz8723BBRegPgFile;
+ pszBBRegMpFile = sz8723BBRegMpFile;
+
+ /* */
+ /* 1. Read PHY_REG.TXT BB INIT!! */
+ /* We will separate as 88C / 92C according to chip version */
+ /* */
+ if (HAL_STATUS_FAILURE == ODM_ConfigBBWithHeaderFile23a(&pHalData->odmpriv,
+ CONFIG_BB_PHY_REG))
+ rtStatus = _FAIL;
+ if (rtStatus != _SUCCESS)
+ goto phy_BB8190_Config_ParaFile_Fail;
+
+ /* */
+ /* 20100318 Joseph: Config 2T2R to 1T2R if necessary. */
+ /* */
+ if (pHalData->rf_type == RF_1T2R) {
+ phy_BB8192C_Config_1T(Adapter);
+ DBG_8723A("phy_BB8723a_Config_ParaFile():Config to 1T!!\n");
+ }
+
+ /* */
+ /* 2. If EEPROM or EFUSE autoload is OK, we must config by
+ PHY_REG_PG.txt */
+ /* */
+ if (pEEPROM->bautoload_fail_flag == false) {
+ pHalData->pwrGroupCnt = 0;
+
+ rtStatus = phy_ConfigBBWithPgHeaderFile(Adapter,
+ BaseBand_Config_PHY_REG);
+ }
+
+ if (rtStatus != _SUCCESS)
+ goto phy_BB8190_Config_ParaFile_Fail;
+
+ /* */
+ /* 3. BB AGC table Initialization */
+ /* */
+ if (HAL_STATUS_FAILURE == ODM_ConfigBBWithHeaderFile23a(&pHalData->odmpriv,
+ CONFIG_BB_AGC_TAB))
+ rtStatus = _FAIL;
+
+phy_BB8190_Config_ParaFile_Fail:
+
+ return rtStatus;
+}
+
+int
+PHY_BBConfig8723A(struct rtw_adapter *Adapter)
+{
+ int rtStatus = _SUCCESS;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u8 TmpU1B = 0;
+ u8 CrystalCap;
+
+ phy_InitBBRFRegisterDefinition(Adapter);
+
+ /* Suggested by Scott. tynli_test. 2010.12.30. */
+ /* 1. 0x28[1] = 1 */
+ TmpU1B = rtw_read8(Adapter, REG_AFE_PLL_CTRL);
+ udelay(2);
+ rtw_write8(Adapter, REG_AFE_PLL_CTRL, (TmpU1B|BIT1));
+ udelay(2);
+
+ /* 2. 0x29[7:0] = 0xFF */
+ rtw_write8(Adapter, REG_AFE_PLL_CTRL+1, 0xff);
+ udelay(2);
+
+ /* 3. 0x02[1:0] = 2b'11 */
+ TmpU1B = rtw_read8(Adapter, REG_SYS_FUNC_EN);
+ rtw_write8(Adapter, REG_SYS_FUNC_EN,
+ (TmpU1B | FEN_BB_GLB_RSTn | FEN_BBRSTB));
+
+ /* 4. 0x25[6] = 0 */
+ TmpU1B = rtw_read8(Adapter, REG_AFE_XTAL_CTRL + 1);
+ rtw_write8(Adapter, REG_AFE_XTAL_CTRL+1, (TmpU1B & (~BIT6)));
+
+ /* 5. 0x24[20] = 0 Advised by SD3 Alex Wang. 2011.02.09. */
+ TmpU1B = rtw_read8(Adapter, REG_AFE_XTAL_CTRL+2);
+ rtw_write8(Adapter, REG_AFE_XTAL_CTRL+2, (TmpU1B & (~BIT4)));
+
+ /* 6. 0x1f[7:0] = 0x07 */
+ rtw_write8(Adapter, REG_RF_CTRL, 0x07);
+
+ /* */
+ /* Config BB and AGC */
+ /* */
+ rtStatus = phy_BB8723a_Config_ParaFile(Adapter);
+
+/* only for B-cut */
+ if (pHalData->EEPROMVersion >= 0x01) {
+ CrystalCap = pHalData->CrystalCap & 0x3F;
+ PHY_SetBBReg(Adapter, REG_MAC_PHY_CTRL, 0xFFF000,
+ (CrystalCap | (CrystalCap << 6)));
+ }
+
+ PHY_SetBBReg(Adapter, REG_LDOA15_CTRL, bMaskDWord, 0x01572505);
+ return rtStatus;
+}
+
+int
+PHY_RFConfig8723A(struct rtw_adapter *Adapter)
+{
+ int rtStatus = _SUCCESS;
+
+ /* */
+ /* RF config */
+ /* */
+ rtStatus = PHY_RF6052_Config8723A(Adapter);
+ return rtStatus;
+}
+
+static void getTxPowerIndex(struct rtw_adapter *Adapter,
+ u8 channel, u8 *cckPowerLevel, u8 *ofdmPowerLevel)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u8 index = (channel - 1);
+ /* 1. CCK */
+ cckPowerLevel[RF_PATH_A] = pHalData->TxPwrLevelCck[RF_PATH_A][index];
+ cckPowerLevel[RF_PATH_B] = pHalData->TxPwrLevelCck[RF_PATH_B][index];
+
+ /* 2. OFDM for 1S or 2S */
+ if (GET_RF_TYPE(Adapter) == RF_1T2R || GET_RF_TYPE(Adapter) == RF_1T1R) {
+ /* Read HT 40 OFDM TX power */
+ ofdmPowerLevel[RF_PATH_A] =
+ pHalData->TxPwrLevelHT40_1S[RF_PATH_A][index];
+ ofdmPowerLevel[RF_PATH_B] =
+ pHalData->TxPwrLevelHT40_1S[RF_PATH_B][index];
+ } else if (GET_RF_TYPE(Adapter) == RF_2T2R) {
+ /* Read HT 40 OFDM TX power */
+ ofdmPowerLevel[RF_PATH_A] =
+ pHalData->TxPwrLevelHT40_2S[RF_PATH_A][index];
+ ofdmPowerLevel[RF_PATH_B] =
+ pHalData->TxPwrLevelHT40_2S[RF_PATH_B][index];
+ }
+}
+
+static void ccxPowerIndexCheck(struct rtw_adapter *Adapter, u8 channel,
+ u8 *cckPowerLevel, u8 *ofdmPowerLevel)
+{
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: SetTxPowerLevel8723A()
+ *
+ * Overview: This function is exported to the "HalCommon" module
+ * We must consider RF path later!!!!!!!
+ *
+ * Input: struct rtw_adapter * Adapter
+ * u8 channel
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ *---------------------------------------------------------------------------*/
+void PHY_SetTxPowerLevel8723A(struct rtw_adapter *Adapter, u8 channel)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u8 cckPowerLevel[2], ofdmPowerLevel[2]; /* [0]:RF-A, [1]:RF-B */
+
+ if (pHalData->bTXPowerDataReadFromEEPORM == false)
+ return;
+
+ getTxPowerIndex(Adapter, channel, &cckPowerLevel[0],
+ &ofdmPowerLevel[0]);
+
+ ccxPowerIndexCheck(Adapter, channel, &cckPowerLevel[0],
+ &ofdmPowerLevel[0]);
+
+ rtl823a_phy_rf6052setccktxpower(Adapter, &cckPowerLevel[0]);
+ rtl8723a_PHY_RF6052SetOFDMTxPower(Adapter, &ofdmPowerLevel[0], channel);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_SetBWMode23aCallback8192C()
+ *
+ * Overview: Timer callback function for SetBWMode23a
+ *
+ * Input: PRT_TIMER pTimer
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Note:
+ * (1) We do not take j mode into consideration now
+ * (2) Will two workitem of "switch channel" and
+ * "switch channel bandwidth" run concurrently?
+ *---------------------------------------------------------------------------*/
+static void
+_PHY_SetBWMode23a92C(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u8 regBwOpMode;
+ u8 regRRSR_RSC;
+
+ if (pHalData->rf_chip == RF_PSEUDO_11N)
+ return;
+
+ /* There is no 40MHz mode in RF_8225. */
+ if (pHalData->rf_chip == RF_8225)
+ return;
+
+ if (Adapter->bDriverStopped)
+ return;
+
+ /* 3 */
+ /* 3<1>Set MAC register */
+ /* 3 */
+
+ regBwOpMode = rtw_read8(Adapter, REG_BWOPMODE);
+ regRRSR_RSC = rtw_read8(Adapter, REG_RRSR+2);
+
+ switch (pHalData->CurrentChannelBW) {
+ case HT_CHANNEL_WIDTH_20:
+ regBwOpMode |= BW_OPMODE_20MHZ;
+ rtw_write8(Adapter, REG_BWOPMODE, regBwOpMode);
+ break;
+ case HT_CHANNEL_WIDTH_40:
+ regBwOpMode &= ~BW_OPMODE_20MHZ;
+ rtw_write8(Adapter, REG_BWOPMODE, regBwOpMode);
+ regRRSR_RSC = (regRRSR_RSC & 0x90) |
+ (pHalData->nCur40MhzPrimeSC << 5);
+ rtw_write8(Adapter, REG_RRSR+2, regRRSR_RSC);
+ break;
+
+ default:
+ break;
+ }
+
+ /* 3 */
+ /* 3<2>Set PHY related register */
+ /* 3 */
+ switch (pHalData->CurrentChannelBW) {
+ /* 20 MHz channel*/
+ case HT_CHANNEL_WIDTH_20:
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x0);
+ PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x0);
+ PHY_SetBBReg(Adapter, rFPGA0_AnalogParameter2, BIT10, 1);
+
+ break;
+
+ /* 40 MHz channel*/
+ case HT_CHANNEL_WIDTH_40:
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x1);
+ PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x1);
+
+ /* Set Control channel to upper or lower. These settings
+ are required only for 40MHz */
+ PHY_SetBBReg(Adapter, rCCK0_System, bCCKSideBand,
+ (pHalData->nCur40MhzPrimeSC >> 1));
+ PHY_SetBBReg(Adapter, rOFDM1_LSTF, 0xC00,
+ pHalData->nCur40MhzPrimeSC);
+ PHY_SetBBReg(Adapter, rFPGA0_AnalogParameter2, BIT10, 0);
+
+ PHY_SetBBReg(Adapter, 0x818, (BIT26 | BIT27),
+ (pHalData->nCur40MhzPrimeSC ==
+ HAL_PRIME_CHNL_OFFSET_LOWER) ? 2:1);
+ break;
+
+ default:
+ /*RT_TRACE(COMP_DBG, DBG_LOUD,
+ ("PHY_SetBWMode23aCallback8192C(): unknown Bandwidth: %#X\n" \
+ , pHalData->CurrentChannelBW));*/
+ break;
+ }
+ /* Skip over setting of J-mode in BB register here. Default value
+ is "None J mode". Emily 20070315 */
+
+ /* Added it for 20/40 mhz switch time evaluation by guangan 070531 */
+ /* NowL = PlatformEFIORead4Byte(Adapter, TSFR); */
+ /* NowH = PlatformEFIORead4Byte(Adapter, TSFR+4); */
+ /* EndTime = ((u64)NowH << 32) + NowL; */
+ /* RT_TRACE(COMP_SCAN, DBG_LOUD, ("SetBWMode23aCallback8190Pci: time
+ of SetBWMode23a = %I64d us!\n", (EndTime - BeginTime))); */
+
+ /* 3<3>Set RF related register */
+ switch (pHalData->rf_chip) {
+ case RF_8225:
+ /* PHY_SetRF8225Bandwidth(Adapter,
+ pHalData->CurrentChannelBW); */
+ break;
+
+ case RF_8256:
+ /* Please implement this function in Hal8190PciPhy8256.c */
+ /* PHY_SetRF8256Bandwidth(Adapter,
+ pHalData->CurrentChannelBW); */
+ break;
+
+ case RF_8258:
+ /* Please implement this function in Hal8190PciPhy8258.c */
+ /* PHY_SetRF8258Bandwidth(); */
+ break;
+
+ case RF_PSEUDO_11N:
+ /* Do Nothing */
+ break;
+
+ case RF_6052:
+ rtl8723a_phy_rf6052set_bw(Adapter, pHalData->CurrentChannelBW);
+ break;
+
+ default:
+ /* RT_ASSERT(false, ("Unknown RFChipID: %d\n",
+ pHalData->RFChipID)); */
+ break;
+ }
+
+ /* pHalData->SetBWMode23aInProgress = false; */
+
+ /* RT_TRACE(COMP_SCAN, DBG_LOUD,
+ ("<== PHY_SetBWMode23aCallback8192C() \n")); */
+}
+
+ /*-----------------------------------------------------------------------------
+ * Function: SetBWMode23a8190Pci()
+ *
+ * Overview: This function is exported to the "HalCommon" module
+ *
+ * Input: struct rtw_adapter * Adapter
+ * enum ht_channel_width Bandwidth 20M or 40M
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Note: We do not take j mode into consideration now
+ *---------------------------------------------------------------------------*/
+void
+PHY_SetBWMode23a8723A(struct rtw_adapter *Adapter,
+ enum ht_channel_width Bandwidth, unsigned char Offset)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ enum ht_channel_width tmpBW = pHalData->CurrentChannelBW;
+
+ pHalData->CurrentChannelBW = Bandwidth;
+
+ pHalData->nCur40MhzPrimeSC = Offset;
+
+ if ((!Adapter->bDriverStopped) && (!Adapter->bSurpriseRemoved))
+ _PHY_SetBWMode23a92C(Adapter);
+ else
+ pHalData->CurrentChannelBW = tmpBW;
+}
+
+static void _PHY_SwChnl8723A(struct rtw_adapter *Adapter, u8 channel)
+{
+ u8 eRFPath;
+ u32 param1, param2;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+
+ if (Adapter->bNotifyChannelChange)
+ DBG_8723A("[%s] ch = %d\n", __func__, channel);
+
+ /* s1. pre common command - CmdID_SetTxPowerLevel */
+ PHY_SetTxPowerLevel8723A(Adapter, channel);
+
+ /* s2. RF dependent command - CmdID_RF_WriteReg,
+ param1 = RF_CHNLBW, param2 = channel */
+ param1 = RF_CHNLBW;
+ param2 = channel;
+ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
+ pHalData->RfRegChnlVal[eRFPath] =
+ (pHalData->RfRegChnlVal[eRFPath] & 0xfffffc00) | param2;
+ PHY_SetRFReg(Adapter, (enum RF_RADIO_PATH)eRFPath, param1,
+ bRFRegOffsetMask, pHalData->RfRegChnlVal[eRFPath]);
+ }
+
+ /* s3. post common command - CmdID_End, None */
+}
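The RF_CHNLBW update above keeps everything except the low 10 bits of the cached register value (including the bandwidth bits set by rtl8723a_phy_rf6052set_bw() in rtl8723a_rf6052.c below) and drops the channel number into bits [9:0]. With hypothetical values:

	/*
	 *   cached RfRegChnlVal = 0x00000401  (20 MHz bit set, channel 1)
	 *   switch to channel 6:
	 *   (0x00000401 & 0xfffffc00) | 6 = 0x00000406
	 */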
+
+void PHY_SwChnl8723A(struct rtw_adapter *Adapter, u8 channel)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u8 tmpchannel = pHalData->CurrentChannel;
+ bool result = true;
+
+ if (pHalData->rf_chip == RF_PSEUDO_11N) {
+ /* return immediately if it is pseudo-phy */
+ return;
+ }
+
+ if (channel == 0)
+ channel = 1;
+
+ pHalData->CurrentChannel = channel;
+
+ if ((!Adapter->bDriverStopped) && (!Adapter->bSurpriseRemoved)) {
+ _PHY_SwChnl8723A(Adapter, channel);
+
+ if (!result)
+ pHalData->CurrentChannel = tmpchannel;
+ } else {
+ pHalData->CurrentChannel = tmpchannel;
+ }
+}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
new file mode 100644
index 000000000000..ed39c18c3f84
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
@@ -0,0 +1,507 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/******************************************************************************
+ *
+ *
+ * Module: rtl8192c_rf6052.c (Source C File)
+ *
+ * Note: Provide RF 6052 series relative API.
+ *
+ * Function:
+ *
+ * Export:
+ *
+ * Abbrev:
+ *
+ * History:
+ * Date Who Remark
+ *
+ * 09/25/2008 MHC Create initial version.
+ * 11/05/2008 MHC Add API for tx power setting.
+ *
+ *
+******************************************************************************/
+
+#define _RTL8723A_RF6052_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <rtl8723a_hal.h>
+
+/*---------------------------Define Local Constant---------------------------*/
+/* Define local structure for debug!!!!! */
+struct rf_shadow_compare_map {
+ /* Shadow register value */
+ u32 Value;
+ /* Compare or not flag */
+ u8 Compare;
+ /* Record if it has ever been modified unexpectedly */
+ u8 ErrorOrNot;
+ /* Recover flag */
+ u8 Recorver;
+ /* */
+ u8 Driver_Write;
+};
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_RF6052SetBandwidth()
+ *
+ * Overview: This function is called by SetBWMode23aCallback8190Pci() only
+ *
+ * Input: struct rtw_adapter * Adapter
+ * WIRELESS_BANDWIDTH_E Bandwidth 20M or 40M
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Note: For RF type 0222D
+ *---------------------------------------------------------------------------*/
+void rtl8723a_phy_rf6052set_bw(
+ struct rtw_adapter *Adapter,
+ enum ht_channel_width Bandwidth) /* 20M or 40M */
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+
+ switch (Bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | 0x0400);
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ break;
+ case HT_CHANNEL_WIDTH_40:
+ pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff));
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ break;
+ default:
+ break;
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_RF6052SetCckTxPower
+ *
+ * Overview:
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/05/2008 MHC Simulate 8192 series.
+ *
+ *---------------------------------------------------------------------------*/
+
+void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter, u8 *pPowerlevel)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ u32 TxAGC[2] = {0, 0}, tmpval = 0;
+ bool TurboScanOff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+
+ /* According to SD3 eechou's suggestion, we need to disable turbo scan for RU. */
+ /* Otherwise, the external PA will be damaged if the power index is > 0x20. */
+ if (pHalData->EEPROMRegulatory != 0 || pHalData->ExternalPA)
+ TurboScanOff = true;
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ TxAGC[RF_PATH_A] = 0x3f3f3f3f;
+ TxAGC[RF_PATH_B] = 0x3f3f3f3f;
+
+ TurboScanOff = true;/* disable turbo scan */
+
+ if (TurboScanOff) {
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ TxAGC[idx1] =
+ pPowerlevel[idx1] | (pPowerlevel[idx1]<<8) |
+ (pPowerlevel[idx1]<<16) | (pPowerlevel[idx1]<<24);
+ /* 2010/10/18 MH For external PA module. We need to limit power index to be less than 0x20. */
+ if (TxAGC[idx1] > 0x20 && pHalData->ExternalPA)
+ TxAGC[idx1] = 0x20;
+ }
+ }
+ } else {
+/* 20100427 Joseph: Driver dynamic Tx power shall not affect the final Tx power. It shall be determined by the power training mechanism. */
+/* Currently, we cannot fully disable the driver dynamic tx power mechanism because it is referenced by the BT coexist mechanism. */
+/* In the future, the two mechanisms shall be separated from each other and maintained independently. Thanks for Lanhsin's reminder. */
+ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
+ TxAGC[RF_PATH_A] = 0x10101010;
+ TxAGC[RF_PATH_B] = 0x10101010;
+ } else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2) {
+ TxAGC[RF_PATH_A] = 0x00000000;
+ TxAGC[RF_PATH_B] = 0x00000000;
+ } else {
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ TxAGC[idx1] =
+ pPowerlevel[idx1] | (pPowerlevel[idx1]<<8) |
+ (pPowerlevel[idx1]<<16) | (pPowerlevel[idx1]<<24);
+ }
+
+ if (pHalData->EEPROMRegulatory == 0) {
+ tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][6]) +
+ (pHalData->MCSTxPowerLevelOriginalOffset[0][7]<<8);
+ TxAGC[RF_PATH_A] += tmpval;
+
+ tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][14]) +
+ (pHalData->MCSTxPowerLevelOriginalOffset[0][15]<<24);
+ TxAGC[RF_PATH_B] += tmpval;
+ }
+ }
+ }
+
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ ptr = (u8 *)(&TxAGC[idx1]);
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
+
+ /* rf-A cck tx power */
+ tmpval = TxAGC[RF_PATH_A]&0xff;
+ PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
+ tmpval = TxAGC[RF_PATH_A]>>8;
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+
+ /* rf-B cck tx power */
+ tmpval = TxAGC[RF_PATH_B]>>24;
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
+ tmpval = TxAGC[RF_PATH_B]&0x00ffffff;
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
+} /* PHY_RF6052SetCckTxPower */
+
+/* powerbase0 for OFDM rates */
+/* powerbase1 for HT MCS rates */
+static void getPowerBase(
+ struct rtw_adapter *Adapter,
+ u8 *pPowerLevel,
+ u8 Channel,
+ u32 *OfdmBase,
+ u32 *MCSBase
+ )
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u32 powerBase0, powerBase1;
+ u8 Legacy_pwrdiff = 0;
+ s8 HT20_pwrdiff = 0;
+ u8 i, powerlevel[2];
+
+ for (i = 0; i < 2; i++) {
+ powerlevel[i] = pPowerLevel[i];
+ Legacy_pwrdiff = pHalData->TxPwrLegacyHtDiff[i][Channel-1];
+ powerBase0 = powerlevel[i] + Legacy_pwrdiff;
+
+ powerBase0 = (powerBase0<<24) | (powerBase0<<16) | (powerBase0<<8) | powerBase0;
+ *(OfdmBase+i) = powerBase0;
+ }
+
+ for (i = 0; i < 2; i++) {
+ /* Apply the HT20 power diff when operating with 20 MHz bandwidth */
+ if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20) {
+ HT20_pwrdiff = pHalData->TxPwrHt20Diff[i][Channel-1];
+ powerlevel[i] += HT20_pwrdiff;
+ }
+ powerBase1 = powerlevel[i];
+ powerBase1 = (powerBase1<<24) | (powerBase1<<16) | (powerBase1<<8) | powerBase1;
+ *(MCSBase+i) = powerBase1;
+ }
+}
+
+static void getTxPowerWriteValByRegulatory(
+ struct rtw_adapter *Adapter,
+ u8 Channel,
+ u8 index,
+ u32 *powerBase0,
+ u32 *powerBase1,
+ u32 *pOutWriteVal
+ )
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ u8 i, chnlGroup = 0, pwr_diff_limit[4];
+ u32 writeVal, customer_limit, rf;
+
+ /* Index 0 & 1 = legacy OFDM, 2-5 = HT_MCS rate */
+ for (rf = 0; rf < 2; rf++) {
+ switch (pHalData->EEPROMRegulatory) {
+ case 0: /* Realtek better performance */
+ /* increase power diff defined by Realtek for large power */
+ chnlGroup = 0;
+ writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ case 1: /* Realtek regulatory */
+ /* increase power diff defined by Realtek for regulatory */
+ if (pHalData->pwrGroupCnt == 1)
+ chnlGroup = 0;
+ if (pHalData->pwrGroupCnt >= 3) {
+ if (Channel <= 3)
+ chnlGroup = 0;
+ else if (Channel >= 4 && Channel <= 9)
+ chnlGroup = 1;
+ else if (Channel > 9)
+ chnlGroup = 2;
+
+ if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ chnlGroup++;
+ else
+ chnlGroup += 4;
+ }
+ writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ case 2: /* Better regulatory */
+ /* don't increase any power diff */
+ writeVal = ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ case 3: /* Customer defined power diff. */
+ chnlGroup = 0;
+
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] = (u8)((pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index +
+ (rf ? 8 : 0)]&(0x7f << (i*8))) >> (i*8));
+ if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_40) {
+ if (pwr_diff_limit[i] > pHalData->PwrGroupHT40[rf][Channel-1])
+ pwr_diff_limit[i] = pHalData->PwrGroupHT40[rf][Channel-1];
+ } else {
+ if (pwr_diff_limit[i] > pHalData->PwrGroupHT20[rf][Channel-1])
+ pwr_diff_limit[i] = pHalData->PwrGroupHT20[rf][Channel-1];
+ }
+ }
+ customer_limit = (pwr_diff_limit[3]<<24) | (pwr_diff_limit[2]<<16) |
+ (pwr_diff_limit[1]<<8) | (pwr_diff_limit[0]);
+ writeVal = customer_limit + ((index<2)?powerBase0[rf]:powerBase1[rf]);
+ break;
+ default:
+ chnlGroup = 0;
+ writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ }
+
+/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power. It shall be determined by power training mechanism. */
+/* Currently, we cannot fully disable the driver dynamic tx power mechanism because it is referenced by the BT coexist mechanism. */
+/* In the future, the two mechanisms shall be separated from each other and maintained independently. Thanks for Lanhsin's reminder. */
+
+ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1)
+ writeVal = 0x14141414;
+ else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2)
+ writeVal = 0x00000000;
+
+ /* 20100628 Joseph: High power mode for BT-Coexist mechanism. */
+ /* This mechanism is only applied when Driver-Highpower-Mechanism is OFF. */
+ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT1)
+ writeVal = writeVal - 0x06060606;
+ else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT2)
+ writeVal = writeVal; /* BT2: no power adjustment */
+ *(pOutWriteVal+rf) = writeVal;
+ }
+}
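The "Realtek regulatory" branch (EEPROMRegulatory == 1) above maps the channel to a power group before indexing MCSTxPowerLevelOriginalOffset: channels 1-3, 4-9 and 10-14 select base groups 0, 1 and 2, and the result is then offset by +1 for 20 MHz or +4 for 40 MHz operation. A hedged sketch of just that mapping (the helper name and standalone form are illustrative, not driver API):

/* Channel-to-power-group mapping as read off the case 1 branch above;
 * only meaningful when pwrGroupCnt >= 3. */
static unsigned int chnl_to_pwr_group(unsigned int channel, int is_40mhz)
{
	unsigned int group;

	if (channel <= 3)
		group = 0;
	else if (channel <= 9)
		group = 1;
	else
		group = 2;

	/* HT20 groups follow at +1, HT40 groups at +4 */
	return group + (is_40mhz ? 4 : 1);
}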
+
+static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u16 RegOffset_A[6] = {
+ rTxAGC_A_Rate18_06, rTxAGC_A_Rate54_24,
+ rTxAGC_A_Mcs03_Mcs00, rTxAGC_A_Mcs07_Mcs04,
+ rTxAGC_A_Mcs11_Mcs08, rTxAGC_A_Mcs15_Mcs12
+ };
+ u16 RegOffset_B[6] = {
+ rTxAGC_B_Rate18_06, rTxAGC_B_Rate54_24,
+ rTxAGC_B_Mcs03_Mcs00, rTxAGC_B_Mcs07_Mcs04,
+ rTxAGC_B_Mcs11_Mcs08, rTxAGC_B_Mcs15_Mcs12
+ };
+ u8 i, rf, pwr_val[4];
+ u32 writeVal;
+ u16 RegOffset;
+
+ for (rf = 0; rf < 2; rf++) {
+ writeVal = pValue[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8)((writeVal & (0x7f<<(i*8)))>>(i*8));
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeVal = (pwr_val[3]<<24) | (pwr_val[2]<<16) |
+ (pwr_val[1]<<8) | pwr_val[0];
+
+ if (rf == 0)
+ RegOffset = RegOffset_A[index];
+ else
+ RegOffset = RegOffset_B[index];
+
+ PHY_SetBBReg(Adapter, RegOffset, bMaskDWord, writeVal);
+
+ /* 201005115 Joseph: Set Tx Power diff for Tx power training mechanism. */
+ if (((pHalData->rf_type == RF_2T2R) &&
+ (RegOffset == rTxAGC_A_Mcs15_Mcs12 ||
+ RegOffset == rTxAGC_B_Mcs15_Mcs12)) ||
+ ((pHalData->rf_type != RF_2T2R) &&
+ (RegOffset == rTxAGC_A_Mcs07_Mcs04 ||
+ RegOffset == rTxAGC_B_Mcs07_Mcs04))) {
+ writeVal = pwr_val[3];
+ if (RegOffset == rTxAGC_A_Mcs15_Mcs12 || RegOffset == rTxAGC_A_Mcs07_Mcs04)
+ RegOffset = 0xc90;
+ if (RegOffset == rTxAGC_B_Mcs15_Mcs12 || RegOffset == rTxAGC_B_Mcs07_Mcs04)
+ RegOffset = 0xc98;
+ for (i = 0; i < 3; i++) {
+ if (i != 2)
+ writeVal = (writeVal > 8) ? (writeVal-8) : 0;
+ else
+ writeVal = (writeVal > 6) ? (writeVal-6) : 0;
+ rtw_write8(Adapter, (u32)(RegOffset+i), (u8)writeVal);
+ }
+ }
+ }
+}
+/*-----------------------------------------------------------------------------
+ * Function: PHY_RF6052SetOFDMTxPower
+ *
+ * Overview: For legacy and HT OFDM rates, we must read the EEPROM TX power
+ * index for the current channel and the original value in the TX power
+ * register area from 0xe00, then add the offset to the original value to
+ * obtain the correct tx power.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/05/2008 MHC Simulate 8192 series method.
+ * 01/06/2009 MHC 1. Prevent Path B tx power overflow or underflow due to
+ * A/B pwr difference or legacy/HT pwr diff.
+ * 2. We account for the path B legacy/HT OFDM difference.
+ * 01/22/2009 MHC Support new EEPROM format from SD3.
+ *
+ *---------------------------------------------------------------------------*/
+void rtl8723a_PHY_RF6052SetOFDMTxPower(struct rtw_adapter *Adapter, u8 *pPowerLevel, u8 Channel)
+{
+ u32 writeVal[2], powerBase0[2], powerBase1[2];
+ u8 index = 0;
+
+ getPowerBase(Adapter, pPowerLevel, Channel, &powerBase0[0], &powerBase1[0]);
+
+ for (index = 0; index < 6; index++) {
+ getTxPowerWriteValByRegulatory(Adapter, Channel, index,
+ &powerBase0[0], &powerBase1[0], &writeVal[0]);
+
+ writeOFDMPowerReg(Adapter, index, &writeVal[0]);
+ }
+}
+
+static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
+{
+ u32 u4RegValue = 0;
+ u8 eRFPath;
+ struct bb_reg_define *pPhyReg;
+ int rtStatus = _SUCCESS;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ static char sz8723RadioAFile[] = RTL8723_PHY_RADIO_A;
+ static char sz8723RadioBFile[] = RTL8723_PHY_RADIO_B;
+ char *pszRadioAFile, *pszRadioBFile;
+
+ pszRadioAFile = sz8723RadioAFile;
+ pszRadioBFile = sz8723RadioBFile;
+
+ /* 3----------------------------------------------------------------- */
+ /* 3 <2> Initialize RF */
+ /* 3----------------------------------------------------------------- */
+ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
+
+ pPhyReg = &pHalData->PHYRegDef[eRFPath];
+
+ /*----Store original RFENV control type----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
+ break;
+ case RF_PATH_B:
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16);
+ break;
+ }
+
+ /*----Set RF_ENV enable----*/
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+ udelay(1);/* PlatformStallExecution(1); */
+
+ /*----Set RF_ENV output high----*/
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
+ udelay(1);/* PlatformStallExecution(1); */
+
+ /* Set bit number of Address and Data for RF register */
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
+ udelay(1);/* PlatformStallExecution(1); */
+
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
+ udelay(1);/* PlatformStallExecution(1); */
+
+ /*----Initialize RF from configuration file----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile23a(&pHalData->odmpriv, (enum RF_RADIO_PATH)eRFPath, (enum RF_RADIO_PATH)eRFPath))
+ rtStatus = _FAIL;
+ break;
+ case RF_PATH_B:
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile23a(&pHalData->odmpriv, (enum RF_RADIO_PATH)eRFPath, (enum RF_RADIO_PATH)eRFPath))
+ rtStatus = _FAIL;
+ break;
+ }
+
+ /*----Restore RFENV control type----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
+ break;
+ case RF_PATH_B:
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue);
+ break;
+ }
+
+ if (rtStatus != _SUCCESS) {
+ /* RT_TRACE(COMP_FPGA, DBG_LOUD, ("phy_RF6052_Config_ParaFile():Radio[%d] Fail!!", eRFPath)); */
+ goto phy_RF6052_Config_ParaFile_Fail;
+ }
+ }
+phy_RF6052_Config_ParaFile_Fail:
+ return rtStatus;
+}
+
+int PHY_RF6052_Config8723A(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ int rtStatus = _SUCCESS;
+
+ /* Initialize general global value */
+ /* TODO: Extend RF_PATH_C and RF_PATH_D in the future */
+ if (pHalData->rf_type == RF_1T1R)
+ pHalData->NumTotalRFPath = 1;
+ else
+ pHalData->NumTotalRFPath = 2;
+
+ /* Config BB and RF */
+ rtStatus = phy_RF6052_Config_ParaFile(Adapter);
+ return rtStatus;
+}
+
+/* End of HalRf6052.c */
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_rxdesc.c b/drivers/staging/rtl8723au/hal/rtl8723a_rxdesc.c
new file mode 100644
index 000000000000..81b5efe649fa
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_rxdesc.c
@@ -0,0 +1,69 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8723A_REDESC_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtl8723a_hal.h>
+
+static void process_rssi(struct rtw_adapter *padapter,
+ struct recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib = &prframe->attrib;
+ struct signal_stat *signal_stat = &padapter->recvpriv.signal_strength_data;
+
+ if (signal_stat->update_req) {
+ signal_stat->total_num = 0;
+ signal_stat->total_val = 0;
+ signal_stat->update_req = 0;
+ }
+
+ signal_stat->total_num++;
+ signal_stat->total_val += pattrib->phy_info.SignalStrength;
+ signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
+}
+
+static void process_link_qual(struct rtw_adapter *padapter,
+ struct recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib;
+ struct signal_stat *signal_stat;
+
+ if (prframe == NULL || padapter == NULL)
+ return;
+
+ pattrib = &prframe->attrib;
+ signal_stat = &padapter->recvpriv.signal_qual_data;
+
+ if (signal_stat->update_req) {
+ signal_stat->total_num = 0;
+ signal_stat->total_val = 0;
+ signal_stat->update_req = 0;
+ }
+
+ signal_stat->total_num++;
+ signal_stat->total_val += pattrib->phy_info.SignalQuality;
+ signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
+}
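Both process_rssi() and process_link_qual() keep the same accumulator-style average: an update_req flag restarts the window, each received frame contributes one sample, and avg_val is the running total divided by the sample count. A self-contained sketch of that pattern (the struct below mirrors the fields of struct signal_stat but is only illustrative):

/* Illustrative running-average accumulator mirroring struct signal_stat. */
struct sig_avg {
	unsigned int update_req;	/* consumer sets this to restart the window */
	unsigned int total_num;
	unsigned int total_val;
	unsigned int avg_val;
};

static void sig_avg_add(struct sig_avg *s, unsigned int sample)
{
	if (s->update_req) {		/* restart the averaging window */
		s->total_num = 0;
		s->total_val = 0;
		s->update_req = 0;
	}
	s->total_num++;
	s->total_val += sample;
	s->avg_val = s->total_val / s->total_num;
}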
+
+/* void rtl8723a_process_phy_info(struct rtw_adapter *padapter, union recv_frame *prframe) */
+void rtl8723a_process_phy_info(struct rtw_adapter *padapter, void *prframe)
+{
+ struct recv_frame *precvframe = prframe;
+ /* Check RSSI */
+ process_rssi(padapter, precvframe);
+ /* Check EVM */
+ process_link_qual(padapter, precvframe);
+}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_sreset.c b/drivers/staging/rtl8723au/hal/rtl8723a_sreset.c
new file mode 100644
index 000000000000..c0218e734b9e
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_sreset.c
@@ -0,0 +1,73 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8723A_SRESET_C_
+
+#include <rtl8723a_sreset.h>
+#include <rtl8723a_hal.h>
+
+void rtl8723a_sreset_xmit_status_check(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ unsigned long current_time;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ unsigned int diff_time;
+ u32 txdma_status;
+
+ txdma_status = rtw_read32(padapter, REG_TXDMA_STATUS);
+ if (txdma_status != 0) {
+ DBG_8723A("%s REG_TXDMA_STATUS:0x%08x\n", __func__, txdma_status);
+ rtw_hal_sreset_reset23a(padapter);
+ }
+
+ current_time = jiffies;
+
+ if (pxmitpriv->free_xmitbuf_cnt == 0 || pxmitpriv->free_xmit_extbuf_cnt == 0) {
+
+ diff_time = jiffies_to_msecs(jiffies - psrtpriv->last_tx_time);
+
+ if (diff_time > 2000) {
+ if (psrtpriv->last_tx_complete_time == 0) {
+ psrtpriv->last_tx_complete_time = current_time;
+ } else {
+ diff_time = jiffies_to_msecs(jiffies - psrtpriv->last_tx_complete_time);
+ if (diff_time > 4000) {
+ /* padapter->Wifi_Error_Status = WIFI_TX_HANG; */
+ DBG_8723A("%s tx hang\n", __func__);
+ rtw_hal_sreset_reset23a(padapter);
+ }
+ }
+ }
+ }
+
+ if (psrtpriv->dbg_trigger_point == SRESET_TGP_XMIT_STATUS) {
+ psrtpriv->dbg_trigger_point = SRESET_TGP_NULL;
+ rtw_hal_sreset_reset23a(padapter);
+ return;
+ }
+}
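rtl8723a_sreset_xmit_status_check() implements a two-stage TX-hang watchdog: once the driver has been out of xmit buffers for more than 2 seconds it arms a second timer, and if no transmission completes within a further 4 seconds it triggers the silent reset. A sketch of the jiffies arithmetic only; the helper name and signature are illustrative, while the 2 s / 4 s thresholds come from the code above:

#include <linux/types.h>
#include <linux/jiffies.h>

/* Two-stage timeout: stage 1 after 2 s without free xmit buffers,
 * stage 2 (declare a hang) after a further 4 s without a completion. */
static bool tx_looks_hung(unsigned long last_tx_time,
			  unsigned long *last_tx_complete_time)
{
	if (jiffies_to_msecs(jiffies - last_tx_time) <= 2000)
		return false;			/* buffers ran out only recently */

	if (*last_tx_complete_time == 0) {
		*last_tx_complete_time = jiffies;	/* arm the second stage */
		return false;
	}

	return jiffies_to_msecs(jiffies - *last_tx_complete_time) > 4000;
}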
+
+void rtl8723a_sreset_linked_status_check(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ if (psrtpriv->dbg_trigger_point == SRESET_TGP_LINK_STATUS) {
+ psrtpriv->dbg_trigger_point = SRESET_TGP_NULL;
+ rtw_hal_sreset_reset23a(padapter);
+ return;
+ }
+}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_xmit.c b/drivers/staging/rtl8723au/hal/rtl8723a_xmit.c
new file mode 100644
index 000000000000..d7612ccc47e9
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_xmit.c
@@ -0,0 +1,52 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8723A_XMIT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtl8723a_hal.h>
+
+void dump_txrpt_ccx_8723a(void *buf)
+{
+ struct txrpt_ccx_8723a *txrpt_ccx = buf;
+
+ DBG_8723A("%s:\n"
+ "tag1:%u, rsvd:%u, int_bt:%u, int_tri:%u, int_ccx:%u\n"
+ "mac_id:%u, pkt_drop:%u, pkt_ok:%u, bmc:%u\n"
+ "retry_cnt:%u, lifetime_over:%u, retry_over:%u\n"
+ "ccx_qtime:%u\n"
+ "final_data_rate:0x%02x\n"
+ "qsel:%u, sw:0x%03x\n"
+ , __func__
+ , txrpt_ccx->tag1, txrpt_ccx->rsvd, txrpt_ccx->int_bt, txrpt_ccx->int_tri, txrpt_ccx->int_ccx
+ , txrpt_ccx->mac_id, txrpt_ccx->pkt_drop, txrpt_ccx->pkt_ok, txrpt_ccx->bmc
+ , txrpt_ccx->retry_cnt, txrpt_ccx->lifetime_over, txrpt_ccx->retry_over
+ , txrpt_ccx_qtime_8723a(txrpt_ccx)
+ , txrpt_ccx->final_data_rate
+ , txrpt_ccx->qsel, txrpt_ccx_sw_8723a(txrpt_ccx)
+ );
+}
+
+void handle_txrpt_ccx_8723a(struct rtw_adapter *adapter, void *buf)
+{
+ struct txrpt_ccx_8723a *txrpt_ccx = buf;
+
+ if (txrpt_ccx->int_ccx) {
+ if (txrpt_ccx->pkt_ok)
+ rtw_ack_tx_done23a(&adapter->xmitpriv, RTW_SCTX_DONE_SUCCESS);
+ else
+ rtw_ack_tx_done23a(&adapter->xmitpriv, RTW_SCTX_DONE_CCX_PKT_FAIL);
+ }
+}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723au_led.c b/drivers/staging/rtl8723au/hal/rtl8723au_led.c
new file mode 100644
index 000000000000..4d5c909487f8
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723au_led.c
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "drv_types.h"
+#include "rtl8723a_hal.h"
+#include "rtl8723a_led.h"
+
+/* */
+/* LED object. */
+/* */
+
+/* */
+/* Prototype of protected function. */
+/* */
+
+/* */
+/* LED_819xUsb routines. */
+/* */
+
+/* Description: */
+/* Turn on LED according to LedPin specified. */
+void SwLedOn23a(struct rtw_adapter *padapter, struct led_8723a *pLed)
+{
+ u8 LedCfg = 0;
+
+ if ((padapter->bSurpriseRemoved == true) || (padapter->bDriverStopped == true))
+ return;
+ switch (pLed->LedPin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ rtw_write8(padapter, REG_LEDCFG0, (LedCfg&0xf0)|BIT5|BIT6); /* SW control led0 on. */
+ break;
+ case LED_PIN_LED1:
+ rtw_write8(padapter, REG_LEDCFG1, (LedCfg&0x00)|BIT6); /* SW control led1 on. */
+ break;
+ case LED_PIN_LED2:
+ LedCfg = rtw_read8(padapter, REG_LEDCFG2);
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg&0x80)|BIT5); /* SW control led2 on. */
+ break;
+ default:
+ break;
+ }
+ pLed->bLedOn = true;
+}
+
+/* Description: */
+/* Turn off LED according to LedPin specified. */
+void SwLedOff23a(struct rtw_adapter *padapter, struct led_8723a *pLed)
+{
+ u8 LedCfg = 0;
+ /* struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); */
+
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
+ goto exit;
+
+ switch (pLed->LedPin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ rtw_write8(padapter, REG_LEDCFG0, (LedCfg&0xf0)|BIT5|BIT6); /* SW control led0 off. */
+ break;
+ case LED_PIN_LED1:
+ rtw_write8(padapter, REG_LEDCFG1, (LedCfg&0x00)|BIT5|BIT6); /* SW control led1 off. */
+ break;
+ case LED_PIN_LED2:
+ LedCfg = rtw_read8(padapter, REG_LEDCFG2);
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg&0x80)|BIT3|BIT5); /* SW control led2 off. */
+ break;
+ default:
+ break;
+ }
+exit:
+ pLed->bLedOn = false;
+}
+
+/* Interface to manipulate LED objects. */
+
+/* Description: */
+/* Initialize all LED_871x objects. */
+void
+rtl8723au_InitSwLeds(struct rtw_adapter *padapter)
+{
+ struct led_priv *pledpriv = &padapter->ledpriv;
+
+ pledpriv->LedControlHandler = LedControl871x23a;
+ /* 8723as-vau wifi used led2 */
+ InitLed871x23a(padapter, &pledpriv->SwLed0, LED_PIN_LED2);
+
+/* InitLed871x23a(padapter,&pledpriv->SwLed1, LED_PIN_LED2); */
+}
+
+/* Description: */
+/* DeInitialize all LED_819xUsb objects. */
+void
+rtl8723au_DeInitSwLeds(struct rtw_adapter *padapter)
+{
+ struct led_priv *ledpriv = &padapter->ledpriv;
+
+ DeInitLed871x23a(&ledpriv->SwLed0);
+}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723au_recv.c b/drivers/staging/rtl8723au/hal/rtl8723au_recv.c
new file mode 100644
index 000000000000..213d1936109d
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723au_recv.c
@@ -0,0 +1,247 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8192CU_RECV_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <mlme_osdep.h>
+#include <linux/ip.h>
+#include <linux/if_ether.h>
+#include <ethernet.h>
+#include <usb_ops.h>
+#include <wifi.h>
+#include <rtl8723a_hal.h>
+
+void rtl8723au_init_recvbuf(struct rtw_adapter *padapter,
+ struct recv_buf *precvbuf)
+{
+}
+
+int rtl8723au_init_recv_priv(struct rtw_adapter *padapter)
+{
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ int i, size, res = _SUCCESS;
+ struct recv_buf *precvbuf;
+ unsigned long tmpaddr;
+ unsigned long alignment;
+ struct sk_buff *pskb;
+
+ tasklet_init(&precvpriv->recv_tasklet,
+ (void(*)(unsigned long))rtl8723au_recv_tasklet,
+ (unsigned long)padapter);
+
+ precvpriv->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!precvpriv->int_in_urb)
+ DBG_8723A("alloc_urb for interrupt in endpoint fail !!!!\n");
+ precvpriv->int_in_buf = kzalloc(USB_INTR_CONTENT_LENGTH, GFP_KERNEL);
+ if (!precvpriv->int_in_buf)
+ DBG_8723A("alloc_mem for interrupt in endpoint fail !!!!\n");
+
+ /* init recv_buf */
+ _rtw_init_queue23a(&precvpriv->free_recv_buf_queue);
+
+ size = NR_RECVBUFF * sizeof(struct recv_buf);
+ precvpriv->precv_buf = kzalloc(size, GFP_KERNEL);
+ if (!precvpriv->precv_buf) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("alloc recv_buf fail!\n"));
+ goto exit;
+ }
+
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ INIT_LIST_HEAD(&precvbuf->list);
+
+ res = rtw_os_recvbuf_resource_alloc23a(padapter, precvbuf);
+ if (res == _FAIL)
+ break;
+
+ precvbuf->adapter = padapter;
+
+ precvbuf++;
+ }
+
+ precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
+
+ skb_queue_head_init(&precvpriv->rx_skb_queue);
+ skb_queue_head_init(&precvpriv->free_recv_skb_queue);
+
+ for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
+ size = MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ;
+ pskb = __netdev_alloc_skb(padapter->pnetdev, size, GFP_KERNEL);
+
+ if (pskb) {
+ pskb->dev = padapter->pnetdev;
+
+ tmpaddr = (unsigned long)pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
+ skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
+
+ skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
+ }
+
+ pskb = NULL;
+ }
+
+exit:
+ return res;
+}
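The preallocated receive skbs above are aligned by over-allocating RECVBUFF_ALIGN_SZ extra bytes and then reserving the misaligned remainder of skb->data. A minimal sketch of that alignment idiom, assuming the alignment is a power of two (which the mask arithmetic requires); the helper name is illustrative:

#include <linux/skbuff.h>

/* Align skb->data to "align" bytes; the skb must have been allocated
 * at least "align" bytes larger than needed, and "align" must be a
 * power of two. Mirrors the skb_reserve() call in the loop above. */
static void align_recv_skb(struct sk_buff *skb, unsigned long align)
{
	unsigned long off = (unsigned long)skb->data & (align - 1);

	skb_reserve(skb, align - off);
}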
+
+void rtl8723au_free_recv_priv(struct rtw_adapter *padapter)
+{
+ int i;
+ struct recv_buf *precvbuf;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ rtw_os_recvbuf_resource_free23a(padapter, precvbuf);
+ precvbuf++;
+ }
+
+ kfree(precvpriv->precv_buf);
+
+ usb_free_urb(precvpriv->int_in_urb);
+ kfree(precvpriv->int_in_buf);
+
+ if (skb_queue_len(&precvpriv->rx_skb_queue))
+ DBG_8723A(KERN_WARNING "rx_skb_queue not empty\n");
+
+ skb_queue_purge(&precvpriv->rx_skb_queue);
+
+ if (skb_queue_len(&precvpriv->free_recv_skb_queue)) {
+ DBG_8723A(KERN_WARNING "free_recv_skb_queue not empty, %d\n",
+ skb_queue_len(&precvpriv->free_recv_skb_queue));
+ }
+
+ skb_queue_purge(&precvpriv->free_recv_skb_queue);
+}
+
+void update_recvframe_attrib(struct recv_frame *precvframe,
+ struct recv_stat *prxstat)
+{
+ struct rx_pkt_attrib *pattrib;
+ struct recv_stat report;
+ struct rxreport_8723a *prxreport;
+
+ report.rxdw0 = le32_to_cpu(prxstat->rxdw0);
+ report.rxdw1 = le32_to_cpu(prxstat->rxdw1);
+ report.rxdw2 = le32_to_cpu(prxstat->rxdw2);
+ report.rxdw3 = le32_to_cpu(prxstat->rxdw3);
+ report.rxdw4 = le32_to_cpu(prxstat->rxdw4);
+ report.rxdw5 = le32_to_cpu(prxstat->rxdw5);
+
+ prxreport = (struct rxreport_8723a *)&report;
+
+ pattrib = &precvframe->attrib;
+ memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
+
+ /* update rx report to recv_frame attribute */
+ pattrib->pkt_len = (u16)prxreport->pktlen;
+ pattrib->drvinfo_sz = (u8)(prxreport->drvinfosize << 3);
+ pattrib->physt = (u8)prxreport->physt;
+
+ pattrib->crc_err = (u8)prxreport->crc32;
+ pattrib->icv_err = (u8)prxreport->icverr;
+
+ pattrib->bdecrypted = (u8)(prxreport->swdec ? 0 : 1);
+ pattrib->encrypt = (u8)prxreport->security;
+
+ pattrib->qos = (u8)prxreport->qos;
+ pattrib->priority = (u8)prxreport->tid;
+
+ pattrib->amsdu = (u8)prxreport->amsdu;
+
+ pattrib->seq_num = (u16)prxreport->seq;
+ pattrib->frag_num = (u8)prxreport->frag;
+ pattrib->mfrag = (u8)prxreport->mf;
+ pattrib->mdata = (u8)prxreport->md;
+
+ pattrib->mcs_rate = (u8)prxreport->rxmcs;
+ pattrib->rxht = (u8)prxreport->rxht;
+}
+
+void update_recvframe_phyinfo(struct recv_frame *precvframe,
+ struct phy_stat *pphy_status)
+{
+ struct rtw_adapter *padapter = precvframe->adapter;
+ struct rx_pkt_attrib *pattrib = &precvframe->attrib;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct odm_phy_info *pPHYInfo = (struct odm_phy_info *)(&pattrib->phy_info);
+ struct odm_packet_info pkt_info;
+ u8 *sa = NULL, *da;
+ struct sta_priv *pstapriv;
+ struct sta_info *psta;
+ struct sk_buff *skb = precvframe->pkt;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *wlanhdr = skb->data;
+
+ pkt_info.bPacketMatchBSSID = false;
+ pkt_info.bPacketToSelf = false;
+ pkt_info.bPacketBeacon = false;
+
+ pkt_info.bPacketMatchBSSID =
+ (!ieee80211_is_ctl(hdr->frame_control) &&
+ !pattrib->icv_err &&
+ !pattrib->crc_err &&
+ !memcmp(get_hdr_bssid(wlanhdr),
+ get_bssid(&padapter->mlmepriv), ETH_ALEN));
+
+ da = ieee80211_get_DA(hdr);
+ pkt_info.bPacketToSelf = pkt_info.bPacketMatchBSSID &&
+ (!memcmp(da, myid(&padapter->eeprompriv), ETH_ALEN));
+
+ pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
+ ieee80211_is_beacon(hdr->frame_control);
+
+ pkt_info.StationID = 0xFF;
+ if (pkt_info.bPacketBeacon) {
+ if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE) == true)
+ sa = padapter->mlmepriv.cur_network.network.MacAddress;
+ /* to do Ad-hoc */
+ } else {
+ sa = ieee80211_get_SA(hdr);
+ }
+
+ pstapriv = &padapter->stapriv;
+ psta = rtw_get_stainfo23a(pstapriv, sa);
+ if (psta) {
+ pkt_info.StationID = psta->mac_id;
+ /* printk("%s ==> StationID(%d)\n", __FUNCTION__, pkt_info.StationID); */
+ }
+ pkt_info.Rate = pattrib->mcs_rate;
+
+ ODM_PhyStatusQuery23a(&pHalData->odmpriv, pPHYInfo,
+ (u8 *)pphy_status, &pkt_info);
+ precvframe->psta = NULL;
+ if (pkt_info.bPacketMatchBSSID &&
+ (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == true)) {
+ if (psta) {
+ precvframe->psta = psta;
+ rtl8723a_process_phy_info(padapter, precvframe);
+ }
+ } else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) {
+ if (check_fwstate(&padapter->mlmepriv,
+ WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE) ==
+ true) {
+ if (psta)
+ precvframe->psta = psta;
+ }
+ rtl8723a_process_phy_info(padapter, precvframe);
+ }
+}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c b/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
new file mode 100644
index 000000000000..2af2e3ee1abc
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
@@ -0,0 +1,548 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8192C_XMIT_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <osdep_intf.h>
+#include <usb_ops.h>
+/* include <rtl8192c_hal.h> */
+#include <rtl8723a_hal.h>
+
+s32 rtl8723au_init_xmit_priv(struct rtw_adapter *padapter)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ tasklet_init(&pxmitpriv->xmit_tasklet,
+ (void(*)(unsigned long))rtl8723au_xmit_tasklet,
+ (unsigned long)padapter);
+ return _SUCCESS;
+}
+
+void rtl8723au_free_xmit_priv(struct rtw_adapter *padapter)
+{
+}
+
+static void do_queue_select(struct rtw_adapter *padapter, struct pkt_attrib *pattrib)
+{
+ u8 qsel;
+
+ qsel = pattrib->priority;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("### do_queue_select priority =%d , qsel = %d\n",
+ pattrib->priority, qsel));
+
+ pattrib->qsel = qsel;
+}
+
+static int urb_zero_packet_chk(struct rtw_adapter *padapter, int sz)
+{
+ int blnSetTxDescOffset;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
+
+ if (pdvobj->ishighspeed) {
+ if (((sz + TXDESC_SIZE) % 512) == 0)
+ blnSetTxDescOffset = 1;
+ else
+ blnSetTxDescOffset = 0;
+ } else {
+ if (((sz + TXDESC_SIZE) % 64) == 0)
+ blnSetTxDescOffset = 1;
+ else
+ blnSetTxDescOffset = 0;
+ }
+ return blnSetTxDescOffset;
+}
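urb_zero_packet_chk() guards against the USB zero-length-packet case: a bulk transfer whose length is an exact multiple of the endpoint's max packet size (512 bytes at high speed, 64 at full speed) would otherwise terminate with a ZLP, so the caller shifts the TX descriptor by PACKET_OFFSET_SZ instead. A standalone restatement of the check (the helper name is illustrative):

/* Return nonzero when payload + descriptor lands exactly on a bulk
 * max-packet boundary (512 for USB high speed, 64 for full speed). */
static int hits_usb_packet_boundary(int payload_sz, int desc_sz, int highspeed)
{
	int max_packet = highspeed ? 512 : 64;

	return ((payload_sz + desc_sz) % max_packet) == 0;
}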
+
+static void rtl8192cu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
+{
+ u16 *usPtr = (u16 *)ptxdesc;
+ u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
+ u32 index;
+ u16 checksum = 0;
+
+ /* Clear first */
+ ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
+
+ for (index = 0 ; index < count ; index++)
+ checksum = checksum ^ le16_to_cpu(*(usPtr + index));
+
+ ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff&checksum);
+}
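The descriptor checksum above XORs the first sixteen little-endian 16-bit words of the TX descriptor (32 bytes) and stores the result in the low half of txdw7, which is cleared first so the old checksum does not feed back into the sum. A minimal sketch over a plain byte buffer, assuming the descriptor image is already in little-endian layout:

#include <stdint.h>

/* XOR-fold the first 32 bytes of a TX descriptor image into a 16-bit
 * checksum, reading the bytes as little-endian 16-bit words. */
static uint16_t txdesc_chksum(const uint8_t *desc)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < 16; i++)
		sum ^= (uint16_t)(desc[2 * i] | (desc[2 * i + 1] << 8));

	return sum;
}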
+
+static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
+{
+ if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
+ switch (pattrib->encrypt) {
+ /* SEC_TYPE */
+ case _WEP40_:
+ case _WEP104_:
+ ptxdesc->txdw1 |= cpu_to_le32((0x01<<22)&0x00c00000);
+ break;
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ /* ptxdesc->txdw1 |= cpu_to_le32((0x02<<22)&0x00c00000); */
+ ptxdesc->txdw1 |= cpu_to_le32((0x01<<22)&0x00c00000);
+ break;
+ case _AES_:
+ ptxdesc->txdw1 |= cpu_to_le32((0x03<<22)&0x00c00000);
+ break;
+ case _NO_PRIVACY_:
+ default:
+ break;
+ }
+ }
+}
+
+static void fill_txdesc_vcs(struct pkt_attrib *pattrib, u32 *pdw)
+{
+ /* DBG_8723A("cvs_mode =%d\n", pattrib->vcs_mode); */
+
+ switch (pattrib->vcs_mode) {
+ case RTS_CTS:
+ *pdw |= cpu_to_le32(BIT(12));
+ break;
+ case CTS_TO_SELF:
+ *pdw |= cpu_to_le32(BIT(11));
+ break;
+ case NONE_VCS:
+ default:
+ break;
+ }
+
+ if (pattrib->vcs_mode) {
+ *pdw |= cpu_to_le32(BIT(13));
+
+ /* Set RTS BW */
+ if (pattrib->ht_en) {
+ *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0;
+
+ if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ *pdw |= cpu_to_le32((0x01<<28)&0x30000000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
+ *pdw |= cpu_to_le32((0x02<<28)&0x30000000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
+ *pdw |= 0;
+ else
+ *pdw |= cpu_to_le32((0x03<<28)&0x30000000);
+ }
+ }
+}
+
+static void fill_txdesc_phy(struct pkt_attrib *pattrib, u32 *pdw)
+{
+ if (pattrib->ht_en) {
+ *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;
+
+ if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ *pdw |= cpu_to_le32((0x01<<20)&0x003f0000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
+ *pdw |= cpu_to_le32((0x02<<20)&0x003f0000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
+ *pdw |= 0;
+ else
+ *pdw |= cpu_to_le32((0x03<<20)&0x003f0000);
+ }
+}
+
+static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
+{
+ int pull = 0;
+ uint qsel;
+ struct rtw_adapter *padapter = pxmitframe->padapter;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ int bmcst = is_multicast_ether_addr(pattrib->ra);
+
+ if ((!bagg_pkt) && (urb_zero_packet_chk(padapter, sz) == 0)) {
+ ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ);
+ pull = 1;
+ pxmitframe->pkt_offset--;
+ }
+
+ memset(ptxdesc, 0, sizeof(struct tx_desc));
+
+ if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ /* offset 4 */
+ ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f);
+
+ qsel = (uint)(pattrib->qsel & 0x0000001f);
+ ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);
+
+ ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid<<16) & 0x000f0000);
+
+ fill_txdesc_sectype(pattrib, ptxdesc);
+
+ if (pattrib->ampdu_en)
+ ptxdesc->txdw1 |= cpu_to_le32(BIT(5));/* AGG EN */
+ else
+ ptxdesc->txdw1 |= cpu_to_le32(BIT(6));/* AGG BK */
+
+ /* offset 8 */
+
+ /* offset 12 */
+ ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<16)&0xffff0000);
+
+ /* offset 16 , offset 20 */
+ if (pattrib->qos_en)
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(6));/* QoS */
+
+ if ((pattrib->ether_type != 0x888e) &&
+ (pattrib->ether_type != 0x0806) &&
+ (pattrib->dhcp_pkt != 1)) {
+ /* Non EAP & ARP & DHCP type data packet */
+
+ fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
+ fill_txdesc_phy(pattrib, &ptxdesc->txdw4);
+
+ ptxdesc->txdw4 |= cpu_to_le32(0x00000008);/* RTS Rate = 24M */
+ ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* data/RTS rate fallback limit */
+
+ /* use REG_INIDATA_RATE_SEL value */
+ ptxdesc->txdw5 |= cpu_to_le32(pdmpriv->INIDATA_RATE[pattrib->mac_id]);
+ } else {
+ /* EAP data packet and ARP packet. */
+ /* Use the 1M data rate to send the EAP/ARP packet. */
+ /* This may make the handshake smoother. */
+
+ ptxdesc->txdw1 |= cpu_to_le32(BIT(6));/* AGG BK */
+
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */
+
+ if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/* DATA_SHORT */
+
+ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate23a(pmlmeext->tx_rate));
+ }
+ } else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
+ /* offset 4 */
+ ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f);
+
+ qsel = (uint)(pattrib->qsel&0x0000001f);
+ ptxdesc->txdw1 |= cpu_to_le32((qsel<<QSEL_SHT)&0x00001f00);
+
+ ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid<<16) & 0x000f0000);
+
+ /* offset 8 */
+ /* CCX-TXRPT ack for xmit mgmt frames. */
+ if (pxmitframe->ack_report)
+ ptxdesc->txdw2 |= cpu_to_le32(BIT(19));
+
+ /* offset 12 */
+ ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<16)&0xffff0000);
+
+ /* offset 16 */
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */
+
+ /* offset 20 */
+ ptxdesc->txdw5 |= cpu_to_le32(BIT(17));/* retry limit enable */
+ ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
+
+ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate23a(pmlmeext->tx_rate));
+ } else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
+ DBG_8723A("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
+ } else {
+ DBG_8723A("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);
+
+ /* offset 4 */
+ ptxdesc->txdw1 |= cpu_to_le32((4)&0x1f);/* CAM_ID(MAC_ID) */
+
+ ptxdesc->txdw1 |= cpu_to_le32((6<<16) & 0x000f0000);/* raid */
+
+ /* offset 8 */
+
+ /* offset 12 */
+ ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<16)&0xffff0000);
+
+ /* offset 16 */
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */
+
+ /* offset 20 */
+ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate23a(pmlmeext->tx_rate));
+ }
+
+ /* (1) The sequence number of each non-Qos frame / broadcast / multicast / */
+ /* mgnt frame should be controlled by Hw because Fw will also send null data */
+ /* which we cannot control when Fw LPS is enabled. */
+ /* --> default enable non-Qos data sequence number. 2010.06.23. by tynli. */
+ /* (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
+ /* (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
+ if (!pattrib->qos_en) {
+ /* Hw set sequence number */
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(7));
+ /* set bit3 to 1. */
+ ptxdesc->txdw3 |= cpu_to_le32((8 << 28));
+ }
+
+ /* offset 0 */
+ ptxdesc->txdw0 |= cpu_to_le32(sz&0x0000ffff);
+ ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
+ ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000);/* 32 bytes for TX Desc */
+
+ if (bmcst)
+ ptxdesc->txdw0 |= cpu_to_le32(BIT(24));
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("offset0-txdesc = 0x%x\n", ptxdesc->txdw0));
+
+ /* offset 4 */
+ /* pkt_offset, unit:8 bytes padding */
+ if (pxmitframe->pkt_offset > 0)
+ ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);
+
+ rtl8192cu_cal_txdesc_chksum(ptxdesc);
+ return pull;
+}
+
+static s32 rtw_dump_xframe(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ s32 ret = _SUCCESS;
+ s32 inner_ret = _SUCCESS;
+ int t, sz, w_sz, pull = 0;
+ u8 *mem_addr;
+ u32 ff_hwaddr;
+ struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
+ (pxmitframe->attrib.ether_type != 0x0806) &&
+ (pxmitframe->attrib.ether_type != 0x888e) &&
+ (pxmitframe->attrib.dhcp_pkt != 1))
+ rtw_issue_addbareq_cmd23a(padapter, pxmitframe);
+
+ mem_addr = pxmitframe->buf_addr;
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));
+
+ for (t = 0; t < pattrib->nr_frags; t++) {
+ if (inner_ret != _SUCCESS && ret == _SUCCESS)
+ ret = _FAIL;
+
+ if (t != (pattrib->nr_frags - 1)) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("pattrib->nr_frags =%d\n", pattrib->nr_frags));
+
+ sz = pxmitpriv->frag_len;
+ sz = sz - 4 - pattrib->icv_len;
+ } else {
+ /* no frag */
+ sz = pattrib->last_txcmdsz;
+ }
+
+ pull = update_txdesc(pxmitframe, mem_addr, sz, false);
+
+ if (pull) {
+ mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
+
+ pxmitframe->buf_addr = mem_addr;
+
+ w_sz = sz + TXDESC_SIZE;
+ } else {
+ w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
+ }
+
+ ff_hwaddr = rtw_get_ff_hwaddr23a(pxmitframe);
+ inner_ret = rtw_write_port(padapter, ff_hwaddr, w_sz, pxmitbuf);
+ rtw_count_tx_stats23a(padapter, pxmitframe, sz);
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("rtw_write_port, w_sz =%d\n", w_sz));
+
+ mem_addr += w_sz;
+
+ mem_addr = PTR_ALIGN(mem_addr, 4);
+ }
+
+ rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
+
+ if (ret != _SUCCESS)
+ rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);
+
+ return ret;
+}
+
+s32 rtl8723au_xmitframe_complete(struct rtw_adapter *padapter,
+ struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+{
+ struct hw_xmit *phwxmits;
+ struct xmit_frame *pxmitframe;
+ int hwentry;
+ int res = _SUCCESS, xcnt = 0;
+
+ phwxmits = pxmitpriv->hwxmits;
+ hwentry = pxmitpriv->hwxmit_entry;
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("xmitframe_complete()\n"));
+
+ if (pxmitbuf == NULL) {
+ pxmitbuf = rtw_alloc_xmitbuf23a(pxmitpriv);
+ if (!pxmitbuf)
+ return false;
+ }
+ pxmitframe = rtw_dequeue_xframe23a(pxmitpriv, phwxmits, hwentry);
+
+ if (pxmitframe) {
+ pxmitframe->pxmitbuf = pxmitbuf;
+
+ pxmitframe->buf_addr = pxmitbuf->pbuf;
+
+ pxmitbuf->priv_data = pxmitframe;
+
+ if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ if (pxmitframe->attrib.priority <= 15)/* TID0~15 */
+ res = rtw_xmitframe_coalesce23a(padapter, pxmitframe->pkt, pxmitframe);
+
+ rtw_os_xmit_complete23a(padapter, pxmitframe);/* always return ndis_packet after rtw_xmitframe_coalesce23a */
+ }
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("xmitframe_complete(): rtw_dump_xframe\n"));
+
+ if (res == _SUCCESS) {
+ rtw_dump_xframe(padapter, pxmitframe);
+ } else {
+ rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf);
+ rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
+ }
+ xcnt++;
+ } else {
+ rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf);
+ return false;
+ }
+ return true;
+}
+
+static s32 xmitframe_direct(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ s32 res = _SUCCESS;
+
+ res = rtw_xmitframe_coalesce23a(padapter, pxmitframe->pkt, pxmitframe);
+ if (res == _SUCCESS)
+ rtw_dump_xframe(padapter, pxmitframe);
+ return res;
+}
+
+/*
+ * Return
+ * true dump packet directly
+ * false enqueue packet
+ */
+static s32 pre_xmitframe(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ s32 res;
+ struct xmit_buf *pxmitbuf = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ do_queue_select(padapter, pattrib);
+ spin_lock_bh(&pxmitpriv->lock);
+
+#ifdef CONFIG_8723AU_AP_MODE
+ if (xmitframe_enqueue_for_sleeping_sta23a(padapter, pxmitframe)) {
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ if (pattrib->psta)
+ psta = pattrib->psta;
+ else
+ psta = rtw_get_stainfo23a(pstapriv, pattrib->ra);
+
+ if (psta) {
+ if (psta->sleepq_len > (NR_XMITFRAME>>3))
+ wakeup_sta_to_xmit23a(padapter, psta);
+ }
+
+ return false;
+ }
+#endif
+
+ if (rtw_txframes_sta_ac_pending23a(padapter, pattrib) > 0)
+ goto enqueue;
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
+ goto enqueue;
+
+ pxmitbuf = rtw_alloc_xmitbuf23a(pxmitpriv);
+ if (pxmitbuf == NULL)
+ goto enqueue;
+
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ pxmitframe->pxmitbuf = pxmitbuf;
+ pxmitframe->buf_addr = pxmitbuf->pbuf;
+ pxmitbuf->priv_data = pxmitframe;
+
+ if (xmitframe_direct(padapter, pxmitframe) != _SUCCESS) {
+ rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf);
+ rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
+ }
+ return true;
+
+enqueue:
+ res = rtw_xmitframe_enqueue23a(padapter, pxmitframe);
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ if (res != _SUCCESS) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_,
+ ("pre_xmitframe: enqueue xmitframe fail\n"));
+ rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
+
+ /* Trick, make the statistics correct */
+ pxmitpriv->tx_pkts--;
+ pxmitpriv->tx_drop++;
+ return true;
+ }
+ return false;
+}
+
+s32 rtl8723au_mgnt_xmit(struct rtw_adapter *padapter, struct xmit_frame *pmgntframe)
+{
+ return rtw_dump_xframe(padapter, pmgntframe);
+}
+
+/*
+ * Return
+ * true dump packet directly ok
+ * false temporary can't transmit packets to hardware
+ */
+s32 rtl8723au_hal_xmit(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ return pre_xmitframe(padapter, pxmitframe);
+}
+
+s32 rtl8723au_hal_xmitframe_enqueue(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ s32 err;
+
+ err = rtw_xmitframe_enqueue23a(padapter, pxmitframe);
+ if (err != _SUCCESS) {
+ rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
+
+ /* Trick, make the statistics correct */
+ pxmitpriv->tx_pkts--;
+ pxmitpriv->tx_drop++;
+ } else {
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+ }
+ return err;
+}
diff --git a/drivers/staging/rtl8723au/hal/usb_halinit.c b/drivers/staging/rtl8723au/hal/usb_halinit.c
new file mode 100644
index 000000000000..e206829d50fa
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/usb_halinit.c
@@ -0,0 +1,1834 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _HCI_HAL_INIT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_efuse.h>
+
+#include <HalPwrSeqCmd.h>
+#include <Hal8723PwrSeq.h>
+#include <rtl8723a_hal.h>
+#include <rtl8723a_led.h>
+#include <linux/ieee80211.h>
+
+#include <usb_ops.h>
+#include <usb_hal.h>
+#include <usb_osintf.h>
+
+static void
+_ConfigChipOutEP(struct rtw_adapter *pAdapter, u8 NumOutPipe)
+{
+ u8 value8;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
+
+ pHalData->OutEpQueueSel = 0;
+ pHalData->OutEpNumber = 0;
+
+ /* Normal and High queue */
+ value8 = rtw_read8(pAdapter, (REG_NORMAL_SIE_EP + 1));
+
+ if (value8 & USB_NORMAL_SIE_EP_MASK) {
+ pHalData->OutEpQueueSel |= TX_SELE_HQ;
+ pHalData->OutEpNumber++;
+ }
+
+ if ((value8 >> USB_NORMAL_SIE_EP_SHIFT) & USB_NORMAL_SIE_EP_MASK) {
+ pHalData->OutEpQueueSel |= TX_SELE_NQ;
+ pHalData->OutEpNumber++;
+ }
+
+ /* Low queue */
+ value8 = rtw_read8(pAdapter, (REG_NORMAL_SIE_EP + 2));
+ if (value8 & USB_NORMAL_SIE_EP_MASK) {
+ pHalData->OutEpQueueSel |= TX_SELE_LQ;
+ pHalData->OutEpNumber++;
+ }
+
+ /* TODO: Error recovery for this case */
+ /* RT_ASSERT((NumOutPipe == pHalData->OutEpNumber),
+ ("Out EP number isn't match! %d(Descriptor) != %d (SIE reg)\n",
+ (u32)NumOutPipe, (u32)pHalData->OutEpNumber)); */
+}
+
+static bool rtl8723au_set_queue_pipe_mapping(struct rtw_adapter *pAdapter,
+ u8 NumInPipe, u8 NumOutPipe)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
+ bool result = false;
+
+ _ConfigChipOutEP(pAdapter, NumOutPipe);
+
+ /* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
+ if (pHalData->OutEpNumber == 1) {
+ if (NumInPipe != 1)
+ return result;
+ }
+
+ result = Hal_MappingOutPipe23a(pAdapter, NumOutPipe);
+
+ return result;
+}
+
+static void rtl8723au_interface_configure(struct rtw_adapter *padapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+
+ if (pdvobjpriv->ishighspeed == true) {
+ /* 512 bytes */
+ pHalData->UsbBulkOutSize = USB_HIGH_SPEED_BULK_SIZE;
+ } else {
+ /* 64 bytes */
+ pHalData->UsbBulkOutSize = USB_FULL_SPEED_BULK_SIZE;
+ }
+
+ pHalData->interfaceIndex = pdvobjpriv->InterfaceNumber;
+
+ rtl8723au_set_queue_pipe_mapping(padapter,
+ pdvobjpriv->RtNumInPipes,
+ pdvobjpriv->RtNumOutPipes);
+}
+
+static u8 _InitPowerOn(struct rtw_adapter *padapter)
+{
+ u8 status = _SUCCESS;
+ u16 value16 = 0;
+ u8 value8 = 0;
+
+ /* RSV_CTRL 0x1C[7:0] = 0x00
+ unlock ISO/CLK/Power control register */
+ rtw_write8(padapter, REG_RSV_CTRL, 0x0);
+
+ /* HW Power on sequence */
+ if (!HalPwrSeqCmdParsing23a(padapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_USB_MSK, rtl8723AU_card_enable_flow))
+ return _FAIL;
+
+ /* 0x04[19] = 1, suggest by Jackie 2011.05.09, reset 8051 */
+ value8 = rtw_read8(padapter, REG_APS_FSMCO+2);
+ rtw_write8(padapter, REG_APS_FSMCO + 2, (value8 | BIT3));
+
+ /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+ /* Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy.
+ Added by tynli. 2011.08.31. */
+ value16 = rtw_read16(padapter, REG_CR);
+ value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN |
+ PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN |
+ ENSEC | CALTMR_EN);
+ rtw_write16(padapter, REG_CR, value16);
+
+ /* for Efuse PG, suggest by Jackie 2011.11.23 */
+ PHY_SetBBReg(padapter, REG_EFUSE_CTRL, BIT28|BIT29|BIT30, 0x06);
+
+ return status;
+}
+
+/* Shall USB interface init this? */
+static void _InitInterrupt(struct rtw_adapter *Adapter)
+{
+ u32 value32;
+
+ /* HISR - turn all on */
+ value32 = 0xFFFFFFFF;
+ rtw_write32(Adapter, REG_HISR, value32);
+
+ /* HIMR - turn all on */
+ rtw_write32(Adapter, REG_HIMR, value32);
+}
+
+static void _InitQueueReservedPage(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u32 numHQ = 0;
+ u32 numLQ = 0;
+ u32 numNQ = 0;
+ u32 numPubQ;
+ u32 value32;
+ u8 value8;
+ bool bWiFiConfig = pregistrypriv->wifi_spec;
+ /* u32 txQPageNum, txQPageUnit, txQRemainPage; */
+
+ { /* for WMM */
+ /* RT_ASSERT((outEPNum>= 2), ("for WMM , number of out-ep "
+ "must more than or equal to 2!\n")); */
+
+ numPubQ = bWiFiConfig ?
+ WMM_NORMAL_PAGE_NUM_PUBQ : NORMAL_PAGE_NUM_PUBQ;
+
+ if (pHalData->OutEpQueueSel & TX_SELE_HQ) {
+ numHQ = bWiFiConfig ?
+ WMM_NORMAL_PAGE_NUM_HPQ : NORMAL_PAGE_NUM_HPQ;
+ }
+
+ if (pHalData->OutEpQueueSel & TX_SELE_LQ) {
+ numLQ = bWiFiConfig ?
+ WMM_NORMAL_PAGE_NUM_LPQ : NORMAL_PAGE_NUM_LPQ;
+ }
+ /* NOTE: This step shall be done before
+ writing REG_RQPN. */
+ if (pHalData->OutEpQueueSel & TX_SELE_NQ) {
+ numNQ = bWiFiConfig ?
+ WMM_NORMAL_PAGE_NUM_NPQ : NORMAL_PAGE_NUM_NPQ;
+ }
+ value8 = (u8)_NPQ(numNQ);
+ rtw_write8(Adapter, REG_RQPN_NPQ, value8);
+ }
+
+ /* TX DMA */
+ value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
+ rtw_write32(Adapter, REG_RQPN, value32);
+}
+
+static void _InitTxBufferBoundary(struct rtw_adapter *Adapter)
+{
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+
+ u8 txpktbuf_bndy;
+
+ if (!pregistrypriv->wifi_spec)
+ txpktbuf_bndy = TX_PAGE_BOUNDARY;
+ else /* for WMM */
+ txpktbuf_bndy = WMM_NORMAL_TX_PAGE_BOUNDARY;
+
+ rtw_write8(Adapter, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TRXFF_BNDY, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TDECTRL+1, txpktbuf_bndy);
+}
+
+static void _InitPageBoundary(struct rtw_adapter *Adapter)
+{
+ /* RX Page Boundary */
+ /* srand(static_cast<unsigned int>(time(NULL))); */
+ u16 rxff_bndy = 0x27FF;/* rand() % 1) ? 0x27FF : 0x23FF; */
+
+ rtw_write16(Adapter, (REG_TRXFF_BNDY + 2), rxff_bndy);
+
+ /* TODO: ?? shall we set tx boundary? */
+}
+
+static void
+_InitNormalChipRegPriority(struct rtw_adapter *Adapter, u16 beQ, u16 bkQ,
+ u16 viQ, u16 voQ, u16 mgtQ, u16 hiQ)
+{
+ u16 value16 = rtw_read16(Adapter, REG_TRXDMA_CTRL) & 0x7;
+
+ value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
+ _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
+ _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
+
+ rtw_write16(Adapter, REG_TRXDMA_CTRL, value16);
+}
+
+static void _InitNormalChipOneOutEpPriority(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u16 value = 0;
+
+ switch (pHalData->OutEpQueueSel) {
+ case TX_SELE_HQ:
+ value = QUEUE_HIGH;
+ break;
+ case TX_SELE_LQ:
+ value = QUEUE_LOW;
+ break;
+ case TX_SELE_NQ:
+ value = QUEUE_NORMAL;
+ break;
+ default:
+ /* RT_ASSERT(false, ("Shall not reach here!\n")); */
+ break;
+ }
+
+ _InitNormalChipRegPriority(Adapter, value, value, value,
+ value, value, value);
+}
+
+static void _InitNormalChipTwoOutEpPriority(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+ u16 valueHi = 0;
+ u16 valueLow = 0;
+
+ switch (pHalData->OutEpQueueSel) {
+ case (TX_SELE_HQ | TX_SELE_LQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_NQ | TX_SELE_LQ):
+ valueHi = QUEUE_NORMAL;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_HQ | TX_SELE_NQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_NORMAL;
+ break;
+ default:
+ /* RT_ASSERT(false, ("Shall not reach here!\n")); */
+ break;
+ }
+
+ if (!pregistrypriv->wifi_spec) {
+ beQ = valueLow;
+ bkQ = valueLow;
+ viQ = valueHi;
+ voQ = valueHi;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+ } else {/* for WMM , CONFIG_OUT_EP_WIFI_MODE */
+ beQ = valueLow;
+ bkQ = valueHi;
+ viQ = valueHi;
+ voQ = valueLow;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+ }
+
+ _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+}
+
+static void _InitNormalChipThreeOutEpPriority(struct rtw_adapter *Adapter)
+{
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+
+ if (!pregistrypriv->wifi_spec) {/* typical setting */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_LOW;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ } else {/* for WMM */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_NORMAL;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ }
+ _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+}
+
+static void _InitNormalChipQueuePriority(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+
+ switch (pHalData->OutEpNumber) {
+ case 1:
+ _InitNormalChipOneOutEpPriority(Adapter);
+ break;
+ case 2:
+ _InitNormalChipTwoOutEpPriority(Adapter);
+ break;
+ case 3:
+ _InitNormalChipThreeOutEpPriority(Adapter);
+ break;
+ default:
+ /* RT_ASSERT(false, ("Shall not reach here!\n")); */
+ break;
+ }
+}
+
+static void _InitQueuePriority(struct rtw_adapter *Adapter)
+{
+ _InitNormalChipQueuePriority(Adapter);
+}
+
+static void _InitNetworkType(struct rtw_adapter *Adapter)
+{
+ u32 value32;
+
+ value32 = rtw_read32(Adapter, REG_CR);
+
+ /* TODO: use the other function to set network type */
+ value32 = (value32 & ~MASK_NETTYPE) | _NETTYPE(NT_LINK_AP);
+ rtw_write32(Adapter, REG_CR, value32);
+}
+
+static void _InitTransferPageSize(struct rtw_adapter *Adapter)
+{
+ /* Tx page size is always 128. */
+
+ u8 value8;
+ value8 = _PSRX(PBP_128) | _PSTX(PBP_128);
+ rtw_write8(Adapter, REG_PBP, value8);
+}
+
+static void _InitDriverInfoSize(struct rtw_adapter *Adapter, u8 drvInfoSize)
+{
+ rtw_write8(Adapter, REG_RX_DRVINFO_SZ, drvInfoSize);
+}
+
+static void _InitWMACSetting(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+
+ /* don't turn on AAP, it will allow all packets to driver */
+ pHalData->ReceiveConfig = RCR_APM | RCR_AM | RCR_AB | RCR_CBSSID_DATA |
+ RCR_CBSSID_BCN | RCR_APP_ICV | RCR_AMF |
+ RCR_HTC_LOC_CTRL | RCR_APP_MIC |
+ RCR_APP_PHYSTS;
+
+ /* some REG_RCR will be modified later by
+ phy_ConfigMACWithHeaderFile() */
+ rtw_write32(Adapter, REG_RCR, pHalData->ReceiveConfig);
+
+ /* Accept all multicast address */
+ rtw_write32(Adapter, REG_MAR, 0xFFFFFFFF);
+ rtw_write32(Adapter, REG_MAR + 4, 0xFFFFFFFF);
+
+ /* Accept all data frames */
+ /* value16 = 0xFFFF; */
+ /* rtw_write16(Adapter, REG_RXFLTMAP2, value16); */
+
+ /* 2010.09.08 hpfan */
+ /* Since ADF is removed from RCR, ps-poll will not be indicated
+ to the driver, */
+ /* RxFilterMap should mask ps-poll to guarantee AP mode can
+ rx ps-poll. */
+ /* value16 = 0x400; */
+ /* rtw_write16(Adapter, REG_RXFLTMAP1, value16); */
+
+ /* Accept all management frames */
+ /* value16 = 0xFFFF; */
+ /* rtw_write16(Adapter, REG_RXFLTMAP0, value16); */
+
+ /* enable RX_SHIFT bits */
+ /* rtw_write8(Adapter, REG_TRXDMA_CTRL, rtw_read8(Adapter,
+ REG_TRXDMA_CTRL)|BIT(1)); */
+}
+
+static void _InitAdaptiveCtrl(struct rtw_adapter *Adapter)
+{
+ u16 value16;
+ u32 value32;
+
+ /* Response Rate Set */
+ value32 = rtw_read32(Adapter, REG_RRSR);
+ value32 &= ~RATE_BITMAP_ALL;
+ value32 |= RATE_RRSR_CCK_ONLY_1M;
+ rtw_write32(Adapter, REG_RRSR, value32);
+
+ /* CF-END Threshold */
+ /* m_spIoBase->rtw_write8(REG_CFEND_TH, 0x1); */
+
+ /* SIFS (used in NAV) */
+ value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
+ rtw_write16(Adapter, REG_SPEC_SIFS, value16);
+
+ /* Retry Limit */
+ value16 = _LRL(0x30) | _SRL(0x30);
+ rtw_write16(Adapter, REG_RL, value16);
+}
+
+static void _InitRateFallback(struct rtw_adapter *Adapter)
+{
+ /* Set Data Auto Rate Fallback Retry Count register. */
+ rtw_write32(Adapter, REG_DARFRC, 0x00000000);
+ rtw_write32(Adapter, REG_DARFRC+4, 0x10080404);
+ rtw_write32(Adapter, REG_RARFRC, 0x04030201);
+ rtw_write32(Adapter, REG_RARFRC+4, 0x08070605);
+}
+
+static void _InitEDCA(struct rtw_adapter *Adapter)
+{
+ /* Set Spec SIFS (used in NAV) */
+ rtw_write16(Adapter, REG_SPEC_SIFS, 0x100a);
+ rtw_write16(Adapter, REG_MAC_SPEC_SIFS, 0x100a);
+
+ /* Set SIFS for CCK */
+ rtw_write16(Adapter, REG_SIFS_CTX, 0x100a);
+
+ /* Set SIFS for OFDM */
+ rtw_write16(Adapter, REG_SIFS_TRX, 0x100a);
+
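+	/* Each EDCA_*_PARAM register packs the per-AC parameters; the values
+	 * below appear to follow the usual Realtek layout (AIFS in [7:0],
+	 * ECWmin/ECWmax in [15:8], TXOP limit in [31:16]), e.g. 0x005EA42B
+	 * for BE would mean TXOP 0x5E, ECW 0xA4, AIFS 0x2B. */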
+ /* TXOP */
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtw_write32(Adapter, REG_EDCA_BK_PARAM, 0x0000A44F);
+ rtw_write32(Adapter, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtw_write32(Adapter, REG_EDCA_VO_PARAM, 0x002FA226);
+}
+
+static void _InitHWLed(struct rtw_adapter *Adapter)
+{
+ struct led_priv *pledpriv = &Adapter->ledpriv;
+
+ if (pledpriv->LedStrategy != HW_LED)
+ return;
+
+/* HW led control */
+/* to do .... */
+/* must consider cases of antenna diversity / combo card / solo card / mini card */
+}
+
+static void _InitRDGSetting(struct rtw_adapter *Adapter)
+{
+ rtw_write8(Adapter, REG_RD_CTRL, 0xFF);
+ rtw_write16(Adapter, REG_RD_NAV_NXT, 0x200);
+ rtw_write8(Adapter, REG_RD_RESP_PKT_TH, 0x05);
+}
+
+static void _InitRetryFunction(struct rtw_adapter *Adapter)
+{
+ u8 value8;
+
+ value8 = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL);
+ value8 |= EN_AMPDU_RTY_NEW;
+ rtw_write8(Adapter, REG_FWHW_TXQ_CTRL, value8);
+
+ /* Set ACK timeout */
+ rtw_write8(Adapter, REG_ACKTO, 0x40);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: usb_AggSettingTxUpdate()
+ *
+ * Overview:	Separate TX/RX parameter updates independently for TP
+ *		detection and dynamic TX/RX aggregation parameter updates.
+ *
+ * Input: struct rtw_adapter *
+ *
+ * Output/Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ *	12/10/2010	MHC		Separate into smaller functions.
+ *
+ *---------------------------------------------------------------------------*/
+static void usb_AggSettingTxUpdate(struct rtw_adapter *Adapter)
+{
+} /* usb_AggSettingTxUpdate */
+
+/*-----------------------------------------------------------------------------
+ * Function: usb_AggSettingRxUpdate()
+ *
+ * Overview:	Separate TX/RX parameter updates independently for TP
+ *		detection and dynamic TX/RX aggregation parameter updates.
+ *
+ * Input: struct rtw_adapter *
+ *
+ * Output/Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ *	12/10/2010	MHC		Separate into smaller functions.
+ *
+ *---------------------------------------------------------------------------*/
+static void usb_AggSettingRxUpdate(struct rtw_adapter *Adapter)
+{
+} /* usb_AggSettingRxUpdate */
+
+static void InitUsbAggregationSetting(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+
+ /* Tx aggregation setting */
+ usb_AggSettingTxUpdate(Adapter);
+
+ /* Rx aggregation setting */
+ usb_AggSettingRxUpdate(Adapter);
+
+ /* 201/12/10 MH Add for USB agg mode dynamic switch. */
+ pHalData->UsbRxHighSpeedMode = false;
+}
+
+static void _InitOperationMode(struct rtw_adapter *Adapter)
+{
+}
+
+static void _InitRFType(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ bool is92CU = IS_92C_SERIAL(pHalData->VersionID);
+
+ pHalData->rf_chip = RF_6052;
+
+ if (is92CU == false) {
+ pHalData->rf_type = RF_1T1R;
+ DBG_8723A("Set RF Chip ID to RF_6052 and RF type to 1T1R.\n");
+ return;
+ }
+
+	/* TODO: Consider the case where the EEPROM sets 92CU to 1T1R later. */
+	/* Force the setting to be overwritten according to chip version;
+	   ignore the EEPROM setting. */
+ /* pHalData->RF_Type = is92CU ? RF_2T2R : RF_1T1R; */
+ MSG_8723A("Set RF Chip ID to RF_6052 and RF type to %d.\n",
+ pHalData->rf_type);
+}
+
+/* Set CCK and OFDM Block "ON" */
+static void _BBTurnOnBlock(struct rtw_adapter *Adapter)
+{
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bCCKEn, 0x1);
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
+}
+
+#define MgntActSet_RF_State(...)
+static void _RfPowerSave(struct rtw_adapter *padapter)
+{
+}
+
+enum {
+ Antenna_Lfet = 1,
+ Antenna_Right = 2,
+};
+
+enum rt_rf_power_state RfOnOffDetect23a(struct rtw_adapter *pAdapter)
+{
+ /* struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter); */
+ u8 val8;
+ enum rt_rf_power_state rfpowerstate = rf_off;
+
+ if (pAdapter->pwrctrlpriv.bHWPowerdown) {
+ val8 = rtw_read8(pAdapter, REG_HSISR);
+ DBG_8723A("pwrdown, 0x5c(BIT7) =%02x\n", val8);
+ rfpowerstate = (val8 & BIT7) ? rf_off : rf_on;
+ } else { /* rf on/off */
+ rtw_write8(pAdapter, REG_MAC_PINMUX_CFG,
+ rtw_read8(pAdapter, REG_MAC_PINMUX_CFG) & ~BIT3);
+ val8 = rtw_read8(pAdapter, REG_GPIO_IO_SEL);
+ DBG_8723A("GPIO_IN =%02x\n", val8);
+ rfpowerstate = (val8 & BIT3) ? rf_on : rf_off;
+ }
+ return rfpowerstate;
+} /* HalDetectPwrDownMode */
+
+void _ps_open_RF23a(struct rtw_adapter *padapter);
+
+static u32 rtl8723au_hal_init(struct rtw_adapter *Adapter)
+{
+ u8 val8 = 0;
+ u32 boundary, status = _SUCCESS;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u32 NavUpper = WiFiNavUpperUs;
+
+ unsigned long init_start_time = jiffies;
+
+#define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
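+	/* If the firmware was kept alive (e.g. across a suspend cycle), skip
+	 * the full MAC/BB/RF bring-up below and only re-open the RF path,
+	 * redo the IQ/LC calibrations and check tx power tracking. */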
+ if (Adapter->pwrctrlpriv.bkeepfwalive) {
+ _ps_open_RF23a(Adapter);
+
+ if (pHalData->bIQKInitialized) {
+ rtl8723a_phy_iq_calibrate(Adapter, true);
+ } else {
+ rtl8723a_phy_iq_calibrate(Adapter, false);
+ pHalData->bIQKInitialized = true;
+ }
+ rtl8723a_odm_check_tx_power_tracking(Adapter);
+ rtl8723a_phy_lc_calibrate(Adapter);
+
+ goto exit;
+ }
+
+ /* Check if MAC has already power on. by tynli. 2011.05.27. */
+ val8 = rtw_read8(Adapter, REG_CR);
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("%s: REG_CR 0x100 = 0x%02x\n", __func__, val8));
+	/* Fix 92DU-VC S3 hang; the reason is that the secondary MAC is not
+	   initialized. */
+	/* The 0x100 value of the first MAC is 0xEA while the 0x100 value of
+	   the secondary MAC is 0x00. */
+ if (val8 == 0xEA) {
+ pHalData->bMACFuncEnable = false;
+ } else {
+ pHalData->bMACFuncEnable = true;
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("%s: MAC has already power on\n", __func__));
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PW_ON);
+ status = _InitPowerOn(Adapter);
+ if (status == _FAIL) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
+ ("Failed to init power on!\n"));
+ goto exit;
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_LLTT);
+ if (!pregistrypriv->wifi_spec) {
+ boundary = TX_PAGE_BOUNDARY;
+ } else {
+ /* for WMM */
+ boundary = WMM_NORMAL_TX_PAGE_BOUNDARY;
+ }
+
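+	/* The boundary selects how the TX buffer pages are split between the
+	 * data queues and the reserved/public pool; the LLT (link list table)
+	 * is only (re)programmed when the MAC was not already powered on. */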
+ if (!pHalData->bMACFuncEnable) {
+ status = InitLLTTable23a(Adapter, boundary);
+ if (status == _FAIL) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
+ ("Failed to init LLT table\n"));
+ goto exit;
+ }
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC01);
+ if (pHalData->bRDGEnable)
+ _InitRDGSetting(Adapter);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_DOWNLOAD_FW);
+ status = rtl8723a_FirmwareDownload(Adapter);
+ if (status != _SUCCESS) {
+ Adapter->bFWReady = false;
+ pHalData->fw_ractrl = false;
+ DBG_8723A("fw download fail!\n");
+ goto exit;
+ } else {
+ Adapter->bFWReady = true;
+ pHalData->fw_ractrl = true;
+ DBG_8723A("fw download ok!\n");
+ }
+
+ rtl8723a_InitializeFirmwareVars(Adapter);
+
+ if (pwrctrlpriv->reg_rfoff == true) {
+ pwrctrlpriv->rf_pwrstate = rf_off;
+ }
+
+	/* 2010/08/09 MH We need to check if we need to turn RF on or off after detecting */
+	/* the HW GPIO pin. Before PHY_RFConfig8192C. */
+	/* HalDetectPwrDownMode(Adapter); */
+	/* 2010/08/26 MH If Efuse does not support selective suspend then disable the function. */
+ /* HalDetectSelectiveSuspendMode(Adapter); */
+
+ /* Set RF type for BB/RF configuration */
+ _InitRFType(Adapter);/* _ReadRFType() */
+
+ /* Save target channel */
+ /* <Roger_Notes> Current Channel will be updated again later. */
+ pHalData->CurrentChannel = 6;/* default set to 6 */
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MAC);
+ status = PHY_MACConfig8723A(Adapter);
+ if (status == _FAIL) {
+ DBG_8723A("PHY_MACConfig8723A fault !!\n");
+ goto exit;
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BB);
+ /* */
+ /* d. Initialize BB related configurations. */
+ /* */
+ status = PHY_BBConfig8723A(Adapter);
+ if (status == _FAIL) {
+ DBG_8723A("PHY_BBConfig8723A fault !!\n");
+ goto exit;
+ }
+
+	/* Added for tx power by rate fine tuning. We need to call the function after BB config, */
+	/* because the tx power by rate table is initialized in BB config. */
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_RF);
+ status = PHY_RFConfig8723A(Adapter);
+ if (status == _FAIL) {
+ DBG_8723A("PHY_RFConfig8723A fault !!\n");
+ goto exit;
+ }
+
+ /* reducing 80M spur */
+ PHY_SetBBReg(Adapter, RF_T_METER, bMaskDWord, 0x0381808d);
+ PHY_SetBBReg(Adapter, RF_SYN_G4, bMaskDWord, 0xf2ffff83);
+ PHY_SetBBReg(Adapter, RF_SYN_G4, bMaskDWord, 0xf2ffff82);
+ PHY_SetBBReg(Adapter, RF_SYN_G4, bMaskDWord, 0xf2ffff83);
+
+ /* RFSW Control */
+ PHY_SetBBReg(Adapter, rFPGA0_TxInfo, bMaskDWord, 0x00000003); /* 0x804[14]= 0 */
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFInterfaceSW, bMaskDWord, 0x07000760); /* 0x870[6:5]= b'11 */
+ PHY_SetBBReg(Adapter, rFPGA0_XA_RFInterfaceOE, bMaskDWord, 0x66F60210); /* 0x860[6:5]= b'00 */
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("%s: 0x870 = value 0x%x\n", __func__, PHY_QueryBBReg(Adapter, 0x870, bMaskDWord)));
+
+ /* */
+ /* Joseph Note: Keep RfRegChnlVal for later use. */
+ /* */
+ pHalData->RfRegChnlVal[0] = PHY_QueryRFReg(Adapter, (enum RF_RADIO_PATH)0, RF_CHNLBW, bRFRegOffsetMask);
+ pHalData->RfRegChnlVal[1] = PHY_QueryRFReg(Adapter, (enum RF_RADIO_PATH)1, RF_CHNLBW, bRFRegOffsetMask);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC02);
+ if (!pHalData->bMACFuncEnable) {
+ _InitQueueReservedPage(Adapter);
+ _InitTxBufferBoundary(Adapter);
+ }
+ _InitQueuePriority(Adapter);
+ _InitPageBoundary(Adapter);
+ _InitTransferPageSize(Adapter);
+
+ /* Get Rx PHY status in order to report RSSI and others. */
+ _InitDriverInfoSize(Adapter, DRVINFO_SZ);
+
+ _InitInterrupt(Adapter);
+ hal_init_macaddr23a(Adapter);/* set mac_address */
+ _InitNetworkType(Adapter);/* set msr */
+ _InitWMACSetting(Adapter);
+ _InitAdaptiveCtrl(Adapter);
+ _InitEDCA(Adapter);
+ _InitRateFallback(Adapter);
+ _InitRetryFunction(Adapter);
+ InitUsbAggregationSetting(Adapter);
+ _InitOperationMode(Adapter);/* todo */
+ rtl8723a_InitBeaconParameters(Adapter);
+
+ _InitHWLed(Adapter);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_TURN_ON_BLOCK);
+ _BBTurnOnBlock(Adapter);
+ /* NicIFSetMacAddress(padapter, padapter->PermanentAddress); */
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_SECURITY);
+ invalidate_cam_all23a(Adapter);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC11);
+ /* 2010/12/17 MH We need to set TX power according to EFUSE content at first. */
+ PHY_SetTxPowerLevel8723A(Adapter, pHalData->CurrentChannel);
+
+ rtl8723a_InitAntenna_Selection(Adapter);
+
+ /* HW SEQ CTRL */
+ /* set 0x0 to 0xFF by tynli. Default enable HW SEQ NUM. */
+ rtw_write8(Adapter, REG_HWSEQ_CTRL, 0xFF);
+
+ /* */
+ /* Disable BAR, suggested by Scott */
+ /* 2010.04.09 add by hpfan */
+ /* */
+ rtw_write32(Adapter, REG_BAR_MODE_CTRL, 0x0201ffff);
+
+ if (pregistrypriv->wifi_spec)
+ rtw_write16(Adapter, REG_FAST_EDCA_CTRL, 0);
+
+	/* Moved by Neo for USB SS from the step above */
+ _RfPowerSave(Adapter);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
+ /* 2010/08/26 MH Merge from 8192CE. */
+ /* sherry masked that it has been done in _RfPowerSave */
+ /* 20110927 */
+ /* recovery for 8192cu and 9723Au 20111017 */
+ if (pwrctrlpriv->rf_pwrstate == rf_on) {
+ if (pHalData->bIQKInitialized) {
+ rtl8723a_phy_iq_calibrate(Adapter, true);
+ } else {
+ rtl8723a_phy_iq_calibrate(Adapter, false);
+ pHalData->bIQKInitialized = true;
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_PW_TRACK);
+ rtl8723a_odm_check_tx_power_tracking(Adapter);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_LCK);
+ rtl8723a_phy_lc_calibrate(Adapter);
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ rtl8723a_SingleDualAntennaDetection(Adapter);
+#endif
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC21);
+ /* fixed USB interface interference issue */
+ rtw_write8(Adapter, 0xfe40, 0xe0);
+ rtw_write8(Adapter, 0xfe41, 0x8d);
+ rtw_write8(Adapter, 0xfe42, 0x80);
+ rtw_write32(Adapter, 0x20c, 0xfd0320);
+	/* Solve too many protocol errors on the USB bus */
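+	/* The 0xFE40/0xFE41/0xFE42 triplets below look like an indirect
+	 * register access sequence (address, data, commit); the short inline
+	 * comments give the effective "register = value" pairs programmed. */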
+ if (!IS_81xxC_VENDOR_UMC_A_CUT(pHalData->VersionID)) {
+ /* 0xE6 = 0x94 */
+ rtw_write8(Adapter, 0xFE40, 0xE6);
+ rtw_write8(Adapter, 0xFE41, 0x94);
+ rtw_write8(Adapter, 0xFE42, 0x80);
+
+ /* 0xE0 = 0x19 */
+ rtw_write8(Adapter, 0xFE40, 0xE0);
+ rtw_write8(Adapter, 0xFE41, 0x19);
+ rtw_write8(Adapter, 0xFE42, 0x80);
+
+ /* 0xE5 = 0x91 */
+ rtw_write8(Adapter, 0xFE40, 0xE5);
+ rtw_write8(Adapter, 0xFE41, 0x91);
+ rtw_write8(Adapter, 0xFE42, 0x80);
+
+ /* 0xE2 = 0x81 */
+ rtw_write8(Adapter, 0xFE40, 0xE2);
+ rtw_write8(Adapter, 0xFE41, 0x81);
+ rtw_write8(Adapter, 0xFE42, 0x80);
+
+ }
+
+/* HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PABIAS); */
+/* _InitPABias(Adapter); */
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BT_COEXIST);
+ /* Init BT hw config. */
+ BT_InitHwConfig(Adapter);
+#endif
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_HAL_DM);
+ rtl8723a_InitHalDm(Adapter);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC31);
+ rtw_hal_set_hwreg23a(Adapter, HW_VAR_NAV_UPPER, (u8 *)&NavUpper);
+
+	/* 2011/03/09 MH debug only, UMC-B cut passes the 2500 S5 test, but we need to find the root cause. */
+ if (((rtw_read32(Adapter, rFPGA0_RFMOD) & 0xFF000000) != 0x83000000)) {
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(24), 1);
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("%s: IQK fail recorver\n", __func__));
+ }
+
+ /* ack for xmit mgmt frames. */
+ rtw_write32(Adapter, REG_FWHW_TXQ_CTRL, rtw_read32(Adapter, REG_FWHW_TXQ_CTRL)|BIT(12));
+
+exit:
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
+
+ DBG_8723A("%s in %dms\n", __func__,
+ jiffies_to_msecs(jiffies - init_start_time));
+ return status;
+}
+
+static void phy_SsPwrSwitch92CU(struct rtw_adapter *Adapter,
+ enum rt_rf_power_state eRFPowerState,
+ int bRegSSPwrLvl)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u8 value8;
+ u8 bytetmp;
+
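+	/* bRegSSPwrLvl selects how deep the selective-suspend power switch
+	 * goes: level 1 only touches the SPS/BB/RF settings, while other
+	 * levels also gate the AFE PLL, AFE clock and MAC clock (see the
+	 * branches below). */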
+ switch (eRFPowerState) {
+ case rf_on:
+ if (bRegSSPwrLvl == 1) {
+ /* 1. Enable MAC Clock. Can not be enabled now. */
+ /* WriteXBYTE(REG_SYS_CLKR+1,
+ ReadXBYTE(REG_SYS_CLKR+1) | BIT(3)); */
+
+ /* 2. Force PWM, Enable SPS18_LDO_Marco_Block */
+ rtw_write8(Adapter, REG_SPS0_CTRL,
+ rtw_read8(Adapter, REG_SPS0_CTRL) |
+ (BIT0|BIT3));
+
+ /* 3. restore BB, AFE control register. */
+ /* RF */
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x380038, 1);
+ else
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x38, 1);
+ PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 1);
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT1, 0);
+
+ /* AFE */
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x63DB25A0);
+ else if (pHalData->rf_type == RF_1T1R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x631B25A0);
+
+ /* 4. issue 3-wire command that RF set to Rx idle
+ mode. This is used to re-write the RX idle mode. */
+			/* We can only provide a usual value instead and then
+			   HW will modify the value by itself. */
+ PHY_SetRFReg(Adapter, RF_PATH_A, 0,
+ bRFRegOffsetMask, 0x32D95);
+ if (pHalData->rf_type == RF_2T2R) {
+ PHY_SetRFReg(Adapter, RF_PATH_B, 0,
+ bRFRegOffsetMask, 0x32D95);
+ }
+ } else { /* Level 2 or others. */
+ /* h. AFE_PLL_CTRL 0x28[7:0] = 0x80
+ disable AFE PLL */
+ rtw_write8(Adapter, REG_AFE_PLL_CTRL, 0x81);
+
+ /* i. AFE_XTAL_CTRL 0x24[15:0] = 0x880F
+ gated AFE DIG_CLOCK */
+ rtw_write16(Adapter, REG_AFE_XTAL_CTRL, 0x800F);
+ mdelay(1);
+
+ /* 2. Force PWM, Enable SPS18_LDO_Marco_Block */
+ rtw_write8(Adapter, REG_SPS0_CTRL,
+ rtw_read8(Adapter, REG_SPS0_CTRL) |
+ (BIT0|BIT3));
+
+ /* 3. restore BB, AFE control register. */
+ /* RF */
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x380038, 1);
+ else
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x38, 1);
+ PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 1);
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT1, 0);
+
+ /* AFE */
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA,
+ bMaskDWord, 0x63DB25A0);
+ else if (pHalData->rf_type == RF_1T1R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA,
+ bMaskDWord, 0x631B25A0);
+
+ /* 4. issue 3-wire command that RF set to Rx idle
+ mode. This is used to re-write the RX idle mode. */
+			/* We can only provide a usual value instead and
+			   then HW will modify the value by itself. */
+ PHY_SetRFReg(Adapter, RF_PATH_A, 0,
+ bRFRegOffsetMask, 0x32D95);
+ if (pHalData->rf_type == RF_2T2R) {
+ PHY_SetRFReg(Adapter, RF_PATH_B, 0,
+ bRFRegOffsetMask, 0x32D95);
+ }
+
+ /* 5. gated MAC Clock */
+ bytetmp = rtw_read8(Adapter, REG_APSD_CTRL);
+ rtw_write8(Adapter, REG_APSD_CTRL, bytetmp & ~BIT6);
+
+ mdelay(10);
+
+ /* Set BB reset at first */
+ rtw_write8(Adapter, REG_SYS_FUNC_EN, 0x17); /* 0x16 */
+
+ /* Enable TX */
+ rtw_write8(Adapter, REG_TXPAUSE, 0x0);
+ }
+ break;
+ case rf_sleep:
+ case rf_off:
+		value8 = rtw_read8(Adapter, REG_SPS0_CTRL);
+ if (IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID))
+ value8 &= ~(BIT0);
+ else
+ value8 &= ~(BIT0|BIT3);
+ if (bRegSSPwrLvl == 1) {
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("SS LVL1\n"));
+ /* Disable RF and BB only for SelectSuspend. */
+
+ /* 1. Set BB/RF to shutdown. */
+ /* (1) Reg878[5:3]= 0 RF rx_code for
+ preamble power saving */
+ /* (2)Reg878[21:19]= 0 Turn off RF-B */
+ /* (3) RegC04[7:4]= 0 Turn off all paths
+ for packet detection */
+ /* (4) Reg800[1] = 1 enable preamble power
+ saving */
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF0] =
+ PHY_QueryBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ bMaskDWord);
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF1] =
+ PHY_QueryBBReg(Adapter, rOFDM0_TRxPathEnable,
+ bMaskDWord);
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF2] =
+ PHY_QueryBBReg(Adapter, rFPGA0_RFMOD,
+ bMaskDWord);
+ if (pHalData->rf_type == RF_2T2R) {
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x380038, 0);
+ } else if (pHalData->rf_type == RF_1T1R) {
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x38, 0);
+ }
+ PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 0);
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT1, 1);
+
+ /* 2 .AFE control register to power down. bit[30:22] */
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_AFE0] =
+ PHY_QueryBBReg(Adapter, rRx_Wait_CCA,
+ bMaskDWord);
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x00DB25A0);
+ else if (pHalData->rf_type == RF_1T1R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x001B25A0);
+
+ /* 3. issue 3-wire command that RF set to power down.*/
+ PHY_SetRFReg(Adapter, RF_PATH_A, 0, bRFRegOffsetMask, 0);
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetRFReg(Adapter, RF_PATH_B, 0,
+ bRFRegOffsetMask, 0);
+
+ /* 4. Force PFM , disable SPS18_LDO_Marco_Block */
+ rtw_write8(Adapter, REG_SPS0_CTRL, value8);
+ } else { /* Level 2 or others. */
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("SS LVL2\n"));
+ {
+ u8 eRFPath = RF_PATH_A, value8 = 0;
+ rtw_write8(Adapter, REG_TXPAUSE, 0xFF);
+ PHY_SetRFReg(Adapter,
+ (enum RF_RADIO_PATH)eRFPath,
+ 0x0, bMaskByte0, 0x0);
+ value8 |= APSDOFF;
+ /* 0x40 */
+ rtw_write8(Adapter, REG_APSD_CTRL, value8);
+
+ /* After switch APSD, we need to delay
+ for stability */
+ mdelay(10);
+
+ /* Set BB reset at first */
+				value8 = 0;
+ value8 |= (FEN_USBD | FEN_USBA |
+ FEN_BB_GLB_RSTn);
+ /* 0x16 */
+ rtw_write8(Adapter, REG_SYS_FUNC_EN, value8);
+ }
+
+ /* Disable RF and BB only for SelectSuspend. */
+
+ /* 1. Set BB/RF to shutdown. */
+ /* (1) Reg878[5:3]= 0 RF rx_code for
+ preamble power saving */
+ /* (2)Reg878[21:19]= 0 Turn off RF-B */
+ /* (3) RegC04[7:4]= 0 Turn off all paths for
+ packet detection */
+ /* (4) Reg800[1] = 1 enable preamble power
+ saving */
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF0] =
+ PHY_QueryBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ bMaskDWord);
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF1] =
+ PHY_QueryBBReg(Adapter, rOFDM0_TRxPathEnable,
+ bMaskDWord);
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF2] =
+ PHY_QueryBBReg(Adapter, rFPGA0_RFMOD,
+ bMaskDWord);
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x380038, 0);
+ else if (pHalData->rf_type == RF_1T1R)
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x38, 0);
+ PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 0);
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT1, 1);
+
+ /* 2 .AFE control register to power down. bit[30:22] */
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_AFE0] =
+ PHY_QueryBBReg(Adapter, rRx_Wait_CCA,
+ bMaskDWord);
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x00DB25A0);
+ else if (pHalData->rf_type == RF_1T1R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x001B25A0);
+
+ /* 3. issue 3-wire command that RF set to power down. */
+ PHY_SetRFReg(Adapter, RF_PATH_A, 0, bRFRegOffsetMask, 0);
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetRFReg(Adapter, RF_PATH_B, 0,
+ bRFRegOffsetMask, 0);
+
+ /* 4. Force PFM , disable SPS18_LDO_Marco_Block */
+ rtw_write8(Adapter, REG_SPS0_CTRL, value8);
+
+ /* 2010/10/13 MH/Isaachsu exchange sequence. */
+ /* h. AFE_PLL_CTRL 0x28[7:0] = 0x80
+ disable AFE PLL */
+ rtw_write8(Adapter, REG_AFE_PLL_CTRL, 0x80);
+ mdelay(1);
+
+ /* i. AFE_XTAL_CTRL 0x24[15:0] = 0x880F
+ gated AFE DIG_CLOCK */
+ rtw_write16(Adapter, REG_AFE_XTAL_CTRL, 0xA80F);
+ }
+ break;
+ default:
+ break;
+ }
+
+} /* phy_PowerSwitch92CU */
+
+void _ps_open_RF23a(struct rtw_adapter *padapter)
+{
+	/* called here with bRegSSPwrLvl 1; bRegSSPwrLvl 2 still needs to be verified */
+ phy_SsPwrSwitch92CU(padapter, rf_on, 1);
+}
+
+static void CardDisableRTL8723U(struct rtw_adapter *Adapter)
+{
+ u8 u1bTmp;
+
+ DBG_8723A("CardDisableRTL8723U\n");
+ /* USB-MF Card Disable Flow */
+ /* 1. Run LPS WL RFOFF flow */
+ HalPwrSeqCmdParsing23a(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_USB_MSK, rtl8723AU_enter_lps_flow);
+
+ /* 2. 0x1F[7:0] = 0 turn off RF */
+ rtw_write8(Adapter, REG_RF_CTRL, 0x00);
+
+ /* ==== Reset digital sequence ====== */
+ if ((rtw_read8(Adapter, REG_MCUFWDL)&BIT7) &&
+ Adapter->bFWReady) /* 8051 RAM code */
+ rtl8723a_FirmwareSelfReset(Adapter);
+
+ /* Reset MCU. Suggested by Filen. 2011.01.26. by tynli. */
+ u1bTmp = rtw_read8(Adapter, REG_SYS_FUNC_EN+1);
+ rtw_write8(Adapter, REG_SYS_FUNC_EN+1, (u1bTmp & (~BIT2)));
+
+ /* g. MCUFWDL 0x80[1:0]= 0 reset MCU ready status */
+ rtw_write8(Adapter, REG_MCUFWDL, 0x00);
+
+ /* ==== Reset digital sequence end ====== */
+ /* Card disable power action flow */
+ HalPwrSeqCmdParsing23a(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_USB_MSK,
+ rtl8723AU_card_disable_flow);
+
+ /* Reset MCU IO Wrapper, added by Roger, 2011.08.30. */
+ u1bTmp = rtw_read8(Adapter, REG_RSV_CTRL + 1);
+ rtw_write8(Adapter, REG_RSV_CTRL+1, (u1bTmp & (~BIT0)));
+ u1bTmp = rtw_read8(Adapter, REG_RSV_CTRL + 1);
+ rtw_write8(Adapter, REG_RSV_CTRL+1, u1bTmp | BIT0);
+
+ /* 7. RSV_CTRL 0x1C[7:0] = 0x0E lock ISO/CLK/Power control register */
+ rtw_write8(Adapter, REG_RSV_CTRL, 0x0e);
+}
+
+static u32 rtl8723au_hal_deinit(struct rtw_adapter *padapter)
+{
+ DBG_8723A("==> %s\n", __func__);
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ BT_HaltProcess(padapter);
+#endif
+	/* 2011/02/18 To fix the RU LNA power leakage problem, we need to
+	   execute the below in the adapter init and halt sequences.
+	   According to EEchou's opinion, we can enable the ability for all */
+	/* ICs. According to johnny's opinion, only RU needs the support. */
+ CardDisableRTL8723U(padapter);
+
+ return _SUCCESS;
+}
+
+static unsigned int rtl8723au_inirp_init(struct rtw_adapter *Adapter)
+{
+ u8 i;
+ struct recv_buf *precvbuf;
+ uint status;
+ struct intf_hdl *pintfhdl = &Adapter->iopriv.intf;
+ struct recv_priv *precvpriv = &Adapter->recvpriv;
+ u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ struct recv_buf *rbuf);
+ u32 (*_read_interrupt)(struct intf_hdl *pintfhdl, u32 addr);
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+
+ _read_port = pintfhdl->io_ops._read_port;
+
+ status = _SUCCESS;
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("===> usb_inirp_init\n"));
+
+ precvpriv->ff_hwaddr = RECV_BULK_IN_ADDR;
+
+ /* issue Rx irp to receive data */
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ if (_read_port(pintfhdl, precvpriv->ff_hwaddr, 0, precvbuf) ==
+ false) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
+ ("usb_rx_init: usb_read_port error\n"));
+ status = _FAIL;
+ goto exit;
+ }
+ precvbuf++;
+ precvpriv->free_recv_buf_queue_cnt--;
+ }
+ _read_interrupt = pintfhdl->io_ops._read_interrupt;
+ if (_read_interrupt(pintfhdl, RECV_INT_IN_ADDR) == false) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
+ ("usb_rx_init: usb_read_interrupt error\n"));
+ status = _FAIL;
+ }
+ pHalData->IntrMask[0] = rtw_read32(Adapter, REG_USB_HIMR);
+ MSG_8723A("pHalData->IntrMask = 0x%04x\n", pHalData->IntrMask[0]);
+ pHalData->IntrMask[0] |= UHIMR_C2HCMD|UHIMR_CPWM;
+ rtw_write32(Adapter, REG_USB_HIMR, pHalData->IntrMask[0]);
+exit:
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("<=== usb_inirp_init\n"));
+ return status;
+}
+
+static unsigned int rtl8723au_inirp_deinit(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("\n ===> usb_rx_deinit\n"));
+ rtw_read_port_cancel(Adapter);
+ pHalData->IntrMask[0] = rtw_read32(Adapter, REG_USB_HIMR);
+ MSG_8723A("%s pHalData->IntrMask = 0x%04x\n", __func__,
+ pHalData->IntrMask[0]);
+ pHalData->IntrMask[0] = 0x0;
+ rtw_write32(Adapter, REG_USB_HIMR, pHalData->IntrMask[0]);
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("\n <=== usb_rx_deinit\n"));
+ return _SUCCESS;
+}
+
+static void _ReadBoardType(struct rtw_adapter *Adapter, u8 *PROMContent,
+ bool AutoloadFail)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u8 boardType = BOARD_USB_DONGLE;
+
+ if (AutoloadFail) {
+ if (IS_8723_SERIES(pHalData->VersionID))
+ pHalData->rf_type = RF_1T1R;
+ else
+ pHalData->rf_type = RF_2T2R;
+ pHalData->BoardType = boardType;
+ return;
+ }
+
+ boardType = PROMContent[EEPROM_NORMAL_BoardType];
+ boardType &= BOARD_TYPE_NORMAL_MASK;/* bit[7:5] */
+ boardType >>= 5;
+
+ pHalData->BoardType = boardType;
+ MSG_8723A("_ReadBoardType(%x)\n", pHalData->BoardType);
+
+ if (boardType == BOARD_USB_High_PA)
+ pHalData->ExternalPA = 1;
+}
+
+static void _ReadLEDSetting(struct rtw_adapter *Adapter, u8 *PROMContent,
+ bool AutoloadFail)
+{
+ struct led_priv *pledpriv = &Adapter->ledpriv;
+
+ pledpriv->LedStrategy = HW_LED;
+}
+
+static void Hal_EfuseParsePIDVID_8723AU(struct rtw_adapter *pAdapter,
+ u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
+
+ if (AutoLoadFail) {
+ pHalData->EEPROMVID = 0;
+ pHalData->EEPROMPID = 0;
+ } else {
+ /* VID, PID */
+ pHalData->EEPROMVID =
+ le16_to_cpu(*(u16 *)&hwinfo[EEPROM_VID_8723AU]);
+ pHalData->EEPROMPID =
+ le16_to_cpu(*(u16 *)&hwinfo[EEPROM_PID_8723AU]);
+ }
+
+ MSG_8723A("EEPROM VID = 0x%4x\n", pHalData->EEPROMVID);
+ MSG_8723A("EEPROM PID = 0x%4x\n", pHalData->EEPROMPID);
+}
+
+static void Hal_EfuseParseMACAddr_8723AU(struct rtw_adapter *padapter,
+ u8 *hwinfo, bool AutoLoadFail)
+{
+ u16 i;
+ u8 sMacAddr[ETH_ALEN] = {0x00, 0xE0, 0x4C, 0x87, 0x23, 0x00};
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ if (AutoLoadFail) {
+ for (i = 0; i < 6; i++)
+ pEEPROM->mac_addr[i] = sMacAddr[i];
+ } else {
+ /* Read Permanent MAC address */
+ memcpy(pEEPROM->mac_addr, &hwinfo[EEPROM_MAC_ADDR_8723AU],
+ ETH_ALEN);
+ }
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
+ ("Hal_EfuseParseMACAddr_8723AU: Permanent Address =%02x:%02x:"
+ "%02x:%02x:%02x:%02x\n",
+ pEEPROM->mac_addr[0], pEEPROM->mac_addr[1],
+ pEEPROM->mac_addr[2], pEEPROM->mac_addr[3],
+ pEEPROM->mac_addr[4], pEEPROM->mac_addr[5]));
+}
+
+static void readAdapterInfo(struct rtw_adapter *padapter)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+ /* struct hal_data_8723a * pHalData = GET_HAL_DATA(padapter); */
+ u8 hwinfo[HWSET_MAX_SIZE];
+
+ Hal_InitPGData(padapter, hwinfo);
+ Hal_EfuseParseIDCode(padapter, hwinfo);
+ Hal_EfuseParsePIDVID_8723AU(padapter, hwinfo,
+ pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseEEPROMVer(padapter, hwinfo,
+ pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseMACAddr_8723AU(padapter, hwinfo,
+ pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParsetxpowerinfo_8723A(padapter, hwinfo,
+ pEEPROM->bautoload_fail_flag);
+ _ReadBoardType(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseBTCoexistInfo_8723A(padapter, hwinfo,
+ pEEPROM->bautoload_fail_flag);
+
+ rtl8723a_EfuseParseChnlPlan(padapter, hwinfo,
+ pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseThermalMeter_8723A(padapter, hwinfo,
+ pEEPROM->bautoload_fail_flag);
+ _ReadLEDSetting(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+/* _ReadRFSetting(Adapter, PROMContent, pEEPROM->bautoload_fail_flag); */
+/* _ReadPSSetting(Adapter, PROMContent, pEEPROM->bautoload_fail_flag); */
+ Hal_EfuseParseAntennaDiversity(padapter, hwinfo,
+ pEEPROM->bautoload_fail_flag);
+
+ Hal_EfuseParseEEPROMVer(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseCustomerID(padapter, hwinfo,
+ pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseRateIndicationOption(padapter, hwinfo,
+ pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseXtal_8723A(padapter, hwinfo,
+ pEEPROM->bautoload_fail_flag);
+ /* */
+	/* The following part initializes some variables from PG info. */
+ /* */
+ Hal_InitChannelPlan23a(padapter);
+
+ /* hal_CustomizedBehavior_8723U(Adapter); */
+
+/* Adapter->bDongle = (PROMContent[EEPROM_EASY_REPLACEMENT] == 1)? 0: 1; */
+ DBG_8723A("%s(): REPLACEMENT = %x\n", __func__, padapter->bDongle);
+}
+
+static void _ReadPROMContent(struct rtw_adapter *Adapter)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(Adapter);
+ u8 eeValue;
+
+ eeValue = rtw_read8(Adapter, REG_9346CR);
+ /* To check system boot selection. */
+ pEEPROM->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM) ? true : false;
+ pEEPROM->bautoload_fail_flag = (eeValue & EEPROM_EN) ? false : true;
+
+ DBG_8723A("Boot from %s, Autoload %s !\n",
+ (pEEPROM->EepromOrEfuse ? "EEPROM" : "EFUSE"),
+ (pEEPROM->bautoload_fail_flag ? "Fail" : "OK"));
+
+ readAdapterInfo(Adapter);
+}
+
+static void _ReadRFType(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+
+ pHalData->rf_chip = RF_6052;
+}
+
+static void _ReadSilmComboMode(struct rtw_adapter *Adapter)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+
+ pHalData->SlimComboDbg = false; /* Default is not debug mode. */
+}
+
+/* */
+/* Description: */
+/* We should set Efuse cell selection to the WiFi cell by default. */
+/* */
+/* Assumption: */
+/* PASSIVE_LEVEL */
+/* */
+/* Added by Roger, 2010.11.23. */
+/* */
+static void hal_EfuseCellSel(struct rtw_adapter *Adapter)
+{
+ u32 value32;
+
+ value32 = rtw_read32(Adapter, EFUSE_TEST);
+ value32 = (value32 & ~EFUSE_SEL_MASK) | EFUSE_SEL(EFUSE_WIFI_SEL_0);
+ rtw_write32(Adapter, EFUSE_TEST, value32);
+}
+
+static int _ReadAdapterInfo8723AU(struct rtw_adapter *Adapter)
+{
+ /* struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); */
+ unsigned long start = jiffies;
+
+ MSG_8723A("====> _ReadAdapterInfo8723AU\n");
+
+ hal_EfuseCellSel(Adapter);
+
+ _ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
+ _ReadPROMContent(Adapter);
+
+	/* 2010/10/25 MH The function must be called after
+	   board_type & IC version are recognized. */
+ _ReadSilmComboMode(Adapter);
+
+ /* MSG_8723A("%s()(done), rf_chip = 0x%x, rf_type = 0x%x\n",
+ __func__, pHalData->rf_chip, pHalData->rf_type); */
+
+ MSG_8723A("<==== _ReadAdapterInfo8723AU in %d ms\n",
+ jiffies_to_msecs(jiffies - start));
+
+ return _SUCCESS;
+}
+
+static void ReadAdapterInfo8723AU(struct rtw_adapter *Adapter)
+{
+	/* Read EEPROM size before calling any EEPROM function */
+ Adapter->EepromAddressSize = GetEEPROMSize8723A(Adapter);
+
+ _ReadAdapterInfo8723AU(Adapter);
+}
+
+#define GPIO_DEBUG_PORT_NUM 0
+static void rtl8723au_trigger_gpio_0(struct rtw_adapter *padapter)
+{
+ u32 gpioctrl;
+ DBG_8723A("==> trigger_gpio_0...\n");
+ rtw_write16_async(padapter, REG_GPIO_PIN_CTRL, 0);
+ rtw_write8_async(padapter, REG_GPIO_PIN_CTRL+2, 0xFF);
+ gpioctrl = (BIT(GPIO_DEBUG_PORT_NUM) << 24)|
+ (BIT(GPIO_DEBUG_PORT_NUM) << 16);
+ rtw_write32_async(padapter, REG_GPIO_PIN_CTRL, gpioctrl);
+ gpioctrl |= (BIT(GPIO_DEBUG_PORT_NUM)<<8);
+ rtw_write32_async(padapter, REG_GPIO_PIN_CTRL, gpioctrl);
+ DBG_8723A("<=== trigger_gpio_0...\n");
+}
+
+/*
+ * If variable not handled here,
+ * some variables will be processed in SetHwReg8723A()
+ */
+static void SetHwReg8723AU(struct rtw_adapter *Adapter, u8 variable, u8 *val)
+{
+ switch (variable) {
+ case HW_VAR_RXDMA_AGG_PG_TH:
+ break;
+ case HW_VAR_SET_RPWM:
+ rtl8723a_set_rpwm(Adapter, *val);
+ break;
+ case HW_VAR_TRIGGER_GPIO_0:
+ rtl8723au_trigger_gpio_0(Adapter);
+ break;
+ default:
+ SetHwReg8723A(Adapter, variable, val);
+ break;
+ }
+
+}
+
+/*
+ * If variable not handled here,
+ * some variables will be processed in GetHwReg8723A()
+ */
+static void GetHwReg8723AU(struct rtw_adapter *Adapter, u8 variable, u8 *val)
+{
+ GetHwReg8723A(Adapter, variable, val);
+}
+
+/* */
+/* Description: */
+/* Query setting of specified variable. */
+/* */
+static u8 GetHalDefVar8192CUsb(struct rtw_adapter *Adapter,
+ enum hal_def_variable eVariable, void *pValue)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u8 bResult = _SUCCESS;
+
+ switch (eVariable) {
+ case HAL_DEF_UNDERCORATEDSMOOTHEDPWDB:
+ *((int *)pValue) = pHalData->dmpriv.UndecoratedSmoothedPWDB;
+ break;
+ case HAL_DEF_IS_SUPPORT_ANT_DIV:
+ break;
+ case HAL_DEF_CURRENT_ANTENNA:
+ break;
+ case HAL_DEF_DRVINFO_SZ:
+ *((u32 *)pValue) = DRVINFO_SZ;
+ break;
+ case HAL_DEF_MAX_RECVBUF_SZ:
+ *((u32 *)pValue) = MAX_RECVBUF_SZ;
+ break;
+ case HAL_DEF_RX_PACKET_OFFSET:
+ *((u32 *)pValue) = RXDESC_SIZE + DRVINFO_SZ;
+ break;
+ case HAL_DEF_DBG_DUMP_RXPKT:
+ *((u8 *)pValue) = pHalData->bDumpRxPkt;
+ break;
+ case HAL_DEF_DBG_DM_FUNC:
+ *((u32 *)pValue) = pHalData->odmpriv.SupportAbility;
+ break;
+ case HW_VAR_MAX_RX_AMPDU_FACTOR:
+ *((u32 *)pValue) = IEEE80211_HT_MAX_AMPDU_64K;
+ break;
+ case HW_DEF_ODM_DBG_FLAG:
+ {
+ struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
+ printk("pDM_Odm->DebugComponents = 0x%llx\n",
+ pDM_Odm->DebugComponents);
+ }
+ break;
+ default:
+ /* RT_TRACE(COMP_INIT, DBG_WARNING, ("GetHalDefVar8192CUsb(): "
+ "Unkown variable: %d!\n", eVariable)); */
+ bResult = _FAIL;
+ break;
+ }
+
+ return bResult;
+}
+
+/* Change default setting of specified variable. */
+static u8 SetHalDefVar8192CUsb(struct rtw_adapter *Adapter,
+ enum hal_def_variable eVariable, void *pValue)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u8 bResult = _SUCCESS;
+
+ switch (eVariable) {
+ case HAL_DEF_DBG_DUMP_RXPKT:
+ pHalData->bDumpRxPkt = *((u8 *)pValue);
+ break;
+ case HAL_DEF_DBG_DM_FUNC:
+ {
+ u8 dm_func = *((u8 *)pValue);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct dm_odm_t *podmpriv = &pHalData->odmpriv;
+
+ if (dm_func == 0) { /* disable all dynamic func */
+ podmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
+ DBG_8723A("==> Disable all dynamic function...\n");
+ } else if (dm_func == 1) {/* disable DIG */
+ podmpriv->SupportAbility &= (~DYNAMIC_BB_DIG);
+ DBG_8723A("==> Disable DIG...\n");
+ } else if (dm_func == 2) {/* disable High power */
+ podmpriv->SupportAbility &= (~DYNAMIC_BB_DYNAMIC_TXPWR);
+ } else if (dm_func == 3) {/* disable tx power tracking */
+ podmpriv->SupportAbility &= (~DYNAMIC_RF_CALIBRATION);
+ DBG_8723A("==> Disable tx power tracking...\n");
+ } else if (dm_func == 4) {/* disable BT coexistence */
+ pdmpriv->DMFlag &= (~DYNAMIC_FUNC_BT);
+ } else if (dm_func == 5) {/* disable antenna diversity */
+ podmpriv->SupportAbility &= (~DYNAMIC_BB_ANT_DIV);
+ } else if (dm_func == 6) {/* turn on all dynamic func */
+ if (!(podmpriv->SupportAbility & DYNAMIC_BB_DIG)) {
+ struct dig_t *pDigTable =
+ &podmpriv->DM_DigTable;
+ pDigTable->CurIGValue = rtw_read8(Adapter, 0xc50);
+ }
+ pdmpriv->DMFlag |= DYNAMIC_FUNC_BT;
+ podmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
+ DBG_8723A("==> Turn on all dynamic function...\n");
+ }
+ }
+ break;
+ case HW_DEF_FA_CNT_DUMP:
+ {
+ u8 bRSSIDump = *((u8 *)pValue);
+ struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
+ if (bRSSIDump)
+ pDM_Odm->DebugComponents = ODM_COMP_DIG|ODM_COMP_FA_CNT;
+ else
+ pDM_Odm->DebugComponents = 0;
+ }
+ break;
+ case HW_DEF_ODM_DBG_FLAG:
+ {
+ u64 DebugComponents = *((u64 *)pValue);
+ struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
+ pDM_Odm->DebugComponents = DebugComponents;
+ }
+ break;
+ default:
+ /* RT_TRACE(COMP_INIT, DBG_TRACE, ("SetHalDefVar819xUsb(): "
+ "Unkown variable: %d!\n", eVariable)); */
+ bResult = _FAIL;
+ break;
+ }
+
+ return bResult;
+}
+
+static void UpdateHalRAMask8192CUsb(struct rtw_adapter *padapter,
+ u32 mac_id, u8 rssi_level)
+{
+ u8 init_rate = 0;
+ u8 networkType, raid;
+ u32 mask, rate_bitmap;
+ u8 shortGIrate = false;
+ int supportRateNum = 0;
+ struct sta_info *psta;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+
+ if (mac_id >= NUM_STA) /* CAM_SIZE */
+ return;
+
+ psta = pmlmeinfo->FW_sta_info[mac_id].psta;
+ if (psta == NULL)
+ return;
+
+ switch (mac_id) {
+ case 0:/* for infra mode */
+ supportRateNum =
+ rtw_get_rateset_len23a(cur_network->SupportedRates);
+ networkType = judge_network_type23a(padapter,
+ cur_network->SupportedRates,
+ supportRateNum) & 0xf;
+ /* pmlmeext->cur_wireless_mode = networkType; */
+ raid = networktype_to_raid23a(networkType);
+
+ mask = update_supported_rate23a(cur_network->SupportedRates,
+ supportRateNum);
+ mask |= (pmlmeinfo->HT_enable) ?
+ update_MSC_rate23a(&pmlmeinfo->HT_caps) : 0;
+
+ if (support_short_GI23a(padapter, &pmlmeinfo->HT_caps))
+ shortGIrate = true;
+ break;
+
+ case 1:/* for broadcast/multicast */
+ supportRateNum = rtw_get_rateset_len23a(
+ pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
+ networkType = WIRELESS_11B;
+ else
+ networkType = WIRELESS_11G;
+ raid = networktype_to_raid23a(networkType);
+
+ mask = update_basic_rate23a(cur_network->SupportedRates,
+ supportRateNum);
+ break;
+
+ default: /* for each sta in IBSS */
+ supportRateNum = rtw_get_rateset_len23a(
+ pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ networkType = judge_network_type23a(padapter,
+ pmlmeinfo->FW_sta_info[mac_id].SupportedRates,
+ supportRateNum) & 0xf;
+ /* pmlmeext->cur_wireless_mode = networkType; */
+ raid = networktype_to_raid23a(networkType);
+
+ mask = update_supported_rate23a(cur_network->SupportedRates,
+ supportRateNum);
+
+ /* todo: support HT in IBSS */
+ break;
+ }
+
+ /* mask &= 0x0fffffff; */
+ rate_bitmap = 0x0fffffff;
+ rate_bitmap = ODM_Get_Rate_Bitmap23a(&pHalData->odmpriv,
+ mac_id, mask, rssi_level);
+ printk(KERN_DEBUG "%s => mac_id:%d, networkType:0x%02x, "
+ "mask:0x%08x\n\t ==> rssi_level:%d, rate_bitmap:0x%08x\n",
+ __func__,
+ mac_id, networkType, mask, rssi_level, rate_bitmap);
+
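+	/* Compose the rate adaptive mask: the low 28 bits carry the rate
+	 * bitmap (already filtered by ODM for the current RSSI level), and
+	 * the top nibble carries the rate adaptive group id (raid). */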
+ mask &= rate_bitmap;
+ mask |= ((raid<<28)&0xf0000000);
+
+ init_rate = get_highest_rate_idx23a(mask)&0x3f;
+
+ if (pHalData->fw_ractrl == true) {
+ u8 arg = 0;
+
+ /* arg = (cam_idx-4)&0x1f;MACID */
+ arg = mac_id&0x1f;/* MACID */
+
+ arg |= BIT(7);
+
+ if (shortGIrate == true)
+ arg |= BIT(5);
+
+ DBG_8723A("update raid entry, mask = 0x%x, arg = 0x%x\n",
+ mask, arg);
+
+ rtl8723a_set_raid_cmd(padapter, mask, arg);
+ } else {
+ if (shortGIrate == true)
+ init_rate |= BIT(6);
+
+ rtw_write8(padapter, (REG_INIDATA_RATE_SEL+mac_id), init_rate);
+ }
+
+ /* set ra_id */
+ psta->raid = raid;
+ psta->init_rate = init_rate;
+
+ /* set correct initial date rate for each mac_id */
+ pdmpriv->INIDATA_RATE[mac_id] = init_rate;
+}
+
+static void rtl8723au_init_default_value(struct rtw_adapter *padapter)
+{
+ rtl8723a_init_default_value(padapter);
+}
+
+static u8 rtl8192cu_ps_func(struct rtw_adapter *Adapter,
+ enum hal_intf_ps_func efunc_id, u8 *val)
+{
+ return true;
+}
+
+int rtl8723au_set_hal_ops(struct rtw_adapter *padapter)
+{
+ struct hal_ops *pHalFunc = &padapter->HalFunc;
+
+ padapter->HalData = kzalloc(sizeof(struct hal_data_8723a), GFP_KERNEL);
+ if (!padapter->HalData) {
+ DBG_8723A("cannot alloc memory for HAL DATA\n");
+ return -ENOMEM;
+ }
+ padapter->hal_data_sz = sizeof(struct hal_data_8723a);
+
+ pHalFunc->hal_init = &rtl8723au_hal_init;
+ pHalFunc->hal_deinit = &rtl8723au_hal_deinit;
+
+ pHalFunc->inirp_init = &rtl8723au_inirp_init;
+ pHalFunc->inirp_deinit = &rtl8723au_inirp_deinit;
+
+ pHalFunc->init_xmit_priv = &rtl8723au_init_xmit_priv;
+ pHalFunc->free_xmit_priv = &rtl8723au_free_xmit_priv;
+
+ pHalFunc->init_recv_priv = &rtl8723au_init_recv_priv;
+ pHalFunc->free_recv_priv = &rtl8723au_free_recv_priv;
+ pHalFunc->InitSwLeds = NULL;
+ pHalFunc->DeInitSwLeds = NULL;
+
+ pHalFunc->init_default_value = &rtl8723au_init_default_value;
+ pHalFunc->intf_chip_configure = &rtl8723au_interface_configure;
+ pHalFunc->read_adapter_info = &ReadAdapterInfo8723AU;
+ pHalFunc->SetHwRegHandler = &SetHwReg8723AU;
+ pHalFunc->GetHwRegHandler = &GetHwReg8723AU;
+ pHalFunc->GetHalDefVarHandler = &GetHalDefVar8192CUsb;
+ pHalFunc->SetHalDefVarHandler = &SetHalDefVar8192CUsb;
+ pHalFunc->UpdateRAMaskHandler = &UpdateHalRAMask8192CUsb;
+ pHalFunc->hal_xmit = &rtl8723au_hal_xmit;
+ pHalFunc->mgnt_xmit = &rtl8723au_mgnt_xmit;
+ pHalFunc->hal_xmitframe_enqueue = &rtl8723au_hal_xmitframe_enqueue;
+ pHalFunc->interface_ps_func = &rtl8192cu_ps_func;
+ rtl8723a_set_hal_ops(pHalFunc);
+ return 0;
+}
diff --git a/drivers/staging/rtl8723au/hal/usb_ops_linux.c b/drivers/staging/rtl8723au/hal/usb_ops_linux.c
new file mode 100644
index 000000000000..0311cdf77ff1
--- /dev/null
+++ b/drivers/staging/rtl8723au/hal/usb_ops_linux.c
@@ -0,0 +1,848 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _HCI_OPS_OS_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <osdep_intf.h>
+#include <usb_ops.h>
+#include <recv_osdep.h>
+#include <rtl8723a_hal.h>
+#include <rtl8723a_recv.h>
+
+static int usbctrl_vendorreq(struct intf_hdl *pintfhdl, u8 request, u16 value, u16 index, void *pdata, u16 len, u8 requesttype)
+{
+ struct rtw_adapter *padapter = pintfhdl->padapter ;
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+ struct usb_device *udev = pdvobjpriv->pusbdev;
+
+ unsigned int pipe;
+ int status = 0;
+ u8 reqtype;
+ u8 *pIo_buf;
+ int vendorreq_times = 0;
+
+ if ((padapter->bSurpriseRemoved) || (padapter->pwrctrlpriv.pnp_bstop_trx)) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usbctrl_vendorreq:(padapter->bSurpriseRemoved||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n"));
+ status = -EPERM;
+ goto exit;
+ }
+
+ if (len > MAX_VENDOR_REQ_CMD_SIZE) {
+ DBG_8723A("[%s] Buffer len error , vendor request failed\n", __FUNCTION__);
+ status = -EINVAL;
+ goto exit;
+ }
+
+ mutex_lock(&pdvobjpriv->usb_vendor_req_mutex);
+
+ /* Acquire IO memory for vendorreq */
+ pIo_buf = pdvobjpriv->usb_vendor_req_buf;
+
+ if (pIo_buf == NULL) {
+ DBG_8723A("[%s] pIo_buf == NULL \n", __FUNCTION__);
+ status = -ENOMEM;
+ goto release_mutex;
+ }
+
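+	/* Retry the control transfer up to MAX_USBCTRL_VENDORREQ_TIMES; the
+	 * loop exits early on success, or when the address falls inside the
+	 * firmware download window (that path is checksummed, so no retry). */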
+ while (++vendorreq_times <= MAX_USBCTRL_VENDORREQ_TIMES) {
+ memset(pIo_buf, 0, len);
+
+ if (requesttype == 0x01) {
+ pipe = usb_rcvctrlpipe(udev, 0);/* read_in */
+ reqtype = REALTEK_USB_VENQT_READ;
+ } else {
+ pipe = usb_sndctrlpipe(udev, 0);/* write_out */
+ reqtype = REALTEK_USB_VENQT_WRITE;
+ memcpy(pIo_buf, pdata, len);
+ }
+
+ status = rtw_usb_control_msg(udev, pipe, request, reqtype,
+ value, index, pIo_buf, len,
+ RTW_USB_CONTROL_MSG_TIMEOUT);
+
+ if (status == len) { /* Success this control transfer. */
+ rtw_reset_continual_urb_error(pdvobjpriv);
+ if (requesttype == 0x01) {
+ /* For Control read transfer, we have to copy
+ * the read data from pIo_buf to pdata.
+ */
+ memcpy(pdata, pIo_buf, len);
+ }
+ } else { /* error cases */
+ DBG_8723A("reg 0x%x, usb %s %u fail, status:%d value ="
+ " 0x%x, vendorreq_times:%d\n",
+ value, (requesttype == 0x01) ? "read" : "write",
+ len, status, *(u32 *)pdata, vendorreq_times);
+
+ if (status < 0) {
+ if (status == (-ESHUTDOWN) || status == -ENODEV) {
+ padapter->bSurpriseRemoved = true;
+ } else {
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ pHalData->srestpriv.Wifi_Error_Status = USB_VEN_REQ_CMD_FAIL;
+ }
+ } else { /* status != len && status >= 0 */
+ if (status > 0) {
+ if (requesttype == 0x01) {
+ /* For Control read transfer, we have to copy
+ * the read data from pIo_buf to pdata.
+ */
+ memcpy(pdata, pIo_buf, len);
+ }
+ }
+ }
+
+ if (rtw_inc_and_chk_continual_urb_error(pdvobjpriv)) {
+ padapter->bSurpriseRemoved = true;
+ break;
+ }
+
+ }
+
+		/* firmware download is checksummed, don't retry */
+ if ((value >= FW_8723A_START_ADDRESS && value <= FW_8723A_END_ADDRESS) || status == len)
+ break;
+ }
+
+release_mutex:
+ mutex_unlock(&pdvobjpriv->usb_vendor_req_mutex);
+exit:
+ return status;
+}
+
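+/* All register accesses below go through the single Realtek vendor request
+ * (bRequest 0x05): wValue carries the register address, wIndex is unused,
+ * and the data stage carries the 1/2/4 byte value (or a small buffer for
+ * usb_writeN). */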
+static u8 usb_read8(struct intf_hdl *pintfhdl, u32 addr)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u8 data = 0;
+
+ request = 0x05;
+ requesttype = 0x01;/* read_in */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 1;
+
+ usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+ return data;
+}
+
+static u16 usb_read16(struct intf_hdl *pintfhdl, u32 addr)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u16 data = 0;
+
+ request = 0x05;
+ requesttype = 0x01;/* read_in */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 2;
+
+ usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+ return data;
+}
+
+static u32 usb_read32(struct intf_hdl *pintfhdl, u32 addr)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u32 data = 0;
+
+ request = 0x05;
+ requesttype = 0x01;/* read_in */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 4;
+
+ usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+ return data;
+}
+
+static int usb_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u8 data;
+ int ret;
+
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 1;
+
+ data = val;
+
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+ return ret;
+}
+
+static int usb_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u16 data;
+ int ret;
+
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 2;
+
+ data = val;
+
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+ return ret;
+}
+
+static int usb_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u32 data;
+ int ret;
+
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 4;
+ data = val;
+
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+ return ret;
+}
+
+static int usb_writeN(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u8 buf[VENDOR_CMD_MAX_DATA_LEN] = {0};
+ int ret;
+
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = length;
+ memcpy(buf, pdata, len);
+
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, buf, len, requesttype);
+
+ return ret;
+}
+
+/*
+ * Description:
+ * Recognize the interrupt content by reading the interrupt
+ * register or content and masking interrupt mask (IMR)
+ * if it is our NIC's interrupt. After recognizing, we may clear
+ *	all the interrupts (ISR).
+ * Arguments:
+ * [in] Adapter -
+ * The adapter context.
+ * [in] pContent -
+ *		Under PCI interface, this field is ignored.
+ * Under USB interface, the content is the interrupt
+ * content pointer.
+ * Under SDIO interface, this is the interrupt type which
+ * is Local interrupt or system interrupt.
+ * [in] ContentLen -
+ *		The length in bytes of pContent.
+ * Return:
+ * If any interrupt matches the mask (IMR), return true, and
+ * return false otherwise.
+ */
+static bool
+InterruptRecognized8723AU(struct rtw_adapter *Adapter, void *pContent,
+ u32 ContentLen)
+{
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ u8 *buffer = (u8 *)pContent;
+ struct reportpwrstate_parm report;
+
+ memcpy(&pHalData->IntArray[0], &buffer[USB_INTR_CONTENT_HISR_OFFSET],
+ 4);
+ pHalData->IntArray[0] &= pHalData->IntrMask[0];
+
+ /* For HISR extension. Added by tynli. 2009.10.07. */
+ memcpy(&pHalData->IntArray[1],
+ &buffer[USB_INTR_CONTENT_HISRE_OFFSET], 4);
+ pHalData->IntArray[1] &= pHalData->IntrMask[1];
+
+	/* We should remove this function later because DDK suggests
+	 * not executing too many operations in MPISR */
+
+ memcpy(&report.state, &buffer[USB_INTR_CPWM_OFFSET], 1);
+
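+	/* Note: the CPWM byte is copied into 'report' above but not consumed
+	 * here; the return value only reflects whether any unmasked HISR or
+	 * HISR-extension bit is set. */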
+ return ((pHalData->IntArray[0])&pHalData->IntrMask[0]) != 0 ||
+ ((pHalData->IntArray[1])&pHalData->IntrMask[1]) != 0;
+}
+
+static void usb_read_interrupt_complete(struct urb *purb, struct pt_regs *regs)
+{
+ int err;
+ struct rtw_adapter *padapter = (struct rtw_adapter *)purb->context;
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped ||
+ padapter->bReadPortCancel) {
+ DBG_8723A("%s() RX Warning! bDriverStopped(%d) OR "
+ "bSurpriseRemoved(%d) bReadPortCancel(%d)\n",
+ __FUNCTION__, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved,
+ padapter->bReadPortCancel);
+ return;
+ }
+
+ if (purb->status == 0) {
+ struct c2h_evt_hdr *c2h_evt;
+
+ c2h_evt = (struct c2h_evt_hdr *)purb->transfer_buffer;
+
+ if (purb->actual_length > USB_INTR_CONTENT_LENGTH) {
+ DBG_8723A("usb_read_interrupt_complete: purb->actual_"
+ "length > USB_INTR_CONTENT_LENGTH\n");
+ goto urb_submit;
+ }
+
+ InterruptRecognized8723AU(padapter, purb->transfer_buffer,
+ purb->actual_length);
+
+ if (c2h_evt_exist(c2h_evt)) {
+ if (c2h_id_filter_ccx_8723a(c2h_evt->id)) {
+ /* Handle CCX report here */
+ handle_txrpt_ccx_8723a(padapter, (void *)(c2h_evt->payload));
+ /* Replace with special pointer to
+ trigger c2h_evt_clear23a */
+ if (rtw_cbuf_push23a(padapter->evtpriv.c2h_queue,
+ (void *)&padapter->evtpriv) !=
+ _SUCCESS)
+ DBG_8723A("%s rtw_cbuf_push23a fail\n",
+ __func__);
+ schedule_work(&padapter->evtpriv.c2h_wk);
+ } else if ((c2h_evt = (struct c2h_evt_hdr *)
+ kmalloc(16, GFP_ATOMIC))) {
+ memcpy(c2h_evt, purb->transfer_buffer, 16);
+ if (rtw_cbuf_push23a(padapter->evtpriv.c2h_queue,
+ (void *)c2h_evt) != _SUCCESS)
+ DBG_8723A("%s rtw_cbuf_push23a fail\n",
+ __func__);
+ schedule_work(&padapter->evtpriv.c2h_wk);
+ } else {
+ /* Error handling for malloc fail */
+ if (rtw_cbuf_push23a(padapter->evtpriv.c2h_queue,
+ (void *)NULL) != _SUCCESS)
+ DBG_8723A("%s rtw_cbuf_push23a fail\n",
+ __func__);
+ schedule_work(&padapter->evtpriv.c2h_wk);
+ }
+ }
+
+urb_submit:
+ err = usb_submit_urb(purb, GFP_ATOMIC);
+ if (err && (err != -EPERM)) {
+ DBG_8723A("cannot submit interrupt in-token(err = "
+ "0x%08x), urb_status = %d\n",
+ err, purb->status);
+ }
+ } else {
+ DBG_8723A("###=> usb_read_interrupt_complete => urb "
+ "status(%d)\n", purb->status);
+
+ switch (purb->status) {
+ case -EINVAL:
+ case -EPIPE:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete:bSurpriseRemoved ="
+ "true\n"));
+ /* Fall Through here */
+ case -ENOENT:
+ padapter->bDriverStopped = true;
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete:bDriverStopped ="
+ "true\n"));
+ break;
+ case -EPROTO:
+ break;
+ case -EINPROGRESS:
+ DBG_8723A("ERROR: URB IS IN PROGRESS!/n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static u32 usb_read_interrupt(struct intf_hdl *pintfhdl, u32 addr)
+{
+ int err;
+ unsigned int pipe;
+ u32 ret = _SUCCESS;
+ struct rtw_adapter *adapter = pintfhdl->padapter;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
+ struct recv_priv *precvpriv = &adapter->recvpriv;
+ struct usb_device *pusbd = pdvobj->pusbdev;
+
+ /* translate DMA FIFO addr to pipehandle */
+ pipe = ffaddr2pipehdl23a(pdvobj, addr);
+
+ usb_fill_int_urb(precvpriv->int_in_urb, pusbd, pipe,
+ precvpriv->int_in_buf, USB_INTR_CONTENT_LENGTH,
+ usb_read_interrupt_complete, adapter, 1);
+
+ err = usb_submit_urb(precvpriv->int_in_urb, GFP_ATOMIC);
+ if (err && (err != -EPERM)) {
+ DBG_8723A("cannot submit interrupt in-token(err = 0x%08x),"
+ "urb_status = %d\n", err,
+ precvpriv->int_in_urb->status);
+ ret = _FAIL;
+ }
+
+ return ret;
+}
+
+static int recvbuf2recvframe(struct rtw_adapter *padapter, struct sk_buff *pskb)
+{
+ u8 *pbuf;
+ u8 shift_sz = 0;
+ u16 pkt_cnt;
+ u32 pkt_offset, skb_len, alloc_sz;
+ s32 transfer_len;
+ struct recv_stat *prxstat;
+ struct phy_stat *pphy_info = NULL;
+ struct sk_buff *pkt_copy = NULL;
+ struct recv_frame *precvframe = NULL;
+ struct rx_pkt_attrib *pattrib = NULL;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct rtw_queue *pfree_recv_queue = &precvpriv->free_recv_queue;
+
+ transfer_len = (s32)pskb->len;
+ pbuf = pskb->data;
+
+ prxstat = (struct recv_stat *)pbuf;
+ pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
+
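+	/* A single USB bulk-in buffer may carry several frames back to back:
+	 * walk it descriptor by descriptor, advancing by RX descriptor +
+	 * drvinfo + shift + payload for each one, until pkt_cnt or the
+	 * remaining transfer_len runs out. */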
+ do {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("recvbuf2recvframe: rxdesc = offsset 0:0x%08x, "
+ "4:0x%08x, 8:0x%08x, C:0x%08x\n", prxstat->rxdw0,
+ prxstat->rxdw1, prxstat->rxdw2, prxstat->rxdw4));
+
+ prxstat = (struct recv_stat *)pbuf;
+
+ precvframe = rtw_alloc_recvframe23a(pfree_recv_queue);
+ if (!precvframe) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("recvbuf2recvframe: precvframe == NULL\n"));
+ DBG_8723A("%s()-%d: rtw_alloc_recvframe23a() failed! RX "
+ "Drop!\n", __FUNCTION__, __LINE__);
+ goto _exit_recvbuf2recvframe;
+ }
+
+ INIT_LIST_HEAD(&precvframe->list);
+
+ update_recvframe_attrib(precvframe, prxstat);
+
+ pattrib = &precvframe->attrib;
+
+ if (pattrib->crc_err) {
+ DBG_8723A("%s()-%d: RX Warning! rx CRC ERROR !!\n",
+ __FUNCTION__, __LINE__);
+ rtw_free_recvframe23a(precvframe, pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+
+ pkt_offset = RXDESC_SIZE + pattrib->drvinfo_sz +
+ pattrib->shift_sz + pattrib->pkt_len;
+
+ if ((pattrib->pkt_len <= 0) || (pkt_offset > transfer_len)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("recvbuf2recvframe: pkt_len<= 0\n"));
+ DBG_8723A("%s()-%d: RX Warning!\n",
+ __FUNCTION__, __LINE__);
+ rtw_free_recvframe23a(precvframe, pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+
+ /* Modified by Albert 20101213 */
+		/* For 8-byte IP header alignment. */
+		/* QoS data: the wireless LAN header length is 26. */
+ if (pattrib->qos) {
+ shift_sz = 6;
+ } else {
+ shift_sz = 0;
+ }
+
+ skb_len = pattrib->pkt_len;
+
+		/* For the first fragment packet, the driver needs to allocate
+		 * 1536+drvinfo_sz+RXDESC_SIZE to defrag the packet.
+		 * Modify alloc_sz to receive crc error packets,
+		 * by thomas 2011-06-02 */
+ if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
+ /* alloc_sz = 1664; 1664 is 128 alignment. */
+ if (skb_len <= 1650)
+ alloc_sz = 1664;
+ else
+ alloc_sz = skb_len + 14;
+ } else {
+ alloc_sz = skb_len;
+			/* 6 is for 8-byte IP header alignment in the QoS packet case. */
+			/* 8 is for 4-byte skb->data alignment. */
+ alloc_sz += 14;
+ }
+
+ pkt_copy = netdev_alloc_skb(padapter->pnetdev, alloc_sz);
+ if (pkt_copy) {
+ pkt_copy->dev = padapter->pnetdev;
+ precvframe->pkt = pkt_copy;
+ /* force pkt_copy->data to an 8-byte aligned address */
+ skb_reserve(pkt_copy, 8 - ((unsigned long)(pkt_copy->data) & 7));
+ /* force ip_hdr to an 8-byte aligned address according to shift_sz */
+ skb_reserve(pkt_copy, shift_sz);
+ memcpy(pkt_copy->data, (pbuf + pattrib->shift_sz + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len);
+ skb_put(pkt_copy, skb_len);
+ } else {
+ if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
+ DBG_8723A("recvbuf2recvframe: alloc_skb fail, "
+ "drop frag frame \n");
+ rtw_free_recvframe23a(precvframe,
+ pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+
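+ /* No private copy available; fall back to a clone that shares
+  * the aggregate skb's data. */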
+ precvframe->pkt = skb_clone(pskb, GFP_ATOMIC);
+ if (!precvframe->pkt) {
+ DBG_8723A("recvbuf2recvframe: skb_clone "
+ "fail\n");
+ rtw_free_recvframe23a(precvframe,
+ pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+ }
+
+ if (pattrib->physt) {
+ pphy_info = (struct phy_stat *)(pbuf + RXDESC_OFFSET);
+ update_recvframe_phyinfo(precvframe, pphy_info);
+ }
+
+ if (rtw_recv_entry23a(precvframe) != _SUCCESS)
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("recvbuf2recvframe: rtw_recv_entry23a"
+ "(precvframe) != _SUCCESS\n"));
+
+ pkt_cnt--;
+ transfer_len -= pkt_offset;
+ pbuf += pkt_offset;
+ precvframe = NULL;
+ pkt_copy = NULL;
+
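+ /* If payload remains but the running count has hit zero, refresh
+  * pkt_cnt from the descriptor that was just processed. */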
+ if (transfer_len > 0 && pkt_cnt == 0)
+ pkt_cnt = (le32_to_cpu(prxstat->rxdw2)>>16) & 0xff;
+
+ } while ((transfer_len > 0) && (pkt_cnt > 0));
+
+_exit_recvbuf2recvframe:
+
+ return _SUCCESS;
+}
+
+void rtl8723au_recv_tasklet(void *priv)
+{
+ struct sk_buff *pskb;
+ struct rtw_adapter *padapter = (struct rtw_adapter *)priv;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
+ if ((padapter->bDriverStopped) ||
+ (padapter->bSurpriseRemoved)) {
+ DBG_8723A("recv_tasklet => bDriverStopped or "
+ "bSurpriseRemoved \n");
+ dev_kfree_skb_any(pskb);
+ break;
+ }
+
+ recvbuf2recvframe(padapter, pskb);
+ skb_reset_tail_pointer(pskb);
+
+ pskb->len = 0;
+
+ skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
+ }
+}
+
+static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
+{
+ struct recv_buf *precvbuf = (struct recv_buf *)purb->context;
+ struct rtw_adapter *padapter = (struct rtw_adapter *)precvbuf->adapter;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct hal_data_8723a *pHalData;
+
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete!!!\n"));
+
+ precvpriv->rx_pending_cnt--;
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped ||
+ padapter->bReadPortCancel) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete:bDriverStopped(%d) OR "
+ "bSurpriseRemoved(%d)\n", padapter->bDriverStopped,
+ padapter->bSurpriseRemoved));
+
+ DBG_8723A("%s()-%d: RX Warning! bDriverStopped(%d) OR "
+ "bSurpriseRemoved(%d) bReadPortCancel(%d)\n",
+ __func__, __LINE__, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved, padapter->bReadPortCancel);
+ return;
+ }
+
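+ /* Transfer completed successfully: sanity-check the length, hand
+  * the skb to the recv tasklet and re-submit the read. */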
+ if (purb->status == 0) {
+ if ((purb->actual_length > MAX_RECVBUF_SZ) ||
+ (purb->actual_length < RXDESC_SIZE)) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete: (purb->actual_"
+ "length > MAX_RECVBUF_SZ) || (purb->actual_"
+ "length < RXDESC_SIZE)\n"));
+ rtw_read_port(padapter, precvpriv->ff_hwaddr, 0,
+ precvbuf);
+ DBG_8723A("%s()-%d: RX Warning!\n",
+ __func__, __LINE__);
+ } else {
+ rtw_reset_continual_urb_error(
+ adapter_to_dvobj(padapter));
+
+ skb_put(precvbuf->pskb, purb->actual_length);
+ skb_queue_tail(&precvpriv->rx_skb_queue,
+ precvbuf->pskb);
+
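+ /* Schedule the tasklet only when this skb is the first one
+  * queued; otherwise a previously scheduled run will drain the
+  * queue. */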
+ if (skb_queue_len(&precvpriv->rx_skb_queue) <= 1)
+ tasklet_schedule(&precvpriv->recv_tasklet);
+
+ precvbuf->pskb = NULL;
+ rtw_read_port(padapter, precvpriv->ff_hwaddr, 0,
+ precvbuf);
+ }
+ } else {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete : purb->status(%d) != 0 \n",
+ purb->status));
+ skb_put(precvbuf->pskb, purb->actual_length);
+ precvbuf->pskb = NULL;
+
+ DBG_8723A("###=> usb_read_port_complete => urb status(%d)\n",
+ purb->status);
+
+ if (rtw_inc_and_chk_continual_urb_error(
+ adapter_to_dvobj(padapter))) {
+ padapter->bSurpriseRemoved = true;
+ }
+
+ switch (purb->status) {
+ case -EINVAL:
+ case -EPIPE:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete:bSurprise"
+ "Removed = true\n"));
+ /* Intentional fall through here */
+ case -ENOENT:
+ padapter->bDriverStopped = true;
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete:"
+ "bDriverStopped = true\n"));
+ break;
+ case -EPROTO:
+ case -EOVERFLOW:
+ pHalData = GET_HAL_DATA(padapter);
+ pHalData->srestpriv.Wifi_Error_Status =
+ USB_READ_PORT_FAIL;
+ rtw_read_port(padapter, precvpriv->ff_hwaddr,
+ 0, precvbuf);
+ break;
+ case -EINPROGRESS:
+ DBG_8723A("ERROR: URB IS IN PROGRESS!/n");
+ break;
+ default:
+ break;
+ }
+
+ }
+}
+
+static u32 usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ struct recv_buf *precvbuf)
+{
+ int err;
+ unsigned int pipe;
+ unsigned long tmpaddr = 0;
+ unsigned long alignment = 0;
+ u32 ret = _SUCCESS;
+ struct urb *purb = NULL;
+ struct rtw_adapter *adapter = pintfhdl->padapter;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
+ struct recv_priv *precvpriv = &adapter->recvpriv;
+ struct usb_device *pusbd = pdvobj->pusbdev;
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
+ adapter->pwrctrlpriv.pnp_bstop_trx) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port:(padapter->bDriverStopped ||"
+ "padapter->bSurpriseRemoved ||adapter->"
+ "pwrctrlpriv.pnp_bstop_trx)!!!\n"));
+ return _FAIL;
+ }
+
+ if (!precvbuf) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port:precvbuf == NULL\n"));
+ return _FAIL;
+ }
+
+ if (!precvbuf->pskb)
+ precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
+
+ rtl8723au_init_recvbuf(adapter, precvbuf);
+
+ /* re-assign for linux based on skb */
+ if (!precvbuf->pskb) {
+ precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
+ if (precvbuf->pskb == NULL) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n"));
+ return _FAIL;
+ }
+
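+ /* Advance skb->data to the next RECVBUFF_ALIGN_SZ boundary so the
+  * receive buffer handed to the bulk URB is suitably aligned. */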
+ tmpaddr = (unsigned long)precvbuf->pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
+ skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
+ }
+
+ precvpriv->rx_pending_cnt++;
+
+ purb = precvbuf->purb;
+
+ /* translate DMA FIFO addr to pipehandle */
+ pipe = ffaddr2pipehdl23a(pdvobj, addr);
+
+ usb_fill_bulk_urb(purb, pusbd, pipe, precvbuf->pskb->data,
+ MAX_RECVBUF_SZ, usb_read_port_complete,
+ precvbuf);/* context is precvbuf */
+
+ err = usb_submit_urb(purb, GFP_ATOMIC);
+ if ((err) && (err != -EPERM)) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("cannot submit rx in-token(err = 0x%.8x), URB_STATUS "
+ "= 0x%.8x", err, purb->status));
+ DBG_8723A("cannot submit rx in-token(err = 0x%08x), urb_status "
+ "= %d\n", err, purb->status);
+ ret = _FAIL;
+ }
+ return ret;
+}
+
+void rtl8723au_xmit_tasklet(void *priv)
+{
+ int ret = false;
+ struct rtw_adapter *padapter = (struct rtw_adapter *)priv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ if (check_fwstate(&padapter->mlmepriv, _FW_UNDER_SURVEY))
+ return;
+
+ while (1) {
+ if ((padapter->bDriverStopped) ||
+ (padapter->bSurpriseRemoved) ||
+ (padapter->bWritePortCancel)) {
+ DBG_8723A("xmit_tasklet => bDriverStopped or "
+ "bSurpriseRemoved or bWritePortCancel\n");
+ break;
+ }
+
+ ret = rtl8723au_xmitframe_complete(padapter, pxmitpriv, NULL);
+
+ if (!ret)
+ break;
+ }
+}
+
+void rtl8723au_set_intf_ops(struct _io_ops *pops)
+{
+
+ memset((u8 *)pops, 0, sizeof(struct _io_ops));
+
+ pops->_read8 = &usb_read8;
+ pops->_read16 = &usb_read16;
+ pops->_read32 = &usb_read32;
+ pops->_read_mem = &usb_read_mem23a;
+ pops->_read_port = &usb_read_port;
+
+ pops->_write8 = &usb_write8;
+ pops->_write16 = &usb_write16;
+ pops->_write32 = &usb_write32;
+ pops->_writeN = &usb_writeN;
+
+ pops->_write_mem = &usb_write_mem23a;
+ pops->_write_port = &usb_write_port23a;
+
+ pops->_read_port_cancel = &usb_read_port_cancel23a;
+ pops->_write_port_cancel = &usb_write_port23a_cancel;
+
+ pops->_read_interrupt = &usb_read_interrupt;
+}
+
+void rtl8723au_set_hw_type(struct rtw_adapter *padapter)
+{
+ padapter->chip_type = RTL8723A;
+ padapter->HardwareType = HARDWARE_TYPE_RTL8723AU;
+ DBG_8723A("CHIP TYPE: RTL8723A\n");
+}
diff --git a/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h b/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h
new file mode 100644
index 000000000000..4b7f3479c0a9
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h
@@ -0,0 +1,230 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ ******************************************************************************/
+#ifndef __INC_HAL8723PHYCFG_H__
+#define __INC_HAL8723PHYCFG_H__
+
+/*--------------------------Define Parameters-------------------------------*/
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50 /* us */
+#define AntennaDiversityValue 0x80
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define Reset_Cnt_Limit 3
+
+
+#define MAX_AGGR_NUM 0x0909
+
+/*--------------------------Define Parameters-------------------------------*/
+
+
+/*------------------------------Define structure----------------------------*/
+enum swchnlcmdid {
+ CmdID_End,
+ CmdID_SetTxPowerLevel,
+ CmdID_BBRegWrite10,
+ CmdID_WritePortUlong,
+ CmdID_WritePortUshort,
+ CmdID_WritePortUchar,
+ CmdID_RF_WriteReg,
+};
+
+
+/* 1. Switch channel related */
+struct swchnlcmd {
+ enum swchnlcmdid CmdID;
+ u32 Para1;
+ u32 Para2;
+ u32 msDelay;
+};
+
+enum HW90_BLOCK {
+ HW90_BLOCK_MAC = 0,
+ HW90_BLOCK_PHY0 = 1,
+ HW90_BLOCK_PHY1 = 2,
+ HW90_BLOCK_RF = 3,
+ HW90_BLOCK_MAXIMUM = 4, /* Never use this */
+};
+
+enum RF_RADIO_PATH {
+ RF_PATH_A = 0, /* Radio Path A */
+ RF_PATH_B = 1, /* Radio Path B */
+ RF_PATH_MAX /* Max RF number 90 support */
+};
+
+#define CHANNEL_MAX_NUMBER 14 /* 14 is the max channel number */
+#define CHANNEL_GROUP_MAX 3 /* ch1~3, ch4~9, ch10~14 total three groups */
+
+enum WIRELESS_MODE {
+ WIRELESS_MODE_UNKNOWN = 0x00,
+ WIRELESS_MODE_A = BIT2,
+ WIRELESS_MODE_B = BIT0,
+ WIRELESS_MODE_G = BIT1,
+ WIRELESS_MODE_AUTO = BIT5,
+ WIRELESS_MODE_N_24G = BIT3,
+ WIRELESS_MODE_N_5G = BIT4,
+ WIRELESS_MODE_AC = BIT6
+};
+
+enum baseband_config_type {
+ BaseBand_Config_PHY_REG = 0, /* PHY (BB) register configuration */
+ BaseBand_Config_AGC_TAB = 1, /* AGC table configuration */
+};
+
+enum ra_offset_area {
+ RA_OFFSET_LEGACY_OFDM1,
+ RA_OFFSET_LEGACY_OFDM2,
+ RA_OFFSET_HT_OFDM1,
+ RA_OFFSET_HT_OFDM2,
+ RA_OFFSET_HT_OFDM3,
+ RA_OFFSET_HT_OFDM4,
+ RA_OFFSET_HT_CCK,
+};
+
+
+/* BB/RF related */
+enum rf_type_8190p {
+ RF_TYPE_MIN, /* 0 */
+ RF_8225 = 1, /* 1 11b/g RF for verification only */
+ RF_8256 = 2, /* 2 11b/g/n */
+ RF_8258 = 3, /* 3 11a/b/g/n RF */
+ RF_6052 = 4, /* 4 11b/g/n RF */
+ RF_PSEUDO_11N = 5, /* 5, a temporary (pseudo) RF */
+};
+
+struct bb_reg_define {
+ u32 rfintfs; /* set software control: */
+ /* 0x870~0x877[8 bytes] */
+ u32 rfintfi; /* readback data: */
+ /* 0x8e0~0x8e7[8 bytes] */
+ u32 rfintfo; /* output data: */
+ /* 0x860~0x86f [16 bytes] */
+ u32 rfintfe; /* output enable: */
+ /* 0x860~0x86f [16 bytes] */
+ u32 rf3wireOffset; /* LSSI data: */
+ /* 0x840~0x84f [16 bytes] */
+ u32 rfLSSI_Select; /* BB Band Select: */
+ /* 0x878~0x87f [8 bytes] */
+ u32 rfTxGainStage; /* Tx gain stage: */
+ /* 0x80c~0x80f [4 bytes] */
+ u32 rfHSSIPara1; /* wire parameter control1 : */
+ /* 0x820~0x823, 0x828~0x82b, 0x830~0x833, 0x838~0x83b [16 bytes] */
+ u32 rfHSSIPara2; /* wire parameter control2 : */
+ /* 0x824~0x827, 0x82c~0x82f, 0x834~0x837, 0x83c~0x83f [16 bytes] */
+ u32 rfSwitchControl; /* Tx Rx antenna control : */
+ /* 0x858~0x85f [16 bytes] */
+ u32 rfAGCControl1; /* AGC parameter control1 : */
+ /* 0xc50~0xc53, 0xc58~0xc5b, 0xc60~0xc63, 0xc68~0xc6b [16 bytes] */
+ u32 rfAGCControl2; /* AGC parameter control2 : */
+ /* 0xc54~0xc57, 0xc5c~0xc5f, 0xc64~0xc67, 0xc6c~0xc6f [16 bytes] */
+ u32 rfRxIQImbalance; /* OFDM Rx IQ imbalance matrix : */
+ /* 0xc14~0xc17, 0xc1c~0xc1f, 0xc24~0xc27, 0xc2c~0xc2f [16 bytes] */
+ u32 rfRxAFE; /* Rx IQ DC offset and Rx digital filter, Rx DC notch filter : */
+ /* 0xc10~0xc13, 0xc18~0xc1b, 0xc20~0xc23, 0xc28~0xc2b [16 bytes] */
+ u32 rfTxIQImbalance; /* OFDM Tx IQ imbalance matrix */
+ /* 0xc80~0xc83, 0xc88~0xc8b, 0xc90~0xc93, 0xc98~0xc9b [16 bytes] */
+ u32 rfTxAFE; /* Tx IQ DC Offset and Tx DFIR type */
+ /* 0xc84~0xc87, 0xc8c~0xc8f, 0xc94~0xc97, 0xc9c~0xc9f [16 bytes] */
+ u32 rfLSSIReadBack; /* LSSI RF readback data SI mode */
+ /* 0x8a0~0x8af [16 bytes] */
+ u32 rfLSSIReadBackPi; /* LSSI RF readback data PI mode 0x8b8-8bc for Path A and B */
+};
+
+struct r_antenna_sel_ofdm {
+ u32 r_tx_antenna:4;
+ u32 r_ant_l:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_non_ht_s1:4;
+ u32 OFDM_TXSC:2;
+ u32 Reserved:2;
+};
+
+struct r_antenna_sel_cck {
+ u8 r_cckrx_enable_2:2;
+ u8 r_cckrx_enable:2;
+ u8 r_ccktx_enable:4;
+};
+
+/*------------------------------Define structure----------------------------*/
+
+
+/*------------------------Export global variable----------------------------*/
+/*------------------------Export global variable----------------------------*/
+
+
+/*------------------------Export Macro Definition---------------------------*/
+/*------------------------Export Macro Definition---------------------------*/
+
+
+/*--------------------------Exported Function prototype---------------------*/
+/* */
+/* BB and RF register read/write */
+/* */
+u32 PHY_QueryBBReg(struct rtw_adapter *Adapter, u32 RegAddr,
+ u32 BitMask);
+void PHY_SetBBReg(struct rtw_adapter *Adapter, u32 RegAddr,
+ u32 BitMask, u32 Data);
+u32 PHY_QueryRFReg(struct rtw_adapter *Adapter,
+ enum RF_RADIO_PATH eRFPath, u32 RegAddr,
+ u32 BitMask);
+void PHY_SetRFReg(struct rtw_adapter *Adapter,
+ enum RF_RADIO_PATH eRFPath, u32 RegAddr,
+ u32 BitMask, u32 Data);
+
+/* */
+/* BB TX Power R/W */
+/* */
+void PHY_SetTxPowerLevel8723A(struct rtw_adapter *Adapter, u8 channel);
+
+/* */
+/* Switch bandwidth for 8723A */
+/* */
+void PHY_SetBWMode23a8723A(struct rtw_adapter *pAdapter,
+ enum ht_channel_width ChnlWidth,
+ unsigned char Offset);
+
+/* */
+/* channel switch related function */
+/* */
+void PHY_SwChnl8723A(struct rtw_adapter *pAdapter, u8 channel);
+ /* Call after initialization */
+void ChkFwCmdIoDone(struct rtw_adapter *Adapter);
+
+/* */
+/* Modify the value of the hw register when beacon interval be changed. */
+/* */
+void
+rtl8192c_PHY_SetBeaconHwReg(struct rtw_adapter *Adapter, u16 BeaconInterval);
+
+
+void PHY_SwitchEphyParameter(struct rtw_adapter *Adapter);
+
+void PHY_EnableHostClkReq(struct rtw_adapter *Adapter);
+
+bool
+SetAntennaConfig92C(struct rtw_adapter *Adapter, u8 DefaultAnt);
+
+/*--------------------------Exported Function prototype---------------------*/
+
+#define PHY_SetMacReg PHY_SetBBReg
+
+/* MAC/BB/RF HAL config */
+int PHY_BBConfig8723A(struct rtw_adapter *Adapter);
+int PHY_RFConfig8723A(struct rtw_adapter *Adapter);
+s32 PHY_MACConfig8723A(struct rtw_adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/Hal8723APhyReg.h b/drivers/staging/rtl8723au/include/Hal8723APhyReg.h
new file mode 100644
index 000000000000..759928f78d6d
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/Hal8723APhyReg.h
@@ -0,0 +1,1078 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ ******************************************************************************/
+#ifndef __INC_HAL8723APHYREG_H__
+#define __INC_HAL8723APHYREG_H__
+
+/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
+/* 1. Page1(0x100) */
+#define rPMAC_Reset 0x100
+#define rPMAC_TxStart 0x104
+#define rPMAC_TxLegacySIG 0x108
+#define rPMAC_TxHTSIG1 0x10c
+#define rPMAC_TxHTSIG2 0x110
+#define rPMAC_PHYDebug 0x114
+#define rPMAC_TxPacketNum 0x118
+#define rPMAC_TxIdle 0x11c
+#define rPMAC_TxMACHeader0 0x120
+#define rPMAC_TxMACHeader1 0x124
+#define rPMAC_TxMACHeader2 0x128
+#define rPMAC_TxMACHeader3 0x12c
+#define rPMAC_TxMACHeader4 0x130
+#define rPMAC_TxMACHeader5 0x134
+#define rPMAC_TxDataType 0x138
+#define rPMAC_TxRandomSeed 0x13c
+#define rPMAC_CCKPLCPPreamble 0x140
+#define rPMAC_CCKPLCPHeader 0x144
+#define rPMAC_CCKCRC16 0x148
+#define rPMAC_OFDMRxCRC32OK 0x170
+#define rPMAC_OFDMRxCRC32Er 0x174
+#define rPMAC_OFDMRxParityEr 0x178
+#define rPMAC_OFDMRxCRC8Er 0x17c
+#define rPMAC_CCKCRxRC16Er 0x180
+#define rPMAC_CCKCRxRC32Er 0x184
+#define rPMAC_CCKCRxRC32OK 0x188
+#define rPMAC_TxStatus 0x18c
+
+/* 2. Page2(0x200) */
+/* The following two definition are only used for USB interface. */
+#define RF_BB_CMD_ADDR 0x02c0 /* RF/BB read/write command address. */
+#define RF_BB_CMD_DATA 0x02c4 /* RF/BB read/write command data. */
+
+/* 3. Page8(0x800) */
+#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting?? */
+
+#define rFPGA0_TxInfo 0x804 /* Status report?? */
+#define rFPGA0_PSDFunction 0x808
+
+#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
+
+#define rFPGA0_RFTiming1 0x810 /* Useless now */
+#define rFPGA0_RFTiming2 0x814
+
+#define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */
+#define rFPGA0_XA_HSSIParameter2 0x824
+#define rFPGA0_XB_HSSIParameter1 0x828
+#define rFPGA0_XB_HSSIParameter2 0x82c
+#define rTxAGC_B_Rate18_06 0x830
+#define rTxAGC_B_Rate54_24 0x834
+#define rTxAGC_B_CCK1_55_Mcs32 0x838
+#define rTxAGC_B_Mcs03_Mcs00 0x83c
+
+#define rTxAGC_B_Mcs07_Mcs04 0x848
+#define rTxAGC_B_Mcs11_Mcs08 0x84c
+
+#define rFPGA0_XA_LSSIParameter 0x840
+#define rFPGA0_XB_LSSIParameter 0x844
+
+#define rFPGA0_RFWakeUpParameter 0x850 /* Useless now */
+#define rFPGA0_RFSleepUpParameter 0x854
+
+#define rFPGA0_XAB_SwitchControl 0x858 /* RF Channel switch */
+#define rFPGA0_XCD_SwitchControl 0x85c
+
+#define rFPGA0_XA_RFInterfaceOE 0x860 /* RF Channel switch */
+#define rFPGA0_XB_RFInterfaceOE 0x864
+
+#define rTxAGC_B_Mcs15_Mcs12 0x868
+#define rTxAGC_B_CCK11_A_CCK2_11 0x86c
+
+#define rFPGA0_XAB_RFInterfaceSW 0x870 /* RF Interface Software Control */
+#define rFPGA0_XCD_RFInterfaceSW 0x874
+
+#define rFPGA0_XAB_RFParameter 0x878 /* RF Parameter */
+#define rFPGA0_XCD_RFParameter 0x87c
+
+#define rFPGA0_AnalogParameter1 0x880 /* Crystal cap setting RF-R/W protection for parameter4?? */
+#define rFPGA0_AnalogParameter2 0x884
+#define rFPGA0_AnalogParameter3 0x888 /* Useless now */
+#define rFPGA0_AnalogParameter4 0x88c
+
+#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Transceiver LSSI Readback */
+#define rFPGA0_XB_LSSIReadBack 0x8a4
+#define rFPGA0_XC_LSSIReadBack 0x8a8
+#define rFPGA0_XD_LSSIReadBack 0x8ac
+
+#define rFPGA0_PSDReport 0x8b4 /* Useless now */
+#define TransceiverA_HSPI_Readback 0x8b8 /* Transceiver A HSPI Readback */
+#define TransceiverB_HSPI_Readback 0x8bc /* Transceiver B HSPI Readback */
+#define rFPGA0_XAB_RFInterfaceRB 0x8e0 /* Useless now RF Interface Readback Value */
+#define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* Useless now */
+
+/* 4. Page9(0x900) */
+#define rFPGA1_RFMOD 0x900 /* RF mode & OFDM TxSC RF BW Setting?? */
+
+#define rFPGA1_TxBlock 0x904 /* Useless now */
+#define rFPGA1_DebugSelect 0x908 /* Useless now */
+#define rFPGA1_TxInfo 0x90c /* Useless now Status report?? */
+
+/* 5. PageA(0xA00) */
+/* Set Control channel to upper or lower. These settings are required only for 40MHz */
+#define rCCK0_System 0xa00
+
+#define rCCK0_AFESetting 0xa04 /* Disable init gain now Select RX path by RSSI */
+#define rCCK0_CCA 0xa08 /* Disable init gain now Init gain */
+
+#define rCCK0_RxAGC1 0xa0c /* AGC default value, saturation level Antenna Diversity, RX AGC, LNA Threshold, RX LNA Threshold useless now. Not the same as 90 series */
+#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
+
+#define rCCK0_RxHP 0xa14
+
+#define rCCK0_DSPParameter1 0xa18 /* Timing recovery & Channel estimation threshold */
+#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
+
+#define rCCK0_TxFilter1 0xa20
+#define rCCK0_TxFilter2 0xa24
+#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
+#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now 0xa30-a4f channel report */
+#define rCCK0_TRSSIReport 0xa50
+#define rCCK0_RxReport 0xa54 /* 0xa57 */
+#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
+#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
+/* PageB(0xB00) */
+#define rPdp_AntA 0xb00
+#define rPdp_AntA_4 0xb04
+#define rConfig_Pmpd_AntA 0xb28
+#define rConfig_AntA 0xb68
+#define rConfig_AntB 0xb6c
+#define rPdp_AntB 0xb70
+#define rPdp_AntB_4 0xb74
+#define rConfig_Pmpd_AntB 0xb98
+#define rAPK 0xbd8
+
+/* 6. PageC(0xC00) */
+#define rOFDM0_LSTF 0xc00
+
+#define rOFDM0_TRxPathEnable 0xc04
+#define rOFDM0_TRMuxPar 0xc08
+#define rOFDM0_TRSWIsolation 0xc0c
+
+#define rOFDM0_XARxAFE 0xc10 /* RxIQ DC offset, Rx digital filter, DC notch filter */
+#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
+#define rOFDM0_XBRxAFE 0xc18
+#define rOFDM0_XBRxIQImbalance 0xc1c
+#define rOFDM0_XCRxAFE 0xc20
+#define rOFDM0_XCRxIQImbalance 0xc24
+#define rOFDM0_XDRxAFE 0xc28
+#define rOFDM0_XDRxIQImbalance 0xc2c
+
+#define rOFDM0_RxDetector1 0xc30 /* PD,BW & SBD DM tune init gain */
+#define rOFDM0_RxDetector2 0xc34 /* SBD & Frame Sync. */
+#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
+#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
+
+#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
+#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
+#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
+#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
+
+#define rOFDM0_XAAGCCore1 0xc50 /* DIG */
+#define rOFDM0_XAAGCCore2 0xc54
+#define rOFDM0_XBAGCCore1 0xc58
+#define rOFDM0_XBAGCCore2 0xc5c
+#define rOFDM0_XCAGCCore1 0xc60
+#define rOFDM0_XCAGCCore2 0xc64
+#define rOFDM0_XDAGCCore1 0xc68
+#define rOFDM0_XDAGCCore2 0xc6c
+
+#define rOFDM0_AGCParameter1 0xc70
+#define rOFDM0_AGCParameter2 0xc74
+#define rOFDM0_AGCRSSITable 0xc78
+#define rOFDM0_HTSTFAGC 0xc7c
+
+#define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */
+#define rOFDM0_XATxAFE 0xc84
+#define rOFDM0_XBTxIQImbalance 0xc88
+#define rOFDM0_XBTxAFE 0xc8c
+#define rOFDM0_XCTxIQImbalance 0xc90
+#define rOFDM0_XCTxAFE 0xc94
+#define rOFDM0_XDTxIQImbalance 0xc98
+#define rOFDM0_XDTxAFE 0xc9c
+
+#define rOFDM0_RxIQExtAnta 0xca0
+#define rOFDM0_TxCoeff1 0xca4
+#define rOFDM0_TxCoeff2 0xca8
+#define rOFDM0_TxCoeff3 0xcac
+#define rOFDM0_TxCoeff4 0xcb0
+#define rOFDM0_TxCoeff5 0xcb4
+#define rOFDM0_TxCoeff6 0xcb8
+#define rOFDM0_RxHPParameter 0xce0
+#define rOFDM0_TxPseudoNoiseWgt 0xce4
+#define rOFDM0_FrameSync 0xcf0
+#define rOFDM0_DFSReport 0xcf4
+
+/* 7. PageD(0xD00) */
+#define rOFDM1_LSTF 0xd00
+#define rOFDM1_TRxPathEnable 0xd04
+
+#define rOFDM1_CFO 0xd08 /* No setting now */
+#define rOFDM1_CSI1 0xd10
+#define rOFDM1_SBD 0xd14
+#define rOFDM1_CSI2 0xd18
+#define rOFDM1_CFOTracking 0xd2c
+#define rOFDM1_TRxMesaure1 0xd34
+#define rOFDM1_IntfDet 0xd3c
+#define rOFDM1_PseudoNoiseStateAB 0xd50
+#define rOFDM1_PseudoNoiseStateCD 0xd54
+#define rOFDM1_RxPseudoNoiseWgt 0xd58
+
+#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
+#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
+#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
+
+#define rOFDM_ShortCFOAB 0xdac /* No setting now */
+#define rOFDM_ShortCFOCD 0xdb0
+#define rOFDM_LongCFOAB 0xdb4
+#define rOFDM_LongCFOCD 0xdb8
+#define rOFDM_TailCFOAB 0xdbc
+#define rOFDM_TailCFOCD 0xdc0
+#define rOFDM_PWMeasure1 0xdc4
+#define rOFDM_PWMeasure2 0xdc8
+#define rOFDM_BWReport 0xdcc
+#define rOFDM_AGCReport 0xdd0
+#define rOFDM_RxSNR 0xdd4
+#define rOFDM_RxEVMCSI 0xdd8
+#define rOFDM_SIGReport 0xddc
+
+
+/* 8. PageE(0xE00) */
+#define rTxAGC_A_Rate18_06 0xe00
+#define rTxAGC_A_Rate54_24 0xe04
+#define rTxAGC_A_CCK1_Mcs32 0xe08
+#define rTxAGC_A_Mcs03_Mcs00 0xe10
+#define rTxAGC_A_Mcs07_Mcs04 0xe14
+#define rTxAGC_A_Mcs11_Mcs08 0xe18
+#define rTxAGC_A_Mcs15_Mcs12 0xe1c
+
+#define rFPGA0_IQK 0xe28
+#define rTx_IQK_Tone_A 0xe30
+#define rRx_IQK_Tone_A 0xe34
+#define rTx_IQK_PI_A 0xe38
+#define rRx_IQK_PI_A 0xe3c
+
+#define rTx_IQK 0xe40
+#define rRx_IQK 0xe44
+#define rIQK_AGC_Pts 0xe48
+#define rIQK_AGC_Rsp 0xe4c
+#define rTx_IQK_Tone_B 0xe50
+#define rRx_IQK_Tone_B 0xe54
+#define rTx_IQK_PI_B 0xe58
+#define rRx_IQK_PI_B 0xe5c
+#define rIQK_AGC_Cont 0xe60
+
+#define rBlue_Tooth 0xe6c
+#define rRx_Wait_CCA 0xe70
+#define rTx_CCK_RFON 0xe74
+#define rTx_CCK_BBON 0xe78
+#define rTx_OFDM_RFON 0xe7c
+#define rTx_OFDM_BBON 0xe80
+#define rTx_To_Rx 0xe84
+#define rTx_To_Tx 0xe88
+#define rRx_CCK 0xe8c
+
+#define rTx_Power_Before_IQK_A 0xe94
+#define rTx_Power_After_IQK_A 0xe9c
+
+#define rRx_Power_Before_IQK_A 0xea0
+#define rRx_Power_Before_IQK_A_2 0xea4
+#define rRx_Power_After_IQK_A 0xea8
+#define rRx_Power_After_IQK_A_2 0xeac
+
+#define rTx_Power_Before_IQK_B 0xeb4
+#define rTx_Power_After_IQK_B 0xebc
+
+#define rRx_Power_Before_IQK_B 0xec0
+#define rRx_Power_Before_IQK_B_2 0xec4
+#define rRx_Power_After_IQK_B 0xec8
+#define rRx_Power_After_IQK_B_2 0xecc
+
+#define rRx_OFDM 0xed0
+#define rRx_Wait_RIFS 0xed4
+#define rRx_TO_Rx 0xed8
+#define rStandby 0xedc
+#define rSleep 0xee0
+#define rPMPD_ANAEN 0xeec
+
+/* 7. RF Register 0x00-0x2E (RF 8256) */
+/* RF-0222D 0x00-3F */
+/* Zebra1 */
+#define rZebra1_HSSIEnable 0x0 /* Useless now */
+#define rZebra1_TRxEnable1 0x1
+#define rZebra1_TRxEnable2 0x2
+#define rZebra1_AGC 0x4
+#define rZebra1_ChargePump 0x5
+#define rZebra1_Channel 0x7 /* RF channel switch */
+
+#define rZebra1_TxGain 0x8 /* Useless now */
+#define rZebra1_TxLPF 0x9
+#define rZebra1_RxLPF 0xb
+#define rZebra1_RxHPFCorner 0xc
+
+/* Zebra4 */
+#define rGlobalCtrl 0 /* Useless now */
+#define rRTL8256_TxLPF 19
+#define rRTL8256_RxLPF 11
+
+/* RTL8258 */
+#define rRTL8258_TxLPF 0x11 /* Useless now */
+#define rRTL8258_RxLPF 0x13
+#define rRTL8258_RSSILPF 0xa
+
+/* RL6052 Register definition */
+#define RF_AC 0x00
+#define RF_IQADJ_G1 0x01
+#define RF_IQADJ_G2 0x02
+#define RF_BS_PA_APSET_G1_G4 0x03
+#define RF_BS_PA_APSET_G5_G8 0x04
+#define RF_POW_TRSW 0x05
+#define RF_GAIN_RX 0x06
+#define RF_GAIN_TX 0x07
+#define RF_TXM_IDAC 0x08
+#define RF_IPA_G 0x09
+#define RF_TXBIAS_G 0x0A
+#define RF_TXPA_AG 0x0B
+#define RF_IPA_A 0x0C
+#define RF_TXBIAS_A 0x0D
+#define RF_BS_PA_APSET_G9_G11 0x0E
+#define RF_BS_IQGEN 0x0F
+#define RF_MODE1 0x10
+#define RF_MODE2 0x11
+#define RF_RX_AGC_HP 0x12
+#define RF_TX_AGC 0x13
+#define RF_BIAS 0x14
+#define RF_IPA 0x15
+#define RF_TXBIAS 0x16
+#define RF_POW_ABILITY 0x17
+#define RF_MODE_AG 0x18
+#define rRfChannel 0x18 /* RF channel and BW switch */
+#define RF_CHNLBW 0x18 /* RF channel and BW switch */
+#define RF_TOP 0x19
+#define RF_RX_G1 0x1A
+#define RF_RX_G2 0x1B
+#define RF_RX_BB2 0x1C
+#define RF_RX_BB1 0x1D
+#define RF_RCK1 0x1E
+#define RF_RCK2 0x1F
+#define RF_TX_G1 0x20
+#define RF_TX_G2 0x21
+#define RF_TX_G3 0x22
+#define RF_TX_BB1 0x23
+#define RF_T_METER 0x24
+#define RF_SYN_G1 0x25 /* RF TX Power control */
+#define RF_SYN_G2 0x26 /* RF TX Power control */
+#define RF_SYN_G3 0x27 /* RF TX Power control */
+#define RF_SYN_G4 0x28 /* RF TX Power control */
+#define RF_SYN_G5 0x29 /* RF TX Power control */
+#define RF_SYN_G6 0x2A /* RF TX Power control */
+#define RF_SYN_G7 0x2B /* RF TX Power control */
+#define RF_SYN_G8 0x2C /* RF TX Power control */
+
+#define RF_RCK_OS 0x30 /* RF TX PA control */
+
+#define RF_TXPA_G1 0x31 /* RF TX PA control */
+#define RF_TXPA_G2 0x32 /* RF TX PA control */
+#define RF_TXPA_G3 0x33 /* RF TX PA control */
+
+/* Bit Mask */
+/* 1. Page1(0x100) */
+#define bBBResetB 0x100 /* Useless now? */
+#define bGlobalResetB 0x200
+#define bOFDMTxStart 0x4
+#define bCCKTxStart 0x8
+#define bCRC32Debug 0x100
+#define bPMACLoopback 0x10
+#define bTxLSIG 0xffffff
+#define bOFDMTxRate 0xf
+#define bOFDMTxReserved 0x10
+#define bOFDMTxLength 0x1ffe0
+#define bOFDMTxParity 0x20000
+#define bTxHTSIG1 0xffffff
+#define bTxHTMCSRate 0x7f
+#define bTxHTBW 0x80
+#define bTxHTLength 0xffff00
+#define bTxHTSIG2 0xffffff
+#define bTxHTSmoothing 0x1
+#define bTxHTSounding 0x2
+#define bTxHTReserved 0x4
+#define bTxHTAggreation 0x8
+#define bTxHTSTBC 0x30
+#define bTxHTAdvanceCoding 0x40
+#define bTxHTShortGI 0x80
+#define bTxHTNumberHT_LTF 0x300
+#define bTxHTCRC8 0x3fc00
+#define bCounterReset 0x10000
+#define bNumOfOFDMTx 0xffff
+#define bNumOfCCKTx 0xffff0000
+#define bTxIdleInterval 0xffff
+#define bOFDMService 0xffff0000
+#define bTxMACHeader 0xffffffff
+#define bTxDataInit 0xff
+#define bTxHTMode 0x100
+#define bTxDataType 0x30000
+#define bTxRandomSeed 0xffffffff
+#define bCCKTxPreamble 0x1
+#define bCCKTxSFD 0xffff0000
+#define bCCKTxSIG 0xff
+#define bCCKTxService 0xff00
+#define bCCKLengthExt 0x8000
+#define bCCKTxLength 0xffff0000
+#define bCCKTxCRC16 0xffff
+#define bCCKTxStatus 0x1
+#define bOFDMTxStatus 0x2
+
+#define IS_BB_REG_OFFSET_92S(_Offset) \
+ (((_Offset) >= 0x800) && ((_Offset) <= 0xfff))
+
+/* 2. Page8(0x800) */
+#define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */
+#define bJapanMode 0x2
+#define bCCKTxSC 0x30
+#define bCCKEn 0x1000000
+#define bOFDMEn 0x2000000
+
+#define bOFDMRxADCPhase 0x10000 /* Useless now */
+#define bOFDMTxDACPhase 0x40000
+#define bXATxAGC 0x3f
+
+#define bAntennaSelect 0x0300
+
+#define bXBTxAGC 0xf00 /* Reg 80c rFPGA0_TxGainStage */
+#define bXCTxAGC 0xf000
+#define bXDTxAGC 0xf0000
+
+#define bPAStart 0xf0000000 /* Useless now */
+#define bTRStart 0x00f00000
+#define bRFStart 0x0000f000
+#define bBBStart 0x000000f0
+#define bBBCCKStart 0x0000000f
+#define bPAEnd 0xf /* Reg0x814 */
+#define bTREnd 0x0f000000
+#define bRFEnd 0x000f0000
+#define bCCAMask 0x000000f0 /* T2R */
+#define bR2RCCAMask 0x00000f00
+#define bHSSI_R2TDelay 0xf8000000
+#define bHSSI_T2RDelay 0xf80000
+#define bContTxHSSI 0x400 /* change gain at continuous Tx */
+#define bIGFromCCK 0x200
+#define bAGCAddress 0x3f
+#define bRxHPTx 0x7000
+#define bRxHPT2R 0x38000
+#define bRxHPCCKIni 0xc0000
+#define bAGCTxCode 0xc00000
+#define bAGCRxCode 0x300000
+
+#define b3WireDataLength 0x800 /* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */
+#define b3WireAddressLength 0x400
+
+#define b3WireRFPowerDown 0x1 /* Useless now */
+/* define bHWSISelect 0x8 */
+#define b5GPAPEPolarity 0x40000000
+#define b2GPAPEPolarity 0x80000000
+#define bRFSW_TxDefaultAnt 0x3
+#define bRFSW_TxOptionAnt 0x30
+#define bRFSW_RxDefaultAnt 0x300
+#define bRFSW_RxOptionAnt 0x3000
+#define bRFSI_3WireData 0x1
+#define bRFSI_3WireClock 0x2
+#define bRFSI_3WireLoad 0x4
+#define bRFSI_3WireRW 0x8
+#define bRFSI_3Wire 0xf
+
+#define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */
+
+#define bRFSI_TRSW 0x20 /* Useless now */
+#define bRFSI_TRSWB 0x40
+#define bRFSI_ANTSW 0x100
+#define bRFSI_ANTSWB 0x200
+#define bRFSI_PAPE 0x400
+#define bRFSI_PAPE5G 0x800
+#define bBandSelect 0x1
+#define bHTSIG2_GI 0x80
+#define bHTSIG2_Smoothing 0x01
+#define bHTSIG2_Sounding 0x02
+#define bHTSIG2_Aggreaton 0x08
+#define bHTSIG2_STBC 0x30
+#define bHTSIG2_AdvCoding 0x40
+#define bHTSIG2_NumOfHTLTF 0x300
+#define bHTSIG2_CRC8 0x3fc
+#define bHTSIG1_MCS 0x7f
+#define bHTSIG1_BandWidth 0x80
+#define bHTSIG1_HTLength 0xffff
+#define bLSIG_Rate 0xf
+#define bLSIG_Reserved 0x10
+#define bLSIG_Length 0x1fffe
+#define bLSIG_Parity 0x20
+#define bCCKRxPhase 0x4
+
+#define bLSSIReadAddress 0x7f800000 /* T65 RF */
+
+#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
+
+#define bLSSIReadBackData 0xfffff /* T65 RF */
+
+#define bLSSIReadOKFlag 0x1000 /* Useless now */
+#define bCCKSampleRate 0x8 /* 0: 44MHz, 1:88MHz */
+#define bRegulator0Standby 0x1
+#define bRegulatorPLLStandby 0x2
+#define bRegulator1Standby 0x4
+#define bPLLPowerUp 0x8
+#define bDPLLPowerUp 0x10
+#define bDA10PowerUp 0x20
+#define bAD7PowerUp 0x200
+#define bDA6PowerUp 0x2000
+#define bXtalPowerUp 0x4000
+#define b40MDClkPowerUP 0x8000
+#define bDA6DebugMode 0x20000
+#define bDA6Swing 0x380000
+
+#define bADClkPhase 0x4000000 /* Reg 0x880 rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
+
+#define b80MClkDelay 0x18000000 /* Useless */
+#define bAFEWatchDogEnable 0x20000000
+
+#define bXtalCap01 0xc0000000 /* Reg 0x884 rFPGA0_AnalogParameter2 Crystal cap */
+#define bXtalCap23 0x3
+#define bXtalCap92x 0x0f000000
+#define bXtalCap 0x0f000000
+
+#define bIntDifClkEnable 0x400 /* Useless */
+#define bExtSigClkEnable 0x800
+#define bBandgapMbiasPowerUp 0x10000
+#define bAD11SHGain 0xc0000
+#define bAD11InputRange 0x700000
+#define bAD11OPCurrent 0x3800000
+#define bIPathLoopback 0x4000000
+#define bQPathLoopback 0x8000000
+#define bAFELoopback 0x10000000
+#define bDA10Swing 0x7e0
+#define bDA10Reverse 0x800
+#define bDAClkSource 0x1000
+#define bAD7InputRange 0x6000
+#define bAD7Gain 0x38000
+#define bAD7OutputCMMode 0x40000
+#define bAD7InputCMMode 0x380000
+#define bAD7Current 0xc00000
+#define bRegulatorAdjust 0x7000000
+#define bAD11PowerUpAtTx 0x1
+#define bDA10PSAtTx 0x10
+#define bAD11PowerUpAtRx 0x100
+#define bDA10PSAtRx 0x1000
+#define bCCKRxAGCFormat 0x200
+#define bPSDFFTSamplepPoint 0xc000
+#define bPSDAverageNum 0x3000
+#define bIQPathControl 0xc00
+#define bPSDFreq 0x3ff
+#define bPSDAntennaPath 0x30
+#define bPSDIQSwitch 0x40
+#define bPSDRxTrigger 0x400000
+#define bPSDTxTrigger 0x80000000
+#define bPSDSineToneScale 0x7f000000
+#define bPSDReport 0xffff
+
+/* 3. Page9(0x900) */
+#define bOFDMTxSC 0x30000000 /* Useless */
+#define bCCKTxOn 0x1
+#define bOFDMTxOn 0x2
+#define bDebugPage 0xfff /* reset debug page and also HWord, LWord */
+#define bDebugItem 0xff /* reset debug page and LWord */
+#define bAntL 0x10
+#define bAntNonHT 0x100
+#define bAntHT1 0x1000
+#define bAntHT2 0x10000
+#define bAntHT1S1 0x100000
+#define bAntNonHTS1 0x1000000
+
+/* 4. PageA(0xA00) */
+#define bCCKBBMode 0x3 /* Useless */
+#define bCCKTxPowerSaving 0x80
+#define bCCKRxPowerSaving 0x40
+
+#define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0_System 20/40 switch */
+
+#define bCCKScramble 0x8 /* Useless */
+#define bCCKAntDiversity 0x8000
+#define bCCKCarrierRecovery 0x4000
+#define bCCKTxRate 0x3000
+#define bCCKDCCancel 0x0800
+#define bCCKISICancel 0x0400
+#define bCCKMatchFilter 0x0200
+#define bCCKEqualizer 0x0100
+#define bCCKPreambleDetect 0x800000
+#define bCCKFastFalseCCA 0x400000
+#define bCCKChEstStart 0x300000
+#define bCCKCCACount 0x080000
+#define bCCKcs_lim 0x070000
+#define bCCKBistMode 0x80000000
+#define bCCKCCAMask 0x40000000
+#define bCCKTxDACPhase 0x4
+#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
+#define bCCKr_cp_mode0 0x0100
+#define bCCKTxDCOffset 0xf0
+#define bCCKRxDCOffset 0xf
+#define bCCKCCAMode 0xc000
+#define bCCKFalseCS_lim 0x3f00
+#define bCCKCS_ratio 0xc00000
+#define bCCKCorgBit_sel 0x300000
+#define bCCKPD_lim 0x0f0000
+#define bCCKNewCCA 0x80000000
+#define bCCKRxHPofIG 0x8000
+#define bCCKRxIG 0x7f00
+#define bCCKLNAPolarity 0x800000
+#define bCCKRx1stGain 0x7f0000
+#define bCCKRFExtend 0x20000000 /* CCK Rx initial gain polarity */
+#define bCCKRxAGCSatLevel 0x1f000000
+#define bCCKRxAGCSatCount 0xe0
+#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
+#define bCCKFixedRxAGC 0x8000
+/* define bCCKRxAGCFormat 0x4000 remove to HSSI register 0x824 */
+#define bCCKAntennaPolarity 0x2000
+#define bCCKTxFilterType 0x0c00
+#define bCCKRxAGCReportType 0x0300
+#define bCCKRxDAGCEn 0x80000000
+#define bCCKRxDAGCPeriod 0x20000000
+#define bCCKRxDAGCSatLevel 0x1f000000
+#define bCCKTimingRecovery 0x800000
+#define bCCKTxC0 0x3f0000
+#define bCCKTxC1 0x3f000000
+#define bCCKTxC2 0x3f
+#define bCCKTxC3 0x3f00
+#define bCCKTxC4 0x3f0000
+#define bCCKTxC5 0x3f000000
+#define bCCKTxC6 0x3f
+#define bCCKTxC7 0x3f00
+#define bCCKDebugPort 0xff0000
+#define bCCKDACDebug 0x0f000000
+#define bCCKFalseAlarmEnable 0x8000
+#define bCCKFalseAlarmRead 0x4000
+#define bCCKTRSSI 0x7f
+#define bCCKRxAGCReport 0xfe
+#define bCCKRxReport_AntSel 0x80000000
+#define bCCKRxReport_MFOff 0x40000000
+#define bCCKRxRxReport_SQLoss 0x20000000
+#define bCCKRxReport_Pktloss 0x10000000
+#define bCCKRxReport_Lockedbit 0x08000000
+#define bCCKRxReport_RateError 0x04000000
+#define bCCKRxReport_RxRate 0x03000000
+#define bCCKRxFACounterLower 0xff
+#define bCCKRxFACounterUpper 0xff000000
+#define bCCKRxHPAGCStart 0xe000
+#define bCCKRxHPAGCFinal 0x1c00
+#define bCCKRxFalseAlarmEnable 0x8000
+#define bCCKFACounterFreeze 0x4000
+#define bCCKTxPathSel 0x10000000
+#define bCCKDefaultRxPath 0xc000000
+#define bCCKOptionRxPath 0x3000000
+
+/* 5. PageC(0xC00) */
+#define bNumOfSTF 0x3 /* Useless */
+#define bShift_L 0xc0
+#define bGI_TH 0xc
+#define bRxPathA 0x1
+#define bRxPathB 0x2
+#define bRxPathC 0x4
+#define bRxPathD 0x8
+#define bTxPathA 0x1
+#define bTxPathB 0x2
+#define bTxPathC 0x4
+#define bTxPathD 0x8
+#define bTRSSIFreq 0x200
+#define bADCBackoff 0x3000
+#define bDFIRBackoff 0xc000
+#define bTRSSILatchPhase 0x10000
+#define bRxIDCOffset 0xff
+#define bRxQDCOffset 0xff00
+#define bRxDFIRMode 0x1800000
+#define bRxDCNFType 0xe000000
+#define bRXIQImb_A 0x3ff
+#define bRXIQImb_B 0xfc00
+#define bRXIQImb_C 0x3f0000
+#define bRXIQImb_D 0xffc00000
+#define bDC_dc_Notch 0x60000
+#define bRxNBINotch 0x1f000000
+#define bPD_TH 0xf
+#define bPD_TH_Opt2 0xc000
+#define bPWED_TH 0x700
+#define bIfMF_Win_L 0x800
+#define bPD_Option 0x1000
+#define bMF_Win_L 0xe000
+#define bBW_Search_L 0x30000
+#define bwin_enh_L 0xc0000
+#define bBW_TH 0x700000
+#define bED_TH2 0x3800000
+#define bBW_option 0x4000000
+#define bRatio_TH 0x18000000
+#define bWindow_L 0xe0000000
+#define bSBD_Option 0x1
+#define bFrame_TH 0x1c
+#define bFS_Option 0x60
+#define bDC_Slope_check 0x80
+#define bFGuard_Counter_DC_L 0xe00
+#define bFrame_Weight_Short 0x7000
+#define bSub_Tune 0xe00000
+#define bFrame_DC_Length 0xe000000
+#define bSBD_start_offset 0x30000000
+#define bFrame_TH_2 0x7
+#define bFrame_GI2_TH 0x38
+#define bGI2_Sync_en 0x40
+#define bSarch_Short_Early 0x300
+#define bSarch_Short_Late 0xc00
+#define bSarch_GI2_Late 0x70000
+#define bCFOAntSum 0x1
+#define bCFOAcc 0x2
+#define bCFOStartOffset 0xc
+#define bCFOLookBack 0x70
+#define bCFOSumWeight 0x80
+#define bDAGCEnable 0x10000
+#define bTXIQImb_A 0x3ff
+#define bTXIQImb_B 0xfc00
+#define bTXIQImb_C 0x3f0000
+#define bTXIQImb_D 0xffc00000
+#define bTxIDCOffset 0xff
+#define bTxQDCOffset 0xff00
+#define bTxDFIRMode 0x10000
+#define bTxPesudoNoiseOn 0x4000000
+#define bTxPesudoNoise_A 0xff
+#define bTxPesudoNoise_B 0xff00
+#define bTxPesudoNoise_C 0xff0000
+#define bTxPesudoNoise_D 0xff000000
+#define bCCADropOption 0x20000
+#define bCCADropThres 0xfff00000
+#define bEDCCA_H 0xf
+#define bEDCCA_L 0xf0
+#define bLambda_ED 0x300
+#define bRxInitialGain 0x7f
+#define bRxAntDivEn 0x80
+#define bRxAGCAddressForLNA 0x7f00
+#define bRxHighPowerFlow 0x8000
+#define bRxAGCFreezeThres 0xc0000
+#define bRxFreezeStep_AGC1 0x300000
+#define bRxFreezeStep_AGC2 0xc00000
+#define bRxFreezeStep_AGC3 0x3000000
+#define bRxFreezeStep_AGC0 0xc000000
+#define bRxRssi_Cmp_En 0x10000000
+#define bRxQuickAGCEn 0x20000000
+#define bRxAGCFreezeThresMode 0x40000000
+#define bRxOverFlowCheckType 0x80000000
+#define bRxAGCShift 0x7f
+#define bTRSW_Tri_Only 0x80
+#define bPowerThres 0x300
+#define bRxAGCEn 0x1
+#define bRxAGCTogetherEn 0x2
+#define bRxAGCMin 0x4
+#define bRxHP_Ini 0x7
+#define bRxHP_TRLNA 0x70
+#define bRxHP_RSSI 0x700
+#define bRxHP_BBP1 0x7000
+#define bRxHP_BBP2 0x70000
+#define bRxHP_BBP3 0x700000
+#define bRSSI_H 0x7f0000 /* the threshold for high power */
+#define bRSSI_Gen 0x7f000000 /* the threshold for ant diversity */
+#define bRxSettle_TRSW 0x7
+#define bRxSettle_LNA 0x38
+#define bRxSettle_RSSI 0x1c0
+#define bRxSettle_BBP 0xe00
+#define bRxSettle_RxHP 0x7000
+#define bRxSettle_AntSW_RSSI 0x38000
+#define bRxSettle_AntSW 0xc0000
+#define bRxProcessTime_DAGC 0x300000
+#define bRxSettle_HSSI 0x400000
+#define bRxProcessTime_BBPPW 0x800000
+#define bRxAntennaPowerShift 0x3000000
+#define bRSSITableSelect 0xc000000
+#define bRxHP_Final 0x7000000
+#define bRxHTSettle_BBP 0x7
+#define bRxHTSettle_HSSI 0x8
+#define bRxHTSettle_RxHP 0x70
+#define bRxHTSettle_BBPPW 0x80
+#define bRxHTSettle_Idle 0x300
+#define bRxHTSettle_Reserved 0x1c00
+#define bRxHTRxHPEn 0x8000
+#define bRxHTAGCFreezeThres 0x30000
+#define bRxHTAGCTogetherEn 0x40000
+#define bRxHTAGCMin 0x80000
+#define bRxHTAGCEn 0x100000
+#define bRxHTDAGCEn 0x200000
+#define bRxHTRxHP_BBP 0x1c00000
+#define bRxHTRxHP_Final 0xe0000000
+#define bRxPWRatioTH 0x3
+#define bRxPWRatioEn 0x4
+#define bRxMFHold 0x3800
+#define bRxPD_Delay_TH1 0x38
+#define bRxPD_Delay_TH2 0x1c0
+#define bRxPD_DC_COUNT_MAX 0x600
+/* define bRxMF_Hold 0x3800 */
+#define bRxPD_Delay_TH 0x8000
+#define bRxProcess_Delay 0xf0000
+#define bRxSearchrange_GI2_Early 0x700000
+#define bRxFrame_Guard_Counter_L 0x3800000
+#define bRxSGI_Guard_L 0xc000000
+#define bRxSGI_Search_L 0x30000000
+#define bRxSGI_TH 0xc0000000
+#define bDFSCnt0 0xff
+#define bDFSCnt1 0xff00
+#define bDFSFlag 0xf0000
+#define bMFWeightSum 0x300000
+#define bMinIdxTH 0x7f000000
+#define bDAFormat 0x40000
+#define bTxChEmuEnable 0x01000000
+#define bTRSWIsolation_A 0x7f
+#define bTRSWIsolation_B 0x7f00
+#define bTRSWIsolation_C 0x7f0000
+#define bTRSWIsolation_D 0x7f000000
+#define bExtLNAGain 0x7c00
+
+/* 6. PageE(0xE00) */
+#define bSTBCEn 0x4 /* Useless */
+#define bAntennaMapping 0x10
+#define bNss 0x20
+#define bCFOAntSumD 0x200
+#define bPHYCounterReset 0x8000000
+#define bCFOReportGet 0x4000000
+#define bOFDMContinueTx 0x10000000
+#define bOFDMSingleCarrier 0x20000000
+#define bOFDMSingleTone 0x40000000
+/* define bRxPath1 0x01 */
+/* define bRxPath2 0x02 */
+/* define bRxPath3 0x04 */
+/* define bRxPath4 0x08 */
+/* define bTxPath1 0x10 */
+/* define bTxPath2 0x20 */
+#define bHTDetect 0x100
+#define bCFOEn 0x10000
+#define bCFOValue 0xfff00000
+#define bSigTone_Re 0x3f
+#define bSigTone_Im 0x7f00
+#define bCounter_CCA 0xffff
+#define bCounter_ParityFail 0xffff0000
+#define bCounter_RateIllegal 0xffff
+#define bCounter_CRC8Fail 0xffff0000
+#define bCounter_MCSNoSupport 0xffff
+#define bCounter_FastSync 0xffff
+#define bShortCFO 0xfff
+#define bShortCFOTLength 12 /* total */
+#define bShortCFOFLength 11 /* fraction */
+#define bLongCFO 0x7ff
+#define bLongCFOTLength 11
+#define bLongCFOFLength 11
+#define bTailCFO 0x1fff
+#define bTailCFOTLength 13
+#define bTailCFOFLength 12
+#define bmax_en_pwdB 0xffff
+#define bCC_power_dB 0xffff0000
+#define bnoise_pwdB 0xffff
+#define bPowerMeasTLength 10
+#define bPowerMeasFLength 3
+#define bRx_HT_BW 0x1
+#define bRxSC 0x6
+#define bRx_HT 0x8
+#define bNB_intf_det_on 0x1
+#define bIntf_win_len_cfg 0x30
+#define bNB_Intf_TH_cfg 0x1c0
+#define bRFGain 0x3f
+#define bTableSel 0x40
+#define bTRSW 0x80
+#define bRxSNR_A 0xff
+#define bRxSNR_B 0xff00
+#define bRxSNR_C 0xff0000
+#define bRxSNR_D 0xff000000
+#define bSNREVMTLength 8
+#define bSNREVMFLength 1
+#define bCSI1st 0xff
+#define bCSI2nd 0xff00
+#define bRxEVM1st 0xff0000
+#define bRxEVM2nd 0xff000000
+#define bSIGEVM 0xff
+#define bPWDB 0xff00
+#define bSGIEN 0x10000
+
+#define bSFactorQAM1 0xf /* Useless */
+#define bSFactorQAM2 0xf0
+#define bSFactorQAM3 0xf00
+#define bSFactorQAM4 0xf000
+#define bSFactorQAM5 0xf0000
+#define bSFactorQAM6 0xf0000
+#define bSFactorQAM7 0xf00000
+#define bSFactorQAM8 0xf000000
+#define bSFactorQAM9 0xf0000000
+#define bCSIScheme 0x100000
+
+#define bNoiseLvlTopSet 0x3 /* Useless */
+#define bChSmooth 0x4
+#define bChSmoothCfg1 0x38
+#define bChSmoothCfg2 0x1c0
+#define bChSmoothCfg3 0xe00
+#define bChSmoothCfg4 0x7000
+#define bMRCMode 0x800000
+#define bTHEVMCfg 0x7000000
+
+#define bLoopFitType 0x1 /* Useless */
+#define bUpdCFO 0x40
+#define bUpdCFOOffData 0x80
+#define bAdvUpdCFO 0x100
+#define bAdvTimeCtrl 0x800
+#define bUpdClko 0x1000
+#define bFC 0x6000
+#define bTrackingMode 0x8000
+#define bPhCmpEnable 0x10000
+#define bUpdClkoLTF 0x20000
+#define bComChCFO 0x40000
+#define bCSIEstiMode 0x80000
+#define bAdvUpdEqz 0x100000
+#define bUChCfg 0x7000000
+#define bUpdEqz 0x8000000
+
+/* Rx Pseudo noise */
+#define bRxPesudoNoiseOn 0x20000000 /* Useless */
+#define bRxPesudoNoise_A 0xff
+#define bRxPesudoNoise_B 0xff00
+#define bRxPesudoNoise_C 0xff0000
+#define bRxPesudoNoise_D 0xff000000
+#define bPesudoNoiseState_A 0xffff
+#define bPesudoNoiseState_B 0xffff0000
+#define bPesudoNoiseState_C 0xffff
+#define bPesudoNoiseState_D 0xffff0000
+
+/* 7. RF Register */
+/* Zebra1 */
+#define bZebra1_HSSIEnable 0x8 /* Useless */
+#define bZebra1_TRxControl 0xc00
+#define bZebra1_TRxGainSetting 0x07f
+#define bZebra1_RxCorner 0xc00
+#define bZebra1_TxChargePump 0x38
+#define bZebra1_RxChargePump 0x7
+#define bZebra1_ChannelNum 0xf80
+#define bZebra1_TxLPFBW 0x400
+#define bZebra1_RxLPFBW 0x600
+
+/* Zebra4 */
+#define bRTL8256RegModeCtrl1 0x100 /* Useless */
+#define bRTL8256RegModeCtrl0 0x40
+#define bRTL8256_TxLPFBW 0x18
+#define bRTL8256_RxLPFBW 0x600
+
+/* RTL8258 */
+#define bRTL8258_TxLPFBW 0xc /* Useless */
+#define bRTL8258_RxLPFBW 0xc00
+#define bRTL8258_RSSILPFBW 0xc0
+
+
+/* Other Definition */
+
+/* byte enable for sb_write */
+#define bByte0 0x1 /* Useless */
+#define bByte1 0x2
+#define bByte2 0x4
+#define bByte3 0x8
+#define bWord0 0x3
+#define bWord1 0xc
+#define bDWord 0xf
+
+/* for PutRegsetting & GetRegSetting BitMask */
+#define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */
+#define bMaskByte1 0xff00
+#define bMaskByte2 0xff0000
+#define bMaskByte3 0xff000000
+#define bMaskHWord 0xffff0000
+#define bMaskLWord 0x0000ffff
+#define bMaskDWord 0xffffffff
+#define bMask12Bits 0xfff
+#define bMaskH4Bits 0xf0000000
+#define bMaskOFDM_D 0xffc00000
+#define bMaskCCK 0x3f3f3f3f
+
+/* for PutRFRegsetting & GetRFRegSetting BitMask */
+#define bRFRegOffsetMask 0xfffff
+
+#define bDisable 0x0
+
+#define LeftAntenna 0x0 /* Useless */
+#define RightAntenna 0x1
+
+#define tCheckTxStatus 500 /* 500ms Useless */
+#define tUpdateRxCounter 100 /* 100ms */
+
+#define rateCCK 0 /* Useless */
+#define rateOFDM 1
+#define rateHT 2
+
+/* define Register-End */
+#define bPMAC_End 0x1ff /* Useless */
+#define bFPGAPHY0_End 0x8ff
+#define bFPGAPHY1_End 0x9ff
+#define bCCKPHY0_End 0xaff
+#define bOFDMPHY0_End 0xcff
+#define bOFDMPHY1_End 0xdff
+
+/* define max debug item in each debug page */
+/* define bMaxItem_FPGA_PHY0 0x9 */
+/* define bMaxItem_FPGA_PHY1 0x3 */
+/* define bMaxItem_PHY_11B 0x16 */
+/* define bMaxItem_OFDM_PHY0 0x29 */
+/* define bMaxItem_OFDM_PHY1 0x0 */
+
+#define bPMACControl 0x0 /* Useless */
+#define bWMACControl 0x1
+#define bWNICControl 0x2
+
+#define PathA 0x0 /* Useless */
+#define PathB 0x1
+#define PathC 0x2
+#define PathD 0x3
+
+/* PageB(0xB00) */
+#define rPdp_AntA 0xb00
+#define rPdp_AntA_4 0xb04
+#define rPdp_AntA_8 0xb08
+#define rPdp_AntA_C 0xb0c
+#define rPdp_AntA_18 0xb18
+#define rPdp_AntA_1C 0xb1c
+#define rPdp_AntA_20 0xb20
+#define rPdp_AntA_24 0xb24
+
+#define rConfig_Pmpd_AntA 0xb28
+#define rConfig_ram64x16 0xb2c
+
+#define rBndA 0xb30
+#define rHssiPar 0xb34
+
+#define rConfig_AntA 0xb68
+#define rConfig_AntB 0xb6c
+
+#define rPdp_AntB 0xb70
+#define rPdp_AntB_4 0xb74
+#define rPdp_AntB_8 0xb78
+#define rPdp_AntB_C 0xb7c
+#define rPdp_AntB_10 0xb80
+#define rPdp_AntB_14 0xb84
+#define rPdp_AntB_18 0xb88
+#define rPdp_AntB_1C 0xb8c
+#define rPdp_AntB_20 0xb90
+#define rPdp_AntB_24 0xb94
+
+#define rConfig_Pmpd_AntB 0xb98
+
+#define rBndB 0xba0
+
+#define rAPK 0xbd8
+#define rPm_Rx0_AntA 0xbdc
+#define rPm_Rx1_AntA 0xbe0
+#define rPm_Rx2_AntA 0xbe4
+#define rPm_Rx3_AntA 0xbe8
+#define rPm_Rx0_AntB 0xbec
+#define rPm_Rx1_AntB 0xbf0
+#define rPm_Rx2_AntB 0xbf4
+#define rPm_Rx3_AntB 0xbf8
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/Hal8723PwrSeq.h b/drivers/staging/rtl8723au/include/Hal8723PwrSeq.h
new file mode 100644
index 000000000000..7f3bdea6a55e
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/Hal8723PwrSeq.h
@@ -0,0 +1,150 @@
+#ifndef __HAL8723PWRSEQ_H__
+#define __HAL8723PWRSEQ_H__
+/*
+ Check document WM-20110607-Paul-RTL8723A_Power_Architecture-R02.vsd
+ There are 6 HW Power States:
+ 0: POFF--Power Off
+ 1: PDN--Power Down
+ 2: CARDEMU--Card Emulation
+ 3: ACT--Active Mode
+ 4: LPS--Low Power State
+ 5: SUS--Suspend
+
+ The transitions between these states are defined below
+ TRANS_CARDEMU_TO_ACT
+ TRANS_ACT_TO_CARDEMU
+ TRANS_CARDEMU_TO_SUS
+ TRANS_SUS_TO_CARDEMU
+ TRANS_CARDEMU_TO_PDN
+ TRANS_ACT_TO_LPS
+ TRANS_LPS_TO_ACT
+
+ TRANS_END
+*/
+#include "HalPwrSeqCmd.h"
+#include "rtl8723a_spec.h"
+
+#define RTL8723A_TRANS_CARDEMU_TO_ACT_STEPS 15
+#define RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS 15
+#define RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS 15
+#define RTL8723A_TRANS_SUS_TO_CARDEMU_STEPS 15
+#define RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS 15
+#define RTL8723A_TRANS_PDN_TO_CARDEMU_STEPS 15
+#define RTL8723A_TRANS_ACT_TO_LPS_STEPS 15
+#define RTL8723A_TRANS_LPS_TO_ACT_STEPS 15
+#define RTL8723A_TRANS_END_STEPS 1
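+
+/*
+ * Each RTL8723A_TRANS_* macro below expands to a comma-terminated list
+ * of command entries, so a complete flow can be built by concatenating
+ * the relevant transitions into a single array.  A minimal sketch,
+ * assuming the entry type declared in HalPwrSeqCmd.h is named
+ * struct wl_pwr_cfg:
+ *
+ *   struct wl_pwr_cfg card_enable_flow[] = {
+ *       RTL8723A_TRANS_CARDEMU_TO_ACT
+ *       RTL8723A_TRANS_END
+ *   };
+ */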
+
+
+/* format
+ * { offset, cut_msk, fab_msk, interface_msk, base, cmd, msk, value }, comments here
+ */
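+/*
+ * Example: the first CARDEMU_TO_ACT entry below,
+ *   {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ *    PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC,
+ *    PWR_CMD_WRITE, BIT0, BIT0},
+ * issues a write that sets bit 0 of MAC register 0x0020 on every chip
+ * cut and fab, but only for the USB and SDIO interfaces; per its
+ * trailing comment this enables the LDOA12 MACRO block.
+ */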
+#define RTL8723A_TRANS_CARDEMU_TO_ACT \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0}, /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/ \
+ {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*0x67[0] = 0 to disable BT_GPS_SEL pins*/ \
+ {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},/*Delay 1ms*/ \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, 0}, /*0x00[5] = 1b'0 release analog Ips to digital , 1:isolation*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, 0},/* disable SW LPS 0x04[10]= 0*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/* release WLON reset 0x04[16]= 1*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]= 0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT4|BIT3), 0},/* disable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/* polling until return 0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT0, 0},/**/ \
+ {0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 1},/*0x4C[23] = 0x4E[7] = 1, switch DPDT_SEL_P output from WL BB */\
+
+#define RTL8723A_TRANS_ACT_TO_CARDEMU \
+ {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/ \
+ {0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0},/*0x4C[23] = 0x4E[7] = 0, switch DPDT_SEL_P output from register 0x65[2] */\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, BIT5}, /*0x00[5] = 1b'1 analog Ips to digital , 1:isolation*/ \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0}, /*0x20[0] = 1b'0 disable LDOA12 MACRO block*/ \
+
+
+#define RTL8723A_TRANS_CARDEMU_TO_SUS \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4|BIT3, (BIT4|BIT3)}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SDIO SOP option to disable BG/MB/ACK/SWR*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3|BIT4}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8723A_TRANS_SUS_TO_CARDEMU \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
+
+#define RTL8723A_TRANS_CARDEMU_TO_CARDDIS \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07 = 0x20 , SOP option to disable BG/MB*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, BIT2}, /*0x04[10] = 1, enable SW LPS*/ \
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8723A_TRANS_CARDDIS_TO_CARDEMU \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0}, /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01enable WL suspend*/\
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*PCIe DMA start*/
+
+
+#define RTL8723A_TRANS_CARDEMU_TO_PDN \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK|PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SOP option to disable BG/MB/ACK/SWR*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/* 0x04[16] = 0*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/
+
+#define RTL8723A_TRANS_PDN_TO_CARDEMU \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/
+
+#define RTL8723A_TRANS_ACT_TO_LPS \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/ \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},/*Tx Pause*/ \
+ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled, and clock are gated*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0},/*Whole BB is reset*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/ \
+	{0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00},/*When the driver enters suspend/disable, enable LOP for BT*/ \
+ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/
+
+#define RTL8723A_TRANS_LPS_TO_ACT \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\
+ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\
+ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*. 0x08[4] = 0 switch TSF to 40M*/\
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]= 0 TSF in 40M*/\
+ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT6|BIT7, 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/\
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1}, /*. 0x101[1] = 1*/\
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*. 0x02[1:0] = 2b'11 enable BB macro*/\
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
+
+#define RTL8723A_TRANS_END \
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, PWR_CMD_END, 0, 0},
+
+
+extern struct wlan_pwr_cfg rtl8723AU_power_on_flow[RTL8723A_TRANS_CARDEMU_TO_ACT_STEPS+RTL8723A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723AU_radio_off_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723AU_card_disable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723AU_card_enable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723AU_suspend_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS+RTL8723A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723AU_resume_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS+RTL8723A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723AU_hwpdn_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723AU_enter_lps_flow[RTL8723A_TRANS_ACT_TO_LPS_STEPS+RTL8723A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723AU_leave_lps_flow[RTL8723A_TRANS_LPS_TO_ACT_STEPS+RTL8723A_TRANS_END_STEPS];
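+
+/*
+ * Illustrative sketch only (not part of the original submission): the flow
+ * arrays above are typically defined in the matching .c file by concatenating
+ * the transition macros, each of which expands to a comma-terminated list of
+ * struct wlan_pwr_cfg initializers, e.g.
+ *
+ *	struct wlan_pwr_cfg rtl8723AU_enter_lps_flow
+ *		[RTL8723A_TRANS_ACT_TO_LPS_STEPS + RTL8723A_TRANS_END_STEPS] = {
+ *		RTL8723A_TRANS_ACT_TO_LPS
+ *		RTL8723A_TRANS_END
+ *	};
+ *
+ * so each _STEPS constant must match the number of entries its macro
+ * expands to.
+ */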
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h b/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h
new file mode 100644
index 000000000000..bbeaab48057a
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h
@@ -0,0 +1,29 @@
+#ifndef __INC_HAL8723U_FW_IMG_H
+#define __INC_HAL8723U_FW_IMG_H
+
+/*Created on 2013/01/14, 15:51*/
+
+/* FW v16 enable usb interrupt */
+#define Rtl8723UImgArrayLength 22172
+extern u8 Rtl8723UFwImgArray[Rtl8723UImgArrayLength];
+#define Rtl8723UBTImgArrayLength 1
+extern u8 Rtl8723UFwBTImgArray[Rtl8723UBTImgArrayLength];
+
+#define Rtl8723UUMCBCutImgArrayWithBTLength 24118
+#define Rtl8723UUMCBCutImgArrayWithoutBTLength 19200
+
+extern u8 Rtl8723UFwUMCBCutImgArrayWithBT[Rtl8723UUMCBCutImgArrayWithBTLength];
+extern u8 Rtl8723UFwUMCBCutImgArrayWithoutBT[Rtl8723UUMCBCutImgArrayWithoutBTLength];
+
+#define Rtl8723SUMCBCutMPImgArrayLength 24174
+extern const u8 Rtl8723SFwUMCBCutMPImgArray[Rtl8723SUMCBCutMPImgArrayLength];
+
+#define Rtl8723EBTImgArrayLength 15276
+extern u8 Rtl8723EFwBTImgArray[Rtl8723EBTImgArrayLength];
+
+#define Rtl8723UPHY_REG_Array_PGLength 336
+extern u32 Rtl8723UPHY_REG_Array_PG[Rtl8723UPHY_REG_Array_PGLength];
+#define Rtl8723UMACPHY_Array_PGLength 1
+extern u32 Rtl8723UMACPHY_Array_PG[Rtl8723UMACPHY_Array_PGLength];
+
+#endif /* ifndef __INC_HAL8723U_FW_IMG_H */
diff --git a/drivers/staging/rtl8723au/include/HalDMOutSrc8723A.h b/drivers/staging/rtl8723au/include/HalDMOutSrc8723A.h
new file mode 100644
index 000000000000..d7651f7a665b
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/HalDMOutSrc8723A.h
@@ -0,0 +1,64 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8723A_ODM_H__
+#define __RTL8723A_ODM_H__
+/* */
+
+#define RSSI_CCK 0
+#define RSSI_OFDM 1
+#define RSSI_DEFAULT 2
+
+#define IQK_MAC_REG_NUM 4
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM 9
+#define HP_THERMAL_NUM 8
+
+
+/* */
+/* structure and define */
+/* */
+
+
+
+
+/*------------------------Export global variable----------------------------*/
+/*------------------------Export global variable----------------------------*/
+/*------------------------Export Macro Definition---------------------------*/
+/* define DM_MultiSTA_InitGainChangeNotify(Event) {DM_DigTable.CurMultiSTAConnectState = Event;} */
+
+
+/* */
+/* function prototype */
+/* */
+
+/* */
+/* IQ calibrate */
+/* */
+void rtl8723a_phy_iq_calibrate(struct rtw_adapter *pAdapter, bool bReCovery);
+
+/* */
+/* LC calibrate */
+/* */
+void rtl8723a_phy_lc_calibrate(struct rtw_adapter *pAdapter);
+
+/* */
+/* AP calibrate */
+/* */
+void rtl8723a_phy_ap_calibrate(struct rtw_adapter *pAdapter, char delta);
+
+void rtl8723a_odm_check_tx_power_tracking(struct rtw_adapter *Adapter);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/HalHWImg8723A_BB.h b/drivers/staging/rtl8723au/include/HalHWImg8723A_BB.h
new file mode 100644
index 000000000000..e99833cc7929
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/HalHWImg8723A_BB.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+*
+******************************************************************************/
+
+#ifndef __INC_BB_8723A_HW_IMG_H
+#define __INC_BB_8723A_HW_IMG_H
+
+/******************************************************************************
+* AGC_TAB_1T.TXT
+******************************************************************************/
+
+void ODM_ReadAndConfig_AGC_TAB_1T_8723A(struct dm_odm_t *pDM_Odm);
+
+/******************************************************************************
+* PHY_REG_1T.TXT
+******************************************************************************/
+
+void ODM_ReadAndConfig_PHY_REG_1T_8723A(struct dm_odm_t *pDM_Odm);
+
+/******************************************************************************
+* PHY_REG_MP.TXT
+******************************************************************************/
+
+void ODM_ReadAndConfig_PHY_REG_MP_8723A(struct dm_odm_t *pDM_Odm);
+
+/******************************************************************************
+* PHY_REG_PG.TXT
+******************************************************************************/
+
+void ODM_ReadAndConfig_PHY_REG_PG_8723A(struct dm_odm_t *pDM_Odm);
+
+#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8723au/include/HalHWImg8723A_FW.h b/drivers/staging/rtl8723au/include/HalHWImg8723A_FW.h
new file mode 100644
index 000000000000..7ee363b99b49
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/HalHWImg8723A_FW.h
@@ -0,0 +1,28 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+*
+******************************************************************************/
+
+#ifndef __INC_FW_8723A_HW_IMG_H
+#define __INC_FW_8723A_HW_IMG_H
+
+
+/******************************************************************************
+* rtl8723fw_B.TXT
+******************************************************************************/
+
+void ODM_ReadFirmware_8723A_rtl8723fw_B(struct dm_odm_t *pDM_Odm,
+ u8 *pFirmware, u32 *pFirmwareSize);
+
+#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8723au/include/HalHWImg8723A_MAC.h b/drivers/staging/rtl8723au/include/HalHWImg8723A_MAC.h
new file mode 100644
index 000000000000..201be1f87292
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/HalHWImg8723A_MAC.h
@@ -0,0 +1,26 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+*
+******************************************************************************/
+
+#ifndef __INC_MAC_8723A_HW_IMG_H
+#define __INC_MAC_8723A_HW_IMG_H
+
+/******************************************************************************
+* MAC_REG.TXT
+******************************************************************************/
+
+void ODM_ReadAndConfig_MAC_REG_8723A(struct dm_odm_t *pDM_Odm);
+
+#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8723au/include/HalHWImg8723A_RF.h b/drivers/staging/rtl8723au/include/HalHWImg8723A_RF.h
new file mode 100644
index 000000000000..c9af1c375339
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/HalHWImg8723A_RF.h
@@ -0,0 +1,25 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+******************************************************************************/
+
+#ifndef __INC_RF_8723A_HW_IMG_H
+#define __INC_RF_8723A_HW_IMG_H
+
+/******************************************************************************
+* RadioA_1T.TXT
+******************************************************************************/
+
+void ODM_ReadAndConfig_RadioA_1T_8723A(struct dm_odm_t *pDM_Odm);
+
+#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8723au/include/HalPwrSeqCmd.h b/drivers/staging/rtl8723au/include/HalPwrSeqCmd.h
new file mode 100644
index 000000000000..12e03a36f2d3
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/HalPwrSeqCmd.h
@@ -0,0 +1,130 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ ******************************************************************************/
+#ifndef __HALPWRSEQCMD_H__
+#define __HALPWRSEQCMD_H__
+
+#include <drv_types.h>
+
+/*---------------------------------------------*/
+/*---------------------------------------------*/
+#define PWR_CMD_READ 0x00
+ /* offset: the read register offset */
+ /* msk: the mask of the read value */
+ /* value: N/A, left by 0 */
+	/* note: driver shall implement this function by read & msk */
+
+#define PWR_CMD_WRITE 0x01
+	/* offset: the register offset to write */
+	/* msk: the mask of the write bits */
+	/* value: write value */
+	/* note: driver shall implement this cmd as a read-modify-write using msk */
+
+#define PWR_CMD_POLLING 0x02
+ /* offset: the read register offset */
+ /* msk: the mask of the polled value */
+	/* value: the value to be polled, masked by the msk field. */
+ /* note: driver shall implement this cmd by */
+ /* do{ */
+ /* if( (Read(offset) & msk) == (value & msk) ) */
+ /* break; */
+ /* } while(not timeout); */
+
+#define PWR_CMD_DELAY 0x03
+ /* offset: the value to delay */
+ /* msk: N/A */
+ /* value: the unit of delay, 0: us, 1: ms */
+
+#define PWR_CMD_END 0x04
+ /* offset: N/A */
+ /* msk: N/A */
+ /* value: N/A */
+
+/*---------------------------------------------*/
+/* 3 The value of base: 4 bits */
+/*---------------------------------------------*/
+ /* define the base address of each block */
+#define PWR_BASEADDR_MAC 0x00
+#define PWR_BASEADDR_USB 0x01
+#define PWR_BASEADDR_PCIE 0x02
+#define PWR_BASEADDR_SDIO 0x03
+
+/*---------------------------------------------*/
+/* 3 The value of interface_msk: 4 bits */
+/*---------------------------------------------*/
+#define PWR_INTF_SDIO_MSK BIT(0)
+#define PWR_INTF_USB_MSK BIT(1)
+#define PWR_INTF_PCI_MSK BIT(2)
+#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+/*---------------------------------------------*/
+/* 3 The value of fab_msk: 4 bits */
+/*---------------------------------------------*/
+#define PWR_FAB_TSMC_MSK BIT(0)
+#define PWR_FAB_UMC_MSK BIT(1)
+#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+/*---------------------------------------------*/
+/* 3 The value of cut_msk: 8 bits */
+/*---------------------------------------------*/
+#define PWR_CUT_TESTCHIP_MSK BIT(0)
+#define PWR_CUT_A_MSK BIT(1)
+#define PWR_CUT_B_MSK BIT(2)
+#define PWR_CUT_C_MSK BIT(3)
+#define PWR_CUT_D_MSK BIT(4)
+#define PWR_CUT_E_MSK BIT(5)
+#define PWR_CUT_F_MSK BIT(6)
+#define PWR_CUT_G_MSK BIT(7)
+#define PWR_CUT_ALL_MSK 0xFF
+
+
+enum pwrseq_delay_unit {
+ PWRSEQ_DELAY_US,
+ PWRSEQ_DELAY_MS,
+};
+
+struct wlan_pwr_cfg {
+ u16 offset;
+ u8 cut_msk;
+ u8 fab_msk:4;
+ u8 interface_msk:4;
+ u8 base:4;
+ u8 cmd:4;
+ u8 msk;
+ u8 value;
+};
+
+
+#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
+#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
+#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
+#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
+#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
+#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
+#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
+#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
+
+
+/* */
+/* Prototype of protected function. */
+/* */
+u8 HalPwrSeqCmdParsing23a(
+ struct rtw_adapter *padapter,
+ u8 CutVersion,
+ u8 FabVersion,
+ u8 InterfaceType,
+ struct wlan_pwr_cfg PwrCfgCmd[]);
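+
+/*
+ * Illustrative sketch only (assumed shape, not the actual implementation):
+ * the parser walks PwrCfgCmd[] until it hits a PWR_CMD_END entry, skipping
+ * entries whose cut/fab/interface masks do not match the running chip:
+ *
+ *	for (i = 0; GET_PWR_CFG_CMD(cfg[i]) != PWR_CMD_END; i++) {
+ *		if (!(GET_PWR_CFG_CUT_MASK(cfg[i]) & CutVersion) ||
+ *		    !(GET_PWR_CFG_FAB_MASK(cfg[i]) & FabVersion) ||
+ *		    !(GET_PWR_CFG_INTF_MASK(cfg[i]) & InterfaceType))
+ *			continue;
+ *		switch (GET_PWR_CFG_CMD(cfg[i])) {
+ *		case PWR_CMD_WRITE:   read offset, apply msk/value, write back
+ *		case PWR_CMD_POLLING: poll until (read & msk) == (value & msk)
+ *		case PWR_CMD_DELAY:   udelay()/mdelay() depending on value
+ *		}
+ *	}
+ */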
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/HalVerDef.h b/drivers/staging/rtl8723au/include/HalVerDef.h
new file mode 100644
index 000000000000..607b71f6e1e4
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/HalVerDef.h
@@ -0,0 +1,136 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_VERSION_DEF_H__
+#define __HAL_VERSION_DEF_H__
+
+enum hal_ic_type {
+ CHIP_8192S = 0,
+ CHIP_8188C = 1,
+ CHIP_8192C = 2,
+ CHIP_8192D = 3,
+ CHIP_8723A = 4,
+ CHIP_8188E = 5,
+ CHIP_8881A = 6,
+ CHIP_8812A = 7,
+ CHIP_8821A = 8,
+ CHIP_8723B = 9,
+ CHIP_8192E = 10,
+};
+
+enum hal_chip_type {
+ TEST_CHIP = 0,
+ NORMAL_CHIP = 1,
+ FPGA = 2,
+};
+
+enum hal_cut_version {
+ A_CUT_VERSION = 0,
+ B_CUT_VERSION = 1,
+ C_CUT_VERSION = 2,
+ D_CUT_VERSION = 3,
+ E_CUT_VERSION = 4,
+ F_CUT_VERSION = 5,
+ G_CUT_VERSION = 6,
+};
+
+/* HAL_Manufacturer */
+enum hal_vendor {
+ CHIP_VENDOR_TSMC = 0,
+ CHIP_VENDOR_UMC = 1,
+};
+
+enum hal_rf_type {
+ RF_TYPE_1T1R = 0,
+ RF_TYPE_1T2R = 1,
+ RF_TYPE_2T2R = 2,
+ RF_TYPE_2T3R = 3,
+ RF_TYPE_2T4R = 4,
+ RF_TYPE_3T3R = 5,
+ RF_TYPE_3T4R = 6,
+ RF_TYPE_4T4R = 7,
+};
+
+struct hal_version {
+ enum hal_ic_type ICType;
+ enum hal_chip_type ChipType;
+ enum hal_cut_version CUTVersion;
+ enum hal_vendor VendorType;
+ enum hal_rf_type RFType;
+ u8 ROMVer;
+};
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version) ((version).ICType)
+#define GET_CVID_CHIP_TYPE(version) ((version).ChipType)
+#define GET_CVID_RF_TYPE(version) ((version).RFType)
+#define GET_CVID_MANUFACTUER(version) ((version).VendorType)
+#define GET_CVID_CUT_VERSION(version) ((version).CUTVersion)
+#define GET_CVID_ROM_VERSION(version) (((version).ROMVer) & ROM_VERSION_MASK)
+
+/* Common Macro. -- */
+
+#define IS_81XXC(version) \
+ (((GET_CVID_IC_TYPE(version) == CHIP_8192C) || \
+ (GET_CVID_IC_TYPE(version) == CHIP_8188C)) ? true : false)
+#define IS_8723_SERIES(version) \
+ ((GET_CVID_IC_TYPE(version) == CHIP_8723A) ? true : false)
+
+#define IS_TEST_CHIP(version) \
+ ((GET_CVID_CHIP_TYPE(version) == TEST_CHIP) ? true : false)
+#define IS_NORMAL_CHIP(version) \
+ ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false)
+
+#define IS_A_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == A_CUT_VERSION) ? true : false)
+#define IS_B_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? true : false)
+#define IS_C_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == C_CUT_VERSION) ? true : false)
+#define IS_D_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == D_CUT_VERSION) ? true : false)
+#define IS_E_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == E_CUT_VERSION) ? true : false)
+
+#define IS_CHIP_VENDOR_TSMC(version) \
+ ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC) ? true : false)
+#define IS_CHIP_VENDOR_UMC(version) \
+ ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC) ? true : false)
+
+#define IS_1T1R(version) \
+ ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T1R) ? true : false)
+#define IS_1T2R(version) \
+ ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R) ? true : false)
+#define IS_2T2R(version) \
+ ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R) ? true : false)
+
+/* Chip version Macro. -- */
+
+#define IS_92C_SERIAL(version) \
+ ((IS_81XXC(version) && IS_2T2R(version)) ? true : false)
+#define IS_81xxC_VENDOR_UMC_A_CUT(version) \
+ (IS_81XXC(version)?(IS_CHIP_VENDOR_UMC(version) ? \
+ (IS_A_CUT(version) ? true : false) : false) : false)
+#define IS_81xxC_VENDOR_UMC_B_CUT(version) \
+ (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
+ (IS_B_CUT(version) ? true : false) : false): false)
+#define IS_81xxC_VENDOR_UMC_C_CUT(version) \
+ (IS_81XXC(version)?(IS_CHIP_VENDOR_UMC(version) ? \
+ (IS_C_CUT(version) ? true : false) : false) : false)
+#define IS_8723A_A_CUT(version) \
+ ((IS_8723_SERIES(version)) ? (IS_A_CUT(version) ? true : false) : false)
+#define IS_8723A_B_CUT(version) \
+ ((IS_8723_SERIES(version)) ? (IS_B_CUT(version) ? true : false) : false)
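+
+/*
+ * Illustrative usage only (hal_ver and fw are placeholder names; the real
+ * call sites live elsewhere in the driver): firmware and parameter selection
+ * is typically gated on these predicates, for example picking one of the UMC
+ * B-cut images declared in Hal8723UHWImg_CE.h:
+ *
+ *	if (IS_8723_SERIES(hal_ver) && IS_CHIP_VENDOR_UMC(hal_ver) &&
+ *	    IS_B_CUT(hal_ver))
+ *		fw = Rtl8723UFwUMCBCutImgArrayWithBT;
+ */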
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/cmd_osdep.h b/drivers/staging/rtl8723au/include/cmd_osdep.h
new file mode 100644
index 000000000000..4866bee04054
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/cmd_osdep.h
@@ -0,0 +1,26 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __CMD_OSDEP_H_
+#define __CMD_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+int _rtw_init_evt_priv23a(struct evt_priv *pevtpriv);
+void _rtw_free_evt_priv23a(struct evt_priv *pevtpriv);
+void _rtw_free_cmd_priv23a(struct cmd_priv *pcmdpriv);
+int _rtw_enqueue_cmd23a(struct rtw_queue *queue, struct cmd_obj *obj);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/drv_types.h b/drivers/staging/rtl8723au/include/drv_types.h
new file mode 100644
index 000000000000..53eecea48cec
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/drv_types.h
@@ -0,0 +1,360 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/*-----------------------------------------------------------------------------
+
+ For type defines and data structure defines
+
+------------------------------------------------------------------------------*/
+
+
+#ifndef __DRV_TYPES_H__
+#define __DRV_TYPES_H__
+
+#include <osdep_service.h>
+#include <wlan_bssdef.h>
+
+
+enum _NIC_VERSION {
+ RTL8711_NIC,
+ RTL8712_NIC,
+ RTL8713_NIC,
+ RTL8716_NIC
+
+};
+
+
+#include <rtw_ht.h>
+
+#include <rtw_cmd.h>
+#include <wlan_bssdef.h>
+#include <rtw_xmit.h>
+#include <rtw_recv.h>
+#include <hal_intf.h>
+#include <hal_com.h>
+#include <rtw_qos.h>
+#include <rtw_security.h>
+#include <rtw_pwrctrl.h>
+#include <rtw_io.h>
+#include <rtw_eeprom.h>
+#include <sta_info.h>
+#include <rtw_mlme.h>
+#include <rtw_debug.h>
+#include <rtw_rf.h>
+#include <rtw_event.h>
+#include <rtw_led.h>
+#include <rtw_mlme_ext.h>
+#include <rtw_p2p.h>
+#include <rtw_ap.h>
+
+#include "ioctl_cfg80211.h"
+
+#define SPEC_DEV_ID_NONE BIT(0)
+#define SPEC_DEV_ID_DISABLE_HT BIT(1)
+#define SPEC_DEV_ID_ENABLE_PS BIT(2)
+#define SPEC_DEV_ID_RF_CONFIG_1T1R BIT(3)
+#define SPEC_DEV_ID_RF_CONFIG_2T2R BIT(4)
+#define SPEC_DEV_ID_ASSIGN_IFNAME BIT(5)
+
+struct specific_device_id {
+ u32 flags;
+
+ u16 idVendor;
+ u16 idProduct;
+
+};
+
+struct registry_priv {
+ u8 chip_version;
+ u8 rfintfs;
+ struct cfg80211_ssid ssid;
+ u8 channel;/* ad-hoc support requirement */
+ u8 wireless_mode;/* A, B, G, auto */
+ u8 scan_mode;/* active, passive */
+ u8 preamble;/* long, short, auto */
+ u8 vrtl_carrier_sense;/* Enable, Disable, Auto */
+ u8 vcs_type;/* RTS/CTS, CTS-to-self */
+ u16 rts_thresh;
+ u16 frag_thresh;
+ u8 adhoc_tx_pwr;
+ u8 soft_ap;
+ u8 power_mgnt;
+ u8 ips_mode;
+ u8 smart_ps;
+ u8 long_retry_lmt;
+ u8 short_retry_lmt;
+ u16 busy_thresh;
+ u8 ack_policy;
+ u8 software_encrypt;
+ u8 software_decrypt;
+ u8 acm_method;
+ /* UAPSD */
+ u8 wmm_enable;
+ u8 uapsd_enable;
+
+ struct wlan_bssid_ex dev_network;
+
+ u8 ht_enable;
+ u8 cbw40_enable;
+ u8 ampdu_enable;/* for tx */
+ u8 rx_stbc;
+	u8	ampdu_amsdu;/* A-MSDU in A-MPDU is permitted */
+ u8 lowrate_two_xmit;
+
+ u8 rf_config;
+ u8 low_power;
+
+ u8 wifi_spec;/* !turbo_mode */
+
+ u8 channel_plan;
+#ifdef CONFIG_8723AU_BT_COEXIST
+ u8 btcoex;
+ u8 bt_iso;
+ u8 bt_sco;
+ u8 bt_ampdu;
+#endif
+ bool bAcceptAddbaReq;
+
+ u8 antdiv_cfg;
+ u8 antdiv_type;
+
+ u8 usbss_enable;/* 0:disable,1:enable */
+ u8 hwpdn_mode;/* 0:disable,1:enable,2:decide by EFUSE config */
+ u8 hwpwrp_detect;/* 0:disable,1:enable */
+
+ u8 hw_wps_pbc;/* 0:disable,1:enable */
+
+	u8	max_roaming_times; /* max number of times the driver will try to roam */
+
+ u8 enable80211d;
+
+ u8 ifname[16];
+ u8 if2name[16];
+
+ u8 notch_filter;
+
+ u8 regulatory_tid;
+};
+
+
+#define MAX_CONTINUAL_URB_ERR 4
+
+#define GET_PRIMARY_ADAPTER(padapter) \
+ (((struct rtw_adapter *)padapter)->dvobj->if1)
+
+enum _IFACE_ID {
+	IFACE_ID0, /* mapping to PRIMARY_ADAPTER */
+	IFACE_ID1, /* mapping to SECONDARY_ADAPTER */
+ IFACE_ID2,
+ IFACE_ID3,
+ IFACE_ID_MAX,
+};
+
+struct dvobj_priv {
+ struct rtw_adapter *if1; /* PRIMARY_ADAPTER */
+ struct rtw_adapter *if2; /* SECONDARY_ADAPTER */
+
+ /* for local/global synchronization */
+ struct mutex hw_init_mutex;
+ struct mutex h2c_fwcmd_mutex;
+ struct mutex setch_mutex;
+ struct mutex setbw_mutex;
+
+ unsigned char oper_channel; /* saved chan info when set chan bw */
+ unsigned char oper_bwmode;
+ unsigned char oper_ch_offset;/* PRIME_CHNL_OFFSET */
+
+ struct rtw_adapter *padapters[IFACE_ID_MAX];
+	u8 iface_nums; /* total number of ifaces used at runtime */
+
+ /* For 92D, DMDP have 2 interface. */
+ u8 InterfaceNumber;
+ u8 NumInterfaces;
+
+ /* In /Out Pipe information */
+ int RtInPipe[2];
+ int RtOutPipe[3];
+ u8 Queue2Pipe[HW_QUEUE_ENTRY];/* for out pipe mapping */
+
+ u8 irq_alloc;
+
+/*-------- below is for USB INTERFACE --------*/
+
+ u8 nr_endpoint;
+ u8 ishighspeed;
+ u8 RtNumInPipes;
+ u8 RtNumOutPipes;
+ int ep_num[5]; /* endpoint number */
+
+ int RegUsbSS;
+
+ struct semaphore usb_suspend_sema;
+
+ struct mutex usb_vendor_req_mutex;
+
+ u8 *usb_alloc_vendor_req_buf;
+ u8 *usb_vendor_req_buf;
+
+ struct usb_interface *pusbintf;
+ struct usb_device *pusbdev;
+ atomic_t continual_urb_error;
+
+/*-------- below is for PCIE INTERFACE --------*/
+
+};
+
+static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
+{
+	/* todo: get interface type from dvobj and then return the dev accordingly */
+ return &dvobj->pusbintf->dev;
+}
+
+enum _IFACE_TYPE {
+ IFACE_PORT0, /* mapping to port0 for C/D series chips */
+	IFACE_PORT1, /* mapping to port1 for C/D series chips */
+ MAX_IFACE_PORT,
+};
+
+enum _ADAPTER_TYPE {
+ PRIMARY_ADAPTER,
+ SECONDARY_ADAPTER,
+ MAX_ADAPTER,
+};
+
+struct rtw_adapter {
+ int pid[3];/* process id from UI, 0:wps, 1:hostapd, 2:dhcpcd */
+	int bDongle;/* built-in module or external dongle */
+ u16 chip_type;
+ u16 HardwareType;
+
+ struct dvobj_priv *dvobj;
+ struct mlme_priv mlmepriv;
+ struct mlme_ext_priv mlmeextpriv;
+ struct cmd_priv cmdpriv;
+ struct evt_priv evtpriv;
+ /* struct io_queue *pio_queue; */
+ struct io_priv iopriv;
+ struct xmit_priv xmitpriv;
+ struct recv_priv recvpriv;
+ struct sta_priv stapriv;
+ struct security_priv securitypriv;
+ struct registry_priv registrypriv;
+ struct pwrctrl_priv pwrctrlpriv;
+ struct eeprom_priv eeprompriv;
+ struct led_priv ledpriv;
+
+#ifdef CONFIG_8723AU_AP_MODE
+ struct hostapd_priv *phostapdpriv;
+#endif
+
+ struct cfg80211_wifidirect_info cfg80211_wdinfo;
+ u32 setband;
+ struct wifidirect_info wdinfo;
+
+#ifdef CONFIG_8723AU_P2P
+ struct wifi_display_info wfd_info;
+#endif /* CONFIG_8723AU_P2P */
+
+ void *HalData;
+ u32 hal_data_sz;
+ struct hal_ops HalFunc;
+
+ s32 bDriverStopped;
+ s32 bSurpriseRemoved;
+ s32 bCardDisableWOHSM;
+
+ u32 IsrContent;
+ u32 ImrContent;
+
+ u8 EepromAddressSize;
+ u8 hw_init_completed;
+ u8 bDriverIsGoingToUnload;
+ u8 init_adpt_in_progress;
+ u8 bHaltInProgress;
+
+ void *cmdThread;
+ void *evtThread;
+ void *xmitThread;
+ void *recvThread;
+
+ void (*intf_start)(struct rtw_adapter *adapter);
+ void (*intf_stop)(struct rtw_adapter *adapter);
+
+ struct net_device *pnetdev;
+
+ /* used by rtw_rereg_nd_name related function */
+ struct rereg_nd_name_data {
+ struct net_device *old_pnetdev;
+ char old_ifname[IFNAMSIZ];
+ u8 old_ips_mode;
+ u8 old_bRegUseLed;
+ } rereg_nd_name_priv;
+
+ int bup;
+ struct net_device_stats stats;
+ struct iw_statistics iwstats;
+ struct proc_dir_entry *dir_dev;/* for proc directory */
+
+ struct wireless_dev *rtw_wdev;
+ int net_closed;
+
+ u8 bFWReady;
+ u8 bBTFWReady;
+ u8 bReadPortCancel;
+ u8 bWritePortCancel;
+ u8 bRxRSSIDisplay;
+	/* The driver will show the desired channel number when this flag is 1. */
+ u8 bNotifyChannelChange;
+#ifdef CONFIG_8723AU_P2P
+ /* driver will show current P2P status when the application reads it*/
+ u8 bShowGetP2PState;
+#endif
+ struct rtw_adapter *pbuddy_adapter;
+
+ /* extend to support multi interface */
+	/* IFACE_ID0 equals PRIMARY_ADAPTER */
+	/* IFACE_ID1 equals SECONDARY_ADAPTER */
+ u8 iface_id;
+
+#ifdef CONFIG_BR_EXT
+ _lock br_ext_lock;
+ /* unsigned int macclone_completed; */
+ struct nat25_network_db_entry *nethash[NAT25_HASH_SIZE];
+ int pppoe_connection_in_progress;
+ unsigned char pppoe_addr[MACADDRLEN];
+ unsigned char scdb_mac[MACADDRLEN];
+ unsigned char scdb_ip[4];
+ struct nat25_network_db_entry *scdb_entry;
+ unsigned char br_mac[MACADDRLEN];
+ unsigned char br_ip[4];
+
+ struct br_ext_info ethBrExtInfo;
+#endif /* CONFIG_BR_EXT */
+
+ u8 fix_rate;
+
+ unsigned char in_cta_test;
+
+};
+
+#define adapter_to_dvobj(adapter) (adapter->dvobj)
+
+int rtw_handle_dualmac23a(struct rtw_adapter *adapter, bool init);
+
+static inline u8 *myid(struct eeprom_priv *peepriv)
+{
+ return peepriv->mac_addr;
+}
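+
+/*
+ * Illustrative example only: the accessors above are typically chained to
+ * reach the underlying struct device and the permanent MAC address, e.g.
+ *
+ *	struct device *dev = dvobj_to_dev(adapter_to_dvobj(padapter));
+ *	u8 *mac = myid(&padapter->eeprompriv);
+ */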
+
+#endif /* __DRV_TYPES_H__ */
diff --git a/drivers/staging/rtl8188eu/include/h2clbk.h b/drivers/staging/rtl8723au/include/ethernet.h
index e595030ac8a3..39fc6df88188 100644
--- a/drivers/staging/rtl8188eu/include/h2clbk.h
+++ b/drivers/staging/rtl8723au/include/ethernet.h
@@ -11,25 +11,12 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*
******************************************************************************/
+/*! \file */
+#ifndef __INC_ETHERNET_H
+#define __INC_ETHERNET_H
+#define LLC_HEADER_SIZE 6 /* LLC Header Length */
-#define _H2CLBK_H_
-
-
-#include <rtl8711_spec.h>
-#include <TypeDef.h>
-
-
-void _lbk_cmd(struct adapter *adapter);
-
-void _lbk_rsp(struct adapter *adapter);
-
-void _lbk_evt(IN struct adapter *adapter);
-
-void h2c_event_callback(unsigned char *dev, unsigned char *pbuf);
+#endif /* #ifndef __INC_ETHERNET_H */
diff --git a/drivers/staging/rtl8723au/include/hal_com.h b/drivers/staging/rtl8723au/include/hal_com.h
new file mode 100644
index 000000000000..20f983cfc2b7
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/hal_com.h
@@ -0,0 +1,211 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_COMMON_H__
+#define __HAL_COMMON_H__
+
+/* */
+/* Rate Definition */
+/* */
+/* CCK */
+#define RATR_1M 0x00000001
+#define RATR_2M 0x00000002
+#define RATR_55M 0x00000004
+#define RATR_11M 0x00000008
+/* OFDM */
+#define RATR_6M 0x00000010
+#define RATR_9M 0x00000020
+#define RATR_12M 0x00000040
+#define RATR_18M 0x00000080
+#define RATR_24M 0x00000100
+#define RATR_36M 0x00000200
+#define RATR_48M 0x00000400
+#define RATR_54M 0x00000800
+/* MCS 1 Spatial Stream */
+#define RATR_MCS0 0x00001000
+#define RATR_MCS1 0x00002000
+#define RATR_MCS2 0x00004000
+#define RATR_MCS3 0x00008000
+#define RATR_MCS4 0x00010000
+#define RATR_MCS5 0x00020000
+#define RATR_MCS6 0x00040000
+#define RATR_MCS7 0x00080000
+/* MCS 2 Spatial Stream */
+#define RATR_MCS8 0x00100000
+#define RATR_MCS9 0x00200000
+#define RATR_MCS10 0x00400000
+#define RATR_MCS11 0x00800000
+#define RATR_MCS12 0x01000000
+#define RATR_MCS13 0x02000000
+#define RATR_MCS14 0x04000000
+#define RATR_MCS15 0x08000000
+
+/* CCK */
+#define RATE_1M BIT(0)
+#define RATE_2M BIT(1)
+#define RATE_5_5M BIT(2)
+#define RATE_11M BIT(3)
+/* OFDM */
+#define RATE_6M BIT(4)
+#define RATE_9M BIT(5)
+#define RATE_12M BIT(6)
+#define RATE_18M BIT(7)
+#define RATE_24M BIT(8)
+#define RATE_36M BIT(9)
+#define RATE_48M BIT(10)
+#define RATE_54M BIT(11)
+/* MCS 1 Spatial Stream */
+#define RATE_MCS0 BIT(12)
+#define RATE_MCS1 BIT(13)
+#define RATE_MCS2 BIT(14)
+#define RATE_MCS3 BIT(15)
+#define RATE_MCS4 BIT(16)
+#define RATE_MCS5 BIT(17)
+#define RATE_MCS6 BIT(18)
+#define RATE_MCS7 BIT(19)
+/* MCS 2 Spatial Stream */
+#define RATE_MCS8 BIT(20)
+#define RATE_MCS9 BIT(21)
+#define RATE_MCS10 BIT(22)
+#define RATE_MCS11 BIT(23)
+#define RATE_MCS12 BIT(24)
+#define RATE_MCS13 BIT(25)
+#define RATE_MCS14 BIT(26)
+#define RATE_MCS15 BIT(27)
+
+/* ALL CCK Rate */
+#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define RATE_ALL_OFDM_AG \
+ (RATR_6M | RATR_9M | RATR_12M | RATR_18M | RATR_24M| \
+ RATR_36M|RATR_48M|RATR_54M)
+#define RATE_ALL_OFDM_1SS \
+ (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | RATR_MCS3 | \
+ RATR_MCS4 | RATR_MCS5 | RATR_MCS6 | RATR_MCS7)
+#define RATE_ALL_OFDM_2SS \
+ (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | RATR_MCS11| \
+ RATR_MCS12 | RATR_MCS13 | RATR_MCS14 | RATR_MCS15)
+
+/*------------------------------ Tx Desc definition Macro ------------------------*/
+/* pragma mark -- Tx Desc related definition. -- */
+/* */
+/* */
+/* Rate */
+/* */
+/* CCK Rates, TxHT = 0 */
+#define DESC_RATE1M 0x00
+#define DESC_RATE2M 0x01
+#define DESC_RATE5_5M 0x02
+#define DESC_RATE11M 0x03
+
+/* OFDM Rates, TxHT = 0 */
+#define DESC_RATE6M 0x04
+#define DESC_RATE9M 0x05
+#define DESC_RATE12M 0x06
+#define DESC_RATE18M 0x07
+#define DESC_RATE24M 0x08
+#define DESC_RATE36M 0x09
+#define DESC_RATE48M 0x0a
+#define DESC_RATE54M 0x0b
+
+/* MCS Rates, TxHT = 1 */
+#define DESC_RATEMCS0 0x0c
+#define DESC_RATEMCS1 0x0d
+#define DESC_RATEMCS2 0x0e
+#define DESC_RATEMCS3 0x0f
+#define DESC_RATEMCS4 0x10
+#define DESC_RATEMCS5 0x11
+#define DESC_RATEMCS6 0x12
+#define DESC_RATEMCS7 0x13
+#define DESC_RATEMCS8 0x14
+#define DESC_RATEMCS9 0x15
+#define DESC_RATEMCS10 0x16
+#define DESC_RATEMCS11 0x17
+#define DESC_RATEMCS12 0x18
+#define DESC_RATEMCS13 0x19
+#define DESC_RATEMCS14 0x1a
+#define DESC_RATEMCS15 0x1b
+#define DESC_RATEMCS15_SG 0x1c
+#define DESC_RATEMCS32 0x20
+
+#define REG_P2P_CTWIN 0x0572 /* 1 Byte long (in unit of TU) */
+#define REG_NOA_DESC_SEL 0x05CF
+#define REG_NOA_DESC_DURATION 0x05E0
+#define REG_NOA_DESC_INTERVAL 0x05E4
+#define REG_NOA_DESC_START 0x05E8
+#define REG_NOA_DESC_COUNT 0x05EC
+
+#include "HalVerDef.h"
+void dump_chip_info23a(struct hal_version ChipVersion);
+
+
+u8 /* return the final channel plan decision */
+hal_com_get_channel_plan23a(
+ struct rtw_adapter *padapter,
+ u8 hw_channel_plan, /* channel plan from HW (efuse/eeprom) */
+ u8 sw_channel_plan, /* channel plan from SW (registry/module param) */
+	u8 def_channel_plan, /* channel plan used when the former two are invalid */
+ bool AutoLoadFail
+ );
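+
+/*
+ * Illustrative call only (efuse_chplan, default_chplan and chplan are
+ * placeholder variables): hw_channel_plan normally comes from efuse/eeprom
+ * and sw_channel_plan from the registry_priv module parameters, e.g.
+ *
+ *	chplan = hal_com_get_channel_plan23a(padapter, efuse_chplan,
+ *					     padapter->registrypriv.channel_plan,
+ *					     default_chplan, AutoLoadFail);
+ */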
+
+u8 MRateToHwRate23a(u8 rate);
+
+void HalSetBrateCfg23a(struct rtw_adapter *padapter, u8 *mBratesOS);
+
+bool
+Hal_MappingOutPipe23a(struct rtw_adapter *pAdapter, u8 NumOutPipe);
+
+void hal_init_macaddr23a(struct rtw_adapter *adapter);
+
+void c2h_evt_clear23a(struct rtw_adapter *adapter);
+s32 c2h_evt_read23a(struct rtw_adapter *adapter, u8 *buf);
+
+void rtl8723a_set_ampdu_min_space(struct rtw_adapter *padapter, u8 MinSpacingToSet);
+void rtl8723a_set_ampdu_factor(struct rtw_adapter *padapter, u8 FactorToSet);
+void rtl8723a_set_acm_ctrl(struct rtw_adapter *padapter, u8 ctrl);
+void rtl8723a_set_media_status(struct rtw_adapter *padapter, u8 status);
+void rtl8723a_set_media_status1(struct rtw_adapter *padapter, u8 status);
+void rtl8723a_set_bcn_func(struct rtw_adapter *padapter, u8 val);
+void rtl8723a_check_bssid(struct rtw_adapter *padapter, u8 val);
+void rtl8723a_mlme_sitesurvey(struct rtw_adapter *padapter, u8 flag);
+void rtl8723a_on_rcr_am(struct rtw_adapter *padapter);
+void rtl8723a_off_rcr_am(struct rtw_adapter *padapter);
+void rtl8723a_set_slot_time(struct rtw_adapter *padapter, u8 slottime);
+void rtl8723a_ack_preamble(struct rtw_adapter *padapter, u8 bShortPreamble);
+void rtl8723a_set_sec_cfg(struct rtw_adapter *padapter, u8 sec);
+void rtl8723a_cam_empty_entry(struct rtw_adapter *padapter, u8 ucIndex);
+void rtl8723a_cam_invalid_all(struct rtw_adapter *padapter);
+void rtl8723a_cam_write(struct rtw_adapter *padapter, u32 val1, u32 val2);
+void rtl8723a_fifo_cleanup(struct rtw_adapter *padapter);
+void rtl8723a_set_apfm_on_mac(struct rtw_adapter *padapter, u8 val);
+void rtl8723a_bcn_valid(struct rtw_adapter *padapter);
+void rtl8723a_set_tx_pause(struct rtw_adapter *padapter, u8 pause);
+void rtl8723a_set_beacon_interval(struct rtw_adapter *padapter, u16 interval);
+void rtl8723a_set_resp_sifs(struct rtw_adapter *padapter,
+ u8 r2t1, u8 r2t2, u8 t2t1, u8 t2t2);
+void rtl8723a_set_ac_param_vo(struct rtw_adapter *padapter, u32 vo);
+void rtl8723a_set_ac_param_vi(struct rtw_adapter *padapter, u32 vi);
+void rtl8723a_set_ac_param_be(struct rtw_adapter *padapter, u32 be);
+void rtl8723a_set_ac_param_bk(struct rtw_adapter *padapter, u32 bk);
+void rtl8723a_set_rxdma_agg_pg_th(struct rtw_adapter *padapter, u8 val);
+void rtl8723a_set_nav_upper(struct rtw_adapter *padapter, u32 usNavUpper);
+void rtl8723a_set_initial_gain(struct rtw_adapter *padapter, u32 rx_gain);
+
+void rtl8723a_odm_support_ability_write(struct rtw_adapter *padapter, u32 val);
+void rtl8723a_odm_support_ability_backup(struct rtw_adapter *padapter, u8 val);
+void rtl8723a_odm_support_ability_set(struct rtw_adapter *padapter, u32 val);
+void rtl8723a_odm_support_ability_clr(struct rtw_adapter *padapter, u32 val);
+
+void rtl8723a_set_rpwm(struct rtw_adapter *padapter, u8 val);
+
+#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/rtl8723au/include/hal_intf.h b/drivers/staging/rtl8723au/include/hal_intf.h
new file mode 100644
index 000000000000..d183f4ba1ecb
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/hal_intf.h
@@ -0,0 +1,392 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_INTF_H__
+#define __HAL_INTF_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+enum RTL871X_HCI_TYPE {
+ RTW_PCIE = BIT0,
+ RTW_USB = BIT1,
+ RTW_SDIO = BIT2,
+ RTW_GSPI = BIT3,
+};
+
+enum _CHIP_TYPE {
+ NULL_CHIP_TYPE,
+ RTL8712_8188S_8191S_8192S,
+ RTL8188C_8192C,
+ RTL8192D,
+ RTL8723A,
+ RTL8188E,
+ MAX_CHIP_TYPE
+};
+
+enum HW_VARIABLES {
+ HW_VAR_MEDIA_STATUS,
+ HW_VAR_MEDIA_STATUS1,
+ HW_VAR_SET_OPMODE,
+ HW_VAR_MAC_ADDR,
+ HW_VAR_BSSID,
+ HW_VAR_INIT_RTS_RATE,
+ HW_VAR_BASIC_RATE,
+ HW_VAR_TXPAUSE,
+ HW_VAR_BCN_FUNC,
+ HW_VAR_CORRECT_TSF,
+ HW_VAR_CHECK_BSSID,
+ HW_VAR_MLME_DISCONNECT,
+ HW_VAR_MLME_SITESURVEY,
+ HW_VAR_MLME_JOIN,
+ HW_VAR_ON_RCR_AM,
+ HW_VAR_OFF_RCR_AM,
+ HW_VAR_BEACON_INTERVAL,
+ HW_VAR_SLOT_TIME,
+ HW_VAR_RESP_SIFS,
+ HW_VAR_ACK_PREAMBLE,
+ HW_VAR_SEC_CFG,
+ HW_VAR_BCN_VALID,
+ HW_VAR_RF_TYPE,
+ HW_VAR_DM_FLAG,
+ HW_VAR_DM_FUNC_OP,
+ HW_VAR_DM_FUNC_SET,
+ HW_VAR_DM_FUNC_CLR,
+ HW_VAR_CAM_EMPTY_ENTRY,
+ HW_VAR_CAM_INVALID_ALL,
+ HW_VAR_CAM_WRITE,
+ HW_VAR_CAM_READ,
+ HW_VAR_AC_PARAM_VO,
+ HW_VAR_AC_PARAM_VI,
+ HW_VAR_AC_PARAM_BE,
+ HW_VAR_AC_PARAM_BK,
+ HW_VAR_ACM_CTRL,
+ HW_VAR_AMPDU_MIN_SPACE,
+ HW_VAR_AMPDU_FACTOR,
+ HW_VAR_RXDMA_AGG_PG_TH,
+ HW_VAR_SET_RPWM,
+ HW_VAR_H2C_FW_PWRMODE,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ HW_VAR_FWLPS_RF_ON,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ HW_VAR_TDLS_WRCR,
+ HW_VAR_TDLS_INIT_CH_SEN,
+ HW_VAR_TDLS_RS_RCR,
+ HW_VAR_TDLS_DONE_CH_SEN,
+ HW_VAR_INITIAL_GAIN,
+ HW_VAR_TRIGGER_GPIO_0,
+ HW_VAR_BT_SET_COEXIST,
+ HW_VAR_BT_ISSUE_DELBA,
+ HW_VAR_CURRENT_ANTENNA,
+ HW_VAR_ANTENNA_DIVERSITY_LINK,
+ HW_VAR_ANTENNA_DIVERSITY_SELECT,
+ HW_VAR_SWITCH_EPHY_WoWLAN,
+ HW_VAR_EFUSE_BYTES,
+ HW_VAR_EFUSE_BT_BYTES,
+ HW_VAR_FIFO_CLEARN_UP,
+ HW_VAR_CHECK_TXBUF,
+	HW_VAR_APFM_ON_MAC, /* Auto FSM to turn on, including clock, isolation and power control for MAC only */
+	/* The valid upper NAV range for HW updates; if the actual value is larger than this range, the HW won't update it. */
+	/* Unit is microseconds. 0 means this function is disabled. */
+ HW_VAR_NAV_UPPER,
+ HW_VAR_RPT_TIMER_SETTING,
+ HW_VAR_TX_RPT_MAX_MACID,
+ HW_VAR_H2C_MEDIA_STATUS_RPT,
+ HW_VAR_CHK_HI_QUEUE_EMPTY,
+ HW_VAR_READ_LLT_TAB,
+};
+
+enum hal_def_variable {
+ HAL_DEF_UNDERCORATEDSMOOTHEDPWDB,
+ HAL_DEF_IS_SUPPORT_ANT_DIV,
+ HAL_DEF_CURRENT_ANTENNA,
+ HAL_DEF_DRVINFO_SZ,
+ HAL_DEF_MAX_RECVBUF_SZ,
+ HAL_DEF_RX_PACKET_OFFSET,
+ HAL_DEF_DBG_DUMP_RXPKT,/* for dbg */
+ HAL_DEF_DBG_DM_FUNC,/* for dbg */
+ HAL_DEF_RA_DECISION_RATE,
+ HAL_DEF_RA_SGI,
+ HAL_DEF_PT_PWR_STATUS,
+ HW_VAR_MAX_RX_AMPDU_FACTOR,
+ HW_DEF_RA_INFO_DUMP,
+ HAL_DEF_DBG_DUMP_TXPKT,
+ HW_DEF_FA_CNT_DUMP,
+ HW_DEF_ODM_DBG_FLAG,
+};
+
+enum hal_odm_variable {
+ HAL_ODM_STA_INFO,
+ HAL_ODM_P2P_STATE,
+ HAL_ODM_WIFI_DISPLAY_STATE,
+};
+
+enum hal_intf_ps_func {
+ HAL_USB_SELECT_SUSPEND,
+ HAL_MAX_ID,
+};
+
+struct hal_ops {
+ u32 (*hal_power_on)(struct rtw_adapter *padapter);
+ u32 (*hal_init)(struct rtw_adapter *padapter);
+ u32 (*hal_deinit)(struct rtw_adapter *padapter);
+
+ void (*free_hal_data)(struct rtw_adapter *padapter);
+
+ u32 (*inirp_init)(struct rtw_adapter *padapter);
+ u32 (*inirp_deinit)(struct rtw_adapter *padapter);
+
+ s32 (*init_xmit_priv)(struct rtw_adapter *padapter);
+ void (*free_xmit_priv)(struct rtw_adapter *padapter);
+
+ s32 (*init_recv_priv)(struct rtw_adapter *padapter);
+ void (*free_recv_priv)(struct rtw_adapter *padapter);
+
+ void (*InitSwLeds)(struct rtw_adapter *padapter);
+ void (*DeInitSwLeds)(struct rtw_adapter *padapter);
+
+ void (*dm_init)(struct rtw_adapter *padapter);
+ void (*dm_deinit)(struct rtw_adapter *padapter);
+ void (*read_chip_version)(struct rtw_adapter *padapter);
+
+ void (*init_default_value)(struct rtw_adapter *padapter);
+
+ void (*intf_chip_configure)(struct rtw_adapter *padapter);
+
+ void (*read_adapter_info)(struct rtw_adapter *padapter);
+
+ void (*enable_interrupt)(struct rtw_adapter *padapter);
+ void (*disable_interrupt)(struct rtw_adapter *padapter);
+ s32 (*interrupt_handler)(struct rtw_adapter *padapter);
+ void (*set_bwmode_handler)(struct rtw_adapter *padapter,
+ enum ht_channel_width Bandwidth, u8 Offset);
+ void (*set_channel_handler)(struct rtw_adapter *padapter, u8 channel);
+
+ void (*hal_dm_watchdog)(struct rtw_adapter *padapter);
+
+ void (*SetHwRegHandler)(struct rtw_adapter *padapter,
+ u8 variable, u8 *val);
+ void (*GetHwRegHandler)(struct rtw_adapter *padapter,
+ u8 variable, u8 *val);
+
+ u8 (*GetHalDefVarHandler)(struct rtw_adapter *padapter,
+ enum hal_def_variable eVariable,
+ void *pValue);
+ u8 (*SetHalDefVarHandler)(struct rtw_adapter *padapter,
+ enum hal_def_variable eVariable,
+ void *pValue);
+
+ void (*GetHalODMVarHandler)(struct rtw_adapter *padapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet);
+ void (*SetHalODMVarHandler)(struct rtw_adapter *padapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet);
+
+ void (*UpdateRAMaskHandler)(struct rtw_adapter *padapter,
+ u32 mac_id, u8 rssi_level);
+ void (*SetBeaconRelatedRegistersHandler)(struct rtw_adapter *padapter);
+
+ void (*Add_RateATid)(struct rtw_adapter *padapter, u32 bitmap,
+ u8 arg, u8 rssi_level);
+ void (*run_thread)(struct rtw_adapter *padapter);
+ void (*cancel_thread)(struct rtw_adapter *padapter);
+
+ u8 (*interface_ps_func)(struct rtw_adapter *padapter,
+ enum hal_intf_ps_func efunc_id, u8 *val);
+
+ s32 (*hal_xmit)(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe);
+ s32 (*mgnt_xmit)(struct rtw_adapter *padapter,
+ struct xmit_frame *pmgntframe);
+ s32 (*hal_xmitframe_enqueue)(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe);
+
+ u32 (*read_bbreg)(struct rtw_adapter *padapter, u32 RegAddr,
+ u32 BitMask);
+ void (*write_bbreg)(struct rtw_adapter *padapter, u32 RegAddr,
+ u32 BitMask, u32 Data);
+ u32 (*read_rfreg)(struct rtw_adapter *padapter, u32 eRFPath,
+ u32 RegAddr, u32 BitMask);
+ void (*write_rfreg)(struct rtw_adapter *padapter, u32 eRFPath,
+ u32 RegAddr, u32 BitMask, u32 Data);
+
+ void (*EfusePowerSwitch)(struct rtw_adapter *padapter, u8 bWrite,
+ u8 PwrState);
+ void (*ReadEFuse)(struct rtw_adapter *padapter, u8 efuseType,
+ u16 _offset, u16 _size_byte, u8 *pbuf);
+ void (*EFUSEGetEfuseDefinition)(struct rtw_adapter *padapter,
+ u8 efuseType, u8 type, void *pOut);
+ u16 (*EfuseGetCurrentSize)(struct rtw_adapter *padapter, u8 efuseType);
+ int (*Efuse_PgPacketRead23a)(struct rtw_adapter *padapter,
+ u8 offset, u8 *data);
+ int (*Efuse_PgPacketWrite23a)(struct rtw_adapter *padapter,
+ u8 offset, u8 word_en, u8 *data);
+ u8 (*Efuse_WordEnableDataWrite23a)(struct rtw_adapter *padapter,
+ u16 efuse_addr, u8 word_en,
+ u8 *data);
+ bool (*Efuse_PgPacketWrite23a_BT)(struct rtw_adapter *padapter,
+ u8 offset, u8 word_en, u8 *data);
+
+ void (*sreset_init_value23a)(struct rtw_adapter *padapter);
+ void (*sreset_reset_value23a)(struct rtw_adapter *padapter);
+ void (*silentreset)(struct rtw_adapter *padapter);
+ void (*sreset_xmit_status_check)(struct rtw_adapter *padapter);
+ void (*sreset_linked_status_check) (struct rtw_adapter *padapter);
+ u8 (*sreset_get_wifi_status23a)(struct rtw_adapter *padapter);
+ bool (*sreset_inprogress)(struct rtw_adapter *padapter);
+
+ void (*hal_notch_filter)(struct rtw_adapter *adapter, bool enable);
+ void (*hal_reset_security_engine)(struct rtw_adapter *adapter);
+ s32 (*c2h_handler)(struct rtw_adapter *padapter, struct c2h_evt_hdr *c2h_evt);
+ c2h_id_filter c2h_id_filter_ccx;
+};
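+
+/*
+ * Note (illustrative, assumed wiring): the rtw_hal_* wrappers declared later
+ * in this header normally just dispatch through this ops table, along the
+ * lines of
+ *
+ *	u32 rtw_hal_read_bbreg23a(struct rtw_adapter *padapter, u32 RegAddr,
+ *				  u32 BitMask)
+ *	{
+ *		if (padapter->HalFunc.read_bbreg)
+ *			return padapter->HalFunc.read_bbreg(padapter, RegAddr,
+ *							    BitMask);
+ *		return 0;
+ *	}
+ */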
+
+enum rt_eeprom_type {
+ EEPROM_93C46,
+ EEPROM_93C56,
+ EEPROM_BOOT_EFUSE,
+};
+
+
+
+#define RF_CHANGE_BY_INIT 0
+#define RF_CHANGE_BY_IPS BIT28
+#define RF_CHANGE_BY_PS BIT29
+#define RF_CHANGE_BY_HW BIT30
+#define RF_CHANGE_BY_SW BIT31
+
+enum hardware_type {
+ HARDWARE_TYPE_RTL8180,
+ HARDWARE_TYPE_RTL8185,
+ HARDWARE_TYPE_RTL8187,
+ HARDWARE_TYPE_RTL8188,
+ HARDWARE_TYPE_RTL8190P,
+ HARDWARE_TYPE_RTL8192E,
+ HARDWARE_TYPE_RTL819xU,
+ HARDWARE_TYPE_RTL8192SE,
+ HARDWARE_TYPE_RTL8192SU,
+ HARDWARE_TYPE_RTL8192CE,
+ HARDWARE_TYPE_RTL8192CU,
+ HARDWARE_TYPE_RTL8192DE,
+ HARDWARE_TYPE_RTL8192DU,
+ HARDWARE_TYPE_RTL8723AE,
+ HARDWARE_TYPE_RTL8723AU,
+ HARDWARE_TYPE_RTL8723AS,
+ HARDWARE_TYPE_RTL8188EE,
+ HARDWARE_TYPE_RTL8188EU,
+ HARDWARE_TYPE_RTL8188ES,
+ HARDWARE_TYPE_MAX,
+};
+
+#define GET_EEPROM_EFUSE_PRIV(adapter) (&adapter->eeprompriv)
+#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse)
+
+extern int rtw_ht_enable23A;
+extern int rtw_cbw40_enable23A;
+extern int rtw_ampdu_enable23A;/* for enable tx_ampdu */
+
+void rtw_hal_def_value_init23a(struct rtw_adapter *padapter);
+int pm_netdev_open23a(struct net_device *pnetdev, u8 bnormal);
+int rtw_resume_process23a(struct rtw_adapter *padapter);
+
+void rtw_hal_free_data23a(struct rtw_adapter *padapter);
+
+void rtw_hal_dm_init23a(struct rtw_adapter *padapter);
+void rtw_hal_dm_deinit23a(struct rtw_adapter *padapter);
+void rtw_hal_sw_led_init23a(struct rtw_adapter *padapter);
+void rtw_hal_sw_led_deinit23a(struct rtw_adapter *padapter);
+
+u32 rtw_hal_power_on23a(struct rtw_adapter *padapter);
+uint rtw_hal_init23a(struct rtw_adapter *padapter);
+uint rtw_hal_deinit23a(struct rtw_adapter *padapter);
+void rtw_hal_stop(struct rtw_adapter *padapter);
+void rtw_hal_set_hwreg23a(struct rtw_adapter *padapter, u8 variable, u8 *val);
+void rtw23a_hal_get_hwreg(struct rtw_adapter *padapter, u8 variable, u8 *val);
+
+void rtw_hal_chip_configure23a(struct rtw_adapter *padapter);
+void rtw_hal_read_chip_info23a(struct rtw_adapter *padapter);
+void rtw_hal_read_chip_version23a(struct rtw_adapter *padapter);
+
+u8 rtw_hal_set_def_var23a(struct rtw_adapter *padapter,
+ enum hal_def_variable eVariable,
+ void *pValue);
+u8 rtw_hal_get_def_var23a(struct rtw_adapter *padapter,
+ enum hal_def_variable eVariable,
+ void *pValue);
+
+void rtw_hal_set_odm_var23a(struct rtw_adapter *padapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet);
+void rtw_hal_get_odm_var23a(struct rtw_adapter *padapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet);
+
+void rtw_hal_enable_interrupt23a(struct rtw_adapter *padapter);
+void rtw_hal_disable_interrupt23a(struct rtw_adapter *padapter);
+
+u32 rtw_hal_inirp_init23a(struct rtw_adapter *padapter);
+u32 rtw_hal_inirp_deinit23a(struct rtw_adapter *padapter);
+
+u8 rtw_hal_intf_ps_func23a(struct rtw_adapter *padapter,
+ enum hal_intf_ps_func efunc_id, u8 *val);
+
+s32 rtw_hal_xmit23aframe_enqueue(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe);
+s32 rtw_hal_xmit23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe);
+s32 rtw_hal_mgnt_xmit23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pmgntframe);
+
+s32 rtw_hal_init23a_xmit_priv(struct rtw_adapter *padapter);
+void rtw_hal_free_xmit_priv23a(struct rtw_adapter *padapter);
+
+s32 rtw_hal_init23a_recv_priv(struct rtw_adapter *padapter);
+void rtw_hal_free_recv_priv23a(struct rtw_adapter *padapter);
+
+void rtw_hal_update_ra_mask23a(struct sta_info *psta, u8 rssi_level);
+void rtw_hal_add_ra_tid23a(struct rtw_adapter *padapter, u32 bitmap, u8 arg, u8 rssi_level);
+void rtw_hal_clone_data(struct rtw_adapter *dst_padapter, struct rtw_adapter *src_padapter);
+void rtw_hal_start_thread23a(struct rtw_adapter *padapter);
+void rtw_hal_stop_thread23a(struct rtw_adapter *padapter);
+
+void rtw_hal_bcn_related_reg_setting23a(struct rtw_adapter *padapter);
+
+u32 rtw_hal_read_bbreg23a(struct rtw_adapter *padapter, u32 RegAddr, u32 BitMask);
+void rtw_hal_write_bbreg23a(struct rtw_adapter *padapter, u32 RegAddr, u32 BitMask, u32 Data);
+u32 rtw_hal_read_rfreg23a(struct rtw_adapter *padapter, u32 eRFPath, u32 RegAddr, u32 BitMask);
+void rtw_hal_write_rfreg23a(struct rtw_adapter *padapter, u32 eRFPath, u32 RegAddr, u32 BitMask, u32 Data);
+
+s32 rtw_hal_interrupt_handler23a(struct rtw_adapter *padapter);
+
+void rtw_hal_set_bwmode23a(struct rtw_adapter *padapter,
+ enum ht_channel_width Bandwidth, u8 Offset);
+void rtw_hal_set_chan23a(struct rtw_adapter *padapter, u8 channel);
+void rtw_hal_dm_watchdog23a(struct rtw_adapter *padapter);
+
+void rtw_hal_sreset_init23a(struct rtw_adapter *padapter);
+void rtw_hal_sreset_reset23a(struct rtw_adapter *padapter);
+void rtw_hal_sreset_reset23a_value23a(struct rtw_adapter *padapter);
+void rtw_hal_sreset_xmit_status_check23a(struct rtw_adapter *padapter);
+void rtw_hal_sreset_linked_status_check23a (struct rtw_adapter *padapter);
+u8 rtw_hal_sreset_get_wifi_status23a(struct rtw_adapter *padapter);
+bool rtw_hal_sreset_inprogress(struct rtw_adapter *padapter);
+
+void rtw_hal_notch_filter23a(struct rtw_adapter *adapter, bool enable);
+void rtw_hal_reset_security_engine23a(struct rtw_adapter *adapter);
+
+s32 rtw_hal_c2h_handler23a(struct rtw_adapter *adapter, struct c2h_evt_hdr *c2h_evt);
+c2h_id_filter rtw_hal_c2h_id_filter_ccx23a(struct rtw_adapter *adapter);
+
+#endif /* __HAL_INTF_H__ */
diff --git a/drivers/staging/rtl8723au/include/ieee80211.h b/drivers/staging/rtl8723au/include/ieee80211.h
new file mode 100644
index 000000000000..28e4ab239fb9
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/ieee80211.h
@@ -0,0 +1,603 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __IEEE80211_H
+#define __IEEE80211_H
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include "linux/ieee80211.h"
+#include "wifi.h"
+
+#include <linux/wireless.h>
+
+#if (WIRELESS_EXT < 22)
+#error "Obsolete pre 2007 wireless extensions are not supported"
+#endif
+
+
+#define MGMT_QUEUE_NUM 5
+
+#ifdef CONFIG_8723AU_AP_MODE
+
+/* STA flags */
+#define WLAN_STA_AUTH BIT(0)
+#define WLAN_STA_ASSOC BIT(1)
+#define WLAN_STA_PS BIT(2)
+#define WLAN_STA_TIM BIT(3)
+#define WLAN_STA_PERM BIT(4)
+#define WLAN_STA_AUTHORIZED BIT(5)
+#define WLAN_STA_PENDING_POLL BIT(6) /* pending activity poll not ACKed */
+#define WLAN_STA_SHORT_PREAMBLE BIT(7)
+#define WLAN_STA_PREAUTH BIT(8)
+#define WLAN_STA_WME BIT(9)
+#define WLAN_STA_MFP BIT(10)
+#define WLAN_STA_HT BIT(11)
+#define WLAN_STA_WPS BIT(12)
+#define WLAN_STA_MAYBE_WPS BIT(13)
+#define WLAN_STA_NONERP BIT(31)
+
+#endif
+
+#define IEEE_CMD_SET_WPA_PARAM 1
+#define IEEE_CMD_SET_WPA_IE 2
+#define IEEE_CMD_SET_ENCRYPTION 3
+
+#define IEEE_CRYPT_ALG_NAME_LEN 16
+
+#define WPA_CIPHER_NONE BIT(0)
+#define WPA_CIPHER_WEP40 BIT(1)
+#define WPA_CIPHER_WEP104 BIT(2)
+#define WPA_CIPHER_TKIP BIT(3)
+#define WPA_CIPHER_CCMP BIT(4)
+
+
+
+#define WPA_SELECTOR_LEN 4
+extern u8 RTW_WPA_OUI23A_TYPE[];
+extern u16 RTW_WPA_VERSION23A;
+extern u8 WPA_AUTH_KEY_MGMT_NONE23A[];
+extern u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X23A[];
+extern u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X23A[];
+extern u8 WPA_CIPHER_SUITE_NONE23A[];
+extern u8 WPA_CIPHER_SUITE_WEP4023A[];
+extern u8 WPA_CIPHER_SUITE_TKIP23A[];
+extern u8 WPA_CIPHER_SUITE_WRAP23A[];
+extern u8 WPA_CIPHER_SUITE_CCMP23A[];
+extern u8 WPA_CIPHER_SUITE_WEP10423A[];
+
+
+#define RSN_HEADER_LEN 4
+#define RSN_SELECTOR_LEN 4
+
+extern u16 RSN_VERSION_BSD23A;
+extern u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X23A[];
+extern u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X23A[];
+extern u8 RSN_CIPHER_SUITE_NONE23A[];
+extern u8 RSN_CIPHER_SUITE_WEP4023A[];
+extern u8 RSN_CIPHER_SUITE_TKIP23A[];
+extern u8 RSN_CIPHER_SUITE_WRAP23A[];
+extern u8 RSN_CIPHER_SUITE_CCMP23A[];
+extern u8 RSN_CIPHER_SUITE_WEP10423A[];
+
+enum ratr_table_mode {
+ RATR_INX_WIRELESS_NGB = 0, /* BGN 40 MHz 2SS 1SS */
+ RATR_INX_WIRELESS_NG = 1, /* GN or N */
+ RATR_INX_WIRELESS_NB = 2, /* BGN 20 MHz 2SS 1SS or BN */
+ RATR_INX_WIRELESS_N = 3,
+ RATR_INX_WIRELESS_GB = 4,
+ RATR_INX_WIRELESS_G = 5,
+ RATR_INX_WIRELESS_B = 6,
+ RATR_INX_WIRELESS_MC = 7,
+ RATR_INX_WIRELESS_AC_N = 8,
+};
+
+enum NETWORK_TYPE
+{
+ WIRELESS_INVALID = 0,
+ /* Sub-Element */
+ WIRELESS_11B = BIT(0), /* tx: cck only , rx: cck only, hw: cck */
+ WIRELESS_11G = BIT(1), /* tx: ofdm only, rx: ofdm & cck, hw: cck & ofdm */
+ WIRELESS_11A = BIT(2), /* tx: ofdm only, rx: ofdm only, hw: ofdm only */
+ WIRELESS_11_24N = BIT(3), /* tx: MCS only, rx: MCS & cck, hw: MCS & cck */
+ WIRELESS_11_5N = BIT(4), /* tx: MCS only, rx: MCS & ofdm, hw: ofdm only */
+ /* WIRELESS_AUTO = BIT(5), */
+ WIRELESS_AC = BIT(6),
+
+ /* Combination */
+ WIRELESS_11BG = (WIRELESS_11B|WIRELESS_11G), /* tx: cck & ofdm, rx: cck & ofdm & MCS, hw: cck & ofdm */
+ WIRELESS_11G_24N = (WIRELESS_11G|WIRELESS_11_24N), /* tx: ofdm & MCS, rx: ofdm & cck & MCS, hw: cck & ofdm */
+ WIRELESS_11A_5N = (WIRELESS_11A|WIRELESS_11_5N), /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
+ WIRELESS_11BG_24N = (WIRELESS_11B|WIRELESS_11G|WIRELESS_11_24N), /* tx: ofdm & cck & MCS, rx: ofdm & cck & MCS, hw: ofdm & cck */
+ WIRELESS_11AGN = (WIRELESS_11A|WIRELESS_11G|WIRELESS_11_24N|WIRELESS_11_5N), /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
+ WIRELESS_11ABGN = (WIRELESS_11A|WIRELESS_11B|WIRELESS_11G|WIRELESS_11_24N|WIRELESS_11_5N),
+};
+
+#define SUPPORTED_24G_NETTYPE_MSK (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N)
+#define SUPPORTED_5G_NETTYPE_MSK (WIRELESS_11A | WIRELESS_11_5N)
+
+#define IsSupported24G(NetType) ((NetType) & SUPPORTED_24G_NETTYPE_MSK ? true : false)
+#define IsSupported5G(NetType) ((NetType) & SUPPORTED_5G_NETTYPE_MSK ? true : false)
+
+#define IsEnableHWCCK(NetType) IsSupported24G(NetType)
+#define IsEnableHWOFDM(NetType) ((NetType) & (WIRELESS_11G|WIRELESS_11_24N|SUPPORTED_5G_NETTYPE_MSK) ? true : false)
+
+#define IsSupportedRxCCK(NetType) IsEnableHWCCK(NetType)
+#define IsSupportedRxOFDM(NetType) IsEnableHWOFDM(NetType)
+#define IsSupportedRxMCS(NetType) IsEnableHWOFDM(NetType)
+
+#define IsSupportedTxCCK(NetType) ((NetType) & (WIRELESS_11B) ? true : false)
+#define IsSupportedTxOFDM(NetType) ((NetType) & (WIRELESS_11G|WIRELESS_11A) ? true : false)
+#define IsSupportedTxMCS(NetType) ((NetType) & (WIRELESS_11_24N|WIRELESS_11_5N) ? true : false)
+
+
+struct ieee_param {
+ u32 cmd;
+ u8 sta_addr[ETH_ALEN];
+ union {
+ struct {
+ u8 name;
+ u32 value;
+ } wpa_param;
+ struct {
+ u32 len;
+ u8 reserved[32];
+ u8 data[0];
+ } wpa_ie;
+ struct{
+ int command;
+ int reason_code;
+ } mlme;
+ struct {
+ u8 alg[IEEE_CRYPT_ALG_NAME_LEN];
+ u8 set_tx;
+ u32 err;
+ u8 idx;
+ u8 seq[8]; /* sequence counter (set: RX, get: TX) */
+ u16 key_len;
+ u8 key[0];
+ } crypt;
+#ifdef CONFIG_8723AU_AP_MODE
+ struct {
+ u16 aid;
+ u16 capability;
+ int flags;
+ u8 tx_supp_rates[16];
+ struct ieee80211_ht_cap ht_cap;
+ } add_sta;
+ struct {
+ u8 reserved[2];/* for set max_num_sta */
+ u8 buf[0];
+ } bcn_ie;
+#endif
+
+ } u;
+};
+
+
+#define MIN_FRAG_THRESHOLD 256U
+#define MAX_FRAG_THRESHOLD 2346U
+
+/* QoS,QOS */
+#define NORMAL_ACK 0
+#define NO_ACK 1
+#define NON_EXPLICIT_ACK 2
+#define BLOCK_ACK 3
+
+/* IEEE 802.11 defines */
+
+#define P80211_OUI_LEN 3
+
+struct ieee80211_snap_hdr {
+
+ u8 dsap; /* always 0xAA */
+ u8 ssap; /* always 0xAA */
+ u8 ctrl; /* always 0x03 */
+ u8 oui[P80211_OUI_LEN]; /* organizational universal id */
+
+} __attribute__ ((packed));
+
+
+#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
+
+#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
+
+#define WLAN_QC_GET_TID(qc) ((qc) & 0x0f)
+
+#define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTW_IEEE80211_SCTL_FRAG)
+#define WLAN_GET_SEQ_SEQ(seq) ((seq) & RTW_IEEE80211_SCTL_SEQ)
+
+
+#define WLAN_REASON_JOIN_WRONG_CHANNEL 65534
+#define WLAN_REASON_EXPIRATION_CHK 65535
+
+
+
+#define IEEE80211_STATMASK_SIGNAL (1<<0)
+#define IEEE80211_STATMASK_RSSI (1<<1)
+#define IEEE80211_STATMASK_NOISE (1<<2)
+#define IEEE80211_STATMASK_RATE (1<<3)
+#define IEEE80211_STATMASK_WEMASK 0x7
+
+
+#define IEEE80211_CCK_MODULATION (1<<0)
+#define IEEE80211_OFDM_MODULATION (1<<1)
+
+#define IEEE80211_24GHZ_BAND (1<<0)
+#define IEEE80211_52GHZ_BAND (1<<1)
+
+#define IEEE80211_CCK_RATE_LEN 4
+#define IEEE80211_NUM_OFDM_RATESLEN 8
+
+
+#define IEEE80211_CCK_RATE_1MB 0x02
+#define IEEE80211_CCK_RATE_2MB 0x04
+#define IEEE80211_CCK_RATE_5MB 0x0B
+#define IEEE80211_CCK_RATE_11MB 0x16
+#define IEEE80211_OFDM_RATE_LEN 8
+#define IEEE80211_OFDM_RATE_6MB 0x0C
+#define IEEE80211_OFDM_RATE_9MB 0x12
+#define IEEE80211_OFDM_RATE_12MB 0x18
+#define IEEE80211_OFDM_RATE_18MB 0x24
+#define IEEE80211_OFDM_RATE_24MB 0x30
+#define IEEE80211_OFDM_RATE_36MB 0x48
+#define IEEE80211_OFDM_RATE_48MB 0x60
+#define IEEE80211_OFDM_RATE_54MB 0x6C
+#define IEEE80211_BASIC_RATE_MASK 0x80
+
+#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
+#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
+#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
+#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
+#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
+#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
+#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
+#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
+#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
+#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
+#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
+#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
+
+#define IEEE80211_CCK_RATES_MASK 0x0000000F
+#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
+ IEEE80211_CCK_RATE_2MB_MASK)
+#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \
+ IEEE80211_CCK_RATE_5MB_MASK | \
+ IEEE80211_CCK_RATE_11MB_MASK)
+
+#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
+#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
+ IEEE80211_OFDM_RATE_12MB_MASK | \
+ IEEE80211_OFDM_RATE_24MB_MASK)
+#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \
+ IEEE80211_OFDM_RATE_9MB_MASK | \
+ IEEE80211_OFDM_RATE_18MB_MASK | \
+ IEEE80211_OFDM_RATE_36MB_MASK | \
+ IEEE80211_OFDM_RATE_48MB_MASK | \
+ IEEE80211_OFDM_RATE_54MB_MASK)
+#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
+ IEEE80211_CCK_DEFAULT_RATES_MASK)
+
+#define IEEE80211_NUM_OFDM_RATES 8
+#define IEEE80211_NUM_CCK_RATES 4
+#define IEEE80211_OFDM_SHIFT_MASK_A 4
+
+#define WEP_KEYS 4
+#define WEP_KEY_LEN 13
+
+
+
+/*
+
+ 802.11 data frame from AP
+
+ ,-------------------------------------------------------------------.
+Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
+ |------|------|---------|---------|---------|------|---------|------|
+Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs |
+ | | tion | (BSSID) | | | ence | data | |
+ `-------------------------------------------------------------------'
+
+Total: 28-2340 bytes
+
+*/
+
+struct ieee80211_header_data {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[6];
+ u8 addr2[6];
+ u8 addr3[6];
+ u16 seq_ctrl;
+};
+
+struct ieee80211_info_element_hdr {
+ u8 id;
+ u8 len;
+} __attribute__ ((packed));
+
+struct ieee80211_info_element {
+ u8 id;
+ u8 len;
+ u8 data[0];
+} __attribute__ ((packed));
+
+
+struct ieee80211_txb {
+ u8 nr_frags;
+ u8 encrypted;
+ u16 reserved;
+ u16 frag_size;
+ u16 payload_size;
+ struct sk_buff *fragments[0];
+};
+
+
+/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
+ * only use 8, and then use extended rates for the remaining supported
+ * rates. Other APs, however, stick all of their supported rates on the
+ * main rates information element... */
+#define MAX_RATES_LENGTH ((u8)12)
+#define MAX_RATES_EX_LENGTH ((u8)16)
+#define MAX_CHANNEL_NUMBER 161
+
+#define MAX_WPA_IE_LEN (256)
+#define MAX_WPS_IE_LEN (512)
+#define MAX_P2P_IE_LEN (256)
+#define MAX_WFD_IE_LEN (128)
+
+#define IW_ESSID_MAX_SIZE 32
+
+/*
+join_res:
+-1: authentication fail
+-2: association fail
+> 0: TID
+*/
+
+#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
+#define DEFAULT_FTS 2346
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2],((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5]
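As a hedged illustration only (the wrapper function below is hypothetical, not part of this header), MAC_FMT and MAC_ARG pair up as a printk-style format string and its six byte arguments:

static void example_print_mac(u8 *addr)
{
	/* MAC_FMT supplies "%02x:%02x:..." and MAC_ARG expands the six bytes */
	pr_info("peer MAC " MAC_FMT "\n", MAC_ARG(addr));
}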
+
+#define CFG_IEEE80211_RESERVE_FCS (1<<0)
+#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
+
+#define MAXTID 16
+
+#define IEEE_A (1<<0)
+#define IEEE_B (1<<1)
+#define IEEE_G (1<<2)
+#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
+
+/* Baron move to ieee80211.c */
+int ieee80211_is_empty_essid23a(const char *essid, int essid_len);
+
+enum _PUBLIC_ACTION{
+ ACT_PUBLIC_BSSCOEXIST = 0, /* 20/40 BSS Coexistence */
+ ACT_PUBLIC_DSE_ENABLE = 1,
+ ACT_PUBLIC_DSE_DEENABLE = 2,
+ ACT_PUBLIC_DSE_REG_LOCATION = 3,
+ ACT_PUBLIC_EXT_CHL_SWITCH = 4,
+ ACT_PUBLIC_DSE_MSR_REQ = 5,
+ ACT_PUBLIC_DSE_MSR_RPRT = 6,
+ ACT_PUBLIC_MP = 7, /* Measurement Pilot */
+ ACT_PUBLIC_DSE_PWR_CONSTRAINT = 8,
+ ACT_PUBLIC_VENDOR = 9, /* for WIFI_DIRECT */
+ ACT_PUBLIC_GAS_INITIAL_REQ = 10,
+ ACT_PUBLIC_GAS_INITIAL_RSP = 11,
+ ACT_PUBLIC_GAS_COMEBACK_REQ = 12,
+ ACT_PUBLIC_GAS_COMEBACK_RSP = 13,
+ ACT_PUBLIC_TDLS_DISCOVERY_RSP = 14,
+ ACT_PUBLIC_LOCATION_TRACK = 15,
+ ACT_PUBLIC_MAX
+};
+
+#define WME_OUI_TYPE 2
+#define WME_OUI_SUBTYPE_INFORMATION_ELEMENT 0
+#define WME_OUI_SUBTYPE_PARAMETER_ELEMENT 1
+#define WME_OUI_SUBTYPE_TSPEC_ELEMENT 2
+#define WME_VERSION 1
+
+
+#define OUI_BROADCOM 0x00904c /* Broadcom (Epigram) */
+
+#define VENDOR_HT_CAPAB_OUI_TYPE 0x33 /* 00-90-4c:0x33 */
+
+/* Represent channel details, subset of ieee80211_channel */
+struct rtw_ieee80211_channel {
+ /* enum ieee80211_band band; */
+ /* u16 center_freq; */
+ u16 hw_value;
+ u32 flags;
+ /* int max_antenna_gain; */
+ /* int max_power; */
+ /* int max_reg_power; */
+ /* bool beacon_found; */
+ /* u32 orig_flags; */
+ /* int orig_mag; */
+ /* int orig_mpwr; */
+};
+
+#define CHAN_FMT \
+ /*"band:%d, "*/ \
+ /*"center_freq:%u, "*/ \
+ "hw_value:%u, " \
+ "flags:0x%08x" \
+ /*"max_antenna_gain:%d\n"*/ \
+ /*"max_power:%d\n"*/ \
+ /*"max_reg_power:%d\n"*/ \
+ /*"beacon_found:%u\n"*/ \
+ /*"orig_flags:0x%08x\n"*/ \
+ /*"orig_mag:%d\n"*/ \
+ /*"orig_mpwr:%d\n"*/
+
+#define CHAN_ARG(channel) \
+ /*(channel)->band*/ \
+ /*, (channel)->center_freq*/ \
+ (channel)->hw_value \
+ , (channel)->flags \
+ /*, (channel)->max_antenna_gain*/ \
+ /*, (channel)->max_power*/ \
+ /*, (channel)->max_reg_power*/ \
+ /*, (channel)->beacon_found*/ \
+ /*, (channel)->orig_flags*/ \
+ /*, (channel)->orig_mag*/ \
+ /*, (channel)->orig_mpwr*/ \
+
+/* Parsed Information Elements */
+struct rtw_ieee802_11_elems {
+ u8 *ssid;
+ u8 ssid_len;
+ u8 *supp_rates;
+ u8 supp_rates_len;
+ u8 *fh_params;
+ u8 fh_params_len;
+ u8 *ds_params;
+ u8 ds_params_len;
+ u8 *cf_params;
+ u8 cf_params_len;
+ u8 *tim;
+ u8 tim_len;
+ u8 *ibss_params;
+ u8 ibss_params_len;
+ u8 *challenge;
+ u8 challenge_len;
+ u8 *erp_info;
+ u8 erp_info_len;
+ u8 *ext_supp_rates;
+ u8 ext_supp_rates_len;
+ u8 *wpa_ie;
+ u8 wpa_ie_len;
+ u8 *rsn_ie;
+ u8 rsn_ie_len;
+ u8 *wme;
+ u8 wme_len;
+ u8 *wme_tspec;
+ u8 wme_tspec_len;
+ u8 *wps_ie;
+ u8 wps_ie_len;
+ u8 *power_cap;
+ u8 power_cap_len;
+ u8 *supp_channels;
+ u8 supp_channels_len;
+ u8 *mdie;
+ u8 mdie_len;
+ u8 *ftie;
+ u8 ftie_len;
+ u8 *timeout_int;
+ u8 timeout_int_len;
+ u8 *ht_capabilities;
+ u8 ht_capabilities_len;
+ u8 *ht_operation;
+ u8 ht_operation_len;
+ u8 *vendor_ht_cap;
+ u8 vendor_ht_cap_len;
+};
+
+enum parse_res {
+ ParseOK = 0,
+ ParseUnknown = 1,
+ ParseFailed = -1
+};
+
+enum parse_res rtw_ieee802_11_parse_elems23a(u8 *start, uint len,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors);
+
+u8 *rtw_set_fixed_ie23a(unsigned char *pbuf, unsigned int len, unsigned char *source, unsigned int *frlen);
+u8 *rtw_set_ie23a(u8 *pbuf, int index, uint len, u8 *source, uint *frlen);
+
+enum secondary_ch_offset {
+ SCN = 0, /* no secondary channel */
+ SCA = 1, /* secondary channel above */
+ SCB = 3, /* secondary channel below */
+};
+u8 secondary_ch_offset_to_hal_ch_offset23a(u8 ch_offset);
+u8 hal_ch_offset_to_secondary_ch_offset23a(u8 ch_offset);
+u8 *rtw_set_ie23a_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode, u8 new_ch, u8 ch_switch_cnt);
+u8 *rtw_set_ie23a_secondary_ch_offset(u8 *buf, u32 *buf_len, u8 secondary_ch_offset);
+u8 *rtw_set_ie23a_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl, u8 flags, u16 reason, u16 precedence);
+
+u8 *rtw_get_ie23a(u8 *pbuf, int index, int *len, int limit);
+u8 *rtw_get_ie23a_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen);
+int rtw_ies_remove_ie23a(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len);
+
+void rtw_set_supported_rate23a(u8 *SupportedRates, uint mode);
+
+unsigned char *rtw_get_wpa_ie23a(unsigned char *pie, int *wpa_ie_len, int limit);
+unsigned char *rtw_get_wpa2_ie23a(unsigned char *pie, int *rsn_ie_len, int limit);
+int rtw_get_wpa_cipher_suite23a(u8 *s);
+int rtw_get_wpa2_cipher_suite23a(u8 *s);
+int rtw_parse_wpa_ie23a(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x);
+int rtw_parse_wpa2_ie23a(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x);
+
+int rtw_get_sec_ie23a(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len);
+
+u8 rtw_is_wps_ie23a(u8 *ie_ptr, uint *wps_ielen);
+u8 *rtw_get_wps_ie23a(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen);
+u8 *rtw_get_wps_attr23a(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_attr, u32 *len_attr);
+u8 *rtw_get_wps_attr_content23a(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_content, uint *len_content);
+
+/**
+ * for_each_ie - iterate over contiguous IEs in a buffer
+ * @ie: iterator pointer, set to each information element in turn
+ * @buf: buffer holding the information elements
+ * @buf_len: total length of @buf in bytes
+ */
+#define for_each_ie(ie, buf, buf_len) \
+ for (ie = (void *)buf; ((u8 *)ie - (u8 *)buf + 1) < buf_len; ie = (void *)((u8 *)ie + *((u8 *)ie + 1) + 2))
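A minimal usage sketch, assuming a caller that already holds a buffer of raw IEs; it walks the buffer with for_each_ie using the struct ieee80211_info_element layout declared earlier in this header (the wrapper function itself is hypothetical):

static void example_walk_ies(u8 *buf, uint buf_len)
{
	struct ieee80211_info_element *ie;

	/* ie steps from one element to the next using each element's len byte */
	for_each_ie(ie, buf, buf_len)
		pr_debug("IE id %u, len %u\n", ie->id, ie->len);
}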
+
+void dump_ies23a(u8 *buf, u32 buf_len);
+void dump_wps_ie23a(u8 *ie, u32 ie_len);
+
+#ifdef CONFIG_8723AU_P2P
+void dump_p2p_ie23a(u8 *ie, u32 ie_len);
+u8 *rtw_get_p2p_ie23a(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen);
+u8 *rtw_get_p2p_attr23a(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id, u8 *buf_attr, u32 *len_attr);
+u8 *rtw_get_p2p_attr23a_content(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id, u8 *buf_content, uint *len_content);
+u32 rtw_set_p2p_attr_content23a(u8 *pbuf, u8 attr_id, u16 attr_len, u8 *pdata_attr);
+void rtw_wlan_bssid_ex_remove_p2p_attr23a(struct wlan_bssid_ex *bss_ex, u8 attr_id);
+#endif
+
+#ifdef CONFIG_8723AU_P2P
+int rtw_get_wfd_ie(u8 *in_ie, int in_len, u8 *wfd_ie, uint *wfd_ielen);
+int rtw_get_wfd_attr_content(u8 *wfd_ie, uint wfd_ielen, u8 target_attr_id, u8 *attr_content, uint *attr_contentlen);
+#endif /* CONFIG_8723AU_P2P */
+
+uint rtw_get_rateset_len23a(u8 *rateset);
+
+struct registry_priv;
+int rtw_generate_ie23a(struct registry_priv *pregistrypriv);
+
+
+int rtw_get_bit_value_from_ieee_value23a(u8 val);
+
+uint rtw_is_cckrates_included23a(u8 *rate);
+
+uint rtw_is_cckratesonly_included23a(u8 *rate);
+
+int rtw_check_network_type23a(unsigned char *rate, int ratelen, int channel);
+
+void rtw_get_bcn_info23a(struct wlan_network *pnetwork);
+
+void rtw_macaddr_cfg23a(u8 *mac_addr);
+
+u16 rtw_mcs_rate23a(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate);
+
+int rtw_action_frame_parse23a(const u8 *frame, u32 frame_len, u8 *category, u8 *action);
+const char *action_public_str23a(u8 action);
+
+#endif /* IEEE80211_H */
diff --git a/drivers/staging/rtl8723au/include/ioctl_cfg80211.h b/drivers/staging/rtl8723au/include/ioctl_cfg80211.h
new file mode 100644
index 000000000000..0eb9036d7250
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/ioctl_cfg80211.h
@@ -0,0 +1,119 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __IOCTL_CFG80211_H__
+#define __IOCTL_CFG80211_H__
+
+struct rtw_wdev_invit_info {
+ u8 token;
+ u8 flags;
+ u8 status;
+ u8 req_op_ch;
+ u8 rsp_op_ch;
+};
+
+#define rtw_wdev_invit_info_init(invit_info) \
+ do { \
+ (invit_info)->token = 0; \
+ (invit_info)->flags = 0x00; \
+ (invit_info)->status = 0xff; \
+ (invit_info)->req_op_ch = 0; \
+ (invit_info)->rsp_op_ch = 0; \
+ } while (0)
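A minimal sketch of how the initializer above might be used to put the invitation bookkeeping back into its idle state (the wrapper function is hypothetical):

static void example_reset_invit(struct rtw_wdev_invit_info *info)
{
	/* token/flags/channels back to 0, status back to 0xff */
	rtw_wdev_invit_info_init(info);
}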
+
+struct rtw_wdev_priv {
+ struct wireless_dev *rtw_wdev;
+
+ struct rtw_adapter *padapter;
+
+ struct cfg80211_scan_request *scan_request;
+ spinlock_t scan_req_lock;
+
+ struct net_device *pmon_ndev;/* for monitor interface */
+ char ifname_mon[IFNAMSIZ + 1]; /* name for monitor interface */
+
+ u8 p2p_enabled;
+
+ u8 provdisc_req_issued;
+
+ struct rtw_wdev_invit_info invit_info;
+
+ bool block;
+ bool power_mgmt;
+};
+
+#define wdev_to_priv(w) ((struct rtw_wdev_priv *)(wdev_priv(w)))
+
+#define wiphy_to_adapter(x) \
+ (struct rtw_adapter *)(((struct rtw_wdev_priv *) \
+ wiphy_priv(x))->padapter)
+
+#define wiphy_to_wdev(x) \
+ (struct wireless_dev *)(((struct rtw_wdev_priv *) \
+ wiphy_priv(x))->rtw_wdev)
+
+int rtw_wdev_alloc(struct rtw_adapter *padapter, struct device *dev);
+void rtw_wdev_free(struct wireless_dev *wdev);
+void rtw_wdev_unregister(struct wireless_dev *wdev);
+
+void rtw_cfg80211_init_wiphy(struct rtw_adapter *padapter);
+
+void rtw_cfg80211_surveydone_event_callback(struct rtw_adapter *padapter);
+
+void rtw_cfg80211_indicate_connect(struct rtw_adapter *padapter);
+void rtw_cfg80211_indicate_disconnect(struct rtw_adapter *padapter);
+void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
+ bool aborted);
+
+#ifdef CONFIG_8723AU_AP_MODE
+void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter,
+ u8 *pmgmt_frame, uint frame_len);
+void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter,
+ unsigned char *da, unsigned short reason);
+#endif /* CONFIG_8723AU_AP_MODE */
+
+void rtw_cfg80211_issue_p2p_provision_request23a(struct rtw_adapter *padapter,
+ const u8 *buf, size_t len);
+void rtw_cfg80211_rx_p2p_action_public(struct rtw_adapter *padapter,
+ u8 *pmgmt_frame, uint frame_len);
+void rtw_cfg80211_rx_action_p2p(struct rtw_adapter *padapter,
+ u8 *pmgmt_frame, uint frame_len);
+void rtw_cfg80211_rx_action(struct rtw_adapter *adapter, u8 *frame,
+ uint frame_len, const char *msg);
+
+int rtw_cfg80211_set_mgnt_wpsp2pie(struct net_device *net, char *buf, int len,
+ int type);
+
+bool rtw_cfg80211_pwr_mgmt(struct rtw_adapter *adapter);
+
+#define rtw_cfg80211_rx_mgmt(adapter, freq, sig_dbm, buf, len, gfp) \
+ cfg80211_rx_mgmt((adapter)->rtw_wdev, freq, sig_dbm, buf, len, 0, gfp)
+
+#define rtw_cfg80211_send_rx_assoc(adapter, bss, buf, len) \
+ cfg80211_send_rx_assoc((adapter)->pnetdev, bss, buf, len)
+
+#define rtw_cfg80211_mgmt_tx_status(adapter, cookie, buf, len, ack, gfp) \
+ cfg80211_mgmt_tx_status((adapter)->rtw_wdev, cookie, buf, \
+ len, ack, gfp)
+
+#define rtw_cfg80211_ready_on_channel(adapter, cookie, chan, \
+ channel_type, duration, gfp) \
+ cfg80211_ready_on_channel((adapter)->rtw_wdev, cookie, chan, \
+ duration, gfp)
+#define rtw_cfg80211_remain_on_channel_expired(adapter, cookie, chan, \
+ chan_type, gfp) \
+ cfg80211_remain_on_channel_expired((adapter)->rtw_wdev, \
+ cookie, chan, gfp)
+
+#endif /* __IOCTL_CFG80211_H__ */
diff --git a/drivers/staging/rtl8723au/include/mlme_osdep.h b/drivers/staging/rtl8723au/include/mlme_osdep.h
new file mode 100644
index 000000000000..b7132a9a1378
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/mlme_osdep.h
@@ -0,0 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __MLME_OSDEP_H_
+#define __MLME_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+void rtw_os_indicate_disconnect23a(struct rtw_adapter *adapter);
+void rtw_os_indicate_connect23a(struct rtw_adapter *adapter);
+void rtw_os_indicate_scan_done23a(struct rtw_adapter *padapter, bool aborted);
+void rtw_report_sec_ie23a(struct rtw_adapter *adapter, u8 authmode, u8 *sec_ie);
+
+void rtw_reset_securitypriv23a(struct rtw_adapter *adapter);
+
+#endif /* _MLME_OSDEP_H_ */
diff --git a/drivers/staging/rtl8723au/include/mp_custom_oid.h b/drivers/staging/rtl8723au/include/mp_custom_oid.h
new file mode 100644
index 000000000000..da197cf678a1
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/mp_custom_oid.h
@@ -0,0 +1,342 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __CUSTOM_OID_H
+#define __CUSTOM_OID_H
+
+/* 0xFF818000 - 0xFF81802F RTL8180 Mass Production Kit */
+/* 0xFF818500 - 0xFF81850F RTL8185 Setup Utility */
+/* 0xFF818580 - 0xFF81858F RTL8185 Phy Status Utility */
+
+/* For Production Kit with Agilent Equipments */
+/* in order to make our custom oids hopefully somewhat unique */
+/* we will use 0xFF (indicating implementation specific OID) */
+/* 81 (first byte of the non-zero Realtek unique identifier) */
+/* 80 (second byte of the non-zero Realtek unique identifier) */
+/* XX (the custom OID number - providing 255 possible custom oids) */
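Purely to illustrate the numbering scheme described above (the helper is hypothetical), a custom OID splits into the 0xFF implementation-specific marker, the 0x81/0x80 Realtek identifier bytes, and the custom OID number:

static void example_decode_oid(u32 oid)
{
	u8 impl_specific = (oid >> 24) & 0xff;	/* expected 0xFF */
	u8 vendor_hi = (oid >> 16) & 0xff;	/* expected 0x81 */
	u8 vendor_lo = (oid >> 8) & 0xff;	/* expected 0x80 */
	u8 custom = oid & 0xff;			/* the custom OID number */

	pr_debug("OID 0x%08x -> %02x %02x %02x %02x\n",
		 oid, impl_specific, vendor_hi, vendor_lo, custom);
}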
+
+#define OID_RT_PRO_RESET_DUT 0xFF818000
+#define OID_RT_PRO_SET_DATA_RATE 0xFF818001
+#define OID_RT_PRO_START_TEST 0xFF818002
+#define OID_RT_PRO_STOP_TEST 0xFF818003
+#define OID_RT_PRO_SET_PREAMBLE 0xFF818004
+#define OID_RT_PRO_SET_SCRAMBLER 0xFF818005
+#define OID_RT_PRO_SET_FILTER_BB 0xFF818006
+#define OID_RT_PRO_SET_MANUAL_DIVERSITY_BB 0xFF818007
+#define OID_RT_PRO_SET_CHANNEL_DIRECT_CALL 0xFF818008
+#define OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL 0xFF818009
+#define OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL 0xFF81800A
+
+#define OID_RT_PRO_SET_TX_ANTENNA_BB 0xFF81800D
+#define OID_RT_PRO_SET_ANTENNA_BB 0xFF81800E
+#define OID_RT_PRO_SET_CR_SCRAMBLER 0xFF81800F
+#define OID_RT_PRO_SET_CR_NEW_FILTER 0xFF818010
+#define OID_RT_PRO_SET_TX_POWER_CONTROL 0xFF818011
+#define OID_RT_PRO_SET_CR_TX_CONFIG 0xFF818012
+#define OID_RT_PRO_GET_TX_POWER_CONTROL 0xFF818013
+#define OID_RT_PRO_GET_CR_SIGNAL_QUALITY 0xFF818014
+#define OID_RT_PRO_SET_CR_SETPOINT 0xFF818015
+#define OID_RT_PRO_SET_INTEGRATOR 0xFF818016
+#define OID_RT_PRO_SET_SIGNAL_QUALITY 0xFF818017
+#define OID_RT_PRO_GET_INTEGRATOR 0xFF818018
+#define OID_RT_PRO_GET_SIGNAL_QUALITY 0xFF818019
+#define OID_RT_PRO_QUERY_EEPROM_TYPE 0xFF81801A
+#define OID_RT_PRO_WRITE_MAC_ADDRESS 0xFF81801B
+#define OID_RT_PRO_READ_MAC_ADDRESS 0xFF81801C
+#define OID_RT_PRO_WRITE_CIS_DATA 0xFF81801D
+#define OID_RT_PRO_READ_CIS_DATA 0xFF81801E
+#define OID_RT_PRO_WRITE_POWER_CONTROL 0xFF81801F
+#define OID_RT_PRO_READ_POWER_CONTROL 0xFF818020
+#define OID_RT_PRO_WRITE_EEPROM 0xFF818021
+#define OID_RT_PRO_READ_EEPROM 0xFF818022
+#define OID_RT_PRO_RESET_TX_PACKET_SENT 0xFF818023
+#define OID_RT_PRO_QUERY_TX_PACKET_SENT 0xFF818024
+#define OID_RT_PRO_RESET_RX_PACKET_RECEIVED 0xFF818025
+#define OID_RT_PRO_QUERY_RX_PACKET_RECEIVED 0xFF818026
+#define OID_RT_PRO_QUERY_RX_PACKET_CRC32_ERROR 0xFF818027
+#define OID_RT_PRO_QUERY_CURRENT_ADDRESS 0xFF818028
+#define OID_RT_PRO_QUERY_PERMANENT_ADDRESS 0xFF818029
+#define OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS 0xFF81802A
+#define OID_RT_PRO_RECEIVE_PACKET 0xFF81802C
+/* added by Owen on 04/08/03 for Cameo's request */
+#define OID_RT_PRO_WRITE_EEPROM_BYTE 0xFF81802D
+#define OID_RT_PRO_READ_EEPROM_BYTE 0xFF81802E
+#define OID_RT_PRO_SET_MODULATION 0xFF81802F
+/* */
+
+#define OID_RT_DRIVER_OPTION 0xFF818080
+#define OID_RT_RF_OFF 0xFF818081
+#define OID_RT_AUTH_STATUS 0xFF818082
+
+/* */
+#define OID_RT_PRO_SET_CONTINUOUS_TX 0xFF81800B
+#define OID_RT_PRO_SET_SINGLE_CARRIER_TX 0xFF81800C
+#define OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX 0xFF81802B
+#define OID_RT_PRO_SET_SINGLE_TONE_TX 0xFF818043
+/* */
+
+
+/* by Owen for RTL8185 Phy Status Report Utility */
+#define OID_RT_UTILITYfalse_ALARM_COUNTERS 0xFF818580
+#define OID_RT_UTILITY_SELECT_DEBUG_MODE 0xFF818581
+#define OID_RT_UTILITY_SELECT_SUBCARRIER_NUMBER 0xFF818582
+#define OID_RT_UTILITY_GET_RSSI_STATUS 0xFF818583
+#define OID_RT_UTILITY_GET_FRAME_DETECTION_STATUS 0xFF818584
+#define OID_RT_UTILITY_GET_AGC_AND_FREQUENCY_OFFSET_ESTIMATION_STATUS 0xFF818585
+#define OID_RT_UTILITY_GET_CHANNEL_ESTIMATION_STATUS 0xFF818586
+
+/* by Owen on 03/09/19-03/09/22 for RTL8185 */
+#define OID_RT_WIRELESS_MODE 0xFF818500
+#define OID_RT_SUPPORTED_RATES 0xFF818501
+#define OID_RT_DESIRED_RATES 0xFF818502
+#define OID_RT_WIRELESS_MODE_STARTING_ADHOC 0xFF818503
+/* */
+
+#define OID_RT_GET_CONNECT_STATE 0xFF030001
+#define OID_RT_RESCAN 0xFF030002
+#define OID_RT_SET_KEY_LENGTH 0xFF030003
+#define OID_RT_SET_DEFAULT_KEY_ID 0xFF030004
+
+#define OID_RT_SET_CHANNEL 0xFF010182
+#define OID_RT_SET_SNIFFER_MODE 0xFF010183
+#define OID_RT_GET_SIGNAL_QUALITY 0xFF010184
+#define OID_RT_GET_SMALL_PACKET_CRC 0xFF010185
+#define OID_RT_GET_MIDDLE_PACKET_CRC 0xFF010186
+#define OID_RT_GET_LARGE_PACKET_CRC 0xFF010187
+#define OID_RT_GET_TX_RETRY 0xFF010188
+#define OID_RT_GET_RX_RETRY 0xFF010189
+#define OID_RT_PRO_SET_FW_DIG_STATE 0xFF01018A/* S */
+#define OID_RT_PRO_SET_FW_RA_STATE 0xFF01018B/* S */
+
+#define OID_RT_GET_RX_TOTAL_PACKET 0xFF010190
+#define OID_RT_GET_TX_BEACON_OK 0xFF010191
+#define OID_RT_GET_TX_BEACON_ERR 0xFF010192
+#define OID_RT_GET_RX_ICV_ERR 0xFF010193
+#define OID_RT_SET_ENCRYPTION_ALGORITHM 0xFF010194
+#define OID_RT_SET_NO_AUTO_RESCAN 0xFF010195
+#define OID_RT_GET_PREAMBLE_MODE 0xFF010196
+#define OID_RT_GET_DRIVER_UP_DELTA_TIME 0xFF010197
+#define OID_RT_GET_AP_IP 0xFF010198
+#define OID_RT_GET_CHANNELPLAN 0xFF010199
+#define OID_RT_SET_PREAMBLE_MODE 0xFF01019A
+#define OID_RT_SET_BCN_INTVL 0xFF01019B
+#define OID_RT_GET_RF_VENDER 0xFF01019C
+#define OID_RT_DEDICATE_PROBE 0xFF01019D
+#define OID_RT_PRO_RX_FILTER_PATTERN 0xFF01019E
+
+#define OID_RT_GET_DCST_CURRENT_THRESHOLD 0xFF01019F
+
+#define OID_RT_GET_CCA_ERR 0xFF0101A0
+#define OID_RT_GET_CCA_UPGRADE_THRESHOLD 0xFF0101A1
+#define OID_RT_GET_CCA_FALLBACK_THRESHOLD 0xFF0101A2
+
+#define OID_RT_GET_CCA_UPGRADE_EVALUATE_TIMES 0xFF0101A3
+#define OID_RT_GET_CCA_FALLBACK_EVALUATE_TIMES 0xFF0101A4
+
+/* by Owen on 03/31/03 for Cameo's request */
+#define OID_RT_SET_RATE_ADAPTIVE 0xFF0101A5
+/* */
+#define OID_RT_GET_DCST_EVALUATE_PERIOD 0xFF0101A5
+#define OID_RT_GET_DCST_TIME_UNIT_INDEX 0xFF0101A6
+#define OID_RT_GET_TOTAL_TX_BYTES 0xFF0101A7
+#define OID_RT_GET_TOTAL_RX_BYTES 0xFF0101A8
+#define OID_RT_CURRENT_TX_POWER_LEVEL 0xFF0101A9
+#define OID_RT_GET_ENC_KEY_MISMATCH_COUNT 0xFF0101AA
+#define OID_RT_GET_ENC_KEY_MATCH_COUNT 0xFF0101AB
+#define OID_RT_GET_CHANNEL 0xFF0101AC
+
+#define OID_RT_SET_CHANNELPLAN 0xFF0101AD
+#define OID_RT_GET_HARDWARE_RADIO_OFF 0xFF0101AE
+#define OID_RT_CHANNELPLAN_BY_COUNTRY 0xFF0101AF
+#define OID_RT_SCAN_AVAILABLE_BSSID 0xFF0101B0
+#define OID_RT_GET_HARDWARE_VERSION 0xFF0101B1
+#define OID_RT_GET_IS_ROAMING 0xFF0101B2
+#define OID_RT_GET_IS_PRIVACY 0xFF0101B3
+#define OID_RT_GET_KEY_MISMATCH 0xFF0101B4
+#define OID_RT_SET_RSSI_ROAM_TRAFFIC_TH 0xFF0101B5
+#define OID_RT_SET_RSSI_ROAM_SIGNAL_TH 0xFF0101B6
+#define OID_RT_RESET_LOG 0xFF0101B7
+#define OID_RT_GET_LOG 0xFF0101B8
+#define OID_RT_SET_INDICATE_HIDDEN_AP 0xFF0101B9
+#define OID_RT_GET_HEADER_FAIL 0xFF0101BA
+#define OID_RT_SUPPORTED_WIRELESS_MODE 0xFF0101BB
+#define OID_RT_GET_CHANNEL_LIST 0xFF0101BC
+#define OID_RT_GET_SCAN_IN_PROGRESS 0xFF0101BD
+#define OID_RT_GET_TX_INFO 0xFF0101BE
+#define OID_RT_RF_READ_WRITE_OFFSET 0xFF0101BF
+#define OID_RT_RF_READ_WRITE 0xFF0101C0
+
+/* For Netgear request. 2005.01.13, by rcnjko. */
+#define OID_RT_FORCED_DATA_RATE 0xFF0101C1
+#define OID_RT_WIRELESS_MODE_FOR_SCAN_LIST 0xFF0101C2
+/* For Netgear request. 2005.02.17, by rcnjko. */
+#define OID_RT_GET_BSS_WIRELESS_MODE 0xFF0101C3
+/* For AZ project. 2005.06.27, by rcnjko. */
+#define OID_RT_SCAN_WITH_MAGIC_PACKET 0xFF0101C4
+
+/* Vincent 8185MP */
+#define OID_RT_PRO_RX_FILTER 0xFF0111C0
+
+/* Andy TEST */
+/* define OID_RT_PRO_WRITE_REGISTRY 0xFF0111C1 */
+/* define OID_RT_PRO_READ_REGISTRY 0xFF0111C2 */
+#define OID_CE_USB_WRITE_REGISTRY 0xFF0111C1
+#define OID_CE_USB_READ_REGISTRY 0xFF0111C2
+
+
+#define OID_RT_PRO_SET_INITIAL_GAIN 0xFF0111C3
+#define OID_RT_PRO_SET_BB_RF_STANDBY_MODE 0xFF0111C4
+#define OID_RT_PRO_SET_BB_RF_SHUTDOWN_MODE 0xFF0111C5
+#define OID_RT_PRO_SET_TX_CHARGE_PUMP 0xFF0111C6
+#define OID_RT_PRO_SET_RX_CHARGE_PUMP 0xFF0111C7
+#define OID_RT_PRO_RF_WRITE_REGISTRY 0xFF0111C8
+#define OID_RT_PRO_RF_READ_REGISTRY 0xFF0111C9
+#define OID_RT_PRO_QUERY_RF_TYPE 0xFF0111CA
+
+/* AP OID */
+#define OID_RT_AP_GET_ASSOCIATED_STATION_LIST 0xFF010300
+#define OID_RT_AP_GET_CURRENT_TIME_STAMP 0xFF010301
+#define OID_RT_AP_SWITCH_INTO_AP_MODE 0xFF010302
+#define OID_RT_AP_SET_DTIM_PERIOD 0xFF010303
+#define OID_RT_AP_SUPPORTED 0xFF010304 /* Determine if driver supports AP mode. 2004.08.27, by rcnjko. */
+#define OID_RT_AP_SET_PASSPHRASE 0xFF010305 /* Set WPA-PSK passphrase into authenticator. 2005.07.08, by rcnjko. */
+
+/* 8187MP. 2004.09.06, by rcnjko. */
+#define OID_RT_PRO8187_WI_POLL 0xFF818780
+#define OID_RT_PRO_WRITE_BB_REG 0xFF818781
+#define OID_RT_PRO_READ_BB_REG 0xFF818782
+#define OID_RT_PRO_WRITE_RF_REG 0xFF818783
+#define OID_RT_PRO_READ_RF_REG 0xFF818784
+
+/* Meeting House. added by Annie, 2005-07-20. */
+#define OID_RT_MH_VENDER_ID 0xFFEDC100
+
+/* 8711 MP OID added 20051230. */
+#define OID_RT_PRO8711_JOIN_BSS 0xFF871100/* S */
+
+#define OID_RT_PRO_READ_REGISTER 0xFF871101 /* Q */
+#define OID_RT_PRO_WRITE_REGISTER 0xFF871102 /* S */
+
+#define OID_RT_PRO_BURST_READ_REGISTER 0xFF871103 /* Q */
+#define OID_RT_PRO_BURST_WRITE_REGISTER 0xFF871104 /* S */
+
+#define OID_RT_PRO_WRITE_TXCMD 0xFF871105 /* S */
+
+#define OID_RT_PRO_READ16_EEPROM 0xFF871106 /* Q */
+#define OID_RT_PRO_WRITE16_EEPROM 0xFF871107 /* S */
+
+#define OID_RT_PRO_H2C_SET_COMMAND 0xFF871108 /* S */
+#define OID_RT_PRO_H2C_QUERY_RESULT 0xFF871109 /* Q */
+
+#define OID_RT_PRO8711_WI_POLL 0xFF87110A /* Q */
+#define OID_RT_PRO8711_PKT_LOSS 0xFF87110B /* Q */
+#define OID_RT_RD_ATTRIB_MEM 0xFF87110C/* Q */
+#define OID_RT_WR_ATTRIB_MEM 0xFF87110D/* S */
+
+
+/* Method 2 for H2C/C2H */
+#define OID_RT_PRO_H2C_CMD_MODE 0xFF871110 /* S */
+#define OID_RT_PRO_H2C_CMD_RSP_MODE 0xFF871111 /* Q */
+#define OID_RT_PRO_H2C_CMD_EVENT_MODE 0xFF871112 /* S */
+#define OID_RT_PRO_WAIT_C2H_EVENT 0xFF871113 /* Q */
+#define OID_RT_PRO_RW_ACCESS_PROTOCOL_TEST 0xFF871114/* Q */
+
+#define OID_RT_PRO_SCSI_ACCESS_TEST 0xFF871115 /* Q, S */
+
+#define OID_RT_PRO_SCSI_TCPIPOFFLOAD_OUT 0xFF871116 /* S */
+#define OID_RT_PRO_SCSI_TCPIPOFFLOAD_IN 0xFF871117 /* Q,S */
+#define OID_RT_RRO_RX_PKT_VIA_IOCTRL 0xFF871118 /* Q */
+#define OID_RT_RRO_RX_PKTARRAY_VIA_IOCTRL 0xFF871119 /* Q */
+
+#define OID_RT_RPO_SET_PWRMGT_TEST 0xFF87111A /* S */
+#define OID_RT_PRO_QRY_PWRMGT_TEST 0XFF87111B /* Q */
+#define OID_RT_RPO_ASYNC_RWIO_TEST 0xFF87111C /* S */
+#define OID_RT_RPO_ASYNC_RWIO_POLL 0xFF87111D /* Q */
+#define OID_RT_PRO_SET_RF_INTFS 0xFF87111E /* S */
+#define OID_RT_POLL_RX_STATUS 0xFF87111F /* Q */
+
+#define OID_RT_PRO_CFG_DEBUG_MESSAGE 0xFF871120 /* Q,S */
+#define OID_RT_PRO_SET_DATA_RATE_EX 0xFF871121/* S */
+#define OID_RT_PRO_SET_BASIC_RATE 0xFF871122/* S */
+#define OID_RT_PRO_READ_TSSI 0xFF871123/* S */
+#define OID_RT_PRO_SET_POWER_TRACKING 0xFF871124/* S */
+
+
+#define OID_RT_PRO_QRY_PWRSTATE 0xFF871150 /* Q */
+#define OID_RT_PRO_SET_PWRSTATE 0xFF871151 /* S */
+
+/* Method 2 , using workitem */
+#define OID_RT_SET_READ_REG 0xFF871181 /* S */
+#define OID_RT_SET_WRITE_REG 0xFF871182 /* S */
+#define OID_RT_SET_BURST_READ_REG 0xFF871183 /* S */
+#define OID_RT_SET_BURST_WRITE_REG 0xFF871184 /* S */
+#define OID_RT_SET_WRITE_TXCMD 0xFF871185 /* S */
+#define OID_RT_SET_READ16_EEPROM 0xFF871186 /* S */
+#define OID_RT_SET_WRITE16_EEPROM 0xFF871187 /* S */
+#define OID_RT_QRY_POLL_WKITEM 0xFF871188 /* Q */
+
+/* For SDIO INTERFACE only */
+#define OID_RT_PRO_SYNCPAGERW_SRAM 0xFF8711A0 /* Q, S */
+#define OID_RT_PRO_871X_DRV_EXT 0xFF8711A1
+
+/* For USB INTERFACE only */
+#define OID_RT_PRO_USB_VENDOR_REQ 0xFF8711B0 /* Q, S */
+#define OID_RT_PRO_SCSI_AUTO_TEST 0xFF8711B1 /* S */
+#define OID_RT_PRO_USB_MAC_AC_FIFO_WRITE 0xFF8711B2 /* S */
+#define OID_RT_PRO_USB_MAC_RX_FIFO_READ 0xFF8711B3 /* Q */
+#define OID_RT_PRO_USB_MAC_RX_FIFO_POLLING 0xFF8711B4 /* Q */
+
+#define OID_RT_PRO_H2C_SET_RATE_TABLE 0xFF8711FB /* S */
+#define OID_RT_PRO_H2C_GET_RATE_TABLE 0xFF8711FC /* S */
+#define OID_RT_PRO_H2C_C2H_LBK_TEST 0xFF8711FE
+
+#define OID_RT_PRO_ENCRYPTION_CTRL 0xFF871200 /* Q, S */
+#define OID_RT_PRO_ADD_STA_INFO 0xFF871201 /* S */
+#define OID_RT_PRO_DELE_STA_INFO 0xFF871202 /* S */
+#define OID_RT_PRO_QUERY_DR_VARIABLE 0xFF871203 /* Q */
+
+#define OID_RT_PRO_RX_PACKET_TYPE 0xFF871204 /* Q, S */
+
+#define OID_RT_PRO_READ_EFUSE 0xFF871205 /* Q */
+#define OID_RT_PRO_WRITE_EFUSE 0xFF871206 /* S */
+#define OID_RT_PRO_RW_EFUSE_PGPKT 0xFF871207 /* Q, S */
+#define OID_RT_GET_EFUSE_CURRENT_SIZE 0xFF871208 /* Q */
+
+#define OID_RT_SET_BANDWIDTH 0xFF871209 /* S */
+#define OID_RT_SET_CRYSTAL_CAP 0xFF87120A /* S */
+
+#define OID_RT_SET_RX_PACKET_TYPE 0xFF87120B /* S */
+
+#define OID_RT_GET_EFUSE_MAX_SIZE 0xFF87120C /* Q */
+
+#define OID_RT_PRO_SET_TX_AGC_OFFSET 0xFF87120D /* S */
+
+#define OID_RT_PRO_SET_PKT_TEST_MODE 0xFF87120E /* S */
+
+#define OID_RT_PRO_FOR_EVM_TEST_SETTING 0xFF87120F /* S */
+
+#define OID_RT_PRO_GET_THERMAL_METER 0xFF871210 /* Q */
+
+#define OID_RT_RESET_PHY_RX_PACKET_COUNT 0xFF871211 /* S */
+#define OID_RT_GET_PHY_RX_PACKET_RECEIVED 0xFF871212 /* Q */
+#define OID_RT_GET_PHY_RX_PACKET_CRC32_ERROR 0xFF871213 /* Q */
+
+#define OID_RT_SET_POWER_DOWN 0xFF871214 /* S */
+
+#define OID_RT_GET_POWER_MODE 0xFF871215 /* Q */
+
+#define OID_RT_PRO_EFUSE 0xFF871216 /* Q, S */
+#define OID_RT_PRO_EFUSE_MAP 0xFF871217 /* Q, S */
+
+#endif /* ifndef __CUSTOM_OID_H */
diff --git a/drivers/staging/rtl8723au/include/odm.h b/drivers/staging/rtl8723au/include/odm.h
new file mode 100644
index 000000000000..dfedfbb48fc2
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/odm.h
@@ -0,0 +1,1205 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+#ifndef __HALDMOUTSRC_H__
+#define __HALDMOUTSRC_H__
+
+/* */
+/* Definition */
+/* */
+/* */
+/* 2011/09/22 MH Define all team support ability. */
+/* */
+
+/* */
+/* 2011/09/22 MH Define for all teams. Please define the constant in your precompiled header. */
+/* */
+/* define DM_ODM_SUPPORT_AP 0 */
+/* define DM_ODM_SUPPORT_ADSL 0 */
+/* define DM_ODM_SUPPORT_CE 0 */
+/* define DM_ODM_SUPPORT_MP 1 */
+
+#define TP_MODE 0
+#define RSSI_MODE 1
+#define TRAFFIC_LOW 0
+#define TRAFFIC_HIGH 1
+
+
+/* */
+/* 3 Tx Power Tracking */
+/* 3============================================================ */
+#define DPK_DELTA_MAPPING_NUM 13
+#define index_mapping_HP_NUM 15
+
+
+/* */
+/* 3 PSD Handler */
+/* 3============================================================ */
+
+#define AFH_PSD 1 /* 0:normal PSD scan, 1: only do 20 pts PSD */
+#define MODE_40M 0 /* 0:20M, 1:40M */
+#define PSD_TH2 3
+#define PSD_CHMIN 20 /* Minimum channel number for BT AFH */
+#define SIR_STEP_SIZE 3
+#define Smooth_Size_1 5
+#define Smooth_TH_1 3
+#define Smooth_Size_2 10
+#define Smooth_TH_2 4
+#define Smooth_Size_3 20
+#define Smooth_TH_3 4
+#define Smooth_Step_Size 5
+#define Adaptive_SIR 1
+#define PSD_RESCAN 4
+#define PSD_SCAN_INTERVAL 700 /* ms */
+
+/* 8723A High Power IGI Setting */
+#define DM_DIG_HIGH_PWR_IGI_LOWER_BOUND 0x22
+#define DM_DIG_Gmode_HIGH_PWR_IGI_LOWER_BOUND 0x28
+#define DM_DIG_HIGH_PWR_THRESHOLD 0x3a
+
+/* LPS define */
+#define DM_DIG_FA_TH0_LPS 4 /* 4 in lps */
+#define DM_DIG_FA_TH1_LPS 15 /* 15 lps */
+#define DM_DIG_FA_TH2_LPS 30 /* 30 lps */
+#define RSSI_OFFSET_DIG 0x05
+
+/* ANT Test */
+#define ANTTESTALL 0x00 /* Ant A or B will be tested */
+#define ANTTESTA 0x01 /* Ant A will be tested */
+#define ANTTESTB 0x02 /* Ant B will be tested */
+
+
+/* */
+/* structure and define */
+/* */
+
+/* */
+/* 2011/09/20 MH Add for AP/ADSL pseudo DM structure requirement. */
+/* We need to move this to another position??? */
+/* */
+struct rtl8723a_priv {
+ u8 temp;
+};
+
+
+struct dig_t {
+ u8 Dig_Enable_Flag;
+ u8 Dig_Ext_Port_Stage;
+
+ int RssiLowThresh;
+ int RssiHighThresh;
+
+ u32 FALowThresh;
+ u32 FAHighThresh;
+
+ u8 CurSTAConnectState;
+ u8 PreSTAConnectState;
+ u8 CurMultiSTAConnectState;
+
+ u8 PreIGValue;
+ u8 CurIGValue;
+ u8 BackupIGValue;
+
+ s8 BackoffVal;
+ s8 BackoffVal_range_max;
+ s8 BackoffVal_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 Rssi_val_min;
+
+ u8 PreCCK_CCAThres;
+ u8 CurCCK_CCAThres;
+ u8 PreCCKPDState;
+ u8 CurCCKPDState;
+
+ u8 LargeFAHit;
+ u8 ForbiddenIGI;
+ u32 Recover_cnt;
+
+ u8 DIG_Dynamic_MIN_0;
+ u8 DIG_Dynamic_MIN_1;
+ bool bMediaConnect_0;
+ bool bMediaConnect_1;
+
+ u32 AntDiv_RSSI_max;
+ u32 RSSI_max;
+};
+
+struct dynamic_pwr_sav {
+ u8 PreCCAState;
+ u8 CurCCAState;
+
+ u8 PreRFState;
+ u8 CurRFState;
+
+ int Rssi_val_min;
+
+ u8 initialize;
+ u32 Reg874, RegC70, Reg85C, RegA74;
+};
+
+struct false_alarm_stats {
+ u32 Cnt_Parity_Fail;
+ u32 Cnt_Rate_Illegal;
+ u32 Cnt_Crc8_fail;
+ u32 Cnt_Mcs_fail;
+ u32 Cnt_Ofdm_fail;
+ u32 Cnt_Cck_fail;
+ u32 Cnt_all;
+ u32 Cnt_Fast_Fsync;
+ u32 Cnt_SB_Search_fail;
+ u32 Cnt_OFDM_CCA;
+ u32 Cnt_CCK_CCA;
+ u32 Cnt_CCA_all;
+ u32 Cnt_BW_USC; /* Gary */
+ u32 Cnt_BW_LSC; /* Gary */
+};
+
+struct pri_cca {
+ u8 PriCCA_flag;
+ u8 intf_flag;
+ u8 intf_type;
+ u8 DupRTS_flag;
+ u8 Monitor_flag;
+};
+
+struct rx_hp {
+ u8 RXHP_flag;
+ u8 PSD_func_trigger;
+ u8 PSD_bitmap_RXHP[80];
+ u8 Pre_IGI;
+ u8 Cur_IGI;
+ u8 Pre_pw_th;
+ u8 Cur_pw_th;
+ bool First_time_enter;
+ bool RXHP_enable;
+ u8 TP_Mode;
+ struct timer_list PSDTimer;
+};
+
+#define ASSOCIATE_ENTRY_NUM 32 /* Max size of AsocEntry[]. */
+#define ODM_ASSOCIATE_ENTRY_NUM ASSOCIATE_ENTRY_NUM
+
+/* This indicates two different steps. */
+/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to the signal on the air. */
+/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in SWAW_STEP_PEAK */
+/* with original RSSI to determine if it is necessary to switch antenna. */
+#define SWAW_STEP_PEAK 0
+#define SWAW_STEP_DETERMINE 1
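A hedged sketch of the two-step flow described above, reusing fields from the struct sw_ant_sw declared later in this header; which field actually holds the step and how the antenna is toggled are assumptions, not the driver's real implementation:

static void example_sw_antenna_step(struct sw_ant_sw *swat, s32 cur_rssi)
{
	if (swat->SWAS_NoLink_State == SWAW_STEP_PEAK) {
		/* step 1: record the current RSSI, switch antenna and keep listening */
		swat->PreRSSI = cur_rssi;
		swat->SWAS_NoLink_State = SWAW_STEP_DETERMINE;
	} else {
		/* step 2: compare against the captured RSSI and decide whether to switch back */
		if (cur_rssi <= swat->PreRSSI)
			swat->CurAntenna = swat->PreAntenna;
		swat->SWAS_NoLink_State = SWAW_STEP_PEAK;
	}
}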
+
+#define TP_MODE 0
+#define RSSI_MODE 1
+#define TRAFFIC_LOW 0
+#define TRAFFIC_HIGH 1
+
+struct sw_ant_sw {
+ u8 try_flag;
+ s32 PreRSSI;
+ u8 CurAntenna;
+ u8 PreAntenna;
+ u8 RSSI_Trying;
+ u8 TestMode;
+ u8 bTriggerAntennaSwitch;
+ u8 SelectAntennaMap;
+ u8 RSSI_target;
+
+ /* Before link Antenna Switch check */
+ u8 SWAS_NoLink_State;
+ u32 SWAS_NoLink_BK_Reg860;
+ bool ANTA_ON; /* To indicate Ant A is on or not */
+ bool ANTB_ON; /* To indicate Ant B is on or not */
+
+ s32 RSSI_sum_A;
+ s32 RSSI_sum_B;
+ s32 RSSI_cnt_A;
+ s32 RSSI_cnt_B;
+
+ u64 lastTxOkCnt;
+ u64 lastRxOkCnt;
+ u64 TXByteCnt_A;
+ u64 TXByteCnt_B;
+ u64 RXByteCnt_A;
+ u64 RXByteCnt_B;
+ u8 TrafficLoad;
+ struct timer_list SwAntennaSwitchTimer;
+};
+
+struct edca_turbo {
+ bool bCurrentTurboEDCA;
+ bool bIsCurRDLState;
+ u32 prv_traffic_idx; /* edca turbo */
+
+};
+
+struct odm_rate_adapt {
+ u8 Type; /* DM_Type_ByFW/DM_Type_ByDriver */
+ u8 HighRSSIThresh; /* if RSSI > HighRSSIThresh => RATRState is DM_RATR_STA_HIGH */
+ u8 LowRSSIThresh; /* if RSSI <= LowRSSIThresh => RATRState is DM_RATR_STA_LOW */
+ u8 RATRState; /* Current RSSI level, DM_RATR_STA_HIGH/DM_RATR_STA_MIDDLE/DM_RATR_STA_LOW */
+ u32 LastRATR; /* RATR Register Content */
+};
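A hedged sketch of the thresholding rule spelled out in the comments above (the wrapper function is hypothetical and only logs the resulting level):

static void example_ratr_level(const struct odm_rate_adapt *ra, u8 rssi)
{
	if (rssi > ra->HighRSSIThresh)
		pr_debug("RATRState -> DM_RATR_STA_HIGH\n");
	else if (rssi <= ra->LowRSSIThresh)
		pr_debug("RATRState -> DM_RATR_STA_LOW\n");
	else
		pr_debug("RATRState -> DM_RATR_STA_MIDDLE\n");
}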
+
+#define IQK_MAC_REG_NUM 4
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM_MAX 10
+#define IQK_BB_REG_NUM 9
+#define HP_THERMAL_NUM 8
+
+#define AVG_THERMAL_NUM 8
+#define IQK_Matrix_REG_NUM 8
+#define IQK_Matrix_Settings_NUM (1 + 24 + 21)
+
+#define DM_Type_ByFW 0
+#define DM_Type_ByDriver 1
+
+/* Declare for common info */
+
+struct odm_phy_info {
+ u8 RxPWDBAll;
+ u8 SignalQuality; /* in 0-100 index. */
+ u8 RxMIMOSignalQuality[RF_PATH_MAX]; /* EVM */
+ u8 RxMIMOSignalStrength[RF_PATH_MAX];/* in 0~100 index */
+ s8 RxPower; /* in dBm Translate from PWdB */
+ s8 RecvSignalPower;/* Real power in dBm for this packet, no beautification and aggregation. Keep this raw info to be used for the other procedures. */
+ u8 BTRxRSSIPercentage;
+ u8 SignalStrength; /* in 0-100 index. */
+ u8 RxPwr[RF_PATH_MAX];/* per-path's pwdb */
+ u8 RxSNR[RF_PATH_MAX];/* per-path's SNR */
+};
+
+
+struct odm_phy_dbg_info {
+ /* ODM Write,debug info */
+ s8 RxSNRdB[RF_PATH_MAX];
+ u64 NumQryPhyStatus;
+ u64 NumQryPhyStatusCCK;
+ u64 NumQryPhyStatusOFDM;
+ /* Others */
+ s32 RxEVM[RF_PATH_MAX];
+
+};
+
+struct odm_packet_info {
+ u8 Rate;
+ u8 StationID;
+ bool bPacketMatchBSSID;
+ bool bPacketToSelf;
+ bool bPacketBeacon;
+};
+
+struct odm_mac_info {
+ u8 test;
+
+};
+
+
+enum {
+ /* BB Team */
+ ODM_DIG = 0x00000001,
+ ODM_HIGH_POWER = 0x00000002,
+ ODM_CCK_CCA_TH = 0x00000004,
+ ODM_FA_STATISTICS = 0x00000008,
+ ODM_RAMASK = 0x00000010,
+ ODM_RSSI_MONITOR = 0x00000020,
+ ODM_SW_ANTDIV = 0x00000040,
+ ODM_HW_ANTDIV = 0x00000080,
+ ODM_BB_PWRSV = 0x00000100,
+ ODM_2TPATHDIV = 0x00000200,
+ ODM_1TPATHDIV = 0x00000400,
+ ODM_PSD2AFH = 0x00000800
+};
+
+/* */
+/* 2011/20/20 MH For MP driver RT_WLAN_STA = struct sta_info */
+/* Please declare below ODM relative info in your STA info structure. */
+/* */
+struct odm_sta_info {
+ /* Driver Write */
+ bool bUsed; /* record the sta status link or not? */
+ u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
+
+ /* ODM Write */
+ /* 1 PHY_STATUS_INFO */
+ u8 RSSI_Path[4]; /* */
+ u8 RSSI_Ave;
+ u8 RXEVM[4];
+ u8 RXSNR[4];
+
+ /* ODM Write */
+ /* 1 TX_INFO (may changed by IC) */
+
+ /* */
+ /* Please use a compile flag to disable this structure for ICs other than the 88E. */
+ /* Move To lower layer. */
+ /* */
+ /* ODM Write Wilson will handle this part(said by Luke.Lee) */
+};
+
+/* */
+/* 2011/10/20 MH Define Common info enum for all team. */
+/* */
+
+enum odm_cmninfo {
+ /* Fixed value: */
+ /* */
+
+ ODM_CMNINFO_PLATFORM = 0,
+ ODM_CMNINFO_ABILITY, /* enum odm_ability */
+ ODM_CMNINFO_INTERFACE, /* enum odm_interface_def */
+ ODM_CMNINFO_MP_TEST_CHIP,
+ ODM_CMNINFO_IC_TYPE, /* enum odm_ic_type_def */
+ ODM_CMNINFO_CUT_VER, /* enum odm_cut_version */
+ ODM_CMNINFO_FAB_VER, /* enum odm_fab_version */
+ ODM_CMNINFO_RF_TYPE, /* enum rf_path_def or enum odm_rf_type? */
+ ODM_CMNINFO_BOARD_TYPE, /* enum odm_board_type */
+ ODM_CMNINFO_EXT_LNA, /* true */
+ ODM_CMNINFO_EXT_PA,
+ ODM_CMNINFO_EXT_TRSW,
+ ODM_CMNINFO_PATCH_ID, /* CUSTOMER ID */
+ ODM_CMNINFO_BINHCT_TEST,
+ ODM_CMNINFO_BWIFI_TEST,
+ ODM_CMNINFO_SMART_CONCURRENT,
+
+
+ /* */
+ /* Dynamic value: */
+ /* */
+ ODM_CMNINFO_MAC_PHY_MODE, /* enum odm_mac_phy_mode */
+ ODM_CMNINFO_TX_UNI,
+ ODM_CMNINFO_RX_UNI,
+ ODM_CMNINFO_WM_MODE, /* enum odm_wireless_mode */
+ ODM_CMNINFO_BAND, /* enum odm_band_type */
+ ODM_CMNINFO_SEC_CHNL_OFFSET, /* enum odm_sec_chnl_offset */
+ ODM_CMNINFO_SEC_MODE, /* enum odm_security */
+ ODM_CMNINFO_BW, /* enum odm_band_width */
+ ODM_CMNINFO_CHNL,
+
+ ODM_CMNINFO_DMSP_GET_VALUE,
+ ODM_CMNINFO_BUDDY_ADAPTOR,
+ ODM_CMNINFO_DMSP_IS_MASTER,
+ ODM_CMNINFO_SCAN,
+ ODM_CMNINFO_POWER_SAVING,
+ ODM_CMNINFO_ONE_PATH_CCA, /* enum odm_cca_path */
+ ODM_CMNINFO_DRV_STOP,
+ ODM_CMNINFO_PNP_IN,
+ ODM_CMNINFO_INIT_ON,
+ ODM_CMNINFO_ANT_TEST,
+ ODM_CMNINFO_NET_CLOSED,
+ ODM_CMNINFO_MP_MODE,
+
+ ODM_CMNINFO_WIFI_DIRECT,
+ ODM_CMNINFO_WIFI_DISPLAY,
+ ODM_CMNINFO_LINK,
+ ODM_CMNINFO_RSSI_MIN,
+ ODM_CMNINFO_DBG_COMP, /* u64 */
+ ODM_CMNINFO_DBG_LEVEL, /* u32 */
+ ODM_CMNINFO_RA_THRESHOLD_HIGH, /* u8 */
+ ODM_CMNINFO_RA_THRESHOLD_LOW, /* u8 */
+ ODM_CMNINFO_RF_ANTENNA_TYPE, /* u8 */
+ ODM_CMNINFO_BT_DISABLED,
+ ODM_CMNINFO_BT_OPERATION,
+ ODM_CMNINFO_BT_DIG,
+ ODM_CMNINFO_BT_BUSY, /* Check Bt is using or not */
+ ODM_CMNINFO_BT_DISABLE_EDCA,
+
+ /* */
+ /* Dynamic ptr array hook itms. */
+ /* */
+ ODM_CMNINFO_STA_STATUS,
+ ODM_CMNINFO_PHY_STATUS,
+ ODM_CMNINFO_MAC_STATUS,
+
+ ODM_CMNINFO_MAX,
+};
+
+/* Define ODM support ability. ODM_CMNINFO_ABILITY */
+enum {
+ /* BB ODM section BIT 0-15 */
+ ODM_BB_DIG = BIT0,
+ ODM_BB_RA_MASK = BIT1,
+ ODM_BB_DYNAMIC_TXPWR = BIT2,
+ ODM_BB_FA_CNT = BIT3,
+ ODM_BB_RSSI_MONITOR = BIT4,
+ ODM_BB_CCK_PD = BIT5,
+ ODM_BB_ANT_DIV = BIT6,
+ ODM_BB_PWR_SAVE = BIT7,
+ ODM_BB_PWR_TRAIN = BIT8,
+ ODM_BB_RATE_ADAPTIVE = BIT9,
+ ODM_BB_PATH_DIV = BIT10,
+ ODM_BB_PSD = BIT11,
+ ODM_BB_RXHP = BIT12,
+
+ /* MAC DM section BIT 16-23 */
+ ODM_MAC_EDCA_TURBO = BIT16,
+ ODM_MAC_EARLY_MODE = BIT17,
+
+ /* RF ODM section BIT 24-31 */
+ ODM_RF_TX_PWR_TRACK = BIT24,
+ ODM_RF_RX_GAIN_TRACK = BIT25,
+ ODM_RF_CALIBRATION = BIT26,
+
+};
+
+/* ODM_CMNINFO_INTERFACE */
+enum odm_interface_def {
+ ODM_ITRF_PCIE = 0x1,
+ ODM_ITRF_USB = 0x2,
+ ODM_ITRF_SDIO = 0x4,
+ ODM_ITRF_ALL = 0x7,
+};
+
+/* ODM_CMNINFO_IC_TYPE */
+enum odm_ic_type_def {
+ ODM_RTL8192S = BIT0,
+ ODM_RTL8192C = BIT1,
+ ODM_RTL8192D = BIT2,
+ ODM_RTL8723A = BIT3,
+ ODM_RTL8188E = BIT4,
+ ODM_RTL8812 = BIT5,
+ ODM_RTL8821 = BIT6,
+};
+
+#define ODM_IC_11N_SERIES \
+ (ODM_RTL8192S|ODM_RTL8192C|ODM_RTL8192D|ODM_RTL8723A|ODM_RTL8188E)
+#define ODM_IC_11AC_SERIES (ODM_RTL8812)
+
+/* ODM_CMNINFO_CUT_VER */
+enum odm_cut_version {
+ ODM_CUT_A = 1,
+ ODM_CUT_B = 2,
+ ODM_CUT_C = 3,
+ ODM_CUT_D = 4,
+ ODM_CUT_E = 5,
+ ODM_CUT_F = 6,
+ ODM_CUT_TEST = 7,
+};
+
+/* ODM_CMNINFO_FAB_VER */
+enum odm_fab_version {
+ ODM_TSMC = 0,
+ ODM_UMC = 1,
+};
+
+/* ODM_CMNINFO_RF_TYPE */
+/* For example 1T2R (A+AB = BIT0|BIT4|BIT5) */
+enum rf_path_def {
+ ODM_RF_TX_A = BIT0,
+ ODM_RF_TX_B = BIT1,
+ ODM_RF_TX_C = BIT2,
+ ODM_RF_TX_D = BIT3,
+ ODM_RF_RX_A = BIT4,
+ ODM_RF_RX_B = BIT5,
+ ODM_RF_RX_C = BIT6,
+ ODM_RF_RX_D = BIT7,
+};
+
+
+enum odm_rf_type {
+ ODM_1T1R = 0,
+ ODM_1T2R = 1,
+ ODM_2T2R = 2,
+ ODM_2T3R = 3,
+ ODM_2T4R = 4,
+ ODM_3T3R = 5,
+ ODM_3T4R = 6,
+ ODM_4T4R = 7,
+};
+
+/* ODM Dynamic common info value definition */
+
+enum odm_mac_phy_mode {
+ ODM_SMSP = 0,
+ ODM_DMSP = 1,
+ ODM_DMDP = 2,
+};
+
+
+enum odm_bt_coexist {
+ ODM_BT_BUSY = 1,
+ ODM_BT_ON = 2,
+ ODM_BT_OFF = 3,
+ ODM_BT_NONE = 4,
+};
+
+/* ODM_CMNINFO_OP_MODE */
+enum odm_operation_mode {
+ ODM_NO_LINK = BIT0,
+ ODM_LINK = BIT1,
+ ODM_SCAN = BIT2,
+ ODM_POWERSAVE = BIT3,
+ ODM_AP_MODE = BIT4,
+ ODM_CLIENT_MODE = BIT5,
+ ODM_AD_HOC = BIT6,
+ ODM_WIFI_DIRECT = BIT7,
+ ODM_WIFI_DISPLAY = BIT8,
+};
+
+/* ODM_CMNINFO_WM_MODE */
+enum odm_wireless_mode {
+ ODM_WM_UNKNOW = 0x0,
+ ODM_WM_B = BIT0,
+ ODM_WM_G = BIT1,
+ ODM_WM_A = BIT2,
+ ODM_WM_N24G = BIT3,
+ ODM_WM_N5G = BIT4,
+ ODM_WM_AUTO = BIT5,
+ ODM_WM_AC = BIT6,
+};
+
+/* ODM_CMNINFO_BAND */
+enum odm_band_type {
+ ODM_BAND_2_4G = BIT0,
+ ODM_BAND_5G = BIT1,
+
+};
+
+/* ODM_CMNINFO_SEC_CHNL_OFFSET */
+enum odm_sec_chnl_offset {
+ ODM_DONT_CARE = 0,
+ ODM_BELOW = 1,
+ ODM_ABOVE = 2
+};
+
+/* ODM_CMNINFO_SEC_MODE */
+enum odm_security {
+ ODM_SEC_OPEN = 0,
+ ODM_SEC_WEP40 = 1,
+ ODM_SEC_TKIP = 2,
+ ODM_SEC_RESERVE = 3,
+ ODM_SEC_AESCCMP = 4,
+ ODM_SEC_WEP104 = 5,
+ ODM_WEP_WPA_MIXED = 6, /* WEP + WPA */
+ ODM_SEC_SMS4 = 7,
+};
+
+/* ODM_CMNINFO_BW */
+enum odm_band_width {
+ ODM_BW20M = 0,
+ ODM_BW40M = 1,
+ ODM_BW80M = 2,
+ ODM_BW160M = 3,
+ ODM_BW10M = 4,
+};
+
+/* ODM_CMNINFO_CHNL */
+
+/* ODM_CMNINFO_BOARD_TYPE */
+enum odm_board_type {
+ ODM_BOARD_NORMAL = 0,
+ ODM_BOARD_HIGHPWR = 1,
+ ODM_BOARD_MINICARD = 2,
+ ODM_BOARD_SLIM = 3,
+ ODM_BOARD_COMBO = 4,
+
+};
+
+/* ODM_CMNINFO_ONE_PATH_CCA */
+enum odm_cca_path {
+ ODM_CCA_2R = 0,
+ ODM_CCA_1R_A = 1,
+ ODM_CCA_1R_B = 2,
+};
+
+struct odm_ra_info {
+ u8 RateID;
+ u32 RateMask;
+ u32 RAUseRate;
+ u8 RateSGI;
+ u8 RssiStaRA;
+ u8 PreRssiStaRA;
+ u8 SGIEnable;
+ u8 DecisionRate;
+ u8 PreRate;
+ u8 HighestRate;
+ u8 LowestRate;
+ u32 NscUp;
+ u32 NscDown;
+ u16 RTY[5];
+ u32 TOTAL;
+ u16 DROP;
+ u8 Active;
+ u16 RptTime;
+ u8 RAWaitingCounter;
+ u8 RAPendingCounter;
+ u8 PTActive; /* on or off */
+ u8 PTTryState; /* 0 trying state, 1 for decision state */
+ u8 PTStage; /* 0~6 */
+ u8 PTStopCount; /* Stop PT counter */
+ u8 PTPreRate; /* if rate change do PT */
+ u8 PTPreRssi; /* if RSSI change 5% do PT */
+ u8 PTModeSS; /* decide which rate should do PT */
+ u8 RAstage; /* StageRA, decide how many times RA will be done between PT */
+ u8 PTSmoothFactor;
+};
+
+struct iqk_matrix_regs_set {
+ bool bIQKDone;
+ s32 Value[1][IQK_Matrix_REG_NUM];
+};
+
+struct odm_rf_cal_t {
+ /* for tx power tracking */
+
+ u32 RegA24; /* for TempCCK */
+ s32 RegE94;
+ s32 RegE9C;
+ s32 RegEB4;
+ s32 RegEBC;
+
+ /* u8 bTXPowerTracking; */
+ u8 TXPowercount;
+ bool bTXPowerTrackingInit;
+ bool bTXPowerTracking;
+ u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking as default */
+ u8 TM_Trigger;
+ u8 InternalPA5G[2]; /* pathA / pathB */
+
+ u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
+ u8 ThermalValue;
+ u8 ThermalValue_LCK;
+ u8 ThermalValue_IQK;
+ u8 ThermalValue_DPK;
+ u8 ThermalValue_AVG[AVG_THERMAL_NUM];
+ u8 ThermalValue_AVG_index;
+ u8 ThermalValue_RxGain;
+ u8 ThermalValue_Crystal;
+ u8 ThermalValue_DPKstore;
+ u8 ThermalValue_DPKtrack;
+ bool TxPowerTrackingInProgress;
+ bool bDPKenable;
+
+ bool bReloadtxpowerindex;
+ u8 bRfPiEnable;
+ u32 TXPowerTrackingCallbackCnt; /* cosa add for debug */
+
+ u8 bCCKinCH14;
+ u8 CCK_index;
+ u8 OFDM_index[2];
+ bool bDoneTxpower;
+
+ u8 ThermalValue_HP[HP_THERMAL_NUM];
+ u8 ThermalValue_HP_index;
+ struct iqk_matrix_regs_set IQKMatrixRegSetting[IQK_Matrix_Settings_NUM];
+
+ u8 Delta_IQK;
+ u8 Delta_LCK;
+
+ /* for IQK */
+ u32 RegC04;
+ u32 Reg874;
+ u32 RegC08;
+ u32 RegB68;
+ u32 RegB6C;
+ u32 Reg870;
+ u32 Reg860;
+ u32 Reg864;
+
+ bool bIQKInitialized;
+ bool bLCKInProgress;
+ bool bAntennaDetected;
+ u32 ADDA_backup[IQK_ADDA_REG_NUM];
+ u32 IQK_MAC_backup[IQK_MAC_REG_NUM];
+ u32 IQK_BB_backup_recover[9];
+ u32 IQK_BB_backup[IQK_BB_REG_NUM];
+
+ /* for APK */
+ u32 APKoutput[2][2]; /* path A/B; output1_1a/output1_2a */
+ u8 bAPKdone;
+ u8 bAPKThermalMeterIgnore;
+ u8 bDPdone;
+ u8 bDPPathAOK;
+ u8 bDPPathBOK;
+};
+
+/* ODM Dynamic common info value definition */
+struct odm_fat_t {
+ u8 Bssid[6];
+ u8 antsel_rx_keep_0;
+ u8 antsel_rx_keep_1;
+ u8 antsel_rx_keep_2;
+ u32 antSumRSSI[7];
+ u32 antRSSIcnt[7];
+ u32 antAveRSSI[7];
+ u8 FAT_State;
+ u32 TrainIdx;
+ u8 antsel_a[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 antsel_b[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 antsel_c[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 MainAnt_Sum[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 AuxAnt_Sum[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 MainAnt_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 AuxAnt_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 RxIdleAnt;
+ bool bBecomeLinked;
+};
+
+enum fat_state {
+ FAT_NORMAL_STATE = 0,
+ FAT_TRAINING_STATE = 1,
+};
+
+enum ant_dif_type {
+ NO_ANTDIV = 0xFF,
+ CG_TRX_HW_ANTDIV = 0x01,
+ CGCS_RX_HW_ANTDIV = 0x02,
+ FIXED_HW_ANTDIV = 0x03,
+ CG_TRX_SMART_ANTDIV = 0x04,
+ CGCS_RX_SW_ANTDIV = 0x05,
+};
+
+/* 2011/09/22 MH Copy from SD4 defined structure. We use it to support PHY DM integration. */
+struct dm_odm_t {
+ /* struct timer_list FastAntTrainingTimer; */
+ /* */
+ /* Add for different team use temporarily */
+ /* */
+ struct rtw_adapter *Adapter; /* For CE/NIC team */
+ struct rtl8723a_priv *priv; /* For AP/ADSL team */
+ /* When you use the Adapter or priv pointer, you must make sure the pointer is ready. */
+ bool odm_ready;
+
+ struct rtl8723a_priv fake_priv;
+
+ u64 DebugComponents;
+ u32 DebugLevel;
+
+/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
+ bool bCckHighPower;
+ u8 RFPathRxEnable; /* ODM_CMNINFO_RFPATH_ENABLE */
+ u8 ControlChannel;
+/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
+
+/* 1 COMMON INFORMATION */
+
+ /* Init Value */
+/* HOOK BEFORE REG INIT----------- */
+ /* ODM Support Ability DIG/RATR/TX_PWR_TRACK/... = 1/2/3/... */
+ u32 SupportAbility;
+ /* ODM PCIE/USB/SDIO/GSPI = 0/1/2/3 */
+ u8 SupportInterface;
+ /* ODM composite or independent. Bit oriented/ 92C+92D+ .... or any other type = 1/2/3/... */
+ u32 SupportICType;
+ /* Cut Version TestChip/A-cut/B-cut... = 0/1/2/3/... */
+ u8 CutVersion;
+ /* Fab Version TSMC/UMC = 0/1 */
+ u8 FabVersion;
+ /* RF Type 4T4R/3T3R/2T2R/1T2R/1T1R/... */
+ u8 RFType;
+ /* Board Type Normal/HighPower/MiniCard/SLIM/Combo/... = 0/1/2/3/4/... */
+ u8 BoardType;
+ /* with external LNA NO/Yes = 0/1 */
+ u8 ExtLNA;
+ /* with external PA NO/Yes = 0/1 */
+ u8 ExtPA;
+ /* with external TRSW NO/Yes = 0/1 */
+ u8 ExtTRSW;
+ u8 PatchID; /* Customer ID */
+ bool bInHctTest;
+ bool bWIFITest;
+
+ bool bDualMacSmartConcurrent;
+ u32 BK_SupportAbility;
+ u8 AntDivType;
+/* HOOK BEFORE REG INIT----------- */
+
+ /* */
+ /* Dynamic Value */
+ /* */
+/* POINTER REFERENCE----------- */
+
+ u8 u8_temp;
+ bool bool_temp;
+ struct rtw_adapter *PADAPTER_temp;
+
+ /* MAC PHY Mode SMSP/DMSP/DMDP = 0/1/2 */
+ u8 *pMacPhyMode;
+ /* TX Unicast byte count */
+ u64 *pNumTxBytesUnicast;
+ /* RX Unicast byte count */
+ u64 *pNumRxBytesUnicast;
+ /* Wireless mode B/G/A/N = BIT0/BIT1/BIT2/BIT3 */
+ u8 *pWirelessMode; /* enum odm_wireless_mode */
+ /* Frequency band 2.4G/5G = 0/1 */
+ u8 *pBandType;
+ /* Secondary channel offset don't_care/below/above = 0/1/2 */
+ u8 *pSecChOffset;
+ /* Security mode Open/WEP/AES/TKIP = 0/1/2/3 */
+ u8 *pSecurity;
+ /* BW info 20M/40M/80M = 0/1/2 */
+ u8 *pBandWidth;
+ /* Central channel location Ch1/Ch2/.... */
+ u8 *pChannel; /* central channel number */
+ /* Common info for 92D DMSP */
+
+ bool *pbGetValueFromOtherMac;
+ struct rtw_adapter **pBuddyAdapter;
+ bool *pbMasterOfDMSP; /* MAC0: master, MAC1: slave */
+ /* Common info for Status */
+ bool *pbScanInProcess;
+ bool *pbPowerSaving;
+ /* CCA Path 2-path/path-A/path-B = 0/1/2; using enum odm_cca_path. */
+ u8 *pOnePathCCA;
+ /* pMgntInfo->AntennaTest */
+ u8 *pAntennaTest;
+ bool *pbNet_closed;
+/* POINTER REFERENCE----------- */
+ /* */
+/* CALL BY VALUE------------- */
+ bool bWIFI_Direct;
+ bool bWIFI_Display;
+ bool bLinked;
+ u8 RSSI_Min;
+ u8 InterfaceIndex; /* Add for 92D dual MAC: 0--Mac0 1--Mac1 */
+ bool bIsMPChip;
+ bool bOneEntryOnly;
+ /* Common info for BTDM */
+ bool bBtDisabled; /* BT is disabled */
+ bool bBtHsOperation; /* BT HS mode is under progress */
+ u8 btHsDigVal; /* use BT rssi to decide the DIG value */
+ bool bBtDisableEdcaTurbo; /* Under some condition, don't enable the EDCA Turbo */
+ bool bBtBusy; /* BT is busy. */
+/* CALL BY VALUE------------- */
+
+ /* 2 Define STA info. */
+ /* _ODM_STA_INFO */
+ /* 2012/01/12 MH For MP, we need to reduce one array pointer for default port.?? */
+ struct sta_info *pODM_StaInfo[ODM_ASSOCIATE_ENTRY_NUM];
+
+ /* */
+ /* 2012/02/14 MH Added to share 88E RA with other SW teams. */
+ /* We need to collect all supported abilities into a proper area. */
+ /* */
+ bool RaSupport88E;
+
+ /* Define ........... */
+
+ /* Latest packet phy info (ODM write) */
+ struct odm_phy_dbg_info PhyDbgInfo;
+ /* PHY_INFO_88E PhyInfo; */
+
+ /* Latest packet phy info (ODM write) */
+ struct odm_mac_info *pMacInfo;
+ /* MAC_INFO_88E MacInfo; */
+
+ /* Different team independent structure?? */
+
+ /* */
+ /* TX_RTP_CMN TX_retrpo; */
+ /* TX_RTP_88E TX_retrpo; */
+ /* TX_RTP_8195 TX_retrpo; */
+
+ /* */
+ /* ODM Structure */
+ /* */
+ struct odm_fat_t DM_FatTable;
+ struct dig_t DM_DigTable;
+ struct dynamic_pwr_sav DM_PSTable;
+ struct pri_cca DM_PriCCA;
+ struct rx_hp DM_RXHP_Table;
+ struct false_alarm_stats FalseAlmCnt;
+ struct false_alarm_stats FlaseAlmCntBuddyAdapter;
+ struct sw_ant_sw DM_SWAT_Table;
+ bool RSSI_test;
+
+ struct edca_turbo DM_EDCA_Table;
+ u32 WMMEDCA_BE;
+ /* Copy from SD4 structure */
+ /* */
+ /* ================================================== */
+ /* */
+
+ /* common */
+ bool *pbDriverStopped;
+ bool *pbDriverIsGoingToPnpSetPowerSleep;
+ bool *pinit_adpt_in_progress;
+
+ /* PSD */
+ bool bUserAssignLevel;
+ struct timer_list PSDTimer;
+ u8 RSSI_BT; /* come from BT */
+ bool bPSDinProcess;
+ bool bDMInitialGainEnable;
+
+ /* for rate adaptive, in fact, 88c/92c fw will handle this */
+ u8 bUseRAMask;
+
+ struct odm_rate_adapt RateAdaptive;
+
+
+ struct odm_rf_cal_t RFCalibrateInfo;
+
+ /* */
+ /* TX power tracking */
+ /* */
+ u8 BbSwingIdxOfdm;
+ u8 BbSwingIdxOfdmCurrent;
+ u8 BbSwingIdxOfdmBase;
+ bool BbSwingFlagOfdm;
+ u8 BbSwingIdxCck;
+ u8 BbSwingIdxCckCurrent;
+ u8 BbSwingIdxCckBase;
+ bool BbSwingFlagCck;
+ /* */
+ /* ODM system resource. */
+ /* */
+
+ /* ODM relative time. */
+ struct timer_list PathDivSwitchTimer;
+ /* 2011.09.27 add for Path Diversity */
+ struct timer_list CCKPathDiversityTimer;
+ struct timer_list FastAntTrainingTimer;
+
+ /* ODM relative workitem. */
+}; /* DM_Dynamic_Mechanism_Structure */
+
+enum odm_rf_content {
+ odm_radioa_txt = 0x1000,
+ odm_radiob_txt = 0x1001,
+ odm_radioc_txt = 0x1002,
+ odm_radiod_txt = 0x1003
+};
+
+enum odm_bb_config_type {
+ CONFIG_BB_PHY_REG,
+ CONFIG_BB_AGC_TAB,
+ CONFIG_BB_AGC_TAB_2G,
+ CONFIG_BB_AGC_TAB_5G,
+ CONFIG_BB_PHY_REG_PG,
+};
+
+/* Status code */
+enum rt_status {
+ RT_STATUS_SUCCESS,
+ RT_STATUS_FAILURE,
+ RT_STATUS_PENDING,
+ RT_STATUS_RESOURCE,
+ RT_STATUS_INVALID_CONTEXT,
+ RT_STATUS_INVALID_PARAMETER,
+ RT_STATUS_NOT_SUPPORT,
+ RT_STATUS_OS_API_FAILED,
+};
+
+/* include "odm_function.h" */
+
+/* 3=========================================================== */
+/* 3 DIG */
+/* 3=========================================================== */
+
+enum dm_dig_op {
+ DIG_TYPE_THRESH_HIGH = 0,
+ DIG_TYPE_THRESH_LOW = 1,
+ DIG_TYPE_BACKOFF = 2,
+ DIG_TYPE_RX_GAIN_MIN = 3,
+ DIG_TYPE_RX_GAIN_MAX = 4,
+ DIG_TYPE_ENABLE = 5,
+ DIG_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+};
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_SCAN_RSSI_TH 0x14 /* scan return issue for LC */
+
+
+#define DM_FALSEALARM_THRESH_LOW 400
+#define DM_FALSEALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX_NIC 0x4e
+#define DM_DIG_MIN_NIC 0x1e
+
+#define DM_DIG_MAX_AP 0x32
+#define DM_DIG_MIN_AP 0x20
+
+#define DM_DIG_MAX_NIC_HP 0x46
+#define DM_DIG_MIN_NIC_HP 0x2e
+
+#define DM_DIG_MAX_AP_HP 0x42
+#define DM_DIG_MIN_AP_HP 0x30
+
+/* vivi: 92c & 92d have different definitions, 20110504 */
+/* this is for 92c */
+#define DM_DIG_FA_TH0 0x200
+#define DM_DIG_FA_TH1 0x300
+#define DM_DIG_FA_TH2 0x400
+/* this is for 92d */
+#define DM_DIG_FA_TH0_92D 0x100
+#define DM_DIG_FA_TH1_92D 0x400
+#define DM_DIG_FA_TH2_92D 0x600
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+/* 3=========================================================== */
+/* 3 AGC RX High Power Mode */
+/* 3=========================================================== */
+#define LNA_Low_Gain_1 0x64
+#define LNA_Low_Gain_2 0x5A
+#define LNA_Low_Gain_3 0x58
+
+#define FA_RXHP_TH1 5000
+#define FA_RXHP_TH2 1500
+#define FA_RXHP_TH3 800
+#define FA_RXHP_TH4 600
+#define FA_RXHP_TH5 500
+
+/* 3=========================================================== */
+/* 3 EDCA */
+/* 3=========================================================== */
+
+/* 3=========================================================== */
+/* 3 Dynamic Tx Power */
+/* 3=========================================================== */
+/* Dynamic Tx Power Control Threshold */
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define TX_POWER_NEAR_FIELD_THRESH_AP 0x3F
+
+#define TxHighPwrLevel_Normal 0
+#define TxHighPwrLevel_Level1 1
+#define TxHighPwrLevel_Level2 2
+#define TxHighPwrLevel_BT1 3
+#define TxHighPwrLevel_BT2 4
+#define TxHighPwrLevel_15 5
+#define TxHighPwrLevel_35 6
+#define TxHighPwrLevel_50 7
+#define TxHighPwrLevel_70 8
+#define TxHighPwrLevel_100 9
+
+/* 3=========================================================== */
+/* 3 Rate Adaptive */
+/* 3=========================================================== */
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+/* 3=========================================================== */
+/* 3 BB Power Save */
+/* 3=========================================================== */
+
+
+enum dm_1r_cca {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf_def {
+ RF_Save = 0,
+ RF_Normal = 1,
+ RF_MAX = 2,
+};
+
+/* 3=========================================================== */
+/* 3 Antenna Diversity */
+/* 3=========================================================== */
+enum dm_swas {
+ Antenna_A = 1,
+ Antenna_B = 2,
+ Antenna_MAX = 3,
+};
+
+/* Maximum number of antenna detection attempts to perform; added by Roger, 2011.12.28. */
+#define MAX_ANTENNA_DETECTION_CNT 10
+
+/* */
+/* Extern Global Variables. */
+/* */
+#define OFDM_TABLE_SIZE_92C 37
+#define OFDM_TABLE_SIZE_92D 43
+#define CCK_TABLE_SIZE 33
+
+extern u32 OFDMSwingTable23A[OFDM_TABLE_SIZE_92D];
+extern u8 CCKSwingTable_Ch1_Ch1323A[CCK_TABLE_SIZE][8];
+extern u8 CCKSwingTable_Ch1423A[CCK_TABLE_SIZE][8];
+
+
+
+/* */
+/* check Sta pointer valid or not */
+/* */
+#define IS_STA_VALID(pSta) (pSta)
+/* 20100514 Joseph: Add definition for antenna switching test after link. */
+/* This indicates the two different steps. */
+/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to the signal on the air. */
+/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in SWAW_STEP_PEAK */
+/* with original RSSI to determine if it is necessary to switch antenna. */
+#define SWAW_STEP_PEAK 0
+#define SWAW_STEP_DETERMINE 1
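+/*
+ * Illustrative sketch only (not from the original sources): a SW antenna
+ * diversity routine could alternate between the two steps roughly as below;
+ * the helper and variable names are hypothetical placeholders.
+ *
+ *	if (swaw_step == SWAW_STEP_PEAK) {
+ *		switch_antenna_and_sample_rssi();
+ *		swaw_step = SWAW_STEP_DETERMINE;
+ *	} else {
+ *		if (sampled_rssi <= original_rssi)
+ *			switch_back_to_original_antenna();
+ *		swaw_step = SWAW_STEP_PEAK;
+ *	}
+ */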
+
+void ODM_Write_DIG23a(struct dm_odm_t *pDM_Odm, u8 CurrentIGI);
+void ODM_Write_CCK_CCA_Thres23a(struct dm_odm_t *pDM_Odm, u8 CurCCK_CCAThres);
+
+void ODM_SetAntenna(struct dm_odm_t *pDM_Odm, u8 Antenna);
+
+
+#define dm_RF_Saving ODM_RF_Saving23a
+void ODM_RF_Saving23a(struct dm_odm_t *pDM_Odm, u8 bForceInNormal);
+
+#define SwAntDivRestAfterLink ODM_SwAntDivRestAfterLink
+void ODM_SwAntDivRestAfterLink(struct dm_odm_t *pDM_Odm);
+
+#define dm_CheckTXPowerTracking ODM_TXPowerTrackingCheck23a
+void ODM_TXPowerTrackingCheck23a(struct dm_odm_t *pDM_Odm);
+
+bool ODM_RAStateCheck23a(struct dm_odm_t *pDM_Odm, s32 RSSI, bool bForceUpdate,
+ u8 *pRATRState);
+
+
+#define dm_SWAW_RSSI_Check ODM_SwAntDivChkPerPktRssi
+void ODM_SwAntDivChkPerPktRssi(struct dm_odm_t *pDM_Odm, u8 StationID,
+ struct odm_phy_info *pPhyInfo);
+
+u32 ConvertTo_dB23a(u32 Value);
+
+u32 GetPSDData(struct dm_odm_t *pDM_Odm, unsigned int point, u8 initial_gain_psd);
+
+void odm_DIG23abyRSSI_LPS(struct dm_odm_t *pDM_Odm);
+
+u32 ODM_Get_Rate_Bitmap23a(struct dm_odm_t *pDM_Odm, u32 macid, u32 ra_mask, u8 rssi_level);
+
+
+void ODM23a_DMInit(struct dm_odm_t *pDM_Odm);
+
+void ODM_DMWatchdog23a(struct dm_odm_t *pDM_Odm); /* For common use in the future */
+
+void ODM_CmnInfoInit23a(struct dm_odm_t *pDM_Odm, enum odm_cmninfo CmnInfo, u32 Value);
+
+void ODM23a_CmnInfoHook(struct dm_odm_t *pDM_Odm, enum odm_cmninfo CmnInfo, void *pValue);
+
+void ODM_CmnInfoPtrArrayHook23a(struct dm_odm_t *pDM_Odm, enum odm_cmninfo CmnInfo, u16 Index, void *pValue);
+
+void ODM_CmnInfoUpdate23a(struct dm_odm_t *pDM_Odm, u32 CmnInfo, u64 Value);
+
+void ODM_InitAllTimers(struct dm_odm_t *pDM_Odm);
+
+void ODM_CancelAllTimers(struct dm_odm_t *pDM_Odm);
+
+void ODM_ReleaseAllTimers(struct dm_odm_t *pDM_Odm);
+
+void ODM_ResetIQKResult(struct dm_odm_t *pDM_Odm);
+
+void ODM_AntselStatistics_88C(struct dm_odm_t *pDM_Odm, u8 MacId, u32 PWDBAll, bool isCCKrate);
+
+void ODM_SingleDualAntennaDefaultSetting(struct dm_odm_t *pDM_Odm);
+
+bool ODM_SingleDualAntennaDetection(struct dm_odm_t *pDM_Odm, u8 mode);
+
+void odm_dtc(struct dm_odm_t *pDM_Odm);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/odm_HWConfig.h b/drivers/staging/rtl8723au/include/odm_HWConfig.h
new file mode 100644
index 000000000000..147855c96ad4
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/odm_HWConfig.h
@@ -0,0 +1,174 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+#ifndef __HALHWOUTSRC_H__
+#define __HALHWOUTSRC_H__
+
+#include <Hal8723APhyCfg.h>
+
+/* */
+/* Definition */
+/* */
+/* */
+/* */
+/* CCK Rates, TxHT = 0 */
+#define DESC92C_RATE1M 0x00
+#define DESC92C_RATE2M 0x01
+#define DESC92C_RATE5_5M 0x02
+#define DESC92C_RATE11M 0x03
+
+/* OFDM Rates, TxHT = 0 */
+#define DESC92C_RATE6M 0x04
+#define DESC92C_RATE9M 0x05
+#define DESC92C_RATE12M 0x06
+#define DESC92C_RATE18M 0x07
+#define DESC92C_RATE24M 0x08
+#define DESC92C_RATE36M 0x09
+#define DESC92C_RATE48M 0x0a
+#define DESC92C_RATE54M 0x0b
+
+/* MCS Rates, TxHT = 1 */
+#define DESC92C_RATEMCS0 0x0c
+#define DESC92C_RATEMCS1 0x0d
+#define DESC92C_RATEMCS2 0x0e
+#define DESC92C_RATEMCS3 0x0f
+#define DESC92C_RATEMCS4 0x10
+#define DESC92C_RATEMCS5 0x11
+#define DESC92C_RATEMCS6 0x12
+#define DESC92C_RATEMCS7 0x13
+#define DESC92C_RATEMCS8 0x14
+#define DESC92C_RATEMCS9 0x15
+#define DESC92C_RATEMCS10 0x16
+#define DESC92C_RATEMCS11 0x17
+#define DESC92C_RATEMCS12 0x18
+#define DESC92C_RATEMCS13 0x19
+#define DESC92C_RATEMCS14 0x1a
+#define DESC92C_RATEMCS15 0x1b
+#define DESC92C_RATEMCS15_SG 0x1c
+#define DESC92C_RATEMCS32 0x20
+
+
+/* */
+/* structure and define */
+/* */
+
+struct phy_rx_agc_info {
+ #ifdef __LITTLE_ENDIAN
+ u8 gain:7, trsw:1;
+ #else
+ u8 trsw:1, gain:7;
+ #endif
+};
+
+struct phy_status_rpt {
+ struct phy_rx_agc_info path_agc[RF_PATH_MAX];
+ u8 ch_corr[RF_PATH_MAX];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ u8 cck_rpt_b_ofdm_cfosho_b;
+ u8 rsvd_1;/* ch_corr_msb; */
+ u8 noise_power_db_msb;
+ u8 path_cfotail[RF_PATH_MAX];
+ u8 pcts_mask[RF_PATH_MAX];
+ s8 stream_rxevm[RF_PATH_MAX];
+ u8 path_rxsnr[RF_PATH_MAX];
+ u8 noise_power_db_lsb;
+ u8 rsvd_2[3];
+ u8 stream_csi[RF_PATH_MAX];
+ u8 stream_target_csi[RF_PATH_MAX];
+ s8 sig_evm;
+ u8 rsvd_3;
+
+#ifdef __LITTLE_ENDIAN
+ u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 idle_long:1;
+ u8 r_ant_train_en:1;
+ u8 ant_sel_b:1;
+ u8 ant_sel:1;
+#else /* _BIG_ENDIAN_ */
+ u8 ant_sel:1;
+ u8 ant_sel_b:1;
+ u8 r_ant_train_en:1;
+ u8 idle_long:1;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
+#endif
+};
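+/*
+ * Sketch (illustrative, assuming the usual RX flow): the raw PHY status bytes
+ * that follow an RX descriptor are typically interpreted by overlaying this
+ * layout, e.g.
+ *
+ *	struct phy_status_rpt *prpt = (struct phy_status_rpt *)pPhyStatus;
+ *	u8 pwdb_all = prpt->cck_sig_qual_ofdm_pwdb_all;
+ */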
+
+
+struct phy_status_rpt_8195 {
+ struct phy_rx_agc_info path_agc[2];
+ u8 ch_num[2];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ u8 cck_bb_pwr_ofdm_cfosho_b;
+ u8 cck_rx_path; /* CCK_RX_PATH [3:0] (with regA07[3:0] definition) */
+ u8 rsvd_1;
+ u8 path_cfotail[2];
+ u8 pcts_mask[2];
+ s8 stream_rxevm[2];
+ u8 path_rxsnr[2];
+ u8 rsvd_2[2];
+ u8 stream_snr[2];
+ u8 stream_csi[2];
+ u8 rsvd_3[2];
+ s8 sig_evm;
+ u8 rsvd_4;
+#ifdef __LITTLE_ENDIAN
+ u8 antidx_anta:3;
+ u8 antidx_antb:3;
+ u8 rsvd_5:2;
+#else /* _BIG_ENDIAN_ */
+ u8 rsvd_5:2;
+ u8 antidx_antb:3;
+ u8 antidx_anta:3;
+#endif
+};
+
+
+void odm_Init_RSSIForDM23a(struct dm_odm_t *pDM_Odm);
+
+void
+ODM_PhyStatusQuery23a(
+ struct dm_odm_t *pDM_Odm,
+ struct odm_phy_info *pPhyInfo,
+ u8 *pPhyStatus,
+ struct odm_packet_info *pPktinfo
+ );
+
+void ODM_MacStatusQuery23a(struct dm_odm_t *pDM_Odm,
+ u8 *pMacStatus,
+ u8 MacID,
+ bool bPacketMatchBSSID,
+ bool bPacketToSelf,
+ bool bPacketBeacon
+);
+
+enum hal_status ODM_ConfigRFWithHeaderFile23a(struct dm_odm_t *pDM_Odm,
+ enum RF_RADIO_PATH Content,
+ enum RF_RADIO_PATH eRFPath
+);
+
+enum hal_status ODM_ConfigBBWithHeaderFile23a(struct dm_odm_t *pDM_Odm,
+ enum odm_bb_config_type ConfigType
+);
+
+enum hal_status ODM_ConfigMACWithHeaderFile23a(struct dm_odm_t *pDM_Odm);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/odm_RegConfig8723A.h b/drivers/staging/rtl8723au/include/odm_RegConfig8723A.h
new file mode 100644
index 000000000000..4ea579b2e8bd
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/odm_RegConfig8723A.h
@@ -0,0 +1,34 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __INC_ODM_REGCONFIG_H_8723A
+#define __INC_ODM_REGCONFIG_H_8723A
+
+void odm_ConfigRFReg_8723A(struct dm_odm_t *pDM_Odm, u32 Addr, u32 Data,
+ enum RF_RADIO_PATH RF_PATH, u32 RegAddr);
+
+void odm_ConfigRF_RadioA_8723A(struct dm_odm_t *pDM_Odm, u32 Addr, u32 Data);
+
+void odm_ConfigRF_RadioB_8723A(struct dm_odm_t *pDM_Odm, u32 Addr, u32 Data);
+
+void odm_ConfigMAC_8723A(struct dm_odm_t *pDM_Odm, u32 Addr, u8 Data);
+
+void odm_ConfigBB_AGC_8723A(struct dm_odm_t *pDM_Odm, u32 Addr,
+ u32 Bitmask, u32 Data);
+
+void odm_ConfigBB_PHY_REG_PG_8723A(struct dm_odm_t *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data);
+
+void odm_ConfigBB_PHY_8723A(struct dm_odm_t *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data);
+
+#endif /* end of SUPPORT */
diff --git a/drivers/staging/rtl8723au/include/odm_RegDefine11AC.h b/drivers/staging/rtl8723au/include/odm_RegDefine11AC.h
new file mode 100644
index 000000000000..77b7acec5ea8
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/odm_RegDefine11AC.h
@@ -0,0 +1,49 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_REGDEFINE11AC_H__
+#define __ODM_REGDEFINE11AC_H__
+
+/* 2 RF REG LIST */
+
+
+
+/* 2 BB REG LIST */
+/* PAGE 8 */
+/* PAGE 9 */
+#define ODM_REG_OFDM_FA_RST_11AC 0x9A4
+/* PAGE A */
+#define ODM_REG_CCK_CCA_11AC 0xA0A
+#define ODM_REG_CCK_FA_RST_11AC 0xA2C
+#define ODM_REG_CCK_FA_11AC 0xA5C
+/* PAGE C */
+#define ODM_REG_IGI_A_11AC 0xC50
+/* PAGE E */
+#define ODM_REG_IGI_B_11AC 0xE50
+/* PAGE F */
+#define ODM_REG_OFDM_FA_11AC 0xF48
+
+
+/* 2 MAC REG LIST */
+
+
+
+
+/* DIG Related */
+#define ODM_BIT_IGI_11AC 0xFFFFFFFF
+
+
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/odm_RegDefine11N.h b/drivers/staging/rtl8723au/include/odm_RegDefine11N.h
new file mode 100644
index 000000000000..27782154f502
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/odm_RegDefine11N.h
@@ -0,0 +1,165 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_REGDEFINE11N_H__
+#define __ODM_REGDEFINE11N_H__
+
+
+/* 2 RF REG LIST */
+#define ODM_REG_RF_MODE_11N 0x00
+#define ODM_REG_RF_0B_11N 0x0B
+#define ODM_REG_CHNBW_11N 0x18
+#define ODM_REG_T_METER_11N 0x24
+#define ODM_REG_RF_25_11N 0x25
+#define ODM_REG_RF_26_11N 0x26
+#define ODM_REG_RF_27_11N 0x27
+#define ODM_REG_RF_2B_11N 0x2B
+#define ODM_REG_RF_2C_11N 0x2C
+#define ODM_REG_RXRF_A3_11N 0x3C
+#define ODM_REG_T_METER_92D_11N 0x42
+#define ODM_REG_T_METER_88E_11N 0x42
+
+
+
+/* 2 BB REG LIST */
+/* PAGE 8 */
+#define ODM_REG_BB_CTRL_11N 0x800
+#define ODM_REG_RF_PIN_11N 0x804
+#define ODM_REG_PSD_CTRL_11N 0x808
+#define ODM_REG_TX_ANT_CTRL_11N 0x80C
+#define ODM_REG_BB_PWR_SAV5_11N 0x818
+#define ODM_REG_CCK_RPT_FORMAT_11N 0x824
+#define ODM_REG_RX_DEFUALT_A_11N 0x858
+#define ODM_REG_RX_DEFUALT_B_11N 0x85A
+#define ODM_REG_BB_PWR_SAV3_11N 0x85C
+#define ODM_REG_ANTSEL_CTRL_11N 0x860
+#define ODM_REG_RX_ANT_CTRL_11N 0x864
+#define ODM_REG_PIN_CTRL_11N 0x870
+#define ODM_REG_BB_PWR_SAV1_11N 0x874
+#define ODM_REG_ANTSEL_PATH_11N 0x878
+#define ODM_REG_BB_3WIRE_11N 0x88C
+#define ODM_REG_SC_CNT_11N 0x8C4
+#define ODM_REG_PSD_DATA_11N 0x8B4
+/* PAGE 9 */
+#define ODM_REG_ANT_MAPPING1_11N 0x914
+#define ODM_REG_ANT_MAPPING2_11N 0x918
+/* PAGE A */
+#define ODM_REG_CCK_ANTDIV_PARA1_11N 0xA00
+#define ODM_REG_CCK_CCA_11N 0xA0A
+#define ODM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
+#define ODM_REG_CCK_ANTDIV_PARA3_11N 0xA10
+#define ODM_REG_CCK_ANTDIV_PARA4_11N 0xA14
+#define ODM_REG_CCK_FILTER_PARA1_11N 0xA22
+#define ODM_REG_CCK_FILTER_PARA2_11N 0xA23
+#define ODM_REG_CCK_FILTER_PARA3_11N 0xA24
+#define ODM_REG_CCK_FILTER_PARA4_11N 0xA25
+#define ODM_REG_CCK_FILTER_PARA5_11N 0xA26
+#define ODM_REG_CCK_FILTER_PARA6_11N 0xA27
+#define ODM_REG_CCK_FILTER_PARA7_11N 0xA28
+#define ODM_REG_CCK_FILTER_PARA8_11N 0xA29
+#define ODM_REG_CCK_FA_RST_11N 0xA2C
+#define ODM_REG_CCK_FA_MSB_11N 0xA58
+#define ODM_REG_CCK_FA_LSB_11N 0xA5C
+#define ODM_REG_CCK_CCA_CNT_11N 0xA60
+#define ODM_REG_BB_PWR_SAV4_11N 0xA74
+/* PAGE B */
+#define ODM_REG_LNA_SWITCH_11N 0xB2C
+#define ODM_REG_PATH_SWITCH_11N 0xB30
+#define ODM_REG_RSSI_CTRL_11N 0xB38
+#define ODM_REG_CONFIG_ANTA_11N 0xB68
+#define ODM_REG_RSSI_BT_11N 0xB9C
+/* PAGE C */
+#define ODM_REG_OFDM_FA_HOLDC_11N 0xC00
+#define ODM_REG_RX_PATH_11N 0xC04
+#define ODM_REG_TRMUX_11N 0xC08
+#define ODM_REG_OFDM_FA_RSTC_11N 0xC0C
+#define ODM_REG_RXIQI_MATRIX_11N 0xC14
+#define ODM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
+#define ODM_REG_IGI_A_11N 0xC50
+#define ODM_REG_ANTDIV_PARA2_11N 0xC54
+#define ODM_REG_IGI_B_11N 0xC58
+#define ODM_REG_ANTDIV_PARA3_11N 0xC5C
+#define ODM_REG_BB_PWR_SAV2_11N 0xC70
+#define ODM_REG_RX_OFF_11N 0xC7C
+#define ODM_REG_TXIQK_MATRIXA_11N 0xC80
+#define ODM_REG_TXIQK_MATRIXB_11N 0xC88
+#define ODM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
+#define ODM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
+#define ODM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
+#define ODM_REG_ANTDIV_PARA1_11N 0xCA4
+#define ODM_REG_OFDM_FA_TYPE1_11N 0xCF0
+/* PAGE D */
+#define ODM_REG_OFDM_FA_RSTD_11N 0xD00
+#define ODM_REG_OFDM_FA_TYPE2_11N 0xDA0
+#define ODM_REG_OFDM_FA_TYPE3_11N 0xDA4
+#define ODM_REG_OFDM_FA_TYPE4_11N 0xDA8
+/* PAGE E */
+#define ODM_REG_TXAGC_A_6_18_11N 0xE00
+#define ODM_REG_TXAGC_A_24_54_11N 0xE04
+#define ODM_REG_TXAGC_A_1_MCS32_11N 0xE08
+#define ODM_REG_TXAGC_A_MCS0_3_11N 0xE10
+#define ODM_REG_TXAGC_A_MCS4_7_11N 0xE14
+#define ODM_REG_TXAGC_A_MCS8_11_11N 0xE18
+#define ODM_REG_TXAGC_A_MCS12_15_11N 0xE1C
+#define ODM_REG_FPGA0_IQK_11N 0xE28
+#define ODM_REG_TXIQK_TONE_A_11N 0xE30
+#define ODM_REG_RXIQK_TONE_A_11N 0xE34
+#define ODM_REG_TXIQK_PI_A_11N 0xE38
+#define ODM_REG_RXIQK_PI_A_11N 0xE3C
+#define ODM_REG_TXIQK_11N 0xE40
+#define ODM_REG_RXIQK_11N 0xE44
+#define ODM_REG_IQK_AGC_PTS_11N 0xE48
+#define ODM_REG_IQK_AGC_RSP_11N 0xE4C
+#define ODM_REG_BLUETOOTH_11N 0xE6C
+#define ODM_REG_RX_WAIT_CCA_11N 0xE70
+#define ODM_REG_TX_CCK_RFON_11N 0xE74
+#define ODM_REG_TX_CCK_BBON_11N 0xE78
+#define ODM_REG_OFDM_RFON_11N 0xE7C
+#define ODM_REG_OFDM_BBON_11N 0xE80
+#define ODM_REG_TX2RX_11N 0xE84
+#define ODM_REG_TX2TX_11N 0xE88
+#define ODM_REG_RX_CCK_11N 0xE8C
+#define ODM_REG_RX_OFDM_11N 0xED0
+#define ODM_REG_RX_WAIT_RIFS_11N 0xED4
+#define ODM_REG_RX2RX_11N 0xED8
+#define ODM_REG_STANDBY_11N 0xEDC
+#define ODM_REG_SLEEP_11N 0xEE0
+#define ODM_REG_PMPD_ANAEN_11N 0xEEC
+
+
+
+
+
+
+
+/* 2 MAC REG LIST */
+#define ODM_REG_BB_RST_11N 0x02
+#define ODM_REG_ANTSEL_PIN_11N 0x4C
+#define ODM_REG_EARLY_MODE_11N 0x4D0
+#define ODM_REG_RSSI_MONITOR_11N 0x4FE
+#define ODM_REG_EDCA_VO_11N 0x500
+#define ODM_REG_EDCA_VI_11N 0x504
+#define ODM_REG_EDCA_BE_11N 0x508
+#define ODM_REG_EDCA_BK_11N 0x50C
+#define ODM_REG_TXPAUSE_11N 0x522
+#define ODM_REG_RESP_TX_11N 0x6D8
+#define ODM_REG_ANT_TRAIN_PARA1_11N 0x7b0
+#define ODM_REG_ANT_TRAIN_PARA2_11N 0x7b4
+
+
+/* DIG Related */
+#define ODM_BIT_IGI_11N 0x0000007F
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/odm_debug.h b/drivers/staging/rtl8723au/include/odm_debug.h
new file mode 100644
index 000000000000..5bc51d09e52f
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/odm_debug.h
@@ -0,0 +1,139 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+#ifndef __ODM_DBG_H__
+#define __ODM_DBG_H__
+
+
+/* */
+/* Define the debug levels */
+/* */
+/* 1. DBG_TRACE and DBG_LOUD are used for normal cases. */
+/* So they can help SW engineers develop or trace state changes, */
+/* and also help HW engineers trace every operation to and from HW, */
+/* e.g. IO, Tx, Rx. */
+/* */
+/* 2. DBG_WARNING and DBG_SERIOUS are used for unusual or error cases, */
+/* which help us to debug SW or HW. */
+/* */
+/* */
+/* */
+/* Never used in a call to ODM_RT_TRACE()! */
+/* */
+#define ODM_DBG_OFF 1
+
+/* */
+/* Fatal bug. */
+/* For example, Tx/Rx/IO locked up, OS hangs, memory access violation, */
+/* resource allocation failed, unexpected HW behavior, HW BUG and so on. */
+/* */
+#define ODM_DBG_SERIOUS 2
+
+/* */
+/* Abnormal, rare, or unexpected cases. */
+/* For example, IRP/Packet/OID canceled, surprise device removal and so on. */
+/* */
+#define ODM_DBG_WARNING 3
+
+/* */
+/* Normal case with useful information about current SW or HW state. */
+/* For example, Tx/Rx descriptor to fill, Tx/Rx descriptor completed status, */
+/* SW protocol state change, dynamic mechanism state change and so on. */
+/* */
+#define ODM_DBG_LOUD 4
+
+/* */
+/* Normal case with detail execution flow or information. */
+/* */
+#define ODM_DBG_TRACE 5
+
+/* */
+/* Define the tracing components */
+/* */
+/* */
+/* BB Functions */
+#define ODM_COMP_DIG BIT0
+#define ODM_COMP_RA_MASK BIT1
+#define ODM_COMP_DYNAMIC_TXPWR BIT2
+#define ODM_COMP_FA_CNT BIT3
+#define ODM_COMP_RSSI_MONITOR BIT4
+#define ODM_COMP_CCK_PD BIT5
+#define ODM_COMP_ANT_DIV BIT6
+#define ODM_COMP_PWR_SAVE BIT7
+#define ODM_COMP_PWR_TRAIN BIT8
+#define ODM_COMP_RATE_ADAPTIVE BIT9
+#define ODM_COMP_PATH_DIV BIT10
+#define ODM_COMP_PSD BIT11
+#define ODM_COMP_DYNAMIC_PRICCA BIT12
+#define ODM_COMP_RXHP BIT13
+/* MAC Functions */
+#define ODM_COMP_EDCA_TURBO BIT16
+#define ODM_COMP_EARLY_MODE BIT17
+/* RF Functions */
+#define ODM_COMP_TX_PWR_TRACK BIT24
+#define ODM_COMP_RX_GAIN_TRACK BIT25
+#define ODM_COMP_CALIBRATION BIT26
+/* Common Functions */
+#define ODM_COMP_COMMON BIT30
+#define ODM_COMP_INIT BIT31
+
+/*------------------------Export Macro Definition---------------------------*/
+ #define DbgPrint printk
+ #define RT_PRINTK(fmt, args...) DbgPrint("%s(): " fmt, __func__, ## args);
+
+#ifndef ASSERT
+ #define ASSERT(expr)
+#endif
+
+#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) \
+ if(((comp) & pDM_Odm->DebugComponents) && (level <= pDM_Odm->DebugLevel)) \
+ { \
+ DbgPrint("[ODM-8723A] "); \
+ RT_PRINTK fmt; \
+ }
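+/*
+ * Usage sketch (illustrative, not from the original sources): a message is
+ * printed only when its component bit is set in DebugComponents and its level
+ * does not exceed DebugLevel.  Note the extra parentheses around the
+ * printk-style argument list; CurrentIGI below is just an example variable.
+ *
+ *	pDM_Odm->DebugComponents = ODM_COMP_DIG | ODM_COMP_FA_CNT;
+ *	pDM_Odm->DebugLevel = ODM_DBG_LOUD;
+ *	ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ *		     ("CurrentIGI = 0x%x\n", CurrentIGI));
+ */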
+
+#define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt) \
+ if(((comp) & pDM_Odm->DebugComponents) && (level <= pDM_Odm->DebugLevel)) \
+ { \
+ RT_PRINTK fmt; \
+ }
+
+#define ODM_RT_ASSERT(pDM_Odm, expr, fmt) \
+ if(!(expr)) { \
+ DbgPrint("Assertion failed! %s at ......\n", #expr); \
+ DbgPrint(" ......%s,%s,line=%d\n", __FILE__, __func__, __LINE__);\
+ RT_PRINTK fmt; \
+ ASSERT(false); \
+ }
+#define ODM_dbg_enter() { DbgPrint("==> %s\n", __func__); }
+#define ODM_dbg_exit() { DbgPrint("<== %s\n", __func__); }
+#define ODM_dbg_trace(str) { DbgPrint("%s:%s\n", __func__, str); }
+
+#define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr) \
+ if(((comp) & pDM_Odm->DebugComponents) && (level <= pDM_Odm->DebugLevel)) { \
+ int __i; \
+ u8 *__ptr = (u8 *)ptr; \
+ DbgPrint("[ODM] "); \
+ DbgPrint(title_str); \
+ DbgPrint(" "); \
+ for (__i=0; __i < 6; __i++) \
+ DbgPrint("%02X%s", __ptr[__i], (__i == 5) ? "" : "-"); \
+ DbgPrint("\n"); \
+ }
+
+void ODM_InitDebugSetting23a(struct dm_odm_t *pDM_Odm);
+
+#endif /* __ODM_DBG_H__ */
diff --git a/drivers/staging/rtl8723au/include/odm_interface.h b/drivers/staging/rtl8723au/include/odm_interface.h
new file mode 100644
index 000000000000..f216b5846f92
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/odm_interface.h
@@ -0,0 +1,131 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+#ifndef __ODM_INTERFACE_H__
+#define __ODM_INTERFACE_H__
+
+
+
+/* */
+/* =========== Constant/Structure/Enum/... Define */
+/* */
+
+
+
+/* */
+/* =========== Macro Define */
+/* */
+
+#define _reg_all(_name) ODM_##_name
+#define _reg_ic(_name, _ic) ODM_##_name##_ic
+#define _bit_all(_name) BIT_##_name
+#define _bit_ic(_name, _ic) BIT_##_name##_ic
+
+/* _cat: implemented by Token-Pasting Operator. */
+
+/*===================================
+
+#define ODM_REG_DIG_11N 0xC50
+#define ODM_REG_DIG_11AC 0xDDD
+
+ODM_REG(DIG,_pDM_Odm)
+=====================================*/
+
+#define _reg_11N(_name) ODM_REG_##_name##_11N
+#define _reg_11AC(_name) ODM_REG_##_name##_11AC
+#define _bit_11N(_name) ODM_BIT_##_name##_11N
+#define _bit_11AC(_name) ODM_BIT_##_name##_11AC
+
+#define _cat(_name, _ic_type, _func) \
+ ( \
+ ((_ic_type) & ODM_IC_11N_SERIES)? _func##_11N(_name): \
+ _func##_11AC(_name) \
+ )
+
+/* _name: name of register or bit. */
+/* Example: "ODM_REG(R_A_AGC_CORE1, pDM_Odm)" */
+/* expands to "ODM_REG_R_A_AGC_CORE1_11N" or "ODM_REG_R_A_AGC_CORE1_11AC", depending on SupportICType. */
+#define ODM_REG(_name, _pDM_Odm) _cat(_name, _pDM_Odm->SupportICType, _reg)
+#define ODM_BIT(_name, _pDM_Odm) _cat(_name, _pDM_Odm->SupportICType, _bit)
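+/*
+ * Expansion sketch (illustrative): for a SupportICType in ODM_IC_11N_SERIES,
+ * ODM_REG(IGI_A, pDM_Odm) selects ODM_REG_IGI_A_11N (0xC50) and
+ * ODM_BIT(IGI, pDM_Odm) selects ODM_BIT_IGI_11N; for an 11AC part the _11AC
+ * definitions are picked instead.  CurrentIGI is an example variable.
+ *
+ *	ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm),
+ *		     ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ */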
+
+/* */
+/* 2012/02/17 MH For non-MP compile pass only. Linux does not support workitem. */
+/* Suggest HW team to use thread instead of workitem. Windows also support the feature. */
+/* */
+typedef void (*RT_WORKITEM_CALL_BACK)(struct work_struct *pContext);
+
+/* */
+/* =========== Extern Variable ??? It should be forbidden. */
+/* */
+
+
+/* */
+/* =========== Extern Function Prototypes */
+/* */
+
+
+u8 ODM_Read1Byte(struct dm_odm_t *pDM_Odm, u32 RegAddr);
+
+u16 ODM_Read2Byte(struct dm_odm_t *pDM_Odm, u32 RegAddr);
+
+u32 ODM_Read4Byte(struct dm_odm_t *pDM_Odm, u32 RegAddr);
+
+void ODM_Write1Byte(struct dm_odm_t *pDM_Odm, u32 RegAddr, u8 Data);
+
+void ODM_Write2Byte(struct dm_odm_t *pDM_Odm, u32 RegAddr, u16 Data);
+
+void ODM_Write4Byte(struct dm_odm_t *pDM_Odm, u32 RegAddr, u32 Data);
+
+void ODM_SetMACReg(struct dm_odm_t *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data);
+
+u32 ODM_GetMACReg(struct dm_odm_t *pDM_Odm, u32 RegAddr, u32 BitMask);
+
+void ODM_SetBBReg(struct dm_odm_t *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data);
+
+u32 ODM_GetBBReg(struct dm_odm_t *pDM_Odm, u32 RegAddr, u32 BitMask);
+
+void ODM_SetRFReg(struct dm_odm_t *pDM_Odm, enum RF_RADIO_PATH eRFPath,
+ u32 RegAddr, u32 BitMask, u32 Data);
+
+u32 ODM_GetRFReg(struct dm_odm_t *pDM_Odm, enum RF_RADIO_PATH eRFPath,
+ u32 RegAddr, u32 BitMask);
+
+/* Memory Relative Function. */
+void ODM_AllocateMemory(struct dm_odm_t *pDM_Odm, void **pPtr, u32 length);
+void ODM_FreeMemory(struct dm_odm_t *pDM_Odm, void *pPtr, u32 length);
+
+s32 ODM_CompareMemory(struct dm_odm_t *pDM_Odm, void *pBuf1, void *pBuf2, u32 length);
+
+/* ODM MISC-spin lock relative API. */
+void ODM_AcquireSpinLock(struct dm_odm_t *pDM_Odm, enum rt_spinlock_type type);
+
+void ODM_ReleaseSpinLock(struct dm_odm_t *pDM_Odm, enum rt_spinlock_type type);
+
+/* ODM MISC-workitem relative API. */
+void ODM_InitializeWorkItem(struct dm_odm_t *pDM_Odm, void *pRtWorkItem,
+ RT_WORKITEM_CALL_BACK RtWorkItemCallback, void *pContext, const char *szID);
+
+/* ODM Timer relative API. */
+void ODM_SetTimer(struct dm_odm_t *pDM_Odm, struct timer_list *pTimer, u32 msDelay);
+
+void ODM_ReleaseTimer(struct dm_odm_t *pDM_Odm, struct timer_list *pTimer);
+
+/* ODM FW relative API. */
+u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
+ u32 *pElementID, u32 *pCmdLen, u8 **pCmbBuffer,
+ u8 *CmdStartSeq);
+
+#endif /* __ODM_INTERFACE_H__ */
diff --git a/drivers/staging/rtl8723au/include/odm_precomp.h b/drivers/staging/rtl8723au/include/odm_precomp.h
new file mode 100644
index 000000000000..f3fc2fad9884
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/odm_precomp.h
@@ -0,0 +1,54 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_PRECOMP_H__
+#define __ODM_PRECOMP_H__
+
+#include "odm_types.h"
+
+#define TEST_FALG___ 1
+
+/* 2 Config Flags and Structs - defined by each ODM Type */
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <hal_intf.h>
+
+
+/* 2 Hardware Parameter Files */
+#include "Hal8723UHWImg_CE.h"
+
+
+/* 2 OutSrc Header Files */
+
+#include "odm.h"
+#include "odm_HWConfig.h"
+#include "odm_debug.h"
+#include "odm_RegDefine11AC.h"
+#include "odm_RegDefine11N.h"
+
+#include "HalDMOutSrc8723A.h" /* for IQK,LCK,Power-tracking */
+#include "rtl8723a_hal.h"
+
+#include "odm_interface.h"
+#include "odm_reg.h"
+
+#include "HalHWImg8723A_MAC.h"
+#include "HalHWImg8723A_RF.h"
+#include "HalHWImg8723A_BB.h"
+#include "HalHWImg8723A_FW.h"
+#include "odm_RegConfig8723A.h"
+
+#endif /* __ODM_PRECOMP_H__ */
diff --git a/drivers/staging/rtl8723au/include/odm_reg.h b/drivers/staging/rtl8723au/include/odm_reg.h
new file mode 100644
index 000000000000..56191e9fdcdb
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/odm_reg.h
@@ -0,0 +1,114 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/* */
+/* File Name: odm_reg.h */
+/* */
+/* Description: */
+/* */
+/* This file is for general register definition. */
+/* */
+/* */
+/* */
+#ifndef __HAL_ODM_REG_H__
+#define __HAL_ODM_REG_H__
+
+/* */
+/* Register Definition */
+/* */
+
+/* MAC REG */
+#define ODM_BB_RESET 0x002
+#define ODM_DUMMY 0x4fe
+#define ODM_EDCA_VO_PARAM 0x500
+#define ODM_EDCA_VI_PARAM 0x504
+#define ODM_EDCA_BE_PARAM 0x508
+#define ODM_EDCA_BK_PARAM 0x50C
+#define ODM_TXPAUSE 0x522
+
+/* BB REG */
+#define ODM_FPGA_PHY0_PAGE8 0x800
+#define ODM_PSD_SETTING 0x808
+#define ODM_AFE_SETTING 0x818
+#define ODM_TXAGC_B_6_18 0x830
+#define ODM_TXAGC_B_24_54 0x834
+#define ODM_TXAGC_B_MCS32_5 0x838
+#define ODM_TXAGC_B_MCS0_MCS3 0x83c
+#define ODM_TXAGC_B_MCS4_MCS7 0x848
+#define ODM_TXAGC_B_MCS8_MCS11 0x84c
+#define ODM_ANALOG_REGISTER 0x85c
+#define ODM_RF_INTERFACE_OUTPUT 0x860
+#define ODM_TXAGC_B_MCS12_MCS15 0x868
+#define ODM_TXAGC_B_11_A_2_11 0x86c
+#define ODM_AD_DA_LSB_MASK 0x874
+#define ODM_ENABLE_3_WIRE 0x88c
+#define ODM_PSD_REPORT 0x8b4
+#define ODM_R_ANT_SELECT 0x90c
+#define ODM_CCK_ANT_SELECT 0xa07
+#define ODM_CCK_PD_THRESH 0xa0a
+#define ODM_CCK_RF_REG1 0xa11
+#define ODM_CCK_MATCH_FILTER 0xa20
+#define ODM_CCK_RAKE_MAC 0xa2e
+#define ODM_CCK_CNT_RESET 0xa2d
+#define ODM_CCK_TX_DIVERSITY 0xa2f
+#define ODM_CCK_FA_CNT_MSB 0xa5b
+#define ODM_CCK_FA_CNT_LSB 0xa5c
+#define ODM_CCK_NEW_FUNCTION 0xa75
+#define ODM_OFDM_PHY0_PAGE_C 0xc00
+#define ODM_OFDM_RX_ANT 0xc04
+#define ODM_R_A_RXIQI 0xc14
+#define ODM_R_A_AGC_CORE1 0xc50
+#define ODM_R_A_AGC_CORE2 0xc54
+#define ODM_R_B_AGC_CORE1 0xc58
+#define ODM_R_AGC_PAR 0xc70
+#define ODM_R_HTSTF_AGC_PAR 0xc7c
+#define ODM_TX_PWR_TRAINING_A 0xc90
+#define ODM_TX_PWR_TRAINING_B 0xc98
+#define ODM_OFDM_FA_CNT1 0xcf0
+#define ODM_OFDM_PHY0_PAGE_D 0xd00
+#define ODM_OFDM_FA_CNT2 0xda0
+#define ODM_OFDM_FA_CNT3 0xda4
+#define ODM_OFDM_FA_CNT4 0xda8
+#define ODM_TXAGC_A_6_18 0xe00
+#define ODM_TXAGC_A_24_54 0xe04
+#define ODM_TXAGC_A_1_MCS32 0xe08
+#define ODM_TXAGC_A_MCS0_MCS3 0xe10
+#define ODM_TXAGC_A_MCS4_MCS7 0xe14
+#define ODM_TXAGC_A_MCS8_MCS11 0xe18
+#define ODM_TXAGC_A_MCS12_MCS15 0xe1c
+
+/* RF REG */
+#define ODM_GAIN_SETTING 0x00
+#define ODM_CHANNEL 0x18
+
+/* Ant Detect Reg */
+#define ODM_DPDT 0x300
+
+/* PSD Init */
+#define ODM_PSDREG 0x808
+
+/* 92D Path Div */
+#define PATHDIV_REG 0xB30
+#define PATHDIV_TRI 0xBA0
+
+
+/* */
+/* Bitmap Definition */
+/* */
+
+#define BIT_FA_RESET BIT0
+
+
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/odm_types.h b/drivers/staging/rtl8723au/include/odm_types.h
new file mode 100644
index 000000000000..a866769ea178
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/odm_types.h
@@ -0,0 +1,36 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __ODM_TYPES_H__
+#define __ODM_TYPES_H__
+
+/* Define Different SW team support */
+
+enum hal_status {
+ HAL_STATUS_SUCCESS,
+ HAL_STATUS_FAILURE,
+};
+
+enum rt_spinlock_type {
+ RT_TEMP = 1,
+};
+
+#define SET_TX_DESC_ANTSEL_A_88E(__pTxDesc, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 24, 1, __Value)
+#define SET_TX_DESC_ANTSEL_B_88E(__pTxDesc, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 25, 1, __Value)
+#define SET_TX_DESC_ANTSEL_C_88E(__pTxDesc, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pTxDesc+28, 29, 1, __Value)
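+/*
+ * Usage sketch (illustrative): __pTxDesc is assumed to point at the start of a
+ * little-endian TX descriptor; the macros set single antenna-select bits in
+ * dword 2 (offset 8) and dword 7 (offset 28) via SET_BITS_TO_LE_4BYTE, which
+ * is defined elsewhere in the driver.  ptxdesc is an example pointer.
+ *
+ *	SET_TX_DESC_ANTSEL_A_88E(ptxdesc, 1);
+ *	SET_TX_DESC_ANTSEL_B_88E(ptxdesc, 0);
+ */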
+
+#endif /* __ODM_TYPES_H__ */
diff --git a/drivers/staging/rtl8723au/include/osdep_intf.h b/drivers/staging/rtl8723au/include/osdep_intf.h
new file mode 100644
index 000000000000..b603cf532900
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/osdep_intf.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __OSDEP_INTF_H_
+#define __OSDEP_INTF_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+int rtw_hw_suspend23a(struct rtw_adapter *padapter);
+int rtw_hw_resume23a(struct rtw_adapter *padapter);
+
+u8 rtw_init_drv_sw23a(struct rtw_adapter *padapter);
+u8 rtw_free_drv_sw23a(struct rtw_adapter *padapter);
+u8 rtw_reset_drv_sw23a(struct rtw_adapter *padapter);
+
+u32 rtw_start_drv_threads23a(struct rtw_adapter *padapter);
+void rtw_stop_drv_threads23a (struct rtw_adapter *padapter);
+void rtw_cancel_all_timer23a(struct rtw_adapter *padapter);
+
+int rtw_init_netdev23a_name23a(struct net_device *pnetdev, const char *ifname);
+struct net_device *rtw_init_netdev23a(struct rtw_adapter *padapter);
+
+u16 rtw_recv_select_queue23a(struct sk_buff *skb);
+
+void rtw_ips_dev_unload23a(struct rtw_adapter *padapter);
+
+int rtw_ips_pwr_up23a(struct rtw_adapter *padapter);
+void rtw_ips_pwr_down23a(struct rtw_adapter *padapter);
+
+int rtw_drv_register_netdev(struct rtw_adapter *padapter);
+void rtw_ndev_destructor(struct net_device *ndev);
+
+#endif /* _OSDEP_INTF_H_ */
diff --git a/drivers/staging/rtl8723au/include/osdep_service.h b/drivers/staging/rtl8723au/include/osdep_service.h
new file mode 100644
index 000000000000..039bc7285ed0
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/osdep_service.h
@@ -0,0 +1,207 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __OSDEP_SERVICE_H_
+#define __OSDEP_SERVICE_H_
+
+#define _FAIL 0
+#define _SUCCESS 1
+#define RTW_RX_HANDLED 2
+
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/circ_buf.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <linux/semaphore.h>
+#include <linux/sem.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h> /* Necessary because we use the proc fs */
+#include <linux/interrupt.h> /* for struct tasklet_struct */
+#include <linux/ip.h>
+#include <linux/kthread.h>
+
+
+/* #include <linux/ieee80211.h> */
+#include <net/ieee80211_radiotap.h>
+#include <net/cfg80211.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+
+struct rtw_adapter;
+struct c2h_evt_hdr;
+
+typedef s32 (*c2h_id_filter)(u8 id);
+
+struct rtw_queue {
+ struct list_head queue;
+ spinlock_t lock;
+};
+
+static inline struct list_head *get_list_head(struct rtw_queue *queue)
+{
+ return (&queue->queue);
+}
+
+static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
+{
+ return (netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)) );
+}
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+static inline u32 CHKBIT(u32 x)
+{
+ WARN_ON(x >= 32);
+ if (x >= 32)
+ return 0;
+ return BIT(x);
+}
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+#define BIT32 0x0100000000
+#define BIT33 0x0200000000
+#define BIT34 0x0400000000
+#define BIT35 0x0800000000
+#define BIT36 0x1000000000
+
+int RTW_STATUS_CODE23a(int error_code);
+
+u8 *_rtw_vmalloc(u32 sz);
+u8 *_rtw_zvmalloc(u32 sz);
+void _rtw_vmfree(u8 *pbuf, u32 sz);
+#define rtw_vmalloc(sz) _rtw_vmalloc((sz))
+#define rtw_zvmalloc(sz) _rtw_zvmalloc((sz))
+#define rtw_vmfree(pbuf, sz) _rtw_vmfree((pbuf), (sz))
+
+extern unsigned char REALTEK_96B_IE23A[];
+extern unsigned char MCS_rate_2R23A[16];
+extern unsigned char RTW_WPA_OUI23A[];
+extern unsigned char WPA_TKIP_CIPHER23A[4];
+extern unsigned char RSN_TKIP_CIPHER23A[4];
+
+extern unsigned char MCS_rate_1R23A[16];
+
+void _rtw_init_queue23a(struct rtw_queue *pqueue);
+u32 _rtw_queue_empty23a(struct rtw_queue *pqueue);
+
+static inline u32 bitshift(u32 bitmask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++)
+ if (((bitmask >> i) & 0x1) == 1) break;
+
+ return i;
+}
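+/*
+ * Worked example (illustrative): bitshift() returns the index of the lowest
+ * set bit, e.g. bitshift(0x00000100) == 8 and bitshift(BIT14) == 14; a zero
+ * bitmask falls through the loop and yields 32.
+ */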
+
+void rtw_suspend_lock_init(void);
+void rtw_suspend_lock_uninit(void);
+void rtw_lock_suspend(void);
+void rtw_unlock_suspend(void);
+
+
+#define NDEV_FMT "%s"
+#define NDEV_ARG(ndev) ndev->name
+#define ADPT_FMT "%s"
+#define ADPT_ARG(adapter) adapter->pnetdev->name
+#define FUNC_NDEV_FMT "%s(%s)"
+#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
+#define FUNC_ADPT_FMT "%s(%s)"
+#define FUNC_ADPT_ARG(adapter) __func__, adapter->pnetdev->name
+
+#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)),(sig), 1)
+
+u64 rtw_modular6423a(u64 x, u64 y);
+u64 rtw_division6423a(u64 x, u64 y);
+
+
+/* Macros for handling unaligned memory accesses */
+
+#define RTW_GET_BE24(a) ((((u32) (a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
+ ((u32) (a)[2]))
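+/*
+ * Worked example (illustrative): for u8 a[3] = { 0x01, 0x02, 0x03 },
+ * RTW_GET_BE24(a) evaluates to 0x010203 (a big-endian 24-bit read).
+ */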
+
+
+struct rtw_cbuf {
+ u32 write;
+ u32 read;
+ u32 size;
+ void *bufs[0];
+};
+
+bool rtw_cbuf_full23a(struct rtw_cbuf *cbuf);
+bool rtw_cbuf_empty23a(struct rtw_cbuf *cbuf);
+bool rtw_cbuf_push23a(struct rtw_cbuf *cbuf, void *buf);
+void *rtw_cbuf_pop23a(struct rtw_cbuf *cbuf);
+struct rtw_cbuf *rtw_cbuf_alloc23a(u32 size);
+void rtw_cbuf_free(struct rtw_cbuf *cbuf);
+int rtw_change_ifname(struct rtw_adapter *padapter, const char *ifname);
+s32 c2h_evt_hdl(struct rtw_adapter *adapter, struct c2h_evt_hdr *c2h_evt, c2h_id_filter filter);
+void indicate_wx_scan_complete_event(struct rtw_adapter *padapter);
+u8 rtw_do_join23a(struct rtw_adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/recv_osdep.h b/drivers/staging/rtl8723au/include/recv_osdep.h
new file mode 100644
index 000000000000..15c94b6168bf
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/recv_osdep.h
@@ -0,0 +1,45 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RECV_OSDEP_H_
+#define __RECV_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+int _rtw_init_recv_priv23a(struct recv_priv *precvpriv, struct rtw_adapter *padapter);
+void _rtw_free_recv_priv23a (struct recv_priv *precvpriv);
+
+int rtw_recv_entry23a(struct recv_frame *precv_frame);
+int rtw_recv_indicatepkt23a(struct rtw_adapter *adapter, struct recv_frame *precv_frame);
+void rtw_recv_returnpacket(struct net_device *cnxt, struct sk_buff *preturnedpkt);
+
+void rtw_hostapd_mlme_rx23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+void rtw_handle_tkip_mic_err23a(struct rtw_adapter *padapter, u8 bgroup);
+
+int rtw_init_recv_priv(struct recv_priv *precvpriv, struct rtw_adapter *padapter);
+void rtw_free_recv_priv (struct recv_priv *precvpriv);
+
+int rtw_os_recv_resource_init(struct recv_priv *precvpriv, struct rtw_adapter *padapter);
+int rtw_os_recv_resource_alloc23a(struct rtw_adapter *padapter, struct recv_frame *precvframe);
+void rtw_os_recv_resource_free(struct recv_priv *precvpriv);
+
+int rtw_os_recvbuf_resource_alloc23a(struct rtw_adapter *padapter, struct recv_buf *precvbuf);
+int rtw_os_recvbuf_resource_free23a(struct rtw_adapter *padapter, struct recv_buf *precvbuf);
+
+void rtw_os_read_port23a(struct rtw_adapter *padapter, struct recv_buf *precvbuf);
+
+void rtw_init_recv_timer23a(struct recv_reorder_ctrl *preorder_ctrl);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h b/drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h
new file mode 100644
index 000000000000..6d1edc6ef84d
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h
@@ -0,0 +1,1672 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723A_BT_COEXIST_H__
+#define __RTL8723A_BT_COEXIST_H__
+
+#include <drv_types.h>
+#include "odm_precomp.h"
+
+
+#define __BT_C__ 1
+#define __BT_HANDLEPACKET_C__ 1
+#define __BT_HCI_C__ 1
+#define __HALBTC87231ANT_C__ 1
+#define __HALBTC87232ANT_C__ 1
+#define __HALBTC8723_C__ 1
+#define __HALBTCCSR1ANT_C__ 1
+#define __HALBTCCSR2ANT_C__ 1
+#define __HALBTCOEXIST_C__ 1
+#define __HALBT_C__ 1
+
+#ifdef __BT_C__ /* COMMON/BT.h */
+
+/* HEADER/PlatformDef.h */
+enum rt_media_status {
+ RT_MEDIA_DISCONNECT = 0,
+ RT_MEDIA_CONNECT = 1
+};
+
+/* ===== Below this line is sync from SD7 driver COMMON/BT.h ===== */
+
+#define BT_TMP_BUF_SIZE 100
+
+void BT_SignalCompensation(struct rtw_adapter *padapter,
+ u8 *rssi_wifi, u8 *rssi_bt);
+void BT_WifiScanNotify(struct rtw_adapter *padapter, u8 scanType);
+void BT_WifiAssociateNotify(struct rtw_adapter *padapter, u8 action);
+void BT_WifiMediaStatusNotify(struct rtw_adapter *padapter,
+ enum rt_media_status mstatus);
+void BT_SpecialPacketNotify(struct rtw_adapter * padapter);
+void BT_HaltProcess(struct rtw_adapter * padapter);
+void BT_LpsLeave(struct rtw_adapter * padapter);
+
+
+#define BT_HsConnectionEstablished(Adapter) false
+/* ===== End of sync from SD7 driver COMMON/BT.h ===== */
+#endif /* __BT_C__ */
+
+#ifdef __BT_HCI_C__ /* COMMON/bt_hci.h */
+
+/* HEADER/SecurityType.h */
+#define TKIP_ENC_KEY_POS 32 /* (KEK_LEN+KEK_LEN) */
+#define MAXRSNIELEN 256
+
+/* COMMON/Protocol802_11.h */
+/* */
+/* 802.11 Management frame Status Code field */
+/* */
+struct octet_string {
+ u8 *Octet;
+ u16 Length;
+};
+
+
+/* AES_CCMP specific */
+enum {
+ AESCCMP_BLK_SIZE = 16, /* # octets in an AES block */
+ AESCCMP_MAX_PACKET = 4*512, /* largest packet size */
+ AESCCMP_N_RESERVED = 0, /* reserved nonce octet value */
+ AESCCMP_A_DATA = 0x40, /* the Adata bit in the flags */
+ AESCCMP_M_SHIFT = 3, /* how much to shift the 3-bit M field */
+ AESCCMP_L_SHIFT = 0, /* how much to shift the 3-bit L field */
+ AESCCMP_L_SIZE = 2, /* size of the l(m) length field (in octets) */
+ AESCCMP_OFFSET_SC = 22,
+ AESCCMP_OFFSET_DURATION = 4,
+ AESCCMP_OFFSET_A2 = 10,
+ AESCCMP_OFFSET_A4 = 24,
+ AESCCMP_QC_TID_MASK = 0x0f,
+ AESCCMP_BLK_SIZE_TOTAL = 16*16, /* Added by Annie for CKIP AES MIC BSOD, 2006-08-17. */
+ /* 16*8 < 4*60; resolved to 16*16 */
+};
+
+/* Key Length */
+#define PMK_LEN 32
+#define PTK_LEN_TKIP 64
+#define GTK_LEN 32
+#define KEY_NONCE_LEN 32
+
+
+/* COMMON/Dot11d.h */
+struct chnl_txpower_triple {
+ u8 FirstChnl;
+ u8 NumChnls;
+ s8 MaxTxPowerInDbm;
+};
+
+
+/* ===== Below this line is sync from SD7 driver COMMON/bt_hci.h ===== */
+/* The following is for BT 3.0 + HS HCI COMMAND ERRORS CODES */
+
+#define Max80211PALPDUSize 1492
+#define Max80211AMPASSOCLen 672
+#define MinGUserPrio 4
+#define MaxGUserPrio 7
+#define BEUserPrio0 0
+#define BEUserPrio1 3
+#define Max80211BeaconPeriod 2000
+#define ShortRangeModePowerMax 4
+
+#define BT_Default_Chnl 10
+#define ACLDataHeaderLen 4
+
+#define BTTotalDataBlockNum 0x100
+#define BTLocalBufNum 0x200
+#define BTMaxDataBlockLen 0x800
+#define BTTOTALBANDWIDTH 0x7530
+#define BTMAXBANDGUBANDWIDTH 0x4e20
+#define TmpLocalBufSize 0x100
+#define BTSynDataPacketLength 0xff
+/* */
+
+#define BTMaxAuthCount 5
+#define BTMaxAsocCount 5
+
+#define MAX_LOGICAL_LINK_NUM 2 /* temporarily define */
+#define MAX_BT_ASOC_ENTRY_NUM 2 /* temporarily define */
+
+#define INVALID_PL_HANDLE 0xff
+#define INVALID_ENTRY_NUM 0xff
+/* */
+
+#define CAM_BT_START_INDEX (HALF_CAM_ENTRY - 4) /* MAX_BT_ASOC_ENTRY_NUM : 4 !!! */
+#define BT_HWCAM_STAR CAM_BT_START_INDEX /* We used HALF_CAM_ENTRY ~ HALF_CAM_ENTRY -MAX_BT_ASOC_ENTRY_NUM */
+
+enum hci_status {
+ HCI_STATUS_SUCCESS = 0x00, /* Success */
+ HCI_STATUS_UNKNOW_HCI_CMD = 0x01, /* Unknown HCI Command */
+ HCI_STATUS_UNKNOW_CONNECT_ID = 0X02, /* Unknown Connection Identifier */
+ HCI_STATUS_HW_FAIL = 0X03, /* Hardware Failure */
+ HCI_STATUS_PAGE_TIMEOUT = 0X04, /* Page Timeout */
+ HCI_STATUS_AUTH_FAIL = 0X05, /* Authentication Failure */
+ HCI_STATUS_PIN_OR_KEY_MISSING = 0X06, /* PIN or Key Missing */
+ HCI_STATUS_MEM_CAP_EXCEED = 0X07, /* Memory Capacity Exceeded */
+ HCI_STATUS_CONNECT_TIMEOUT = 0X08, /* Connection Timeout */
+ HCI_STATUS_CONNECT_LIMIT = 0X09, /* Connection Limit Exceeded */
+ HCI_STATUS_SYN_CONNECT_LIMIT = 0X0a, /* Synchronous Connection Limit To A Device Exceeded */
+ HCI_STATUS_ACL_CONNECT_EXISTS = 0X0b, /* ACL Connection Already Exists */
+ HCI_STATUS_CMD_DISALLOW = 0X0c, /* Command Disallowed */
+ HCI_STATUS_CONNECT_RJT_LIMIT_RESOURCE = 0X0d, /* Connection Rejected due to Limited Resources */
+ HCI_STATUS_CONNECT_RJT_SEC_REASON = 0X0e, /* Connection Rejected Due To Security Reasons */
+ HCI_STATUS_CONNECT_RJT_UNACCEPT_BD_ADDR = 0X0f, /* Connection Rejected due to Unacceptable BD_ADDR */
+ HCI_STATUS_CONNECT_ACCEPT_TIMEOUT = 0X10, /* Connection Accept Timeout Exceeded */
+ HCI_STATUS_UNSUPPORT_FEATURE_PARA_VALUE = 0X11, /* Unsupported Feature or Parameter Value */
+ HCI_STATUS_INVALID_HCI_CMD_PARA_VALUE = 0X12, /* Invalid HCI Command Parameters */
+ HCI_STATUS_REMOTE_USER_TERMINATE_CONNECT = 0X13, /* Remote User Terminated Connection */
+ HCI_STATUS_REMOTE_DEV_TERMINATE_LOW_RESOURCE = 0X14, /* Remote Device Terminated Connection due to Low Resources */
+ HCI_STATUS_REMOTE_DEV_TERMINATE_CONNECT_POWER_OFF = 0X15, /* Remote Device Terminated Connection due to Power Off */
+ HCI_STATUS_CONNECT_TERMINATE_LOCAL_HOST = 0X16, /* Connection Terminated By Local Host */
+ HCI_STATUS_REPEATE_ATTEMPT = 0X17, /* Repeated Attempts */
+ HCI_STATUS_PAIR_NOT_ALLOW = 0X18, /* Pairing Not Allowed */
+ HCI_STATUS_UNKNOW_LMP_PDU = 0X19, /* Unknown LMP PDU */
+ HCI_STATUS_UNSUPPORT_REMOTE_LMP_FEATURE = 0X1a, /* Unsupported Remote Feature / Unsupported LMP Feature */
+ HCI_STATUS_SOC_OFFSET_REJECT = 0X1b, /* SCO Offset Rejected */
+ HCI_STATUS_SOC_INTERVAL_REJECT = 0X1c, /* SCO Interval Rejected */
+ HCI_STATUS_SOC_AIR_MODE_REJECT = 0X1d,/* SCO Air Mode Rejected */
+ HCI_STATUS_INVALID_LMP_PARA = 0X1e, /* Invalid LMP Parameters */
+ HCI_STATUS_UNSPECIFIC_ERROR = 0X1f, /* Unspecified Error */
+ HCI_STATUS_UNSUPPORT_LMP_PARA_VALUE = 0X20, /* Unsupported LMP Parameter Value */
+ HCI_STATUS_ROLE_CHANGE_NOT_ALLOW = 0X21, /* Role Change Not Allowed */
+ HCI_STATUS_LMP_RESPONSE_TIMEOUT = 0X22, /* LMP Response Timeout */
+ HCI_STATUS_LMP_ERROR_TRANSACTION_COLLISION = 0X23, /* LMP Error Transaction Collision */
+ HCI_STATUS_LMP_PDU_NOT_ALLOW = 0X24, /* LMP PDU Not Allowed */
+ HCI_STATUS_ENCRYPTION_MODE_NOT_ALLOW = 0X25, /* Encryption Mode Not Acceptable */
+ HCI_STATUS_LINK_KEY_CAN_NOT_CHANGE = 0X26, /* Link Key Can Not be Changed */
+ HCI_STATUS_REQUEST_QOS_NOT_SUPPORT = 0X27, /* Requested QoS Not Supported */
+ HCI_STATUS_INSTANT_PASSED = 0X28, /* Instant Passed */
+ HCI_STATUS_PAIRING_UNIT_KEY_NOT_SUPPORT = 0X29, /* Pairing With Unit Key Not Supported */
+ HCI_STATUS_DIFFERENT_TRANSACTION_COLLISION = 0X2a, /* Different Transaction Collision */
+ HCI_STATUS_RESERVE_1 = 0X2b, /* Reserved */
+ HCI_STATUS_QOS_UNACCEPT_PARA = 0X2c, /* QoS Unacceptable Parameter */
+ HCI_STATUS_QOS_REJECT = 0X2d, /* QoS Rejected */
+ HCI_STATUS_CHNL_CLASSIFICATION_NOT_SUPPORT = 0X2e, /* Channel Classification Not Supported */
+ HCI_STATUS_INSUFFICIENT_SECURITY = 0X2f, /* Insufficient Security */
+ HCI_STATUS_PARA_OUT_OF_RANGE = 0x30, /* Parameter Out Of Mandatory Range */
+ HCI_STATUS_RESERVE_2 = 0X31, /* Reserved */
+ HCI_STATUS_ROLE_SWITCH_PENDING = 0X32, /* Role Switch Pending */
+ HCI_STATUS_RESERVE_3 = 0X33, /* Reserved */
+ HCI_STATUS_RESERVE_SOLT_VIOLATION = 0X34, /* Reserved Slot Violation */
+ HCI_STATUS_ROLE_SWITCH_FAIL = 0X35, /* Role Switch Failed */
+ HCI_STATUS_EXTEND_INQUIRY_RSP_TOO_LARGE = 0X36, /* Extended Inquiry Response Too Large */
+ HCI_STATUS_SEC_SIMPLE_PAIRING_NOT_SUPPORT = 0X37, /* Secure Simple Pairing Not Supported By Host. */
+ HCI_STATUS_HOST_BUSY_PAIRING = 0X38, /* Host Busy - Pairing */
+ HCI_STATUS_CONNECT_REJ_NOT_SUIT_CHNL_FOUND = 0X39, /* Connection Rejected due to No Suitable Channel Found */
+ HCI_STATUS_CONTROLLER_BUSY = 0X3a /* Controller Busy */
+};
+
+/* */
+/* The following is for BT 3.0 + HS HCI COMMAND */
+/* */
+
+/* HCI command opcode layout (16 bits): */
+/* bits 0-9 : OCF (Opcode Command Field) */
+/* bits 10-15: OGF (Opcode Group Field) */
+
+/* OGF 0x01 */
+#define LINK_CONTROL_COMMANDS 0x01
+enum link_control_commands {
+ HCI_INQUIRY = 0x0001,
+ HCI_INQUIRY_CANCEL = 0x0002,
+ HCI_PERIODIC_INQUIRY_MODE = 0x0003,
+ HCI_EXIT_PERIODIC_INQUIRY_MODE = 0x0004,
+ HCI_CREATE_CONNECTION = 0x0005,
+ HCI_DISCONNECT = 0x0006,
+ HCI_CREATE_CONNECTION_CANCEL = 0x0008,
+ HCI_ACCEPT_CONNECTIONREQUEST = 0x0009,
+ HCI_REJECT_CONNECTION_REQUEST = 0x000a,
+ HCI_LINK_KEY_REQUEST_REPLY = 0x000b,
+ HCI_LINK_KEY_REQUEST_NEGATIVE_REPLY = 0x000c,
+ HCI_PIN_CODE_REQUEST_REPLY = 0x000d,
+ HCI_PIN_CODE_REQUEST_NEGATIVE_REPLY = 0x000e,
+ HCI_CHANGE_CONNECTION_PACKET_TYPE = 0x000f,
+ HCI_AUTHENTICATION_REQUESTED = 0x0011,
+ HCI_SET_CONNECTION_ENCRYPTION = 0x0013,
+ HCI_CHANGE_CONNECTION_LINK_KEY = 0x0015,
+ HCI_MASTER_LINK_KEY = 0x0017,
+ HCI_REMOTE_NAME_REQUEST = 0x0019,
+ HCI_REMOTE_NAME_REQUEST_CANCEL = 0x001a,
+ HCI_READ_REMOTE_SUPPORTED_FEATURES = 0x001b,
+ HCI_READ_REMOTE_EXTENDED_FEATURES = 0x001c,
+ HCI_READ_REMOTE_VERSION_INFORMATION = 0x001d,
+ HCI_READ_CLOCK_OFFSET = 0x001f,
+ HCI_READ_LMP_HANDLE = 0x0020,
+ HCI_SETUP_SYNCHRONOUS_CONNECTION = 0x0028,
+ HCI_ACCEPT_SYNCHRONOUS_CONNECTION_REQUEST = 0x0029,
+ HCI_REJECT_SYNCHRONOUS_CONNECTION_REQUEST = 0x002a,
+ HCI_IO_CAPABILITY_REQUEST_REPLY = 0x002b,
+ HCI_USER_CONFIRMATION_REQUEST_REPLY = 0x002c,
+ HCI_USER_CONFIRMATION_REQUEST_NEGATIVE_REPLY = 0x002d,
+ HCI_USER_PASSKEY_REQUEST_REPLY = 0x002e,
+ HCI_USER_PASSKEY_REQUESTNEGATIVE_REPLY = 0x002f,
+ HCI_REMOTE_OOB_DATA_REQUEST_REPLY = 0x0030,
+ HCI_REMOTE_OOB_DATA_REQUEST_NEGATIVE_REPLY = 0x0033,
+ HCI_IO_CAPABILITY_REQUEST_NEGATIVE_REPLY = 0x0034,
+ HCI_CREATE_PHYSICAL_LINK = 0x0035,
+ HCI_ACCEPT_PHYSICAL_LINK = 0x0036,
+ HCI_DISCONNECT_PHYSICAL_LINK = 0x0037,
+ HCI_CREATE_LOGICAL_LINK = 0x0038,
+ HCI_ACCEPT_LOGICAL_LINK = 0x0039,
+ HCI_DISCONNECT_LOGICAL_LINK = 0x003a,
+ HCI_LOGICAL_LINK_CANCEL = 0x003b,
+ HCI_FLOW_SPEC_MODIFY = 0x003c
+};
+
+/* OGF 0x02 */
+#define HOLD_MODE_COMMAND 0x02
+enum hold_mode_command {
+ HCI_HOLD_MODE = 0x0001,
+ HCI_SNIFF_MODE = 0x0002,
+ HCI_EXIT_SNIFF_MODE = 0x0003,
+ HCI_PARK_STATE = 0x0005,
+ HCI_EXIT_PARK_STATE = 0x0006,
+ HCI_QOS_SETUP = 0x0007,
+ HCI_ROLE_DISCOVERY = 0x0009,
+ HCI_SWITCH_ROLE = 0x000b,
+ HCI_READ_LINK_POLICY_SETTINGS = 0x000c,
+ HCI_WRITE_LINK_POLICY_SETTINGS = 0x000d,
+ HCI_READ_DEFAULT_LINK_POLICY_SETTINGS = 0x000e,
+ HCI_WRITE_DEFAULT_LINK_POLICY_SETTINGS = 0x000f,
+ HCI_FLOW_SPECIFICATION = 0x0010,
+ HCI_SNIFF_SUBRATING = 0x0011
+};
+
+/* OGF 0x03 */
+#define OGF_SET_EVENT_MASK_COMMAND 0x03
+enum set_event_mask_command {
+ HCI_SET_EVENT_MASK = 0x0001,
+ HCI_RESET = 0x0003,
+ HCI_SET_EVENT_FILTER = 0x0005,
+ HCI_FLUSH = 0x0008,
+ HCI_READ_PIN_TYPE = 0x0009,
+ HCI_WRITE_PIN_TYPE = 0x000a,
+ HCI_CREATE_NEW_UNIT_KEY = 0x000b,
+ HCI_READ_STORED_LINK_KEY = 0x000d,
+ HCI_WRITE_STORED_LINK_KEY = 0x0011,
+ HCI_DELETE_STORED_LINK_KEY = 0x0012,
+ HCI_WRITE_LOCAL_NAME = 0x0013,
+ HCI_READ_LOCAL_NAME = 0x0014,
+ HCI_READ_CONNECTION_ACCEPT_TIMEOUT = 0x0015,
+ HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT = 0x0016,
+ HCI_READ_PAGE_TIMEOUT = 0x0017,
+ HCI_WRITE_PAGE_TIMEOUT = 0x0018,
+ HCI_READ_SCAN_ENABLE = 0x0019,
+ HCI_WRITE_SCAN_ENABLE = 0x001a,
+ HCI_READ_PAGE_SCAN_ACTIVITY = 0x001b,
+ HCI_WRITE_PAGE_SCAN_ACTIVITY = 0x001c,
+ HCI_READ_INQUIRY_SCAN_ACTIVITY = 0x001d,
+ HCI_WRITE_INQUIRY_SCAN_ACTIVITY = 0x001e,
+ HCI_READ_AUTHENTICATION_ENABLE = 0x001f,
+ HCI_WRITE_AUTHENTICATION_ENABLE = 0x0020,
+ HCI_READ_CLASS_OF_DEVICE = 0x0023,
+ HCI_WRITE_CLASS_OF_DEVICE = 0x0024,
+ HCI_READ_VOICE_SETTING = 0x0025,
+ HCI_WRITE_VOICE_SETTING = 0x0026,
+ HCI_READ_AUTOMATIC_FLUSH_TIMEOUT = 0x0027,
+ HCI_WRITE_AUTOMATIC_FLUSH_TIMEOUT = 0x0028,
+ HCI_READ_NUM_BROADCAST_RETRANSMISSIONS = 0x0029,
+ HCI_WRITE_NUM_BROADCAST_RETRANSMISSIONS = 0x002a,
+ HCI_READ_HOLD_MODE_ACTIVITY = 0x002b,
+ HCI_WRITE_HOLD_MODE_ACTIVITY = 0x002c,
+ HCI_READ_SYNCHRONOUS_FLOW_CONTROL_ENABLE = 0x002e,
+ HCI_WRITE_SYNCHRONOUS_FLOW_CONTROL_ENABLE = 0x002f,
+ HCI_SET_CONTROLLER_TO_HOST_FLOW_CONTROL = 0x0031,
+ HCI_HOST_BUFFER_SIZE = 0x0033,
+ HCI_HOST_NUMBER_OF_COMPLETED_PACKETS = 0x0035,
+ HCI_READ_LINK_SUPERVISION_TIMEOUT = 0x0036,
+ HCI_WRITE_LINK_SUPERVISION_TIMEOUT = 0x0037,
+ HCI_READ_NUMBER_OF_SUPPORTED_IAC = 0x0038,
+ HCI_READ_CURRENT_IAC_LAP = 0x0039,
+ HCI_WRITE_CURRENT_IAC_LAP = 0x003a,
+ HCI_READ_PAGE_SCAN_MODE = 0x003d,
+ HCI_WRITE_PAGE_SCAN_MODE = 0x003e,
+ HCI_SET_AFH_HOST_CHANNEL_CLASSIFICATION = 0x003f,
+ HCI_READ_INQUIRY_SCAN_TYPE = 0x0042,
+ HCI_WRITE_INQUIRY_SCAN_TYPE = 0x0043,
+ HCI_READ_INQUIRY_MODE = 0x0044,
+ HCI_WRITE_INQUIRY_MODE = 0x0045,
+ HCI_READ_PAGE_SCAN_TYPE = 0x0046,
+ HCI_WRITE_PAGE_SCAN_TYPE = 0x0047,
+ HCI_READ_AFH_CHANNEL_ASSESSMENT_MODE = 0x0048,
+ HCI_WRITE_AFH_CHANNEL_ASSESSMENT_MODE = 0x0049,
+ HCI_READ_EXTENDED_INQUIRY_RESPONSE = 0x0051,
+ HCI_WRITE_EXTENDED_INQUIRY_RESPONSE = 0x0052,
+ HCI_REFRESH_ENCRYPTION_KEY = 0x0053,
+ HCI_READ_SIMPLE_PAIRING_MODE = 0x0055,
+ HCI_WRITE_SIMPLE_PAIRING_MODE = 0x0056,
+ HCI_READ_LOCAL_OOB_DATA = 0x0057,
+ HCI_READ_INQUIRY_RESPONSE_TRANSMIT_POWER_LEVEL = 0x0058,
+ HCI_WRITE_INQUIRY_TRANSMIT_POWER_LEVEL = 0x0059,
+ HCI_READ_DEFAULT_ERRONEOUS_DATA_REPORTING = 0x005a,
+ HCI_WRITE_DEFAULT_ERRONEOUS_DATA_REPORTING = 0x005b,
+ HCI_ENHANCED_FLUSH = 0x005f,
+ HCI_SEND_KEYPRESS_NOTIFICATION = 0x0060,
+ HCI_READ_LOGICAL_LINK_ACCEPT_TIMEOUT = 0x0061,
+ HCI_WRITE_LOGICAL_LINK_ACCEPT_TIMEOUT = 0x0062,
+ HCI_SET_EVENT_MASK_PAGE_2 = 0x0063,
+ HCI_READ_LOCATION_DATA = 0x0064,
+ HCI_WRITE_LOCATION_DATA = 0x0065,
+ HCI_READ_FLOW_CONTROL_MODE = 0x0066,
+ HCI_WRITE_FLOW_CONTROL_MODE = 0x0067,
+ HCI_READ_ENHANCE_TRANSMIT_POWER_LEVEL = 0x0068,
+ HCI_READ_BEST_EFFORT_FLUSH_TIMEOUT = 0x0069,
+ HCI_WRITE_BEST_EFFORT_FLUSH_TIMEOUT = 0x006a,
+ HCI_SHORT_RANGE_MODE = 0x006b
+};
+
+/* OGF 0x04 */
+#define OGF_INFORMATIONAL_PARAMETERS 0x04
+enum informational_params {
+ HCI_READ_LOCAL_VERSION_INFORMATION = 0x0001,
+ HCI_READ_LOCAL_SUPPORTED_COMMANDS = 0x0002,
+ HCI_READ_LOCAL_SUPPORTED_FEATURES = 0x0003,
+ HCI_READ_LOCAL_EXTENDED_FEATURES = 0x0004,
+ HCI_READ_BUFFER_SIZE = 0x0005,
+ HCI_READ_BD_ADDR = 0x0009,
+ HCI_READ_DATA_BLOCK_SIZE = 0x000a
+};
+
+/* OGF 0x05 */
+#define OGF_STATUS_PARAMETERS 0x05
+enum status_params {
+ HCI_READ_FAILED_CONTACT_COUNTER = 0x0001,
+ HCI_RESET_FAILED_CONTACT_COUNTER = 0x0002,
+ HCI_READ_LINK_QUALITY = 0x0003,
+ HCI_READ_RSSI = 0x0005,
+ HCI_READ_AFH_CHANNEL_MAP = 0x0006,
+ HCI_READ_CLOCK = 0x0007,
+ HCI_READ_ENCRYPTION_KEY_SIZE = 0x0008,
+ HCI_READ_LOCAL_AMP_INFO = 0x0009,
+ HCI_READ_LOCAL_AMP_ASSOC = 0x000a,
+ HCI_WRITE_REMOTE_AMP_ASSOC = 0x000b
+};
+
+/* OGF 0x06 */
+#define OGF_TESTING_COMMANDS 0x06
+enum testing_commands {
+ HCI_READ_LOOPBACK_MODE = 0x0001,
+ HCI_WRITE_LOOPBACK_MODE = 0x0002,
+ HCI_ENABLE_DEVICE_UNDER_TEST_MODE = 0x0003,
+ HCI_WRITE_SIMPLE_PAIRING_DEBUG_MODE = 0x0004,
+ HCI_ENABLE_AMP_RECEIVER_REPORTS = 0x0007,
+ HCI_AMP_TEST_END = 0x0008,
+ HCI_AMP_TEST_COMMAND = 0x0009
+};
+
+/* OGF 0x3f */
+#define OGF_EXTENSION 0X3f
+enum hci_extension_commands {
+ HCI_SET_ACL_LINK_DATA_FLOW_MODE = 0x0010,
+ HCI_SET_ACL_LINK_STATUS = 0x0020,
+ HCI_SET_SCO_LINK_STATUS = 0x0030,
+ HCI_SET_RSSI_VALUE = 0x0040,
+ HCI_SET_CURRENT_BLUETOOTH_STATUS = 0x0041,
+
+ /* The following is for RTK8723 */
+ HCI_EXTENSION_VERSION_NOTIFY = 0x0100,
+ HCI_LINK_STATUS_NOTIFY = 0x0101,
+ HCI_BT_OPERATION_NOTIFY = 0x0102,
+ HCI_ENABLE_WIFI_SCAN_NOTIFY = 0x0103,
+
+
+ /* The following is for IVT */
+ HCI_WIFI_CURRENT_CHANNEL = 0x0300,
+ HCI_WIFI_CURRENT_BANDWIDTH = 0x0301,
+ HCI_WIFI_CONNECTION_STATUS = 0x0302,
+};
+
+enum bt_spec {
+ BT_SPEC_1_0_b = 0x00,
+ BT_SPEC_1_1 = 0x01,
+ BT_SPEC_1_2 = 0x02,
+ BT_SPEC_2_0_EDR = 0x03,
+ BT_SPEC_2_1_EDR = 0x04,
+ BT_SPEC_3_0_HS = 0x05,
+ BT_SPEC_4_0 = 0x06
+};
+
+/* The following is for BT 3.0 + HS EVENTS */
+enum hci_event {
+ HCI_EVENT_INQUIRY_COMPLETE = 0x01,
+ HCI_EVENT_INQUIRY_RESULT = 0x02,
+ HCI_EVENT_CONNECTION_COMPLETE = 0x03,
+ HCI_EVENT_CONNECTION_REQUEST = 0x04,
+ HCI_EVENT_DISCONNECTION_COMPLETE = 0x05,
+ HCI_EVENT_AUTHENTICATION_COMPLETE = 0x06,
+ HCI_EVENT_REMOTE_NAME_REQUEST_COMPLETE = 0x07,
+ HCI_EVENT_ENCRYPTION_CHANGE = 0x08,
+ HCI_EVENT_CHANGE_LINK_KEY_COMPLETE = 0x09,
+ HCI_EVENT_MASTER_LINK_KEY_COMPLETE = 0x0a,
+ HCI_EVENT_READ_REMOTE_SUPPORT_FEATURES_COMPLETE = 0x0b,
+ HCI_EVENT_READ_REMOTE_VER_INFO_COMPLETE = 0x0c,
+ HCI_EVENT_QOS_SETUP_COMPLETE = 0x0d,
+ HCI_EVENT_COMMAND_COMPLETE = 0x0e,
+ HCI_EVENT_COMMAND_STATUS = 0x0f,
+ HCI_EVENT_HARDWARE_ERROR = 0x10,
+ HCI_EVENT_FLUSH_OCCRUED = 0x11,
+ HCI_EVENT_ROLE_CHANGE = 0x12,
+ HCI_EVENT_NUMBER_OF_COMPLETE_PACKETS = 0x13,
+ HCI_EVENT_MODE_CHANGE = 0x14,
+ HCI_EVENT_RETURN_LINK_KEYS = 0x15,
+ HCI_EVENT_PIN_CODE_REQUEST = 0x16,
+ HCI_EVENT_LINK_KEY_REQUEST = 0x17,
+ HCI_EVENT_LINK_KEY_NOTIFICATION = 0x18,
+ HCI_EVENT_LOOPBACK_COMMAND = 0x19,
+ HCI_EVENT_DATA_BUFFER_OVERFLOW = 0x1a,
+ HCI_EVENT_MAX_SLOTS_CHANGE = 0x1b,
+ HCI_EVENT_READ_CLOCK_OFFSET_COMPLETE = 0x1c,
+ HCI_EVENT_CONNECT_PACKET_TYPE_CHANGE = 0x1d,
+ HCI_EVENT_QOS_VIOLATION = 0x1e,
+ HCI_EVENT_PAGE_SCAN_REPETITION_MODE_CHANGE = 0x20,
+ HCI_EVENT_FLOW_SEPC_COMPLETE = 0x21,
+ HCI_EVENT_INQUIRY_RESULT_WITH_RSSI = 0x22,
+ HCI_EVENT_READ_REMOTE_EXT_FEATURES_COMPLETE = 0x23,
+ HCI_EVENT_SYNC_CONNECT_COMPLETE = 0x2c,
+ HCI_EVENT_SYNC_CONNECT_CHANGE = 0x2d,
+ HCI_EVENT_SNIFFER_SUBRATING = 0x2e,
+ HCI_EVENT_EXTENTED_INQUIRY_RESULT = 0x2f,
+ HCI_EVENT_ENCRYPTION_KEY_REFLASH_COMPLETE = 0x30,
+ HCI_EVENT_IO_CAPIBILITY_COMPLETE = 0x31,
+ HCI_EVENT_IO_CAPIBILITY_RESPONSE = 0x32,
+ HCI_EVENT_USER_CONFIRMTION_REQUEST = 0x33,
+ HCI_EVENT_USER_PASSKEY_REQUEST = 0x34,
+ HCI_EVENT_REMOTE_OOB_DATA_REQUEST = 0x35,
+ HCI_EVENT_SIMPLE_PAIRING_COMPLETE = 0x36,
+ HCI_EVENT_LINK_SUPERVISION_TIMEOUT_CHANGE = 0x38,
+ HCI_EVENT_ENHANCED_FLUSH_COMPLETE = 0x39,
+ HCI_EVENT_USER_PASSKEY_NOTIFICATION = 0x3b,
+ HCI_EVENT_KEYPRESS_NOTIFICATION = 0x3c,
+ HCI_EVENT_REMOTE_HOST_SUPPORT_FEATURES_NOTIFICATION = 0x3d,
+ HCI_EVENT_PHY_LINK_COMPLETE = 0x40,
+ HCI_EVENT_CHANNEL_SELECT = 0x41,
+ HCI_EVENT_DISCONNECT_PHY_LINK_COMPLETE = 0x42,
+ HCI_EVENT_PHY_LINK_LOSS_EARLY_WARNING = 0x43,
+ HCI_EVENT_PHY_LINK_RECOVER = 0x44,
+ HCI_EVENT_LOGICAL_LINK_COMPLETE = 0x45,
+ HCI_EVENT_DISCONNECT_LOGICAL_LINK_COMPLETE = 0x46,
+ HCI_EVENT_FLOW_SPEC_MODIFY_COMPLETE = 0x47,
+ HCI_EVENT_NUM_OF_COMPLETE_DATA_BLOCKS = 0x48,
+ HCI_EVENT_AMP_START_TEST = 0x49,
+ HCI_EVENT_AMP_TEST_END = 0x4a,
+ HCI_EVENT_AMP_RECEIVER_REPORT = 0x4b,
+ HCI_EVENT_SHORT_RANGE_MODE_CHANGE_COMPLETE = 0x4c,
+ HCI_EVENT_AMP_STATUS_CHANGE = 0x4d,
+ HCI_EVENT_EXTENSION_RTK = 0xfe,
+ HCI_EVENT_EXTENSION_MOTO = 0xff,
+};
+
+enum hci_extension_event_moto {
+ HCI_EVENT_GET_BT_RSSI = 0x01,
+};
+
+enum hci_extension_event {
+ HCI_EVENT_EXT_WIFI_SCAN_NOTIFY = 0x01,
+};
+
+enum hci_event_mask_page_2 {
+ EMP2_HCI_EVENT_PHY_LINK_COMPLETE = 0x0000000000000001,
+ EMP2_HCI_EVENT_CHANNEL_SELECT = 0x0000000000000002,
+ EMP2_HCI_EVENT_DISCONNECT_PHY_LINK_COMPLETE = 0x0000000000000004,
+ EMP2_HCI_EVENT_PHY_LINK_LOSS_EARLY_WARNING = 0x0000000000000008,
+ EMP2_HCI_EVENT_PHY_LINK_RECOVER = 0x0000000000000010,
+ EMP2_HCI_EVENT_LOGICAL_LINK_COMPLETE = 0x0000000000000020,
+ EMP2_HCI_EVENT_DISCONNECT_LOGICAL_LINK_COMPLETE = 0x0000000000000040,
+ EMP2_HCI_EVENT_FLOW_SPEC_MODIFY_COMPLETE = 0x0000000000000080,
+ EMP2_HCI_EVENT_NUM_OF_COMPLETE_DATA_BLOCKS = 0x0000000000000100,
+ EMP2_HCI_EVENT_AMP_START_TEST = 0x0000000000000200,
+ EMP2_HCI_EVENT_AMP_TEST_END = 0x0000000000000400,
+ EMP2_HCI_EVENT_AMP_RECEIVER_REPORT = 0x0000000000000800,
+ EMP2_HCI_EVENT_SHORT_RANGE_MODE_CHANGE_COMPLETE = 0x0000000000001000,
+ EMP2_HCI_EVENT_AMP_STATUS_CHANGE = 0x0000000000002000,
+};
+
+enum hci_state_machine {
+ HCI_STATE_STARTING = 0x01,
+ HCI_STATE_CONNECTING = 0x02,
+ HCI_STATE_AUTHENTICATING = 0x04,
+ HCI_STATE_CONNECTED = 0x08,
+ HCI_STATE_DISCONNECTING = 0x10,
+ HCI_STATE_DISCONNECTED = 0x20
+};
+
+enum amp_assoc_structure_type {
+ AMP_MAC_ADDR = 0x01,
+ AMP_PREFERRED_CHANNEL_LIST = 0x02,
+ AMP_CONNECTED_CHANNEL = 0x03,
+ AMP_80211_PAL_CAP_LIST = 0x04,
+ AMP_80211_PAL_VISION = 0x05,
+ AMP_RESERVED_FOR_TESTING = 0x33
+};
+
+enum amp_btap_type {
+ AMP_BTAP_NONE,
+ AMP_BTAP_CREATOR,
+ AMP_BTAP_JOINER
+};
+
+enum hci_state_with_cmd {
+ STATE_CMD_CREATE_PHY_LINK,
+ STATE_CMD_ACCEPT_PHY_LINK,
+ STATE_CMD_DISCONNECT_PHY_LINK,
+ STATE_CMD_CONNECT_ACCEPT_TIMEOUT,
+ STATE_CMD_MAC_START_COMPLETE,
+ STATE_CMD_MAC_START_FAILED,
+ STATE_CMD_MAC_CONNECT_COMPLETE,
+ STATE_CMD_MAC_CONNECT_FAILED,
+ STATE_CMD_MAC_DISCONNECT_INDICATE,
+ STATE_CMD_MAC_CONNECT_CANCEL_INDICATE,
+ STATE_CMD_4WAY_FAILED,
+ STATE_CMD_4WAY_SUCCESSED,
+ STATE_CMD_ENTER_STATE,
+ STATE_CMD_NO_SUCH_CMD,
+};
+
+enum hci_service_type {
+ SERVICE_NO_TRAFFIC,
+ SERVICE_BEST_EFFORT,
+ SERVICE_GUARANTEE
+};
+
+enum hci_traffic_mode {
+ TRAFFIC_MODE_BEST_EFFORT = 0x00,
+ TRAFFIC_MODE_GUARANTEED_LATENCY = 0x01,
+ TRAFFIC_MODE_GUARANTEED_BANDWIDTH = 0x02,
+ TRAFFIC_MODE_GUARANTEED_LATENCY_AND_BANDWIDTH = 0x03
+};
+
+#define HCIOPCODE(_OCF, _OGF) ((_OGF) << 10 | (_OCF))
+#define HCIOPCODELOW(_OCF, _OGF) ((u8)(HCIOPCODE(_OCF, _OGF) & 0x00ff))
+#define HCIOPCODEHIGHT(_OCF, _OGF) ((u8)(HCIOPCODE(_OCF, _OGF) >> 8))
+
+#define TWOBYTE_HIGHTBYTE(_DATA) ((u8)((_DATA) >> 8))
+#define TWOBYTE_LOWBYTE(_DATA) ((u8)(_DATA))
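+
+/*
+ * Worked example (illustrative only, not from the SD7 sources): the macros
+ * above pack a 10-bit OCF and a 6-bit OGF into one 16-bit opcode, e.g.
+ * HCIOPCODE(HCI_RESET, OGF_SET_EVENT_MASK_COMMAND) == (0x03 << 10) | 0x0003
+ * == 0x0c03, and HCIOPCODELOW()/HCIOPCODEHIGHT() then yield the two bytes
+ * 0x03 and 0x0c in the order used by the little-endian HCI opcode field.
+ */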
+
+enum amp_status {
+ AMP_STATUS_AVA_PHY_PWR_DWN = 0x0,
+ AMP_STATUS_BT_USE_ONLY = 0x1,
+ AMP_STATUS_NO_CAPACITY_FOR_BT = 0x2,
+ AMP_STATUS_LOW_CAPACITY_FOR_BT = 0x3,
+ AMP_STATUS_MEDIUM_CAPACITY_FOR_BT = 0x4,
+ AMP_STATUS_HIGH_CAPACITY_FOR_BT = 0x5,
+ AMP_STATUS_FULL_CAPACITY_FOR_BT = 0x6
+};
+
+enum bt_wpa_msg_type {
+ Type_BT_4way1st = 0,
+ Type_BT_4way2nd = 1,
+ Type_BT_4way3rd = 2,
+ Type_BT_4way4th = 3,
+ Type_BT_unknow = 4
+};
+
+enum bt_connect_type {
+ BT_CONNECT_AUTH_REQ = 0x00,
+ BT_CONNECT_AUTH_RSP = 0x01,
+ BT_CONNECT_ASOC_REQ = 0x02,
+ BT_CONNECT_ASOC_RSP = 0x03,
+ BT_DISCONNECT = 0x04
+};
+
+enum bt_ll_service_type {
+ BT_LL_BE = 0x01,
+ BT_LL_GU = 0x02
+};
+
+enum bt_ll_flowspec {
+ BT_TX_BE_FS, /* TX best effort flowspec */
+ BT_RX_BE_FS, /* RX best effort flowspec */
+ BT_TX_GU_FS, /* TX guaranteed latency flowspec */
+ BT_RX_GU_FS, /* RX guaranteed latency flowspec */
+ BT_TX_BE_AGG_FS, /* TX aggregated best effort flowspec */
+ BT_RX_BE_AGG_FS, /* RX aggregated best effort flowspec */
+ BT_TX_GU_BW_FS, /* TX guaranteed bandwidth flowspec */
+ BT_RX_GU_BW_FS, /* RX guaranteed bandwidth flowspec */
+ BT_TX_GU_LARGE_FS, /* TX guaranteed latency flowspec, for testing only */
+ BT_RX_GU_LARGE_FS, /* RX guaranteed latency flowspec, for testing only */
+};
+
+enum bt_traffic_mode {
+ BT_MOTOR_EXT_BE = 0x00, /* Best Effort. Default. for HCRP, PAN, SDP, RFCOMM-based profiles like FTP, OPP, SPP, DUN, etc. */
+ BT_MOTOR_EXT_GUL = 0x01, /* Guaranteed Latency. This type of traffic is used e.g. for HID and AVRCP. */
+ BT_MOTOR_EXT_GUB = 0X02, /* Guaranteed Bandwidth. */
+ BT_MOTOR_EXT_GULB = 0X03 /* Guaranteed Latency and Bandwidth. for A2DP and VDP. */
+};
+
+enum bt_traffic_mode_profile {
+ BT_PROFILE_NONE,
+ BT_PROFILE_A2DP,
+ BT_PROFILE_PAN,
+ BT_PROFILE_HID,
+ BT_PROFILE_SCO
+};
+
+enum bt_link_role {
+ BT_LINK_MASTER = 0,
+ BT_LINK_SLAVE = 1
+};
+
+enum bt_state_wpa_auth {
+ STATE_WPA_AUTH_UNINITIALIZED,
+ STATE_WPA_AUTH_WAIT_PACKET_1, /* Joiner side */
+ STATE_WPA_AUTH_WAIT_PACKET_2, /* Creator side */
+ STATE_WPA_AUTH_WAIT_PACKET_3,
+ STATE_WPA_AUTH_WAIT_PACKET_4,
+ STATE_WPA_AUTH_SUCCESSED
+};
+
+#define BT_WPA_AUTH_TIMEOUT_PERIOD 1000
+#define BTMaxWPAAuthReTransmitCoun 5
+
+#define MAX_AMP_ASSOC_FRAG_LEN 248
+#define TOTAL_ALLOCIATE_ASSOC_LEN 1000
+
+struct hci_flow_spec {
+ u8 Identifier;
+ u8 ServiceType;
+ u16 MaximumSDUSize;
+ u32 SDUInterArrivalTime;
+ u32 AccessLatency;
+ u32 FlushTimeout;
+};
+
+struct hci_log_link_cmd_data {
+ u8 BtPhyLinkhandle;
+ u16 BtLogLinkhandle;
+ u8 BtTxFlowSpecID;
+ struct hci_flow_spec Tx_Flow_Spec;
+ struct hci_flow_spec Rx_Flow_Spec;
+ u32 TxPacketCount;
+ u32 BestEffortFlushTimeout;
+
+ u8 bLLCompleteEventIsSet;
+
+ u8 bLLCancelCMDIsSetandComplete;
+};
+
+struct hci_phy_link_cmd_data {
+ /* Physical_Link_Handle */
+ u8 BtPhyLinkhandle;
+
+ u16 LinkSuperversionTimeout;
+
+ /* u16 SuperTimeOutCnt; */
+
+ /* Dedicated_AMP_Key_Length */
+ u8 BtAMPKeyLen;
+ /* Dedicated_AMP_Key_Type */
+ u8 BtAMPKeyType;
+ /* Dedicated_AMP_Key */
+ u8 BtAMPKey[PMK_LEN];
+};
+
+struct amp_assoc_structure {
+ /* TYPE ID */
+ u8 TypeID;
+ /* Length */
+ u16 Length;
+ /* Value */
+ u8 Data[1];
+};
+
+struct amp_pref_chnl_regulatory {
+ u8 reXId;
+ u8 regulatoryClass;
+ u8 coverageClass;
+};
+
+struct amp_assoc_cmd_data {
+ /* Physical_Link_Handle */
+ u8 BtPhyLinkhandle;
+ /* Length_So_Far */
+ u16 LenSoFar;
+
+ u16 MaxRemoteASSOCLen;
+ /* AMP_ASSOC_Remaining_Length */
+ u16 AMPAssocRemLen;
+ /* AMP_ASSOC_fragment */
+ void *AMPAssocfragment;
+};
+
+struct hci_link_info {
+ u16 ConnectHandle;
+ u8 IncomingTrafficMode;
+ u8 OutgoingTrafficMode;
+ u8 BTProfile;
+ u8 BTCoreSpec;
+ s8 BT_RSSI;
+ u8 TrafficProfile;
+ u8 linkRole;
+};
+
+struct hci_ext_config {
+ struct hci_link_info linkInfo[MAX_BT_ASOC_ENTRY_NUM];
+ u8 btOperationCode;
+ u16 CurrentConnectHandle;
+ u8 CurrentIncomingTrafficMode;
+ u8 CurrentOutgoingTrafficMode;
+ s8 MIN_BT_RSSI;
+ u8 NumberOfHandle;
+ u8 NumberOfSCO;
+ u8 CurrentBTStatus;
+ u16 HCIExtensionVer;
+
+ /* Bt coexist related */
+ u8 btProfileCase;
+ u8 btProfileAction;
+ u8 bManualControl;
+ u8 bBTBusy;
+ u8 bBTA2DPBusy;
+ u8 bEnableWifiScanNotify;
+
+ u8 bHoldForBtOperation;
+ u32 bHoldPeriodCnt;
+};
+
+struct hci_acl_packet_data {
+ u16 ACLDataPacketLen;
+ u8 SyncDataPacketLen;
+ u16 TotalNumACLDataPackets;
+ u16 TotalSyncNumDataPackets;
+};
+
+struct hci_phy_link_bss_info {
+ u16 bdCap; /* capability information */
+};
+
+struct packet_irp_hcicmd_data {
+ u16 OCF:10;
+ u16 OGF:6;
+ u8 Length;
+ u8 Data[20];
+};
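+
+/*
+ * The OCF:10/OGF:6 bit-fields above mirror the opcode layout documented
+ * earlier; note that this relies on the compiler allocating bit-fields from
+ * the least significant bit up, which holds on the little-endian targets
+ * this driver is built for but is not guaranteed by the C standard.
+ */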
+
+struct bt_asoc_entry {
+ u8 bUsed;
+ u8 mAssoc;
+ u8 b4waySuccess;
+ u8 Bssid[6];
+ struct hci_phy_link_cmd_data PhyLinkCmdData;
+
+ struct hci_log_link_cmd_data LogLinkCmdData[MAX_LOGICAL_LINK_NUM];
+
+ struct hci_acl_packet_data ACLPacketsData;
+
+ struct amp_assoc_cmd_data AmpAsocCmdData;
+ struct octet_string BTSsid;
+ u8 BTSsidBuf[33];
+
+ enum hci_status PhyLinkDisconnectReason;
+
+ u8 bSendSupervisionPacket;
+ /* u8 CurrentSuervisionPacketSendNum; */
+ /* u8 LastSuervisionPacketSendNum; */
+ u32 NoRxPktCnt;
+ /* Is Creator or Joiner */
+ enum amp_btap_type AMPRole;
+
+ /* BT current state */
+ u8 BtCurrentState;
+ /* BT next state */
+ u8 BtNextState;
+
+ u8 bNeedPhysLinkCompleteEvent;
+
+ enum hci_status PhysLinkCompleteStatus;
+
+ u8 BTRemoteMACAddr[6];
+
+ u32 BTCapability;
+
+ u8 SyncDataPacketLen;
+
+ u16 TotalSyncNumDataPackets;
+ u16 TotalNumACLDataPackets;
+
+ u8 ShortRangeMode;
+
+ u8 PTK[PTK_LEN_TKIP];
+ u8 GTK[GTK_LEN];
+ u8 ANonce[KEY_NONCE_LEN];
+ u8 SNonce[KEY_NONCE_LEN];
+ u64 KeyReplayCounter;
+ u8 WPAAuthReplayCount;
+ u8 AESKeyBuf[AESCCMP_BLK_SIZE_TOTAL];
+ u8 PMK[PMK_LEN];
+ enum bt_state_wpa_auth BTWPAAuthState;
+ s32 UndecoratedSmoothedPWDB;
+
+ /* Add for HW security !! */
+ u8 HwCAMIndex; /* Cam index */
+ u8 bPeerQosSta;
+
+ u32 rxSuvpPktCnt;
+};
+
+struct bt_traffic_statistics {
+ u8 bTxBusyTraffic;
+ u8 bRxBusyTraffic;
+ u8 bIdle;
+ u32 TxPktCntInPeriod;
+ u32 RxPktCntInPeriod;
+ u64 TxPktLenInPeriod;
+ u64 RxPktLenInPeriod;
+};
+
+struct bt_mgnt {
+ u8 bBTConnectInProgress;
+ u8 bLogLinkInProgress;
+ u8 bPhyLinkInProgress;
+ u8 bPhyLinkInProgressStartLL;
+ u8 BtCurrentPhyLinkhandle;
+ u16 BtCurrentLogLinkhandle;
+ u8 CurrentConnectEntryNum;
+ u8 DisconnectEntryNum;
+ u8 CurrentBTConnectionCnt;
+ enum bt_connect_type BTCurrentConnectType;
+ enum bt_connect_type BTReceiveConnectPkt;
+ u8 BTAuthCount;
+ u8 BTAsocCount;
+ u8 bStartSendSupervisionPkt;
+ u8 BtOperationOn;
+ u8 BTNeedAMPStatusChg;
+ u8 JoinerNeedSendAuth;
+ struct hci_phy_link_bss_info bssDesc;
+ struct hci_ext_config ExtConfig;
+ u8 bNeedNotifyAMPNoCap;
+ u8 bCreateSpportQos;
+ u8 bSupportProfile;
+ u8 BTChannel;
+ u8 CheckChnlIsSuit;
+ u8 bBtScan;
+ u8 btLogoTest;
+};
+
+struct bt_hci_dgb_info {
+ u32 hciCmdCnt;
+ u32 hciCmdCntUnknown;
+ u32 hciCmdCntCreatePhyLink;
+ u32 hciCmdCntAcceptPhyLink;
+ u32 hciCmdCntDisconnectPhyLink;
+ u32 hciCmdPhyLinkStatus;
+ u32 hciCmdCntCreateLogLink;
+ u32 hciCmdCntAcceptLogLink;
+ u32 hciCmdCntDisconnectLogLink;
+ u32 hciCmdCntReadLocalAmpAssoc;
+ u32 hciCmdCntWriteRemoteAmpAssoc;
+ u32 hciCmdCntSetAclLinkStatus;
+ u32 hciCmdCntSetScoLinkStatus;
+ u32 hciCmdCntExtensionVersionNotify;
+ u32 hciCmdCntLinkStatusNotify;
+};
+
+struct bt_irp_dgb_info {
+ u32 irpMJCreate;
+ /* Io Control */
+ u32 irpIoControl;
+ u32 irpIoCtrlHciCmd;
+ u32 irpIoCtrlHciEvent;
+ u32 irpIoCtrlHciTxData;
+ u32 irpIoCtrlHciRxData;
+ u32 irpIoCtrlUnknown;
+
+ u32 irpIoCtrlHciTxData1s;
+};
+
+struct bt_packet_dgb_info {
+ u32 btPktTxProbReq;
+ u32 btPktRxProbReq;
+ u32 btPktRxProbReqFail;
+ u32 btPktTxProbRsp;
+ u32 btPktRxProbRsp;
+ u32 btPktTxAuth;
+ u32 btPktRxAuth;
+ u32 btPktRxAuthButDrop;
+ u32 btPktTxAssocReq;
+ u32 btPktRxAssocReq;
+ u32 btPktRxAssocReqButDrop;
+ u32 btPktTxAssocRsp;
+ u32 btPktRxAssocRsp;
+ u32 btPktTxDisassoc;
+ u32 btPktRxDisassoc;
+ u32 btPktRxDeauth;
+ u32 btPktTx4way1st;
+ u32 btPktRx4way1st;
+ u32 btPktTx4way2nd;
+ u32 btPktRx4way2nd;
+ u32 btPktTx4way3rd;
+ u32 btPktRx4way3rd;
+ u32 btPktTx4way4th;
+ u32 btPktRx4way4th;
+ u32 btPktTxLinkSuperReq;
+ u32 btPktRxLinkSuperReq;
+ u32 btPktTxLinkSuperRsp;
+ u32 btPktRxLinkSuperRsp;
+ u32 btPktTxData;
+ u32 btPktRxData;
+};
+
+struct bt_dgb {
+ u8 dbgCtrl;
+ u32 dbgProfile;
+ struct bt_hci_dgb_info dbgHciInfo;
+ struct bt_irp_dgb_info dbgIrpInfo;
+ struct bt_packet_dgb_info dbgBtPkt;
+};
+
+struct bt_hci_info {
+ /* 802.11 Pal version specifier */
+ u8 BTPalVersion;
+ u16 BTPalCompanyID;
+ u16 BTPalsubversion;
+
+ /* Connected channel list */
+ u16 BTConnectChnlListLen;
+ u8 BTConnectChnllist[64];
+
+ /* Fail contact counter */
+ u16 FailContactCount;
+
+ /* Event mask */
+ u64 BTEventMask;
+ u64 BTEventMaskPage2;
+
+ /* timeout var */
+ u16 ConnAcceptTimeout;
+ u16 LogicalAcceptTimeout;
+ u16 PageTimeout;
+
+ u8 LocationDomainAware;
+ u16 LocationDomain;
+ u8 LocationDomainOptions;
+ u8 LocationOptions;
+
+ u8 FlowControlMode;
+
+ /* Preferred channel list */
+ u16 BtPreChnlListLen;
+ u8 BTPreChnllist[64];
+
+ u16 enFlush_LLH; /* enhanced flush logical link handle */
+ u16 FLTO_LLH; /* flush timeout logical link handle */
+
+ /* */
+ /* Test command only. */
+ u8 bInTestMode;
+ u8 bTestIsEnd;
+ u8 bTestNeedReport;
+ u8 TestScenario;
+ u8 TestReportInterval;
+ u8 TestCtrType;
+ u32 TestEventType;
+ u16 TestNumOfFrame;
+ u16 TestNumOfErrFrame;
+ u16 TestNumOfBits;
+ u16 TestNumOfErrBits;
+ /* */
+};
+
+struct bt_traffic {
+ /* Add for check replay data */
+ u8 LastRxUniFragNum;
+ u16 LastRxUniSeqNum;
+
+ /* s32 EntryMaxUndecoratedSmoothedPWDB; */
+ /* s32 EntryMinUndecoratedSmoothedPWDB; */
+
+ struct bt_traffic_statistics Bt30TrafficStatistics;
+};
+
+#define RT_WORK_ITEM struct work_struct
+
+struct bt_security {
+ /* WPA auth state
+ * May need to remove to BTSecInfo ...
+ * enum bt_state_wpa_auth BTWPAAuthState;
+ */
+ struct octet_string RSNIE;
+ u8 RSNIEBuf[MAXRSNIELEN];
+ u8 bRegNoEncrypt;
+ u8 bUsedHwEncrypt;
+};
+
+struct bt_30info {
+ struct rtw_adapter *padapter;
+ struct bt_asoc_entry BtAsocEntry[MAX_BT_ASOC_ENTRY_NUM];
+ struct bt_mgnt BtMgnt;
+ struct bt_dgb BtDbg;
+ struct bt_hci_info BtHciInfo;
+ struct bt_traffic BtTraffic;
+ struct bt_security BtSec;
+ RT_WORK_ITEM HCICmdWorkItem;
+ struct timer_list BTHCICmdTimer;
+ RT_WORK_ITEM BTPsDisableWorkItem;
+ RT_WORK_ITEM BTConnectWorkItem;
+ struct timer_list BTHCIDiscardAclDataTimer;
+ struct timer_list BTHCIJoinTimeoutTimer;
+ struct timer_list BTTestSendPacketTimer;
+ struct timer_list BTDisconnectPhyLinkTimer;
+ struct timer_list BTBeaconTimer;
+ u8 BTBeaconTmrOn;
+
+ struct timer_list BTPsDisableTimer;
+
+ void * pBtChnlList;
+};
+
+struct packet_irp_acl_data {
+ u16 Handle:12;
+ u16 PB_Flag:2;
+ u16 BC_Flag:2;
+ u16 Length;
+ u8 Data[1];
+};
+
+struct packet_irp_hcievent_data {
+ u8 EventCode;
+ u8 Length;
+ u8 Data[20];
+};
+
+struct common_triple {
+ u8 byte_1st;
+ u8 byte_2nd;
+ u8 byte_3rd;
+};
+
+#define COUNTRY_STR_LEN 3 /* country string len = 3 */
+
+#define LOCAL_PMK 0
+
+enum hci_wifi_connect_status {
+ HCI_WIFI_NOT_CONNECTED = 0x0,
+ HCI_WIFI_CONNECTED = 0x1,
+ HCI_WIFI_CONNECT_IN_PROGRESS = 0x2,
+};
+
+enum hci_ext_bp_operation {
+ HCI_BT_OP_NONE = 0x0,
+ HCI_BT_OP_INQUIRY_START = 0x1,
+ HCI_BT_OP_INQUIRY_FINISH = 0x2,
+ HCI_BT_OP_PAGING_START = 0x3,
+ HCI_BT_OP_PAGING_SUCCESS = 0x4,
+ HCI_BT_OP_PAGING_UNSUCCESS = 0x5,
+ HCI_BT_OP_PAIRING_START = 0x6,
+ HCI_BT_OP_PAIRING_FINISH = 0x7,
+ HCI_BT_OP_BT_DEV_ENABLE = 0x8,
+ HCI_BT_OP_BT_DEV_DISABLE = 0x9,
+ HCI_BT_OP_MAX
+};
+
+/* Function proto type */
+struct btdata_entry {
+ struct list_head List;
+ void *pDataBlock;
+};
+
+#define BTHCI_SM_WITH_INFO(_Adapter, _StateToEnter, _StateCmd, _EntryNum) \
+{ \
+ RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state change] caused by %s, line =%d\n", __func__, __LINE__)); \
+ BTHCI_StateMachine(_Adapter, _StateToEnter, _StateCmd, _EntryNum);\
+}
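+
+/*
+ * Typical use of the wrapper above (illustrative only):
+ *
+ * BTHCI_SM_WITH_INFO(padapter, HCI_STATE_CONNECTED,
+ * STATE_CMD_MAC_CONNECT_COMPLETE, EntryNum);
+ *
+ * which logs the calling function and line and then invokes
+ * BTHCI_StateMachine() with the same arguments.
+ */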
+
+void BTHCI_EventParse(struct rtw_adapter * padapter, void *pEvntData, u32 dataLen);
+#define BT_EventParse BTHCI_EventParse
+u8 BTHCI_HsConnectionEstablished(struct rtw_adapter * padapter);
+void BTHCI_UpdateBTProfileRTKToMoto(struct rtw_adapter * padapter);
+void BTHCI_WifiScanNotify(struct rtw_adapter * padapter, u8 scanType);
+void BTHCI_StateMachine(struct rtw_adapter * padapter, u8 StateToEnter, enum hci_state_with_cmd StateCmd, u8 EntryNum);
+void BTHCI_DisconnectPeer(struct rtw_adapter * padapter, u8 EntryNum);
+void BTHCI_EventNumOfCompletedDataBlocks(struct rtw_adapter * padapter);
+void BTHCI_EventAMPStatusChange(struct rtw_adapter * padapter, u8 AMP_Status);
+void BTHCI_DisconnectAll(struct rtw_adapter * padapter);
+enum hci_status BTHCI_HandleHCICMD(struct rtw_adapter * padapter, struct packet_irp_hcicmd_data *pHciCmd);
+
+/* ===== End of sync from SD7 driver COMMON/bt_hci.h ===== */
+#endif /* __BT_HCI_C__ */
+
+#ifdef __HALBTC87231ANT_C__ /* HAL/BTCoexist/HalBtc87231Ant.h */
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc87231Ant.h ===== */
+#define GET_BT_INFO(padapter) (&GET_HAL_DATA(padapter)->BtInfo)
+
+#define BTC_FOR_SCAN_START 1
+#define BTC_FOR_SCAN_FINISH 0
+
+#define BT_TXRX_CNT_THRES_1 1200
+#define BT_TXRX_CNT_THRES_2 1400
+#define BT_TXRX_CNT_THRES_3 3000
+#define BT_TXRX_CNT_LEVEL_0 0 /* < 1200 */
+#define BT_TXRX_CNT_LEVEL_1 1 /* >= 1200 && < 1400 */
+#define BT_TXRX_CNT_LEVEL_2 2 /* >= 1400 */
+#define BT_TXRX_CNT_LEVEL_3 3 /* >= 3000 */
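+
+/*
+ * Minimal sketch (not part of the synced SD7 code; the helper name is made
+ * up for illustration and the real classification lives in the coexist .c
+ * files): how the THRES_x and LEVEL_x values above relate for a combined
+ * BT TX+RX counter.
+ */
+static inline u8 btdm_txrx_cnt_level_example(u32 cnt)
+{
+ if (cnt >= BT_TXRX_CNT_THRES_3)
+  return BT_TXRX_CNT_LEVEL_3; /* >= 3000 */
+ if (cnt >= BT_TXRX_CNT_THRES_2)
+  return BT_TXRX_CNT_LEVEL_2; /* >= 1400 */
+ if (cnt >= BT_TXRX_CNT_THRES_1)
+  return BT_TXRX_CNT_LEVEL_1; /* >= 1200 && < 1400 */
+ return BT_TXRX_CNT_LEVEL_0; /* < 1200 */
+}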
+
+enum bt_state_1ant {
+ BT_INFO_STATE_DISABLED = 0,
+ BT_INFO_STATE_NO_CONNECTION = 1,
+ BT_INFO_STATE_CONNECT_IDLE = 2,
+ BT_INFO_STATE_INQ_OR_PAG = 3,
+ BT_INFO_STATE_ACL_ONLY_BUSY = 4,
+ BT_INFO_STATE_SCO_ONLY_BUSY = 5,
+ BT_INFO_STATE_ACL_SCO_BUSY = 6,
+ BT_INFO_STATE_ACL_INQ_OR_PAG = 7,
+ BT_INFO_STATE_MAX = 8
+};
+
+struct btdm_8723a_1ant {
+ u8 prePsTdma;
+ u8 curPsTdma;
+ u8 psTdmaDuAdjType;
+ u8 bPrePsTdmaOn;
+ u8 bCurPsTdmaOn;
+ u8 preWifiPara;
+ u8 curWifiPara;
+ u8 preCoexWifiCon;
+ u8 curCoexWifiCon;
+ u8 wifiRssiThresh;
+
+ u32 psTdmaMonitorCnt;
+ u32 psTdmaGlobalCnt;
+
+ /* DurationAdjust For SCO */
+ u32 psTdmaMonitorCntForSCO;
+ u8 psTdmaDuAdjTypeForSCO;
+ u8 RSSI_WiFi_Last;
+ u8 RSSI_BT_Last;
+
+ u8 bWiFiHalt;
+ u8 bRAChanged;
+};
+
+void BTDM_1AntSignalCompensation(struct rtw_adapter * padapter, u8 *rssi_wifi, u8 *rssi_bt);
+void BTDM_1AntForDhcp(struct rtw_adapter * padapter);
+void BTDM_1AntBtCoexist8723A(struct rtw_adapter * padapter);
+
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc87231Ant.h ===== */
+#endif /* __HALBTC87231ANT_C__ */
+
+#ifdef __HALBTC87232ANT_C__ /* HAL/BTCoexist/HalBtc87232Ant.h */
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc87232Ant.h ===== */
+enum bt_2ant_bt_status {
+ BT_2ANT_BT_STATUS_IDLE = 0x0,
+ BT_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_2ANT_BT_STATUS_NON_IDLE = 0x2,
+ BT_2ANT_BT_STATUS_MAX
+};
+
+enum bt_2ant_coex_algo {
+ BT_2ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_2ANT_COEX_ALGO_SCO = 0x1,
+ BT_2ANT_COEX_ALGO_HID = 0x2,
+ BT_2ANT_COEX_ALGO_A2DP = 0x3,
+ BT_2ANT_COEX_ALGO_PANEDR = 0x4,
+ BT_2ANT_COEX_ALGO_PANHS = 0x5,
+ BT_2ANT_COEX_ALGO_PANEDR_A2DP = 0x6,
+ BT_2ANT_COEX_ALGO_PANEDR_HID = 0x7,
+ BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x8,
+ BT_2ANT_COEX_ALGO_HID_A2DP = 0x9,
+ BT_2ANT_COEX_ALGO_HID_A2DP_PANHS = 0xA,
+ BT_2ANT_COEX_ALGO_MAX = 0xB,
+};
+
+struct btdm_8723a_2ant {
+ u8 bPreDecBtPwr;
+ u8 bCurDecBtPwr;
+
+ u8 preWlanActHi;
+ u8 curWlanActHi;
+ u8 preWlanActLo;
+ u8 curWlanActLo;
+
+ u8 preFwDacSwingLvl;
+ u8 curFwDacSwingLvl;
+
+ u8 bPreRfRxLpfShrink;
+ u8 bCurRfRxLpfShrink;
+
+ u8 bPreLowPenaltyRa;
+ u8 bCurLowPenaltyRa;
+
+ u8 preBtRetryIndex;
+ u8 curBtRetryIndex;
+
+ u8 bPreDacSwingOn;
+ u32 preDacSwingLvl;
+ u8 bCurDacSwingOn;
+ u32 curDacSwingLvl;
+
+ u8 bPreAdcBackOff;
+ u8 bCurAdcBackOff;
+
+ u8 bPreAgcTableEn;
+ u8 bCurAgcTableEn;
+
+ u32 preVal0x6c0;
+ u32 curVal0x6c0;
+ u32 preVal0x6c8;
+ u32 curVal0x6c8;
+ u8 preVal0x6cc;
+ u8 curVal0x6cc;
+
+ u8 bCurIgnoreWlanAct;
+ u8 bPreIgnoreWlanAct;
+
+ u8 prePsTdma;
+ u8 curPsTdma;
+ u8 psTdmaDuAdjType;
+ u8 bPrePsTdmaOn;
+ u8 bCurPsTdmaOn;
+
+ u8 preAlgorithm;
+ u8 curAlgorithm;
+ u8 bResetTdmaAdjust;
+
+ u8 btStatus;
+};
+
+void BTDM_2AntBtCoexist8723A(struct rtw_adapter * padapter);
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc87232Ant.h ===== */
+#endif /* __HALBTC87232ANT_C__ */
+
+#ifdef __HALBTC8723_C__ /* HAL/BTCoexist/HalBtc8723.h */
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc8723.h ===== */
+
+#define BT_Q_PKT_OFF 0
+#define BT_Q_PKT_ON 1
+
+#define BT_TX_PWR_OFF 0
+#define BT_TX_PWR_ON 1
+
+/* TDMA mode definition */
+#define TDMA_2ANT 0
+#define TDMA_1ANT 1
+#define TDMA_NAV_OFF 0
+#define TDMA_NAV_ON 1
+#define TDMA_DAC_SWING_OFF 0
+#define TDMA_DAC_SWING_ON 1
+
+#define BT_RSSI_LEVEL_H 0
+#define BT_RSSI_LEVEL_M 1
+#define BT_RSSI_LEVEL_L 2
+
+/* PTA mode related definition */
+#define BT_PTA_MODE_OFF 0
+#define BT_PTA_MODE_ON 1
+
+/* Penalty Tx Rate Adaptive */
+#define BT_TX_RATE_ADAPTIVE_NORMAL 0
+#define BT_TX_RATE_ADAPTIVE_LOW_PENALTY 1
+
+/* RF Corner */
+#define BT_RF_RX_LPF_CORNER_RESUME 0
+#define BT_RF_RX_LPF_CORNER_SHRINK 1
+
+#define BT_INFO_ACL BIT(0)
+#define BT_INFO_SCO BIT(1)
+#define BT_INFO_INQ_PAG BIT(2)
+#define BT_INFO_ACL_BUSY BIT(3)
+#define BT_INFO_SCO_BUSY BIT(4)
+#define BT_INFO_HID BIT(5)
+#define BT_INFO_A2DP BIT(6)
+#define BT_INFO_FTP BIT(7)
+
+
+
+struct bt_coexist_8723a {
+ u32 highPriorityTx;
+ u32 highPriorityRx;
+ u32 lowPriorityTx;
+ u32 lowPriorityRx;
+ u8 btRssi;
+ u8 TotalAntNum;
+ u8 bC2hBtInfoSupport;
+ u8 c2hBtInfo;
+ u8 c2hBtInfoOriginal;
+ u8 prec2hBtInfo; /* for 1Ant */
+ u8 bC2hBtInquiryPage;
+ unsigned long btInqPageStartTime; /* for 2Ant */
+ u8 c2hBtProfile; /* for 1Ant */
+ u8 btRetryCnt;
+ u8 btInfoExt;
+ u8 bC2hBtInfoReqSent;
+ u8 bForceFwBtInfo;
+ u8 bForceA2dpSink;
+ struct btdm_8723a_2ant btdm2Ant;
+ struct btdm_8723a_1ant btdm1Ant;
+};
+
+void BTDM_SetFwChnlInfo(struct rtw_adapter * padapter, enum rt_media_status mstatus);
+u8 BTDM_IsWifiConnectionExist(struct rtw_adapter * padapter);
+void BTDM_SetFw3a(struct rtw_adapter * padapter, u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5);
+void BTDM_QueryBtInformation(struct rtw_adapter * padapter);
+void BTDM_SetSwRfRxLpfCorner(struct rtw_adapter * padapter, u8 type);
+void BTDM_SetSwPenaltyTxRateAdaptive(struct rtw_adapter * padapter, u8 raType);
+void BTDM_SetFwDecBtPwr(struct rtw_adapter * padapter, u8 bDecBtPwr);
+u8 BTDM_BtProfileSupport(struct rtw_adapter * padapter);
+void BTDM_LpsLeave(struct rtw_adapter * padapter);
+u8 BTDM_1Ant8723A(struct rtw_adapter * padapter);
+#define BT_1Ant BTDM_1Ant8723A
+
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc8723.h ===== */
+#endif /* __HALBTC8723_C__ */
+
+#ifdef __HALBTCCSR1ANT_C__ /* HAL/BTCoexist/HalBtcCsr1Ant.h */
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtcCsr1Ant.h ===== */
+
+enum BT_A2DP_INDEX{
+ BT_A2DP_INDEX0 = 0, /* 32, 12; the most critical for BT */
+ BT_A2DP_INDEX1, /* 12, 24 */
+ BT_A2DP_INDEX2, /* 0, 0 */
+ BT_A2DP_INDEX_MAX
+};
+
+#define BT_A2DP_STATE_NOT_ENTERED 0
+#define BT_A2DP_STATE_DETECTING 1
+#define BT_A2DP_STATE_DETECTED 2
+
+#define BTDM_ANT_BT_IDLE 0
+#define BTDM_ANT_WIFI 1
+#define BTDM_ANT_BT 2
+
+
+void BTDM_SingleAnt(struct rtw_adapter * padapter, u8 bSingleAntOn, u8 bInterruptOn, u8 bMultiNAVOn);
+void BTDM_CheckBTIdleChange1Ant(struct rtw_adapter * padapter);
+
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtcCsr1Ant.h ===== */
+#endif /* __HALBTCCSR1ANT_C__ */
+
+#ifdef __HALBTCCSR2ANT_C__ /* HAL/BTCoexist/HalBtcCsr2Ant.h */
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtcCsr2Ant.h ===== */
+
+/* */
+/* For old core stack before v251 */
+/* */
+#define BT_RSSI_STATE_NORMAL_POWER BIT0
+#define BT_RSSI_STATE_AMDPU_OFF BIT1
+#define BT_RSSI_STATE_SPECIAL_LOW BIT2
+#define BT_RSSI_STATE_BG_EDCA_LOW BIT3
+#define BT_RSSI_STATE_TXPOWER_LOW BIT4
+
+#define BT_DACSWING_OFF 0
+#define BT_DACSWING_M4 1
+#define BT_DACSWING_M7 2
+#define BT_DACSWING_M10 3
+
+void BTDM_DiminishWiFi(struct rtw_adapter * Adapter, u8 bDACOn, u8 bInterruptOn, u8 DACSwingLevel, u8 bNAVOn);
+
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtcCsr2Ant.h ===== */
+#endif /* __HALBTCCSR2ANT_C__ */
+
+#ifdef __HALBTCOEXIST_C__ /* HAL/BTCoexist/HalBtCoexist.h */
+
+/* HEADER/TypeDef.h */
+#define MAX_FW_SUPPORT_MACID_NUM 64
+
+/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtCoexist.h ===== */
+
+#define FW_VER_BT_REG 62
+#define FW_VER_BT_REG1 74
+#define REG_BT_ACTIVE 0x444
+#define REG_BT_STATE 0x448
+#define REG_BT_POLLING1 0x44c
+#define REG_BT_POLLING 0x700
+
+#define REG_BT_ACTIVE_OLD 0x488
+#define REG_BT_STATE_OLD 0x48c
+#define REG_BT_POLLING_OLD 0x490
+
+/* The following register definitions are for the 8723 */
+#define REG_HIGH_PRIORITY_TXRX 0x770
+#define REG_LOW_PRIORITY_TXRX 0x774
+
+#define BT_FW_COEX_THRESH_TOL 6
+#define BT_FW_COEX_THRESH_20 20
+#define BT_FW_COEX_THRESH_23 23
+#define BT_FW_COEX_THRESH_25 25
+#define BT_FW_COEX_THRESH_30 30
+#define BT_FW_COEX_THRESH_35 35
+#define BT_FW_COEX_THRESH_40 40
+#define BT_FW_COEX_THRESH_45 45
+#define BT_FW_COEX_THRESH_47 47
+#define BT_FW_COEX_THRESH_50 50
+#define BT_FW_COEX_THRESH_55 55
+#define BT_FW_COEX_THRESH_65 65
+
+#define BT_COEX_STATE_BT30 BIT(0)
+#define BT_COEX_STATE_WIFI_HT20 BIT(1)
+#define BT_COEX_STATE_WIFI_HT40 BIT(2)
+#define BT_COEX_STATE_WIFI_LEGACY BIT(3)
+
+#define BT_COEX_STATE_WIFI_RSSI_LOW BIT(4)
+#define BT_COEX_STATE_WIFI_RSSI_MEDIUM BIT(5)
+#define BT_COEX_STATE_WIFI_RSSI_HIGH BIT(6)
+#define BT_COEX_STATE_DEC_BT_POWER BIT(7)
+
+#define BT_COEX_STATE_WIFI_IDLE BIT(8)
+#define BT_COEX_STATE_WIFI_UPLINK BIT(9)
+#define BT_COEX_STATE_WIFI_DOWNLINK BIT(10)
+
+#define BT_COEX_STATE_BT_INQ_PAGE BIT(11)
+#define BT_COEX_STATE_BT_IDLE BIT(12)
+#define BT_COEX_STATE_BT_UPLINK BIT(13)
+#define BT_COEX_STATE_BT_DOWNLINK BIT(14)
+/* */
+/* Todo: Remove these definitions */
+#define BT_COEX_STATE_BT_PAN_IDLE BIT(15)
+#define BT_COEX_STATE_BT_PAN_UPLINK BIT(16)
+#define BT_COEX_STATE_BT_PAN_DOWNLINK BIT(17)
+#define BT_COEX_STATE_BT_A2DP_IDLE BIT(18)
+/* */
+#define BT_COEX_STATE_BT_RSSI_LOW BIT(19)
+
+#define BT_COEX_STATE_PROFILE_HID BIT(20)
+#define BT_COEX_STATE_PROFILE_A2DP BIT(21)
+#define BT_COEX_STATE_PROFILE_PAN BIT(22)
+#define BT_COEX_STATE_PROFILE_SCO BIT(23)
+
+#define BT_COEX_STATE_WIFI_RSSI_1_LOW BIT(24)
+#define BT_COEX_STATE_WIFI_RSSI_1_MEDIUM BIT(25)
+#define BT_COEX_STATE_WIFI_RSSI_1_HIGH BIT(26)
+
+#define BT_COEX_STATE_WIFI_RSSI_BEACON_LOW BIT(27)
+#define BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM BIT(28)
+#define BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH BIT(29)
+
+
+#define BT_COEX_STATE_BTINFO_COMMON BIT30
+#define BT_COEX_STATE_BTINFO_B_HID_SCOESCO BIT31
+#define BT_COEX_STATE_BTINFO_B_FTP_A2DP BIT32
+
+#define BT_COEX_STATE_BT_CNT_LEVEL_0 BIT33
+#define BT_COEX_STATE_BT_CNT_LEVEL_1 BIT34
+#define BT_COEX_STATE_BT_CNT_LEVEL_2 BIT35
+#define BT_COEX_STATE_BT_CNT_LEVEL_3 BIT36
+
+#define BT_RSSI_STATE_HIGH 0
+#define BT_RSSI_STATE_MEDIUM 1
+#define BT_RSSI_STATE_LOW 2
+#define BT_RSSI_STATE_STAY_HIGH 3
+#define BT_RSSI_STATE_STAY_MEDIUM 4
+#define BT_RSSI_STATE_STAY_LOW 5
+
+#define BT_AGCTABLE_OFF 0
+#define BT_AGCTABLE_ON 1
+
+#define BT_BB_BACKOFF_OFF 0
+#define BT_BB_BACKOFF_ON 1
+
+#define BT_FW_NAV_OFF 0
+#define BT_FW_NAV_ON 1
+
+#define BT_COEX_MECH_NONE 0
+#define BT_COEX_MECH_SCO 1
+#define BT_COEX_MECH_HID 2
+#define BT_COEX_MECH_A2DP 3
+#define BT_COEX_MECH_PAN 4
+#define BT_COEX_MECH_HID_A2DP 5
+#define BT_COEX_MECH_HID_PAN 6
+#define BT_COEX_MECH_PAN_A2DP 7
+#define BT_COEX_MECH_HID_SCO_ESCO 8
+#define BT_COEX_MECH_FTP_A2DP 9
+#define BT_COEX_MECH_COMMON 10
+#define BT_COEX_MECH_MAX 11
+/* BT Dbg Ctrl */
+#define BT_DBG_PROFILE_NONE 0
+#define BT_DBG_PROFILE_SCO 1
+#define BT_DBG_PROFILE_HID 2
+#define BT_DBG_PROFILE_A2DP 3
+#define BT_DBG_PROFILE_PAN 4
+#define BT_DBG_PROFILE_HID_A2DP 5
+#define BT_DBG_PROFILE_HID_PAN 6
+#define BT_DBG_PROFILE_PAN_A2DP 7
+#define BT_DBG_PROFILE_MAX 9
+
+struct bt_coexist_str {
+ u8 BluetoothCoexist;
+ u8 BT_Ant_Num;
+ u8 BT_CoexistType;
+ u8 BT_Ant_isolation; /* 0:good, 1:bad */
+ u8 bt_radiosharedtype;
+ u32 Ratio_Tx;
+ u32 Ratio_PRI;
+ u8 bInitlized;
+ u32 BtRfRegOrigin1E;
+ u32 BtRfRegOrigin1F;
+ u8 bBTBusyTraffic;
+ u8 bBTTrafficModeSet;
+ u8 bBTNonTrafficModeSet;
+ struct bt_traffic_statistics BT21TrafficStatistics;
+ u64 CurrentState;
+ u64 PreviousState;
+ u8 preRssiState;
+ u8 preRssiState1;
+ u8 preRssiStateBeacon;
+ u8 bFWCoexistAllOff;
+ u8 bSWCoexistAllOff;
+ u8 bHWCoexistAllOff;
+ u8 bBalanceOn;
+ u8 bSingleAntOn;
+ u8 bInterruptOn;
+ u8 bMultiNAVOn;
+ u8 PreWLANActH;
+ u8 PreWLANActL;
+ u8 WLANActH;
+ u8 WLANActL;
+ u8 A2DPState;
+ u8 AntennaState;
+ u32 lastBtEdca;
+ u16 last_aggr_num;
+ u8 bEDCAInitialized;
+ u8 exec_cnt;
+ u8 b8723aAgcTableOn;
+ u8 b92DAgcTableOn;
+ struct bt_coexist_8723a halCoex8723;
+ u8 btActiveZeroCnt;
+ u8 bCurBtDisabled;
+ u8 bPreBtDisabled;
+ u8 bNeedToRoamForBtDisableEnable;
+ u8 fw3aVal[5];
+};
+
+void BTDM_CheckAntSelMode(struct rtw_adapter * padapter);
+void BTDM_FwC2hBtRssi(struct rtw_adapter * padapter, u8 *tmpBuf);
+#define BT_FwC2hBtRssi BTDM_FwC2hBtRssi
+void BTDM_FwC2hBtInfo(struct rtw_adapter * padapter, u8 *tmpBuf, u8 length);
+#define BT_FwC2hBtInfo BTDM_FwC2hBtInfo
+void BTDM_DisplayBtCoexInfo(struct rtw_adapter * padapter);
+#define BT_DisplayBtCoexInfo BTDM_DisplayBtCoexInfo
+void BTDM_RejectAPAggregatedPacket(struct rtw_adapter * padapter, u8 bReject);
+u8 BTDM_IsHT40(struct rtw_adapter * padapter);
+u8 BTDM_Legacy(struct rtw_adapter * padapter);
+void BTDM_CheckWiFiState(struct rtw_adapter * padapter);
+s32 BTDM_GetRxSS(struct rtw_adapter * padapter);
+u8 BTDM_CheckCoexBcnRssiState(struct rtw_adapter * padapter, u8 levelNum, u8 RssiThresh, u8 RssiThresh1);
+u8 BTDM_CheckCoexRSSIState1(struct rtw_adapter * padapter, u8 levelNum, u8 RssiThresh, u8 RssiThresh1);
+u8 BTDM_CheckCoexRSSIState(struct rtw_adapter * padapter, u8 levelNum, u8 RssiThresh, u8 RssiThresh1);
+u8 BTDM_DisableEDCATurbo(struct rtw_adapter * padapter);
+#define BT_DisableEDCATurbo BTDM_DisableEDCATurbo
+void BTDM_Balance(struct rtw_adapter * padapter, u8 bBalanceOn, u8 ms0, u8 ms1);
+void BTDM_AGCTable(struct rtw_adapter * padapter, u8 type);
+void BTDM_BBBackOffLevel(struct rtw_adapter * padapter, u8 type);
+void BTDM_FWCoexAllOff(struct rtw_adapter * padapter);
+void BTDM_SWCoexAllOff(struct rtw_adapter * padapter);
+void BTDM_HWCoexAllOff(struct rtw_adapter * padapter);
+void BTDM_CoexAllOff(struct rtw_adapter * padapter);
+void BTDM_TurnOffBtCoexistBeforeEnterIPS(struct rtw_adapter * padapter);
+void BTDM_SignalCompensation(struct rtw_adapter * padapter, u8 *rssi_wifi, u8 *rssi_bt);
+void BTDM_Coexist(struct rtw_adapter * padapter);
+#define BT_CoexistMechanism BTDM_Coexist
+void BTDM_UpdateCoexState(struct rtw_adapter * padapter);
+u8 BTDM_IsSameCoexistState(struct rtw_adapter * padapter);
+void BTDM_PWDBMonitor(struct rtw_adapter * padapter);
+u8 BTDM_IsBTBusy(struct rtw_adapter * padapter);
+#define BT_IsBtBusy BTDM_IsBTBusy
+u8 BTDM_IsWifiBusy(struct rtw_adapter * padapter);
+u8 BTDM_IsCoexistStateChanged(struct rtw_adapter * padapter);
+u8 BTDM_IsWifiUplink(struct rtw_adapter * padapter);
+u8 BTDM_IsWifiDownlink(struct rtw_adapter * padapter);
+u8 BTDM_IsBTHSMode(struct rtw_adapter * padapter);
+u8 BTDM_IsBTUplink(struct rtw_adapter * padapter);
+u8 BTDM_IsBTDownlink(struct rtw_adapter * padapter);
+void BTDM_AdjustForBtOperation(struct rtw_adapter * padapter);
+void BTDM_ForHalt(struct rtw_adapter * padapter);
+void BTDM_WifiScanNotify(struct rtw_adapter * padapter, u8 scanType);
+void BTDM_WifiAssociateNotify(struct rtw_adapter * padapter, u8 action);
+void BTDM_MediaStatusNotify(struct rtw_adapter * padapter, enum rt_media_status mstatus);
+void BTDM_ForDhcp(struct rtw_adapter * padapter);
+void BTDM_ResetActionProfileState(struct rtw_adapter * padapter);
+void BTDM_SetBtCoexCurrAntNum(struct rtw_adapter * padapter, u8 antNum);
+#define BT_SetBtCoexCurrAntNum BTDM_SetBtCoexCurrAntNum
+u8 BTDM_IsActionSCO(struct rtw_adapter * padapter);
+u8 BTDM_IsActionHID(struct rtw_adapter * padapter);
+u8 BTDM_IsActionA2DP(struct rtw_adapter * padapter);
+u8 BTDM_IsActionPAN(struct rtw_adapter * padapter);
+u8 BTDM_IsActionHIDA2DP(struct rtw_adapter * padapter);
+u8 BTDM_IsActionHIDPAN(struct rtw_adapter * padapter);
+u8 BTDM_IsActionPANA2DP(struct rtw_adapter * padapter);
+u8 BTDM_IsBtDisabled(struct rtw_adapter * padapter);
+#define BT_IsBtDisabled BTDM_IsBtDisabled
+u32 BTDM_BtTxRxCounterH(struct rtw_adapter * padapter);
+u32 BTDM_BtTxRxCounterL(struct rtw_adapter * padapter);
+
+/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtCoexist.h ===== */
+#endif /* __HALBTCOEXIST_C__ */
+
+#ifdef __HALBT_C__ /* HAL/HalBT.h */
+/* ===== Below this line is sync from SD7 driver HAL/HalBT.h ===== */
+
+#define RTS_CTS_NO_LEN_LIMIT 0
+
+u8 HALBT_GetPGAntNum(struct rtw_adapter * padapter);
+#define BT_GetPGAntNum HALBT_GetPGAntNum
+void HALBT_SetKey(struct rtw_adapter * padapter, u8 EntryNum);
+void HALBT_RemoveKey(struct rtw_adapter * padapter, u8 EntryNum);
+void HALBT_InitBTVars8723A(struct rtw_adapter * padapter);
+#define HALBT_InitHalVars HALBT_InitBTVars8723A
+#define BT_InitHalVars HALBT_InitHalVars
+u8 HALBT_IsBTExist(struct rtw_adapter * padapter);
+#define BT_IsBtExist HALBT_IsBTExist
+u8 HALBT_BTChipType(struct rtw_adapter * padapter);
+void HALBT_InitHwConfig(struct rtw_adapter * padapter);
+#define BT_InitHwConfig HALBT_InitHwConfig
+void HALBT_SetRtsCtsNoLenLimit(struct rtw_adapter * padapter);
+
+/* ===== End of sync from SD7 driver HAL/HalBT.h ===== */
+#endif /* __HALBT_C__ */
+
+#define _bt_dbg_off_ 0
+#define _bt_dbg_on_ 1
+
+extern u32 BTCoexDbgLevel;
+
+
+
+#endif /* __RTL8723A_BT_COEXIST_H__ */
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_cmd.h b/drivers/staging/rtl8723au/include/rtl8723a_cmd.h
new file mode 100644
index 000000000000..8fccbfc3a45c
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtl8723a_cmd.h
@@ -0,0 +1,160 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723A_CMD_H__
+#define __RTL8723A_CMD_H__
+
+
+#define H2C_BT_FW_PATCH_LEN 3
+#define H2C_BT_PWR_FORCE_LEN 3
+
+enum cmd_msg_element_id {
+ NONE_CMDMSG_EID,
+ AP_OFFLOAD_EID = 0,
+ SET_PWRMODE_EID = 1,
+ JOINBSS_RPT_EID = 2,
+ RSVD_PAGE_EID = 3,
+ RSSI_4_EID = 4,
+ RSSI_SETTING_EID = 5,
+ MACID_CONFIG_EID = 6,
+ MACID_PS_MODE_EID = 7,
+ P2P_PS_OFFLOAD_EID = 8,
+ SELECTIVE_SUSPEND_ROF_CMD = 9,
+ BT_QUEUE_PKT_EID = 17,
+ BT_ANT_TDMA_EID = 20,
+ BT_2ANT_HID_EID = 21,
+ P2P_PS_CTW_CMD_EID = 32,
+ FORCE_BT_TX_PWR_EID = 33,
+ SET_TDMA_WLAN_ACT_TIME_EID = 34,
+ SET_BT_TX_RETRY_INDEX_EID = 35,
+ HID_PROFILE_ENABLE_EID = 36,
+ BT_IGNORE_WLAN_ACT_EID = 37,
+ BT_PTA_MANAGER_UPDATE_ENABLE_EID = 38,
+ DAC_SWING_VALUE_EID = 41,
+ TRADITIONAL_TDMA_EN_EID = 51,
+ H2C_BT_FW_PATCH = 54,
+ B_TYPE_TDMA_EID = 58,
+ SCAN_EN_EID = 59,
+ LOWPWR_LPS_EID = 71,
+ H2C_RESET_TSF = 75,
+ MAX_CMDMSG_EID
+};
+
+struct cmd_msg_parm {
+ u8 eid; /* element id */
+ u8 sz; /* size of the payload in buf */
+ u8 buf[6];
+};
+
+struct setpwrmode_parm {
+ u8 Mode;
+ u8 SmartPS;
+ u8 AwakeInterval; /* unit: beacon interval */
+ u8 bAllQueueUAPSD;
+
+#define SETPM_LOWRXBCN BIT(0)
+#define SETPM_AUTOANTSWITCH BIT(1)
+#define SETPM_PSALLOWBTHIGHPRI BIT(2)
+ u8 BcnAntMode;
+} __packed;
+
+struct H2C_SS_RFOFF_PARAM {
+ u8 ROFOn; /* 1: on, 0:off */
+ u16 gpio_period; /* unit: 1024 us */
+} __packed;
+
+
+struct joinbssrpt_parm {
+ u8 OpMode; /* enum rt_media_status */
+};
+
+struct rsvdpage_loc {
+ u8 LocProbeRsp;
+ u8 LocPsPoll;
+ u8 LocNullData;
+ u8 LocQosNull;
+ u8 LocBTQosNull;
+};
+
+struct P2P_PS_Offload_t {
+ u8 Offload_En:1;
+ u8 role:1; /* 1: Owner, 0: Client */
+ u8 CTWindow_En:1;
+ u8 NoA0_En:1;
+ u8 NoA1_En:1;
+ u8 AllStaSleep:1; /* Only valid in Owner */
+ u8 discovery:1;
+ u8 rsvd:1;
+};
+
+struct P2P_PS_CTWPeriod_t {
+ u8 CTWPeriod; /* TU */
+};
+
+#define B_TDMA_EN BIT(0)
+#define B_TDMA_FIXANTINBT BIT(1)
+#define B_TDMA_TXPSPOLL BIT(2)
+#define B_TDMA_VAL870 BIT(3)
+#define B_TDMA_AUTOWAKEUP BIT(4)
+#define B_TDMA_NOPS BIT(5)
+#define B_TDMA_WLANHIGHPRI BIT(6)
+
+struct b_type_tdma_parm {
+ u8 option;
+
+ u8 TBTTOnPeriod;
+ u8 MedPeriod;
+ u8 rsvd30;
+} __packed;
+
+struct scan_en_parm {
+ u8 En;
+} __packed;
+
+/* BT_PWR */
+#define SET_H2CCMD_BT_PWR_IDX(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd, 0, 8, __Value)
+
+/* BT_FW_PATCH */
+#define SET_H2CCMD_BT_FW_PATCH_ENABLE(__pH2CCmd, __Value) SET_BITS_TO_LE_4BYTE(__pH2CCmd, 0, 8, __Value) /* SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value) */
+#define SET_H2CCMD_BT_FW_PATCH_SIZE(__pH2CCmd, __Value) SET_BITS_TO_LE_4BYTE(__pH2CCmd, 8, 16, __Value) /* SET_BITS_TO_LE_2BYTE((__pH2CCmd)+1, 0, 16, __Value) */
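+
+/*
+ * Layout note: with H2C_BT_FW_PATCH_LEN == 3, the enable flag occupies byte
+ * 0 of the H2C payload and the 16-bit patch size bytes 1-2, matching the
+ * bit offsets 0 and 8 used by the SET_* accessors above.
+ */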
+
+struct lowpwr_lps_parm {
+ u8 bcn_count:4;
+ u8 tb_bcn_threshold:3;
+ u8 enable:1;
+ u8 bcn_interval;
+ u8 drop_threshold;
+ u8 max_early_period;
+ u8 max_bcn_timeout_period;
+} __packed;
+
+
+/* host message to firmware cmd */
+void rtl8723a_set_FwPwrMode_cmd(struct rtw_adapter * padapter, u8 Mode);
+void rtl8723a_set_FwJoinBssReport_cmd(struct rtw_adapter * padapter, u8 mstatus);
+#ifdef CONFIG_8723AU_BT_COEXIST
+void rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(struct rtw_adapter * padapter);
+#endif
+u8 rtl8723a_set_rssi_cmd(struct rtw_adapter * padapter, u8 *param);
+u8 rtl8723a_set_raid_cmd(struct rtw_adapter * padapter, u32 mask, u8 arg);
+void rtl8723a_add_rateatid(struct rtw_adapter * padapter, u32 bitmap, u8 arg, u8 rssi_level);
+
+#ifdef CONFIG_8723AU_P2P
+void rtl8723a_set_p2p_ps_offload_cmd(struct rtw_adapter * padapter, u8 p2p_ps_state);
+#endif /* CONFIG_8723AU_P2P */
+
+void CheckFwRsvdPageContent23a(struct rtw_adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_dm.h b/drivers/staging/rtl8723au/include/rtl8723a_dm.h
new file mode 100644
index 000000000000..47e887f5bb26
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtl8723a_dm.h
@@ -0,0 +1,144 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723A_DM_H__
+#define __RTL8723A_DM_H__
+/* */
+/* Description: */
+/* */
+/* This file is for 8723A dynamic mechanism only */
+/* */
+/* */
+/* */
+#define DYNAMIC_FUNC_BT BIT(0)
+
+enum {
+ UP_LINK,
+ DOWN_LINK,
+};
+/* */
+/* structure and define */
+/* */
+
+/* duplicate code, will move to ODM ######### */
+#define IQK_MAC_REG_NUM 4
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM 9
+#define HP_THERMAL_NUM 8
+/* duplicate code, will move to ODM ######### */
+struct dm_priv {
+ u8 DM_Type;
+ u8 DMFlag;
+ u8 InitDMFlag;
+ u32 InitODMFlag;
+
+ /* Upper and Lower Signal threshold for Rate Adaptive*/
+ int UndecoratedSmoothedPWDB;
+ int UndecoratedSmoothedCCK;
+ int EntryMinUndecoratedSmoothedPWDB;
+ int EntryMaxUndecoratedSmoothedPWDB;
+ int MinUndecoratedPWDBForDM;
+ int LastMinUndecoratedPWDBForDM;
+
+ s32 UndecoratedSmoothedBeacon;
+ #ifdef CONFIG_8723AU_BT_COEXIST
+ s32 BT_EntryMinUndecoratedSmoothedPWDB;
+ s32 BT_EntryMaxUndecoratedSmoothedPWDB;
+ #endif
+
+ /* for High Power */
+ u8 bDynamicTxPowerEnable;
+ u8 LastDTPLvl;
+ u8 DynamicTxHighPowerLvl; /* Tx power control for near/far range (added by Jacken, 2008/03/06) */
+
+ /* for tx power tracking */
+ u8 bTXPowerTracking;
+ u8 TXPowercount;
+ u8 bTXPowerTrackingInit;
+ u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking as default */
+ u8 TM_Trigger;
+
+ u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
+ u8 ThermalValue;
+ u8 ThermalValue_LCK;
+ u8 ThermalValue_IQK;
+ u8 ThermalValue_DPK;
+
+ u8 bRfPiEnable;
+
+ /* for APK */
+ u32 APKoutput[2][2]; /* path A/B; output1_1a/output1_2a */
+ u8 bAPKdone;
+ u8 bAPKThermalMeterIgnore;
+ u8 bDPdone;
+ u8 bDPPathAOK;
+ u8 bDPPathBOK;
+
+ /* for IQK */
+ u32 RegC04;
+ u32 Reg874;
+ u32 RegC08;
+ u32 RegB68;
+ u32 RegB6C;
+ u32 Reg870;
+ u32 Reg860;
+ u32 Reg864;
+ u32 ADDA_backup[IQK_ADDA_REG_NUM];
+ u32 IQK_MAC_backup[IQK_MAC_REG_NUM];
+ u32 IQK_BB_backup_recover[9];
+ u32 IQK_BB_backup[IQK_BB_REG_NUM];
+ u8 PowerIndex_backup[6];
+
+ u8 bCCKinCH14;
+
+ u8 CCK_index;
+ u8 OFDM_index[2];
+
+ u8 bDoneTxpower;
+ u8 CCK_index_HP;
+ u8 OFDM_index_HP[2];
+ u8 ThermalValue_HP[HP_THERMAL_NUM];
+ u8 ThermalValue_HP_index;
+
+ /* for TxPwrTracking */
+ s32 RegE94;
+ s32 RegE9C;
+ s32 RegEB4;
+ s32 RegEBC;
+
+ u32 TXPowerTrackingCallbackCnt; /* cosa add for debug */
+
+ u32 prv_traffic_idx; /* edca turbo */
+
+ s32 OFDM_Pkt_Cnt;
+ u8 RSSI_Select;
+/* u8 DIG_Dynamic_MIN ; */
+/* duplicate code, will move to ODM ######### */
+ /* Initial data rate SEL register 0x484, read during the watchdog and used to fill the tx descriptor (Thomas, 2011-03-21) */
+ u8 INIDATA_RATE[32];
+};
+
+
+/* */
+/* function prototype */
+/* */
+
+void rtl8723a_init_dm_priv(struct rtw_adapter *padapter);
+void rtl8723a_deinit_dm_priv(struct rtw_adapter *padapter);
+
+void rtl8723a_InitHalDm(struct rtw_adapter *padapter);
+void rtl8723a_HalDmWatchDog(struct rtw_adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_hal.h b/drivers/staging/rtl8723au/include/rtl8723a_hal.h
new file mode 100644
index 000000000000..c20248bab717
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtl8723a_hal.h
@@ -0,0 +1,575 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723A_HAL_H__
+#define __RTL8723A_HAL_H__
+
+#include "rtl8723a_spec.h"
+#include "rtl8723a_pg.h"
+#include "Hal8723APhyReg.h"
+#include "Hal8723APhyCfg.h"
+#include "rtl8723a_rf.h"
+#ifdef CONFIG_8723AU_BT_COEXIST
+#include "rtl8723a_bt-coexist.h"
+#endif
+#include "rtl8723a_dm.h"
+#include "rtl8723a_recv.h"
+#include "rtl8723a_xmit.h"
+#include "rtl8723a_cmd.h"
+#include "rtl8723a_sreset.h"
+#include "rtw_efuse.h"
+
+#include "odm_precomp.h"
+
+
+/* 2TODO: We should define 8192S firmware related macro settings here!! */
+#define RTL819X_DEFAULT_RF_TYPE RF_1T2R
+#define RTL819X_TOTAL_RF_PATH 2
+
+/* TODO: The following need to check!! */
+#define RTL8723_FW_UMC_IMG "rtl8192CU\\rtl8723fw.bin"
+#define RTL8723_FW_UMC_B_IMG "rtl8192CU\\rtl8723fw_B.bin"
+#define RTL8723_PHY_REG "rtl8723S\\PHY_REG_1T.txt"
+#define RTL8723_PHY_RADIO_A "rtl8723S\\radio_a_1T.txt"
+#define RTL8723_PHY_RADIO_B "rtl8723S\\radio_b_1T.txt"
+#define RTL8723_AGC_TAB "rtl8723S\\AGC_TAB_1T.txt"
+#define RTL8723_PHY_MACREG "rtl8723S\\MAC_REG.txt"
+#define RTL8723_PHY_REG_PG "rtl8723S\\PHY_REG_PG.txt"
+#define RTL8723_PHY_REG_MP "rtl8723S\\PHY_REG_MP.txt"
+
+/* */
+/* RTL8723S From header */
+/* */
+
+/* Fw Array */
+#define Rtl8723_FwImageArray Rtl8723UFwImgArray
+#define Rtl8723_FwUMCBCutImageArrayWithBT Rtl8723UFwUMCBCutImgArrayWithBT
+#define Rtl8723_FwUMCBCutImageArrayWithoutBT Rtl8723UFwUMCBCutImgArrayWithoutBT
+
+#define Rtl8723_ImgArrayLength Rtl8723UImgArrayLength
+#define Rtl8723_UMCBCutImgArrayWithBTLength Rtl8723UUMCBCutImgArrayWithBTLength
+#define Rtl8723_UMCBCutImgArrayWithoutBTLength Rtl8723UUMCBCutImgArrayWithoutBTLength
+
+#define Rtl8723_PHY_REG_Array_PG Rtl8723UPHY_REG_Array_PG
+#define Rtl8723_PHY_REG_Array_PGLength Rtl8723UPHY_REG_Array_PGLength
+
+#define Rtl8723_FwUMCBCutMPImageArray Rtl8723SFwUMCBCutMPImgAr
+#define Rtl8723_UMCBCutMPImgArrayLength Rtl8723SUMCBCutMPImgArrayLength
+
+#define DRVINFO_SZ 4 /* unit is 8bytes */
+#define PageNum_128(_Len) (u32)(((_Len)>>7) + ((_Len)&0x7F ? 1:0))
+
+#define FW_8723A_SIZE 0x8000
+#define FW_8723A_START_ADDRESS 0x1000
+#define FW_8723A_END_ADDRESS 0x1FFF /* 0x5FFF */
+
+#define MAX_PAGE_SIZE 4096 /* @ page : 4k bytes */
+
+#define IS_FW_HEADER_EXIST(_pFwHdr) ((le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x92C0 ||\
+ (le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x88C0 ||\
+ (le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x2300)
+
+/* */
+/* Byte ordering must be taken into account for this structure */
+/* */
+/* Added by tynli. 2009.12.04. */
+struct rt_8723a_firmware_hdr {
+	/* 8-byte alignment required */
+
+ /* LONG WORD 0 ---- */
+ u16 Signature; /* 92C0: test chip; 92C, 88C0: test chip; 88C1: MP A-cut; 92C1: MP A-cut */
+ u8 Category; /* AP/NIC and USB/PCI */
+	u8 Function;	/* Reserved for different FW function indication, for further use when the driver needs to download different FW in different conditions */
+ u16 Version; /* FW Version */
+ u8 Subversion; /* FW Subversion, default 0x00 */
+ u16 Rsvd1;
+
+
+ /* LONG WORD 1 ---- */
+ u8 Month; /* Release time Month field */
+ u8 Date; /* Release time Date field */
+ u8 Hour; /* Release time Hour field */
+ u8 Minute; /* Release time Minute field */
+ u16 RamCodeSize; /* The size of RAM code */
+ u16 Rsvd2;
+
+ /* LONG WORD 2 ---- */
+ u32 SvnIdx; /* The SVN entry index */
+ u32 Rsvd3;
+
+ /* LONG WORD 3 ---- */
+ u32 Rsvd4;
+ u32 Rsvd5;
+};
+
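
A minimal sketch, not part of the original patch: how a caller might sanity-check a
firmware image against the header layout above before download, using the
IS_FW_HEADER_EXIST and PageNum_128 macros defined earlier. The fw_buf/fw_len names
are hypothetical; the actual download path is rtl8723a_FirmwareDownload(), declared
further down in this header.

static inline bool rtl8723a_fw_header_ok(const u8 *fw_buf, u32 fw_len)
{
	const struct rt_8723a_firmware_hdr *hdr =
			(const struct rt_8723a_firmware_hdr *)fw_buf;
	u32 pages;

	if (fw_len < sizeof(*hdr) || fw_len > FW_8723A_SIZE)
		return false;

	/* Signature families 0x92C0/0x88C0/0x2300 indicate a header is present */
	if (!IS_FW_HEADER_EXIST(hdr))
		return false;

	/* The body after the header is transferred in 128-byte pages */
	pages = PageNum_128(fw_len - sizeof(*hdr));

	return pages > 0;
}
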
+#define DRIVER_EARLY_INT_TIME 0x05
+#define BCN_DMA_ATIME_INT_TIME 0x02
+
+
+/* BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON. */
+#define MAX_TX_QUEUE 9
+
+#define TX_SELE_HQ BIT(0) /* High Queue */
+#define TX_SELE_LQ BIT(1) /* Low Queue */
+#define TX_SELE_NQ BIT(2) /* Normal Queue */
+
+/* Note: We divide the number of pages equally among each queue other than the public queue! */
+#define TX_TOTAL_PAGE_NUMBER 0xF8
+#define TX_PAGE_BOUNDARY (TX_TOTAL_PAGE_NUMBER + 1)
+
+/* For Normal Chip Setting */
+/* (HPQ + LPQ + NPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */
+#define NORMAL_PAGE_NUM_PUBQ 0xE7
+#define NORMAL_PAGE_NUM_HPQ 0x0C
+#define NORMAL_PAGE_NUM_LPQ 0x02
+#define NORMAL_PAGE_NUM_NPQ 0x02
+
+/* For Test Chip Setting */
+/* (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */
+#define TEST_PAGE_NUM_PUBQ 0x7E
+
+/* For Test Chip Setting */
+#define WMM_TEST_TX_TOTAL_PAGE_NUMBER 0xF5
+#define WMM_TEST_TX_PAGE_BOUNDARY (WMM_TEST_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
+
+#define WMM_TEST_PAGE_NUM_PUBQ 0xA3
+#define WMM_TEST_PAGE_NUM_HPQ 0x29
+#define WMM_TEST_PAGE_NUM_LPQ 0x29
+
+/* Note: For Normal Chip Setting, modify later */
+#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER 0xF5
+#define WMM_NORMAL_TX_PAGE_BOUNDARY (WMM_TEST_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
+
+#define WMM_NORMAL_PAGE_NUM_PUBQ 0xB0
+#define WMM_NORMAL_PAGE_NUM_HPQ 0x29
+#define WMM_NORMAL_PAGE_NUM_LPQ 0x1C
+#define WMM_NORMAL_PAGE_NUM_NPQ 0x1C
+
+
+/* */
+/* Chip specific */
+/* */
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
+#define CHIP_BONDING_92C_1T2R 0x1
+#define CHIP_BONDING_88C_USB_MCARD 0x2
+#define CHIP_BONDING_88C_USB_HP 0x1
+
+#include "HalVerDef.h"
+#include "hal_com.h"
+
+/* */
+/* Channel Plan */
+/* */
+enum ChannelPlan
+{
+ CHPL_FCC = 0,
+ CHPL_IC = 1,
+ CHPL_ETSI = 2,
+ CHPL_SPAIN = 3,
+ CHPL_FRANCE = 4,
+ CHPL_MKK = 5,
+ CHPL_MKK1 = 6,
+ CHPL_ISRAEL = 7,
+ CHPL_TELEC = 8,
+ CHPL_GLOBAL = 9,
+ CHPL_WORLD = 10,
+};
+
+#define EFUSE_REAL_CONTENT_LEN 512
+#define EFUSE_MAP_LEN 128
+#define EFUSE_MAX_SECTION 16
+#define EFUSE_IC_ID_OFFSET 506 /* For some inferiority IC purpose. added by Roger, 2009.09.02. */
+#define AVAILABLE_EFUSE_ADDR(addr) (addr < EFUSE_REAL_CONTENT_LEN)
+/* */
+/* <Roger_Notes> */
+/* To prevent the out-of-boundary programming case, */
+/* leave 1 byte and program the full section: */
+/* 9 bytes + 1 byte + 5 bytes and a preceding 1 byte. */
+/* For worst case: */
+/* | 1byte|----8bytes----|1byte|--5bytes--| */
+/* | | Reserved(14bytes) | */
+/* */
+
+/* PG data excludes the header, 6 dummy bytes from CP test and 1 reserved byte. */
+#define EFUSE_OOB_PROTECT_BYTES 15
+
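
A minimal sketch, not part of the original patch: the kind of bounds check the efuse
PG (programming) code is expected to apply, combining AVAILABLE_EFUSE_ADDR() with the
EFUSE_OOB_PROTECT_BYTES reserve described in the note above. The helper name is
hypothetical.

static inline bool efuse_pg_addr_in_bounds(u16 addr)
{
	/* stay below the raw content length and keep the protected tail free */
	return AVAILABLE_EFUSE_ADDR(addr) &&
	       addr < EFUSE_REAL_CONTENT_LEN - EFUSE_OOB_PROTECT_BYTES;
}
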
+#define EFUSE_REAL_CONTENT_LEN_8723A 512
+#define EFUSE_MAP_LEN_8723A 256
+#define EFUSE_MAX_SECTION_8723A 32
+
+/* */
+/* EFUSE for BT definition */
+/* */
+#define EFUSE_BT_REAL_BANK_CONTENT_LEN 512
+#define EFUSE_BT_REAL_CONTENT_LEN 1536 /* 512*3 */
+#define EFUSE_BT_MAP_LEN 1024 /* 1k bytes */
+#define EFUSE_BT_MAX_SECTION 128 /* 1024/8 */
+
+#define EFUSE_PROTECT_BYTES_BANK 16
+
+/* */
+/* <Roger_Notes> For RTL8723 WiFi/BT/GPS multi-function configuration. 2010.10.06. */
+/* */
+enum RT_MULTI_FUNC {
+ RT_MULTI_FUNC_NONE = 0x00,
+ RT_MULTI_FUNC_WIFI = 0x01,
+ RT_MULTI_FUNC_BT = 0x02,
+ RT_MULTI_FUNC_GPS = 0x04,
+};
+
+/* */
+/* <Roger_Notes> For RTL8723 WiFi PDn/GPIO polarity control configuration. 2010.10.08. */
+/* */
+enum RT_POLARITY_CTL {
+ RT_POLARITY_LOW_ACT = 0,
+ RT_POLARITY_HIGH_ACT = 1,
+};
+
+/* For RTL8723 regulator mode. by tynli. 2011.01.14. */
+enum RT_REGULATOR_MODE {
+ RT_SWITCHING_REGULATOR = 0,
+ RT_LDO_REGULATOR = 1,
+};
+
+/* Description: Determine the types of C2H events that are the same in driver and Fw. */
+/* First constructed by tynli. 2009.10.09. */
+enum {
+ C2H_DBG = 0,
+ C2H_TSF = 1,
+ C2H_AP_RPT_RSP = 2,
+ C2H_CCX_TX_RPT = 3, /* The FW notify the report of the specific tx packet. */
+ C2H_BT_RSSI = 4,
+ C2H_BT_OP_MODE = 5,
+ C2H_EXT_RA_RPT = 6,
+ C2H_HW_INFO_EXCH = 10,
+ C2H_C2H_H2C_TEST = 11,
+ C2H_BT_INFO = 12,
+ C2H_BT_MP_INFO = 15,
+ MAX_C2HEVENT
+};
+
+struct hal_data_8723a {
+ struct hal_version VersionID;
+ enum rt_customer_id CustomerID;
+
+ u16 FirmwareVersion;
+ u16 FirmwareVersionRev;
+ u16 FirmwareSubVersion;
+ u16 FirmwareSignature;
+
+ /* current WIFI_PHY values */
+ u32 ReceiveConfig;
+ enum WIRELESS_MODE CurrentWirelessMode;
+ enum ht_channel_width CurrentChannelBW;
+ u8 CurrentChannel;
+ u8 nCur40MhzPrimeSC;/* Control channel sub-carrier */
+
+ u16 BasicRateSet;
+
+ /* rf_ctrl */
+ u8 rf_chip;
+ u8 rf_type;
+ u8 NumTotalRFPath;
+
+ u8 BoardType;
+ u8 CrystalCap;
+ /* */
+ /* EEPROM setting. */
+ /* */
+ u8 EEPROMVersion;
+ u16 EEPROMVID;
+ u16 EEPROMPID;
+ u16 EEPROMSVID;
+ u16 EEPROMSDID;
+ u8 EEPROMCustomerID;
+ u8 EEPROMSubCustomerID;
+ u8 EEPROMRegulatory;
+ u8 EEPROMThermalMeter;
+ u8 EEPROMBluetoothCoexist;
+ u8 EEPROMBluetoothType;
+ u8 EEPROMBluetoothAntNum;
+ u8 EEPROMBluetoothAntIsolation;
+ u8 EEPROMBluetoothRadioShared;
+
+ u8 bTXPowerDataReadFromEEPORM;
+ u8 bAPKThermalMeterIgnore;
+
+ u8 bIQKInitialized;
+ u8 bAntennaDetected;
+
+ u8 TxPwrLevelCck[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ u8 TxPwrLevelHT40_1S[RF_PATH_MAX][CHANNEL_MAX_NUMBER]; /* For HT 40MHZ pwr */
+ u8 TxPwrLevelHT40_2S[RF_PATH_MAX][CHANNEL_MAX_NUMBER]; /* For HT 40MHZ pwr */
+ u8 TxPwrHt20Diff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];/* HT 20<->40 Pwr diff */
+ u8 TxPwrLegacyHtDiff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];/* For HT<->legacy pwr diff */
+ /* For power group */
+ u8 PwrGroupHT20[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ u8 PwrGroupHT40[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+
+ u8 LegacyHTTxPowerDiff;/* Legacy to HT rate power diff */
+
+	/* Read/write access is allowed for the following hardware information variables */
+ u8 framesync;
+ u32 framesyncC34;
+ u8 framesyncMonitor;
+ u8 DefaultInitialGain[4];
+ u8 pwrGroupCnt;
+ u32 MCSTxPowerLevelOriginalOffset[7][16];
+ u32 CCKTxPowerLevelOriginalOffset;
+
+ u32 AntennaTxPath; /* Antenna path Tx */
+ u32 AntennaRxPath; /* Antenna path Rx */
+ u8 ExternalPA;
+
+ u8 bLedOpenDrain; /* Support Open-drain arrangement for controlling the LED. Added by Roger, 2009.10.16. */
+
+ u8 b1x1RecvCombine; /* for 1T1R receive combining */
+
+ /* For EDCA Turbo mode */
+
+ u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */
+
+ /* vivi, for tx power tracking, 20080407 */
+ /* u16 TSSI_13dBm; */
+ /* u32 Pwr_Track; */
+ /* The current Tx Power Level */
+ u8 CurrentCckTxPwrIdx;
+ u8 CurrentOfdm24GTxPwrIdx;
+
+ struct bb_reg_define PHYRegDef[4]; /* Radio A/B/C/D */
+
+ bool bRFPathRxEnable[4]; /* We support 4 RF path now. */
+
+ u32 RfRegChnlVal[2];
+
+ u8 bCckHighPower;
+
+ /* RDG enable */
+ bool bRDGEnable;
+
+ /* for host message to fw */
+ u8 LastHMEBoxNum;
+
+ u8 fw_ractrl;
+ u8 RegTxPause;
+ /* Beacon function related global variable. */
+ u32 RegBcnCtrlVal;
+ u8 RegFwHwTxQCtrl;
+ u8 RegReg542;
+
+ struct dm_priv dmpriv;
+ struct dm_odm_t odmpriv;
+ struct sreset_priv srestpriv;
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ u8 bBTMode;
+ /* BT only. */
+ struct bt_30info BtInfo;
+	/* For bluetooth co-existence */
+ struct bt_coexist_str bt_coexist;
+#endif
+
+ u8 bDumpRxPkt;/* for debug */
+ u8 FwRsvdPageStartOffset; /* 2010.06.23. Added by tynli. Reserve page start offset except beacon in TxQ. */
+
+ /* 2010/08/09 MH Add CU power down mode. */
+ u8 pwrdown;
+
+ /* Add for dual MAC 0--Mac0 1--Mac1 */
+ u32 interfaceIndex;
+
+ u8 OutEpQueueSel;
+ u8 OutEpNumber;
+
+	/* 2010/12/10 MH Add for USB aggregation mode dynamic scheme. */
+ bool UsbRxHighSpeedMode;
+
+	/* 2010/11/22 MH Add for slim combo debug mode selection. */
+	/* This is used to work around a drawback of the CU TSMC-A/UMC-A cut: the HW auto-suspend ability closes the BT clock. */
+ bool SlimComboDbg;
+
+ /* */
+ /* Add For EEPROM Efuse switch and Efuse Shadow map Setting */
+ /* */
+ u8 EepromOrEfuse;
+ u16 EfuseUsedBytes;
+ u16 BTEfuseUsedBytes;
+
+	/* Interrupt related register information. */
+ u32 SysIntrStatus;
+ u32 SysIntrMask;
+
+ /* */
+	/* 2011/02/23 MH Add for 8723 multi-function definition. The define should be moved to an */
+	/* independent file in the future. */
+ /* */
+ /* 8723-----------------------------------------*/
+ enum RT_MULTI_FUNC MultiFunc; /* For multi-function consideration. */
+ enum RT_POLARITY_CTL PolarityCtl; /* For Wifi PDn Polarity control. */
+ enum RT_REGULATOR_MODE RegulatorMode; /* switching regulator or LDO */
+	/* 8723-----------------------------------------
+	 * 2011/02/23 MH Add for 8723 multi-function definition. The define should
+	 * be moved to an independent file in the future. */
+
+ bool bMACFuncEnable;
+
+#ifdef CONFIG_8723AU_P2P
+ struct P2P_PS_Offload_t p2p_ps_offload;
+#endif
+
+
+ /* */
+ /* For USB Interface HAL related */
+ /* */
+ u32 UsbBulkOutSize;
+
+ /* Interrupt related register information. */
+ u32 IntArray[2];
+ u32 IntrMask[2];
+
+ /* */
+ /* For SDIO Interface HAL related */
+ /* */
+
+	/* Auto FSM to turn on; includes clock, isolation and power control for MAC only */
+ u8 bMacPwrCtrlOn;
+
+};
+
+#define GET_HAL_DATA(__pAdapter) ((struct hal_data_8723a *)((__pAdapter)->HalData))
+#define GET_RF_TYPE(priv) (GET_HAL_DATA(priv)->rf_type)
+
+#define INCLUDE_MULTI_FUNC_BT(_Adapter) (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_BT)
+#define INCLUDE_MULTI_FUNC_GPS(_Adapter) (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_GPS)
+
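A short usage sketch, not part of the original patch: GET_HAL_DATA() recovers the
HAL-private state stored in the adapter's HalData pointer, and the MultiFunc bitmask
can then be tested directly or through the INCLUDE_MULTI_FUNC_* helpers above. The
function name is hypothetical.

static inline bool rtl8723a_has_bt_function(struct rtw_adapter *padapter)
{
	struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);

	/* equivalent to INCLUDE_MULTI_FUNC_BT(padapter) */
	return (pHalData->MultiFunc & RT_MULTI_FUNC_BT) != 0;
}
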
+struct rxreport_8723a {
+ u32 pktlen:14;
+ u32 crc32:1;
+ u32 icverr:1;
+ u32 drvinfosize:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 physt:1;
+ u32 swdec:1;
+ u32 ls:1;
+ u32 fs:1;
+ u32 eor:1;
+ u32 own:1;
+
+ u32 macid:5;
+ u32 tid:4;
+ u32 hwrsvd:4;
+ u32 amsdu:1;
+ u32 paggr:1;
+ u32 faggr:1;
+ u32 a1fit:4;
+ u32 a2fit:4;
+ u32 pam:1;
+ u32 pwr:1;
+ u32 md:1;
+ u32 mf:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+
+ u32 seq:12;
+ u32 frag:4;
+ u32 nextpktlen:14;
+ u32 nextind:1;
+ u32 rsvd0831:1;
+
+ u32 rxmcs:6;
+ u32 rxht:1;
+ u32 gf:1;
+ u32 splcp:1;
+ u32 bw:1;
+ u32 htc:1;
+ u32 eosp:1;
+ u32 bssidfit:2;
+ u32 rsvd1214:16;
+ u32 unicastwake:1;
+ u32 magicwake:1;
+
+ u32 pattern0match:1;
+ u32 pattern1match:1;
+ u32 pattern2match:1;
+ u32 pattern3match:1;
+ u32 pattern4match:1;
+ u32 pattern5match:1;
+ u32 pattern6match:1;
+ u32 pattern7match:1;
+ u32 pattern8match:1;
+ u32 pattern9match:1;
+ u32 patternamatch:1;
+ u32 patternbmatch:1;
+ u32 patterncmatch:1;
+ u32 rsvd1613:19;
+
+ u32 tsfl;
+
+ u32 bassn:12;
+ u32 bavld:1;
+ u32 rsvd2413:19;
+};
+
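A minimal sketch, not part of the original patch, assuming a little-endian host so the
bitfields above line up with the DMA'd RX descriptor words; 'rxdesc' is a hypothetical
pointer to the raw descriptor and the helper name is illustrative only.

static inline u32 rtl8723a_rx_mpdu_len(const void *rxdesc)
{
	const struct rxreport_8723a *rpt = rxdesc;

	/* frames flagged with CRC32 or ICV errors carry no usable payload */
	if (rpt->crc32 || rpt->icverr)
		return 0;

	/* pktlen covers the MPDU only; drvinfosize (in 8-byte units, cf.
	 * DRVINFO_SZ) and shift describe the bytes preceding it */
	return rpt->pktlen;
}
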
+/* rtl8723a_hal_init.c */
+s32 rtl8723a_FirmwareDownload(struct rtw_adapter *padapter);
+void rtl8723a_FirmwareSelfReset(struct rtw_adapter *padapter);
+void rtl8723a_InitializeFirmwareVars(struct rtw_adapter *padapter);
+
+void rtl8723a_InitAntenna_Selection(struct rtw_adapter *padapter);
+void rtl8723a_DeinitAntenna_Selection(struct rtw_adapter *padapter);
+void rtl8723a_CheckAntenna_Selection(struct rtw_adapter *padapter);
+void rtl8723a_init_default_value(struct rtw_adapter *padapter);
+
+s32 InitLLTTable23a(struct rtw_adapter *padapter, u32 boundary);
+
+s32 CardDisableHWSM(struct rtw_adapter *padapter, u8 resetMCU);
+s32 CardDisableWithoutHWSM(struct rtw_adapter *padapter);
+
+/* EFuse */
+u8 GetEEPROMSize8723A(struct rtw_adapter *padapter);
+void Hal_InitPGData(struct rtw_adapter *padapter, u8 *PROMContent);
+void Hal_EfuseParseIDCode(struct rtw_adapter *padapter, u8 *hwinfo);
+void Hal_EfuseParsetxpowerinfo_8723A(struct rtw_adapter *padapter, u8 *PROMContent, bool AutoLoadFail);
+void Hal_EfuseParseBTCoexistInfo_8723A(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseEEPROMVer(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void rtl8723a_EfuseParseChnlPlan(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseCustomerID(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseAntennaDiversity(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseRateIndicationOption(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseXtal_8723A(struct rtw_adapter *pAdapter, u8 *hwinfo, u8 AutoLoadFail);
+void Hal_EfuseParseThermalMeter_8723A(struct rtw_adapter *padapter, u8 *hwinfo, u8 AutoLoadFail);
+
+void Hal_InitChannelPlan23a(struct rtw_adapter *padapter);
+
+void rtl8723a_set_hal_ops(struct hal_ops *pHalFunc);
+void SetHwReg8723A(struct rtw_adapter *padapter, u8 variable, u8 *val);
+void GetHwReg8723A(struct rtw_adapter *padapter, u8 variable, u8 *val);
+#ifdef CONFIG_8723AU_BT_COEXIST
+void rtl8723a_SingleDualAntennaDetection(struct rtw_adapter *padapter);
+#endif
+
+/* register */
+void SetBcnCtrlReg23a(struct rtw_adapter *padapter, u8 SetBits, u8 ClearBits);
+void rtl8723a_InitBeaconParameters(struct rtw_adapter *padapter);
+
+void rtl8723a_clone_haldata(struct rtw_adapter *dst_adapter, struct rtw_adapter *src_adapter);
+void rtl8723a_start_thread(struct rtw_adapter *padapter);
+void rtl8723a_stop_thread(struct rtw_adapter *padapter);
+
+s32 c2h_id_filter_ccx_8723a(u8 id);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_led.h b/drivers/staging/rtl8723au/include/rtl8723a_led.h
new file mode 100644
index 000000000000..1623d186feb4
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtl8723a_led.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723A_LED_H__
+#define __RTL8723A_LED_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+/* */
+/* Interface to manipulate LED objects. */
+/* */
+void rtl8723au_InitSwLeds(struct rtw_adapter *padapter);
+void rtl8723au_DeInitSwLeds(struct rtw_adapter *padapter);
+void SwLedOn23a(struct rtw_adapter *padapter, struct led_8723a *pLed);
+void SwLedOff23a(struct rtw_adapter *padapter, struct led_8723a *pLed);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_pg.h b/drivers/staging/rtl8723au/include/rtl8723a_pg.h
new file mode 100644
index 000000000000..5c2ec448e568
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtl8723a_pg.h
@@ -0,0 +1,98 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723A_PG_H__
+#define __RTL8723A_PG_H__
+
+/* EEPROM/Efuse PG Offset for 8723E/8723U/8723S */
+#define EEPROM_CCK_TX_PWR_INX_8723A 0x10
+#define EEPROM_HT40_1S_TX_PWR_INX_8723A 0x16
+#define EEPROM_HT20_TX_PWR_INX_DIFF_8723A 0x1C
+#define EEPROM_OFDM_TX_PWR_INX_DIFF_8723A 0x1F
+#define EEPROM_HT40_MAX_PWR_OFFSET_8723A 0x22
+#define EEPROM_HT20_MAX_PWR_OFFSET_8723A 0x25
+
+#define EEPROM_ChannelPlan_8723A 0x28
+#define EEPROM_TSSI_A_8723A 0x29
+#define EEPROM_THERMAL_METER_8723A 0x2A
+#define RF_OPTION1_8723A 0x2B
+#define RF_OPTION2_8723A 0x2C
+#define RF_OPTION3_8723A 0x2D
+#define RF_OPTION4_8723A 0x2E
+#define EEPROM_VERSION_8723A 0x30
+#define EEPROM_CustomID_8723A 0x31
+#define EEPROM_SubCustomID_8723A 0x32
+#define EEPROM_XTAL_K_8723A 0x33
+#define EEPROM_Chipset_8723A 0x34
+
+/* RTL8723AE */
+#define EEPROM_VID_8723AE 0x49
+#define EEPROM_DID_8723AE 0x4B
+#define EEPROM_SVID_8723AE 0x4D
+#define EEPROM_SMID_8723AE 0x4F
+#define EEPROM_MAC_ADDR_8723AE 0x67
+
+/* RTL8723AU */
+#define EEPROM_MAC_ADDR_8723AU 0xC6
+#define EEPROM_VID_8723AU 0xB7
+#define EEPROM_PID_8723AU 0xB9
+
+/* RTL8723AS */
+#define EEPROM_MAC_ADDR_8723AS 0xAA
+
+/* EEPROM/Efuse Value Type */
+#define EETYPE_TX_PWR 0x0
+
+/* EEPROM/Efuse Default Value */
+#define EEPROM_Default_CrystalCap_8723A 0x20
+
+
+/* EEPROM/EFUSE data structure definition. */
+#define MAX_CHNL_GROUP		(3 + 9)
+
+struct txpowerinfo {
+ u8 CCKIndex[RF_PATH_MAX][MAX_CHNL_GROUP];
+ u8 HT40_1SIndex[RF_PATH_MAX][MAX_CHNL_GROUP];
+ u8 HT40_2SIndexDiff[RF_PATH_MAX][MAX_CHNL_GROUP];
+ u8 HT20IndexDiff[RF_PATH_MAX][MAX_CHNL_GROUP];
+ u8 OFDMIndexDiff[RF_PATH_MAX][MAX_CHNL_GROUP];
+ u8 HT40MaxOffset[RF_PATH_MAX][MAX_CHNL_GROUP];
+ u8 HT20MaxOffset[RF_PATH_MAX][MAX_CHNL_GROUP];
+ u8 TSSI_A[3];
+ u8 TSSI_B[3];
+ u8 TSSI_A_5G[3]; /* 5GL/5GM/5GH */
+ u8 TSSI_B_5G[3];
+};
+
+enum bt_ant_num {
+ Ant_x2 = 0,
+ Ant_x1 = 1
+};
+
+enum bt_cotype {
+ BT_2Wire = 0,
+ BT_ISSC_3Wire = 1,
+ BT_Accel = 2,
+ BT_CSR_BC4 = 3,
+ BT_CSR_BC8 = 4,
+ BT_RTL8756 = 5,
+ BT_RTL8723A = 6
+};
+
+enum bt_radioshared {
+ BT_Radio_Shared = 0,
+ BT_Radio_Individual = 1,
+};
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_recv.h b/drivers/staging/rtl8723au/include/rtl8723a_recv.h
new file mode 100644
index 000000000000..6bf6904f4d48
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtl8723a_recv.h
@@ -0,0 +1,70 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723A_RECV_H__
+#define __RTL8723A_RECV_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define NR_RECVBUFF (4)
+
+#define NR_PREALLOC_RECV_SKB (8)
+
+#define RECV_BLK_SZ 512
+#define RECV_BLK_CNT 16
+#define RECV_BLK_TH RECV_BLK_CNT
+
+#define MAX_RECVBUF_SZ (15360) /* 15k < 16k */
+
+#define RECV_BULK_IN_ADDR 0x80
+#define RECV_INT_IN_ADDR 0x81
+
+#define PHY_RSSI_SLID_WIN_MAX 100
+#define PHY_LINKQUALITY_SLID_WIN_MAX 20
+
+
+struct phy_stat
+{
+ unsigned int phydw0;
+ unsigned int phydw1;
+ unsigned int phydw2;
+ unsigned int phydw3;
+ unsigned int phydw4;
+ unsigned int phydw5;
+ unsigned int phydw6;
+ unsigned int phydw7;
+};
+
+/* Rx smooth factor */
+#define Rx_Smooth_Factor (20)
+
+struct interrupt_msg_format {
+ unsigned int C2H_MSG0;
+ unsigned int C2H_MSG1;
+ unsigned int C2H_MSG2;
+ unsigned int C2H_MSG3;
+ unsigned int HISR; /* from HISR Reg0x124, read to clear */
+ unsigned int HISRE;/* from HISRE Reg0x12c, read to clear */
+ unsigned int MSG_EX;
+};
+
+void rtl8723au_init_recvbuf(struct rtw_adapter *padapter, struct recv_buf *precvbuf);
+int rtl8723au_init_recv_priv(struct rtw_adapter *padapter);
+void rtl8723au_free_recv_priv(struct rtw_adapter *padapter);
+void rtl8723a_process_phy_info(struct rtw_adapter *padapter, void *prframe);
+void update_recvframe_attrib(struct recv_frame *precvframe, struct recv_stat *prxstat);
+void update_recvframe_phyinfo(struct recv_frame *precvframe, struct phy_stat *pphy_info);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_rf.h b/drivers/staging/rtl8723au/include/rtl8723a_rf.h
new file mode 100644
index 000000000000..166a45fe47b1
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtl8723a_rf.h
@@ -0,0 +1,58 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723A_RF_H__
+#define __RTL8723A_RF_H__
+
+/*--------------------------Define Parameters-------------------------------*/
+
+/* */
+/* For RF 6052 Series */
+/* */
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG 0x3F
+#define RF6052_MAX_PATH 2
+/*--------------------------Define Parameters-------------------------------*/
+
+
+/*------------------------------Define structure----------------------------*/
+
+/*------------------------------Define structure----------------------------*/
+
+
+/*------------------------Export global variable----------------------------*/
+/*------------------------Export global variable----------------------------*/
+
+/*------------------------Export Marco Definition---------------------------*/
+
+/*------------------------Export Marco Definition---------------------------*/
+
+
+/*--------------------------Exported Function prototype---------------------*/
+
+/* */
+/* RF RL6052 Series API */
+/* */
+void rtl8723a_phy_rf6052set_bw(struct rtw_adapter *Adapter,
+ enum ht_channel_width Bandwidth);
+void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter,
+				     u8 *pPowerlevel);
+void rtl8723a_PHY_RF6052SetOFDMTxPower(struct rtw_adapter *Adapter,
+				       u8 *pPowerLevel, u8 Channel);
+
+/*--------------------------Exported Function prototype---------------------*/
+
+int PHY_RF6052_Config8723A(struct rtw_adapter *Adapter);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_spec.h b/drivers/staging/rtl8723au/include/rtl8723a_spec.h
new file mode 100644
index 000000000000..3595c27907d0
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtl8723a_spec.h
@@ -0,0 +1,2158 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *******************************************************************************/
+#ifndef __RTL8723A_SPEC_H__
+#define __RTL8723A_SPEC_H__
+
+/* */
+/* */
+/* 0x0000h ~ 0x00FFh System Configuration */
+/* */
+/* */
+#define REG_SYS_ISO_CTRL 0x0000
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_APS_FSMCO 0x0004
+#define REG_SYS_CLKR 0x0008
+#define REG_9346CR 0x000A
+#define REG_EE_VPD 0x000C
+#define REG_AFE_MISC 0x0010
+#define REG_SPS0_CTRL 0x0011
+#define REG_SPS_OCP_CFG 0x0018
+#define REG_RSV_CTRL 0x001C
+#define REG_RF_CTRL 0x001F
+#define REG_LDOA15_CTRL 0x0020
+#define REG_LDOV12D_CTRL 0x0021
+#define REG_LDOHCI12_CTRL 0x0022
+#define REG_LPLDO_CTRL 0x0023
+#define REG_AFE_XTAL_CTRL 0x0024
+#define REG_AFE_PLL_CTRL 0x0028
+#define REG_MAC_PHY_CTRL 0x002c
+#define REG_EFUSE_CTRL 0x0030
+#define REG_EFUSE_TEST 0x0034
+#define REG_PWR_DATA 0x0038
+#define REG_CAL_TIMER 0x003C
+#define REG_ACLK_MON 0x003E
+#define REG_GPIO_MUXCFG 0x0040
+#define REG_GPIO_IO_SEL 0x0042
+#define REG_MAC_PINMUX_CFG 0x0043
+#define REG_GPIO_PIN_CTRL 0x0044
+#define REG_GPIO_INTM 0x0048
+#define REG_LEDCFG0 0x004C
+#define REG_LEDCFG1 0x004D
+#define REG_LEDCFG2 0x004E
+#define REG_LEDCFG3 0x004F
+#define REG_LEDCFG REG_LEDCFG2
+#define REG_FSIMR 0x0050
+#define REG_FSISR 0x0054
+#define REG_HSIMR 0x0058
+#define REG_HSISR 0x005c
+ /* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
+#define REG_GPIO_PIN_CTRL_2 0x0060
+ /* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
+#define REG_GPIO_IO_SEL_2 0x0062
+ /* RTL8723 WIFI/BT/GPS Multi-Function control source. */
+#define REG_MULTI_FUNC_CTRL 0x0068
+#define REG_MCUFWDL 0x0080
+#define REG_HMEBOX_EXT_0 0x0088
+#define REG_HMEBOX_EXT_1 0x008A
+#define REG_HMEBOX_EXT_2 0x008C
+#define REG_HMEBOX_EXT_3 0x008E
+ /* Host suspend counter on FPGA platform */
+#define REG_HOST_SUSP_CNT 0x00BC
+ /* Efuse access protection for RTL8723 */
+#define REG_EFUSE_ACCESS 0x00CF
+#define REG_BIST_SCAN 0x00D0
+#define REG_BIST_RPT 0x00D4
+#define REG_BIST_ROM_RPT 0x00D8
+#define REG_USB_SIE_INTF 0x00E0
+#define REG_PCIE_MIO_INTF 0x00E4
+#define REG_PCIE_MIO_INTD 0x00E8
+#define REG_HPON_FSM 0x00EC
+#define REG_SYS_CFG 0x00F0
+#define REG_GPIO_OUTSTS 0x00F4 /* For RTL8723 only. */
+
+/* */
+/* */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* */
+/* */
+#define REG_CR 0x0100
+#define REG_PBP 0x0104
+#define REG_TRXDMA_CTRL 0x010C
+#define REG_TRXFF_BNDY 0x0114
+#define REG_TRXFF_STATUS 0x0118
+#define REG_RXFF_PTR 0x011C
+#define REG_HIMR 0x0120
+#define REG_HISR 0x0124
+#define REG_HIMRE 0x0128
+#define REG_HISRE 0x012C
+#define REG_CPWM 0x012F
+#define REG_FWIMR 0x0130
+#define REG_FWISR 0x0134
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_PKTBUF_DBG_DATA_L 0x0144
+#define REG_PKTBUF_DBG_DATA_H 0x0148
+
+#define REG_TC0_CTRL 0x0150
+#define REG_TC1_CTRL 0x0154
+#define REG_TC2_CTRL 0x0158
+#define REG_TC3_CTRL 0x015C
+#define REG_TC4_CTRL 0x0160
+#define REG_TCUNIT_BASE 0x0164
+#define REG_MBIST_START 0x0174
+#define REG_MBIST_DONE 0x0178
+#define REG_MBIST_FAIL 0x017C
+#define REG_C2HEVT_MSG_NORMAL 0x01A0
+#define REG_C2HEVT_CLEAR 0x01AF
+#define REG_C2HEVT_MSG_TEST 0x01B8
+#define REG_MCUTST_1 0x01c0
+#define REG_FMETHR 0x01C8
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX_0 0x01D0
+#define REG_HMEBOX_1 0x01D4
+#define REG_HMEBOX_2 0x01D8
+#define REG_HMEBOX_3 0x01DC
+
+#define REG_LLT_INIT 0x01E0
+#define REG_BB_ACCEESS_CTRL 0x01E8
+#define REG_BB_ACCESS_DATA 0x01EC
+
+
+/* */
+/* */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* */
+/* */
+#define REG_RQPN 0x0200
+#define REG_FIFOPAGE 0x0204
+#define REG_TDECTRL 0x0208
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define REG_RQPN_NPQ 0x0214
+
+/* */
+/* */
+/* 0x0280h ~ 0x02FFh RXDMA Configuration */
+/* */
+/* */
+#define REG_RXDMA_AGG_PG_TH 0x0280
+#define REG_RXPKT_NUM 0x0284
+#define REG_RXDMA_STATUS 0x0288
+
+
+/* */
+/* */
+/* 0x0300h ~ 0x03FFh PCIe */
+/* */
+/* */
+#define REG_PCIE_CTRL_REG 0x0300
+#define REG_INT_MIG 0x0304 /* Interrupt Migration */
+ /* TX Beacon Descriptor Address */
+#define REG_BCNQ_DESA 0x0308
+ /* TX High Queue Descriptor Address */
+#define REG_HQ_DESA 0x0310
+ /* TX Manage Queue Descriptor Address */
+#define REG_MGQ_DESA 0x0318
+ /* TX VO Queue Descriptor Address */
+#define REG_VOQ_DESA 0x0320
+ /* TX VI Queue Descriptor Address */
+#define REG_VIQ_DESA 0x0328
+ /* TX BE Queue Descriptor Address */
+#define REG_BEQ_DESA 0x0330
+ /* TX BK Queue Descriptor Address */
+#define REG_BKQ_DESA 0x0338
+ /* RX Queue Descriptor Address */
+#define REG_RX_DESA 0x0340
+ /* Backdoor REG for Access Configuration */
+#define REG_DBI 0x0348
+ /* MDIO for Access PCIE PHY */
+#define REG_MDIO 0x0354
+ /* Debug Selection Register */
+#define REG_DBG_SEL 0x0360
+ /* PCIe RPWM */
+#define REG_PCIE_HRPWM 0x0361
+ /* PCIe CPWM */
+#define REG_PCIE_HCPWM 0x0363
+ /* UART Control */
+#define REG_UART_CTRL 0x0364
+ /* UART TX Descriptor Address */
+#define REG_UART_TX_DESA 0x0370
+ /* UART Rx Descriptor Address */
+#define REG_UART_RX_DESA 0x0378
+
+
+/* spec version 11 */
+/* */
+/* */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* */
+/* */
+#define REG_VOQ_INFORMATION 0x0400
+#define REG_VIQ_INFORMATION 0x0404
+#define REG_BEQ_INFORMATION 0x0408
+#define REG_BKQ_INFORMATION 0x040C
+#define REG_MGQ_INFORMATION 0x0410
+#define REG_HGQ_INFORMATION 0x0414
+#define REG_BCNQ_INFORMATION 0x0418
+
+
+#define REG_CPU_MGQ_INFORMATION 0x041C
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define REG_HWSEQ_CTRL 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY 0x0425
+#define REG_LIFETIME_EN 0x0426
+#define REG_MULTI_BCNQ_OFFSET 0x0427
+#define REG_SPEC_SIFS 0x0428
+#define REG_RL 0x042A
+#define REG_DARFRC 0x0430
+#define REG_RARFRC 0x0438
+#define REG_RRSR 0x0440
+#define REG_ARFR0 0x0444
+#define REG_ARFR1 0x0448
+#define REG_ARFR2 0x044C
+#define REG_ARFR3 0x0450
+#define REG_AGGLEN_LMT 0x0458
+#define REG_AMPDU_MIN_SPACE 0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
+#define REG_FAST_EDCA_CTRL 0x0460
+#define REG_RD_RESP_PKT_TH 0x0463
+#define REG_INIRTS_RATE_SEL 0x0480
+#define REG_INIDATA_RATE_SEL 0x0484
+
+
+#define REG_POWER_STATUS 0x04A4
+#define REG_POWER_STAGE1 0x04B4
+#define REG_POWER_STAGE2 0x04B8
+#define REG_PKT_VO_VI_LIFE_TIME 0x04C0
+#define REG_PKT_BE_BK_LIFE_TIME 0x04C2
+#define REG_STBC_SETTING 0x04C4
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_MAX_AGGR_NUM 0x04CA
+#define REG_RTS_MAX_AGGR_NUM 0x04CB
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
+#define REG_NQOS_SEQ 0x04DC
+#define REG_QOS_SEQ 0x04DE
+#define REG_NEED_CPU_HANDLE 0x04E0
+#define REG_PKT_LOSE_RPT 0x04E1
+#define REG_PTCL_ERR_STATUS 0x04E2
+#define REG_DUMMY 0x04FC
+
+
+
+/* */
+/* */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* */
+/* */
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_BCNTCFG 0x0510
+#define REG_PIFS 0x0512
+#define REG_RDG_PIFS 0x0513
+#define REG_SIFS_CCK 0x0514
+#define REG_SIFS_OFDM 0x0516
+#define REG_SIFS_CTX 0x0514
+#define REG_SIFS_TRX 0x0516
+#define REG_TSFTR_SYN_OFFSET 0x0518
+#define REG_AGGR_BREAK_TIME 0x051A
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define REG_TXPAUSE 0x0522
+#define REG_DIS_TXREQ_CLR 0x0523
+#define REG_RD_CTRL 0x0524
+#define REG_TBTT_PROHIBIT 0x0540
+#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
+#define REG_BCN_CTRL 0x0550
+#define REG_BCN_CTRL_1 0x0551
+#define REG_MBID_NUM 0x0552
+#define REG_DUAL_TSF_RST 0x0553
+ /* The same as REG_MBSSID_BCN_SPACE */
+#define REG_BCN_INTERVAL 0x0554
+#define REG_MBSSID_BCN_SPACE 0x0554
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_RXTSF_OFFSET_OFDM 0x055F
+#define REG_TSFTR 0x0560
+#define REG_TSFTR1 0x0568
+#define REG_INIT_TSFTR 0x0564
+#define REG_ATIMWND_1 0x0570
+#define REG_PSTIMER 0x0580
+#define REG_TIMER0 0x0584
+#define REG_TIMER1 0x0588
+#define REG_ACMHWCTRL 0x05C0
+#define REG_ACMRSTCTRL 0x05C1
+#define REG_ACMAVG 0x05C2
+#define REG_VO_ADMTIME 0x05C4
+#define REG_VI_ADMTIME 0x05C6
+#define REG_BE_ADMTIME 0x05C8
+#define REG_EDCA_RANDOM_GEN 0x05CC
+#define REG_SCH_TXCMD 0x05D0
+
+/* define REG_FW_TSF_SYNC_CNT 0x04A0 */
+#define REG_FW_RESET_TSF_CNT_1 0x05FC
+#define REG_FW_RESET_TSF_CNT_0 0x05FD
+#define REG_FW_BCN_DIS_CNT 0x05FE
+
+/* */
+/* */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* */
+/* */
+#define REG_APSD_CTRL 0x0600
+#define REG_BWOPMODE 0x0603
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DLK_TIME 0x060D
+#define REG_RX_DRVINFO_SZ 0x060F
+
+#define REG_MACID 0x0610
+#define REG_BSSID 0x0618
+#define REG_MAR 0x0620
+#define REG_MBIDCAMCFG 0x0628
+
+#define REG_USTIME_EDCA 0x0638
+#define REG_MAC_SPEC_SIFS 0x063A
+
+/* 20100719 Joseph: Hardware register definition change. (HW datasheet v54) */
+ /* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
+#define REG_R2T_SIFS 0x063C
+ /* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
+#define REG_T2T_SIFS 0x063E
+#define REG_ACKTO 0x0640
+#define REG_CTS2TO 0x0641
+#define REG_EIFS 0x0642
+
+/* WMA, BA, CCX */
+#define REG_NAV_CTRL 0x0650
+#define REG_BACAMCMD 0x0654
+#define REG_BACAMCONTENT 0x0658
+#define REG_LBDLY 0x0660
+#define REG_FWDLY 0x0661
+#define REG_RXERR_RPT 0x0664
+#define REG_WMAC_TRXPTCL_CTL 0x0668
+
+
+/* Security */
+#define REG_CAMCMD 0x0670
+#define REG_CAMWRITE 0x0674
+#define REG_CAMREAD 0x0678
+#define REG_CAMDBG 0x067C
+#define REG_SECCFG 0x0680
+
+/* Power */
+#define REG_WOW_CTRL 0x0690
+#define REG_PSSTATUS 0x0691
+#define REG_PS_RX_INFO 0x0692
+#define REG_LPNAV_CTRL 0x0694
+#define REG_WKFMCAM_CMD 0x0698
+#define REG_WKFMCAM_RWD 0x069C
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BCN_PSR_RPT 0x06A8
+#define REG_CALB32K_CTRL 0x06AC
+#define REG_PKT_MON_CTRL 0x06B4
+#define REG_BT_COEX_TABLE 0x06C0
+#define REG_WMAC_RESP_TXINFO 0x06D8
+
+#define REG_MACID1 0x0700
+#define REG_BSSID1 0x0708
+
+
+/* */
+/* */
+/* 0xFE00h ~ 0xFE55h USB Configuration */
+/* */
+/* */
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+/* For test chip */
+#define REG_TEST_USB_TXQS 0xFE48
+#define REG_TEST_SIE_VID 0xFE60 /* 0xFE60~0xFE61 */
+#define REG_TEST_SIE_PID 0xFE62 /* 0xFE62~0xFE63 */
+#define REG_TEST_SIE_OPTIONAL 0xFE64
+#define REG_TEST_SIE_CHIRP_K 0xFE65
+#define REG_TEST_SIE_PHY 0xFE66 /* 0xFE66~0xFE6B */
+#define REG_TEST_SIE_MAC_ADDR 0xFE70 /* 0xFE70~0xFE75 */
+#define REG_TEST_SIE_STRING 0xFE80 /* 0xFE80~0xFEB9 */
+
+
+/* For normal chip */
+#define REG_NORMAL_SIE_VID 0xFE60 /* 0xFE60~0xFE61 */
+#define REG_NORMAL_SIE_PID 0xFE62 /* 0xFE62~0xFE63 */
+#define REG_NORMAL_SIE_OPTIONAL 0xFE64
+#define REG_NORMAL_SIE_EP 0xFE65 /* 0xFE65~0xFE67 */
+#define REG_NORMAL_SIE_PHY 0xFE68 /* 0xFE68~0xFE6B */
+#define REG_NORMAL_SIE_OPTIONAL2 0xFE6C
+#define REG_NORMAL_SIE_GPS_EP 0xFE6D /* RTL8723 only */
+#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 /* 0xFE70~0xFE75 */
+#define REG_NORMAL_SIE_STRING 0xFE80 /* 0xFE80~0xFEDF */
+
+
+/* */
+/* */
+/* Redefine 8192C register definitions for compatibility */
+/* */
+/* */
+
+/* TODO: use these definitions when using the REG_xxx naming rule. */
+/* NOTE: DO NOT remove these definitions; they will be used later. */
+
+ /* System Isolation Interface Control. */
+#define SYS_ISO_CTRL REG_SYS_ISO_CTRL
+ /* System Function Enable. */
+#define SYS_FUNC_EN REG_SYS_FUNC_EN
+#define SYS_CLK REG_SYS_CLKR
+ /* 93C46/93C56 Command Register. */
+#define CR9346 REG_9346CR
+ /* E-Fuse Control. */
+#define EFUSE_CTRL REG_EFUSE_CTRL
+ /* E-Fuse Test. */
+#define EFUSE_TEST REG_EFUSE_TEST
+ /* Media Status register */
+#define MSR (REG_CR + 2)
+#define ISR REG_HISR
+ /* Timing Sync Function Timer Register. */
+#define TSFR REG_TSFTR
+
+ /* MAC ID Register, Offset 0x0050-0x0053 */
+#define MACIDR0 REG_MACID
+ /* MAC ID Register, Offset 0x0054-0x0055 */
+#define MACIDR4 (REG_MACID + 4)
+
+#define PBP REG_PBP
+
+	/* Redefine the MACID register to be compatible with prior ICs. */
+#define IDR0 MACIDR0
+#define IDR4 MACIDR4
+
+
+/* */
+/* 9. Security Control Registers (Offset: ) */
+/* */
+	/* In the 8190 data sheet this is called CAMcmd */
+#define RWCAM REG_CAMCMD
+ /* Software write CAM input content */
+#define WCAMI REG_CAMWRITE
+ /* Software read/write CAM config */
+#define RCAMO REG_CAMREAD
+#define CAMDBG REG_CAMDBG
+ /* Security Configuration Register */
+#define SECR REG_SECCFG
+
+/* Unused register */
+#define UnusedRegister 0x1BF
+#define DCAM UnusedRegister
+#define PSR UnusedRegister
+#define BBAddr UnusedRegister
+#define PhyDataR UnusedRegister
+
+#define InvalidBBRFValue 0x12345678
+
+/* Min Spacing related settings. */
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+/* */
+/* 8192C Cmd9346CR bits (Offset 0xA, 16bit) */
+/* */
+ /* EEPROM enable when set 1 */
+#define CmdEEPROM_En BIT5
+ /* System EEPROM select, 0: boot from E-FUSE,
+ 1: The EEPROM used is 9346 */
+#define CmdEERPOMSEL BIT4
+#define Cmd9346CR_9356SEL BIT4
+#define AutoLoadEEPROM (CmdEEPROM_En|CmdEERPOMSEL)
+#define AutoLoadEFUSE CmdEEPROM_En
+
+/* */
+/* 8192C GPIO MUX Configuration Register (offset 0x40, 4 byte) */
+/* */
+#define GPIOSEL_GPIO 0
+#define GPIOSEL_ENBT BIT5
+
+/* */
+/* 8192C GPIO PIN Control Register (offset 0x44, 4 byte) */
+/* */
+ /* GPIO pins input value */
+#define GPIO_IN REG_GPIO_PIN_CTRL
+ /* GPIO pins output value */
+#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
+ /* GPIO pins output enable when a bit is set to "1";
+ otherwise, input is configured. */
+#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
+#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
+
+/* */
+/* 8192C (MSR) Media Status Register (Offset 0x4C, 8 bits) */
+/* */
+/*
+Network Type
+00: No link
+01: Link in ad hoc network
+10: Link in infrastructure network
+11: AP mode
+Default: 00b.
+*/
+#define MSR_NOLINK 0x00
+#define MSR_ADHOC 0x01
+#define MSR_INFRA 0x02
+#define MSR_AP 0x03
+
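A sketch only, not part of the original patch: composing the 2-bit network type value
written to MSR (REG_CR + 2). The helper and its arguments are hypothetical, and the
actual register write is omitted; MSR_NOLINK would be used when not associated.

static inline u8 rtl8723a_msr_network_type(u8 old_msr, bool ap_mode, bool adhoc)
{
	u8 val = old_msr & ~0x03;	/* clear the low network-type field */

	if (ap_mode)
		val |= MSR_AP;
	else if (adhoc)
		val |= MSR_ADHOC;
	else
		val |= MSR_INFRA;	/* associated infrastructure link assumed */

	return val;
}
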
+/* */
+/* 6. Adaptive Control Registers (Offset: 0x0160 - 0x01CF) */
+/* */
+/* */
+/* 8192C Response Rate Set Register (offset 0x181, 24bits) */
+/* */
+#define RRSR_RSC_OFFSET 21
+#define RRSR_SHORT_OFFSET 23
+#define RRSR_RSC_BW_40M 0x600000
+#define RRSR_RSC_UPSUBCHNL 0x400000
+#define RRSR_RSC_LOWSUBCHNL 0x200000
+#define RRSR_SHORT 0x800000
+#define RRSR_1M BIT0
+#define RRSR_2M BIT1
+#define RRSR_5_5M BIT2
+#define RRSR_11M BIT3
+#define RRSR_6M BIT4
+#define RRSR_9M BIT5
+#define RRSR_12M BIT6
+#define RRSR_18M BIT7
+#define RRSR_24M BIT8
+#define RRSR_36M BIT9
+#define RRSR_48M BIT10
+#define RRSR_54M BIT11
+#define RRSR_MCS0 BIT12
+#define RRSR_MCS1 BIT13
+#define RRSR_MCS2 BIT14
+#define RRSR_MCS3 BIT15
+#define RRSR_MCS4 BIT16
+#define RRSR_MCS5 BIT17
+#define RRSR_MCS6 BIT18
+#define RRSR_MCS7 BIT19
+#define BRSR_AckShortPmb BIT23
+/* CCK ACK: use Short Preamble or not */
+
+/* */
+/* 8192C BW_OPMODE bits (Offset 0x203, 8bit) */
+/* */
+#define BW_OPMODE_20MHZ BIT2
+#define BW_OPMODE_5G BIT1
+#define BW_OPMODE_11J BIT0
+
+
+/* */
+/* 8192C CAM Config Setting (offset 0x250, 1 byte) */
+/* */
+#define CAM_VALID BIT15
+#define CAM_NOTVALID 0x0000
+#define CAM_USEDK BIT5
+
+#define CAM_CONTENT_COUNT 8
+
+#define CAM_NONE 0x0
+#define CAM_WEP40 0x01
+#define CAM_TKIP 0x02
+#define CAM_AES 0x04
+#define CAM_WEP104 0x05
+
+#define TOTAL_CAM_ENTRY 32
+#define HALF_CAM_ENTRY 16
+
+#define CAM_CONFIG_USEDK true
+#define CAM_CONFIG_NO_USEDK false
+
+#define CAM_WRITE BIT16
+#define CAM_READ 0x00000000
+#define CAM_POLLINIG BIT31
+
+#define SCR_UseDK 0x01
+#define SCR_TxSecEnable 0x02
+#define SCR_RxSecEnable 0x04
+
+
+/* */
+/* 12. Host Interrupt Status Registers (Offset: 0x0300 - 0x030F) */
+/* */
+/* */
+/* 8190 IMR/ISR bits (offset 0xfd, 8bits) */
+/* */
+#define IMR8190_DISABLED 0x0
+/* IMR DW0 Bit 0-31 */
+
+#define IMR_BCNDMAINT6 BIT31 /* Beacon DMA Interrupt 6 */
+#define IMR_BCNDMAINT5 BIT30 /* Beacon DMA Interrupt 5 */
+#define IMR_BCNDMAINT4 BIT29 /* Beacon DMA Interrupt 4 */
+#define IMR_BCNDMAINT3 BIT28 /* Beacon DMA Interrupt 3 */
+#define IMR_BCNDMAINT2 BIT27 /* Beacon DMA Interrupt 2 */
+#define IMR_BCNDMAINT1 BIT26 /* Beacon DMA Interrupt 1 */
+#define IMR_BCNDOK8 BIT25 /* Beacon Queue DMA OK
+ Interrupt 8 */
+#define IMR_BCNDOK7 BIT24 /* Beacon Queue DMA OK
+ Interrupt 7 */
+#define IMR_BCNDOK6 BIT23 /* Beacon Queue DMA OK
+ Interrupt 6 */
+#define IMR_BCNDOK5 BIT22 /* Beacon Queue DMA OK
+ Interrupt 5 */
+#define IMR_BCNDOK4 BIT21 /* Beacon Queue DMA OK
+ Interrupt 4 */
+#define IMR_BCNDOK3 BIT20 /* Beacon Queue DMA OK
+ Interrupt 3 */
+#define IMR_BCNDOK2 BIT19 /* Beacon Queue DMA OK
+ Interrupt 2 */
+#define IMR_BCNDOK1 BIT18 /* Beacon Queue DMA OK
+ Interrupt 1 */
+#define IMR_TIMEOUT2 BIT17 /* Timeout interrupt 2 */
+#define IMR_TIMEOUT1 BIT16 /* Timeout interrupt 1 */
+#define IMR_TXFOVW BIT15 /* Transmit FIFO Overflow */
+#define IMR_PSTIMEOUT BIT14 /* Power save time out
+ interrupt */
+#define IMR_BcnInt BIT13 /* Beacon DMA Interrupt 0 */
+#define IMR_RXFOVW BIT12 /* Receive FIFO Overflow */
+#define IMR_RDU BIT11 /* Receive Descriptor
+ Unavailable */
+#define IMR_ATIMEND BIT10 /* For 92C,ATIM Window
+ End Interrupt */
+#define IMR_BDOK BIT9 /* Beacon Queue DMA OK
+					Interrupt */
+#define IMR_HIGHDOK BIT8 /* High Queue DMA OK
+ Interrupt */
+#define IMR_TBDOK BIT7 /* Transmit Beacon OK
+					interrupt */
+#define IMR_MGNTDOK BIT6 /* Management Queue DMA OK
+ Interrupt */
+#define IMR_TBDER BIT5 /* For 92C,Transmit Beacon
+ Error Interrupt */
+#define IMR_BKDOK BIT4 /* AC_BK DMA OK Interrupt */
+#define IMR_BEDOK BIT3 /* AC_BE DMA OK Interrupt */
+#define IMR_VIDOK BIT2 /* AC_VI DMA OK Interrupt */
+#define IMR_VODOK BIT1 /* AC_VO DMA Interrupt */
+#define IMR_ROK BIT0 /* Receive DMA OK Interrupt */
+
+#define IMR_RX_MASK (IMR_ROK|IMR_RDU|IMR_RXFOVW)
+#define IMR_TX_MASK (IMR_VODOK|IMR_VIDOK|IMR_BEDOK| \
+ IMR_BKDOK|IMR_MGNTDOK|IMR_HIGHDOK| \
+ IMR_BDOK)
+
+/* 13. Host Interrupt Status Extension Register (Offset: 0x012C-012Eh) */
+#define IMR_BcnInt_E BIT12
+#define IMR_TXERR BIT11
+#define IMR_RXERR BIT10
+#define IMR_C2HCMD BIT9
+#define IMR_CPWM BIT8
+/* RSVD [2-7] */
+#define IMR_OCPINT BIT1
+#define IMR_WLANOFF BIT0
+
+
+/* 8192C EEPROM/EFUSE share register definition. */
+
+/* Default Value for EEPROM or EFUSE!!! */
+#define EEPROM_Default_TSSI 0x0
+#define EEPROM_Default_TxPowerDiff 0x0
+#define EEPROM_Default_CrystalCap 0x5
+ /* Default: 2X2, RTL8192CE(QFPN68) */
+#define EEPROM_Default_BoardType 0x02
+#define EEPROM_Default_TxPower 0x1010
+#define EEPROM_Default_HT2T_TxPwr 0x10
+
+#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
+#define EEPROM_Default_ThermalMeter 0x12
+
+#define EEPROM_Default_AntTxPowerDiff 0x0
+#define EEPROM_Default_TxPwDiff_CrystalCap 0x5
+#define EEPROM_Default_TxPowerLevel 0x22
+#define EEPROM_Default_HT40_2SDiff 0x0
+ /* HT20<->40 default Tx Power Index Difference */
+#define EEPROM_Default_HT20_Diff 2
+#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
+#define EEPROM_Default_HT40_PwrMaxOffset 0
+#define EEPROM_Default_HT20_PwrMaxOffset 0
+
+/* For debug */
+#define EEPROM_Default_PID 0x1234
+#define EEPROM_Default_VID 0x5678
+#define EEPROM_Default_CustomerID 0xAB
+#define EEPROM_Default_SubCustomerID 0xCD
+#define EEPROM_Default_Version 0
+
+#define EEPROM_CHANNEL_PLAN_FCC 0x0
+#define EEPROM_CHANNEL_PLAN_IC 0x1
+#define EEPROM_CHANNEL_PLAN_ETSI 0x2
+#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
+#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
+#define EEPROM_CHANNEL_PLAN_MKK 0x5
+#define EEPROM_CHANNEL_PLAN_MKK1 0x6
+#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
+#define EEPROM_CHANNEL_PLAN_TELEC 0x8
+#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
+#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
+#define EEPROM_CHANNEL_PLAN_NCC 0xB
+#define EEPROM_USB_OPTIONAL1 0xE
+#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_TOSHIBA 0x4
+ /* CCX test. By Bruce, 2009-02-25. */
+#define EEPROM_CID_CCX 0x10
+#define EEPROM_CID_QMI 0x0D
+ /* added by chiyoko for dtm, 20090108 */
+#define EEPROM_CID_WHQL 0xFE
+
+
+#define RTL_EEPROM_ID 0x8129
+
+#define SUPPORT_HW_RADIO_DETECT(pHalData) \
+ (pHalData->BoardType == BOARD_MINICARD || \
+ pHalData->BoardType == BOARD_USB_SOLO || \
+ pHalData->BoardType == BOARD_USB_COMBO)
+
+/* */
+/* EEPROM address for Test chip */
+/* */
+#define EEPROM_TEST_USB_OPT 0x0E
+#define EEPROM_TEST_CHIRP_K 0x0F
+#define EEPROM_TEST_EP_SETTING 0x0E
+#define EEPROM_TEST_USB_PHY 0x10
+
+
+/* */
+/* EEPROM address for Normal chip */
+/* */
+#define EEPROM_NORMAL_USB_OPT 0x0E
+#define EEPROM_NORMAL_CHIRP_K 0x0E /* Changed */
+#define EEPROM_NORMAL_EP_SETTING 0x0F /* Changed */
+#define EEPROM_NORMAL_USB_PHY 0x12 /* Changed */
+
+enum {
+ BOARD_USB_DONGLE = 0, /* USB dongle */
+ BOARD_USB_High_PA = 1, /* USB dongle with high power PA */
+ BOARD_MINICARD = 2, /* Minicard */
+ BOARD_USB_SOLO = 3, /* USB solo-Slim module */
+ BOARD_USB_COMBO = 4, /* USB Combo-Slim module */
+};
+
+/* Test chip and normal chip common define */
+/* */
+/* EEPROM address for both */
+/* */
+#define EEPROM_ID0 0x00
+#define EEPROM_ID1 0x01
+#define EEPROM_RTK_RSV1 0x02
+#define EEPROM_RTK_RSV2 0x03
+#define EEPROM_RTK_RSV3 0x04
+#define EEPROM_RTK_RSV4 0x05
+#define EEPROM_RTK_RSV5 0x06
+#define EEPROM_DBG_SEL 0x07
+#define EEPROM_RTK_RSV6 0x08
+#define EEPROM_VID 0x0A
+#define EEPROM_PID 0x0C
+
+#define EEPROM_MAC_ADDR 0x16
+#define EEPROM_STRING 0x1C
+#define EEPROM_SUBCUSTOMER_ID 0x59
+#define EEPROM_CCK_TX_PWR_INX 0x5A
+#define EEPROM_HT40_1S_TX_PWR_INX 0x60
+#define EEPROM_HT40_2S_TX_PWR_INX_DIFF 0x66
+#define EEPROM_HT20_TX_PWR_INX_DIFF 0x69
+#define EEPROM_OFDM_TX_PWR_INX_DIFF 0x6C
+#define EEPROM_HT40_MAX_PWR_OFFSET 0x6F
+#define EEPROM_HT20_MAX_PWR_OFFSET 0x72
+
+#define EEPROM_CHANNEL_PLAN 0x75
+#define EEPROM_TSSI_A 0x76
+#define EEPROM_TSSI_B 0x77
+#define EEPROM_THERMAL_METER 0x78
+#define EEPROM_RF_OPT1 0x79
+#define EEPROM_RF_OPT2 0x7A
+#define EEPROM_RF_OPT3 0x7B
+#define EEPROM_RF_OPT4 0x7C
+#define EEPROM_VERSION 0x7E
+#define EEPROM_CUSTOMER_ID 0x7F
+
+ /* 0x0: RTL8188SU, 0x1: RTL8191SU, 0x2: RTL8192SU, 0x3: RTL8191GU */
+#define EEPROM_BoardType 0x54
+ /* 0x5C-0x76, Tx Power index. */
+#define EEPROM_TxPwIndex 0x5C
+ /* Difference of gain index between legacy and high throughput OFDM. */
+#define EEPROM_PwDiff 0x67
+ /* CCK Tx Power */
+#define EEPROM_TxPowerCCK 0x5A
+
+/* 2009/02/09 Cosa Add for SD3 requirement */
+ /* HT20 Tx Power Index Difference */
+#define EEPROM_TX_PWR_HT20_DIFF 0x6e
+ /* HT20<->40 default Tx Power Index Difference */
+#define DEFAULT_HT20_TXPWR_DIFF 2
+ /* OFDM Tx Power Index Difference */
+#define EEPROM_TX_PWR_OFDM_DIFF 0x71
+
+ /* Power diff for channel group */
+#define EEPROM_TxPWRGroup 0x73
+	/* Check if power safety is needed */
+#define EEPROM_Regulatory 0x79
+
+ /* 92cu, 0x7E[4] */
+#define EEPROM_BLUETOOTH_COEXIST 0x7E
+#define EEPROM_NORMAL_BoardType	EEPROM_RF_OPT1	/* [7:5] */
+#define BOARD_TYPE_NORMAL_MASK 0xE0
+#define BOARD_TYPE_TEST_MASK 0x0F
+	/* BIT0 1 for built-in module, 0 for external dongle */
+#define EEPROM_EASY_REPLACEMENT 0x50
+/* */
+/* EEPROM content definitions */
+/* */
+#define OS_LINK_SPEED BIT(5)
+
+#define BOARD_TYPE_MASK 0xF
+
+#define BT_COEXISTENCE BIT(4)
+#define BT_CO_SHIFT 4
+
+#define EP_NUMBER_MASK 0x30 /* bit 4:5 0Eh */
+#define EP_NUMBER_SHIFT 4
+
+
+#define USB_PHY_PARA_SIZE 5
+
+
+/* */
+/* EEPROM default value definitions */
+/* */
+/* Use 0xABCD instead of 0x8192 for debug */
+#define EEPROM_DEF_ID_0 0xCD /* Byte 0x00 */
+#define EEPROM_DEF_ID_1 0xAB /* Byte 0x01 */
+
+#define EEPROM_DEF_RTK_RSV_A3 0x74 /* Byte 0x03 */
+#define EEPROM_DEF_RTK_RSV_A4 0x6D /* Byte 0x04 */
+#define EEPROM_DEF_RTK_RSV_A8 0xFF /* Byte 0x08 */
+
+#define EEPROM_DEF_VID_0 0x0A /* Byte 0x0A */
+#define EEPROM_DEF_VID_1 0x0B
+
+#define EEPROM_DEF_PID_0 0x92 /* Byte 0x0C */
+#define EEPROM_DEF_PID_1 0x81
+
+
+#define EEPROM_TEST_DEF_USB_OPT 0x80 /* Byte 0x0E */
+#define EEPROM_NORMAL_DEF_USB_OPT 0x00 /* Byte 0x0E */
+
+#define EEPROM_DEF_CHIRPK 0x15 /* Byte 0x0F */
+
+#define EEPROM_DEF_USB_PHY_0 0x85 /* Byte 0x10 */
+#define EEPROM_DEF_USB_PHY_1 0x62 /* Byte 0x11 */
+#define EEPROM_DEF_USB_PHY_2 0x9E /* Byte 0x12 */
+#define EEPROM_DEF_USB_PHY_3 0x06 /* Byte 0x13 */
+
+#define EEPROM_DEF_TSSI_A 0x09 /* Byte 0x78 */
+#define EEPROM_DEF_TSSI_B 0x09 /* Byte 0x79 */
+
+
+#define EEPROM_DEF_THERMAL_METER 0x12 /* Byte 0x7A */
+
+	/* Check if power safety spec is needed */
+#define RF_OPTION1 0x79
+#define RF_OPTION2 0x7A
+#define RF_OPTION3 0x7B
+#define RF_OPTION4 0x7C
+
+
+#define EEPROM_USB_SN BIT(0)
+#define EEPROM_USB_REMOTE_WAKEUP BIT(1)
+#define EEPROM_USB_DEVICE_PWR BIT(2)
+#define EEPROM_EP_NUMBER (BIT(3)|BIT(4))
+
+/*===================================================================
+=====================================================================
+Here the register defines are for the 92C. When a define is the same as on the
+92C, we use the 92C's define for consistency,
+so the following defines for the 92C are not complete!
+=====================================================================
+=====================================================================*/
+/*
+Based on Datasheet V33---090401
+Register Summary
+Current IOREG MAP
+0x0000h ~ 0x00FFh System Configuration (256 Bytes)
+0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
+0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
+0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
+0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
+0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
+0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
+0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
+0x2000h ~ 0x3FFFh	8051 FW Download Region (8192 Bytes)
+*/
+
+/* */
+/* 8192C (RCR) Receive Configuration Register (Offset 0x608, 32 bits) */
+/* */
+#define RCR_APPFCS		BIT31	/* WMAC append FCS after payload */
+#define RCR_APP_MIC BIT30
+#define RCR_APP_PHYSTS BIT28
+#define RCR_APP_ICV BIT29
+#define RCR_APP_PHYST_RXFF BIT28
+#define RCR_APP_BA_SSN BIT27 /* Accept BA SSN */
+#define RCR_ENMBID BIT24 /* Enable Multiple BssId. */
+#define RCR_LSIGEN BIT23
+#define RCR_MFBEN BIT22
+#define RCR_HTC_LOC_CTRL BIT14 /* MFC<--HTC=1 MFC-->HTC=0 */
+#define RCR_AMF BIT13 /* Accept management type frame */
+#define RCR_ACF BIT12 /* Accept control type frame */
+#define RCR_ADF BIT11 /* Accept data type frame */
+#define RCR_AICV BIT9 /* Accept ICV error packet */
+#define RCR_ACRC32 BIT8 /* Accept CRC32 error packet */
+#define RCR_CBSSID_BCN BIT7 /* Accept BSSID match packet
+ (Rx beacon, probe rsp) */
+#define RCR_CBSSID_DATA BIT6 /* Accept BSSID match packet
+ (Data) */
+#define RCR_CBSSID RCR_CBSSID_DATA /* Accept BSSID match
+ packet */
+#define RCR_APWRMGT BIT5 /* Accept power management
+ packet */
+#define RCR_ADD3 BIT4 /* Accept address 3 match
+ packet */
+#define RCR_AB BIT3 /* Accept broadcast packet */
+#define RCR_AM BIT2 /* Accept multicast packet */
+#define RCR_APM BIT1 /* Accept physical match packet */
+#define RCR_AAP BIT0 /* Accept all unicast packet */
+#define RCR_MXDMA_OFFSET 8
+#define RCR_FIFO_OFFSET 13
+
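A sketch, not part of the original patch: how the RCR bits above compose into a
receive-filter word. The particular combination shown is only an example; the value
the driver actually programs depends on configuration, and the helper name is
hypothetical.

static inline u32 rtl8723a_example_rcr(void)
{
	/* accept physical-match, multicast and broadcast frames; keep the
	 * appended MIC/ICV and the PHY status report; filter data and
	 * beacon/probe response frames by BSSID match */
	return RCR_APM | RCR_AM | RCR_AB |
	       RCR_APP_MIC | RCR_APP_ICV | RCR_APP_PHYST_RXFF |
	       RCR_CBSSID_DATA | RCR_CBSSID_BCN;
}
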
+
+
+/* */
+/* 8192c USB specific Register Offset and Content definitions, */
+/* 2009.08.18, added by vivi, to merge 92c and 92C into one driver */
+/* */
+/* define APS_FSMCO 0x0004 same with 92Ce */
+#define RSV_CTRL 0x001C
+#define RD_CTRL 0x0524
+
+/* */
+/* */
+/* 0xFE00h ~ 0xFE55h USB Configuration */
+/* */
+/* */
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_USB_VID 0xFE60
+#define REG_USB_PID 0xFE62
+#define REG_USB_OPTIONAL 0xFE64
+#define REG_USB_CHIRP_K 0xFE65
+#define REG_USB_PHY 0xFE66
+#define REG_USB_MAC_ADDR 0xFE70
+
+#define REG_USB_HRPWM 0xFE58
+#define REG_USB_HCPWM 0xFE57
+
+#define InvalidBBRFValue 0x12345678
+
+/* */
+/* 8192C Register Bit and Content definition */
+/* */
+/* */
+/* */
+/* 0x0000h ~ 0x00FFh System Configuration */
+/* */
+/* */
+
+/* 2 SPS0_CTRL */
+#define SW18_FPWM BIT(3)
+
+
+/* 2 SYS_ISO_CTRL */
+#define ISO_MD2PP BIT(0)
+#define ISO_UA2USB BIT(1)
+#define ISO_UD2CORE BIT(2)
+#define ISO_PA2PCIE BIT(3)
+#define ISO_PD2CORE BIT(4)
+#define ISO_IP2MAC BIT(5)
+#define ISO_DIOP BIT(6)
+#define ISO_DIOE BIT(7)
+#define ISO_EB2CORE BIT(8)
+#define ISO_DIOR BIT(9)
+
+#define PWC_EV25V BIT(14)
+#define PWC_EV12V BIT(15)
+
+
+/* 2 SYS_FUNC_EN */
+#define FEN_BBRSTB BIT(0)
+#define FEN_BB_GLB_RSTn BIT(1)
+#define FEN_USBA BIT(2)
+#define FEN_UPLL BIT(3)
+#define FEN_USBD BIT(4)
+#define FEN_DIO_PCIE BIT(5)
+#define FEN_PCIEA BIT(6)
+#define FEN_PPLL BIT(7)
+#define FEN_PCIED BIT(8)
+#define FEN_DIOE BIT(9)
+#define FEN_CPUEN BIT(10)
+#define FEN_DCORE BIT(11)
+#define FEN_ELDR BIT(12)
+#define FEN_DIO_RF BIT(13)
+#define FEN_HWPDN BIT(14)
+#define FEN_MREGEN BIT(15)
+
+/* 2 APS_FSMCO */
+#define PFM_LDALL BIT(0)
+#define PFM_ALDN BIT(1)
+#define PFM_LDKP BIT(2)
+#define PFM_WOWL BIT(3)
+#define EnPDN BIT(4)
+#define PDN_PL BIT(5)
+#define APFM_ONMAC BIT(8)
+#define APFM_OFF BIT(9)
+#define APFM_RSM BIT(10)
+#define AFSM_HSUS BIT(11)
+#define AFSM_PCIE BIT(12)
+#define APDM_MAC BIT(13)
+#define APDM_HOST BIT(14)
+#define APDM_HPDN BIT(15)
+#define RDY_MACON BIT(16)
+#define SUS_HOST BIT(17)
+#define ROP_ALD BIT(20)
+#define ROP_PWR BIT(21)
+#define ROP_SPS BIT(22)
+#define SOP_MRST BIT(25)
+#define SOP_FUSE BIT(26)
+#define SOP_ABG BIT(27)
+#define SOP_AMB BIT(28)
+#define SOP_RCK BIT(29)
+#define SOP_A8M BIT(30)
+#define XOP_BTCK BIT(31)
+
+/* 2 SYS_CLKR */
+#define ANAD16V_EN BIT(0)
+#define ANA8M BIT(1)
+#define MACSLP BIT(4)
+#define LOADER_CLK_EN BIT(5)
+#define _80M_SSC_DIS BIT(7)
+#define _80M_SSC_EN_HO BIT(8)
+#define PHY_SSC_RSTB BIT(9)
+#define SEC_CLK_EN BIT(10)
+#define MAC_CLK_EN BIT(11)
+#define SYS_CLK_EN BIT(12)
+#define RING_CLK_EN BIT(13)
+
+
+/* 2 9346CR */
+
+
+#define EEDO BIT(0)
+#define EEDI BIT(1)
+#define EESK BIT(2)
+#define EECS BIT(3)
+/* define EERPROMSEL BIT(4) */
+/* define EEPROM_EN BIT(5) */
+#define BOOT_FROM_EEPROM BIT(4)
+#define EEPROM_EN BIT(5)
+#define EEM0 BIT(6)
+#define EEM1 BIT(7)
+
+
+/* 2 AFE_MISC */
+#define AFE_BGEN BIT(0)
+#define AFE_MBEN BIT(1)
+#define MAC_ID_EN BIT(7)
+
+
+/* 2 SPS0_CTRL */
+
+
+/* 2 SPS_OCP_CFG */
+
+
+/* 2 RSV_CTRL */
+#define WLOCK_ALL BIT(0)
+#define WLOCK_00 BIT(1)
+#define WLOCK_04 BIT(2)
+#define WLOCK_08 BIT(3)
+#define WLOCK_40 BIT(4)
+#define R_DIS_PRST_0 BIT(5)
+#define R_DIS_PRST_1 BIT(6)
+#define LOCK_ALL_EN BIT(7)
+
+/* 2 RF_CTRL */
+#define RF_EN BIT(0)
+#define RF_RSTB BIT(1)
+#define RF_SDMRSTB BIT(2)
+
+
+
+/* 2 LDOA15_CTRL */
+#define LDA15_EN BIT(0)
+#define LDA15_STBY BIT(1)
+#define LDA15_OBUF BIT(2)
+#define LDA15_REG_VOS BIT(3)
+#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
+
+
+
+/* 2 LDOV12D_CTRL */
+#define LDV12_EN BIT(0)
+#define LDV12_SDBY BIT(1)
+#define LPLDO_HSM BIT(2)
+#define LPLDO_LSM_DIS BIT(3)
+#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
+
+
+/* 2 AFE_XTAL_CTRL */
+#define XTAL_EN BIT(0)
+#define XTAL_BSEL BIT(1)
+#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
+#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
+#define XTAL_GATE_USB BIT(8)
+#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
+#define XTAL_GATE_AFE BIT(11)
+#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
+#define XTAL_RF_GATE BIT(14)
+#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
+#define XTAL_GATE_DIG BIT(17)
+#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
+#define XTAL_BT_GATE BIT(20)
+#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
+#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
+
+
+#define CKDLY_AFE BIT(26)
+#define CKDLY_USB BIT(27)
+#define CKDLY_DIG BIT(28)
+#define CKDLY_BT BIT(29)
+
+
+/* 2 AFE_PLL_CTRL */
+#define APLL_EN BIT(0)
+#define APLL_320_EN BIT(1)
+#define APLL_FREF_SEL BIT(2)
+#define APLL_EDGE_SEL BIT(3)
+#define APLL_WDOGB BIT(4)
+#define APLL_LPFEN BIT(5)
+
+#define APLL_REF_CLK_13MHZ 0x1
+#define APLL_REF_CLK_19_2MHZ 0x2
+#define APLL_REF_CLK_20MHZ 0x3
+#define APLL_REF_CLK_25MHZ 0x4
+#define APLL_REF_CLK_26MHZ 0x5
+#define APLL_REF_CLK_38_4MHZ 0x6
+#define APLL_REF_CLK_40MHZ 0x7
+
+#define APLL_320EN BIT(14)
+#define APLL_80EN BIT(15)
+#define APLL_1MEN BIT(24)
+
+
+/* 2 EFUSE_CTRL */
+#define ALD_EN BIT(18)
+#define EF_PD BIT(19)
+#define EF_FLAG BIT(31)
+
+/* 2 EFUSE_TEST (For RTL8723 partially) */
+#define EF_TRPT BIT(7)
+ /* 00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
+#define EF_CELL_SEL (BIT(8)|BIT(9))
+#define LDOE25_EN BIT(31)
+#define EFUSE_SEL(x) (((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK 0x300
+#define EFUSE_WIFI_SEL_0 0x0
+#define EFUSE_BT_SEL_0 0x1
+#define EFUSE_BT_SEL_1 0x2
+#define EFUSE_BT_SEL_2 0x3
+
+#define EFUSE_ACCESS_ON 0x69 /* For RTL8723 only. */
+#define EFUSE_ACCESS_OFF 0x00 /* For RTL8723 only. */
+
+/* 2 PWR_DATA */
+
+/* 2 CAL_TIMER */
+
+/* 2 ACLK_MON */
+#define RSM_EN BIT(0)
+#define Timer_EN BIT(4)
+
+
+/* 2 GPIO_MUXCFG */
+#define TRSW0EN BIT(2)
+#define TRSW1EN BIT(3)
+#define EROM_EN BIT(4)
+#define EnBT BIT(5)
+#define EnUart BIT(8)
+#define Uart_910 BIT(9)
+#define EnPMAC BIT(10)
+#define SIC_SWRST BIT(11)
+#define EnSIC BIT(12)
+#define SIC_23 BIT(13)
+#define EnHDP BIT(14)
+#define SIC_LBK BIT(15)
+
+/* 2 GPIO_PIN_CTRL */
+
+/* GPIO BIT */
+#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
+
+/* 2 GPIO_INTM */
+
+/* 2 LEDCFG */
+#define LED0PL BIT(4)
+#define LED0DIS BIT(7)
+#define LED1DIS BIT(15)
+#define LED1PL BIT(12)
+
+#define SECCAM_CLR BIT(30)
+
+
+/* 2 FSIMR */
+
+/* 2 FSISR */
+
+
+/* 2 8051FWDL */
+/* 2 MCUFWDL */
+#define MCUFWDL_EN BIT(0)
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_ChkSum_rpt BIT(2)
+#define MACINI_RDY BIT(3)
+#define BBINI_RDY BIT(4)
+#define RFINI_RDY BIT(5)
+#define WINTINI_RDY BIT(6)
+#define CPRST BIT(23)
+
+/* 2REG_HPON_FSM */
+#define BOND92CE_1T2R_CFG BIT(22)
+
+
+/* 2 REG_SYS_CFG */
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define TRP_B15V_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define CHIP_VER (BIT(12)|BIT(13)|BIT(14)|BIT(15))
+#define BT_FUNC BIT(16)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23)
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+#define CHIP_VER_RTL_MASK 0xF000 /* Bit 12 ~ 15 */
+#define CHIP_VER_RTL_SHIFT 12
+
+/* 2REG_GPIO_OUTSTS (For RTL8723 only) */
+#define EFS_HCI_SEL (BIT(0)|BIT(1))
+#define PAD_HCI_SEL (BIT(2)|BIT(3))
+#define HCI_SEL (BIT(4)|BIT(5))
+#define PKG_SEL_HCI BIT(6)
+#define FEN_GPS BIT(7)
+#define FEN_BT BIT(8)
+#define FEN_WL BIT(9)
+#define FEN_PCI BIT(10)
+#define FEN_USB BIT(11)
+#define BTRF_HWPDN_N BIT(12)
+#define WLRF_HWPDN_N BIT(13)
+#define PDN_BT_N BIT(14)
+#define PDN_GPS_N BIT(15)
+#define BT_CTL_HWPDN BIT(16)
+#define GPS_CTL_HWPDN BIT(17)
+#define PPHY_SUSB BIT(20)
+#define UPHY_SUSB BIT(21)
+#define PCI_SUSEN BIT(22)
+#define USB_SUSEN BIT(23)
+#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
+
+/* */
+/* */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* */
+/* */
+
+
+/* 2 Function Enable Registers */
+/* 2 CR */
+
+#define REG_LBMODE (REG_CR + 3)
+
+
+#define HCI_TXDMA_EN BIT(0)
+#define HCI_RXDMA_EN BIT(1)
+#define TXDMA_EN BIT(2)
+#define RXDMA_EN BIT(3)
+#define PROTOCOL_EN BIT(4)
+#define SCHEDULE_EN BIT(5)
+#define MACTXEN BIT(6)
+#define MACRXEN BIT(7)
+#define ENSWBCN BIT(8)
+#define ENSEC BIT(9)
+
+/* Network type */
+#define _NETTYPE(x) (((x) & 0x3) << 16)
+#define MASK_NETTYPE 0x30000
+#define NT_NO_LINK 0x0
+#define NT_LINK_AD_HOC 0x1
+#define NT_LINK_AP 0x2
+#define NT_AS_AP 0x3
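A minimal usage sketch for the network-type field above (illustrative, not part of this patch): the REG_CR offset and the rtw_read32()/rtw_write32() accessor names are assumed to be defined elsewhere in the driver.

static void example_set_nettype(struct rtw_adapter *padapter, u8 type)
{
	/* Read-modify-write bits 17:16 of REG_CR, e.g. type = NT_LINK_AP. */
	u32 val = rtw_read32(padapter, REG_CR);

	val &= ~MASK_NETTYPE;
	val |= _NETTYPE(type);
	rtw_write32(padapter, REG_CR, val);
}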
+
+#define _LBMODE(x) (((x) & 0xF) << 24)
+#define MASK_LBMODE 0xF000000
+#define LOOPBACK_NORMAL 0x0
+#define LOOPBACK_IMMEDIATELY 0xB
+#define LOOPBACK_MAC_DELAY 0x3
+#define LOOPBACK_PHY 0x1
+#define LOOPBACK_DMA 0x7
+
+
+/* 2 PBP - Page Size Register */
+#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
+#define _PSRX_MASK 0xF
+#define _PSTX_MASK 0xF0
+#define _PSRX(x) (x)
+#define _PSTX(x) ((x) << 4)
+
+#define PBP_64 0x0
+#define PBP_128 0x1
+#define PBP_256 0x2
+#define PBP_512 0x3
+#define PBP_1024 0x4
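A small sketch of how these page-size encodings compose and decode (illustrative only; REG_PBP and the rtw_write8() accessor name are assumptions):

static void example_set_page_sizes(struct rtw_adapter *padapter)
{
	/* 128-byte RX pages, 256-byte TX pages. */
	u8 pbp = _PSRX(PBP_128) | _PSTX(PBP_256);

	rtw_write8(padapter, REG_PBP, pbp);

	/* Decoding returns the encoded codes, not byte counts. */
	WARN_ON(GET_RX_PAGE_SIZE(pbp) != PBP_128);
	WARN_ON(GET_TX_PAGE_SIZE(pbp) != PBP_256);
}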
+
+
+/* 2 TX/RXDMA */
+#define RXDMA_ARBBW_EN BIT(0)
+#define RXSHFT_EN BIT(1)
+#define RXDMA_AGG_EN BIT(2)
+#define QS_VO_QUEUE BIT(8)
+#define QS_VI_QUEUE BIT(9)
+#define QS_BE_QUEUE BIT(10)
+#define QS_BK_QUEUE BIT(11)
+#define QS_MANAGER_QUEUE BIT(12)
+#define QS_HIGH_QUEUE BIT(13)
+
+#define HQSEL_VOQ BIT(0)
+#define HQSEL_VIQ BIT(1)
+#define HQSEL_BEQ BIT(2)
+#define HQSEL_BKQ BIT(3)
+#define HQSEL_MGTQ BIT(4)
+#define HQSEL_HIQ BIT(5)
+
+/* For normal driver, 0x10C */
+#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
+#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
+#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
+#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8 )
+#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6 )
+#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4 )
+
+#define QUEUE_LOW 1
+#define QUEUE_NORMAL 2
+#define QUEUE_HIGH 3
+
+
+
+/* 2 TRXFF_BNDY */
+
+
+/* 2 LLT_INIT */
+#define _LLT_NO_ACTIVE 0x0
+#define _LLT_WRITE_ACCESS 0x1
+#define _LLT_READ_ACCESS 0x2
+
+#define _LLT_INIT_DATA(x) ((x) & 0xFF)
+#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
+#define _LLT_OP(x) (((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
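A hedged sketch of one LLT access using the helpers above: issue a write, then poll until the op field reads back as _LLT_NO_ACTIVE. REG_LLT_INIT and the rtw_read32()/rtw_write32() names are assumptions; POLLING_LLT_THRESHOLD is defined further down in this header.

static int example_llt_write(struct rtw_adapter *padapter, u32 addr, u32 data)
{
	int count = 0;
	u32 value = _LLT_INIT_ADDR(addr) | _LLT_INIT_DATA(data) |
		    _LLT_OP(_LLT_WRITE_ACCESS);

	rtw_write32(padapter, REG_LLT_INIT, value);

	/* Hardware clears the op field once the access has completed. */
	do {
		value = rtw_read32(padapter, REG_LLT_INIT);
		if (_LLT_OP_VALUE(value) == _LLT_NO_ACTIVE)
			return 0;
	} while (++count < POLLING_LLT_THRESHOLD);

	return -ETIMEDOUT;
}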
+
+
+/* 2 BB_ACCESS_CTRL */
+#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
+#define BB_WRITE_EN BIT(30)
+#define BB_READ_EN BIT(31)
+/* define BB_ADDR_MASK 0xFFF */
+/* define _BB_ADDR(x) ((x) & BB_ADDR_MASK) */
+
+/* */
+/* */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* */
+/* */
+/* 2 RQPN */
+#define _HPQ(x) ((x) & 0xFF)
+#define _LPQ(x) (((x) & 0xFF) << 8)
+#define _PUBQ(x) (((x) & 0xFF) << 16)
+ /* NOTE: in RQPN_NPQ register */
+#define _NPQ(x) ((x) & 0xFF)
+
+
+#define HPQ_PUBLIC_DIS BIT(24)
+#define LPQ_PUBLIC_DIS BIT(25)
+#define LD_RQPN BIT(31)
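Illustrative only: the reserved-page fields are typically OR'ed together and latched with LD_RQPN in one 32-bit write. REG_RQPN and the page counts used here are assumptions, not values taken from this patch.

static void example_init_rqpn(struct rtw_adapter *padapter)
{
	/* Placeholder page counts for the high/low/public queues. */
	rtw_write32(padapter, REG_RQPN,
		    _HPQ(0x29) | _LPQ(0x1C) | _PUBQ(0x1F) | LD_RQPN);
}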
+
+
+/* 2 TDECTRL */
+#define BCN_VALID BIT(16)
+#define BCN_HEAD(x) (((x) & 0xFF) << 8)
+#define BCN_HEAD_MASK 0xFF00
+
+/* 2 TDECTL */
+#define BLK_DESC_NUM_SHIFT 4
+#define BLK_DESC_NUM_MASK 0xF
+
+
+/* 2 TXDMA_OFFSET_CHK */
+#define DROP_DATA_EN BIT(9)
+
+/* */
+/* */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* */
+/* */
+/* 2 FWHW_TXQ_CTRL */
+#define EN_AMPDU_RTY_NEW BIT(7)
+
+/* 2 INIRTSMCS_SEL */
+#define _INIRTSMCS_SEL(x) ((x) & 0x3F)
+
+
+/* 2 SPEC SIFS */
+#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
+
+
+/* 2 RRSR */
+
+#define RATE_REG_BITMAP_ALL 0xFFFFF
+
+#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)
+
+#define _RRSR_RSC(x) (((x) & 0x3) << 21)
+#define RRSR_RSC_RESERVED 0x0
+#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
+#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
+#define RRSR_RSC_DUPLICATE_MODE 0x3
+
+
+/* 2 ARFR */
+#define USE_SHORT_G1 BIT(20)
+
+/* 2 AGGLEN_LMT_L */
+#define _AGGLMT_MCS0(x) ((x) & 0xF)
+#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
+#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
+#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
+#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
+#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
+#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
+#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)
+
+
+/* 2 RL */
+#define RETRY_LIMIT_SHORT_SHIFT 8
+#define RETRY_LIMIT_LONG_SHIFT 0
+
+
+/* 2 DARFRC */
+#define _DARF_RC1(x) ((x) & 0x1F)
+#define _DARF_RC2(x) (((x) & 0x1F) << 8)
+#define _DARF_RC3(x) (((x) & 0x1F) << 16)
+#define _DARF_RC4(x) (((x) & 0x1F) << 24)
+/* NOTE: shift starting from address (DARFRC + 4) */
+#define _DARF_RC5(x) ((x) & 0x1F)
+#define _DARF_RC6(x) (((x) & 0x1F) << 8)
+#define _DARF_RC7(x) (((x) & 0x1F) << 16)
+#define _DARF_RC8(x) (((x) & 0x1F) << 24)
+
+
+/* 2 RARFRC */
+#define _RARF_RC1(x) ((x) & 0x1F)
+#define _RARF_RC2(x) (((x) & 0x1F) << 8)
+#define _RARF_RC3(x) (((x) & 0x1F) << 16)
+#define _RARF_RC4(x) (((x) & 0x1F) << 24)
+/* NOTE: shift starting from address (RARFRC + 4) */
+#define _RARF_RC5(x) ((x) & 0x1F)
+#define _RARF_RC6(x) (((x) & 0x1F) << 8)
+#define _RARF_RC7(x) (((x) & 0x1F) << 16)
+#define _RARF_RC8(x) (((x) & 0x1F) << 24)
+
+
+/* */
+/* */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* */
+/* */
+
+
+
+/* 2 EDCA setting */
+#define AC_PARAM_TXOP_LIMIT_OFFSET 16
+#define AC_PARAM_ECW_MAX_OFFSET 12
+#define AC_PARAM_ECW_MIN_OFFSET 8
+#define AC_PARAM_AIFS_OFFSET 0
+
+
+/* 2 EDCA_VO_PARAM */
+#define _AIFS(x) (x)
+#define _ECW_MAX_MIN(x) ((x) << 8)
+#define _TXOP_LIMIT(x) ((x) << 16)
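A sketch of packing one EDCA parameter word with the helpers above (REG_EDCA_VO_PARAM and the field values are assumptions for illustration):

static void example_set_vo_edca(struct rtw_adapter *padapter)
{
	/* AIFS, ECWmax (high nibble) / ECWmin (low nibble), TXOP limit. */
	u32 vo_param = _AIFS(0x2B) | _ECW_MAX_MIN(0x32) | _TXOP_LIMIT(0x2F);

	rtw_write32(padapter, REG_EDCA_VO_PARAM, vo_param);
}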
+
+
+#define _BCNIFS(x) ((x) & 0xFF)
+#define _BCNECW(x) (((x) & 0xF) << 8)
+
+
+#define _LRL(x) ((x) & 0x3F)
+#define _SRL(x) (((x) & 0x3F) << 8)
+
+
+/* 2 SIFS_CCK */
+#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
+#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8)
+
+
+/* 2 SIFS_OFDM */
+#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
+#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8)
+
+
+/* 2 TBTT PROHIBIT */
+#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
+
+
+/* 2 REG_RD_CTRL */
+#define DIS_EDCA_CNT_DWN BIT(11)
+
+
+/* 2 BCN_CTRL */
+#define EN_MBSSID BIT(1)
+#define EN_TXBCN_RPT BIT(2)
+#define EN_BCN_FUNCTION BIT(3)
+#define DIS_TSF_UPDATE BIT(3)
+
+/* The same function but different bit field. */
+#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
+#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
+
+/* 2 ACMHWCTRL */
+#define AcmHw_HwEn BIT(0)
+#define AcmHw_BeqEn BIT(1)
+#define AcmHw_ViqEn BIT(2)
+#define AcmHw_VoqEn BIT(3)
+#define AcmHw_BeqStatus BIT(4)
+#define AcmHw_ViqStatus BIT(5)
+#define AcmHw_VoqStatus BIT(6)
+
+
+
+/* */
+/* */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* */
+/* */
+
+/* 2 APSD_CTRL */
+#define APSDOFF BIT(6)
+#define APSDOFF_STATUS BIT(7)
+
+
+/* 2 BWOPMODE */
+#define BW_20MHZ BIT(2)
+
+
+#define RATE_BITMAP_ALL 0xFFFFF
+
+/* Only use CCK 1M rate for ACK */
+#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
+
+/* 2 TCR */
+#define TSFRST BIT(0)
+#define DIS_GCLK BIT(1)
+#define PAD_SEL BIT(2)
+#define PWR_ST BIT(6)
+#define PWRBIT_OW_EN BIT(7)
+#define ACRC BIT(8)
+#define CFENDFORM BIT(9)
+#define ICV BIT(10)
+
+
+
+/* 2 RCR */
+#define AAP BIT(0)
+#define APM BIT(1)
+#define AM BIT(2)
+#define AB BIT(3)
+#define ADD3 BIT(4)
+#define APWRMGT BIT(5)
+#define CBSSID BIT(6)
+#define CBSSID_BCN BIT(7)
+#define ACRC32 BIT(8)
+#define AICV BIT(9)
+#define ADF BIT(11)
+#define ACF BIT(12)
+#define AMF BIT(13)
+#define HTC_LOC_CTRL BIT(14)
+#define UC_DATA_EN BIT(16)
+#define BM_DATA_EN BIT(17)
+#define MFBEN BIT(22)
+#define LSIGEN BIT(23)
+#define EnMBID BIT(24)
+#define APP_BASSN BIT(27)
+#define APP_PHYSTS BIT(28)
+#define APP_ICV BIT(29)
+#define APP_MIC BIT(30)
+#define APP_FCS BIT(31)
+
+/* 2 RX_PKT_LIMIT */
+
+/* 2 RX_DLK_TIME */
+
+/* 2 MBIDCAMCFG */
+
+
+
+/* 2 AMPDU_MIN_SPACE */
+#define _MIN_SPACE(x) ((x) & 0x7)
+#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)
+
+
+/* 2 RXERR_RPT */
+#define RXERR_TYPE_OFDM_PPDU 0
+#define RXERR_TYPE_OFDMfalse_ALARM 1
+#define RXERR_TYPE_OFDM_MPDU_OK 2
+#define RXERR_TYPE_OFDM_MPDU_FAIL 3
+#define RXERR_TYPE_CCK_PPDU 4
+#define RXERR_TYPE_CCKfalse_ALARM 5
+#define RXERR_TYPE_CCK_MPDU_OK 6
+#define RXERR_TYPE_CCK_MPDU_FAIL 7
+#define RXERR_TYPE_HT_PPDU 8
+#define RXERR_TYPE_HTfalse_ALARM 9
+#define RXERR_TYPE_HT_MPDU_TOTAL 10
+#define RXERR_TYPE_HT_MPDU_OK 11
+#define RXERR_TYPE_HT_MPDU_FAIL 12
+#define RXERR_TYPE_RX_FULL_DROP 15
+
+#define RXERR_COUNTER_MASK 0xFFFFF
+#define RXERR_RPT_RST BIT(27)
+#define _RXERR_RPT_SEL(type) ((type) << 28)
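A sketch of selecting, reading and resetting one error counter (REG_RXERR_RPT and the accessor names are assumed to be defined elsewhere):

static u32 example_read_ofdm_mpdu_fail(struct rtw_adapter *padapter)
{
	u32 sel = _RXERR_RPT_SEL(RXERR_TYPE_OFDM_MPDU_FAIL);
	u32 cnt;

	rtw_write32(padapter, REG_RXERR_RPT, sel);
	cnt = rtw_read32(padapter, REG_RXERR_RPT) & RXERR_COUNTER_MASK;

	/* Reset this counter before the next measurement window. */
	rtw_write32(padapter, REG_RXERR_RPT, sel | RXERR_RPT_RST);

	return cnt;
}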
+
+
+/* 2 SECCFG */
+#define SCR_TxUseDK BIT(0) /* Force Tx Use Default Key */
+#define SCR_RxUseDK BIT(1) /* Force Rx Use Default Key */
+#define SCR_TxEncEnable BIT(2) /* Enable Tx Encryption */
+#define SCR_RxDecEnable BIT(3) /* Enable Rx Decryption */
+#define SCR_SKByA2 BIT(4) /* Search Key by A2 */
+#define SCR_NoSKMC BIT(5) /* No Key Search Multicast */
+
+
+
+/* */
+/* */
+/* 0xFE00h ~ 0xFE55h USB Configuration */
+/* */
+/* */
+
+/* 2 USB Information (0xFE17) */
+#define USB_IS_HIGH_SPEED 0
+#define USB_IS_FULL_SPEED 1
+#define USB_SPEED_MASK BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK 0xF
+#define USB_NORMAL_SIE_EP_SHIFT 4
+
+#define USB_TEST_EP_MASK 0x30
+#define USB_TEST_EP_SHIFT 4
+
+/* 2 Special Option */
+#define USB_AGG_EN BIT(3)
+
+
+/* 2REG_C2HEVT_CLEAR */
+ /* Set by the driver to notify FW that the driver has read the
+ C2H command message */
+#define C2H_EVT_HOST_CLOSE 0x00
+ /* Set by FW to indicate that FW has posted a C2H command message
+ that has not yet been read by the driver. */
+#define C2H_EVT_FW_CLOSE 0xFF
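A sketch of the handshake these two values describe (REG_C2HEVT_CLEAR and the rtw_read8()/rtw_write8() accessor names are assumptions):

static void example_c2h_ack(struct rtw_adapter *padapter)
{
	/* Firmware marks the buffer FW_CLOSE when a new event is pending. */
	if (rtw_read8(padapter, REG_C2HEVT_CLEAR) != C2H_EVT_FW_CLOSE)
		return;

	/* ... copy the C2H message out of the event buffer here ... */

	/* Tell firmware the event was consumed so it can post the next one. */
	rtw_write8(padapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
}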
+
+
+/* 2REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
+ /* Enable GPIO[9] as WiFi HW PDn source */
+#define WL_HWPDN_EN BIT0
+ /* WiFi HW PDn polarity control */
+#define WL_HWPDN_SL BIT1
+ /* WiFi function enable */
+#define WL_FUNC_EN BIT2
+ /* Enable GPIO[9] as WiFi RF HW PDn source */
+#define WL_HWROF_EN BIT3
+ /* Enable GPIO[11] as BT HW PDn source */
+#define BT_HWPDN_EN BIT16
+ /* BT HW PDn polarity control */
+#define BT_HWPDN_SL BIT17
+ /* BT function enable */
+#define BT_FUNC_EN BIT18
+ /* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define BT_HWROF_EN BIT19
+ /* Enable GPIO[10] as GPS HW PDn source */
+#define GPS_HWPDN_EN BIT20
+ /* GPS HW PDn polarity control */
+#define GPS_HWPDN_SL BIT21
+ /* GPS function enable */
+#define GPS_FUNC_EN BIT22
+
+/* 3 REG_LIFECTRL_CTRL */
+#define HAL92C_EN_PKT_LIFE_TIME_BK BIT3
+#define HAL92C_EN_PKT_LIFE_TIME_BE BIT2
+#define HAL92C_EN_PKT_LIFE_TIME_VI BIT1
+#define HAL92C_EN_PKT_LIFE_TIME_VO BIT0
+
+#define HAL92C_MSDU_LIFE_TIME_UNIT 128 /* in us, said by Tim. */
+
+/* */
+/* General definitions */
+/* */
+
+#define LAST_ENTRY_OF_TX_PKT_BUFFER 255
+
+#define POLLING_LLT_THRESHOLD 20
+#define POLLING_READY_TIMEOUT_COUNT 1000
+
+/* Min Spacing related settings. */
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+/* */
+/* 8723A Register offset definition */
+/* */
+#define HAL_8723A_NAV_UPPER_UNIT 128 /* micro-second */
+
+/* */
+/* */
+/* 0x0000h ~ 0x00FFh System Configuration */
+/* */
+/* */
+#define REG_SYSON_REG_LOCK 0x001C
+
+
+/* */
+/* */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* */
+/* */
+#define REG_FTIMR 0x0138
+
+
+/* */
+/* */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* */
+/* */
+
+
+/* */
+/* */
+/* 0x0280h ~ 0x02FFh RXDMA Configuration */
+/* */
+/* */
+
+
+/* */
+/* */
+/* 0x0300h ~ 0x03FFh PCIe */
+/* */
+/* */
+
+
+/* */
+/* */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* */
+/* */
+#define REG_EARLY_MODE_CONTROL 0x4D0
+
+
+/* */
+/* */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* */
+/* */
+
+/* 2 BCN_CTRL */
+#define DIS_ATIM BIT(0)
+#define DIS_BCNQ_SUB BIT(1)
+#define DIS_TSF_UDT BIT(4)
+
+
+/* */
+/* */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* */
+/* */
+/* */
+/* Note: */
+/* The NAV upper value is very important to the WiFi 11n 5.2.3 NAV test.
+ * The default value is too small; the WiFi TestPlan exercises this item
+ * with a 25,000-microsecond NAV set by sending CTS frames over the air,
+ * so this value must be raised above 25,000 microseconds to pass it.
+ * The offset of NAV_UPPER in the 8192C spec is incorrect; it should be
+ * 0x0652. Commented by SD1 Scott. */
+/* By Bruce, 2011-07-18. */
+/* */
+#define REG_NAV_UPPER 0x0652 /* unit of 128 */
+
+
+/* */
+/* 8723 Register Bit and Content definition */
+/* */
+
+/* */
+/* */
+/* 0x0000h ~ 0x00FFh System Configuration */
+/* */
+/* */
+
+/* 2 SPS0_CTRL */
+
+/* 2 SYS_ISO_CTRL */
+
+/* 2 SYS_FUNC_EN */
+
+/* 2 APS_FSMCO */
+#define EN_WLON BIT(16)
+
+/* 2 SYS_CLKR */
+
+/* 2 9346CR */
+
+/* 2 AFE_MISC */
+
+/* 2 SPS0_CTRL */
+
+/* 2 SPS_OCP_CFG */
+
+/* 2 SYSON_REG_LOCK */
+#define WLOCK_ALL BIT(0)
+#define WLOCK_00 BIT(1)
+#define WLOCK_04 BIT(2)
+#define WLOCK_08 BIT(3)
+#define WLOCK_40 BIT(4)
+#define WLOCK_1C_B6 BIT(5)
+#define R_DIS_PRST_1 BIT(6)
+#define LOCK_ALL_EN BIT(7)
+
+/* 2 RF_CTRL */
+
+/* 2 LDOA15_CTRL */
+
+/* 2 LDOV12D_CTRL */
+
+/* 2 AFE_XTAL_CTRL */
+
+/* 2 AFE_PLL_CTRL */
+
+/* 2 EFUSE_CTRL */
+
+/* 2 EFUSE_TEST (For RTL8723 partially) */
+
+/* 2 PWR_DATA */
+
+/* 2 CAL_TIMER */
+
+/* 2 ACLK_MON */
+
+/* 2 GPIO_MUXCFG */
+
+/* 2 GPIO_PIN_CTRL */
+
+/* 2 GPIO_INTM */
+
+/* 2 LEDCFG */
+
+/* 2 FSIMR */
+
+/* 2 FSISR */
+
+/* 2 HSIMR */
+/* 8723 Host System Interrupt Mask Register (offset 0x58, 32 bits) */
+#define HSIMR_GPIO12_0_INT_EN BIT(0)
+#define HSIMR_SPS_OCP_INT_EN BIT(5)
+#define HSIMR_RON_INT_EN BIT(6)
+#define HSIMR_PDNINT_EN BIT(7)
+#define HSIMR_GPIO9_INT_EN BIT(25)
+
+/* 2 HSISR */
+/* 8723 Host System Interrupt Status Register (offset 0x5C, 32 bits) */
+#define HSISR_GPIO12_0_INT BIT(0)
+#define HSISR_SPS_OCP_INT BIT(5)
+#define HSISR_RON_INT BIT(6)
+#define HSISR_PDNINT BIT(7)
+#define HSISR_GPIO9_INT BIT(25)
+
+/* interrupt mask which needs to clear */
+#define MASK_HSISR_CLEAR (HSISR_GPIO12_0_INT | \
+ HSISR_SPS_OCP_INT | \
+ HSISR_RON_INT | \
+ HSISR_PDNINT | \
+ HSISR_GPIO9_INT)
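A sketch of acknowledging these sources, assuming the status bits are write-one-to-clear (REG_HSISR and that behaviour are assumptions based on comparable parts, not statements from this patch):

static void example_ack_hsisr(struct rtw_adapter *padapter)
{
	u32 hsisr = rtw_read32(padapter, REG_HSISR);

	if (hsisr & MASK_HSISR_CLEAR)
		rtw_write32(padapter, REG_HSISR, hsisr & MASK_HSISR_CLEAR);
}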
+
+/* 2 MCUFWDL */
+#define RAM_DL_SEL BIT7 /* 1:RAM, 0:ROM */
+
+/* 2 HPON_FSM */
+
+/* 2 SYS_CFG */
+#define RTL_ID BIT(23) /* TestChip ID,
+ 1:Test(RLE); 0:MP(RL) */
+#define SPS_SEL BIT(24) /* 1:LDO regulator mode;
+ 0:Switching regulator mode*/
+
+
+/* */
+/* */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* */
+/* */
+
+/* 2 Function Enable Registers */
+
+/* 2 CR */
+#define CALTMR_EN BIT(10)
+
+/* 2 PBP - Page Size Register */
+
+/* 2 TX/RXDMA */
+
+/* 2 TRXFF_BNDY */
+
+/* 2 LLT_INIT */
+
+/* 2 BB_ACCESS_CTRL */
+
+
+/* */
+/* */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* */
+/* */
+
+/* 2 RQPN */
+
+/* 2 TDECTRL */
+
+/* 2 TDECTL */
+
+/* 2 TXDMA_OFFSET_CHK */
+
+
+/* */
+/* */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* */
+/* */
+
+/* 2 FWHW_TXQ_CTRL */
+
+/* 2 INIRTSMCS_SEL */
+
+/* 2 SPEC SIFS */
+
+/* 2 RRSR */
+
+/* 2 ARFR */
+
+/* 2 AGGLEN_LMT_L */
+
+/* 2 RL */
+
+/* 2 DARFRC */
+
+/* 2 RARFRC */
+
+
+/* */
+/* */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* */
+/* */
+
+/* 2 EDCA setting */
+
+/* 2 EDCA_VO_PARAM */
+
+/* 2 SIFS_CCK */
+
+/* 2 SIFS_OFDM */
+
+/* 2 TBTT PROHIBIT */
+
+/* 2 REG_RD_CTRL */
+
+/* 2 BCN_CTRL */
+
+/* 2 ACMHWCTRL */
+
+
+/* */
+/* */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* */
+/* */
+
+/* 2 APSD_CTRL */
+
+/* 2 BWOPMODE */
+
+/* 2 TCR */
+
+/* 2 RCR */
+
+/* 2 RX_PKT_LIMIT */
+
+/* 2 RX_DLK_TIME */
+
+/* 2 MBIDCAMCFG */
+
+/* 2 AMPDU_MIN_SPACE */
+
+/* 2 RXERR_RPT */
+
+/* 2 SECCFG */
+
+
+/* */
+/* */
+/* 0xFE00h ~ 0xFE55h RTL8723 SDIO Configuration */
+/* */
+/* */
+
+/* I/O bus domain address mapping */
+#define WLAN_IOREG_BASE 0x10260000
+#define FIRMWARE_FIFO_BASE 0x10270000
+#define TX_HIQ_BASE 0x10310000
+#define TX_MIQ_BASE 0x10320000
+#define TX_LOQ_BASE 0x10330000
+#define RX_RX0FF_BASE 0x10340000
+
+/* SDIO host local register space mapping. */
+#define WLAN_IOREG_MSK 0x7FFF
+#define WLAN_FIFO_MSK 0x1FFF /* Aggregation Length[12:0] */
+#define WLAN_RX0FF_MSK 0x0003
+
+#define WLAN_RX0FF_DEVICE_ID 7 /* 0b[16], 111b[15:13] */
+#define WLAN_IOREG_DEVICE_ID 8 /* 1b[16] */
+
+/* 8723 EFUSE */
+#define HWSET_MAX_SIZE 256
+
+
+/* USB interrupt */
+#define UHIMR_TIMEOUT2 BIT31
+#define UHIMR_TIMEOUT1 BIT30
+#define UHIMR_PSTIMEOUT BIT29
+#define UHIMR_GTINT4 BIT28
+#define UHIMR_GTINT3 BIT27
+#define UHIMR_TXBCNERR BIT26
+#define UHIMR_TXBCNOK BIT25
+#define UHIMR_TSF_BIT32_TOGGLE BIT24
+#define UHIMR_BCNDMAINT3 BIT23
+#define UHIMR_BCNDMAINT2 BIT22
+#define UHIMR_BCNDMAINT1 BIT21
+#define UHIMR_BCNDMAINT0 BIT20
+#define UHIMR_BCNDOK3 BIT19
+#define UHIMR_BCNDOK2 BIT18
+#define UHIMR_BCNDOK1 BIT17
+#define UHIMR_BCNDOK0 BIT16
+#define UHIMR_HSISR_IND BIT15
+#define UHIMR_BCNDMAINT_E BIT14
+/* RSVD BIT13 */
+#define UHIMR_CTW_END BIT12
+/* RSVD BIT11 */
+#define UHIMR_C2HCMD BIT10
+#define UHIMR_CPWM2 BIT9
+#define UHIMR_CPWM BIT8
+#define UHIMR_HIGHDOK BIT7 /* High Queue DMA OK
+ Interrupt */
+#define UHIMR_MGNTDOK BIT6 /* Management Queue DMA OK
+ Interrupt */
+#define UHIMR_BKDOK BIT5 /* AC_BK DMA OK Interrupt */
+#define UHIMR_BEDOK BIT4 /* AC_BE DMA OK Interrupt */
+#define UHIMR_VIDOK BIT3 /* AC_VI DMA OK Interrupt */
+#define UHIMR_VODOK BIT2 /* AC_VO DMA Interrupt */
+#define UHIMR_RDU BIT1 /* Receive Descriptor
+ Unavailable */
+#define UHIMR_ROK BIT0 /* Receive DMA OK Interrupt */
+
+/* USB Host Interrupt Status Extension bit */
+#define UHIMR_BCNDMAINT7 BIT23
+#define UHIMR_BCNDMAINT6 BIT22
+#define UHIMR_BCNDMAINT5 BIT21
+#define UHIMR_BCNDMAINT4 BIT20
+#define UHIMR_BCNDOK7 BIT19
+#define UHIMR_BCNDOK6 BIT18
+#define UHIMR_BCNDOK5 BIT17
+#define UHIMR_BCNDOK4 BIT16
+/* bit14-15: RSVD */
+#define UHIMR_ATIMEND_E BIT13
+#define UHIMR_ATIMEND BIT12
+#define UHIMR_TXERR BIT11
+#define UHIMR_RXERR BIT10
+#define UHIMR_TXFOVW BIT9
+#define UHIMR_RXFOVW BIT8
+/* bit2-7: RSVD */
+#define UHIMR_OCPINT BIT1
+/* bit0: RSVD */
+
+#define REG_USB_HIMR 0xFE38
+#define REG_USB_HIMRE 0xFE3C
+#define REG_USB_HISR 0xFE78
+#define REG_USB_HISRE 0xFE7C
+
+#define USB_INTR_CPWM_OFFSET 16
+#define USB_INTR_CONTENT_HISR_OFFSET 48
+#define USB_INTR_CONTENT_HISRE_OFFSET 52
+#define USB_INTR_CONTENT_LENGTH 56
+#define USB_C2H_CMDID_OFFSET 0
+#define USB_C2H_SEQ_OFFSET 1
+#define USB_C2H_EVENT_OFFSET 2
+/* */
+/* General definitions */
+/* */
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_sreset.h b/drivers/staging/rtl8723au/include/rtl8723a_sreset.h
new file mode 100644
index 000000000000..82af6a2704ed
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtl8723a_sreset.h
@@ -0,0 +1,25 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTL8723A_SRESET_H_
+#define _RTL8723A_SRESET_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_sreset.h>
+
+void rtl8723a_sreset_xmit_status_check(struct rtw_adapter *padapter);
+void rtl8723a_sreset_linked_status_check(struct rtw_adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_xmit.h b/drivers/staging/rtl8723au/include/rtl8723a_xmit.h
new file mode 100644
index 000000000000..3b6fdc3a9b7b
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtl8723a_xmit.h
@@ -0,0 +1,229 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723A_XMIT_H__
+#define __RTL8723A_XMIT_H__
+
+/* */
+/* Queue Select Value in TxDesc */
+/* */
+#define QSLT_BK 0x2 /* 0x01 */
+#define QSLT_BE 0x0
+#define QSLT_VI 0x5 /* 0x4 */
+#define QSLT_VO 0x7 /* 0x6 */
+#define QSLT_BEACON 0x10
+#define QSLT_HIGH 0x11
+#define QSLT_MGNT 0x12
+#define QSLT_CMD 0x13
+
+/* */
+/* defined for TX DESC Operation */
+/* */
+
+#define MAX_TID (15)
+
+/* OFFSET 0 */
+#define OFFSET_SZ 0
+#define OFFSET_SHT 16
+#define BMC BIT(24)
+#define LSG BIT(26)
+#define FSG BIT(27)
+#define OWN BIT(31)
+
+
+/* OFFSET 4 */
+#define PKT_OFFSET_SZ 0
+#define BK BIT(6)
+#define QSEL_SHT 8
+#define Rate_ID_SHT 16
+#define NAVUSEHDR BIT(20)
+#define PKT_OFFSET_SHT 26
+#define HWPC BIT(31)
+
+/* OFFSET 8 */
+#define AGG_EN BIT(29)
+
+/* OFFSET 12 */
+#define SEQ_SHT 16
+
+/* OFFSET 16 */
+#define QoS BIT(6)
+#define HW_SEQ_EN BIT(7)
+#define USERATE BIT(8)
+#define DISDATAFB BIT(10)
+#define DATA_SHORT BIT(24)
+#define DATA_BW BIT(25)
+
+/* OFFSET 20 */
+#define SGI BIT(6)
+
+struct txdesc_8723a {
+ u32 pktlen:16;
+ u32 offset:8;
+ u32 bmc:1;
+ u32 htc:1;
+ u32 ls:1;
+ u32 fs:1;
+ u32 linip:1;
+ u32 noacm:1;
+ u32 gf:1;
+ u32 own:1;
+
+ u32 macid:5;
+ u32 agg_en:1;
+ u32 bk:1;
+ u32 rd_en:1;
+ u32 qsel:5;
+ u32 rd_nav_ext:1;
+ u32 lsig_txop_en:1;
+ u32 pifs:1;
+ u32 rate_id:4;
+ u32 navusehdr:1;
+ u32 en_desc_id:1;
+ u32 sectype:2;
+ u32 rsvd0424:2;
+ u32 pkt_offset:5; /* unit: 8 bytes */
+ u32 rsvd0431:1;
+
+ u32 rts_rc:6;
+ u32 data_rc:6;
+ u32 rsvd0812:2;
+ u32 bar_rty_th:2;
+ u32 rsvd0816:1;
+ u32 morefrag:1;
+ u32 raw:1;
+ u32 ccx:1;
+ u32 ampdu_density:3;
+ u32 bt_null:1;
+ u32 ant_sel_a:1;
+ u32 ant_sel_b:1;
+ u32 tx_ant_cck:2;
+ u32 tx_antl:2;
+ u32 tx_ant_ht:2;
+
+ u32 nextheadpage:8;
+ u32 tailpage:8;
+ u32 seq:12;
+ u32 cpu_handle:1;
+ u32 tag1:1;
+ u32 trigger_int:1;
+ u32 hwseq_en:1;
+
+ u32 rtsrate:5;
+ u32 ap_dcfe:1;
+ u32 hwseq_sel:2;
+ u32 userate:1;
+ u32 disrtsfb:1;
+ u32 disdatafb:1;
+ u32 cts2self:1;
+ u32 rtsen:1;
+ u32 hw_rts_en:1;
+ u32 port_id:1;
+ u32 rsvd1615:3;
+ u32 wait_dcts:1;
+ u32 cts2ap_en:1;
+ u32 data_sc:2;
+ u32 data_stbc:2;
+ u32 data_short:1;
+ u32 data_bw:1;
+ u32 rts_short:1;
+ u32 rts_bw:1;
+ u32 rts_sc:2;
+ u32 vcs_stbc:2;
+
+ u32 datarate:6;
+ u32 sgi:1;
+ u32 try_rate:1;
+ u32 data_ratefb_lmt:5;
+ u32 rts_ratefb_lmt:4;
+ u32 rty_lmt_en:1;
+ u32 data_rt_lmt:6;
+ u32 usb_txagg_num:8;
+
+ u32 txagg_a:5;
+ u32 txagg_b:5;
+ u32 use_max_len:1;
+ u32 max_agg_num:5;
+ u32 mcsg1_max_len:4;
+ u32 mcsg2_max_len:4;
+ u32 mcsg3_max_len:4;
+ u32 mcs7_sgi_max_len:4;
+
+ u32 checksum:16; /* TxBuffSize(PCIe)/CheckSum(USB) */
+ u32 mcsg4_max_len:4;
+ u32 mcsg5_max_len:4;
+ u32 mcsg6_max_len:4;
+ u32 mcs15_sgi_max_len:4;
+};
+
+#define txdesc_set_ccx_sw_8723a(txdesc, value) \
+ do { \
+ ((struct txdesc_8723a *)(txdesc))->mcsg4_max_len = (((value) >> 8) & 0x0f); \
+ ((struct txdesc_8723a *)(txdesc))->mcs15_sgi_max_len = (((value) >> 4) & 0x0f); \
+ ((struct txdesc_8723a *)(txdesc))->mcsg6_max_len = ((value) & 0x0f); \
+ } while (0)
+
+struct txrpt_ccx_8723a {
+ /* offset 0 */
+ u8 tag1:1;
+ u8 rsvd:4;
+ u8 int_bt:1;
+ u8 int_tri:1;
+ u8 int_ccx:1;
+
+ /* offset 1 */
+ u8 mac_id:5;
+ u8 pkt_drop:1;
+ u8 pkt_ok:1;
+ u8 bmc:1;
+
+ /* offset 2 */
+ u8 retry_cnt:6;
+ u8 lifetime_over:1;
+ u8 retry_over:1;
+
+ /* offset 3 */
+ u8 ccx_qtime0;
+ u8 ccx_qtime1;
+
+ /* offset 5 */
+ u8 final_data_rate;
+
+ /* offset 6 */
+ u8 sw1:4;
+ u8 qsel:4;
+
+ /* offset 7 */
+ u8 sw0;
+};
+
+#define txrpt_ccx_sw_8723a(txrpt_ccx) ((txrpt_ccx)->sw0 + ((txrpt_ccx)->sw1<<8))
+#define txrpt_ccx_qtime_8723a(txrpt_ccx) ((txrpt_ccx)->ccx_qtime0+((txrpt_ccx)->ccx_qtime1<<8))
+
+void dump_txrpt_ccx_8723a(void *buf);
+void handle_txrpt_ccx_8723a(struct rtw_adapter *adapter, void *buf);
+void rtl8723a_update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem);
+void rtl8723a_fill_fake_txdesc(struct rtw_adapter *padapter, u8 *pDesc, u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull);
+
+s32 rtl8723au_hal_xmitframe_enqueue(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe);
+s32 rtl8723au_xmit_buf_handler(struct rtw_adapter *padapter);
+#define hal_xmit_handler rtl8723au_xmit_buf_handler
+s32 rtl8723au_init_xmit_priv(struct rtw_adapter * padapter);
+void rtl8723au_free_xmit_priv(struct rtw_adapter * padapter);
+s32 rtl8723au_hal_xmit(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe);
+s32 rtl8723au_mgnt_xmit(struct rtw_adapter *padapter, struct xmit_frame *pmgntframe);
+s32 rtl8723au_xmitframe_complete(struct rtw_adapter *padapter, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
+
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_ap.h b/drivers/staging/rtl8723au/include/rtw_ap.h
new file mode 100644
index 000000000000..76f82d68f633
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_ap.h
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_AP_H_
+#define __RTW_AP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+#ifdef CONFIG_8723AU_AP_MODE
+
+/* external function */
+void rtw_indicate_sta_assoc_event23a(struct rtw_adapter *padapter, struct sta_info *psta);
+void rtw_indicate_sta_disassoc_event23a(struct rtw_adapter *padapter, struct sta_info *psta);
+
+void init_mlme_ap_info23a(struct rtw_adapter *padapter);
+void free_mlme_ap_info23a(struct rtw_adapter *padapter);
+/* void update_BCNTIM(struct rtw_adapter *padapter); */
+void rtw_add_bcn_ie(struct rtw_adapter *padapter, struct wlan_bssid_ex *pnetwork, u8 index, u8 *data, u8 len);
+void rtw_remove_bcn_ie(struct rtw_adapter *padapter, struct wlan_bssid_ex *pnetwork, u8 index);
+void update_beacon23a(struct rtw_adapter *padapter, u8 ie_id, u8 *oui, u8 tx);
+void add_RATid23a(struct rtw_adapter *padapter, struct sta_info *psta, u8 rssi_level);
+void expire_timeout_chk23a(struct rtw_adapter *padapter);
+void update_sta_info23a_apmode23a(struct rtw_adapter *padapter, struct sta_info *psta);
+int rtw_check_beacon_data23a(struct rtw_adapter *padapter, u8 *pbuf, int len);
+void rtw_ap_restore_network(struct rtw_adapter *padapter);
+void rtw_set_macaddr_acl23a(struct rtw_adapter *padapter, int mode);
+int rtw_acl_add_sta23a(struct rtw_adapter *padapter, u8 *addr);
+int rtw_acl_remove_sta23a(struct rtw_adapter *padapter, u8 *addr);
+
+void associated_clients_update23a(struct rtw_adapter *padapter, u8 updated);
+void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info *psta);
+u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info *psta);
+void sta_info_update23a(struct rtw_adapter *padapter, struct sta_info *psta);
+void ap_sta_info_defer_update23a(struct rtw_adapter *padapter, struct sta_info *psta);
+u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool active, u16 reason);
+int rtw_sta_flush23a(struct rtw_adapter *padapter);
+int rtw_ap_inform_ch_switch23a(struct rtw_adapter *padapter, u8 new_ch, u8 ch_offset);
+void start_ap_mode23a(struct rtw_adapter *padapter);
+void stop_ap_mode23a(struct rtw_adapter *padapter);
+#endif /* end of CONFIG_8723AU_AP_MODE */
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_cmd.h b/drivers/staging/rtl8723au/include/rtw_cmd.h
new file mode 100644
index 000000000000..f9caa3e35f57
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_cmd.h
@@ -0,0 +1,835 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_CMD_H_
+#define __RTW_CMD_H_
+
+#include <wlan_bssdef.h>
+#include <rtw_rf.h>
+#include <rtw_led.h>
+
+#define C2H_MEM_SZ (16*1024)
+
+#include <osdep_service.h>
+#include <ieee80211.h> /* <ieee80211/ieee80211.h> */
+
+
+#define FREE_CMDOBJ_SZ 128
+
+#define MAX_CMDSZ 1024
+#define MAX_RSPSZ 512
+#define MAX_EVTSZ 1024
+
+#define CMDBUFF_ALIGN_SZ 512
+
+struct cmd_obj {
+ struct rtw_adapter *padapter;
+ u16 cmdcode;
+ u8 res;
+ u8 *parmbuf;
+ u32 cmdsz;
+ u8 *rsp;
+ u32 rspsz;
+ /* struct semaphore cmd_sem; */
+ struct list_head list;
+};
+
+struct cmd_priv {
+ struct semaphore cmd_queue_sema;
+ /* struct semaphore cmd_done_sema; */
+ struct semaphore terminate_cmdthread_sema;
+ struct rtw_queue cmd_queue;
+ u8 cmd_seq;
+ u8 *cmd_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *cmd_allocated_buf;
+ u8 *rsp_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *rsp_allocated_buf;
+ u32 cmd_issued_cnt;
+ u32 cmd_done_cnt;
+ u32 rsp_cnt;
+ u8 cmdthd_running;
+ struct rtw_adapter *padapter;
+};
+
+#define C2H_QUEUE_MAX_LEN 10
+
+struct evt_priv {
+ struct work_struct c2h_wk;
+ bool c2h_wk_alive;
+ struct rtw_cbuf *c2h_queue;
+
+ atomic_t event_seq;
+ u8 *evt_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *evt_allocated_buf;
+ u32 evt_done_cnt;
+};
+
+#define init_h2fwcmd_w_parm_no_rsp(pcmd, pparm, code) \
+do {\
+ INIT_LIST_HEAD(&pcmd->list);\
+ pcmd->cmdcode = code;\
+ pcmd->parmbuf = (u8 *)(pparm);\
+ pcmd->cmdsz = sizeof (*pparm);\
+ pcmd->rsp = NULL;\
+ pcmd->rspsz = 0;\
+} while(0)
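A sketch of how this macro is typically used when queuing a driver-extra command; error handling is trimmed, and the kzalloc()/GFP_KERNEL allocation pattern shown here is an assumption rather than code from this patch.

static u32 example_queue_drvextra(struct rtw_adapter *padapter,
				  struct cmd_priv *pcmdpriv, int ec_id)
{
	struct cmd_obj *cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	struct drvextra_cmd_parm *parm = kzalloc(sizeof(*parm), GFP_KERNEL);

	if (!cmd || !parm) {
		kfree(cmd);
		kfree(parm);
		return 0;	/* failure, in the driver's convention (assumed) */
	}

	parm->ec_id = ec_id;
	parm->type_size = 0;
	parm->pbuf = NULL;

	cmd->padapter = padapter;
	init_h2fwcmd_w_parm_no_rsp(cmd, parm, GEN_CMD_CODE(_Set_Drv_Extra));

	return rtw_enqueue_cmd23a(pcmdpriv, cmd);
}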
+
+struct c2h_evt_hdr {
+ u8 id:4;
+ u8 plen:4;
+ u8 seq;
+ u8 payload[0];
+};
+
+#define c2h_evt_exist(c2h_evt) ((c2h_evt)->id || (c2h_evt)->plen)
+
+u32 rtw_enqueue_cmd23a(struct cmd_priv *pcmdpriv, struct cmd_obj *obj);
+void rtw_free_cmd_obj23a(struct cmd_obj *pcmd);
+
+int rtw_cmd_thread23a(void *context);
+
+int rtw_init_cmd_priv23a(struct cmd_priv *pcmdpriv);
+void rtw_free_cmd_priv23a (struct cmd_priv *pcmdpriv);
+
+u32 rtw_init_evt_priv23a (struct evt_priv *pevtpriv);
+void rtw_free_evt_priv23a (struct evt_priv *pevtpriv);
+void rtw_cmd_clr_isr23a(struct cmd_priv *pcmdpriv);
+void rtw_evt_notify_isr(struct evt_priv *pevtpriv);
+#ifdef CONFIG_8723AU_P2P
+u8 p2p_protocol_wk_cmd23a(struct rtw_adapter*padapter, int intCmdType );
+#endif /* CONFIG_8723AU_P2P */
+
+enum rtw_drvextra_cmd_id
+{
+ NONE_WK_CID,
+ DYNAMIC_CHK_WK_CID,
+ DM_CTRL_WK_CID,
+ PBC_POLLING_WK_CID,
+ POWER_SAVING_CTRL_WK_CID,/* IPS,AUTOSuspend */
+ LPS_CTRL_WK_CID,
+ ANT_SELECT_WK_CID,
+ P2P_PS_WK_CID,
+ P2P_PROTO_WK_CID,
+ CHECK_HIQ_WK_CID,/* for softap mode, check hi queue if empty */
+ C2H_WK_CID,
+ RTP_TIMER_CFG_WK_CID,
+ MAX_WK_CID
+};
+
+enum LPS_CTRL_TYPE
+{
+ LPS_CTRL_SCAN=0,
+ LPS_CTRL_JOINBSS=1,
+ LPS_CTRL_CONNECT=2,
+ LPS_CTRL_DISCONNECT=3,
+ LPS_CTRL_SPECIAL_PACKET=4,
+ LPS_CTRL_LEAVE=5,
+};
+
+enum RFINTFS {
+ SWSI,
+ HWSI,
+ HWPI,
+};
+
+/*
+Caller Mode: Infra, Ad-HoC(C)
+
+Notes: To enter USB suspend mode
+
+Command Mode
+
+*/
+struct usb_suspend_parm {
+ u32 action;/* 1: sleep, 0:resume */
+};
+
+/*
+Caller Mode: Infra, Ad-HoC
+
+Notes: To join a known BSS.
+
+Command-Event Mode
+
+*/
+
+/*
+Caller Mode: Infra, Ad-HoC(C)
+
+Notes: To disconnect the current associated BSS
+
+Command Mode
+
+*/
+struct disconnect_parm {
+ u32 deauth_timeout_ms;
+};
+
+struct setopmode_parm {
+ u8 mode;
+ u8 rsvd[3];
+};
+
+/*
+Caller Mode: AP, Ad-HoC, Infra
+
+Notes: To ask the RTL8711 to perform a site survey
+
+Command-Event Mode
+
+*/
+
+#define RTW_SSID_SCAN_AMOUNT 9 /* for WEXT_CSCAN_AMOUNT 9 */
+#define RTW_CHANNEL_SCAN_AMOUNT (14+37)
+struct sitesurvey_parm {
+ int scan_mode; /* active: 1, passive: 0 */
+ u8 ssid_num;
+ u8 ch_num;
+ struct cfg80211_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set the auth type of the RTL8711: open/shared/802.1x
+
+Command Mode
+
+*/
+struct setauth_parm {
+ u8 mode; /* 0: legacy open, 1: legacy shared 2: 802.1x */
+ u8 _1x; /* 0: PSK, 1: TLS */
+ u8 rsvd[2];
+};
+
+/*
+Caller Mode: Infra
+
+a. algorithm: wep40, wep104, tkip & aes
+b. keytype: grp key/unicast key
+c. key contents
+
+when shared key ==> keyid is the camid
+when 802.1x ==> keyid [0:1] ==> grp key
+when 802.1x ==> keyid > 2 ==> unicast key
+
+*/
+struct setkey_parm {
+ u8 algorithm; /* encryption algorithm, could be none, wep40, TKIP, CCMP, wep104 */
+ u8 keyid;
+ u8 grpkey; /* 1: this is the grpkey for 802.1x. 0: this is the unicast key for 802.1x */
+ u8 set_tx; /* 1: main tx key for wep. 0: other key. */
+ u8 key[16]; /* this could be 40 or 104 */
+};
+
+/*
+When in AP or Ad-Hoc mode, this is used to
+allocate an sw/hw entry for a newly associated sta.
+
+Command
+
+when shared key ==> algorithm/keyid
+
+*/
+struct set_stakey_parm {
+ u8 addr[ETH_ALEN];
+ u8 algorithm;
+ u8 id;/* currently for erasing cam entry if algorithm == _NO_PRIVACY_ */
+ u8 key[16];
+};
+
+struct set_stakey_rsp {
+ u8 addr[ETH_ALEN];
+ u8 keyid;
+ u8 rsvd;
+};
+
+/*
+Caller Ad-Hoc/AP
+
+Command -Rsp(AID == CAMID) mode
+
+This is to force fw to add an sta_data entry per driver's request.
+
+FW will write a cam entry associated with it.
+
+*/
+struct set_assocsta_parm {
+ u8 addr[ETH_ALEN];
+};
+
+struct set_assocsta_rsp {
+ u8 cam_id;
+ u8 rsvd[3];
+};
+
+/*
+ Caller Ad-Hoc/AP
+
+ Command mode
+
+ This is to force fw to del an sta_data entry per driver's request
+
+ FW will invalidate the cam entry associated with it.
+
+*/
+struct del_assocsta_parm {
+ u8 addr[ETH_ALEN];
+};
+
+/*
+Caller Mode: AP/Ad-HoC(M)
+
+Notes: To notify fw that given staid has changed its power state
+
+Command Mode
+
+*/
+struct setstapwrstate_parm {
+ u8 staid;
+ u8 status;
+ u8 hwaddr[6];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set up the basic rate of the RTL8711
+
+Command Mode
+
+*/
+struct setbasicrate_parm {
+ u8 basicrates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To read the current basic rate
+
+Command-Rsp Mode
+
+*/
+struct getbasicrate_parm {
+ u32 rsvd;
+};
+
+struct getbasicrate_rsp {
+ u8 basicrates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set up the data rate of the RTL8711
+
+Command Mode
+
+*/
+struct setdatarate_parm {
+ u8 mac_id;
+ u8 datarates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To read the current data rate
+
+Command-Rsp Mode
+
+*/
+struct getdatarate_parm {
+ u32 rsvd;
+};
+
+struct getdatarate_rsp {
+ u8 datarates[NumRates];
+};
+
+
+/*
+Caller Mode: Any
+AP: AP can use the info for the contents of beacon frame
+Infra: STA can use the info when sitesurveying
+Ad-HoC(M): Like AP
+Ad-HoC(C): Like STA
+
+
+Notes: To set the phy capability of the NIC
+
+Command Mode
+
+*/
+
+struct setphyinfo_parm {
+ struct regulatory_class class_sets[NUM_REGULATORYS];
+ u8 status;
+};
+
+struct getphyinfo_parm {
+ u32 rsvd;
+};
+
+struct getphyinfo_rsp {
+ struct regulatory_class class_sets[NUM_REGULATORYS];
+ u8 status;
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set the channel/modem/band
+This command will be used when channel/modem/band is changed.
+
+Command Mode
+
+*/
+struct setphy_parm {
+ u8 rfchannel;
+ u8 modem;
+};
+
+/*
+Caller Mode: Any
+
+Notes: To get the current setting of channel/modem/band
+
+Command-Rsp Mode
+
+*/
+struct getphy_parm {
+ u32 rsvd;
+};
+
+struct getphy_rsp {
+ u8 rfchannel;
+ u8 modem;
+};
+
+struct readBB_parm {
+ u8 offset;
+};
+
+struct readBB_rsp {
+ u8 value;
+};
+
+struct readTSSI_parm {
+ u8 offset;
+};
+
+struct readTSSI_rsp {
+ u8 value;
+};
+
+struct writeBB_parm {
+ u8 offset;
+ u8 value;
+};
+
+struct readRF_parm {
+ u8 offset;
+};
+
+struct readRF_rsp {
+ u32 value;
+};
+
+struct writeRF_parm {
+ u32 offset;
+ u32 value;
+};
+
+struct getrfintfs_parm {
+ u8 rfintfs;
+};
+
+struct Tx_Beacon_param
+{
+ struct wlan_bssid_ex network;
+};
+
+/* CMD param format for driver extra cmd handler */
+struct drvextra_cmd_parm {
+ int ec_id; /* extra cmd id */
+ int type_size; /* Can use this field as the type id or command size */
+ unsigned char *pbuf;
+};
+
+/*------------------- Below are used for RF/BB tuning ---------------------*/
+
+struct setantenna_parm {
+ u8 tx_antset;
+ u8 rx_antset;
+ u8 tx_antenna;
+ u8 rx_antenna;
+};
+
+struct enrateadaptive_parm {
+ u32 en;
+};
+
+struct settxagctbl_parm {
+ u32 txagc[MAX_RATES_LENGTH];
+};
+
+struct gettxagctbl_parm {
+ u32 rsvd;
+};
+
+struct gettxagctbl_rsp {
+ u32 txagc[MAX_RATES_LENGTH];
+};
+
+struct setagcctrl_parm {
+ u32 agcctrl; /* 0: pure hw, 1: fw */
+};
+
+struct setssup_parm {
+ u32 ss_ForceUp[MAX_RATES_LENGTH];
+};
+
+struct getssup_parm {
+ u32 rsvd;
+};
+
+struct getssup_rsp {
+ u8 ss_ForceUp[MAX_RATES_LENGTH];
+};
+
+struct setssdlevel_parm {
+ u8 ss_DLevel[MAX_RATES_LENGTH];
+};
+
+struct getssdlevel_parm {
+ u32 rsvd;
+};
+
+struct getssdlevel_rsp {
+ u8 ss_DLevel[MAX_RATES_LENGTH];
+};
+
+struct setssulevel_parm {
+ u8 ss_ULevel[MAX_RATES_LENGTH];
+};
+
+struct getssulevel_parm {
+ u32 rsvd;
+};
+
+struct getssulevel_rsp {
+ u8 ss_ULevel[MAX_RATES_LENGTH];
+};
+
+struct setcountjudge_parm {
+ u8 count_judge[MAX_RATES_LENGTH];
+};
+
+struct getcountjudge_parm {
+ u32 rsvd;
+};
+
+struct getcountjudge_rsp {
+ u8 count_judge[MAX_RATES_LENGTH];
+};
+
+struct setratable_parm {
+ u8 ss_ForceUp[NumRates];
+ u8 ss_ULevel[NumRates];
+ u8 ss_DLevel[NumRates];
+ u8 count_judge[NumRates];
+};
+
+struct getratable_parm {
+ uint rsvd;
+};
+
+struct getratable_rsp {
+ u8 ss_ForceUp[NumRates];
+ u8 ss_ULevel[NumRates];
+ u8 ss_DLevel[NumRates];
+ u8 count_judge[NumRates];
+};
+
+/* to get TX,RX retry count */
+struct gettxretrycnt_parm{
+ unsigned int rsvd;
+};
+struct gettxretrycnt_rsp{
+ unsigned long tx_retrycnt;
+};
+
+struct getrxretrycnt_parm{
+ unsigned int rsvd;
+};
+struct getrxretrycnt_rsp{
+ unsigned long rx_retrycnt;
+};
+
+/* to get BCNOK,BCNERR count */
+struct getbcnokcnt_parm{
+ unsigned int rsvd;
+};
+struct getbcnokcnt_rsp{
+ unsigned long bcnokcnt;
+};
+
+struct getbcnerrcnt_parm{
+ unsigned int rsvd;
+};
+struct getbcnerrcnt_rsp{
+ unsigned long bcnerrcnt;
+};
+
+/* to get current TX power level */
+struct getcurtxpwrlevel_parm{
+ unsigned int rsvd;
+};
+
+struct getcurtxpwrlevel_rsp{
+ unsigned short tx_power;
+};
+
+struct setprobereqextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setassocreqextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setproberspextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setassocrspextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct addBaReq_parm {
+ unsigned int tid;
+ u8 addr[ETH_ALEN];
+};
+
+/*H2C Handler index: 46 */
+struct set_ch_parm {
+ u8 ch;
+ u8 bw;
+ u8 ch_offset;
+};
+
+/*H2C Handler index: 59 */
+struct SetChannelPlan_param {
+ u8 channel_plan;
+};
+
+/*H2C Handler index: 60 */
+struct LedBlink_param {
+ struct led_8723a *pLed;
+};
+
+/*H2C Handler index: 61 */
+struct SetChannelSwitch_param {
+ u8 new_ch_no;
+};
+
+/*H2C Handler index: 62 */
+struct TDLSoption_param {
+ u8 addr[ETH_ALEN];
+ u8 option;
+};
+
+#define GEN_CMD_CODE(cmd) cmd ## _CMD_
+
+
+/*
+
+Result:
+0x00: success
+0x01: success, and check the response.
+0x02: cmd ignored due to duplicated sequence number
+0x03: cmd dropped due to invalid cmd code
+0x04: reserved.
+
+*/
+
+#define H2C_RSP_OFFSET 512
+
+#define H2C_SUCCESS 0x00
+#define H2C_SUCCESS_RSP 0x01
+#define H2C_DUPLICATED 0x02
+#define H2C_DROPPED 0x03
+#define H2C_PARAMETERS_ERROR 0x04
+#define H2C_REJECTED 0x05
+#define H2C_CMD_OVERFLOW 0x06
+#define H2C_RESERVED 0x07
+
+u8 rtw_setassocsta_cmd(struct rtw_adapter *padapter, u8 *mac_addr);
+u8 rtw_setstandby_cmd(struct rtw_adapter *padapter, uint action);
+u8 rtw_sitesurvey_cmd23a(struct rtw_adapter *padapter, struct cfg80211_ssid *ssid, int ssid_num, struct rtw_ieee80211_channel *ch, int ch_num);
+u8 rtw_createbss_cmd23a(struct rtw_adapter *padapter);
+u8 rtw_createbss_cmd23a_ex(struct rtw_adapter *padapter, unsigned char *pbss, unsigned int sz);
+u8 rtw_setphy_cmd(struct rtw_adapter *padapter, u8 modem, u8 ch);
+u8 rtw_setstakey_cmd23a(struct rtw_adapter *padapter, u8 *psta, u8 unicast_key);
+u8 rtw_clearstakey_cmd23a(struct rtw_adapter *padapter, u8 *psta, u8 entry, u8 enqueue);
+u8 rtw_joinbss_cmd23a(struct rtw_adapter *padapter, struct wlan_network* pnetwork);
+u8 rtw_disassoc_cmd23a(struct rtw_adapter *padapter, u32 deauth_timeout_ms, bool enqueue);
+u8 rtw_setopmode_cmd23a(struct rtw_adapter *padapter, enum ndis_802_11_net_infra networktype);
+u8 rtw_setdatarate_cmd(struct rtw_adapter *padapter, u8 *rateset);
+u8 rtw_setbasicrate_cmd(struct rtw_adapter *padapter, u8 *rateset);
+u8 rtw_setbbreg_cmd(struct rtw_adapter * padapter, u8 offset, u8 val);
+u8 rtw_setrfreg_cmd(struct rtw_adapter * padapter, u8 offset, u32 val);
+u8 rtw_getbbreg_cmd(struct rtw_adapter * padapter, u8 offset, u8 * pval);
+u8 rtw_getrfreg_cmd(struct rtw_adapter * padapter, u8 offset, u8 * pval);
+u8 rtw_setrfintfs_cmd(struct rtw_adapter *padapter, u8 mode);
+u8 rtw_setrttbl_cmd(struct rtw_adapter *padapter, struct setratable_parm *prate_table);
+u8 rtw_getrttbl_cmd(struct rtw_adapter *padapter, struct getratable_rsp *pval);
+
+u8 rtw_gettssi_cmd(struct rtw_adapter *padapter, u8 offset,u8 *pval);
+u8 rtw_setfwdig_cmd(struct rtw_adapter*padapter, u8 type);
+u8 rtw_setfwra_cmd(struct rtw_adapter*padapter, u8 type);
+
+u8 rtw_addbareq_cmd23a(struct rtw_adapter*padapter, u8 tid, u8 *addr);
+
+u8 rtw_dynamic_chk_wk_cmd23a(struct rtw_adapter *adapter);
+
+u8 rtw_lps_ctrl_wk_cmd23a(struct rtw_adapter*padapter, u8 lps_ctrl_type, u8 enqueue);
+
+u8 rtw_ps_cmd23a(struct rtw_adapter*padapter);
+
+#ifdef CONFIG_8723AU_AP_MODE
+u8 rtw_chk_hi_queue_cmd23a(struct rtw_adapter*padapter);
+#endif
+
+u8 rtw_set_ch_cmd23a(struct rtw_adapter*padapter, u8 ch, u8 bw, u8 ch_offset, u8 enqueue);
+u8 rtw_set_chplan_cmd(struct rtw_adapter*padapter, u8 chplan, u8 enqueue);
+u8 rtw_led_blink_cmd(struct rtw_adapter*padapter, struct led_8723a *pLed);
+u8 rtw_set_csa_cmd(struct rtw_adapter*padapter, u8 new_ch_no);
+u8 rtw_tdls_cmd(struct rtw_adapter*padapter, u8 *addr, u8 option);
+
+u8 rtw_c2h_wk_cmd23a(struct rtw_adapter *padapter, u8 *c2h_evt);
+
+u8 rtw_drvextra_cmd_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf);
+
+void rtw_survey_cmd_callback23a(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
+void rtw_disassoc_cmd23a_callback(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
+void rtw_joinbss_cmd23a_callback(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
+void rtw_createbss_cmd23a_callback(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
+void rtw_getbbrfreg_cmdrsp_callback23a(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
+void rtw_readtssi_cmdrsp_callback(struct rtw_adapter* padapter, struct cmd_obj *pcmd);
+
+void rtw_setstaKey_cmdrsp_callback23a(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
+void rtw_setassocsta_cmdrsp_callback23a(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
+void rtw_getrttbl_cmdrsp_callback(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
+
+struct _cmd_callback {
+ u32 cmd_code;
+ void (*callback)(struct rtw_adapter *padapter, struct cmd_obj *cmd);
+};
+
+enum rtw_h2c_cmd {
+ GEN_CMD_CODE(_Read_MACREG) , /*0*/
+ GEN_CMD_CODE(_Write_MACREG) ,
+ GEN_CMD_CODE(_Read_BBREG) ,
+ GEN_CMD_CODE(_Write_BBREG) ,
+ GEN_CMD_CODE(_Read_RFREG) ,
+ GEN_CMD_CODE(_Write_RFREG) , /*5*/
+ GEN_CMD_CODE(_Read_EEPROM) ,
+ GEN_CMD_CODE(_Write_EEPROM) ,
+ GEN_CMD_CODE(_Read_EFUSE) ,
+ GEN_CMD_CODE(_Write_EFUSE) ,
+
+ GEN_CMD_CODE(_Read_CAM) , /*10*/
+ GEN_CMD_CODE(_Write_CAM) ,
+ GEN_CMD_CODE(_setBCNITV),
+ GEN_CMD_CODE(_setMBIDCFG),
+ GEN_CMD_CODE(_JoinBss), /*14*/
+ GEN_CMD_CODE(_DisConnect) , /*15*/
+ GEN_CMD_CODE(_CreateBss) ,
+ GEN_CMD_CODE(_SetOpMode) ,
+ GEN_CMD_CODE(_SiteSurvey), /*18*/
+ GEN_CMD_CODE(_SetAuth) ,
+
+ GEN_CMD_CODE(_SetKey) , /*20*/
+ GEN_CMD_CODE(_SetStaKey) ,
+ GEN_CMD_CODE(_SetAssocSta) ,
+ GEN_CMD_CODE(_DelAssocSta) ,
+ GEN_CMD_CODE(_SetStaPwrState) ,
+ GEN_CMD_CODE(_SetBasicRate) , /*25*/
+ GEN_CMD_CODE(_GetBasicRate) ,
+ GEN_CMD_CODE(_SetDataRate) ,
+ GEN_CMD_CODE(_GetDataRate) ,
+ GEN_CMD_CODE(_SetPhyInfo) ,
+
+ GEN_CMD_CODE(_GetPhyInfo) , /*30*/
+ GEN_CMD_CODE(_SetPhy) ,
+ GEN_CMD_CODE(_GetPhy) ,
+ GEN_CMD_CODE(_readRssi) ,
+ GEN_CMD_CODE(_readGain) ,
+ GEN_CMD_CODE(_SetAtim) , /*35*/
+ GEN_CMD_CODE(_SetPwrMode) ,
+ GEN_CMD_CODE(_JoinbssRpt),
+ GEN_CMD_CODE(_SetRaTable) ,
+ GEN_CMD_CODE(_GetRaTable) ,
+
+ GEN_CMD_CODE(_GetCCXReport), /*40*/
+ GEN_CMD_CODE(_GetDTMReport),
+ GEN_CMD_CODE(_GetTXRateStatistics),
+ GEN_CMD_CODE(_SetUsbSuspend),
+ GEN_CMD_CODE(_SetH2cLbk),
+ GEN_CMD_CODE(_AddBAReq) , /*45*/
+ GEN_CMD_CODE(_SetChannel), /*46*/
+ GEN_CMD_CODE(_SetTxPower),
+ GEN_CMD_CODE(_SwitchAntenna),
+ GEN_CMD_CODE(_SetCrystalCap),
+ GEN_CMD_CODE(_SetSingleCarrierTx), /*50*/
+
+ GEN_CMD_CODE(_SetSingleToneTx),/*51*/
+ GEN_CMD_CODE(_SetCarrierSuppressionTx),
+ GEN_CMD_CODE(_SetContinuousTx),
+ GEN_CMD_CODE(_SwitchBandwidth), /*54*/
+ GEN_CMD_CODE(_TX_Beacon), /*55*/
+
+ GEN_CMD_CODE(_Set_MLME_EVT), /*56*/
+ GEN_CMD_CODE(_Set_Drv_Extra), /*57*/
+ GEN_CMD_CODE(_Set_H2C_MSG), /*58*/
+
+ GEN_CMD_CODE(_SetChannelPlan), /*59*/
+ GEN_CMD_CODE(_LedBlink), /*60*/
+
+ GEN_CMD_CODE(_SetChannelSwitch), /*61*/
+ GEN_CMD_CODE(_TDLS), /*62*/
+
+ MAX_H2CCMD
+};
+
+#define _GetBBReg_CMD_ _Read_BBREG_CMD_
+#define _SetBBReg_CMD_ _Write_BBREG_CMD_
+#define _GetRFReg_CMD_ _Read_RFREG_CMD_
+#define _SetRFReg_CMD_ _Write_RFREG_CMD_
+
+extern struct _cmd_callback rtw_cmd_callback[];
+
+#endif /* __RTW_CMD_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_debug.h b/drivers/staging/rtl8723au/include/rtw_debug.h
new file mode 100644
index 000000000000..a69d6e215b8b
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_debug.h
@@ -0,0 +1,192 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_DEBUG_H__
+#define __RTW_DEBUG_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define _drv_always_ 1
+#define _drv_emerg_ 2
+#define _drv_alert_ 3
+#define _drv_err_ 4
+#define _drv_warning_ 5
+#define _drv_notice_ 6
+#define _drv_info_ 7
+#define _drv_debug_ 8
+
+#define _module_rtl871x_xmit_c_ BIT(0)
+#define _module_xmit_osdep_c_ BIT(1)
+#define _module_rtl871x_recv_c_ BIT(2)
+#define _module_recv_osdep_c_ BIT(3)
+#define _module_rtl871x_mlme_c_ BIT(4)
+#define _module_mlme_osdep_c_ BIT(5)
+#define _module_rtl871x_sta_mgt_c_ BIT(6)
+#define _module_rtl871x_cmd_c_ BIT(7)
+#define _module_cmd_osdep_c_ BIT(8)
+#define _module_rtl871x_io_c_ BIT(9)
+#define _module_io_osdep_c_ BIT(10)
+#define _module_os_intfs_c_ BIT(11)
+#define _module_rtl871x_security_c_ BIT(12)
+#define _module_rtl871x_eeprom_c_ BIT(13)
+#define _module_hal_init_c_ BIT(14)
+#define _module_hci_hal_init_c_ BIT(15)
+#define _module_rtl871x_ioctl_c_ BIT(16)
+#define _module_rtl871x_ioctl_set_c_ BIT(17)
+#define _module_rtl871x_ioctl_query_c_ BIT(18)
+#define _module_rtl871x_pwrctrl_c_ BIT(19)
+#define _module_hci_intfs_c_ BIT(20)
+#define _module_hci_ops_c_ BIT(21)
+#define _module_osdep_service_c_ BIT(22)
+#define _module_mp_ BIT(23)
+#define _module_hci_ops_os_c_ BIT(24)
+#define _module_rtl871x_ioctl_os_c BIT(25)
+#define _module_rtl8712_cmd_c_ BIT(26)
+#define _module_rtl8192c_xmit_c_ BIT(28)
+#define _module_hal_xmit_c_ BIT(28) /* duplication intentional */
+#define _module_efuse_ BIT(29)
+#define _module_rtl8712_recv_c_ BIT(30)
+#define _module_rtl8712_led_c_ BIT(31)
+
+#undef _MODULE_DEFINE_
+
+#if defined _RTW_XMIT_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_xmit_c_
+#elif defined _XMIT_OSDEP_C_
+ #define _MODULE_DEFINE_ _module_xmit_osdep_c_
+#elif defined _RTW_RECV_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_recv_c_
+#elif defined _RECV_OSDEP_C_
+ #define _MODULE_DEFINE_ _module_recv_osdep_c_
+#elif defined _RTW_MLME_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_mlme_c_
+#elif defined _MLME_OSDEP_C_
+ #define _MODULE_DEFINE_ _module_mlme_osdep_c_
+#elif defined _RTW_MLME_EXT_C_
+ #define _MODULE_DEFINE_ 1
+#elif defined _RTW_STA_MGT_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_sta_mgt_c_
+#elif defined _RTW_CMD_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_cmd_c_
+#elif defined _CMD_OSDEP_C_
+ #define _MODULE_DEFINE_ _module_cmd_osdep_c_
+#elif defined _RTW_IO_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_io_c_
+#elif defined _IO_OSDEP_C_
+ #define _MODULE_DEFINE_ _module_io_osdep_c_
+#elif defined _OS_INTFS_C_
+ #define _MODULE_DEFINE_ _module_os_intfs_c_
+#elif defined _RTW_SECURITY_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_security_c_
+#elif defined _RTW_EEPROM_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_eeprom_c_
+#elif defined _HAL_INTF_C_
+ #define _MODULE_DEFINE_ _module_hal_init_c_
+#elif (defined _HCI_HAL_INIT_C_) || (defined _SDIO_HALINIT_C_)
+ #define _MODULE_DEFINE_ _module_hci_hal_init_c_
+#elif defined _RTL871X_IOCTL_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_ioctl_c_
+#elif defined _RTL871X_IOCTL_SET_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_ioctl_set_c_
+#elif defined _RTL871X_IOCTL_QUERY_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_ioctl_query_c_
+#elif defined _RTL871X_PWRCTRL_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_pwrctrl_c_
+#elif defined _RTW_PWRCTRL_C_
+ #define _MODULE_DEFINE_ 1
+#elif defined _HCI_INTF_C_
+ #define _MODULE_DEFINE_ _module_hci_intfs_c_
+#elif defined _HCI_OPS_C_
+ #define _MODULE_DEFINE_ _module_hci_ops_c_
+#elif defined _SDIO_OPS_C_
+ #define _MODULE_DEFINE_ 1
+#elif defined _OSDEP_HCI_INTF_C_
+ #define _MODULE_DEFINE_ _module_hci_intfs_c_
+#elif defined _OSDEP_SERVICE_C_
+ #define _MODULE_DEFINE_ _module_osdep_service_c_
+#elif defined _HCI_OPS_OS_C_
+ #define _MODULE_DEFINE_ _module_hci_ops_os_c_
+#elif defined _RTL871X_IOCTL_LINUX_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_ioctl_os_c
+#elif defined _RTL8712_CMD_C_
+ #define _MODULE_DEFINE_ _module_rtl8712_cmd_c_
+#elif defined _RTL8192C_XMIT_C_
+ #define _MODULE_DEFINE_ 1
+#elif defined _RTL8723AS_XMIT_C_
+ #define _MODULE_DEFINE_ 1
+#elif defined _RTL8712_RECV_C_
+ #define _MODULE_DEFINE_ _module_rtl8712_recv_c_
+#elif defined _RTL8192CU_RECV_C_
+ #define _MODULE_DEFINE_ _module_rtl8712_recv_c_
+#elif defined _RTL871X_MLME_EXT_C_
+ #define _MODULE_DEFINE_ _module_mlme_osdep_c_
+#elif defined _RTW_MP_C_
+ #define _MODULE_DEFINE_ _module_mp_
+#elif defined _RTW_MP_IOCTL_C_
+ #define _MODULE_DEFINE_ _module_mp_
+#elif defined _RTW_EFUSE_C_
+ #define _MODULE_DEFINE_ _module_efuse_
+#endif
+
+#define DRIVER_PREFIX "RTL8723AU: "
+#define DEBUG_LEVEL (_drv_err_)
+#define DBG_8723A_LEVEL(_level, fmt, arg...) \
+ do { \
+ if (_level <= GlobalDebugLevel23A) \
+ pr_info(DRIVER_PREFIX"ERROR " fmt, ##arg);\
+ } while (0)
+
+#define DBG_8723A(...) \
+ do { \
+ if (_drv_err_ <= GlobalDebugLevel23A) \
+ pr_info(DRIVER_PREFIX __VA_ARGS__); \
+ } while (0)
+
+#define MSG_8723A(...) \
+ do { \
+ if (_drv_err_ <= GlobalDebugLevel23A) \
+ pr_info(DRIVER_PREFIX __VA_ARGS__); \
+ } while (0)
+
+extern u32 GlobalDebugLevel23A;
+
+
+#define RT_TRACE(_Comp, _Level, Fmt) \
+do { \
+ if (_Level <= GlobalDebugLevel23A) { \
+ pr_info("%s [0x%08x,%d]", DRIVER_PREFIX, \
+ (unsigned int)_Comp, _Level); \
+ pr_info Fmt; \
+ } \
+} while (0)
+
+#define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, \
+ _HexDataLen) \
+ if (_Level <= GlobalDebugLevel23A) { \
+ int __i; \
+ u8 *ptr = (u8 *)_HexData; \
+ pr_info("%s", DRIVER_PREFIX); \
+ pr_info(_TitleString); \
+ for (__i = 0; __i < (int)_HexDataLen; __i++) { \
+ printk("%02X%s", ptr[__i], \
+			       (((__i + 1) % 4) == 0) ? "  " : " "); \
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ printk("\n"); \
+ }
+
+#endif /* __RTW_DEBUG_H__ */
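
As an illustration (not part of the patch itself), here is a minimal usage sketch of the logging macros above: the chip-version value is hypothetical, _drv_err_ is assumed to be one of the debug levels defined earlier in this header, and both calls print only while the level does not exceed GlobalDebugLevel23A.

/* Hedged sketch, not driver code: exercise DBG_8723A() and RT_TRACE(). */
static void example_debug_usage(void)
{
	u32 chip_ver = 0x8723;	/* hypothetical value */

	/* Printed only while GlobalDebugLevel23A >= _drv_err_ */
	DBG_8723A("chip version %x\n", chip_ver);

	/* RT_TRACE() expands "pr_info Fmt", so the format arguments must
	 * be wrapped in their own parentheses. */
	RT_TRACE(_module_efuse_, _drv_err_, ("efuse map updated\n"));
}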
diff --git a/drivers/staging/rtl8723au/include/rtw_eeprom.h b/drivers/staging/rtl8723au/include/rtw_eeprom.h
new file mode 100644
index 000000000000..d008f032181b
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_eeprom.h
@@ -0,0 +1,135 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_EEPROM_H__
+#define __RTW_EEPROM_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define RTL8712_EEPROM_ID 0x8712
+/* define EEPROM_MAX_SIZE 256 */
+
+#define HWSET_MAX_SIZE_512 512
+#define EEPROM_MAX_SIZE HWSET_MAX_SIZE_512
+
+#define CLOCK_RATE 50 /* 100us */
+
+/* EEPROM opcodes */
+#define EEPROM_READ_OPCODE 06
+#define EEPROM_WRITE_OPCODE 05
+#define EEPROM_ERASE_OPCODE 07
+#define EEPROM_EWEN_OPCODE 19 /* Erase/write enable */
+#define EEPROM_EWDS_OPCODE 16 /* Erase/write disable */
+
+/* Country codes */
+#define USA 0x555320
+#define EUROPE 0x1 /* temp, should be provided later */
+#define JAPAN 0x2 /* temp, should be provided later */
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_ALPHA 0x1
+#define EEPROM_CID_Senao 0x3
+#define EEPROM_CID_NetCore 0x5
+#define EEPROM_CID_CAMEO 0X8
+#define EEPROM_CID_SITECOM 0x9
+#define EEPROM_CID_COREGA 0xB
+#define EEPROM_CID_EDIMAX_BELKIN 0xC
+#define EEPROM_CID_SERCOMM_BELKIN 0xE
+#define EEPROM_CID_CAMEO1 0xF
+#define EEPROM_CID_WNC_COREGA 0x12
+#define EEPROM_CID_CLEVO 0x13
+#define EEPROM_CID_WHQL 0xFE /* added by chiyoko for dtm, 20090108 */
+
+/* */
+/* Customer ID, note that: */
+/* This variable is initialized through EEPROM or the registry; */
+/* however, its definition may differ from the one in EEPROM for */
+/* EEPROM size considerations, so proper translation between them is needed. */
+/* Besides, the CustomerID from the registry takes precedence over the one */
+/* in EEPROM. 060703, by rcnjko. */
+/* */
+enum rt_customer_id {
+ RT_CID_DEFAULT = 0,
+ RT_CID_8187_ALPHA0 = 1,
+ RT_CID_8187_SERCOMM_PS = 2,
+ RT_CID_8187_HW_LED = 3,
+ RT_CID_8187_NETGEAR = 4,
+ RT_CID_WHQL = 5,
+ RT_CID_819x_CAMEO = 6,
+ RT_CID_819x_RUNTOP = 7,
+ RT_CID_819x_Senao = 8,
+ RT_CID_TOSHIBA = 9, /* Merge by Jacken, 2008/01/31. */
+ RT_CID_819x_Netcore = 10,
+ RT_CID_Nettronix = 11,
+ RT_CID_DLINK = 12,
+ RT_CID_PRONET = 13,
+ RT_CID_COREGA = 14,
+ RT_CID_CHINA_MOBILE = 15,
+ RT_CID_819x_ALPHA = 16,
+ RT_CID_819x_Sitecom = 17,
+	RT_CID_CCX = 18, /* Set under the CCX logo test; not required for CCX functions, but affects test behavior such as retry limit and tx report. By Bruce, 2009-02-17. */
+ RT_CID_819x_Lenovo = 19,
+ RT_CID_819x_QMI = 20,
+ RT_CID_819x_Edimax_Belkin = 21,
+ RT_CID_819x_Sercomm_Belkin = 22,
+ RT_CID_819x_CAMEO1 = 23,
+ RT_CID_819x_MSI = 24,
+ RT_CID_819x_Acer = 25,
+ RT_CID_819x_AzWave_ASUS = 26,
+	RT_CID_819x_AzWave = 27, /* For AzWave in PCIe; the ID is used by AzWave, not only ASUS */
+ RT_CID_819x_HP = 28,
+ RT_CID_819x_WNC_COREGA = 29,
+ RT_CID_819x_Arcadyan_Belkin = 30,
+ RT_CID_819x_SAMSUNG = 31,
+ RT_CID_819x_CLEVO = 32,
+ RT_CID_819x_DELL = 33,
+ RT_CID_819x_PRONETS = 34,
+ RT_CID_819x_Edimax_ASUS = 35,
+ RT_CID_819x_CAMEO_NETGEAR = 36,
+ RT_CID_PLANEX = 37,
+ RT_CID_CC_C = 38,
+ RT_CID_819x_Xavi = 39,
+ RT_CID_819x_FUNAI_TV = 40,
+	RT_CID_819x_ALPHA_WD = 41,
+};
+
+struct eeprom_priv {
+ u8 bautoload_fail_flag;
+ u8 bloadfile_fail_flag;
+ u8 bloadmac_fail_flag;
+ /* u8 bempty; */
+ /* u8 sys_config; */
+ u8 mac_addr[6]; /* PermanentAddress */
+ /* u8 config0; */
+ u16 channel_plan;
+ /* u8 country_string[3]; */
+ /* u8 tx_power_b[15]; */
+ /* u8 tx_power_g[15]; */
+ /* u8 tx_power_a[201]; */
+
+ u8 EepromOrEfuse;
+
+ u8 efuse_eeprom_data[HWSET_MAX_SIZE_512]; /* 92C:256bytes, 88E:512bytes, we use union set (512bytes) */
+};
+
+void eeprom_write16(struct rtw_adapter *padapter, u16 reg, u16 data);
+u16 eeprom_read16(struct rtw_adapter *padapter, u16 reg);
+void read_eeprom_content(struct rtw_adapter *padapter);
+void eeprom_read_sz(struct rtw_adapter *padapter, u16 reg, u8 *data, u32 sz);
+
+void read_eeprom_content_by_attrib(struct rtw_adapter *padapter);
+
+#endif /* __RTW_EEPROM_H__ */
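
A hedged sketch of how the accessors declared above could be combined; fill_shadow_words() is a hypothetical helper invented for illustration, and the word-offset and little-endian byte-order assumptions are mine, not taken from this header.

/* Hypothetical helper: read the EEPROM image 16 bits at a time into the
 * shadow buffer in struct eeprom_priv (little-endian layout assumed). */
static void fill_shadow_words(struct rtw_adapter *padapter,
			      struct eeprom_priv *eeprom)
{
	u16 i;

	for (i = 0; i < EEPROM_MAX_SIZE; i += 2) {
		u16 w = eeprom_read16(padapter, i / 2);	/* word offset assumed */

		eeprom->efuse_eeprom_data[i] = w & 0xff;
		eeprom->efuse_eeprom_data[i + 1] = w >> 8;
	}
}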
diff --git a/drivers/staging/rtl8723au/include/rtw_efuse.h b/drivers/staging/rtl8723au/include/rtw_efuse.h
new file mode 100644
index 000000000000..a7755056163f
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_efuse.h
@@ -0,0 +1,109 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_EFUSE_H__
+#define __RTW_EFUSE_H__
+
+#include <osdep_service.h>
+
+#define EFUSE_ERROE_HANDLE 1
+
+#define PG_STATE_HEADER 0x01
+#define PG_STATE_WORD_0 0x02
+#define PG_STATE_WORD_1 0x04
+#define PG_STATE_WORD_2 0x08
+#define PG_STATE_WORD_3 0x10
+#define PG_STATE_DATA 0x20
+
+#define PG_SWBYTE_H 0x01
+#define PG_SWBYTE_L 0x02
+
+#define PGPKT_DATA_SIZE 8
+
+#define EFUSE_WIFI 0
+#define EFUSE_BT 1
+
+enum _EFUSE_DEF_TYPE {
+ TYPE_EFUSE_MAX_SECTION = 0,
+ TYPE_EFUSE_REAL_CONTENT_LEN = 1,
+ TYPE_AVAILABLE_EFUSE_BYTES_BANK = 2,
+ TYPE_AVAILABLE_EFUSE_BYTES_TOTAL = 3,
+ TYPE_EFUSE_MAP_LEN = 4,
+ TYPE_EFUSE_PROTECT_BYTES_BANK = 5,
+ TYPE_EFUSE_CONTENT_LEN_BANK = 6,
+};
+
+/* E-Fuse */
+#define EFUSE_MAP_SIZE 256
+
+#define EFUSE_MAX_SIZE 512
+/* end of E-Fuse */
+
+#define EFUSE_MAX_MAP_LEN 256
+#define EFUSE_MAX_HW_SIZE 512
+#define EFUSE_MAX_SECTION_BASE 16
+
+#define EXT_HEADER(header) (((header) & 0x1F) == 0x0F)
+#define ALL_WORDS_DISABLED(wde) (((wde) & 0x0F) == 0x0F)
+#define GET_HDR_OFFSET_2_0(header) (((header) & 0xE0) >> 5)
+
+#define EFUSE_REPEAT_THRESHOLD_ 3
+
+/* */
+/* The following is for BT Efuse definition */
+/* */
+#define EFUSE_BT_MAX_MAP_LEN 1024
+#define EFUSE_MAX_BANK 4
+#define EFUSE_MAX_BT_BANK (EFUSE_MAX_BANK-1)
+/* */
+/*--------------------------Define Parameters-------------------------------*/
+#define EFUSE_MAX_WORD_UNIT 4
+
+/*------------------------------Define structure----------------------------*/
+struct pg_pkt_struct {
+ u8 offset;
+ u8 word_en;
+ u8 data[8];
+ u8 word_cnts;
+};
+
+/*------------------------Export global variable----------------------------*/
+
+u8 efuse_GetCurrentSize23a(struct rtw_adapter *padapter, u16 *size);
+u16 efuse_GetMaxSize23a(struct rtw_adapter *padapter);
+u8 rtw_efuse_access23a(struct rtw_adapter *padapter, u8 bRead, u16 start_addr, u16 cnts, u8 *data);
+u8 rtw_efuse_map_read23a(struct rtw_adapter *padapter, u16 addr, u16 cnts, u8 *data);
+u8 rtw_efuse_map_write(struct rtw_adapter *padapter, u16 addr, u16 cnts, u8 *data);
+u8 rtw_BT_efuse_map_read23a(struct rtw_adapter *padapter, u16 addr, u16 cnts, u8 *data);
+u8 rtw_BT_efuse_map_write(struct rtw_adapter *padapter, u16 addr, u16 cnts, u8 *data);
+
+u16 Efuse_GetCurrentSize23a(struct rtw_adapter *pAdapter, u8 efuseType);
+u8 Efuse_CalculateWordCnts23a(u8 word_en);
+void ReadEFuseByte23a(struct rtw_adapter *Adapter, u16 _offset, u8 *pbuf);
+void EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType, u8 type, void *pOut);
+u8 efuse_OneByteRead23a(struct rtw_adapter *pAdapter, u16 addr, u8 *data);
+u8 efuse_OneByteWrite23a(struct rtw_adapter *pAdapter, u16 addr, u8 data);
+
+void Efuse_PowerSwitch23a(struct rtw_adapter *pAdapter,u8 bWrite,u8 PwrState);
+int Efuse_PgPacketRead23a(struct rtw_adapter *pAdapter, u8 offset, u8 *data);
+int Efuse_PgPacketWrite23a(struct rtw_adapter *pAdapter, u8 offset, u8 word_en, u8 *data);
+void efuse_WordEnableDataRead23a(u8 word_en, u8 *sourdata, u8 *targetdata);
+u8 Efuse_WordEnableDataWrite23a(struct rtw_adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data);
+
+u8 EFUSE_Read1Byte23a(struct rtw_adapter *pAdapter, u16 Address);
+void EFUSE_ShadowMapUpdate23a(struct rtw_adapter *pAdapter, u8 efuseType);
+void EFUSE_ShadowRead23a(struct rtw_adapter *pAdapter, u8 Type, u16 Offset, u32 *Value);
+
+#endif
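
To illustrate the header-parsing macros above, here is a hedged sketch of decoding a one-byte efuse packet header; the active-low reading of word_en (a cleared bit means that word is present) is an assumption about the driver's convention, and example_parse_header() is not a real driver function.

/* Sketch only: split a plain (non-extended) efuse header into its section
 * offset and count the words selected by word_en. */
static void example_parse_header(u8 header, u8 word_en,
				 u8 *offset, u8 *word_cnts)
{
	int i;

	*offset = 0;
	*word_cnts = 0;

	if (EXT_HEADER(header))
		return;		/* extended header: offset continues in a second byte */

	*offset = GET_HDR_OFFSET_2_0(header);	/* header bits 7..5 */

	if (ALL_WORDS_DISABLED(word_en))
		return;		/* no words stored in this packet */

	for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++)
		if (!(word_en & BIT(i)))
			(*word_cnts)++;	/* cleared bit -> word i present (assumed active-low) */
}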
diff --git a/drivers/staging/rtl8723au/include/rtw_event.h b/drivers/staging/rtl8723au/include/rtw_event.h
new file mode 100644
index 000000000000..bb20640e6855
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_event.h
@@ -0,0 +1,114 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_EVENT_H_
+#define _RTW_EVENT_H_
+
+#include <osdep_service.h>
+
+#include <wlan_bssdef.h>
+
+/*
+ * Used to report that a BSS has been scanned.
+ */
+struct survey_event {
+ struct wlan_bssid_ex bss;
+};
+
+/*
+ * Used to report that the requested site survey has been done.
+ *
+ * bss_cnt indicates the number of BSSes that have been reported.
+ */
+struct surveydone_event {
+ unsigned int bss_cnt;
+
+};
+
+/*
+ * Used to report the link result of joining the given BSS.
+ *
+ * join_res:
+ * -1: authentication failed
+ * -2: association failed
+ * > 0: TID
+ */
+struct joinbss_event {
+ struct wlan_network network;
+};
+
+/*
+ * Used to report that a given STA has joined the created BSS.
+ * It is used in AP/Ad-Hoc(M) mode.
+ */
+struct stassoc_event {
+ unsigned char macaddr[6];
+ unsigned char rsvd[2];
+ int cam_id;
+
+};
+
+struct stadel_event {
+ unsigned char macaddr[6];
+ unsigned char rsvd[2]; /* for reason */
+ int mac_id;
+};
+
+struct addba_event {
+ unsigned int tid;
+};
+
+#define GEN_EVT_CODE(event) event ## _EVT_
+
+struct fwevent {
+ u32 parmsize;
+ void (*event_callback)(struct rtw_adapter *dev, u8 *pbuf);
+};
+
+
+#define C2HEVENT_SZ 32
+
+struct event_node {
+ unsigned char *node;
+ unsigned char evt_code;
+ unsigned short evt_sz;
+ volatile int *caller_ff_tail;
+ int caller_ff_sz;
+};
+
+struct c2hevent_queue {
+ volatile int head;
+ volatile int tail;
+ struct event_node nodes[C2HEVENT_SZ];
+ unsigned char seq;
+};
+
+#define NETWORK_QUEUE_SZ 4
+
+struct network_queue {
+ volatile int head;
+ volatile int tail;
+ struct wlan_bssid_ex networks[NETWORK_QUEUE_SZ];
+};
+
+
+#endif /* _RTW_EVENT_H_ */
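
For context, a hedged sketch of the ring arithmetic implied by c2hevent_queue and C2HEVENT_SZ; the real enqueue logic lives in the .c files and may differ.

/* Illustration only: advance the tail of the C2H event ring, leaving one
 * slot unused so that head == tail can mean "empty". */
static int example_enqueue_event(struct c2hevent_queue *q,
				 const struct event_node *node)
{
	int next = (q->tail + 1) % C2HEVENT_SZ;

	if (next == q->head)
		return -1;	/* ring full */

	q->nodes[q->tail] = *node;
	q->tail = next;
	return 0;
}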
diff --git a/drivers/staging/rtl8723au/include/rtw_ht.h b/drivers/staging/rtl8723au/include/rtw_ht.h
new file mode 100644
index 000000000000..7fe0aa46f707
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_ht.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_HT_H_
+#define _RTW_HT_H_
+
+#include <osdep_service.h>
+#include "linux/ieee80211.h"
+#include "wifi.h"
+
+struct ht_priv {
+ u32 ht_option;
+ u32 ampdu_enable;/* for enable Tx A-MPDU */
+ /* u8 baddbareq_issued[16]; */
+ u32 tx_amsdu_enable;/* for enable Tx A-MSDU */
+ u32 tx_amdsu_maxlen; /* 1: 8k, 0:4k ; default:8k, for tx */
+ u32 rx_ampdu_maxlen; /* for rx reordering ctrl win_sz, updated when join_callback. */
+
+ u8 bwmode;/* */
+ u8 ch_offset;/* PRIME_CHNL_OFFSET */
+ u8 sgi;/* short GI */
+
+ /* for processing Tx A-MPDU */
+ u8 agg_enable_bitmap;
+ /* u8 ADDBA_retry_count; */
+ u8 candidate_tid_bitmap;
+
+ struct ieee80211_ht_cap ht_cap;
+};
+
+#endif /* _RTW_HT_H_ */
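
A brief, hedged sketch of how the two per-TID bitmaps in struct ht_priv might be used; the relationship assumed here (candidate first, enabled after a successful ADDBA exchange) is an illustration, not taken from this header.

/* Sketch: mark a TID as an A-MPDU candidate, and test whether aggregation
 * has been enabled for it.  TIDs are assumed to occupy bits 0-7. */
static void example_mark_ampdu_candidate(struct ht_priv *phtpriv, u8 tid)
{
	if (tid < 8)
		phtpriv->candidate_tid_bitmap |= BIT(tid);
}

static bool example_ampdu_enabled(const struct ht_priv *phtpriv, u8 tid)
{
	return tid < 8 && (phtpriv->agg_enable_bitmap & BIT(tid));
}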
diff --git a/drivers/staging/rtl8723au/include/rtw_io.h b/drivers/staging/rtl8723au/include/rtw_io.h
new file mode 100644
index 000000000000..8d39d800267d
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_io.h
@@ -0,0 +1,416 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef _RTW_IO_H_
+#define _RTW_IO_H_
+
+#include <osdep_service.h>
+#include <osdep_intf.h>
+
+#include <asm/byteorder.h>
+#include <linux/semaphore.h>
+#include <linux/list.h>
+/* include <linux/smp_lock.h> */
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+
+#define rtw_usb_buffer_alloc(dev, size, dma) usb_alloc_coherent((dev), (size), (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), (dma))
+#define rtw_usb_buffer_free(dev, size, addr, dma) usb_free_coherent((dev), (size), (addr), (dma))
+
+#define NUM_IOREQ 8
+
+#define MAX_PROT_SZ (64-16)
+
+#define _IOREADY 0
+#define _IO_WAIT_COMPLETE 1
+#define _IO_WAIT_RSP 2
+
+/* IO COMMAND TYPE */
+#define _IOSZ_MASK_ (0x7F)
+#define _IO_WRITE_ BIT(7)
+#define _IO_FIXED_ BIT(8)
+#define _IO_BURST_ BIT(9)
+#define _IO_BYTE_ BIT(10)
+#define _IO_HW_ BIT(11)
+#define _IO_WORD_ BIT(12)
+#define _IO_SYNC_ BIT(13)
+#define _IO_CMDMASK_ (0x1F80)
+
+
+/*
+ * For prompt-mode access, the caller shall free the io_req;
+ * otherwise, the io_handler will free it.
+ */
+
+
+
+/* IO STATUS TYPE */
+#define _IO_ERR_ BIT(2)
+#define _IO_SUCCESS_ BIT(1)
+#define _IO_DONE_ BIT(0)
+
+
+#define IO_RD32 (_IO_SYNC_ | _IO_WORD_)
+#define IO_RD16 (_IO_SYNC_ | _IO_HW_)
+#define IO_RD8 (_IO_SYNC_ | _IO_BYTE_)
+
+#define IO_RD32_ASYNC (_IO_WORD_)
+#define IO_RD16_ASYNC (_IO_HW_)
+#define IO_RD8_ASYNC (_IO_BYTE_)
+
+#define IO_WR32 (_IO_WRITE_ | _IO_SYNC_ | _IO_WORD_)
+#define IO_WR16 (_IO_WRITE_ | _IO_SYNC_ | _IO_HW_)
+#define IO_WR8 (_IO_WRITE_ | _IO_SYNC_ | _IO_BYTE_)
+
+#define IO_WR32_ASYNC (_IO_WRITE_ | _IO_WORD_)
+#define IO_WR16_ASYNC (_IO_WRITE_ | _IO_HW_)
+#define IO_WR8_ASYNC (_IO_WRITE_ | _IO_BYTE_)
+
+/* Only synchronous burst access is provided. */
+
+#define IO_WR_BURST(x) (_IO_WRITE_ | _IO_SYNC_ | _IO_BURST_ | ( (x) & _IOSZ_MASK_))
+#define IO_RD_BURST(x) (_IO_SYNC_ | _IO_BURST_ | ( (x) & _IOSZ_MASK_))
+
+
+
+/* Below are the intf_option bit definitions. */
+
+#define _INTF_ASYNC_ BIT(0) /* support async io */
+
+struct intf_priv;
+struct intf_hdl;
+struct io_queue;
+
+struct _io_ops {
+ u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
+ u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
+ u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
+
+ int (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+ int (*_writeN)(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata);
+
+ int (*_write8_async)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int (*_write16_async)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int (*_write32_async)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+
+ void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+
+ void (*_sync_irp_protocol_rw)(struct io_queue *pio_q);
+
+ u32 (*_read_interrupt)(struct intf_hdl *pintfhdl, u32 addr);
+
+ u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, struct recv_buf *rbuf);
+ u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, struct xmit_buf *pmem);
+
+ u32 (*_write_scsi)(struct intf_hdl *pintfhdl,u32 cnt, u8 *pmem);
+
+ void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
+ void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
+
+};
+
+struct io_req {
+ struct list_head list;
+ u32 addr;
+ volatile u32 val;
+ u32 command;
+ u32 status;
+ u8 *pbuf;
+ struct semaphore sema;
+
+	void (*_async_io_callback)(struct rtw_adapter *padapter, struct io_req *pio_req, u8 *cnxt);
+ u8 *cnxt;
+};
+
+struct intf_hdl {
+ struct rtw_adapter *padapter;
+ struct dvobj_priv *pintf_dev;/* pointer to &(padapter->dvobjpriv); */
+
+ struct _io_ops io_ops;
+
+};
+
+struct reg_protocol_rd {
+
+#ifdef __LITTLE_ENDIAN
+
+ /* DW1 */
+ u32 NumOfTrans:4;
+ u32 Reserved1:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 ByteCount:7;
+ u32 WriteEnable:1; /* 0:read, 1:write */
+ u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
+ u32 BurstMode:1;
+ u32 Byte1Access:1;
+ u32 Byte2Access:1;
+ u32 Byte4Access:1;
+ u32 Reserved3:3;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+ /* DW4 */
+ /* u32 Value; */
+#else
+
+
+/* DW1 */
+ u32 Reserved1 :4;
+ u32 NumOfTrans :4;
+
+ u32 Reserved2 :24;
+
+ /* DW2 */
+ u32 WriteEnable : 1;
+ u32 ByteCount :7;
+
+
+ u32 Reserved3 : 3;
+ u32 Byte4Access : 1;
+
+ u32 Byte2Access : 1;
+ u32 Byte1Access : 1;
+ u32 BurstMode :1 ;
+ u32 FixOrContinuous : 1;
+
+ u32 Reserved4 : 16;
+
+ /* DW3 */
+ u32 BusAddress;
+
+ /* DW4 */
+ /* u32 Value; */
+
+#endif
+
+};
+
+
+struct reg_protocol_wt {
+
+
+#ifdef __LITTLE_ENDIAN
+
+ /* DW1 */
+ u32 NumOfTrans:4;
+ u32 Reserved1:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 ByteCount:7;
+ u32 WriteEnable:1; /* 0:read, 1:write */
+ u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
+ u32 BurstMode:1;
+ u32 Byte1Access:1;
+ u32 Byte2Access:1;
+ u32 Byte4Access:1;
+ u32 Reserved3:3;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+ /* DW4 */
+ u32 Value;
+
+#else
+ /* DW1 */
+ u32 Reserved1 :4;
+ u32 NumOfTrans :4;
+
+ u32 Reserved2 :24;
+
+ /* DW2 */
+ u32 WriteEnable : 1;
+ u32 ByteCount :7;
+
+ u32 Reserved3 : 3;
+ u32 Byte4Access : 1;
+
+ u32 Byte2Access : 1;
+ u32 Byte1Access : 1;
+ u32 BurstMode :1 ;
+ u32 FixOrContinuous : 1;
+
+ u32 Reserved4 : 16;
+
+ /* DW3 */
+ u32 BusAddress;
+
+ /* DW4 */
+ u32 Value;
+
+#endif
+
+};
+
+
+
+/*
+Below is the data structure used by _io_handler
+
+*/
+
+struct io_queue {
+ spinlock_t lock;
+ struct list_head free_ioreqs;
+ struct list_head pending; /* The io_req list that will be served in the single protocol read/write. */
+ struct list_head processing;
+ u8 *free_ioreqs_buf; /* 4-byte aligned */
+ u8 *pallocated_free_ioreqs_buf;
+ struct intf_hdl intf;
+};
+
+struct io_priv {
+
+ struct rtw_adapter *padapter;
+
+ struct intf_hdl intf;
+
+};
+
+uint ioreq_flush(struct rtw_adapter *adapter, struct io_queue *ioqueue);
+void sync_ioreq_enqueue(struct io_req *preq,struct io_queue *ioqueue);
+uint sync_ioreq_flush(struct rtw_adapter *adapter, struct io_queue *ioqueue);
+
+uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
+struct io_req *alloc_ioreq(struct io_queue *pio_q);
+
+uint register_intf_hdl(u8 *dev, struct intf_hdl *pintfhdl);
+void unregister_intf_hdl(struct intf_hdl *pintfhdl);
+
+void _rtw_attrib_read(struct rtw_adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void _rtw_attrib_write(struct rtw_adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+
+u8 _rtw_read823a(struct rtw_adapter *adapter, u32 addr);
+u16 _rtw_read1623a(struct rtw_adapter *adapter, u32 addr);
+u32 _rtw_read3223a(struct rtw_adapter *adapter, u32 addr);
+void _rtw_read_mem23a(struct rtw_adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void _rtw_read_port23a(struct rtw_adapter *adapter, u32 addr, u32 cnt, struct recv_buf *rbuf);
+void _rtw_read_port23a_cancel(struct rtw_adapter *adapter);
+
+int _rtw_write823a(struct rtw_adapter *adapter, u32 addr, u8 val);
+int _rtw_write1623a(struct rtw_adapter *adapter, u32 addr, u16 val);
+int _rtw_write3223a(struct rtw_adapter *adapter, u32 addr, u32 val);
+int _rtw_writeN23a(struct rtw_adapter *adapter, u32 addr, u32 length, u8 *pdata);
+
+int _rtw_write823a_async23a(struct rtw_adapter *adapter, u32 addr, u8 val);
+int _rtw_write1623a_async(struct rtw_adapter *adapter, u32 addr, u16 val);
+int _rtw_write3223a_async23a(struct rtw_adapter *adapter, u32 addr, u32 val);
+
+void _rtw_write_mem23a(struct rtw_adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+u32 _rtw_write_port23a(struct rtw_adapter *adapter, u32 addr, u32 cnt, struct xmit_buf *pmem);
+u32 _rtw_write_port23a_and_wait23a(struct rtw_adapter *adapter, u32 addr, u32 cnt, struct xmit_buf *pmem, int timeout_ms);
+void _rtw_write_port23a_cancel(struct rtw_adapter *adapter);
+
+#ifdef DBG_IO
+bool match_read_sniff_ranges(u16 addr, u16 len);
+bool match_write_sniff_ranges(u16 addr, u16 len);
+
+u8 dbg_rtw_read823a(struct rtw_adapter *adapter, u32 addr, const char *caller, const int line);
+u16 dbg_rtw_read1623a(struct rtw_adapter *adapter, u32 addr, const char *caller, const int line);
+u32 dbg_rtw_read3223a(struct rtw_adapter *adapter, u32 addr, const char *caller, const int line);
+
+int dbg_rtw_write823a(struct rtw_adapter *adapter, u32 addr, u8 val, const char *caller, const int line);
+int dbg_rtw_write1623a(struct rtw_adapter *adapter, u32 addr, u16 val, const char *caller, const int line);
+int dbg_rtw_write3223a(struct rtw_adapter *adapter, u32 addr, u32 val, const char *caller, const int line);
+int dbg_rtw_writeN23a(struct rtw_adapter *adapter, u32 addr ,u32 length , u8 *data, const char *caller, const int line);
+
+#define rtw_read8(adapter, addr) dbg_rtw_read823a((adapter), (addr), __FUNCTION__, __LINE__)
+#define rtw_read16(adapter, addr) dbg_rtw_read1623a((adapter), (addr), __FUNCTION__, __LINE__)
+#define rtw_read32(adapter, addr) dbg_rtw_read3223a((adapter), (addr), __FUNCTION__, __LINE__)
+#define rtw_read_mem(adapter, addr, cnt, mem) _rtw_read_mem23a((adapter), (addr), (cnt), (mem))
+#define rtw_read_port(adapter, addr, cnt, mem) _rtw_read_port23a((adapter), (addr), (cnt), (mem))
+#define rtw_read_port_cancel(adapter) _rtw_read_port23a_cancel((adapter))
+
+#define rtw_write8(adapter, addr, val) dbg_rtw_write823a((adapter), (addr), (val), __FUNCTION__, __LINE__)
+#define rtw_write16(adapter, addr, val) dbg_rtw_write1623a((adapter), (addr), (val), __FUNCTION__, __LINE__)
+#define rtw_write32(adapter, addr, val) dbg_rtw_write3223a((adapter), (addr), (val), __FUNCTION__, __LINE__)
+#define rtw_writeN(adapter, addr, length, data) dbg_rtw_writeN23a((adapter), (addr), (length), (data), __FUNCTION__, __LINE__)
+
+#define rtw_write8_async(adapter, addr, val) _rtw_write823a_async23a((adapter), (addr), (val))
+#define rtw_write16_async(adapter, addr, val) _rtw_write1623a_async((adapter), (addr), (val))
+#define rtw_write32_async(adapter, addr, val) _rtw_write3223a_async23a((adapter), (addr), (val))
+
+#define rtw_write_mem(adapter, addr, cnt, mem) _rtw_write_mem23a((adapter), addr, cnt, mem)
+#define rtw_write_port(adapter, addr, cnt, mem) _rtw_write_port23a(adapter, addr, cnt, mem)
+#define rtw_write_port_and_wait(adapter, addr, cnt, mem, timeout_ms) _rtw_write_port23a_and_wait23a((adapter), (addr), (cnt), (mem), (timeout_ms))
+#define rtw_write_port_cancel(adapter) _rtw_write_port23a_cancel(adapter)
+#else /* DBG_IO */
+#define rtw_read8(adapter, addr) _rtw_read823a((adapter), (addr))
+#define rtw_read16(adapter, addr) _rtw_read1623a((adapter), (addr))
+#define rtw_read32(adapter, addr) _rtw_read3223a((adapter), (addr))
+#define rtw_read_mem(adapter, addr, cnt, mem) _rtw_read_mem23a((adapter), (addr), (cnt), (mem))
+#define rtw_read_port(adapter, addr, cnt, mem) _rtw_read_port23a((adapter), (addr), (cnt), (mem))
+#define rtw_read_port_cancel(adapter) _rtw_read_port23a_cancel((adapter))
+
+#define rtw_write8(adapter, addr, val) _rtw_write823a((adapter), (addr), (val))
+#define rtw_write16(adapter, addr, val) _rtw_write1623a((adapter), (addr), (val))
+#define rtw_write32(adapter, addr, val) _rtw_write3223a((adapter), (addr), (val))
+#define rtw_writeN(adapter, addr, length, data) _rtw_writeN23a((adapter), (addr), (length), (data))
+
+#define rtw_write8_async(adapter, addr, val) _rtw_write823a_async23a((adapter), (addr), (val))
+#define rtw_write16_async(adapter, addr, val) _rtw_write1623a_async((adapter), (addr), (val))
+#define rtw_write32_async(adapter, addr, val) _rtw_write3223a_async23a((adapter), (addr), (val))
+
+#define rtw_write_mem(adapter, addr, cnt, mem) _rtw_write_mem23a((adapter), (addr), (cnt), (mem))
+#define rtw_write_port(adapter, addr, cnt, mem) _rtw_write_port23a((adapter), (addr), (cnt), (mem))
+#define rtw_write_port_and_wait(adapter, addr, cnt, mem, timeout_ms) _rtw_write_port23a_and_wait23a((adapter), (addr), (cnt), (mem), (timeout_ms))
+#define rtw_write_port_cancel(adapter) _rtw_write_port23a_cancel((adapter))
+#endif /* DBG_IO */
+
+void rtw_write_scsi(struct rtw_adapter *adapter, u32 cnt, u8 *pmem);
+
+/* ioreq */
+void ioreq_read8(struct rtw_adapter *adapter, u32 addr, u8 *pval);
+void ioreq_read16(struct rtw_adapter *adapter, u32 addr, u16 *pval);
+void ioreq_read32(struct rtw_adapter *adapter, u32 addr, u32 *pval);
+void ioreq_write8(struct rtw_adapter *adapter, u32 addr, u8 val);
+void ioreq_write16(struct rtw_adapter *adapter, u32 addr, u16 val);
+void ioreq_write32(struct rtw_adapter *adapter, u32 addr, u32 val);
+
+int rtw_init_io_priv23a(struct rtw_adapter *padapter, void (*set_intf_ops)(struct _io_ops *pops));
+
+uint alloc_io_queue(struct rtw_adapter *adapter);
+void free_io_queue(struct rtw_adapter *adapter);
+void async_bus_io(struct io_queue *pio_q);
+void bus_sync_io(struct io_queue *pio_q);
+u32 _ioreq2rwmem(struct io_queue *pio_q);
+void dev_power_down(struct rtw_adapter * Adapter, u8 bpwrup);
+
+#define PlatformEFIOWrite1Byte(_a,_b,_c) \
+ rtw_write8(_a,_b,_c)
+#define PlatformEFIOWrite2Byte(_a,_b,_c) \
+ rtw_write16(_a,_b,_c)
+#define PlatformEFIOWrite4Byte(_a,_b,_c) \
+ rtw_write32(_a,_b,_c)
+
+#define PlatformEFIORead1Byte(_a,_b) \
+ rtw_read8(_a,_b)
+#define PlatformEFIORead2Byte(_a,_b) \
+ rtw_read16(_a,_b)
+#define PlatformEFIORead4Byte(_a,_b) \
+ rtw_read32(_a,_b)
+
+#endif /* _RTW_IO_H_ */
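
A hedged sketch of how a bus glue layer might feed its accessors into struct _io_ops via rtw_init_io_priv23a(); the example_* callbacks are hypothetical placeholders for the real USB/SDIO implementations.

/* Illustrative stubs; a real implementation would issue bus transfers. */
static u8 example_read8(struct intf_hdl *pintfhdl, u32 addr)
{
	return 0;
}

static int example_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val)
{
	return 0;
}

static void example_set_intf_ops(struct _io_ops *pops)
{
	memset(pops, 0, sizeof(*pops));
	pops->_read8 = example_read8;
	pops->_write8 = example_write8;
	/* ...remaining handlers filled in the same way... */
}

/* Hooked up once per adapter, e.g. from the probe path:
 *	rtw_init_io_priv23a(padapter, example_set_intf_ops);
 * after which the rtw_read8()/rtw_write8() wrappers above are expected to
 * reach these callbacks through _rtw_read823a()/_rtw_write823a(). */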
diff --git a/drivers/staging/rtl8723au/include/rtw_ioctl.h b/drivers/staging/rtl8723au/include/rtw_ioctl.h
new file mode 100644
index 000000000000..629eec8a7023
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_ioctl.h
@@ -0,0 +1,26 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_IOCTL_H_
+#define _RTW_IOCTL_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+#if defined(CONFIG_WIRELESS_EXT)
+extern struct iw_handler_def rtw_handlers_def;
+#endif
+
+#endif /* _RTW_IOCTL_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_ioctl_set.h b/drivers/staging/rtl8723au/include/rtw_ioctl_set.h
new file mode 100644
index 000000000000..18ad2a873350
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_ioctl_set.h
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_IOCTL_SET_H_
+#define __RTW_IOCTL_SET_H_
+
+#include <drv_types.h>
+
+
+struct bssid_info {
+ unsigned char BSSID[6];
+ u8 PMKID[16];
+};
+
+u8 rtw_set_802_11_authentication_mode23a(struct rtw_adapter *pdapter,
+ enum ndis_802_11_auth_mode authmode);
+u8 rtw_set_802_11_add_wep23a(struct rtw_adapter *padapter,
+ struct ndis_802_11_wep *wep);
+u8 rtw_set_802_11_bssid23a_list_scan(struct rtw_adapter *padapter,
+ struct cfg80211_ssid *pssid, int ssid_max_num);
+u8 rtw_set_802_11_infrastructure_mode23a(struct rtw_adapter *padapter,
+ enum ndis_802_11_net_infra networktype);
+u8 rtw_set_802_11_ssid23a(struct rtw_adapter *padapter, struct cfg80211_ssid *ssid);
+
+u16 rtw_get_cur_max_rate23a(struct rtw_adapter *adapter);
+s32 FillH2CCmd(struct rtw_adapter *padapter, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer);
+
+#endif
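
For illustration, a hedged sketch of calling the scan helper declared above with a single SSID; the SSID handling shown is an assumption, not the driver's actual cfg80211 glue.

/* Sketch only: build one cfg80211_ssid entry and request a scan for it. */
static u8 example_scan_for_ssid(struct rtw_adapter *padapter, const char *name)
{
	struct cfg80211_ssid ssid;

	memset(&ssid, 0, sizeof(ssid));
	ssid.ssid_len = min_t(size_t, strlen(name), IEEE80211_MAX_SSID_LEN);
	memcpy(ssid.ssid, name, ssid.ssid_len);

	return rtw_set_802_11_bssid23a_list_scan(padapter, &ssid, 1);
}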
diff --git a/drivers/staging/rtl8723au/include/rtw_led.h b/drivers/staging/rtl8723au/include/rtw_led.h
new file mode 100644
index 000000000000..c071da587efd
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_led.h
@@ -0,0 +1,181 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_LED_H_
+#define __RTW_LED_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define MSECS(t) (HZ * ((t) / 1000) + (HZ * ((t) % 1000)) / 1000)
+
+#define LED_BLINK_NORMAL_INTERVAL 100
+#define LED_BLINK_SLOWLY_INTERVAL 200
+#define LED_BLINK_LONG_INTERVAL 400
+
+#define LED_BLINK_NO_LINK_INTERVAL_ALPHA 1000
+#define LED_BLINK_LINK_INTERVAL_ALPHA 500 /* 500 */
+#define LED_BLINK_SCAN_INTERVAL_ALPHA 180 /* 150 */
+#define LED_BLINK_FASTER_INTERVAL_ALPHA 50
+#define LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA 5000
+
+#define LED_BLINK_NORMAL_INTERVAL_NETTRONIX 100
+#define LED_BLINK_SLOWLY_INTERVAL_NETTRONIX 2000
+
+#define LED_BLINK_SLOWLY_INTERVAL_PORNET 1000
+#define LED_BLINK_NORMAL_INTERVAL_PORNET 100
+
+#define LED_BLINK_FAST_INTERVAL_BITLAND 30
+
+/* 060403, rcnjko: Customized for AzWave. */
+#define LED_CM2_BLINK_ON_INTERVAL 250
+#define LED_CM2_BLINK_OFF_INTERVAL 4750
+
+#define LED_CM8_BLINK_INTERVAL 500 /* for QMI */
+#define LED_CM8_BLINK_OFF_INTERVAL 3750 /* for QMI */
+
+/* 080124, lanhsin: Customized for RunTop */
+#define LED_RunTop_BLINK_INTERVAL 300
+
+/* 060421, rcnjko: Customized for Sercomm Printer Server case. */
+#define LED_CM3_BLINK_INTERVAL 1500
+
+enum led_ctl_mode {
+ LED_CTL_POWER_ON = 1,
+ LED_CTL_LINK = 2,
+ LED_CTL_NO_LINK = 3,
+ LED_CTL_TX = 4,
+ LED_CTL_RX = 5,
+ LED_CTL_SITE_SURVEY = 6,
+ LED_CTL_POWER_OFF = 7,
+ LED_CTL_START_TO_LINK = 8,
+ LED_CTL_START_WPS = 9,
+ LED_CTL_STOP_WPS = 10,
+ LED_CTL_START_WPS_BOTTON = 11, /* added for runtop */
+ LED_CTL_STOP_WPS_FAIL = 12, /* added for ALPHA */
+ LED_CTL_STOP_WPS_FAIL_OVERLAP = 13, /* added for BELKIN */
+ LED_CTL_CONNECTION_NO_TRANSFER = 14,
+};
+
+enum led_state_872x {
+ LED_UNKNOWN = 0,
+ RTW_LED_ON = 1,
+ RTW_LED_OFF = 2,
+ LED_BLINK_NORMAL = 3,
+ LED_BLINK_SLOWLY = 4,
+ LED_BLINK_POWER_ON = 5,
+	LED_BLINK_SCAN = 6, /* LED is blinking during the scanning period; the number of times to blink depends on how long scanning takes. */
+	LED_BLINK_NO_LINK = 7, /* LED is blinking during the no-link state. */
+	LED_BLINK_StartToBlink = 8, /* Customized for the Sercomm Printer Server case */
+	LED_BLINK_TXRX = 9,
+	LED_BLINK_WPS = 10, /* LED is blinking during WPS communication */
+ LED_BLINK_WPS_STOP = 11, /* for ALPHA */
+ LED_BLINK_WPS_STOP_OVERLAP = 12, /* for BELKIN */
+ LED_BLINK_RUNTOP = 13, /* Customized for RunTop */
+ LED_BLINK_CAMEO = 14,
+ LED_BLINK_XAVI = 15,
+ LED_BLINK_ALWAYS_ON = 16,
+};
+
+enum led_pin_8723a {
+ LED_PIN_NULL = 0,
+ LED_PIN_LED0 = 1,
+ LED_PIN_LED1 = 2,
+ LED_PIN_LED2 = 3,
+ LED_PIN_GPIO0 = 4,
+};
+
+struct led_8723a {
+ struct rtw_adapter *padapter;
+
+ enum led_pin_8723a LedPin; /* Identify how to implement this SW led. */
+ enum led_state_872x CurrLedState; /* Current LED state. */
+	enum led_state_872x BlinkingLedState; /* Next state for blinking, either RTW_LED_ON or RTW_LED_OFF. */
+
+ u8 bLedOn; /* true if LED is ON, false if LED is OFF. */
+
+	u8 bLedBlinkInProgress; /* true if it is blinking, false otherwise. */
+
+ u8 bLedWPSBlinkInProgress;
+
+ u32 BlinkTimes; /* Number of times to toggle led state for blinking. */
+
+ struct timer_list BlinkTimer; /* Timer object for led blinking. */
+
+ u8 bSWLedCtrl;
+
+ /* ALPHA, added by chiyoko, 20090106 */
+ u8 bLedNoLinkBlinkInProgress;
+ u8 bLedLinkBlinkInProgress;
+ u8 bLedStartToLinkBlinkInProgress;
+ u8 bLedScanBlinkInProgress;
+
+ struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to manipulate H/W to blink LED. */
+};
+
+#define IS_LED_WPS_BLINKING(_LED_871x) (((struct led_8723a *)_LED_871x)->CurrLedState==LED_BLINK_WPS \
+ || ((struct led_8723a *)_LED_871x)->CurrLedState==LED_BLINK_WPS_STOP \
+ || ((struct led_8723a *)_LED_871x)->bLedWPSBlinkInProgress)
+
+#define IS_LED_BLINKING(_LED_871x) (((struct led_8723a *)_LED_871x)->bLedWPSBlinkInProgress \
+ ||((struct led_8723a *)_LED_871x)->bLedScanBlinkInProgress)
+
+/* */
+/* LED customization. */
+/* */
+
+enum led_strategy_8723a {
+ SW_LED_MODE0 = 0, /* SW control 1 LED via GPIO0. It is default option. */
+	SW_LED_MODE1 = 1, /* 2 LEDs, through LED0 and LED1. For ALPHA. */
+ SW_LED_MODE2 = 2, /* SW control 1 LED via GPIO0, customized for AzWave 8187 minicard. */
+ SW_LED_MODE3 = 3, /* SW control 1 LED via GPIO0, customized for Sercomm Printer Server case. */
+ SW_LED_MODE4 = 4, /* for Edimax / Belkin */
+ SW_LED_MODE5 = 5, /* for Sercomm / Belkin */
+ SW_LED_MODE6 = 6, /* for 88CU minicard, porting from ce SW_LED_MODE7 */
+ HW_LED = 50, /* HW control 2 LEDs, LED0 and LED1 (there are 4 different control modes, see MAC.CONFIG1 for details.) */
+ LED_ST_NONE = 99,
+};
+
+void LedControl871x23a(struct rtw_adapter *padapter, enum led_ctl_mode LedAction);
+
+struct led_priv {
+	/* added for LED control */
+ struct led_8723a SwLed0;
+ struct led_8723a SwLed1;
+ enum led_strategy_8723a LedStrategy;
+ u8 bRegUseLed;
+ void (*LedControlHandler)(struct rtw_adapter *padapter, enum led_ctl_mode LedAction);
+	/* added for LED control */
+};
+
+#define rtw_led_control(adapter, LedAction)
+
+void BlinkWorkItemCallback23a(struct work_struct *work);
+
+void ResetLedStatus23a(struct led_8723a *pLed);
+
+void InitLed871x23a(struct rtw_adapter *padapter, struct led_8723a *pLed,
+		    enum led_pin_8723a LedPin);
+
+void DeInitLed871x23a(struct led_8723a *pLed);
+
+/* hal... */
+void BlinkHandler23a(struct led_8723a *pLed);
+
+#endif /* __RTW_LED_H_ */
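
A hedged sketch of how MSECS() and the blink fields above are typically combined; the interval choice and BlinkTimes value are illustrative, not the driver's actual scan-blink parameters.

/* Sketch only: arm the blink timer for a scan pattern.  MSECS() turns a
 * millisecond interval into jiffies for mod_timer(). */
static void example_start_scan_blink(struct led_8723a *pLed)
{
	pLed->CurrLedState = LED_BLINK_SCAN;
	pLed->BlinkTimes = 24;			/* hypothetical toggle count */
	pLed->bLedScanBlinkInProgress = true;

	mod_timer(&pLed->BlinkTimer,
		  jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
}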
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme.h b/drivers/staging/rtl8723au/include/rtw_mlme.h
new file mode 100644
index 000000000000..31f96f39b498
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_mlme.h
@@ -0,0 +1,624 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_MLME_H_
+#define __RTW_MLME_H_
+
+#include <osdep_service.h>
+#include <mlme_osdep.h>
+#include <drv_types.h>
+#include <wlan_bssdef.h>
+
+#define MAX_BSS_CNT 128
+#define MAX_JOIN_TIMEOUT 6500
+
+/* Increase the scanning timeout because of increasing the SURVEY_TO value. */
+
+#define SCANNING_TIMEOUT 8000
+
+#define SCAN_INTERVAL (30) /* unit:2sec, 30*2 = 60sec */
+
+#define SCANQUEUE_LIFETIME 20 /* unit:sec */
+
+#define WIFI_NULL_STATE 0x00000000
+
+#define WIFI_ASOC_STATE 0x00000001 /* Under Linked state.*/
+#define WIFI_REASOC_STATE 0x00000002
+#define WIFI_SLEEP_STATE 0x00000004
+#define WIFI_STATION_STATE 0x00000008
+
+#define WIFI_AP_STATE 0x00000010
+#define WIFI_ADHOC_STATE 0x00000020
+#define WIFI_ADHOC_MASTER_STATE 0x00000040
+#define WIFI_UNDER_LINKING 0x00000080
+
+#define WIFI_UNDER_WPS 0x00000100
+#define WIFI_STA_ALIVE_CHK_STATE 0x00000400
+/* to indicate the station is under site surveying */
+#define WIFI_SITE_MONITOR 0x00000800
+
+#define WIFI_MP_STATE 0x00010000
+#define WIFI_MP_CTX_BACKGROUND 0x00020000 /* in continuous tx background */
+#define WIFI_MP_CTX_ST 0x00040000 /* in continuous tx with single tone */
+#define WIFI_MP_CTX_BACKGROUND_PENDING 0x00080000 /* continuous tx background pending due to running out of skbs */
+#define WIFI_MP_CTX_CCK_HW 0x00100000 /* in continuous tx */
+#define WIFI_MP_CTX_CCK_CS 0x00200000 /* in continuous tx with carrier suppression */
+#define WIFI_MP_LPBK_STATE 0x00400000
+
+#define _FW_UNDER_LINKING WIFI_UNDER_LINKING
+#define _FW_LINKED WIFI_ASOC_STATE
+#define _FW_UNDER_SURVEY WIFI_SITE_MONITOR
+
+
+enum dot11AuthAlgrthmNum {
+ dot11AuthAlgrthm_Open = 0,
+ dot11AuthAlgrthm_Shared,
+ dot11AuthAlgrthm_8021X,
+ dot11AuthAlgrthm_Auto,
+ dot11AuthAlgrthm_MaxNum
+};
+
+/* Scan type including active and passive scan. */
+enum rt_scan_type {
+ SCAN_PASSIVE,
+ SCAN_ACTIVE,
+ SCAN_MIX,
+};
+
+enum {
+ GHZ24_50 = 0,
+ GHZ_50,
+ GHZ_24,
+};
+
+enum SCAN_RESULT_TYPE {
+ SCAN_RESULT_P2P_ONLY = 0, /* Will return all the P2P devices. */
+	SCAN_RESULT_ALL = 1, /* Will return all scanned devices, including APs. */
+ SCAN_RESULT_WFD_TYPE = 2 /* Will just return the correct WFD device. */
+	/* If this device is a Miracast sink device, it will just return all the Miracast source devices. */
+};
+
+/*
+ * There are several "locks" in mlme_priv, since mlme_priv is a shared
+ * resource between many threads, such as ISR/callback functions, the OID
+ * handlers, and even timer functions.
+ *
+ * Each _queue already has its own lock; other items are protected by
+ * mlme_priv.lock.
+ *
+ * To avoid possible deadlock, any thread trying to modify mlme_priv
+ * SHALL NOT hold more than one lock at a time!
+ */
+
+#define traffic_threshold 10
+#define traffic_scan_period 500
+
+struct sitesurvey_ctrl {
+ u64 last_tx_pkts;
+ uint last_rx_pkts;
+ int traffic_busy;
+ struct timer_list sitesurvey_ctrl_timer;
+};
+
+struct rt_link_detect {
+ u32 NumTxOkInPeriod;
+ u32 NumRxOkInPeriod;
+ u32 NumRxUnicastOkInPeriod;
+ bool bBusyTraffic;
+ bool bTxBusyTraffic;
+ bool bRxBusyTraffic;
+ bool bHigherBusyTraffic; /* For interrupt migration purpose. */
+	bool bHigherBusyRxTraffic; /* We may disable Tx interrupts depending on Rx traffic. */
+	bool bHigherBusyTxTraffic; /* We may disable Tx interrupts depending on Tx traffic. */
+};
+
+struct profile_info {
+ u8 ssidlen;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 peermac[ETH_ALEN];
+};
+
+struct tx_invite_req_info {
+ u8 token;
+ u8 benable;
+ u8 go_ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssidlen;
+ u8 go_bssid[ETH_ALEN];
+ u8 peer_macaddr[ETH_ALEN];
+ u8 operating_ch; /* This information will be set by using the p2p_set op_ch = x */
+ u8 peer_ch; /* The listen channel for peer P2P device */
+
+};
+
+struct tx_invite_resp_info {
+ u8 token; /* Used to record the dialog token of p2p invitation request frame. */
+};
+
+#ifdef CONFIG_8723AU_P2P
+
+struct wifi_display_info {
+ u16 wfd_enable; /* Enable/Disable the WFD function. */
+	u16 rtsp_ctrlport; /* TCP port number at which this WFD device listens for RTSP messages */
+	u16 peer_rtsp_ctrlport; /* TCP port number at which the peer WFD device listens for RTSP messages */
+	/* This field should be filled when receiving the group negotiation request */
+
+ u8 peer_session_avail; /* WFD session is available or not for the peer wfd device. */
+ /* This variable will be set when sending the provisioning discovery request to peer WFD device. */
+ /* And this variable will be reset when it is read by using the iwpriv p2p_get wfd_sa command. */
+ u8 ip_address[4];
+ u8 peer_ip_address[4];
+ u8 wfd_pc; /* WFD preferred connection */
+ /* 0 -> Prefer to use the P2P for WFD connection on peer side. */
+ /* 1 -> Prefer to use the TDLS for WFD connection on peer side. */
+
+ u8 wfd_device_type;/* WFD Device Type */
+ /* 0 -> WFD Source Device */
+ /* 1 -> WFD Primary Sink Device */
+	enum SCAN_RESULT_TYPE scan_result_type; /* Used when P2P is enabled. This parameter will impact the scan result. */
+};
+#endif /* CONFIG_8723AU_P2P */
+
+struct tx_provdisc_req_info {
+ u16 wps_config_method_request; /* Used when sending the provisioning request frame */
+	u16 peer_channel_num[2]; /* The channel number on which the receiver operates. */
+ struct cfg80211_ssid ssid;
+ u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
+ u8 peerIFAddr[ETH_ALEN]; /* Peer interface address */
+	u8 benable; /* Whether this provision discovery request frame is triggered to be sent */
+};
+
+struct rx_provdisc_req_info { /* When the peer device issues prov_disc_req first, we should store the following information */
+	u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
+	u8 strconfig_method_desc_of_prov_disc_req[4]; /* description of the config method located in the provisioning discovery request frame. */
+	/* The UI must know this information to know which config method the remote p2p device is requesting. */
+};
+
+struct tx_nego_req_info {
+	u16 peer_channel_num[2]; /* The channel number on which the receiver operates. */
+ u8 peerDevAddr[ETH_ALEN];/* Peer device address */
+	u8 benable; /* Whether this negotiation request frame is triggered to be sent */
+};
+
+struct group_id_info {
+	u8 go_device_addr[ETH_ALEN]; /* The GO's device address of the P2P group */
+ u8 ssid[IEEE80211_MAX_SSID_LEN]; /* The SSID of this P2P group */
+};
+
+struct scan_limit_info {
+ u8 scan_op_ch_only; /* When this flag is set, the driver should just scan the operation channel */
+ u8 operation_ch[2]; /* Store the operation channel of invitation request frame */
+};
+
+struct cfg80211_wifidirect_info {
+ struct timer_list remain_on_ch_timer;
+ u8 restore_channel;
+ struct ieee80211_channel remain_on_ch_channel;
+ enum nl80211_channel_type remain_on_ch_type;
+ u64 remain_on_ch_cookie;
+ bool is_ro_ch;
+};
+
+struct wifidirect_info {
+ struct rtw_adapter *padapter;
+ struct timer_list find_phase_timer;
+ struct timer_list restore_p2p_state_timer;
+
+	/* Used to do the scanning. After confirming the peer is available, the driver transmits the P2P frame to the peer. */
+ struct timer_list pre_tx_scan_timer;
+ struct timer_list reset_ch_sitesurvey;
+ struct timer_list reset_ch_sitesurvey2; /* Just for resetting the scan limit function by using p2p nego */
+ struct tx_provdisc_req_info tx_prov_disc_info;
+ struct rx_provdisc_req_info rx_prov_disc_info;
+ struct tx_invite_req_info invitereq_info;
+ struct profile_info profileinfo[P2P_MAX_PERSISTENT_GROUP_NUM]; /* Store the profile information of persistent group */
+ struct tx_invite_resp_info inviteresp_info;
+ struct tx_nego_req_info nego_req_info;
+ struct group_id_info groupid_info; /* Store the group id information when doing the group negotiation handshake. */
+ struct scan_limit_info rx_invitereq_info; /* Used for get the limit scan channel from the Invitation procedure */
+ struct scan_limit_info p2p_info; /* Used for get the limit scan channel from the P2P negotiation handshake */
+#ifdef CONFIG_8723AU_P2P
+ struct wifi_display_info *wfd_info;
+#endif
+ enum P2P_ROLE role;
+ enum P2P_STATE pre_p2p_state;
+ enum P2P_STATE p2p_state;
+ u8 device_addr[ETH_ALEN]; /* The device address should be the mac address of this device. */
+ u8 interface_addr[ETH_ALEN];
+ u8 social_chan[4];
+ u8 listen_channel;
+ u8 operating_channel;
+ u8 listen_dwell; /* This value should be between 1 and 3 */
+ u8 support_rate[8];
+ u8 p2p_wildcard_ssid[P2P_WILDCARD_SSID_LEN];
+ u8 intent; /* should only include the intent value. */
+ u8 p2p_peer_interface_addr[ETH_ALEN];
+ u8 p2p_peer_device_addr[ETH_ALEN];
+ u8 peer_intent; /* Included the intent value and tie breaker value. */
+ u8 device_name[WPS_MAX_DEVICE_NAME_LEN]; /* Device name for displaying on searching device screen */
+ u8 device_name_len;
+ u8 profileindex; /* Used to point to the index of profileinfo array */
+ u8 peer_operating_ch;
+ u8 find_phase_state_exchange_cnt;
+	u16 device_password_id_for_nego; /* The device password ID for group negotiation */
+	u8 negotiation_dialog_token;
+	/* SSID information for group negotiation */
+ u8 nego_ssid[IEEE80211_MAX_SSID_LEN];
+ u8 nego_ssidlen;
+ u8 p2p_group_ssid[IEEE80211_MAX_SSID_LEN];
+ u8 p2p_group_ssid_len;
+	u8 persistent_supported; /* Flag indicating whether the persistent function should be supported. */
+ /* In the Sigma test, the Sigma will provide this enable from the sta_set_p2p CAPI. */
+ /* 0: disable */
+ /* 1: enable */
+	u8 session_available; /* Flag to enable or disable WFD session availability, set "by Sigma" */
+ /* In the Sigma test, the Sigma will disable the session available by using the sta_preset CAPI. */
+ /* 0: disable */
+ /* 1: enable */
+
+ u8 wfd_tdls_enable; /* Flag to enable or disable the TDLS by WFD Sigma */
+ /* 0: disable */
+ /* 1: enable */
+ u8 wfd_tdls_weaksec; /* Flag to enable or disable the weak security function for TDLS by WFD Sigma */
+ /* 0: disable */
+ /* In this case, the driver can't issue the tdsl setup request frame. */
+ /* 1: enable */
+ /* In this case, the driver can issue the tdls setup request frame */
+ /* even the current security is weak security. */
+
+	enum P2P_WPSINFO ui_got_wps_info; /* This field will store the WPS value (PIN value or PBC) that the UI got from the user. */
+	u16 supported_wps_cm; /* This field describes the WPS config methods that this driver supports. */
+	/* The value should be a combination of the config methods defined on page 104 of the WPS v2.0 spec. */
+	uint channel_list_attr_len; /* This field will contain the length of the body of the P2P Channel List attribute of the group negotiation response frame. */
+	u8 channel_list_attr[100]; /* This field will contain the body of the P2P Channel List attribute of the group negotiation response frame. */
+	/* We will use the channel_cnt and channel_list fields when constructing the group negotiation confirm frame. */
+#ifdef CONFIG_8723AU_P2P
+ enum P2P_PS_MODE p2p_ps_mode; /* indicate p2p ps mode */
+ enum P2P_PS_STATE p2p_ps_state; /* indicate p2p ps state */
+	u8 noa_index; /* Identifies an instance of Notice of Absence timing. */
+ u8 ctwindow; /* Client traffic window. A period of time in TU after TBTT. */
+ u8 opp_ps; /* opportunistic power save. */
+ u8 noa_num; /* number of NoA descriptor in P2P IE. */
+ u8 noa_count[P2P_MAX_NOA_NUM]; /* Count for owner, Type of client. */
+ u32 noa_duration[P2P_MAX_NOA_NUM]; /* Max duration for owner, preferred or min acceptable duration for client. */
+ u32 noa_interval[P2P_MAX_NOA_NUM]; /* Length of interval for owner, preferred or max acceptable interval of client. */
+ u32 noa_start_time[P2P_MAX_NOA_NUM]; /* schedule expressed in terms of the lower 4 bytes of the TSF timer. */
+#endif /* CONFIG_8723AU_P2P */
+};
+
+struct tdls_ss_record { /* signal strength record */
+ u8 macaddr[ETH_ALEN];
+ u8 RxPWDBAll;
+ u8 is_tdls_sta; /* true: direct link sta, false: else */
+};
+
+struct tdls_info {
+ u8 ap_prohibited;
+ uint setup_state;
+ u8 sta_cnt;
+	/* 1: tdls sta count == (NUM_STA-1), i.e. the max number of direct links is reached; 0: otherwise */
+ u8 sta_maximum;
+ struct tdls_ss_record ss_record;
+ u8 macid_index; /* macid entry that is ready to write */
+ /* cam entry that is trying to clear, using it in direct link teardown*/
+ u8 clear_cam;
+ u8 ch_sensing;
+ u8 cur_channel;
+ u8 candidate_ch;
+ u8 collect_pkt_num[MAX_CHANNEL_NUM];
+ spinlock_t cmd_lock;
+ spinlock_t hdl_lock;
+ u8 watchdog_count;
+ u8 dev_discovered; /* WFD_TDLS: for sigma test */
+ u8 enable;
+#ifdef CONFIG_8723AU_P2P
+ struct wifi_display_info *wfd_info;
+#endif
+};
+
+struct mlme_priv {
+ spinlock_t lock;
+ int fw_state;
+ u8 bScanInProcess;
+ u8 to_join; /* flag */
+ u8 to_roaming; /* roaming trying times */
+
+ struct rtw_adapter *nic_hdl;
+
+ u8 not_indic_disco;
+ struct rtw_queue scanned_queue;
+
+ struct cfg80211_ssid assoc_ssid;
+ u8 assoc_bssid[6];
+
+ struct wlan_network cur_network;
+
+	/* uint wireless_mode; not used, remove it */
+
+ u32 scan_interval;
+
+ struct timer_list assoc_timer;
+
+ uint assoc_by_bssid;
+ uint assoc_by_rssi;
+
+ struct timer_list scan_to_timer;
+
+ struct timer_list set_scan_deny_timer;
+ atomic_t set_scan_deny; /* 0: allowed, 1: deny */
+
+ struct qos_priv qospriv;
+
+ /* Number of non-HT AP/stations */
+ int num_sta_no_ht;
+
+ int num_FortyMHzIntolerant;
+
+ struct ht_priv htpriv;
+
+ struct rt_link_detect LinkDetectInfo;
+ struct timer_list dynamic_chk_timer; /* dynamic/periodic check timer */
+
+ u8 key_mask; /* use for ips to set wep key after ips_leave23a */
+ u8 acm_mask; /* for wmm acm mask */
+ u8 ChannelPlan;
+ enum rt_scan_type scan_mode; /* active: 1, passive: 0 */
+
+ u8 *wps_probe_req_ie;
+ u32 wps_probe_req_ie_len;
+ u8 *assoc_req;
+ u32 assoc_req_len;
+ u32 assoc_rsp_len;
+ u8 *assoc_rsp;
+ u32 wps_assoc_resp_ie_len;
+ u8 *wps_assoc_resp_ie;
+ u8 *wps_probe_resp_ie;
+ u32 wps_probe_resp_ie_len;
+ u8 *wps_beacon_ie;
+ u32 wps_beacon_ie_len;
+ u32 p2p_go_probe_resp_ie_len; /* for GO */
+ u32 p2p_assoc_req_ie_len;
+ u8 *p2p_beacon_ie;
+ u8 *p2p_probe_req_ie;
+ u8 *p2p_probe_resp_ie;
+ u8 *p2p_go_probe_resp_ie; /* for GO */
+ u8 *p2p_assoc_req_ie;
+ u32 p2p_beacon_ie_len;
+ u32 p2p_probe_req_ie_len;
+ u32 p2p_probe_resp_ie_len;
+ u8 *wfd_assoc_req_ie;
+ u32 wfd_assoc_req_ie_len;
+
+#ifdef CONFIG_8723AU_AP_MODE
+ /* Number of associated Non-ERP stations (i.e., stations using 802.11b
+ * in 802.11g BSS) */
+ int num_sta_non_erp;
+
+ /* Number of associated stations that do not support Short Slot Time */
+ int num_sta_no_short_slot_time;
+
+ /* Number of associated stations that do not support Short Preamble */
+ int num_sta_no_short_preamble;
+
+ int olbc; /* Overlapping Legacy BSS Condition */
+
+ /* Number of HT associated stations that do not support greenfield */
+ int num_sta_ht_no_gf;
+
+ /* Number of associated non-HT stations */
+ /* int num_sta_no_ht; */
+
+ /* Number of HT associated stations 20 MHz */
+ int num_sta_ht_20mhz;
+
+ /* Overlapping BSS information */
+ int olbc_ht;
+
+ u16 ht_op_mode;
+
+ spinlock_t bcn_update_lock;
+ u8 update_bcn;
+
+#endif /* ifdef CONFIG_8723AU_AP_MODE */
+
+ u8 *wfd_beacon_ie;
+ u8 *wfd_probe_req_ie;
+ u8 *wfd_probe_resp_ie;
+ u8 *wfd_go_probe_resp_ie; /* for GO */
+
+ u32 wfd_beacon_ie_len;
+ u32 wfd_probe_req_ie_len;
+ u32 wfd_probe_resp_ie_len;
+ u32 wfd_go_probe_resp_ie_len; /* for GO */
+};
+
+#ifdef CONFIG_8723AU_AP_MODE
+
+struct hostapd_priv {
+ struct rtw_adapter *padapter;
+};
+
+int hostapd_mode_init(struct rtw_adapter *padapter);
+void hostapd_mode_unload(struct rtw_adapter *padapter);
+#endif
+
+void rtw_joinbss_event_prehandle23a(struct rtw_adapter *adapter, u8 *pbuf);
+void rtw_survey_event_cb23a(struct rtw_adapter *adapter, u8 *pbuf);
+void rtw_surveydone_event_callback23a(struct rtw_adapter *adapter, u8 *pbuf);
+void rtw23a_joinbss_event_cb(struct rtw_adapter *adapter, u8 *pbuf);
+void rtw_stassoc_event_callback23a(struct rtw_adapter *adapter, u8 *pbuf);
+void rtw_stadel_event_callback23a(struct rtw_adapter *adapter, u8 *pbuf);
+void rtw_atimdone_event_callback23a(struct rtw_adapter *adapter, u8 *pbuf);
+void rtw_cpwm_event_callback23a(struct rtw_adapter *adapter, u8 *pbuf);
+
+
+int event_thread(void *context);
+void rtw23a_join_to_handler(unsigned long);
+
+void rtw_free_network_queue23a(struct rtw_adapter *adapter, u8 isfreeall);
+int rtw_init_mlme_priv23a(struct rtw_adapter *adapter);
+
+void rtw_free_mlme_priv23a(struct mlme_priv *pmlmepriv);
+
+int rtw_select_and_join_from_scanned_queue23a(struct mlme_priv *pmlmepriv);
+int rtw_set_key23a(struct rtw_adapter *adapter,
+ struct security_priv *psecuritypriv, int keyid, u8 set_tx);
+int rtw_set_auth23a(struct rtw_adapter *adapter,
+ struct security_priv *psecuritypriv);
+
+static inline u8 *get_bssid(struct mlme_priv *pmlmepriv)
+{ /* if sta_mode:pmlmepriv->cur_network.network.MacAddress => bssid */
+ /* if adhoc_mode:pmlmepriv->cur_network.network.MacAddress => ibss mac address */
+ return pmlmepriv->cur_network.network.MacAddress;
+}
+
+static inline int check_fwstate(struct mlme_priv *pmlmepriv, int state)
+{
+ if (pmlmepriv->fw_state & state)
+ return true;
+
+ return false;
+}
+
+static inline int get_fwstate(struct mlme_priv *pmlmepriv)
+{
+ return pmlmepriv->fw_state;
+}
+
+/*
+ * No Limit on the calling context,
+ * therefore set it to be the critical section...
+ *
+ * ### NOTE:#### (!!!!)
+ * MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
+ */
+static inline void set_fwstate(struct mlme_priv *pmlmepriv, int state)
+{
+ pmlmepriv->fw_state |= state;
+ /* FOR HW integration */
+ if (_FW_UNDER_SURVEY == state)
+ pmlmepriv->bScanInProcess = true;
+}
+
+static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
+{
+ pmlmepriv->fw_state &= ~state;
+ /* FOR HW integration */
+ if (_FW_UNDER_SURVEY == state)
+ pmlmepriv->bScanInProcess = false;
+}
+
+/*
+ * No Limit on the calling context,
+ * therefore set it to be the critical section...
+ */
+static inline void clr_fwstate(struct mlme_priv *pmlmepriv, int state)
+{
+ spin_lock_bh(&pmlmepriv->lock);
+ if (check_fwstate(pmlmepriv, state) == true)
+ pmlmepriv->fw_state ^= state;
+ spin_unlock_bh(&pmlmepriv->lock);
+}
+
+static inline void clr_fwstate_ex(struct mlme_priv *pmlmepriv, int state)
+{
+ spin_lock_bh(&pmlmepriv->lock);
+ _clr_fwstate_(pmlmepriv, state);
+ spin_unlock_bh(&pmlmepriv->lock);
+}
+
+u16 rtw_get_capability23a(struct wlan_bssid_ex *bss);
+void rtw_update_scanned_network23a(struct rtw_adapter *adapter,
+ struct wlan_bssid_ex *target);
+void rtw_disconnect_hdl23a_under_linked(struct rtw_adapter *adapter,
+ struct sta_info *psta, u8 free_assoc);
+void rtw_generate_random_ibss23a(u8 *pibss);
+struct wlan_network *rtw_find_network23a(struct rtw_queue *scanned_queue, u8 *addr);
+struct wlan_network *rtw_get_oldest_wlan_network23a(struct rtw_queue *scanned_queue);
+
+void rtw_free_assoc_resources23a(struct rtw_adapter *adapter,
+ int lock_scanned_queue);
+void rtw_indicate_disconnect23a(struct rtw_adapter *adapter);
+void rtw_indicate_connect23a(struct rtw_adapter *adapter);
+void rtw_indicate_scan_done23a(struct rtw_adapter *padapter, bool aborted);
+void rtw_scan_abort23a(struct rtw_adapter *adapter);
+
+int rtw_restruct_sec_ie23a(struct rtw_adapter *adapter, u8 *in_ie, u8 *out_ie,
+ uint in_len);
+int rtw_restruct_wmm_ie23a(struct rtw_adapter *adapter, u8 *in_ie, u8 *out_ie,
+ uint in_len, uint initial_out_len);
+void rtw_init_registrypriv_dev_network23a(struct rtw_adapter *adapter);
+
+void rtw_update_registrypriv_dev_network23a(struct rtw_adapter *adapter);
+
+void rtw_get_encrypt_decrypt_from_registrypriv23a(struct rtw_adapter *adapter);
+
+void rtw_scan_timeout_handler23a(unsigned long data);
+
+void rtw_dynamic_check_timer_handler(unsigned long data);
+bool rtw_is_scan_deny(struct rtw_adapter *adapter);
+void rtw_clear_scan_deny(struct rtw_adapter *adapter);
+void rtw_set_scan_deny_timer_hdl(unsigned long data);
+void rtw_set_scan_deny(struct rtw_adapter *adapter, u32 ms);
+
+int _rtw_init_mlme_priv23a(struct rtw_adapter *padapter);
+
+void rtw23a_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
+
+void _rtw_free_mlme_priv23a(struct mlme_priv *pmlmepriv);
+
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv);
+
+void _rtw_free_network23a(struct mlme_priv *pmlmepriv,
+ struct wlan_network *pnetwork, u8 isfreeall);
+void _rtw_free_network23a_nolock23a(struct mlme_priv *pmlmepriv,
+ struct wlan_network *pnetwork);
+
+struct wlan_network *_rtw_find_network23a(struct rtw_queue *scanned_queue, u8 *addr);
+
+void _rtw_free_network23a_queue23a(struct rtw_adapter *padapter, u8 isfreeall);
+
+int rtw_if_up23a(struct rtw_adapter *padapter);
+
+int rtw_linked_check(struct rtw_adapter *padapter);
+
+u8 *rtw_get_capability23a_from_ie(u8 *ie);
+u8 *rtw_get_timestampe_from_ie23a(u8 *ie);
+u8 *rtw_get_beacon_interval23a_from_ie(u8 *ie);
+
+
+void rtw_joinbss_reset23a(struct rtw_adapter *padapter);
+
+unsigned int rtw_restructure_ht_ie23a(struct rtw_adapter *padapter, u8 *in_ie,
+ u8 *out_ie, uint in_len, uint *pout_len);
+void rtw_update_ht_cap23a(struct rtw_adapter *padapter,
+ u8 *pie, uint ie_len);
+void rtw_issue_addbareq_cmd23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe);
+
+int rtw_is_same_ibss23a(struct rtw_adapter *adapter,
+ struct wlan_network *pnetwork);
+int is_same_network23a(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst);
+
+void _rtw23a_roaming(struct rtw_adapter *adapter,
+ struct wlan_network *tgt_network);
+void rtw23a_roaming(struct rtw_adapter *adapter,
+ struct wlan_network *tgt_network);
+void rtw_set_roaming(struct rtw_adapter *adapter, u8 to_roaming);
+u8 rtw_to_roaming(struct rtw_adapter *adapter);
+void rtw_stassoc_hw_rpt23a(struct rtw_adapter *adapter, struct sta_info *psta);
+
+#endif /* __RTL871X_MLME_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
new file mode 100644
index 000000000000..0aaf0d5d8aea
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
@@ -0,0 +1,780 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_MLME_EXT_H_
+#define __RTW_MLME_EXT_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wlan_bssdef.h>
+
+
+/* Commented by Albert 20101105 */
+/* Increase the SURVEY_TO value from 100 to 150 (100 ms to 150 ms). */
+/* The Realtek 8188CE SoftAP takes around 100 ms to send the probe response after receiving the probe request, */
+/* so this driver extends the dwell time on each scanned channel */
+/* to increase the chance of receiving the probe response from the SoftAP. */
+
+#define SURVEY_TO (100)
+#define REAUTH_TO	(300)	/* was (50) */
+#define REASSOC_TO	(300)	/* was (50) */
+/* define DISCONNECT_TO (3000) */
+#define ADDBA_TO (2000)
+
+#define LINKED_TO (1) /* unit:2 sec, 1x2=2 sec */
+
+#define REAUTH_LIMIT (4)
+#define REASSOC_LIMIT (4)
+#define READDBA_LIMIT (2)
+
+#define ROAMING_LIMIT 8
+
+#define DYNAMIC_FUNC_DISABLE (0x0)
+
+/* ====== enum odm_ability ======== */
+/* BB ODM section BIT 0-15 */
+#define DYNAMIC_BB_DIG BIT(0)
+#define DYNAMIC_BB_RA_MASK BIT(1)
+#define DYNAMIC_BB_DYNAMIC_TXPWR BIT(2)
+#define DYNAMIC_BB_BB_FA_CNT BIT(3)
+
+#define DYNAMIC_BB_RSSI_MONITOR BIT(4)
+#define DYNAMIC_BB_CCK_PD BIT(5)
+#define DYNAMIC_BB_ANT_DIV BIT(6)
+#define DYNAMIC_BB_PWR_SAVE BIT(7)
+#define DYNAMIC_BB_PWR_TRAIN BIT(8)
+#define DYNAMIC_BB_RATE_ADAPTIVE BIT(9)
+#define DYNAMIC_BB_PATH_DIV BIT(10)
+#define DYNAMIC_BB_PSD BIT(11)
+
+/* MAC DM section BIT 16-23 */
+#define DYNAMIC_MAC_EDCA_TURBO		BIT(16)
+#define DYNAMIC_MAC_EARLY_MODE BIT(17)
+
+/* RF ODM section BIT 24-31 */
+#define DYNAMIC_RF_TX_PWR_TRACK BIT(24)
+#define DYNAMIC_RF_RX_GAIN_TRACK BIT(25)
+#define DYNAMIC_RF_CALIBRATION BIT(26)
+
+#define DYNAMIC_ALL_FUNC_ENABLE 0xFFFFFFF
+
+#define _HW_STATE_NOLINK_ 0x00
+#define _HW_STATE_ADHOC_ 0x01
+#define _HW_STATE_STATION_ 0x02
+#define _HW_STATE_AP_ 0x03
+
+
+#define _1M_RATE_ 0
+#define _2M_RATE_ 1
+#define _5M_RATE_ 2
+#define _11M_RATE_ 3
+#define _6M_RATE_ 4
+#define _9M_RATE_ 5
+#define _12M_RATE_ 6
+#define _18M_RATE_ 7
+#define _24M_RATE_ 8
+#define _36M_RATE_ 9
+#define _48M_RATE_ 10
+#define _54M_RATE_ 11
+
+
+extern unsigned char RTW_WPA_OUI23A[];
+extern unsigned char WMM_OUI23A[];
+extern unsigned char WPS_OUI23A[];
+extern unsigned char WFD_OUI23A[];
+extern unsigned char P2P_OUI23A[];
+
+extern unsigned char WMM_INFO_OUI23A[];
+extern unsigned char WMM_PARA_OUI23A[];
+
+
+/* */
+/* Channel Plan Type. */
+/* Note: */
+/* We only add a new channel plan when it differs from all of the channel plans below. */
+/* If you just want to customize the actions (scan period or join actions) of one of the channel plans, */
+/* customize them in struct rt_channel_info in the RT_CHANNEL_LIST. */
+/* */
+enum { /* _RT_CHANNEL_DOMAIN */
+ /* old channel plan mapping ===== */
+ RT_CHANNEL_DOMAIN_FCC = 0x00,
+ RT_CHANNEL_DOMAIN_IC = 0x01,
+ RT_CHANNEL_DOMAIN_ETSI = 0x02,
+ RT_CHANNEL_DOMAIN_SPAIN = 0x03,
+ RT_CHANNEL_DOMAIN_FRANCE = 0x04,
+ RT_CHANNEL_DOMAIN_MKK = 0x05,
+ RT_CHANNEL_DOMAIN_MKK1 = 0x06,
+ RT_CHANNEL_DOMAIN_ISRAEL = 0x07,
+ RT_CHANNEL_DOMAIN_TELEC = 0x08,
+ RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN = 0x09,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_13 = 0x0A,
+ RT_CHANNEL_DOMAIN_TAIWAN = 0x0B,
+ RT_CHANNEL_DOMAIN_CHINA = 0x0C,
+ RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO = 0x0D,
+ RT_CHANNEL_DOMAIN_KOREA = 0x0E,
+ RT_CHANNEL_DOMAIN_TURKEY = 0x0F,
+ RT_CHANNEL_DOMAIN_JAPAN = 0x10,
+ RT_CHANNEL_DOMAIN_FCC_NO_DFS = 0x11,
+ RT_CHANNEL_DOMAIN_JAPAN_NO_DFS = 0x12,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_5G = 0x13,
+ RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS = 0x14,
+
+ /* new channel plan mapping, (2GDOMAIN_5GDOMAIN) ===== */
+ RT_CHANNEL_DOMAIN_WORLD_NULL = 0x20,
+ RT_CHANNEL_DOMAIN_ETSI1_NULL = 0x21,
+ RT_CHANNEL_DOMAIN_FCC1_NULL = 0x22,
+ RT_CHANNEL_DOMAIN_MKK1_NULL = 0x23,
+ RT_CHANNEL_DOMAIN_ETSI2_NULL = 0x24,
+ RT_CHANNEL_DOMAIN_FCC1_FCC1 = 0x25,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI1 = 0x26,
+ RT_CHANNEL_DOMAIN_MKK1_MKK1 = 0x27,
+ RT_CHANNEL_DOMAIN_WORLD_KCC1 = 0x28,
+ RT_CHANNEL_DOMAIN_WORLD_FCC2 = 0x29,
+ RT_CHANNEL_DOMAIN_WORLD_FCC3 = 0x30,
+ RT_CHANNEL_DOMAIN_WORLD_FCC4 = 0x31,
+ RT_CHANNEL_DOMAIN_WORLD_FCC5 = 0x32,
+ RT_CHANNEL_DOMAIN_WORLD_FCC6 = 0x33,
+ RT_CHANNEL_DOMAIN_FCC1_FCC7 = 0x34,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI2 = 0x35,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI3 = 0x36,
+ RT_CHANNEL_DOMAIN_MKK1_MKK2 = 0x37,
+ RT_CHANNEL_DOMAIN_MKK1_MKK3 = 0x38,
+ RT_CHANNEL_DOMAIN_FCC1_NCC1 = 0x39,
+ RT_CHANNEL_DOMAIN_FCC1_NCC2 = 0x40,
+ RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G = 0x41,
+ /* Add new channel plan above this line=============== */
+ RT_CHANNEL_DOMAIN_MAX,
+ RT_CHANNEL_DOMAIN_REALTEK_DEFINE = 0x7F,
+};
+
+enum { /* _RT_CHANNEL_DOMAIN_2G */
+	RT_CHANNEL_DOMAIN_2G_WORLD = 0x00,	/* Worldwide 13 */
+ RT_CHANNEL_DOMAIN_2G_ETSI1 = 0x01, /* Europe */
+ RT_CHANNEL_DOMAIN_2G_FCC1 = 0x02, /* US */
+ RT_CHANNEL_DOMAIN_2G_MKK1 = 0x03, /* Japan */
+ RT_CHANNEL_DOMAIN_2G_ETSI2 = 0x04, /* France */
+ RT_CHANNEL_DOMAIN_2G_NULL = 0x05,
+ /* Add new channel plan above this line=============== */
+ RT_CHANNEL_DOMAIN_2G_MAX,
+};
+
+enum { /* _RT_CHANNEL_DOMAIN_5G */
+ RT_CHANNEL_DOMAIN_5G_NULL = 0x00,
+ RT_CHANNEL_DOMAIN_5G_ETSI1 = 0x01, /* Europe */
+ RT_CHANNEL_DOMAIN_5G_ETSI2 = 0x02, /* Australia, New Zealand */
+ RT_CHANNEL_DOMAIN_5G_ETSI3 = 0x03, /* Russia */
+ RT_CHANNEL_DOMAIN_5G_FCC1 = 0x04, /* US */
+ RT_CHANNEL_DOMAIN_5G_FCC2 = 0x05, /* FCC o/w DFS Channels */
+ RT_CHANNEL_DOMAIN_5G_FCC3 = 0x06, /* India, Mexico */
+ RT_CHANNEL_DOMAIN_5G_FCC4 = 0x07, /* Venezuela */
+ RT_CHANNEL_DOMAIN_5G_FCC5 = 0x08, /* China */
+ RT_CHANNEL_DOMAIN_5G_FCC6 = 0x09, /* Israel */
+ RT_CHANNEL_DOMAIN_5G_FCC7_IC1 = 0x0A, /* US, Canada */
+ RT_CHANNEL_DOMAIN_5G_KCC1 = 0x0B, /* Korea */
+ RT_CHANNEL_DOMAIN_5G_MKK1 = 0x0C, /* Japan */
+ RT_CHANNEL_DOMAIN_5G_MKK2 = 0x0D, /* Japan (W52, W53) */
+ RT_CHANNEL_DOMAIN_5G_MKK3 = 0x0E, /* Japan (W56) */
+ RT_CHANNEL_DOMAIN_5G_NCC1 = 0x0F, /* Taiwan */
+ RT_CHANNEL_DOMAIN_5G_NCC2 = 0x10, /* Taiwan o/w DFS */
+ /* Add new channel plan above this line=============== */
+ /* Driver Self Defined ===== */
+ RT_CHANNEL_DOMAIN_5G_FCC = 0x11,
+ RT_CHANNEL_DOMAIN_5G_JAPAN_NO_DFS = 0x12,
+ RT_CHANNEL_DOMAIN_5G_FCC4_NO_DFS = 0x13,
+ RT_CHANNEL_DOMAIN_5G_MAX,
+};
+
+#define rtw_is_channel_plan_valid(chplan) ((chplan) < RT_CHANNEL_DOMAIN_MAX || (chplan) == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
+
+struct rt_channel_plan {
+ unsigned char Channel[MAX_CHANNEL_NUM];
+ unsigned char Len;
+};
+
+struct rt_channel_plan_2g {
+ unsigned char Channel[MAX_CHANNEL_NUM_2G];
+ unsigned char Len;
+};
+
+struct rt_channel_plan_5g {
+ unsigned char Channel[MAX_CHANNEL_NUM_5G];
+ unsigned char Len;
+};
+
+struct rt_channel_plan_map {
+ unsigned char Index2G;
+ unsigned char Index5G;
+};
+
+enum Associated_AP {
+ atherosAP = 0,
+ broadcomAP = 1,
+ ciscoAP = 2,
+ marvellAP = 3,
+ ralinkAP = 4,
+ realtekAP = 5,
+ airgocapAP = 6,
+ unknownAP = 7,
+ maxAP,
+};
+
+enum { /* HT_IOT_PEER_E */
+ HT_IOT_PEER_UNKNOWN = 0,
+ HT_IOT_PEER_REALTEK = 1,
+ HT_IOT_PEER_REALTEK_92SE = 2,
+ HT_IOT_PEER_BROADCOM = 3,
+ HT_IOT_PEER_RALINK = 4,
+ HT_IOT_PEER_ATHEROS = 5,
+ HT_IOT_PEER_CISCO = 6,
+ HT_IOT_PEER_MERU = 7,
+ HT_IOT_PEER_MARVELL = 8,
+ HT_IOT_PEER_REALTEK_SOFTAP = 9,/* peer is RealTek SOFT_AP, by Bohn, 2009.12.17 */
+ HT_IOT_PEER_SELF_SOFTAP = 10, /* Self is SoftAP */
+ HT_IOT_PEER_AIRGO = 11,
+ HT_IOT_PEER_INTEL = 12,
+ HT_IOT_PEER_RTK_APCLIENT = 13,
+ HT_IOT_PEER_REALTEK_81XX = 14,
+ HT_IOT_PEER_REALTEK_WOW = 15,
+ HT_IOT_PEER_TENDA = 16,
+ HT_IOT_PEER_MAX = 17
+};
+
+enum SCAN_STATE {
+ SCAN_DISABLE = 0,
+ SCAN_START = 1,
+ SCAN_TXNULL = 2,
+ SCAN_PROCESS = 3,
+ SCAN_COMPLETE = 4,
+ SCAN_STATE_MAX,
+};
+
+struct mlme_handler {
+ char *str;
+ unsigned int (*func)(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+};
+
+struct action_handler {
+ unsigned int num;
+	char *str;
+ unsigned int (*func)(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+};
+
+struct ss_res {
+ int state;
+ int bss_cnt;
+ int channel_idx;
+ int scan_mode;
+ u8 ssid_num;
+ u8 ch_num;
+ struct cfg80211_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
+};
+
+/* define AP_MODE 0x0C */
+/* define STATION_MODE 0x08 */
+/* define AD_HOC_MODE 0x04 */
+/* define NO_LINK_MODE 0x00 */
+
+#define WIFI_FW_NULL_STATE _HW_STATE_NOLINK_
+#define WIFI_FW_STATION_STATE _HW_STATE_STATION_
+#define WIFI_FW_AP_STATE _HW_STATE_AP_
+#define WIFI_FW_ADHOC_STATE _HW_STATE_ADHOC_
+
+#define WIFI_FW_AUTH_NULL 0x00000100
+#define WIFI_FW_AUTH_STATE 0x00000200
+#define WIFI_FW_AUTH_SUCCESS 0x00000400
+
+#define WIFI_FW_ASSOC_STATE 0x00002000
+#define WIFI_FW_ASSOC_SUCCESS 0x00004000
+
+#define WIFI_FW_LINKING_STATE (WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE | WIFI_FW_AUTH_SUCCESS |WIFI_FW_ASSOC_STATE)
+
+struct FW_Sta_Info {
+ struct sta_info *psta;
+ u32 status;
+ u32 rx_pkt;
+ u32 retry;
+ unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
+};
+
+/*
+ * Usage:
+ * When one interface acts as an AP while another interface in STA mode is
+ * scanning, the driver should periodically switch back to the AP's operating
+ * channel.
+ * Parameters:
+ * After scanning RTW_SCAN_NUM_OF_CH channels, the driver switches back to the
+ * AP's operating channel for RTW_STAY_AP_CH_MILLISECOND * SURVEY_TO
+ * milliseconds.
+ * Example:
+ * For a chip supporting 2.4 GHz + 5 GHz, with the AP operating on channel 1,
+ * RTW_SCAN_NUM_OF_CH = 8, RTW_STAY_AP_CH_MILLISECOND = 3 and SURVEY_TO = 100:
+ * when the STA interface receives a set_scan command, the driver will
+ * 1. scan channels 1, 2, 3, 4, 5, 6, 7, 8,
+ * 2. return to channel 1 for 300 milliseconds,
+ * 3. survey channels 9, 10, 11, 36, 40, 44, 48, 52,
+ * 4. return to channel 1 for 300 milliseconds,
+ * 5. ... and so on, until the survey is done.
+ */
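A small sketch of the timing above (illustrative only; RTW_SCAN_NUM_OF_CH and RTW_STAY_AP_CH_MILLISECOND are the tuning parameters named in the comment and are not defined in this hunk):

	/* Sketch only, restating the example values from the comment above. */
	unsigned int chans_per_batch = 8;		/* RTW_SCAN_NUM_OF_CH: channels scanned per batch */
	unsigned int stay_factor = 3;			/* RTW_STAY_AP_CH_MILLISECOND */
	unsigned int stay_ms = stay_factor * SURVEY_TO;	/* 3 * 100 = 300 ms on the AP channel */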
+
+struct mlme_ext_info {
+ u32 state;
+ u32 reauth_count;
+ u32 reassoc_count;
+ u32 link_count;
+ u32 auth_seq;
+ u32 auth_algo; /* 802.11 auth, could be open, shared, auto */
+ u32 authModeToggle;
+	u32 enc_algo;	/* encryption algorithm */
+	u32 key_index;	/* only valid for legacy WEP, 0~3 for key id */
+ u32 iv;
+ u8 chg_txt[128];
+ u16 aid;
+ u16 bcn_interval;
+ u16 capability;
+ u8 assoc_AP_vendor;
+ u8 slotTime;
+ u8 preamble_mode;
+ u8 WMM_enable;
+ u8 ERP_enable;
+ u8 ERP_IE;
+ u8 HT_enable;
+ u8 HT_caps_enable;
+ u8 HT_info_enable;
+ u8 HT_protection;
+ u8 turboMode_cts2self;
+ u8 turboMode_rtsen;
+ u8 SM_PS;
+ u8 agg_enable_bitmap;
+ u8 ADDBA_retry_count;
+ u8 candidate_tid_bitmap;
+ u8 dialogToken;
+ /* Accept ADDBA Request */
+ bool bAcceptAddbaReq;
+ u8 bwmode_updated;
+ u8 hidden_ssid_mode;
+
+ struct ADDBA_request ADDBA_req;
+ struct WMM_para_element WMM_param;
+ struct HT_caps_element HT_caps;
+ struct HT_info_element HT_info;
+	struct wlan_bssid_ex network; /* joined network or bss_network; in AP mode it is the same as cur_network.network */
+ struct FW_Sta_Info FW_sta_info[NUM_STA];
+};
+
+/* The channel information about this channel including joining, scanning, and power constraints. */
+struct rt_channel_info {
+ u8 ChannelNum; /* The channel number. */
+ enum rt_scan_type ScanType; /* Scan type such as passive or active scan. */
+};
+
+int rtw_ch_set_search_ch23a(struct rt_channel_info *ch_set, const u32 ch);
+
+/* P2P_MAX_REG_CLASSES - Maximum number of regulatory classes */
+#define P2P_MAX_REG_CLASSES 10
+
+/* P2P_MAX_REG_CLASS_CHANNELS - Maximum number of channels per regulatory class */
+#define P2P_MAX_REG_CLASS_CHANNELS 20
+
+/* struct p2p_channels - List of supported channels */
+struct p2p_channels {
+ /* struct p2p_reg_class - Supported regulatory class */
+ struct p2p_reg_class {
+ /* reg_class - Regulatory class (IEEE 802.11-2007, Annex J) */
+ u8 reg_class;
+
+ /* channel - Supported channels */
+ u8 channel[P2P_MAX_REG_CLASS_CHANNELS];
+
+ /* channels - Number of channel entries in use */
+ size_t channels;
+ } reg_class[P2P_MAX_REG_CLASSES];
+
+ /* reg_classes - Number of reg_class entries in use */
+ size_t reg_classes;
+};
+
+struct p2p_oper_class_map {
+	enum hw_mode { IEEE80211G, IEEE80211A } mode;
+ u8 op_class;
+ u8 min_chan;
+ u8 max_chan;
+ u8 inc;
+ enum {
+ BW20, BW40PLUS, BW40MINUS
+ } bw;
+};
+
+struct mlme_ext_priv {
+ struct rtw_adapter *padapter;
+ u8 mlmeext_init;
+ atomic_t event_seq;
+ u16 mgnt_seq;
+
+ /* struct fw_priv fwpriv; */
+
+ unsigned char cur_channel;
+ unsigned char cur_bwmode;
+ unsigned char cur_ch_offset;/* PRIME_CHNL_OFFSET */
+ unsigned char cur_wireless_mode; /* NETWORK_TYPE */
+
+ unsigned char max_chan_nums;
+ struct rt_channel_info channel_set[MAX_CHANNEL_NUM];
+ struct p2p_channels channel_list;
+ unsigned char basicrate[NumRates];
+ unsigned char datarate[NumRates];
+
+ struct ss_res sitesurvey_res;
+	/* For sta/adhoc mode: holds the current scanning/connecting/connected info.
+	 * For AP mode: network includes the AP's cap_info.
+	 */
+	struct mlme_ext_info mlmext_info;
+ struct timer_list survey_timer;
+ struct timer_list link_timer;
+ u16 chan_scan_time;
+
+ u8 scan_abort;
+ u8 tx_rate; /* TXRATE when USERATE is set. */
+
+ u32 retry; /* retry for issue probereq */
+
+ u64 TSFValue;
+
+ unsigned char bstart_bss;
+ u8 update_channel_plan_by_ap_done;
+ /* recv_decache check for Action_public frame */
+ u8 action_public_dialog_token;
+ u16 action_public_rxseq;
+ u8 active_keep_alive_check;
+};
+
+int init_mlme_ext_priv23a(struct rtw_adapter *padapter);
+int init_hw_mlme_ext23a(struct rtw_adapter *padapter);
+void free_mlme_ext_priv23a(struct mlme_ext_priv *pmlmeext);
+void init_mlme_ext_timer23a(struct rtw_adapter *padapter);
+void init_addba_retry_timer23a(struct sta_info *psta);
+struct xmit_frame *alloc_mgtxmitframe23a(struct xmit_priv *pxmitpriv);
+
+unsigned char networktype_to_raid23a(unsigned char network_type);
+u8 judge_network_type23a(struct rtw_adapter *padapter, unsigned char *rate,
+ int ratelen);
+void get_rate_set23a(struct rtw_adapter *padapter, unsigned char *pbssrate,
+ int *bssrate_len);
+void UpdateBrateTbl23a(struct rtw_adapter *padapter, u8 *mBratesOS);
+void Update23aTblForSoftAP(u8 *bssrateset, u32 bssratelen);
+
+void Save_DM_Func_Flag23a(struct rtw_adapter *padapter);
+void Restore_DM_Func_Flag23a(struct rtw_adapter *padapter);
+void Switch_DM_Func23a(struct rtw_adapter *padapter, unsigned long mode, u8 enable);
+
+void Set_MSR23a(struct rtw_adapter *padapter, u8 type);
+
+u8 rtw_get_oper_ch23a(struct rtw_adapter *adapter);
+void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch);
+u8 rtw_get_oper_bw23a(struct rtw_adapter *adapter);
+void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw);
+u8 rtw_get_oper_ch23aoffset(struct rtw_adapter *adapter);
+void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset);
+
+void set_channel_bwmode23a(struct rtw_adapter *padapter, unsigned char channel,
+ unsigned char channel_offset, unsigned short bwmode);
+void SelectChannel23a(struct rtw_adapter *padapter, unsigned char channel);
+void SetBWMode23a(struct rtw_adapter *padapter, unsigned short bwmode,
+ unsigned char channel_offset);
+
+unsigned int decide_wait_for_beacon_timeout23a(unsigned int bcn_interval);
+
+void write_cam23a(struct rtw_adapter *padapter, u8 entry, u16 ctrl,
+ u8 *mac, u8 *key);
+void clear_cam_entry23a(struct rtw_adapter *padapter, u8 entry);
+
+void invalidate_cam_all23a(struct rtw_adapter *padapter);
+void CAM_empty_entry23a(struct rtw_adapter *Adapter, u8 ucIndex);
+
+int allocate_fw_sta_entry23a(struct rtw_adapter *padapter);
+void flush_all_cam_entry23a(struct rtw_adapter *padapter);
+
+bool IsLegal5GChannel(struct rtw_adapter *Adapter, u8 channel);
+
+void site_survey23a(struct rtw_adapter *padapter);
+u8 collect_bss_info23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame,
+ struct wlan_bssid_ex *bssid);
+void update_network23a(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
+ struct rtw_adapter *padapter, bool update_ie);
+
+int get_bsstype23a(unsigned short capability);
+u8 *get_my_bssid23a(struct wlan_bssid_ex *pnetwork);
+u16 get_beacon_interval23a(struct wlan_bssid_ex *bss);
+
+int is_client_associated_to_ap23a(struct rtw_adapter *padapter);
+int is_client_associated_to_ibss23a(struct rtw_adapter *padapter);
+int is_IBSS_empty23a(struct rtw_adapter *padapter);
+
+unsigned char check_assoc_AP23a(u8 *pframe, uint len);
+
+int WMM_param_handler23a(struct rtw_adapter *padapter,
+ struct ndis_802_11_var_ies *pIE);
+#ifdef CONFIG_8723AU_P2P
+int WFD_info_handler(struct rtw_adapter *padapter,
+ struct ndis_802_11_var_ies *pIE);
+#endif
+void WMMOnAssocRsp23a(struct rtw_adapter *padapter);
+
+void HT_caps_handler23a(struct rtw_adapter *padapter,
+ struct ndis_802_11_var_ies *pIE);
+void HT_info_handler23a(struct rtw_adapter *padapter,
+ struct ndis_802_11_var_ies *pIE);
+void HTOnAssocRsp23a(struct rtw_adapter *padapter);
+
+void ERP_IE_handler23a(struct rtw_adapter *padapter,
+ struct ndis_802_11_var_ies *pIE);
+void VCS_update23a(struct rtw_adapter *padapter, struct sta_info *psta);
+
+void update_beacon23a_info(struct rtw_adapter *padapter, u8 *pframe, uint len,
+ struct sta_info *psta);
+int rtw_check_bcn_info23a(struct rtw_adapter *Adapter, u8 *pframe, u32 packet_len);
+void update_IOT_info23a(struct rtw_adapter *padapter);
+void update_capinfo23a(struct rtw_adapter *Adapter, u16 updateCap);
+void update_wireless_mode23a(struct rtw_adapter *padapter);
+void update_tx_basic_rate23a(struct rtw_adapter *padapter, u8 modulation);
+void update_bmc_sta_support_rate23a(struct rtw_adapter *padapter, u32 mac_id);
+int update_sta_support_rate23a(struct rtw_adapter *padapter, u8 *pvar_ie,
+ uint var_ie_len, int cam_idx);
+
+/* for sta/adhoc mode */
+void update_sta_info23a(struct rtw_adapter *padapter, struct sta_info *psta);
+unsigned int update_basic_rate23a(unsigned char *ptn, unsigned int ptn_sz);
+unsigned int update_supported_rate23a(unsigned char *ptn, unsigned int ptn_sz);
+unsigned int update_MSC_rate23a(struct HT_caps_element *pHT_caps);
+void Update_RA_Entry23a(struct rtw_adapter *padapter, struct sta_info *psta);
+void set_sta_rate23a(struct rtw_adapter *padapter, struct sta_info *psta);
+
+unsigned int receive_disconnect23a(struct rtw_adapter *padapter,
+ unsigned char *MacAddr, unsigned short reason);
+
+unsigned char get_highest_rate_idx23a(u32 mask);
+int support_short_GI23a(struct rtw_adapter *padapter,
+ struct HT_caps_element *pHT_caps);
+unsigned int is_ap_in_tkip23a(struct rtw_adapter *padapter);
+unsigned int is_ap_in_wep23a(struct rtw_adapter *padapter);
+unsigned int should_forbid_n_rate23a(struct rtw_adapter *padapter);
+
+void report_join_res23a(struct rtw_adapter *padapter, int res);
+void report_survey_event23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame);
+void report_surveydone_event23a(struct rtw_adapter *padapter);
+void report_del_sta_event23a(struct rtw_adapter *padapter,
+ unsigned char *MacAddr, unsigned short reason);
+void report_add_sta_event23a(struct rtw_adapter *padapter,
+ unsigned char *MacAddr, int cam_idx);
+
+void beacon_timing_control23a(struct rtw_adapter *padapter);
+u8 set_tx_beacon_cmd23a(struct rtw_adapter *padapter);
+unsigned int setup_beacon_frame(struct rtw_adapter *padapter,
+ unsigned char *beacon_frame);
+void update_mgnt_tx_rate23a(struct rtw_adapter *padapter, u8 rate);
+void update_mgntframe_attrib23a(struct rtw_adapter *padapter,
+ struct pkt_attrib *pattrib);
+void dump_mgntframe23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pmgntframe);
+s32 dump_mgntframe23a_and_wait(struct rtw_adapter *padapter,
+ struct xmit_frame *pmgntframe, int timeout_ms);
+s32 dump_mgntframe23a_and_wait_ack23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pmgntframe);
+
+#ifdef CONFIG_8723AU_P2P
+void issue_probersp23a_p2p23a(struct rtw_adapter *padapter, unsigned char *da);
+void issue_p2p_provision_request23a(struct rtw_adapter *padapter, u8 *pssid,
+				    u8 ussidlen, u8 *pdev_raddr);
+void issue_p2p_GO_request23a(struct rtw_adapter *padapter, u8 *raddr);
+void issue23a_probereq_p2p(struct rtw_adapter *padapter, u8 *da);
+int issue23a_probereq_p2p_ex(struct rtw_adapter *adapter, u8 *da, int try_cnt,
+ int wait_ms);
+void issue_p2p_invitation_response23a(struct rtw_adapter *padapter, u8 *raddr,
+ u8 dialogToken, u8 success);
+void issue_p2p_invitation_request23a(struct rtw_adapter *padapter, u8 *raddr);
+#endif /* CONFIG_8723AU_P2P */
+void issue_beacon23a(struct rtw_adapter *padapter, int timeout_ms);
+void issue_probersp23a(struct rtw_adapter *padapter, unsigned char *da,
+ u8 is_valid_p2p_probereq);
+void issue_assocreq23a(struct rtw_adapter *padapter);
+void issue_asocrsp23a(struct rtw_adapter *padapter, unsigned short status,
+ struct sta_info *pstat, int pkt_type);
+void issue_auth23a(struct rtw_adapter *padapter, struct sta_info *psta,
+ unsigned short status);
+void issue_probereq23a(struct rtw_adapter *padapter, struct cfg80211_ssid *pssid,
+ u8 *da);
+s32 issue_probereq23a_ex23a(struct rtw_adapter *padapter, struct cfg80211_ssid *pssid,
+ u8 *da, int try_cnt, int wait_ms);
+int issue_nulldata23a(struct rtw_adapter *padapter, unsigned char *da,
+ unsigned int power_mode, int try_cnt, int wait_ms);
+int issue_qos_nulldata23a(struct rtw_adapter *padapter, unsigned char *da, u16 tid,
+ int try_cnt, int wait_ms);
+int issue_deauth23a(struct rtw_adapter *padapter, unsigned char *da,
+ unsigned short reason);
+int issue_deauth23a_ex23a(struct rtw_adapter *padapter, u8 *da, unsigned short reason,
+ int try_cnt, int wait_ms);
+void issue_action_spct_ch_switch23a(struct rtw_adapter *padapter, u8 *ra,
+ u8 new_ch, u8 ch_offset);
+void issue_action_BA23a(struct rtw_adapter *padapter, unsigned char *raddr,
+ unsigned char action, unsigned short status);
+unsigned int send_delba23a(struct rtw_adapter *padapter, u8 initiator, u8 *addr);
+unsigned int send_beacon23a(struct rtw_adapter *padapter);
+
+void start_clnt_assoc23a(struct rtw_adapter *padapter);
+void start_clnt_auth23a(struct rtw_adapter *padapter);
+void start_clnt_join23a(struct rtw_adapter *padapter);
+void start_create_ibss23a(struct rtw_adapter *padapter);
+
+unsigned int OnAssocReq23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnAssocRsp23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnProbeReq23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnProbeRsp23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int DoReserved23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnBeacon23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnAtim23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnDisassoc23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnAuth23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnAuth23aClient23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnDeAuth23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnAction23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+
+unsigned int on_action_spct23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnAction23a_qos(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnAction23a_dls(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnAction23a_back23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int on_action_public23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnAction23a_ht(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnAction23a_wmm(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+unsigned int OnAction23a_p2p(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
+
+
+void mlmeext_joinbss_event_callback23a(struct rtw_adapter *padapter, int join_res);
+void mlmeext_sta_del_event_callback23a(struct rtw_adapter *padapter);
+void mlmeext_sta_add_event_callback23a(struct rtw_adapter *padapter, struct sta_info *psta);
+
+void linked_status_chk23a(struct rtw_adapter *padapter);
+
+#define set_survey_timer(mlmeext, ms) \
+ /*DBG_8723A("%s set_survey_timer(%p, %d)\n", __FUNCTION__, (mlmeext), (ms));*/ \
+ mod_timer(&mlmeext->survey_timer, jiffies + msecs_to_jiffies(ms));
+
+#define set_link_timer(mlmeext, ms) \
+ /*DBG_8723A("%s set_link_timer(%p, %d)\n", __FUNCTION__, (mlmeext), (ms));*/ \
+ mod_timer(&mlmeext->link_timer, jiffies + msecs_to_jiffies(ms));
+
+int cckrates_included23a(unsigned char *rate, int ratelen);
+int cckratesonly_included23a(unsigned char *rate, int ratelen);
+
+void process_addba_req23a(struct rtw_adapter *padapter, u8 *paddba_req, u8 *addr);
+
+void update_TSF23a(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len);
+void correct_TSF23a(struct rtw_adapter *padapter, struct mlme_ext_priv *pmlmeext);
+
+struct cmd_hdl {
+ uint parmsize;
+ u8 (*h2cfuns)(struct rtw_adapter *padapter, u8 *pbuf);
+};
+
+
+u8 read_macreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+u8 write_macreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+u8 read_bbreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+u8 write_bbreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+u8 read_rfreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+u8 write_rfreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+
+
+u8 NULL_hdl23a(struct rtw_adapter *padapter, u8 *pbuf);
+u8 join_cmd_hdl23a(struct rtw_adapter *padapter, u8 *pbuf);
+u8 disconnect_hdl23a(struct rtw_adapter *padapter, u8 *pbuf);
+u8 createbss_hdl23a(struct rtw_adapter *padapter, u8 *pbuf);
+u8 setopmode_hdl23a(struct rtw_adapter *padapter, u8 *pbuf);
+u8 sitesurvey_cmd_hdl23a(struct rtw_adapter *padapter, u8 *pbuf);
+u8 setauth_hdl23a(struct rtw_adapter *padapter, u8 *pbuf);
+u8 setkey_hdl23a(struct rtw_adapter *padapter, u8 *pbuf);
+u8 set_stakey_hdl23a(struct rtw_adapter *padapter, u8 *pbuf);
+u8 set_assocsta_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+u8 del_assocsta_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+u8 add_ba_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf);
+
+u8 mlme_evt_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf);
+u8 h2c_msg_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf);
+u8 tx_beacon_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf);
+u8 set_ch_hdl23a(struct rtw_adapter *padapter, u8 *pbuf);
+u8 set_chplan_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf);
+u8 led_blink_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf);
+u8 set_csa_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf); /* Kurt: Handling DFS channel switch announcement ie. */
+u8 tdls_hdl23a(struct rtw_adapter *padapter, unsigned char *pbuf);
+
+#define GEN_DRV_CMD_HANDLER(size, cmd) {size, &cmd ## _hdl23a},
+#define GEN_MLME_EXT_HANDLER(size, cmd) {size, cmd},
+
+struct C2HEvent_Header {
+#ifdef __LITTLE_ENDIAN
+
+ unsigned int len:16;
+ unsigned int ID:8;
+ unsigned int seq:8;
+
+#elif defined(__BIG_ENDIAN)
+
+ unsigned int seq:8;
+ unsigned int ID:8;
+ unsigned int len:16;
+
+#else
+
+# error "Must be LITTLE or BIG Endian"
+
+#endif
+
+ unsigned int rsvd;
+};
+
+void rtw_dummy_event_callback23a(struct rtw_adapter *adapter, u8 *pbuf);
+void rtw23a_fwdbg_event_callback(struct rtw_adapter *adapter, u8 *pbuf);
+
+enum rtw_c2h_event {
+ GEN_EVT_CODE(_Read_MACREG) = 0, /*0*/
+ GEN_EVT_CODE(_Read_BBREG),
+ GEN_EVT_CODE(_Read_RFREG),
+ GEN_EVT_CODE(_Read_EEPROM),
+ GEN_EVT_CODE(_Read_EFUSE),
+ GEN_EVT_CODE(_Read_CAM), /*5*/
+ GEN_EVT_CODE(_Get_BasicRate),
+ GEN_EVT_CODE(_Get_DataRate),
+ GEN_EVT_CODE(_Survey), /*8*/
+ GEN_EVT_CODE(_SurveyDone), /*9*/
+
+ GEN_EVT_CODE(_JoinBss) , /*10*/
+ GEN_EVT_CODE(_AddSTA),
+ GEN_EVT_CODE(_DelSTA),
+ GEN_EVT_CODE(_AtimDone) ,
+ GEN_EVT_CODE(_TX_Report),
+ GEN_EVT_CODE(_CCX_Report), /*15*/
+ GEN_EVT_CODE(_DTM_Report),
+ GEN_EVT_CODE(_TX_Rate_Statistics),
+ GEN_EVT_CODE(_C2HLBK),
+ GEN_EVT_CODE(_FWDBG),
+ GEN_EVT_CODE(_C2HFEEDBACK), /*20*/
+ GEN_EVT_CODE(_ADDBA),
+ GEN_EVT_CODE(_C2HBCN),
+ GEN_EVT_CODE(_ReportPwrState), /* filen: only for PCIE, USB */
+ GEN_EVT_CODE(_CloseRF), /* filen: only for PCIE, work around ASPM */
+ MAX_C2HEVT
+};
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_p2p.h b/drivers/staging/rtl8723au/include/rtw_p2p.h
new file mode 100644
index 000000000000..93fdc658ff4d
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_p2p.h
@@ -0,0 +1,158 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_P2P_H_
+#define __RTW_P2P_H_
+
+#include <drv_types.h>
+
+u32 build_beacon_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_probe_resp_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_prov_disc_request_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pbuf,
+ u8 *pssid, u8 ussidlen, u8 *pdev_raddr);
+u32 build_assoc_resp_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pbuf,
+ u8 status_code);
+u32 build_deauth_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pbuf);
+#ifdef CONFIG_8723AU_P2P
+u32 build_probe_req_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_probe_resp_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf,
+ u8 tunneled);
+u32 build_beacon_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_nego_req_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_nego_resp_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_nego_confirm_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_invitation_req_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_invitation_resp_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_assoc_req_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_assoc_resp_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_provdisc_req_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_provdisc_resp_wfd_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+#endif /* CONFIG_8723AU_P2P */
+
+u32 process_probe_req_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pframe,
+ uint len);
+u32 process_assoc_req_p2p_ie23a(struct wifidirect_info *pwdinfo, u8 *pframe,
+ uint len, struct sta_info *psta);
+u32 process_p2p_devdisc_req23a(struct wifidirect_info *pwdinfo, u8 *pframe,
+ uint len);
+u32 process_p2p_devdisc_resp23a(struct wifidirect_info *pwdinfo, u8 *pframe,
+ uint len);
+u8 process_p2p_provdisc_req23a(struct wifidirect_info *pwdinfo, u8 *pframe,
+ uint len);
+u8 process_p2p_provdisc_resp23a(struct wifidirect_info *pwdinfo, u8 *pframe);
+u8 process_p2p_group_negotation_req23a(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_group_negotation_resp23a(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_group_negotation_confirm23a(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_presence_req23a(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+
+void p2p_protocol_wk_hdl23a(struct rtw_adapter *padapter, int cmdtype);
+
+#ifdef CONFIG_8723AU_P2P
+void process_p2p_ps_ie23a(struct rtw_adapter *padapter, u8 *IEs, u32 IELength);
+void p2p_ps_wk_hdl23a(struct rtw_adapter *padapter, u8 p2p_ps_state);
+u8 p2p_ps_wk_cmd23a(struct rtw_adapter *padapter, u8 p2p_ps_state, u8 enqueue);
+#endif /* CONFIG_8723AU_P2P */
+
+void rtw_init_cfg80211_wifidirect_info(struct rtw_adapter *padapter);
+int rtw_p2p_check_frames(struct rtw_adapter *padapter, const u8 *buf,
+ u32 len, u8 tx);
+void rtw_append_wfd_ie(struct rtw_adapter *padapter, u8 *buf, u32 *len);
+
+void reset_global_wifidirect_info23a(struct rtw_adapter *padapter);
+int rtw_init_wifi_display_info(struct rtw_adapter *padapter);
+void rtw_init_wifidirect_timers23a(struct rtw_adapter *padapter);
+void rtw_init_wifidirect_addrs23a(struct rtw_adapter *padapter, u8 *dev_addr,
+ u8 *iface_addr);
+void init_wifidirect_info23a(struct rtw_adapter *padapter, enum P2P_ROLE role);
+int rtw_p2p_enable23a(struct rtw_adapter *padapter, enum P2P_ROLE role);
+
+static inline void _rtw_p2p_set_state(struct wifidirect_info *wdinfo,
+ enum P2P_STATE state)
+{
+ if (wdinfo->p2p_state != state) {
+ /* wdinfo->pre_p2p_state = wdinfo->p2p_state; */
+ wdinfo->p2p_state = state;
+ }
+}
+
+static inline void _rtw_p2p_set_pre_state(struct wifidirect_info *wdinfo,
+ enum P2P_STATE state)
+{
+ if (wdinfo->pre_p2p_state != state)
+ wdinfo->pre_p2p_state = state;
+}
+
+static inline void _rtw_p2p_set_role(struct wifidirect_info *wdinfo,
+ enum P2P_ROLE role)
+{
+ if (wdinfo->role != role)
+ wdinfo->role = role;
+}
+
+static inline int _rtw_p2p_state(struct wifidirect_info *wdinfo)
+{
+ return wdinfo->p2p_state;
+}
+
+static inline int _rtw_p2p_pre_state(struct wifidirect_info *wdinfo)
+{
+ return wdinfo->pre_p2p_state;
+}
+
+static inline int _rtw_p2p_role(struct wifidirect_info *wdinfo)
+{
+ return wdinfo->role;
+}
+
+static inline bool _rtw_p2p_chk_state(struct wifidirect_info *wdinfo,
+ enum P2P_STATE state)
+{
+ return wdinfo->p2p_state == state;
+}
+
+static inline bool _rtw_p2p_chk_role(struct wifidirect_info *wdinfo,
+ enum P2P_ROLE role)
+{
+ return wdinfo->role == role;
+}
+
+#define rtw_p2p_set_state(wdinfo, state) _rtw_p2p_set_state(wdinfo, state)
+#define rtw_p2p_set_pre_state(wdinfo, state) \
+ _rtw_p2p_set_pre_state(wdinfo, state)
+#define rtw_p2p_set_role(wdinfo, role) _rtw_p2p_set_role(wdinfo, role)
+
+#define rtw_p2p_state(wdinfo) _rtw_p2p_state(wdinfo)
+#define rtw_p2p_pre_state(wdinfo) _rtw_p2p_pre_state(wdinfo)
+#define rtw_p2p_role(wdinfo) _rtw_p2p_role(wdinfo)
+#define rtw_p2p_chk_state(wdinfo, state) _rtw_p2p_chk_state(wdinfo, state)
+#define rtw_p2p_chk_role(wdinfo, role) _rtw_p2p_chk_role(wdinfo, role)
+
+#define rtw_p2p_findphase_ex_set(wdinfo, value) \
+ ((wdinfo)->find_phase_state_exchange_cnt = (value))
+
+/* is this find phase exchange for social channel scan? */
+#define rtw_p2p_findphase_ex_is_social(wdinfo) \
+ ((wdinfo)->find_phase_state_exchange_cnt >= \
+ P2P_FINDPHASE_EX_SOCIAL_FIRST)
+
+/* should we need find phase exchange anymore? */
+#define rtw_p2p_findphase_ex_is_needed(wdinfo) \
+ ((wdinfo)->find_phase_state_exchange_cnt < P2P_FINDPHASE_EX_MAX && \
+ (wdinfo)->find_phase_state_exchange_cnt != P2P_FINDPHASE_EX_NONE)
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_pwrctrl.h b/drivers/staging/rtl8723au/include/rtw_pwrctrl.h
new file mode 100644
index 000000000000..e0da87d4d3d6
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_pwrctrl.h
@@ -0,0 +1,265 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_PWRCTRL_H_
+#define __RTW_PWRCTRL_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define FW_PWR0 0
+#define FW_PWR1 1
+#define FW_PWR2 2
+#define FW_PWR3 3
+
+
+#define HW_PWR0 7
+#define HW_PWR1 6
+#define HW_PWR2 2
+#define HW_PWR3 0
+#define HW_PWR4 8
+
+#define FW_PWRMSK 0x7
+
+
+#define XMIT_ALIVE BIT(0)
+#define RECV_ALIVE BIT(1)
+#define CMD_ALIVE BIT(2)
+#define EVT_ALIVE BIT(3)
+
+enum Power_Mgnt {
+ PS_MODE_ACTIVE = 0,
+ PS_MODE_MIN,
+ PS_MODE_MAX,
+ PS_MODE_DTIM,
+ PS_MODE_VOIP,
+ PS_MODE_UAPSD_WMM,
+ PS_MODE_UAPSD,
+ PS_MODE_IBSS,
+ PS_MODE_WWLAN,
+ PM_Radio_Off,
+ PM_Card_Disable,
+ PS_MODE_NUM
+};
+
+
+/* BIT[2:0] = HW state
+ * BIT[3] = Protocol PS state, 0: active, 1: sleep state
+ * BIT[4] = sub-state
+ */
+
+#define PS_DPS BIT(0)
+#define PS_LCLK (PS_DPS)
+#define PS_RF_OFF BIT(1)
+#define PS_ALL_ON BIT(2)
+#define PS_ST_ACTIVE BIT(3)
+
+#define PS_ISR_ENABLE BIT(4)
+#define PS_IMR_ENABLE BIT(5)
+#define PS_ACK BIT(6)
+#define PS_TOGGLE BIT(7)
+
+#define PS_STATE_MASK (0x0F)
+#define PS_STATE_HW_MASK (0x07)
+#define PS_SEQ_MASK (0xc0)
+
+#define PS_STATE(x) (PS_STATE_MASK & (x))
+#define PS_STATE_HW(x) (PS_STATE_HW_MASK & (x))
+#define PS_SEQ(x) (PS_SEQ_MASK & (x))
+
+#define PS_STATE_S0 (PS_DPS)
+#define PS_STATE_S1 (PS_LCLK)
+#define PS_STATE_S2 (PS_RF_OFF)
+#define PS_STATE_S3 (PS_ALL_ON)
+#define PS_STATE_S4 ((PS_ST_ACTIVE) | (PS_ALL_ON))
+
+
+#define PS_IS_RF_ON(x) ((x) & (PS_ALL_ON))
+#define PS_IS_ACTIVE(x) ((x) & (PS_ST_ACTIVE))
+#define CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))
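A worked example of the state encoding above (illustrative only, not part of the patch):

	/* PS_STATE_S4 = PS_ST_ACTIVE | PS_ALL_ON = BIT(3) | BIT(2) = 0x0C,
	 * so PS_IS_RF_ON(PS_STATE_S4) and PS_IS_ACTIVE(PS_STATE_S4) are both non-zero.
	 * PS_STATE_S2 = PS_RF_OFF = BIT(1), for which both checks evaluate to 0.
	 */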
+
+
+struct reportpwrstate_parm {
+ unsigned char mode;
+ unsigned char state; /* the CPWM value */
+ unsigned short rsvd;
+};
+
+#define LPS_DELAY_TIME (1*HZ) /* 1 sec */
+
+#define EXE_PWR_NONE 0x01
+#define EXE_PWR_IPS 0x02
+#define EXE_PWR_LPS 0x04
+
+/* RF state. */
+enum rt_rf_power_state {
+ rf_on, /* RF is on after RFSleep or RFOff */
+ rf_sleep, /* 802.11 Power Save mode */
+ rf_off, /* HW/SW Radio OFF or Inactive Power Save */
+ /* Add the new RF state above this line===== */
+ rf_max
+};
+
+/* RF Off Level for IPS or HW/SW radio off */
+#define RT_RF_OFF_LEVL_ASPM BIT(0) /* PCI ASPM */
+#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /* PCI clock request */
+#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /* PCI D3 mode */
+/* NIC halt, re-init hw params */
+#define RT_RF_OFF_LEVL_HALT_NIC BIT(3)
+/* FW free, re-download the FW */
+#define RT_RF_OFF_LEVL_FREE_FW BIT(4)
+#define RT_RF_OFF_LEVL_FW_32K BIT(5) /* FW in 32k */
+/* Always enable ASPM and Clock Req in initialization. */
+#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6)
+/* When LPS is on, disable 2R if no packet is received or transmitted. */
+#define RT_RF_LPS_DISALBE_2R BIT(30)
+#define RT_RF_LPS_LEVEL_ASPM BIT(31) /* LPS with ASPM */
+
+#define RT_IN_PS_LEVEL(ppsc, _PS_FLAG) \
+ ((ppsc->cur_ps_level & _PS_FLAG) ? true : false)
+#define RT_CLEAR_PS_LEVEL(ppsc, _PS_FLAG) \
+ (ppsc->cur_ps_level &= (~(_PS_FLAG)))
+#define RT_SET_PS_LEVEL(ppsc, _PS_FLAG) \
+ (ppsc->cur_ps_level |= _PS_FLAG)
+
+
+enum {
+ PSBBREG_RF0 = 0,
+ PSBBREG_RF1,
+ PSBBREG_RF2,
+ PSBBREG_AFE0,
+ PSBBREG_TOTALCNT
+};
+
+enum { /* for ips_mode */
+ IPS_NONE = 0,
+ IPS_NORMAL,
+ IPS_LEVEL_2,
+};
+
+struct pwrctrl_priv {
+ struct semaphore lock;
+ volatile u8 rpwm; /* requested power state for fw */
+	volatile u8 cpwm;	/* fw current power state; updated when
+				 * 1) it is read from HCPWM or
+				 * 2) the driver lowers the power level
+				 */
+ volatile u8 tog; /* toggling */
+ volatile u8 cpwm_tog; /* toggling */
+
+ u8 pwr_mode;
+ u8 smart_ps;
+ u8 bcn_ant_mode;
+
+ u32 alives;
+ struct work_struct cpwm_event;
+ u8 bpower_saving;
+
+ u8 b_hw_radio_off;
+ u8 reg_rfoff;
+ u8 reg_pdnmode; /* powerdown mode */
+ u32 rfoff_reason;
+
+ /* RF OFF Level */
+ u32 cur_ps_level;
+ u32 reg_rfps_level;
+
+ uint ips_enter23a_cnts;
+ uint ips_leave23a_cnts;
+
+ u8 ips_mode;
+ u8 ips_mode_req; /* used to accept the mode setting request */
+ uint bips_processing;
+	unsigned long ips_deny_time; /* IPS is denied until the system time (jiffies) passes this value */
+ u8 ps_processing; /* used to mark whether in rtw_ps_processor23a */
+
+ u8 bLeisurePs;
+ u8 LpsIdleCount;
+ u8 power_mgnt;
+ u8 bFwCurrentInPSMode;
+ unsigned long DelayLPSLastTimeStamp;
+ u8 btcoex_rfon;
+ s32 pnp_current_pwr_state;
+ u8 pnp_bstop_trx;
+
+ u8 bInternalAutoSuspend;
+ u8 bInSuspend;
+#ifdef CONFIG_8723AU_BT_COEXIST
+ u8 bAutoResume;
+ u8 autopm_cnt;
+#endif
+ u8 bSupportRemoteWakeup;
+ struct timer_list pwr_state_check_timer;
+ int pwr_state_check_interval;
+ u8 pwr_state_check_cnts;
+
+ int ps_flag;
+
+ enum rt_rf_power_state rf_pwrstate;/* cur power state */
+ enum rt_rf_power_state change_rfpwrstate;
+
+ u8 wepkeymask;
+ u8 bHWPowerdown;/* if support hw power down */
+ u8 bHWPwrPindetect;
+ u8 bkeepfwalive;
+ u8 brfoffbyhw;
+ unsigned long PS_BBRegBackup[PSBBREG_TOTALCNT];
+};
+
+#define rtw_get_ips_mode_req(pwrctrlpriv) \
+ ((pwrctrlpriv)->ips_mode_req)
+
+#define rtw_ips_mode_req(pwrctrlpriv, ips_mode) \
+ ((pwrctrlpriv)->ips_mode_req = (ips_mode))
+
+#define RTW_PWR_STATE_CHK_INTERVAL 2000
+
+#define _rtw_set_pwr_state_check_timer(pwrctrlpriv, ms) \
+ (mod_timer(&pwrctrlpriv->pwr_state_check_timer, jiffies + \
+ msecs_to_jiffies(ms)))
+
+#define rtw_set_pwr_state_check_timer(pwrctrlpriv) \
+ (_rtw_set_pwr_state_check_timer((pwrctrlpriv), \
+ (pwrctrlpriv)->pwr_state_check_interval))
+
+void rtw_init_pwrctrl_priv23a(struct rtw_adapter *adapter);
+void rtw_free_pwrctrl_priv(struct rtw_adapter *adapter);
+
+void rtw_set_ps_mode23a(struct rtw_adapter *padapter, u8 ps_mode,
+ u8 smart_ps, u8 bcn_ant_mode);
+void rtw_set_rpwm23a(struct rtw_adapter *padapter, u8 val8);
+void LeaveAllPowerSaveMode23a(struct rtw_adapter *adapter);
+void ips_enter23a(struct rtw_adapter *padapter);
+int ips_leave23a(struct rtw_adapter *padapter);
+
+void rtw_ps_processor23a(struct rtw_adapter *padapter);
+
+enum rt_rf_power_state RfOnOffDetect23a(struct rtw_adapter *adapter);
+
+s32 LPS_RF_ON_check23a(struct rtw_adapter *padapter, u32 delay_ms);
+void LPS_Enter23a(struct rtw_adapter *padapter);
+void LPS_Leave23a(struct rtw_adapter *padapter);
+
+u8 rtw_interface_ps_func23a(struct rtw_adapter *padapter,
+ enum hal_intf_ps_func efunc_id, u8 *val);
+void rtw_set_ips_deny23a(struct rtw_adapter *padapter, u32 ms);
+int _rtw_pwr_wakeup23a(struct rtw_adapter *padapter, u32 ips_deffer_ms,
+ const char *caller);
+#define rtw_pwr_wakeup(adapter) _rtw_pwr_wakeup23a(adapter, \
+ RTW_PWR_STATE_CHK_INTERVAL, __func__)
+#define rtw_pwr_wakeup_ex(adapter, ips_deffer_ms) \
+ _rtw_pwr_wakeup23a(adapter, ips_deffer_ms, __func__)
+int rtw_pm_set_ips23a(struct rtw_adapter *padapter, u8 mode);
+int rtw_pm_set_lps23a(struct rtw_adapter *padapter, u8 mode);
+
+#endif /* __RTW_PWRCTRL_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_qos.h b/drivers/staging/rtl8723au/include/rtw_qos.h
new file mode 100644
index 000000000000..68fc5ba1844a
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_qos.h
@@ -0,0 +1,26 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef _RTW_QOS_H_
+#define _RTW_QOS_H_
+
+#include <osdep_service.h>
+
+struct qos_priv {
+ /* bit mask option: u-apsd, s-apsd, ts, block ack... */
+ unsigned int qos_option;
+};
+
+#endif /* _RTW_QOS_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_recv.h b/drivers/staging/rtl8723au/include/rtw_recv.h
new file mode 100644
index 000000000000..d1866a6e831f
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_recv.h
@@ -0,0 +1,318 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_RECV_H_
+#define _RTW_RECV_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <Hal8723APhyCfg.h>
+
+#define NR_RECVFRAME 256
+
+#define MAX_RXFRAME_CNT 512
+#define MAX_RX_NUMBLKS (32)
+#define RECVFRAME_HDR_ALIGN 128
+
+#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
+
+#define MAX_SUBFRAME_COUNT 64
+
+/* for Rx reordering buffer control */
+struct recv_reorder_ctrl {
+ struct rtw_adapter *padapter;
+ u8 enable;
+ u16 indicate_seq;/* wstart_b, init_value=0xffff */
+ u16 wend_b;
+ u8 wsize_b;
+ struct rtw_queue pending_recvframe_queue;
+ struct timer_list reordering_ctrl_timer;
+};
+
+struct stainfo_rxcache {
+ u16 tid_rxseq[16];
+/*
+ unsigned short tid0_rxseq;
+ unsigned short tid1_rxseq;
+ unsigned short tid2_rxseq;
+ unsigned short tid3_rxseq;
+ unsigned short tid4_rxseq;
+ unsigned short tid5_rxseq;
+ unsigned short tid6_rxseq;
+ unsigned short tid7_rxseq;
+ unsigned short tid8_rxseq;
+ unsigned short tid9_rxseq;
+ unsigned short tid10_rxseq;
+ unsigned short tid11_rxseq;
+ unsigned short tid12_rxseq;
+ unsigned short tid13_rxseq;
+ unsigned short tid14_rxseq;
+ unsigned short tid15_rxseq;
+*/
+};
+
+struct smooth_rssi_data {
+ u32 elements[100]; /* array to store values */
+ u32 index; /* index to current array to store */
+ u32 total_num; /* num of valid elements */
+ u32 total_val; /* sum of valid elements */
+};
+
+struct signal_stat {
+ u8 update_req; /* used to indicate */
+ u8 avg_val; /* avg of valid elements */
+ u32 total_num; /* num of valid elements */
+ u32 total_val; /* sum of valid elements */
+};
+
+struct phy_info {
+ u8 RxPWDBAll;
+ u8 SignalQuality; /* in 0-100 index. */
+ u8 RxMIMOSignalQuality[RF_PATH_MAX]; /* EVM */
+ u8 RxMIMOSignalStrength[RF_PATH_MAX];/* 0~100 */
+ s8 RxPower; /* in dBm Translate from PWdB */
+ /* Real power in dBm for this packet, no beautification and aggregation.
+ * Keep this raw info to be used for the other procedures.
+ */
+ s8 RecvSignalPower;
+ u8 BTRxRSSIPercentage;
+ u8 SignalStrength; /* in 0-100 index. */
+ u8 RxPwr[RF_PATH_MAX];/* per-path's pwdb */
+ u8 RxSNR[RF_PATH_MAX];/* per-path's SNR */
+};
+
+
+struct rx_pkt_attrib {
+ u16 pkt_len;
+ u8 physt;
+ u8 drvinfo_sz;
+ u8 shift_sz;
+ u8 hdrlen; /* the WLAN Header Len */
+ u8 to_fr_ds;
+ u8 amsdu;
+ u8 qos;
+ u8 priority;
+ u8 pw_save;
+ u8 mdata;
+ u16 seq_num;
+ u8 frag_num;
+ u8 mfrag;
+ u8 order;
+ u8 privacy; /* in frame_ctrl field */
+ u8 bdecrypted;
+	/* 0: not encrypted; non-zero: the encryption algorithm in use */
+ u8 encrypt;
+ u8 iv_len;
+ u8 icv_len;
+ u8 crc_err;
+ u8 icv_err;
+
+ u16 eth_type;
+
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ u8 ra[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+
+ u8 ack_policy;
+
+ u8 tcpchk_valid; /* 0: invalid, 1: valid */
+ u8 ip_chkrpt; /* 0: incorrect, 1: correct */
+ u8 tcp_chkrpt; /* 0: incorrect, 1: correct */
+ u8 key_index;
+
+ u8 mcs_rate;
+ u8 rxht;
+ u8 sgi;
+ u8 pkt_rpt_type;
+ u32 MacIDValidEntry[2]; /* 64 bits present 64 entry. */
+ struct phy_info phy_info;
+};
+
+/* These definitions are used for Rx packet reordering. */
+#define SN_LESS(a, b) (((a-b) & 0x800) != 0)
+#define SN_EQUAL(a, b) (a == b)
+#define REORDER_WAIT_TIME (50) /* (ms) */
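A brief illustration of how SN_LESS() wraps over the 12-bit 802.11 sequence space (worked values only, not part of the patch):

	/* Sequence numbers are 12-bit, so the comparison is effectively modulo 4096. */
	/* SN_LESS(4090, 5): (4090 - 5) & 0x800 = 0x800 -> true  (4090 precedes 5 after wrap) */
	/* SN_LESS(5, 4090): (5 - 4090) & 0x800 = 0x000 -> false */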
+
+#define RECVBUFF_ALIGN_SZ 8
+
+#define RXDESC_SIZE 24
+#define RXDESC_OFFSET RXDESC_SIZE
+
+struct recv_stat {
+ unsigned int rxdw0;
+ unsigned int rxdw1;
+ unsigned int rxdw2;
+ unsigned int rxdw3;
+ unsigned int rxdw4;
+ unsigned int rxdw5;
+};
+
+/* Accessors of recv_priv: rtw_recv_entry23a (dispatch/passive level),
+ * recv_thread (passive), returnpkt (dispatch), halt (passive).
+ *
+ * Protected via critical sections.
+ */
+struct recv_priv {
+ spinlock_t lock;
+
+ struct rtw_queue free_recv_queue;
+ struct rtw_queue recv_pending_queue;
+ struct rtw_queue uc_swdec_pending_queue;
+
+ void *pallocated_frame_buf;
+
+ uint free_recvframe_cnt;
+
+ struct rtw_adapter *adapter;
+
+ u32 bIsAnyNonBEPkts;
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_drop;
+ u64 last_rx_bytes;
+
+ uint rx_icv_err;
+ uint rx_largepacket_crcerr;
+ uint rx_smallpacket_crcerr;
+ uint rx_middlepacket_crcerr;
+
+ /* u8 *pallocated_urb_buf; */
+ struct semaphore allrxreturnevt;
+ uint ff_hwaddr;
+ u8 rx_pending_cnt;
+
+ struct urb *int_in_urb;
+
+ u8 *int_in_buf;
+
+ struct tasklet_struct irq_prepare_beacon_tasklet;
+ struct tasklet_struct recv_tasklet;
+ struct sk_buff_head free_recv_skb_queue;
+ struct sk_buff_head rx_skb_queue;
+ u8 *precv_buf;
+ struct rtw_queue free_recv_buf_queue;
+ u32 free_recv_buf_queue_cnt;
+
+	/* For displaying the PHY information */
+ u8 is_signal_dbg; /* for debug */
+ u8 signal_strength_dbg; /* for debug */
+ s8 rssi;
+ s8 rxpwdb;
+ u8 signal_strength;
+ u8 signal_qual;
+ u8 noise;
+ int RxSNRdB[2];
+ s8 RxRssi[2];
+ int FalseAlmCnt_all;
+
+ struct timer_list signal_stat_timer;
+ u32 signal_stat_sampling_interval;
+ /* u32 signal_stat_converging_constant; */
+ struct signal_stat signal_qual_data;
+ struct signal_stat signal_strength_data;
+};
+
+#define rtw_set_signal_stat_timer(recvpriv) \
+ mod_timer(&(recvpriv)->signal_stat_timer, jiffies + \
+ msecs_to_jiffies((recvpriv)->signal_stat_sampling_interval))
+
+struct sta_recv_priv {
+ spinlock_t lock;
+ int option;
+
+ /* struct rtw_queue blk_strms[MAX_RX_NUMBLKS]; */
+ struct rtw_queue defrag_q; /* keeping the fragment frame until defrag */
+
+ struct stainfo_rxcache rxcache;
+
+ /* uint sta_rx_bytes; */
+ /* uint sta_rx_pkts; */
+ /* uint sta_rx_fail; */
+
+};
+
+
+struct recv_buf {
+ struct list_head list;
+
+ struct rtw_adapter *adapter;
+
+ struct urb *purb;
+ struct sk_buff *pskb;
+};
+
+/* head ----->
+ *
+ * data ----->
+ *
+ * payload
+ *
+ * tail ----->
+ *
+ * end ----->
+ *
+ * len = (unsigned int )(tail - data);
+ *
+ */
+struct recv_frame {
+ struct list_head list;
+ struct sk_buff *pkt;
+
+ struct rtw_adapter *adapter;
+
+ struct rx_pkt_attrib attrib;
+
+ struct sta_info *psta;
+
+ /* for A-MPDU Rx reordering buffer control */
+ struct recv_reorder_ctrl *preorder_ctrl;
+};
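As a sketch of how the layout comment above maps onto the sk_buff attached to a recv_frame (illustrative only; precvframe is a hypothetical local variable):

	/* For the linear skbs used here, the payload length is */
	unsigned int len = skb_tail_pointer(precvframe->pkt) - precvframe->pkt->data;
	/* which is the same as precvframe->pkt->len for a linear skb */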
+
+/* get a free recv_frame from pfree_recv_queue */
+struct recv_frame *rtw_alloc_recvframe23a(struct rtw_queue *pfree_recv_queue);
+int rtw_free_recvframe23a(struct recv_frame *precvframe, struct rtw_queue *pfree_recv_queue);
+
+int rtw_enqueue_recvframe23a(struct recv_frame *precvframe, struct rtw_queue *queue);
+
+void rtw_free_recvframe23a_queue(struct rtw_queue *pframequeue, struct rtw_queue *pfree_recv_queue);
+u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter);
+
+int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *queue);
+int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue);
+struct recv_buf *rtw_dequeue_recvbuf23a(struct rtw_queue *queue);
+
+void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext);
+
+static inline s32 translate_percentage_to_dbm(u32 SignalStrengthIndex)
+{
+ s32 SignalPower; /* in dBm. */
+
+ /* Translate to dBm (x=0.5y-95). */
+ SignalPower = (s32)((SignalStrengthIndex + 1) >> 1);
+ SignalPower -= 95;
+
+ return SignalPower;
+}
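A quick worked example of the conversion above (illustrative only):

	/* index 60 -> ((60 + 1) >> 1) - 95 = 30 - 95 = -65 dBm */
	s32 rssi_dbm = translate_percentage_to_dbm(60);	/* -65 */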
+
+
+struct sta_info;
+
+void _rtw_init_sta_recv_priv23a(struct sta_recv_priv *psta_recvpriv);
+
+void mgt_dispatcher23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_rf.h b/drivers/staging/rtl8723au/include/rtw_rf.h
new file mode 100644
index 000000000000..91a0a22a2709
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_rf.h
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_RF_H_
+#define __RTW_RF_H_
+
+#include <rtw_cmd.h>
+
+#define OFDM_PHY 1
+#define MIXED_PHY 2
+#define CCK_PHY 3
+
+#define NumRates (13)
+
+/* slot time for 11g */
+#define SHORT_SLOT_TIME 9
+#define NON_SHORT_SLOT_TIME 20
+
+/* We now define the max channels in each channel plan. */
+#define MAX_CHANNEL_NUM_2G 14
+#define MAX_CHANNEL_NUM_5G 24
+#define MAX_CHANNEL_NUM 38/* 14+24 */
+
+/* define NUM_REGULATORYS 21 */
+#define NUM_REGULATORYS 1
+
+/* Country codes */
+#define USA 0x555320
+#define EUROPE 0x1 /* temp, should be provided later */
+#define JAPAN 0x2 /* temp, should be provided later */
+
+struct regulatory_class {
+ u32 starting_freq; /* MHz */
+ u8 channel_set[MAX_CHANNEL_NUM];
+ u8 channel_cck_power[MAX_CHANNEL_NUM];/* dbm */
+ u8 channel_ofdm_power[MAX_CHANNEL_NUM];/* dbm */
+ u8 txpower_limit; /* dbm */
+ u8 channel_spacing; /* MHz */
+ u8 modem;
+};
+
+enum {
+ cESS = 0x0001,
+ cIBSS = 0x0002,
+ cPollable = 0x0004,
+ cPollReq = 0x0008,
+ cPrivacy = 0x0010,
+ cShortPreamble = 0x0020,
+ cPBCC = 0x0040,
+ cChannelAgility = 0x0080,
+ cSpectrumMgnt = 0x0100,
+ cQos = 0x0200, /* For HCCA, use with CF-Pollable and CF-PollReq */
+ cShortSlotTime = 0x0400,
+ cAPSD = 0x0800,
+ cRM = 0x1000, /* RRM (Radio Resource Measurement) */
+ cDSSS_OFDM = 0x2000,
+ cDelayedBA = 0x4000,
+ cImmediateBA = 0x8000,
+};
+
+enum {
+ PREAMBLE_LONG = 1,
+ PREAMBLE_AUTO = 2,
+ PREAMBLE_SHORT = 3,
+};
+
+/* Bandwidth Offset */
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
+#define HAL_PRIME_CHNL_OFFSET_LOWER 1
+#define HAL_PRIME_CHNL_OFFSET_UPPER 2
+
+/* Represent Channel Width in HT Capabilities */
+enum ht_channel_width {
+ HT_CHANNEL_WIDTH_20 = 0,
+ HT_CHANNEL_WIDTH_40 = 1,
+ HT_CHANNEL_WIDTH_80 = 2,
+ HT_CHANNEL_WIDTH_160 = 3,
+ HT_CHANNEL_WIDTH_10 = 4,
+};
+
+/* */
+/* Represent Extension Channel Offset in HT Capabilities */
+/* This is available only in 40 MHz mode. */
+/* */
+enum {
+ HT_EXTCHNL_OFFSET_NO_EXT = 0,
+ HT_EXTCHNL_OFFSET_UPPER = 1,
+ HT_EXTCHNL_OFFSET_NO_DEF = 2,
+ HT_EXTCHNL_OFFSET_LOWER = 3,
+};
+
+/* 2007/11/15 MH Define different RF type. */
+enum {
+ RF_1T2R = 0,
+ RF_2T4R = 1,
+ RF_2T2R = 2,
+ RF_1T1R = 3,
+ RF_2T2R_GREEN = 4,
+ RF_819X_MAX_TYPE = 5,
+};
+
+#endif /* _RTL8711_RF_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_security.h b/drivers/staging/rtl8723au/include/rtw_security.h
new file mode 100644
index 000000000000..75bbb934a53c
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_security.h
@@ -0,0 +1,357 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_SECURITY_H_
+#define __RTW_SECURITY_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+#define _NO_PRIVACY_ 0x0
+#define _WEP40_ 0x1
+#define _TKIP_ 0x2
+#define _TKIP_WTMIC_ 0x3
+#define _AES_ 0x4
+#define _WEP104_ 0x5
+#define _WEP_WPA_MIXED_ 0x07 /* WEP + WPA */
+#define _SMS4_ 0x06
+
+#define is_wep_enc(alg) (((alg) == _WEP40_) || ((alg) == _WEP104_))
+
+#define _WPA_IE_ID_ 0xdd
+#define _WPA2_IE_ID_ 0x30
+
+#define SHA256_MAC_LEN 32
+#define AES_BLOCK_SIZE 16
+#define AES_PRIV_SIZE (4 * 44)
+
+enum ENCRYP_PROTOCOL {
+ ENCRYP_PROTOCOL_OPENSYS, /* open system */
+ ENCRYP_PROTOCOL_WEP, /* WEP */
+ ENCRYP_PROTOCOL_WPA, /* WPA */
+ ENCRYP_PROTOCOL_WPA2, /* WPA2 */
+ ENCRYP_PROTOCOL_MAX
+};
+
+#ifndef Ndis802_11AuthModeWPA2
+#define Ndis802_11AuthModeWPA2 (Ndis802_11AuthModeWPANone + 1)
+#endif
+
+#ifndef Ndis802_11AuthModeWPA2PSK
+#define Ndis802_11AuthModeWPA2PSK (Ndis802_11AuthModeWPANone + 2)
+#endif
+
+union pn48 {
+ u64 val;
+
+#ifdef __LITTLE_ENDIAN
+
+struct {
+ u8 TSC0;
+ u8 TSC1;
+ u8 TSC2;
+ u8 TSC3;
+ u8 TSC4;
+ u8 TSC5;
+ u8 TSC6;
+ u8 TSC7;
+} _byte_;
+
+#elif defined(__BIG_ENDIAN)
+
+struct {
+ u8 TSC7;
+ u8 TSC6;
+ u8 TSC5;
+ u8 TSC4;
+ u8 TSC3;
+ u8 TSC2;
+ u8 TSC1;
+ u8 TSC0;
+} _byte_;
+#else
+#error Need BIG or LITTLE endian
+
+#endif
+
+};
+
+union Keytype {
+ u8 skey[16];
+ u32 lkey[4];
+};
+
+
+struct rt_pmkid_list {
+ u8 bUsed;
+ u8 Bssid[6];
+ u8 PMKID[16];
+ u8 SsidBuf[33];
+ u8 *ssid_octet;
+ u16 ssid_length;
+};
+
+struct security_priv {
+ u32 dot11AuthAlgrthm; /* 802.11 auth, could be open, shared,
+ * 8021x and authswitch */
+ u32 dot11PrivacyAlgrthm; /* This specifies the privacy for
+ * shared auth. algorithm.
+ */
+ /* WEP */
+ u32 dot11PrivacyKeyIndex; /* this is only valid for legacy
+ * wep, 0~3 for key id. (tx key index)
+ */
+ union Keytype dot11DefKey[4]; /* this is only valid for def. key */
+ u32 dot11DefKeylen[4];
+
+ u32 dot118021XGrpPrivacy; /* specifies the privacy algorithm
+ * used for Grp key
+ */
+ u32 dot118021XGrpKeyid; /* key id used for Grp Key
+ * (tx key index)
+ */
+ union Keytype dot118021XGrpKey[4];/* 802.1x Grp Key, inx0 and inx1 */
+ union Keytype dot118021XGrptxmickey[4];
+ union Keytype dot118021XGrprxmickey[4];
+ union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit.*/
+ union pn48 dot11Grprxpn; /* PN48 used for Grp Key recv.*/
+
+#ifdef CONFIG_8723AU_AP_MODE
+ /* extend security capabilities for AP_MODE */
+ unsigned int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
+ unsigned int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
+ unsigned int wpa_group_cipher;
+ unsigned int wpa2_group_cipher;
+ unsigned int wpa_pairwise_cipher;
+ unsigned int wpa2_pairwise_cipher;
+#endif
+
+ u8 wps_ie[MAX_WPS_IE_LEN];/* added in assoc req */
+ int wps_ie_len;
+ u8 binstallGrpkey;
+ u8 busetkipkey;
+ u8 bcheck_grpkey;
+ u8 bgrpkey_handshake;
+ s32 hw_decrypted;
+ u32 ndisauthtype; /* enum ndis_802_11_auth_mode */
+ u32 ndisencryptstatus; /* NDIS_802_11_ENCRYPTION_STATUS */
+ struct wlan_bssid_ex sec_bss; /* for joinbss (h2c buffer) usage */
+ struct ndis_802_11_wep ndiswep;
+ u8 assoc_info[600];
+ u8 szofcapability[256]; /* for wpa2 usage */
+ u8 oidassociation[512]; /* for wpa/wpa2 usage */
+ u8 authenticator_ie[256]; /* store ap security information element */
+ u8 supplicant_ie[256]; /* store sta security information element */
+
+ /* for tkip countermeasure */
+ unsigned long last_mic_err_time;
+ u8 btkip_countermeasure;
+ u8 btkip_wait_report;
+ unsigned long btkip_countermeasure_time;
+
+ /* For WPA2 Pre-Authentication. */
+ struct rt_pmkid_list PMKIDList[NUM_PMKID_CACHE];
+ u8 PMKIDIndex;
+ u8 bWepDefaultKeyIdxSet;
+};
+
+struct sha256_state {
+ u64 length;
+ u32 state[8], curlen;
+ u8 buf[64];
+};
+
+#define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst)\
+do {\
+ switch (psecuritypriv->dot11AuthAlgrthm) {\
+ case dot11AuthAlgrthm_Open:\
+ case dot11AuthAlgrthm_Shared:\
+ case dot11AuthAlgrthm_Auto:\
+ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
+ break;\
+ case dot11AuthAlgrthm_8021X:\
+ if (bmcst)\
+ encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\
+ else\
+ encry_algo = (u8)psta->dot118021XPrivacy;\
+ break;\
+ } \
+} while (0)
+
+#define GET_TKIP_PN(iv, dot11txpn)\
+do {\
+ dot11txpn._byte_.TSC0 = iv[2];\
+ dot11txpn._byte_.TSC1 = iv[0];\
+ dot11txpn._byte_.TSC2 = iv[4];\
+ dot11txpn._byte_.TSC3 = iv[5];\
+ dot11txpn._byte_.TSC4 = iv[6];\
+ dot11txpn._byte_.TSC5 = iv[7];\
+} while (0)
+
+#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
+#define ROR32(A, n) ROL32((A), 32-(n))
+
+struct mic_data {
+ u32 K0, K1; /* Key */
+ u32 L, R; /* Current state */
+ u32 M; /* Message accumulator (single word) */
+ u32 nBytesInM; /* # bytes in M */
+};
+
+extern const u32 Te0[256];
+extern const u32 Te1[256];
+extern const u32 Te2[256];
+extern const u32 Te3[256];
+extern const u32 Te4[256];
+extern const u32 Td0[256];
+extern const u32 Td1[256];
+extern const u32 Td2[256];
+extern const u32 Td3[256];
+extern const u32 Td4[256];
+extern const u32 rcon[10];
+extern const u8 Td4s[256];
+extern const u8 rcons[10];
+
+#define RCON(i) (rcons[(i)] << 24)
+
+static inline u32 rotr(u32 val, int bits)
+{
+ return (val >> bits) | (val << (32 - bits));
+}
+
+#define TE0(i) Te0[((i) >> 24) & 0xff]
+#define TE1(i) rotr(Te0[((i) >> 16) & 0xff], 8)
+#define TE2(i) rotr(Te0[((i) >> 8) & 0xff], 16)
+#define TE3(i) rotr(Te0[(i) & 0xff], 24)
+#define TE41(i) ((Te0[((i) >> 24) & 0xff] << 8) & 0xff000000)
+#define TE42(i) (Te0[((i) >> 16) & 0xff] & 0x00ff0000)
+#define TE43(i) (Te0[((i) >> 8) & 0xff] & 0x0000ff00)
+#define TE44(i) ((Te0[(i) & 0xff] >> 8) & 0x000000ff)
+#define TE421(i) ((Te0[((i) >> 16) & 0xff] << 8) & 0xff000000)
+#define TE432(i) (Te0[((i) >> 8) & 0xff] & 0x00ff0000)
+#define TE443(i) (Te0[(i) & 0xff] & 0x0000ff00)
+#define TE414(i) ((Te0[((i) >> 24) & 0xff] >> 8) & 0x000000ff)
+#define TE4(i) ((Te0[(i)] >> 8) & 0x000000ff)
+
+#define TD0(i) Td0[((i) >> 24) & 0xff]
+#define TD1(i) rotr(Td0[((i) >> 16) & 0xff], 8)
+#define TD2(i) rotr(Td0[((i) >> 8) & 0xff], 16)
+#define TD3(i) rotr(Td0[(i) & 0xff], 24)
+#define TD41(i) (Td4s[((i) >> 24) & 0xff] << 24)
+#define TD42(i) (Td4s[((i) >> 16) & 0xff] << 16)
+#define TD43(i) (Td4s[((i) >> 8) & 0xff] << 8)
+#define TD44(i) (Td4s[(i) & 0xff])
+#define TD0_(i) Td0[(i) & 0xff]
+#define TD1_(i) rotr(Td0[(i) & 0xff], 8)
+#define TD2_(i) rotr(Td0[(i) & 0xff], 16)
+#define TD3_(i) rotr(Td0[(i) & 0xff], 24)
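As a reader aid, a minimal sketch (assumption: Te0 is the standard AES encryption T-table, which is the usual convention for these names) of how the rotr()-derived TE macros above combine a single Te0 table into one encryption-round output column:

static inline u32 aes_enc_round_column(u32 s0, u32 s1, u32 s2, u32 s3, u32 rk)
{
	/* SubBytes + ShiftRows + MixColumns for one column, plus AddRoundKey */
	return TE0(s0) ^ TE1(s1) ^ TE2(s2) ^ TE3(s3) ^ rk;
}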
+
+#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ \
+ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
+
+#define PUTU32(ct, st) { \
+(ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); \
+(ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
+
+#define WPA_GET_BE32(a) ((((u32) (a)[0]) << 24) | (((u32) (a)[1]) << 16) | \
+ (((u32) (a)[2]) << 8) | ((u32) (a)[3]))
+
+#define WPA_PUT_LE16(a, val) \
+ do { \
+ (a)[1] = ((u16) (val)) >> 8; \
+ (a)[0] = ((u16) (val)) & 0xff; \
+ } while (0)
+
+#define WPA_PUT_BE32(a, val) \
+ do { \
+ (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[3] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define WPA_PUT_BE64(a, val) \
+ do { \
+ (a)[0] = (u8) (((u64) (val)) >> 56); \
+ (a)[1] = (u8) (((u64) (val)) >> 48); \
+ (a)[2] = (u8) (((u64) (val)) >> 40); \
+ (a)[3] = (u8) (((u64) (val)) >> 32); \
+ (a)[4] = (u8) (((u64) (val)) >> 24); \
+ (a)[5] = (u8) (((u64) (val)) >> 16); \
+ (a)[6] = (u8) (((u64) (val)) >> 8); \
+ (a)[7] = (u8) (((u64) (val)) & 0xff); \
+ } while (0)
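Illustrative sketch (not in the patch): the byte-order helpers above are consistent with each other, i.e. WPA_PUT_BE32() followed by WPA_GET_BE32() round-trips a value through a big-endian buffer.

static inline int wpa_be32_roundtrip_ok(void)
{
	u8 buf[4];
	u32 v = 0x12345678;

	WPA_PUT_BE32(buf, v);		/* buf = { 0x12, 0x34, 0x56, 0x78 } */
	return WPA_GET_BE32(buf) == v;	/* non-zero when the helpers agree */
}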
+
+/* ===== start - public domain SHA256 implementation ===== */
+
+/* This is based on SHA256 implementation in LibTomCrypt that was released into
+ * public domain by Tom St Denis. */
+
+/* the K array */
+static const unsigned long K[64] = {
+ 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, 0x3956c25bUL,
+ 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, 0xd807aa98UL, 0x12835b01UL,
+ 0x243185beUL, 0x550c7dc3UL, 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL,
+ 0xc19bf174UL, 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
+ 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL, 0x983e5152UL,
+ 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL, 0xc6e00bf3UL, 0xd5a79147UL,
+ 0x06ca6351UL, 0x14292967UL, 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL,
+ 0x53380d13UL, 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
+ 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL, 0xd192e819UL,
+ 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL, 0x19a4c116UL, 0x1e376c08UL,
+ 0x2748774cUL, 0x34b0bcb5UL, 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL,
+ 0x682e6ff3UL, 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
+ 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
+};
+
+/* Various logical functions */
+#define RORc(x, y) \
+(((((unsigned long)(x) & 0xFFFFFFFFUL) >> (unsigned long) ((y) & 31)) | \
+((unsigned long)(x) << (unsigned long) (32 - ((y) & 31)))) & 0xFFFFFFFFUL)
+#define Ch(x, y, z) (z ^ (x & (y ^ z)))
+#define Maj(x, y, z) (((x | y) & z) | (x & y))
+#define S(x, n) RORc((x), (n))
+#define R(x, n) (((x)&0xFFFFFFFFUL)>>(n))
+#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
+#define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
+#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
+#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
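For context, a sketch of how the Gamma macros above are used in SHA-256 (the standard FIPS 180-4 message-schedule expansion; the real compress function lives in the .c file, so this is illustration only):

static inline u32 sha256_schedule_word(const u32 *W, int i)
{
	/* valid for 16 <= i < 64 */
	return Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16];
}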
+
+void rtw_secmicsetkey23a(struct mic_data *pmicdata, u8 *key);
+void rtw_secmicappend23abyte23a(struct mic_data *pmicdata, u8 b);
+void rtw_secmicappend23a(struct mic_data *pmicdata, u8 *src, u32 nbBytes);
+void rtw_secgetmic23a(struct mic_data *pmicdata, u8 *dst);
+
+void rtw_seccalctkipmic23a(u8 *key, u8 *header, u8 *data, u32 data_len,
+ u8 *Miccode, u8 priorityi);
+
+u32 rtw_aes_encrypt23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe);
+u32 rtw_tkip_encrypt23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe);
+void rtw_wep_encrypt23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe);
+u32 rtw_aes_decrypt23a(struct rtw_adapter *padapter,
+ struct recv_frame *precvframe);
+u32 rtw_tkip_decrypt23a(struct rtw_adapter *padapter,
+ struct recv_frame *precvframe);
+void rtw_wep_decrypt23a(struct rtw_adapter *padapter, struct recv_frame *precvframe);
+
+void rtw_use_tkipkey_handler23a(void *FunctionContext);
+
+#endif /* __RTL871X_SECURITY_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_sreset.h b/drivers/staging/rtl8723au/include/rtw_sreset.h
new file mode 100644
index 000000000000..4c523722dd14
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_sreset.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_SRESET_C_
+#define _RTW_SRESET_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+enum {
+ SRESET_TGP_NULL = 0,
+ SRESET_TGP_XMIT_STATUS = 1,
+ SRESET_TGP_LINK_STATUS = 2,
+};
+
+struct sreset_priv {
+ struct mutex silentreset_mutex;
+ u8 silent_reset_inprogress;
+ u8 Wifi_Error_Status;
+ unsigned long last_tx_time;
+ unsigned long last_tx_complete_time;
+
+ s32 dbg_trigger_point;
+};
+
+#include <rtl8723a_hal.h>
+
+#define WIFI_STATUS_SUCCESS 0
+#define USB_VEN_REQ_CMD_FAIL BIT0
+#define USB_READ_PORT_FAIL BIT1
+#define USB_WRITE_PORT_FAIL BIT2
+#define WIFI_MAC_TXDMA_ERROR BIT3
+#define WIFI_TX_HANG BIT4
+#define WIFI_RX_HANG BIT5
+#define WIFI_IF_NOT_EXIST BIT6
+
+void sreset_init_value23a(struct rtw_adapter *padapter);
+void sreset_reset_value23a(struct rtw_adapter *padapter);
+u8 sreset_get_wifi_status23a(struct rtw_adapter *padapter);
+void sreset_set_wifi_error_status23a(struct rtw_adapter *padapter, u32 status);
+void sreset_set_trigger_point(struct rtw_adapter *padapter, s32 tgp);
+bool sreset_inprogress(struct rtw_adapter *padapter);
+void sreset_reset(struct rtw_adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_version.h b/drivers/staging/rtl8723au/include/rtw_version.h
new file mode 100644
index 000000000000..c947733a3e3e
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_version.h
@@ -0,0 +1 @@
+#define DRIVERVERSION "v4.1.6_7336.20130426"
diff --git a/drivers/staging/rtl8723au/include/rtw_xmit.h b/drivers/staging/rtl8723au/include/rtw_xmit.h
new file mode 100644
index 000000000000..65a33a07c8ee
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/rtw_xmit.h
@@ -0,0 +1,407 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_XMIT_H_
+#define _RTW_XMIT_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define MAX_XMITBUF_SZ 2048
+#define NR_XMITBUFF 4
+
+#define XMITBUF_ALIGN_SZ 512
+
+/* xmit extension buffer definition */
+#define MAX_XMIT_EXTBUF_SZ 1536
+#define NR_XMIT_EXTBUFF 32
+
+#define MAX_NUMBLKS 1
+
+#define XMIT_VO_QUEUE 0
+#define XMIT_VI_QUEUE 1
+#define XMIT_BE_QUEUE 2
+#define XMIT_BK_QUEUE 3
+
+#define VO_QUEUE_INX 0
+#define VI_QUEUE_INX 1
+#define BE_QUEUE_INX 2
+#define BK_QUEUE_INX 3
+#define BCN_QUEUE_INX 4
+#define MGT_QUEUE_INX 5
+#define HIGH_QUEUE_INX 6
+#define TXCMD_QUEUE_INX 7
+
+#define HW_QUEUE_ENTRY 8
+
+#define WEP_IV(pattrib_iv, dot11txpn, keyidx) \
+do { \
+ pattrib_iv[0] = dot11txpn._byte_.TSC0; \
+ pattrib_iv[1] = dot11txpn._byte_.TSC1; \
+ pattrib_iv[2] = dot11txpn._byte_.TSC2; \
+ pattrib_iv[3] = ((keyidx & 0x3) << 6); \
+ dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0 : \
+ (dot11txpn.val+1); \
+} while (0)
+
+#define TKIP_IV(pattrib_iv, dot11txpn, keyidx) \
+do { \
+ pattrib_iv[0] = dot11txpn._byte_.TSC1; \
+ pattrib_iv[1] = (dot11txpn._byte_.TSC1 | 0x20) & 0x7f; \
+ pattrib_iv[2] = dot11txpn._byte_.TSC0; \
+ pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6); \
+ pattrib_iv[4] = dot11txpn._byte_.TSC2; \
+ pattrib_iv[5] = dot11txpn._byte_.TSC3; \
+ pattrib_iv[6] = dot11txpn._byte_.TSC4; \
+ pattrib_iv[7] = dot11txpn._byte_.TSC5; \
+ dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : \
+ (dot11txpn.val+1); \
+} while (0)
+
+#define AES_IV(pattrib_iv, dot11txpn, keyidx)\
+do { \
+ pattrib_iv[0] = dot11txpn._byte_.TSC0; \
+ pattrib_iv[1] = dot11txpn._byte_.TSC1; \
+ pattrib_iv[2] = 0; \
+ pattrib_iv[3] = BIT(5) | ((keyidx & 0x3) << 6); \
+ pattrib_iv[4] = dot11txpn._byte_.TSC2; \
+ pattrib_iv[5] = dot11txpn._byte_.TSC3; \
+ pattrib_iv[6] = dot11txpn._byte_.TSC4; \
+ pattrib_iv[7] = dot11txpn._byte_.TSC5; \
+ dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : \
+ (dot11txpn.val+1); \
+} while (0)
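Worked illustration of the TKIP_IV() layout above (example values only, assuming the little-endian union pn48 mapping in which TSC0 is the least significant byte): with dot11txpn.val == 0x0000A1B2C3D4E5F6 and keyidx == 1 the macro emits

/*
 *	iv[0] = TSC1                  = 0xE5
 *	iv[1] = (TSC1 | 0x20) & 0x7f  = 0x65	(the TKIP "WEPSeed" byte)
 *	iv[2] = TSC0                  = 0xF6
 *	iv[3] = BIT(5) | (1 << 6)     = 0x60	(ExtIV set, key id 1)
 *	iv[4..7] = TSC2..TSC5         = 0xD4, 0xC3, 0xB2, 0xA1
 *
 * and then increments the 48-bit counter.
 */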
+
+#define HWXMIT_ENTRY 4
+
+#define TXDESC_SIZE 32
+
+#define PACKET_OFFSET_SZ 8
+#define TXDESC_OFFSET (TXDESC_SIZE + PACKET_OFFSET_SZ)
+
+struct tx_desc {
+ /* DWORD 0 */
+ unsigned int txdw0;
+ unsigned int txdw1;
+ unsigned int txdw2;
+ unsigned int txdw3;
+ unsigned int txdw4;
+ unsigned int txdw5;
+ unsigned int txdw6;
+ unsigned int txdw7;
+};
+
+union txdesc {
+ struct tx_desc txdesc;
+ unsigned int value[TXDESC_SIZE>>2];
+};
+
+struct hw_xmit {
+ struct rtw_queue *sta_queue;
+ int accnt;
+};
+
+/* reduce size */
+struct pkt_attrib {
+ u8 type;
+ u8 subtype;
+ u8 bswenc;
+ u8 dhcp_pkt;
+ u16 ether_type;
+ u16 seqnum;
+ u16 pkt_hdrlen; /* the original 802.3 pkt header len */
+ u16 hdrlen; /* the WLAN Header Len */
+ u32 pktlen; /* the original 802.3 pkt raw_data len */
+ u32 last_txcmdsz;
+ u8 nr_frags;
+ u8 encrypt; /* 0 indicates no encryption */
+ u8 iv_len;
+ u8 icv_len;
+ u8 iv[18];
+ u8 icv[16];
+ u8 priority;
+ u8 ack_policy;
+ u8 mac_id;
+ u8 vcs_mode; /* virtual carrier sense method */
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ u8 ra[ETH_ALEN];
+ u8 key_idx;
+ u8 qos_en;
+ u8 ht_en;
+ u8 raid;/* rate adaptive id */
+ u8 bwmode;
+ u8 ch_offset;/* PRIME_CHNL_OFFSET */
+ u8 sgi;/* short GI */
+ u8 ampdu_en;/* tx ampdu enable */
+ u8 mdata;/* more data bit */
+ u8 pctrl;/* per packet txdesc control enable */
+ u8 triggered;/* for ap mode handling Power Saving sta */
+ u8 qsel;
+ u8 eosp;
+ u8 rate;
+ u8 retry_ctrl;
+ struct sta_info *psta;
+};
+
+#define WLANHDR_OFFSET 64
+
+#define NULL_FRAMETAG 0x0
+#define DATA_FRAMETAG 0x01
+#define L2_FRAMETAG 0x02
+#define MGNT_FRAMETAG 0x03
+#define AMSDU_FRAMETAG 0x04
+
+#define EII_FRAMETAG 0x05
+#define IEEE8023_FRAMETAG 0x06
+
+#define MP_FRAMETAG 0x07
+
+#define TXAGG_FRAMETAG 0x08
+
+struct submit_ctx {
+ u32 timeout_ms; /* <0: not synchronous, 0: wait forever,
+ * >0: wait up to timeout_ms ms
+ */
+ int status; /* status for operation */
+ struct completion done;
+};
+
+enum {
+ RTW_SCTX_SUBMITTED = -1,
+ RTW_SCTX_DONE_SUCCESS = 0,
+ RTW_SCTX_DONE_UNKNOWN,
+ RTW_SCTX_DONE_TIMEOUT,
+ RTW_SCTX_DONE_BUF_ALLOC,
+ RTW_SCTX_DONE_BUF_FREE,
+ RTW_SCTX_DONE_WRITE_PORT_ERR,
+ RTW_SCTX_DONE_TX_DESC_NA,
+ RTW_SCTX_DONE_TX_DENY,
+ RTW_SCTX_DONE_CCX_PKT_FAIL,
+ RTW_SCTX_DONE_DRV_STOP,
+ RTW_SCTX_DONE_DEV_REMOVE,
+};
+
+void rtw_sctx_init23a(struct submit_ctx *sctx, int timeout_ms);
+int rtw_sctx_wait23a(struct submit_ctx *sctx);
+void rtw23a_sctx_done_err(struct submit_ctx **sctx, int status);
+void rtw_sctx_done23a(struct submit_ctx **sctx);
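A minimal sketch of the submit-context handshake suggested by the declarations above (usage pattern assumed from the names; the real callers are in the xmit/IO paths):

static inline int example_submit_and_wait(struct submit_ctx *sctx)
{
	rtw_sctx_init23a(sctx, 2000);	/* wait up to 2000 ms */
	/* ... hand sctx to the completion path, e.g. via xmit_buf->sctx ... */
	return rtw_sctx_wait23a(sctx);	/* sctx->status then holds the RTW_SCTX_DONE_* result */
}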
+
+struct xmit_buf {
+ struct list_head list, list2;
+ struct rtw_adapter *padapter;
+
+ u8 *pallocated_buf;
+ u8 *pbuf;
+ void *priv_data;
+
+ u16 ext_tag; /* 0: Normal xmitbuf, 1: extension xmitbuf. */
+ u16 flags;
+ u32 alloc_sz;
+ u32 len;
+ struct submit_ctx *sctx;
+ u32 ff_hwaddr;
+ struct urb *pxmit_urb[8];
+ u8 bpending[8];
+ int last[8];
+#if defined(DBG_XMIT_BUF) || defined(DBG_XMIT_BUF_EXT)
+ u8 no;
+#endif
+};
+
+struct xmit_frame {
+ struct list_head list;
+ struct pkt_attrib attrib;
+ struct sk_buff *pkt;
+ int frame_tag;
+ struct rtw_adapter *padapter;
+ u8 *buf_addr;
+ struct xmit_buf *pxmitbuf;
+
+ s8 pkt_offset;
+
+ u8 ack_report;
+
+ u8 ext_tag; /* 0:data, 1:mgmt */
+};
+
+struct tx_servq {
+ struct list_head tx_pending;
+ struct rtw_queue sta_pending;
+ int qcnt;
+};
+
+struct sta_xmit_priv {
+ spinlock_t lock;
+ int option;
+ int apsd_setting; /* When bit mask is on, the associated edca
+ * queue supports APSD.
+ */
+ struct tx_servq be_q; /* priority == 0,3 */
+ struct tx_servq bk_q; /* priority == 1,2 */
+ struct tx_servq vi_q; /* priority == 4,5 */
+ struct tx_servq vo_q; /* priority == 6,7 */
+ struct list_head legacy_dz;
+ struct list_head apsd;
+ u16 txseq_tid[16];
+};
+
+struct hw_txqueue {
+ volatile int head;
+ volatile int tail;
+ volatile int free_sz; /* in units of 64 bytes */
+ volatile int free_cmdsz;
+ volatile int txsz[8];
+ uint ff_hwaddr;
+ uint cmd_hwaddr;
+ int ac_tag;
+};
+
+struct agg_pkt_info {
+ u16 offset;
+ u16 pkt_len;
+};
+
+struct xmit_priv {
+ spinlock_t lock;
+
+ struct semaphore xmit_sema;
+ struct semaphore terminate_xmitthread_sema;
+
+ struct rtw_queue be_pending;
+ struct rtw_queue bk_pending;
+ struct rtw_queue vi_pending;
+ struct rtw_queue vo_pending;
+ struct rtw_queue bm_pending;
+
+ u8 *pallocated_frame_buf;
+ u8 *pxmit_frame_buf;
+ uint free_xmitframe_cnt;
+ struct rtw_queue free_xmit_queue;
+
+ u8 *xframe_ext_alloc_addr;
+ u8 *xframe_ext;
+ uint free_xframe_ext_cnt;
+ struct rtw_queue free_xframe_ext_queue;
+
+ uint frag_len;
+
+ struct rtw_adapter *adapter;
+
+ u8 vcs_setting;
+ u8 vcs;
+ u8 vcs_type;
+
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_drop;
+ u64 last_tx_bytes;
+ u64 last_tx_pkts;
+
+ struct hw_xmit *hwxmits;
+ u8 hwxmit_entry;
+
+ u8 wmm_para_seq[4];/* sequence for wmm ac parameter strength from
+ * large to small. its value is 0->vo, 1->vi,
+ * 2->be, 3->bk.
+ */
+
+ struct semaphore tx_retevt;/* all tx return event; */
+ u8 txirp_cnt;/* */
+
+ struct tasklet_struct xmit_tasklet;
+ /* per AC pending irp */
+ int beq_cnt;
+ int bkq_cnt;
+ int viq_cnt;
+ int voq_cnt;
+
+ struct rtw_queue free_xmitbuf_queue;
+ struct list_head xmitbuf_list; /* track buffers for cleanup */
+ struct rtw_queue pending_xmitbuf_queue;
+ uint free_xmitbuf_cnt;
+
+ struct rtw_queue free_xmit_extbuf_queue;
+ struct list_head xmitextbuf_list; /* track buffers for cleanup */
+ uint free_xmit_extbuf_cnt;
+
+ u16 nqos_ssn;
+ int ack_tx;
+ struct mutex ack_tx_mutex;
+ struct submit_ctx ack_tx_ops;
+ spinlock_t lock_sctx;
+};
+
+struct xmit_buf *rtw_alloc_xmitbuf23a_ext(struct xmit_priv *pxmitpriv);
+s32 rtw_free_xmitbuf_ext23a(struct xmit_priv *pxmitpriv,
+ struct xmit_buf *pxmitbuf);
+
+struct xmit_buf *rtw_alloc_xmitbuf23a(struct xmit_priv *pxmitpriv);
+s32 rtw_free_xmitbuf23a(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
+
+void rtw_count_tx_stats23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe, int sz);
+void rtw_update_protection23a(struct rtw_adapter *padapter, u8 *ie, uint ie_len);
+s32 rtw_make_wlanhdr23a(struct rtw_adapter *padapter, u8 *hdr,
+ struct pkt_attrib *pattrib);
+s32 rtw_put_snap23a(u8 *data, u16 h_proto);
+struct xmit_frame *rtw_alloc_xmitframe23a(struct xmit_priv *pxmitpriv);
+struct xmit_frame *rtw_alloc_xmitframe23a_ext(struct xmit_priv *pxmitpriv);
+struct xmit_frame *rtw_alloc_xmitframe23a_once(struct xmit_priv *pxmitpriv);
+s32 rtw_free_xmitframe23a(struct xmit_priv *pxmitpriv,
+ struct xmit_frame *pxmitframe);
+void rtw_free_xmitframe_queue23a(struct xmit_priv *pxmitpriv, struct rtw_queue *pframequeue);
+struct tx_servq *rtw_get_sta_pending23a(struct rtw_adapter *padapter,
+ struct sta_info *psta, int up, u8 *ac);
+s32 rtw_xmitframe_enqueue23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe);
+struct xmit_frame *rtw_dequeue_xframe23a(struct xmit_priv *pxmitpriv,
+ struct hw_xmit *phwxmit_i, int entry);
+s32 rtw_xmit23a_classifier(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe);
+u32 rtw_calculate_wlan_pkt_size_by_attribue23a(struct pkt_attrib *pattrib);
+#define rtw_wlan_pkt_size(f) rtw_calculate_wlan_pkt_size_by_attribue23a(&f->attrib)
+s32 rtw_xmitframe_coalesce23a(struct rtw_adapter *padapter, struct sk_buff *pkt,
+ struct xmit_frame *pxmitframe);
+s32 _rtw_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
+void _rtw_init_sta_xmit_priv23a(struct sta_xmit_priv *psta_xmitpriv);
+
+s32 rtw_txframes_pending23a(struct rtw_adapter *padapter);
+s32 rtw_txframes_sta_ac_pending23a(struct rtw_adapter *padapter,
+ struct pkt_attrib *pattrib);
+void rtw_init_hwxmits23a(struct hw_xmit *phwxmit, int entry);
+s32 _rtw_init_xmit_priv23a(struct xmit_priv *pxmitpriv,
+ struct rtw_adapter *padapter);
+void _rtw_free_xmit_priv23a(struct xmit_priv *pxmitpriv);
+void rtw_alloc_hwxmits23a(struct rtw_adapter *padapter);
+void rtw_free_hwxmits23a(struct rtw_adapter *padapter);
+int rtw_xmit23a(struct rtw_adapter *padapter, struct sk_buff *pkt);
+#if defined(CONFIG_8723AU_AP_MODE)
+int xmitframe_enqueue_for_sleeping_sta23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxmitframe);
+void stop_sta_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta);
+void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta);
+void xmit_delivery_enabled_frames23a(struct rtw_adapter *padapter,
+ struct sta_info *psta);
+#endif
+u8 qos_acm23a(u8 acm_mask, u8 priority);
+u32 rtw_get_ff_hwaddr23a(struct xmit_frame *pxmitframe);
+int rtw_ack_tx_wait23a(struct xmit_priv *pxmitpriv, u32 timeout_ms);
+void rtw_ack_tx_done23a(struct xmit_priv *pxmitpriv, int status);
+
+/* include after declaring struct xmit_buf, in order to avoid warning */
+#include <xmit_osdep.h>
+
+#endif /* _RTL871X_XMIT_H_ */
diff --git a/drivers/staging/rtl8723au/include/sta_info.h b/drivers/staging/rtl8723au/include/sta_info.h
new file mode 100644
index 000000000000..ffbc9e3f2156
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/sta_info.h
@@ -0,0 +1,385 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __STA_INFO_H_
+#define __STA_INFO_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+
+#define IBSS_START_MAC_ID 2
+#define NUM_STA 32
+#define NUM_ACL 16
+
+
+/* if mode == 0, the sta is allowed once its addr is hit. */
+/* if mode == 1, the sta is rejected once its addr is not hit. */
+struct rtw_wlan_acl_node {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u8 valid;
+};
+
+/* mode=0, disable */
+/* mode=1, accept unless in deny list */
+/* mode=2, deny unless in accept list */
+struct wlan_acl_pool {
+ int mode;
+ int num;
+ struct rtw_wlan_acl_node aclnode[NUM_ACL];
+ struct rtw_queue acl_node_q;
+};
+
+struct rssi_sta {
+ s32 UndecoratedSmoothedPWDB;
+ s32 UndecoratedSmoothedCCK;
+ s32 UndecoratedSmoothedOFDM;
+ u64 PacketMap;
+ u8 ValidBit;
+};
+
+struct stainfo_stats {
+ u64 rx_mgnt_pkts;
+ u64 rx_beacon_pkts;
+ u64 rx_probereq_pkts;
+ u64 rx_probersp_pkts;
+ u64 rx_probersp_bm_pkts;
+ u64 rx_probersp_uo_pkts;
+ u64 rx_ctrl_pkts;
+ u64 rx_data_pkts;
+
+ u64 last_rx_mgnt_pkts;
+ u64 last_rx_beacon_pkts;
+ u64 last_rx_probereq_pkts;
+ u64 last_rx_probersp_pkts;
+ u64 last_rx_probersp_bm_pkts;
+ u64 last_rx_probersp_uo_pkts;
+ u64 last_rx_ctrl_pkts;
+ u64 last_rx_data_pkts;
+
+ u64 rx_bytes;
+ u64 rx_drops;
+
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_drops;
+
+};
+
+struct sta_info {
+ spinlock_t lock;
+ struct list_head list; /* free_sta_queue */
+ struct list_head hash_list; /* sta_hash */
+ struct rtw_adapter *padapter;
+
+ struct sta_xmit_priv sta_xmitpriv;
+ struct sta_recv_priv sta_recvpriv;
+
+ struct rtw_queue sleep_q;
+ unsigned int sleepq_len;
+
+ uint state;
+ uint aid;
+ uint mac_id;
+ uint qos_option;
+ u8 hwaddr[ETH_ALEN];
+
+ uint ieee8021x_blocked; /* 0: allowed, 1:blocked */
+ uint dot118021XPrivacy; /* aes, tkip... */
+ union Keytype dot11tkiptxmickey;
+ union Keytype dot11tkiprxmickey;
+ union Keytype dot118021x_UncstKey;
+ union pn48 dot11txpn; /* PN48 used for Unicast xmit. */
+ union pn48 dot11rxpn; /* PN48 used for Unicast recv. */
+
+
+ u8 bssrateset[16];
+ u32 bssratelen;
+ s32 rssi;
+ s32 signal_quality;
+
+ u8 cts2self;
+ u8 rtsen;
+
+ u8 raid;
+ u8 init_rate;
+ u32 ra_mask;
+ u8 wireless_mode; /* NETWORK_TYPE */
+ struct stainfo_stats sta_stats;
+
+ /* for A-MPDU TX, ADDBA timeout check */
+ struct timer_list addba_retry_timer;
+
+ /* for A-MPDU Rx reordering buffer control */
+ struct recv_reorder_ctrl recvreorder_ctrl[16];
+
+ /* for A-MPDU Tx */
+ /* unsigned char ampdu_txen_bitmap; */
+ u16 BA_starting_seqctrl[16];
+
+ struct ht_priv htpriv;
+
+ /* Notes: */
+ /* STA_Mode: */
+ /* curr_network(mlme_priv/security_priv/qos/ht) + sta_info: (STA & AP) CAP/INFO */
+ /* scan_q: AP CAP/INFO */
+
+ /* AP_Mode: */
+ /* curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO */
+ /* sta_info: (AP & STA) CAP/INFO */
+
+ struct list_head asoc_list;
+ struct list_head auth_list;
+
+ unsigned int expire_to;
+ unsigned int auth_seq;
+ unsigned int authalg;
+ unsigned char chg_txt[128];
+
+ u16 capability;
+ int flags;
+
+ int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
+ int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
+ int wpa_group_cipher;
+ int wpa2_group_cipher;
+ int wpa_pairwise_cipher;
+ int wpa2_pairwise_cipher;
+
+ u8 bpairwise_key_installed;
+
+ u8 wpa_ie[32];
+
+ u8 nonerp_set;
+ u8 no_short_slot_time_set;
+ u8 no_short_preamble_set;
+ u8 no_ht_gf_set;
+ u8 no_ht_set;
+ u8 ht_20mhz_set;
+
+ unsigned int tx_ra_bitmap;
+ u8 qos_info;
+
+ u8 max_sp_len;
+ u8 uapsd_bk;/* BIT(0): Delivery enabled, BIT(1): Trigger enabled */
+ u8 uapsd_be;
+ u8 uapsd_vi;
+ u8 uapsd_vo;
+
+ u8 has_legacy_ac;
+ unsigned int sleepq_ac_len;
+
+ /* p2p priv data */
+ u8 is_p2p_device;
+ u8 p2p_status_code;
+
+ u8 keep_alive_trycnt;
+
+ /* p2p client info */
+ u8 dev_addr[ETH_ALEN];
+ u8 dev_cap;
+ u16 config_methods;
+ u8 primary_dev_type[8];
+ u8 num_of_secdev_type;
+ u8 secdev_types_list[32];/* 32/8 == 4; */
+ u16 dev_name_len;
+ u8 dev_name[32];
+ u8 *passoc_req;
+ u32 assoc_req_len;
+
+ /* for DM */
+ struct rssi_sta rssi_stat;
+
+ /* */
+ /* ================ODM Relative Info======================= */
+ /* Please be careful, don't declare too many structures here. The memory cost is multiplied by the number of supported STAs. */
+ /* */
+ /* */
+ /* 2011/10/20 MH Add for ODM STA info. */
+ /* */
+ /* Driver Write */
+ u8 bValid; /* records whether the sta is linked */
+ u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
+ u8 rssi_level; /* for Refresh RA mask */
+ /* ODM Write */
+ /* 1 PHY_STATUS_INFO */
+ u8 RSSI_Path[4]; /* */
+ u8 RSSI_Ave;
+ u8 RXEVM[4];
+ u8 RXSNR[4];
+
+ /* ODM Write */
+ /* 1 TX_INFO (may changed by IC) */
+ /* ================ODM Relative Info======================= */
+ /* */
+
+ /* To store the sequence number of received management frame */
+ u16 RxMgmtFrameSeqNum;
+};
+
+#define sta_rx_pkts(sta) \
+ (sta->sta_stats.rx_mgnt_pkts \
+ + sta->sta_stats.rx_ctrl_pkts \
+ + sta->sta_stats.rx_data_pkts)
+
+#define sta_last_rx_pkts(sta) \
+ (sta->sta_stats.last_rx_mgnt_pkts \
+ + sta->sta_stats.last_rx_ctrl_pkts \
+ + sta->sta_stats.last_rx_data_pkts)
+
+#define sta_rx_data_pkts(sta) \
+ (sta->sta_stats.rx_data_pkts)
+
+#define sta_last_rx_data_pkts(sta) \
+ (sta->sta_stats.last_rx_data_pkts)
+
+#define sta_rx_mgnt_pkts(sta) \
+ (sta->sta_stats.rx_mgnt_pkts)
+
+#define sta_last_rx_mgnt_pkts(sta) \
+ (sta->sta_stats.last_rx_mgnt_pkts)
+
+#define sta_rx_beacon_pkts(sta) \
+ (sta->sta_stats.rx_beacon_pkts)
+
+#define sta_last_rx_beacon_pkts(sta) \
+ (sta->sta_stats.last_rx_beacon_pkts)
+
+#define sta_rx_probereq_pkts(sta) \
+ (sta->sta_stats.rx_probereq_pkts)
+
+#define sta_last_rx_probereq_pkts(sta) \
+ (sta->sta_stats.last_rx_probereq_pkts)
+
+#define sta_rx_probersp_pkts(sta) \
+ (sta->sta_stats.rx_probersp_pkts)
+
+#define sta_last_rx_probersp_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_pkts)
+
+#define sta_rx_probersp_bm_pkts(sta) \
+ (sta->sta_stats.rx_probersp_bm_pkts)
+
+#define sta_last_rx_probersp_bm_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_bm_pkts)
+
+#define sta_rx_probersp_uo_pkts(sta) \
+ (sta->sta_stats.rx_probersp_uo_pkts)
+
+#define sta_last_rx_probersp_uo_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_uo_pkts)
+
+#define sta_update_last_rx_pkts(sta) \
+ do { \
+ sta->sta_stats.last_rx_mgnt_pkts = sta->sta_stats.rx_mgnt_pkts; \
+ sta->sta_stats.last_rx_beacon_pkts = sta->sta_stats.rx_beacon_pkts; \
+ sta->sta_stats.last_rx_probereq_pkts = sta->sta_stats.rx_probereq_pkts; \
+ sta->sta_stats.last_rx_probersp_pkts = sta->sta_stats.rx_probersp_pkts; \
+ sta->sta_stats.last_rx_probersp_bm_pkts = sta->sta_stats.rx_probersp_bm_pkts; \
+ sta->sta_stats.last_rx_probersp_uo_pkts = sta->sta_stats.rx_probersp_uo_pkts; \
+ sta->sta_stats.last_rx_ctrl_pkts = sta->sta_stats.rx_ctrl_pkts; \
+ sta->sta_stats.last_rx_data_pkts = sta->sta_stats.rx_data_pkts; \
+ } while (0)
+
+#define STA_RX_PKTS_ARG(sta) \
+ sta->sta_stats.rx_mgnt_pkts \
+ , sta->sta_stats.rx_ctrl_pkts \
+ , sta->sta_stats.rx_data_pkts
+
+#define STA_LAST_RX_PKTS_ARG(sta) \
+ sta->sta_stats.last_rx_mgnt_pkts, \
+ sta->sta_stats.last_rx_ctrl_pkts, \
+ sta->sta_stats.last_rx_data_pkts
+
+#define STA_RX_PKTS_DIFF_ARG(sta) \
+ sta->sta_stats.rx_mgnt_pkts - sta->sta_stats.last_rx_mgnt_pkts, \
+ sta->sta_stats.rx_ctrl_pkts - sta->sta_stats.last_rx_ctrl_pkts, \
+ sta->sta_stats.rx_data_pkts - sta->sta_stats.last_rx_data_pkts
+
+#define STA_PKTS_FMT "(m:%llu, c:%llu, d:%llu)"
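Illustrative use of the stat helpers above (hypothetical debug line; DBG_8723A is assumed to take printf-style arguments, as it does elsewhere in this patch):

/*
 *	DBG_8723A("rx " STA_PKTS_FMT ", delta " STA_PKTS_FMT "\n",
 *		  STA_RX_PKTS_ARG(psta), STA_RX_PKTS_DIFF_ARG(psta));
 *	sta_update_last_rx_pkts(psta);
 */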
+
+struct sta_priv {
+ u8 *pallocated_stainfo_buf;
+ u8 *pstainfo_buf;
+ struct rtw_queue free_sta_queue;
+
+ spinlock_t sta_hash_lock;
+ struct list_head sta_hash[NUM_STA];
+ int asoc_sta_count;
+ struct rtw_queue sleep_q;
+ struct rtw_queue wakeup_q;
+
+ struct rtw_adapter *padapter;
+ struct list_head asoc_list;
+ struct list_head auth_list;
+ spinlock_t asoc_list_lock;
+ spinlock_t auth_list_lock;
+ u8 asoc_list_cnt;
+ u8 auth_list_cnt;
+
+ unsigned int auth_to; /* sec, time to expire in authenticating. */
+ unsigned int assoc_to; /* sec, time to expire before associating. */
+ unsigned int expire_to; /* sec, time to expire after associated. */
+
+ /* pointers to STA info, indexed by allocated AID, or NULL if the AID is free.
+ * AID is in the range 1-2007, so sta_aid[0] corresponds to AID 1,
+ * and so on.
+ */
+ struct sta_info *sta_aid[NUM_STA];
+
+ u16 sta_dz_bitmap;/* only supports 15 stations, station aid bitmap
+ * for sleeping sta. */
+ u16 tim_bitmap;/* only support 15 stations,
+ * aid=0~15 mapping bit0~bit15 */
+
+ u16 max_num_sta;
+
+ struct wlan_acl_pool acl_list;
+};
+
+static inline u32 wifi_mac_hash(u8 *mac)
+{
+ u32 x;
+
+ x = mac[0];
+ x = (x << 2) ^ mac[1];
+ x = (x << 2) ^ mac[2];
+ x = (x << 2) ^ mac[3];
+ x = (x << 2) ^ mac[4];
+ x = (x << 2) ^ mac[5];
+
+ x ^= x >> 8;
+ x = x & (NUM_STA - 1);
+
+ return x;
+}
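For illustration (the real lookup is rtw_get_stainfo23a(); this sketch only shows the bucket selection implied by the hash above):

static inline struct list_head *sta_hash_bucket(struct sta_priv *pstapriv,
						u8 *hwaddr)
{
	/* wifi_mac_hash() already masks with (NUM_STA - 1) */
	return &pstapriv->sta_hash[wifi_mac_hash(hwaddr)];
}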
+
+u32 _rtw_init_sta_priv23a(struct sta_priv *pstapriv);
+u32 _rtw_free_sta_priv23a(struct sta_priv *pstapriv);
+
+#define stainfo_offset_valid(offset) (offset < NUM_STA && offset >= 0)
+int rtw_stainfo_offset23a(struct sta_priv *stapriv, struct sta_info *sta);
+struct sta_info *rtw_get_stainfo23a_by_offset23a(struct sta_priv *stapriv,
+ int offset);
+
+struct sta_info *rtw_alloc_stainfo23a(struct sta_priv *pstapriv, u8 *hwaddr);
+u32 rtw_free_stainfo23a(struct rtw_adapter *padapter, struct sta_info *psta);
+void rtw_free_all_stainfo23a(struct rtw_adapter *padapter);
+struct sta_info *rtw_get_stainfo23a(struct sta_priv *pstapriv, u8 *hwaddr);
+u32 rtw_init_bcmc_stainfo23a(struct rtw_adapter *padapter);
+struct sta_info *rtw_get_bcmc_stainfo23a(struct rtw_adapter *padapter);
+u8 rtw_access_ctrl23a(struct rtw_adapter *padapter, u8 *mac_addr);
+
+#endif /* _STA_INFO_H_ */
diff --git a/drivers/staging/rtl8723au/include/usb_hal.h b/drivers/staging/rtl8723au/include/usb_hal.h
new file mode 100644
index 000000000000..4edec3b539b7
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/usb_hal.h
@@ -0,0 +1,20 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __USB_HAL_H__
+#define __USB_HAL_H__
+
+int rtl8723au_set_hal_ops(struct rtw_adapter *padapter);
+
+#endif /* __USB_HAL_H__ */
diff --git a/drivers/staging/rtl8723au/include/usb_ops.h b/drivers/staging/rtl8723au/include/usb_ops.h
new file mode 100644
index 000000000000..55d1380f9036
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/usb_ops.h
@@ -0,0 +1,97 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __USB_OPS_H_
+#define __USB_OPS_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <osdep_intf.h>
+#include <usb_ops_linux.h>
+
+#define REALTEK_USB_VENQT_READ 0xC0
+#define REALTEK_USB_VENQT_WRITE 0x40
+#define REALTEK_USB_VENQT_CMD_REQ 0x05
+#define REALTEK_USB_VENQT_CMD_IDX 0x00
+
+enum {
+ VENDOR_WRITE = 0x00,
+ VENDOR_READ = 0x01,
+};
+
+#define ALIGNMENT_UNIT 16
+#define MAX_VENDOR_REQ_CMD_SIZE 254 /* 8188cu SIE Support */
+#define MAX_USB_IO_CTL_SIZE (MAX_VENDOR_REQ_CMD_SIZE + ALIGNMENT_UNIT)
+
+#define rtw_usb_control_msg(dev, pipe, request, requesttype, value, \
+ index, data, size, timeout_ms) \
+ usb_control_msg((dev), (pipe), (request), (requesttype), \
+ (value), (index), (data), (size), (timeout_ms))
+#define rtw_usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout_ms) \
+ usb_bulk_msg((usb_dev), (pipe), (data), (len), (actual_length), \
+ (timeout_ms))
+
+void rtl8723au_set_hw_type(struct rtw_adapter *padapter);
+#define hal_set_hw_type rtl8723au_set_hw_type
+
+void rtl8723au_set_intf_ops(struct _io_ops *pops);
+#define usb_set_intf_ops rtl8723au_set_intf_ops
+
+void rtl8723au_recv_tasklet(void *priv);
+
+void rtl8723au_xmit_tasklet(void *priv);
+
+/* Increment the continual_urb_error counter of this dvobj and check whether it
+ * exceeds MAX_CONTINUAL_URB_ERR. Returns true if it does, false otherwise.
+ */
+static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
+{
+ int ret = false;
+ int value;
+
+ value = atomic_inc_return(&dvobj->continual_urb_error);
+ if (value > MAX_CONTINUAL_URB_ERR) {
+ DBG_8723A("[dvobj:%p][ERROR] continual_urb_error:%d > %d\n",
+ dvobj, value, MAX_CONTINUAL_URB_ERR);
+ ret = true;
+ }
+ return ret;
+}
+
+/* Reset the continual_urb_error counter of this dvobj to 0 */
+static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
+{
+ atomic_set(&dvobj->continual_urb_error, 0);
+}
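Sketch of the intended usage of the two helpers above (hypothetical completion-handler fragment, not part of the patch):

static inline void example_urb_result(struct dvobj_priv *dvobj, int urb_status)
{
	if (urb_status) {
		if (rtw_inc_and_chk_continual_urb_error(dvobj)) {
			/* too many consecutive URB errors: the caller
			 * would schedule recovery here */
		}
	} else {
		rtw_reset_continual_urb_error(dvobj);
	}
}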
+
+#define USB_HIGH_SPEED_BULK_SIZE 512
+#define USB_FULL_SPEED_BULK_SIZE 64
+
+static inline u8 rtw_usb_bulk_size_boundary(struct rtw_adapter *padapter,
+ int buf_len)
+{
+ u8 rst = true;
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+
+ if (pdvobjpriv->ishighspeed)
+ rst = (0 == (buf_len) % USB_HIGH_SPEED_BULK_SIZE) ?
+ true : false;
+ else
+ rst = (0 == (buf_len) % USB_FULL_SPEED_BULK_SIZE) ?
+ true : false;
+ return rst;
+}
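Context for the boundary check above (general USB rule, not specific to this patch): a bulk transfer whose length is an exact multiple of the endpoint's max packet size ends on a packet boundary, so the sender must either pad the transfer or issue a zero-length packet for the device to detect its end. A hypothetical caller might do:

static inline int example_pad_len(struct rtw_adapter *padapter, int len)
{
	/* illustrative policy only: add one padding byte when the length
	 * sits exactly on the 512/64-byte bulk packet boundary */
	return rtw_usb_bulk_size_boundary(padapter, len) ? len + 1 : len;
}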
+
+
+#endif /* __USB_OPS_H_ */
diff --git a/drivers/staging/rtl8723au/include/usb_ops_linux.h b/drivers/staging/rtl8723au/include/usb_ops_linux.h
new file mode 100644
index 000000000000..8f5c59eace5a
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/usb_ops_linux.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __USB_OPS_LINUX_H__
+#define __USB_OPS_LINUX_H__
+
+#define VENDOR_CMD_MAX_DATA_LEN 254
+
+#define RTW_USB_CONTROL_MSG_TIMEOUT_TEST 10/* ms */
+#define RTW_USB_CONTROL_MSG_TIMEOUT 500/* ms */
+
+#define MAX_USBCTRL_VENDORREQ_TIMES 10
+
+#define RTW_USB_BULKOUT_TIMEOUT 5000/* ms */
+
+#define _usbctrl_vendorreq_async_callback(urb, regs) \
+ _usbctrl_vendorreq_async_callback(urb)
+#define usb_write_mem23a_complete(purb, regs) usb_write_mem23a_complete(purb)
+#define usb_write_port23a_complete(purb, regs) usb_write_port23a_complete(purb)
+#define usb_read_port_complete(purb, regs) usb_read_port_complete(purb)
+#define usb_read_interrupt_complete(purb, regs) \
+ usb_read_interrupt_complete(purb)
+
+unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr);
+
+void usb_read_mem23a(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem);
+void usb_write_mem23a(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem);
+
+void usb_read_port_cancel23a(struct intf_hdl *pintfhdl);
+
+u32 usb_write_port23a(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ struct xmit_buf *wmem);
+void usb_write_port23a_cancel(struct intf_hdl *pintfhdl);
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/usb_osintf.h b/drivers/staging/rtl8723au/include/usb_osintf.h
new file mode 100644
index 000000000000..46087662834e
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/usb_osintf.h
@@ -0,0 +1,24 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __USB_OSINTF_H
+#define __USB_OSINTF_H
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <usb_vendor_req.h>
+
+#define USBD_HALTED(_status) ((u32)(_status) >> 30 == 3)
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/usb_vendor_req.h b/drivers/staging/rtl8723au/include/usb_vendor_req.h
new file mode 100644
index 000000000000..eb4508ef191e
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/usb_vendor_req.h
@@ -0,0 +1,31 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _USB_VENDOR_REQUEST_H_
+#define _USB_VENDOR_REQUEST_H_
+
+/* 4 Set/Get Register related wIndex/Data */
+#define RT_USB_RESET_MASK_OFF 0
+#define RT_USB_RESET_MASK_ON 1
+#define RT_USB_SLEEP_MASK_OFF 0
+#define RT_USB_SLEEP_MASK_ON 1
+#define RT_USB_LDO_ON 1
+#define RT_USB_LDO_OFF 0
+
+/* 4 Set/Get SYSCLK related wValue or Data */
+#define RT_USB_SYSCLK_32KHZ 0
+#define RT_USB_SYSCLK_40MHZ 1
+#define RT_USB_SYSCLK_60MHZ 2
+
+#endif
diff --git a/drivers/staging/rtl8723au/include/wifi.h b/drivers/staging/rtl8723au/include/wifi.h
new file mode 100644
index 000000000000..b5034c6ef1dc
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/wifi.h
@@ -0,0 +1,707 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _WIFI_H_
+#define _WIFI_H_
+
+#define P80211CAPTURE_VERSION 0x80211001
+
+/* This value is tested by WiFi 11n Test Plan 5.2.3.
+ * This test verifies that the WLAN NIC can update the NAV by sending
+ * a CTS with a large duration.
+ */
+#define WiFiNavUpperUs 30000 /* 30 ms */
+
+enum WIFI_FRAME_TYPE {
+ WIFI_MGT_TYPE = (0),
+ WIFI_CTRL_TYPE = (BIT(2)),
+ WIFI_DATA_TYPE = (BIT(3)),
+ WIFI_QOS_DATA_TYPE = (BIT(7)|BIT(3)), /* QoS Data */
+};
+
+enum WIFI_FRAME_SUBTYPE {
+ /* below is for mgt frame */
+ WIFI_ASSOCREQ = (0 | WIFI_MGT_TYPE),
+ WIFI_ASSOCRSP = (BIT(4) | WIFI_MGT_TYPE),
+ WIFI_REASSOCREQ = (BIT(5) | WIFI_MGT_TYPE),
+ WIFI_REASSOCRSP = (BIT(5) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_PROBEREQ = (BIT(6) | WIFI_MGT_TYPE),
+ WIFI_PROBERSP = (BIT(6) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_BEACON = (BIT(7) | WIFI_MGT_TYPE),
+ WIFI_ATIM = (BIT(7) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_DISASSOC = (BIT(7) | BIT(5) | WIFI_MGT_TYPE),
+ WIFI_AUTH = (BIT(7) | BIT(5) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_DEAUTH = (BIT(7) | BIT(6) | WIFI_MGT_TYPE),
+ WIFI_ACTION = (BIT(7) | BIT(6) | BIT(4) | WIFI_MGT_TYPE),
+
+ /* below is for control frame */
+ WIFI_PSPOLL = (BIT(7) | BIT(5) | WIFI_CTRL_TYPE),
+ WIFI_RTS = (BIT(7) | BIT(5) | BIT(4) | WIFI_CTRL_TYPE),
+ WIFI_CTS = (BIT(7) | BIT(6) | WIFI_CTRL_TYPE),
+ WIFI_ACK = (BIT(7) | BIT(6) | BIT(4) | WIFI_CTRL_TYPE),
+ WIFI_CFEND = (BIT(7) | BIT(6) | BIT(5) | WIFI_CTRL_TYPE),
+ WIFI_CFEND_CFACK = (BIT(7) | BIT(6) | BIT(5) | BIT(4) | WIFI_CTRL_TYPE),
+
+ /* below is for data frame */
+ WIFI_DATA = (0 | WIFI_DATA_TYPE),
+ WIFI_DATA_CFACK = (BIT(4) | WIFI_DATA_TYPE),
+ WIFI_DATA_CFPOLL = (BIT(5) | WIFI_DATA_TYPE),
+ WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_DATA_NULL = (BIT(6) | WIFI_DATA_TYPE),
+ WIFI_CF_ACK = (BIT(6) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_CF_POLL = (BIT(6) | BIT(5) | WIFI_DATA_TYPE),
+ WIFI_CF_ACKPOLL = (BIT(6) | BIT(5) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_QOS_DATA_NULL = (BIT(6) | WIFI_QOS_DATA_TYPE),
+};
+
+
+enum WIFI_REG_DOMAIN {
+ DOMAIN_FCC = 1,
+ DOMAIN_IC = 2,
+ DOMAIN_ETSI = 3,
+ DOMAIN_SPAIN = 4,
+ DOMAIN_FRANCE = 5,
+ DOMAIN_MKK = 6,
+ DOMAIN_ISRAEL = 7,
+ DOMAIN_MKK1 = 8,
+ DOMAIN_MKK2 = 9,
+ DOMAIN_MKK3 = 10,
+ DOMAIN_MAX
+};
+
+
+#define SetToDs(pbuf) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_TODS))
+
+#define SetFrDs(pbuf) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_FROMDS))
+
+#define get_tofr_ds(pframe) ((ieee80211_has_tods(pframe) << 1) | \
+ ieee80211_has_fromds(pframe))
+
+#define SetMFrag(pbuf) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS))
+
+#define ClearMFrag(pbuf) \
+ (*(unsigned short *)(pbuf) &= (~cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)))
+
+#define SetRetry(pbuf) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_RETRY))
+
+#define SetPwrMgt(pbuf) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_PM))
+
+#define SetMData(pbuf) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_MOREDATA))
+
+#define SetPrivacy(pbuf) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_PROTECTED))
+
+#define SetFrameType(pbuf, type) \
+ do { \
+ *(unsigned short *)(pbuf) &= __constant_cpu_to_le16(~(BIT(3) | BIT(2))); \
+ *(unsigned short *)(pbuf) |= __constant_cpu_to_le16(type); \
+ } while (0)
+
+#define SetFrameSubType(pbuf, type) \
+ do { \
+ *(unsigned short *)(pbuf) &= cpu_to_le16(~(BIT(7) | BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2))); \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(type); \
+ } while (0)
+
+#define GetTupleCache(pbuf) (cpu_to_le16(*(unsigned short *)((unsigned long)(pbuf) + 22)))
+
+#define SetFragNum(pbuf, num) \
+ do { \
+ *(unsigned short *)((unsigned long)(pbuf) + 22) = \
+ ((*(unsigned short *)((unsigned long)(pbuf) + 22)) & le16_to_cpu(~(0x000f))) | \
+ cpu_to_le16(0x0f & (num)); \
+ } while (0)
+
+#define SetSeqNum(pbuf, num) \
+ do { \
+ *(unsigned short *)((unsigned long)(pbuf) + 22) = \
+ ((*(unsigned short *)((unsigned long)(pbuf) + 22)) & le16_to_cpu((unsigned short)0x000f)) | \
+ le16_to_cpu((unsigned short)(0xfff0 & (num << 4))); \
+ } while (0)
+
+#define SetDuration(pbuf, dur) \
+ (*(unsigned short *)((unsigned long)(pbuf) + 2) = \
+ cpu_to_le16(0xffff & (dur)))
+
+#define SetPriority(pbuf, tid) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16(tid & 0xf))
+
+#define SetEOSP(pbuf, eosp) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16((eosp & 1) << 4))
+
+#define SetAckpolicy(pbuf, ack) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16((ack & 3) << 5))
+
+#define SetAMsdu(pbuf, amsdu) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16((amsdu & 1) << 7))
+
+#define GetAid(pbuf) \
+ (cpu_to_le16(*(unsigned short *)((unsigned long)(pbuf) + 2)) & \
+ 0x3fff)
+
+#define GetTid(pbuf) \
+ (cpu_to_le16(*(unsigned short *)((unsigned long)(pbuf) + \
+ (((ieee80211_has_tods(pbuf)<<1) | \
+ ieee80211_has_fromds(pbuf)) == 3 ? 30 : 24))) & 0x000f)
+
+static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
+{
+ unsigned char *sa;
+ unsigned int to_fr_ds;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) pframe;
+
+ to_fr_ds = (ieee80211_has_tods(hdr->frame_control) << 1) |
+ ieee80211_has_fromds(hdr->frame_control);
+
+ switch (to_fr_ds) {
+ case 0x00: /* ToDs=0, FromDs=0 */
+ sa = hdr->addr3;
+ break;
+ case 0x01: /* ToDs=0, FromDs=1 */
+ sa = hdr->addr2;
+ break;
+ case 0x02: /* ToDs=1, FromDs=0 */
+ sa = hdr->addr1;
+ break;
+ case 0x03: /* ToDs=1, FromDs=1 */
+ sa = hdr->addr1;
+ break;
+ default:
+ sa = NULL; /* */
+ break;
+ }
+ return sa;
+}
+
+/*-----------------------------------------------------------------------------
+ Below are the security-related definitions
+------------------------------------------------------------------------------*/
+#define _RESERVED_FRAME_TYPE_ 0
+#define _SKB_FRAME_TYPE_ 2
+#define _PRE_ALLOCMEM_ 1
+#define _PRE_ALLOCHDR_ 3
+#define _PRE_ALLOCLLCHDR_ 4
+#define _PRE_ALLOCICVHDR_ 5
+#define _PRE_ALLOCMICHDR_ 6
+
+#define _SIFSTIME_ \
+ ((priv->pmib->dot11BssType.net_work_type & WIRELESS_11A) ? 16 : 10)
+#define _ACKCTSLNG_ 14 /* 14 bytes long, including crclng */
+#define _CRCLNG_ 4
+
+#define _ASOCREQ_IE_OFFSET_ 4 /* excluding wlan_hdr */
+#define _ASOCRSP_IE_OFFSET_ 6
+#define _REASOCREQ_IE_OFFSET_ 10
+#define _REASOCRSP_IE_OFFSET_ 6
+#define _PROBEREQ_IE_OFFSET_ 0
+#define _PROBERSP_IE_OFFSET_ 12
+#define _AUTH_IE_OFFSET_ 6
+#define _DEAUTH_IE_OFFSET_ 0
+#define _BEACON_IE_OFFSET_ 12
+#define _PUBLIC_ACTION_IE_OFFSET_ 8
+
+#define _FIXED_IE_LENGTH_ _BEACON_IE_OFFSET_
+
+#define _SSID_IE_ 0
+#define _SUPPORTEDRATES_IE_ 1
+#define _DSSET_IE_ 3
+#define _TIM_IE_ 5
+#define _IBSS_PARA_IE_ 6
+#define _COUNTRY_IE_ 7
+#define _CHLGETXT_IE_ 16
+#define _SUPPORTED_CH_IE_ 36
+#define _CH_SWTICH_ANNOUNCE_ 37 /* Channel Switch Announcement */
+#define _RSN_IE_2_ 48
+#define _SSN_IE_1_ 221
+#define _ERPINFO_IE_ 42
+#define _EXT_SUPPORTEDRATES_IE_ 50
+
+#define _HT_CAPABILITY_IE_ 45
+#define _FTIE_ 55
+#define _TIMEOUT_ITVL_IE_ 56
+#define _SRC_IE_ 59
+#define _HT_EXTRA_INFO_IE_ 61
+#define _HT_ADD_INFO_IE_ 61 /* _HT_EXTRA_INFO_IE_ */
+
+
+#define EID_BSSCoexistence 72 /* 20/40 BSS Coexistence */
+#define EID_BSSIntolerantChlReport 73
+#define _RIC_Descriptor_IE_ 75
+
+#define _LINK_ID_IE_ 101
+#define _CH_SWITCH_TIMING_ 104
+#define _PTI_BUFFER_STATUS_ 106
+#define _EXT_CAP_IE_ 127
+#define _VENDOR_SPECIFIC_IE_ 221
+
+#define _RESERVED47_ 47
+
+/* ---------------------------------------------------------------------------
+ Below are the fixed elements...
+-----------------------------------------------------------------------------*/
+#define _AUTH_ALGM_NUM_ 2
+#define _AUTH_SEQ_NUM_ 2
+#define _BEACON_ITERVAL_ 2
+#define _CAPABILITY_ 2
+#define _CURRENT_APADDR_ 6
+#define _LISTEN_INTERVAL_ 2
+#define _ASOC_ID_ 2
+#define _STATUS_CODE_ 2
+#define _TIMESTAMP_ 8
+
+#define AUTH_ODD_TO 0
+#define AUTH_EVEN_TO 1
+
+#define WLAN_ETHCONV_ENCAP 1
+#define WLAN_ETHCONV_RFC1042 2
+#define WLAN_ETHCONV_8021h 3
+
+#define cap_ESS BIT(0)
+#define cap_IBSS BIT(1)
+#define cap_CFPollable BIT(2)
+#define cap_CFRequest BIT(3)
+#define cap_Privacy BIT(4)
+#define cap_ShortPremble BIT(5)
+#define cap_PBCC BIT(6)
+#define cap_ChAgility BIT(7)
+#define cap_SpecMgmt BIT(8)
+#define cap_QoS BIT(9)
+#define cap_ShortSlot BIT(10)
+
+/*-----------------------------------------------------------------------------
+ Below is the definition for 802.11i / 802.1x
+------------------------------------------------------------------------------*/
+#define _IEEE8021X_MGT_ 1 /* WPA */
+#define _IEEE8021X_PSK_ 2 /* WPA with pre-shared key */
+
+/*
+#define _NO_PRIVACY_ 0
+#define _WEP_40_PRIVACY_ 1
+#define _TKIP_PRIVACY_ 2
+#define _WRAP_PRIVACY_ 3
+#define _CCMP_PRIVACY_ 4
+#define _WEP_104_PRIVACY_ 5
+#define _WEP_WPA_MIXED_PRIVACY_ 6 WEP + WPA
+*/
+
+/*-----------------------------------------------------------------------------
+ Below is the definition for WMM
+------------------------------------------------------------------------------*/
+#define _WMM_IE_Length_ 7 /* for WMM STA */
+#define _WMM_Para_Element_Length_ 24
+
+
+/*-----------------------------------------------------------------------------
+ Below is the definition for 802.11n
+------------------------------------------------------------------------------*/
+
+#define SetOrderBit(pbuf) \
+ (*(unsigned short *)(pbuf) |= cpu_to_le16(_ORDER_))
+
+#define GetOrderBit(pbuf) \
+ (((*(unsigned short *)(pbuf)) & le16_to_cpu(_ORDER_)) != 0)
+
+
+/* struct ieee80211_ht_addt_info - HT additional information
+ *
+ * This structure refers to "HT information element" as
+ * described in 802.11n draft section 7.3.2.53
+ */
+struct ieee80211_ht_addt_info {
+ unsigned char control_chan;
+ unsigned char ht_param;
+ unsigned short operation_mode;
+ unsigned short stbc_param;
+ unsigned char basic_set[16];
+} __packed;
+
+struct HT_caps_element {
+ union {
+ struct {
+ unsigned short HT_caps_info;
+ unsigned char AMPDU_para;
+ unsigned char MCS_rate[16];
+ unsigned short HT_ext_caps;
+ unsigned int Beamforming_caps;
+ unsigned char ASEL_caps;
+ } HT_cap_element;
+ unsigned char HT_cap[26];
+ } u;
+} __packed;
+
+struct HT_info_element {
+ unsigned char primary_channel;
+ unsigned char infos[5];
+ unsigned char MCS_rate[16];
+} __packed;
+
+struct AC_param {
+ unsigned char ACI_AIFSN;
+ unsigned char CW;
+ unsigned short TXOP_limit;
+} __packed;
+
+struct WMM_para_element {
+ unsigned char QoS_info;
+ unsigned char reserved;
+ struct AC_param ac_param[4];
+} __packed;
+
+struct ADDBA_request {
+ unsigned char dialog_token;
+ unsigned short BA_para_set;
+ unsigned short BA_timeout_value;
+ unsigned short BA_starting_seqctrl;
+} __packed;
+
+
+#define OP_MODE_PURE 0
+#define OP_MODE_MAY_BE_LEGACY_STAS 1
+#define OP_MODE_20MHZ_HT_STA_ASSOCED 2
+#define OP_MODE_MIXED 3
+
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_OFF_MASK ((u8) BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_ABOVE ((u8) BIT(0))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_BELOW ((u8) BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH ((u8) BIT(2))
+#define HT_INFO_HT_PARAM_RIFS_MODE ((u8) BIT(3))
+#define HT_INFO_HT_PARAM_CTRL_ACCESS_ONLY ((u8) BIT(4))
+#define HT_INFO_HT_PARAM_SRV_INTERVAL_GRANULARITY ((u8) BIT(5))
+
+#define HT_INFO_OPERATION_MODE_OP_MODE_MASK \
+ ((u16) (0x0001 | 0x0002))
+#define HT_INFO_OPERATION_MODE_OP_MODE_OFFSET 0
+#define HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT ((u8) BIT(2))
+#define HT_INFO_OPERATION_MODE_TRANSMIT_BURST_LIMIT ((u8) BIT(3))
+#define HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT ((u8) BIT(4))
+
+#define HT_INFO_STBC_PARAM_DUAL_BEACON ((u16) BIT(6))
+#define HT_INFO_STBC_PARAM_DUAL_STBC_PROTECT ((u16) BIT(7))
+#define HT_INFO_STBC_PARAM_SECONDARY_BCN ((u16) BIT(8))
+#define HT_INFO_STBC_PARAM_LSIG_TXOP_PROTECT_ALLOWED ((u16) BIT(9))
+#define HT_INFO_STBC_PARAM_PCO_ACTIVE ((u16) BIT(10))
+#define HT_INFO_STBC_PARAM_PCO_PHASE ((u16) BIT(11))
+
+
+
+/* ===============WPS Section=============== */
+/* For WPSv1.0 */
+#define WPSOUI 0x0050f204
+/* WPS attribute ID */
+#define WPS_ATTR_VER1 0x104A
+#define WPS_ATTR_SIMPLE_CONF_STATE 0x1044
+#define WPS_ATTR_RESP_TYPE 0x103B
+#define WPS_ATTR_UUID_E 0x1047
+#define WPS_ATTR_MANUFACTURER 0x1021
+#define WPS_ATTR_MODEL_NAME 0x1023
+#define WPS_ATTR_MODEL_NUMBER 0x1024
+#define WPS_ATTR_SERIAL_NUMBER 0x1042
+#define WPS_ATTR_PRIMARY_DEV_TYPE 0x1054
+#define WPS_ATTR_SEC_DEV_TYPE_LIST 0x1055
+#define WPS_ATTR_DEVICE_NAME 0x1011
+#define WPS_ATTR_CONF_METHOD 0x1008
+#define WPS_ATTR_RF_BANDS 0x103C
+#define WPS_ATTR_DEVICE_PWID 0x1012
+#define WPS_ATTR_REQUEST_TYPE 0x103A
+#define WPS_ATTR_ASSOCIATION_STATE 0x1002
+#define WPS_ATTR_CONFIG_ERROR 0x1009
+#define WPS_ATTR_VENDOR_EXT 0x1049
+#define WPS_ATTR_SELECTED_REGISTRAR 0x1041
+
+/* Value of WPS attribute "WPS_ATTR_DEVICE_NAME */
+#define WPS_MAX_DEVICE_NAME_LEN 32
+
+/* Value of WPS Request Type Attribute */
+#define WPS_REQ_TYPE_ENROLLEE_INFO_ONLY 0x00
+#define WPS_REQ_TYPE_ENROLLEE_OPEN_8021X 0x01
+#define WPS_REQ_TYPE_REGISTRAR 0x02
+#define WPS_REQ_TYPE_WLAN_MANAGER_REGISTRAR 0x03
+
+/* Value of WPS Response Type Attribute */
+#define WPS_RESPONSE_TYPE_INFO_ONLY 0x00
+#define WPS_RESPONSE_TYPE_8021X 0x01
+#define WPS_RESPONSE_TYPE_REGISTRAR 0x02
+#define WPS_RESPONSE_TYPE_AP 0x03
+
+/* Value of WPS WiFi Simple Configuration State Attribute */
+#define WPS_WSC_STATE_NOT_CONFIG 0x01
+#define WPS_WSC_STATE_CONFIG 0x02
+
+/* Value of WPS Version Attribute */
+#define WPS_VERSION_1 0x10
+
+/* Value of WPS Configuration Method Attribute */
+#define WPS_CONFIG_METHOD_FLASH 0x0001
+#define WPS_CONFIG_METHOD_ETHERNET 0x0002
+#define WPS_CONFIG_METHOD_LABEL 0x0004
+#define WPS_CONFIG_METHOD_DISPLAY 0x0008
+#define WPS_CONFIG_METHOD_E_NFC 0x0010
+#define WPS_CONFIG_METHOD_I_NFC 0x0020
+#define WPS_CONFIG_METHOD_NFC 0x0040
+#define WPS_CONFIG_METHOD_PBC 0x0080
+#define WPS_CONFIG_METHOD_KEYPAD 0x0100
+#define WPS_CONFIG_METHOD_VPBC 0x0280
+#define WPS_CONFIG_METHOD_PPBC 0x0480
+#define WPS_CONFIG_METHOD_VDISPLAY 0x2008
+#define WPS_CONFIG_METHOD_PDISPLAY 0x4008
+
+/* Value of Category ID of WPS Primary Device Type Attribute */
+#define WPS_PDT_CID_DISPLAYS 0x0007
+#define WPS_PDT_CID_MULIT_MEDIA 0x0008
+#define WPS_PDT_CID_RTK_WIDI WPS_PDT_CID_MULIT_MEDIA
+
+/* Value of Sub Category ID of WPS Primary Device Type Attribute */
+#define WPS_PDT_SCID_MEDIA_SERVER 0x0005
+#define WPS_PDT_SCID_RTK_DMP WPS_PDT_SCID_MEDIA_SERVER
+
+/* Value of Device Password ID */
+#define WPS_DPID_PIN 0x0000
+#define WPS_DPID_USER_SPEC 0x0001
+#define WPS_DPID_MACHINE_SPEC 0x0002
+#define WPS_DPID_REKEY 0x0003
+#define WPS_DPID_PBC 0x0004
+#define WPS_DPID_REGISTRAR_SPEC 0x0005
+
+/* Value of WPS RF Bands Attribute */
+#define WPS_RF_BANDS_2_4_GHZ 0x01
+#define WPS_RF_BANDS_5_GHZ 0x02
+
+/* Value of WPS Association State Attribute */
+#define WPS_ASSOC_STATE_NOT_ASSOCIATED 0x00
+#define WPS_ASSOC_STATE_CONNECTION_SUCCESS 0x01
+#define WPS_ASSOC_STATE_CONFIGURATION_FAILURE 0x02
+#define WPS_ASSOC_STATE_ASSOCIATION_FAILURE 0x03
+#define WPS_ASSOC_STATE_IP_FAILURE 0x04
+
+/* =====================P2P Section===================== */
+/* For P2P */
+#define P2POUI 0x506F9A09
+
+/* P2P Attribute ID */
+#define P2P_ATTR_STATUS 0x00
+#define P2P_ATTR_MINOR_REASON_CODE 0x01
+#define P2P_ATTR_CAPABILITY 0x02
+#define P2P_ATTR_DEVICE_ID 0x03
+#define P2P_ATTR_GO_INTENT 0x04
+#define P2P_ATTR_CONF_TIMEOUT 0x05
+#define P2P_ATTR_LISTEN_CH 0x06
+#define P2P_ATTR_GROUP_BSSID 0x07
+#define P2P_ATTR_EX_LISTEN_TIMING 0x08
+#define P2P_ATTR_INTENTED_IF_ADDR 0x09
+#define P2P_ATTR_MANAGEABILITY 0x0A
+#define P2P_ATTR_CH_LIST 0x0B
+#define P2P_ATTR_NOA 0x0C
+#define P2P_ATTR_DEVICE_INFO 0x0D
+#define P2P_ATTR_GROUP_INFO 0x0E
+#define P2P_ATTR_GROUP_ID 0x0F
+#define P2P_ATTR_INTERFACE 0x10
+#define P2P_ATTR_OPERATING_CH 0x11
+#define P2P_ATTR_INVITATION_FLAGS 0x12
+
+/* Value of Status Attribute */
+#define P2P_STATUS_SUCCESS 0x00
+#define P2P_STATUS_FAIL_INFO_UNAVAILABLE 0x01
+#define P2P_STATUS_FAIL_INCOMPATIBLE_PARAM 0x02
+#define P2P_STATUS_FAIL_LIMIT_REACHED 0x03
+#define P2P_STATUS_FAIL_INVALID_PARAM 0x04
+#define P2P_STATUS_FAIL_REQUEST_UNABLE 0x05
+#define P2P_STATUS_FAIL_PREVOUS_PROTO_ERR 0x06
+#define P2P_STATUS_FAIL_NO_COMMON_CH 0x07
+#define P2P_STATUS_FAIL_UNKNOWN_P2PGROUP 0x08
+#define P2P_STATUS_FAIL_BOTH_GOINTENT_15 0x09
+#define P2P_STATUS_FAIL_INCOMPATIBLE_PROVSION 0x0A
+#define P2P_STATUS_FAIL_USER_REJECT 0x0B
+
+/* Value of Invitation Flags Attribute */
+#define P2P_INVITATION_FLAGS_PERSISTENT BIT(0)
+
+#define DMP_P2P_DEVCAP_SUPPORT (P2P_DEVCAP_SERVICE_DISCOVERY | \
+ P2P_DEVCAP_CLIENT_DISCOVERABILITY | \
+ P2P_DEVCAP_CONCURRENT_OPERATION | \
+ P2P_DEVCAP_INVITATION_PROC)
+
+#define DMP_P2P_GRPCAP_SUPPORT (P2P_GRPCAP_INTRABSS)
+
+/* Value of Device Capability Bitmap */
+#define P2P_DEVCAP_SERVICE_DISCOVERY BIT(0)
+#define P2P_DEVCAP_CLIENT_DISCOVERABILITY BIT(1)
+#define P2P_DEVCAP_CONCURRENT_OPERATION BIT(2)
+#define P2P_DEVCAP_INFRA_MANAGED BIT(3)
+#define P2P_DEVCAP_DEVICE_LIMIT BIT(4)
+#define P2P_DEVCAP_INVITATION_PROC BIT(5)
+
+/* Value of Group Capability Bitmap */
+#define P2P_GRPCAP_GO BIT(0)
+#define P2P_GRPCAP_PERSISTENT_GROUP BIT(1)
+#define P2P_GRPCAP_GROUP_LIMIT BIT(2)
+#define P2P_GRPCAP_INTRABSS BIT(3)
+#define P2P_GRPCAP_CROSS_CONN BIT(4)
+#define P2P_GRPCAP_PERSISTENT_RECONN BIT(5)
+#define P2P_GRPCAP_GROUP_FORMATION BIT(6)
+
+/* P2P Public Action Frame ( Management Frame ) */
+#define P2P_PUB_ACTION_ACTION 0x09
+
+/* P2P Public Action Frame Type */
+#define P2P_GO_NEGO_REQ 0
+#define P2P_GO_NEGO_RESP 1
+#define P2P_GO_NEGO_CONF 2
+#define P2P_INVIT_REQ 3
+#define P2P_INVIT_RESP 4
+#define P2P_DEVDISC_REQ 5
+#define P2P_DEVDISC_RESP 6
+#define P2P_PROVISION_DISC_REQ 7
+#define P2P_PROVISION_DISC_RESP 8
+
+/* P2P Action Frame Type */
+#define P2P_NOTICE_OF_ABSENCE 0
+#define P2P_PRESENCE_REQUEST 1
+#define P2P_PRESENCE_RESPONSE 2
+#define P2P_GO_DISC_REQUEST 3
+
+
+#define P2P_MAX_PERSISTENT_GROUP_NUM 10
+
+#define P2P_PROVISIONING_SCAN_CNT 3
+
+#define P2P_WILDCARD_SSID_LEN 7
+
+#define P2P_FINDPHASE_EX_NONE 0 /* default value, used when: (1) p2p is disabled or (2) p2p is enabled but only one scan phase is done */
+#define P2P_FINDPHASE_EX_FULL 1 /* used when p2p is enabled and we want one scan phase plus P2P_FINDPHASE_EX_MAX-1 find phases */
+#define P2P_FINDPHASE_EX_SOCIAL_FIRST (P2P_FINDPHASE_EX_FULL+1)
+#define P2P_FINDPHASE_EX_MAX 4
+#define P2P_FINDPHASE_EX_SOCIAL_LAST P2P_FINDPHASE_EX_MAX
+
+#define P2P_PROVISION_TIMEOUT 5000 /*5 sec timeout for sending the provision discovery request */
+#define P2P_CONCURRENT_PROVISION_TIMEOUT 3000 /*3 sec timeout for sending the provision discovery request under concurrent mode */
+#define P2P_GO_NEGO_TIMEOUT 5000 /* 5 sec timeout for receiving the group negotiation response */
+#define P2P_CONCURRENT_GO_NEGO_TIMEOUT 3000 /*3 sec timeout for sending the negotiation request under concurrent mode */
+#define P2P_TX_PRESCAN_TIMEOUT 100 /*100ms */
+#define P2P_INVITE_TIMEOUT 5000 /*5 sec timeout for sending the invitation request */
+#define P2P_CONCURRENT_INVITE_TIMEOUT 3000 /*3 sec timeout for sending the invitation request under concurrent mode */
+#define P2P_RESET_SCAN_CH 25000 /* 25 sec timeout to reset the scan channel (based on channel plan) */
+#define P2P_MAX_INTENT 15
+
+#define P2P_MAX_NOA_NUM 2
+
+/* WPS Configuration Method */
+#define WPS_CM_NONE 0x0000
+#define WPS_CM_LABEL 0x0004
+#define WPS_CM_DISPLYA 0x0008
+#define WPS_CM_EXTERNAL_NFC_TOKEN 0x0010
+#define WPS_CM_INTEGRATED_NFC_TOKEN 0x0020
+#define WPS_CM_NFC_INTERFACE 0x0040
+#define WPS_CM_PUSH_BUTTON 0x0080
+#define WPS_CM_KEYPAD 0x0100
+#define WPS_CM_SW_PUHS_BUTTON 0x0280
+#define WPS_CM_HW_PUHS_BUTTON 0x0480
+#define WPS_CM_SW_DISPLAY_PIN 0x2008
+#define WPS_CM_LCD_DISPLAY_PIN 0x4008
+
+enum P2P_ROLE {
+ P2P_ROLE_DISABLE = 0,
+ P2P_ROLE_DEVICE = 1,
+ P2P_ROLE_CLIENT = 2,
+ P2P_ROLE_GO = 3
+};
+
+enum P2P_STATE {
+ P2P_STATE_NONE = 0, /* P2P disabled */
+ P2P_STATE_IDLE = 1, /* P2P enabled but idle */
+ P2P_STATE_LISTEN = 2, /* In pure listen state */
+ P2P_STATE_SCAN = 3, /* In scan phase */
+ P2P_STATE_FIND_PHASE_LISTEN = 4, /* In the listen state of the find phase */
+ P2P_STATE_FIND_PHASE_SEARCH = 5, /* In the search state of the find phase */
+ P2P_STATE_TX_PROVISION_DIS_REQ = 6, /* In P2P provisioning discovery */
+ P2P_STATE_RX_PROVISION_DIS_RSP = 7,
+ P2P_STATE_RX_PROVISION_DIS_REQ = 8,
+ P2P_STATE_GONEGO_ING = 9, /* Doing the group owner negotiation handshake */
+ P2P_STATE_GONEGO_OK = 10, /* Group negotiation handshake finished successfully */
+ P2P_STATE_GONEGO_FAIL = 11, /* Group negotiation handshake failed */
+ P2P_STATE_RECV_INVITE_REQ_MATCH = 12, /* Received a P2P Invitation request that matches the profile */
+ P2P_STATE_PROVISIONING_ING = 13, /* Doing the P2P WPS */
+ P2P_STATE_PROVISIONING_DONE = 14, /* Finished the P2P WPS */
+ P2P_STATE_TX_INVITE_REQ = 15, /* Transmitting the P2P Invitation request */
+ P2P_STATE_RX_INVITE_RESP_OK = 16, /* Received the P2P Invitation response */
+ P2P_STATE_RECV_INVITE_REQ_DISMATCH = 17,/* Received a P2P Invitation request that does not match the profile */
+ P2P_STATE_RECV_INVITE_REQ_GO = 18, /* Received a P2P Invitation request and this device is the GO */
+ P2P_STATE_RECV_INVITE_REQ_JOIN = 19, /* Received a P2P Invitation request to join an existing P2P group */
+ P2P_STATE_RX_INVITE_RESP_FAIL = 20, /* Received a P2P Invitation response indicating failure */
+ P2P_STATE_RX_INFOR_NOREADY = 21, /* Received a p2p negotiation response but its information is not yet available */
+ P2P_STATE_TX_INFOR_NOREADY = 22, /* Sent a p2p negotiation response but our information is not yet available */
+};
+
+enum P2P_WPSINFO {
+ P2P_NO_WPSINFO = 0,
+ P2P_GOT_WPSINFO_PEER_DISPLAY_PIN = 1,
+ P2P_GOT_WPSINFO_SELF_DISPLAY_PIN = 2,
+ P2P_GOT_WPSINFO_PBC = 3,
+};
+
+#define P2P_PRIVATE_IOCTL_SET_LEN 64
+
+enum P2P_PROTO_WK_ID {
+ P2P_FIND_PHASE_WK = 0,
+ P2P_RESTORE_STATE_WK = 1,
+ P2P_PRE_TX_PROVDISC_PROCESS_WK = 2,
+ P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3,
+ P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4,
+ P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5,
+ P2P_RO_CH_WK = 6,
+};
+
+#ifdef CONFIG_8723AU_P2P
+enum P2P_PS_STATE {
+ P2P_PS_DISABLE = 0,
+ P2P_PS_ENABLE = 1,
+ P2P_PS_SCAN = 2,
+ P2P_PS_SCAN_DONE = 3,
+ P2P_PS_ALLSTASLEEP = 4, /* for P2P GO */
+};
+
+enum P2P_PS_MODE {
+ P2P_PS_NONE = 0,
+ P2P_PS_CTWINDOW = 1,
+ P2P_PS_NOA = 2,
+ P2P_PS_MIX = 3, /* CTWindow and NoA */
+};
+#endif /* CONFIG_8723AU_P2P */
+
+/* =====================WFD Section===================== */
+/* For Wi-Fi Display */
+#define WFD_ATTR_DEVICE_INFO 0x00
+#define WFD_ATTR_ASSOC_BSSID 0x01
+#define WFD_ATTR_COUPLED_SINK_INFO 0x06
+#define WFD_ATTR_LOCAL_IP_ADDR 0x08
+#define WFD_ATTR_SESSION_INFO 0x09
+#define WFD_ATTR_ALTER_MAC 0x0a
+
+/* For WFD Device Information Attribute */
+#define WFD_DEVINFO_SOURCE 0x0000
+#define WFD_DEVINFO_PSINK 0x0001
+#define WFD_DEVINFO_SSINK 0x0002
+#define WFD_DEVINFO_DUAL 0x0003
+
+#define WFD_DEVINFO_SESSION_AVAIL 0x0010
+#define WFD_DEVINFO_WSD 0x0040
+#define WFD_DEVINFO_PC_TDLS 0x0080
+#define WFD_DEVINFO_HDCP_SUPPORT 0x0100
+
+#endif /* _WIFI_H_ */
diff --git a/drivers/staging/rtl8723au/include/wlan_bssdef.h b/drivers/staging/rtl8723au/include/wlan_bssdef.h
new file mode 100644
index 000000000000..92287ebe5b9b
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/wlan_bssdef.h
@@ -0,0 +1,215 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __WLAN_BSSDEF_H__
+#define __WLAN_BSSDEF_H__
+
+
+#define MAX_IE_SZ 768
+
+
+#define NDIS_802_11_LENGTH_RATES 8
+#define NDIS_802_11_LENGTH_RATES_EX 16
+
+enum ndis_802_11_net_type {
+ Ndis802_11FH,
+ Ndis802_11DS,
+ Ndis802_11OFDM5,
+ Ndis802_11OFDM24,
+ Ndis802_11NetworkTypeMax /* just an upper bound */
+};
+
+struct ndis_802_11_configuration_fh {
+ u32 Length; /* Length of structure */
+ u32 HopPattern; /* As defined by 802.11, MSB set */
+ u32 HopSet; /* to one if non-802.11 */
+ u32 DwellTime; /* units are Kusec */
+};
+
+
+/*
+ FW will only save the channel number in DSConfig.
+ ODI Handler will convert the channel number to freq. number.
+*/
+struct ndis_802_11_config {
+ u32 Length; /* Length of structure */
+ u32 BeaconPeriod; /* units are Kusec */
+ u32 ATIMWindow; /* units are Kusec */
+ u32 DSConfig; /* Frequency, units are kHz */
+ struct ndis_802_11_configuration_fh FHConfig;
+};
+
+enum ndis_802_11_net_infra {
+ Ndis802_11IBSS,
+ Ndis802_11Infrastructure,
+ Ndis802_11AutoUnknown,
+ Ndis802_11InfrastructureMax, /* Not a real value, defined as upper bound */
+ Ndis802_11APMode
+};
+
+struct ndis_802_11_fixed_ies {
+ u8 Timestamp[8];
+ u16 BeaconInterval;
+ u16 Capabilities;
+};
+
+struct ndis_802_11_var_ies {
+ u8 ElementID;
+ u8 Length;
+ u8 data[1];
+};
+
+/* Length is the sum, rounded up to a multiple of 4 bytes, of
+ * 6 * sizeof(unsigned char) + 2 + sizeof(struct ndis_802_11_ssid) +
+ * sizeof(u32) + sizeof(long) + sizeof(enum ndis_802_11_net_type) +
+ * sizeof(struct ndis_802_11_config) +
+ * sizeof(unsigned char) * NDIS_802_11_LENGTH_RATES_EX + IELength.
+ *
+ * Except for IELength, all fields have a fixed length, so a macro can be
+ * defined to represent the fixed-length partial sum.
+ */
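+/*
+ * Illustrative sketch only (not used by this driver): the fixed-length
+ * partial sum could be expressed as a hypothetical macro along these lines,
+ * assuming struct ndis_802_11_ssid is defined elsewhere in the driver headers:
+ *
+ *	#define FIXED_NETWORK_SIZE \
+ *		(6 * sizeof(unsigned char) + 2 + \
+ *		 sizeof(struct ndis_802_11_ssid) + sizeof(u32) + sizeof(long) + \
+ *		 sizeof(enum ndis_802_11_net_type) + \
+ *		 sizeof(struct ndis_802_11_config) + \
+ *		 sizeof(unsigned char) * NDIS_802_11_LENGTH_RATES_EX)
+ *
+ * The total length would then be round_up(FIXED_NETWORK_SIZE + IELength, 4).
+ */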
+
+enum ndis_802_11_auth_mode {
+ Ndis802_11AuthModeOpen,
+ Ndis802_11AuthModeShared,
+ Ndis802_11AuthModeAutoSwitch,
+ Ndis802_11AuthModeWPA,
+ Ndis802_11AuthModeWPAPSK,
+ Ndis802_11AuthModeWPANone,
+ dis802_11AuthModeMax /* upper bound */
+};
+
+enum {
+ Ndis802_11WEPEnabled,
+ Ndis802_11Encryption1Enabled = Ndis802_11WEPEnabled,
+ Ndis802_11WEPDisabled,
+ Ndis802_11EncryptionDisabled = Ndis802_11WEPDisabled,
+ Ndis802_11WEPKeyAbsent,
+ Ndis802_11Encryption1KeyAbsent = Ndis802_11WEPKeyAbsent,
+ Ndis802_11WEPNotSupported,
+ Ndis802_11EncryptionNotSupported = Ndis802_11WEPNotSupported,
+ Ndis802_11Encryption2Enabled,
+ Ndis802_11Encryption2KeyAbsent,
+ Ndis802_11Encryption3Enabled,
+ Ndis802_11Encryption3KeyAbsent,
+};
+
+/* Key mapping keys require a BSSID */
+struct ndis_802_11_key {
+ u32 Length; /* Length of this structure */
+ u32 KeyIndex;
+ u32 KeyLength; /* length of key in bytes */
+ unsigned char BSSID[6];
+ unsigned long long KeyRSC;
+ u8 KeyMaterial[32]; /* variable length depending on above field */
+};
+
+struct ndis_802_11_wep {
+ u32 Length; /* Length of this structure */
+ u32 KeyIndex; /* 0 is the per-client key, 1-N are global */
+ u32 KeyLength; /* length of key in bytes */
+ u8 KeyMaterial[16];/* variable length depending on above field */
+};
+
+enum NDIS_802_11_STATUS_TYPE {
+ Ndis802_11StatusType_Authentication,
+ Ndis802_11StatusType_MediaStreamMode,
+ Ndis802_11StatusType_PMKID_CandidateList,
+ Ndis802_11StatusTypeMax /* not a real type, just an upper bound */
+};
+
+/* mask for authentication/integrity fields */
+#define NDIS_802_11_AUTH_REQUEST_AUTH_FIELDS 0x0f
+#define NDIS_802_11_AUTH_REQUEST_REAUTH 0x01
+#define NDIS_802_11_AUTH_REQUEST_KEYUPDATE 0x02
+#define NDIS_802_11_AUTH_REQUEST_PAIRWISE_ERROR 0x06
+#define NDIS_802_11_AUTH_REQUEST_GROUP_ERROR 0x0E
+
+/* MIC check time: 60 seconds, expressed in microseconds. */
+#define MIC_CHECK_TIME 60000000
+
+#ifndef Ndis802_11APMode
+#define Ndis802_11APMode (Ndis802_11InfrastructureMax+1)
+#endif
+
+struct wlan_phy_info {
+ u8 SignalStrength; /* in percentage */
+ u8 SignalQuality; /* in percentage */
+ u8 Optimum_antenna; /* for Antenna diversity */
+ u8 Reserved_0;
+};
+
+struct wlan_bcn_info {
+ /* this info is obtained from rtw_get_encrypt_info() when
+ * translating scan results for the UI */
+ u8 encryp_protocol;/* ENCRYP_PROTOCOL_E: OPEN/WEP/WPA/WPA2 */
+ int group_cipher; /* WPA/WPA2 group cipher */
+ int pairwise_cipher;/* WPA/WPA2/WEP pairwise cipher */
+ int is_8021x;
+
+ /* bwmode 20/40 and ch_offset UP/LOW */
+ unsigned short ht_cap_info;
+ unsigned char ht_info_infos_0;
+};
+
+struct wlan_bssid_ex {
+ u32 Length;
+ u8 MacAddress[ETH_ALEN];
+ u16 reserved;
+ struct cfg80211_ssid Ssid;
+ u32 Privacy;
+ long Rssi; /* in dBm, raw data, obtained from the PHY */
+ enum ndis_802_11_net_type NetworkTypeInUse;
+ struct ndis_802_11_config Configuration;
+ enum ndis_802_11_net_infra InfrastructureMode;
+ unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
+ struct wlan_phy_info PhyInfo;
+ u32 IELength;
+ u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and capability info*/
+} __packed;
+
+static inline uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
+{
+ return sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + bss->IELength;
+}
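+
+/*
+ * Note: because IEs[] is the last member of struct wlan_bssid_ex, the helper
+ * above reports only the bytes actually used; e.g. with IELength == 100 it
+ * returns sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + 100.
+ */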
+
+struct wlan_network {
+ struct list_head list;
+ int network_type; /* refer to ieee80211.h for 11A/B/G */
+ /* set to fixed so the entry is not removed while site-surveying */
+ int fixed;
+ unsigned long last_scanned; /* timestamp for the network */
+ int aid; /* will only be valid when a BSS is joined. */
+ int join_res;
+ struct wlan_bssid_ex network; /* must be the last item */
+ struct wlan_bcn_info BcnInfo;
+};
+
+enum VRTL_CARRIER_SENSE {
+ DISABLE_VCS,
+ ENABLE_VCS,
+ AUTO_VCS
+};
+
+enum VCS_TYPE {
+ NONE_VCS,
+ RTS_CTS,
+ CTS_TO_SELF
+};
+
+/* john */
+#define NUM_PRE_AUTH_KEY 16
+#define NUM_PMKID_CACHE NUM_PRE_AUTH_KEY
+
+#endif /* __WLAN_BSSDEF_H__ */
diff --git a/drivers/staging/rtl8723au/include/xmit_osdep.h b/drivers/staging/rtl8723au/include/xmit_osdep.h
new file mode 100644
index 000000000000..0eca53ece75d
--- /dev/null
+++ b/drivers/staging/rtl8723au/include/xmit_osdep.h
@@ -0,0 +1,57 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __XMIT_OSDEP_H_
+#define __XMIT_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+struct pkt_file {
+ struct sk_buff *pkt;
+ __kernel_size_t pkt_len; /* remaining length of the opened file */
+ unsigned char *cur_buffer;
+ u8 *buf_start;
+ u8 *cur_addr;
+ __kernel_size_t buf_len;
+};
+
+
+#define NR_XMITFRAME 256
+
+struct xmit_priv;
+struct pkt_attrib;
+struct sta_xmit_priv;
+struct xmit_frame;
+struct xmit_buf;
+
+int rtw_xmit23a_entry23a(struct sk_buff *pkt, struct net_device *pnetdev);
+
+void rtw_os_xmit_schedule23a(struct rtw_adapter *padapter);
+
+int rtw_os_xmit_resource_alloc23a(struct rtw_adapter *padapter,
+ struct xmit_buf *pxmitbuf, u32 alloc_sz);
+void rtw_os_xmit_resource_free23a(struct rtw_adapter *padapter,
+ struct xmit_buf *pxmitbuf);
+uint rtw_remainder_len23a(struct pkt_file *pfile);
+void _rtw_open_pktfile23a(struct sk_buff *pkt, struct pkt_file *pfile);
+uint _rtw_pktfile_read23a(struct pkt_file *pfile, u8 *rmem, uint rlen);
+int rtw_endofpktfile23a(struct pkt_file *pfile);
+
+void rtw_os_pkt_complete23a(struct rtw_adapter *padapter, struct sk_buff *pkt);
+void rtw_os_xmit_complete23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxframe);
+int netdev_open23a(struct net_device *pnetdev);
+
+#endif /* __XMIT_OSDEP_H_ */
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
new file mode 100644
index 000000000000..50840b9a11fa
--- /dev/null
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -0,0 +1,4532 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _IOCTL_CFG80211_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_ioctl_set.h>
+#include <xmit_osdep.h>
+
+#include "ioctl_cfg80211.h"
+#include <linux/version.h>
+
+#define RTW_MAX_MGMT_TX_CNT 8
+
+#define RTW_MAX_REMAIN_ON_CHANNEL_DURATION 65535 /* ms */
+#define RTW_MAX_NUM_PMKIDS 4
+
+#define RTW_CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
+
+static const u32 rtw_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+};
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
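+
+/*
+ * CHAN5G() uses the standard 5 GHz channel-to-frequency mapping,
+ * e.g. channel 36 maps to 5000 + 5 * 36 = 5180 MHz.
+ */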
+
+static struct ieee80211_rate rtw_rates[] = {
+ RATETAB_ENT(10, 0x1, 0),
+ RATETAB_ENT(20, 0x2, 0),
+ RATETAB_ENT(55, 0x4, 0),
+ RATETAB_ENT(110, 0x8, 0),
+ RATETAB_ENT(60, 0x10, 0),
+ RATETAB_ENT(90, 0x20, 0),
+ RATETAB_ENT(120, 0x40, 0),
+ RATETAB_ENT(180, 0x80, 0),
+ RATETAB_ENT(240, 0x100, 0),
+ RATETAB_ENT(360, 0x200, 0),
+ RATETAB_ENT(480, 0x400, 0),
+ RATETAB_ENT(540, 0x800, 0),
+};
+
+#define rtw_a_rates (rtw_rates + 4)
+#define RTW_A_RATES_NUM 8
+#define rtw_g_rates (rtw_rates + 0)
+#define RTW_G_RATES_NUM 12
+
+#define RTW_2G_CHANNELS_NUM 14
+#define RTW_5G_CHANNELS_NUM 37
+
+static struct ieee80211_channel rtw_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static struct ieee80211_channel rtw_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0),
+ CHAN5G(184, 0), CHAN5G(188, 0),
+ CHAN5G(192, 0), CHAN5G(196, 0),
+ CHAN5G(200, 0), CHAN5G(204, 0),
+ CHAN5G(208, 0), CHAN5G(212, 0),
+ CHAN5G(216, 0),
+};
+
+static void rtw_2g_channels_init(struct ieee80211_channel *channels)
+{
+ memcpy((void *)channels, (void *)rtw_2ghz_channels,
+ sizeof(struct ieee80211_channel) * RTW_2G_CHANNELS_NUM);
+}
+
+static void rtw_5g_channels_init(struct ieee80211_channel *channels)
+{
+ memcpy((void *)channels, (void *)rtw_5ghz_a_channels,
+ sizeof(struct ieee80211_channel) * RTW_5G_CHANNELS_NUM);
+}
+
+static void rtw_2g_rates_init(struct ieee80211_rate *rates)
+{
+ memcpy(rates, rtw_g_rates,
+ sizeof(struct ieee80211_rate) * RTW_G_RATES_NUM);
+}
+
+static void rtw_5g_rates_init(struct ieee80211_rate *rates)
+{
+ memcpy(rates, rtw_a_rates,
+ sizeof(struct ieee80211_rate) * RTW_A_RATES_NUM);
+}
+
+static struct ieee80211_supported_band *
+rtw_spt_band_alloc(enum ieee80211_band band)
+{
+ struct ieee80211_supported_band *spt_band = NULL;
+ int n_channels, n_bitrates;
+
+ if (band == IEEE80211_BAND_2GHZ) {
+ n_channels = RTW_2G_CHANNELS_NUM;
+ n_bitrates = RTW_G_RATES_NUM;
+ } else if (band == IEEE80211_BAND_5GHZ) {
+ n_channels = RTW_5G_CHANNELS_NUM;
+ n_bitrates = RTW_A_RATES_NUM;
+ } else {
+ goto exit;
+ }
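+ /*
+ * One allocation holds the band descriptor followed by the channel
+ * array and then the bitrate array; the channel and bitrate pointers
+ * below are carved out of this single block.
+ */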
+ spt_band = kzalloc(sizeof(struct ieee80211_supported_band) +
+ sizeof(struct ieee80211_channel) * n_channels +
+ sizeof(struct ieee80211_rate) * n_bitrates,
+ GFP_KERNEL);
+ if (!spt_band)
+ goto exit;
+
+ spt_band->channels =
+ (struct ieee80211_channel *)(((u8 *) spt_band) +
+ sizeof(struct
+ ieee80211_supported_band));
+ spt_band->bitrates =
+ (struct ieee80211_rate *)(((u8 *) spt_band->channels) +
+ sizeof(struct ieee80211_channel) *
+ n_channels);
+ spt_band->band = band;
+ spt_band->n_channels = n_channels;
+ spt_band->n_bitrates = n_bitrates;
+
+ if (band == IEEE80211_BAND_2GHZ) {
+ rtw_2g_channels_init(spt_band->channels);
+ rtw_2g_rates_init(spt_band->bitrates);
+ } else if (band == IEEE80211_BAND_5GHZ) {
+ rtw_5g_channels_init(spt_band->channels);
+ rtw_5g_rates_init(spt_band->bitrates);
+ }
+
+ /* spt_band.ht_cap */
+
+exit:
+ return spt_band;
+}
+
+static const struct ieee80211_txrx_stypes
+rtw_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_ADHOC] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_STATION] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_AP_VLAN] = {
+ /* copy AP */
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+};
+
+#define MAX_BSSINFO_LEN 1000
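+/*
+ * Report one scanned network to cfg80211: rebuild a minimal beacon/probe
+ * response style management header in a local buffer, append the cached IEs
+ * and hand the frame to cfg80211_inform_bss_frame().
+ */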
+static int rtw_cfg80211_inform_bss(struct rtw_adapter *padapter,
+ struct wlan_network *pnetwork)
+{
+ int ret = 0;
+ struct ieee80211_channel *notify_channel;
+ struct cfg80211_bss *bss;
+ /* struct ieee80211_supported_band *band; */
+ u16 channel;
+ u32 freq;
+ u64 notify_timestamp;
+ u16 notify_capability;
+ u16 notify_interval;
+ u8 *notify_ie;
+ size_t notify_ielen;
+ s32 notify_signal;
+ u8 buf[MAX_BSSINFO_LEN], *pbuf;
+ size_t len, bssinf_len = 0;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ u8 bc_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+ struct wireless_dev *wdev = padapter->rtw_wdev;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ /* DBG_8723A("%s\n", __func__); */
+
+ bssinf_len =
+ pnetwork->network.IELength + sizeof(struct ieee80211_hdr_3addr);
+ if (bssinf_len > MAX_BSSINFO_LEN) {
+ DBG_8723A("%s IE Length too long > %d byte\n", __func__,
+ MAX_BSSINFO_LEN);
+ goto exit;
+ }
+
+ channel = pnetwork->network.Configuration.DSConfig;
+ if (channel <= RTW_CH_MAX_2G_CHANNEL)
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_2GHZ);
+ else
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_5GHZ);
+
+ notify_channel = ieee80211_get_channel(wiphy, freq);
+
+ /* rtw_get_timestampe_from_ie23a() */
+ notify_timestamp = jiffies_to_msecs(jiffies) * 1000; /* uSec */
+
+ notify_interval =
+ le16_to_cpu(*(u16 *)
+ rtw_get_beacon_interval23a_from_ie(pnetwork->network.IEs));
+ notify_capability =
+ le16_to_cpu(*(u16 *)
+ rtw_get_capability23a_from_ie(pnetwork->network.IEs));
+
+ notify_ie = pnetwork->network.IEs + _FIXED_IE_LENGTH_;
+ notify_ielen = pnetwork->network.IELength - _FIXED_IE_LENGTH_;
+
+ /* We've set wiphy's signal_type as CFG80211_SIGNAL_TYPE_MBM:
+ * signal strength in mBm (100*dBm)
+ */
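+ /* e.g. a strength of -53 dBm is reported as -5300 mBm */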
+ if (check_fwstate(pmlmepriv, _FW_LINKED) &&
+ is_same_network23a(&pmlmepriv->cur_network.network,
+ &pnetwork->network)) {
+ notify_signal = 100 * translate_percentage_to_dbm(padapter->recvpriv.signal_strength); /* dbm */
+ } else {
+ notify_signal = 100 * translate_percentage_to_dbm(pnetwork->network.PhyInfo.SignalStrength); /* dbm */
+ }
+ pbuf = buf;
+
+ pwlanhdr = (struct ieee80211_hdr *)pbuf;
+ fctrl = &pwlanhdr->frame_control;
+ *(fctrl) = 0;
+
+ SetSeqNum(pwlanhdr, 0);
+
+ if (pnetwork->network.reserved == 1) { /* WIFI_BEACON */
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ SetFrameSubType(pbuf, WIFI_BEACON);
+ } else {
+ memcpy(pwlanhdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN);
+ SetFrameSubType(pbuf, WIFI_PROBERSP);
+ }
+
+ memcpy(pwlanhdr->addr2, pnetwork->network.MacAddress, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pnetwork->network.MacAddress, ETH_ALEN);
+
+ pbuf += sizeof(struct ieee80211_hdr_3addr);
+ len = sizeof(struct ieee80211_hdr_3addr);
+
+ memcpy(pbuf, pnetwork->network.IEs, pnetwork->network.IELength);
+ len += pnetwork->network.IELength;
+
+ bss = cfg80211_inform_bss_frame(wiphy, notify_channel,
+ (struct ieee80211_mgmt *)buf, len,
+ notify_signal, GFP_ATOMIC);
+
+ if (unlikely(!bss)) {
+ DBG_8723A("rtw_cfg80211_inform_bss error\n");
+ return -EINVAL;
+ }
+
+ cfg80211_put_bss(wiphy, bss);
+
+exit:
+ return ret;
+}
+
+void rtw_cfg80211_indicate_connect(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ struct wireless_dev *pwdev = padapter->rtw_wdev;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif
+
+ DBG_8723A("%s(padapter =%p)\n", __func__, padapter);
+
+ if (pwdev->iftype != NL80211_IFTYPE_STATION &&
+ pwdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ return;
+
+#ifdef CONFIG_8723AU_P2P
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ DBG_8723A("%s, role =%d, p2p_state =%d, pre_p2p_state =%d\n",
+ __func__, rtw_p2p_role(pwdinfo),
+ rtw_p2p_state(pwdinfo), rtw_p2p_pre_state(pwdinfo));
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ if (rtw_to_roaming(padapter) > 0) {
+ struct wiphy *wiphy = pwdev->wiphy;
+ struct ieee80211_channel *notify_channel;
+ u32 freq;
+ u16 channel = cur_network->network.Configuration.DSConfig;
+
+ if (channel <= RTW_CH_MAX_2G_CHANNEL)
+ freq =
+ ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_2GHZ);
+ else
+ freq =
+ ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_5GHZ);
+
+ notify_channel = ieee80211_get_channel(wiphy, freq);
+
+ DBG_8723A("%s call cfg80211_roamed\n", __func__);
+ cfg80211_roamed(padapter->pnetdev, notify_channel,
+ cur_network->network.MacAddress,
+ pmlmepriv->assoc_req +
+ sizeof(struct ieee80211_hdr_3addr) + 2,
+ pmlmepriv->assoc_req_len -
+ sizeof(struct ieee80211_hdr_3addr) - 2,
+ pmlmepriv->assoc_rsp +
+ sizeof(struct ieee80211_hdr_3addr) + 6,
+ pmlmepriv->assoc_rsp_len -
+ sizeof(struct ieee80211_hdr_3addr) - 6,
+ GFP_ATOMIC);
+ } else {
+ cfg80211_connect_result(padapter->pnetdev,
+ cur_network->network.MacAddress,
+ pmlmepriv->assoc_req +
+ sizeof(struct ieee80211_hdr_3addr) + 2,
+ pmlmepriv->assoc_req_len -
+ sizeof(struct ieee80211_hdr_3addr) - 2,
+ pmlmepriv->assoc_rsp +
+ sizeof(struct ieee80211_hdr_3addr) + 6,
+ pmlmepriv->assoc_rsp_len -
+ sizeof(struct ieee80211_hdr_3addr) - 6,
+ WLAN_STATUS_SUCCESS, GFP_ATOMIC);
+ }
+}
+
+void rtw_cfg80211_indicate_disconnect(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wireless_dev *pwdev = padapter->rtw_wdev;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif
+
+ DBG_8723A("%s(padapter =%p)\n", __func__, padapter);
+
+ if (pwdev->iftype != NL80211_IFTYPE_STATION &&
+ pwdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ return;
+
+#ifdef CONFIG_8723AU_P2P
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ del_timer_sync(&pwdinfo->find_phase_timer);
+ del_timer_sync(&pwdinfo->restore_p2p_state_timer);
+ del_timer_sync(&pwdinfo->pre_tx_scan_timer);
+
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+
+ DBG_8723A("%s, role =%d, p2p_state =%d, pre_p2p_state =%d\n",
+ __func__, rtw_p2p_role(pwdinfo),
+ rtw_p2p_state(pwdinfo), rtw_p2p_pre_state(pwdinfo));
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ if (!padapter->mlmepriv.not_indic_disco) {
+ if (check_fwstate(&padapter->mlmepriv, WIFI_UNDER_LINKING)) {
+ cfg80211_connect_result(padapter->pnetdev, NULL, NULL,
+ 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_ATOMIC);
+ } else {
+ cfg80211_disconnected(padapter->pnetdev, 0, NULL,
+ 0, GFP_ATOMIC);
+ }
+ }
+}
+
+#ifdef CONFIG_8723AU_AP_MODE
+static u8 set_pairwise_key(struct rtw_adapter *padapter, struct sta_info *psta)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
+ if (psetstakey_para == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
+
+ psetstakey_para->algorithm = (u8) psta->dot118021XPrivacy;
+
+ memcpy(psetstakey_para->addr, psta->hwaddr, ETH_ALEN);
+
+ memcpy(psetstakey_para->key, &psta->dot118021x_UncstKey, 16);
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
+
+exit:
+ return res;
+}
+
+static int set_group_key(struct rtw_adapter *padapter, u8 *key, u8 alg,
+ int keyid)
+{
+ u8 keylen;
+ struct cmd_obj *pcmd;
+ struct setkey_parm *psetkeyparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ int res = _SUCCESS;
+
+ DBG_8723A("%s\n", __func__);
+
+ pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ if (!pcmd) {
+ res = _FAIL;
+ goto exit;
+ }
+ psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
+ if (!psetkeyparm) {
+ kfree(pcmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetkeyparm->keyid = (u8) keyid;
+ if (is_wep_enc(alg))
+ padapter->mlmepriv.key_mask |= CHKBIT(psetkeyparm->keyid);
+
+ psetkeyparm->algorithm = alg;
+
+ psetkeyparm->set_tx = 1;
+
+ switch (alg) {
+ case _WEP40_:
+ keylen = 5;
+ break;
+ case _WEP104_:
+ keylen = 13;
+ break;
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ case _AES_:
+ default:
+ keylen = 16;
+ }
+
+ memcpy(&psetkeyparm->key[0], key, keylen);
+
+ pcmd->cmdcode = _SetKey_CMD_;
+ pcmd->parmbuf = (u8 *) psetkeyparm;
+ pcmd->cmdsz = (sizeof(struct setkey_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ INIT_LIST_HEAD(&pcmd->list);
+
+ res = rtw_enqueue_cmd23a(pcmdpriv, pcmd);
+
+exit:
+ return res;
+}
+
+static int set_wep_key(struct rtw_adapter *padapter, u8 *key, u8 keylen,
+ int keyid)
+{
+ u8 alg;
+
+ switch (keylen) {
+ case 5:
+ alg = _WEP40_;
+ break;
+ case 13:
+ alg = _WEP104_;
+ break;
+ default:
+ alg = _NO_PRIVACY_;
+ }
+
+ return set_group_key(padapter, key, alg, keyid);
+}
+
+static int rtw_cfg80211_ap_set_encryption(struct net_device *dev,
+ struct ieee_param *param,
+ u32 param_len)
+{
+ int ret = 0;
+ u32 wep_key_idx, wep_key_len;
+ struct sta_info *psta = NULL, *pbcmc_sta = NULL;
+ struct rtw_adapter *padapter = netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_8723A("%s\n", __func__);
+
+ param->u.crypt.err = 0;
+ param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
+
+ /* sizeof(struct ieee_param) = 64 bytes; */
+ /* if (param_len != (u32) ((u8 *) param->u.crypt.key -
+ (u8 *) param) + param->u.crypt.key_len) */
+ if (param_len != sizeof(struct ieee_param) + param->u.crypt.key_len) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (is_broadcast_ether_addr(param->sta_addr)) {
+ if (param->u.crypt.idx >= WEP_KEYS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ psta = rtw_get_stainfo23a(pstapriv, param->sta_addr);
+ if (!psta) {
+ /* ret = -EINVAL; */
+ DBG_8723A("rtw_set_encryption(), sta has already "
+ "been removed or never been added\n");
+ goto exit;
+ }
+ }
+
+ if (strcmp(param->u.crypt.alg, "none") == 0 && (psta == NULL)) {
+ /* todo:clear default encryption keys */
+
+ DBG_8723A("clear default encryption keys, keyid =%d\n",
+ param->u.crypt.idx);
+
+ goto exit;
+ }
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0 && (psta == NULL)) {
+ DBG_8723A("r871x_set_encryption, crypt.alg = WEP\n");
+
+ wep_key_idx = param->u.crypt.idx;
+ wep_key_len = param->u.crypt.key_len;
+
+ DBG_8723A("r871x_set_encryption, wep_key_idx =%d, len =%d\n",
+ wep_key_idx, wep_key_len);
+
+ if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (wep_key_len > 0) {
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ }
+
+ if (psecuritypriv->bWepDefaultKeyIdxSet == 0) {
+ /* wep default key has not been set, so use
+ this key index as default key. */
+
+ psecuritypriv->ndisencryptstatus =
+ Ndis802_11Encryption1Enabled;
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+
+ if (wep_key_len == 13) {
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+
+ psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
+ }
+
+ memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0],
+ param->u.crypt.key, wep_key_len);
+
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = wep_key_len;
+
+ set_wep_key(padapter, param->u.crypt.key, wep_key_len,
+ wep_key_idx);
+
+ goto exit;
+
+ }
+
+ if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /* group key */
+ if (param->u.crypt.set_tx == 0) { /* group key */
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ DBG_8723A("%s, set group_key, WEP\n",
+ __func__);
+
+ memcpy(psecuritypriv->
+ dot118021XGrpKey[param->u.crypt.idx].
+ skey, param->u.crypt.key,
+ (param->u.crypt.key_len >
+ 16 ? 16 : param->u.crypt.key_len));
+
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13) {
+ psecuritypriv->dot118021XGrpPrivacy =
+ _WEP104_;
+ }
+
+ } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ DBG_8723A("%s, set group_key, TKIP\n",
+ __func__);
+
+ psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
+
+ memcpy(psecuritypriv->
+ dot118021XGrpKey[param->u.crypt.idx].
+ skey, param->u.crypt.key,
+ (param->u.crypt.key_len >
+ 16 ? 16 : param->u.crypt.key_len));
+
+ /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
+ /* set mic key */
+ memcpy(psecuritypriv->
+ dot118021XGrptxmickey[param->u.crypt.
+ idx].skey,
+ &param->u.crypt.key[16], 8);
+ memcpy(psecuritypriv->
+ dot118021XGrprxmickey[param->u.crypt.
+ idx].skey,
+ &param->u.crypt.key[24], 8);
+
+ psecuritypriv->busetkipkey = true;
+
+ } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ DBG_8723A("%s, set group_key, CCMP\n",
+ __func__);
+
+ psecuritypriv->dot118021XGrpPrivacy = _AES_;
+
+ memcpy(psecuritypriv->
+ dot118021XGrpKey[param->u.crypt.idx].
+ skey, param->u.crypt.key,
+ (param->u.crypt.key_len >
+ 16 ? 16 : param->u.crypt.key_len));
+ } else {
+ DBG_8723A("%s, set group_key, none\n",
+ __func__);
+
+ psecuritypriv->dot118021XGrpPrivacy =
+ _NO_PRIVACY_;
+ }
+
+ psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
+
+ psecuritypriv->binstallGrpkey = true;
+
+ psecuritypriv->dot11PrivacyAlgrthm =
+ psecuritypriv->dot118021XGrpPrivacy;
+
+ set_group_key(padapter, param->u.crypt.key,
+ psecuritypriv->dot118021XGrpPrivacy,
+ param->u.crypt.idx);
+
+ pbcmc_sta = rtw_get_bcmc_stainfo23a(padapter);
+ if (pbcmc_sta) {
+ pbcmc_sta->ieee8021x_blocked = false;
+ /* rx will use bmc_sta's dot118021XPrivacy */
+ pbcmc_sta->dot118021XPrivacy =
+ psecuritypriv->dot118021XGrpPrivacy;
+
+ }
+
+ }
+
+ goto exit;
+ }
+
+ if (psecuritypriv->dot11AuthAlgrthm ==
+ dot11AuthAlgrthm_8021X && psta) { /* psk/802_1x */
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ if (param->u.crypt.set_tx == 1) {
+ /* pairwise key */
+ memcpy(psta->dot118021x_UncstKey.skey,
+ param->u.crypt.key,
+ (param->u.crypt.key_len >
+ 16 ? 16 : param->u.crypt.key_len));
+
+ if (!strcmp(param->u.crypt.alg, "WEP")) {
+ DBG_8723A("%s, set pairwise key, WEP\n",
+ __func__);
+
+ psta->dot118021XPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13) {
+ psta->dot118021XPrivacy =
+ _WEP104_;
+ }
+ } else if (!strcmp(param->u.crypt.alg, "TKIP")) {
+ DBG_8723A("%s, set pairwise key, "
+ "TKIP\n", __func__);
+
+ psta->dot118021XPrivacy = _TKIP_;
+
+ /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
+ /* set mic key */
+ memcpy(psta->dot11tkiptxmickey.skey,
+ &param->u.crypt.key[16], 8);
+ memcpy(psta->dot11tkiprxmickey.skey,
+ &param->u.crypt.key[24], 8);
+
+ psecuritypriv->busetkipkey = true;
+
+ } else if (!strcmp(param->u.crypt.alg, "CCMP")) {
+
+ DBG_8723A("%s, set pairwise key, "
+ "CCMP\n", __func__);
+
+ psta->dot118021XPrivacy = _AES_;
+ } else {
+ DBG_8723A("%s, set pairwise key, "
+ "none\n", __func__);
+
+ psta->dot118021XPrivacy = _NO_PRIVACY_;
+ }
+
+ set_pairwise_key(padapter, psta);
+
+ psta->ieee8021x_blocked = false;
+
+ psta->bpairwise_key_installed = true;
+ } else { /* group key??? */
+ if (!strcmp(param->u.crypt.alg, "WEP")) {
+ memcpy(psecuritypriv->
+ dot118021XGrpKey[param->u.crypt.
+ idx].skey,
+ param->u.crypt.key,
+ (param->u.crypt.key_len >
+ 16 ? 16 : param->u.crypt.
+ key_len));
+
+ psecuritypriv->dot118021XGrpPrivacy =
+ _WEP40_;
+ if (param->u.crypt.key_len == 13) {
+ psecuritypriv->
+ dot118021XGrpPrivacy =
+ _WEP104_;
+ }
+ } else if (!strcmp(param->u.crypt.alg, "TKIP")) {
+ psecuritypriv->dot118021XGrpPrivacy =
+ _TKIP_;
+
+ memcpy(psecuritypriv->
+ dot118021XGrpKey[param->u.crypt.
+ idx].skey,
+ param->u.crypt.key,
+ (param->u.crypt.key_len >
+ 16 ? 16 : param->u.crypt.
+ key_len));
+
+ /* DEBUG_ERR("set key length :param->u"
+ ".crypt.key_len =%d\n",
+ param->u.crypt.key_len); */
+ /* set mic key */
+ memcpy(psecuritypriv->
+ dot118021XGrptxmickey[param->u.
+ crypt.idx].
+ skey, &param->u.crypt.key[16],
+ 8);
+ memcpy(psecuritypriv->
+ dot118021XGrprxmickey[param->u.
+ crypt.idx].
+ skey, &param->u.crypt.key[24],
+ 8);
+
+ psecuritypriv->busetkipkey = true;
+
+ } else if (!strcmp(param->u.crypt.alg, "CCMP")) {
+ psecuritypriv->dot118021XGrpPrivacy =
+ _AES_;
+
+ memcpy(psecuritypriv->
+ dot118021XGrpKey[param->u.crypt.
+ idx].skey,
+ param->u.crypt.key,
+ (param->u.crypt.key_len >
+ 16 ? 16 : param->u.crypt.
+ key_len));
+ } else {
+ psecuritypriv->dot118021XGrpPrivacy =
+ _NO_PRIVACY_;
+ }
+
+ psecuritypriv->dot118021XGrpKeyid =
+ param->u.crypt.idx;
+
+ psecuritypriv->binstallGrpkey = true;
+
+ psecuritypriv->dot11PrivacyAlgrthm =
+ psecuritypriv->dot118021XGrpPrivacy;
+
+ set_group_key(padapter, param->u.crypt.key,
+ psecuritypriv->
+ dot118021XGrpPrivacy,
+ param->u.crypt.idx);
+
+ pbcmc_sta = rtw_get_bcmc_stainfo23a(padapter);
+ if (pbcmc_sta) {
+ /* rx will use bmc_sta's
+ dot118021XPrivacy */
+ pbcmc_sta->ieee8021x_blocked = false;
+ pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;
+ }
+ }
+ }
+ }
+
+exit:
+
+ return ret;
+
+}
+#endif
+
+static int rtw_cfg80211_set_encryption(struct net_device *dev,
+ struct ieee_param *param, u32 param_len)
+{
+ int ret = 0;
+ u32 wep_key_idx, wep_key_len;
+ struct rtw_adapter *padapter = netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_8723AU_P2P */
+
+
+
+ DBG_8723A("%s\n", __func__);
+
+ param->u.crypt.err = 0;
+ param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
+
+ if (param_len <
+ (u32) ((u8 *) param->u.crypt.key - (u8 *) param) +
+ param->u.crypt.key_len) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (is_broadcast_ether_addr(param->sta_addr)) {
+ if (param->u.crypt.idx >= WEP_KEYS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_,
+ ("wpa_set_encryption, crypt.alg = WEP\n"));
+ DBG_8723A("wpa_set_encryption, crypt.alg = WEP\n");
+
+ wep_key_idx = param->u.crypt.idx;
+ wep_key_len = param->u.crypt.key_len;
+
+ if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (psecuritypriv->bWepDefaultKeyIdxSet == 0) {
+ /* wep default key has not been set, so use this
+ key index as default key. */
+
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+
+ psecuritypriv->ndisencryptstatus =
+ Ndis802_11Encryption1Enabled;
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+
+ if (wep_key_len == 13) {
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+
+ psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
+ }
+
+ memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0],
+ param->u.crypt.key, wep_key_len);
+
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = wep_key_len;
+
+ rtw_set_key23a(padapter, psecuritypriv, wep_key_idx, 0);
+
+ goto exit;
+ }
+
+ if (padapter->securitypriv.dot11AuthAlgrthm ==
+ dot11AuthAlgrthm_8021X) { /* 802_1x */
+ struct sta_info *psta, *pbcmc_sta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (check_fwstate(pmlmepriv,
+ WIFI_STATION_STATE | WIFI_MP_STATE)) {
+ /* sta mode */
+ psta = rtw_get_stainfo23a(pstapriv, get_bssid(pmlmepriv));
+ if (psta == NULL) {
+ DBG_8723A("%s, : Obtain Sta_info fail\n",
+ __func__);
+ } else {
+ /* Jeff: don't disable ieee8021x_blocked
+ while clearing key */
+ if (strcmp(param->u.crypt.alg, "none") != 0)
+ psta->ieee8021x_blocked = false;
+
+ if ((padapter->securitypriv.ndisencryptstatus ==
+ Ndis802_11Encryption2Enabled) ||
+ (padapter->securitypriv.ndisencryptstatus ==
+ Ndis802_11Encryption3Enabled)) {
+ psta->dot118021XPrivacy =
+ padapter->securitypriv.
+ dot11PrivacyAlgrthm;
+ }
+
+ if (param->u.crypt.set_tx == 1) {
+ /* pairwise key */
+ DBG_8723A("%s, : param->u.crypt.set_tx"
+ " == 1\n", __func__);
+
+ memcpy(psta->dot118021x_UncstKey.skey,
+ param->u.crypt.key,
+ (param->u.crypt.key_len >
+ 16 ? 16 : param->u.crypt.
+ key_len));
+
+ if (strcmp(param->u.crypt.alg,
+ "TKIP") == 0) {
+ memcpy(psta->dot11tkiptxmickey.
+ skey,
+ &param->u.crypt.key[16],
+ 8);
+ memcpy(psta->dot11tkiprxmickey.
+ skey,
+ &param->u.crypt.key[24],
+ 8);
+
+ padapter->securitypriv.
+ busetkipkey = false;
+ }
+ DBG_8723A(" ~~~~set sta key:unicastkey\n");
+
+ rtw_setstakey_cmd23a(padapter,
+ (unsigned char *)psta,
+ true);
+ } else { /* group key */
+ memcpy(padapter->securitypriv.
+ dot118021XGrpKey[param->u.crypt.
+ idx].skey,
+ param->u.crypt.key,
+ (param->u.crypt.key_len >
+ 16 ? 16 : param->u.crypt.
+ key_len));
+ memcpy(padapter->securitypriv.
+ dot118021XGrptxmickey[param->u.
+ crypt.idx].
+ skey, &param->u.crypt.key[16],
+ 8);
+ memcpy(padapter->securitypriv.
+ dot118021XGrprxmickey[param->u.
+ crypt.idx].
+ skey, &param->u.crypt.key[24],
+ 8);
+ padapter->securitypriv.binstallGrpkey =
+ true;
+ /* DEBUG_ERR((" param->u.crypt.key_len"
+ "=%d\n", param->u.crypt.key_len)); */
+ DBG_8723A
+ (" ~~~~set sta key:groupkey\n");
+
+ padapter->securitypriv.
+ dot118021XGrpKeyid =
+ param->u.crypt.idx;
+
+ rtw_set_key23a(padapter,
+ &padapter->securitypriv,
+ param->u.crypt.idx, 1);
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_p2p_chk_state
+ (pwdinfo,
+ P2P_STATE_PROVISIONING_ING)) {
+ rtw_p2p_set_state(pwdinfo,
+ P2P_STATE_PROVISIONING_DONE);
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ }
+ }
+
+ pbcmc_sta = rtw_get_bcmc_stainfo23a(padapter);
+ if (pbcmc_sta) {
+ /* Jeff: don't disable ieee8021x_blocked
+ while clearing key */
+ if (strcmp(param->u.crypt.alg, "none") != 0)
+ pbcmc_sta->ieee8021x_blocked = false;
+
+ if ((padapter->securitypriv.ndisencryptstatus ==
+ Ndis802_11Encryption2Enabled) ||
+ (padapter->securitypriv.ndisencryptstatus ==
+ Ndis802_11Encryption3Enabled)) {
+ pbcmc_sta->dot118021XPrivacy =
+ padapter->securitypriv.
+ dot11PrivacyAlgrthm;
+ }
+ }
+ } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { /* adhoc mode */
+ }
+ }
+
+exit:
+
+ DBG_8723A("%s, ret =%d\n", __func__, ret);
+
+
+
+ return ret;
+}
+
+static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
+{
+ char *alg_name;
+ u32 param_len;
+ struct ieee_param *param = NULL;
+ int ret = 0;
+ struct wireless_dev *rtw_wdev = wiphy_to_wdev(wiphy);
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ DBG_8723A(FUNC_NDEV_FMT " adding key for %pM\n", FUNC_NDEV_ARG(ndev),
+ mac_addr);
+ DBG_8723A("cipher = 0x%x\n", params->cipher);
+ DBG_8723A("key_len = 0x%x\n", params->key_len);
+ DBG_8723A("seq_len = 0x%x\n", params->seq_len);
+ DBG_8723A("key_index =%d\n", key_index);
+ DBG_8723A("pairwise =%d\n", pairwise);
+
+ param_len = sizeof(struct ieee_param) + params->key_len;
+ param = kzalloc(param_len, GFP_KERNEL);
+ if (param == NULL)
+ return -1;
+
+ param->cmd = IEEE_CMD_SET_ENCRYPTION;
+ memset(param->sta_addr, 0xff, ETH_ALEN);
+
+ switch (params->cipher) {
+ case IW_AUTH_CIPHER_NONE:
+ /* todo: remove key */
+ /* remove = 1; */
+ alg_name = "none";
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ alg_name = "WEP";
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ alg_name = "TKIP";
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ alg_name = "CCMP";
+ break;
+
+ default:
+ ret = -ENOTSUPP;
+ goto addkey_end;
+ }
+
+ strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
+
+ if (!mac_addr || is_broadcast_ether_addr(mac_addr)) {
+ param->u.crypt.set_tx = 0; /* for wpa/wpa2 group key */
+ } else {
+ param->u.crypt.set_tx = 1; /* for wpa/wpa2 pairwise key */
+ }
+
+ /* param->u.crypt.idx = key_index - 1; */
+ param->u.crypt.idx = key_index;
+
+ if (params->seq_len && params->seq) {
+ memcpy(param->u.crypt.seq, params->seq, params->seq_len);
+ }
+
+ if (params->key_len && params->key) {
+ param->u.crypt.key_len = params->key_len;
+ memcpy(param->u.crypt.key, params->key, params->key_len);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ ret = rtw_cfg80211_set_encryption(ndev, param, param_len);
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+#ifdef CONFIG_8723AU_AP_MODE
+ if (mac_addr)
+ memcpy(param->sta_addr, (void *)mac_addr, ETH_ALEN);
+
+ ret = rtw_cfg80211_ap_set_encryption(ndev, param, param_len);
+#endif
+ } else {
+ DBG_8723A("error! fw_state = 0x%x, iftype =%d\n",
+ pmlmepriv->fw_state, rtw_wdev->iftype);
+
+ }
+
+addkey_end:
+ kfree(param);
+
+ return ret;
+}
+
+static int
+cfg80211_rtw_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ void *cookie,
+ void (*callback) (void *cookie, struct key_params *))
+{
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+ return 0;
+}
+
+static int cfg80211_rtw_del_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct rtw_adapter *padapter = netdev_priv(ndev);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ DBG_8723A(FUNC_NDEV_FMT " key_index =%d\n", FUNC_NDEV_ARG(ndev),
+ key_index);
+
+ if (key_index == psecuritypriv->dot11PrivacyKeyIndex) {
+ /* clear the flag of wep default key set. */
+ psecuritypriv->bWepDefaultKeyIdxSet = 0;
+ }
+
+ return 0;
+}
+
+static int cfg80211_rtw_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev, u8 key_index,
+ bool unicast, bool multicast)
+{
+ struct rtw_adapter *padapter = netdev_priv(ndev);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ DBG_8723A(FUNC_NDEV_FMT " key_index =%d"
+ ", unicast =%d, multicast =%d.\n", FUNC_NDEV_ARG(ndev),
+ key_index, unicast, multicast);
+
+ if ((key_index < WEP_KEYS) &&
+ ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) ||
+ (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_))) {
+ /* set wep default key */
+ psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
+
+ psecuritypriv->dot11PrivacyKeyIndex = key_index;
+
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+ if (psecuritypriv->dot11DefKeylen[key_index] == 13) {
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+
+ /* set the flag to represent that wep default key
+ has been set */
+ psecuritypriv->bWepDefaultKeyIdxSet = 1;
+ }
+
+ return 0;
+}
+
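+/*
+ * Station info is only filled in for the currently associated AP while in
+ * station mode; the Ad-Hoc/AP-mode path below is still a TODO.
+ */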
+static int cfg80211_rtw_get_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 *mac, struct station_info *sinfo)
+{
+ int ret = 0;
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ sinfo->filled = 0;
+
+ if (!mac) {
+ DBG_8723A(FUNC_NDEV_FMT " mac ==%p\n", FUNC_NDEV_ARG(ndev), mac);
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ psta = rtw_get_stainfo23a(pstapriv, mac);
+ if (psta == NULL) {
+ DBG_8723A("%s, sta_info is null\n", __func__);
+ ret = -ENOENT;
+ goto exit;
+ }
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A(FUNC_NDEV_FMT " mac =" MAC_FMT "\n", FUNC_NDEV_ARG(ndev),
+ MAC_ARG(mac));
+#endif
+
+ /* for infra./P2PClient mode */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
+ check_fwstate(pmlmepriv, _FW_LINKED)) {
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+
+ if (memcmp(mac, cur_network->network.MacAddress, ETH_ALEN)) {
+ DBG_8723A("%s, mismatch bssid =" MAC_FMT "\n", __func__,
+ MAC_ARG(cur_network->network.MacAddress));
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->signal = translate_percentage_to_dbm(padapter->recvpriv.
+ signal_strength);
+
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->txrate.legacy = rtw_get_cur_max_rate23a(padapter);
+
+ sinfo->filled |= STATION_INFO_RX_PACKETS;
+ sinfo->rx_packets = sta_rx_data_pkts(psta);
+
+ sinfo->filled |= STATION_INFO_TX_PACKETS;
+ sinfo->tx_packets = psta->sta_stats.tx_pkts;
+ }
+
+ /* for Ad-Hoc/AP mode */
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_AP_STATE)) &&
+ check_fwstate(pmlmepriv, _FW_LINKED)
+ ) {
+ /* TODO: should acquire station info... */
+ }
+
+exit:
+ return ret;
+}
+
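+/*
+ * Map the requested nl80211 iftype onto the driver's NDIS infrastructure
+ * mode and, when P2P is enabled, adjust the P2P role/state to match.
+ */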
+static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ enum nl80211_iftype old_type;
+ enum ndis_802_11_net_infra networkType;
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wireless_dev *rtw_wdev = wiphy_to_wdev(wiphy);
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif
+ int ret = 0;
+ u8 change = false;
+
+ DBG_8723A(FUNC_NDEV_FMT " call netdev_open23a\n", FUNC_NDEV_ARG(ndev));
+ if (netdev_open23a(ndev) != 0) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ old_type = rtw_wdev->iftype;
+ DBG_8723A(FUNC_NDEV_FMT " old_iftype =%d, new_iftype =%d\n",
+ FUNC_NDEV_ARG(ndev), old_type, type);
+
+ if (old_type != type) {
+ change = true;
+ pmlmeext->action_public_rxseq = 0xffff;
+ pmlmeext->action_public_dialog_token = 0xff;
+ }
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ networkType = Ndis802_11IBSS;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_STATION:
+ networkType = Ndis802_11Infrastructure;
+#ifdef CONFIG_8723AU_P2P
+ if (change && rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ del_timer_sync(&pwdinfo->find_phase_timer);
+ del_timer_sync(&pwdinfo->restore_p2p_state_timer);
+ del_timer_sync(&pwdinfo->pre_tx_scan_timer);
+
+ /* it means remove GO and change mode from AP(GO)
+ to station(P2P DEVICE) */
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+
+ DBG_8723A("%s, role =%d, p2p_state =%d, pre_p2p_state ="
+ "%d\n", __func__, rtw_p2p_role(pwdinfo),
+ rtw_p2p_state(pwdinfo),
+ rtw_p2p_pre_state(pwdinfo));
+ }
+#endif /* CONFIG_8723AU_P2P */
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ networkType = Ndis802_11APMode;
+#ifdef CONFIG_8723AU_P2P
+ if (change && !rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ /* it means P2P Group created, we will be GO
+ and change mode from P2P DEVICE to AP(GO) */
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+#endif /* CONFIG_8723AU_P2P */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rtw_wdev->iftype = type;
+
+ if (rtw_set_802_11_infrastructure_mode23a(padapter, networkType) == false) {
+ rtw_wdev->iftype = old_type;
+ ret = -EPERM;
+ goto exit;
+ }
+
+ rtw_setopmode_cmd23a(padapter, networkType);
+
+exit:
+ return ret;
+}
+
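+/* Complete a pending cfg80211 scan request, if any, under scan_req_lock. */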
+void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
+ bool aborted)
+{
+ spin_lock_bh(&pwdev_priv->scan_req_lock);
+ if (pwdev_priv->scan_request != NULL) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s with scan req\n", __func__);
+#endif
+ if (pwdev_priv->scan_request->wiphy !=
+ pwdev_priv->rtw_wdev->wiphy)
+ DBG_8723A("error wiphy compare\n");
+ else
+ cfg80211_scan_done(pwdev_priv->scan_request, aborted);
+
+ pwdev_priv->scan_request = NULL;
+ } else {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s without scan req\n", __func__);
+#endif
+ }
+ spin_unlock_bh(&pwdev_priv->scan_req_lock);
+}
+
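+/*
+ * Walk the scanned-network queue and report to cfg80211 every BSS whose
+ * channel is part of the current channel set, then signal scan completion.
+ */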
+void rtw_cfg80211_surveydone_event_callback(struct rtw_adapter *padapter)
+{
+ struct list_head *plist, *phead, *ptmp;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct rtw_queue *queue = &pmlmepriv->scanned_queue;
+ struct wlan_network *pnetwork;
+
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s\n", __func__);
+#endif
+
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+
+ phead = get_list_head(queue);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ pnetwork = container_of(plist, struct wlan_network, list);
+
+ /* report network only if the current channel set
+ contains the channel to which this network belongs */
+ if (rtw_ch_set_search_ch23a
+ (padapter->mlmeextpriv.channel_set,
+ pnetwork->network.Configuration.DSConfig) >= 0)
+ rtw_cfg80211_inform_bss(padapter, pnetwork);
+ }
+
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+
+ /* call this after other things have been done */
+ rtw_cfg80211_indicate_scan_done(wdev_to_priv(padapter->rtw_wdev),
+ false);
+}
+
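+/*
+ * Cache the WPS (and, when enabled, P2P/WFD) IEs supplied with a scan
+ * request so they can be inserted into outgoing probe requests.
+ */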
+static int rtw_cfg80211_set_probe_req_wpsp2pie(struct rtw_adapter *padapter,
+ char *buf, int len)
+{
+ int ret = 0;
+ uint wps_ielen = 0;
+ u8 *wps_ie;
+#ifdef CONFIG_8723AU_P2P
+ u32 p2p_ielen = 0;
+ u8 *p2p_ie;
+ u32 wfd_ielen = 0;
+#endif
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s, ielen =%d\n", __func__, len);
+#endif
+
+ if (len > 0) {
+ wps_ie = rtw_get_wps_ie23a(buf, len, NULL, &wps_ielen);
+ if (wps_ie) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("probe_req_wps_ielen =%d\n", wps_ielen);
+#endif
+ if (pmlmepriv->wps_probe_req_ie) {
+ pmlmepriv->wps_probe_req_ie_len = 0;
+ kfree(pmlmepriv->wps_probe_req_ie);
+ pmlmepriv->wps_probe_req_ie = NULL;
+ }
+
+ pmlmepriv->wps_probe_req_ie =
+ kmalloc(wps_ielen, GFP_KERNEL);
+ if (pmlmepriv->wps_probe_req_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ memcpy(pmlmepriv->wps_probe_req_ie, wps_ie, wps_ielen);
+ pmlmepriv->wps_probe_req_ie_len = wps_ielen;
+ }
+#ifdef CONFIG_8723AU_P2P
+ p2p_ie = rtw_get_p2p_ie23a(buf, len, NULL, &p2p_ielen);
+ if (p2p_ie) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("probe_req_p2p_ielen =%d\n", p2p_ielen);
+#endif
+
+ if (pmlmepriv->p2p_probe_req_ie) {
+ pmlmepriv->p2p_probe_req_ie_len = 0;
+ kfree(pmlmepriv->p2p_probe_req_ie);
+ pmlmepriv->p2p_probe_req_ie = NULL;
+ }
+
+ pmlmepriv->p2p_probe_req_ie =
+ kmalloc(p2p_ielen, GFP_KERNEL);
+ if (pmlmepriv->p2p_probe_req_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+
+ }
+ memcpy(pmlmepriv->p2p_probe_req_ie, p2p_ie, p2p_ielen);
+ pmlmepriv->p2p_probe_req_ie_len = p2p_ielen;
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ /* buf += p2p_ielen; */
+ /* len -= p2p_ielen; */
+
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_get_wfd_ie(buf, len, NULL, &wfd_ielen)) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("probe_req_wfd_ielen =%d\n", wfd_ielen);
+#endif
+
+ if (pmlmepriv->wfd_probe_req_ie) {
+ pmlmepriv->wfd_probe_req_ie_len = 0;
+ kfree(pmlmepriv->wfd_probe_req_ie);
+ pmlmepriv->wfd_probe_req_ie = NULL;
+ }
+
+ pmlmepriv->wfd_probe_req_ie =
+ kmalloc(wfd_ielen, GFP_KERNEL);
+ if (pmlmepriv->wfd_probe_req_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+
+ }
+ rtw_get_wfd_ie(buf, len, pmlmepriv->wfd_probe_req_ie,
+ &pmlmepriv->wfd_probe_req_ie_len);
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ }
+
+ return ret;
+}
+
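+/*
+ * Scan entry point: store the cfg80211 request, handle the P2P "DIRECT-"
+ * wildcard SSID, bail out early when scanning is not currently possible,
+ * and otherwise translate the request into a site-survey command.
+ */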
+static int cfg80211_rtw_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
+{
+ int i;
+ u8 _status = false;
+ int ret = 0;
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct cfg80211_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
+ struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);
+ struct cfg80211_ssid *ssids = request->ssids;
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ int social_channel = 0;
+#endif /* CONFIG_8723AU_P2P */
+ bool need_indicate_scan_done = false;
+
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A(FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(padapter));
+#endif
+
+ spin_lock_bh(&pwdev_priv->scan_req_lock);
+ pwdev_priv->scan_request = request;
+ spin_unlock_bh(&pwdev_priv->scan_req_lock);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s under WIFI_AP_STATE\n", __func__);
+#endif
+ /* need_indicate_scan_done = true; */
+ /* goto check_need_indicate_scan_done; */
+ }
+
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
+ need_indicate_scan_done = true;
+ goto check_need_indicate_scan_done;
+ }
+#ifdef CONFIG_8723AU_P2P
+ if (!memcmp(ssids->ssid, "DIRECT-", 7) &&
+ rtw_get_p2p_ie23a((u8 *) request->ie, request->ie_len, NULL, NULL)) {
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ rtw_p2p_enable23a(padapter, P2P_ROLE_DEVICE);
+ wdev_to_priv(padapter->rtw_wdev)->p2p_enabled = true;
+ } else {
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s, role =%d, p2p_state =%d\n", __func__,
+ rtw_p2p_role(pwdinfo),
+ rtw_p2p_state(pwdinfo));
+#endif
+ }
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_LISTEN);
+
+ if (request->n_channels == 3 &&
+ request->channels[0]->hw_value == 1 &&
+ request->channels[1]->hw_value == 6 &&
+ request->channels[2]->hw_value == 11)
+ social_channel = 1;
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ if (request->ie && request->ie_len > 0) {
+ rtw_cfg80211_set_probe_req_wpsp2pie(padapter,
+ (u8 *) request->ie,
+ request->ie_len);
+ }
+
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true) {
+ DBG_8723A("%s, bBusyTraffic == true\n", __func__);
+ need_indicate_scan_done = true;
+ goto check_need_indicate_scan_done;
+ }
+ if (rtw_is_scan_deny(padapter)) {
+ DBG_8723A(FUNC_ADPT_FMT ": scan deny\n",
+ FUNC_ADPT_ARG(padapter));
+ need_indicate_scan_done = true;
+ goto check_need_indicate_scan_done;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING) ==
+ true) {
+ DBG_8723A("%s, fwstate = 0x%x\n", __func__, pmlmepriv->fw_state);
+ need_indicate_scan_done = true;
+ goto check_need_indicate_scan_done;
+ }
+#ifdef CONFIG_8723AU_P2P
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) &&
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE)) {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
+ rtw_free_network_queue23a(padapter, true);
+
+ if (social_channel == 0)
+ rtw_p2p_findphase_ex_set(pwdinfo,
+ P2P_FINDPHASE_EX_NONE);
+ else
+ rtw_p2p_findphase_ex_set(pwdinfo,
+ P2P_FINDPHASE_EX_SOCIAL_LAST);
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ memset(ssid, 0, sizeof(struct cfg80211_ssid) * RTW_SSID_SCAN_AMOUNT);
+ /* parsing request ssids, n_ssids */
+ for (i = 0; i < request->n_ssids && i < RTW_SSID_SCAN_AMOUNT; i++) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("ssid =%s, len =%d\n", ssids[i].ssid,
+ ssids[i].ssid_len);
+#endif
+ memcpy(ssid[i].ssid, ssids[i].ssid, ssids[i].ssid_len);
+ ssid[i].ssid_len = ssids[i].ssid_len;
+ }
+
+ /* parsing channels, n_channels */
+ memset(ch, 0,
+ sizeof(struct rtw_ieee80211_channel) * RTW_CHANNEL_SCAN_AMOUNT);
+
+ if (request->n_channels == 1) {
+ for (i = 0; i < request->n_channels &&
+ i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A(FUNC_ADPT_FMT CHAN_FMT "\n",
+ FUNC_ADPT_ARG(padapter),
+ CHAN_ARG(request->channels[i]));
+#endif
+ ch[i].hw_value = request->channels[i]->hw_value;
+ ch[i].flags = request->channels[i]->flags;
+ }
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+ if (request->n_channels == 1) {
+ memcpy(&ch[1], &ch[0], sizeof(struct rtw_ieee80211_channel));
+ memcpy(&ch[2], &ch[0], sizeof(struct rtw_ieee80211_channel));
+ _status = rtw_sitesurvey_cmd23a(padapter, ssid,
+ RTW_SSID_SCAN_AMOUNT, ch, 3);
+ } else {
+ _status = rtw_sitesurvey_cmd23a(padapter, ssid,
+ RTW_SSID_SCAN_AMOUNT, NULL, 0);
+ }
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ if (_status == false)
+ ret = -1;
+
+check_need_indicate_scan_done:
+ if (need_indicate_scan_done)
+ rtw_cfg80211_surveydone_event_callback(padapter);
+ return ret;
+}
+
+static int cfg80211_rtw_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ DBG_8723A("%s\n", __func__);
+ return 0;
+}
+
+static int cfg80211_rtw_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_ibss_params *params)
+{
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+ return 0;
+}
+
+static int cfg80211_rtw_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
+{
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+ return 0;
+}
+
+static int rtw_cfg80211_set_wpa_version(struct security_priv *psecuritypriv,
+ u32 wpa_version)
+{
+ DBG_8723A("%s, wpa_version =%d\n", __func__, wpa_version);
+
+ if (!wpa_version) {
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
+ return 0;
+ }
+
+ if (wpa_version & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)) {
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPAPSK;
+ }
+
+/*
+ if (wpa_version & NL80211_WPA_VERSION_2)
+ {
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPA2PSK;
+ }
+*/
+
+ return 0;
+}
+
+static int rtw_cfg80211_set_auth_type(struct security_priv *psecuritypriv,
+ enum nl80211_auth_type sme_auth_type)
+{
+ DBG_8723A("%s, nl80211_auth_type =%d\n", __func__, sme_auth_type);
+
+ switch (sme_auth_type) {
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
+
+ break;
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+
+ if (psecuritypriv->ndisauthtype > Ndis802_11AuthModeWPA)
+ psecuritypriv->dot11AuthAlgrthm =
+ dot11AuthAlgrthm_8021X;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
+
+ psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ default:
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+ /* return -ENOTSUPP; */
+ }
+
+ return 0;
+}
+
+static int rtw_cfg80211_set_cipher(struct security_priv *psecuritypriv,
+ u32 cipher, bool ucast)
+{
+ u32 ndisencryptstatus = Ndis802_11EncryptionDisabled;
+
+ u32 *profile_cipher = ucast ? &psecuritypriv->dot11PrivacyAlgrthm :
+ &psecuritypriv->dot118021XGrpPrivacy;
+
+ DBG_8723A("%s, ucast =%d, cipher = 0x%x\n", __func__, ucast, cipher);
+
+ if (!cipher) {
+ *profile_cipher = _NO_PRIVACY_;
+ psecuritypriv->ndisencryptstatus = ndisencryptstatus;
+ return 0;
+ }
+
+ switch (cipher) {
+ case IW_AUTH_CIPHER_NONE:
+ *profile_cipher = _NO_PRIVACY_;
+ ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ *profile_cipher = _WEP40_;
+ ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ *profile_cipher = _WEP104_;
+ ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ *profile_cipher = _TKIP_;
+ ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ *profile_cipher = _AES_;
+ ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ default:
+ DBG_8723A("Unsupported cipher: 0x%x\n", cipher);
+ return -ENOTSUPP;
+ }
+
+ if (ucast)
+ psecuritypriv->ndisencryptstatus = ndisencryptstatus;
+
+ return 0;
+}
+
+static int rtw_cfg80211_set_key_mgt(struct security_priv *psecuritypriv,
+ u32 key_mgt)
+{
+ DBG_8723A("%s, key_mgt = 0x%x\n", __func__, key_mgt);
+
+ if (key_mgt == WLAN_AKM_SUITE_8021X)
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ else if (key_mgt == WLAN_AKM_SUITE_PSK)
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ else
+ DBG_8723A("Invalid key mgt: 0x%x\n", key_mgt);
+
+ return 0;
+}
+
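+/*
+ * Parse the supplicant-provided association IEs (WPA/RSN, WPS and, when
+ * enabled, P2P/WFD) and program the corresponding security settings.
+ */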
+static int rtw_cfg80211_set_wpa_ie(struct rtw_adapter *padapter, const u8 *pie,
+ size_t ielen)
+{
+ u8 *buf = NULL, *pos = NULL;
+ int group_cipher = 0, pairwise_cipher = 0;
+ int ret = 0;
+ int wpa_ielen = 0;
+ int wpa2_ielen = 0;
+ u8 *pwpa, *pwpa2;
+ u8 null_addr[] = { 0, 0, 0, 0, 0, 0 };
+ int i;
+
+ if (!pie || !ielen) {
+ /* Treat this as normal case, but need to clear
+ WIFI_UNDER_WPS */
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ goto exit;
+ }
+ if (ielen > MAX_WPA_IE_LEN + MAX_WPS_IE_LEN + MAX_P2P_IE_LEN) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ buf = kmemdup(pie, ielen, GFP_KERNEL);
+ if (buf == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* dump the IE one byte at a time so the loop never reads past buf */
+ DBG_8723A("set wpa_ie(length:%zu):\n", ielen);
+ for (i = 0; i < ielen; i++)
+ DBG_8723A("0x%.2x ", buf[i]);
+ pos = buf;
+ if (ielen < RSN_HEADER_LEN) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_,
+ ("Ie len too short %d\n", (int)ielen));
+ ret = -1;
+ goto exit;
+ }
+
+ pwpa = rtw_get_wpa_ie23a(buf, &wpa_ielen, ielen);
+ if (pwpa && wpa_ielen > 0) {
+ if (rtw_parse_wpa_ie23a(pwpa, wpa_ielen + 2, &group_cipher,
+ &pairwise_cipher, NULL) == _SUCCESS) {
+ padapter->securitypriv.dot11AuthAlgrthm =
+ dot11AuthAlgrthm_8021X;
+ padapter->securitypriv.ndisauthtype =
+ Ndis802_11AuthModeWPAPSK;
+ memcpy(padapter->securitypriv.supplicant_ie, &pwpa[0],
+ wpa_ielen + 2);
+
+ DBG_8723A("got wpa_ie, wpa_ielen:%u\n", wpa_ielen);
+ }
+ }
+
+ pwpa2 = rtw_get_wpa2_ie23a(buf, &wpa2_ielen, ielen);
+ if (pwpa2 && wpa2_ielen > 0) {
+ if (rtw_parse_wpa2_ie23a(pwpa2, wpa2_ielen + 2, &group_cipher,
+ &pairwise_cipher, NULL) == _SUCCESS) {
+ padapter->securitypriv.dot11AuthAlgrthm =
+ dot11AuthAlgrthm_8021X;
+ padapter->securitypriv.ndisauthtype =
+ Ndis802_11AuthModeWPA2PSK;
+ memcpy(padapter->securitypriv.supplicant_ie, &pwpa2[0],
+ wpa2_ielen + 2);
+
+ DBG_8723A("got wpa2_ie, wpa2_ielen:%u\n", wpa2_ielen);
+ }
+ }
+
+ if (group_cipher == 0) {
+ group_cipher = WPA_CIPHER_NONE;
+ }
+ if (pairwise_cipher == 0) {
+ pairwise_cipher = WPA_CIPHER_NONE;
+ }
+
+ switch (group_cipher) {
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus =
+ Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus =
+ Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus =
+ Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
+ padapter->securitypriv.ndisencryptstatus =
+ Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus =
+ Ndis802_11Encryption1Enabled;
+ break;
+ }
+
+ switch (pairwise_cipher) {
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus =
+ Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus =
+ Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus =
+ Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
+ padapter->securitypriv.ndisencryptstatus =
+ Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus =
+ Ndis802_11Encryption1Enabled;
+ break;
+ }
+
+ { /* handle wps_ie */
+ uint wps_ielen;
+ u8 *wps_ie;
+
+ wps_ie = rtw_get_wps_ie23a(buf, ielen, NULL, &wps_ielen);
+ if (wps_ie && wps_ielen > 0) {
+ DBG_8723A("got wps_ie, wps_ielen:%u\n", wps_ielen);
+ padapter->securitypriv.wps_ie_len =
+ wps_ielen <
+ MAX_WPS_IE_LEN ? wps_ielen : MAX_WPS_IE_LEN;
+ memcpy(padapter->securitypriv.wps_ie, wps_ie,
+ padapter->securitypriv.wps_ie_len);
+ set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ } else {
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ }
+ }
+
+#ifdef CONFIG_8723AU_P2P
+ { /* check p2p_ie for assoc req; */
+ uint p2p_ielen = 0;
+ u8 *p2p_ie;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ p2p_ie = rtw_get_p2p_ie23a(buf, ielen, NULL, &p2p_ielen);
+ if (p2p_ie) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s p2p_assoc_req_ielen =%d\n", __func__,
+ p2p_ielen);
+#endif
+
+ if (pmlmepriv->p2p_assoc_req_ie) {
+ pmlmepriv->p2p_assoc_req_ie_len = 0;
+ kfree(pmlmepriv->p2p_assoc_req_ie);
+ pmlmepriv->p2p_assoc_req_ie = NULL;
+ }
+
+ pmlmepriv->p2p_assoc_req_ie =
+ kmalloc(p2p_ielen, GFP_KERNEL);
+ if (pmlmepriv->p2p_assoc_req_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ goto exit;
+ }
+ memcpy(pmlmepriv->p2p_assoc_req_ie, p2p_ie, p2p_ielen);
+ pmlmepriv->p2p_assoc_req_ie_len = p2p_ielen;
+ }
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+#ifdef CONFIG_8723AU_P2P
+ { /* check wfd_ie for assoc req; */
+ uint wfd_ielen = 0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (rtw_get_wfd_ie(buf, ielen, NULL, &wfd_ielen)) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s wfd_assoc_req_ielen =%d\n", __func__,
+ wfd_ielen);
+#endif
+
+ if (pmlmepriv->wfd_assoc_req_ie) {
+ pmlmepriv->wfd_assoc_req_ie_len = 0;
+ kfree(pmlmepriv->wfd_assoc_req_ie);
+ pmlmepriv->wfd_assoc_req_ie = NULL;
+ }
+
+ pmlmepriv->wfd_assoc_req_ie =
+ kmalloc(wfd_ielen, GFP_KERNEL);
+ if (pmlmepriv->wfd_assoc_req_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ goto exit;
+ }
+ rtw_get_wfd_ie(buf, ielen, pmlmepriv->wfd_assoc_req_ie,
+ &pmlmepriv->wfd_assoc_req_ie_len);
+ }
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ /* TKIP and AES disallow multicast packets until installing group key */
+ if (padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_ ||
+ padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_WTMIC_ ||
+ padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
+ /* WPS open need to enable multicast */
+ /* check_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS) == true)*/
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_OFF_RCR_AM, null_addr);
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_set_wpa_ie: pairwise_cipher = 0x%08x padapter->"
+ "securitypriv.ndisencryptstatus =%d padapter->"
+ "securitypriv.ndisauthtype =%d\n", pairwise_cipher,
+ padapter->securitypriv.ndisencryptstatus,
+ padapter->securitypriv.ndisauthtype));
+
+exit:
+ kfree(buf);
+ if (ret)
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ return ret;
+}
+
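+/*
+ * Look up the target BSS in the scanned-network queue by BSSID and/or SSID,
+ * configure authentication and ciphers from the connect parameters, then
+ * kick off the association via rtw_set_802_11_ssid23a().
+ */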
+static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ int ret = 0;
+ struct list_head *phead, *plist, *ptmp;
+ struct wlan_network *pnetwork = NULL;
+ enum ndis_802_11_auth_mode authmode;
+ struct cfg80211_ssid ndis_ssid;
+ u8 *dst_ssid;
+ u8 *src_ssid;
+ u8 *dst_bssid;
+ const u8 *src_bssid;
+ /* u8 matched_by_bssid = false; */
+ /* u8 matched_by_ssid = false; */
+ u8 matched = false;
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct rtw_queue *queue = &pmlmepriv->scanned_queue;
+
+ DBG_8723A("=>" FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+ DBG_8723A("privacy =%d, key =%p, key_len =%d, key_idx =%d\n",
+ sme->privacy, sme->key, sme->key_len, sme->key_idx);
+
+ if (wdev_to_priv(padapter->rtw_wdev)->block) {
+ ret = -EBUSY;
+ DBG_8723A("%s wdev_priv.block is set\n", __func__);
+ goto exit;
+ }
+
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (!sme->ssid || !sme->ssid_len) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (sme->ssid_len > IW_ESSID_MAX_SIZE) {
+ ret = -E2BIG;
+ goto exit;
+ }
+
+ memset(&ndis_ssid, 0, sizeof(struct cfg80211_ssid));
+ ndis_ssid.ssid_len = sme->ssid_len;
+ memcpy(ndis_ssid.ssid, sme->ssid, sme->ssid_len);
+
+ DBG_8723A("ssid =%s, len =%zu\n", ndis_ssid.ssid, sme->ssid_len);
+
+ if (sme->bssid)
+ DBG_8723A("bssid =" MAC_FMT "\n", MAC_ARG(sme->bssid));
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
+ ret = -EBUSY;
+ DBG_8723A("%s, fw_state = 0x%x, goto exit\n", __func__,
+ pmlmepriv->fw_state);
+ goto exit;
+ }
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ rtw_scan_abort23a(padapter);
+ }
+
+ spin_lock_bh(&queue->lock);
+
+ phead = get_list_head(queue);
+
+ list_for_each_safe(plist, ptmp, phead) {
+ pnetwork = container_of(plist, struct wlan_network, list);
+
+ dst_ssid = pnetwork->network.Ssid.ssid;
+ dst_bssid = pnetwork->network.MacAddress;
+
+ if (sme->bssid) {
+ if (memcmp(pnetwork->network.MacAddress,
+ sme->bssid, ETH_ALEN))
+ continue;
+ }
+
+ if (sme->ssid && sme->ssid_len) {
+ if (pnetwork->network.Ssid.ssid_len != sme->ssid_len ||
+ memcmp(pnetwork->network.Ssid.ssid, sme->ssid,
+ sme->ssid_len))
+ continue;
+ }
+
+ if (sme->bssid) {
+ src_bssid = sme->bssid;
+
+ if ((!memcmp(dst_bssid, src_bssid, ETH_ALEN))) {
+ DBG_8723A("matched by bssid\n");
+
+ ndis_ssid.ssid_len =
+ pnetwork->network.Ssid.ssid_len;
+ memcpy(ndis_ssid.ssid,
+ pnetwork->network.Ssid.ssid,
+ pnetwork->network.Ssid.ssid_len);
+
+ matched = true;
+ break;
+ }
+
+ } else if (sme->ssid && sme->ssid_len) {
+ src_ssid = ndis_ssid.ssid;
+
+ if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.ssid_len)) &&
+ (pnetwork->network.Ssid.ssid_len ==
+ ndis_ssid.ssid_len)) {
+ DBG_8723A("matched by ssid\n");
+ matched = true;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&queue->lock);
+
+ if (!matched || (pnetwork == NULL)) {
+ ret = -ENOENT;
+ DBG_8723A("connect, matched == false, goto exit\n");
+ goto exit;
+ }
+
+ if (rtw_set_802_11_infrastructure_mode23a
+ (padapter, pnetwork->network.InfrastructureMode) == false) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ psecuritypriv->ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
+
+ ret =
+ rtw_cfg80211_set_wpa_version(psecuritypriv,
+ sme->crypto.wpa_versions);
+ if (ret < 0)
+ goto exit;
+
+ ret = rtw_cfg80211_set_auth_type(psecuritypriv, sme->auth_type);
+
+ if (ret < 0)
+ goto exit;
+
+ DBG_8723A("%s, ie_len =%zu\n", __func__, sme->ie_len);
+
+ ret = rtw_cfg80211_set_wpa_ie(padapter, sme->ie, sme->ie_len);
+ if (ret < 0)
+ goto exit;
+
+ if (sme->crypto.n_ciphers_pairwise) {
+ ret = rtw_cfg80211_set_cipher(psecuritypriv,
+ sme->crypto.ciphers_pairwise[0],
+ true);
+ if (ret < 0)
+ goto exit;
+ }
+
+ /* For WEP Shared auth */
+ if ((psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_Shared ||
+ psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_Auto) &&
+ sme->key) {
+ u32 wep_key_idx, wep_key_len, wep_total_len;
+ struct ndis_802_11_wep *pwep = NULL;
+ DBG_8723A("%s(): Shared/Auto WEP\n", __func__);
+
+ wep_key_idx = sme->key_idx;
+ wep_key_len = sme->key_len;
+
+ /* valid WEP key indices are 0..WEP_KEYS - 1 */
+ if (sme->key_idx >= WEP_KEYS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (wep_key_len > 0) {
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ wep_total_len =
+ wep_key_len +
+ offsetof(struct ndis_802_11_wep, KeyMaterial);
+ pwep = kzalloc(wep_total_len, GFP_KERNEL);
+ if (pwep == NULL) {
+ DBG_8723A(" wpa_set_encryption: pwep "
+ "allocate fail !!!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ pwep->KeyLength = wep_key_len;
+ pwep->Length = wep_total_len;
+
+ if (wep_key_len == 13) {
+ padapter->securitypriv.dot11PrivacyAlgrthm =
+ _WEP104_;
+ padapter->securitypriv.dot118021XGrpPrivacy =
+ _WEP104_;
+ }
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ pwep->KeyIndex = wep_key_idx;
+ pwep->KeyIndex |= 0x80000000;
+
+ memcpy(pwep->KeyMaterial, (void *)sme->key, pwep->KeyLength);
+
+ if (rtw_set_802_11_add_wep23a(padapter, pwep) == (u8) _FAIL) {
+ ret = -EOPNOTSUPP;
+ }
+
+ kfree(pwep);
+
+ if (ret < 0)
+ goto exit;
+ }
+
+ ret = rtw_cfg80211_set_cipher(psecuritypriv,
+ sme->crypto.cipher_group, false);
+ if (ret < 0)
+ goto exit;
+
+ if (sme->crypto.n_akm_suites) {
+ ret = rtw_cfg80211_set_key_mgt(psecuritypriv,
+ sme->crypto.akm_suites[0]);
+ if (ret < 0)
+ goto exit;
+ }
+
+ authmode = psecuritypriv->ndisauthtype;
+ rtw_set_802_11_authentication_mode23a(padapter, authmode);
+
+ /* rtw_set_802_11_encryption_mode(padapter,
+ padapter->securitypriv.ndisencryptstatus); */
+
+ if (rtw_set_802_11_ssid23a(padapter, &ndis_ssid) == false) {
+ ret = -1;
+ goto exit;
+ }
+
+ DBG_8723A("set ssid:dot11AuthAlgrthm =%d, dot11PrivacyAlgrthm =%d, "
+ "dot118021XGrpPrivacy =%d\n", psecuritypriv->dot11AuthAlgrthm,
+ psecuritypriv->dot11PrivacyAlgrthm,
+ psecuritypriv->dot118021XGrpPrivacy);
+
+exit:
+
+ DBG_8723A("<=%s, ret %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int cfg80211_rtw_disconnect(struct wiphy *wiphy, struct net_device *ndev,
+ u16 reason_code)
+{
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+
+ rtw_set_roaming(padapter, 0);
+
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+ rtw_scan_abort23a(padapter);
+ LeaveAllPowerSaveMode23a(padapter);
+ rtw_disassoc_cmd23a(padapter, 500, false);
+
+ DBG_8723A("%s...call rtw_indicate_disconnect23a\n", __func__);
+
+ padapter->mlmepriv.not_indic_disco = true;
+ rtw_indicate_disconnect23a(padapter);
+ padapter->mlmepriv.not_indic_disco = false;
+
+ rtw_free_assoc_resources23a(padapter, 1);
+ }
+
+ return 0;
+}
+
+static int cfg80211_rtw_set_txpower(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm)
+{
+ DBG_8723A("%s\n", __func__);
+ return 0;
+}
+
+static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
+ struct wireless_dev *wdev, int *dbm)
+{
+ DBG_8723A("%s\n", __func__);
+ *dbm = 12;
+ return 0;
+}
+
+inline bool rtw_cfg80211_pwr_mgmt(struct rtw_adapter *adapter)
+{
+ struct rtw_wdev_priv *rtw_wdev_priv = wdev_to_priv(adapter->rtw_wdev);
+ return rtw_wdev_priv->power_mgmt;
+}
+
+static int cfg80211_rtw_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *ndev,
+ bool enabled, int timeout)
+{
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+ struct rtw_wdev_priv *rtw_wdev_priv = wdev_to_priv(padapter->rtw_wdev);
+
+ DBG_8723A(FUNC_NDEV_FMT " enabled:%u, timeout:%d\n",
+ FUNC_NDEV_ARG(ndev), enabled, timeout);
+
+ rtw_wdev_priv->power_mgmt = enabled;
+
+ if (!enabled)
+ LPS_Leave23a(padapter);
+
+ return 0;
+}
+
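+/*
+ * Cache the PMKID for the given BSSID, overwriting an existing entry for
+ * the same AP; the write index wraps once 16 entries are in use.
+ */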
+static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ u8 index, blInserted = false;
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u8 strZeroMacAddress[ETH_ALEN] = { 0x00 };
+
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(netdev));
+
+ if (!memcmp(pmksa->bssid, strZeroMacAddress, ETH_ALEN)) {
+ return -EINVAL;
+ }
+
+ blInserted = false;
+
+ /* overwrite PMKID */
+ for (index = 0; index < NUM_PMKID_CACHE; index++) {
+ if (!memcmp(psecuritypriv->PMKIDList[index].Bssid,
+ pmksa->bssid, ETH_ALEN)) {
+ /* BSSID is matched, the same AP => rewrite with
+ new PMKID. */
+ DBG_8723A(FUNC_NDEV_FMT
+ " BSSID exists in the PMKList.\n",
+ FUNC_NDEV_ARG(netdev));
+
+ memcpy(psecuritypriv->PMKIDList[index].PMKID,
+ pmksa->pmkid, WLAN_PMKID_LEN);
+ psecuritypriv->PMKIDList[index].bUsed = true;
+ psecuritypriv->PMKIDIndex = index + 1;
+ blInserted = true;
+ break;
+ }
+ }
+
+ if (!blInserted) {
+ /* Find a new entry */
+ DBG_8723A(FUNC_NDEV_FMT
+ " Use the new entry index = %d for this PMKID.\n",
+ FUNC_NDEV_ARG(netdev), psecuritypriv->PMKIDIndex);
+
+ memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].
+ Bssid, pmksa->bssid, ETH_ALEN);
+ memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].
+ PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
+
+ psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed =
+ true;
+ psecuritypriv->PMKIDIndex++;
+ if (psecuritypriv->PMKIDIndex == 16) {
+ psecuritypriv->PMKIDIndex = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int cfg80211_rtw_del_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ u8 index, bMatched = false;
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(netdev));
+
+ for (index = 0; index < NUM_PMKID_CACHE; index++) {
+ if (!memcmp(psecuritypriv->PMKIDList[index].Bssid,
+ pmksa->bssid, ETH_ALEN)) {
+ /* BSSID is matched, the same AP => Remove this PMKID information and reset it. */
+ memset(psecuritypriv->PMKIDList[index].Bssid, 0x00,
+ ETH_ALEN);
+ memset(psecuritypriv->PMKIDList[index].PMKID, 0x00,
+ WLAN_PMKID_LEN);
+ psecuritypriv->PMKIDList[index].bUsed = false;
+ bMatched = true;
+ break;
+ }
+ }
+
+ if (!bMatched) {
+ DBG_8723A(FUNC_NDEV_FMT " do not have matched BSSID\n",
+ FUNC_NDEV_ARG(netdev));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cfg80211_rtw_flush_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev)
+{
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(netdev));
+
+ memset(&psecuritypriv->PMKIDList[0], 0x00,
+ sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ psecuritypriv->PMKIDIndex = 0;
+
+ return 0;
+}
+
+#ifdef CONFIG_8723AU_AP_MODE
+void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter,
+ u8 *pmgmt_frame, uint frame_len)
+{
+ s32 freq;
+ int channel;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct net_device *ndev = padapter->pnetdev;
+
+ DBG_8723A("%s(padapter =%p,%s)\n", __func__, padapter, ndev->name);
+
+#if defined(RTW_USE_CFG80211_STA_EVENT)
+ {
+ struct station_info sinfo;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)pmgmt_frame;
+ u8 ie_offset;
+
+ if (ieee80211_is_assoc_req(hdr->frame_control))
+ ie_offset = _ASOCREQ_IE_OFFSET_;
+ else /* WIFI_REASSOCREQ */
+ ie_offset = _REASOCREQ_IE_OFFSET_;
+
+ sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+ sinfo.assoc_req_ies = pmgmt_frame + WLAN_HDR_A3_LEN + ie_offset;
+ sinfo.assoc_req_ies_len =
+ frame_len - WLAN_HDR_A3_LEN - ie_offset;
+ cfg80211_new_sta(ndev, hdr->addr2, &sinfo, GFP_ATOMIC);
+ }
+#else /* defined(RTW_USE_CFG80211_STA_EVENT) */
+ channel = pmlmeext->cur_channel;
+ if (channel <= RTW_CH_MAX_2G_CHANNEL)
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_2GHZ);
+ else
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_5GHZ);
+
+ rtw_cfg80211_rx_mgmt(padapter, freq, 0, pmgmt_frame, frame_len,
+ GFP_ATOMIC);
+#endif /* defined(RTW_USE_CFG80211_STA_EVENT) */
+}
+
+void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter,
+ unsigned char *da,
+ unsigned short reason)
+{
+ s32 freq;
+ int channel;
+ u8 *pmgmt_frame;
+ uint frame_len;
+ struct ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ u8 mgmt_buf[128] = { 0 };
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct net_device *ndev = padapter->pnetdev;
+
+ DBG_8723A("%s(padapter =%p,%s)\n", __func__, padapter, ndev->name);
+
+#if defined(RTW_USE_CFG80211_STA_EVENT)
+ cfg80211_del_sta(ndev, da, GFP_ATOMIC);
+#else /* defined(RTW_USE_CFG80211_STA_EVENT) */
+ channel = pmlmeext->cur_channel;
+ if (channel <= RTW_CH_MAX_2G_CHANNEL)
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_2GHZ);
+ else
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_5GHZ);
+
+ pmgmt_frame = mgmt_buf;
+ pwlanhdr = (struct ieee80211_hdr *)pmgmt_frame;
+
+ fctrl = &pwlanhdr->frame_control;
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid23a(&pmlmeinfo->network), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pmgmt_frame, WIFI_DEAUTH);
+
+ pmgmt_frame += sizeof(struct ieee80211_hdr_3addr);
+ frame_len = sizeof(struct ieee80211_hdr_3addr);
+
+ reason = cpu_to_le16(reason);
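+ /* WLAN_REASON_PREV_AUTH_NOT_VALID (2) doubles here as the
+ 2-octet length of the reason code field being appended */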
+ pmgmt_frame = rtw_set_fixed_ie23a(pmgmt_frame,
+ WLAN_REASON_PREV_AUTH_NOT_VALID,
+ (unsigned char *)&reason, &frame_len);
+
+ rtw_cfg80211_rx_mgmt(padapter, freq, 0, mgmt_buf, frame_len,
+ GFP_ATOMIC);
+#endif /* defined(RTW_USE_CFG80211_STA_EVENT) */
+}
+
+static int rtw_cfg80211_monitor_if_open(struct net_device *ndev)
+{
+ int ret = 0;
+
+ DBG_8723A("%s\n", __func__);
+
+ return ret;
+}
+
+static int rtw_cfg80211_monitor_if_close(struct net_device *ndev)
+{
+ int ret = 0;
+
+ DBG_8723A("%s\n", __func__);
+
+ return ret;
+}
+
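+/*
+ * Monitor-interface transmit: strip the radiotap header, then either pass
+ * data frames (e.g. EAPOL) to the real netdev or send action frames out as
+ * raw management frames.
+ */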
+static int rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int ret = 0;
+ int rtap_len;
+ int qos_len = 0;
+ int dot11_hdr_len = 24;
+ int snap_len = 6;
+ unsigned char *pdata;
+ unsigned char src_mac_addr[6];
+ unsigned char dst_mac_addr[6];
+ struct ieee80211_hdr *dot11_hdr;
+ struct ieee80211_radiotap_header *rtap_hdr;
+ struct rtw_adapter *padapter = netdev_priv(ndev);
+
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+
+ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+ goto fail;
+
+ rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
+ if (unlikely(rtap_hdr->it_version))
+ goto fail;
+
+ rtap_len = ieee80211_get_radiotap_len(skb->data);
+ if (unlikely(skb->len < rtap_len))
+ goto fail;
+
+ if (rtap_len != 14) {
+ DBG_8723A("radiotap len (should be 14): %d\n", rtap_len);
+ goto fail;
+ }
+
+ /* Skip the radiotap header */
+ skb_pull(skb, rtap_len);
+
+ dot11_hdr = (struct ieee80211_hdr *)skb->data;
+ /* Check if the QoS bit is set */
+ if (ieee80211_is_data(dot11_hdr->frame_control)) {
+ /* Check if this is a Wireless Distribution System (WDS) frame
+ * which has 4 MAC addresses
+ */
+ if (ieee80211_is_data_qos(dot11_hdr->frame_control))
+ qos_len = IEEE80211_QOS_CTL_LEN;
+ if (ieee80211_has_a4(dot11_hdr->frame_control))
+ dot11_hdr_len += 6;
+
+ memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
+ memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
+
+ /*
+ * Skip the 802.11 header, QoS (if any) and SNAP,
+ * but leave spaces for two MAC addresses
+ */
+ skb_pull(skb, dot11_hdr_len + qos_len + snap_len -
+ ETH_ALEN * 2);
+ pdata = (unsigned char *)skb->data;
+ memcpy(pdata, dst_mac_addr, ETH_ALEN);
+ memcpy(pdata + ETH_ALEN, src_mac_addr, ETH_ALEN);
+
+ DBG_8723A("should be eapol packet\n");
+
+ /* Use the real net device to transmit the packet */
+ ret = rtw_xmit23a_entry23a(skb, padapter->pnetdev);
+
+ return ret;
+
+ } else if (ieee80211_is_action(dot11_hdr->frame_control)) {
+ /* only for action frames */
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ /* u8 category, action, OUI_Subtype, dialogToken = 0; */
+ /* unsigned char *frame_body; */
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ u32 len = skb->len;
+ u8 category, action;
+#ifdef CONFIG_8723AU_P2P
+ int type = -1;
+#endif
+
+ if (rtw_action_frame_parse23a(skb->data, len, &category,
+ &action) == false) {
+ DBG_8723A(FUNC_NDEV_FMT " frame_control:0x%x\n",
+ FUNC_NDEV_ARG(ndev),
+ le16_to_cpu(dot11_hdr->frame_control));
+ goto fail;
+ }
+
+ DBG_8723A("RTW_Tx:da =" MAC_FMT " via " FUNC_NDEV_FMT "\n",
+ MAC_ARG(dot11_hdr->addr1), FUNC_NDEV_ARG(ndev));
+#ifdef CONFIG_8723AU_P2P
+ type = rtw_p2p_check_frames(padapter, skb->data, len, true);
+ if (type >= 0)
+ goto dump;
+#endif
+ if (category == WLAN_CATEGORY_PUBLIC)
+ DBG_8723A("RTW_Tx:%s\n", action_public_str23a(action));
+ else
+ DBG_8723A("RTW_Tx:category(%u), action(%u)\n", category,
+ action);
+#ifdef CONFIG_8723AU_P2P
+dump:
+#endif
+ /* starting alloc mgmt frame to dump it */
+ pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto fail;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+
+ memcpy(pframe, skb->data, len);
+#ifdef CONFIG_8723AU_P2P
+ if (type >= 0) {
+ struct wifi_display_info *pwfd_info;
+
+ pwfd_info = padapter->wdinfo.wfd_info;
+
+ if (pwfd_info->wfd_enable)
+ rtw_append_wfd_ie(padapter, pframe, &len);
+ }
+#endif /* CONFIG_8723AU_P2P */
+ pattrib->pktlen = len;
+
+ /* update seq number */
+ pmlmeext->mgnt_seq = le16_to_cpu(dot11_hdr->seq_ctrl) >> 4;
+ pattrib->seqnum = pmlmeext->mgnt_seq;
+ pmlmeext->mgnt_seq++;
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe23a(padapter, pmgntframe);
+ }
+
+fail:
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int
+rtw_cfg80211_monitor_if_set_mac_address(struct net_device *ndev, void *addr)
+{
+ int ret = 0;
+
+ DBG_8723A("%s\n", __func__);
+
+ return ret;
+}
+
+static const struct net_device_ops rtw_cfg80211_monitor_if_ops = {
+ .ndo_open = rtw_cfg80211_monitor_if_open,
+ .ndo_stop = rtw_cfg80211_monitor_if_close,
+ .ndo_start_xmit = rtw_cfg80211_monitor_if_xmit_entry,
+ .ndo_set_mac_address = rtw_cfg80211_monitor_if_set_mac_address,
+};
+
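+/*
+ * Allocate and register a radiotap monitor netdev tied to this adapter;
+ * only one monitor interface may exist at a time.
+ */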
+static int rtw_cfg80211_add_monitor_if(struct rtw_adapter *padapter, char *name,
+ struct net_device **ndev)
+{
+ int ret = 0;
+ struct net_device *mon_ndev = NULL;
+ struct wireless_dev *mon_wdev = NULL;
+ struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);
+
+ if (!name) {
+ DBG_8723A(FUNC_ADPT_FMT " without specific name\n",
+ FUNC_ADPT_ARG(padapter));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (pwdev_priv->pmon_ndev) {
+ DBG_8723A(FUNC_ADPT_FMT " monitor interface exist: " NDEV_FMT
+ "\n", FUNC_ADPT_ARG(padapter),
+ NDEV_ARG(pwdev_priv->pmon_ndev));
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mon_ndev = alloc_etherdev(sizeof(struct rtw_adapter));
+ if (!mon_ndev) {
+ DBG_8723A(FUNC_ADPT_FMT " allocate ndev fail\n",
+ FUNC_ADPT_ARG(padapter));
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ strncpy(mon_ndev->name, name, IFNAMSIZ);
+ mon_ndev->name[IFNAMSIZ - 1] = 0;
+ mon_ndev->destructor = rtw_ndev_destructor;
+
+ mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops;
+
+ /* wdev */
+ mon_wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!mon_wdev) {
+ DBG_8723A(FUNC_ADPT_FMT " allocate mon_wdev fail\n",
+ FUNC_ADPT_ARG(padapter));
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mon_wdev->wiphy = padapter->rtw_wdev->wiphy;
+ mon_wdev->netdev = mon_ndev;
+ mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
+ mon_ndev->ieee80211_ptr = mon_wdev;
+
+ ret = register_netdevice(mon_ndev);
+ if (ret) {
+ goto out;
+ }
+
+ *ndev = pwdev_priv->pmon_ndev = mon_ndev;
+ strlcpy(pwdev_priv->ifname_mon, name, IFNAMSIZ + 1);
+
+out:
+ if (ret) {
+ kfree(mon_wdev);
+ mon_wdev = NULL;
+ }
+
+ if (ret && mon_ndev) {
+ free_netdev(mon_ndev);
+ *ndev = mon_ndev = NULL;
+ }
+
+ return ret;
+}
+
+static struct wireless_dev *
+cfg80211_rtw_add_virtual_intf(struct wiphy *wiphy, const char *name,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ int ret = 0;
+ struct net_device *ndev = NULL;
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+
+ DBG_8723A(FUNC_ADPT_FMT " wiphy:%s, name:%s, type:%d\n",
+ FUNC_ADPT_ARG(padapter), wiphy_name(wiphy), name, type);
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
+ ret = -ENODEV;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ ret =
+ rtw_cfg80211_add_monitor_if(padapter, (char *)name, &ndev);
+ break;
+
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_STATION:
+ ret = -ENODEV;
+ break;
+
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ ret = -ENODEV;
+ break;
+ default:
+ ret = -ENODEV;
+ DBG_8723A("Unsupported interface type\n");
+ break;
+ }
+
+ DBG_8723A(FUNC_ADPT_FMT " ndev:%p, ret:%d\n", FUNC_ADPT_ARG(padapter),
+ ndev, ret);
+
+ return ndev ? ndev->ieee80211_ptr : ERR_PTR(ret);
+}
+
+static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct rtw_wdev_priv *pwdev_priv =
+ (struct rtw_wdev_priv *)wiphy_priv(wiphy);
+ struct net_device *ndev;
+ ndev = wdev ? wdev->netdev : NULL;
+
+ if (!ndev)
+ goto exit;
+
+ unregister_netdevice(ndev);
+
+ if (ndev == pwdev_priv->pmon_ndev) {
+ pwdev_priv->pmon_ndev = NULL;
+ pwdev_priv->ifname_mon[0] = '\0';
+ DBG_8723A(FUNC_NDEV_FMT " remove monitor interface\n",
+ FUNC_NDEV_ARG(ndev));
+ }
+
+exit:
+ return 0;
+}
+
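+/*
+ * Merge the beacon head (minus the fixed 24-byte 802.11 header) and tail
+ * IEs, strip any P2P/WFD vendor IEs, and hand the result to
+ * rtw_check_beacon_data23a().
+ */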
+static int rtw_add_beacon(struct rtw_adapter *adapter, const u8 *head,
+ size_t head_len, const u8 *tail, size_t tail_len)
+{
+ int ret = 0;
+ u8 *pbuf = NULL;
+ uint len, wps_ielen = 0;
+#ifdef CONFIG_8723AU_P2P
+ uint p2p_ielen = 0;
+ u8 got_p2p_ie = false;
+#endif
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ /* struct sta_priv *pstapriv = &padapter->stapriv; */
+
+ DBG_8723A("%s beacon_head_len =%zu, beacon_tail_len =%zu\n",
+ __func__, head_len, tail_len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ if (head_len < 24)
+ return -EINVAL;
+
+ pbuf = kzalloc(head_len + tail_len, GFP_KERNEL);
+ if (!pbuf)
+ return -ENOMEM;
+ /* 24 = beacon header len. */
+ memcpy(pbuf, (void *)head + 24, head_len - 24);
+ memcpy(pbuf + head_len - 24, (void *)tail, tail_len);
+
+ len = head_len + tail_len - 24;
+
+ /* check if a WPS IE is included */
+ if (rtw_get_wps_ie23a
+ (pbuf + _FIXED_IE_LENGTH_, len - _FIXED_IE_LENGTH_, NULL,
+ &wps_ielen))
+ DBG_8723A("add bcn, wps_ielen =%d\n", wps_ielen);
+
+#ifdef CONFIG_8723AU_P2P
+ /* check if a P2P IE is included */
+ if (rtw_get_p2p_ie23a
+ (pbuf + _FIXED_IE_LENGTH_, len - _FIXED_IE_LENGTH_, NULL,
+ &p2p_ielen)) {
+ DBG_8723A("got p2p_ie, len =%d\n", p2p_ielen);
+ got_p2p_ie = true;
+ }
+#endif
+
+ /* pbss_network->IEs will not include p2p_ie, wfd ie */
+ rtw_ies_remove_ie23a(pbuf, &len, _BEACON_IE_OFFSET_, _VENDOR_SPECIFIC_IE_,
+ P2P_OUI23A, 4);
+ rtw_ies_remove_ie23a(pbuf, &len, _BEACON_IE_OFFSET_, _VENDOR_SPECIFIC_IE_,
+ WFD_OUI23A, 4);
+
+ if (rtw_check_beacon_data23a(adapter, pbuf, len) == _SUCCESS) {
+#ifdef CONFIG_8723AU_P2P
+ /* check p2p if enable */
+ if (got_p2p_ie == true) {
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ DBG_8723A("Enable P2P function for the first "
+ "time\n");
+ rtw_p2p_enable23a(adapter, P2P_ROLE_GO);
+ wdev_to_priv(adapter->rtw_wdev)->p2p_enabled =
+ true;
+ } else {
+ del_timer_sync(&pwdinfo->find_phase_timer);
+ del_timer_sync(&pwdinfo->
+ restore_p2p_state_timer);
+ del_timer_sync(&pwdinfo->pre_tx_scan_timer);
+
+ DBG_8723A("enter GO Mode, p2p_ielen =%d\n",
+ p2p_ielen);
+
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ pwdinfo->intent = 15;
+ }
+
+ pwdinfo->operating_channel = pmlmeext->cur_channel;
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ ret = 0;
+
+ } else {
+ ret = -EINVAL;
+ }
+
+ kfree(pbuf);
+
+ return ret;
+}
+
+static int cfg80211_rtw_start_ap(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_ap_settings *settings)
+{
+ int ret = 0;
+ struct rtw_adapter *adapter = wiphy_to_adapter(wiphy);
+
+ DBG_8723A(FUNC_NDEV_FMT " hidden_ssid:%d, auth_type:%d\n",
+ FUNC_NDEV_ARG(ndev), settings->hidden_ssid,
+ settings->auth_type);
+
+ ret = rtw_add_beacon(adapter, settings->beacon.head,
+ settings->beacon.head_len, settings->beacon.tail,
+ settings->beacon.tail_len);
+
+ adapter->mlmeextpriv.mlmext_info.hidden_ssid_mode =
+ settings->hidden_ssid;
+
+ if (settings->ssid && settings->ssid_len) {
+ struct wlan_bssid_ex *pbss_network =
+ &adapter->mlmepriv.cur_network.network;
+ struct wlan_bssid_ex *pbss_network_ext =
+ &adapter->mlmeextpriv.mlmext_info.network;
+
+ if (0)
+ DBG_8723A(FUNC_ADPT_FMT
+ " ssid:(%s,%d), from ie:(%s,%d)\n",
+ FUNC_ADPT_ARG(adapter), settings->ssid,
+ (int)settings->ssid_len,
+ pbss_network->Ssid.ssid,
+ pbss_network->Ssid.ssid_len);
+
+ memcpy(pbss_network->Ssid.ssid, (void *)settings->ssid,
+ settings->ssid_len);
+ pbss_network->Ssid.ssid_len = settings->ssid_len;
+ memcpy(pbss_network_ext->Ssid.ssid, (void *)settings->ssid,
+ settings->ssid_len);
+ pbss_network_ext->Ssid.ssid_len = settings->ssid_len;
+
+ if (0)
+ DBG_8723A(FUNC_ADPT_FMT
+ " after ssid:(%s,%d), (%s,%d)\n",
+ FUNC_ADPT_ARG(adapter),
+ pbss_network->Ssid.ssid,
+ pbss_network->Ssid.ssid_len,
+ pbss_network_ext->Ssid.ssid,
+ pbss_network_ext->Ssid.ssid_len);
+ }
+
+ return ret;
+}
+
+static int cfg80211_rtw_change_beacon(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_beacon_data *info)
+{
+ int ret = 0;
+ struct rtw_adapter *adapter = wiphy_to_adapter(wiphy);
+
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+
+ ret = rtw_add_beacon(adapter, info->head, info->head_len, info->tail,
+ info->tail_len);
+
+ return ret;
+}
+
+static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
+{
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+ return 0;
+}
+
+static int cfg80211_rtw_add_station(struct wiphy *wiphy,
+ struct net_device *ndev, u8 *mac,
+ struct station_parameters *params)
+{
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+
+ return 0;
+}
+
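+/*
+ * With a NULL MAC address every station and CAM entry is flushed; otherwise
+ * the matching station is removed from the association list unless it is
+ * still waiting for its 802.1X pairwise key.
+ */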
+static int cfg80211_rtw_del_station(struct wiphy *wiphy,
+ struct net_device *ndev, u8 *mac)
+{
+ int ret = 0;
+ struct list_head *phead, *plist, *ptmp;
+ u8 updated = 0;
+ struct sta_info *psta;
+ struct rtw_adapter *padapter = netdev_priv(ndev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_8723A("+" FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true) {
+ DBG_8723A("%s, fw_state != FW_LINKED|WIFI_AP_STATE\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!mac) {
+ DBG_8723A("flush all sta, and cam_entry\n");
+
+ flush_all_cam_entry23a(padapter); /* clear CAM */
+
+ ret = rtw_sta_flush23a(padapter);
+
+ return ret;
+ }
+
+ DBG_8723A("free sta macaddr =" MAC_FMT "\n", MAC_ARG(mac));
+
+ if (is_broadcast_ether_addr(mac))
+ return -EINVAL;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+
+ phead = &pstapriv->asoc_list;
+
+ /* check asoc_queue */
+ list_for_each_safe(plist, ptmp, phead) {
+ psta = container_of(plist, struct sta_info, asoc_list);
+
+ if (!memcmp(mac, psta->hwaddr, ETH_ALEN)) {
+ if (psta->dot8021xalg == 1 &&
+ psta->bpairwise_key_installed == false) {
+ DBG_8723A("%s, sta's dot8021xalg = 1 and "
+ "key_installed = false\n", __func__);
+ } else {
+ DBG_8723A("free psta =%p, aid =%d\n", psta,
+ psta->aid);
+
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+
+ /* spin_unlock_bh(&pstapriv->asoc_list_lock); */
+ updated =
+ ap_free_sta23a(padapter, psta, true,
+ WLAN_REASON_DEAUTH_LEAVING);
+ /* spin_lock_bh(&pstapriv->asoc_list_lock); */
+
+ psta = NULL;
+
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ associated_clients_update23a(padapter, updated);
+
+ DBG_8723A("-" FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+
+ return ret;
+}
+
+static int cfg80211_rtw_change_station(struct wiphy *wiphy,
+ struct net_device *ndev, u8 *mac,
+ struct station_parameters *params)
+{
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+ return 0;
+}
+
+static int cfg80211_rtw_dump_station(struct wiphy *wiphy,
+ struct net_device *ndev, int idx, u8 *mac,
+ struct station_info *sinfo)
+{
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+
+ /* TODO: dump scanned queue */
+
+ return -ENOENT;
+}
+
+static int cfg80211_rtw_change_bss(struct wiphy *wiphy, struct net_device *ndev,
+ struct bss_parameters *params)
+{
+ DBG_8723A(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
+ return 0;
+}
+#endif /* CONFIG_8723AU_AP_MODE */
+
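+/*
+ * Convert the current operating channel to a frequency and forward the
+ * received action frame to cfg80211.
+ */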
+void rtw_cfg80211_rx_action_p2p(struct rtw_adapter *padapter, u8 *pmgmt_frame,
+ uint frame_len)
+{
+#ifdef CONFIG_8723AU_P2P
+ int type;
+#endif
+ s32 freq;
+ int channel;
+ u8 category, action;
+
+ channel = rtw_get_oper_ch23a(padapter);
+
+ DBG_8723A("RTW_Rx:cur_ch =%d\n", channel);
+#ifdef CONFIG_8723AU_P2P
+ type = rtw_p2p_check_frames(padapter, pmgmt_frame, frame_len, false);
+ if (type >= 0)
+ goto indicate;
+#endif
+ rtw_action_frame_parse23a(pmgmt_frame, frame_len, &category, &action);
+ DBG_8723A("RTW_Rx:category(%u), action(%u)\n", category, action);
+
+#ifdef CONFIG_8723AU_P2P
+indicate:
+#endif
+ if (channel <= RTW_CH_MAX_2G_CHANNEL)
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_2GHZ);
+ else
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_5GHZ);
+
+ rtw_cfg80211_rx_mgmt(padapter, freq, 0, pmgmt_frame, frame_len,
+ GFP_ATOMIC);
+}
+
+void rtw_cfg80211_rx_p2p_action_public(struct rtw_adapter *padapter,
+ u8 *pmgmt_frame, uint frame_len)
+{
+#ifdef CONFIG_8723AU_P2P
+ int type;
+#endif
+ s32 freq;
+ int channel;
+ u8 category, action;
+
+ channel = rtw_get_oper_ch23a(padapter);
+
+ DBG_8723A("RTW_Rx:cur_ch =%d\n", channel);
+#ifdef CONFIG_8723AU_P2P
+ type = rtw_p2p_check_frames(padapter, pmgmt_frame, frame_len, false);
+ if (type >= 0) {
+ switch (type) {
+ case P2P_GO_NEGO_CONF:
+ case P2P_PROVISION_DISC_RESP:
+ rtw_clear_scan_deny(padapter);
+ }
+ goto indicate;
+ }
+#endif
+ rtw_action_frame_parse23a(pmgmt_frame, frame_len, &category, &action);
+ DBG_8723A("RTW_Rx:category(%u), action(%u)\n", category, action);
+
+#ifdef CONFIG_8723AU_P2P
+indicate:
+#endif
+ if (channel <= RTW_CH_MAX_2G_CHANNEL)
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_2GHZ);
+ else
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_5GHZ);
+
+ rtw_cfg80211_rx_mgmt(padapter, freq, 0, pmgmt_frame, frame_len,
+ GFP_ATOMIC);
+}
+
+void rtw_cfg80211_rx_action(struct rtw_adapter *adapter, u8 *frame,
+ uint frame_len, const char *msg)
+{
+ s32 freq;
+ int channel;
+ u8 category, action;
+
+ channel = rtw_get_oper_ch23a(adapter);
+
+ rtw_action_frame_parse23a(frame, frame_len, &category, &action);
+
+ DBG_8723A("RTW_Rx:cur_ch =%d\n", channel);
+ if (msg)
+ DBG_8723A("RTW_Rx:%s\n", msg);
+ else
+ DBG_8723A("RTW_Rx:category(%u), action(%u)\n", category,
+ action);
+
+ if (channel <= RTW_CH_MAX_2G_CHANNEL)
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_2GHZ);
+ else
+ freq = ieee80211_channel_to_frequency(channel,
+ IEEE80211_BAND_5GHZ);
+
+ rtw_cfg80211_rx_mgmt(adapter, freq, 0, frame, frame_len, GFP_ATOMIC);
+}
+
+#ifdef CONFIG_8723AU_P2P
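+/*
+ * Build a P2P provision discovery request frame addressed to the peer named
+ * in the supplied frame, selecting the WPS config method from the peer's
+ * device password ID.
+ */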
+void rtw_cfg80211_issue_p2p_provision_request23a(struct rtw_adapter *padapter,
+ const u8 *buf, size_t len)
+{
+ u16 wps_devicepassword_id = 0x0000;
+ uint wps_devicepassword_id_len = 0;
+ u8 wpsie[255] = { 0x00 }, p2p_ie[255] = { 0x00 };
+ uint p2p_ielen = 0;
+ uint wpsielen = 0;
+ u32 devinfo_contentlen = 0;
+ u8 devinfo_content[64] = { 0x00 };
+ u16 capability = 0;
+ uint capability_len = 0;
+
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ u8 dialogToken = 1;
+ u32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_PROVISION_DISC_REQ;
+ u32 p2pielen = 0;
+#ifdef CONFIG_8723AU_P2P
+ u32 wfdielen = 0;
+#endif /* CONFIG_8723AU_P2P */
+
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr, *hdr;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 *frame_body =
+ (unsigned char *)(buf + sizeof(struct ieee80211_hdr_3addr));
+ size_t frame_body_len = len - sizeof(struct ieee80211_hdr_3addr);
+
+ DBG_8723A("[%s] In\n", __func__);
+
+ hdr = (struct ieee80211_hdr *)buf;
+ /* prepare for building provision_request frame */
+ memcpy(pwdinfo->tx_prov_disc_info.peerIFAddr, hdr->addr1, ETH_ALEN);
+ memcpy(pwdinfo->tx_prov_disc_info.peerDevAddr, hdr->addr1, ETH_ALEN);
+
+ pwdinfo->tx_prov_disc_info.wps_config_method_request =
+ WPS_CM_PUSH_BUTTON;
+
+ rtw_get_wps_ie23a(frame_body + _PUBLIC_ACTION_IE_OFFSET_,
+ frame_body_len - _PUBLIC_ACTION_IE_OFFSET_, wpsie,
+ &wpsielen);
+ rtw_get_wps_attr_content23a(wpsie, wpsielen, WPS_ATTR_DEVICE_PWID,
+ (u8 *)&wps_devicepassword_id,
+ &wps_devicepassword_id_len);
+ wps_devicepassword_id = be16_to_cpu(wps_devicepassword_id);
+
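+ /* map the peer's WPS Device Password ID to the config method that
+ * will be requested in the provision discovery request
+ */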
+ switch (wps_devicepassword_id) {
+ case WPS_DPID_PIN:
+ pwdinfo->tx_prov_disc_info.wps_config_method_request =
+ WPS_CM_LABEL;
+ break;
+ case WPS_DPID_USER_SPEC:
+ pwdinfo->tx_prov_disc_info.wps_config_method_request =
+ WPS_CM_DISPLYA;
+ break;
+ case WPS_DPID_MACHINE_SPEC:
+ break;
+ case WPS_DPID_REKEY:
+ break;
+ case WPS_DPID_PBC:
+ pwdinfo->tx_prov_disc_info.wps_config_method_request =
+ WPS_CM_PUSH_BUTTON;
+ break;
+ case WPS_DPID_REGISTRAR_SPEC:
+ pwdinfo->tx_prov_disc_info.wps_config_method_request =
+ WPS_CM_KEYPAD;
+ break;
+ default:
+ break;
+ }
+
+ if (rtw_get_p2p_ie23a(frame_body + _PUBLIC_ACTION_IE_OFFSET_,
+ frame_body_len - _PUBLIC_ACTION_IE_OFFSET_,
+ p2p_ie, &p2p_ielen)) {
+ rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen,
+ P2P_ATTR_DEVICE_INFO, devinfo_content,
+ &devinfo_contentlen);
+ rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen, P2P_ATTR_CAPABILITY,
+ (u8 *)&capability, &capability_len);
+ }
+
+ /* start to build provision_request frame */
+ memset(wpsie, 0, sizeof(wpsie));
+ memset(p2p_ie, 0, sizeof(p2p_ie));
+ p2p_ielen = 0;
+
+ pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *) (pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ pwlanhdr->frame_control = 0;
+
+ memcpy(pwlanhdr->addr1, pwdinfo->tx_prov_disc_info.peerDevAddr,
+ ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->tx_prov_disc_info.peerDevAddr,
+ ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &action, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 4, (unsigned char *)&p2poui,
+ &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &oui_subtype, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie23a(pframe, 1, &dialogToken, &pattrib->pktlen);
+
+ /* build_prov_disc_request_p2p_ie23a */
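+ /* The P2P IE body consists of the WFA OUI (50:6F:9A), the P2P OUI
+ * type (0x09) and a list of attributes, each encoded as a 1-byte ID,
+ * a 2-byte little-endian length and the value.
+ */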
+ /* P2P OUI */
+ p2pielen = 0;
+ p2p_ie[p2pielen++] = 0x50;
+ p2p_ie[p2pielen++] = 0x6F;
+ p2p_ie[p2pielen++] = 0x9A;
+ p2p_ie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert, 2011-03-01:
+ * According to the P2P Specification, the provision discovery request
+ * frame should contain three P2P attributes:
+ * 1. P2P Capability
+ * 2. Device Info
+ * 3. Group ID (when joining an operating P2P Group)
+ */
+
+ /* P2P Capability ATTR */
+ /* Type: */
+ p2p_ie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ put_unaligned_le16(0x0002, p2p_ie + p2pielen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ /* Group Capability Bitmap, 1 byte */
+ memcpy(p2p_ie + p2pielen, &capability, 2);
+ p2pielen += 2;
+
+ /* Device Info ATTR */
+ /* Type: */
+ p2p_ie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ put_unaligned_le16(devinfo_contentlen, p2p_ie + p2pielen);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2p_ie + p2pielen, devinfo_content, devinfo_contentlen);
+ p2pielen += devinfo_contentlen;
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, p2pielen,
+ (unsigned char *)p2p_ie, &p2p_ielen);
+ pattrib->pktlen += p2p_ielen;
+
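+ /* The WPS IE body starts with the WPS OUI and carries attributes
+ * encoded as a 2-byte big-endian type, a 2-byte big-endian length
+ * and the value.
+ */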
+ wpsielen = 0;
+ /* WPS OUI */
+ *(u32 *)(wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(u16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* Config Method */
+ /* Type: */
+ *(u16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ *(u16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(u16 *)(wpsie + wpsielen) =
+ cpu_to_be16(pwdinfo->tx_prov_disc_info.wps_config_method_request);
+ wpsielen += 2;
+
+ pframe = rtw_set_ie23a(pframe, _VENDOR_SPECIFIC_IE_, wpsielen,
+ (unsigned char *)wpsie, &pattrib->pktlen);
+
+#ifdef CONFIG_8723AU_P2P
+ wfdielen = build_provdisc_req_wfd_ie(pwdinfo, pframe);
+ pframe += wfdielen;
+ pattrib->pktlen += wfdielen;
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ /* dump_mgntframe23a(padapter, pmgntframe); */
+ if (dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe) != _SUCCESS)
+ DBG_8723A("%s, ack to\n", __func__);
+}
+
+static s32 cfg80211_rtw_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct ieee80211_channel *channel,
+ unsigned int duration, u64 *cookie)
+{
+ s32 err = 0;
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ struct cfg80211_wifidirect_info *pcfg80211_wdinfo =
+ &padapter->cfg80211_wdinfo;
+ u8 remain_ch =
+ (u8) ieee80211_frequency_to_channel(channel->center_freq);
+ u8 ready_on_channel = false;
+
+ DBG_8723A(FUNC_ADPT_FMT " ch:%u duration:%d\n", FUNC_ADPT_ARG(padapter),
+ remain_ch, duration);
+
+ if (pcfg80211_wdinfo->is_ro_ch) {
+ DBG_8723A("%s, cancel ro ch timer\n", __func__);
+
+ del_timer_sync(&padapter->cfg80211_wdinfo.remain_on_ch_timer);
+
+#ifdef CONFIG_8723AU_P2P
+ p2p_protocol_wk_hdl23a(padapter, P2P_RO_CH_WK);
+#endif
+ }
+
+ pcfg80211_wdinfo->is_ro_ch = true;
+
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ err = -EFAULT;
+ goto exit;
+ }
+
+ memcpy(&pcfg80211_wdinfo->remain_on_ch_channel, channel,
+ sizeof(struct ieee80211_channel));
+ pcfg80211_wdinfo->remain_on_ch_cookie = *cookie;
+
+ rtw_scan_abort23a(padapter);
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ rtw_p2p_enable23a(padapter, P2P_ROLE_DEVICE);
+ wdev_to_priv(padapter->rtw_wdev)->p2p_enabled = true;
+ } else {
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s, role =%d, p2p_state =%d\n", __func__,
+ rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo));
+#endif
+ }
+
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_LISTEN);
+
+ if (duration < 400)
+ duration = duration * 3; /* extend the duration based on experience */
+
+ pcfg80211_wdinfo->restore_channel = pmlmeext->cur_channel;
+
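+ /* a channel switch is only needed when the requested channel is in
+ * our channel plan and differs from the current operating channel
+ */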
+ if (rtw_ch_set_search_ch23a(pmlmeext->channel_set, remain_ch) >= 0) {
+ if (remain_ch != pmlmeext->cur_channel) {
+ ready_on_channel = true;
+ }
+ } else {
+ DBG_8723A("%s remain_ch:%u not in channel plan!!!!\n",
+ __func__, remain_ch);
+ }
+
+ /* call this after other things have been done */
+ if (ready_on_channel) {
+ if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+ pmlmeext->cur_channel = remain_ch;
+
+ set_channel_bwmode23a(padapter, remain_ch,
+ HAL_PRIME_CHNL_OFFSET_DONT_CARE,
+ HT_CHANNEL_WIDTH_20);
+ }
+ }
+ DBG_8723A("%s, set ro ch timer, duration =%d\n", __func__, duration);
+ mod_timer(&pcfg80211_wdinfo->remain_on_ch_timer,
+ jiffies + msecs_to_jiffies(duration));
+
+ rtw_cfg80211_ready_on_channel(padapter, *cookie, channel, channel_type,
+ duration, GFP_KERNEL);
+
+ pwdinfo->listen_channel = pmlmeext->cur_channel;
+
+exit:
+ if (err)
+ pcfg80211_wdinfo->is_ro_ch = false;
+
+ return err;
+}
+
+static s32 cfg80211_rtw_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie)
+{
+ s32 err = 0;
+ struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ struct cfg80211_wifidirect_info *pcfg80211_wdinfo =
+ &padapter->cfg80211_wdinfo;
+
+ DBG_8723A(FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(padapter));
+
+ if (pcfg80211_wdinfo->is_ro_ch) {
+ DBG_8723A("%s, cancel ro ch timer\n", __func__);
+ del_timer_sync(&padapter->cfg80211_wdinfo.remain_on_ch_timer);
+#ifdef CONFIG_8723AU_P2P
+ p2p_protocol_wk_hdl23a(padapter, P2P_RO_CH_WK);
+#endif
+ }
+
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s, role =%d, p2p_state =%d\n", __func__,
+ rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo));
+#endif
+ pcfg80211_wdinfo->is_ro_ch = false;
+
+ return err;
+}
+
+#endif /* CONFIG_8723AU_P2P */
+
+static int _cfg80211_rtw_mgmt_tx(struct rtw_adapter *padapter, u8 tx_ch,
+ const u8 *buf, size_t len)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ int ret = _FAIL;
+ bool ack = true;
+ struct ieee80211_hdr *pwlanhdr;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ /* struct cfg80211_wifidirect_info *pcfg80211_wdinfo = &padapter->cfg80211_wdinfo; */
+
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ rtw_set_scan_deny(padapter, 1000);
+
+ rtw_scan_abort23a(padapter);
+
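+ /* move to the TX channel if needed; cur_channel is only updated when
+ * we are not associated
+ */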
+ if (tx_ch != rtw_get_oper_ch23a(padapter)) {
+ if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED))
+ pmlmeext->cur_channel = tx_ch;
+ set_channel_bwmode23a(padapter, tx_ch,
+ HAL_PRIME_CHNL_OFFSET_DONT_CARE,
+ HT_CHANNEL_WIDTH_20);
+ }
+
+ /* allocate a management frame and transmit (dump) it */
+ pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
+ if (pmgntframe == NULL) {
+ /* ret = -ENOMEM; */
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib23a(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *) (pmgntframe->buf_addr) + TXDESC_OFFSET;
+
+ memcpy(pframe, (void *)buf, len);
+ pattrib->pktlen = len;
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+ /* update seq number */
+ pmlmeext->mgnt_seq = le16_to_cpu(pwlanhdr->seq_ctrl) >> 4;
+ pattrib->seqnum = pmlmeext->mgnt_seq;
+ pmlmeext->mgnt_seq++;
+
+#ifdef CONFIG_8723AU_P2P
+ {
+ struct wifi_display_info *pwfd_info;
+
+ pwfd_info = padapter->wdinfo.wfd_info;
+
+ if (pwfd_info->wfd_enable)
+ rtw_append_wfd_ie(padapter, pframe, &pattrib->pktlen);
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ if (dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe) != _SUCCESS) {
+ ack = false;
+ ret = _FAIL;
+
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s, ack == _FAIL\n", __func__);
+#endif
+ } else {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s, ack =%d, ok!\n", __func__, ack);
+#endif
+ ret = _SUCCESS;
+ }
+
+exit:
+
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s, ret =%d\n", __func__, ret);
+#endif
+
+ return ret;
+}
+
+static int cfg80211_rtw_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ struct ieee80211_channel *chan,
+ bool offchan,
+ unsigned int wait,
+ const u8 *buf, size_t len,
+ bool no_cck, bool dont_wait_for_ack,
+#else
+ struct cfg80211_mgmt_tx_params *params,
+#endif
+ u64 *cookie)
+{
+ struct rtw_adapter *padapter =
+ (struct rtw_adapter *)wiphy_to_adapter(wiphy);
+ struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);
+ int ret = 0;
+ int tx_ret;
+ u32 dump_limit = RTW_MAX_MGMT_TX_CNT;
+ u32 dump_cnt = 0;
+ bool ack = true;
+ u8 category, action;
+ int type = (-1);
+ unsigned long start = jiffies;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ size_t len = params->len;
+ struct ieee80211_channel *chan = params->chan;
+ const u8 *buf = params->buf;
+#endif
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)buf;
+ u8 tx_ch = (u8) ieee80211_frequency_to_channel(chan->center_freq);
+
+ /* cookie generation */
+ *cookie = (unsigned long)buf;
+
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A(FUNC_ADPT_FMT " len =%zu, ch =%d"
+ "\n", FUNC_ADPT_ARG(padapter), len, tx_ch);
+#endif /* CONFIG_DEBUG_CFG80211 */
+
+ /* indicate the ack before issuing the frame to avoid racing with
+ * the response frame
+ */
+ rtw_cfg80211_mgmt_tx_status(padapter, *cookie, buf, len, ack,
+ GFP_KERNEL);
+
+ if (rtw_action_frame_parse23a(buf, len, &category, &action) == false) {
+ DBG_8723A(FUNC_ADPT_FMT " frame_control:0x%x\n",
+ FUNC_ADPT_ARG(padapter),
+ le16_to_cpu(hdr->frame_control));
+ goto exit;
+ }
+
+ DBG_8723A("RTW_Tx:tx_ch =%d, da =" MAC_FMT "\n", tx_ch,
+ MAC_ARG(hdr->addr1));
+#ifdef CONFIG_8723AU_P2P
+ type = rtw_p2p_check_frames(padapter, buf, len, true);
+ if (type >= 0)
+ goto dump;
+#endif
+ if (category == WLAN_CATEGORY_PUBLIC)
+ DBG_8723A("RTW_Tx:%s\n", action_public_str23a(action));
+ else
+ DBG_8723A("RTW_Tx:category(%u), action(%u)\n",
+ category, action);
+
+#ifdef CONFIG_8723AU_P2P
+dump:
+#endif
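+ /* retry the transmit until the frame is acked or dump_limit attempts
+ * have been made
+ */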
+ do {
+ dump_cnt++;
+ tx_ret = _cfg80211_rtw_mgmt_tx(padapter, tx_ch, buf, len);
+ } while (dump_cnt < dump_limit && tx_ret != _SUCCESS);
+
+ if (tx_ret != _SUCCESS || dump_cnt > 1) {
+ DBG_8723A(FUNC_ADPT_FMT " %s (%d/%d) in %d ms\n",
+ FUNC_ADPT_ARG(padapter),
+ tx_ret == _SUCCESS ? "OK" : "FAIL", dump_cnt,
+ dump_limit, jiffies_to_msecs(jiffies - start));
+ }
+
+ switch (type) {
+ case P2P_GO_NEGO_CONF:
+ rtw_clear_scan_deny(padapter);
+ break;
+ case P2P_INVIT_RESP:
+ if (pwdev_priv->invit_info.flags & BIT(0)
+ && pwdev_priv->invit_info.status == 0) {
+ DBG_8723A(FUNC_ADPT_FMT " agree with invitation of "
+ "persistent group\n",
+ FUNC_ADPT_ARG(padapter));
+ rtw_set_scan_deny(padapter, 5000);
+ rtw_pwr_wakeup_ex(padapter, 5000);
+ rtw_clear_scan_deny(padapter);
+ }
+ break;
+ }
+
+exit:
+ return ret;
+}
+
+static void cfg80211_rtw_mgmt_frame_register(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u16 frame_type, bool reg)
+{
+#ifdef CONFIG_DEBUG_CFG80211
+ struct rtw_adapter *adapter = wiphy_to_adapter(wiphy);
+
+ DBG_8723A(FUNC_ADPT_FMT " frame_type:%x, reg:%d\n",
+ FUNC_ADPT_ARG(adapter), frame_type, reg);
+#endif
+
+ if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
+ return;
+
+ return;
+}
+
+static int rtw_cfg80211_set_beacon_wpsp2pie(struct net_device *ndev, char *buf,
+ int len)
+{
+ int ret = 0;
+ uint wps_ielen = 0;
+ u8 *wps_ie;
+#ifdef CONFIG_8723AU_P2P
+ u32 p2p_ielen = 0;
+ u32 wfd_ielen = 0;
+ u8 *p2p_ie;
+#endif
+#ifdef CONFIG_8723AU_AP_MODE
+ u8 wps_oui[8] = { 0x0, 0x50, 0xf2, 0x04 };
+#endif
+ struct rtw_adapter *padapter = netdev_priv(ndev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ DBG_8723A(FUNC_NDEV_FMT " ielen =%d\n", FUNC_NDEV_ARG(ndev), len);
+
+ if (len > 0) {
+ wps_ie = rtw_get_wps_ie23a(buf, len, NULL, &wps_ielen);
+ if (wps_ie) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("bcn_wps_ielen =%d\n", wps_ielen);
+#endif
+
+ if (pmlmepriv->wps_beacon_ie) {
+ pmlmepriv->wps_beacon_ie_len = 0;
+ kfree(pmlmepriv->wps_beacon_ie);
+ pmlmepriv->wps_beacon_ie = NULL;
+ }
+
+ pmlmepriv->wps_beacon_ie =
+ kmalloc(wps_ielen, GFP_KERNEL);
+ if (pmlmepriv->wps_beacon_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ memcpy(pmlmepriv->wps_beacon_ie, wps_ie, wps_ielen);
+ pmlmepriv->wps_beacon_ie_len = wps_ielen;
+
+#ifdef CONFIG_8723AU_AP_MODE
+ update_beacon23a(padapter, _VENDOR_SPECIFIC_IE_, wps_oui,
+ true);
+#endif
+ }
+#ifdef CONFIG_8723AU_P2P
+ p2p_ie = rtw_get_p2p_ie23a(buf, len, NULL, &p2p_ielen);
+ if (p2p_ie) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("bcn_p2p_ielen =%d\n", p2p_ielen);
+#endif
+
+ if (pmlmepriv->p2p_beacon_ie) {
+ pmlmepriv->p2p_beacon_ie_len = 0;
+ kfree(pmlmepriv->p2p_beacon_ie);
+ pmlmepriv->p2p_beacon_ie = NULL;
+ }
+
+ pmlmepriv->p2p_beacon_ie =
+ kmalloc(p2p_ielen, GFP_KERNEL);
+ if (pmlmepriv->p2p_beacon_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ memcpy(pmlmepriv->p2p_beacon_ie, p2p_ie, p2p_ielen);
+ pmlmepriv->p2p_beacon_ie_len = p2p_ielen;
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ /* buf += p2p_ielen; */
+ /* len -= p2p_ielen; */
+
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_get_wfd_ie(buf, len, NULL, &wfd_ielen)) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("bcn_wfd_ielen =%d\n", wfd_ielen);
+#endif
+
+ if (pmlmepriv->wfd_beacon_ie) {
+ pmlmepriv->wfd_beacon_ie_len = 0;
+ kfree(pmlmepriv->wfd_beacon_ie);
+ pmlmepriv->wfd_beacon_ie = NULL;
+ }
+
+ pmlmepriv->wfd_beacon_ie =
+ kmalloc(wfd_ielen, GFP_KERNEL);
+ if (pmlmepriv->wfd_beacon_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+
+ }
+ rtw_get_wfd_ie(buf, len, pmlmepriv->wfd_beacon_ie,
+ &pmlmepriv->wfd_beacon_ie_len);
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ pmlmeext->bstart_bss = true;
+
+ }
+
+ return ret;
+}
+
+static int rtw_cfg80211_set_probe_resp_wpsp2pie(struct net_device *net,
+ char *buf, int len)
+{
+ struct rtw_adapter *padapter = netdev_priv(net);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+#ifdef CONFIG_8723AU_P2P
+ u32 p2p_ielen = 0;
+ u8 *p2p_ie;
+ u32 wfd_ielen = 0;
+#endif
+ int ret = 0;
+ uint wps_ielen = 0;
+ u8 *wps_ie;
+
+ if (len > 0) {
+ wps_ie = rtw_get_wps_ie23a(buf, len, NULL, &wps_ielen);
+ if (wps_ie) {
+ uint attr_contentlen = 0;
+ u16 uconfig_method, *puconfig_method = NULL;
+
+ if (pmlmepriv->wps_probe_resp_ie) {
+ pmlmepriv->wps_probe_resp_ie_len = 0;
+ kfree(pmlmepriv->wps_probe_resp_ie);
+ pmlmepriv->wps_probe_resp_ie = NULL;
+ }
+
+ pmlmepriv->wps_probe_resp_ie =
+ kmalloc(wps_ielen, GFP_KERNEL);
+ if (pmlmepriv->wps_probe_resp_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+
+ }
+
+ /* The driver itself adds the PUSH_BUTTON config method to the
+ * WPS IE of the probe response when acting in GO mode.
+ */
+ puconfig_method = (u16 *)rtw_get_wps_attr_content23a(wps_ie, wps_ielen,
+ WPS_ATTR_CONF_METHOD,
+ NULL,
+ &attr_contentlen);
+ if (puconfig_method) {
+ uconfig_method = WPS_CM_PUSH_BUTTON;
+ uconfig_method = cpu_to_be16(uconfig_method);
+
+ *puconfig_method |= uconfig_method;
+ }
+
+ memcpy(pmlmepriv->wps_probe_resp_ie, wps_ie, wps_ielen);
+ pmlmepriv->wps_probe_resp_ie_len = wps_ielen;
+
+ }
+
+ /* buf += wps_ielen; */
+ /* len -= wps_ielen; */
+
+#ifdef CONFIG_8723AU_P2P
+ p2p_ie = rtw_get_p2p_ie23a(buf, len, NULL, &p2p_ielen);
+ if (p2p_ie) {
+ u8 is_GO = false;
+ u32 attr_contentlen = 0;
+ u16 cap_attr = 0;
+
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("probe_resp_p2p_ielen =%d\n", p2p_ielen);
+#endif
+
+ /* Check P2P Capability ATTR */
+ if (rtw_get_p2p_attr23a_content(p2p_ie, p2p_ielen,
+ P2P_ATTR_CAPABILITY,
+ (u8 *) &cap_attr,
+ (uint *) &attr_contentlen)) {
+ u8 grp_cap = 0;
+ /* DBG_8723A( "[%s] Got P2P Capability Attr!!\n", __func__ ); */
+ cap_attr = le16_to_cpu(cap_attr);
+ grp_cap = (u8) ((cap_attr >> 8) & 0xff);
+
+ is_GO = (grp_cap & BIT(0)) ? true : false;
+
+ if (is_GO)
+ DBG_8723A("Got P2P Capability Attr, grp_cap = 0x%x, is_GO\n",
+ grp_cap);
+ }
+
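+ /* store the probe response IE separately depending on whether we
+ * act as the P2P GO
+ */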
+ if (is_GO == false) {
+ if (pmlmepriv->p2p_probe_resp_ie) {
+ pmlmepriv->p2p_probe_resp_ie_len = 0;
+ kfree(pmlmepriv->p2p_probe_resp_ie);
+ pmlmepriv->p2p_probe_resp_ie = NULL;
+ }
+
+ pmlmepriv->p2p_probe_resp_ie =
+ kmalloc(p2p_ielen, GFP_KERNEL);
+ if (pmlmepriv->p2p_probe_resp_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ memcpy(pmlmepriv->p2p_probe_resp_ie, p2p_ie,
+ p2p_ielen);
+ pmlmepriv->p2p_probe_resp_ie_len = p2p_ielen;
+ } else {
+ if (pmlmepriv->p2p_go_probe_resp_ie) {
+ pmlmepriv->p2p_go_probe_resp_ie_len = 0;
+ kfree(pmlmepriv->p2p_go_probe_resp_ie);
+ pmlmepriv->p2p_go_probe_resp_ie = NULL;
+ }
+
+ pmlmepriv->p2p_go_probe_resp_ie =
+ kmalloc(p2p_ielen, GFP_KERNEL);
+ if (pmlmepriv->p2p_go_probe_resp_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+
+ }
+ memcpy(pmlmepriv->p2p_go_probe_resp_ie,
+ p2p_ie, p2p_ielen);
+ pmlmepriv->p2p_go_probe_resp_ie_len = p2p_ielen;
+ }
+ }
+#endif /* CONFIG_8723AU_P2P */
+
+ /* buf += p2p_ielen; */
+ /* len -= p2p_ielen; */
+
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_get_wfd_ie(buf, len, NULL, &wfd_ielen)) {
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("probe_resp_wfd_ielen =%d\n", wfd_ielen);
+#endif
+
+ if (pmlmepriv->wfd_probe_resp_ie) {
+ pmlmepriv->wfd_probe_resp_ie_len = 0;
+ kfree(pmlmepriv->wfd_probe_resp_ie);
+ pmlmepriv->wfd_probe_resp_ie = NULL;
+ }
+
+ pmlmepriv->wfd_probe_resp_ie =
+ kmalloc(wfd_ielen, GFP_KERNEL);
+ if (pmlmepriv->wfd_probe_resp_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+
+ }
+ rtw_get_wfd_ie(buf, len, pmlmepriv->wfd_probe_resp_ie,
+ &pmlmepriv->wfd_probe_resp_ie_len);
+ }
+#endif /* CONFIG_8723AU_P2P */
+ }
+
+ return ret;
+}
+
+static int rtw_cfg80211_set_assoc_resp_wpsp2pie(struct net_device *net,
+ char *buf, int len)
+{
+ int ret = 0;
+ struct rtw_adapter *padapter = netdev_priv(net);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ DBG_8723A("%s, ielen =%d\n", __func__, len);
+
+ if (len > 0) {
+ if (pmlmepriv->wps_assoc_resp_ie) {
+ pmlmepriv->wps_assoc_resp_ie_len = 0;
+ kfree(pmlmepriv->wps_assoc_resp_ie);
+ pmlmepriv->wps_assoc_resp_ie = NULL;
+ }
+
+ pmlmepriv->wps_assoc_resp_ie = kmalloc(len, GFP_KERNEL);
+ if (pmlmepriv->wps_assoc_resp_ie == NULL) {
+ DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+
+ }
+ memcpy(pmlmepriv->wps_assoc_resp_ie, buf, len);
+ pmlmepriv->wps_assoc_resp_ie_len = len;
+ }
+
+ return ret;
+}
+
+int rtw_cfg80211_set_mgnt_wpsp2pie(struct net_device *net, char *buf, int len,
+ int type)
+{
+ int ret = 0;
+ uint wps_ielen = 0;
+#ifdef CONFIG_8723AU_P2P
+ u32 p2p_ielen = 0;
+#endif
+
+#ifdef CONFIG_DEBUG_CFG80211
+ DBG_8723A("%s, ielen =%d\n", __func__, len);
+#endif
+
+ if ((rtw_get_wps_ie23a(buf, len, NULL, &wps_ielen) && (wps_ielen > 0))
+#ifdef CONFIG_8723AU_P2P
+ || (rtw_get_p2p_ie23a(buf, len, NULL, &p2p_ielen) && (p2p_ielen > 0))
+#endif
+ ) {
+ if (net) {
+ switch (type) {
+ case 0x1: /* BEACON */
+ ret =
+ rtw_cfg80211_set_beacon_wpsp2pie(net, buf,
+ len);
+ break;
+ case 0x2: /* PROBE_RESP */
+ ret =
+ rtw_cfg80211_set_probe_resp_wpsp2pie(net,
+ buf,
+ len);
+ break;
+ case 0x4: /* ASSOC_RESP */
+ ret =
+ rtw_cfg80211_set_assoc_resp_wpsp2pie(net,
+ buf,
+ len);
+ break;
+ }
+ }
+ }
+
+ return ret;
+
+}
+
+static struct cfg80211_ops rtw_cfg80211_ops = {
+ .change_virtual_intf = cfg80211_rtw_change_iface,
+ .add_key = cfg80211_rtw_add_key,
+ .get_key = cfg80211_rtw_get_key,
+ .del_key = cfg80211_rtw_del_key,
+ .set_default_key = cfg80211_rtw_set_default_key,
+ .get_station = cfg80211_rtw_get_station,
+ .scan = cfg80211_rtw_scan,
+ .set_wiphy_params = cfg80211_rtw_set_wiphy_params,
+ .connect = cfg80211_rtw_connect,
+ .disconnect = cfg80211_rtw_disconnect,
+ .join_ibss = cfg80211_rtw_join_ibss,
+ .leave_ibss = cfg80211_rtw_leave_ibss,
+ .set_tx_power = cfg80211_rtw_set_txpower,
+ .get_tx_power = cfg80211_rtw_get_txpower,
+ .set_power_mgmt = cfg80211_rtw_set_power_mgmt,
+ .set_pmksa = cfg80211_rtw_set_pmksa,
+ .del_pmksa = cfg80211_rtw_del_pmksa,
+ .flush_pmksa = cfg80211_rtw_flush_pmksa,
+
+#ifdef CONFIG_8723AU_AP_MODE
+ .add_virtual_intf = cfg80211_rtw_add_virtual_intf,
+ .del_virtual_intf = cfg80211_rtw_del_virtual_intf,
+
+ .start_ap = cfg80211_rtw_start_ap,
+ .change_beacon = cfg80211_rtw_change_beacon,
+ .stop_ap = cfg80211_rtw_stop_ap,
+
+ .add_station = cfg80211_rtw_add_station,
+ .del_station = cfg80211_rtw_del_station,
+ .change_station = cfg80211_rtw_change_station,
+ .dump_station = cfg80211_rtw_dump_station,
+ .change_bss = cfg80211_rtw_change_bss,
+#endif /* CONFIG_8723AU_AP_MODE */
+
+#ifdef CONFIG_8723AU_P2P
+ .remain_on_channel = cfg80211_rtw_remain_on_channel,
+ .cancel_remain_on_channel = cfg80211_rtw_cancel_remain_on_channel,
+#endif
+
+ .mgmt_tx = cfg80211_rtw_mgmt_tx,
+ .mgmt_frame_register = cfg80211_rtw_mgmt_frame_register,
+};
+
+static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap,
+ enum ieee80211_band band, u8 rf_type)
+{
+
+#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
+#define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */
+
+ ht_cap->ht_supported = true;
+
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU;
+
+ /*
+ * Maximum length of A-MPDU that the STA can receive.
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+
+ /* Minimum MPDU start spacing */
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ /*
+ * hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ * rx_mask is set based on the antenna count:
+ * rx_ant = 1: rx_mask[0] = 0xff ==> MCS0-MCS7
+ * rx_ant = 2: rx_mask[1] = 0xff ==> MCS8-MCS15
+ * rx_ant >= 3: rx_mask[2] = 0xff
+ * BW_40: rx_mask[4] = 0x01
+ * rx_highest is the highest supported RX rate.
+ */
+ if (rf_type == RF_1T1R) {
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0x00;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7;
+ } else if ((rf_type == RF_1T2R) || (rf_type == RF_2T2R)) {
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15;
+ } else {
+ DBG_8723A("%s, error rf_type =%d\n", __func__, rf_type);
+ }
+
+}
+
+void rtw_cfg80211_init_wiphy(struct rtw_adapter *padapter)
+{
+ u8 rf_type;
+ struct ieee80211_supported_band *bands;
+ struct wireless_dev *pwdev = padapter->rtw_wdev;
+ struct wiphy *wiphy = pwdev->wiphy;
+
+ rtw23a_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ DBG_8723A("%s:rf_type =%d\n", __func__, rf_type);
+
+ /* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
+ {
+ bands = wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (bands)
+ rtw_cfg80211_init_ht_capab(&bands->ht_cap,
+ IEEE80211_BAND_2GHZ,
+ rf_type);
+ }
+
+ /* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
+ {
+ bands = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (bands)
+ rtw_cfg80211_init_ht_capab(&bands->ht_cap,
+ IEEE80211_BAND_5GHZ,
+ rf_type);
+ }
+}
+
+static void rtw_cfg80211_preinit_wiphy(struct rtw_adapter *padapter,
+ struct wiphy *wiphy)
+{
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->max_scan_ssids = RTW_SSID_SCAN_AMOUNT;
+ wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ wiphy->max_num_pmkids = RTW_MAX_NUM_PMKIDS;
+
+ wiphy->max_remain_on_channel_duration =
+ RTW_MAX_REMAIN_ON_CHANNEL_DURATION;
+
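+ /* interface types advertised to cfg80211; the trailing "| 0" lets
+ * each conditionally compiled line end with "|"
+ */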
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+#ifdef CONFIG_8723AU_AP_MODE
+ BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MONITOR) |
+#endif
+#if defined(CONFIG_8723AU_P2P)
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) |
+#endif
+ 0;
+
+#ifdef CONFIG_8723AU_AP_MODE
+ wiphy->mgmt_stypes = rtw_cfg80211_default_mgmt_stypes;
+#endif /* CONFIG_8723AU_AP_MODE */
+
+ wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
+
+ /*
+ wiphy->iface_combinations = &rtw_combinations;
+ wiphy->n_iface_combinations = 1;
+ */
+
+ wiphy->cipher_suites = rtw_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(rtw_cipher_suites);
+
+ /* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
+ wiphy->bands[IEEE80211_BAND_2GHZ] =
+ rtw_spt_band_alloc(IEEE80211_BAND_2GHZ);
+ /* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
+ wiphy->bands[IEEE80211_BAND_5GHZ] =
+ rtw_spt_band_alloc(IEEE80211_BAND_5GHZ);
+
+ wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAVE_AP_SME;
+
+ if (padapter->registrypriv.power_mgnt != PS_MODE_ACTIVE)
+ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+}
+
+int rtw_wdev_alloc(struct rtw_adapter *padapter, struct device *dev)
+{
+ int ret = 0;
+ struct wiphy *wiphy;
+ struct wireless_dev *wdev;
+ struct rtw_wdev_priv *pwdev_priv;
+ struct net_device *pnetdev = padapter->pnetdev;
+
+ DBG_8723A("%s(padapter =%p)\n", __func__, padapter);
+
+ /* wiphy */
+ wiphy = wiphy_new(&rtw_cfg80211_ops, sizeof(struct rtw_wdev_priv));
+ if (!wiphy) {
+ DBG_8723A("Couldn't allocate wiphy device\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ set_wiphy_dev(wiphy, dev);
+ rtw_cfg80211_preinit_wiphy(padapter, wiphy);
+
+ ret = wiphy_register(wiphy);
+ if (ret < 0) {
+ DBG_8723A("Couldn't register wiphy device\n");
+ goto free_wiphy;
+ }
+
+ /* wdev */
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev) {
+ DBG_8723A("Couldn't allocate wireless device\n");
+ ret = -ENOMEM;
+ goto unregister_wiphy;
+ }
+ wdev->wiphy = wiphy;
+ wdev->netdev = pnetdev;
+ /* wdev->iftype = NL80211_IFTYPE_STATION; */
+ /* for rtw_setopmode_cmd23a() in cfg80211_rtw_change_iface() */
+ wdev->iftype = NL80211_IFTYPE_MONITOR;
+ padapter->rtw_wdev = wdev;
+ pnetdev->ieee80211_ptr = wdev;
+
+ /* init pwdev_priv */
+ pwdev_priv = wdev_to_priv(wdev);
+ pwdev_priv->rtw_wdev = wdev;
+ pwdev_priv->pmon_ndev = NULL;
+ pwdev_priv->ifname_mon[0] = '\0';
+ pwdev_priv->padapter = padapter;
+ pwdev_priv->scan_request = NULL;
+ spin_lock_init(&pwdev_priv->scan_req_lock);
+
+ pwdev_priv->p2p_enabled = false;
+ pwdev_priv->provdisc_req_issued = false;
+ rtw_wdev_invit_info_init(&pwdev_priv->invit_info);
+
+ if (padapter->registrypriv.power_mgnt != PS_MODE_ACTIVE)
+ pwdev_priv->power_mgmt = true;
+ else
+ pwdev_priv->power_mgmt = false;
+
+ return ret;
+unregister_wiphy:
+ wiphy_unregister(wiphy);
+free_wiphy:
+ wiphy_free(wiphy);
+exit:
+ return ret;
+}
+
+void rtw_wdev_free(struct wireless_dev *wdev)
+{
+ struct rtw_wdev_priv *pwdev_priv;
+
+ DBG_8723A("%s(wdev =%p)\n", __func__, wdev);
+
+ if (!wdev)
+ return;
+
+ pwdev_priv = wdev_to_priv(wdev);
+
+ kfree(wdev->wiphy->bands[IEEE80211_BAND_2GHZ]);
+ kfree(wdev->wiphy->bands[IEEE80211_BAND_5GHZ]);
+
+ wiphy_free(wdev->wiphy);
+
+ kfree(wdev);
+}
+
+void rtw_wdev_unregister(struct wireless_dev *wdev)
+{
+ struct rtw_wdev_priv *pwdev_priv;
+
+ DBG_8723A("%s(wdev =%p)\n", __func__, wdev);
+
+ if (!wdev)
+ return;
+
+ pwdev_priv = wdev_to_priv(wdev);
+
+ rtw_cfg80211_indicate_scan_done(pwdev_priv, true);
+
+ if (pwdev_priv->pmon_ndev) {
+ DBG_8723A("%s, unregister monitor interface\n", __func__);
+ unregister_netdev(pwdev_priv->pmon_ndev);
+ }
+
+ wiphy_unregister(wdev->wiphy);
+}
diff --git a/drivers/staging/rtl8723au/os_dep/mlme_linux.c b/drivers/staging/rtl8723au/os_dep/mlme_linux.c
new file mode 100644
index 000000000000..b30d4d37556a
--- /dev/null
+++ b/drivers/staging/rtl8723au/os_dep/mlme_linux.c
@@ -0,0 +1,187 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#define _MLME_OSDEP_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <mlme_osdep.h>
+#include <rtw_ioctl_set.h>
+
+void rtw_os_indicate_connect23a(struct rtw_adapter *adapter)
+{
+ rtw_cfg80211_indicate_connect(adapter);
+
+ netif_carrier_on(adapter->pnetdev);
+
+ if (adapter->pid[2] != 0)
+ rtw_signal_process(adapter->pid[2], SIGALRM);
+}
+
+void rtw_os_indicate_scan_done23a(struct rtw_adapter *padapter, bool aborted)
+{
+ rtw_cfg80211_indicate_scan_done(wdev_to_priv(padapter->rtw_wdev),
+ aborted);
+}
+
+static struct rt_pmkid_list backupPMKIDList[NUM_PMKID_CACHE];
+
+void rtw_reset_securitypriv23a(struct rtw_adapter *adapter)
+{
+ u8 backupPMKIDIndex = 0;
+ u8 backupTKIPCountermeasure = 0x00;
+ unsigned long backupTKIPcountermeasure_time = 0;
+
+ if (adapter->securitypriv.dot11AuthAlgrthm ==
+ dot11AuthAlgrthm_8021X) { /* 802.1x */
+ /* We have to back up the PMK information for the WiFi PMK
+ * caching test item, as well as the btkip_countermeasure
+ * information.
+ * When the countermeasure is triggered, the driver has to
+ * disconnect from the AP for 60 seconds.
+ */
+ memset(&backupPMKIDList[0], 0x00, sizeof(struct rt_pmkid_list) *
+ NUM_PMKID_CACHE);
+
+ memcpy(&backupPMKIDList[0], &adapter->securitypriv.PMKIDList[0],
+ sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ backupPMKIDIndex = adapter->securitypriv.PMKIDIndex;
+ backupTKIPCountermeasure = adapter->securitypriv.btkip_countermeasure;
+ backupTKIPcountermeasure_time = adapter->securitypriv.btkip_countermeasure_time;
+
+ memset((unsigned char *)&adapter->securitypriv, 0,
+ sizeof (struct security_priv));
+ /* Restore the PMK information to securitypriv structure
+ * for the following connection.
+ */
+ memcpy(&adapter->securitypriv.PMKIDList[0], &backupPMKIDList[0],
+ sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ adapter->securitypriv.PMKIDIndex = backupPMKIDIndex;
+ adapter->securitypriv.btkip_countermeasure = backupTKIPCountermeasure;
+ adapter->securitypriv.btkip_countermeasure_time = backupTKIPcountermeasure_time;
+
+ adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
+ } else { /* reset values in securitypriv */
+ struct security_priv *psec_priv = &adapter->securitypriv;
+
+ /* open system */
+ psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+ psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psec_priv->dot11PrivacyKeyIndex = 0;
+
+ psec_priv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psec_priv->dot118021XGrpKeyid = 1;
+
+ psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
+ psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
+ }
+}
+
+void rtw_os_indicate_disconnect23a(struct rtw_adapter *adapter)
+{
+ /* Do it first for tx broadcast pkt after disconnection issue! */
+ netif_carrier_off(adapter->pnetdev);
+
+ rtw_cfg80211_indicate_disconnect(adapter);
+
+ rtw_reset_securitypriv23a(adapter);
+}
+
+void rtw_report_sec_ie23a(struct rtw_adapter *adapter, u8 authmode, u8 *sec_ie)
+{
+ uint len;
+ u8 *buff, *p, i;
+ union iwreq_data wrqu;
+
+ RT_TRACE(_module_mlme_osdep_c_, _drv_info_,
+ ("+rtw_report_sec_ie23a, authmode =%d\n", authmode));
+
+ buff = NULL;
+ if (authmode == _WPA_IE_ID_) {
+ RT_TRACE(_module_mlme_osdep_c_, _drv_info_,
+ ("rtw_report_sec_ie23a, authmode =%d\n", authmode));
+
+ buff = kzalloc(IW_CUSTOM_MAX, GFP_KERNEL);
+ if (!buff)
+ return;
+ p = buff;
+
+ p += sprintf(p, "ASSOCINFO(ReqIEs =");
+
+ len = sec_ie[1]+2;
+ len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
+
+ for (i = 0; i < len; i++)
+ p += sprintf(p, "%02x", sec_ie[i]);
+
+ p += sprintf(p, ")");
+
+ memset(&wrqu, 0, sizeof(wrqu));
+
+ wrqu.data.length = p-buff;
+
+ wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ?
+ wrqu.data.length : IW_CUSTOM_MAX;
+
+ kfree(buff);
+ }
+}
+
+#ifdef CONFIG_8723AU_AP_MODE
+void rtw_indicate_sta_assoc_event23a(struct rtw_adapter *padapter,
+ struct sta_info *psta)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ union iwreq_data wrqu;
+
+ if (psta == NULL)
+ return;
+
+ if (psta->aid > NUM_STA)
+ return;
+
+ if (pstapriv->sta_aid[psta->aid - 1] != psta)
+ return;
+
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
+
+ DBG_8723A("+rtw_indicate_sta_assoc_event23a\n");
+}
+
+void rtw_indicate_sta_disassoc_event23a(struct rtw_adapter *padapter,
+ struct sta_info *psta)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ union iwreq_data wrqu;
+
+ if (psta == NULL)
+ return;
+
+ if (psta->aid > NUM_STA)
+ return;
+
+ if (pstapriv->sta_aid[psta->aid - 1] != psta)
+ return;
+
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
+
+ DBG_8723A("+rtw_indicate_sta_disassoc_event23a\n");
+}
+#endif
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c
new file mode 100644
index 000000000000..57eca7a45672
--- /dev/null
+++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c
@@ -0,0 +1,970 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _OS_INTFS_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <xmit_osdep.h>
+#include <recv_osdep.h>
+#include <hal_intf.h>
+#include <rtw_version.h>
+#include <ethernet.h>
+
+#include <usb_osintf.h>
+#include <linux/version.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
+MODULE_AUTHOR("Realtek Semiconductor Corp.");
+MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
+MODULE_VERSION(DRIVERVERSION);
+MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");
+
+/* module param defaults */
+static int rtw_chip_version = 0x00;
+static int rtw_rfintfs = HWPI;
+static int rtw_debug = 1;
+
+static int rtw_channel = 1;/* ad-hoc support requirement */
+static int rtw_wireless_mode = WIRELESS_11BG_24N;
+static int rtw_vrtl_carrier_sense = AUTO_VCS;
+static int rtw_vcs_type = RTS_CTS;
+static int rtw_rts_thresh = 2347;
+static int rtw_frag_thresh = 2346;
+static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */
+static int rtw_scan_mode = 1;/* active, passive */
+static int rtw_adhoc_tx_pwr = 1;
+static int rtw_soft_ap;
+static int rtw_power_mgnt = 1;
+static int rtw_ips_mode = IPS_NORMAL;
+
+static int rtw_smart_ps = 2;
+
+module_param(rtw_ips_mode, int, 0644);
+MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode");
+
+static int rtw_long_retry_lmt = 7;
+static int rtw_short_retry_lmt = 7;
+static int rtw_busy_thresh = 40;
+static int rtw_ack_policy = NORMAL_ACK;
+
+static int rtw_acm_method;/* 0:By SW 1:By HW. */
+
+static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */
+static int rtw_uapsd_enable;
+
+int rtw_ht_enable23A = 1;
+/* 0: disable, bit(0): enable 2.4g, bit(1): enable 5g */
+int rtw_cbw40_enable23A = 3;
+int rtw_ampdu_enable23A = 1;/* for enable tx_ampdu */
+/* 0: disable, bit(0): enable 2.4g, bit(1): enable 5g; the default enables
+ * only 2.4GHz due to an interoperability issue with Buffalo APs at 5GHz
+ */
+static int rtw_rx_stbc = 1;
+static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
+
+/* Use 2 path Tx to transmit MCS0~7 and legacy mode */
+static int rtw_lowrate_two_xmit = 1;
+
+/* int rf_config = RF_1T2R; 1T2R */
+static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */
+static int rtw_low_power;
+static int rtw_wifi_spec;
+static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+static int rtw_btcoex_enable = 1;
+static int rtw_bt_iso = 2;/* 0:Low, 1:High, 2:From Efuse */
+/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter, 4.Busy, 5.OtherBusy */
+static int rtw_bt_sco = 3;
+/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
+static int rtw_bt_ampdu = 1;
+#endif
+
+/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. */
+static int rtw_AcceptAddbaReq = true;
+
+static int rtw_antdiv_cfg = 2; /* 0:OFF , 1:ON, 2:decide by Efuse config */
+static int rtw_antdiv_type; /* 0:decide by efuse */
+
+static int rtw_enusbss;/* 0:disable, 1:enable */
+
+static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */
+
+static int rtw_hwpwrp_detect; /* HW power pin detect: 0:disable, 1:enable */
+
+static int rtw_hw_wps_pbc = 1;
+
+static int rtw_80211d;
+
+static int rtw_regulatory_id = 0xff;/* Regulatory table id, 0xff = follow the efuse setting */
+
+module_param(rtw_regulatory_id, int, 0644);
+
+static char *ifname = "wlan%d";
+module_param(ifname, charp, 0644);
+MODULE_PARM_DESC(ifname, "The default name to allocate for first interface");
+
+static char *if2name = "wlan%d";
+module_param(if2name, charp, 0644);
+MODULE_PARM_DESC(if2name, "The default name to allocate for second interface");
+
+module_param(rtw_channel_plan, int, 0644);
+module_param(rtw_chip_version, int, 0644);
+module_param(rtw_rfintfs, int, 0644);
+module_param(rtw_channel, int, 0644);
+module_param(rtw_wmm_enable, int, 0644);
+module_param(rtw_vrtl_carrier_sense, int, 0644);
+module_param(rtw_vcs_type, int, 0644);
+module_param(rtw_busy_thresh, int, 0644);
+module_param(rtw_ht_enable23A, int, 0644);
+module_param(rtw_cbw40_enable23A, int, 0644);
+module_param(rtw_ampdu_enable23A, int, 0644);
+module_param(rtw_rx_stbc, int, 0644);
+module_param(rtw_ampdu_amsdu, int, 0644);
+
+module_param(rtw_lowrate_two_xmit, int, 0644);
+
+module_param(rtw_rf_config, int, 0644);
+module_param(rtw_power_mgnt, int, 0644);
+module_param(rtw_smart_ps, int, 0644);
+module_param(rtw_low_power, int, 0644);
+module_param(rtw_wifi_spec, int, 0644);
+
+module_param(rtw_antdiv_cfg, int, 0644);
+
+module_param(rtw_enusbss, int, 0644);
+module_param(rtw_hwpdn_mode, int, 0644);
+module_param(rtw_hwpwrp_detect, int, 0644);
+
+module_param(rtw_hw_wps_pbc, int, 0644);
+
+static uint rtw_max_roaming_times = 2;
+module_param(rtw_max_roaming_times, uint, 0644);
+MODULE_PARM_DESC(rtw_max_roaming_times, "The max roaming times to try");
+
+module_param(rtw_80211d, int, 0644);
+MODULE_PARM_DESC(rtw_80211d, "Enable 802.11d mechanism");
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+module_param(rtw_btcoex_enable, int, 0644);
+MODULE_PARM_DESC(rtw_btcoex_enable, "Enable BT co-existence mechanism");
+#endif
+
+static uint rtw_notch_filter;
+module_param(rtw_notch_filter, uint, 0644);
+MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P");
+module_param_named(debug, rtw_debug, int, 0444);
+MODULE_PARM_DESC(debug, "Set debug level (1-9) (default 1)");
+
+static int netdev_close(struct net_device *pnetdev);
+
+static uint loadparam(struct rtw_adapter *padapter, struct net_device *pnetdev)
+{
+ struct registry_priv *registry_par = &padapter->registrypriv;
+ uint status = _SUCCESS;
+
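+ /* copy the module parameters into the per-adapter registry_priv */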
+ GlobalDebugLevel23A = rtw_debug;
+ registry_par->chip_version = (u8)rtw_chip_version;
+ registry_par->rfintfs = (u8)rtw_rfintfs;
+ memcpy(registry_par->ssid.ssid, "ANY", 3);
+ registry_par->ssid.ssid_len = 3;
+ registry_par->channel = (u8)rtw_channel;
+ registry_par->wireless_mode = (u8)rtw_wireless_mode;
+ registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense;
+ registry_par->vcs_type = (u8)rtw_vcs_type;
+ registry_par->rts_thresh = (u16)rtw_rts_thresh;
+ registry_par->frag_thresh = (u16)rtw_frag_thresh;
+ registry_par->preamble = (u8)rtw_preamble;
+ registry_par->scan_mode = (u8)rtw_scan_mode;
+ registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr;
+ registry_par->soft_ap = (u8)rtw_soft_ap;
+ registry_par->smart_ps = (u8)rtw_smart_ps;
+ registry_par->power_mgnt = (u8)rtw_power_mgnt;
+ registry_par->ips_mode = (u8)rtw_ips_mode;
+ registry_par->long_retry_lmt = (u8)rtw_long_retry_lmt;
+ registry_par->short_retry_lmt = (u8)rtw_short_retry_lmt;
+ registry_par->busy_thresh = (u16)rtw_busy_thresh;
+ registry_par->ack_policy = (u8)rtw_ack_policy;
+ registry_par->acm_method = (u8)rtw_acm_method;
+ /* UAPSD */
+ registry_par->wmm_enable = (u8)rtw_wmm_enable;
+ registry_par->uapsd_enable = (u8)rtw_uapsd_enable;
+ registry_par->ht_enable = (u8)rtw_ht_enable23A;
+ registry_par->cbw40_enable = (u8)rtw_cbw40_enable23A;
+ registry_par->ampdu_enable = (u8)rtw_ampdu_enable23A;
+ registry_par->rx_stbc = (u8)rtw_rx_stbc;
+ registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu;
+ registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit;
+ registry_par->rf_config = (u8)rtw_rf_config;
+ registry_par->low_power = (u8)rtw_low_power;
+ registry_par->wifi_spec = (u8)rtw_wifi_spec;
+ registry_par->channel_plan = (u8)rtw_channel_plan;
+#ifdef CONFIG_8723AU_BT_COEXIST
+ registry_par->btcoex = (u8)rtw_btcoex_enable;
+ registry_par->bt_iso = (u8)rtw_bt_iso;
+ registry_par->bt_sco = (u8)rtw_bt_sco;
+ registry_par->bt_ampdu = (u8)rtw_bt_ampdu;
+#endif
+ registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq;
+ registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg;
+ registry_par->antdiv_type = (u8)rtw_antdiv_type;
+
+ /* 0:disable, 1:enable, 2:by EFUSE config */
+ registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode;
+ /* 0:disable, 1:enable */
+ registry_par->hwpwrp_detect = (u8)rtw_hwpwrp_detect;
+ registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc;
+ registry_par->max_roaming_times = (u8)rtw_max_roaming_times;
+ registry_par->enable80211d = (u8)rtw_80211d;
+ snprintf(registry_par->ifname, 16, "%s", ifname);
+ snprintf(registry_par->if2name, 16, "%s", if2name);
+ registry_par->notch_filter = (u8)rtw_notch_filter;
+ registry_par->regulatory_tid = (u8)rtw_regulatory_id;
+ return status;
+}
+
+static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
+{
+ struct rtw_adapter *padapter = netdev_priv(pnetdev);
+ struct sockaddr *addr = p;
+
+ if (!padapter->bup)
+ ether_addr_copy(padapter->eeprompriv.mac_addr, addr->sa_data);
+ return 0;
+}
+
+static struct net_device_stats *rtw_net_get_stats(struct net_device *pnetdev)
+{
+ struct rtw_adapter *padapter = netdev_priv(pnetdev);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ padapter->stats.tx_packets = pxmitpriv->tx_pkts;
+ padapter->stats.rx_packets = precvpriv->rx_pkts;
+ padapter->stats.tx_dropped = pxmitpriv->tx_drop;
+ padapter->stats.rx_dropped = precvpriv->rx_drop;
+ padapter->stats.tx_bytes = pxmitpriv->tx_bytes;
+ padapter->stats.rx_bytes = precvpriv->rx_bytes;
+
+ return &padapter->stats;
+}
+
+/*
+ * AC to queue mapping
+ *
+ * AC_VO -> queue 0
+ * AC_VI -> queue 1
+ * AC_BE -> queue 2
+ * AC_BK -> queue 3
+ */
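+/* index: 802.1d priority (0-7), value: TX queue of the matching AC */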
+static const u16 rtw_1d_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
+
+/* Given a data frame, determine the 802.1p/1d tag to use. */
+static unsigned int rtw_classify8021d(struct sk_buff *skb)
+{
+ unsigned int dscp;
+
+ /* skb->priority values from 256->263 are magic values to
+ * directly indicate a specific 802.1d priority. This is used
+ * to allow 802.1d priority to be passed directly in from VLAN
+ * tags, etc.
+ */
+ if (skb->priority >= 256 && skb->priority <= 263)
+ return skb->priority - 256;
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ dscp = ip_hdr(skb)->tos & 0xfc;
+ break;
+ default:
+ return 0;
+ }
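+ /* the 802.1d priority is the IP precedence, i.e. the top three bits
+ * of the TOS byte
+ */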
+ return dscp >> 5;
+}
+
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv,
+ select_queue_fallback_t fallback)
+{
+ struct rtw_adapter *padapter = netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ skb->priority = rtw_classify8021d(skb);
+
+ if (pmlmepriv->acm_mask != 0)
+ skb->priority = qos_acm23a(pmlmepriv->acm_mask, skb->priority);
+ return rtw_1d_to_queue[skb->priority];
+}
+
+u16 rtw_recv_select_queue23a(struct sk_buff *skb)
+{
+ struct iphdr *piphdr;
+ unsigned int dscp;
+ u16 eth_type;
+ u32 priority;
+ u8 *pdata = skb->data;
+
+ memcpy(&eth_type, pdata + (ETH_ALEN << 1), 2);
+ switch (eth_type) {
+ case htons(ETH_P_IP):
+ piphdr = (struct iphdr *)(pdata + ETH_HLEN);
+ dscp = piphdr->tos & 0xfc;
+ priority = dscp >> 5;
+ break;
+ default:
+ priority = 0;
+ }
+ return rtw_1d_to_queue[priority];
+}
+
+static const struct net_device_ops rtw_netdev_ops = {
+ .ndo_open = netdev_open23a,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = rtw_xmit23a_entry23a,
+ .ndo_select_queue = rtw_select_queue,
+ .ndo_set_mac_address = rtw_net_set_mac_address,
+ .ndo_get_stats = rtw_net_get_stats,
+};
+
+int rtw_init_netdev23a_name23a(struct net_device *pnetdev, const char *ifname)
+{
+ if (dev_alloc_name(pnetdev, ifname) < 0) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("dev_alloc_name, fail!\n"));
+ }
+ netif_carrier_off(pnetdev);
+ return 0;
+}
+
+static const struct device_type wlan_type = {
+ .name = "wlan",
+};
+
+struct net_device *rtw_init_netdev23a(struct rtw_adapter *old_padapter)
+{
+ struct rtw_adapter *padapter;
+ struct net_device *pnetdev;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+init_net_dev\n"));
+
+ pnetdev = alloc_etherdev_mq(sizeof(struct rtw_adapter), 4);
+ if (!pnetdev)
+ return NULL;
+
+ pnetdev->dev.type = &wlan_type;
+ padapter = netdev_priv(pnetdev);
+ padapter->pnetdev = pnetdev;
+
+ DBG_8723A("register rtw_netdev_ops to netdev_ops\n");
+ pnetdev->netdev_ops = &rtw_netdev_ops;
+
+ pnetdev->watchdog_timeo = HZ*3; /* 3 second timeout */
+
+ /* step 2. */
+ loadparam(padapter, pnetdev);
+ return pnetdev;
+}
+
+u32 rtw_start_drv_threads23a(struct rtw_adapter *padapter)
+{
+ u32 _status = _SUCCESS;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_,
+ ("+rtw_start_drv_threads23a\n"));
+ padapter->cmdThread = kthread_run(rtw_cmd_thread23a, padapter,
+ "RTW_CMD_THREAD");
+ if (IS_ERR(padapter->cmdThread)) {
+ _status = _FAIL;
+ } else {
+ /* wait for cmd_thread to run */
+ down(&padapter->cmdpriv.terminate_cmdthread_sema);
+ }
+ rtw_hal_start_thread23a(padapter);
+ return _status;
+}
+
+void rtw_stop_drv_threads23a(struct rtw_adapter *padapter)
+{
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads23a\n"));
+
+ /* Below we terminate rtw_cmd_thread23a and the event thread... */
+ up(&padapter->cmdpriv.cmd_queue_sema);
+ if (padapter->cmdThread)
+ down(&padapter->cmdpriv.terminate_cmdthread_sema);
+ rtw_hal_stop_thread23a(padapter);
+}
+
+static u8 rtw_init_default_value(struct rtw_adapter *padapter)
+{
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u8 ret = _SUCCESS;
+
+ /* xmit_priv */
+ pxmitpriv->vcs_setting = pregistrypriv->vrtl_carrier_sense;
+ pxmitpriv->vcs = pregistrypriv->vcs_type;
+ pxmitpriv->vcs_type = pregistrypriv->vcs_type;
+ /* pxmitpriv->rts_thresh = pregistrypriv->rts_thresh; */
+ pxmitpriv->frag_len = pregistrypriv->frag_thresh;
+
+ /* mlme_priv */
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+ pmlmepriv->scan_mode = SCAN_ACTIVE;
+
+ /* ht_priv */
+ pmlmepriv->htpriv.ampdu_enable = false;/* set to disabled */
+
+ /* security_priv */
+ psecuritypriv->binstallGrpkey = _FAIL;
+
+ /* open system */
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+
+ psecuritypriv->dot11PrivacyKeyIndex = 0;
+
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psecuritypriv->dot118021XGrpKeyid = 1;
+
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
+ psecuritypriv->ndisencryptstatus = Ndis802_11WEPDisabled;
+
+ /* registry_priv */
+ rtw_init_registrypriv_dev_network23a(padapter);
+ rtw_update_registrypriv_dev_network23a(padapter);
+
+ /* hal_priv */
+ rtw_hal_def_value_init23a(padapter);
+
+ /* misc. */
+ padapter->bReadPortCancel = false;
+ padapter->bWritePortCancel = false;
+ padapter->bRxRSSIDisplay = 0;
+ padapter->bNotifyChannelChange = 0;
+#ifdef CONFIG_8723AU_P2P
+ padapter->bShowGetP2PState = 1;
+#endif
+ return ret;
+}
+
+u8 rtw_reset_drv_sw23a(struct rtw_adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+ u8 ret8 = _SUCCESS;
+
+ /* hal_priv */
+ rtw_hal_def_value_init23a(padapter);
+ padapter->bReadPortCancel = false;
+ padapter->bWritePortCancel = false;
+ padapter->bRxRSSIDisplay = 0;
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+
+ padapter->xmitpriv.tx_pkts = 0;
+ padapter->recvpriv.rx_pkts = 0;
+
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);
+
+ rtw_hal_sreset_reset23a_value23a(padapter);
+ pwrctrlpriv->pwr_state_check_cnts = 0;
+
+ /* mlmeextpriv */
+ padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE;
+
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+ return ret8;
+}
+
+u8 rtw_init_drv_sw23a(struct rtw_adapter *padapter)
+{
+ u8 ret8 = _SUCCESS;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw23a\n"));
+
+ if ((rtw_init_cmd_priv23a(&padapter->cmdpriv)) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("\n Can't init cmd_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ padapter->cmdpriv.padapter = padapter;
+
+ if (rtw_init_evt_priv23a(&padapter->evtpriv) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("\n Can't init evt_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (rtw_init_mlme_priv23a(padapter) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("\n Can't init mlme_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+#ifdef CONFIG_8723AU_P2P
+ rtw_init_wifidirect_timers23a(padapter);
+ init_wifidirect_info23a(padapter, P2P_ROLE_DISABLE);
+ reset_global_wifidirect_info23a(padapter);
+ rtw_init_cfg80211_wifidirect_info(padapter);
+#ifdef CONFIG_8723AU_P2P
+ if (rtw_init_wifi_display_info(padapter) == _FAIL)
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("\n Can't init init_wifi_display_info\n"));
+#endif
+#endif /* CONFIG_8723AU_P2P */
+
+ if (init_mlme_ext_priv23a(padapter) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("\n Can't init mlme_ext_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_init_xmit_priv23a(&padapter->xmitpriv, padapter) == _FAIL) {
+ DBG_8723A("Can't _rtw_init_xmit_priv23a\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_init_recv_priv23a(&padapter->recvpriv, padapter) == _FAIL) {
+ DBG_8723A("Can't _rtw_init_recv_priv23a\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_init_sta_priv23a(&padapter->stapriv) == _FAIL) {
+ DBG_8723A("Can't _rtw_init_sta_priv23a\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ padapter->stapriv.padapter = padapter;
+ padapter->setband = GHZ24_50;
+ rtw_init_bcmc_stainfo23a(padapter);
+
+ rtw_init_pwrctrl_priv23a(padapter);
+
+ ret8 = rtw_init_default_value(padapter);
+
+ rtw_hal_dm_init23a(padapter);
+ rtw_hal_sw_led_init23a(padapter);
+
+ rtw_hal_sreset_init23a(padapter);
+
+exit:
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw23a\n"));
+ return ret8;
+}
+
+void rtw_cancel_all_timer23a(struct rtw_adapter *padapter)
+{
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_cancel_all_timer23a\n"));
+
+ del_timer_sync(&padapter->mlmepriv.assoc_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_,
+ ("rtw_cancel_all_timer23a:cancel association timer complete!\n"));
+
+ del_timer_sync(&padapter->mlmepriv.scan_to_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_,
+ ("rtw_cancel_all_timer23a:cancel scan_to_timer!\n"));
+
+ del_timer_sync(&padapter->mlmepriv.dynamic_chk_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_,
+ ("rtw_cancel_all_timer23a:cancel dynamic_chk_timer!\n"));
+
+ /* cancel sw led timer */
+ rtw_hal_sw_led_deinit23a(padapter);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_,
+ ("rtw_cancel_all_timer23a:cancel DeInitSwLeds!\n"));
+
+ del_timer_sync(&padapter->pwrctrlpriv.pwr_state_check_timer);
+
+#ifdef CONFIG_8723AU_P2P
+ del_timer_sync(&padapter->cfg80211_wdinfo.remain_on_ch_timer);
+#endif /* CONFIG_8723AU_P2P */
+
+ del_timer_sync(&padapter->mlmepriv.set_scan_deny_timer);
+ rtw_clear_scan_deny(padapter);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_,
+ ("rtw_cancel_all_timer23a:cancel set_scan_deny_timer!\n"));
+
+ del_timer_sync(&padapter->recvpriv.signal_stat_timer);
+ /* cancel dm timer */
+ rtw_hal_dm_deinit23a(padapter);
+}
+
+u8 rtw_free_drv_sw23a(struct rtw_adapter *padapter)
+{
+#ifdef CONFIG_8723AU_P2P
+ struct wifidirect_info *pwdinfo;
+#endif
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>rtw_free_drv_sw23a"));
+
+ /* We could call rtw_p2p_enable23a() here, but:
+ * 1. rtw_p2p_enable23a() may perform I/O
+ * 2. rtw_p2p_enable23a() is bundled with the wext interface
+ */
+#ifdef CONFIG_8723AU_P2P
+ pwdinfo = &padapter->wdinfo;
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ del_timer_sync(&pwdinfo->find_phase_timer);
+ del_timer_sync(&pwdinfo->restore_p2p_state_timer);
+ del_timer_sync(&pwdinfo->pre_tx_scan_timer);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE);
+ }
+#endif
+
+ free_mlme_ext_priv23a(&padapter->mlmeextpriv);
+
+ rtw_free_cmd_priv23a(&padapter->cmdpriv);
+
+ rtw_free_evt_priv23a(&padapter->evtpriv);
+
+ rtw_free_mlme_priv23a(&padapter->mlmepriv);
+
+ _rtw_free_xmit_priv23a(&padapter->xmitpriv);
+
+ _rtw_free_sta_priv23a(&padapter->stapriv);/* will free bcmc_stainfo here */
+
+ _rtw_free_recv_priv23a(&padapter->recvpriv);
+
+ rtw_free_pwrctrl_priv(padapter);
+
+ rtw_hal_free_data23a(padapter);
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<== rtw_free_drv_sw23a\n"));
+
+ /* free the old_pnetdev */
+ if (padapter->rereg_nd_name_priv.old_pnetdev) {
+ free_netdev(padapter->rereg_nd_name_priv.old_pnetdev);
+ padapter->rereg_nd_name_priv.old_pnetdev = NULL;
+ }
+
+ /* clear pbuddy_adapter to avoid accessing a stale pointer */
+ if (padapter->pbuddy_adapter != NULL)
+ padapter->pbuddy_adapter->pbuddy_adapter = NULL;
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_free_drv_sw23a\n"));
+ return _SUCCESS;
+}
+
+static int _rtw_drv_register_netdev(struct rtw_adapter *padapter, char *name)
+{
+ struct net_device *pnetdev = padapter->pnetdev;
+ int ret = _SUCCESS;
+
+ /* alloc netdev name */
+ rtw_init_netdev23a_name23a(pnetdev, name);
+
+ ether_addr_copy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr);
+
+ /* Tell the network stack we exist */
+ if (register_netdev(pnetdev)) {
+ DBG_8723A(FUNC_NDEV_FMT "Failed!\n", FUNC_NDEV_ARG(pnetdev));
+ ret = _FAIL;
+ goto error_register_netdev;
+ }
+ DBG_8723A("%s, MAC Address (if%d) = " MAC_FMT "\n", __func__,
+ (padapter->iface_id + 1), MAC_ARG(pnetdev->dev_addr));
+ return ret;
+
+error_register_netdev:
+
+ if (padapter->iface_id > IFACE_ID0) {
+ rtw_free_drv_sw23a(padapter);
+
+ free_netdev(pnetdev);
+ }
+ return ret;
+}
+
+int rtw_drv_register_netdev(struct rtw_adapter *if1)
+{
+ struct dvobj_priv *dvobj = if1->dvobj;
+ int i, status = _SUCCESS;
+
+ if (dvobj->iface_nums < IFACE_ID_MAX) {
+ for (i = 0; i < dvobj->iface_nums; i++) {
+ struct rtw_adapter *padapter = dvobj->padapters[i];
+
+ if (padapter) {
+ char *name;
+
+ if (padapter->iface_id == IFACE_ID0)
+ name = if1->registrypriv.ifname;
+ else if (padapter->iface_id == IFACE_ID1)
+ name = if1->registrypriv.if2name;
+ else
+ name = "wlan%d";
+ status = _rtw_drv_register_netdev(padapter,
+ name);
+ if (status != _SUCCESS)
+ break;
+ }
+ }
+ }
+ return status;
+}
+
+int netdev_open23a(struct net_device *pnetdev)
+{
+ struct rtw_adapter *padapter = netdev_priv(pnetdev);
+ struct pwrctrl_priv *pwrctrlpriv;
+ int ret = 0;
+ uint status;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - dev_open\n"));
+ DBG_8723A("+871x_drv - drv_open, bup =%d\n", padapter->bup);
+
+ mutex_lock(&adapter_to_dvobj(padapter)->hw_init_mutex);
+
+ pwrctrlpriv = &padapter->pwrctrlpriv;
+ if (pwrctrlpriv->ps_flag) {
+ padapter->net_closed = false;
+ goto netdev_open23a_normal_process;
+ }
+
+ if (!padapter->bup) {
+ padapter->bDriverStopped = false;
+ padapter->bSurpriseRemoved = false;
+ padapter->bCardDisableWOHSM = false;
+
+ status = rtw_hal_init23a(padapter);
+ if (status == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("rtl871x_hal_init(): Can't init h/w!\n"));
+ goto netdev_open23a_error;
+ }
+
+ DBG_8723A("MAC Address = "MAC_FMT"\n",
+ MAC_ARG(pnetdev->dev_addr));
+
+ status = rtw_start_drv_threads23a(padapter);
+ if (status == _FAIL) {
+ DBG_8723A("Initialize driver software resource Failed!\n");
+ goto netdev_open23a_error;
+ }
+
+ if (init_hw_mlme_ext23a(padapter) == _FAIL) {
+ DBG_8723A("can't init mlme_ext_priv\n");
+ goto netdev_open23a_error;
+ }
+
+ if (padapter->intf_start)
+ padapter->intf_start(padapter);
+
+ rtw_cfg80211_init_wiphy(padapter);
+
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+
+ padapter->bup = true;
+ }
+ padapter->net_closed = false;
+
+ mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
+ jiffies + msecs_to_jiffies(2000));
+
+ padapter->pwrctrlpriv.bips_processing = false;
+ rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
+
+ /* netif_carrier_on(pnetdev) is called when
+ rtw23a_joinbss_event_cb() returns success */
+ if (!rtw_netif_queue_stopped(pnetdev))
+ netif_tx_start_all_queues(pnetdev);
+ else
+ netif_tx_wake_all_queues(pnetdev);
+
+netdev_open23a_normal_process:
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - dev_open\n"));
+ DBG_8723A("-871x_drv - drv_open, bup =%d\n", padapter->bup);
+exit:
+ mutex_unlock(&adapter_to_dvobj(padapter)->hw_init_mutex);
+ return ret;
+
+netdev_open23a_error:
+ padapter->bup = false;
+
+ netif_carrier_off(pnetdev);
+ netif_tx_stop_all_queues(pnetdev);
+
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("-871x_drv - dev_open, fail!\n"));
+ DBG_8723A("-871x_drv - drv_open fail, bup =%d\n", padapter->bup);
+
+ ret = -1;
+ goto exit;
+}
+
+static int ips_netdrv_open(struct rtw_adapter *padapter)
+{
+ int status = _SUCCESS;
+
+ padapter->net_closed = false;
+ DBG_8723A("===> %s.........\n", __func__);
+
+ padapter->bDriverStopped = false;
+ padapter->bSurpriseRemoved = false;
+ padapter->bCardDisableWOHSM = false;
+
+ status = rtw_hal_init23a(padapter);
+ if (status == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("ips_netdrv_open(): Can't init h/w!\n"));
+ goto netdev_open23a_error;
+ }
+
+ if (padapter->intf_start)
+ padapter->intf_start(padapter);
+
+ rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
+ mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
+ jiffies + msecs_to_jiffies(5000));
+
+ return _SUCCESS;
+
+netdev_open23a_error:
+ /* padapter->bup = false; */
+ DBG_8723A("-ips_netdrv_open - drv_open failure, bup =%d\n",
+ padapter->bup);
+
+ return _FAIL;
+}
+
+int rtw_ips_pwr_up23a(struct rtw_adapter *padapter)
+{
+ int result;
+ unsigned long start_time = jiffies;
+
+ DBG_8723A("===> rtw_ips_pwr_up23a..............\n");
+ rtw_reset_drv_sw23a(padapter);
+
+ result = ips_netdrv_open(padapter);
+
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+
+ DBG_8723A("<=== rtw_ips_pwr_up23a.............. in %dms\n",
+ jiffies_to_msecs(jiffies - start_time));
+ return result;
+}
+
+void rtw_ips_pwr_down23a(struct rtw_adapter *padapter)
+{
+ unsigned long start_time = jiffies;
+
+ DBG_8723A("===> rtw_ips_pwr_down23a...................\n");
+
+ padapter->bCardDisableWOHSM = true;
+ padapter->net_closed = true;
+
+ rtw_led_control(padapter, LED_CTL_POWER_OFF);
+
+ rtw_ips_dev_unload23a(padapter);
+ padapter->bCardDisableWOHSM = false;
+ DBG_8723A("<=== rtw_ips_pwr_down23a..................... in %dms\n",
+ jiffies_to_msecs(jiffies - start_time));
+}
+
+void rtw_ips_dev_unload23a(struct rtw_adapter *padapter)
+{
+ rtw_hal_set_hwreg23a(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
+
+ if (padapter->intf_stop)
+ padapter->intf_stop(padapter);
+
+ /* s5. */
+ if (!padapter->bSurpriseRemoved)
+ rtw_hal_deinit23a(padapter);
+}
+
+int pm_netdev_open23a(struct net_device *pnetdev, u8 bnormal)
+{
+ int status;
+
+ if (bnormal)
+ status = netdev_open23a(pnetdev);
+ else
+ status = (_SUCCESS == ips_netdrv_open(netdev_priv(pnetdev))) ?
+ (0) : (-1);
+
+ return status;
+}
+
+static int netdev_close(struct net_device *pnetdev)
+{
+ struct rtw_adapter *padapter = netdev_priv(pnetdev);
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - drv_close\n"));
+
+ if (padapter->pwrctrlpriv.bInternalAutoSuspend) {
+ if (padapter->pwrctrlpriv.rf_pwrstate == rf_off)
+ padapter->pwrctrlpriv.ps_flag = true;
+ }
+ padapter->net_closed = true;
+
+ if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) {
+ DBG_8723A("(2)871x_drv - drv_close, bup =%d, hw_init_completed =%d\n",
+ padapter->bup,
+ padapter->hw_init_completed);
+
+ /* s1. */
+ if (pnetdev) {
+ if (!rtw_netif_queue_stopped(pnetdev))
+ netif_tx_stop_all_queues(pnetdev);
+ }
+
+ /* s2. */
+ LeaveAllPowerSaveMode23a(padapter);
+ rtw_disassoc_cmd23a(padapter, 500, false);
+ /* s2-2. indicate disconnect to os */
+ rtw_indicate_disconnect23a(padapter);
+ /* s2-3. */
+ rtw_free_assoc_resources23a(padapter, 1);
+ /* s2-4. */
+ rtw_free_network_queue23a(padapter, true);
+ /* Close LED */
+ rtw_led_control(padapter, LED_CTL_POWER_OFF);
+ }
+
+#ifdef CONFIG_8723AU_P2P
+ if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled)
+ wdev_to_priv(padapter->rtw_wdev)->p2p_enabled = false;
+ rtw_p2p_enable23a(padapter, P2P_ROLE_DISABLE);
+#endif /* CONFIG_8723AU_P2P */
+
+ rtw_scan_abort23a(padapter);
+ /* set this at the end */
+ padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n"));
+ DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup);
+
+ return 0;
+}
+
+void rtw_ndev_destructor(struct net_device *ndev)
+{
+ DBG_8723A(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+ kfree(ndev->ieee80211_ptr);
+ free_netdev(ndev);
+}
diff --git a/drivers/staging/rtl8723au/os_dep/osdep_service.c b/drivers/staging/rtl8723au/os_dep/osdep_service.c
new file mode 100644
index 000000000000..97fc27dce19c
--- /dev/null
+++ b/drivers/staging/rtl8723au/os_dep/osdep_service.c
@@ -0,0 +1,175 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+#define _OSDEP_SERVICE_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <linux/vmalloc.h>
+
+#define RT_TAG ('1178')
+
+/*
+ * RTW_STATUS_CODE23a - translate an OS dependent error code into an
+ * OS independent status code (_SUCCESS or _FAIL)
+ */
+inline int RTW_STATUS_CODE23a(int error_code)
+{
+ if (error_code >= 0)
+ return _SUCCESS;
+ return _FAIL;
+}
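For illustration, a non-negative kernel return value maps to _SUCCESS and a negative errno maps to _FAIL; the call below is hypothetical, shown only to make the mapping concrete:

    int err = some_kernel_call();   /* hypothetical helper returning 0 or -errno */

    if (RTW_STATUS_CODE23a(err) == _FAIL)
        DBG_8723A("call failed: %d\n", err);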
+
+inline u8 *_rtw_vmalloc(u32 sz)
+{
+ u8 *pbuf;
+ pbuf = vmalloc(sz);
+
+ return pbuf;
+}
+
+inline u8 *_rtw_zvmalloc(u32 sz)
+{
+ u8 *pbuf;
+ pbuf = _rtw_vmalloc(sz);
+ if (pbuf != NULL)
+ memset(pbuf, 0, sz);
+
+ return pbuf;
+}
+
+inline void _rtw_vmfree(u8 *pbuf, u32 sz)
+{
+ vfree(pbuf);
+}
+
+void _rtw_init_queue23a(struct rtw_queue *pqueue)
+{
+ INIT_LIST_HEAD(&pqueue->queue);
+ spin_lock_init(&pqueue->lock);
+}
+
+u32 _rtw_queue_empty23a(struct rtw_queue *pqueue)
+{
+ if (list_empty(&pqueue->queue))
+ return true;
+ else
+ return false;
+}
+
+u64 rtw_modular6423a(u64 x, u64 y)
+{
+ return do_div(x, y);
+}
+
+u64 rtw_division6423a(u64 x, u64 y)
+{
+ do_div(x, y);
+ return x;
+}
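As a point of reference, do_div() divides the 64-bit dividend in place and returns the remainder, so the wrapper above yields the remainder and this one the quotient; a small sketch with arbitrary values:

    u64 x = 1000003;
    u64 rem, quot;

    rem  = rtw_modular6423a(x, 10);   /* rem == 3; x is passed by value, so the caller's copy is untouched */
    quot = rtw_division6423a(x, 10);  /* quot == 100000 */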
+
+/**
+ * rtw_cbuf_full23a - test if cbuf is full
+ * @cbuf: pointer of struct rtw_cbuf
+ *
+ * Returns: true if cbuf is full
+ */
+inline bool rtw_cbuf_full23a(struct rtw_cbuf *cbuf)
+{
+ return cbuf->write == cbuf->read - 1;
+}
+
+/**
+ * rtw_cbuf_empty23a - test if cbuf is empty
+ * @cbuf: pointer of struct rtw_cbuf
+ *
+ * Returns: true if cbuf is empty
+ */
+inline bool rtw_cbuf_empty23a(struct rtw_cbuf *cbuf)
+{
+ return cbuf->write == cbuf->read;
+}
+
+/**
+ * rtw_cbuf_push23a - push a pointer into cbuf
+ * @cbuf: pointer of struct rtw_cbuf
+ * @buf: pointer to push in
+ *
+ * Lock free operation, be careful of the use scheme
+ * Returns: true push success
+ */
+bool rtw_cbuf_push23a(struct rtw_cbuf *cbuf, void *buf)
+{
+ if (rtw_cbuf_full23a(cbuf))
+ return _FAIL;
+
+ if (0)
+ DBG_8723A("%s on %u\n", __func__, cbuf->write);
+ cbuf->bufs[cbuf->write] = buf;
+ cbuf->write = (cbuf->write+1)%cbuf->size;
+
+ return _SUCCESS;
+}
+
+/**
+ * rtw_cbuf_pop23a - pop a pointer from cbuf
+ * @cbuf: pointer of struct rtw_cbuf
+ *
+ * Lock free operation, be careful of the use scheme
+ * Returns: pointer popped out
+ */
+void *rtw_cbuf_pop23a(struct rtw_cbuf *cbuf)
+{
+ void *buf;
+ if (rtw_cbuf_empty23a(cbuf))
+ return NULL;
+
+ if (0)
+ DBG_8723A("%s on %u\n", __func__, cbuf->read);
+ buf = cbuf->bufs[cbuf->read];
+ cbuf->read = (cbuf->read+1)%cbuf->size;
+
+ return buf;
+}
+
+/**
+ * rtw_cbuf_alloc23a - allocate a rtw_cbuf with given size and do initialization
+ * @size: number of pointer slots
+ *
+ * Returns: pointer of struct rtw_cbuf, NULL for allocation failure
+ */
+struct rtw_cbuf *rtw_cbuf_alloc23a(u32 size)
+{
+ struct rtw_cbuf *cbuf;
+
+ cbuf = kmalloc(sizeof(*cbuf) + sizeof(void *)*size, GFP_KERNEL);
+
+ if (cbuf) {
+ cbuf->write = 0;
+ cbuf->read = 0;
+ cbuf->size = size;
+ }
+
+ return cbuf;
+}
+
+/**
+ * rtw_cbuf_free - free the given rtw_cbuf
+ * @cbuf: pointer of struct rtw_cbuf to free
+ */
+void rtw_cbuf_free(struct rtw_cbuf *cbuf)
+{
+ kfree(cbuf);
+}
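A minimal usage sketch of the pointer ring above, assuming one producer and one consumer as the lock-free comments require; the event pointer and consumer function are hypothetical:

    struct rtw_cbuf *ring = rtw_cbuf_alloc23a(8);
    void *item;

    if (!ring)
        return -ENOMEM;
    if (rtw_cbuf_push23a(ring, evt) != _SUCCESS)   /* evt: hypothetical payload */
        DBG_8723A("ring full, event dropped\n");
    while ((item = rtw_cbuf_pop23a(ring)) != NULL)
        handle_evt(item);                          /* hypothetical consumer */
    rtw_cbuf_free(ring);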
diff --git a/drivers/staging/rtl8723au/os_dep/recv_linux.c b/drivers/staging/rtl8723au/os_dep/recv_linux.c
new file mode 100644
index 000000000000..84402a589f25
--- /dev/null
+++ b/drivers/staging/rtl8723au/os_dep/recv_linux.c
@@ -0,0 +1,225 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RECV_OSDEP_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <wifi.h>
+#include <recv_osdep.h>
+
+#include <osdep_intf.h>
+#include <ethernet.h>
+
+#include <usb_ops.h>
+
+/* alloc os related resource in struct recv_frame */
+int rtw_os_recv_resource_alloc23a(struct rtw_adapter *padapter,
+ struct recv_frame *precvframe)
+{
+ int res = _SUCCESS;
+
+ precvframe->pkt = NULL;
+
+ return res;
+}
+
+/* alloc os related resource in struct recv_buf */
+int rtw_os_recvbuf_resource_alloc23a(struct rtw_adapter *padapter,
+ struct recv_buf *precvbuf)
+{
+ int res = _SUCCESS;
+
+ precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
+ if (precvbuf->purb == NULL)
+ res = _FAIL;
+
+ precvbuf->pskb = NULL;
+
+ return res;
+}
+
+/* free os related resource in struct recv_buf */
+int rtw_os_recvbuf_resource_free23a(struct rtw_adapter *padapter,
+ struct recv_buf *precvbuf)
+{
+ int ret = _SUCCESS;
+
+ usb_free_urb(precvbuf->purb);
+
+ if (precvbuf->pskb)
+ dev_kfree_skb_any(precvbuf->pskb);
+
+ return ret;
+}
+
+void rtw_handle_tkip_mic_err23a(struct rtw_adapter *padapter, u8 bgroup)
+{
+ enum nl80211_key_type key_type = 0;
+ union iwreq_data wrqu;
+ struct iw_michaelmicfailure ev;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ unsigned long cur_time;
+
+ if (psecuritypriv->last_mic_err_time == 0) {
+ psecuritypriv->last_mic_err_time = jiffies;
+ } else {
+ cur_time = jiffies;
+
+ if (cur_time - psecuritypriv->last_mic_err_time < 60*HZ) {
+ psecuritypriv->btkip_countermeasure = true;
+ psecuritypriv->last_mic_err_time = 0;
+ psecuritypriv->btkip_countermeasure_time = cur_time;
+ } else {
+ psecuritypriv->last_mic_err_time = jiffies;
+ }
+ }
+
+ if (bgroup)
+ key_type |= NL80211_KEYTYPE_GROUP;
+ else
+ key_type |= NL80211_KEYTYPE_PAIRWISE;
+
+ cfg80211_michael_mic_failure(padapter->pnetdev,
+ (u8 *)&pmlmepriv->assoc_bssid[0],
+ key_type, -1, NULL, GFP_ATOMIC);
+
+ memset(&ev, 0x00, sizeof(ev));
+ if (bgroup)
+ ev.flags |= IW_MICFAILURE_GROUP;
+ else
+ ev.flags |= IW_MICFAILURE_PAIRWISE;
+
+ ev.src_addr.sa_family = ARPHRD_ETHER;
+ ether_addr_copy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0]);
+
+ memset(&wrqu, 0x00, sizeof(wrqu));
+ wrqu.data.length = sizeof(ev);
+}
+
+void rtw_hostapd_mlme_rx23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+}
+
+int rtw_recv_indicatepkt23a(struct rtw_adapter *padapter,
+ struct recv_frame *precv_frame)
+{
+ struct recv_priv *precvpriv;
+ struct rtw_queue *pfree_recv_queue;
+ struct sk_buff *skb;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ precvpriv = &(padapter->recvpriv);
+ pfree_recv_queue = &(precvpriv->free_recv_queue);
+
+ skb = precv_frame->pkt;
+ if (!skb) {
+ RT_TRACE(_module_recv_osdep_c_, _drv_err_,
+ ("rtw_recv_indicatepkt23a():skb == NULL!!!!\n"));
+ goto _recv_indicatepkt_drop;
+ }
+
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("rtw_recv_indicatepkt23a():skb != NULL !!!\n"));
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("rtw_recv_indicatepkt23a():precv_frame->hdr.rx_data =%p\n",
+ precv_frame->pkt->data));
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("\n skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n",
+ skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb), skb->len));
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ struct sk_buff *pskb2 = NULL;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ int bmcast = is_multicast_ether_addr(pattrib->dst);
+
+ /* DBG_8723A("bmcast =%d\n", bmcast); */
+
+ if (!ether_addr_equal(pattrib->dst,
+ myid(&padapter->eeprompriv))) {
+ /* DBG_8723A("not ap psta =%p, addr =%pM\n", psta, pattrib->dst); */
+ if (bmcast) {
+ psta = rtw_get_bcmc_stainfo23a(padapter);
+ pskb2 = skb_clone(skb, GFP_ATOMIC);
+ } else {
+ psta = rtw_get_stainfo23a(pstapriv, pattrib->dst);
+ }
+
+ if (psta) {
+ struct net_device *pnetdev = padapter->pnetdev;
+
+ /* DBG_8723A("directly forwarding to the rtw_xmit23a_entry23a\n"); */
+
+ /* skb->ip_summed = CHECKSUM_NONE; */
+ skb->dev = pnetdev;
+ skb_set_queue_mapping(skb, rtw_recv_select_queue23a(skb));
+
+ rtw_xmit23a_entry23a(skb, pnetdev);
+
+ if (bmcast)
+ skb = pskb2;
+ else
+ goto _recv_indicatepkt_end;
+ }
+ } else { /* to APself */
+ /* DBG_8723A("to APSelf\n"); */
+ }
+ }
+
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->dev = padapter->pnetdev;
+ skb->protocol = eth_type_trans(skb, padapter->pnetdev);
+
+ netif_rx(skb);
+
+_recv_indicatepkt_end:
+
+ precv_frame->pkt = NULL; /* set pointer to NULL before rtw_free_recvframe23a() */
+
+ rtw_free_recvframe23a(precv_frame, pfree_recv_queue);
+
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("\n rtw_recv_indicatepkt23a :after netif_rx!!!!\n"));
+ return _SUCCESS;
+
+_recv_indicatepkt_drop:
+
+ rtw_free_recvframe23a(precv_frame, pfree_recv_queue);
+ return _FAIL;
+}
+
+void rtw_os_read_port23a(struct rtw_adapter *padapter, struct recv_buf *precvbuf)
+{
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ /* free skb in recv_buf */
+ dev_kfree_skb_any(precvbuf->pskb);
+
+ precvbuf->pskb = NULL;
+
+ rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, precvbuf);
+}
+
+void rtw_init_recv_timer23a(struct recv_reorder_ctrl *preorder_ctrl)
+{
+ setup_timer(&preorder_ctrl->reordering_ctrl_timer,
+ rtw_reordering_ctrl_timeout_handler23a,
+ (unsigned long)preorder_ctrl);
+}
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
new file mode 100644
index 000000000000..612806e0de2e
--- /dev/null
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -0,0 +1,833 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _HCI_INTF_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <xmit_osdep.h>
+#include <hal_intf.h>
+#include <rtw_version.h>
+#include <osdep_intf.h>
+#include <usb_vendor_req.h>
+#include <usb_ops.h>
+#include <usb_osintf.h>
+#include <usb_hal.h>
+
+static int rtw_suspend(struct usb_interface *intf, pm_message_t message);
+static int rtw_resume(struct usb_interface *intf);
+static int rtw_drv_init(struct usb_interface *pusb_intf,
+ const struct usb_device_id *pdid);
+static void rtw_disconnect(struct usb_interface *pusb_intf);
+
+#define USB_VENDER_ID_REALTEK 0x0BDA
+
+#define RTL8723A_USB_IDS \
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDER_ID_REALTEK, 0x8724, \
+ 0xff, 0xff, 0xff)}, /* 8723AU 1*1 */ \
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDER_ID_REALTEK, 0x1724, \
+ 0xff, 0xff, 0xff)}, /* 8723AU 1*1 */ \
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDER_ID_REALTEK, 0x0724, \
+ 0xff, 0xff, 0xff)}, /* 8723AU 1*1 */
+
+static struct usb_device_id rtl8723a_usb_id_tbl[] = {
+ RTL8723A_USB_IDS
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, rtl8723a_usb_id_tbl);
+
+static struct usb_driver rtl8723a_usb_drv = {
+ .name = (char *)"rtl8723au",
+ .probe = rtw_drv_init,
+ .disconnect = rtw_disconnect,
+ .id_table = rtl8723a_usb_id_tbl,
+ .suspend = rtw_suspend,
+ .resume = rtw_resume,
+ .reset_resume = rtw_resume,
+};
+
+static struct usb_driver *usb_drv = &rtl8723a_usb_drv;
+
+static inline int RT_usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN;
+}
+
+static inline int RT_usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT;
+}
+
+static inline int RT_usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT;
+}
+
+static inline int RT_usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK;
+}
+
+static inline int RT_usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
+{
+ return RT_usb_endpoint_xfer_bulk(epd) && RT_usb_endpoint_dir_in(epd);
+}
+
+static inline int RT_usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
+{
+ return RT_usb_endpoint_xfer_bulk(epd) && RT_usb_endpoint_dir_out(epd);
+}
+
+static inline int RT_usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
+{
+ return RT_usb_endpoint_xfer_int(epd) && RT_usb_endpoint_dir_in(epd);
+}
+
+static inline int RT_usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
+{
+ return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+}
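These RT_usb_endpoint_* wrappers duplicate checks the USB core already provides in <linux/usb/ch9.h>; for comparison, the same classification could be written with the generic helpers (the pipe arrays below are placeholders, not fields of this driver):

    if (usb_endpoint_is_bulk_in(pendp_desc))
        in_pipe[nr_in++] = usb_endpoint_num(pendp_desc);
    else if (usb_endpoint_is_int_in(pendp_desc))
        in_pipe[nr_in++] = usb_endpoint_num(pendp_desc);
    else if (usb_endpoint_is_bulk_out(pendp_desc))
        out_pipe[nr_out++] = usb_endpoint_num(pendp_desc);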
+
+static u8 rtw_init_intf_priv(struct dvobj_priv *dvobj)
+{
+ u8 rst = _SUCCESS;
+
+ mutex_init(&dvobj->usb_vendor_req_mutex);
+ dvobj->usb_alloc_vendor_req_buf = kzalloc(MAX_USB_IO_CTL_SIZE,
+ GFP_KERNEL);
+ if (dvobj->usb_alloc_vendor_req_buf == NULL) {
+ DBG_8723A("alloc usb_vendor_req_buf failed... /n");
+ rst = _FAIL;
+ goto exit;
+ }
+ dvobj->usb_vendor_req_buf =
+ PTR_ALIGN(dvobj->usb_alloc_vendor_req_buf, ALIGNMENT_UNIT);
+exit:
+ return rst;
+}
+
+static u8 rtw_deinit_intf_priv(struct dvobj_priv *dvobj)
+{
+ u8 rst = _SUCCESS;
+
+ kfree(dvobj->usb_alloc_vendor_req_buf);
+
+ mutex_destroy(&dvobj->usb_vendor_req_mutex);
+
+ return rst;
+}
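The init/deinit pair above follows the over-allocate-then-align idiom: PTR_ALIGN() derives an ALIGNMENT_UNIT-aligned view used for vendor requests, while the original pointer is the one that must be kfree()d. A hedged sketch of the same pattern:

    u8 *raw = kzalloc(MAX_USB_IO_CTL_SIZE, GFP_KERNEL);
    u8 *io_buf;

    if (!raw)
        return _FAIL;
    io_buf = PTR_ALIGN(raw, ALIGNMENT_UNIT);  /* aligned buffer handed to the I/O path */
    /* ... issue vendor requests through io_buf ... */
    kfree(raw);                               /* free the original pointer, never io_buf */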
+
+static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
+{
+ struct dvobj_priv *pdvobjpriv;
+ struct usb_device_descriptor *pdev_desc;
+ struct usb_host_config *phost_conf;
+ struct usb_config_descriptor *pconf_desc;
+ struct usb_host_interface *phost_iface;
+ struct usb_interface_descriptor *piface_desc;
+ struct usb_host_endpoint *phost_endp;
+ struct usb_endpoint_descriptor *pendp_desc;
+ struct usb_device *pusbd;
+ int i;
+ int status = _FAIL;
+
+ pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
+ if (!pdvobjpriv)
+ goto exit;
+
+ mutex_init(&pdvobjpriv->hw_init_mutex);
+ mutex_init(&pdvobjpriv->h2c_fwcmd_mutex);
+ mutex_init(&pdvobjpriv->setch_mutex);
+ mutex_init(&pdvobjpriv->setbw_mutex);
+
+ pdvobjpriv->pusbintf = usb_intf;
+ pusbd = interface_to_usbdev(usb_intf);
+ pdvobjpriv->pusbdev = pusbd;
+ usb_set_intfdata(usb_intf, pdvobjpriv);
+
+ pdvobjpriv->RtNumInPipes = 0;
+ pdvobjpriv->RtNumOutPipes = 0;
+
+ pdev_desc = &pusbd->descriptor;
+
+ phost_conf = pusbd->actconfig;
+ pconf_desc = &phost_conf->desc;
+
+ phost_iface = &usb_intf->altsetting[0];
+ piface_desc = &phost_iface->desc;
+
+ pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces;
+ pdvobjpriv->InterfaceNumber = piface_desc->bInterfaceNumber;
+ pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
+
+ for (i = 0; i < pdvobjpriv->nr_endpoint; i++) {
+ phost_endp = phost_iface->endpoint + i;
+ if (phost_endp) {
+ pendp_desc = &phost_endp->desc;
+
+ DBG_8723A("\nusb_endpoint_descriptor(%d):\n", i);
+ DBG_8723A("bLength =%x\n", pendp_desc->bLength);
+ DBG_8723A("bDescriptorType =%x\n",
+ pendp_desc->bDescriptorType);
+ DBG_8723A("bEndpointAddress =%x\n",
+ pendp_desc->bEndpointAddress);
+ DBG_8723A("wMaxPacketSize =%d\n",
+ le16_to_cpu(pendp_desc->wMaxPacketSize));
+ DBG_8723A("bInterval =%x\n", pendp_desc->bInterval);
+
+ if (RT_usb_endpoint_is_bulk_in(pendp_desc)) {
+ DBG_8723A("RT_usb_endpoint_is_bulk_in = %x\n",
+ RT_usb_endpoint_num(pendp_desc));
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
+ RT_usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumInPipes++;
+ } else if (RT_usb_endpoint_is_int_in(pendp_desc)) {
+ DBG_8723A("RT_usb_endpoint_is_int_in = %x, Interval = %x\n",
+ RT_usb_endpoint_num(pendp_desc),
+ pendp_desc->bInterval);
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
+ RT_usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumInPipes++;
+ } else if (RT_usb_endpoint_is_bulk_out(pendp_desc)) {
+ DBG_8723A("RT_usb_endpoint_is_bulk_out = %x\n",
+ RT_usb_endpoint_num(pendp_desc));
+ pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] =
+ RT_usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumOutPipes++;
+ }
+ pdvobjpriv->ep_num[i] = RT_usb_endpoint_num(pendp_desc);
+ }
+ }
+ DBG_8723A("nr_endpoint =%d, in_num =%d, out_num =%d\n\n",
+ pdvobjpriv->nr_endpoint, pdvobjpriv->RtNumInPipes,
+ pdvobjpriv->RtNumOutPipes);
+
+ if (pusbd->speed == USB_SPEED_HIGH) {
+ pdvobjpriv->ishighspeed = true;
+ DBG_8723A("USB_SPEED_HIGH\n");
+ } else {
+ pdvobjpriv->ishighspeed = false;
+ DBG_8723A("NON USB_SPEED_HIGH\n");
+ }
+
+ if (rtw_init_intf_priv(pdvobjpriv) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("\n Can't INIT rtw_init_intf_priv\n"));
+ goto free_dvobj;
+ }
+ /* 3 misc */
+ sema_init(&(pdvobjpriv->usb_suspend_sema), 0);
+ rtw_reset_continual_urb_error(pdvobjpriv);
+ usb_get_dev(pusbd);
+ status = _SUCCESS;
+free_dvobj:
+ if (status != _SUCCESS && pdvobjpriv) {
+ usb_set_intfdata(usb_intf, NULL);
+ mutex_destroy(&pdvobjpriv->hw_init_mutex);
+ mutex_destroy(&pdvobjpriv->h2c_fwcmd_mutex);
+ mutex_destroy(&pdvobjpriv->setch_mutex);
+ mutex_destroy(&pdvobjpriv->setbw_mutex);
+ kfree(pdvobjpriv);
+ pdvobjpriv = NULL;
+ }
+exit:
+ return pdvobjpriv;
+}
+
+static void usb_dvobj_deinit(struct usb_interface *usb_intf)
+{
+ struct dvobj_priv *dvobj = usb_get_intfdata(usb_intf);
+
+ usb_set_intfdata(usb_intf, NULL);
+ if (dvobj) {
+ /* Modify condition for 92DU DMDP 2010.11.18, by Thomas */
+ if ((dvobj->NumInterfaces != 2 && dvobj->NumInterfaces != 3) ||
+ (dvobj->InterfaceNumber == 1)) {
+ if (interface_to_usbdev(usb_intf)->state !=
+ USB_STATE_NOTATTACHED) {
+ /* If the USB dongle was not unplugged while the
+ * module was removed and reinserted, the driver
+ * fails the first sitesurvey after the device
+ * comes up. Reset the USB port to work around
+ * that sitesurvey failure.
+ */
+ DBG_8723A("usb attached..., try to reset usb device\n");
+ usb_reset_device(interface_to_usbdev(usb_intf));
+ }
+ }
+ rtw_deinit_intf_priv(dvobj);
+ mutex_destroy(&dvobj->hw_init_mutex);
+ mutex_destroy(&dvobj->h2c_fwcmd_mutex);
+ mutex_destroy(&dvobj->setch_mutex);
+ mutex_destroy(&dvobj->setbw_mutex);
+ kfree(dvobj);
+ }
+ usb_put_dev(interface_to_usbdev(usb_intf));
+}
+
+static void decide_chip_type_by_usb_device_id(struct rtw_adapter *padapter,
+ const struct usb_device_id *pdid)
+{
+ padapter->chip_type = NULL_CHIP_TYPE;
+ hal_set_hw_type(padapter);
+}
+
+static void usb_intf_start(struct rtw_adapter *padapter)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_start\n"));
+ rtw_hal_inirp_init23a(padapter);
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_start\n"));
+}
+
+static void usb_intf_stop(struct rtw_adapter *padapter)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_stop\n"));
+
+ /* disable_hw_interrupt */
+ if (!padapter->bSurpriseRemoved) {
+ /* device still exists, so driver can do i/o operation
+ * TODO:
+ */
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("SurpriseRemoved == false\n"));
+ }
+
+ /* cancel in irp */
+ rtw_hal_inirp_deinit23a(padapter);
+
+ /* cancel out irp */
+ rtw_write_port_cancel(padapter);
+
+ /* todo:cancel other irps */
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_stop\n"));
+}
+
+static void rtw_dev_unload(struct rtw_adapter *padapter)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_dev_unload\n"));
+
+ if (padapter->bup) {
+ DBG_8723A("===> rtw_dev_unload\n");
+
+ padapter->bDriverStopped = true;
+ if (padapter->xmitpriv.ack_tx)
+ rtw_ack_tx_done23a(&padapter->xmitpriv,
+ RTW_SCTX_DONE_DRV_STOP);
+
+ /* s3. */
+ if (padapter->intf_stop)
+ padapter->intf_stop(padapter);
+
+ /* s4. */
+ if (!padapter->pwrctrlpriv.bInternalAutoSuspend)
+ rtw_stop_drv_threads23a(padapter);
+
+ /* s5. */
+ if (!padapter->bSurpriseRemoved) {
+ rtw_hal_deinit23a(padapter);
+ padapter->bSurpriseRemoved = true;
+ }
+ padapter->bup = false;
+ } else {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("r871x_dev_unload():padapter->bup == false\n"));
+ }
+ DBG_8723A("<=== rtw_dev_unload\n");
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-rtw_dev_unload\n"));
+}
+
+int rtw_hw_suspend23a(struct rtw_adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct net_device *pnetdev = padapter->pnetdev;
+
+ if ((!padapter->bup) || (padapter->bDriverStopped) ||
+ (padapter->bSurpriseRemoved)) {
+ DBG_8723A("padapter->bup =%d bDriverStopped =%d bSurpriseRemoved = %d\n",
+ padapter->bup, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved);
+ goto error_exit;
+ }
+
+ if (padapter) { /* system suspend */
+ LeaveAllPowerSaveMode23a(padapter);
+
+ DBG_8723A("==> rtw_hw_suspend23a\n");
+ down(&pwrpriv->lock);
+ pwrpriv->bips_processing = true;
+ /* padapter->net_closed = true; */
+ /* s1. */
+ if (pnetdev) {
+ netif_carrier_off(pnetdev);
+ netif_tx_stop_all_queues(pnetdev);
+ }
+
+ /* s2. */
+ rtw_disassoc_cmd23a(padapter, 500, false);
+
+ /* s2-2. indicate disconnect to os */
+ /* rtw_indicate_disconnect23a(padapter); */
+ {
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+
+ rtw_os_indicate_disconnect23a(padapter);
+
+ /* donnot enqueue cmd */
+ rtw_lps_ctrl_wk_cmd23a(padapter,
+ LPS_CTRL_DISCONNECT, 0);
+ }
+ }
+ /* s2-3. */
+ rtw_free_assoc_resources23a(padapter, 1);
+
+ /* s2-4. */
+ rtw_free_network_queue23a(padapter, true);
+ rtw_ips_dev_unload23a(padapter);
+ pwrpriv->rf_pwrstate = rf_off;
+ pwrpriv->bips_processing = false;
+ up(&pwrpriv->lock);
+ } else {
+ goto error_exit;
+ }
+ return 0;
+error_exit:
+ DBG_8723A("%s, failed\n", __func__);
+ return -1;
+}
+
+int rtw_hw_resume23a(struct rtw_adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct net_device *pnetdev = padapter->pnetdev;
+
+ if (padapter) { /* system resume */
+ DBG_8723A("==> rtw_hw_resume23a\n");
+ down(&pwrpriv->lock);
+ pwrpriv->bips_processing = true;
+ rtw_reset_drv_sw23a(padapter);
+
+ if (pm_netdev_open23a(pnetdev, false)) {
+ up(&pwrpriv->lock);
+ goto error_exit;
+ }
+
+ netif_device_attach(pnetdev);
+ netif_carrier_on(pnetdev);
+
+ if (!rtw_netif_queue_stopped(pnetdev))
+ netif_tx_start_all_queues(pnetdev);
+ else
+ netif_tx_wake_all_queues(pnetdev);
+
+ pwrpriv->bkeepfwalive = false;
+ pwrpriv->brfoffbyhw = false;
+
+ pwrpriv->rf_pwrstate = rf_on;
+ pwrpriv->bips_processing = false;
+
+ up(&pwrpriv->lock);
+ } else {
+ goto error_exit;
+ }
+ return 0;
+error_exit:
+ DBG_8723A("%s, Open net dev failed\n", __func__);
+ return -1;
+}
+
+static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
+{
+ struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
+ struct rtw_adapter *padapter = dvobj->if1;
+ struct net_device *pnetdev = padapter->pnetdev;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ int ret = 0;
+ unsigned long start_time = jiffies;
+
+ DBG_8723A("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+
+ if ((!padapter->bup) || (padapter->bDriverStopped) ||
+ (padapter->bSurpriseRemoved)) {
+ DBG_8723A("padapter->bup =%d bDriverStopped =%d bSurpriseRemoved = %d\n",
+ padapter->bup, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved);
+ goto exit;
+ }
+ pwrpriv->bInSuspend = true;
+ rtw_cancel_all_timer23a(padapter);
+ LeaveAllPowerSaveMode23a(padapter);
+
+ down(&pwrpriv->lock);
+ /* padapter->net_closed = true; */
+ /* s1. */
+ if (pnetdev) {
+ netif_carrier_off(pnetdev);
+ netif_tx_stop_all_queues(pnetdev);
+ }
+
+ /* s2. */
+ rtw_disassoc_cmd23a(padapter, 0, false);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
+ check_fwstate(pmlmepriv, _FW_LINKED)) {
+ DBG_8723A("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
+ __func__, __LINE__,
+ pmlmepriv->cur_network.network.Ssid.ssid,
+ pmlmepriv->cur_network.network.MacAddress,
+ pmlmepriv->cur_network.network.Ssid.ssid_len,
+ pmlmepriv->assoc_ssid.ssid_len);
+
+ rtw_set_roaming(padapter, 1);
+ }
+ /* s2-2. indicate disconnect to os */
+ rtw_indicate_disconnect23a(padapter);
+ /* s2-3. */
+ rtw_free_assoc_resources23a(padapter, 1);
+ /* s2-4. */
+ rtw_free_network_queue23a(padapter, true);
+
+ rtw_dev_unload(padapter);
+ up(&pwrpriv->lock);
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
+ rtw_indicate_scan_done23a(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
+ rtw_indicate_disconnect23a(padapter);
+
+exit:
+ DBG_8723A("<=== %s return %d.............. in %dms\n", __func__,
+ ret, jiffies_to_msecs(jiffies - start_time));
+
+ return ret;
+}
+
+static int rtw_resume(struct usb_interface *pusb_intf)
+{
+ struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
+ struct rtw_adapter *padapter = dvobj->if1;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ int ret = 0;
+
+ /* Internal auto-suspend and normal resume currently share
+ * the same resume path.
+ */
+ ret = rtw_resume_process23a(padapter);
+
+ return ret;
+}
+
+int rtw_resume_process23a(struct rtw_adapter *padapter)
+{
+ struct net_device *pnetdev;
+ struct pwrctrl_priv *pwrpriv = NULL;
+ int ret = -1;
+ unsigned long start_time = jiffies;
+
+ DBG_8723A("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+
+ if (!padapter)
+ goto exit;
+ pnetdev = padapter->pnetdev;
+ pwrpriv = &padapter->pwrctrlpriv;
+
+ down(&pwrpriv->lock);
+ rtw_reset_drv_sw23a(padapter);
+ pwrpriv->bkeepfwalive = false;
+
+ DBG_8723A("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
+ if (pm_netdev_open23a(pnetdev, true) != 0)
+ goto exit;
+
+ netif_device_attach(pnetdev);
+ netif_carrier_on(pnetdev);
+
+ up(&pwrpriv->lock);
+
+ if (padapter->pid[1] != 0) {
+ DBG_8723A("pid[1]:%d\n", padapter->pid[1]);
+ rtw_signal_process(padapter->pid[1], SIGUSR2);
+ }
+
+ rtw23a_roaming(padapter, NULL);
+
+ ret = 0;
+exit:
+ if (pwrpriv)
+ pwrpriv->bInSuspend = false;
+ DBG_8723A("<=== %s return %d.............. in %dms\n", __func__,
+ ret, jiffies_to_msecs(jiffies - start_time));
+
+ return ret;
+}
+
+/*
+ * rtw_drv_init() - a device potentially for us
+ *
+ * Note: rtw_drv_init() is called when the bus driver has located a
+ * device for us to support. We accept the new device by returning 0.
+ */
+static struct rtw_adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
+ struct usb_interface *pusb_intf,
+ const struct usb_device_id *pdid)
+{
+ struct rtw_adapter *padapter = NULL;
+ struct net_device *pnetdev = NULL;
+ int status = _FAIL;
+
+ pnetdev = rtw_init_netdev23a(padapter);
+ if (!pnetdev)
+ goto handle_dualmac;
+ padapter = netdev_priv(pnetdev);
+
+ padapter->dvobj = dvobj;
+ padapter->bDriverStopped = true;
+ dvobj->if1 = padapter;
+ dvobj->padapters[dvobj->iface_nums++] = padapter;
+ padapter->iface_id = IFACE_ID0;
+
+ /* step 1-1., decide the chip_type via vid/pid */
+ decide_chip_type_by_usb_device_id(padapter, pdid);
+
+ if (rtw_handle_dualmac23a(padapter, 1) != _SUCCESS)
+ goto free_adapter;
+
+ SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
+
+ if (rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj)))
+ goto handle_dualmac;
+
+ /* step 2. hook HalFunc, allocate HalData */
+ if (rtl8723au_set_hal_ops(padapter))
+ return NULL;
+
+ padapter->intf_start = &usb_intf_start;
+ padapter->intf_stop = &usb_intf_stop;
+
+ /* step init_io_priv */
+ rtw_init_io_priv23a(padapter, usb_set_intf_ops);
+
+ /* step read_chip_version */
+ rtw_hal_read_chip_version23a(padapter);
+
+ /* step usb endpoint mapping */
+ rtw_hal_chip_configure23a(padapter);
+
+ /* step read efuse/eeprom data and get mac_addr */
+ rtw_hal_read_chip_info23a(padapter);
+
+ /* step 5. */
+ if (rtw_init_drv_sw23a(padapter) == _FAIL) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("Initialize driver software resource Failed!\n"));
+ goto free_hal_data;
+ }
+
+#ifdef CONFIG_PM
+ if (padapter->pwrctrlpriv.bSupportRemoteWakeup) {
+ dvobj->pusbdev->do_remote_wakeup = 1;
+ pusb_intf->needs_remote_wakeup = 1;
+ device_init_wakeup(&pusb_intf->dev, 1);
+ DBG_8723A("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~~~~\n");
+ DBG_8723A("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~[%d]~~~\n",
+ device_may_wakeup(&pusb_intf->dev));
+ }
+#endif
+ /* 2012-07-11: moved here to keep 8723AS-VAU BT auto-suspend
+ * from interfering
+ */
+ if (usb_autopm_get_interface(pusb_intf) < 0)
+ DBG_8723A("can't get autopm:\n");
+#ifdef CONFIG_8723AU_BT_COEXIST
+ padapter->pwrctrlpriv.autopm_cnt = 1;
+#endif
+
+ /* set mac addr */
+ rtw_macaddr_cfg23a(padapter->eeprompriv.mac_addr);
+#ifdef CONFIG_8723AU_P2P
+ rtw_init_wifidirect_addrs23a(padapter, padapter->eeprompriv.mac_addr,
+ padapter->eeprompriv.mac_addr);
+#endif
+
+ DBG_8723A("bDriverStopped:%d, bSurpriseRemoved:%d, bup:%d, hw_init_completed:%d\n",
+ padapter->bDriverStopped, padapter->bSurpriseRemoved,
+ padapter->bup, padapter->hw_init_completed
+ );
+ status = _SUCCESS;
+
+free_hal_data:
+ if (status != _SUCCESS) {
+ kfree(padapter->HalData);
+ rtw_wdev_unregister(padapter->rtw_wdev);
+ rtw_wdev_free(padapter->rtw_wdev);
+ }
+handle_dualmac:
+ if (status != _SUCCESS)
+ rtw_handle_dualmac23a(padapter, 0);
+free_adapter:
+ if (status != _SUCCESS) {
+ if (pnetdev)
+ free_netdev(pnetdev);
+ padapter = NULL;
+ }
+ return padapter;
+}
+
+static void rtw_usb_if1_deinit(struct rtw_adapter *if1)
+{
+ struct net_device *pnetdev = if1->pnetdev;
+ struct mlme_priv *pmlmepriv = &if1->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ rtw_disassoc_cmd23a(if1, 0, false);
+
+#ifdef CONFIG_8723AU_AP_MODE
+ free_mlme_ap_info23a(if1);
+#endif
+
+ if (pnetdev)
+ unregister_netdev(pnetdev); /* will call netdev_close() */
+
+ rtw_cancel_all_timer23a(if1);
+
+ rtw_dev_unload(if1);
+
+ DBG_8723A("+r871xu_dev_remove, hw_init_completed =%d\n",
+ if1->hw_init_completed);
+
+ rtw_handle_dualmac23a(if1, 0);
+
+ if (if1->rtw_wdev) {
+ rtw_wdev_unregister(if1->rtw_wdev);
+ rtw_wdev_free(if1->rtw_wdev);
+ }
+
+#ifdef CONFIG_8723AU_BT_COEXIST
+ if (1 == if1->pwrctrlpriv.autopm_cnt) {
+ usb_autopm_put_interface(adapter_to_dvobj(if1)->pusbintf);
+ if1->pwrctrlpriv.autopm_cnt--;
+ }
+#endif
+
+ rtw_free_drv_sw23a(if1);
+
+ if (pnetdev)
+ free_netdev(pnetdev);
+}
+
+static int rtw_drv_init(struct usb_interface *pusb_intf,
+ const struct usb_device_id *pdid)
+{
+ struct rtw_adapter *if1 = NULL;
+ struct dvobj_priv *dvobj;
+ int status = _FAIL;
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_init\n"));
+
+ /* Initialize dvobj_priv */
+ dvobj = usb_dvobj_init(pusb_intf);
+ if (!dvobj) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("initialize device object priv Failed!\n"));
+ goto exit;
+ }
+
+ if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid);
+ if (!if1) {
+ DBG_8723A("rtw_init_primary_adapter Failed!\n");
+ goto free_dvobj;
+ }
+
+ /* dev_alloc_name && register_netdev */
+ status = rtw_drv_register_netdev(if1);
+ if (status != _SUCCESS)
+ goto free_if1;
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("-871x_drv - drv_init, success!\n"));
+
+ status = _SUCCESS;
+
+free_if1:
+ if (status != _SUCCESS && if1)
+ rtw_usb_if1_deinit(if1);
+free_dvobj:
+ if (status != _SUCCESS)
+ usb_dvobj_deinit(pusb_intf);
+exit:
+ return status == _SUCCESS ? 0 : -ENODEV;
+}
+
+/* dev_remove() - our device is being removed */
+static void rtw_disconnect(struct usb_interface *pusb_intf)
+{
+ struct dvobj_priv *dvobj;
+ struct rtw_adapter *padapter;
+ struct net_device *pnetdev;
+ struct mlme_priv *pmlmepriv;
+
+ dvobj = usb_get_intfdata(pusb_intf);
+ if (!dvobj)
+ return;
+
+ padapter = dvobj->if1;
+ pnetdev = padapter->pnetdev;
+ pmlmepriv = &padapter->mlmepriv;
+
+ usb_set_intfdata(pusb_intf, NULL);
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+dev_remove()\n"));
+
+ rtw_pm_set_ips23a(padapter, IPS_NONE);
+ rtw_pm_set_lps23a(padapter, PS_MODE_ACTIVE);
+
+ LeaveAllPowerSaveMode23a(padapter);
+
+ rtw_usb_if1_deinit(padapter);
+
+ usb_dvobj_deinit(pusb_intf);
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-dev_remove()\n"));
+ DBG_8723A("-r871xu_dev_remove, done\n");
+
+ return;
+}
+
+static int __init rtw_drv_entry(void)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_entry\n"));
+ return usb_register(usb_drv);
+}
+
+static void __exit rtw_drv_halt(void)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_halt\n"));
+ DBG_8723A("+rtw_drv_halt\n");
+
+ usb_deregister(usb_drv);
+
+ DBG_8723A("-rtw_drv_halt\n");
+}
+
+module_init(rtw_drv_entry);
+module_exit(rtw_drv_halt);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
new file mode 100644
index 000000000000..c49160e477d8
--- /dev/null
+++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
@@ -0,0 +1,283 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _USB_OPS_LINUX_C_
+
+#include <drv_types.h>
+#include <usb_ops_linux.h>
+#include <rtw_sreset.h>
+
+unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr)
+{
+ struct usb_device *pusbd = pdvobj->pusbdev;
+ unsigned int pipe = 0, ep_num = 0;
+
+ if (addr == RECV_BULK_IN_ADDR) {
+ pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
+ } else if (addr == RECV_INT_IN_ADDR) {
+ pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]);
+ } else if (addr < HW_QUEUE_ENTRY) {
+ ep_num = pdvobj->Queue2Pipe[addr];
+ pipe = usb_sndbulkpipe(pusbd, ep_num);
+ }
+ return pipe;
+}
+
+struct zero_bulkout_context {
+ void *pbuf;
+ void *purb;
+ void *pirp;
+ void *padapter;
+};
+
+void usb_read_mem23a(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
+{
+}
+
+void usb_write_mem23a(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
+{
+}
+
+void usb_read_port_cancel23a(struct intf_hdl *pintfhdl)
+{
+ struct recv_buf *precvbuf;
+ struct rtw_adapter *padapter = pintfhdl->padapter;
+ int i;
+
+ precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf;
+
+ DBG_8723A("%s\n", __func__);
+
+ padapter->bReadPortCancel = true;
+
+ for (i = 0; i < NR_RECVBUFF ; i++) {
+ if (precvbuf->purb)
+ usb_kill_urb(precvbuf->purb);
+ precvbuf++;
+ }
+ usb_kill_urb(padapter->recvpriv.int_in_urb);
+}
+
+static void usb_write_port23a_complete(struct urb *purb, struct pt_regs *regs)
+{
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context;
+ struct rtw_adapter *padapter = pxmitbuf->padapter;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct hal_data_8723a *phaldata;
+ unsigned long irqL;
+
+ switch (pxmitbuf->flags) {
+ case VO_QUEUE_INX:
+ pxmitpriv->voq_cnt--;
+ break;
+ case VI_QUEUE_INX:
+ pxmitpriv->viq_cnt--;
+ break;
+ case BE_QUEUE_INX:
+ pxmitpriv->beq_cnt--;
+ break;
+ case BK_QUEUE_INX:
+ pxmitpriv->bkq_cnt--;
+ break;
+ case HIGH_QUEUE_INX:
+#ifdef CONFIG_8723AU_AP_MODE
+ rtw_chk_hi_queue_cmd23a(padapter);
+#endif
+ break;
+ default:
+ break;
+ }
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped ||
+ padapter->bWritePortCancel) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_write_port23a_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ padapter->bDriverStopped, padapter->bSurpriseRemoved));
+ DBG_8723A("%s(): TX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) bWritePortCancel(%d) pxmitbuf->ext_tag(%x)\n",
+ __func__, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved, padapter->bReadPortCancel,
+ pxmitbuf->ext_tag);
+
+ goto check_completion;
+ }
+
+ if (purb->status) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_write_port23a_complete : purb->status(%d) != 0\n",
+ purb->status));
+ DBG_8723A("###=> urb_write_port_complete status(%d)\n",
+ purb->status);
+ if ((purb->status == -EPIPE) || (purb->status == -EPROTO)) {
+ sreset_set_wifi_error_status23a(padapter,
+ USB_WRITE_PORT_FAIL);
+ } else if (purb->status == -EINPROGRESS) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_write_port23a_complete: EINPROGRESS\n"));
+ goto check_completion;
+ } else if (purb->status == -ENOENT) {
+ DBG_8723A("%s: -ENOENT\n", __func__);
+ goto check_completion;
+ } else if (purb->status == -ECONNRESET) {
+ DBG_8723A("%s: -ECONNRESET\n", __func__);
+ goto check_completion;
+ } else if (purb->status == -ESHUTDOWN) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_write_port23a_complete: ESHUTDOWN\n"));
+ padapter->bDriverStopped = true;
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_write_port23a_complete:bDriverStopped = true\n"));
+ goto check_completion;
+ } else {
+ padapter->bSurpriseRemoved = true;
+ DBG_8723A("bSurpriseRemoved = true\n");
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_write_port23a_complete:bSurpriseRemoved = true\n"));
+ goto check_completion;
+ }
+ }
+ phaldata = GET_HAL_DATA(padapter);
+ phaldata->srestpriv.last_tx_complete_time = jiffies;
+
+check_completion:
+ spin_lock_irqsave(&pxmitpriv->lock_sctx, irqL);
+ rtw23a_sctx_done_err(&pxmitbuf->sctx,
+ purb->status ? RTW_SCTX_DONE_WRITE_PORT_ERR :
+ RTW_SCTX_DONE_SUCCESS);
+ spin_unlock_irqrestore(&pxmitpriv->lock_sctx, irqL);
+
+ rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf);
+
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+}
+
+u32 usb_write_port23a(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ struct xmit_buf *pxmitbuf)
+{
+ struct urb *purb = NULL;
+ struct rtw_adapter *padapter = (struct rtw_adapter *)pintfhdl->padapter;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
+ struct usb_device *pusbd = pdvobj->pusbdev;
+ unsigned long irqL;
+ unsigned int pipe;
+ int status;
+ u32 ret = _FAIL;
+
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("+usb_write_port23a\n"));
+
+ if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||
+ (padapter->pwrctrlpriv.pnp_bstop_trx)) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_write_port23a: bDriverStopped || bSurpriseRemoved || pnp_bstop_trx\n"));
+ rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
+ goto exit;
+ }
+
+ spin_lock_irqsave(&pxmitpriv->lock, irqL);
+
+ switch (addr) {
+ case VO_QUEUE_INX:
+ pxmitpriv->voq_cnt++;
+ pxmitbuf->flags = VO_QUEUE_INX;
+ break;
+ case VI_QUEUE_INX:
+ pxmitpriv->viq_cnt++;
+ pxmitbuf->flags = VI_QUEUE_INX;
+ break;
+ case BE_QUEUE_INX:
+ pxmitpriv->beq_cnt++;
+ pxmitbuf->flags = BE_QUEUE_INX;
+ break;
+ case BK_QUEUE_INX:
+ pxmitpriv->bkq_cnt++;
+ pxmitbuf->flags = BK_QUEUE_INX;
+ break;
+ case HIGH_QUEUE_INX:
+ pxmitbuf->flags = HIGH_QUEUE_INX;
+ break;
+ default:
+ pxmitbuf->flags = MGT_QUEUE_INX;
+ break;
+ }
+
+ spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
+
+ purb = pxmitbuf->pxmit_urb[0];
+
+ /* translate DMA FIFO addr to pipehandle */
+ pipe = ffaddr2pipehdl23a(pdvobj, addr);
+
+ usb_fill_bulk_urb(purb, pusbd, pipe,
+ pxmitframe->buf_addr, /* pxmitbuf->pbuf */
+ cnt, usb_write_port23a_complete,
+ pxmitbuf);/* context is pxmitbuf */
+
+ status = usb_submit_urb(purb, GFP_ATOMIC);
+ if (!status) {
+ struct hal_data_8723a *phaldata = GET_HAL_DATA(padapter);
+ phaldata->srestpriv.last_tx_time = jiffies;
+ } else {
+ rtw23a_sctx_done_err(&pxmitbuf->sctx,
+ RTW_SCTX_DONE_WRITE_PORT_ERR);
+ DBG_8723A("usb_write_port23a, status =%d\n", status);
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_write_port23a(): usb_submit_urb, status =%x\n",
+ status));
+
+ switch (status) {
+ case -ENODEV:
+ padapter->bDriverStopped = true;
+ break;
+ default:
+ break;
+ }
+ goto exit;
+ }
+ ret = _SUCCESS;
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("-usb_write_port23a\n"));
+
+exit:
+ if (ret != _SUCCESS)
+ rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf);
+
+ return ret;
+}
+
+void usb_write_port23a_cancel(struct intf_hdl *pintfhdl)
+{
+ struct rtw_adapter *padapter = pintfhdl->padapter;
+ struct xmit_buf *pxmitbuf;
+ struct list_head *plist;
+ int j;
+
+ DBG_8723A("%s\n", __func__);
+
+ padapter->bWritePortCancel = true;
+
+ list_for_each(plist, &padapter->xmitpriv.xmitbuf_list) {
+ pxmitbuf = container_of(plist, struct xmit_buf, list2);
+ for (j = 0; j < 8; j++) {
+ if (pxmitbuf->pxmit_urb[j])
+ usb_kill_urb(pxmitbuf->pxmit_urb[j]);
+ }
+ }
+ list_for_each(plist, &padapter->xmitpriv.xmitextbuf_list) {
+ pxmitbuf = container_of(plist, struct xmit_buf, list2);
+ for (j = 0; j < 8; j++) {
+ if (pxmitbuf->pxmit_urb[j])
+ usb_kill_urb(pxmitbuf->pxmit_urb[j]);
+ }
+ }
+}
diff --git a/drivers/staging/rtl8723au/os_dep/xmit_linux.c b/drivers/staging/rtl8723au/os_dep/xmit_linux.c
new file mode 100644
index 000000000000..e1c6fc746233
--- /dev/null
+++ b/drivers/staging/rtl8723au/os_dep/xmit_linux.c
@@ -0,0 +1,195 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _XMIT_OSDEP_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <wifi.h>
+#include <mlme_osdep.h>
+#include <xmit_osdep.h>
+#include <osdep_intf.h>
+
+uint rtw_remainder_len23a(struct pkt_file *pfile)
+{
+ return pfile->buf_len - ((unsigned long)(pfile->cur_addr) -
+ (unsigned long)(pfile->buf_start));
+}
+
+void _rtw_open_pktfile23a(struct sk_buff *pktptr, struct pkt_file *pfile)
+{
+ pfile->pkt = pktptr;
+ pfile->buf_start = pktptr->data;
+ pfile->cur_addr = pktptr->data;
+ pfile->buf_len = pktptr->len;
+ pfile->pkt_len = pktptr->len;
+
+ pfile->cur_buffer = pfile->buf_start;
+}
+
+uint _rtw_pktfile_read23a(struct pkt_file *pfile, u8 *rmem, uint rlen)
+{
+ uint len = 0;
+
+ len = rtw_remainder_len23a(pfile);
+ len = (rlen > len) ? len : rlen;
+
+ if (rmem)
+ skb_copy_bits(pfile->pkt, pfile->buf_len-pfile->pkt_len,
+ rmem, len);
+
+ pfile->cur_addr += len;
+ pfile->pkt_len -= len;
+
+ return len;
+}
+
+int rtw_endofpktfile23a(struct pkt_file *pfile)
+{
+ if (pfile->pkt_len == 0)
+ return true;
+ return false;
+}
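A hedged sketch of how the pkt_file cursor above is meant to be consumed; the skb, destination buffer and chunk size are illustrative:

    struct pkt_file pfile;
    u8 chunk[128];
    uint got;

    _rtw_open_pktfile23a(skb, &pfile);            /* skb: the frame to walk */
    while (!rtw_endofpktfile23a(&pfile)) {
        got = _rtw_pktfile_read23a(&pfile, chunk, sizeof(chunk));
        /* 'got' bytes copied out of the skb; the cursor has advanced */
    }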
+
+int rtw_os_xmit_resource_alloc23a(struct rtw_adapter *padapter,
+ struct xmit_buf *pxmitbuf, u32 alloc_sz)
+{
+ int i;
+
+ pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
+ if (pxmitbuf->pallocated_buf == NULL)
+ return _FAIL;
+
+ pxmitbuf->pbuf = PTR_ALIGN(pxmitbuf->pallocated_buf, XMITBUF_ALIGN_SZ);
+
+ for (i = 0; i < 8; i++) {
+ pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (pxmitbuf->pxmit_urb[i] == NULL) {
+ DBG_8723A("pxmitbuf->pxmit_urb[i]==NULL");
+ return _FAIL;
+ }
+ }
+ return _SUCCESS;
+}
+
+void rtw_os_xmit_resource_free23a(struct rtw_adapter *padapter,
+ struct xmit_buf *pxmitbuf)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ usb_free_urb(pxmitbuf->pxmit_urb[i]);
+ kfree(pxmitbuf->pallocated_buf);
+}
+
+#define WMM_XMIT_THRESHOLD (NR_XMITFRAME*2/5)
+
+void rtw_os_pkt_complete23a(struct rtw_adapter *padapter, struct sk_buff *pkt)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u16 queue;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
+ (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ } else {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue))
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ }
+ dev_kfree_skb_any(pkt);
+}
+
+void rtw_os_xmit_complete23a(struct rtw_adapter *padapter,
+ struct xmit_frame *pxframe)
+{
+ if (pxframe->pkt)
+ rtw_os_pkt_complete23a(padapter, pxframe->pkt);
+
+ pxframe->pkt = NULL;
+}
+
+void rtw_os_xmit_schedule23a(struct rtw_adapter *padapter)
+{
+ struct xmit_priv *pxmitpriv;
+
+ if (!padapter)
+ return;
+ pxmitpriv = &padapter->xmitpriv;
+
+ spin_lock_bh(&pxmitpriv->lock);
+
+ if (rtw_txframes_pending23a(padapter))
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+ spin_unlock_bh(&pxmitpriv->lock);
+}
+
+static void rtw_check_xmit_resource(struct rtw_adapter *padapter,
+ struct sk_buff *pkt)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u16 queue;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ /* No free space for Tx, tx_worker is too slow */
+ if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ } else {
+ if (pxmitpriv->free_xmitframe_cnt <= 4) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ }
+ }
+}
+
+int rtw_xmit23a_entry23a(struct sk_buff *skb, struct net_device *pnetdev)
+{
+ struct rtw_adapter *padapter = netdev_priv(pnetdev);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ int res = 0;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("+xmit_enry\n"));
+
+ if (!rtw_if_up23a(padapter)) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_,
+ ("rtw_xmit23a_entry23a: rtw_if_up23a fail\n"));
+ goto drop_packet;
+ }
+
+ rtw_check_xmit_resource(padapter, skb);
+
+ res = rtw_xmit23a(padapter, skb);
+ if (res < 0)
+ goto drop_packet;
+
+ pxmitpriv->tx_pkts++;
+ RT_TRACE(_module_xmit_osdep_c_, _drv_info_,
+ ("rtw_xmit23a_entry23a: tx_pkts=%d\n",
+ (u32)pxmitpriv->tx_pkts));
+ goto exit;
+
+drop_packet:
+ pxmitpriv->tx_drop++;
+ dev_kfree_skb_any(skb);
+ RT_TRACE(_module_xmit_osdep_c_, _drv_notice_,
+ ("rtw_xmit23a_entry23a: drop, tx_drop=%d\n",
+ (u32)pxmitpriv->tx_drop));
+exit:
+ return 0;
+}
diff --git a/drivers/staging/rtl8821ae/base.c b/drivers/staging/rtl8821ae/base.c
index 18c936fbdf1e..e5073fe24770 100644
--- a/drivers/staging/rtl8821ae/base.c
+++ b/drivers/staging/rtl8821ae/base.c
@@ -39,10 +39,10 @@
#include "pci.h"
/*
- *NOTICE!!!: This file will be very big, we hsould
- *keep it clear under follwing roles:
+ *NOTICE!!!: This file will be very big, we should
+ *keep it clear under following roles:
*
- *This file include follwing part, so, if you add new
+ *This file include following part, so, if you add new
*functions into this file, please check which part it
*should includes. or check if you should add new part
*for this file:
@@ -64,73 +64,73 @@
*
*********************************************************/
static struct ieee80211_channel rtl_channeltable_2g[] = {
- {.center_freq = 2412,.hw_value = 1,},
- {.center_freq = 2417,.hw_value = 2,},
- {.center_freq = 2422,.hw_value = 3,},
- {.center_freq = 2427,.hw_value = 4,},
- {.center_freq = 2432,.hw_value = 5,},
- {.center_freq = 2437,.hw_value = 6,},
- {.center_freq = 2442,.hw_value = 7,},
- {.center_freq = 2447,.hw_value = 8,},
- {.center_freq = 2452,.hw_value = 9,},
- {.center_freq = 2457,.hw_value = 10,},
- {.center_freq = 2462,.hw_value = 11,},
- {.center_freq = 2467,.hw_value = 12,},
- {.center_freq = 2472,.hw_value = 13,},
- {.center_freq = 2484,.hw_value = 14,},
+ {.center_freq = 2412, .hw_value = 1,},
+ {.center_freq = 2417, .hw_value = 2,},
+ {.center_freq = 2422, .hw_value = 3,},
+ {.center_freq = 2427, .hw_value = 4,},
+ {.center_freq = 2432, .hw_value = 5,},
+ {.center_freq = 2437, .hw_value = 6,},
+ {.center_freq = 2442, .hw_value = 7,},
+ {.center_freq = 2447, .hw_value = 8,},
+ {.center_freq = 2452, .hw_value = 9,},
+ {.center_freq = 2457, .hw_value = 10,},
+ {.center_freq = 2462, .hw_value = 11,},
+ {.center_freq = 2467, .hw_value = 12,},
+ {.center_freq = 2472, .hw_value = 13,},
+ {.center_freq = 2484, .hw_value = 14,},
};
static struct ieee80211_channel rtl_channeltable_5g[] = {
- {.center_freq = 5180,.hw_value = 36,},
- {.center_freq = 5200,.hw_value = 40,},
- {.center_freq = 5220,.hw_value = 44,},
- {.center_freq = 5240,.hw_value = 48,},
- {.center_freq = 5260,.hw_value = 52,},
- {.center_freq = 5280,.hw_value = 56,},
- {.center_freq = 5300,.hw_value = 60,},
- {.center_freq = 5320,.hw_value = 64,},
- {.center_freq = 5500,.hw_value = 100,},
- {.center_freq = 5520,.hw_value = 104,},
- {.center_freq = 5540,.hw_value = 108,},
- {.center_freq = 5560,.hw_value = 112,},
- {.center_freq = 5580,.hw_value = 116,},
- {.center_freq = 5600,.hw_value = 120,},
- {.center_freq = 5620,.hw_value = 124,},
- {.center_freq = 5640,.hw_value = 128,},
- {.center_freq = 5660,.hw_value = 132,},
- {.center_freq = 5680,.hw_value = 136,},
- {.center_freq = 5700,.hw_value = 140,},
- {.center_freq = 5745,.hw_value = 149,},
- {.center_freq = 5765,.hw_value = 153,},
- {.center_freq = 5785,.hw_value = 157,},
- {.center_freq = 5805,.hw_value = 161,},
- {.center_freq = 5825,.hw_value = 165,},
+ {.center_freq = 5180, .hw_value = 36,},
+ {.center_freq = 5200, .hw_value = 40,},
+ {.center_freq = 5220, .hw_value = 44,},
+ {.center_freq = 5240, .hw_value = 48,},
+ {.center_freq = 5260, .hw_value = 52,},
+ {.center_freq = 5280, .hw_value = 56,},
+ {.center_freq = 5300, .hw_value = 60,},
+ {.center_freq = 5320, .hw_value = 64,},
+ {.center_freq = 5500, .hw_value = 100,},
+ {.center_freq = 5520, .hw_value = 104,},
+ {.center_freq = 5540, .hw_value = 108,},
+ {.center_freq = 5560, .hw_value = 112,},
+ {.center_freq = 5580, .hw_value = 116,},
+ {.center_freq = 5600, .hw_value = 120,},
+ {.center_freq = 5620, .hw_value = 124,},
+ {.center_freq = 5640, .hw_value = 128,},
+ {.center_freq = 5660, .hw_value = 132,},
+ {.center_freq = 5680, .hw_value = 136,},
+ {.center_freq = 5700, .hw_value = 140,},
+ {.center_freq = 5745, .hw_value = 149,},
+ {.center_freq = 5765, .hw_value = 153,},
+ {.center_freq = 5785, .hw_value = 157,},
+ {.center_freq = 5805, .hw_value = 161,},
+ {.center_freq = 5825, .hw_value = 165,},
};
static struct ieee80211_rate rtl_ratetable_2g[] = {
- {.bitrate = 10,.hw_value = 0x00,},
- {.bitrate = 20,.hw_value = 0x01,},
- {.bitrate = 55,.hw_value = 0x02,},
- {.bitrate = 110,.hw_value = 0x03,},
- {.bitrate = 60,.hw_value = 0x04,},
- {.bitrate = 90,.hw_value = 0x05,},
- {.bitrate = 120,.hw_value = 0x06,},
- {.bitrate = 180,.hw_value = 0x07,},
- {.bitrate = 240,.hw_value = 0x08,},
- {.bitrate = 360,.hw_value = 0x09,},
- {.bitrate = 480,.hw_value = 0x0a,},
- {.bitrate = 540,.hw_value = 0x0b,},
+ {.bitrate = 10, .hw_value = 0x00,},
+ {.bitrate = 20, .hw_value = 0x01,},
+ {.bitrate = 55, .hw_value = 0x02,},
+ {.bitrate = 110, .hw_value = 0x03,},
+ {.bitrate = 60, .hw_value = 0x04,},
+ {.bitrate = 90, .hw_value = 0x05,},
+ {.bitrate = 120, .hw_value = 0x06,},
+ {.bitrate = 180, .hw_value = 0x07,},
+ {.bitrate = 240, .hw_value = 0x08,},
+ {.bitrate = 360, .hw_value = 0x09,},
+ {.bitrate = 480, .hw_value = 0x0a,},
+ {.bitrate = 540, .hw_value = 0x0b,},
};
static struct ieee80211_rate rtl_ratetable_5g[] = {
- {.bitrate = 60,.hw_value = 0x04,},
- {.bitrate = 90,.hw_value = 0x05,},
- {.bitrate = 120,.hw_value = 0x06,},
- {.bitrate = 180,.hw_value = 0x07,},
- {.bitrate = 240,.hw_value = 0x08,},
- {.bitrate = 360,.hw_value = 0x09,},
- {.bitrate = 480,.hw_value = 0x0a,},
- {.bitrate = 540,.hw_value = 0x0b,},
+ {.bitrate = 60, .hw_value = 0x04,},
+ {.bitrate = 90, .hw_value = 0x05,},
+ {.bitrate = 120, .hw_value = 0x06,},
+ {.bitrate = 180, .hw_value = 0x07,},
+ {.bitrate = 240, .hw_value = 0x08,},
+ {.bitrate = 360, .hw_value = 0x09,},
+ {.bitrate = 480, .hw_value = 0x0a,},
+ {.bitrate = 540, .hw_value = 0x0b,},
};
static const struct ieee80211_supported_band rtl_band_2ghz = {
@@ -320,7 +320,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
/* <5> set hw caps */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_RX_INCLUDES_FCS |
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
IEEE80211_HW_BEACON_FILTER |
#endif
IEEE80211_HW_AMPDU_AGGREGATION |
@@ -332,11 +332,11 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
/* swlps or hwlps has been set in diff chip in init_sw_vars */
if (rtlpriv->psc.b_swctrl_lps)
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- /* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ /* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
0;
/*<delete in kernel start>*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
@@ -354,11 +354,11 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
/*<delete in kernel start>*/
#endif
/*<delete in kernel end>*/
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39))
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
#endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
#endif
@@ -402,7 +402,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
/* <2> work queue */
rtlpriv->works.hw = hw;
/*<delete in kernel start>*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
/*<delete in kernel end>*/
rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0);
/*<delete in kernel start>*/
@@ -662,30 +662,27 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
if (rtlpriv->dm.b_useramask) {
tcb_desc->ratr_index = ratr_index;
- /* TODO we will differentiate adhoc and station futrue */
+ /* TODO we will differentiate adhoc and station in the future */
if (mac->opmode == NL80211_IFTYPE_STATION ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
tcb_desc->mac_id = 0;
-
- if (mac->mode == WIRELESS_MODE_N_24G) {
+ if (mac->mode == WIRELESS_MODE_N_24G)
tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB;
- } else if (mac->mode == WIRELESS_MODE_N_5G) {
+ else if (mac->mode == WIRELESS_MODE_N_5G)
tcb_desc->ratr_index = RATR_INX_WIRELESS_NG;
- } else if (mac->mode & WIRELESS_MODE_G) {
+ else if (mac->mode & WIRELESS_MODE_G)
tcb_desc->ratr_index = RATR_INX_WIRELESS_GB;
- } else if (mac->mode & WIRELESS_MODE_B) {
+ else if (mac->mode & WIRELESS_MODE_B)
tcb_desc->ratr_index = RATR_INX_WIRELESS_B;
- } else if (mac->mode & WIRELESS_MODE_A) {
+ else if (mac->mode & WIRELESS_MODE_A)
tcb_desc->ratr_index = RATR_INX_WIRELESS_G;
- }
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (NULL != sta) {
- if (sta->aid > 0) {
+ if (sta->aid > 0)
tcb_desc->mac_id = sta->aid + 1;
- } else {
+ else
tcb_desc->mac_id = 1;
- }
} else {
tcb_desc->mac_id = 0;
}
@@ -711,7 +708,7 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
return;
} else if (mac->opmode == NL80211_IFTYPE_STATION) {
if (!mac->bw_40 || !(sta->ht_cap.ht_supported))
- return;
+ return;
}
if (tcb_desc->b_multicast || tcb_desc->b_broadcast)
return;
@@ -730,7 +727,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u8 hw_rate;
- if ((get_rf_type(rtlphy) == RF_2T2R) && (sta->ht_cap.mcs.rx_mask[1]!=0))
+ if ((get_rf_type(rtlphy) == RF_2T2R) && (sta->ht_cap.mcs.rx_mask[1] != 0))
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
else
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
@@ -772,16 +769,16 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
tcb_desc->disable_ratefallback = 1;
} else {
/*
- *because hw will nerver use hw_rate
+ *because hw will never use hw_rate
*when tcb_desc->use_driver_rate = false
*so we never set highest N rate here,
- *and N rate will all be controled by FW
+ *and N rate will all be controlled by FW
*when tcb_desc->use_driver_rate = false
*/
if (sta && (sta->ht_cap.ht_supported)) {
tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw, sta);
} else {
- if(rtlmac->mode == WIRELESS_MODE_B) {
+ if (rtlmac->mode == WIRELESS_MODE_B) {
tcb_desc->hw_rate =
rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M];
} else {
@@ -809,7 +806,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
tcb_desc->b_packet_bw = false;
}
}
-//EXPORT_SYMBOL(rtl_get_tcb_desc);
+/* EXPORT_SYMBOL(rtl_get_tcb_desc); */
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
{
@@ -862,8 +859,8 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
("%s ACT_ADDBAREQ From :%pM\n",
is_tx ? "Tx" : "Rx", hdr->addr2));
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("req \n"),
- skb->data, skb->len);
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("req\n"),
+ skb->data, skb->len);
if (!is_tx) {
struct ieee80211_sta *sta = NULL;
struct rtl_sta_info *sta_entry = NULL;
@@ -897,7 +894,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
hdr->addr3,
tid);
if (skb_delba) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
rx_status.freq = hw->conf.chandef.chan->center_freq;
rx_status.band = hw->conf.chandef.chan->band;
#else
@@ -1184,7 +1181,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
if (ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
return;
- rtlpriv->link_info.bcn_rx_inperiod ++;
+ rtlpriv->link_info.bcn_rx_inperiod++;
}
void rtl_watchdog_wq_callback(void *data)
@@ -1363,7 +1360,7 @@ void rtl_easy_concurrent_retrytimer_callback(unsigned long data)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_priv *buddy_priv = rtlpriv->buddy_priv;
- if(buddy_priv == NULL)
+ if (buddy_priv == NULL)
return;
rtlpriv->cfg->ops->dualmac_easy_concurrent(hw);
@@ -1478,13 +1475,13 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
/* rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); */
info->control.rates[0].idx = 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
info->band = hw->conf.chandef.chan->band;
#else
info->band = hw->conf.channel->band;
#endif
/*<delete in kernel start>*/
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
info->control.sta = sta;
rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
#else
@@ -1499,7 +1496,7 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
err_free:
return 0;
}
-//EXPORT_SYMBOL(rtl_send_smps_action);
+/* EXPORT_SYMBOL(rtl_send_smps_action); */
/* because mac80211 have issues when can receive del ba
* so here we just make a fake del_ba if we receive a ba_req
@@ -1528,8 +1525,8 @@ struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
IEEE80211_STYPE_ACTION);
action_frame->u.action.category = WLAN_CATEGORY_BACK;
action_frame->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
- params = (u16)(1 << 11); /* bit 11 initiator */
- params |= (u16)(tid << 12); /* bit 15:12 TID number */
+ params = (u16)(1 << 11); /* bit 11 initiator */
+ params |= (u16)(tid << 12); /* bit 15:12 TID number */
action_frame->u.action.u.delba.params = cpu_to_le16(params);
action_frame->u.action.u.delba.reason_code =
@@ -1652,9 +1649,8 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
if (ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
return;
- if (rtl_find_221_ie(hw, data, len)) {
+ if (rtl_find_221_ie(hw, data, len))
vendor = mac->vendor;
- }
if ((memcmp(mac->bssid, ap5_1, 3) == 0) ||
(memcmp(mac->bssid, ap5_2, 3) == 0) ||
@@ -1671,7 +1667,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
(memcmp(mac->bssid, ap4_2, 3) == 0) ||
(memcmp(mac->bssid, ap4_3, 3) == 0) ||
vendor == PEER_RAL) {
- RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>ral findn\n"));
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>ral find\n"));
vendor = PEER_RAL;
} else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
vendor == PEER_CISCO) {
@@ -1715,7 +1711,7 @@ static ssize_t rtl_store_debug_level(struct device *d,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 0, &val);
+ ret = kstrtoul(buf, 0, &val);
if (ret) {
printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf);
} else {
@@ -1845,8 +1841,7 @@ struct rtl_global_var global_var = {};
int rtl_core_module_init(void)
{
if (rtl_rate_control_register())
- printk(KERN_DEBUG "rtl: Unable to register rtl_rc,"
- "use default RC !!\n");
+ printk(KERN_DEBUG "rtl: Unable to register rtl_rc, use default RC !!\n");
/* add proc for debug */
rtl_proc_add_topdir();
@@ -1861,7 +1856,7 @@ int rtl_core_module_init(void)
void rtl_core_module_exit(void)
{
/*RC*/
- rtl_rate_control_unregister();
+ rtl_rate_control_unregister();
/* add proc for debug */
rtl_proc_remove_topdir();
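
The rtl_store_debug_level() hunk above swaps the deprecated strict_strtoul() for kstrtoul(), which parses the whole string and returns 0 on success or a negative errno. A minimal sysfs store handler built around the same call, with hypothetical names and assuming <linux/kernel.h> and <linux/device.h>:

static ssize_t example_store_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);	/* base 0: decimal, 0x... hex or 0... octal */
	if (ret)
		return ret;		/* propagate -EINVAL / -ERANGE to userspace */

	pr_debug("level set to %lu\n", val);
	return count;
}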
diff --git a/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c
index b30f17ae0215..5a54bb10698c 100644
--- a/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c
+++ b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c
@@ -1653,7 +1653,7 @@ halbtc8812a1ant_TdmaDurationAdjustForAcl(
}
else
{
- //accquire the BT TRx retry count from BT_Info byte2
+ //acquire the BT TRx retry count from BT_Info byte2
retry_count = coex_sta->bt_retry_cnt;
bt_info_ext = coex_sta->bt_info_ext;
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retry_count = %d\n", retry_count));
diff --git a/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c
index e619923ef0ab..8e4293a35872 100644
--- a/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c
+++ b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c
@@ -629,7 +629,7 @@ halbtc8723a1ant_TdmaDurationAdjust(
}
else
{
- //accquire the BT TRx retry count from BT_Info byte2
+ //acquire the BT TRx retry count from BT_Info byte2
retryCount = pCoexSta->btRetryCnt;
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], retryCount = %d\n", retryCount));
result = 0;
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c
index 973d0ea82cb8..1b04530d46bc 100644
--- a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c
@@ -1721,7 +1721,7 @@ halbtc8192e1ant_TdmaDurationAdjustForAcl(
}
else
{
- //accquire the BT TRx retry count from BT_Info byte2
+ //acquire the BT TRx retry count from BT_Info byte2
retryCount = pCoexSta->btRetryCnt;
btInfoExt = pCoexSta->btInfoExt;
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c
index 44ec78562e2d..115908928ae4 100644
--- a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c
@@ -1803,7 +1803,7 @@ void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
result = 0;
wait_cnt = 0;
} else {
- /* accquire the BT TRx retry count from BT_Info byte2 */
+ /* acquire the BT TRx retry count from BT_Info byte2 */
retry_cnt = coex_sta->bt_retry_cnt;
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
"[BTCoex], retry_cnt = %d\n", retry_cnt);
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c
index 180d6f12e7b5..3f5c4fd2e73f 100644
--- a/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c
@@ -1687,7 +1687,7 @@ halbtc8723a2ant_TdmaDurationAdjust(
}
else
{
- //accquire the BT TRx retry count from BT_Info byte2
+ //acquire the BT TRx retry count from BT_Info byte2
retryCount = pCoexSta->btRetryCnt;
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, WaitCount=%d\n",
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c
index 3414ba78cc43..9677943fc20d 100644
--- a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c
@@ -1950,7 +1950,7 @@ void halbtc8723b1ant_tdma_duration_adjust_for_acl(struct btc_coexist *btcoexist,
result = 0;
wait_count = 0;
} else {
- /*accquire the BT TRx retry count from BT_Info byte2 */
+ /*acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
bt_info_ext = coex_sta->bt_info_ext;
result = 0;
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c
index 83b1b4218333..d337bd0b3c1b 100644
--- a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c
@@ -1830,7 +1830,7 @@ void halbtc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
result = 0;
wait_count = 0;
} else {
- /*accquire the BT TRx retry count from BT_Info byte2*/
+ /*acquire the BT TRx retry count from BT_Info byte2*/
retryCount = coex_sta->bt_retry_cnt;
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
"[BTCoex], retryCount = %d\n", retryCount);
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c
index 9d9fa4d7575d..c4e83773ec98 100644
--- a/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c
@@ -732,17 +732,7 @@ bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
else
btcoexist->binded = true;
-#if ( defined(CONFIG_PCI_HCI))
- btcoexist->chip_interface = BTC_INTF_PCI;
-#elif ( defined(CONFIG_USB_HCI))
- btcoexist->chip_interface = BTC_INTF_USB;
-#elif ( defined(CONFIG_SDIO_HCI))
- btcoexist->chip_interface = BTC_INTF_SDIO;
-#elif ( defined(CONFIG_GSPI_HCI))
- btcoexist->chip_interface = BTC_INTF_GSPI;
-#else
btcoexist->chip_interface = BTC_INTF_UNKNOWN;
-#endif
if (NULL == btcoexist->adapter)
btcoexist->adapter = adapter;
@@ -1087,7 +1077,7 @@ void exhalbtc_dbg_control(struct btc_coexist *btcoexist,
btcoexist->statistics.cnt_dbg_ctrl++;
}
-void exhalbtc_stack_update_profile_info()
+void exhalbtc_stack_update_profile_info(void)
{
}
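
The same file also gives exhalbtc_stack_update_profile_info() an explicit (void) parameter list. In C, an empty list in a declaration means the arguments are unspecified, so the compiler cannot reject stray callers, whereas (void) declares a checkable zero-argument prototype. A tiny standalone illustration with made-up names:

void legacy_hook() { }		/* old style: argument list left unspecified */
void strict_hook(void) { }	/* prototype: takes no arguments at all */

void call_sites(void)
{
	legacy_hook(42);	/* accepted silently by the compiler; undefined behaviour */
	/* strict_hook(42); */	/* would be rejected: too many arguments */
}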
diff --git a/drivers/staging/rtl8821ae/core.c b/drivers/staging/rtl8821ae/core.c
index 40de6089039e..ff3139b6da65 100644
--- a/drivers/staging/rtl8821ae/core.c
+++ b/drivers/staging/rtl8821ae/core.c
@@ -373,7 +373,7 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
/* sleep here is must, or we may recv the beacon and
* cause mac80211 into wrong ps state, this will cause
* power save nullfunc send fail, and further cause
- * pkt loss, So sleep must quickly but not immediatly
+ * pkt loss, So sleep must quickly but not immediately
* because that will cause nullfunc send by mac80211
* fail, and cause pkt loss, we have tested that 5mA
* is worked very well */
@@ -1200,8 +1200,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type = AESCMAC_ENCRYPTION;
RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CMAC\n"));
RT_TRACE(COMP_SEC, DBG_DMESG,
- ("HW don't support CMAC encrypiton, "
- "use software CMAC encrypiton\n"));
+ ("HW don't support CMAC encryption, "
+ "use software CMAC encryption\n"));
err = -EOPNOTSUPP;
goto out_unlock;
default:
@@ -1235,8 +1235,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type = AESCMAC_ENCRYPTION;
RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CMAC\n"));
RT_TRACE(COMP_SEC, DBG_DMESG,
- ("HW don't support CMAC encrypiton, "
- "use software CMAC encrypiton\n"));
+ ("HW don't support CMAC encryption, "
+ "use software CMAC encryption\n"));
err = -EOPNOTSUPP;
goto out_unlock;
default:
@@ -1411,7 +1411,7 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
}
/* this function is called by mac80211 to flush tx buffer
- * before switch channle or power save, or tx buffer packet
+ * before switch channel or power save, or tx buffer packet
* maybe send after offchannel or rf sleep, this may cause
* dis-association by AP */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
diff --git a/drivers/staging/rtl8821ae/core.h b/drivers/staging/rtl8821ae/core.h
index 4b247db2861d..f0c74e9239fd 100644
--- a/drivers/staging/rtl8821ae/core.h
+++ b/drivers/staging/rtl8821ae/core.h
@@ -2,20 +2,20 @@
*
* Copyright(c) 2009-2010 Realtek Corporation.
*
- * Tmis program is free software; you can redistribute it and/or modify it
+ * This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
- * Tmis program is distributed in the hope that it will be useful, but WITHOUT
+ * This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * tmis program; if not, write to the Free Software Foundation, Inc.,
+ * this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
- * Tme full GNU General Public License is included in this distribution in the
+ * The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
diff --git a/drivers/staging/rtl8821ae/debug.c b/drivers/staging/rtl8821ae/debug.c
index cb051223c684..8a6c794bda41 100644
--- a/drivers/staging/rtl8821ae/debug.c
+++ b/drivers/staging/rtl8821ae/debug.c
@@ -2,20 +2,20 @@
*
* Copyright(c) 2009-2010 Realtek Corporation.
*
- * Tmis program is free software; you can redistribute it and/or modify it
+ * This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
- * Tmis program is distributed in the hope that it will be useful, but WITHOUT
+ * This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * tmis program; if not, write to the Free Software Foundation, Inc.,
+ * this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
- * Tme full GNU General Public License is included in this distribution in the
+ * The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
@@ -985,4 +985,4 @@ void rtl_proc_remove_topdir(void)
{
if (proc_topdir)
remove_proc_entry("rtlwifi", init_net.proc_net);
-} \ No newline at end of file
+}
diff --git a/drivers/staging/rtl8821ae/debug.h b/drivers/staging/rtl8821ae/debug.h
index 5eb6251b89da..6c0a553e98b7 100644
--- a/drivers/staging/rtl8821ae/debug.h
+++ b/drivers/staging/rtl8821ae/debug.h
@@ -2,20 +2,20 @@
*
* Copyright(c) 2009-2010 Realtek Corporation.
*
- * Tmis program is free software; you can redistribute it and/or modify it
+ * This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
- * Tmis program is distributed in the hope that it will be useful, but WITHOUT
+ * This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * tmis program; if not, write to the Free Software Foundation, Inc.,
+ * this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
- * Tme full GNU General Public License is included in this distribution in the
+ * The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
@@ -44,7 +44,7 @@
#define DBG_EMERG 0
/*
- *Abnormal, rare, or unexpeted cases.
+ *Abnormal, rare, or unexpected cases.
*For example, Packet/IO Ctl canceled,
*device suprisely unremoved and so on.
*/
@@ -54,7 +54,7 @@
*Normal case driver developer should
*open, we can see link status like
*assoc/AddBA/DHCP/adapter start and
- *so on basic and useful infromations.
+ *so on basic and useful information.
*/
#define DBG_DMESG 3
diff --git a/drivers/staging/rtl8821ae/efuse.c b/drivers/staging/rtl8821ae/efuse.c
index 74c19ecc95a9..250aae1ce631 100644
--- a/drivers/staging/rtl8821ae/efuse.c
+++ b/drivers/staging/rtl8821ae/efuse.c
@@ -2,20 +2,20 @@
*
* Copyright(c) 2009-2010 Realtek Corporation.
*
- * Tmis program is free software; you can redistribute it and/or modify it
+ * This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
- * Tmis program is distributed in the hope that it will be useful, but WITHOUT
+ * This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * tmis program; if not, write to the Free Software Foundation, Inc.,
+ * this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
- * Tme full GNU General Public License is included in this distribution in the
+ * The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
@@ -149,7 +149,7 @@ u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address)
return 0xFF;
}
-//EXPORT_SYMBOL(efuse_read_1byte);
+/* EXPORT_SYMBOL(efuse_read_1byte); */
void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
{
@@ -517,7 +517,7 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
}
-//EXPORT_SYMBOL(rtl_efuse_shadow_map_update);
+/* EXPORT_SYMBOL(rtl_efuse_shadow_map_update); */
void efuse_force_write_vendor_Id(struct ieee80211_hw *hw)
{
@@ -628,7 +628,7 @@ int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data)
}
return bresult;
}
-//EXPORT_SYMBOL(efuse_one_byte_read);
+/* EXPORT_SYMBOL(efuse_one_byte_read); */
static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
{
@@ -1120,16 +1120,16 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate)
{
rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0x69);
- // 1.2V Power: From VDDON with Power Cut(0x0000h[15]), defualt valid
+ /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), default valid */
tmpV16 = rtl_read_word(rtlpriv,
rtlpriv->cfg->maps[SYS_ISO_CTRL]);
printk("SYS_ISO_CTRL=%04x.\n",tmpV16);
if( ! (tmpV16 & PWC_EV12V ) ){
tmpV16 |= PWC_EV12V ;
- //PlatformEFIOWrite2Byte(pAdapter,REG_SYS_ISO_CTRL,tmpV16);
+ /* PlatformEFIOWrite2Byte(pAdapter,REG_SYS_ISO_CTRL,tmpV16); */
}
- // Reset: 0x0000h[28], default valid
+ /* Reset: 0x0000h[28], default valid */
tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]);
printk("SYS_FUNC_EN=%04x.\n",tmpV16);
if( !(tmpV16 & FEN_ELDR) ){
@@ -1137,7 +1137,7 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate)
rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16);
}
- // Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid
+ /* Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid */
tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK] );
printk("SYS_CLK=%04x.\n",tmpV16);
if( (!(tmpV16 & LOADER_CLK_EN) ) ||(!(tmpV16 & ANA8M) ) )
@@ -1148,7 +1148,7 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate)
if(bwrite == true)
{
- // Enable LDO 2.5V before read/write action
+ /* Enable LDO 2.5V before read/write action */
tempval = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
printk("EFUSE_TEST=%04x.\n",tmpV16);
tempval &= ~(BIT(3) | BIT(4) |BIT(5) | BIT(6));
@@ -1161,7 +1161,7 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate)
{
rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0x00);
if(bwrite == true){
- // Disable LDO 2.5V after read/write action
+ /* Disable LDO 2.5V after read/write action */
tempval = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, (tempval & 0x7F));
}
diff --git a/drivers/staging/rtl8821ae/pci.c b/drivers/staging/rtl8821ae/pci.c
index cfa651edd238..a562aa60d595 100644
--- a/drivers/staging/rtl8821ae/pci.c
+++ b/drivers/staging/rtl8821ae/pci.c
@@ -388,13 +388,13 @@ static u8 _rtl_pci_get_pciehdr_offset(struct ieee80211_hw *hw)
* capability that we are looking for, follow the link to
* the next capability and continue looping.
*/
- rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS ,
- pcicfg_addr_port +
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS ,
+ pcicfg_addr_port +
(num4bytes << 2));
rtl_pci_raw_read_port_ushort(PCI_CONF_DATA,
(u16*)&capability_hdr);
/* Found the PCI express capability. */
- if (capability_hdr.capability_id ==
+ if (capability_hdr.capability_id ==
PCI_CAPABILITY_ID_PCI_EXPRESS)
break;
else
@@ -418,7 +418,7 @@ bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
list_for_each_entry(temp_priv, &rtlpriv->glb_var->glb_priv_list,
list) {
if (temp_priv) {
- temp_pcipriv =
+ temp_pcipriv =
(struct rtl_pci_priv *)temp_priv->priv;
RT_TRACE(COMP_INIT, DBG_LOUD,
(("pcipriv->ndis_adapter.funcnumber %x \n"),
@@ -526,8 +526,8 @@ static void _rtl_pci_io_handler_init(struct device *dev,
}
static bool _rtl_pci_update_earlymode_info(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct rtl_tcb_desc *tcb_desc,
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *tcb_desc,
u8 tid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -535,7 +535,7 @@ static bool _rtl_pci_update_earlymode_info(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 additionlen = FCS_LEN;
struct sk_buff *next_skb;
-
+
/* here open is 4, wep/tkip is 8, aes is 12*/
if (info->control.hw_key)
additionlen += info->control.hw_key->icv_len;
@@ -544,7 +544,7 @@ static bool _rtl_pci_update_earlymode_info(struct ieee80211_hw *hw,
tcb_desc->empkt_num = 0;
spin_lock_bh(&rtlpriv->locks.waitq_lock);
skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
- struct ieee80211_tx_info *next_info =
+ struct ieee80211_tx_info *next_info =
IEEE80211_SKB_CB(next_skb);
if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
tcb_desc->empkt_len[tcb_desc->empkt_num] =
@@ -554,7 +554,7 @@ static bool _rtl_pci_update_earlymode_info(struct ieee80211_hw *hw,
break;
}
- if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
+ if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
next_skb))
break;
@@ -575,26 +575,26 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
struct sk_buff *skb = NULL;
struct ieee80211_tx_info *info = NULL;
int tid; /* should be int */
-
+
if (!rtlpriv->rtlhal.b_earlymode_enable)
- return;
+ return;
if (rtlpriv->dm.supp_phymode_switch &&
(rtlpriv->easy_concurrent_ctl.bswitch_in_process ||
- (rtlpriv->buddy_priv &&
+ (rtlpriv->buddy_priv &&
rtlpriv->buddy_priv->easy_concurrent_ctl.bswitch_in_process)))
return;
- /* we juse use em for BE/BK/VI/VO */
+ /* we just use em for BE/BK/VI/VO */
for (tid = 7; tid >= 0; tid--) {
u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
- while (!mac->act_scanning &&
+ while (!mac->act_scanning &&
rtlpriv->psc.rfpwr_state == ERFON) {
struct rtl_tcb_desc tcb_desc;
- memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
spin_lock_bh(&rtlpriv->locks.waitq_lock);
if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
- (ring->entries - skb_queue_len(&ring->queue) >
+ (ring->entries - skb_queue_len(&ring->queue) >
rtlhal->max_earlymode_num)) {
skb = skb_dequeue(&mac->skb_waitq[tid]);
} else {
@@ -607,7 +607,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
* multicast/broadcast/no_qos data */
info = IEEE80211_SKB_CB(skb);
if (info->flags & IEEE80211_TX_CTL_AMPDU)
- _rtl_pci_update_earlymode_info(hw, skb,
+ _rtl_pci_update_earlymode_info(hw, skb,
&tcb_desc, tid);
/*<delete in kernel start>*/
@@ -635,7 +635,7 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
u8 tid;
u8 *entry;
-
+
if (rtlpriv->use_new_trx_flow)
entry = (u8 *)(&ring->buffer_desc[ring->idx]);
else
@@ -643,11 +643,11 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
return;
-
+
ring->idx = (ring->idx + 1) % ring->entries;
-
+
skb = __skb_dequeue(&ring->queue);
-
+
pci_unmap_single(rtlpci->pdev,
le32_to_cpu(rtlpriv->cfg->ops->
get_desc((u8 *) entry, true,
@@ -672,7 +672,7 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
}
/* for sw LPS, just after NULL skb send out, we can
- * sure AP kown we are sleeped, our we should not let
+ * be sure the AP knows we are asleep; otherwise we should not allow
* rf to sleep*/
fc = rtl_get_fc(skb);
if (ieee80211_is_nullfunc(fc)) {
@@ -740,7 +740,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
u8 tmp_one = 1;
struct sk_buff *skb;
- skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (!skb)
return 0;
rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
@@ -754,25 +754,25 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
return 0;
if (rtlpriv->use_new_trx_flow) {
- rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
HW_DESC_RX_PREPARE,
(u8 *) & bufferaddress);
- } else {
- rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ } else {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
HW_DESC_RXBUFF_ADDR,
(u8 *) & bufferaddress);
- rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
HW_DESC_RXPKT_LEN,
(u8 *) & rtlpci->rxbuffersize);
- rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
HW_DESC_RXOWN,
(u8 *) & tmp_one);
}
-
+
return 1;
}
-/* inorder to receive 8K AMSDU we have set skb to
+/* In order to receive 8K AMSDU we have set skb to
* 9100bytes in init rx ring, but if this packet is
* not a AMSDU, this so big packet will be sent to
* TCP/IP directly, this cause big packet ping fail
@@ -783,7 +783,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
/* but some platform will fail when alloc skb sometimes.
* in this condition, we will send the old skb to
* mac80211 directly, this will not cause any other
- * issues, but only be losted by TCP/IP */
+ * issues, but only be lost by TCP/IP */
static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
struct sk_buff *skb, struct ieee80211_rx_status rx_status)
{
@@ -792,7 +792,7 @@ static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
} else {
struct sk_buff *uskb = NULL;
u8 *pdata;
-
+
uskb = dev_alloc_skb(skb->len + 128);
if (likely(uskb)) {
memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
@@ -804,7 +804,7 @@ static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
ieee80211_rx_irqsafe(hw, uskb);
} else {
ieee80211_rx_irqsafe(hw, skb);
- }
+ }
}
}
@@ -814,11 +814,11 @@ static void _rtl_pci_hs_interrupt(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR],
- rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) |
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR],
+ rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) |
rtlpci->sys_irq_mask);
-
+
}
static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
{
@@ -839,7 +839,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
.noise = -98,
.rate = 0,
};
-
+
/*RX NORMAL PKT */
while (count--) {
struct ieee80211_hdr *hdr;
@@ -852,65 +852,65 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
/*rx pkt */
struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
rtlpci->rx_ring[rxring_idx].idx];
-
+
if (rtlpriv->use_new_trx_flow) {
- rx_remained_cnt =
+ rx_remained_cnt =
rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
hw_queue);
- if (rx_remained_cnt < 1)
+ if (rx_remained_cnt < 1)
return;
-
+
} else { /* rx descriptor */
pdesc = &rtlpci->rx_ring[rxring_idx].desc[
rtlpci->rx_ring[rxring_idx].idx];
-
+
own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
- false,
+ false,
HW_DESC_OWN);
if (own) /* wait data to be filled by hardware */
return;
}
-
+
/* Get here means: data is filled already*/
/* AAAAAAttention !!!
* We can NOT access 'skb' before 'pci_unmap_single' */
pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb),
rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
-
+
if (rtlpriv->use_new_trx_flow) {
buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
rtlpci->rx_ring[rxring_idx].idx];
/*means rx wifi info*/
pdesc = (struct rtl_rx_desc *)skb->data;
}
-
+
rtlpriv->cfg->ops->query_rx_desc(hw, &status,
&rx_status, (u8 *) pdesc, skb);
-
+
if (rtlpriv->use_new_trx_flow)
- rtlpriv->cfg->ops->rx_check_dma_ok(hw,
- (u8 *)buffer_desc,
+ rtlpriv->cfg->ops->rx_check_dma_ok(hw,
+ (u8 *)buffer_desc,
hw_queue);
-
- len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false,
+
+ len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false,
HW_DESC_RXPKT_LEN);
-
+
if (skb->end - skb->tail > len) {
skb_put(skb, len);
- if (rtlpriv->use_new_trx_flow)
- skb_reserve(skb, status.rx_drvinfo_size +
+ if (rtlpriv->use_new_trx_flow)
+ skb_reserve(skb, status.rx_drvinfo_size +
status.rx_bufshift + 24);
else
- skb_reserve(skb, status.rx_drvinfo_size +
+ skb_reserve(skb, status.rx_drvinfo_size +
status.rx_bufshift);
} else {
- printk("skb->end - skb->tail = %d, len is %d\n",
+ printk("skb->end - skb->tail = %d, len is %d\n",
skb->end - skb->tail, len);
break;
}
-
+
rtlpriv->cfg->ops->rx_command_packet_handler(hw, status, skb);
/*
@@ -922,9 +922,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
hdr = rtl_get_hdr(skb);
fc = rtl_get_fc(skb);
-
+
if (!status.b_crc && !status.b_hwerror) {
- memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
sizeof(rx_status));
if (is_broadcast_ether_addr(hdr->addr1)) {
@@ -947,13 +947,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
/* static bcn for roaming */
rtl_beacon_statistic(hw, skb);
- rtl_p2p_info(hw, (void*)skb->data, skb->len);
+ rtl_p2p_info(hw, (void*)skb->data, skb->len);
/* for sw lps */
rtl_swlps_beacon(hw, (void*)skb->data, skb->len);
rtl_recognize_peer(hw, (void*)skb->data, skb->len);
if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
(rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)&&
- (ieee80211_is_beacon(fc) ||
+ (ieee80211_is_beacon(fc) ||
ieee80211_is_probe_resp(fc))) {
dev_kfree_skb_any(skb);
} else {
@@ -964,13 +964,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
}
if (rtlpriv->use_new_trx_flow) {
rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
- rtlpci->rx_ring[hw_queue].next_rx_rp %=
+ rtlpci->rx_ring[hw_queue].next_rx_rp %=
RTL_PCI_MAX_RX_COUNT;
rx_remained_cnt--;
if (1/*rx_remained_cnt == 0*/) {
- rtl_write_word(rtlpriv, 0x3B4,
+ rtl_write_word(rtlpriv, 0x3B4,
rtlpci->rx_ring[hw_queue].next_rx_rp);
}
}
@@ -981,22 +981,22 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
}
if (rtlpriv->use_new_trx_flow) {
- _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+ _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
rxring_idx,
- rtlpci->rx_ring[rxring_idx].idx);
+ rtlpci->rx_ring[rxring_idx].idx);
} else {
_rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
- rtlpci->rx_ring[rxring_idx].idx);
+ rtlpci->rx_ring[rxring_idx].idx);
- if (rtlpci->rx_ring[rxring_idx].idx ==
+ if (rtlpci->rx_ring[rxring_idx].idx ==
rtlpci->rxringcount - 1)
- rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc,
- false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc,
+ false,
HW_DESC_RXERO,
(u8 *) & tmp_one);
}
- rtlpci->rx_ring[rxring_idx].idx =
- (rtlpci->rx_ring[rxring_idx].idx + 1) %
+ rtlpci->rx_ring[rxring_idx].idx =
+ (rtlpci->rx_ring[rxring_idx].idx + 1) %
rtlpci->rxringcount;
}
}
@@ -1011,7 +1011,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
u32 inta = 0;
u32 intb = 0;
-
+
if (rtlpci->irq_enabled == 0)
return IRQ_HANDLED;
@@ -1020,7 +1020,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR], 0x0);
-
+
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE], 0x0);
@@ -1029,7 +1029,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
- /*Shared IRQ or HW disappared */
+ /*Shared IRQ or HW disappeared */
if (!inta || inta == 0xffff)
goto done;
/*<1> beacon related */
@@ -1127,7 +1127,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
/*<4> fw related*/
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
- RT_TRACE(COMP_INTR, DBG_TRACE,
+ RT_TRACE(COMP_INTR, DBG_TRACE,
("firmware interrupt!\n"));
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.fwevt_wq, 0);
@@ -1142,12 +1142,12 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
- RT_TRACE(COMP_INTR, DBG_TRACE,
+ RT_TRACE(COMP_INTR, DBG_TRACE,
("hsisr interrupt!\n"));
_rtl_pci_hs_interrupt(hw);
}
}
-
+
if(rtlpriv->rtlhal.b_earlymode_enable)
tasklet_schedule(&rtlpriv->works.irq_tasklet);
@@ -1157,7 +1157,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE],
rtlpci->irq_mask[1]);
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-
+
return IRQ_HANDLED;
done:
@@ -1200,16 +1200,16 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
pdesc = &ring->desc[0];
if (rtlpriv->use_new_trx_flow)
pbuffer_desc = &ring->buffer_desc[0];
-
+
/*<delete in kernel start>*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
- rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
- (u8 *)pbuffer_desc, info, pskb,
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ (u8 *)pbuffer_desc, info, pskb,
BEACON_QUEUE, &tcb_desc);
#else
/*<delete in kernel end>*/
rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
- (u8 *)pbuffer_desc, info, NULL, pskb,
+ (u8 *)pbuffer_desc, info, NULL, pskb,
BEACON_QUEUE, &tcb_desc);
/*<delete in kernel start>*/
#endif
@@ -1235,7 +1235,7 @@ static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
desc_num = TX_DESC_NUM_92E;
else
desc_num = RT_TXDESC_NUM;
-
+
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
rtlpci->txringcount[i] = desc_num;
}
@@ -1309,12 +1309,12 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
/* alloc tx buffer desc for new trx flow*/
if (rtlpriv->use_new_trx_flow) {
buffer_desc = pci_alloc_consistent(rtlpci->pdev,
- sizeof(*buffer_desc) * entries,
+ sizeof(*buffer_desc) * entries,
&buffer_desc_dma);
if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
RT_TRACE(COMP_ERR, DBG_EMERG,
- ("Cannot allocate TX ring (prio = %d)\n",
+ ("Cannot allocate TX ring (prio = %d)\n",
prio));
return -ENOMEM;
}
@@ -1322,13 +1322,13 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
memset(buffer_desc, 0, sizeof(*buffer_desc) * entries);
rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;
-
+
rtlpci->tx_ring[prio].cur_tx_rp = 0;
rtlpci->tx_ring[prio].cur_tx_wp = 0;
rtlpci->tx_ring[prio].avl_desc = entries;
}
-
+
/* alloc dma for this ring */
desc = pci_alloc_consistent(rtlpci->pdev,
sizeof(*desc) * entries, &desc_dma);
@@ -1342,7 +1342,7 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
memset(desc, 0, sizeof(*desc) * entries);
rtlpci->tx_ring[prio].desc = desc;
rtlpci->tx_ring[prio].dma = desc_dma;
-
+
rtlpci->tx_ring[prio].idx = 0;
rtlpci->tx_ring[prio].entries = entries;
skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
@@ -1357,7 +1357,7 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
sizeof(*desc));
rtlpriv->cfg->ops->set_desc(hw, (u8 *) & (desc[i]),
- true,
+ true,
HW_DESC_TX_NEXTDESC_ADDR,
(u8 *) & nextdescaddress);
}
@@ -1371,15 +1371,15 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
struct rtl_priv *rtlpriv = rtl_priv(hw);
int i;
-
+
if (rtlpriv->use_new_trx_flow) {
struct rtl_rx_buffer_desc *entry = NULL;
/* alloc dma for this ring */
- rtlpci->rx_ring[rxring_idx].buffer_desc =
+ rtlpci->rx_ring[rxring_idx].buffer_desc =
pci_alloc_consistent(rtlpci->pdev,
sizeof(*rtlpci->rx_ring[rxring_idx].
- buffer_desc) *
- rtlpci->rxringcount,
+ buffer_desc) *
+ rtlpci->rxringcount,
&rtlpci->rx_ring[rxring_idx].dma);
if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
(unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
@@ -1393,9 +1393,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
/* init every desc in this ring */
rtlpci->rx_ring[rxring_idx].idx = 0;
- for (i = 0; i < rtlpci->rxringcount; i++) {
+ for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
rxring_idx, i))
return -ENOMEM;
}
@@ -1403,14 +1403,14 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
struct rtl_rx_desc *entry = NULL;
u8 tmp_one = 1;
/* alloc dma for this ring */
- rtlpci->rx_ring[rxring_idx].desc =
+ rtlpci->rx_ring[rxring_idx].desc =
pci_alloc_consistent(rtlpci->pdev,
sizeof(*rtlpci->rx_ring[rxring_idx].
- desc) * rtlpci->rxringcount,
+ desc) * rtlpci->rxringcount,
&rtlpci->rx_ring[rxring_idx].dma);
if (!rtlpci->rx_ring[rxring_idx].desc ||
(unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
- RT_TRACE(COMP_ERR, DBG_EMERG,
+ RT_TRACE(COMP_ERR, DBG_EMERG,
("Cannot allocate RX ring\n"));
return -ENOMEM;
}
@@ -1421,9 +1421,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
/* init every desc in this ring */
rtlpci->rx_ring[rxring_idx].idx = 0;
- for (i = 0; i < rtlpci->rxringcount; i++) {
+ for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
rxring_idx, i))
return -ENOMEM;
}
@@ -1467,7 +1467,7 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
sizeof(*ring->buffer_desc) * ring->entries,
ring->buffer_desc, ring->buffer_desc_dma);
ring->buffer_desc = NULL;
- }
+ }
}
static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
@@ -1532,7 +1532,7 @@ err_free_rings:
_rtl_pci_free_rx_ring(hw, rxring_idx);
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
- if (rtlpci->tx_ring[i].desc ||
+ if (rtlpci->tx_ring[i].desc ||
rtlpci->tx_ring[i].buffer_desc)
_rtl_pci_free_tx_ring(hw, i);
@@ -1567,16 +1567,16 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
/* force the rx_ring[RX_MPDU_QUEUE/
* RX_CMD_QUEUE].idx to the first one */
/*new trx flow, do nothing*/
- if ((rtlpriv->use_new_trx_flow == false) &&
+ if ((rtlpriv->use_new_trx_flow == false) &&
rtlpci->rx_ring[rxring_idx].desc) {
struct rtl_rx_desc *entry = NULL;
for (i = 0; i < rtlpci->rxringcount; i++) {
- entry = &rtlpci->rx_ring[rxring_idx].desc[i];
- rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry,
+ entry = &rtlpci->rx_ring[rxring_idx].desc[i];
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry,
false,
- HW_DESC_RXOWN,
- (u8 *) & tmp_one);
+ HW_DESC_RXOWN,
+ (u8 *) & tmp_one);
}
}
rtlpci->rx_ring[rxring_idx].idx = 0; }
@@ -1585,13 +1585,13 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
* and force the tx idx to the first one */
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
- if (rtlpci->tx_ring[i].desc ||
+ if (rtlpci->tx_ring[i].desc ||
rtlpci->tx_ring[i].buffer_desc) {
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
while (skb_queue_len(&ring->queue)) {
u8 *entry;
- struct sk_buff *skb =
+ struct sk_buff *skb =
__skb_dequeue(&ring->queue);
if (rtlpriv->use_new_trx_flow)
entry = (u8 *)(&ring->buffer_desc
@@ -1618,7 +1618,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
/*<delete in kernel start>*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
-static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
struct sk_buff *skb)
#else
/*<delete in kernel end>*/
@@ -1741,10 +1741,10 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
}
pdesc = &ring->desc[idx];
-
+
if (rtlpriv->use_new_trx_flow) {
ptx_bd_desc = &ring->buffer_desc[idx];
- } else {
+ } else {
own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
true, HW_DESC_OWN);
@@ -1755,17 +1755,17 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
hw_queue, ring->idx, idx,
skb_queue_len(&ring->queue)));
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
flags);
return skb->len;
}
}
-
+
if (ieee80211_is_data_qos(fc)) {
tid = rtl_get_tid(skb);
if (sta) {
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- seq_number = (le16_to_cpu(hdr->seq_ctrl) &
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) &
IEEE80211_SCTL_SEQ) >> 4;
seq_number += 1;
@@ -1779,13 +1779,13 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
/*<delete in kernel start>*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
- rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
- (u8 *)ptx_bd_desc, info, skb,
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ (u8 *)ptx_bd_desc, info, skb,
hw_queue, ptcb_desc);
#else
/*<delete in kernel end>*/
- rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
- (u8 *)ptx_bd_desc, info, sta, skb,
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ (u8 *)ptx_bd_desc, info, sta, skb,
hw_queue, ptcb_desc);
/*<delete in kernel start>*/
#endif
@@ -1832,10 +1832,10 @@ static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
u16 i = 0;
int queue_id;
struct rtl8192_tx_ring *ring;
-
+
if (mac->skip_scan)
return;
-
+
for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
u32 queue_len;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
@@ -1936,7 +1936,7 @@ void rtl_pci_stop(struct ieee80211_hw *hw)
u8 RFInProgressTimeOut = 0;
/*
- *should before disable interrrupt&adapter
+ *should be done before disabling interrupt & adapter
*and will do it immediately.
*/
set_hal_stop(rtlhal);
@@ -2081,13 +2081,13 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
rtlhal->interfaceindex = 0;
}
}
-
+
/* 92ee use new trx flow */
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
rtlpriv->use_new_trx_flow = true;
else
rtlpriv->use_new_trx_flow = false;
-
+
/*find bus info */
pcipriv->ndis_adapter.busnumber = pdev->bus->number;
pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
@@ -2095,7 +2095,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
/*find bridge info */
pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
- /* some ARM have no bridge_pdev and will crash here
+ /* some ARM have no bridge_pdev and will crash here
* so we should check if bridge_pdev is NULL */
if (bridge_pdev) {
pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
@@ -2187,7 +2187,7 @@ static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
}
rtlpci->using_msi = true;
-
+
RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG, ("MSI Interrupt Mode!\n"));
return 0;
}
@@ -2198,7 +2198,7 @@ static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
int ret;
-
+
ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
IRQF_SHARED, KBUILD_MODNAME, hw);
if (ret < 0) {
@@ -2206,7 +2206,7 @@ static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
}
rtlpci->using_msi = false;
- RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG,
+ RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG,
("Pin-based Interrupt Mode!\n"));
return 0;
}
@@ -2293,7 +2293,7 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
/*
*init dbgp flags before all
*other functions, because we will
- *use it in other funtions like
+ *use it in other functions like
*RT_TRACE/RT_PRINT/RTL_PRINT_DATA
*you can not use these macro
*before this
@@ -2377,7 +2377,7 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
} else {
rtlpriv->mac80211.mac80211_registered = 1;
}
- /* the wiphy must have been registed to
+ /* the wiphy must have been registered to
* cfg80211 prior to regulatory_hint */
if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2)) {
RT_TRACE(COMP_ERR, DBG_WARNING, ("regulatory_hint fail\n"));
@@ -2428,13 +2428,13 @@ fail1:
return -ENODEV;
}
-//EXPORT_SYMBOL(rtl_pci_probe);
+/* EXPORT_SYMBOL(rtl_pci_probe); */
struct ieee80211_hw *rtl_pci_get_hw_pointer(void)
{
return hw_export;
}
-//EXPORT_SYMBOL(rtl_pci_get_hw_pointer);
+/* EXPORT_SYMBOL(rtl_pci_get_hw_pointer); */
void rtl_pci_disconnect(struct pci_dev *pdev)
{
@@ -2450,7 +2450,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
/* add for prov */
rtl_proc_remove_one(hw);
-
+
/*ieee80211_unregister_hw will call ops_stop */
if (rtlmac->mac80211_registered == 1) {
@@ -2491,7 +2491,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
ieee80211_free_hw(hw);
}
-//EXPORT_SYMBOL(rtl_pci_disconnect);
+/* EXPORT_SYMBOL(rtl_pci_disconnect); */
/***************************************
kernel pci power state define:
@@ -2519,7 +2519,7 @@ int rtl_pci_suspend(struct device *dev)
return 0;
}
-//EXPORT_SYMBOL(rtl_pci_suspend);
+/* EXPORT_SYMBOL(rtl_pci_suspend); */
int rtl_pci_resume(struct device *dev)
{
@@ -2529,10 +2529,10 @@ int rtl_pci_resume(struct device *dev)
rtlpriv->cfg->ops->hw_resume(hw);
rtl_init_rfkill(hw);
-
+
return 0;
}
-//EXPORT_SYMBOL(rtl_pci_resume);
+/* EXPORT_SYMBOL(rtl_pci_resume); */
struct rtl_intf_ops rtl_pci_ops = {
.read_efuse_byte = read_efuse_byte,
diff --git a/drivers/staging/rtl8821ae/pci.h b/drivers/staging/rtl8821ae/pci.h
index 9f206550a657..06eaa521e0eb 100644
--- a/drivers/staging/rtl8821ae/pci.h
+++ b/drivers/staging/rtl8821ae/pci.h
@@ -148,11 +148,11 @@ struct rtl_pci_capabilities_header {
* RX wifi info == RX descriptor in old flow */
struct rtl_tx_buffer_desc {
#if (RTL8192EE_SEG_NUM == 2)
- u32 dword[2*(DMA_IS_64BIT + 1)*8]; //seg = 8
+ u32 dword[2*(DMA_IS_64BIT + 1)*8]; /* seg = 8 */
#elif (RTL8192EE_SEG_NUM == 1)
- u32 dword[2*(DMA_IS_64BIT + 1)*4]; //seg = 4
+ u32 dword[2*(DMA_IS_64BIT + 1)*4]; /* seg = 4 */
#elif (RTL8192EE_SEG_NUM == 0)
- u32 dword[2*(DMA_IS_64BIT + 1)*2]; //seg = 2
+ u32 dword[2*(DMA_IS_64BIT + 1)*2]; /* seg = 2 */
#endif
} __packed;
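The dword[] sizing above works out to 2 * (DMA_IS_64BIT + 1) * seg u32 words per buffer descriptor; with the values wifi.h currently sets (DMA_IS_64BIT == 0, RTL8192EE_SEG_NUM == 1, i.e. 4 segments) that is 2 * 1 * 4 = 8 dwords, or 32 bytes. A minimal sketch of a compile-time check of that arithmetic, assuming it sits inside some function such as probe (the check itself is not part of this patch):

	/* hypothetical sanity check, not from the driver: 8 dwords = 32 bytes
	 * when DMA_IS_64BIT == 0 and RTL8192EE_SEG_NUM == 1 */
	BUILD_BUG_ON(sizeof(struct rtl_tx_buffer_desc) != 32);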
@@ -187,7 +187,7 @@ struct rtl8192_tx_ring {
};
struct rtl8192_rx_ring {
- struct rtl_rx_desc *desc;/*for old trx flow, not uesd in new trx*/
+ struct rtl_rx_desc *desc;/*for old trx flow, not used in new trx*/
/*dma matches either 'desc' or 'buffer_desc'*/
dma_addr_t dma;
unsigned int idx;
diff --git a/drivers/staging/rtl8821ae/ps.c b/drivers/staging/rtl8821ae/ps.c
index f12ffa83c58d..7876442417f4 100644
--- a/drivers/staging/rtl8821ae/ps.c
+++ b/drivers/staging/rtl8821ae/ps.c
@@ -257,7 +257,7 @@ void rtl_ips_nic_off_wq_callback(void *data)
*Do not enter IPS in the following conditions:
*(1) RF is already OFF or Sleep
*(2) b_swrf_processing (indicates the IPS is still under going)
- *(3) Connectted (only disconnected can trigger IPS)
+ *(3) Connected (only disconnected can trigger IPS)
*(4) IBSS (send Beacon)
*(5) AP mode (send Beacon)
*(6) monitor mode (rcv packet)
diff --git a/drivers/staging/rtl8821ae/rc.c b/drivers/staging/rtl8821ae/rc.c
index d387f13ea7dc..0cc32c60ddee 100644
--- a/drivers/staging/rtl8821ae/rc.c
+++ b/drivers/staging/rtl8821ae/rc.c
@@ -286,7 +286,6 @@ static void rtl_rate_free_sta(void *rtlpriv,
}
static struct rate_control_ops rtl_rate_ops = {
- .module = NULL,
.name = "rtl_rc",
.alloc = rtl_rate_alloc,
.free = rtl_rate_free,
diff --git a/drivers/staging/rtl8821ae/regd.c b/drivers/staging/rtl8821ae/regd.c
index d89f15cb8089..0a4b3984b7ef 100644
--- a/drivers/staging/rtl8821ae/regd.c
+++ b/drivers/staging/rtl8821ae/regd.c
@@ -243,7 +243,7 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
}
/*
- *If a country IE has been recieved check its rule for this
+ *If a country IE has been received check its rule for this
*channel first before enabling active scan. The passive scan
*would have been enforced by the initial processing of our
*custom regulatory domain.
@@ -455,7 +455,7 @@ int rtl_regd_init(struct ieee80211_hw *hw,
if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
RT_TRACE(COMP_REGD, DBG_DMESG,
- (KERN_DEBUG "rtl: EEPROM indicates invalid contry code"
+ (KERN_DEBUG "rtl: EEPROM indicates invalid country code"
"world wide 13 should be used\n"));
rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
diff --git a/drivers/staging/rtl8821ae/regd.h b/drivers/staging/rtl8821ae/regd.h
index abc60ab8165c..dceb3f18200b 100644
--- a/drivers/staging/rtl8821ae/regd.h
+++ b/drivers/staging/rtl8821ae/regd.h
@@ -30,8 +30,8 @@
#ifndef __RTL_REGD_H__
#define __RTL_REGD_H__
-#define IEEE80211_CHAN_NO_IBSS 1<<2
-#define IEEE80211_CHAN_PASSIVE_SCAN 1<<1
+#define IEEE80211_CHAN_NO_IBSS (1 << 2)
+#define IEEE80211_CHAN_PASSIVE_SCAN (1 << 1)
#define WIPHY_FLAG_CUSTOM_REGULATORY BIT(0)
#define WIPHY_FLAG_STRICT_REGULATORY BIT(1)
#define WIPHY_FLAG_DISABLE_BEACON_HINTS BIT(2)
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/dm.c b/drivers/staging/rtl8821ae/rtl8821ae/dm.c
index 8634206b8929..e0efcd281dfe 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/dm.c
+++ b/drivers/staging/rtl8821ae/rtl8821ae/dm.c
@@ -731,7 +731,7 @@ void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw)
rtl_dm_dig->min_undecorated_pwdb_for_dm =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
- ("AP Ext Port or disconnet PWDB = 0x%x \n",
+ ("AP Ext Port or disconnect PWDB = 0x%x \n",
rtl_dm_dig->min_undecorated_pwdb_for_dm));
}
RT_TRACE(COMP_DIG, DBG_LOUD, ("MinUndecoratedPWDBForDM =%d\n",
@@ -925,7 +925,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
RT_TRACE(COMP_DIG, DBG_LOUD,
- ("rtl8821ae_dm_dig(): Abnornally false alarm case. \n"));
+ ("rtl8821ae_dm_dig(): Abnormally false alarm case. \n"));
if (dm_digtable.large_fa_hit != 3)
dm_digtable.large_fa_hit++;
@@ -1087,7 +1087,7 @@ static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
else
falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail;
- /*reset OFDM FA coutner*/
+ /*reset OFDM FA counter*/
rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1);
rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0);
/* reset CCK FA counter*/
@@ -1316,7 +1316,7 @@ u8 rtl8812ae_hw_rate_to_mrate(
/*-----------------------------------------------------------------------------
* Function: odm_TxPwrTrackSetPwr88E()
*
- * Overview: 88E change all channel tx power accordign to flag.
+ * Overview: 88E change all channel tx power according to flag.
* OFDM & CCK are all different.
*
* Input: NONE
@@ -1537,7 +1537,7 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtldm->modify_txagc_flag_path_b = false;
RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
- ("******Path_B pDM_Odm->Modify_TxAGC_Flag = FALSE \n"));
+ ("******Path_B dm_Odm->Modify_TxAGC_Flag = FALSE \n"));
}
}
}
@@ -1654,7 +1654,7 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter
if (delta > 0 && rtldm->txpower_track_control)
{
- /*"delta" here is used to record the absolute value of differrence.*/
+ /*"delta" here is used to record the absolute value of difference.*/
delta = thermal_value > rtlefuse->eeprom_thermalmeter ? \
(thermal_value - rtlefuse->eeprom_thermalmeter) : \
(rtlefuse->eeprom_thermalmeter - thermal_value);
@@ -1976,7 +1976,7 @@ void rtl8821ae_phy_lccalibrate(
/*-----------------------------------------------------------------------------
* Function: odm_TxPwrTrackSetPwr88E()
*
- * Overview: 88E change all channel tx power accordign to flag.
+ * Overview: 88E change all channel tx power according to flag.
* OFDM & CCK are all different.
*
* Input: NONE
@@ -2159,7 +2159,7 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter
u8 *delta_swing_table_idx_tup_b;
u8 *delta_swing_table_idx_tdown_b;
- /*2. Initilization ( 7 steps in total )*/
+ /*2. Initialization ( 7 steps in total )*/
rtl8821ae_get_delta_swing_table(hw, (u8**)&delta_swing_table_idx_tup_a,
(u8**)&delta_swing_table_idx_tdown_a,
(u8**)&delta_swing_table_idx_tup_b,
@@ -2244,7 +2244,7 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter
if (delta > 0 && rtldm->txpower_track_control)
{
- /*"delta" here is used to record the absolute value of differrence.*/
+ /*"delta" here is used to record the absolute value of difference.*/
delta = thermal_value > rtlefuse->eeprom_thermalmeter ? \
(thermal_value - rtlefuse->eeprom_thermalmeter) : \
(rtlefuse->eeprom_thermalmeter - thermal_value);
@@ -2613,11 +2613,11 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
RT_TRACE(COMP_TURBO, DBG_LOUD,
("rtl8821ae_dm_check_edca_turbo=====>"));
RT_TRACE(COMP_TURBO, DBG_LOUD,
- ("Orginial BE PARAM: 0x%x\n",
+ ("Original BE PARAM: 0x%x\n",
rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N)));
/*===============================
- list paramter for different platform
+ list parameter for different platform
===============================*/
b_last_is_cur_rdl_state = rtlpriv->dm.bis_cur_rdlstate;
pb_is_cur_rdl_state = &( rtlpriv->dm.bis_cur_rdlstate);
@@ -2963,7 +2963,7 @@ void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
"Crystal cap = 0x%x, Crystal cap offset = %d\n",
rtldm->crystal_cap, adjust_xtal));
- /*3.Adjudt Crystal Cap.*/
+ /*3.Adjust Crystal Cap.*/
if (adjust_xtal != 0){
rtldm->is_freeze = 0;
rtldm->crystal_cap += adjust_xtal;
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/fw.c b/drivers/staging/rtl8821ae/rtl8821ae/fw.c
index 4083cab926a3..46eb4125d18f 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/fw.c
+++ b/drivers/staging/rtl8821ae/rtl8821ae/fw.c
@@ -164,7 +164,7 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw)
if (counter >= FW_8821AE_POLLING_TIMEOUT_COUNT) {
RT_TRACE(COMP_ERR, DBG_LOUD,
- ("chksum report faill ! REG_MCUFWDL:0x%08x .\n",
+ ("chksum report fail ! REG_MCUFWDL:0x%08x .\n",
value32));
goto exit;
}
@@ -368,7 +368,7 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
RT_TRACE(COMP_CMD, DBG_LOUD,
- ("Wating too long for FW read "
+ ("Waiting too long for FW read "
"clear HMEBox(%d)!\n", boxnum));
break;
}
@@ -378,7 +378,7 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
isfw_read = _rtl8821ae_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
RT_TRACE(COMP_CMD, DBG_LOUD,
- ("Wating for FW read clear HMEBox(%d)!!! "
+ ("Waiting for FW read clear HMEBox(%d)!!! "
"0x130 = %2x\n", boxnum, u1b_tmp));
}
}
@@ -1179,7 +1179,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
("Set RSVD page location to Fw FAIL!!!!!!.\n"));
}
-/*Shoud check FW support p2p or not.*/
+/*Should check whether FW supports p2p or not.*/
void rtl8821ae_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow)
{
u8 u1_ctwindow_period[1] ={ ctwindow};
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h
index 799cc6f95cc1..b365f82f481c 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h
@@ -142,7 +142,7 @@ void rtl8821ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw);
long rtl8821ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw);
void rtl8821ae_dm_bt_balance(struct ieee80211_hw *hw,
bool b_balance_on, u8 ms0, u8 ms1);
-void rtl8821ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 tyep);
+void rtl8821ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type);
void rtl8821ae_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type);
u8 rtl8821ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
u8 level_num, u8 rssi_thresh, u8 rssi_thresh1);
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c
index 79386ee142f9..7b1d113505fb 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c
@@ -157,7 +157,7 @@ bool rtl8821ae_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw)
&&(rtlpcipriv->btcoexist.previous_state_h
== rtlpcipriv->btcoexist.current_state_h)) {
RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
- ("[DM][BT], Coexist state do not chang!!\n"));
+ ("[DM][BT], Coexist state do not change!!\n"));
return true;
} else {
RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
@@ -902,7 +902,7 @@ void rtl8821ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8821ae *p_bt
/*
* Note:
- * We should add delay for making sure sw DacSwing can be set sucessfully.
+ * We should add delay for making sure sw DacSwing can be set successfully.
* because of that rtl8821ae_dm_bt_set_fw_2_ant_hid() and rtl8821ae_dm_bt_set_fw_tdma_ctrl()
* will overwrite the reg 0x880.
*/
@@ -2025,7 +2025,7 @@ void rtl_8821ae_c2h_command_handle(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x1AF, 0x00);
return;
}
- ptmp_buf = (u8 *) kmalloc(c2h_event.cmd_len, GFP_KERNEL);
+ ptmp_buf = kmalloc(c2h_event.cmd_len, GFP_KERNEL);
if(ptmp_buf == NULL) {
RT_TRACE(COMP_FW, DBG_TRACE, ("malloc cmd buf failed\n"));
return;
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hw.c b/drivers/staging/rtl8821ae/rtl8821ae/hw.c
index 5ed7a114c56b..1b8583b689d4 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/hw.c
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hw.c
@@ -147,6 +147,7 @@ static void _rtl8821ae_set_fw_clock_on(struct ieee80211_hw *hw,
} else {
rtlhal->bfw_clk_change_in_progress = false;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ break;
}
}
@@ -1016,7 +1017,7 @@ static void _rtl8821ae_hw_configure(struct ieee80211_hw *hw)
/* ARFB table 12 for 11ac 24G 1SS */
rtl_write_dword(rtlpriv, REG_ARFR3, 0x00000015);
rtl_write_dword(rtlpriv, REG_ARFR3 + 4, 0xffcff000);
- /* 0x420[7] = 0 , enable retry AMPDU in new AMPD not singal MPDU. */
+ /* 0x420[7] = 0, enable retry AMPDU in new AMPDU, not single MPDU. */
rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F00);
rtl_write_byte(rtlpriv, REG_AMPDU_MAX_TIME, 0x70);
@@ -1406,7 +1407,7 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
rtl8821ae_phy_mac_config(hw);
/* because last function modify RCR, so we update
* rcr var here, or TP will unstable for receive_config
- * is wrong, RX RCR_ACRC32 will cause TP unstabel & Rx
+ * is wrong, RX RCR_ACRC32 will cause TP unstable & Rx
* RCR_APP_ICV will cause mac80211 unassoc for cisco 1252
rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
@@ -1562,7 +1563,7 @@ static enum version_8821ae _rtl8821ae_read_chip_version(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(COMP_INIT, DBG_LOUD,
- ("Chip Version ID: Unknow (0x%X).\n", version));
+ ("Chip Version ID: Unknown (0x%X).\n", version));
break;
}
@@ -1622,7 +1623,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, (MSR), bt_msr);
rtlpriv->cfg->ops->led_control(hw, ledaction);
- if ((bt_msr & 0xfc) == MSR_AP)
+ if ((bt_msr & ~0xfc) == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
else
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
@@ -2371,7 +2372,7 @@ static void _rtl8812ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
if (rtlefuse->eeprom_channelplan == 0xff)
rtlefuse->eeprom_channelplan = 0x7F;
- /* set channel paln to world wide 13 */
+ /* set channel plan to world wide 13 */
//rtlefuse->channel_plan = (u8) rtlefuse->eeprom_channelplan;
/*parse xtal*/
@@ -2534,7 +2535,7 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
if (rtlefuse->eeprom_channelplan == 0xff)
rtlefuse->eeprom_channelplan = 0x7F;
- /* set channel paln to world wide 13 */
+ /* set channel plan to world wide 13 */
//rtlefuse->channel_plan = (u8) rtlefuse->eeprom_channelplan;
/*parse xtal*/
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/phy.c b/drivers/staging/rtl8821ae/rtl8821ae/phy.c
index d02fca38a2b2..1dd33016b4b9 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/phy.c
+++ b/drivers/staging/rtl8821ae/rtl8821ae/phy.c
@@ -86,7 +86,7 @@ void rtl8812ae_fixspur(
/* 0x8AC[11:10] = 2'b10*/
- /* <20120914, Kordan> A workarould to resolve
+ /* <20120914, Kordan> A workaround to resolve
2480Mhz spur by setting ADC clock as 160M. (Asked by Binson)*/
if (band_width == HT_CHANNEL_WIDTH_20 &&
(channel == 13 || channel == 14)) {
@@ -107,7 +107,7 @@ void rtl8812ae_fixspur(
}
else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
{
- /* <20120914, Kordan> A workarould to resolve
+ /* <20120914, Kordan> A workaround to resolve
2480Mhz spur by setting ADC clock as 160M. (Asked by Binson)*/
if (band_width == HT_CHANNEL_WIDTH_20 &&
(channel == 13 || channel == 14))
@@ -441,8 +441,8 @@ u32 phy_get_tx_bb_swing_8812A(
struct rtl_dm *rtldm = rtl_dm(rtlpriv);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- char bb_swing_2g = (char) (-1 * 0xFF);
- char bb_swing_5g = (char) (-1 * 0xFF);
+ char bb_swing_2g = (char) ((-1 * 0xFF) & 0xFF);
+ char bb_swing_5g = (char) ((-1 * 0xFF) & 0xFF);
u32 out = 0x200;
const char auto_temp = -1;
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/phy.h b/drivers/staging/rtl8821ae/rtl8821ae/phy.h
index a932d8c9d45d..a80bf739940a 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/phy.h
+++ b/drivers/staging/rtl8821ae/rtl8821ae/phy.h
@@ -30,7 +30,7 @@
#ifndef __RTL8821AE_PHY_H__
#define __RTL8821AE_PHY_H__
-/*It must always set to 4, otherwise read efuse table secquence will be wrong.*/
+/*It must always be set to 4, otherwise the efuse table read sequence will be wrong.*/
#define MAX_TX_COUNT 4
#define TX_1S 0
#define TX_2S 1
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h
index 8b39c042fa93..480a6bb6d76b 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h
@@ -81,7 +81,7 @@
{0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* 0x47[7:0] = 00 gpio mode */ \
{0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* suspend option all off */ \
{0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, BIT7},/*0x14[7] = 1 turn on ZCD */ \
- {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 trun on ZCD */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 turn on ZCD */ \
{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, BIT4},/*0x23[4] = 1 hpon LDO sleep mode */ \
{0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, BIT3}, /*0x04[11] = 2b'11 enable WL suspend for PCIe*/
@@ -91,7 +91,7 @@
/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0}, /*0x04[11] = 2b'01enable WL suspend*/ \
{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, 0},/*0x23[4] = 0 hpon LDO sleep mode leave */ \
- {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 trun off ZCD */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 turn off ZCD */ \
{0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, 0},/*0x14[7] = 0 turn off ZCD */ \
{0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio0~7 input mode */ \
{0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio11 input mode, gpio10~8 input mode */
@@ -110,7 +110,7 @@
{0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xff},/* gpio0~7 output mode */ \
{0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* 0x47[7:0] = 00 gpio mode */ \
{0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, BIT7},/*0x14[7] = 1 turn on ZCD */ \
- {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 trun on ZCD */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 turn on ZCD */ \
{0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/*0x12[0] = 0 force PFM mode */ \
{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, BIT4},/*0x23[4] = 1 hpon LDO sleep mode */ \
{0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */ \
@@ -124,7 +124,7 @@
/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
{0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*0x12[0] = 1 force PWM mode */ \
{0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, 0},/*0x14[7] = 0 turn off ZCD */ \
- {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 trun off ZCD */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 turn off ZCD */ \
{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, 0},/*0x23[4] = 0 hpon LDO leave sleep mode */ \
{0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio0~7 input mode */ \
{0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio11 input mode, gpio10~8 input mode */ \
@@ -204,7 +204,7 @@ extern struct wlan_pwr_cfg rtl8812_leave_lps_flow[RTL8812_TRANS_LPS_TO_ACT_STEP
4: LPS--Low Power State
5: SUS--Suspend
- The transision from different states are defined below
+ The transitions between different states are defined below
TRANS_CARDEMU_TO_ACT
TRANS_ACT_TO_CARDEMU
TRANS_CARDEMU_TO_SUS
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c
index 710bc015251c..ff1887187770 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c
@@ -82,7 +82,7 @@ bool rtl_hal_pwrseqcmdparsing (struct rtl_priv* rtlpriv, u8 cut_version,
value = value | (GET_PWR_CFG_VALUE(pwr_cfg_cmd)
& GET_PWR_CFG_MASK(pwr_cfg_cmd));
- /*Write the value back to sytem register*/
+ /*Write the value back to system register*/
rtl_write_byte(rtlpriv, offset, value);
}
break;
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/reg.h b/drivers/staging/rtl8821ae/rtl8821ae/reg.h
index 09c5f00d2603..beffb4243b1e 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/reg.h
+++ b/drivers/staging/rtl8821ae/rtl8821ae/reg.h
@@ -596,13 +596,13 @@
#define IMR_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */
#define IMR_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */
#define IMR_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */
-#define IMR_BCNDOK7 BIT(20) /* Beacon Queue DMA OK Interrup 7 */
-#define IMR_BCNDOK6 BIT(19) /* Beacon Queue DMA OK Interrup 6 */
-#define IMR_BCNDOK5 BIT(18) /* Beacon Queue DMA OK Interrup 5 */
-#define IMR_BCNDOK4 BIT(17) /* Beacon Queue DMA OK Interrup 4 */
-#define IMR_BCNDOK3 BIT(16) /* Beacon Queue DMA OK Interrup 3 */
-#define IMR_BCNDOK2 BIT(15) /* Beacon Queue DMA OK Interrup 2 */
-#define IMR_BCNDOK1 BIT(14) /* Beacon Queue DMA OK Interrup 1 */
+#define IMR_BCNDOK7 BIT(20) /* Beacon Queue DMA OK Interrupt 7 */
+#define IMR_BCNDOK6 BIT(19) /* Beacon Queue DMA OK Interrupt 6 */
+#define IMR_BCNDOK5 BIT(18) /* Beacon Queue DMA OK Interrupt 5 */
+#define IMR_BCNDOK4 BIT(17) /* Beacon Queue DMA OK Interrupt 4 */
+#define IMR_BCNDOK3 BIT(16) /* Beacon Queue DMA OK Interrupt 3 */
+#define IMR_BCNDOK2 BIT(15) /* Beacon Queue DMA OK Interrupt 2 */
+#define IMR_BCNDOK1 BIT(14) /* Beacon Queue DMA OK Interrupt 1 */
#define IMR_ATIMEND_E BIT(13) /* ATIM Window End Extension for Win7 */
#define IMR_TXERR BIT(11) /* Tx Error Flag Interrupt Status, write 1 clear. */
#define IMR_RXERR BIT(10) /* Rx Error Flag INT Status, Write 1 clear */
@@ -613,7 +613,7 @@
#define HWSET_MAX_SIZE 512
#define EFUSE_MAX_SECTION 64
#define EFUSE_REAL_CONTENT_LEN 256
-#define EFUSE_OOB_PROTECT_BYTES 18 /* PG data exclude header, dummy 7 bytes frome CP test and reserved 1byte.*/
+#define EFUSE_OOB_PROTECT_BYTES 18 /* PG data excludes header, 7 dummy bytes from CP test and 1 reserved byte. */
#define EEPROM_DEFAULT_TSSI 0x0
@@ -1511,7 +1511,7 @@
#define ROFDM0_TXCOEFF5 0xcb4
#define ROFDM0_TXCOEFF6 0xcb8
-/*Path_A RFE cotrol */
+/*Path_A RFE control */
#define RA_RFE_CTRL_8812 0xcb8
/*Path_B RFE control*/
#define RB_RFE_CTRL_8812 0xeb8
@@ -2336,19 +2336,19 @@
#define WOL_REASON_DEAUTH BIT(3)
#define WOL_REASON_FW_DISCONNECT BIT(4)
-#define RA_RFE_PINMUX 0xcb0 /* Path_A RFE cotrol pinmux*/
+#define RA_RFE_PINMUX 0xcb0 /* Path_A RFE control pinmux*/
#define RB_RFE_PINMUX 0xeb0 /* Path_B RFE control pinmux*/
#define RA_RFE_INV 0xcb4
#define RB_RFE_INV 0xeb4
/* RXIQC */
-#define RA_RXIQC_AB 0xc10 /*RxIQ imblance matrix coeff. A & B*/
-#define RA_RXIQC_CD 0xc14 /*RxIQ imblance matrix coeff. C & D*/
+#define RA_RXIQC_AB 0xc10 /*RxIQ imbalance matrix coeff. A & B*/
+#define RA_RXIQC_CD 0xc14 /*RxIQ imbalance matrix coeff. C & D*/
#define RA_TXSCALE 0xc1c /* Pah_A TX scaling factor*/
#define RB_TXSCALE 0xe1c /* Path_B TX scaling factor*/
-#define RB_RXIQC_AB 0xe10 /*RxIQ imblance matrix coeff. A & B*/
-#define RB_RXIQC_CD 0xe14 /*RxIQ imblance matrix coeff. C & D*/
+#define RB_RXIQC_AB 0xe10 /*RxIQ imbalance matrix coeff. A & B*/
+#define RB_RXIQC_CD 0xe14 /*RxIQ imbalance matrix coeff. C & D*/
#define RXIQC_AC 0x02ff /*bit mask for IQC matrix element A & C*/
#define RXIQC_BD 0x02ff0000 /*bit mask for IQC matrix element A & C*/
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/sw.c b/drivers/staging/rtl8821ae/rtl8821ae/sw.c
index 85a3474fc099..a8d175569770 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/sw.c
+++ b/drivers/staging/rtl8821ae/rtl8821ae/sw.c
@@ -57,9 +57,9 @@ void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
* 0 - Disable ASPM,
* 1 - Enable ASPM without Clock Req,
* 2 - Enable ASPM with Clock Req,
- * 3 - Alwyas Enable ASPM with Clock Req,
+ * 3 - Always Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
* */
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/trx.c b/drivers/staging/rtl8821ae/rtl8821ae/trx.c
index 75ae4387fe19..f40a93c0d2bb 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/trx.c
+++ b/drivers/staging/rtl8821ae/rtl8821ae/trx.c
@@ -37,8 +37,8 @@
#include "trx.h"
#include "led.h"
#include "dm.h"
-#include "phy.h"
-u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
+
+static u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
{
u16 fc = rtl_get_fc(skb);
@@ -244,7 +244,7 @@ static void _rtl8821ae_query_rxphystatus(struct ieee80211_hw *hw,
cck_agc_rpt = cck_buf->cck_agc_rpt;
/* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive) */
if (ppsc->rfpwr_state == ERFON)
cck_highpwr = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2,
@@ -363,7 +363,7 @@ static void _rtl8821ae_query_rxphystatus(struct ieee80211_hw *hw,
pstatus->rx_mimo_signalstrength[i] = (u8) rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive) */
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
@@ -603,7 +603,7 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
/* hw will set status->decrypted true, if it finds the
* frame is open data frame or mgmt frame. */
- /* So hw will not decryption robust managment frame
+ /* So hw will not decrypt robust management frame
* for IEEE80211w but still set status->decrypted
* true, so here we should set it back to undecrypted
* for IEEE80211w frame, and mac80211 sw will help
@@ -616,7 +616,7 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h
index 76bef93ad70a..17a9d9f8781d 100644
--- a/drivers/staging/rtl8821ae/wifi.h
+++ b/drivers/staging/rtl8821ae/wifi.h
@@ -40,10 +40,10 @@
#define RF_CHANGE_BY_INIT 0
-#define RF_CHANGE_BY_IPS BIT(28)
-#define RF_CHANGE_BY_PS BIT(29)
-#define RF_CHANGE_BY_HW BIT(30)
-#define RF_CHANGE_BY_SW BIT(31)
+#define RF_CHANGE_BY_IPS BIT(28)
+#define RF_CHANGE_BY_PS BIT(29)
+#define RF_CHANGE_BY_HW BIT(30)
+#define RF_CHANGE_BY_SW BIT(31)
#define IQK_ADDA_REG_NUM 16
#define IQK_MAC_REG_NUM 4
@@ -69,35 +69,35 @@
#define QBSS_LOAD_SIZE 5
#define MAX_WMMELE_LENGTH 64
-#define TOTAL_CAM_ENTRY 32
+#define TOTAL_CAM_ENTRY 32
/*slot time for 11g. */
#define RTL_SLOT_TIME_9 9
#define RTL_SLOT_TIME_20 20
/*related with tcp/ip. */
-/*if_ehther.h*/
-#define ETH_P_PAE 0x888E /*Port Access Entity
+/*if_ether.h*/
+#define ETH_P_PAE 0x888E /*Port Access Entity
*(IEEE 802.1X) */
-#define ETH_P_IP 0x0800 /*Internet Protocol packet */
-#define ETH_P_ARP 0x0806 /*Address Resolution packet */
-#define SNAP_SIZE 6
+#define ETH_P_IP 0x0800 /*Internet Protocol packet */
+#define ETH_P_ARP 0x0806 /*Address Resolution packet */
+#define SNAP_SIZE 6
#define PROTOC_TYPE_SIZE 2
/*related with 802.11 frame*/
-#define MAC80211_3ADDR_LEN 24
-#define MAC80211_4ADDR_LEN 30
+#define MAC80211_3ADDR_LEN 24
+#define MAC80211_4ADDR_LEN 30
#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max
* channel number */
#define CHANNEL_MAX_NUMBER_2G 14
-#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to
+#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to
*"phy_GetChnlGroup8812A" and
* "Hal_ReadTxPowerInfo8812A"*/
#define CHANNEL_MAX_NUMBER_5G_80M 7
#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, ch4~9, ch10~14
* total three groups */
-#define MAX_PG_GROUP 13
+#define MAX_PG_GROUP 13
#define CHANNEL_GROUP_MAX_2G 3
#define CHANNEL_GROUP_IDX_5GL 3
#define CHANNEL_GROUP_IDX_5GM 6
@@ -112,14 +112,14 @@
#define MAX_NUM_RATES 264
/*for 88E use*/
-/*It must always set to 4, otherwise read efuse table secquence will be wrong.*/
+/*It must always be set to 4, otherwise the efuse table read sequence will be wrong.*/
#define MAX_TX_COUNT 4
#define MAX_RF_PATH 4
#define MAX_CHNL_GROUP_24G 6
#define MAX_CHNL_GROUP_5G 14
/* BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON. */
-#define MAX_TX_QUEUE 9
+#define MAX_TX_QUEUE 9
#define TX_PWR_BY_RATE_NUM_BAND 2
#define TX_PWR_BY_RATE_NUM_RF 4
@@ -127,11 +127,11 @@
#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6
#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5
-#define DELTA_SWINGIDX_SIZE 30
-#define BAND_NUM 3
+#define DELTA_SWINGIDX_SIZE 30
+#define BAND_NUM 3
/*Now, it's just for 8192ee
*not OK yet, keep it 0*/
-#define DMA_IS_64BIT 0
+#define DMA_IS_64BIT 0
#define RTL8192EE_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
struct txpower_info_2g {
@@ -219,7 +219,7 @@ enum hardware_type {
};
enum scan_operation_backup_opt {
- SCAN_OPT_BACKUP_BAND0=0,
+ SCAN_OPT_BACKUP_BAND0 = 0,
SCAN_OPT_BACKUP_BAND1,
SCAN_OPT_RESTORE,
SCAN_OPT_MAX
@@ -435,7 +435,7 @@ enum ht_channel_width {
HT_CHANNEL_WIDTH_80 = 2,
};
-/* Ref: 802.11i sepc D10.0 7.3.2.25.1
+/* Ref: 802.11i spec D10.0 7.3.2.25.1
Cipher Suites Encryption Algorithms */
enum rt_enc_alg {
NO_ENCRYPTION = 0,
@@ -499,14 +499,14 @@ enum rtl_var_map {
RTL_IMR_BCNDMAINT3, /*Beacon DMA Interrupt 3 */
RTL_IMR_BCNDMAINT2, /*Beacon DMA Interrupt 2 */
RTL_IMR_BCNDMAINT1, /*Beacon DMA Interrupt 1 */
- RTL_IMR_BCNDOK8, /*Beacon Queue DMA OK Interrup 8 */
- RTL_IMR_BCNDOK7, /*Beacon Queue DMA OK Interrup 7 */
- RTL_IMR_BCNDOK6, /*Beacon Queue DMA OK Interrup 6 */
- RTL_IMR_BCNDOK5, /*Beacon Queue DMA OK Interrup 5 */
- RTL_IMR_BCNDOK4, /*Beacon Queue DMA OK Interrup 4 */
- RTL_IMR_BCNDOK3, /*Beacon Queue DMA OK Interrup 3 */
- RTL_IMR_BCNDOK2, /*Beacon Queue DMA OK Interrup 2 */
- RTL_IMR_BCNDOK1, /*Beacon Queue DMA OK Interrup 1 */
+ RTL_IMR_BCNDOK8, /*Beacon Queue DMA OK Interrupt 8 */
+ RTL_IMR_BCNDOK7, /*Beacon Queue DMA OK Interrupt 7 */
+ RTL_IMR_BCNDOK6, /*Beacon Queue DMA OK Interrupt 6 */
+ RTL_IMR_BCNDOK5, /*Beacon Queue DMA OK Interrupt 5 */
+ RTL_IMR_BCNDOK4, /*Beacon Queue DMA OK Interrupt 4 */
+ RTL_IMR_BCNDOK3, /*Beacon Queue DMA OK Interrupt 3 */
+ RTL_IMR_BCNDOK2, /*Beacon Queue DMA OK Interrupt 2 */
+ RTL_IMR_BCNDOK1, /*Beacon Queue DMA OK Interrupt 1 */
RTL_IMR_TIMEOUT2, /*Timeout interrupt 2 */
RTL_IMR_TIMEOUT1, /*Timeout interrupt 1 */
RTL_IMR_TXFOVW, /*Transmit FIFO Overflow */
@@ -515,10 +515,10 @@ enum rtl_var_map {
RTL_IMR_RXFOVW, /*Receive FIFO Overflow */
RTL_IMR_RDU, /*Receive Descriptor Unavailable */
RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */
- RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrup */
+ RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrupt */
RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */
RTL_IMR_COMDOK, /*Command Queue DMA OK Interrupt*/
- RTL_IMR_TBDOK, /*Transmit Beacon OK interrup */
+ RTL_IMR_TBDOK, /*Transmit Beacon OK interrupt */
RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */
RTL_IMR_TBDER, /*For 92C,Transmit Beacon Error Interrupt */
RTL_IMR_BKDOK, /*AC_BK DMA OK Interrupt */
@@ -645,9 +645,9 @@ enum wireless_mode {
};
enum ratr_table_mode {
- RATR_INX_WIRELESS_NGB = 0, // BGN 40 Mhz 2SS 1SS
- RATR_INX_WIRELESS_NG = 1, // GN or N
- RATR_INX_WIRELESS_NB = 2, // BGN 20 Mhz 2SS 1SS or BN
+ RATR_INX_WIRELESS_NGB = 0, /* BGN 40 Mhz 2SS 1SS */
+ RATR_INX_WIRELESS_NG = 1, /* GN or N */
+ RATR_INX_WIRELESS_NB = 2, /* BGN 20 Mhz 2SS 1SS or BN */
RATR_INX_WIRELESS_N = 3,
RATR_INX_WIRELESS_GB = 4,
RATR_INX_WIRELESS_G = 5,
@@ -759,7 +759,7 @@ struct rtl_tid_data {
struct rtl_ht_agg agg;
};
-struct rssi_sta{
+struct rssi_sta {
long undecorated_smoothed_pwdb;
};
@@ -899,7 +899,7 @@ struct regd_pair_mapping {
u16 reg_2ghz_ctl;
};
-struct dynamic_primary_cca{
+struct dynamic_primary_cca {
u8 pricca_flag;
u8 intf_flag;
u8 intf_type;
@@ -939,14 +939,14 @@ enum p2p_ps_state {
P2P_PS_ENABLE = 1,
P2P_PS_SCAN = 2,
P2P_PS_SCAN_DONE = 3,
- P2P_PS_ALLSTASLEEP = 4, // for P2P GO
+ P2P_PS_ALLSTASLEEP = 4, /* for P2P GO */
};
enum p2p_ps_mode {
P2P_PS_NONE = 0,
P2P_PS_CTWINDOW = 1,
P2P_PS_NOA = 2,
- P2P_PS_MIX = 3, // CTWindow and NoA
+ P2P_PS_MIX = 3, /* CTWindow and NoA */
};
struct rtl_p2p_ps_info {
@@ -969,7 +969,7 @@ struct rtl_p2p_ps_info {
u32 noa_start_time[P2P_MAX_NOA_NUM];
};
- struct p2p_ps_offload_t {
+struct p2p_ps_offload_t {
u8 Offload_En:1;
u8 role:1; /* 1: Owner, 0: Client */
u8 CTWindow_En:1;
@@ -981,7 +981,7 @@ struct rtl_p2p_ps_info {
};
#define IQK_MATRIX_REG_NUM 8
-#define IQK_MATRIX_SETTINGS_NUM (14+24+21) // Channels_2_4G_NUM + Channels_5G_20M_NUM + Channels_5G
+#define IQK_MATRIX_SETTINGS_NUM (14+24+21) /* Channels_2_4G_NUM + Channels_5G_20M_NUM + Channels_5G */
struct iqk_matrix_regs {
bool b_iqk_done;
long value[1][IQK_MATRIX_REG_NUM];
@@ -1074,12 +1074,12 @@ struct rtl_phy {
enum rt_polarity_ctl polarity_ctl;
};
-#define RTL_AGG_STOP 0
+#define RTL_AGG_STOP 0
#define RTL_AGG_PROGRESS 1
-#define RTL_AGG_START 2
+#define RTL_AGG_START 2
#define RTL_AGG_OPERATIONAL 3
-#define RTL_RX_AGG_START 1
-#define RTL_RX_AGG_STOP 0
+#define RTL_RX_AGG_START 1
+#define RTL_RX_AGG_STOP 0
struct rtl_priv;
struct rtl_io {
@@ -1092,13 +1092,13 @@ struct rtl_io {
/*PCI IO map */
unsigned long pci_base_addr; /*device I/O address */
- void (*write8_async) (struct rtl_priv * rtlpriv, u32 addr, u8 val);
- void (*write16_async) (struct rtl_priv * rtlpriv, u32 addr, u16 val);
- void (*write32_async) (struct rtl_priv * rtlpriv, u32 addr, u32 val);
+ void (*write8_async)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
+ void (*write16_async)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32_async)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
- u8(*read8_sync) (struct rtl_priv * rtlpriv, u32 addr);
- u16(*read16_sync) (struct rtl_priv * rtlpriv, u32 addr);
- u32(*read32_sync) (struct rtl_priv * rtlpriv, u32 addr);
+ u8 (*read8_sync)(struct rtl_priv *rtlpriv, u32 addr);
+ u16 (*read16_sync)(struct rtl_priv *rtlpriv, u32 addr);
+ u32 (*read32_sync)(struct rtl_priv *rtlpriv, u32 addr);
};
@@ -1256,7 +1256,7 @@ struct rtl_security {
bool use_defaultkey;
/*Encryption Algorithm for Unicast Packet */
enum rt_enc_alg pairwise_enc_algorithm;
- /*Encryption Algorithm for Brocast/Multicast */
+ /*Encryption Algorithm for Broadcast/Multicast */
enum rt_enc_alg group_enc_algorithm;
/*Cam Entry Bitmap */
u32 hwsec_cam_bitmap;
@@ -1317,9 +1317,9 @@ struct rtl_pstbl {
};
-#define ASSOCIATE_ENTRY_NUM 32+1
+#define ASSOCIATE_ENTRY_NUM (32+1)
-struct fast_ant_trainning{
+struct fast_ant_trainning {
u8 bssid[6];
u8 antsel_rx_keep_0;
u8 antsel_rx_keep_1;
@@ -1427,19 +1427,19 @@ struct rtl_dm {
char bb_swing_diff_5g;
u8 delta_swing_table_idx_24gccka_p[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_24gccka_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_24gcckb_p[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gccka_n[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gcckb_p[DELTA_SWINGIDX_SIZE];
u8 delta_swing_table_idx_24gcckb_n[DELTA_SWINGIDX_SIZE];
u8 delta_swing_table_idx_24ga_p[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_24ga_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_24gb_p[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_24gb_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5ga_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5ga_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5gb_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5gb_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_24ga_p_8188e[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_24ga_n_8188e[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24ga_n[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gb_p[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gb_n[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5ga_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5ga_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5gb_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5gb_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24ga_p_8188e[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24ga_n_8188e[DELTA_SWINGIDX_SIZE];
/* DMSP */
@@ -1451,7 +1451,7 @@ struct rtl_dm {
struct fast_ant_trainning fat_table;
u8 resp_tx_path;
- u8 path_sel;
+ u8 path_sel;
u32 patha_sum;
u32 pathb_sum;
u32 patha_cnt;
@@ -1671,7 +1671,6 @@ struct rtl_stats {
u8 rx_mimo_evm_dbm[4];
u16 cfo_short[4]; /* per-path's Cfo_short */
u16 cfo_tail[4];
-
u8 rx_pwr[4]; /* per-path's pwdb */
u8 rx_snr[4]; /* per-path's SNR */
u8 bandwidth;
@@ -1692,7 +1691,7 @@ struct rtl_stats {
};
struct rt_link_detect {
- /* count for raoming */
+ /* count for roaming */
u32 bcn_rx_inperiod;
u32 roam_times;
@@ -1756,48 +1755,48 @@ struct proxim {
void *proximity_priv;
int (*proxim_rx)(struct ieee80211_hw *hw, struct rtl_stats *status,
struct sk_buff *skb);
- u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
+ u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
};
struct rtl_hal_ops {
- int (*init_sw_vars) (struct ieee80211_hw * hw);
- void (*deinit_sw_vars) (struct ieee80211_hw * hw);
- void (*read_eeprom_info) (struct ieee80211_hw * hw);
- void (*interrupt_recognized) (struct ieee80211_hw * hw,
- u32 * p_inta, u32 * p_intb);
- int (*hw_init) (struct ieee80211_hw * hw);
- void (*hw_disable) (struct ieee80211_hw * hw);
- void (*hw_suspend) (struct ieee80211_hw * hw);
- void (*hw_resume) (struct ieee80211_hw * hw);
- void (*enable_interrupt) (struct ieee80211_hw * hw);
- void (*disable_interrupt) (struct ieee80211_hw * hw);
- int (*set_network_type) (struct ieee80211_hw * hw,
- enum nl80211_iftype type);
+ int (*init_sw_vars)(struct ieee80211_hw *hw);
+ void (*deinit_sw_vars)(struct ieee80211_hw *hw);
+ void (*read_eeprom_info)(struct ieee80211_hw *hw);
+ void (*interrupt_recognized)(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb);
+ int (*hw_init)(struct ieee80211_hw *hw);
+ void (*hw_disable)(struct ieee80211_hw *hw);
+ void (*hw_suspend)(struct ieee80211_hw *hw);
+ void (*hw_resume)(struct ieee80211_hw *hw);
+ void (*enable_interrupt)(struct ieee80211_hw *hw);
+ void (*disable_interrupt)(struct ieee80211_hw *hw);
+ int (*set_network_type)(struct ieee80211_hw *hw,
+ enum nl80211_iftype type);
void (*set_chk_bssid)(struct ieee80211_hw *hw,
bool check_bssid);
- void (*set_bw_mode) (struct ieee80211_hw * hw,
- enum nl80211_channel_type ch_type);
- u8(*switch_channel) (struct ieee80211_hw * hw);
- void (*set_qos) (struct ieee80211_hw * hw, int aci);
- void (*set_bcn_reg) (struct ieee80211_hw * hw);
- void (*set_bcn_intv) (struct ieee80211_hw * hw);
- void (*update_interrupt_mask) (struct ieee80211_hw * hw,
- u32 add_msr, u32 rm_msr);
- void (*get_hw_reg) (struct ieee80211_hw * hw, u8 variable, u8 * val);
- void (*set_hw_reg) (struct ieee80211_hw * hw, u8 variable, u8 * val);
- void (*update_rate_tbl) (struct ieee80211_hw * hw,
- struct ieee80211_sta *sta, u8 rssi_level);
- void (*pre_fill_tx_bd_desc) (struct ieee80211_hw *hw, u8 *tx_bd_desc,
- u8 *desc, u8 queue_index,
- struct sk_buff *skb, dma_addr_t addr);
- u16 (*rx_desc_buff_remained_cnt) (struct ieee80211_hw *hw,
- u8 queue_index);
- void (*rx_check_dma_ok) (struct ieee80211_hw *hw, u8 *header_desc,
- u8 queue_index);
- void (*fill_tx_desc) (struct ieee80211_hw * hw,
- struct ieee80211_hdr * hdr,
- u8 * pdesc_tx, u8 * pbd_desc,
- struct ieee80211_tx_info * info,
+ void (*set_bw_mode)(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+ u8 (*switch_channel)(struct ieee80211_hw *hw);
+ void (*set_qos)(struct ieee80211_hw *hw, int aci);
+ void (*set_bcn_reg)(struct ieee80211_hw *hw);
+ void (*set_bcn_intv)(struct ieee80211_hw *hw);
+ void (*update_interrupt_mask)(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+ void (*get_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val);
+ void (*set_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val);
+ void (*update_rate_tbl)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 rssi_level);
+ void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc,
+ u8 *desc, u8 queue_index,
+ struct sk_buff *skb, dma_addr_t addr);
+ u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw,
+ u8 queue_index);
+ void (*rx_check_dma_ok)(struct ieee80211_hw *hw, u8 *header_desc,
+ u8 queue_index);
+ void (*fill_tx_desc)(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr,
+ u8 *pdesc_tx, u8 *pbd_desc,
+ struct ieee80211_tx_info *info,
/*<delete in kernel start>*/
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
/*<delete in kernel end>*/
@@ -1805,74 +1804,77 @@ struct rtl_hal_ops {
/*<delete in kernel start>*/
#endif
/*<delete in kernel end>*/
- struct sk_buff * skb, u8 hw_queue,
+ struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
- void (*fill_tx_cmddesc) (struct ieee80211_hw * hw, u8 * pdesc,
- bool b_firstseg, bool b_lastseg,
- struct sk_buff * skb);
- bool(*query_rx_desc) (struct ieee80211_hw * hw,
- struct rtl_stats * status,
- struct ieee80211_rx_status * rx_status,
- u8 * pdesc, struct sk_buff * skb);
- void (*set_channel_access) (struct ieee80211_hw * hw);
- bool(*radio_onoff_checking) (struct ieee80211_hw * hw, u8 * valid);
- void (*dm_watchdog) (struct ieee80211_hw * hw);
- void (*scan_operation_backup) (struct ieee80211_hw * hw, u8 operation);
- bool(*set_rf_power_state) (struct ieee80211_hw * hw,
- enum rf_pwrstate rfpwr_state);
- void (*led_control) (struct ieee80211_hw * hw,
- enum led_ctl_mode ledaction);
- void (*set_desc) (struct ieee80211_hw *hw, u8 * pdesc, bool istx,
- u8 desc_name, u8 * val);
- u32(*get_desc) (u8 * pdesc, bool istx, u8 desc_name);
- bool (*is_tx_desc_closed) (struct ieee80211_hw *hw,
- u8 hw_queue, u16 index);
- void (*tx_polling) (struct ieee80211_hw * hw, u8 hw_queue);
- void (*enable_hw_sec) (struct ieee80211_hw * hw);
- void (*set_key) (struct ieee80211_hw * hw, u32 key_index,
- u8 * p_macaddr, bool is_group, u8 enc_algo,
- bool is_wepkey, bool clear_all);
- void (*init_sw_leds) (struct ieee80211_hw * hw);
- u32(*get_bbreg) (struct ieee80211_hw * hw, u32 regaddr, u32 bitmask);
- void (*set_bbreg) (struct ieee80211_hw * hw, u32 regaddr, u32 bitmask,
- u32 data);
- u32(*get_rfreg) (struct ieee80211_hw * hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask);
- void (*set_rfreg) (struct ieee80211_hw * hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data);
+ void (*fill_tx_cmddesc)(struct ieee80211_hw *hw, u8 *pdesc,
+ bool b_firstseg, bool b_lastseg,
+ struct sk_buff *skb);
+ bool (*query_rx_desc)(struct ieee80211_hw *hw,
+ struct rtl_stats *status,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+ void (*set_channel_access)(struct ieee80211_hw *hw);
+ bool (*radio_onoff_checking)(struct ieee80211_hw *hw, u8 *valid);
+ void (*dm_watchdog)(struct ieee80211_hw *hw);
+ void (*scan_operation_backup)(struct ieee80211_hw *hw, u8 operation);
+ bool (*set_rf_power_state)(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+ void (*led_control)(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction);
+ void (*set_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
+ u32 (*get_desc)(u8 *pdesc, bool istx, u8 desc_name);
+ bool (*is_tx_desc_closed)(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
+ void (*tx_polling)(struct ieee80211_hw *hw, u8 hw_queue);
+ void (*enable_hw_sec)(struct ieee80211_hw *hw);
+ void (*set_key)(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+ void (*init_sw_leds)(struct ieee80211_hw *hw);
+ u32 (*get_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+ void (*set_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+ u32 data);
+ u32 (*get_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+ void (*set_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
void (*allow_all_destaddr)(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg);
- void (*linked_set_reg) (struct ieee80211_hw * hw);
- void (*check_switch_to_dmdp) (struct ieee80211_hw * hw);
- void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
- void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw);
- void (*c2h_command_handle) (struct ieee80211_hw *hw);
- void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw, bool mstate);
- void (*bt_turn_off_bt_coexist_before_enter_lps) (struct ieee80211_hw *hw);
- void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
- u32 cmd_len, u8 *p_cmdbuffer);
- bool (*get_btc_status) (void);
- u32 (*rx_command_packet_handler)(struct ieee80211_hw *hw, struct rtl_stats status, struct sk_buff *skb);
+ bool allow_all_da, bool write_into_reg);
+ void (*linked_set_reg)(struct ieee80211_hw *hw);
+ void (*check_switch_to_dmdp)(struct ieee80211_hw *hw);
+ void (*dualmac_easy_concurrent)(struct ieee80211_hw *hw);
+ void (*dualmac_switch_to_dmdp)(struct ieee80211_hw *hw);
+ void (*c2h_command_handle)(struct ieee80211_hw *hw);
+ void (*bt_wifi_media_status_notify)(struct ieee80211_hw *hw,
+ bool mstate);
+ void (*bt_turn_off_bt_coexist_before_enter_lps)(struct ieee80211_hw *hw);
+ void (*fill_h2c_cmd)(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
+ bool (*get_btc_status)(void);
+ u32 (*rx_command_packet_handler)(struct ieee80211_hw *hw,
+ struct rtl_stats status,
+ struct sk_buff *skb);
};
struct rtl_intf_ops {
/*com */
void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
- int (*adapter_start) (struct ieee80211_hw * hw);
- void (*adapter_stop) (struct ieee80211_hw * hw);
+ int (*adapter_start)(struct ieee80211_hw *hw);
+ void (*adapter_stop)(struct ieee80211_hw *hw);
bool (*check_buddy_priv)(struct ieee80211_hw *hw,
- struct rtl_priv **buddy_priv);
+ struct rtl_priv **buddy_priv);
/*<delete in kernel start>*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
- int (*adapter_tx) (struct ieee80211_hw * hw, struct sk_buff * skb,
- struct rtl_tcb_desc *ptcb_desc);
+ int (*adapter_tx)(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc);
#else
/*<delete in kernel end>*/
- int (*adapter_tx) (struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct sk_buff *skb,
- struct rtl_tcb_desc *ptcb_desc);
+ int (*adapter_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc);
/*<delete in kernel start>*/
#endif
/*<delete in kernel end>*/
@@ -1881,22 +1883,22 @@ struct rtl_intf_ops {
#else
void (*flush)(struct ieee80211_hw *hw, bool drop);
#endif
- int (*reset_trx_ring) (struct ieee80211_hw * hw);
+ int (*reset_trx_ring)(struct ieee80211_hw *hw);
/*<delete in kernel start>*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
- bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
+ bool (*waitq_insert)(struct ieee80211_hw *hw, struct sk_buff *skb);
#else
/*<delete in kernel end>*/
- bool (*waitq_insert) (struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct sk_buff *skb);
+ bool (*waitq_insert)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb);
/*<delete in kernel start>*/
#endif
/*<delete in kernel end>*/
/*pci */
- void (*disable_aspm) (struct ieee80211_hw * hw);
- void (*enable_aspm) (struct ieee80211_hw * hw);
+ void (*disable_aspm)(struct ieee80211_hw *hw);
+ void (*enable_aspm)(struct ieee80211_hw *hw);
/*usb */
};
@@ -2027,21 +2029,21 @@ struct rtl_btc_info {
};
struct rtl_btc_ops {
- void (*btc_init_variables) (struct rtl_priv *rtlpriv);
- void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
- void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
- void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
- void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype);
- void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action);
- void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv,
- enum rt_media_status mstatus);
- void (*btc_periodical) (struct rtl_priv *rtlpriv);
- void (*btc_halt_notify) (void);
- void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv,
- u8 * tmp_buf, u8 length);
- bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv);
- bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv);
- bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
+ void (*btc_init_variables)(struct rtl_priv *rtlpriv);
+ void (*btc_init_hal_vars)(struct rtl_priv *rtlpriv);
+ void (*btc_init_hw_config)(struct rtl_priv *rtlpriv);
+ void (*btc_ips_notify)(struct rtl_priv *rtlpriv, u8 type);
+ void (*btc_scan_notify)(struct rtl_priv *rtlpriv, u8 scantype);
+ void (*btc_connect_notify)(struct rtl_priv *rtlpriv, u8 action);
+ void (*btc_mediastatus_notify)(struct rtl_priv *rtlpriv,
+ enum rt_media_status mstatus);
+ void (*btc_periodical)(struct rtl_priv *rtlpriv);
+ void (*btc_halt_notify)(void);
+ void (*btc_btinfo_notify)(struct rtl_priv *rtlpriv,
+ u8 *tmp_buf, u8 length);
+ bool (*btc_is_limited_dig)(struct rtl_priv *rtlpriv);
+ bool (*btc_is_disable_edca_turbo)(struct rtl_priv *rtlpriv);
+ bool (*btc_is_bt_disabled)(struct rtl_priv *rtlpriv);
};
struct rtl_bt_coexist {
@@ -2087,7 +2089,7 @@ struct rtl_priv {
/*
*hal_cfg : for diff cards
- *intf_ops : for diff interrface usb/pcie
+ *intf_ops : for diff interface usb/pcie
*/
struct rtl_hal_cfg *cfg;
struct rtl_intf_ops *intf_ops;
@@ -2105,7 +2107,7 @@ struct rtl_priv {
/*for bt coexist use*/
struct rtl_bt_coexist btcoexist;
- /* seperate 92ee from other ICs,
+ /* separate 92ee from other ICs,
* 92ee use new trx flow. */
bool use_new_trx_flow;
/*This must be the last item so
@@ -2120,57 +2122,57 @@ struct rtl_priv {
#define rtl_hal(rtlpriv) (&((rtlpriv)->rtlhal))
#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse))
#define rtl_psc(rtlpriv) (&((rtlpriv)->psc))
-#define rtl_sec(rtlpriv) (&((rtlpriv)->sec))
-#define rtl_dm(rtlpriv) (&((rtlpriv)->dm))
+#define rtl_sec(rtlpriv) (&((rtlpriv)->sec))
+#define rtl_dm(rtlpriv) (&((rtlpriv)->dm))
/***************************************
Bluetooth Co-existance Related
****************************************/
enum bt_ant_num {
- ANT_X2 = 0,
- ANT_X1 = 1,
+ ANT_X2 = 0,
+ ANT_X1 = 1,
};
enum bt_co_type {
- BT_2WIRE = 0,
- BT_ISSC_3WIRE = 1,
- BT_ACCEL = 2,
- BT_CSR_BC4 = 3,
- BT_CSR_BC8 = 4,
- BT_RTL8756 = 5,
- BT_RTL8723A = 6,
- BT_RTL8821A = 7,
- BT_RTL8723B = 8,
- BT_RTL8192E = 9,
- BT_RTL8812A = 11,
-};
-
-enum bt_total_ant_num{
+ BT_2WIRE = 0,
+ BT_ISSC_3WIRE = 1,
+ BT_ACCEL = 2,
+ BT_CSR_BC4 = 3,
+ BT_CSR_BC8 = 4,
+ BT_RTL8756 = 5,
+ BT_RTL8723A = 6,
+ BT_RTL8821A = 7,
+ BT_RTL8723B = 8,
+ BT_RTL8192E = 9,
+ BT_RTL8812A = 11,
+};
+
+enum bt_total_ant_num {
ANT_TOTAL_X2 = 0,
ANT_TOTAL_X1 = 1
};
enum bt_cur_state {
- BT_OFF = 0,
- BT_ON = 1,
+ BT_OFF = 0,
+ BT_ON = 1,
};
enum bt_service_type {
- BT_SCO = 0,
- BT_A2DP = 1,
- BT_HID = 2,
- BT_HID_IDLE = 3,
- BT_SCAN = 4,
- BT_IDLE = 5,
- BT_OTHER_ACTION = 6,
- BT_BUSY = 7,
- BT_OTHERBUSY = 8,
- BT_PAN = 9,
+ BT_SCO = 0,
+ BT_A2DP = 1,
+ BT_HID = 2,
+ BT_HID_IDLE = 3,
+ BT_SCAN = 4,
+ BT_IDLE = 5,
+ BT_OTHER_ACTION = 6,
+ BT_BUSY = 7,
+ BT_OTHERBUSY = 8,
+ BT_PAN = 9,
};
enum bt_radio_shared {
- BT_RADIO_SHARED = 0,
- BT_RADIO_INDIVIDUAL = 1,
+ BT_RADIO_SHARED = 0,
+ BT_RADIO_INDIVIDUAL = 1,
};
struct bt_coexist_info {
@@ -2255,11 +2257,11 @@ struct bt_coexist_info {
/* Write data to memory */
#define WRITEEF1BYTE(_ptr, _val) \
- (*((u8 *)(_ptr)))=EF1BYTE(_val)
+ ((*((u8 *)(_ptr))) = EF1BYTE(_val))
#define WRITEEF2BYTE(_ptr, _val) \
- (*((u16 *)(_ptr)))=EF2BYTE(_val)
+ ((*((u16 *)(_ptr))) = EF2BYTE(_val))
#define WRITEEF4BYTE(_ptr, _val) \
- (*((u32 *)(_ptr)))=EF4BYTE(_val)
+ ((*((u32 *)(_ptr))) = EF4BYTE(_val))
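The added outer parentheses turn each WRITEEF*BYTE expansion into a single expression, so the macro can be embedded in a larger expression without the trailing assignment being re-parsed by the surrounding operators. A minimal sketch of a use that only parses with the parenthesized form (variable names are illustrative, not from the driver):

	u8 reg = 0;
	bool low_rate = true;

	/* with the unparenthesized definition this line fails to parse:
	 * the second "= EF1BYTE(...)" cannot bind inside the conditional
	 * operator; with the new form both branches are single expressions */
	low_rate ? WRITEEF1BYTE(&reg, 0x04) : WRITEEF1BYTE(&reg, 0x0c);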
/*Example:
BIT_LEN_MASK_32(0) => 0x00000000
@@ -2298,17 +2300,17 @@ Translate subfield (continuous bits in little-endian) of 4-byte
value to host byte ordering.*/
#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
( \
- ( LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset) ) & \
+ (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
BIT_LEN_MASK_32(__bitlen) \
)
#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
( \
- ( LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset) ) & \
+ (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
BIT_LEN_MASK_16(__bitlen) \
)
#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
( \
- ( LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset) ) & \
+ (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
BIT_LEN_MASK_8(__bitlen) \
)
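LE_BITS_TO_4BYTE and its 2-byte and 1-byte variants read a bit field out of a little-endian buffer: the bytes at __pstart are converted to host order, shifted down by __bitoffset, and masked to __bitlen bits; the SET_BITS_TO_LE_* macros further down are the matching writers. A minimal sketch of how descriptor accessors are typically layered on top of them (the field names and bit positions here are made up for illustration, not taken from trx.h):

/* illustrative accessors only: a 14-bit length field at bit 0 of the
 * first RX descriptor dword, and a 5-bit queue-select field written at
 * bit 8 of the third TX descriptor dword */
#define GET_RX_DESC_PKT_LEN(__pdesc)		\
	LE_BITS_TO_4BYTE(__pdesc, 0, 14)
#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val)	\
	SET_BITS_TO_LE_4BYTE((u8 *)(__pdesc) + 8, 8, 5, __val)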
@@ -2318,17 +2320,17 @@ and return the result in 4-byte value in host byte ordering.*/
#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
( \
LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
- ( ~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) ) \
+ (~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen)) \
)
#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
( \
LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
- ( ~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) ) \
+ (~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen)) \
)
#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
( \
LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
- ( ~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) ) \
+ (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
)
/*Description:
@@ -2337,19 +2339,19 @@ Set subfield of little-endian 4-byte value to specified value. */
*((u32 *)(__pstart)) = EF4BYTE \
( \
LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
- ( (((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset) )\
+ ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset))\
);
#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
*((u16 *)(__pstart)) = EF2BYTE \
( \
LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
- ( (((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset) )\
+ ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset))\
);
#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
*((u8 *)(__pstart)) = EF1BYTE \
( \
LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
- ( (((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset) ) \
+ ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
);
#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
@@ -2359,18 +2361,18 @@ Set subfield of little-endian 4-byte value to specified value. */
mem access macro define end
****************************************/
-#define byte(x,n) ((x >> (8 * n)) & 0xff)
+#define byte(x, n) ((x >> (8 * n)) & 0xff)
#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC)
-#define RTL_WATCH_DOG_TIME 2000
+#define RTL_WATCH_DOG_TIME 2000
#define MSECS(t) msecs_to_jiffies(t)
-#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS)
-#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
-#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
-#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA)
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
+#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
+#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA)
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
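SEQ_TO_SN and SN_TO_SEQ split the 802.11 sequence-control field, which carries the fragment number in bits 0-3 and the 12-bit sequence number in bits 4-15 (IEEE80211_SCTL_SEQ is 0xFFF0). A minimal usage sketch, assuming skb holds a received 802.11 frame:

	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	u16 sn = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	/* next sequence number, wrapping at MAX_SN (0xfff) */
	u16 next_sn = (sn + 1) & MAX_SN;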
#define RT_RF_OFF_LEVL_ASPM BIT(0) /*PCI ASPM */
#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /*PCI clock request */
@@ -2383,7 +2385,7 @@ Set subfield of little-endian 4-byte value to specified value. */
#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6)
/* no matter RFOFF or SLEEP we set PS_ASPM_LEVL*/
#define RT_PS_LEVEL_ASPM BIT(7)
-/*When LPS is on, disable 2R if no packet is received or transmittd.*/
+/*When LPS is on, disable 2R if no packet is received or transmitted.*/
#define RT_RF_LPS_DISALBE_2R BIT(30)
#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */
#define RT_IN_PS_LEVEL(ppsc, _ps_flg) \
@@ -2397,13 +2399,13 @@ Set subfield of little-endian 4-byte value to specified value. */
container_of(container_of(x, struct delayed_work, work), y, z)
#define FILL_OCTET_STRING(_os,_octet,_len) \
- (_os).octet=(u8*)(_octet); \
- (_os).length=(_len);
+ (_os).octet = (u8 *)(_octet); \
+ (_os).length = (_len);
#define CP_MACADDR(des,src) \
- ((des)[0]=(src)[0],(des)[1]=(src)[1],\
- (des)[2]=(src)[2],(des)[3]=(src)[3],\
- (des)[4]=(src)[4],(des)[5]=(src)[5])
+ ((des)[0] = (src)[0],(des)[1] = (src)[1],\
+ (des)[2] = (src)[2],(des)[3] = (src)[3],\
+ (des)[4] = (src)[4],(des)[5] = (src)[5])
static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
{
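Note: the hunks above are whitespace cleanups of rtlwifi's little-endian bit-field helpers. As a rough standalone sketch of the read-modify-write pattern that LE_BITS_TO_1BYTE and SET_BITS_TO_LE_1BYTE implement (illustrative names, not the driver's macros):

/* Update a sub-field of a little-endian byte: clear __bitlen bits at
 * __bitoffset, then OR in the new value, mirroring
 * LE_BITS_CLEARED_TO_1BYTE | ((val & mask) << offset). */
static inline void set_bits_le_u8(u8 *p, unsigned int bitoffset,
				  unsigned int bitlen, u8 val)
{
	u8 mask = (u8)(((1U << bitlen) - 1) << bitoffset);

	*p = (*p & ~mask) | ((val << bitoffset) & mask);
}

/* Read the same sub-field back: shift down and mask with the length
 * mask, as LE_BITS_TO_1BYTE does. */
static inline u8 get_bits_le_u8(const u8 *p, unsigned int bitoffset,
				unsigned int bitlen)
{
	return (*p >> bitoffset) & ((1U << bitlen) - 1);
}

The 2- and 4-byte variants follow the same pattern with wider masks plus the EF2BYTE/EF4BYTE endianness conversions visible in the hunks above.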
diff --git a/drivers/staging/rts5139/ms.c b/drivers/staging/rts5139/ms.c
index 9253f6ab2e08..390292ad4566 100644
--- a/drivers/staging/rts5139/ms.c
+++ b/drivers/staging/rts5139/ms.c
@@ -1033,8 +1033,7 @@ static int ms_read_attribute_info(struct rts51x_chip *chip)
((u32) buf[cur_addr_off + 5] << 16) |
((u32) buf[cur_addr_off + 6] << 8) |
buf[cur_addr_off + 7];
- RTS51X_DEBUGP("sys_info_addr = 0x%x,"
- "sys_info_size = 0x%x\n",
+ RTS51X_DEBUGP("sys_info_addr = 0x%x, sys_info_size = 0x%x\n",
sys_info_addr, sys_info_size);
if (sys_info_size != 96) {
kfree(buf);
@@ -1069,8 +1068,7 @@ static int ms_read_attribute_info(struct rts51x_chip *chip)
((u32) buf[cur_addr_off + 5] << 16) |
((u32) buf[cur_addr_off + 6] << 8) |
buf[cur_addr_off + 7];
- RTS51X_DEBUGP("model_name_addr = 0x%x,"
- "model_name_size = 0x%x\n",
+ RTS51X_DEBUGP("model_name_addr = 0x%x, model_name_size = 0x%x\n",
model_name_addr, model_name_size);
if (model_name_size != 48) {
kfree(buf);
@@ -1751,8 +1749,7 @@ static int ms_copy_page(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
retval = ms_read_status_reg(chip);
if (retval != STATUS_SUCCESS) {
uncorrect_flag = 1;
- RTS51X_DEBUGP("Uncorrectable"
- "error\n");
+ RTS51X_DEBUGP("Uncorrectable error\n");
} else {
uncorrect_flag = 0;
}
@@ -1770,8 +1767,7 @@ static int ms_copy_page(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
ms_write_extra_data(chip, old_blk, i,
extra,
MS_EXTRA_SIZE);
- RTS51X_DEBUGP("page %d :"
- "extra[0] = 0x%x\n",
+ RTS51X_DEBUGP("page %d : extra[0] = 0x%x\n",
i, extra[0]);
MS_SET_BAD_BLOCK_FLG(ms_card);
@@ -1932,8 +1928,7 @@ static int ms_auto_copy_page(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
u8 page_len, bus_width, val = 0;
u8 extra[MS_EXTRA_SIZE];
- RTS51X_DEBUGP("Auto copy page from 0x%x to 0x%x,"
- "logical block is 0x%x\n",
+ RTS51X_DEBUGP("Auto copy page from 0x%x to 0x%x, logical block is 0x%x\n",
old_blk, new_blk, log_blk);
RTS51X_DEBUGP("start_page = %d, end_page = %d\n", start_page,
end_page);
@@ -2555,8 +2550,7 @@ static int ms_build_l2p_tbl(struct rts51x_chip *chip, int seg_no)
for (log_blk = 0; log_blk < 494; log_blk++) {
tmp_blk = segment->l2p_table[log_blk];
if (tmp_blk < ms_card->boot_block) {
- RTS51X_DEBUGP("Boot block is not the first"
- "normal block.\n");
+ RTS51X_DEBUGP("Boot block is not the first normal block.\n");
if (chip->card_wp & MS_CARD)
break;
@@ -3976,8 +3970,7 @@ static int rts51x_ms_rw_multi_sector(struct scsi_cmnd *srb, struct rts51x_chip *
end_page = start_page + (u8) total_sec_cnt;
page_cnt = end_page - start_page;
- RTS51X_DEBUGP("start_page = %d, end_page = %d,"
- "page_cnt = %d\n",
+ RTS51X_DEBUGP("start_page = %d, end_page = %d, page_cnt = %d\n",
start_page, end_page, page_cnt);
if (srb->sc_data_direction == DMA_FROM_DEVICE)
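Note: the RTS51X_DEBUGP changes above merge format strings that had been split across adjacent literals; the pieces concatenate with no separator (the old messages read "...0x%x,sys_info_size..." with a missing space), and split strings also defeat grepping for the full message. Written with a plain printk for illustration, since RTS51X_DEBUGP is a driver-local debug wrapper:

/* Before: adjacent literals concatenate without a space and the full
 * message cannot be found with a single grep. */
printk(KERN_DEBUG "sys_info_addr = 0x%x,"
	"sys_info_size = 0x%x\n", sys_info_addr, sys_info_size);

/* After: one literal, even if the line runs past 80 columns. */
printk(KERN_DEBUG "sys_info_addr = 0x%x, sys_info_size = 0x%x\n",
	sys_info_addr, sys_info_size);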
diff --git a/drivers/staging/rts5139/ms_mg.c b/drivers/staging/rts5139/ms_mg.c
index 54cfd85259a9..00862c1a36db 100644
--- a/drivers/staging/rts5139/ms_mg.c
+++ b/drivers/staging/rts5139/ms_mg.c
@@ -35,6 +35,7 @@
#include "rts51x_scsi.h"
#include "rts51x_card.h"
#include "ms.h"
+#include "ms_mg.h"
#ifdef SUPPORT_MAGIC_GATE
diff --git a/drivers/staging/rts5139/rts51x.c b/drivers/staging/rts5139/rts51x.c
index a8d2d046b44f..c8d06d4fddd2 100644
--- a/drivers/staging/rts5139/rts51x.c
+++ b/drivers/staging/rts5139/rts51x.c
@@ -215,7 +215,7 @@ void rts51x_try_to_exit_ss(struct rts51x_chip *chip)
* a USB port reset, whether from this driver or a different one.
*/
-int rts51x_pre_reset(struct usb_interface *iface)
+static int rts51x_pre_reset(struct usb_interface *iface)
{
struct rts51x_chip *chip = usb_get_intfdata(iface);
@@ -226,7 +226,7 @@ int rts51x_pre_reset(struct usb_interface *iface)
return 0;
}
-int rts51x_post_reset(struct usb_interface *iface)
+static int rts51x_post_reset(struct usb_interface *iface)
{
struct rts51x_chip *chip = usb_get_intfdata(iface);
@@ -832,7 +832,7 @@ static void rts51x_disconnect(struct usb_interface *intf)
* Initialization and registration
***********************************************************************/
-struct usb_device_id rts5139_usb_ids[] = {
+static struct usb_device_id rts5139_usb_ids[] = {
{USB_DEVICE(0x0BDA, 0x0139)},
{USB_DEVICE(0x0BDA, 0x0129)},
{} /* Terminating entry */
diff --git a/drivers/staging/rts5139/rts51x_fop.c b/drivers/staging/rts5139/rts51x_fop.c
index dee7d8af564e..677d18b3dcd5 100644
--- a/drivers/staging/rts5139/rts51x_fop.c
+++ b/drivers/staging/rts5139/rts51x_fop.c
@@ -93,7 +93,7 @@ static int rts51x_sd_direct_cmnd(struct rts51x_chip *chip,
}
retval =
- copy_to_user((void *)cmnd->buf, (void *)buf, cmnd->buf_len);
+ copy_to_user(cmnd->buf, (void *)buf, cmnd->buf_len);
if (retval) {
kfree(buf);
TRACE_RET(chip, STATUS_NOMEM);
@@ -109,7 +109,7 @@ static int rts51x_sd_direct_cmnd(struct rts51x_chip *chip,
TRACE_RET(chip, STATUS_NOMEM);
retval =
- copy_from_user((void *)buf, (void *)cmnd->buf,
+ copy_from_user((void *)buf, cmnd->buf,
cmnd->buf_len);
if (retval) {
kfree(buf);
@@ -154,7 +154,7 @@ static int rts51x_sd_get_rsp(struct rts51x_chip *chip, struct sd_rsp *rsp)
else
count = (rsp->rsp_len < 6) ? rsp->rsp_len : 6;
- retval = copy_to_user((void *)rsp->rsp, (void *)sd_card->rsp, count);
+ retval = copy_to_user(rsp->rsp, (void *)sd_card->rsp, count);
if (retval)
TRACE_RET(chip, STATUS_NOMEM);
@@ -250,7 +250,7 @@ long rts51x_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RTS5139_IOC_SD_DIRECT:
retval =
- copy_from_user((void *)&cmnd, (void *)arg,
+ copy_from_user((void *)&cmnd, (void __user *)arg,
sizeof(struct sd_direct_cmnd));
if (retval) {
retval = -ENOMEM;
@@ -265,7 +265,7 @@ long rts51x_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case RTS5139_IOC_SD_GET_RSP:
retval =
- copy_from_user((void *)&rsp, (void *)arg,
+ copy_from_user(&rsp, (void __user *)arg,
sizeof(struct sd_rsp));
if (retval) {
retval = -ENOMEM;
diff --git a/drivers/staging/rts5139/rts51x_fop.h b/drivers/staging/rts5139/rts51x_fop.h
index eb45acf50d1a..c691ee99720e 100644
--- a/drivers/staging/rts5139/rts51x_fop.h
+++ b/drivers/staging/rts5139/rts51x_fop.h
@@ -35,12 +35,12 @@
struct sd_direct_cmnd {
u8 cmnd[12];
- void *buf;
+ void __user *buf;
int buf_len;
};
struct sd_rsp {
- void *rsp;
+ void __user *rsp;
int rsp_len;
};
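Note: the rts51x_fop changes tag user-space pointers with __user, which lets sparse verify that copy_to_user()/copy_from_user() only ever see user pointers; the old casts through plain void * hid that information. A minimal sketch of the pattern, assuming kbuf is a kernel bounce buffer and using the conventional -EFAULT return rather than the driver's STATUS_* codes (names are illustrative):

#include <linux/uaccess.h>

/* Hypothetical mirror of struct sd_direct_cmnd from rts51x_fop.h. */
struct example_cmnd {
	u8 cmnd[12];
	void __user *buf;	/* user-space pointer, never dereferenced directly */
	int buf_len;
};

static int example_copy_result(struct example_cmnd *cmnd, const void *kbuf)
{
	/* copy_to_user() returns the number of bytes left uncopied;
	 * any nonzero result means the user mapping was invalid. */
	if (copy_to_user(cmnd->buf, kbuf, cmnd->buf_len))
		return -EFAULT;
	return 0;
}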
diff --git a/drivers/staging/rts5139/rts51x_scsi.c b/drivers/staging/rts5139/rts51x_scsi.c
index 3a990253c780..75282fea38f7 100644
--- a/drivers/staging/rts5139/rts51x_scsi.c
+++ b/drivers/staging/rts5139/rts51x_scsi.c
@@ -441,7 +441,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rts51x_chip *chip)
return TRANSPORT_GOOD;
}
-unsigned char formatter_inquiry_str[20] = {
+static unsigned char formatter_inquiry_str[20] = {
'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K',
'-', 'M', 'G', /* Byte[47:49] */
0x0B, /* Byte[50]: MG, MS, MSPro, MSXC */
@@ -1990,7 +1990,7 @@ static int show_info(struct seq_file *m, struct Scsi_Host *host)
/* queue a command */
/* This is always called with scsi_lock(host) held */
-int queuecommand_lck(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *))
+static int queuecommand_lck(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *))
{
struct rts51x_chip *chip = host_to_rts51x(srb->device->host);
diff --git a/drivers/staging/rts5139/rts51x_transport.c b/drivers/staging/rts5139/rts51x_transport.c
index c172f4ae7c23..74588d249b32 100644
--- a/drivers/staging/rts5139/rts51x_transport.c
+++ b/drivers/staging/rts5139/rts51x_transport.c
@@ -646,9 +646,9 @@ int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status)
chip->usb->intr_urb->actual_length);
}
-u8 media_not_present[] = {
+static u8 media_not_present[] = {
0x70, 0, 0x02, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0 };
-u8 invalid_cmd_field[] = {
+static u8 invalid_cmd_field[] = {
0x70, 0, 0x05, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0 };
void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip)
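Note: adding static to rts5139_usb_ids, formatter_inquiry_str, media_not_present, invalid_cmd_field and the pre/post-reset and queuecommand callbacks gives them internal linkage: nothing outside these files references them, so they need no extern declarations and cannot collide with symbols elsewhere in the kernel. The same idiom for a file-local table and helper, with illustrative names:

#include <linux/usb.h>

/* Internal linkage: visible only to this translation unit. */
static const u8 example_sense[18] = {
	0x70, 0, 0x02, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0
};

static int example_pre_reset(struct usb_interface *iface)
{
	/* driver-specific quiesce work would go here */
	return 0;
}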
diff --git a/drivers/staging/rts5139/sd.c b/drivers/staging/rts5139/sd.c
index 4283b0917f24..da5a9b8fb69c 100644
--- a/drivers/staging/rts5139/sd.c
+++ b/drivers/staging/rts5139/sd.c
@@ -1529,7 +1529,7 @@ static int sd_tuning_rx(struct rts51x_chip *chip)
int i, j;
u32 raw_phase_map[3], phase_map;
u8 final_phase;
- int (*tuning_cmd) (struct rts51x_chip *chip, u8 sample_point);
+ int (*tuning_cmd)(struct rts51x_chip *chip, u8 sample_point);
if (CHK_SD(sd_card)) {
if (CHK_SD_DDR50(sd_card))
@@ -1627,7 +1627,7 @@ static int sd_tuning_tx(struct rts51x_chip *chip)
int i, j;
u32 raw_phase_map[3], phase_map;
u8 final_phase;
- int (*tuning_cmd) (struct rts51x_chip *chip, u8 sample_point);
+ int (*tuning_cmd)(struct rts51x_chip *chip, u8 sample_point);
if (CHK_SD(sd_card)) {
if (CHK_SD_DDR50(sd_card))
diff --git a/drivers/staging/rts5139/sd_cprm.c b/drivers/staging/rts5139/sd_cprm.c
index d4689839e15a..cede6c07394f 100644
--- a/drivers/staging/rts5139/sd_cprm.c
+++ b/drivers/staging/rts5139/sd_cprm.c
@@ -35,6 +35,7 @@
#include "rts51x_scsi.h"
#include "rts51x_card.h"
#include "rts51x_chip.h"
+#include "sd_cprm.h"
#include "sd.h"
#ifdef SUPPORT_CPRM
diff --git a/drivers/staging/rts5139/xd.c b/drivers/staging/rts5139/xd.c
index 10fea7e16ace..be432351be86 100644
--- a/drivers/staging/rts5139/xd.c
+++ b/drivers/staging/rts5139/xd.c
@@ -473,7 +473,8 @@ static int reset_xd(struct rts51x_chip *chip)
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP *
(2 + i + chip->option.rts51x_xd_rw_step)
- + XD_TIME_RWN_STEP * (i + chip->option.rts51x_xd_rwn_step));
+ + XD_TIME_RWN_STEP *
+ (i + chip->option.rts51x_xd_rwn_step));
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP * (4 +
i) + XD_TIME_RWN_STEP * (3 + i));
@@ -1526,8 +1527,8 @@ static int xd_read_multiple_pages(struct rts51x_chip *chip, u32 phy_blk,
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
- rts51x_trans_dma_enable(chip->srb->sc_data_direction, chip, page_cnt * 512,
- DMA_512);
+ rts51x_trans_dma_enable(chip->srb->sc_data_direction, chip,
+ page_cnt * 512, DMA_512);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_READ_PAGES);
@@ -1745,8 +1746,8 @@ static int xd_write_multiple_pages(struct rts51x_chip *chip, u32 old_blk,
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
RING_BUFFER);
- rts51x_trans_dma_enable(chip->srb->sc_data_direction, chip, page_cnt * 512,
- DMA_512);
+ rts51x_trans_dma_enable(chip->srb->sc_data_direction, chip,
+ page_cnt * 512, DMA_512);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_WRITE_PAGES);
@@ -1842,8 +1843,8 @@ static int xd_delay_write(struct rts51x_chip *chip)
return STATUS_SUCCESS;
}
-int rts51x_xd_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sector,
- u16 sector_cnt)
+int rts51x_xd_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip,
+ u32 start_sector, u16 sector_cnt)
{
struct xd_info *xd_card = &(chip->xd_card);
unsigned int lun = SCSI_LUN(srb);
@@ -1883,7 +1884,8 @@ int rts51x_xd_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sect
retval = xd_build_l2p_tbl(chip, zone_no);
if (retval != STATUS_SUCCESS) {
chip->card_fail |= XD_CARD;
- rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ rts51x_set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, retval);
}
}
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index edf979f18a6c..d22916a4b9d8 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -259,7 +259,7 @@ static int ms_read_bytes(struct rtsx_chip *chip,
MS_TRANSFER_END, MS_TRANSFER_END);
for (i = 0; i < data_len - 1; i++)
- rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
if (data_len % 2)
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len, 0, 0);
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 8586ac5d2144..d2d1345fb06c 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -1031,8 +1031,10 @@ static void rtsx_remove(struct pci_dev *pci)
/* PCI IDs */
static DEFINE_PCI_DEVICE_TABLE(rtsx_ids) = {
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5208), PCI_CLASS_OTHERS << 16, 0xFF0000 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5288), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5208),
+ PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5288),
+ PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, },
};
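Note: the rtsx_ids hunk only re-wraps the entries, but the table is worth spelling out: each entry matches PCI_VENDOR_ID_REALTEK device 0x5208 or 0x5288 and additionally filters on the PCI base class through the trailing class/class_mask fields. The same table written with explicit designated initializers (a sketch, not the driver's code):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
	{ .vendor = PCI_VENDOR_ID_REALTEK, .device = 0x5208,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .class = PCI_CLASS_OTHERS << 16, .class_mask = 0xFF0000 },
	{ .vendor = PCI_VENDOR_ID_REALTEK, .device = 0x5288,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .class = PCI_CLASS_OTHERS << 16, .class_mask = 0xFF0000 },
	{ }	/* all-zero terminator */
};
MODULE_DEVICE_TABLE(pci, example_ids);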
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 3055eb10c076..01aeb01bebe5 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -105,8 +105,10 @@ void try_to_switch_sdio_ctrl(struct rtsx_chip *chip)
RTSX_DEBUGP("reg 0xFF34: 0x%x, reg 0xFF38: 0x%x\n", reg1, reg2);
if ((reg1 & 0xC0) && (reg2 & 0xC0)) {
chip->sd_int = 1;
- rtsx_write_register(chip, SDIO_CTRL, 0xFF, SDIO_BUS_CTRL | SDIO_CD_CTRL);
- rtsx_write_register(chip, PWR_GATE_CTRL, LDO3318_PWR_MASK, LDO_ON);
+ rtsx_write_register(chip, SDIO_CTRL, 0xFF,
+ SDIO_BUS_CTRL | SDIO_CD_CTRL);
+ rtsx_write_register(chip, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, LDO_ON);
}
}
@@ -452,7 +454,8 @@ void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip)
}
#ifdef DISABLE_CARD_INT
-void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset, unsigned long *need_release)
+void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset,
+ unsigned long *need_release)
{
u8 release_map = 0, reset_map = 0;
@@ -504,11 +507,14 @@ void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset, unsigne
}
reset_map = 0;
- if (!(chip->card_exist & XD_CARD) && (xd_cnt > (DEBOUNCE_CNT-1)))
+ if (!(chip->card_exist & XD_CARD) &&
+ (xd_cnt > (DEBOUNCE_CNT-1)))
reset_map |= XD_CARD;
- if (!(chip->card_exist & SD_CARD) && (sd_cnt > (DEBOUNCE_CNT-1)))
+ if (!(chip->card_exist & SD_CARD) &&
+ (sd_cnt > (DEBOUNCE_CNT-1)))
reset_map |= SD_CARD;
- if (!(chip->card_exist & MS_CARD) && (ms_cnt > (DEBOUNCE_CNT-1)))
+ if (!(chip->card_exist & MS_CARD) &&
+ (ms_cnt > (DEBOUNCE_CNT-1)))
reset_map |= MS_CARD;
}
@@ -549,7 +555,8 @@ void rtsx_init_cards(struct rtsx_chip *chip)
if (!(chip->card_exist & MS_CARD))
clear_bit(MS_NR, &(chip->need_release));
- RTSX_DEBUGP("chip->need_release = 0x%x\n", (unsigned int)(chip->need_release));
+ RTSX_DEBUGP("chip->need_release = 0x%x\n",
+ (unsigned int)(chip->need_release));
#ifdef SUPPORT_OCP
if (chip->need_release) {
@@ -588,8 +595,10 @@ void rtsx_init_cards(struct rtsx_chip *chip)
release_xd_card(chip);
- if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN))
- rtsx_write_register(chip, HOST_SLEEP_STATE, 0xC0, 0xC0);
+ if (CHECK_PID(chip, 0x5288) &&
+ CHECK_BARO_PKG(chip, QFN))
+ rtsx_write_register(chip, HOST_SLEEP_STATE,
+ 0xC0, 0xC0);
}
if (chip->need_release & MS_CARD) {
@@ -610,13 +619,15 @@ void rtsx_init_cards(struct rtsx_chip *chip)
}
if (chip->need_reset) {
- RTSX_DEBUGP("chip->need_reset = 0x%x\n", (unsigned int)(chip->need_reset));
+ RTSX_DEBUGP("chip->need_reset = 0x%x\n",
+ (unsigned int)(chip->need_reset));
rtsx_reset_cards(chip);
}
if (chip->need_reinit) {
- RTSX_DEBUGP("chip->need_reinit = 0x%x\n", (unsigned int)(chip->need_reinit));
+ RTSX_DEBUGP("chip->need_reinit = 0x%x\n",
+ (unsigned int)(chip->need_reinit));
rtsx_reinit_cards(chip, 0);
}
@@ -624,7 +635,7 @@ void rtsx_init_cards(struct rtsx_chip *chip)
static inline u8 double_depth(u8 depth)
{
- return ((depth > 1) ? (depth - 1) : depth);
+ return (depth > 1) ? (depth - 1) : depth;
}
int switch_ssc_clock(struct rtsx_chip *chip, int clk)
@@ -641,7 +652,8 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
max_N = 120;
max_div = CLK_DIV_4;
- RTSX_DEBUGP("Switch SSC clock to %dMHz (cur_clk = %d)\n", clk, chip->cur_clk);
+ RTSX_DEBUGP("Switch SSC clock to %dMHz (cur_clk = %d)\n",
+ clk, chip->cur_clk);
if ((clk <= 2) || (N > max_N))
TRACE_RET(chip, STATUS_FAIL);
@@ -676,8 +688,10 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, N);
rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
if (sd_vpclk_phase_reset) {
- rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
- rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
}
retval = rtsx_send_cmd(chip, 0, WAIT_TIME);
@@ -786,8 +800,10 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk)
if (sd_vpclk_phase_reset) {
udelay(200);
- RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
- RTSX_WRITE_REG(chip, SD_VPCLK1_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
+ RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
+ RTSX_WRITE_REG(chip, SD_VPCLK1_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
udelay(200);
}
RTSX_WRITE_REG(chip, CLK_CTL, 0xFF, 0);
@@ -797,7 +813,8 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk)
return STATUS_SUCCESS;
}
-void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size)
+void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip,
+ u32 byte_cnt, u8 pack_size)
{
if (pack_size > DMA_1024)
pack_size = DMA_512;
@@ -810,10 +827,12 @@ void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip, u32 b
rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC0, 0xFF, (u8)byte_cnt);
if (dir == DMA_FROM_DEVICE) {
- rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, 0x03 | DMA_PACK_SIZE_MASK,
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
DMA_DIR_FROM_CARD | DMA_EN | pack_size);
} else {
- rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, 0x03 | DMA_PACK_SIZE_MASK,
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
DMA_DIR_TO_CARD | DMA_EN | pack_size);
}
@@ -903,7 +922,8 @@ int card_power_off(struct rtsx_chip *chip, u8 card)
return STATUS_SUCCESS;
}
-int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 sec_addr, u16 sec_cnt)
+int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 sec_addr, u16 sec_cnt)
{
int retval;
unsigned int lun = SCSI_LUN(srb);
@@ -921,7 +941,8 @@ int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 sec_addr, u16 sec
rtsx_release_chip(chip);
TRACE_RET(chip, STATUS_FAIL);
}
- if (detect_card_cd(chip, chip->cur_card) != STATUS_SUCCESS)
+ if (detect_card_cd(chip, chip->cur_card) !=
+ STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
if (!chip->rw_need_retry) {
@@ -1016,7 +1037,8 @@ void toggle_gpio(struct rtsx_chip *chip, u8 gpio)
void turn_on_led(struct rtsx_chip *chip, u8 gpio)
{
if (CHECK_PID(chip, 0x5288))
- rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), (u8)(1 << gpio));
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
+ (u8)(1 << gpio));
else
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
}
@@ -1026,7 +1048,8 @@ void turn_off_led(struct rtsx_chip *chip, u8 gpio)
if (CHECK_PID(chip, 0x5288))
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
else
- rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), (u8)(1 << gpio));
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
+ (u8)(1 << gpio));
}
int detect_card_cd(struct rtsx_chip *chip, int card)
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index 382e73a54f4d..bbfa665c5c99 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -2867,7 +2867,7 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
- if ((get_lun_card(chip, lun) != MS_CARD)) {
+ if (get_lun_card(chip, lun) != MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
@@ -2983,7 +2983,7 @@ static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
- if ((get_lun_card(chip, lun) != SD_CARD)) {
+ if (get_lun_card(chip, lun) != SD_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
@@ -3046,7 +3046,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
- if ((get_lun_card(chip, lun) != MS_CARD)) {
+ if (get_lun_card(chip, lun) != MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
@@ -3151,7 +3151,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
- if ((get_lun_card(chip, lun) != MS_CARD)) {
+ if (get_lun_card(chip, lun) != MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 97b7b012983e..694d3834962c 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -625,8 +625,8 @@ out:
return err;
}
-static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
- enum dma_data_direction dma_dir, int timeout)
+static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
+ size_t len, enum dma_data_direction dma_dir, int timeout)
{
struct rtsx_dev *rtsx = chip->rtsx;
struct completion trans_done;
diff --git a/drivers/staging/sb105x/Kconfig b/drivers/staging/sb105x/Kconfig
deleted file mode 100644
index 245e7847a354..000000000000
--- a/drivers/staging/sb105x/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config SB105X
- tristate "SystemBase PCI Multiport UART"
- select SERIAL_CORE
- depends on PCI && X86 && TTY && BROKEN
- help
- A driver for the SystemBase Multi-2/PCI serial card
-
- To compile this driver a module, choose M here: the module
- will be called "sb105x".
diff --git a/drivers/staging/sb105x/Makefile b/drivers/staging/sb105x/Makefile
deleted file mode 100644
index b1bf3779acae..000000000000
--- a/drivers/staging/sb105x/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_SB105X) += sb105x.o
-
-sb105x-y := sb_pci_mp.o
diff --git a/drivers/staging/sb105x/sb_mp_register.h b/drivers/staging/sb105x/sb_mp_register.h
deleted file mode 100644
index 276c1bbcc18d..000000000000
--- a/drivers/staging/sb105x/sb_mp_register.h
+++ /dev/null
@@ -1,295 +0,0 @@
-
-/*
- * SB105X_UART.h
- *
- * Copyright (C) 2008 systembase
- *
- * UART registers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef UART_SB105X_H
-#define UART_SB105X_H
-
-/*
- * option register
- */
-
-/* Device Information Register */
-#define MP_OPTR_DIR0 0x04 /* port0 ~ port8 */
-#define MP_OPTR_DIR1 0x05 /* port8 ~ port15 */
-#define MP_OPTR_DIR2 0x06 /* port16 ~ port23 */
-#define MP_OPTR_DIR3 0x07 /* port24 ~ port31 */
-
-#define DIR_UART_16C550 0
-#define DIR_UART_16C1050 1
-#define DIR_UART_16C1050A 2
-
-#define DIR_CLK_1843200 0x0 /* input clock 1843200 Hz */
-#define DIR_CLK_3686400 0x1 /* input clock 3686400 Hz */
-#define DIR_CLK_7372800 0x2 /* input clock 7372800 Hz */
-#define DIR_CLK_14745600 0x3 /* input clock 14745600 Hz */
-#define DIR_CLK_29491200 0x4 /* input clock 29491200 Hz */
-#define DIR_CLK_58985400 0x5 /* input clock 58985400 Hz */
-
-/* Interface Information Register */
-#define MP_OPTR_IIR0 0x08 /* port0 ~ port8 */
-#define MP_OPTR_IIR1 0x09 /* port8 ~ port15 */
-#define MP_OPTR_IIR2 0x0A /* port16 ~ port23 */
-#define MP_OPTR_IIR3 0x0B /* port24 ~ port31 */
-
-#define IIR_RS232 0x00 /* RS232 type */
-#define IIR_RS422 0x10 /* RS422 type */
-#define IIR_RS485 0x20 /* RS485 type */
-#define IIR_TYPE_MASK 0x30
-
-/* Interrupt Mask Register */
-#define MP_OPTR_IMR0 0x0C /* port0 ~ port8 */
-#define MP_OPTR_IMR1 0x0D /* port8 ~ port15 */
-#define MP_OPTR_IMR2 0x0E /* port16 ~ port23 */
-#define MP_OPTR_IMR3 0x0F /* port24 ~ port31 */
-
-/* Interrupt Poll Register */
-#define MP_OPTR_IPR0 0x10 /* port0 ~ port8 */
-#define MP_OPTR_IPR1 0x11 /* port8 ~ port15 */
-#define MP_OPTR_IPR2 0x12 /* port16 ~ port23 */
-#define MP_OPTR_IPR3 0x13 /* port24 ~ port31 */
-
-/* General Purpose Output Control Register */
-#define MP_OPTR_GPOCR 0x20
-
-/* General Purpose Output Data Register */
-#define MP_OPTR_GPODR 0x21
-
-/* Parallel Additional Function Register */
-#define MP_OPTR_PAFR 0x23
-
-/*
- * systembase 16c105x UART register
- */
-
-#define PAGE_0 0
-#define PAGE_1 1
-#define PAGE_2 2
-#define PAGE_3 3
-#define PAGE_4 4
-
-/*
- * ******************************************************************
- * * DLAB=0 =============== Page 0 Registers *
- * ******************************************************************
- */
-
-#define SB105X_RX 0 /* In: Receive buffer */
-#define SB105X_TX 0 /* Out: Transmit buffer */
-
-#define SB105X_IER 1 /* Out: Interrupt Enable Register */
-
-#define SB105X_IER_CTSI 0x80 /* CTS# Interrupt Enable (Requires EFR[4] = 1) */
-#define SB105X_IER_RTSI 0x40 /* RTS# Interrupt Enable (Requires EFR[4] = 1) */
-#define SB105X_IER_XOI 0x20 /* Xoff Interrupt Enable (Requires EFR[4] = 1) */
-#define SB105X_IER_SME 0x10 /* Sleep Mode Enable (Requires EFR[4] = 1) */
-#define SB105X_IER_MSI 0x08 /* Enable Modem status interrupt */
-#define SB105X_IER_RLSI 0x04 /* Enable receiver line status interrupt */
-#define SB105X_IER_THRI 0x02 /* Enable Transmitter holding register int. */
-#define SB105X_IER_RDI 0x01 /* Enable receiver data interrupt */
-
-#define SB105X_ISR 2 /* In: Interrupt ID Register */
-
-#define SB105X_ISR_NOINT 0x01 /* No interrupts pending */
-#define SB105X_ISR_RLSI 0x06 /* Receiver line status interrupt (Priority = 1)*/
-#define SB105X_ISR_RDAI 0x0c /* Receive Data Available interrupt */
-#define SB105X_ISR_CTII 0x04 /* Character Timeout Indication interrupt */
-#define SB105X_ISR_THRI 0x02 /* Transmitter holding register empty */
-#define SB105X_ISR_MSI 0x00 /* Modem status interrupt */
-#define SB105X_ISR_RXCI 0x10 /* Receive Xoff or Special Character interrupt */
-#define SB105X_ISR_RCSI 0x20 /* RTS#, CTS# status interrupt during Auto RTS/CTS flow control */
-
-#define SB105X_FCR 2 /* Out: FIFO Control Register */
-
-#define SB105X_FCR_FEN 0x01 /* FIFO Enable */
-#define SB105X_FCR_RXFR 0x02 /* RX FIFO Reset */
-#define SB105X_FCR_TXFR 0x04 /* TX FIFO Reset */
-#define SB105X_FCR_DMS 0x08 /* DMA Mode Select */
-
-#define SB105X_FCR_RTR08 0x00 /* Receive Trigger Level set at 8 */
-#define SB105X_FCR_RTR16 0x40 /* Receive Trigger Level set at 16 */
-#define SB105X_FCR_RTR56 0x80 /* Receive Trigger Level set at 56 */
-#define SB105X_FCR_RTR60 0xc0 /* Receive Trigger Level set at 60 */
-#define SB105X_FCR_TTR08 0x00 /* Transmit Trigger Level set at 8 */
-#define SB105X_FCR_TTR16 0x10 /* Transmit Trigger Level set at 16 */
-#define SB105X_FCR_TTR32 0x20 /* Transmit Trigger Level set at 32 */
-#define SB105X_FCR_TTR56 0x30 /* Transmit Trigger Level set at 56 */
-
-#define SB105X_LCR 3 /* Out: Line Control Register */
-/*
- * * Note: if the word length is 5 bits (SB105X_LCR_WLEN5), then setting
- * * SB105X_LCR_STOP will select 1.5 stop bits, not 2 stop bits.
- */
-#define SB105X_LCR_DLAB 0x80 /* Divisor Latch Enable */
-#define SB105X_LCR_SBC 0x40 /* Break Enable*/
-#define SB105X_LCR_SPAR 0x20 /* Set Stick parity */
-#define SB105X_LCR_EPAR 0x10 /* Even parity select */
-#define SB105X_LCR_PAREN 0x08 /* Parity Enable */
-#define SB105X_LCR_STOP 0x04 /* Stop bits: 0->1 bit, 1->2 bits, 1 and SB105X_LCR_WLEN5 -> 1.5 bit */
-#define SB105X_LCR_WLEN5 0x00 /* Wordlength: 5 bits */
-#define SB105X_LCR_WLEN6 0x01 /* Wordlength: 6 bits */
-#define SB105X_LCR_WLEN7 0x02 /* Wordlength: 7 bits */
-#define SB105X_LCR_WLEN8 0x03 /* Wordlength: 8 bits */
-
-#define SB105X_LCR_BF 0xBF
-
-#define SB105X_MCR 4 /* Out: Modem Control Register */
-#define SB105X_MCR_CPS 0x80 /* Clock Prescaler Select */
-#define SB105X_MCR_P2S 0x40 /* Page 2 Select /Xoff Re-Transmit Access Enable */
-#define SB105X_MCR_XOA 0x20 /* Xon Any Enable */
-#define SB105X_MCR_ILB 0x10 /* Internal Loopback Enable */
-#define SB105X_MCR_OUT2 0x08 /* Out2/Interrupt Output Enable*/
-#define SB105X_MCR_OUT1 0x04 /* Out1/Interrupt Output Enable */
-#define SB105X_MCR_RTS 0x02 /* RTS# Output */
-#define SB105X_MCR_DTR 0x01 /* DTR# Output */
-
-#define SB105X_LSR 5 /* In: Line Status Register */
-#define SB105X_LSR_RFEI 0x80 /* Receive FIFO data error Indicator */
-#define SB105X_LSR_TEMI 0x40 /* THR and TSR Empty Indicator */
-#define SB105X_LSR_THRE 0x20 /* THR Empty Indicator */
-#define SB105X_LSR_BII 0x10 /* Break interrupt indicator */
-#define SB105X_LSR_FEI 0x08 /* Frame error indicator */
-#define SB105X_LSR_PEI 0x04 /* Parity error indicator */
-#define SB105X_LSR_OEI 0x02 /* Overrun error indicator */
-#define SB105X_LSR_RDRI 0x01 /* Receive data ready Indicator*/
-
-#define SB105X_MSR 6 /* In: Modem Status Register */
-#define SB105X_MSR_DCD 0x80 /* Data Carrier Detect */
-#define SB105X_MSR_RI 0x40 /* Ring Indicator */
-#define SB105X_MSR_DSR 0x20 /* Data Set Ready */
-#define SB105X_MSR_CTS 0x10 /* Clear to Send */
-#define SB105X_MSR_DDCD 0x08 /* Delta DCD */
-#define SB105X_MSR_DRI 0x04 /* Delta ring indicator */
-#define SB105X_MSR_DDSR 0x02 /* Delta DSR */
-#define SB105X_MSR_DCTS 0x01 /* Delta CTS */
-
-#define SB105XA_MDR 6 /* Out: Multi Drop mode Register */
-#define SB105XA_MDR_NPS 0x08 /* 9th Bit Polarity Select */
-#define SB105XA_MDR_AME 0x02 /* Auto Multi-drop Enable */
-#define SB105XA_MDR_MDE 0x01 /* Multi Drop Enable */
-
-#define SB105X_SPR 7 /* I/O: Scratch Register */
-
-/*
- * DLAB=1
- */
-#define SB105X_DLL 0 /* Out: Divisor Latch Low */
-#define SB105X_DLM 1 /* Out: Divisor Latch High */
-
-/*
- * ******************************************************************
- * * DLAB(LCR[7]) = 0 , MCR[6] = 1 ============= Page 2 Registers *
- * ******************************************************************
- */
-#define SB105X_GICR 1 /* Global Interrupt Control Register */
-#define SB105X_GICR_GIM 0x01 /* Global Interrupt Mask */
-
-#define SB105X_GISR 2 /* Global Interrupt Status Register */
-#define SB105X_GISR_MGICR0 0x80 /* Mirror the content of GICR[0] */
-#define SB105X_GISR_CS3IS 0x08 /* SB105X of CS3# Interrupt Status */
-#define SB105X_GISR_CS2IS 0x04 /* SB105X of CS2# Interrupt Status */
-#define SB105X_GISR_CS1IS 0x02 /* SB105X of CS1# Interrupt Status */
-#define SB105X_GISR_CS0IS 0x01 /* SB105X of CS0# Interrupt Status */
-
-#define SB105X_TFCR 5 /* Transmit FIFO Count Register */
-
-#define SB105X_RFCR 6 /* Receive FIFO Count Register */
-
-#define SB105X_FSR 7 /* Flow Control Status Register */
-#define SB105X_FSR_THFS 0x20 /* Transmit Hardware Flow Control Status */
-#define SB105X_FSR_TSFS 0x10 /* Transmit Software Flow Control Status */
-#define SB105X_FSR_RHFS 0x02 /* Receive Hardware Flow Control Status */
-#define SB105X_FSR_RSFS 0x01 /* Receive Software Flow Control Status */
-
-/*
- * ******************************************************************
- * * LCR = 0xBF, PSR[0] = 0 ============= Page 3 Registers *
- * ******************************************************************
- */
-
-#define SB105X_PSR 0 /* Page Select Register */
-#define SB105X_PSR_P3KEY 0xA4 /* Page 3 Select Key */
-#define SB105X_PSR_P4KEY 0xA5 /* Page 5 Select Key */
-
-#define SB105X_ATR 1 /* Auto Toggle Control Register */
-#define SB105X_ATR_RPS 0x80 /* RXEN Polarity Select */
-#define SB105X_ATR_RCMS 0x40 /* RXEN Control Mode Select */
-#define SB105X_ATR_TPS 0x20 /* TXEN Polarity Select */
-#define SB105X_ATR_TCMS 0x10 /* TXEN Control Mode Select */
-#define SB105X_ATR_ATDIS 0x00 /* Auto Toggle is disabled */
-#define SB105X_ATR_ART 0x01 /* RTS#/TXEN pin operates as TXEN */
-#define SB105X_ATR_ADT 0x02 /* DTR#/TXEN pin operates as TXEN */
-#define SB105X_ATR_A80 0x03 /* only in 80 pin use */
-
-#define SB105X_EFR 2 /* (Auto) Enhanced Feature Register */
-#define SB105X_EFR_ACTS 0x80 /* Auto-CTS Flow Control Enable */
-#define SB105X_EFR_ARTS 0x40 /* Auto-RTS Flow Control Enable */
-#define SB105X_EFR_SCD 0x20 /* Special Character Detect */
-#define SB105X_EFR_EFBEN 0x10 /* Enhanced Function Bits Enable */
-
-#define SB105X_XON1 4 /* Xon1 Character Register */
-#define SB105X_XON2 5 /* Xon2 Character Register */
-#define SB105X_XOFF1 6 /* Xoff1 Character Register */
-#define SB105X_XOFF2 7 /* Xoff2 Character Register */
-
-/*
- * ******************************************************************
- * * LCR = 0xBF, PSR[0] = 1 ============ Page 4 Registers *
- * ******************************************************************
- */
-
-#define SB105X_AFR 1 /* Additional Feature Register */
-#define SB105X_AFR_GIPS 0x20 /* Global Interrupt Polarity Select */
-#define SB105X_AFR_GIEN 0x10 /* Global Interrupt Enable */
-#define SB105X_AFR_AFEN 0x01 /* 256-byte FIFO Enable */
-
-#define SB105X_XRCR 2 /* Xoff Re-transmit Count Register */
-#define SB105X_XRCR_NRC1 0x00 /* Transmits Xoff Character whenever the number of received data is 1 during XOFF status */
-#define SB105X_XRCR_NRC4 0x01 /* Transmits Xoff Character whenever the number of received data is 4 during XOFF status */
-#define SB105X_XRCR_NRC8 0x02 /* Transmits Xoff Character whenever the number of received data is 8 during XOFF status */
-#define SB105X_XRCR_NRC16 0x03 /* Transmits Xoff Character whenever the number of received data is 16 during XOFF status */
-
-#define SB105X_TTR 4 /* Transmit FIFO Trigger Level Register */
-#define SB105X_RTR 5 /* Receive FIFO Trigger Level Register */
-#define SB105X_FUR 6 /* Flow Control Upper Threshold Register */
-#define SB105X_FLR 7 /* Flow Control Lower Threshold Register */
-
-
-/* page 0 */
-
-#define SB105X_GET_CHAR(port) inb((port)->iobase + SB105X_RX)
-#define SB105X_GET_IER(port) inb((port)->iobase + SB105X_IER)
-#define SB105X_GET_ISR(port) inb((port)->iobase + SB105X_ISR)
-#define SB105X_GET_LCR(port) inb((port)->iobase + SB105X_LCR)
-#define SB105X_GET_MCR(port) inb((port)->iobase + SB105X_MCR)
-#define SB105X_GET_LSR(port) inb((port)->iobase + SB105X_LSR)
-#define SB105X_GET_MSR(port) inb((port)->iobase + SB105X_MSR)
-#define SB105X_GET_SPR(port) inb((port)->iobase + SB105X_SPR)
-
-#define SB105X_PUT_CHAR(port,v) outb((v),(port)->iobase + SB105X_TX )
-#define SB105X_PUT_IER(port,v) outb((v),(port)->iobase + SB105X_IER )
-#define SB105X_PUT_FCR(port,v) outb((v),(port)->iobase + SB105X_FCR )
-#define SB105X_PUT_LCR(port,v) outb((v),(port)->iobase + SB105X_LCR )
-#define SB105X_PUT_MCR(port,v) outb((v),(port)->iobase + SB105X_MCR )
-#define SB105X_PUT_SPR(port,v) outb((v),(port)->iobase + SB105X_SPR )
-
-
-/* page 1 */
-#define SB105X_GET_REG(port,reg) inb((port)->iobase + (reg))
-#define SB105X_PUT_REG(port,reg,v) outb((v),(port)->iobase + (reg))
-
-/* page 2 */
-
-#define SB105X_PUT_PSR(port,v) outb((v),(port)->iobase + SB105X_PSR )
-
-#endif
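Note: the deleted header documents the SB105X's paged register file: pages 3 and 4 sit behind LCR = 0xBF plus a key written to PSR, and page 2 behind MCR bit 6. A condensed sketch of the page-3 read sequence (the full implementation is sb1054_get_register() in the sb_pci_mp.c removal below):

/* Read a page-3 register: enter the enhanced-register mode via LCR,
 * select the page through PSR, read, then restore LCR. */
static int sb105x_read_page3(struct sb_uart_port *port, int reg)
{
	unsigned int lcr = SB105X_GET_LCR(port);
	int val;

	SB105X_PUT_LCR(port, lcr | SB105X_LCR_BF);
	SB105X_PUT_REG(port, SB105X_PSR, SB105X_PSR_P3KEY);
	val = SB105X_GET_REG(port, reg);
	SB105X_PUT_LCR(port, lcr);
	return val;
}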
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
deleted file mode 100644
index c9d6ee3903ad..000000000000
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ /dev/null
@@ -1,3189 +0,0 @@
-#include "sb_pci_mp.h"
-#include <linux/module.h>
-#include <linux/parport.h>
-
-extern struct parport *parport_pc_probe_port(unsigned long base_lo,
- unsigned long base_hi,
- int irq, int dma,
- struct device *dev,
- int irqflags);
-
-static struct mp_device_t mp_devs[MAX_MP_DEV];
-static int mp_nrpcibrds = sizeof(mp_pciboards)/sizeof(mppcibrd_t);
-static int NR_BOARD=0;
-static int NR_PORTS=0;
-static struct mp_port multi_ports[MAX_MP_PORT];
-static struct irq_info irq_lists[NR_IRQS];
-
-static _INLINE_ unsigned int serial_in(struct mp_port *mtpt, int offset);
-static _INLINE_ void serial_out(struct mp_port *mtpt, int offset, int value);
-static _INLINE_ unsigned int read_option_register(struct mp_port *mtpt, int offset);
-static int sb1054_get_register(struct sb_uart_port *port, int page, int reg);
-static int sb1054_set_register(struct sb_uart_port *port, int page, int reg, int value);
-static void SendATCommand(struct mp_port *mtpt);
-static int set_deep_fifo(struct sb_uart_port *port, int status);
-static int get_deep_fifo(struct sb_uart_port *port);
-static int get_device_type(int arg);
-static int set_auto_rts(struct sb_uart_port *port, int status);
-static void mp_stop(struct tty_struct *tty);
-static void __mp_start(struct tty_struct *tty);
-static void mp_start(struct tty_struct *tty);
-static void mp_tasklet_action(unsigned long data);
-static inline void mp_update_mctrl(struct sb_uart_port *port, unsigned int set, unsigned int clear);
-static int mp_startup(struct sb_uart_state *state, int init_hw);
-static void mp_shutdown(struct sb_uart_state *state);
-static void mp_change_speed(struct sb_uart_state *state, struct MP_TERMIOS *old_termios);
-
-static inline int __mp_put_char(struct sb_uart_port *port, struct circ_buf *circ, unsigned char c);
-static int mp_put_char(struct tty_struct *tty, unsigned char ch);
-
-static void mp_put_chars(struct tty_struct *tty);
-static int mp_write(struct tty_struct *tty, const unsigned char *buf, int count);
-static int mp_write_room(struct tty_struct *tty);
-static int mp_chars_in_buffer(struct tty_struct *tty);
-static void mp_flush_buffer(struct tty_struct *tty);
-static void mp_send_xchar(struct tty_struct *tty, char ch);
-static void mp_throttle(struct tty_struct *tty);
-static void mp_unthrottle(struct tty_struct *tty);
-static int mp_get_info(struct sb_uart_state *state, struct serial_struct *retinfo);
-static int mp_set_info(struct sb_uart_state *state, struct serial_struct *newinfo);
-static int mp_get_lsr_info(struct sb_uart_state *state, unsigned int *value);
-
-static int mp_tiocmget(struct tty_struct *tty);
-static int mp_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear);
-static int mp_break_ctl(struct tty_struct *tty, int break_state);
-static int mp_do_autoconfig(struct sb_uart_state *state);
-static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg);
-static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt);
-static int mp_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
-static void mp_set_termios(struct tty_struct *tty, struct MP_TERMIOS *old_termios);
-static void mp_close(struct tty_struct *tty, struct file *filp);
-static void mp_wait_until_sent(struct tty_struct *tty, int timeout);
-static void mp_hangup(struct tty_struct *tty);
-static void mp_update_termios(struct sb_uart_state *state);
-static int mp_block_til_ready(struct file *filp, struct sb_uart_state *state);
-static struct sb_uart_state *uart_get(struct uart_driver *drv, int line);
-static int mp_open(struct tty_struct *tty, struct file *filp);
-static const char *mp_type(struct sb_uart_port *port);
-static void mp_change_pm(struct sb_uart_state *state, int pm_state);
-static inline void mp_report_port(struct uart_driver *drv, struct sb_uart_port *port);
-static void mp_configure_port(struct uart_driver *drv, struct sb_uart_state *state, struct sb_uart_port *port);
-static void mp_unconfigure_port(struct uart_driver *drv, struct sb_uart_state *state);
-static int mp_register_driver(struct uart_driver *drv);
-static void mp_unregister_driver(struct uart_driver *drv);
-static int mp_add_one_port(struct uart_driver *drv, struct sb_uart_port *port);
-static int mp_remove_one_port(struct uart_driver *drv, struct sb_uart_port *port);
-static void autoconfig(struct mp_port *mtpt, unsigned int probeflags);
-static void autoconfig_irq(struct mp_port *mtpt);
-static void multi_stop_tx(struct sb_uart_port *port);
-static void multi_start_tx(struct sb_uart_port *port);
-static void multi_stop_rx(struct sb_uart_port *port);
-static void multi_enable_ms(struct sb_uart_port *port);
-static _INLINE_ void receive_chars(struct mp_port *mtpt, int *status );
-static _INLINE_ void transmit_chars(struct mp_port *mtpt);
-static _INLINE_ void check_modem_status(struct mp_port *mtpt);
-static inline void multi_handle_port(struct mp_port *mtpt);
-static irqreturn_t multi_interrupt(int irq, void *dev_id);
-static void serial_do_unlink(struct irq_info *i, struct mp_port *mtpt);
-static int serial_link_irq_chain(struct mp_port *mtpt);
-static void serial_unlink_irq_chain(struct mp_port *mtpt);
-static void multi_timeout(unsigned long data);
-static unsigned int multi_tx_empty(struct sb_uart_port *port);
-static unsigned int multi_get_mctrl(struct sb_uart_port *port);
-static void multi_set_mctrl(struct sb_uart_port *port, unsigned int mctrl);
-static void multi_break_ctl(struct sb_uart_port *port, int break_state);
-static int multi_startup(struct sb_uart_port *port);
-static void multi_shutdown(struct sb_uart_port *port);
-static unsigned int multi_get_divisor(struct sb_uart_port *port, unsigned int baud);
-static void multi_set_termios(struct sb_uart_port *port, struct MP_TERMIOS *termios, struct MP_TERMIOS *old);
-static void multi_pm(struct sb_uart_port *port, unsigned int state, unsigned int oldstate);
-static void multi_release_std_resource(struct mp_port *mtpt);
-static void multi_release_port(struct sb_uart_port *port);
-static int multi_request_port(struct sb_uart_port *port);
-static void multi_config_port(struct sb_uart_port *port, int flags);
-static int multi_verify_port(struct sb_uart_port *port, struct serial_struct *ser);
-static const char *multi_type(struct sb_uart_port *port);
-static void __init multi_init_ports(void);
-static void __init multi_register_ports(struct uart_driver *drv);
-static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd);
-
-static int deep[256];
-static int deep_count;
-static int fcr_arr[256];
-static int fcr_count;
-static int ttr[256];
-static int ttr_count;
-static int rtr[256];
-static int rtr_count;
-
-module_param_array(deep,int,&deep_count,0);
-module_param_array(fcr_arr,int,&fcr_count,0);
-module_param_array(ttr,int,&ttr_count,0);
-module_param_array(rtr,int,&rtr_count,0);
-
-static _INLINE_ unsigned int serial_in(struct mp_port *mtpt, int offset)
-{
- return inb(mtpt->port.iobase + offset);
-}
-
-static _INLINE_ void serial_out(struct mp_port *mtpt, int offset, int value)
-{
- outb(value, mtpt->port.iobase + offset);
-}
-
-static _INLINE_ unsigned int read_option_register(struct mp_port *mtpt, int offset)
-{
- return inb(mtpt->option_base_addr + offset);
-}
-
-static int sb1053a_get_interface(struct mp_port *mtpt, int port_num)
-{
- unsigned long option_base_addr = mtpt->option_base_addr;
- unsigned int interface = 0;
-
- switch (port_num)
- {
- case 0:
- case 1:
- /* set GPO[1:0] = 00 */
- outb(0x00, option_base_addr + MP_OPTR_GPODR);
- break;
- case 2:
- case 3:
- /* set GPO[1:0] = 01 */
- outb(0x01, option_base_addr + MP_OPTR_GPODR);
- break;
- case 4:
- case 5:
- /* set GPO[1:0] = 10 */
- outb(0x02, option_base_addr + MP_OPTR_GPODR);
- break;
- default:
- break;
- }
-
- port_num &= 0x1;
-
- /* get interface */
- interface = inb(option_base_addr + MP_OPTR_IIR0 + port_num);
-
- /* set GPO[1:0] = 11 */
- outb(0x03, option_base_addr + MP_OPTR_GPODR);
-
- return (interface);
-}
-
-static int sb1054_get_register(struct sb_uart_port *port, int page, int reg)
-{
- int ret = 0;
- unsigned int lcr = 0;
- unsigned int mcr = 0;
- unsigned int tmp = 0;
-
- if( page <= 0)
- {
- printk(" page 0 can not use this function\n");
- return -1;
- }
-
- switch(page)
- {
- case 1:
- lcr = SB105X_GET_LCR(port);
- tmp = lcr | SB105X_LCR_DLAB;
- SB105X_PUT_LCR(port, tmp);
-
- tmp = SB105X_GET_LCR(port);
-
- ret = SB105X_GET_REG(port,reg);
- SB105X_PUT_LCR(port,lcr);
- break;
- case 2:
- mcr = SB105X_GET_MCR(port);
- tmp = mcr | SB105X_MCR_P2S;
- SB105X_PUT_MCR(port,tmp);
-
- ret = SB105X_GET_REG(port,reg);
-
- SB105X_PUT_MCR(port,mcr);
- break;
- case 3:
- lcr = SB105X_GET_LCR(port);
- tmp = lcr | SB105X_LCR_BF;
- SB105X_PUT_LCR(port,tmp);
- SB105X_PUT_REG(port,SB105X_PSR,SB105X_PSR_P3KEY);
-
- ret = SB105X_GET_REG(port,reg);
-
- SB105X_PUT_LCR(port,lcr);
- break;
- case 4:
- lcr = SB105X_GET_LCR(port);
- tmp = lcr | SB105X_LCR_BF;
- SB105X_PUT_LCR(port,tmp);
- SB105X_PUT_REG(port,SB105X_PSR,SB105X_PSR_P4KEY);
-
- ret = SB105X_GET_REG(port,reg);
-
- SB105X_PUT_LCR(port,lcr);
- break;
- default:
- printk(" error invalid page number \n");
- return -1;
- }
-
- return ret;
-}
-
-static int sb1054_set_register(struct sb_uart_port *port, int page, int reg, int value)
-{
- int lcr = 0;
- int mcr = 0;
- int ret = 0;
-
- if( page <= 0)
- {
- printk(" page 0 can not use this function\n");
- return -1;
- }
- switch(page)
- {
- case 1:
- lcr = SB105X_GET_LCR(port);
- SB105X_PUT_LCR(port, lcr | SB105X_LCR_DLAB);
-
- SB105X_PUT_REG(port,reg,value);
-
- SB105X_PUT_LCR(port, lcr);
- ret = 1;
- break;
- case 2:
- mcr = SB105X_GET_MCR(port);
- SB105X_PUT_MCR(port, mcr | SB105X_MCR_P2S);
-
- SB105X_PUT_REG(port,reg,value);
-
- SB105X_PUT_MCR(port, mcr);
- ret = 1;
- break;
- case 3:
- lcr = SB105X_GET_LCR(port);
- SB105X_PUT_LCR(port, lcr | SB105X_LCR_BF);
- SB105X_PUT_PSR(port, SB105X_PSR_P3KEY);
-
- SB105X_PUT_REG(port,reg,value);
-
- SB105X_PUT_LCR(port, lcr);
- ret = 1;
- break;
- case 4:
- lcr = SB105X_GET_LCR(port);
- SB105X_PUT_LCR(port, lcr | SB105X_LCR_BF);
- SB105X_PUT_PSR(port, SB105X_PSR_P4KEY);
-
- SB105X_PUT_REG(port,reg,value);
-
- SB105X_PUT_LCR(port, lcr);
- ret = 1;
- break;
- default:
- printk(" error invalid page number \n");
- return -1;
- }
-
- return ret;
-}
-
-static int set_multidrop_mode(struct sb_uart_port *port, unsigned int mode)
-{
- int mdr = SB105XA_MDR_NPS;
-
- if (mode & MDMODE_ENABLE)
- {
- mdr |= SB105XA_MDR_MDE;
- }
-
- if (1) //(mode & MDMODE_AUTO)
- {
- int efr = 0;
- mdr |= SB105XA_MDR_AME;
- efr = sb1054_get_register(port, PAGE_3, SB105X_EFR);
- efr |= SB105X_EFR_SCD;
- sb1054_set_register(port, PAGE_3, SB105X_EFR, efr);
- }
-
- sb1054_set_register(port, PAGE_1, SB105XA_MDR, mdr);
- port->mdmode &= ~0x6;
- port->mdmode |= mode;
- printk("[%d] multidrop init: %x\n", port->line, port->mdmode);
-
- return 0;
-}
-
-static int get_multidrop_addr(struct sb_uart_port *port)
-{
- return sb1054_get_register(port, PAGE_3, SB105X_XOFF2);
-}
-
-static int set_multidrop_addr(struct sb_uart_port *port, unsigned int addr)
-{
- sb1054_set_register(port, PAGE_3, SB105X_XOFF2, addr);
-
- return 0;
-}
-
-static void SendATCommand(struct mp_port *mtpt)
-{
- // a t cr lf
- unsigned char ch[] = {0x61,0x74,0x0d,0x0a,0x0};
- unsigned char lineControl;
- unsigned char i=0;
- unsigned char Divisor = 0xc;
-
- lineControl = serial_inp(mtpt,UART_LCR);
- serial_outp(mtpt,UART_LCR,(lineControl | UART_LCR_DLAB));
- serial_outp(mtpt,UART_DLL,(Divisor & 0xff));
- serial_outp(mtpt,UART_DLM,(Divisor & 0xff00)>>8); //baudrate is 4800
-
-
- serial_outp(mtpt,UART_LCR,lineControl);
- serial_outp(mtpt,UART_LCR,0x03); // N-8-1
- serial_outp(mtpt,UART_FCR,7);
- serial_outp(mtpt,UART_MCR,0x3);
- while(ch[i]){
- while((serial_inp(mtpt,UART_LSR) & 0x60) !=0x60){
- ;
- }
- serial_outp(mtpt,0,ch[i++]);
- }
-
-
-}// end of SendATCommand()
-
-static int set_deep_fifo(struct sb_uart_port *port, int status)
-{
- int afr_status = 0;
- afr_status = sb1054_get_register(port, PAGE_4, SB105X_AFR);
-
- if(status == ENABLE)
- {
- afr_status |= SB105X_AFR_AFEN;
- }
- else
- {
- afr_status &= ~SB105X_AFR_AFEN;
- }
-
- sb1054_set_register(port,PAGE_4,SB105X_AFR,afr_status);
- sb1054_set_register(port,PAGE_4,SB105X_TTR,ttr[port->line]);
- sb1054_set_register(port,PAGE_4,SB105X_RTR,rtr[port->line]);
- afr_status = sb1054_get_register(port, PAGE_4, SB105X_AFR);
-
- return afr_status;
-}
-
-static int get_device_type(int arg)
-{
- int ret;
- ret = inb(mp_devs[arg].option_reg_addr+MP_OPTR_DIR0);
- ret = (ret & 0xf0) >> 4;
- switch (ret)
- {
- case DIR_UART_16C550:
- return PORT_16C55X;
- case DIR_UART_16C1050:
- return PORT_16C105X;
- case DIR_UART_16C1050A:
- /*
- if (mtpt->port.line < 2)
- {
- return PORT_16C105XA;
- }
- else
- {
- if (mtpt->device->device_id & 0x50)
- {
- return PORT_16C55X;
- }
- else
- {
- return PORT_16C105X;
- }
- }*/
- return PORT_16C105XA;
- default:
- return PORT_UNKNOWN;
- }
-
-}
-static int get_deep_fifo(struct sb_uart_port *port)
-{
- int afr_status = 0;
- afr_status = sb1054_get_register(port, PAGE_4, SB105X_AFR);
- return afr_status;
-}
-
-static int set_auto_rts(struct sb_uart_port *port, int status)
-{
- int atr_status = 0;
-
-#if 0
- int efr_status = 0;
-
- efr_status = sb1054_get_register(port, PAGE_3, SB105X_EFR);
- if(status == ENABLE)
- efr_status |= SB105X_EFR_ARTS;
- else
- efr_status &= ~SB105X_EFR_ARTS;
- sb1054_set_register(port,PAGE_3,SB105X_EFR,efr_status);
- efr_status = sb1054_get_register(port, PAGE_3, SB105X_EFR);
-#endif
-
-//ATR
- atr_status = sb1054_get_register(port, PAGE_3, SB105X_ATR);
- switch(status)
- {
- case RS422PTP:
- atr_status = (SB105X_ATR_TPS) | (SB105X_ATR_A80);
- break;
- case RS422MD:
- atr_status = (SB105X_ATR_TPS) | (SB105X_ATR_TCMS) | (SB105X_ATR_A80);
- break;
- case RS485NE:
- atr_status = (SB105X_ATR_RCMS) | (SB105X_ATR_TPS) | (SB105X_ATR_TCMS) | (SB105X_ATR_A80);
- break;
- case RS485ECHO:
- atr_status = (SB105X_ATR_TPS) | (SB105X_ATR_TCMS) | (SB105X_ATR_A80);
- break;
- }
-
- sb1054_set_register(port,PAGE_3,SB105X_ATR,atr_status);
- atr_status = sb1054_get_register(port, PAGE_3, SB105X_ATR);
-
- return atr_status;
-}
-
-static void mp_stop(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
- unsigned long flags;
-
- spin_lock_irqsave(&port->lock, flags);
- port->ops->stop_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void __mp_start(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
-
- if (!uart_circ_empty(&state->info->xmit) && state->info->xmit.buf &&
- !tty->stopped && !tty->hw_stopped)
- port->ops->start_tx(port);
-}
-
-static void mp_start(struct tty_struct *tty)
-{
- __mp_start(tty);
-}
-
-static void mp_tasklet_action(unsigned long data)
-{
- struct sb_uart_state *state = (struct sb_uart_state *)data;
- struct tty_struct *tty;
-
- printk("tasklet is called!\n");
- tty = state->info->tty;
- tty_wakeup(tty);
-}
-
-static inline void mp_update_mctrl(struct sb_uart_port *port, unsigned int set, unsigned int clear)
-{
- unsigned int old;
-
- old = port->mctrl;
- port->mctrl = (old & ~clear) | set;
- if (old != port->mctrl)
- port->ops->set_mctrl(port, port->mctrl);
-}
-
-#define uart_set_mctrl(port,set) mp_update_mctrl(port,set,0)
-#define uart_clear_mctrl(port,clear) mp_update_mctrl(port,0,clear)
-
-static int mp_startup(struct sb_uart_state *state, int init_hw)
-{
- struct sb_uart_info *info = state->info;
- struct sb_uart_port *port = state->port;
- unsigned long page;
- int retval = 0;
-
- if (info->flags & UIF_INITIALIZED)
- return 0;
-
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
-
- if (port->type == PORT_UNKNOWN)
- return 0;
-
- if (!info->xmit.buf) {
- page = get_zeroed_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- info->xmit.buf = (unsigned char *) page;
-
- uart_circ_clear(&info->xmit);
- }
-
- retval = port->ops->startup(port);
- if (retval == 0) {
- if (init_hw) {
- mp_change_speed(state, NULL);
-
- if (info->tty && (info->tty->termios.c_cflag & CBAUD))
- uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
- }
-
- info->flags |= UIF_INITIALIZED;
-
- if (info->tty)
- clear_bit(TTY_IO_ERROR, &info->tty->flags);
- }
-
- if (retval && capable(CAP_SYS_ADMIN))
- retval = 0;
-
- return retval;
-}
-
-static void mp_shutdown(struct sb_uart_state *state)
-{
- struct sb_uart_info *info = state->info;
- struct sb_uart_port *port = state->port;
-
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
-
- if (info->flags & UIF_INITIALIZED) {
- info->flags &= ~UIF_INITIALIZED;
-
- if (!info->tty || (info->tty->termios.c_cflag & HUPCL))
- uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
-
- wake_up_interruptible(&info->delta_msr_wait);
-
- port->ops->shutdown(port);
-
- synchronize_irq(port->irq);
- }
- tasklet_kill(&info->tlet);
-
- if (info->xmit.buf) {
- free_page((unsigned long)info->xmit.buf);
- info->xmit.buf = NULL;
- }
-}
-
-static void mp_change_speed(struct sb_uart_state *state, struct MP_TERMIOS *old_termios)
-{
- struct tty_struct *tty = state->info->tty;
- struct sb_uart_port *port = state->port;
-
- if (!tty || port->type == PORT_UNKNOWN)
- return;
-
- if (tty->termios.c_cflag & CRTSCTS)
- state->info->flags |= UIF_CTS_FLOW;
- else
- state->info->flags &= ~UIF_CTS_FLOW;
-
- if (tty->termios.c_cflag & CLOCAL)
- state->info->flags &= ~UIF_CHECK_CD;
- else
- state->info->flags |= UIF_CHECK_CD;
-
- port->ops->set_termios(port, &tty->termios, old_termios);
-}
-
-static inline int __mp_put_char(struct sb_uart_port *port, struct circ_buf *circ, unsigned char c)
-{
- unsigned long flags;
- int ret = 0;
-
- if (!circ->buf)
- return 0;
-
- spin_lock_irqsave(&port->lock, flags);
- if (uart_circ_chars_free(circ) != 0) {
- circ->buf[circ->head] = c;
- circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
- ret = 1;
- }
- spin_unlock_irqrestore(&port->lock, flags);
- return ret;
-}
-
-static int mp_put_char(struct tty_struct *tty, unsigned char ch)
-{
- struct sb_uart_state *state = tty->driver_data;
-
- return __mp_put_char(state->port, &state->info->xmit, ch);
-}
-
-static void mp_put_chars(struct tty_struct *tty)
-{
- mp_start(tty);
-}
-
-static int mp_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port;
- struct circ_buf *circ;
- int c, ret = 0;
-
- if (!state || !state->info) {
- return -EL3HLT;
- }
-
- port = state->port;
- circ = &state->info->xmit;
-
- if (!circ->buf)
- return 0;
-
- while (1) {
- c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(circ->buf + circ->head, buf, c);
-
- circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
- buf += c;
- count -= c;
- ret += c;
- }
- mp_start(tty);
- return ret;
-}
-
-static int mp_write_room(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
-
- return uart_circ_chars_free(&state->info->xmit);
-}
-
-static int mp_chars_in_buffer(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
-
- return uart_circ_chars_pending(&state->info->xmit);
-}
-
-static void mp_flush_buffer(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port;
- unsigned long flags;
-
- if (!state || !state->info) {
- return;
- }
-
- port = state->port;
- spin_lock_irqsave(&port->lock, flags);
- uart_circ_clear(&state->info->xmit);
- spin_unlock_irqrestore(&port->lock, flags);
- wake_up_interruptible(&tty->write_wait);
- tty_wakeup(tty);
-}
-
-static void mp_send_xchar(struct tty_struct *tty, char ch)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
- unsigned long flags;
-
- if (port->ops->send_xchar)
- port->ops->send_xchar(port, ch);
- else {
- port->x_char = ch;
- if (ch) {
- spin_lock_irqsave(&port->lock, flags);
- port->ops->start_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
- }
- }
-}
-
-static void mp_throttle(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
-
- if (I_IXOFF(tty))
- mp_send_xchar(tty, STOP_CHAR(tty));
-
- if (tty->termios.c_cflag & CRTSCTS)
- uart_clear_mctrl(state->port, TIOCM_RTS);
-}
-
-static void mp_unthrottle(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
-
- if (I_IXOFF(tty)) {
- if (port->x_char)
- port->x_char = 0;
- else
- mp_send_xchar(tty, START_CHAR(tty));
- }
-
- if (tty->termios.c_cflag & CRTSCTS)
- uart_set_mctrl(port, TIOCM_RTS);
-}
-
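-/* TIOCGSERIAL helper: copy the port's serial_struct settings to user space. */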
-static int mp_get_info(struct sb_uart_state *state, struct serial_struct *retinfo)
-{
- struct sb_uart_port *port = state->port;
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = port->type;
- tmp.line = port->line;
- tmp.port = port->iobase;
- if (HIGH_BITS_OFFSET)
- tmp.port_high = (long) port->iobase >> HIGH_BITS_OFFSET;
- tmp.irq = port->irq;
- tmp.flags = port->flags;
- tmp.xmit_fifo_size = port->fifosize;
- tmp.baud_base = port->uartclk / 16;
- tmp.close_delay = state->close_delay;
- tmp.closing_wait = state->closing_wait == USF_CLOSING_WAIT_NONE ?
- ASYNC_CLOSING_WAIT_NONE :
- state->closing_wait;
- tmp.custom_divisor = port->custom_divisor;
- tmp.hub6 = port->hub6;
- tmp.io_type = port->iotype;
- tmp.iomem_reg_shift = port->regshift;
- tmp.iomem_base = (void *)port->mapbase;
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
- return 0;
-}
-
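-/*
- * TIOCSSERIAL helper: validate and apply new port settings.  Callers without
- * CAP_SYS_ADMIN may only change the UPF_USR_MASK flags and the custom divisor.
- */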
-static int mp_set_info(struct sb_uart_state *state, struct serial_struct *newinfo)
-{
- struct serial_struct new_serial;
- struct sb_uart_port *port = state->port;
- unsigned long new_port;
- unsigned int change_irq, change_port, closing_wait;
- unsigned int old_custom_divisor;
- unsigned int old_flags, new_flags;
- int retval = 0;
-
- if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
- return -EFAULT;
-
- new_port = new_serial.port;
- if (HIGH_BITS_OFFSET)
- new_port += (unsigned long) new_serial.port_high << HIGH_BITS_OFFSET;
-
- new_serial.irq = irq_canonicalize(new_serial.irq);
-
- closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
- USF_CLOSING_WAIT_NONE : new_serial.closing_wait;
- MP_STATE_LOCK(state);
-
- change_irq = new_serial.irq != port->irq;
- change_port = new_port != port->iobase ||
- (unsigned long)new_serial.iomem_base != port->mapbase ||
- new_serial.hub6 != port->hub6 ||
- new_serial.io_type != port->iotype ||
- new_serial.iomem_reg_shift != port->regshift ||
- new_serial.type != port->type;
- old_flags = port->flags;
- new_flags = new_serial.flags;
- old_custom_divisor = port->custom_divisor;
-
- if (!capable(CAP_SYS_ADMIN)) {
- retval = -EPERM;
- if (change_irq || change_port ||
- (new_serial.baud_base != port->uartclk / 16) ||
- (new_serial.close_delay != state->close_delay) ||
- (closing_wait != state->closing_wait) ||
- (new_serial.xmit_fifo_size != port->fifosize) ||
- (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0))
- goto exit;
- port->flags = ((port->flags & ~UPF_USR_MASK) |
- (new_flags & UPF_USR_MASK));
- port->custom_divisor = new_serial.custom_divisor;
- goto check_and_exit;
- }
-
- if (port->ops->verify_port)
- retval = port->ops->verify_port(port, &new_serial);
-
- if ((new_serial.irq >= NR_IRQS) || (new_serial.irq < 0) ||
- (new_serial.baud_base < 9600))
- retval = -EINVAL;
-
- if (retval)
- goto exit;
-
- if (change_port || change_irq) {
- retval = -EBUSY;
-
- if (uart_users(state) > 1)
- goto exit;
-
- mp_shutdown(state);
- }
-
- if (change_port) {
- unsigned long old_iobase, old_mapbase;
- unsigned int old_type, old_iotype, old_hub6, old_shift;
-
- old_iobase = port->iobase;
- old_mapbase = port->mapbase;
- old_type = port->type;
- old_hub6 = port->hub6;
- old_iotype = port->iotype;
- old_shift = port->regshift;
-
- if (old_type != PORT_UNKNOWN)
- port->ops->release_port(port);
-
- port->iobase = new_port;
- port->type = new_serial.type;
- port->hub6 = new_serial.hub6;
- port->iotype = new_serial.io_type;
- port->regshift = new_serial.iomem_reg_shift;
- port->mapbase = (unsigned long)new_serial.iomem_base;
-
- if (port->type != PORT_UNKNOWN) {
- retval = port->ops->request_port(port);
- } else {
- retval = 0;
- }
-
- if (retval && old_type != PORT_UNKNOWN) {
- port->iobase = old_iobase;
- port->type = old_type;
- port->hub6 = old_hub6;
- port->iotype = old_iotype;
- port->regshift = old_shift;
- port->mapbase = old_mapbase;
- retval = port->ops->request_port(port);
- if (retval)
- port->type = PORT_UNKNOWN;
-
- retval = -EBUSY;
- }
- }
-
- port->irq = new_serial.irq;
- port->uartclk = new_serial.baud_base * 16;
- port->flags = (port->flags & ~UPF_CHANGE_MASK) |
- (new_flags & UPF_CHANGE_MASK);
- port->custom_divisor = new_serial.custom_divisor;
- state->close_delay = new_serial.close_delay;
- state->closing_wait = closing_wait;
- port->fifosize = new_serial.xmit_fifo_size;
- if (state->info->tty)
- state->info->tty->low_latency =
- (port->flags & UPF_LOW_LATENCY) ? 1 : 0;
-
-check_and_exit:
- retval = 0;
- if (port->type == PORT_UNKNOWN)
- goto exit;
- if (state->info->flags & UIF_INITIALIZED) {
- if (((old_flags ^ port->flags) & UPF_SPD_MASK) ||
- old_custom_divisor != port->custom_divisor) {
- if (port->flags & UPF_SPD_MASK) {
- printk(KERN_NOTICE
- "%s sets custom speed on ttyMP%d. This "
- "is deprecated.\n", current->comm,
- port->line);
- }
- mp_change_speed(state, NULL);
- }
- } else
- retval = mp_startup(state, 1);
-exit:
- MP_STATE_UNLOCK(state);
- return retval;
-}
-
-
-static int mp_get_lsr_info(struct sb_uart_state *state, unsigned int *value)
-{
- struct sb_uart_port *port = state->port;
- unsigned int result;
-
- result = port->ops->tx_empty(port);
-
- if (port->x_char ||
- ((uart_circ_chars_pending(&state->info->xmit) > 0) &&
- !state->info->tty->stopped && !state->info->tty->hw_stopped))
- result &= ~TIOCSER_TEMT;
-
- return put_user(result, value);
-}
-
-static int mp_tiocmget(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
- int result = -EIO;
-
- MP_STATE_LOCK(state);
- if (!(tty->flags & (1 << TTY_IO_ERROR))) {
- result = port->mctrl;
- spin_lock_irq(&port->lock);
- result |= port->ops->get_mctrl(port);
- spin_unlock_irq(&port->lock);
- }
- MP_STATE_UNLOCK(state);
- return result;
-}
-
-static int mp_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
- int ret = -EIO;
-
-
- MP_STATE_LOCK(state);
- if (!(tty->flags & (1 << TTY_IO_ERROR))) {
- mp_update_mctrl(port, set, clear);
- ret = 0;
- }
- MP_STATE_UNLOCK(state);
-
- return ret;
-}
-
-static int mp_break_ctl(struct tty_struct *tty, int break_state)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
-
- MP_STATE_LOCK(state);
-
- if (port->type != PORT_UNKNOWN)
- port->ops->break_ctl(port, break_state);
-
- MP_STATE_UNLOCK(state);
- return 0;
-}
-
-static int mp_do_autoconfig(struct sb_uart_state *state)
-{
- struct sb_uart_port *port = state->port;
- int flags, ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (mutex_lock_interruptible(&state->mutex))
- return -ERESTARTSYS;
- ret = -EBUSY;
- if (uart_users(state) == 1) {
- mp_shutdown(state);
-
- if (port->type != PORT_UNKNOWN)
- port->ops->release_port(port);
-
- flags = UART_CONFIG_TYPE;
- if (port->flags & UPF_AUTO_IRQ)
- flags |= UART_CONFIG_IRQ;
-
- port->ops->config_port(port, flags);
-
- ret = mp_startup(state, 1);
- }
- MP_STATE_UNLOCK(state);
- return ret;
-}
-
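-/*
- * TIOCMIWAIT helper: sleep until one of the requested modem-status lines
- * changes or a signal arrives.
- */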
-static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg)
-{
- struct sb_uart_port *port = state->port;
- DECLARE_WAITQUEUE(wait, current);
- struct sb_uart_icount cprev, cnow;
- int ret;
-
- spin_lock_irq(&port->lock);
- memcpy(&cprev, &port->icount, sizeof(struct sb_uart_icount));
-
- port->ops->enable_ms(port);
- spin_unlock_irq(&port->lock);
-
- add_wait_queue(&state->info->delta_msr_wait, &wait);
- for (;;) {
- spin_lock_irq(&port->lock);
- memcpy(&cnow, &port->icount, sizeof(struct sb_uart_icount));
- spin_unlock_irq(&port->lock);
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
- ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
- ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
- ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
- ret = 0;
- break;
- }
-
- schedule();
-
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- cprev = cnow;
- }
-
- current->state = TASK_RUNNING;
- remove_wait_queue(&state->info->delta_msr_wait, &wait);
-
- return ret;
-}
-
-static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
-{
- struct serial_icounter_struct icount = {};
- struct sb_uart_icount cnow;
- struct sb_uart_port *port = state->port;
-
- spin_lock_irq(&port->lock);
- memcpy(&cnow, &port->icount, sizeof(struct sb_uart_icount));
- spin_unlock_irq(&port->lock);
-
- icount.cts = cnow.cts;
- icount.dsr = cnow.dsr;
- icount.rng = cnow.rng;
- icount.dcd = cnow.dcd;
- icount.rx = cnow.rx;
- icount.tx = cnow.tx;
- icount.frame = cnow.frame;
- icount.overrun = cnow.overrun;
- icount.parity = cnow.parity;
- icount.brk = cnow.brk;
- icount.buf_overrun = cnow.buf_overrun;
-
- return copy_to_user(icnt, &icount, sizeof(icount)) ? -EFAULT : 0;
-}
-
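-/*
- * Main ioctl handler: board/multi-drop specific commands first, then the
- * commands that do not need the port mutex (TIOCMIWAIT, TIOCGICOUNT), and
- * finally the remaining commands under MP_STATE_LOCK.
- */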
-static int mp_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct mp_port *info = (struct mp_port *)state->port;
- int ret = -ENOIOCTLCMD;
-
-
- switch (cmd) {
- case TIOCSMULTIDROP:
-		/* enable or disable multi-drop mode; the default operation mode is H/W mode */
- if (info->port.type == PORT_16C105XA)
- {
- //arg &= ~0x6;
- //state->port->mdmode = 0;
- return set_multidrop_mode((struct sb_uart_port *)info, (unsigned int)arg);
- }
- ret = -ENOTSUPP;
- break;
- case GETDEEPFIFO:
- ret = get_deep_fifo(state->port);
- return ret;
- case SETDEEPFIFO:
- ret = set_deep_fifo(state->port,arg);
- deep[state->port->line] = arg;
- return ret;
- case SETTTR:
- if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
- ret = sb1054_set_register(state->port,PAGE_4,SB105X_TTR,arg);
- ttr[state->port->line] = arg;
- }
- return ret;
- case SETRTR:
- if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
- ret = sb1054_set_register(state->port,PAGE_4,SB105X_RTR,arg);
- rtr[state->port->line] = arg;
- }
- return ret;
- case GETTTR:
- if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
- ret = sb1054_get_register(state->port,PAGE_4,SB105X_TTR);
- }
- return ret;
- case GETRTR:
- if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
- ret = sb1054_get_register(state->port,PAGE_4,SB105X_RTR);
- }
- return ret;
-
- case SETFCR:
- if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
- ret = sb1054_set_register(state->port,PAGE_1,SB105X_FCR,arg);
- }
- else{
- serial_out(info,2,arg);
- }
-
- return ret;
- case TIOCSMDADDR:
- /* set multi-drop address */
- if (info->port.type == PORT_16C105XA)
- {
- state->port->mdmode |= MDMODE_ADDR;
- return set_multidrop_addr((struct sb_uart_port *)info, (unsigned int)arg);
- }
- ret = -ENOTSUPP;
- break;
-
- case TIOCGMDADDR:
-		/* get multi-drop address */
- if ((info->port.type == PORT_16C105XA) && (state->port->mdmode & MDMODE_ADDR))
- {
- return get_multidrop_addr((struct sb_uart_port *)info);
- }
- ret = -ENOTSUPP;
- break;
-
- case TIOCSENDADDR:
- /* send address in multi-drop mode */
- if ((info->port.type == PORT_16C105XA)
- && (state->port->mdmode & (MDMODE_ENABLE)))
- {
- if (mp_chars_in_buffer(tty) > 0)
- {
- tty_wait_until_sent(tty, 0);
- }
- //while ((serial_in(info, UART_LSR) & 0x60) != 0x60);
- //while (sb1054_get_register(state->port, PAGE_2, SB105X_TFCR) != 0);
- while ((serial_in(info, UART_LSR) & 0x60) != 0x60);
- serial_out(info, UART_SCR, (int)arg);
- }
- break;
-
- case TIOCGSERIAL:
- ret = mp_get_info(state, (struct serial_struct *)arg);
- break;
-
- case TIOCSSERIAL:
- ret = mp_set_info(state, (struct serial_struct *)arg);
- break;
-
- case TIOCSERCONFIG:
- ret = mp_do_autoconfig(state);
- break;
-
- case TIOCSERGWILD: /* obsolete */
- case TIOCSERSWILD: /* obsolete */
- ret = 0;
- break;
- /* for Multiport */
- case TIOCGNUMOFPORT: /* Get number of ports */
- return NR_PORTS;
- case TIOCGGETDEVID:
- return mp_devs[arg].device_id;
- case TIOCGGETREV:
- return mp_devs[arg].revision;
- case TIOCGGETNRPORTS:
- return mp_devs[arg].nr_ports;
- case TIOCGGETBDNO:
- return NR_BOARD;
- case TIOCGGETINTERFACE:
- if (mp_devs[arg].revision == 0xc0)
- {
- /* for SB16C1053APCI */
- return (sb1053a_get_interface(info, info->port.line));
- }
- else
- {
- return (inb(mp_devs[arg].option_reg_addr+MP_OPTR_IIR0+(state->port->line/8)));
- }
- case TIOCGGETPORTTYPE:
- ret = get_device_type(arg);
- return ret;
- case TIOCSMULTIECHO: /* set to multi-drop mode(RS422) or echo mode(RS485)*/
- outb( ( inb(info->interface_config_addr) & ~0x03 ) | 0x01 ,
- info->interface_config_addr);
- return 0;
-	case TIOCSPTPNOECHO: /* set to point-to-point mode(RS422) or no-echo mode(RS485) */
- outb( ( inb(info->interface_config_addr) & ~0x03 ) ,
- info->interface_config_addr);
- return 0;
- }
-
- if (ret != -ENOIOCTLCMD)
- goto out;
-
- if (tty->flags & (1 << TTY_IO_ERROR)) {
- ret = -EIO;
- goto out;
- }
-
- switch (cmd) {
- case TIOCMIWAIT:
- ret = mp_wait_modem_status(state, arg);
- break;
-
- case TIOCGICOUNT:
- ret = mp_get_count(state, (struct serial_icounter_struct *)arg);
- break;
- }
-
- if (ret != -ENOIOCTLCMD)
- goto out;
-
- MP_STATE_LOCK(state);
- switch (cmd) {
- case TIOCSERGETLSR: /* Get line status register */
- ret = mp_get_lsr_info(state, (unsigned int *)arg);
- break;
-
- default: {
- struct sb_uart_port *port = state->port;
- if (port->ops->ioctl)
- ret = port->ops->ioctl(port, cmd, arg);
- break;
- }
- }
-
- MP_STATE_UNLOCK(state);
-out:
- return ret;
-}
-
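-/*
- * Apply new termios settings and handle the DTR/RTS and CTS flow-control
- * transitions implied by the old and new c_cflag values.
- */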
-static void mp_set_termios(struct tty_struct *tty, struct MP_TERMIOS *old_termios)
-{
- struct sb_uart_state *state = tty->driver_data;
- unsigned long flags;
- unsigned int cflag = tty->termios.c_cflag;
-
-#define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-
- if ((cflag ^ old_termios->c_cflag) == 0 &&
- RELEVANT_IFLAG(tty->termios.c_iflag ^ old_termios->c_iflag) == 0)
- return;
-
- mp_change_speed(state, old_termios);
-
- if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
- uart_clear_mctrl(state->port, TIOCM_RTS | TIOCM_DTR);
-
- if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
- unsigned int mask = TIOCM_DTR;
- if (!(cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags))
- mask |= TIOCM_RTS;
- uart_set_mctrl(state->port, mask);
- }
-
- if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
- spin_lock_irqsave(&state->port->lock, flags);
- tty->hw_stopped = 0;
- __mp_start(tty);
- spin_unlock_irqrestore(&state->port->lock, flags);
- }
-
- if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
- spin_lock_irqsave(&state->port->lock, flags);
- if (!(state->port->ops->get_mctrl(state->port) & TIOCM_CTS)) {
- tty->hw_stopped = 1;
- state->port->ops->stop_tx(state->port);
- }
- spin_unlock_irqrestore(&state->port->lock, flags);
- }
-}
-
-static void mp_close(struct tty_struct *tty, struct file *filp)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port;
-
- printk("mp_close!\n");
- if (!state || !state->port)
- return;
-
- port = state->port;
-
- printk("close1 %d\n", __LINE__);
- MP_STATE_LOCK(state);
-
- printk("close2 %d\n", __LINE__);
- if (tty_hung_up_p(filp))
- goto done;
-
- printk("close3 %d\n", __LINE__);
- if ((tty->count == 1) && (state->count != 1)) {
- printk("mp_close: bad serial port count; tty->count is 1, "
- "state->count is %d\n", state->count);
- state->count = 1;
- }
- printk("close4 %d\n", __LINE__);
- if (--state->count < 0) {
- printk("rs_close: bad serial port count for ttyMP%d: %d\n",
- port->line, state->count);
- state->count = 0;
- }
- if (state->count)
- goto done;
-
- tty->closing = 1;
-
- printk("close5 %d\n", __LINE__);
- if (state->closing_wait != USF_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, state->closing_wait);
-
- printk("close6 %d\n", __LINE__);
- if (state->info->flags & UIF_INITIALIZED) {
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
- port->ops->stop_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
- mp_wait_until_sent(tty, port->timeout);
- }
- printk("close7 %d\n", __LINE__);
-
- mp_shutdown(state);
- printk("close8 %d\n", __LINE__);
- mp_flush_buffer(tty);
- tty_ldisc_flush(tty);
- tty->closing = 0;
- state->info->tty = NULL;
- if (state->info->blocked_open)
- {
- if (state->close_delay)
- {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(state->close_delay);
- }
- }
- else
- {
- mp_change_pm(state, 3);
- }
-	printk("close9 %d\n", __LINE__);
-
- state->info->flags &= ~UIF_NORMAL_ACTIVE;
- wake_up_interruptible(&state->info->open_wait);
-
-done:
- printk("close done\n");
- MP_STATE_UNLOCK(state);
- module_put(THIS_MODULE);
-}
-
-static void mp_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
- unsigned long char_time, expire;
-
- if (port->type == PORT_UNKNOWN || port->fifosize == 0)
- return;
-
- char_time = (port->timeout - HZ/50) / port->fifosize;
- char_time = char_time / 5;
- if (char_time == 0)
- char_time = 1;
- if (timeout && timeout < char_time)
- char_time = timeout;
-
- if (timeout == 0 || timeout > 2 * port->timeout)
- timeout = 2 * port->timeout;
-
- expire = jiffies + timeout;
-
- while (!port->ops->tx_empty(port)) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(char_time);
- if (signal_pending(current))
- break;
- if (time_after(jiffies, expire))
- break;
- }
- set_current_state(TASK_RUNNING); /* might not be needed */
-}
-
-static void mp_hangup(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
-
- MP_STATE_LOCK(state);
- if (state->info && state->info->flags & UIF_NORMAL_ACTIVE) {
- mp_flush_buffer(tty);
- mp_shutdown(state);
- state->count = 0;
- state->info->flags &= ~UIF_NORMAL_ACTIVE;
- state->info->tty = NULL;
- wake_up_interruptible(&state->info->open_wait);
- wake_up_interruptible(&state->info->delta_msr_wait);
- }
- MP_STATE_UNLOCK(state);
-}
-
-static void mp_update_termios(struct sb_uart_state *state)
-{
- struct tty_struct *tty = state->info->tty;
- struct sb_uart_port *port = state->port;
-
- if (!(tty->flags & (1 << TTY_IO_ERROR))) {
- mp_change_speed(state, NULL);
-
- if (tty->termios.c_cflag & CBAUD)
- uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
- }
-}
-
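-/*
- * Block the opener until carrier detect is asserted, unless O_NONBLOCK,
- * CLOCAL or a port I/O error allows an immediate return.
- */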
-static int mp_block_til_ready(struct file *filp, struct sb_uart_state *state)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct sb_uart_info *info = state->info;
- struct sb_uart_port *port = state->port;
- unsigned int mctrl;
-
- info->blocked_open++;
- state->count--;
-
- add_wait_queue(&info->open_wait, &wait);
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (tty_hung_up_p(filp) || info->tty == NULL)
- break;
-
- if (!(info->flags & UIF_INITIALIZED))
- break;
-
- if ((filp->f_flags & O_NONBLOCK) ||
- (info->tty->termios.c_cflag & CLOCAL) ||
- (info->tty->flags & (1 << TTY_IO_ERROR))) {
- break;
- }
-
- if (info->tty->termios.c_cflag & CBAUD)
- uart_set_mctrl(port, TIOCM_DTR);
-
- spin_lock_irq(&port->lock);
- port->ops->enable_ms(port);
- mctrl = port->ops->get_mctrl(port);
- spin_unlock_irq(&port->lock);
- if (mctrl & TIOCM_CAR)
- break;
-
- MP_STATE_UNLOCK(state);
- schedule();
- MP_STATE_LOCK(state);
-
- if (signal_pending(current))
- break;
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&info->open_wait, &wait);
-
- state->count++;
- info->blocked_open--;
-
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- if (!info->tty || tty_hung_up_p(filp))
- return -EAGAIN;
-
- return 0;
-}
-
-static struct sb_uart_state *uart_get(struct uart_driver *drv, int line)
-{
- struct sb_uart_state *state;
-
- MP_MUTEX_LOCK(mp_mutex);
- state = drv->state + line;
- if (mutex_lock_interruptible(&state->mutex)) {
- state = ERR_PTR(-ERESTARTSYS);
- goto out;
- }
- state->count++;
- if (!state->port) {
- state->count--;
- MP_STATE_UNLOCK(state);
- state = ERR_PTR(-ENXIO);
- goto out;
- }
-
- if (!state->info) {
- state->info = kmalloc(sizeof(struct sb_uart_info), GFP_KERNEL);
- if (state->info) {
- memset(state->info, 0, sizeof(struct sb_uart_info));
- init_waitqueue_head(&state->info->open_wait);
- init_waitqueue_head(&state->info->delta_msr_wait);
-
- state->port->info = state->info;
-
- tasklet_init(&state->info->tlet, mp_tasklet_action,
- (unsigned long)state);
- } else {
- state->count--;
- MP_STATE_UNLOCK(state);
- state = ERR_PTR(-ENOMEM);
- }
- }
-
-out:
- MP_MUTEX_UNLOCK(mp_mutex);
- return state;
-}
-
-static int mp_open(struct tty_struct *tty, struct file *filp)
-{
- struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
- struct sb_uart_state *state;
- int retval;
- int line = tty->index;
- struct mp_port *mtpt;
-
- retval = -ENODEV;
- if (line >= tty->driver->num)
- goto fail;
-
- state = uart_get(drv, line);
-
- if (IS_ERR(state)) {
- retval = PTR_ERR(state);
- goto fail;
- }
-
- mtpt = (struct mp_port *)state->port;
-
- tty->driver_data = state;
- tty->low_latency = (state->port->flags & UPF_LOW_LATENCY) ? 1 : 0;
- tty->alt_speed = 0;
- state->info->tty = tty;
-
- if (tty_hung_up_p(filp)) {
- retval = -EAGAIN;
- state->count--;
- MP_STATE_UNLOCK(state);
- goto fail;
- }
-
- if (state->count == 1)
- mp_change_pm(state, 0);
-
- retval = mp_startup(state, 0);
-
- if (retval == 0)
- retval = mp_block_til_ready(filp, state);
- MP_STATE_UNLOCK(state);
-
- if (retval == 0 && !(state->info->flags & UIF_NORMAL_ACTIVE)) {
- state->info->flags |= UIF_NORMAL_ACTIVE;
-
- mp_update_termios(state);
- }
-
- uart_clear_mctrl(state->port, TIOCM_RTS);
- try_module_get(THIS_MODULE);
-fail:
- return retval;
-}
-
-
-static const char *mp_type(struct sb_uart_port *port)
-{
- const char *str = NULL;
-
- if (port->ops->type)
- str = port->ops->type(port);
-
- if (!str)
- str = "unknown";
-
- return str;
-}
-
-static void mp_change_pm(struct sb_uart_state *state, int pm_state)
-{
- struct sb_uart_port *port = state->port;
- if (port->ops->pm)
- port->ops->pm(port, pm_state, state->pm_state);
- state->pm_state = pm_state;
-}
-
-static inline void mp_report_port(struct uart_driver *drv, struct sb_uart_port *port)
-{
- char address[64];
-
- switch (port->iotype) {
- case UPIO_PORT:
- snprintf(address, sizeof(address),"I/O 0x%x", port->iobase);
- break;
- case UPIO_HUB6:
- snprintf(address, sizeof(address),"I/O 0x%x offset 0x%x", port->iobase, port->hub6);
- break;
- case UPIO_MEM:
- snprintf(address, sizeof(address),"MMIO 0x%lx", port->mapbase);
- break;
- default:
-		strlcpy(address, "*unknown*", sizeof(address));
- break;
- }
-
- printk( "%s%d at %s (irq = %d) is a %s\n",
- drv->dev_name, port->line, address, port->irq, mp_type(port));
-
-}
-
-static void mp_configure_port(struct uart_driver *drv, struct sb_uart_state *state, struct sb_uart_port *port)
-{
- unsigned int flags;
-
-
- if (!port->iobase && !port->mapbase && !port->membase)
- {
- DPRINTK("%s error \n",__FUNCTION__);
- return;
- }
- flags = UART_CONFIG_TYPE;
- if (port->flags & UPF_AUTO_IRQ)
- flags |= UART_CONFIG_IRQ;
- if (port->flags & UPF_BOOT_AUTOCONF) {
- port->type = PORT_UNKNOWN;
- port->ops->config_port(port, flags);
- }
-
- if (port->type != PORT_UNKNOWN) {
- unsigned long flags;
-
- mp_report_port(drv, port);
-
- spin_lock_irqsave(&port->lock, flags);
- port->ops->set_mctrl(port, 0);
- spin_unlock_irqrestore(&port->lock, flags);
-
- mp_change_pm(state, 3);
- }
-}
-
-static void mp_unconfigure_port(struct uart_driver *drv, struct sb_uart_state *state)
-{
- struct sb_uart_port *port = state->port;
- struct sb_uart_info *info = state->info;
-
- if (info && info->tty)
- tty_hangup(info->tty);
-
- MP_STATE_LOCK(state);
-
- state->info = NULL;
-
- if (port->type != PORT_UNKNOWN)
- port->ops->release_port(port);
-
- port->type = PORT_UNKNOWN;
-
- if (info) {
- tasklet_kill(&info->tlet);
- kfree(info);
- }
-
- MP_STATE_UNLOCK(state);
-}
-static struct tty_operations mp_ops = {
- .open = mp_open,
- .close = mp_close,
- .write = mp_write,
- .put_char = mp_put_char,
- .flush_chars = mp_put_chars,
- .write_room = mp_write_room,
- .chars_in_buffer= mp_chars_in_buffer,
- .flush_buffer = mp_flush_buffer,
- .ioctl = mp_ioctl,
- .throttle = mp_throttle,
- .unthrottle = mp_unthrottle,
- .send_xchar = mp_send_xchar,
- .set_termios = mp_set_termios,
- .stop = mp_stop,
- .start = mp_start,
- .hangup = mp_hangup,
- .break_ctl = mp_break_ctl,
- .wait_until_sent= mp_wait_until_sent,
-#ifdef CONFIG_PROC_FS
- .proc_fops = NULL,
-#endif
- .tiocmget = mp_tiocmget,
- .tiocmset = mp_tiocmset,
-};
-
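-/*
- * Allocate the per-port state array and the tty driver, fill in its
- * defaults and register it with the tty layer.
- */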
-static int mp_register_driver(struct uart_driver *drv)
-{
- struct tty_driver *normal = NULL;
- int i, retval;
-
- drv->state = kmalloc(sizeof(struct sb_uart_state) * drv->nr, GFP_KERNEL);
- retval = -ENOMEM;
- if (!drv->state)
- {
- printk("SB PCI Error: Kernel memory allocation error!\n");
- goto out;
- }
- memset(drv->state, 0, sizeof(struct sb_uart_state) * drv->nr);
-
- normal = alloc_tty_driver(drv->nr);
- if (!normal)
- {
- printk("SB PCI Error: tty allocation error!\n");
- goto out;
- }
-
- drv->tty_driver = normal;
-
- normal->owner = drv->owner;
- normal->magic = TTY_DRIVER_MAGIC;
- normal->driver_name = drv->driver_name;
- normal->name = drv->dev_name;
- normal->major = drv->major;
- normal->minor_start = drv->minor;
-
- normal->num = MAX_MP_PORT ;
-
- normal->type = TTY_DRIVER_TYPE_SERIAL;
- normal->subtype = SERIAL_TYPE_NORMAL;
- normal->init_termios = tty_std_termios;
- normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- normal->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
- normal->driver_state = drv;
-
- tty_set_operations(normal, &mp_ops);
-
-	for (i = 0; i < drv->nr; i++) {
- struct sb_uart_state *state = drv->state + i;
-
- state->close_delay = 500;
- state->closing_wait = 30000;
-
- mutex_init(&state->mutex);
- }
-
- retval = tty_register_driver(normal);
-out:
- if (retval < 0) {
-		printk("Failed to register tty driver!\n");
- put_tty_driver(normal);
- kfree(drv->state);
- }
-
- return retval;
-}
-
-void mp_unregister_driver(struct uart_driver *drv)
-{
- struct tty_driver *normal = NULL;
-
- normal = drv->tty_driver;
-
- if (!normal)
- {
- return;
- }
-
- tty_unregister_driver(normal);
- put_tty_driver(normal);
- drv->tty_driver = NULL;
-
-
- kfree(drv->state);
-
-}
-
-static int mp_add_one_port(struct uart_driver *drv, struct sb_uart_port *port)
-{
- struct sb_uart_state *state;
- int ret = 0;
-
-
- if (port->line >= drv->nr)
- return -EINVAL;
-
- state = drv->state + port->line;
-
- MP_MUTEX_LOCK(mp_mutex);
- if (state->port) {
- ret = -EINVAL;
- goto out;
- }
-
- state->port = port;
-
- spin_lock_init(&port->lock);
- port->cons = drv->cons;
- port->info = state->info;
-
- mp_configure_port(drv, state, port);
-
- tty_register_device(drv->tty_driver, port->line, port->dev);
-
-out:
- MP_MUTEX_UNLOCK(mp_mutex);
-
-
- return ret;
-}
-
-static int mp_remove_one_port(struct uart_driver *drv, struct sb_uart_port *port)
-{
- struct sb_uart_state *state = drv->state + port->line;
-
- if (state->port != port)
- printk(KERN_ALERT "Removing wrong port: %p != %p\n",
- state->port, port);
-
- MP_MUTEX_LOCK(mp_mutex);
-
- tty_unregister_device(drv->tty_driver, port->line);
-
- mp_unconfigure_port(drv, state);
- state->port = NULL;
- MP_MUTEX_UNLOCK(mp_mutex);
-
- return 0;
-}
-
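-/*
- * Probe the UART: run the IER scratch and MCR loopback sanity checks, then
- * read the board's DIR option register to identify the 16C55x/16C105x type,
- * falling back to the IIR FIFO bits if that fails.
- */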
-static void autoconfig(struct mp_port *mtpt, unsigned int probeflags)
-{
- unsigned char status1, scratch, scratch2, scratch3;
- unsigned char save_lcr, save_mcr;
- unsigned long flags;
-
- unsigned char u_type;
- unsigned char b_ret = 0;
-
- if (!mtpt->port.iobase && !mtpt->port.mapbase && !mtpt->port.membase)
- return;
-
- DEBUG_AUTOCONF("ttyMP%d: autoconf (0x%04x, 0x%p): ",
- mtpt->port.line, mtpt->port.iobase, mtpt->port.membase);
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
-
- if (!(mtpt->port.flags & UPF_BUGGY_UART)) {
- scratch = serial_inp(mtpt, UART_IER);
- serial_outp(mtpt, UART_IER, 0);
-#ifdef __i386__
- outb(0xff, 0x080);
-#endif
- scratch2 = serial_inp(mtpt, UART_IER) & 0x0f;
- serial_outp(mtpt, UART_IER, 0x0F);
-#ifdef __i386__
- outb(0, 0x080);
-#endif
- scratch3 = serial_inp(mtpt, UART_IER) & 0x0F;
- serial_outp(mtpt, UART_IER, scratch);
- if (scratch2 != 0 || scratch3 != 0x0F) {
- DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
- scratch2, scratch3);
- goto out;
- }
- }
-
- save_mcr = serial_in(mtpt, UART_MCR);
- save_lcr = serial_in(mtpt, UART_LCR);
-
- if (!(mtpt->port.flags & UPF_SKIP_TEST)) {
- serial_outp(mtpt, UART_MCR, UART_MCR_LOOP | 0x0A);
- status1 = serial_inp(mtpt, UART_MSR) & 0xF0;
- serial_outp(mtpt, UART_MCR, save_mcr);
- if (status1 != 0x90) {
- DEBUG_AUTOCONF("LOOP test failed (%02x) ",
- status1);
- goto out;
- }
- }
-
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR, 0);
- serial_outp(mtpt, UART_LCR, 0);
-
- serial_outp(mtpt, UART_FCR, UART_FCR_ENABLE_FIFO);
- scratch = serial_in(mtpt, UART_IIR) >> 6;
-
- DEBUG_AUTOCONF("iir=%d ", scratch);
- if(mtpt->device->nr_ports >= 8)
- b_ret = read_option_register(mtpt,(MP_OPTR_DIR0 + ((mtpt->port.line)/8)));
- else
- b_ret = read_option_register(mtpt,MP_OPTR_DIR0);
- u_type = (b_ret & 0xf0) >> 4;
- if(mtpt->port.type == PORT_UNKNOWN )
- {
- switch (u_type)
- {
- case DIR_UART_16C550:
- mtpt->port.type = PORT_16C55X;
- break;
- case DIR_UART_16C1050:
- mtpt->port.type = PORT_16C105X;
- break;
- case DIR_UART_16C1050A:
- if (mtpt->port.line < 2)
- {
- mtpt->port.type = PORT_16C105XA;
- }
- else
- {
- if (mtpt->device->device_id & 0x50)
- {
- mtpt->port.type = PORT_16C55X;
- }
- else
- {
- mtpt->port.type = PORT_16C105X;
- }
- }
- break;
- default:
- mtpt->port.type = PORT_UNKNOWN;
- break;
- }
- }
-
- if(mtpt->port.type == PORT_UNKNOWN )
- {
-		printk("unknown UART type\n");
- switch (scratch) {
- case 0:
- case 1:
- mtpt->port.type = PORT_UNKNOWN;
- break;
- case 2:
- case 3:
- mtpt->port.type = PORT_16C55X;
- break;
- }
- }
-
- serial_outp(mtpt, UART_LCR, save_lcr);
-
- mtpt->port.fifosize = uart_config[mtpt->port.type].dfl_xmit_fifo_size;
- mtpt->capabilities = uart_config[mtpt->port.type].flags;
-
- if (mtpt->port.type == PORT_UNKNOWN)
- goto out;
- serial_outp(mtpt, UART_MCR, save_mcr);
- serial_outp(mtpt, UART_FCR, (UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT));
- serial_outp(mtpt, UART_FCR, 0);
- (void)serial_in(mtpt, UART_RX);
- serial_outp(mtpt, UART_IER, 0);
-
-out:
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
- DEBUG_AUTOCONF("type=%s\n", uart_config[mtpt->port.type].name);
-}
-
-static void autoconfig_irq(struct mp_port *mtpt)
-{
- unsigned char save_mcr, save_ier;
- unsigned long irqs;
- int irq;
-
- /* forget possible initially masked and pending IRQ */
- probe_irq_off(probe_irq_on());
- save_mcr = serial_inp(mtpt, UART_MCR);
- save_ier = serial_inp(mtpt, UART_IER);
- serial_outp(mtpt, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
-
- irqs = probe_irq_on();
- serial_outp(mtpt, UART_MCR, 0);
- serial_outp(mtpt, UART_MCR,
- UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
-
- serial_outp(mtpt, UART_IER, 0x0f); /* enable all intrs */
- (void)serial_inp(mtpt, UART_LSR);
- (void)serial_inp(mtpt, UART_RX);
- (void)serial_inp(mtpt, UART_IIR);
- (void)serial_inp(mtpt, UART_MSR);
- serial_outp(mtpt, UART_TX, 0xFF);
- irq = probe_irq_off(irqs);
-
- serial_outp(mtpt, UART_MCR, save_mcr);
- serial_outp(mtpt, UART_IER, save_ier);
-
- mtpt->port.irq = (irq > 0) ? irq : 0;
-}
-
-static void multi_stop_tx(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
-
- if (mtpt->ier & UART_IER_THRI) {
- mtpt->ier &= ~UART_IER_THRI;
- serial_out(mtpt, UART_IER, mtpt->ier);
- }
-
- tasklet_schedule(&port->info->tlet);
-}
-
-static void multi_start_tx(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
-
- if (!(mtpt->ier & UART_IER_THRI)) {
- mtpt->ier |= UART_IER_THRI;
- serial_out(mtpt, UART_IER, mtpt->ier);
- }
-}
-
-static void multi_stop_rx(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
-
- mtpt->ier &= ~UART_IER_RLSI;
- mtpt->port.read_status_mask &= ~UART_LSR_DR;
- serial_out(mtpt, UART_IER, mtpt->ier);
-}
-
-static void multi_enable_ms(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
-
- mtpt->ier |= UART_IER_MSI;
- serial_out(mtpt, UART_IER, mtpt->ier);
-}
-
-
-static _INLINE_ void receive_chars(struct mp_port *mtpt, int *status )
-{
- struct tty_struct *tty = mtpt->port.info->tty;
- unsigned char lsr = *status;
- int max_count = 256;
- unsigned char ch;
- char flag;
-
- //lsr &= mtpt->port.read_status_mask;
-
- do {
- if ((lsr & UART_LSR_PE) && (mtpt->port.mdmode & MDMODE_ENABLE))
- {
- ch = serial_inp(mtpt, UART_RX);
- }
- else if (lsr & UART_LSR_SPECIAL)
- {
- flag = 0;
- ch = serial_inp(mtpt, UART_RX);
-
- if (lsr & UART_LSR_BI)
- {
-
- mtpt->port.icount.brk++;
- flag = TTY_BREAK;
-
- if (sb_uart_handle_break(&mtpt->port))
- goto ignore_char;
- }
- if (lsr & UART_LSR_PE)
- {
- mtpt->port.icount.parity++;
- flag = TTY_PARITY;
- }
- if (lsr & UART_LSR_FE)
- {
- mtpt->port.icount.frame++;
- flag = TTY_FRAME;
- }
- if (lsr & UART_LSR_OE)
- {
- mtpt->port.icount.overrun++;
- flag = TTY_OVERRUN;
- }
- tty_insert_flip_char(tty, ch, flag);
- }
- else
- {
- ch = serial_inp(mtpt, UART_RX);
- tty_insert_flip_char(tty, ch, 0);
- }
-ignore_char:
- lsr = serial_inp(mtpt, UART_LSR);
- } while ((lsr & UART_LSR_DR) && (max_count-- > 0));
-
- tty_flip_buffer_push(tty);
-}
-
-
-
-
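-/*
- * Drain the transmit path: send any pending x_char first, then move up to
- * one FIFO's worth of characters from the xmit circular buffer to the UART.
- */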
-static _INLINE_ void transmit_chars(struct mp_port *mtpt)
-{
- struct circ_buf *xmit = &mtpt->port.info->xmit;
- int count;
-
- if (mtpt->port.x_char) {
- serial_outp(mtpt, UART_TX, mtpt->port.x_char);
- mtpt->port.icount.tx++;
- mtpt->port.x_char = 0;
- return;
- }
- if (uart_circ_empty(xmit) || uart_tx_stopped(&mtpt->port)) {
- multi_stop_tx(&mtpt->port);
- return;
- }
-
- count = uart_circ_chars_pending(xmit);
-
- if(count > mtpt->port.fifosize)
- {
- count = mtpt->port.fifosize;
- }
-
- printk("[%d] mdmode: %x\n", mtpt->port.line, mtpt->port.mdmode);
- do {
-#if 0
- /* check multi-drop mode */
- if ((mtpt->port.mdmode & (MDMODE_ENABLE | MDMODE_ADDR)) == (MDMODE_ENABLE | MDMODE_ADDR))
- {
- printk("send address\n");
- /* send multi-drop address */
- serial_out(mtpt, UART_SCR, xmit->buf[xmit->tail]);
- }
- else
-#endif
- {
- serial_out(mtpt, UART_TX, xmit->buf[xmit->tail]);
- }
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- mtpt->port.icount.tx++;
- } while (--count > 0);
-}
-
-
-
-static _INLINE_ void check_modem_status(struct mp_port *mtpt)
-{
- int status;
-
- status = serial_in(mtpt, UART_MSR);
-
- if ((status & UART_MSR_ANY_DELTA) == 0)
- return;
-
- if (status & UART_MSR_TERI)
- mtpt->port.icount.rng++;
- if (status & UART_MSR_DDSR)
- mtpt->port.icount.dsr++;
- if (status & UART_MSR_DDCD)
- sb_uart_handle_dcd_change(&mtpt->port, status & UART_MSR_DCD);
- if (status & UART_MSR_DCTS)
- sb_uart_handle_cts_change(&mtpt->port, status & UART_MSR_CTS);
-
- wake_up_interruptible(&mtpt->port.info->delta_msr_wait);
-}
-
-static inline void multi_handle_port(struct mp_port *mtpt)
-{
- unsigned int status = serial_inp(mtpt, UART_LSR);
-
- //printk("lsr: %x\n", status);
-
- if ((status & UART_LSR_DR) || (status & UART_LSR_SPECIAL))
- receive_chars(mtpt, &status);
- check_modem_status(mtpt);
- if (status & UART_LSR_THRE)
- {
- if ((mtpt->port.type == PORT_16C105X)
- || (mtpt->port.type == PORT_16C105XA))
- transmit_chars(mtpt);
- else
- {
- if (mtpt->interface >= RS485NE)
- uart_set_mctrl(&mtpt->port, TIOCM_RTS);
-
- transmit_chars(mtpt);
-
-
- if (mtpt->interface >= RS485NE)
- {
-				while ((status = serial_in(mtpt, UART_LSR) & 0x60) != 0x60);
- uart_clear_mctrl(&mtpt->port, TIOCM_RTS);
- }
- }
- }
-}
-
-
-
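-/*
- * Shared interrupt handler: walk every port chained on this IRQ and service
- * any with a pending interrupt, giving up after PASS_LIMIT passes.
- */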
-static irqreturn_t multi_interrupt(int irq, void *dev_id)
-{
- struct irq_info *iinfo = dev_id;
- struct list_head *lhead, *end = NULL;
- int pass_counter = 0;
-
-
- spin_lock(&iinfo->lock);
-
- lhead = iinfo->head;
- do {
- struct mp_port *mtpt;
- unsigned int iir;
-
- mtpt = list_entry(lhead, struct mp_port, list);
-
- iir = serial_in(mtpt, UART_IIR);
- printk("interrupt! port %d, iir 0x%x\n", mtpt->port.line, iir); //wlee
- if (!(iir & UART_IIR_NO_INT))
- {
- printk("interrupt handle\n");
- spin_lock(&mtpt->port.lock);
- multi_handle_port(mtpt);
- spin_unlock(&mtpt->port.lock);
-
- end = NULL;
- } else if (end == NULL)
- end = lhead;
-
- lhead = lhead->next;
- if (lhead == iinfo->head && pass_counter++ > PASS_LIMIT)
- {
-			printk(KERN_ERR "multi: too much work for "
-				"irq%d\n", irq);
- break;
- }
- } while (lhead != end);
-
- spin_unlock(&iinfo->lock);
-
-
- return IRQ_HANDLED;
-}
-
-static void serial_do_unlink(struct irq_info *i, struct mp_port *mtpt)
-{
- spin_lock_irq(&i->lock);
-
- if (!list_empty(i->head)) {
- if (i->head == &mtpt->list)
- i->head = i->head->next;
- list_del(&mtpt->list);
- } else {
- i->head = NULL;
- }
-
- spin_unlock_irq(&i->lock);
-}
-
-static int serial_link_irq_chain(struct mp_port *mtpt)
-{
- struct irq_info *i = irq_lists + mtpt->port.irq;
- int ret, irq_flags = mtpt->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
- spin_lock_irq(&i->lock);
-
- if (i->head) {
- list_add(&mtpt->list, i->head);
- spin_unlock_irq(&i->lock);
-
- ret = 0;
- } else {
- INIT_LIST_HEAD(&mtpt->list);
- i->head = &mtpt->list;
- spin_unlock_irq(&i->lock);
-
- ret = request_irq(mtpt->port.irq, multi_interrupt,
- irq_flags, "serial", i);
- if (ret < 0)
- serial_do_unlink(i, mtpt);
- }
-
- return ret;
-}
-
-
-
-
-static void serial_unlink_irq_chain(struct mp_port *mtpt)
-{
- struct irq_info *i = irq_lists + mtpt->port.irq;
-
- if (list_empty(i->head))
- {
- free_irq(mtpt->port.irq, i);
- }
- serial_do_unlink(i, mtpt);
-}
-
-static void multi_timeout(unsigned long data)
-{
- struct mp_port *mtpt = (struct mp_port *)data;
-
-
- spin_lock(&mtpt->port.lock);
- multi_handle_port(mtpt);
- spin_unlock(&mtpt->port.lock);
-
- mod_timer(&mtpt->timer, jiffies+1 );
-}
-
-static unsigned int multi_tx_empty(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned long flags;
- unsigned int ret;
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
- ret = serial_in(mtpt, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
-
- return ret;
-}
-
-
-static unsigned int multi_get_mctrl(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned char status;
- unsigned int ret;
-
- status = serial_in(mtpt, UART_MSR);
-
- ret = 0;
- if (status & UART_MSR_DCD)
- ret |= TIOCM_CAR;
- if (status & UART_MSR_RI)
- ret |= TIOCM_RNG;
- if (status & UART_MSR_DSR)
- ret |= TIOCM_DSR;
- if (status & UART_MSR_CTS)
- ret |= TIOCM_CTS;
- return ret;
-}
-
-static void multi_set_mctrl(struct sb_uart_port *port, unsigned int mctrl)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned char mcr = 0;
-
- mctrl &= 0xff;
-
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (mctrl & TIOCM_DTR)
- mcr |= UART_MCR_DTR;
- if (mctrl & TIOCM_OUT1)
- mcr |= UART_MCR_OUT1;
- if (mctrl & TIOCM_OUT2)
- mcr |= UART_MCR_OUT2;
- if (mctrl & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
-
-
- serial_out(mtpt, UART_MCR, mcr);
-}
-
-
-static void multi_break_ctl(struct sb_uart_port *port, int break_state)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned long flags;
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
- if (break_state == -1)
- mtpt->lcr |= UART_LCR_SBC;
- else
- mtpt->lcr &= ~UART_LCR_SBC;
- serial_out(mtpt, UART_LCR, mtpt->lcr);
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
-}
-
-
-
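-/*
- * Bring a port up: clear the FIFOs and any stale status, arm either the
- * polling timer or the shared IRQ chain, then enable receive interrupts.
- */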
-static int multi_startup(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned long flags;
- int retval;
-
- mtpt->capabilities = uart_config[mtpt->port.type].flags;
- mtpt->mcr = 0;
-
- if (mtpt->capabilities & UART_CLEAR_FIFO) {
- serial_outp(mtpt, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_outp(mtpt, UART_FCR, UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
- serial_outp(mtpt, UART_FCR, 0);
- }
-
- (void) serial_inp(mtpt, UART_LSR);
- (void) serial_inp(mtpt, UART_RX);
- (void) serial_inp(mtpt, UART_IIR);
- (void) serial_inp(mtpt, UART_MSR);
- //test-wlee 9-bit disable
- serial_outp(mtpt, UART_MSR, 0);
-
-
- if (!(mtpt->port.flags & UPF_BUGGY_UART) &&
- (serial_inp(mtpt, UART_LSR) == 0xff)) {
-		printk("ttyMP%d: LSR safety check engaged!\n", mtpt->port.line);
- //return -ENODEV;
- }
-
- if ((!is_real_interrupt(mtpt->port.irq)) || (mtpt->poll_type==TYPE_POLL)) {
- unsigned int timeout = mtpt->port.timeout;
-
- timeout = timeout > 6 ? (timeout / 2 - 2) : 1;
-
- mtpt->timer.data = (unsigned long)mtpt;
- mod_timer(&mtpt->timer, jiffies + timeout);
- }
- else
- {
- retval = serial_link_irq_chain(mtpt);
- if (retval)
- return retval;
- }
-
- serial_outp(mtpt, UART_LCR, UART_LCR_WLEN8);
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
- if ((is_real_interrupt(mtpt->port.irq))||(mtpt->poll_type==TYPE_INTERRUPT))
- mtpt->port.mctrl |= TIOCM_OUT2;
-
- multi_set_mctrl(&mtpt->port, mtpt->port.mctrl);
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
-
-
- mtpt->ier = UART_IER_RLSI | UART_IER_RDI;
- serial_outp(mtpt, UART_IER, mtpt->ier);
-
- (void) serial_inp(mtpt, UART_LSR);
- (void) serial_inp(mtpt, UART_RX);
- (void) serial_inp(mtpt, UART_IIR);
- (void) serial_inp(mtpt, UART_MSR);
-
- return 0;
-}
-
-
-
-static void multi_shutdown(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned long flags;
-
-
- mtpt->ier = 0;
- serial_outp(mtpt, UART_IER, 0);
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
- mtpt->port.mctrl &= ~TIOCM_OUT2;
-
- multi_set_mctrl(&mtpt->port, mtpt->port.mctrl);
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
-
- serial_out(mtpt, UART_LCR, serial_inp(mtpt, UART_LCR) & ~UART_LCR_SBC);
- serial_outp(mtpt, UART_FCR, UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT);
- serial_outp(mtpt, UART_FCR, 0);
-
-
- (void) serial_in(mtpt, UART_RX);
-
- if ((!is_real_interrupt(mtpt->port.irq))||(mtpt->poll_type==TYPE_POLL))
- {
- del_timer_sync(&mtpt->timer);
- }
- else
- {
- serial_unlink_irq_chain(mtpt);
- }
-}
-
-
-
-static unsigned int multi_get_divisor(struct sb_uart_port *port, unsigned int baud)
-{
- unsigned int quot;
-
- if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
- baud == (port->uartclk/4))
- quot = 0x8001;
- else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
- baud == (port->uartclk/8))
- quot = 0x8002;
- else
- quot = sb_uart_get_divisor(port, baud);
-
- return quot;
-}
-
-
-
-
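-/*
- * Program word length, parity, stop bits, baud divisor, FIFO trigger level
- * and status masks; 16C105x ports also get their deep-FIFO and auto-RTS
- * settings applied here.
- */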
-static void multi_set_termios(struct sb_uart_port *port, struct MP_TERMIOS *termios, struct MP_TERMIOS *old)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned char cval, fcr = 0;
- unsigned long flags;
- unsigned int baud, quot;
-
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- cval = 0x00;
- break;
- case CS6:
- cval = 0x01;
- break;
- case CS7:
- cval = 0x02;
- break;
- default:
- case CS8:
- cval = 0x03;
- break;
- }
-
- if (termios->c_cflag & CSTOPB)
- cval |= 0x04;
- if (termios->c_cflag & PARENB)
- cval |= UART_LCR_PARITY;
- if (!(termios->c_cflag & PARODD))
- cval |= UART_LCR_EPAR;
-
-#ifdef CMSPAR
- if (termios->c_cflag & CMSPAR)
- cval |= UART_LCR_SPAR;
-#endif
-
- baud = sb_uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
- quot = multi_get_divisor(port, baud);
-
- if (mtpt->capabilities & UART_USE_FIFO) {
- //if (baud < 2400)
- // fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
- //else
- // fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_8;
-
- // fcr = UART_FCR_ENABLE_FIFO | 0x90;
- fcr = fcr_arr[mtpt->port.line];
- }
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
-
- sb_uart_update_timeout(port, termios->c_cflag, baud);
-
- mtpt->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
- if (termios->c_iflag & INPCK)
- mtpt->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (termios->c_iflag & (BRKINT | PARMRK))
- mtpt->port.read_status_mask |= UART_LSR_BI;
-
- mtpt->port.ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- mtpt->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
- if (termios->c_iflag & IGNBRK) {
- mtpt->port.ignore_status_mask |= UART_LSR_BI;
- if (termios->c_iflag & IGNPAR)
- mtpt->port.ignore_status_mask |= UART_LSR_OE;
- }
-
- if ((termios->c_cflag & CREAD) == 0)
- mtpt->port.ignore_status_mask |= UART_LSR_DR;
-
- mtpt->ier &= ~UART_IER_MSI;
- if (UART_ENABLE_MS(&mtpt->port, termios->c_cflag))
- mtpt->ier |= UART_IER_MSI;
-
- serial_out(mtpt, UART_IER, mtpt->ier);
-
- if (mtpt->capabilities & UART_STARTECH) {
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR,
- termios->c_cflag & CRTSCTS ? UART_EFR_CTS :0);
- }
-
- serial_outp(mtpt, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
-
- serial_outp(mtpt, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_outp(mtpt, UART_DLM, quot >> 8); /* MS of divisor */
-
- serial_outp(mtpt, UART_LCR, cval); /* reset DLAB */
- mtpt->lcr = cval; /* Save LCR */
-
- if (fcr & UART_FCR_ENABLE_FIFO) {
- /* emulated UARTs (Lucent Venus 167x) need two steps */
- serial_outp(mtpt, UART_FCR, UART_FCR_ENABLE_FIFO);
- }
-
- serial_outp(mtpt, UART_FCR, fcr); /* set fcr */
-
-
- if ((mtpt->port.type == PORT_16C105X)
- || (mtpt->port.type == PORT_16C105XA))
- {
- if(deep[mtpt->port.line]!=0)
- set_deep_fifo(port, ENABLE);
-
- if (mtpt->interface != RS232)
- set_auto_rts(port,mtpt->interface);
-
- }
- else
- {
- if (mtpt->interface >= RS485NE)
- {
- uart_clear_mctrl(&mtpt->port, TIOCM_RTS);
- }
- }
-
- if(mtpt->device->device_id == PCI_DEVICE_ID_MP4M)
- {
- SendATCommand(mtpt);
- printk("SendATCommand\n");
- }
- multi_set_mctrl(&mtpt->port, mtpt->port.mctrl);
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
-}
-
-static void multi_pm(struct sb_uart_port *port, unsigned int state, unsigned int oldstate)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- if (state) {
- if (mtpt->capabilities & UART_STARTECH) {
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR, UART_EFR_ECB);
- serial_outp(mtpt, UART_LCR, 0);
- serial_outp(mtpt, UART_IER, UART_IERX_SLEEP);
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR, 0);
- serial_outp(mtpt, UART_LCR, 0);
- }
-
- if (mtpt->pm)
- mtpt->pm(port, state, oldstate);
- }
- else
- {
- if (mtpt->capabilities & UART_STARTECH) {
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR, UART_EFR_ECB);
- serial_outp(mtpt, UART_LCR, 0);
- serial_outp(mtpt, UART_IER, 0);
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR, 0);
- serial_outp(mtpt, UART_LCR, 0);
- }
-
- if (mtpt->pm)
- mtpt->pm(port, state, oldstate);
- }
-}
-
-static void multi_release_std_resource(struct mp_port *mtpt)
-{
- unsigned int size = 8 << mtpt->port.regshift;
-
- switch (mtpt->port.iotype) {
- case UPIO_MEM:
- if (!mtpt->port.mapbase)
- break;
-
- if (mtpt->port.flags & UPF_IOREMAP) {
- iounmap(mtpt->port.membase);
- mtpt->port.membase = NULL;
- }
-
- release_mem_region(mtpt->port.mapbase, size);
- break;
-
- case UPIO_HUB6:
- case UPIO_PORT:
- release_region(mtpt->port.iobase,size);
- break;
- }
-}
-
-static void multi_release_port(struct sb_uart_port *port)
-{
-}
-
-static int multi_request_port(struct sb_uart_port *port)
-{
- return 0;
-}
-
-static void multi_config_port(struct sb_uart_port *port, int flags)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- int probeflags = PROBE_ANY;
-
- if (flags & UART_CONFIG_TYPE)
- autoconfig(mtpt, probeflags);
- if (mtpt->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
- autoconfig_irq(mtpt);
-
- if (mtpt->port.type == PORT_UNKNOWN)
- multi_release_std_resource(mtpt);
-}
-
-static int multi_verify_port(struct sb_uart_port *port, struct serial_struct *ser)
-{
- if (ser->irq >= NR_IRQS || ser->irq < 0 ||
- ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
- ser->type == PORT_STARTECH)
- return -EINVAL;
- return 0;
-}
-
-static const char *multi_type(struct sb_uart_port *port)
-{
- int type = port->type;
-
- if (type >= ARRAY_SIZE(uart_config))
- type = 0;
- return uart_config[type].name;
-}
-
-static struct sb_uart_ops multi_pops = {
- .tx_empty = multi_tx_empty,
- .set_mctrl = multi_set_mctrl,
- .get_mctrl = multi_get_mctrl,
- .stop_tx = multi_stop_tx,
- .start_tx = multi_start_tx,
- .stop_rx = multi_stop_rx,
- .enable_ms = multi_enable_ms,
- .break_ctl = multi_break_ctl,
- .startup = multi_startup,
- .shutdown = multi_shutdown,
- .set_termios = multi_set_termios,
- .pm = multi_pm,
- .type = multi_type,
- .release_port = multi_release_port,
- .request_port = multi_request_port,
- .config_port = multi_config_port,
- .verify_port = multi_verify_port,
-};
-
-static struct uart_driver multi_reg = {
- .owner = THIS_MODULE,
- .driver_name = "goldel_tulip",
- .dev_name = "ttyMP",
- .major = SB_TTY_MP_MAJOR,
- .minor = 0,
- .nr = MAX_MP_PORT,
- .cons = NULL,
-};
-
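-/*
- * One-time port discovery: for every detected board, fill in each port's I/O
- * base, IRQ, input clock and interface type (RS232/RS422/RS485).
- */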
-static void __init multi_init_ports(void)
-{
- struct mp_port *mtpt;
- static int first = 1;
- int i,j,k;
- unsigned char osc;
- unsigned char b_ret = 0;
- static struct mp_device_t *sbdev;
-
- if (!first)
- return;
- first = 0;
-
- mtpt = multi_ports;
-
- for (k=0;k<NR_BOARD;k++)
- {
- sbdev = &mp_devs[k];
-
- for (i = 0; i < sbdev->nr_ports; i++, mtpt++)
- {
- mtpt->device = sbdev;
- mtpt->port.iobase = sbdev->uart_access_addr + 8*i;
- mtpt->port.irq = sbdev->irq;
- if ( ((sbdev->device_id == PCI_DEVICE_ID_MP4)&&(sbdev->revision==0x91)))
- mtpt->interface_config_addr = sbdev->option_reg_addr + 0x08 + i;
- else if (sbdev->revision == 0xc0)
- mtpt->interface_config_addr = sbdev->option_reg_addr + 0x08 + (i & 0x1);
- else
- mtpt->interface_config_addr = sbdev->option_reg_addr + 0x08 + i/8;
-
- mtpt->option_base_addr = sbdev->option_reg_addr;
-
- mtpt->poll_type = sbdev->poll_type;
-
- mtpt->port.uartclk = BASE_BAUD * 16;
-
- /* get input clock information */
- osc = inb(sbdev->option_reg_addr + MP_OPTR_DIR0 + i/8) & 0x0F;
- if (osc==0x0f)
- osc = 0;
- for(j=0;j<osc;j++)
- mtpt->port.uartclk *= 2;
- mtpt->port.flags |= STD_COM_FLAGS | UPF_SHARE_IRQ ;
- mtpt->port.iotype = UPIO_PORT;
- mtpt->port.ops = &multi_pops;
-
- if (sbdev->revision == 0xc0)
- {
- /* for SB16C1053APCI */
- b_ret = sb1053a_get_interface(mtpt, i);
- }
- else
- {
- b_ret = read_option_register(mtpt,(MP_OPTR_IIR0 + i/8));
- printk("IIR_RET = %x\n",b_ret);
- }
-
- /* default to RS232 */
- mtpt->interface = RS232;
- if (IIR_RS422 == (b_ret & IIR_TYPE_MASK))
- mtpt->interface = RS422PTP;
- if (IIR_RS485 == (b_ret & IIR_TYPE_MASK))
- mtpt->interface = RS485NE;
- }
- }
-}
-
-static void __init multi_register_ports(struct uart_driver *drv)
-{
- int i;
-
- multi_init_ports();
-
- for (i = 0; i < NR_PORTS; i++) {
- struct mp_port *mtpt = &multi_ports[i];
-
- mtpt->port.line = i;
- mtpt->port.ops = &multi_pops;
- init_timer(&mtpt->timer);
- mtpt->timer.function = multi_timeout;
- mp_add_one_port(drv, &mtpt->port);
- }
-}
-
-/**
- * pci_remap_base - remap BAR value of pci device
- *
- * PARAMETERS
- * pcidev - pci_dev structure address
- * offset - BAR offset PCI_BASE_ADDRESS_0 ~ PCI_BASE_ADDRESS_4
- * address - address to be changed BAR value
- * size - size of address space
- *
- * RETURNS
- *	If this function performs successfully, it returns 0. Otherwise, it returns -1.
- */
-static int pci_remap_base(struct pci_dev *pcidev, unsigned int offset,
- unsigned int address, unsigned int size)
-{
-#if 0
- struct resource *root;
- unsigned index = (offset - 0x10) >> 2;
-#endif
-
- pci_write_config_dword(pcidev, offset, address);
-#if 0
- root = pcidev->resource[index].parent;
- release_resource(&pcidev->resource[index]);
- address &= ~0x1;
- pcidev->resource[index].start = address;
- pcidev->resource[index].end = address + size - 1;
-
- if (request_resource(root, &pcidev->resource[index]) != NULL)
- {
- printk(KERN_ERR "pci remap conflict!! 0x%x\n", address);
- return (-1);
- }
-#endif
-
- return (0);
-}
-
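-/*
- * Per-board initialisation: record the BARs, revision and IRQ, derive the
- * port count from the device ID, remap the serial BARs on SB16C1053APCI
- * (revision 0xc0) boards and unmask the board's PCI interrupt.
- */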
-static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
-{
- static struct mp_device_t *sbdev = mp_devs;
- unsigned long addr = 0;
- int j;
- struct resource *ret = NULL;
-
- sbdev->device_id = brd.device_id;
- pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &(sbdev->revision));
- sbdev->name = brd.name;
- sbdev->uart_access_addr = pcidev->resource[0].start & PCI_BASE_ADDRESS_IO_MASK;
-
- /* check revision. The SB16C1053APCI's option i/o address is BAR4 */
- if (sbdev->revision == 0xc0)
- {
- /* SB16C1053APCI */
- sbdev->option_reg_addr = pcidev->resource[4].start & PCI_BASE_ADDRESS_IO_MASK;
- }
- else
- {
- sbdev->option_reg_addr = pcidev->resource[1].start & PCI_BASE_ADDRESS_IO_MASK;
- }
-#if 1
- if (sbdev->revision == 0xc0)
- {
- outb(0x00, sbdev->option_reg_addr + MP_OPTR_GPOCR);
- inb(sbdev->option_reg_addr + MP_OPTR_GPOCR);
- outb(0x83, sbdev->option_reg_addr + MP_OPTR_GPOCR);
- }
-#endif
-
- sbdev->irq = pcidev->irq;
-
- if ((brd.device_id & 0x0800) || !(brd.device_id &0xff00))
- {
- sbdev->poll_type = TYPE_INTERRUPT;
- }
- else
- {
- sbdev->poll_type = TYPE_POLL;
- }
-
- /* codes which is specific to each board*/
- switch(brd.device_id){
- case PCI_DEVICE_ID_MP1 :
- case PCIE_DEVICE_ID_MP1 :
- case PCIE_DEVICE_ID_MP1E :
- case PCIE_DEVICE_ID_GT_MP1 :
- sbdev->nr_ports = 1;
- break;
- case PCI_DEVICE_ID_MP2 :
- case PCIE_DEVICE_ID_MP2 :
- case PCIE_DEVICE_ID_GT_MP2 :
- case PCIE_DEVICE_ID_MP2B :
- case PCIE_DEVICE_ID_MP2E :
- sbdev->nr_ports = 2;
-
- /* serial base address remap */
- if (sbdev->revision == 0xc0)
- {
-			u32 prev_port_addr = 0;
-
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_0, &prev_port_addr);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_1, prev_port_addr + 8, 8);
- }
- break;
- case PCI_DEVICE_ID_MP4 :
- case PCI_DEVICE_ID_MP4A :
- case PCIE_DEVICE_ID_MP4 :
- case PCI_DEVICE_ID_GT_MP4 :
- case PCI_DEVICE_ID_GT_MP4A :
- case PCIE_DEVICE_ID_GT_MP4 :
- case PCI_DEVICE_ID_MP4M :
- case PCIE_DEVICE_ID_MP4B :
- sbdev->nr_ports = 4;
-
- if(sbdev->revision == 0x91){
- sbdev->reserved_addr[0] = pcidev->resource[0].start & PCI_BASE_ADDRESS_IO_MASK;
- outb(0x03 , sbdev->reserved_addr[0] + 0x01);
- outb(0x03 , sbdev->reserved_addr[0] + 0x02);
- outb(0x01 , sbdev->reserved_addr[0] + 0x20);
- outb(0x00 , sbdev->reserved_addr[0] + 0x21);
- request_region(sbdev->reserved_addr[0], 32, sbdev->name);
- sbdev->uart_access_addr = pcidev->resource[1].start & PCI_BASE_ADDRESS_IO_MASK;
- sbdev->option_reg_addr = pcidev->resource[2].start & PCI_BASE_ADDRESS_IO_MASK;
- }
-
- /* SB16C1053APCI */
- if (sbdev->revision == 0xc0)
- {
-			u32 prev_port_addr = 0;
-
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_0, &prev_port_addr);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_1, prev_port_addr + 8, 8);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_2, prev_port_addr + 16, 8);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_3, prev_port_addr + 24, 8);
- }
- break;
- case PCI_DEVICE_ID_MP6 :
- case PCI_DEVICE_ID_MP6A :
- case PCI_DEVICE_ID_GT_MP6 :
- case PCI_DEVICE_ID_GT_MP6A :
- sbdev->nr_ports = 6;
-
- /* SB16C1053APCI */
- if (sbdev->revision == 0xc0)
- {
-			u32 prev_port_addr = 0;
-
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_0, &prev_port_addr);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_1, prev_port_addr + 8, 8);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_2, prev_port_addr + 16, 16);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_3, prev_port_addr + 32, 16);
- }
- break;
- case PCI_DEVICE_ID_MP8 :
- case PCIE_DEVICE_ID_MP8 :
- case PCI_DEVICE_ID_GT_MP8 :
- case PCIE_DEVICE_ID_GT_MP8 :
- case PCIE_DEVICE_ID_MP8B :
- sbdev->nr_ports = 8;
- break;
- case PCI_DEVICE_ID_MP32 :
- case PCIE_DEVICE_ID_MP32 :
- case PCI_DEVICE_ID_GT_MP32 :
- case PCIE_DEVICE_ID_GT_MP32 :
- {
- int portnum_hex=0;
- portnum_hex = inb(sbdev->option_reg_addr);
- sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16);
- }
- break;
-#ifdef CONFIG_PARPORT_PC
- case PCI_DEVICE_ID_MP2S1P :
- sbdev->nr_ports = 2;
-
- /* SB16C1053APCI */
- if (sbdev->revision == 0xc0)
- {
-			u32 prev_port_addr = 0;
-
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_0, &prev_port_addr);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_1, prev_port_addr + 8, 8);
- }
-
- /* add PC compatible parallel port */
- parport_pc_probe_port(pcidev->resource[2].start, pcidev->resource[3].start, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &pcidev->dev, 0);
- break;
- case PCI_DEVICE_ID_MP1P :
- /* add PC compatible parallel port */
- parport_pc_probe_port(pcidev->resource[2].start, pcidev->resource[3].start, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &pcidev->dev, 0);
- break;
-#endif
- }
-
- ret = request_region(sbdev->uart_access_addr, (8*sbdev->nr_ports), sbdev->name);
-
- if (sbdev->revision == 0xc0)
- {
- ret = request_region(sbdev->option_reg_addr, 0x40, sbdev->name);
- }
- else
- {
- ret = request_region(sbdev->option_reg_addr, 0x20, sbdev->name);
- }
-
-
- NR_BOARD++;
- NR_PORTS += sbdev->nr_ports;
-
- /* Enable PCI interrupt */
- addr = sbdev->option_reg_addr + MP_OPTR_IMR0;
- for(j=0; j < (sbdev->nr_ports/8)+1; j++)
- {
- if (sbdev->poll_type == TYPE_INTERRUPT)
- {
- outb(0xff,addr +j);
- }
- }
- sbdev++;
-
- return 0;
-}
-
-static int __init multi_init(void)
-{
- int ret, i;
- struct pci_dev *dev = NULL;
-
- if(fcr_count==0)
- {
- for(i=0;i<256;i++)
- {
- fcr_arr[i] = 0x01;
-
- }
- }
- if(deep_count==0)
- {
- for(i=0;i<256;i++)
- {
- deep[i] = 1;
-
- }
- }
- if(rtr_count==0)
- {
- for(i=0;i<256;i++)
- {
- rtr[i] = 0x10;
- }
- }
- if(ttr_count==0)
- {
- for(i=0;i<256;i++)
- {
- ttr[i] = 0x38;
- }
- }
-
-
-printk("MULTI INIT\n");
- for( i=0; i< mp_nrpcibrds; i++)
- {
-
- while( (dev = pci_get_device(mp_pciboards[i].vendor_id, mp_pciboards[i].device_id, dev) ) )
-
- {
-printk("FOUND~~~\n");
-// Cent OS bug fix
-// if (mp_pciboards[i].device_id & 0x0800)
- {
- int status;
- pci_disable_device(dev);
- status = pci_enable_device(dev);
-
- if (status != 0)
- {
- printk("Multiport Board Enable Fail !\n\n");
- status = -ENXIO;
- return status;
- }
- }
-
- init_mp_dev(dev, mp_pciboards[i]);
- }
- }
-
- for (i = 0; i < NR_IRQS; i++)
- spin_lock_init(&irq_lists[i].lock);
-
- ret = mp_register_driver(&multi_reg);
-
- if (ret >= 0)
- multi_register_ports(&multi_reg);
-
- return ret;
-}
-
-static void __exit multi_exit(void)
-{
- int i;
-
- for (i = 0; i < NR_PORTS; i++)
- mp_remove_one_port(&multi_reg, &multi_ports[i].port);
-
- mp_unregister_driver(&multi_reg);
-}
-
-module_init(multi_init);
-module_exit(multi_exit);
-
-MODULE_DESCRIPTION("SystemBase Multiport PCI/PCIe CORE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sb105x/sb_pci_mp.h b/drivers/staging/sb105x/sb_pci_mp.h
deleted file mode 100644
index 80ae4ab04603..000000000000
--- a/drivers/staging/sb105x/sb_pci_mp.h
+++ /dev/null
@@ -1,291 +0,0 @@
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/wait.h>
-#include <linux/tty_driver.h>
-#include <linux/pci.h>
-#include <linux/circ_buf.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/serial.h>
-#include <linux/interrupt.h>
-
-
-#include <linux/parport.h>
-#include <linux/ctype.h>
-#include <linux/poll.h>
-
-
-#define MP_TERMIOS ktermios
-
-#include "sb_mp_register.h"
-#include "sb_ser_core.h"
-
-#define DRIVER_VERSION "1.1"
-#define DRIVER_DATE "2012/01/05"
-#define DRIVER_AUTHOR "SYSTEMBASE<tech@sysbas.com>"
-#define DRIVER_DESC "SystemBase PCI/PCIe Multiport Core"
-
-#define SB_TTY_MP_MAJOR 54
-#define PCI_VENDOR_ID_MULTIPORT 0x14A1
-
-#define PCI_DEVICE_ID_MP1 0x4d01
-#define PCI_DEVICE_ID_MP2 0x4d02
-#define PCI_DEVICE_ID_MP4 0x4d04
-#define PCI_DEVICE_ID_MP4A 0x4d54
-#define PCI_DEVICE_ID_MP6 0x4d06
-#define PCI_DEVICE_ID_MP6A 0x4d56
-#define PCI_DEVICE_ID_MP8 0x4d08
-#define PCI_DEVICE_ID_MP32 0x4d32
-/* Parallel port */
-#define PCI_DEVICE_ID_MP1P 0x4301
-#define PCI_DEVICE_ID_MP2S1P 0x4303
-
-#define PCIE_DEVICE_ID_MP1 0x4501
-#define PCIE_DEVICE_ID_MP2 0x4502
-#define PCIE_DEVICE_ID_MP4 0x4504
-#define PCIE_DEVICE_ID_MP8 0x4508
-#define PCIE_DEVICE_ID_MP32 0x4532
-
-#define PCIE_DEVICE_ID_MP1E 0x4e01
-#define PCIE_DEVICE_ID_MP2E 0x4e02
-#define PCIE_DEVICE_ID_MP2B 0x4b02
-#define PCIE_DEVICE_ID_MP4B 0x4b04
-#define PCIE_DEVICE_ID_MP8B 0x4b08
-
-#define PCI_DEVICE_ID_GT_MP4 0x0004
-#define PCI_DEVICE_ID_GT_MP4A 0x0054
-#define PCI_DEVICE_ID_GT_MP6 0x0006
-#define PCI_DEVICE_ID_GT_MP6A 0x0056
-#define PCI_DEVICE_ID_GT_MP8 0x0008
-#define PCI_DEVICE_ID_GT_MP32 0x0032
-
-#define PCIE_DEVICE_ID_GT_MP1 0x1501
-#define PCIE_DEVICE_ID_GT_MP2 0x1502
-#define PCIE_DEVICE_ID_GT_MP4 0x1504
-#define PCIE_DEVICE_ID_GT_MP8 0x1508
-#define PCIE_DEVICE_ID_GT_MP32 0x1532
-
-#define PCI_DEVICE_ID_MP4M 0x4604 //modem
-
-#define MAX_MP_DEV 8
-#define BD_MAX_PORT 32 /* Max serial port in one board */
-#define MAX_MP_PORT 256 /* Max serial port in one PC */
-
-#define PORT_16C105XA 3
-#define PORT_16C105X 2
-#define PORT_16C55X 1
-
-#define ENABLE 1
-#define DISABLE 0
-
-/* ioctls */
-#define TIOCGNUMOFPORT 0x545F
-#define TIOCSMULTIECHO 0x5440
-#define TIOCSPTPNOECHO 0x5441
-
-#define TIOCGOPTIONREG 0x5461
-#define TIOCGDISABLEIRQ 0x5462
-#define TIOCGENABLEIRQ 0x5463
-#define TIOCGSOFTRESET 0x5464
-#define TIOCGSOFTRESETR 0x5465
-#define TIOCGREGINFO 0x5466
-#define TIOCGGETLSR 0x5467
-#define TIOCGGETDEVID 0x5468
-#define TIOCGGETBDNO 0x5469
-#define TIOCGGETINTERFACE 0x546A
-#define TIOCGGETREV 0x546B
-#define TIOCGGETNRPORTS 0x546C
-#define TIOCGGETPORTTYPE 0x546D
-#define GETDEEPFIFO 0x54AA
-#define SETDEEPFIFO 0x54AB
-#define SETFCR 0x54BA
-#define SETTTR 0x54B1
-#define SETRTR 0x54B2
-#define GETTTR 0x54B3
-#define GETRTR 0x54B4
-
-/* multi-drop mode related ioctl commands */
-#define TIOCSMULTIDROP 0x5470
-#define TIOCSMDADDR 0x5471
-#define TIOCGMDADDR 0x5472
-#define TIOCSENDADDR 0x5473
-
-
-/* serial interface */
-#define RS232 1
-#define RS422PTP 2
-#define RS422MD 3
-#define RS485NE 4
-#define RS485ECHO 5
-
-#define serial_inp(up, offset) serial_in(up, offset)
-#define serial_outp(up, offset, value) serial_out(up, offset, value)
-
-#define PASS_LIMIT 256
-#define is_real_interrupt(irq) ((irq) != 0)
-
-#define PROBE_ANY (~0)
-
-static DEFINE_MUTEX(mp_mutex);
-#define MP_MUTEX_LOCK(x) mutex_lock(&(x))
-#define MP_MUTEX_UNLOCK(x) mutex_unlock(&(x))
-#define MP_STATE_LOCK(x) mutex_lock(&((x)->mutex))
-#define MP_STATE_UNLOCK(x) mutex_unlock(&((x)->mutex))
-
-
-#define UART_LSR_SPECIAL 0x1E
-
-#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
-#define uart_users(state) ((state)->count + ((state)->info ? (state)->info->blocked_open : 0))
-
-
-//#define MP_DEBUG 1
-#undef MP_DEBUG
-
-#ifdef MP_DEBUG
-#define DPRINTK(x...) printk(x)
-#else
-#define DPRINTK(x...) do { } while (0)
-#endif
-
-#ifdef MP_DEBUG
-#define DEBUG_AUTOCONF(fmt...) printk(fmt)
-#else
-#define DEBUG_AUTOCONF(fmt...) do { } while (0)
-#endif
-
-#ifdef MP_DEBUG
-#define DEBUG_INTR(fmt...) printk(fmt)
-#else
-#define DEBUG_INTR(fmt...) do { } while (0)
-#endif
-
-#if defined(__i386__) && defined(CONFIG_M486)
-#define SERIAL_INLINE
-#endif
-#ifdef SERIAL_INLINE
-#define _INLINE_ inline
-#else
-#define _INLINE_
-#endif
-
-#define TYPE_POLL 1
-#define TYPE_INTERRUPT 2
-
-
-struct mp_device_t {
- unsigned short device_id;
- unsigned char revision;
- char *name;
- unsigned long uart_access_addr;
- unsigned long option_reg_addr;
- unsigned long reserved_addr[4];
- int irq;
- int nr_ports;
- int poll_type;
-};
-
-typedef struct mppcibrd {
- char *name;
- unsigned short vendor_id;
- unsigned short device_id;
-} mppcibrd_t;
-
-static mppcibrd_t mp_pciboards[] = {
-
- { "Multi-1 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP1} ,
- { "Multi-2 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP2} ,
- { "Multi-4 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP4} ,
- { "Multi-4 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP4A} ,
- { "Multi-6 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP6} ,
- { "Multi-6 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP6A} ,
- { "Multi-8 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP8} ,
- { "Multi-32 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP32} ,
-
- { "Multi-1P PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP1P} ,
- { "Multi-2S1P PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP2S1P} ,
-
- { "Multi-4(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP4} ,
- { "Multi-4(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP4A} ,
- { "Multi-6(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP6} ,
- { "Multi-6(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP6A} ,
- { "Multi-8(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP8} ,
- { "Multi-32(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP32} ,
-
- { "Multi-1 PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP1} ,
- { "Multi-2 PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP2} ,
- { "Multi-4 PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP4} ,
- { "Multi-8 PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP8} ,
- { "Multi-32 PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP32} ,
-
- { "Multi-1 PCIe E", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP1E} ,
- { "Multi-2 PCIe E", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP2E} ,
- { "Multi-2 PCIe B", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP2B} ,
- { "Multi-4 PCIe B", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP4B} ,
- { "Multi-8 PCIe B", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP8B} ,
-
- { "Multi-1(GT) PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_GT_MP1} ,
- { "Multi-2(GT) PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_GT_MP2} ,
- { "Multi-4(GT) PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_GT_MP4} ,
- { "Multi-8(GT) PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_GT_MP8} ,
- { "Multi-32(GT) PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_GT_MP32} ,
-
- { "Multi-4M PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP4M} ,
-};
-
-struct mp_port {
- struct sb_uart_port port;
-
- struct timer_list timer; /* "no irq" timer */
- struct list_head list; /* ports on this IRQ */
- unsigned int capabilities; /* port capabilities */
- unsigned short rev;
- unsigned char acr;
- unsigned char ier;
- unsigned char lcr;
- unsigned char mcr;
- unsigned char mcr_mask; /* mask of user bits */
- unsigned char mcr_force; /* mask of forced bits */
- unsigned char lsr_break_flag;
-
- void (*pm)(struct sb_uart_port *port,
- unsigned int state, unsigned int old);
- struct mp_device_t *device;
- unsigned long interface_config_addr;
- unsigned long option_base_addr;
- unsigned char interface;
- unsigned char poll_type;
-};
-
-struct irq_info {
- spinlock_t lock;
- struct list_head *head;
-};
-
-struct sb105x_uart_config {
- char *name;
- int dfl_xmit_fifo_size;
- int flags;
-};
-
-static const struct sb105x_uart_config uart_config[] = {
- { "unknown", 1, 0 },
- { "16550A", 16, UART_CLEAR_FIFO | UART_USE_FIFO },
- { "SB16C1050", 128, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH },
- { "SB16C1050A", 128, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH },
-};
-
-
-
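
The DPRINTK/DEBUG_AUTOCONF/DEBUG_INTR wrappers in this header, all gated on a private MP_DEBUG define, are the kind of macro the stock printk helpers already cover: pr_debug() and dev_dbg() compile to nothing unless DEBUG or dynamic debug is enabled. A sketch of the equivalent with the stock helpers (the "sb105x:" prefix is an illustrative choice, not taken from the driver):

#define pr_fmt(fmt) "sb105x: " fmt	/* must be defined before printk.h is pulled in */
#include <linux/printk.h>

static void example(void)
{
	/* no-op unless built with DEBUG or enabled via dynamic debug */
	pr_debug("probed %d ports\n", 4);
}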
diff --git a/drivers/staging/sb105x/sb_ser_core.h b/drivers/staging/sb105x/sb_ser_core.h
deleted file mode 100644
index c8fb99173299..000000000000
--- a/drivers/staging/sb105x/sb_ser_core.h
+++ /dev/null
@@ -1,368 +0,0 @@
-#include <linux/wait.h>
-
-#define UART_CONFIG_TYPE (1 << 0)
-#define UART_CONFIG_IRQ (1 << 1)
-#define UPIO_PORT (0)
-#define UPIO_HUB6 (1)
-#define UPIO_MEM (2)
-#define UPIO_MEM32 (3)
-#define UPIO_AU (4) /* Au1x00 type IO */
-#define UPIO_TSI (5) /* Tsi108/109 type IO */
-#define UPF_FOURPORT (1 << 1)
-#define UPF_SAK (1 << 2)
-#define UPF_SPD_MASK (0x1030)
-#define UPF_SPD_HI (0x0010)
-#define UPF_SPD_VHI (0x0020)
-#define UPF_SPD_CUST (0x0030)
-#define UPF_SPD_SHI (0x1000)
-#define UPF_SPD_WARP (0x1010)
-#define UPF_SKIP_TEST (1 << 6)
-#define UPF_AUTO_IRQ (1 << 7)
-#define UPF_HARDPPS_CD (1 << 11)
-#define UPF_LOW_LATENCY (1 << 13)
-#define UPF_BUGGY_UART (1 << 14)
-#define UPF_MAGIC_MULTIPLIER (1 << 16)
-#define UPF_CONS_FLOW (1 << 23)
-#define UPF_SHARE_IRQ (1 << 24)
-#define UPF_BOOT_AUTOCONF (1 << 28)
-#define UPF_DEAD (1 << 30)
-#define UPF_IOREMAP (1 << 31)
-#define UPF_CHANGE_MASK (0x17fff)
-#define UPF_USR_MASK (UPF_SPD_MASK|UPF_LOW_LATENCY)
-#define USF_CLOSING_WAIT_INF (0)
-#define USF_CLOSING_WAIT_NONE (~0U)
-
-#define UART_XMIT_SIZE PAGE_SIZE
-
-#define UIF_CHECK_CD (1 << 25)
-#define UIF_CTS_FLOW (1 << 26)
-#define UIF_NORMAL_ACTIVE (1 << 29)
-#define UIF_INITIALIZED (1 << 31)
-#define UIF_SUSPENDED (1 << 30)
-
-#define WAKEUP_CHARS 256
-
-#define uart_circ_empty(circ) ((circ)->head == (circ)->tail)
-#define uart_circ_clear(circ) ((circ)->head = (circ)->tail = 0)
-
-#define uart_circ_chars_pending(circ) \
- (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
-#define uart_circ_chars_free(circ) \
- (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
-#define uart_tx_stopped(port) \
- ((port)->info->tty->stopped || (port)->info->tty->hw_stopped)
-
-#define UART_ENABLE_MS(port,cflag) ((port)->flags & UPF_HARDPPS_CD || \
- (cflag) & CRTSCTS || \
- !((cflag) & CLOCAL))
-
-
-struct sb_uart_port;
-struct sb_uart_info;
-struct serial_struct;
-struct device;
-
-struct sb_uart_ops {
- unsigned int (*tx_empty)(struct sb_uart_port *);
- void (*set_mctrl)(struct sb_uart_port *, unsigned int mctrl);
- unsigned int (*get_mctrl)(struct sb_uart_port *);
- void (*stop_tx)(struct sb_uart_port *);
- void (*start_tx)(struct sb_uart_port *);
- void (*send_xchar)(struct sb_uart_port *, char ch);
- void (*stop_rx)(struct sb_uart_port *);
- void (*enable_ms)(struct sb_uart_port *);
- void (*break_ctl)(struct sb_uart_port *, int ctl);
- int (*startup)(struct sb_uart_port *);
- void (*shutdown)(struct sb_uart_port *);
- void (*set_termios)(struct sb_uart_port *, struct MP_TERMIOS *new,
- struct MP_TERMIOS *old);
- void (*pm)(struct sb_uart_port *, unsigned int state,
- unsigned int oldstate);
- int (*set_wake)(struct sb_uart_port *, unsigned int state);
-
- const char *(*type)(struct sb_uart_port *);
-
- void (*release_port)(struct sb_uart_port *);
-
- int (*request_port)(struct sb_uart_port *);
- void (*config_port)(struct sb_uart_port *, int);
- int (*verify_port)(struct sb_uart_port *, struct serial_struct *);
- int (*ioctl)(struct sb_uart_port *, unsigned int, unsigned long);
-};
-
-
-struct sb_uart_icount {
- __u32 cts;
- __u32 dsr;
- __u32 rng;
- __u32 dcd;
- __u32 rx;
- __u32 tx;
- __u32 frame;
- __u32 overrun;
- __u32 parity;
- __u32 brk;
- __u32 buf_overrun;
-};
-typedef unsigned int upf_t;
-
-struct sb_uart_port {
- spinlock_t lock; /* port lock */
- unsigned int iobase; /* in/out[bwl] */
- unsigned char __iomem *membase; /* read/write[bwl] */
- unsigned int irq; /* irq number */
- unsigned int uartclk; /* base uart clock */
- unsigned int fifosize; /* tx fifo size */
- unsigned char x_char; /* xon/xoff char */
- unsigned char regshift; /* reg offset shift */
- unsigned char iotype; /* io access style */
- unsigned char unused1;
-
-
- unsigned int read_status_mask; /* driver specific */
- unsigned int ignore_status_mask; /* driver specific */
- struct sb_uart_info *info; /* pointer to parent info */
- struct sb_uart_icount icount; /* statistics */
-
- struct console *cons; /* struct console, if any */
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
- unsigned long sysrq; /* sysrq timeout */
-#endif
-
- upf_t flags;
-
- unsigned int mctrl; /* current modem ctrl settings */
- unsigned int timeout; /* character-based timeout */
- unsigned int type; /* port type */
- const struct sb_uart_ops *ops;
- unsigned int custom_divisor;
- unsigned int line; /* port index */
- unsigned long mapbase; /* for ioremap */
- struct device *dev; /* parent device */
- unsigned char hub6; /* this should be in the 8250 driver */
- unsigned char unused[3];
-};
-
-#define mdmode unused[2]
-#define MDMODE_ADDR 0x1
-#define MDMODE_ENABLE 0x2
-#define MDMODE_AUTO 0x4
-#define MDMODE_ADDRSEND 0x8
-
-struct sb_uart_state {
- unsigned int close_delay; /* msec */
- unsigned int closing_wait; /* msec */
-
-
- int count;
- int pm_state;
- struct sb_uart_info *info;
- struct sb_uart_port *port;
-
- struct mutex mutex;
-};
-
-typedef unsigned int uif_t;
-
-struct sb_uart_info {
- struct tty_struct *tty;
- struct circ_buf xmit;
- uif_t flags;
-
- int blocked_open;
-
- struct tasklet_struct tlet;
-
- wait_queue_head_t open_wait;
- wait_queue_head_t delta_msr_wait;
-};
-
-
-struct module;
-struct tty_driver;
-
-struct uart_driver {
- struct module *owner;
- const char *driver_name;
- const char *dev_name;
- int major;
- int minor;
- int nr;
- struct console *cons;
-
- struct sb_uart_state *state;
- struct tty_driver *tty_driver;
-};
-
-void sb_uart_write_wakeup(struct sb_uart_port *port)
-{
- struct sb_uart_info *info = port->info;
- tasklet_schedule(&info->tlet);
-}
-
-void sb_uart_update_timeout(struct sb_uart_port *port, unsigned int cflag,
- unsigned int baud)
-{
- unsigned int bits;
-
- switch (cflag & CSIZE)
- {
- case CS5:
- bits = 7;
- break;
-
- case CS6:
- bits = 8;
- break;
-
- case CS7:
- bits = 9;
- break;
-
- default:
- bits = 10;
- break;
- }
-
- if (cflag & CSTOPB)
- {
- bits++;
- }
-
- if (cflag & PARENB)
- {
- bits++;
- }
-
- bits = bits * port->fifosize;
-
- port->timeout = (HZ * bits) / baud + HZ/50;
-}
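
A worked reading of that formula: with CS8 the switch falls into the default arm (10 bit times per character: one start, eight data, one stop), CSTOPB and PARENB each add one more, and the total is scaled by the FIFO depth. With a 16-byte FIFO at 9600 baud that is 10 * 16 = 160 bit times, so port->timeout = HZ*160/9600 + HZ/50, roughly 17 ms to drain a full FIFO plus a 20 ms margin, expressed in jiffies.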
-unsigned int sb_uart_get_baud_rate(struct sb_uart_port *port, struct MP_TERMIOS *termios,
- struct MP_TERMIOS *old, unsigned int min,
- unsigned int max)
-{
- unsigned int try, baud, altbaud = 38400;
- upf_t flags = port->flags & UPF_SPD_MASK;
-
- if (flags == UPF_SPD_HI)
- altbaud = 57600;
- if (flags == UPF_SPD_VHI)
- altbaud = 115200;
- if (flags == UPF_SPD_SHI)
- altbaud = 230400;
- if (flags == UPF_SPD_WARP)
- altbaud = 460800;
-
- for (try = 0; try < 2; try++) {
-
- switch (termios->c_cflag & (CBAUD | CBAUDEX))
- {
- case B921600 : baud = 921600; break;
- case B460800 : baud = 460800; break;
- case B230400 : baud = 230400; break;
- case B115200 : baud = 115200; break;
- case B57600 : baud = 57600; break;
- case B38400 : baud = 38400; break;
- case B19200 : baud = 19200; break;
- case B9600 : baud = 9600; break;
- case B4800 : baud = 4800; break;
- case B2400 : baud = 2400; break;
- case B1800 : baud = 1800; break;
- case B1200 : baud = 1200; break;
- case B600 : baud = 600; break;
- case B300 : baud = 300; break;
- case B200 : baud = 200; break;
- case B150 : baud = 150; break;
- case B134 : baud = 134; break;
- case B110 : baud = 110; break;
- case B75 : baud = 75; break;
- case B50 : baud = 50; break;
- default : baud = 9600; break;
- }
-
- if (baud == 38400)
- baud = altbaud;
-
- if (baud == 0)
- baud = 9600;
-
- if (baud >= min && baud <= max)
- return baud;
-
- termios->c_cflag &= ~CBAUD;
- if (old) {
- termios->c_cflag |= old->c_cflag & CBAUD;
- old = NULL;
- continue;
- }
-
- termios->c_cflag |= B9600;
- }
-
- return 0;
-}
-unsigned int sb_uart_get_divisor(struct sb_uart_port *port, unsigned int baud)
-{
- unsigned int quot;
-
- if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
- quot = port->custom_divisor;
- else
- quot = (port->uartclk + (8 * baud)) / (16 * baud);
-
- return quot;
-}
-
-
-
-static inline int sb_uart_handle_break(struct sb_uart_port *port)
-{
- struct sb_uart_info *info = port->info;
-
- if (port->flags & UPF_SAK)
- do_SAK(info->tty);
- return 0;
-}
-
-static inline void sb_uart_handle_dcd_change(struct sb_uart_port *port, unsigned int status)
-{
- struct sb_uart_info *info = port->info;
-
- port->icount.dcd++;
-
- if (info->flags & UIF_CHECK_CD) {
- if (status)
- wake_up_interruptible(&info->open_wait);
- else if (info->tty)
- tty_hangup(info->tty);
- }
-}
-
-static inline void sb_uart_handle_cts_change(struct sb_uart_port *port, unsigned int status)
-{
- struct sb_uart_info *info = port->info;
- struct tty_struct *tty = info->tty;
-
- port->icount.cts++;
-
- if (info->flags & UIF_CTS_FLOW) {
- if (tty->hw_stopped) {
- if (status) {
- tty->hw_stopped = 0;
- port->ops->start_tx(port);
- sb_uart_write_wakeup(port);
- }
- } else {
- if (!status) {
- tty->hw_stopped = 1;
- port->ops->stop_tx(port);
- }
- }
- }
-}
-
-
-
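
The open-coded CBAUD/CBAUDEX switch in sb_uart_get_baud_rate() duplicates logic the serial core already exports: uart_get_baud_rate() performs the same c_cflag decoding, the UPF_SPD_* alternate-baud handling and the fallback to the old termios. A minimal sketch, assuming a port ported to serial_core rather than this private sb_uart layer (signatures as in kernels of this era):

#include <linux/serial_core.h>

static void example_set_termios(struct uart_port *port, struct ktermios *termios,
				struct ktermios *old)
{
	unsigned int baud, quot;

	/* decodes CBAUD/CBAUDEX, honours UPF_SPD_*, falls back to old/9600 */
	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
	quot = uart_get_divisor(port, baud);
	(void)quot;	/* a real driver would program the divisor latch here */
}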
diff --git a/drivers/staging/sbe-2t3e3/2t3e3.h b/drivers/staging/sbe-2t3e3/2t3e3.h
index ccad049c1122..e7bf721f3fd1 100644
--- a/drivers/staging/sbe-2t3e3/2t3e3.h
+++ b/drivers/staging/sbe-2t3e3/2t3e3.h
@@ -613,12 +613,12 @@
* descriptor list and data buffer
*
**********************************************************************/
-typedef struct {
+struct t3e3_rx_desc {
u32 rdes0;
u32 rdes1;
u32 rdes2;
u32 rdes3;
-} t3e3_rx_desc_t;
+};
#define SBE_2T3E3_RX_DESC_RING_SIZE 64
@@ -648,12 +648,12 @@ typedef struct {
/*********************/
-typedef struct {
+struct t3e3_tx_desc {
u32 tdes0;
u32 tdes1;
u32 tdes2;
u32 tdes3;
-} t3e3_tx_desc_t;
+};
#define SBE_2T3E3_TX_DESC_RING_SIZE 256
@@ -701,7 +701,7 @@ struct channel {
} h;
/* statistics */
- t3e3_stats_t s;
+ struct t3e3_stats s;
/* running */
struct {
@@ -709,7 +709,7 @@ struct channel {
} r;
/* parameters */
- t3e3_param_t p;
+ struct t3e3_param p;
u32 liu_regs[SBE_2T3E3_LIU_REG_MAX]; /* LIU registers */
u32 framer_regs[SBE_2T3E3_FRAMER_REG_MAX]; /* Framer registers */
@@ -723,12 +723,12 @@ struct channel {
u32 interrupt_enable_mask;
/* receive chain/ring */
- t3e3_rx_desc_t *rx_ring;
+ struct t3e3_rx_desc *rx_ring;
struct sk_buff *rx_data[SBE_2T3E3_RX_DESC_RING_SIZE];
u32 rx_ring_current_read;
/* transmit chain/ring */
- t3e3_tx_desc_t *tx_ring;
+ struct t3e3_tx_desc *tx_ring;
struct sk_buff *tx_data[SBE_2T3E3_TX_DESC_RING_SIZE];
u32 tx_ring_current_read;
u32 tx_ring_current_write;
@@ -760,8 +760,7 @@ void t3e3_init(struct channel *);
void t3e3_if_up(struct channel *);
void t3e3_if_down(struct channel *);
int t3e3_if_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void t3e3_if_config(struct channel *, u32, char *,
- t3e3_resp_t *, int *);
+void t3e3_if_config(struct channel *, u32, char *, struct t3e3_resp *, int *);
void t3e3_set_frame_type(struct channel *, u32);
u32 t3e3_eeprom_read_word(struct channel *, u32);
void t3e3_read_card_serial_number(struct channel *);
@@ -838,7 +837,7 @@ static inline int has_two_ports(struct pci_dev *pdev)
return pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0;
}
-#define dev_to_priv(dev) (*(struct channel **) ((hdlc_device*)(dev) + 1))
+#define dev_to_priv(dev) (*(struct channel **) ((hdlc_device *)(dev) + 1))
static inline u32 dc_read(unsigned long addr, u32 reg)
{
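
The 2t3e3.h hunks above follow the kernel coding style rule on typedefs (Documentation/CodingStyle, chapter 5): a typedef over a plain structure hides the fact that it is a struct, so t3e3_rx_desc_t and t3e3_tx_desc_t become struct t3e3_rx_desc and struct t3e3_tx_desc, spelled out at every use. The pattern in miniature:

#include <linux/types.h>

/* discouraged: the typedef hides the aggregate type */
typedef struct {
	u32 rdes0;
} rx_desc_t;

/* preferred: the struct keyword stays visible at the point of use */
struct rx_desc {
	u32 rdes0;
};

struct rx_desc *ring;	/* callers always see that it is a struct */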
diff --git a/drivers/staging/sbe-2t3e3/ctrl.c b/drivers/staging/sbe-2t3e3/ctrl.c
index d280bcfd660a..e0964ac9e7d7 100644
--- a/drivers/staging/sbe-2t3e3/ctrl.c
+++ b/drivers/staging/sbe-2t3e3/ctrl.c
@@ -164,12 +164,12 @@ static void t3e3_reg_write(struct channel *sc, u32 *reg)
}
}
-static void t3e3_port_get(struct channel *sc, t3e3_param_t *param)
+static void t3e3_port_get(struct channel *sc, struct t3e3_param *param)
{
- memcpy(param, &(sc->p), sizeof(t3e3_param_t));
+ memcpy(param, &(sc->p), sizeof(struct t3e3_param));
}
-static void t3e3_port_set(struct channel *sc, t3e3_param_t *param)
+static void t3e3_port_set(struct channel *sc, struct t3e3_param *param)
{
if (param->frame_mode != 0xff)
cpld_set_frame_mode(sc, param->frame_mode);
@@ -216,8 +216,7 @@ static void t3e3_port_set(struct channel *sc, t3e3_param_t *param)
cpld_set_scrambler(sc, param->scrambler);
}
-static void t3e3_port_get_stats(struct channel *sc,
- t3e3_stats_t *stats)
+static void t3e3_port_get_stats(struct channel *sc, struct t3e3_stats *stats)
{
u32 result;
@@ -279,18 +278,18 @@ static void t3e3_port_get_stats(struct channel *sc,
result += exar7250_read(sc, SBE_2T3E3_FRAMER_REG_PMON_HOLDING_REGISTER);
sc->s.CP_BIT += result;
- memcpy(stats, &(sc->s), sizeof(t3e3_stats_t));
+ memcpy(stats, &(sc->s), sizeof(struct t3e3_stats));
}
static void t3e3_port_del_stats(struct channel *sc)
{
- memset(&(sc->s), 0, sizeof(t3e3_stats_t));
+ memset(&(sc->s), 0, sizeof(struct t3e3_stats));
}
void t3e3_if_config(struct channel *sc, u32 cmd, char *set,
- t3e3_resp_t *ret, int *rlen)
+ struct t3e3_resp *ret, int *rlen)
{
- t3e3_param_t *param = (t3e3_param_t *)set;
+ struct t3e3_param *param = (struct t3e3_param *)set;
u32 *data = (u32 *)set;
/* turn off all interrupt */
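
A side note on the copies in t3e3_port_get()/t3e3_port_get_stats(): once the objects are plain named structs, a direct structure assignment is equivalent to the memcpy() and is type-checked by the compiler. A small sketch of the alternative (not what the patch does, just the equivalent form):

static void t3e3_port_get(struct channel *sc, struct t3e3_param *param)
{
	*param = sc->p;	/* same effect as memcpy(param, &sc->p, sizeof(*param)) */
}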
diff --git a/drivers/staging/sbe-2t3e3/ctrl.h b/drivers/staging/sbe-2t3e3/ctrl.h
index c11a58871845..41f144d75c36 100644
--- a/drivers/staging/sbe-2t3e3/ctrl.h
+++ b/drivers/staging/sbe-2t3e3/ctrl.h
@@ -84,7 +84,7 @@
#define NG_SBE_2T3E3_NODE_TYPE "sbe2T3E3"
#define NG_SBE_2T3E3_COOKIE 0x03800891
-typedef struct t3e3_param {
+struct t3e3_param {
u_int8_t frame_mode; /* FRAME_MODE_* */
u_int8_t crc; /* CRC_* */
u_int8_t receiver_on; /* ON/OFF */
@@ -102,9 +102,9 @@ typedef struct t3e3_param {
u_int8_t fractional_mode; /* FRACTIONAL_MODE_* */
u_int8_t bandwidth_start; /* 0-255 */
u_int8_t bandwidth_stop; /* 0-255 */
-} t3e3_param_t;
+};
-typedef struct t3e3_stats {
+struct t3e3_stats {
u_int64_t in_bytes;
u32 in_packets, in_dropped;
u32 in_errors, in_error_desc, in_error_coll, in_error_drib,
@@ -117,15 +117,15 @@ typedef struct t3e3_stats {
u_int8_t LOC, LOF, OOF, LOS, AIS, FERF, IDLE, AIC, FEAC;
u_int16_t FEBE_code;
u32 LCV, FRAMING_BIT, PARITY_ERROR, FEBE_count, CP_BIT;
-} t3e3_stats_t;
+};
-typedef struct t3e3_resp {
+struct t3e3_resp {
union {
- t3e3_param_t param;
- t3e3_stats_t stats;
+ struct t3e3_param param;
+ struct t3e3_stats stats;
u32 data;
} u;
-} t3e3_resp_t;
+};
#endif /* CTRL_H */
diff --git a/drivers/staging/sbe-2t3e3/dc.c b/drivers/staging/sbe-2t3e3/dc.c
index f207b9e015ce..02510f67ac45 100644
--- a/drivers/staging/sbe-2t3e3/dc.c
+++ b/drivers/staging/sbe-2t3e3/dc.c
@@ -316,13 +316,13 @@ static int dc_init_descriptor_list(struct channel *sc)
if (sc->ether.rx_ring == NULL)
sc->ether.rx_ring = kcalloc(SBE_2T3E3_RX_DESC_RING_SIZE,
- sizeof(t3e3_rx_desc_t), GFP_KERNEL);
+ sizeof(struct t3e3_rx_desc), GFP_KERNEL);
if (sc->ether.rx_ring == NULL)
return -ENOMEM;
if (sc->ether.tx_ring == NULL)
sc->ether.tx_ring = kcalloc(SBE_2T3E3_TX_DESC_RING_SIZE,
- sizeof(t3e3_tx_desc_t), GFP_KERNEL);
+ sizeof(struct t3e3_tx_desc), GFP_KERNEL);
if (sc->ether.tx_ring == NULL) {
kfree(sc->ether.rx_ring);
sc->ether.rx_ring = NULL;
@@ -339,7 +339,8 @@ static int dc_init_descriptor_list(struct channel *sc)
SBE_2T3E3_RX_DESC_SECOND_ADDRESS_CHAINED | SBE_2T3E3_MTU;
if (sc->ether.rx_data[i] == NULL) {
- if (!(m = dev_alloc_skb(MCLBYTES))) {
+ m = dev_alloc_skb(MCLBYTES);
+ if (!m) {
for (j = 0; j < i; j++) {
dev_kfree_skb_any(sc->ether.rx_data[j]);
sc->ether.rx_data[j] = NULL;
diff --git a/drivers/staging/sbe-2t3e3/intr.c b/drivers/staging/sbe-2t3e3/intr.c
index efdeb7510047..1bf74b788008 100644
--- a/drivers/staging/sbe-2t3e3/intr.c
+++ b/drivers/staging/sbe-2t3e3/intr.c
@@ -118,7 +118,7 @@ void dc_intr_rx(struct channel *sc)
{
u32 current_read;
u32 error_mask, error;
- t3e3_rx_desc_t *current_desc;
+ struct t3e3_rx_desc *current_desc;
struct sk_buff *m, *m2;
unsigned rcv_len;
@@ -292,7 +292,7 @@ void dc_intr_tx(struct channel *sc)
{
u32 current_read, current_write;
u32 last_segment, error;
- t3e3_tx_desc_t *current_desc;
+ struct t3e3_tx_desc *current_desc;
spin_lock(&sc->ether.tx_lock);
diff --git a/drivers/staging/sbe-2t3e3/maps.c b/drivers/staging/sbe-2t3e3/maps.c
index 7084fbe7b794..e5494502cde1 100644
--- a/drivers/staging/sbe-2t3e3/maps.c
+++ b/drivers/staging/sbe-2t3e3/maps.c
@@ -13,8 +13,7 @@
#include <linux/kernel.h>
#include "2t3e3.h"
-const u32 cpld_reg_map[][2] =
-{
+const u32 cpld_reg_map[][2] = {
{ 0x0000, 0x0080 }, /* 0 - Port Control Register A (PCRA) */
{ 0x0004, 0x0084 }, /* 1 - Port Control Register B (PCRB) */
{ 0x0008, 0x0088 }, /* 2 - LCV Count Register (PLCR) */
@@ -35,8 +34,7 @@ const u32 cpld_reg_map[][2] =
{ 0x0070, 0x00f0 }, /* 17 - Port Bandwidth Stop (PBWL) */
};
-const u32 cpld_val_map[][2] =
-{
+const u32 cpld_val_map[][2] = {
{ 0x01, 0x02 }, /* LIU1 / LIU2 select for Serial Chip Select */
{ 0x04, 0x08 }, /* DAC1 / DAC2 select for Serial Chip Select */
{ 0x00, 0x04 }, /* LOOP1 / LOOP2 - select of loop timing source */
@@ -94,8 +92,7 @@ const u32 t3e3_framer_reg_map[] = {
0x81 /* 47 - LINE_INTERFACE_SCAN */
};
-const u32 t3e3_liu_reg_map[] =
-{
+const u32 t3e3_liu_reg_map[] = {
0x00, /* REG0 */
0x01, /* REG1 */
0x02, /* REG2 */
diff --git a/drivers/staging/sbe-2t3e3/module.c b/drivers/staging/sbe-2t3e3/module.c
index 0e32be5c2471..a6f93a43d216 100644
--- a/drivers/staging/sbe-2t3e3/module.c
+++ b/drivers/staging/sbe-2t3e3/module.c
@@ -122,7 +122,7 @@ static void t3e3_remove_card(struct pci_dev *pdev)
struct channel *channel0 = pci_get_drvdata(pdev);
struct card *card = channel0->card;
- del_timer(&card->timer);
+ del_timer_sync(&card->timer);
if (has_two_ports(channel0->pdev)) {
t3e3_remove_channel(&card->channels[1]);
pci_dev_put(card->channels[1].pdev);
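
The module.c change is more than style: del_timer() only dequeues a pending timer and can return while the callback is still running on another CPU, whereas del_timer_sync() also waits for a running callback to finish, which is what a remove path needs before freeing the object the callback touches. In outline, with a hypothetical wrapper name around the same card->timer:

#include <linux/timer.h>
#include <linux/slab.h>

static void example_remove(struct card *card)
{
	/*
	 * del_timer(&card->timer) could return with the callback still
	 * executing elsewhere; del_timer_sync() cannot, so the kfree()
	 * below is safe.
	 */
	del_timer_sync(&card->timer);
	kfree(card);
}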
diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
index 1f5088b3c10b..fe6c9513c9cd 100644
--- a/drivers/staging/sbe-2t3e3/netdev.c
+++ b/drivers/staging/sbe-2t3e3/netdev.c
@@ -25,8 +25,8 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct channel *sc = dev_to_priv(dev);
int cmd_2t3e3, len, rlen;
- t3e3_param_t param;
- t3e3_resp_t resp;
+ struct t3e3_param param;
+ struct t3e3_resp resp;
void __user *data = ifr->ifr_data + sizeof(cmd_2t3e3) + sizeof(len);
if (cmd == SIOCWANDEV)
@@ -61,7 +61,7 @@ static struct net_device_stats *t3e3_get_stats(struct net_device *dev)
{
struct net_device_stats *nstats = &dev->stats;
struct channel *sc = dev_to_priv(dev);
- t3e3_stats_t *stats = &sc->s;
+ struct t3e3_stats *stats = &sc->s;
memset(nstats, 0, sizeof(struct net_device_stats));
nstats->rx_packets = stats->in_packets;
diff --git a/drivers/staging/sep/sep_crypto.c b/drivers/staging/sep/sep_crypto.c
index 7fc267550c65..965485f71fe9 100644
--- a/drivers/staging/sep/sep_crypto.c
+++ b/drivers/staging/sep/sep_crypto.c
@@ -112,7 +112,7 @@ static void sep_do_callback(struct work_struct *work)
* on what operation is to be done
*/
static int sep_submit_work(struct workqueue_struct *work_queue,
- void(*funct)(void *),
+ void (*funct)(void *),
void *data)
{
struct sep_work_struct *sep_work;
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
index 122614c4092b..e301207eb0bd 100644
--- a/drivers/staging/sep/sep_main.c
+++ b/drivers/staging/sep/sep_main.c
@@ -1266,9 +1266,8 @@ static int sep_lock_user_pages(struct sep_device *sep,
/* Check the number of pages locked - if not all then exit with error */
if (result != num_pages) {
dev_warn(&sep->pdev->dev,
- "[PID%d] not all pages locked by get_user_pages, "
- "result 0x%X, num_pages 0x%X\n",
- current->pid, result, num_pages);
+ "[PID%d] not all pages locked by get_user_pages, result 0x%X, num_pages 0x%X\n",
+ current->pid, result, num_pages);
error = -ENOMEM;
goto end_function_with_error3;
}
@@ -1293,9 +1292,9 @@ static int sep_lock_user_pages(struct sep_device *sep,
lli_array[count].block_size = PAGE_SIZE;
dev_dbg(&sep->pdev->dev,
- "[PID%d] lli_array[%x].bus_address is %08lx, "
- "lli_array[%x].block_size is (hex) %x\n", current->pid,
- count, (unsigned long)lli_array[count].bus_address,
+ "[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
+ current->pid, count,
+ (unsigned long)lli_array[count].bus_address,
count, lli_array[count].block_size);
}
@@ -1314,8 +1313,7 @@ static int sep_lock_user_pages(struct sep_device *sep,
"[PID%d] After check if page 0 has all data\n",
current->pid);
dev_dbg(&sep->pdev->dev,
- "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
- "lli_array[0].block_size is (hex) %x\n",
+ "[PID%d] lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
current->pid,
(unsigned long)lli_array[0].bus_address,
lli_array[0].block_size);
@@ -1332,8 +1330,7 @@ static int sep_lock_user_pages(struct sep_device *sep,
"[PID%d] After last page size adjustment\n",
current->pid);
dev_dbg(&sep->pdev->dev,
- "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
- "lli_array[%x].block_size is (hex) %x\n",
+ "[PID%d] lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
current->pid,
num_pages - 1,
(unsigned long)lli_array[num_pages - 1].bus_address,
@@ -1449,8 +1446,7 @@ static int sep_lli_table_secure_dma(struct sep_device *sep,
start_page += PAGE_SIZE;
dev_dbg(&sep->pdev->dev,
- "[PID%d] lli_array[%x].bus_address is %08lx, "
- "lli_array[%x].block_size is (hex) %x\n",
+ "[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
current->pid,
count, (unsigned long)lli_array[count].bus_address,
count, lli_array[count].block_size);
@@ -1469,8 +1465,7 @@ static int sep_lli_table_secure_dma(struct sep_device *sep,
dev_dbg(&sep->pdev->dev,
"[PID%d] After check if page 0 has all data\n"
- "lli_array[0].bus_address is (hex) %08lx, "
- "lli_array[0].block_size is (hex) %x\n",
+ "lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
current->pid,
(unsigned long)lli_array[0].bus_address,
lli_array[0].block_size);
@@ -1484,8 +1479,7 @@ static int sep_lli_table_secure_dma(struct sep_device *sep,
dev_dbg(&sep->pdev->dev,
"[PID%d] After last page size adjustment\n"
- "lli_array[%x].bus_address is (hex) %08lx, "
- "lli_array[%x].block_size is (hex) %x\n",
+ "lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
current->pid, num_pages - 1,
(unsigned long)lli_array[num_pages - 1].bus_address,
num_pages - 1,
@@ -1745,9 +1739,8 @@ static void sep_debug_print_lli_tables(struct sep_device *sep,
while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
dev_dbg(&sep->pdev->dev,
- "[PID%d] lli table %08lx, "
- "table_data_size is (hex) %lx\n",
- current->pid, table_count, table_data_size);
+ "[PID%d] lli table %08lx, table_data_size is (hex) %lx\n",
+ current->pid, table_count, table_data_size);
dev_dbg(&sep->pdev->dev,
"[PID%d] num_table_entries is (hex) %lx\n",
current->pid, num_table_entries);
@@ -1762,8 +1755,8 @@ static void sep_debug_print_lli_tables(struct sep_device *sep,
(unsigned long) lli_table_ptr);
dev_dbg(&sep->pdev->dev,
- "[PID%d] phys address is %08lx "
- "block size is (hex) %x\n", current->pid,
+ "[PID%d] phys address is %08lx block size is (hex) %x\n",
+ current->pid,
(unsigned long)lli_table_ptr->bus_address,
lli_table_ptr->block_size);
}
@@ -1772,14 +1765,12 @@ static void sep_debug_print_lli_tables(struct sep_device *sep,
lli_table_ptr--;
dev_dbg(&sep->pdev->dev,
- "[PID%d] phys lli_table_ptr->block_size "
- "is (hex) %x\n",
+ "[PID%d] phys lli_table_ptr->block_size is (hex) %x\n",
current->pid,
lli_table_ptr->block_size);
dev_dbg(&sep->pdev->dev,
- "[PID%d] phys lli_table_ptr->physical_address "
- "is %08lx\n",
+ "[PID%d] phys lli_table_ptr->physical_address is %08lx\n",
current->pid,
(unsigned long)lli_table_ptr->bus_address);
@@ -1788,13 +1779,11 @@ static void sep_debug_print_lli_tables(struct sep_device *sep,
num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
dev_dbg(&sep->pdev->dev,
- "[PID%d] phys table_data_size is "
- "(hex) %lx num_table_entries is"
- " %lx bus_address is%lx\n",
- current->pid,
- table_data_size,
- num_table_entries,
- (unsigned long)lli_table_ptr->bus_address);
+ "[PID%d] phys table_data_size is (hex) %lx num_table_entries is %lx bus_address is%lx\n",
+ current->pid,
+ table_data_size,
+ num_table_entries,
+ (unsigned long)lli_table_ptr->bus_address);
if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
lli_table_ptr = (struct sep_lli_entry *)
@@ -2244,14 +2233,12 @@ static int sep_construct_dma_tables_from_lli(
table_data_size = out_table_data_size;
dev_dbg(&sep->pdev->dev,
- "[PID%d] construct tables from lli"
- " in_table_data_size is (hex) %x\n", current->pid,
- in_table_data_size);
+ "[PID%d] construct tables from lli in_table_data_size is (hex) %x\n",
+ current->pid, in_table_data_size);
dev_dbg(&sep->pdev->dev,
- "[PID%d] construct tables from lli"
- "out_table_data_size is (hex) %x\n", current->pid,
- out_table_data_size);
+ "[PID%d] construct tables from lli out_table_data_size is (hex) %x\n",
+ current->pid, out_table_data_size);
/* Construct input lli table */
sep_build_lli_table(sep, &lli_in_array[current_in_entry],
@@ -2316,8 +2303,7 @@ static int sep_construct_dma_tables_from_lli(
info_in_entry_ptr->block_size);
dev_dbg(&sep->pdev->dev,
- "[PID%d] output lli_table_out_ptr:"
- "%08lx %08x\n",
+ "[PID%d] output lli_table_out_ptr: %08lx %08x\n",
current->pid,
(unsigned long)info_out_entry_ptr->bus_address,
info_out_entry_ptr->block_size);
@@ -2446,8 +2432,8 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
- "[PID%d] sep_lock_kernel_pages for input "
- "virtual buffer failed\n", current->pid);
+ "[PID%d] sep_lock_kernel_pages for input virtual buffer failed\n",
+ current->pid);
goto end_function;
}
@@ -2460,8 +2446,8 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
if (error) {
dev_warn(&sep->pdev->dev,
- "[PID%d] sep_lock_kernel_pages for output "
- "virtual buffer failed\n", current->pid);
+ "[PID%d] sep_lock_kernel_pages for output virtual buffer failed\n",
+ current->pid);
goto end_function_free_lli_in;
}
@@ -2476,8 +2462,8 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
- "[PID%d] sep_lock_user_pages for input "
- "virtual buffer failed\n", current->pid);
+ "[PID%d] sep_lock_user_pages for input virtual buffer failed\n",
+ current->pid);
goto end_function;
}
@@ -2491,8 +2477,7 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
SEP_DRIVER_OUT_FLAG, dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
- "[PID%d] secure dma table setup "
- " for output virtual buffer failed\n",
+ "[PID%d] secure dma table setup for output virtual buffer failed\n",
current->pid);
goto end_function_free_lli_in;
@@ -2512,8 +2497,7 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
if (error) {
dev_warn(&sep->pdev->dev,
- "[PID%d] sep_lock_user_pages"
- " for output virtual buffer failed\n",
+ "[PID%d] sep_lock_user_pages for output virtual buffer failed\n",
current->pid);
goto end_function_free_lli_in;
@@ -2826,8 +2810,7 @@ int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
if (error) {
dev_warn(&sep->pdev->dev,
- "prepare DMA table call failed "
- "from prepare DCB call\n");
+ "prepare DMA table call failed from prepare DCB call\n");
goto end_function_error;
}
@@ -2889,7 +2872,8 @@ static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
* Go over each DCB and see if
* tail pointer must be updated
*/
- for (i = 0; i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
+ for (i = 0; i < (*dma_ctx)->nr_dcb_creat;
+ i++, dcb_table_ptr++) {
if (dcb_table_ptr->out_vr_tail_pt) {
pt_hold = (unsigned long)dcb_table_ptr->
out_vr_tail_pt;
@@ -3089,6 +3073,7 @@ static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCPREPAREDCB start\n",
current->pid);
+ /* fall-through */
case SEP_IOCPREPAREDCB_SECURE_DMA:
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
@@ -3762,8 +3747,7 @@ static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
if (actual_count != count_user) {
dev_warn(&sep->pdev->dev,
- "[PID%d] inconsistent message "
- "sizes 0x%08zX vs 0x%08zX\n",
+ "[PID%d] inconsistent message sizes 0x%08zX vs 0x%08zX\n",
current->pid, actual_count, count_user);
error = -EMSGSIZE;
goto end_function;
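
The long run of sep_main.c hunks is the standard cleanup for user-visible strings: a format string split across several quoted pieces defeats grepping the source for a logged message, so the pieces are joined even when the resulting line exceeds 80 columns, and only the argument list is re-wrapped. Before and after, taken from the hunk above:

	/* before: impossible to grep for the whole message */
	dev_warn(&sep->pdev->dev,
		 "[PID%d] inconsistent message "
		 "sizes 0x%08zX vs 0x%08zX\n",
		 current->pid, actual_count, count_user);

	/* after: one searchable literal, arguments aligned beneath it */
	dev_warn(&sep->pdev->dev,
		 "[PID%d] inconsistent message sizes 0x%08zX vs 0x%08zX\n",
		 current->pid, actual_count, count_user);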
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index f0fcbf7c7d7f..0267dd8b84b7 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -724,7 +724,7 @@ static int qt_startup(struct usb_serial *serial)
goto startup_error;
}
- switch (serial->dev->descriptor.idProduct) {
+ switch (le16_to_cpu(serial->dev->descriptor.idProduct)) {
case QUATECH_DSU100:
case QUATECH_QSU100:
case QUATECH_ESU100A:
@@ -762,7 +762,9 @@ static int qt_startup(struct usb_serial *serial)
}
- status = box_set_prebuffer_level(serial); /* sets to default value */
+ status = box_set_prebuffer_level(serial); /* sets to
+ * default value
+ */
if (status < 0) {
dev_dbg(dev, "box_set_prebuffer_level failed\n");
goto startup_error;
@@ -887,7 +889,8 @@ static int qt_open(struct tty_struct *tty,
(SERIAL_MSR_CTS | SERIAL_MSR_DSR | SERIAL_MSR_RI | SERIAL_MSR_CD);
/* Set Baud rate to default and turn off (default)flow control here */
- result = qt_setuart(serial, port->port_number, DEFAULT_DIVISOR, DEFAULT_LCR);
+ result = qt_setuart(serial, port->port_number, DEFAULT_DIVISOR,
+ DEFAULT_LCR);
if (result < 0) {
dev_dbg(&port->dev, "qt_setuart failed\n");
return result;
@@ -1014,11 +1017,13 @@ static void qt_close(struct usb_serial_port *port)
/* Close uart channel */
status = qt_close_channel(serial, index);
if (status < 0)
- dev_dbg(&port->dev, "%s - qt_close_channel failed.\n", __func__);
+ dev_dbg(&port->dev, "%s - qt_close_channel failed.\n",
+ __func__);
port0->open_ports--;
- dev_dbg(&port->dev, "qt_num_open_ports in close%d\n", port0->open_ports);
+ dev_dbg(&port->dev, "qt_num_open_ports in close%d\n",
+ port0->open_ports);
if (port0->open_ports == 0) {
if (serial->port[0]->interrupt_in_urb) {
@@ -1235,7 +1240,8 @@ static void qt_set_termios(struct tty_struct *tty,
/* Now determine flow control */
if (cflag & CRTSCTS) {
- dev_dbg(&port->dev, "%s - Enabling HW flow control\n", __func__);
+ dev_dbg(&port->dev, "%s - Enabling HW flow control\n",
+ __func__);
/* Enable RTS/CTS flow control */
status = box_set_hw_flow_ctrl(port->serial, index, 1);
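
The change at the top of qt_startup() fixes an endianness bug rather than style: USB descriptor fields are stored little-endian on the wire (__le16), so comparing idProduct against host-order constants only works on little-endian hosts unless the value is converted first. The pattern, wrapped in a hypothetical helper for illustration:

static int is_dsu100(struct usb_serial *serial)
{
	/* descriptor fields are __le16; convert before comparing */
	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);

	return product == QUATECH_DSU100;
}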
diff --git a/drivers/staging/silicom/bp_mod.h b/drivers/staging/silicom/bp_mod.h
index 8154a7bf050f..82b4963e97b6 100644
--- a/drivers/staging/silicom/bp_mod.h
+++ b/drivers/staging/silicom/bp_mod.h
@@ -497,11 +497,19 @@ static inline unsigned int jiffies_to_msecs(const unsigned long j)
#define BPCTLI_STATUS 0x00008 /* Device Status - RO */
/* HW related */
-#define BPCTLI_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */
-#define BPCTLI_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
+#define BPCTLI_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW
+ * Defineable Pin 6
+ */
+#define BPCTLI_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW
+ * Defineable Pin 7
+ */
#define BPCTLI_CTRL_SDP0_DATA 0x00040000 /* SWDPIN 0 value */
-#define BPCTLI_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
-#define BPCTLI_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */
+#define BPCTLI_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6
+ * 0=in 1=out
+ */
+#define BPCTLI_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7
+ * 0=in 1=out
+ */
#define BPCTLI_CTRL_SDP0_DIR 0x00400000 /* SDP0 Input or output */
#define BPCTLI_CTRL_SWDPIN1 0x00080000
#define BPCTLI_CTRL_SDP1_DIR 0x00800000
@@ -565,7 +573,9 @@ static inline unsigned int jiffies_to_msecs(const unsigned long j)
#define BPCTLI_SWFW_PHY0_SM 0x02
#define BPCTLI_SWFW_PHY1_SM 0x04
-#define BPCTLI_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+#define BPCTLI_SW_FW_SYNC 0x05B5C /* Software-Firmware
+ * Synchronization - RW
+ */
#define BPCTLI_SWSM 0x05B50 /* SW Semaphore */
#define BPCTLI_FWSM 0x05B54 /* FW Semaphore */
@@ -623,7 +633,8 @@ static inline unsigned int jiffies_to_msecs(const unsigned long j)
/*#define BP10G_MCLK_DATA_OUT9 BP10G_I2C_CLK_OUT
#define BP10G_MDIO_DATA_OUT9 BP10G_I2C_DATA_OUT*/
- /*#define BP10G_MCLK_DATA_OUT9*//*BP10G_I2C_DATA_OUT */
+ /*#define BP10G_MCLK_DATA_OUT9*/
+ /*BP10G_I2C_DATA_OUT */
#define BP10G_MDIO_DATA_OUT9 BP10G_I2C_DATA_OUT /*BP10G_I2C_CLK_OUT */
/* VIA EOSDP ! */
@@ -698,5 +709,3 @@ static inline unsigned int jiffies_to_msecs(const unsigned long j)
readl((void *)((a)->mem_map) + BP10GB_##reg))
#endif
-
-int bp_proc_create(void);
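
Dropping the bp_proc_create() prototype from bp_mod.h pairs with the bpctl_mod.c hunks that follow: functions with no users outside one file are declared static, which keeps them out of the global symbol table and lets the compiler and sparse flag them when they become dead. In outline:

/* bp_mod.h: no prototype once the function is file-local */

/* bpctl_mod.c */
static int bp_proc_create(void);	/* forward declaration stays in the .c file */

static int bp_proc_create(void)
{
	return 0;	/* body elided; only the linkage matters here */
}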
diff --git a/drivers/staging/silicom/bpctl_mod.c b/drivers/staging/silicom/bpctl_mod.c
index 20325f53328e..6b9365b28e8a 100644
--- a/drivers/staging/silicom/bpctl_mod.c
+++ b/drivers/staging/silicom/bpctl_mod.c
@@ -117,12 +117,12 @@ static int wdt_timer(struct bpctl_dev *pbpctl_dev, int *time_left);
static struct bpctl_dev *get_status_port_fn(struct bpctl_dev *pbpctl_dev);
static void if_scan_init(void);
-int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block);
-int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block);
-int bp_proc_create(void);
+static int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block);
+static int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block);
+static int bp_proc_create(void);
-int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
-int get_dev_idx_bsf(int bus, int slot, int func);
+static int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
+static int get_dev_idx_bsf(int bus, int slot, int func);
static int bp_get_dev_idx_bsf(struct net_device *dev, int *index)
{
@@ -262,7 +262,7 @@ static struct notifier_block bp_notifier_block = {
.notifier_call = bp_device_event,
};
-int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
+static int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
int wdt_time_left(struct bpctl_dev *pbpctl_dev);
static void write_pulse(struct bpctl_dev *pbpctl_dev,
@@ -1502,7 +1502,8 @@ static int send_wdt_pulse(struct bpctl_dev *pbpctl_dev)
return 0;
}
-void send_bypass_clear_pulse(struct bpctl_dev *pbpctl_dev, unsigned int value)
+static void send_bypass_clear_pulse(struct bpctl_dev *pbpctl_dev,
+ unsigned int value)
{
uint32_t ctrl_ext = 0;
@@ -1757,7 +1758,7 @@ static int wdt_pulse_int(struct bpctl_dev *pbpctl_dev)
/*************************************/
/* CMND_ON 0x4 (100)*/
-int cmnd_on(struct bpctl_dev *pbpctl_dev)
+static int cmnd_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1774,7 +1775,7 @@ int cmnd_on(struct bpctl_dev *pbpctl_dev)
}
/* CMND_OFF 0x2 (10)*/
-int cmnd_off(struct bpctl_dev *pbpctl_dev)
+static int cmnd_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1792,7 +1793,7 @@ int cmnd_off(struct bpctl_dev *pbpctl_dev)
}
/* BYPASS_ON (0xa)*/
-int bypass_on(struct bpctl_dev *pbpctl_dev)
+static int bypass_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1813,7 +1814,7 @@ int bypass_on(struct bpctl_dev *pbpctl_dev)
}
/* BYPASS_OFF (0x8 111)*/
-int bypass_off(struct bpctl_dev *pbpctl_dev)
+static int bypass_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1836,7 +1837,7 @@ int bypass_off(struct bpctl_dev *pbpctl_dev)
}
/* TAP_OFF (0x9)*/
-int tap_off(struct bpctl_dev *pbpctl_dev)
+static int tap_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if ((pbpctl_dev->bp_caps & TAP_CAP)
@@ -1849,7 +1850,7 @@ int tap_off(struct bpctl_dev *pbpctl_dev)
}
/* TAP_ON (0xb)*/
-int tap_on(struct bpctl_dev *pbpctl_dev)
+static int tap_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if ((pbpctl_dev->bp_caps & TAP_CAP)
@@ -1862,7 +1863,7 @@ int tap_on(struct bpctl_dev *pbpctl_dev)
}
/* DISC_OFF (0x9)*/
-int disc_off(struct bpctl_dev *pbpctl_dev)
+static int disc_off(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if ((pbpctl_dev->bp_caps & DISC_CAP) && (pbpctl_dev->bp_ext_ver >= 0x8)) {
@@ -1874,7 +1875,7 @@ int disc_off(struct bpctl_dev *pbpctl_dev)
}
/* DISC_ON (0xb)*/
-int disc_on(struct bpctl_dev *pbpctl_dev)
+static int disc_on(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if ((pbpctl_dev->bp_caps & DISC_CAP) && (pbpctl_dev->bp_ext_ver >= 0x8)) {
@@ -1885,58 +1886,8 @@ int disc_on(struct bpctl_dev *pbpctl_dev)
return ret;
}
-/* DISC_PORT_ON */
-int disc_port_on(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- struct bpctl_dev *pbpctl_dev_m;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1)
- write_data(pbpctl_dev_m, TX_DISA);
- else
- write_data(pbpctl_dev_m, TX_DISB);
-
- msec_delay_bp(LATCH_DELAY);
-
- }
- return ret;
-}
-
-/* DISC_PORT_OFF */
-int disc_port_off(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- struct bpctl_dev *pbpctl_dev_m;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1)
- write_data(pbpctl_dev_m, TX_ENA);
- else
- write_data(pbpctl_dev_m, TX_ENB);
-
- msec_delay_bp(LATCH_DELAY);
-
- }
- return ret;
-}
-
/*TWO_PORT_LINK_HW_EN (0xe)*/
-int tpl_hw_on(struct bpctl_dev *pbpctl_dev)
+static int tpl_hw_on(struct bpctl_dev *pbpctl_dev)
{
int ret = 0, ctrl = 0;
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -1964,7 +1915,7 @@ int tpl_hw_on(struct bpctl_dev *pbpctl_dev)
}
/*TWO_PORT_LINK_HW_DIS (0xc)*/
-int tpl_hw_off(struct bpctl_dev *pbpctl_dev)
+static int tpl_hw_off(struct bpctl_dev *pbpctl_dev)
{
int ret = 0, ctrl = 0;
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -1990,7 +1941,7 @@ int tpl_hw_off(struct bpctl_dev *pbpctl_dev)
}
/* WDT_OFF (0x6 110)*/
-int wdt_off(struct bpctl_dev *pbpctl_dev)
+static int wdt_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -2013,7 +1964,7 @@ int wdt_off(struct bpctl_dev *pbpctl_dev)
static unsigned int
wdt_val_array[] = { 1000, 1500, 2000, 3000, 4000, 8000, 16000, 32000, 0 };
-int wdt_on(struct bpctl_dev *pbpctl_dev, unsigned int timeout)
+static int wdt_on(struct bpctl_dev *pbpctl_dev, unsigned int timeout)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -2065,7 +2016,7 @@ int wdt_on(struct bpctl_dev *pbpctl_dev, unsigned int timeout)
return BP_NOT_CAP;
}
-void bp75_put_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
+static void bp75_put_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
{
u32 swsm;
@@ -2076,7 +2027,7 @@ void bp75_put_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
BPCTL_WRITE_REG(pbpctl_dev, SWSM, swsm);
}
-s32 bp75_get_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
+static s32 bp75_get_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
{
u32 swsm;
s32 ret_val = 0;
@@ -2190,7 +2141,8 @@ static s32 bp75_acquire_phy(struct bpctl_dev *pbpctl_dev)
return ret_val;
}
-s32 bp75_read_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset, u16 *data)
+static s32 bp75_read_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset,
+ u16 *data)
{
u32 i, mdic = 0;
s32 ret_val = 0;
@@ -2223,7 +2175,8 @@ s32 bp75_read_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset, u16 *data)
return ret_val;
}
-s32 bp75_write_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset, u16 data)
+static s32 bp75_write_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset,
+ u16 data)
{
u32 i, mdic = 0;
s32 ret_val = 0;
@@ -2353,11 +2306,10 @@ static int set_tx(struct bpctl_dev *pbpctl_dev, int tx_state)
if (PEG5_IF_SERIES(pbpctl_dev->subdevice)) {
if (tx_state) {
uint16_t mii_reg;
- if (!
- (ret =
- bp75_read_phy_reg(pbpctl_dev,
- BPCTLI_PHY_CONTROL,
- &mii_reg))) {
+ ret = bp75_read_phy_reg(pbpctl_dev,
+ BPCTLI_PHY_CONTROL,
+ &mii_reg);
+ if (!ret) {
if (mii_reg & BPCTLI_MII_CR_POWER_DOWN) {
ret =
bp75_write_phy_reg
@@ -2369,17 +2321,15 @@ static int set_tx(struct bpctl_dev *pbpctl_dev, int tx_state)
}
} else {
uint16_t mii_reg;
- if (!
- (ret =
- bp75_read_phy_reg(pbpctl_dev,
- BPCTLI_PHY_CONTROL,
- &mii_reg))) {
+ ret = bp75_read_phy_reg(pbpctl_dev,
+ BPCTLI_PHY_CONTROL,
+ &mii_reg);
+ if (!ret) {
mii_reg |= BPCTLI_MII_CR_POWER_DOWN;
- ret =
- bp75_write_phy_reg(pbpctl_dev,
- BPCTLI_PHY_CONTROL,
- mii_reg);
+ ret = bp75_write_phy_reg(pbpctl_dev,
+ BPCTLI_PHY_CONTROL,
+ mii_reg);
}
}
@@ -2534,7 +2484,7 @@ static int set_bp_force_link(struct bpctl_dev *pbpctl_dev, int tx_state)
}
/*RESET_CONT 0x20 */
-int reset_cont(struct bpctl_dev *pbpctl_dev)
+static int reset_cont(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -2551,7 +2501,7 @@ int reset_cont(struct bpctl_dev *pbpctl_dev)
}
/*DIS_BYPASS_CAP 0x22 */
-int dis_bypass_cap(struct bpctl_dev *pbpctl_dev)
+static int dis_bypass_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
@@ -2570,7 +2520,7 @@ int dis_bypass_cap(struct bpctl_dev *pbpctl_dev)
}
/*EN_BYPASS_CAP 0x24 */
-int en_bypass_cap(struct bpctl_dev *pbpctl_dev)
+static int en_bypass_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
@@ -2586,7 +2536,7 @@ int en_bypass_cap(struct bpctl_dev *pbpctl_dev)
}
/* BYPASS_STATE_PWRON 0x26*/
-int bypass_state_pwron(struct bpctl_dev *pbpctl_dev)
+static int bypass_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP) {
write_data(pbpctl_dev, BYPASS_STATE_PWRON);
@@ -2600,7 +2550,7 @@ int bypass_state_pwron(struct bpctl_dev *pbpctl_dev)
}
/* NORMAL_STATE_PWRON 0x28*/
-int normal_state_pwron(struct bpctl_dev *pbpctl_dev)
+static int normal_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP)
|| (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP)) {
@@ -2615,7 +2565,7 @@ int normal_state_pwron(struct bpctl_dev *pbpctl_dev)
}
/* BYPASS_STATE_PWROFF 0x27*/
-int bypass_state_pwroff(struct bpctl_dev *pbpctl_dev)
+static int bypass_state_pwroff(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP) {
write_data(pbpctl_dev, BYPASS_STATE_PWROFF);
@@ -2626,7 +2576,7 @@ int bypass_state_pwroff(struct bpctl_dev *pbpctl_dev)
}
/* NORMAL_STATE_PWROFF 0x29*/
-int normal_state_pwroff(struct bpctl_dev *pbpctl_dev)
+static int normal_state_pwroff(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP)) {
write_data(pbpctl_dev, NORMAL_STATE_PWROFF);
@@ -2637,7 +2587,7 @@ int normal_state_pwroff(struct bpctl_dev *pbpctl_dev)
}
/*TAP_STATE_PWRON 0x2a*/
-int tap_state_pwron(struct bpctl_dev *pbpctl_dev)
+static int tap_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
write_data(pbpctl_dev, TAP_STATE_PWRON);
@@ -2648,7 +2598,7 @@ int tap_state_pwron(struct bpctl_dev *pbpctl_dev)
}
/*DIS_TAP_CAP 0x2c*/
-int dis_tap_cap(struct bpctl_dev *pbpctl_dev)
+static int dis_tap_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_DIS_CAP) {
write_data(pbpctl_dev, DIS_TAP_CAP);
@@ -2659,7 +2609,7 @@ int dis_tap_cap(struct bpctl_dev *pbpctl_dev)
}
/*EN_TAP_CAP 0x2e*/
-int en_tap_cap(struct bpctl_dev *pbpctl_dev)
+static int en_tap_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_DIS_CAP) {
write_data(pbpctl_dev, EN_TAP_CAP);
@@ -2670,7 +2620,7 @@ int en_tap_cap(struct bpctl_dev *pbpctl_dev)
}
/*DISC_STATE_PWRON 0x2a*/
-int disc_state_pwron(struct bpctl_dev *pbpctl_dev)
+static int disc_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2683,7 +2633,7 @@ int disc_state_pwron(struct bpctl_dev *pbpctl_dev)
}
/*DIS_DISC_CAP 0x2c*/
-int dis_disc_cap(struct bpctl_dev *pbpctl_dev)
+static int dis_disc_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_DIS_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2695,60 +2645,8 @@ int dis_disc_cap(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-/*DISC_STATE_PWRON 0x2a*/
-int disc_port_state_pwron(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- struct bpctl_dev *pbpctl_dev_m;
-
- return BP_NOT_CAP;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1)
- write_data(pbpctl_dev_m, TX_DISA_PWRUP);
- else
- write_data(pbpctl_dev_m, TX_DISB_PWRUP);
-
- msec_delay_bp(LATCH_DELAY);
-
- }
- return ret;
-}
-
-int normal_port_state_pwron(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- struct bpctl_dev *pbpctl_dev_m;
- return BP_NOT_CAP;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1)
- write_data(pbpctl_dev_m, TX_ENA_PWRUP);
- else
- write_data(pbpctl_dev_m, TX_ENB_PWRUP);
-
- msec_delay_bp(LATCH_DELAY);
-
- }
- return ret;
-}
-
/*EN_TAP_CAP 0x2e*/
-int en_disc_cap(struct bpctl_dev *pbpctl_dev)
+static int en_disc_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_DIS_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2760,7 +2658,7 @@ int en_disc_cap(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int std_nic_on(struct bpctl_dev *pbpctl_dev)
+static int std_nic_on(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & STD_NIC_CAP) {
@@ -2814,7 +2712,7 @@ int std_nic_on(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int std_nic_off(struct bpctl_dev *pbpctl_dev)
+static int std_nic_off(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & STD_NIC_CAP) {
@@ -2977,7 +2875,7 @@ static void wd_reset_timer(unsigned long param)
}
/*WAIT_AT_PWRUP 0x80 */
-int bp_wait_at_pwup_en(struct bpctl_dev *pbpctl_dev)
+static int bp_wait_at_pwup_en(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -2992,7 +2890,7 @@ int bp_wait_at_pwup_en(struct bpctl_dev *pbpctl_dev)
}
/*DIS_WAIT_AT_PWRUP 0x81 */
-int bp_wait_at_pwup_dis(struct bpctl_dev *pbpctl_dev)
+static int bp_wait_at_pwup_dis(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3009,7 +2907,7 @@ int bp_wait_at_pwup_dis(struct bpctl_dev *pbpctl_dev)
/*EN_HW_RESET 0x82 */
-int bp_hw_reset_en(struct bpctl_dev *pbpctl_dev)
+static int bp_hw_reset_en(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3025,7 +2923,7 @@ int bp_hw_reset_en(struct bpctl_dev *pbpctl_dev)
/*DIS_HW_RESET 0x83 */
-int bp_hw_reset_dis(struct bpctl_dev *pbpctl_dev)
+static int bp_hw_reset_dis(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3040,7 +2938,7 @@ int bp_hw_reset_dis(struct bpctl_dev *pbpctl_dev)
}
-int wdt_exp_mode(struct bpctl_dev *pbpctl_dev, int mode)
+static int wdt_exp_mode(struct bpctl_dev *pbpctl_dev, int mode)
{
uint32_t status_reg = 0, status_reg1 = 0;
@@ -3091,7 +2989,7 @@ int wdt_exp_mode(struct bpctl_dev *pbpctl_dev, int mode)
return BP_NOT_CAP;
}
-int bypass_fw_ver(struct bpctl_dev *pbpctl_dev)
+static int bypass_fw_ver(struct bpctl_dev *pbpctl_dev)
{
if (is_bypass_fn(pbpctl_dev))
return read_reg(pbpctl_dev, VER_REG_ADDR);
@@ -3099,7 +2997,7 @@ int bypass_fw_ver(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_sign_check(struct bpctl_dev *pbpctl_dev)
+static int bypass_sign_check(struct bpctl_dev *pbpctl_dev)
{
if (is_bypass_fn(pbpctl_dev))
@@ -3210,7 +3108,7 @@ static int bp_force_link_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_from_last_read(struct bpctl_dev *pbpctl_dev)
+static int bypass_from_last_read(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -3230,7 +3128,7 @@ int bypass_from_last_read(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_status_clear(struct bpctl_dev *pbpctl_dev)
+static int bypass_status_clear(struct bpctl_dev *pbpctl_dev)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -3244,7 +3142,7 @@ int bypass_status_clear(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_flag_status(struct bpctl_dev *pbpctl_dev)
+static int bypass_flag_status(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_CAP)) {
@@ -3257,7 +3155,7 @@ int bypass_flag_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_flag_status_clear(struct bpctl_dev *pbpctl_dev)
+static int bypass_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_CAP) {
@@ -3272,7 +3170,7 @@ int bypass_flag_status_clear(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_change_status(struct bpctl_dev *pbpctl_dev)
+static int bypass_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -3291,18 +3189,6 @@ int bypass_change_status(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int bypass_off_status(struct bpctl_dev *pbpctl_dev)
-{
-
- if (pbpctl_dev->bp_caps & BP_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
- return ((((read_reg(pbpctl_dev, STATUS_REG_ADDR)) &
- BYPASS_OFF_MASK) == BYPASS_OFF_MASK) ? 1 : 0);
- }
- }
- return BP_NOT_CAP;
-}
-
static int bypass_status(struct bpctl_dev *pbpctl_dev)
{
u32 ctrl_ext = 0;
@@ -3386,7 +3272,7 @@ static int bypass_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int default_pwron_status(struct bpctl_dev *pbpctl_dev)
+static int default_pwron_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3418,7 +3304,7 @@ static int default_pwroff_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_bypass_cap_status(struct bpctl_dev *pbpctl_dev)
+static int dis_bypass_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
@@ -3431,31 +3317,7 @@ int dis_bypass_cap_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int cmd_en_status(struct bpctl_dev *pbpctl_dev)
-{
-
- if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
- return ((((read_reg(pbpctl_dev, STATUS_REG_ADDR)) &
- CMND_EN_MASK) == CMND_EN_MASK) ? 1 : 0);
- }
- }
- return BP_NOT_CAP;
-}
-
-int wdt_en_status(struct bpctl_dev *pbpctl_dev)
-{
-
- if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
- return ((((read_reg(pbpctl_dev, STATUS_REG_ADDR)) &
- WDT_EN_MASK) == WDT_EN_MASK) ? 1 : 0);
- }
- }
- return BP_NOT_CAP;
-}
-
-int wdt_programmed(struct bpctl_dev *pbpctl_dev, int *timeout)
+static int wdt_programmed(struct bpctl_dev *pbpctl_dev, int *timeout)
{
int ret = 0;
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -3481,40 +3343,7 @@ int wdt_programmed(struct bpctl_dev *pbpctl_dev, int *timeout)
return ret;
}
-int bypass_support(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
-
- if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER) {
- ret =
- ((((read_reg(pbpctl_dev, PRODUCT_CAP_REG_ADDR)) &
- BYPASS_SUPPORT_MASK) ==
- BYPASS_SUPPORT_MASK) ? 1 : 0);
- } else if (pbpctl_dev->bp_ext_ver == PXG2BPI_VER)
- ret = 1;
- } else
- ret = BP_NOT_CAP;
- return ret;
-}
-
-int tap_support(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
-
- if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER) {
- ret =
- ((((read_reg(pbpctl_dev, PRODUCT_CAP_REG_ADDR)) &
- TAP_SUPPORT_MASK) == TAP_SUPPORT_MASK) ? 1 : 0);
- } else if (pbpctl_dev->bp_ext_ver == PXG2BPI_VER)
- ret = 0;
- } else
- ret = BP_NOT_CAP;
- return ret;
-}
-
-int normal_support(struct bpctl_dev *pbpctl_dev)
+static int normal_support(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -3530,7 +3359,7 @@ int normal_support(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int get_bp_prod_caps(struct bpctl_dev *pbpctl_dev)
+static int get_bp_prod_caps(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & SW_CTL_CAP) &&
(pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER))
@@ -3539,7 +3368,7 @@ int get_bp_prod_caps(struct bpctl_dev *pbpctl_dev)
}
-int tap_flag_status(struct bpctl_dev *pbpctl_dev)
+static int tap_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_STATUS_CAP) {
@@ -3551,7 +3380,7 @@ int tap_flag_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int tap_flag_status_clear(struct bpctl_dev *pbpctl_dev)
+static int tap_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
uint32_t status_reg = 0;
if (pbpctl_dev->bp_caps & TAP_STATUS_CAP) {
@@ -3565,7 +3394,7 @@ int tap_flag_status_clear(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int tap_change_status(struct bpctl_dev *pbpctl_dev)
+static int tap_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER) {
@@ -3582,17 +3411,7 @@ int tap_change_status(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int tap_off_status(struct bpctl_dev *pbpctl_dev)
-{
- if (pbpctl_dev->bp_caps & TAP_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
- return ((((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR)) &
- TAP_OFF_MASK) == TAP_OFF_MASK) ? 1 : 0);
- }
- return BP_NOT_CAP;
-}
-
-int tap_status(struct bpctl_dev *pbpctl_dev)
+static int tap_status(struct bpctl_dev *pbpctl_dev)
{
u32 ctrl_ext = 0;
@@ -3631,7 +3450,7 @@ int tap_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int default_pwron_tap_status(struct bpctl_dev *pbpctl_dev)
+static int default_pwron_tap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
@@ -3642,7 +3461,7 @@ int default_pwron_tap_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_tap_cap_status(struct bpctl_dev *pbpctl_dev)
+static int dis_tap_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
@@ -3653,7 +3472,7 @@ int dis_tap_cap_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_flag_status(struct bpctl_dev *pbpctl_dev)
+static int disc_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3665,7 +3484,7 @@ int disc_flag_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_flag_status_clear(struct bpctl_dev *pbpctl_dev)
+static int disc_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
uint32_t status_reg = 0;
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3679,7 +3498,7 @@ int disc_flag_status_clear(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_change_status(struct bpctl_dev *pbpctl_dev)
+static int disc_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3690,7 +3509,7 @@ int disc_change_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_off_status(struct bpctl_dev *pbpctl_dev)
+static int disc_off_status(struct bpctl_dev *pbpctl_dev)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
u32 ctrl_ext = 0;
@@ -3786,7 +3605,7 @@ static int disc_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int default_pwron_disc_status(struct bpctl_dev *pbpctl_dev)
+static int default_pwron_disc_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3797,7 +3616,7 @@ int default_pwron_disc_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_disc_cap_status(struct bpctl_dev *pbpctl_dev)
+static int dis_disc_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DIS_DISC_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3808,55 +3627,7 @@ int dis_disc_cap_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_port_status(struct bpctl_dev *pbpctl_dev)
-{
- int ret = BP_NOT_CAP;
- struct bpctl_dev *pbpctl_dev_m;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1) {
- return ((((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR)) &
- TX_DISA_MASK) == TX_DISA_MASK) ? 1 : 0);
- } else
- return ((((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR)) &
- TX_DISB_MASK) == TX_DISB_MASK) ? 1 : 0);
-
- }
- return ret;
-}
-
-int default_pwron_disc_port_status(struct bpctl_dev *pbpctl_dev)
-{
- int ret = BP_NOT_CAP;
- struct bpctl_dev *pbpctl_dev_m;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1)
- return ret;
- /* return((((read_reg(pbpctl_dev,STATUS_TAP_REG_ADDR)) & TX_DISA_MASK)==TX_DISA_MASK)?1:0); */
- else
- return ret;
- /* return((((read_reg(pbpctl_dev,STATUS_TAP_REG_ADDR)) & TX_DISA_MASK)==TX_DISA_MASK)?1:0); */
-
- }
- return ret;
-}
-
-int wdt_exp_mode_status(struct bpctl_dev *pbpctl_dev)
+static int wdt_exp_mode_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver <= PXG2BPI_VER)
@@ -3879,7 +3650,7 @@ int wdt_exp_mode_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int tpl2_flag_status(struct bpctl_dev *pbpctl_dev)
+static int tpl2_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps_ex & TPL2_CAP_EX) {
@@ -3890,22 +3661,7 @@ int tpl2_flag_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int tpl_hw_status(struct bpctl_dev *pbpctl_dev)
-{
- struct bpctl_dev *pbpctl_dev_b = NULL;
-
- pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
- if (!pbpctl_dev_b)
- return BP_NOT_CAP;
-
- if (TPL_IF_SERIES(pbpctl_dev->subdevice))
- return (((BPCTL_READ_REG(pbpctl_dev, CTRL)) &
- BPCTLI_CTRL_SWDPIN0) != 0 ? 1 : 0);
- return BP_NOT_CAP;
-}
-
-
-int bp_wait_at_pwup_status(struct bpctl_dev *pbpctl_dev)
+static int bp_wait_at_pwup_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3916,7 +3672,7 @@ int bp_wait_at_pwup_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bp_hw_reset_status(struct bpctl_dev *pbpctl_dev)
+static int bp_hw_reset_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3930,7 +3686,7 @@ int bp_hw_reset_status(struct bpctl_dev *pbpctl_dev)
}
-int std_nic_status(struct bpctl_dev *pbpctl_dev)
+static int std_nic_status(struct bpctl_dev *pbpctl_dev)
{
int status_val = 0;
@@ -3978,7 +3734,7 @@ int std_nic_status(struct bpctl_dev *pbpctl_dev)
/******************************************************/
/**************SW_INIT*********************************/
/******************************************************/
-void bypass_caps_init(struct bpctl_dev *pbpctl_dev)
+static void bypass_caps_init(struct bpctl_dev *pbpctl_dev)
{
u_int32_t ctrl_ext = 0;
struct bpctl_dev *pbpctl_dev_m = NULL;
@@ -4196,23 +3952,7 @@ void bypass_caps_init(struct bpctl_dev *pbpctl_dev)
}
}
-int bypass_off_init(struct bpctl_dev *pbpctl_dev)
-{
- int ret = cmnd_on(pbpctl_dev);
- if (ret < 0)
- return ret;
- if (INTEL_IF_SERIES(pbpctl_dev->subdevice))
- return dis_bypass_cap(pbpctl_dev);
- wdt_off(pbpctl_dev);
- if (pbpctl_dev->bp_caps & BP_CAP)
- bypass_off(pbpctl_dev);
- if (pbpctl_dev->bp_caps & TAP_CAP)
- tap_off(pbpctl_dev);
- cmnd_off(pbpctl_dev);
- return 0;
-}
-
-void remove_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
+static void remove_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
#ifdef BP_SELF_TEST
struct bpctl_dev *pbpctl_dev_sl = NULL;
@@ -4241,7 +3981,7 @@ void remove_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
}
-int init_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
+static int init_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
init_timer(&pbpctl_dev->bp_timer);
@@ -4288,7 +4028,7 @@ int bp_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
#endif
-int set_bypass_wd_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
+static int set_bypass_wd_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
if (pbpctl_dev->reset_time != param) {
@@ -4307,7 +4047,7 @@ int set_bypass_wd_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
return BP_NOT_CAP;
}
-int get_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP)
return pbpctl_dev->reset_time;
@@ -4378,7 +4118,7 @@ int is_bypass_fn(struct bpctl_dev *pbpctl_dev)
return (((pbpctl_dev->func == 0) || (pbpctl_dev->func == 2)) ? 1 : 0);
}
-int set_bypass_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
+static int set_bypass_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
@@ -4396,12 +4136,12 @@ int set_bypass_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_fn(struct bpctl_dev *pbpctl_dev)
{
return bypass_status(pbpctl_dev);
}
-int get_bypass_change_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_change_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4409,7 +4149,7 @@ int get_bypass_change_fn(struct bpctl_dev *pbpctl_dev)
return bypass_change_status(pbpctl_dev);
}
-int set_dis_bypass_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
+static int set_dis_bypass_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4428,7 +4168,7 @@ int set_dis_bypass_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
return ret;
}
-int get_dis_bypass_fn(struct bpctl_dev *pbpctl_dev)
+static int get_dis_bypass_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4436,7 +4176,7 @@ int get_dis_bypass_fn(struct bpctl_dev *pbpctl_dev)
return dis_bypass_cap_status(pbpctl_dev);
}
-int set_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
+static int set_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4455,7 +4195,7 @@ int set_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4463,7 +4203,7 @@ int get_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev)
return default_pwroff_status(pbpctl_dev);
}
-int set_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
+static int set_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4482,7 +4222,7 @@ int set_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4490,7 +4230,7 @@ int get_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev)
return default_pwron_status(pbpctl_dev);
}
-int set_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int timeout)
+static int set_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int timeout)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4512,7 +4252,7 @@ int set_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int timeout)
return ret;
}
-int get_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int *timeout)
+static int get_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int *timeout)
{
if (!pbpctl_dev)
return -1;
@@ -4520,7 +4260,7 @@ int get_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int *timeout)
return wdt_programmed(pbpctl_dev, timeout);
}
-int get_wd_expire_time_fn(struct bpctl_dev *pbpctl_dev, int *time_left)
+static int get_wd_expire_time_fn(struct bpctl_dev *pbpctl_dev, int *time_left)
{
if (!pbpctl_dev)
return -1;
@@ -4528,7 +4268,7 @@ int get_wd_expire_time_fn(struct bpctl_dev *pbpctl_dev, int *time_left)
return wdt_timer(pbpctl_dev, time_left);
}
-int reset_bypass_wd_timer_fn(struct bpctl_dev *pbpctl_dev)
+static int reset_bypass_wd_timer_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4536,7 +4276,7 @@ int reset_bypass_wd_timer_fn(struct bpctl_dev *pbpctl_dev)
return wdt_timer_reload(pbpctl_dev);
}
-int get_wd_set_caps_fn(struct bpctl_dev *pbpctl_dev)
+static int get_wd_set_caps_fn(struct bpctl_dev *pbpctl_dev)
{
int bp_status = 0;
@@ -4560,7 +4300,7 @@ int get_wd_set_caps_fn(struct bpctl_dev *pbpctl_dev)
return bp_status;
}
-int set_std_nic_fn(struct bpctl_dev *pbpctl_dev, int nic_mode)
+static int set_std_nic_fn(struct bpctl_dev *pbpctl_dev, int nic_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4580,7 +4320,7 @@ int set_std_nic_fn(struct bpctl_dev *pbpctl_dev, int nic_mode)
return ret;
}
-int get_std_nic_fn(struct bpctl_dev *pbpctl_dev)
+static int get_std_nic_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4588,7 +4328,7 @@ int get_std_nic_fn(struct bpctl_dev *pbpctl_dev)
return std_nic_status(pbpctl_dev);
}
-int set_tap_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
+static int set_tap_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -4604,7 +4344,7 @@ int set_tap_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_tap_fn(struct bpctl_dev *pbpctl_dev)
+static int get_tap_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4612,7 +4352,7 @@ int get_tap_fn(struct bpctl_dev *pbpctl_dev)
return tap_status(pbpctl_dev);
}
-int set_tap_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
+static int set_tap_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4630,7 +4370,7 @@ int set_tap_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
return ret;
}
-int get_tap_pwup_fn(struct bpctl_dev *pbpctl_dev)
+static int get_tap_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4642,7 +4382,7 @@ int get_tap_pwup_fn(struct bpctl_dev *pbpctl_dev)
return ((ret == 0) ? 1 : 0);
}
-int get_tap_change_fn(struct bpctl_dev *pbpctl_dev)
+static int get_tap_change_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4650,7 +4390,7 @@ int get_tap_change_fn(struct bpctl_dev *pbpctl_dev)
return tap_change_status(pbpctl_dev);
}
-int set_dis_tap_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
+static int set_dis_tap_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4667,7 +4407,7 @@ int set_dis_tap_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
return BP_NOT_CAP;
}
-int get_dis_tap_fn(struct bpctl_dev *pbpctl_dev)
+static int get_dis_tap_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4675,7 +4415,7 @@ int get_dis_tap_fn(struct bpctl_dev *pbpctl_dev)
return dis_tap_cap_status(pbpctl_dev);
}
-int set_disc_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
+static int set_disc_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
if (!pbpctl_dev)
return -1;
@@ -4692,7 +4432,7 @@ int set_disc_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
return BP_NOT_CAP;
}
-int get_disc_fn(struct bpctl_dev *pbpctl_dev)
+static int get_disc_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4703,7 +4443,7 @@ int get_disc_fn(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int set_disc_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
+static int set_disc_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4721,7 +4461,7 @@ int set_disc_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
return ret;
}
-int get_disc_pwup_fn(struct bpctl_dev *pbpctl_dev)
+static int get_disc_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4731,7 +4471,7 @@ int get_disc_pwup_fn(struct bpctl_dev *pbpctl_dev)
return (ret == 0 ? 1 : (ret < 0 ? BP_NOT_CAP : 0));
}
-int get_disc_change_fn(struct bpctl_dev *pbpctl_dev)
+static int get_disc_change_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4741,7 +4481,7 @@ int get_disc_change_fn(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int set_dis_disc_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
+static int set_dis_disc_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4759,7 +4499,7 @@ int set_dis_disc_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
return BP_NOT_CAP;
}
-int get_dis_disc_fn(struct bpctl_dev *pbpctl_dev)
+static int get_dis_disc_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4770,55 +4510,7 @@ int get_dis_disc_fn(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int set_disc_port_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
-{
- int ret = BP_NOT_CAP;
- if (!pbpctl_dev)
- return -1;
-
- if (!disc_mode)
- ret = disc_port_off(pbpctl_dev);
- else
- ret = disc_port_on(pbpctl_dev);
-
- return ret;
-}
-
-int get_disc_port_fn(struct bpctl_dev *pbpctl_dev)
-{
- if (!pbpctl_dev)
- return -1;
-
- return disc_port_status(pbpctl_dev);
-}
-
-int set_disc_port_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
-{
- int ret = BP_NOT_CAP;
- if (!pbpctl_dev)
- return -1;
-
- if (!disc_mode)
- ret = normal_port_state_pwron(pbpctl_dev);
- else
- ret = disc_port_state_pwron(pbpctl_dev);
-
- return ret;
-}
-
-int get_disc_port_pwup_fn(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- if (!pbpctl_dev)
- return -1;
-
- ret = default_pwron_disc_port_status(pbpctl_dev);
- if (ret < 0)
- return ret;
- return ((ret == 0) ? 1 : 0);
-}
-
-int get_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev)
+static int get_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4826,7 +4518,7 @@ int get_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev)
return wdt_exp_mode_status(pbpctl_dev);
}
-int set_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev, int param)
+static int set_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev, int param)
{
if (!pbpctl_dev)
return -1;
@@ -4834,19 +4526,7 @@ int set_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev, int param)
return wdt_exp_mode(pbpctl_dev, param);
}
-int reset_cont_fn(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- if (!pbpctl_dev)
- return -1;
-
- ret = cmnd_on(pbpctl_dev);
- if (ret < 0)
- return ret;
- return reset_cont(pbpctl_dev);
-}
-
-int set_tx_fn(struct bpctl_dev *pbpctl_dev, int tx_state)
+static int set_tx_fn(struct bpctl_dev *pbpctl_dev, int tx_state)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -4867,7 +4547,7 @@ int set_tx_fn(struct bpctl_dev *pbpctl_dev, int tx_state)
return set_tx(pbpctl_dev, tx_state);
}
-int set_bp_force_link_fn(int dev_num, int tx_state)
+static int set_bp_force_link_fn(int dev_num, int tx_state)
{
static struct bpctl_dev *bpctl_dev_curr;
@@ -4879,7 +4559,7 @@ int set_bp_force_link_fn(int dev_num, int tx_state)
return set_bp_force_link(bpctl_dev_curr, tx_state);
}
-int set_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev, int param)
+static int set_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev, int param)
{
if (!pbpctl_dev)
return -1;
@@ -4887,7 +4567,7 @@ int set_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev, int param)
return set_bypass_wd_auto(pbpctl_dev, param);
}
-int get_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev)
+static int get_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4914,7 +4594,7 @@ int get_bp_self_test_fn(struct bpctl_dev *pbpctl_dev)
#endif
-int get_bypass_caps_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_caps_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4923,7 +4603,8 @@ int get_bypass_caps_fn(struct bpctl_dev *pbpctl_dev)
}
-int get_bypass_slave_fn(struct bpctl_dev *pbpctl_dev, struct bpctl_dev **pbpctl_dev_out)
+static int get_bypass_slave_fn(struct bpctl_dev *pbpctl_dev,
+ struct bpctl_dev **pbpctl_dev_out)
{
int idx_dev = 0;
if (!pbpctl_dev)
@@ -4955,7 +4636,7 @@ int get_bypass_slave_fn(struct bpctl_dev *pbpctl_dev, struct bpctl_dev **pbpctl_
return 0;
}
-int is_bypass(struct bpctl_dev *pbpctl_dev)
+static int is_bypass(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4966,7 +4647,7 @@ int is_bypass(struct bpctl_dev *pbpctl_dev)
return 0;
}
-int get_tx_fn(struct bpctl_dev *pbpctl_dev)
+static int get_tx_fn(struct bpctl_dev *pbpctl_dev)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
@@ -4986,7 +4667,7 @@ int get_tx_fn(struct bpctl_dev *pbpctl_dev)
return tx_status(pbpctl_dev);
}
-int get_bp_force_link_fn(int dev_num)
+static int get_bp_force_link_fn(int dev_num)
{
static struct bpctl_dev *bpctl_dev_curr;
@@ -5049,7 +4730,7 @@ static void bp_tpl_timer_fn(unsigned long param)
mod_timer(&pbpctl_dev->bp_tpl_timer, jiffies + BP_LINK_MON_DELAY * HZ);
}
-void remove_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
+static void remove_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
@@ -5067,7 +4748,7 @@ void remove_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
return;
}
-int init_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
+static int init_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -5080,7 +4761,7 @@ int init_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int set_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
+static int set_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
{
if (!pbpctl_dev)
return -1;
@@ -5098,17 +4779,7 @@ int set_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
return BP_NOT_CAP;
}
-int get_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
-{
- if (!pbpctl_dev)
- return -1;
- if (pbpctl_dev->bp_caps & TPL_CAP)
- return pbpctl_dev->bp_tpl_flag;
-
- return BP_NOT_CAP;
-}
-
-int set_tpl_fn(struct bpctl_dev *pbpctl_dev, int tpl_mode)
+static int set_tpl_fn(struct bpctl_dev *pbpctl_dev, int tpl_mode)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -5138,7 +4809,7 @@ int set_tpl_fn(struct bpctl_dev *pbpctl_dev, int tpl_mode)
return BP_NOT_CAP;
}
-int get_tpl_fn(struct bpctl_dev *pbpctl_dev)
+static int get_tpl_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (!pbpctl_dev)
@@ -5152,7 +4823,7 @@ int get_tpl_fn(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int set_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
+static int set_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -5172,7 +4843,7 @@ int set_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -5185,7 +4856,7 @@ int get_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int set_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
+static int set_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -5205,7 +4876,7 @@ int set_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -5220,7 +4891,7 @@ int get_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev)
}
-int get_bypass_info_fn(struct bpctl_dev *pbpctl_dev, char *dev_name,
+static int get_bypass_info_fn(struct bpctl_dev *pbpctl_dev, char *dev_name,
char *add_param)
{
if (!pbpctl_dev)
@@ -5232,7 +4903,7 @@ int get_bypass_info_fn(struct bpctl_dev *pbpctl_dev, char *dev_name,
return 0;
}
-int get_dev_idx_bsf(int bus, int slot, int func)
+static int get_dev_idx_bsf(int bus, int slot, int func)
{
int idx_dev = 0;
for (idx_dev = 0;
@@ -7022,12 +6693,6 @@ int set_wd_exp_mode_sd(int ifindex, int param)
}
EXPORT_SYMBOL(set_wd_exp_mode_sd);
-int reset_cont_sd(int ifindex)
-{
- return reset_cont_fn(get_dev_idx_p(ifindex));
-
-}
-
int set_tx_sd(int ifindex, int tx_state)
{
return set_tx_fn(get_dev_idx_p(ifindex), tx_state);
@@ -7118,7 +6783,7 @@ EXPORT_SYMBOL(bp_if_scan_sd);
static struct proc_dir_entry *bp_procfs_dir;
-int bp_proc_create(void)
+static int bp_proc_create(void)
{
bp_procfs_dir = proc_mkdir(BP_PROC_DIR, init_net.proc_net);
if (bp_procfs_dir == (struct proc_dir_entry *)0) {
@@ -7746,7 +7411,7 @@ static int show_wd_autoreset(struct seq_file *m, void *v)
}
RW_FOPS(wd_autoreset)
-int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block)
+static int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block)
{
struct bypass_pfs_sd *current_pfs = &(pbp_device_block->bypass_pfs_set);
static struct proc_dir_entry *procfs_dir;
@@ -7816,7 +7481,7 @@ int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block)
return ret;
}
-int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block)
+static int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block)
{
struct bypass_pfs_sd *current_pfs = &pbp_device_block->bypass_pfs_set;
diff --git a/drivers/staging/silicom/bypasslib/bp_ioctl.h b/drivers/staging/silicom/bypasslib/bp_ioctl.h
index 2d1ef5384436..bf47f786866b 100644
--- a/drivers/staging/silicom/bypasslib/bp_ioctl.h
+++ b/drivers/staging/silicom/bypasslib/bp_ioctl.h
@@ -51,9 +51,9 @@
#define WDT_STEP_TIME 0x10 /* BIT_4 */
#define WD_MIN_TIME_GET(desc) (desc & 0xf)
-#define WD_STEP_COUNT_GET(desc) (desc>>5) & 0xf
+#define WD_STEP_COUNT_GET(desc) ((desc>>5) & 0xf)
-typedef enum {
+enum {
IS_BYPASS = 1,
GET_BYPASS_SLAVE,
GET_BYPASS_CAPS,
@@ -103,7 +103,7 @@ typedef enum {
SET_BP_HW_RESET,
} CMND_TYPE;
-typedef enum {
+enum {
IF_SCAN_SD,
GET_DEV_NUM_SD,
IS_BYPASS_SD,
@@ -156,7 +156,7 @@ typedef enum {
} CMND_TYPE_SD;
-#define SIOCGIFBYPASS SIOCDEVPRIVATE+10
+#define SIOCGIFBYPASS (SIOCDEVPRIVATE+10)
struct bp_info {
char prod_name[14];
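
The added parentheses in WD_STEP_COUNT_GET and SIOCGIFBYPASS guard the macro expansions against operator-precedence surprises at the call site. A minimal standalone sketch of the failure mode the change prevents (the descriptor value and the _OLD/_NEW names are made up for illustration):

#include <stdio.h>

/* The two forms of the macro, mirroring the before/after hunk above. */
#define WD_STEP_COUNT_GET_OLD(desc)	(desc>>5) & 0xf
#define WD_STEP_COUNT_GET_NEW(desc)	((desc>>5) & 0xf)

int main(void)
{
	unsigned int desc = 0xa5;	/* made-up capability descriptor */

	/* '+' binds tighter than '&', so the old form folds the +1 into the
	 * mask: (desc>>5) & (0xf + 1) == 5 & 16 == 0.  The new form gives
	 * the intended ((desc>>5) & 0xf) + 1 == 6. */
	unsigned int bad  = WD_STEP_COUNT_GET_OLD(desc) + 1;
	unsigned int good = WD_STEP_COUNT_GET_NEW(desc) + 1;

	printf("unparenthesized: %u, parenthesized: %u\n", bad, good);
	return 0;
}
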
diff --git a/drivers/staging/silicom/bypasslib/libbp_sd.h b/drivers/staging/silicom/bypasslib/libbp_sd.h
index 3b4f8364ed18..cac4b0b2ed78 100644
--- a/drivers/staging/silicom/bypasslib/libbp_sd.h
+++ b/drivers/staging/silicom/bypasslib/libbp_sd.h
@@ -18,7 +18,7 @@
* @if_index: network device index
*
* Output:
- * 1 - if device is bypass controlling device,
+ * 1 - if device is bypass controlling device,
* 0 - if device is bypass slave device
* -1 - device not support Bypass
**/
@@ -30,7 +30,7 @@ int is_bypass_sd(int if_index);
*
* Output:
* network device index of the slave device
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int get_bypass_slave_sd(int if_index);
@@ -39,55 +39,72 @@ int get_bypass_slave_sd(int if_index);
* @if_index: network device index
*
* Output:
- * flags word on success;flag word is a 32-bit mask word with each bit defines different
- * capability as described bellow.
+ * flags word on success; the flags word is a 32-bit mask where each bit
+ * defines a different capability as described below.
* Value of 1 for supporting this feature. 0 for not supporting this feature.
- * -1 - on failure (if the device is not capable of the operation or not a Bypass device)
- * Bit feature description
- *
- * 0 BP_CAP The interface is Bypass capable in general
- *
- * 1 BP_STATUS_CAP The interface can report of the current Bypass mode
- *
- * 2 BP_STATUS_CHANGE_CAP The interface can report on a change to bypass mode from
- * the last time the mode was defined
- *
- * 3 SW_CTL_CAP The interface is Software controlled capable for bypass/non bypass modes.
- *
- * 4 BP_DIS_CAP The interface is capable of disabling the Bypass mode at all times.
- * This mode will retain its mode even during power loss and also after
- * power recovery. This will overcome on any bypass operation due to
- * watchdog timeout or set bypass command.
- *
- * 5 BP_DIS_STATUS_CAP The interface can report of the current DIS_BP_CAP
- *
- * 6 STD_NIC_CAP The interface is capable to be configured to operate as standard, non Bypass,
- * NIC interface (have direct connection to interfaces at all power modes)
- *
- * 7 BP_PWOFF_NO_CAP The interface can be in Bypass mode at power off state
- *
- * 8 BP_PWOFF_OFF_CAP The interface can disconnect the Bypass mode at power off state without
- * effecting all the other states of operation
- *
- * 9 BP_PWOFF_CTL_CAP The behavior of the Bypass mode at Power-off state can be controlled by
- * software without effecting any other state
- *
- *10 BP_PWUP_ON_CAP The interface can be in Bypass mode when power is turned on
- * (until the system take control of the bypass functionality)
- *
- *11 BP_PWUP_OFF_CAP The interface can disconnect from Bypass mode when power is turned on
- * (until the system take control of the bypass functionality)
- *
- *12 BP_PWUP_CTL_CAP The behavior of the Bypass mode at Power-up can be controlled by software
- *
- *13 WD_CTL_CAP The interface has watchdog capabilities to turn to Bypass mode when not reset
- * for defined period of time.
- *
- *14 WD_STATUS_CAP The interface can report on the watchdog status (Active/inactive)
- *
- *15 WD_TIMEOUT_CAP The interface can report the time left till watchdog triggers to Bypass mode.
- *
- *16-31 RESERVED
+ * -1 - on failure (if the device is not capable of the operation or not a
+ * Bypass device)
+ * Bit feature description
+ *
+ * 0 BP_CAP The interface is Bypass capable in general
+ *
+ * 1 BP_STATUS_CAP The interface can report on the current Bypass
+ * mode
+ *
+ * 2 BP_STATUS_CHANGE_CAP The interface can report on a change to bypass
+ * mode from the last time the mode was defined
+ *
+ * 3 SW_CTL_CAP The interface is Software controlled capable for
+ * bypass/non bypass modes.
+ *
+ * 4 BP_DIS_CAP The interface is capable of disabling the Bypass
+ * mode at all times. This mode is retained even
+ * during power loss and after power recovery, and
+ * overrides any bypass operation due to watchdog
+ * timeout or a set bypass command.
+ *
+ * 5 BP_DIS_STATUS_CAP The interface can report on the current
+ * DIS_BP_CAP
+ *
+ * 6 STD_NIC_CAP The interface can be configured to operate as a
+ * standard, non Bypass, NIC interface (with a
+ * direct connection to interfaces at all
+ * power modes)
+ *
+ * 7 BP_PWOFF_NO_CAP The interface can be in Bypass mode at power off
+ * state
+ *
+ * 8 BP_PWOFF_OFF_CAP The interface can disconnect the Bypass mode at
+ * power off state without affecting all the other
+ * states of operation
+ *
+ * 9 BP_PWOFF_CTL_CAP The behavior of the Bypass mode at Power-off
+ * state can be controlled by software without
+ * affecting any other state
+ *
+ *10 BP_PWUP_ON_CAP The interface can be in Bypass mode when power
+ * is turned on (until the system takes control of
+ * the bypass functionality)
+ *
+ *11 BP_PWUP_OFF_CAP The interface can disconnect from Bypass mode
+ * when power is turned on (until the system takes
+ * control of the bypass functionality)
+ *
+ *12 BP_PWUP_CTL_CAP The behavior of the Bypass mode at Power-up can
+ * be controlled by software
+ *
+ *13 WD_CTL_CAP The interface has watchdog capabilities to turn
+ * to Bypass mode when not reset for a defined
+ * period of time.
+ *
+ *14 WD_STATUS_CAP The interface can report on the watchdog status
+ * (Active/inactive)
+ *
+ *15 WD_TIMEOUT_CAP The interface can report the time left till
+ * watchdog triggers to Bypass mode.
+ *
+ *16-31 RESERVED
*
* **/
int get_bypass_caps_sd(int if_index);
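
For illustration, a minimal sketch of decoding the returned flags word using the bit positions documented above; the sample value is made up, and the EX_ masks only stand in for the named capability macros the driver headers provide:

#include <stdio.h>

/* Bit positions taken from the table above. */
#define EX_BP_CAP	(1u << 0)	/* bypass capable in general */
#define EX_SW_CTL_CAP	(1u << 3)	/* software controlled capable */
#define EX_WD_CTL_CAP	(1u << 13)	/* watchdog capable */

static void decode_caps(int caps)
{
	if (caps < 0) {
		printf("not a bypass capable/controlling device\n");
		return;
	}
	printf("bypass:   %s\n", (caps & EX_BP_CAP)     ? "yes" : "no");
	printf("sw ctl:   %s\n", (caps & EX_SW_CTL_CAP) ? "yes" : "no");
	printf("watchdog: %s\n", (caps & EX_WD_CTL_CAP) ? "yes" : "no");
}

int main(void)
{
	/* Made-up value standing in for get_bypass_caps_sd(if_index). */
	decode_caps(0x200f);
	return 0;
}
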
@@ -97,34 +114,35 @@ int get_bypass_caps_sd(int if_index);
* @if_index: network device index
*
* Output:
- *
- * Set of numbers defining the various parameters of the watchdog capable
+ *
+ * Set of numbers defining the various parameters of the watchdog capable
 * to be set to, as described below.
* -1 - on failure (device not support Bypass or it's a slave device)
- *
+ *
* Bit feature description
- *
+ *
* 0-3 WD_MIN_TIME The interface WD minimal time period in 100mS units
- *
- * 4 WD_STEP_TIME The steps of the WD timer in
+ *
+ * 4 WD_STEP_TIME The steps of the WD timer in
* 0 - for linear steps (WD_MIN_TIME * X)
- * 1 - for multiply by 2 from previous step (WD_MIN_TIME * 2^X)
- *
- * 5-8 WD_STEP_COUNT Number of steps the WD timer supports in 2^X
+ * 1 - for multiply by 2 from previous step
+ * (WD_MIN_TIME * 2^X)
+ *
+ * 5-8 WD_STEP_COUNT Number of steps the WD timer supports in 2^X
* (X bit available for defining the value)
- *
- *
- *
+ *
+ *
+ *
**/
int get_wd_set_caps_sd(int if_index);
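
A small sketch of decoding this descriptor with the accessor macros from bp_ioctl.h shown earlier in this patch; the sample value is made up:

#include <stdio.h>

/* Accessors as defined in bp_ioctl.h. */
#define WD_MIN_TIME_GET(desc)	(desc & 0xf)
#define WDT_STEP_TIME		0x10	/* BIT_4 */
#define WD_STEP_COUNT_GET(desc)	((desc>>5) & 0xf)

int main(void)
{
	int desc = 0x55;	/* made-up value standing in for get_wd_set_caps_sd() */

	printf("min time:   %d ms\n", WD_MIN_TIME_GET(desc) * 100);
	printf("step mode:  %s\n", (desc & WDT_STEP_TIME) ?
	       "multiply by 2 per step" : "linear steps");
	printf("step count: %d (raw field value)\n", WD_STEP_COUNT_GET(desc));
	return 0;
}
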
/**
* set_bypass - set Bypass state
* @if_index: network device index of the controlling device
- * @bypass_mode: bypass mode (1=on, 0=off)
+ * @bypass_mode: bypass mode (1=on, 0=off)
* Output:
* 0 - on success
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int set_bypass_sd(int if_index, int bypass_mode);
@@ -133,7 +151,7 @@ int set_bypass_sd(int if_index, int bypass_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int get_bypass_sd(int if_index);
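
A sketch of the usual probe-then-control pattern built from is_bypass_sd(), set_bypass_sd() and get_bypass_sd() as documented above, assuming this header is included; the wrapper name is made up:

/* Returns 1 if bypass was switched on and read back, negative otherwise. */
static int example_force_bypass(int if_index)
{
	/* Only the bypass controlling (master) device accepts commands. */
	if (is_bypass_sd(if_index) != 1)
		return -1;

	if (set_bypass_sd(if_index, 1) < 0)	/* 1 = bypass on, 0 = off */
		return -1;

	/* Read back; 1 is expected once the command has taken effect. */
	return get_bypass_sd(if_index);
}
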
@@ -142,7 +160,7 @@ int get_bypass_sd(int if_index);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int get_bypass_change_sd(int if_index);
@@ -152,8 +170,8 @@ int get_bypass_change_sd(int if_index);
* @dis_bypass: disable bypass(1=dis, 0=en)
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int set_dis_bypass_sd(int if_index, int dis_bypass);
@@ -162,8 +180,8 @@ int set_dis_bypass_sd(int if_index, int dis_bypass);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (normal Bypass mode/ Disable bypass)
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int get_dis_bypass_sd(int if_index);
@@ -172,9 +190,9 @@ int get_dis_bypass_sd(int if_index);
* @if_index: network device index of the controlling device
* @bypass_mode: bypass mode setting at power off state (1=BP en, 0=BP Dis)
* Output:
- * 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * 0 - on success
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int set_bypass_pwoff_sd(int if_index, int bypass_mode);
@@ -183,8 +201,8 @@ int set_bypass_pwoff_sd(int if_index, int bypass_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (Disable bypass at power off state / normal Bypass mode)
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int get_bypass_pwoff_sd(int if_index);
@@ -193,9 +211,9 @@ int get_bypass_pwoff_sd(int if_index);
* @if_index: network device index of the controlling device
* @bypass_mode: bypass mode setting at power up state (1=BP en, 0=BP Dis)
* Output:
- * 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * 0 - on success
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int set_bypass_pwup_sd(int if_index, int bypass_mode);
@@ -204,59 +222,60 @@ int set_bypass_pwup_sd(int if_index, int bypass_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (Disable bypass at power up state / normal Bypass mode)
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int get_bypass_pwup_sd(int if_index);
/**
* set_bypass_wd - Set watchdog state
* @if_index: network device index of the controlling device
- * @ms_timeout: requested timeout (in ms units), 0 for disabling the watchdog timer
- * @ms_timeout_set(output): requested timeout (in ms units),
- * that the adapter supports and will be used by the watchdog
+ * @ms_timeout: requested timeout (in ms units), 0 for disabling the watchdog
+ * timer
+ * @ms_timeout_set(output): requested timeout (in ms units), that the adapter
+ * supports and will be used by the watchdog
* Output:
- * 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * 0 - on success
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int set_bypass_wd_sd(int if_index, int ms_timeout, int *ms_timeout_set);
/**
* get_bypass_wd - Get watchdog state
* @if_index: network device index of the controlling device
- * @ms_timeout (output): WDT timeout (in ms units),
+ * @ms_timeout (output): WDT timeout (in ms units),
* -1 for unknown wdt status
* 0 if WDT is disabled
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int get_bypass_wd_sd(int if_index, int *ms_timeout_set);
/**
* get_wd_expire_time - Get watchdog expire
* @if_index: network device index of the controlling device
- * @ms_time_left (output): time left till watchdog time expire,
+ * @ms_time_left (output): time left till watchdog time expire,
* -1 if WDT has expired
* 0 if WDT is disabled
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device or unknown wdt status)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device or unknown wdt status)
**/
int get_wd_expire_time_sd(int if_index, int *ms_time_left);
/**
* reset_bypass_wd_timer - Reset watchdog timer
* @if_index: network device index of the controlling device
- *
+ *
* Output:
* 1 - on success
* 0 - watchdog is not configured
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device or unknown wdt status)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device or unknown wdt status)
**/
int reset_bypass_wd_timer_sd(int if_index);
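
As a sketch of how a consumer module might drive the watchdog entry points above (the module name, interface index and 30 s timeout are all arbitrary, and the prototypes would normally come from including libbp_sd.h):

#include <linux/module.h>
#include <linux/kernel.h>

/* Normally pulled in via libbp_sd.h. */
int set_bypass_wd_sd(int if_index, int ms_timeout, int *ms_timeout_set);
int reset_bypass_wd_timer_sd(int if_index);

static int ifindex = 2;	/* hypothetical bypass controlling interface */
module_param(ifindex, int, 0444);

static int __init bpwd_demo_init(void)
{
	int granted = 0;

	/* Request a 30 s timeout; the adapter rounds to a supported step. */
	if (set_bypass_wd_sd(ifindex, 30000, &granted) < 0) {
		pr_info("bpwd_demo: no controllable watchdog on ifindex %d\n",
			ifindex);
		return -ENODEV;
	}
	pr_info("bpwd_demo: watchdog armed, effective timeout %d ms\n", granted);

	/* A real user would call this periodically, e.g. from a workqueue. */
	reset_bypass_wd_timer_sd(ifindex);
	return 0;
}

static void __exit bpwd_demo_exit(void)
{
	int granted = 0;

	set_bypass_wd_sd(ifindex, 0, &granted);	/* 0 disables the watchdog */
}

module_init(bpwd_demo_init);
module_exit(bpwd_demo_exit);
MODULE_LICENSE("GPL");
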
@@ -264,53 +283,54 @@ int reset_bypass_wd_timer_sd(int if_index);
* set_std_nic - Standard NIC mode of operation
* @if_index: network device index of the controlling device
* @nic_mode: 0/1 (Default Bypass mode / Standard NIC mode)
- *
+ *
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int set_std_nic_sd(int if_index, int nic_mode);
/**
* get_std_nic - Get Standard NIC mode setting
* @if_index: network device index of the controlling device
- *
+ *
* Output:
* 0/1 (Default Bypass mode / Standard NIC mode) on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int get_std_nic_sd(int if_index);
/**
- * set_tx - set transmitter enable/disable
+ * set_tx - set transmitter enable/disable
* @if_index: network device index of the controlling device
* @tx_state: 0/1 (Transmit Disable / Transmit Enable)
- *
+ *
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation )
+ * -1 - on failure (device is not capable of the operation)
**/
int set_tx_sd(int if_index, int tx_state);
/**
* get_std_nic - get transmitter state (disable / enable)
* @if_index: network device index of the controlling device
- *
+ *
* Output:
 * 0/1 (Transmit Disable / Transmit Enable) on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass)
**/
int get_tx_sd(int if_index);
/**
* set_tap - set TAP state
* @if_index: network device index of the controlling device
- * @tap_mode: 1 tap mode , 0 normal nic mode
+ * @tap_mode: 1 tap mode, 0 normal nic mode
* Output:
* 0 - on success
- * -1 - on failure (device not support TAP or it's a slave device)
+ * -1 - on failure (device not support TAP or it's a slave device)
**/
int set_tap_sd(int if_index, int tap_mode);
@@ -319,7 +339,7 @@ int set_tap_sd(int if_index, int tap_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support TAP or it's a slave device)
+ * -1 - on failure (device not support TAP or it's a slave device)
**/
int get_tap_sd(int if_index);
@@ -328,7 +348,7 @@ int get_tap_sd(int if_index);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support TAP or it's a slave device)
+ * -1 - on failure (device not support TAP or it's a slave device)
**/
int get_tap_change_sd(int if_index);
@@ -338,8 +358,8 @@ int get_tap_change_sd(int if_index);
* @dis_tap: disable tap(1=dis, 0=en)
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support TAP
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * TAP or it's a slave device)
**/
int set_dis_tap_sd(int if_index, int dis_tap);
@@ -348,8 +368,8 @@ int set_dis_tap_sd(int if_index, int dis_tap);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (normal TAP mode/ Disable TAP)
- * -1 - on failure (device is not capable of the operation ordevice not support TAP
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * TAP or it's a slave device)
**/
int get_dis_tap_sd(int if_index);
@@ -358,9 +378,9 @@ int get_dis_tap_sd(int if_index);
* @if_index: network device index of the controlling device
* @bypass_mode: tap mode setting at power up state (1=TAP en, 0=TAP Dis)
* Output:
- * 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support TAP
- * or it's a slave device)
+ * 0 - on success
+ * -1 - on failure (device is not capable of the operation or device not
+ * support TAP or it's a slave device)
**/
int set_tap_pwup_sd(int if_index, int tap_mode);
@@ -369,18 +389,18 @@ int set_tap_pwup_sd(int if_index, int tap_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (Disable TAP at power up state / normal TAP mode)
- * -1 - on failure (device is not capable of the operation ordevice not support TAP
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support TAP or it's a slave device)
**/
int get_tap_pwup_sd(int if_index);
/**
* set_bp_disc - set Disconnect state
* @if_index: network device index of the controlling device
- * @tap_mode: 1 disc mode , 0 non-disc mode
+ * @disc_mode: 1 disc mode, 0 non-disc mode
* Output:
* 0 - on success
- * -1 - on failure (device not support Disconnect or it's a slave device)
+ * -1 - on failure (device not support Disconnect or it's a slave device)
**/
int set_bp_disc_sd(int if_index, int disc_mode);
@@ -389,7 +409,7 @@ int set_bp_disc_sd(int if_index, int disc_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support Disconnect or it's a slave device)
+ * -1 - on failure (device not support Disconnect or it's a slave device)
**/
int get_bp_disc_sd(int if_index);
@@ -398,7 +418,7 @@ int get_bp_disc_sd(int if_index);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support Disconnect or it's a slave device)
+ * -1 - on failure (device not support Disconnect or it's a slave device)
**/
int get_bp_disc_change_sd(int if_index);
@@ -408,8 +428,8 @@ int get_bp_disc_change_sd(int if_index);
* @dis_tap: disable tap(1=dis, 0=en)
* Output:
* 0 - on success
- * -1 - on failure (device is not capable ofthe operation ordevice not support Disconnect
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support Disconnect or it's a slave device)
**/
int set_bp_dis_disc_sd(int if_index, int dis_disc);
@@ -418,8 +438,8 @@ int set_bp_dis_disc_sd(int if_index, int dis_disc);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (normal Disconnect mode/ Disable Disconnect)
- * -1 - on failure (device is not capable of the operation ordevice not support Disconnect
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support Disconnect or it's a slave device)
**/
int get_bp_dis_disc_sd(int if_index);
@@ -428,9 +448,9 @@ int get_bp_dis_disc_sd(int if_index);
* @if_index: network device index of the controlling device
* @disc_mode: tap mode setting at power up state (1=Disc en, 0=Disc Dis)
* Output:
- * 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Disconnect
- * or it's a slave device)
+ * 0 - on success
+ * -1 - on failure (device is not capable of the operation or device not
+ * support Disconnect or it's a slave device)
**/
int set_bp_disc_pwup_sd(int if_index, int disc_mode);
@@ -438,19 +458,20 @@ int set_bp_disc_pwup_sd(int if_index, int disc_mode);
* get_bp_disc_pwup - Get Disconnect mode state at power-up state
* @if_index: network device index of the controlling device
* Output:
- * 0/1 - on success (Disable Disconnect at power up state / normal Disconnect mode)
- * -1 - on failure (device is not capable of the operation ordevice not support TAP
- * or it's a slave device)
+ * 0/1 - on success (Disable Disconnect at power up state / normal Disconnect
+ * mode)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support Disconnect or it's a slave device)
**/
int get_bp_disc_pwup_sd(int if_index);
/**
* set_wd_exp_mode - Set adapter state when WDT expired.
* @if_index: network device index of the controlling device
- * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
+ * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
* Output:
* 0 - on success
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int set_wd_exp_mode_sd(int if_index, int bypass_mode);
@@ -459,39 +480,41 @@ int set_wd_exp_mode_sd(int if_index, int bypass_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (bypass/tap) on success
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int get_wd_exp_mode_sd(int if_index);
/**
* set_wd_autoreset - reset WDT periodically.
* @if_index: network device index of the controlling device
- * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
+ * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
* Output:
* 1 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device or unknown wdt status)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support Bypass or it's a slave device or unknown wdt
+ * status)
**/
int set_wd_autoreset_sd(int if_index, int time);
/**
* set_wd_autoreset - reset WDT periodically.
* @if_index: network device index of the controlling device
- * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
+ * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
* Output:
* 1 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device or unknown wdt status)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support Bypass or it's a slave device or unknown wdt
+ * status)
**/
int get_wd_autoreset_sd(int if_index);
/**
* set_tpl - set TPL state
* @if_index: network device index of the controlling device
- * @tpl_mode: 1 tpl mode , 0 normal nic mode
+ * @tpl_mode: 1 tpl mode, 0 normal nic mode
* Output:
* 0 - on success
- * -1 - on failure (device not support TPL)
+ * -1 - on failure (device not support TPL)
**/
int set_tpl_sd(int if_index, int tpl_mode);
@@ -500,7 +523,7 @@ int set_tpl_sd(int if_index, int tpl_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support TPL or it's a slave device)
+ * -1 - on failure (device not support TPL or it's a slave device)
**/
int get_tpl_sd(int if_index);
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README
index 53052c4e78ae..4fa50e73ce86 100644
--- a/drivers/staging/slicoss/README
+++ b/drivers/staging/slicoss/README
@@ -5,43 +5,3 @@ This driver is supposed to support:
Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
The driver was actually tested on Oasis and Kalahari cards.
-
-TODO:
- - move firmware loading to request_firmware()
- - remove direct memory access of structures
- - any remaining sparse and checkpatch.pl warnings
-
- - use net_device_ops
- - use dev->stats rather than adapter->stats
- - don't cast netdev_priv it is already void
- - GET RID OF MACROS
- - work on all architectures
- - without CONFIG_X86_64 confusion
- - do 64 bit correctly
- - don't depend on order of union
- - get rid of ASSERT(), use BUG() instead but only where necessary
- looks like most aren't really useful
- - no new SIOCDEVPRIVATE ioctl allowed
- - don't use module_param for configuring interrupt mitigation
- use ethtool instead
- - reorder code to elminate use of forward declarations
- - don't keep private linked list of drivers.
- - remove all the gratiutous debug infrastructure
- - use PCI_DEVICE()
- - do ethtool correctly using ethtool_ops
- - NAPI?
- - wasted overhead of extra stats
- - state variables for things that are
- easily available and shouldn't be kept in card structure, cardnum, ...
- slotnumber, events, ...
- - get rid of slic_spinlock wrapper
- - volatile == bad design => bad code
- - locking too fine grained, not designed just throw more locks
- at problem
-
-
-Please send patches to:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer
-<charrer@alacritech.com> as well as they are also able to test out any
-changes.
diff --git a/drivers/staging/slicoss/TODO b/drivers/staging/slicoss/TODO
new file mode 100644
index 000000000000..62ff1008b1ee
--- /dev/null
+++ b/drivers/staging/slicoss/TODO
@@ -0,0 +1,38 @@
+TODO:
+ - move firmware loading to request_firmware()
+ - remove direct memory access of structures
+ - any remaining sparse and checkpatch.pl warnings
+
+ - use net_device_ops
+ - use dev->stats rather than adapter->stats
+ - don't cast netdev_priv it is already void
+ - GET RID OF MACROS
+ - work on all architectures
+ - without CONFIG_X86_64 confusion
+ - do 64 bit correctly
+ - don't depend on order of union
+ - get rid of ASSERT(), use BUG() instead but only where necessary
+ looks like most aren't really useful
+ - no new SIOCDEVPRIVATE ioctl allowed
+ - don't use module_param for configuring interrupt mitigation
+ use ethtool instead
+ - reorder code to eliminate use of forward declarations
+ - don't keep private linked list of drivers.
+ - remove all the gratuitous debug infrastructure
+ - use PCI_DEVICE()
+ - do ethtool correctly using ethtool_ops
+ - NAPI?
+ - wasted overhead of extra stats
+ - state variables for things that are
+ easily available and shouldn't be kept in card structure, cardnum, ...
+ slotnumber, events, ...
+ - get rid of slic_spinlock wrapper
+ - volatile == bad design => bad code
+ - locking too fine grained, not designed; more locks were just
+   thrown at the problem
+
+Please send patches to:
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer
+<charrer@alacritech.com> as well as they are also able to test out any
+changes.
diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h
index 4c7822bd5358..702902cdb461 100644
--- a/drivers/staging/slicoss/slic.h
+++ b/drivers/staging/slicoss/slic.h
@@ -464,9 +464,12 @@ struct adapter {
/*
* SLIC Handles
*/
- struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS+1]; /* Object handles*/
- struct slic_handle *pfree_slic_handles; /* Free object handles*/
- struct slic_spinlock handle_lock; /* Object handle list lock*/
+ /* Object handles*/
+ struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS+1];
+ /* Free object handles*/
+ struct slic_handle *pfree_slic_handles;
+ /* Object handle list lock*/
+ struct slic_spinlock handle_lock;
ushort slic_handle_ix;
u32 xmitq_full;
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 1426ca49bfe8..e27b88f02ccd 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -100,11 +100,11 @@
#include "slic.h"
static uint slic_first_init = 1;
-static char *slic_banner = "Alacritech SLIC Technology(tm) Server "\
+static char *slic_banner = "Alacritech SLIC Technology(tm) Server "
"and Storage Accelerator (Non-Accelerated)";
static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00";
-static char *slic_product_name = "SLIC Technology(tm) Server "\
+static char *slic_product_name = "SLIC Technology(tm) Server "
"and Storage Accelerator (Non-Accelerated)";
static char *slic_vendor = "Alacritech, Inc.";
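
Dropping the trailing backslashes here is safe because adjacent string literals are concatenated by the C compiler; a line-continuation character is only required inside macro definitions. A minimal sketch of the same idiom (hypothetical variable name):

static const char *example_banner =
	"Alacritech SLIC Technology(tm) Server "
	"and Storage Accelerator (Non-Accelerated)";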
@@ -144,29 +144,6 @@ static const struct pci_device_id slic_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
-#define SLIC_GET_SLIC_HANDLE(_adapter, _pslic_handle) \
-{ \
- spin_lock_irqsave(&_adapter->handle_lock.lock, \
- _adapter->handle_lock.flags); \
- _pslic_handle = _adapter->pfree_slic_handles; \
- if (_pslic_handle) { \
- _adapter->pfree_slic_handles = _pslic_handle->next; \
- } \
- spin_unlock_irqrestore(&_adapter->handle_lock.lock, \
- _adapter->handle_lock.flags); \
-}
-
-#define SLIC_FREE_SLIC_HANDLE(_adapter, _pslic_handle) \
-{ \
- _pslic_handle->type = SLIC_HANDLE_FREE; \
- spin_lock_irqsave(&_adapter->handle_lock.lock, \
- _adapter->handle_lock.flags); \
- _pslic_handle->next = _adapter->pfree_slic_handles; \
- _adapter->pfree_slic_handles = _pslic_handle; \
- spin_unlock_irqrestore(&_adapter->handle_lock.lock, \
- _adapter->handle_lock.flags); \
-}
-
static inline void slic_reg32_write(void __iomem *reg, u32 value, bool flush)
{
writel(value, reg);
@@ -1442,7 +1419,13 @@ static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page)
while ((cmdcnt < SLIC_CMDQ_CMDSINPAGE) &&
(adapter->slic_handle_ix < 256)) {
/* Allocate and initialize a SLIC_HANDLE for this command */
- SLIC_GET_SLIC_HANDLE(adapter, pslic_handle);
+ spin_lock_irqsave(&adapter->handle_lock.lock,
+ adapter->handle_lock.flags);
+ pslic_handle = adapter->pfree_slic_handles;
+ if (pslic_handle)
+ adapter->pfree_slic_handles = pslic_handle->next;
+ spin_unlock_irqrestore(&adapter->handle_lock.lock,
+ adapter->handle_lock.flags);
pslic_handle->type = SLIC_HANDLE_CMD;
pslic_handle->address = (void *) cmd;
pslic_handle->offset = (ushort) adapter->slic_handle_ix++;
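
The SLIC_GET_SLIC_HANDLE/SLIC_FREE_SLIC_HANDLE macros removed above are now open-coded at their call sites, in line with the TODO item about getting rid of macros. If the sequence ever needs to be shared again, a plain helper is the usual replacement; the sketch below assumes the existing struct adapter and struct slic_handle layouts, and keeps the irq flags in a local variable rather than in the shared slic_spinlock wrapper (which the TODO also wants removed):

/* sketch only -- not part of this patch */
static struct slic_handle *slic_alloc_handle(struct adapter *adapter)
{
	struct slic_handle *handle;
	unsigned long flags;

	spin_lock_irqsave(&adapter->handle_lock.lock, flags);
	handle = adapter->pfree_slic_handles;
	if (handle)
		adapter->pfree_slic_handles = handle->next;
	spin_unlock_irqrestore(&adapter->handle_lock.lock, flags);

	return handle;
}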
@@ -1830,7 +1813,7 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
#endif
seq_printf(seq, "driver_version : %s\n", slic_proc_version);
- seq_puts(seq, "Microcode versions: \n");
+ seq_puts(seq, "Microcode versions:\n");
seq_printf(seq, " Gigabit (gb) : %s %s\n",
MOJAVE_UCODE_VERS_STRING, MOJAVE_UCODE_VERS_DATE);
seq_printf(seq, " Gigabit Receiver : %s %s\n",
@@ -1917,16 +1900,14 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
if (config->OEMFruFormat == VENDOR4_FRU_FORMAT) {
seq_printf(seq,
- "Serial # : "
- "%c%c%c%c%c%c%c%c%c%c%c%c\n",
+ "Serial # : %c%c%c%c%c%c%c%c%c%c%c%c\n",
fru[8], fru[9], fru[10],
fru[11], fru[12], fru[13],
fru[16], fru[17], fru[18],
fru[19], fru[20], fru[21]);
} else {
seq_printf(seq,
- "Serial # : "
- "%c%c%c%c%c%c%c%c%c%c%c%c%c%c\n",
+ "Serial # : %c%c%c%c%c%c%c%c%c%c%c%c%c%c\n",
fru[8], fru[9], fru[10],
fru[11], fru[12], fru[13],
fru[14], fru[15], fru[16],
@@ -1974,8 +1955,7 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
{
seq_puts(seq, "FRU Information:\n");
seq_printf(seq,
- " Part # : "
- "%c%c%c%c%c%c%c%c\n",
+ " Part # : %c%c%c%c%c%c%c%c\n",
oemfru[0], oemfru[1], oemfru[2],
oemfru[3], oemfru[4], oemfru[5],
oemfru[6], oemfru[7]);
@@ -2002,20 +1982,17 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
{
seq_puts(seq, "FRU Information:\n");
seq_printf(seq,
- " FRU Number : "
- "%c%c%c%c%c%c%c%c\n",
+ " FRU Number : %c%c%c%c%c%c%c%c\n",
oemfru[0], oemfru[1], oemfru[2],
oemfru[3], oemfru[4], oemfru[5],
oemfru[6], oemfru[7]);
seq_sprintf(seq,
- " Part Number : "
- "%c%c%c%c%c%c%c%c\n",
+ " Part Number : %c%c%c%c%c%c%c%c\n",
oemfru[8], oemfru[9], oemfru[10],
oemfru[11], oemfru[12], oemfru[13],
oemfru[14], oemfru[15]);
seq_printf(seq,
- " EC Level : "
- "%c%c%c%c%c%c%c%c\n",
+ " EC Level : %c%c%c%c%c%c%c%c\n",
oemfru[16], oemfru[17], oemfru[18],
oemfru[19], oemfru[20], oemfru[21],
oemfru[22], oemfru[23]);
@@ -2412,8 +2389,7 @@ static void slic_xmit_fail(struct adapter *adapter,
switch (status) {
case XMIT_FAIL_LINK_STATE:
dev_err(&adapter->netdev->dev,
- "reject xmit skb[%p: %x] linkstate[%s] "
- "adapter[%s:%d] card[%s:%d]\n",
+ "reject xmit skb[%p: %x] linkstate[%s] adapter[%s:%d] card[%s:%d]\n",
skb, skb->pkt_type,
SLIC_LINKSTATE(adapter->linkstate),
SLIC_ADAPTER_STATE(adapter->state),
@@ -2428,8 +2404,7 @@ static void slic_xmit_fail(struct adapter *adapter,
break;
case XMIT_FAIL_HOSTCMD_FAIL:
dev_err(&adapter->netdev->dev,
- "xmit_start skb[%p] type[%x] No host commands "
- "available\n", skb, skb->pkt_type);
+ "xmit_start skb[%p] type[%x] No host commands available\n", skb, skb->pkt_type);
break;
}
}
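
The seq_printf() and dev_err() changes in this file follow the usual checkpatch guidance: keep a user-visible format string on a single line, even past 80 columns, so the message stays greppable, and wrap only the arguments. For example:

	dev_err(&adapter->netdev->dev,
		"xmit_start skb[%p] type[%x] No host commands available\n",
		skb, skb->pkt_type);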
@@ -2642,8 +2617,7 @@ static void slic_interrupt_card_up(u32 isr, struct adapter *adapter,
}
} else if (isr & ISR_XDROP) {
dev_err(&dev->dev,
- "isr & ISR_ERR [%x] "
- "ISR_XDROP \n", isr);
+ "isr & ISR_ERR [%x] ISR_XDROP\n", isr);
} else {
dev_err(&dev->dev,
"isr & ISR_ERR [%x]\n",
@@ -2970,7 +2944,7 @@ static void slic_card_cleanup(struct sliccard *card)
{
if (card->loadtimerset) {
card->loadtimerset = 0;
- del_timer(&card->loadtimer);
+ del_timer_sync(&card->loadtimer);
}
slic_debug_card_destroy(card);
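
del_timer() only deactivates a timer that has not fired yet; a callback that is already running on another CPU keeps running. del_timer_sync() additionally waits for such a callback to finish, which is what a cleanup path wants before the data the callback uses goes away. A generic sketch (hypothetical my_dev structure, not from this driver):

struct my_dev {
	struct timer_list poll_timer;
	/* ... */
};

static void my_dev_teardown(struct my_dev *dev)
{
	/* wait for any in-flight timer callback before dev is freed */
	del_timer_sync(&dev->poll_timer);
	kfree(dev);
}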
@@ -3269,8 +3243,7 @@ static int slic_card_init(struct sliccard *card, struct adapter *adapter)
if (!peeprom) {
dev_err(&adapter->pcidev->dev,
- "eeprom read failed to get memory "
- "bus %d slot %d\n", adapter->busnumber,
+ "eeprom read failed to get memory bus %d slot %d\n", adapter->busnumber,
adapter->slotnumber);
return -ENOMEM;
} else {
@@ -3703,7 +3676,7 @@ static int slic_entry_probe(struct pci_dev *pcidev,
err = slic_card_locate(adapter);
if (err) {
dev_err(&pcidev->dev, "cannot locate card\n");
- goto err_out_free_mmio_region;
+ goto err_out_unmap;
}
card = adapter->card;
@@ -3743,8 +3716,6 @@ static int slic_entry_probe(struct pci_dev *pcidev,
err_out_unmap:
iounmap(memmapped_ioaddr);
-err_out_free_mmio_region:
- release_mem_region(mmio_start, mmio_len);
err_out_free_netdev:
free_netdev(netdev);
err_out_exit_slic_probe:
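
Redirecting the slic_card_locate() failure to err_out_unmap, and deleting the now-unreferenced err_out_free_mmio_region label, keeps the probe error path in the usual kernel shape: labels are stacked so that a jump unwinds exactly the resources acquired so far, in reverse order. The general pattern looks like this (illustrative names, hypothetical do_setup() helper):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		err = -ENOMEM;
		goto err_disable;
	}

	err = do_setup(pdev, regs);	/* hypothetical setup step */
	if (err)
		goto err_unmap;		/* unwind only what was acquired */

	return 0;

err_unmap:
	iounmap(regs);
err_disable:
	pci_disable_device(pdev);
	return err;
}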
diff --git a/drivers/staging/sm7xxfb/Kconfig b/drivers/staging/sm7xxfb/Kconfig
deleted file mode 100644
index e2922ae3a3ee..000000000000
--- a/drivers/staging/sm7xxfb/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-config FB_SM7XX
- tristate "Silicon Motion SM7XX framebuffer support"
- depends on FB && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- Frame buffer driver for the Silicon Motion SM710, SM712, SM721
- and SM722 chips.
-
- This driver is also available as a module. The module will be
- called sm7xxfb. If you want to compile it as a module, say M
- here and read <file:Documentation/kbuild/modules.txt>.
diff --git a/drivers/staging/sm7xxfb/Makefile b/drivers/staging/sm7xxfb/Makefile
deleted file mode 100644
index 48f471cf9f36..000000000000
--- a/drivers/staging/sm7xxfb/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_FB_SM7XX) += sm7xxfb.o
diff --git a/drivers/staging/sm7xxfb/TODO b/drivers/staging/sm7xxfb/TODO
deleted file mode 100644
index 1fcead591c16..000000000000
--- a/drivers/staging/sm7xxfb/TODO
+++ /dev/null
@@ -1,9 +0,0 @@
-TODO:
-- Dual head support
-- 2D acceleration support
-- use kernel coding style
-- refine the code and remove unused code
-- move it to drivers/video/sm7xxfb.c
-
-Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and
-Teddy Wang <teddy.wang@siliconmotion.com.cn>.
diff --git a/drivers/staging/sm7xxfb/sm7xx.h b/drivers/staging/sm7xxfb/sm7xx.h
deleted file mode 100644
index 85998615b801..000000000000
--- a/drivers/staging/sm7xxfb/sm7xx.h
+++ /dev/null
@@ -1,779 +0,0 @@
-/*
- * Silicon Motion SM712 frame buffer device
- *
- * Copyright (C) 2006 Silicon Motion Technology Corp.
- * Authors: Ge Wang, gewang@siliconmotion.com
- * Boyod boyod.yang@siliconmotion.com.cn
- *
- * Copyright (C) 2009 Lemote, Inc.
- * Author: Wu Zhangjin, wuzhangjin@gmail.com
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#define NR_PALETTE 256
-
-#define FB_ACCEL_SMI_LYNX 88
-
-#define SCREEN_X_RES 1024
-#define SCREEN_Y_RES 600
-#define SCREEN_BPP 16
-
-/*Assume SM712 graphics chip has 4MB VRAM */
-#define SM712_VIDEOMEMORYSIZE 0x00400000
-/*Assume SM722 graphics chip has 8MB VRAM */
-#define SM722_VIDEOMEMORYSIZE 0x00800000
-
-#define dac_reg (0x3c8)
-#define dac_val (0x3c9)
-
-extern void __iomem *smtc_RegBaseAddress;
-#define smtc_mmiowb(dat, reg) writeb(dat, smtc_RegBaseAddress + reg)
-#define smtc_mmioww(dat, reg) writew(dat, smtc_RegBaseAddress + reg)
-#define smtc_mmiowl(dat, reg) writel(dat, smtc_RegBaseAddress + reg)
-
-#define smtc_mmiorb(reg) readb(smtc_RegBaseAddress + reg)
-#define smtc_mmiorw(reg) readw(smtc_RegBaseAddress + reg)
-#define smtc_mmiorl(reg) readl(smtc_RegBaseAddress + reg)
-
-#define SIZE_SR00_SR04 (0x04 - 0x00 + 1)
-#define SIZE_SR10_SR24 (0x24 - 0x10 + 1)
-#define SIZE_SR30_SR75 (0x75 - 0x30 + 1)
-#define SIZE_SR80_SR93 (0x93 - 0x80 + 1)
-#define SIZE_SRA0_SRAF (0xAF - 0xA0 + 1)
-#define SIZE_GR00_GR08 (0x08 - 0x00 + 1)
-#define SIZE_AR00_AR14 (0x14 - 0x00 + 1)
-#define SIZE_CR00_CR18 (0x18 - 0x00 + 1)
-#define SIZE_CR30_CR4D (0x4D - 0x30 + 1)
-#define SIZE_CR90_CRA7 (0xA7 - 0x90 + 1)
-#define SIZE_VPR (0x6C + 1)
-#define SIZE_DPR (0x44 + 1)
-
-static inline void smtc_crtcw(int reg, int val)
-{
- smtc_mmiowb(reg, 0x3d4);
- smtc_mmiowb(val, 0x3d5);
-}
-
-static inline unsigned int smtc_crtcr(int reg)
-{
- smtc_mmiowb(reg, 0x3d4);
- return smtc_mmiorb(0x3d5);
-}
-
-static inline void smtc_grphw(int reg, int val)
-{
- smtc_mmiowb(reg, 0x3ce);
- smtc_mmiowb(val, 0x3cf);
-}
-
-static inline unsigned int smtc_grphr(int reg)
-{
- smtc_mmiowb(reg, 0x3ce);
- return smtc_mmiorb(0x3cf);
-}
-
-static inline void smtc_attrw(int reg, int val)
-{
- smtc_mmiorb(0x3da);
- smtc_mmiowb(reg, 0x3c0);
- smtc_mmiorb(0x3c1);
- smtc_mmiowb(val, 0x3c0);
-}
-
-static inline void smtc_seqw(int reg, int val)
-{
- smtc_mmiowb(reg, 0x3c4);
- smtc_mmiowb(val, 0x3c5);
-}
-
-static inline unsigned int smtc_seqr(int reg)
-{
- smtc_mmiowb(reg, 0x3c4);
- return smtc_mmiorb(0x3c5);
-}
-
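
These helpers wrap the classic VGA index/data register pairing: the register index is written to the index port (0x3d4 for the CRT controller, 0x3ce for the graphics controller, 0x3c4 for the sequencer) and the value is then accessed through the matching data port. A small usage sketch built only on the helpers above (illustrative function):

static void smtc_example_reg_access(void)
{
	unsigned int val;

	smtc_crtcw(0x30, 0x00);		/* program CRTC register 0x30 */
	val = smtc_crtcr(0x30);		/* read it back via 0x3d4/0x3d5 */
	(void)val;
}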
-/* The next structure holds all information relevant for a specific video mode.
- */
-
-struct ModeInit {
- int mmSizeX;
- int mmSizeY;
- int bpp;
- int hz;
- unsigned char Init_MISC;
- unsigned char Init_SR00_SR04[SIZE_SR00_SR04];
- unsigned char Init_SR10_SR24[SIZE_SR10_SR24];
- unsigned char Init_SR30_SR75[SIZE_SR30_SR75];
- unsigned char Init_SR80_SR93[SIZE_SR80_SR93];
- unsigned char Init_SRA0_SRAF[SIZE_SRA0_SRAF];
- unsigned char Init_GR00_GR08[SIZE_GR00_GR08];
- unsigned char Init_AR00_AR14[SIZE_AR00_AR14];
- unsigned char Init_CR00_CR18[SIZE_CR00_CR18];
- unsigned char Init_CR30_CR4D[SIZE_CR30_CR4D];
- unsigned char Init_CR90_CRA7[SIZE_CR90_CRA7];
-};
-
-/**********************************************************************
- SM712 Mode table.
- **********************************************************************/
-struct ModeInit VGAMode[] = {
- {
- /* mode#0: 640 x 480 16Bpp 60Hz */
- 640, 480, 16, 60,
- /* Init_MISC */
- 0xE3,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x00, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
- 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
- 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
- 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
- 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
- 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
- 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
- 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
- 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
- },
- },
- {
- /* mode#1: 640 x 480 24Bpp 60Hz */
- 640, 480, 24, 60,
- /* Init_MISC */
- 0xE3,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x00, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
- 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
- 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
- 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
- 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
- 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
- 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
- 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
- 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
- },
- },
- {
- /* mode#0: 640 x 480 32Bpp 60Hz */
- 640, 480, 32, 60,
- /* Init_MISC */
- 0xE3,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x00, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
- 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
- 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
- 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
- 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
- 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
- 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
- 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
- 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
- },
- },
-
- { /* mode#2: 800 x 600 16Bpp 60Hz */
- 800, 600, 16, 60,
- /* Init_MISC */
- 0x2B,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
- 0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
- 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
- 0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
- 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
- 0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
- 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
- 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
- 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
- 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
- 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
- },
- },
- { /* mode#3: 800 x 600 24Bpp 60Hz */
- 800, 600, 24, 60,
- 0x2B,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x36, 0x03, 0x20, 0x09, 0xC0, 0x36, 0x36, 0x36,
- 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x36, 0x36, 0x36,
- 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
- 0x04, 0x55, 0x59, 0x36, 0x36, 0x00, 0x00, 0x36,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
- 0x02, 0x45, 0x30, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x36,
- 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x36, 0x36,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
- 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
- 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
- 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
- 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
- },
- },
- { /* mode#7: 800 x 600 32Bpp 60Hz */
- 800, 600, 32, 60,
- /* Init_MISC */
- 0x2B,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
- 0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
- 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
- 0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
- 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
- 0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
- 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
- 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
- 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
- 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
- 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
- },
- },
- /* We use 1024x768 table to light 1024x600 panel for lemote */
- { /* mode#4: 1024 x 600 16Bpp 60Hz */
- 1024, 600, 16, 60,
- /* Init_MISC */
- 0xEB,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x00, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xC8, 0x40, 0x14, 0x60, 0x00, 0x0A, 0x17, 0x20,
- 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x00, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x22, 0x03, 0x24, 0x09, 0xC0, 0x22, 0x22, 0x22,
- 0x22, 0x22, 0x22, 0x22, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x22, 0x22, 0x22,
- 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
- 0x00, 0x60, 0x59, 0x22, 0x22, 0x00, 0x00, 0x22,
- 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x16, 0x02, 0x0D, 0x82, 0x09, 0x02,
- 0x04, 0x45, 0x3F, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
- 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
- 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
- 0xA3, 0x7F, 0x00, 0x82, 0x0b, 0x6f, 0x57, 0x00,
- 0x5c, 0x0f, 0xE0, 0xe0, 0x7F, 0x57,
- },
- { /* Init_CR90_CRA7 */
- 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
- 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
- },
- },
- { /* mode#5: 1024 x 768 24Bpp 60Hz */
- 1024, 768, 24, 60,
- /* Init_MISC */
- 0xEB,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
- 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
- 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
- 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
- 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
- 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
- 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
- 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
- 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
- },
- { /* Init_CR90_CRA7 */
- 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
- 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
- },
- },
- { /* mode#4: 1024 x 768 32Bpp 60Hz */
- 1024, 768, 32, 60,
- /* Init_MISC */
- 0xEB,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x32, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
- 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
- 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
- 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
- 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
- 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
- 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
- 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
- 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
- },
- { /* Init_CR90_CRA7 */
- 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
- 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
- },
- },
- { /* mode#6: 320 x 240 16Bpp 60Hz */
- 320, 240, 16, 60,
- /* Init_MISC */
- 0xEB,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x32, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
- 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
- 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
- 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
- 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
- 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
- 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
- 0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
- 0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
- 0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
- },
- { /* Init_CR90_CRA7 */
- 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
- 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
- },
- },
-
- { /* mode#8: 320 x 240 32Bpp 60Hz */
- 320, 240, 32, 60,
- /* Init_MISC */
- 0xEB,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x32, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
- 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
- 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
- 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
- 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
- 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
- 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
- 0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
- 0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
- 0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
- },
- { /* Init_CR90_CRA7 */
- 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
- 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
- },
- },
-};
-
-#define numVGAModes ARRAY_SIZE(VGAMode)
diff --git a/drivers/staging/sm7xxfb/sm7xxfb.c b/drivers/staging/sm7xxfb/sm7xxfb.c
deleted file mode 100644
index 6176d98744cc..000000000000
--- a/drivers/staging/sm7xxfb/sm7xxfb.c
+++ /dev/null
@@ -1,1028 +0,0 @@
-/*
- * Silicon Motion SM7XX frame buffer device
- *
- * Copyright (C) 2006 Silicon Motion Technology Corp.
- * Authors: Ge Wang, gewang@siliconmotion.com
- * Boyod boyod.yang@siliconmotion.com.cn
- *
- * Copyright (C) 2009 Lemote, Inc.
- * Author: Wu Zhangjin, wuzhangjin@gmail.com
- *
- * Copyright (C) 2011 Igalia, S.L.
- * Author: Javier M. Mellid <jmunhoz@igalia.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- * Framebuffer driver for Silicon Motion SM710, SM712, SM721 and SM722 chips
- */
-
-#include <linux/io.h>
-#include <linux/fb.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/console.h>
-#include <linux/screen_info.h>
-
-#ifdef CONFIG_PM
-#include <linux/pm.h>
-#endif
-
-#include "sm7xx.h"
-
-/*
-* Private structure
-*/
-struct smtcfb_info {
- struct pci_dev *pdev;
- struct fb_info fb;
- u16 chip_id;
- u8 chip_rev_id;
-
- void __iomem *lfb; /* linear frame buffer */
- void __iomem *dp_regs; /* drawing processor control regs */
- void __iomem *vp_regs; /* video processor control regs */
- void __iomem *cp_regs; /* capture processor control regs */
- void __iomem *mmio; /* memory map IO port */
-
- u_int width;
- u_int height;
- u_int hz;
-
- u32 colreg[17];
-};
-
-void __iomem *smtc_RegBaseAddress; /* Memory Map IO starting address */
-
-static struct fb_var_screeninfo smtcfb_var = {
- .xres = 1024,
- .yres = 600,
- .xres_virtual = 1024,
- .yres_virtual = 600,
- .bits_per_pixel = 16,
- .red = {16, 8, 0},
- .green = {8, 8, 0},
- .blue = {0, 8, 0},
- .activate = FB_ACTIVATE_NOW,
- .height = -1,
- .width = -1,
- .vmode = FB_VMODE_NONINTERLACED,
- .nonstd = 0,
- .accel_flags = FB_ACCELF_TEXT,
-};
-
-static struct fb_fix_screeninfo smtcfb_fix = {
- .id = "smXXXfb",
- .type = FB_TYPE_PACKED_PIXELS,
- .visual = FB_VISUAL_TRUECOLOR,
- .line_length = 800 * 3,
- .accel = FB_ACCEL_SMI_LYNX,
- .type_aux = 0,
- .xpanstep = 0,
- .ypanstep = 0,
- .ywrapstep = 0,
-};
-
-struct vesa_mode {
- char index[6];
- u16 lfb_width;
- u16 lfb_height;
- u16 lfb_depth;
-};
-
-static struct vesa_mode vesa_mode_table[] = {
- {"0x301", 640, 480, 8},
- {"0x303", 800, 600, 8},
- {"0x305", 1024, 768, 8},
- {"0x307", 1280, 1024, 8},
-
- {"0x311", 640, 480, 16},
- {"0x314", 800, 600, 16},
- {"0x317", 1024, 768, 16},
- {"0x31A", 1280, 1024, 16},
-
- {"0x312", 640, 480, 24},
- {"0x315", 800, 600, 24},
- {"0x318", 1024, 768, 24},
- {"0x31B", 1280, 1024, 24},
-};
-
-struct screen_info smtc_scr_info;
-
-/* process command line options, get vga parameter */
-static int __init sm7xx_vga_setup(char *options)
-{
- int i;
-
- if (!options || !*options)
- return -EINVAL;
-
- smtc_scr_info.lfb_width = 0;
- smtc_scr_info.lfb_height = 0;
- smtc_scr_info.lfb_depth = 0;
-
- pr_debug("sm7xx_vga_setup = %s\n", options);
-
- for (i = 0; i < ARRAY_SIZE(vesa_mode_table); i++) {
- if (strstr(options, vesa_mode_table[i].index)) {
- smtc_scr_info.lfb_width = vesa_mode_table[i].lfb_width;
- smtc_scr_info.lfb_height =
- vesa_mode_table[i].lfb_height;
- smtc_scr_info.lfb_depth = vesa_mode_table[i].lfb_depth;
- return 0;
- }
- }
-
- return -1;
-}
-__setup("vga=", sm7xx_vga_setup);
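
The "vga=" handler simply matches the option string against the VESA index strings in vesa_mode_table and copies the matching geometry into smtc_scr_info, which the probe path reads later. A worked example from the table above:

/* booting with "vga=0x317" matches the {"0x317", 1024, 768, 16} entry,
 * so smtc_scr_info ends up with lfb_width = 1024, lfb_height = 768,
 * lfb_depth = 16
 */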
-
-static void sm712_setpalette(int regno, unsigned red, unsigned green,
- unsigned blue, struct fb_info *info)
-{
- /* set bit 5:4 = 01 (write LCD RAM only) */
- smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x10);
-
- smtc_mmiowb(regno, dac_reg);
- smtc_mmiowb(red >> 10, dac_val);
- smtc_mmiowb(green >> 10, dac_val);
- smtc_mmiowb(blue >> 10, dac_val);
-}
-
-/* chan_to_field
- *
- * convert a colour value into a field position
- *
- * from pxafb.c
- */
-
-static inline unsigned int chan_to_field(unsigned int chan,
- struct fb_bitfield *bf)
-{
- chan &= 0xffff;
- chan >>= 16 - bf->length;
- return chan << bf->offset;
-}
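
chan_to_field() scales a 16-bit colour channel down to the width of the target bitfield and shifts it into position. A worked example for the RGB565 red component (length 5, offset 11):

/* chan = 0xffff:
 *	chan &= 0xffff		-> 0xffff
 *	chan >>= 16 - 5		-> 0x001f
 *	return 0x1f << 11	-> 0xf800	(the RGB565 red mask)
 */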
-
-static int smtc_blank(int blank_mode, struct fb_info *info)
-{
- /* clear DPMS setting */
- switch (blank_mode) {
- case FB_BLANK_UNBLANK:
- /* Screen On: HSync: On, VSync : On */
- smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
- smtc_seqw(0x6a, 0x16);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
- smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
- smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
- break;
- case FB_BLANK_NORMAL:
- /* Screen Off: HSync: On, VSync : On Soft blank */
- smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
- smtc_seqw(0x6a, 0x16);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
- break;
- case FB_BLANK_VSYNC_SUSPEND:
- /* Screen On: HSync: On, VSync : Off */
- smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
- smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
- smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
- break;
- case FB_BLANK_HSYNC_SUSPEND:
- /* Screen On: HSync: Off, VSync : On */
- smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
- smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
- smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
- break;
- case FB_BLANK_POWERDOWN:
- /* Screen On: HSync: Off, VSync : Off */
- smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
- smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
- smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned trans, struct fb_info *info)
-{
- struct smtcfb_info *sfb;
- u32 val;
-
- sfb = info->par;
-
- if (regno > 255)
- return 1;
-
- switch (sfb->fb.fix.visual) {
- case FB_VISUAL_DIRECTCOLOR:
- case FB_VISUAL_TRUECOLOR:
- /*
- * 16/32 bit true-colour, use pseudo-palette for 16 base color
- */
- if (regno < 16) {
- if (sfb->fb.var.bits_per_pixel == 16) {
- u32 *pal = sfb->fb.pseudo_palette;
- val = chan_to_field(red, &sfb->fb.var.red);
- val |= chan_to_field(green, &sfb->fb.var.green);
- val |= chan_to_field(blue, &sfb->fb.var.blue);
-#ifdef __BIG_ENDIAN
- pal[regno] =
- ((red & 0xf800) >> 8) |
- ((green & 0xe000) >> 13) |
- ((green & 0x1c00) << 3) |
- ((blue & 0xf800) >> 3);
-#else
- pal[regno] = val;
-#endif
- } else {
- u32 *pal = sfb->fb.pseudo_palette;
- val = chan_to_field(red, &sfb->fb.var.red);
- val |= chan_to_field(green, &sfb->fb.var.green);
- val |= chan_to_field(blue, &sfb->fb.var.blue);
-#ifdef __BIG_ENDIAN
- val =
- (val & 0xff00ff00 >> 8) |
- (val & 0x00ff00ff << 8);
-#endif
- pal[regno] = val;
- }
- }
- break;
-
- case FB_VISUAL_PSEUDOCOLOR:
- /* color depth 8 bit */
- sm712_setpalette(regno, red, green, blue, info);
- break;
-
- default:
- return 1; /* unknown type */
- }
-
- return 0;
-
-}
-
-#ifdef __BIG_ENDIAN
-static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, size_t
- count, loff_t *ppos)
-{
- unsigned long p = *ppos;
-
- u32 *buffer, *dst;
- u32 __iomem *src;
- int c, i, cnt = 0, err = 0;
- unsigned long total_size;
-
- if (!info || !info->screen_base)
- return -ENODEV;
-
- if (info->state != FBINFO_STATE_RUNNING)
- return -EPERM;
-
- total_size = info->screen_size;
-
- if (total_size == 0)
- total_size = info->fix.smem_len;
-
- if (p >= total_size)
- return 0;
-
- if (count >= total_size)
- count = total_size;
-
- if (count + p > total_size)
- count = total_size - p;
-
- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- src = (u32 __iomem *) (info->screen_base + p);
-
- if (info->fbops->fb_sync)
- info->fbops->fb_sync(info);
-
- while (count) {
- c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
- dst = buffer;
- for (i = c >> 2; i--;) {
- *dst = fb_readl(src++);
- *dst =
- (*dst & 0xff00ff00 >> 8) |
- (*dst & 0x00ff00ff << 8);
- dst++;
- }
- if (c & 3) {
- u8 *dst8 = (u8 *) dst;
- u8 __iomem *src8 = (u8 __iomem *) src;
-
- for (i = c & 3; i--;) {
- if (i & 1) {
- *dst8++ = fb_readb(++src8);
- } else {
- *dst8++ = fb_readb(--src8);
- src8 += 2;
- }
- }
- src = (u32 __iomem *) src8;
- }
-
- if (copy_to_user(buf, buffer, c)) {
- err = -EFAULT;
- break;
- }
- *ppos += c;
- buf += c;
- cnt += c;
- count -= c;
- }
-
- kfree(buffer);
-
- return (err) ? err : cnt;
-}
-
-static ssize_t
-smtcfb_write(struct fb_info *info, const char __user *buf, size_t count,
- loff_t *ppos)
-{
- unsigned long p = *ppos;
-
- u32 *buffer, *src;
- u32 __iomem *dst;
- int c, i, cnt = 0, err = 0;
- unsigned long total_size;
-
- if (!info || !info->screen_base)
- return -ENODEV;
-
- if (info->state != FBINFO_STATE_RUNNING)
- return -EPERM;
-
- total_size = info->screen_size;
-
- if (total_size == 0)
- total_size = info->fix.smem_len;
-
- if (p > total_size)
- return -EFBIG;
-
- if (count > total_size) {
- err = -EFBIG;
- count = total_size;
- }
-
- if (count + p > total_size) {
- if (!err)
- err = -ENOSPC;
-
- count = total_size - p;
- }
-
- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- dst = (u32 __iomem *) (info->screen_base + p);
-
- if (info->fbops->fb_sync)
- info->fbops->fb_sync(info);
-
- while (count) {
- c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
- src = buffer;
-
- if (copy_from_user(src, buf, c)) {
- err = -EFAULT;
- break;
- }
-
- for (i = c >> 2; i--;) {
- fb_writel((*src & 0xff00ff00 >> 8) |
- (*src & 0x00ff00ff << 8), dst++);
- src++;
- }
- if (c & 3) {
- u8 *src8 = (u8 *) src;
- u8 __iomem *dst8 = (u8 __iomem *) dst;
-
- for (i = c & 3; i--;) {
- if (i & 1) {
- fb_writeb(*src8++, ++dst8);
- } else {
- fb_writeb(*src8++, --dst8);
- dst8 += 2;
- }
- }
- dst = (u32 __iomem *) dst8;
- }
-
- *ppos += c;
- buf += c;
- cnt += c;
- count -= c;
- }
-
- kfree(buffer);
-
- return (cnt) ? cnt : err;
-}
-#endif /* ! __BIG_ENDIAN */
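
The read/write paths above swap the two bytes inside each 16-bit lane when running big-endian. One detail worth knowing when reworking this code: in C the shift operators bind tighter than &, so an expression written as *dst & 0xff00ff00 >> 8 parses as *dst & (0xff00ff00 >> 8); the conventional form of the swap makes the grouping explicit. A minimal sketch:

/* swap the bytes within each 16-bit half of a 32-bit word */
static inline u32 swab_16bit_lanes(u32 v)
{
	return ((v & 0xff00ff00) >> 8) | ((v & 0x00ff00ff) << 8);
}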
-
-static void sm7xx_set_timing(struct smtcfb_info *sfb)
-{
- int i = 0, j = 0;
- u32 m_nScreenStride;
-
- dev_dbg(&sfb->pdev->dev,
- "sfb->width=%d sfb->height=%d "
- "sfb->fb.var.bits_per_pixel=%d sfb->hz=%d\n",
- sfb->width, sfb->height, sfb->fb.var.bits_per_pixel, sfb->hz);
-
- for (j = 0; j < numVGAModes; j++) {
- if (VGAMode[j].mmSizeX == sfb->width &&
- VGAMode[j].mmSizeY == sfb->height &&
- VGAMode[j].bpp == sfb->fb.var.bits_per_pixel &&
- VGAMode[j].hz == sfb->hz) {
-
- dev_dbg(&sfb->pdev->dev,
- "VGAMode[j].mmSizeX=%d VGAMode[j].mmSizeY=%d "
- "VGAMode[j].bpp=%d VGAMode[j].hz=%d\n",
- VGAMode[j].mmSizeX, VGAMode[j].mmSizeY,
- VGAMode[j].bpp, VGAMode[j].hz);
-
- dev_dbg(&sfb->pdev->dev, "VGAMode index=%d\n", j);
-
- smtc_mmiowb(0x0, 0x3c6);
-
- smtc_seqw(0, 0x1);
-
- smtc_mmiowb(VGAMode[j].Init_MISC, 0x3c2);
-
- /* init SEQ register SR00 - SR04 */
- for (i = 0; i < SIZE_SR00_SR04; i++)
- smtc_seqw(i, VGAMode[j].Init_SR00_SR04[i]);
-
- /* init SEQ register SR10 - SR24 */
- for (i = 0; i < SIZE_SR10_SR24; i++)
- smtc_seqw(i + 0x10,
- VGAMode[j].Init_SR10_SR24[i]);
-
- /* init SEQ register SR30 - SR75 */
- for (i = 0; i < SIZE_SR30_SR75; i++)
- if ((i + 0x30) != 0x62 &&
- (i + 0x30) != 0x6a &&
- (i + 0x30) != 0x6b)
- smtc_seqw(i + 0x30,
- VGAMode[j].Init_SR30_SR75[i]);
-
- /* init SEQ register SR80 - SR93 */
- for (i = 0; i < SIZE_SR80_SR93; i++)
- smtc_seqw(i + 0x80,
- VGAMode[j].Init_SR80_SR93[i]);
-
- /* init SEQ register SRA0 - SRAF */
- for (i = 0; i < SIZE_SRA0_SRAF; i++)
- smtc_seqw(i + 0xa0,
- VGAMode[j].Init_SRA0_SRAF[i]);
-
- /* init Graphic register GR00 - GR08 */
- for (i = 0; i < SIZE_GR00_GR08; i++)
- smtc_grphw(i, VGAMode[j].Init_GR00_GR08[i]);
-
- /* init Attribute register AR00 - AR14 */
- for (i = 0; i < SIZE_AR00_AR14; i++)
- smtc_attrw(i, VGAMode[j].Init_AR00_AR14[i]);
-
- /* init CRTC register CR00 - CR18 */
- for (i = 0; i < SIZE_CR00_CR18; i++)
- smtc_crtcw(i, VGAMode[j].Init_CR00_CR18[i]);
-
- /* init CRTC register CR30 - CR4D */
- for (i = 0; i < SIZE_CR30_CR4D; i++)
- smtc_crtcw(i + 0x30,
- VGAMode[j].Init_CR30_CR4D[i]);
-
- /* init CRTC register CR90 - CRA7 */
- for (i = 0; i < SIZE_CR90_CRA7; i++)
- smtc_crtcw(i + 0x90,
- VGAMode[j].Init_CR90_CRA7[i]);
- }
- }
- smtc_mmiowb(0x67, 0x3c2);
-
- /* set VPR registers */
- writel(0x0, sfb->vp_regs + 0x0C);
- writel(0x0, sfb->vp_regs + 0x40);
-
- /* set data width */
- m_nScreenStride =
- (sfb->width * sfb->fb.var.bits_per_pixel) / 64;
- switch (sfb->fb.var.bits_per_pixel) {
- case 8:
- writel(0x0, sfb->vp_regs + 0x0);
- break;
- case 16:
- writel(0x00020000, sfb->vp_regs + 0x0);
- break;
- case 24:
- writel(0x00040000, sfb->vp_regs + 0x0);
- break;
- case 32:
- writel(0x00030000, sfb->vp_regs + 0x0);
- break;
- }
- writel((u32) (((m_nScreenStride + 2) << 16) | m_nScreenStride),
- sfb->vp_regs + 0x10);
-
-}
-
-static void smtc_set_timing(struct smtcfb_info *sfb)
-{
- switch (sfb->chip_id) {
- case 0x710:
- case 0x712:
- case 0x720:
- sm7xx_set_timing(sfb);
- break;
- }
-}
-
-static void smtcfb_setmode(struct smtcfb_info *sfb)
-{
- switch (sfb->fb.var.bits_per_pixel) {
- case 32:
- sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
- sfb->fb.fix.line_length = sfb->fb.var.xres * 4;
- sfb->fb.var.red.length = 8;
- sfb->fb.var.green.length = 8;
- sfb->fb.var.blue.length = 8;
- sfb->fb.var.red.offset = 16;
- sfb->fb.var.green.offset = 8;
- sfb->fb.var.blue.offset = 0;
- break;
- case 24:
- sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
- sfb->fb.fix.line_length = sfb->fb.var.xres * 3;
- sfb->fb.var.red.length = 8;
- sfb->fb.var.green.length = 8;
- sfb->fb.var.blue.length = 8;
- sfb->fb.var.red.offset = 16;
- sfb->fb.var.green.offset = 8;
- sfb->fb.var.blue.offset = 0;
- break;
- case 8:
- sfb->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
- sfb->fb.fix.line_length = sfb->fb.var.xres;
- sfb->fb.var.red.length = 3;
- sfb->fb.var.green.length = 3;
- sfb->fb.var.blue.length = 2;
- sfb->fb.var.red.offset = 5;
- sfb->fb.var.green.offset = 2;
- sfb->fb.var.blue.offset = 0;
- break;
- case 16:
- default:
- sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
- sfb->fb.fix.line_length = sfb->fb.var.xres * 2;
- sfb->fb.var.red.length = 5;
- sfb->fb.var.green.length = 6;
- sfb->fb.var.blue.length = 5;
- sfb->fb.var.red.offset = 11;
- sfb->fb.var.green.offset = 5;
- sfb->fb.var.blue.offset = 0;
- break;
- }
-
- sfb->width = sfb->fb.var.xres;
- sfb->height = sfb->fb.var.yres;
- sfb->hz = 60;
- smtc_set_timing(sfb);
-}
-
-static int smtc_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
- /* sanity checks */
- if (var->xres_virtual < var->xres)
- var->xres_virtual = var->xres;
-
- if (var->yres_virtual < var->yres)
- var->yres_virtual = var->yres;
-
- /* set valid default bpp */
- if ((var->bits_per_pixel != 8) && (var->bits_per_pixel != 16) &&
- (var->bits_per_pixel != 24) && (var->bits_per_pixel != 32))
- var->bits_per_pixel = 16;
-
- return 0;
-}
-
-static int smtc_set_par(struct fb_info *info)
-{
- smtcfb_setmode(info->par);
-
- return 0;
-}
-
-static struct fb_ops smtcfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = smtc_check_var,
- .fb_set_par = smtc_set_par,
- .fb_setcolreg = smtc_setcolreg,
- .fb_blank = smtc_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_imageblit = cfb_imageblit,
- .fb_copyarea = cfb_copyarea,
-#ifdef __BIG_ENDIAN
- .fb_read = smtcfb_read,
- .fb_write = smtcfb_write,
-#endif
-};
-
-/*
- * alloc struct smtcfb_info and assign default values
- */
-static struct smtcfb_info *smtc_alloc_fb_info(struct pci_dev *pdev)
-{
- struct smtcfb_info *sfb;
-
- sfb = kzalloc(sizeof(*sfb), GFP_KERNEL);
-
- if (!sfb)
- return NULL;
-
- sfb->pdev = pdev;
-
- sfb->fb.flags = FBINFO_FLAG_DEFAULT;
- sfb->fb.fbops = &smtcfb_ops;
- sfb->fb.fix = smtcfb_fix;
- sfb->fb.var = smtcfb_var;
- sfb->fb.pseudo_palette = sfb->colreg;
- sfb->fb.par = sfb;
-
- return sfb;
-}
-
-/*
- * free struct smtcfb_info
- */
-static void smtc_free_fb_info(struct smtcfb_info *sfb)
-{
- kfree(sfb);
-}
-
-/*
- * Unmap in the memory mapped IO registers
- */
-
-static void smtc_unmap_mmio(struct smtcfb_info *sfb)
-{
- if (sfb && smtc_RegBaseAddress)
- smtc_RegBaseAddress = NULL;
-}
-
-/*
- * Map in the screen memory
- */
-
-static int smtc_map_smem(struct smtcfb_info *sfb,
- struct pci_dev *pdev, u_long smem_len)
-{
-
- sfb->fb.fix.smem_start = pci_resource_start(pdev, 0);
-
-#ifdef __BIG_ENDIAN
- if (sfb->fb.var.bits_per_pixel == 32)
- sfb->fb.fix.smem_start += 0x800000;
-#endif
-
- sfb->fb.fix.smem_len = smem_len;
-
- sfb->fb.screen_base = sfb->lfb;
-
- if (!sfb->fb.screen_base) {
- dev_err(&pdev->dev,
- "%s: unable to map screen memory\n", sfb->fb.fix.id);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/*
- * Unmap in the screen memory
- *
- */
-static void smtc_unmap_smem(struct smtcfb_info *sfb)
-{
- if (sfb && sfb->fb.screen_base) {
- iounmap(sfb->fb.screen_base);
- sfb->fb.screen_base = NULL;
- }
-}
-
-/*
- * We need to wake up the device and make sure its in linear memory mode.
- */
-static inline void sm7xx_init_hw(void)
-{
- outb_p(0x18, 0x3c4);
- outb_p(0x11, 0x3c5);
-}
-
-static int smtcfb_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct smtcfb_info *sfb;
- u_long smem_size = 0x00800000; /* default 8MB */
- int err;
- unsigned long mmio_base;
-
- dev_info(&pdev->dev, "Silicon Motion display driver.");
-
- err = pci_enable_device(pdev); /* enable SMTC chip */
- if (err)
- return err;
-
- sprintf(smtcfb_fix.id, "sm%Xfb", ent->device);
-
- sfb = smtc_alloc_fb_info(pdev);
-
- if (!sfb) {
- err = -ENOMEM;
- goto failed_free;
- }
-
- sfb->chip_id = ent->device;
-
- pci_set_drvdata(pdev, sfb);
-
- sm7xx_init_hw();
-
- /* get mode parameter from smtc_scr_info */
- if (smtc_scr_info.lfb_width != 0) {
- sfb->fb.var.xres = smtc_scr_info.lfb_width;
- sfb->fb.var.yres = smtc_scr_info.lfb_height;
- sfb->fb.var.bits_per_pixel = smtc_scr_info.lfb_depth;
- } else {
- /* default resolution 1024x600 16bit mode */
- sfb->fb.var.xres = SCREEN_X_RES;
- sfb->fb.var.yres = SCREEN_Y_RES;
- sfb->fb.var.bits_per_pixel = SCREEN_BPP;
- }
-
-#ifdef __BIG_ENDIAN
- if (sfb->fb.var.bits_per_pixel == 24)
- sfb->fb.var.bits_per_pixel = (smtc_scr_info.lfb_depth = 32);
-#endif
- /* Map address and memory detection */
- mmio_base = pci_resource_start(pdev, 0);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
-
- switch (sfb->chip_id) {
- case 0x710:
- case 0x712:
- sfb->fb.fix.mmio_start = mmio_base + 0x00400000;
- sfb->fb.fix.mmio_len = 0x00400000;
- smem_size = SM712_VIDEOMEMORYSIZE;
-#ifdef __BIG_ENDIAN
- sfb->lfb = ioremap(mmio_base, 0x00c00000);
-#else
- sfb->lfb = ioremap(mmio_base, 0x00800000);
-#endif
- sfb->mmio = (smtc_RegBaseAddress =
- sfb->lfb + 0x00700000);
- sfb->dp_regs = sfb->lfb + 0x00408000;
- sfb->vp_regs = sfb->lfb + 0x0040c000;
-#ifdef __BIG_ENDIAN
- if (sfb->fb.var.bits_per_pixel == 32) {
- sfb->lfb += 0x800000;
- dev_info(&pdev->dev, "sfb->lfb=%p", sfb->lfb);
- }
-#endif
- if (!smtc_RegBaseAddress) {
- dev_err(&pdev->dev,
- "%s: unable to map memory mapped IO!",
- sfb->fb.fix.id);
- err = -ENOMEM;
- goto failed_fb;
- }
-
- /* set MCLK = 14.31818 * (0x16 / 0x2) */
- smtc_seqw(0x6a, 0x16);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x62, 0x3e);
- /* enable PCI burst */
- smtc_seqw(0x17, 0x20);
- /* enable word swap */
-#ifdef __BIG_ENDIAN
- if (sfb->fb.var.bits_per_pixel == 32)
- smtc_seqw(0x17, 0x30);
-#endif
- break;
- case 0x720:
- sfb->fb.fix.mmio_start = mmio_base;
- sfb->fb.fix.mmio_len = 0x00200000;
- smem_size = SM722_VIDEOMEMORYSIZE;
- sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
- sfb->lfb = sfb->dp_regs + 0x00200000;
- sfb->mmio = (smtc_RegBaseAddress =
- sfb->dp_regs + 0x000c0000);
- sfb->vp_regs = sfb->dp_regs + 0x800;
-
- smtc_seqw(0x62, 0xff);
- smtc_seqw(0x6a, 0x0d);
- smtc_seqw(0x6b, 0x02);
- break;
- default:
- dev_err(&pdev->dev,
- "No valid Silicon Motion display chip was detected!");
-
- goto failed_fb;
- }
-
- /* can support 32 bpp */
- if (15 == sfb->fb.var.bits_per_pixel)
- sfb->fb.var.bits_per_pixel = 16;
-
- sfb->fb.var.xres_virtual = sfb->fb.var.xres;
- sfb->fb.var.yres_virtual = sfb->fb.var.yres;
- err = smtc_map_smem(sfb, pdev, smem_size);
- if (err)
- goto failed;
-
- smtcfb_setmode(sfb);
-
- err = register_framebuffer(&sfb->fb);
- if (err < 0)
- goto failed;
-
- dev_info(&pdev->dev,
- "Silicon Motion SM%X Rev%X primary display mode %dx%d-%d Init Complete.",
- sfb->chip_id, sfb->chip_rev_id, sfb->fb.var.xres,
- sfb->fb.var.yres, sfb->fb.var.bits_per_pixel);
-
- return 0;
-
-failed:
- dev_err(&pdev->dev, "Silicon Motion, Inc. primary display init fail.");
-
- smtc_unmap_smem(sfb);
- smtc_unmap_mmio(sfb);
-failed_fb:
- smtc_free_fb_info(sfb);
-
-failed_free:
- pci_disable_device(pdev);
-
- return err;
-}
-
-/*
- * 0x710 (LynxEM)
- * 0x712 (LynxEM+)
- * 0x720 (Lynx3DM, Lynx3DM+)
- */
-static const struct pci_device_id smtcfb_pci_table[] = {
- { PCI_DEVICE(0x126f, 0x710), },
- { PCI_DEVICE(0x126f, 0x712), },
- { PCI_DEVICE(0x126f, 0x720), },
- {0,}
-};
-
-static void smtcfb_pci_remove(struct pci_dev *pdev)
-{
- struct smtcfb_info *sfb;
-
- sfb = pci_get_drvdata(pdev);
- smtc_unmap_smem(sfb);
- smtc_unmap_mmio(sfb);
- unregister_framebuffer(&sfb->fb);
- smtc_free_fb_info(sfb);
-}
-
-#ifdef CONFIG_PM
-static int smtcfb_pci_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct smtcfb_info *sfb;
-
- sfb = pci_get_drvdata(pdev);
-
- /* set the hw in sleep mode use external clock and self memory refresh
- * so that we can turn off internal PLLs later on
- */
- smtc_seqw(0x20, (smtc_seqr(0x20) | 0xc0));
- smtc_seqw(0x69, (smtc_seqr(0x69) & 0xf7));
-
- console_lock();
- fb_set_suspend(&sfb->fb, 1);
- console_unlock();
-
- /* additionally turn off all function blocks including internal PLLs */
- smtc_seqw(0x21, 0xff);
-
- return 0;
-}
-
-static int smtcfb_pci_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct smtcfb_info *sfb;
-
- sfb = pci_get_drvdata(pdev);
-
- /* reinit hardware */
- sm7xx_init_hw();
- switch (sfb->chip_id) {
- case 0x710:
- case 0x712:
- /* set MCLK = 14.31818 * (0x16 / 0x2) */
- smtc_seqw(0x6a, 0x16);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x62, 0x3e);
- /* enable PCI burst */
- smtc_seqw(0x17, 0x20);
-#ifdef __BIG_ENDIAN
- if (sfb->fb.var.bits_per_pixel == 32)
- smtc_seqw(0x17, 0x30);
-#endif
- break;
- case 0x720:
- smtc_seqw(0x62, 0xff);
- smtc_seqw(0x6a, 0x0d);
- smtc_seqw(0x6b, 0x02);
- break;
- }
-
- smtc_seqw(0x34, (smtc_seqr(0x34) | 0xc0));
- smtc_seqw(0x33, ((smtc_seqr(0x33) | 0x08) & 0xfb));
-
- smtcfb_setmode(sfb);
-
- console_lock();
- fb_set_suspend(&sfb->fb, 0);
- console_unlock();
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(sm7xx_pm_ops, smtcfb_pci_suspend, smtcfb_pci_resume);
-#define SM7XX_PM_OPS (&sm7xx_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define SM7XX_PM_OPS NULL
-
-#endif /* !CONFIG_PM */
-
-static struct pci_driver smtcfb_driver = {
- .name = "smtcfb",
- .id_table = smtcfb_pci_table,
- .probe = smtcfb_pci_probe,
- .remove = smtcfb_pci_remove,
- .driver.pm = SM7XX_PM_OPS,
-};
-
-module_pci_driver(smtcfb_driver);
-
-MODULE_AUTHOR("Siliconmotion ");
-MODULE_DESCRIPTION("Framebuffer driver for SMI Graphic Cards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index e2f597ee6261..1ca91f7092b1 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -851,75 +851,75 @@ static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr,
* Declare the attributes.
*/
static struct kobj_attribute keymap_attribute =
- __ATTR(keymap, ROOT_W, keymap_show, keymap_store);
+ __ATTR(keymap, S_IWUSR|S_IRUGO, keymap_show, keymap_store);
static struct kobj_attribute silent_attribute =
- __ATTR(silent, USER_W, NULL, silent_store);
+ __ATTR(silent, S_IWUGO, NULL, silent_store);
static struct kobj_attribute synth_attribute =
- __ATTR(synth, USER_RW, synth_show, synth_store);
+ __ATTR(synth, S_IWUGO|S_IRUGO, synth_show, synth_store);
static struct kobj_attribute synth_direct_attribute =
- __ATTR(synth_direct, USER_W, NULL, synth_direct_store);
+ __ATTR(synth_direct, S_IWUGO, NULL, synth_direct_store);
static struct kobj_attribute version_attribute =
__ATTR_RO(version);
static struct kobj_attribute delimiters_attribute =
- __ATTR(delimiters, USER_RW, punc_show, punc_store);
+ __ATTR(delimiters, S_IWUGO|S_IRUGO, punc_show, punc_store);
static struct kobj_attribute ex_num_attribute =
- __ATTR(ex_num, USER_RW, punc_show, punc_store);
+ __ATTR(ex_num, S_IWUGO|S_IRUGO, punc_show, punc_store);
static struct kobj_attribute punc_all_attribute =
- __ATTR(punc_all, USER_RW, punc_show, punc_store);
+ __ATTR(punc_all, S_IWUGO|S_IRUGO, punc_show, punc_store);
static struct kobj_attribute punc_most_attribute =
- __ATTR(punc_most, USER_RW, punc_show, punc_store);
+ __ATTR(punc_most, S_IWUGO|S_IRUGO, punc_show, punc_store);
static struct kobj_attribute punc_some_attribute =
- __ATTR(punc_some, USER_RW, punc_show, punc_store);
+ __ATTR(punc_some, S_IWUGO|S_IRUGO, punc_show, punc_store);
static struct kobj_attribute repeats_attribute =
- __ATTR(repeats, USER_RW, punc_show, punc_store);
+ __ATTR(repeats, S_IWUGO|S_IRUGO, punc_show, punc_store);
static struct kobj_attribute attrib_bleep_attribute =
- __ATTR(attrib_bleep, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(attrib_bleep, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute bell_pos_attribute =
- __ATTR(bell_pos, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(bell_pos, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute bleep_time_attribute =
- __ATTR(bleep_time, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(bleep_time, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute bleeps_attribute =
- __ATTR(bleeps, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(bleeps, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute cursor_time_attribute =
- __ATTR(cursor_time, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(cursor_time, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute key_echo_attribute =
- __ATTR(key_echo, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(key_echo, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute no_interrupt_attribute =
- __ATTR(no_interrupt, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(no_interrupt, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punc_level_attribute =
- __ATTR(punc_level, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(punc_level, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute reading_punc_attribute =
- __ATTR(reading_punc, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(reading_punc, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute say_control_attribute =
- __ATTR(say_control, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(say_control, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute say_word_ctl_attribute =
- __ATTR(say_word_ctl, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(say_word_ctl, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute spell_delay_attribute =
- __ATTR(spell_delay, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(spell_delay, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
/*
* These attributes are i18n related.
*/
static struct kobj_attribute announcements_attribute =
- __ATTR(announcements, USER_RW, message_show, message_store);
+ __ATTR(announcements, S_IWUGO|S_IRUGO, message_show, message_store);
static struct kobj_attribute characters_attribute =
- __ATTR(characters, USER_RW, chars_chartab_show, chars_chartab_store);
+ __ATTR(characters, S_IWUGO|S_IRUGO, chars_chartab_show, chars_chartab_store);
static struct kobj_attribute chartab_attribute =
- __ATTR(chartab, USER_RW, chars_chartab_show, chars_chartab_store);
+ __ATTR(chartab, S_IWUGO|S_IRUGO, chars_chartab_show, chars_chartab_store);
static struct kobj_attribute ctl_keys_attribute =
- __ATTR(ctl_keys, USER_RW, message_show, message_store);
+ __ATTR(ctl_keys, S_IWUGO|S_IRUGO, message_show, message_store);
static struct kobj_attribute colors_attribute =
- __ATTR(colors, USER_RW, message_show, message_store);
+ __ATTR(colors, S_IWUGO|S_IRUGO, message_show, message_store);
static struct kobj_attribute formatted_attribute =
- __ATTR(formatted, USER_RW, message_show, message_store);
+ __ATTR(formatted, S_IWUGO|S_IRUGO, message_show, message_store);
static struct kobj_attribute function_names_attribute =
- __ATTR(function_names, USER_RW, message_show, message_store);
+ __ATTR(function_names, S_IWUGO|S_IRUGO, message_show, message_store);
static struct kobj_attribute key_names_attribute =
- __ATTR(key_names, USER_RW, message_show, message_store);
+ __ATTR(key_names, S_IWUGO|S_IRUGO, message_show, message_store);
static struct kobj_attribute states_attribute =
- __ATTR(states, USER_RW, message_show, message_store);
+ __ATTR(states, S_IWUGO|S_IRUGO, message_show, message_store);
/*
* Create groups of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index 4e18fb405344..c62d74c47906 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -40,7 +40,7 @@ const struct old_serial_port *spk_serial_init(int index)
cval |= UART_LCR_EPAR;
if (synth_request_region(ser->port, 8)) {
/* try to take it back. */
- printk(KERN_INFO "Ports not available, trying to steal them\n");
+ pr_info("Ports not available, trying to steal them\n");
__release_region(&ioport_resource, ser->port, 8);
err = synth_request_region(ser->port, 8);
if (err) {
@@ -106,7 +106,7 @@ static void start_serial_interrupt(int irq)
"serial", (void *) synth_readbuf_handler);
if (rv)
- printk(KERN_ERR "Unable to request Speakup serial I R Q\n");
+ pr_err("Unable to request Speakup serial I R Q\n");
/* Set MCR */
outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2,
speakup_info.port_tts + UART_MCR);
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index 0126f714821a..a7bcceec436a 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -12,8 +12,6 @@
/* proc permissions */
#define USER_R (S_IFREG|S_IRUGO)
#define USER_W (S_IFREG|S_IWUGO)
-#define USER_RW (S_IFREG|S_IRUGO|S_IWUGO)
-#define ROOT_W (S_IFREG|S_IRUGO|S_IWUSR)
#define TOGGLE_0 .u.n = {NULL, 0, 0, 1, 0, 0, NULL }
#define TOGGLE_1 .u.n = {NULL, 1, 0, 1, 0, 0, NULL }
diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
index 1c8a7f4a0ef5..e7dfa434bd96 100644
--- a/drivers/staging/speakup/speakup_acntpc.c
+++ b/drivers/staging/speakup/speakup_acntpc.c
@@ -62,28 +62,28 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/acntpc.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index 22a8b7291098..c7f014ed9628 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -47,28 +47,28 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/acntsa.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+	__ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+	__ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+	__ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+	__ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index 70cf1591676a..38c8c2221e4e 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -53,30 +53,30 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/apollo.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute lang_attribute =
- __ATTR(lang, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(lang, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
- __ATTR(voice, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(voice, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_audptr.c b/drivers/staging/speakup/speakup_audptr.c
index 61a3ceeb0d3a..de5b4a5f43b6 100644
--- a/drivers/staging/speakup/speakup_audptr.c
+++ b/drivers/staging/speakup/speakup_audptr.c
@@ -49,30 +49,30 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/audptr.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_bns.c b/drivers/staging/speakup/speakup_bns.c
index 4bfe3d458dc0..4939e8c7272e 100644
--- a/drivers/staging/speakup/speakup_bns.c
+++ b/drivers/staging/speakup/speakup_bns.c
@@ -44,28 +44,28 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/bns.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index d306e010d3ea..b17af9803929 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -70,30 +70,30 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/decext.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
- __ATTR(voice, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(voice, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index ea6b72d40b31..cfa4bc032358 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -164,30 +164,30 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/decpc.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
- __ATTR(voice, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(voice, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index 15fdec323a70..1fcae55dabba 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -70,30 +70,30 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/dectlk.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
- __ATTR(voice, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(voice, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
@@ -270,10 +270,12 @@ static void do_catch_up(struct spk_synth *synth)
if (jiffies >= jiff_max) {
if (!in_escape)
spk_serial_out(PROCSPEECH);
- spin_lock_irqsave(&speakup_info.spinlock, flags);
+ spin_lock_irqsave(&speakup_info.spinlock,
+ flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock,
+ flags);
schedule_timeout(msecs_to_jiffies
(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index 1feb0fba1b43..5c6c34191e8d 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -67,34 +67,34 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/dtlk.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute freq_attribute =
- __ATTR(freq, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(freq, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
- __ATTR(voice, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(voice, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index 4a24b9c1e8e3..e19e9994bbb5 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -46,28 +46,28 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/dummy.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index 2f2fe5eeff63..9c246d701a95 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -59,24 +59,24 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/keypc.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_ltlk.c b/drivers/staging/speakup/speakup_ltlk.c
index 326f94d6b079..c9be6f52c254 100644
--- a/drivers/staging/speakup/speakup_ltlk.c
+++ b/drivers/staging/speakup/speakup_ltlk.c
@@ -50,34 +50,34 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/ltlk.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute freq_attribute =
- __ATTR(freq, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(freq, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
- __ATTR(voice, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(voice, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index 243c3d52fe5e..ee6089502a96 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -61,41 +61,41 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/soft.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute freq_attribute =
- __ATTR(freq, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(freq, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
- __ATTR(voice, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(voice, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
/*
* We should uncomment the following definition, when we agree on a
* method of passing a language designation to the software synthesizer.
* static struct kobj_attribute lang_attribute =
- * __ATTR(lang, USER_RW, spk_var_show, spk_var_store);
+ * __ATTR(lang, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
*/
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index e74f85620c68..711cf114df83 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -48,30 +48,30 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/spkout.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index 5a29b9fcc930..3f0be04df071 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -44,28 +44,28 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/txprt.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, USER_RW, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 1b6d581c438b..b5e74e9de6bd 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -17,7 +17,7 @@ menuconfig TIDSPBRIDGE
config TIDSPBRIDGE_DVFS
bool "Enable Bridge Dynamic Voltage and Frequency Scaling (DVFS)"
- depends on TIDSPBRIDGE && OMAP_PM_SRF && CPU_FREQ
+ depends on TIDSPBRIDGE && CPU_FREQ
help
DVFS allows DSP Bridge to initiate the operating point change to
scale the chip voltage and frequency in order to match the
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index e322fb7aebe1..c2829aa7780f 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -2127,7 +2127,7 @@ void dump_dl_modules(struct bridge_dev_context *bridge_context)
u32 module_size;
u32 module_struct_size = 0;
u32 sect_ndx;
- char *sect_str ;
+ char *sect_str;
int status = 0;
status = dev_get_intf_fxns(dev_object, &intf_fxns);
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index b770b2281ce8..8945b4e3a2a6 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -280,9 +280,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
/* Wait until the state has moved to ON */
- while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
- OMAP_INTRANSITION_MASK)
- ;
+	while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+			OMAP_INTRANSITION_MASK);
/* Disable Automatic transition */
(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
@@ -419,7 +418,8 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* Assert RST1 i.e only the RST only for DSP megacell */
if (!status) {
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
- OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
+ OMAP3430_RST1_IVA2_MASK,
+ OMAP3430_IVA2_MOD,
OMAP2_RM_RSTCTRL);
/* Mask address with 1K for compatibility */
@@ -432,7 +432,8 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* Reset and Unreset the RST2, so that BOOTADDR is copied to
* IVA2 SYSC register */
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
- OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+ OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD,
+ OMAP2_RM_RSTCTRL);
udelay(100);
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -446,7 +447,8 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* Only make TLB entry if both addresses are non-zero */
for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
entry_ndx++) {
- struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
+ struct bridge_ioctl_extproc *e =
+ &dev_context->atlb_entry[entry_ndx];
struct hw_mmu_map_attrs_t map_attrs = {
.endianism = e->endianism,
.element_size = e->elem_size,
@@ -641,8 +643,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
/* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
* before turning off the clocks.. This is to ensure that there are no
* pending L3 or other transactons from IVA2 */
- dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
- OMAP_POWERSTATEST_MASK;
+	dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
+			OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
if (dsp_pwr_state != PWRDM_POWER_OFF) {
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -682,8 +684,9 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
dev_context->mbox = NULL;
}
/* Reset IVA2 clocks*/
- (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
- OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+ (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK |
+ OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK,
+ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
dsp_clock_disable_all(dev_context->dsp_per_clks);
dsp_clk_disable(DSP_CLK_IVA2);
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index 1862afd80dc1..657104f37f7d 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -99,7 +99,8 @@ int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
return -EPERM;
}
pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
- OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
+ OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
}
if (timeout == 0) {
pr_err("%s: Timed out waiting for DSP off mode\n", __func__);
@@ -209,7 +210,8 @@ int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd,
return -EPERM;
}
pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
- OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
+ OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
}
if (!timeout) {
@@ -355,7 +357,7 @@ int pre_scale_dsp(struct bridge_dev_context *dev_context, void *pargs)
(dev_context->brd_state == BRD_DSP_HIBERNATION)) {
dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n");
return 0;
- } else if ((dev_context->brd_state == BRD_RUNNING)) {
+ } else if (dev_context->brd_state == BRD_RUNNING) {
/* Send a prenotification to DSP */
dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__);
sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY);
@@ -396,13 +398,14 @@ int post_scale_dsp(struct bridge_dev_context *dev_context,
io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n",
__func__);
- } else if ((dev_context->brd_state == BRD_RUNNING)) {
+ } else if (dev_context->brd_state == BRD_RUNNING) {
/* Update the OPP value in shared memory */
io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
/* Send a post notification to DSP */
sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_POSTNOTIFY);
- dev_dbg(bridge, "OPP: %s wrote to shm. Sent post notification "
- "to DSP\n", __func__);
+ dev_dbg(bridge,
+ "OPP: %s wrote to shm. Sent post notification to DSP\n",
+ __func__);
} else {
status = -EPERM;
}
diff --git a/drivers/staging/tidspbridge/dynload/tramp.c b/drivers/staging/tidspbridge/dynload/tramp.c
index 404af1895980..5f0431305fbb 100644
--- a/drivers/staging/tidspbridge/dynload/tramp.c
+++ b/drivers/staging/tidspbridge/dynload/tramp.c
@@ -503,7 +503,7 @@ static int priv_tgt_img_gen(struct dload_state *dlthis, u32 base,
* TRAMPOLINES ARE TREATED AS 2ND PASS even though this is really
* the first (and only) relocation that will be performed on them.
*/
-static int priv_pkt_relo(struct dload_state *dlthis, tgt_au_t * data,
+static int priv_pkt_relo(struct dload_state *dlthis, tgt_au_t *data,
struct reloc_record_t *rp[], u32 relo_count)
{
int ret_val = 1;
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index 190ca3fe7327..2ae48c9a9362 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -101,14 +101,14 @@ static int dcd_uuid_from_string(char *sz_uuid, struct dsp_uuid *uuid_obj)
* if the converted value doesn't fit in u32. So, convert the
* last six bytes to u64 and memcpy what is needed
*/
- if(sscanf(sz_uuid, "%8x%c%4hx%c%4hx%c%2hhx%2hhx%c%llx",
+ if (sscanf(sz_uuid, "%8x%c%4hx%c%4hx%c%2hhx%2hhx%c%llx",
&uuid_tmp.data1, &c, &uuid_tmp.data2, &c,
&uuid_tmp.data3, &c, &uuid_tmp.data4,
&uuid_tmp.data5, &c, &t) != 10)
return -EINVAL;
t = cpu_to_be64(t);
- memcpy(&uuid_tmp.data6[0], ((char*)&t) + 2, 6);
+ memcpy(&uuid_tmp.data6[0], ((char *)&t) + 2, 6);
*uuid_obj = uuid_tmp;
return 0;
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index be26917a6896..757ae20b38ee 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -738,7 +738,7 @@ void mem_ext_phys_pool_release(void)
* Allocate physically contiguous, uncached memory from external memory pool
*/
-static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 * phys_addr)
+static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
u32 new_alloc_ptr;
u32 offset;
diff --git a/drivers/staging/tidspbridge/rmgr/mgr.c b/drivers/staging/tidspbridge/rmgr/mgr.c
index b32ba0ad2a07..93e6282f122b 100644
--- a/drivers/staging/tidspbridge/rmgr/mgr.c
+++ b/drivers/staging/tidspbridge/rmgr/mgr.c
@@ -266,15 +266,15 @@ int mgr_enum_processor_info(u32 processor_id,
* this is a clumsy overwrite */
processor_info->processor_type = DSPTYPE64;
} else {
- dev_dbg(bridge, "%s: Failed to get DCD processor info "
- "%x\n", __func__, status2);
+ dev_dbg(bridge, "%s: Failed to get DCD processor info %x\n",
+ __func__, status2);
status = -EPERM;
}
}
*pu_num_procs = proc_index;
if (proc_detect == false) {
- dev_dbg(bridge, "%s: Failed to get proc info from DCD, so use "
- "CFG registry\n", __func__);
+ dev_dbg(bridge, "%s: Failed to get proc info from DCD, so use CFG registry\n",
+ __func__);
processor_info->processor_type = DSPTYPE64;
}
func_end:
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c
index ca3805046a73..5ac507ccd19d 100644
--- a/drivers/staging/tidspbridge/rmgr/nldr.c
+++ b/drivers/staging/tidspbridge/rmgr/nldr.c
@@ -623,7 +623,7 @@ void nldr_delete(struct nldr_object *nldr_obj)
* ======== nldr_get_fxn_addr ========
*/
int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
- char *str_fxn, u32 * addr)
+ char *str_fxn, u32 *addr)
{
struct dbll_sym_val *dbll_sym;
struct nldr_object *nldr_obj;
@@ -1751,9 +1751,8 @@ static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
}
if (ref_count && (*ref_count > 0)) {
*ref_count -= 1;
- if (other_ref) {
+ if (other_ref)
*other_ref -= 1;
- }
}
if (ref_count && *ref_count == 0) {
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index 87dfa92ab45b..9d3044a384ee 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -246,7 +246,7 @@ static void fill_stream_def(struct node_object *hnode,
struct node_strmdef *pstrm_def,
struct dsp_strmattr *pattrs);
static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
-static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
+static int get_fxn_address(struct node_object *hnode, u32 *fxn_addr,
u32 phase);
static int get_node_props(struct dcd_manager *hdcd_mgr,
struct node_object *hnode,
@@ -406,7 +406,7 @@ int node_allocate(struct proc_object *hprocessor,
/* check for page aligned Heap size */
if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
- pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
+ pr_err("%s: node heap size not aligned to 4K, size = 0x%x\n",
__func__, attr_in->heap_size);
status = -EINVAL;
} else {
@@ -703,9 +703,9 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
pattr = &node_dfltbufattrs; /* set defaults */
status = proc_get_processor_id(pnode->processor, &proc_id);
- if (proc_id != DSP_UNIT) {
+ if (proc_id != DSP_UNIT)
goto func_end;
- }
+
/* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
* virt address, so set this info in this node's translator
* object for future ref. If MEM_GETVIRTUALSEGID then retrieve
@@ -886,11 +886,10 @@ int node_connect(struct node_object *node1, u32 stream1,
if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY)
return -EPERM; /* illegal stream mode */
- if (node1_type != NODE_GPP) {
+ if (node1_type != NODE_GPP)
hnode_mgr = node1->node_mgr;
- } else {
+ else
hnode_mgr = node2->node_mgr;
- }
/* Enter critical section */
mutex_lock(&hnode_mgr->node_mgr_lock);
@@ -1576,7 +1575,7 @@ func_end:
* Purpose:
* Frees the message buffer.
*/
-int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
+int node_free_msg_buf(struct node_object *hnode, u8 *pbuffer,
struct dsp_bufferattr *pattr)
{
struct node_object *pnode = (struct node_object *)hnode;
@@ -2322,7 +2321,8 @@ int node_terminate(struct node_object *hnode, int *pstatus)
if (!hdeh_mgr)
goto func_cont;
- bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
+ bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
+ DSP_EXCEPTIONABORT);
}
}
func_cont:
@@ -2640,7 +2640,7 @@ static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
* Purpose:
* Retrieves the address for create, execute or delete phase for a node.
*/
-static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
+static int get_fxn_address(struct node_object *hnode, u32 *fxn_addr,
u32 phase)
{
char *pstr_fxn_name = NULL;
diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt
new file mode 100644
index 000000000000..8d078e4de3b8
--- /dev/null
+++ b/drivers/staging/unisys/Documentation/overview.txt
@@ -0,0 +1,177 @@
+
+Overview
+
+This document describes the driver set for Unisys Secure Partitioning (s-Par®).
+
+s-Par is firmware that provides hardware partitioning capabilities for
+splitting large-scale Intel x86 servers into multiple isolated
+partitions. s-Par provides a set of para-virtualized device drivers to
+allow guest partitions on the same server to share devices that would
+normally be unsharable; specifically, PCI network interfaces and host
+bus adapters that do not support shared access via SR-IOV. The shared
+device is owned and managed by a small, single-purpose service
+partition, which communicates with each guest partition sharing that
+device through an area of shared memory called a channel. Additional
+drivers provide support interfaces for communicating with s-Par
+services, logging and diagnostics, and accessing the Linux console
+from the s-Par user interface.
+
+The driver stack consists of a set of support modules, a set of bus
+modules, and a set of device driver modules. The support modules
+handle a number of common functions across each of the other
+drivers. The bus modules provide organization for the device driver
+modules, which provide the shared device functionality.
+
+These drivers are for the Unisys virtual PCI hardware model where the
+hypervisor need not intervene (other than normal interrupt handling)
+in the interactions between the client drivers and the virtual adapter
+firmware in the adapter service partition.
+
+Driver Descriptions
+
+Device Modules
+
+The modules in this section handle shared devices and the virtual
+buses required to support them. These modules use functions in and
+depend on the modules described in the support modules section.
+
+visorchipset
+
+The visorchipset module receives device creation and destruction
+events from the Command service partition of s-Par, and controls
+registration of shared device drivers with the s-Par driver core.
+The events received are used to populate other s-Par
+modules with their assigned shared devices. Visorchipset is required
+for shared device drivers to function properly. Visorchipset also
+stores information for handling dump disk device creation during
+kdump.
+
+In operation, the visorchipset module processes device creation and
+destruction messages sent by s-Par's Command service partition through
+a channel. These messages result in creation (or destruction) of each
+virtual bus and virtual device. Each bus and device is also associated
+with a communication channel, which is used to communicate with one or
+more IO service partitions to perform device IO on behalf of the
+guest.
+
+virthba
+
+The virthba module provides access to a shared SCSI host bus adapter
+and one or more disk devices, by proxying SCSI commands between the
+guest and the service partition that owns the shared SCSI adapter,
+using a channel between the guest and the service partition. The disks
+that appear on the shared bus are defined by the s-Par configuration
+and enforced by the service partition, while the guest driver handles
+sending commands and handling responses. Each disk is shared as a
+whole to a guest. Sharing the bus adapter in this way provides
+resiliency; should the device encounter an error, only the service
+partition is rebooted, and the device is reinitialized. This allows
+guests to continue running and to recover from the error.
+
+virtnic
+
+The virtnic module provides a paravirtualized network interface to a
+guest by proxying buffer information between the guest and the service
+partition that owns the shared network interface, using a channel
+between the guest and the service partition. The connectivity of this
+interface with the shared interface and possibly other guest
+partitions is defined by the s-Par configuration and enforced by the
+service partition; the guest driver handles communication and link
+status.
+
+visorserial
+
+The visorserial module allows the console of the Linux guest to be
+accessed via the s-Par console serial channel. It creates devices in
+/dev/visorserialclientX which behave like a serial terminal and are
+connected to the diagnostics system in s-Par. By assigning a getty to
+the terminal in the guest, a user could log into and access the guest
+from the s-Par diagnostics SWITCH RUN terminal.
+
+visorbus
+
+The visorbus module handles the bus functions for most functional
+drivers except visorserial, visordiag, virthba, and virtnic. It
+maintains the sysfs subtree /sys/devices/visorbus*/. It is responsible
+for creation and destruction of the devices on its bus.
+
+visorclientbus
+
+The visorclientbus module forwards the bus functions for virthba and
+virtnic to the virtpci driver.
+
+virtpci
+
+The virtpci module handles the bus functions for virthba and virtnic.
+
+s-Par Integration Modules
+
+The modules in this section provide integration with s-Par guest
+partition services like diagnostics and remote desktop. These modules
+depend on functions in the modules described in the support modules
+section.
+
+visorvideoclient
+
+The visorvideoclient module provides functionality for video support
+for the Unisys s-Par Partition Desktop application. The guest OS must
+also have the UEFI GOP protocol enabled for the partition desktop to
+function.
+
+visorconinclient
+
+The visorconinclient module provides keyboard and mouse support for
+the Unisys s-Par Partition Desktop application.
+
+sparstop
+
+The sparstop module handles requests from the Unisys s-Par platform to
+shut down the Linux guest. It allows a program on the guest to perform
+clean-up functions on the guest before the guest is shut down or
+rebooted using ACPI.
+
+visordiag
+
+This driver provides the ability for the guest to write information
+into the s-Par diagnostics subsystem. It creates a set of devices
+named /dev/visordiag.X which can be written to by the guest to add
+text to the s-Par system log.
+
+Support Modules
+
+The modules described in this section provide functions and
+abstractions to support the modules described in the previous
+sections, to avoid having duplicated functionality.
+
+visornoop
+
+The visornoop module is a placeholder that responds to device
+create/destroy messages for devices not currently used by Linux guests.
+
+visoruislib
+
+The visoruislib module is a support library, used to handle requests
+from virtpci.
+
+visorchannelstub
+
+The visorchannelstub module provides support routines for storing and
+retrieving data from a channel.
+
+visorchannel
+
+The visorchannel module is a support library that abstracts reading
+and writing a channel in memory.
+
+visorutil
+
+The visorutil module is a support library required by all other s-Par
+driver modules. Among its features it abstracts reading, writing, and
+manipulating a block of memory.
+
+Minimum Required Driver Set
+
+The drivers required to boot a Linux guest are visorchipset, visorbus,
+visorvideoclient, visorconinclient, visoruislib, visorchannelstub,
+visorchannel, and visorutil. The other drivers are required by the
+product configurations that are currently being marketed.
diff --git a/drivers/staging/unisys/Documentation/proc-entries.txt b/drivers/staging/unisys/Documentation/proc-entries.txt
new file mode 100644
index 000000000000..426f92b1c577
--- /dev/null
+++ b/drivers/staging/unisys/Documentation/proc-entries.txt
@@ -0,0 +1,93 @@
+s-Par Proc Entries
+This document describes the proc entries created by the Unisys s-Par modules.
+
+Support Module Entries
+These entries are provided primarily for debugging.
+
+/proc/uislib/info: This entry contains debugging information for the
+uislib module, including bus information and memory usage.
+
+/proc/visorchipset/controlvm: This directory contains debugging
+entries for the controlvm channel used by visorchipset.
+
+/proc/uislib/platform: This entry displays the platform number of
+this node within the system. For some guests, this value may be
+invalid.
+
+/proc/visorchipset/chipsetready: This entry is written to by scripts
+to signify that all user-level activity has completed, so that the
+guest can be considered running and is shown as running in the s-Par
+UI.
+
+Device Entries
+These entries provide status of the devices shared by a service partition.
+
+/proc/uislib/vbus: this is a directory containing entries for each
+virtual bus. Each numbered sub-directory contains an info entry, which
+describes the devices that appear on that bus.
+
+/proc/uislib/cycles_before_wait: This entry is used to tune
+performance by setting the number of cycles we wait before going idle
+when in polling mode. A longer time will reduce message latency but
+spend more processing time polling.
+
+/proc/uislib/smart_wakeup: This entry is used to tune performance by
+enabling or disabling smart wakeup.
+
+/proc/virthba/info: This entry contains debugging information for the
+virthba module, including interrupt information and memory usage.
+
+/proc/virthba/enable_ints: This entry controls interrupt use by the
+virthba module. Writing a 0 to this entry will disable interrupts.
+
+/proc/virtnic/info: This entry contains debugging information for the
+virtnic module, including interrupt information, send and receive
+counts, and other device information.
+
+/proc/virtnic/ethX: This is a directory containing entries for each
+virtual NIC. Each named subdirectory contains two entries,
+clientstring and zone.
+
+/proc/virtpci/info: This entry contains debugging information for the
+virtpci module, including virtual PCI bus information and device
+locations.
+
+/proc/virtnic/enable_ints: This entry controls interrupt use by the
+virtnic module. Writing a 0 to this entry will disable interrupts.
+
+Visorconinclient, visordiag, visornoop, visorserialclient, and
+visorvideoclient Entries
+
+The entries in proc for these modules all follow the same
+pattern. Each module has its own proc directory with the same name,
+e.g. visordiag presents a /proc/visordiag directory. Inside the
+module's directory is a device directory, which contains one numbered
+directory for each device provided by that module. Each device has a
+diag entry that presents the device number and visorbus name for that
+device. The module directory also has a driver/diag entry, which
+reports the corresponding s-Par version number of the driver.
+
+Automated Installation Entries
+
+These entries are used to pass information between the s-Par platform
+and the Linux-based installation and recovery tool. These values are
+read/write; however, the guest can only reset them to 0 or report an
+error status through the installer entry. The values are only set via
+s-Par's firmware interface, to help prevent accidentally booting into
+the tool.
+
+/proc/visorchipset/boottotool: This entry instructs s-Par that the
+next reboot will launch the installation and recovery tool. If set to
+0, the next boot will happen according to the UEFI boot manager
+settings.
+
+/proc/visorchipset/toolaction: This entry indicates the installation
+and recovery tool mode requested for the next boot.
+
+/proc/visorchipset/installer: This entry is used by the installation
+and recovery tool to pass status and result information back to the
+s-Par firmware.
+
+/proc/visorchipset/partition: This directory contains the guest
+partition configuration data for each virtual bus, for use during
+installation and at runtime for s-Par service partitions.
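All of the entries above are read and written as plain text. As a minimal
sketch (assuming the virthba module is loaded and exposes the entry exactly
as described above), a guest-side program could disable virthba's interrupt
use like this:

    #include <stdio.h>

    int main(void)
    {
        /* Per the description above, writing "0" to enable_ints turns off
         * interrupt use by the virthba module. */
        FILE *f = fopen("/proc/virthba/enable_ints", "w");

        if (!f) {
            perror("/proc/virthba/enable_ints");
            return 1;
        }
        fputs("0\n", f);
        fclose(f);
        return 0;
    }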
diff --git a/drivers/staging/unisys/Kconfig b/drivers/staging/unisys/Kconfig
new file mode 100644
index 000000000000..6bae2afbaa15
--- /dev/null
+++ b/drivers/staging/unisys/Kconfig
@@ -0,0 +1,20 @@
+#
+# Unisys SPAR driver configuration
+#
+menuconfig UNISYSSPAR
+ bool "Unisys SPAR driver support"
+ depends on X86_64 && BROKEN
+ ---help---
+ Support for the Unisys SPAR drivers
+
+if UNISYSSPAR
+
+source "drivers/staging/unisys/visorutil/Kconfig"
+source "drivers/staging/unisys/visorchannel/Kconfig"
+source "drivers/staging/unisys/visorchipset/Kconfig"
+source "drivers/staging/unisys/channels/Kconfig"
+source "drivers/staging/unisys/uislib/Kconfig"
+source "drivers/staging/unisys/virtpci/Kconfig"
+source "drivers/staging/unisys/virthba/Kconfig"
+
+endif # UNISYSSPAR
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
new file mode 100644
index 000000000000..c9cef0b91531
--- /dev/null
+++ b/drivers/staging/unisys/MAINTAINERS
@@ -0,0 +1,6 @@
+Unisys s-Par drivers
+M: Ben Romer <sparmaintainer@unisys.com>
+S: Maintained
+F: Documentation/s-Par/overview.txt
+F: Documentation/s-Par/proc-entries.txt
+F: drivers/staging/unisys/
diff --git a/drivers/staging/unisys/Makefile b/drivers/staging/unisys/Makefile
new file mode 100644
index 000000000000..b988d6940aae
--- /dev/null
+++ b/drivers/staging/unisys/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for Unisys SPAR drivers
+#
+obj-$(CONFIG_UNISYS_VISORUTIL) += visorutil/
+obj-$(CONFIG_UNISYS_VISORCHANNEL) += visorchannel/
+obj-$(CONFIG_UNISYS_VISORCHIPSET) += visorchipset/
+obj-$(CONFIG_UNISYS_CHANNELSTUB) += channels/
+obj-$(CONFIG_UNISYS_UISLIB) += uislib/
+obj-$(CONFIG_UNISYS_VIRTPCI) += virtpci/
+obj-$(CONFIG_UNISYS_VIRTHBA) += virthba/
diff --git a/drivers/staging/unisys/TODO b/drivers/staging/unisys/TODO
new file mode 100644
index 000000000000..034ac61c44f2
--- /dev/null
+++ b/drivers/staging/unisys/TODO
@@ -0,0 +1,21 @@
+TODO:
+ -checkpatch warnings
+ -move /proc entries to /sys
+ -proper major number(s)
+ -add other drivers needed for full functionality:
+ -visorclientbus
+ -visorbus
+ -visordiag
+ -virtnic
+ -visornoop
+ -visorserial
+ -visorvideoclient
+ -visorconinclient
+ -sparstop
+ -move individual drivers into proper driver subsystems
+
+
+Patches to:
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ Ken Cox <jkc@redhat.com>
+ Unisys s-Par maintainer mailing list <sparmaintainer@unisys.com>
diff --git a/drivers/staging/unisys/channels/Kconfig b/drivers/staging/unisys/channels/Kconfig
new file mode 100644
index 000000000000..47a235385567
--- /dev/null
+++ b/drivers/staging/unisys/channels/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys channels configuration
+#
+
+config UNISYS_CHANNELSTUB
+ tristate "Unisys channelstub driver"
+ depends on UNISYSSPAR
+ ---help---
+ If you say Y here, you will enable the Unisys channels driver.
+
diff --git a/drivers/staging/unisys/channels/Makefile b/drivers/staging/unisys/channels/Makefile
new file mode 100644
index 000000000000..e60b0aef4dcd
--- /dev/null
+++ b/drivers/staging/unisys/channels/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for Unisys channelstub
+#
+
+obj-$(CONFIG_UNISYS_CHANNELSTUB) += visorchannelstub.o
+
+visorchannelstub-y := channel.o chanstub.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/channels/channel.c b/drivers/staging/unisys/channels/channel.c
new file mode 100644
index 000000000000..f6452595b742
--- /dev/null
+++ b/drivers/staging/unisys/channels/channel.c
@@ -0,0 +1,219 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/kernel.h>
+#ifdef CONFIG_MODVERSIONS
+#include <config/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h> /* for module_init and module_exit */
+#include <linux/slab.h> /* for memcpy */
+#include <linux/types.h>
+
+/* Implementation of exported functions for Supervisor channels */
+#include "channel.h"
+
+/*
+ * Routine Description:
+ * Tries to insert the prebuilt signal pointed to by pSignal into the nth
+ * Queue of the Channel pointed to by pChannel
+ *
+ * Parameters:
+ * pChannel: (IN) points to the IO Channel
+ * Queue: (IN) nth Queue of the IO Channel
+ * pSignal: (IN) pointer to the signal
+ *
+ * Assumptions:
+ * - pChannel, Queue and pSignal are valid.
+ * - If insertion fails due to a full queue, the caller will determine the
+ * retry policy (e.g. wait & try again, report an error, etc.).
+ *
+ * Return value:
+ * 1 if the insertion succeeds, 0 if the queue was full.
+ */
+unsigned char
+visor_signal_insert(CHANNEL_HEADER __iomem *pChannel, U32 Queue, void *pSignal)
+{
+ void __iomem *psignal;
+ unsigned int head, tail, nof;
+
+ SIGNAL_QUEUE_HEADER __iomem *pqhdr =
+ (SIGNAL_QUEUE_HEADER __iomem *)
+ ((char __iomem *) pChannel + readq(&pChannel->oChannelSpace))
+ + Queue;
+
+ /* capture current head and tail */
+ head = readl(&pqhdr->Head);
+ tail = readl(&pqhdr->Tail);
+
+ /* queue is full if (head + 1) % n equals tail */
+ if (((head + 1) % readl(&pqhdr->MaxSignalSlots)) == tail) {
+ nof = readq(&pqhdr->NumOverflows) + 1;
+ writeq(nof, &pqhdr->NumOverflows);
+ return 0;
+ }
+
+ /* increment the head index */
+ head = (head + 1) % readl(&pqhdr->MaxSignalSlots);
+
+ /* copy signal to the head location from the area pointed to
+ * by pSignal
+ */
+ psignal = (char __iomem *)pqhdr + readq(&pqhdr->oSignalBase) +
+ (head * readl(&pqhdr->SignalSize));
+ MEMCPY_TOIO(psignal, pSignal, readl(&pqhdr->SignalSize));
+
+ VolatileBarrier();
+ writel(head, &pqhdr->Head);
+
+ writeq(readq(&pqhdr->NumSignalsSent) + 1, &pqhdr->NumSignalsSent);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(visor_signal_insert);
+
+/*
+ * Routine Description:
+ * Removes one signal from Channel pChannel's nth Queue at the
+ * time of the call and copies it into the memory pointed to by
+ * pSignal.
+ *
+ * Parameters:
+ * pChannel: (IN) points to the IO Channel
+ * Queue: (IN) nth Queue of the IO Channel
+ * pSignal: (IN) pointer to where the signal is to be copied
+ *
+ * Assumptions:
+ * - pChannel and Queue are valid.
+ * - pSignal points to a memory area large enough to hold queue's SignalSize
+ *
+ * Return value:
+ * 1 if the removal succeeds, 0 if the queue was empty.
+ */
+unsigned char
+visor_signal_remove(CHANNEL_HEADER __iomem *pChannel, U32 Queue, void *pSignal)
+{
+ void __iomem *psource;
+ unsigned int head, tail;
+ SIGNAL_QUEUE_HEADER __iomem *pqhdr =
+ (SIGNAL_QUEUE_HEADER __iomem *) ((char __iomem *) pChannel +
+ readq(&pChannel->oChannelSpace)) + Queue;
+
+ /* capture current head and tail */
+ head = readl(&pqhdr->Head);
+ tail = readl(&pqhdr->Tail);
+
+ /* queue is empty if the head index equals the tail index */
+ if (head == tail) {
+ writeq(readq(&pqhdr->NumEmptyCnt) + 1, &pqhdr->NumEmptyCnt);
+ return 0;
+ }
+
+ /* advance past the 'empty' front slot */
+ tail = (tail + 1) % readl(&pqhdr->MaxSignalSlots);
+
+ /* copy signal from tail location to the area pointed to by pSignal */
+ psource = (char __iomem *) pqhdr + readq(&pqhdr->oSignalBase) +
+ (tail * readl(&pqhdr->SignalSize));
+ MEMCPY_FROMIO(pSignal, psource, readl(&pqhdr->SignalSize));
+
+ VolatileBarrier();
+ writel(tail, &pqhdr->Tail);
+
+ writeq(readq(&pqhdr->NumSignalsReceived) + 1,
+ &pqhdr->NumSignalsReceived);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(visor_signal_remove);
+
+/*
+ * Routine Description:
+ * Removes all signals present in Channel pChannel's nth Queue at the
+ * time of the call and copies them into the memory pointed to by
+ * pSignal. Returns the # of signals copied as the value of the routine.
+ *
+ * Parameters:
+ * pChannel: (IN) points to the IO Channel
+ * Queue: (IN) nth Queue of the IO Channel
+ * pSignal: (IN) pointer to where the signals are to be copied
+ *
+ * Assumptions:
+ * - pChannel and Queue are valid.
+ * - pSignal points to a memory area large enough to hold Queue's MaxSignals
+ * # of signals, each of which is Queue's SignalSize.
+ *
+ * Return value:
+ * # of signals copied.
+ */
+unsigned int
+SignalRemoveAll(pCHANNEL_HEADER pChannel, U32 Queue, void *pSignal)
+{
+ void *psource;
+ unsigned int head, tail, signalCount = 0;
+ pSIGNAL_QUEUE_HEADER pqhdr =
+ (pSIGNAL_QUEUE_HEADER) ((char *) pChannel +
+ pChannel->oChannelSpace) + Queue;
+
+ /* capture current head and tail */
+ head = pqhdr->Head;
+ tail = pqhdr->Tail;
+
+ /* queue is empty if the head index equals the tail index */
+ if (head == tail)
+ return 0;
+
+ while (head != tail) {
+ /* advance past the 'empty' front slot */
+ tail = (tail + 1) % pqhdr->MaxSignalSlots;
+
+ /* copy signal from tail location to the area pointed
+ * to by pSignal
+ */
+ psource =
+ (char *) pqhdr + pqhdr->oSignalBase +
+ (tail * pqhdr->SignalSize);
+ MEMCPY((char *) pSignal + (pqhdr->SignalSize * signalCount),
+ psource, pqhdr->SignalSize);
+
+ VolatileBarrier();
+ pqhdr->Tail = tail;
+
+ signalCount++;
+ pqhdr->NumSignalsReceived++;
+ }
+
+ return signalCount;
+}
+
+/*
+ * Routine Description:
+ * Determine whether a signal queue is empty.
+ *
+ * Parameters:
+ * pChannel: (IN) points to the IO Channel
+ * Queue: (IN) nth Queue of the IO Channel
+ *
+ * Return value:
+ * 1 if the signal queue is empty, 0 otherwise.
+ */
+unsigned char
+visor_signalqueue_empty(CHANNEL_HEADER __iomem *pChannel, U32 Queue)
+{
+ SIGNAL_QUEUE_HEADER __iomem *pqhdr =
+ (SIGNAL_QUEUE_HEADER __iomem *) ((char __iomem *) pChannel +
+ readq(&pChannel->oChannelSpace)) + Queue;
+ return readl(&pqhdr->Head) == readl(&pqhdr->Tail);
+}
+EXPORT_SYMBOL_GPL(visor_signalqueue_empty);
+
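visor_signal_insert() and visor_signal_remove() above follow the usual
one-slot-empty ring convention: a queue with MaxSignalSlots slots holds at
most MaxSignalSlots - 1 signals, is full when (head + 1) % slots equals tail,
and is empty when head equals tail. A plain-memory sketch of the same
invariants, without the __iomem accessors or the overflow/receive counters:

    #include <stdbool.h>
    #include <string.h>

    /* Plain-memory model of a signal queue: 'slots' entries of 'sigsize'
     * bytes each, with one slot always left empty. */
    struct ring {
        unsigned int head;     /* index of the last slot written */
        unsigned int tail;     /* index of the last slot read */
        unsigned int slots;    /* MaxSignalSlots */
        unsigned int sigsize;  /* SignalSize */
        unsigned char *base;   /* slots * sigsize bytes of signal storage */
    };

    static bool ring_insert(struct ring *r, const void *sig)
    {
        unsigned int head = (r->head + 1) % r->slots;

        if (head == r->tail)          /* full: next slot is the tail */
            return false;
        memcpy(r->base + head * r->sigsize, sig, r->sigsize);
        r->head = head;               /* publish only after the copy */
        return true;
    }

    static bool ring_remove(struct ring *r, void *sig)
    {
        unsigned int tail;

        if (r->head == r->tail)       /* empty */
            return false;
        tail = (r->tail + 1) % r->slots;
        memcpy(sig, r->base + tail * r->sigsize, r->sigsize);
        r->tail = tail;
        return true;
    }

In the driver, the barrier issued before writing Head or Tail back through
writel() plays the same "publish only after the copy" role.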
diff --git a/drivers/staging/unisys/channels/chanstub.c b/drivers/staging/unisys/channels/chanstub.c
new file mode 100644
index 000000000000..f504f49a436a
--- /dev/null
+++ b/drivers/staging/unisys/channels/chanstub.c
@@ -0,0 +1,70 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#define EXPORT_SYMTAB
+#include <linux/kernel.h>
+#ifdef CONFIG_MODVERSIONS
+#include <config/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h> /* for module_init and module_exit */
+#include <linux/slab.h> /* for memcpy */
+#include <linux/types.h>
+
+#include "channel.h"
+#include "chanstub.h"
+#include "version.h"
+
+static __init int
+channel_mod_init(void)
+{
+ return 0;
+}
+
+static __exit void
+channel_mod_exit(void)
+{
+}
+
+unsigned char
+SignalInsert_withLock(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal, spinlock_t *lock)
+{
+ unsigned char result;
+ unsigned long flags;
+ spin_lock_irqsave(lock, flags);
+ result = visor_signal_insert(pChannel, Queue, pSignal);
+ spin_unlock_irqrestore(lock, flags);
+ return result;
+}
+
+unsigned char
+SignalRemove_withLock(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal, spinlock_t *lock)
+{
+ unsigned char result;
+ spin_lock(lock);
+ result = visor_signal_remove(pChannel, Queue, pSignal);
+ spin_unlock(lock);
+ return result;
+}
+
+module_init(channel_mod_init);
+module_exit(channel_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bryan Glaudel");
+MODULE_ALIAS("uischan");
+ /* this is extracted during depmod and kept in modules.dep */
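The channel routines above do no locking of their own; each caller that
shares a queue serializes access with its own spinlock through these
*_withLock wrappers. A hypothetical caller sketch (the context structure and
queue number are illustrative; only the wrapper itself comes from this file):

    #include <linux/spinlock.h>

    #include "channel.h"
    #include "chanstub.h"

    struct my_chan_ctx {
        CHANNEL_HEADER __iomem *chan;  /* mapped channel header */
        spinlock_t insert_lock;        /* protects inserts on queue 0; must be
                                        * set up with spin_lock_init() first */
    };

    /* Returns 1 on success, 0 if the queue was full.  Safe from any context
     * because SignalInsert_withLock disables interrupts while holding the
     * lock. */
    static int my_send_request(struct my_chan_ctx *ctx, void *req)
    {
        return SignalInsert_withLock(ctx->chan, 0, req, &ctx->insert_lock);
    }

Note that SignalRemove_withLock, as written, takes the lock with a plain
spin_lock(), so removals must not race with an interrupt handler that takes
the same lock.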
diff --git a/drivers/staging/unisys/channels/chanstub.h b/drivers/staging/unisys/channels/chanstub.h
new file mode 100644
index 000000000000..8d727debca67
--- /dev/null
+++ b/drivers/staging/unisys/channels/chanstub.h
@@ -0,0 +1,23 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __CHANSTUB_H__
+#define __CHANSTUB_H__
+unsigned char SignalInsert_withLock(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal, spinlock_t *lock);
+unsigned char SignalRemove_withLock(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal, spinlock_t *lock);
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/channels/channel.h b/drivers/staging/unisys/common-spar/include/channels/channel.h
new file mode 100644
index 000000000000..aee204172b21
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/channel.h
@@ -0,0 +1,661 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __CHANNEL_H__
+#define __CHANNEL_H__
+
+/*
+* Whenever this file is changed a corresponding change must be made in
+* the Console/ServicePart/visordiag_early/supervisor_channel.h file
+* which is needed for Linux kernel compiles. These two files must be
+* in sync.
+*/
+
+/* define the following to prevent include nesting in kernel header
+ * files of similar abbreviated content
+ */
+#define __SUPERVISOR_CHANNEL_H__
+
+#include "commontypes.h"
+
+#define SIGNATURE_16(A, B) ((A) | (B<<8))
+#define SIGNATURE_32(A, B, C, D) \
+ (SIGNATURE_16(A, B) | (SIGNATURE_16(C, D) << 16))
+#define SIGNATURE_64(A, B, C, D, E, F, G, H) \
+ (SIGNATURE_32(A, B, C, D) | ((U64)(SIGNATURE_32(E, F, G, H)) << 32))
+
+#ifndef lengthof
+#define lengthof(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER))
+#endif
+#ifndef COVERQ
+#define COVERQ(v, d) (((v)+(d)-1) / (d))
+#endif
+#ifndef COVER
+#define COVER(v, d) ((d)*COVERQ(v, d))
+#endif
+
+#ifndef GUID0
+#define GUID0 {0, 0, 0, {0, 0, 0, 0, 0, 0, 0, 0} }
+#endif
+
+/* The C language is inconsistent with respect to where it allows literal
+ * constants, especially literal constant structs. Literal constant structs
+ * are allowed for initialization only, whereas other types of literal
+ * constants are allowed anywhere. We get around this inconsistency by
+ * declaring a "static const" variable for each GUID. This variable can be
+ * used in expressions where the literal constant would not be allowed.
+ */
+static const GUID Guid0 = GUID0;
+
+#define ULTRA_CHANNEL_PROTOCOL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L')
+
+typedef enum {
+ CHANNELSRV_UNINITIALIZED = 0, /* channel is in an undefined state */
+ CHANNELSRV_READY = 1 /* channel has been initialized by server */
+} CHANNEL_SERVERSTATE;
+
+typedef enum {
+ CHANNELCLI_DETACHED = 0,
+ CHANNELCLI_DISABLED = 1, /* client can see channel but is NOT
+ * allowed to use it unless given TBD
+ * explicit request (should actually be
+ * < DETACHED) */
+ CHANNELCLI_ATTACHING = 2, /* legacy EFI client request
+ * for EFI server to attach */
+ CHANNELCLI_ATTACHED = 3, /* idle, but client may want
+ * to use channel any time */
+ CHANNELCLI_BUSY = 4, /* client either wants to use or is
+ * using channel */
+ CHANNELCLI_OWNED = 5 /* "no worries" state - client can
+ * access channel anytime */
+} CHANNEL_CLIENTSTATE;
+static inline const U8 *
+ULTRA_CHANNELCLI_STRING(U32 v)
+{
+ switch (v) {
+ case CHANNELCLI_DETACHED:
+ return (const U8 *) ("DETACHED");
+ case CHANNELCLI_DISABLED:
+ return (const U8 *) ("DISABLED");
+ case CHANNELCLI_ATTACHING:
+ return (const U8 *) ("ATTACHING");
+ case CHANNELCLI_ATTACHED:
+ return (const U8 *) ("ATTACHED");
+ case CHANNELCLI_BUSY:
+ return (const U8 *) ("BUSY");
+ case CHANNELCLI_OWNED:
+ return (const U8 *) ("OWNED");
+ default:
+ break;
+ }
+ return (const U8 *) ("?");
+}
+
+#define ULTRA_CHANNELSRV_IS_READY(x) ((x) == CHANNELSRV_READY)
+#define ULTRA_CHANNEL_SERVER_READY(pChannel) \
+ (ULTRA_CHANNELSRV_IS_READY(readl(&(pChannel)->SrvState)))
+
+#define ULTRA_VALID_CHANNELCLI_TRANSITION(o, n) \
+ (((((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_DISABLED)) || \
+ (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DISABLED)) || \
+ (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_DISABLED)) || \
+ (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DETACHED)) || \
+ (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_DETACHED)) || \
+ (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_ATTACHING)) || \
+ (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_ATTACHED)) || \
+ (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_ATTACHED)) || \
+ (((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_ATTACHED)) || \
+ (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_BUSY)) || \
+ (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_OWNED)) || \
+ (((o) == CHANNELCLI_DISABLED) && ((n) == CHANNELCLI_OWNED)) || \
+ (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_OWNED)) || \
+ (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_OWNED)) || \
+ (((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_OWNED)) || (0)) \
+ ? (1) : (0))
+
+#define ULTRA_CHANNEL_CLIENT_CHK_TRANSITION(old, new, chanId, logCtx, \
+ file, line) \
+ do { \
+ if (!ULTRA_VALID_CHANNELCLI_TRANSITION(old, new)) \
+ UltraLogEvent(logCtx, \
+ CHANNELSTATE_DIAG_EVENTID_TRANSITERR, \
+ CHANNELSTATE_DIAG_SEVERITY, \
+ CHANNELSTATE_DIAG_SUBSYS, \
+ __func__, __LINE__, \
+ "%s Channel StateTransition INVALID! (%s) %s(%d)-->%s(%d) @%s:%d\n", \
+ chanId, "CliState<x>", \
+ ULTRA_CHANNELCLI_STRING(old), \
+ old, \
+ ULTRA_CHANNELCLI_STRING(new), \
+ new, \
+ PathName_Last_N_Nodes((U8 *)file, 4), \
+ line); \
+ } while (0)
+
+#define ULTRA_CHANNEL_CLIENT_TRANSITION(pChan, chanId, \
+ newstate, logCtx) \
+ do { \
+ ULTRA_CHANNEL_CLIENT_CHK_TRANSITION( \
+ readl(&(((CHANNEL_HEADER __iomem *) \
+ (pChan))->CliStateOS)), \
+ newstate, \
+ chanId, logCtx, __FILE__, __LINE__); \
+ UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK, \
+ CHANNELSTATE_DIAG_SEVERITY, \
+ CHANNELSTATE_DIAG_SUBSYS, \
+ __func__, __LINE__, \
+ "%s Channel StateTransition (%s) %s(%d)-->%s(%d) @%s:%d\n", \
+ chanId, "CliStateOS", \
+ ULTRA_CHANNELCLI_STRING( \
+ readl(&((CHANNEL_HEADER __iomem *) \
+ (pChan))->CliStateOS)), \
+ readl(&((CHANNEL_HEADER __iomem *) \
+ (pChan))->CliStateOS), \
+ ULTRA_CHANNELCLI_STRING(newstate), \
+ newstate, \
+ PathName_Last_N_Nodes(__FILE__, 4), __LINE__); \
+ writel(newstate, &((CHANNEL_HEADER __iomem *) \
+ (pChan))->CliStateOS); \
+ MEMORYBARRIER; \
+ } while (0)
+
+#define ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(pChan, chanId, logCtx) \
+ ULTRA_channel_client_acquire_os(pChan, chanId, logCtx, \
+ (char *)__FILE__, __LINE__, \
+ (char *)__func__)
+#define ULTRA_CHANNEL_CLIENT_RELEASE_OS(pChan, chanId, logCtx) \
+ ULTRA_channel_client_release_os(pChan, chanId, logCtx, \
+ (char *)__FILE__, __LINE__, (char *)__func__)
+
+/* Values for ULTRA_CHANNEL_PROTOCOL.CliErrorBoot: */
+/* throttling invalid boot channel statetransition error due to client
+ * disabled */
+#define ULTRA_CLIERRORBOOT_THROTTLEMSG_DISABLED 0x01
+
+/* throttling invalid boot channel statetransition error due to client
+ * not attached */
+#define ULTRA_CLIERRORBOOT_THROTTLEMSG_NOTATTACHED 0x02
+
+/* throttling invalid boot channel statetransition error due to busy channel */
+#define ULTRA_CLIERRORBOOT_THROTTLEMSG_BUSY 0x04
+
+/* Values for ULTRA_CHANNEL_PROTOCOL.CliErrorOS: */
+/* throttling invalid guest OS channel statetransition error due to
+ * client disabled */
+#define ULTRA_CLIERROROS_THROTTLEMSG_DISABLED 0x01
+
+/* throttling invalid guest OS channel statetransition error due to
+ * client not attached */
+#define ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED 0x02
+
+/* throttling invalid guest OS channel statetransition error due to
+ * busy channel */
+#define ULTRA_CLIERROROS_THROTTLEMSG_BUSY 0x04
+
+/* Values for ULTRA_CHANNEL_PROTOCOL.Features: This define exists so
+* that windows guest can look at the FeatureFlags in the io channel,
+* and configure the windows driver to use interrupts or not based on
+* this setting. This flag is set in uislib after the
+* ULTRA_VHBA_init_channel is called. All feature bits for all
+* channels should be defined here. The io channel feature bits are
+* defined right here */
+#define ULTRA_IO_DRIVER_ENABLES_INTS (0x1ULL << 1)
+#define ULTRA_IO_CHANNEL_IS_POLLING (0x1ULL << 3)
+#define ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS (0x1ULL << 4)
+#define ULTRA_IO_DRIVER_DISABLES_INTS (0x1ULL << 5)
+#define ULTRA_IO_DRIVER_SUPPORTS_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
+
+#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
+/* Common Channel Header */
+typedef struct _CHANNEL_HEADER {
+ U64 Signature; /* Signature */
+ U32 LegacyState; /* DEPRECATED - being replaced by */
+ /* / SrvState, CliStateBoot, and CliStateOS below */
+ U32 HeaderSize; /* sizeof(CHANNEL_HEADER) */
+ U64 Size; /* Total size of this channel in bytes */
+ U64 Features; /* Flags to modify behavior */
+ GUID Type; /* Channel type: data, bus, control, etc. */
+ U64 PartitionHandle; /* ID of guest partition */
+ U64 Handle; /* Device number of this channel in client */
+ U64 oChannelSpace; /* Offset in bytes to channel specific area */
+ U32 VersionId; /* CHANNEL_HEADER Version ID */
+ U32 PartitionIndex; /* Index of guest partition */
+ GUID ZoneGuid; /* Guid of Channel's zone */
+ U32 oClientString; /* offset from channel header to
+ * nul-terminated ClientString (0 if
+ * ClientString not present) */
+ U32 CliStateBoot; /* CHANNEL_CLIENTSTATE of pre-boot
+ * EFI client of this channel */
+ U32 CmdStateCli; /* CHANNEL_COMMANDSTATE (overloaded in
+ * Windows drivers, see ServerStateUp,
+ * ServerStateDown, etc) */
+ U32 CliStateOS; /* CHANNEL_CLIENTSTATE of Guest OS
+ * client of this channel */
+ U32 ChannelCharacteristics; /* CHANNEL_CHARACTERISTIC_<xxx> */
+ U32 CmdStateSrv; /* CHANNEL_COMMANDSTATE (overloaded in
+ * Windows drivers, see ServerStateUp,
+ * ServerStateDown, etc) */
+ U32 SrvState; /* CHANNEL_SERVERSTATE */
+ U8 CliErrorBoot; /* bits to indicate err states for
+ * boot clients, so err messages can
+ * be throttled */
+ U8 CliErrorOS; /* bits to indicate err states for OS
+ * clients, so err messages can be
+ * throttled */
+ U8 Filler[1]; /* Pad out to 128 byte cacheline */
+ /* Please add all new single-byte values below here */
+ U8 RecoverChannel;
+} CHANNEL_HEADER, *pCHANNEL_HEADER, ULTRA_CHANNEL_PROTOCOL;
+
+#define ULTRA_CHANNEL_ENABLE_INTS (0x1ULL << 0)
+
+/* Subheader for the Signal Type variation of the Common Channel */
+typedef struct _SIGNAL_QUEUE_HEADER {
+ /* 1st cache line */
+ U32 VersionId; /* SIGNAL_QUEUE_HEADER Version ID */
+ U32 Type; /* Queue type: storage, network */
+ U64 Size; /* Total size of this queue in bytes */
+ U64 oSignalBase; /* Offset to signal queue area */
+ U64 FeatureFlags; /* Flags to modify behavior */
+ U64 NumSignalsSent; /* Total # of signals placed in this queue */
+ U64 NumOverflows; /* Total # of inserts failed due to
+ * full queue */
+ U32 SignalSize; /* Total size of a signal for this queue */
+ U32 MaxSignalSlots; /* Max # of slots in queue, 1 slot is
+ * always empty */
+ U32 MaxSignals; /* Max # of signals in queue
+ * (MaxSignalSlots-1) */
+ U32 Head; /* Queue head signal # */
+ /* 2nd cache line */
+ U64 NumSignalsReceived; /* Total # of signals removed from this queue */
+ U32 Tail; /* Queue tail signal # (on separate
+ * cache line) */
+ U32 Reserved1; /* Reserved field */
+ U64 Reserved2; /* Reserved field */
+ U64 ClientQueue;
+ U64 NumInterruptsReceived; /* Total # of Interrupts received. This
+ * is incremented by the ISR in the
+ * guest windows driver */
+ U64 NumEmptyCnt; /* Number of times that visor_signal_remove
+ * is called and returned Empty
+ * Status. */
+ U32 ErrorFlags; /* Error bits set during SignalReinit
+ * to denote trouble with client's
+ * fields */
+ U8 Filler[12]; /* Pad out to 64 byte cacheline */
+} SIGNAL_QUEUE_HEADER, *pSIGNAL_QUEUE_HEADER;
+
+#pragma pack(pop)
+
+#define SignalInit(chan, QHDRFLD, QDATAFLD, QDATATYPE, ver, typ) \
+ do { \
+ MEMSET(&chan->QHDRFLD, 0, sizeof(chan->QHDRFLD)); \
+ chan->QHDRFLD.VersionId = ver; \
+ chan->QHDRFLD.Type = typ; \
+ chan->QHDRFLD.Size = sizeof(chan->QDATAFLD); \
+ chan->QHDRFLD.SignalSize = sizeof(QDATATYPE); \
+ chan->QHDRFLD.oSignalBase = (UINTN)(chan->QDATAFLD)- \
+ (UINTN)(&chan->QHDRFLD); \
+ chan->QHDRFLD.MaxSignalSlots = \
+ sizeof(chan->QDATAFLD)/sizeof(QDATATYPE); \
+ chan->QHDRFLD.MaxSignals = chan->QHDRFLD.MaxSignalSlots-1; \
+ } while (0)
+
+/* Generic function useful for validating any type of channel when it is
+ * received by the client that will be accessing the channel.
+ * Note that <logCtx> is only needed for callers in the EFI environment, and
+ * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
+ */
+static inline int
+ULTRA_check_channel_client(void __iomem *pChannel,
+ GUID expectedTypeGuid,
+ char *channelName,
+ U64 expectedMinBytes,
+ U32 expectedVersionId,
+ U64 expectedSignature,
+ char *fileName, int lineNumber, void *logCtx)
+{
+ if (MEMCMP(&expectedTypeGuid, &Guid0, sizeof(GUID)) != 0)
+ /* caller wants us to verify type GUID */
+ if (MEMCMP_IO(&(((CHANNEL_HEADER __iomem *) (pChannel))->Type),
+ &expectedTypeGuid, sizeof(GUID)) != 0) {
+ CHANNEL_GUID_MISMATCH(expectedTypeGuid, channelName,
+ "type", expectedTypeGuid,
+ ((CHANNEL_HEADER __iomem *)
+ (pChannel))->Type, fileName,
+ lineNumber, logCtx);
+ return 0;
+ }
+ if (expectedMinBytes > 0) /* caller wants us to verify
+ * channel size */
+ if (readq(&((CHANNEL_HEADER __iomem *)
+ (pChannel))->Size) < expectedMinBytes) {
+ CHANNEL_U64_MISMATCH(expectedTypeGuid, channelName,
+ "size", expectedMinBytes,
+ ((CHANNEL_HEADER __iomem *)
+ (pChannel))->Size, fileName,
+ lineNumber, logCtx);
+ return 0;
+ }
+ if (expectedVersionId > 0) /* caller wants us to verify
+ * channel version */
+ if (readl(&((CHANNEL_HEADER __iomem *) (pChannel))->VersionId)
+ != expectedVersionId) {
+ CHANNEL_U32_MISMATCH(expectedTypeGuid, channelName,
+ "version", expectedVersionId,
+ ((CHANNEL_HEADER __iomem *)
+ (pChannel))->VersionId, fileName,
+ lineNumber, logCtx);
+ return 0;
+ }
+ if (expectedSignature > 0) /* caller wants us to verify
+ * channel signature */
+ if (readq(&((CHANNEL_HEADER __iomem *) (pChannel))->Signature)
+ != expectedSignature) {
+ CHANNEL_U64_MISMATCH(expectedTypeGuid, channelName,
+ "signature", expectedSignature,
+ ((CHANNEL_HEADER __iomem *)
+ (pChannel))->Signature, fileName,
+ lineNumber, logCtx);
+ return 0;
+ }
+ return 1;
+}
+
+/* Generic function useful for validating any type of channel when it is about
+ * to be initialized by the server of the channel.
+ * Note that <logCtx> is only needed for callers in the EFI environment, and
+ * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
+ */
+static inline int
+ULTRA_check_channel_server(GUID typeGuid,
+ char *channelName,
+ U64 expectedMinBytes,
+ U64 actualBytes,
+ char *fileName, int lineNumber, void *logCtx)
+{
+ if (expectedMinBytes > 0) /* caller wants us to verify
+ * channel size */
+ if (actualBytes < expectedMinBytes) {
+ CHANNEL_U64_MISMATCH(typeGuid, channelName, "size",
+ expectedMinBytes, actualBytes,
+ fileName, lineNumber, logCtx);
+ return 0;
+ }
+ return 1;
+}
+
+/* Given a file pathname <s> (with '/' or '\' separating directory nodes),
+ * returns a pointer to the beginning of a node within that pathname such
+ * that the number of nodes from that pointer to the end of the string is
+ * NOT more than <n>. Note that if the pathname has less than <n> nodes
+ * in it, the return pointer will be to the beginning of the string.
+ */
+static inline U8 *
+PathName_Last_N_Nodes(U8 *s, unsigned int n)
+{
+ U8 *p = s;
+ unsigned int node_count = 0;
+ while (*p != '\0') {
+ if ((*p == '/') || (*p == '\\'))
+ node_count++;
+ p++;
+ }
+ if (node_count <= n)
+ return s;
+ while (n > 0) {
+ p--;
+ if (p == s)
+ break; /* should never happen, unless someone
+ * is changing the string while we are
+ * looking at it!! */
+ if ((*p == '/') || (*p == '\\'))
+ n--;
+ }
+ return p + 1;
+}
+
+static inline int
+ULTRA_channel_client_acquire_os(void __iomem *pChannel, U8 *chanId,
+ void *logCtx, char *file, int line, char *func)
+{
+ CHANNEL_HEADER __iomem *pChan = pChannel;
+
+ if (readl(&pChan->CliStateOS) == CHANNELCLI_DISABLED) {
+ if ((readb(&pChan->CliErrorOS)
+ & ULTRA_CLIERROROS_THROTTLEMSG_DISABLED) == 0) {
+ /* we are NOT throttling this message */
+ writeb(readb(&pChan->CliErrorOS) |
+ ULTRA_CLIERROROS_THROTTLEMSG_DISABLED,
+ &pChan->CliErrorOS);
+ /* throttle until acquire successful */
+
+ UltraLogEvent(logCtx,
+ CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel StateTransition INVALID! - acquire failed because OS client DISABLED @%s:%d\n",
+ chanId, PathName_Last_N_Nodes(
+ (U8 *) file, 4), line);
+ }
+ return 0;
+ }
+ if ((readl(&pChan->CliStateOS) != CHANNELCLI_OWNED)
+ && (readl(&pChan->CliStateBoot) == CHANNELCLI_DISABLED)) {
+ /* Our competitor is DISABLED, so we can transition to OWNED */
+ UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel StateTransition (%s) %s(%d)-->%s(%d) @%s:%d\n",
+ chanId, "CliStateOS",
+ ULTRA_CHANNELCLI_STRING(
+ readl(&pChan->CliStateOS)),
+ readl(&pChan->CliStateOS),
+ ULTRA_CHANNELCLI_STRING(CHANNELCLI_OWNED),
+ CHANNELCLI_OWNED,
+ PathName_Last_N_Nodes((U8 *) file, 4), line);
+ writel(CHANNELCLI_OWNED, &pChan->CliStateOS);
+ MEMORYBARRIER;
+ }
+ if (readl(&pChan->CliStateOS) == CHANNELCLI_OWNED) {
+ if (readb(&pChan->CliErrorOS) != 0) {
+ /* we are in an error msg throttling state;
+ * come out of it */
+ UltraLogEvent(logCtx,
+ CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel OS client acquire now successful @%s:%d\n",
+ chanId, PathName_Last_N_Nodes((U8 *) file,
+ 4), line);
+ writeb(0, &pChan->CliErrorOS);
+ }
+ return 1;
+ }
+
+ /* We have to do it the "hard way". We transition to BUSY,
+ * and can use the channel iff our competitor has not also
+ * transitioned to BUSY. */
+ if (readl(&pChan->CliStateOS) != CHANNELCLI_ATTACHED) {
+ if ((readb(&pChan->CliErrorOS)
+ & ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED) == 0) {
+ /* we are NOT throttling this message */
+ writeb(readb(&pChan->CliErrorOS) |
+ ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED,
+ &pChan->CliErrorOS);
+ /* throttle until acquire successful */
+ UltraLogEvent(logCtx,
+ CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel StateTransition INVALID! - acquire failed because OS client NOT ATTACHED (state=%s(%d)) @%s:%d\n",
+ chanId,
+ ULTRA_CHANNELCLI_STRING(
+ readl(&pChan->CliStateOS)),
+ readl(&pChan->CliStateOS),
+ PathName_Last_N_Nodes((U8 *) file, 4),
+ line);
+ }
+ return 0;
+ }
+ writel(CHANNELCLI_BUSY, &pChan->CliStateOS);
+ MEMORYBARRIER;
+ if (readl(&pChan->CliStateBoot) == CHANNELCLI_BUSY) {
+ if ((readb(&pChan->CliErrorOS)
+ & ULTRA_CLIERROROS_THROTTLEMSG_BUSY) == 0) {
+ /* we are NOT throttling this message */
+ writeb(readb(&pChan->CliErrorOS) |
+ ULTRA_CLIERROROS_THROTTLEMSG_BUSY,
+ &pChan->CliErrorOS);
+ /* throttle until acquire successful */
+ UltraLogEvent(logCtx,
+ CHANNELSTATE_DIAG_EVENTID_TRANSITBUSY,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel StateTransition failed - host OS acquire failed because boot BUSY @%s:%d\n",
+ chanId, PathName_Last_N_Nodes((U8 *) file,
+ 4), line);
+ }
+ /* reset busy */
+ writel(CHANNELCLI_ATTACHED, &pChan->CliStateOS);
+ MEMORYBARRIER;
+ return 0;
+ }
+ if (readb(&pChan->CliErrorOS) != 0) {
+ /* we are in an error msg throttling state; come out of it */
+ UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel OS client acquire now successful @%s:%d\n",
+ chanId, PathName_Last_N_Nodes((U8 *) file, 4),
+ line);
+ writeb(0, &pChan->CliErrorOS);
+ }
+ return 1;
+}
+
+static inline void
+ULTRA_channel_client_release_os(void __iomem *pChannel, U8 *chanId,
+ void *logCtx, char *file, int line, char *func)
+{
+ CHANNEL_HEADER __iomem *pChan = pChannel;
+ if (readb(&pChan->CliErrorOS) != 0) {
+ /* we are in an error msg throttling state; come out of it */
+ UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel OS client error state cleared @%s:%d\n",
+ chanId, PathName_Last_N_Nodes((U8 *) file, 4),
+ line);
+ writeb(0, &pChan->CliErrorOS);
+ }
+ if (readl(&pChan->CliStateOS) == CHANNELCLI_OWNED)
+ return;
+ if (readl(&pChan->CliStateOS) != CHANNELCLI_BUSY) {
+ UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel StateTransition INVALID! - release failed because OS client NOT BUSY (state=%s(%d)) @%s:%d\n",
+ chanId,
+ ULTRA_CHANNELCLI_STRING(
+ readl(&pChan->CliStateOS)),
+ readl(&pChan->CliStateOS),
+ PathName_Last_N_Nodes((U8 *) file, 4), line);
+ /* return; */
+ }
+ writel(CHANNELCLI_ATTACHED, &pChan->CliStateOS); /* release busy */
+}
+
+/*
+* Routine Description:
+* Tries to insert the prebuilt signal pointed to by pSignal into the nth
+* Queue of the Channel pointed to by pChannel
+*
+* Parameters:
+* pChannel: (IN) points to the IO Channel
+* Queue: (IN) nth Queue of the IO Channel
+* pSignal: (IN) pointer to the signal
+*
+* Assumptions:
+* - pChannel, Queue and pSignal are valid.
+* - If insertion fails due to a full queue, the caller will determine the
+* retry policy (e.g. wait & try again, report an error, etc.).
+*
+* Return value: 1 if the insertion succeeds, 0 if the queue was
+* full.
+*/
+
+unsigned char visor_signal_insert(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal);
+
+/*
+* Routine Description:
+* Removes one signal from Channel pChannel's nth Queue at the
+* time of the call and copies it into the memory pointed to by
+* pSignal.
+*
+* Parameters:
+* pChannel: (IN) points to the IO Channel
+* Queue: (IN) nth Queue of the IO Channel
+* pSignal: (IN) pointer to where the signal is to be copied
+*
+* Assumptions:
+* - pChannel and Queue are valid.
+* - pSignal points to a memory area large enough to hold queue's SignalSize
+*
+* Return value: 1 if the removal succeeds, 0 if the queue was
+* empty.
+*/
+
+unsigned char visor_signal_remove(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal);
+
+/*
+* Routine Description:
+* Removes all signals present in Channel pChannel's nth Queue at the
+* time of the call and copies them into the memory pointed to by
+* pSignal. Returns the # of signals copied as the value of the routine.
+*
+* Parameters:
+* pChannel: (IN) points to the IO Channel
+* Queue: (IN) nth Queue of the IO Channel
+* pSignal: (IN) pointer to where the signals are to be copied
+*
+* Assumptions:
+* - pChannel and Queue are valid.
+* - pSignal points to a memory area large enough to hold Queue's MaxSignals
+* # of signals, each of which is Queue's SignalSize.
+*
+* Return value:
+* # of signals copied.
+*/
+unsigned int SignalRemoveAll(pCHANNEL_HEADER pChannel, U32 Queue,
+ void *pSignal);
+
+/*
+* Routine Description:
+* Determine whether a signal queue is empty.
+*
+* Parameters:
+* pChannel: (IN) points to the IO Channel
+* Queue: (IN) nth Queue of the IO Channel
+*
+* Return value:
+* 1 if the signal queue is empty, 0 otherwise.
+*/
+unsigned char visor_signalqueue_empty(CHANNEL_HEADER __iomem *pChannel,
+ U32 Queue);
+
+#endif
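The SIGNATURE_16/32/64 macros above pack characters least-significant byte
first, so ULTRA_CHANNEL_PROTOCOL_SIGNATURE, SIGNATURE_32('E', 'C', 'N', 'L'),
evaluates to 0x4C4E4345 and appears as the ASCII bytes "ECNL" at the start of
a channel header on a little-endian host. A small stand-alone check (macros
copied from above, with extra parentheses added):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>
    #include <string.h>

    #define SIGNATURE_16(A, B)  ((A) | ((B) << 8))
    #define SIGNATURE_32(A, B, C, D) \
        (SIGNATURE_16(A, B) | (SIGNATURE_16(C, D) << 16))

    int main(void)
    {
        uint32_t sig = SIGNATURE_32('E', 'C', 'N', 'L');
        char bytes[5] = { 0 };

        memcpy(bytes, &sig, sizeof(sig));
        /* Prints: signature 0x4C4E4345, bytes "ECNL" (little-endian host) */
        printf("signature 0x%08" PRIX32 ", bytes \"%s\"\n", sig, bytes);
        return 0;
    }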
diff --git a/drivers/staging/unisys/common-spar/include/channels/channel_guid.h b/drivers/staging/unisys/common-spar/include/channels/channel_guid.h
new file mode 100644
index 000000000000..ae0dc2b2ad14
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/channel_guid.h
@@ -0,0 +1,64 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * CHANNEL Guids
+ */
+
+/* Used in IOChannel
+ * {414815ed-c58c-11da-95a9-00e08161165f}
+ */
+#define ULTRA_VHBA_CHANNEL_PROTOCOL_GUID \
+ { 0x414815ed, 0xc58c, 0x11da, \
+ { 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f } }
+static const GUID UltraVhbaChannelProtocolGuid =
+ ULTRA_VHBA_CHANNEL_PROTOCOL_GUID;
+
+/* Used in IOChannel
+ * {8cd5994d-c58e-11da-95a9-00e08161165f}
+ */
+#define ULTRA_VNIC_CHANNEL_PROTOCOL_GUID \
+ { 0x8cd5994d, 0xc58e, 0x11da, \
+ { 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f } }
+static const GUID UltraVnicChannelProtocolGuid =
+ ULTRA_VNIC_CHANNEL_PROTOCOL_GUID;
+
+/* Used in IOChannel
+ * {72120008-4AAB-11DC-8530-444553544200}
+ */
+#define ULTRA_SIOVM_GUID \
+ { 0x72120008, 0x4AAB, 0x11DC, \
+ { 0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00 } }
+static const GUID UltraSIOVMGuid = ULTRA_SIOVM_GUID;
+
+
+/* Used in visornoop/visornoop_main.c
+ * {5b52c5ac-e5f5-4d42-8dff-429eaecd221f}
+ */
+#define ULTRA_CONTROLDIRECTOR_CHANNEL_PROTOCOL_GUID \
+ { 0x5b52c5ac, 0xe5f5, 0x4d42, \
+ { 0x8d, 0xff, 0x42, 0x9e, 0xae, 0xcd, 0x22, 0x1f } }
+
+static const GUID UltraControlDirectorChannelProtocolGuid =
+ ULTRA_CONTROLDIRECTOR_CHANNEL_PROTOCOL_GUID;
+
+/* Used in visorchipset/visorchipset_main.c
+ * {B4E79625-AEDE-4EAA-9E11-D3EDDCD4504C}
+ */
+#define ULTRA_DIAG_POOL_CHANNEL_PROTOCOL_GUID \
+ {0xb4e79625, 0xaede, 0x4eaa, \
+ { 0x9e, 0x11, 0xd3, 0xed, 0xdc, 0xd4, 0x50, 0x4c } }
+
+
diff --git a/drivers/staging/unisys/common-spar/include/channels/controlframework.h b/drivers/staging/unisys/common-spar/include/channels/controlframework.h
new file mode 100644
index 000000000000..512643348349
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/controlframework.h
@@ -0,0 +1,77 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Module Name:
+ * controlframework.h
+ *
+ * Abstract: This file defines common structures in the unmanaged
+ * Ultravisor (mostly EFI) space.
+ *
+ */
+
+#ifndef _CONTROL_FRAMEWORK_H_
+#define _CONTROL_FRAMEWORK_H_
+
+#include "commontypes.h"
+#include "channel.h"
+
+#define ULTRA_MEMORY_COUNT_Ki 1024
+
+/* Scale order 0 is one 32-bit (4-byte) word (in 64 or 128-bit
+ * architecture potentially 64 or 128-bit word) */
+#define ULTRA_MEMORY_PAGE_WORD 4
+
+/* Define Ki scale page to be traditional 4KB page */
+#define ULTRA_MEMORY_PAGE_Ki (ULTRA_MEMORY_PAGE_WORD * ULTRA_MEMORY_COUNT_Ki)
+typedef struct _ULTRA_SEGMENT_STATE {
+ U16 Enabled:1; /* Bit 0: May enter other states */
+ U16 Active:1; /* Bit 1: Assigned to active partition */
+ U16 Alive:1; /* Bit 2: Configure message sent to
+ * service/server */
+ U16 Revoked:1; /* Bit 3: similar to partition state
+ * ShuttingDown */
+ U16 Allocated:1; /* Bit 4: memory (device/port number)
+ * has been selected by Command */
+ U16 Known:1; /* Bit 5: has been introduced to the
+ * service/guest partition */
+ U16 Ready:1; /* Bit 6: service/Guest partition has
+ * responded to introduction */
+ U16 Operating:1; /* Bit 7: resource is configured and
+ * operating */
+ /* Note: don't use high bit unless we need to switch to ushort
+ * which is non-compliant */
+} ULTRA_SEGMENT_STATE;
+static const ULTRA_SEGMENT_STATE SegmentStateRunning = {
+ 1, 1, 1, 0, 1, 1, 1, 1
+};
+static const ULTRA_SEGMENT_STATE SegmentStatePaused = {
+ 1, 1, 1, 0, 1, 1, 1, 0
+};
+static const ULTRA_SEGMENT_STATE SegmentStateStandby = {
+ 1, 1, 0, 0, 1, 1, 1, 0
+};
+typedef union {
+ U64 Full;
+ struct {
+ U8 Major; /* will be 1 for the first release and
+ * increment thereafter */
+ U8 Minor;
+ U16 Maintenance;
+ U32 Revision; /* Subversion revision */
+ } Part;
+} ULTRA_COMPONENT_VERSION;
+
+#endif /* _CONTROL_FRAMEWORK_H_ not defined */
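ULTRA_COMPONENT_VERSION above overlays four version fields on a single
64-bit word so that a version can be stored or compared as one value. A
sketch of the same layout using standard fixed-width types (the field values
below are made up):

    #include <stdio.h>
    #include <stdint.h>

    /* Same shape as ULTRA_COMPONENT_VERSION: u8/u8/u16/u32 overlaid on one
     * u64; the members' natural alignment fills the word with no padding. */
    typedef union {
        uint64_t Full;
        struct {
            uint8_t  Major;
            uint8_t  Minor;
            uint16_t Maintenance;
            uint32_t Revision;
        } Part;
    } component_version;

    int main(void)
    {
        component_version v = { .Part = { .Major = 1, .Minor = 2,
                                          .Maintenance = 3, .Revision = 456 } };

        /* On a little-endian host, Major occupies the low byte of Full. */
        printf("full=0x%016llx major=%u revision=%u\n",
               (unsigned long long)v.Full, v.Part.Major, v.Part.Revision);
        return 0;
    }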
diff --git a/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h b/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h
new file mode 100644
index 000000000000..47f1c4fa1e7e
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h
@@ -0,0 +1,619 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __CONTROLVMCHANNEL_H__
+#define __CONTROLVMCHANNEL_H__
+
+#include "commontypes.h"
+#include "channel.h"
+#include "controlframework.h"
+enum { INVALID_GUEST_FIRMWARE, SAMPLE_GUEST_FIRMWARE,
+ TIANO32_GUEST_FIRMWARE, TIANO64_GUEST_FIRMWARE
+};
+
+/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
+#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_GUID \
+ {0x2b3c2d10, 0x7ef5, 0x4ad8, \
+ {0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d} }
+
+static const GUID UltraControlvmChannelProtocolGuid =
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_GUID;
+
+#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE \
+ ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define CONTROLVM_MESSAGE_MAX 64
+
+/* Must increment this whenever you insert or delete fields within
+* this channel struct. Also increment whenever you change the meaning
+* of fields within this channel struct so as to break pre-existing
+* software. Note that you can usually add fields to the END of the
+* channel struct withOUT needing to increment this. */
+#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID 1
+
+#define ULTRA_CONTROLVM_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, \
+ UltraControlvmChannelProtocolGuid, \
+ "controlvm", \
+ sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL), \
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_CONTROLVM_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraControlvmChannelProtocolGuid, \
+ "controlvm", \
+ sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL), \
+ actualBytes, __FILE__, __LINE__, logCtx))
+
+#define MY_DEVICE_INDEX 0
+#define MAX_MACDATA_LEN 8 /* number of bytes for MAC address in config packet */
+#define MAX_SERIAL_NUM 32
+
+#define DISK_ZERO_PUN_NUMBER 1 /* Target ID on the SCSI bus for LUN 0 */
+#define DISK_ZERO_LUN_NUMBER 3 /* Logical Unit Number */
+
+/* Defines for various channel queues... */
+#define CONTROLVM_QUEUE_REQUEST 0
+#define CONTROLVM_QUEUE_RESPONSE 1
+#define CONTROLVM_QUEUE_EVENT 2
+#define CONTROLVM_QUEUE_ACK 3
+
+/* Max number of messages stored during IOVM creation to be reused
+ * after crash */
+#define CONTROLVM_CRASHMSG_MAX 2
+
+/** Ids for commands that may appear in either queue of a ControlVm channel.
+ *
+ * Commands that are initiated by the command partition (CP), by an IO or
+ * console service partition (SP), or by a guest partition (GP) are:
+ * - issued on the RequestQueue queue (q #0) in the ControlVm channel
+ * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel
+ *
+ * Events that are initiated by an IO or console service partition (SP) or
+ * by a guest partition (GP) are:
+ * - issued on the EventQueue queue (q #2) in the ControlVm channel
+ * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel
+ */
+typedef enum {
+ CONTROLVM_INVALID = 0,
+ /* SWITCH commands required Parameter: SwitchNumber */
+ /* BUS commands required Parameter: BusNumber */
+ CONTROLVM_BUS_CREATE = 0x101, /* CP --> SP, GP */
+ CONTROLVM_BUS_DESTROY = 0x102, /* CP --> SP, GP */
+ CONTROLVM_BUS_CONFIGURE = 0x104, /* CP --> SP */
+ CONTROLVM_BUS_CHANGESTATE = 0x105, /* CP --> SP, GP */
+ CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106, /* SP, GP --> CP */
+/* DEVICE commands required Parameter: BusNumber, DeviceNumber */
+
+ CONTROLVM_DEVICE_CREATE = 0x201, /* CP --> SP, GP */
+ CONTROLVM_DEVICE_DESTROY = 0x202, /* CP --> SP, GP */
+ CONTROLVM_DEVICE_CONFIGURE = 0x203, /* CP --> SP */
+ CONTROLVM_DEVICE_CHANGESTATE = 0x204, /* CP --> SP, GP */
+ CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205, /* SP, GP --> CP */
+ CONTROLVM_DEVICE_RECONFIGURE = 0x206, /* CP --> Boot */
+/* DISK commands required Parameter: BusNumber, DeviceNumber */
+ CONTROLVM_DISK_CREATE = 0x221, /* CP --> SP */
+ CONTROLVM_DISK_DESTROY = 0x222, /* CP --> SP */
+ CONTROLVM_DISK_CONFIGURE = 0x223, /* CP --> SP */
+ CONTROLVM_DISK_CHANGESTATE = 0x224, /* CP --> SP */
+/* CHIPSET commands */
+ CONTROLVM_CHIPSET_INIT = 0x301, /* CP --> SP, GP */
+ CONTROLVM_CHIPSET_STOP = 0x302, /* CP --> SP, GP */
+ CONTROLVM_CHIPSET_SHUTDOWN = 0x303, /* CP --> SP */
+ CONTROLVM_CHIPSET_READY = 0x304, /* CP --> SP */
+ CONTROLVM_CHIPSET_SELFTEST = 0x305, /* CP --> SP */
+
+} CONTROLVM_ID;
+
+struct InterruptInfo {
+ /**< specifies interrupt info. It is used to send interrupts
+ * for this channel. The peer at the end of this channel
+ * who has registered an interrupt (using recv fields
+ * above) will receive the interrupt. Passed as a parameter
+ * to Issue_VMCALL_IO_QUEUE_TRANSITION, which generates the
+ * interrupt. Currently this is used by IOPart-SP to wake
+ * up GP when Data Channel transitions from empty to
+ * non-empty.*/
+ U64 sendInterruptHandle;
+
+ /**< specifies interrupt handle. It is used to retrieve the
+ * corresponding interrupt pin from Monitor; and the
+ * interrupt pin is used to connect to the corresponding
+ * interrupt. Used by IOPart-GP only. */
+ U64 recvInterruptHandle;
+
+ /**< specifies interrupt vector. It, interrupt pin, and shared are
+ * used to connect to the corresponding interrupt. Used by
+ * IOPart-GP only. */
+ U32 recvInterruptVector;
+
+ /**< specifies whether the recvInterrupt is shared. It, the interrupt
+ * pin, and the vector are used to connect to the corresponding
+ * interrupt. 0 = not shared; 1 = shared. Used by IOPart-GP only. */
+ U8 recvInterruptShared;
+ U8 reserved[3]; /* Natural alignment purposes */
+};
+
+struct PciId {
+ U16 Domain;
+ U8 Bus;
+ U8 Slot;
+ U8 Func;
+ U8 Reserved[3]; /* Natural alignment purposes */
+};
+
+struct PciConfigHdr {
+ U16 VendorId;
+ U16 SubSysVendor;
+ U16 DeviceId;
+ U16 SubSysDevice;
+ U32 ClassCode;
+ U32 Reserved; /* Natural alignment purposes */
+};
+
+struct ScsiId {
+ U32 Bus;
+ U32 Target;
+ U32 Lun;
+ U32 Host; /* Command should ignore this for
+ * DiskArrival/RemovalEvents */
+};
+
+struct WWID {
+ U32 wwid1;
+ U32 wwid2;
+};
+
+struct virtDiskInfo {
+ U32 switchNo; /* defined by SWITCH_CREATE */
+ U32 externalPortNo; /* 0 for SAS RAID provided (external)
+ * virtual disks, 1 for virtual disk
+ * images, 2 for gold disk images */
+ U16 VirtualDiskIndex; /* Index of disk descriptor in the
+ * VirtualDisk segment associated with
+ * externalPortNo */
+ U16 Reserved1;
+ U32 Reserved2;
+};
+
+typedef enum {
+ CONTROLVM_ACTION_NONE = 0,
+ CONTROLVM_ACTION_SET_RESTORE = 0x05E7,
+ CONTROLVM_ACTION_CLEAR_RESTORE = 0x0C18,
+ CONTROLVM_ACTION_RESTORING = 0x08E5,
+ CONTROLVM_ACTION_RESTORE_BUSY = 0x0999,
+ CONTROLVM_ACTION_CLEAR_NVRAM = 0xB01
+} CONTROLVM_ACTION;
+
+typedef enum _ULTRA_TOOL_ACTIONS {
+ /* enumeration that defines intended action */
+ ULTRA_TOOL_ACTION_NONE = 0, /* normal boot of boot disk */
+ ULTRA_TOOL_ACTION_INSTALL = 1, /* install source disk(s) to boot
+ * disk */
+ ULTRA_TOOL_ACTION_CAPTURE = 2, /* capture boot disk to target disk(s)
+ * as 'gold image' */
+ ULTRA_TOOL_ACTION_REPAIR = 3, /* use source disk(s) to repair
+ * installation on boot disk */
+ ULTRA_TOOL_ACTION_CLEAN = 4, /* 'scrub' virtual disk before
+ * releasing back to storage pool */
+ ULTRA_TOOL_ACTION_UPGRADE = 5, /* upgrade to use content of images
+ * referenced from newer blueprint */
+ ULTRA_TOOL_ACTION_DIAG = 6, /* use tool to invoke diagnostic script
+ * provided by blueprint */
+ ULTRA_TOOL_ACTION_FAILED = 7, /* used when tool fails installation
+ and cannot continue */
+ ULTRA_TOOL_ACTION_COUNT = 8
+} ULTRA_TOOL_ACTIONS;
+
+typedef struct _ULTRA_EFI_SPAR_INDICATION {
+ U64 BootToFirmwareUI:1; /* Bit 0: Stop in uefi ui */
+ U64 ClearNvram:1; /* Bit 1: Clear NVRAM */
+ U64 ClearCmos:1; /* Bit 2: Clear CMOS */
+ U64 BootToTool:1; /* Bit 3: Run install tool */
+ /* remaining bits are available */
+} ULTRA_EFI_SPAR_INDICATION;
+
+typedef enum {
+ ULTRA_CHIPSET_FEATURE_REPLY = 0x00000001,
+ ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
+ ULTRA_CHIPSET_FEATURE_PCIVBUS = 0x00000004
+} ULTRA_CHIPSET_FEATURE;
+
+/** This is the common structure that is at the beginning of every
+ * ControlVm message (both commands and responses) in any ControlVm
+ * queue. Commands are easily distinguished from responses by
+ * looking at the flags.response field.
+ */
+typedef struct _CONTROLVM_MESSAGE_HEADER {
+ U32 Id; /* See CONTROLVM_ID. */
+ /* For requests, indicates the message type. */
+ /* For responses, indicates the type of message we are responding to. */
+
+ U32 MessageSize; /* Includes size of this struct + size
+ * of message */
+ U32 SegmentIndex; /* Index of segment containing Vm
+ * message/information */
+ U32 CompletionStatus; /* Error status code or result of
+ * message completion */
+ struct {
+ U32 failed:1; /**< =1 in a response to signify
+ * failure */
+ U32 responseExpected:1; /**< =1 in all messages that expect a
+ * response (Control ignores this
+ * bit) */
+ U32 server:1; /**< =1 in all bus & device-related
+ * messages where the message
+ * receiver is to act as the bus or
+ * device server */
+ U32 testMessage:1; /**< =1 for testing use only
+ * (Control and Command ignore this
+ * bit) */
+ U32 partialCompletion:1; /**< =1 if there are forthcoming
+ * responses/acks associated
+ * with this message */
+ U32 preserve:1; /**< =1 this is to let us know to
+ * preserve channel contents
+ * (for running guests)*/
+ U32 writerInDiag:1; /**< =1 the DiagWriter is active in the
+ * Diagnostic Partition*/
+
+ /* remaining bits in this 32-bit word are available */
+ } Flags;
+ U32 Reserved; /* Natural alignment */
+ U64 MessageHandle; /* Identifies the particular message instance,
+ * and is used to match particular */
+ /* request instances with the corresponding response instance. */
+ U64 PayloadVmOffset; /* Offset of payload area from start of this
+ * instance of ControlVm segment */
+ U32 PayloadMaxBytes; /* Maximum bytes allocated in payload
+ * area of ControlVm segment */
+ U32 PayloadBytes; /* Actual number of bytes of payload
+ * area to copy between IO/Command; */
+ /* if non-zero, there is a payload to copy. */
+} CONTROLVM_MESSAGE_HEADER;
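A minimal, illustrative sketch (not part of the patch itself) of how a message receiver might build a response header from a request header, relying only on the field semantics documented above; the helper name and the choice to start from a whole-header copy are assumptions:

/* Hypothetical helper: initialize a response header from a request header.
 * MessageHandle is preserved so the originator can match this response to
 * its request instance, as described above. */
static inline void
controlvm_init_response(CONTROLVM_MESSAGE_HEADER *rsp,
			const CONTROLVM_MESSAGE_HEADER *req, U32 status)
{
	*rsp = *req;			/* echoes Id, SegmentIndex, MessageHandle */
	rsp->CompletionStatus = status;	/* result of handling the request */
	rsp->Flags.failed = (status != 0);	/* =1 signifies failure */
	rsp->Flags.responseExpected = 0;	/* a response expects no reply */
}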
+
+typedef struct _CONTROLVM_PACKET_DEVICE_CREATE {
+ U32 busNo; /**< bus # (0..n-1) from the msg receiver's
+ * perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 devNo; /**< bus-relative (0..n-1) device number */
+ U64 channelAddr; /**< Guest physical address of the channel, which
+ * can be dereferenced by the receiver
+ * of this ControlVm command */
+ U64 channelBytes; /**< specifies size of the channel in bytes */
+ GUID dataTypeGuid;/**< specifies format of data in channel */
+ GUID devInstGuid; /**< instance guid for the device */
+ struct InterruptInfo intr; /**< specifies interrupt information */
+} CONTROLVM_PACKET_DEVICE_CREATE; /* for CONTROLVM_DEVICE_CREATE */
+
+typedef struct _CONTROLVM_PACKET_DEVICE_CONFIGURE {
+ U32 busNo; /**< bus # (0..n-1) from the msg
+ * receiver's perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 devNo; /**< bus-relative (0..n-1) device number */
+} CONTROLVM_PACKET_DEVICE_CONFIGURE; /* for CONTROLVM_DEVICE_CONFIGURE */
+
+typedef struct _CONTROLVM_MESSAGE_DEVICE_CREATE {
+ CONTROLVM_MESSAGE_HEADER Header;
+ CONTROLVM_PACKET_DEVICE_CREATE Packet;
+} CONTROLVM_MESSAGE_DEVICE_CREATE; /* total 128 bytes */
+
+typedef struct _CONTROLVM_MESSAGE_DEVICE_CONFIGURE {
+ CONTROLVM_MESSAGE_HEADER Header;
+ CONTROLVM_PACKET_DEVICE_CONFIGURE Packet;
+} CONTROLVM_MESSAGE_DEVICE_CONFIGURE; /* total 56 bytes */
+
+/* This is the format for a message in any ControlVm queue. */
+typedef struct _CONTROLVM_MESSAGE_PACKET {
+ union {
+
+ /* BEGIN Request messages */
+ struct {
+ U32 busNo; /*< bus # (0..n-1) from the msg
+ * receiver's perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 deviceCount; /*< indicates the max number of
+ * devices on this bus */
+ U64 channelAddr; /*< Guest physical address of the
+ * channel, which can be
+ * dereferenced by the receiver
+ * of this ControlVm command */
+ U64 channelBytes; /*< size of the channel in bytes */
+ GUID busDataTypeGuid;/*< indicates format of data in bus
+ * channel */
+ GUID busInstGuid; /*< instance guid for the bus */
+ } createBus; /* for CONTROLVM_BUS_CREATE */
+ struct {
+ U32 busNo; /*< bus # (0..n-1) from the msg
+ * receiver's perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 reserved; /* Natural alignment purposes */
+ } destroyBus; /* for CONTROLVM_BUS_DESTROY */
+ struct {
+ U32 busNo; /*< bus # (0..n-1) from the
+ * msg receiver's
+ * perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 reserved1; /* for alignment purposes */
+ U64 guestHandle; /* This is used to convert
+ * guest physical address to real
+ * physical address for DMA, for ex. */
+ U64 recvBusInterruptHandle;/*< specifies interrupt
+ * info. It is used by SP to register
+ * to receive interrupts from the CP.
+ * This interrupt is used for bus
+ * level notifications. The
+ * corresponding
+ * sendBusInterruptHandle is kept in
+ * CP. */
+ } configureBus; /* for CONTROLVM_BUS_CONFIGURE */
+
+ /* for CONTROLVM_DEVICE_CREATE */
+ CONTROLVM_PACKET_DEVICE_CREATE createDevice;
+ struct {
+ U32 busNo; /*< bus # (0..n-1) from the msg
+ * receiver's perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 devNo; /*< bus-relative (0..n-1) device
+ * number */
+ } destroyDevice; /* for CONTROLVM_DEVICE_DESTROY */
+
+ /* for CONTROLVM_DEVICE_CONFIGURE */
+ CONTROLVM_PACKET_DEVICE_CONFIGURE configureDevice;
+ struct {
+ U32 busNo; /*< bus # (0..n-1) from the msg
+ * receiver's perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 devNo; /*< bus-relative (0..n-1) device
+ * number */
+ } reconfigureDevice; /* for CONTROLVM_DEVICE_RECONFIGURE */
+ struct {
+ U32 busNo;
+ ULTRA_SEGMENT_STATE state;
+ U8 reserved[2]; /* Natural alignment purposes */
+ } busChangeState; /* for CONTROLVM_BUS_CHANGESTATE */
+ struct {
+ U32 busNo;
+ U32 devNo;
+ ULTRA_SEGMENT_STATE state;
+ struct {
+ U32 physicalDevice:1; /* =1 if message is for
+ * a physical device */
+ /* remaining bits in this 32-bit word are available */
+ } flags;
+ U8 reserved[2]; /* Natural alignment purposes */
+ } deviceChangeState; /* for CONTROLVM_DEVICE_CHANGESTATE */
+ struct {
+ U32 busNo;
+ U32 devNo;
+ ULTRA_SEGMENT_STATE state;
+ U8 reserved[6]; /* Natural alignment purposes */
+ } deviceChangeStateEvent; /* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
+ struct {
+ U32 busCount; /*< indicates the max number of busses */
+ U32 switchCount; /*< indicates the max number of
+ * switches (applicable for service
+ * partition only) */
+ ULTRA_CHIPSET_FEATURE features;
+ U32 platformNumber; /* Platform Number */
+ } initChipset; /* for CONTROLVM_CHIPSET_INIT */
+ struct {
+ U32 Options; /*< reserved */
+ U32 Test; /*< bit 0 set to run embedded selftest */
+ } chipsetSelftest; /* for CONTROLVM_CHIPSET_SELFTEST */
+
+ /* END Request messages */
+
+ /* BEGIN Response messages */
+
+ /* END Response messages */
+
+ /* BEGIN Event messages */
+
+ /* END Event messages */
+
+ /* BEGIN Ack messages */
+
+ /* END Ack messages */
+ U64 addr; /*< a physical address of something, that
+ * can be dereferenced by the receiver of
+ * this ControlVm command (depends on
+ * command id) */
+ U64 handle; /*< a handle of something (depends on
+ * command id) */
+ };
+} CONTROLVM_MESSAGE_PACKET;
+
+/* All messages in any ControlVm queue have this layout. */
+typedef struct _CONTROLVM_MESSAGE {
+ CONTROLVM_MESSAGE_HEADER hdr;
+ CONTROLVM_MESSAGE_PACKET cmd;
+} CONTROLVM_MESSAGE;
+
+typedef struct _DEVICE_MAP {
+ GUEST_PHYSICAL_ADDRESS DeviceChannelAddress;
+ U64 DeviceChannelSize;
+ U32 CA_Index;
+ U32 Reserved; /* natural alignment */
+ U64 Reserved2; /* Align structure on 32-byte boundary */
+} DEVICE_MAP;
+
+typedef struct _GUEST_DEVICES {
+ DEVICE_MAP VideoChannel;
+ DEVICE_MAP KeyboardChannel;
+ DEVICE_MAP NetworkChannel;
+ DEVICE_MAP StorageChannel;
+ DEVICE_MAP ConsoleChannel;
+ U32 PartitionIndex;
+ U32 Pad;
+} GUEST_DEVICES;
+
+typedef struct _ULTRA_CONTROLVM_CHANNEL_PROTOCOL {
+ CHANNEL_HEADER Header;
+ GUEST_PHYSICAL_ADDRESS gpControlVm; /* guest physical address of
+ * this channel */
+ GUEST_PHYSICAL_ADDRESS gpPartitionTables; /* guest physical address of
+ * partition tables */
+ GUEST_PHYSICAL_ADDRESS gpDiagGuest; /* guest physical address of
+ * diagnostic channel */
+ GUEST_PHYSICAL_ADDRESS gpBootRomDisk; /* guest phys addr of (read
+ * only) Boot ROM disk */
+ GUEST_PHYSICAL_ADDRESS gpBootRamDisk; /* guest phys addr of writable
+ * Boot RAM disk */
+ GUEST_PHYSICAL_ADDRESS gpAcpiTable; /* guest phys addr of acpi
+ * table */
+ GUEST_PHYSICAL_ADDRESS gpControlChannel; /* guest phys addr of control
+ * channel */
+ GUEST_PHYSICAL_ADDRESS gpDiagRomDisk; /* guest phys addr of diagnostic
+ * ROM disk */
+ GUEST_PHYSICAL_ADDRESS gpNvram; /* guest phys addr of NVRAM
+ * channel */
+ U64 RequestPayloadOffset; /* Offset to request payload area */
+ U64 EventPayloadOffset; /* Offset to event payload area */
+ U32 RequestPayloadBytes; /* Bytes available in request payload
+ * area */
+ U32 EventPayloadBytes; /* Bytes available in event payload area */
+ U32 ControlChannelBytes;
+ U32 NvramChannelBytes; /* Bytes in PartitionNvram segment */
+ U32 MessageBytes; /* sizeof(CONTROLVM_MESSAGE) */
+ U32 MessageCount; /* CONTROLVM_MESSAGE_MAX */
+ GUEST_PHYSICAL_ADDRESS gpSmbiosTable; /* guest phys addr of SMBIOS
+ * tables */
+ GUEST_PHYSICAL_ADDRESS gpPhysicalSmbiosTable; /* guest phys addr of
+ * SMBIOS table */
+ /* ULTRA_MAX_GUESTS_PER_SERVICE */
+ GUEST_DEVICES gpObsoleteGuestDevices[16];
+
+ /* guest physical address of EFI firmware image base */
+ GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareImageBase;
+
+ /* guest physical address of EFI firmware entry point */
+ GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareEntryPoint;
+
+ /* guest EFI firmware image size */
+ U64 VirtualGuestFirmwareImageSize;
+
+ /* GPA = 1MB where EFI firmware image is copied to */
+ GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareBootBase;
+ GUEST_PHYSICAL_ADDRESS VirtualGuestImageBase;
+ GUEST_PHYSICAL_ADDRESS VirtualGuestImageSize;
+ U64 PrototypeControlChannelOffset;
+ GUEST_PHYSICAL_ADDRESS VirtualGuestPartitionHandle;
+
+ U16 RestoreAction; /* Restore Action field to restore the guest
+ * partition */
+ U16 DumpAction; /* For Windows guests it shows if the visordisk
+ * is running in dump mode */
+ U16 NvramFailCount;
+ U16 SavedCrashMsgCount; /* = CONTROLVM_CRASHMSG_MAX */
+ U32 SavedCrashMsgOffset; /* Offset to request payload area needed
+ * for crash dump */
+ U32 InstallationError; /* Type of error encountered during
+ * installation */
+ U32 InstallationTextId; /* Id of string to display */
+ U16 InstallationRemainingSteps; /* Number of remaining installation
+ * steps (for progress bars) */
+ U8 ToolAction; /* ULTRA_TOOL_ACTIONS Installation Action
+ * field */
+ U8 Reserved; /* alignment */
+ ULTRA_EFI_SPAR_INDICATION EfiSparIndication;
+ ULTRA_EFI_SPAR_INDICATION EfiSparIndicationSupported;
+ U32 SPReserved;
+ U8 Reserved2[28]; /* Force signals to begin on 128-byte cache
+ * line */
+ SIGNAL_QUEUE_HEADER RequestQueue; /* Service or guest partition
+ * uses this queue to send
+ * requests to Control */
+ SIGNAL_QUEUE_HEADER ResponseQueue; /* Control uses this queue to
+ * respond to service or guest
+ * partition requests */
+ SIGNAL_QUEUE_HEADER EventQueue; /* Control uses this queue to
+ * send events to service or
+ * guest partition */
+ SIGNAL_QUEUE_HEADER EventAckQueue; /* Service or guest partition
+ * uses this queue to ack
+ * Control events */
+
+ /* Request fixed-size message pool - does not include payload */
+ CONTROLVM_MESSAGE RequestMsg[CONTROLVM_MESSAGE_MAX];
+
+ /* Response fixed-size message pool - does not include payload */
+ CONTROLVM_MESSAGE ResponseMsg[CONTROLVM_MESSAGE_MAX];
+
+ /* Event fixed-size message pool - does not include payload */
+ CONTROLVM_MESSAGE EventMsg[CONTROLVM_MESSAGE_MAX];
+
+ /* Ack fixed-size message pool - does not include payload */
+ CONTROLVM_MESSAGE EventAckMsg[CONTROLVM_MESSAGE_MAX];
+
+ /* Message stored during IOVM creation to be reused after crash */
+ CONTROLVM_MESSAGE SavedCrashMsg[CONTROLVM_CRASHMSG_MAX];
+} ULTRA_CONTROLVM_CHANNEL_PROTOCOL;
+
+/* Offsets for VM channel attributes... */
+#define VM_CH_REQ_QUEUE_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, RequestQueue)
+#define VM_CH_RESP_QUEUE_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ResponseQueue)
+#define VM_CH_EVENT_QUEUE_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventQueue)
+#define VM_CH_ACK_QUEUE_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventAckQueue)
+#define VM_CH_REQ_MSG_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, RequestMsg)
+#define VM_CH_RESP_MSG_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ResponseMsg)
+#define VM_CH_EVENT_MSG_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventMsg)
+#define VM_CH_ACK_MSG_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventAckMsg)
+#define VM_CH_CRASH_MSG_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, SavedCrashMsg)
+
+/* The following header will be located at the beginning of PayloadVmOffset for
+ * various ControlVm commands. The receiver of a ControlVm command with a
+ * PayloadVmOffset will dereference this address and then use ConnectionOffset,
+ * InitiatorOffset, and TargetOffset to get the location of UTF-8 formatted
+ * strings that can be parsed to obtain command-specific information. The value
+ * of TotalLength should equal PayloadBytes. The format of the strings at
+ * PayloadVmOffset will take different forms depending on the message. See the
+ * following Wiki page for more information:
+ * https://ustr-linux-1.na.uis.unisys.com/spar/index.php/ControlVm_Parameters_Area
+ */
+typedef struct _ULTRA_CONTROLVM_PARAMETERS_HEADER {
+ U32 TotalLength;
+ U32 HeaderLength;
+ U32 ConnectionOffset;
+ U32 ConnectionLength;
+ U32 InitiatorOffset;
+ U32 InitiatorLength;
+ U32 TargetOffset;
+ U32 TargetLength;
+ U32 ClientOffset;
+ U32 ClientLength;
+ U32 NameOffset;
+ U32 NameLength;
+ GUID Id;
+ U32 Revision;
+ U32 Reserved; /* Natural alignment */
+} ULTRA_CONTROLVM_PARAMETERS_HEADER;
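A small sketch of how a receiver might locate one of the UTF-8 strings described above; the helper name is hypothetical and the offsets are assumed to be relative to the start of this header:

/* Hypothetical sketch: return a pointer to the Name string and its length. */
static inline const char *
controlvm_param_name(const ULTRA_CONTROLVM_PARAMETERS_HEADER *hdr, U32 *length)
{
	*length = hdr->NameLength;
	return (const char *)hdr + hdr->NameOffset;
}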
+
+#endif /* __CONTROLVMCHANNEL_H__ */
diff --git a/drivers/staging/unisys/common-spar/include/channels/diagchannel.h b/drivers/staging/unisys/common-spar/include/channels/diagchannel.h
new file mode 100644
index 000000000000..c93515eb211d
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/diagchannel.h
@@ -0,0 +1,427 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*++
+ *
+ * Module Name:
+ *
+ * diagchannel.h
+ *
+ * Abstract:
+ *
+ * This file defines the DiagChannel protocol. This protocol is used to aid in
+ * preserving event data sent by external applications. This protocol provides
+ * a region for event data to reside in. This data will eventually be sent to
+ * the Boot Partition where it will be committed to memory and/or disk. This
+ * file contains platform-independent data that can be built using any
+ * Supervisor build environment (Windows, Linux, EFI).
+ *
+*/
+
+#ifndef _DIAG_CHANNEL_H_
+#define _DIAG_CHANNEL_H_
+
+#include "commontypes.h"
+#include "channel.h"
+
+/* {EEA7A573-DB82-447c-8716-EFBEAAAE4858} */
+#define ULTRA_DIAG_CHANNEL_PROTOCOL_GUID \
+ {0xeea7a573, 0xdb82, 0x447c, \
+ {0x87, 0x16, 0xef, 0xbe, 0xaa, 0xae, 0x48, 0x58} }
+
+static const GUID UltraDiagChannelProtocolGuid =
+ ULTRA_DIAG_CHANNEL_PROTOCOL_GUID;
+
+/* {E850F968-3263-4484-8CA5-2A35D087A5A8} */
+#define ULTRA_DIAG_ROOT_CHANNEL_PROTOCOL_GUID \
+ {0xe850f968, 0x3263, 0x4484, \
+ {0x8c, 0xa5, 0x2a, 0x35, 0xd0, 0x87, 0xa5, 0xa8} }
+
+#define ULTRA_DIAG_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+
+/* Must increment this whenever you insert or delete fields within this channel
+* struct. Also increment whenever you change the meaning of fields within this
+* channel struct so as to break pre-existing software. Note that you can
+* usually add fields to the END of the channel struct withOUT needing to
+* increment this. */
+#define ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID 2
+
+#define ULTRA_DIAG_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, \
+ UltraDiagChannelProtocolGuid, \
+ "diag", \
+ sizeof(ULTRA_DIAG_CHANNEL_PROTOCOL), \
+ ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_DIAG_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_DIAG_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraDiagChannelProtocolGuid, \
+ "diag", \
+ sizeof(ULTRA_DIAG_CHANNEL_PROTOCOL), \
+ actualBytes, __FILE__, __LINE__, logCtx))
+#define MAX_MODULE_NAME_SIZE 128 /* Maximum length of module name... */
+#define MAX_ADDITIONAL_INFO_SIZE 256 /* Maximum length of any additional info
+ * accompanying event... */
+#define MAX_SUBSYSTEMS 64 /* Maximum number of subsystems allowed in
+ * DiagChannel... */
+#define LOW_SUBSYSTEMS 32 /* Half of MAX_SUBSYSTEMS to allow 64-bit
+ * math */
+#define SUBSYSTEM_DEBUG 0 /* Standard subsystem for debug events */
+#define SUBSYSTEM_DEFAULT 1 /* Default subsystem for legacy calls to
+ * ReportEvent */
+
+/* few useful subsystem mask values */
+#define SUBSYSTEM_MASK_DEBUG 0x01 /* Standard subsystem for debug
+ * events */
+#define SUBSYSTEM_MASK_DEFAULT 0x02 /* Default subsystem for legacy calls to
+ * ReportEvents */
+
+/* Event parameter "Severity" is overloaded with Cause in byte 2 and Severity in
+ * byte 0, bytes 1 and 3 are reserved */
+#define SEVERITY_MASK 0x0FF /* mask out all but the Severity in byte 0 */
+#define CAUSE_MASK 0x0FF0000 /* mask out all but the cause in byte 2 */
+#define CAUSE_SHIFT_AMT 16 /* shift 2 bytes to place it in byte 2 */
+
+/* SubsystemSeverityFilter */
+#define SEVERITY_FILTER_MASK 0x0F /* mask out the Cause half, SeverityFilter is
+ * in the lower nibble */
+#define CAUSE_FILTER_MASK 0xF0 /* mask out the Severity half, CauseFilter is in
+ * the upper nibble */
+#define CAUSE_FILTER_SHIFT_AMT 4 /* shift amount to place it in lower or upper
+ * nibble */
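A small illustrative sketch of how the overloaded "Severity" value might be packed and unpacked with the masks above; the helper names are hypothetical:

/* Hypothetical sketch: pack and unpack the overloaded event Severity
 * parameter (Severity in byte 0, Cause in byte 2). */
static inline U32 diag_make_severity(U32 cause, U32 severity)
{
	return ((cause << CAUSE_SHIFT_AMT) & CAUSE_MASK) |
	       (severity & SEVERITY_MASK);
}

static inline U32 diag_get_cause(U32 packed)
{
	return (packed & CAUSE_MASK) >> CAUSE_SHIFT_AMT;
}

static inline U32 diag_get_severity(U32 packed)
{
	return packed & SEVERITY_MASK;
}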
+
+/* Copied from EFI's EFI_TIME struct in efidef.h. EFI headers are not allowed
+* in some of the Supervisor areas, such as Monitor, so it has been "ported" here
+* for use in diagnostic event timestamps... */
+typedef struct _DIAG_EFI_TIME {
+ U16 Year; /* 1998 - 20XX */
+ U8 Month; /* 1 - 12 */
+ U8 Day; /* 1 - 31 */
+ U8 Hour; /* 0 - 23 */
+ U8 Minute; /* 0 - 59 */
+ U8 Second; /* 0 - 59 */
+ U8 Pad1;
+ U32 Nanosecond; /* 0 - 999,999,999 */
+ S16 TimeZone; /* -1440 to 1440 or 2047 */
+ U8 Daylight;
+ U8 Pad2;
+} DIAG_EFI_TIME;
+
+typedef enum {
+ ULTRA_COMPONENT_GUEST = 0,
+ ULTRA_COMPONENT_MONITOR = 0x01,
+ ULTRA_COMPONENT_CCM = 0x02, /* Common Control module */
+ /* RESERVED 0x03 - 0x7 */
+
+ /* Ultravisor Components */
+ ULTRA_COMPONENT_BOOT = 0x08,
+ ULTRA_COMPONENT_IDLE = 0x09,
+ ULTRA_COMPONENT_CONTROL = 0x0A,
+ ULTRA_COMPONENT_LOGGER = 0x0B,
+ ULTRA_COMPONENT_ACPI = 0x0C,
+ /* RESERVED 0x0D - 0x0F */
+
+ /* sPAR Components */
+ ULTRA_COMPONENT_COMMAND = 0x10,
+ ULTRA_COMPONENT_IODRIVER = 0x11,
+ ULTRA_COMPONENT_CONSOLE = 0x12,
+ ULTRA_COMPONENT_OPERATIONS = 0x13,
+ ULTRA_COMPONENT_MANAGEMENT = 0x14,
+ ULTRA_COMPONENT_DIAG = 0x15,
+ ULTRA_COMPONENT_HWDIAG = 0x16,
+ ULTRA_COMPONENT_PSERVICES = 0x17,
+ ULTRA_COMPONENT_PDIAG = 0x18
+ /* RESERVED 0x19 - 0x1F */
+} ULTRA_COMPONENT_TYPES;
+
+/* Structure: DIAG_CHANNEL_EVENT
+ * Purpose: Contains attributes that make up an event to be written to the
+ * DIAG_CHANNEL memory.
+ * Attributes:
+ * EventId: Id of the diagnostic event to write to memory.
+ * Severity: Severity of the event (Error, Info, etc).
+ * ModuleName: Module/file name where event originated.
+ * LineNumber: Line number in module name where event originated.
+ * Timestamp: Date/time when event was received by ReportEvent, and written
+ * to DiagChannel.
+ * Reserved: Padding to align structure on a 64-byte cache line boundary.
+ * AdditionalInfo: Array of characters for additional event info (may be
+ * empty). */
+typedef struct _DIAG_CHANNEL_EVENT {
+ U32 EventId;
+ U32 Severity;
+ U8 ModuleName[MAX_MODULE_NAME_SIZE];
+ U32 LineNumber;
+ DIAG_EFI_TIME Timestamp; /* Size = 16 bytes */
+ U32 PartitionNumber; /* Filled in by Diag Switch as pool blocks are
+ * filled */
+ U16 VirtualProcessorNumber;
+ U16 LogicalProcessorNumber;
+ U8 ComponentType; /* ULTRA_COMPONENT_TYPES */
+ U8 Subsystem;
+ U16 Reserved0; /* pad to U64 alignment */
+ U32 BlockNumber; /* filled in by DiagSwitch as pool blocks are
+ * filled */
+ U32 BlockNumberHigh;
+ U32 EventNumber; /* filled in by DiagSwitch as pool blocks are
+ * filled */
+ U32 EventNumberHigh;
+
+ /* The BlockNumber and EventNumber fields are set only by DiagSwitch
+ * and referenced only by WinDiagDisplay formatting tool as
+ * additional diagnostic information. Other tools including
+ * WinDiagDisplay currently ignore these 'Reserved' bytes. */
+ U8 Reserved[8];
+ U8 AdditionalInfo[MAX_ADDITIONAL_INFO_SIZE];
+
+ /* NOTE: Changes to DIAG_CHANNEL_EVENT generally need to be reflected in
+ * existing copies *
+ * - for AppOS at
+ * GuestLinux/visordiag_early/supervisor_diagchannel.h *
+ * - for WinDiagDisplay at
+ * EFI/Ultra/Tools/WinDiagDisplay/WinDiagDisplay/diagstruct.h */
+} DIAG_CHANNEL_EVENT;
+
+/* Levels of severity for diagnostic events, in order from lowest severity to
+* highest (i.e. fatal errors are the most severe, and should always be logged,
+* but info events rarely need to be logged except during debugging). The values
+* DIAG_SEVERITY_ENUM_BEGIN and DIAG_SEVERITY_ENUM_END are not valid severity
+* values. They exist merely to delineate the list, so that future additions
+* won't require changes to the driver (i.e. when checking for out-of-range
+* severities in SetSeverity). The values DIAG_SEVERITY_OVERRIDE and
+* DIAG_SEVERITY_SHUTOFF are not valid severity values for logging events but
+* they are valid for controlling the amount of event data. This enum is also
+* defined in DotNet\sParFramework\ControlFramework\ControlFramework.cs. If a
+* change is made to this enum, they should also be reflected in that file. */
+typedef enum { DIAG_SEVERITY_ENUM_BEGIN = 0,
+ DIAG_SEVERITY_OVERRIDE = DIAG_SEVERITY_ENUM_BEGIN,
+ DIAG_SEVERITY_VERBOSE = DIAG_SEVERITY_OVERRIDE, /* 0 */
+ DIAG_SEVERITY_INFO = DIAG_SEVERITY_VERBOSE + 1, /* 1 */
+ DIAG_SEVERITY_WARNING = DIAG_SEVERITY_INFO + 1, /* 2 */
+ DIAG_SEVERITY_ERR = DIAG_SEVERITY_WARNING + 1, /* 3 */
+ DIAG_SEVERITY_PRINT = DIAG_SEVERITY_ERR + 1, /* 4 */
+ DIAG_SEVERITY_SHUTOFF = DIAG_SEVERITY_PRINT + 1, /* 5 */
+ DIAG_SEVERITY_ENUM_END = DIAG_SEVERITY_SHUTOFF, /* 5 */
+ DIAG_SEVERITY_NONFATAL_ERR = DIAG_SEVERITY_ERR,
+ DIAG_SEVERITY_FATAL_ERR = DIAG_SEVERITY_PRINT
+} DIAG_SEVERITY;
+
+/* Event Cause enums
+*
+* Levels of cause for diagnostic events, in order from least to greatest cause
+* Internal errors are most urgent since ideally they should never exist
+* Invalid requests are preventable by avoiding invalid inputs
+* Operations errors depend on environmental factors which may impact which
+* requests are possible
+* Manifest provides intermediate value to capture firmware and configuration
+* version information
+* Trace provides supplemental debug information in release firmware
+* Unknown Log captures unclassified LogEvent calls.
+* Debug is the least urgent since it provides supplemental debug information only
+* in debug firmware
+* Unknown Debug captures unclassified DebugEvent calls.
+* This enum is also defined in
+* DotNet\sParFramework\ControlFramework\ControlFramework.cs.
+* If a change is made to this enum, they should also be reflected in that
+* file. */
+
+
+
+/* A cause value "DIAG_CAUSE_FILE_XFER" together with a severity value of
+* "DIAG_SEVERITY_PRINT" (=4), is used for transferring text or binary file to
+* the Diag partition. This cause-severity combination will be used by Logger
+* DiagSwitch to segregate events into block types. The files are transferred in
+* 256 byte chunks maximum, in the AdditionalInfo field of the DIAG_CHANNEL_EVENT
+* structure. In the file transfer mode, some event fields will have different
+* meaning: EventId specifies the file offset, severity specifies the block type,
+* ModuleName specifies the filename, LineNumber specifies the number of valid
+* data bytes in an event and AdditionalInfo contains up to 256 bytes of data. */
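As an illustration of the field overloading described above, a sender might fill one file-transfer chunk roughly as sketched below. The helper name is hypothetical, CAUSE_FILE_XFER_SEVERITY_PRINT is defined further down in this header, and memcpy()/strncpy() are assumed to be available from the build environment:

/* Hypothetical sketch: describe one file-transfer chunk in a
 * DIAG_CHANNEL_EVENT, per the file-transfer field meanings above. */
static inline void
diag_fill_file_xfer_event(DIAG_CHANNEL_EVENT *ev, const char *filename,
			  U32 file_offset, const U8 *data, U32 bytes)
{
	if (bytes > MAX_ADDITIONAL_INFO_SIZE)
		bytes = MAX_ADDITIONAL_INFO_SIZE;	/* 256-byte chunks max */
	ev->EventId = file_offset;			/* file offset */
	ev->Severity = CAUSE_FILE_XFER_SEVERITY_PRINT;	/* block type */
	strncpy((char *)ev->ModuleName, filename, MAX_MODULE_NAME_SIZE - 1);
	ev->ModuleName[MAX_MODULE_NAME_SIZE - 1] = '\0';
	ev->LineNumber = bytes;				/* valid data bytes */
	memcpy(ev->AdditionalInfo, data, bytes);
}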
+
+/* The Diag DiagWriter appends event blocks to events.raw as today, and for data
+ * blocks uses DIAG_CHANNEL_EVENT
+ * PartitionNumber to extract and append 'AdditionalInfo' to filename (specified
+ * by ModuleName). */
+
+/* The Dell PDiag uses this new mechanism to stash DSET .zip onto the
+ * 'diagnostic' virtual disk. */
+typedef enum {
+ DIAG_CAUSE_UNKNOWN = 0,
+ DIAG_CAUSE_UNKNOWN_DEBUG = DIAG_CAUSE_UNKNOWN + 1, /* 1 */
+ DIAG_CAUSE_DEBUG = DIAG_CAUSE_UNKNOWN_DEBUG + 1, /* 2 */
+ DIAG_CAUSE_UNKNOWN_LOG = DIAG_CAUSE_DEBUG + 1, /* 3 */
+ DIAG_CAUSE_TRACE = DIAG_CAUSE_UNKNOWN_LOG + 1, /* 4 */
+ DIAG_CAUSE_MANIFEST = DIAG_CAUSE_TRACE + 1, /* 5 */
+ DIAG_CAUSE_OPERATIONS_ERROR = DIAG_CAUSE_MANIFEST + 1, /* 6 */
+ DIAG_CAUSE_INVALID_REQUEST = DIAG_CAUSE_OPERATIONS_ERROR + 1, /* 7 */
+ DIAG_CAUSE_INTERNAL_ERROR = DIAG_CAUSE_INVALID_REQUEST + 1, /* 8 */
+ DIAG_CAUSE_FILE_XFER = DIAG_CAUSE_INTERNAL_ERROR + 1, /* 9 */
+ DIAG_CAUSE_ENUM_END = DIAG_CAUSE_FILE_XFER /* 9 */
+} DIAG_CAUSE;
+
+/* Event Cause category defined into the byte 2 of Severity */
+#define CAUSE_DEBUG (DIAG_CAUSE_DEBUG << CAUSE_SHIFT_AMT)
+#define CAUSE_TRACE (DIAG_CAUSE_TRACE << CAUSE_SHIFT_AMT)
+#define CAUSE_MANIFEST (DIAG_CAUSE_MANIFEST << CAUSE_SHIFT_AMT)
+#define CAUSE_OPERATIONS_ERROR (DIAG_CAUSE_OPERATIONS_ERROR << CAUSE_SHIFT_AMT)
+#define CAUSE_INVALID_REQUEST (DIAG_CAUSE_INVALID_REQUEST << CAUSE_SHIFT_AMT)
+#define CAUSE_INTERNAL_ERROR (DIAG_CAUSE_INTERNAL_ERROR << CAUSE_SHIFT_AMT)
+#define CAUSE_FILE_XFER (DIAG_CAUSE_FILE_XFER << CAUSE_SHIFT_AMT)
+#define CAUSE_ENUM_END CAUSE_FILE_XFER
+
+/* Combine Cause and Severity categories into one */
+#define CAUSE_DEBUG_SEVERITY_VERBOSE \
+ (CAUSE_DEBUG | DIAG_SEVERITY_VERBOSE)
+#define CAUSE_TRACE_SEVERITY_VERBOSE \
+ (CAUSE_TRACE | DIAG_SEVERITY_VERBOSE)
+#define CAUSE_MANIFEST_SEVERITY_VERBOSE\
+ (CAUSE_MANIFEST | DIAG_SEVERITY_VERBOSE)
+#define CAUSE_OPERATIONS_SEVERITY_VERBOSE \
+ (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_VERBOSE)
+#define CAUSE_INVALID_SEVERITY_VERBOSE \
+ (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_VERBOSE)
+#define CAUSE_INTERNAL_SEVERITY_VERBOSE \
+ (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_VERBOSE)
+
+#define CAUSE_DEBUG_SEVERITY_INFO \
+ (CAUSE_DEBUG | DIAG_SEVERITY_INFO)
+#define CAUSE_TRACE_SEVERITY_INFO \
+ (CAUSE_TRACE | DIAG_SEVERITY_INFO)
+#define CAUSE_MANIFEST_SEVERITY_INFO \
+ (CAUSE_MANIFEST | DIAG_SEVERITY_INFO)
+#define CAUSE_OPERATIONS_SEVERITY_INFO \
+ (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_INFO)
+#define CAUSE_INVALID_SEVERITY_INFO \
+ (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_INFO)
+#define CAUSE_INTERNAL_SEVERITY_INFO \
+ (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_INFO)
+
+#define CAUSE_DEBUG_SEVERITY_WARN \
+ (CAUSE_DEBUG | DIAG_SEVERITY_WARNING)
+#define CAUSE_TRACE_SEVERITY_WARN \
+ (CAUSE_TRACE | DIAG_SEVERITY_WARNING)
+#define CAUSE_MANIFEST_SEVERITY_WARN \
+ (CAUSE_MANIFEST | DIAG_SEVERITY_WARNING)
+#define CAUSE_OPERATIONS_SEVERITY_WARN \
+ (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_WARNING)
+#define CAUSE_INVALID_SEVERITY_WARN \
+ (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_WARNING)
+#define CAUSE_INTERNAL_SEVERITY_WARN \
+ (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_WARNING)
+
+#define CAUSE_DEBUG_SEVERITY_ERR \
+ (CAUSE_DEBUG | DIAG_SEVERITY_ERR)
+#define CAUSE_TRACE_SEVERITY_ERR \
+ (CAUSE_TRACE | DIAG_SEVERITY_ERR)
+#define CAUSE_MANIFEST_SEVERITY_ERR \
+ (CAUSE_MANIFEST | DIAG_SEVERITY_ERR)
+#define CAUSE_OPERATIONS_SEVERITY_ERR \
+ (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_ERR)
+#define CAUSE_INVALID_SEVERITY_ERR \
+ (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_ERR)
+#define CAUSE_INTERNAL_SEVERITY_ERR \
+ (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_ERR)
+
+#define CAUSE_DEBUG_SEVERITY_PRINT \
+ (CAUSE_DEBUG | DIAG_SEVERITY_PRINT)
+#define CAUSE_TRACE_SEVERITY_PRINT \
+ (CAUSE_TRACE | DIAG_SEVERITY_PRINT)
+#define CAUSE_MANIFEST_SEVERITY_PRINT \
+ (CAUSE_MANIFEST | DIAG_SEVERITY_PRINT)
+#define CAUSE_OPERATIONS_SEVERITY_PRINT \
+ (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_PRINT)
+#define CAUSE_INVALID_SEVERITY_PRINT \
+ (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_PRINT)
+#define CAUSE_INTERNAL_SEVERITY_PRINT \
+ (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_PRINT)
+#define CAUSE_FILE_XFER_SEVERITY_PRINT \
+ (CAUSE_FILE_XFER | DIAG_SEVERITY_PRINT)
+
+/* Structure: DIAG_CHANNEL_PROTOCOL_HEADER
+ *
+ * Purpose: Contains attributes that make up the header specific to the
+ * DIAG_CHANNEL area.
+ *
+ * Attributes:
+ *
+ * DiagLock: Diag Channel spinlock.
+ *
+ * IsChannelInitialized: 1 iff SignalInit was called for this channel; otherwise
+ * 0, and assume the channel is not ready for use yet.
+ *
+ * Reserved: Padding to align the fields in this structure.
+ *
+ * SubsystemSeverityFilter: Level of severity on a subsystem basis that controls
+ * whether events are logged. Any event's severity for a
+ * particular subsystem below this level will be discarded.
+ */
+typedef struct _DIAG_CHANNEL_PROTOCOL_HEADER {
+ volatile U32 DiagLock;
+ U8 IsChannelInitialized;
+ U8 Reserved[3];
+ U8 SubsystemSeverityFilter[64];
+} DIAG_CHANNEL_PROTOCOL_HEADER;
+
+/* The Diagram for the Diagnostic Channel: */
+/* ----------------------- */
+/* | Channel Header | Defined by ULTRA_CHANNEL_PROTOCOL */
+/* ----------------------- */
+/* | Signal Queue Header | Defined by SIGNAL_QUEUE_HEADER */
+/* ----------------------- */
+/* | DiagChannel Header | Defined by DIAG_CHANNEL_PROTOCOL_HEADER */
+/* ----------------------- */
+/* | Channel Event Info | Defined by (DIAG_CHANNEL_EVENT * MAX_EVENTS) */
+/* ----------------------- */
+/* | Reserved | Reserved (pad out to 4MB) */
+/* ----------------------- */
+
+/* Offsets/sizes for diagnostic channel attributes... */
+#define DIAG_CH_QUEUE_HEADER_OFFSET (sizeof(ULTRA_CHANNEL_PROTOCOL))
+#define DIAG_CH_QUEUE_HEADER_SIZE (sizeof(SIGNAL_QUEUE_HEADER))
+#define DIAG_CH_PROTOCOL_HEADER_OFFSET \
+ (DIAG_CH_QUEUE_HEADER_OFFSET + DIAG_CH_QUEUE_HEADER_SIZE)
+#define DIAG_CH_PROTOCOL_HEADER_SIZE (sizeof(DIAG_CHANNEL_PROTOCOL_HEADER))
+#define DIAG_CH_EVENT_OFFSET \
+ (DIAG_CH_PROTOCOL_HEADER_OFFSET + DIAG_CH_PROTOCOL_HEADER_SIZE)
+#define DIAG_CH_SIZE (4096 * 1024)
+
+/* For Control and Idle Partitions with larger (8 MB) diagnostic(root)
+ * channels */
+#define DIAG_CH_LRG_SIZE (2 * DIAG_CH_SIZE) /* 8 MB */
+
+/*
+ * Structure: ULTRA_DIAG_CHANNEL_PROTOCOL
+ *
+ * Purpose: Contains attributes that make up the DIAG_CHANNEL memory.
+ *
+ * Attributes:
+ *
+ * CommonChannelHeader: Header info common to all channels.
+ *
+ * QueueHeader: Queue header common to all channels - used to determine where to
+ * store event.
+ *
+ * DiagChannelHeader: Diagnostic channel header info (see
+ * DIAG_CHANNEL_PROTOCOL_HEADER comments).
+ *
+ * Events: Area where diagnostic events (up to MAX_EVENTS) are written.
+ *
+ * Reserved: Reserved area to allow for correct channel size padding.
+*/
+typedef struct _ULTRA_DIAG_CHANNEL_PROTOCOL {
+ ULTRA_CHANNEL_PROTOCOL CommonChannelHeader;
+ SIGNAL_QUEUE_HEADER QueueHeader;
+ DIAG_CHANNEL_PROTOCOL_HEADER DiagChannelHeader;
+ DIAG_CHANNEL_EVENT Events[(DIAG_CH_SIZE - DIAG_CH_EVENT_OFFSET) /
+ sizeof(DIAG_CHANNEL_EVENT)];
+} ULTRA_DIAG_CHANNEL_PROTOCOL;
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/channels/iochannel.h b/drivers/staging/unisys/common-spar/include/channels/iochannel.h
new file mode 100644
index 000000000000..8de1d249d55f
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/iochannel.h
@@ -0,0 +1,933 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION */
+/* All rights reserved. */
+#ifndef __IOCHANNEL_H__
+#define __IOCHANNEL_H__
+
+/*
+* Everything needed for IOPart-GuestPart communication is defined in
+* this file. Note: Everything is OS-independent because this file is
+* used by Windows, Linux and possibly EFI drivers. */
+
+
+/*
+* Communication flow between the IOPart and GuestPart uses the channel header's
+* channel state. The following states are currently being used:
+* UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED
+*
+* additional states will be used later. No locking is needed to switch between
+* states due to the following rules:
+*
+* 1. IOPart is the only partition allowed to change from UNINIT
+* 2. IOPart is the only partition allowed to change from
+* CHANNEL_ATTACHING
+* 3. GuestPart is the only partition allowed to change from
+* CHANNEL_ATTACHED
+*
+* The state changes are the following: IOPart sees the channel is in UNINIT,
+* UNINIT -> CHANNEL_ATTACHING (performed only by IOPart)
+* CHANNEL_ATTACHING -> CHANNEL_ATTACHED (performed only by IOPart)
+* CHANNEL_ATTACHED -> CHANNEL_OPENED (performed only by GuestPart)
+*/
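A minimal sketch of rule 3 above, assuming the CHANNEL_ATTACHED/CHANNEL_OPENED values come from channel.h as referenced in the comment and that the channel state is reachable as a plain U32; the helper name and parameter are assumptions:

/* Hypothetical sketch: GuestPart is the only partition that moves a
 * channel from CHANNEL_ATTACHED to CHANNEL_OPENED, so no locking is
 * needed per the rules above. */
static inline int guestpart_try_open(volatile U32 *channel_state)
{
	if (*channel_state != CHANNEL_ATTACHED)
		return 0;	/* IOPart has not finished attaching yet */
	*channel_state = CHANNEL_OPENED;
	return 1;
}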
+
+#include "commontypes.h"
+#include "vmcallinterface.h"
+
+#define _ULTRA_CONTROLVM_CHANNEL_INLINE_
+#include <linux/dma-direction.h>
+#include "controlvmchannel.h"
+#include "vbuschannel.h"
+#undef _ULTRA_CONTROLVM_CHANNEL_INLINE_
+#include "channel.h"
+
+/*
+ * CHANNEL Guids
+ */
+
+#include "channel_guid.h"
+
+#define ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE \
+ ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+
+/* Must increment these whenever you insert or delete fields within this channel
+* struct. Also increment whenever you change the meaning of fields within this
+* channel struct so as to break pre-existing software. Note that you can
+* usually add fields to the END of the channel struct withOUT needing to
+* increment this. */
+#define ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID 2
+#define ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID 2
+#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_VERSIONID 1
+
+#define ULTRA_VHBA_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, UltraVhbaChannelProtocolGuid, \
+ "vhba", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_VHBA_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraVhbaChannelProtocolGuid, \
+ "vhba", MIN_IO_CHANNEL_SIZE, actualBytes, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_VNIC_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, UltraVnicChannelProtocolGuid, \
+ "vnic", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_VNIC_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraVnicChannelProtocolGuid, \
+ "vnic", MIN_IO_CHANNEL_SIZE, actualBytes, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_VSWITCH_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, UltraVswitchChannelProtocolGuid, \
+ "vswitch", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VSWITCH_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_VSWITCH_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraVswitchChannelProtocolGuid, \
+ "vswitch", MIN_IO_CHANNEL_SIZE, \
+ actualBytes, \
+ __FILE__, __LINE__, logCtx))
+/*
+* Everything necessary to handle SCSI & NIC traffic between Guest Partition and
+* IO Partition is defined below. */
+
+
+/*
+* Defines and enums.
+*/
+
+#define MINNUM(a, b) (((a) < (b)) ? (a) : (b))
+#define MAXNUM(a, b) (((a) > (b)) ? (a) : (b))
+
+/* these define the two queues per data channel between iopart and
+ * ioguestparts */
+#define IOCHAN_TO_IOPART 0 /* used by ioguestpart to 'insert' signals to
+ * iopart */
+#define IOCHAN_FROM_GUESTPART 0 /* used by iopart to 'remove' signals from
+ * ioguestpart - same queue as previous queue */
+
+#define IOCHAN_TO_GUESTPART 1 /* used by iopart to 'insert' signals to
+ * ioguestpart */
+#define IOCHAN_FROM_IOPART 1 /* used by ioguestpart to 'remove' signals from
+ * iopart - same queue as previous queue */
+
+/* these define the two queues per control channel between controlpart and "its"
+ * guests, which includes the iopart */
+#define CTRLCHAN_TO_CTRLGUESTPART 0 /* used by ctrlguestpart to 'insert' signals
+ * to ctrlpart */
+#define CTLRCHAN_FROM_CTRLPART 0 /* used by ctrlpart to 'remove' signals from
+ * ctrlguestpart - same queue as previous
+ * queue */
+
+#define CTRLCHAN_TO_CTRLPART 1 /* used by ctrlpart to 'insert' signals to
+ * ctrlguestpart */
+#define CTRLCHAN_FROM_CTRLGUESTPART 1 /* used by ctrlguestpart to 'remove'
+ * signals from ctrlpart - same queue as
+ * previous queue */
+
+/* these define the Event & Ack queues per control channel Events are generated
+* by CTRLGUESTPART and sent to CTRLPART; Acks are generated by CTRLPART and sent
+* to CTRLGUESTPART. */
+#define CTRLCHAN_EVENT_TO_CTRLPART 2 /* used by ctrlguestpart to 'insert' Events
+ * to ctrlpart */
+#define CTRLCHAN_EVENT_FROM_CTRLGUESTPART 2 /* used by ctrlpart to 'remove'
+ * Events from ctrlguestpart */
+
+#define CTRLCHAN_ACK_TO_CTRLGUESTPART 3 /* used by ctrlpart to 'insert' Acks to
+ * ctrlguestpart */
+#define CTRLCHAN_ACK_FROM_CTRLPART 3 /* used by ctrlguestpart to 'remove' Events
+ * from ctrlpart */
+
+/* size of cdb - i.e., scsi cmnd */
+#define MAX_CMND_SIZE 16
+
+#define MAX_SENSE_SIZE 64
+
+#define MAX_PHYS_INFO 64
+
+/* Because GuestToGuestCopy is limited to 4KiB segments, and we have limited the
+* Emulex Driver to 256 scatter-list segments via the lpfc_sg_seg_cnt parameter,
+* the maximum I/O size is limited to 256 * 4 KiB = 1 MB */
+#define MAX_IO_SIZE (1024*1024) /* 1 MB */
+
+/* NOTE 1: lpfc defines its support for segments in
+* #define LPFC_SG_SEG_CNT 64
+*
+* NOTE 2: In Linux, frags array in skb is currently allocated to be
+* MAX_SKB_FRAGS size, which is 18 which is smaller than MAX_PHYS_INFO for
+* now. */
+
+#ifndef MAX_SERIAL_NUM
+#define MAX_SERIAL_NUM 32
+#endif /* MAX_SERIAL_NUM */
+
+#define MAX_SCSI_BUSES 1
+#define MAX_SCSI_TARGETS 8
+#define MAX_SCSI_LUNS 16
+#define MAX_SCSI_FROM_HOST 0xFFFFFFFF /* Indicator to use Physical HBA
+ * SCSI Host value */
+
+/* various types of network packets that can be sent in cmdrsp */
+typedef enum { NET_RCV_POST = 0, /* submit buffer to hold receiving
+ * incoming packet */
+ /* virtnic -> uisnic */
+ NET_RCV, /* incoming packet received */
+ /* uisnic -> virtpci */
+ NET_XMIT, /* for outgoing net packets */
+ /* virtnic -> uisnic */
+ NET_XMIT_DONE, /* outgoing packet xmitted */
+ /* uisnic -> virtpci */
+ NET_RCV_ENBDIS, /* enable/disable packet reception */
+ /* virtnic -> uisnic */
+ NET_RCV_ENBDIS_ACK, /* acknowledge enable/disable packet
+ * reception */
+ /* uisnic -> virtnic */
+ NET_RCV_PROMISC, /* enable/disable promiscuous mode */
+ /* virtnic -> uisnic */
+ NET_CONNECT_STATUS, /* indicate the loss or restoration of a network
+ * connection */
+ /* uisnic -> virtnic */
+ NET_MACADDR, /* indicates the client has requested to update
+ * its MAC addr */
+ NET_MACADDR_ACK, /* MAC address update acknowledged */
+
+} NET_TYPES;
+
+#define ETH_HEADER_SIZE 14 /* size of ethernet header */
+
+#define ETH_MIN_DATA_SIZE 46 /* minimum eth data size */
+#define ETH_MIN_PACKET_SIZE (ETH_HEADER_SIZE + ETH_MIN_DATA_SIZE)
+
+#define ETH_DEF_DATA_SIZE 1500 /* default data size */
+#define ETH_DEF_PACKET_SIZE (ETH_HEADER_SIZE + ETH_DEF_DATA_SIZE)
+
+#define ETH_MAX_MTU 16384 /* maximum data size */
+
+#ifndef MAX_MACADDR_LEN
+#define MAX_MACADDR_LEN 6 /* number of bytes in MAC address */
+#endif /* MAX_MACADDR_LEN */
+
+#define ETH_IS_LOCALLY_ADMINISTERED(Address) \
+ (((U8 *) (Address))[0] & ((U8) 0x02))
+#define NIC_VENDOR_ID 0x0008000B
+
+/* various types of scsi task mgmt commands */
+typedef enum {
+ TASK_MGMT_ABORT_TASK = 1,
+ TASK_MGMT_BUS_RESET,
+ TASK_MGMT_LUN_RESET,
+ TASK_MGMT_TARGET_RESET,
+} TASK_MGMT_TYPES;
+
+/* various types of vdisk mgmt commands */
+typedef enum { VDISK_MGMT_ACQUIRE = 1, VDISK_MGMT_RELEASE,
+} VDISK_MGMT_TYPES;
+
+/* this is used in the vdest field */
+#define VDEST_ALL 0xFFFF
+
+#define MIN_NUMSIGNALS 64
+#define MAX_NUMSIGNALS 4096
+
+/* MAX_NET_RCV_BUF specifies the number of rcv buffers that are created by each
+* guest's virtnic and posted to uisnic. Uisnic, for each channel, keeps the rcv
+* buffers posted and uses them to receive data on behalf of the guest's virtnic.
+* NOTE: the num_rcv_bufs is configurable for each VNIC. So the following is
+* simply an upper limit on what each VNIC can provide. Setting it to half of the
+* NUMSIGNALS to prevent queue full deadlocks */
+#define MAX_NET_RCV_BUFS (MIN_NUMSIGNALS / 2)
+
+/*
+ * structs with pragma pack */
+
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+
+#pragma pack(push, 1)
+
+struct guest_phys_info {
+ U64 address;
+ U64 length;
+};
+
+#define GPI_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct guest_phys_info))
+
+struct uisscsi_dest {
+ U32 channel; /* channel == bus number */
+ U32 id; /* id == target number */
+ U32 lun; /* lun == logical unit number */
+};
+
+struct vhba_wwnn {
+ U32 wwnn1;
+ U32 wwnn2;
+};
+
+/* WARNING: Values stored in this structure must contain maximum counts (not
+ * maximum values). */
+struct vhba_config_max { /* 20 bytes */
+ U32 max_channel; /* maximum channel for devices attached to this
+ * bus */
+ U32 max_id; /* maximum SCSI ID for devices attached to this
+ * bus */
+ U32 max_lun; /* maximum SCSI LUN for devices attached to this
+ * bus */
+ U32 cmd_per_lun; /* maximum number of outstanding commands per
+ * lun that are allowed at one time */
+ U32 max_io_size; /* maximum io size for devices attached to this
+ * bus */
+ /* max io size is often determined by the resources of the hba, e.g. */
+ /* max scatter gather list length * page size / sector size */
+};
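As a worked example of the note above: with the 256 scatter-gather segments and 4096-byte pages assumed earlier in this header, max_io_size would typically be 256 * 4096 bytes = 1 MB, matching the MAX_IO_SIZE limit defined above.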
+
+struct uiscmdrsp_scsi {
+ void *scsicmd; /* the handle to the cmd that was received -
+ * send it back as is in the rsp packet. */
+ U8 cmnd[MAX_CMND_SIZE]; /* the cdb for the command */
+ U32 bufflen; /* length of data to be transferred out or in */
+ U16 guest_phys_entries; /* Number of entries in scatter-gather (sg)
+ * list */
+ struct guest_phys_info gpi_list[MAX_PHYS_INFO]; /* physical address
+ * information for each
+ * fragment */
+ enum dma_data_direction data_dir; /* direction of the data, if any */
+ struct uisscsi_dest vdest; /* identifies the virtual hba, id,
+ * channel, lun to which cmd was sent */
+
+ /* the following fields are needed to queue the rsp back to cmd
+ * originator */
+ int linuxstat; /* the original Linux status - for use by linux
+ * vdisk code */
+ U8 scsistat; /* the scsi status */
+ U8 addlstat; /* non-scsi status - covers cases like timeout
+ * needed by windows guests */
+#define ADDL_RESET 1
+#define ADDL_TIMEOUT 2
+#define ADDL_INTERNAL_ERROR 3
+#define ADDL_SEL_TIMEOUT 4
+#define ADDL_CMD_TIMEOUT 5
+#define ADDL_BAD_TARGET 6
+#define ADDL_RETRY 7
+
+ /* the following fields are needed to determine the result of the command */
+ U8 sensebuf[MAX_SENSE_SIZE]; /* sense info in case cmd failed; */
+ /* it holds the sense_data struct; */
+ /* see that struct for details. */
+ void *vdisk; /* contains pointer to the vdisk so that we can clean up
+ * when the IO completes. */
+ int no_disk_result; /* used to return no disk inquiry result */
+ /* when no_disk_result is set to 1, */
+ /* scsi.scsistat is SAM_STAT_GOOD */
+ /* scsi.addlstat is 0 */
+ /* scsi.linuxstat is SAM_STAT_GOOD */
+ /* That is, there is NO error. */
+};
+
+/*
+* Defines to support sending correct inquiry result when no disk is
+* configured. */
+
+/* From SCSI SPC2 -
+ *
+ * If the target is not capable of supporting a device on this logical unit, the
+ * device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b
+ * and PERIPHERAL DEVICE TYPE set to 1Fh).
+ *
+ *The device server is capable of supporting the specified peripheral device
+ *type on this logical unit. However, the physical device is not currently
+ *connected to this logical unit.
+ */
+
+#define DEV_NOT_PRESENT 0x7f /* old name - compatibility */
+#define DEV_NOT_CAPABLE 0x7f /* peripheral qualifier of 0x3 */
+ /* peripheral type of 0x1f */
+ /* specifies no device but target present */
+
+#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1 */
+ /* peripheral type of 0 - disk */
+ /* specifies device capable, but not present */
+
+#define DEV_PROC_CAPABLE_NOT_PRESENT 0x23 /* peripheral qualifier of 0x1 */
+ /* peripheral type of 3 - processor */
+ /* specifies device capable, but not present */
+
+#define DEV_HISUPPORT 0x10 /* HiSup = 1; shows support for report luns */
+ /* must be returned for lun 0. */
+
+/* NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
+* in buf[4] some linux code accesses bytes beyond 5 to retrieve vendor, product
+* & revision. Yikes! So let us always send back 36 bytes, the minimum for
+* inquiry result. */
+#define NO_DISK_INQUIRY_RESULT_LEN 36
+
+#define MIN_INQUIRY_RESULT_LEN 5 /* we need at least 5 bytes for an inquiry
+ * result */
+
+/* SCSI device version for no disk inquiry result */
+#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
+
+/* Windows and Linux want different things for a non-existent lun. So, we'll let
+ * caller pass in the peripheral qualifier and type.
+ * NOTE: per SCSI, buf[4] returns (n-4); so we return length-1-4, i.e. length-5. */
+
+#define SET_NO_DISK_INQUIRY_RESULT(buf, len, lun, lun0notpresent, notpresent) \
+ do { \
+ MEMSET(buf, 0, \
+ MINNUM(len, \
+ (unsigned int) NO_DISK_INQUIRY_RESULT_LEN)); \
+ buf[2] = (U8) SCSI_SPC2_VER; \
+ if (lun == 0) { \
+ buf[0] = (U8) lun0notpresent; \
+ buf[3] = (U8) DEV_HISUPPORT; \
+ } else \
+ buf[0] = (U8) notpresent; \
+ buf[4] = (U8) ( \
+ MINNUM(len, \
+ (unsigned int) NO_DISK_INQUIRY_RESULT_LEN) - 5); \
+ if (len >= NO_DISK_INQUIRY_RESULT_LEN) { \
+ buf[8] = 'D'; \
+ buf[9] = 'E'; \
+ buf[10] = 'L'; \
+ buf[11] = 'L'; \
+ buf[16] = 'P'; \
+ buf[17] = 'S'; \
+ buf[18] = 'E'; \
+ buf[19] = 'U'; \
+ buf[20] = 'D'; \
+ buf[21] = 'O'; \
+ buf[22] = ' '; \
+ buf[23] = 'D'; \
+ buf[24] = 'E'; \
+ buf[25] = 'V'; \
+ buf[26] = 'I'; \
+ buf[27] = 'C'; \
+ buf[28] = 'E'; \
+ buf[30] = ' '; \
+ buf[31] = '.'; \
+ } \
+ } while (0)
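A hypothetical usage fragment (the buffer and LUN names are made up) showing the intended call pattern with the qualifier/type values defined above:

/* Hypothetical fragment: report "no disk present" for a probed LUN. */
U8 inq[NO_DISK_INQUIRY_RESULT_LEN];
unsigned int lun = 1;	/* whichever LUN was probed */

SET_NO_DISK_INQUIRY_RESULT(inq, sizeof(inq), lun,
			   DEV_DISK_CAPABLE_NOT_PRESENT,	/* used when lun == 0 */
			   DEV_NOT_CAPABLE);			/* used otherwise */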
+
+
+/*
+* Struct & Defines to support sense information.
+*/
+
+
+/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
+* initialized in exactly the manner that is recommended in Windows (hence the
+* odd values).
+* When set, these fields will have the following values:
+* ErrorCode = 0x70 indicates current error
+* Valid = 1 indicates sense info is valid
+* SenseKey contains sense key as defined by SCSI specs.
+* AdditionalSenseCode contains the additional sense code as defined by SCSI specs.
+* AdditionalSenseCodeQualifier contains qualifier to sense code as defined by
+* scsi docs.
+* AdditionalSenseLength will be sizeof(sense_data)-8=10.
+*/
+struct sense_data {
+ U8 ErrorCode:7;
+ U8 Valid:1;
+ U8 SegmentNumber;
+ U8 SenseKey:4;
+ U8 Reserved:1;
+ U8 IncorrectLength:1;
+ U8 EndOfMedia:1;
+ U8 FileMark:1;
+ U8 Information[4];
+ U8 AdditionalSenseLength;
+ U8 CommandSpecificInformation[4];
+ U8 AdditionalSenseCode;
+ U8 AdditionalSenseCodeQualifier;
+ U8 FieldReplaceableUnitCode;
+ U8 SenseKeySpecific[3];
+};
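+
+/* Illustrative sketch only: filling a struct sense_data with the values
+ * recommended in the comment above.  The key/asc/ascq arguments are
+ * hypothetical caller-supplied SCSI sense values; this helper is not part
+ * of the channel definition itself. */
+static inline void
+example_fill_sense_data(struct sense_data *sd, U8 key, U8 asc, U8 ascq)
+{
+	MEMSET(sd, 0, sizeof(struct sense_data));
+	sd->ErrorCode = 0x70;		/* current error */
+	sd->Valid = 1;			/* sense information is valid */
+	sd->SenseKey = key & 0x0f;	/* 4-bit sense key */
+	sd->AdditionalSenseCode = asc;
+	sd->AdditionalSenseCodeQualifier = ascq;
+	/* bytes that follow the 8-byte fixed header: 18 - 8 = 10 */
+	sd->AdditionalSenseLength = sizeof(struct sense_data) - 8;
+}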
+
+/* some SCSI ADSENSE codes */
+#ifndef SCSI_ADSENSE_LUN_NOT_READY
+#define SCSI_ADSENSE_LUN_NOT_READY 0x04
+#endif /* */
+#ifndef SCSI_ADSENSE_ILLEGAL_COMMAND
+#define SCSI_ADSENSE_ILLEGAL_COMMAND 0x20
+#endif /* */
+#ifndef SCSI_ADSENSE_ILLEGAL_BLOCK
+#define SCSI_ADSENSE_ILLEGAL_BLOCK	0x21
+#endif /* */
+#ifndef SCSI_ADSENSE_INVALID_CDB
+#define SCSI_ADSENSE_INVALID_CDB 0x24
+#endif /* */
+#ifndef SCSI_ADSENSE_INVALID_LUN
+#define SCSI_ADSENSE_INVALID_LUN 0x25
+#endif /* */
+#ifndef SCSI_ADWRITE_PROTECT
+#define SCSI_ADWRITE_PROTECT 0x27
+#endif /* */
+#ifndef SCSI_ADSENSE_MEDIUM_CHANGED
+#define SCSI_ADSENSE_MEDIUM_CHANGED 0x28
+#endif /* */
+#ifndef SCSI_ADSENSE_BUS_RESET
+#define SCSI_ADSENSE_BUS_RESET 0x29
+#endif /* */
+#ifndef SCSI_ADSENSE_NO_MEDIA_IN_DEVICE
+#define SCSI_ADSENSE_NO_MEDIA_IN_DEVICE 0x3a
+#endif /* */
+
+struct net_pkt_xmt {
+ int len; /* full length of data in the packet */
+ int num_frags; /* number of fragments in frags containing data */
+ struct phys_info frags[MAX_PHYS_INFO]; /* physical page information for
+ * each fragment */
+ char ethhdr[ETH_HEADER_SIZE]; /* the ethernet header */
+ struct {
+
+ /* these are needed for csum at uisnic end */
+ U8 valid; /* 1 = rest of this struct is valid - else
+ * ignore */
+		U8 hrawoffv;	/* 1 = hrawoff is valid */
+		U8 nhrawoffv;	/* 1 = nhrawoff is valid */
+ U16 protocol; /* specifies packet protocol */
+ U32 csum; /* value used to set skb->csum at IOPart */
+ U32 hrawoff; /* value used to set skb->h.raw at IOPart */
+ /* hrawoff points to the start of the TRANSPORT LAYER HEADER */
+ U32 nhrawoff; /* value used to set skb->nh.raw at IOPart */
+ /* nhrawoff points to the start of the NETWORK LAYER HEADER */
+ } lincsum;
+
+ /* **** NOTE ****
+ * The full packet is described in frags but the ethernet header is
+	 * separately kept in ethhdr so that uisnic doesn't have to "MAP" the
+ * guest memory to get to the header. uisnic needs ethhdr to
+ * determine how to route the packet.
+ */
+};
+
+struct net_pkt_xmtdone {
+ U32 xmt_done_result; /* result of NET_XMIT */
+#define XMIT_SUCCESS 0
+#define XMIT_FAILED 1
+};
+
+/* RCVPOST_BUF_SIZE must be at most page_size(4096) - cache_line_size(64).
+* The reason is that dev_alloc_skb, which is used to generate RCV_POST skbs in
+* virtnic, requires "overhead" in the buffer and pads 16 bytes. I prefer to use
+* 1 full cache line size for the "overhead" so that transfers are better. IOVM
+* requires that a buffer be represented by 1 phys_info structure, which can
+* only cover page_size. */
+#define RCVPOST_BUF_SIZE 4032
+#define MAX_NET_RCV_CHAIN \
+ ((ETH_MAX_MTU+ETH_HEADER_SIZE + RCVPOST_BUF_SIZE-1) / RCVPOST_BUF_SIZE)
+
+struct net_pkt_rcvpost {
+ /* rcv buf size must be large enough to include ethernet data len +
+ * ethernet header len - we are choosing 2K because it is guaranteed
+ * to be describable */
+ struct phys_info frag; /* physical page information for the
+ * single fragment 2K rcv buf */
+	U64 UniqueNum;		/* This is used to make sure that
+				 * receive posts are returned to the
+				 * Adapter which sent them originally. */
+};
+
+struct net_pkt_rcv {
+
+ /* the number of receive buffers that can be chained */
+ /* is based on max mtu and size of each rcv buf */
+ U32 rcv_done_len; /* length of received data */
+ U8 numrcvbufs; /* number of receive buffers that contain the */
+ /* incoming data; guest end MUST chain these together. */
+ void *rcvbuf[MAX_NET_RCV_CHAIN]; /* the list of receive buffers
+ * that must be chained; */
+ /* each entry is a receive buffer provided by NET_RCV_POST. */
+ /* NOTE: first rcvbuf in the chain will also be provided in net.buf. */
+ U64 UniqueNum;
+ U32 RcvsDroppedDelta;
+};
+
+struct net_pkt_enbdis {
+ void *context;
+ U16 enable; /* 1 = enable, 0 = disable */
+};
+
+struct net_pkt_macaddr {
+ void *context;
+ U8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
+};
+
+/* cmd rsp packet used for VNIC network traffic */
+struct uiscmdrsp_net {
+ NET_TYPES type;
+ void *buf;
+ union {
+ struct net_pkt_xmt xmt; /* used for NET_XMIT */
+ struct net_pkt_xmtdone xmtdone; /* used for NET_XMIT_DONE */
+ struct net_pkt_rcvpost rcvpost; /* used for NET_RCV_POST */
+ struct net_pkt_rcv rcv; /* used for NET_RCV */
+ struct net_pkt_enbdis enbdis; /* used for NET_RCV_ENBDIS, */
+ /* NET_RCV_ENBDIS_ACK, */
+ /* NET_RCV_PROMSIC, */
+ /* and NET_CONNECT_STATUS */
+ struct net_pkt_macaddr macaddr;
+ };
+};
+
+struct uiscmdrsp_scsitaskmgmt {
+ TASK_MGMT_TYPES tasktype;
+
+ /* the type of task */
+ struct uisscsi_dest vdest;
+
+ /* the vdisk for which this task mgmt is generated */
+ void *scsicmd;
+
+ /* This is some handle that the guest has saved off for its own use.
+ * Its value is preserved by iopart & returned as is in the task mgmt
+ * rsp. */
+ void *notify;
+
+ /* For linux guests, this is a pointer to wait_queue_head that a
+ * thread is waiting on to see if the taskmgmt command has completed.
+ * For windows guests, this is a pointer to a location that a waiting
+ * thread is testing to see if the taskmgmt command has completed.
+ * When the rsp is received by guest, the thread receiving the
+	 * response uses this to notify the thread waiting for taskmgmt
+ * command completion. Its value is preserved by iopart & returned
+ * as is in the task mgmt rsp. */
+ void *notifyresult;
+
+ /* this is a handle to location in guest where the result of the
+	 * taskmgmt command (result field) is to be saved off when the response
+ * is handled. Its value is preserved by iopart & returned as is in
+ * the task mgmt rsp. */
+ char result;
+
+ /* result of taskmgmt command - set by IOPart - values are: */
+#define TASK_MGMT_FAILED 0
+#define TASK_MGMT_SUCCESS 1
+};
+
+/* The following is used by uissd to send disk add/remove notifications to
+ * Guest */
+/* Note that the vHba pointer is not used by the Client/Guest side. */
+struct uiscmdrsp_disknotify {
+ U8 add; /* 0-remove, 1-add */
+ void *vHba; /* Pointer to vhba_info for channel info to
+ * route msg */
+	U32 channel, id, lun;	/* SCSI Path of Disk to be added or removed */
+};
+
+/* The following is used by virthba/vSCSI to send the Acquire/Release commands
+* to the IOVM. */
+struct uiscmdrsp_vdiskmgmt {
+ VDISK_MGMT_TYPES vdisktype;
+
+ /* the type of task */
+ struct uisscsi_dest vdest;
+
+ /* the vdisk for which this task mgmt is generated */
+ void *scsicmd;
+
+ /* This is some handle that the guest has saved off for its own use.
+ * Its value is preserved by iopart & returned as is in the task mgmt
+ * rsp. */
+ void *notify;
+
+ /* For linux guests, this is a pointer to wait_queue_head that a
+ * thread is waiting on to see if the taskmgmt command has completed.
+ * For windows guests, this is a pointer to a location that a waiting
+ * thread is testing to see if the taskmgmt command has completed.
+ * When the rsp is received by guest, the thread receiving the
+	 * response uses this to notify the thread waiting for taskmgmt
+ * command completion. Its value is preserved by iopart & returned
+ * as is in the task mgmt rsp. */
+ void *notifyresult;
+
+ /* this is a handle to location in guest where the result of the
+	 * taskmgmt command (result field) is to be saved off when the response
+ * is handled. Its value is preserved by iopart & returned as is in
+ * the task mgmt rsp. */
+ char result;
+
+ /* result of taskmgmt command - set by IOPart - values are: */
+#define VDISK_MGMT_FAILED 0
+#define VDISK_MGMT_SUCCESS 1
+};
+
+/* keeping cmd & rsp info in one structure for now cmd rsp packet for scsi */
+struct uiscmdrsp {
+ char cmdtype;
+
+ /* describes what type of information is in the struct */
+#define CMD_SCSI_TYPE 1
+#define CMD_NET_TYPE 2
+#define CMD_SCSITASKMGMT_TYPE 3
+#define CMD_NOTIFYGUEST_TYPE 4
+#define CMD_VDISKMGMT_TYPE 5
+ union {
+ struct uiscmdrsp_scsi scsi;
+ struct uiscmdrsp_net net;
+ struct uiscmdrsp_scsitaskmgmt scsitaskmgmt;
+ struct uiscmdrsp_disknotify disknotify;
+ struct uiscmdrsp_vdiskmgmt vdiskmgmt;
+ };
+ void *private_data; /* used to send the response when the cmd is
+				 * done (scsi & scsitaskmgmt). */
+ struct uiscmdrsp *next; /* General Purpose Queue Link */
+ struct uiscmdrsp *activeQ_next; /* Used to track active commands */
+ struct uiscmdrsp *activeQ_prev; /* Used to track active commands */
+};
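+
+/* Illustrative sketch only: how a guest might dispatch a completed
+ * uiscmdrsp pulled off the response queue by switching on cmdtype.  The
+ * per-type handling is shown as comments because the real handling belongs
+ * to the individual client drivers (e.g. virthba, virtnic). */
+static inline void
+example_dispatch_cmdrsp(struct uiscmdrsp *cmdrsp)
+{
+	switch (cmdrsp->cmdtype) {
+	case CMD_SCSI_TYPE:
+		/* complete cmdrsp->scsi against the originating request */
+		break;
+	case CMD_NET_TYPE:
+		/* hand cmdrsp->net to the virtual NIC code */
+		break;
+	case CMD_SCSITASKMGMT_TYPE:
+		/* record cmdrsp->scsitaskmgmt.result and wake the waiter */
+		break;
+	case CMD_NOTIFYGUEST_TYPE:
+		/* act on cmdrsp->disknotify (disk added or removed) */
+		break;
+	case CMD_VDISKMGMT_TYPE:
+		/* record cmdrsp->vdiskmgmt.result and wake the waiter */
+		break;
+	default:
+		break;
+	}
+}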
+
+/* This is just the header of the IO channel. It is assumed that directly after
+* this header there is a large region of memory which contains the command and
+* response queues as specified in cmdQ and rspQ SIGNAL_QUEUE_HEADERS. */
+typedef struct _ULTRA_IO_CHANNEL_PROTOCOL {
+ CHANNEL_HEADER ChannelHeader;
+ SIGNAL_QUEUE_HEADER cmdQ;
+ SIGNAL_QUEUE_HEADER rspQ;
+ union {
+ struct {
+ struct vhba_wwnn wwnn; /* 8 bytes */
+ struct vhba_config_max max; /* 20 bytes */
+ } vhba; /* 28 */
+ struct {
+ U8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
+ U32 num_rcv_bufs; /* 4 */
+ U32 mtu; /* 4 */
+ GUID zoneGuid; /* 16 */
+ } vnic; /* total 30 */
+ };
+
+#define MAX_CLIENTSTRING_LEN 1024
+ U8 clientString[MAX_CLIENTSTRING_LEN]; /* NULL terminated - so holds
+ * max - 1 bytes */
+} ULTRA_IO_CHANNEL_PROTOCOL;
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+
+/* define offsets to members of struct uiscmdrsp */
+#define OFFSET_CMDTYPE OFFSETOF(struct uiscmdrsp, cmdtype)
+#define OFFSET_SCSI OFFSETOF(struct uiscmdrsp, scsi)
+#define OFFSET_NET OFFSETOF(struct uiscmdrsp, net)
+#define OFFSET_SCSITASKMGMT OFFSETOF(struct uiscmdrsp, scsitaskmgmt)
+#define OFFSET_NEXT OFFSETOF(struct uiscmdrsp, next)
+
+/* define offsets to members of struct uiscmdrsp_net */
+#define OFFSET_TYPE OFFSETOF(struct uiscmdrsp_net, type)
+#define OFFSET_BUF OFFSETOF(struct uiscmdrsp_net, buf)
+#define OFFSET_XMT OFFSETOF(struct uiscmdrsp_net, xmt)
+#define OFFSET_XMT_DONE_RESULT OFFSETOF(struct uiscmdrsp_net, xmtdone)
+#define OFFSET_RCVPOST OFFSETOF(struct uiscmdrsp_net, rcvpost)
+#define OFFSET_RCV_DONE_LEN OFFSETOF(struct uiscmdrsp_net, rcv)
+#define OFFSET_ENBDIS OFFSETOF(struct uiscmdrsp_net, enbdis)
+
+/* define offsets to members of struct net_pkt_rcvpost */
+#define OFFSET_TOTALLEN OFFSETOF(struct net_pkt_rcvpost, totallen)
+#define OFFSET_FRAG OFFSETOF(struct net_pkt_rcvpost, frag)
+
+/*
+* INLINE functions for initializing and accessing I/O data channels
+*/
+
+
+#define NUMSIGNALS(x, q) (((ULTRA_IO_CHANNEL_PROTOCOL *)(x))->q.MaxSignalSlots)
+#define SIZEOF_PROTOCOL (COVER(sizeof(ULTRA_IO_CHANNEL_PROTOCOL), 64))
+#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64))
+
+#define IO_CHANNEL_SIZE(x) COVER(SIZEOF_PROTOCOL + \
+ (NUMSIGNALS(x, cmdQ) + \
+ NUMSIGNALS(x, rspQ)) * SIZEOF_CMDRSP, 4096)
+#define MIN_IO_CHANNEL_SIZE COVER(SIZEOF_PROTOCOL + \
+ 2 * MIN_NUMSIGNALS * SIZEOF_CMDRSP, 4096)
+#ifdef __GNUC__
+/* These defines should only ever be used in service partitions */
+/* because they rely on the size of uiscmdrsp */
+#define QSLOTSFROMBYTES(bytes) (((bytes-SIZEOF_PROTOCOL)/2)/SIZEOF_CMDRSP)
+#define QSIZEFROMBYTES(bytes) (QSLOTSFROMBYTES(bytes)*SIZEOF_CMDRSP)
+#define SignalQInit(x) \
+ do { \
+ x->cmdQ.Size = QSIZEFROMBYTES(x->ChannelHeader.Size); \
+ x->cmdQ.oSignalBase = SIZEOF_PROTOCOL - \
+ OFFSETOF(ULTRA_IO_CHANNEL_PROTOCOL, cmdQ); \
+ x->cmdQ.SignalSize = SIZEOF_CMDRSP; \
+ x->cmdQ.MaxSignalSlots = \
+ QSLOTSFROMBYTES(x->ChannelHeader.Size); \
+ x->cmdQ.MaxSignals = x->cmdQ.MaxSignalSlots - 1; \
+ x->rspQ.Size = QSIZEFROMBYTES(x->ChannelHeader.Size); \
+ x->rspQ.oSignalBase = \
+ (SIZEOF_PROTOCOL + x->cmdQ.Size) - \
+ OFFSETOF(ULTRA_IO_CHANNEL_PROTOCOL, rspQ); \
+ x->rspQ.SignalSize = SIZEOF_CMDRSP; \
+ x->rspQ.MaxSignalSlots = \
+ QSLOTSFROMBYTES(x->ChannelHeader.Size); \
+ x->rspQ.MaxSignals = x->rspQ.MaxSignalSlots - 1; \
+ x->ChannelHeader.oChannelSpace = \
+ OFFSETOF(ULTRA_IO_CHANNEL_PROTOCOL, cmdQ); \
+ } while (0)
+
+#define INIT_CLIENTSTRING(chan, type, clientStr, clientStrLen) \
+ do { \
+ if (clientStr) { \
+ chan->ChannelHeader.oClientString = \
+ OFFSETOF(type, clientString); \
+ MEMCPY(chan->clientString, clientStr, \
+ MINNUM(clientStrLen, \
+ (U32) (MAX_CLIENTSTRING_LEN - 1))); \
+ chan->clientString[MINNUM(clientStrLen, \
+ (U32) (MAX_CLIENTSTRING_LEN \
+ - 1))] \
+ = '\0'; \
+		} else if (clientStrLen > 0)				\
+			return 0;					\
+ } while (0)
+
+
+#define ULTRA_IO_CHANNEL_SERVER_READY(x, chanId, logCtx) \
+ ULTRA_CHANNEL_SERVER_TRANSITION(x, chanId, SrvState, CHANNELSRV_READY, \
+ logCtx);
+
+#define ULTRA_IO_CHANNEL_SERVER_NOTREADY(x, chanId, logCtx) \
+ ULTRA_CHANNEL_SERVER_TRANSITION(x, chanId, SrvState, \
+ CHANNELSRV_UNINITIALIZED, logCtx);
+
+static inline int ULTRA_VHBA_init_channel(ULTRA_IO_CHANNEL_PROTOCOL *x,
+ struct vhba_wwnn *wwnn,
+ struct vhba_config_max *max,
+ unsigned char *clientStr,
+ U32 clientStrLen, U64 bytes) {
+ MEMSET(x, 0, sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
+ x->ChannelHeader.VersionId = ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID;
+ x->ChannelHeader.Signature = ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE;
+ x->ChannelHeader.SrvState = CHANNELSRV_UNINITIALIZED;
+ x->ChannelHeader.HeaderSize = sizeof(x->ChannelHeader);
+ x->ChannelHeader.Size = COVER(bytes, 4096);
+ x->ChannelHeader.Type = UltraVhbaChannelProtocolGuid;
+ x->ChannelHeader.ZoneGuid = Guid0;
+ x->vhba.wwnn = *wwnn;
+ x->vhba.max = *max;
+ INIT_CLIENTSTRING(x, ULTRA_IO_CHANNEL_PROTOCOL, clientStr,
+ clientStrLen);
+ SignalQInit(x);
+ if ((x->cmdQ.MaxSignalSlots > MAX_NUMSIGNALS) ||
+ (x->rspQ.MaxSignalSlots > MAX_NUMSIGNALS)) {
+ return 0;
+ }
+ if ((x->cmdQ.MaxSignalSlots < MIN_NUMSIGNALS) ||
+ (x->rspQ.MaxSignalSlots < MIN_NUMSIGNALS)) {
+ return 0;
+ }
+ return 1;
+}
+
+static inline void ULTRA_VHBA_set_max(ULTRA_IO_CHANNEL_PROTOCOL *x,
+ struct vhba_config_max *max) {
+ x->vhba.max = *max;
+}
+
+static inline int ULTRA_VNIC_init_channel(ULTRA_IO_CHANNEL_PROTOCOL *x,
+ unsigned char *macaddr,
+ U32 num_rcv_bufs, U32 mtu,
+ GUID zoneGuid,
+ unsigned char *clientStr,
+ U32 clientStrLen,
+ U64 bytes) {
+ MEMSET(x, 0, sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
+ x->ChannelHeader.VersionId = ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID;
+ x->ChannelHeader.Signature = ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE;
+ x->ChannelHeader.SrvState = CHANNELSRV_UNINITIALIZED;
+ x->ChannelHeader.HeaderSize = sizeof(x->ChannelHeader);
+ x->ChannelHeader.Size = COVER(bytes, 4096);
+ x->ChannelHeader.Type = UltraVnicChannelProtocolGuid;
+ x->ChannelHeader.ZoneGuid = Guid0;
+ MEMCPY(x->vnic.macaddr, macaddr, MAX_MACADDR_LEN);
+ x->vnic.num_rcv_bufs = num_rcv_bufs;
+ x->vnic.mtu = mtu;
+ x->vnic.zoneGuid = zoneGuid;
+ INIT_CLIENTSTRING(x, ULTRA_IO_CHANNEL_PROTOCOL, clientStr,
+ clientStrLen);
+ SignalQInit(x);
+ if ((x->cmdQ.MaxSignalSlots > MAX_NUMSIGNALS) ||
+ (x->rspQ.MaxSignalSlots > MAX_NUMSIGNALS)) {
+ return 0;
+ }
+ if ((x->cmdQ.MaxSignalSlots < MIN_NUMSIGNALS) ||
+ (x->rspQ.MaxSignalSlots < MIN_NUMSIGNALS)) {
+ return 0;
+ }
+ return 1;
+}
+
+#endif /* __GNUC__ */
+
+/*
+* INLINE function for expanding a guest's pfn-off-size into multiple 4K page
+* pfn-off-size entries.
+*/
+
+
+/* we deal with 4K page sizes when it comes to passing page information
+ * between Guest and IOPartition. */
+#define PI_PAGE_SIZE 0x1000
+#define PI_PAGE_MASK 0x0FFF
+#define PI_PAGE_SHIFT 12
+
+/* returns next non-zero index on success or zero on failure (i.e. out of
+ * room)
+ */
+static INLINE U16
+add_physinfo_entries(U32 inp_pfn, /* input - specifies the pfn to be used
+ * to add entries */
+ U16 inp_off, /* input - specifies the off to be used
+ * to add entries */
+ U32 inp_len, /* input - specifies the len to be used
+ * to add entries */
+ U16 index, /* input - index in array at which new
+ * entries are added */
+ U16 max_pi_arr_entries, /* input - specifies the maximum
+ * entries pi_arr can hold */
+ struct phys_info pi_arr[]) /* input & output - array to
+ * which entries are added */
+{
+ U32 len;
+ U16 i, firstlen;
+
+ firstlen = PI_PAGE_SIZE - inp_off;
+ if (inp_len <= firstlen) {
+
+ /* the input entry spans only one page - add as is */
+ if (index >= max_pi_arr_entries)
+ return 0;
+ pi_arr[index].pi_pfn = inp_pfn;
+ pi_arr[index].pi_off = (U16) inp_off;
+ pi_arr[index].pi_len = (U16) inp_len;
+ return index + 1;
+ }
+
+ /* this entry spans multiple pages */
+ for (len = inp_len, i = 0; len;
+ len -= pi_arr[index + i].pi_len, i++) {
+ if (index + i >= max_pi_arr_entries)
+ return 0;
+ pi_arr[index + i].pi_pfn = inp_pfn + i;
+ if (i == 0) {
+ pi_arr[index].pi_off = inp_off;
+ pi_arr[index].pi_len = firstlen;
+		} else {
+ pi_arr[index + i].pi_off = 0;
+ pi_arr[index + i].pi_len =
+ (U16) MINNUM(len, (U32) PI_PAGE_SIZE);
+ }
+
+ }
+ return index + i;
+}
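+
+/* Illustrative sketch only: one plausible way a guest driver could use
+ * add_physinfo_entries() to describe a single virtually-contiguous buffer
+ * in the frags[] array of struct net_pkt_xmt.  The pfn/off/len values are
+ * hypothetical; a real caller derives them from its own buffer mapping. */
+static inline int
+example_describe_xmt_buffer(struct net_pkt_xmt *xmt, U32 pfn, U16 off,
+			    U32 len)
+{
+	U16 index;
+
+	index = add_physinfo_entries(pfn, off, len, 0, MAX_PHYS_INFO,
+				     xmt->frags);
+	if (index == 0)
+		return 0;	/* out of room in frags[] */
+	xmt->num_frags = index;
+	return 1;
+}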
+
+#endif /* __IOCHANNEL_H__ */
diff --git a/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h b/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h
new file mode 100644
index 000000000000..99dbbcf3d11e
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h
@@ -0,0 +1,135 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VBUSCHANNEL_H__
+#define __VBUSCHANNEL_H__
+
+/* The vbus channel is the channel area provided via the BUS_CREATE controlvm
+ * message for each virtual bus. This channel area is provided to both server
+ * and client ends of the bus. The channel header area is initialized by
+ * the server, and the remaining information is filled in by the client.
+ * We currently use this for the client to provide various information about
+ * the client devices and client drivers for the server end to see.
+ */
+#include "commontypes.h"
+#include "vbusdeviceinfo.h"
+#include "channel.h"
+
+/* {193b331b-c58f-11da-95a9-00e08161165f} */
+#define ULTRA_VBUS_CHANNEL_PROTOCOL_GUID \
+ {0x193b331b, 0xc58f, 0x11da, \
+ {0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f} }
+static const GUID UltraVbusChannelProtocolGuid =
+ ULTRA_VBUS_CHANNEL_PROTOCOL_GUID;
+
+#define ULTRA_VBUS_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+
+/* Must increment this whenever you insert or delete fields within this channel
+* struct. Also increment whenever you change the meaning of fields within this
+* channel struct so as to break pre-existing software. Note that you can
+* usually add fields to the END of the channel struct withOUT needing to
+* increment this. */
+#define ULTRA_VBUS_CHANNEL_PROTOCOL_VERSIONID 1
+
+#define ULTRA_VBUS_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, \
+ UltraVbusChannelProtocolGuid, \
+ "vbus", \
+ sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL), \
+ ULTRA_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VBUS_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+
+#define ULTRA_VBUS_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraVbusChannelProtocolGuid, \
+ "vbus", \
+ sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL), \
+ actualBytes, \
+ __FILE__, __LINE__, logCtx))
+
+
+#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
+typedef struct _ULTRA_VBUS_HEADERINFO {
+ U32 structBytes; /* size of this struct in bytes */
+ U32 deviceInfoStructBytes; /* sizeof(ULTRA_VBUS_DEVICEINFO) */
+ U32 devInfoCount; /* num of items in DevInfo member */
+ /* (this is the allocated size) */
+	U32 chpInfoByteOffset;	/* byte offset from beginning of this struct */
+	/* to the ChpInfo struct (below) */
+	U32 busInfoByteOffset;	/* byte offset from beginning of this struct */
+	/* to the BusInfo struct (below) */
+	U32 devInfoByteOffset;	/* byte offset from beginning of this struct */
+	/* to the DevInfo array (below) */
+ U8 reserved[104];
+} ULTRA_VBUS_HEADERINFO;
+
+typedef struct _ULTRA_VBUS_CHANNEL_PROTOCOL {
+ ULTRA_CHANNEL_PROTOCOL ChannelHeader; /* initialized by server */
+ ULTRA_VBUS_HEADERINFO HdrInfo; /* initialized by server */
+ /* the remainder of this channel is filled in by the client */
+ ULTRA_VBUS_DEVICEINFO ChpInfo; /* describes client chipset device and
+ * driver */
+ ULTRA_VBUS_DEVICEINFO BusInfo; /* describes client bus device and
+ * driver */
+ ULTRA_VBUS_DEVICEINFO DevInfo[0]; /* describes client device and
+ * driver for */
+ /* each device on the bus */
+} ULTRA_VBUS_CHANNEL_PROTOCOL;
+
+#define VBUS_CH_SIZE_EXACT(MAXDEVICES) \
+ (sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL) + ((MAXDEVICES) * \
+ sizeof(ULTRA_VBUS_DEVICEINFO)))
+#define VBUS_CH_SIZE(MAXDEVICES) COVER(VBUS_CH_SIZE_EXACT(MAXDEVICES), 4096)
+
+static INLINE void
+ULTRA_VBUS_init_channel(ULTRA_VBUS_CHANNEL_PROTOCOL __iomem *x,
+ int bytesAllocated)
+{
+ /* Please note that the memory at <x> does NOT necessarily have space
+ * for DevInfo structs allocated at the end, which is why we do NOT use
+ * <bytesAllocated> to clear. */
+ memset_io(x, 0, sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL));
+ if (bytesAllocated < (int) sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL))
+ return;
+ writel(ULTRA_VBUS_CHANNEL_PROTOCOL_VERSIONID,
+ &x->ChannelHeader.VersionId);
+ writeq(ULTRA_VBUS_CHANNEL_PROTOCOL_SIGNATURE,
+ &x->ChannelHeader.Signature);
+ writel(CHANNELSRV_READY, &x->ChannelHeader.SrvState);
+ writel(sizeof(x->ChannelHeader), &x->ChannelHeader.HeaderSize);
+ writeq(bytesAllocated, &x->ChannelHeader.Size);
+ memcpy_toio(&x->ChannelHeader.Type, &UltraVbusChannelProtocolGuid,
+ sizeof(x->ChannelHeader.Type));
+ memcpy_toio(&x->ChannelHeader.ZoneGuid, &Guid0,
+ sizeof(x->ChannelHeader.ZoneGuid));
+ writel(sizeof(ULTRA_VBUS_HEADERINFO), &x->HdrInfo.structBytes);
+ writel(sizeof(ULTRA_VBUS_HEADERINFO), &x->HdrInfo.chpInfoByteOffset);
+ writel(readl(&x->HdrInfo.chpInfoByteOffset) +
+ sizeof(ULTRA_VBUS_DEVICEINFO),
+ &x->HdrInfo.busInfoByteOffset);
+ writel(readl(&x->HdrInfo.busInfoByteOffset)
+ + sizeof(ULTRA_VBUS_DEVICEINFO),
+ &x->HdrInfo.devInfoByteOffset);
+ writel(sizeof(ULTRA_VBUS_DEVICEINFO),
+ &x->HdrInfo.deviceInfoStructBytes);
+ bytesAllocated -= (sizeof(ULTRA_CHANNEL_PROTOCOL)
+ + readl(&x->HdrInfo.devInfoByteOffset));
+ writel(bytesAllocated / readl(&x->HdrInfo.deviceInfoStructBytes),
+ &x->HdrInfo.devInfoCount);
+}
+
+#pragma pack(pop)
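+
+/* Illustrative sketch only: one plausible way the server side might size
+ * and initialize a vbus channel for a given maximum device count.  The
+ * guest-physical address and the use of ioremap() here are assumptions
+ * made for the example, not requirements of the protocol. */
+static inline ULTRA_VBUS_CHANNEL_PROTOCOL __iomem *
+example_vbus_channel_setup(U64 channel_phys_addr, int maxdevices)
+{
+	U64 bytes = VBUS_CH_SIZE(maxdevices);
+	ULTRA_VBUS_CHANNEL_PROTOCOL __iomem *ch;
+
+	ch = ioremap(channel_phys_addr, bytes);
+	if (!ch)
+		return NULL;
+	ULTRA_VBUS_init_channel(ch, (int) bytes);
+	return ch;
+}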
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h b/drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h
new file mode 100644
index 000000000000..de30d321d982
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h
@@ -0,0 +1,92 @@
+/* controlvmcompletionstatus.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* Defines for all valid values returned in the response message header
+ * completionStatus field. See controlvmchannel.h for description of
+ * the header: _CONTROLVM_MESSAGE_HEADER.
+ */
+
+#ifndef __CONTROLVMCOMPLETIONSTATUS_H__
+#define __CONTROLVMCOMPLETIONSTATUS_H__
+
+/* General Errors------------------------------------------------------[0-99] */
+#define CONTROLVM_RESP_SUCCESS 0
+#define CONTROLVM_RESP_ERROR_ALREADY_DONE 1
+#define CONTROLVM_RESP_ERROR_IOREMAP_FAILED 2
+#define CONTROLVM_RESP_ERROR_KMALLOC_FAILED 3
+#define CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN 4
+#define CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT 5
+
+/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */
+#define CONTROLVM_RESP_ERROR_CLIENT_SWITCHCOUNT_NONZERO 100
+#define CONTROLVM_RESP_ERROR_EXPECTED_CHIPSET_INIT 101
+
+/* Maximum Limit----------------------------------------------------[200-299] */
+#define CONTROLVM_RESP_ERROR_MAX_BUSES 201 /* BUS_CREATE */
+#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202 /* DEVICE_CREATE */
+/* Payload and Parameter Related------------------------------------[400-499] */
+#define CONTROLVM_RESP_ERROR_PAYLOAD_INVALID 400 /* SWITCH_ATTACHEXTPORT,
+ * DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_ERROR_INITIATOR_PARAMETER_INVALID 401 /* Multiple */
+#define CONTROLVM_RESP_ERROR_TARGET_PARAMETER_INVALID 402 /* DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_ERROR_CLIENT_PARAMETER_INVALID 403 /* DEVICE_CONFIGURE */
+/* Specified[Packet Structure] Value-------------------------------[500-599] */
+#define CONTROLVM_RESP_ERROR_BUS_INVALID 500 /* SWITCH_ATTACHINTPORT,
+ * BUS_CONFIGURE,
+ * DEVICE_CREATE,
+ * DEVICE_CONFIG
+ * DEVICE_DESTROY */
+#define CONTROLVM_RESP_ERROR_DEVICE_INVALID 501 /* SWITCH_ATTACHINTPORT */
+ /* DEVICE_CREATE,
+ * DEVICE_CONFIGURE,
+ * DEVICE_DESTROY */
+#define CONTROLVM_RESP_ERROR_CHANNEL_INVALID 502 /* DEVICE_CREATE,
+ * DEVICE_CONFIGURE */
+/* Partition Driver Callback Interface----------------------[600-699] */
+#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE 604 /* BUS_CREATE,
+ * BUS_DESTROY,
+ * DEVICE_CREATE,
+ * DEVICE_DESTROY */
+/* Unable to invoke VIRTPCI callback */
+#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR 605 /* BUS_CREATE,
+ * BUS_DESTROY,
+ * DEVICE_CREATE,
+ * DEVICE_DESTROY */
+/* VIRTPCI Callback returned error */
+#define CONTROLVM_RESP_ERROR_GENERIC_DRIVER_CALLBACK_ERROR 606 /* SWITCH_ATTACHEXTPORT,
+ * SWITCH_DETACHEXTPORT
+ * DEVICE_CONFIGURE */
+
+/* generic device callback returned error */
+/* Bus Related------------------------------------------------------[700-799] */
+#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700 /* BUS_DESTROY */
+/* Channel Related--------------------------------------------------[800-899] */
+#define CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN 800 /* GET_CHANNELINFO,
+ * DEVICE_DESTROY */
+#define CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL 801 /* DEVICE_CREATE */
+/* Chipset Shutdown Related---------------------------------------[1000-1099] */
+#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_FAILED 1000
+#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
+
+/* Chipset Stop Related-------------------------------------------[1100-1199] */
+#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_BUS 1100
+#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_SWITCH 1101
+
+/* Device Related-------------------------------------------------[1400-1499] */
+#define CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT 1400
+
+#endif /* __CONTROLVMCOMPLETIONSTATUS_H__ not defined */
diff --git a/drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h b/drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h
new file mode 100644
index 000000000000..4c6294d20606
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h
@@ -0,0 +1,310 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* Please note that this file is to be used ONLY for defining diagnostic
+ * subsystem values for the appos (sPAR Linux service partitions) component.
+ */
+#ifndef __APPOS_SUBSYSTEMS_H__
+#define __APPOS_SUBSYSTEMS_H__
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/string.h>
+#else
+#include <stdio.h>
+#include <string.h>
+#endif
+
+static inline char *
+subsys_unknown_to_s(int subsys, char *s, int n)
+{
+ snprintf(s, n, "SUBSYS-%-2.2d", subsys);
+ s[n - 1] = '\0';
+ return s;
+}
+
+#define SUBSYS_TO_MASK(subsys) (1ULL << (subsys))
+
+/* The first SUBSYS_APPOS_MAX subsystems are the same for each AppOS type
+ * (IOVM, SMS, etc.) The rest have unique values for each AppOS type.
+ */
+#define SUBSYS_APPOS_MAX 16
+
+#define SUBSYS_APPOS_DEFAULT 1 /* or "other" */
+#define SUBSYS_APPOS_CHIPSET 2 /* controlvm and other */
+ /* low-level sPAR activity */
+#define SUBSYS_APPOS_BUS 3 /* sPAR bus */
+/* DAK #define SUBSYS_APPOS_DIAG 4 // diagnostics and dump */
+#define SUBSYS_APPOS_CHANNELACCESS 5 /* generic channel access */
+#define SUBSYS_APPOS_NICCLIENT 6 /* virtual NIC client */
+#define SUBSYS_APPOS_HBACLIENT 7 /* virtual HBA client */
+#define SUBSYS_APPOS_CONSOLESERIAL 8 /* sPAR virtual serial console */
+#define SUBSYS_APPOS_UISLIB 9 /* */
+#define SUBSYS_APPOS_VRTCUPDD 10 /* */
+#define SUBSYS_APPOS_WATCHDOG 11 /* watchdog timer and healthcheck */
+#define SUBSYS_APPOS_13 13 /* available */
+#define SUBSYS_APPOS_14 14 /* available */
+#define SUBSYS_APPOS_15 15 /* available */
+#define SUBSYS_APPOS_16 16 /* available */
+static inline char *
+subsys_generic_to_s(int subsys, char *s, int n)
+{
+ switch (subsys) {
+ case SUBSYS_APPOS_DEFAULT:
+ strncpy(s, "APPOS_DEFAULT", n);
+ break;
+ case SUBSYS_APPOS_CHIPSET:
+ strncpy(s, "APPOS_CHIPSET", n);
+ break;
+ case SUBSYS_APPOS_BUS:
+ strncpy(s, "APPOS_BUS", n);
+ break;
+ case SUBSYS_APPOS_CHANNELACCESS:
+ strncpy(s, "APPOS_CHANNELACCESS", n);
+ break;
+ case SUBSYS_APPOS_NICCLIENT:
+ strncpy(s, "APPOS_NICCLIENT", n);
+ break;
+ case SUBSYS_APPOS_HBACLIENT:
+ strncpy(s, "APPOS_HBACLIENT", n);
+ break;
+ case SUBSYS_APPOS_CONSOLESERIAL:
+ strncpy(s, "APPOS_CONSOLESERIAL", n);
+ break;
+ case SUBSYS_APPOS_UISLIB:
+ strncpy(s, "APPOS_UISLIB", n);
+ break;
+ case SUBSYS_APPOS_VRTCUPDD:
+ strncpy(s, "APPOS_VRTCUPDD", n);
+ break;
+ case SUBSYS_APPOS_WATCHDOG:
+ strncpy(s, "APPOS_WATCHDOG", n);
+ break;
+ case SUBSYS_APPOS_13:
+ strncpy(s, "APPOS_13", n);
+ break;
+ case SUBSYS_APPOS_14:
+ strncpy(s, "APPOS_14", n);
+ break;
+ case SUBSYS_APPOS_15:
+ strncpy(s, "APPOS_15", n);
+ break;
+ case SUBSYS_APPOS_16:
+ strncpy(s, "APPOS_16", n);
+ break;
+ default:
+ subsys_unknown_to_s(subsys, s, n);
+ break;
+ }
+ s[n - 1] = '\0';
+ return s;
+}
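+
+/* Illustrative sketch only: combining SUBSYS_TO_MASK() values into a
+ * diagnostic subsystem mask, and turning a subsystem number back into a
+ * printable name.  The mask choice is an arbitrary example value. */
+static inline unsigned long long
+example_appos_nic_hba_mask(void)
+{
+	/* enable diagnostics for the virtual NIC and HBA clients only */
+	return SUBSYS_TO_MASK(SUBSYS_APPOS_NICCLIENT) |
+	       SUBSYS_TO_MASK(SUBSYS_APPOS_HBACLIENT);
+}
+
+static inline int
+example_appos_subsys_name(int subsys, char *name, int namelen)
+{
+	/* fills <name> with e.g. "APPOS_NICCLIENT" or "SUBSYS-12" */
+	subsys_generic_to_s(subsys, name, namelen);
+	return (int) strlen(name);
+}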
+
+/* CONSOLE */
+
+#define SUBSYS_CONSOLE_VIDEO (SUBSYS_APPOS_MAX + 1) /* 17 */
+#define SUBSYS_CONSOLE_KBDMOU (SUBSYS_APPOS_MAX + 2) /* 18 */
+#define SUBSYS_CONSOLE_04 (SUBSYS_APPOS_MAX + 4)
+#define SUBSYS_CONSOLE_05 (SUBSYS_APPOS_MAX + 5)
+#define SUBSYS_CONSOLE_06 (SUBSYS_APPOS_MAX + 6)
+#define SUBSYS_CONSOLE_07 (SUBSYS_APPOS_MAX + 7)
+#define SUBSYS_CONSOLE_08 (SUBSYS_APPOS_MAX + 8)
+#define SUBSYS_CONSOLE_09 (SUBSYS_APPOS_MAX + 9)
+#define SUBSYS_CONSOLE_10 (SUBSYS_APPOS_MAX + 10)
+#define SUBSYS_CONSOLE_11 (SUBSYS_APPOS_MAX + 11)
+#define SUBSYS_CONSOLE_12 (SUBSYS_APPOS_MAX + 12)
+#define SUBSYS_CONSOLE_13 (SUBSYS_APPOS_MAX + 13)
+#define SUBSYS_CONSOLE_14 (SUBSYS_APPOS_MAX + 14)
+#define SUBSYS_CONSOLE_15 (SUBSYS_APPOS_MAX + 15)
+#define SUBSYS_CONSOLE_16 (SUBSYS_APPOS_MAX + 16)
+#define SUBSYS_CONSOLE_17 (SUBSYS_APPOS_MAX + 17)
+#define SUBSYS_CONSOLE_18 (SUBSYS_APPOS_MAX + 18)
+#define SUBSYS_CONSOLE_19 (SUBSYS_APPOS_MAX + 19)
+#define SUBSYS_CONSOLE_20 (SUBSYS_APPOS_MAX + 20)
+#define SUBSYS_CONSOLE_21 (SUBSYS_APPOS_MAX + 21)
+#define SUBSYS_CONSOLE_22 (SUBSYS_APPOS_MAX + 22)
+#define SUBSYS_CONSOLE_23 (SUBSYS_APPOS_MAX + 23)
+#define SUBSYS_CONSOLE_24 (SUBSYS_APPOS_MAX + 24)
+#define SUBSYS_CONSOLE_25 (SUBSYS_APPOS_MAX + 25)
+#define SUBSYS_CONSOLE_26 (SUBSYS_APPOS_MAX + 26)
+#define SUBSYS_CONSOLE_27 (SUBSYS_APPOS_MAX + 27)
+#define SUBSYS_CONSOLE_28 (SUBSYS_APPOS_MAX + 28)
+#define SUBSYS_CONSOLE_29 (SUBSYS_APPOS_MAX + 29)
+#define SUBSYS_CONSOLE_30 (SUBSYS_APPOS_MAX + 30)
+#define SUBSYS_CONSOLE_31 (SUBSYS_APPOS_MAX + 31)
+#define SUBSYS_CONSOLE_32 (SUBSYS_APPOS_MAX + 32)
+#define SUBSYS_CONSOLE_33 (SUBSYS_APPOS_MAX + 33)
+#define SUBSYS_CONSOLE_34 (SUBSYS_APPOS_MAX + 34)
+#define SUBSYS_CONSOLE_35 (SUBSYS_APPOS_MAX + 35)
+#define SUBSYS_CONSOLE_36 (SUBSYS_APPOS_MAX + 36)
+#define SUBSYS_CONSOLE_37 (SUBSYS_APPOS_MAX + 37)
+#define SUBSYS_CONSOLE_38 (SUBSYS_APPOS_MAX + 38)
+#define SUBSYS_CONSOLE_39 (SUBSYS_APPOS_MAX + 39)
+#define SUBSYS_CONSOLE_40 (SUBSYS_APPOS_MAX + 40)
+#define SUBSYS_CONSOLE_41 (SUBSYS_APPOS_MAX + 41)
+#define SUBSYS_CONSOLE_42 (SUBSYS_APPOS_MAX + 42)
+#define SUBSYS_CONSOLE_43 (SUBSYS_APPOS_MAX + 43)
+#define SUBSYS_CONSOLE_44 (SUBSYS_APPOS_MAX + 44)
+#define SUBSYS_CONSOLE_45 (SUBSYS_APPOS_MAX + 45)
+#define SUBSYS_CONSOLE_46 (SUBSYS_APPOS_MAX + 46)
+
+static inline char *
+subsys_console_to_s(int subsys, char *s, int n)
+{
+ switch (subsys) {
+ case SUBSYS_CONSOLE_VIDEO:
+ strncpy(s, "CONSOLE_VIDEO", n);
+ break;
+ case SUBSYS_CONSOLE_KBDMOU:
+ strncpy(s, "CONSOLE_KBDMOU", n);
+ break;
+ case SUBSYS_CONSOLE_04:
+ strncpy(s, "CONSOLE_04", n);
+ break;
+ case SUBSYS_CONSOLE_05:
+ strncpy(s, "CONSOLE_05", n);
+ break;
+ case SUBSYS_CONSOLE_06:
+ strncpy(s, "CONSOLE_06", n);
+ break;
+ case SUBSYS_CONSOLE_07:
+ strncpy(s, "CONSOLE_07", n);
+ break;
+ case SUBSYS_CONSOLE_08:
+ strncpy(s, "CONSOLE_08", n);
+ break;
+ case SUBSYS_CONSOLE_09:
+ strncpy(s, "CONSOLE_09", n);
+ break;
+ case SUBSYS_CONSOLE_10:
+ strncpy(s, "CONSOLE_10", n);
+ break;
+ case SUBSYS_CONSOLE_11:
+ strncpy(s, "CONSOLE_11", n);
+ break;
+ case SUBSYS_CONSOLE_12:
+ strncpy(s, "CONSOLE_12", n);
+ break;
+ case SUBSYS_CONSOLE_13:
+ strncpy(s, "CONSOLE_13", n);
+ break;
+ case SUBSYS_CONSOLE_14:
+ strncpy(s, "CONSOLE_14", n);
+ break;
+ case SUBSYS_CONSOLE_15:
+ strncpy(s, "CONSOLE_15", n);
+ break;
+ case SUBSYS_CONSOLE_16:
+ strncpy(s, "CONSOLE_16", n);
+ break;
+ case SUBSYS_CONSOLE_17:
+ strncpy(s, "CONSOLE_17", n);
+ break;
+ case SUBSYS_CONSOLE_18:
+ strncpy(s, "CONSOLE_18", n);
+ break;
+ case SUBSYS_CONSOLE_19:
+ strncpy(s, "CONSOLE_19", n);
+ break;
+ case SUBSYS_CONSOLE_20:
+ strncpy(s, "CONSOLE_20", n);
+ break;
+ case SUBSYS_CONSOLE_21:
+ strncpy(s, "CONSOLE_21", n);
+ break;
+ case SUBSYS_CONSOLE_22:
+ strncpy(s, "CONSOLE_22", n);
+ break;
+ case SUBSYS_CONSOLE_23:
+ strncpy(s, "CONSOLE_23", n);
+ break;
+ case SUBSYS_CONSOLE_24:
+ strncpy(s, "CONSOLE_24", n);
+ break;
+ case SUBSYS_CONSOLE_25:
+ strncpy(s, "CONSOLE_25", n);
+ break;
+ case SUBSYS_CONSOLE_26:
+ strncpy(s, "CONSOLE_26", n);
+ break;
+ case SUBSYS_CONSOLE_27:
+ strncpy(s, "CONSOLE_27", n);
+ break;
+ case SUBSYS_CONSOLE_28:
+ strncpy(s, "CONSOLE_28", n);
+ break;
+ case SUBSYS_CONSOLE_29:
+ strncpy(s, "CONSOLE_29", n);
+ break;
+ case SUBSYS_CONSOLE_30:
+ strncpy(s, "CONSOLE_30", n);
+ break;
+ case SUBSYS_CONSOLE_31:
+ strncpy(s, "CONSOLE_31", n);
+ break;
+ case SUBSYS_CONSOLE_32:
+ strncpy(s, "CONSOLE_32", n);
+ break;
+ case SUBSYS_CONSOLE_33:
+ strncpy(s, "CONSOLE_33", n);
+ break;
+ case SUBSYS_CONSOLE_34:
+ strncpy(s, "CONSOLE_34", n);
+ break;
+ case SUBSYS_CONSOLE_35:
+ strncpy(s, "CONSOLE_35", n);
+ break;
+ case SUBSYS_CONSOLE_36:
+ strncpy(s, "CONSOLE_36", n);
+ break;
+ case SUBSYS_CONSOLE_37:
+ strncpy(s, "CONSOLE_37", n);
+ break;
+ case SUBSYS_CONSOLE_38:
+ strncpy(s, "CONSOLE_38", n);
+ break;
+ case SUBSYS_CONSOLE_39:
+ strncpy(s, "CONSOLE_39", n);
+ break;
+ case SUBSYS_CONSOLE_40:
+ strncpy(s, "CONSOLE_40", n);
+ break;
+ case SUBSYS_CONSOLE_41:
+ strncpy(s, "CONSOLE_41", n);
+ break;
+ case SUBSYS_CONSOLE_42:
+ strncpy(s, "CONSOLE_42", n);
+ break;
+ case SUBSYS_CONSOLE_43:
+ strncpy(s, "CONSOLE_43", n);
+ break;
+ case SUBSYS_CONSOLE_44:
+ strncpy(s, "CONSOLE_44", n);
+ break;
+ case SUBSYS_CONSOLE_45:
+ strncpy(s, "CONSOLE_45", n);
+ break;
+ case SUBSYS_CONSOLE_46:
+ strncpy(s, "CONSOLE_46", n);
+ break;
+ default:
+ subsys_unknown_to_s(subsys, s, n);
+ break;
+ }
+ s[n - 1] = '\0';
+ return s;
+}
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h b/drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h
new file mode 100644
index 000000000000..7304e9a0648c
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h
@@ -0,0 +1,53 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* Linux GCC Version (32-bit and 64-bit) */
+static inline unsigned long
+__unisys_vmcall_gnuc(unsigned long tuple, unsigned long reg_ebx,
+ unsigned long reg_ecx)
+{
+ unsigned long result = 0;
+
+ unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
+ cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
+ if (cpuid_ecx & 0x80000000) {
+ __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx)
+ );
+ } else {
+ result = -1;
+ }
+ return result;
+}
+
+static inline unsigned long
+__unisys_extended_vmcall_gnuc(unsigned long long tuple,
+ unsigned long long reg_ebx,
+ unsigned long long reg_ecx,
+ unsigned long long reg_edx)
+{
+ unsigned long result = 0;
+
+ unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
+ cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
+ if (cpuid_ecx & 0x80000000) {
+ __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx),
+ "d"(reg_edx));
+ } else {
+ result = -1;
+ }
+ return result;
+}
diff --git a/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h b/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h
new file mode 100644
index 000000000000..ae708faaa94d
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h
@@ -0,0 +1,209 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VBUSDEVICEINFO_H__
+#define __VBUSDEVICEINFO_H__
+
+#include "commontypes.h"
+
+#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
+
+/* An array of this struct is present in the channel area for each vbus.
+ * (See vbuschannel.h.)
+ * It is filled in by the client side to provide info about the device
+ * and driver from the client's perspective.
+ */
+typedef struct _ULTRA_VBUS_DEVICEINFO {
+ U8 devType[16]; /* short string identifying the device type */
+ U8 drvName[16]; /* driver .sys file name */
+ U8 infoStrings[96]; /* sequence of tab-delimited id strings: */
+ /* <DRIVER_REV> <DRIVER_VERTAG> <DRIVER_COMPILETIME> */
+ U8 reserved[128]; /* pad size to 256 bytes */
+} ULTRA_VBUS_DEVICEINFO;
+
+#pragma pack(pop)
+
+/* Reads chars from the buffer at <src> for <srcmax> bytes, and writes to
+ * the buffer at <p>, which is <remain> bytes long, ensuring never to
+ * overflow the buffer at <p>, using the following rules:
+ * - printable characters are simply copied from the buffer at <src> to the
+ * buffer at <p>
+ * - intervening streaks of non-printable characters in the buffer at <src>
+ * are replaced with a single space in the buffer at <p>
+ * Note that we pay no attention to '\0'-termination.
+ * Returns the number of bytes written to <p>.
+ *
+ * Pass <p> == NULL and <remain> == 0 for this special behavior. In this
+ * case, we simply return the number of bytes that WOULD HAVE been written
+ * to a buffer at <p>, had it been infinitely big.
+ */
+static inline int
+VBUSCHANNEL_sanitize_buffer(char *p, int remain, char __iomem *src, int srcmax)
+{
+ int chars = 0;
+ int nonprintable_streak = 0;
+ while (srcmax > 0) {
+ if ((readb(src) >= ' ') && (readb(src) < 0x7f)) {
+ if (nonprintable_streak) {
+ if (remain > 0) {
+ *p = ' ';
+ p++;
+ remain--;
+ chars++;
+ } else if (p == NULL)
+ chars++;
+ nonprintable_streak = 0;
+ }
+ if (remain > 0) {
+ *p = readb(src);
+ p++;
+ remain--;
+ chars++;
+ } else if (p == NULL)
+ chars++;
+ } else
+ nonprintable_streak = 1;
+ src++;
+ srcmax--;
+ }
+ return chars;
+}
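+
+/* Illustrative sketch only: the two-pass pattern described above, first
+ * measuring the output with <p> == NULL / <remain> == 0 and then doing the
+ * real copy into a caller-supplied buffer.  The devType field is used here
+ * simply as a convenient example source. */
+static inline int
+example_sanitize_devtype(ULTRA_VBUS_DEVICEINFO __iomem *devInfo,
+			 char *out, int outmax)
+{
+	char __iomem *src = &devInfo->devType[0];
+	int srcmax = sizeof(devInfo->devType);
+	int needed;
+
+	/* pass 1: measure how many bytes the sanitized string needs */
+	needed = VBUSCHANNEL_sanitize_buffer(NULL, 0, src, srcmax);
+	if (needed > outmax)
+		return 0;	/* caller's buffer is too small */
+	/* pass 2: actually copy/clean the characters */
+	return VBUSCHANNEL_sanitize_buffer(out, outmax, src, srcmax);
+}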
+
+#define VBUSCHANNEL_ADDACHAR(ch, p, remain, chars) \
+ do { \
+ if (remain <= 0) \
+ break; \
+ *p = ch; \
+ p++; chars++; remain--; \
+ } while (0)
+
+/* Converts the non-negative value at <num> to an ascii decimal string
+ * at <p>, writing at most <remain> bytes. Note there is NO '\0' termination
+ * written to <p>.
+ *
+ * Returns the number of bytes written to <p>.
+ *
+ * Note that we create this function because we need to do this operation in
+ * an environment-independent way (since we are in a common header file).
+ */
+static inline int
+VBUSCHANNEL_itoa(char *p, int remain, int num)
+{
+ int digits = 0;
+ char s[32];
+ int i;
+
+ if (num == 0) {
+ /* '0' is a special case */
+ if (remain <= 0)
+ return 0;
+ *p = '0';
+ return 1;
+ }
+ /* form a backwards decimal ascii string in <s> */
+ while (num > 0) {
+ if (digits >= (int) sizeof(s))
+ return 0;
+ s[digits++] = (num % 10) + '0';
+ num = num / 10;
+ }
+ if (remain < digits) {
+ /* not enough room left at <p> to hold number, so fill with
+ * '?' */
+ for (i = 0; i < remain; i++, p++)
+ *p = '?';
+ return remain;
+ }
+ /* plug in the decimal ascii string representing the number, by */
+ /* reversing the string we just built in <s> */
+ i = digits;
+ while (i > 0) {
+ i--;
+ *p = s[i];
+ p++;
+ }
+ return digits;
+}
+
+/* Reads <devInfo>, and converts its contents to a printable string at <p>,
+ * writing at most <remain> bytes. Note there is NO '\0' termination
+ * written to <p>.
+ *
+ * Pass <devix> >= 0 if you want a device index presented.
+ *
+ * Returns the number of bytes written to <p>.
+ */
+static inline int
+VBUSCHANNEL_devInfoToStringBuffer(ULTRA_VBUS_DEVICEINFO __iomem *devInfo,
+ char *p, int remain, int devix)
+{
+ char __iomem *psrc;
+ int nsrc, x, i, pad;
+ int chars = 0;
+
+ psrc = &(devInfo->devType[0]);
+ nsrc = sizeof(devInfo->devType);
+ if (VBUSCHANNEL_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0)
+ return 0;
+
+ /* emit device index */
+ if (devix >= 0) {
+ VBUSCHANNEL_ADDACHAR('[', p, remain, chars);
+ x = VBUSCHANNEL_itoa(p, remain, devix);
+ p += x;
+ remain -= x;
+ chars += x;
+ VBUSCHANNEL_ADDACHAR(']', p, remain, chars);
+ } else {
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ }
+
+ /* emit device type */
+ x = VBUSCHANNEL_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ pad = 15 - x; /* pad device type to be exactly 15 chars */
+ for (i = 0; i < pad; i++)
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+
+ /* emit driver name */
+ psrc = &(devInfo->drvName[0]);
+ nsrc = sizeof(devInfo->drvName);
+ x = VBUSCHANNEL_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ pad = 15 - x; /* pad driver name to be exactly 15 chars */
+ for (i = 0; i < pad; i++)
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+
+ /* emit strings */
+ psrc = &(devInfo->infoStrings[0]);
+ nsrc = sizeof(devInfo->infoStrings);
+ x = VBUSCHANNEL_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ VBUSCHANNEL_ADDACHAR('\n', p, remain, chars);
+
+ return chars;
+}
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/version.h b/drivers/staging/unisys/common-spar/include/version.h
new file mode 100644
index 000000000000..00b0ebb09eae
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/version.h
@@ -0,0 +1,46 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* version.h */
+
+/* Common version/release info needed by all components goes here.
+ * (This file must compile cleanly in all environments.)
+ * Ultimately, this will be combined with defines generated dynamically as
+ * part of the sysgen, and some of the defines below may in fact end up
+ * being replaced with dynamically generated ones.
+ */
+#ifndef __VERSION_H__
+#define __VERSION_H__
+
+#define SPARVER1 "1"
+#define SPARVER2 "0"
+#define SPARVER3 "0"
+#define SPARVER4 "0"
+
+#define VERSION SPARVER1 "." SPARVER2 "." SPARVER3 "." SPARVER4
+#define VERSIONDATE __DATE__
+
+/* Here are various version forms needed in Windows environments.
+ */
+#define VISOR_PRODUCTVERSION SPARVERCOMMA
+#define VISOR_PRODUCTVERSION_STR SPARVER1 "." SPARVER2 "." SPARVER3 "." \
+ SPARVER4
+#define VISOR_OBJECTVERSION_STR SPARVER1 "," SPARVER2 "," SPARVER3 "," \
+ SPARVER4
+
+#define COPYRIGHT "Unisys Corporation"
+#define COPYRIGHTDATE "2010 - 2013"
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/vmcallinterface.h b/drivers/staging/unisys/common-spar/include/vmcallinterface.h
new file mode 100644
index 000000000000..14c404367fa7
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/vmcallinterface.h
@@ -0,0 +1,167 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __IOMONINTF_H__
+#define __IOMONINTF_H__
+
+/*
+* This file contains all structures needed to support the VMCALLs for IO
+* Virtualization. The VMCALLs are provided by Monitor and used by IO code
+* running on IO Partitions.
+*/
+
+#ifdef __GNUC__
+#include "iovmcall_gnuc.h"
+#endif /* */
+#include "diagchannel.h"
+
+#ifdef VMCALL_IO_CONTROLVM_ADDR
+#undef VMCALL_IO_CONTROLVM_ADDR
+#endif /* */
+
+/* define subsystem number for AppOS, used in uislib driver */
+#define MDS_APPOS 0x4000000000000000L /* subsystem = 62 - AppOS */
+typedef enum { /* VMCALL identification tuples */
+ /* Note: when a new VMCALL is added:
+ * - the 1st 2 hex digits correspond to one of the
+ * VMCALL_MONITOR_INTERFACE types and
+	 * - the next 2 hex digits are the nth relative instance within a
+	 *   type
+ * E.G. for VMCALL_VIRTPART_RECYCLE_PART,
+ * - the 0x02 identifies it as a VMCALL_VIRTPART type and
+ * - the 0x01 identifies it as the 1st instance of a VMCALL_VIRTPART
+ * type of VMCALL
+ */
+
+ VMCALL_IO_CONTROLVM_ADDR = 0x0501, /* used by all Guests, not just
+ * IO */
+ VMCALL_IO_DIAG_ADDR = 0x0508,
+ VMCALL_IO_VISORSERIAL_ADDR = 0x0509,
+ VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET = 0x0708, /* Allow caller to
+ * query virtual time
+ * offset */
+ VMCALL_CHANNEL_VERSION_MISMATCH = 0x0709,
+ VMCALL_POST_CODE_LOGEVENT = 0x070B, /* LOGEVENT Post Code (RDX) with
+ * specified subsystem mask (RCX
+ * - monitor_subsystems.h) and
+						 * severity (RBX) */
+ VMCALL_GENERIC_SURRENDER_QUANTUM_FOREVER = 0x0802, /* Yield the
+ * remainder & all
+ * future quantums of
+ * the caller */
+ VMCALL_MEASUREMENT_DO_NOTHING = 0x0901,
+ VMCALL_UPDATE_PHYSICAL_TIME = 0x0a02 /* Allow
+ * ULTRA_SERVICE_CAPABILITY_TIME
+ * capable guest to make
+ * VMCALL */
+} VMCALL_MONITOR_INTERFACE_METHOD_TUPLE;
+
+#define VMCALL_SUCCESS 0
+#define VMCALL_SUCCESSFUL(result) (result == 0)
+
+#ifdef __GNUC__
+#define unisys_vmcall(tuple, reg_ebx, reg_ecx) \
+ __unisys_vmcall_gnuc(tuple, reg_ebx, reg_ecx)
+#define unisys_extended_vmcall(tuple, reg_ebx, reg_ecx, reg_edx) \
+ __unisys_extended_vmcall_gnuc(tuple, reg_ebx, reg_ecx, reg_edx)
+#define ISSUE_IO_VMCALL(InterfaceMethod, param, result) \
+ (result = unisys_vmcall(InterfaceMethod, (param) & 0xFFFFFFFF, \
+ (param) >> 32))
+#define ISSUE_IO_EXTENDED_VMCALL(InterfaceMethod, param1, param2, \
+ param3, result) \
+ (result = unisys_extended_vmcall(InterfaceMethod, param1, \
+ param2, param3))
+
+ /* The following uses VMCALL_POST_CODE_LOGEVENT interface but is currently
+ * not used much */
+#define ISSUE_IO_VMCALL_POSTCODE_SEVERITY(postcode, severity) \
+do { \
+ U32 _tempresult = VMCALL_SUCCESS; \
+ ISSUE_IO_EXTENDED_VMCALL(VMCALL_POST_CODE_LOGEVENT, severity, \
+ MDS_APPOS, postcode, _tempresult); \
+} while (0)
+#endif
+
+/* Structures for IO VMCALLs */
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+#pragma pack(push, 1)
+struct phys_info {
+ U64 pi_pfn;
+ U16 pi_off;
+ U16 pi_len;
+};
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+typedef struct phys_info IO_DATA_STRUCTURE;
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+#pragma pack(push, 1)
+/* Parameters to VMCALL_IO_CONTROLVM_ADDR interface */
+typedef struct _VMCALL_IO_CONTROLVM_ADDR_PARAMS {
+ /* The Guest-relative physical address of the ControlVm channel.
+ * This VMCall fills this in with the appropriate address. */
+ U64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
+	/* The size of the ControlVm channel in bytes. This VMCall fills this
+	 * in with the appropriate size. */
+ U32 ChannelBytes; /* contents provided by this VMCALL (OUT) */
+ U8 Unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */
+} VMCALL_IO_CONTROLVM_ADDR_PARAMS;
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
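+
+/* Illustrative sketch only: issuing VMCALL_IO_CONTROLVM_ADDR through the
+ * ISSUE_IO_VMCALL wrapper defined above.  It assumes the caller already
+ * knows the guest-physical address of the parameter block above and has a
+ * virtual mapping of it; how those are obtained is outside this header. */
+#ifdef __GNUC__
+static inline int
+example_get_controlvm_channel(VMCALL_IO_CONTROLVM_ADDR_PARAMS *params,
+			      U64 params_phys_addr)
+{
+	U64 result = VMCALL_SUCCESS;
+
+	/* low 32 bits of the address go in EBX, high 32 bits in ECX */
+	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, params_phys_addr, result);
+	if (!VMCALL_SUCCESSFUL(result))
+		return 0;
+	/* on success the monitor has filled in the OUT fields */
+	return (params->ChannelAddress != 0) && (params->ChannelBytes != 0);
+}
+#endif	/* __GNUC__ */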
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+#pragma pack(push, 1)
+/* Parameters to VMCALL_IO_DIAG_ADDR interface */
+typedef struct _VMCALL_IO_DIAG_ADDR_PARAMS {
+ /* The Guest-relative physical address of the diagnostic channel.
+ * This VMCall fills this in with the appropriate address. */
+ U64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
+} VMCALL_IO_DIAG_ADDR_PARAMS;
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+#pragma pack(push, 1)
+/* Parameters to VMCALL_IO_VISORSERIAL_ADDR interface */
+typedef struct _VMCALL_IO_VISORSERIAL_ADDR_PARAMS {
+ /* The Guest-relative physical address of the serial console
+ * channel. This VMCall fills this in with the appropriate
+ * address. */
+ U64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
+} VMCALL_IO_VISORSERIAL_ADDR_PARAMS;
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+
+/* Parameters to VMCALL_CHANNEL_VERSION_MISMATCH interface */
+typedef struct _VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS {
+ U8 ChannelName[32]; /* Null terminated string giving name of channel
+ * (IN) */
+ U8 ItemName[32]; /* Null terminated string giving name of
+ * mismatched item (IN) */
+ U32 SourceLineNumber; /* line# where invoked. (IN) */
+ U8 SourceFileName[36]; /* source code where invoked - Null terminated
+ * string (IN) */
+} VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS;
+
+#endif /* __IOMONINTF_H__ */
diff --git a/drivers/staging/unisys/include/commontypes.h b/drivers/staging/unisys/include/commontypes.h
new file mode 100644
index 000000000000..ef12af4a72db
--- /dev/null
+++ b/drivers/staging/unisys/include/commontypes.h
@@ -0,0 +1,170 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef _COMMONTYPES_H_
+#define _COMMONTYPES_H_
+
+/* define the following to prevent include nesting in kernel header files of
+ * similar abbreviated content */
+#define _SUPERVISOR_COMMONTYPES_H_
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#else
+#include <stdint.h>
+#include <syslog.h>
+#endif
+
+#define U8 uint8_t
+#define U16 uint16_t
+#define U32 uint32_t
+#define U64 uint64_t
+#define S8 int8_t
+#define S16 int16_t
+#define S32 int32_t
+#define S64 int64_t
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_X86_32
+#define UINTN U32
+#else
+#define UINTN U64
+#endif
+
+#else
+
+#include <stdint.h>
+#if __WORDSIZE == 32
+#define UINTN U32
+#elif __WORDSIZE == 64
+#define UINTN U64
+#else
+#error Unsupported __WORDSIZE
+#endif
+
+#endif
+
+typedef struct {
+ U32 data1;
+ U16 data2;
+ U16 data3;
+ U8 data4[8];
+} __attribute__ ((__packed__)) GUID;
+
+#ifndef GUID0
+#define GUID0 {0, 0, 0, {0, 0, 0, 0, 0, 0, 0, 0} }
+#endif
+typedef U64 GUEST_PHYSICAL_ADDRESS;
+
+#define MEMSET(ptr, val, len) memset(ptr, val, len)
+#define MEMCMP(m1, m2, len) memcmp(m1, m2, len)
+#define MEMCMP_IO(m1, m2, len) memcmp((void __force *)m1, m2, len)
+#define STRLEN(s) ((UINTN)strlen((const char *)s))
+#define STRCPY(d, s) (strcpy((char *)d, (const char *)s))
+
+#define INLINE inline
+#define OFFSETOF offsetof
+
+#ifdef __KERNEL__
+#define MEMORYBARRIER mb()
+#define MEMCPY(dest, src, len) memcpy(dest, src, len)
+#define MEMCPY_TOIO(dest, src, len) memcpy_toio(dest, src, len)
+#define MEMCPY_FROMIO(dest, src, len) memcpy_fromio(dest, src, len)
+
+#define CHANNEL_GUID_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50], s2[50], s3[50]; \
+ pr_err("Channel mismatch on channel=%s(%s) field=%s expected=%s actual=%s @%s:%d\n", \
+ chName, GUID_format2(&chType, s1), field, \
+ GUID_format2(&expected, s2), GUID_format2(&actual, s3), \
+ fil, lin); \
+ } while (0)
+#define CHANNEL_U32_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50]; \
+ pr_err("Channel mismatch on channel=%s(%s) field=%s expected=0x%-8.8lx actual=0x%-8.8lx @%s:%d\n", \
+ chName, GUID_format2(&chType, s1), field, \
+ (unsigned long)expected, (unsigned long)actual, \
+ fil, lin); \
+ } while (0)
+
+#define CHANNEL_U64_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50]; \
+ pr_err("Channel mismatch on channel=%s(%s) field=%s expected=0x%-8.8Lx actual=0x%-8.8Lx @%s:%d\n", \
+ chName, GUID_format2(&chType, s1), field, \
+ (unsigned long long)expected, \
+ (unsigned long long)actual, \
+ fil, lin); \
+ } while (0)
+
+#define UltraLogEvent(logCtx, EventId, Severity, SubsystemMask, pFunctionName, \
+ LineNumber, Str, args...) \
+ pr_info(Str, ## args)
+
+#else
+#define MEMCPY(dest, src, len) memcpy(dest, src, len)
+
+#define MEMORYBARRIER mb()
+
+#define CHANNEL_GUID_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50], s2[50], s3[50]; \
+ syslog(LOG_USER | LOG_ERR, \
+ "Channel mismatch on channel=%s(%s) field=%s expected=%s actual=%s @%s:%d", \
+ chName, GUID_format2(&chType, s1), field, \
+ GUID_format2(&expected, s2), GUID_format2(&actual, s3), \
+ fil, lin); \
+ } while (0)
+
+#define CHANNEL_U32_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50]; \
+ syslog(LOG_USER | LOG_ERR, \
+ "Channel mismatch on channel=%s(%s) field=%s expected=0x%-8.8lx actual=0x%-8.8lx @%s:%d", \
+ chName, GUID_format2(&chType, s1), field, \
+ (unsigned long)expected, (unsigned long)actual, \
+ fil, lin); \
+ } while (0)
+
+#define CHANNEL_U64_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50]; \
+ syslog(LOG_USER | LOG_ERR, \
+ "Channel mismatch on channel=%s(%s) field=%s expected=0x%-8.8Lx actual=0x%-8.8Lx @%s:%d", \
+ chName, GUID_format2(&chType, s1), field, \
+ (unsigned long long)expected, \
+ (unsigned long long)actual, \
+ fil, lin); \
+ } while (0)
+
+#define UltraLogEvent(logCtx, EventId, Severity, SubsystemMask, pFunctionName, \
+ LineNumber, Str, args...) \
+ syslog(LOG_USER | LOG_INFO, Str, ## args)
+#endif
+
+#define VolatileBarrier() MEMORYBARRIER
+
+#endif
+#include "guidutils.h"
diff --git a/drivers/staging/unisys/include/guestlinuxdebug.h b/drivers/staging/unisys/include/guestlinuxdebug.h
new file mode 100644
index 000000000000..c3de8496e5d6
--- /dev/null
+++ b/drivers/staging/unisys/include/guestlinuxdebug.h
@@ -0,0 +1,182 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __GUESTLINUXDEBUG_H__
+#define __GUESTLINUXDEBUG_H__
+
+/*
+* This file contains the supporting interface for "vmcallinterface.h", particularly
+* regarding adding additional structure and functionality to the Linux
+* ISSUE_IO_VMCALL_POSTCODE_SEVERITY */
+
+
+/******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/
+#include "vmcallinterface.h"
+typedef enum { /* POSTCODE driver identifier tuples */
+ /* visorchipset driver files */
+ VISOR_CHIPSET_PC = 0xA0,
+ VISOR_CHIPSET_PC_controlvm_c = 0xA1,
+ VISOR_CHIPSET_PC_controlvm_cm2 = 0xA2,
+ VISOR_CHIPSET_PC_controlvm_direct_c = 0xA3,
+ VISOR_CHIPSET_PC_file_c = 0xA4,
+ VISOR_CHIPSET_PC_parser_c = 0xA5,
+ VISOR_CHIPSET_PC_testing_c = 0xA6,
+ VISOR_CHIPSET_PC_visorchipset_main_c = 0xA7,
+ VISOR_CHIPSET_PC_visorswitchbus_c = 0xA8,
+ /* visorbus driver files */
+ VISOR_BUS_PC = 0xB0,
+ VISOR_BUS_PC_businst_attr_c = 0xB1,
+ VISOR_BUS_PC_channel_attr_c = 0xB2,
+ VISOR_BUS_PC_devmajorminor_attr_c = 0xB3,
+ VISOR_BUS_PC_visorbus_main_c = 0xB4,
+ /* visorclientbus driver files */
+ VISOR_CLIENT_BUS_PC = 0xC0,
+ VISOR_CLIENT_BUS_PC_visorclientbus_main_c = 0xC1,
+ /* virt hba driver files */
+ VIRT_HBA_PC = 0xC2,
+ VIRT_HBA_PC_virthba_c = 0xC3,
+ /* virtpci driver files */
+ VIRT_PCI_PC = 0xC4,
+ VIRT_PCI_PC_virtpci_c = 0xC5,
+ /* virtnic driver files */
+ VIRT_NIC_PC = 0xC6,
+ VIRT_NIC_P_virtnic_c = 0xC7,
+ /* uislib driver files */
+ UISLIB_PC = 0xD0,
+ UISLIB_PC_uislib_c = 0xD1,
+ UISLIB_PC_uisqueue_c = 0xD2,
+ UISLIB_PC_uisthread_c = 0xD3,
+ UISLIB_PC_uisutils_c = 0xD4,
+} DRIVER_PC;
+
+typedef enum { /* POSTCODE event identifier tuples */
+ ATTACH_PORT_ENTRY_PC = 0x001,
+ ATTACH_PORT_FAILURE_PC = 0x002,
+ ATTACH_PORT_SUCCESS_PC = 0x003,
+ BUS_FAILURE_PC = 0x004,
+ BUS_CREATE_ENTRY_PC = 0x005,
+ BUS_CREATE_FAILURE_PC = 0x006,
+ BUS_CREATE_EXIT_PC = 0x007,
+ BUS_CONFIGURE_ENTRY_PC = 0x008,
+ BUS_CONFIGURE_FAILURE_PC = 0x009,
+ BUS_CONFIGURE_EXIT_PC = 0x00A,
+ CHIPSET_INIT_ENTRY_PC = 0x00B,
+ CHIPSET_INIT_SUCCESS_PC = 0x00C,
+ CHIPSET_INIT_FAILURE_PC = 0x00D,
+ CHIPSET_INIT_EXIT_PC = 0x00E,
+ CREATE_WORKQUEUE_PC = 0x00F,
+ CREATE_WORKQUEUE_FAILED_PC = 0x0A0,
+ CONTROLVM_INIT_FAILURE_PC = 0x0A1,
+ DEVICE_CREATE_ENTRY_PC = 0x0A2,
+ DEVICE_CREATE_FAILURE_PC = 0x0A3,
+ DEVICE_CREATE_SUCCESS_PC = 0x0A4,
+ DEVICE_CREATE_EXIT_PC = 0x0A5,
+ DEVICE_ADD_PC = 0x0A6,
+ DEVICE_REGISTER_FAILURE_PC = 0x0A7,
+ DEVICE_CHANGESTATE_ENTRY_PC = 0x0A8,
+ DEVICE_CHANGESTATE_FAILURE_PC = 0x0A9,
+ DEVICE_CHANGESTATE_EXIT_PC = 0x0AA,
+ DRIVER_ENTRY_PC = 0x0AB,
+ DRIVER_EXIT_PC = 0x0AC,
+ MALLOC_FAILURE_PC = 0x0AD,
+ QUEUE_DELAYED_WORK_PC = 0x0AE,
+ UISLIB_THREAD_FAILURE_PC = 0x0B7,
+ VBUS_CHANNEL_ENTRY_PC = 0x0B8,
+ VBUS_CHANNEL_FAILURE_PC = 0x0B9,
+ VBUS_CHANNEL_EXIT_PC = 0x0BA,
+ VHBA_CREATE_ENTRY_PC = 0x0BB,
+ VHBA_CREATE_FAILURE_PC = 0x0BC,
+ VHBA_CREATE_EXIT_PC = 0x0BD,
+ VHBA_CREATE_SUCCESS_PC = 0x0BE,
+ VHBA_COMMAND_HANDLER_PC = 0x0BF,
+ VHBA_PROBE_ENTRY_PC = 0x0C0,
+ VHBA_PROBE_FAILURE_PC = 0x0C1,
+ VHBA_PROBE_EXIT_PC = 0x0C2,
+ VNIC_CREATE_ENTRY_PC = 0x0C3,
+ VNIC_CREATE_FAILURE_PC = 0x0C4,
+ VNIC_CREATE_SUCCESS_PC = 0x0C5,
+ VNIC_PROBE_ENTRY_PC = 0x0C6,
+ VNIC_PROBE_FAILURE_PC = 0x0C7,
+ VNIC_PROBE_EXIT_PC = 0x0C8,
+ VPCI_CREATE_ENTRY_PC = 0x0C9,
+ VPCI_CREATE_FAILURE_PC = 0x0CA,
+ VPCI_CREATE_EXIT_PC = 0x0CB,
+ VPCI_PROBE_ENTRY_PC = 0x0CC,
+ VPCI_PROBE_FAILURE_PC = 0x0CD,
+ VPCI_PROBE_EXIT_PC = 0x0CE,
+ CRASH_DEV_ENTRY_PC = 0x0CF,
+ CRASH_DEV_EXIT_PC = 0x0D0,
+ CRASH_DEV_HADDR_NULL = 0x0D1,
+ CRASH_DEV_CONTROLVM_NULL = 0x0D2,
+ CRASH_DEV_RD_BUS_FAIULRE_PC = 0x0D3,
+ CRASH_DEV_RD_DEV_FAIULRE_PC = 0x0D4,
+ CRASH_DEV_BUS_NULL_FAILURE_PC = 0x0D5,
+ CRASH_DEV_DEV_NULL_FAILURE_PC = 0x0D6,
+ CRASH_DEV_CTRL_RD_FAILURE_PC = 0x0D7,
+ CRASH_DEV_COUNT_FAILURE_PC = 0x0D8,
+ SAVE_MSG_BUS_FAILURE_PC = 0x0D9,
+ SAVE_MSG_DEV_FAILURE_PC = 0x0DA,
+ CALLHOME_INIT_FAILURE_PC = 0x0DB
+} EVENT_PC;
+
+#ifdef __GNUC__
+
+#define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR
+#define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING
+#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT /* TODO-> Info currently
+					 * doesn't show, so we
+ * set info=warning */
+/* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR);
+ * Please also note that the resulting postcode is in hex, so if you are
+ * searching for the __LINE__ number, convert it first to decimal.  The line
+ * number, combined with the driver and type of call, will allow you to track
+ * down exactly which line an error occurred on, or where the last driver
+ * entered/exited from.
+ */
+
+/* BASE FUNCTIONS */
+#define POSTCODE_LINUX_A(DRIVER_PC, EVENT_PC, pc32bit, severity) \
+do { \
+ unsigned long long post_code_temp; \
+ post_code_temp = (((U64)DRIVER_PC) << 56) | (((U64)EVENT_PC) << 44) | \
+ ((((U64)__LINE__) & 0xFFF) << 32) | \
+ (((U64)pc32bit) & 0xFFFFFFFF); \
+ ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
+} while (0)
+
+#define POSTCODE_LINUX_B(DRIVER_PC, EVENT_PC, pc16bit1, pc16bit2, severity) \
+do { \
+ unsigned long long post_code_temp; \
+ post_code_temp = (((U64)DRIVER_PC) << 56) | (((U64)EVENT_PC) << 44) | \
+ ((((U64)__LINE__) & 0xFFF) << 32) | \
+ ((((U64)pc16bit1) & 0xFFFF) << 16) | \
+ (((U64)pc16bit2) & 0xFFFF); \
+ ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
+} while (0)
+
+/* MOST COMMON */
+#define POSTCODE_LINUX_2(EVENT_PC, severity) \
+ POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, 0x0000, severity);
+
+#define POSTCODE_LINUX_3(EVENT_PC, pc32bit, severity) \
+ POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, pc32bit, severity);
+
+
+#define POSTCODE_LINUX_4(EVENT_PC, pc16bit1, pc16bit2, severity) \
+ POSTCODE_LINUX_B(CURRENT_FILE_PC, EVENT_PC, pc16bit1, \
+ pc16bit2, severity);
+
+#endif
+#endif
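[Editor's note: a sketch of how a driver source file would typically consume the macros above. The CURRENT_FILE_PC value and the function are examples only.]

/* Each .c file defines CURRENT_FILE_PC from the DRIVER_PC enum before
 * using the POSTCODE_LINUX_* helpers (example value only).
 */
#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

static void example_report_bus_failure(unsigned int busNo)
{
	/* Packs the driver id (bits 63:56), event id (bits 55:44),
	 * __LINE__ (bits 43:32) and busNo (bits 31:0) into one postcode,
	 * then issues it via ISSUE_IO_VMCALL_POSTCODE_SEVERITY.
	 */
	POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo, POSTCODE_SEVERITY_ERR);
}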
diff --git a/drivers/staging/unisys/include/guidutils.h b/drivers/staging/unisys/include/guidutils.h
new file mode 100644
index 000000000000..75caf929cd6d
--- /dev/null
+++ b/drivers/staging/unisys/include/guidutils.h
@@ -0,0 +1,203 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* guidutils.h
+ *
+ * These are GUID manipulation inlines that can be used from either
+ * kernel-mode or user-mode.
+ *
+ */
+#ifndef __GUIDUTILS_H__
+#define __GUIDUTILS_H__
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#define GUID_STRTOUL kstrtoul
+#else
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define GUID_STRTOUL strtoul
+#endif
+
+static inline char *
+GUID_format1(const GUID *guid, char *s)
+{
+ sprintf(s, "{%-8.8lx-%-4.4x-%-4.4x-%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x}",
+ (ulong) guid->data1,
+ guid->data2,
+ guid->data3,
+ guid->data4[0],
+ guid->data4[1],
+ guid->data4[2],
+ guid->data4[3],
+ guid->data4[4], guid->data4[5], guid->data4[6], guid->data4[7]);
+ return s;
+}
+
+/** Format a GUID in Microsoft's 'what in the world were they thinking'
+ * format.
+ */
+static inline char *
+GUID_format2(const GUID *guid, char *s)
+{
+ sprintf(s, "{%-8.8lx-%-4.4x-%-4.4x-%-2.2x%-2.2x-%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x}",
+ (ulong) guid->data1,
+ guid->data2,
+ guid->data3,
+ guid->data4[0],
+ guid->data4[1],
+ guid->data4[2],
+ guid->data4[3],
+ guid->data4[4], guid->data4[5], guid->data4[6], guid->data4[7]);
+ return s;
+}
+
+/**
+ * Like GUID_format2 but without the curly braces and with the
+ * hex digits in upper case
+ */
+static inline char *
+GUID_format3(const GUID *guid, char *s)
+{
+ sprintf(s, "%-8.8lX-%-4.4X-%-4.4X-%-2.2X%-2.2X-%-2.2X%-2.2X%-2.2X%-2.2X%-2.2X%-2.2X",
+ (ulong) guid->data1,
+ guid->data2,
+ guid->data3,
+ guid->data4[0],
+ guid->data4[1],
+ guid->data4[2],
+ guid->data4[3],
+ guid->data4[4], guid->data4[5], guid->data4[6], guid->data4[7]);
+ return s;
+}
+
+/** Parse a guid string in any of these forms:
+ * {11111111-2222-3333-4455-66778899aabb}
+ * {11111111-2222-3333-445566778899aabb}
+ * 11111111-2222-3333-4455-66778899aabb
+ * 11111111-2222-3333-445566778899aabb
+ */
+static inline GUID
+GUID_scan(U8 *p)
+{
+ GUID guid = GUID0;
+ U8 x[33];
+ int count = 0;
+ int c, i = 0;
+ U8 cdata1[9];
+ U8 cdata2[5];
+ U8 cdata3[5];
+ U8 cdata4[3];
+ int dashcount = 0;
+ int brace = 0;
+ unsigned long uldata;
+
+ if (!p)
+ return guid;
+ if (*p == '{') {
+ p++;
+ brace = 1;
+ }
+ while (count < 32) {
+ if (*p == '}')
+ return guid;
+ if (*p == '\0')
+ return guid;
+ c = toupper(*p);
+ p++;
+ if (c == '-') {
+ switch (dashcount) {
+ case 0:
+ if (i != 8)
+ return guid;
+ break;
+ case 1:
+ if (i != 4)
+ return guid;
+ break;
+ case 2:
+ if (i != 4)
+ return guid;
+ break;
+ case 3:
+ if (i != 4)
+ return guid;
+ break;
+ default:
+ return guid;
+ }
+ dashcount++;
+ i = 0;
+ continue;
+ }
+ if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F'))
+ i++;
+ else
+ return guid;
+ x[count++] = c;
+ }
+ x[count] = '\0';
+ if (brace) {
+ if (*p == '}')
+ p++;
+ else
+ return guid;
+ }
+	if (dashcount != 3 && dashcount != 4)
+		return guid;
+ memset(cdata1, 0, sizeof(cdata1));
+ memset(cdata2, 0, sizeof(cdata2));
+ memset(cdata3, 0, sizeof(cdata3));
+ memset(cdata4, 0, sizeof(cdata4));
+ memcpy(cdata1, x + 0, 8);
+ memcpy(cdata2, x + 8, 4);
+ memcpy(cdata3, x + 12, 4);
+
+ if (GUID_STRTOUL((char *) cdata1, 16, &uldata) == 0)
+ guid.data1 = (U32)uldata;
+ if (GUID_STRTOUL((char *) cdata2, 16, &uldata) == 0)
+ guid.data2 = (U16)uldata;
+ if (GUID_STRTOUL((char *) cdata3, 16, &uldata) == 0)
+ guid.data3 = (U16)uldata;
+
+ for (i = 0; i < 8; i++) {
+ memcpy(cdata4, x + 16 + (i * 2), 2);
+ if (GUID_STRTOUL((char *) cdata4, 16, &uldata) == 0)
+ guid.data4[i] = (U8) uldata;
+ }
+
+ return guid;
+}
+
+static inline char *
+GUID_sanitize(char *inputGuidStr, char *outputGuidStr)
+{
+ GUID g;
+ GUID guid0 = GUID0;
+ *outputGuidStr = '\0';
+ g = GUID_scan((U8 *) inputGuidStr);
+ if (memcmp(&g, &guid0, sizeof(GUID)) == 0)
+ return outputGuidStr; /* bad GUID format */
+ return GUID_format1(&g, outputGuidStr);
+}
+
+#endif
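[Editor's note: a small kernel-side round-trip sketch of the parse/format helpers above. The 50-byte buffer mirrors what the CHANNEL_*_MISMATCH macros in commontypes.h use, and the example GUID string is arbitrary.]

static inline void example_guid_roundtrip(void)
{
	char buf[50];
	GUID g = GUID_scan((U8 *) "{11111111-2222-3333-4455-66778899aabb}");

	/* GUID_format1 omits the dash inside data4, GUID_format2 prints the
	 * Microsoft-style grouping, GUID_format3 drops the braces. */
	pr_info("parsed: %s\n", GUID_format2(&g, buf));
}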
diff --git a/drivers/staging/unisys/include/periodic_work.h b/drivers/staging/unisys/include/periodic_work.h
new file mode 100644
index 000000000000..6c7190bdcd66
--- /dev/null
+++ b/drivers/staging/unisys/include/periodic_work.h
@@ -0,0 +1,40 @@
+/* periodic_work.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __PERIODIC_WORK_H__
+#define __PERIODIC_WORK_H__
+
+#include "timskmod.h"
+
+
+
+/* PERIODIC_WORK is an opaque structure to users.
+ * Fields are declared only in the implementation .c files.
+ */
+typedef struct PERIODIC_WORK_Tag PERIODIC_WORK;
+
+PERIODIC_WORK *visor_periodic_work_create(ulong jiffy_interval,
+ struct workqueue_struct *workqueue,
+ void (*workfunc)(void *),
+ void *workfuncarg,
+ const char *devnam);
+void visor_periodic_work_destroy(PERIODIC_WORK *periodic_work);
+BOOL visor_periodic_work_nextperiod(PERIODIC_WORK *periodic_work);
+BOOL visor_periodic_work_start(PERIODIC_WORK *periodic_work);
+BOOL visor_periodic_work_stop(PERIODIC_WORK *periodic_work);
+
+#endif
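[Editor's note: a sketch of the lifecycle these prototypes suggest: create, start, and later stop/destroy, with the work function presumably calling visor_periodic_work_nextperiod() to re-arm itself. The re-arm detail and all names below are assumptions; the authoritative contract is in the implementation .c file.]

static PERIODIC_WORK *example_pw;	/* hypothetical single instance */

static void example_poll(void *context)
{
	/* ... poll the device described by context ... */
	visor_periodic_work_nextperiod(example_pw);	/* assumed re-arm */
}

static int example_start_polling(struct workqueue_struct *wq, void *dev)
{
	example_pw = visor_periodic_work_create(HZ / 10, wq, example_poll,
						dev, "example_dev");
	if (!example_pw)
		return -ENOMEM;
	return visor_periodic_work_start(example_pw) ? 0 : -EINVAL;
}

static void example_stop_polling(void)
{
	visor_periodic_work_stop(example_pw);
	visor_periodic_work_destroy(example_pw);
	example_pw = NULL;
}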
diff --git a/drivers/staging/unisys/include/procobjecttree.h b/drivers/staging/unisys/include/procobjecttree.h
new file mode 100644
index 000000000000..c81d11287e68
--- /dev/null
+++ b/drivers/staging/unisys/include/procobjecttree.h
@@ -0,0 +1,48 @@
+/* procobjecttree.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/** @file *********************************************************************
+ *
+ * This describes the interfaces necessary for creating a tree of types,
+ * objects, and properties in /proc.
+ *
+ ******************************************************************************
+ */
+
+#ifndef __PROCOBJECTTREE_H__
+#define __PROCOBJECTTREE_H__
+
+#include "uniklog.h"
+#include "timskmod.h"
+
+/* These are opaque structures to users.
+ * Fields are declared only in the implementation .c files.
+ */
+typedef struct MYPROCOBJECT_Tag MYPROCOBJECT;
+typedef struct MYPROCTYPE_Tag MYPROCTYPE;
+
+MYPROCOBJECT *visor_proc_CreateObject(MYPROCTYPE *type, const char *name,
+ void *context);
+void visor_proc_DestroyObject(MYPROCOBJECT *obj);
+MYPROCTYPE *visor_proc_CreateType(struct proc_dir_entry *procRootDir,
+ const char **name,
+ const char **propertyNames,
+ void (*show_property)(struct seq_file *,
+ void *, int));
+void visor_proc_DestroyType(MYPROCTYPE *type);
+
+#endif
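[Editor's note: a hypothetical usage sketch inferred only from the prototypes above. The NULL-terminated name/property arrays and the meaning of the show_property arguments are assumptions, not documented guarantees.]

static const char *example_path[] = { "example", "devices", NULL };
static const char *example_props[] = { "status", "stats", NULL };

static void example_show_property(struct seq_file *f, void *context, int prop)
{
	/* prop presumably indexes example_props[]; context is whatever was
	 * passed to visor_proc_CreateObject() */
	seq_printf(f, "property %d of object %p\n", prop, context);
}

static void example_proc_tree(struct proc_dir_entry *proc_root, void *dev)
{
	MYPROCTYPE *t;
	MYPROCOBJECT *o;

	t = visor_proc_CreateType(proc_root, example_path, example_props,
				  example_show_property);
	o = visor_proc_CreateObject(t, "dev0", dev);
	/* ... later ... */
	visor_proc_DestroyObject(o);
	visor_proc_DestroyType(t);
}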
diff --git a/drivers/staging/unisys/include/sparstop.h b/drivers/staging/unisys/include/sparstop.h
new file mode 100644
index 000000000000..3603ac607643
--- /dev/null
+++ b/drivers/staging/unisys/include/sparstop.h
@@ -0,0 +1,30 @@
+/* sparstop.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __SPARSTOP_H__
+#define __SPARSTOP_H__
+
+#include "timskmod.h"
+#include "version.h"
+#include <linux/ctype.h>
+
+typedef void (*SPARSTOP_COMPLETE_FUNC) (void *context, int status);
+
+int sp_stop(void *context, SPARSTOP_COMPLETE_FUNC get_complete_func);
+void test_remove_stop_device(void);
+
+#endif
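[Editor's note: the prototype suggests an asynchronous stop request that reports completion through the callback. A minimal sketch under that assumption; treating a negative return as failure and the log text are illustrative choices, not defined by this header.]

static void example_stop_complete(void *context, int status)
{
	pr_info("sPAR stop for %p completed, status=%d\n", context, status);
}

static void example_request_stop(void *my_context)
{
	if (sp_stop(my_context, example_stop_complete) < 0)
		pr_err("sp_stop failed\n");
}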
diff --git a/drivers/staging/unisys/include/timskmod.h b/drivers/staging/unisys/include/timskmod.h
new file mode 100644
index 000000000000..5fd5ad514464
--- /dev/null
+++ b/drivers/staging/unisys/include/timskmod.h
@@ -0,0 +1,324 @@
+/* timskmod.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __TIMSKMOD_H__
+#define __TIMSKMOD_H__
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+#include <linux/cdev.h>
+#include <linux/types.h>
+#include <asm/irq.h>
+#include <linux/io.h>
+#include <asm/dma.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+/* #define EXPORT_SYMTAB */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/fcntl.h>
+#include <linux/aio.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/seq_file.h>
+#include <linux/mm.h>
+
+/* #define DEBUG */
+#ifndef BOOL
+#define BOOL int
+#endif
+#define FALSE 0
+#define TRUE 1
+#if !defined SUCCESS
+#define SUCCESS 0
+#endif
+#define FAILURE (-1)
+#define DRIVERNAMEMAX 50
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define STRUCTSEQUAL(x, y) (memcmp(&x, &y, sizeof(x)) == 0)
+#ifndef HOSTADDRESS
+#define HOSTADDRESS unsigned long long
+#endif
+
+typedef long VMMIO; /**< Virtual MMIO address (returned from ioremap), which
+ * is a virtual address pointer to a memory-mapped region.
+ * These are declared as "long" instead of u32* to force you to
+ * use readb()/writeb()/memcpy_fromio()/etc to access them.
+ * (On x86 we could probably get away with treating them as
+ * pointers.)
+ */
+typedef long VMMIO8; /**< #VMMIO pointing to 8-bit data */
+typedef long VMMIO16;/**< #VMMIO pointing to 16-bit data */
+typedef long VMMIO32;/**< #VMMIO pointing to 32-bit data */
+
+#define LOCKSEM(sem) down_interruptible(sem)
+#define LOCKSEM_UNINTERRUPTIBLE(sem) down(sem)
+#define UNLOCKSEM(sem) up(sem)
+
+/** lock read/write semaphore for reading.
+ Note that all read/write semaphores are of the "uninterruptible" variety.
+ @param sem (rw_semaphore *) points to semaphore to lock
+ */
+#define LOCKREADSEM(sem) down_read(sem)
+
+/** unlock read/write semaphore for reading.
+ Note that all read/write semaphores are of the "uninterruptible" variety.
+ @param sem (rw_semaphore *) points to semaphore to unlock
+ */
+#define UNLOCKREADSEM(sem) up_read(sem)
+
+/** lock read/write semaphore for writing.
+ Note that all read/write semaphores are of the "uninterruptible" variety.
+ @param sem (rw_semaphore *) points to semaphore to lock
+ */
+#define LOCKWRITESEM(sem) down_write(sem)
+
+/** unlock read/write semaphore for writing.
+ Note that all read/write semaphores are of the "uninterruptible" variety.
+ @param sem (rw_semaphore *) points to semaphore to unlock
+ */
+#define UNLOCKWRITESEM(sem) up_write(sem)
+
+#ifdef ENABLE_RETURN_TRACE
+#define RETTRACE(x) \
+ do { \
+ if (1) { \
+ INFODRV("RET 0x%lx in %s", \
+ (ulong)(x), __func__); \
+ } \
+ } while (0)
+#else
+#define RETTRACE(x)
+#endif
+
+/** Log an error (via HUHDRV) if the provided condition evaluates to false.
+ * @param cond the condition to evaluate
+ */
+#define ASSERT(cond) \
+ do { if (!(cond)) \
+ HUHDRV("ASSERT failed - %s", \
+ __stringify(cond)); \
+ } while (0)
+
+#define sizeofmember(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER))
+/** "Covered quotient" function */
+#define COVQ(v, d) (((v) + (d) - 1) / (d))
+#define SWAPPOINTERS(p1, p2) \
+ do { \
+ void *SWAPPOINTERS_TEMP = (void *)p1; \
+		(p1) = (void *)(p2);			\
+		(p2) = SWAPPOINTERS_TEMP;		\
+ } while (0)
+
+/**
+ * @addtogroup driverlogging
+ * @{
+ */
+
+#define PRINTKDRV(fmt, args...) LOGINF(fmt, ## args)
+#define TBDDRV(fmt, args...) LOGERR(fmt, ## args)
+#define HUHDRV(fmt, args...) LOGERR(fmt, ## args)
+#define ERRDRV(fmt, args...) LOGERR(fmt, ## args)
+#define WARNDRV(fmt, args...) LOGWRN(fmt, ## args)
+#define SECUREDRV(fmt, args...) LOGWRN(fmt, ## args)
+#define INFODRV(fmt, args...) LOGINF(fmt, ## args)
+#define DEBUGDRV(fmt, args...) DBGINF(fmt, ## args)
+
+#define PRINTKDEV(devname, fmt, args...) LOGINFDEV(devname, fmt, ## args)
+#define TBDDEV(devname, fmt, args...) LOGERRDEV(devname, fmt, ## args)
+#define HUHDEV(devname, fmt, args...) LOGERRDEV(devname, fmt, ## args)
+#define ERRDEV(devname, fmt, args...) LOGERRDEV(devname, fmt, ## args)
+#define ERRDEVX(devno, fmt, args...) LOGERRDEVX(devno, fmt, ## args)
+#define WARNDEV(devname, fmt, args...) LOGWRNDEV(devname, fmt, ## args)
+#define SECUREDEV(devname, fmt, args...) LOGWRNDEV(devname, fmt, ## args)
+#define INFODEV(devname, fmt, args...) LOGINFDEV(devname, fmt, ## args)
+#define INFODEVX(devno, fmt, args...) LOGINFDEVX(devno, fmt, ## args)
+#define DEBUGDEV(devname, fmt, args...) DBGINFDEV(devname, fmt, ## args)
+
+
+/* @} */
+
+/** Verifies the consistency of your PRIVATEDEVICEDATA structure using
+ * conventional "signature" fields:
+ * <p>
+ * - sig1 should contain the size of the structure
+ * - sig2 should contain a pointer to the beginning of the structure
+ */
+#define DDLOOKSVALID(dd) \
+ ((dd != NULL) && \
+ ((dd)->sig1 == sizeof(PRIVATEDEVICEDATA)) && \
+ ((dd)->sig2 == dd))
+
+/** Verifies the consistency of your PRIVATEFILEDATA structure using
+ * conventional "signature" fields:
+ * <p>
+ * - sig1 should contain the size of the structure
+ * - sig2 should contain a pointer to the beginning of the structure
+ */
+#define FDLOOKSVALID(fd) \
+ ((fd != NULL) && \
+ ((fd)->sig1 == sizeof(PRIVATEFILEDATA)) && \
+ ((fd)->sig2 == fd))
+
+/** Locks dd->lockDev if you haven't already locked it */
+#define LOCKDEV(dd) \
+ { \
+ if (!lockedDev) { \
+ spin_lock(&dd->lockDev); \
+ lockedDev = TRUE; \
+ } \
+ }
+
+/** Unlocks dd->lockDev if you previously locked it */
+#define UNLOCKDEV(dd) \
+ { \
+ if (lockedDev) { \
+ spin_unlock(&dd->lockDev); \
+ lockedDev = FALSE; \
+ } \
+ }
+
+/** Locks dd->lockDevISR if you haven't already locked it */
+#define LOCKDEVISR(dd) \
+ { \
+ if (!lockedDevISR) { \
+ spin_lock_irqsave(&dd->lockDevISR, flags); \
+ lockedDevISR = TRUE; \
+ } \
+ }
+
+/** Unlocks dd->lockDevISR if you previously locked it */
+#define UNLOCKDEVISR(dd) \
+ { \
+ if (lockedDevISR) { \
+ spin_unlock_irqrestore(&dd->lockDevISR, flags); \
+ lockedDevISR = FALSE; \
+ } \
+ }
+
+/** Locks LockGlobalISR if you haven't already locked it */
+#define LOCKGLOBALISR \
+ { \
+ if (!lockedGlobalISR) { \
+ spin_lock_irqsave(&LockGlobalISR, flags); \
+ lockedGlobalISR = TRUE; \
+ } \
+ }
+
+/** Unlocks LockGlobalISR if you previously locked it */
+#define UNLOCKGLOBALISR \
+ { \
+ if (lockedGlobalISR) { \
+ spin_unlock_irqrestore(&LockGlobalISR, flags); \
+ lockedGlobalISR = FALSE; \
+ } \
+ }
+
+/** Locks LockGlobal if you haven't already locked it */
+#define LOCKGLOBAL \
+ { \
+ if (!lockedGlobal) { \
+ spin_lock(&LockGlobal); \
+ lockedGlobal = TRUE; \
+ } \
+ }
+
+/** Unlocks LockGlobal if you previously locked it */
+#define UNLOCKGLOBAL \
+ { \
+ if (lockedGlobal) { \
+ spin_unlock(&LockGlobal); \
+ lockedGlobal = FALSE; \
+ } \
+ }
+
+/** Use this at the beginning of functions where you intend to
+ * use #LOCKDEV/#UNLOCKDEV, #LOCKDEVISR/#UNLOCKDEVISR,
+ * #LOCKGLOBAL/#UNLOCKGLOBAL, #LOCKGLOBALISR/#UNLOCKGLOBALISR.
+ *
+ * Note that __attribute__((unused)) is how you tell GNU C to suppress
+ * any warning messages about the variable being unused.
+ */
+#define LOCKPREAMBLE \
+ ulong flags __attribute__((unused)) = 0; \
+ BOOL lockedDev __attribute__((unused)) = FALSE; \
+ BOOL lockedDevISR __attribute__((unused)) = FALSE; \
+ BOOL lockedGlobal __attribute__((unused)) = FALSE; \
+ BOOL lockedGlobalISR __attribute__((unused)) = FALSE
+
+
+
+/** Sleep for an indicated number of seconds (for use in kernel mode).
+ * @param x the number of seconds to sleep.
+ */
+#define SLEEP(x) \
+ do { current->state = TASK_INTERRUPTIBLE; \
+ schedule_timeout((x)*HZ); \
+ } while (0)
+
+/** Sleep for an indicated number of jiffies (for use in kernel mode).
+ * @param x the number of jiffies to sleep.
+ */
+#define SLEEPJIFFIES(x) \
+ do { current->state = TASK_INTERRUPTIBLE; \
+ schedule_timeout(x); \
+ } while (0)
+
+#ifndef max
+#define max(a, b) (((a) > (b)) ? (a):(b))
+#endif
+
+static inline struct cdev *cdev_alloc_init(struct module *owner,
+ const struct file_operations *fops)
+{
+ struct cdev *cdev = NULL;
+ cdev = cdev_alloc();
+ if (!cdev)
+ return NULL;
+ cdev->ops = fops;
+ cdev->owner = owner;
+
+ /* Note that the memory allocated for cdev will be deallocated
+ * when the usage count drops to 0, because it is controlled
+ * by a kobject of type ktype_cdev_dynamic. (This
+ * deallocation could very well happen outside of our kernel
+ * module, like via the cdev_put in __fput() for example.)
+ */
+ return cdev;
+}
+
+#include "timskmodutils.h"
+
+#endif
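[Editor's note: a sketch that makes the locking convention above concrete, using LOCKPREAMBLE together with the device-lock macros. PRIVATEDEVICEDATA and its sig1/sig2/lockDevISR fields are the conventional structures these macros expect but are not declared in this header, so they are assumptions here; uniklog.h is assumed to be included for ERRDRV.]

/* Sketch only: the UNLOCKxxx macros release a lock only if the matching
 * LOCKxxx macro actually took it, which is why LOCKPREAMBLE must declare
 * the tracking flags at the top of the function.
 */
static void example_poke_device(PRIVATEDEVICEDATA *dd)
{
	LOCKPREAMBLE;

	if (!DDLOOKSVALID(dd)) {
		ERRDRV("bad device data %p", dd);
		return;
	}
	LOCKDEVISR(dd);		/* spin_lock_irqsave(&dd->lockDevISR, flags) */
	/* ... touch fields shared with the ISR ... */
	UNLOCKDEVISR(dd);	/* released only because we locked it above */
}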
diff --git a/drivers/staging/unisys/include/timskmodutils.h b/drivers/staging/unisys/include/timskmodutils.h
new file mode 100644
index 000000000000..2d81d46bf11e
--- /dev/null
+++ b/drivers/staging/unisys/include/timskmodutils.h
@@ -0,0 +1,75 @@
+/* timskmodutils.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __TIMSKMODUTILS_H__
+#define __TIMSKMODUTILS_H__
+
+#include "timskmod.h"
+
+void *kmalloc_kernel(size_t siz);
+void myprintk(const char *myDrvName, const char *devname,
+ const char *template, ...);
+
+/*--------------------------------*
+ *--- GENERAL MESSAGEQ STUFF ---*
+ *--------------------------------*/
+
+struct MessageQEntry;
+
+/** the data structure used to hold an arbitrary data item that you want
+ *  to place on a #MESSAGEQ.
+ *
+ *  This structure should be considered opaque; the client using it should
+ *  never access the fields directly.
+ *
+ *  @ingroup messageq
+ */
+typedef struct MessageQEntry {
+ void *data;
+ struct MessageQEntry *qNext;
+ struct MessageQEntry *qPrev;
+} MESSAGEQENTRY;
+
+/** the data structure used to hold a FIFO queue of #MESSAGEQENTRY<b></b>s.
+ * Declare and initialize as follows:
+ * @code
+ * MESSAGEQ myQueue;
+ * @endcode
+ * This structure should be considered opaque; the client using it should
+ * never access the fields directly.
+ *
+ * @ingroup messageq
+ */
+typedef struct MessageQ {
+ MESSAGEQENTRY *qHead;
+ MESSAGEQENTRY *qTail;
+ struct semaphore nQEntries;
+ spinlock_t queueLock;
+} MESSAGEQ;
+
+char *cyclesToSeconds(u64 cycles, u64 cyclesPerSecond,
+ char *buf, size_t bufsize);
+char *cyclesToIterationSeconds(u64 cycles, u64 cyclesPerSecond,
+ u64 iterations, char *buf, size_t bufsize);
+char *cyclesToSomethingsPerSecond(u64 cycles, u64 cyclesPerSecond,
+ u64 somethings, char *buf, size_t bufsize);
+struct seq_file *visor_seq_file_new_buffer(void *buf, size_t buf_size);
+void visor_seq_file_done_buffer(struct seq_file *m);
+
+#endif
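[Editor's note: a short sketch of the cycles-to-time helpers declared above; the cycle counts and buffer size are arbitrary illustration values.]

static void example_report_elapsed(u64 start_cycles, u64 end_cycles,
				   u64 cycles_per_second)
{
	char buf[64];

	/* cyclesToSeconds() formats the elapsed cycle count as seconds */
	pr_info("elapsed: %s\n",
		cyclesToSeconds(end_cycles - start_cycles, cycles_per_second,
				buf, sizeof(buf)));
}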
diff --git a/drivers/staging/unisys/include/uisqueue.h b/drivers/staging/unisys/include/uisqueue.h
new file mode 100644
index 000000000000..6dab3900994a
--- /dev/null
+++ b/drivers/staging/unisys/include/uisqueue.h
@@ -0,0 +1,440 @@
+/* uisqueue.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Unisys I/O virtualization header.  NOTE: This file contains only
+ * Linux-specific structs.  All OS-independent structs are in iochannel.h.
+ */
+
+#ifndef __UISQUEUE_H__
+#define __UISQUEUE_H__
+
+#include "linux/version.h"
+#include "iochannel.h"
+#include "uniklog.h"
+#include <linux/atomic.h>
+#include <linux/semaphore.h>
+
+#include "controlvmchannel.h"
+#include "controlvmcompletionstatus.h"
+
+struct uisqueue_info {
+
+ CHANNEL_HEADER __iomem *chan;
+ /* channel containing queues in which scsi commands &
+ * responses are queued
+ */
+ U64 packets_sent;
+ U64 packets_received;
+ U64 interrupts_sent;
+ U64 interrupts_received;
+ U64 max_not_empty_cnt;
+ U64 total_wakeup_cnt;
+ U64 non_empty_wakeup_cnt;
+
+ struct {
+ SIGNAL_QUEUE_HEADER Reserved1; /* */
+ SIGNAL_QUEUE_HEADER Reserved2; /* */
+ } safe_uis_queue;
+ unsigned int (*send_int_if_needed)(struct uisqueue_info *info,
+ unsigned int whichcqueue,
+ unsigned char issueInterruptIfEmpty,
+ U64 interruptHandle,
+ unsigned char io_termination);
+};
+
+/* uisqueue_put_cmdrsp_with_lock_client queues a command or response at
+ * the tail of the specified queue.  If the queue is full and
+ * oktowait == 0, it returns 0 to indicate failure; otherwise it waits
+ * for the queue to become non-full.  If the command is queued, it
+ * returns 1 for success.
+ */
+#define DONT_ISSUE_INTERRUPT 0
+#define ISSUE_INTERRUPT 1
+
+#define DONT_WAIT 0
+#define OK_TO_WAIT 1
+#define UISLIB_LOCK_PREFIX \
+ ".section .smp_locks,\"a\"\n" \
+ _ASM_ALIGN "\n" \
+ _ASM_PTR "661f\n" /* address */ \
+ ".previous\n" \
+ "661:\n\tlock; "
+
+unsigned long long uisqueue_InterlockedOr(unsigned long long __iomem *Target,
+ unsigned long long Set);
+unsigned long long uisqueue_InterlockedAnd(unsigned long long __iomem *Target,
+ unsigned long long Set);
+
+unsigned int uisqueue_send_int_if_needed(struct uisqueue_info *pqueueinfo,
+ unsigned int whichqueue,
+ unsigned char issueInterruptIfEmpty,
+ U64 interruptHandle,
+ unsigned char io_termination);
+
+int uisqueue_put_cmdrsp_with_lock_client(struct uisqueue_info *queueinfo,
+ struct uiscmdrsp *cmdrsp,
+ unsigned int queue,
+ void *insertlock,
+ unsigned char issueInterruptIfEmpty,
+ U64 interruptHandle,
+ char oktowait,
+ U8 *channelId);
+
+/* uisqueue_get_cmdrsp gets the cmdrsp entry at the head of the queue
+ * and copies it to the area pointed to by the cmdrsp param.
+ * Returns 0 if the queue is empty, 1 otherwise.
+ */
+int uisqueue_get_cmdrsp(struct uisqueue_info *queueinfo, void *cmdrsp,
+			unsigned int queue);
+
+#define MAX_NAME_SIZE_UISQUEUE 64
+
+struct extport_info {
+ U8 valid:1;
+ /* if 1, indicates this extport slot is occupied
+ * if 0, indicates that extport slot is unoccupied */
+
+ U32 num_devs_using;
+	/* When an extport is added, this is set to 0. For extports
+	 * located in NETWORK switches:
+ * Each time a VNIC, i.e., intport, is added to the switch this
+ * is used to assign a pref_pnic for the VNIC and when assigned
+ * to a VNIC this counter is incremented. When a VNIC is
+ * deleted, the extport corresponding to the VNIC's pref_pnic
+ * is located and its num_devs_using is decremented. For VNICs,
+ * num_devs_using is basically used to load-balance transmit
+ * traffic from VNICs.
+ */
+
+ struct switch_info *swtch;
+ struct PciId pci_id;
+ char name[MAX_NAME_SIZE_UISQUEUE];
+ union {
+ struct vhba_wwnn wwnn;
+ unsigned char macaddr[MAX_MACADDR_LEN];
+ };
+};
+
+struct device_info {
+ void __iomem *chanptr;
+ U64 channelAddr;
+ U64 channelBytes;
+ GUID channelTypeGuid;
+ GUID devInstGuid;
+ struct InterruptInfo intr;
+ struct switch_info *swtch;
+ char devid[30]; /* "vbus<busno>:dev<devno>" */
+ U16 polling;
+ struct semaphore interrupt_callback_lock;
+ U32 busNo;
+ U32 devNo;
+ int (*interrupt)(void *);
+ void *interrupt_context;
+ void *private_data;
+ struct list_head list_polling_device_channels;
+ unsigned long long moved_to_tail_cnt;
+ unsigned long long first_busy_cnt;
+ unsigned long long last_on_list_cnt;
+};
+
+typedef enum {
+ RECOVERY_LAN = 1,
+ IB_LAN = 2
+} SWITCH_TYPE;
+
+struct bus_info {
+ U32 busNo, deviceCount;
+ struct device_info **device;
+ U64 guestHandle, recvBusInterruptHandle;
+ GUID busInstGuid;
+ ULTRA_VBUS_CHANNEL_PROTOCOL __iomem *pBusChannel;
+ int busChannelBytes;
+ struct proc_dir_entry *proc_dir; /* proc/uislib/vbus/<x> */
+ struct proc_dir_entry *proc_info; /* proc/uislib/vbus/<x>/info */
+ char name[25];
+ char partitionName[99];
+ struct bus_info *next;
+ U8 localVnic; /* 1 if local vnic created internally
+ * by IOVM; 0 otherwise... */
+};
+
+#define DEDICATED_SWITCH(pSwitch) ((pSwitch->extPortCount == 1) && \
+ (pSwitch->intPortCount == 1))
+
+struct sn_list_entry {
+ struct uisscsi_dest pdest; /* scsi bus, target, lun for
+ * phys disk */
+ U8 sernum[MAX_SERIAL_NUM]; /* serial num of physical
+ * disk.. The length is always
+ * MAX_SERIAL_NUM, padded with
+ * spaces */
+ struct sn_list_entry *next;
+};
+
+struct networkPolicy {
+ U32 promiscuous:1;
+ U32 macassign:1;
+ U32 peerforwarding:1;
+ U32 nonotify:1;
+ U32 standby:1;
+ U32 callhome:2;
+ char ip_addr[30];
+};
+
+/*
+ * IO messages sent to UisnicControlChanFunc & UissdControlChanFunc by
+ * code that processes the ControlVm channel messages.
+ */
+
+
+typedef enum {
+ IOPART_ADD_VNIC,
+ IOPART_DEL_VNIC,
+ IOPART_DEL_ALL_VNICS,
+ IOPART_ADD_VHBA,
+ IOPART_ADD_VDISK,
+ IOPART_DEL_VHBA,
+ IOPART_DEL_VDISK,
+ IOPART_DEL_ALL_VDISKS_FOR_VHBA,
+ IOPART_DEL_ALL_VHBAS,
+ IOPART_ATTACH_PHBA,
+ IOPART_DETACH_PHBA, /* 10 */
+ IOPART_ATTACH_PNIC,
+ IOPART_DETACH_PNIC,
+ IOPART_DETACH_VHBA,
+ IOPART_DETACH_VNIC,
+ IOPART_PAUSE_VDISK,
+ IOPART_RESUME_VDISK,
+ IOPART_ADD_DEVICE, /* add generic device */
+ IOPART_DEL_DEVICE, /* del generic device */
+} IOPART_MSG_TYPE;
+
+struct add_virt_iopart {
+ void *chanptr; /* pointer to data channel */
+ U64 guestHandle; /* used to convert guest physical
+ * address to real physical address
+ * for DMA, for ex. */
+ U64 recvBusInterruptHandle; /* used to register to receive
+ * bus level interrupts. */
+ struct InterruptInfo intr; /* contains recv & send
+ * interrupt info */
+ /* recvInterruptHandle is used to register to receive
+ * interrupts on the data channel. Used by GuestLinux/Windows
+ * IO drivers to connect to interrupt. sendInterruptHandle is
+ * used by IOPart drivers as parameter to
+ * Issue_VMCALL_IO_QUEUE_TRANSITION to interrupt thread in
+ * guest linux/windows IO drivers when data channel queue for
+ * vhba/vnic goes from EMPTY to NON-EMPTY. */
+ struct switch_info *swtch; /* pointer to the virtual
+ * switch to which the vnic is
+ * connected */
+
+ U8 useG2GCopy; /* Used to determine if a virtual HBA
+ * needs to use G2G copy. */
+ U8 Filler[7];
+
+ U32 busNo;
+ U32 devNo;
+ char *params;
+ ulong params_bytes;
+
+};
+
+struct add_vdisk_iopart {
+ void *chanptr; /* pointer to data channel */
+ int implicit;
+ struct uisscsi_dest vdest; /* scsi bus, target, lun for virt disk */
+ struct uisscsi_dest pdest; /* scsi bus, target, lun for phys disk */
+ U8 sernum[MAX_SERIAL_NUM]; /* serial num of physical disk */
+ U32 serlen; /* length of serial num */
+ U32 busNo;
+ U32 devNo;
+};
+
+struct del_vdisk_iopart {
+ void *chanptr; /* pointer to data channel */
+ struct uisscsi_dest vdest; /* scsi bus, target, lun for virt disk */
+ U32 busNo;
+ U32 devNo;
+};
+
+struct del_virt_iopart {
+ void *chanptr; /* pointer to data channel */
+ U32 busNo;
+ U32 devNo;
+};
+
+struct det_virt_iopart { /* detach internal port */
+ void *chanptr; /* pointer to data channel */
+ struct switch_info *swtch;
+};
+
+struct paures_vdisk_iopart {
+ void *chanptr; /* pointer to data channel */
+ struct uisscsi_dest vdest; /* scsi bus, target, lun for virt disk */
+};
+
+struct add_switch_iopart { /* add switch */
+ struct switch_info *swtch;
+ char *params;
+ ulong params_bytes;
+};
+
+struct del_switch_iopart { /* destroy switch */
+ struct switch_info *swtch;
+};
+
+struct io_msgs {
+
+ IOPART_MSG_TYPE msgtype;
+
+ /* additional params needed by some messages */
+ union {
+ struct add_virt_iopart add_vhba;
+ struct add_virt_iopart add_vnic;
+ struct add_vdisk_iopart add_vdisk;
+ struct del_virt_iopart del_vhba;
+ struct del_virt_iopart del_vnic;
+ struct det_virt_iopart det_vhba;
+ struct det_virt_iopart det_vnic;
+ struct del_vdisk_iopart del_vdisk;
+ struct del_virt_iopart del_all_vdisks_for_vhba;
+ struct add_virt_iopart add_device;
+ struct del_virt_iopart del_device;
+ struct det_virt_iopart det_intport;
+ struct add_switch_iopart add_switch;
+ struct del_switch_iopart del_switch;
+ struct extport_info *extPort; /* for attach or detach
+ * pnic/generic delete all
+ * vhbas/allvnics need no
+ * parameters */
+ struct paures_vdisk_iopart paures_vdisk;
+ };
+};
+
+/*
+* Guest messages sent to VirtControlChanFunc by code that processes
+* the ControlVm channel messages.
+*/
+
+typedef enum {
+ GUEST_ADD_VBUS,
+ GUEST_ADD_VHBA,
+ GUEST_ADD_VNIC,
+ GUEST_DEL_VBUS,
+ GUEST_DEL_VHBA,
+ GUEST_DEL_VNIC,
+ GUEST_DEL_ALL_VHBAS,
+ GUEST_DEL_ALL_VNICS,
+ GUEST_DEL_ALL_VBUSES, /* deletes all vhbas & vnics on all
+ * buses and deletes all buses */
+ GUEST_PAUSE_VHBA,
+ GUEST_PAUSE_VNIC,
+ GUEST_RESUME_VHBA,
+ GUEST_RESUME_VNIC
+} GUESTPART_MSG_TYPE;
+
+struct add_vbus_guestpart {
+ void __iomem *chanptr; /* pointer to data channel for bus -
+ * NOT YET USED */
+ U32 busNo; /* bus number to be created/deleted */
+ U32 deviceCount; /* max num of devices on bus */
+ GUID busTypeGuid; /* indicates type of bus */
+ GUID busInstGuid; /* instance guid for device */
+};
+
+struct del_vbus_guestpart {
+ U32 busNo; /* bus number to be deleted */
+ /* once we start using the bus's channel, add can dump busNo
+ * into the channel header and then delete will need only one
+ * parameter, chanptr. */
+};
+
+struct add_virt_guestpart {
+ void __iomem *chanptr; /* pointer to data channel */
+ U32 busNo; /* bus number for the operation */
+ U32 deviceNo; /* number of device on the bus */
+ GUID devInstGuid; /* instance guid for device */
+ struct InterruptInfo intr; /* recv/send interrupt info */
+ /* recvInterruptHandle contains info needed in order to
+ * register to receive interrupts on the data channel.
+ * sendInterruptHandle contains handle which is provided to
+ * monitor VMCALL that will cause an interrupt to be generated
+ * for the other end.
+ */
+};
+
+struct pause_virt_guestpart {
+ void __iomem *chanptr; /* pointer to data channel */
+};
+
+struct resume_virt_guestpart {
+ void __iomem *chanptr; /* pointer to data channel */
+};
+
+struct del_virt_guestpart {
+ void __iomem *chanptr; /* pointer to data channel */
+};
+
+struct init_chipset_guestpart {
+ U32 busCount; /* indicates the max number of busses */
+ U32 switchCount; /* indicates the max number of switches */
+};
+
+struct guest_msgs {
+
+ GUESTPART_MSG_TYPE msgtype;
+
+ /* additional params needed by messages */
+ union {
+ struct add_vbus_guestpart add_vbus;
+ struct add_virt_guestpart add_vhba;
+ struct add_virt_guestpart add_vnic;
+ struct pause_virt_guestpart pause_vhba;
+ struct pause_virt_guestpart pause_vnic;
+ struct resume_virt_guestpart resume_vhba;
+ struct resume_virt_guestpart resume_vnic;
+ struct del_vbus_guestpart del_vbus;
+ struct del_virt_guestpart del_vhba;
+ struct del_virt_guestpart del_vnic;
+ struct del_vbus_guestpart del_all_vhbas;
+ struct del_vbus_guestpart del_all_vnics;
+ /* del_all_vbuses needs no parameters */
+ };
+ struct init_chipset_guestpart init_chipset;
+
+};
+
+#ifndef __xg
+#define __xg(x) ((volatile long *)(x))
+#endif
+
+/*
+* The code below is a copy of the Linux kernel's cmpxchg function, located at
+* http://tcsxeon:8080/source/xref/00trunk-AppOS-linux/include/asm-x86/cmpxchg_64.h#84
+* The reason for creating our own version of cmpxchg, along with
+* UISLIB_LOCK_PREFIX, is to make the operation atomic even for non-SMP
+* guests.
+*/
+
+#define uislibcmpxchg64(p, o, n, s) cmpxchg(p, o, n)
+
+#endif /* __UISQUEUE_H__ */
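[Editor's note: a sketch of the send/receive pattern the comments above describe. The queue indices (0 for commands, 1 for responses) and the surrounding plumbing are illustrative assumptions; the real indices come from iochannel.h.]

static int example_send_cmd(struct uisqueue_info *qi,
			    struct uiscmdrsp *cmdrsp, spinlock_t *insertlock,
			    U64 intr_handle, U8 *channel_id)
{
	/* OK_TO_WAIT: block until there is room; ISSUE_INTERRUPT: kick the
	 * peer if the queue was empty.  Returns 1 on success, 0 on failure. */
	return uisqueue_put_cmdrsp_with_lock_client(qi, cmdrsp, 0 /* cmd q */,
						    insertlock,
						    ISSUE_INTERRUPT,
						    intr_handle, OK_TO_WAIT,
						    channel_id);
}

static int example_poll_rsp(struct uisqueue_info *qi, struct uiscmdrsp *cmdrsp)
{
	/* copies the head entry into *cmdrsp; 0 means the queue was empty */
	return uisqueue_get_cmdrsp(qi, cmdrsp, 1 /* rsp q */);
}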
diff --git a/drivers/staging/unisys/include/uisthread.h b/drivers/staging/unisys/include/uisthread.h
new file mode 100644
index 000000000000..2b1fba759098
--- /dev/null
+++ b/drivers/staging/unisys/include/uisthread.h
@@ -0,0 +1,46 @@
+/* uisthread.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*****************************************************************************/
+/* Unisys thread utilities header */
+/*****************************************************************************/
+
+
+#ifndef __UISTHREAD_H__
+#define __UISTHREAD_H__
+
+
+#include "linux/completion.h"
+
+struct uisthread_info {
+ struct task_struct *task;
+ int id;
+ int should_stop;
+ struct completion has_stopped;
+};
+
+
+/* returns 0 for failure, 1 for success */
+int uisthread_start(
+ struct uisthread_info *thrinfo,
+ int (*threadfn)(void *),
+ void *thrcontext,
+ char *name);
+
+void uisthread_stop(struct uisthread_info *thrinfo);
+
+#endif /* __UISTHREAD_H__ */
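[Editor's note: a sketch of the lifecycle these declarations imply. The should_stop/has_stopped handshake shown is an inference from the field names; the real contract lives in the uisthread implementation.]

static int example_thread_fn(void *context)
{
	struct uisthread_info *thrinfo = context;

	while (!thrinfo->should_stop) {
		/* ... service the channel (real code would sleep or wait
		 * here rather than spin) ... */
	}
	complete(&thrinfo->has_stopped);	/* assumed stop handshake */
	return 0;
}

static int example_launch(struct uisthread_info *thrinfo)
{
	/* returns 0 for failure, 1 for success (see comment above) */
	return uisthread_start(thrinfo, example_thread_fn, thrinfo,
			       "example_uisthread");
}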
diff --git a/drivers/staging/unisys/include/uisutils.h b/drivers/staging/unisys/include/uisutils.h
new file mode 100644
index 000000000000..5fdab3a3464a
--- /dev/null
+++ b/drivers/staging/unisys/include/uisutils.h
@@ -0,0 +1,348 @@
+/* uisutils.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Unisys Virtual HBA utilities header
+ */
+
+#ifndef __UISUTILS__H__
+#define __UISUTILS__H__
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+
+#include "vmcallinterface.h"
+#include "channel.h"
+#include "uisthread.h"
+#include "uisqueue.h"
+#include "diagnostics/appos_subsystems.h"
+#include "vbusdeviceinfo.h"
+#include <linux/atomic.h>
+
+/* This is the MAGIC number stuffed by virthba in host->this_id. Used to
+ * identify virtual hbas.
+ */
+#define UIS_MAGIC_VHBA 707
+
+/* global function pointers that act as callback functions into
+ * uisnicmod, uissdmod, and virtpcimod
+ */
+extern int (*UisnicControlChanFunc)(struct io_msgs *);
+extern int (*UissdControlChanFunc)(struct io_msgs *);
+extern int (*VirtControlChanFunc)(struct guest_msgs *);
+
+/* Return values of above callback functions: */
+#define CCF_ERROR 0 /* completed and failed */
+#define CCF_OK 1 /* completed successfully */
+#define CCF_PENDING 2 /* operation still pending */
+extern atomic_t UisUtils_Registered_Services;
+
+typedef unsigned int MACARRAY[MAX_MACADDR_LEN];
+typedef struct ReqHandlerInfo_struct {
+ GUID switchTypeGuid;
+ int (*controlfunc)(struct io_msgs *);
+ unsigned long min_channel_bytes;
+ int (*Server_Channel_Ok)(unsigned long channelBytes);
+ int (*Server_Channel_Init)
+ (void *x, unsigned char *clientStr, U32 clientStrLen, U64 bytes);
+ char switch_type_name[99];
+ struct list_head list_link; /* links into ReqHandlerInfo_list */
+} ReqHandlerInfo_t;
+
+ReqHandlerInfo_t *ReqHandlerAdd(GUID switchTypeGuid,
+ const char *switch_type_name,
+ int (*controlfunc)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*Server_Channel_Ok)(unsigned long
+ channelBytes),
+ int (*Server_Channel_Init)
+ (void *x, unsigned char *clientStr,
+ U32 clientStrLen, U64 bytes));
+ReqHandlerInfo_t *ReqHandlerFind(GUID switchTypeGuid);
+int ReqHandlerDel(GUID switchTypeGuid);
+
+#define uislib_ioremap_cache(addr, size) \
+ dbg_ioremap_cache(addr, size, __FILE__, __LINE__)
+
+static inline void __iomem *
+dbg_ioremap_cache(U64 addr, unsigned long size, char *file, int line)
+{
+ void __iomem *new;
+ new = ioremap_cache(addr, size);
+ return new;
+}
+
+#define uislib_ioremap(addr, size) dbg_ioremap(addr, size, __FILE__, __LINE__)
+
+static inline void *
+dbg_ioremap(U64 addr, unsigned long size, char *file, int line)
+{
+ void *new;
+ new = ioremap(addr, size);
+ return new;
+}
+
+#define uislib_iounmap(addr) dbg_iounmap(addr, __FILE__, __LINE__)
+
+static inline void
+dbg_iounmap(void __iomem *addr, char *file, int line)
+{
+ iounmap(addr);
+}
+
+#define PROC_READ_BUFFER_SIZE 131072 /* size of the buffer to allocate to
+ * hold all of /proc/XXX/info */
+int uisutil_add_proc_line_ex(int *total, char **buffer, int *buffer_remaining,
+ char *format, ...);
+
+int uisctrl_register_req_handler(int type, void *fptr,
+ ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo);
+int uisctrl_register_req_handler_ex(GUID switchTypeGuid,
+ const char *switch_type_name,
+ int (*fptr)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*Server_Channel_Ok)(unsigned long
+ channelBytes),
+ int (*Server_Channel_Init)
+ (void *x, unsigned char *clientStr,
+ U32 clientStrLen, U64 bytes),
+ ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo);
+
+int uisctrl_unregister_req_handler_ex(GUID switchTypeGuid);
+unsigned char *util_map_virt(struct phys_info *sg);
+void util_unmap_virt(struct phys_info *sg);
+unsigned char *util_map_virt_atomic(struct phys_info *sg);
+void util_unmap_virt_atomic(void *buf);
+int uislib_server_inject_add_vnic(U32 switchNo, U32 BusNo, U32 numIntPorts,
+ U32 numExtPorts, MACARRAY pmac[],
+ pCHANNEL_HEADER **chan);
+void uislib_server_inject_del_vnic(U32 switchNo, U32 busNo, U32 numIntPorts,
+ U32 numExtPorts);
+int uislib_client_inject_add_bus(U32 busNo, GUID instGuid,
+ U64 channelAddr, ulong nChannelBytes);
+int uislib_client_inject_del_bus(U32 busNo);
+
+int uislib_client_inject_add_vhba(U32 busNo, U32 devNo,
+ U64 phys_chan_addr, U32 chan_bytes,
+ int is_test_addr, GUID instGuid,
+ struct InterruptInfo *intr);
+int uislib_client_inject_pause_vhba(U32 busNo, U32 devNo);
+int uislib_client_inject_resume_vhba(U32 busNo, U32 devNo);
+int uislib_client_inject_del_vhba(U32 busNo, U32 devNo);
+int uislib_client_inject_add_vnic(U32 busNo, U32 devNo,
+ U64 phys_chan_addr, U32 chan_bytes,
+ int is_test_addr, GUID instGuid,
+ struct InterruptInfo *intr);
+int uislib_client_inject_pause_vnic(U32 busNo, U32 devNo);
+int uislib_client_inject_resume_vnic(U32 busNo, U32 devNo);
+int uislib_client_inject_del_vnic(U32 busNo, U32 devNo);
+#ifdef STORAGE_CHANNEL
+U64 uislib_storage_channel(int client_id);
+#endif
+int uislib_get_owned_pdest(struct uisscsi_dest *pdest);
+
+int uislib_send_event(CONTROLVM_ID id, CONTROLVM_MESSAGE_PACKET *event);
+
+/* structure used by vhba & vnic to keep track of queue & thread info */
+struct chaninfo {
+ struct uisqueue_info *queueinfo;
+ /* this specifies the queue structures for a channel */
+ /* ALLOCATED BY THE OTHER END - WE JUST GET A POINTER TO THE MEMORY */
+ spinlock_t insertlock;
+ /* currently used only in virtnic when sending data to uisnic */
+ /* to synchronize the inserts into the signal queue */
+ struct uisthread_info threadinfo;
+ /* this specifies the thread structures used by the thread that */
+ /* handles this channel */
+};
+
+/* This is the wait code for all the threads; it is used to get
+* something from a queue.  Choices: wait_for_completion_interruptible,
+* _timeout, interruptible_timeout
+*/
+#define UIS_THREAD_WAIT_MSEC(x) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ schedule_timeout(msecs_to_jiffies(x)); \
+}
+#define UIS_THREAD_WAIT_USEC(x) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ schedule_timeout(usecs_to_jiffies(x)); \
+}
+#define UIS_THREAD_WAIT UIS_THREAD_WAIT_MSEC(5)
+#define UIS_THREAD_WAIT_SEC(x) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ schedule_timeout((x)*HZ); \
+}
+
+/* This is a hack until we fix IOVM to initialize the channel header
+ * correctly at DEVICE_CREATE time, INSTEAD OF waiting until
+ * DEVICE_CONFIGURE time.
+ */
+#define WAIT_FOR_VALID_GUID(guid) \
+ do { \
+ while (MEMCMP_IO(&guid, &Guid0, sizeof(Guid0)) == 0) { \
+ LOGERR("Waiting for non-0 GUID (why???)...\n"); \
+ UIS_THREAD_WAIT_SEC(5); \
+ } \
+ LOGERR("OK... GUID is non-0 now\n"); \
+ } while (0)
+
+/* uisutil_copy_fragsinfo_from_skb returns the number of entries added to
+ * the frags array, or -1 on failure.
+ */
+unsigned int uisutil_copy_fragsinfo_from_skb(unsigned char *calling_ctx,
+ void *skb_in,
+ unsigned int firstfraglen,
+ unsigned int frags_max,
+ struct phys_info frags[]);
+
+static inline unsigned int
+Issue_VMCALL_IO_CONTROLVM_ADDR(U64 *ControlAddress, U32 *ControlBytes)
+{
+ VMCALL_IO_CONTROLVM_ADDR_PARAMS params;
+ int result = VMCALL_SUCCESS;
+ U64 physaddr;
+
+ physaddr = virt_to_phys(&params);
+ ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
+ if (VMCALL_SUCCESSFUL(result)) {
+ *ControlAddress = params.ChannelAddress;
+ *ControlBytes = params.ChannelBytes;
+ }
+ return result;
+}
+
+static inline unsigned int Issue_VMCALL_IO_DIAG_ADDR(U64 *DiagChannelAddress)
+{
+ VMCALL_IO_DIAG_ADDR_PARAMS params;
+ int result = VMCALL_SUCCESS;
+ U64 physaddr;
+
+ physaddr = virt_to_phys(&params);
+ ISSUE_IO_VMCALL(VMCALL_IO_DIAG_ADDR, physaddr, result);
+ if (VMCALL_SUCCESSFUL(result))
+ *DiagChannelAddress = params.ChannelAddress;
+ return result;
+}
+
+static inline unsigned int
+Issue_VMCALL_IO_VISORSERIAL_ADDR(U64 *DiagChannelAddress)
+{
+ VMCALL_IO_VISORSERIAL_ADDR_PARAMS params;
+ int result = VMCALL_SUCCESS;
+ U64 physaddr;
+
+ physaddr = virt_to_phys(&params);
+ ISSUE_IO_VMCALL(VMCALL_IO_VISORSERIAL_ADDR, physaddr, result);
+ if (VMCALL_SUCCESSFUL(result))
+ *DiagChannelAddress = params.ChannelAddress;
+ return result;
+}
+
+static inline S64 Issue_VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET(void)
+{
+ U64 result = VMCALL_SUCCESS;
+ U64 physaddr = 0;
+
+ ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
+ result);
+ return result;
+}
+
+static inline S64 Issue_VMCALL_MEASUREMENT_DO_NOTHING(void)
+{
+ U64 result = VMCALL_SUCCESS;
+ U64 physaddr = 0;
+
+ ISSUE_IO_VMCALL(VMCALL_MEASUREMENT_DO_NOTHING, physaddr, result);
+ return result;
+}
+
+struct log_info_t {
+ volatile unsigned long long last_cycles;
+ unsigned long long delta_sum[64];
+ unsigned long long delta_cnt[64];
+ unsigned long long max_delta[64];
+ unsigned long long min_delta[64];
+};
+
+static inline int Issue_VMCALL_UPDATE_PHYSICAL_TIME(U64 adjustment)
+{
+ int result = VMCALL_SUCCESS;
+
+ ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
+ return result;
+}
+
+static inline unsigned int
+Issue_VMCALL_CHANNEL_MISMATCH(const char *ChannelName,
+ const char *ItemName,
+ U32 SourceLineNumber, const char *path_n_fn)
+{
+ VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS params;
+ int result = VMCALL_SUCCESS;
+ U64 physaddr;
+ char *last_slash = NULL;
+
+ strncpy(params.ChannelName, ChannelName,
+ lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS, ChannelName));
+ strncpy(params.ItemName, ItemName,
+ lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS, ItemName));
+ params.SourceLineNumber = SourceLineNumber;
+
+ last_slash = strrchr(path_n_fn, '/');
+ if (last_slash != NULL) {
+ last_slash++;
+ strncpy(params.SourceFileName, last_slash,
+ lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS,
+ SourceFileName));
+ } else
+ strncpy(params.SourceFileName,
+ "Cannot determine source filename",
+ lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS,
+ SourceFileName));
+
+ physaddr = virt_to_phys(&params);
+ ISSUE_IO_VMCALL(VMCALL_CHANNEL_VERSION_MISMATCH, physaddr, result);
+ return result;
+}
+
+static inline unsigned int Issue_VMCALL_FATAL_BYE_BYE(void)
+{
+ int result = VMCALL_SUCCESS;
+ U64 physaddr = 0;
+
+ ISSUE_IO_VMCALL(VMCALL_GENERIC_SURRENDER_QUANTUM_FOREVER, physaddr,
+ result);
+ return result;
+}
+
+#define UIS_DAEMONIZE(nam)
+void *uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln);
+#define UISCACHEALLOC(cur_pool) uislib_cache_alloc(cur_pool, __FILE__, __LINE__)
+void uislib_cache_free(struct kmem_cache *cur_pool, void *p, char *fn, int ln);
+#define UISCACHEFREE(cur_pool, p) \
+ uislib_cache_free(cur_pool, p, __FILE__, __LINE__)
+
+void uislib_enable_channel_interrupts(U32 busNo, U32 devNo,
+ int (*interrupt)(void *),
+ void *interrupt_context);
+void uislib_disable_channel_interrupts(U32 busNo, U32 devNo);
+void uislib_force_channel_interrupt(U32 busNo, U32 devNo);
+
+#endif /* __UISUTILS__H__ */
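[Editor's note: a sketch illustrating the vmcall wrappers above by discovering the ControlVm channel. Treating a non-successful VMCALL result as -1 and the log text are illustrative choices, not defined by this header.]

static int example_find_controlvm_channel(U64 *addr, U32 *bytes)
{
	unsigned int rc = Issue_VMCALL_IO_CONTROLVM_ADDR(addr, bytes);

	if (!VMCALL_SUCCESSFUL(rc)) {
		LOGERR("VMCALL_IO_CONTROLVM_ADDR failed, rc=%u", rc);
		return -1;
	}
	LOGINF("ControlVm channel at 0x%llx (%u bytes)",
	       (unsigned long long)*addr, *bytes);
	return 0;
}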
diff --git a/drivers/staging/unisys/include/uniklog.h b/drivers/staging/unisys/include/uniklog.h
new file mode 100644
index 000000000000..4d7b87cefa61
--- /dev/null
+++ b/drivers/staging/unisys/include/uniklog.h
@@ -0,0 +1,193 @@
+/* uniklog.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* This module contains macros to aid developers in logging messages.
+ *
+ * This module is affected by the DEBUG compiletime option.
+ *
+ */
+#ifndef __UNIKLOG_H__
+#define __UNIKLOG_H__
+
+
+#include <linux/printk.h>
+
+/*
+ * # DBGINF
+ *
+ * \brief Log debug informational message - log a LOG_INFO message only
+ * if DEBUG compiletime option enabled
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Log a message at the LOG_INFO level, but only if DEBUG is enabled. If
+ * DEBUG is disabled, this expands to a no-op.
+ */
+
+/*
+ * # DBGVER
+ *
+ * \brief Log debug verbose message - log a LOG_DEBUG message only if
+ * DEBUG compiletime option enabled
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Log a message at the LOG_DEBUG level, but only if DEBUG is enabled. If
+ * DEBUG is disabled, this expands to a no-op. Note also that LOG_DEBUG
+ * messages can be enabled/disabled at runtime as well.
+ */
+#define DBGINFDEV(devname, fmt, args...) do { } while (0)
+#define DBGVERDEV(devname, fmt, args...) do { } while (0)
+#define DBGINF(fmt, args...) do { } while (0)
+#define DBGVER(fmt, args...) do { } while (0)
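+/*
+ * Illustrative usage (hypothetical names and values). As defined above,
+ * these macros expand to no-ops, so such calls cost nothing in a
+ * non-DEBUG build:
+ *
+ *	DBGINF("request-polling thread started\n");
+ *	DBGINFDEV("vbus1:dev2", "channel mapped at %p\n", chanptr);
+ */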
+
+/*
+ * # LOGINF
+ *
+ * \brief Log informational message - logs a message at the LOG_INFO level
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Logs the specified message at the LOG_INFO level.
+ */
+
+#define LOGINF(fmt, args...) pr_info(fmt, ## args)
+#define LOGINFDEV(devname, fmt, args...) \
+ pr_info("%s " fmt, devname, ## args)
+#define LOGINFDEVX(devno, fmt, args...) \
+ pr_info("dev%d " fmt, devno, ## args)
+#define LOGINFNAME(vnic, fmt, args...) \
+ do { \
+ if (vnic != NULL) { \
+ pr_info("%s " fmt, vnic->name, ## args); \
+ } else { \
+ pr_info(fmt, ## args); \
+ } \
+ } while (0)
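+/*
+ * Illustrative usage (hypothetical names and values). LOGINF logs
+ * unconditionally at LOG_INFO via pr_info(), LOGINFDEV prefixes the
+ * device name, and LOGINFNAME falls back to an unprefixed message when
+ * the vnic pointer is NULL:
+ *
+ *	LOGINF("chipset initialized, %d buses\n", bus_count);
+ *	LOGINFDEV("vbus0:dev1", "channel size %lu bytes\n", bytes);
+ *	LOGINFNAME(vnic, "link state changed to %d\n", state);
+ */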
+
+/*
+ * # LOGVER
+ *
+ * \brief Log verbose message - logs a message at the LOG_DEBUG level,
+ * which can be disabled at runtime
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Logs the specified message at the LOG_DEBUG level. Note also that
+ * LOG_DEBUG messages can be enabled/disabled at runtime as well.
+ */
+#define LOGVER(fmt, args...) pr_debug(fmt, ## args)
+#define LOGVERDEV(devname, fmt, args...) \
+ pr_debug("%s " fmt, devname, ## args)
+#define LOGVERNAME(vnic, fmt, args...) \
+ do { \
+ if (vnic != NULL) { \
+ pr_debug("%s " fmt, vnic->name, ## args); \
+ } else { \
+ pr_debug(fmt, ## args); \
+ } \
+ } while (0)
+
+
+/*
+ * # LOGERR
+ *
+ * \brief Log error message - logs a message at the LOG_ERR level,
+ * including source line number information
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Logs the specified error message at the LOG_ERR level. It will also
+ * include the file, line number, and function name of where the error
+ * originated in the log message.
+ */
+#define LOGERR(fmt, args...) pr_err(fmt, ## args)
+#define LOGERRDEV(devname, fmt, args...) \
+ pr_err("%s " fmt, devname, ## args)
+#define LOGERRDEVX(devno, fmt, args...) \
+ pr_err("dev%d " fmt, devno, ## args)
+#define LOGERRNAME(vnic, fmt, args...) \
+ do { \
+ if (vnic != NULL) { \
+ pr_err("%s " fmt, vnic->name, ## args); \
+ } else { \
+ pr_err(fmt, ## args); \
+ } \
+ } while (0)
+#define LOGORDUMPERR(seqfile, fmt, args...) do { \
+ if (seqfile) { \
+ seq_printf(seqfile, fmt, ## args); \
+ } else { \
+ LOGERR(fmt, ## args); \
+ } \
+ } while (0)
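+/*
+ * Illustrative usage (hypothetical call site). LOGORDUMPERR writes to
+ * the given seq_file when one is available (e.g. from a /proc show
+ * routine, which needs <linux/seq_file.h>) and falls back to LOGERR
+ * otherwise:
+ *
+ *	LOGORDUMPERR(seqfile, "bus %d has no devices\n", busNo);
+ */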
+
+/*
+ * # LOGWRN
+ *
+ * \brief Log warning message - Logs a message at the LOG_WARNING level,
+ * including source line number information
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Logs the specified warning message at the LOG_WARNING level. It will also
+ * include the file, line number, and function name of where the warning
+ * originated in the log message.
+ */
+#define LOGWRN(fmt, args...) pr_warn(fmt, ## args)
+#define LOGWRNDEV(devname, fmt, args...) \
+ pr_warn("%s " fmt, devname, ## args)
+#define LOGWRNNAME(vnic, fmt, args...) \
+ do { \
+ if (vnic != NULL) { \
+ pr_warn("%s " fmt, vnic->name, ## args); \
+ } else { \
+ pr_warn(fmt, ## args); \
+ } \
+ } while (0)
+
+#endif /* __UNIKLOG_H__ */
diff --git a/drivers/staging/unisys/include/vbushelper.h b/drivers/staging/unisys/include/vbushelper.h
new file mode 100644
index 000000000000..93e35f039ded
--- /dev/null
+++ b/drivers/staging/unisys/include/vbushelper.h
@@ -0,0 +1,47 @@
+/* vbushelper.h
+ *
+ * Copyright © 2011 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VBUSHELPER_H__
+#define __VBUSHELPER_H__
+
+#include "vbusdeviceinfo.h"
+
+/* TARGET_HOSTNAME specified as -DTARGET_HOSTNAME=\"thename\" on the
+ * command line */
+
+#define TARGET_HOSTNAME "linuxguest"
+
+static inline void
+BusDeviceInfo_Init(ULTRA_VBUS_DEVICEINFO *pBusDeviceInfo,
+ const char *deviceType, const char *driverName,
+ const char *ver, const char *verTag,
+ const char *buildDate, const char *buildTime)
+{
+ memset(pBusDeviceInfo, 0, sizeof(ULTRA_VBUS_DEVICEINFO));
+ snprintf(pBusDeviceInfo->devType, sizeof(pBusDeviceInfo->devType),
+ "%s", (deviceType) ? deviceType : "unknownType");
+ snprintf(pBusDeviceInfo->drvName, sizeof(pBusDeviceInfo->drvName),
+ "%s", (driverName) ? driverName : "unknownDriver");
+ snprintf(pBusDeviceInfo->infoStrings,
+ sizeof(pBusDeviceInfo->infoStrings), "%s\t%s\t%s %s\t%s",
+ (ver) ? ver : "unknownVer",
+ (verTag) ? verTag : "unknownVerTag",
+ (buildDate) ? buildDate : "noBuildDate",
+ (buildTime) ? buildTime : "nobuildTime", TARGET_HOSTNAME);
+}
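+/*
+ * Illustrative call (hypothetical driver strings). Any argument may be
+ * NULL, in which case the corresponding placeholder string above is
+ * substituted:
+ *
+ *	ULTRA_VBUS_DEVICEINFO devinfo;
+ *
+ *	BusDeviceInfo_Init(&devinfo, "network", "visornic",
+ *			   "1.0", NULL, __DATE__, __TIME__);
+ */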
+
+#endif
diff --git a/drivers/staging/unisys/uislib/Kconfig b/drivers/staging/unisys/uislib/Kconfig
new file mode 100644
index 000000000000..8d87d9c81c66
--- /dev/null
+++ b/drivers/staging/unisys/uislib/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys uislib configuration
+#
+
+config UNISYS_UISLIB
+ tristate "Unisys uislib driver"
+ depends on UNISYSSPAR && UNISYS_VISORCHIPSET && UNISYS_CHANNELSTUB
+ ---help---
+ If you say Y here, you will enable the Unisys uislib driver.
+
diff --git a/drivers/staging/unisys/uislib/Makefile b/drivers/staging/unisys/uislib/Makefile
new file mode 100644
index 000000000000..6e44d49458f5
--- /dev/null
+++ b/drivers/staging/unisys/uislib/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for Unisys uislib
+#
+
+obj-$(CONFIG_UNISYS_UISLIB) += visoruislib.o
+
+visoruislib-y := uislib.o uisqueue.o uisthread.o uisutils.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/channels
+ccflags-y += -Idrivers/staging/unisys/visorchipset
+ccflags-y += -Idrivers/staging/unisys/sparstopdriver
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/uislib/uislib.c b/drivers/staging/unisys/uislib/uislib.c
new file mode 100644
index 000000000000..8ea9c46e56ae
--- /dev/null
+++ b/drivers/staging/unisys/uislib/uislib.c
@@ -0,0 +1,2421 @@
+/* uislib.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* @ALL_INSPECTED */
+#define EXPORT_SYMTAB
+#include <linux/kernel.h>
+#include <linux/highmem.h>
+#ifdef CONFIG_MODVERSIONS
+#include <config/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include "commontypes.h"
+
+#include <linux/version.h>
+#include "uniklog.h"
+#include "diagnostics/appos_subsystems.h"
+#include "uisutils.h"
+#include "vbuschannel.h"
+
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h> /* for copy_from_user */
+#include <linux/ctype.h> /* for toupper */
+#include <linux/list.h>
+
+#include "sparstop.h"
+#include "visorchipset.h"
+#include "chanstub.h"
+#include "version.h"
+#include "guestlinuxdebug.h"
+
+#define SET_PROC_OWNER(x, y)
+
+#define UISLIB_TEST_PROC
+#define POLLJIFFIES_NORMAL 1
+/* Choose whether or not you want to wake up the request-polling thread
+ * after an IO termination.
+ */
+#define CURRENT_FILE_PC UISLIB_PC_uislib_c
+/* __MYFILE__ is shorter than using __FILE__ (the full path name) in
+ * debug/info/error messages.
+ */
+#define __MYFILE__ "uislib.c"
+
+/* global function pointers that act as callback functions into virtpcimod */
+int (*VirtControlChanFunc)(struct guest_msgs *);
+
+static int ProcReadBufferValid;
+static char *ProcReadBuffer; /* Note this MUST be global,
+ * because the contents must be preserved between reads */
+static unsigned int chipset_inited;
+
+#define WAIT_ON_CALLBACK(handle) \
+ do { \
+ if (handle) \
+ break; \
+ UIS_THREAD_WAIT; \
+ } while (1)
+
+static struct bus_info *BusListHead;
+static rwlock_t BusListLock;
+static int BusListCount; /* number of buses in the list */
+static int MaxBusCount; /* maximum number of buses expected */
+static U64 PhysicalDataChan;
+static int PlatformNumber;
+
+static struct uisthread_info Incoming_ThreadInfo;
+static BOOL Incoming_Thread_Started = FALSE;
+static LIST_HEAD(List_Polling_Device_Channels);
+static unsigned long long tot_moved_to_tail_cnt;
+static unsigned long long tot_wait_cnt;
+static unsigned long long tot_wakeup_cnt;
+static unsigned long long tot_schedule_cnt;
+static int en_smart_wakeup = 1;
+static DEFINE_SEMAPHORE(Lock_Polling_Device_Channels); /* unlocked */
+static DECLARE_WAIT_QUEUE_HEAD(Wakeup_Polling_Device_Channels);
+static int Go_Polling_Device_Channels;
+
+static struct proc_dir_entry *uislib_proc_dir;
+static struct proc_dir_entry *uislib_proc_vbus_dir;
+static struct proc_dir_entry *vnic_proc_entry; /* Used to be "datachan" */
+static struct proc_dir_entry *ctrlchan_proc_entry;
+static struct proc_dir_entry *pmem_proc_entry;
+static struct proc_dir_entry *info_proc_entry;
+static struct proc_dir_entry *switch_proc_entry;
+static struct proc_dir_entry *extport_proc_entry;
+static struct proc_dir_entry *platformnumber_proc_entry;
+static struct proc_dir_entry *bus_proc_entry;
+static struct proc_dir_entry *dev_proc_entry;
+static struct proc_dir_entry *chipset_proc_entry;
+static struct proc_dir_entry *cycles_before_wait_proc_entry;
+static struct proc_dir_entry *reset_counts_proc_entry;
+static struct proc_dir_entry *smart_wakeup_proc_entry;
+static struct proc_dir_entry *disable_proc_entry;
+
+#define DIR_PROC_ENTRY "uislib"
+#define DIR_VBUS_PROC_ENTRY "vbus"
+#define VNIC_PROC_ENTRY_FN "vnic" /* Used to be "datachan" */
+#define CTRLCHAN_PROC_ENTRY_FN "ctrlchan"
+#define PMEM_PROC_ENTRY_FN "phys_to_virt"
+#define INFO_PROC_ENTRY_FN "info"
+#define SWITCH_PROC_ENTRY_FN "switch"
+#define SWITCH_COUNT_PROC_ENTRY_FN "switch_count"
+#define EXTPORT_PROC_ENTRY_FN "extport"
+#define PLATFORMNUMBER_PROC_ENTRY_FN "platform"
+#define BUS_PROC_ENTRY_FN "bus"
+#define DEV_PROC_ENTRY_FN "device"
+#define CHIPSET_PROC_ENTRY_FN "chipset"
+#define CYCLES_BEFORE_WAIT_PROC_ENTRY_FN "cycles_before_wait"
+#define RESET_COUNTS_PROC_ENTRY_FN "reset_counts"
+#define SMART_WAKEUP_PROC_ENTRY_FN "smart_wakeup"
+#define CALLHOME_PROC_ENTRY_FN "callhome"
+#define CALLHOME_THROTTLED_PROC_ENTRY_FN "callhome_throttled"
+#define DISABLE_PROC_ENTRY_FN "switch_state"
+#ifdef UISLIB_TEST_PROC
+static struct proc_dir_entry *test_proc_entry;
+#define TEST_PROC_ENTRY_FN "test"
+#endif
+static unsigned long long cycles_before_wait, wait_cycles;
+
+/*****************************************************/
+/* local functions */
+/*****************************************************/
+
+static int proc_info_vbus_show(struct seq_file *m, void *v);
+static int
+proc_info_vbus_open(struct inode *inode, struct file *filp)
+{
+ /* proc_info_vbus_show will grab this from seq_file.private: */
+ struct bus_info *bus = PDE_DATA(inode);
+ return single_open(filp, proc_info_vbus_show, bus);
+}
+
+static const struct file_operations proc_info_vbus_fops = {
+ .open = proc_info_vbus_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t uislib_proc_read_writeonly(struct file *file,
+ char __user *buffer,
+ size_t count, loff_t *ppos);
+
+static ssize_t vnic_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+
+static const struct file_operations proc_vnic_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = vnic_proc_write,
+};
+
+static ssize_t chipset_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+
+static const struct file_operations proc_chipset_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = chipset_proc_write,
+};
+
+static ssize_t info_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static const struct file_operations proc_info_fops = {
+ .read = info_proc_read,
+};
+
+static ssize_t platformnumber_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static const struct file_operations proc_platformnumber_fops = {
+ .read = platformnumber_proc_read,
+};
+
+static ssize_t cycles_before_wait_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_cycles_before_wait_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = cycles_before_wait_proc_write,
+};
+
+static ssize_t reset_counts_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_reset_counts_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = reset_counts_proc_write,
+};
+
+static ssize_t smart_wakeup_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_smart_wakeup_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = smart_wakeup_proc_write,
+};
+
+static ssize_t test_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_test_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = test_proc_write,
+};
+
+static ssize_t bus_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_bus_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = bus_proc_write,
+};
+
+static ssize_t dev_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_dev_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = dev_proc_write,
+};
+
+static void
+init_msg_header(CONTROLVM_MESSAGE *msg, U32 id, uint rsp, uint svr)
+{
+ memset(msg, 0, sizeof(CONTROLVM_MESSAGE));
+ msg->hdr.Id = id;
+ msg->hdr.Flags.responseExpected = rsp;
+ msg->hdr.Flags.server = svr;
+}
+
+static void
+create_bus_proc_entries(struct bus_info *bus)
+{
+ bus->proc_dir = proc_mkdir(bus->name, uislib_proc_vbus_dir);
+ if (!bus->proc_dir) {
+ LOGERR("failed to create /proc/uislib/vbus/%s directory",
+ bus->name);
+ return;
+ }
+ bus->proc_info = proc_create_data("info", 0, bus->proc_dir,
+ &proc_info_vbus_fops, bus);
+ if (!bus->proc_info) {
+ LOGERR("failed to create /proc/uislib/vbus/%s/info", bus->name);
+ remove_proc_entry(bus->name, uislib_proc_vbus_dir);
+ bus->proc_dir = NULL;
+ return;
+ }
+ SET_PROC_OWNER(bus->proc_info, THIS_MODULE);
+
+}
+
+static void __iomem *
+init_vbus_channel(U64 channelAddr, U32 channelBytes, int isServer)
+{
+ void *rc = NULL;
+ void __iomem *pChan = uislib_ioremap_cache(channelAddr, channelBytes);
+ if (!pChan) {
+ LOGERR("CONTROLVM_BUS_CREATE error: ioremap_cache of channelAddr:%Lx for channelBytes:%llu failed",
+ (unsigned long long) channelAddr,
+ (unsigned long long) channelBytes);
+ rc = NULL;
+ goto Away;
+ }
+ if (isServer) {
+ memset_io(pChan, 0, channelBytes);
+ if (!ULTRA_VBUS_CHANNEL_OK_SERVER(channelBytes, NULL)) {
+ ERRDRV("%s channel cannot be used", __func__);
+ uislib_iounmap(pChan);
+ rc = NULL;
+ goto Away;
+ }
+ ULTRA_VBUS_init_channel(pChan, channelBytes);
+ } else {
+ if (!ULTRA_VBUS_CHANNEL_OK_CLIENT(pChan, NULL)) {
+ ERRDRV("%s channel cannot be used", __func__);
+ uislib_iounmap(pChan);
+ rc = NULL;
+ goto Away;
+ }
+ }
+ rc = pChan;
+Away:
+ return rc;
+}
+
+static int
+create_bus(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ U32 busNo, deviceCount;
+ struct bus_info *tmp, *bus;
+ size_t size;
+
+ if (MaxBusCount == BusListCount) {
+ LOGERR("CONTROLVM_BUS_CREATE Failed: max buses:%d already created\n",
+ MaxBusCount);
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, MaxBusCount,
+ POSTCODE_SEVERITY_ERR);
+ return CONTROLVM_RESP_ERROR_MAX_BUSES;
+ }
+
+ busNo = msg->cmd.createBus.busNo;
+ deviceCount = msg->cmd.createBus.deviceCount;
+
+ POSTCODE_LINUX_4(BUS_CREATE_ENTRY_PC, busNo, deviceCount,
+ POSTCODE_SEVERITY_INFO);
+
+ size =
+ sizeof(struct bus_info) +
+ (deviceCount * sizeof(struct device_info *));
+ bus = kzalloc(size, GFP_ATOMIC);
+ if (!bus) {
+ LOGERR("CONTROLVM_BUS_CREATE Failed: kmalloc for bus failed.\n");
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ }
+
+ /* Currently by default, the bus Number is the GuestHandle.
+ * Configure Bus message can override this.
+ */
+ if (msg->hdr.Flags.testMessage) {
+ /* This implies we're the IOVM so set guest handle to 0... */
+ bus->guestHandle = 0;
+ bus->busNo = busNo;
+ bus->localVnic = 1;
+ } else
+ bus->busNo = bus->guestHandle = busNo;
+ sprintf(bus->name, "%d", (int) bus->busNo);
+ bus->deviceCount = deviceCount;
+ bus->device =
+ (struct device_info **) ((char *) bus + sizeof(struct bus_info));
+ bus->busInstGuid = msg->cmd.createBus.busInstGuid;
+ bus->busChannelBytes = 0;
+ bus->pBusChannel = NULL;
+
+ /* add bus to our bus list - but check for duplicates first */
+ read_lock(&BusListLock);
+ for (tmp = BusListHead; tmp; tmp = tmp->next) {
+ if (tmp->busNo == bus->busNo)
+ break;
+ }
+ read_unlock(&BusListLock);
+ if (tmp) {
+ /* found a bus already in the list with same busNo -
+ * reject add
+ */
+ LOGERR("CONTROLVM_BUS_CREATE Failed: bus %d already exists.\n",
+ bus->busNo);
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ POSTCODE_SEVERITY_ERR);
+ kfree(bus);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ if ((msg->cmd.createBus.channelAddr != 0)
+ && (msg->cmd.createBus.channelBytes != 0)) {
+ bus->busChannelBytes = msg->cmd.createBus.channelBytes;
+ bus->pBusChannel =
+ init_vbus_channel(msg->cmd.createBus.channelAddr,
+ msg->cmd.createBus.channelBytes,
+ msg->hdr.Flags.server);
+ }
+ /* the msg is bound for virtpci; send guest_msgs struct to callback */
+ if (!msg->hdr.Flags.server) {
+ struct guest_msgs cmd;
+ cmd.msgtype = GUEST_ADD_VBUS;
+ cmd.add_vbus.busNo = busNo;
+ cmd.add_vbus.chanptr = bus->pBusChannel;
+ cmd.add_vbus.deviceCount = deviceCount;
+ cmd.add_vbus.busTypeGuid = msg->cmd.createBus.busDataTypeGuid;
+ cmd.add_vbus.busInstGuid = msg->cmd.createBus.busInstGuid;
+ if (!VirtControlChanFunc) {
+ kfree(bus);
+ LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci callback not registered.");
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ POSTCODE_SEVERITY_ERR);
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+ if (!VirtControlChanFunc(&cmd)) {
+ kfree(bus);
+ LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci GUEST_ADD_VBUS returned error.");
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ POSTCODE_SEVERITY_ERR);
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+ }
+ create_bus_proc_entries(bus);
+
+ /* add bus at the head of our list */
+ write_lock(&BusListLock);
+ if (!BusListHead)
+ BusListHead = bus;
+ else {
+ bus->next = BusListHead;
+ BusListHead = bus;
+ }
+ BusListCount++;
+ write_unlock(&BusListLock);
+
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->busNo,
+ POSTCODE_SEVERITY_INFO);
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+destroy_bus(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ int i;
+ struct bus_info *bus, *prev = NULL;
+ U32 busNo;
+
+ busNo = msg->cmd.destroyBus.busNo;
+
+ /* find and delete the bus */
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; prev = bus, bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* found the bus - ensure that all device
+ * slots are NULL
+ */
+ for (i = 0; i < bus->deviceCount; i++) {
+ if (bus->device[i] != NULL) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: device %i attached to bus %d.",
+ i, busNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED;
+ }
+ }
+ read_unlock(&BusListLock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (!msg->hdr.Flags.server) {
+ struct guest_msgs cmd;
+ cmd.msgtype = GUEST_DEL_VBUS;
+ cmd.del_vbus.busNo = busNo;
+ if (!VirtControlChanFunc) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: virtpci callback not registered.");
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+ if (!VirtControlChanFunc(&cmd)) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: virtpci GUEST_DEL_VBUS returned error.");
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+ }
+ /* remove the bus from the list */
+ write_lock(&BusListLock);
+ if (prev) /* not at head */
+ prev->next = bus->next;
+ else
+ BusListHead = bus->next;
+ BusListCount--;
+ write_unlock(&BusListLock);
+ break;
+ }
+ }
+
+ if (!bus) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: failed to find bus %d.\n",
+ busNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ if (bus->proc_info) {
+ remove_proc_entry("info", bus->proc_dir);
+ bus->proc_info = NULL;
+ }
+ if (bus->proc_dir) {
+ remove_proc_entry(bus->name, uislib_proc_vbus_dir);
+ bus->proc_dir = NULL;
+ }
+ if (bus->pBusChannel) {
+ uislib_iounmap(bus->pBusChannel);
+ bus->pBusChannel = NULL;
+ }
+
+ kfree(bus);
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+create_device(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ struct device_info *dev;
+ struct bus_info *bus;
+ U32 busNo, devNo;
+ int result = CONTROLVM_RESP_SUCCESS;
+ U64 minSize = MIN_IO_CHANNEL_SIZE;
+ ReqHandlerInfo_t *pReqHandler;
+
+ busNo = msg->cmd.createDevice.busNo;
+ devNo = msg->cmd.createDevice.devNo;
+
+ POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+
+ dev = kzalloc(sizeof(struct device_info), GFP_ATOMIC);
+ if (!dev) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: kmalloc for dev failed.\n");
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ }
+
+ dev->channelTypeGuid = msg->cmd.createDevice.dataTypeGuid;
+ dev->intr = msg->cmd.createDevice.intr;
+ dev->channelAddr = msg->cmd.createDevice.channelAddr;
+ dev->busNo = busNo;
+ dev->devNo = devNo;
+ sema_init(&dev->interrupt_callback_lock, 1); /* unlocked */
+ sprintf(dev->devid, "vbus%u:dev%u", (unsigned) busNo, (unsigned) devNo);
+ /* map the channel memory for the device. */
+ if (msg->hdr.Flags.testMessage)
+ dev->chanptr = (void __iomem *)__va(dev->channelAddr);
+ else {
+ pReqHandler = ReqHandlerFind(dev->channelTypeGuid);
+ if (pReqHandler)
+ /* generic service handler registered for this
+ * channel
+ */
+ minSize = pReqHandler->min_channel_bytes;
+ if (minSize > msg->cmd.createDevice.channelBytes) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: channel size is too small, channel size:0x%lx, required size:0x%lx",
+ (ulong) msg->cmd.createDevice.channelBytes,
+ (ulong) minSize);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL;
+ goto Away;
+ }
+ dev->chanptr =
+ uislib_ioremap_cache(dev->channelAddr,
+ msg->cmd.createDevice.channelBytes);
+ if (!dev->chanptr) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: ioremap_cache of channelAddr:%Lx for channelBytes:%llu failed",
+ dev->channelAddr,
+ msg->cmd.createDevice.channelBytes);
+ result = CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ goto Away;
+ }
+ }
+ dev->devInstGuid = msg->cmd.createDevice.devInstGuid;
+ dev->channelBytes = msg->cmd.createDevice.channelBytes;
+
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* make sure the device number is valid */
+ if (devNo >= bus->deviceCount) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: device (%d) >= deviceCount (%d).",
+ devNo, bus->deviceCount);
+ result = CONTROLVM_RESP_ERROR_MAX_DEVICES;
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
+ devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ read_unlock(&BusListLock);
+ goto Away;
+ }
+ /* make sure this device is not already set */
+ if (bus->device[devNo]) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: device %d is already exists.",
+ devNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
+ devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ read_unlock(&BusListLock);
+ goto Away;
+ }
+ read_unlock(&BusListLock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (!msg->hdr.Flags.server) {
+ struct guest_msgs cmd;
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVhbaChannelProtocolGuid,
+ sizeof(GUID))) {
+ WAIT_FOR_VALID_GUID(((CHANNEL_HEADER
+ __iomem *) (dev->
+ chanptr))->
+ Type);
+ if (!ULTRA_VHBA_CHANNEL_OK_CLIENT
+ (dev->chanptr, NULL)) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed:[CLIENT]VHBA dev %d chan invalid.",
+ devNo);
+ POSTCODE_LINUX_4
+ (DEVICE_CREATE_FAILURE_PC,
+ devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
+ goto Away;
+ }
+ cmd.msgtype = GUEST_ADD_VHBA;
+ cmd.add_vhba.chanptr = dev->chanptr;
+ cmd.add_vhba.busNo = busNo;
+ cmd.add_vhba.deviceNo = devNo;
+ cmd.add_vhba.devInstGuid =
+ dev->devInstGuid;
+ cmd.add_vhba.intr = dev->intr;
+ } else
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVnicChannelProtocolGuid,
+ sizeof(GUID))) {
+ WAIT_FOR_VALID_GUID(((CHANNEL_HEADER
+ __iomem *) (dev->
+ chanptr))->
+ Type);
+ if (!ULTRA_VNIC_CHANNEL_OK_CLIENT
+ (dev->chanptr, NULL)) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: VNIC[CLIENT] dev %d chan invalid.",
+ devNo);
+ POSTCODE_LINUX_4
+ (DEVICE_CREATE_FAILURE_PC,
+ devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
+ goto Away;
+ }
+ cmd.msgtype = GUEST_ADD_VNIC;
+ cmd.add_vnic.chanptr = dev->chanptr;
+ cmd.add_vnic.busNo = busNo;
+ cmd.add_vnic.deviceNo = devNo;
+ cmd.add_vnic.devInstGuid =
+ dev->devInstGuid;
+ cmd.add_vhba.intr = dev->intr;
+ } else {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: unknown channelTypeGuid.\n");
+ POSTCODE_LINUX_4
+ (DEVICE_CREATE_FAILURE_PC, devNo,
+ busNo, POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ goto Away;
+ }
+
+ if (!VirtControlChanFunc) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci callback not registered.");
+ POSTCODE_LINUX_4
+ (DEVICE_CREATE_FAILURE_PC, devNo,
+ busNo, POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ goto Away;
+ }
+
+ if (!VirtControlChanFunc(&cmd)) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci GUEST_ADD_[VHBA||VNIC] returned error.");
+ POSTCODE_LINUX_4
+ (DEVICE_CREATE_FAILURE_PC, devNo,
+ busNo, POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ goto Away;
+ }
+ }
+ bus->device[devNo] = dev;
+ POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+ return CONTROLVM_RESP_SUCCESS;
+ }
+ }
+ read_unlock(&BusListLock);
+
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: failed to find bus %d.", busNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_BUS_INVALID;
+
+Away:
+ if (!msg->hdr.Flags.testMessage) {
+ uislib_iounmap(dev->chanptr);
+ dev->chanptr = NULL;
+ }
+
+ kfree(dev);
+ return result;
+}
+
+static int
+pause_device(CONTROLVM_MESSAGE *msg)
+{
+ U32 busNo, devNo;
+ struct bus_info *bus;
+ struct device_info *dev;
+ struct guest_msgs cmd;
+
+ busNo = msg->cmd.deviceChangeState.busNo;
+ devNo = msg->cmd.deviceChangeState.devNo;
+
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* make sure the device number is valid */
+ if (devNo >= bus->deviceCount) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: device(%d) >= deviceCount(%d).",
+ devNo, bus->deviceCount);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ }
+ /* make sure this device exists */
+ dev = bus->device[devNo];
+ if (!dev) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: device %d does not exist.",
+ devNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ read_unlock(&BusListLock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVhbaChannelProtocolGuid, sizeof(GUID))) {
+ cmd.msgtype = GUEST_PAUSE_VHBA;
+ cmd.pause_vhba.chanptr = dev->chanptr;
+ } else
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVnicChannelProtocolGuid,
+ sizeof(GUID))) {
+ cmd.msgtype = GUEST_PAUSE_VNIC;
+ cmd.pause_vnic.chanptr = dev->chanptr;
+ } else {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: unknown channelTypeGuid.\n");
+ return
+ CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ }
+
+ if (!VirtControlChanFunc) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: virtpci callback not registered.");
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+
+ if (!VirtControlChanFunc(&cmd)) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: virtpci GUEST_PAUSE_[VHBA||VNIC] returned error.");
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+ break;
+ }
+ }
+
+ if (!bus) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: bus %d does not exist",
+ busNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_BUS_INVALID;
+ }
+
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+resume_device(CONTROLVM_MESSAGE *msg)
+{
+ U32 busNo, devNo;
+ struct bus_info *bus;
+ struct device_info *dev;
+ struct guest_msgs cmd;
+
+ busNo = msg->cmd.deviceChangeState.busNo;
+ devNo = msg->cmd.deviceChangeState.devNo;
+
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* make sure the device number is valid */
+ if (devNo >= bus->deviceCount) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: device(%d) >= deviceCount(%d).",
+ devNo, bus->deviceCount);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ }
+ /* make sure this device exists */
+ dev = bus->device[devNo];
+ if (!dev) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: device %d does not exist.",
+ devNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ read_unlock(&BusListLock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (!memcmp(&dev->channelTypeGuid,
+ &UltraVhbaChannelProtocolGuid,
+ sizeof(GUID))) {
+ cmd.msgtype = GUEST_RESUME_VHBA;
+ cmd.resume_vhba.chanptr = dev->chanptr;
+ } else
+ if (!memcmp(&dev->channelTypeGuid,
+ &UltraVnicChannelProtocolGuid,
+ sizeof(GUID))) {
+ cmd.msgtype = GUEST_RESUME_VNIC;
+ cmd.resume_vnic.chanptr = dev->chanptr;
+ } else {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: unknown channelTypeGuid.\n");
+ return
+ CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ }
+
+ if (!VirtControlChanFunc) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: virtpci callback not registered.");
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+
+ if (!VirtControlChanFunc(&cmd)) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: virtpci GUEST_RESUME_[VHBA||VNIC] returned error.");
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+ break;
+ }
+ }
+
+ if (!bus) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: bus %d does not exist",
+ busNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_BUS_INVALID;
+ }
+
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ U32 busNo, devNo;
+ struct bus_info *bus;
+ struct device_info *dev;
+ struct guest_msgs cmd;
+
+ busNo = msg->cmd.destroyDevice.busNo;
+ devNo = msg->cmd.destroyDevice.devNo;
+
+ read_lock(&BusListLock);
+ LOGINF("destroy_device called for busNo=%u, devNo=%u", busNo, devNo);
+ for (bus = BusListHead; bus; bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* make sure the device number is valid */
+ if (devNo >= bus->deviceCount) {
+ LOGERR("CONTROLVM_DEVICE_DESTORY Failed: device(%d) >= deviceCount(%d).",
+ devNo, bus->deviceCount);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ }
+ /* make sure this device exists */
+ dev = bus->device[devNo];
+ if (!dev) {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: device %d does not exist.",
+ devNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ read_unlock(&BusListLock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVhbaChannelProtocolGuid, sizeof(GUID))) {
+ cmd.msgtype = GUEST_DEL_VHBA;
+ cmd.del_vhba.chanptr = dev->chanptr;
+ } else
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVnicChannelProtocolGuid,
+ sizeof(GUID))) {
+ cmd.msgtype = GUEST_DEL_VNIC;
+ cmd.del_vnic.chanptr = dev->chanptr;
+ } else {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: unknown channelTypeGuid.\n");
+ return
+ CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ }
+
+ if (!VirtControlChanFunc) {
+ LOGERR("CONTROLVM_DEVICE_DESTORY Failed: virtpci callback not registered.");
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+
+ if (!VirtControlChanFunc(&cmd)) {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: virtpci GUEST_DEL_[VHBA||VNIC] returned error.");
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+/* You must disable channel interrupts BEFORE you unmap the channel;
+ * if you unmap first, there may still be activity going on which
+ * accesses the channel, and you will get an "unable to handle
+ * kernel paging request".
+ */
+ if (dev->polling) {
+ LOGINF("calling uislib_disable_channel_interrupts");
+ uislib_disable_channel_interrupts(busNo, devNo);
+ }
+ /* unmap the channel memory for the device. */
+ if (!msg->hdr.Flags.testMessage) {
+ LOGINF("destroy_device, doing iounmap");
+ uislib_iounmap(dev->chanptr);
+ }
+ kfree(dev);
+ bus->device[devNo] = NULL;
+ break;
+ }
+ }
+
+ if (!bus) {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: bus %d does not exist",
+ busNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_BUS_INVALID;
+ }
+
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+init_chipset(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ MaxBusCount = msg->cmd.initChipset.busCount;
+ PlatformNumber = msg->cmd.initChipset.platformNumber;
+ PhysicalDataChan = 0;
+
+ /* We need to make sure we have our functions registered
+ * before processing messages. If we are a test vehicle the
+ * testMessage for init_chipset will be set. We can ignore the
+ * waits for the callbacks, since this will be manually entered
+ * from a user. If no testMessage is set, we will wait for the
+ * functions.
+ */
+ if (!msg->hdr.Flags.testMessage)
+ WAIT_ON_CALLBACK(VirtControlChanFunc);
+
+ chipset_inited = 1;
+ POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
+
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+stop_chipset(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ /* Check that all buses and switches have been torn down and
+ * destroyed.
+ */
+ if (BusListHead) {
+ /* Buses still exist. */
+ LOGERR("CONTROLVM_CHIPSET_STOP: BusListHead is not NULL");
+ return CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_BUS;
+ }
+ if (BusListCount) {
+ /* BusListHead is NULL, but BusListCount != 0 */
+ LOGERR("CONTROLVM_CHIPSET_STOP: BusListCount != 0");
+ return CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_BUS;
+ }
+
+ /* Buses are shut down. */
+ return visorchipset_chipset_notready();
+}
+
+static int
+delete_bus_glue(U32 busNo)
+{
+ CONTROLVM_MESSAGE msg;
+
+ init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
+ msg.cmd.destroyBus.busNo = busNo;
+ if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("destroy_bus failed. busNo=0x%x\n", busNo);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+delete_device_glue(U32 busNo, U32 devNo)
+{
+ CONTROLVM_MESSAGE msg;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
+ msg.cmd.destroyDevice.busNo = busNo;
+ msg.cmd.destroyDevice.devNo = devNo;
+ if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("destroy_device failed. busNo=0x%x devNo=0x%x\n", busNo,
+ devNo);
+ return 0;
+ }
+ return 1;
+}
+
+int
+uislib_client_inject_add_bus(U32 busNo, GUID instGuid,
+ U64 channelAddr, ulong nChannelBytes)
+{
+ CONTROLVM_MESSAGE msg;
+
+ LOGINF("enter busNo=0x%x\n", busNo);
+ /* step 0: init the chipset */
+ POSTCODE_LINUX_3(CHIPSET_INIT_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
+
+ if (!chipset_inited) {
+ /* step: initialize the chipset */
+ init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0);
+ /* this change is needed so that console will come up
+ * OK even when the bus 0 create comes in late. If the
+ * bus 0 create is the first create, then the add_vnic
+ * will work fine, but if the bus 0 create arrives
+ * after number 4, then the add_vnic will fail, and the
+ * ultraboot will fail.
+ */
+ msg.cmd.initChipset.busCount = 23;
+ msg.cmd.initChipset.switchCount = 0;
+ if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("init_chipset failed.\n");
+ return 0;
+ }
+ LOGINF("chipset initialized\n");
+ POSTCODE_LINUX_3(CHIPSET_INIT_EXIT_PC, busNo,
+ POSTCODE_SEVERITY_INFO);
+ }
+
+ /* step 1: create a bus */
+ POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_WARNING);
+ init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
+ msg.cmd.createBus.busNo = busNo;
+ msg.cmd.createBus.deviceCount = 23; /* devNo+1; */
+ msg.cmd.createBus.channelAddr = channelAddr;
+ msg.cmd.createBus.channelBytes = nChannelBytes;
+ if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("create_bus failed.\n");
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_add_bus);
+
+
+int
+uislib_client_inject_del_bus(U32 busNo)
+{
+ return delete_bus_glue(busNo);
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_del_bus);
+
+int
+uislib_client_inject_pause_vhba(U32 busNo, U32 devNo)
+{
+ CONTROLVM_MESSAGE msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.deviceChangeState.busNo = busNo;
+ msg.cmd.deviceChangeState.devNo = devNo;
+ msg.cmd.deviceChangeState.state = SegmentStateStandby;
+ rc = pause_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VHBA pause_device failed. busNo=0x%x devNo=0x%x\n",
+ busNo, devNo);
+ return rc;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vhba);
+
+int
+uislib_client_inject_resume_vhba(U32 busNo, U32 devNo)
+{
+ CONTROLVM_MESSAGE msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.deviceChangeState.busNo = busNo;
+ msg.cmd.deviceChangeState.devNo = devNo;
+ msg.cmd.deviceChangeState.state = SegmentStateRunning;
+ rc = resume_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VHBA resume_device failed. busNo=0x%x devNo=0x%x\n",
+ busNo, devNo);
+ return rc;
+ }
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vhba);
+
+int
+uislib_client_inject_add_vhba(U32 busNo, U32 devNo,
+ U64 phys_chan_addr, U32 chan_bytes,
+ int is_test_addr, GUID instGuid,
+ struct InterruptInfo *intr)
+{
+ CONTROLVM_MESSAGE msg;
+
+ LOGINF(" enter busNo=0x%x devNo=0x%x\n", busNo, devNo);
+ /* The chipset has been init'ed and the bus has been previously
+ * created - verify it still exists. Step 2: create the VHBA
+ * device on the bus.
+ */
+ POSTCODE_LINUX_4(VHBA_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
+ if (is_test_addr)
+ /* signify that the physical channel address does NOT
+ * need to be ioremap()ed
+ */
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.createDevice.busNo = busNo;
+ msg.cmd.createDevice.devNo = devNo;
+ msg.cmd.createDevice.devInstGuid = instGuid;
+ if (intr)
+ msg.cmd.createDevice.intr = *intr;
+ else
+ memset(&msg.cmd.createDevice.intr, 0,
+ sizeof(struct InterruptInfo));
+ msg.cmd.createDevice.channelAddr = phys_chan_addr;
+ if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
+ LOGERR("wrong channel size.chan_bytes = 0x%x IO_CHANNEL_SIZE= 0x%x\n",
+ chan_bytes, (unsigned int) MIN_IO_CHANNEL_SIZE);
+ POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, chan_bytes,
+ MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ msg.cmd.createDevice.channelBytes = chan_bytes;
+ msg.cmd.createDevice.dataTypeGuid = UltraVhbaChannelProtocolGuid;
+ if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VHBA create_device failed.\n");
+ POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ POSTCODE_LINUX_4(VHBA_CREATE_SUCCESS_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_add_vhba);
+
+int
+uislib_client_inject_del_vhba(U32 busNo, U32 devNo)
+{
+ return delete_device_glue(busNo, devNo);
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_del_vhba);
+
+int
+uislib_client_inject_add_vnic(U32 busNo, U32 devNo,
+ U64 phys_chan_addr, U32 chan_bytes,
+ int is_test_addr, GUID instGuid,
+ struct InterruptInfo *intr)
+{
+ CONTROLVM_MESSAGE msg;
+
+ LOGINF(" enter busNo=0x%x devNo=0x%x\n", busNo, devNo);
+ /* The chipset has been init'ed and the bus has been previously
+ * created - verify it still exists. Step 2: create the VNIC
+ * device on the bus.
+ */
+ POSTCODE_LINUX_4(VNIC_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
+ if (is_test_addr)
+ /* signify that the physical channel address does NOT
+ * need to be ioremap()ed
+ */
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.createDevice.busNo = busNo;
+ msg.cmd.createDevice.devNo = devNo;
+ msg.cmd.createDevice.devInstGuid = instGuid;
+ if (intr)
+ msg.cmd.createDevice.intr = *intr;
+ else
+ memset(&msg.cmd.createDevice.intr, 0,
+ sizeof(struct InterruptInfo));
+ msg.cmd.createDevice.channelAddr = phys_chan_addr;
+ if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
+ LOGERR("wrong channel size.chan_bytes = 0x%x IO_CHANNEL_SIZE= 0x%x\n",
+ chan_bytes, (unsigned int) MIN_IO_CHANNEL_SIZE);
+ POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, chan_bytes,
+ MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ msg.cmd.createDevice.channelBytes = chan_bytes;
+ msg.cmd.createDevice.dataTypeGuid = UltraVnicChannelProtocolGuid;
+ if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VNIC create_device failed.\n");
+ POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ POSTCODE_LINUX_4(VNIC_CREATE_SUCCESS_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_add_vnic);
+
+int
+uislib_client_inject_pause_vnic(U32 busNo, U32 devNo)
+{
+ CONTROLVM_MESSAGE msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.deviceChangeState.busNo = busNo;
+ msg.cmd.deviceChangeState.devNo = devNo;
+ msg.cmd.deviceChangeState.state = SegmentStateStandby;
+ rc = pause_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VNIC pause_device failed. busNo=0x%x devNo=0x%x\n",
+ busNo, devNo);
+ return -1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vnic);
+
+int
+uislib_client_inject_resume_vnic(U32 busNo, U32 devNo)
+{
+ CONTROLVM_MESSAGE msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.deviceChangeState.busNo = busNo;
+ msg.cmd.deviceChangeState.devNo = devNo;
+ msg.cmd.deviceChangeState.state = SegmentStateRunning;
+ rc = resume_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VNIC resume_device failed. busNo=0x%x devNo=0x%x\n",
+ busNo, devNo);
+ return -1;
+ }
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vnic);
+
+int
+uislib_client_inject_del_vnic(U32 busNo, U32 devNo)
+{
+ return delete_device_glue(busNo, devNo);
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_del_vnic);
+
+int
+uislib_client_add_vnic(U32 busNo)
+{
+ BOOL busCreated = FALSE;
+ int devNo = 0; /* Default to 0, since only one device
+ * will be created for this bus... */
+ GUID dummyGuid = GUID0;
+ CONTROLVM_MESSAGE msg;
+
+ init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.createBus.busNo = busNo;
+ msg.cmd.createBus.deviceCount = 4;
+ msg.cmd.createBus.channelAddr = 0;
+ msg.cmd.createBus.channelBytes = 0;
+ if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("client create_bus failed");
+ return 0;
+ }
+ busCreated = TRUE;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.createDevice.busNo = busNo;
+ msg.cmd.createDevice.devNo = devNo;
+ msg.cmd.createDevice.devInstGuid = dummyGuid;
+ memset(&msg.cmd.createDevice.intr, 0, sizeof(struct InterruptInfo));
+ msg.cmd.createDevice.channelAddr = PhysicalDataChan;
+ msg.cmd.createDevice.channelBytes = MIN_IO_CHANNEL_SIZE;
+ msg.cmd.createDevice.dataTypeGuid = UltraVnicChannelProtocolGuid;
+ if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("client create_device failed");
+ goto AwayCleanup;
+ }
+
+ return 1;
+
+AwayCleanup:
+ if (busCreated) {
+ init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.destroyBus.busNo = busNo;
+ if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
+ LOGERR("client destroy_bus failed.\n");
+ }
+
+ return 0;
+} /* end uislib_client_add_vnic */
+EXPORT_SYMBOL_GPL(uislib_client_add_vnic);
+
+int
+uislib_client_delete_vnic(U32 busNo)
+{
+ int devNo = 0; /* Default to 0, since only one device
+ * will be created for this bus... */
+ CONTROLVM_MESSAGE msg;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.destroyDevice.busNo = busNo;
+ msg.cmd.destroyDevice.devNo = devNo;
+ if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ /* Don't error exit - try to see if bus can be destroyed... */
+ LOGERR("client destroy_device failed.\n");
+ }
+
+ init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.destroyBus.busNo = busNo;
+ if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
+ LOGERR("client destroy_bus failed.\n");
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uislib_client_delete_vnic);
+/* end client_delete_vnic */
+
+void *
+uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln)
+{
+ /* __GFP_NORETRY means "ok to fail", meaning the allocation can
+ * return NULL. If you do NOT specify __GFP_NORETRY, Linux
+ * will go to extreme measures to get memory for you (like,
+ * invoke the oom killer), which will probably cripple the system.
+ */
+ void *p = kmem_cache_alloc(cur_pool, GFP_ATOMIC | __GFP_NORETRY);
+ if (p == NULL) {
+ LOGERR("uislib_malloc failed to alloc uiscmdrsp @%s:%d",
+ fn, ln);
+ return NULL;
+ }
+ return p;
+}
+EXPORT_SYMBOL_GPL(uislib_cache_alloc);
+
+void
+uislib_cache_free(struct kmem_cache *cur_pool, void *p, char *fn, int ln)
+{
+ if (p == NULL) {
+ LOGERR("uislib_free NULL pointer @%s:%d", fn, ln);
+ return;
+ }
+ kmem_cache_free(cur_pool, p);
+}
+EXPORT_SYMBOL_GPL(uislib_cache_free);
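+/*
+ * Illustrative pairing (hypothetical pool and type names). Because the
+ * allocation above passes __GFP_NORETRY, callers must handle a NULL
+ * return rather than assume success; typical use goes through the
+ * UISCACHEALLOC/UISCACHEFREE wrappers from uisutils.h so the calling
+ * file and line are logged on failure:
+ *
+ *	struct uiscmdrsp *cmdrsp = UISCACHEALLOC(cmdrsp_pool);
+ *
+ *	if (!cmdrsp)
+ *		return -ENOMEM;
+ *	...
+ *	UISCACHEFREE(cmdrsp_pool, cmdrsp);
+ */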
+
+/*****************************************************/
+/* proc filesystem callback functions */
+/*****************************************************/
+
+static ssize_t
+vnic_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int action = 0xffff, busNo = 0, i, result = 0;
+ char buf[4];
+ char direction;
+/* GUID guid; */
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("echo > /proc/uislib/vnic copy_from_user ****FAILED.\n");
+ return -EFAULT;
+ }
+
+ i = sscanf(buf, "%d%c", &action, &direction);
+ if (i != 2) {
+ LOGERR("unable to parse vnic proc parameters.\n");
+ return -EFAULT;
+ }
+
+ if ((direction != '-') && (direction != '+')) {
+ LOGERR("unable to determine whether to add or delete vnic\n");
+ return -EFAULT;
+ }
+
+ /* if (i < 1), i.e., if we didn't even read the action field,
+ * then action will default to 0xffff and the code below will
+ * fall through the switch and print usage.
+ */
+ switch (action) {
+ case 0:
+ /* call client method... */
+ busNo = 0; /* All client drivers use bus value of 0... */
+ if (direction == '+')
+ result = uislib_client_add_vnic(busNo);
+ else
+ result = uislib_client_delete_vnic(busNo);
+ if (!result) {
+ LOGERR("echo 0%c > /proc/uislib/vnic failed (client end)",
+ direction);
+ return -EFAULT;
+ }
+ return count;
+
+ default:
+ break;
+ }
+
+ LOGERR("USAGE: echo <action><direction (up/down)> > /proc/uislib/vnic");
+ LOGERR(" ");
+ LOGERR("Client Syntax");
+ LOGERR("-------------");
+ LOGERR("0+ ==> add vnic");
+ LOGERR("0- ==> delete vnic");
+ LOGERR(" ");
+ return count;
+} /* end vnic_proc_write */
+
+static ssize_t
+chipset_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int i, action = 0xffff;
+ char buf[4];
+ CONTROLVM_MESSAGE msg;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ memset(&msg, 0, sizeof(CONTROLVM_MESSAGE));
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user ****FAILED.\n");
+ return -EFAULT;
+ }
+
+ if (chipset_inited) {
+ LOGINF("Chipset already initialized\n");
+ return -EFAULT;
+ }
+ i = sscanf(buf, "%x", &action);
+
+ /* if (i < 1), i.e., if we didn't even read the action field,
+ * then action will default to 0xffff and the code below will
+ * fall through the switch and print usage.
+ */
+ switch (action) {
+ case 1:
+ /* GUEST */
+ /* step: initialize the chipset */
+ init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0);
+ msg.hdr.Flags.testMessage = 0;
+ msg.cmd.initChipset.busCount = 23;
+ msg.cmd.initChipset.switchCount = 23;
+
+ if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("init_chipset failed.\n");
+ return 0;
+ }
+ return 1;
+ case 2:
+ /* BOTH */
+ init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.initChipset.busCount = 23;
+ msg.cmd.initChipset.switchCount = 23;
+
+ if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("init_chipset failed.\n");
+ return 0;
+ }
+ return 1;
+
+ default:
+ break;
+ }
+
+ LOGERR("usage: 1 ==> init_chipset client\n");
+ LOGERR("usage: 2 ==> init_chipset test\n");
+ return -EFAULT;
+}
+
+#define PLINE(...) uisutil_add_proc_line_ex(&tot, buff, \
+ buff_len, __VA_ARGS__)
+
+static int
+info_proc_read_helper(char **buff, int *buff_len)
+{
+ int i, tot = 0;
+ struct bus_info *bus;
+
+ if (PLINE("\nBuses:\n") < 0)
+ goto err_done;
+
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+
+ if (PLINE(" bus=0x%p, busNo=%d, deviceCount=%d\n",
+ bus, bus->busNo, bus->deviceCount) < 0)
+ goto err_done_unlock;
+
+
+ if (PLINE(" Devices:\n") < 0)
+ goto err_done_unlock;
+
+ for (i = 0; i < bus->deviceCount; i++) {
+ if (bus->device[i]) {
+ if (PLINE(" busNo %d, device[%i]: 0x%p, chanptr=0x%p, swtch=0x%p\n",
+ bus->busNo, i, bus->device[i],
+ bus->device[i]->chanptr,
+ bus->device[i]->swtch) < 0)
+ goto err_done_unlock;
+
+ if (PLINE(" first_busy_cnt=%llu, moved_to_tail_cnt=%llu, last_on_list_cnt=%llu\n",
+ bus->device[i]->first_busy_cnt,
+ bus->device[i]->moved_to_tail_cnt,
+ bus->device[i]->last_on_list_cnt) < 0)
+ goto err_done_unlock;
+ }
+ }
+ }
+ read_unlock(&BusListLock);
+
+ if (PLINE("UisUtils_Registered_Services: %d\n",
+ atomic_read(&UisUtils_Registered_Services)) < 0)
+ goto err_done;
+ if (PLINE("cycles_before_wait %llu wait_cycles:%llu\n",
+ cycles_before_wait, wait_cycles) < 0)
+ goto err_done;
+ if (PLINE("tot_wakeup_cnt %llu:tot_wait_cnt %llu:tot_schedule_cnt %llu\n",
+ tot_wakeup_cnt, tot_wait_cnt, tot_schedule_cnt) < 0)
+ goto err_done;
+ if (PLINE("en_smart_wakeup %d\n", en_smart_wakeup) < 0)
+ goto err_done;
+ if (PLINE("tot_moved_to_tail_cnt %llu\n", tot_moved_to_tail_cnt) < 0)
+ goto err_done;
+
+ return tot;
+
+err_done_unlock:
+ read_unlock(&BusListLock);
+err_done:
+ return -1;
+}
+
+static ssize_t
+info_proc_read(struct file *file, char __user *buf, size_t len, loff_t *offset)
+{
+ char *temp;
+ int totalBytes = 0;
+ int remaining_bytes = PROC_READ_BUFFER_SIZE;
+
+/* *start = buf; */
+ if (ProcReadBuffer == NULL) {
+ DBGINF("ProcReadBuffer == NULL; allocating buffer.\n.");
+ ProcReadBuffer = vmalloc(PROC_READ_BUFFER_SIZE);
+
+ if (ProcReadBuffer == NULL) {
+ LOGERR("failed to allocate buffer to provide proc data.\n");
+ return -ENOMEM;
+ }
+ }
+
+ temp = ProcReadBuffer;
+
+ if ((*offset == 0) || (!ProcReadBufferValid)) {
+ DBGINF("calling info_proc_read_helper.\n");
+ /* if the read fails, then -1 will be returned */
+ totalBytes = info_proc_read_helper(&temp, &remaining_bytes);
+ ProcReadBufferValid = 1;
+ } else
+ totalBytes = strlen(ProcReadBuffer);
+
+ return simple_read_from_buffer(buf, len, offset,
+ ProcReadBuffer, totalBytes);
+}
+
+static ssize_t
+platformnumber_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int length = 0;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ length = sprintf(vbuf, "%d\n", PlatformNumber);
+
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
+
+#ifdef UISLIB_TEST_PROC
+
+/* proc/uislib/vbus/<x>/info */
+static int
+proc_info_vbus_show(struct seq_file *m, void *v)
+{
+ struct bus_info *bus = m->private;
+ int i, devInfoCount, x;
+ char buf[999];
+
+ if (bus == NULL)
+ return 0;
+ seq_printf(m, "Client device / client driver info for %s partition (vbus #%d):\n",
+ bus->partitionName, bus->busNo);
+ if ((bus->busChannelBytes == 0) || (bus->pBusChannel == NULL))
+ return 0;
+ devInfoCount =
+ (bus->busChannelBytes -
+ sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL)) /
+ sizeof(ULTRA_VBUS_DEVICEINFO);
+ x = VBUSCHANNEL_devInfoToStringBuffer(&bus->pBusChannel->ChpInfo, buf,
+ sizeof(buf) - 1, -1);
+ buf[x] = '\0';
+ seq_printf(m, "%s", buf);
+ x = VBUSCHANNEL_devInfoToStringBuffer(&bus->pBusChannel->BusInfo,
+ buf, sizeof(buf) - 1, -1);
+ buf[x] = '\0';
+ seq_printf(m, "%s", buf);
+ for (i = 0; i < devInfoCount; i++) {
+ x = VBUSCHANNEL_devInfoToStringBuffer(&bus->pBusChannel->
+ DevInfo[i], buf,
+ sizeof(buf) - 1, i);
+ if (x > 0) {
+ buf[x] = '\0';
+ seq_printf(m, "%s", buf);
+ }
+ }
+ return 0;
+}
+
+static ssize_t
+bus_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int server_flag = 0;
+ int i, action = 0xffff, result;
+ char buf[16];
+ CONTROLVM_MESSAGE msg;
+ U32 busNo, deviceCount;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ memset(&msg, 0, sizeof(CONTROLVM_MESSAGE));
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("echo > /proc/uislib/bus: copy_from_user ****FAILED.");
+ return -EFAULT;
+ }
+
+ i = sscanf(buf, "%x-%d-%d", &action, &busNo, &deviceCount);
+
+ /* if (i < 1), i.e., if we didn't even read the action field,
+ * then action will default to 0xffff and the code below will
+ * fall through the switch and print usage.
+ */
+ switch (action) {
+ case 0:
+ /* destroy a bus */
+ if (i != 2)
+ break;
+ init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, server_flag);
+ msg.cmd.destroyBus.busNo = busNo;
+
+ result = destroy_bus(&msg, NULL);
+
+ if (result != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("echo 0-%d > /proc/uislib/bus {CONTROLVM_BUS_DESTROY Failed} Result(%d)",
+ busNo, result);
+ return -EFAULT;
+ }
+ return count;
+ case 1:
+ /* create a bus */
+ if (i != 3)
+ break;
+ init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, server_flag);
+ msg.cmd.createBus.busNo = busNo;
+ msg.cmd.createBus.deviceCount = deviceCount;
+
+ result = create_bus(&msg, NULL);
+
+ if (result != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("echo 1-%d-%d > /proc/uislib/bus {CONTROLVM_BUS_CREATE Failed} Result(%d)",
+ busNo, deviceCount, result);
+ return -EFAULT;
+ }
+
+ return count;
+ default:
+ break;
+ }
+
+ LOGERR("USAGE: echo <action>-<busNo>... > /proc/uislib/bus");
+ LOGERR(" ");
+ LOGERR("Destruct Syntax ControlVM Message Id");
+ LOGERR("--------------- ---------------------");
+ LOGERR("0-<busNo> ==> CONTROLVM_BUS_DESTROY");
+ LOGERR(" ");
+ LOGERR("Construct Syntax ControlVM Message Id");
+ LOGERR("----------------------- -------------------- ");
+ LOGERR("1-<busNo>-<deviceCount> ==> CONTROLVM_BUS_CREATE");
+
+ return -EFAULT;
+}
+
+static ssize_t
+uislib_proc_read_writeonly(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t
+dev_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int server_flag = 0;
+ CONTROLVM_MESSAGE msg;
+ U32 busNo, devNo;
+ char buf[32];
+ unsigned int chanptr;
+ int type, i, action = 0xffff, result;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("echo > /proc/uislib/device: copy_from_user ****FAILED.");
+ return -EFAULT;
+ }
+
+ i = sscanf(buf, "%x-%d-%d-%x-%d",
+ &action, &busNo, &devNo, &chanptr, &type);
+
+ switch (action) {
+ case 0:
+ if (i != 3)
+ break;
+
+ /* destroy a device */
+ init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, server_flag);
+ msg.cmd.destroyDevice.busNo = busNo;
+ msg.cmd.destroyDevice.devNo = devNo;
+
+ result = destroy_device(&msg, NULL);
+
+ if (result != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("echo 0-%d-%d > /proc/uislib/device {CONTROLVM_DEVICE_DESTROY Failed} Result(%d)",
+ busNo, devNo, result);
+ return -EFAULT;
+ }
+
+ return count;
+
+ case 1:
+ if (i != 5)
+ break;
+
+ /* create a device */
+ init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, server_flag);
+ msg.cmd.createDevice.busNo = busNo;
+ msg.cmd.createDevice.devNo = devNo;
+ msg.cmd.createDevice.channelAddr = __pa(chanptr);
+ msg.cmd.createDevice.channelBytes = MIN_IO_CHANNEL_SIZE;
+
+ if (type == 0)
+ msg.cmd.createDevice.dataTypeGuid =
+ UltraVhbaChannelProtocolGuid;
+ else if (type == 1)
+ msg.cmd.createDevice.dataTypeGuid =
+ UltraVnicChannelProtocolGuid;
+ else {
+			LOGERR("echo 1-%d-%d-%x-<type> > /proc/uislib/device failed: invalid device type %d.",
+ busNo, devNo, chanptr, type);
+ return -EFAULT;
+ }
+
+ result = create_device(&msg, NULL);
+
+ if (result != CONTROLVM_RESP_SUCCESS) {
+ if (type == 0)
+ LOGERR("echo 1-%d-%d-%x-0 > /proc/uislib/device {CONTROLVM_DEVICE_CREATE[vHBA] Failed} Result(%d)",
+ busNo, devNo, chanptr, result);
+ else
+ LOGERR("echo 1-%d-%d-%x-1 > /proc/uislib/device {CONTROLVM_DEVICE_CREATE[vNIC] Failed} Result(%d)",
+ busNo, devNo, chanptr, result);
+ return -EFAULT;
+		}
+
+		return count;
+
+	default:
+ break;
+ }
+
+ LOGERR("USAGE: echo <action>-<busNo>-<devNo>... > /proc/uislib/device");
+ LOGERR(" ");
+ LOGERR("Destruct Syntax ControlVM Message Id");
+ LOGERR("----------------- ------------------------");
+ LOGERR("0-<busNo>-<devNo> ==> CONTROLVM_DEVICE_DESTROY");
+ LOGERR(" ");
+ LOGERR("Construct Syntax ControlVM Message Id");
+	LOGERR("---------------------------------- ----------------------- ");
+	LOGERR("1-<busNo>-<devNo>-<chanptr>-<type> ==> CONTROLVM_DEVICE_CREATE");
+ LOGERR(" <type = 0>: vHBA");
+ LOGERR(" <type = 1>: vNIC");
+ LOGERR(" ");
+
+ return -EFAULT;
+}
+
+static ssize_t
+cycles_before_wait_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[16];
+
+#define CYCLES_BEFORE_WAIT_USE_ERROR { \
+	LOGERR("Incorrect cycles_before_wait input.\n"); \
+	pr_info("Please pass the new value as a decimal integer:\n"); \
+	pr_info("e.g. echo 4000 > cycles_before_wait\n"); \
+	return -EFAULT; \
+}
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (count == 0)
+ CYCLES_BEFORE_WAIT_USE_ERROR;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user failed.\n");
+ return -EFAULT;
+ }
+	buf[count - 1] = '\0';	/* replace the trailing LF with a
+				 * NUL terminator */
+	/* parse cycles_before_wait; it must be a decimal integer */
+ if (sscanf(buf, "%lld", &cycles_before_wait) != 1)
+ CYCLES_BEFORE_WAIT_USE_ERROR;
+
+ return count;
+}
+
+static ssize_t
+reset_counts_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[16];
+ unsigned long long new_value;
+ struct bus_info *bus;
+ int i;
+
+#define RESET_COUNTS_USE_ERROR { \
+ LOGERR("Incorrect reset_counts Input.\n"); \
+ pr_info("Please pass the new value for the counters:\n"); \
+ pr_info("e.g. echo 0 > reset_counts\n"); \
+ return -EFAULT; \
+ }
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (count == 0)
+ RESET_COUNTS_USE_ERROR;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user failed.\n");
+ return -EFAULT;
+ }
+	buf[count - 1] = '\0';	/* replace the trailing LF with a
+				 * NUL terminator */
+	/* parse the new counter value; it must be a decimal integer */
+ if (sscanf(buf, "%llu", &new_value) != 1)
+ RESET_COUNTS_USE_ERROR;
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+
+ for (i = 0; i < bus->deviceCount; i++) {
+ if (bus->device[i]) {
+ bus->device[i]->first_busy_cnt = new_value;
+ bus->device[i]->moved_to_tail_cnt = new_value;
+ bus->device[i]->last_on_list_cnt = new_value;
+ }
+ }
+ }
+ read_unlock(&BusListLock);
+ tot_moved_to_tail_cnt = new_value;
+ tot_wait_cnt = new_value;
+ tot_wakeup_cnt = new_value;
+ tot_schedule_cnt = new_value;
+ return count;
+}
+
+static ssize_t
+smart_wakeup_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[16];
+ int new_value;
+
+#define SMART_WAKEUP_USE_ERROR { \
+	LOGERR("Incorrect smart_wakeup input: 0 disables smart_wakeup, 1 enables smart_wakeup.\n"); \
+ pr_info("echo 0 > smart_wakeup\n"); \
+ pr_info("echo 1 > smart_wakeup\n"); \
+ return -EFAULT; \
+ }
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (count == 0)
+ SMART_WAKEUP_USE_ERROR;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user failed.\n");
+ return -EFAULT;
+ }
+	buf[count - 1] = '\0';	/* replace the trailing LF with a
+				 * NUL terminator */
+	/* parse the smart_wakeup flag; it must be a decimal integer */
+ if (sscanf(buf, "%d", &new_value) != 1)
+ SMART_WAKEUP_USE_ERROR;
+ en_smart_wakeup = new_value;
+ return count;
+}
+
+static ssize_t
+test_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int i, action = 0xffff;
+ char buf[16];
+ CONTROLVM_MESSAGE msg;
+ S64 vrtc_offset;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ memset(&msg, 0, sizeof(CONTROLVM_MESSAGE));
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user ****FAILED.\n");
+ return -EFAULT;
+ }
+
+ i = sscanf(buf, "%x", &action);
+
+ /* if (i < 1), i.e., if we didn't even read the action field,
+ * then action will default to 0xffff and the code below will
+ * fall through the switch and print usage. */
+ switch (action) {
+ case 6:
+ msg.hdr.Id = CONTROLVM_CHIPSET_STOP;
+ msg.hdr.Flags.responseExpected = 1;
+ stop_chipset(&msg, NULL);
+ break;
+ case 7:
+ vrtc_offset = 0;
+ LOGERR("about to issue QUERY vrtc_offset=%LX", vrtc_offset);
+ vrtc_offset = Issue_VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET();
+ LOGERR("result is vrtc_offset=%LX", vrtc_offset);
+ break;
+ case 8:
+ vrtc_offset = 60;
+ LOGERR("about to increase physical time by 0x%LX seconds",
+ vrtc_offset);
+ vrtc_offset = Issue_VMCALL_UPDATE_PHYSICAL_TIME(vrtc_offset);
+ break;
+ case 9:
+ vrtc_offset = -60;
+ LOGERR("about to decrease physical time by 0x%LX seconds",
+ vrtc_offset);
+ vrtc_offset = Issue_VMCALL_UPDATE_PHYSICAL_TIME(vrtc_offset);
+ break;
+ default:
+ LOGERR("usage: 6 for CHIPSET_STOP\n");
+ LOGERR(" 7 for VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET()\n");
+ LOGERR(" 8 for VMCALL_UPDATE_PHYSICAL_TIME(60)\n");
+ LOGERR(" 9 for VMCALL_UPDATE_PHYSICAL_TIME(-60)\n");
+		return -EFAULT;
+ }
+ return count;
+}
+
+#endif /* UISLIB_TEST_PROC */
+
+static struct device_info *
+find_dev(U32 busNo, U32 devNo)
+{
+ struct bus_info *bus;
+ struct device_info *dev = NULL;
+
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* make sure the device number is valid */
+ if (devNo >= bus->deviceCount) {
+ LOGERR("%s bad busNo, devNo=%d,%d",
+ __func__,
+ (int) (busNo), (int) (devNo));
+ goto Away;
+ }
+ dev = bus->device[devNo];
+ if (!dev)
+ LOGERR("%s bad busNo, devNo=%d,%d",
+ __func__,
+ (int) (busNo), (int) (devNo));
+ goto Away;
+ }
+ }
+Away:
+ read_unlock(&BusListLock);
+ return dev;
+}
+
+/* This thread calls the "interrupt" function for each device that has
+ * enabled such using uislib_enable_channel_interrupts(). The "interrupt"
+ * function typically reads and processes the device's channel input
+ * queue. This thread repeatedly does this, until the thread is told to stop
+ * (via uisthread_stop()). Sleeping rules:
+ * - If we have called the "interrupt" function for all devices, and all of
+ * them have reported "nothing processed" (returned 0), then we will go to
+ * sleep for a maximum of POLLJIFFIES_NORMAL jiffies.
+ * - If anyone calls uislib_force_channel_interrupt(), the above jiffy
+ * sleep will be interrupted, and we will resume calling the "interrupt"
+ * function for all devices.
+ * - The list of devices is dynamically re-ordered in order to
+ * attempt to preserve fairness. Whenever we spin thru the list of
+ * devices and call the dev->interrupt() function, if we find
+ * devices which report that there is still more work to do, the
+ * first such device we find is moved to the end of the device
+ * list. This ensures that extremely busy devices don't starve out
+ * less-busy ones.
+ *
+ */
+static int
+Process_Incoming(void *v)
+{
+ unsigned long long cur_cycles, old_cycles, idle_cycles, delta_cycles;
+ struct list_head *new_tail = NULL;
+ int i;
+ UIS_DAEMONIZE("dev_incoming");
+ for (i = 0; i < 16; i++) {
+ old_cycles = get_cycles();
+ wait_event_timeout(Wakeup_Polling_Device_Channels,
+ 0, POLLJIFFIES_NORMAL);
+ cur_cycles = get_cycles();
+ if (wait_cycles == 0) {
+ wait_cycles = (cur_cycles - old_cycles);
+ } else {
+ if (wait_cycles < (cur_cycles - old_cycles))
+ wait_cycles = (cur_cycles - old_cycles);
+ }
+ }
+ LOGINF("wait_cycles=%llu", wait_cycles);
+ cycles_before_wait = wait_cycles;
+ idle_cycles = 0;
+ Go_Polling_Device_Channels = 0;
+ while (1) {
+ struct list_head *lelt, *tmp;
+ struct device_info *dev = NULL;
+
+ /* poll each channel for input */
+ LOCKSEM_UNINTERRUPTIBLE(&Lock_Polling_Device_Channels);
+ new_tail = NULL;
+ list_for_each_safe(lelt, tmp, &List_Polling_Device_Channels) {
+ int rc = 0;
+ dev = list_entry(lelt, struct device_info,
+ list_polling_device_channels);
+			LOCKSEM_UNINTERRUPTIBLE(&dev->interrupt_callback_lock);
+			if (!dev->interrupt) {
+				/* no handler; don't hold the semaphore */
+				UNLOCKSEM(&dev->interrupt_callback_lock);
+				continue;
+			}
+			rc = dev->interrupt(dev->interrupt_context);
+			UNLOCKSEM(&dev->interrupt_callback_lock);
+ if (rc) {
+ /* dev->interrupt returned, but there
+ * is still more work to do.
+ * Reschedule work to occur as soon as
+ * possible. */
+ idle_cycles = 0;
+ if (new_tail == NULL) {
+ dev->first_busy_cnt++;
+ if (!
+ (list_is_last
+ (lelt,
+ &List_Polling_Device_Channels))) {
+ new_tail = lelt;
+ dev->moved_to_tail_cnt++;
+ } else
+ dev->last_on_list_cnt++;
+ }
+
+ }
+ if (Incoming_ThreadInfo.should_stop)
+ break;
+ }
+ if (new_tail != NULL) {
+ tot_moved_to_tail_cnt++;
+ list_move_tail(new_tail, &List_Polling_Device_Channels);
+ }
+ UNLOCKSEM(&Lock_Polling_Device_Channels);
+ cur_cycles = get_cycles();
+ delta_cycles = cur_cycles - old_cycles;
+ old_cycles = cur_cycles;
+
+ /* At this point, we have scanned thru all of the
+ * channels, and at least one of the following is true:
+ * - there is no input waiting on any of the channels
+ * - we have received a signal to stop this thread
+ */
+ if (Incoming_ThreadInfo.should_stop)
+ break;
+ if (en_smart_wakeup == 0xFF) {
+ LOGINF("en_smart_wakeup set to 0xff, to force exiting process_incoming");
+ break;
+ }
+		/* Wait for POLLJIFFIES_NORMAL jiffies, or until
+		 * someone wakes up Wakeup_Polling_Device_Channels,
+		 * whichever comes first.  Only do a wait when we have
+		 * been idle for cycles_before_wait cycles.
+		 */
+ if (idle_cycles > cycles_before_wait) {
+ Go_Polling_Device_Channels = 0;
+ tot_wait_cnt++;
+ wait_event_timeout(Wakeup_Polling_Device_Channels,
+ Go_Polling_Device_Channels,
+ POLLJIFFIES_NORMAL);
+ Go_Polling_Device_Channels = 1;
+ } else {
+ tot_schedule_cnt++;
+ schedule();
+ idle_cycles = idle_cycles + delta_cycles;
+ }
+ }
+ DBGINF("exiting.\n");
+ complete_and_exit(&Incoming_ThreadInfo.has_stopped, 0);
+}
+
+static BOOL
+Initialize_incoming_thread(void)
+{
+ if (Incoming_Thread_Started)
+ return TRUE;
+ if (!uisthread_start(&Incoming_ThreadInfo,
+ &Process_Incoming, NULL, "dev_incoming")) {
+ LOGERR("uisthread_start Initialize_incoming_thread ****FAILED");
+ return FALSE;
+ }
+ Incoming_Thread_Started = TRUE;
+ return TRUE;
+}
+
+/* Add a new device/channel to the list being processed by
+ * Process_Incoming().
+ * <interrupt> - indicates the function to call periodically.
+ * <interrupt_context> - indicates the data to pass to the <interrupt>
+ * function.
+ */
+void
+uislib_enable_channel_interrupts(U32 busNo, U32 devNo,
+ int (*interrupt)(void *),
+ void *interrupt_context)
+{
+ struct device_info *dev;
+ dev = find_dev(busNo, devNo);
+ if (!dev) {
+ LOGERR("%s busNo=%d, devNo=%d", __func__, (int) (busNo),
+ (int) (devNo));
+ return;
+ }
+ LOCKSEM_UNINTERRUPTIBLE(&Lock_Polling_Device_Channels);
+ Initialize_incoming_thread();
+ dev->interrupt = interrupt;
+ dev->interrupt_context = interrupt_context;
+ dev->polling = TRUE;
+ list_add_tail(&(dev->list_polling_device_channels),
+ &List_Polling_Device_Channels);
+ UNLOCKSEM(&Lock_Polling_Device_Channels);
+}
+EXPORT_SYMBOL_GPL(uislib_enable_channel_interrupts);
+
+/* Remove a device/channel from the list being processed by
+ * Process_Incoming().
+ */
+void
+uislib_disable_channel_interrupts(U32 busNo, U32 devNo)
+{
+ struct device_info *dev;
+ dev = find_dev(busNo, devNo);
+ if (!dev) {
+ LOGERR("%s busNo=%d, devNo=%d", __func__, (int) (busNo),
+ (int) (devNo));
+ return;
+ }
+ LOCKSEM_UNINTERRUPTIBLE(&Lock_Polling_Device_Channels);
+ list_del(&dev->list_polling_device_channels);
+ dev->polling = FALSE;
+ dev->interrupt = NULL;
+ UNLOCKSEM(&Lock_Polling_Device_Channels);
+}
+EXPORT_SYMBOL_GPL(uislib_disable_channel_interrupts);
+
+static void
+do_wakeup_polling_device_channels(struct work_struct *dummy)
+{
+ if (!Go_Polling_Device_Channels) {
+ Go_Polling_Device_Channels = 1;
+ wake_up(&Wakeup_Polling_Device_Channels);
+ }
+}
+
+static DECLARE_WORK(Work_wakeup_polling_device_channels,
+ do_wakeup_polling_device_channels);
+
+/* Call this function when you want to send a hint to Process_Incoming() that
+ * your device might have more requests.
+ */
+void
+uislib_force_channel_interrupt(U32 busNo, U32 devNo)
+{
+ if (en_smart_wakeup == 0)
+ return;
+ if (Go_Polling_Device_Channels)
+ return;
+ /* The point of using schedule_work() instead of just doing
+ * the work inline is to force a slight delay before waking up
+ * the Process_Incoming() thread.
+ */
+ tot_wakeup_cnt++;
+ schedule_work(&Work_wakeup_polling_device_channels);
+}
+EXPORT_SYMBOL_GPL(uislib_force_channel_interrupt);
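+
+/*
+ * Illustrative sketch (not part of this driver): how a client device
+ * driver is expected to hook into the polling thread above.  The bus
+ * and device numbers and the my_intr()/mydev names are hypothetical.
+ *
+ *	static int my_intr(void *ctx)
+ *	{
+ *		struct mydev *md = ctx;
+ *		// drain md's channel; return nonzero if more work remains
+ *		return my_drain_one_request(md);
+ *	}
+ *
+ *	uislib_enable_channel_interrupts(busNo, devNo, my_intr, md);
+ *	...
+ *	// hint that more requests were just queued on the channel
+ *	uislib_force_channel_interrupt(busNo, devNo);
+ *	...
+ *	uislib_disable_channel_interrupts(busNo, devNo);
+ */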
+
+/*****************************************************/
+/* Module Init & Exit functions */
+/*****************************************************/
+
+static int __init
+uislib_mod_init(void)
+{
+
+ LOGINF("MONITORAPIS");
+
+ LOGINF("sizeof(struct uiscmdrsp):%lu bytes\n",
+ (ulong) sizeof(struct uiscmdrsp));
+ LOGINF("sizeof(struct phys_info):%lu\n",
+ (ulong) sizeof(struct phys_info));
+ LOGINF("sizeof(uiscmdrsp_scsi):%lu\n",
+ (ulong) sizeof(struct uiscmdrsp_scsi));
+ LOGINF("sizeof(uiscmdrsp_net):%lu\n",
+ (ulong) sizeof(struct uiscmdrsp_net));
+ LOGINF("sizeof(CONTROLVM_MESSAGE):%lu bytes\n",
+ (ulong) sizeof(CONTROLVM_MESSAGE));
+ LOGINF("sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL):%lu bytes\n",
+ (ulong) sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL));
+ LOGINF("sizeof(CHANNEL_HEADER):%lu bytes\n",
+ (ulong) sizeof(CHANNEL_HEADER));
+ LOGINF("sizeof(ULTRA_IO_CHANNEL_PROTOCOL):%lu bytes\n",
+ (ulong) sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
+ LOGINF("SIZEOF_CMDRSP:%lu bytes\n", SIZEOF_CMDRSP);
+ LOGINF("SIZEOF_PROTOCOL:%lu bytes\n", SIZEOF_PROTOCOL);
+
+ /* initialize global pointers to NULL */
+ BusListHead = NULL;
+ BusListCount = MaxBusCount = 0;
+ rwlock_init(&BusListLock);
+ VirtControlChanFunc = NULL;
+
+ /* Issue VMCALL_GET_CONTROLVM_ADDR to get CtrlChanPhysAddr and
+ * then map this physical address to a virtual address. */
+ POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ /* create the proc entries for the channels */
+ uislib_proc_dir = proc_mkdir(DIR_PROC_ENTRY, NULL);
+ /* (e.g., for /proc/uislib/vbus/<x>/info) */
+ uislib_proc_vbus_dir = proc_mkdir(DIR_VBUS_PROC_ENTRY, uislib_proc_dir);
+
+ vnic_proc_entry = proc_create(VNIC_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_vnic_fops);
+ SET_PROC_OWNER(vnic_proc_entry, THIS_MODULE);
+
+ /* for testing purposes only, create the proc entries for
+ * enqueuing Control Channel messages */
+ chipset_proc_entry =
+ proc_create(CHIPSET_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_chipset_fops);
+ SET_PROC_OWNER(chipset_proc_entry, THIS_MODULE);
+
+ info_proc_entry = proc_create(INFO_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_info_fops);
+ SET_PROC_OWNER(info_proc_entry, THIS_MODULE);
+
+ platformnumber_proc_entry =
+ proc_create(PLATFORMNUMBER_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_platformnumber_fops);
+	SET_PROC_OWNER(platformnumber_proc_entry, THIS_MODULE);
+
+ cycles_before_wait_proc_entry =
+ proc_create(CYCLES_BEFORE_WAIT_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_cycles_before_wait_fops);
+ SET_PROC_OWNER(cycles_before_wait_proc_entry, THIS_MODULE);
+
+ reset_counts_proc_entry =
+ proc_create(RESET_COUNTS_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_reset_counts_fops);
+ SET_PROC_OWNER(reset_counts_proc_entry, THIS_MODULE);
+
+ smart_wakeup_proc_entry =
+ proc_create(SMART_WAKEUP_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_smart_wakeup_fops);
+ SET_PROC_OWNER(smart_wakeup_proc_entry, THIS_MODULE);
+
+#ifdef UISLIB_TEST_PROC
+ test_proc_entry = proc_create(TEST_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_test_fops);
+ SET_PROC_OWNER(test_proc_entry, THIS_MODULE);
+
+ bus_proc_entry = proc_create(BUS_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_bus_fops);
+ SET_PROC_OWNER(bus_proc_entry, THIS_MODULE);
+
+ dev_proc_entry = proc_create(DEV_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_dev_fops);
+ SET_PROC_OWNER(dev_proc_entry, THIS_MODULE);
+#endif /* UISLIB_TEST_PROC */
+ POSTCODE_LINUX_3(DRIVER_EXIT_PC, 0, POSTCODE_SEVERITY_INFO);
+ return 0;
+}
+
+static void __exit
+uislib_mod_exit(void)
+{
+ if (disable_proc_entry)
+ remove_proc_entry(DISABLE_PROC_ENTRY_FN, uislib_proc_dir);
+ if (cycles_before_wait_proc_entry)
+ remove_proc_entry(CYCLES_BEFORE_WAIT_PROC_ENTRY_FN,
+ uislib_proc_dir);
+ if (reset_counts_proc_entry)
+ remove_proc_entry(RESET_COUNTS_PROC_ENTRY_FN, uislib_proc_dir);
+ if (smart_wakeup_proc_entry)
+ remove_proc_entry(SMART_WAKEUP_PROC_ENTRY_FN, uislib_proc_dir);
+ if (ctrlchan_proc_entry)
+ remove_proc_entry(CTRLCHAN_PROC_ENTRY_FN, uislib_proc_dir);
+ if (pmem_proc_entry)
+ remove_proc_entry(PMEM_PROC_ENTRY_FN, uislib_proc_dir);
+ if (info_proc_entry)
+ remove_proc_entry(INFO_PROC_ENTRY_FN, uislib_proc_dir);
+ if (switch_proc_entry)
+ remove_proc_entry(SWITCH_PROC_ENTRY_FN, uislib_proc_dir);
+ if (extport_proc_entry)
+ remove_proc_entry(EXTPORT_PROC_ENTRY_FN, uislib_proc_dir);
+ if (platformnumber_proc_entry)
+ remove_proc_entry(PLATFORMNUMBER_PROC_ENTRY_FN,
+ uislib_proc_dir);
+ if (bus_proc_entry)
+ remove_proc_entry(BUS_PROC_ENTRY_FN, uislib_proc_dir);
+ if (dev_proc_entry)
+ remove_proc_entry(DEV_PROC_ENTRY_FN, uislib_proc_dir);
+ if (vnic_proc_entry)
+ remove_proc_entry(VNIC_PROC_ENTRY_FN, uislib_proc_dir);
+ if (chipset_proc_entry)
+ remove_proc_entry(CHIPSET_PROC_ENTRY_FN, uislib_proc_dir);
+ if (uislib_proc_vbus_dir)
+ remove_proc_entry(DIR_VBUS_PROC_ENTRY, uislib_proc_dir);
+ if (uislib_proc_dir)
+ remove_proc_entry(DIR_PROC_ENTRY, NULL);
+
+ if (ProcReadBuffer) {
+ vfree(ProcReadBuffer);
+ ProcReadBuffer = NULL;
+ }
+
+ DBGINF("goodbye.\n");
+ return;
+}
+
+module_init(uislib_mod_init);
+module_exit(uislib_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Usha Srinivasan");
+MODULE_ALIAS("uislib");
+ /* this is extracted during depmod and kept in modules.dep */
diff --git a/drivers/staging/unisys/uislib/uisqueue.c b/drivers/staging/unisys/uislib/uisqueue.c
new file mode 100644
index 000000000000..40598ff1f4f2
--- /dev/null
+++ b/drivers/staging/unisys/uislib/uisqueue.c
@@ -0,0 +1,160 @@
+/* uisqueue.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* @ALL_INSPECTED */
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "uisutils.h"
+
+#include "chanstub.h"
+
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages */
+#define CURRENT_FILE_PC UISLIB_PC_uisqueue_c
+#define __MYFILE__ "uisqueue.c"
+
+#define CHECK_CACHE_ALIGN 0
+
+/*****************************************************/
+/* Exported functions */
+/*****************************************************/
+unsigned long long
+uisqueue_InterlockedOr(unsigned long long __iomem *Target,
+ unsigned long long Set)
+{
+ unsigned long long i;
+ unsigned long long j;
+
+ j = readq(Target);
+ do {
+ i = j;
+ j = uislibcmpxchg64((__force unsigned long long *)Target,
+ i, i | Set, sizeof(*(Target)));
+
+ } while (i != j);
+
+ return j;
+}
+EXPORT_SYMBOL_GPL(uisqueue_InterlockedOr);
+
+unsigned long long
+uisqueue_InterlockedAnd(unsigned long long __iomem *Target,
+ unsigned long long Set)
+{
+ unsigned long long i;
+ unsigned long long j;
+
+ j = readq(Target);
+ do {
+ i = j;
+ j = uislibcmpxchg64((__force unsigned long long *)Target,
+ i, i & Set, sizeof(*(Target)));
+
+ } while (i != j);
+
+ return j;
+}
+EXPORT_SYMBOL_GPL(uisqueue_InterlockedAnd);
+
+static U8
+do_locked_client_insert(struct uisqueue_info *queueinfo,
+ unsigned int whichqueue,
+ void *pSignal,
+ spinlock_t *lock,
+ unsigned char issueInterruptIfEmpty,
+ U64 interruptHandle, U8 *channelId)
+{
+ unsigned long flags;
+ unsigned char queueWasEmpty;
+ unsigned int locked = 0;
+ unsigned int acquired = 0;
+ U8 rc = 0;
+
+ spin_lock_irqsave(lock, flags);
+ locked = 1;
+
+ if (!ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(queueinfo->chan, channelId, NULL))
+ goto Away;
+
+ acquired = 1;
+
+ queueWasEmpty = visor_signalqueue_empty(queueinfo->chan, whichqueue);
+ if (!visor_signal_insert(queueinfo->chan, whichqueue, pSignal))
+ goto Away;
+ ULTRA_CHANNEL_CLIENT_RELEASE_OS(queueinfo->chan, channelId, NULL);
+ acquired = 0;
+ spin_unlock_irqrestore(lock, flags);
+ locked = 0;
+
+ queueinfo->packets_sent++;
+
+ rc = 1;
+Away:
+ if (acquired) {
+ ULTRA_CHANNEL_CLIENT_RELEASE_OS(queueinfo->chan, channelId,
+ NULL);
+ acquired = 0;
+ }
+ if (locked) {
+		spin_unlock_irqrestore(lock, flags);
+ locked = 0;
+ }
+
+ return rc;
+}
+
+int
+uisqueue_put_cmdrsp_with_lock_client(struct uisqueue_info *queueinfo,
+ struct uiscmdrsp *cmdrsp,
+ unsigned int whichqueue,
+ void *insertlock,
+ unsigned char issueInterruptIfEmpty,
+ U64 interruptHandle,
+ char oktowait, U8 *channelId)
+{
+ while (!do_locked_client_insert(queueinfo, whichqueue, cmdrsp,
+ (spinlock_t *) insertlock,
+ issueInterruptIfEmpty,
+ interruptHandle, channelId)) {
+ if (oktowait != OK_TO_WAIT) {
+ LOGERR("****FAILED visor_signal_insert failed; cannot wait; insert aborted\n");
+ return 0; /* failed to queue */
+ }
+ /* try again */
+ LOGERR("****FAILED visor_signal_insert failed; waiting to try again\n");
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(10));
+ }
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uisqueue_put_cmdrsp_with_lock_client);
+
+/* uisqueue_get_cmdrsp gets the cmdrsp entry at the head of the queue;
+ * returns 0 if the queue is empty */
+int
+uisqueue_get_cmdrsp(struct uisqueue_info *queueinfo,
+ void *cmdrsp, unsigned int whichqueue)
+{
+ if (!visor_signal_remove(queueinfo->chan, whichqueue, cmdrsp))
+ return 0;
+
+ queueinfo->packets_received++;
+
+ return 1; /* Success */
+}
+EXPORT_SYMBOL_GPL(uisqueue_get_cmdrsp);
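+
+/*
+ * Illustrative sketch (not part of this driver): a typical send/receive
+ * pairing around a struct uisqueue_info.  The queue indices, the cmdrsp
+ * handling and the lock/channelId variables are hypothetical placeholders.
+ *
+ *	// send: with OK_TO_WAIT this retries until the signal is inserted
+ *	uisqueue_put_cmdrsp_with_lock_client(queueinfo, cmdrsp,
+ *					     IOCHAN_TO_IOPART, &insertlock,
+ *					     0, 0, OK_TO_WAIT, channelId);
+ *
+ *	// receive: drain every response currently queued by the IO partition
+ *	while (uisqueue_get_cmdrsp(queueinfo, cmdrsp, IOCHAN_FROM_IOPART))
+ *		my_complete_one_response(cmdrsp);
+ */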
diff --git a/drivers/staging/unisys/uislib/uisthread.c b/drivers/staging/unisys/uislib/uisthread.c
new file mode 100644
index 000000000000..782b06aad56d
--- /dev/null
+++ b/drivers/staging/unisys/uislib/uisthread.c
@@ -0,0 +1,85 @@
+/* uisthread.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* @ALL_INSPECTED */
+#include <asm/processor.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include "uniklog.h"
+#include "uisutils.h"
+#include "uisthread.h"
+
+#define KILL(a, b, c) kill_pid(find_vpid(a), b, c)
+
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages
+ */
+#define CURRENT_FILE_PC UISLIB_PC_uisthread_c
+#define __MYFILE__ "uisthread.c"
+
+/*****************************************************/
+/* Exported functions */
+/*****************************************************/
+
+/* returns 0 for failure, 1 for success */
+int
+uisthread_start(struct uisthread_info *thrinfo,
+ int (*threadfn)(void *), void *thrcontext, char *name)
+{
+ thrinfo->should_stop = 0;
+	/* has_stopped is used to wait for the thread to exit */
+ init_completion(&thrinfo->has_stopped);
+	thrinfo->task = kthread_create(threadfn, thrcontext, "%s", name);
+ if (IS_ERR(thrinfo->task)) {
+ thrinfo->id = 0;
+ return 0; /* failure */
+ }
+ thrinfo->id = thrinfo->task->pid;
+ wake_up_process(thrinfo->task);
+ LOGINF("started thread pid:%d\n", thrinfo->id);
+ return 1;
+
+}
+EXPORT_SYMBOL_GPL(uisthread_start);
+
+void
+uisthread_stop(struct uisthread_info *thrinfo)
+{
+ int ret;
+ int stopped = 0;
+ if (thrinfo->id == 0)
+ return; /* thread not running */
+
+ LOGINF("uisthread_stop stopping id:%d\n", thrinfo->id);
+ thrinfo->should_stop = 1;
+ ret = KILL(thrinfo->id, SIGHUP, 1);
+ if (ret) {
+		LOGERR("unable to signal thread %d: error %d\n", thrinfo->id, ret);
+ } else {
+ /* give up if the thread has NOT died in 1 minute */
+ if (wait_for_completion_timeout(&thrinfo->has_stopped, 60 * HZ))
+ stopped = 1;
+ else
+			LOGERR("timed out waiting for thread %d to stop\n", thrinfo->id);
+ }
+ if (stopped) {
+ LOGINF("uisthread_stop stopped id:%d\n", thrinfo->id);
+ thrinfo->id = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(uisthread_stop);
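+
+/*
+ * Illustrative sketch (not part of this driver): the expected pairing of
+ * uisthread_start()/uisthread_stop().  The thread function must watch
+ * should_stop and signal has_stopped on exit, as Process_Incoming() in
+ * uislib.c does.  my_thread()/my_thrinfo/my_do_work() are hypothetical.
+ *
+ *	static struct uisthread_info my_thrinfo;
+ *
+ *	static int my_thread(void *ctx)
+ *	{
+ *		while (!my_thrinfo.should_stop)
+ *			my_do_work();
+ *		complete_and_exit(&my_thrinfo.has_stopped, 0);
+ *	}
+ *
+ *	uisthread_start(&my_thrinfo, my_thread, NULL, "my_thread");
+ *	...
+ *	uisthread_stop(&my_thrinfo);
+ */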
diff --git a/drivers/staging/unisys/uislib/uisutils.c b/drivers/staging/unisys/uislib/uisutils.c
new file mode 100644
index 000000000000..3178f75e1ebe
--- /dev/null
+++ b/drivers/staging/unisys/uislib/uisutils.c
@@ -0,0 +1,350 @@
+/* uisutils.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <commontypes.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include "uniklog.h"
+#include "uisutils.h"
+#include "version.h"
+#include "vbushelper.h"
+#include "guidutils.h"
+#include <linux/skbuff.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/highmem.h>
+#endif
+
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages
+ */
+#define CURRENT_FILE_PC UISLIB_PC_uisutils_c
+#define __MYFILE__ "uisutils.c"
+
+/* exports */
+atomic_t UisUtils_Registered_Services = ATOMIC_INIT(0);
+ /* num registrations via
+ * uisctrl_register_req_handler() or
+ * uisctrl_register_req_handler_ex() */
+
+
+/*****************************************************/
+/* Utility functions */
+/*****************************************************/
+
+int
+uisutil_add_proc_line_ex(int *total, char **buffer, int *buffer_remaining,
+ char *format, ...)
+{
+ va_list args;
+ int len;
+
+ DBGINF("buffer = 0x%p : *buffer = 0x%p.\n", buffer, *buffer);
+ va_start(args, format);
+ len = vsnprintf(*buffer, *buffer_remaining, format, args);
+ if (len >= *buffer_remaining) {
+ *buffer += *buffer_remaining;
+ *total += *buffer_remaining;
+ *buffer_remaining = 0;
+ LOGERR("bytes remaining is too small!\n");
+ return -1;
+ }
+ *buffer_remaining -= len;
+ *buffer += len;
+ *total += len;
+ return len;
+}
+EXPORT_SYMBOL_GPL(uisutil_add_proc_line_ex);
+
+int
+uisctrl_register_req_handler(int type, void *fptr,
+ ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo)
+{
+ LOGINF("type = %d, fptr = 0x%p.\n", type, fptr);
+
+ switch (type) {
+ case 2:
+ if (fptr) {
+ if (!VirtControlChanFunc)
+ atomic_inc(&UisUtils_Registered_Services);
+ VirtControlChanFunc = fptr;
+ } else {
+ if (VirtControlChanFunc)
+ atomic_dec(&UisUtils_Registered_Services);
+ VirtControlChanFunc = NULL;
+ }
+ break;
+
+ default:
+ LOGERR("invalid type %d.\n", type);
+ return 0;
+ }
+ if (chipset_DriverInfo)
+ BusDeviceInfo_Init(chipset_DriverInfo,
+ "chipset", "uislib",
+ VERSION, NULL, __DATE__, __TIME__);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uisctrl_register_req_handler);
+
+int
+uisctrl_register_req_handler_ex(GUID switchTypeGuid,
+ const char *switch_type_name,
+ int (*controlfunc)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*Server_Channel_Ok)(unsigned long
+ channelBytes),
+ int (*Server_Channel_Init)
+ (void *x, unsigned char *clientStr,
+ U32 clientStrLen, U64 bytes),
+ ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo)
+{
+ char s[99];
+ ReqHandlerInfo_t *pReqHandlerInfo;
+ int rc = 0; /* assume failure */
+ LOGINF("type=%s, controlfunc=0x%p.\n",
+ GUID_format1(&switchTypeGuid, s), controlfunc);
+ if (!controlfunc) {
+ LOGERR("%s: controlfunc must be supplied\n",
+ GUID_format1(&switchTypeGuid, s));
+ goto Away;
+ }
+ if (!Server_Channel_Ok) {
+ LOGERR("%s: Server_Channel_Ok must be supplied\n",
+ GUID_format1(&switchTypeGuid, s));
+ goto Away;
+ }
+ if (!Server_Channel_Init) {
+ LOGERR("%s: Server_Channel_Init must be supplied\n",
+ GUID_format1(&switchTypeGuid, s));
+ goto Away;
+ }
+ pReqHandlerInfo = ReqHandlerAdd(switchTypeGuid,
+ switch_type_name,
+ controlfunc,
+ min_channel_bytes,
+ Server_Channel_Ok, Server_Channel_Init);
+ if (!pReqHandlerInfo) {
+ LOGERR("failed to add %s to server list\n",
+ GUID_format1(&switchTypeGuid, s));
+ goto Away;
+ }
+
+ atomic_inc(&UisUtils_Registered_Services);
+ rc = 1; /* success */
+Away:
+ if (rc) {
+ if (chipset_DriverInfo)
+ BusDeviceInfo_Init(chipset_DriverInfo,
+ "chipset", "uislib",
+ VERSION, NULL,
+ __DATE__, __TIME__);
+ } else
+ LOGERR("failed to register type %s.\n",
+ GUID_format1(&switchTypeGuid, s));
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(uisctrl_register_req_handler_ex);
+
+int
+uisctrl_unregister_req_handler_ex(GUID switchTypeGuid)
+{
+ char s[99];
+ int rc = 0; /* assume failure */
+ LOGINF("type=%s.\n", GUID_format1(&switchTypeGuid, s));
+ if (ReqHandlerDel(switchTypeGuid) < 0) {
+ LOGERR("failed to remove %s from server list\n",
+ GUID_format1(&switchTypeGuid, s));
+ goto Away;
+ }
+ atomic_dec(&UisUtils_Registered_Services);
+ rc = 1; /* success */
+Away:
+ if (!rc)
+ LOGERR("failed to unregister type %s.\n",
+ GUID_format1(&switchTypeGuid, s));
+ return rc;
+}
+EXPORT_SYMBOL_GPL(uisctrl_unregister_req_handler_ex);
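+
+/*
+ * Illustrative sketch (not part of this driver): how a server-side module
+ * would typically pair the register/unregister calls above.  The GUID,
+ * name and my_* callbacks are hypothetical placeholders.
+ *
+ *	if (!uisctrl_register_req_handler_ex(MySwitchTypeGuid, "myswitch",
+ *					     my_controlfunc,
+ *					     MIN_IO_CHANNEL_SIZE,
+ *					     my_channel_ok, my_channel_init,
+ *					     &my_chipset_info))
+ *		return -ENODEV;
+ *	...
+ *	uisctrl_unregister_req_handler_ex(MySwitchTypeGuid);
+ */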
+
+/*
+ * unsigned int uisutil_copy_fragsinfo_from_skb(unsigned char *calling_ctx,
+ * void *skb_in,
+ * unsigned int firstfraglen,
+ * unsigned int frags_max,
+ * struct phys_info frags[])
+ *
+ * calling_ctx - input - a string that is displayed to show
+ *                       who called this func
+ * void *skb_in - skb whose frag info we're copying; the type is hidden
+ *                so we don't need to include skbuff.h in uisutils.h,
+ *                which is included in non-networking code.
+ * unsigned int firstfraglen - input - length of first fragment in skb
+ * unsigned int frags_max - input - max len of frags array
+ * struct phys_info frags[] - output - frags array filled in on output
+ * return value indicates number of
+ * entries filled in frags
+ */
+unsigned int
+uisutil_copy_fragsinfo_from_skb(unsigned char *calling_ctx, void *skb_in,
+ unsigned int firstfraglen,
+ unsigned int frags_max,
+ struct phys_info frags[])
+{
+ unsigned int count = 0, ii, size, offset = 0, numfrags;
+ struct sk_buff *skb = skb_in;
+
+ numfrags = skb_shinfo(skb)->nr_frags;
+
+ while (firstfraglen) {
+ if (count == frags_max) {
+ LOGERR("%s frags array too small: max:%d count:%d\n",
+ calling_ctx, frags_max, count);
+ return -1; /* failure */
+ }
+ frags[count].pi_pfn =
+ page_to_pfn(virt_to_page(skb->data + offset));
+ frags[count].pi_off =
+ (unsigned long) (skb->data + offset) & PI_PAGE_MASK;
+ size =
+ min(firstfraglen,
+ (unsigned int) (PI_PAGE_SIZE - frags[count].pi_off));
+ /* can take smallest of firstfraglen(what's left) OR
+ * bytes left in the page
+ */
+ frags[count].pi_len = size;
+ firstfraglen -= size;
+ offset += size;
+ count++;
+ }
+ if (numfrags) {
+ if ((count + numfrags) > frags_max) {
+ LOGERR("**** FAILED %s frags array too small: max:%d count+nr_frags:%d\n",
+ calling_ctx, frags_max, count + numfrags);
+ return -1; /* failure */
+ }
+
+		for (ii = 0; ii < numfrags; ii++) {
+			count = add_physinfo_entries(
+					page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[ii])),
+					skb_shinfo(skb)->frags[ii].page_offset,
+					skb_shinfo(skb)->frags[ii].size,
+					count, frags_max, frags);
+ if (count == 0) {
+ LOGERR("**** FAILED to add physinfo entries\n");
+ return -1; /* failure */
+ }
+ }
+ }
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *skbinlist;
+ int c;
+ for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
+ skbinlist = skbinlist->next) {
+
+ c = uisutil_copy_fragsinfo_from_skb("recursive",
+ skbinlist,
+ skbinlist->len -
+ skbinlist->data_len,
+ frags_max - count,
+ &frags[count]);
+ if (c == -1) {
+ LOGERR("**** FAILED recursive call failed\n");
+ return -1;
+ }
+ count += c;
+ }
+ }
+ return count;
+}
+EXPORT_SYMBOL_GPL(uisutil_copy_fragsinfo_from_skb);
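+
+/*
+ * Illustrative sketch (not part of this driver): how a network client
+ * would typically use the helper above before handing an skb to the IO
+ * partition.  The error handling and my_* names are hypothetical;
+ * MAX_PHYS_INFO is defined elsewhere in this patch set.
+ *
+ *	struct phys_info frags[MAX_PHYS_INFO];
+ *	int count;
+ *
+ *	count = uisutil_copy_fragsinfo_from_skb("xmit", skb,
+ *						skb_headlen(skb),
+ *						MAX_PHYS_INFO, frags);
+ *	if (count == -1)
+ *		return -EIO;	// frags[] was too small for this skb
+ *	// copy frags[0..count-1] into the outgoing cmdrsp before queueing
+ */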
+
+static LIST_HEAD(ReqHandlerInfo_list); /* list of ReqHandlerInfo_t */
+static DEFINE_SPINLOCK(ReqHandlerInfo_list_lock);
+
+ReqHandlerInfo_t *
+ReqHandlerAdd(GUID switchTypeGuid,
+ const char *switch_type_name,
+ int (*controlfunc)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*Server_Channel_Ok)(unsigned long channelBytes),
+ int (*Server_Channel_Init)
+ (void *x, unsigned char *clientStr, U32 clientStrLen, U64 bytes))
+{
+ ReqHandlerInfo_t *rc = NULL;
+
+ rc = kzalloc(sizeof(*rc), GFP_ATOMIC);
+ if (!rc)
+ return NULL;
+ rc->switchTypeGuid = switchTypeGuid;
+ rc->controlfunc = controlfunc;
+ rc->min_channel_bytes = min_channel_bytes;
+ rc->Server_Channel_Ok = Server_Channel_Ok;
+ rc->Server_Channel_Init = Server_Channel_Init;
+ if (switch_type_name)
+ strncpy(rc->switch_type_name, switch_type_name,
+ sizeof(rc->switch_type_name) - 1);
+ spin_lock(&ReqHandlerInfo_list_lock);
+ list_add_tail(&(rc->list_link), &ReqHandlerInfo_list);
+ spin_unlock(&ReqHandlerInfo_list_lock);
+
+ return rc;
+}
+
+ReqHandlerInfo_t *
+ReqHandlerFind(GUID switchTypeGuid)
+{
+ struct list_head *lelt, *tmp;
+ ReqHandlerInfo_t *entry = NULL;
+ spin_lock(&ReqHandlerInfo_list_lock);
+ list_for_each_safe(lelt, tmp, &ReqHandlerInfo_list) {
+ entry = list_entry(lelt, ReqHandlerInfo_t, list_link);
+ if (memcmp
+ (&entry->switchTypeGuid, &switchTypeGuid,
+ sizeof(GUID)) == 0) {
+ spin_unlock(&ReqHandlerInfo_list_lock);
+ return entry;
+ }
+ }
+ spin_unlock(&ReqHandlerInfo_list_lock);
+ return NULL;
+}
+
+int
+ReqHandlerDel(GUID switchTypeGuid)
+{
+ struct list_head *lelt, *tmp;
+ ReqHandlerInfo_t *entry = NULL;
+ int rc = -1;
+ spin_lock(&ReqHandlerInfo_list_lock);
+ list_for_each_safe(lelt, tmp, &ReqHandlerInfo_list) {
+ entry = list_entry(lelt, ReqHandlerInfo_t, list_link);
+ if (memcmp
+ (&entry->switchTypeGuid, &switchTypeGuid,
+ sizeof(GUID)) == 0) {
+ list_del(lelt);
+ kfree(entry);
+ rc++;
+ }
+ }
+ spin_unlock(&ReqHandlerInfo_list_lock);
+ return rc;
+}
diff --git a/drivers/staging/unisys/virthba/Kconfig b/drivers/staging/unisys/virthba/Kconfig
new file mode 100644
index 000000000000..c0d7986e78cb
--- /dev/null
+++ b/drivers/staging/unisys/virthba/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys virthba configuration
+#
+
+config UNISYS_VIRTHBA
+ tristate "Unisys virthba driver"
+ depends on UNISYSSPAR && UNISYS_VISORCHIPSET && UNISYS_CHANNELSTUB && UNISYS_UISLIB && UNISYS_VIRTPCI && SCSI
+ ---help---
+ If you say Y here, you will enable the Unisys virthba driver.
+
diff --git a/drivers/staging/unisys/virthba/Makefile b/drivers/staging/unisys/virthba/Makefile
new file mode 100644
index 000000000000..632b1c08b975
--- /dev/null
+++ b/drivers/staging/unisys/virthba/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for Unisys virthba
+#
+
+obj-$(CONFIG_UNISYS_VIRTHBA) += virthba.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/uislib
+ccflags-y += -Idrivers/staging/unisys/timskmod
+ccflags-y += -Idrivers/staging/unisys/visorchipset
+ccflags-y += -Idrivers/staging/unisys/virtpci
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/virthba/virthba.c b/drivers/staging/unisys/virthba/virthba.c
new file mode 100644
index 000000000000..817b11dfa19c
--- /dev/null
+++ b/drivers/staging/unisys/virthba/virthba.c
@@ -0,0 +1,1823 @@
+/* virthba.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#define EXPORT_SYMTAB
+
+/* If you want to turn on some debugging of write device data or read
+ * device data, define these two undefs.  You will probably want to
+ * customize the code which is here, since it was written assuming
+ * reading and writing a specific data file df.64M.txt, which is a
+ * 64-megabyte file created by Art Nilson using a script I wrote called
+ * cr_test_data.pl.  The data file consists of 256-byte lines of text
+ * which start with an 8-digit sequence number, a colon, and then
+ * letters after that.
+ */
+
+#undef DBGINF
+
+#include <linux/kernel.h>
+#ifdef CONFIG_MODVERSIONS
+#include <config/modversions.h>
+#endif
+
+#include "uniklog.h"
+#include "diagnostics/appos_subsystems.h"
+#include "uisutils.h"
+#include "uisqueue.h"
+#include "uisthread.h"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <asm/param.h>
+#include <linux/proc_fs.h>
+#include <linux/types.h>
+
+#include "virthba.h"
+#include "virtpci.h"
+#include "visorchipset.h"
+#include "version.h"
+#include "guestlinuxdebug.h"
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages
+ */
+#define CURRENT_FILE_PC VIRT_HBA_PC_virthba_c
+#define __MYFILE__ "virthba.c"
+
+/* NOTE: L1_CACHE_BYTES >=128 */
+#define DEVICE_ATTRIBUTE struct device_attribute
+
+/*****************************************************/
+/* Forward declarations */
+/*****************************************************/
+static int virthba_probe(struct virtpci_dev *dev,
+ const struct pci_device_id *id);
+static void virthba_remove(struct virtpci_dev *dev);
+static int virthba_abort_handler(struct scsi_cmnd *scsicmd);
+static int virthba_bus_reset_handler(struct scsi_cmnd *scsicmd);
+static int virthba_device_reset_handler(struct scsi_cmnd *scsicmd);
+static int virthba_host_reset_handler(struct scsi_cmnd *scsicmd);
+static const char *virthba_get_info(struct Scsi_Host *shp);
+static int virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+static int virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
+ void (*virthba_cmnd_done)(struct scsi_cmnd *));
+
+#ifdef DEF_SCSI_QCMD
+DEF_SCSI_QCMD(virthba_queue_command)
+#else
+#define virthba_queue_command virthba_queue_command_lck
+#endif
+
+
+static int virthba_slave_alloc(struct scsi_device *scsidev);
+static int virthba_slave_configure(struct scsi_device *scsidev);
+static void virthba_slave_destroy(struct scsi_device *scsidev);
+static int process_incoming_rsps(void *);
+static int virthba_serverup(struct virtpci_dev *virtpcidev);
+static int virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state);
+static void doDiskAddRemove(struct work_struct *work);
+static void virthba_serverdown_complete(struct work_struct *work);
+
+static ssize_t info_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static ssize_t rqwu_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t enable_ints_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t enable_ints_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+
+/*****************************************************/
+/* Globals */
+/*****************************************************/
+
+static int rsltq_wait_usecs = 4000; /* Default 4ms */
+static unsigned int MaxBuffLen;
+
+/* Module options */
+static char *virthba_options = "NONE";
+
+static const struct pci_device_id virthba_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_VIRTHBA)},
+ {0},
+};
+
+/* export virthba_id_table */
+MODULE_DEVICE_TABLE(pci, virthba_id_table);
+
+static struct workqueue_struct *virthba_serverdown_workqueue;
+
+static struct virtpci_driver virthba_driver = {
+ .name = "uisvirthba",
+ .version = VERSION,
+ .vertag = NULL,
+ .build_date = __DATE__,
+ .build_time = __TIME__,
+ .id_table = virthba_id_table,
+ .probe = virthba_probe,
+ .remove = virthba_remove,
+ .resume = virthba_serverup,
+ .suspend = virthba_serverdown
+};
+
+/* The Send and Receive Buffers of the IO Queue may both be full */
+#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS*2)
+#define INTERRUPT_VECTOR_MASK 0x3F
+
+struct scsipending {
+ char cmdtype; /* Type of pointer that is being stored */
+ void *sent; /* The Data being tracked */
+ /* struct scsi_cmnd *type for virthba_queue_command */
+ /* struct uiscmdrsp *type for management commands */
+};
+
+#define VIRTHBA_ERROR_COUNT 30
+#define IOS_ERROR_THRESHOLD 1000
+struct virtdisk_info {
+ U32 valid;
+ U32 channel, id, lun; /* Disk Path */
+ atomic_t ios_threshold;
+ atomic_t error_count;
+ struct virtdisk_info *next;
+};
+/* Each Scsi_Host has a host_data area that contains this struct. */
+struct virthba_info {
+ struct Scsi_Host *scsihost;
+ struct virtpci_dev *virtpcidev;
+ struct list_head dev_info_list;
+ struct chaninfo chinfo;
+ struct InterruptInfo intr; /* use recvInterrupt info to receive
+ interrupts when IOs complete */
+ int interrupt_vector;
+	/* Tracks the requests that have been forwarded to the IOVM
+	 * and haven't returned yet */
+	struct scsipending pending[MAX_PENDING_REQUESTS];
+ unsigned int nextinsert; /* Start search for next pending
+ free slot here */
+ spinlock_t privlock;
+ bool serverdown;
+ bool serverchangingstate;
+ unsigned long long acquire_failed_cnt;
+ unsigned long long interrupts_rcvd;
+ unsigned long long interrupts_notme;
+ unsigned long long interrupts_disabled;
+ struct work_struct serverdown_completion;
+ U64 __iomem *flags_addr;
+ atomic_t interrupt_rcvd;
+ wait_queue_head_t rsp_queue;
+ struct virtdisk_info head;
+};
+
+/* Work Data for DARWorkQ */
+struct diskaddremove {
+ U8 add; /* 0-remove, 1-add */
+ struct Scsi_Host *shost; /* Scsi Host for this virthba instance */
+ U32 channel, id, lun; /* Disk Path */
+ struct diskaddremove *next;
+};
+
+#define virtpci_dev_to_virthba_virthba_get_info(d) \
+ container_of(d, struct virthba_info, virtpcidev)
+
+static DEVICE_ATTRIBUTE *virthba_shost_attrs[];
+static struct scsi_host_template virthba_driver_template = {
+ .name = "Unisys Virtual HBA",
+ .proc_name = "uisvirthba",
+ .info = virthba_get_info,
+ .ioctl = virthba_ioctl,
+ .queuecommand = virthba_queue_command,
+ .eh_abort_handler = virthba_abort_handler,
+ .eh_device_reset_handler = virthba_device_reset_handler,
+ .eh_bus_reset_handler = virthba_bus_reset_handler,
+ .eh_host_reset_handler = virthba_host_reset_handler,
+ .shost_attrs = virthba_shost_attrs,
+
+#define VIRTHBA_MAX_CMNDS 128
+ .can_queue = VIRTHBA_MAX_CMNDS,
+ .sg_tablesize = 64, /* largest number of address/length pairs */
+ .this_id = -1,
+ .slave_alloc = virthba_slave_alloc,
+ .slave_configure = virthba_slave_configure,
+ .slave_destroy = virthba_slave_destroy,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+struct virthba_devices_open {
+ struct virthba_info *virthbainfo;
+};
+
+static const struct file_operations proc_info_fops = {
+ .read = info_proc_read,
+};
+
+static const struct file_operations proc_rqwu_fops = {
+ .write = rqwu_proc_write,
+};
+
+static const struct file_operations proc_enable_ints_fops = {
+ .read = enable_ints_read,
+ .write = enable_ints_write,
+};
+
+
+#define VIRTHBASOPENMAX 1
+/* array of open devices maintained by open() and close(); */
+static struct virthba_devices_open VirtHbasOpen[VIRTHBASOPENMAX];
+static struct proc_dir_entry *virthba_proc_dir;
+static struct proc_dir_entry *info_proc_entry;
+static struct proc_dir_entry *rqwaitus_proc_entry;
+static struct proc_dir_entry *enable_ints_proc_entry;
+#define INFO_PROC_ENTRY_FN "info"
+#define ENABLE_INTS_ENTRY_FN "enable_ints"
+#define RQWU_PROC_ENTRY_FN "rqwait_usecs"
+#define DIR_PROC_ENTRY "virthba"
+
+/*****************************************************/
+/* Local Functions */
+/*****************************************************/
+static int
+add_scsipending_entry(struct virthba_info *vhbainfo, char cmdtype, void *new)
+{
+ unsigned long flags;
+ int insert_location;
+
+ spin_lock_irqsave(&vhbainfo->privlock, flags);
+ insert_location = vhbainfo->nextinsert;
+ while (vhbainfo->pending[insert_location].sent != NULL) {
+ insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
+ if (insert_location == (int) vhbainfo->nextinsert) {
+ LOGERR("Queue should be full. insert_location<<%d>> Unable to find open slot for pending commands.\n",
+ insert_location);
+ spin_unlock_irqrestore(&vhbainfo->privlock, flags);
+ return -1;
+ }
+ }
+
+ vhbainfo->pending[insert_location].cmdtype = cmdtype;
+ vhbainfo->pending[insert_location].sent = new;
+ vhbainfo->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
+ spin_unlock_irqrestore(&vhbainfo->privlock, flags);
+
+ return insert_location;
+}
+
+static unsigned int
+add_scsipending_entry_with_wait(struct virthba_info *vhbainfo, char cmdtype,
+ void *new)
+{
+ int insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
+
+ while (insert_location == -1) {
+ LOGERR("Failed to find empty queue slot. Waiting to try again\n");
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(10));
+ insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
+ }
+
+ return (unsigned int) insert_location;
+}
+
+static void *
+del_scsipending_entry(struct virthba_info *vhbainfo, uintptr_t del)
+{
+ unsigned long flags;
+ void *sent = NULL;
+
+ if (del >= MAX_PENDING_REQUESTS) {
+ LOGERR("Invalid queue position <<%lu>> given to delete. MAX_PENDING_REQUESTS <<%d>>\n",
+ (unsigned long) del, MAX_PENDING_REQUESTS);
+ } else {
+ spin_lock_irqsave(&vhbainfo->privlock, flags);
+
+ if (vhbainfo->pending[del].sent == NULL)
+ LOGERR("Deleting already cleared queue entry at <<%lu>>.\n",
+ (unsigned long) del);
+
+ sent = vhbainfo->pending[del].sent;
+
+ vhbainfo->pending[del].cmdtype = 0;
+ vhbainfo->pending[del].sent = NULL;
+ spin_unlock_irqrestore(&vhbainfo->privlock, flags);
+ }
+
+ return sent;
+}
+
+/* DARWorkQ (Disk Add/Remove) */
+static struct work_struct DARWorkQ;
+static struct diskaddremove *DARWorkQHead;
+static spinlock_t DARWorkQLock;
+static unsigned short DARWorkQSched;
+#define QUEUE_DISKADDREMOVE(dar) { \
+ spin_lock_irqsave(&DARWorkQLock, flags); \
+ if (!DARWorkQHead) { \
+ DARWorkQHead = dar; \
+ dar->next = NULL; \
+ } \
+ else { \
+ dar->next = DARWorkQHead; \
+ DARWorkQHead = dar; \
+ } \
+ if (!DARWorkQSched) { \
+ schedule_work(&DARWorkQ); \
+ DARWorkQSched = 1; \
+ } \
+ spin_unlock_irqrestore(&DARWorkQLock, flags); \
+}
+
+static inline void
+SendDiskAddRemove(struct diskaddremove *dar)
+{
+ struct scsi_device *sdev;
+ int error;
+
+ sdev = scsi_device_lookup(dar->shost, dar->channel, dar->id, dar->lun);
+ if (sdev) {
+ if (!(dar->add))
+ scsi_remove_device(sdev);
+ } else if (dar->add) {
+ error =
+ scsi_add_device(dar->shost, dar->channel, dar->id,
+ dar->lun);
+ if (error)
+ LOGERR("Failed scsi_add_device: host_no=%d[chan=%d:id=%d:lun=%d]\n",
+ dar->shost->host_no, dar->channel, dar->id,
+ dar->lun);
+ } else
+ LOGERR("Failed scsi_device_lookup:[chan=%d:id=%d:lun=%d]\n",
+ dar->channel, dar->id, dar->lun);
+ kfree(dar);
+}
+
+/*****************************************************/
+/* DARWorkQ Handler Thread */
+/*****************************************************/
+static void
+doDiskAddRemove(struct work_struct *work)
+{
+ struct diskaddremove *dar;
+ struct diskaddremove *tmphead;
+ int i = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&DARWorkQLock, flags);
+ tmphead = DARWorkQHead;
+ DARWorkQHead = NULL;
+ DARWorkQSched = 0;
+ spin_unlock_irqrestore(&DARWorkQLock, flags);
+ while (tmphead) {
+ dar = tmphead;
+ tmphead = dar->next;
+ SendDiskAddRemove(dar);
+ i++;
+ }
+}
+
+/*****************************************************/
+/* Routine to add entry to DARWorkQ */
+/*****************************************************/
+static void
+process_disk_notify(struct Scsi_Host *shost, struct uiscmdrsp *cmdrsp)
+{
+ struct diskaddremove *dar;
+ unsigned long flags;
+
+ dar = kzalloc(sizeof(struct diskaddremove), GFP_ATOMIC);
+ if (dar) {
+ dar->add = cmdrsp->disknotify.add;
+ dar->shost = shost;
+ dar->channel = cmdrsp->disknotify.channel;
+ dar->id = cmdrsp->disknotify.id;
+ dar->lun = cmdrsp->disknotify.lun;
+ QUEUE_DISKADDREMOVE(dar);
+ } else {
+ LOGERR("kmalloc failed for dar. host_no=%d[chan=%d:id=%d:lun=%d]\n",
+ shost->host_no, cmdrsp->disknotify.channel,
+ cmdrsp->disknotify.id, cmdrsp->disknotify.lun);
+ }
+}
+
+/*****************************************************/
+/* Probe Remove Functions */
+/*****************************************************/
+static irqreturn_t
+virthba_ISR(int irq, void *dev_id)
+{
+ struct virthba_info *virthbainfo = (struct virthba_info *) dev_id;
+ CHANNEL_HEADER __iomem *pChannelHeader;
+ SIGNAL_QUEUE_HEADER __iomem *pqhdr;
+ U64 mask;
+ unsigned long long rc1;
+
+ if (virthbainfo == NULL)
+ return IRQ_NONE;
+ virthbainfo->interrupts_rcvd++;
+ pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
+ if (((readq(&pChannelHeader->Features)
+ & ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0)
+ && ((readq(&pChannelHeader->Features) &
+ ULTRA_IO_DRIVER_DISABLES_INTS) !=
+ 0)) {
+ virthbainfo->interrupts_disabled++;
+ mask = ~ULTRA_CHANNEL_ENABLE_INTS;
+ rc1 = uisqueue_InterlockedAnd(virthbainfo->flags_addr, mask);
+ }
+ if (visor_signalqueue_empty(pChannelHeader, IOCHAN_FROM_IOPART)) {
+ virthbainfo->interrupts_notme++;
+ return IRQ_NONE;
+ }
+ pqhdr = (SIGNAL_QUEUE_HEADER __iomem *)
+ ((char __iomem *) pChannelHeader +
+ readq(&pChannelHeader->oChannelSpace)) + IOCHAN_FROM_IOPART;
+ writeq(readq(&pqhdr->NumInterruptsReceived) + 1,
+ &pqhdr->NumInterruptsReceived);
+ atomic_set(&virthbainfo->interrupt_rcvd, 1);
+ wake_up_interruptible(&virthbainfo->rsp_queue);
+ return IRQ_HANDLED;
+}
+
+static int
+virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
+{
+ int error;
+ struct Scsi_Host *scsihost;
+ struct virthba_info *virthbainfo;
+ int rsp;
+ int i;
+ irq_handler_t handler = virthba_ISR;
+ CHANNEL_HEADER __iomem *pChannelHeader;
+ SIGNAL_QUEUE_HEADER __iomem *pqhdr;
+ U64 mask;
+
+ LOGVER("entering virthba_probe...\n");
+ LOGVER("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+
+ LOGINF("entering virthba_probe...\n");
+ LOGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+ POSTCODE_LINUX_2(VHBA_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ /* call scsi_host_alloc to register a scsi host adapter
+ * instance - this virthba that has just been created is an
+ * instance of a scsi host adapter. This scsi_host_alloc
+ * function allocates a new Scsi_Host struct & performs basic
+	 * initialization. The host is not published to the scsi
+ * midlayer until scsi_add_host is called.
+ */
+ DBGINF("calling scsi_host_alloc.\n");
+
+ /* arg 2 passed in length of extra space we want allocated
+ * with scsi_host struct for our own use scsi_host_alloc
+ * assign host_no
+ */
+ scsihost = scsi_host_alloc(&virthba_driver_template,
+ sizeof(struct virthba_info));
+ if (scsihost == NULL)
+ return -ENODEV;
+
+ DBGINF("scsihost: 0x%p, scsihost->this_id: %d, host_no: %d.\n",
+ scsihost, scsihost->this_id, scsihost->host_no);
+
+ scsihost->this_id = UIS_MAGIC_VHBA;
+ /* linux treats max-channel differently than max-id & max-lun.
+ * In the latter cases, those two values result in 0 to max-1
+ * (inclusive) being scanned. But in the case of channels, the
+ * scan is 0 to max (inclusive); so we will subtract one from
+ * the max-channel value.
+ */
+ LOGINF("virtpcidev->scsi.max.max_channel=%u, max_id=%u, max_lun=%u, cmd_per_lun=%u, max_io_size=%u\n",
+ (unsigned) virtpcidev->scsi.max.max_channel - 1,
+ (unsigned) virtpcidev->scsi.max.max_id,
+ (unsigned) virtpcidev->scsi.max.max_lun,
+ (unsigned) virtpcidev->scsi.max.cmd_per_lun,
+ (unsigned) virtpcidev->scsi.max.max_io_size);
+ scsihost->max_channel = (unsigned) virtpcidev->scsi.max.max_channel;
+ scsihost->max_id = (unsigned) virtpcidev->scsi.max.max_id;
+ scsihost->max_lun = (unsigned) virtpcidev->scsi.max.max_lun;
+ scsihost->cmd_per_lun = (unsigned) virtpcidev->scsi.max.cmd_per_lun;
+ scsihost->max_sectors =
+ (unsigned short) (virtpcidev->scsi.max.max_io_size >> 9);
+ scsihost->sg_tablesize =
+ (unsigned short) (virtpcidev->scsi.max.max_io_size / PAGE_SIZE);
+ if (scsihost->sg_tablesize > MAX_PHYS_INFO)
+ scsihost->sg_tablesize = MAX_PHYS_INFO;
+ LOGINF("scsihost->max_channel=%u, max_id=%u, max_lun=%u, cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n",
+ scsihost->max_channel, scsihost->max_id, scsihost->max_lun,
+ scsihost->cmd_per_lun, scsihost->max_sectors,
+ scsihost->sg_tablesize);
+ LOGINF("scsihost->can_queue=%u, scsihost->cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n",
+ scsihost->can_queue, scsihost->cmd_per_lun, scsihost->max_sectors,
+ scsihost->sg_tablesize);
+
+ DBGINF("calling scsi_add_host\n");
+
+ /* this creates "host%d" in sysfs. If 2nd argument is NULL,
+ * then this generic /sys/devices/platform/host? device is
+ * created and /sys/scsi_host/host? ->
+ * /sys/devices/platform/host? If 2nd argument is not NULL,
+ * then this generic /sys/devices/<path>/host? is created and
+ * host? points to that device instead.
+ */
+ error = scsi_add_host(scsihost, &virtpcidev->generic_dev);
+ if (error) {
+ LOGERR("scsi_add_host ****FAILED 0x%x TBD - RECOVER\n", error);
+ POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ /* drop the refcount taken by scsi_host_alloc so the
+ * scsi_host gets deleted
+ */
+ scsi_host_put(scsihost);
+ return -ENODEV;
+ }
+
+ virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ memset(virthbainfo, 0, sizeof(struct virthba_info));
+ for (i = 0; i < VIRTHBASOPENMAX; i++) {
+ if (VirtHbasOpen[i].virthbainfo == NULL) {
+ VirtHbasOpen[i].virthbainfo = virthbainfo;
+ break;
+ }
+ }
+ virthbainfo->interrupt_vector = -1;
+ virthbainfo->chinfo.queueinfo = &virtpcidev->queueinfo;
+ virthbainfo->virtpcidev = virtpcidev;
+ spin_lock_init(&virthbainfo->chinfo.insertlock);
+
+ DBGINF("generic_dev: 0x%p, queueinfo: 0x%p.\n",
+ &virtpcidev->generic_dev, &virtpcidev->queueinfo);
+
+ init_waitqueue_head(&virthbainfo->rsp_queue);
+ spin_lock_init(&virthbainfo->privlock);
+ memset(&virthbainfo->pending, 0, sizeof(virthbainfo->pending));
+ virthbainfo->serverdown = false;
+ virthbainfo->serverchangingstate = false;
+
+ virthbainfo->intr = virtpcidev->intr;
+ /* save of host within virthba_info */
+ virthbainfo->scsihost = scsihost;
+
+ /* save of host within virtpci_dev */
+ virtpcidev->scsi.scsihost = scsihost;
+
+ /* Setup workqueue for serverdown messages */
+ INIT_WORK(&virthbainfo->serverdown_completion,
+ virthba_serverdown_complete);
+
+ writeq(readq(&virthbainfo->chinfo.queueinfo->chan->Features) |
+ ULTRA_IO_CHANNEL_IS_POLLING,
+ &virthbainfo->chinfo.queueinfo->chan->Features);
+ /* start thread that will receive scsicmnd responses */
+ DBGINF("starting rsp thread -- queueinfo: 0x%p, threadinfo: 0x%p.\n",
+ virthbainfo->chinfo.queueinfo, &virthbainfo->chinfo.threadinfo);
+
+ pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
+ pqhdr = (SIGNAL_QUEUE_HEADER __iomem *)
+ ((char __iomem *)pChannelHeader +
+ readq(&pChannelHeader->oChannelSpace)) + IOCHAN_FROM_IOPART;
+ virthbainfo->flags_addr = &pqhdr->FeatureFlags;
+
+ if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
+ process_incoming_rsps,
+ virthbainfo, "vhba_incoming")) {
+ LOGERR("uisthread_start rsp ****FAILED\n");
+ /* drop the refcount taken by scsi_host_alloc so the
+ * scsi_host gets deleted
+ */
+ POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ scsi_host_put(scsihost);
+ return -ENODEV;
+ }
+ LOGINF("sendInterruptHandle=0x%16llX",
+ virthbainfo->intr.sendInterruptHandle);
+ LOGINF("recvInterruptHandle=0x%16llX",
+ virthbainfo->intr.recvInterruptHandle);
+ LOGINF("recvInterruptVector=0x%8X",
+ virthbainfo->intr.recvInterruptVector);
+ LOGINF("recvInterruptShared=0x%2X",
+ virthbainfo->intr.recvInterruptShared);
+ LOGINF("scsihost.hostt->name=%s", scsihost->hostt->name);
+ virthbainfo->interrupt_vector =
+ virthbainfo->intr.recvInterruptHandle & INTERRUPT_VECTOR_MASK;
+ rsp = request_irq(virthbainfo->interrupt_vector, handler, IRQF_SHARED,
+ scsihost->hostt->name, virthbainfo);
+ if (rsp != 0) {
+ LOGERR("request_irq(%d) uislib_virthba_ISR request failed with rsp=%d\n",
+ virthbainfo->interrupt_vector, rsp);
+ virthbainfo->interrupt_vector = -1;
+ POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ } else {
+ U64 __iomem *Features_addr =
+ &virthbainfo->chinfo.queueinfo->chan->Features;
+ LOGERR("request_irq(%d) uislib_virthba_ISR request succeeded\n",
+ virthbainfo->interrupt_vector);
+ mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
+ ULTRA_IO_DRIVER_DISABLES_INTS);
+ uisqueue_InterlockedAnd(Features_addr, mask);
+ mask = ULTRA_IO_DRIVER_ENABLES_INTS;
+ uisqueue_InterlockedOr(Features_addr, mask);
+ rsltq_wait_usecs = 4000000;
+ }
+
+ DBGINF("calling scsi_scan_host.\n");
+ scsi_scan_host(scsihost);
+ DBGINF("return from scsi_scan_host.\n");
+
+ LOGINF("virthba added scsihost:0x%p\n", scsihost);
+ POSTCODE_LINUX_2(VHBA_PROBE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ return 0;
+}
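+
+/* The probe/remove pair above follows the standard SCSI midlayer host
+ * lifecycle; roughly, with error handling omitted and names from this file:
+ *
+ *   scsihost = scsi_host_alloc(&virthba_driver_template,
+ *                              sizeof(struct virthba_info));
+ *   scsi_add_host(scsihost, &virtpcidev->generic_dev);  // publish to midlayer
+ *   scsi_scan_host(scsihost);                           // discover LUNs
+ *   ...
+ *   scsi_remove_host(scsihost);                         // on remove
+ *   scsi_host_put(scsihost);                            // drop alloc reference
+ */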
+
+static void
+virthba_remove(struct virtpci_dev *virtpcidev)
+{
+ struct virthba_info *virthbainfo;
+ struct Scsi_Host *scsihost =
+ (struct Scsi_Host *) virtpcidev->scsi.scsihost;
+
+ LOGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+ virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ if (virthbainfo->interrupt_vector != -1)
+ free_irq(virthbainfo->interrupt_vector, virthbainfo);
+ LOGINF("Removing virtpcidev: 0x%p, virthbainfo: 0x%p\n", virtpcidev,
+ virthbainfo);
+
+ DBGINF("removing scsihost: 0x%p, scsihost->this_id: %d\n", scsihost,
+ scsihost->this_id);
+ scsi_remove_host(scsihost);
+
+ DBGINF("stopping thread.\n");
+ uisthread_stop(&virthbainfo->chinfo.threadinfo);
+
+ DBGINF("calling scsi_host_put\n");
+
+ /* drop the refcount taken by scsi_host_alloc so the
+ * scsi_host gets deleted
+ */
+ scsi_host_put(scsihost);
+ LOGINF("virthba removed scsi_host.\n");
+}
+
+static int
+forward_vdiskmgmt_command(VDISK_MGMT_TYPES vdiskcmdtype,
+ struct Scsi_Host *scsihost,
+ struct uisscsi_dest *vdest)
+{
+ struct uiscmdrsp *cmdrsp;
+ struct virthba_info *virthbainfo =
+ (struct virthba_info *) scsihost->hostdata;
+ int notifyresult = 0xffff;
+ wait_queue_head_t notifyevent;
+
+ LOGINF("vDiskMgmt:%d %d:%d:%d\n", vdiskcmdtype,
+ vdest->channel, vdest->id, vdest->lun);
+
+ if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
+ DBGINF("Server is down/changing state. Returning Failure.\n");
+ return FAILED;
+ }
+
+ cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
+ if (cmdrsp == NULL) {
+ LOGERR("kmalloc of cmdrsp failed.\n");
+ return FAILED; /* reject */
+ }
+
+ init_waitqueue_head(&notifyevent);
+
+ /* issue VDISK_MGMT_CMD
+ * set type to command - as opposed to task mgmt
+ */
+ cmdrsp->cmdtype = CMD_VDISKMGMT_TYPE;
+ /* specify the event that has to be triggered when this cmd is
+ * complete
+ */
+ cmdrsp->vdiskmgmt.notify = (void *) &notifyevent;
+ cmdrsp->vdiskmgmt.notifyresult = (void *) &notifyresult;
+
+ /* save destination */
+ cmdrsp->vdiskmgmt.vdisktype = vdiskcmdtype;
+ cmdrsp->vdiskmgmt.vdest.channel = vdest->channel;
+ cmdrsp->vdiskmgmt.vdest.id = vdest->id;
+ cmdrsp->vdiskmgmt.vdest.lun = vdest->lun;
+ cmdrsp->vdiskmgmt.scsicmd =
+ (void *) (uintptr_t)
+ add_scsipending_entry_with_wait(virthbainfo, CMD_VDISKMGMT_TYPE,
+ (void *) cmdrsp);
+
+ uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
+ cmdrsp, IOCHAN_TO_IOPART,
+ &virthbainfo->chinfo.insertlock,
+ DONT_ISSUE_INTERRUPT, (U64) NULL,
+ OK_TO_WAIT, "vhba");
+ LOGINF("VdiskMgmt waiting on event notifyevent=0x%p\n",
+ cmdrsp->vdiskmgmt.notify);
+ wait_event(notifyevent, notifyresult != 0xffff);
+ LOGINF("VdiskMgmt complete; result:%d\n", cmdrsp->vdiskmgmt.result);
+ kfree(cmdrsp);
+ return SUCCESS;
+}
+
+/*****************************************************/
+/* Scsi Host support functions */
+/*****************************************************/
+
+static int
+forward_taskmgmt_command(TASK_MGMT_TYPES tasktype, struct scsi_device *scsidev)
+{
+ struct uiscmdrsp *cmdrsp;
+ struct virthba_info *virthbainfo =
+ (struct virthba_info *) scsidev->host->hostdata;
+ int notifyresult = 0xffff;
+ wait_queue_head_t notifyevent;
+
+ LOGINF("TaskMgmt:%d %d:%d:%d\n", tasktype,
+ scsidev->channel, scsidev->id, scsidev->lun);
+
+ if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
+ DBGINF("Server is down/changing state. Returning Failure.\n");
+ return FAILED;
+ }
+
+ cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
+ if (cmdrsp == NULL) {
+ LOGERR("kmalloc of cmdrsp failed.\n");
+ return FAILED; /* reject */
+ }
+
+ init_waitqueue_head(&notifyevent);
+
+ /* issue TASK_MGMT_ABORT_TASK */
+ /* set type to command - as opposed to task mgmt */
+ cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
+ /* specify the event that has to be triggered when this */
+ /* cmd is complete */
+ cmdrsp->scsitaskmgmt.notify = (void *) &notifyevent;
+ cmdrsp->scsitaskmgmt.notifyresult = (void *) &notifyresult;
+
+ /* save destination */
+ cmdrsp->scsitaskmgmt.tasktype = tasktype;
+ cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
+ cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
+ cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
+ cmdrsp->scsitaskmgmt.scsicmd =
+ (void *) (uintptr_t)
+ add_scsipending_entry_with_wait(virthbainfo,
+ CMD_SCSITASKMGMT_TYPE,
+ (void *) cmdrsp);
+
+ uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
+ cmdrsp, IOCHAN_TO_IOPART,
+ &virthbainfo->chinfo.insertlock,
+ DONT_ISSUE_INTERRUPT, (U64) NULL,
+ OK_TO_WAIT, "vhba");
+ LOGINF("TaskMgmt waiting on event notifyevent=0x%p\n",
+ cmdrsp->scsitaskmgmt.notify);
+ wait_event(notifyevent, notifyresult != 0xffff);
+ LOGINF("TaskMgmt complete; result:%d\n", cmdrsp->scsitaskmgmt.result);
+ kfree(cmdrsp);
+ return SUCCESS;
+}
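+
+/* Both forwarding helpers above use the same completion pattern: the caller
+ * parks a wait queue and a result word on its stack, publishes pointers to
+ * them in the cmdrsp, and sleeps until the response thread writes the result
+ * and wakes it (see complete_taskmgmt_command() below). A minimal sketch
+ * with the names used in this file:
+ *
+ *   int notifyresult = 0xffff;
+ *   wait_queue_head_t notifyevent;
+ *
+ *   init_waitqueue_head(&notifyevent);
+ *   cmdrsp->scsitaskmgmt.notify = (void *) &notifyevent;
+ *   cmdrsp->scsitaskmgmt.notifyresult = (void *) &notifyresult;
+ *   ... queue cmdrsp to the IO partition ...
+ *   wait_event(notifyevent, notifyresult != 0xffff);
+ */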
+
+/* The abort handler returns SUCCESS if it has succeeded in making the LLDD
+ * and all related hardware forget about the scmd.
+ */
+static int
+virthba_abort_handler(struct scsi_cmnd *scsicmd)
+{
+ /* issue TASK_MGMT_ABORT_TASK */
+ struct scsi_device *scsidev;
+ struct virtdisk_info *vdisk;
+
+ scsidev = scsicmd->device;
+ for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ vdisk->next; vdisk = vdisk->next) {
+ if ((scsidev->channel == vdisk->channel)
+ && (scsidev->id == vdisk->id)
+ && (scsidev->lun == vdisk->lun)) {
+ if (atomic_read(&vdisk->error_count) <
+ VIRTHBA_ERROR_COUNT) {
+ atomic_inc(&vdisk->error_count);
+ POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
+ POSTCODE_SEVERITY_INFO);
+ } else
+ atomic_set(&vdisk->ios_threshold,
+ IOS_ERROR_THRESHOLD);
+ }
+ }
+ return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd->device);
+}
+
+static int
+virthba_bus_reset_handler(struct scsi_cmnd *scsicmd)
+{
+ /* issue TASK_MGMT_TARGET_RESET for each target on the bus */
+ struct scsi_device *scsidev;
+ struct virtdisk_info *vdisk;
+
+ scsidev = scsicmd->device;
+ for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ vdisk->next; vdisk = vdisk->next) {
+ if ((scsidev->channel == vdisk->channel)
+ && (scsidev->id == vdisk->id)
+ && (scsidev->lun == vdisk->lun)) {
+ if (atomic_read(&vdisk->error_count) <
+ VIRTHBA_ERROR_COUNT) {
+ atomic_inc(&vdisk->error_count);
+ POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
+ POSTCODE_SEVERITY_INFO);
+ } else
+ atomic_set(&vdisk->ios_threshold,
+ IOS_ERROR_THRESHOLD);
+ }
+ }
+ return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd->device);
+}
+
+static int
+virthba_device_reset_handler(struct scsi_cmnd *scsicmd)
+{
+ /* issue TASK_MGMT_LUN_RESET */
+ struct scsi_device *scsidev;
+ struct virtdisk_info *vdisk;
+
+ scsidev = scsicmd->device;
+ for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ vdisk->next; vdisk = vdisk->next) {
+ if ((scsidev->channel == vdisk->channel)
+ && (scsidev->id == vdisk->id)
+ && (scsidev->lun == vdisk->lun)) {
+ if (atomic_read(&vdisk->error_count) <
+ VIRTHBA_ERROR_COUNT) {
+ atomic_inc(&vdisk->error_count);
+ POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
+ POSTCODE_SEVERITY_INFO);
+ } else
+ atomic_set(&vdisk->ios_threshold,
+ IOS_ERROR_THRESHOLD);
+ }
+ }
+ return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd->device);
+}
+
+static int
+virthba_host_reset_handler(struct scsi_cmnd *scsicmd)
+{
+ /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
+ LOGERR("virthba_host_reset_handler Not yet implemented\n");
+ return SUCCESS;
+}
+
+static char virthba_get_info_str[256];
+
+static const char *
+virthba_get_info(struct Scsi_Host *shp)
+{
+ /* Return version string */
+ sprintf(virthba_get_info_str, "virthba, version %s\n", VIRTHBA_VERSION);
+ return virthba_get_info_str;
+}
+
+static int
+virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
+{
+ DBGINF("In virthba_ioctl: ioctl: cmd=0x%x\n", cmd);
+ return -EINVAL;
+}
+
+/* This returns SCSI_MLQUEUE_DEVICE_BUSY if the signal queue to IOpart
+ * is full.
+ */
+static int
+virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
+ void (*virthba_cmnd_done)(struct scsi_cmnd *))
+{
+ struct scsi_device *scsidev = scsicmd->device;
+ int insert_location;
+ unsigned char op;
+ unsigned char *cdb = scsicmd->cmnd;
+ struct Scsi_Host *scsihost = scsidev->host;
+ struct uiscmdrsp *cmdrsp;
+ unsigned int i;
+ struct virthba_info *virthbainfo =
+ (struct virthba_info *) scsihost->hostdata;
+ struct scatterlist *sg = NULL;
+ struct scatterlist *sgl = NULL;
+ int sg_failed = 0;
+
+ if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
+ DBGINF("Server is down/changing state. Returning SCSI_MLQUEUE_DEVICE_BUSY.\n");
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+
+ cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
+ if (cmdrsp == NULL) {
+ LOGERR("kmalloc of cmdrsp failed.\n");
+ return 1; /* reject the command */
+ }
+
+ /* now saving everything we need from scsi_cmd into cmdrsp
+ * before we queue cmdrsp set type to command - as opposed to
+ * task mgmt
+ */
+ cmdrsp->cmdtype = CMD_SCSI_TYPE;
+ /* save the pending insertion location. Deletion from pending
+ * will return the scsicmd pointer for completion
+ */
+ insert_location =
+ add_scsipending_entry(virthbainfo, CMD_SCSI_TYPE, (void *) scsicmd);
+ if (insert_location != -1) {
+ cmdrsp->scsi.scsicmd = (void *) (uintptr_t) insert_location;
+ } else {
+ LOGERR("Queue is full. Returning busy.\n");
+ kfree(cmdrsp);
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+ /* save the done function that we have to call when the cmd is complete */
+ scsicmd->scsi_done = virthba_cmnd_done;
+ /* save destination */
+ cmdrsp->scsi.vdest.channel = scsidev->channel;
+ cmdrsp->scsi.vdest.id = scsidev->id;
+ cmdrsp->scsi.vdest.lun = scsidev->lun;
+ /* save datadir */
+ cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
+ memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
+
+ cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
+
+ /* keep track of the max buffer length so far. */
+ if (cmdrsp->scsi.bufflen > MaxBuffLen)
+ MaxBuffLen = cmdrsp->scsi.bufflen;
+
+ if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) {
+ LOGERR("scsicmd use_sg:%d greater than MAX:%d\n",
+ scsi_sg_count(scsicmd), MAX_PHYS_INFO);
+ del_scsipending_entry(virthbainfo, (uintptr_t) insert_location);
+ kfree(cmdrsp);
+ return 1; /* reject the command */
+ }
+
+ /* This is what we USED to do when we assumed we were running
+ * uissd & virthba on the same Linux system:
+ * cmdrsp->scsi.buffer = scsicmd->request_buffer;
+ * The following code does NOT make that assumption.
+ * Convert the buffer to phys information.
+ */
+ if (scsi_sg_count(scsicmd) == 0) {
+ if (scsi_bufflen(scsicmd) > 0) {
+ LOGERR("**** FAILED No scatter list for bufflen > 0\n");
+ BUG_ON(scsi_sg_count(scsicmd) == 0);
+ }
+ DBGINF("No sg; buffer:0x%p bufflen:%d\n",
+ scsi_sglist(scsicmd), scsi_bufflen(scsicmd));
+ } else {
+ /* buffer is scatterlist - copy it out */
+ sgl = scsi_sglist(scsicmd);
+
+ for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
+
+ cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
+ cmdrsp->scsi.gpi_list[i].length = sg->length;
+ if ((i != 0) && (sg->offset != 0))
+ LOGINF("Offset on a sg_entry other than zero =<<%d>>.\n",
+ sg->offset);
+ }
+
+ if (sg_failed) {
+ LOGERR("Start sg_list dump (entries %d, bufflen %d)...\n",
+ scsi_sg_count(scsicmd), cmdrsp->scsi.bufflen);
+ for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
+ LOGERR(" Entry(%d): page->[0x%p], phys->[0x%Lx], off(%d), len(%d)\n",
+ i, sg_page(sg),
+ (unsigned long long) sg_phys(sg),
+ sg->offset, sg->length);
+ }
+ LOGERR("Done sg_list dump.\n");
+ /* BUG(); ***** For now, let it fail in uissd
+ * if it is a problem, as it might just
+ * work
+ */
+ }
+
+ cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
+ }
+
+ op = cdb[0];
+ i = uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
+ cmdrsp, IOCHAN_TO_IOPART,
+ &virthbainfo->chinfo.
+ insertlock,
+ DONT_ISSUE_INTERRUPT,
+ (U64) NULL, DONT_WAIT, "vhba");
+ if (i == 0) {
+ /* queue must be full - and we said don't wait - return busy */
+ LOGERR("uisqueue_put_cmdrsp_with_lock ****FAILED\n");
+ kfree(cmdrsp);
+ del_scsipending_entry(virthbainfo, (uintptr_t) insert_location);
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+
+ /* we're done with cmdrsp space - data from it has been copied
+ * into channel - free it now.
+ */
+ kfree(cmdrsp);
+ return 0; /* non-zero implies host/device is busy */
+}
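+
+/* Note that the scsi_cmnd itself never travels over the channel; only the
+ * index returned by add_scsipending_entry() is carried in
+ * cmdrsp->scsi.scsicmd, and drain_queue() later exchanges that index back
+ * for the original command. Sketch, using the names from this file:
+ *
+ *   insert_location = add_scsipending_entry(virthbainfo, CMD_SCSI_TYPE,
+ *                                           (void *) scsicmd);
+ *   cmdrsp->scsi.scsicmd = (void *) (uintptr_t) insert_location;
+ *   ...
+ *   scsicmd = del_scsipending_entry(virthbainfo,
+ *                                   (uintptr_t) cmdrsp->scsi.scsicmd);
+ *   complete_scsi_command(cmdrsp, scsicmd);
+ */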
+
+static int
+virthba_slave_alloc(struct scsi_device *scsidev)
+{
+ /* this is called by the midlayer before the scan for new devices -
+ * the LLD can alloc any struct & do init if needed.
+ */
+ struct virtdisk_info *vdisk;
+ struct virtdisk_info *tmpvdisk;
+ struct virthba_info *virthbainfo;
+ struct Scsi_Host *scsihost = (struct Scsi_Host *) scsidev->host;
+
+ virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ if (!virthbainfo) {
+ LOGERR("Could not find virthba_info for scsihost\n");
+ return 0; /* even though we errored, treat as success */
+ }
+ for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
+ if (vdisk->next->valid &&
+ (vdisk->next->channel == scsidev->channel) &&
+ (vdisk->next->id == scsidev->id) &&
+ (vdisk->next->lun == scsidev->lun))
+ return 0;
+ }
+ tmpvdisk = kzalloc(sizeof(struct virtdisk_info), GFP_ATOMIC);
+ if (!tmpvdisk) { /* error allocating */
+ LOGERR("Could not allocate memory for disk\n");
+ return 0;
+ }
+
+ tmpvdisk->channel = scsidev->channel;
+ tmpvdisk->id = scsidev->id;
+ tmpvdisk->lun = scsidev->lun;
+ tmpvdisk->valid = 1;
+ vdisk->next = tmpvdisk;
+ return 0; /* success */
+}
+
+static int
+virthba_slave_configure(struct scsi_device *scsidev)
+{
+ return 0; /* success */
+}
+
+static void
+virthba_slave_destroy(struct scsi_device *scsidev)
+{
+ /* midlevel calls this after device has been quiesced and
+ * before it is to be deleted.
+ */
+ struct virtdisk_info *vdisk, *delvdisk;
+ struct virthba_info *virthbainfo;
+ struct Scsi_Host *scsihost = (struct Scsi_Host *) scsidev->host;
+
+ virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ if (!virthbainfo) {
+ LOGERR("Could not find virthba_info for scsihost\n");
+ return;
+ }
+ for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
+ if (vdisk->next->valid &&
+ (vdisk->next->channel == scsidev->channel) &&
+ (vdisk->next->id == scsidev->id) &&
+ (vdisk->next->lun == scsidev->lun)) {
+ delvdisk = vdisk->next;
+ vdisk->next = vdisk->next->next;
+ kfree(delvdisk);
+ return;
+ }
+ }
+ return;
+}
+
+/*****************************************************/
+/* Scsi Cmnd support thread */
+/*****************************************************/
+
+static void
+do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+{
+ struct virtdisk_info *vdisk;
+ struct scsi_device *scsidev;
+ struct sense_data *sd;
+
+ scsidev = scsicmd->device;
+ memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
+ sd = (struct sense_data *) scsicmd->sense_buffer;
+
+ /* Do not log errors for disk-not-present inquiries */
+ if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
+ (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
+ (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
+ return;
+
+ /* Okay see what our error_count is here.... */
+ for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ vdisk->next; vdisk = vdisk->next) {
+ if ((scsidev->channel != vdisk->channel)
+ || (scsidev->id != vdisk->id)
+ || (scsidev->lun != vdisk->lun))
+ continue;
+
+ if (atomic_read(&vdisk->error_count) < VIRTHBA_ERROR_COUNT) {
+ atomic_inc(&vdisk->error_count);
+ LOGERR("SCSICMD ****FAILED scsicmd:0x%p op:0x%x <%d:%d:%d:%d> 0x%x-0x%x-0x%x-0x%x-0x%x.\n",
+ scsicmd, cmdrsp->scsi.cmnd[0],
+ scsidev->host->host_no, scsidev->id,
+ scsidev->channel, scsidev->lun,
+ cmdrsp->scsi.linuxstat, sd->Valid, sd->SenseKey,
+ sd->AdditionalSenseCode,
+ sd->AdditionalSenseCodeQualifier);
+ if (atomic_read(&vdisk->error_count) ==
+ VIRTHBA_ERROR_COUNT) {
+ LOGERR("Throtling SCSICMD errors disk <%d:%d:%d:%d>\n",
+ scsidev->host->host_no, scsidev->id,
+ scsidev->channel, scsidev->lun);
+ }
+ atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
+ }
+ }
+}
+
+static void
+do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+{
+ struct scsi_device *scsidev;
+ unsigned char buf[36];
+ struct scatterlist *sg;
+ unsigned int i;
+ char *thispage;
+ char *thispage_orig;
+ int bufind = 0;
+ struct virtdisk_info *vdisk;
+
+ scsidev = scsicmd->device;
+ if ((cmdrsp->scsi.cmnd[0] == INQUIRY)
+ && (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
+ if (cmdrsp->scsi.no_disk_result == 0)
+ return;
+
+ /* Linux scsi code is weird; it wants a device at Lun 0 to
+ * issue report luns, but we don't want a disk there, so
+ * we'll present a processor there.
+ */
+ SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
+ scsidev->lun,
+ DEV_DISK_CAPABLE_NOT_PRESENT,
+ DEV_NOT_CAPABLE);
+
+ if (scsi_sg_count(scsicmd) == 0) {
+ if (scsi_bufflen(scsicmd) > 0) {
+ LOGERR("**** FAILED No scatter list for bufflen > 0\n");
+ BUG_ON(scsi_sg_count(scsicmd) == 0);
+ }
+ memcpy(scsi_sglist(scsicmd), buf,
+ cmdrsp->scsi.bufflen);
+ return;
+ }
+
+ sg = scsi_sglist(scsicmd);
+ for (i = 0; i < scsi_sg_count(scsicmd); i++) {
+ DBGVER("copying OUT OF buf into 0x%p %d\n",
+ sg_page(sg + i), sg[i].length);
+ thispage_orig = kmap_atomic(sg_page(sg + i));
+ thispage = (void *) ((unsigned long)thispage_orig |
+ sg[i].offset);
+ memcpy(thispage, buf + bufind, sg[i].length);
+ kunmap_atomic(thispage_orig);
+ bufind += sg[i].length;
+ }
+ } else {
+
+ vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
+ for ( ; vdisk->next; vdisk = vdisk->next) {
+ if ((scsidev->channel != vdisk->channel)
+ || (scsidev->id != vdisk->id)
+ || (scsidev->lun != vdisk->lun))
+ continue;
+
+ if (atomic_read(&vdisk->ios_threshold) > 0) {
+ atomic_dec(&vdisk->ios_threshold);
+ if (atomic_read(&vdisk->ios_threshold) == 0) {
+ LOGERR("Resetting error count for disk\n");
+ atomic_set(&vdisk->error_count, 0);
+ }
+ }
+ }
+ }
+}
+
+static void
+complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+{
+ DBGINF("cmdrsp: 0x%p, scsistat:0x%x.\n", cmdrsp, cmdrsp->scsi.scsistat);
+
+ /* take what we need out of cmdrsp and complete the scsicmd */
+ scsicmd->result = cmdrsp->scsi.linuxstat;
+ if (cmdrsp->scsi.linuxstat)
+ do_scsi_linuxstat(cmdrsp, scsicmd);
+ else
+ do_scsi_nolinuxstat(cmdrsp, scsicmd);
+
+ if (scsicmd->scsi_done) {
+ DBGVER("Scsi_DONE\n");
+ scsicmd->scsi_done(scsicmd);
+ }
+}
+
+static inline void
+complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
+{
+ /* copy the result of the taskmgmt and */
+ /* wake up the error handler that is waiting for this */
+ *(int *) cmdrsp->vdiskmgmt.notifyresult = cmdrsp->vdiskmgmt.result;
+ wake_up_all((wait_queue_head_t *) cmdrsp->vdiskmgmt.notify);
+ LOGINF("set notify result to %d\n", cmdrsp->vdiskmgmt.result);
+}
+
+static inline void
+complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
+{
+ /* copy the result of the taskmgmt and */
+ /* wake up the error handler that is waiting for this */
+ *(int *) cmdrsp->scsitaskmgmt.notifyresult =
+ cmdrsp->scsitaskmgmt.result;
+ wake_up_all((wait_queue_head_t *) cmdrsp->scsitaskmgmt.notify);
+ LOGINF("set notify result to %d\n", cmdrsp->scsitaskmgmt.result);
+}
+
+static void
+drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
+ struct uiscmdrsp *cmdrsp)
+{
+ unsigned long flags;
+ int qrslt = 0;
+ struct scsi_cmnd *scsicmd;
+ struct Scsi_Host *shost = virthbainfo->scsihost;
+
+ while (1) {
+ spin_lock_irqsave(&virthbainfo->chinfo.insertlock, flags);
+ if (!ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(dc->queueinfo->chan,
+ "vhba", NULL)) {
+ spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock,
+ flags);
+ virthbainfo->acquire_failed_cnt++;
+ break;
+ }
+ qrslt = uisqueue_get_cmdrsp(dc->queueinfo, cmdrsp,
+ IOCHAN_FROM_IOPART);
+ ULTRA_CHANNEL_CLIENT_RELEASE_OS(dc->queueinfo->chan,
+ "vhba", NULL);
+ spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, flags);
+ if (qrslt == 0)
+ break;
+ if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
+ /* scsicmd location is returned by the
+ * deletion
+ */
+ scsicmd = del_scsipending_entry(virthbainfo,
+ (uintptr_t) cmdrsp->scsi.scsicmd);
+ if (!scsicmd)
+ break;
+ /* complete the orig cmd */
+ complete_scsi_command(cmdrsp, scsicmd);
+ } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
+ if (!del_scsipending_entry(virthbainfo,
+ (uintptr_t) cmdrsp->scsitaskmgmt.scsicmd))
+ break;
+ complete_taskmgmt_command(cmdrsp);
+ } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
+ /* The vHba pointer has no meaning in
+ * a Client/Guest Partition. Let's be
+ * safe and set it to NULL now. Do
+ * not use it here! */
+ cmdrsp->disknotify.vHba = NULL;
+ process_disk_notify(shost, cmdrsp);
+ } else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
+ if (!del_scsipending_entry(virthbainfo,
+ (uintptr_t) cmdrsp->vdiskmgmt.scsicmd))
+ break;
+ complete_vdiskmgmt_command(cmdrsp);
+ } else
+ LOGERR("Invalid cmdtype %d\n", cmdrsp->cmdtype);
+ /* cmdrsp is now available for reuse */
+ }
+}
+
+
+/* main function for the thread that waits for scsi commands to arrive
+ * in a specified queue
+ */
+static int
+process_incoming_rsps(void *v)
+{
+ struct virthba_info *virthbainfo = v;
+ struct chaninfo *dc = &virthbainfo->chinfo;
+ struct uiscmdrsp *cmdrsp = NULL;
+ const int SZ = sizeof(struct uiscmdrsp);
+ U64 mask;
+ unsigned long long rc1;
+
+ UIS_DAEMONIZE("vhba_incoming");
+ /* alloc once and reuse */
+ cmdrsp = kmalloc(SZ, GFP_ATOMIC);
+ if (cmdrsp == NULL) {
+ LOGERR("process_incoming_rsps ****FAILED to malloc - thread exiting\n");
+ complete_and_exit(&dc->threadinfo.has_stopped, 0);
+ return 0;
+ }
+ mask = ULTRA_CHANNEL_ENABLE_INTS;
+ while (1) {
+ wait_event_interruptible_timeout(virthbainfo->rsp_queue,
+ (atomic_read(&virthbainfo->interrupt_rcvd) == 1),
+ usecs_to_jiffies(rsltq_wait_usecs));
+ atomic_set(&virthbainfo->interrupt_rcvd, 0);
+ /* drain queue */
+ drain_queue(virthbainfo, dc, cmdrsp);
+ rc1 = uisqueue_InterlockedOr(virthbainfo->flags_addr, mask);
+ if (dc->threadinfo.should_stop)
+ break;
+ }
+
+ kfree(cmdrsp);
+
+ DBGINF("exiting processing incoming rsps.\n");
+ complete_and_exit(&dc->threadinfo.has_stopped, 0);
+}
+
+/*****************************************************/
+/* proc filesystem functions */
+/*****************************************************/
+
+static ssize_t
+info_proc_read(struct file *file, char __user *buf, size_t len, loff_t *offset)
+{
+ int length = 0;
+ U64 phys_flags_addr;
+ int i;
+ struct virthba_info *virthbainfo;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ for (i = 0; i < VIRTHBASOPENMAX; i++) {
+ if (VirtHbasOpen[i].virthbainfo == NULL)
+ continue;
+
+ virthbainfo = VirtHbasOpen[i].virthbainfo;
+ length += sprintf(vbuf + length, "CHANSOCK is not defined.\n");
+
+ length += sprintf(vbuf + length, "MaxBuffLen:%d\n", MaxBuffLen);
+
+ length += sprintf(vbuf + length, "\nvirthba result queue poll wait:%d usecs.\n",
+ rsltq_wait_usecs);
+
+ length += sprintf(vbuf + length,
+ "\nModule build: Date:%s Time:%s\n",
+ __DATE__, __TIME__);
+ length += sprintf(vbuf + length, "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
+ virthbainfo->interrupts_rcvd,
+ virthbainfo->interrupts_disabled);
+ length += sprintf(vbuf + length, "\ninterrupts_notme = %llu,\n",
+ virthbainfo->interrupts_notme);
+ phys_flags_addr = virt_to_phys((__force void *)
+ virthbainfo->flags_addr);
+ length += sprintf(vbuf + length, "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
+ virthbainfo->flags_addr, phys_flags_addr,
+ (__le64)readq(virthbainfo->flags_addr));
+ length += sprintf(vbuf + length, "acquire_failed_cnt:%llu\n",
+ virthbainfo->acquire_failed_cnt);
+ length += sprintf(vbuf + length, "\n");
+ }
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
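+
+/* info_proc_read() above is a single-shot read: the whole report is built
+ * into a kernel buffer on the first read (*offset == 0) and later reads
+ * return 0 (EOF). The core of the pattern, with names from this file:
+ *
+ *   if (pos > 0 || !len)
+ *      return 0;                     // EOF after the first read
+ *   vbuf = kzalloc(len, GFP_KERNEL);
+ *   length += sprintf(vbuf + length, "...");
+ *   if (copy_to_user(buf, vbuf, length))
+ *      return -EFAULT;               // after freeing vbuf
+ *   *offset += length;
+ *   return length;
+ */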
+
+static ssize_t
+enable_ints_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t
+enable_ints_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[4];
+ int i, new_value;
+ struct virthba_info *virthbainfo;
+ U64 __iomem *Features_addr;
+ U64 mask;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ buf[count] = '\0';
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user failed. buf<<%.*s>> count<<%lu>>\n",
+ (int) count, buf, count);
+ return -EFAULT;
+ }
+
+ i = sscanf(buf, "%d", &new_value);
+
+ if (i < 1) {
+ LOGERR("Failed to scan value for enable_ints, buf<<%.*s>>",
+ (int) count, buf);
+ return -EFAULT;
+ }
+
+ /* set all counts to new_value usually 0 */
+ for (i = 0; i < VIRTHBASOPENMAX; i++) {
+ if (VirtHbasOpen[i].virthbainfo != NULL) {
+ virthbainfo = VirtHbasOpen[i].virthbainfo;
+ Features_addr =
+ &virthbainfo->chinfo.queueinfo->chan->Features;
+ if (new_value == 1) {
+ mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
+ ULTRA_IO_DRIVER_DISABLES_INTS);
+ uisqueue_InterlockedAnd(Features_addr, mask);
+ mask = ULTRA_IO_DRIVER_ENABLES_INTS;
+ uisqueue_InterlockedOr(Features_addr, mask);
+ rsltq_wait_usecs = 4000000;
+ } else {
+ mask = ~(ULTRA_IO_DRIVER_ENABLES_INTS |
+ ULTRA_IO_DRIVER_DISABLES_INTS);
+ uisqueue_InterlockedAnd(Features_addr, mask);
+ mask = ULTRA_IO_CHANNEL_IS_POLLING;
+ uisqueue_InterlockedOr(Features_addr, mask);
+ rsltq_wait_usecs = 4000;
+ }
+ }
+ }
+ return count;
+}
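+
+/* enable_ints_write() switches the channel between interrupt-driven and
+ * polled operation purely through the shared Features word, using the same
+ * interlocked helpers and masks as virthba_probe(). Sketch of the two
+ * transitions, names from this file:
+ *
+ *   // enable interrupts: clear polling/disable bits, set the enable bit
+ *   uisqueue_InterlockedAnd(Features_addr,
+ *      ~(ULTRA_IO_CHANNEL_IS_POLLING | ULTRA_IO_DRIVER_DISABLES_INTS));
+ *   uisqueue_InterlockedOr(Features_addr, ULTRA_IO_DRIVER_ENABLES_INTS);
+ *   rsltq_wait_usecs = 4000000;      // thread falls back to a slow 4 s poll
+ *
+ *   // disable interrupts: clear enable/disable bits, set the polling bit
+ *   uisqueue_InterlockedAnd(Features_addr,
+ *      ~(ULTRA_IO_DRIVER_ENABLES_INTS | ULTRA_IO_DRIVER_DISABLES_INTS));
+ *   uisqueue_InterlockedOr(Features_addr, ULTRA_IO_CHANNEL_IS_POLLING);
+ *   rsltq_wait_usecs = 4000;         // poll every 4 ms
+ */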
+
+static ssize_t
+rqwu_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[16];
+ int i, usecs;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ buf[count] = '\0';
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user failed. buf<<%.*s>> count<<%lu>>\n",
+ (int) count, buf, count);
+ return -EFAULT;
+ }
+
+ i = sscanf(buf, "%d", &usecs);
+
+ if (i < 1) {
+ LOGERR("Failed to scan value for rqwait_usecs buf<<%.*s>>",
+ (int) count, buf);
+ return -EFAULT;
+ }
+
+ /* set global wait time */
+ rsltq_wait_usecs = usecs;
+ return count;
+}
+
+/* As per VirtpciFunc returns 1 for success and 0 for failure */
+static int
+virthba_serverup(struct virtpci_dev *virtpcidev)
+{
+ struct virthba_info *virthbainfo =
+ (struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi.
+ scsihost)->hostdata;
+
+ DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+
+ if (!virthbainfo->serverdown) {
+ DBGINF("Server up message recieved while server is already up.\n");
+ return 1;
+ }
+ if (virthbainfo->serverchangingstate) {
+ LOGERR("Server already processing change state message\n");
+ return 0;
+ }
+
+ virthbainfo->serverchangingstate = true;
+ /* Must transition channel to ATTACHED state BEFORE we
+ * can start using the device again
+ */
+ ULTRA_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan,
+ dev_name(&virtpcidev->generic_dev),
+ CHANNELCLI_ATTACHED, NULL);
+
+ /* Start Processing the IOVM Response Queue Again */
+ if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
+ process_incoming_rsps,
+ virthbainfo, "vhba_incoming")) {
+ LOGERR("uisthread_start rsp ****FAILED\n");
+ return 0;
+ }
+ virthbainfo->serverdown = false;
+ virthbainfo->serverchangingstate = false;
+
+ return 1;
+}
+
+static void
+virthba_serverdown_complete(struct work_struct *work)
+{
+ struct virthba_info *virthbainfo;
+ struct virtpci_dev *virtpcidev;
+ int i;
+ struct scsipending *pendingdel = NULL;
+ struct scsi_cmnd *scsicmd = NULL;
+ struct uiscmdrsp *cmdrsp;
+ unsigned long flags;
+
+ virthbainfo = container_of(work, struct virthba_info,
+ serverdown_completion);
+
+ /* Stop Using the IOVM Response Queue (queue should be drained
+ * by the end)
+ */
+ uisthread_stop(&virthbainfo->chinfo.threadinfo);
+
+ /* Fail Commands that weren't completed */
+ spin_lock_irqsave(&virthbainfo->privlock, flags);
+ for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
+ pendingdel = &(virthbainfo->pending[i]);
+ switch (pendingdel->cmdtype) {
+ case CMD_SCSI_TYPE:
+ scsicmd = (struct scsi_cmnd *) pendingdel->sent;
+ scsicmd->result = (DID_RESET << 16);
+ if (scsicmd->scsi_done)
+ scsicmd->scsi_done(scsicmd);
+ break;
+ case CMD_SCSITASKMGMT_TYPE:
+ cmdrsp = (struct uiscmdrsp *) pendingdel->sent;
+ DBGINF("cmdrsp=0x%x, notify=0x%x\n", cmdrsp,
+ cmdrsp->scsitaskmgmt.notify);
+ *(int *) cmdrsp->scsitaskmgmt.notifyresult =
+ TASK_MGMT_FAILED;
+ wake_up_all((wait_queue_head_t *)
+ cmdrsp->scsitaskmgmt.notify);
+ break;
+ case CMD_VDISKMGMT_TYPE:
+ cmdrsp = (struct uiscmdrsp *) pendingdel->sent;
+ *(int *) cmdrsp->vdiskmgmt.notifyresult =
+ VDISK_MGMT_FAILED;
+ wake_up_all((wait_queue_head_t *)
+ cmdrsp->vdiskmgmt.notify);
+ break;
+ default:
+ if (pendingdel->sent != NULL)
+ LOGERR("Unknown command type: 0x%x. Only freeing list structure.\n",
+ pendingdel->cmdtype);
+ }
+ pendingdel->cmdtype = 0;
+ pendingdel->sent = NULL;
+ }
+ spin_unlock_irqrestore(&virthbainfo->privlock, flags);
+
+ virtpcidev = virthbainfo->virtpcidev;
+
+ DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+ virthbainfo->serverdown = true;
+ virthbainfo->serverchangingstate = false;
+ /* Return the ServerDown response to Command */
+ visorchipset_device_pause_response(virtpcidev->busNo,
+ virtpcidev->deviceNo, 0);
+}
+
+/* As per VirtpciFunc returns 1 for success and 0 for failure */
+static int
+virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state)
+{
+ struct virthba_info *virthbainfo =
+ (struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi.
+ scsihost)->hostdata;
+
+ DBGINF("virthba_serverdown");
+ DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+
+ if (!virthbainfo->serverdown && !virthbainfo->serverchangingstate) {
+ virthbainfo->serverchangingstate = true;
+ queue_work(virthba_serverdown_workqueue,
+ &virthbainfo->serverdown_completion);
+ } else if (virthbainfo->serverchangingstate) {
+ LOGERR("Server already processing change state message\n");
+ return 0;
+ } else
+ LOGERR("Server already down, but another server down message received.");
+
+ return 1;
+}
+
+/*****************************************************/
+/* Module Init & Exit functions */
+/*****************************************************/
+
+static int __init
+virthba_parse_line(char *str)
+{
+ DBGINF("In virthba_parse_line %s\n", str);
+ return 1;
+}
+
+static void __init
+virthba_parse_options(char *line)
+{
+ char *next = line;
+
+ POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ if (line == NULL || !*line)
+ return;
+ while ((line = next) != NULL) {
+ next = strchr(line, ' ');
+ if (next != NULL)
+ *next++ = 0;
+ if (!virthba_parse_line(line))
+ DBGINF("Unknown option '%s'\n", line);
+ }
+
+ POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+}
+
+static int __init
+virthba_mod_init(void)
+{
+ int error;
+ int i;
+
+ LOGINF("Entering virthba_mod_init...\n");
+
+ POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ virthba_parse_options(virthba_options);
+
+ error = virtpci_register_driver(&virthba_driver);
+ if (error < 0) {
+ LOGERR("register ****FAILED 0x%x\n", error);
+ POSTCODE_LINUX_3(VHBA_CREATE_FAILURE_PC, error,
+ POSTCODE_SEVERITY_ERR);
+ } else {
+ /* create the proc directories */
+ virthba_proc_dir = proc_mkdir(DIR_PROC_ENTRY, NULL);
+ info_proc_entry = proc_create(INFO_PROC_ENTRY_FN, 0,
+ virthba_proc_dir,
+ &proc_info_fops);
+ rqwaitus_proc_entry = proc_create(RQWU_PROC_ENTRY_FN, 0,
+ virthba_proc_dir,
+ &proc_rqwu_fops);
+ enable_ints_proc_entry = proc_create(ENABLE_INTS_ENTRY_FN, 0,
+ virthba_proc_dir,
+ &proc_enable_ints_fops);
+
+ /* Initialize DARWorkQ */
+ INIT_WORK(&DARWorkQ, doDiskAddRemove);
+ spin_lock_init(&DARWorkQLock);
+
+ /* clear out array */
+ for (i = 0; i < VIRTHBASOPENMAX; i++)
+ VirtHbasOpen[i].virthbainfo = NULL;
+ /* Initialize the serverdown workqueue */
+ virthba_serverdown_workqueue =
+ create_singlethread_workqueue("virthba_serverdown");
+ if (virthba_serverdown_workqueue == NULL) {
+ LOGERR("**** FAILED virthba_serverdown_workqueue creation\n");
+ POSTCODE_LINUX_2(VHBA_CREATE_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ error = -ENOMEM;
+ }
+ }
+
+ POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ LOGINF("Leaving virthba_mod_init\n");
+ return error;
+}
+
+static ssize_t
+virthba_acquire_lun(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uisscsi_dest vdest;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ int i;
+
+ i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
+ if (i != 3)
+ return -EINVAL;
+
+ return forward_vdiskmgmt_command(VDISK_MGMT_ACQUIRE, shost, &vdest);
+}
+
+static ssize_t
+virthba_release_lun(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uisscsi_dest vdest;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ int i;
+
+ i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
+ if (i != 3)
+ return -EINVAL;
+
+ return forward_vdiskmgmt_command(VDISK_MGMT_RELEASE, shost, &vdest);
+}
+
+#define CLASS_DEVICE_ATTR(_name, _mode, _show, _store) \
+ struct device_attribute class_device_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+static CLASS_DEVICE_ATTR(acquire_lun, S_IWUSR, NULL, virthba_acquire_lun);
+static CLASS_DEVICE_ATTR(release_lun, S_IWUSR, NULL, virthba_release_lun);
+
+static DEVICE_ATTRIBUTE *virthba_shost_attrs[] = {
+ &class_device_attr_acquire_lun,
+ &class_device_attr_release_lun,
+ NULL
+};
+
+static void __exit
+virthba_mod_exit(void)
+{
+ LOGINF("entering virthba_mod_exit...\n");
+
+ virtpci_unregister_driver(&virthba_driver);
+ /* unregister is going to call virthba_remove */
+ /* destroy serverdown completion workqueue */
+ if (virthba_serverdown_workqueue) {
+ destroy_workqueue(virthba_serverdown_workqueue);
+ virthba_serverdown_workqueue = NULL;
+ }
+
+ if (info_proc_entry)
+ remove_proc_entry(INFO_PROC_ENTRY_FN, virthba_proc_dir);
+
+ if (rqwaitus_proc_entry)
+ remove_proc_entry(RQWU_PROC_ENTRY_FN, virthba_proc_dir);
+
+ if (enable_ints_proc_entry)
+ remove_proc_entry(ENABLE_INTS_ENTRY_FN, virthba_proc_dir);
+
+ if (virthba_proc_dir)
+ remove_proc_entry(DIR_PROC_ENTRY, NULL);
+
+ LOGINF("Leaving virthba_mod_exit\n");
+
+}
+
+/* specify function to be run at module insertion time */
+module_init(virthba_mod_init);
+
+/* specify function to be run when module is removed */
+module_exit(virthba_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Usha Srinivasan");
+MODULE_ALIAS("uisvirthba");
+ /* this is extracted during depmod and kept in modules.dep */
+/* module parameter */
+module_param(virthba_options, charp, S_IRUGO);
diff --git a/drivers/staging/unisys/virthba/virthba.h b/drivers/staging/unisys/virthba/virthba.h
new file mode 100644
index 000000000000..88b797439a16
--- /dev/null
+++ b/drivers/staging/unisys/virthba/virthba.h
@@ -0,0 +1,31 @@
+/* virthba.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Unisys Virtual HBA driver header
+ */
+
+
+
+#ifndef __VIRTHBA_H__
+#define __VIRTHBA_H__
+
+
+#define VIRTHBA_VERSION "01.00"
+
+
+#endif /* __VIRTHBA_H__ */
diff --git a/drivers/staging/unisys/virtpci/Kconfig b/drivers/staging/unisys/virtpci/Kconfig
new file mode 100644
index 000000000000..e59efcbc4d3b
--- /dev/null
+++ b/drivers/staging/unisys/virtpci/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys virtpci configuration
+#
+
+config UNISYS_VIRTPCI
+ tristate "Unisys virtpci driver"
+ depends on UNISYSSPAR && UNISYS_UISLIB
+ ---help---
+ If you say Y here, you will enable the Unisys virtpci driver.
+
diff --git a/drivers/staging/unisys/virtpci/Makefile b/drivers/staging/unisys/virtpci/Makefile
new file mode 100644
index 000000000000..f9399aabddd1
--- /dev/null
+++ b/drivers/staging/unisys/virtpci/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for Unisys virtpci
+#
+
+obj-$(CONFIG_UNISYS_VIRTPCI) += virtpci.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/uislib
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/virtpci/virtpci.c b/drivers/staging/unisys/virtpci/virtpci.c
new file mode 100644
index 000000000000..8e34650b00b5
--- /dev/null
+++ b/drivers/staging/unisys/virtpci/virtpci.c
@@ -0,0 +1,1771 @@
+/* virtpci.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#define EXPORT_SYMTAB
+
+#include <linux/kernel.h>
+#ifdef CONFIG_MODVERSIONS
+#include <config/modversions.h>
+#endif
+#include "uniklog.h"
+#include "diagnostics/appos_subsystems.h"
+#include "uisutils.h"
+#include "commontypes.h"
+#include "vbuschannel.h"
+#include "vbushelper.h"
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/proc_fs.h>
+#include <linux/if_ether.h>
+#include <linux/version.h>
+#include "version.h"
+#include "guestlinuxdebug.h"
+
+struct driver_private {
+ struct kobject kobj;
+ struct klist klist_devices;
+ struct klist_node knode_bus;
+ struct module_kobject *mkobj;
+ struct device_driver *driver;
+};
+#define to_driver(obj) container_of(obj, struct driver_private, kobj)
+
+/* bus_id went away in 2.6.30 - the size was 20 bytes, so we'll define
+ * it ourselves, and a macro to make getting the field a bit simpler.
+ */
+#ifndef BUS_ID_SIZE
+#define BUS_ID_SIZE 20
+#endif
+
+#define BUS_ID(x) dev_name(x)
+
+#include "virtpci.h"
+
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages
+ */
+#define CURRENT_FILE_PC VIRT_PCI_PC_virtpci_c
+#define __MYFILE__ "virtpci.c"
+
+#define VIRTPCI_VERSION "01.00"
+
+/*****************************************************/
+/* Forward declarations */
+/*****************************************************/
+
+static int delete_vbus_device(struct device *vbus, void *data);
+static int match_busid(struct device *dev, void *data);
+static void virtpci_bus_release(struct device *dev);
+static void virtpci_device_release(struct device *dev);
+static int virtpci_device_add(struct device *parentbus, int devtype,
+ struct add_virt_guestpart *addparams,
+ struct scsi_adap_info *scsi,
+ struct net_adap_info *net);
+static int virtpci_device_del(struct device *parentbus, int devtype,
+ struct vhba_wwnn *wwnn, unsigned char macaddr[]);
+static int virtpci_device_serverdown(struct device *parentbus, int devtype,
+ struct vhba_wwnn *wwnn,
+ unsigned char macaddr[]);
+static int virtpci_device_serverup(struct device *parentbus, int devtype,
+ struct vhba_wwnn *wwnn,
+ unsigned char macaddr[]);
+static ssize_t virtpci_driver_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+static ssize_t virtpci_driver_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count);
+static int virtpci_bus_match(struct device *dev, struct device_driver *drv);
+static int virtpci_uevent(struct device *dev, struct kobj_uevent_env *env);
+static int virtpci_device_suspend(struct device *dev, pm_message_t state);
+static int virtpci_device_resume(struct device *dev);
+static int virtpci_device_probe(struct device *dev);
+static int virtpci_device_remove(struct device *dev);
+static ssize_t virt_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t info_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+
+static const struct file_operations proc_virt_fops = {
+ .write = virt_proc_write,
+};
+
+static const struct file_operations proc_info_fops = {
+ .read = info_proc_read,
+};
+
+/*****************************************************/
+/* Globals */
+/*****************************************************/
+
+/* methods in the bus_type struct allow the bus code to serve as an
+ * intermediary between the device core and the individual drivers
+ */
+static struct bus_type virtpci_bus_type = {
+ .name = "uisvirtpci",
+ .match = virtpci_bus_match,
+ .uevent = virtpci_uevent,
+ .suspend = virtpci_device_suspend,
+ .resume = virtpci_device_resume,
+};
+
+static struct device virtpci_rootbus_device = {
+ .init_name = "vbusroot", /* root bus */
+ .release = virtpci_bus_release
+};
+
+/* filled in with info about parent chipset driver when we register with it */
+static ULTRA_VBUS_DEVICEINFO Chipset_DriverInfo;
+
+static const struct sysfs_ops virtpci_driver_sysfs_ops = {
+ .show = virtpci_driver_attr_show,
+ .store = virtpci_driver_attr_store,
+};
+
+static struct kobj_type virtpci_driver_kobj_type = {
+ .sysfs_ops = &virtpci_driver_sysfs_ops,
+};
+
+static struct virtpci_dev *VpcidevListHead;
+static DEFINE_RWLOCK(VpcidevListLock);
+
+/* filled in with info about this driver, wrt it servicing client busses */
+static ULTRA_VBUS_DEVICEINFO Bus_DriverInfo;
+
+/* virtpci_proc_dir_entry is used to create the proc entry directory
+ * for virtpci
+ */
+static struct proc_dir_entry *virtpci_proc_dir;
+/* virt_proc_entry is used to tell virtpci to add/delete vhbas/vnics/vbuses */
+static struct proc_dir_entry *virt_proc_entry;
+/* info_proc_entry is used to tell virtpci to display current info
+ * kept in the driver
+ */
+static struct proc_dir_entry *info_proc_entry;
+#define VIRT_PROC_ENTRY_FN "virt"
+#define INFO_PROC_ENTRY_FN "info"
+#define DIR_PROC_ENTRY "virtpci"
+
+struct virtpci_busdev {
+ struct device virtpci_bus_device;
+};
+
+/*****************************************************/
+/* Local functions */
+/*****************************************************/
+
+static inline
+int WAIT_FOR_IO_CHANNEL(ULTRA_IO_CHANNEL_PROTOCOL __iomem *chanptr)
+{
+ int count = 120;
+
+ while (count > 0) {
+ if (ULTRA_CHANNEL_SERVER_READY(&chanptr->ChannelHeader))
+ return 1;
+ UIS_THREAD_WAIT_SEC(1);
+ count--;
+ }
+ return 0;
+}
+
+/* Write the contents of <info> to the ULTRA_VBUS_CHANNEL_PROTOCOL.ChpInfo. */
+static int write_vbus_chpInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
+ ULTRA_VBUS_DEVICEINFO *info)
+{
+ int off;
+ if (!chan) {
+ LOGERR("vbus channel not present");
+ return -1;
+ }
+ off = sizeof(ULTRA_CHANNEL_PROTOCOL) + chan->HdrInfo.chpInfoByteOffset;
+ if (chan->HdrInfo.chpInfoByteOffset == 0) {
+ LOGERR("vbus channel not used, because chpInfoByteOffset == 0");
+ return -1;
+ }
+ memcpy(((U8 *) (chan)) + off, info, sizeof(*info));
+ return 0;
+}
+
+/* Write the contents of <info> to the ULTRA_VBUS_CHANNEL_PROTOCOL.BusInfo. */
+static int write_vbus_busInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
+ ULTRA_VBUS_DEVICEINFO *info)
+{
+ int off;
+ if (!chan) {
+ LOGERR("vbus channel not present");
+ return -1;
+ }
+ off = sizeof(ULTRA_CHANNEL_PROTOCOL) + chan->HdrInfo.busInfoByteOffset;
+ if (chan->HdrInfo.busInfoByteOffset == 0) {
+ LOGERR("vbus channel not used, because busInfoByteOffset == 0");
+ return -1;
+ }
+ memcpy(((U8 *) (chan)) + off, info, sizeof(*info));
+ return 0;
+}
+
+/* Write the contents of <info> to the
+ * ULTRA_VBUS_CHANNEL_PROTOCOL.DevInfo[<devix>].
+ */
+static int
+write_vbus_devInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
+ ULTRA_VBUS_DEVICEINFO *info, int devix)
+{
+ int off;
+ if (!chan) {
+ LOGERR("vbus channel not present");
+ return -1;
+ }
+ off =
+ (sizeof(ULTRA_CHANNEL_PROTOCOL) +
+ chan->HdrInfo.devInfoByteOffset) +
+ (chan->HdrInfo.deviceInfoStructBytes * devix);
+ if (chan->HdrInfo.devInfoByteOffset == 0) {
+ LOGERR("vbus channel not used, because devInfoByteOffset == 0");
+ return -1;
+ }
+ memcpy(((U8 *) (chan)) + off, info, sizeof(*info));
+ return 0;
+}
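+
+/* The three writers above differ only in the offset they compute into the
+ * vbus channel: ChpInfo and BusInfo sit at fixed offsets recorded in
+ * HdrInfo, while each device slot adds a per-index stride. Sketch, names
+ * from this file:
+ *
+ *   off = sizeof(ULTRA_CHANNEL_PROTOCOL)
+ *      + chan->HdrInfo.devInfoByteOffset
+ *      + (chan->HdrInfo.deviceInfoStructBytes * devix);
+ *   memcpy(((U8 *) chan) + off, info, sizeof(*info));
+ */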
+
+/* adds a vbus
+ * returns 0 failure, 1 success,
+ */
+static int add_vbus(struct add_vbus_guestpart *addparams)
+{
+ int ret;
+ struct device *vbus;
+ vbus = kzalloc(sizeof(struct device), GFP_ATOMIC);
+
+ POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ if (!vbus)
+ return 0;
+
+ dev_set_name(vbus, "vbus%d", addparams->busNo);
+ vbus->release = virtpci_bus_release;
+ vbus->parent = &virtpci_rootbus_device; /* root bus is parent */
+ vbus->bus = &virtpci_bus_type; /* bus type */
+ vbus->platform_data = (__force void *)addparams->chanptr;
+
+ /* register a virt bus device -
+ * this bus shows up under /sys/devices with .name value
+ * "virtpci%d" any devices added to this bus then show up under
+ * /sys/devices/virtpci0
+ */
+ ret = device_register(vbus);
+ if (ret) {
+ LOGERR("device_register FAILED:%d\n", ret);
+ POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ write_vbus_chpInfo(vbus->platform_data /* chanptr */ ,
+ &Chipset_DriverInfo);
+ write_vbus_busInfo(vbus->platform_data /* chanptr */ , &Bus_DriverInfo);
+ LOGINF("Added vbus %d; device %s created successfully\n",
+ addparams->busNo, BUS_ID(vbus));
+ POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ return 1;
+}
+
+/* for CHANSOCK wwnn/max are AUTO-GENERATED; for normal channels,
+ * wwnn/max are in the channel header.
+ */
+#define GET_SCSIADAPINFO_FROM_CHANPTR(chanptr) { \
+ memcpy_fromio(&scsi.wwnn, \
+ &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vhba.wwnn, \
+ sizeof(struct vhba_wwnn)); \
+ memcpy_fromio(&scsi.max, \
+ &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vhba.max, \
+ sizeof(struct vhba_config_max)); \
+ }
+
+/* find bus device with the busid that matches - match_busid matches bus_id */
+#define GET_BUS_DEV(busno) { \
+ sprintf(busid, "vbus%d", busno); \
+ vbus = bus_find_device(&virtpci_bus_type, NULL, \
+ (void *)busid, match_busid); \
+ if (!vbus) { \
+ LOGERR("**** FAILED to find vbus %s\n", busid); \
+ return 0; \
+ } \
+}
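+
+/* GET_BUS_DEV() expands to a by-name lookup on the virtpci bus; match_busid
+ * (forward-declared above) compares the formatted "vbus%d" string against
+ * each device's bus id. Roughly, for a given busno:
+ *
+ *   sprintf(busid, "vbus%d", busno);
+ *   vbus = bus_find_device(&virtpci_bus_type, NULL,
+ *                          (void *) busid, match_busid);
+ *   if (!vbus)
+ *      return 0;                     // callers treat a missing bus as failure
+ */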
+
+/* adds a vhba
+ * returns 0 failure, 1 success,
+ */
+static int add_vhba(struct add_virt_guestpart *addparams)
+{
+ int i;
+ struct scsi_adap_info scsi;
+ struct device *vbus;
+ unsigned char busid[BUS_ID_SIZE];
+
+ POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ if (!WAIT_FOR_IO_CHANNEL
+ ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) addparams->chanptr)) {
+ LOGERR("Timed out. Channel not ready\n");
+ POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ GET_SCSIADAPINFO_FROM_CHANPTR(addparams->chanptr);
+
+ GET_BUS_DEV(addparams->busNo);
+
+ LOGINF("Adding vhba wwnn:%x:%x config:%d-%d-%d-%d chanptr:%p\n",
+ scsi.wwnn.wwnn1, scsi.wwnn.wwnn2,
+ scsi.max.max_channel, scsi.max.max_id, scsi.max.max_lun,
+ scsi.max.cmd_per_lun, addparams->chanptr);
+ i = virtpci_device_add(vbus, VIRTHBA_TYPE, addparams, &scsi, NULL);
+ if (i) {
+ LOGINF("Added vhba wwnn:%x:%x chanptr:%p\n", scsi.wwnn.wwnn1,
+ scsi.wwnn.wwnn2, addparams->chanptr);
+ POSTCODE_LINUX_3(VPCI_CREATE_EXIT_PC, i,
+ POSTCODE_SEVERITY_INFO);
+ }
+ return i;
+
+}
+
+/* for CHANSOCK macaddr is AUTO-GENERATED; for normal channels,
+ * macaddr is in the channel header.
+ */
+#define GET_NETADAPINFO_FROM_CHANPTR(chanptr) { \
+ memcpy_fromio(net.mac_addr, \
+ ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vnic.macaddr, \
+ MAX_MACADDR_LEN); \
+ net.num_rcv_bufs = \
+ readl(&((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vnic.num_rcv_bufs); \
+ net.mtu = readl(&((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vnic.mtu); \
+ memcpy_fromio(&net.zoneGuid, \
+ &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vnic.zoneGuid, \
+ sizeof(GUID)); \
+}
+
+/* adds a vnic
+ * returns 0 failure, 1 success,
+ */
+static int
+add_vnic(struct add_virt_guestpart *addparams)
+{
+ int i;
+ struct net_adap_info net;
+ struct device *vbus;
+ unsigned char busid[BUS_ID_SIZE];
+
+ POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ if (!WAIT_FOR_IO_CHANNEL
+ ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) addparams->chanptr)) {
+ LOGERR("Timed out, channel not ready\n");
+ POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ GET_NETADAPINFO_FROM_CHANPTR(addparams->chanptr);
+
+ GET_BUS_DEV(addparams->busNo);
+
+ LOGINF("Adding vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x rcvbufs:%d mtu:%d chanptr:%p{%-8.8lx-%-4.4x-%-4.4x-%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x}\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2], net.mac_addr[3],
+ net.mac_addr[4], net.mac_addr[5], net.num_rcv_bufs, net.mtu,
+ addparams->chanptr, (ulong) net.zoneGuid.data1, net.zoneGuid.data2,
+ net.zoneGuid.data3, net.zoneGuid.data4[0], net.zoneGuid.data4[1],
+ net.zoneGuid.data4[2], net.zoneGuid.data4[3],
+ net.zoneGuid.data4[4], net.zoneGuid.data4[5],
+ net.zoneGuid.data4[6], net.zoneGuid.data4[7]);
+ i = virtpci_device_add(vbus, VIRTNIC_TYPE, addparams, NULL, &net);
+ if (i) {
+ LOGINF("Added vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ POSTCODE_LINUX_3(VPCI_CREATE_EXIT_PC, i,
+ POSTCODE_SEVERITY_INFO);
+ return 1;
+ }
+ return 0;
+}
+
+/* delete vbus
+ * returns 0 failure, 1 success,
+ */
+static int
+delete_vbus(struct del_vbus_guestpart *delparams)
+{
+ struct device *vbus;
+ unsigned char busid[BUS_ID_SIZE];
+
+ GET_BUS_DEV(delparams->busNo);
+ /* ensure that bus has no devices? -- TBD */
+ LOGINF("Deleting %s\n", BUS_ID(vbus));
+ if (delete_vbus_device(vbus, NULL))
+ return 0; /* failure */
+ LOGINF("Deleted vbus %d\n", delparams->busNo);
+ return 1;
+}
+
+static int
+delete_vbus_device(struct device *vbus, void *data)
+{
+ int checkforroot = (data != NULL);
+ struct device *pDev = &virtpci_rootbus_device;
+
+ if ((checkforroot) && match_busid(vbus, (void *) BUS_ID(pDev))) {
+ /* skip it - don't delete root bus */
+ LOGINF("skipping root bus\n");
+ return 0; /* pretend no error */
+ }
+ LOGINF("Calling unregister for %s\n", BUS_ID(vbus));
+ device_unregister(vbus);
+ kfree(vbus);
+ LOGINF("VBus unregister and freed\n");
+ return 0; /* no error */
+}
+
+/* pause vhba
+ * returns 0 on failure, 1 on success
+ */
+static int pause_vhba(struct pause_virt_guestpart *pauseparams)
+{
+ int i;
+ struct scsi_adap_info scsi;
+
+ GET_SCSIADAPINFO_FROM_CHANPTR(pauseparams->chanptr);
+
+ LOGINF("Pausing vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1, scsi.wwnn.wwnn2);
+ i = virtpci_device_serverdown(NULL /*no parent bus */ , VIRTHBA_TYPE,
+ &scsi.wwnn, NULL);
+ if (i)
+ LOGINF("Paused vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1,
+ scsi.wwnn.wwnn2);
+ return i;
+}
+
+/* pause vnic
+ * returns 0 on failure, 1 on success
+ */
+static int pause_vnic(struct pause_virt_guestpart *pauseparams)
+{
+ int i;
+ struct net_adap_info net;
+
+ GET_NETADAPINFO_FROM_CHANPTR(pauseparams->chanptr);
+
+ LOGINF("Pausing vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ i = virtpci_device_serverdown(NULL /*no parent bus */ , VIRTNIC_TYPE,
+ NULL, net.mac_addr);
+ if (i) {
+ LOGINF(" Paused vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ }
+ return i;
+}
+
+/* resume vhba
+ * returns 0 on failure, 1 on success
+ */
+static int resume_vhba(struct resume_virt_guestpart *resumeparams)
+{
+ int i;
+ struct scsi_adap_info scsi;
+
+ GET_SCSIADAPINFO_FROM_CHANPTR(resumeparams->chanptr);
+
+ LOGINF("Resuming vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1, scsi.wwnn.wwnn2);
+ i = virtpci_device_serverup(NULL /*no parent bus */ , VIRTHBA_TYPE,
+ &scsi.wwnn, NULL);
+ if (i)
+ LOGINF("Resumed vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1,
+ scsi.wwnn.wwnn2);
+ return i;
+}
+
+/* resume vnic
+ * returns 0 on failure, 1 on success
+ */
+static int
+resume_vnic(struct resume_virt_guestpart *resumeparams)
+{
+ int i;
+ struct net_adap_info net;
+
+ GET_NETADAPINFO_FROM_CHANPTR(resumeparams->chanptr);
+
+ LOGINF("Resuming vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ i = virtpci_device_serverup(NULL /*no parent bus */ , VIRTNIC_TYPE,
+ NULL, net.mac_addr);
+ if (i) {
+ LOGINF(" Resumed vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ }
+ return i;
+}
+
+/* delete vhba
+ * returns 0 on failure, 1 on success
+ */
+static int delete_vhba(struct del_virt_guestpart *delparams)
+{
+ int i;
+ struct scsi_adap_info scsi;
+
+ GET_SCSIADAPINFO_FROM_CHANPTR(delparams->chanptr);
+
+ LOGINF("Deleting vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1, scsi.wwnn.wwnn2);
+ i = virtpci_device_del(NULL /*no parent bus */ , VIRTHBA_TYPE,
+ &scsi.wwnn, NULL);
+ if (i) {
+ LOGINF("Deleted vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1,
+ scsi.wwnn.wwnn2);
+ return 1;
+ }
+ return 0;
+}
+
+/* deletes a vnic
+ * returns 0 on failure, 1 on success
+ */
+static int delete_vnic(struct del_virt_guestpart *delparams)
+{
+ int i;
+ struct net_adap_info net;
+
+ GET_NETADAPINFO_FROM_CHANPTR(delparams->chanptr);
+
+ LOGINF("Deleting vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ i = virtpci_device_del(NULL /*no parent bus */ , VIRTNIC_TYPE, NULL,
+ net.mac_addr);
+ if (i) {
+ LOGINF("Deleted vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ }
+ return i;
+}
+
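+/* Unregisters and frees one virtpci device. device_unregister() can
+ * sleep, so this must not be used while VpcidevListLock is held.
+ */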
+#define DELETE_ONE_VPCIDEV(vpcidev) { \
+ LOGINF("calling device_unregister:%p\n", &vpcidev->generic_dev); \
+ device_unregister(&vpcidev->generic_dev); \
+ LOGINF("Deleted %p\n", vpcidev); \
+ kfree(vpcidev); \
+}
+
+/* deletes all vhbas and vnics, then deletes every vbus */
+static void delete_all(void)
+{
+ int count = 0;
+ unsigned long flags;
+ struct virtpci_dev *tmpvpcidev, *nextvpcidev;
+
+ /* delete the entire vhba/vnic list in one shot */
+ write_lock_irqsave(&VpcidevListLock, flags);
+ tmpvpcidev = VpcidevListHead;
+ VpcidevListHead = NULL;
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+
+ /* delete one vhba/vnic at a time */
+ while (tmpvpcidev) {
+ nextvpcidev = tmpvpcidev->next;
+ /* delete the vhba/vnic at tmpvpcidev */
+ DELETE_ONE_VPCIDEV(tmpvpcidev);
+ tmpvpcidev = nextvpcidev;
+ count++;
+ }
+ LOGINF("Deleted %d vhbas/vnics.\n", count);
+
+ /* now delete each vbus */
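+ /* the non-NULL data argument tells delete_vbus_device to skip
+ * (not delete) the root bus device
+ */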
+ if (bus_for_each_dev
+ (&virtpci_bus_type, NULL, (void *) 1, delete_vbus_device))
+ LOGERR("delete of all vbus failed\n");
+}
+
+/* deletes all vnics or vhbas
+ * returns 0 on failure, 1 on success
+ */
+static int delete_all_virt(VIRTPCI_DEV_TYPE devtype,
+ struct del_vbus_guestpart *delparams)
+{
+ int i;
+ unsigned char busid[BUS_ID_SIZE];
+ struct device *vbus;
+
+ GET_BUS_DEV(delparams->busNo);
+
+ if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
+ LOGERR("**** FAILED to delete all devices; devtype:%d not vhba:%d or vnic:%d\n",
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ return 0;
+ }
+
+ LOGINF("Deleting all %s in vbus %s\n",
+ devtype == VIRTHBA_TYPE ? "vhbas" : "vnics", busid);
+ /* delete all vhbas/vnics */
+ i = virtpci_device_del(vbus, devtype, NULL, NULL);
+ if (i > 0)
+ LOGINF("Deleted %d %s\n", i,
+ devtype == VIRTHBA_TYPE ? "vhbas" : "vnics");
+ return 1;
+}
+
+static int virtpci_ctrlchan_func(struct guest_msgs *msg)
+{
+ switch (msg->msgtype) {
+ case GUEST_ADD_VBUS:
+ return add_vbus(&msg->add_vbus);
+ case GUEST_ADD_VHBA:
+ return add_vhba(&msg->add_vhba);
+ case GUEST_ADD_VNIC:
+ return add_vnic(&msg->add_vnic);
+ case GUEST_DEL_VBUS:
+ return delete_vbus(&msg->del_vbus);
+ case GUEST_DEL_VHBA:
+ return delete_vhba(&msg->del_vhba);
+ case GUEST_DEL_VNIC:
+ return delete_vnic(&msg->del_vnic);
+ case GUEST_DEL_ALL_VHBAS:
+ return delete_all_virt(VIRTHBA_TYPE, &msg->del_all_vhbas);
+ case GUEST_DEL_ALL_VNICS:
+ return delete_all_virt(VIRTNIC_TYPE, &msg->del_all_vnics);
+ case GUEST_DEL_ALL_VBUSES:
+ delete_all();
+ return 1;
+ case GUEST_PAUSE_VHBA:
+ return pause_vhba(&msg->pause_vhba);
+ case GUEST_PAUSE_VNIC:
+ return pause_vnic(&msg->pause_vnic);
+ case GUEST_RESUME_VHBA:
+ return resume_vhba(&msg->resume_vhba);
+ case GUEST_RESUME_VNIC:
+ return resume_vnic(&msg->resume_vnic);
+ default:
+ LOGERR("invalid message type %d.\n", msg->msgtype);
+ return 0;
+ }
+}
+
+/* same as driver_helper() in the driver core's drivers/base/bus.c */
+static int match_busid(struct device *dev, void *data)
+{
+ const char *name = data;
+
+ if (strcmp(name, BUS_ID(dev)) == 0)
+ return 1;
+ return 0;
+}
+
+/*****************************************************/
+/* Bus functions */
+/*****************************************************/
+
+static const struct pci_device_id *
+virtpci_match_device(const struct pci_device_id *ids,
+ const struct virtpci_dev *dev)
+{
+ while (ids->vendor || ids->subvendor || ids->class_mask) {
+ DBGINF("ids->vendor:%x dev->vendor:%x ids->device:%x dev->device:%x\n",
+ ids->vendor, dev->vendor, ids->device, dev->device);
+
+ if ((ids->vendor == dev->vendor)
+ && (ids->device == dev->device))
+ return ids;
+
+ ids++;
+ }
+ return NULL;
+}
+
+/* NOTE: This function is called when a new device is added for this
+ * bus, or for existing devices when a new driver is added for this
+ * bus. It returns nonzero if a given device can be handled by the
+ * given driver.
+ */
+static int virtpci_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct virtpci_dev *virtpcidev = device_to_virtpci_dev(dev);
+ struct virtpci_driver *virtpcidrv = driver_to_virtpci_driver(drv);
+ int match = 0;
+
+ DBGINF("In virtpci_bus_match dev->bus_id:%s drv->name:%s\n",
+ dev->bus_id, drv->name);
+
+ /* check ids list for a match */
+ if (virtpci_match_device(virtpcidrv->id_table, virtpcidev))
+ match = 1;
+
+ DBGINF("returning match:%d\n", match);
+ return match; /* 0 - no match; 1 - yes it matches */
+}
+
+static int virtpci_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ DBGINF("In virtpci_hotplug\n");
+ /* add variables to the environment prior to the generation of
+ * hotplug events to user space
+ */
+ if (add_uevent_var(env, "VIRTPCI_VERSION=%s", VIRTPCI_VERSION))
+ return -ENOMEM;
+ return 0;
+}
+
+static int virtpci_device_suspend(struct device *dev, pm_message_t state)
+{
+ DBGINF("In virtpci_device_suspend -NYI ****\n");
+ return 0;
+}
+
+static int virtpci_device_resume(struct device *dev)
+{
+ DBGINF("In virtpci_device_resume -NYI ****\n");
+ return 0;
+}
+
+/* For a child device just created on a client bus, fill in
+ * information about the driver that is controlling this device into
+ * the appropriate slot within the vbus channel of the bus
+ * instance.
+ */
+static void fix_vbus_devInfo(struct device *dev, int devNo, int devType,
+ struct virtpci_driver *virtpcidrv)
+{
+ struct device *vbus;
+ void *pChan;
+ ULTRA_VBUS_DEVICEINFO devInfo;
+ const char *stype;
+
+ if (!dev) {
+ LOGERR("%s dev is NULL", __func__);
+ return;
+ }
+ if (!virtpcidrv) {
+ LOGERR("%s driver is NULL", __func__);
+ return;
+ }
+ vbus = dev->parent;
+ if (!vbus) {
+ LOGERR("%s dev has no parent bus", __func__);
+ return;
+ }
+ pChan = vbus->platform_data;
+ if (!pChan) {
+ LOGERR("%s dev bus has no channel", __func__);
+ return;
+ }
+ switch (devType) {
+ case PCI_DEVICE_ID_VIRTHBA:
+ stype = "vHBA";
+ break;
+ case PCI_DEVICE_ID_VIRTNIC:
+ stype = "vNIC";
+ break;
+ default:
+ stype = "unknown";
+ break;
+ }
+ BusDeviceInfo_Init(&devInfo, stype,
+ virtpcidrv->name,
+ virtpcidrv->version,
+ virtpcidrv->vertag,
+ virtpcidrv->build_date, virtpcidrv->build_time);
+ write_vbus_devInfo(pChan, &devInfo, devNo);
+
+ /* Re-write bus+chipset info, because it is possible that this
+ * was previously written by our good counterpart, visorbus.
+ */
+ write_vbus_chpInfo(pChan, &Chipset_DriverInfo);
+ write_vbus_busInfo(pChan, &Bus_DriverInfo);
+}
+
+/* This function is called to query the existence of a specific device
+ * and whether this driver can work with it. It should return -ENODEV
+ * in case of failure.
+ */
+static int virtpci_device_probe(struct device *dev)
+{
+ struct virtpci_dev *virtpcidev = device_to_virtpci_dev(dev);
+ struct virtpci_driver *virtpcidrv =
+ driver_to_virtpci_driver(dev->driver);
+ const struct pci_device_id *id;
+ int error = 0;
+
+ LOGINF("In virtpci_device_probe dev:%p virtpcidev:%p virtpcidrv:%p\n",
+ dev, virtpcidev, virtpcidrv); /* VERBOSE/DEBUG ? */
+ POSTCODE_LINUX_2(VPCI_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ /* static match and static probe vs. dynamic match and dynamic
+ * probe - do we care?
+ */
+ if (!virtpcidrv->id_table)
+ return -ENODEV;
+
+ id = virtpci_match_device(virtpcidrv->id_table, virtpcidev);
+ if (!id)
+ return -ENODEV;
+
+ /* increment reference count */
+ get_device(dev);
+
+ /* if virtpcidev is not already claimed & probe function is
+ * valid, probe it
+ */
+ if (!virtpcidev->mydriver && virtpcidrv->probe) {
+ /* call the probe function - virthba or virtnic probe
+ * is what it should be
+ */
+ error = virtpcidrv->probe(virtpcidev, id);
+ if (!error) {
+ fix_vbus_devInfo(dev, virtpcidev->deviceNo,
+ virtpcidev->device, virtpcidrv);
+ virtpcidev->mydriver = virtpcidrv;
+ POSTCODE_LINUX_2(VPCI_PROBE_EXIT_PC,
+ POSTCODE_SEVERITY_INFO);
+ } else
+ put_device(dev);
+ }
+ if (error)
+ POSTCODE_LINUX_2(VPCI_PROBE_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return error; /* -ENODEV for probe failure */
+}
+
+static int virtpci_device_remove(struct device *dev_)
+{
+ /* dev_ passed in is the HBA device which we called
+ * generic_dev in our virtpcidev struct
+ */
+ struct virtpci_dev *virtpcidev = device_to_virtpci_dev(dev_);
+ struct virtpci_driver *virtpcidrv = virtpcidev->mydriver;
+ LOGINF("In virtpci_device_remove bus_id:%s dev_:%p virtpcidev:%p dev->driver:%p drivername:%s\n",
+ BUS_ID(dev_), dev_, virtpcidev, dev_->driver,
+ dev_->driver->name); /* VERBOSE/DEBUG */
+ if (virtpcidrv) {
+ /* TEMP: assuming we have only one such driver for now */
+ if (virtpcidrv->remove)
+ virtpcidrv->remove(virtpcidev);
+ virtpcidev->mydriver = NULL;
+ }
+
+ DBGINF("calling putdevice\n");
+ put_device(dev_);
+
+ DBGINF("Leaving\n");
+ return 0;
+}
+
+/*****************************************************/
+/* Bus functions */
+/*****************************************************/
+
+static void virtpci_bus_release(struct device *dev)
+{
+ /* this function is called when the last reference to the
+ * device is removed
+ */
+ DBGINF("In virtpci_bus_release\n");
+ /* what else is supposed to happen here? */
+}
+
+/*****************************************************/
+/* Adapter functions */
+/*****************************************************/
+
+static int virtpci_device_add(struct device *parentbus, int devtype,
+ struct add_virt_guestpart *addparams,
+ struct scsi_adap_info *scsi, /* NULL for VNIC add */
+ struct net_adap_info *net /* NULL for VHBA add */)
+{
+ struct virtpci_dev *virtpcidev = NULL;
+ struct virtpci_dev *tmpvpcidev = NULL, *prev;
+ unsigned long flags;
+ int ret;
+ ULTRA_IO_CHANNEL_PROTOCOL __iomem *pIoChan = NULL;
+ struct device *pDev;
+
+ LOGINF("virtpci_device_add parentbus:%p chanptr:%p\n", parentbus,
+ addparams->chanptr);
+
+ POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
+ LOGERR("**** FAILED to add device; devtype:%d not vhba:%d or vnic:%d\n",
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ POSTCODE_LINUX_3(VPCI_CREATE_FAILURE_PC, devtype,
+ POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ /* add a Virtual Device */
+ virtpcidev = kzalloc(sizeof(struct virtpci_dev), GFP_ATOMIC);
+ if (virtpcidev == NULL) {
+ LOGERR("can't add device - malloc FALLED\n");
+ POSTCODE_LINUX_2(MALLOC_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ /* initialize stuff unique to virtpci_dev struct */
+ virtpcidev->devtype = devtype;
+ if (devtype == VIRTHBA_TYPE) {
+ virtpcidev->device = PCI_DEVICE_ID_VIRTHBA;
+ virtpcidev->scsi = *scsi;
+ } else {
+ virtpcidev->device = PCI_DEVICE_ID_VIRTNIC;
+ virtpcidev->net = *net;
+ }
+ virtpcidev->vendor = PCI_VENDOR_ID_UNISYS;
+ virtpcidev->busNo = addparams->busNo;
+ virtpcidev->deviceNo = addparams->deviceNo;
+
+ virtpcidev->queueinfo.chan = addparams->chanptr;
+ virtpcidev->queueinfo.send_int_if_needed = NULL;
+
+ /* Set up safe queue... */
+ pIoChan = (ULTRA_IO_CHANNEL_PROTOCOL __iomem *)
+ virtpcidev->queueinfo.chan;
+
+ virtpcidev->intr = addparams->intr;
+
+ /* initialize stuff in the device portion of the struct */
+ virtpcidev->generic_dev.bus = &virtpci_bus_type;
+ virtpcidev->generic_dev.parent = parentbus;
+ virtpcidev->generic_dev.release = virtpci_device_release;
+
+ dev_set_name(&virtpcidev->generic_dev, "%x:%x",
+ addparams->busNo, addparams->deviceNo);
+
+ /* add the vhba/vnic to virtpci device list - but check for
+ * duplicate wwnn/macaddr first
+ */
+ write_lock_irqsave(&VpcidevListLock, flags);
+ for (tmpvpcidev = VpcidevListHead; tmpvpcidev;
+ tmpvpcidev = tmpvpcidev->next) {
+ if (devtype == VIRTHBA_TYPE) {
+ if ((tmpvpcidev->scsi.wwnn.wwnn1 == scsi->wwnn.wwnn1) &&
+ (tmpvpcidev->scsi.wwnn.wwnn2 == scsi->wwnn.wwnn2)) {
+ /* duplicate - already have vpcidev
+ * with this wwnn
+ */
+ break;
+ }
+ } else
+ if (memcmp
+ (tmpvpcidev->net.mac_addr, net->mac_addr,
+ MAX_MACADDR_LEN) == 0) {
+ /* duplicate - already have vnic with this macaddr */
+ break;
+ }
+ }
+ if (tmpvpcidev) {
+ /* found a vhba/vnic already in the list with same
+ * wwnn or macaddr - reject add
+ */
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+ kfree(virtpcidev);
+ LOGERR("**** FAILED vhba/vnic already exists in the list\n");
+ POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ /* add it at the head */
+ if (!VpcidevListHead)
+ VpcidevListHead = virtpcidev;
+ else {
+ /* insert virtpcidev at the head of our linked list of
+ * vpcidevs
+ */
+ virtpcidev->next = VpcidevListHead;
+ VpcidevListHead = virtpcidev;
+ }
+
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+
+ /* Must transition channel to ATTACHED state BEFORE
+ * registering the device, because polling of the channel
+ * queues can begin at any time after device_register().
+ */
+ pDev = &virtpcidev->generic_dev;
+ ULTRA_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
+ BUS_ID(pDev),
+ CHANNELCLI_ATTACHED, NULL);
+
+ /* don't register until device has been added to
+ * list. Otherwise, a device_unregister from this function can
+ * cause a "scheduling while atomic".
+ */
+ DBGINF("registering device:%p with bus_id:%s\n",
+ &virtpcidev->generic_dev, virtpcidev->generic_dev.bus_id);
+ ret = device_register(&virtpcidev->generic_dev);
+ /* NOTE: this call to device_register results in the uevent
+ * callback (virtpci_uevent) and in virtpci_bus_match being
+ * called. If match returns success,
+ * virtpcidev->generic_dev.driver is set to core_driver, i.e.
+ * virtpci, and virtpcidev->generic_dev.driver->probe is
+ * called, which results in virtpci_device_probe being called.
+ * If virtpci_device_probe succeeds, virtpcidev->mydriver is
+ * set to the matching driver.
+ */
+ if (ret) {
+ LOGERR("device_register returned %d\n", ret);
+ pDev = &virtpcidev->generic_dev;
+ ULTRA_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
+ BUS_ID(pDev),
+ CHANNELCLI_DETACHED, NULL);
+ /* remove virtpcidev, the one we just added, from the list */
+ write_lock_irqsave(&VpcidevListLock, flags);
+ for (tmpvpcidev = VpcidevListHead, prev = NULL;
+ tmpvpcidev;
+ prev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
+ if (tmpvpcidev == virtpcidev) {
+ if (prev)
+ prev->next = tmpvpcidev->next;
+ else
+ VpcidevListHead = tmpvpcidev->next;
+ break;
+ }
+ }
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+ kfree(virtpcidev);
+ return 0;
+ }
+
+ LOGINF("Added %s:%d:%d &virtpcidev->generic_dev:%p\n",
+ (devtype == VIRTHBA_TYPE) ? "virthba" : "virtnic",
+ addparams->busNo, addparams->deviceNo, &virtpcidev->generic_dev);
+ POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ return 1;
+}
+
+static int virtpci_device_serverdown(struct device *parentbus,
+ int devtype,
+ struct vhba_wwnn *wwnn,
+ unsigned char macaddr[])
+{
+ int pausethisone = 0;
+ bool found = false;
+ struct virtpci_dev *tmpvpcidev, *prevvpcidev;
+ struct virtpci_driver *vpcidriver;
+ unsigned long flags;
+ int rc = 0;
+
+ if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
+ LOGERR("**** FAILED to pause device; devtype:%d not vhba:%d or vnic:%d\n",
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ return 0;
+ }
+
+ /* find the vhba or vnic in virtpci device list */
+ write_lock_irqsave(&VpcidevListLock, flags);
+
+ for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL;
+ (tmpvpcidev && !found);
+ prevvpcidev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
+ if (tmpvpcidev->devtype != devtype)
+ continue;
+
+ if (devtype == VIRTHBA_TYPE) {
+ pausethisone =
+ ((tmpvpcidev->scsi.wwnn.wwnn1 == wwnn->wwnn1) &&
+ (tmpvpcidev->scsi.wwnn.wwnn2 == wwnn->wwnn2));
+ /* devtype is vhba, we're pausing vhba whose
+ * wwnn matches the current device's wwnn
+ */
+ } else { /* VIRTNIC_TYPE */
+ pausethisone =
+ memcmp(tmpvpcidev->net.mac_addr, macaddr,
+ MAX_MACADDR_LEN) == 0;
+ /* devtype is vnic, we're pausing vnic whose
+ * macaddr matches the current device's macaddr */
+ }
+
+ if (!pausethisone)
+ continue;
+
+ found = true;
+ vpcidriver = tmpvpcidev->mydriver;
+ rc = vpcidriver->suspend(tmpvpcidev, 0);
+ }
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+
+ if (!found) {
+ LOGERR("**** FAILED to find vhba/vnic in the list\n");
+ return 0;
+ }
+
+ return rc;
+}
+
+static int virtpci_device_serverup(struct device *parentbus,
+ int devtype,
+ struct vhba_wwnn *wwnn,
+ unsigned char macaddr[])
+{
+ int resumethisone = 0;
+ bool found = false;
+ struct virtpci_dev *tmpvpcidev, *prevvpcidev;
+ struct virtpci_driver *vpcidriver;
+ unsigned long flags;
+ int rc = 0;
+
+ if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
+ LOGERR("**** FAILED to resume device; devtype:%d not vhba:%d or vnic:%d\n",
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ return 0;
+ }
+
+ /* find the vhba or vnic in virtpci device list */
+ write_lock_irqsave(&VpcidevListLock, flags);
+
+ for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL;
+ (tmpvpcidev && !found);
+ prevvpcidev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
+ if (tmpvpcidev->devtype != devtype)
+ continue;
+
+ if (devtype == VIRTHBA_TYPE) {
+ resumethisone =
+ ((tmpvpcidev->scsi.wwnn.wwnn1 == wwnn->wwnn1) &&
+ (tmpvpcidev->scsi.wwnn.wwnn2 == wwnn->wwnn2));
+ /* devtype is vhba, we're resuming vhba whose
+ * wwnn matches the current device's wwnn */
+ } else { /* VIRTNIC_TYPE */
+ resumethisone =
+ memcmp(tmpvpcidev->net.mac_addr, macaddr,
+ MAX_MACADDR_LEN) == 0;
+ /* devtype is vnic, we're resuming vnic whose
+ * macaddr matches the current device's macaddr */
+ }
+
+ if (!resumethisone)
+ continue;
+
+ found = true;
+ vpcidriver = tmpvpcidev->mydriver;
+ /* This should be done at BUS resume time, but an
+ * existing problem prevents us from ever getting a bus
+ * resume... This hack would fail to work should we
+ * ever have a bus that contains NO devices, since we
+ * would never even get here in that case.
+ */
+ fix_vbus_devInfo(&tmpvpcidev->generic_dev, tmpvpcidev->deviceNo,
+ tmpvpcidev->device, vpcidriver);
+ rc = vpcidriver->resume(tmpvpcidev);
+ }
+
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+
+ if (!found) {
+ LOGERR("**** FAILED to find vhba/vnic in the list\n");
+ return 0;
+ }
+
+ return rc;
+}
+
+static int virtpci_device_del(struct device *parentbus,
+ int devtype, struct vhba_wwnn *wwnn,
+ unsigned char macaddr[])
+{
+ int count = 0, all = 0, delthisone;
+ struct virtpci_dev *tmpvpcidev, *prevvpcidev, *dellist = NULL;
+ unsigned long flags;
+
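+/* advance prevvpcidev/tmpvpcidev to the next list entry and restart the
+ * enclosing loop body
+ */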
+#define DEL_CONTINUE { \
+ prevvpcidev = tmpvpcidev;\
+ tmpvpcidev = tmpvpcidev->next;\
+ continue; \
+}
+
+ if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
+ LOGERR("**** FAILED to delete device; devtype:%d not vhba:%d or vnic:%d\n",
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ return 0;
+ }
+
+ /* see if we are to delete all - NOTE: all implies we have a
+ * valid parentbus
+ */
+ all = ((devtype == VIRTHBA_TYPE) && (wwnn == NULL)) ||
+ ((devtype == VIRTNIC_TYPE) && (macaddr == NULL));
+
+ /* find all the vhba or vnic or both in virtpci device list
+ * keep list of ones we are deleting so we can call
+ * device_unregister after we release the lock; otherwise we
+ * encounter "schedule while atomic"
+ */
+ write_lock_irqsave(&VpcidevListLock, flags);
+ for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL; tmpvpcidev;) {
+ if (tmpvpcidev->devtype != devtype)
+ DEL_CONTINUE;
+
+ if (all) {
+ delthisone =
+ (tmpvpcidev->generic_dev.parent == parentbus);
+ /* we're deleting all vhbas or vnics on the
+ * specified parent bus
+ */
+ } else if (devtype == VIRTHBA_TYPE) {
+ delthisone =
+ ((tmpvpcidev->scsi.wwnn.wwnn1 == wwnn->wwnn1) &&
+ (tmpvpcidev->scsi.wwnn.wwnn2 == wwnn->wwnn2));
+ /* devtype is vhba, we're deleting vhba whose
+ * wwnn matches the current device's wwnn
+ */
+ } else { /* VIRTNIC_TYPE */
+ delthisone =
+ memcmp(tmpvpcidev->net.mac_addr, macaddr,
+ MAX_MACADDR_LEN) == 0;
+ /* devtype is vnic, we're deleting vnic whose
+ * macaddr matches the current device's macaddr
+ */
+ }
+
+ if (!delthisone)
+ DEL_CONTINUE;
+
+ /* take vhba/vnic out of the list */
+ if (prevvpcidev)
+ /* not at head */
+ prevvpcidev->next = tmpvpcidev->next;
+ else
+ VpcidevListHead = tmpvpcidev->next;
+
+ /* add it to our deletelist */
+ tmpvpcidev->next = dellist;
+ dellist = tmpvpcidev;
+
+ count++;
+ if (!all)
+ break; /* done */
+ /* going to top of loop again - set tmpvpcidev to next
+ * one we're to process
+ */
+ if (prevvpcidev)
+ tmpvpcidev = prevvpcidev->next;
+ else
+ tmpvpcidev = VpcidevListHead;
+ }
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+
+ if (!all && (count == 0)) {
+ LOGERR("**** FAILED to find vhba/vnic in the list\n");
+ return 0;
+ }
+
+ /* now delete each one from delete list */
+ while (dellist) {
+ /* save next */
+ tmpvpcidev = dellist->next;
+ /* delete the vhba/vnic at dellist */
+ DELETE_ONE_VPCIDEV(dellist);
+ /* do next */
+ dellist = tmpvpcidev;
+ }
+
+ return count;
+}
+
+static void virtpci_device_release(struct device *dev_)
+{
+ /* this function is called when the last reference to the
+ * device is removed
+ */
+ LOGINF("In virtpci_device_release:%p - NOT YET IMPLEMENTED\n", dev_);
+}
+
+/*****************************************************/
+/* Driver functions */
+/*****************************************************/
+
+#define kobj_to_device_driver(obj) container_of(obj, struct device_driver, kobj)
+#define attribute_to_driver_attribute(obj) \
+ container_of(obj, struct driver_attribute, attr)
+
+static ssize_t virtpci_driver_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct driver_attribute *dattr = attribute_to_driver_attribute(attr);
+ struct driver_private *dprivate = to_driver(kobj);
+ struct device_driver *driver = dprivate ? dprivate->driver : NULL;
+ ssize_t ret = 0;
+
+ if (driver) {
+ DBGINF("In virtpci_driver_attr_show driver->name:%s\n",
+ driver->name);
+ if (dattr->show)
+ ret = dattr->show(driver, buf);
+ }
+ return ret;
+}
+
+static ssize_t virtpci_driver_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct driver_attribute *dattr = attribute_to_driver_attribute(attr);
+ struct driver_private *dprivate = to_driver(kobj);
+ struct device_driver *driver = dprivate ? dprivate->driver : NULL;
+ ssize_t ret = 0;
+
+ if (driver) {
+ DBGINF("In virtpci_driver_attr_store driver->name:%s\n",
+ driver->name);
+ if (dattr->store)
+ ret = dattr->store(driver, buf, count);
+ }
+ return ret;
+}
+
+/* register a new virtpci driver */
+int virtpci_register_driver(struct virtpci_driver *drv)
+{
+ int result = 0;
+
+ DBGINF("In virtpci_register_driver\n");
+
+ if (drv->id_table == NULL) {
+ LOGERR("id_table missing\n");
+ return 1;
+ }
+ /* initialize core driver fields needed to call driver_register */
+ drv->core_driver.name = drv->name; /* name of driver in sysfs */
+ drv->core_driver.bus = &virtpci_bus_type; /* type of bus this
+ * driver works with */
+ drv->core_driver.probe = virtpci_device_probe; /* called to query the
+ * existence of a
+ * specific device and
+ * whether this driver
+ * can work with it */
+ drv->core_driver.remove = virtpci_device_remove; /* called when the
+ * device is removed
+ * from the system */
+ /* register with core */
+ result = driver_register(&drv->core_driver);
+ /* calls bus_add_driver which calls driver_attach and
+ * module_add_driver
+ */
+ if (result)
+ return result; /* failed */
+
+ drv->core_driver.p->kobj.ktype = &virtpci_driver_kobj_type;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtpci_register_driver);
+
+void virtpci_unregister_driver(struct virtpci_driver *drv)
+{
+ DBGINF("In virtpci_unregister_driver drv:%p\n", drv);
+ driver_unregister(&drv->core_driver);
+ /* driver_unregister calls bus_remove_driver
+ * bus_remove_driver calls device_detach
+ * device_detach calls device_release_driver for each of the
+ * driver's devices
+ * device_release driver calls drv->remove which is
+ * virtpci_device_remove
+ * virtpci_device_remove calls virthba_remove
+ */
+ DBGINF("Leaving\n");
+}
+EXPORT_SYMBOL_GPL(virtpci_unregister_driver);
+
+/*****************************************************/
+/* proc filesystem functions */
+/*****************************************************/
+struct print_vbus_info {
+ int *length;
+ char *buf;
+};
+
+static int print_vbus(struct device *vbus, void *data)
+{
+ struct print_vbus_info *p = (struct print_vbus_info *) data;
+ int l = *(p->length);
+
+ *(p->length) = l + sprintf(p->buf + l, "bus_id:%s\n", dev_name(vbus));
+ return 0; /* no error */
+}
+
+static ssize_t info_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int length = 0;
+ struct virtpci_dev *tmpvpcidev;
+ unsigned long flags;
+ struct print_vbus_info printparam;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ length += sprintf(vbuf + length, "CHANSOCK is not defined.\n");
+
+ length += sprintf(vbuf + length, "\n Virtual PCI Bus devices\n");
+ printparam.length = &length;
+ printparam.buf = vbuf;
+ if (bus_for_each_dev(&virtpci_bus_type, NULL,
+ (void *) &printparam, print_vbus))
+ LOGERR("delete of all vbus failed\n");
+
+ length += sprintf(vbuf + length, "\n Virtual PCI devices\n");
+ read_lock_irqsave(&VpcidevListLock, flags);
+ tmpvpcidev = VpcidevListHead;
+ while (tmpvpcidev) {
+ if (tmpvpcidev->devtype == VIRTHBA_TYPE) {
+ length += sprintf(vbuf + length, "[%d:%d] VHba:%08x:%08x max-config:%d-%d-%d-%d",
+ tmpvpcidev->busNo, tmpvpcidev->deviceNo,
+ tmpvpcidev->scsi.wwnn.wwnn1,
+ tmpvpcidev->scsi.wwnn.wwnn2,
+ tmpvpcidev->scsi.max.max_channel,
+ tmpvpcidev->scsi.max.max_id,
+ tmpvpcidev->scsi.max.max_lun,
+ tmpvpcidev->scsi.max.cmd_per_lun);
+ } else {
+ length += sprintf(vbuf + length, "[%d:%d] VNic:%02x:%02x:%02x:%02x:%02x:%02x num_rcv_bufs:%d mtu:%d",
+ tmpvpcidev->busNo, tmpvpcidev->deviceNo,
+ tmpvpcidev->net.mac_addr[0],
+ tmpvpcidev->net.mac_addr[1],
+ tmpvpcidev->net.mac_addr[2],
+ tmpvpcidev->net.mac_addr[3],
+ tmpvpcidev->net.mac_addr[4],
+ tmpvpcidev->net.mac_addr[5],
+ tmpvpcidev->net.num_rcv_bufs,
+ tmpvpcidev->net.mtu);
+ }
+ length +=
+ sprintf(vbuf + length, " chanptr:%p\n",
+ tmpvpcidev->queueinfo.chan);
+ tmpvpcidev = tmpvpcidev->next;
+ }
+ read_unlock_irqrestore(&VpcidevListLock, flags);
+
+ length +=
+ sprintf(vbuf + length, "\nModule build: Date:%s Time:%s\n", __DATE__,
+ __TIME__);
+
+ length += sprintf(vbuf + length, "\n");
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
+
+static ssize_t virt_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[16];
+ int type, i, action = 0xffff;
+ unsigned int busno, deviceno;
+ void __iomem *chanptr;
+ struct add_vbus_guestpart busaddparams;
+ struct add_virt_guestpart addparams;
+ struct del_vbus_guestpart busdelparams;
+ struct del_virt_guestpart delparams;
+ GUID dummyGuid = GUID0;
+#ifdef STORAGE_CHANNEL
+ U64 storagechannel;
+#endif
+
+#define PRINT_USAGE_RETURN {\
+ LOGERR("usage: 0-0-<chanptr> ==> delete vhba\n"); \
+ LOGERR("usage: 0-1-<chanptr>-<busNo>-<deviceNo> ==> add vhba\n"); \
+ LOGERR("usage: 0-f-<busNo> ==> delete all vhbas\n"); \
+ LOGERR("\n"); \
+ LOGERR("usage: 1-0-<chanptr> ==> delete vnic\n"); \
+ LOGERR("usage: 1-1-<chanptr>-<busNo>-<deviceNo> ==> add vnic\n"); \
+ LOGERR("usage: 1-f-<busNo> ==> delete all vnics\n"); \
+ LOGERR("\n"); \
+ LOGERR("usage: 6-0-<busNo> ==> delete vbus\n"); \
+ LOGERR("usage: 6-1-<busNo> ==> add vbus\n"); \
+ LOGERR("usage: 6-f ==> delete all vbuses\n"); \
+ LOGERR("usage: 98-<busNo>-<deviceNo> ==> INJECT Client delete vnic\n"); \
+ LOGERR("usage: 99-<chanptr>-<busNo>-<deviceNo> ==> INJECT Client add vnic\n"); \
+ return -EINVAL; \
+}
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user failed.\n");
+ return -EFAULT;
+ }
+ buf[count] = '\0'; /* NUL-terminate for the sscanf calls below */
+
+ i = sscanf(buf, "%x-%x", &type, &action);
+ if (i < 2)
+ PRINT_USAGE_RETURN;
+
+ if (type == 0x98) {
+ /* client inject delete vnic */
+ i = sscanf(buf, "%x-%d-%d", &type, &busno, &deviceno);
+ if (i != 3)
+ PRINT_USAGE_RETURN;
+ uislib_client_inject_del_vnic(busno, deviceno);
+ return count; /* success */
+ } else if (type == 0x99) {
+ /* client inject add vnic */
+ i = sscanf(buf, "%x-%p-%d-%d", &type, &chanptr, &busno,
+ &deviceno);
+ if (i != 4)
+ PRINT_USAGE_RETURN;
+ if (!uislib_client_inject_add_vnic(busno, deviceno,
+ __pa(chanptr),
+ MIN_IO_CHANNEL_SIZE,
+ 1, /* test msg */
+ dummyGuid, /* inst guid */
+ NULL)) { /*interrupt info */
+ LOGERR("FAILED to inject add vnic\n");
+ return -EFAULT;
+ }
+ return count; /* success */
+ }
+
+ if ((type != VIRTHBA_TYPE) && (type != VIRTNIC_TYPE)
+ && (type != VIRTBUS_TYPE))
+ PRINT_USAGE_RETURN;
+
+ if (type == VIRTBUS_TYPE) {
+ i = sscanf(buf, "%x-%x-%d", &type, &action, &busno);
+ switch (action) {
+ case 0:
+ /* delete vbus */
+ if (i != 3)
+ break;
+ busdelparams.busNo = busno;
+ if (delete_vbus(&busdelparams))
+ return count; /* success */
+ return -EFAULT;
+
+ case 1:
+ /* add vbus */
+ if (i != 3)
+ break;
+ busaddparams.chanptr = NULL; /* NOT YET USED */
+ busaddparams.busNo = busno;
+ if (add_vbus(&busaddparams))
+ return count; /* success */
+ return -EFAULT;
+
+ case 0xf:
+ /* delete all vbuses and all vhbas/vnics on the buses */
+ if (i != 2)
+ break;
+ delete_all();
+ return count; /* success */
+ default:
+ break;
+ }
+ PRINT_USAGE_RETURN;
+ }
+
+ /* if (type == VIRTNIC_TYPE) or if (type == VIRTHBA_TYPE) */
+ switch (action) {
+ case 0:
+ /* delete vhba/vnic */
+ i = sscanf(buf, "%x-%x-%p", &type, &action, &chanptr);
+ if (i != 3)
+ break;
+ delparams.chanptr = chanptr;
+ if (type == VIRTHBA_TYPE) {
+ if (delete_vhba(&delparams))
+ return count; /* success */
+ } else {
+ if (delete_vnic(&delparams))
+ return count; /* success */
+ }
+ return -EFAULT;
+
+ case 1:
+ /* add vhba/vnic */
+ i = sscanf(buf, "%x-%x-%p-%d-%d", &type, &action, &chanptr,
+ &busno, &deviceno);
+ if (i != 5)
+ break;
+ addparams.chanptr = chanptr;
+ addparams.busNo = busno;
+ addparams.deviceNo = deviceno;
+ if (type == VIRTHBA_TYPE) {
+ if (add_vhba(&addparams))
+ return count; /* success */
+ } else {
+ if (add_vnic(&addparams))
+ return count; /* success */
+ }
+ return -EFAULT;
+
+#ifdef STORAGE_CHANNEL
+ case 2:
+ /* add vhba */
+ i = sscanf(buf, "%x-%x-%d-%d", &type, &action, &busno,
+ &deviceno);
+ if (i != 4)
+ break;
+ storagechannel = uislib_storage_channel(0); /* Get my storage channel */
+ /* ioremap_cache it now */
+ addparams.chanptr =
+ (void *) ioremap_cache(storagechannel, IO_CHANNEL_SIZE);
+ if (addparams.chanptr == NULL) {
+ LOGERR("Failure to get remap storage channel.\n");
+ return -EFAULT;
+ }
+ addparams.busNo = busno;
+ addparams.deviceNo = deviceno;
+ if (type == VIRTHBA_TYPE) {
+ if (add_vhba(&addparams))
+ return count; /* success */
+ }
+ return -EFAULT;
+#endif
+ case 0xf:
+ /* delete all vhbas/vnics */
+ i = sscanf(buf, "%x-%x-%d", &type, &action, &busno);
+ if (i != 3)
+ break;
+ busdelparams.busNo = busno;
+ delete_all_virt(type, &busdelparams);
+ return count; /* success */
+ default:
+ break;
+ }
+ PRINT_USAGE_RETURN;
+}
+
+/*****************************************************/
+/* Module Init & Exit functions */
+/*****************************************************/
+
+static int __init virtpci_mod_init(void)
+{
+ int ret;
+
+
+ LOGINF("Module build: Date:%s Time:%s...\n", __DATE__, __TIME__);
+
+ POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ ret = bus_register(&virtpci_bus_type);
+ /* creates /sys/bus/uisvirtpci which contains devices &
+ * drivers directory
+ */
+ if (ret) {
+ LOGERR("bus_register ****FAILED:%d\n", ret);
+ POSTCODE_LINUX_3(VPCI_CREATE_FAILURE_PC, ret,
+ POSTCODE_SEVERITY_ERR);
+ return ret;
+ }
+ DBGINF("bus_register successful\n");
+ BusDeviceInfo_Init(&Bus_DriverInfo,
+ "clientbus", "virtpci",
+ VERSION, NULL, __DATE__, __TIME__);
+
+ /* create a root bus used to parent all the virtpci buses. */
+ ret = device_register(&virtpci_rootbus_device);
+ if (ret) {
+ LOGERR("device_register FAILED:%d\n", ret);
+ bus_unregister(&virtpci_bus_type);
+ POSTCODE_LINUX_3(VPCI_CREATE_FAILURE_PC, ret,
+ POSTCODE_SEVERITY_ERR);
+ return ret;
+ }
+ DBGINF("device_register successful ret:%x\n", ret);
+
+ if (!uisctrl_register_req_handler(2, (void *) &virtpci_ctrlchan_func,
+ &Chipset_DriverInfo)) {
+ LOGERR("uisctrl_register_req_handler ****FAILED.\n");
+ POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ device_unregister(&virtpci_rootbus_device);
+ bus_unregister(&virtpci_bus_type);
+ return -1;
+ }
+
+ LOGINF("successfully registered virtpci_ctrlchan_func (0x%p) as callback.\n",
+ (void *) &virtpci_ctrlchan_func);
+ /* create the proc directories */
+ virtpci_proc_dir = proc_mkdir(DIR_PROC_ENTRY, NULL);
+ virt_proc_entry = proc_create(VIRT_PROC_ENTRY_FN, 0, virtpci_proc_dir,
+ &proc_virt_fops);
+ info_proc_entry = proc_create(INFO_PROC_ENTRY_FN, 0, virtpci_proc_dir,
+ &proc_info_fops);
+ LOGINF("Leaving\n");
+ POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ return 0;
+}
+
+static void __exit virtpci_mod_exit(void)
+{
+ LOGINF("virtpci_mod_exit...\n");
+
+ /* unregister the callback function */
+ if (!uisctrl_register_req_handler(2, NULL, NULL))
+ LOGERR("uisctrl_register_req_handler ****FAILED.\n");
+
+ device_unregister(&virtpci_rootbus_device);
+ bus_unregister(&virtpci_bus_type);
+
+ if (virt_proc_entry)
+ remove_proc_entry(VIRT_PROC_ENTRY_FN, virtpci_proc_dir);
+
+ if (info_proc_entry)
+ remove_proc_entry(INFO_PROC_ENTRY_FN, virtpci_proc_dir);
+
+ if (virtpci_proc_dir)
+ remove_proc_entry(DIR_PROC_ENTRY, NULL);
+
+ LOGINF("Leaving\n");
+
+}
+
+module_init(virtpci_mod_init);
+module_exit(virtpci_mod_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Usha Srinivasan");
+MODULE_ALIAS("uisvirtpci");
+
diff --git a/drivers/staging/unisys/virtpci/virtpci.h b/drivers/staging/unisys/virtpci/virtpci.h
new file mode 100644
index 000000000000..b8fd07bc5435
--- /dev/null
+++ b/drivers/staging/unisys/virtpci/virtpci.h
@@ -0,0 +1,104 @@
+/* virtpci.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Unisys Virtual PCI driver header
+ */
+
+#ifndef __VIRTPCI_H__
+#define __VIRTPCI_H__
+
+#include "uisqueue.h"
+#include <linux/version.h>
+
+#define PCI_DEVICE_ID_VIRTHBA 0xAA00
+#define PCI_DEVICE_ID_VIRTNIC 0xAB00
+
+struct scsi_adap_info {
+ void *scsihost; /* scsi host if this device is a scsi hba */
+ struct vhba_wwnn wwnn; /* the world wide node name of vhba */
+ struct vhba_config_max max; /* various max specifications used
+ * to config vhba */
+};
+
+struct net_adap_info {
+ struct net_device *netdev; /* network device if this
+ * device is a NIC */
+ u8 mac_addr[MAX_MACADDR_LEN];
+ int num_rcv_bufs;
+ unsigned mtu;
+ GUID zoneGuid;
+};
+
+typedef enum {
+ VIRTHBA_TYPE = 0,
+ VIRTNIC_TYPE = 1,
+ VIRTBUS_TYPE = 6,
+} VIRTPCI_DEV_TYPE;
+
+struct virtpci_dev {
+ VIRTPCI_DEV_TYPE devtype; /* indicates type of the
+ * virtual pci device */
+ struct virtpci_driver *mydriver; /* which driver has allocated
+ * this device */
+ unsigned short vendor; /* vendor id for device */
+ unsigned short device; /* device id for device */
+ U32 busNo; /* number of bus on which device exists */
+ U32 deviceNo; /* device's number on the bus */
+ struct InterruptInfo intr; /* interrupt info */
+ struct device generic_dev; /* generic device */
+ union {
+ struct scsi_adap_info scsi;
+ struct net_adap_info net;
+ };
+
+ struct uisqueue_info queueinfo; /* holds ptr to channel where cmds &
+ * rsps are queued & retrieved */
+ struct virtpci_dev *next; /* points to next virtpci device */
+};
+
+struct virtpci_driver {
+ struct list_head node;
+ const char *name; /* the name of the driver in sysfs */
+ const char *version;
+ const char *vertag;
+ const char *build_date;
+ const char *build_time;
+ const struct pci_device_id *id_table; /* must be non-NULL for probe
+ * to be called */
+ int (*probe)(struct virtpci_dev *dev,
+ const struct pci_device_id *id); /* device inserted */
+ void (*remove)(struct virtpci_dev *dev); /* Device removed (NULL if
+ * not a hot-plug capable
+ * driver) */
+ int (*suspend)(struct virtpci_dev *dev,
+ u32 state); /* Device suspended */
+ int (*resume)(struct virtpci_dev *dev); /* Device woken up */
+ int (*enable_wake)(struct virtpci_dev *dev,
+ u32 state, int enable); /* Enable wake event */
+ struct device_driver core_driver; /* VIRTPCI core fills this in */
+};
+
+#define driver_to_virtpci_driver(in_drv) \
+ container_of(in_drv, struct virtpci_driver, core_driver)
+#define device_to_virtpci_dev(in_dev) \
+ container_of(in_dev, struct virtpci_dev, generic_dev)
+
+int virtpci_register_driver(struct virtpci_driver *);
+void virtpci_unregister_driver(struct virtpci_driver *);
+
+#endif /* __VIRTPCI_H__ */
diff --git a/drivers/staging/unisys/visorchannel/Kconfig b/drivers/staging/unisys/visorchannel/Kconfig
new file mode 100644
index 000000000000..41c3b4b997eb
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys visorchannel configuration
+#
+
+config UNISYS_VISORCHANNEL
+ tristate "Unisys visorchannel driver"
+ depends on UNISYSSPAR && UNISYS_VISORUTIL
+ ---help---
+ If you say Y here, you will enable the Unisys visorchannel driver.
+
diff --git a/drivers/staging/unisys/visorchannel/Makefile b/drivers/staging/unisys/visorchannel/Makefile
new file mode 100644
index 000000000000..f0060be55bc5
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for Unisys visorchannel
+#
+
+obj-$(CONFIG_UNISYS_VISORCHANNEL) += visorchannel.o
+
+visorchannel-y := visorchannel_main.o visorchannel_funcs.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+ccflags-y += -Idrivers/staging/unisys/visorutil
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/visorchannel/globals.h b/drivers/staging/unisys/visorchannel/globals.h
new file mode 100644
index 000000000000..668f832ca566
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/globals.h
@@ -0,0 +1,29 @@
+/* globals.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VISORCHANNEL_GLOBALS_H__
+#define __VISORCHANNEL_GLOBALS_H__
+
+#include "uniklog.h"
+#include "timskmod.h"
+#include "memregion.h"
+#include "version.h"
+
+#define MYDRVNAME "visorchannel"
+
+
+#endif
diff --git a/drivers/staging/unisys/visorchannel/visorchannel.h b/drivers/staging/unisys/visorchannel/visorchannel.h
new file mode 100644
index 000000000000..62d29a233fd0
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/visorchannel.h
@@ -0,0 +1,106 @@
+/* visorchannel.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VISORCHANNEL_H__
+#define __VISORCHANNEL_H__
+
+#include "commontypes.h"
+#include "memregion.h"
+#include "channel.h"
+#ifndef HOSTADDRESS
+#define HOSTADDRESS U64
+#endif
+#ifndef BOOL
+#define BOOL int
+#endif
+
+/* VISORCHANNEL is an opaque structure to users.
+ * Fields are declared only in the implementation .c files.
+ */
+typedef struct VISORCHANNEL_Tag VISORCHANNEL;
+
+/* Note that for visorchannel_create() and visorchannel_create_overlapped(),
+ * <channelBytes> and <guid> arguments may be 0 if we are a channel CLIENT.
+ * In this case, the values can simply be read from the channel header.
+ */
+VISORCHANNEL *visorchannel_create(HOSTADDRESS physaddr,
+ ulong channelBytes, GUID guid);
+VISORCHANNEL *visorchannel_create_overlapped(ulong channelBytes,
+ VISORCHANNEL *parent, ulong off,
+ GUID guid);
+VISORCHANNEL *visorchannel_create_with_lock(HOSTADDRESS physaddr,
+ ulong channelBytes, GUID guid);
+VISORCHANNEL *visorchannel_create_overlapped_with_lock(ulong channelBytes,
+ VISORCHANNEL *parent,
+ ulong off, GUID guid);
+void visorchannel_destroy(VISORCHANNEL *channel);
+int visorchannel_read(VISORCHANNEL *channel, ulong offset,
+ void *local, ulong nbytes);
+int visorchannel_write(VISORCHANNEL *channel, ulong offset,
+ void *local, ulong nbytes);
+int visorchannel_clear(VISORCHANNEL *channel, ulong offset,
+ U8 ch, ulong nbytes);
+BOOL visorchannel_signalremove(VISORCHANNEL *channel, U32 queue, void *msg);
+BOOL visorchannel_signalinsert(VISORCHANNEL *channel, U32 queue, void *msg);
+int visorchannel_signalqueue_slots_avail(VISORCHANNEL *channel, U32 queue);
+int visorchannel_signalqueue_max_slots(VISORCHANNEL *channel, U32 queue);
+
+HOSTADDRESS visorchannel_get_physaddr(VISORCHANNEL *channel);
+ulong visorchannel_get_nbytes(VISORCHANNEL *channel);
+char *visorchannel_id(VISORCHANNEL *channel, char *s);
+char *visorchannel_zoneid(VISORCHANNEL *channel, char *s);
+U64 visorchannel_get_clientpartition(VISORCHANNEL *channel);
+GUID visorchannel_get_GUID(VISORCHANNEL *channel);
+MEMREGION *visorchannel_get_memregion(VISORCHANNEL *channel);
+char *visorchannel_GUID_id(GUID *guid, char *s);
+void visorchannel_debug(VISORCHANNEL *channel, int nQueues,
+ struct seq_file *seq, U32 off);
+void visorchannel_dump_section(VISORCHANNEL *chan, char *s,
+ int off, int len, struct seq_file *seq);
+void *visorchannel_get_header(VISORCHANNEL *channel);
+
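+/* Note: the state-change macros below assume a logCtx variable is visible
+ * at the call site; it is passed to the ULTRA_CHANNEL_*_TRANSITION macros.
+ */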
+#define VISORCHANNEL_CHANGE_SERVER_STATE(chan, chanId, newstate) \
+ do { \
+ U8 *p = (U8 *)visorchannel_get_header(chan); \
+ if (p) { \
+ ULTRA_CHANNEL_SERVER_TRANSITION(p, chanId, SrvState, \
+ newstate, logCtx); \
+ visorchannel_write \
+ (chan, \
+ offsetof(ULTRA_CHANNEL_PROTOCOL, SrvState), \
+ p + \
+ offsetof(ULTRA_CHANNEL_PROTOCOL, SrvState), \
+ sizeof(U32)); \
+ } \
+ } while (0)
+
+#define VISORCHANNEL_CHANGE_CLIENT_STATE(chan, chanId, newstate) \
+ do { \
+ U8 *p = (U8 *)visorchannel_get_header(chan); \
+ if (p) { \
+ ULTRA_CHANNEL_CLIENT_TRANSITION(p, chanId, \
+ newstate, logCtx); \
+ visorchannel_write \
+ (chan, \
+ offsetof(ULTRA_CHANNEL_PROTOCOL, CliStateOS), \
+ p + \
+ offsetof(ULTRA_CHANNEL_PROTOCOL, CliStateOS), \
+ sizeof(U32)); \
+ } \
+ } while (0)
+
+#endif
diff --git a/drivers/staging/unisys/visorchannel/visorchannel_funcs.c b/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
new file mode 100644
index 000000000000..053681616ba3
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
@@ -0,0 +1,674 @@
+/* visorchannel_funcs.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * This provides Supervisor channel communication primitives, which are
+ * independent of the mechanism used to access the channel data. All channel
+ * data is accessed using the memregion abstraction. (memregion has both
+ * a CM2 implementation and a direct memory implementation.)
+ */
+
+#include "globals.h"
+#include "visorchannel.h"
+#include "guidutils.h"
+
+#define MYDRVNAME "visorchannel"
+
+struct VISORCHANNEL_Tag {
+ MEMREGION *memregion; /* from visor_memregion_create() */
+ CHANNEL_HEADER chan_hdr;
+ GUID guid;
+ ulong size;
+ BOOL needs_lock;
+ spinlock_t insert_lock;
+ spinlock_t remove_lock;
+
+ struct {
+ SIGNAL_QUEUE_HEADER req_queue;
+ SIGNAL_QUEUE_HEADER rsp_queue;
+ SIGNAL_QUEUE_HEADER event_queue;
+ SIGNAL_QUEUE_HEADER ack_queue;
+ } safe_uis_queue;
+};
+
+/* Creates the VISORCHANNEL abstraction for a data area in memory, but does
+ * NOT modify this data area.
+ */
+static VISORCHANNEL *
+visorchannel_create_guts(HOSTADDRESS physaddr, ulong channelBytes,
+ VISORCHANNEL *parent, ulong off, GUID guid,
+ BOOL needs_lock)
+{
+ VISORCHANNEL *p = NULL;
+ void *rc = NULL;
+
+ p = kmalloc(sizeof(VISORCHANNEL), GFP_KERNEL|__GFP_NORETRY);
+ if (p == NULL) {
+ ERRDRV("allocation failed: (status=0)\n");
+ rc = NULL;
+ goto Away;
+ }
+ p->memregion = NULL;
+ p->needs_lock = needs_lock;
+ spin_lock_init(&p->insert_lock);
+ spin_lock_init(&p->remove_lock);
+
+ /* prepare chan_hdr (abstraction to read/write channel memory) */
+ if (parent == NULL)
+ p->memregion =
+ visor_memregion_create(physaddr, sizeof(CHANNEL_HEADER));
+ else
+ p->memregion =
+ visor_memregion_create_overlapped(parent->memregion,
+ off,
+ sizeof(CHANNEL_HEADER));
+ if (p->memregion == NULL) {
+ ERRDRV("visor_memregion_create failed failed: (status=0)\n");
+ rc = NULL;
+ goto Away;
+ }
+ if (visor_memregion_read(p->memregion, 0, &p->chan_hdr,
+ sizeof(CHANNEL_HEADER)) < 0) {
+ ERRDRV("visor_memregion_read failed: (status=0)\n");
+ rc = NULL;
+ goto Away;
+ }
+ if (channelBytes == 0)
+ /* we had better be a CLIENT of this channel */
+ channelBytes = (ulong) p->chan_hdr.Size;
+ if (STRUCTSEQUAL(guid, Guid0))
+ /* we had better be a CLIENT of this channel */
+ guid = p->chan_hdr.Type;
+ if (visor_memregion_resize(p->memregion, channelBytes) < 0) {
+ ERRDRV("visor_memregion_resize failed: (status=0)\n");
+ rc = NULL;
+ goto Away;
+ }
+ p->size = channelBytes;
+ p->guid = guid;
+
+ rc = p;
+Away:
+
+ if (rc == NULL) {
+ if (p != NULL) {
+ visorchannel_destroy(p);
+ p = NULL;
+ }
+ }
+ return rc;
+}
+
+VISORCHANNEL *
+visorchannel_create(HOSTADDRESS physaddr, ulong channelBytes, GUID guid)
+{
+ return visorchannel_create_guts(physaddr, channelBytes, NULL, 0, guid,
+ FALSE);
+}
+EXPORT_SYMBOL_GPL(visorchannel_create);
+
+VISORCHANNEL *
+visorchannel_create_with_lock(HOSTADDRESS physaddr, ulong channelBytes,
+ GUID guid)
+{
+ return visorchannel_create_guts(physaddr, channelBytes, NULL, 0, guid,
+ TRUE);
+}
+EXPORT_SYMBOL_GPL(visorchannel_create_with_lock);
+
+VISORCHANNEL *
+visorchannel_create_overlapped(ulong channelBytes,
+ VISORCHANNEL *parent, ulong off, GUID guid)
+{
+ return visorchannel_create_guts(0, channelBytes, parent, off, guid,
+ FALSE);
+}
+EXPORT_SYMBOL_GPL(visorchannel_create_overlapped);
+
+VISORCHANNEL *
+visorchannel_create_overlapped_with_lock(ulong channelBytes,
+ VISORCHANNEL *parent, ulong off,
+ GUID guid)
+{
+ return visorchannel_create_guts(0, channelBytes, parent, off, guid,
+ TRUE);
+}
+EXPORT_SYMBOL_GPL(visorchannel_create_overlapped_with_lock);
+
+void
+visorchannel_destroy(VISORCHANNEL *channel)
+{
+ if (channel == NULL)
+ return;
+ if (channel->memregion != NULL) {
+ visor_memregion_destroy(channel->memregion);
+ channel->memregion = NULL;
+ }
+ kfree(channel);
+}
+EXPORT_SYMBOL_GPL(visorchannel_destroy);
+
+HOSTADDRESS
+visorchannel_get_physaddr(VISORCHANNEL *channel)
+{
+ return visor_memregion_get_physaddr(channel->memregion);
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_physaddr);
+
+ulong
+visorchannel_get_nbytes(VISORCHANNEL *channel)
+{
+ return channel->size;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_nbytes);
+
+char *
+visorchannel_GUID_id(GUID *guid, char *s)
+{
+ return GUID_format1(guid, s);
+}
+EXPORT_SYMBOL_GPL(visorchannel_GUID_id);
+
+char *
+visorchannel_id(VISORCHANNEL *channel, char *s)
+{
+ return visorchannel_GUID_id(&channel->guid, s);
+}
+EXPORT_SYMBOL_GPL(visorchannel_id);
+
+char *
+visorchannel_zoneid(VISORCHANNEL *channel, char *s)
+{
+ return visorchannel_GUID_id(&channel->chan_hdr.ZoneGuid, s);
+}
+EXPORT_SYMBOL_GPL(visorchannel_zoneid);
+
+HOSTADDRESS
+visorchannel_get_clientpartition(VISORCHANNEL *channel)
+{
+ return channel->chan_hdr.PartitionHandle;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_clientpartition);
+
+GUID
+visorchannel_get_GUID(VISORCHANNEL *channel)
+{
+ return channel->guid;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_GUID);
+
+MEMREGION *
+visorchannel_get_memregion(VISORCHANNEL *channel)
+{
+ return channel->memregion;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_memregion);
+
+int
+visorchannel_read(VISORCHANNEL *channel, ulong offset,
+ void *local, ulong nbytes)
+{
+ int rc = visor_memregion_read(channel->memregion, offset,
+ local, nbytes);
+ if ((rc >= 0) && (offset == 0) && (nbytes >= sizeof(CHANNEL_HEADER)))
+ memcpy(&channel->chan_hdr, local, sizeof(CHANNEL_HEADER));
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_read);
+
+int
+visorchannel_write(VISORCHANNEL *channel, ulong offset,
+ void *local, ulong nbytes)
+{
+ if (offset == 0 && nbytes >= sizeof(CHANNEL_HEADER))
+ memcpy(&channel->chan_hdr, local, sizeof(CHANNEL_HEADER));
+ return visor_memregion_write(channel->memregion, offset, local, nbytes);
+}
+EXPORT_SYMBOL_GPL(visorchannel_write);
+
+int
+visorchannel_clear(VISORCHANNEL *channel, ulong offset, U8 ch, ulong nbytes)
+{
+ int rc = -1;
+ int bufsize = 65536;
+ int written = 0;
+ U8 *buf = vmalloc(bufsize);
+
+ if (buf == NULL) {
+ ERRDRV("%s failed memory allocation", __func__);
+ goto Away;
+ }
+ memset(buf, ch, bufsize);
+ while (nbytes > 0) {
+ ulong thisbytes = bufsize;
+ int x = -1;
+ if (nbytes < thisbytes)
+ thisbytes = nbytes;
+ x = visor_memregion_write(channel->memregion, offset + written,
+ buf, thisbytes);
+ if (x < 0) {
+ rc = x;
+ goto Away;
+ }
+ written += thisbytes;
+ nbytes -= thisbytes;
+ }
+ rc = 0;
+
+Away:
+ if (buf != NULL) {
+ vfree(buf);
+ buf = NULL;
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_clear);
+
+void *
+visorchannel_get_header(VISORCHANNEL *channel)
+{
+ return (void *) &(channel->chan_hdr);
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_header);
+
+/** Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a
+ * channel header
+ */
+#define SIG_QUEUE_OFFSET(chan_hdr, q) \
+ ((chan_hdr)->oChannelSpace + ((q) * sizeof(SIGNAL_QUEUE_HEADER)))
+
+/** Return offset of a specific queue entry (data) from the beginning of a
+ * channel header
+ */
+#define SIG_DATA_OFFSET(chan_hdr, q, sig_hdr, slot) \
+ (SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->oSignalBase + \
+ ((slot) * (sig_hdr)->SignalSize))
+
+/** Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
+ * into host memory
+ */
+#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
+ (visor_memregion_write(channel->memregion, \
+ SIG_QUEUE_OFFSET(&channel->chan_hdr, queue)+ \
+ offsetof(SIGNAL_QUEUE_HEADER, FIELD), \
+ &((sig_hdr)->FIELD), \
+ sizeof((sig_hdr)->FIELD)) >= 0)
+
+static BOOL
+sig_read_header(VISORCHANNEL *channel, U32 queue,
+ SIGNAL_QUEUE_HEADER *sig_hdr)
+{
+ BOOL rc = FALSE;
+
+ if (channel->chan_hdr.oChannelSpace < sizeof(CHANNEL_HEADER)) {
+ ERRDRV("oChannelSpace too small: (status=%d)\n", rc);
+ goto Away;
+ }
+
+ /* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
+
+ if (visor_memregion_read(channel->memregion,
+ SIG_QUEUE_OFFSET(&channel->chan_hdr, queue),
+ sig_hdr, sizeof(SIGNAL_QUEUE_HEADER)) < 0) {
+ ERRDRV("queue=%d SIG_QUEUE_OFFSET=%d",
+ queue, (int)SIG_QUEUE_OFFSET(&channel->chan_hdr, queue));
+ ERRDRV("visor_memregion_read of signal queue failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ rc = TRUE;
+Away:
+ return rc;
+}
+
+static BOOL
+sig_do_data(VISORCHANNEL *channel, U32 queue,
+ SIGNAL_QUEUE_HEADER *sig_hdr, U32 slot, void *data, BOOL is_write)
+{
+ BOOL rc = FALSE;
+ int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
+ sig_hdr, slot);
+ if (is_write) {
+ if (visor_memregion_write(channel->memregion,
+ signal_data_offset,
+ data, sig_hdr->SignalSize) < 0) {
+ ERRDRV("visor_memregion_write of signal data failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ } else {
+ if (visor_memregion_read(channel->memregion, signal_data_offset,
+ data, sig_hdr->SignalSize) < 0) {
+ ERRDRV("visor_memregion_read of signal data failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ }
+ rc = TRUE;
+Away:
+ return rc;
+}
+
+static inline BOOL
+sig_read_data(VISORCHANNEL *channel, U32 queue,
+ SIGNAL_QUEUE_HEADER *sig_hdr, U32 slot, void *data)
+{
+ return sig_do_data(channel, queue, sig_hdr, slot, data, FALSE);
+}
+
+static inline BOOL
+sig_write_data(VISORCHANNEL *channel, U32 queue,
+ SIGNAL_QUEUE_HEADER *sig_hdr, U32 slot, void *data)
+{
+ return sig_do_data(channel, queue, sig_hdr, slot, data, TRUE);
+}
+
+static inline unsigned char
+safe_sig_queue_validate(pSIGNAL_QUEUE_HEADER psafe_sqh,
+ pSIGNAL_QUEUE_HEADER punsafe_sqh,
+ U32 *phead, U32 *ptail)
+{
+ if ((*phead >= psafe_sqh->MaxSignalSlots)
+ || (*ptail >= psafe_sqh->MaxSignalSlots)) {
+ /* Choose 0 or max, maybe based on current tail value */
+ *phead = 0;
+ *ptail = 0;
+
+ /* Sync with client as necessary */
+ punsafe_sqh->Head = *phead;
+ punsafe_sqh->Tail = *ptail;
+
+ ERRDRV("safe_sig_queue_validate: head = 0x%x, tail = 0x%x, MaxSlots = 0x%x",
+ *phead, *ptail, psafe_sqh->MaxSignalSlots);
+ return 0;
+ }
+ return 1;
+} /* end safe_sig_queue_validate */
+
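+/* The signal queues below are rings: visorchannel_signalremove() advances
+ * Tail to consume a slot, and visorchannel_signalinsert() advances Head to
+ * produce one.  When advancing Head would collide with Tail, the queue is
+ * full; NumOverflows is bumped and nothing is inserted.
+ */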
+BOOL
+visorchannel_signalremove(VISORCHANNEL *channel, U32 queue, void *msg)
+{
+ BOOL rc = FALSE;
+ SIGNAL_QUEUE_HEADER sig_hdr;
+
+ if (channel->needs_lock)
+ spin_lock(&channel->remove_lock);
+
+ if (!sig_read_header(channel, queue, &sig_hdr)) {
+ rc = FALSE;
+ goto Away;
+ }
+ if (sig_hdr.Head == sig_hdr.Tail) {
+ rc = FALSE; /* no signals to remove */
+ goto Away;
+ }
+ sig_hdr.Tail = (sig_hdr.Tail + 1) % sig_hdr.MaxSignalSlots;
+ if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.Tail, msg)) {
+ ERRDRV("sig_read_data failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ sig_hdr.NumSignalsReceived++;
+
+ /* For each data field in SIGNAL_QUEUE_HEADER that was modified,
+ * update host memory.
+ */
+ MEMORYBARRIER;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, Tail)) {
+ ERRDRV("visor_memregion_write of Tail failed: (status=%d)\n",
+ rc);
+ goto Away;
+ }
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumSignalsReceived)) {
+ ERRDRV("visor_memregion_write of NumSignalsReceived failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ rc = TRUE;
+Away:
+ if (channel->needs_lock)
+ spin_unlock(&channel->remove_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalremove);
+
+BOOL
+visorchannel_signalinsert(VISORCHANNEL *channel, U32 queue, void *msg)
+{
+ BOOL rc = FALSE;
+ SIGNAL_QUEUE_HEADER sig_hdr;
+
+ if (channel->needs_lock)
+ spin_lock(&channel->insert_lock);
+
+ if (!sig_read_header(channel, queue, &sig_hdr)) {
+ rc = FALSE;
+ goto Away;
+ }
+
+ sig_hdr.Head = ((sig_hdr.Head + 1) % sig_hdr.MaxSignalSlots);
+ if (sig_hdr.Head == sig_hdr.Tail) {
+ sig_hdr.NumOverflows++;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumOverflows)) {
+ ERRDRV("visor_memregion_write of NumOverflows failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ rc = FALSE;
+ goto Away;
+ }
+
+ if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.Head, msg)) {
+ ERRDRV("sig_write_data failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ sig_hdr.NumSignalsSent++;
+
+ /* For each data field in SIGNAL_QUEUE_HEADER that was modified,
+ * update host memory.
+ */
+ MEMORYBARRIER;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, Head)) {
+ ERRDRV("visor_memregion_write of Head failed: (status=%d)\n",
+ rc);
+ goto Away;
+ }
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumSignalsSent)) {
+ ERRDRV("visor_memregion_write of NumSignalsSent failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ rc = TRUE;
+Away:
+ if (channel->needs_lock)
+ spin_unlock(&channel->insert_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
+
+
+int
+visorchannel_signalqueue_slots_avail(VISORCHANNEL *channel, U32 queue)
+{
+ SIGNAL_QUEUE_HEADER sig_hdr;
+ U32 slots_avail, slots_used;
+ U32 head, tail;
+
+ if (!sig_read_header(channel, queue, &sig_hdr))
+ return 0;
+ head = sig_hdr.Head;
+ tail = sig_hdr.Tail;
+ if (head < tail)
+ head = head + sig_hdr.MaxSignalSlots;
+ slots_used = (head - tail);
+ slots_avail = sig_hdr.MaxSignals - slots_used;
+ return (int) slots_avail;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalqueue_slots_avail);
+
+int
+visorchannel_signalqueue_max_slots(VISORCHANNEL *channel, U32 queue)
+{
+ SIGNAL_QUEUE_HEADER sig_hdr;
+ if (!sig_read_header(channel, queue, &sig_hdr))
+ return 0;
+ return (int) sig_hdr.MaxSignals;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalqueue_max_slots);
+
+static void
+sigqueue_debug(SIGNAL_QUEUE_HEADER *q, int which, struct seq_file *seq)
+{
+ seq_printf(seq, "Signal Queue #%d\n", which);
+ seq_printf(seq, " VersionId = %lu\n", (ulong) q->VersionId);
+ seq_printf(seq, " Type = %lu\n", (ulong) q->Type);
+ seq_printf(seq, " oSignalBase = %llu\n",
+ (long long) q->oSignalBase);
+ seq_printf(seq, " SignalSize = %lu\n", (ulong) q->SignalSize);
+ seq_printf(seq, " MaxSignalSlots = %lu\n",
+ (ulong) q->MaxSignalSlots);
+ seq_printf(seq, " MaxSignals = %lu\n", (ulong) q->MaxSignals);
+ seq_printf(seq, " FeatureFlags = %-16.16Lx\n",
+ (long long) q->FeatureFlags);
+ seq_printf(seq, " NumSignalsSent = %llu\n",
+ (long long) q->NumSignalsSent);
+ seq_printf(seq, " NumSignalsReceived = %llu\n",
+ (long long) q->NumSignalsReceived);
+ seq_printf(seq, " NumOverflows = %llu\n",
+ (long long) q->NumOverflows);
+ seq_printf(seq, " Head = %lu\n", (ulong) q->Head);
+ seq_printf(seq, " Tail = %lu\n", (ulong) q->Tail);
+}
+
+void
+visorchannel_debug(VISORCHANNEL *channel, int nQueues,
+ struct seq_file *seq, U32 off)
+{
+ HOSTADDRESS addr = 0;
+ ulong nbytes = 0, nbytes_region = 0;
+ MEMREGION *memregion = NULL;
+ CHANNEL_HEADER hdr;
+ CHANNEL_HEADER *phdr = &hdr;
+ char s[99];
+ int i = 0;
+ int errcode = 0;
+
+ if (channel == NULL) {
+ ERRDRV("%s no channel", __func__);
+ return;
+ }
+ memregion = channel->memregion;
+ if (memregion == NULL) {
+ ERRDRV("%s no memregion", __func__);
+ return;
+ }
+ addr = visor_memregion_get_physaddr(memregion);
+ nbytes_region = visor_memregion_get_nbytes(memregion);
+ errcode = visorchannel_read(channel, off,
+ phdr, sizeof(CHANNEL_HEADER));
+ if (errcode < 0) {
+ seq_printf(seq,
+ "Read of channel header failed with errcode=%d)\n",
+ errcode);
+ if (off == 0) {
+ phdr = &channel->chan_hdr;
+ seq_puts(seq, "(following data may be stale)\n");
+ } else
+ return;
+ }
+ nbytes = (ulong) (phdr->Size);
+ seq_printf(seq, "--- Begin channel @0x%-16.16Lx for 0x%lx bytes (region=0x%lx bytes) ---\n",
+ addr + off, nbytes, nbytes_region);
+ seq_printf(seq, "Type = %s\n", GUID_format2(&phdr->Type, s));
+ seq_printf(seq, "ZoneGuid = %s\n",
+ GUID_format2(&phdr->ZoneGuid, s));
+ seq_printf(seq, "Signature = 0x%-16.16Lx\n",
+ (long long) phdr->Signature);
+ seq_printf(seq, "LegacyState = %lu\n", (ulong) phdr->LegacyState);
+ seq_printf(seq, "SrvState = %lu\n", (ulong) phdr->SrvState);
+ seq_printf(seq, "CliStateBoot = %lu\n", (ulong) phdr->CliStateBoot);
+ seq_printf(seq, "CliStateOS = %lu\n", (ulong) phdr->CliStateOS);
+ seq_printf(seq, "HeaderSize = %lu\n", (ulong) phdr->HeaderSize);
+ seq_printf(seq, "Size = %llu\n", (long long) phdr->Size);
+ seq_printf(seq, "Features = 0x%-16.16llx\n",
+ (long long) phdr->Features);
+ seq_printf(seq, "PartitionHandle = 0x%-16.16llx\n",
+ (long long) phdr->PartitionHandle);
+ seq_printf(seq, "Handle = 0x%-16.16llx\n",
+ (long long) phdr->Handle);
+ seq_printf(seq, "VersionId = %lu\n", (ulong) phdr->VersionId);
+ seq_printf(seq, "oChannelSpace = %llu\n",
+ (long long) phdr->oChannelSpace);
+ if ((phdr->oChannelSpace != 0) && (errcode >= 0))
+ for (i = 0; i < nQueues; i++) {
+ SIGNAL_QUEUE_HEADER q;
+ errcode = visorchannel_read(channel,
+ off + phdr->oChannelSpace +
+ (i * sizeof(q)),
+ &q, sizeof(q));
+ if (errcode < 0) {
+ seq_printf(seq,
+ "failed to read signal queue #%d from channel @0x%-16.16Lx errcode=%d\n",
+ i, addr, errcode);
+ continue;
+ }
+ sigqueue_debug(&q, i, seq);
+ }
+ seq_printf(seq, "--- End channel @0x%-16.16Lx for 0x%lx bytes ---\n",
+ addr + off, nbytes);
+}
+EXPORT_SYMBOL_GPL(visorchannel_debug);
+
+void
+visorchannel_dump_section(VISORCHANNEL *chan, char *s,
+ int off, int len, struct seq_file *seq)
+{
+ char *buf, *tbuf, *fmtbuf;
+ int fmtbufsize = 0;
+ int i;
+ int errcode = 0;
+
+ fmtbufsize = 100 * COVQ(len, 16);
+ buf = kmalloc(len, GFP_KERNEL|__GFP_NORETRY);
+ fmtbuf = kmalloc(fmtbufsize, GFP_KERNEL|__GFP_NORETRY);
+ if (buf == NULL || fmtbuf == NULL)
+ goto Away;
+
+ errcode = visorchannel_read(chan, off, buf, len);
+ if (errcode < 0) {
+ ERRDRV("%s failed to read %s from channel errcode=%d",
+ __func__, s, errcode);
+ goto Away;
+ }
+ seq_printf(seq, "channel %s:\n", s);
+ tbuf = buf;
+ while (len > 0) {
+ i = (len < 16) ? len : 16;
+ hex_dump_to_buffer(tbuf, i, 16, 1, fmtbuf, fmtbufsize, TRUE);
+ seq_printf(seq, "%s\n", fmtbuf);
+ tbuf += 16;
+ len -= 16;
+ }
+
+Away:
+ if (buf != NULL) {
+ kfree(buf);
+ buf = NULL;
+ }
+ if (fmtbuf != NULL) {
+ kfree(fmtbuf);
+ fmtbuf = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(visorchannel_dump_section);
diff --git a/drivers/staging/unisys/visorchannel/visorchannel_main.c b/drivers/staging/unisys/visorchannel/visorchannel_main.c
new file mode 100644
index 000000000000..482ee0ac1c16
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/visorchannel_main.c
@@ -0,0 +1,49 @@
+/* visorchannel_main.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * This is a module "wrapper" around visorchannel_funcs.
+ */
+
+#include "globals.h"
+#include "channel.h"
+#include "visorchannel.h"
+#include "guidutils.h"
+
+#define MYDRVNAME "visorchannel"
+
+static int __init
+visorchannel_init(void)
+{
+ INFODRV("driver version %s loaded", VERSION);
+ return 0;
+}
+
+static void
+visorchannel_exit(void)
+{
+ INFODRV("driver unloaded");
+}
+
+module_init(visorchannel_init);
+module_exit(visorchannel_exit);
+
+MODULE_AUTHOR("Unisys");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Supervisor channel driver for service partition: ver "
+ VERSION);
+MODULE_VERSION(VERSION);
diff --git a/drivers/staging/unisys/visorchipset/Kconfig b/drivers/staging/unisys/visorchipset/Kconfig
new file mode 100644
index 000000000000..7ca2fbca9d57
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys visorchipset configuration
+#
+
+config UNISYS_VISORCHIPSET
+ tristate "Unisys visorchipset driver"
+ depends on UNISYSSPAR && UNISYS_VISORUTIL && UNISYS_VISORCHANNEL
+ ---help---
+ If you say Y here, you will enable the Unisys visorchipset driver.
+
diff --git a/drivers/staging/unisys/visorchipset/Makefile b/drivers/staging/unisys/visorchipset/Makefile
new file mode 100644
index 000000000000..f5e8650e1b0e
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for Unisys visorchipset
+#
+
+obj-$(CONFIG_UNISYS_VISORCHIPSET) += visorchipset.o
+
+visorchipset-y := visorchipset_main.o controlvm_direct.o file.o filexfer.o \
+ parser.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/uislib
+ccflags-y += -Idrivers/staging/unisys/visorchannel
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+ccflags-y += -Idrivers/staging/unisys/visorutil
+ccflags-y += -Iinclude/generated
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/visorchipset/controlvm.h b/drivers/staging/unisys/visorchipset/controlvm.h
new file mode 100644
index 000000000000..873fa12dfe6f
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/controlvm.h
@@ -0,0 +1,27 @@
+/* controlvm.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __CONTROLVM_H__
+#define __CONTROLVM_H__
+
+#include "timskmod.h"
+
+int controlvm_init(void);
+void controlvm_deinit(void);
+HOSTADDRESS controlvm_get_channel_address(void);
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/controlvm_direct.c b/drivers/staging/unisys/visorchipset/controlvm_direct.c
new file mode 100644
index 000000000000..b911ea85c093
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/controlvm_direct.c
@@ -0,0 +1,62 @@
+/* controlvm_direct.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* This is controlvm-related code that depends on firmware running
+ * on a virtual partition.
+ */
+
+#include "globals.h"
+#include "uisutils.h"
+#include "controlvm.h"
+#define CURRENT_FILE_PC VISOR_CHIPSET_PC_controlvm_direct_c
+
+
+/* We can fill in this code when we learn how to make vmcalls... */
+
+
+
+int controlvm_init(void)
+{
+ return 0;
+}
+
+
+
+void controlvm_deinit(void)
+{
+}
+
+
+
+HOSTADDRESS controlvm_get_channel_address(void)
+{
+ static BOOL warned = FALSE;
+ U64 addr = 0;
+
+ U32 size = 0;
+
+ if (!VMCALL_SUCCESSFUL(Issue_VMCALL_IO_CONTROLVM_ADDR(&addr, &size))) {
+ if (!warned) {
+ ERRDRV("%s - vmcall to determine controlvm channel addr failed",
+ __func__);
+ warned = TRUE;
+ }
+ return 0;
+ }
+ INFODRV("controlvm addr=%Lx", addr);
+ return addr;
+}
diff --git a/drivers/staging/unisys/visorchipset/file.c b/drivers/staging/unisys/visorchipset/file.c
new file mode 100644
index 000000000000..e214a1153282
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/file.c
@@ -0,0 +1,226 @@
+/* file.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* This contains the implementation that allows a usermode program to
+ * communicate with the visorchipset driver using a device/file interface.
+ */
+
+#include "globals.h"
+#include "visorchannel.h"
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include "uisutils.h"
+#include "file.h"
+
+#define CURRENT_FILE_PC VISOR_CHIPSET_PC_file_c
+
+static struct cdev Cdev;
+static VISORCHANNEL **PControlVm_channel;
+static dev_t MajorDev = -1; /**< indicates major num for device */
+static BOOL Registered = FALSE;
+
+static int visorchipset_open(struct inode *inode, struct file *file);
+static int visorchipset_release(struct inode *inode, struct file *file);
+static int visorchipset_mmap(struct file *file, struct vm_area_struct *vma);
+#ifdef HAVE_UNLOCKED_IOCTL
+long visorchipset_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#else
+int visorchipset_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+#endif
+
+static const struct file_operations visorchipset_fops = {
+ .owner = THIS_MODULE,
+ .open = visorchipset_open,
+ .read = NULL,
+ .write = NULL,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = visorchipset_ioctl,
+#else
+ .ioctl = visorchipset_ioctl,
+#endif
+ .release = visorchipset_release,
+ .mmap = visorchipset_mmap,
+};
+
+int
+visorchipset_file_init(dev_t majorDev, VISORCHANNEL **pControlVm_channel)
+{
+ int rc = -1;
+
+ PControlVm_channel = pControlVm_channel;
+ MajorDev = majorDev;
+ cdev_init(&Cdev, &visorchipset_fops);
+ Cdev.owner = THIS_MODULE;
+ if (MAJOR(MajorDev) == 0) {
+ /* dynamic major device number registration required */
+ if (alloc_chrdev_region(&MajorDev, 0, 1, MYDRVNAME) < 0) {
+ ERRDRV("Unable to allocate+register char device %s",
+ MYDRVNAME);
+ goto Away;
+ }
+ Registered = TRUE;
+ INFODRV("New major number %d registered\n", MAJOR(MajorDev));
+ } else {
+ /* static major device number registration required */
+ if (register_chrdev_region(MajorDev, 1, MYDRVNAME) < 0) {
+ ERRDRV("Unable to register char device %s", MYDRVNAME);
+ goto Away;
+ }
+ Registered = TRUE;
+ INFODRV("Static major number %d registered\n", MAJOR(MajorDev));
+ }
+ if (cdev_add(&Cdev, MKDEV(MAJOR(MajorDev), 0), 1) < 0) {
+ ERRDRV("failed to create char device: (status=%d)\n", rc);
+ goto Away;
+ }
+ INFODRV("Registered char device for %s (major=%d)",
+ MYDRVNAME, MAJOR(MajorDev));
+ rc = 0;
+Away:
+ return rc;
+}
+
+void
+visorchipset_file_cleanup(void)
+{
+ if (Cdev.ops != NULL)
+ cdev_del(&Cdev);
+ Cdev.ops = NULL;
+ if (Registered) {
+ if (MAJOR(MajorDev) >= 0) {
+ unregister_chrdev_region(MajorDev, 1);
+ MajorDev = MKDEV(0, 0);
+ }
+ Registered = FALSE;
+ }
+}
+
+static int
+visorchipset_open(struct inode *inode, struct file *file)
+{
+ unsigned minor_number = iminor(inode);
+ int rc = -ENODEV;
+
+ DEBUGDRV("%s", __func__);
+ if (minor_number != 0)
+ goto Away;
+ file->private_data = NULL;
+ rc = 0;
+Away:
+ if (rc < 0)
+ ERRDRV("%s minor=%d failed", __func__, minor_number);
+ return rc;
+}
+
+static int
+visorchipset_release(struct inode *inode, struct file *file)
+{
+ DEBUGDRV("%s", __func__);
+ return 0;
+}
+
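+/* mmap handler: a mapping whose page offset equals
+ * VISORCHIPSET_MMAP_CONTROLCHANOFFSET is backed by the channel whose
+ * physical address is read from the gpControlChannel field of the
+ * controlvm channel; any other offset is rejected with -ENOSYS.
+ */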
+static int
+visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ ulong physAddr = 0;
+ ulong offset = vma->vm_pgoff << PAGE_SHIFT;
+ GUEST_PHYSICAL_ADDRESS addr = 0;
+
+ /* sv_enable_dfp(); */
+ DEBUGDRV("%s", __func__);
+ if (offset & (PAGE_SIZE - 1)) {
+ ERRDRV("%s virtual address NOT page-aligned!", __func__);
+ return -ENXIO; /* need aligned offsets */
+ }
+ switch (offset) {
+ case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
+ vma->vm_flags |= VM_IO;
+ if (*PControlVm_channel == NULL) {
+ ERRDRV("%s no controlvm channel yet", __func__);
+ return -ENXIO;
+ }
+ visorchannel_read(*PControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ gpControlChannel), &addr,
+ sizeof(addr));
+ if (addr == 0) {
+ ERRDRV("%s control channel address is 0", __func__);
+ return -ENXIO;
+ }
+ physAddr = (ulong) (addr);
+ DEBUGDRV("mapping physical address = 0x%lx", physAddr);
+ if (remap_pfn_range(vma, vma->vm_start,
+ physAddr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ /*pgprot_noncached */
+ (vma->vm_page_prot))) {
+ ERRDRV("%s remap_pfn_range failed", __func__);
+ return -EAGAIN;
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+ DEBUGDRV("%s success!", __func__);
+ return 0;
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+long
+visorchipset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+#else
+int
+visorchipset_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+#endif
+{
+ int rc = SUCCESS;
+ S64 adjustment;
+ S64 vrtc_offset;
+ DBGINF("entered visorchipset_ioctl, cmd=%d", cmd);
+ switch (cmd) {
+ case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
+ /* get the physical rtc offset */
+ vrtc_offset = Issue_VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET();
+ if (copy_to_user
+ ((void __user *)arg, &vrtc_offset, sizeof(vrtc_offset))) {
+ rc = -EFAULT;
+ goto Away;
+ }
+ DBGINF("insde visorchipset_ioctl, cmd=%d, vrtc_offset=%lld",
+ cmd, vrtc_offset);
+ break;
+ case VMCALL_UPDATE_PHYSICAL_TIME:
+ if (copy_from_user
+ (&adjustment, (void __user *)arg, sizeof(adjustment))) {
+ rc = -EFAULT;
+ goto Away;
+ }
+ DBGINF("insde visorchipset_ioctl, cmd=%d, adjustment=%lld", cmd,
+ adjustment);
+ rc = Issue_VMCALL_UPDATE_PHYSICAL_TIME(adjustment);
+ break;
+ default:
+ LOGERR("visorchipset_ioctl received invalid command");
+ rc = -EFAULT;
+ break;
+ }
+Away:
+ DBGINF("exiting %d!", rc);
+ return rc;
+}
diff --git a/drivers/staging/unisys/visorchipset/file.h b/drivers/staging/unisys/visorchipset/file.h
new file mode 100644
index 000000000000..597282aa9a06
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/file.h
@@ -0,0 +1,26 @@
+/* file.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __FILE_H__
+#define __FILE_H__
+
+#include "globals.h"
+
+int visorchipset_file_init(dev_t majorDev, VISORCHANNEL **pControlVm_channel);
+void visorchipset_file_cleanup(void);
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/filexfer.c b/drivers/staging/unisys/visorchipset/filexfer.c
new file mode 100644
index 000000000000..f950d6e85b5f
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/filexfer.c
@@ -0,0 +1,506 @@
+/* filexfer.c
+ *
+ * Copyright © 2013 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* Code here-in is the "glue" that connects controlvm messages with the
+ * sparfilexfer driver, which is used to transfer file contents as payload
+ * across the controlvm channel.
+ */
+
+#include "globals.h"
+#include "controlvm.h"
+#include "visorchipset.h"
+#include "filexfer.h"
+
+#ifdef ENABLE_SPARFILEXFER /* sparfilexfer kernel module enabled in build */
+#include "sparfilexfer.h"
+
+/* Driver-global memory */
+static LIST_HEAD(Request_list); /* list of struct any_request *, via
+ * req_list member */
+
+/* Lock protecting <Request_list> above.  Note that kmem_cache_create()
+ * (used for the request pool below) requires that we keep the storage
+ * for the pool name for the life of the pool.
+ */
+static DEFINE_SPINLOCK(Request_list_lock);
+
+static struct kmem_cache *Request_memory_pool;
+static const char Request_memory_pool_name[] = "filexfer_request_pool";
+size_t Caller_req_context_bytes = 0; /* passed to filexfer_constructor() */
+
+/* This structure defines a single controlvm GETFILE conversation, which
+ * consists of a single controlvm request message and 1 or more controlvm
+ * response messages.
+ */
+struct getfile_request {
+ CONTROLVM_MESSAGE_HEADER controlvm_header;
+ atomic_t buffers_in_use;
+ GET_CONTIGUOUS_CONTROLVM_PAYLOAD_FUNC get_contiguous_controlvm_payload;
+ CONTROLVM_RESPOND_WITH_PAYLOAD_FUNC controlvm_respond_with_payload;
+};
+
+/* This structure defines a single controlvm PUTFILE conversation, which
+ * consists of a single controlvm request with a filename, and additional
+ * controlvm messages with file data.
+ */
+struct putfile_request {
+ GET_CONTROLVM_FILEDATA_FUNC get_controlvm_filedata;
+ CONTROLVM_RESPOND_FUNC controlvm_end_putFile;
+};
+
+/* This structure defines a single file transfer operation, which can either
+ * be a GETFILE or PUTFILE.
+ */
+struct any_request {
+ struct list_head req_list;
+ ulong2 file_request_number;
+ ulong2 data_sequence_number;
+ TRANSMITFILE_DUMP_FUNC dump_func;
+ BOOL is_get;
+ union {
+ struct getfile_request get;
+ struct putfile_request put;
+ };
+ /* The storage for caller_context_data is
+ * <Caller_req_context_bytes> bytes. It is explicitly aligned because
+ * an arbitrary caller data structure with unknown alignment
+ * requirements gets copied here; caller_context_data therefore sits
+ * on the coarsest alignment boundary that could be required for any
+ * user data structure.
+ */
+ u8 caller_context_data[1] __aligned(sizeof(ulong2));
+};
+
+/*
+ * Links the any_request into the global list of allocated requests
+ * (<Request_list>).
+ */
+static void
+unit_tracking_create(struct list_head *dev_list_link)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&Request_list_lock, flags);
+ list_add(dev_list_link, &Request_list);
+ spin_unlock_irqrestore(&Request_list_lock, flags);
+}
+
+/* Unlinks an any_request from the global list (<Request_list>).
+ */
+static void
+unit_tracking_destroy(struct list_head *dev_list_link)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&Request_list_lock, flags);
+ list_del(dev_list_link);
+ spin_unlock_irqrestore(&Request_list_lock, flags);
+}
+
+/* Allocate memory for and return a new any_request struct, and
+ * link it to the global list of outstanding requests.
+ */
+static struct any_request *
+alloc_request(char *fn, int ln)
+{
+ struct any_request *req = (struct any_request *)
+ (visorchipset_cache_alloc(Request_memory_pool,
+ FALSE,
+ fn, ln));
+ if (!req)
+ return NULL;
+ memset(req, 0, sizeof(struct any_request) + Caller_req_context_bytes);
+ unit_tracking_create(&req->req_list);
+ return req;
+}
+
+/* Book-end for alloc_request().
+ */
+static void
+free_request(struct any_request *req, char *fn, int ln)
+{
+ unit_tracking_destroy(&req->req_list);
+ visorchipset_cache_free(Request_memory_pool, req, fn, ln);
+}
+
+/* Constructor for filexfer.o.
+ */
+int
+filexfer_constructor(size_t req_context_bytes)
+{
+ int rc = -1;
+
+ Caller_req_context_bytes = req_context_bytes;
+ Request_memory_pool =
+ kmem_cache_create(Request_memory_pool_name,
+ sizeof(struct any_request) +
+ Caller_req_context_bytes,
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!Request_memory_pool) {
+ LOGERR("failed to alloc Request_memory_pool");
+ rc = -ENOMEM;
+ goto Away;
+ }
+ rc = 0;
+Away:
+ if (rc < 0) {
+ if (Request_memory_pool) {
+ kmem_cache_destroy(Request_memory_pool);
+ Request_memory_pool = NULL;
+ }
+ }
+ return rc;
+}
+
+/* Destructor for filexfer.o.
+ */
+void
+filexfer_destructor(void)
+{
+ if (Request_memory_pool) {
+ kmem_cache_destroy(Request_memory_pool);
+ Request_memory_pool = NULL;
+ }
+}
+
+/* This function will obtain an available chunk from the controlvm payload area,
+ * store the size in bytes of the chunk in <actual_size>, and return a pointer
+ * to the chunk. The function is passed to the sparfilexfer driver, which calls
+ * it whenever payload space is required to copy file data into.
+ */
+static void *
+get_empty_bucket_for_getfile_data(void *context,
+ ulong min_size, ulong max_size,
+ ulong *actual_size)
+{
+ void *bucket;
+ struct any_request *req = (struct any_request *) context;
+
+ if (!req->is_get) {
+ LOGERR("%s - unexpected call", __func__);
+ return NULL;
+ }
+ bucket = (*req->get.get_contiguous_controlvm_payload)
+ (min_size, max_size, actual_size);
+ if (bucket != NULL) {
+ atomic_inc(&req->get.buffers_in_use);
+ DBGINF("%s - sent %lu-byte buffer", __func__, *actual_size);
+ }
+ return bucket;
+}
+
+/* This function will send a controlvm response with data in the payload
+ * (whose space was obtained with get_empty_bucket_for_getfile_data). The
+ * function is passed to the sparfilexfer driver, which calls it whenever it
+ * wants to send file data back across the controlvm channel.
+ */
+static int
+send_full_getfile_data_bucket(void *context, void *bucket,
+ ulong bucket_actual_size, ulong bucket_used_size)
+{
+ struct any_request *req = (struct any_request *) context;
+
+ if (!req->is_get) {
+ LOGERR("%s - unexpected call", __func__);
+ return 0;
+ }
+ DBGINF("sending buffer for %lu/%lu",
+ bucket_used_size, bucket_actual_size);
+ if (!(*req->get.controlvm_respond_with_payload)
+ (&req->get.controlvm_header,
+ req->file_request_number,
+ req->data_sequence_number++,
+ 0, bucket, bucket_actual_size, bucket_used_size, TRUE))
+ atomic_dec(&req->get.buffers_in_use);
+ return 0;
+}
+
+/* This function will send a controlvm response indicating the end of a
+ * GETFILE transfer. The function is passed to the sparfilexfer driver.
+ */
+static void
+send_end_of_getfile_data(void *context, int status)
+{
+ struct any_request *req = (struct any_request *) context;
+ if (!req->is_get) {
+ LOGERR("%s - unexpected call", __func__);
+ return;
+ }
+ LOGINF("status=%d", status);
+ (*req->get.controlvm_respond_with_payload)
+ (&req->get.controlvm_header,
+ req->file_request_number,
+ req->data_sequence_number++, status, NULL, 0, 0, FALSE);
+ free_request(req, __FILE__, __LINE__);
+ module_put(THIS_MODULE);
+}
+
+/* This function supplies data for a PUTFILE transfer.
+ * The function is passed to the sparfilexfer driver.
+ */
+static int
+get_putfile_data(void *context, void *pbuf, size_t bufsize,
+ BOOL buf_is_userspace, size_t *bytes_transferred)
+{
+ struct any_request *req = (struct any_request *) context;
+ if (req->is_get) {
+ LOGERR("%s - unexpected call", __func__);
+ return -1;
+ }
+ return (*req->put.get_controlvm_filedata) (&req->caller_context_data[0],
+ pbuf, bufsize,
+ buf_is_userspace,
+ bytes_transferred);
+}
+
+/* This function is called to indicate the end of a PUTFILE transfer.
+ * The function is passed to the sparfilexfer driver.
+ */
+static void
+end_putfile(void *context, int status)
+{
+ struct any_request *req = (struct any_request *) context;
+ if (req->is_get) {
+ LOGERR("%s - unexpected call", __func__);
+ return;
+ }
+ (*req->put.controlvm_end_putFile) (&req->caller_context_data[0],
+ status);
+ free_request(req, __FILE__, __LINE__);
+ module_put(THIS_MODULE);
+}
+
+/* Refer to filexfer.h for description. */
+BOOL
+filexfer_getFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ ulong2 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ GET_CONTIGUOUS_CONTROLVM_PAYLOAD_FUNC
+ get_contiguous_controlvm_payload,
+ CONTROLVM_RESPOND_WITH_PAYLOAD_FUNC
+ controlvm_respond_with_payload,
+ TRANSMITFILE_DUMP_FUNC dump_func)
+{
+ BOOL use_count_up = FALSE;
+ BOOL failed = TRUE;
+ struct any_request *req = alloc_request(__FILE__, __LINE__);
+
+ if (!req) {
+ LOGERR("allocation of any_request failed");
+ goto Away;
+ }
+ /* We need to increment this module's use count because we're handing
+ * off pointers to functions within this module to be used by
+ * another module.
+ */
+ __module_get(THIS_MODULE);
+ use_count_up = TRUE;
+ req->is_get = TRUE;
+ req->file_request_number = file_request_number;
+ req->data_sequence_number = 0;
+ req->dump_func = dump_func;
+ req->get.controlvm_header = *msgHdr;
+ atomic_set(&req->get.buffers_in_use, 0);
+ req->get.get_contiguous_controlvm_payload =
+ get_contiguous_controlvm_payload;
+ req->get.controlvm_respond_with_payload =
+ controlvm_respond_with_payload;
+ if (sparfilexfer_local2remote(req, /* context, passed to
+ * callback funcs */
+ file_name,
+ file_request_number,
+ uplink_index,
+ disk_index,
+ get_empty_bucket_for_getfile_data,
+ send_full_getfile_data_bucket,
+ send_end_of_getfile_data) < 0) {
+ LOGERR("sparfilexfer_local2remote failed");
+ goto Away;
+ }
+ failed = FALSE;
+Away:
+ if (failed) {
+ if (use_count_up) {
+ module_put(THIS_MODULE);
+ use_count_up = FALSE;
+ }
+ if (req) {
+ free_request(req, __FILE__, __LINE__);
+ req = NULL;
+ }
+ return FALSE;
+ } else {
+ return TRUE;
+ /* success; send callbacks will be called for responses */
+ }
+}
+
+/* Refer to filexfer.h for description. */
+void *
+filexfer_putFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ ulong2 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ TRANSMITFILE_INIT_CONTEXT_FUNC init_context,
+ GET_CONTROLVM_FILEDATA_FUNC get_controlvm_filedata,
+ CONTROLVM_RESPOND_FUNC controlvm_end_putFile,
+ TRANSMITFILE_DUMP_FUNC dump_func)
+{
+ BOOL use_count_up = FALSE;
+ BOOL failed = TRUE;
+ struct any_request *req = alloc_request(__FILE__, __LINE__);
+ void *caller_ctx = NULL;
+
+ if (!req) {
+ LOGERR("allocation of any_request failed");
+ goto Away;
+ }
+ caller_ctx = (void *) (&(req->caller_context_data[0]));
+ /* We need to increment this module's use count because we're handing
+ * off pointers to functions within this module to be used by
+ * another module.
+ */
+ __module_get(THIS_MODULE);
+ use_count_up = TRUE;
+ req->is_get = FALSE;
+ req->file_request_number = file_request_number;
+ req->data_sequence_number = 0;
+ req->dump_func = dump_func;
+ req->put.get_controlvm_filedata = get_controlvm_filedata;
+ req->put.controlvm_end_putFile = controlvm_end_putFile;
+ (*init_context) (caller_ctx, msgHdr, file_request_number);
+ if (sparfilexfer_remote2local(req, /* context, passed to
+ * callback funcs */
+ file_name,
+ file_request_number,
+ uplink_index,
+ disk_index,
+ get_putfile_data, end_putfile) < 0) {
+ LOGERR("sparfilexfer_remote2local failed");
+ goto Away;
+ }
+ failed = FALSE;
+Away:
+ if (failed) {
+ if (use_count_up) {
+ module_put(THIS_MODULE);
+ use_count_up = FALSE;
+ }
+ if (req) {
+ free_request(req, __FILE__, __LINE__);
+ req = NULL;
+ }
+ return NULL;
+ } else {
+ return caller_ctx;
+ /* success; callbacks will be called for responses */
+ }
+}
+
+static void
+dump_get_request(struct seq_file *f, struct getfile_request *getreq)
+{
+ seq_printf(f, " buffers_in_use=%d\n",
+ atomic_read(&getreq->buffers_in_use));
+}
+
+static void
+dump_put_request(struct seq_file *f, struct putfile_request *putreq)
+{
+}
+
+static void
+dump_request(struct seq_file *f, struct any_request *req)
+{
+ seq_printf(f, "* %s id=%llu seq=%llu\n",
+ ((req->is_get) ? "Get" : "Put"),
+ req->file_request_number, req->data_sequence_number);
+ if (req->is_get)
+ dump_get_request(f, &req->get);
+ else
+ dump_put_request(f, &req->put);
+ if (req->dump_func)
+ (*req->dump_func) (f, &(req->caller_context_data[0]), " ");
+}
+
+void
+filexfer_dump(struct seq_file *f)
+{
+ ulong flags;
+ struct list_head *entry;
+
+ seq_puts(f, "Outstanding TRANSMIT_FILE requests:\n");
+ spin_lock_irqsave(&Request_list_lock, flags);
+ list_for_each(entry, &Request_list) {
+ struct any_request *req;
+ req = list_entry(entry, struct any_request, req_list);
+ dump_request(f, req);
+ }
+ spin_unlock_irqrestore(&Request_list_lock, flags);
+}
+
+#else /* ifdef ENABLE_SPARFILEXFER */
+int
+filexfer_constructor(size_t req_context_bytes)
+{
+ return 0; /* success */
+}
+
+void
+filexfer_destructor(void)
+{
+}
+
+BOOL
+filexfer_getFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u64 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ GET_CONTIGUOUS_CONTROLVM_PAYLOAD_FUNC
+ get_contiguous_controlvm_payload,
+ CONTROLVM_RESPOND_WITH_PAYLOAD_FUNC
+ controlvm_respond_with_payload,
+ TRANSMITFILE_DUMP_FUNC dump_func)
+{
+ /* since no sparfilexfer module exists to call, we just fail */
+ return FALSE;
+}
+
+void *
+filexfer_putFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u64 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ TRANSMITFILE_INIT_CONTEXT_FUNC init_context,
+ GET_CONTROLVM_FILEDATA_FUNC get_controlvm_filedata,
+ CONTROLVM_RESPOND_FUNC controlvm_end_putFile,
+ TRANSMITFILE_DUMP_FUNC dump_func)
+{
+ /* since no sparfilexfer module exists to call, we just fail */
+ return NULL;
+}
+
+void
+filexfer_dump(struct seq_file *f)
+{
+}
+
+#endif /* ifdef ENABLE_SPARFILEXFER */
diff --git a/drivers/staging/unisys/visorchipset/filexfer.h b/drivers/staging/unisys/visorchipset/filexfer.h
new file mode 100644
index 000000000000..a1bfca69cdcf
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/filexfer.h
@@ -0,0 +1,147 @@
+/* filexfer.h
+ *
+ * Copyright © 2013 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* This header file defines the interface that filexfer.c provides to other
+ * code in the visorchipset driver.
+ */
+
+#ifndef __FILEXFER_H__
+#define __FILEXFER_H__
+
+#include "globals.h"
+#include "controlvmchannel.h"
+#include <linux/seq_file.h>
+
+typedef void *(*GET_CONTIGUOUS_CONTROLVM_PAYLOAD_FUNC) (ulong min_size,
+ ulong max_size,
+ ulong *actual_size);
+
+typedef BOOL
+(*CONTROLVM_RESPOND_WITH_PAYLOAD_FUNC) (CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u64 fileRequestNumber,
+ u64 dataSequenceNumber,
+ int response,
+ void *bucket, ulong payloadChunkSize,
+ ulong payloadUsedBytes, BOOL partial);
+
+typedef void
+(*TRANSMITFILE_INIT_CONTEXT_FUNC)(void *ctx,
+ const CONTROLVM_MESSAGE_HEADER *hdr,
+ u64 file_request_number);
+typedef void (*TRANSMITFILE_DUMP_FUNC) (struct seq_file *f, void *ctx,
+ const char *pfx);
+typedef int (*GET_CONTROLVM_FILEDATA_FUNC) (void *ctx,
+ void *buf, size_t bufsize,
+ BOOL buf_is_userspace,
+ size_t *bytes_transferred);
+typedef void (*CONTROLVM_RESPOND_FUNC) (void *ctx, int response);
+
+/* Call once to initialize filexfer.o.
+ * req_context_bytes is the number of bytes the caller needs in order to
+ * track each file transfer conversation. The context area handed to the
+ * <init_context> callback of filexfer_putFile() is assumed to be this many
+ * bytes in size. Code within filexfer.o reserves a dynamically-allocated
+ * area of this size, and passes a pointer to that area back in callback
+ * functions.
+ */
+int filexfer_constructor(size_t req_context_bytes);
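+
+/* A minimal usage sketch (the caller context struct and error handling are
+ * hypothetical, shown only for illustration):
+ *
+ *	struct my_xfer_ctx {			// caller-defined bookkeeping
+ *		CONTROLVM_MESSAGE_HEADER hdr;
+ *		u64 request_id;
+ *	};
+ *
+ *	if (filexfer_constructor(sizeof(struct my_xfer_ctx)) < 0)
+ *		return -ENOMEM;			// pool could not be created
+ *	...
+ *	filexfer_destructor();			// at driver teardown
+ */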
+
+/* Call once to clean up filexfer.o */
+void filexfer_destructor(void);
+
+/* Call this to dump diagnostic info about all outstanding getFiles/putFiles */
+void filexfer_dump(struct seq_file *f);
+
+/* Call to transfer a file from the local filesystem (i.e., from the environment
+ * where this driver is running) across the controlvm channel to a remote
+ * environment. One or more controlvm responses will be sent as a result,
+ * each carrying file data in its payload. Only the last controlvm message
+ * will have Flags.partialCompletion==0.
+ *
+ * msgHdr the controlvm message header of the GETFILE request which
+ * we just received
+ * file_request_number
+ * uplink_index        these are all data from the GETFILE request
+ * disk_index          that define which file is to be transferred
+ * file_name
+ * get_contiguous_controlvm_payload function to call when space is needed
+ * in the payload area
+ * controlvm_respond_with_payload function to call to send each controlvm
+ * response containing file data as the
+ * payload; returns FALSE only if the
+ * payload buffer was freed inline
+ * dump_func function to dump context data in
+ * human-readable format
+ *
+ * Returns TRUE iff the file transfer request has been successfully initiated,
+ * or FALSE to indicate failure.
+ */
+BOOL
+filexfer_getFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u64 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ GET_CONTIGUOUS_CONTROLVM_PAYLOAD_FUNC
+ get_contiguous_controlvm_payload,
+ CONTROLVM_RESPOND_WITH_PAYLOAD_FUNC
+ controlvm_respond_with_payload,
+ TRANSMITFILE_DUMP_FUNC dump_func);
+
+/* Call to create a file in the local filesystem (i.e., in the environment
+ * where this driver is running) from data received as payload in
+ * controlvm channel messages from a remote environment. 1 or more controlvm
+ * messages will be received for this transfer, and only the last will have
+ * Flags.partialCompletion==0.
+ *
+ * msgHdr the controlvm message header of the PUTFILE request which
+ * we just received
+ * file_request_number
+ * uplink_index        these are all data from the PUTFILE request that
+ * disk_index          define which file is to be created in the local
+ * file_name           filesystem
+ * init_context function to call to initialize the
+ * <req_context_bytes>-sized storage area returned by
+ * this func; note that it would NOT be sufficient to
+ * allow the caller to initialize this upon return, as
+ * the other user-supplied callbacks might have
+ * already been called by then
+ * get_controlvm_filedata function to call to obtain more data for the file
+ * being written; refer to get_controlvm_filedata()
+ * in visorchipset_main.c for a complete description
+ * of parameters
+ * controlvm_end_putFile function to call to indicate that creation of the
+ * local file has completed; set <response> to a
+ * negative value to indicate an error
+ * dump_func function to dump context data in human-readable
+ * format
+ *
+ * Returns a pointer to a dynamically-allocated storage area of size
+ * <req_context_bytes> which the caller can use, or NULL for error. The
+ * caller should NEVER free the returned pointer, but should expect to receive
+ * it as the <ctx> argument when callback functions are called.
+ */
+void *filexfer_putFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u64 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ TRANSMITFILE_INIT_CONTEXT_FUNC init_context,
+ GET_CONTROLVM_FILEDATA_FUNC get_controlvm_filedata,
+ CONTROLVM_RESPOND_FUNC controlvm_end_putFile,
+ TRANSMITFILE_DUMP_FUNC dump_func);
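+
+/* A minimal calling sketch (the callback names are hypothetical):
+ *
+ *	void *ctx = filexfer_putFile(msgHdr, file_request_number,
+ *				     uplink_index, disk_index, file_name,
+ *				     my_init_ctx, my_get_filedata,
+ *				     my_end_putfile, my_dump);
+ *	if (!ctx)
+ *		return;		// transfer could not be started
+ *	// my_get_filedata()/my_end_putfile() are later invoked with a ctx
+ *	// argument pointing at this same area; never free it here.
+ */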
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/globals.h b/drivers/staging/unisys/visorchipset/globals.h
new file mode 100644
index 000000000000..a0e6d4fc03ae
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/globals.h
@@ -0,0 +1,45 @@
+/* globals.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+
+#ifndef __VISORCHIPSET_GLOBALS_H__
+#define __VISORCHIPSET_GLOBALS_H__
+
+#include "uniklog.h"
+#include "diagnostics/appos_subsystems.h"
+#include "timskmod.h"
+#include "visorchipset.h"
+#include "visorchipset_umode.h"
+#include "version.h"
+
+#define MYDRVNAME "visorchipset"
+
+
+/* module parameters */
+
+extern int visorchipset_testvnic;
+extern int visorchipset_testvnicclient;
+extern int visorchipset_testmsg;
+extern int visorchipset_major;
+extern int visorchipset_serverregwait;
+extern int visorchipset_clientregwait;
+extern int visorchipset_testteardown;
+extern int visorchipset_disable_controlvm;
+extern int visorchipset_crash_kernel;
+extern int visorchipset_holdchipsetready;
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/parser.c b/drivers/staging/unisys/visorchipset/parser.c
new file mode 100644
index 000000000000..b408d415bd8c
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/parser.c
@@ -0,0 +1,474 @@
+/* parser.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include "parser.h"
+#include "memregion.h"
+#include "controlvmchannel.h"
+#include <linux/ctype.h>
+#include <linux/mm.h>
+
+#define MYDRVNAME "visorchipset_parser"
+#define CURRENT_FILE_PC VISOR_CHIPSET_PC_parser_c
+
+/* We will refuse to allocate more than this many bytes to copy data from
+ * incoming payloads. This serves as a throttling mechanism.
+ */
+#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
+static ulong Controlvm_Payload_Bytes_Buffered;
+
+struct PARSER_CONTEXT_Tag {
+ ulong allocbytes;
+ ulong param_bytes;
+ u8 *curr;
+ ulong bytes_remaining;
+ BOOL byte_stream;
+ char data[0];
+};
+
+static PARSER_CONTEXT *
+parser_init_guts(U64 addr, U32 bytes, BOOL isLocal,
+ BOOL hasStandardPayloadHeader, BOOL *tryAgain)
+{
+ int allocbytes = sizeof(PARSER_CONTEXT) + bytes;
+ PARSER_CONTEXT *rc = NULL;
+ PARSER_CONTEXT *ctx = NULL;
+ MEMREGION *rgn = NULL;
+ ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+
+ if (tryAgain)
+ *tryAgain = FALSE;
+ if (!hasStandardPayloadHeader)
+ /* allocate 1 extra byte (zeroed by the kzalloc below) so the
+ * payload is guaranteed to be '\0'-terminated
+ */
+ allocbytes++;
+ if ((Controlvm_Payload_Bytes_Buffered + bytes)
+ > MAX_CONTROLVM_PAYLOAD_BYTES) {
+ ERRDRV("%s (%s:%d) - prevented allocation of %d bytes to prevent exceeding throttling max (%d)",
+ __func__, __FILE__, __LINE__, allocbytes,
+ MAX_CONTROLVM_PAYLOAD_BYTES);
+ if (tryAgain)
+ *tryAgain = TRUE;
+ rc = NULL;
+ goto Away;
+ }
+ ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
+ if (ctx == NULL) {
+ ERRDRV("%s (%s:%d) - failed to allocate %d bytes",
+ __func__, __FILE__, __LINE__, allocbytes);
+ if (tryAgain)
+ *tryAgain = TRUE;
+ rc = NULL;
+ goto Away;
+ }
+
+ ctx->allocbytes = allocbytes;
+ ctx->param_bytes = bytes;
+ ctx->curr = NULL;
+ ctx->bytes_remaining = 0;
+ ctx->byte_stream = FALSE;
+ if (isLocal) {
+ void *p;
+ if (addr > virt_to_phys(high_memory - 1)) {
+ ERRDRV("%s - bad local address (0x%-16.16Lx for %lu)",
+ __func__,
+ (unsigned long long) addr, (ulong) bytes);
+ rc = NULL;
+ goto Away;
+ }
+ p = __va((ulong) (addr));
+ memcpy(ctx->data, p, bytes);
+ } else {
+ rgn = visor_memregion_create(addr, bytes);
+ if (!rgn) {
+ rc = NULL;
+ goto Away;
+ }
+ if (visor_memregion_read(rgn, 0, ctx->data, bytes) < 0) {
+ rc = NULL;
+ goto Away;
+ }
+ }
+ if (!hasStandardPayloadHeader) {
+ ctx->byte_stream = TRUE;
+ rc = ctx;
+ goto Away;
+ }
+ phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
+ if (phdr->TotalLength != bytes) {
+ ERRDRV("%s - bad total length %lu (should be %lu)",
+ __func__,
+ (ulong) (phdr->TotalLength), (ulong) (bytes));
+ rc = NULL;
+ goto Away;
+ }
+ if (phdr->TotalLength < phdr->HeaderLength) {
+ ERRDRV("%s - total length < header length (%lu < %lu)",
+ __func__,
+ (ulong) (phdr->TotalLength),
+ (ulong) (phdr->HeaderLength));
+ rc = NULL;
+ goto Away;
+ }
+ if (phdr->HeaderLength < sizeof(ULTRA_CONTROLVM_PARAMETERS_HEADER)) {
+ ERRDRV("%s - header is too small (%lu < %lu)",
+ __func__,
+ (ulong) (phdr->HeaderLength),
+ (ulong) (sizeof(ULTRA_CONTROLVM_PARAMETERS_HEADER)));
+ rc = NULL;
+ goto Away;
+ }
+
+ rc = ctx;
+Away:
+ if (rgn) {
+ visor_memregion_destroy(rgn);
+ rgn = NULL;
+ }
+ if (rc)
+ Controlvm_Payload_Bytes_Buffered += ctx->param_bytes;
+ else {
+ if (ctx) {
+ parser_done(ctx);
+ ctx = NULL;
+ }
+ }
+ return rc;
+}
+
+PARSER_CONTEXT *
+parser_init(U64 addr, U32 bytes, BOOL isLocal, BOOL *tryAgain)
+{
+ return parser_init_guts(addr, bytes, isLocal, TRUE, tryAgain);
+}
+
+/* Call this instead of parser_init() if the payload area consists of just
+ * a sequence of bytes, rather than an ULTRA_CONTROLVM_PARAMETERS_HEADER
+ * structure. Afterwards, you can call parser_simpleString_get() or
+ * parser_byteStream_get() to obtain the data.
+ */
+PARSER_CONTEXT *
+parser_init_byteStream(U64 addr, U32 bytes, BOOL isLocal, BOOL *tryAgain)
+{
+ return parser_init_guts(addr, bytes, isLocal, FALSE, tryAgain);
+}
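+
+/* A minimal sketch of the byte-stream path (error handling omitted):
+ *
+ *	BOOL retry = FALSE;
+ *	PARSER_CONTEXT *ctx = parser_init_byteStream(addr, bytes, FALSE, &retry);
+ *	if (ctx) {
+ *		ulong n;
+ *		void *p = parser_byteStream_get(ctx, &n);
+ *		// ... consume the <n> bytes at <p> ...
+ *		parser_done(ctx);
+ *	}
+ */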
+
+/* Obtain '\0'-terminated copy of string in payload area.
+ */
+char *
+parser_simpleString_get(PARSER_CONTEXT *ctx)
+{
+ if (!ctx->byte_stream)
+ return NULL;
+ return ctx->data; /* note this IS '\0'-terminated, because of
+ * the num of bytes we alloc+clear in
+ * parser_init_byteStream() */
+}
+
+/* Obtain a copy of the buffer in the payload area.
+ */
+void *
+parser_byteStream_get(PARSER_CONTEXT *ctx, ulong *nbytes)
+{
+ if (!ctx->byte_stream)
+ return NULL;
+ if (nbytes)
+ *nbytes = ctx->param_bytes;
+ return (void *) ctx->data;
+}
+
+GUID
+parser_id_get(PARSER_CONTEXT *ctx)
+{
+ ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+
+ if (ctx == NULL) {
+ ERRDRV("%s (%s:%d) - no context",
+ __func__, __FILE__, __LINE__);
+ return Guid0;
+ }
+ phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
+ return phdr->Id;
+}
+
+void
+parser_param_start(PARSER_CONTEXT *ctx, PARSER_WHICH_STRING which_string)
+{
+ ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+
+ if (ctx == NULL) {
+ ERRDRV("%s (%s:%d) - no context",
+ __func__, __FILE__, __LINE__);
+ goto Away;
+ }
+ phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
+ switch (which_string) {
+ case PARSERSTRING_INITIATOR:
+ ctx->curr = ctx->data + phdr->InitiatorOffset;
+ ctx->bytes_remaining = phdr->InitiatorLength;
+ break;
+ case PARSERSTRING_TARGET:
+ ctx->curr = ctx->data + phdr->TargetOffset;
+ ctx->bytes_remaining = phdr->TargetLength;
+ break;
+ case PARSERSTRING_CONNECTION:
+ ctx->curr = ctx->data + phdr->ConnectionOffset;
+ ctx->bytes_remaining = phdr->ConnectionLength;
+ break;
+ case PARSERSTRING_NAME:
+ ctx->curr = ctx->data + phdr->NameOffset;
+ ctx->bytes_remaining = phdr->NameLength;
+ break;
+ default:
+ ERRDRV("%s - bad which_string %d", __func__, which_string);
+ break;
+ }
+
+Away:
+ return;
+}
+
+void
+parser_done(PARSER_CONTEXT *ctx)
+{
+ if (!ctx)
+ return;
+ Controlvm_Payload_Bytes_Buffered -= ctx->param_bytes;
+ kfree(ctx);
+}
+
+/** Return length of string not counting trailing spaces. */
+static int
+string_length_no_trail(char *s, int len)
+{
+ int i = len - 1;
+ while (i >= 0) {
+ if (!isspace(s[i]))
+ return i + 1;
+ i--;
+ }
+ return 0;
+}
+
+/** Grab the next name and value out of the parameter buffer.
+ * The entire parameter buffer looks like this:
+ * <name>:<value>\0
+ * <name>:<value>\0
+ * ...
+ * \0
+ * If successful, the next <name> is returned within the supplied
+ * <nam> buffer (always upper-cased), and the corresponding
+ * <value> is returned within a kmalloc()ed buffer, whose pointer is
+ * provided as the return value of this function.
+ * (The total number of bytes allocated is strlen(<value>)+1.)
+ *
+ * NULL is returned to indicate failure, which can occur for several reasons:
+ * - all <name>:<value> pairs have already been processed
+ * - bad parameter
+ * - parameter buffer ends prematurely (couldn't find a ':' or '\0' within
+ * the confines of the parameter buffer)
+ * - the <nam> buffer is not large enough to hold the <name> of the next
+ * parameter
+ */
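+/* Illustrative example: given a parameter buffer containing
+ *
+ *	"device:eth0,mode:ro\0"
+ *
+ * the first call returns a kmalloc()ed "eth0" with <nam> set to "DEVICE",
+ * the second returns "ro" with <nam> set to "MODE", and the third returns
+ * NULL because the terminating '\0' has been reached.  (The names and
+ * values here are hypothetical.)
+ */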
+void *
+parser_param_get(PARSER_CONTEXT *ctx, char *nam, int namesize)
+{
+ u8 *pscan, *pnam = nam;
+ ulong nscan;
+ int value_length = -1, orig_value_length = -1;
+ void *value = NULL;
+ int i;
+ int closing_quote = 0;
+
+ if (!ctx)
+ return NULL;
+ pscan = ctx->curr;
+ nscan = ctx->bytes_remaining;
+ if (nscan == 0)
+ return NULL;
+ if (*pscan == '\0')
+ /* This is the normal return point after you have processed
+ * all of the <name>=<value> pairs in a syntactically-valid
+ * parameter buffer.
+ */
+ return NULL;
+
+ /* skip whitespace */
+ while (isspace(*pscan)) {
+ pscan++;
+ nscan--;
+ if (nscan == 0)
+ return NULL;
+ }
+
+ while (*pscan != ':') {
+ if (namesize <= 0) {
+ ERRDRV("%s - name too big", __func__);
+ return NULL;
+ }
+ *pnam = toupper(*pscan);
+ pnam++;
+ namesize--;
+ pscan++;
+ nscan--;
+ if (nscan == 0) {
+ ERRDRV("%s - unexpected end of input parsing name",
+ __func__);
+ return NULL;
+ }
+ }
+ if (namesize <= 0) {
+ ERRDRV("%s - name too big", __func__);
+ return NULL;
+ }
+ *pnam = '\0';
+ nam[string_length_no_trail(nam, strlen(nam))] = '\0';
+
+ /* point to char immediately after ":" in "<name>:<value>" */
+ pscan++;
+ nscan--;
+ /* skip whitespace */
+ while (isspace(*pscan)) {
+ pscan++;
+ nscan--;
+ if (nscan == 0) {
+ ERRDRV("%s - unexpected end of input looking for value",
+ __func__);
+ return NULL;
+ }
+ }
+ if (nscan == 0) {
+ ERRDRV("%s - unexpected end of input looking for value",
+ __func__);
+ return NULL;
+ }
+ if (*pscan == '\'' || *pscan == '"') {
+ closing_quote = *pscan;
+ pscan++;
+ nscan--;
+ if (nscan == 0) {
+ ERRDRV("%s - unexpected end of input after %c",
+ __func__, closing_quote);
+ return NULL;
+ }
+ }
+
+ /* look for a separator character, terminator character, or
+ * end of data
+ */
+ for (i = 0, value_length = -1; i < nscan; i++) {
+ if (closing_quote) {
+ if (pscan[i] == '\0') {
+ ERRDRV("%s - unexpected end of input parsing quoted value", __func__);
+ return NULL;
+ }
+ if (pscan[i] == closing_quote) {
+ value_length = i;
+ break;
+ }
+ } else
+ if (pscan[i] == ',' || pscan[i] == ';'
+ || pscan[i] == '\0') {
+ value_length = i;
+ break;
+ }
+ }
+ if (value_length < 0) {
+ if (closing_quote) {
+ ERRDRV("%s - unexpected end of input parsing quoted value", __func__);
+ return NULL;
+ }
+ value_length = nscan;
+ }
+ orig_value_length = value_length;
+ if (closing_quote == 0)
+ value_length = string_length_no_trail(pscan, orig_value_length);
+ value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
+ if (value == NULL)
+ return NULL;
+ memcpy(value, pscan, value_length);
+ ((u8 *) (value))[value_length] = '\0';
+
+ pscan += orig_value_length;
+ nscan -= orig_value_length;
+
+ /* skip past separator or closing quote */
+ if (nscan > 0) {
+ if (*pscan != '\0') {
+ pscan++;
+ nscan--;
+ }
+ }
+
+ if (closing_quote && (nscan > 0)) {
+ /* we still need to skip around the real separator if present */
+ /* first, skip whitespace */
+ while (isspace(*pscan)) {
+ pscan++;
+ nscan--;
+ if (nscan == 0)
+ break;
+ }
+ if (nscan > 0) {
+ if (*pscan == ',' || *pscan == ';') {
+ pscan++;
+ nscan--;
+ } else if (*pscan != '\0') {
+ ERRDRV("%s - missing separator after quoted string", __func__);
+ kfree(value);
+ value = NULL;
+ return NULL;
+ }
+ }
+ }
+ ctx->curr = pscan;
+ ctx->bytes_remaining = nscan;
+ return value;
+}
+
+void *
+parser_string_get(PARSER_CONTEXT *ctx)
+{
+ u8 *pscan;
+ ulong nscan;
+ int value_length = -1;
+ void *value = NULL;
+ int i;
+
+ if (!ctx)
+ return NULL;
+ pscan = ctx->curr;
+ nscan = ctx->bytes_remaining;
+ if (nscan == 0)
+ return NULL;
+ if (!pscan)
+ return NULL;
+ for (i = 0, value_length = -1; i < nscan; i++)
+ if (pscan[i] == '\0') {
+ value_length = i;
+ break;
+ }
+ if (value_length < 0) /* no '\0' terminator found */
+ value_length = nscan;
+ value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
+ if (value == NULL)
+ return NULL;
+ if (value_length > 0)
+ memcpy(value, pscan, value_length);
+ ((u8 *) (value))[value_length] = '\0';
+ return value;
+}
diff --git a/drivers/staging/unisys/visorchipset/parser.h b/drivers/staging/unisys/visorchipset/parser.h
new file mode 100644
index 000000000000..a0cc50a533cd
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/parser.h
@@ -0,0 +1,45 @@
+/* parser.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __PARSER_H__
+#define __PARSER_H__
+
+#include "uniklog.h"
+#include "timskmod.h"
+#include "channel.h"
+
+typedef enum {
+ PARSERSTRING_INITIATOR,
+ PARSERSTRING_TARGET,
+ PARSERSTRING_CONNECTION,
+ PARSERSTRING_NAME,
+} PARSER_WHICH_STRING;
+
+typedef struct PARSER_CONTEXT_Tag PARSER_CONTEXT;
+
+PARSER_CONTEXT *parser_init(U64 addr, U32 bytes, BOOL isLocal, BOOL *tryAgain);
+PARSER_CONTEXT *parser_init_byteStream(U64 addr, U32 bytes, BOOL isLocal,
+ BOOL *tryAgain);
+void parser_param_start(PARSER_CONTEXT *ctx, PARSER_WHICH_STRING which_string);
+void *parser_param_get(PARSER_CONTEXT *ctx, char *nam, int namesize);
+void *parser_string_get(PARSER_CONTEXT *ctx);
+GUID parser_id_get(PARSER_CONTEXT *ctx);
+char *parser_simpleString_get(PARSER_CONTEXT *ctx);
+void *parser_byteStream_get(PARSER_CONTEXT *ctx, ulong *nbytes);
+void parser_done(PARSER_CONTEXT *ctx);
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/testing.h b/drivers/staging/unisys/visorchipset/testing.h
new file mode 100644
index 000000000000..a44f5556cb21
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/testing.h
@@ -0,0 +1,41 @@
+/* testing.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VISORCHIPSET_TESTING_H__
+#define __VISORCHIPSET_TESTING_H__
+
+#define VISORCHIPSET_TEST_PROC
+#include "globals.h"
+#include "controlvmchannel.h"
+
+void test_produce_test_message(CONTROLVM_MESSAGE *msg, int isLocalTestAddr);
+BOOL test_consume_test_message(CONTROLVM_MESSAGE *msg);
+void test_manufacture_vnic_client_add(void *p);
+void test_manufacture_vnic_client_add_phys(HOSTADDRESS addr);
+void test_manufacture_preamble_messages(void);
+void test_manufacture_device_attach(ulong busNo, ulong devNo);
+void test_manufacture_device_add(ulong busNo, ulong devNo, GUID dataTypeGuid,
+ void *pChannel);
+void test_manufacture_add_bus(ulong busNo, ulong maxDevices,
+ GUID id, u8 *name, BOOL isServer);
+void test_manufacture_device_destroy(ulong busNo, ulong devNo);
+void test_manufacture_bus_destroy(ulong busNo);
+void test_manufacture_detach_externalPort(ulong switchNo, ulong externalPortNo);
+void test_manufacture_detach_internalPort(ulong switchNo, ulong internalPortNo);
+void test_cleanup(void);
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
new file mode 100644
index 000000000000..d4bf203cdfdf
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
@@ -0,0 +1,307 @@
+/* visorchipset.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VISORCHIPSET_H__
+#define __VISORCHIPSET_H__
+
+#include "timskmod.h"
+#include "channel.h"
+#include "controlvmchannel.h"
+#include "parser.h"
+#include "procobjecttree.h"
+#include "vbusdeviceinfo.h"
+#include "vbushelper.h"
+
+/** Describes the state of a bus or device, in terms of which controlvm
+ * messages have been received for it.
+ */
+typedef struct {
+ U32 created:1;
+ U32 attached:1;
+ U32 configured:1;
+ U32 running:1;
+ /* Add new fields above. */
+ /* Remaining bits in this 32-bit word are unused. */
+} VISORCHIPSET_STATE;
+
+typedef enum {
+ /** address is guest physical, but outside of the physical memory
+ * region that is controlled by the running OS (this is the normal
+ * address type for Supervisor channels)
+ */
+ ADDRTYPE_localPhysical,
+
+ /** address is guest physical, and withIN the confines of the
+ * physical memory controlled by the running OS.
+ */
+ ADDRTYPE_localTest,
+} VISORCHIPSET_ADDRESSTYPE;
+
+typedef enum {
+ CRASH_dev,
+ CRASH_bus,
+} CRASH_OBJ_TYPE;
+
+/** Attributes for a particular Supervisor channel.
+ */
+typedef struct {
+ VISORCHIPSET_ADDRESSTYPE addrType;
+ HOSTADDRESS channelAddr;
+ struct InterruptInfo intr;
+ U64 nChannelBytes;
+ GUID channelTypeGuid;
+ GUID channelInstGuid;
+
+} VISORCHIPSET_CHANNEL_INFO;
+
+/** Attributes for a particular Supervisor device.
+ * Any visorchipset client can query these attributes using
+ * visorchipset_get_client_device_info() or
+ * visorchipset_get_server_device_info().
+ */
+typedef struct {
+ struct list_head entry;
+ U32 busNo;
+ U32 devNo;
+ GUID devInstGuid;
+ VISORCHIPSET_STATE state;
+ VISORCHIPSET_CHANNEL_INFO chanInfo;
+ U32 Reserved1; /* CONTROLVM_ID */
+ U64 Reserved2;
+ U32 switchNo; /* when devState.attached==1 */
+ U32 internalPortNo; /* when devState.attached==1 */
+ CONTROLVM_MESSAGE_HEADER pendingMsgHdr; /* CONTROLVM_MESSAGE */
+ /** For private use by the bus driver */
+ void *bus_driver_context;
+
+} VISORCHIPSET_DEVICE_INFO;
+
+static inline VISORCHIPSET_DEVICE_INFO *
+finddevice(struct list_head *list, U32 busNo, U32 devNo)
+{
+ VISORCHIPSET_DEVICE_INFO *p;
+
+ list_for_each_entry(p, list, entry) {
+ if (p->busNo == busNo && p->devNo == devNo)
+ return p;
+ }
+ return NULL;
+}
+
+static inline void delbusdevices(struct list_head *list, U32 busNo)
+{
+	VISORCHIPSET_DEVICE_INFO *p, *tmp;
+
+	/* use the _safe iterator because entries are deleted while walking */
+	list_for_each_entry_safe(p, tmp, list, entry) {
+		if (p->busNo == busNo) {
+			list_del(&p->entry);
+			kfree(p);
+		}
+	}
+}
+
+/** Attributes for a particular Supervisor bus.
+ * (For a service partition acting as the server for buses/devices, there
+ * is a 1-to-1 relationship between busses and guest partitions.)
+ * Any visorchipset client can query these attributes using
+ * visorchipset_get_client_bus_info() or visorchipset_get_bus_info().
+ */
+typedef struct {
+ struct list_head entry;
+ U32 busNo;
+ VISORCHIPSET_STATE state;
+ VISORCHIPSET_CHANNEL_INFO chanInfo;
+ GUID partitionGuid;
+ U64 partitionHandle;
+ U8 *name; /* UTF8 */
+ U8 *description; /* UTF8 */
+ U64 Reserved1;
+ U32 Reserved2;
+ MYPROCOBJECT *procObject;
+ struct {
+ U32 server:1;
+ /* Add new fields above. */
+ /* Remaining bits in this 32-bit word are unused. */
+ } flags;
+ CONTROLVM_MESSAGE_HEADER pendingMsgHdr; /* CONTROLVM MsgHdr */
+ /** For private use by the bus driver */
+ void *bus_driver_context;
+ U64 devNo;
+
+} VISORCHIPSET_BUS_INFO;
+
+static inline VISORCHIPSET_BUS_INFO *
+findbus(struct list_head *list, U32 busNo)
+{
+ VISORCHIPSET_BUS_INFO *p;
+
+ list_for_each_entry(p, list, entry) {
+ if (p->busNo == busNo)
+ return p;
+ }
+ return NULL;
+}
+
+/** Attributes for a particular Supervisor switch.
+ */
+typedef struct {
+ U32 switchNo;
+ VISORCHIPSET_STATE state;
+ GUID switchTypeGuid;
+ U8 *authService1;
+ U8 *authService2;
+ U8 *authService3;
+ U8 *securityContext;
+ U64 Reserved;
+ U32 Reserved2; /* CONTROLVM_ID */
+ struct device dev;
+ BOOL dev_exists;
+ CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
+
+} VISORCHIPSET_SWITCH_INFO;
+
+/** Attributes for a particular Supervisor external port, which is connected
+ * to a specific switch.
+ */
+typedef struct {
+ U32 switchNo;
+ U32 externalPortNo;
+ VISORCHIPSET_STATE state;
+ GUID networkZoneGuid;
+ int pdPort;
+ U8 *ip;
+ U8 *ipNetmask;
+ U8 *ipBroadcast;
+ U8 *ipNetwork;
+ U8 *ipGateway;
+ U8 *ipDNS;
+ U64 Reserved1;
+ U32 Reserved2; /* CONTROLVM_ID */
+ struct device dev;
+ BOOL dev_exists;
+ CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
+
+} VISORCHIPSET_EXTERNALPORT_INFO;
+
+/** Attributes for a particular Supervisor internal port, which is how a
+ * device connects to a particular switch.
+ */
+typedef struct {
+ U32 switchNo;
+ U32 internalPortNo;
+ VISORCHIPSET_STATE state;
+ U32 busNo; /* valid only when state.attached == 1 */
+ U32 devNo; /* valid only when state.attached == 1 */
+ U64 Reserved1;
+ U32 Reserved2; /* CONTROLVM_ID */
+ CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
+ MYPROCOBJECT *procObject;
+
+} VISORCHIPSET_INTERNALPORT_INFO;
+
+/* These functions will be called from within visorchipset when certain
+ * events happen. (The implementation of these functions is outside of
+ * visorchipset.)
+ */
+typedef struct {
+ void (*bus_create)(ulong busNo);
+ void (*bus_destroy)(ulong busNo);
+ void (*device_create)(ulong busNo, ulong devNo);
+ void (*device_destroy)(ulong busNo, ulong devNo);
+ void (*device_pause)(ulong busNo, ulong devNo);
+ void (*device_resume)(ulong busNo, ulong devNo);
+ int (*get_channel_info)(GUID typeGuid, ulong *minSize,
+ ulong *maxSize);
+} VISORCHIPSET_BUSDEV_NOTIFIERS;
+
+/* These functions live inside visorchipset, and will be called to indicate
+ * responses to specific events (by code outside of visorchipset).
+ * For now, the value for each response is simply either:
+ * 0 = it worked
+ * -1 = it failed
+ */
+typedef struct {
+ void (*bus_create)(ulong busNo, int response);
+ void (*bus_destroy)(ulong busNo, int response);
+ void (*device_create)(ulong busNo, ulong devNo, int response);
+ void (*device_destroy)(ulong busNo, ulong devNo, int response);
+ void (*device_pause)(ulong busNo, ulong devNo, int response);
+ void (*device_resume)(ulong busNo, ulong devNo, int response);
+} VISORCHIPSET_BUSDEV_RESPONDERS;
+
+/** Register functions (in the bus driver) to get called by visorchipset
+ * whenever a bus or device appears for which this service partition is
+ * to be the server.  visorchipset will fill in <responders> with the
+ * functions the bus driver should call to report message responses.
+ */
+void
+visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
+ VISORCHIPSET_BUSDEV_RESPONDERS *responders,
+ ULTRA_VBUS_DEVICEINFO *driverInfo);
+
+/** Register functions (in the bus driver) to get called by visorchipset
+ * whenever a bus or device appears for which this service partition is
+ * to be the client.  visorchipset will fill in <responders> with the
+ * functions the bus driver should call to report message responses.
+ */
+void
+visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
+ VISORCHIPSET_BUSDEV_RESPONDERS *responders,
+ ULTRA_VBUS_DEVICEINFO *driverInfo);
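+
+/* Typical registration sequence for a bus driver (an illustrative sketch;
+ * the my_* notifier, responder, and driver-info names are hypothetical):
+ *
+ *	static VISORCHIPSET_BUSDEV_NOTIFIERS my_notifiers = {
+ *		.bus_create = my_bus_create,
+ *		.device_create = my_device_create,
+ *		// ...
+ *	};
+ *	static VISORCHIPSET_BUSDEV_RESPONDERS my_responders;
+ *
+ *	visorchipset_register_busdev_server(&my_notifiers, &my_responders,
+ *					    &my_driver_info);
+ *	// later, after handling a bus_create notification:
+ *	my_responders.bus_create(busNo, 0);	// 0 = it worked
+ */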
+
+typedef void (*SPARREPORTEVENT_COMPLETE_FUNC) (CONTROLVM_MESSAGE *msg,
+ int status);
+
+void visorchipset_device_pause_response(ulong busNo, ulong devNo, int response);
+
+BOOL visorchipset_get_bus_info(ulong busNo, VISORCHIPSET_BUS_INFO *busInfo);
+BOOL visorchipset_get_device_info(ulong busNo, ulong devNo,
+ VISORCHIPSET_DEVICE_INFO *devInfo);
+BOOL visorchipset_get_switch_info(ulong switchNo,
+ VISORCHIPSET_SWITCH_INFO *switchInfo);
+BOOL visorchipset_get_externalport_info(ulong switchNo, ulong externalPortNo,
+ VISORCHIPSET_EXTERNALPORT_INFO
+ *externalPortInfo);
+BOOL visorchipset_set_bus_context(ulong busNo, void *context);
+BOOL visorchipset_set_device_context(ulong busNo, ulong devNo, void *context);
+int visorchipset_chipset_ready(void);
+int visorchipset_chipset_selftest(void);
+int visorchipset_chipset_notready(void);
+void visorchipset_controlvm_respond_reportEvent(CONTROLVM_MESSAGE *msg,
+ void *payload);
+void visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type);
+void *visorchipset_cache_alloc(struct kmem_cache *pool,
+ BOOL ok_to_block, char *fn, int ln);
+void visorchipset_cache_free(struct kmem_cache *pool, void *p,
+ char *fn, int ln);
+
+#if defined(TRANSMITFILE_DEBUG) || defined(DEBUG)
+#define DBG_GETFILE_PAYLOAD(msg, controlvm_header) \
+ LOGINF(msg, \
+ (ulong)controlvm_header.PayloadVmOffset, \
+ (ulong)controlvm_header.PayloadMaxBytes)
+#define DBG_GETFILE(fmt, ...) LOGINF(fmt, ##__VA_ARGS__)
+#define DBG_PUTFILE(fmt, ...) LOGINF(fmt, ##__VA_ARGS__)
+#else
+#define DBG_GETFILE_PAYLOAD(msg, controlvm_header)
+#define DBG_GETFILE(fmt, ...)
+#define DBG_PUTFILE(fmt, ...)
+#endif
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/visorchipset_main.c b/drivers/staging/unisys/visorchipset/visorchipset_main.c
new file mode 100644
index 000000000000..257c6e59b460
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/visorchipset_main.c
@@ -0,0 +1,2963 @@
+/* visorchipset_main.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include "globals.h"
+#include "controlvm.h"
+#include "visorchipset.h"
+#include "procobjecttree.h"
+#include "visorchannel.h"
+#include "periodic_work.h"
+#include "testing.h"
+#include "file.h"
+#include "parser.h"
+#include "uniklog.h"
+#include "uisutils.h"
+#include "guidutils.h"
+#include "controlvmcompletionstatus.h"
+#include "guestlinuxdebug.h"
+#include "filexfer.h"
+
+#include <linux/nls.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
+#define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
+ * vnic loopback test */
+#define TEST_VNIC_SWITCHNO 1
+#define TEST_VNIC_BUSNO 9
+
+#define MAX_NAME_SIZE 128
+#define MAX_IP_SIZE 50
+#define MAXOUTSTANDINGCHANNELCOMMAND 256
+#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
+#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
+
+/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
+ * we switch to slow polling mode. As soon as we get a controlvm
+ * message, we switch back to fast polling mode.
+ */
+#define MIN_IDLE_SECONDS 10
+static ulong Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+static ulong Most_recent_message_jiffies; /* when we got our last
+ * controlvm message */
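+
+/* Illustrative polling-rate decision implied by the comment above (a sketch
+ * only; the actual switch is presumably made by the periodic controlvm work
+ * function):
+ *
+ *	if (time_after(jiffies,
+ *		       Most_recent_message_jiffies + MIN_IDLE_SECONDS * HZ))
+ *		Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ *	else
+ *		Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ */
+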
+static inline char *
+NONULLSTR(char *s)
+{
+ if (s)
+ return s;
+ else
+ return "";
+}
+
+static int serverregistered;
+static int clientregistered;
+
+#define MAX_CHIPSET_EVENTS 2
+static U8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
+
+static struct delayed_work Periodic_controlvm_work;
+static struct workqueue_struct *Periodic_controlvm_workqueue;
+static DEFINE_SEMAPHORE(NotifierLock);
+
+typedef struct {
+ CONTROLVM_MESSAGE message;
+ unsigned int crc;
+} MESSAGE_ENVELOPE;
+
+static CONTROLVM_MESSAGE_HEADER g_DiagMsgHdr;
+static CONTROLVM_MESSAGE_HEADER g_ChipSetMsgHdr;
+static CONTROLVM_MESSAGE_HEADER g_DelDumpMsgHdr;
+static const GUID UltraDiagPoolChannelProtocolGuid =
+ ULTRA_DIAG_POOL_CHANNEL_PROTOCOL_GUID;
+/* 0xffffff is an invalid Bus/Device number */
+static ulong g_diagpoolBusNo = 0xffffff;
+static ulong g_diagpoolDevNo = 0xffffff;
+static CONTROLVM_MESSAGE_PACKET g_DeviceChangeStatePacket;
+
+/* Only VNIC and VHBA channels are sent to visorclientbus (aka
+ * "visorhackbus")
+ */
+#define FOR_VISORHACKBUS(channel_type_guid) \
+ ((memcmp(&channel_type_guid, &UltraVnicChannelProtocolGuid, \
+ sizeof(GUID)) == 0) || \
+ (memcmp(&channel_type_guid, &UltraVhbaChannelProtocolGuid, \
+ sizeof(GUID)) == 0))
+#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
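+
+/* For example, a VNIC channel is routed to visorclientbus:
+ *	FOR_VISORHACKBUS(UltraVnicChannelProtocolGuid)	-> true
+ *	FOR_VISORBUS(UltraVnicChannelProtocolGuid)	-> false
+ */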
+
+#define is_diagpool_channel(channel_type_guid) \
+ (memcmp(&channel_type_guid, \
+ &UltraDiagPoolChannelProtocolGuid, sizeof(GUID)) == 0)
+
+typedef enum {
+ PARTPROP_invalid,
+ PARTPROP_name,
+ PARTPROP_description,
+ PARTPROP_handle,
+ PARTPROP_busNumber,
+ /* add new properties above, but don't forget to change
+ * InitPartitionProperties() and show_partition_property() also...
+ */
+ PARTPROP_last
+} PARTITION_property;
+static const char *PartitionTypeNames[] = { "partition", NULL };
+
+static char *PartitionPropertyNames[PARTPROP_last + 1];
+static void
+InitPartitionProperties(void)
+{
+ char **p = PartitionPropertyNames;
+ p[PARTPROP_invalid] = "";
+ p[PARTPROP_name] = "name";
+ p[PARTPROP_description] = "description";
+ p[PARTPROP_handle] = "handle";
+ p[PARTPROP_busNumber] = "busNumber";
+ p[PARTPROP_last] = NULL;
+}
+
+typedef enum {
+ CTLVMPROP_invalid,
+ CTLVMPROP_physAddr,
+ CTLVMPROP_controlChannelAddr,
+ CTLVMPROP_controlChannelBytes,
+ CTLVMPROP_sparBootPart,
+ CTLVMPROP_sparStoragePart,
+ CTLVMPROP_livedumpLength,
+ CTLVMPROP_livedumpCrc32,
+ /* add new properties above, but don't forget to change
+	 * InitControlVmProperties() and show_controlvm_property() also...
+ */
+ CTLVMPROP_last
+} CONTROLVM_property;
+
+static const char *ControlVmTypeNames[] = { "controlvm", NULL };
+
+static char *ControlVmPropertyNames[CTLVMPROP_last + 1];
+static void
+InitControlVmProperties(void)
+{
+ char **p = ControlVmPropertyNames;
+ p[CTLVMPROP_invalid] = "";
+ p[CTLVMPROP_physAddr] = "physAddr";
+ p[CTLVMPROP_controlChannelAddr] = "controlChannelAddr";
+ p[CTLVMPROP_controlChannelBytes] = "controlChannelBytes";
+ p[CTLVMPROP_sparBootPart] = "spar_boot_part";
+ p[CTLVMPROP_sparStoragePart] = "spar_storage_part";
+ p[CTLVMPROP_livedumpLength] = "livedumpLength";
+ p[CTLVMPROP_livedumpCrc32] = "livedumpCrc32";
+ p[CTLVMPROP_last] = NULL;
+}
+
+static MYPROCOBJECT *ControlVmObject;
+static MYPROCTYPE *PartitionType;
+static MYPROCTYPE *ControlVmType;
+
+#define VISORCHIPSET_DIAG_PROC_ENTRY_FN "diagdump"
+static struct proc_dir_entry *diag_proc_dir;
+
+#define VISORCHIPSET_CHIPSET_PROC_ENTRY_FN "chipsetready"
+static struct proc_dir_entry *chipset_proc_dir;
+
+#define VISORCHIPSET_PARAHOTPLUG_PROC_ENTRY_FN "parahotplug"
+static struct proc_dir_entry *parahotplug_proc_dir;
+
+static LIST_HEAD(BusInfoList);
+static LIST_HEAD(DevInfoList);
+
+static struct proc_dir_entry *ProcDir;
+static VISORCHANNEL *ControlVm_channel;
+
+static ssize_t visorchipset_proc_read_writeonly(struct file *file,
+ char __user *buf,
+ size_t len, loff_t *offset);
+static ssize_t proc_read_installer(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static ssize_t proc_write_installer(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t proc_read_toolaction(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static ssize_t proc_write_toolaction(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t proc_read_bootToTool(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static ssize_t proc_write_bootToTool(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_installer_fops = {
+ .read = proc_read_installer,
+ .write = proc_write_installer,
+};
+
+static const struct file_operations proc_toolaction_fops = {
+ .read = proc_read_toolaction,
+ .write = proc_write_toolaction,
+};
+
+static const struct file_operations proc_bootToTool_fops = {
+ .read = proc_read_bootToTool,
+ .write = proc_write_bootToTool,
+};
+
+typedef struct {
+ U8 __iomem *ptr; /* pointer to base address of payload pool */
+ U64 offset; /* offset from beginning of controlvm
+			  * channel to beginning of payload pool */
+ U32 bytes; /* number of bytes in payload pool */
+} CONTROLVM_PAYLOAD_INFO;
+
+/* Manages the request payload in the controlvm channel */
+static CONTROLVM_PAYLOAD_INFO ControlVm_payload_info;
+
+static pCHANNEL_HEADER Test_Vnic_channel;
+
+typedef struct {
+ CONTROLVM_MESSAGE_HEADER Dumpcapture_header;
+ CONTROLVM_MESSAGE_HEADER Gettextdump_header;
+ CONTROLVM_MESSAGE_HEADER Dumpcomplete_header;
+ BOOL Gettextdump_outstanding;
+ u32 crc32;
+ ulong length;
+ atomic_t buffers_in_use;
+ ulong destination;
+} LIVEDUMP_INFO;
+/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
+ * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
+ */
+static LIVEDUMP_INFO LiveDump_info;
+
+/* The following globals are used to handle the scenario where we are unable to
+ * offload the payload from a controlvm message due to memory requirements. In
+ * this scenario, we simply stash the controlvm message, then attempt to
+ * process it again the next time controlvm_periodic_work() runs.
+ */
+static CONTROLVM_MESSAGE ControlVm_Pending_Msg;
+static BOOL ControlVm_Pending_Msg_Valid = FALSE;
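+
+/* Illustrative shape of the retry described above (a sketch, not the
+ * authoritative handling):
+ *
+ *	if (ControlVm_Pending_Msg_Valid) {
+ *		// re-attempt to process ControlVm_Pending_Msg; clear
+ *		// ControlVm_Pending_Msg_Valid only once the payload could
+ *		// be offloaded successfully
+ *	}
+ */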
+
+/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
+ * TRANSMIT_FILE PutFile payloads.
+ */
+static struct kmem_cache *Putfile_buffer_list_pool;
+static const char Putfile_buffer_list_pool_name[] =
+ "controlvm_putfile_buffer_list_pool";
+
+/* This identifies a data buffer that has been received via a controlvm message
+ * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
+ */
+struct putfile_buffer_entry {
+ struct list_head next; /* putfile_buffer_entry list */
+ PARSER_CONTEXT *parser_ctx; /* points to buffer containing input data */
+};
+
+/* List of struct putfile_request *, via next_putfile_request member.
+ * Each entry in this list identifies an outstanding TRANSMIT_FILE
+ * conversation.
+ */
+static LIST_HEAD(Putfile_request_list);
+
+/* This describes a buffer and its current state of transfer (e.g., how many
+ * bytes have already been supplied as putfile data, and how many bytes are
+ * remaining) for a putfile_request.
+ */
+struct putfile_active_buffer {
+ /* a payload from a controlvm message, containing a file data buffer */
+ PARSER_CONTEXT *parser_ctx;
+ /* points within data area of parser_ctx to next byte of data */
+ u8 *pnext;
+ /* # bytes left from <pnext> to the end of this data buffer */
+ size_t bytes_remaining;
+};
+
+#define PUTFILE_REQUEST_SIG 0x0906101302281211
+/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
+ * conversation. Structs of this type are dynamically linked into
+ * <Putfile_request_list>.
+ */
+struct putfile_request {
+ u64 sig; /* PUTFILE_REQUEST_SIG */
+
+ /* header from original TransmitFile request */
+ CONTROLVM_MESSAGE_HEADER controlvm_header;
+ u64 file_request_number; /* from original TransmitFile request */
+
+ /* link to next struct putfile_request */
+ struct list_head next_putfile_request;
+
+ /* most-recent sequence number supplied via a controlvm message */
+ u64 data_sequence_number;
+
+ /* head of putfile_buffer_entry list, which describes the data to be
+ * supplied as putfile data;
+ * - this list is added to when controlvm messages come in that supply
+ * file data
+ * - this list is removed from via the hotplug program that is actually
+ * consuming these buffers to write as file data */
+ struct list_head input_buffer_list;
+ spinlock_t req_list_lock; /* lock for input_buffer_list */
+
+ /* waiters for input_buffer_list to go non-empty */
+ wait_queue_head_t input_buffer_wq;
+
+ /* data not yet read within current putfile_buffer_entry */
+ struct putfile_active_buffer active_buf;
+
+	/* <0 = failed, 0 = in-progress, >0 = successful; */
+	/* note that this must be set while holding req_list_lock, and */
+	/* if you set it to <0, it is your responsibility to also free up */
+	/* all of the other objects in this struct (like input_buffer_list, */
+	/* active_buf.parser_ctx) before releasing the lock */
+ int completion_status;
+};
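+
+/* Sketch of the producer side implied by the comments above (illustrative
+ * only; 'req' and 'buf_entry' are hypothetical locals):
+ *
+ *	spin_lock(&req->req_list_lock);
+ *	list_add_tail(&buf_entry->next, &req->input_buffer_list);
+ *	spin_unlock(&req->req_list_lock);
+ *	wake_up(&req->input_buffer_wq);
+ */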
+
+static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
+
+struct parahotplug_request {
+ struct list_head list;
+ int id;
+ unsigned long expiration;
+ CONTROLVM_MESSAGE msg;
+};
+
+static LIST_HEAD(Parahotplug_request_list);
+static DEFINE_SPINLOCK(Parahotplug_request_list_lock); /* lock for above */
+static void parahotplug_process_list(void);
+
+/* Notifier callbacks registered by the server and client bus drivers via
+ * visorchipset_register_busdev_server()/_client().
+ */
+static VISORCHIPSET_BUSDEV_NOTIFIERS BusDev_Server_Notifiers;
+static VISORCHIPSET_BUSDEV_NOTIFIERS BusDev_Client_Notifiers;
+
+static void bus_create_response(ulong busNo, int response);
+static void bus_destroy_response(ulong busNo, int response);
+static void device_create_response(ulong busNo, ulong devNo, int response);
+static void device_destroy_response(ulong busNo, ulong devNo, int response);
+static void device_resume_response(ulong busNo, ulong devNo, int response);
+
+static VISORCHIPSET_BUSDEV_RESPONDERS BusDev_Responders = {
+ .bus_create = bus_create_response,
+ .bus_destroy = bus_destroy_response,
+ .device_create = device_create_response,
+ .device_destroy = device_destroy_response,
+ .device_pause = visorchipset_device_pause_response,
+ .device_resume = device_resume_response,
+};
+
+/* info for /dev/visorchipset */
+static dev_t MajorDev = -1; /**< indicates major num for device */
+
+/* /sys/devices/platform/visorchipset */
+static struct platform_device Visorchipset_platform_device = {
+ .name = "visorchipset",
+ .id = -1,
+};
+
+/* Function prototypes */
+static void controlvm_respond(CONTROLVM_MESSAGE_HEADER *msgHdr, int response);
+static void controlvm_respond_chipset_init(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ int response,
+ ULTRA_CHIPSET_FEATURE features);
+static void controlvm_respond_physdev_changestate(CONTROLVM_MESSAGE_HEADER *
+ msgHdr, int response,
+ ULTRA_SEGMENT_STATE state);
+
+static void
+show_partition_property(struct seq_file *f, void *ctx, int property)
+{
+ VISORCHIPSET_BUS_INFO *info = (VISORCHIPSET_BUS_INFO *) (ctx);
+
+ switch (property) {
+ case PARTPROP_name:
+ seq_printf(f, "%s\n", NONULLSTR(info->name));
+ break;
+ case PARTPROP_description:
+ seq_printf(f, "%s\n", NONULLSTR(info->description));
+ break;
+ case PARTPROP_handle:
+ seq_printf(f, "0x%-16.16Lx\n", info->partitionHandle);
+ break;
+ case PARTPROP_busNumber:
+ seq_printf(f, "%d\n", info->busNo);
+ break;
+ default:
+ seq_printf(f, "(%d??)\n", property);
+ break;
+ }
+}
+
+static void
+show_controlvm_property(struct seq_file *f, void *ctx, int property)
+{
+ /* Note: ctx is not needed since we only have 1 controlvm channel */
+ switch (property) {
+ case CTLVMPROP_physAddr:
+ if (ControlVm_channel == NULL)
+ seq_puts(f, "0x0\n");
+ else
+ seq_printf(f, "0x%-16.16Lx\n",
+ visorchannel_get_physaddr
+ (ControlVm_channel));
+ break;
+ case CTLVMPROP_controlChannelAddr:
+ if (ControlVm_channel == NULL)
+ seq_puts(f, "0x0\n");
+ else {
+ GUEST_PHYSICAL_ADDRESS addr = 0;
+ visorchannel_read(ControlVm_channel,
+ offsetof
+ (ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ gpControlChannel), &addr,
+ sizeof(addr));
+ seq_printf(f, "0x%-16.16Lx\n", (u64) (addr));
+ }
+ break;
+ case CTLVMPROP_controlChannelBytes:
+ if (ControlVm_channel == NULL)
+ seq_puts(f, "0x0\n");
+ else {
+ U32 bytes = 0;
+ visorchannel_read(ControlVm_channel,
+ offsetof
+ (ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ ControlChannelBytes), &bytes,
+ sizeof(bytes));
+ seq_printf(f, "%lu\n", (ulong) (bytes));
+ }
+ break;
+ case CTLVMPROP_sparBootPart:
+ seq_puts(f, "0:0:0:0/1\n");
+ break;
+ case CTLVMPROP_sparStoragePart:
+ seq_puts(f, "0:0:0:0/2\n");
+ break;
+ case CTLVMPROP_livedumpLength:
+ seq_printf(f, "%lu\n", LiveDump_info.length);
+ break;
+ case CTLVMPROP_livedumpCrc32:
+ seq_printf(f, "%lu\n", (ulong) LiveDump_info.crc32);
+ break;
+ default:
+ seq_printf(f, "(%d??)\n", property);
+ break;
+ }
+}
+
+static void
+proc_Init(void)
+{
+ if (ProcDir == NULL) {
+ ProcDir = proc_mkdir(MYDRVNAME, NULL);
+ if (ProcDir == NULL) {
+ LOGERR("failed to create /proc directory %s",
+ MYDRVNAME);
+ POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ }
+ }
+}
+
+static void
+proc_DeInit(void)
+{
+ if (ProcDir != NULL)
+ remove_proc_entry(MYDRVNAME, NULL);
+ ProcDir = NULL;
+}
+
+#if 0
+static void
+testUnicode(void)
+{
+ wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
+ char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
+	wchar_t unicode2[99];
+	int chrs;
+
+	/* NOTE: Either due to a bug or a feature I don't understand, the
+	 * kernel utf8_mbstowcs() and utf8_wcstombs() do NOT copy the
+	 * trailing NUL byte!! REALLY!!!!! Arrrrgggghhhhh
+ */
+
+ LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
+ LOGINF("utf8_wcstombs=%d",
+ chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
+ if (chrs >= 0)
+ s[chrs] = '\0'; /* GRRRRRRRR */
+ LOGINF("s='%s'", s);
+ LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 100));
+ if (chrs >= 0)
+ unicode2[chrs] = 0; /* GRRRRRRRR */
+ if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
+ LOGINF("strings match... good");
+ else
+ LOGINF("strings did not match!!");
+}
+#endif
+
+static void
+busInfo_clear(void *v)
+{
+ VISORCHIPSET_BUS_INFO *p = (VISORCHIPSET_BUS_INFO *) (v);
+
+ if (p->procObject) {
+ visor_proc_DestroyObject(p->procObject);
+ p->procObject = NULL;
+ }
+ kfree(p->name);
+ p->name = NULL;
+
+ kfree(p->description);
+ p->description = NULL;
+
+ p->state.created = 0;
+ memset(p, 0, sizeof(VISORCHIPSET_BUS_INFO));
+}
+
+static void
+devInfo_clear(void *v)
+{
+ VISORCHIPSET_DEVICE_INFO *p = (VISORCHIPSET_DEVICE_INFO *) (v);
+ p->state.created = 0;
+ memset(p, 0, sizeof(VISORCHIPSET_DEVICE_INFO));
+}
+
+static U8
+check_chipset_events(void)
+{
+ int i;
+ U8 send_msg = 1;
+ /* Check events to determine if response should be sent */
+ for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
+ send_msg &= chipset_events[i];
+ return send_msg;
+}
+
+static void
+clear_chipset_events(void)
+{
+ int i;
+ /* Clear chipset_events */
+ for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
+ chipset_events[i] = 0;
+}
+
+void
+visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
+ VISORCHIPSET_BUSDEV_RESPONDERS *responders,
+ ULTRA_VBUS_DEVICEINFO *driverInfo)
+{
+ LOCKSEM_UNINTERRUPTIBLE(&NotifierLock);
+ if (notifiers == NULL) {
+ memset(&BusDev_Server_Notifiers, 0,
+ sizeof(BusDev_Server_Notifiers));
+ serverregistered = 0; /* clear flag */
+ } else {
+ BusDev_Server_Notifiers = *notifiers;
+ serverregistered = 1; /* set flag */
+ }
+ if (responders)
+ *responders = BusDev_Responders;
+ if (driverInfo)
+ BusDeviceInfo_Init(driverInfo, "chipset", "visorchipset",
+ VERSION, NULL, __DATE__, __TIME__);
+
+ UNLOCKSEM(&NotifierLock);
+}
+EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
+
+void
+visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
+ VISORCHIPSET_BUSDEV_RESPONDERS *responders,
+ ULTRA_VBUS_DEVICEINFO *driverInfo)
+{
+ LOCKSEM_UNINTERRUPTIBLE(&NotifierLock);
+ if (notifiers == NULL) {
+ memset(&BusDev_Client_Notifiers, 0,
+ sizeof(BusDev_Client_Notifiers));
+ clientregistered = 0; /* clear flag */
+ } else {
+ BusDev_Client_Notifiers = *notifiers;
+ clientregistered = 1; /* set flag */
+ }
+ if (responders)
+ *responders = BusDev_Responders;
+ if (driverInfo)
+ BusDeviceInfo_Init(driverInfo, "chipset(bolts)", "visorchipset",
+ VERSION, NULL, __DATE__, __TIME__);
+ UNLOCKSEM(&NotifierLock);
+}
+EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
+
+static void
+cleanup_controlvm_structures(void)
+{
+	VISORCHIPSET_BUS_INFO *bi, *tmp_bi;
+	VISORCHIPSET_DEVICE_INFO *di, *tmp_di;
+
+	/* use the _safe iterators because entries are deleted while walking */
+	list_for_each_entry_safe(bi, tmp_bi, &BusInfoList, entry) {
+		busInfo_clear(bi);
+		list_del(&bi->entry);
+		kfree(bi);
+	}
+
+	list_for_each_entry_safe(di, tmp_di, &DevInfoList, entry) {
+		devInfo_clear(di);
+		list_del(&di->entry);
+		kfree(di);
+	}
+}
+
+static void
+chipset_init(CONTROLVM_MESSAGE *inmsg)
+{
+ static int chipset_inited;
+ ULTRA_CHIPSET_FEATURE features = 0;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ if (chipset_inited) {
+ LOGERR("CONTROLVM_CHIPSET_INIT Failed: Already Done.");
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto Away;
+ }
+ chipset_inited = 1;
+ POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
+
+ /* Set features to indicate we support parahotplug (if Command
+ * also supports it). */
+	features = inmsg->cmd.initChipset.features &
+		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
+
+ /* Set the "reply" bit so Command knows this is a
+ * features-aware driver. */
+ features |= ULTRA_CHIPSET_FEATURE_REPLY;
+
+Away:
+ if (rc < 0)
+ cleanup_controlvm_structures();
+ if (inmsg->hdr.Flags.responseExpected)
+ controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
+}
+
+static void
+controlvm_init_response(CONTROLVM_MESSAGE *msg,
+ CONTROLVM_MESSAGE_HEADER *msgHdr, int response)
+{
+ memset(msg, 0, sizeof(CONTROLVM_MESSAGE));
+ memcpy(&msg->hdr, msgHdr, sizeof(CONTROLVM_MESSAGE_HEADER));
+ msg->hdr.PayloadBytes = 0;
+ msg->hdr.PayloadVmOffset = 0;
+ msg->hdr.PayloadMaxBytes = 0;
+ if (response < 0) {
+ msg->hdr.Flags.failed = 1;
+ msg->hdr.CompletionStatus = (U32) (-response);
+ }
+}
+
+static void
+controlvm_respond(CONTROLVM_MESSAGE_HEADER *msgHdr, int response)
+{
+ CONTROLVM_MESSAGE outmsg;
+ if (!ControlVm_channel)
+ return;
+ controlvm_init_response(&outmsg, msgHdr, response);
+ /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
+ * back the deviceChangeState structure in the packet. */
+ if (msgHdr->Id == CONTROLVM_DEVICE_CHANGESTATE
+ && g_DeviceChangeStatePacket.deviceChangeState.busNo ==
+ g_diagpoolBusNo
+ && g_DeviceChangeStatePacket.deviceChangeState.devNo ==
+ g_diagpoolDevNo)
+ outmsg.cmd = g_DeviceChangeStatePacket;
+ if (outmsg.hdr.Flags.testMessage == 1) {
+ LOGINF("%s controlvm_msg=0x%x response=%d for test message",
+ __func__, outmsg.hdr.Id, response);
+ return;
+ }
+ if (!visorchannel_signalinsert(ControlVm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg)) {
+ LOGERR("signalinsert failed!");
+ return;
+ }
+}
+
+static void
+controlvm_respond_chipset_init(CONTROLVM_MESSAGE_HEADER *msgHdr, int response,
+ ULTRA_CHIPSET_FEATURE features)
+{
+ CONTROLVM_MESSAGE outmsg;
+ if (!ControlVm_channel)
+ return;
+ controlvm_init_response(&outmsg, msgHdr, response);
+ outmsg.cmd.initChipset.features = features;
+ if (!visorchannel_signalinsert(ControlVm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg)) {
+ LOGERR("signalinsert failed!");
+ return;
+ }
+}
+
+static void
+controlvm_respond_physdev_changestate(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ int response, ULTRA_SEGMENT_STATE state)
+{
+ CONTROLVM_MESSAGE outmsg;
+ if (!ControlVm_channel)
+ return;
+ controlvm_init_response(&outmsg, msgHdr, response);
+ outmsg.cmd.deviceChangeState.state = state;
+ outmsg.cmd.deviceChangeState.flags.physicalDevice = 1;
+ if (!visorchannel_signalinsert(ControlVm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg)) {
+ LOGERR("signalinsert failed!");
+ return;
+ }
+}
+
+void
+visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
+{
+ U32 localSavedCrashMsgOffset;
+ U16 localSavedCrashMsgCount;
+
+ /* get saved message count */
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ SavedCrashMsgCount),
+ &localSavedCrashMsgCount, sizeof(U16)) < 0) {
+ LOGERR("failed to get Saved Message Count");
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
+ LOGERR("Saved Message Count incorrect %d",
+ localSavedCrashMsgCount);
+ POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
+ localSavedCrashMsgCount,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* get saved crash message offset */
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ SavedCrashMsgOffset),
+ &localSavedCrashMsgOffset, sizeof(U32)) < 0) {
+ LOGERR("failed to get Saved Message Offset");
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ if (type == CRASH_bus) {
+ if (visorchannel_write(ControlVm_channel,
+ localSavedCrashMsgOffset,
+ msg, sizeof(CONTROLVM_MESSAGE)) < 0) {
+ LOGERR("SAVE_MSG_BUS_FAILURE: Failed to write CrashCreateBusMsg!");
+ POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ } else {
+ if (visorchannel_write(ControlVm_channel,
+ localSavedCrashMsgOffset +
+ sizeof(CONTROLVM_MESSAGE), msg,
+ sizeof(CONTROLVM_MESSAGE)) < 0) {
+ LOGERR("SAVE_MSG_DEV_FAILURE: Failed to write CrashCreateDevMsg!");
+ POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(visorchipset_save_message);
+
+static void
+bus_responder(CONTROLVM_ID cmdId, ulong busNo, int response)
+{
+ VISORCHIPSET_BUS_INFO *p = NULL;
+ BOOL need_clear = FALSE;
+
+ p = findbus(&BusInfoList, busNo);
+ if (!p) {
+ LOGERR("internal error busNo=%lu", busNo);
+ return;
+ }
+ if (response < 0) {
+ if ((cmdId == CONTROLVM_BUS_CREATE) &&
+ (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
+ /* undo the row we just created... */
+ delbusdevices(&DevInfoList, busNo);
+ } else {
+ if (cmdId == CONTROLVM_BUS_CREATE)
+ p->state.created = 1;
+ if (cmdId == CONTROLVM_BUS_DESTROY)
+ need_clear = TRUE;
+ }
+
+ if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ LOGERR("bus_responder no pending msg");
+ return; /* no controlvm response needed */
+ }
+ if (p->pendingMsgHdr.Id != (U32) cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ return;
+ }
+ controlvm_respond(&p->pendingMsgHdr, response);
+ p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ if (need_clear) {
+ busInfo_clear(p);
+ delbusdevices(&DevInfoList, busNo);
+ }
+}
+
+static void
+device_changestate_responder(CONTROLVM_ID cmdId,
+ ulong busNo, ulong devNo, int response,
+ ULTRA_SEGMENT_STATE responseState)
+{
+ VISORCHIPSET_DEVICE_INFO *p = NULL;
+ CONTROLVM_MESSAGE outmsg;
+
+ if (!ControlVm_channel)
+ return;
+
+ p = finddevice(&DevInfoList, busNo, devNo);
+ if (!p) {
+ LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
+ return;
+ }
+ if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ LOGERR("device_responder no pending msg");
+ return; /* no controlvm response needed */
+ }
+ if (p->pendingMsgHdr.Id != cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ return;
+ }
+
+ controlvm_init_response(&outmsg, &p->pendingMsgHdr, response);
+
+ outmsg.cmd.deviceChangeState.busNo = busNo;
+ outmsg.cmd.deviceChangeState.devNo = devNo;
+ outmsg.cmd.deviceChangeState.state = responseState;
+
+ if (!visorchannel_signalinsert(ControlVm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg)) {
+ LOGERR("signalinsert failed!");
+ return;
+ }
+
+ p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+}
+
+static void
+device_responder(CONTROLVM_ID cmdId, ulong busNo, ulong devNo, int response)
+{
+ VISORCHIPSET_DEVICE_INFO *p = NULL;
+ BOOL need_clear = FALSE;
+
+ p = finddevice(&DevInfoList, busNo, devNo);
+ if (!p) {
+ LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
+ return;
+ }
+ if (response >= 0) {
+ if (cmdId == CONTROLVM_DEVICE_CREATE)
+ p->state.created = 1;
+ if (cmdId == CONTROLVM_DEVICE_DESTROY)
+ need_clear = TRUE;
+ }
+
+ if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ LOGERR("device_responder no pending msg");
+ return; /* no controlvm response needed */
+ }
+ if (p->pendingMsgHdr.Id != (U32) cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ return;
+ }
+ controlvm_respond(&p->pendingMsgHdr, response);
+ p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ if (need_clear)
+ devInfo_clear(p);
+}
+
+static void
+bus_epilog(U32 busNo,
+ U32 cmd, CONTROLVM_MESSAGE_HEADER *msgHdr,
+ int response, BOOL needResponse)
+{
+ BOOL notified = FALSE;
+
+ VISORCHIPSET_BUS_INFO *pBusInfo = findbus(&BusInfoList, busNo);
+
+ if (!pBusInfo) {
+ LOGERR("HUH? bad busNo=%d", busNo);
+ return;
+ }
+ if (needResponse) {
+ memcpy(&pBusInfo->pendingMsgHdr, msgHdr,
+ sizeof(CONTROLVM_MESSAGE_HEADER));
+ } else
+ pBusInfo->pendingMsgHdr.Id = CONTROLVM_INVALID;
+
+ LOCKSEM_UNINTERRUPTIBLE(&NotifierLock);
+ if (response == CONTROLVM_RESP_SUCCESS) {
+ switch (cmd) {
+ case CONTROLVM_BUS_CREATE:
+			/* We can't tell from the bus_create
+			 * information which of our 2 bus flavors the
+			 * devices on this bus will ultimately end up on.
+			 * FORTUNATELY, it turns out it is harmless to
+			 * send the bus_create to both of them.  We can
+			 * narrow things down a little bit, though,
+			 * because we know:
+			 *  - BusDev_Server can handle either server or
+			 *    client devices
+			 *  - BusDev_Client can handle ONLY client devices
+			 */
+ if (BusDev_Server_Notifiers.bus_create) {
+ (*BusDev_Server_Notifiers.bus_create) (busNo);
+ notified = TRUE;
+ }
+ if ((!pBusInfo->flags.server) /*client */ &&
+ BusDev_Client_Notifiers.bus_create) {
+ (*BusDev_Client_Notifiers.bus_create) (busNo);
+ notified = TRUE;
+ }
+ break;
+ case CONTROLVM_BUS_DESTROY:
+ if (BusDev_Server_Notifiers.bus_destroy) {
+ (*BusDev_Server_Notifiers.bus_destroy) (busNo);
+ notified = TRUE;
+ }
+ if ((!pBusInfo->flags.server) /*client */ &&
+ BusDev_Client_Notifiers.bus_destroy) {
+ (*BusDev_Client_Notifiers.bus_destroy) (busNo);
+ notified = TRUE;
+ }
+ break;
+ }
+ }
+ if (notified)
+ /* The callback function just called above is responsible
+ * for calling the appropriate VISORCHIPSET_BUSDEV_RESPONDERS
+ * function, which will call bus_responder()
+ */
+ ;
+ else
+ bus_responder(cmd, busNo, response);
+ UNLOCKSEM(&NotifierLock);
+}
+
+static void
+device_epilog(U32 busNo, U32 devNo, ULTRA_SEGMENT_STATE state, U32 cmd,
+ CONTROLVM_MESSAGE_HEADER *msgHdr, int response,
+ BOOL needResponse, BOOL for_visorbus)
+{
+ VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers = NULL;
+ BOOL notified = FALSE;
+
+ VISORCHIPSET_DEVICE_INFO *pDevInfo =
+ finddevice(&DevInfoList, busNo, devNo);
+ char *envp[] = {
+ "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
+ NULL
+ };
+
+ if (!pDevInfo) {
+ LOGERR("HUH? bad busNo=%d, devNo=%d", busNo, devNo);
+ return;
+ }
+ if (for_visorbus)
+ notifiers = &BusDev_Server_Notifiers;
+ else
+ notifiers = &BusDev_Client_Notifiers;
+ if (needResponse) {
+ memcpy(&pDevInfo->pendingMsgHdr, msgHdr,
+ sizeof(CONTROLVM_MESSAGE_HEADER));
+ } else
+ pDevInfo->pendingMsgHdr.Id = CONTROLVM_INVALID;
+
+ LOCKSEM_UNINTERRUPTIBLE(&NotifierLock);
+ if (response >= 0) {
+ switch (cmd) {
+ case CONTROLVM_DEVICE_CREATE:
+ if (notifiers->device_create) {
+ (*notifiers->device_create) (busNo, devNo);
+ notified = TRUE;
+ }
+ break;
+ case CONTROLVM_DEVICE_CHANGESTATE:
+ /* ServerReady / ServerRunning / SegmentStateRunning */
+ if (state.Alive == SegmentStateRunning.Alive &&
+ state.Operating == SegmentStateRunning.Operating) {
+ if (notifiers->device_resume) {
+ (*notifiers->device_resume) (busNo,
+ devNo);
+ notified = TRUE;
+ }
+ }
+ /* ServerNotReady / ServerLost / SegmentStateStandby */
+ else if (state.Alive == SegmentStateStandby.Alive &&
+ state.Operating ==
+ SegmentStateStandby.Operating) {
+				/* technically this is the standby case,
+				 * where the server is lost
+ */
+ if (notifiers->device_pause) {
+ (*notifiers->device_pause) (busNo,
+ devNo);
+ notified = TRUE;
+ }
+ } else if (state.Alive == SegmentStatePaused.Alive &&
+ state.Operating ==
+ SegmentStatePaused.Operating) {
+				/* this is a lite pause, where the channel
+				 * is still valid and is merely 'paused'
+ */
+ if (busNo == g_diagpoolBusNo
+ && devNo == g_diagpoolDevNo) {
+ LOGINF("DEVICE_CHANGESTATE(DiagpoolChannel busNo=%d devNo=%d is pausing...)",
+ busNo, devNo);
+ /* this will trigger the
+ * diag_shutdown.sh script in
+ * the visorchipset hotplug */
+ kobject_uevent_env
+ (&Visorchipset_platform_device.dev.
+ kobj, KOBJ_ONLINE, envp);
+ }
+ }
+ break;
+ case CONTROLVM_DEVICE_DESTROY:
+ if (notifiers->device_destroy) {
+ (*notifiers->device_destroy) (busNo, devNo);
+ notified = TRUE;
+ }
+ break;
+ }
+ }
+ if (notified)
+ /* The callback function just called above is responsible
+ * for calling the appropriate VISORCHIPSET_BUSDEV_RESPONDERS
+ * function, which will call device_responder()
+ */
+ ;
+ else
+ device_responder(cmd, busNo, devNo, response);
+ UNLOCKSEM(&NotifierLock);
+}
+
+static void
+bus_create(CONTROLVM_MESSAGE *inmsg)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->createBus.busNo;
+ int rc = CONTROLVM_RESP_SUCCESS;
+ VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+
+
+ pBusInfo = findbus(&BusInfoList, busNo);
+ if (pBusInfo && (pBusInfo->state.created == 1)) {
+ LOGERR("CONTROLVM_BUS_CREATE Failed: bus %lu already exists",
+ busNo);
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto Away;
+ }
+ pBusInfo = kzalloc(sizeof(VISORCHIPSET_BUS_INFO), GFP_KERNEL);
+ if (pBusInfo == NULL) {
+ LOGERR("CONTROLVM_BUS_CREATE Failed: bus %lu kzalloc failed",
+ busNo);
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto Away;
+ }
+
+ INIT_LIST_HEAD(&pBusInfo->entry);
+ pBusInfo->busNo = busNo;
+ pBusInfo->devNo = cmd->createBus.deviceCount;
+
+ POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
+
+ if (inmsg->hdr.Flags.testMessage == 1)
+ pBusInfo->chanInfo.addrType = ADDRTYPE_localTest;
+ else
+ pBusInfo->chanInfo.addrType = ADDRTYPE_localPhysical;
+
+ pBusInfo->flags.server = inmsg->hdr.Flags.server;
+ pBusInfo->chanInfo.channelAddr = cmd->createBus.channelAddr;
+ pBusInfo->chanInfo.nChannelBytes = cmd->createBus.channelBytes;
+ pBusInfo->chanInfo.channelTypeGuid = cmd->createBus.busDataTypeGuid;
+ pBusInfo->chanInfo.channelInstGuid = cmd->createBus.busInstGuid;
+
+ list_add(&pBusInfo->entry, &BusInfoList);
+
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
+
+Away:
+ bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
+ rc, inmsg->hdr.Flags.responseExpected == 1);
+}
+
+static void
+bus_destroy(CONTROLVM_MESSAGE *inmsg)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->destroyBus.busNo;
+ VISORCHIPSET_BUS_INFO *pBusInfo;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ pBusInfo = findbus(&BusInfoList, busNo);
+ if (!pBusInfo) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: bus %lu invalid", busNo);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto Away;
+ }
+ if (pBusInfo->state.created == 0) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: bus %lu already destroyed",
+ busNo);
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto Away;
+ }
+
+Away:
+ bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
+ rc, inmsg->hdr.Flags.responseExpected == 1);
+}
+
+static void
+bus_configure(CONTROLVM_MESSAGE *inmsg, PARSER_CONTEXT *parser_ctx)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->configureBus.busNo;
+ VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+ int rc = CONTROLVM_RESP_SUCCESS;
+ char s[99];
+
+ busNo = cmd->configureBus.busNo;
+ POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
+
+ pBusInfo = findbus(&BusInfoList, busNo);
+ if (!pBusInfo) {
+ LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu invalid",
+ busNo);
+ POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto Away;
+ }
+ if (pBusInfo->state.created == 0) {
+ LOGERR("CONTROLVM_BUS_CONFIGURE Failed: Invalid bus %lu - not created yet",
+ busNo);
+ POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto Away;
+ }
+ /* TBD - add this check to other commands also... */
+ if (pBusInfo->pendingMsgHdr.Id != CONTROLVM_INVALID) {
+ LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu MsgId=%u outstanding",
+ busNo, (uint) pBusInfo->pendingMsgHdr.Id);
+ POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+ goto Away;
+ }
+
+ pBusInfo->partitionHandle = cmd->configureBus.guestHandle;
+ pBusInfo->partitionGuid = parser_id_get(parser_ctx);
+ parser_param_start(parser_ctx, PARSERSTRING_NAME);
+ pBusInfo->name = parser_string_get(parser_ctx);
+
+ visorchannel_GUID_id(&pBusInfo->partitionGuid, s);
+ pBusInfo->procObject =
+ visor_proc_CreateObject(PartitionType, s, (void *) (pBusInfo));
+ if (pBusInfo->procObject == NULL) {
+ LOGERR("CONTROLVM_BUS_CONFIGURE Failed: busNo=%lu failed to create /proc entry",
+ busNo);
+ POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto Away;
+ }
+ POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
+Away:
+ bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
+ rc, inmsg->hdr.Flags.responseExpected == 1);
+}
+
+static void
+my_device_create(CONTROLVM_MESSAGE *inmsg)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->createDevice.busNo;
+ ulong devNo = cmd->createDevice.devNo;
+ VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
+ VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ pDevInfo = finddevice(&DevInfoList, busNo, devNo);
+ if (pDevInfo && (pDevInfo->state.created == 1)) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: busNo=%lu, devNo=%lu already exists",
+ busNo, devNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto Away;
+ }
+ pBusInfo = findbus(&BusInfoList, busNo);
+ if (!pBusInfo) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: Invalid bus %lu - out of range",
+ busNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto Away;
+ }
+ if (pBusInfo->state.created == 0) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: Invalid bus %lu - not created yet",
+ busNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto Away;
+ }
+ pDevInfo = kzalloc(sizeof(VISORCHIPSET_DEVICE_INFO), GFP_KERNEL);
+ if (pDevInfo == NULL) {
+		LOGERR("CONTROLVM_DEVICE_CREATE Failed: busNo=%lu, devNo=%lu kzalloc failed",
+ busNo, devNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto Away;
+ }
+
+ INIT_LIST_HEAD(&pDevInfo->entry);
+ pDevInfo->busNo = busNo;
+ pDevInfo->devNo = devNo;
+ pDevInfo->devInstGuid = cmd->createDevice.devInstGuid;
+ POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+
+ if (inmsg->hdr.Flags.testMessage == 1)
+ pDevInfo->chanInfo.addrType = ADDRTYPE_localTest;
+ else
+ pDevInfo->chanInfo.addrType = ADDRTYPE_localPhysical;
+ pDevInfo->chanInfo.channelAddr = cmd->createDevice.channelAddr;
+ pDevInfo->chanInfo.nChannelBytes = cmd->createDevice.channelBytes;
+ pDevInfo->chanInfo.channelTypeGuid = cmd->createDevice.dataTypeGuid;
+ pDevInfo->chanInfo.intr = cmd->createDevice.intr;
+ list_add(&pDevInfo->entry, &DevInfoList);
+ POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+Away:
+	/* get the bus and devNo for DiagPool channel; guard against the error
+	 * paths above, where pDevInfo may still be NULL
+	 */
+	if (pDevInfo &&
+	    is_diagpool_channel(pDevInfo->chanInfo.channelTypeGuid)) {
+		g_diagpoolBusNo = busNo;
+		g_diagpoolDevNo = devNo;
+		LOGINF("CONTROLVM_DEVICE_CREATE for DiagPool channel: busNo=%lu, devNo=%lu",
+			g_diagpoolBusNo, g_diagpoolDevNo);
+	}
+	if (pDevInfo)
+		device_epilog(busNo, devNo, SegmentStateRunning,
+			      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
+			      inmsg->hdr.Flags.responseExpected == 1,
+			      FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
+}
+
+static void
+my_device_changestate(CONTROLVM_MESSAGE *inmsg)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->deviceChangeState.busNo;
+ ulong devNo = cmd->deviceChangeState.devNo;
+ ULTRA_SEGMENT_STATE state = cmd->deviceChangeState.state;
+ VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ pDevInfo = finddevice(&DevInfoList, busNo, devNo);
+ if (!pDevInfo) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: busNo=%lu, devNo=%lu invalid (doesn't exist)",
+ busNo, devNo);
+ POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ goto Away;
+ }
+ if (pDevInfo->state.created == 0) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: busNo=%lu, devNo=%lu invalid (not created)",
+ busNo, devNo);
+ POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ }
+Away:
+ if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
+ device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
+ &inmsg->hdr, rc,
+ inmsg->hdr.Flags.responseExpected == 1,
+ FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
+}
+
+static void
+my_device_destroy(CONTROLVM_MESSAGE *inmsg)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->destroyDevice.busNo;
+ ulong devNo = cmd->destroyDevice.devNo;
+ VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ pDevInfo = finddevice(&DevInfoList, busNo, devNo);
+ if (!pDevInfo) {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: busNo=%lu, devNo=%lu invalid",
+ busNo, devNo);
+ rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ goto Away;
+ }
+ if (pDevInfo->state.created == 0) {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: busNo=%lu, devNo=%lu already destroyed",
+ busNo, devNo);
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+
+Away:
+ if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
+ device_epilog(busNo, devNo, SegmentStateRunning,
+ CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
+ inmsg->hdr.Flags.responseExpected == 1,
+ FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
+}
+
+/* When provided with the physical address of the controlvm channel
+ * (phys_addr), the offset to the payload area we need to manage
+ * (offset), and the size of this payload area (bytes), fills in the
+ * CONTROLVM_PAYLOAD_INFO struct.  Returns CONTROLVM_RESP_SUCCESS for
+ * success or a negative CONTROLVM_RESP_ERROR_* code for failure.
+ */
+static int
+initialize_controlvm_payload_info(HOSTADDRESS phys_addr, U64 offset, U32 bytes,
+ CONTROLVM_PAYLOAD_INFO *info)
+{
+ U8 __iomem *payload = NULL;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ if (info == NULL) {
+ LOGERR("HUH ? CONTROLVM_PAYLOAD_INIT Failed : Programmer check at %s:%d",
+ __FILE__, __LINE__);
+ rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+ goto Away;
+ }
+ memset(info, 0, sizeof(CONTROLVM_PAYLOAD_INFO));
+ if ((offset == 0) || (bytes == 0)) {
+ LOGERR("CONTROLVM_PAYLOAD_INIT Failed: RequestPayloadOffset=%llu RequestPayloadBytes=%llu!",
+ (u64) offset, (u64) bytes);
+ rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+ goto Away;
+ }
+ payload = ioremap_cache(phys_addr + offset, bytes);
+ if (payload == NULL) {
+ LOGERR("CONTROLVM_PAYLOAD_INIT Failed: ioremap_cache %llu for %llu bytes failed",
+ (u64) offset, (u64) bytes);
+ rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
+ goto Away;
+ }
+
+ info->offset = offset;
+ info->bytes = bytes;
+ info->ptr = payload;
+ LOGINF("offset=%llu, bytes=%lu, ptr=%p",
+ (u64) (info->offset), (ulong) (info->bytes), info->ptr);
+
+Away:
+ if (rc < 0) {
+ if (payload != NULL) {
+ iounmap(payload);
+ payload = NULL;
+ }
+ }
+ return rc;
+}
+
+static void
+destroy_controlvm_payload_info(CONTROLVM_PAYLOAD_INFO *info)
+{
+ if (info->ptr != NULL) {
+ iounmap(info->ptr);
+ info->ptr = NULL;
+ }
+ memset(info, 0, sizeof(CONTROLVM_PAYLOAD_INFO));
+}
+
+static void
+initialize_controlvm_payload(void)
+{
+ HOSTADDRESS phys_addr = visorchannel_get_physaddr(ControlVm_channel);
+ U64 payloadOffset = 0;
+ U32 payloadBytes = 0;
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ RequestPayloadOffset),
+ &payloadOffset, sizeof(payloadOffset)) < 0) {
+ LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
+ POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ RequestPayloadBytes),
+ &payloadBytes, sizeof(payloadBytes)) < 0) {
+ LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
+ POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ initialize_controlvm_payload_info(phys_addr,
+ payloadOffset, payloadBytes,
+ &ControlVm_payload_info);
+}
+
+/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
+ * Returns CONTROLVM_RESP_xxx code.
+ */
+int
+visorchipset_chipset_ready(void)
+{
+ kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
+ return CONTROLVM_RESP_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
+
+int
+visorchipset_chipset_selftest(void)
+{
+ char env_selftest[20];
+ char *envp[] = { env_selftest, NULL };
+ sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
+ kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
+ envp);
+ return CONTROLVM_RESP_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
+
+/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
+ * Returns CONTROLVM_RESP_xxx code.
+ */
+int
+visorchipset_chipset_notready(void)
+{
+ kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
+ return CONTROLVM_RESP_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
+
+static void
+chipset_ready(CONTROLVM_MESSAGE_HEADER *msgHdr)
+{
+ int rc = visorchipset_chipset_ready();
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msgHdr->Flags.responseExpected && !visorchipset_holdchipsetready)
+ controlvm_respond(msgHdr, rc);
+ if (msgHdr->Flags.responseExpected && visorchipset_holdchipsetready) {
+ /* Send CHIPSET_READY response when all modules have been loaded
+ * and disks mounted for the partition
+ */
+ g_ChipSetMsgHdr = *msgHdr;
+ LOGINF("Holding CHIPSET_READY response");
+ }
+}
+
+static void
+chipset_selftest(CONTROLVM_MESSAGE_HEADER *msgHdr)
+{
+ int rc = visorchipset_chipset_selftest();
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msgHdr->Flags.responseExpected)
+ controlvm_respond(msgHdr, rc);
+}
+
+static void
+chipset_notready(CONTROLVM_MESSAGE_HEADER *msgHdr)
+{
+ int rc = visorchipset_chipset_notready();
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msgHdr->Flags.responseExpected)
+ controlvm_respond(msgHdr, rc);
+}
+
+/* This is your "one-stop" shop for grabbing the next message from the
+ * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
+ */
+static BOOL
+read_controlvm_event(CONTROLVM_MESSAGE *msg)
+{
+ if (visorchannel_signalremove(ControlVm_channel,
+ CONTROLVM_QUEUE_EVENT, msg)) {
+ /* got a message */
+ if (msg->hdr.Flags.testMessage == 1) {
+ LOGERR("ignoring bad CONTROLVM_QUEUE_EVENT msg with controlvm_msg_id=0x%x because Flags.testMessage is nonsensical (=1)", msg->hdr.Id);
+ return FALSE;
+ } else
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/*
+ * The general parahotplug flow works as follows. The visorchipset
+ * driver receives a DEVICE_CHANGESTATE message from Command
+ * specifying a physical device to enable or disable. The CONTROLVM
+ * message handler calls parahotplug_process_message, which then adds
+ * the message to a global list and kicks off a udev event which
+ * causes a user level script to enable or disable the specified
+ * device. The udev script then writes to
+ * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
+ * to get called, at which point the appropriate CONTROLVM message is
+ * retrieved from the list and responded to.
+ */
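+
+/* Illustrative sketch only (not part of this driver): the user-level
+ * side of the flow writes "<id> <active>" back to
+ * /proc/visorchipset/parahotplug once it has enabled or disabled the
+ * device.  The actual udev script is outside this patch; something
+ * along these lines, using the SPAR_PARAHOTPLUG_* variables exported
+ * by parahotplug_request_kickoff() below, would acknowledge a request:
+ *
+ *	int fd = open("/proc/visorchipset/parahotplug", O_WRONLY);
+ *	char reply[64];
+ *	int n = snprintf(reply, sizeof(reply), "%s %s",
+ *			 getenv("SPAR_PARAHOTPLUG_ID"),
+ *			 getenv("SPAR_PARAHOTPLUG_STATE"));
+ *	if (fd >= 0 && n > 0)
+ *		write(fd, reply, n);
+ *	close(fd);
+ */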
+
+#define PARAHOTPLUG_TIMEOUT_MS 2000
+
+/*
+ * Generate unique int to match an outstanding CONTROLVM message with a
+ * udev script /proc response
+ */
+static int
+parahotplug_next_id(void)
+{
+ static atomic_t id = ATOMIC_INIT(0);
+ return atomic_inc_return(&id);
+}
+
+/*
+ * Returns the time (in jiffies) when a CONTROLVM message on the list
+ * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
+ */
+static unsigned long
+parahotplug_next_expiration(void)
+{
+ return jiffies + PARAHOTPLUG_TIMEOUT_MS * HZ / 1000;
+}
+
+/*
+ * Create a parahotplug_request, which is basically a wrapper for a
+ * CONTROLVM_MESSAGE that we can stick on a list
+ */
+static struct parahotplug_request *
+parahotplug_request_create(CONTROLVM_MESSAGE *msg)
+{
+ struct parahotplug_request *req =
+ kmalloc(sizeof(struct parahotplug_request),
+ GFP_KERNEL|__GFP_NORETRY);
+ if (req == NULL)
+ return NULL;
+
+ req->id = parahotplug_next_id();
+ req->expiration = parahotplug_next_expiration();
+ req->msg = *msg;
+
+ return req;
+}
+
+/*
+ * Free a parahotplug_request.
+ */
+static void
+parahotplug_request_destroy(struct parahotplug_request *req)
+{
+ kfree(req);
+}
+
+/*
+ * Cause uevent to run the user level script to do the disable/enable
+ * specified in (the CONTROLVM message in) the specified
+ * parahotplug_request
+ */
+static void
+parahotplug_request_kickoff(struct parahotplug_request *req)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &req->msg.cmd;
+ char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
+ env_func[40];
+ char *envp[] = {
+ env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
+ };
+
+ sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
+ sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
+ sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
+ cmd->deviceChangeState.state.Active);
+ sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
+ cmd->deviceChangeState.busNo);
+ sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
+ cmd->deviceChangeState.devNo >> 3);
+ sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
+ cmd->deviceChangeState.devNo & 0x7);
+
+ LOGINF("parahotplug_request_kickoff: state=%d, bdf=%d/%d/%d, id=%u\n",
+ cmd->deviceChangeState.state.Active,
+ cmd->deviceChangeState.busNo, cmd->deviceChangeState.devNo >> 3,
+ cmd->deviceChangeState.devNo & 7, req->id);
+
+ kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
+ envp);
+}
+
+/*
+ * Remove any request from the list that's been on there too long and
+ * respond with an error.
+ */
+static void
+parahotplug_process_list(void)
+{
+ struct list_head *pos = NULL;
+ struct list_head *tmp = NULL;
+
+ spin_lock(&Parahotplug_request_list_lock);
+
+ list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
+ struct parahotplug_request *req =
+ list_entry(pos, struct parahotplug_request, list);
+ if (time_after_eq(jiffies, req->expiration)) {
+ list_del(pos);
+ if (req->msg.hdr.Flags.responseExpected)
+ controlvm_respond_physdev_changestate(
+ &req->msg.hdr,
+ CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
+ req->msg.cmd.deviceChangeState.state);
+ parahotplug_request_destroy(req);
+ }
+ }
+
+ spin_unlock(&Parahotplug_request_list_lock);
+}
+
+/*
+ * Called from the /proc handler, which means the user script has
+ * finished the enable/disable. Find the matching identifier, and
+ * respond to the CONTROLVM message with success.
+ */
+static int
+parahotplug_request_complete(int id, U16 active)
+{
+ struct list_head *pos = NULL;
+ struct list_head *tmp = NULL;
+
+ spin_lock(&Parahotplug_request_list_lock);
+
+ /* Look for a request matching "id". */
+ list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
+ struct parahotplug_request *req =
+ list_entry(pos, struct parahotplug_request, list);
+ if (req->id == id) {
+ /* Found a match. Remove it from the list and
+ * respond.
+ */
+ list_del(pos);
+ spin_unlock(&Parahotplug_request_list_lock);
+ req->msg.cmd.deviceChangeState.state.Active = active;
+ if (req->msg.hdr.Flags.responseExpected)
+ controlvm_respond_physdev_changestate(
+ &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
+ req->msg.cmd.deviceChangeState.state);
+ parahotplug_request_destroy(req);
+ return 0;
+ }
+ }
+
+ spin_unlock(&Parahotplug_request_list_lock);
+ return -1;
+}
+
+/*
+ * Enables or disables a PCI device by kicking off a udev script
+ */
+static void
+parahotplug_process_message(CONTROLVM_MESSAGE *inmsg)
+{
+ struct parahotplug_request *req;
+
+ req = parahotplug_request_create(inmsg);
+
+ if (req == NULL) {
+ LOGERR("parahotplug_process_message: couldn't allocate request");
+ return;
+ }
+
+ if (inmsg->cmd.deviceChangeState.state.Active) {
+ /* For enable messages, just respond with success
+ * right away. This is a bit of a hack, but there are
+ * issues with the early enable messages we get (with
+ * either the udev script not detecting that the device
+ * is up, or not getting called at all). Fortunately
+ * the messages that get lost don't matter anyway, as
+ * devices are automatically enabled at
+ * initialization.
+ */
+ parahotplug_request_kickoff(req);
+ controlvm_respond_physdev_changestate(&inmsg->hdr,
+ CONTROLVM_RESP_SUCCESS,
+ inmsg->cmd.
+ deviceChangeState.state);
+ parahotplug_request_destroy(req);
+ } else {
+ /* For disable messages, add the request to the
+ * request list before kicking off the udev script. It
+ * won't get responded to until the script has
+ * indicated it's done.
+ */
+ spin_lock(&Parahotplug_request_list_lock);
+ list_add_tail(&(req->list), &Parahotplug_request_list);
+ spin_unlock(&Parahotplug_request_list_lock);
+
+ parahotplug_request_kickoff(req);
+ }
+}
+
+/*
+ * Gets called when the udev script writes to
+ * /proc/visorchipset/parahotplug. Expects input in the form of "<id>
+ * <active>" where <id> is the identifier passed to the script that
+ * matches a request on the request list, and <active> is 0 or 1
+ * indicating whether the device is now enabled or not.
+ */
+static ssize_t
+parahotplug_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[64];
+ uint id;
+ ushort active;
+
+ if (count > sizeof(buf) - 1) {
+ LOGERR("parahotplug_proc_write: count (%d) exceeds size of buffer (%d)",
+ (int) count, (int) sizeof(buf));
+ return -EINVAL;
+ }
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("parahotplug_proc_write: copy_from_user failed");
+ return -EFAULT;
+ }
+ buf[count] = '\0';
+
+ if (sscanf(buf, "%u %hu", &id, &active) != 2) {
+ id = 0;
+ active = 0;
+ }
+
+ if (active != 1 && active != 0) {
+ LOGERR("parahotplug_proc_write: invalid active field");
+ return -EINVAL;
+ }
+
+ parahotplug_request_complete((int) id, (U16) active);
+
+ return count;
+}
+
+static const struct file_operations parahotplug_proc_fops = {
+ .owner = THIS_MODULE,
+ .read = visorchipset_proc_read_writeonly,
+ .write = parahotplug_proc_write,
+};
+
+/* Process a controlvm message.
+ * Return result:
+ * FALSE - this function will return FALSE only in the case where the
+ * controlvm message was NOT processed, but processing must be
+ * retried before reading the next controlvm message; a
+ * scenario where this can occur is when we need to throttle
+ * the allocation of memory in which to copy out controlvm
+ * payload data
+ * TRUE - processing of the controlvm message completed,
+ * either successfully or with an error.
+ */
+static BOOL
+handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg.cmd;
+ U64 parametersAddr = 0;
+ U32 parametersBytes = 0;
+ PARSER_CONTEXT *parser_ctx = NULL;
+ BOOL isLocalAddr = FALSE;
+ CONTROLVM_MESSAGE ackmsg;
+
+ /* create parsing context if necessary */
+ isLocalAddr = (inmsg.hdr.Flags.testMessage == 1);
+ if (channel_addr == 0) {
+ LOGERR("HUH? channel_addr is 0!");
+ return TRUE;
+ }
+ parametersAddr = channel_addr + inmsg.hdr.PayloadVmOffset;
+ parametersBytes = inmsg.hdr.PayloadBytes;
+
+ /* Parameter and channel addresses within test messages actually lie
+ * within our OS-controlled memory. We need to know that, because it
+ * makes a difference in how we compute the virtual address.
+ */
+ if (parametersAddr != 0 && parametersBytes != 0) {
+ BOOL retry = FALSE;
+ parser_ctx =
+ parser_init_byteStream(parametersAddr, parametersBytes,
+ isLocalAddr, &retry);
+ if (!parser_ctx) {
+ if (retry) {
+ LOGWRN("throttling to copy payload");
+ return FALSE;
+ }
+ LOGWRN("parsing failed");
+ LOGWRN("inmsg.hdr.Id=0x%lx", (ulong) inmsg.hdr.Id);
+ LOGWRN("parametersAddr=0x%llx", (u64) parametersAddr);
+ LOGWRN("parametersBytes=%lu", (ulong) parametersBytes);
+ LOGWRN("isLocalAddr=%d", isLocalAddr);
+ }
+ }
+
+ if (!isLocalAddr) {
+ controlvm_init_response(&ackmsg, &inmsg.hdr,
+ CONTROLVM_RESP_SUCCESS);
+		if (ControlVm_channel &&
+		    !visorchannel_signalinsert(ControlVm_channel,
+					       CONTROLVM_QUEUE_ACK, &ackmsg))
+			LOGWRN("failed to send ACK");
+ }
+ switch (inmsg.hdr.Id) {
+ case CONTROLVM_CHIPSET_INIT:
+ LOGINF("CHIPSET_INIT(#busses=%lu,#switches=%lu)",
+ (ulong) inmsg.cmd.initChipset.busCount,
+ (ulong) inmsg.cmd.initChipset.switchCount);
+ chipset_init(&inmsg);
+ break;
+ case CONTROLVM_BUS_CREATE:
+ LOGINF("BUS_CREATE(%lu,#devs=%lu)",
+ (ulong) cmd->createBus.busNo,
+ (ulong) cmd->createBus.deviceCount);
+ bus_create(&inmsg);
+ break;
+ case CONTROLVM_BUS_DESTROY:
+ LOGINF("BUS_DESTROY(%lu)", (ulong) cmd->destroyBus.busNo);
+ bus_destroy(&inmsg);
+ break;
+ case CONTROLVM_BUS_CONFIGURE:
+ LOGINF("BUS_CONFIGURE(%lu)", (ulong) cmd->configureBus.busNo);
+ bus_configure(&inmsg, parser_ctx);
+ break;
+ case CONTROLVM_DEVICE_CREATE:
+ LOGINF("DEVICE_CREATE(%lu,%lu)",
+ (ulong) cmd->createDevice.busNo,
+ (ulong) cmd->createDevice.devNo);
+ my_device_create(&inmsg);
+ break;
+ case CONTROLVM_DEVICE_CHANGESTATE:
+ if (cmd->deviceChangeState.flags.physicalDevice) {
+ LOGINF("DEVICE_CHANGESTATE for physical device (%lu,%lu, active=%lu)",
+ (ulong) cmd->deviceChangeState.busNo,
+ (ulong) cmd->deviceChangeState.devNo,
+ (ulong) cmd->deviceChangeState.state.Active);
+ parahotplug_process_message(&inmsg);
+ } else {
+ LOGINF("DEVICE_CHANGESTATE for virtual device (%lu,%lu, state.Alive=0x%lx)",
+ (ulong) cmd->deviceChangeState.busNo,
+ (ulong) cmd->deviceChangeState.devNo,
+ (ulong) cmd->deviceChangeState.state.Alive);
+ /* save the hdr and cmd structures for later use */
+ /* when sending back the response to Command */
+ my_device_changestate(&inmsg);
+ g_DiagMsgHdr = inmsg.hdr;
+ g_DeviceChangeStatePacket = inmsg.cmd;
+ break;
+ }
+ break;
+ case CONTROLVM_DEVICE_DESTROY:
+ LOGINF("DEVICE_DESTROY(%lu,%lu)",
+ (ulong) cmd->destroyDevice.busNo,
+ (ulong) cmd->destroyDevice.devNo);
+ my_device_destroy(&inmsg);
+ break;
+ case CONTROLVM_DEVICE_CONFIGURE:
+ LOGINF("DEVICE_CONFIGURE(%lu,%lu)",
+ (ulong) cmd->configureDevice.busNo,
+ (ulong) cmd->configureDevice.devNo);
+ /* no op for now, just send a respond that we passed */
+ if (inmsg.hdr.Flags.responseExpected)
+ controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
+ break;
+ case CONTROLVM_CHIPSET_READY:
+ LOGINF("CHIPSET_READY");
+ chipset_ready(&inmsg.hdr);
+ break;
+ case CONTROLVM_CHIPSET_SELFTEST:
+ LOGINF("CHIPSET_SELFTEST");
+ chipset_selftest(&inmsg.hdr);
+ break;
+ case CONTROLVM_CHIPSET_STOP:
+ LOGINF("CHIPSET_STOP");
+ chipset_notready(&inmsg.hdr);
+ break;
+ default:
+ LOGERR("unrecognized controlvm cmd=%d", (int) inmsg.hdr.Id);
+ if (inmsg.hdr.Flags.responseExpected)
+ controlvm_respond(&inmsg.hdr,
+ -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
+ break;
+ }
+
+ if (parser_ctx != NULL) {
+ parser_done(parser_ctx);
+ parser_ctx = NULL;
+ }
+ return TRUE;
+}
+
+static void
+controlvm_periodic_work(struct work_struct *work)
+{
+ VISORCHIPSET_CHANNEL_INFO chanInfo;
+ CONTROLVM_MESSAGE inmsg;
+ char s[99];
+ BOOL gotACommand = FALSE;
+ BOOL handle_command_failed = FALSE;
+ static U64 Poll_Count;
+
+ /* make sure visorbus server is registered for controlvm callbacks */
+ if (visorchipset_serverregwait && !serverregistered)
+ goto Away;
+	/* make sure visorclientbus server is registered for controlvm
+ * callbacks
+ */
+ if (visorchipset_clientregwait && !clientregistered)
+ goto Away;
+
+ memset(&chanInfo, 0, sizeof(VISORCHIPSET_CHANNEL_INFO));
+ if (!ControlVm_channel) {
+ HOSTADDRESS addr = controlvm_get_channel_address();
+ if (addr != 0) {
+ ControlVm_channel =
+ visorchannel_create_with_lock
+ (addr,
+ sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL),
+ UltraControlvmChannelProtocolGuid);
+ if (ControlVm_channel == NULL)
+ LOGERR("failed to create controlvm channel");
+ else if (ULTRA_CONTROLVM_CHANNEL_OK_CLIENT
+ (visorchannel_get_header(ControlVm_channel),
+ NULL)) {
+ LOGINF("Channel %s (ControlVm) discovered",
+ visorchannel_id(ControlVm_channel, s));
+ initialize_controlvm_payload();
+ } else {
+ LOGERR("controlvm channel is invalid");
+ visorchannel_destroy(ControlVm_channel);
+ ControlVm_channel = NULL;
+ }
+ }
+ }
+
+	Poll_Count++;
+	if ((ControlVm_channel == NULL) && (Poll_Count < 250))
+		goto Away;	/* otherwise keep going */
+
+ /* Check events to determine if response to CHIPSET_READY
+ * should be sent
+ */
+ if (visorchipset_holdchipsetready
+ && (g_ChipSetMsgHdr.Id != CONTROLVM_INVALID)) {
+ if (check_chipset_events() == 1) {
+ LOGINF("Sending CHIPSET_READY response");
+ controlvm_respond(&g_ChipSetMsgHdr, 0);
+ clear_chipset_events();
+ memset(&g_ChipSetMsgHdr, 0,
+ sizeof(CONTROLVM_MESSAGE_HEADER));
+ }
+ }
+
+ if (ControlVm_channel) {
+ while (visorchannel_signalremove(ControlVm_channel,
+ CONTROLVM_QUEUE_RESPONSE,
+ &inmsg)) {
+ if (inmsg.hdr.PayloadMaxBytes != 0) {
+ LOGERR("Payload of size %lu returned @%lu with unexpected message id %d.",
+ (ulong) inmsg.hdr.PayloadMaxBytes,
+ (ulong) inmsg.hdr.PayloadVmOffset,
+ inmsg.hdr.Id);
+ }
+ }
+ if (!gotACommand) {
+ if (ControlVm_Pending_Msg_Valid) {
+ /* we throttled processing of a prior
+ * msg, so try to process it again
+ * rather than reading a new one
+ */
+ inmsg = ControlVm_Pending_Msg;
+ ControlVm_Pending_Msg_Valid = FALSE;
+ gotACommand = TRUE;
+ } else
+ gotACommand = read_controlvm_event(&inmsg);
+ }
+ }
+
+ handle_command_failed = FALSE;
+ while (gotACommand && (!handle_command_failed)) {
+ Most_recent_message_jiffies = jiffies;
+ if (ControlVm_channel) {
+ if (handle_command(inmsg,
+ visorchannel_get_physaddr
+ (ControlVm_channel)))
+ gotACommand = read_controlvm_event(&inmsg);
+ else {
+ /* this is a scenario where throttling
+ * is required, but probably NOT an
+ * error...; we stash the current
+ * controlvm msg so we will attempt to
+ * reprocess it on our next loop
+ */
+ handle_command_failed = TRUE;
+ ControlVm_Pending_Msg = inmsg;
+ ControlVm_Pending_Msg_Valid = TRUE;
+ }
+
+ } else {
+ handle_command(inmsg, 0);
+ gotACommand = FALSE;
+ }
+ }
+
+ /* parahotplug_worker */
+ parahotplug_process_list();
+
+Away:
+
+ if (time_after(jiffies,
+ Most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
+ /* it's been longer than MIN_IDLE_SECONDS since we
+ * processed our last controlvm message; slow down the
+ * polling
+ */
+ if (Poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW) {
+ LOGINF("switched to slow controlvm polling");
+ Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ }
+ } else {
+ if (Poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST) {
+ Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ LOGINF("switched to fast controlvm polling");
+ }
+ }
+
+ queue_delayed_work(Periodic_controlvm_workqueue,
+ &Periodic_controlvm_work, Poll_jiffies);
+}
+
+static void
+setup_crash_devices_work_queue(struct work_struct *work)
+{
+
+ CONTROLVM_MESSAGE localCrashCreateBusMsg;
+ CONTROLVM_MESSAGE localCrashCreateDevMsg;
+ CONTROLVM_MESSAGE msg;
+ HOSTADDRESS host_addr;
+ U32 localSavedCrashMsgOffset;
+ U16 localSavedCrashMsgCount;
+
+ /* make sure visorbus server is registered for controlvm callbacks */
+ if (visorchipset_serverregwait && !serverregistered)
+ goto Away;
+
+	/* make sure visorclientbus server is registered for controlvm
+ * callbacks
+ */
+ if (visorchipset_clientregwait && !clientregistered)
+ goto Away;
+
+ POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ /* send init chipset msg */
+ msg.hdr.Id = CONTROLVM_CHIPSET_INIT;
+ msg.cmd.initChipset.busCount = 23;
+ msg.cmd.initChipset.switchCount = 0;
+
+ chipset_init(&msg);
+
+ host_addr = controlvm_get_channel_address();
+ if (!host_addr) {
+ LOGERR("Huh? Host address is NULL");
+ POSTCODE_LINUX_2(CRASH_DEV_HADDR_NULL, POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ ControlVm_channel =
+ visorchannel_create_with_lock
+ (host_addr,
+ sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL),
+ UltraControlvmChannelProtocolGuid);
+
+ if (ControlVm_channel == NULL) {
+ LOGERR("failed to create controlvm channel");
+ POSTCODE_LINUX_2(CRASH_DEV_CONTROLVM_NULL,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* get saved message count */
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ SavedCrashMsgCount),
+ &localSavedCrashMsgCount, sizeof(U16)) < 0) {
+ LOGERR("failed to get Saved Message Count");
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
+ LOGERR("Saved Message Count incorrect %d",
+ localSavedCrashMsgCount);
+ POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
+ localSavedCrashMsgCount,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* get saved crash message offset */
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ SavedCrashMsgOffset),
+ &localSavedCrashMsgOffset, sizeof(U32)) < 0) {
+ LOGERR("failed to get Saved Message Offset");
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* read create device message for storage bus offset */
+ if (visorchannel_read(ControlVm_channel,
+ localSavedCrashMsgOffset,
+ &localCrashCreateBusMsg,
+ sizeof(CONTROLVM_MESSAGE)) < 0) {
+ LOGERR("CRASH_DEV_RD_BUS_FAIULRE: Failed to read CrashCreateBusMsg!");
+ POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* read create device message for storage device */
+ if (visorchannel_read(ControlVm_channel,
+ localSavedCrashMsgOffset +
+ sizeof(CONTROLVM_MESSAGE),
+ &localCrashCreateDevMsg,
+ sizeof(CONTROLVM_MESSAGE)) < 0) {
+ LOGERR("CRASH_DEV_RD_DEV_FAIULRE: Failed to read CrashCreateDevMsg!");
+ POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* reuse IOVM create bus message */
+ if (localCrashCreateBusMsg.cmd.createBus.channelAddr != 0)
+ bus_create(&localCrashCreateBusMsg);
+ else {
+ LOGERR("CrashCreateBusMsg is null, no dump will be taken");
+ POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* reuse create device message for storage device */
+ if (localCrashCreateDevMsg.cmd.createDevice.channelAddr != 0)
+ my_device_create(&localCrashCreateDevMsg);
+ else {
+ LOGERR("CrashCreateDevMsg is null, no dump will be taken");
+ POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ LOGINF("Bus and device ready for dumping");
+ POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ return;
+
+Away:
+
+ Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+
+ queue_delayed_work(Periodic_controlvm_workqueue,
+ &Periodic_controlvm_work, Poll_jiffies);
+}
+
+static void
+bus_create_response(ulong busNo, int response)
+{
+ bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
+}
+
+static void
+bus_destroy_response(ulong busNo, int response)
+{
+ bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
+}
+
+static void
+device_create_response(ulong busNo, ulong devNo, int response)
+{
+ device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
+}
+
+static void
+device_destroy_response(ulong busNo, ulong devNo, int response)
+{
+ device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
+}
+
+void
+visorchipset_device_pause_response(ulong busNo, ulong devNo, int response)
+{
+
+ device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
+ busNo, devNo, response,
+ SegmentStateStandby);
+}
+EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
+
+static void
+device_resume_response(ulong busNo, ulong devNo, int response)
+{
+ device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
+ busNo, devNo, response,
+ SegmentStateRunning);
+}
+
+BOOL
+visorchipset_get_bus_info(ulong busNo, VISORCHIPSET_BUS_INFO *busInfo)
+{
+ void *p = findbus(&BusInfoList, busNo);
+ if (!p) {
+ LOGERR("(%lu) failed", busNo);
+ return FALSE;
+ }
+ memcpy(busInfo, p, sizeof(VISORCHIPSET_BUS_INFO));
+ return TRUE;
+}
+EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
+
+BOOL
+visorchipset_set_bus_context(ulong busNo, void *context)
+{
+ VISORCHIPSET_BUS_INFO *p = findbus(&BusInfoList, busNo);
+ if (!p) {
+ LOGERR("(%lu) failed", busNo);
+ return FALSE;
+ }
+ p->bus_driver_context = context;
+ return TRUE;
+}
+EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
+
+BOOL
+visorchipset_get_device_info(ulong busNo, ulong devNo,
+ VISORCHIPSET_DEVICE_INFO *devInfo)
+{
+ void *p = finddevice(&DevInfoList, busNo, devNo);
+ if (!p) {
+ LOGERR("(%lu,%lu) failed", busNo, devNo);
+ return FALSE;
+ }
+ memcpy(devInfo, p, sizeof(VISORCHIPSET_DEVICE_INFO));
+ return TRUE;
+}
+EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
+
+BOOL
+visorchipset_set_device_context(ulong busNo, ulong devNo, void *context)
+{
+ VISORCHIPSET_DEVICE_INFO *p = finddevice(&DevInfoList, busNo, devNo);
+ if (!p) {
+ LOGERR("(%lu,%lu) failed", busNo, devNo);
+ return FALSE;
+ }
+ p->bus_driver_context = context;
+ return TRUE;
+}
+EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
+
+/* Generic wrapper function for allocating memory from a kmem_cache pool.
+ */
+void *
+visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
+ char *fn, int ln)
+{
+ gfp_t gfp;
+ void *p;
+
+ if (ok_to_block)
+ gfp = GFP_KERNEL;
+ else
+ gfp = GFP_ATOMIC;
+ /* __GFP_NORETRY means "ok to fail", meaning
+ * kmem_cache_alloc() can return NULL, implying the caller CAN
+ * cope with failure. If you do NOT specify __GFP_NORETRY,
+ * Linux will go to extreme measures to get memory for you
+ * (like, invoke oom killer), which will probably cripple the
+ * system.
+ */
+ gfp |= __GFP_NORETRY;
+ p = kmem_cache_alloc(pool, gfp);
+ if (!p) {
+ LOGERR("kmem_cache_alloc failed early @%s:%d\n", fn, ln);
+ return NULL;
+ }
+ atomic_inc(&Visorchipset_cache_buffers_in_use);
+ return p;
+}
+
+/* Generic wrapper function for freeing memory from a kmem_cache pool.
+ */
+void
+visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
+{
+ if (!p) {
+ LOGERR("NULL pointer @%s:%d\n", fn, ln);
+ return;
+ }
+ atomic_dec(&Visorchipset_cache_buffers_in_use);
+ kmem_cache_free(pool, p);
+}
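+
+/* For illustration only: how a caller is expected to pair the two
+ * wrappers above.  "My_pool" and "struct my_buf" are hypothetical names
+ * used purely for this example.
+ *
+ *	struct my_buf *b = visorchipset_cache_alloc(My_pool, TRUE,
+ *						    __FILE__, __LINE__);
+ *	if (b == NULL)
+ *		return;	// NULL is expected; __GFP_NORETRY allows failure
+ *	// ... use b ...
+ *	visorchipset_cache_free(My_pool, b, __FILE__, __LINE__);
+ */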
+
+#define gettoken(bufp) strsep(bufp, " -\t\n")
+
+static ssize_t
+chipset_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[512];
+ char *token, *p;
+
+ if (count > sizeof(buf) - 1) {
+ LOGERR("chipset_proc_write: count (%d) exceeds size of buffer (%d)",
+		       (int) count, (int) sizeof(buf));
+ return -EINVAL;
+ }
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("chipset_proc_write: copy_from_user failed");
+ return -EFAULT;
+ }
+ buf[count] = '\0';
+
+ p = buf;
+ token = gettoken(&p);
+
+ if (strcmp(token, "CALLHOMEDISK_MOUNTED") == 0) {
+ token = gettoken(&p);
+ /* The Call Home Disk has been mounted */
+ if (strcmp(token, "0") == 0)
+ chipset_events[0] = 1;
+ } else if (strcmp(token, "MODULES_LOADED") == 0) {
+ token = gettoken(&p);
+ /* All modules for the partition have been loaded */
+ if (strcmp(token, "0") == 0)
+ chipset_events[1] = 1;
+ } else if (token == NULL) {
+ /* No event specified */
+ LOGERR("No event was specified to send CHIPSET_READY response");
+ return -1;
+ } else {
+ /* Unsupported event specified */
+ LOGERR("%s is an invalid event for sending CHIPSET_READY response", token);
+ return -1;
+ }
+
+ return count;
+}
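+
+/* Example sketch only: the event strings accepted by chipset_proc_write()
+ * above.  A user-level component reports an event by writing one of the
+ * following lines to the chipset proc entry (created below with the name
+ * VISORCHIPSET_CHIPSET_PROC_ENTRY_FN; the path shown here is an
+ * assumption):
+ *
+ *	int fd = open("/proc/visorchipset/chipsetready", O_WRONLY);
+ *	write(fd, "CALLHOMEDISK_MOUNTED 0", 22);
+ *	write(fd, "MODULES_LOADED 0", 16);
+ *
+ * Once both events have been recorded in chipset_events[], the held
+ * CHIPSET_READY response is sent from controlvm_periodic_work().
+ */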
+
+static ssize_t
+visorchipset_proc_read_writeonly(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ return 0;
+}
+
+/**
+ * Reads the InstallationError, InstallationTextId,
+ * InstallationRemainingSteps fields of ControlVMChannel.
+ */
+static ssize_t
+proc_read_installer(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int length = 0;
+ U16 remainingSteps;
+ U32 error, textId;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (!ControlVm_channel)
+ return -ENODEV;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationRemainingSteps), &remainingSteps,
+ sizeof(U16));
+ visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationError), &error, sizeof(U32));
+ visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationTextId), &textId, sizeof(U32));
+
+ length = sprintf(vbuf, "%u %u %u\n", remainingSteps, error, textId);
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
+
+/**
+ * Writes to the InstallationError, InstallationTextId,
+ * InstallationRemainingSteps fields of
+ * ControlVMChannel.
+ * Input: RemainingSteps Error TextId
+ * Limit 32 characters input
+ */
+#define UINT16_MAX (65535U)
+#define UINT32_MAX (4294967295U)
+static ssize_t
+proc_write_installer(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ char buf[32];
+ U16 remainingSteps;
+ U32 error, textId;
+
+ if (!ControlVm_channel)
+ return -ENODEV;
+
+ /* Check to make sure there is no buffer overflow */
+ if (count > (sizeof(buf) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ WARN(1, "Error copying from user space\n");
+ return -EFAULT;
+ }
+
+ if (sscanf(buf, "%hu %i %i", &remainingSteps, &error, &textId) != 3) {
+ remainingSteps = UINT16_MAX;
+ error = UINT32_MAX;
+ textId = UINT32_MAX;
+ }
+
+ if (remainingSteps != UINT16_MAX) {
+ if (visorchannel_write
+ (ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationRemainingSteps), &remainingSteps,
+ sizeof(U16)) < 0)
+ WARN(1, "Installation Status Write Failed - Write function error - RemainingSteps = %d\n",
+ remainingSteps);
+ }
+
+ if (error != UINT32_MAX) {
+ if (visorchannel_write
+ (ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationError), &error, sizeof(U32)) < 0)
+ WARN(1, "Installation Status Write Failed - Write function error - Error = %d\n",
+ error);
+ }
+
+ if (textId != UINT32_MAX) {
+ if (visorchannel_write
+ (ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationTextId), &textId, sizeof(U32)) < 0)
+ WARN(1, "Installation Status Write Failed - Write function error - TextId = %d\n",
+ textId);
+ }
+
+	/* Return count (i.e. report the whole buffer as consumed) so
+	 * this function is not called again for the same data
+	 */
+ return count;
+}
+
+/**
+ * Reads the ToolAction field of ControlVMChannel.
+ */
+static ssize_t
+proc_read_toolaction(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int length = 0;
+ U8 toolAction;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (!ControlVm_channel)
+ return -ENODEV;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ ToolAction), &toolAction, sizeof(U8));
+
+ length = sprintf(vbuf, "%u\n", toolAction);
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
+
+/**
+ * Writes to the ToolAction field of ControlVMChannel.
+ * Input: ToolAction
+ * Limit 3 characters input
+ */
+#define UINT8_MAX (255U)
+static ssize_t
+proc_write_toolaction(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ char buf[3];
+ U8 toolAction;
+
+ if (!ControlVm_channel)
+ return -ENODEV;
+
+ /* Check to make sure there is no buffer overflow */
+ if (count > (sizeof(buf) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ WARN(1, "Error copying from user space\n");
+ return -EFAULT;
+ }
+
+ if (sscanf(buf, "%hhd", &toolAction) != 1)
+ toolAction = UINT8_MAX;
+
+ if (toolAction != UINT8_MAX) {
+ if (visorchannel_write
+ (ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ToolAction),
+ &toolAction, sizeof(U8)) < 0)
+ WARN(1, "Installation ToolAction Write Failed - ToolAction = %d\n",
+ toolAction);
+ }
+
+	/* Return count (i.e. report the whole buffer as consumed) so
+	 * this function is not called again for the same data
+	 */
+ return count;
+}
+
+/**
+ * Reads the EfiSparIndication.BootToTool field of ControlVMChannel.
+ */
+static ssize_t
+proc_read_bootToTool(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int length = 0;
+ ULTRA_EFI_SPAR_INDICATION efiSparIndication;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (!ControlVm_channel)
+ return -ENODEV;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ EfiSparIndication), &efiSparIndication,
+ sizeof(ULTRA_EFI_SPAR_INDICATION));
+
+ length = sprintf(vbuf, "%d\n", (int) efiSparIndication.BootToTool);
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
+
+/**
+ * Writes to the EfiSparIndication.BootToTool field of ControlVMChannel.
+ * Input: 1 or 0 (1 being on, 0 being off)
+ */
+static ssize_t
+proc_write_bootToTool(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ char buf[3];
+ int inputVal;
+ ULTRA_EFI_SPAR_INDICATION efiSparIndication;
+
+ if (!ControlVm_channel)
+ return -ENODEV;
+
+ /* Check to make sure there is no buffer overflow */
+ if (count > (sizeof(buf) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ WARN(1, "Error copying from user space\n");
+ return -EFAULT;
+ }
+
+ if (sscanf(buf, "%i", &inputVal) != 1)
+ inputVal = 0;
+
+ efiSparIndication.BootToTool = (inputVal == 1 ? 1 : 0);
+
+ if (visorchannel_write
+ (ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EfiSparIndication),
+ &efiSparIndication, sizeof(ULTRA_EFI_SPAR_INDICATION)) < 0)
+ printk
+ ("Installation BootToTool Write Failed - BootToTool = %d\n",
+ (int) efiSparIndication.BootToTool);
+
+	/* Return count (i.e. report the whole buffer as consumed) so
+	 * this function is not called again for the same data
+	 */
+ return count;
+}
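+
+/* For illustration only: the input formats expected by the three proc
+ * writers above.  The file descriptors are placeholders for the
+ * "installer", "toolaction" and "boottotool" entries created under
+ * ProcDir in visorchipset_init() below.
+ *
+ *	// installer: "<RemainingSteps> <Error> <TextId>"
+ *	write(fd_installer, "5 0 0", 5);
+ *	// toolaction: a single numeric ToolAction value
+ *	write(fd_toolaction, "1", 1);
+ *	// boottotool: 1 to boot to tool, 0 not to
+ *	write(fd_boottotool, "1", 1);
+ */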
+
+static const struct file_operations chipset_proc_fops = {
+ .owner = THIS_MODULE,
+ .read = visorchipset_proc_read_writeonly,
+ .write = chipset_proc_write,
+};
+
+static int __init
+visorchipset_init(void)
+{
+ int rc = 0, x = 0;
+ struct proc_dir_entry *installer_file;
+ struct proc_dir_entry *toolaction_file;
+ struct proc_dir_entry *bootToTool_file;
+
+ LOGINF("chipset driver version %s loaded", VERSION);
+ /* process module options */
+ POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ LOGINF("option - testvnic=%d", visorchipset_testvnic);
+ LOGINF("option - testvnicclient=%d", visorchipset_testvnicclient);
+ LOGINF("option - testmsg=%d", visorchipset_testmsg);
+ LOGINF("option - testteardown=%d", visorchipset_testteardown);
+ LOGINF("option - major=%d", visorchipset_major);
+ LOGINF("option - serverregwait=%d", visorchipset_serverregwait);
+ LOGINF("option - clientregwait=%d", visorchipset_clientregwait);
+ LOGINF("option - holdchipsetready=%d", visorchipset_holdchipsetready);
+
+ memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
+ memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
+ memset(&ControlVm_payload_info, 0, sizeof(ControlVm_payload_info));
+ memset(&LiveDump_info, 0, sizeof(LiveDump_info));
+ atomic_set(&LiveDump_info.buffers_in_use, 0);
+
+ if (visorchipset_testvnic) {
+ ERRDRV("testvnic option no longer supported: (status = %d)\n",
+ x);
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
+ rc = x;
+ goto Away;
+ }
+
+ controlvm_init();
+ MajorDev = MKDEV(visorchipset_major, 0);
+ rc = visorchipset_file_init(MajorDev, &ControlVm_channel);
+ if (rc < 0) {
+ ERRDRV("visorchipset_file_init(MajorDev, &ControlVm_channel): error (status=%d)\n", rc);
+ POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
+ goto Away;
+ }
+
+ proc_Init();
+ memset(PartitionPropertyNames, 0, sizeof(PartitionPropertyNames));
+ memset(ControlVmPropertyNames, 0, sizeof(ControlVmPropertyNames));
+ InitPartitionProperties();
+ InitControlVmProperties();
+
+ PartitionType = visor_proc_CreateType(ProcDir, PartitionTypeNames,
+ (const char **)
+ PartitionPropertyNames,
+ &show_partition_property);
+ ControlVmType =
+ visor_proc_CreateType(ProcDir, ControlVmTypeNames,
+ (const char **) ControlVmPropertyNames,
+ &show_controlvm_property);
+
+ ControlVmObject = visor_proc_CreateObject(ControlVmType, NULL, NULL);
+
+ /* Setup Installation fields */
+ installer_file = proc_create("installer", 0644, ProcDir,
+ &proc_installer_fops);
+ /* Setup the ToolAction field */
+ toolaction_file = proc_create("toolaction", 0644, ProcDir,
+ &proc_toolaction_fops);
+ /* Setup the BootToTool field */
+ bootToTool_file = proc_create("boottotool", 0644, ProcDir,
+ &proc_bootToTool_fops);
+
+ memset(&g_DiagMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ chipset_proc_dir = proc_create(VISORCHIPSET_CHIPSET_PROC_ENTRY_FN,
+ 0644, ProcDir, &chipset_proc_fops);
+ memset(&g_ChipSetMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ parahotplug_proc_dir =
+ proc_create(VISORCHIPSET_PARAHOTPLUG_PROC_ENTRY_FN, 0200,
+ ProcDir, &parahotplug_proc_fops);
+ memset(&g_DelDumpMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ if (filexfer_constructor(sizeof(struct putfile_request)) < 0) {
+ ERRDRV("filexfer_constructor failed: (status=-1)\n");
+ POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
+ rc = -1;
+ goto Away;
+ }
+ Putfile_buffer_list_pool =
+ kmem_cache_create(Putfile_buffer_list_pool_name,
+ sizeof(struct putfile_buffer_entry),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!Putfile_buffer_list_pool) {
+ ERRDRV("failed to alloc Putfile_buffer_list_pool: (status=-1)\n");
+ POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
+ rc = -1;
+ goto Away;
+ }
+ if (visorchipset_disable_controlvm) {
+ LOGINF("visorchipset_init:controlvm disabled");
+ } else {
+ /* if booting in a crash kernel */
+ if (visorchipset_crash_kernel)
+ INIT_DELAYED_WORK(&Periodic_controlvm_work,
+ setup_crash_devices_work_queue);
+ else
+ INIT_DELAYED_WORK(&Periodic_controlvm_work,
+ controlvm_periodic_work);
+ Periodic_controlvm_workqueue =
+ create_singlethread_workqueue("visorchipset_controlvm");
+
+ if (Periodic_controlvm_workqueue == NULL) {
+ ERRDRV("cannot create controlvm workqueue: (status=%d)\n",
+ -ENOMEM);
+ POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
+ DIAG_SEVERITY_ERR);
+ rc = -ENOMEM;
+ goto Away;
+ }
+ Most_recent_message_jiffies = jiffies;
+ Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ rc = queue_delayed_work(Periodic_controlvm_workqueue,
+ &Periodic_controlvm_work, Poll_jiffies);
+ if (rc < 0) {
+ ERRDRV("queue_delayed_work(Periodic_controlvm_workqueue, &Periodic_controlvm_work, Poll_jiffies): error (status=%d)\n", rc);
+ POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
+ DIAG_SEVERITY_ERR);
+ goto Away;
+ }
+
+ }
+
+ Visorchipset_platform_device.dev.devt = MajorDev;
+ if (platform_device_register(&Visorchipset_platform_device) < 0) {
+ ERRDRV("platform_device_register(visorchipset) failed: (status=-1)\n");
+ POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
+ rc = -1;
+ goto Away;
+ }
+ LOGINF("visorchipset device created");
+ POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
+ rc = 0;
+Away:
+ if (rc) {
+ LOGERR("visorchipset_init failed");
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
+ POSTCODE_SEVERITY_ERR);
+ }
+ return rc;
+}
+
+static void
+visorchipset_exit(void)
+{
+ char s[99];
+ POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+
+ if (visorchipset_disable_controlvm) {
+ ;
+ } else {
+ cancel_delayed_work(&Periodic_controlvm_work);
+ flush_workqueue(Periodic_controlvm_workqueue);
+ destroy_workqueue(Periodic_controlvm_workqueue);
+ Periodic_controlvm_workqueue = NULL;
+ destroy_controlvm_payload_info(&ControlVm_payload_info);
+ }
+ Test_Vnic_channel = NULL;
+ if (Putfile_buffer_list_pool) {
+ kmem_cache_destroy(Putfile_buffer_list_pool);
+ Putfile_buffer_list_pool = NULL;
+ }
+ filexfer_destructor();
+ if (ControlVmObject) {
+ visor_proc_DestroyObject(ControlVmObject);
+ ControlVmObject = NULL;
+ }
+ cleanup_controlvm_structures();
+
+ if (ControlVmType) {
+ visor_proc_DestroyType(ControlVmType);
+ ControlVmType = NULL;
+ }
+ if (PartitionType) {
+ visor_proc_DestroyType(PartitionType);
+ PartitionType = NULL;
+ }
+ if (diag_proc_dir) {
+ remove_proc_entry(VISORCHIPSET_DIAG_PROC_ENTRY_FN, ProcDir);
+ diag_proc_dir = NULL;
+ }
+ memset(&g_DiagMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ if (chipset_proc_dir) {
+ remove_proc_entry(VISORCHIPSET_CHIPSET_PROC_ENTRY_FN, ProcDir);
+ chipset_proc_dir = NULL;
+ }
+ memset(&g_ChipSetMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ if (parahotplug_proc_dir) {
+ remove_proc_entry(VISORCHIPSET_PARAHOTPLUG_PROC_ENTRY_FN,
+ ProcDir);
+ parahotplug_proc_dir = NULL;
+ }
+
+ memset(&g_DelDumpMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ proc_DeInit();
+ if (ControlVm_channel != NULL) {
+ LOGINF("Channel %s (ControlVm) disconnected",
+ visorchannel_id(ControlVm_channel, s));
+ visorchannel_destroy(ControlVm_channel);
+ ControlVm_channel = NULL;
+ }
+ controlvm_deinit();
+ visorchipset_file_cleanup();
+ POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ LOGINF("chipset driver unloaded");
+}
+
+module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
+int visorchipset_testvnic = 0;
+
+module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
+int visorchipset_testvnicclient = 0;
+
+module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_testmsg,
+ "1 to manufacture the chipset, bus, and switch messages");
+int visorchipset_testmsg = 0;
+
+module_param_named(major, visorchipset_major, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
+int visorchipset_major = 0;
+
+module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_serverregwait,
+ "1 to have the module wait for the visor bus to register");
+int visorchipset_serverregwait = 0; /* default is off */
+module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
+int visorchipset_clientregwait = 1; /* default is on */
+module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_testteardown,
+ "1 to test teardown of the chipset, bus, and switch");
+int visorchipset_testteardown = 0; /* default is off */
+module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
+ S_IRUGO);
+MODULE_PARM_DESC(visorchipset_disable_controlvm,
+ "1 to disable polling of controlVm channel");
+int visorchipset_disable_controlvm = 0; /* default is off */
+module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_crash_kernel,
+ "1 means we are running in crash kernel");
+int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
+module_param_named(holdchipsetready, visorchipset_holdchipsetready,
+ int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_holdchipsetready,
+ "1 to hold response to CHIPSET_READY");
+int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
+ * response immediately */
+module_init(visorchipset_init);
+module_exit(visorchipset_exit);
+
+MODULE_AUTHOR("Unisys");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
+ VERSION);
+MODULE_VERSION(VERSION);
diff --git a/drivers/staging/unisys/visorchipset/visorchipset_umode.h b/drivers/staging/unisys/visorchipset/visorchipset_umode.h
new file mode 100644
index 000000000000..259e840376a5
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/visorchipset_umode.h
@@ -0,0 +1,37 @@
+/* visorchipset_umode.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/** @file *********************************************************************
+ *
+ * This describes structures needed for the interface between the
+ * visorchipset driver and a user-mode component that opens the device.
+ *
+ ******************************************************************************
+ */
+
+#ifndef __VISORCHIPSET_UMODE_H
+#define __VISORCHIPSET_UMODE_H
+
+
+
+/** The user-mode program can access the control channel buffer directly
+ * via this memory map.
+ */
+#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET (0x00000000)
+#define VISORCHIPSET_MMAP_CONTROLCHANSIZE (0x00400000) /* 4MB */
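+
+/* Illustrative sketch only: a user-mode program could map the control
+ * channel with something like the following (the device node name is an
+ * assumption):
+ *
+ *	int fd = open("/dev/visorchipset", O_RDWR);
+ *	void *chan = mmap(NULL, VISORCHIPSET_MMAP_CONTROLCHANSIZE,
+ *			  PROT_READ, MAP_SHARED, fd,
+ *			  VISORCHIPSET_MMAP_CONTROLCHANOFFSET);
+ */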
+
+#endif /* __VISORCHIPSET_UMODE_H */
diff --git a/drivers/staging/unisys/visorutil/Kconfig b/drivers/staging/unisys/visorutil/Kconfig
new file mode 100644
index 000000000000..4ff61a7c506b
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys visorutil configuration
+#
+
+config UNISYS_VISORUTIL
+ tristate "Unisys visorutil driver"
+ depends on UNISYSSPAR
+ ---help---
+ If you say Y here, you will enable the Unisys visorutil driver.
+
diff --git a/drivers/staging/unisys/visorutil/Makefile b/drivers/staging/unisys/visorutil/Makefile
new file mode 100644
index 000000000000..3f463888dcec
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Unisys visorutil
+#
+
+obj-$(CONFIG_UNISYS_VISORUTIL) += visorutil.o
+
+visorutil-y := charqueue.o easyproc.o periodic_work.o procobjecttree.o \
+ memregion_direct.o visorkmodutils.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
diff --git a/drivers/staging/unisys/visorutil/charqueue.c b/drivers/staging/unisys/visorutil/charqueue.c
new file mode 100644
index 000000000000..0ceede129e87
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/charqueue.c
@@ -0,0 +1,141 @@
+/* charqueue.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Simple character queue implementation for Linux kernel mode.
+ */
+
+#include "charqueue.h"
+
+#define MYDRVNAME "charqueue"
+
+#define IS_EMPTY(charqueue) (charqueue->head == charqueue->tail)
+
+
+
+struct CHARQUEUE_Tag {
+ int alloc_size;
+ int nslots;
+ spinlock_t lock;
+ int head, tail;
+ unsigned char buf[0];
+};
+
+
+
+CHARQUEUE *visor_charqueue_create(ulong nslots)
+{
+ int alloc_size = sizeof(CHARQUEUE) + nslots + 1;
+ CHARQUEUE *cq = kmalloc(alloc_size, GFP_KERNEL|__GFP_NORETRY);
+ if (cq == NULL) {
+ ERRDRV("visor_charqueue_create allocation failed (alloc_size=%d)",
+ alloc_size);
+ return NULL;
+ }
+ cq->alloc_size = alloc_size;
+ cq->nslots = nslots;
+ cq->head = cq->tail = 0;
+ spin_lock_init(&cq->lock);
+ return cq;
+}
+EXPORT_SYMBOL_GPL(visor_charqueue_create);
+
+
+
+void visor_charqueue_enqueue(CHARQUEUE *charqueue, unsigned char c)
+{
+ int alloc_slots = charqueue->nslots+1; /* 1 slot is always empty */
+
+ spin_lock(&charqueue->lock);
+ charqueue->head = (charqueue->head+1) % alloc_slots;
+ if (charqueue->head == charqueue->tail)
+ /* overflow; overwrite the oldest entry */
+ charqueue->tail = (charqueue->tail+1) % alloc_slots;
+ charqueue->buf[charqueue->head] = c;
+ spin_unlock(&charqueue->lock);
+}
+EXPORT_SYMBOL_GPL(visor_charqueue_enqueue);
+
+
+
+BOOL visor_charqueue_is_empty(CHARQUEUE *charqueue)
+{
+ BOOL b;
+ spin_lock(&charqueue->lock);
+ b = IS_EMPTY(charqueue);
+ spin_unlock(&charqueue->lock);
+ return b;
+}
+EXPORT_SYMBOL_GPL(visor_charqueue_is_empty);
+
+
+
+static int charqueue_dequeue_1(CHARQUEUE *charqueue)
+{
+ int alloc_slots = charqueue->nslots + 1; /* 1 slot is always empty */
+
+ if (IS_EMPTY(charqueue))
+ return -1;
+ charqueue->tail = (charqueue->tail+1) % alloc_slots;
+ return charqueue->buf[charqueue->tail];
+}
+
+
+
+int charqueue_dequeue(CHARQUEUE *charqueue)
+{
+ int rc;
+
+ spin_lock(&charqueue->lock);
+ rc = charqueue_dequeue_1(charqueue);
+ spin_unlock(&charqueue->lock);
+ return rc;
+}
+
+
+
+int visor_charqueue_dequeue_n(CHARQUEUE *charqueue, unsigned char *buf, int n)
+{
+ int rc, counter = 0, c;
+
+ spin_lock(&charqueue->lock);
+ for (;;) {
+ if (n <= 0)
+ break; /* no more buffer space */
+ c = charqueue_dequeue_1(charqueue);
+ if (c < 0)
+ break; /* no more input */
+ *buf = (unsigned char)(c);
+ buf++;
+ n--;
+ counter++;
+ }
+ rc = counter;
+ spin_unlock(&charqueue->lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_charqueue_dequeue_n);
+
+
+
+void visor_charqueue_destroy(CHARQUEUE *charqueue)
+{
+ if (charqueue == NULL)
+ return;
+ kfree(charqueue);
+}
+EXPORT_SYMBOL_GPL(visor_charqueue_destroy);
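+
+/* Example sketch only: typical use of the queue implemented above.
+ *
+ *	CHARQUEUE *cq = visor_charqueue_create(128);
+ *	unsigned char out[16];
+ *	int n;
+ *
+ *	if (cq) {
+ *		visor_charqueue_enqueue(cq, 'x');
+ *		n = visor_charqueue_dequeue_n(cq, out, sizeof(out));
+ *		// n is now 1 and out[0] == 'x'
+ *		visor_charqueue_destroy(cq);
+ *	}
+ */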
diff --git a/drivers/staging/unisys/visorutil/charqueue.h b/drivers/staging/unisys/visorutil/charqueue.h
new file mode 100644
index 000000000000..e82ae0bc8959
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/charqueue.h
@@ -0,0 +1,37 @@
+/* charqueue.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __CHARQUEUE_H__
+#define __CHARQUEUE_H__
+
+#include "uniklog.h"
+#include "timskmod.h"
+
+/* CHARQUEUE is an opaque structure to users.
+ * Fields are declared only in the implementation .c files.
+ */
+typedef struct CHARQUEUE_Tag CHARQUEUE;
+
+CHARQUEUE *visor_charqueue_create(ulong nslots);
+void visor_charqueue_enqueue(CHARQUEUE *charqueue, unsigned char c);
+int charqueue_dequeue(CHARQUEUE *charqueue);
+int visor_charqueue_dequeue_n(CHARQUEUE *charqueue, unsigned char *buf, int n);
+BOOL visor_charqueue_is_empty(CHARQUEUE *charqueue);
+void visor_charqueue_destroy(CHARQUEUE *charqueue);
+
+#endif
+
diff --git a/drivers/staging/unisys/visorutil/easyproc.c b/drivers/staging/unisys/visorutil/easyproc.c
new file mode 100644
index 000000000000..60b6b83d1b20
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/easyproc.c
@@ -0,0 +1,371 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/** @file *********************************************************************
+ *
+ * Handle procfs-specific tasks.
+ * Note that this file does not know about any module-specific things, nor
+ * does it know anything about what information to reveal as part of the proc
+ * entries. The 2 functions that take care of displaying device and
+ * driver specific information are passed as parameters to
+ * visor_easyproc_InitDriver().
+ *
+ * void show_device_info(struct seq_file *seq, void *p);
+ * void show_driver_info(struct seq_file *seq);
+ *
+ * The second parameter to show_device_info is actually a pointer to the
+ * device-specific info to show. It is the context that was originally
+ * passed to visor_easyproc_InitDevice().
+ *
+ ******************************************************************************
+ */
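+
+/* Illustrative sketch only: a driver using this interface would
+ * typically register callbacks along these lines (all "mydrv" names are
+ * hypothetical):
+ *
+ *	static void mydrv_show_driver_info(struct seq_file *seq)
+ *	{
+ *		seq_printf(seq, "mydrv loaded\n");
+ *	}
+ *
+ *	static void mydrv_show_device_info(struct seq_file *seq, void *p)
+ *	{
+ *		struct mydrv_device *dev = p;
+ *		seq_printf(seq, "device %d ok\n", dev->devno);
+ *	}
+ *
+ *	visor_easyproc_InitDriver(&mydrv_easyproc, "mydrv",
+ *				  mydrv_show_driver_info,
+ *				  mydrv_show_device_info);
+ *	visor_easyproc_InitDevice(&mydrv_easyproc, &dev->procinfo,
+ *				  dev->devno, dev);
+ */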
+
+#include <linux/proc_fs.h>
+
+#include "uniklog.h"
+#include "timskmod.h"
+#include "easyproc.h"
+
+#define MYDRVNAME "easyproc"
+
+
+
+/*
+ * /proc/<ProcId> ProcDir
+ * /proc/<ProcId>/driver ProcDriverDir
+ * /proc/<ProcId>/driver/diag ProcDriverDiagFile
+ * /proc/<ProcId>/device ProcDeviceDir
+ * /proc/<ProcId>/device/0 procDevicexDir
+ * /proc/<ProcId>/device/0/diag procDevicexDiagFile
+ */
+
+
+static ssize_t proc_write_device(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t proc_write_driver(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+
+static struct proc_dir_entry *
+ createProcDir(char *name, struct proc_dir_entry *parent)
+{
+ struct proc_dir_entry *p = proc_mkdir_mode(name, S_IFDIR, parent);
+ if (p == NULL)
+ ERRDRV("failed to create /proc directory %s", name);
+ return p;
+}
+
+static int seq_show_driver(struct seq_file *seq, void *offset);
+static int proc_open_driver(struct inode *inode, struct file *file)
+{
+ return single_open(file, seq_show_driver, PDE_DATA(inode));
+}
+static const struct file_operations proc_fops_driver = {
+ .open = proc_open_driver,
+ .read = seq_read,
+ .write = proc_write_driver,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int seq_show_device(struct seq_file *seq, void *offset);
+static int seq_show_device_property(struct seq_file *seq, void *offset);
+static int proc_open_device(struct inode *inode, struct file *file)
+{
+ return single_open(file, seq_show_device, PDE_DATA(inode));
+}
+static const struct file_operations proc_fops_device = {
+ .open = proc_open_device,
+ .read = seq_read,
+ .write = proc_write_device,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+static int proc_open_device_property(struct inode *inode, struct file *file)
+{
+ return single_open(file, seq_show_device_property, PDE_DATA(inode));
+}
+static const struct file_operations proc_fops_device_property = {
+ .open = proc_open_device_property,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+
+void visor_easyproc_InitDriver(struct easyproc_driver_info *pdriver,
+ char *procId,
+ void (*show_driver_info)(struct seq_file *),
+ void (*show_device_info)(struct seq_file *,
+ void *))
+{
+ memset(pdriver, 0, sizeof(struct easyproc_driver_info));
+ pdriver->ProcId = procId;
+ if (pdriver->ProcId == NULL)
+ ERRDRV("ProcId cannot be NULL (trouble ahead)!");
+ pdriver->Show_driver_info = show_driver_info;
+ pdriver->Show_device_info = show_device_info;
+ if (pdriver->ProcDir == NULL)
+ pdriver->ProcDir = createProcDir(pdriver->ProcId, NULL);
+ if ((pdriver->ProcDir != NULL) && (pdriver->ProcDriverDir == NULL))
+ pdriver->ProcDriverDir = createProcDir("driver",
+ pdriver->ProcDir);
+ if ((pdriver->ProcDir != NULL) && (pdriver->ProcDeviceDir == NULL))
+ pdriver->ProcDeviceDir = createProcDir("device",
+ pdriver->ProcDir);
+ if ((pdriver->ProcDriverDir != NULL) &&
+ (pdriver->ProcDriverDiagFile == NULL)) {
+ pdriver->ProcDriverDiagFile =
+ proc_create_data("diag", 0,
+ pdriver->ProcDriverDir,
+ &proc_fops_driver, pdriver);
+ if (pdriver->ProcDriverDiagFile == NULL)
+ ERRDRV("failed to register /proc/%s/driver/diag entry",
+ pdriver->ProcId);
+ }
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_InitDriver);
+
+
+
+void visor_easyproc_InitDriverEx(struct easyproc_driver_info *pdriver,
+ char *procId,
+ void (*show_driver_info)(struct seq_file *),
+ void (*show_device_info)(struct seq_file *,
+ void *),
+ void (*write_driver_info)(char *buf,
+ size_t count,
+ loff_t *ppos),
+ void (*write_device_info)(char *buf,
+ size_t count,
+ loff_t *ppos,
+ void *p))
+{
+ visor_easyproc_InitDriver(pdriver, procId,
+ show_driver_info, show_device_info);
+ pdriver->Write_driver_info = write_driver_info;
+ pdriver->Write_device_info = write_device_info;
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_InitDriverEx);
+
+
+
+void visor_easyproc_DeInitDriver(struct easyproc_driver_info *pdriver)
+{
+ if (pdriver->ProcDriverDiagFile != NULL) {
+ remove_proc_entry("diag", pdriver->ProcDriverDir);
+ pdriver->ProcDriverDiagFile = NULL;
+ }
+ if (pdriver->ProcDriverDir != NULL) {
+ remove_proc_entry("driver", pdriver->ProcDir);
+ pdriver->ProcDriverDir = NULL;
+ }
+ if (pdriver->ProcDeviceDir != NULL) {
+ remove_proc_entry("device", pdriver->ProcDir);
+ pdriver->ProcDeviceDir = NULL;
+ }
+ if (pdriver->ProcDir != NULL) {
+ remove_proc_entry(pdriver->ProcId, NULL);
+ pdriver->ProcDir = NULL;
+ }
+ pdriver->ProcId = NULL;
+ pdriver->Show_driver_info = NULL;
+ pdriver->Show_device_info = NULL;
+ pdriver->Write_driver_info = NULL;
+ pdriver->Write_device_info = NULL;
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_DeInitDriver);
+
+
+
+void visor_easyproc_InitDevice(struct easyproc_driver_info *pdriver,
+ struct easyproc_device_info *p, int devno,
+ void *devdata)
+{
+ if ((pdriver->ProcDeviceDir != NULL) && (p->procDevicexDir == NULL)) {
+ char s[29];
+ sprintf(s, "%d", devno);
+ p->procDevicexDir = createProcDir(s, pdriver->ProcDeviceDir);
+ p->devno = devno;
+ }
+ p->devdata = devdata;
+ p->pdriver = pdriver;
+ p->devno = devno;
+ if ((p->procDevicexDir != NULL) && (p->procDevicexDiagFile == NULL)) {
+ p->procDevicexDiagFile =
+ proc_create_data("diag", 0, p->procDevicexDir,
+ &proc_fops_device, p);
+ if (p->procDevicexDiagFile == NULL)
+ ERRDEVX(devno, "failed to register /proc/%s/device/%d/diag entry",
+ pdriver->ProcId, devno
+ );
+ }
+ memset(&(p->device_property_info[0]), 0,
+ sizeof(p->device_property_info));
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_InitDevice);
+
+
+
+void visor_easyproc_CreateDeviceProperty(struct easyproc_device_info *p,
+ void (*show_property_info)
+ (struct seq_file *, void *),
+ char *property_name)
+{
+ size_t i;
+ struct easyproc_device_property_info *px = NULL;
+
+ if (p->procDevicexDir == NULL) {
+ ERRDRV("state error");
+ return;
+ }
+ for (i = 0; i < ARRAY_SIZE(p->device_property_info); i++) {
+ if (p->device_property_info[i].procEntry == NULL) {
+ px = &(p->device_property_info[i]);
+ break;
+ }
+ }
+ if (!px) {
+ ERRDEVX(p->devno, "too many device properties");
+ return;
+ }
+ px->devdata = p->devdata;
+ px->pdriver = p->pdriver;
+ if (strlen(property_name) + 1 > sizeof(px->property_name)) {
+ ERRDEVX(p->devno, "device property name %s too long",
+ property_name);
+ return;
+ }
+ strcpy(px->property_name, property_name);
+ px->procEntry = proc_create_data(property_name, 0, p->procDevicexDir,
+ &proc_fops_device_property, px);
+ if (px->procEntry == NULL) {
+ ERRDEVX(p->devno, "failed to register /proc/%s/device/%d/%s entry",
+ p->pdriver->ProcId, p->devno, property_name
+ );
+ return;
+ }
+ px->show_device_property_info = show_property_info;
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_CreateDeviceProperty);
+
+
+
+void visor_easyproc_DeInitDevice(struct easyproc_driver_info *pdriver,
+ struct easyproc_device_info *p, int devno)
+{
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(p->device_property_info); i++) {
+ if (p->device_property_info[i].procEntry != NULL) {
+ struct easyproc_device_property_info *px =
+ &(p->device_property_info[i]);
+ remove_proc_entry(px->property_name, p->procDevicexDir);
+ px->procEntry = NULL;
+ }
+ }
+ if (p->procDevicexDiagFile != NULL) {
+ remove_proc_entry("diag", p->procDevicexDir);
+ p->procDevicexDiagFile = NULL;
+ }
+ if (p->procDevicexDir != NULL) {
+ char s[29];
+ sprintf(s, "%d", devno);
+ remove_proc_entry(s, pdriver->ProcDeviceDir);
+ p->procDevicexDir = NULL;
+ }
+ p->devdata = NULL;
+ p->pdriver = NULL;
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_DeInitDevice);
+
+
+
+static int seq_show_driver(struct seq_file *seq, void *offset)
+{
+ struct easyproc_driver_info *p =
+ (struct easyproc_driver_info *)(seq->private);
+ if (!p)
+ return 0;
+ (*(p->Show_driver_info))(seq);
+ return 0;
+}
+
+
+
+static int seq_show_device(struct seq_file *seq, void *offset)
+{
+ struct easyproc_device_info *p =
+ (struct easyproc_device_info *)(seq->private);
+ if ((!p) || (!(p->pdriver)))
+ return 0;
+ (*(p->pdriver->Show_device_info))(seq, p->devdata);
+ return 0;
+}
+
+
+
+static int seq_show_device_property(struct seq_file *seq, void *offset)
+{
+ struct easyproc_device_property_info *p =
+ (struct easyproc_device_property_info *)(seq->private);
+ if ((!p) || (!(p->show_device_property_info)))
+ return 0;
+ (*(p->show_device_property_info))(seq, p->devdata);
+ return 0;
+}
+
+
+
+static ssize_t proc_write_driver(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct easyproc_driver_info *p = NULL;
+ char local_buf[256];
+ if (seq == NULL)
+ return 0;
+ p = (struct easyproc_driver_info *)(seq->private);
+ if ((!p) || (!(p->Write_driver_info)))
+ return 0;
+ if (count >= sizeof(local_buf))
+ return -ENOMEM;
+ if (copy_from_user(local_buf, buffer, count))
+ return -EFAULT;
+ local_buf[count] = '\0'; /* be friendly */
+ (*(p->Write_driver_info))(local_buf, count, ppos);
+ return count;
+}
+
+
+
+static ssize_t proc_write_device(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct easyproc_device_info *p = NULL;
+ char local_buf[256];
+ if (seq == NULL)
+ return 0;
+ p = (struct easyproc_device_info *)(seq->private);
+ if ((!p) || (!(p->pdriver)) || (!(p->pdriver->Write_device_info)))
+ return 0;
+ if (count >= sizeof(local_buf))
+ return -ENOMEM;
+ if (copy_from_user(local_buf, buffer, count))
+ return -EFAULT;
+ local_buf[count] = '\0'; /* be friendly */
+ (*(p->pdriver->Write_device_info))(local_buf, count, ppos, p->devdata);
+ return count;
+}
diff --git a/drivers/staging/unisys/visorutil/easyproc.h b/drivers/staging/unisys/visorutil/easyproc.h
new file mode 100644
index 000000000000..1cef1fd33d53
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/easyproc.h
@@ -0,0 +1,92 @@
+/* easyproc.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/** @file *********************************************************************
+ *
+ * This describes the interfaces necessary for a simple /proc file
+ * implementation for a driver.
+ *
+ ******************************************************************************
+ */
+
+#ifndef __EASYPROC_H__
+#define __EASYPROC_H__
+
+#include "timskmod.h"
+
+
+struct easyproc_driver_info {
+ struct proc_dir_entry *ProcDir;
+ struct proc_dir_entry *ProcDriverDir;
+ struct proc_dir_entry *ProcDriverDiagFile;
+ struct proc_dir_entry *ProcDeviceDir;
+ char *ProcId;
+ void (*Show_device_info)(struct seq_file *seq, void *p);
+ void (*Show_driver_info)(struct seq_file *seq);
+ void (*Write_device_info)(char *buf, size_t count,
+ loff_t *ppos, void *p);
+ void (*Write_driver_info)(char *buf, size_t count, loff_t *ppos);
+};
+
+/* property is a file under /proc/<x>/device/<x>/<property_name> */
+struct easyproc_device_property_info {
+ char property_name[25];
+ struct proc_dir_entry *procEntry;
+ struct easyproc_driver_info *pdriver;
+ void *devdata;
+ void (*show_device_property_info)(struct seq_file *seq, void *p);
+};
+
+struct easyproc_device_info {
+ struct proc_dir_entry *procDevicexDir;
+ struct proc_dir_entry *procDevicexDiagFile;
+ struct easyproc_driver_info *pdriver;
+ void *devdata;
+ int devno;
+ /* allow for a number of custom properties for each device: */
+ struct easyproc_device_property_info device_property_info[10];
+};
+
+void visor_easyproc_InitDevice(struct easyproc_driver_info *pdriver,
+ struct easyproc_device_info *p, int devno,
+ void *devdata);
+void visor_easyproc_DeInitDevice(struct easyproc_driver_info *pdriver,
+ struct easyproc_device_info *p, int devno);
+void visor_easyproc_InitDriver(struct easyproc_driver_info *pdriver,
+ char *procId,
+ void (*show_driver_info)(struct seq_file *),
+ void (*show_device_info)(struct seq_file *,
+ void *));
+void visor_easyproc_InitDriverEx(struct easyproc_driver_info *pdriver,
+ char *procId,
+ void (*show_driver_info)(struct seq_file *),
+ void (*show_device_info)(struct seq_file *,
+ void *),
+ void (*Write_driver_info)(char *buf,
+ size_t count,
+ loff_t *ppos),
+ void (*Write_device_info)(char *buf,
+ size_t count,
+ loff_t *ppos,
+ void *p));
+void visor_easyproc_DeInitDriver(struct easyproc_driver_info *pdriver);
+void visor_easyproc_CreateDeviceProperty(struct easyproc_device_info *p,
+ void (*show_property_info)
+ (struct seq_file *, void *),
+ char *property_name);
+
+#endif
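
For context, a minimal sketch of how a visor driver might consume this interface; the "mydriver" name and the callbacks below are illustrative assumptions, not part of the patch:

#include <linux/seq_file.h>
#include "easyproc.h"

static struct easyproc_driver_info easyproc_driver;
static struct easyproc_device_info easyproc_dev;

static void mydriver_show_driver_info(struct seq_file *seq)
{
	seq_puts(seq, "mydriver loaded\n");
}

static void mydriver_show_device_info(struct seq_file *seq, void *devdata)
{
	seq_printf(seq, "devdata at %p\n", devdata);
}

static void mydriver_show_state(struct seq_file *seq, void *devdata)
{
	seq_puts(seq, "state: running\n");
}

static void mydriver_setup_proc(void *devdata)
{
	/* creates /proc/mydriver/driver/diag */
	visor_easyproc_InitDriver(&easyproc_driver, "mydriver",
				  mydriver_show_driver_info,
				  mydriver_show_device_info);
	/* creates /proc/mydriver/device/0/diag */
	visor_easyproc_InitDevice(&easyproc_driver, &easyproc_dev, 0, devdata);
	/* creates /proc/mydriver/device/0/state */
	visor_easyproc_CreateDeviceProperty(&easyproc_dev,
					    mydriver_show_state, "state");
}

static void mydriver_teardown_proc(void)
{
	visor_easyproc_DeInitDevice(&easyproc_driver, &easyproc_dev, 0);
	visor_easyproc_DeInitDriver(&easyproc_driver);
}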
diff --git a/drivers/staging/unisys/visorutil/memregion.h b/drivers/staging/unisys/visorutil/memregion.h
new file mode 100644
index 000000000000..bb122dbeafbc
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/memregion.h
@@ -0,0 +1,43 @@
+/* memregion.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __MEMREGION_H__
+#define __MEMREGION_H__
+
+#include "timskmod.h"
+
+/* MEMREGION is an opaque structure to users.
+ * Fields are declared only in the implementation .c files.
+ */
+typedef struct MEMREGION_Tag MEMREGION;
+
+MEMREGION *visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes);
+MEMREGION *visor_memregion_create_overlapped(MEMREGION *parent,
+ ulong offset, ulong nbytes);
+int visor_memregion_resize(MEMREGION *memregion, ulong newsize);
+int visor_memregion_read(MEMREGION *memregion,
+ ulong offset, void *dest, ulong nbytes);
+int visor_memregion_write(MEMREGION *memregion,
+ ulong offset, void *src, ulong nbytes);
+void visor_memregion_destroy(MEMREGION *memregion);
+HOSTADDRESS visor_memregion_get_physaddr(MEMREGION *memregion);
+ulong visor_memregion_get_nbytes(MEMREGION *memregion);
+void memregion_dump(MEMREGION *memregion, char *s,
+ ulong off, ulong len, struct seq_file *seq);
+void *visor_memregion_get_pointer(MEMREGION *memregion);
+
+#endif
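
A short sketch of the intended calling pattern for this API; the page-sized region and the idea of a 16-byte channel signature are illustrative assumptions:

#include <linux/kernel.h>
#include <linux/errno.h>
#include "memregion.h"

static int read_channel_signature(HOSTADDRESS channel_physaddr)
{
	MEMREGION *region;
	u8 signature[16];
	int rc;

	region = visor_memregion_create(channel_physaddr, 4096);
	if (region == NULL)
		return -ENOMEM;
	/* copy the first 16 bytes of the mapped channel header */
	rc = visor_memregion_read(region, 0, signature, sizeof(signature));
	if (rc == 0)
		pr_info("channel at 0x%llx spans %lu bytes\n",
			(unsigned long long)visor_memregion_get_physaddr(region),
			visor_memregion_get_nbytes(region));
	visor_memregion_destroy(region);
	return rc;
}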
diff --git a/drivers/staging/unisys/visorutil/memregion_direct.c b/drivers/staging/unisys/visorutil/memregion_direct.c
new file mode 100644
index 000000000000..2c1061d55ee9
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/memregion_direct.c
@@ -0,0 +1,223 @@
+/* memregion_direct.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * This is an implementation of memory regions that can be used to read/write
+ * channel memory (in main memory of the host system) from code running in
+ * a virtual partition.
+ */
+#include "uniklog.h"
+#include "timskmod.h"
+#include "memregion.h"
+
+#define MYDRVNAME "memregion"
+
+struct MEMREGION_Tag {
+ HOSTADDRESS physaddr;
+ ulong nbytes;
+ void *mapped;
+ BOOL requested;
+ BOOL overlapped;
+};
+
+static BOOL mapit(MEMREGION *memregion);
+static void unmapit(MEMREGION *memregion);
+
+MEMREGION *
+visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes)
+{
+ MEMREGION *rc = NULL;
+ MEMREGION *memregion = kzalloc(sizeof(MEMREGION),
+ GFP_KERNEL | __GFP_NORETRY);
+ if (memregion == NULL) {
+ ERRDRV("visor_memregion_create allocation failed");
+ return NULL;
+ }
+ memregion->physaddr = physaddr;
+ memregion->nbytes = nbytes;
+ memregion->overlapped = FALSE;
+ if (!mapit(memregion)) {
+ rc = NULL;
+ goto Away;
+ }
+ rc = memregion;
+Away:
+ if (rc == NULL) {
+ if (memregion != NULL) {
+ visor_memregion_destroy(memregion);
+ memregion = NULL;
+ }
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_create);
+
+MEMREGION *
+visor_memregion_create_overlapped(MEMREGION *parent, ulong offset, ulong nbytes)
+{
+ MEMREGION *memregion = NULL;
+
+ if (parent == NULL) {
+ ERRDRV("%s parent is NULL", __func__);
+ return NULL;
+ }
+ if (parent->mapped == NULL) {
+ ERRDRV("%s parent is not mapped!", __func__);
+ return NULL;
+ }
+ if ((offset >= parent->nbytes) ||
+ ((offset + nbytes) >= parent->nbytes)) {
+ ERRDRV("%s range (%lu,%lu) out of parent range",
+ __func__, offset, nbytes);
+ return NULL;
+ }
+ memregion = kzalloc(sizeof(MEMREGION), GFP_KERNEL|__GFP_NORETRY);
+ if (memregion == NULL) {
+ ERRDRV("%s allocation failed", __func__);
+ return NULL;
+ }
+
+ memregion->physaddr = parent->physaddr + offset;
+ memregion->nbytes = nbytes;
+ memregion->mapped = ((u8 *) (parent->mapped)) + offset;
+ memregion->requested = FALSE;
+ memregion->overlapped = TRUE;
+ return memregion;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_create_overlapped);
+
+
+static BOOL
+mapit(MEMREGION *memregion)
+{
+ ulong physaddr = (ulong) (memregion->physaddr);
+ ulong nbytes = memregion->nbytes;
+
+ memregion->requested = FALSE;
+ if (!request_mem_region(physaddr, nbytes, MYDRVNAME))
+ ERRDRV("cannot reserve channel memory @0x%lx for 0x%lx-- no big deal", physaddr, nbytes);
+ else
+ memregion->requested = TRUE;
+ memregion->mapped = ioremap_cache(physaddr, nbytes);
+ if (memregion->mapped == NULL) {
+ ERRDRV("cannot ioremap_cache channel memory @0x%lx for 0x%lx",
+ physaddr, nbytes);
+ return FALSE;
+ }
+ return TRUE;
+}
+
+static void
+unmapit(MEMREGION *memregion)
+{
+ if (memregion->mapped != NULL) {
+ iounmap(memregion->mapped);
+ memregion->mapped = NULL;
+ }
+ if (memregion->requested) {
+ release_mem_region((ulong) (memregion->physaddr),
+ memregion->nbytes);
+ memregion->requested = FALSE;
+ }
+}
+
+HOSTADDRESS
+visor_memregion_get_physaddr(MEMREGION *memregion)
+{
+ return memregion->physaddr;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_get_physaddr);
+
+ulong
+visor_memregion_get_nbytes(MEMREGION *memregion)
+{
+ return memregion->nbytes;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_get_nbytes);
+
+void *
+visor_memregion_get_pointer(MEMREGION *memregion)
+{
+ return memregion->mapped;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_get_pointer);
+
+int
+visor_memregion_resize(MEMREGION *memregion, ulong newsize)
+{
+ if (newsize == memregion->nbytes)
+ return 0;
+ if (memregion->overlapped) {
+ /* no error check here - we no longer know the
+ * parent's range!
+ */
+ memregion->nbytes = newsize;
+ } else {
+ unmapit(memregion);
+ memregion->nbytes = newsize;
+ if (!mapit(memregion))
+ return -1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_resize);
+
+
+static int
+memregion_readwrite(BOOL is_write,
+ MEMREGION *memregion, ulong offset,
+ void *local, ulong nbytes)
+{
+ if (offset + nbytes > memregion->nbytes) {
+ ERRDRV("memregion_readwrite offset out of range!!");
+ return -EFAULT;
+ }
+ if (is_write)
+ memcpy_toio(memregion->mapped + offset, local, nbytes);
+ else
+ memcpy_fromio(local, memregion->mapped + offset, nbytes);
+
+ return 0;
+}
+
+int
+visor_memregion_read(MEMREGION *memregion, ulong offset, void *dest,
+ ulong nbytes)
+{
+ return memregion_readwrite(FALSE, memregion, offset, dest, nbytes);
+}
+EXPORT_SYMBOL_GPL(visor_memregion_read);
+
+int
+visor_memregion_write(MEMREGION *memregion, ulong offset, void *src,
+ ulong nbytes)
+{
+ return memregion_readwrite(TRUE, memregion, offset, src, nbytes);
+}
+EXPORT_SYMBOL_GPL(visor_memregion_write);
+
+void
+visor_memregion_destroy(MEMREGION *memregion)
+{
+ if (memregion == NULL)
+ return;
+ if (!memregion->overlapped)
+ unmapit(memregion);
+ kfree(memregion);
+}
+EXPORT_SYMBOL_GPL(visor_memregion_destroy);
+
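A sketch of the overlapped-region pattern implemented above: the child region borrows the parent's ioremap()ed mapping, so destroying it only frees the wrapper and never unmaps anything. The offsets used are illustrative assumptions:

#include <linux/kernel.h>
#include <linux/errno.h>
#include "memregion.h"

static int peek_channel_header(MEMREGION *channel)
{
	/* view bytes 0x100..0x1ff of the channel as a region of their own */
	MEMREGION *hdr = visor_memregion_create_overlapped(channel,
							   0x100, 0x100);
	u32 word;

	if (hdr == NULL)
		return -ENOMEM;
	/* offset 0 of "hdr" is offset 0x100 of the parent channel */
	if (visor_memregion_read(hdr, 0, &word, sizeof(word)) == 0)
		pr_info("first word of header area: 0x%x\n", word);
	visor_memregion_destroy(hdr);
	return 0;
}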
diff --git a/drivers/staging/unisys/visorutil/periodic_work.c b/drivers/staging/unisys/visorutil/periodic_work.c
new file mode 100644
index 000000000000..0670a3174682
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/periodic_work.c
@@ -0,0 +1,236 @@
+/* periodic_work.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Helper functions to schedule periodic work in Linux kernel mode.
+ */
+
+#include "uniklog.h"
+#include "timskmod.h"
+#include "periodic_work.h"
+
+#define MYDRVNAME "periodic_work"
+
+
+
+struct PERIODIC_WORK_Tag {
+ rwlock_t lock;
+ struct delayed_work work;
+ void (*workfunc)(void *);
+ void *workfuncarg;
+ BOOL is_scheduled;
+ BOOL want_to_stop;
+ ulong jiffy_interval;
+ struct workqueue_struct *workqueue;
+ const char *devnam;
+};
+
+
+
+static void periodic_work_func(struct work_struct *work)
+{
+ PERIODIC_WORK *periodic_work =
+ container_of(work, struct PERIODIC_WORK_Tag, work.work);
+ (*periodic_work->workfunc)(periodic_work->workfuncarg);
+}
+
+
+
+PERIODIC_WORK *visor_periodic_work_create(ulong jiffy_interval,
+ struct workqueue_struct *workqueue,
+ void (*workfunc)(void *),
+ void *workfuncarg,
+ const char *devnam)
+{
+ PERIODIC_WORK *periodic_work = kzalloc(sizeof(PERIODIC_WORK),
+ GFP_KERNEL | __GFP_NORETRY);
+ if (periodic_work == NULL) {
+ ERRDRV("periodic_work allocation failed ");
+ return NULL;
+ }
+ rwlock_init(&periodic_work->lock);
+ periodic_work->jiffy_interval = jiffy_interval;
+ periodic_work->workqueue = workqueue;
+ periodic_work->workfunc = workfunc;
+ periodic_work->workfuncarg = workfuncarg;
+ periodic_work->devnam = devnam;
+ return periodic_work;
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_create);
+
+
+
+void visor_periodic_work_destroy(PERIODIC_WORK *periodic_work)
+{
+ if (periodic_work == NULL)
+ return;
+ kfree(periodic_work);
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_destroy);
+
+
+
+/** Call this from your periodic work worker function to schedule the next
+ * call.
+ * If this function returns FALSE, there was a failure and the
+ * periodic work is no longer scheduled
+ */
+BOOL visor_periodic_work_nextperiod(PERIODIC_WORK *periodic_work)
+{
+ BOOL rc = FALSE;
+ write_lock(&periodic_work->lock);
+ if (periodic_work->want_to_stop) {
+ periodic_work->is_scheduled = FALSE;
+ periodic_work->want_to_stop = FALSE;
+ rc = TRUE; /* yes, TRUE; see visor_periodic_work_stop() */
+ goto Away;
+ } else if (queue_delayed_work(periodic_work->workqueue,
+ &periodic_work->work,
+ periodic_work->jiffy_interval) < 0) {
+ ERRDEV(periodic_work->devnam, "queue_delayed_work failed!");
+ periodic_work->is_scheduled = FALSE;
+ rc = FALSE;
+ goto Away;
+ }
+ rc = TRUE;
+Away:
+ write_unlock(&periodic_work->lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_nextperiod);
+
+
+
+/** This function returns TRUE iff new periodic work was actually started.
+ * If this function returns FALSE, then no work was started
+ * (either because it was already started, or because of a failure).
+ */
+BOOL visor_periodic_work_start(PERIODIC_WORK *periodic_work)
+{
+ BOOL rc = FALSE;
+
+ write_lock(&periodic_work->lock);
+ if (periodic_work->is_scheduled) {
+ rc = FALSE;
+ goto Away;
+ }
+ if (periodic_work->want_to_stop) {
+ ERRDEV(periodic_work->devnam,
+ "dev_start_periodic_work failed!");
+ rc = FALSE;
+ goto Away;
+ }
+ INIT_DELAYED_WORK(&periodic_work->work, &periodic_work_func);
+ if (queue_delayed_work(periodic_work->workqueue,
+ &periodic_work->work,
+ periodic_work->jiffy_interval) < 0) {
+ ERRDEV(periodic_work->devnam,
+ "%s queue_delayed_work failed!", __func__);
+ rc = FALSE;
+ goto Away;
+ }
+ periodic_work->is_scheduled = TRUE;
+ rc = TRUE;
+Away:
+ write_unlock(&periodic_work->lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_start);
+
+
+
+
+/** This function returns TRUE iff your call actually stopped the periodic
+ * work.
+ *
+ * -- PAY ATTENTION... this is important --
+ *
+ * NO NO #1
+ *
+ * Do NOT call this function from some function that is running on the
+ * same workqueue as the work you are trying to stop might be running
+ * on! If you violate this rule, visor_periodic_work_stop() MIGHT work,
+ * but it also MIGHT get hung up in an infinite loop saying
+ * "waiting for delayed work...". This will happen if the delayed work
+ * you are trying to cancel has been put in the workqueue list, but can't
+ * run yet because we are running that same workqueue thread right now.
+ *
+ * Bottom line: If you need to call visor_periodic_work_stop() from a
+ * workitem, be sure the workitem is on a DIFFERENT workqueue than the
+ * workitem that you are trying to cancel.
+ *
+ * If I could figure out some way to check for this "no no" condition in
+ * the code, I would. It would have saved me the trouble of writing this
+ * long comment. And also, don't think this is some "theoretical" race
+ * condition. It is REAL, as I have spent the day chasing it.
+ *
+ * NO NO #2
+ *
+ * Take close note of the locks that you own when you call this function.
+ * You must NOT own any locks that are needed by the periodic work
+ * function that is currently installed. If you DO, a deadlock may result,
+ * because stopping the periodic work often involves waiting for the last
+ * iteration of the periodic work function to complete. Again, if you hit
+ * this deadlock, you will get hung up in an infinite loop saying
+ * "waiting for delayed work...".
+ */
+BOOL visor_periodic_work_stop(PERIODIC_WORK *periodic_work)
+{
+ BOOL stopped_something = FALSE;
+
+ write_lock(&periodic_work->lock);
+ stopped_something = periodic_work->is_scheduled &&
+ (!periodic_work->want_to_stop);
+ while (periodic_work->is_scheduled) {
+ periodic_work->want_to_stop = TRUE;
+ if (cancel_delayed_work(&periodic_work->work)) {
+ /* We get here if the delayed work was pending as
+ * delayed work, but was NOT run.
+ */
+ ASSERT(periodic_work->is_scheduled);
+ periodic_work->is_scheduled = FALSE;
+ } else {
+ /* If we get here, either the delayed work:
+ * - was run, OR,
+ * - is running RIGHT NOW on another processor, OR,
+ * - wasn't even scheduled (there is a minuscule
+ * timing window where this could be the case)
+ * flush_workqueue() would make sure it is finished
+ * executing, but that still isn't very useful, which
+ * explains the loop...
+ */
+ }
+ if (periodic_work->is_scheduled) {
+ write_unlock(&periodic_work->lock);
+ WARNDEV(periodic_work->devnam,
+ "waiting for delayed work...");
+ /* We rely on the delayed work function running here,
+ * and eventually calling
+ * visor_periodic_work_nextperiod(),
+ * which will see that want_to_stop is set, and
+ * subsequently clear is_scheduled.
+ */
+ SLEEPJIFFIES(10);
+ write_lock(&periodic_work->lock);
+ } else
+ periodic_work->want_to_stop = FALSE;
+ }
+ write_unlock(&periodic_work->lock);
+ return stopped_something;
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_stop);
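
A minimal sketch of the calling pattern these helpers expect: a dedicated workqueue, a worker that reschedules itself via visor_periodic_work_nextperiod(), and a stop path that runs outside that workqueue (per the warnings above). The workqueue name and poll interval are illustrative assumptions:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include "periodic_work.h"

static struct workqueue_struct *my_wq;
static PERIODIC_WORK *my_pw;

static void my_poll(void *context)
{
	/* ... poll the device using context ... */

	/* reschedule ourselves; if this fails, polling stops */
	if (!visor_periodic_work_nextperiod(my_pw))
		pr_warn("periodic work is no longer scheduled\n");
}

static int my_start_polling(void *context)
{
	my_wq = create_singlethread_workqueue("my_poll_wq");
	if (my_wq == NULL)
		return -ENOMEM;
	my_pw = visor_periodic_work_create(HZ / 10, my_wq, my_poll,
					   context, "mydev");
	if (my_pw == NULL) {
		destroy_workqueue(my_wq);
		return -ENOMEM;
	}
	visor_periodic_work_start(my_pw);
	return 0;
}

static void my_stop_polling(void)
{
	/* NO NO #1: never call this from my_wq itself */
	visor_periodic_work_stop(my_pw);
	visor_periodic_work_destroy(my_pw);
	destroy_workqueue(my_wq);
}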
diff --git a/drivers/staging/unisys/visorutil/procobjecttree.c b/drivers/staging/unisys/visorutil/procobjecttree.c
new file mode 100644
index 000000000000..67a19e1c7b02
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/procobjecttree.c
@@ -0,0 +1,348 @@
+/* procobjecttree.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include "procobjecttree.h"
+
+#define MYDRVNAME "procobjecttree"
+
+
+
+/** This is context info that we stash in each /proc file entry, which we
+ * need in order to call the callback function that supplies the /proc read
+ * info for that file.
+ */
+typedef struct {
+ void (*show_property)(struct seq_file *, void *, int);
+ MYPROCOBJECT *procObject;
+ int propertyIndex;
+
+} PROCDIRENTRYCONTEXT;
+
+/** This describes the attributes of a tree rooted at
+ * <procDirRoot>/<name[0]>/<name[1]>/...
+ * Properties for each object of this type will be located under
+ * <procDirRoot>/<name[0]>/<name[1]>/.../<objectName>/<propertyName>.
+ */
+struct MYPROCTYPE_Tag {
+ const char **name; /**< node names for this type, ending with NULL */
+ int nNames; /**< num of node names in <name> */
+
+ /** root dir for this type tree in /proc */
+ struct proc_dir_entry *procDirRoot;
+
+ struct proc_dir_entry **procDirs; /**< for each node in <name> */
+
+ /** bottom dir where objects will be rooted; i.e., this is
+ * <procDirRoot>/<name[0]>/<name[1]>/.../, which is the same as the
+ * last entry in the <procDirs> array. */
+ struct proc_dir_entry *procDir;
+
+ /** name for each property that objects of this type can have */
+ const char **propertyNames;
+
+ int nProperties; /**< num of names in <propertyNames> */
+
+ /** Call this, passing MYPROCOBJECT.context and the property index
+ * whenever someone reads the proc entry */
+ void (*show_property)(struct seq_file *, void *, int);
+};
+
+
+
+struct MYPROCOBJECT_Tag {
+ MYPROCTYPE *type;
+
+ /** This is the name of the dir node in /proc under which the
+ * properties of this object will appear as files. */
+ char *name;
+
+ int namesize; /**< number of bytes allocated for name */
+ void *context; /**< passed to MYPROCTYPE.show_property */
+
+ /** <type.procDirRoot>/<type.name[0]>/<type.name[1]>/.../<name> */
+ struct proc_dir_entry *procDir;
+
+ /** a proc dir entry for each of the properties of the object;
+ * properties are identified in MYPROCTYPE.propertyNames, so each of
+ * the <procDirProperties> describes a single file like
+ * <type.procDirRoot>/<type.name[0]>/<type.name[1]>/...
+ * /<name>/<propertyName>
+ */
+ struct proc_dir_entry **procDirProperties;
+
+ /** this is a holding area for the context information that is needed
+ * to run the /proc callback function */
+ PROCDIRENTRYCONTEXT *procDirPropertyContexts;
+};
+
+
+
+static struct proc_dir_entry *
+createProcDir(const char *name, struct proc_dir_entry *parent)
+{
+ struct proc_dir_entry *p = proc_mkdir_mode(name, S_IFDIR, parent);
+ if (p == NULL)
+ ERRDRV("failed to create /proc directory %s", name);
+ return p;
+}
+
+static struct proc_dir_entry *
+createProcFile(const char *name, struct proc_dir_entry *parent,
+ const struct file_operations *fops, void *data)
+{
+ struct proc_dir_entry *p = proc_create_data(name, 0, parent,
+ fops, data);
+ if (p == NULL)
+ ERRDRV("failed to create /proc file %s", name);
+ return p;
+}
+
+static int seq_show(struct seq_file *seq, void *offset);
+static int proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, seq_show, PDE_DATA(inode));
+}
+
+static const struct file_operations proc_fops = {
+ .open = proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+
+MYPROCTYPE *visor_proc_CreateType(struct proc_dir_entry *procDirRoot,
+ const char **name,
+ const char **propertyNames,
+ void (*show_property)(struct seq_file *,
+ void *, int))
+{
+ int i = 0;
+ MYPROCTYPE *rc = NULL, *type = NULL;
+ struct proc_dir_entry *parent = NULL;
+
+ if (procDirRoot == NULL) {
+ ERRDRV("procDirRoot cannot be NULL!\n");
+ goto Away;
+ }
+ if (name == NULL || name[0] == NULL) {
+ ERRDRV("name must contain at least 1 node name!\n");
+ goto Away;
+ }
+ type = kzalloc(sizeof(MYPROCTYPE), GFP_KERNEL | __GFP_NORETRY);
+ if (type == NULL) {
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ type->name = name;
+ type->propertyNames = propertyNames;
+ type->nProperties = 0;
+ type->nNames = 0;
+ type->show_property = show_property;
+ type->procDirRoot = procDirRoot;
+ if (type->propertyNames != NULL)
+ while (type->propertyNames[type->nProperties] != NULL)
+ type->nProperties++;
+ while (type->name[type->nNames] != NULL)
+ type->nNames++;
+ type->procDirs = kzalloc((type->nNames + 1) *
+ sizeof(struct proc_dir_entry *),
+ GFP_KERNEL | __GFP_NORETRY);
+ if (type->procDirs == NULL) {
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ parent = procDirRoot;
+ for (i = 0; i < type->nNames; i++) {
+ type->procDirs[i] = createProcDir(type->name[i], parent);
+ if (type->procDirs[i] == NULL) {
+ rc = NULL;
+ goto Away;
+ }
+ parent = type->procDirs[i];
+ }
+ type->procDir = type->procDirs[type->nNames-1];
+ rc = type;
+Away:
+ if (rc == NULL) {
+ if (type != NULL) {
+ visor_proc_DestroyType(type);
+ type = NULL;
+ }
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_proc_CreateType);
+
+
+
+void visor_proc_DestroyType(MYPROCTYPE *type)
+{
+ if (type == NULL)
+ return;
+ if (type->procDirs != NULL) {
+ int i = type->nNames-1;
+ while (i >= 0) {
+ if (type->procDirs[i] != NULL) {
+ struct proc_dir_entry *parent = NULL;
+ if (i == 0)
+ parent = type->procDirRoot;
+ else
+ parent = type->procDirs[i-1];
+ remove_proc_entry(type->name[i], parent);
+ }
+ i--;
+ }
+ kfree(type->procDirs);
+ type->procDirs = NULL;
+ }
+ kfree(type);
+}
+EXPORT_SYMBOL_GPL(visor_proc_DestroyType);
+
+
+
+MYPROCOBJECT *visor_proc_CreateObject(MYPROCTYPE *type,
+ const char *name, void *context)
+{
+ MYPROCOBJECT *obj = NULL, *rc = NULL;
+ int i = 0;
+
+ if (type == NULL) {
+ ERRDRV("type cannot be NULL\n");
+ goto Away;
+ }
+ obj = kzalloc(sizeof(MYPROCOBJECT), GFP_KERNEL | __GFP_NORETRY);
+ if (obj == NULL) {
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ obj->type = type;
+ obj->context = context;
+ if (name == NULL) {
+ obj->name = NULL;
+ obj->procDir = type->procDir;
+ } else {
+ obj->namesize = strlen(name)+1;
+ obj->name = kmalloc(obj->namesize, GFP_KERNEL | __GFP_NORETRY);
+ if (obj->name == NULL) {
+ obj->namesize = 0;
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ strcpy(obj->name, name);
+ obj->procDir = createProcDir(obj->name, type->procDir);
+ if (obj->procDir == NULL) {
+ goto Away;
+ }
+ }
+ obj->procDirPropertyContexts =
+ kzalloc((type->nProperties + 1) * sizeof(PROCDIRENTRYCONTEXT),
+ GFP_KERNEL | __GFP_NORETRY);
+ if (obj->procDirPropertyContexts == NULL) {
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ obj->procDirProperties =
+ kzalloc((type->nProperties + 1) * sizeof(struct proc_dir_entry *),
+ GFP_KERNEL | __GFP_NORETRY);
+ if (obj->procDirProperties == NULL) {
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ for (i = 0; i < type->nProperties; i++) {
+ obj->procDirPropertyContexts[i].procObject = obj;
+ obj->procDirPropertyContexts[i].propertyIndex = i;
+ obj->procDirPropertyContexts[i].show_property =
+ type->show_property;
+ if (type->propertyNames[i][0] != '\0') {
+ /* only create properties that have names */
+ obj->procDirProperties[i] =
+ createProcFile(type->propertyNames[i],
+ obj->procDir, &proc_fops,
+ &obj->procDirPropertyContexts[i]);
+ if (obj->procDirProperties[i] == NULL) {
+ rc = NULL;
+ goto Away;
+ }
+ }
+ }
+ rc = obj;
+Away:
+ if (rc == NULL) {
+ if (obj != NULL) {
+ visor_proc_DestroyObject(obj);
+ obj = NULL;
+ }
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_proc_CreateObject);
+
+
+
+void visor_proc_DestroyObject(MYPROCOBJECT *obj)
+{
+ MYPROCTYPE *type = NULL;
+ if (obj == NULL)
+ return;
+ type = obj->type;
+ if (type == NULL)
+ return;
+ if (obj->procDirProperties != NULL) {
+ int i = 0;
+ for (i = 0; i < type->nProperties; i++) {
+ if (obj->procDirProperties[i] != NULL) {
+ remove_proc_entry(type->propertyNames[i],
+ obj->procDir);
+ obj->procDirProperties[i] = NULL;
+ }
+ }
+ kfree(obj->procDirProperties);
+ obj->procDirProperties = NULL;
+ }
+ if (obj->procDirPropertyContexts != NULL) {
+ kfree(obj->procDirPropertyContexts);
+ obj->procDirPropertyContexts = NULL;
+ }
+ if (obj->procDir != NULL) {
+ if (obj->name != NULL)
+ remove_proc_entry(obj->name, type->procDir);
+ obj->procDir = NULL;
+ }
+ if (obj->name != NULL) {
+ kfree(obj->name);
+ obj->name = NULL;
+ }
+ kfree(obj);
+}
+EXPORT_SYMBOL_GPL(visor_proc_DestroyObject);
+
+
+
+static int seq_show(struct seq_file *seq, void *offset)
+{
+ PROCDIRENTRYCONTEXT *ctx = (PROCDIRENTRYCONTEXT *)(seq->private);
+ if (ctx == NULL) {
+ ERRDRV("I don't have a freakin' clue...");
+ return 0;
+ }
+ (*ctx->show_property)(seq, ctx->procObject->context,
+ ctx->propertyIndex);
+ return 0;
+}
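
A sketch of how a caller might build a /proc tree with this interface, yielding e.g. <root>/device/<objname>/{status,info}; struct my_device and every name here are illustrative assumptions:

#include <linux/errno.h>
#include <linux/seq_file.h>
#include "procobjecttree.h"

struct my_device {		/* stand-in for the caller's real context */
	const char *name;
	int status;
};

static const char *type_names[] = { "device", NULL };
static const char *prop_names[] = { "status", "info", NULL };

static MYPROCTYPE *my_type;
static MYPROCOBJECT *my_obj;

static void my_show_property(struct seq_file *seq, void *context, int index)
{
	struct my_device *dev = context;

	if (index == 0)		/* "status" */
		seq_printf(seq, "%d\n", dev->status);
	else			/* "info" */
		seq_printf(seq, "device %s\n", dev->name);
}

static int my_register(struct proc_dir_entry *root, struct my_device *dev)
{
	my_type = visor_proc_CreateType(root, type_names, prop_names,
					my_show_property);
	if (my_type == NULL)
		return -ENOMEM;
	my_obj = visor_proc_CreateObject(my_type, dev->name, dev);
	if (my_obj == NULL) {
		visor_proc_DestroyType(my_type);
		return -ENOMEM;
	}
	return 0;
}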
diff --git a/drivers/staging/unisys/visorutil/visorkmodutils.c b/drivers/staging/unisys/visorutil/visorkmodutils.c
new file mode 100644
index 000000000000..a7d1e94ca3c3
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/visorkmodutils.c
@@ -0,0 +1,71 @@
+/* visorkmodutils.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include "uniklog.h"
+#include "timskmod.h"
+
+#define MYDRVNAME "timskmodutils"
+
+/** Callers to interfaces that set __GFP_NORETRY flag below
+ * must check for a NULL (error) result as we are telling the
+ * kernel interface that it is okay to fail.
+ */
+
+void *kmalloc_kernel(size_t siz)
+{
+ return kmalloc(siz, GFP_KERNEL | __GFP_NORETRY);
+}
+
+/* Use these handy-dandy seq_file_xxx functions if you want to call some
+ * functions that write stuff into a seq_file, but you actually just want
+ * to dump that output into a buffer. Use them as follows:
+ * - call visor_seq_file_new_buffer to create the seq_file (you supply the buf)
+ * - call whatever functions you want that take a seq_file as an argument
+ * (the buf you supplied will get the output data)
+ * - call visor_seq_file_done_buffer to dispose of your seq_file
+ */
+struct seq_file *visor_seq_file_new_buffer(void *buf, size_t buf_size)
+{
+ struct seq_file *rc = NULL;
+ struct seq_file *m = kmalloc_kernel(sizeof(struct seq_file));
+
+ if (m == NULL) {
+ rc = NULL;
+ goto Away;
+ }
+ memset(m, 0, sizeof(struct seq_file));
+ m->buf = buf;
+ m->size = buf_size;
+ rc = m;
+Away:
+ if (rc == NULL) {
+ visor_seq_file_done_buffer(m);
+ m = NULL;
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_seq_file_new_buffer);
+
+
+
+void visor_seq_file_done_buffer(struct seq_file *m)
+{
+ if (!m)
+ return;
+ kfree(m);
+}
+EXPORT_SYMBOL_GPL(visor_seq_file_done_buffer);
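
A sketch of the buffer-dumping pattern the comment above describes, assuming timskmod.h declares kmalloc_kernel() and these two helpers; existing_show_func() stands in for any function that writes to a seq_file:

#include <linux/slab.h>
#include <linux/seq_file.h>
#include "timskmod.h"

static void existing_show_func(struct seq_file *seq)
{
	seq_puts(seq, "example output\n");
}

static char *capture_show_output(void)
{
	char *buf = kmalloc_kernel(PAGE_SIZE);	/* __GFP_NORETRY: may fail */
	struct seq_file *seq;

	if (buf == NULL)
		return NULL;
	seq = visor_seq_file_new_buffer(buf, PAGE_SIZE);
	if (seq == NULL) {
		kfree(buf);
		return NULL;
	}
	existing_show_func(seq);		/* output lands in buf */
	if (seq->count < PAGE_SIZE)
		buf[seq->count] = '\0';		/* NUL-terminate for callers */
	else
		buf[PAGE_SIZE - 1] = '\0';
	visor_seq_file_done_buffer(seq);
	return buf;				/* caller kfree()s this */
}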
diff --git a/drivers/staging/usbip/Kconfig b/drivers/staging/usbip/Kconfig
index 886000980474..bd99e9e47e50 100644
--- a/drivers/staging/usbip/Kconfig
+++ b/drivers/staging/usbip/Kconfig
@@ -1,7 +1,6 @@
config USBIP_CORE
tristate "USB/IP support"
depends on USB && NET
- default N
---help---
This enables pushing USB packets over IP to allow remote
machines direct access to USB devices. It provides the
@@ -18,7 +17,6 @@ config USBIP_CORE
config USBIP_VHCI_HCD
tristate "VHCI hcd"
depends on USBIP_CORE
- default N
---help---
This enables the USB/IP virtual host controller driver,
which is run on the remote machine.
@@ -29,7 +27,6 @@ config USBIP_VHCI_HCD
config USBIP_HOST
tristate "Host driver"
depends on USBIP_CORE
- default N
---help---
This enables the USB/IP host driver, which is run on the
machine that is sharing the USB devices.
@@ -40,6 +37,5 @@ config USBIP_HOST
config USBIP_DEBUG
bool "Debug messages for USB/IP"
depends on USBIP_CORE
- default N
---help---
This enables the debug messages from the USB/IP drivers.
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index a73e437ec215..266e2b0ce9a8 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -86,6 +86,7 @@ struct bus_id_priv {
char status;
int interf_count;
struct stub_device *sdev;
+ struct usb_device *udev;
char shutdown_busid;
};
@@ -93,7 +94,7 @@ struct bus_id_priv {
extern struct kmem_cache *stub_priv_cache;
/* stub_dev.c */
-extern struct usb_driver stub_driver;
+extern struct usb_device_driver stub_driver;
/* stub_main.c */
struct bus_id_priv *get_busid_priv(const char *busid);
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 2e2ccefb9c2b..de692d7011a5 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -86,13 +86,16 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
struct stub_device *sdev = dev_get_drvdata(dev);
int sockfd = 0;
struct socket *socket;
+ int rv;
if (!sdev) {
dev_err(dev, "sdev is null\n");
return -ENODEV;
}
- sscanf(buf, "%d", &sockfd);
+ rv = sscanf(buf, "%d", &sockfd);
+ if (rv != 1)
+ return -EINVAL;
if (sockfd != -1) {
int err;
@@ -279,21 +282,19 @@ static void stub_device_unusable(struct usbip_device *ud)
*
* Allocates and initializes a new stub_device struct.
*/
-static struct stub_device *stub_device_alloc(struct usb_device *udev,
- struct usb_interface *interface)
+static struct stub_device *stub_device_alloc(struct usb_device *udev)
{
struct stub_device *sdev;
- int busnum = interface_to_busnum(interface);
- int devnum = interface_to_devnum(interface);
+ int busnum = udev->bus->busnum;
+ int devnum = udev->devnum;
- dev_dbg(&interface->dev, "allocating stub device");
+ dev_dbg(&udev->dev, "allocating stub device");
/* yes, it's a new device */
sdev = kzalloc(sizeof(struct stub_device), GFP_KERNEL);
if (!sdev)
return NULL;
- sdev->interface = usb_get_intf(interface);
sdev->udev = usb_get_dev(udev);
/*
@@ -322,7 +323,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev,
usbip_start_eh(&sdev->ud);
- dev_dbg(&interface->dev, "register new interface\n");
+ dev_dbg(&udev->dev, "register new device\n");
return sdev;
}
@@ -332,32 +333,21 @@ static void stub_device_free(struct stub_device *sdev)
kfree(sdev);
}
-/*
- * If a usb device has multiple active interfaces, this driver is bound to all
- * the active interfaces. However, usbip exports *a* usb device (i.e., not *an*
- * active interface). Currently, a userland program must ensure that it
- * looks at the usbip's sysfs entries of only the first active interface.
- *
- * TODO: use "struct usb_device_driver" to bind a usb device.
- * However, it seems it is not fully supported in mainline kernel yet
- * (2.6.19.2).
- */
-static int stub_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
+static int stub_probe(struct usb_device *udev)
{
- struct usb_device *udev = interface_to_usbdev(interface);
struct stub_device *sdev = NULL;
- const char *udev_busid = dev_name(interface->dev.parent);
+ const char *udev_busid = dev_name(&udev->dev);
int err = 0;
struct bus_id_priv *busid_priv;
+ int rc;
- dev_dbg(&interface->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter\n");
/* check we should claim or not by busid_table */
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) ||
(busid_priv->status == STUB_BUSID_OTHER)) {
- dev_info(&interface->dev,
+ dev_info(&udev->dev,
"%s is not in match_busid table... skip!\n",
udev_busid);
@@ -383,60 +373,41 @@ static int stub_probe(struct usb_interface *interface,
return -ENODEV;
}
- if (busid_priv->status == STUB_BUSID_ALLOC) {
- sdev = busid_priv->sdev;
- if (!sdev)
- return -ENODEV;
-
- busid_priv->interf_count++;
- dev_info(&interface->dev,
- "usbip-host: register new interface (bus %u dev %u ifn %u)\n",
- udev->bus->busnum, udev->devnum,
- interface->cur_altsetting->desc.bInterfaceNumber);
-
- /* set private data to usb_interface */
- usb_set_intfdata(interface, sdev);
-
- err = stub_add_files(&interface->dev);
- if (err) {
- dev_err(&interface->dev, "stub_add_files for %s\n",
- udev_busid);
- usb_set_intfdata(interface, NULL);
- busid_priv->interf_count--;
- return err;
- }
-
- usb_get_intf(interface);
- return 0;
- }
-
/* ok, this is my device */
- sdev = stub_device_alloc(udev, interface);
+ sdev = stub_device_alloc(udev);
if (!sdev)
return -ENOMEM;
- dev_info(&interface->dev,
- "usbip-host: register new device (bus %u dev %u ifn %u)\n",
- udev->bus->busnum, udev->devnum,
- interface->cur_altsetting->desc.bInterfaceNumber);
+ dev_info(&udev->dev,
+ "usbip-host: register new device (bus %u dev %u)\n",
+ udev->bus->busnum, udev->devnum);
- busid_priv->interf_count = 0;
busid_priv->shutdown_busid = 0;
- /* set private data to usb_interface */
- usb_set_intfdata(interface, sdev);
- busid_priv->interf_count++;
+ /* set private data to usb_device */
+ dev_set_drvdata(&udev->dev, sdev);
busid_priv->sdev = sdev;
+ busid_priv->udev = udev;
+
+ /*
+ * Claim this hub port.
+ * It doesn't matter what value we pass as owner
+ * (struct usb_dev_state *) as long as it is unique.
+ */
+ rc = usb_hub_claim_port(udev->parent, udev->portnum,
+ (struct usb_dev_state *) udev);
+ if (rc) {
+ dev_dbg(&udev->dev, "unable to claim port\n");
+ return rc;
+ }
- err = stub_add_files(&interface->dev);
+ err = stub_add_files(&udev->dev);
if (err) {
- dev_err(&interface->dev, "stub_add_files for %s\n", udev_busid);
- usb_set_intfdata(interface, NULL);
- usb_put_intf(interface);
+ dev_err(&udev->dev, "stub_add_files for %s\n", udev_busid);
+ dev_set_drvdata(&udev->dev, NULL);
usb_put_dev(udev);
kthread_stop_put(sdev->ud.eh);
- busid_priv->interf_count = 0;
busid_priv->sdev = NULL;
stub_device_free(sdev);
return err;
@@ -461,13 +432,14 @@ static void shutdown_busid(struct bus_id_priv *busid_priv)
* called in usb_disconnect() or usb_deregister()
* but only if actconfig(active configuration) exists
*/
-static void stub_disconnect(struct usb_interface *interface)
+static void stub_disconnect(struct usb_device *udev)
{
struct stub_device *sdev;
- const char *udev_busid = dev_name(interface->dev.parent);
+ const char *udev_busid = dev_name(&udev->dev);
struct bus_id_priv *busid_priv;
+ int rc;
- dev_dbg(&interface->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter\n");
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv) {
@@ -475,41 +447,37 @@ static void stub_disconnect(struct usb_interface *interface)
return;
}
- sdev = usb_get_intfdata(interface);
+ sdev = dev_get_drvdata(&udev->dev);
/* get stub_device */
if (!sdev) {
- dev_err(&interface->dev, "could not get device");
+ dev_err(&udev->dev, "could not get device");
return;
}
- usb_set_intfdata(interface, NULL);
+ dev_set_drvdata(&udev->dev, NULL);
/*
* NOTE: rx/tx threads are invoked for each usb_device.
*/
- stub_remove_files(&interface->dev);
+ stub_remove_files(&udev->dev);
- /* If usb reset is called from event handler */
- if (busid_priv->sdev->ud.eh == current) {
- busid_priv->interf_count--;
+ /* release port */
+ rc = usb_hub_release_port(udev->parent, udev->portnum,
+ (struct usb_dev_state *) udev);
+ if (rc) {
+ dev_dbg(&udev->dev, "unable to release port\n");
return;
}
- if (busid_priv->interf_count > 1) {
- busid_priv->interf_count--;
- shutdown_busid(busid_priv);
- usb_put_intf(interface);
+ /* If usb reset is called from event handler */
+ if (busid_priv->sdev->ud.eh == current)
return;
- }
-
- busid_priv->interf_count = 0;
/* shutdown the current connection */
shutdown_busid(busid_priv);
usb_put_dev(sdev->udev);
- usb_put_intf(interface);
/* free sdev */
busid_priv->sdev = NULL;
@@ -523,28 +491,34 @@ static void stub_disconnect(struct usb_interface *interface)
}
}
-/*
- * Presence of pre_reset and post_reset prevents the driver from being unbound
- * when the device is being reset
- */
+#ifdef CONFIG_PM
-static int stub_pre_reset(struct usb_interface *interface)
+/* These functions need usb_port_suspend and usb_port_resume,
+ * which reside in drivers/usb/core/usb.h. Skip for now. */
+
+static int stub_suspend(struct usb_device *udev, pm_message_t message)
{
- dev_dbg(&interface->dev, "pre_reset\n");
+ dev_dbg(&udev->dev, "stub_suspend\n");
+
return 0;
}
-static int stub_post_reset(struct usb_interface *interface)
+static int stub_resume(struct usb_device *udev, pm_message_t message)
{
- dev_dbg(&interface->dev, "post_reset\n");
+ dev_dbg(&udev->dev, "stub_resume\n");
+
return 0;
}
-struct usb_driver stub_driver = {
+#endif /* CONFIG_PM */
+
+struct usb_device_driver stub_driver = {
.name = "usbip-host",
.probe = stub_probe,
.disconnect = stub_disconnect,
- .id_table = stub_table,
- .pre_reset = stub_pre_reset,
- .post_reset = stub_post_reset,
+#ifdef CONFIG_PM
+ .suspend = stub_suspend,
+ .resume = stub_resume,
+#endif
+ .supports_autosuspend = 0,
};
diff --git a/drivers/staging/usbip/stub_main.c b/drivers/staging/usbip/stub_main.c
index baf857f7cc88..9c5832abbdf1 100644
--- a/drivers/staging/usbip/stub_main.c
+++ b/drivers/staging/usbip/stub_main.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/module.h>
+#include <linux/device.h>
#include "usbip_common.h"
#include "stub.h"
@@ -187,6 +188,34 @@ static ssize_t store_match_busid(struct device_driver *dev, const char *buf,
static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid,
store_match_busid);
+static ssize_t rebind_store(struct device_driver *dev, const char *buf,
+ size_t count)
+{
+ int ret;
+ int len;
+ struct bus_id_priv *bid;
+
+ /* buf length should be less than BUSID_SIZE */
+ len = strnlen(buf, BUSID_SIZE);
+
+ if (!(len < BUSID_SIZE))
+ return -EINVAL;
+
+ bid = get_busid_priv(buf);
+ if (!bid)
+ return -ENODEV;
+
+ ret = device_attach(&bid->udev->dev);
+ if (ret < 0) {
+ dev_err(&bid->udev->dev, "rebind failed\n");
+ return ret;
+ }
+
+ return count;
+}
+
+static DRIVER_ATTR_WO(rebind);
+
static struct stub_priv *stub_priv_pop_from_listhead(struct list_head *listhead)
{
struct stub_priv *priv, *tmp;
@@ -254,7 +283,7 @@ static int __init usbip_host_init(void)
return -ENOMEM;
}
- ret = usb_register(&stub_driver);
+ ret = usb_register_device_driver(&stub_driver, THIS_MODULE);
if (ret) {
pr_err("usb_register failed %d\n", ret);
goto err_usb_register;
@@ -267,11 +296,18 @@ static int __init usbip_host_init(void)
goto err_create_file;
}
+ ret = driver_create_file(&stub_driver.drvwrap.driver,
+ &driver_attr_rebind);
+ if (ret) {
+ pr_err("driver_create_file failed\n");
+ goto err_create_file;
+ }
+
pr_info(DRIVER_DESC " v" USBIP_VERSION "\n");
return ret;
err_create_file:
- usb_deregister(&stub_driver);
+ usb_deregister_device_driver(&stub_driver);
err_usb_register:
kmem_cache_destroy(stub_priv_cache);
return ret;
@@ -282,11 +318,14 @@ static void __exit usbip_host_exit(void)
driver_remove_file(&stub_driver.drvwrap.driver,
&driver_attr_match_busid);
+ driver_remove_file(&stub_driver.drvwrap.driver,
+ &driver_attr_rebind);
+
/*
* deregister() calls stub_disconnect() for all devices. Device
* specific data is cleared in stub_disconnect().
*/
- usb_deregister(&stub_driver);
+ usb_deregister_device_driver(&stub_driver);
kmem_cache_destroy(stub_priv_cache);
}
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 5d1d4a183300..e0b6d6b42728 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -142,31 +142,19 @@ static int tweak_set_interface_cmd(struct urb *urb)
static int tweak_set_configuration_cmd(struct urb *urb)
{
+ struct stub_priv *priv = (struct stub_priv *) urb->context;
+ struct stub_device *sdev = priv->sdev;
struct usb_ctrlrequest *req;
__u16 config;
+ int err;
req = (struct usb_ctrlrequest *) urb->setup_packet;
config = le16_to_cpu(req->wValue);
- /*
- * I have never seen a multi-config device. Very rare.
- * For most devices, this will be called to choose a default
- * configuration only once in an initialization phase.
- *
- * set_configuration may change a device configuration and its device
- * drivers will be unbound and assigned for a new device configuration.
- * This means this usbip driver will be also unbound when called, then
- * eventually reassigned to the device as far as driver matching
- * condition is kept.
- *
- * Unfortunately, an existing usbip connection will be dropped
- * due to this driver unbinding. So, skip here.
- * A user may need to set a special configuration value before
- * exporting the device.
- */
- dev_info(&urb->dev->dev, "usb_set_configuration %d to %s... skip!\n",
- config, dev_name(&urb->dev->dev));
-
+ err = usb_set_configuration(sdev->udev, config);
+ if (err && err != -ENODEV)
+ dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
+ config, err);
return 0;
}
@@ -550,7 +538,7 @@ static void stub_rx_pdu(struct usbip_device *ud)
int ret;
struct usbip_header pdu;
struct stub_device *sdev = container_of(ud, struct stub_device, ud);
- struct device *dev = &sdev->interface->dev;
+ struct device *dev = &sdev->udev->dev;
usbip_dbg_stub_rx("Enter\n");
diff --git a/drivers/staging/usbip/stub_tx.c b/drivers/staging/usbip/stub_tx.c
index cd5326ae38cc..1622563823a3 100644
--- a/drivers/staging/usbip/stub_tx.c
+++ b/drivers/staging/usbip/stub_tx.c
@@ -74,12 +74,12 @@ void stub_complete(struct urb *urb)
/* OK */
break;
case -ENOENT:
- dev_info(&urb->dev->dev, "stopped by a call to usb_kill_urb() "
- "because of cleaning up a virtual connection\n");
+ dev_info(&urb->dev->dev,
+ "stopped by a call to usb_kill_urb() because of cleaning up a virtual connection\n");
return;
case -ECONNRESET:
- dev_info(&urb->dev->dev, "unlinked by a call to "
- "usb_unlink_urb()\n");
+ dev_info(&urb->dev->dev,
+ "unlinked by a call to usb_unlink_urb()\n");
break;
case -EPIPE:
dev_info(&urb->dev->dev, "endpoint %d is stalled\n",
@@ -89,8 +89,9 @@ void stub_complete(struct urb *urb)
dev_info(&urb->dev->dev, "device removed?\n");
break;
default:
- dev_info(&urb->dev->dev, "urb completion with non-zero status "
- "%d\n", urb->status);
+ dev_info(&urb->dev->dev,
+ "urb completion with non-zero status %d\n",
+ urb->status);
break;
}
@@ -228,8 +229,7 @@ static int stub_send_ret_submit(struct stub_device *sdev)
if (txsize != sizeof(pdu_header) + urb->actual_length) {
dev_err(&sdev->interface->dev,
- "actual length of urb %d does not "
- "match iso packet sizes %zu\n",
+ "actual length of urb %d does not match iso packet sizes %zu\n",
urb->actual_length,
txsize-sizeof(pdu_header));
kfree(iov);
diff --git a/drivers/staging/usbip/uapi/usbip.h b/drivers/staging/usbip/uapi/usbip.h
new file mode 100644
index 000000000000..fa5db30ede36
--- /dev/null
+++ b/drivers/staging/usbip/uapi/usbip.h
@@ -0,0 +1,26 @@
+/*
+ * usbip.h
+ *
+ * USBIP uapi defines and function prototypes etc.
+ */
+
+#ifndef _UAPI_LINUX_USBIP_H
+#define _UAPI_LINUX_USBIP_H
+
+/* usbip device status - exported in usbip device sysfs status */
+enum usbip_device_status {
+ /* sdev is available. */
+ SDEV_ST_AVAILABLE = 0x01,
+ /* sdev is now used. */
+ SDEV_ST_USED,
+ /* sdev is unusable because of a fatal error. */
+ SDEV_ST_ERROR,
+
+ /* vdev does not connect a remote device. */
+ VDEV_ST_NULL,
+ /* vdev is used, but the USB address is not assigned yet */
+ VDEV_ST_NOTASSIGNED,
+ VDEV_ST_USED,
+ VDEV_ST_ERROR
+};
+#endif /* _UAPI_LINUX_USBIP_H */
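
For illustration only (not part of the exported uapi itself), the states above can be mapped to readable strings like this:

static inline const char *usbip_status_string(enum usbip_device_status st)
{
	switch (st) {
	case SDEV_ST_AVAILABLE:		return "sdev available";
	case SDEV_ST_USED:		return "sdev in use";
	case SDEV_ST_ERROR:		return "sdev error";
	case VDEV_ST_NULL:		return "vdev not connected";
	case VDEV_ST_NOTASSIGNED:	return "vdev address not assigned";
	case VDEV_ST_USED:		return "vdev in use";
	case VDEV_ST_ERROR:		return "vdev error";
	}
	return "unknown";
}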
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index e010939ebb12..facaaf003f19 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -55,7 +55,8 @@ static ssize_t usbip_debug_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- sscanf(buf, "%lx", &usbip_debug_flag);
+ if (sscanf(buf, "%lx", &usbip_debug_flag) != 1)
+ return -EINVAL;
return count;
}
DEVICE_ATTR_RW(usbip_debug);
@@ -99,26 +100,8 @@ static void usbip_dump_usb_device(struct usb_device *udev)
struct device *dev = &udev->dev;
int i;
- dev_dbg(dev, " devnum(%d) devpath(%s) ",
- udev->devnum, udev->devpath);
-
- switch (udev->speed) {
- case USB_SPEED_HIGH:
- pr_debug("SPD_HIGH ");
- break;
- case USB_SPEED_FULL:
- pr_debug("SPD_FULL ");
- break;
- case USB_SPEED_LOW:
- pr_debug("SPD_LOW ");
- break;
- case USB_SPEED_UNKNOWN:
- pr_debug("SPD_UNKNOWN ");
- break;
- default:
- pr_debug("SPD_ERROR ");
- break;
- }
+ dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)",
+ udev->devnum, udev->devpath, usb_speed_string(udev->speed));
pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport);
@@ -195,8 +178,8 @@ static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd)
}
pr_debug(" ");
- pr_debug("bRequestType(%02X) bRequest(%02X) wValue(%04X) wIndex(%04X) "
- "wLength(%04X) ", cmd->bRequestType, cmd->bRequest,
+ pr_debug("bRequestType(%02X) bRequest(%02X) wValue(%04X) wIndex(%04X) wLength(%04X) ",
+ cmd->bRequestType, cmd->bRequest,
cmd->wValue, cmd->wIndex, cmd->wLength);
pr_debug("\n ");
@@ -307,8 +290,7 @@ void usbip_dump_header(struct usbip_header *pdu)
switch (pdu->base.command) {
case USBIP_CMD_SUBMIT:
- pr_debug("USBIP_CMD_SUBMIT: "
- "x_flags %u x_len %u sf %u #p %d iv %d\n",
+ pr_debug("USBIP_CMD_SUBMIT: x_flags %u x_len %u sf %u #p %d iv %d\n",
pdu->u.cmd_submit.transfer_flags,
pdu->u.cmd_submit.transfer_buffer_length,
pdu->u.cmd_submit.start_frame,
@@ -680,8 +662,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
if (total_length != urb->actual_length) {
dev_err(&urb->dev->dev,
- "total length of iso packets %d not equal to actual "
- "length of buffer %d\n",
+ "total length of iso packets %d not equal to actual length of buffer %d\n",
total_length, urb->actual_length);
if (ud->side == USBIP_STUB)
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index 9f86588a4534..f555d834f134 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include <linux/usb.h>
#include <linux/wait.h>
+#include "uapi/usbip.h"
#define USBIP_VERSION "1.0.0"
@@ -235,22 +236,6 @@ enum usbip_side {
USBIP_STUB,
};
-enum usbip_status {
- /* sdev is available. */
- SDEV_ST_AVAILABLE = 0x01,
- /* sdev is now used. */
- SDEV_ST_USED,
- /* sdev is unusable because of a fatal error. */
- SDEV_ST_ERROR,
-
- /* vdev does not connect a remote device. */
- VDEV_ST_NULL,
- /* vdev is used, but the USB address is not assigned yet */
- VDEV_ST_NOTASSIGNED,
- VDEV_ST_USED,
- VDEV_ST_ERROR
-};
-
/* event handler */
#define USBIP_EH_SHUTDOWN (1 << 0)
#define USBIP_EH_BYE (1 << 1)
@@ -271,7 +256,7 @@ enum usbip_status {
/* a common structure for stub_device and vhci_device */
struct usbip_device {
enum usbip_side side;
- enum usbip_status status;
+ enum usbip_device_status status;
/* lock for status */
spinlock_t lock;
diff --git a/drivers/staging/usbip/userspace/README b/drivers/staging/usbip/userspace/README
index 00a1658b272b..f528ba462b5f 100644
--- a/drivers/staging/usbip/userspace/README
+++ b/drivers/staging/usbip/userspace/README
@@ -9,8 +9,8 @@
- USB/IP device drivers
Found in the staging directory of the Linux kernel.
- - sysfsutils >= 2.0.0
- sysfsutils library
+ - libudev >= 2.0
+ libudev library
- libwrap0-dev
tcp wrapper library
@@ -19,6 +19,10 @@
- libtool, automake >= 1.9, autoconf >= 2.5.0, pkg-config
+[Optional]
+ - hwdata
+ Contains USB device identification data.
+
[Install]
0. Generate configuration scripts.
diff --git a/drivers/staging/usbip/userspace/configure.ac b/drivers/staging/usbip/userspace/configure.ac
index 0ee5d9263996..607d05c5ccfd 100644
--- a/drivers/staging/usbip/userspace/configure.ac
+++ b/drivers/staging/usbip/userspace/configure.ac
@@ -1,7 +1,7 @@
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ(2.59)
-AC_INIT([usbip-utils], [1.1.1], [linux-usb@vger.kernel.org])
+AC_INIT([usbip-utils], [2.0], [linux-usb@vger.kernel.org])
AC_DEFINE([USBIP_VERSION], [0x00000111], [binary-coded decimal version number])
CURRENT=0
@@ -44,11 +44,11 @@ AC_FUNC_REALLOC
AC_CHECK_FUNCS([memset mkdir regcomp socket strchr strerror strstr dnl
strtoul])
-AC_CHECK_HEADER([sysfs/libsysfs.h],
- [AC_CHECK_LIB([sysfs], [sysfs_open_directory_list],
- [LIBS="$LIBS -lsysfs"],
- [AC_MSG_ERROR([Missing sysfs2 library!])])],
- [AC_MSG_ERROR([Missing /usr/include/sysfs/libsysfs.h])])
+AC_CHECK_HEADER([libudev.h],
+ [AC_CHECK_LIB([udev], [udev_new],
+ [LIBS="$LIBS -ludev"],
+ [AC_MSG_ERROR([Missing udev library!])])],
+ [AC_MSG_ERROR([Missing /usr/include/libudev.h])])
# Checks for libwrap library.
AC_MSG_CHECKING([whether to use the libwrap (TCP wrappers) library])
diff --git a/drivers/staging/usbip/userspace/libsrc/Makefile.am b/drivers/staging/usbip/userspace/libsrc/Makefile.am
index 4921189e0260..7c8f8a4d54e4 100644
--- a/drivers/staging/usbip/userspace/libsrc/Makefile.am
+++ b/drivers/staging/usbip/userspace/libsrc/Makefile.am
@@ -4,4 +4,5 @@ libusbip_la_LDFLAGS = -version-info @LIBUSBIP_VERSION@
lib_LTLIBRARIES := libusbip.la
libusbip_la_SOURCES := names.c names.h usbip_host_driver.c usbip_host_driver.h \
- usbip_common.c usbip_common.h vhci_driver.c vhci_driver.h
+ usbip_common.c usbip_common.h vhci_driver.c vhci_driver.h \
+ sysfs_utils.c sysfs_utils.h
diff --git a/drivers/staging/usbip/userspace/libsrc/list.h b/drivers/staging/usbip/userspace/libsrc/list.h
new file mode 100644
index 000000000000..8d0c936e184f
--- /dev/null
+++ b/drivers/staging/usbip/userspace/libsrc/list.h
@@ -0,0 +1,136 @@
+#ifndef _LIST_H
+#define _LIST_H
+
+/* Stripped down implementation of linked list taken
+ * from the Linux Kernel.
+ */
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+static inline void INIT_LIST_HEAD(struct list_head *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+#define POISON_POINTER_DELTA 0
+#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty() on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+static inline void __list_del_entry(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+static inline void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->next = LIST_POISON1;
+ entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+
+#endif
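
A minimal, self-contained sketch of how this intrusive list API is meant to be used (usbip_host_driver.c below embeds a struct list_head the same way); struct item and its fields are invented for the example and compile with gcc against this list.h:

#include <stdio.h>
#include <stdlib.h>
#include "list.h"

struct item {
	int value;
	struct list_head node;		/* embedded list hook */
};

int main(void)
{
	LIST_HEAD(items);		/* empty list: head points to itself */
	struct list_head *pos, *tmp;
	struct item *it;

	for (int i = 0; i < 3; i++) {
		it = calloc(1, sizeof(*it));
		it->value = i;
		list_add(&it->node, &items);	/* push at the head */
	}

	/* safe variant: the current entry may be freed while iterating */
	list_for_each_safe(pos, tmp, &items) {
		it = list_entry(pos, struct item, node);
		printf("value %d\n", it->value);
		list_del(pos);
		free(it);
	}
	return 0;
}
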
diff --git a/drivers/staging/usbip/userspace/libsrc/sysfs_utils.c b/drivers/staging/usbip/userspace/libsrc/sysfs_utils.c
new file mode 100644
index 000000000000..36ac88ece0b8
--- /dev/null
+++ b/drivers/staging/usbip/userspace/libsrc/sysfs_utils.c
@@ -0,0 +1,31 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include "sysfs_utils.h"
+#include "usbip_common.h"
+
+int write_sysfs_attribute(const char *attr_path, const char *new_value,
+ size_t len)
+{
+ int fd;
+ int length;
+
+ fd = open(attr_path, O_WRONLY);
+ if (fd < 0) {
+ dbg("error opening attribute %s", attr_path);
+ return -1;
+ }
+
+ length = write(fd, new_value, len);
+ if (length < 0) {
+ dbg("error writing to attribute %s", attr_path);
+ close(fd);
+ return -1;
+ }
+
+ close(fd);
+
+ return 0;
+}
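
A hedged usage sketch for this helper; the sysfs path and function name are hypothetical, but usbip_host_export_device() and the vhci attach/detach code below build their attribute paths the same way:

#include <stdio.h>
#include <string.h>
#include "sysfs_utils.h"

/* Hand a connected socket descriptor to an exported device by writing it
 * to the device's usbip_sockfd attribute (illustrative path layout). */
int give_sockfd(const char *sysfs_dev_path, int sockfd)
{
	char attr_path[256];
	char buf[32];

	snprintf(attr_path, sizeof(attr_path), "%s/usbip_sockfd",
		 sysfs_dev_path);
	snprintf(buf, sizeof(buf), "%d\n", sockfd);

	/* 0 on success, -1 if the attribute cannot be opened or written */
	return write_sysfs_attribute(attr_path, buf, strlen(buf));
}
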
diff --git a/drivers/staging/usbip/userspace/libsrc/sysfs_utils.h b/drivers/staging/usbip/userspace/libsrc/sysfs_utils.h
new file mode 100644
index 000000000000..32ac1d105d18
--- /dev/null
+++ b/drivers/staging/usbip/userspace/libsrc/sysfs_utils.h
@@ -0,0 +1,8 @@
+
+#ifndef __SYSFS_UTILS_H
+#define __SYSFS_UTILS_H
+
+int write_sysfs_attribute(const char *attr_path, const char *new_value,
+ size_t len);
+
+#endif
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_common.c b/drivers/staging/usbip/userspace/libsrc/usbip_common.c
index 66f03cc62ac6..238bf5bf7f4c 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_common.c
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_common.c
@@ -2,6 +2,7 @@
* Copyright (C) 2005-2007 Takahiro Hirofuchi
*/
+#include <libudev.h>
#include "usbip_common.h"
#include "names.h"
@@ -12,6 +13,8 @@ int usbip_use_syslog;
int usbip_use_stderr;
int usbip_use_debug;
+extern struct udev *udev_context;
+
struct speed_string {
int num;
char *speed;
@@ -23,6 +26,8 @@ static const struct speed_string speed_strings[] = {
{ USB_SPEED_LOW, "1.5", "Low Speed(1.5Mbps)" },
{ USB_SPEED_FULL, "12", "Full Speed(12Mbps)" },
{ USB_SPEED_HIGH, "480", "High Speed(480Mbps)" },
+ { USB_SPEED_WIRELESS, "53.3-480", "Wireless"},
+ { USB_SPEED_SUPER, "5000", "Super Speed(5000Mbps)" },
{ 0, NULL, NULL }
};
@@ -109,75 +114,61 @@ void dump_usb_device(struct usbip_usb_device *udev)
}
-int read_attr_value(struct sysfs_device *dev, const char *name,
+int read_attr_value(struct udev_device *dev, const char *name,
const char *format)
{
- char attrpath[SYSFS_PATH_MAX];
- struct sysfs_attribute *attr;
+ const char *attr;
int num = 0;
int ret;
- snprintf(attrpath, sizeof(attrpath), "%s/%s", dev->path, name);
-
- attr = sysfs_open_attribute(attrpath);
+ attr = udev_device_get_sysattr_value(dev, name);
if (!attr) {
- dbg("sysfs_open_attribute failed: %s", attrpath);
- return 0;
- }
-
- ret = sysfs_read_attribute(attr);
- if (ret < 0) {
- dbg("sysfs_read_attribute failed");
+ err("udev_device_get_sysattr_value failed");
goto err;
}
- ret = sscanf(attr->value, format, &num);
+ /* The client chooses the device configuration
+ * when attaching it, so right after being bound
+ * to usbip-host on the server the device will
+ * have no configuration.
+ * Therefore, attributes such as bConfigurationValue
+ * and bNumInterfaces will not exist and sscanf will
+ * fail. Check for these cases and don't treat them
+ * as errors.
+ */
+
+ ret = sscanf(attr, format, &num);
if (ret < 1) {
- dbg("sscanf failed");
- goto err;
+ if (strcmp(name, "bConfigurationValue") &&
+ strcmp(name, "bNumInterfaces")) {
+ err("sscanf failed for attribute %s", name);
+ goto err;
+ }
}
err:
- sysfs_close_attribute(attr);
return num;
}
-int read_attr_speed(struct sysfs_device *dev)
+int read_attr_speed(struct udev_device *dev)
{
- char attrpath[SYSFS_PATH_MAX];
- struct sysfs_attribute *attr;
- char speed[100];
- int ret;
-
- snprintf(attrpath, sizeof(attrpath), "%s/%s", dev->path, "speed");
-
- attr = sysfs_open_attribute(attrpath);
- if (!attr) {
- dbg("sysfs_open_attribute failed: %s", attrpath);
- return 0;
- }
+ const char *speed;
- ret = sysfs_read_attribute(attr);
- if (ret < 0) {
- dbg("sysfs_read_attribute failed");
+ speed = udev_device_get_sysattr_value(dev, "speed");
+ if (!speed) {
+ err("udev_device_get_sysattr_value failed");
goto err;
}
- ret = sscanf(attr->value, "%99s\n", speed);
- if (ret < 1) {
- dbg("sscanf failed");
- goto err;
- }
-err:
- sysfs_close_attribute(attr);
-
for (int i = 0; speed_strings[i].speed != NULL; i++) {
if (!strcmp(speed, speed_strings[i].speed))
return speed_strings[i].num;
}
+err:
+
return USB_SPEED_UNKNOWN;
}
@@ -188,9 +179,10 @@ err:
} while (0)
-int read_usb_device(struct sysfs_device *sdev, struct usbip_usb_device *udev)
+int read_usb_device(struct udev_device *sdev, struct usbip_usb_device *udev)
{
uint32_t busnum, devnum;
+ const char *path, *name;
READ_ATTR(udev, uint8_t, sdev, bDeviceClass, "%02x\n");
READ_ATTR(udev, uint8_t, sdev, bDeviceSubClass, "%02x\n");
@@ -207,10 +199,13 @@ int read_usb_device(struct sysfs_device *sdev, struct usbip_usb_device *udev)
READ_ATTR(udev, uint8_t, sdev, devnum, "%d\n");
udev->speed = read_attr_speed(sdev);
- strncpy(udev->path, sdev->path, SYSFS_PATH_MAX);
- strncpy(udev->busid, sdev->name, SYSFS_BUS_ID_SIZE);
+ path = udev_device_get_syspath(sdev);
+ name = udev_device_get_sysname(sdev);
- sscanf(sdev->name, "%u-%u", &busnum, &devnum);
+ strncpy(udev->path, path, SYSFS_PATH_MAX);
+ strncpy(udev->busid, name, SYSFS_BUS_ID_SIZE);
+
+ sscanf(name, "%u-%u", &busnum, &devnum);
udev->busnum = busnum;
return 0;
@@ -220,13 +215,13 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
struct usbip_usb_interface *uinf)
{
char busid[SYSFS_BUS_ID_SIZE];
- struct sysfs_device *sif;
+ struct udev_device *sif;
sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i);
- sif = sysfs_open_device("usb", busid);
+ sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid);
if (!sif) {
- dbg("sysfs_open_device(\"usb\", \"%s\") failed", busid);
+ err("udev_device_new_from_subsystem_sysname %s failed", busid);
return -1;
}
@@ -234,8 +229,6 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
READ_ATTR(uinf, uint8_t, sif, bInterfaceSubClass, "%02x\n");
READ_ATTR(uinf, uint8_t, sif, bInterfaceProtocol, "%02x\n");
- sysfs_close_device(sif);
-
return 0;
}
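
For reference, the libudev calls that replace the libsysfs attribute accessors above reduce to this standalone sketch (the "1-1" bus ID is illustrative; link with -ludev):

#include <stdio.h>
#include <libudev.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_device *dev;
	const char *speed;

	if (!udev)
		return 1;

	/* look up a USB device by its bus ID */
	dev = udev_device_new_from_subsystem_sysname(udev, "usb", "1-1");
	if (!dev) {
		udev_unref(udev);
		return 1;
	}

	/* the returned string is owned by the udev_device (may be NULL) */
	speed = udev_device_get_sysattr_value(dev, "speed");
	if (speed)
		printf("speed: %s\n", speed);

	udev_device_unref(dev);
	udev_unref(udev);
	return 0;
}
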
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_common.h b/drivers/staging/usbip/userspace/libsrc/usbip_common.h
index 938ad1c36947..23be848f24fb 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_common.h
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_common.h
@@ -5,7 +5,7 @@
#ifndef __USBIP_COMMON_H
#define __USBIP_COMMON_H
-#include <sysfs/libsysfs.h>
+#include <libudev.h>
#include <stdint.h>
#include <stdio.h>
@@ -14,6 +14,8 @@
#include <syslog.h>
#include <unistd.h>
+#include <linux/usb/ch9.h>
+#include "../../uapi/usbip.h"
#ifndef USBIDS_FILE
#define USBIDS_FILE "/usr/share/hwdata/usb.ids"
@@ -28,6 +30,15 @@
#define USBIP_HOST_DRV_NAME "usbip-host"
#define USBIP_VHCI_DRV_NAME "vhci_hcd"
+/* sysfs constants */
+#define SYSFS_MNT_PATH "/sys"
+#define SYSFS_BUS_NAME "bus"
+#define SYSFS_BUS_TYPE "usb"
+#define SYSFS_DRIVERS_NAME "drivers"
+
+#define SYSFS_PATH_MAX 256
+#define SYSFS_BUS_ID_SIZE 32
+
extern int usbip_use_syslog;
extern int usbip_use_stderr;
extern int usbip_use_debug ;
@@ -76,30 +87,6 @@ extern int usbip_use_debug ;
abort(); \
} while (0)
-enum usb_device_speed {
- USB_SPEED_UNKNOWN = 0, /* enumerating */
- USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
- USB_SPEED_HIGH, /* usb 2.0 */
- USB_SPEED_VARIABLE /* wireless (usb 2.5) */
-};
-
-/* FIXME: how to sync with drivers/usbip_common.h ? */
-enum usbip_device_status {
- /* sdev is available. */
- SDEV_ST_AVAILABLE = 0x01,
- /* sdev is now used. */
- SDEV_ST_USED,
- /* sdev is unusable because of a fatal error. */
- SDEV_ST_ERROR,
-
- /* vdev does not connect a remote device. */
- VDEV_ST_NULL,
- /* vdev is used, but the USB address is not assigned yet */
- VDEV_ST_NOTASSIGNED,
- VDEV_ST_USED,
- VDEV_ST_ERROR
-};
-
struct usbip_usb_interface {
uint8_t bInterfaceClass;
uint8_t bInterfaceSubClass;
@@ -131,8 +118,8 @@ struct usbip_usb_device {
void dump_usb_interface(struct usbip_usb_interface *);
void dump_usb_device(struct usbip_usb_device *);
-int read_usb_device(struct sysfs_device *sdev, struct usbip_usb_device *udev);
-int read_attr_value(struct sysfs_device *dev, const char *name,
+int read_usb_device(struct udev_device *sdev, struct usbip_usb_device *udev);
+int read_attr_value(struct udev_device *dev, const char *name,
const char *format);
int read_usb_interface(struct usbip_usb_device *udev, int i,
struct usbip_usb_interface *uinf);
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c
index 71a449cf50db..c5bf60b135b9 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c
@@ -18,102 +18,65 @@
#include <sys/types.h>
#include <sys/stat.h>
+#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
+#include <libudev.h>
+
#include "usbip_common.h"
#include "usbip_host_driver.h"
+#include "list.h"
+#include "sysfs_utils.h"
#undef PROGNAME
#define PROGNAME "libusbip"
struct usbip_host_driver *host_driver;
+struct udev *udev_context;
-#define SYSFS_OPEN_RETRIES 100
-
-/* only the first interface value is true! */
static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
{
- char attrpath[SYSFS_PATH_MAX];
- struct sysfs_attribute *attr;
+ char status_attr_path[SYSFS_PATH_MAX];
+ int fd;
+ int length;
+ char status;
int value = 0;
- int rc;
- struct stat s;
- int retries = SYSFS_OPEN_RETRIES;
-
- /* This access is racy!
- *
- * Just after detach, our driver removes the sysfs
- * files and recreates them.
- *
- * We may try and fail to open the usbip_status of
- * an exported device in the (short) window where
- * it has been removed and not yet recreated.
- *
- * This is a bug in the interface. Nothing we can do
- * except work around it here by polling for the sysfs
- * usbip_status to reappear.
- */
-
- snprintf(attrpath, SYSFS_PATH_MAX, "%s/%s:%d.%d/usbip_status",
- udev->path, udev->busid, udev->bConfigurationValue, 0);
-
- while (retries > 0) {
- if (stat(attrpath, &s) == 0)
- break;
-
- if (errno != ENOENT) {
- dbg("stat failed: %s", attrpath);
- return -1;
- }
-
- usleep(10000); /* 10ms */
- retries--;
- }
- if (retries == 0)
- dbg("usbip_status not ready after %d retries",
- SYSFS_OPEN_RETRIES);
- else if (retries < SYSFS_OPEN_RETRIES)
- dbg("warning: usbip_status ready after %d retries",
- SYSFS_OPEN_RETRIES - retries);
+ snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
+ udev->path);
- attr = sysfs_open_attribute(attrpath);
- if (!attr) {
- dbg("sysfs_open_attribute failed: %s", attrpath);
+ if ((fd = open(status_attr_path, O_RDONLY)) < 0) {
+ err("error opening attribute %s", status_attr_path);
return -1;
}
- rc = sysfs_read_attribute(attr);
- if (rc) {
- dbg("sysfs_read_attribute failed: %s", attrpath);
- sysfs_close_attribute(attr);
+ length = read(fd, &status, 1);
+ if (length < 0) {
+ err("error reading attribute %s", status_attr_path);
+ close(fd);
return -1;
}
- value = atoi(attr->value);
-
- sysfs_close_attribute(attr);
+ value = atoi(&status);
return value;
}
-static struct usbip_exported_device *usbip_exported_device_new(char *sdevpath)
+static
+struct usbip_exported_device *usbip_exported_device_new(const char *sdevpath)
{
struct usbip_exported_device *edev = NULL;
+ struct usbip_exported_device *edev_old;
size_t size;
int i;
- edev = calloc(1, sizeof(*edev));
- if (!edev) {
- dbg("calloc failed");
- return NULL;
- }
+ edev = calloc(1, sizeof(struct usbip_exported_device));
- edev->sudev = sysfs_open_device_path(sdevpath);
+ edev->sudev = udev_device_new_from_syspath(udev_context, sdevpath);
if (!edev->sudev) {
- dbg("sysfs_open_device_path failed: %s", sdevpath);
+ err("udev_device_new_from_syspath: %s", sdevpath);
goto err;
}
@@ -124,11 +87,13 @@ static struct usbip_exported_device *usbip_exported_device_new(char *sdevpath)
goto err;
/* reallocate buffer to include usb interface data */
- size = sizeof(*edev) + edev->udev.bNumInterfaces *
+ size = sizeof(struct usbip_exported_device) + edev->udev.bNumInterfaces *
sizeof(struct usbip_usb_interface);
+ edev_old = edev;
edev = realloc(edev, size);
if (!edev) {
+ edev = edev_old;
dbg("realloc failed");
goto err;
}
@@ -138,160 +103,88 @@ static struct usbip_exported_device *usbip_exported_device_new(char *sdevpath)
return edev;
err:
- if (edev && edev->sudev)
- sysfs_close_device(edev->sudev);
+ if (edev->sudev)
+ udev_device_unref(edev->sudev);
if (edev)
free(edev);
return NULL;
}
-static int check_new(struct dlist *dlist, struct sysfs_device *target)
-{
- struct sysfs_device *dev;
-
- dlist_for_each_data(dlist, dev, struct sysfs_device) {
- if (!strncmp(dev->bus_id, target->bus_id, SYSFS_BUS_ID_SIZE))
- /* device found and is not new */
- return 0;
- }
- return 1;
-}
-
-static void delete_nothing(void *unused_data)
-{
- /*
- * NOTE: Do not delete anything, but the container will be deleted.
- */
- (void) unused_data;
-}
-
static int refresh_exported_devices(void)
{
- /* sysfs_device of usb_interface */
- struct sysfs_device *suintf;
- struct dlist *suintf_list;
- /* sysfs_device of usb_device */
- struct sysfs_device *sudev;
- struct dlist *sudev_list;
struct usbip_exported_device *edev;
-
- sudev_list = dlist_new_with_delete(sizeof(struct sysfs_device),
- delete_nothing);
-
- suintf_list = sysfs_get_driver_devices(host_driver->sysfs_driver);
- if (!suintf_list) {
- /*
- * Not an error condition. There are simply no devices bound to
- * the driver yet.
- */
- dbg("bind " USBIP_HOST_DRV_NAME ".ko to a usb device to be "
- "exportable!");
- return 0;
- }
-
- /* collect unique USB devices (not interfaces) */
- dlist_for_each_data(suintf_list, suintf, struct sysfs_device) {
- /* get usb device of this usb interface */
- sudev = sysfs_get_device_parent(suintf);
- if (!sudev) {
- dbg("sysfs_get_device_parent failed: %s", suintf->name);
- continue;
- }
-
- if (check_new(sudev_list, sudev)) {
- /* insert item at head of list */
- dlist_unshift(sudev_list, sudev);
- }
- }
-
- dlist_for_each_data(sudev_list, sudev, struct sysfs_device) {
- edev = usbip_exported_device_new(sudev->path);
- if (!edev) {
- dbg("usbip_exported_device_new failed");
- continue;
+ struct udev_enumerate *enumerate;
+ struct udev_list_entry *devices, *dev_list_entry;
+ struct udev_device *dev;
+ const char *path;
+
+ enumerate = udev_enumerate_new(udev_context);
+ udev_enumerate_add_match_subsystem(enumerate, "usb");
+ udev_enumerate_scan_devices(enumerate);
+
+ devices = udev_enumerate_get_list_entry(enumerate);
+
+ udev_list_entry_foreach(dev_list_entry, devices) {
+ path = udev_list_entry_get_name(dev_list_entry);
+ dev = udev_device_new_from_syspath(udev_context, path);
+
+ /* Check whether device uses usbip-host driver. */
+ if (!strcmp(udev_device_get_driver(dev),
+ USBIP_HOST_DRV_NAME)) {
+ edev = usbip_exported_device_new(path);
+ if (!edev) {
+ dbg("usbip_exported_device_new failed");
+ continue;
+ }
+
+ list_add(&edev->node, &host_driver->edev_list);
+ host_driver->ndevs++;
}
-
- dlist_unshift(host_driver->edev_list, edev);
- host_driver->ndevs++;
}
- dlist_destroy(sudev_list);
-
return 0;
}
-static struct sysfs_driver *open_sysfs_host_driver(void)
+static void usbip_exported_device_destroy(void)
{
- char bus_type[] = "usb";
- char sysfs_mntpath[SYSFS_PATH_MAX];
- char host_drv_path[SYSFS_PATH_MAX];
- struct sysfs_driver *host_drv;
- int rc;
-
- rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
- if (rc < 0) {
- dbg("sysfs_get_mnt_path failed");
- return NULL;
- }
-
- snprintf(host_drv_path, SYSFS_PATH_MAX, "%s/%s/%s/%s/%s",
- sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME,
- USBIP_HOST_DRV_NAME);
+ struct list_head *i, *tmp;
+ struct usbip_exported_device *edev;
- host_drv = sysfs_open_driver_path(host_drv_path);
- if (!host_drv) {
- dbg("sysfs_open_driver_path failed");
- return NULL;
+ list_for_each_safe(i, tmp, &host_driver->edev_list) {
+ edev = list_entry(i, struct usbip_exported_device, node);
+ list_del(i);
+ free(edev);
}
-
- return host_drv;
-}
-
-static void usbip_exported_device_delete(void *dev)
-{
- struct usbip_exported_device *edev = dev;
- sysfs_close_device(edev->sudev);
- free(dev);
}
int usbip_host_driver_open(void)
{
int rc;
- host_driver = calloc(1, sizeof(*host_driver));
- if (!host_driver) {
- dbg("calloc failed");
+ udev_context = udev_new();
+ if (!udev_context) {
+ err("udev_new failed");
return -1;
}
- host_driver->ndevs = 0;
- host_driver->edev_list =
- dlist_new_with_delete(sizeof(struct usbip_exported_device),
- usbip_exported_device_delete);
- if (!host_driver->edev_list) {
- dbg("dlist_new_with_delete failed");
- goto err_free_host_driver;
- }
+ host_driver = calloc(1, sizeof(*host_driver));
- host_driver->sysfs_driver = open_sysfs_host_driver();
- if (!host_driver->sysfs_driver)
- goto err_destroy_edev_list;
+ host_driver->ndevs = 0;
+ INIT_LIST_HEAD(&host_driver->edev_list);
rc = refresh_exported_devices();
if (rc < 0)
- goto err_close_sysfs_driver;
+ goto err_free_host_driver;
return 0;
-err_close_sysfs_driver:
- sysfs_close_driver(host_driver->sysfs_driver);
-err_destroy_edev_list:
- dlist_destroy(host_driver->edev_list);
err_free_host_driver:
free(host_driver);
host_driver = NULL;
+ udev_unref(udev_context);
+
return -1;
}
@@ -300,30 +193,22 @@ void usbip_host_driver_close(void)
if (!host_driver)
return;
- if (host_driver->edev_list)
- dlist_destroy(host_driver->edev_list);
- if (host_driver->sysfs_driver)
- sysfs_close_driver(host_driver->sysfs_driver);
+ usbip_exported_device_destroy();
free(host_driver);
host_driver = NULL;
+
+ udev_unref(udev_context);
}
int usbip_host_refresh_device_list(void)
{
int rc;
- if (host_driver->edev_list)
- dlist_destroy(host_driver->edev_list);
+ usbip_exported_device_destroy();
host_driver->ndevs = 0;
- host_driver->edev_list =
- dlist_new_with_delete(sizeof(struct usbip_exported_device),
- usbip_exported_device_delete);
- if (!host_driver->edev_list) {
- dbg("dlist_new_with_delete failed");
- return -1;
- }
+ INIT_LIST_HEAD(&host_driver->edev_list);
rc = refresh_exported_devices();
if (rc < 0)
@@ -335,8 +220,7 @@ int usbip_host_refresh_device_list(void)
int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
{
char attr_name[] = "usbip_sockfd";
- char attr_path[SYSFS_PATH_MAX];
- struct sysfs_attribute *attr;
+ char sockfd_attr_path[SYSFS_PATH_MAX];
char sockfd_buff[30];
int ret;
@@ -356,41 +240,32 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
}
/* only the first interface is true */
- snprintf(attr_path, sizeof(attr_path), "%s/%s:%d.%d/%s",
- edev->udev.path, edev->udev.busid,
- edev->udev.bConfigurationValue, 0, attr_name);
-
- attr = sysfs_open_attribute(attr_path);
- if (!attr) {
- dbg("sysfs_open_attribute failed: %s", attr_path);
- return -1;
- }
+ snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
+ edev->udev.path, attr_name);
snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
- dbg("write: %s", sockfd_buff);
- ret = sysfs_write_attribute(attr, sockfd_buff, strlen(sockfd_buff));
+ ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff,
+ strlen(sockfd_buff));
if (ret < 0) {
- dbg("sysfs_write_attribute failed: sockfd %s to %s",
- sockfd_buff, attr_path);
- goto err_write_sockfd;
+ err("write_sysfs_attribute failed: sockfd %s to %s",
+ sockfd_buff, sockfd_attr_path);
+ return ret;
}
- dbg("connect: %s", edev->udev.busid);
-
-err_write_sockfd:
- sysfs_close_attribute(attr);
+ info("connect: %s", edev->udev.busid);
return ret;
}
struct usbip_exported_device *usbip_host_get_device(int num)
{
+ struct list_head *i;
struct usbip_exported_device *edev;
- struct dlist *dlist = host_driver->edev_list;
int cnt = 0;
- dlist_for_each_data(dlist, edev, struct usbip_exported_device) {
+ list_for_each(i, &host_driver->edev_list) {
+ edev = list_entry(i, struct usbip_exported_device, node);
if (num == cnt)
return edev;
else
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h
index 34fd14cbfc49..2a31f855c616 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h
@@ -21,18 +21,19 @@
#include <stdint.h>
#include "usbip_common.h"
+#include "list.h"
struct usbip_host_driver {
int ndevs;
- struct sysfs_driver *sysfs_driver;
/* list of exported device */
- struct dlist *edev_list;
+ struct list_head edev_list;
};
struct usbip_exported_device {
- struct sysfs_device *sudev;
+ struct udev_device *sudev;
int32_t status;
struct usbip_usb_device udev;
+ struct list_head node;
struct usbip_usb_interface uinf[];
};
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
index 209df9b37cb4..8901fcb7946f 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
@@ -6,44 +6,28 @@
#include "vhci_driver.h"
#include <limits.h>
#include <netdb.h>
+#include <libudev.h>
+#include "sysfs_utils.h"
#undef PROGNAME
#define PROGNAME "libusbip"
struct usbip_vhci_driver *vhci_driver;
+struct udev *udev_context;
static struct usbip_imported_device *
imported_device_init(struct usbip_imported_device *idev, char *busid)
{
- struct sysfs_device *sudev;
+ struct udev_device *sudev;
- sudev = sysfs_open_device("usb", busid);
+ sudev = udev_device_new_from_subsystem_sysname(udev_context,
+ "usb", busid);
if (!sudev) {
- dbg("sysfs_open_device failed: %s", busid);
+ dbg("udev_device_new_from_subsystem_sysname failed: %s", busid);
goto err;
}
read_usb_device(sudev, &idev->udev);
- sysfs_close_device(sudev);
-
- /* add class devices of this imported device */
- struct usbip_class_device *cdev;
- dlist_for_each_data(vhci_driver->cdev_list, cdev,
- struct usbip_class_device) {
- if (!strncmp(cdev->dev_path, idev->udev.path,
- strlen(idev->udev.path))) {
- struct usbip_class_device *new_cdev;
- /*
- * alloc and copy because dlist is linked
- * from only one list
- */
- new_cdev = calloc(1, sizeof(*new_cdev));
- if (!new_cdev)
- goto err;
-
- memcpy(new_cdev, cdev, sizeof(*new_cdev));
- dlist_unshift(idev->cdev_list, (void *) new_cdev);
- }
- }
+ udev_device_unref(sudev);
return idev;
@@ -53,7 +37,7 @@ err:
-static int parse_status(char *value)
+static int parse_status(const char *value)
{
int ret = 0;
char *c;
@@ -100,12 +84,6 @@ static int parse_status(char *value)
idev->busnum = (devid >> 16);
idev->devnum = (devid & 0x0000ffff);
- idev->cdev_list = dlist_new(sizeof(struct usbip_class_device));
- if (!idev->cdev_list) {
- dbg("dlist_new failed");
- return -1;
- }
-
if (idev->status != VDEV_ST_NULL
&& idev->status != VDEV_ST_NOTASSIGNED) {
idev = imported_device_init(idev, lbusid);
@@ -129,156 +107,35 @@ static int parse_status(char *value)
return 0;
}
-
-static int check_usbip_device(struct sysfs_class_device *cdev)
-{
- /* /sys/class/video4linux/video0/device */
- char class_path[SYSFS_PATH_MAX];
- /* /sys/devices/platform/vhci_hcd/usb6/6-1:1.1 */
- char dev_path[SYSFS_PATH_MAX];
- int ret;
- struct usbip_class_device *usbip_cdev;
-
- snprintf(class_path, sizeof(class_path), "%s/device", cdev->path);
-
- ret = sysfs_get_link(class_path, dev_path, sizeof(dev_path));
- if (ret == 0) {
- if (!strncmp(dev_path, vhci_driver->hc_device->path,
- strlen(vhci_driver->hc_device->path))) {
- /* found usbip device */
- usbip_cdev = calloc(1, sizeof(*usbip_cdev));
- if (!usbip_cdev) {
- dbg("calloc failed");
- return -1;
- }
- dlist_unshift(vhci_driver->cdev_list, usbip_cdev);
- strncpy(usbip_cdev->class_path, class_path,
- sizeof(usbip_cdev->class_path));
- strncpy(usbip_cdev->dev_path, dev_path,
- sizeof(usbip_cdev->dev_path));
- dbg("found: %s %s", class_path, dev_path);
- }
- }
-
- return 0;
-}
-
-
-static int search_class_for_usbip_device(char *cname)
-{
- struct sysfs_class *class;
- struct dlist *cdev_list;
- struct sysfs_class_device *cdev;
- int ret = 0;
-
- class = sysfs_open_class(cname);
- if (!class) {
- dbg("sysfs_open_class failed");
- return -1;
- }
-
- dbg("class: %s", class->name);
-
- cdev_list = sysfs_get_class_devices(class);
- if (!cdev_list)
- /* nothing */
- goto out;
-
- dlist_for_each_data(cdev_list, cdev, struct sysfs_class_device) {
- dbg("cdev: %s", cdev->name);
- ret = check_usbip_device(cdev);
- if (ret < 0)
- goto out;
- }
-
-out:
- sysfs_close_class(class);
-
- return ret;
-}
-
-
-static int refresh_class_device_list(void)
-{
- int ret;
- struct dlist *cname_list;
- char *cname;
- char sysfs_mntpath[SYSFS_PATH_MAX];
- char class_path[SYSFS_PATH_MAX];
-
- ret = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
- if (ret < 0) {
- dbg("sysfs_get_mnt_path failed");
- return -1;
- }
-
- snprintf(class_path, sizeof(class_path), "%s/%s", sysfs_mntpath,
- SYSFS_CLASS_NAME);
-
- /* search under /sys/class */
- cname_list = sysfs_open_directory_list(class_path);
- if (!cname_list) {
- dbg("sysfs_open_directory failed");
- return -1;
- }
-
- dlist_for_each_data(cname_list, cname, char) {
- ret = search_class_for_usbip_device(cname);
- if (ret < 0) {
- sysfs_close_list(cname_list);
- return -1;
- }
- }
-
- sysfs_close_list(cname_list);
-
- /* search under /sys/block */
- ret = search_class_for_usbip_device(SYSFS_BLOCK_NAME);
- if (ret < 0)
- return -1;
-
- return 0;
-}
-
-
static int refresh_imported_device_list(void)
{
- struct sysfs_attribute *attr_status;
-
+ const char *attr_status;
- attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status");
+ attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device,
+ "status");
if (!attr_status) {
- dbg("sysfs_get_device_attr(\"status\") failed: %s",
- vhci_driver->hc_device->name);
+ err("udev_device_get_sysattr_value failed");
return -1;
}
- dbg("name: %s path: %s len: %d method: %d value: %s",
- attr_status->name, attr_status->path, attr_status->len,
- attr_status->method, attr_status->value);
-
- return parse_status(attr_status->value);
+ return parse_status(attr_status);
}
static int get_nports(void)
{
char *c;
int nports = 0;
- struct sysfs_attribute *attr_status;
+ const char *attr_status;
- attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status");
+ attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device,
+ "status");
if (!attr_status) {
- dbg("sysfs_get_device_attr(\"status\") failed: %s",
- vhci_driver->hc_device->name);
+ err("udev_device_get_sysattr_value failed");
return -1;
}
- dbg("name: %s path: %s len: %d method: %d value: %s",
- attr_status->name, attr_status->path, attr_status->len,
- attr_status->method, attr_status->value);
-
/* skip a header line */
- c = strchr(attr_status->value, '\n');
+ c = strchr(attr_status, '\n');
if (!c)
return 0;
c++;
@@ -295,71 +152,66 @@ static int get_nports(void)
return nports;
}
-static int get_hc_busid(char *sysfs_mntpath, char *hc_busid)
-{
- struct sysfs_driver *sdriver;
- char sdriver_path[SYSFS_PATH_MAX];
-
- struct sysfs_device *hc_dev;
- struct dlist *hc_devs;
-
- int found = 0;
-
- snprintf(sdriver_path, SYSFS_PATH_MAX, "%s/%s/%s/%s/%s", sysfs_mntpath,
- SYSFS_BUS_NAME, USBIP_VHCI_BUS_TYPE, SYSFS_DRIVERS_NAME,
- USBIP_VHCI_DRV_NAME);
-
- sdriver = sysfs_open_driver_path(sdriver_path);
- if (!sdriver) {
- dbg("sysfs_open_driver_path failed: %s", sdriver_path);
- dbg("make sure " USBIP_CORE_MOD_NAME ".ko and "
- USBIP_VHCI_DRV_NAME ".ko are loaded!");
- return -1;
- }
-
- hc_devs = sysfs_get_driver_devices(sdriver);
- if (!hc_devs) {
- dbg("sysfs_get_driver failed");
- goto err;
- }
-
- /* assume only one vhci_hcd */
- dlist_for_each_data(hc_devs, hc_dev, struct sysfs_device) {
- strncpy(hc_busid, hc_dev->bus_id, SYSFS_BUS_ID_SIZE);
- found = 1;
- }
-
-err:
- sysfs_close_driver(sdriver);
-
- if (found)
- return 0;
-
- dbg("%s not found", hc_busid);
- return -1;
-}
-
-static int read_record(int rhport, char *host, char *port, char *busid)
+/*
+ * Read the given port's record.
+ *
+ * To avoid buffer overflow we will read the entire line and
+ * validate each part's size. The initial buffer is padded by 4 to
+ * accommodate the 2 spaces, 1 newline and an additional character
+ * which is needed to properly validate the 3rd part without it being
+ * truncated to an acceptable length.
+ */
+static int read_record(int rhport, char *host, unsigned long host_len,
+ char *port, unsigned long port_len, char *busid)
{
+ int part;
FILE *file;
char path[PATH_MAX+1];
+ char *buffer, *start, *end;
+ char delim[] = {' ', ' ', '\n'};
+ int max_len[] = {(int)host_len, (int)port_len, SYSFS_BUS_ID_SIZE};
+ size_t buffer_len = host_len + port_len + SYSFS_BUS_ID_SIZE + 4;
+
+ buffer = malloc(buffer_len);
+ if (!buffer)
+ return -1;
snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport);
file = fopen(path, "r");
if (!file) {
err("fopen");
+ free(buffer);
return -1;
}
- if (fscanf(file, "%s %s %s\n", host, port, busid) != 3) {
- err("fscanf");
+ if (fgets(buffer, buffer_len, file) == NULL) {
+ err("fgets");
+ free(buffer);
fclose(file);
return -1;
}
-
fclose(file);
+ /* validate the length of each of the 3 parts */
+ start = buffer;
+ for (part = 0; part < 3; part++) {
+ end = strchr(start, delim[part]);
+ if (end == NULL || (end - start) > max_len[part]) {
+ free(buffer);
+ return -1;
+ }
+ start = end + 1;
+ }
+
+ if (sscanf(buffer, "%s %s %s\n", host, port, busid) != 3) {
+ err("sscanf");
+ free(buffer);
+ return -1;
+ }
+
+ free(buffer);
+
return 0;
}
@@ -367,30 +219,21 @@ static int read_record(int rhport, char *host, char *port, char *busid)
int usbip_vhci_driver_open(void)
{
- int ret;
- char hc_busid[SYSFS_BUS_ID_SIZE];
-
- vhci_driver = (struct usbip_vhci_driver *) calloc(1, sizeof(*vhci_driver));
- if (!vhci_driver) {
- dbg("calloc failed");
+ udev_context = udev_new();
+ if (!udev_context) {
+ err("udev_new failed");
return -1;
}
- ret = sysfs_get_mnt_path(vhci_driver->sysfs_mntpath, SYSFS_PATH_MAX);
- if (ret < 0) {
- dbg("sysfs_get_mnt_path failed");
- goto err;
- }
-
- ret = get_hc_busid(vhci_driver->sysfs_mntpath, hc_busid);
- if (ret < 0)
- goto err;
+ vhci_driver = calloc(1, sizeof(struct usbip_vhci_driver));
/* will be freed in usbip_driver_close() */
- vhci_driver->hc_device = sysfs_open_device(USBIP_VHCI_BUS_TYPE,
- hc_busid);
+ vhci_driver->hc_device =
+ udev_device_new_from_subsystem_sysname(udev_context,
+ USBIP_VHCI_BUS_TYPE,
+ USBIP_VHCI_DRV_NAME);
if (!vhci_driver->hc_device) {
- dbg("sysfs_open_device failed");
+ err("udev_device_new_from_subsystem_sysname failed");
goto err;
}
@@ -398,29 +241,21 @@ int usbip_vhci_driver_open(void)
dbg("available ports: %d", vhci_driver->nports);
- vhci_driver->cdev_list = dlist_new(sizeof(struct usbip_class_device));
- if (!vhci_driver->cdev_list)
- goto err;
-
- if (refresh_class_device_list())
- goto err;
-
if (refresh_imported_device_list())
goto err;
-
return 0;
-
err:
- if (vhci_driver->cdev_list)
- dlist_destroy(vhci_driver->cdev_list);
- if (vhci_driver->hc_device)
- sysfs_close_device(vhci_driver->hc_device);
+ udev_device_unref(vhci_driver->hc_device);
+
if (vhci_driver)
free(vhci_driver);
vhci_driver = NULL;
+
+ udev_unref(udev_context);
+
return -1;
}
@@ -430,53 +265,24 @@ void usbip_vhci_driver_close()
if (!vhci_driver)
return;
- if (vhci_driver->cdev_list)
- dlist_destroy(vhci_driver->cdev_list);
-
- for (int i = 0; i < vhci_driver->nports; i++) {
- if (vhci_driver->idev[i].cdev_list)
- dlist_destroy(vhci_driver->idev[i].cdev_list);
- }
+ udev_device_unref(vhci_driver->hc_device);
- if (vhci_driver->hc_device)
- sysfs_close_device(vhci_driver->hc_device);
free(vhci_driver);
vhci_driver = NULL;
+
+ udev_unref(udev_context);
}
int usbip_vhci_refresh_device_list(void)
{
- if (vhci_driver->cdev_list)
- dlist_destroy(vhci_driver->cdev_list);
-
-
- for (int i = 0; i < vhci_driver->nports; i++) {
- if (vhci_driver->idev[i].cdev_list)
- dlist_destroy(vhci_driver->idev[i].cdev_list);
- }
-
- vhci_driver->cdev_list = dlist_new(sizeof(struct usbip_class_device));
- if (!vhci_driver->cdev_list)
- goto err;
-
- if (refresh_class_device_list())
- goto err;
if (refresh_imported_device_list())
goto err;
return 0;
err:
- if (vhci_driver->cdev_list)
- dlist_destroy(vhci_driver->cdev_list);
-
- for (int i = 0; i < vhci_driver->nports; i++) {
- if (vhci_driver->idev[i].cdev_list)
- dlist_destroy(vhci_driver->idev[i].cdev_list);
- }
-
dbg("failed to refresh device list");
return -1;
}
@@ -494,24 +300,24 @@ int usbip_vhci_get_free_port(void)
int usbip_vhci_attach_device2(uint8_t port, int sockfd, uint32_t devid,
uint32_t speed) {
- struct sysfs_attribute *attr_attach;
char buff[200]; /* what size should be ? */
+ char attach_attr_path[SYSFS_PATH_MAX];
+ char attr_attach[] = "attach";
+ const char *path;
int ret;
- attr_attach = sysfs_get_device_attr(vhci_driver->hc_device, "attach");
- if (!attr_attach) {
- dbg("sysfs_get_device_attr(\"attach\") failed: %s",
- vhci_driver->hc_device->name);
- return -1;
- }
-
- snprintf(buff, sizeof(buff), "%u %u %u %u",
+ snprintf(buff, sizeof(buff), "%u %d %u %u",
port, sockfd, devid, speed);
dbg("writing: %s", buff);
- ret = sysfs_write_attribute(attr_attach, buff, strlen(buff));
+ path = udev_device_get_syspath(vhci_driver->hc_device);
+ snprintf(attach_attr_path, sizeof(attach_attr_path), "%s/%s",
+ path, attr_attach);
+ dbg("attach attribute path: %s", attach_attr_path);
+
+ ret = write_sysfs_attribute(attach_attr_path, buff, strlen(buff));
if (ret < 0) {
- dbg("sysfs_write_attribute failed");
+ dbg("write_sysfs_attribute failed");
return -1;
}
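
The string written to the attach attribute is "<port> <sockfd> <devid> <speed>", where devid carries busnum in the high 16 bits and devnum in the low 16 bits, matching the decode in parse_status() above. A small sketch with make_devid() as an assumed helper name and illustrative values:

#include <stdint.h>
#include <stdio.h>

/* devid layout used by vhci: busnum << 16 | devnum */
static uint32_t make_devid(uint8_t busnum, uint8_t devnum)
{
	return ((uint32_t)busnum << 16) | devnum;
}

int main(void)
{
	char buff[200];
	unsigned int port = 0;
	int sockfd = 7;			/* illustrative values */
	uint32_t speed = 3;		/* USB_SPEED_HIGH */

	snprintf(buff, sizeof(buff), "%u %d %u %u",
		 port, sockfd, make_devid(2, 3), speed);
	printf("%s\n", buff);		/* prints "0 7 131075 3" */
	return 0;
}
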
@@ -536,23 +342,23 @@ int usbip_vhci_attach_device(uint8_t port, int sockfd, uint8_t busnum,
int usbip_vhci_detach_device(uint8_t port)
{
- struct sysfs_attribute *attr_detach;
+ char detach_attr_path[SYSFS_PATH_MAX];
+ char attr_detach[] = "detach";
char buff[200]; /* what size should be ? */
+ const char *path;
int ret;
- attr_detach = sysfs_get_device_attr(vhci_driver->hc_device, "detach");
- if (!attr_detach) {
- dbg("sysfs_get_device_attr(\"detach\") failed: %s",
- vhci_driver->hc_device->name);
- return -1;
- }
-
snprintf(buff, sizeof(buff), "%u", port);
dbg("writing: %s", buff);
- ret = sysfs_write_attribute(attr_detach, buff, strlen(buff));
+ path = udev_device_get_syspath(vhci_driver->hc_device);
+ snprintf(detach_attr_path, sizeof(detach_attr_path), "%s/%s",
+ path, attr_detach);
+ dbg("detach attribute path: %s", detach_attr_path);
+
+ ret = write_sysfs_attribute(detach_attr_path, buff, strlen(buff));
if (ret < 0) {
- dbg("sysfs_write_attribute failed");
+ dbg("write_sysfs_attribute failed");
return -1;
}
@@ -573,7 +379,8 @@ int usbip_vhci_imported_device_dump(struct usbip_imported_device *idev)
if (idev->status == VDEV_ST_NULL || idev->status == VDEV_ST_NOTASSIGNED)
return 0;
- ret = read_record(idev->port, host, serv, remote_busid);
+ ret = read_record(idev->port, host, sizeof(host), serv, sizeof(serv),
+ remote_busid);
if (ret) {
err("read_record");
read_record_error = 1;
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h b/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
index e071f8049c1f..fa2316cf2cac 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
@@ -5,7 +5,7 @@
#ifndef __VHCI_DRIVER_H
#define __VHCI_DRIVER_H
-#include <sysfs/libsysfs.h>
+#include <libudev.h>
#include <stdint.h>
#include "usbip_common.h"
@@ -13,11 +13,6 @@
#define USBIP_VHCI_BUS_TYPE "platform"
#define MAXNPORT 128
-struct usbip_class_device {
- char class_path[SYSFS_PATH_MAX];
- char dev_path[SYSFS_PATH_MAX];
-};
-
struct usbip_imported_device {
uint8_t port;
uint32_t status;
@@ -28,18 +23,13 @@ struct usbip_imported_device {
uint8_t devnum;
/* usbip_class_device list */
- struct dlist *cdev_list;
struct usbip_usb_device udev;
};
struct usbip_vhci_driver {
- char sysfs_mntpath[SYSFS_PATH_MAX];
/* /sys/devices/platform/vhci_hcd */
- struct sysfs_device *hc_device;
-
- /* usbip_class_device list */
- struct dlist *cdev_list;
+ struct udev_device *hc_device;
int nports;
struct usbip_imported_device idev[MAXNPORT];
diff --git a/drivers/staging/usbip/userspace/src/Makefile.am b/drivers/staging/usbip/userspace/src/Makefile.am
index b4f8c4b04b2f..e81a4ebadeff 100644
--- a/drivers/staging/usbip/userspace/src/Makefile.am
+++ b/drivers/staging/usbip/userspace/src/Makefile.am
@@ -8,5 +8,4 @@ usbip_SOURCES := usbip.h utils.h usbip.c utils.c usbip_network.c \
usbip_attach.c usbip_detach.c usbip_list.c \
usbip_bind.c usbip_unbind.c usbip_port.c
-
usbipd_SOURCES := usbip_network.h usbipd.c usbip_network.c
diff --git a/drivers/staging/usbip/userspace/src/usbip_attach.c b/drivers/staging/usbip/userspace/src/usbip_attach.c
index 085841196522..716a79e284c4 100644
--- a/drivers/staging/usbip/userspace/src/usbip_attach.c
+++ b/drivers/staging/usbip/userspace/src/usbip_attach.c
@@ -17,7 +17,6 @@
*/
#include <sys/stat.h>
-#include <sysfs/libsysfs.h>
#include <limits.h>
#include <stdint.h>
diff --git a/drivers/staging/usbip/userspace/src/usbip_bind.c b/drivers/staging/usbip/userspace/src/usbip_bind.c
index 9ecaf6e574df..fa46141ae68b 100644
--- a/drivers/staging/usbip/userspace/src/usbip_bind.c
+++ b/drivers/staging/usbip/userspace/src/usbip_bind.c
@@ -16,7 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <sysfs/libsysfs.h>
+#include <libudev.h>
#include <errno.h>
#include <stdio.h>
@@ -28,6 +28,7 @@
#include "usbip_common.h"
#include "utils.h"
#include "usbip.h"
+#include "sysfs_utils.h"
enum unbind_status {
UNBIND_ST_OK,
@@ -48,167 +49,92 @@ void usbip_bind_usage(void)
/* call at unbound state */
static int bind_usbip(char *busid)
{
- char bus_type[] = "usb";
char attr_name[] = "bind";
- char sysfs_mntpath[SYSFS_PATH_MAX];
char bind_attr_path[SYSFS_PATH_MAX];
- char intf_busid[SYSFS_BUS_ID_SIZE];
- struct sysfs_device *busid_dev;
- struct sysfs_attribute *bind_attr;
- struct sysfs_attribute *bConfValue;
- struct sysfs_attribute *bNumIntfs;
- int i, failed = 0;
- int rc, ret = -1;
-
- rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
- if (rc < 0) {
- err("sysfs must be mounted: %s", strerror(errno));
- return -1;
- }
+ int rc = -1;
snprintf(bind_attr_path, sizeof(bind_attr_path), "%s/%s/%s/%s/%s/%s",
- sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME,
- USBIP_HOST_DRV_NAME, attr_name);
-
- bind_attr = sysfs_open_attribute(bind_attr_path);
- if (!bind_attr) {
- dbg("problem getting bind attribute: %s", strerror(errno));
- return -1;
- }
+ SYSFS_MNT_PATH, SYSFS_BUS_NAME, SYSFS_BUS_TYPE,
+ SYSFS_DRIVERS_NAME, USBIP_HOST_DRV_NAME, attr_name);
- busid_dev = sysfs_open_device(bus_type, busid);
- if (!busid_dev) {
- dbg("sysfs_open_device %s failed: %s", busid, strerror(errno));
- goto err_close_bind_attr;
- }
-
- bConfValue = sysfs_get_device_attr(busid_dev, "bConfigurationValue");
- bNumIntfs = sysfs_get_device_attr(busid_dev, "bNumInterfaces");
-
- if (!bConfValue || !bNumIntfs) {
- dbg("problem getting device attributes: %s",
+ rc = write_sysfs_attribute(bind_attr_path, busid, strlen(busid));
+ if (rc < 0) {
+ err("error binding device %s to driver: %s", busid,
strerror(errno));
- goto err_close_busid_dev;
- }
-
- for (i = 0; i < atoi(bNumIntfs->value); i++) {
- snprintf(intf_busid, SYSFS_BUS_ID_SIZE, "%s:%.1s.%d", busid,
- bConfValue->value, i);
-
- rc = sysfs_write_attribute(bind_attr, intf_busid,
- SYSFS_BUS_ID_SIZE);
- if (rc < 0) {
- dbg("bind driver at %s failed", intf_busid);
- failed = 1;
- }
+ return -1;
}
- if (!failed)
- ret = 0;
-
-err_close_busid_dev:
- sysfs_close_device(busid_dev);
-err_close_bind_attr:
- sysfs_close_attribute(bind_attr);
-
- return ret;
+ return 0;
}
/* buggy driver may cause dead lock */
static int unbind_other(char *busid)
{
- char bus_type[] = "usb";
- char intf_busid[SYSFS_BUS_ID_SIZE];
- struct sysfs_device *busid_dev;
- struct sysfs_device *intf_dev;
- struct sysfs_driver *intf_drv;
- struct sysfs_attribute *unbind_attr;
- struct sysfs_attribute *bConfValue;
- struct sysfs_attribute *bDevClass;
- struct sysfs_attribute *bNumIntfs;
- int i, rc;
enum unbind_status status = UNBIND_ST_OK;
- busid_dev = sysfs_open_device(bus_type, busid);
- if (!busid_dev) {
- dbg("sysfs_open_device %s failed: %s", busid, strerror(errno));
- return -1;
+ char attr_name[] = "unbind";
+ char unbind_attr_path[SYSFS_PATH_MAX];
+ int rc = -1;
+
+ struct udev *udev;
+ struct udev_device *dev;
+ const char *driver;
+ const char *bDevClass;
+
+ /* Create libudev context. */
+ udev = udev_new();
+
+ /* Get the device. */
+ dev = udev_device_new_from_subsystem_sysname(udev, "usb", busid);
+ if (!dev) {
+ dbg("unable to find device with bus ID %s", busid);
+ goto err_close_busid_dev;
}
- bConfValue = sysfs_get_device_attr(busid_dev, "bConfigurationValue");
- bDevClass = sysfs_get_device_attr(busid_dev, "bDeviceClass");
- bNumIntfs = sysfs_get_device_attr(busid_dev, "bNumInterfaces");
- if (!bConfValue || !bDevClass || !bNumIntfs) {
- dbg("problem getting device attributes: %s",
- strerror(errno));
+ /* Check what kind of device it is. */
+ bDevClass = udev_device_get_sysattr_value(dev, "bDeviceClass");
+ if (!bDevClass) {
+ dbg("unable to get bDevClass device attribute");
goto err_close_busid_dev;
}
- if (!strncmp(bDevClass->value, "09", bDevClass->len)) {
+ if (!strncmp(bDevClass, "09", strlen(bDevClass))) {
dbg("skip unbinding of hub");
goto err_close_busid_dev;
}
- for (i = 0; i < atoi(bNumIntfs->value); i++) {
- snprintf(intf_busid, SYSFS_BUS_ID_SIZE, "%s:%.1s.%d", busid,
- bConfValue->value, i);
- intf_dev = sysfs_open_device(bus_type, intf_busid);
- if (!intf_dev) {
- dbg("could not open interface device: %s",
- strerror(errno));
- goto err_close_busid_dev;
- }
-
- dbg("%s -> %s", intf_dev->name, intf_dev->driver_name);
-
- if (!strncmp("unknown", intf_dev->driver_name, SYSFS_NAME_LEN))
- /* unbound interface */
- continue;
-
- if (!strncmp(USBIP_HOST_DRV_NAME, intf_dev->driver_name,
- SYSFS_NAME_LEN)) {
- /* already bound to usbip-host */
- status = UNBIND_ST_USBIP_HOST;
- continue;
- }
-
- /* unbinding */
- intf_drv = sysfs_open_driver(bus_type, intf_dev->driver_name);
- if (!intf_drv) {
- dbg("could not open interface driver on %s: %s",
- intf_dev->name, strerror(errno));
- goto err_close_intf_dev;
- }
+ /* Get the device driver. */
+ driver = udev_device_get_driver(dev);
+ if (!driver) {
+ /* No driver bound to this device. */
+ goto out;
+ }
- unbind_attr = sysfs_get_driver_attr(intf_drv, "unbind");
- if (!unbind_attr) {
- dbg("problem getting interface driver attribute: %s",
- strerror(errno));
- goto err_close_intf_drv;
- }
+ if (!strncmp(USBIP_HOST_DRV_NAME, driver,
+ strlen(USBIP_HOST_DRV_NAME))) {
+ /* Already bound to usbip-host. */
+ status = UNBIND_ST_USBIP_HOST;
+ goto out;
+ }
- rc = sysfs_write_attribute(unbind_attr, intf_dev->bus_id,
- SYSFS_BUS_ID_SIZE);
- if (rc < 0) {
- /* NOTE: why keep unbinding other interfaces? */
- dbg("unbind driver at %s failed", intf_dev->bus_id);
- status = UNBIND_ST_FAILED;
- }
+ /* Unbind device from driver. */
+ snprintf(unbind_attr_path, sizeof(unbind_attr_path), "%s/%s/%s/%s/%s/%s",
+ SYSFS_MNT_PATH, SYSFS_BUS_NAME, SYSFS_BUS_TYPE,
+ SYSFS_DRIVERS_NAME, driver, attr_name);
- sysfs_close_driver(intf_drv);
- sysfs_close_device(intf_dev);
+ rc = write_sysfs_attribute(unbind_attr_path, busid, strlen(busid));
+ if (rc < 0) {
+ err("error unbinding device %s from driver", busid);
+ goto err_close_busid_dev;
}
goto out;
-err_close_intf_drv:
- sysfs_close_driver(intf_drv);
-err_close_intf_dev:
- sysfs_close_device(intf_dev);
err_close_busid_dev:
status = UNBIND_ST_FAILED;
out:
- sysfs_close_device(busid_dev);
+ udev_device_unref(dev);
+ udev_unref(udev);
return status;
}
@@ -216,6 +142,17 @@ out:
static int bind_device(char *busid)
{
int rc;
+ struct udev *udev;
+ struct udev_device *dev;
+
+ /* Check whether the device with this bus ID exists. */
+ udev = udev_new();
+ dev = udev_device_new_from_subsystem_sysname(udev, "usb", busid);
+ if (!dev) {
+ err("device with the specified bus ID does not exist");
+ return -1;
+ }
+ udev_unref(udev);
rc = unbind_other(busid);
if (rc == UNBIND_ST_FAILED) {
@@ -240,7 +177,7 @@ static int bind_device(char *busid)
return -1;
}
- printf("bind device on busid %s: complete\n", busid);
+ info("bind device on busid %s: complete", busid);
return 0;
}
diff --git a/drivers/staging/usbip/userspace/src/usbip_detach.c b/drivers/staging/usbip/userspace/src/usbip_detach.c
index 13308df613a2..05c6d15856eb 100644
--- a/drivers/staging/usbip/userspace/src/usbip_detach.c
+++ b/drivers/staging/usbip/userspace/src/usbip_detach.c
@@ -16,8 +16,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <sysfs/libsysfs.h>
-
#include <ctype.h>
#include <limits.h>
#include <stdint.h>
diff --git a/drivers/staging/usbip/userspace/src/usbip_list.c b/drivers/staging/usbip/userspace/src/usbip_list.c
index 237e099337a1..d5ce34a410e7 100644
--- a/drivers/staging/usbip/userspace/src/usbip_list.c
+++ b/drivers/staging/usbip/userspace/src/usbip_list.c
@@ -17,7 +17,7 @@
*/
#include <sys/types.h>
-#include <sysfs/libsysfs.h>
+#include <libudev.h>
#include <errno.h>
#include <stdbool.h>
@@ -54,7 +54,7 @@ static int get_exported_devices(char *host, int sockfd)
struct usbip_usb_device udev;
struct usbip_usb_interface uintf;
unsigned int i;
- int j, rc;
+ int rc, j;
rc = usbip_net_send_op_common(sockfd, OP_REQ_DEVLIST, 0);
if (rc < 0) {
@@ -107,19 +107,20 @@ static int get_exported_devices(char *host, int sockfd)
for (j = 0; j < udev.bNumInterfaces; j++) {
rc = usbip_net_recv(sockfd, &uintf, sizeof(uintf));
if (rc < 0) {
- dbg("usbip_net_recv failed: usbip_usb_intf[%d]",
- j);
+ err("usbip_net_recv failed: usbip_usb_intf[%d]",
+ j);
return -1;
}
usbip_net_pack_usb_interface(0, &uintf);
usbip_names_get_class(class_name, sizeof(class_name),
- uintf.bInterfaceClass,
- uintf.bInterfaceSubClass,
- uintf.bInterfaceProtocol);
+ uintf.bInterfaceClass,
+ uintf.bInterfaceSubClass,
+ uintf.bInterfaceProtocol);
printf("%11s: %2d - %s\n", "", j, class_name);
}
+
printf("\n");
}
@@ -150,8 +151,8 @@ static int list_exported_devices(char *host)
return 0;
}
-static void print_device(char *busid, char *vendor, char *product,
- bool parsable)
+static void print_device(const char *busid, const char *vendor,
+ const char *product, bool parsable)
{
if (parsable)
printf("busid=%s#usbid=%.4s:%.4s#", busid, vendor, product);
@@ -165,106 +166,73 @@ static void print_product_name(char *product_name, bool parsable)
printf(" %s\n", product_name);
}
-static void print_interface(char *busid, char *driver, bool parsable)
-{
- if (parsable)
- printf("%s=%s#", busid, driver);
- else
- printf("%9s%s -> %s\n", "", busid, driver);
-}
-
-static int is_device(void *x)
-{
- struct sysfs_attribute *devpath;
- struct sysfs_device *dev = x;
- int ret = 0;
-
- devpath = sysfs_get_device_attr(dev, "devpath");
- if (devpath && *devpath->value != '0')
- ret = 1;
-
- return ret;
-}
-
-static int devcmp(void *a, void *b)
-{
- return strcmp(a, b);
-}
-
static int list_devices(bool parsable)
{
- char bus_type[] = "usb";
- char busid[SYSFS_BUS_ID_SIZE];
+ struct udev *udev;
+ struct udev_enumerate *enumerate;
+ struct udev_list_entry *devices, *dev_list_entry;
+ struct udev_device *dev;
+ const char *path;
+ const char *idVendor;
+ const char *idProduct;
+ const char *bConfValue;
+ const char *bNumIntfs;
+ const char *busid;
char product_name[128];
- struct sysfs_bus *ubus;
- struct sysfs_device *dev;
- struct sysfs_device *intf;
- struct sysfs_attribute *idVendor;
- struct sysfs_attribute *idProduct;
- struct sysfs_attribute *bConfValue;
- struct sysfs_attribute *bNumIntfs;
- struct dlist *devlist;
- int i;
int ret = -1;
- ubus = sysfs_open_bus(bus_type);
- if (!ubus) {
- err("could not open %s bus: %s", bus_type, strerror(errno));
- return -1;
- }
-
- devlist = sysfs_get_bus_devices(ubus);
- if (!devlist) {
- err("could not get %s bus devices: %s", bus_type,
- strerror(errno));
- goto err_out;
- }
-
- /* remove interfaces and root hubs from device list */
- dlist_filter_sort(devlist, is_device, devcmp);
-
- if (!parsable) {
- printf("Local USB devices\n");
- printf("=================\n");
- }
- dlist_for_each_data(devlist, dev, struct sysfs_device) {
- idVendor = sysfs_get_device_attr(dev, "idVendor");
- idProduct = sysfs_get_device_attr(dev, "idProduct");
- bConfValue = sysfs_get_device_attr(dev, "bConfigurationValue");
- bNumIntfs = sysfs_get_device_attr(dev, "bNumInterfaces");
+ /* Create libudev context. */
+ udev = udev_new();
+
+ /* Create libudev device enumeration. */
+ enumerate = udev_enumerate_new(udev);
+
+ /* Take only USB devices that are not hubs and do not have
+ * the bInterfaceNumber attribute, i.e. are not interfaces.
+ */
+ udev_enumerate_add_match_subsystem(enumerate, "usb");
+ udev_enumerate_add_nomatch_sysattr(enumerate, "bDeviceClass", "09");
+ udev_enumerate_add_nomatch_sysattr(enumerate, "bInterfaceNumber", NULL);
+ udev_enumerate_scan_devices(enumerate);
+
+ devices = udev_enumerate_get_list_entry(enumerate);
+
+ /* Show information about each device. */
+ udev_list_entry_foreach(dev_list_entry, devices) {
+ path = udev_list_entry_get_name(dev_list_entry);
+ dev = udev_device_new_from_syspath(udev, path);
+
+ /* Get device information. */
+ idVendor = udev_device_get_sysattr_value(dev, "idVendor");
+ idProduct = udev_device_get_sysattr_value(dev, "idProduct");
+ bConfValue = udev_device_get_sysattr_value(dev, "bConfigurationValue");
+ bNumIntfs = udev_device_get_sysattr_value(dev, "bNumInterfaces");
+ busid = udev_device_get_sysname(dev);
if (!idVendor || !idProduct || !bConfValue || !bNumIntfs) {
err("problem getting device attributes: %s",
strerror(errno));
goto err_out;
}
- /* get product name */
+ /* Get product name. */
usbip_names_get_product(product_name, sizeof(product_name),
- strtol(idVendor->value, NULL, 16),
- strtol(idProduct->value, NULL, 16));
- print_device(dev->bus_id, idVendor->value, idProduct->value,
- parsable);
+ strtol(idVendor, NULL, 16),
+ strtol(idProduct, NULL, 16));
+
+ /* Print information. */
+ print_device(busid, idVendor, idProduct, parsable);
print_product_name(product_name, parsable);
- for (i = 0; i < atoi(bNumIntfs->value); i++) {
- snprintf(busid, sizeof(busid), "%s:%.1s.%d",
- dev->bus_id, bConfValue->value, i);
- intf = sysfs_open_device(bus_type, busid);
- if (!intf) {
- err("could not open device interface: %s",
- strerror(errno));
- goto err_out;
- }
- print_interface(busid, intf->driver_name, parsable);
- sysfs_close_device(intf);
- }
printf("\n");
+
+ udev_device_unref(dev);
}
ret = 0;
err_out:
- sysfs_close_bus(ubus);
+ udev_enumerate_unref(enumerate);
+ udev_unref(udev);
return ret;
}
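
The same enumeration pattern, reduced to a standalone sketch that lists USB devices (skipping hubs and interfaces) with their bound driver; error handling is minimal and the output format is illustrative:

#include <stdio.h>
#include <libudev.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_enumerate *enumerate;
	struct udev_list_entry *devices, *entry;

	if (!udev)
		return 1;

	enumerate = udev_enumerate_new(udev);
	udev_enumerate_add_match_subsystem(enumerate, "usb");
	udev_enumerate_add_nomatch_sysattr(enumerate, "bDeviceClass", "09");
	udev_enumerate_add_nomatch_sysattr(enumerate, "bInterfaceNumber", NULL);
	udev_enumerate_scan_devices(enumerate);

	devices = udev_enumerate_get_list_entry(enumerate);
	udev_list_entry_foreach(entry, devices) {
		const char *path = udev_list_entry_get_name(entry);
		struct udev_device *dev =
			udev_device_new_from_syspath(udev, path);
		const char *driver = udev_device_get_driver(dev);

		printf("%s -> %s\n", udev_device_get_sysname(dev),
		       driver ? driver : "(none)");
		udev_device_unref(dev);
	}

	udev_enumerate_unref(enumerate);
	udev_unref(udev);
	return 0;
}
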
diff --git a/drivers/staging/usbip/userspace/src/usbip_network.h b/drivers/staging/usbip/userspace/src/usbip_network.h
index f19ae19799b4..c1e875cf1078 100644
--- a/drivers/staging/usbip/userspace/src/usbip_network.h
+++ b/drivers/staging/usbip/userspace/src/usbip_network.h
@@ -10,7 +10,6 @@
#endif
#include <sys/types.h>
-#include <sysfs/libsysfs.h>
#include <stdint.h>
diff --git a/drivers/staging/usbip/userspace/src/usbip_unbind.c b/drivers/staging/usbip/userspace/src/usbip_unbind.c
index d5a9ab6af2a6..a4a496c9cbaf 100644
--- a/drivers/staging/usbip/userspace/src/usbip_unbind.c
+++ b/drivers/staging/usbip/userspace/src/usbip_unbind.c
@@ -16,7 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <sysfs/libsysfs.h>
+#include <libudev.h>
#include <errno.h>
#include <stdio.h>
@@ -27,6 +27,7 @@
#include "usbip_common.h"
#include "utils.h"
#include "usbip.h"
+#include "sysfs_utils.h"
static const char usbip_unbind_usage_string[] =
"usbip unbind <args>\n"
@@ -41,115 +42,69 @@ void usbip_unbind_usage(void)
static int unbind_device(char *busid)
{
char bus_type[] = "usb";
- struct sysfs_driver *usbip_host_drv;
- struct sysfs_device *dev;
- struct dlist *devlist;
- int verified = 0;
int rc, ret = -1;
- char attr_name[] = "bConfigurationValue";
- char sysfs_mntpath[SYSFS_PATH_MAX];
- char busid_attr_path[SYSFS_PATH_MAX];
- struct sysfs_attribute *busid_attr;
- char *val = NULL;
- int len;
-
- /* verify the busid device is using usbip-host */
- usbip_host_drv = sysfs_open_driver(bus_type, USBIP_HOST_DRV_NAME);
- if (!usbip_host_drv) {
- err("could not open %s driver: %s", USBIP_HOST_DRV_NAME,
- strerror(errno));
- return -1;
- }
+ char unbind_attr_name[] = "unbind";
+ char unbind_attr_path[SYSFS_PATH_MAX];
+ char rebind_attr_name[] = "rebind";
+ char rebind_attr_path[SYSFS_PATH_MAX];
- devlist = sysfs_get_driver_devices(usbip_host_drv);
- if (!devlist) {
- err("%s is not in use by any devices", USBIP_HOST_DRV_NAME);
- goto err_close_usbip_host_drv;
- }
+ struct udev *udev;
+ struct udev_device *dev;
+ const char *driver;
- dlist_for_each_data(devlist, dev, struct sysfs_device) {
- if (!strncmp(busid, dev->name, strlen(busid)) &&
- !strncmp(dev->driver_name, USBIP_HOST_DRV_NAME,
- strlen(USBIP_HOST_DRV_NAME))) {
- verified = 1;
- break;
- }
- }
+ /* Create libudev context. */
+ udev = udev_new();
- if (!verified) {
- err("device on busid %s is not using %s", busid,
- USBIP_HOST_DRV_NAME);
- goto err_close_usbip_host_drv;
+ /* Check whether the device with this bus ID exists. */
+ dev = udev_device_new_from_subsystem_sysname(udev, "usb", busid);
+ if (!dev) {
+ err("device with the specified bus ID does not exist");
+ goto err_close_udev;
}
- /*
- * NOTE: A read and write of an attribute value of the device busid
- * refers to must be done to start probing. That way a rebind of the
- * default driver for the device occurs.
- *
- * This seems very hackish and adds a lot of pointless code. I think it
- * should be done in the kernel by the driver after del_match_busid is
- * finished!
- */
-
- rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
- if (rc < 0) {
- err("sysfs must be mounted: %s", strerror(errno));
- return -1;
+ /* Check whether the device is using the usbip-host driver. */
+ driver = udev_device_get_driver(dev);
+ if (!driver || strcmp(driver, "usbip-host")) {
+ err("device is not bound to usbip-host driver");
+ goto err_close_udev;
}
- snprintf(busid_attr_path, sizeof(busid_attr_path), "%s/%s/%s/%s/%s/%s",
- sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DEVICES_NAME,
- busid, attr_name);
+ /* Unbind device from driver. */
+ snprintf(unbind_attr_path, sizeof(unbind_attr_path), "%s/%s/%s/%s/%s/%s",
+ SYSFS_MNT_PATH, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME,
+ USBIP_HOST_DRV_NAME, unbind_attr_name);
- /* read a device attribute */
- busid_attr = sysfs_open_attribute(busid_attr_path);
- if (!busid_attr) {
- err("could not open %s/%s: %s", busid, attr_name,
- strerror(errno));
- return -1;
- }
-
- if (sysfs_read_attribute(busid_attr) < 0) {
- err("problem reading attribute: %s", strerror(errno));
- goto err_out;
+ rc = write_sysfs_attribute(unbind_attr_path, busid, strlen(busid));
+ if (rc < 0) {
+ err("error unbinding device %s from driver", busid);
+ goto err_close_udev;
}
- len = busid_attr->len;
- val = malloc(len);
- *val = *busid_attr->value;
- sysfs_close_attribute(busid_attr);
-
- /* notify driver of unbind */
+ /* Notify driver of unbind. */
rc = modify_match_busid(busid, 0);
if (rc < 0) {
err("unable to unbind device on %s", busid);
- goto err_out;
+ goto err_close_udev;
}
- /* write the device attribute */
- busid_attr = sysfs_open_attribute(busid_attr_path);
- if (!busid_attr) {
- err("could not open %s/%s: %s", busid, attr_name,
- strerror(errno));
- return -1;
- }
+ /* Trigger new probing. */
+ snprintf(rebind_attr_path, sizeof(rebind_attr_path), "%s/%s/%s/%s/%s/%s",
+ SYSFS_MNT_PATH, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME,
+ USBIP_HOST_DRV_NAME, rebind_attr_name);
- rc = sysfs_write_attribute(busid_attr, val, len);
+ rc = write_sysfs_attribute(rebind_attr_path, busid, strlen(busid));
if (rc < 0) {
- err("problem writing attribute: %s", strerror(errno));
- goto err_out;
+ err("error rebinding");
+ goto err_close_udev;
}
- sysfs_close_attribute(busid_attr);
ret = 0;
- printf("unbind device on busid %s: complete\n", busid);
+ info("unbind device on busid %s: complete", busid);
-err_out:
- free(val);
-err_close_usbip_host_drv:
- sysfs_close_driver(usbip_host_drv);
+err_close_udev:
+ udev_device_unref(dev);
+ udev_unref(udev);
return ret;
}
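Both the unbind and rebind steps above go through write_sysfs_attribute() from the new sysfs_utils.h, whose body is not part of this hunk. A plausible minimal sketch of such a helper (an assumption for illustration, not the committed implementation) is a plain open/write/close on the attribute path:

#include <fcntl.h>
#include <unistd.h>

/* Hypothetical sketch of a sysfs attribute writer; the in-tree helper
 * may differ in error handling and reporting. */
static int write_sysfs_attribute_sketch(const char *attr_path,
					const char *new_value, size_t len)
{
	int fd;
	ssize_t length;

	fd = open(attr_path, O_WRONLY);
	if (fd < 0)
		return -1;

	length = write(fd, new_value, len);
	close(fd);

	return length == (ssize_t)len ? 0 : -1;
}

A caller then only needs the attribute paths built above, writing the bus ID first into .../drivers/usbip-host/unbind and afterwards into .../rebind.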
diff --git a/drivers/staging/usbip/userspace/src/usbipd.c b/drivers/staging/usbip/userspace/src/usbipd.c
index 7980f8b5517b..2cae4ce48d11 100644
--- a/drivers/staging/usbip/userspace/src/usbipd.c
+++ b/drivers/staging/usbip/userspace/src/usbipd.c
@@ -43,6 +43,7 @@
#include "usbip_host_driver.h"
#include "usbip_common.h"
#include "usbip_network.h"
+#include "list.h"
#undef PROGNAME
#define PROGNAME "usbipd"
@@ -93,6 +94,7 @@ static int recv_request_import(int sockfd)
struct op_common reply;
struct usbip_exported_device *edev;
struct usbip_usb_device pdu_udev;
+ struct list_head *i;
int found = 0;
int error = 0;
int rc;
@@ -107,8 +109,8 @@ static int recv_request_import(int sockfd)
}
PACK_OP_IMPORT_REQUEST(0, &req);
- dlist_for_each_data(host_driver->edev_list, edev,
- struct usbip_exported_device) {
+ list_for_each(i, &host_driver->edev_list) {
+ edev = list_entry(i, struct usbip_exported_device, node);
if (!strncmp(req.busid, edev->udev.busid, SYSFS_BUS_ID_SIZE)) {
info("found requested device: %s", req.busid);
found = 1;
@@ -161,13 +163,12 @@ static int send_reply_devlist(int connfd)
struct usbip_usb_device pdu_udev;
struct usbip_usb_interface pdu_uinf;
struct op_devlist_reply reply;
- int i;
- int rc;
+ struct list_head *j;
+ int rc, i;
reply.ndev = 0;
/* number of exported devices */
- dlist_for_each_data(host_driver->edev_list, edev,
- struct usbip_exported_device) {
+ list_for_each(j, &host_driver->edev_list) {
reply.ndev += 1;
}
info("exportable devices: %d", reply.ndev);
@@ -185,8 +186,8 @@ static int send_reply_devlist(int connfd)
return -1;
}
- dlist_for_each_data(host_driver->edev_list, edev,
- struct usbip_exported_device) {
+ list_for_each(j, &host_driver->edev_list) {
+ edev = list_entry(j, struct usbip_exported_device, node);
dump_usb_device(&edev->udev);
memcpy(&pdu_udev, &edev->udev, sizeof(pdu_udev));
usbip_net_pack_usb_device(1, &pdu_udev);
@@ -203,9 +204,9 @@ static int send_reply_devlist(int connfd)
usbip_net_pack_usb_interface(1, &pdu_uinf);
rc = usbip_net_send(connfd, &pdu_uinf,
- sizeof(pdu_uinf));
+ sizeof(pdu_uinf));
if (rc < 0) {
- dbg("usbip_net_send failed: pdu_uinf");
+ err("usbip_net_send failed: pdu_uinf");
return -1;
}
}
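The daemon now iterates exported devices with the kernel-style list helpers from the userspace list.h. A compact sketch of that idiom with a made-up element type (struct and function names are illustrative):

#include "list.h"

/* Illustrative element type: each item is linked into a list through
 * its embedded list_head, just like usbip_exported_device above. */
struct demo_item {
	int busnum;
	struct list_head node;
};

static int count_items(struct list_head *head)
{
	struct list_head *pos;
	struct demo_item *item;
	int n = 0;

	list_for_each(pos, head) {
		item = list_entry(pos, struct demo_item, node);
		(void)item;	/* container recovered from its node member */
		n++;
	}

	return n;
}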
diff --git a/drivers/staging/usbip/userspace/src/utils.c b/drivers/staging/usbip/userspace/src/utils.c
index 2d4966e6289c..2b3d6d235015 100644
--- a/drivers/staging/usbip/userspace/src/utils.c
+++ b/drivers/staging/usbip/userspace/src/utils.c
@@ -16,61 +16,37 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <sysfs/libsysfs.h>
-
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include "usbip_common.h"
#include "utils.h"
+#include "sysfs_utils.h"
int modify_match_busid(char *busid, int add)
{
- char bus_type[] = "usb";
char attr_name[] = "match_busid";
- char buff[SYSFS_BUS_ID_SIZE + 4];
- char sysfs_mntpath[SYSFS_PATH_MAX];
+ char command[SYSFS_BUS_ID_SIZE + 4];
char match_busid_attr_path[SYSFS_PATH_MAX];
- struct sysfs_attribute *match_busid_attr;
- int rc, ret = 0;
-
- if (strnlen(busid, SYSFS_BUS_ID_SIZE) > SYSFS_BUS_ID_SIZE - 1) {
- dbg("busid is too long");
- return -1;
- }
-
- rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
- if (rc < 0) {
- err("sysfs must be mounted: %s", strerror(errno));
- return -1;
- }
+ int rc;
snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
- "%s/%s/%s/%s/%s/%s", sysfs_mntpath, SYSFS_BUS_NAME, bus_type,
- SYSFS_DRIVERS_NAME, USBIP_HOST_DRV_NAME, attr_name);
-
- match_busid_attr = sysfs_open_attribute(match_busid_attr_path);
- if (!match_busid_attr) {
- dbg("problem getting match_busid attribute: %s",
- strerror(errno));
- return -1;
- }
+ "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
+ SYSFS_BUS_TYPE, SYSFS_DRIVERS_NAME, USBIP_HOST_DRV_NAME,
+ attr_name);
if (add)
- snprintf(buff, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
+ snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
else
- snprintf(buff, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
+ snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
- dbg("write \"%s\" to %s", buff, match_busid_attr->path);
-
- rc = sysfs_write_attribute(match_busid_attr, buff, sizeof(buff));
+ rc = write_sysfs_attribute(match_busid_attr_path, command,
+ sizeof(command));
if (rc < 0) {
dbg("failed to write match_busid: %s", strerror(errno));
- ret = -1;
+ return -1;
}
- sysfs_close_attribute(match_busid_attr);
-
- return ret;
+ return 0;
}
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 99dd2b1656c9..70e17551943d 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -205,8 +205,6 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
}
}
- pr_info("changed %d\n", changed);
-
if ((hcd->state == HC_STATE_SUSPENDED) && (changed == 1))
usb_hcd_resume_root_hub(hcd);
@@ -273,14 +271,14 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
break;
case USB_PORT_FEAT_POWER:
- usbip_dbg_vhci_rh(" ClearPortFeature: "
- "USB_PORT_FEAT_POWER\n");
+ usbip_dbg_vhci_rh(
+ " ClearPortFeature: USB_PORT_FEAT_POWER\n");
dum->port_status[rhport] = 0;
dum->resuming = 0;
break;
case USB_PORT_FEAT_C_RESET:
- usbip_dbg_vhci_rh(" ClearPortFeature: "
- "USB_PORT_FEAT_C_RESET\n");
+ usbip_dbg_vhci_rh(
+ " ClearPortFeature: USB_PORT_FEAT_C_RESET\n");
switch (dum->vdev[rhport].speed) {
case USB_SPEED_HIGH:
dum->port_status[rhport] |=
@@ -339,16 +337,17 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (dum->vdev[rhport].ud.status ==
VDEV_ST_NOTASSIGNED) {
- usbip_dbg_vhci_rh(" enable rhport %d "
- "(status %u)\n",
- rhport,
- dum->vdev[rhport].ud.status);
+ usbip_dbg_vhci_rh(
+ " enable rhport %d (status %u)\n",
+ rhport,
+ dum->vdev[rhport].ud.status);
dum->port_status[rhport] |=
USB_PORT_STAT_ENABLE;
}
}
((__le16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]);
- ((__le16 *) buf)[1] = cpu_to_le16(dum->port_status[rhport] >> 16);
+ ((__le16 *) buf)[1] =
+ cpu_to_le16(dum->port_status[rhport] >> 16);
usbip_dbg_vhci_rh(" GetPortStatus bye %x %x\n", ((u16 *)buf)[0],
((u16 *)buf)[1]);
@@ -360,12 +359,12 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case SetPortFeature:
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- usbip_dbg_vhci_rh(" SetPortFeature: "
- "USB_PORT_FEAT_SUSPEND\n");
+ usbip_dbg_vhci_rh(
+ " SetPortFeature: USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_RESET:
- usbip_dbg_vhci_rh(" SetPortFeature: "
- "USB_PORT_FEAT_RESET\n");
+ usbip_dbg_vhci_rh(
+ " SetPortFeature: USB_PORT_FEAT_RESET\n");
/* if it's already running, disconnect first */
if (dum->port_status[rhport] & USB_PORT_STAT_ENABLE) {
dum->port_status[rhport] &=
@@ -537,9 +536,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
case USB_REQ_GET_DESCRIPTOR:
if (ctrlreq->wValue == cpu_to_le16(USB_DT_DEVICE << 8))
- usbip_dbg_vhci_hc("Not yet?: "
- "Get_Descriptor to device 0 "
- "(get max pipe size)\n");
+ usbip_dbg_vhci_hc(
+ "Not yet?:Get_Descriptor to device 0 (get max pipe size)\n");
if (vdev->udev)
usb_put_dev(vdev->udev);
@@ -548,8 +546,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
default:
/* NOT REACHED */
- dev_err(dev, "invalid request to devnum 0 bRequest %u, "
- "wValue %u\n", ctrlreq->bRequest,
+ dev_err(dev,
+ "invalid request to devnum 0 bRequest %u, wValue %u\n",
+ ctrlreq->bRequest,
ctrlreq->wValue);
ret = -EINVAL;
goto no_need_xmit;
@@ -1070,8 +1069,9 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
spin_unlock(&the_controller->lock);
if (connected > 0) {
- dev_info(&pdev->dev, "We have %d active connection%s. Do not "
- "suspend.\n", connected, (connected == 1 ? "" : "s"));
+ dev_info(&pdev->dev,
+ "We have %d active connection%s. Do not suspend.\n",
+ connected, (connected == 1 ? "" : "s"));
ret = -EBUSY;
} else {
dev_info(&pdev->dev, "suspend vhci_hcd");
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index baba127081b3..47bddcdde0a6 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -47,8 +47,8 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
* up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
* port number and its peer IP address.
*/
- out += sprintf(out, "prt sta spd bus dev socket "
- "local_busid\n");
+ out += sprintf(out,
+ "prt sta spd bus dev socket local_busid\n");
for (i = 0; i < VHCI_NPORTS; i++) {
struct vhci_device *vdev = port_to_vdev(i);
@@ -114,7 +114,8 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
int err;
__u32 rhport = 0;
- sscanf(buf, "%u", &rhport);
+ if (sscanf(buf, "%u", &rhport) != 1)
+ return -EINVAL;
/* check rhport */
if (rhport >= VHCI_NPORTS) {
@@ -183,7 +184,8 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
* @devid: unique device identifier in a remote host
* @speed: usb device speed in a remote host
*/
- sscanf(buf, "%u %u %u %u", &rhport, &sockfd, &devid, &speed);
+ if (sscanf(buf, "%u %u %u %u", &rhport, &sockfd, &devid, &speed) != 1)
+ return -EINVAL;
usbip_dbg_vhci_sysfs("rhport(%u) sockfd(%u) devid(%u) speed(%u)\n",
rhport, sockfd, devid, speed);
@@ -215,8 +217,9 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- dev_info(dev, "rhport(%u) sockfd(%d) devid(%u) speed(%u)\n",
- rhport, sockfd, devid, speed);
+ dev_info(dev,
+ "rhport(%u) sockfd(%d) devid(%u) speed(%u) speed_str(%s)\n",
+ rhport, sockfd, devid, speed, usb_speed_string(speed));
vdev->devid = devid;
vdev->speed = speed;
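Both store callbacks now bail out unless sscanf() converted every expected field. The same guard for the four-value attach string, written as a stand-alone helper (the helper name is illustrative; the callbacks above inline the check):

#include <linux/kernel.h>
#include <linux/errno.h>

/* Illustrative parser for an "rhport sockfd devid speed" attach string;
 * sscanf() returns the number of successful conversions, so anything
 * other than 4 means a malformed input. */
static int parse_attach_args(const char *buf, unsigned int *rhport,
			     unsigned int *sockfd, unsigned int *devid,
			     unsigned int *speed)
{
	if (sscanf(buf, "%u %u %u %u", rhport, sockfd, devid, speed) != 4)
		return -EINVAL;
	return 0;
}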
diff --git a/drivers/staging/vt6655/80211mgr.c b/drivers/staging/vt6655/80211mgr.c
index 7949d58ad7d1..9aa2e46c4a5d 100644
--- a/drivers/staging/vt6655/80211mgr.c
+++ b/drivers/staging/vt6655/80211mgr.c
@@ -91,12 +91,15 @@ vMgrEncodeBeacon(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_CAPINFO);
+ pFrame->pqwTimestamp = (PQWORD)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_TS);
+ pFrame->pwBeaconInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_BCN_INT);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_CAPINFO);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_BEACON_OFF_SSID;
@@ -124,16 +127,20 @@ vMgrDecodeBeacon(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_CAPINFO);
+ pFrame->pqwTimestamp = (PQWORD)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_TS);
+ pFrame->pwBeaconInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_BCN_INT);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_CAPINFO);
/* Information elements */
- pItem = (PWLAN_IE)((unsigned char *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)))
- + WLAN_BEACON_OFF_SSID);
+ pItem = (PWLAN_IE)((unsigned char *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))) +
+ WLAN_BEACON_OFF_SSID);
while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
switch (pItem->byElementID) {
case WLAN_EID_SSID:
@@ -171,7 +178,8 @@ vMgrDecodeBeacon(
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ pFrame->pRSNWPA =
+ (PWLAN_IE_RSN_EXT)pItem;
}
break;
@@ -181,7 +189,8 @@ vMgrDecodeBeacon(
break;
case WLAN_EID_EXTSUPP_RATES:
if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pExtSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_COUNTRY: /* 7 */
@@ -191,7 +200,8 @@ vMgrDecodeBeacon(
case WLAN_EID_PWR_CONSTRAINT: /* 32 */
if (pFrame->pIE_PowerConstraint == NULL)
- pFrame->pIE_PowerConstraint = (PWLAN_IE_PW_CONST)pItem;
+ pFrame->pIE_PowerConstraint =
+ (PWLAN_IE_PW_CONST)pItem;
break;
case WLAN_EID_CH_SWITCH: /* 37 */
@@ -210,7 +220,9 @@ vMgrDecodeBeacon(
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in beacon decode.\n", pItem->byElementID);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Unrecognized EID=%dd in beacon decode.\n",
+ pItem->byElementID);
break;
}
@@ -282,9 +294,11 @@ vMgrEncodeDisassociation(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_DISASSOC_OFF_REASON);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON + sizeof(*(pFrame->pwReason));
+ pFrame->pwReason = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_DISASSOC_OFF_REASON);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON +
+ sizeof(*(pFrame->pwReason));
return;
}
@@ -308,8 +322,9 @@ vMgrDecodeDisassociation(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_DISASSOC_OFF_REASON);
+ pFrame->pwReason = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_DISASSOC_OFF_REASON);
return;
}
@@ -332,11 +347,14 @@ vMgrEncodeAssocRequest(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCREQ_OFF_LISTEN_INT);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCREQ_OFF_LISTEN_INT + sizeof(*(pFrame->pwListenInterval));
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCREQ_OFF_CAP_INFO);
+ pFrame->pwListenInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCREQ_OFF_LISTEN_INT);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCREQ_OFF_LISTEN_INT +
+ sizeof(*(pFrame->pwListenInterval));
return;
}
@@ -360,10 +378,12 @@ vMgrDecodeAssocRequest(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCREQ_OFF_LISTEN_INT);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCREQ_OFF_CAP_INFO);
+ pFrame->pwListenInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCREQ_OFF_LISTEN_INT);
/* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -377,7 +397,8 @@ vMgrDecodeAssocRequest(
break;
case WLAN_EID_SUPP_RATES:
if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_RSN:
@@ -387,16 +408,19 @@ vMgrDecodeAssocRequest(
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ pFrame->pRSNWPA =
+ (PWLAN_IE_RSN_EXT)pItem;
}
break;
case WLAN_EID_EXTSUPP_RATES:
if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pExtSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in assocreq decode.\n",
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Unrecognized EID=%dd in assocreq decode.\n",
pItem->byElementID);
break;
}
@@ -424,14 +448,17 @@ vMgrEncodeAssocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_AID);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCRESP_OFF_AID
- + sizeof(*(pFrame->pwAid));
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_CAP_INFO);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_STATUS);
+ pFrame->pwAid = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_AID);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCRESP_OFF_AID +
+ sizeof(*(pFrame->pwAid));
return;
}
@@ -457,16 +484,20 @@ vMgrDecodeAssocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_AID);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_CAP_INFO);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_STATUS);
+ pFrame->pwAid = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_AID);
/* Information elements */
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_SUPP_RATES);
+ pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_SUPP_RATES);
pItem = (PWLAN_IE)(pFrame->pSuppRates);
pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
@@ -474,7 +505,9 @@ vMgrDecodeAssocResponse(
if ((((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) &&
(pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pFrame->pExtSuppRates=[%p].\n", pItem);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "pFrame->pExtSuppRates=[%p].\n",
+ pItem);
} else {
pFrame->pExtSuppRates = NULL;
}
@@ -500,13 +533,17 @@ vMgrEncodeReassocRequest(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_LISTEN_INT);
- pFrame->pAddrCurrAP = (PIEEE_ADDR)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_CURR_AP);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCREQ_OFF_CURR_AP + sizeof(*(pFrame->pAddrCurrAP));
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_CAP_INFO);
+ pFrame->pwListenInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_LISTEN_INT);
+ pFrame->pAddrCurrAP = (PIEEE_ADDR)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_CURR_AP);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCREQ_OFF_CURR_AP +
+ sizeof(*(pFrame->pAddrCurrAP));
return;
}
@@ -531,12 +568,15 @@ vMgrDecodeReassocRequest(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_LISTEN_INT);
- pFrame->pAddrCurrAP = (PIEEE_ADDR)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_CURR_AP);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_CAP_INFO);
+ pFrame->pwListenInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_LISTEN_INT);
+ pFrame->pAddrCurrAP = (PIEEE_ADDR)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_CURR_AP);
/* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -550,7 +590,8 @@ vMgrDecodeReassocRequest(
break;
case WLAN_EID_SUPP_RATES:
if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_RSN:
@@ -560,16 +601,19 @@ vMgrDecodeReassocRequest(
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ pFrame->pRSNWPA =
+ (PWLAN_IE_RSN_EXT)pItem;
}
break;
case WLAN_EID_EXTSUPP_RATES:
if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pExtSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in reassocreq decode.\n",
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Unrecognized EID=%dd in reassocreq decode.\n",
pItem->byElementID);
break;
}
@@ -631,16 +675,20 @@ vMgrDecodeProbeRequest(
case WLAN_EID_SUPP_RATES:
if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_EXTSUPP_RATES:
if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pExtSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Bad EID=%dd in probereq\n", pItem->byElementID);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Bad EID=%dd in probereq\n",
+ pItem->byElementID);
break;
}
@@ -668,15 +716,18 @@ vMgrEncodeProbeResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_CAP_INFO);
+ pFrame->pqwTimestamp = (PQWORD)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_TS);
+ pFrame->pwBeaconInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_BCN_INT);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_CAP_INFO);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_PROBERESP_OFF_CAP_INFO +
- sizeof(*(pFrame->pwCapInfo));
+ sizeof(*(pFrame->pwCapInfo));
return;
}
@@ -702,12 +753,15 @@ vMgrDecodeProbeResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_CAP_INFO);
+ pFrame->pqwTimestamp = (PQWORD)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_TS);
+ pFrame->pwBeaconInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_BCN_INT);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_CAP_INFO);
/* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -721,7 +775,8 @@ vMgrDecodeProbeResponse(
break;
case WLAN_EID_SUPP_RATES:
if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_FH_PARMS:
break;
@@ -735,7 +790,8 @@ vMgrDecodeProbeResponse(
break;
case WLAN_EID_IBSS_PARMS:
if (pFrame->pIBSSParms == NULL)
- pFrame->pIBSSParms = (PWLAN_IE_IBSS_PARMS)pItem;
+ pFrame->pIBSSParms =
+ (PWLAN_IE_IBSS_PARMS)pItem;
break;
case WLAN_EID_RSN:
@@ -745,7 +801,8 @@ vMgrDecodeProbeResponse(
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ pFrame->pRSNWPA =
+ (PWLAN_IE_RSN_EXT)pItem;
}
break;
case WLAN_EID_ERP:
@@ -754,7 +811,8 @@ vMgrDecodeProbeResponse(
break;
case WLAN_EID_EXTSUPP_RATES:
if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pExtSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_COUNTRY: /* 7 */
@@ -764,7 +822,8 @@ vMgrDecodeProbeResponse(
case WLAN_EID_PWR_CONSTRAINT: /* 32 */
if (pFrame->pIE_PowerConstraint == NULL)
- pFrame->pIE_PowerConstraint = (PWLAN_IE_PW_CONST)pItem;
+ pFrame->pIE_PowerConstraint =
+ (PWLAN_IE_PW_CONST)pItem;
break;
case WLAN_EID_CH_SWITCH: /* 37 */
@@ -783,7 +842,9 @@ vMgrDecodeProbeResponse(
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Bad EID=%dd in proberesp\n", pItem->byElementID);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Bad EID=%dd in proberesp\n",
+ pItem->byElementID);
break;
}
@@ -811,13 +872,17 @@ vMgrEncodeAuthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwAuthAlgorithm = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_AUTH_ALG);
- pFrame->pwAuthSequence = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_AUTH_SEQ);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_STATUS);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_AUTHEN_OFF_STATUS + sizeof(*(pFrame->pwStatus));
+ pFrame->pwAuthAlgorithm = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_AUTH_ALG);
+ pFrame->pwAuthSequence = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_AUTH_SEQ);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_STATUS);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_AUTHEN_OFF_STATUS +
+ sizeof(*(pFrame->pwStatus));
return;
}
@@ -843,12 +908,15 @@ vMgrDecodeAuthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwAuthAlgorithm = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_AUTH_ALG);
- pFrame->pwAuthSequence = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_AUTH_SEQ);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_STATUS);
+ pFrame->pwAuthAlgorithm = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_AUTH_ALG);
+ pFrame->pwAuthSequence = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_AUTH_SEQ);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_STATUS);
/* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -880,9 +948,11 @@ vMgrEncodeDeauthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_DEAUTHEN_OFF_REASON);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON + sizeof(*(pFrame->pwReason));
+ pFrame->pwReason = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_DEAUTHEN_OFF_REASON);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON +
+ sizeof(*(pFrame->pwReason));
return;
}
@@ -906,8 +976,9 @@ vMgrDecodeDeauthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_DEAUTHEN_OFF_REASON);
+ pFrame->pwReason = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_DEAUTHEN_OFF_REASON);
return;
}
@@ -931,14 +1002,18 @@ vMgrEncodeReassocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_AID);
-
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCRESP_OFF_AID + sizeof(*(pFrame->pwAid));
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_CAP_INFO);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_STATUS);
+ pFrame->pwAid = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_AID);
+
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCRESP_OFF_AID +
+ sizeof(*(pFrame->pwAid));
return;
}
@@ -964,16 +1039,20 @@ vMgrDecodeReassocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_AID);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_CAP_INFO);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_STATUS);
+ pFrame->pwAid = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_AID);
/* Information elements */
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_SUPP_RATES);
+ pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_SUPP_RATES);
pItem = (PWLAN_IE)(pFrame->pSuppRates);
pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
diff --git a/drivers/staging/vt6655/aes_ccmp.c b/drivers/staging/vt6655/aes_ccmp.c
index 82b0bd1c056a..4ccfe06481fc 100644
--- a/drivers/staging/vt6655/aes_ccmp.c
+++ b/drivers/staging/vt6655/aes_ccmp.c
@@ -46,8 +46,7 @@
* SBOX Table
*/
-static unsigned char sbox_table[256] =
-{
+static unsigned char sbox_table[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
@@ -153,9 +152,8 @@ static void SubBytes(unsigned char *in, unsigned char *out)
{
int i;
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < 16; i++)
out[i] = sbox_table[in[i]];
- }
}
static void ShiftRows(unsigned char *in, unsigned char *out)
@@ -205,8 +203,7 @@ static void AESv128(unsigned char *key, unsigned char *data, unsigned char *ciph
SubBytes(ciphertext, TmpdataA);
ShiftRows(TmpdataA, TmpdataB);
xor_128(TmpdataB, abyRoundKey, ciphertext);
- } else /* round 1 ~ 9 */
- {
+ } else { /* round 1 ~ 9 */
SubBytes(ciphertext, TmpdataA);
ShiftRows(TmpdataA, TmpdataB);
MixColumns(&TmpdataB[0], &TmpdataA[0]);
@@ -311,13 +308,11 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
/* CCMP */
AESv128(pbyRxKey, MIC_IV, abyMIC);
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyTmp[kk] = MIC_HDR1[kk] ^ abyMIC[kk];
- }
AESv128(pbyRxKey, abyTmp, abyMIC);
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyTmp[kk] = MIC_HDR2[kk] ^ abyMIC[kk];
- }
AESv128(pbyRxKey, abyTmp, abyMIC);
wCnt = 1;
@@ -330,12 +325,10 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
AESv128(pbyRxKey, abyCTRPLD, abyTmp);
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyPlainText[kk] = abyTmp[kk] ^ pbyPayload[kk];
- }
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
- }
AESv128(pbyRxKey, abyTmp, abyMIC);
memcpy(pbyPayload, abyPlainText, 16);
@@ -345,27 +338,23 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
/* last payload */
memcpy(&(abyLastCipher[0]), pbyPayload, jj);
- for (ii = jj; ii < 16; ii++) {
+ for (ii = jj; ii < 16; ii++)
abyLastCipher[ii] = 0x00;
- }
abyCTRPLD[14] = (unsigned char)(wCnt >> 8);
abyCTRPLD[15] = (unsigned char)(wCnt & 0xff);
AESv128(pbyRxKey, abyCTRPLD, abyTmp);
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyPlainText[kk] = abyTmp[kk] ^ abyLastCipher[kk];
- }
memcpy(pbyPayload, abyPlainText, jj);
pbyPayload += jj;
/* for MIC calculation */
- for (ii = jj; ii < 16; ii++) {
+ for (ii = jj; ii < 16; ii++)
abyPlainText[ii] = 0x00;
- }
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
- }
AESv128(pbyRxKey, abyTmp, abyMIC);
/* =>above is the calculate MIC */
@@ -375,9 +364,8 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
abyCTRPLD[14] = (unsigned char)(wCnt >> 8);
abyCTRPLD[15] = (unsigned char)(wCnt & 0xff);
AESv128(pbyRxKey, abyCTRPLD, abyTmp);
- for (kk = 0; kk < 8; kk++) {
+ for (kk = 0; kk < 8; kk++)
abyTmp[kk] = abyTmp[kk] ^ pbyPayload[kk];
- }
/* =>above is the dec-MIC from packet */
/* -------------------------------------------- */
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 32d808ec502a..f8e41487f856 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -276,9 +276,8 @@ typedef struct tagSRxDesc {
volatile SRDES1 m_rd1RD1;
volatile u32 buff_addr;
volatile u32 next_desc;
- struct tagSRxDesc *next;//4 bytes
- volatile PDEVICE_RD_INFO pRDInfo;//4 bytes
- volatile u32 Reserved[2];//8 bytes
+ struct tagSRxDesc *next __aligned(8);
+ volatile PDEVICE_RD_INFO pRDInfo __aligned(8);
} __attribute__ ((__packed__))
SRxDesc, *PSRxDesc;
typedef const SRxDesc *PCSRxDesc;
@@ -361,9 +360,8 @@ typedef struct tagSTxDesc {
volatile STDES1 m_td1TD1;
volatile u32 buff_addr;
volatile u32 next_desc;
- struct tagSTxDesc *next; //4 bytes
- volatile PDEVICE_TD_INFO pTDInfo;//4 bytes
- volatile u32 Reserved[2];//8 bytes
+ struct tagSTxDesc *next __aligned(8);
+ volatile PDEVICE_TD_INFO pTDInfo __aligned(8);
} __attribute__ ((__packed__))
STxDesc, *PSTxDesc;
typedef const STxDesc *PCSTxDesc;
@@ -375,9 +373,8 @@ typedef struct tagSTxSyncDesc {
volatile u32 next_desc; // pointer to next logical descriptor
volatile unsigned short m_wFIFOCtl;
volatile unsigned short m_wTimeStamp;
- struct tagSTxSyncDesc *next; //4 bytes
- volatile PDEVICE_TD_INFO pTDInfo;//4 bytes
- volatile u32 m_dwReserved2;
+ struct tagSTxSyncDesc *next __aligned(8);
+ volatile PDEVICE_TD_INFO pTDInfo __aligned(8);
} __attribute__ ((__packed__))
STxSyncDesc, *PSTxSyncDesc;
typedef const STxSyncDesc *PCSTxSyncDesc;
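The descriptor change drops the explicit Reserved padding words and instead pins the software-only pointer fields to 8-byte slots, which appears intended to keep the packed, hardware-visible layout identical on 32-bit and 64-bit builds. A reduced sketch of the idea with illustrative field names:

#include <linux/types.h>
#include <linux/compiler.h>

/* Hardware-owned words first, then software bookkeeping pinned to
 * 8-byte slots so the descriptor size does not depend on pointer width. */
struct demo_rx_desc {
	__le32 status;
	__le32 buff_addr;
	__le32 next_desc;
	struct demo_rx_desc *next __aligned(8);
	void *priv __aligned(8);
} __packed;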
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 0a29c9015419..771bf35ae044 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -729,27 +729,27 @@ device_receive_frame(
// Soft MIC
if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
if (bIsWEP) {
- unsigned long *pdwMIC_L;
- unsigned long *pdwMIC_R;
- unsigned long dwMIC_Priority;
- unsigned long dwMICKey0 = 0, dwMICKey1 = 0;
- unsigned long dwLocalMIC_L = 0;
- unsigned long dwLocalMIC_R = 0;
+ __le32 *pdwMIC_L;
+ __le32 *pdwMIC_R;
+ __le32 dwMIC_Priority;
+ __le32 dwMICKey0 = 0, dwMICKey1 = 0;
+ u32 dwLocalMIC_L = 0;
+ u32 dwLocalMIC_R = 0;
viawget_wpa_header *wpahdr;
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[24]));
- dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[28]));
+ dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[24]));
+ dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[28]));
} else {
if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[16]));
- dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[20]));
+ dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[16]));
+ dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[20]));
} else if ((pKey->dwKeyIndex & BIT28) == 0) {
- dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[16]));
- dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[20]));
+ dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[16]));
+ dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[20]));
} else {
- dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[24]));
- dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[28]));
+ dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[24]));
+ dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[28]));
}
}
@@ -763,14 +763,14 @@ device_receive_frame(
MIC_vGetMIC(&dwLocalMIC_L, &dwLocalMIC_R);
MIC_vUnInit();
- pdwMIC_L = (unsigned long *)(skb->data + 4 + FrameSize);
- pdwMIC_R = (unsigned long *)(skb->data + 4 + FrameSize + 4);
+ pdwMIC_L = (__le32 *)(skb->data + 4 + FrameSize);
+ pdwMIC_R = (__le32 *)(skb->data + 4 + FrameSize + 4);
//DBG_PRN_GRP12(("RxL: %lx, RxR: %lx\n", *pdwMIC_L, *pdwMIC_R));
//DBG_PRN_GRP12(("LocalL: %lx, LocalR: %lx\n", dwLocalMIC_L, dwLocalMIC_R));
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dwMICKey0= %lx,dwMICKey1= %lx \n", dwMICKey0, dwMICKey1);
- if ((cpu_to_le32(*pdwMIC_L) != dwLocalMIC_L) ||
- (cpu_to_le32(*pdwMIC_R) != dwLocalMIC_R) ||
+ if ((le32_to_cpu(*pdwMIC_L) != dwLocalMIC_L) ||
+ (le32_to_cpu(*pdwMIC_R) != dwLocalMIC_R) ||
pDevice->bRxMICFail) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC comparison is fail!\n");
pDevice->bRxMICFail = false;
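The receive-side MIC words are now read as __le32 and converted with le32_to_cpu() before being compared with the locally computed host-order values. A stripped-down sketch of that comparison (the helper name and layout are illustrative; get_unaligned_le32() would be the stricter way to read the trailer):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Compare the 8-byte little-endian MIC trailer that follows the frame
 * body against the locally computed host-order MIC words. */
static bool mic_trailer_matches(const u8 *frame, unsigned int frame_len,
				u32 local_l, u32 local_r)
{
	const __le32 *mic_l = (const __le32 *)(frame + frame_len);
	const __le32 *mic_r = (const __le32 *)(frame + frame_len + 4);

	return le32_to_cpu(*mic_l) == local_l &&
	       le32_to_cpu(*mic_r) == local_r;
}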
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index eab3b41f9e3c..78b5809b8304 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -242,7 +242,7 @@ bool KeybSetKey(
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pbyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pbyBSSID, (u32 *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
@@ -306,7 +306,7 @@ bool KeybSetKey(
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[j].wKeyCtl, j, uKeyIdx, pbyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[j].wKeyCtl, j, uKeyIdx, pbyBSSID, (u32 *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
@@ -670,7 +670,7 @@ bool KeybSetDefaultKey(
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl, MAX_KEY_TABLE-1, uKeyIdx, pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl, MAX_KEY_TABLE-1, uKeyIdx, pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID, (u32 *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
@@ -766,7 +766,7 @@ bool KeybSetAllGroupKey(
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pTable->KeyTable[i].abyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pTable->KeyTable[i].abyBSSID, (u32 *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 21bd8a1126d7..0ec079fa0398 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -1428,10 +1428,10 @@ bool MACbPSWakeup(unsigned long dwIoBase)
*/
void MACvSetKeyEntry(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx,
- unsigned int uKeyIdx, unsigned char *pbyAddr, unsigned long *pdwKey, unsigned char byLocalID)
+ unsigned int uKeyIdx, unsigned char *pbyAddr, u32 *pdwKey, unsigned char byLocalID)
{
unsigned short wOffset;
- unsigned long dwData;
+ u32 dwData;
int ii;
if (byLocalID <= 1)
@@ -1445,7 +1445,7 @@ void MACvSetKeyEntry(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned in
dwData |= wKeyCtl;
dwData <<= 16;
dwData |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5));
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData, wKeyCtl);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "1. wOffset: %d, Data: %X, KeyCtl:%X\n", wOffset, dwData, wKeyCtl);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
@@ -1460,7 +1460,7 @@ void MACvSetKeyEntry(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned in
dwData |= *(pbyAddr+1);
dwData <<= 8;
dwData |= *(pbyAddr+0);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "2. wOffset: %d, Data: %lX\n", wOffset, dwData);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "2. wOffset: %d, Data: %X\n", wOffset, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
@@ -1470,7 +1470,7 @@ void MACvSetKeyEntry(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned in
wOffset += (uKeyIdx * 4);
for (ii = 0; ii < 4; ii++) {
// always push 128 bits
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "3.(%d) wOffset: %d, Data: %lX\n", ii, wOffset+ii, *pdwKey);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "3.(%d) wOffset: %d, Data: %X\n", ii, wOffset+ii, *pdwKey);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 3f177f7c581c..4615db05549f 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -1038,7 +1038,7 @@ bool MACbFlushSYNCFifo(unsigned long dwIoBase);
bool MACbPSWakeup(unsigned long dwIoBase);
void MACvSetKeyEntry(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx,
- unsigned int uKeyIdx, unsigned char *pbyAddr, unsigned long *pdwKey, unsigned char byLocalID);
+ unsigned int uKeyIdx, unsigned char *pbyAddr, u32 *pdwKey, unsigned char byLocalID);
void MACvDisableKeyEntry(unsigned long dwIoBase, unsigned int uEntryIdx);
void MACvSetDefaultKeyEntry(unsigned long dwIoBase, unsigned int uKeyLen,
unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID);
diff --git a/drivers/staging/vt6655/michael.c b/drivers/staging/vt6655/michael.c
index 7ea5f7f82f43..ade4c855f20d 100644
--- a/drivers/staging/vt6655/michael.c
+++ b/drivers/staging/vt6655/michael.c
@@ -53,14 +53,14 @@
*/
static void s_vClear(void); // Clear the internal message,
// resets the object to the state just after construction.
-static void s_vSetKey(unsigned long dwK0, unsigned long dwK1);
+static void s_vSetKey(u32 dwK0, u32 dwK1);
static void s_vAppendByte(unsigned char b); // Add a single byte to the internal message
/*--------------------- Export Variables --------------------------*/
-static unsigned long L, R; // Current state
+static u32 L, R; /* Current state */
-static unsigned long K0, K1; // Key
-static unsigned long M; // Message accumulator (single word)
+static u32 K0, K1; /* Key */
+static u32 M; /* Message accumulator (single word) */
static unsigned int nBytesInM; // # bytes in M
/*--------------------- Export Functions --------------------------*/
@@ -98,7 +98,7 @@ static void s_vClear(void)
M = 0;
}
-static void s_vSetKey(unsigned long dwK0, unsigned long dwK1)
+static void s_vSetKey(u32 dwK0, u32 dwK1)
{
// Set the key
K0 = dwK0;
@@ -129,7 +129,7 @@ static void s_vAppendByte(unsigned char b)
}
}
-void MIC_vInit(unsigned long dwK0, unsigned long dwK1)
+void MIC_vInit(u32 dwK0, u32 dwK1)
{
// Set the key
s_vSetKey(dwK0, dwK1);
@@ -155,7 +155,7 @@ void MIC_vAppend(unsigned char *src, unsigned int nBytes)
}
}
-void MIC_vGetMIC(unsigned long *pdwL, unsigned long *pdwR)
+void MIC_vGetMIC(u32 *pdwL, u32 *pdwR)
{
// Append the minimum padding
s_vAppendByte(0x5a);
diff --git a/drivers/staging/vt6655/michael.h b/drivers/staging/vt6655/michael.h
index 387d20623aa3..f6c2c15d9cb6 100644
--- a/drivers/staging/vt6655/michael.h
+++ b/drivers/staging/vt6655/michael.h
@@ -31,11 +31,13 @@
#ifndef __MICHAEL_H__
#define __MICHAEL_H__
+#include <linux/types.h>
+
/*--------------------- Export Definitions -------------------------*/
/*--------------------- Export Types ------------------------------*/
-void MIC_vInit(unsigned long dwK0, unsigned long dwK1);
+void MIC_vInit(u32 dwK0, u32 dwK1);
void MIC_vUnInit(void);
@@ -44,7 +46,7 @@ void MIC_vAppend(unsigned char *src, unsigned int nBytes);
/* Get the MIC result. Destination should accept 8 bytes of result. */
/* This also resets the message to empty. */
-void MIC_vGetMIC(unsigned long *pdwL, unsigned long *pdwR);
+void MIC_vGetMIC(u32 *pdwL, u32 *pdwR);
/*--------------------- Export Macros ------------------------------*/
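With the Michael prototypes switched to fixed-width u32, a caller loads the two key words from the TKIP key material and reads the MIC back into u32 variables, as rxtx.c does below. A compressed sketch of that call sequence (the key offsets mirror one branch of the code below and are otherwise illustrative):

#include <linux/types.h>

#include "michael.h"

/* Sketch: compute a Michael MIC over a payload, using 8 bytes of TKIP
 * key material as the two 32-bit key words. */
static void demo_michael_mic(const u8 *key, unsigned char *payload,
			     unsigned int len, u32 *mic_l, u32 *mic_r)
{
	u32 k0 = *(const u32 *)&key[16];
	u32 k1 = *(const u32 *)&key[20];

	MIC_vInit(k0, k1);
	MIC_vAppend(payload, len);
	MIC_vGetMIC(mic_l, mic_r);	/* also resets the internal message */
	MIC_vUnInit();
}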
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 6affd6edac0d..c2653eb4f76c 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1257,11 +1257,11 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
// unsigned char abyTmp[8];
// unsigned long dwCRC;
unsigned int cbMICHDR = 0;
- unsigned long dwMICKey0, dwMICKey1;
- unsigned long dwMIC_Priority;
- unsigned long *pdwMIC_L;
- unsigned long *pdwMIC_R;
- unsigned long dwSafeMIC_L, dwSafeMIC_R; //Fix "Last Frag Size" < "MIC length".
+ u32 dwMICKey0, dwMICKey1;
+ u32 dwMIC_Priority;
+ u32 *pdwMIC_L;
+ u32 *pdwMIC_R;
+ u32 dwSafeMIC_L, dwSafeMIC_R; /* Fix "Last Frag Size" < "MIC length". */
bool bMIC2Frag = false;
unsigned int uMICFragLen = 0;
unsigned int uMACfragNum = 1;
@@ -1434,21 +1434,21 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
//////////////////////////////////////////////////////////////////
if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
+ dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
+ dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
} else if ((pTransmitKey->dwKeyIndex & AUTHENTICATOR_KEY) != 0) {
- dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
+ dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
+ dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
} else {
- dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[24]);
- dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[28]);
+ dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[24]);
+ dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[28]);
}
// DO Software Michael
MIC_vInit(dwMICKey0, dwMICKey1);
MIC_vAppend((unsigned char *)&(psEthHeader->abyDstAddr[0]), 12);
dwMIC_Priority = 0;
MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC KEY: %X, %X\n", dwMICKey0, dwMICKey1);
}
///////////////////////////////////////////////////////////////////
@@ -1624,10 +1624,10 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
if (bMIC2Frag == false) {
if (uTmpLen != 0)
MIC_vAppend((pbyBuffer + uLength), uTmpLen);
- pdwMIC_L = (unsigned long *)(pbyBuffer + uLength + uTmpLen);
- pdwMIC_R = (unsigned long *)(pbyBuffer + uLength + uTmpLen + 4);
+ pdwMIC_L = (u32 *)(pbyBuffer + uLength + uTmpLen);
+ pdwMIC_R = (u32 *)(pbyBuffer + uLength + uTmpLen + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Last MIC:%lX, %lX\n", *pdwMIC_L, *pdwMIC_R);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Last MIC:%X, %X\n", *pdwMIC_L, *pdwMIC_R);
} else {
if (uMICFragLen >= 4) {
memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)),
@@ -1744,8 +1744,8 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
uMICFragLen = cbFragPayloadSize - uTmpLen;
ASSERT(uMICFragLen < cbMIClen);
- pdwMIC_L = (unsigned long *)(pbyBuffer + uLength + uTmpLen);
- pdwMIC_R = (unsigned long *)(pbyBuffer + uLength + uTmpLen + 4);
+ pdwMIC_L = (u32 *)(pbyBuffer + uLength + uTmpLen);
+ pdwMIC_R = (u32 *)(pbyBuffer + uLength + uTmpLen + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
dwSafeMIC_L = *pdwMIC_L;
dwSafeMIC_R = *pdwMIC_R;
@@ -1759,7 +1759,7 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "\n");
*/
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get MIC:%lX, %lX\n", *pdwMIC_L, *pdwMIC_R);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get MIC:%X, %X\n", *pdwMIC_L, *pdwMIC_R);
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Middle frag len: %d\n", uTmpLen);
/*
@@ -1873,8 +1873,8 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFrameBodySize);
- pdwMIC_L = (unsigned long *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize);
- pdwMIC_R = (unsigned long *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize + 4);
+ pdwMIC_L = (u32 *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize);
+ pdwMIC_R = (u32 *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
@@ -1887,7 +1887,7 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "uLength: %d, %d\n", uLength, cbFrameBodySize);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderLength, uPadding, cbIVlen);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC:%x, %x\n", *pdwMIC_L, *pdwMIC_R);
/*
for (ii = 0; ii < 8; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", *(((unsigned char *)(pdwMIC_L) + ii)));
@@ -2592,10 +2592,10 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, un
unsigned int uPadding = 0;
unsigned int cbMICHDR = 0;
unsigned int uLength = 0;
- unsigned long dwMICKey0, dwMICKey1;
- unsigned long dwMIC_Priority;
- unsigned long *pdwMIC_L;
- unsigned long *pdwMIC_R;
+ u32 dwMICKey0, dwMICKey1;
+ u32 dwMIC_Priority;
+ u32 *pdwMIC_L;
+ u32 *pdwMIC_R;
unsigned short wTxBufSize;
unsigned int cbMacHdLen;
SEthernetHeader sEthHeader;
@@ -2841,22 +2841,22 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, un
}
if ((pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
+ dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
+ dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
// DO Software Michael
MIC_vInit(dwMICKey0, dwMICKey1);
MIC_vAppend((unsigned char *)&(sEthHeader.abyDstAddr[0]), 12);
dwMIC_Priority = 0;
MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "DMA0_tx_8021:MIC KEY: %X, %X\n", dwMICKey0, dwMICKey1);
uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
MIC_vAppend((pbyTxBufferAddr + uLength), cbFrameBodySize);
- pdwMIC_L = (unsigned long *)(pbyTxBufferAddr + uLength + cbFrameBodySize);
- pdwMIC_R = (unsigned long *)(pbyTxBufferAddr + uLength + cbFrameBodySize + 4);
+ pdwMIC_L = (u32 *)(pbyTxBufferAddr + uLength + cbFrameBodySize);
+ pdwMIC_R = (u32 *)(pbyTxBufferAddr + uLength + cbFrameBodySize + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
@@ -2869,7 +2869,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, un
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "uLength: %d, %d\n", uLength, cbFrameBodySize);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC:%x, %x\n", *pdwMIC_L, *pdwMIC_R);
}
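
Note on the vt6655 rxtx.c hunks above: they narrow the Michael MIC keys and result pointers from `unsigned long` to `u32` and switch the debug format specifiers from `%lX` to `%X`. The TKIP Michael MIC is a 64-bit value handled as two 32-bit halves, so on 64-bit builds an `unsigned long` access touches 8 bytes per half and `%lX` prints 16 hex digits; narrowing to `u32` keeps both the accesses and the debug output at the intended 32 bits. A minimal standalone sketch of the idea (names and values are illustrative, not the driver's):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 8-byte Michael MIC as it sits at the end of the frame body */
	uint8_t mic_bytes[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	uint32_t mic_l, mic_r;

	/* read the two 32-bit halves; memcpy keeps the sketch alignment-safe */
	memcpy(&mic_l, &mic_bytes[0], sizeof(mic_l));
	memcpy(&mic_r, &mic_bytes[4], sizeof(mic_r));

	/* a 32-bit conversion specifier matches a 32-bit value; %lX would
	 * expect an unsigned long, which is 8 bytes on x86_64 */
	printf("Get MIC:%" PRIX32 ", %" PRIX32 "\n", mic_l, mic_r);
	return 0;
}
```

The driver itself still type-puns the buffer through a `u32 *`; the memcpy form above is only to keep the sketch portable.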
diff --git a/drivers/staging/vt6655/wmgr.c b/drivers/staging/vt6655/wmgr.c
index 5200a2a0ecca..b673bc9b2130 100644
--- a/drivers/staging/vt6655/wmgr.c
+++ b/drivers/staging/vt6655/wmgr.c
@@ -967,10 +967,10 @@ s_vMgrRxAssocResponse(
sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
// decode the frame
vMgrDecodeAssocResponse(&sFrame);
- if ((sFrame.pwCapInfo == 0) ||
- (sFrame.pwStatus == 0) ||
- (sFrame.pwAid == 0) ||
- (sFrame.pSuppRates == 0)) {
+ if ((sFrame.pwCapInfo == NULL) ||
+ (sFrame.pwStatus == NULL) ||
+ (sFrame.pwAid == NULL) ||
+ (sFrame.pSuppRates == NULL)) {
DBG_PORT80(0xCC);
return;
}
@@ -1812,10 +1812,10 @@ s_vMgrRxBeacon(
// decode the beacon frame
vMgrDecodeBeacon(&sFrame);
- if ((sFrame.pwBeaconInterval == 0) ||
- (sFrame.pwCapInfo == 0) ||
- (sFrame.pSSID == 0) ||
- (sFrame.pSuppRates == 0)) {
+ if ((sFrame.pwBeaconInterval == NULL) ||
+ (sFrame.pwCapInfo == NULL) ||
+ (sFrame.pSSID == NULL) ||
+ (sFrame.pSuppRates == NULL)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Rx beacon frame error\n");
return;
}
@@ -2072,7 +2072,7 @@ s_vMgrRxBeacon(
if (bTSFLargeDiff)
bUpdateTSF = true;
- if (pDevice->bEnablePSMode && (sFrame.pTIM != 0)) {
+ if (pDevice->bEnablePSMode && (sFrame.pTIM != NULL)) {
// deal with DTIM, analysis TIM
pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? true : false;
pMgmt->byDTIMCount = sFrame.pTIM->byDTIMCount;
@@ -4107,11 +4107,11 @@ s_vMgrRxProbeResponse(
sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
vMgrDecodeProbeResponse(&sFrame);
- if ((sFrame.pqwTimestamp == 0) ||
- (sFrame.pwBeaconInterval == 0) ||
- (sFrame.pwCapInfo == 0) ||
- (sFrame.pSSID == 0) ||
- (sFrame.pSuppRates == 0)) {
+ if ((sFrame.pqwTimestamp == NULL) ||
+ (sFrame.pwBeaconInterval == NULL) ||
+ (sFrame.pwCapInfo == NULL) ||
+ (sFrame.pSSID == NULL) ||
+ (sFrame.pSuppRates == NULL)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe resp:Fail addr:[%p] \n", pRxPacket->p80211Header);
DBG_PORT80(0xCC);
return;
@@ -4120,7 +4120,7 @@ s_vMgrRxProbeResponse(
if (sFrame.pSSID->len == 0)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Rx Probe resp: SSID len = 0 \n");
- if (sFrame.pDSParms != 0) {
+ if (sFrame.pDSParms != NULL) {
if (byCurrChannel > CB_MAX_CHANNEL_24G) {
// channel remapping to
byIEChannel = get_channel_mapping(pMgmt->pAdapter, sFrame.pDSParms->byCurrChannel, PHY_TYPE_11A);
diff --git a/drivers/staging/vt6656/80211hdr.h b/drivers/staging/vt6656/80211hdr.h
index 000304ffad6c..1e778ba7c634 100644
--- a/drivers/staging/vt6656/80211hdr.h
+++ b/drivers/staging/vt6656/80211hdr.h
@@ -151,7 +151,7 @@
#ifdef __BIG_ENDIAN
/* GET & SET Frame Control bit */
-#define WLAN_GET_FC_PRVER(n) ((((u16)(n) >> 8) & (BIT0 | BIT1))
+#define WLAN_GET_FC_PRVER(n) (((u16)(n) >> 8) & (BIT0 | BIT1))
#define WLAN_GET_FC_FTYPE(n) ((((u16)(n) >> 8) & (BIT2 | BIT3)) >> 2)
#define WLAN_GET_FC_FSTYPE(n) ((((u16)(n) >> 8) \
& (BIT4|BIT5|BIT6|BIT7)) >> 4)
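
Note on the 80211hdr.h change: it drops a stray opening parenthesis from the big-endian `WLAN_GET_FC_PRVER()` definition, so every expansion now balances and the macro can be used inside a larger expression. A quick standalone check of the corrected form (the BIT0/BIT1 values and the test value below are assumptions for illustration only):

```c
#include <stdint.h>
#include <stdio.h>

#define BIT0 0x0001
#define BIT1 0x0002

/* corrected form: parentheses balance, so it expands cleanly anywhere */
#define WLAN_GET_FC_PRVER(n) (((uint16_t)(n) >> 8) & (BIT0 | BIT1))

int main(void)
{
	uint16_t fc = 0x0100;	/* illustrative frame-control value */

	printf("prver = %u\n", WLAN_GET_FC_PRVER(fc));	/* prints 1 */
	return 0;
}
```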
diff --git a/drivers/staging/vt6656/aes_ccmp.c b/drivers/staging/vt6656/aes_ccmp.c
index 61b9f7bdb858..e2bfa8d266cd 100644
--- a/drivers/staging/vt6656/aes_ccmp.c
+++ b/drivers/staging/vt6656/aes_ccmp.c
@@ -269,9 +269,9 @@ bool AESbGenCCMP(u8 *pbyRxKey, u8 *pbyFrame, u16 wFrameSize)
/* MIC_HDR1 */
MIC_HDR1[0] = (u8)(wHLen >> 8);
MIC_HDR1[1] = (u8)(wHLen & 0xff);
- byTmp = (u8)(pMACHeader->frame_control & 0xff);
+ byTmp = (u8)(le16_to_cpu(pMACHeader->frame_control) >> 8);
MIC_HDR1[2] = byTmp & 0x8f;
- byTmp = (u8)(pMACHeader->frame_control >> 8);
+ byTmp = (u8)(le16_to_cpu(pMACHeader->frame_control) & 0xff);
byTmp &= 0x87;
MIC_HDR1[3] = byTmp | 0x40;
memcpy(&(MIC_HDR1[4]), pMACHeader->addr1, ETH_ALEN);
@@ -279,7 +279,7 @@ bool AESbGenCCMP(u8 *pbyRxKey, u8 *pbyFrame, u16 wFrameSize)
/* MIC_HDR2 */
memcpy(&(MIC_HDR2[0]), pMACHeader->addr3, ETH_ALEN);
- byTmp = (u8)(pMACHeader->seq_ctrl & 0xff);
+ byTmp = (u8)(le16_to_cpu(pMACHeader->seq_ctrl) >> 8);
MIC_HDR2[6] = byTmp & 0x0f;
MIC_HDR2[7] = 0;
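
Note on the aes_ccmp.c hunks: the CCMP MIC header octets are now derived from `le16_to_cpu()` of the 802.11 `frame_control`/`seq_ctrl` fields rather than from the raw in-memory halfword, so the byte that gets selected no longer depends on host endianness (both fields are little-endian on the wire). A hedged sketch of the general pattern, outside the driver and with a stand-in for the kernel helper:

```c
#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's le16_to_cpu(); real module code would use
 * the macro from <asm/byteorder.h> on a __le16 field */
static uint16_t sketch_le16_to_cpu(const uint8_t raw[2])
{
	return (uint16_t)raw[0] | ((uint16_t)raw[1] << 8);
}

int main(void)
{
	uint8_t frame_control_raw[2] = { 0x88, 0x41 };	/* illustrative 802.11 data-frame value */
	uint16_t fc = sketch_le16_to_cpu(frame_control_raw);

	/* once the value is in CPU order, ">> 8" and "& 0xff" select
	 * well-defined octets on little- and big-endian hosts alike */
	printf("high octet %02x, low octet %02x\n", fc >> 8, fc & 0xff);
	return 0;
}
```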
diff --git a/drivers/staging/vt6656/card.h b/drivers/staging/vt6656/card.h
index c3017a74fa0f..f843e50875f9 100644
--- a/drivers/staging/vt6656/card.h
+++ b/drivers/staging/vt6656/card.h
@@ -39,13 +39,6 @@ typedef enum _CARD_PHY_TYPE {
PHY_TYPE_11A
} CARD_PHY_TYPE, *PCARD_PHY_TYPE;
-typedef enum _CARD_OP_MODE {
- OP_MODE_INFRASTRUCTURE = 0,
- OP_MODE_ADHOC,
- OP_MODE_AP,
- OP_MODE_UNKNOWN
-} CARD_OP_MODE, *PCARD_OP_MODE;
-
#define CB_MAX_CHANNEL_24G 14
#define CB_MAX_CHANNEL_5G 42 /* add channel9(5045MHz), 41==>42 */
#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G+CB_MAX_CHANNEL_5G)
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 1f422574c3d8..e2abe3ddd244 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -158,10 +158,10 @@ typedef enum __device_msg_level {
/*
* Enum of context types for SendPacket
*/
-typedef enum _CONTEXT_TYPE {
- CONTEXT_DATA_PACKET = 1,
- CONTEXT_MGMT_PACKET
-} CONTEXT_TYPE;
+enum {
+ CONTEXT_DATA_PACKET = 1,
+ CONTEXT_MGMT_PACKET
+};
/* RCB (Receive Control Block) */
struct vnt_rcb {
@@ -180,9 +180,7 @@ struct vnt_usb_send_context {
struct sk_buff *pPacket;
struct urb *pUrb;
unsigned int uBufLen;
- CONTEXT_TYPE Type;
- struct ethhdr sEthHeader;
- void *Next;
+ u8 type;
bool bBoolInUse;
unsigned char Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
};
@@ -206,29 +204,10 @@ typedef struct _DEFAULT_CONFIG {
/*
* Structure to keep track of USB interrupt packets
*/
-typedef struct {
- unsigned int uDataLen;
- u8 * pDataBuf;
- /* struct urb *pUrb; */
- bool bInUse;
-} INT_BUFFER, *PINT_BUFFER;
-
-/* 0:11A 1:11B 2:11G */
-typedef enum _VIA_BB_TYPE
-{
- BB_TYPE_11A = 0,
- BB_TYPE_11B,
- BB_TYPE_11G
-} VIA_BB_TYPE, *PVIA_BB_TYPE;
-
-/* 0:11a, 1:11b, 2:11gb (only CCK in BasicRate), 3:11ga(OFDM in BasicRate) */
-typedef enum _VIA_PKT_TYPE
-{
- PK_TYPE_11A = 0,
- PK_TYPE_11B,
- PK_TYPE_11GB,
- PK_TYPE_11GA
-} VIA_PKT_TYPE, *PVIA_PKT_TYPE;
+struct vnt_interrupt_buffer {
+ u8 *data_buf;
+ bool in_use;
+};
/*++ NDIS related */
@@ -297,16 +276,6 @@ typedef struct tagSPMKIDCandidateEvent {
PMKID_CANDIDATE CandidateList[MAX_PMKIDLIST];
} SPMKIDCandidateEvent, *PSPMKIDCandidateEvent;
-/*++ 802.11h related */
-#define MAX_QUIET_COUNT 8
-
-typedef struct tagSQuietControl {
- bool bEnable;
- u32 dwStartTime;
- u8 byPeriod;
- u16 wDuration;
-} SQuietControl, *PSQuietControl;
-
/* The receive duplicate detection cache entry */
typedef struct tagSCacheEntry{
u16 wFmSequence;
@@ -386,8 +355,6 @@ struct vnt_private {
OPTIONS sOpts;
- struct tasklet_struct CmdWorkItem;
- struct tasklet_struct EventWorkItem;
struct work_struct read_work_item;
struct work_struct rx_mng_work_item;
@@ -437,29 +404,11 @@ struct vnt_private {
struct vnt_tx_pkt_info pkt_info[16];
/* Variables to track resources for the Interrupt In Pipe */
- INT_BUFFER intBuf;
- int fKillEventPollingThread;
- int bEventAvailable;
+ struct vnt_interrupt_buffer int_buf;
/* default config from file by user setting */
DEFAULT_CONFIG config_file;
- /* Statistic for USB */
- unsigned long ulBulkInPosted;
- unsigned long ulBulkInError;
- unsigned long ulBulkInContCRCError;
- unsigned long ulBulkInBytesRead;
-
- unsigned long ulBulkOutPosted;
- unsigned long ulBulkOutError;
- unsigned long ulBulkOutContCRCError;
- unsigned long ulBulkOutBytesWrite;
-
- unsigned long ulIntInPosted;
- unsigned long ulIntInError;
- unsigned long ulIntInContCRCError;
- unsigned long ulIntInBytesRead;
-
/* Version control */
u16 wFirmwareVersion;
u8 byLocalID;
@@ -480,11 +429,6 @@ struct vnt_private {
int bExistSWNetAddr;
/* Maintain statistical debug info. */
- unsigned long packetsReceived;
- unsigned long packetsReceivedDropped;
- unsigned long packetsReceivedOverflow;
- unsigned long packetsSent;
- unsigned long packetsSentDropped;
unsigned long SendContextsInUse;
unsigned long RcvBuffersInUse;
@@ -549,8 +493,8 @@ struct vnt_private {
u8 byCWMaxMin;
/* Rate */
- VIA_BB_TYPE byBBType; /* 0: 11A, 1:11B, 2:11G */
- VIA_PKT_TYPE byPacketType; /* 0:11a 1:11b 2:11gb 3:11ga */
+ u8 byBBType; /* 0: 11A, 1:11B, 2:11G */
+ u8 byPacketType; /* 0:11a 1:11b 2:11gb 3:11ga */
u16 wBasicRate;
u8 byACKRate;
u8 byTopOFDMBasicRate;
@@ -588,7 +532,9 @@ struct vnt_private {
u16 wFragmentationThreshold;
u8 byShortRetryLimit;
u8 byLongRetryLimit;
- CARD_OP_MODE eOPMode;
+
+ enum nl80211_iftype op_mode;
+
int bBSSIDFilter;
u16 wMaxTransmitMSDULifetime;
u8 abyBSSID[ETH_ALEN];
@@ -807,5 +753,6 @@ struct vnt_private {
(fMP_DISCONNECTED | fMP_RESET_IN_PROGRESS | fMP_HALT_IN_PROGRESS | fMP_INIT_IN_PROGRESS | fMP_SURPRISE_REMOVED)) == 0)
int device_alloc_frag_buf(struct vnt_private *, PSDeFragControlBlock pDeF);
+void vnt_configure_filter(struct vnt_private *);
#endif
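
Note on the card.h/device.h changes: the private `CARD_OP_MODE`, `CONTEXT_TYPE`, `VIA_BB_TYPE`/`VIA_PKT_TYPE` and `INT_BUFFER` definitions go away, the unused USB/packet statistics are dropped, and the interface mode is now tracked as a standard `enum nl80211_iftype`. The later hunks substitute the old constants one for one, which implies the mapping sketched below (shown for orientation only; it is inferred from the substitutions in this patch, not stated in one place):

```c
/* Orientation-only sketch of how the removed driver enum lines up with
 * the nl80211 interface types used by the rest of this patch. */
#include <stdio.h>

enum old_card_op_mode {		/* removed from card.h above */
	OP_MODE_INFRASTRUCTURE = 0,
	OP_MODE_ADHOC,
	OP_MODE_AP,
	OP_MODE_UNKNOWN
};

static const char *new_iftype_name(enum old_card_op_mode m)
{
	switch (m) {
	case OP_MODE_INFRASTRUCTURE:	return "NL80211_IFTYPE_STATION";
	case OP_MODE_ADHOC:		return "NL80211_IFTYPE_ADHOC";
	case OP_MODE_AP:		return "NL80211_IFTYPE_AP";
	default:			return "NL80211_IFTYPE_UNSPECIFIED";
	}
}

int main(void)
{
	enum old_card_op_mode m;

	for (m = OP_MODE_INFRASTRUCTURE; m <= OP_MODE_UNKNOWN; m++)
		printf("%d -> %s\n", m, new_iftype_name(m));
	return 0;
}
```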
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index eca04c0c1d97..4ccaa7e29a1c 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -627,7 +627,7 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
// Now it only supports 802.11g Infrastructure Mode, and support rate must up to 54 Mbps
if (pDevice->bDiversityEnable && (FrameSize>50) &&
- (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
+ pDevice->op_mode == NL80211_IFTYPE_STATION &&
(pDevice->bLinkPass == true)) {
BBvAntennaDiversity(pDevice, s_byGetRateIdx(*pbyRxRate), 0);
}
@@ -1227,14 +1227,13 @@ static int s_bAPModeRxData(struct vnt_private *pDevice, struct sk_buff *skb,
if (is_multicast_ether_addr((u8 *)(skb->data+cbHeaderOffset))) {
if (pMgmt->sNodeDBTable[0].bPSEnable) {
- skbcpy = dev_alloc_skb((int)pDevice->rx_buf_sz);
+ skbcpy = netdev_alloc_skb(pDevice->dev, pDevice->rx_buf_sz);
// if any node in PS mode, buffer packet until DTIM.
if (skbcpy == NULL) {
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "relay multicast no skb available \n");
}
else {
- skbcpy->dev = pDevice->dev;
skbcpy->len = FrameSize;
memcpy(skbcpy->data, skb->data+cbHeaderOffset, FrameSize);
skb_queue_tail(&(pMgmt->sNodeDBTable[0].sTxPSQueue), skbcpy);
@@ -1295,63 +1294,66 @@ static int s_bAPModeRxData(struct vnt_private *pDevice, struct sk_buff *skb,
void RXvWorkItem(struct work_struct *work)
{
- struct vnt_private *pDevice =
+ struct vnt_private *priv =
container_of(work, struct vnt_private, read_work_item);
- int ntStatus;
- struct vnt_rcb *pRCB = NULL;
+ int status;
+ struct vnt_rcb *rcb = NULL;
- if (pDevice->Flags & fMP_DISCONNECTED)
+ if (priv->Flags & fMP_DISCONNECTED)
return;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Polling Thread\n");
- spin_lock_irq(&pDevice->lock);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Polling Thread\n");
- while ((pDevice->Flags & fMP_POST_READS) &&
- MP_IS_READY(pDevice) &&
- (pDevice->NumRecvFreeList != 0) ) {
- pRCB = pDevice->FirstRecvFreeList;
- pDevice->NumRecvFreeList--;
- DequeueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList);
- ntStatus = PIPEnsBulkInUsbRead(pDevice, pRCB);
- }
- pDevice->bIsRxWorkItemQueued = false;
- spin_unlock_irq(&pDevice->lock);
+ spin_lock_irq(&priv->lock);
+
+ while ((priv->Flags & fMP_POST_READS) && MP_IS_READY(priv) &&
+ (priv->NumRecvFreeList != 0)) {
+ rcb = priv->FirstRecvFreeList;
+
+ priv->NumRecvFreeList--;
+
+ DequeueRCB(priv->FirstRecvFreeList, priv->LastRecvFreeList);
+
+ status = PIPEnsBulkInUsbRead(priv, rcb);
+ }
+ priv->bIsRxWorkItemQueued = false;
+
+ spin_unlock_irq(&priv->lock);
}
-void RXvFreeRCB(struct vnt_rcb *pRCB, int bReAllocSkb)
+void RXvFreeRCB(struct vnt_rcb *rcb, int re_alloc_skb)
{
- struct vnt_private *pDevice = pRCB->pDevice;
+ struct vnt_private *priv = rcb->pDevice;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->RXvFreeRCB\n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->RXvFreeRCB\n");
- if (bReAllocSkb == false) {
- kfree_skb(pRCB->skb);
- bReAllocSkb = true;
+ if (re_alloc_skb == false) {
+ kfree_skb(rcb->skb);
+ re_alloc_skb = true;
}
- if (bReAllocSkb == true) {
- pRCB->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- // todo error handling
- if (pRCB->skb == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to re-alloc rx skb\n");
- }else {
- pRCB->skb->dev = pDevice->dev;
- }
- }
- //
- // Insert the RCB back in the Recv free list
- //
- EnqueueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList, pRCB);
- pDevice->NumRecvFreeList++;
+ if (re_alloc_skb == true) {
+ rcb->skb = netdev_alloc_skb(priv->dev, priv->rx_buf_sz);
+ /* TODO error handling */
+ if (!rcb->skb) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
+ " Failed to re-alloc rx skb\n");
+ }
+ }
- if ((pDevice->Flags & fMP_POST_READS) && MP_IS_READY(pDevice) &&
- (pDevice->bIsRxWorkItemQueued == false) ) {
+ /* Insert the RCB back in the Recv free list */
+ EnqueueRCB(priv->FirstRecvFreeList, priv->LastRecvFreeList, rcb);
+ priv->NumRecvFreeList++;
- pDevice->bIsRxWorkItemQueued = true;
- schedule_work(&pDevice->read_work_item);
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----RXFreeRCB %d %d\n",pDevice->NumRecvFreeList, pDevice->NumRecvMngList);
+ if ((priv->Flags & fMP_POST_READS) && MP_IS_READY(priv) &&
+ (priv->bIsRxWorkItemQueued == false)) {
+ priv->bIsRxWorkItemQueued = true;
+ schedule_work(&priv->read_work_item);
+ }
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----RXFreeRCB %d %d\n",
+ priv->NumRecvFreeList, priv->NumRecvMngList);
}
void RXvMngWorkItem(struct work_struct *work)
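
Note on the dpc.c hunks: besides the priv/rcb renames, they replace `dev_alloc_skb()` plus a manual `skb->dev = pDevice->dev` assignment with `netdev_alloc_skb()`, which associates the skb with the net_device at allocation time. A hedged kernel-style sketch of just that allocation change (helper name and surrounding context are illustrative, not the driver's exact code):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: assumes a caller holding a struct net_device pointer and the
 * receive buffer size, as in the RXvFreeRCB()/s_bAPModeRxData() hunks above. */
static struct sk_buff *sketch_alloc_rx_skb(struct net_device *dev,
					   unsigned int rx_buf_sz)
{
	struct sk_buff *skb;

	/* netdev_alloc_skb() sets skb->dev for us, so the explicit
	 * "skb->dev = dev" line from the old code is no longer needed */
	skb = netdev_alloc_skb(dev, rx_buf_sz);
	if (!skb)
		return NULL;	/* caller re-queues the RCB without an skb */

	return skb;
}
```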
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index e0e93869a681..cca56b2f243d 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -70,119 +70,117 @@ void INTvWorkItem(struct vnt_private *pDevice)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Interrupt Polling Thread\n");
spin_lock_irq(&pDevice->lock);
- if (pDevice->fKillEventPollingThread != true)
- ntStatus = PIPEnsInterruptRead(pDevice);
+
+ ntStatus = PIPEnsInterruptRead(pDevice);
+
spin_unlock_irq(&pDevice->lock);
}
-void INTnsProcessData(struct vnt_private *pDevice)
+void INTnsProcessData(struct vnt_private *priv)
{
- PSINTData pINTData;
- struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- struct net_device_stats *pStats = &pDevice->stats;
+ struct vnt_interrupt_data *int_data;
+ struct vnt_manager *mgmt = &priv->vnt_mgmt;
+ struct net_device_stats *stats = &priv->stats;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptProcessData\n");
- pINTData = (PSINTData) pDevice->intBuf.pDataBuf;
- if (pINTData->byTSR0 & TSR_VALID) {
- if (pINTData->byTSR0 & (TSR_TMO | TSR_RETRYTMO))
- pDevice->wstats.discard.retries++;
+ int_data = (struct vnt_interrupt_data *)priv->int_buf.data_buf;
+
+ if (int_data->tsr0 & TSR_VALID) {
+ if (int_data->tsr0 & (TSR_TMO | TSR_RETRYTMO))
+ priv->wstats.discard.retries++;
else
- pStats->tx_packets++;
+ stats->tx_packets++;
- BSSvUpdateNodeTxCounter(pDevice,
- pINTData->byTSR0,
- pINTData->byPkt0);
- /*DBG_PRN_GRP01(("TSR0 %02x\n", pINTData->byTSR0));*/
+ BSSvUpdateNodeTxCounter(priv,
+ int_data->tsr0,
+ int_data->pkt0);
}
- if (pINTData->byTSR1 & TSR_VALID) {
- if (pINTData->byTSR1 & (TSR_TMO | TSR_RETRYTMO))
- pDevice->wstats.discard.retries++;
+
+ if (int_data->tsr1 & TSR_VALID) {
+ if (int_data->tsr1 & (TSR_TMO | TSR_RETRYTMO))
+ priv->wstats.discard.retries++;
else
- pStats->tx_packets++;
+ stats->tx_packets++;
- BSSvUpdateNodeTxCounter(pDevice,
- pINTData->byTSR1,
- pINTData->byPkt1);
- /*DBG_PRN_GRP01(("TSR1 %02x\n", pINTData->byTSR1));*/
+ BSSvUpdateNodeTxCounter(priv,
+ int_data->tsr1,
+ int_data->pkt1);
}
- if (pINTData->byTSR2 & TSR_VALID) {
- if (pINTData->byTSR2 & (TSR_TMO | TSR_RETRYTMO))
- pDevice->wstats.discard.retries++;
+
+ if (int_data->tsr2 & TSR_VALID) {
+ if (int_data->tsr2 & (TSR_TMO | TSR_RETRYTMO))
+ priv->wstats.discard.retries++;
else
- pStats->tx_packets++;
+ stats->tx_packets++;
- BSSvUpdateNodeTxCounter(pDevice,
- pINTData->byTSR2,
- pINTData->byPkt2);
- /*DBG_PRN_GRP01(("TSR2 %02x\n", pINTData->byTSR2));*/
+ BSSvUpdateNodeTxCounter(priv,
+ int_data->tsr2,
+ int_data->pkt2);
}
- if (pINTData->byTSR3 & TSR_VALID) {
- if (pINTData->byTSR3 & (TSR_TMO | TSR_RETRYTMO))
- pDevice->wstats.discard.retries++;
+
+ if (int_data->tsr3 & TSR_VALID) {
+ if (int_data->tsr3 & (TSR_TMO | TSR_RETRYTMO))
+ priv->wstats.discard.retries++;
else
- pStats->tx_packets++;
+ stats->tx_packets++;
- BSSvUpdateNodeTxCounter(pDevice,
- pINTData->byTSR3,
- pINTData->byPkt3);
- /*DBG_PRN_GRP01(("TSR3 %02x\n", pINTData->byTSR3));*/
+ BSSvUpdateNodeTxCounter(priv,
+ int_data->tsr3,
+ int_data->pkt3);
}
- if (pINTData->byISR0 != 0) {
- if (pINTData->byISR0 & ISR_BNTX) {
- if (pDevice->eOPMode == OP_MODE_AP) {
- if (pMgmt->byDTIMCount > 0) {
- pMgmt->byDTIMCount--;
- pMgmt->sNodeDBTable[0].bRxPSPoll =
+
+ if (int_data->isr0 != 0) {
+ if (int_data->isr0 & ISR_BNTX) {
+ if (priv->op_mode == NL80211_IFTYPE_AP) {
+ if (mgmt->byDTIMCount > 0) {
+ mgmt->byDTIMCount--;
+ mgmt->sNodeDBTable[0].bRxPSPoll =
false;
- } else if (pMgmt->byDTIMCount == 0) {
+ } else if (mgmt->byDTIMCount == 0) {
/* check if multicast tx buffering */
- pMgmt->byDTIMCount =
- pMgmt->byDTIMPeriod-1;
- pMgmt->sNodeDBTable[0].bRxPSPoll = true;
- if (pMgmt->sNodeDBTable[0].bPSEnable)
- bScheduleCommand((void *) pDevice,
+ mgmt->byDTIMCount =
+ mgmt->byDTIMPeriod-1;
+ mgmt->sNodeDBTable[0].bRxPSPoll = true;
+ if (mgmt->sNodeDBTable[0].bPSEnable)
+ bScheduleCommand((void *) priv,
WLAN_CMD_RX_PSPOLL,
NULL);
}
- bScheduleCommand((void *) pDevice,
+ bScheduleCommand((void *) priv,
WLAN_CMD_BECON_SEND,
NULL);
- } /* if (pDevice->eOPMode == OP_MODE_AP) */
- pDevice->bBeaconSent = true;
+ }
+ priv->bBeaconSent = true;
} else {
- pDevice->bBeaconSent = false;
+ priv->bBeaconSent = false;
}
- if (pINTData->byISR0 & ISR_TBTT) {
- if (pDevice->bEnablePSMode)
- bScheduleCommand((void *) pDevice,
+
+ if (int_data->isr0 & ISR_TBTT) {
+ if (priv->bEnablePSMode)
+ bScheduleCommand((void *) priv,
WLAN_CMD_TBTT_WAKEUP,
NULL);
- if (pDevice->bChannelSwitch) {
- pDevice->byChannelSwitchCount--;
- if (pDevice->byChannelSwitchCount == 0)
- bScheduleCommand((void *) pDevice,
+ if (priv->bChannelSwitch) {
+ priv->byChannelSwitchCount--;
+ if (priv->byChannelSwitchCount == 0)
+ bScheduleCommand((void *) priv,
WLAN_CMD_11H_CHSW,
NULL);
}
}
- pDevice->qwCurrTSF = cpu_to_le64(pINTData->qwTSF);
- /*DBG_PRN_GRP01(("ISR0 = %02x ,
- LoTsf = %08x,
- HiTsf = %08x\n",
- pINTData->byISR0,
- pINTData->dwLoTSF,
- pINTData->dwHiTSF)); */
+ priv->qwCurrTSF = le64_to_cpu(int_data->tsf);
}
- if (pINTData->byISR1 != 0)
- if (pINTData->byISR1 & ISR_GPIO3)
- bScheduleCommand((void *) pDevice,
+
+ if (int_data->isr1 != 0)
+ if (int_data->isr1 & ISR_GPIO3)
+ bScheduleCommand((void *) priv,
WLAN_CMD_RADIO,
NULL);
- pDevice->intBuf.uDataLen = 0;
- pDevice->intBuf.bInUse = false;
- pStats->tx_errors = pDevice->wstats.discard.retries;
- pStats->tx_dropped = pDevice->wstats.discard.retries;
+ priv->int_buf.in_use = false;
+
+ stats->tx_errors = priv->wstats.discard.retries;
+ stats->tx_dropped = priv->wstats.discard.retries;
}
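
Note on the int.c rewrite: the interrupt buffer is now cast to the new `struct vnt_interrupt_data` and the handler reads `tsr0..tsr3`, `isr0` and `isr1` directly; the per-TSR accounting itself is unchanged (count a retry discard on TSR_TMO/TSR_RETRYTMO, otherwise a transmitted packet). The four repeated blocks all reduce to the shape below (helper name and the mask values are illustrative stand-ins, not the driver's definitions):

```c
#include <stdint.h>

/* mask values assumed only so the sketch compiles standalone */
#define TSR_VALID	0x01
#define TSR_TMO		0x04
#define TSR_RETRYTMO	0x08

static void sketch_count_tsr(uint8_t tsr, unsigned long *retries,
			     unsigned long *tx_packets)
{
	if (!(tsr & TSR_VALID))
		return;

	if (tsr & (TSR_TMO | TSR_RETRYTMO))
		(*retries)++;		/* wstats.discard.retries++ in the driver */
	else
		(*tx_packets)++;	/* stats->tx_packets++ in the driver */
}
```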
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
index 8e6e217ba4ff..08db868e1d07 100644
--- a/drivers/staging/vt6656/int.h
+++ b/drivers/staging/vt6656/int.h
@@ -32,29 +32,28 @@
#include "device.h"
-typedef struct tagSINTData {
- u8 byTSR0;
- u8 byPkt0;
- u16 wTime0;
- u8 byTSR1;
- u8 byPkt1;
- u16 wTime1;
- u8 byTSR2;
- u8 byPkt2;
- u16 wTime2;
- u8 byTSR3;
- u8 byPkt3;
- u16 wTime3;
- u64 qwTSF;
- u8 byISR0;
- u8 byISR1;
- u8 byRTSSuccess;
- u8 byRTSFail;
- u8 byACKFail;
- u8 byFCSErr;
- u8 abySW[2];
-} __attribute__ ((__packed__))
-SINTData, *PSINTData;
+struct vnt_interrupt_data {
+ u8 tsr0;
+ u8 pkt0;
+ u16 time0;
+ u8 tsr1;
+ u8 pkt1;
+ u16 time1;
+ u8 tsr2;
+ u8 pkt2;
+ u16 time2;
+ u8 tsr3;
+ u8 pkt3;
+ u16 time3;
+ __le64 tsf;
+ u8 isr0;
+ u8 isr1;
+ u8 rts_success;
+ u8 rts_fail;
+ u8 ack_fail;
+ u8 fcs_err;
+ u8 sw[2];
+} __packed;
void INTvWorkItem(struct vnt_private *);
void INTnsProcessData(struct vnt_private *);
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 3a68dfa23d84..cf4c06a42880 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -41,6 +41,7 @@
#include "wpactl.h"
#include "control.h"
#include "rndis.h"
+#include "baseband.h"
static const long frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484,
@@ -57,7 +58,7 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
struct vnt_private *pDevice = netdev_priv(dev);
long ldBm;
- pDevice->wstats.status = pDevice->eOPMode;
+ pDevice->wstats.status = pDevice->op_mode;
RFvRSSITodBm(pDevice, (u8)(pDevice->uCurrRSSI), &ldBm);
pDevice->wstats.qual.level = ldBm;
pDevice->wstats.qual.noise = 0;
@@ -724,10 +725,10 @@ int iwctl_giwaplist(struct net_device *dev, struct iw_request_info *info,
if (!wrq->pointer)
return -EINVAL;
- sock = kzalloc(sizeof(struct sockaddr) * IW_MAX_AP, GFP_KERNEL);
+ sock = kcalloc(IW_MAX_AP, sizeof(struct sockaddr), GFP_KERNEL);
if (sock == NULL)
return -ENOMEM;
- qual = kzalloc(sizeof(struct iw_quality) * IW_MAX_AP, GFP_KERNEL);
+ qual = kcalloc(IW_MAX_AP, sizeof(struct iw_quality), GFP_KERNEL);
if (qual == NULL) {
kfree(sock);
return -ENOMEM;
@@ -1394,7 +1395,8 @@ int iwctl_giwpower(struct net_device *dev, struct iw_request_info *info,
if (pMgmt == NULL)
return -EFAULT;
- if ((wrq->disabled = (mode == WMAC_POWER_CAM)))
+ wrq->disabled = (mode == WMAC_POWER_CAM);
+ if (wrq->disabled)
return 0;
if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
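
Note on the iwctl.c hunks: the two `kzalloc(sizeof(x) * IW_MAX_AP, ...)` calls become `kcalloc(IW_MAX_AP, sizeof(x), ...)`, which zeroes the arrays the same way but also fails cleanly if count times element size would overflow, and the assignment-inside-if in iwctl_giwpower() is split into two statements. A hedged kernel-style sketch of the array-allocation pattern (function name and error handling shape are illustrative):

```c
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/wireless.h>

static struct sockaddr *sketch_alloc_ap_list(struct iw_quality **qual_out)
{
	struct sockaddr *sock;
	struct iw_quality *qual;

	/* kcalloc(n, size, flags): zeroed array with overflow-checked n*size */
	sock = kcalloc(IW_MAX_AP, sizeof(*sock), GFP_KERNEL);
	if (!sock)
		return NULL;

	qual = kcalloc(IW_MAX_AP, sizeof(*qual), GFP_KERNEL);
	if (!qual) {
		kfree(sock);
		return NULL;
	}

	*qual_out = qual;
	return sock;
}
```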
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 54414ed27191..3ce19ddbc569 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -47,25 +47,19 @@ static int msglevel =MSG_LEVEL_INFO;
*
* Parameters:
* In:
- * uByteidx - Index of Mask
- * byData - Mask Value to write
+ * mc_filter (mac filter)
* Out:
* none
*
* Return Value: none
*
*/
-void MACvWriteMultiAddr(struct vnt_private *pDevice, u32 uByteIdx, u8 byData)
+void MACvWriteMultiAddr(struct vnt_private *pDevice, u64 mc_filter)
{
- u8 byData1;
+ __le64 le_mc = cpu_to_le64(mc_filter);
- byData1 = byData;
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- (u16) (MAC_REG_MAR0 + uByteIdx),
- MESSAGE_REQUEST_MACREG,
- 1,
- &byData1);
+ CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, MAC_REG_MAR0,
+ MESSAGE_REQUEST_MACREG, sizeof(le_mc), (u8 *)&le_mc);
}
/*
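
Note on the mac.c/mac.h change: `MACvWriteMultiAddr()` no longer writes the multicast address registers one byte at a time; it takes the whole 64-bit hash as a `u64`, converts it once with `cpu_to_le64()` and sends it in a single control request starting at MAC_REG_MAR0. With a little-endian layout the least significant byte of the filter lands in MAR0 and the most significant in MAR7. A standalone endianness-only sketch of that conversion (helper and values are stand-ins, not kernel code):

```c
#include <stdint.h>
#include <stdio.h>

/* stand-in for cpu_to_le64(); kernel code uses the macro from <asm/byteorder.h> */
static void sketch_cpu_to_le64(uint64_t v, uint8_t out[8])
{
	int i;

	for (i = 0; i < 8; i++)
		out[i] = (uint8_t)(v >> (8 * i));	/* least significant byte first */
}

int main(void)
{
	uint64_t mc_filter = 0x8000000000000001ULL;	/* illustrative hash bits */
	uint8_t wire[8];
	int i;

	sketch_cpu_to_le64(mc_filter, wire);

	for (i = 0; i < 8; i++)
		printf("MAR%d = %02x\n", i, wire[i]);
	return 0;
}
```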
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index 0db1be5b01c8..4053e431ef99 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -403,7 +403,7 @@
#define MAC_REVISION_A0 0x00
#define MAC_REVISION_A1 0x01
-void MACvWriteMultiAddr(struct vnt_private *, u32, u8);
+void MACvWriteMultiAddr(struct vnt_private *, u64);
void MACbShutdown(struct vnt_private *);
void MACvSetBBType(struct vnt_private *, u8);
void MACvDisableKeyEntry(struct vnt_private *, u32);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 58edcae74efc..3c9323069e01 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -256,7 +256,7 @@ device_set_options(struct vnt_private *pDevice) {
pDevice->byShortPreamble = PREAMBLE_TYPE_DEF;
pDevice->ePSMode = PS_MODE_DEF;
pDevice->b11hEnable = X80211h_MODE_DEF;
- pDevice->eOPMode = OP_MODE_DEF;
+ pDevice->op_mode = NL80211_IFTYPE_UNSPECIFIED;
pDevice->uConnectionRate = DATA_RATE_DEF;
if (pDevice->uConnectionRate < RATE_AUTO) pDevice->bFixRate = true;
pDevice->byBBType = BBP_TYPE_DEF;
@@ -409,7 +409,7 @@ static int device_init_registers(struct vnt_private *pDevice)
if (pDevice->abyCCKPwrTbl[ii] == 0)
pDevice->abyCCKPwrTbl[ii] = pDevice->byCCKPwr;
- pDevice->abyOFDMPwrTbl[ii] =
+ pDevice->abyOFDMPwrTbl[ii] =
pDevice->abyEEPROM[ii + EEP_OFS_OFDM_PWR_TBL];
if (pDevice->abyOFDMPwrTbl[ii] == 0)
pDevice->abyOFDMPwrTbl[ii] = pDevice->byOFDMPwrG;
@@ -749,44 +749,47 @@ err_nomem:
return rc;
}
-static void device_free_tx_bufs(struct vnt_private *pDevice)
+static void device_free_tx_bufs(struct vnt_private *priv)
{
- struct vnt_usb_send_context *pTxContext;
- int ii;
+ struct vnt_usb_send_context *tx_context;
+ int ii;
- for (ii = 0; ii < pDevice->cbTD; ii++) {
+ for (ii = 0; ii < priv->cbTD; ii++) {
+ tx_context = priv->apTD[ii];
+ /* deallocate URBs */
+ if (tx_context->pUrb) {
+ usb_kill_urb(tx_context->pUrb);
+ usb_free_urb(tx_context->pUrb);
+ }
- pTxContext = pDevice->apTD[ii];
- /* deallocate URBs */
- if (pTxContext->pUrb) {
- usb_kill_urb(pTxContext->pUrb);
- usb_free_urb(pTxContext->pUrb);
- }
- kfree(pTxContext);
- }
- return;
+ kfree(tx_context);
+ }
+
+ return;
}
-static void device_free_rx_bufs(struct vnt_private *pDevice)
+static void device_free_rx_bufs(struct vnt_private *priv)
{
- struct vnt_rcb *pRCB;
+ struct vnt_rcb *rcb;
int ii;
- for (ii = 0; ii < pDevice->cbRD; ii++) {
+ for (ii = 0; ii < priv->cbRD; ii++) {
+ rcb = priv->apRCB[ii];
- pRCB = pDevice->apRCB[ii];
- /* deallocate URBs */
- if (pRCB->pUrb) {
- usb_kill_urb(pRCB->pUrb);
- usb_free_urb(pRCB->pUrb);
- }
- /* deallocate skb */
- if (pRCB->skb)
- dev_kfree_skb(pRCB->skb);
- }
- kfree(pDevice->pRCBMem);
+ /* deallocate URBs */
+ if (rcb->pUrb) {
+ usb_kill_urb(rcb->pUrb);
+ usb_free_urb(rcb->pUrb);
+ }
- return;
+ /* deallocate skb */
+ if (rcb->skb)
+ dev_kfree_skb(rcb->skb);
+ }
+
+ kfree(priv->pRCBMem);
+
+ return;
}
static void usb_device_reset(struct vnt_private *pDevice)
@@ -798,95 +801,109 @@ static void usb_device_reset(struct vnt_private *pDevice)
return ;
}
-static void device_free_int_bufs(struct vnt_private *pDevice)
+static void device_free_int_bufs(struct vnt_private *priv)
{
- kfree(pDevice->intBuf.pDataBuf);
- return;
+ kfree(priv->int_buf.data_buf);
+
+ return;
}
-static bool device_alloc_bufs(struct vnt_private *pDevice)
+static bool device_alloc_bufs(struct vnt_private *priv)
{
- struct vnt_usb_send_context *pTxContext;
- struct vnt_rcb *pRCB;
+ struct vnt_usb_send_context *tx_context;
+ struct vnt_rcb *rcb;
int ii;
- for (ii = 0; ii < pDevice->cbTD; ii++) {
+ for (ii = 0; ii < priv->cbTD; ii++) {
+ tx_context = kmalloc(sizeof(struct vnt_usb_send_context),
+ GFP_KERNEL);
+ if (tx_context == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
+ "%s : allocate tx usb context failed\n",
+ priv->dev->name);
+ goto free_tx;
+ }
- pTxContext = kmalloc(sizeof(struct vnt_usb_send_context), GFP_KERNEL);
- if (pTxContext == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : allocate tx usb context failed\n", pDevice->dev->name);
- goto free_tx;
- }
- pDevice->apTD[ii] = pTxContext;
- pTxContext->pDevice = (void *) pDevice;
- /* allocate URBs */
- pTxContext->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
- if (pTxContext->pUrb == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "alloc tx urb failed\n");
- goto free_tx;
- }
- pTxContext->bBoolInUse = false;
- }
+ priv->apTD[ii] = tx_context;
+ tx_context->pDevice = priv;
+
+ /* allocate URBs */
+ tx_context->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (tx_context->pUrb == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR,
+ KERN_ERR "alloc tx urb failed\n");
+ goto free_tx;
+ }
+
+ tx_context->bBoolInUse = false;
+ }
- /* allocate RCB mem */
- pDevice->pRCBMem = kzalloc((sizeof(struct vnt_rcb) * pDevice->cbRD),
+ /* allocate RCB mem */
+ priv->pRCBMem = kzalloc((sizeof(struct vnt_rcb) * priv->cbRD),
GFP_KERNEL);
- if (pDevice->pRCBMem == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : alloc rx usb context failed\n", pDevice->dev->name);
- goto free_tx;
- }
+ if (priv->pRCBMem == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
+ "%s : alloc rx usb context failed\n",
+ priv->dev->name);
+ goto free_tx;
+ }
- pDevice->FirstRecvFreeList = NULL;
- pDevice->LastRecvFreeList = NULL;
- pDevice->FirstRecvMngList = NULL;
- pDevice->LastRecvMngList = NULL;
- pDevice->NumRecvFreeList = 0;
+ priv->FirstRecvFreeList = NULL;
+ priv->LastRecvFreeList = NULL;
+ priv->FirstRecvMngList = NULL;
+ priv->LastRecvMngList = NULL;
+ priv->NumRecvFreeList = 0;
- pRCB = (struct vnt_rcb *)pDevice->pRCBMem;
+ rcb = (struct vnt_rcb *)priv->pRCBMem;
- for (ii = 0; ii < pDevice->cbRD; ii++) {
+ for (ii = 0; ii < priv->cbRD; ii++) {
+ priv->apRCB[ii] = rcb;
+ rcb->pDevice = priv;
- pDevice->apRCB[ii] = pRCB;
- pRCB->pDevice = (void *) pDevice;
- /* allocate URBs */
- pRCB->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
+ /* allocate URBs */
+ rcb->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (rcb->pUrb == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
+ " Failed to alloc rx urb\n");
+ goto free_rx_tx;
+ }
- if (pRCB->pUrb == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to alloc rx urb\n");
- goto free_rx_tx;
- }
- pRCB->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- if (pRCB->skb == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to alloc rx skb\n");
- goto free_rx_tx;
- }
- pRCB->skb->dev = pDevice->dev;
- pRCB->bBoolInUse = false;
- EnqueueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList, pRCB);
- pDevice->NumRecvFreeList++;
- pRCB++;
- }
+ rcb->skb = netdev_alloc_skb(priv->dev, priv->rx_buf_sz);
+ if (rcb->skb == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
+ " Failed to alloc rx skb\n");
+ goto free_rx_tx;
+ }
+
+ rcb->bBoolInUse = false;
+
+ EnqueueRCB(priv->FirstRecvFreeList,
+ priv->LastRecvFreeList, rcb);
- pDevice->pInterruptURB = usb_alloc_urb(0, GFP_ATOMIC);
- if (pDevice->pInterruptURB == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int urb\n");
- goto free_rx_tx;
+ priv->NumRecvFreeList++;
+ rcb++;
}
- pDevice->intBuf.pDataBuf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
- if (pDevice->intBuf.pDataBuf == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int buf\n");
- usb_free_urb(pDevice->pInterruptURB);
- goto free_rx_tx;
+ priv->pInterruptURB = usb_alloc_urb(0, GFP_ATOMIC);
+ if (priv->pInterruptURB == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR"Failed to alloc int urb\n");
+ goto free_rx_tx;
}
- return true;
+ priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
+ if (priv->int_buf.data_buf == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR"Failed to alloc int buf\n");
+ usb_free_urb(priv->pInterruptURB);
+ goto free_rx_tx;
+ }
+
+ return true;
free_rx_tx:
- device_free_rx_bufs(pDevice);
+ device_free_rx_bufs(priv);
free_tx:
- device_free_tx_bufs(pDevice);
+ device_free_tx_bufs(priv);
return false;
}
@@ -931,13 +948,11 @@ static void device_free_frag_bufs(struct vnt_private *pDevice)
int device_alloc_frag_buf(struct vnt_private *pDevice,
PSDeFragControlBlock pDeF)
{
+ pDeF->skb = netdev_alloc_skb(pDevice->dev, pDevice->rx_buf_sz);
+ if (!pDeF->skb)
+ return false;
- pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- if (pDeF->skb == NULL)
- return false;
- pDeF->skb->dev = pDevice->dev;
-
- return true;
+ return true;
}
static int device_open(struct net_device *dev)
@@ -974,8 +989,6 @@ static int device_open(struct net_device *dev)
goto free_all;
}
- device_set_multi(pDevice->dev);
-
/* init for key management */
KeyvInitTable(pDevice,&pDevice->sKey);
memcpy(pDevice->vnt_mgmt.abyMACAddr,
@@ -992,16 +1005,12 @@ static int device_open(struct net_device *dev)
vMgrObjectInit(pDevice);
- tasklet_init(&pDevice->EventWorkItem, (void *)INTvWorkItem, (unsigned long)pDevice);
-
schedule_delayed_work(&pDevice->second_callback_work, HZ);
- pDevice->int_interval = 100; /* max 100 microframes */
+ pDevice->int_interval = 1; /* bInterval is set to 1 */
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
pDevice->bIsRxWorkItemQueued = true;
- pDevice->fKillEventPollingThread = false;
- pDevice->bEventAvailable = false;
pDevice->bWPADEVUp = false;
pDevice->bwextstep0 = false;
@@ -1084,7 +1093,6 @@ static int device_close(struct net_device *dev)
MP_SET_FLAG(pDevice, fMP_DISCONNECTED);
MP_CLEAR_FLAG(pDevice, fMP_POST_WRITES);
MP_CLEAR_FLAG(pDevice, fMP_POST_READS);
- pDevice->fKillEventPollingThread = true;
cancel_delayed_work_sync(&pDevice->run_command_work);
cancel_delayed_work_sync(&pDevice->second_callback_work);
@@ -1098,8 +1106,6 @@ static int device_close(struct net_device *dev)
cancel_work_sync(&pDevice->rx_mng_work_item);
cancel_work_sync(&pDevice->read_work_item);
- tasklet_kill(&pDevice->EventWorkItem);
-
pDevice->bRoaming = false;
pDevice->bIsRoaming = false;
pDevice->bEnableRoaming = false;
@@ -1350,69 +1356,73 @@ static int Read_config_file(struct vnt_private *pDevice)
static void device_set_multi(struct net_device *dev)
{
- struct vnt_private *pDevice = netdev_priv(dev);
- struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ if (priv->flags & DEVICE_FLAGS_OPENED) {
+ spin_lock_irqsave(&priv->lock, flags);
+
+ bScheduleCommand(priv, WLAN_CMD_CONFIGURE_FILTER, NULL);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+}
+
+void vnt_configure_filter(struct vnt_private *priv)
+{
+ struct net_device *dev = priv->dev;
+ struct vnt_manager *mgmt = &priv->vnt_mgmt;
struct netdev_hw_addr *ha;
- u32 mc_filter[2];
- int ii;
- u8 pbyData[8] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- u8 byTmpMode = 0;
+ u64 mc_filter = 0;
+ u8 tmp = 0;
int rc;
- spin_lock_irq(&pDevice->lock);
- rc = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- MAC_REG_RCR,
- MESSAGE_REQUEST_MACREG,
- 1,
- &byTmpMode
- );
- if (rc == 0) pDevice->byRxMode = byTmpMode;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode in= %x\n", pDevice->byRxMode);
-
- if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
- DBG_PRT(MSG_LEVEL_ERR,KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
- /* unconditionally log net taps */
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
- }
- else if ((netdev_mc_count(dev) > pDevice->multicast_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- MAC_REG_MAR0,
- MESSAGE_REQUEST_MACREG,
- 8,
- pbyData
- );
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
- }
- else {
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
- }
- for (ii = 0; ii < 4; ii++) {
- MACvWriteMultiAddr(pDevice, ii, *((u8 *)&mc_filter[0] + ii));
- MACvWriteMultiAddr(pDevice, ii+ 4, *((u8 *)&mc_filter[1] + ii));
- }
- pDevice->byRxMode &= ~(RCR_UNICAST);
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
- }
+ rc = CONTROLnsRequestIn(priv, MESSAGE_TYPE_READ,
+ MAC_REG_RCR, MESSAGE_REQUEST_MACREG, 1, &tmp);
+ if (rc == 0)
+ priv->byRxMode = tmp;
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "priv->byRxMode in= %x\n",
+ priv->byRxMode);
+
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ DBG_PRT(MSG_LEVEL_ERR, KERN_NOTICE
+ "%s: Promiscuous mode enabled.\n", dev->name);
+ /* unconditionally log net taps */
+ priv->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
+ } else if ((netdev_mc_count(dev) > priv->multicast_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ mc_filter = ~0x0;
+ MACvWriteMultiAddr(priv, mc_filter);
+
+ priv->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
+ } else {
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- /*
- * If AP mode, don't enable RCR_UNICAST since HW only compares
- * addr1 with local MAC
- */
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
- pDevice->byRxMode &= ~(RCR_UNICAST);
- }
- ControlvWriteByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_RCR, pDevice->byRxMode);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode out= %x\n", pDevice->byRxMode);
- spin_unlock_irq(&pDevice->lock);
+ mc_filter |= 1ULL << (bit_nr & 0x3f);
+ }
+
+ MACvWriteMultiAddr(priv, mc_filter);
+
+ priv->byRxMode &= ~(RCR_UNICAST);
+ priv->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
+ }
+
+ if (mgmt->eConfigMode == WMAC_CONFIG_AP) {
+ /*
+ * If AP mode, don't enable RCR_UNICAST since HW only compares
+ * addr1 with local MAC
+ */
+ priv->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
+ priv->byRxMode &= ~(RCR_UNICAST);
+ }
+
+ ControlvWriteByte(priv, MESSAGE_REQUEST_MACREG,
+ MAC_REG_RCR, priv->byRxMode);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "priv->byRxMode out= %x\n", priv->byRxMode);
}
static struct net_device_stats *device_get_stats(struct net_device *dev)
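
Note on the main_usb.c hunks: device_set_multi() now only schedules a WLAN_CMD_CONFIGURE_FILTER command under the lock, and the actual RX-filter programming moves into the new vnt_configure_filter(). The old two-element `u32 mc_filter[2]` array becomes a single `u64` bitmap: each multicast address still hashes to the top six bits of its Ethernet CRC, but the bucket bit is set with `1ULL << (bit_nr & 0x3f)` and the whole filter is written in one MACvWriteMultiAddr() call. A standalone sketch of the hash-to-bitmap step; the CRC helper below is a plain MSB-first CRC-32 stand-in for the kernel's ether_crc(), so treat the exact bucket values as illustrative:

```c
#include <stdint.h>
#include <stdio.h>

/* bit-by-bit CRC-32, polynomial 0x04c11db7, init 0xffffffff, MSB first;
 * included only to make the sketch self-contained */
static uint32_t sketch_ether_crc(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= (uint32_t)data[i] << 24;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80000000) ?
				(crc << 1) ^ 0x04c11db7 : crc << 1;
	}
	return crc;
}

int main(void)
{
	/* illustrative multicast addresses */
	const uint8_t mc[][6] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 },
	};
	uint64_t mc_filter = 0;
	unsigned int i;

	for (i = 0; i < sizeof(mc) / sizeof(mc[0]); i++) {
		int bit_nr = sketch_ether_crc(6, mc[i]) >> 26;	/* top 6 bits */

		mc_filter |= 1ULL << (bit_nr & 0x3f);	/* one bit per hash bucket */
	}

	printf("mc_filter = %016llx\n", (unsigned long long)mc_filter);
	return 0;
}
```

The all-multicast and promiscuous branches keep the same structure as before; the all-multicast case simply writes `~0x0` as the filter.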
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index e7d5487d1041..43da58927cd2 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -67,22 +67,23 @@ void PSvEnablePowerSaving(struct vnt_private *pDevice, u16 wListenInterval)
/* set period of power up before TBTT */
MACvWriteWord(pDevice, MAC_REG_PWBT, C_PWBT);
- if (pDevice->eOPMode != OP_MODE_ADHOC) {
+ if (pDevice->op_mode != NL80211_IFTYPE_ADHOC) {
/* set AID */
MACvWriteWord(pDevice, MAC_REG_AIDATIM, wAID);
- } else {
- /* set ATIM Window */
- /* MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow); */
}
- /* Warren:06-18-2004,the sequence must follow PSEN->AUTOSLEEP->GO2DOZE */
+ /* Warren:06-18-2004,the sequence must follow
+ * PSEN->AUTOSLEEP->GO2DOZE
+ */
/* enable power saving hw function */
MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_PSEN);
/* Set AutoSleep */
MACvRegBitsOn(pDevice, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
- /* Warren:MUST turn on this once before turn on AUTOSLEEP ,or the AUTOSLEEP doesn't work */
+ /* Warren:MUST turn on this once before turn on AUTOSLEEP ,or the
+ * AUTOSLEEP doesn't work
+ */
MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_GO2DOZE);
if (wListenInterval >= 2) {
@@ -105,8 +106,10 @@ void PSvEnablePowerSaving(struct vnt_private *pDevice, u16 wListenInterval)
pDevice->bEnablePSMode = true;
- /* We don't send null pkt in ad hoc mode since beacon will handle this. */
- if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)
+ /* We don't send null pkt in ad hoc mode
+ * since beacon will handle this.
+ */
+ if (pDevice->op_mode == NL80211_IFTYPE_STATION)
PSbSendNullPacket(pDevice);
pDevice->bPWBitOn = true;
@@ -137,7 +140,7 @@ void PSvDisablePowerSaving(struct vnt_private *pDevice)
MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_ALBCN);
pDevice->bEnablePSMode = false;
- if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)
+ if (pDevice->op_mode == NL80211_IFTYPE_STATION)
PSbSendNullPacket(pDevice);
pDevice->bPWBitOn = false;
@@ -226,15 +229,19 @@ void PSvSendPSPOLL(struct vnt_private *pDevice)
WLAN_SET_FC_PWRMGT(0)
));
- pTxPacket->p80211Header->sA2.wDurationID = pMgmt->wCurrAID | BIT14 | BIT15;
- memcpy(pTxPacket->p80211Header->sA2.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA2.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
+ pTxPacket->p80211Header->sA2.wDurationID =
+ pMgmt->wCurrAID | BIT14 | BIT15;
+ memcpy(pTxPacket->p80211Header->sA2.abyAddr1, pMgmt->abyCurrBSSID,
+ WLAN_ADDR_LEN);
+ memcpy(pTxPacket->p80211Header->sA2.abyAddr2, pMgmt->abyMACAddr,
+ WLAN_ADDR_LEN);
pTxPacket->cbMPDULen = WLAN_HDR_ADDR2_LEN;
pTxPacket->cbPayloadLen = 0;
/* log failure if sending failed */
if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send PS-Poll packet failed..\n");
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Send PS-Poll packet failed..\n");
}
/*
@@ -276,16 +283,21 @@ int PSbSendNullPacket(struct vnt_private *pDevice)
pTxPacket->p80211Header->sA3.wFrameCtl = cpu_to_le16(flags);
if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA)
- pTxPacket->p80211Header->sA3.wFrameCtl |= cpu_to_le16((u16)WLAN_SET_FC_TODS(1));
-
- memcpy(pTxPacket->p80211Header->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
+ pTxPacket->p80211Header->sA3.wFrameCtl |=
+ cpu_to_le16((u16)WLAN_SET_FC_TODS(1));
+
+ memcpy(pTxPacket->p80211Header->sA3.abyAddr1, pMgmt->abyCurrBSSID,
+ WLAN_ADDR_LEN);
+ memcpy(pTxPacket->p80211Header->sA3.abyAddr2, pMgmt->abyMACAddr,
+ WLAN_ADDR_LEN);
+ memcpy(pTxPacket->p80211Header->sA3.abyAddr3, pMgmt->abyCurrBSSID,
+ WLAN_BSSID_LEN);
pTxPacket->cbMPDULen = WLAN_HDR_ADDR3_LEN;
pTxPacket->cbPayloadLen = 0;
/* log error if sending failed */
if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send Null Packet failed !\n");
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Send Null Packet failed !\n");
return false;
}
return true;
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 51fff896fcb5..3840323858fc 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -118,7 +118,7 @@ static void s_vSWencryption(struct vnt_private *pDevice,
static unsigned int s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
u32 cbFrameLength, u16 wRate, int bNeedAck);
-static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
+static __le16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
u8 rsv_type, u8 pkt_type, u32 frame_lenght, u16 current_rate);
static u16 s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
@@ -129,10 +129,10 @@ static u16 s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
union vnt_tx_data_head *head, u32 cbFrameLength, int bNeedAck,
struct ethhdr *psEthHeader, u16 wCurrentRate, u8 byFBOption);
-static u16 s_uGetDataDuration(struct vnt_private *pDevice,
+static __le16 s_uGetDataDuration(struct vnt_private *pDevice,
u8 byPktType, int bNeedAck);
-static u16 s_uGetRTSCTSDuration(struct vnt_private *pDevice,
+static __le16 s_uGetRTSCTSDuration(struct vnt_private *pDevice,
u8 byDurType, u32 cbFrameLength, u8 byPktType, u16 wRate,
int bNeedAck, u8 byFBOption);
@@ -327,7 +327,7 @@ static void s_vSWencryption(struct vnt_private *pDevice,
}
}
-static u16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
+static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
{
return cpu_to_le16(wTimeStampOff[priv->byPreambleType % 2]
[rate % MAX_RATE]);
@@ -359,7 +359,7 @@ static u32 s_uGetTxRsvTime(struct vnt_private *priv, u8 pkt_type,
return data_time;
}
-static u16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
+static __le16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
u32 frame_length, u16 rate, int need_ack)
{
return cpu_to_le16((u16)s_uGetTxRsvTime(priv, pkt_type,
@@ -367,7 +367,7 @@ static u16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
+static __le16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
u8 rsv_type, u8 pkt_type, u32 frame_lenght, u16 current_rate)
{
u32 rrv_time, rts_time, cts_time, ack_time, data_time;
@@ -402,7 +402,7 @@ static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
rrv_time = cts_time + ack_time + data_time + 2 * priv->uSIFS;
- return rrv_time;
+ return cpu_to_le16((u16)rrv_time);
}
rrv_time = rts_time + cts_time + ack_time + data_time + 3 * priv->uSIFS;
@@ -411,7 +411,7 @@ static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
}
//byFreqType 0: 5GHz, 1:2.4Ghz
-static u16 s_uGetDataDuration(struct vnt_private *pDevice,
+static __le16 s_uGetDataDuration(struct vnt_private *pDevice,
u8 byPktType, int bNeedAck)
{
u32 uAckTime = 0;
@@ -430,7 +430,7 @@ static u16 s_uGetDataDuration(struct vnt_private *pDevice,
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static u16 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
+static __le16 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
u32 cbFrameLength, u8 byPktType, u16 wRate, int bNeedAck,
u8 byFBOption)
{
@@ -481,14 +481,14 @@ static u16 vnt_rxtx_datahead_g(struct vnt_private *priv, u8 pkt_type, u16 rate,
PK_TYPE_11B, &buf->b);
/* Get Duration and TimeStamp */
- buf->wDuration_a = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wDuration_b = s_uGetDataDuration(priv, PK_TYPE_11B, need_ack);
+ buf->duration_a = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_b = s_uGetDataDuration(priv, PK_TYPE_11B, need_ack);
- buf->wTimeStampOff_a = vnt_time_stamp_off(priv, rate);
- buf->wTimeStampOff_b = vnt_time_stamp_off(priv,
+ buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate);
+ buf->time_stamp_off_b = vnt_time_stamp_off(priv,
priv->byTopCCKBasicRate);
- return buf->wDuration_a;
+ return le16_to_cpu(buf->duration_a);
}
static u16 vnt_rxtx_datahead_g_fb(struct vnt_private *priv, u8 pkt_type,
@@ -502,17 +502,17 @@ static u16 vnt_rxtx_datahead_g_fb(struct vnt_private *priv, u8 pkt_type,
PK_TYPE_11B, &buf->b);
/* Get Duration and TimeStamp */
- buf->wDuration_a = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wDuration_b = s_uGetDataDuration(priv, PK_TYPE_11B, need_ack);
+ buf->duration_a = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_b = s_uGetDataDuration(priv, PK_TYPE_11B, need_ack);
- buf->wDuration_a_f0 = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wDuration_a_f1 = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_a_f0 = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_a_f1 = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wTimeStampOff_a = vnt_time_stamp_off(priv, rate);
- buf->wTimeStampOff_b = vnt_time_stamp_off(priv,
+ buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate);
+ buf->time_stamp_off_b = vnt_time_stamp_off(priv,
priv->byTopCCKBasicRate);
- return buf->wDuration_a;
+ return le16_to_cpu(buf->duration_a);
}
static u16 vnt_rxtx_datahead_a_fb(struct vnt_private *priv, u8 pkt_type,
@@ -522,14 +522,14 @@ static u16 vnt_rxtx_datahead_a_fb(struct vnt_private *priv, u8 pkt_type,
/* Get SignalField,ServiceField,Length */
BBvCalculateParameter(priv, frame_len, rate, pkt_type, &buf->a);
/* Get Duration and TimeStampOff */
- buf->wDuration = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wDuration_f0 = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wDuration_f1 = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_f0 = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_f1 = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wTimeStampOff = vnt_time_stamp_off(priv, rate);
+ buf->time_stamp_off = vnt_time_stamp_off(priv, rate);
- return buf->wDuration;
+ return le16_to_cpu(buf->duration);
}
static u16 vnt_rxtx_datahead_ab(struct vnt_private *priv, u8 pkt_type,
@@ -539,26 +539,27 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_private *priv, u8 pkt_type,
/* Get SignalField,ServiceField,Length */
BBvCalculateParameter(priv, frame_len, rate, pkt_type, &buf->ab);
/* Get Duration and TimeStampOff */
- buf->wDuration = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wTimeStampOff = vnt_time_stamp_off(priv, rate);
+ buf->time_stamp_off = vnt_time_stamp_off(priv, rate);
- return buf->wDuration;
+ return le16_to_cpu(buf->duration);
}
static int vnt_fill_ieee80211_rts(struct vnt_private *priv,
struct ieee80211_rts *rts, struct ethhdr *eth_hdr,
- u16 duration)
+ __le16 duration)
{
rts->duration = duration;
rts->frame_control = TYPE_CTL_RTS;
- if (priv->eOPMode == OP_MODE_ADHOC || priv->eOPMode == OP_MODE_AP)
+ if (priv->op_mode == NL80211_IFTYPE_ADHOC ||
+ priv->op_mode == NL80211_IFTYPE_AP)
memcpy(rts->ra, eth_hdr->h_dest, ETH_ALEN);
else
memcpy(rts->ra, priv->abyBSSID, ETH_ALEN);
- if (priv->eOPMode == OP_MODE_AP)
+ if (priv->op_mode == NL80211_IFTYPE_AP)
memcpy(rts->ta, priv->abyBSSID, ETH_ALEN);
else
memcpy(rts->ta, eth_hdr->h_source, ETH_ALEN);
@@ -578,14 +579,14 @@ static u16 vnt_rxtx_rts_g_head(struct vnt_private *priv,
BBvCalculateParameter(priv, rts_frame_len,
priv->byTopOFDMBasicRate, pkt_type, &buf->a);
- buf->wDuration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
+ buf->duration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
PK_TYPE_11B, priv->byTopCCKBasicRate, need_ack, fb_option);
- buf->wDuration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ buf->duration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- buf->wDuration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
+ buf->duration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration_aa);
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->duration_aa);
return vnt_rxtx_datahead_g(priv, pkt_type, current_rate,
&buf->data_head, frame_len, need_ack);
@@ -604,24 +605,24 @@ static u16 vnt_rxtx_rts_g_fb_head(struct vnt_private *priv,
priv->byTopOFDMBasicRate, pkt_type, &buf->a);
- buf->wDuration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
+ buf->duration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
PK_TYPE_11B, priv->byTopCCKBasicRate, need_ack, fb_option);
- buf->wDuration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ buf->duration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- buf->wDuration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
+ buf->duration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- buf->wRTSDuration_ba_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F0,
+ buf->rts_duration_ba_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F0,
frame_len, pkt_type, priv->tx_rate_fb0, need_ack, fb_option);
- buf->wRTSDuration_aa_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
+ buf->rts_duration_aa_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
frame_len, pkt_type, priv->tx_rate_fb0, need_ack, fb_option);
- buf->wRTSDuration_ba_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F1,
+ buf->rts_duration_ba_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F1,
frame_len, pkt_type, priv->tx_rate_fb1, need_ack, fb_option);
- buf->wRTSDuration_aa_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
+ buf->rts_duration_aa_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
frame_len, pkt_type, priv->tx_rate_fb1, need_ack, fb_option);
- vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration_aa);
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->duration_aa);
return vnt_rxtx_datahead_g_fb(priv, pkt_type, current_rate,
&buf->data_head, frame_len, need_ack);
@@ -637,10 +638,10 @@ static u16 vnt_rxtx_rts_ab_head(struct vnt_private *priv,
BBvCalculateParameter(priv, rts_frame_len,
priv->byTopOFDMBasicRate, pkt_type, &buf->ab);
- buf->wDuration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ buf->duration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration);
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->duration);
return vnt_rxtx_datahead_ab(priv, pkt_type, current_rate,
&buf->data_head, frame_len, need_ack);
@@ -656,16 +657,16 @@ static u16 vnt_rxtx_rts_a_fb_head(struct vnt_private *priv,
BBvCalculateParameter(priv, rts_frame_len,
priv->byTopOFDMBasicRate, pkt_type, &buf->a);
- buf->wDuration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ buf->duration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- buf->wRTSDuration_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
+ buf->rts_duration_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
frame_len, pkt_type, priv->tx_rate_fb0, need_ack, fb_option);
- buf->wRTSDuration_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
+ buf->rts_duration_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
frame_len, pkt_type, priv->tx_rate_fb1, need_ack, fb_option);
- vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration);
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->duration);
return vnt_rxtx_datahead_a_fb(priv, pkt_type, current_rate,
&buf->data_head, frame_len, need_ack);
@@ -727,19 +728,19 @@ static u16 s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
/* Get SignalField,ServiceField,Length */
BBvCalculateParameter(pDevice, uCTSFrameLen,
pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
- pBuf->wDuration_ba = s_uGetRTSCTSDuration(pDevice, CTSDUR_BA,
+ pBuf->duration_ba = s_uGetRTSCTSDuration(pDevice, CTSDUR_BA,
cbFrameLength, byPktType,
wCurrentRate, bNeedAck, byFBOption);
/* Get CTSDuration_ba_f0 */
- pBuf->wCTSDuration_ba_f0 = s_uGetRTSCTSDuration(pDevice,
+ pBuf->cts_duration_ba_f0 = s_uGetRTSCTSDuration(pDevice,
CTSDUR_BA_F0, cbFrameLength, byPktType,
pDevice->tx_rate_fb0, bNeedAck, byFBOption);
/* Get CTSDuration_ba_f1 */
- pBuf->wCTSDuration_ba_f1 = s_uGetRTSCTSDuration(pDevice,
+ pBuf->cts_duration_ba_f1 = s_uGetRTSCTSDuration(pDevice,
CTSDUR_BA_F1, cbFrameLength, byPktType,
pDevice->tx_rate_fb1, bNeedAck, byFBOption);
/* Get CTS Frame body */
- pBuf->data.duration = pBuf->wDuration_ba;
+ pBuf->data.duration = pBuf->duration_ba;
pBuf->data.frame_control = TYPE_CTL_CTS;
memcpy(pBuf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
@@ -751,11 +752,11 @@ static u16 s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
BBvCalculateParameter(pDevice, uCTSFrameLen,
pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
/* Get CTSDuration_ba */
- pBuf->wDuration_ba = s_uGetRTSCTSDuration(pDevice,
+ pBuf->duration_ba = s_uGetRTSCTSDuration(pDevice,
CTSDUR_BA, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, byFBOption);
/*Get CTS Frame body*/
- pBuf->data.duration = pBuf->wDuration_ba;
+ pBuf->data.duration = pBuf->duration_ba;
pBuf->data.frame_control = TYPE_CTL_CTS;
memcpy(pBuf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
@@ -815,16 +816,16 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_rts *pBuf =
&tx_buffer->tx_head.tx_rts.rts;
- pBuf->wRTSTxRrvTime_aa = s_uGetRTSCTSRsvTime(pDevice, 2,
+ pBuf->rts_rrv_time_aa = s_uGetRTSCTSRsvTime(pDevice, 2,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wRTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 1,
+ pBuf->rts_rrv_time_ba = s_uGetRTSCTSRsvTime(pDevice, 1,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wRTSTxRrvTime_bb = s_uGetRTSCTSRsvTime(pDevice, 0,
+ pBuf->rts_rrv_time_bb = s_uGetRTSCTSRsvTime(pDevice, 0,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time_a = vnt_rxtx_rsvtime_le16(pDevice,
byPktType, cbFrameSize, wCurrentRate, bNeedACK);
- pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time_b = vnt_rxtx_rsvtime_le16(pDevice,
PK_TYPE_11B, cbFrameSize,
pDevice->byTopCCKBasicRate, bNeedACK);
@@ -845,13 +846,13 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_cts *pBuf = &tx_buffer->
tx_head.tx_cts.cts;
- pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time_a = vnt_rxtx_rsvtime_le16(pDevice,
byPktType, cbFrameSize, wCurrentRate, bNeedACK);
- pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time_b = vnt_rxtx_rsvtime_le16(pDevice,
PK_TYPE_11B, cbFrameSize,
pDevice->byTopCCKBasicRate, bNeedACK);
- pBuf->wCTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 3,
+ pBuf->cts_rrv_time_ba = s_uGetRTSCTSRsvTime(pDevice, 3,
byPktType, cbFrameSize, wCurrentRate);
if (need_mic) {
@@ -879,10 +880,10 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_ab *pBuf = &tx_buffer->
tx_head.tx_ab.ab;
- pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 2,
+ pBuf->rts_rrv_time = s_uGetRTSCTSRsvTime(pDevice, 2,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice,
byPktType, cbFrameSize, wCurrentRate, bNeedACK);
/* Fill RTS */
@@ -893,7 +894,7 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_ab *pBuf = &tx_buffer->
tx_head.tx_ab.ab;
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice,
PK_TYPE_11A, cbFrameSize,
wCurrentRate, bNeedACK);
@@ -913,10 +914,10 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_ab *pBuf = &tx_buffer->
tx_head.tx_ab.ab;
- pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 0,
+ pBuf->rts_rrv_time = s_uGetRTSCTSRsvTime(pDevice, 0,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice,
PK_TYPE_11B, cbFrameSize, wCurrentRate,
bNeedACK);
@@ -928,7 +929,7 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_ab *pBuf = &tx_buffer->
tx_head.tx_ab.ab;
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice,
PK_TYPE_11B, cbFrameSize,
wCurrentRate, bNeedACK);
@@ -991,8 +992,8 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
//Set packet type
pTxBufHead->wFIFOCtl |= (u16)(byPktType<<8);
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
+ if (pDevice->op_mode == NL80211_IFTYPE_ADHOC ||
+ pDevice->op_mode == NL80211_IFTYPE_AP) {
if (is_multicast_ether_addr(psEthHeader->h_dest)) {
bNeedACK = false;
pTxBufHead->wFIFOCtl =
@@ -1292,7 +1293,7 @@ static void s_vGenerateMACHeader(struct vnt_private *pDevice,
pMACHeader->frame_control = TYPE_802_11_DATA;
- if (pDevice->eOPMode == OP_MODE_AP) {
+ if (pDevice->op_mode == NL80211_IFTYPE_AP) {
memcpy(&(pMACHeader->addr1[0]),
&(psEthHeader->h_dest[0]),
ETH_ALEN);
@@ -1302,7 +1303,7 @@ static void s_vGenerateMACHeader(struct vnt_private *pDevice,
ETH_ALEN);
pMACHeader->frame_control |= FC_FROMDS;
} else {
- if (pDevice->eOPMode == OP_MODE_ADHOC) {
+ if (pDevice->op_mode == NL80211_IFTYPE_ADHOC) {
memcpy(&(pMACHeader->addr1[0]),
&(psEthHeader->h_dest[0]),
ETH_ALEN);
@@ -1541,8 +1542,8 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
pbyIVHead = (u8 *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding);
pbyPayloadHead = (u8 *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding + cbIVlen);
do {
- if ((pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
- (pDevice->bLinkPass == true)) {
+ if (pDevice->op_mode == NL80211_IFTYPE_STATION &&
+ pDevice->bLinkPass == true) {
pbyBSSID = pDevice->abyBSSID;
// get pairwise key
if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
@@ -1560,7 +1561,7 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
pbyBSSID = pDevice->abyBroadcastAddr;
if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KEY is NULL. OP Mode[%d]\n", pDevice->eOPMode);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KEY is NULL. OP Mode[%d]\n", pDevice->op_mode);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get GTK.\n");
}
@@ -1592,14 +1593,14 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
struct vnt_tx_datahead_g *data_head = &pTX_Buffer->tx_head.
tx_cts.tx.head.cts_g.data_head;
- data_head->wDuration_a =
+ data_head->duration_a =
cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- data_head->wDuration_b =
+ data_head->duration_b =
cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
} else {
struct vnt_tx_datahead_ab *data_head = &pTX_Buffer->tx_head.
tx_ab.tx.head.data_head_ab;
- data_head->wDuration =
+ data_head->duration =
cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
}
}
@@ -1609,7 +1610,7 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
pTX_Buffer->byType = 0x00;
pContext->pPacket = NULL;
- pContext->Type = CONTEXT_MGMT_PACKET;
+ pContext->type = CONTEXT_MGMT_PACKET;
pContext->uBufLen = (u16)cbReqCount + 4; //USB header
if (WLAN_GET_FC_TODS(pMACHeader->frame_control) == 0) {
@@ -1701,7 +1702,7 @@ CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice,
pTX_Buffer->byType = 0x01;
pContext->pPacket = NULL;
- pContext->Type = CONTEXT_MGMT_PACKET;
+ pContext->type = CONTEXT_MGMT_PACKET;
pContext->uBufLen = (u16)cbReqCount + 4; //USB header
PIPEnsSendBulkOut(pDevice,pContext);
@@ -2032,14 +2033,14 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
struct vnt_tx_datahead_g *data_head = &pTX_Buffer->tx_head.
tx_cts.tx.head.cts_g.data_head;
- data_head->wDuration_a =
+ data_head->duration_a =
cpu_to_le16(p80211Header->sA2.wDurationID);
- data_head->wDuration_b =
+ data_head->duration_b =
cpu_to_le16(p80211Header->sA2.wDurationID);
} else {
struct vnt_tx_datahead_ab *data_head = &pTX_Buffer->tx_head.
tx_ab.tx.head.data_head_ab;
- data_head->wDuration =
+ data_head->duration =
cpu_to_le16(p80211Header->sA2.wDurationID);
}
}
@@ -2049,7 +2050,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
pTX_Buffer->byType = 0x00;
pContext->pPacket = skb;
- pContext->Type = CONTEXT_MGMT_PACKET;
+ pContext->type = CONTEXT_MGMT_PACKET;
pContext->uBufLen = (u16)cbReqCount + 4; //USB header
if (WLAN_GET_FC_TODS(pMACHeader->frame_control) == 0) {
@@ -2305,7 +2306,7 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
else {
- if (pDevice->eOPMode == OP_MODE_ADHOC) {
+ if (pDevice->op_mode == NL80211_IFTYPE_ADHOC) {
// Adhoc Tx rate decided from node DB
if (is_multicast_ether_addr(pDevice->sTxEthHeader.h_dest)) {
// Multicast use highest data rate
@@ -2336,7 +2337,7 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
}
- if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) {
+ if (pDevice->op_mode == NL80211_IFTYPE_STATION) {
// Infra STA rate decided from AP Node, index = 0
pDevice->wCurrentRate = pMgmt->sNodeDBTable[0].wTxDataRate;
}
@@ -2439,11 +2440,11 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
pTX_Buffer->wTxByteCount = (u16)BytesToWrite;
pContext->pPacket = skb;
- pContext->Type = CONTEXT_DATA_PACKET;
+ pContext->type = CONTEXT_DATA_PACKET;
pContext->uBufLen = (u16)BytesToWrite + 4 ; //USB header
s_vSaveTxPktInfo(pDevice, (u8)(pTX_Buffer->byPKTNO & 0x0F),
- &pContext->sEthHeader.h_dest[0],
+ &pDevice->sTxEthHeader.h_dest[0],
(u16)(BytesToWrite-uHeaderLen),
pTX_Buffer->fifo_head.wFIFOCtl);
@@ -2593,11 +2594,11 @@ int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
pTX_Buffer->wTxByteCount = (u16)BytesToWrite;
pContext->pPacket = NULL;
- pContext->Type = CONTEXT_DATA_PACKET;
+ pContext->type = CONTEXT_DATA_PACKET;
pContext->uBufLen = (u16)BytesToWrite + 4 ; //USB header
s_vSaveTxPktInfo(pDevice, (u8)(pTX_Buffer->byPKTNO & 0x0F),
- &pContext->sEthHeader.h_dest[0],
+ &pDevice->sTxEthHeader.h_dest[0],
(u16)(BytesToWrite - uHeaderLen),
pTX_Buffer->fifo_head.wFIFOCtl);
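
A minimal sketch, not part of the patch, of the check the rxtx.c hunks above converge on: op_mode now holds an enum nl80211_iftype value (from <linux/nl80211.h>), so mode tests compare directly against NL80211_IFTYPE_* constants instead of the driver-private OP_MODE_* enum. The helper name and its multicast argument are invented for illustration only.

#include <linux/nl80211.h>
#include <linux/types.h>

/* Illustrative helper (not in the driver): decide whether a frame needs
 * an ACK, given the nl80211 interface type now stored in op_mode. */
static bool example_needs_ack(enum nl80211_iftype op_mode, bool is_multicast)
{
	/* AP and ad-hoc interfaces do not expect an ACK for group frames */
	if (op_mode == NL80211_IFTYPE_ADHOC || op_mode == NL80211_IFTYPE_AP)
		return !is_multicast;

	return true;
}
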
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index b3ee6d01aa88..6d6539d29d04 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -53,68 +53,68 @@ struct vnt_mic_hdr {
/* RsvTime buffer header */
struct vnt_rrv_time_rts {
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
+ __le16 rts_rrv_time_ba;
+ __le16 rts_rrv_time_aa;
+ __le16 rts_rrv_time_bb;
u16 wReserved;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
+ __le16 rrv_time_b;
+ __le16 rrv_time_a;
} __packed;
struct vnt_rrv_time_cts {
- u16 wCTSTxRrvTime_ba;
+ __le16 cts_rrv_time_ba;
u16 wReserved;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
+ __le16 rrv_time_b;
+ __le16 rrv_time_a;
} __packed;
struct vnt_rrv_time_ab {
- u16 wRTSTxRrvTime;
- u16 wTxRrvTime;
+ __le16 rts_rrv_time;
+ __le16 rrv_time;
} __packed;
/* TX data header */
struct vnt_tx_datahead_g {
struct vnt_phy_field b;
struct vnt_phy_field a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
+ __le16 duration_b;
+ __le16 duration_a;
+ __le16 time_stamp_off_b;
+ __le16 time_stamp_off_a;
} __packed;
struct vnt_tx_datahead_g_fb {
struct vnt_phy_field b;
struct vnt_phy_field a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
+ __le16 duration_b;
+ __le16 duration_a;
+ __le16 duration_a_f0;
+ __le16 duration_a_f1;
+ __le16 time_stamp_off_b;
+ __le16 time_stamp_off_a;
} __packed;
struct vnt_tx_datahead_ab {
struct vnt_phy_field ab;
- u16 wDuration;
- u16 wTimeStampOff;
+ __le16 duration;
+ __le16 time_stamp_off;
} __packed;
struct vnt_tx_datahead_a_fb {
struct vnt_phy_field a;
- u16 wDuration;
- u16 wTimeStampOff;
- u16 wDuration_f0;
- u16 wDuration_f1;
+ __le16 duration;
+ __le16 time_stamp_off;
+ __le16 duration_f0;
+ __le16 duration_f1;
} __packed;
/* RTS buffer header */
struct vnt_rts_g {
struct vnt_phy_field b;
struct vnt_phy_field a;
- u16 wDuration_ba;
- u16 wDuration_aa;
- u16 wDuration_bb;
+ __le16 duration_ba;
+ __le16 duration_aa;
+ __le16 duration_bb;
u16 wReserved;
struct ieee80211_rts data;
struct vnt_tx_datahead_g data_head;
@@ -123,21 +123,21 @@ struct vnt_rts_g {
struct vnt_rts_g_fb {
struct vnt_phy_field b;
struct vnt_phy_field a;
- u16 wDuration_ba;
- u16 wDuration_aa;
- u16 wDuration_bb;
+ __le16 duration_ba;
+ __le16 duration_aa;
+ __le16 duration_bb;
u16 wReserved;
- u16 wRTSDuration_ba_f0;
- u16 wRTSDuration_aa_f0;
- u16 wRTSDuration_ba_f1;
- u16 wRTSDuration_aa_f1;
+ __le16 rts_duration_ba_f0;
+ __le16 rts_duration_aa_f0;
+ __le16 rts_duration_ba_f1;
+ __le16 rts_duration_aa_f1;
struct ieee80211_rts data;
struct vnt_tx_datahead_g_fb data_head;
} __packed;
struct vnt_rts_ab {
struct vnt_phy_field ab;
- u16 wDuration;
+ __le16 duration;
u16 wReserved;
struct ieee80211_rts data;
struct vnt_tx_datahead_ab data_head;
@@ -145,10 +145,10 @@ struct vnt_rts_ab {
struct vnt_rts_a_fb {
struct vnt_phy_field a;
- u16 wDuration;
+ __le16 duration;
u16 wReserved;
- u16 wRTSDuration_f0;
- u16 wRTSDuration_f1;
+ __le16 rts_duration_f0;
+ __le16 rts_duration_f1;
struct ieee80211_rts data;
struct vnt_tx_datahead_a_fb data_head;
} __packed;
@@ -156,7 +156,7 @@ struct vnt_rts_a_fb {
/* CTS buffer header */
struct vnt_cts {
struct vnt_phy_field b;
- u16 wDuration_ba;
+ __le16 duration_ba;
u16 wReserved;
struct ieee80211_cts data;
u16 reserved2;
@@ -165,10 +165,10 @@ struct vnt_cts {
struct vnt_cts_fb {
struct vnt_phy_field b;
- u16 wDuration_ba;
+ __le16 duration_ba;
u16 wReserved;
- u16 wCTSDuration_ba_f0;
- u16 wCTSDuration_ba_f1;
+ __le16 cts_duration_ba_f0;
+ __le16 cts_duration_ba_f1;
struct ieee80211_cts data;
u16 reserved2;
struct vnt_tx_datahead_g_fb data_head;
@@ -234,8 +234,8 @@ struct vnt_tx_short_buf_head {
u16 fifo_ctl;
u16 time_stamp;
struct vnt_phy_field ab;
- u16 duration;
- u16 time_stamp_off;
+ __le16 duration;
+ __le16 time_stamp_off;
} __packed;
struct vnt_beacon_buffer {
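
A minimal sketch of why the rxtx.h fields above change from u16 to __le16: these descriptors go to the device little-endian, and the annotated type lets sparse flag any assignment that skips the byte-order conversion. struct vnt_tx_datahead_ab and its duration member are taken from the header above; the helper names are illustrative and assume "rxtx.h" is included.

#include <linux/types.h>
#include <asm/byteorder.h>
/* assumes the vt6656 "rxtx.h" shown above for struct vnt_tx_datahead_ab */

/* Illustrative helpers (not part of the patch) */
static void example_set_duration(struct vnt_tx_datahead_ab *head, u16 usecs)
{
	/* a plain "head->duration = usecs;" would now trigger a sparse
	 * warning; cpu_to_le16() makes the on-wire byte order explicit */
	head->duration = cpu_to_le16(usecs);
}

static u16 example_get_duration(const struct vnt_tx_datahead_ab *head)
{
	return le16_to_cpu(head->duration);
}
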
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 01cf09999b6d..c5838d99f89f 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -105,6 +105,8 @@ int PIPEnsControlOutAsyn(struct vnt_private *pDevice, u8 byRequest,
int PIPEnsControlOut(struct vnt_private *pDevice, u8 byRequest, u16 wValue,
u16 wIndex, u16 wLength, u8 *pbyBuffer)
+ __releases(&pDevice->lock)
+ __acquires(&pDevice->lock)
{
int ntStatus = 0;
int ii;
@@ -167,6 +169,8 @@ int PIPEnsControlOut(struct vnt_private *pDevice, u8 byRequest, u16 wValue,
int PIPEnsControlIn(struct vnt_private *pDevice, u8 byRequest, u16 wValue,
u16 wIndex, u16 wLength, u8 *pbyBuffer)
+ __releases(&pDevice->lock)
+ __acquires(&pDevice->lock)
{
int ntStatus = 0;
int ii;
@@ -295,40 +299,38 @@ static void s_nsControlInUsbIoCompleteRead(struct urb *urb)
*
*/
-int PIPEnsInterruptRead(struct vnt_private *pDevice)
+int PIPEnsInterruptRead(struct vnt_private *priv)
{
- int ntStatus = STATUS_FAILURE;
+ int status = STATUS_FAILURE;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartInterruptUsbRead()\n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "---->s_nsStartInterruptUsbRead()\n");
- if(pDevice->intBuf.bInUse == true){
- return (STATUS_FAILURE);
- }
- pDevice->intBuf.bInUse = true;
-// pDevice->bEventAvailable = false;
- pDevice->ulIntInPosted++;
-
- //
- // Now that we have created the urb, we will send a
- // request to the USB device object.
- //
- pDevice->pInterruptURB->interval = pDevice->int_interval;
-
-usb_fill_bulk_urb(pDevice->pInterruptURB,
- pDevice->usb,
- usb_rcvbulkpipe(pDevice->usb, 1),
- (void *) pDevice->intBuf.pDataBuf,
+ if (priv->int_buf.in_use == true)
+ return STATUS_FAILURE;
+
+ priv->int_buf.in_use = true;
+
+ usb_fill_int_urb(priv->pInterruptURB,
+ priv->usb,
+ usb_rcvintpipe(priv->usb, 1),
+ priv->int_buf.data_buf,
MAX_INTERRUPT_SIZE,
s_nsInterruptUsbIoCompleteRead,
- pDevice);
+ priv,
+ priv->int_interval);
- ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
- if (ntStatus != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
- }
+ status = usb_submit_urb(priv->pInterruptURB, GFP_ATOMIC);
+ if (status) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Submit int URB failed %d\n", status);
+ priv->int_buf.in_use = false;
+ }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----s_nsStartInterruptUsbRead Return(%x)\n",ntStatus);
- return ntStatus;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "<----s_nsStartInterruptUsbRead Return(%x)\n", status);
+
+ return status;
}
/*
@@ -348,67 +350,48 @@ usb_fill_bulk_urb(pDevice->pInterruptURB,
static void s_nsInterruptUsbIoCompleteRead(struct urb *urb)
{
- struct vnt_private *pDevice = (struct vnt_private *)urb->context;
- int ntStatus;
+ struct vnt_private *priv = urb->context;
+ int status;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptUsbIoCompleteRead\n");
- //
- // The context given to IoSetCompletionRoutine is the receive buffer object
- //
-
- //
- // We have a number of cases:
- // 1) The USB read timed out and we received no data.
- // 2) The USB read timed out and we received some data.
- // 3) The USB read was successful and fully filled our irp buffer.
- // 4) The irp was cancelled.
- // 5) Some other failure from the USB device object.
- //
- ntStatus = urb->status;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsInterruptUsbIoCompleteRead Status %d\n", ntStatus);
-
- // if we were not successful, we need to free the int buffer for future use right here
- // otherwise interrupt data handler will free int buffer after it handle it.
- if (( ntStatus != STATUS_SUCCESS )) {
- pDevice->ulBulkInError++;
- pDevice->intBuf.bInUse = false;
-
-// if (ntStatus == USBD_STATUS_CRC) {
-// pDevice->ulIntInContCRCError++;
-// }
-
-// if (ntStatus == STATUS_NOT_CONNECTED )
-// {
- pDevice->fKillEventPollingThread = true;
-// }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"IntUSBIoCompleteControl STATUS = %d\n", ntStatus );
- } else {
- pDevice->ulIntInBytesRead += (unsigned long) urb->actual_length;
- pDevice->ulIntInContCRCError = 0;
- pDevice->bEventAvailable = true;
- INTnsProcessData(pDevice);
- }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "---->s_nsInterruptUsbIoCompleteRead\n");
+
+ switch (urb->status) {
+ case 0:
+ case -ETIMEDOUT:
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ priv->int_buf.in_use = false;
+ return;
+ default:
+ break;
+ }
- if (pDevice->fKillEventPollingThread != true) {
- usb_fill_bulk_urb(pDevice->pInterruptURB,
- pDevice->usb,
- usb_rcvbulkpipe(pDevice->usb, 1),
- (void *) pDevice->intBuf.pDataBuf,
- MAX_INTERRUPT_SIZE,
- s_nsInterruptUsbIoCompleteRead,
- pDevice);
+ status = urb->status;
- ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
- if (ntStatus != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
- }
- }
- //
- // We return STATUS_MORE_PROCESSING_REQUIRED so that the completion
- // routine (IofCompleteRequest) will stop working on the irp.
- //
- return ;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "s_nsInterruptUsbIoCompleteRead Status %d\n", status);
+
+ if (status != STATUS_SUCCESS) {
+ priv->int_buf.in_use = false;
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "IntUSBIoCompleteControl STATUS = %d\n", status);
+ } else {
+ INTnsProcessData(priv);
+ }
+
+ status = usb_submit_urb(priv->pInterruptURB, GFP_ATOMIC);
+ if (status) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Submit int URB failed %d\n", status);
+ } else {
+ priv->int_buf.in_use = true;
+ }
+
+ return;
}
/*
@@ -425,45 +408,41 @@ static void s_nsInterruptUsbIoCompleteRead(struct urb *urb)
*
*/
-int PIPEnsBulkInUsbRead(struct vnt_private *pDevice, struct vnt_rcb *pRCB)
+int PIPEnsBulkInUsbRead(struct vnt_private *priv, struct vnt_rcb *rcb)
{
- int ntStatus = 0;
- struct urb *pUrb;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartBulkInUsbRead\n");
+ int status = 0;
+ struct urb *urb;
- if (pDevice->Flags & fMP_DISCONNECTED)
- return STATUS_FAILURE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartBulkInUsbRead\n");
- pDevice->ulBulkInPosted++;
+ if (priv->Flags & fMP_DISCONNECTED)
+ return STATUS_FAILURE;
- pUrb = pRCB->pUrb;
- //
- // Now that we have created the urb, we will send a
- // request to the USB device object.
- //
- if (pRCB->skb == NULL) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pRCB->skb is null \n");
- return ntStatus;
- }
+ urb = rcb->pUrb;
+ if (rcb->skb == NULL) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"rcb->skb is null\n");
+ return status;
+ }
- usb_fill_bulk_urb(pUrb,
- pDevice->usb,
- usb_rcvbulkpipe(pDevice->usb, 2),
- (void *) (pRCB->skb->data),
+ usb_fill_bulk_urb(urb,
+ priv->usb,
+ usb_rcvbulkpipe(priv->usb, 2),
+ (void *) (rcb->skb->data),
MAX_TOTAL_SIZE_WITH_ALL_HEADERS,
s_nsBulkInUsbIoCompleteRead,
- pRCB);
+ rcb);
- ntStatus = usb_submit_urb(pUrb, GFP_ATOMIC);
- if (ntStatus != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Rx URB failed %d\n", ntStatus);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status != 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Submit Rx URB failed %d\n", status);
return STATUS_FAILURE ;
}
- pRCB->Ref = 1;
- pRCB->bBoolInUse= true;
- return ntStatus;
+ rcb->Ref = 1;
+ rcb->bBoolInUse = true;
+
+ return status;
}
/*
@@ -483,51 +462,47 @@ int PIPEnsBulkInUsbRead(struct vnt_private *pDevice, struct vnt_rcb *pRCB)
static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
{
- struct vnt_rcb *pRCB = (struct vnt_rcb *)urb->context;
- struct vnt_private *pDevice = pRCB->pDevice;
- unsigned long bytesRead;
- int bIndicateReceive = false;
- int bReAllocSkb = false;
- int status;
+ struct vnt_rcb *rcb = urb->context;
+ struct vnt_private *priv = rcb->pDevice;
+ int re_alloc_skb = false;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkInUsbIoCompleteRead\n");
- status = urb->status;
- bytesRead = urb->actual_length;
-
- if (status) {
- pDevice->ulBulkInError++;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK In failed %d\n", status);
-//todo...xxxxxx
-// if (status == USBD_STATUS_CRC) {
-// pDevice->ulBulkInContCRCError++;
-// }
-// if (status == STATUS_DEVICE_NOT_CONNECTED )
-// {
-// MP_SET_FLAG(pDevice, fMP_DISCONNECTED);
-// }
- } else {
- if (bytesRead)
- bIndicateReceive = true;
- pDevice->ulBulkInContCRCError = 0;
- pDevice->ulBulkInBytesRead += bytesRead;
- }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkInUsbIoCompleteRead\n");
- if (bIndicateReceive) {
- spin_lock(&pDevice->lock);
- if (RXbBulkInProcessData(pDevice, pRCB, bytesRead) == true)
- bReAllocSkb = true;
- spin_unlock(&pDevice->lock);
- }
- pRCB->Ref--;
- if (pRCB->Ref == 0)
- {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RxvFreeNormal %d \n",pDevice->NumRecvFreeList);
- spin_lock(&pDevice->lock);
- RXvFreeRCB(pRCB, bReAllocSkb);
- spin_unlock(&pDevice->lock);
- }
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ case -ETIMEDOUT:
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "BULK In failed %d\n", urb->status);
+ break;
+ }
+
+ if (urb->actual_length) {
+ spin_lock(&priv->lock);
+
+ if (RXbBulkInProcessData(priv, rcb, urb->actual_length) == true)
+ re_alloc_skb = true;
+
+ spin_unlock(&priv->lock);
+ }
+
+ rcb->Ref--;
+ if (rcb->Ref == 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RxvFreeNormal %d\n",
+ priv->NumRecvFreeList);
+ spin_lock(&priv->lock);
+
+ RXvFreeRCB(rcb, re_alloc_skb);
- return;
+ spin_unlock(&priv->lock);
+ }
+
+ return;
}
/*
@@ -544,53 +519,40 @@ static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
*
*/
-int PIPEnsSendBulkOut(struct vnt_private *pDevice,
- struct vnt_usb_send_context *pContext)
+int PIPEnsSendBulkOut(struct vnt_private *priv,
+ struct vnt_usb_send_context *context)
{
int status;
- struct urb *pUrb;
+ struct urb *urb;
- pDevice->bPWBitOn = false;
+ priv->bPWBitOn = false;
-/*
- if (pDevice->pPendingBulkOutContext != NULL) {
- pDevice->NumContextsQueued++;
- EnqueueContext(pDevice->FirstTxContextQueue, pDevice->LastTxContextQueue, pContext);
- status = STATUS_PENDING;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send pending!\n");
- return status;
- }
-*/
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsSendBulkOut\n");
-
- if (MP_IS_READY(pDevice) && (pDevice->Flags & fMP_POST_WRITES)) {
-
- pUrb = pContext->pUrb;
- pDevice->ulBulkOutPosted++;
-// pDevice->pPendingBulkOutContext = pContext;
- usb_fill_bulk_urb(
- pUrb,
- pDevice->usb,
- usb_sndbulkpipe(pDevice->usb, 3),
- (void *) &(pContext->Data[0]),
- pContext->uBufLen,
- s_nsBulkOutIoCompleteWrite,
- pContext);
-
- status = usb_submit_urb(pUrb, GFP_ATOMIC);
- if (status != 0)
- {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Tx URB failed %d\n", status);
- pContext->bBoolInUse = false;
- return STATUS_FAILURE;
- }
- return STATUS_PENDING;
- }
- else {
- pContext->bBoolInUse = false;
- return STATUS_RESOURCES;
- }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsSendBulkOut\n");
+
+ if (!(MP_IS_READY(priv) && priv->Flags & fMP_POST_WRITES)) {
+ context->bBoolInUse = false;
+ return STATUS_RESOURCES;
+ }
+
+ urb = context->pUrb;
+
+ usb_fill_bulk_urb(urb,
+ priv->usb,
+ usb_sndbulkpipe(priv->usb, 3),
+ context->Data,
+ context->uBufLen,
+ s_nsBulkOutIoCompleteWrite,
+ context);
+
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status != 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Submit Tx URB failed %d\n", status);
+ context->bBoolInUse = false;
+ return STATUS_FAILURE;
+ }
+
+ return STATUS_PENDING;
}
/*
@@ -623,68 +585,49 @@ int PIPEnsSendBulkOut(struct vnt_private *pDevice,
static void s_nsBulkOutIoCompleteWrite(struct urb *urb)
{
- struct vnt_private *pDevice;
- int status;
- CONTEXT_TYPE ContextType;
- unsigned long ulBufLen;
- struct vnt_usb_send_context *pContext;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkOutIoCompleteWrite\n");
- //
- // The context given to IoSetCompletionRoutine is an USB_CONTEXT struct
- //
- pContext = (struct vnt_usb_send_context *)urb->context;
-
- pDevice = pContext->pDevice;
- ContextType = pContext->Type;
- ulBufLen = pContext->uBufLen;
-
- if (!netif_device_present(pDevice->dev))
- return;
+ struct vnt_usb_send_context *context = urb->context;
+ struct vnt_private *priv = context->pDevice;
+ u8 context_type = context->type;
- //
- // Perform various IRP, URB, and buffer 'sanity checks'
- //
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkOutIoCompleteWrite\n");
- status = urb->status;
-
- if(status == STATUS_SUCCESS) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Write %d bytes\n",(int)ulBufLen);
- pDevice->ulBulkOutBytesWrite += ulBufLen;
- pDevice->ulBulkOutContCRCError = 0;
- } else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK Out failed %d\n", status);
- pDevice->ulBulkOutError++;
- }
+ switch (urb->status) {
+ case 0:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Write %d bytes\n", context->uBufLen);
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ context->bBoolInUse = false;
+ return;
+ case -ETIMEDOUT:
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "BULK Out failed %d\n", urb->status);
+ break;
+ }
-// pDevice->ulCheckForHangCount = 0;
-// pDevice->pPendingBulkOutContext = NULL;
+ if (!netif_device_present(priv->dev))
+ return;
- if ( CONTEXT_DATA_PACKET == ContextType ) {
- // Indicate to the protocol the status of the sent packet and return
- // ownership of the packet.
- if (pContext->pPacket != NULL) {
- dev_kfree_skb_irq(pContext->pPacket);
- pContext->pPacket = NULL;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"tx %d bytes\n",(int)ulBufLen);
- }
+ if (CONTEXT_DATA_PACKET == context_type) {
+ if (context->pPacket != NULL) {
+ dev_kfree_skb_irq(context->pPacket);
+ context->pPacket = NULL;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "tx %d bytes\n", context->uBufLen);
+ }
- pDevice->dev->trans_start = jiffies;
+ priv->dev->trans_start = jiffies;
+ }
- if (status == STATUS_SUCCESS) {
- pDevice->packetsSent++;
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send USB error! [%08xh]\n", status);
- pDevice->packetsSentDropped++;
- }
+ if (priv->bLinkPass == true) {
+ if (netif_queue_stopped(priv->dev))
+ netif_wake_queue(priv->dev);
+ }
- }
- if (pDevice->bLinkPass == true) {
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
- }
- pContext->bBoolInUse = false;
+ context->bBoolInUse = false;
- return;
+ return;
}
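
A condensed sketch of the completion-handler pattern the usbpipe.c rewrites above settle on: classify urb->status first, bail out without resubmitting when the URB was unlinked or the device is going away, and resubmit otherwise. The function name is illustrative, not the driver's.

#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative interrupt-URB completion handler (not the driver's) */
static void example_int_complete(struct urb *urb)
{
	switch (urb->status) {
	case 0:			/* data arrived */
	case -ETIMEDOUT:	/* transient: keep polling */
		break;
	case -ECONNRESET:	/* URB was unlinked */
	case -ENOENT:
	case -ESHUTDOWN:	/* device is going away: do not resubmit */
		return;
	default:		/* other errors: note them, then resubmit */
		break;
	}

	/* process urb->transfer_buffer / urb->actual_length here ... */

	if (usb_submit_urb(urb, GFP_ATOMIC))
		pr_debug("example: int URB resubmit failed\n");
}
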
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 6b9522914634..3cf3f24247a3 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -293,17 +293,11 @@ void vRunCommand(struct work_struct *work)
case WLAN_CMD_SCAN_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
+ if (pDevice->bRadioOff == true)
+ break;
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)
+ break;
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID;
@@ -311,16 +305,12 @@ void vRunCommand(struct work_struct *work)
pMgmt->uScanChannel = pDevice->byMinChannel;
if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
pDevice->eCommandState = WLAN_CMD_SCAN_END;
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
+ break;
} else {
if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d\n", pMgmt->uScanChannel);
pMgmt->uScanChannel++;
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
+ break;
}
if (pMgmt->uScanChannel == pDevice->byMinChannel) {
// pMgmt->eScanType = WMAC_SCAN_ACTIVE; //mike mark
@@ -420,16 +410,13 @@ void vRunCommand(struct work_struct *work)
memset(&wrqu, 0, sizeof(wrqu));
wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_DISASSOCIATE_START:
pDevice->byReAssocCount = 0;
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pMgmt->eCurrState != WMAC_STATE_ASSOC)) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
+ break;
} else {
pDevice->bwextstep0 = false;
pDevice->bwextstep1 = false;
@@ -458,17 +445,14 @@ void vRunCommand(struct work_struct *work)
netif_stop_queue(pDevice->dev);
if (pDevice->bNeedRadioOFF == true)
CARDbRadioPowerOff(pDevice);
- s_bCommandComplete(pDevice);
+
break;
case WLAN_CMD_SSID_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
+ if (pDevice->bRadioOff == true)
+ break;
memcpy(pMgmt->abyAdHocSSID, pMgmt->abyDesireSSID,
((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN);
@@ -489,11 +473,9 @@ void vRunCommand(struct work_struct *work)
if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
if (pItemSSID->len == pItemSSIDCurr->len) {
- if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
+ if (!memcmp(pItemSSID->abySSID,
+ pItemSSIDCurr->abySSID, pItemSSID->len))
+ break;
}
netif_stop_queue(pDevice->dev);
pDevice->bLinkPass = false;
@@ -582,7 +564,6 @@ void vRunCommand(struct work_struct *work)
}
}
}
- s_bCommandComplete(pDevice);
break;
case WLAN_AUTHENTICATE_WAIT:
@@ -612,7 +593,6 @@ void vRunCommand(struct work_struct *work)
}
pDevice->byLinkWaitCount = 0;
- s_bCommandComplete(pDevice);
break;
case WLAN_ASSOCIATE_WAIT:
@@ -647,7 +627,6 @@ void vRunCommand(struct work_struct *work)
return;
}
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_AP_MODE_START:
@@ -683,7 +662,6 @@ void vRunCommand(struct work_struct *work)
ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER);
schedule_delayed_work(&pDevice->second_callback_work, HZ);
}
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_TX_PSPACKET_START:
@@ -738,8 +716,6 @@ void vRunCommand(struct work_struct *work)
pMgmt->sNodeDBTable[ii].bRxPSPoll = false;
}
}
-
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_RADIO_START:
@@ -760,11 +736,8 @@ void vRunCommand(struct work_struct *work)
1,
&byTmp);
- if (ntStatus != STATUS_SUCCESS) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
+ if (ntStatus != STATUS_SUCCESS)
+ break;
if ((byTmp & GPIO3_DATA) == 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_RADIO_START_OFF........................\n");
// Old commands are useless.
@@ -833,7 +806,6 @@ void vRunCommand(struct work_struct *work)
}
}
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_CHANGE_BBSENSITIVITY_START:
@@ -843,24 +815,20 @@ void vRunCommand(struct work_struct *work)
BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change sensitivity pDevice->byBBVGACurrent = %x\n", pDevice->byBBVGACurrent);
pDevice->bStopDataPkt = false;
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_TBTT_WAKEUP_START:
PSbIsNextTBTTWakeUp(pDevice);
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_BECON_SEND_START:
bMgrPrepareBeaconToSend(pDevice, pMgmt);
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_SETPOWER_START:
RFbSetPower(pDevice, pDevice->wCurrentRate, pMgmt->uCurrChannel);
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_CHANGE_ANTENNA_START:
@@ -878,12 +846,10 @@ void vRunCommand(struct work_struct *work)
else
BBvSetAntennaMode(pDevice, ANT_RXA);
}
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_REMOVE_ALLKEY_START:
KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID);
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_MAC_DISPOWERSAVING_START:
@@ -898,7 +864,6 @@ void vRunCommand(struct work_struct *work)
NULL
);
}
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_11H_CHSW_START:
@@ -906,14 +871,17 @@ void vRunCommand(struct work_struct *work)
pDevice->bChannelSwitch = false;
pMgmt->uCurrChannel = pDevice->byNewChannel;
pDevice->bStopDataPkt = false;
- s_bCommandComplete(pDevice);
break;
+ case WLAN_CMD_CONFIGURE_FILTER_START:
+ vnt_configure_filter(pDevice);
+ break;
default:
- s_bCommandComplete(pDevice);
break;
} //switch
+ s_bCommandComplete(pDevice);
+
spin_unlock_irq(&pDevice->lock);
return;
}
@@ -1009,6 +977,11 @@ static int s_bCommandComplete(struct vnt_private *pDevice)
pDevice->eCommandState = WLAN_CMD_11H_CHSW_START;
break;
+ case WLAN_CMD_CONFIGURE_FILTER:
+ pDevice->eCommandState =
+ WLAN_CMD_CONFIGURE_FILTER_START;
+ break;
+
default:
break;
}
diff --git a/drivers/staging/vt6656/wcmd.h b/drivers/staging/vt6656/wcmd.h
index caf2684ce915..736572101bad 100644
--- a/drivers/staging/vt6656/wcmd.h
+++ b/drivers/staging/vt6656/wcmd.h
@@ -51,7 +51,8 @@ typedef enum tagCMD_CODE {
WLAN_CMD_REMOVE_ALLKEY,
WLAN_CMD_MAC_DISPOWERSAVING,
WLAN_CMD_11H_CHSW,
- WLAN_CMD_RUN_AP
+ WLAN_CMD_RUN_AP,
+ WLAN_CMD_CONFIGURE_FILTER
} CMD_CODE, *PCMD_CODE;
#define CMD_Q_SIZE 32
@@ -96,6 +97,7 @@ typedef enum tagCMD_STATE {
WLAN_CMD_REMOVE_ALLKEY_START,
WLAN_CMD_MAC_DISPOWERSAVING_START,
WLAN_CMD_11H_CHSW_START,
+ WLAN_CMD_CONFIGURE_FILTER_START,
WLAN_CMD_IDLE
} CMD_STATE, *PCMD_STATE;
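
The wcmd.c hunks above replace the per-case "s_bCommandComplete(); spin_unlock_irq(); return;" sequences with a plain break, so completion and unlock run exactly once after the switch. A stripped-down illustration of that control-flow shape follows; every type and name in it is invented for the example.

#include <linux/spinlock.h>
#include <linux/types.h>

enum example_cmd { EXAMPLE_CMD_SCAN, EXAMPLE_CMD_RADIO };

struct example_dev {
	spinlock_t lock;
	enum example_cmd state;
	bool radio_off;
};

static void example_command_complete(struct example_dev *dev)
{
	/* advance the command state machine, as s_bCommandComplete() does */
}

static void example_run_command(struct example_dev *dev)
{
	spin_lock_irq(&dev->lock);

	switch (dev->state) {
	case EXAMPLE_CMD_SCAN:
		if (dev->radio_off)
			break;	/* was: complete + unlock + return */
		/* ... scan work ... */
		break;
	case EXAMPLE_CMD_RADIO:
		/* ... radio work ... */
		break;
	default:
		break;
	}

	example_command_complete(dev);	/* single completion point */
	spin_unlock_irq(&dev->lock);
}
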
diff --git a/drivers/staging/vt6656/wmgr.c b/drivers/staging/vt6656/wmgr.c
index d74b0e7cb171..0d69719a7426 100644
--- a/drivers/staging/vt6656/wmgr.c
+++ b/drivers/staging/vt6656/wmgr.c
@@ -2164,12 +2164,12 @@ void vMgrCreateOwnIBSS(struct vnt_private *pDevice, PCMD_STATUS pStatus)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1);
pMgmt->byDTIMPeriod = DEFAULT_DTIM_PERIOD;
pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
- pDevice->eOPMode = OP_MODE_AP;
+ pDevice->op_mode = NL80211_IFTYPE_AP;
}
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_IBSS(1);
- pDevice->eOPMode = OP_MODE_ADHOC;
+ pDevice->op_mode = NL80211_IFTYPE_ADHOC;
}
if (pDevice->bEncryptionEnable) {
@@ -2359,7 +2359,7 @@ void vMgrJoinBSSBegin(struct vnt_private *pDevice, PCMD_STATUS pStatus)
pMgmt->eCurrState = WMAC_STATE_JOINTED;
// Adopt BSS state in Adapter Device Object
- pDevice->eOPMode = OP_MODE_INFRASTRUCTURE;
+ pDevice->op_mode = NL80211_IFTYPE_STATION;
memcpy(pDevice->abyBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
// Add current BSS to Candidate list
@@ -2500,7 +2500,7 @@ void vMgrJoinBSSBegin(struct vnt_private *pDevice, PCMD_STATUS pStatus)
pMgmt->eCurrMode = WMAC_MODE_IBSS_STA;
pMgmt->eCurrState = WMAC_STATE_STARTED;
// Adopt BSS state in Adapter Device Object
- pDevice->eOPMode = OP_MODE_ADHOC;
+ pDevice->op_mode = NL80211_IFTYPE_ADHOC;
pDevice->bLinkPass = true;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
memcpy(pDevice->abyBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
diff --git a/drivers/staging/winbond/localpara.h b/drivers/staging/winbond/localpara.h
index 84effc47d79d..8ca80ddda59a 100644
--- a/drivers/staging/winbond/localpara.h
+++ b/drivers/staging/winbond/localpara.h
@@ -58,9 +58,13 @@
#define LOCAL_11B_BASIC_RATE_BITMAP 0x826
#define LOCAL_11B_OPERATION_RATE_BITMAP 0x826
#define LOCAL_11G_BASIC_RATE_BITMAP 0x826 /* 1, 2, 5.5, 11 */
-#define LOCAL_11G_OPERATION_RATE_BITMAP 0x130c1240 /* 6, 9, 12, 18, 24, 36, 48, 54 */
+#define LOCAL_11G_OPERATION_RATE_BITMAP 0x130c1240 /* 6, 9, 12, 18,
+ * 24, 36, 48, 54
+ */
#define LOCAL_11A_BASIC_RATE_BITMAP 0x01001040 /* 6, 12, 24 */
-#define LOCAL_11A_OPERATION_RATE_BITMAP 0x120c0200 /* 9, 18, 36, 48, 54 */
+#define LOCAL_11A_OPERATION_RATE_BITMAP 0x120c0200 /* 9, 18, 36,
+ * 48, 54
+ */
#define PWR_ACTIVE 0
@@ -140,7 +144,9 @@ struct wb_local_para {
/* Unit time count for the decision to enter PS mode */
u16 CheckCountForPS;
u8 boHasTxActivity;/* tx activity has occurred */
- u8 boMacPsValid; /* Power save mode obtained from H/W is valid or not */
+ u8 boMacPsValid; /* Power save mode obtained
+ * from H/W is valid or not
+ */
/* Rate */
u8 TxRateMode; /*
@@ -162,35 +168,57 @@ struct wb_local_para {
u8 NumOfBRate;
u8 NumOfSRate;
- u8 NumOfDsssRateInSRate; /* number of DSSS rates in supported rate set */
+ u8 NumOfDsssRateInSRate; /* number of DSSS rates in
+ * supported rate set
+ */
u8 reserved1;
u32 dwBasicRateBitmap; /* bit map of basic rates */
- u32 dwSupportRateBitmap; /* bit map of all support rates including basic and operational rates */
+ u32 dwSupportRateBitmap; /* bit map of all support rates
+ * including basic and operational
+ * rates
+ */
/* For SME/MLME handler */
- u16 wOldSTAindex; /* valid when boHandover=TRUE, store old connected STA index */
- u16 wConnectedSTAindex; /* Index of peerly connected AP or IBSS in the descriptionset. */
- u16 Association_ID; /* The Association ID in the (Re)Association Response frame. */
- u16 ListenInterval; /* The listen interval when SME invoking MLME_ (Re)Associate_Request(). */
+ u16 wOldSTAindex; /* valid when boHandover=TRUE,
+ * store old connected STA index
+ */
+ u16 wConnectedSTAindex; /* Index of peerly connected AP or
+ * IBSS in the descriptionset.
+ */
+ u16 Association_ID; /* The Association ID in the
+ * (Re)Association Response frame.
+ */
+ u16 ListenInterval; /* The listen interval when SME invoking
+ * MLME_ (Re)Associate_Request().
+ */
struct radio_off RadioOffStatus;
u8 Reserved0[2];
- u8 boMsRadioOff; /* Ndis demands to be true when set Disassoc. OID and be false when set SSID OID. */
+ u8 boMsRadioOff; /* Ndis demands to be true when set
+ * Disassoc. OID and be false when
+ * set SSID OID.
+ */
u8 bAntennaNo; /* which antenna */
- u8 bConnectFlag; /* the connect status flag for roaming task */
+ u8 bConnectFlag; /* the connect status flag for
+ * roaming task
+ */
u8 RoamStatus;
u8 reserved7[3];
- struct chan_info CurrentChan; /* Current channel no. and channel band. It may be changed by scanning. */
+ struct chan_info CurrentChan; /* Current channel no. and channel band.
+ * It may be changed by scanning.
+ */
u8 boHandover; /* Roaming, Handover to other AP. */
u8 boCCAbusy;
- u16 CWMax; /* It may not be the real value that H/W used */
+ u16 CWMax; /* It may not be the real value
+ * that H/W used
+ */
u8 CWMin; /* 255: set according to 802.11 spec. */
u8 reserved2;
@@ -200,7 +228,9 @@ struct wb_local_para {
u8 bPreambleMode; /* AUTO, s32 */
u8 boNonERPpresent;
- u8 boProtectMechanism; /* H/W will take the necessary action based on this variable */
+ u8 boProtectMechanism; /* H/W will take the necessary action
+ * based on this variable
+ */
u8 boShortPreamble; /* Same here */
u8 boShortSlotTime; /* Same here */
u8 reserved_3;
@@ -213,8 +243,12 @@ struct wb_local_para {
u32 HwBssidValid;
/* For scan list */
- u8 BssListCount; /* Total count of valid descriptor indexes */
- u8 boReceiveUncorrectInfo; /* important settings in beacon/probe resp. have been changed */
+ u8 BssListCount; /* Total count of valid
+ * descriptor indexes
+ */
+ u8 boReceiveUncorrectInfo; /* important settings in beacon/probe
+ * resp. have been changed
+ */
u8 NoOfJoinerInIbss;
u8 reserved_4;
@@ -228,7 +262,9 @@ struct wb_local_para {
*/
u8 JoinerInIbss[(MAX_BSS_DESCRIPT_ELEMENT + 3) & ~0x03];
- /* General Statistics, count at Rx_handler or Tx_callback interrupt handler */
+ /* General Statistics, count at Rx_handler or
+ * Tx_callback interrupt handler
+ */
u64 GS_XMIT_OK; /* Good Frames Transmitted */
u64 GS_RCV_OK; /* Good Frames Received */
u32 GS_RCV_ERROR; /* Frames received with crc error */
@@ -248,10 +284,18 @@ struct wb_local_para {
u32 _dot11WEPUndecryptableCount;
u32 _dot11FrameDuplicateCount;
- struct chan_info IbssChanSetting; /* 2B. Start IBSS Channel setting by registry or WWU. */
- u8 reserved_5[2]; /* It may not be used after considering RF type, region and modulation type. */
+ struct chan_info IbssChanSetting; /* 2B. Start IBSS Channel
+ * setting by registry or
+ * WWU.
+ */
+ u8 reserved_5[2]; /* It may not be used after
+ * considering RF type, region
+ * and modulation type.
+ */
- u8 reserved_6[2]; /* two variables are for wep key error detection */
+ u8 reserved_6[2]; /* two variables are for wep
+ * key error detection
+ */
u32 bWepKeyError;
u32 bToSelfPacketReceived;
u32 WepKeyDetectTimerCount;
diff --git a/drivers/staging/winbond/wb35reg.c b/drivers/staging/winbond/wb35reg.c
index a5e255bb0f8b..bbc5ddcce6f5 100644
--- a/drivers/staging/winbond/wb35reg.c
+++ b/drivers/staging/winbond/wb35reg.c
@@ -76,41 +76,111 @@ void Wb35Reg_Update(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
switch (RegisterNo) {
- case 0x3b0: reg->U1B0 = RegisterValue; break;
- case 0x3bc: reg->U1BC_LEDConfigure = RegisterValue; break;
- case 0x400: reg->D00_DmaControl = RegisterValue; break;
- case 0x800: reg->M00_MacControl = RegisterValue; break;
- case 0x804: reg->M04_MulticastAddress1 = RegisterValue; break;
- case 0x808: reg->M08_MulticastAddress2 = RegisterValue; break;
- case 0x824: reg->M24_MacControl = RegisterValue; break;
- case 0x828: reg->M28_MacControl = RegisterValue; break;
- case 0x82c: reg->M2C_MacControl = RegisterValue; break;
- case 0x838: reg->M38_MacControl = RegisterValue; break;
- case 0x840: reg->M40_MacControl = RegisterValue; break;
- case 0x844: reg->M44_MacControl = RegisterValue; break;
- case 0x848: reg->M48_MacControl = RegisterValue; break;
- case 0x84c: reg->M4C_MacStatus = RegisterValue; break;
- case 0x860: reg->M60_MacControl = RegisterValue; break;
- case 0x868: reg->M68_MacControl = RegisterValue; break;
- case 0x870: reg->M70_MacControl = RegisterValue; break;
- case 0x874: reg->M74_MacControl = RegisterValue; break;
- case 0x878: reg->M78_ERPInformation = RegisterValue; break;
- case 0x87C: reg->M7C_MacControl = RegisterValue; break;
- case 0x880: reg->M80_MacControl = RegisterValue; break;
- case 0x884: reg->M84_MacControl = RegisterValue; break;
- case 0x888: reg->M88_MacControl = RegisterValue; break;
- case 0x898: reg->M98_MacControl = RegisterValue; break;
- case 0x100c: reg->BB0C = RegisterValue; break;
- case 0x102c: reg->BB2C = RegisterValue; break;
- case 0x1030: reg->BB30 = RegisterValue; break;
- case 0x103c: reg->BB3C = RegisterValue; break;
- case 0x1048: reg->BB48 = RegisterValue; break;
- case 0x104c: reg->BB4C = RegisterValue; break;
- case 0x1050: reg->BB50 = RegisterValue; break;
- case 0x1054: reg->BB54 = RegisterValue; break;
- case 0x1058: reg->BB58 = RegisterValue; break;
- case 0x105c: reg->BB5C = RegisterValue; break;
- case 0x1060: reg->BB60 = RegisterValue; break;
+ case 0x3b0:
+ reg->U1B0 = RegisterValue;
+ break;
+ case 0x3bc:
+ reg->U1BC_LEDConfigure = RegisterValue;
+ break;
+ case 0x400:
+ reg->D00_DmaControl = RegisterValue;
+ break;
+ case 0x800:
+ reg->M00_MacControl = RegisterValue;
+ break;
+ case 0x804:
+ reg->M04_MulticastAddress1 = RegisterValue;
+ break;
+ case 0x808:
+ reg->M08_MulticastAddress2 = RegisterValue;
+ break;
+ case 0x824:
+ reg->M24_MacControl = RegisterValue;
+ break;
+ case 0x828:
+ reg->M28_MacControl = RegisterValue;
+ break;
+ case 0x82c:
+ reg->M2C_MacControl = RegisterValue;
+ break;
+ case 0x838:
+ reg->M38_MacControl = RegisterValue;
+ break;
+ case 0x840:
+ reg->M40_MacControl = RegisterValue;
+ break;
+ case 0x844:
+ reg->M44_MacControl = RegisterValue;
+ break;
+ case 0x848:
+ reg->M48_MacControl = RegisterValue;
+ break;
+ case 0x84c:
+ reg->M4C_MacStatus = RegisterValue;
+ break;
+ case 0x860:
+ reg->M60_MacControl = RegisterValue;
+ break;
+ case 0x868:
+ reg->M68_MacControl = RegisterValue;
+ break;
+ case 0x870:
+ reg->M70_MacControl = RegisterValue;
+ break;
+ case 0x874:
+ reg->M74_MacControl = RegisterValue;
+ break;
+ case 0x878:
+ reg->M78_ERPInformation = RegisterValue;
+ break;
+ case 0x87C:
+ reg->M7C_MacControl = RegisterValue;
+ break;
+ case 0x880:
+ reg->M80_MacControl = RegisterValue;
+ break;
+ case 0x884:
+ reg->M84_MacControl = RegisterValue;
+ break;
+ case 0x888:
+ reg->M88_MacControl = RegisterValue;
+ break;
+ case 0x898:
+ reg->M98_MacControl = RegisterValue;
+ break;
+ case 0x100c:
+ reg->BB0C = RegisterValue;
+ break;
+ case 0x102c:
+ reg->BB2C = RegisterValue;
+ break;
+ case 0x1030:
+ reg->BB30 = RegisterValue;
+ break;
+ case 0x103c:
+ reg->BB3C = RegisterValue;
+ break;
+ case 0x1048:
+ reg->BB48 = RegisterValue;
+ break;
+ case 0x104c:
+ reg->BB4C = RegisterValue;
+ break;
+ case 0x1050:
+ reg->BB50 = RegisterValue;
+ break;
+ case 0x1054:
+ reg->BB54 = RegisterValue;
+ break;
+ case 0x1058:
+ reg->BB58 = RegisterValue;
+ break;
+ case 0x105c:
+ reg->BB5C = RegisterValue;
+ break;
+ case 0x1060:
+ reg->BB60 = RegisterValue;
+ break;
}
}
diff --git a/drivers/staging/winbond/wb35rx.c b/drivers/staging/winbond/wb35rx.c
index 8d71bc2f5940..f006b166aebc 100644
--- a/drivers/staging/winbond/wb35rx.c
+++ b/drivers/staging/winbond/wb35rx.c
@@ -16,7 +16,8 @@
#include "core.h"
#include "wb35rx_f.h"
-static void packet_came(struct ieee80211_hw *hw, char *pRxBufferAddress, int PacketSize)
+static void packet_came(struct ieee80211_hw *hw, char *pRxBufferAddress,
+ int PacketSize)
{
struct wbsoft_priv *priv = hw->priv;
struct sk_buff *skb;
@@ -64,7 +65,8 @@ static void Wb35Rx_adjust(struct wb35_descriptor *pRxDes)
} else if (DecryptionMethod) { /* For TKIP and CCMP */
for (i = 7; i > 1; i--)
pRxBufferAddress[i] = pRxBufferAddress[i - 2];
- pRxDes->buffer_address[0] = pRxBufferAddress + 2; /* Update the descriptor, shift 8 byte */
+ /* Update the descriptor, shift 8 byte */
+ pRxDes->buffer_address[0] = pRxBufferAddress + 2;
BufferSize -= 8; /* 8 byte for IV + ICV */
}
pRxDes->buffer_size[0] = BufferSize;
@@ -95,7 +97,9 @@ static u16 Wb35Rx_indicate(struct ieee80211_hw *hw)
/* Parse the bulkin buffer */
while (BufferSize >= 4) {
- if ((cpu_to_le32(*(u32 *)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) /* Is ending? */
+ /* Is ending? */
+ if ((cpu_to_le32(*(u32 *)pRxBufferAddress) & 0x0fffffff) ==
+ RX_END_TAG)
break;
/* Get the R00 R01 first */
@@ -108,7 +112,8 @@ static u16 Wb35Rx_indicate(struct ieee80211_hw *hw)
/* Basic check for Rx length. Is length valid? */
if (PacketSize > MAX_PACKET_SIZE) {
- pr_debug("Serious ERROR : Rx data size too long, size =%d\n", PacketSize);
+ pr_debug("Serious ERROR : Rx data size too long, size =%d\n",
+ PacketSize);
pWb35Rx->EP3vm_state = VM_STOP;
pWb35Rx->Ep3ErrorCount2++;
break;
@@ -118,7 +123,8 @@ static u16 Wb35Rx_indicate(struct ieee80211_hw *hw)
* Wb35Rx_indicate() is called synchronously so it isn't
* necessary to set "RxDes.Desctriptor_ID = RxBufferID;"
*/
- BufferSize -= 8; /* subtract 8 byte for 35's USB header length */
+ /* subtract 8 byte for 35's USB header length */
+ BufferSize -= 8;
pRxBufferAddress += 8;
RxDes.buffer_address[0] = pRxBufferAddress;
@@ -255,7 +261,7 @@ static void Wb35Rx(struct ieee80211_hw *hw)
pWb35Rx->pDRx = kzalloc(MAX_USB_RX_BUFFER, GFP_ATOMIC);
if (!pWb35Rx->pDRx) {
- printk("w35und: Rx memory alloc failed\n");
+ dev_info(&hw->wiphy->dev, "w35und: Rx memory alloc failed\n");
goto error;
}
pRxBufferAddress = pWb35Rx->pDRx;
@@ -270,7 +276,7 @@ static void Wb35Rx(struct ieee80211_hw *hw)
retv = usb_submit_urb(urb, GFP_ATOMIC);
if (retv != 0) {
- printk("Rx URB sending error\n");
+ dev_info(&hw->wiphy->dev, "Rx URB sending error\n");
goto error;
}
return;
@@ -306,7 +312,9 @@ static void Wb35Rx_reset_descriptor(struct hw_data *pHwData)
pWb35Rx->EP3vm_state = VM_STOP;
pWb35Rx->rx_halt = 0;
- /* Initial the Queue. The last buffer is reserved for used if the Rx resource is unavailable. */
+	/* Initialize the queue. The last buffer is reserved for use
+ * if the Rx resource is unavailable.
+ */
for (i = 0; i < MAX_USB_RX_BUFFER_NUMBER; i++)
pWb35Rx->RxOwner[i] = 1;
}
@@ -328,7 +336,8 @@ void Wb35Rx_stop(struct hw_data *pHwData)
/* Canceling the Irp if already sends it out. */
if (pWb35Rx->EP3vm_state == VM_RUNNING) {
- usb_unlink_urb(pWb35Rx->RxUrb); /* Only use unlink, let Wb35Rx_destroy to free them */
+ /* Only use unlink, let Wb35Rx_destroy to free them */
+ usb_unlink_urb(pWb35Rx->RxUrb);
pr_debug("EP3 Rx stop\n");
}
}
diff --git a/drivers/staging/wlags49_h2/dhf.c b/drivers/staging/wlags49_h2/dhf.c
index 13d360fa58ec..4877464f04b0 100644
--- a/drivers/staging/wlags49_h2/dhf.c
+++ b/drivers/staging/wlags49_h2/dhf.c
@@ -106,7 +106,7 @@
*---------------------------------------------------------------------------*/
/* 12345678901234 */
-char signature[14] = "FUPU7D37dhfwci";
+static char signature[14] = "FUPU7D37dhfwci";
/*-----------------------------------------------------------------------------
*
@@ -123,8 +123,8 @@ char signature[14] = "FUPU7D37dhfwci";
#define DL_SIZE 2000
/* CFG_IDENTITY_STRCT pri_identity = { LOF(CFG_IDENTITY_STRCT), CFG_PRI_IDENTITY }; */
-CFG_SUP_RANGE_STRCT mfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_MFI_SUP_RANGE };
-CFG_SUP_RANGE_STRCT cfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_CFI_SUP_RANGE };
+static CFG_SUP_RANGE_STRCT mfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_MFI_SUP_RANGE };
+static CFG_SUP_RANGE_STRCT cfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_CFI_SUP_RANGE };
/* Note: could be used rather than the above explained and defined DL_SIZE if need arises
* CFG_DL_BUF_STRCT dl_buf = { LOF(CFG_DL_BUF_STRCT), CFG_DL_BUF };
*/
@@ -139,7 +139,7 @@ CFG_SUP_RANGE_STRCT cfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_CFI_S
* This is only relevant if the DHF used without reloading the driver/utility.
*/
-LTV_INFO_STRUCT ltv_info[] = {
+static LTV_INFO_STRUCT ltv_info[] = {
{ (LTVP)&mfi_sup, LOF(CFG_SUP_RANGE_STRCT) } ,
{ (LTVP)&cfi_sup, LOF(CFG_SUP_RANGE_STRCT) } ,
{ (LTVP) NULL, 0 }
@@ -169,7 +169,7 @@ static int check_comp_fw(memimage *fw);
* station firmware image to be downloaded is compatible.
*.ENDDOC END DOCUMENTATION
*************************************************************************************************************/
-int
+static int
check_comp_fw(memimage *fw)
{
CFG_RANGE20_STRCT *p;
diff --git a/drivers/staging/wlags49_h2/hcf.c b/drivers/staging/wlags49_h2/hcf.c
index 4aac26d486f1..f44d888ecd6e 100644
--- a/drivers/staging/wlags49_h2/hcf.c
+++ b/drivers/staging/wlags49_h2/hcf.c
@@ -250,7 +250,7 @@ HCF_STATIC hcf_8 BASED mic_pad[8] = { 0x5A, 0, 0, 0, 0, 0, 0, 0 }; //MIC pa
#endif // HCF_TYPE_WPA
#if defined MSF_COMPONENT_ID
-CFG_IDENTITY_STRCT BASED cfg_drv_identity = {
+static CFG_IDENTITY_STRCT BASED cfg_drv_identity = {
sizeof(cfg_drv_identity)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_IDENTITY, // (0x0826)
MSF_COMPONENT_ID,
@@ -259,7 +259,7 @@ CFG_IDENTITY_STRCT BASED cfg_drv_identity = {
MSF_COMPONENT_MINOR_VER
} ;
-CFG_RANGES_STRCT BASED cfg_drv_sup_range = {
+static CFG_RANGES_STRCT BASED cfg_drv_sup_range = {
sizeof(cfg_drv_sup_range)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_SUP_RANGE, // (0x0827)
@@ -271,7 +271,7 @@ CFG_RANGES_STRCT BASED cfg_drv_sup_range = {
}}
} ;
-struct CFG_RANGE3_STRCT BASED cfg_drv_act_ranges_pri = {
+static struct CFG_RANGE3_STRCT BASED cfg_drv_act_ranges_pri = {
sizeof(cfg_drv_act_ranges_pri)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_ACT_RANGES_PRI, // (0x0828)
@@ -288,7 +288,7 @@ struct CFG_RANGE3_STRCT BASED cfg_drv_act_ranges_pri = {
} ;
-struct CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_sta = {
+static struct CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_sta = {
sizeof(cfg_drv_act_ranges_sta)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_ACT_RANGES_STA, // (0x0829)
@@ -333,7 +333,7 @@ struct CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_sta = {
} ;
-struct CFG_RANGE6_STRCT BASED cfg_drv_act_ranges_hsi = {
+static struct CFG_RANGE6_STRCT BASED cfg_drv_act_ranges_hsi = {
sizeof(cfg_drv_act_ranges_hsi)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_ACT_RANGES_HSI, // (0x082A)
COMP_ROLE_ACT,
@@ -370,7 +370,7 @@ struct CFG_RANGE6_STRCT BASED cfg_drv_act_ranges_hsi = {
} ;
-CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_apf = {
+static CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_apf = {
sizeof(cfg_drv_act_ranges_apf)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_ACT_RANGES_APF, // (0x082B)
@@ -3099,7 +3099,7 @@ hcf_service_nic( IFBP ifbp, wci_bufp bufp, unsigned int len )
#define L *p
#define R *(p+1)
-void
+static void
calc_mic( hcf_32* p, hcf_32 m )
{
#if HCF_BIG_ENDIAN
@@ -3415,7 +3415,7 @@ calibrate( IFBP ifbp )
*.ENDDOC END DOCUMENTATION
*
************************************************************************************************************/
-int
+static int
check_mic( IFBP ifbp )
{
int rc = HCF_SUCCESS;
diff --git a/drivers/staging/wlags49_h2/sta_h2.c b/drivers/staging/wlags49_h2/sta_h2.c
index 25ac76f7d366..0ba8defc023a 100644
--- a/drivers/staging/wlags49_h2/sta_h2.c
+++ b/drivers/staging/wlags49_h2/sta_h2.c
@@ -4435,7 +4435,7 @@ static const CFG_PROG_STRCT fw_image_code[] = {
0000,
0x000F368E, /* Start execution address */
},
- { 0000, 0000, 0000, 0000, 00000000, 0000, 00000000}
+ { 0000, 0000, 0000, 0000, 00000000, 0000, NULL}
};
static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
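
The wlags49_h2 hunks above make two small, related changes: file-local symbols such as signature[], mfi_sup and check_comp_fw() become static so they stop polluting the global namespace, and the terminator entry of the firmware program table ends in NULL rather than 00000000, since that last member is a pointer and sparse warns about plain integers used as NULL pointers. A reduced illustration with invented names:

#include <linux/stddef.h>

/* Invented stand-in for the real CFG_PROG_STRCT table above */
struct example_prog {
	int len;
	const void *host_addr;	/* pointer member: terminate with NULL */
};

/* static: referenced only in this file, mirroring the signature[] and
 * ltv_info[] changes above */
static const struct example_prog example_fw_table[] = {
	{ 4, "\x01\x02\x03\x04" },
	{ 0, NULL },		/* sentinel: NULL, not a bare integer 0 */
};
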
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index 650def88e5c2..fc98b6dbdb89 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -76,38 +76,23 @@
#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/kernel.h>
-// #include <linux/sched.h>
-// #include <linux/ptrace.h>
-// #include <linux/slab.h>
-// #include <linux/ctype.h>
-// #include <linux/string.h>
-// #include <linux/timer.h>
-//#include <linux/interrupt.h>
-// #include <linux/tqueue.h>
-// #include <linux/in.h>
-// #include <linux/delay.h>
-// #include <asm/io.h>
-// // #include <asm/bitops.h>
#include <linux/unistd.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-// #include <linux/skbuff.h>
-// #include <linux/if_arp.h>
-// #include <linux/ioport.h>
#define BIN_DL 0
#if BIN_DL
#include <linux/vmalloc.h>
-#endif // BIN_DL
+#endif /* BIN_DL */
#include <debug.h>
#include <hcf.h>
#include <dhf.h>
-//in order to get around:: wl_main.c:2229: `HREG_EV_RDMAD' undeclared (first use in this function)
+/* in order to get around:: wl_main.c:2229: `HREG_EV_RDMAD' undeclared (first use in this function) */
#include <hcfdef.h>
#include <wl_if.h>
@@ -133,8 +118,7 @@
******************************************************************************/
#define VALID_PARAM(C) \
{ \
- if (!(C)) \
- { \
+ if (!(C)) { \
printk(KERN_INFO "Wireless, parameter error: \"%s\"\n", #C); \
goto failed; \
} \
@@ -142,7 +126,7 @@
/*******************************************************************************
* local functions
******************************************************************************/
-void wl_isr_handler( unsigned long p );
+void wl_isr_handler(unsigned long p);
#if 0 //SCULL_USE_PROC /* don't waste space if unused */
static int scull_read_procmem(struct seq_file *m, void *v);
@@ -168,7 +152,7 @@ static const struct file_operations scull_read_procmem_fops = {
/*******************************************************************************
* module parameter definitions - set with 'insmod'
******************************************************************************/
-static p_u16 irq_mask = 0xdeb8; // IRQ3,4,5,7,9,10,11,12,14,15
+static p_u16 irq_mask = 0xdeb8; /* IRQ3,4,5,7,9,10,11,12,14,15 */
static p_s8 irq_list[4] = { -1 };
#if 0
@@ -183,7 +167,7 @@ static p_u16 PARM_AUTH_KEY_MGMT_SUITE = PARM_DEFAULT_AUTH_KEY_MGMT_SUITE;
static p_u16 PARM_BRSC_2GHZ = PARM_DEFAULT_BRSC_2GHZ;
static p_u16 PARM_BRSC_5GHZ = PARM_DEFAULT_BRSC_5GHZ;
static p_u16 PARM_COEXISTENCE = PARM_DEFAULT_COEXISTENCE;
-static p_u16 PARM_CONNECTION_CONTROL = PARM_DEFAULT_CONNECTION_CONTROL; //;?rename and move
+static p_u16 PARM_CONNECTION_CONTROL = PARM_DEFAULT_CONNECTION_CONTROL; /* ;?rename and move */
static p_char *PARM_CREATE_IBSS = PARM_DEFAULT_CREATE_IBSS_STR;
static p_char *PARM_DESIRED_SSID = PARM_DEFAULT_SSID;
static p_char *PARM_DOWNLOAD_FIRMWARE = "";
@@ -220,7 +204,7 @@ static p_u16 PARM_RTS_THRESHOLD3 = PARM_DEFAULT_RTS_THRESHOLD;
static p_u16 PARM_RTS_THRESHOLD4 = PARM_DEFAULT_RTS_THRESHOLD;
static p_u16 PARM_RTS_THRESHOLD5 = PARM_DEFAULT_RTS_THRESHOLD;
static p_u16 PARM_RTS_THRESHOLD6 = PARM_DEFAULT_RTS_THRESHOLD;
-#endif // USE_WDS
+#endif /* USE_WDS */
static p_u16 PARM_RTS_THRESHOLD = PARM_DEFAULT_RTS_THRESHOLD;
static p_u16 PARM_SRSC_2GHZ = PARM_DEFAULT_SRSC_2GHZ;
static p_u16 PARM_SRSC_5GHZ = PARM_DEFAULT_SRSC_5GHZ;
@@ -234,7 +218,7 @@ static p_u16 PARM_TX_RATE3 = PARM_DEFAULT_TX_RATE_2GHZ;
static p_u16 PARM_TX_RATE4 = PARM_DEFAULT_TX_RATE_2GHZ;
static p_u16 PARM_TX_RATE5 = PARM_DEFAULT_TX_RATE_2GHZ;
static p_u16 PARM_TX_RATE6 = PARM_DEFAULT_TX_RATE_2GHZ;
-#endif // USE_WDS
+#endif /* USE_WDS */
static p_u16 PARM_TX_RATE = PARM_DEFAULT_TX_RATE_2GHZ;
#ifdef USE_WDS
static p_u8 PARM_WDS_ADDRESS1[ETH_ALEN] = PARM_DEFAULT_NETWORK_ADDR;
@@ -243,7 +227,7 @@ static p_u8 PARM_WDS_ADDRESS3[ETH_ALEN] = PARM_DEFAULT_NETWORK_ADDR;
static p_u8 PARM_WDS_ADDRESS4[ETH_ALEN] = PARM_DEFAULT_NETWORK_ADDR;
static p_u8 PARM_WDS_ADDRESS5[ETH_ALEN] = PARM_DEFAULT_NETWORK_ADDR;
static p_u8 PARM_WDS_ADDRESS6[ETH_ALEN] = PARM_DEFAULT_NETWORK_ADDR;
-#endif // USE_WDS
+#endif /* USE_WDS */
#if 0
@@ -299,39 +283,41 @@ MODULE_PARM(PARM_BRSC_2GHZ, "b");
MODULE_PARM_DESC(PARM_BRSC_2GHZ, "Basic Rate Set Control 2.4 GHz");
MODULE_PARM(PARM_BRSC_5GHZ, "b");
MODULE_PARM_DESC(PARM_BRSC_5GHZ, "Basic Rate Set Control 5.0 GHz");
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA
-//;?seems reasonable that even an AP-only driver could afford this small additional footprint
+#if 1 /* (HCF_TYPE) & HCF_TYPE_STA */
+/* ;?seems reasonable that even an AP-only driver could afford this small additional footprint */
MODULE_PARM(PARM_PM_ENABLED, "h");
MODULE_PARM_DESC(PARM_PM_ENABLED, "Power Management State (0 - 2, 8001 - 8002) [0]");
MODULE_PARM(PARM_PORT_TYPE, "b");
MODULE_PARM_DESC(PARM_PORT_TYPE, "Port Type (1 - 3) [1]");
-//;?MODULE_PARM(PARM_CREATE_IBSS, "s");
-//;?MODULE_PARM_DESC(PARM_CREATE_IBSS, "Create IBSS (<string> N or Y) [N]");
-//;?MODULE_PARM(PARM_MULTICAST_RX, "s");
-//;?MODULE_PARM_DESC(PARM_MULTICAST_RX, "Multicast Receive Enable (<string> N or Y) [Y]");
-//;?MODULE_PARM(PARM_MAX_SLEEP, "h");
-//;?MODULE_PARM_DESC(PARM_MAX_SLEEP, "Maximum Power Management Sleep Duration (0 - 65535) [100]");
-//;?MODULE_PARM(PARM_NETWORK_ADDR, "6b");
-//;?MODULE_PARM_DESC(PARM_NETWORK_ADDR, "Hardware Ethernet Address ([0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff]) [<factory value>]");
-//;?MODULE_PARM(PARM_AUTHENTICATION, "b");
-//
-//tracker 12448
-//;?MODULE_PARM_DESC(PARM_AUTHENTICATION, "Authentication Type (0-2) [0] 0=Open 1=SharedKey 2=LEAP");
-//;?MODULE_PARM_DESC(authentication, "Authentication Type (1-2) [1] 1=Open 2=SharedKey");
-//tracker 12448
-//
-//;?MODULE_PARM(PARM_OWN_ATIM_WINDOW, "b");
-//;?MODULE_PARM_DESC(PARM_OWN_ATIM_WINDOW, "ATIM Window time in TU for IBSS creation (0-100) [0]");
-//;?MODULE_PARM(PARM_PM_HOLDOVER_DURATION, "b");
-//;?MODULE_PARM_DESC(PARM_PM_HOLDOVER_DURATION, "Time station remains awake after MAC frame transfer when PM is on (0-65535) [100]");
-//;?MODULE_PARM(PARM_PROMISCUOUS_MODE, "s");
-//;?MODULE_PARM_DESC(PARM_PROMISCUOUS_MODE, "Promiscuous Mode Enable (<string> Y or N ) [N]" );
-//;?
+/*
+ * ;?MODULE_PARM(PARM_CREATE_IBSS, "s");
+ *;?MODULE_PARM_DESC(PARM_CREATE_IBSS, "Create IBSS (<string> N or Y) [N]");
+ *;?MODULE_PARM(PARM_MULTICAST_RX, "s");
+ *;?MODULE_PARM_DESC(PARM_MULTICAST_RX, "Multicast Receive Enable (<string> N or Y) [Y]");
+ *;?MODULE_PARM(PARM_MAX_SLEEP, "h");
+ *;?MODULE_PARM_DESC(PARM_MAX_SLEEP, "Maximum Power Management Sleep Duration (0 - 65535) [100]");
+ *;?MODULE_PARM(PARM_NETWORK_ADDR, "6b");
+ *;?MODULE_PARM_DESC(PARM_NETWORK_ADDR, "Hardware Ethernet Address ([0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff]) [<factory value>]");
+ *;?MODULE_PARM(PARM_AUTHENTICATION, "b");
+ *
+ *tracker 12448
+ *;?MODULE_PARM_DESC(PARM_AUTHENTICATION, "Authentication Type (0-2) [0] 0=Open 1=SharedKey 2=LEAP");
+ *;?MODULE_PARM_DESC(authentication, "Authentication Type (1-2) [1] 1=Open 2=SharedKey");
+ *tracker 12448
+ *
+ *;?MODULE_PARM(PARM_OWN_ATIM_WINDOW, "b");
+ *;?MODULE_PARM_DESC(PARM_OWN_ATIM_WINDOW, "ATIM Window time in TU for IBSS creation (0-100) [0]");
+ *;?MODULE_PARM(PARM_PM_HOLDOVER_DURATION, "b");
+ *;?MODULE_PARM_DESC(PARM_PM_HOLDOVER_DURATION, "Time station remains awake after MAC frame transfer when PM is on (0-65535) [100]");
+ *;?MODULE_PARM(PARM_PROMISCUOUS_MODE, "s");
+ *;?MODULE_PARM_DESC(PARM_PROMISCUOUS_MODE, "Promiscuous Mode Enable (<string> Y or N ) [N]" );
+ *;?
+ */
MODULE_PARM(PARM_CONNECTION_CONTROL, "b");
MODULE_PARM_DESC(PARM_CONNECTION_CONTROL, "Connection Control (0 - 3) [2]");
#endif /* HCF_STA */
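
The hunks above replace the old C99 "//" parameter comments with a single block comment, the multi-line form the kernel coding-style document and checkpatch prefer. A minimal, hedged sketch of that convention (the identifier below is illustrative, not taken from this driver):

/*
 * Preferred multi-line comment: an opening line holding only the
 * comment opener, a column of asterisks, and the closer on its own line.
 */
static int example_parameter;   /* short trailing comments stay on one line */
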
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
- //;?should we restore this to allow smaller memory footprint
+#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
+ /* ;?should we restore this to allow smaller memory footprint */
MODULE_PARM(PARM_OWN_DTIM_PERIOD, "b");
MODULE_PARM_DESC(PARM_OWN_DTIM_PERIOD, "DTIM Period (0 - 255) [1]");
MODULE_PARM(PARM_REJECT_ANY, "s");
@@ -394,11 +380,12 @@ MODULE_PARM_DESC(PARM_COEXISTENCE, "Coexistence (0-7) [0]");
#if DBG
static p_u32 pc_debug = DBG_LVL;
-//MODULE_PARM(pc_debug, "i");
-/*static ;?conflicts with my understanding of CL parameters and breaks now I moved
+/*
+ * MODULE_PARM(pc_debug, "i");
+ *static ;?conflicts with my understanding of CL parameters and breaks now I moved
 * the corresponding logic to wl_profile
- */ p_u32 DebugFlag = ~0; //recognizable "undefined value" rather then DBG_DEFAULTS;
-//MODULE_PARM(DebugFlag, "l");
+ */ p_u32 DebugFlag = ~0; /* recognizable "undefined value" rather than DBG_DEFAULTS; */
+/* MODULE_PARM(DebugFlag, "l"); */
static struct dbg_info wl_info = { KBUILD_MODNAME, 0, 0 };
struct dbg_info *DbgInfo = &wl_info;
@@ -407,8 +394,8 @@ struct dbg_info *DbgInfo = &wl_info;
#ifdef USE_RTS
static p_char *useRTS = "N";
-MODULE_PARM( useRTS, "s" );
-MODULE_PARM_DESC( useRTS, "Use RTS test interface (<string> N or Y) [N]" );
+MODULE_PARM(useRTS, "s");
+MODULE_PARM_DESC(useRTS, "Use RTS test interface (<string> N or Y) [N]");
#endif /* USE_RTS */
/*******************************************************************************
@@ -427,7 +414,7 @@ extern memimage fw_image; // firmware image to be downloaded
#endif /* HCF_STA */
-int wl_insert( struct net_device *dev )
+int wl_insert(struct net_device *dev)
{
int result = 0;
int hcf_status = HCF_SUCCESS;
@@ -436,10 +423,10 @@ int wl_insert( struct net_device *dev )
struct wl_private *lp = wl_priv(dev);
/* Initialize the adapter hardware. */
- memset( &( lp->hcfCtx ), 0, sizeof( IFB_STRCT ));
+ memset(&(lp->hcfCtx), 0, sizeof(IFB_STRCT));
/* Initialize the adapter parameters. */
- spin_lock_init( &( lp->slock ));
+ spin_lock_init(&(lp->slock));
/* Initialize states */
//lp->lockcount = 0; //PE1DNN
@@ -448,31 +435,31 @@ int wl_insert( struct net_device *dev )
lp->dev = dev;
- DBG_PARAM( DbgInfo, "irq_mask", "0x%04x", irq_mask & 0x0FFFF );
- DBG_PARAM( DbgInfo, "irq_list", "0x%02x 0x%02x 0x%02x 0x%02x",
+ DBG_PARAM(DbgInfo, "irq_mask", "0x%04x", irq_mask & 0x0FFFF);
+ DBG_PARAM(DbgInfo, "irq_list", "0x%02x 0x%02x 0x%02x 0x%02x",
irq_list[0] & 0x0FF, irq_list[1] & 0x0FF,
- irq_list[2] & 0x0FF, irq_list[3] & 0x0FF );
- DBG_PARAM( DbgInfo, PARM_NAME_DESIRED_SSID, "\"%s\"", PARM_DESIRED_SSID );
- DBG_PARAM( DbgInfo, PARM_NAME_OWN_SSID, "\"%s\"", PARM_OWN_SSID );
- DBG_PARAM( DbgInfo, PARM_NAME_OWN_CHANNEL, "%d", PARM_OWN_CHANNEL);
- DBG_PARAM( DbgInfo, PARM_NAME_SYSTEM_SCALE, "%d", PARM_SYSTEM_SCALE );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE, "%d", PARM_TX_RATE );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD, "%d", PARM_RTS_THRESHOLD );
- DBG_PARAM( DbgInfo, PARM_NAME_MICROWAVE_ROBUSTNESS, "\"%s\"", PARM_MICROWAVE_ROBUSTNESS );
- DBG_PARAM( DbgInfo, PARM_NAME_OWN_NAME, "\"%s\"", PARM_OWN_NAME );
+ irq_list[2] & 0x0FF, irq_list[3] & 0x0FF);
+ DBG_PARAM(DbgInfo, PARM_NAME_DESIRED_SSID, "\"%s\"", PARM_DESIRED_SSID);
+ DBG_PARAM(DbgInfo, PARM_NAME_OWN_SSID, "\"%s\"", PARM_OWN_SSID);
+ DBG_PARAM(DbgInfo, PARM_NAME_OWN_CHANNEL, "%d", PARM_OWN_CHANNEL);
+ DBG_PARAM(DbgInfo, PARM_NAME_SYSTEM_SCALE, "%d", PARM_SYSTEM_SCALE);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE, "%d", PARM_TX_RATE);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD, "%d", PARM_RTS_THRESHOLD);
+ DBG_PARAM(DbgInfo, PARM_NAME_MICROWAVE_ROBUSTNESS, "\"%s\"", PARM_MICROWAVE_ROBUSTNESS);
+ DBG_PARAM(DbgInfo, PARM_NAME_OWN_NAME, "\"%s\"", PARM_OWN_NAME);
//;? DBG_PARAM( DbgInfo, PARM_NAME_ENABLE_ENCRYPTION, "\"%s\"", PARM_ENABLE_ENCRYPTION );
- DBG_PARAM( DbgInfo, PARM_NAME_KEY1, "\"%s\"", PARM_KEY1 );
- DBG_PARAM( DbgInfo, PARM_NAME_KEY2, "\"%s\"", PARM_KEY2 );
- DBG_PARAM( DbgInfo, PARM_NAME_KEY3, "\"%s\"", PARM_KEY3 );
- DBG_PARAM( DbgInfo, PARM_NAME_KEY4, "\"%s\"", PARM_KEY4 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_KEY, "%d", PARM_TX_KEY );
- DBG_PARAM( DbgInfo, PARM_NAME_MULTICAST_RATE, "%d", PARM_MULTICAST_RATE );
- DBG_PARAM( DbgInfo, PARM_NAME_DOWNLOAD_FIRMWARE, "\"%s\"", PARM_DOWNLOAD_FIRMWARE );
- DBG_PARAM( DbgInfo, PARM_NAME_AUTH_KEY_MGMT_SUITE, "%d", PARM_AUTH_KEY_MGMT_SUITE );
+ DBG_PARAM(DbgInfo, PARM_NAME_KEY1, "\"%s\"", PARM_KEY1);
+ DBG_PARAM(DbgInfo, PARM_NAME_KEY2, "\"%s\"", PARM_KEY2);
+ DBG_PARAM(DbgInfo, PARM_NAME_KEY3, "\"%s\"", PARM_KEY3);
+ DBG_PARAM(DbgInfo, PARM_NAME_KEY4, "\"%s\"", PARM_KEY4);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_KEY, "%d", PARM_TX_KEY);
+ DBG_PARAM(DbgInfo, PARM_NAME_MULTICAST_RATE, "%d", PARM_MULTICAST_RATE);
+ DBG_PARAM(DbgInfo, PARM_NAME_DOWNLOAD_FIRMWARE, "\"%s\"", PARM_DOWNLOAD_FIRMWARE);
+ DBG_PARAM(DbgInfo, PARM_NAME_AUTH_KEY_MGMT_SUITE, "%d", PARM_AUTH_KEY_MGMT_SUITE);
//;?#if (HCF_TYPE) & HCF_TYPE_STA
//;?should we make this code conditional depending on in STA mode
//;? DBG_PARAM( DbgInfo, PARM_NAME_PORT_TYPE, "%d", PARM_PORT_TYPE );
- DBG_PARAM( DbgInfo, PARM_NAME_PM_ENABLED, "%04x", PARM_PM_ENABLED );
+ DBG_PARAM(DbgInfo, PARM_NAME_PM_ENABLED, "%04x", PARM_PM_ENABLED);
//;? DBG_PARAM( DbgInfo, PARM_NAME_CREATE_IBSS, "\"%s\"", PARM_CREATE_IBSS );
//;? DBG_PARAM( DbgInfo, PARM_NAME_MULTICAST_RX, "\"%s\"", PARM_MULTICAST_RX );
//;? DBG_PARAM( DbgInfo, PARM_NAME_MAX_SLEEP, "%d", PARM_MAX_SLEEP );
@@ -488,24 +475,24 @@ int wl_insert( struct net_device *dev )
#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
//;?should we restore this to allow smaller memory footprint
//;?I guess: no, since this is Debug mode only
- DBG_PARAM( DbgInfo, PARM_NAME_OWN_DTIM_PERIOD, "%d", PARM_OWN_DTIM_PERIOD );
- DBG_PARAM( DbgInfo, PARM_NAME_REJECT_ANY, "\"%s\"", PARM_REJECT_ANY );
- DBG_PARAM( DbgInfo, PARM_NAME_EXCLUDE_UNENCRYPTED, "\"%s\"", PARM_EXCLUDE_UNENCRYPTED );
- DBG_PARAM( DbgInfo, PARM_NAME_MULTICAST_PM_BUFFERING, "\"%s\"", PARM_MULTICAST_PM_BUFFERING );
- DBG_PARAM( DbgInfo, PARM_NAME_INTRA_BSS_RELAY, "\"%s\"", PARM_INTRA_BSS_RELAY );
+ DBG_PARAM(DbgInfo, PARM_NAME_OWN_DTIM_PERIOD, "%d", PARM_OWN_DTIM_PERIOD);
+ DBG_PARAM(DbgInfo, PARM_NAME_REJECT_ANY, "\"%s\"", PARM_REJECT_ANY);
+ DBG_PARAM(DbgInfo, PARM_NAME_EXCLUDE_UNENCRYPTED, "\"%s\"", PARM_EXCLUDE_UNENCRYPTED);
+ DBG_PARAM(DbgInfo, PARM_NAME_MULTICAST_PM_BUFFERING, "\"%s\"", PARM_MULTICAST_PM_BUFFERING);
+ DBG_PARAM(DbgInfo, PARM_NAME_INTRA_BSS_RELAY, "\"%s\"", PARM_INTRA_BSS_RELAY);
#ifdef USE_WDS
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD1, "%d", PARM_RTS_THRESHOLD1 );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD2, "%d", PARM_RTS_THRESHOLD2 );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD3, "%d", PARM_RTS_THRESHOLD3 );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD4, "%d", PARM_RTS_THRESHOLD4 );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD5, "%d", PARM_RTS_THRESHOLD5 );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD6, "%d", PARM_RTS_THRESHOLD6 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE1, "%d", PARM_TX_RATE1 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE2, "%d", PARM_TX_RATE2 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE3, "%d", PARM_TX_RATE3 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE4, "%d", PARM_TX_RATE4 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE5, "%d", PARM_TX_RATE5 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE6, "%d", PARM_TX_RATE6 );
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD1, "%d", PARM_RTS_THRESHOLD1);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD2, "%d", PARM_RTS_THRESHOLD2);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD3, "%d", PARM_RTS_THRESHOLD3);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD4, "%d", PARM_RTS_THRESHOLD4);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD5, "%d", PARM_RTS_THRESHOLD5);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD6, "%d", PARM_RTS_THRESHOLD6);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE1, "%d", PARM_TX_RATE1);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE2, "%d", PARM_TX_RATE2);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE3, "%d", PARM_TX_RATE3);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE4, "%d", PARM_TX_RATE4);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE5, "%d", PARM_TX_RATE5);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE6, "%d", PARM_TX_RATE6);
DBG_PARAM(DbgInfo, PARM_NAME_WDS_ADDRESS1, "\"%pM\"",
PARM_WDS_ADDRESS1);
DBG_PARAM(DbgInfo, PARM_NAME_WDS_ADDRESS2, "\"%pM\"",
@@ -521,28 +508,28 @@ int wl_insert( struct net_device *dev )
#endif /* USE_WDS */
#endif /* HCF_AP */
- VALID_PARAM( !PARM_DESIRED_SSID || ( strlen( PARM_DESIRED_SSID ) <= PARM_MAX_NAME_LEN ));
- VALID_PARAM( !PARM_OWN_SSID || ( strlen( PARM_OWN_SSID ) <= PARM_MAX_NAME_LEN ));
- VALID_PARAM(( PARM_OWN_CHANNEL <= PARM_MAX_OWN_CHANNEL ));
- VALID_PARAM(( PARM_SYSTEM_SCALE >= PARM_MIN_SYSTEM_SCALE ) && ( PARM_SYSTEM_SCALE <= PARM_MAX_SYSTEM_SCALE ));
- VALID_PARAM(( PARM_TX_RATE >= PARM_MIN_TX_RATE ) && ( PARM_TX_RATE <= PARM_MAX_TX_RATE ));
- VALID_PARAM(( PARM_RTS_THRESHOLD <= PARM_MAX_RTS_THRESHOLD ));
- VALID_PARAM( !PARM_MICROWAVE_ROBUSTNESS || strchr( "NnYy", PARM_MICROWAVE_ROBUSTNESS[0] ) != NULL );
- VALID_PARAM( !PARM_OWN_NAME || ( strlen( PARM_NAME_OWN_NAME ) <= PARM_MAX_NAME_LEN ));
- VALID_PARAM(( PARM_ENABLE_ENCRYPTION <= PARM_MAX_ENABLE_ENCRYPTION ));
- VALID_PARAM( is_valid_key_string( PARM_KEY1 ));
- VALID_PARAM( is_valid_key_string( PARM_KEY2 ));
- VALID_PARAM( is_valid_key_string( PARM_KEY3 ));
- VALID_PARAM( is_valid_key_string( PARM_KEY4 ));
- VALID_PARAM(( PARM_TX_KEY >= PARM_MIN_TX_KEY ) && ( PARM_TX_KEY <= PARM_MAX_TX_KEY ));
-
- VALID_PARAM(( PARM_MULTICAST_RATE >= PARM_MIN_MULTICAST_RATE ) &&
- ( PARM_MULTICAST_RATE <= PARM_MAX_MULTICAST_RATE ));
-
- VALID_PARAM( !PARM_DOWNLOAD_FIRMWARE || ( strlen( PARM_DOWNLOAD_FIRMWARE ) <= 255 /*;?*/ ));
- VALID_PARAM(( PARM_AUTH_KEY_MGMT_SUITE < PARM_MAX_AUTH_KEY_MGMT_SUITE ));
-
- VALID_PARAM( !PARM_LOAD_BALANCING || strchr( "NnYy", PARM_LOAD_BALANCING[0] ) != NULL );
+ VALID_PARAM(!PARM_DESIRED_SSID || (strlen(PARM_DESIRED_SSID) <= PARM_MAX_NAME_LEN));
+ VALID_PARAM(!PARM_OWN_SSID || (strlen(PARM_OWN_SSID) <= PARM_MAX_NAME_LEN));
+ VALID_PARAM((PARM_OWN_CHANNEL <= PARM_MAX_OWN_CHANNEL));
+ VALID_PARAM((PARM_SYSTEM_SCALE >= PARM_MIN_SYSTEM_SCALE) && (PARM_SYSTEM_SCALE <= PARM_MAX_SYSTEM_SCALE));
+ VALID_PARAM((PARM_TX_RATE >= PARM_MIN_TX_RATE) && (PARM_TX_RATE <= PARM_MAX_TX_RATE));
+ VALID_PARAM((PARM_RTS_THRESHOLD <= PARM_MAX_RTS_THRESHOLD));
+ VALID_PARAM(!PARM_MICROWAVE_ROBUSTNESS || strchr("NnYy", PARM_MICROWAVE_ROBUSTNESS[0]) != NULL);
+ VALID_PARAM(!PARM_OWN_NAME || (strlen(PARM_NAME_OWN_NAME) <= PARM_MAX_NAME_LEN));
+ VALID_PARAM((PARM_ENABLE_ENCRYPTION <= PARM_MAX_ENABLE_ENCRYPTION));
+ VALID_PARAM(is_valid_key_string(PARM_KEY1));
+ VALID_PARAM(is_valid_key_string(PARM_KEY2));
+ VALID_PARAM(is_valid_key_string(PARM_KEY3));
+ VALID_PARAM(is_valid_key_string(PARM_KEY4));
+ VALID_PARAM((PARM_TX_KEY >= PARM_MIN_TX_KEY) && (PARM_TX_KEY <= PARM_MAX_TX_KEY));
+
+ VALID_PARAM((PARM_MULTICAST_RATE >= PARM_MIN_MULTICAST_RATE) &&
+ (PARM_MULTICAST_RATE <= PARM_MAX_MULTICAST_RATE));
+
+ VALID_PARAM(!PARM_DOWNLOAD_FIRMWARE || (strlen(PARM_DOWNLOAD_FIRMWARE) <= 255 /*;?*/));
+ VALID_PARAM((PARM_AUTH_KEY_MGMT_SUITE < PARM_MAX_AUTH_KEY_MGMT_SUITE));
+
+ VALID_PARAM(!PARM_LOAD_BALANCING || strchr("NnYy", PARM_LOAD_BALANCING[0]) != NULL);
VALID_PARAM( !PARM_MEDIUM_DISTRIBUTION || strchr( "NnYy", PARM_MEDIUM_DISTRIBUTION[0] ) != NULL );
VALID_PARAM(( PARM_TX_POW_LEVEL <= PARM_MAX_TX_POW_LEVEL ));
@@ -601,33 +588,25 @@ int wl_insert( struct net_device *dev )
lp->MulticastRate[0] = PARM_DEFAULT_MULTICAST_RATE_2GHZ;
lp->MulticastRate[1] = PARM_DEFAULT_MULTICAST_RATE_5GHZ;
- if ( strchr( "Yy", PARM_MICROWAVE_ROBUSTNESS[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_MICROWAVE_ROBUSTNESS[0] ) != NULL )
lp->MicrowaveRobustness = 1;
- } else {
+ else
lp->MicrowaveRobustness = 0;
- }
- if ( PARM_DESIRED_SSID && ( strlen( PARM_DESIRED_SSID ) <= HCF_MAX_NAME_LEN )) {
+ if ( PARM_DESIRED_SSID && ( strlen( PARM_DESIRED_SSID ) <= HCF_MAX_NAME_LEN ))
strcpy( lp->NetworkName, PARM_DESIRED_SSID );
- }
- if ( PARM_OWN_SSID && ( strlen( PARM_OWN_SSID ) <= HCF_MAX_NAME_LEN )) {
+ if ( PARM_OWN_SSID && ( strlen( PARM_OWN_SSID ) <= HCF_MAX_NAME_LEN ))
strcpy( lp->NetworkName, PARM_OWN_SSID );
- }
- if ( PARM_OWN_NAME && ( strlen( PARM_OWN_NAME ) <= HCF_MAX_NAME_LEN )) {
+ if ( PARM_OWN_NAME && ( strlen( PARM_OWN_NAME ) <= HCF_MAX_NAME_LEN ))
strcpy( lp->StationName, PARM_OWN_NAME );
- }
lp->EnableEncryption = PARM_ENABLE_ENCRYPTION;
- if ( PARM_KEY1 && ( strlen( PARM_KEY1 ) <= MAX_KEY_LEN )) {
+ if ( PARM_KEY1 && ( strlen( PARM_KEY1 ) <= MAX_KEY_LEN ))
strcpy( lp->Key1, PARM_KEY1 );
- }
- if ( PARM_KEY2 && ( strlen( PARM_KEY2 ) <= MAX_KEY_LEN )) {
+ if ( PARM_KEY2 && ( strlen( PARM_KEY2 ) <= MAX_KEY_LEN ))
strcpy( lp->Key2, PARM_KEY2 );
- }
- if ( PARM_KEY3 && ( strlen( PARM_KEY3 ) <= MAX_KEY_LEN )) {
+ if ( PARM_KEY3 && ( strlen( PARM_KEY3 ) <= MAX_KEY_LEN ))
strcpy( lp->Key3, PARM_KEY3 );
- }
- if ( PARM_KEY4 && ( strlen( PARM_KEY4 ) <= MAX_KEY_LEN )) {
+ if ( PARM_KEY4 && ( strlen( PARM_KEY4 ) <= MAX_KEY_LEN ))
strcpy( lp->Key4, PARM_KEY4 );
- }
lp->TransmitKeyID = PARM_TX_KEY;
@@ -639,17 +618,15 @@ int wl_insert( struct net_device *dev )
lp->DownloadFirmware = 1 ; //;?to be upgraded PARM_DOWNLOAD_FIRMWARE;
lp->AuthKeyMgmtSuite = PARM_AUTH_KEY_MGMT_SUITE;
- if ( strchr( "Yy", PARM_LOAD_BALANCING[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_LOAD_BALANCING[0] ) != NULL )
lp->loadBalancing = 1;
- } else {
+ else
lp->loadBalancing = 0;
- }
- if ( strchr( "Yy", PARM_MEDIUM_DISTRIBUTION[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_MEDIUM_DISTRIBUTION[0] ) != NULL )
lp->mediumDistribution = 1;
- } else {
+ else
lp->mediumDistribution = 0;
- }
lp->txPowLevel = PARM_TX_POW_LEVEL;
@@ -665,24 +642,20 @@ int wl_insert( struct net_device *dev )
lp->atimWindow = PARM_OWN_ATIM_WINDOW;
lp->holdoverDuration = PARM_PM_HOLDOVER_DURATION;
lp->PMEnabled = PARM_PM_ENABLED; //;?
- if ( strchr( "Yy", PARM_CREATE_IBSS[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_CREATE_IBSS[0] ) != NULL )
lp->CreateIBSS = 1;
- } else {
+ else
lp->CreateIBSS = 0;
- }
- if ( strchr( "Nn", PARM_MULTICAST_RX[0] ) != NULL ) {
+ if ( strchr( "Nn", PARM_MULTICAST_RX[0] ) != NULL )
lp->MulticastReceive = 0;
- } else {
+ else
lp->MulticastReceive = 1;
- }
- if ( strchr( "Yy", PARM_PROMISCUOUS_MODE[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_PROMISCUOUS_MODE[0] ) != NULL )
lp->promiscuousMode = 1;
- } else {
+ else
lp->promiscuousMode = 0;
- }
- for( i = 0; i < ETH_ALEN; i++ ) {
- lp->MACAddress[i] = PARM_NETWORK_ADDR[i];
- }
+ for( i = 0; i < ETH_ALEN; i++ )
+ lp->MACAddress[i] = PARM_NETWORK_ADDR[i];
lp->connectionControl = PARM_CONNECTION_CONTROL;
@@ -691,26 +664,22 @@ int wl_insert( struct net_device *dev )
//;?should we restore this to allow smaller memory footprint
lp->DTIMPeriod = PARM_OWN_DTIM_PERIOD;
- if ( strchr( "Yy", PARM_REJECT_ANY[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_REJECT_ANY[0] ) != NULL )
lp->RejectAny = 1;
- } else {
+ else
lp->RejectAny = 0;
- }
- if ( strchr( "Nn", PARM_EXCLUDE_UNENCRYPTED[0] ) != NULL ) {
+ if ( strchr( "Nn", PARM_EXCLUDE_UNENCRYPTED[0] ) != NULL )
lp->ExcludeUnencrypted = 0;
- } else {
+ else
lp->ExcludeUnencrypted = 1;
- }
- if ( strchr( "Yy", PARM_MULTICAST_PM_BUFFERING[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_MULTICAST_PM_BUFFERING[0] ) != NULL )
lp->multicastPMBuffering = 1;
- } else {
+ else
lp->multicastPMBuffering = 0;
- }
- if ( strchr( "Yy", PARM_INTRA_BSS_RELAY[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_INTRA_BSS_RELAY[0] ) != NULL )
lp->intraBSSRelay = 1;
- } else {
+ else
lp->intraBSSRelay = 0;
- }
lp->ownBeaconInterval = PARM_OWN_BEACON_INTERVAL;
lp->coexistence = PARM_COEXISTENCE;
@@ -750,11 +719,10 @@ int wl_insert( struct net_device *dev )
#endif /* USE_WDS */
#endif /* HCF_AP */
#ifdef USE_RTS
- if ( strchr( "Yy", useRTS[0] ) != NULL ) {
+ if ( strchr( "Yy", useRTS[0] ) != NULL )
lp->useRTS = 1;
- } else {
+ else
lp->useRTS = 0;
- }
#endif /* USE_RTS */
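
The brace removals above follow the kernel style rule that an if/else whose branches are single statements is written without braces. A small self-contained sketch of the same Y/N parameter pattern; the function and variable names are hypothetical, and the empty-string guard is added here because strchr() matching the terminator is a pitfall the sketch avoids:

#include <string.h>

/* Parse a "Y"/"N" style module parameter into a flag (sketch only). */
static int parse_yes_no(const char *param)
{
        int enabled;

        /* Guard the empty string: strchr(s, '\0') matches the terminator. */
        if (param[0] != '\0' && strchr("Yy", param[0]) != NULL)
                enabled = 1;    /* single-statement branch: no braces */
        else
                enabled = 0;

        return enabled;
}
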
@@ -1560,7 +1528,8 @@ int wl_put_ltv( struct wl_private *lp )
hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
/* Own Name (Station Nickname) */
- if (( len = ( strlen( lp->StationName ) + 1 ) & ~0x01 ) != 0 ) {
+ len = (strlen(lp->StationName) + 1) & ~0x01;
+ if (len != 0) {
//DBG_TRACE( DbgInfo, "CFG_CNF_OWN_NAME : %s\n",
// lp->StationName );
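
The wl_put_ltv() hunk above also pulls the length computation out of the if condition; checkpatch flags assignments inside conditionals as hard to read. A minimal sketch of the preferred shape, with a stand-in helper name:

#include <string.h>

/* Even, NUL-padded length of a station name, mirroring the test above. */
static size_t even_name_len(const char *name)
{
        size_t len;

        /* Preferred: assign first, then test, instead of if ((len = ...)). */
        len = (strlen(name) + 1) & ~(size_t)0x01;
        return len;
}
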
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 965b1c0a4753..a10d014365f2 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -68,31 +68,14 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
-// #include <linux/sched.h>
-// #include <linux/ptrace.h>
-// #include <linux/slab.h>
-// #include <linux/ctype.h>
-// #include <linux/string.h>
-//#include <linux/timer.h>
-// #include <linux/interrupt.h>
-// #include <linux/in.h>
-// #include <linux/delay.h>
-// #include <linux/skbuff.h>
-// #include <asm/io.h>
-// // #include <asm/bitops.h>
-
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
-// #include <linux/skbuff.h>
-// #include <linux/if_arp.h>
-// #include <linux/ioport.h>
#include <debug.h>
#include <hcf.h>
#include <dhf.h>
-// #include <hcfdef.h>
#include <wl_if.h>
#include <wl_internal.h>
@@ -104,16 +87,15 @@
#ifdef USE_PROFILE
#include <wl_profile.h>
-#endif /* USE_PROFILE */
+#endif /* USE_PROFILE */
#ifdef BUS_PCMCIA
#include <wl_cs.h>
-#endif /* BUS_PCMCIA */
+#endif /* BUS_PCMCIA */
#ifdef BUS_PCI
#include <wl_pci.h>
-#endif /* BUS_PCI */
-
+#endif /* BUS_PCI */
#if HCF_ENCAP
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN - 8)
@@ -121,17 +103,15 @@
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN)
#endif
-//static int mtu = MTU_MAX;
-//MODULE_PARM(mtu, "i");
-//MODULE_PARM_DESC(mtu, "MTU");
-
/*******************************************************************************
* macros
******************************************************************************/
#define BLOCK_INPUT(buf, len) \
- desc->buf_addr = buf; \
- desc->BUF_SIZE = len; \
- status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0)
+ do { \
+ desc->buf_addr = buf; \
+ desc->BUF_SIZE = len; \
+ status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0); \
+ } while (0)
#define BLOCK_INPUT_DMA(buf, len) memcpy( buf, desc_next->buf_addr, pktlen )
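
The BLOCK_INPUT rewrite above wraps the three statements in do { } while (0) so the macro expands to a single statement; without the wrapper, using it as the body of an unbraced if would leave the trailing statements outside the branch. A standalone illustration with a hypothetical macro, not the driver's:

#include <stdio.h>

/* Multi-statement helper wrapped so it behaves as one statement. */
#define LOG_AND_COUNT(msg, counter)             \
        do {                                    \
                puts(msg);                      \
                (counter)++;                    \
        } while (0)

int main(void)
{
        int errors = 0;

        if (1)                          /* safe even without braces */
                LOG_AND_COUNT("boot", errors);
        else
                puts("never reached");

        return errors == 1 ? 0 : 1;
}
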
@@ -158,20 +138,12 @@
* errno value otherwise
*
******************************************************************************/
-int wl_init( struct net_device *dev )
+int wl_init(struct net_device *dev)
{
-// unsigned long flags;
-// struct wl_private *lp = wl_priv(dev);
-
- DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ return 0;
+} /* wl_init */
- /* Nothing to do, but grab the spinlock anyway just in case we ever need
- this routine */
-// wl_lock( lp, &flags );
-// wl_unlock( lp, &flags );
-
- return 0;
-} // wl_init
/*============================================================================*/
/*******************************************************************************
@@ -193,17 +165,20 @@ int wl_init( struct net_device *dev )
* errno otherwise
*
******************************************************************************/
-int wl_config( struct net_device *dev, struct ifmap *map )
+int wl_config(struct net_device *dev, struct ifmap *map)
{
- DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
- DBG_PARAM( DbgInfo, "map", "0x%p", map );
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "map", "0x%p", map);
+
+ /*
+ * The only thing we care about here is a port change.
+ * Since this is not needed, ignore the request.
+ */
+ DBG_TRACE(DbgInfo, "%s: %s called.\n", dev->name, __func__);
- /* The only thing we care about here is a port change. Since this not needed,
- ignore the request. */
- DBG_TRACE(DbgInfo, "%s: %s called.\n", dev->name, __func__);
+ return 0;
+} /* wl_config */
- return 0;
-} // wl_config
/*============================================================================*/
/*******************************************************************************
@@ -224,48 +199,47 @@ int wl_config( struct net_device *dev, struct ifmap *map )
* statistics.
*
******************************************************************************/
-struct net_device_stats *wl_stats( struct net_device *dev )
+struct net_device_stats *wl_stats(struct net_device *dev)
{
#ifdef USE_WDS
- int count;
-#endif /* USE_WDS */
- unsigned long flags;
- struct net_device_stats *pStats;
- struct wl_private *lp = wl_priv(dev);
+ int count;
+#endif /* USE_WDS */
+ unsigned long flags;
+ struct net_device_stats *pStats;
+ struct wl_private *lp = wl_priv(dev);
- //DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
+ /*DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev ); */
- pStats = NULL;
+ pStats = NULL;
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- wl_unlock( lp, &flags );
- return NULL;
- }
-#endif /* USE_RTS */
+ if (lp->useRTS == 1) {
+ wl_unlock(lp, &flags);
+ return NULL;
+ }
+#endif /* USE_RTS */
- /* Return the statistics for the appropriate device */
+ /* Return the statistics for the appropriate device */
#ifdef USE_WDS
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( dev == lp->wds_port[count].dev ) {
- pStats = &( lp->wds_port[count].stats );
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (dev == lp->wds_port[count].dev)
+ pStats = &(lp->wds_port[count].stats);
}
- }
-#endif /* USE_WDS */
+#endif /* USE_WDS */
+
+ /* If pStats is still NULL, then the device is not a WDS port */
+ if (pStats == NULL)
+ pStats = &(lp->stats);
- /* If pStats is still NULL, then the device is not a WDS port */
- if( pStats == NULL ) {
- pStats = &( lp->stats );
- }
+ wl_unlock(lp, &flags);
- wl_unlock( lp, &flags );
+ return pStats;
+} /* wl_stats */
- return pStats;
-} // wl_stats
/*============================================================================*/
/*******************************************************************************
@@ -288,75 +262,77 @@ struct net_device_stats *wl_stats( struct net_device *dev )
******************************************************************************/
int wl_open(struct net_device *dev)
{
- int status = HCF_SUCCESS;
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
+ int status = HCF_SUCCESS;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_TRACE( DbgInfo, "Skipping device open, in RTS mode\n" );
- wl_unlock( lp, &flags );
- return -EIO;
- }
-#endif /* USE_RTS */
+ if (lp->useRTS == 1) {
+ DBG_TRACE(DbgInfo, "Skipping device open, in RTS mode\n");
+ wl_unlock(lp, &flags);
+ return -EIO;
+ }
+#endif /* USE_RTS */
#ifdef USE_PROFILE
- parse_config( dev );
+ parse_config(dev);
#endif
- if( lp->portState == WVLAN_PORT_STATE_DISABLED ) {
- DBG_TRACE( DbgInfo, "Enabling Port 0\n" );
- status = wl_enable( lp );
-
- if( status != HCF_SUCCESS ) {
- DBG_TRACE( DbgInfo, "Enable port 0 failed: 0x%x\n", status );
- }
- }
-
- // Holding the lock too long, make a gap to allow other processes
- wl_unlock(lp, &flags);
- wl_lock( lp, &flags );
-
- if ( strlen( lp->fw_image_filename ) ) {
- DBG_TRACE( DbgInfo, ";???? Kludgy way to force a download\n" );
- status = wl_go( lp );
- } else {
- status = wl_apply( lp );
- }
-
- // Holding the lock too long, make a gap to allow other processes
- wl_unlock(lp, &flags);
- wl_lock( lp, &flags );
-
- if( status != HCF_SUCCESS ) {
- // Unsuccessful, try reset of the card to recover
- status = wl_reset( dev );
- }
-
- // Holding the lock too long, make a gap to allow other processes
- wl_unlock(lp, &flags);
- wl_lock( lp, &flags );
-
- if( status == HCF_SUCCESS ) {
- netif_carrier_on( dev );
- WL_WDS_NETIF_CARRIER_ON( lp );
-
- lp->is_handling_int = WL_HANDLING_INT; // Start handling interrupts
- wl_act_int_on( lp );
-
- netif_start_queue( dev );
- WL_WDS_NETIF_START_QUEUE( lp );
- } else {
- wl_hcf_error( dev, status ); /* Report the error */
- netif_device_detach( dev ); /* Stop the device and queue */
- }
-
- wl_unlock( lp, &flags );
-
- return status;
-} // wl_open
+ if (lp->portState == WVLAN_PORT_STATE_DISABLED) {
+ DBG_TRACE(DbgInfo, "Enabling Port 0\n");
+ status = wl_enable(lp);
+
+ if (status != HCF_SUCCESS) {
+ DBG_TRACE(DbgInfo, "Enable port 0 failed: 0x%x\n",
+ status);
+ }
+ }
+
+ /* Holding the lock too long, make a gap to allow other processes */
+ wl_unlock(lp, &flags);
+ wl_lock(lp, &flags);
+
+ if (strlen(lp->fw_image_filename)) {
+ DBG_TRACE(DbgInfo, ";???? Kludgy way to force a download\n");
+ status = wl_go(lp);
+ } else {
+ status = wl_apply(lp);
+ }
+
+ /* Holding the lock too long, make a gap to allow other processes */
+ wl_unlock(lp, &flags);
+ wl_lock(lp, &flags);
+
+ /* Unsuccessful, try reset of the card to recover */
+ if (status != HCF_SUCCESS)
+ status = wl_reset(dev);
+
+ /* Holding the lock too long, make a gap to allow other processes */
+ wl_unlock(lp, &flags);
+ wl_lock(lp, &flags);
+
+ if (status == HCF_SUCCESS) {
+ netif_carrier_on(dev);
+ WL_WDS_NETIF_CARRIER_ON(lp);
+
+ /* Start handling interrupts */
+ lp->is_handling_int = WL_HANDLING_INT;
+ wl_act_int_on(lp);
+
+ netif_start_queue(dev);
+ WL_WDS_NETIF_START_QUEUE(lp);
+ } else {
+ wl_hcf_error(dev, status); /* Report the error */
+ netif_device_detach(dev); /* Stop the device and queue */
+ }
+
+ wl_unlock(lp, &flags);
+
+ return status;
+} /* wl_open */
+
/*============================================================================*/
/*******************************************************************************
@@ -377,73 +353,70 @@ int wl_open(struct net_device *dev)
* errno otherwise
*
******************************************************************************/
-int wl_close( struct net_device *dev )
+int wl_close(struct net_device *dev)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
- DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- /* Mark the adapter as busy */
- netif_stop_queue( dev );
- WL_WDS_NETIF_STOP_QUEUE( lp );
+ /* Mark the adapter as busy */
+ netif_stop_queue(dev);
+ WL_WDS_NETIF_STOP_QUEUE(lp);
- netif_carrier_off( dev );
- WL_WDS_NETIF_CARRIER_OFF( lp );
+ netif_carrier_off(dev);
+ WL_WDS_NETIF_CARRIER_OFF(lp);
- /* Shutdown the adapter:
- Disable adapter interrupts
- Stop Tx/Rx
- Update statistics
- Set low power mode
- */
+ /*
+ * Shutdown the adapter:
+ * Disable adapter interrupts
+ * Stop Tx/Rx
+ * Update statistics
+ * Set low power mode
+ */
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
- wl_act_int_off( lp );
- lp->is_handling_int = WL_NOT_HANDLING_INT; // Stop handling interrupts
+ wl_act_int_off(lp);
+ /* Stop handling interrupts */
+ lp->is_handling_int = WL_NOT_HANDLING_INT;
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_TRACE( DbgInfo, "Skipping device close, in RTS mode\n" );
- wl_unlock( lp, &flags );
- return -EIO;
- }
-#endif /* USE_RTS */
+ if (lp->useRTS == 1) {
+ DBG_TRACE(DbgInfo, "Skipping device close, in RTS mode\n");
+ wl_unlock(lp, &flags);
+ return -EIO;
+ }
+#endif /* USE_RTS */
- /* Disable the ports */
- wl_disable( lp );
+ /* Disable the ports */
+ wl_disable(lp);
- wl_unlock( lp, &flags );
+ wl_unlock(lp, &flags);
+
+ return 0;
+} /* wl_close */
- return 0;
-} // wl_close
/*============================================================================*/
static void wl_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION_STR, sizeof(info->version));
-// strlcpy(info.fw_version, priv->fw_name,
-// sizeof(info.fw_version));
-
- if (dev->dev.parent) {
- dev_set_name(dev->dev.parent, "%s", info->bus_info);
- //strlcpy(info->bus_info, dev->dev.parent->bus_id,
- // sizeof(info->bus_info));
- } else {
- snprintf(info->bus_info, sizeof(info->bus_info),
- "PCMCIA FIXME");
-// "PCMCIA 0x%lx", priv->hw.iobase);
- }
-} // wl_get_drvinfo
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION_STR, sizeof(info->version));
+
+ if (dev->dev.parent) {
+ dev_set_name(dev->dev.parent, "%s", info->bus_info);
+ } else {
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA FIXME");
+ }
+} /* wl_get_drvinfo */
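
For comparison, the more common get_drvinfo idiom elsewhere in the tree copies the bus identifier into info->bus_info with strlcpy. The sketch below is an assumption about that usual pattern, not a statement about what this driver does; EX_DRV_NAME and EX_DRV_VERSION are placeholders:

#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>

#define EX_DRV_NAME     "example"       /* placeholder driver name */
#define EX_DRV_VERSION  "0.1"           /* placeholder version string */

/* Typical get_drvinfo: fill the fixed-size strings with strlcpy. */
static void ex_get_drvinfo(struct net_device *dev,
                           struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, EX_DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, EX_DRV_VERSION, sizeof(info->version));
        if (dev->dev.parent)
                strlcpy(info->bus_info, dev_name(dev->dev.parent),
                        sizeof(info->bus_info));
}
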
static struct ethtool_ops wl_ethtool_ops = {
- .get_drvinfo = wl_get_drvinfo,
- .get_link = ethtool_op_get_link,
+ .get_drvinfo = wl_get_drvinfo,
+ .get_link = ethtool_op_get_link,
};
-
/*******************************************************************************
* wl_ioctl()
*******************************************************************************
@@ -464,81 +437,86 @@ static struct ethtool_ops wl_ethtool_ops = {
* errno value otherwise
*
******************************************************************************/
-int wl_ioctl( struct net_device *dev, struct ifreq *rq, int cmd )
+int wl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
- int ret = 0;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
+ int ret = 0;
- DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- DBG_PARAM(DbgInfo, "rq", "0x%p", rq);
- DBG_PARAM(DbgInfo, "cmd", "0x%04x", cmd);
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "rq", "0x%p", rq);
+ DBG_PARAM(DbgInfo, "cmd", "0x%04x", cmd);
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
- wl_act_int_off( lp );
+ wl_act_int_off(lp);
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- /* Handle any RTS IOCTL here */
- if( cmd == WL_IOCTL_RTS ) {
- DBG_TRACE( DbgInfo, "IOCTL: WL_IOCTL_RTS\n" );
- ret = wvlan_rts( (struct rtsreq *)rq, dev->base_addr );
- } else {
- DBG_TRACE( DbgInfo, "IOCTL not supported in RTS mode: 0x%X\n", cmd );
- ret = -EOPNOTSUPP;
- }
+ if (lp->useRTS == 1) {
+ /* Handle any RTS IOCTL here */
+ if (cmd == WL_IOCTL_RTS) {
+ DBG_TRACE(DbgInfo, "IOCTL: WL_IOCTL_RTS\n");
+ ret = wvlan_rts((struct rtsreq *)rq, dev->base_addr);
+ } else {
+ DBG_TRACE(DbgInfo,
+ "IOCTL not supported in RTS mode: 0x%X\n",
+ cmd);
+ ret = -EOPNOTSUPP;
+ }
- goto out_act_int_on_unlock;
- }
-#endif /* USE_RTS */
+ goto out_act_int_on_unlock;
+ }
+#endif /* USE_RTS */
- /* Only handle UIL IOCTL requests when the UIL has the system blocked. */
- if( !(( lp->flags & WVLAN2_UIL_BUSY ) && ( cmd != WVLAN2_IOCTL_UIL ))) {
+ /* Only handle UIL IOCTL requests when the UIL has the system blocked. */
+ if (!((lp->flags & WVLAN2_UIL_BUSY) && (cmd != WVLAN2_IOCTL_UIL))) {
#ifdef USE_UIL
- struct uilreq *urq = (struct uilreq *)rq;
+ struct uilreq *urq = (struct uilreq *)rq;
#endif /* USE_UIL */
- switch( cmd ) {
- // ================== Private IOCTLs (up to 16) ==================
+ switch (cmd) {
+ /* ================== Private IOCTLs (up to 16) ================== */
#ifdef USE_UIL
- case WVLAN2_IOCTL_UIL:
- DBG_TRACE( DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL\n" );
- ret = wvlan_uil( urq, lp );
- break;
-#endif /* USE_UIL */
-
- default:
- DBG_TRACE(DbgInfo, "IOCTL CODE NOT SUPPORTED: 0x%X\n", cmd );
- ret = -EOPNOTSUPP;
- break;
+ case WVLAN2_IOCTL_UIL:
+ DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL\n");
+ ret = wvlan_uil(urq, lp);
+ break;
+#endif /* USE_UIL */
+
+ default:
+ DBG_TRACE(DbgInfo, "IOCTL CODE NOT SUPPORTED: 0x%X\n",
+ cmd);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ } else {
+ DBG_WARNING(DbgInfo,
+ "DEVICE IS BUSY, CANNOT PROCESS REQUEST\n");
+ ret = -EBUSY;
}
- } else {
- DBG_WARNING( DbgInfo, "DEVICE IS BUSY, CANNOT PROCESS REQUEST\n" );
- ret = -EBUSY;
- }
#ifdef USE_RTS
out_act_int_on_unlock:
-#endif /* USE_RTS */
- wl_act_int_on( lp );
+#endif /* USE_RTS */
+ wl_act_int_on(lp);
- wl_unlock( lp, &flags );
+ wl_unlock(lp, &flags);
+
+ return ret;
+} /* wl_ioctl */
- return ret;
-} // wl_ioctl
/*============================================================================*/
#ifdef CONFIG_NET_POLL_CONTROLLER
-void wl_poll(struct net_device *dev)
+static void wl_poll(struct net_device *dev)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
- struct pt_regs regs;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
+ struct pt_regs regs;
- wl_lock( lp, &flags );
- wl_isr(dev->irq, dev, &regs);
- wl_unlock( lp, &flags );
+ wl_lock(lp, &flags);
+ wl_isr(dev->irq, dev, &regs);
+ wl_unlock(lp, &flags);
}
#endif
@@ -559,53 +537,54 @@ void wl_poll(struct net_device *dev)
* N/A
*
******************************************************************************/
-void wl_tx_timeout( struct net_device *dev )
+void wl_tx_timeout(struct net_device *dev)
{
#ifdef USE_WDS
- int count;
-#endif /* USE_WDS */
- unsigned long flags;
- struct wl_private *lp = wl_priv(dev);
- struct net_device_stats *pStats = NULL;
+ int count;
+#endif /* USE_WDS */
+ unsigned long flags;
+ struct wl_private *lp = wl_priv(dev);
+ struct net_device_stats *pStats = NULL;
- DBG_WARNING( DbgInfo, "%s: Transmit timeout.\n", dev->name );
+ DBG_WARNING(DbgInfo, "%s: Transmit timeout.\n", dev->name);
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_TRACE( DbgInfo, "Skipping tx_timeout handler, in RTS mode\n" );
- wl_unlock( lp, &flags );
- return;
- }
-#endif /* USE_RTS */
-
- /* Figure out which device (the "root" device or WDS port) this timeout
- is for */
+ if (lp->useRTS == 1) {
+ DBG_TRACE(DbgInfo,
+ "Skipping tx_timeout handler, in RTS mode\n");
+ wl_unlock(lp, &flags);
+ return;
+ }
+#endif /* USE_RTS */
+
+ /* Figure out which device (the "root" device or WDS port) this timeout
+ is for */
#ifdef USE_WDS
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( dev == lp->wds_port[count].dev ) {
- pStats = &( lp->wds_port[count].stats );
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (dev == lp->wds_port[count].dev) {
+ pStats = &(lp->wds_port[count].stats);
- /* Break the loop so that we can use the counter to access WDS
- information in the private structure */
- break;
+ /* Break the loop so that we can use the counter to access WDS
+ information in the private structure */
+ break;
+ }
}
- }
-#endif /* USE_WDS */
+#endif /* USE_WDS */
+
+ /* If pStats is still NULL, then the device is not a WDS port */
+ if (pStats == NULL)
+ pStats = &(lp->stats);
- /* If pStats is still NULL, then the device is not a WDS port */
- if( pStats == NULL ) {
- pStats = &( lp->stats );
- }
+ /* Accumulate the timeout error */
+ pStats->tx_errors++;
- /* Accumulate the timeout error */
- pStats->tx_errors++;
+ wl_unlock(lp, &flags);
+} /* wl_tx_timeout */
- wl_unlock( lp, &flags );
-} // wl_tx_timeout
/*============================================================================*/
/*******************************************************************************
@@ -626,103 +605,105 @@ void wl_tx_timeout( struct net_device *dev )
* 1 on error
*
******************************************************************************/
-int wl_send( struct wl_private *lp )
+int wl_send(struct wl_private *lp)
{
- int status;
- DESC_STRCT *desc;
- WVLAN_LFRAME *txF = NULL;
- struct list_head *element;
- int len;
+ int status;
+ DESC_STRCT *desc;
+ WVLAN_LFRAME *txF = NULL;
+ struct list_head *element;
+ int len;
/*------------------------------------------------------------------------*/
- if( lp == NULL ) {
- DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
- return FALSE;
- }
- if( lp->dev == NULL ) {
- DBG_ERROR( DbgInfo, "net_device struct in wl_private is NULL\n" );
- return FALSE;
- }
-
- /* Check for the availability of FIDs; if none are available, don't take any
- frames off the txQ */
- if( lp->hcfCtx.IFB_RscInd == 0 ) {
- return FALSE;
- }
-
- /* Reclaim the TxQ Elements and place them back on the free queue */
- if( !list_empty( &( lp->txQ[0] ))) {
- element = lp->txQ[0].next;
-
- txF = (WVLAN_LFRAME * )list_entry( element, WVLAN_LFRAME, node );
- if( txF != NULL ) {
- lp->txF.skb = txF->frame.skb;
- lp->txF.port = txF->frame.port;
-
- txF->frame.skb = NULL;
- txF->frame.port = 0;
-
- list_del( &( txF->node ));
- list_add( element, &( lp->txFree ));
-
- lp->txQ_count--;
-
- if( lp->txQ_count < TX_Q_LOW_WATER_MARK ) {
- if( lp->netif_queue_on == FALSE ) {
- DBG_TX( DbgInfo, "Kickstarting Q: %d\n", lp->txQ_count );
- netif_wake_queue( lp->dev );
- WL_WDS_NETIF_WAKE_QUEUE( lp );
- lp->netif_queue_on = TRUE;
- }
- }
- }
- }
-
- if( lp->txF.skb == NULL ) {
- return FALSE;
- }
-
- /* If the device has resources (FIDs) available, then Tx the packet */
- /* Format the TxRequest and send it to the adapter */
- len = lp->txF.skb->len < ETH_ZLEN ? ETH_ZLEN : lp->txF.skb->len;
-
- desc = &( lp->desc_tx );
- desc->buf_addr = lp->txF.skb->data;
- desc->BUF_CNT = len;
- desc->next_desc_addr = NULL;
-
- status = hcf_send_msg( &( lp->hcfCtx ), desc, lp->txF.port );
-
- if( status == HCF_SUCCESS ) {
- lp->dev->trans_start = jiffies;
-
- DBG_TX( DbgInfo, "Transmit...\n" );
-
- if( lp->txF.port == HCF_PORT_0 ) {
- lp->stats.tx_packets++;
- lp->stats.tx_bytes += lp->txF.skb->len;
- }
+ if (lp == NULL) {
+ DBG_ERROR(DbgInfo, "Private adapter struct is NULL\n");
+ return FALSE;
+ }
+ if (lp->dev == NULL) {
+ DBG_ERROR(DbgInfo, "net_device struct in wl_private is NULL\n");
+ return FALSE;
+ }
+
+ /*
+ * Check for the availability of FIDs; if none are available,
+ * don't take any frames off the txQ
+ */
+ if (lp->hcfCtx.IFB_RscInd == 0)
+ return FALSE;
+
+ /* Reclaim the TxQ Elements and place them back on the free queue */
+ if (!list_empty(&(lp->txQ[0]))) {
+ element = lp->txQ[0].next;
+
+ txF = (WVLAN_LFRAME *) list_entry(element, WVLAN_LFRAME, node);
+ if (txF != NULL) {
+ lp->txF.skb = txF->frame.skb;
+ lp->txF.port = txF->frame.port;
+
+ txF->frame.skb = NULL;
+ txF->frame.port = 0;
+
+ list_del(&(txF->node));
+ list_add(element, &(lp->txFree));
+
+ lp->txQ_count--;
+
+ if (lp->txQ_count < TX_Q_LOW_WATER_MARK) {
+ if (lp->netif_queue_on == FALSE) {
+ DBG_TX(DbgInfo, "Kickstarting Q: %d\n",
+ lp->txQ_count);
+ netif_wake_queue(lp->dev);
+ WL_WDS_NETIF_WAKE_QUEUE(lp);
+ lp->netif_queue_on = TRUE;
+ }
+ }
+ }
+ }
+
+ if (lp->txF.skb == NULL)
+ return FALSE;
+
+ /* If the device has resources (FIDs) available, then Tx the packet */
+ /* Format the TxRequest and send it to the adapter */
+ len = lp->txF.skb->len < ETH_ZLEN ? ETH_ZLEN : lp->txF.skb->len;
+
+ desc = &(lp->desc_tx);
+ desc->buf_addr = lp->txF.skb->data;
+ desc->BUF_CNT = len;
+ desc->next_desc_addr = NULL;
+
+ status = hcf_send_msg(&(lp->hcfCtx), desc, lp->txF.port);
+
+ if (status == HCF_SUCCESS) {
+ lp->dev->trans_start = jiffies;
+
+ DBG_TX(DbgInfo, "Transmit...\n");
+ if (lp->txF.port == HCF_PORT_0) {
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += lp->txF.skb->len;
+ }
#ifdef USE_WDS
- else
- {
- lp->wds_port[(( lp->txF.port >> 8 ) - 1)].stats.tx_packets++;
- lp->wds_port[(( lp->txF.port >> 8 ) - 1)].stats.tx_bytes += lp->txF.skb->len;
- }
+ else {
+ lp->wds_port[((lp->txF.port >> 8) -
+ 1)].stats.tx_packets++;
+ lp->wds_port[((lp->txF.port >> 8) -
+ 1)].stats.tx_bytes += lp->txF.skb->len;
+ }
-#endif /* USE_WDS */
+#endif /* USE_WDS */
- /* Free the skb and perform queue cleanup, as the buffer was
- transmitted successfully */
- dev_kfree_skb( lp->txF.skb );
+ /* Free the skb and perform queue cleanup, as the buffer was
+ transmitted successfully */
+ dev_consume_skb_any(lp->txF.skb);
- lp->txF.skb = NULL;
- lp->txF.port = 0;
- }
+ lp->txF.skb = NULL;
+ lp->txF.port = 0;
+ }
+
+ return TRUE;
+} /* wl_send */
- return TRUE;
-} // wl_send
/*============================================================================*/
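
Besides the bracket cleanup, the wl_send() hunk switches the successful-transmit free from dev_kfree_skb() to dev_consume_skb_any(): the consume variant records the skb as delivered rather than dropped, so drop-monitor tooling is not misled, and the _any form is safe from both process and interrupt context. A short hedged sketch of the pairing (the helper is hypothetical, not part of this driver):

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* TX-completion sketch: pick the free variant by outcome. */
static void ex_tx_complete(struct sk_buff *skb, bool transmitted)
{
        if (transmitted)
                dev_consume_skb_any(skb);  /* delivered; drop monitors stay quiet */
        else
                dev_kfree_skb_any(skb);    /* dropped; counted as a drop */
}
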
/*******************************************************************************
@@ -744,75 +725,74 @@ int wl_send( struct wl_private *lp )
* 1 on error
*
******************************************************************************/
-int wl_tx( struct sk_buff *skb, struct net_device *dev, int port )
+int wl_tx(struct sk_buff *skb, struct net_device *dev, int port)
{
- unsigned long flags;
- struct wl_private *lp = wl_priv(dev);
- WVLAN_LFRAME *txF = NULL;
- struct list_head *element;
+ unsigned long flags;
+ struct wl_private *lp = wl_priv(dev);
+ WVLAN_LFRAME *txF = NULL;
+ struct list_head *element;
/*------------------------------------------------------------------------*/
- /* Grab the spinlock */
- wl_lock( lp, &flags );
-
- if( lp->flags & WVLAN2_UIL_BUSY ) {
- DBG_WARNING( DbgInfo, "UIL has device blocked\n" );
- /* Start dropping packets here??? */
- wl_unlock( lp, &flags );
- return 1;
- }
+ /* Grab the spinlock */
+ wl_lock(lp, &flags);
+ if (lp->flags & WVLAN2_UIL_BUSY) {
+ DBG_WARNING(DbgInfo, "UIL has device blocked\n");
+ /* Start dropping packets here??? */
+ wl_unlock(lp, &flags);
+ return 1;
+ }
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_PRINT( "RTS: we're getting a Tx...\n" );
- wl_unlock( lp, &flags );
- return 1;
- }
-#endif /* USE_RTS */
-
- if( !lp->use_dma ) {
- /* Get an element from the queue */
- element = lp->txFree.next;
- txF = (WVLAN_LFRAME *)list_entry( element, WVLAN_LFRAME, node );
- if( txF == NULL ) {
- DBG_ERROR( DbgInfo, "Problem with list_entry\n" );
- wl_unlock( lp, &flags );
- return 1;
- }
- /* Fill out the frame */
- txF->frame.skb = skb;
- txF->frame.port = port;
- /* Move the frame to the txQ */
- /* NOTE: Here's where we would do priority queueing */
- list_move(&(txF->node), &(lp->txQ[0]));
-
- lp->txQ_count++;
- if( lp->txQ_count >= DEFAULT_NUM_TX_FRAMES ) {
- DBG_TX( DbgInfo, "Q Full: %d\n", lp->txQ_count );
- if( lp->netif_queue_on == TRUE ) {
- netif_stop_queue( lp->dev );
- WL_WDS_NETIF_STOP_QUEUE( lp );
- lp->netif_queue_on = FALSE;
- }
- }
- }
- wl_act_int_off( lp ); /* Disable Interrupts */
-
- /* Send the data to the hardware using the appropriate method */
+ if (lp->useRTS == 1) {
+ DBG_PRINT("RTS: we're getting a Tx...\n");
+ wl_unlock(lp, &flags);
+ return 1;
+ }
+#endif /* USE_RTS */
+
+ if (!lp->use_dma) {
+ /* Get an element from the queue */
+ element = lp->txFree.next;
+ txF = (WVLAN_LFRAME *) list_entry(element, WVLAN_LFRAME, node);
+ if (txF == NULL) {
+ DBG_ERROR(DbgInfo, "Problem with list_entry\n");
+ wl_unlock(lp, &flags);
+ return 1;
+ }
+ /* Fill out the frame */
+ txF->frame.skb = skb;
+ txF->frame.port = port;
+ /* Move the frame to the txQ */
+ /* NOTE: Here's where we would do priority queueing */
+ list_move(&(txF->node), &(lp->txQ[0]));
+
+ lp->txQ_count++;
+ if (lp->txQ_count >= DEFAULT_NUM_TX_FRAMES) {
+ DBG_TX(DbgInfo, "Q Full: %d\n", lp->txQ_count);
+ if (lp->netif_queue_on == TRUE) {
+ netif_stop_queue(lp->dev);
+ WL_WDS_NETIF_STOP_QUEUE(lp);
+ lp->netif_queue_on = FALSE;
+ }
+ }
+ }
+ wl_act_int_off(lp); /* Disable Interrupts */
+
+ /* Send the data to the hardware using the appropriate method */
#ifdef ENABLE_DMA
- if( lp->use_dma ) {
- wl_send_dma( lp, skb, port );
- }
- else
+ if (lp->use_dma) {
+ wl_send_dma(lp, skb, port);
+ } else
#endif
- {
- wl_send( lp );
- }
- /* Re-enable Interrupts, release the spinlock and return */
- wl_act_int_on( lp );
- wl_unlock( lp, &flags );
- return 0;
-} // wl_tx
+ {
+ wl_send(lp);
+ }
+ /* Re-enable Interrupts, release the spinlock and return */
+ wl_act_int_on(lp);
+ wl_unlock(lp, &flags);
+ return 0;
+} /* wl_tx */
+
/*============================================================================*/
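
The queueing logic in wl_tx() and wl_send() above implements the usual backpressure pattern: stop the stack's queue when the private txQ reaches DEFAULT_NUM_TX_FRAMES and wake it once the count falls below TX_Q_LOW_WATER_MARK. A minimal sketch of that pattern; the threshold names below are assumed stand-ins, not the driver's constants:

#include <linux/netdevice.h>

#define EX_TX_RING_FULL 64      /* assumed queue depth */
#define EX_TX_LOW_WATER 16      /* assumed wake threshold */

/* Flow-control helper mirroring the watermark logic above (sketch only). */
static void ex_tx_flow_control(struct net_device *dev, int queued)
{
        if (queued >= EX_TX_RING_FULL)
                netif_stop_queue(dev);  /* hold the upper layer while full */
        else if (queued < EX_TX_LOW_WATER)
                netif_wake_queue(dev);  /* restart once the queue drains */
}
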
/*******************************************************************************
@@ -835,67 +815,68 @@ int wl_tx( struct sk_buff *skb, struct net_device *dev, int port )
******************************************************************************/
int wl_rx(struct net_device *dev)
{
- int port;
- struct sk_buff *skb;
- struct wl_private *lp = wl_priv(dev);
- int status;
- hcf_16 pktlen;
- hcf_16 hfs_stat;
- DESC_STRCT *desc;
+ int port;
+ struct sk_buff *skb;
+ struct wl_private *lp = wl_priv(dev);
+ int status;
+ hcf_16 pktlen;
+ hcf_16 hfs_stat;
+ DESC_STRCT *desc;
/*------------------------------------------------------------------------*/
- DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- if(!( lp->flags & WVLAN2_UIL_BUSY )) {
+ if (!(lp->flags & WVLAN2_UIL_BUSY)) {
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_PRINT( "RTS: We're getting an Rx...\n" );
- return -EIO;
- }
-#endif /* USE_RTS */
-
- /* Read the HFS_STAT register from the lookahead buffer */
- hfs_stat = (hcf_16)(( lp->lookAheadBuf[HFS_STAT] ) |
- ( lp->lookAheadBuf[HFS_STAT + 1] << 8 ));
-
- /* Make sure the frame isn't bad */
- if(( hfs_stat & HFS_STAT_ERR ) != HCF_SUCCESS ) {
- DBG_WARNING( DbgInfo, "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
- lp->lookAheadBuf[HFS_STAT] );
- return -EIO;
- }
-
- /* Determine what port this packet is for */
- port = ( hfs_stat >> 8 ) & 0x0007;
- DBG_RX( DbgInfo, "Rx frame for port %d\n", port );
-
- pktlen = lp->hcfCtx.IFB_RxLen;
- if (pktlen != 0) {
- skb = ALLOC_SKB(pktlen);
- if (skb != NULL) {
- /* Set the netdev based on the port */
- switch( port ) {
+ if (lp->useRTS == 1) {
+ DBG_PRINT("RTS: We're getting an Rx...\n");
+ return -EIO;
+ }
+#endif /* USE_RTS */
+
+ /* Read the HFS_STAT register from the lookahead buffer */
+ hfs_stat = (hcf_16) ((lp->lookAheadBuf[HFS_STAT]) |
+ (lp->lookAheadBuf[HFS_STAT + 1] << 8));
+
+ /* Make sure the frame isn't bad */
+ if ((hfs_stat & HFS_STAT_ERR) != HCF_SUCCESS) {
+ DBG_WARNING(DbgInfo,
+ "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
+ lp->lookAheadBuf[HFS_STAT]);
+ return -EIO;
+ }
+
+ /* Determine what port this packet is for */
+ port = (hfs_stat >> 8) & 0x0007;
+ DBG_RX(DbgInfo, "Rx frame for port %d\n", port);
+
+ pktlen = lp->hcfCtx.IFB_RxLen;
+ if (pktlen != 0) {
+ skb = ALLOC_SKB(pktlen);
+ if (skb != NULL) {
+ /* Set the netdev based on the port */
+ switch (port) {
#ifdef USE_WDS
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- case 6:
- skb->dev = lp->wds_port[port-1].dev;
- break;
-#endif /* USE_WDS */
-
- case 0:
- default:
- skb->dev = dev;
- break;
- }
-
- desc = &( lp->desc_rx );
-
- desc->next_desc_addr = NULL;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ skb->dev = lp->wds_port[port - 1].dev;
+ break;
+#endif /* USE_WDS */
+
+ case 0:
+ default:
+ skb->dev = dev;
+ break;
+ }
+
+ desc = &(lp->desc_rx);
+
+ desc->next_desc_addr = NULL;
/*
#define BLOCK_INPUT(buf, len) \
@@ -904,67 +885,73 @@ int wl_rx(struct net_device *dev)
status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0)
*/
- GET_PACKET( skb->dev, skb, pktlen );
+ GET_PACKET(skb->dev, skb, pktlen);
- if( status == HCF_SUCCESS ) {
- netif_rx( skb );
+ if (status == HCF_SUCCESS) {
+ netif_rx(skb);
- if( port == 0 ) {
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pktlen;
- }
+ if (port == 0) {
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pktlen;
+ }
#ifdef USE_WDS
- else
- {
- lp->wds_port[port-1].stats.rx_packets++;
- lp->wds_port[port-1].stats.rx_bytes += pktlen;
- }
-#endif /* USE_WDS */
-
- dev->last_rx = jiffies;
+ else {
+ lp->wds_port[port - 1].stats.rx_packets++;
+ lp->wds_port[port - 1].stats.rx_bytes += pktlen;
+ }
+#endif /* USE_WDS */
+
+ dev->last_rx = jiffies;
#ifdef WIRELESS_EXT
#ifdef WIRELESS_SPY
- if( lp->spydata.spy_number > 0 ) {
- char *srcaddr = skb->mac.raw + MAC_ADDR_SIZE;
+ if (lp->spydata.spy_number > 0) {
+ char *srcaddr =
+ skb->mac.raw +
+ MAC_ADDR_SIZE;
- wl_spy_gather( dev, srcaddr );
- }
+ wl_spy_gather(dev, srcaddr);
+ }
#endif /* WIRELESS_SPY */
#endif /* WIRELESS_EXT */
- } else {
- DBG_ERROR( DbgInfo, "Rx request to card FAILED\n" );
+ } else {
+ DBG_ERROR(DbgInfo,
+ "Rx request to card FAILED\n");
- if( port == 0 ) {
- lp->stats.rx_dropped++;
- }
+ if (port == 0)
+ lp->stats.rx_dropped++;
#ifdef USE_WDS
- else
- {
- lp->wds_port[port-1].stats.rx_dropped++;
- }
-#endif /* USE_WDS */
-
- dev_kfree_skb( skb );
- }
- } else {
- DBG_ERROR( DbgInfo, "Could not alloc skb\n" );
-
- if( port == 0 ) {
- lp->stats.rx_dropped++;
- }
+ else {
+ lp->wds_port[port - 1].stats.rx_dropped++;
+ }
+#endif /* USE_WDS */
+
+ dev_kfree_skb(skb);
+ }
+ } else {
+ DBG_ERROR(DbgInfo, "Could not alloc skb\n");
+
+ if (port == 0)
+ lp->stats.rx_dropped++;
#ifdef USE_WDS
- else
- {
- lp->wds_port[port-1].stats.rx_dropped++;
- }
-#endif /* USE_WDS */
- }
- }
- }
-
- return 0;
-} // wl_rx
+ else {
+ lp->wds_port[port - 1].stats.rx_dropped++;
+ }
+#endif /* USE_WDS */
+ }
+ }
+ }
+
+ return 0;
+} /* wl_rx */
+
/*============================================================================*/
/*******************************************************************************
@@ -986,142 +973,159 @@ int wl_rx(struct net_device *dev)
******************************************************************************/
#ifdef NEW_MULTICAST
-void wl_multicast( struct net_device *dev )
+void wl_multicast(struct net_device *dev)
{
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA //;?should we return an error status in AP mode
-//;?seems reasonable that even an AP-only driver could afford this small additional footprint
+#if 1 /* (HCF_TYPE) & HCF_TYPE_STA */
+ /*
+ * should we return an error status in AP mode ?
+ * seems reasonable that even an AP-only driver
+ * could afford this small additional footprint
+ */
- int x;
- struct netdev_hw_addr *ha;
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
+ int x;
+ struct netdev_hw_addr *ha;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
- DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- if( !wl_adapter_is_open( dev ))
- return;
+ if (!wl_adapter_is_open(dev))
+ return;
#if DBG
- if( DBG_FLAGS( DbgInfo ) & DBG_PARAM_ON ) {
- DBG_PRINT(" flags: %s%s%s\n",
- ( dev->flags & IFF_PROMISC ) ? "Promiscuous " : "",
- ( dev->flags & IFF_MULTICAST ) ? "Multicast " : "",
- ( dev->flags & IFF_ALLMULTI ) ? "All-Multicast" : "" );
+ if (DBG_FLAGS(DbgInfo) & DBG_PARAM_ON) {
+ DBG_PRINT(" flags: %s%s%s\n",
+ (dev->flags & IFF_PROMISC) ? "Promiscuous " : "",
+ (dev->flags & IFF_MULTICAST) ? "Multicast " : "",
+ (dev->flags & IFF_ALLMULTI) ? "All-Multicast" : "");
- DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
+ DBG_PRINT(" mc_count: %d\n", netdev_mc_count(dev));
- netdev_for_each_mc_addr(ha, dev)
- DBG_PRINT(" %pM (%d)\n", ha->addr, dev->addr_len);
- }
+ netdev_for_each_mc_addr(ha, dev)
+ DBG_PRINT(" %pM (%d)\n", ha->addr, dev->addr_len);
+ }
#endif /* DBG */
- if(!( lp->flags & WVLAN2_UIL_BUSY )) {
+ if (!(lp->flags & WVLAN2_UIL_BUSY)) {
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_TRACE( DbgInfo, "Skipping multicast, in RTS mode\n" );
- return;
- }
-#endif /* USE_RTS */
-
- wl_lock( lp, &flags );
- wl_act_int_off( lp );
-
- if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_STA ) {
- if( dev->flags & IFF_PROMISC ) {
- /* Enable promiscuous mode */
- lp->ltvRecord.len = 2;
- lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 1 );
- DBG_PRINT( "Enabling Promiscuous mode (IFF_PROMISC)\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
- }
- else if ((netdev_mc_count(dev) > HCF_MAX_MULTICAST) ||
- ( dev->flags & IFF_ALLMULTI )) {
- /* Shutting off this filter will enable all multicast frames to
- be sent up from the device; however, this is a static RID, so
- a call to wl_apply() is needed */
- lp->ltvRecord.len = 2;
- lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 0 );
- DBG_PRINT( "Enabling all multicast mode (IFF_ALLMULTI)\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
- wl_apply( lp );
- }
- else if (!netdev_mc_empty(dev)) {
- /* Set the multicast addresses */
- lp->ltvRecord.len = ( netdev_mc_count(dev) * 3 ) + 1;
- lp->ltvRecord.typ = CFG_GROUP_ADDR;
-
- x = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
- ha->addr, ETH_ALEN);
- DBG_PRINT( "Setting multicast list\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
- } else {
- /* Disable promiscuous mode */
- lp->ltvRecord.len = 2;
- lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 0 );
- DBG_PRINT( "Disabling Promiscuous mode\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
-
- /* Disable multicast mode */
- lp->ltvRecord.len = 2;
- lp->ltvRecord.typ = CFG_GROUP_ADDR;
- DBG_PRINT( "Disabling Multicast mode\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
-
- /* Turning on this filter will prevent all multicast frames from
- being sent up from the device; however, this is a static RID,
- so a call to wl_apply() is needed */
- lp->ltvRecord.len = 2;
- lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 1 );
- DBG_PRINT( "Disabling all multicast mode (IFF_ALLMULTI)\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
- wl_apply( lp );
- }
- }
- wl_act_int_on( lp );
- wl_unlock( lp, &flags );
- }
+ if (lp->useRTS == 1) {
+ DBG_TRACE(DbgInfo, "Skipping multicast, in RTS mode\n");
+ return;
+ }
+#endif /* USE_RTS */
+
+ wl_lock(lp, &flags);
+ wl_act_int_off(lp);
+
+ if (CNV_INT_TO_LITTLE(lp->hcfCtx.IFB_FWIdentity.comp_id) ==
+ COMP_ID_FW_STA) {
+ if (dev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ lp->ltvRecord.len = 2;
+ lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(1);
+ DBG_PRINT
+ ("Enabling Promiscuous mode (IFF_PROMISC)\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+ } else if ((netdev_mc_count(dev) > HCF_MAX_MULTICAST)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Shutting off this filter will enable all multicast frames to
+ be sent up from the device; however, this is a static RID, so
+ a call to wl_apply() is needed */
+ lp->ltvRecord.len = 2;
+ lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(0);
+ DBG_PRINT
+ ("Enabling all multicast mode (IFF_ALLMULTI)\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+ wl_apply(lp);
+ } else if (!netdev_mc_empty(dev)) {
+ /* Set the multicast addresses */
+ lp->ltvRecord.len =
+ (netdev_mc_count(dev) * 3) + 1;
+ lp->ltvRecord.typ = CFG_GROUP_ADDR;
+
+ x = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(&
+ (lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
+ ha->addr, ETH_ALEN);
+ DBG_PRINT("Setting multicast list\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+ } else {
+ /* Disable promiscuous mode */
+ lp->ltvRecord.len = 2;
+ lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(0);
+ DBG_PRINT("Disabling Promiscuous mode\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+
+ /* Disable multicast mode */
+ lp->ltvRecord.len = 2;
+ lp->ltvRecord.typ = CFG_GROUP_ADDR;
+ DBG_PRINT("Disabling Multicast mode\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+
+ /*
+ * Turning on this filter will prevent all multicast frames from
+ * being sent up from the device; however, this is a static RID,
+ * so a call to wl_apply() is needed
+ */
+ lp->ltvRecord.len = 2;
+ lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(1);
+ DBG_PRINT
+ ("Disabling all multicast mode (IFF_ALLMULTI)\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+ wl_apply(lp);
+ }
+ }
+ wl_act_int_on(lp);
+ wl_unlock(lp, &flags);
+ }
#endif /* HCF_STA */
-} // wl_multicast
+} /* wl_multicast */
+
/*============================================================================*/
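
wl_multicast() above walks the standard ndo_set_rx_mode decision ladder: promiscuous first, then all-multicast when IFF_ALLMULTI is set or the list overflows the hardware filter, then the explicit multicast list, then unicast only. A compact sketch of just that ladder; EX_HW_MC_MAX is an assumed filter size, not this hardware's limit:

#include <linux/netdevice.h>

#define EX_HW_MC_MAX 16         /* assumed hardware multicast filter size */

enum ex_rx_mode { EX_RX_PROMISC, EX_RX_ALLMULTI, EX_RX_MC_LIST, EX_RX_UNICAST };

/* Decision ladder mirroring wl_multicast() above (illustrative only). */
static enum ex_rx_mode ex_pick_rx_mode(struct net_device *dev)
{
        if (dev->flags & IFF_PROMISC)
                return EX_RX_PROMISC;
        if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > EX_HW_MC_MAX)
                return EX_RX_ALLMULTI;
        if (!netdev_mc_empty(dev))
                return EX_RX_MC_LIST;
        return EX_RX_UNICAST;
}
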
#else /* NEW_MULTICAST */
-void wl_multicast( struct net_device *dev, int num_addrs, void *addrs )
+void wl_multicast(struct net_device *dev, int num_addrs, void *addrs)
{
- DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
- DBG_PARAM( DbgInfo, "num_addrs", "%d", num_addrs );
- DBG_PARAM( DbgInfo, "addrs", "0x%p", addrs );
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "num_addrs", "%d", num_addrs);
+ DBG_PARAM(DbgInfo, "addrs", "0x%p", addrs);
#error Obsolete set multicast interface!
-} // wl_multicast
+} /* wl_multicast */
+
/*============================================================================*/
#endif /* NEW_MULTICAST */
-static const struct net_device_ops wl_netdev_ops =
-{
- .ndo_start_xmit = &wl_tx_port0,
+static const struct net_device_ops wl_netdev_ops = {
+ .ndo_start_xmit = &wl_tx_port0,
- .ndo_set_config = &wl_config,
- .ndo_get_stats = &wl_stats,
- .ndo_set_rx_mode = &wl_multicast,
+ .ndo_set_config = &wl_config,
+ .ndo_get_stats = &wl_stats,
+ .ndo_set_rx_mode = &wl_multicast,
- .ndo_init = &wl_insert,
- .ndo_open = &wl_adapter_open,
- .ndo_stop = &wl_adapter_close,
- .ndo_do_ioctl = &wl_ioctl,
+ .ndo_init = &wl_insert,
+ .ndo_open = &wl_adapter_open,
+ .ndo_stop = &wl_adapter_close,
+ .ndo_do_ioctl = &wl_ioctl,
- .ndo_tx_timeout = &wl_tx_timeout,
+ .ndo_tx_timeout = &wl_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = wl_poll,
+ .ndo_poll_controller = wl_poll,
#endif
};
@@ -1144,48 +1148,51 @@ static const struct net_device_ops wl_netdev_ops =
* device.
*
******************************************************************************/
-struct net_device * wl_device_alloc( void )
+struct net_device *wl_device_alloc(void)
{
- struct net_device *dev = NULL;
- struct wl_private *lp = NULL;
-
- /* Alloc a net_device struct */
- dev = alloc_etherdev(sizeof(struct wl_private));
- if (!dev)
- return NULL;
-
- /* Initialize the 'next' pointer in the struct. Currently only used for PCI,
- but do it here just in case it's used for other buses in the future */
- lp = wl_priv(dev);
-
+ struct net_device *dev = NULL;
+ struct wl_private *lp = NULL;
+
+ /* Alloc a net_device struct */
+ dev = alloc_etherdev(sizeof(struct wl_private));
+ if (!dev)
+ return NULL;
+
+ /*
+ * Initialize the 'next' pointer in the struct.
+ * Currently only used for PCI,
+ * but do it here just in case it's used
+ * for other buses in the future
+ */
+ lp = wl_priv(dev);
+
+ /* Check MTU */
+ if (dev->mtu > MTU_MAX) {
+ DBG_WARNING(DbgInfo, "%s: MTU set too high, limiting to %d.\n",
+ dev->name, MTU_MAX);
+ dev->mtu = MTU_MAX;
+ }
- /* Check MTU */
- if( dev->mtu > MTU_MAX )
- {
- DBG_WARNING( DbgInfo, "%s: MTU set too high, limiting to %d.\n",
- dev->name, MTU_MAX );
- dev->mtu = MTU_MAX;
- }
+ /* Setup the function table in the device structure. */
- /* Setup the function table in the device structure. */
+ dev->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
+ lp->wireless_data.spy_data = &lp->spy_data;
+ dev->wireless_data = &lp->wireless_data;
- dev->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
- lp->wireless_data.spy_data = &lp->spy_data;
- dev->wireless_data = &lp->wireless_data;
+ dev->netdev_ops = &wl_netdev_ops;
- dev->netdev_ops = &wl_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
- dev->watchdog_timeo = TX_TIMEOUT;
+ dev->ethtool_ops = &wl_ethtool_ops;
- dev->ethtool_ops = &wl_ethtool_ops;
+ netif_stop_queue(dev);
- netif_stop_queue( dev );
+ /* Allocate virtual devices for WDS support if needed */
+ WL_WDS_DEVICE_ALLOC(lp);
- /* Allocate virtual devices for WDS support if needed */
- WL_WDS_DEVICE_ALLOC( lp );
+ return dev;
+} /* wl_device_alloc */
- return dev;
-} // wl_device_alloc
/*============================================================================*/
/*******************************************************************************
@@ -1206,15 +1213,14 @@ struct net_device * wl_device_alloc( void )
* N/A
*
******************************************************************************/
-void wl_device_dealloc( struct net_device *dev )
+void wl_device_dealloc(struct net_device *dev)
{
-// struct wl_private *lp = wl_priv(dev);
+ /* Dealloc the WDS ports */
+ WL_WDS_DEVICE_DEALLOC(lp);
- /* Dealloc the WDS ports */
- WL_WDS_DEVICE_DEALLOC( lp );
+ free_netdev(dev);
+} /* wl_device_dealloc */
- free_netdev( dev );
-} // wl_device_dealloc
/*============================================================================*/
/*******************************************************************************
@@ -1235,15 +1241,16 @@ void wl_device_dealloc( struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port0( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port0(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 0\n" );
+ DBG_TX(DbgInfo, "Tx on Port 0\n");
- return wl_tx( skb, dev, HCF_PORT_0 );
+ return wl_tx(skb, dev, HCF_PORT_0);
#ifdef ENABLE_DMA
- return wl_tx_dma( skb, dev, HCF_PORT_0 );
+ return wl_tx_dma(skb, dev, HCF_PORT_0);
#endif
-} // wl_tx_port0
+} /* wl_tx_port0 */
+
/*============================================================================*/
#ifdef USE_WDS
@@ -1266,11 +1273,12 @@ int wl_tx_port0( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port1( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port1(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 1\n" );
- return wl_tx( skb, dev, HCF_PORT_1 );
-} // wl_tx_port1
+ DBG_TX(DbgInfo, "Tx on Port 1\n");
+ return wl_tx(skb, dev, HCF_PORT_1);
+} /* wl_tx_port1 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1291,11 +1299,12 @@ int wl_tx_port1( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port2( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port2(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 2\n" );
- return wl_tx( skb, dev, HCF_PORT_2 );
-} // wl_tx_port2
+ DBG_TX(DbgInfo, "Tx on Port 2\n");
+ return wl_tx(skb, dev, HCF_PORT_2);
+} /* wl_tx_port2 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1316,11 +1325,12 @@ int wl_tx_port2( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port3( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port3(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 3\n" );
- return wl_tx( skb, dev, HCF_PORT_3 );
-} // wl_tx_port3
+ DBG_TX(DbgInfo, "Tx on Port 3\n");
+ return wl_tx(skb, dev, HCF_PORT_3);
+} /* wl_tx_port3 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1341,11 +1351,12 @@ int wl_tx_port3( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port4( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port4(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 4\n" );
- return wl_tx( skb, dev, HCF_PORT_4 );
-} // wl_tx_port4
+ DBG_TX(DbgInfo, "Tx on Port 4\n");
+ return wl_tx(skb, dev, HCF_PORT_4);
+} /* wl_tx_port4 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1366,11 +1377,12 @@ int wl_tx_port4( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port5( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port5(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 5\n" );
- return wl_tx( skb, dev, HCF_PORT_5 );
-} // wl_tx_port5
+ DBG_TX(DbgInfo, "Tx on Port 5\n");
+ return wl_tx(skb, dev, HCF_PORT_5);
+} /* wl_tx_port5 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1391,11 +1403,12 @@ int wl_tx_port5( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port6( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port6(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 6\n" );
- return wl_tx( skb, dev, HCF_PORT_6 );
-} // wl_tx_port6
+ DBG_TX(DbgInfo, "Tx on Port 6\n");
+ return wl_tx(skb, dev, HCF_PORT_6);
+} /* wl_tx_port6 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1417,50 +1430,52 @@ int wl_tx_port6( struct sk_buff *skb, struct net_device *dev )
* structs in the private adapter structure.
*
******************************************************************************/
-void wl_wds_device_alloc( struct wl_private *lp )
+void wl_wds_device_alloc(struct wl_private *lp)
{
- int count;
+ int count;
- /* WDS support requires additional net_device structs to be allocated,
- so that user space apps can use these virtual devices to specify the
- port on which to Tx/Rx */
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- struct net_device *dev_wds = NULL;
+ /* WDS support requires additional net_device structs to be allocated,
+ so that user space apps can use these virtual devices to specify the
+ port on which to Tx/Rx */
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ struct net_device *dev_wds = NULL;
+
+ dev_wds = kzalloc(sizeof(struct net_device), GFP_KERNEL);
+ if (!dev_wds)
+ return;
+
+ ether_setup(dev_wds);
+
+ lp->wds_port[count].dev = dev_wds;
+
+ /* Re-use wl_init for all the devices, as it currently does nothing, but
+ * is required. Re-use the stats/tx_timeout handler for all as well; the
+ * WDS port which is requesting these operations can be determined by
+ * the net_device pointer. Set the private member of all devices to point
+ * to the same net_device struct; that way, all information gets
+ * funnelled through the one "real" net_device. Name the WDS ports
+ * "wds<n>"
+		 */
+ lp->wds_port[count].dev->init = &wl_init;
+ lp->wds_port[count].dev->get_stats = &wl_stats;
+ lp->wds_port[count].dev->tx_timeout = &wl_tx_timeout;
+ lp->wds_port[count].dev->watchdog_timeo = TX_TIMEOUT;
+ lp->wds_port[count].dev->priv = lp;
+
+ sprintf(lp->wds_port[count].dev->name, "wds%d", count);
+ }
- dev_wds = kzalloc(sizeof(struct net_device), GFP_KERNEL);
- if (!dev_wds)
- return;
+ /* Register the Tx handlers */
+ lp->wds_port[0].dev->hard_start_xmit = &wl_tx_port1;
+ lp->wds_port[1].dev->hard_start_xmit = &wl_tx_port2;
+ lp->wds_port[2].dev->hard_start_xmit = &wl_tx_port3;
+ lp->wds_port[3].dev->hard_start_xmit = &wl_tx_port4;
+ lp->wds_port[4].dev->hard_start_xmit = &wl_tx_port5;
+ lp->wds_port[5].dev->hard_start_xmit = &wl_tx_port6;
+
+ WL_WDS_NETIF_STOP_QUEUE(lp);
+} /* wl_wds_device_alloc */
- ether_setup( dev_wds );
-
- lp->wds_port[count].dev = dev_wds;
-
- /* Re-use wl_init for all the devices, as it currently does nothing, but
- is required. Re-use the stats/tx_timeout handler for all as well; the
- WDS port which is requesting these operations can be determined by
- the net_device pointer. Set the private member of all devices to point
- to the same net_device struct; that way, all information gets
- funnelled through the one "real" net_device. Name the WDS ports
- "wds<n>" */
- lp->wds_port[count].dev->init = &wl_init;
- lp->wds_port[count].dev->get_stats = &wl_stats;
- lp->wds_port[count].dev->tx_timeout = &wl_tx_timeout;
- lp->wds_port[count].dev->watchdog_timeo = TX_TIMEOUT;
- lp->wds_port[count].dev->priv = lp;
-
- sprintf( lp->wds_port[count].dev->name, "wds%d", count );
- }
-
- /* Register the Tx handlers */
- lp->wds_port[0].dev->hard_start_xmit = &wl_tx_port1;
- lp->wds_port[1].dev->hard_start_xmit = &wl_tx_port2;
- lp->wds_port[2].dev->hard_start_xmit = &wl_tx_port3;
- lp->wds_port[3].dev->hard_start_xmit = &wl_tx_port4;
- lp->wds_port[4].dev->hard_start_xmit = &wl_tx_port5;
- lp->wds_port[5].dev->hard_start_xmit = &wl_tx_port6;
-
- WL_WDS_NETIF_STOP_QUEUE( lp );
-} // wl_wds_device_alloc
/*============================================================================*/
/*******************************************************************************
@@ -1480,26 +1495,27 @@ void wl_wds_device_alloc( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_device_dealloc( struct wl_private *lp )
+void wl_wds_device_dealloc(struct wl_private *lp)
{
- int count;
+ int count;
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- struct net_device *dev_wds = NULL;
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ struct net_device *dev_wds = NULL;
- dev_wds = lp->wds_port[count].dev;
+ dev_wds = lp->wds_port[count].dev;
- if( dev_wds != NULL ) {
- if( dev_wds->flags & IFF_UP ) {
- dev_close( dev_wds );
- dev_wds->flags &= ~( IFF_UP | IFF_RUNNING );
- }
+ if (dev_wds != NULL) {
+ if (dev_wds->flags & IFF_UP) {
+ dev_close(dev_wds);
+ dev_wds->flags &= ~(IFF_UP | IFF_RUNNING);
+ }
+
+ free_netdev(dev_wds);
+ lp->wds_port[count].dev = NULL;
+ }
+ }
+} /* wl_wds_device_dealloc */
- free_netdev(dev_wds);
- lp->wds_port[count].dev = NULL;
- }
- }
-} // wl_wds_device_dealloc
/*============================================================================*/
/*******************************************************************************
@@ -1520,21 +1536,22 @@ void wl_wds_device_dealloc( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_netif_start_queue( struct wl_private *lp )
+void wl_wds_netif_start_queue(struct wl_private *lp)
{
- int count;
+ int count;
/*------------------------------------------------------------------------*/
- if( lp != NULL ) {
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( lp->wds_port[count].is_registered &&
- lp->wds_port[count].netif_queue_on == FALSE ) {
- netif_start_queue( lp->wds_port[count].dev );
- lp->wds_port[count].netif_queue_on = TRUE;
- }
- }
- }
-} // wl_wds_netif_start_queue
+ if (lp != NULL) {
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (lp->wds_port[count].is_registered &&
+ lp->wds_port[count].netif_queue_on == FALSE) {
+ netif_start_queue(lp->wds_port[count].dev);
+ lp->wds_port[count].netif_queue_on = TRUE;
+ }
+ }
+ }
+} /* wl_wds_netif_start_queue */
+
/*============================================================================*/
/*******************************************************************************
@@ -1555,21 +1572,22 @@ void wl_wds_netif_start_queue( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_netif_stop_queue( struct wl_private *lp )
+void wl_wds_netif_stop_queue(struct wl_private *lp)
{
- int count;
+ int count;
/*------------------------------------------------------------------------*/
- if( lp != NULL ) {
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( lp->wds_port[count].is_registered &&
- lp->wds_port[count].netif_queue_on == TRUE ) {
- netif_stop_queue( lp->wds_port[count].dev );
- lp->wds_port[count].netif_queue_on = FALSE;
- }
- }
- }
-} // wl_wds_netif_stop_queue
+ if (lp != NULL) {
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (lp->wds_port[count].is_registered &&
+ lp->wds_port[count].netif_queue_on == TRUE) {
+ netif_stop_queue(lp->wds_port[count].dev);
+ lp->wds_port[count].netif_queue_on = FALSE;
+ }
+ }
+ }
+} /* wl_wds_netif_stop_queue */
+
/*============================================================================*/
/*******************************************************************************
@@ -1590,21 +1608,22 @@ void wl_wds_netif_stop_queue( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_netif_wake_queue( struct wl_private *lp )
+void wl_wds_netif_wake_queue(struct wl_private *lp)
{
- int count;
+ int count;
/*------------------------------------------------------------------------*/
- if( lp != NULL ) {
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( lp->wds_port[count].is_registered &&
- lp->wds_port[count].netif_queue_on == FALSE ) {
- netif_wake_queue( lp->wds_port[count].dev );
- lp->wds_port[count].netif_queue_on = TRUE;
- }
- }
- }
-} // wl_wds_netif_wake_queue
+ if (lp != NULL) {
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (lp->wds_port[count].is_registered &&
+ lp->wds_port[count].netif_queue_on == FALSE) {
+ netif_wake_queue(lp->wds_port[count].dev);
+ lp->wds_port[count].netif_queue_on = TRUE;
+ }
+ }
+ }
+} /* wl_wds_netif_wake_queue */
+
/*============================================================================*/
/*******************************************************************************
@@ -1625,19 +1644,19 @@ void wl_wds_netif_wake_queue( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_netif_carrier_on( struct wl_private *lp )
+void wl_wds_netif_carrier_on(struct wl_private *lp)
{
- int count;
+ int count;
/*------------------------------------------------------------------------*/
- if( lp != NULL ) {
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( lp->wds_port[count].is_registered ) {
- netif_carrier_on( lp->wds_port[count].dev );
- }
- }
- }
-} // wl_wds_netif_carrier_on
+ if (lp != NULL) {
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (lp->wds_port[count].is_registered)
+ netif_carrier_on(lp->wds_port[count].dev);
+ }
+ }
+} /* wl_wds_netif_carrier_on */
+
/*============================================================================*/
/*******************************************************************************
@@ -1658,21 +1677,22 @@ void wl_wds_netif_carrier_on( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_netif_carrier_off( struct wl_private *lp )
+void wl_wds_netif_carrier_off(struct wl_private *lp)
{
int count;
- if(lp != NULL) {
- for(count = 0; count < NUM_WDS_PORTS; count++) {
- if(lp->wds_port[count].is_registered)
+ if (lp != NULL) {
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (lp->wds_port[count].is_registered)
netif_carrier_off(lp->wds_port[count].dev);
}
}
-} // wl_wds_netif_carrier_off
+} /* wl_wds_netif_carrier_off */
+
/*============================================================================*/
-#endif /* USE_WDS */
+#endif /* USE_WDS */
#ifdef ENABLE_DMA
/*******************************************************************************
@@ -1695,70 +1715,71 @@ void wl_wds_netif_carrier_off( struct wl_private *lp )
* 1 on error
*
******************************************************************************/
-int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
+int wl_send_dma(struct wl_private *lp, struct sk_buff *skb, int port)
{
- int len;
- DESC_STRCT *desc = NULL;
- DESC_STRCT *desc_next = NULL;
+ int len;
+ DESC_STRCT *desc = NULL;
+ DESC_STRCT *desc_next = NULL;
/*------------------------------------------------------------------------*/
- if( lp == NULL ) {
- DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
- return FALSE;
- }
+ if (lp == NULL) {
+ DBG_ERROR(DbgInfo, "Private adapter struct is NULL\n");
+ return FALSE;
+ }
- if( lp->dev == NULL ) {
- DBG_ERROR( DbgInfo, "net_device struct in wl_private is NULL\n" );
- return FALSE;
- }
+ if (lp->dev == NULL) {
+ DBG_ERROR(DbgInfo, "net_device struct in wl_private is NULL\n");
+ return FALSE;
+ }
- /* AGAIN, ALL THE QUEUEING DONE HERE IN I/O MODE IS NOT PERFORMED */
+ /* AGAIN, ALL THE QUEUEING DONE HERE IN I/O MODE IS NOT PERFORMED */
- if( skb == NULL ) {
- DBG_WARNING (DbgInfo, "Nothing to send.\n");
- return FALSE;
- }
+ if (skb == NULL) {
+ DBG_WARNING(DbgInfo, "Nothing to send.\n");
+ return FALSE;
+ }
- len = skb->len;
+ len = skb->len;
- /* Get a free descriptor */
- desc = wl_pci_dma_get_tx_packet( lp );
+ /* Get a free descriptor */
+ desc = wl_pci_dma_get_tx_packet(lp);
- if( desc == NULL ) {
- if( lp->netif_queue_on == TRUE ) {
- netif_stop_queue( lp->dev );
- WL_WDS_NETIF_STOP_QUEUE( lp );
- lp->netif_queue_on = FALSE;
+ if (desc == NULL) {
+ if (lp->netif_queue_on == TRUE) {
+ netif_stop_queue(lp->dev);
+ WL_WDS_NETIF_STOP_QUEUE(lp);
+ lp->netif_queue_on = FALSE;
- dev_kfree_skb( skb );
- return 0;
- }
- }
+			dev_kfree_skb_any(skb);
+ return 0;
+ }
+ }
+
+ SET_BUF_CNT(desc, /*HCF_DMA_FD_CNT */ HFS_ADDR_DEST);
+ SET_BUF_SIZE(desc, HCF_DMA_TX_BUF1_SIZE);
- SET_BUF_CNT( desc, /*HCF_DMA_FD_CNT*/HFS_ADDR_DEST );
- SET_BUF_SIZE( desc, HCF_DMA_TX_BUF1_SIZE );
+ desc_next = desc->next_desc_addr;
- desc_next = desc->next_desc_addr;
+ if (desc_next->buf_addr == NULL) {
+ DBG_ERROR(DbgInfo, "DMA descriptor buf_addr is NULL\n");
+ return FALSE;
+ }
- if( desc_next->buf_addr == NULL ) {
- DBG_ERROR( DbgInfo, "DMA descriptor buf_addr is NULL\n" );
- return FALSE;
- }
+ /* Copy the payload into the DMA packet */
+ memcpy(desc_next->buf_addr, skb->data, len);
- /* Copy the payload into the DMA packet */
- memcpy( desc_next->buf_addr, skb->data, len );
+ SET_BUF_CNT(desc_next, len);
+ SET_BUF_SIZE(desc_next, HCF_MAX_PACKET_SIZE);
- SET_BUF_CNT( desc_next, len );
- SET_BUF_SIZE( desc_next, HCF_MAX_PACKET_SIZE );
+ hcf_dma_tx_put(&(lp->hcfCtx), desc, 0);
- hcf_dma_tx_put( &( lp->hcfCtx ), desc, 0 );
+ /* Free the skb and perform queue cleanup, as the buffer was
+ transmitted successfully */
+	dev_consume_skb_any(skb);
- /* Free the skb and perform queue cleanup, as the buffer was
- transmitted successfully */
- dev_kfree_skb( skb );
+ return TRUE;
+} /* wl_send_dma */
- return TRUE;
-} // wl_send_dma
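In other words, wl_send_dma() builds a two-descriptor Tx frame: the first descriptor covers the on-card frame-structure header (its byte count is set to HFS_ADDR_DEST), and the second carries the Ethernet payload copied out of the skb. A condensed sketch of that flow, assuming the DESC_STRCT accessors behave as their names suggest:

    /* Sketch: two-descriptor Tx frame handed to the HCF DMA engine */
    desc = wl_pci_dma_get_tx_packet(lp);           /* header descriptor */
    SET_BUF_CNT(desc, HFS_ADDR_DEST);
    SET_BUF_SIZE(desc, HCF_DMA_TX_BUF1_SIZE);

    desc_next = desc->next_desc_addr;              /* payload descriptor */
    memcpy(desc_next->buf_addr, skb->data, skb->len);
    SET_BUF_CNT(desc_next, skb->len);
    SET_BUF_SIZE(desc_next, HCF_MAX_PACKET_SIZE);

    hcf_dma_tx_put(&lp->hcfCtx, desc, 0);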
/*============================================================================*/
/*******************************************************************************
@@ -1779,147 +1800,152 @@ int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
* 1 on error
*
******************************************************************************/
-int wl_rx_dma( struct net_device *dev )
+int wl_rx_dma(struct net_device *dev)
{
- int port;
- hcf_16 pktlen;
- hcf_16 hfs_stat;
- struct sk_buff *skb;
- struct wl_private *lp = NULL;
- DESC_STRCT *desc, *desc_next;
- //CFG_MB_INFO_RANGE2_STRCT x;
+ int port;
+ hcf_16 pktlen;
+ hcf_16 hfs_stat;
+ struct sk_buff *skb;
+ struct wl_private *lp = NULL;
+ DESC_STRCT *desc, *desc_next;
/*------------------------------------------------------------------------*/
- DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- if((( lp = dev->priv ) != NULL ) &&
- !( lp->flags & WVLAN2_UIL_BUSY )) {
+ lp = dev->priv;
+ if ((lp != NULL) && !(lp->flags & WVLAN2_UIL_BUSY)) {
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_PRINT( "RTS: We're getting an Rx...\n" );
- return -EIO;
- }
-#endif /* USE_RTS */
-
- //if( lp->dma.status == 0 )
- //{
- desc = hcf_dma_rx_get( &( lp->hcfCtx ));
-
- if( desc != NULL )
- {
- /* Check and see if we rcvd. a WMP frame */
- /*
- if((( *(hcf_8 *)&desc->buf_addr[HFS_STAT] ) &
- ( HFS_STAT_MSG_TYPE | HFS_STAT_ERR )) == HFS_STAT_WMP_MSG )
- {
- DBG_TRACE( DbgInfo, "Got a WMP frame\n" );
-
- x.len = sizeof( CFG_MB_INFO_RANGE2_STRCT ) / sizeof( hcf_16 );
- x.typ = CFG_MB_INFO;
- x.base_typ = CFG_WMP;
- x.frag_cnt = 2;
- x.frag_buf[0].frag_len = GET_BUF_CNT( descp ) / sizeof( hcf_16 );
- x.frag_buf[0].frag_addr = (hcf_8 *) descp->buf_addr ;
- x.frag_buf[1].frag_len = ( GET_BUF_CNT( descp->next_desc_addr ) + 1 ) / sizeof( hcf_16 );
- x.frag_buf[1].frag_addr = (hcf_8 *) descp->next_desc_addr->buf_addr ;
-
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&x );
- }
- */
-
- desc_next = desc->next_desc_addr;
-
- /* Make sure the buffer isn't empty */
- if( GET_BUF_CNT( desc ) == 0 ) {
- DBG_WARNING( DbgInfo, "Buffer is empty!\n" );
-
- /* Give the descriptor back to the HCF */
- hcf_dma_rx_put( &( lp->hcfCtx ), desc );
- return -EIO;
- }
-
- /* Read the HFS_STAT register from the lookahead buffer */
- hfs_stat = (hcf_16)( desc->buf_addr[HFS_STAT/2] );
-
- /* Make sure the frame isn't bad */
- if(( hfs_stat & HFS_STAT_ERR ) != HCF_SUCCESS )
- {
- DBG_WARNING( DbgInfo, "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
- desc->buf_addr[HFS_STAT/2] );
-
- /* Give the descriptor back to the HCF */
- hcf_dma_rx_put( &( lp->hcfCtx ), desc );
- return -EIO;
- }
-
- /* Determine what port this packet is for */
- port = ( hfs_stat >> 8 ) & 0x0007;
- DBG_RX( DbgInfo, "Rx frame for port %d\n", port );
-
- pktlen = GET_BUF_CNT(desc_next);
- if (pktlen != 0) {
- skb = ALLOC_SKB(pktlen);
- if (skb != NULL) {
- switch( port ) {
+ if (lp->useRTS == 1) {
+ DBG_PRINT("RTS: We're getting an Rx...\n");
+ return -EIO;
+ }
+#endif /* USE_RTS */
+
+ /*
+ *if( lp->dma.status == 0 )
+ *{
+ */
+ desc = hcf_dma_rx_get(&(lp->hcfCtx));
+
+ if (desc != NULL) {
+ /* Check and see if we rcvd. a WMP frame */
+ /*
+ if((( *(hcf_8 *)&desc->buf_addr[HFS_STAT] ) &
+ ( HFS_STAT_MSG_TYPE | HFS_STAT_ERR )) == HFS_STAT_WMP_MSG )
+ {
+ DBG_TRACE( DbgInfo, "Got a WMP frame\n" );
+
+ x.len = sizeof( CFG_MB_INFO_RANGE2_STRCT ) / sizeof( hcf_16 );
+ x.typ = CFG_MB_INFO;
+ x.base_typ = CFG_WMP;
+ x.frag_cnt = 2;
+ x.frag_buf[0].frag_len = GET_BUF_CNT( descp ) / sizeof( hcf_16 );
+ x.frag_buf[0].frag_addr = (hcf_8 *) descp->buf_addr ;
+ x.frag_buf[1].frag_len = ( GET_BUF_CNT( descp->next_desc_addr ) + 1 ) / sizeof( hcf_16 );
+ x.frag_buf[1].frag_addr = (hcf_8 *) descp->next_desc_addr->buf_addr ;
+
+ hcf_put_info( &( lp->hcfCtx ), (LTVP)&x );
+ }
+ */
+
+ desc_next = desc->next_desc_addr;
+
+ /* Make sure the buffer isn't empty */
+ if (GET_BUF_CNT(desc) == 0) {
+ DBG_WARNING(DbgInfo, "Buffer is empty!\n");
+
+ /* Give the descriptor back to the HCF */
+ hcf_dma_rx_put(&(lp->hcfCtx), desc);
+ return -EIO;
+ }
+
+ /* Read the HFS_STAT register from the lookahead buffer */
+ hfs_stat = (hcf_16) (desc->buf_addr[HFS_STAT / 2]);
+
+ /* Make sure the frame isn't bad */
+ if ((hfs_stat & HFS_STAT_ERR) != HCF_SUCCESS) {
+ DBG_WARNING(DbgInfo,
+ "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
+ desc->buf_addr[HFS_STAT / 2]);
+
+ /* Give the descriptor back to the HCF */
+ hcf_dma_rx_put(&(lp->hcfCtx), desc);
+ return -EIO;
+ }
+
+ /* Determine what port this packet is for */
+ port = (hfs_stat >> 8) & 0x0007;
+ DBG_RX(DbgInfo, "Rx frame for port %d\n", port);
+
+ pktlen = GET_BUF_CNT(desc_next);
+ if (pktlen != 0) {
+ skb = ALLOC_SKB(pktlen);
+ if (skb != NULL) {
+ switch (port) {
#ifdef USE_WDS
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- case 6:
- skb->dev = lp->wds_port[port-1].dev;
- break;
-#endif /* USE_WDS */
-
- case 0:
- default:
- skb->dev = dev;
- break;
- }
-
- GET_PACKET_DMA( skb->dev, skb, pktlen );
-
- /* Give the descriptor back to the HCF */
- hcf_dma_rx_put( &( lp->hcfCtx ), desc );
-
- netif_rx( skb );
-
- if( port == 0 ) {
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pktlen;
- }
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ skb->dev =
+ lp->wds_port[port - 1].dev;
+ break;
+#endif /* USE_WDS */
+
+ case 0:
+ default:
+ skb->dev = dev;
+ break;
+ }
+
+ GET_PACKET_DMA(skb->dev, skb, pktlen);
+
+ /* Give the descriptor back to the HCF */
+ hcf_dma_rx_put(&(lp->hcfCtx), desc);
+
+ netif_rx(skb);
+
+ if (port == 0) {
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pktlen;
+ }
#ifdef USE_WDS
- else
- {
- lp->wds_port[port-1].stats.rx_packets++;
- lp->wds_port[port-1].stats.rx_bytes += pktlen;
- }
-#endif /* USE_WDS */
-
- dev->last_rx = jiffies;
-
- } else {
- DBG_ERROR( DbgInfo, "Could not alloc skb\n" );
-
- if( port == 0 )
- {
- lp->stats.rx_dropped++;
- }
+ else {
+					lp->wds_port[port - 1].stats.rx_packets++;
+					lp->wds_port[port - 1].stats.rx_bytes += pktlen;
+ }
+#endif /* USE_WDS */
+
+ dev->last_rx = jiffies;
+
+ } else {
+ DBG_ERROR(DbgInfo,
+ "Could not alloc skb\n");
+
+ if (port == 0)
+ lp->stats.rx_dropped++;
#ifdef USE_WDS
- else
- {
- lp->wds_port[port-1].stats.rx_dropped++;
- }
-#endif /* USE_WDS */
- }
- }
- }
- //}
- }
-
- return 0;
-} // wl_rx_dma
+ else {
+					lp->wds_port[port - 1].stats.rx_dropped++;
+ }
+#endif /* USE_WDS */
+ }
+ }
+ }
+ /*}*/
+ }
+
+ return 0;
+} /* wl_rx_dma */
+
/*============================================================================*/
-#endif // ENABLE_DMA
+#endif /* ENABLE_DMA */
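As a side note on the Rx path just above: the HFS_STAT word read from the lookahead buffer carries both the error bits and, in bits 8-10, the HCF port the frame arrived on. A small sketch of that dispatch, with the WDS mapping taken from the switch statement in wl_rx_dma():

    /* Sketch: port selection as done in wl_rx_dma() */
    int port = (hfs_stat >> 8) & 0x0007;
    struct net_device *target;

    if (port == 0)
            target = dev;                           /* primary wlan interface */
    else
            target = lp->wds_port[port - 1].dev;    /* wds1..wds6 when USE_WDS */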
diff --git a/drivers/staging/wlags49_h2/wl_util.c b/drivers/staging/wlags49_h2/wl_util.c
index 4ca6e42ecd7e..75019c171d57 100644
--- a/drivers/staging/wlags49_h2/wl_util.c
+++ b/drivers/staging/wlags49_h2/wl_util.c
@@ -161,43 +161,6 @@ int dbm( int value )
-
-/*******************************************************************************
- * percent()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Return a value as a percentage of min to max.
- *
- * PARAMETERS:
- *
- * value - the value in question
- * min - the minimum range value
- * max - the maximum range value
- *
- * RETURNS:
- *
- * the percentage value
- *
- ******************************************************************************/
-int percent( int value, int min, int max )
-{
- /* Truncate the value to be between min and max. */
- if( value < min )
- value = min;
-
- if( value > max )
- value = max;
-
- /* Return the value as a percentage of min to max. */
- return ((( value - min ) * 100 ) / ( max - min ));
-} // percent
-/*============================================================================*/
-
-
-
-
/*******************************************************************************
* is_valid_key_string()
*******************************************************************************
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 187fc060de26..49eeeaee664b 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -3354,7 +3354,7 @@ void wl_wext_event_essid( struct net_device *dev )
the call to wireless_send_event() must also point to where the ESSID
lives */
wrqu.essid.length = strlen( lp->NetworkName );
- wrqu.essid.pointer = (caddr_t)lp->NetworkName;
+ wrqu.essid.pointer = (void __user *)lp->NetworkName;
wrqu.essid.flags = 1;
wireless_send_event( dev, SIOCSIWESSID, &wrqu, lp->NetworkName );
@@ -3419,7 +3419,7 @@ void wl_wext_event_encode( struct net_device *dev )
/* Only provide the key if permissions allow */
if( capable( CAP_NET_ADMIN )) {
- wrqu.encoding.pointer = (caddr_t)lp->DefaultKeys.key[index].key;
+ wrqu.encoding.pointer = (void __user *)lp->DefaultKeys.key[index].key;
wrqu.encoding.length = lp->DefaultKeys.key[index].len;
} else {
wrqu.encoding.flags |= IW_ENCODE_NOKEY;
@@ -3778,7 +3778,7 @@ static const iw_handler wl_private_handler[] =
#endif
};
-struct iw_priv_args wl_priv_args[] = {
+static struct iw_priv_args wl_priv_args[] = {
{SIOCSIWNETNAME, IW_PRIV_TYPE_CHAR | HCF_MAX_NAME_LEN, 0, "snetwork_name" },
{SIOCGIWNETNAME, 0, IW_PRIV_TYPE_CHAR | HCF_MAX_NAME_LEN, "gnetwork_name" },
{SIOCSIWSTANAME, IW_PRIV_TYPE_CHAR | HCF_MAX_NAME_LEN, 0, "sstation_name" },
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index a7d24c95191d..f76f95c29617 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -400,6 +400,8 @@ static int prism2_scan(struct wiphy *wiphy,
numbss = msg1.numbss.data;
for (i = 0; i < numbss; i++) {
+ int freq;
+
memset(&msg2, 0, sizeof(msg2));
msg2.msgcode = DIDmsg_dot11req_scan_results;
msg2.bssindex.data = i;
@@ -414,9 +416,10 @@ static int prism2_scan(struct wiphy *wiphy,
ie_buf[1] = msg2.ssid.data.len;
ie_len = ie_buf[1] + 2;
memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
+ freq = ieee80211_channel_to_frequency(msg2.dschannel.data,
+ IEEE80211_BAND_2GHZ);
bss = cfg80211_inform_bss(wiphy,
- ieee80211_get_channel(wiphy,
- ieee80211_dsss_chan_to_freq(msg2.dschannel.data)),
+ ieee80211_get_channel(wiphy, freq),
(const u8 *) &(msg2.bssid.data.data),
msg2.timestamp.data, msg2.capinfo.data,
msg2.beaconperiod.data,
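ieee80211_channel_to_frequency() provides the same 2.4 GHz mapping the removed ieee80211_dsss_chan_to_freq() call did: channels 1-13 map to 2407 + 5 * chan MHz and channel 14 to 2484 MHz. A short usage sketch (the channel number is an assumption about a scan result):

    int freq = ieee80211_channel_to_frequency(6, IEEE80211_BAND_2GHZ); /* 2437 MHz */
    struct ieee80211_channel *chan = ieee80211_get_channel(wiphy, freq); /* NULL if not registered */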
@@ -746,7 +749,7 @@ static const struct cfg80211_ops prism2_usb_cfg_ops = {
/* Functions to create/free wiphy interface */
-struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
+static struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
{
struct wiphy *wiphy;
struct prism2_wiphy_private *priv;
@@ -783,7 +786,7 @@ struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
}
-void wlan_free_wiphy(struct wiphy *wiphy)
+static void wlan_free_wiphy(struct wiphy *wiphy)
{
wiphy_unregister(wiphy);
wiphy_free(wiphy);
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 333a2f693e49..1f2c78cc0086 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -531,14 +531,14 @@ typedef struct hfa384x_rx_frame {
u16 reserved2;
/*-- 802.11 Header Information (802.11 byte order) --*/
- u16 frame_control;
+ __le16 frame_control;
u16 duration_id;
u8 address1[6];
u8 address2[6];
u8 address3[6];
u16 sequence_control;
u8 address4[6];
- u16 data_len; /* hfa384x (little endian) format */
+ __le16 data_len; /* hfa384x (little endian) format */
/*-- 802.3 Header Information --*/
u8 dest_addr[6];
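The __le16 annotations mark fields that remain in the card's little-endian byte order until explicitly converted, so sparse can flag a missing conversion. Typical access, sketched on the assumption that rxfrm points at a received hfa384x_rx_frame:

    u16 fc  = le16_to_cpu(rxfrm->frame_control);  /* host order for bit tests */
    u16 len = le16_to_cpu(rxfrm->data_len);       /* payload length in bytes */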
diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h
index 2610824d36d7..3dd066ac034e 100644
--- a/drivers/staging/wlan-ng/p80211mgmt.h
+++ b/drivers/staging/wlan-ng/p80211mgmt.h
@@ -91,7 +91,7 @@
* fall at the end of their respective frames).
* 5a) The length field is set to include the last of the fixed and fixed
* length fields. It may have to be updated for optional or variable
-* length information elements.
+* length information elements.
* 6) Optional and variable length information elements are special cases
* and must be handled individually by the client code.
* --------------------------------------------------------------------
@@ -219,82 +219,82 @@
/*-- Information Element Types --------------------*/
/* prototype structure, all IEs start with these members */
-typedef struct wlan_ie {
+struct wlan_ie {
u8 eid;
u8 len;
-} __packed wlan_ie_t;
+} __packed;
/*-- Service Set Identity (SSID) -----------------*/
-typedef struct wlan_ie_ssid {
+struct wlan_ie_ssid {
u8 eid;
u8 len;
u8 ssid[1]; /* may be zero, ptrs may overlap */
-} __packed wlan_ie_ssid_t;
+} __packed;
/*-- Supported Rates -----------------------------*/
-typedef struct wlan_ie_supp_rates {
+struct wlan_ie_supp_rates {
u8 eid;
u8 len;
u8 rates[1]; /* had better be at LEAST one! */
-} __packed wlan_ie_supp_rates_t;
+} __packed;
/*-- FH Parameter Set ----------------------------*/
-typedef struct wlan_ie_fh_parms {
+struct wlan_ie_fh_parms {
u8 eid;
u8 len;
u16 dwell;
u8 hopset;
u8 hoppattern;
u8 hopindex;
-} __packed wlan_ie_fh_parms_t;
+} __packed;
/*-- DS Parameter Set ----------------------------*/
-typedef struct wlan_ie_ds_parms {
+struct wlan_ie_ds_parms {
u8 eid;
u8 len;
u8 curr_ch;
-} __packed wlan_ie_ds_parms_t;
+} __packed;
/*-- CF Parameter Set ----------------------------*/
-typedef struct wlan_ie_cf_parms {
+struct wlan_ie_cf_parms {
u8 eid;
u8 len;
u8 cfp_cnt;
u8 cfp_period;
u16 cfp_maxdur;
u16 cfp_durremaining;
-} __packed wlan_ie_cf_parms_t;
+} __packed;
/*-- TIM ------------------------------------------*/
-typedef struct wlan_ie_tim {
+struct wlan_ie_tim {
u8 eid;
u8 len;
u8 dtim_cnt;
u8 dtim_period;
u8 bitmap_ctl;
u8 virt_bm[1];
-} __packed wlan_ie_tim_t;
+} __packed;
/*-- IBSS Parameter Set ---------------------------*/
-typedef struct wlan_ie_ibss_parms {
+struct wlan_ie_ibss_parms {
u8 eid;
u8 len;
u16 atim_win;
-} __packed wlan_ie_ibss_parms_t;
+} __packed;
/*-- Challenge Text ------------------------------*/
-typedef struct wlan_ie_challenge {
+struct wlan_ie_challenge {
u8 eid;
u8 len;
u8 challenge[1];
-} __packed wlan_ie_challenge_t;
+} __packed;
/*-------------------------------------------------*/
/* Frame Types */
/* prototype structure, all mgmt frame types will start with these members */
-typedef struct wlan_fr_mgmt {
+struct wlan_fr_mgmt {
u16 type;
u16 len; /* DOES NOT include CRC !!!! */
u8 *buf;
@@ -303,10 +303,10 @@ typedef struct wlan_fr_mgmt {
void *priv;
/*-- fixed fields -----------*/
/*-- info elements ----------*/
-} wlan_fr_mgmt_t;
+};
/*-- Beacon ---------------------------------------*/
-typedef struct wlan_fr_beacon {
+struct wlan_fr_beacon {
u16 type;
u16 len;
u8 *buf;
@@ -318,18 +318,18 @@ typedef struct wlan_fr_beacon {
u16 *bcn_int;
u16 *cap_info;
/*-- info elements ----------*/
- wlan_ie_ssid_t *ssid;
- wlan_ie_supp_rates_t *supp_rates;
- wlan_ie_fh_parms_t *fh_parms;
- wlan_ie_ds_parms_t *ds_parms;
- wlan_ie_cf_parms_t *cf_parms;
- wlan_ie_ibss_parms_t *ibss_parms;
- wlan_ie_tim_t *tim;
+ struct wlan_ie_ssid *ssid;
+ struct wlan_ie_supp_rates *supp_rates;
+ struct wlan_ie_fh_parms *fh_parms;
+ struct wlan_ie_ds_parms *ds_parms;
+ struct wlan_ie_cf_parms *cf_parms;
+ struct wlan_ie_ibss_parms *ibss_parms;
+ struct wlan_ie_tim *tim;
-} wlan_fr_beacon_t;
+};
/*-- IBSS ATIM ------------------------------------*/
-typedef struct wlan_fr_ibssatim {
+struct wlan_fr_ibssatim {
u16 type;
u16 len;
u8 *buf;
@@ -342,10 +342,10 @@ typedef struct wlan_fr_ibssatim {
/* this frame type has a null body */
-} wlan_fr_ibssatim_t;
+};
/*-- Disassociation -------------------------------*/
-typedef struct wlan_fr_disassoc {
+struct wlan_fr_disassoc {
u16 type;
u16 len;
u8 *buf;
@@ -357,10 +357,10 @@ typedef struct wlan_fr_disassoc {
/*-- info elements ----------*/
-} wlan_fr_disassoc_t;
+};
/*-- Association Request --------------------------*/
-typedef struct wlan_fr_assocreq {
+struct wlan_fr_assocreq {
u16 type;
u16 len;
u8 *buf;
@@ -371,13 +371,13 @@ typedef struct wlan_fr_assocreq {
u16 *cap_info;
u16 *listen_int;
/*-- info elements ----------*/
- wlan_ie_ssid_t *ssid;
- wlan_ie_supp_rates_t *supp_rates;
+ struct wlan_ie_ssid *ssid;
+ struct wlan_ie_supp_rates *supp_rates;
-} wlan_fr_assocreq_t;
+};
/*-- Association Response -------------------------*/
-typedef struct wlan_fr_assocresp {
+struct wlan_fr_assocresp {
u16 type;
u16 len;
u8 *buf;
@@ -389,12 +389,12 @@ typedef struct wlan_fr_assocresp {
u16 *status;
u16 *aid;
/*-- info elements ----------*/
- wlan_ie_supp_rates_t *supp_rates;
+ struct wlan_ie_supp_rates *supp_rates;
-} wlan_fr_assocresp_t;
+};
/*-- Reassociation Request ------------------------*/
-typedef struct wlan_fr_reassocreq {
+struct wlan_fr_reassocreq {
u16 type;
u16 len;
u8 *buf;
@@ -406,13 +406,13 @@ typedef struct wlan_fr_reassocreq {
u16 *listen_int;
u8 *curr_ap;
/*-- info elements ----------*/
- wlan_ie_ssid_t *ssid;
- wlan_ie_supp_rates_t *supp_rates;
+ struct wlan_ie_ssid *ssid;
+ struct wlan_ie_supp_rates *supp_rates;
-} wlan_fr_reassocreq_t;
+};
/*-- Reassociation Response -----------------------*/
-typedef struct wlan_fr_reassocresp {
+struct wlan_fr_reassocresp {
u16 type;
u16 len;
u8 *buf;
@@ -424,12 +424,12 @@ typedef struct wlan_fr_reassocresp {
u16 *status;
u16 *aid;
/*-- info elements ----------*/
- wlan_ie_supp_rates_t *supp_rates;
+ struct wlan_ie_supp_rates *supp_rates;
-} wlan_fr_reassocresp_t;
+};
/*-- Probe Request --------------------------------*/
-typedef struct wlan_fr_probereq {
+struct wlan_fr_probereq {
u16 type;
u16 len;
u8 *buf;
@@ -438,13 +438,13 @@ typedef struct wlan_fr_probereq {
void *priv;
/*-- fixed fields -----------*/
/*-- info elements ----------*/
- wlan_ie_ssid_t *ssid;
- wlan_ie_supp_rates_t *supp_rates;
+ struct wlan_ie_ssid *ssid;
+ struct wlan_ie_supp_rates *supp_rates;
-} wlan_fr_probereq_t;
+};
/*-- Probe Response -------------------------------*/
-typedef struct wlan_fr_proberesp {
+struct wlan_fr_proberesp {
u16 type;
u16 len;
u8 *buf;
@@ -456,16 +456,16 @@ typedef struct wlan_fr_proberesp {
u16 *bcn_int;
u16 *cap_info;
/*-- info elements ----------*/
- wlan_ie_ssid_t *ssid;
- wlan_ie_supp_rates_t *supp_rates;
- wlan_ie_fh_parms_t *fh_parms;
- wlan_ie_ds_parms_t *ds_parms;
- wlan_ie_cf_parms_t *cf_parms;
- wlan_ie_ibss_parms_t *ibss_parms;
-} wlan_fr_proberesp_t;
+ struct wlan_ie_ssid *ssid;
+ struct wlan_ie_supp_rates *supp_rates;
+ struct wlan_ie_fh_parms *fh_parms;
+ struct wlan_ie_ds_parms *ds_parms;
+ struct wlan_ie_cf_parms *cf_parms;
+ struct wlan_ie_ibss_parms *ibss_parms;
+};
/*-- Authentication -------------------------------*/
-typedef struct wlan_fr_authen {
+struct wlan_fr_authen {
u16 type;
u16 len;
u8 *buf;
@@ -477,12 +477,12 @@ typedef struct wlan_fr_authen {
u16 *auth_seq;
u16 *status;
/*-- info elements ----------*/
- wlan_ie_challenge_t *challenge;
+ struct wlan_ie_challenge *challenge;
-} wlan_fr_authen_t;
+};
/*-- Deauthentication ----------------------------*/
-typedef struct wlan_fr_deauthen {
+struct wlan_fr_deauthen {
u16 type;
u16 len;
u8 *buf;
@@ -494,27 +494,27 @@ typedef struct wlan_fr_deauthen {
/*-- info elements ----------*/
-} wlan_fr_deauthen_t;
-
-void wlan_mgmt_encode_beacon(wlan_fr_beacon_t *f);
-void wlan_mgmt_decode_beacon(wlan_fr_beacon_t *f);
-void wlan_mgmt_encode_disassoc(wlan_fr_disassoc_t *f);
-void wlan_mgmt_decode_disassoc(wlan_fr_disassoc_t *f);
-void wlan_mgmt_encode_assocreq(wlan_fr_assocreq_t *f);
-void wlan_mgmt_decode_assocreq(wlan_fr_assocreq_t *f);
-void wlan_mgmt_encode_assocresp(wlan_fr_assocresp_t *f);
-void wlan_mgmt_decode_assocresp(wlan_fr_assocresp_t *f);
-void wlan_mgmt_encode_reassocreq(wlan_fr_reassocreq_t *f);
-void wlan_mgmt_decode_reassocreq(wlan_fr_reassocreq_t *f);
-void wlan_mgmt_encode_reassocresp(wlan_fr_reassocresp_t *f);
-void wlan_mgmt_decode_reassocresp(wlan_fr_reassocresp_t *f);
-void wlan_mgmt_encode_probereq(wlan_fr_probereq_t *f);
-void wlan_mgmt_decode_probereq(wlan_fr_probereq_t *f);
-void wlan_mgmt_encode_proberesp(wlan_fr_proberesp_t *f);
-void wlan_mgmt_decode_proberesp(wlan_fr_proberesp_t *f);
-void wlan_mgmt_encode_authen(wlan_fr_authen_t *f);
-void wlan_mgmt_decode_authen(wlan_fr_authen_t *f);
-void wlan_mgmt_encode_deauthen(wlan_fr_deauthen_t *f);
-void wlan_mgmt_decode_deauthen(wlan_fr_deauthen_t *f);
+};
+
+void wlan_mgmt_encode_beacon(struct wlan_fr_beacon *f);
+void wlan_mgmt_decode_beacon(struct wlan_fr_beacon *f);
+void wlan_mgmt_encode_disassoc(struct wlan_fr_disassoc *f);
+void wlan_mgmt_decode_disassoc(struct wlan_fr_disassoc *f);
+void wlan_mgmt_encode_assocreq(struct wlan_fr_assocreq *f);
+void wlan_mgmt_decode_assocreq(struct wlan_fr_assocreq *f);
+void wlan_mgmt_encode_assocresp(struct wlan_fr_assocresp *f);
+void wlan_mgmt_decode_assocresp(struct wlan_fr_assocresp *f);
+void wlan_mgmt_encode_reassocreq(struct wlan_fr_reassocreq *f);
+void wlan_mgmt_decode_reassocreq(struct wlan_fr_reassocreq *f);
+void wlan_mgmt_encode_reassocresp(struct wlan_fr_reassocresp *f);
+void wlan_mgmt_decode_reassocresp(struct wlan_fr_reassocresp *f);
+void wlan_mgmt_encode_probereq(struct wlan_fr_probereq *f);
+void wlan_mgmt_decode_probereq(struct wlan_fr_probereq *f);
+void wlan_mgmt_encode_proberesp(struct wlan_fr_proberesp *f);
+void wlan_mgmt_decode_proberesp(struct wlan_fr_proberesp *f);
+void wlan_mgmt_encode_authen(struct wlan_fr_authen *f);
+void wlan_mgmt_decode_authen(struct wlan_fr_authen *f);
+void wlan_mgmt_encode_deauthen(struct wlan_fr_deauthen *f);
+void wlan_mgmt_decode_deauthen(struct wlan_fr_deauthen *f);
#endif /* _P80211MGMT_H */
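With the typedefs removed, callers spell out the struct types; the decode helpers still expect a caller-initialized frame descriptor whose buf/len members point at the raw management frame. A minimal sketch under those assumptions:

    struct wlan_fr_beacon f;

    memset(&f, 0, sizeof(f));
    f.buf = frame_buf;              /* assumed: start of the received mgmt frame */
    f.len = frame_len;              /* assumed: frame length in bytes */
    wlan_mgmt_decode_beacon(&f);    /* fills the fixed-field and IE pointers */
    if (f.ssid)
            pr_debug("beacon ssid ie, len %u\n", f.ssid->len);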
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 0039e082507d..e3ae8024d3a4 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -262,7 +262,6 @@ static void p80211netdev_rx_bh(unsigned long arg)
struct sk_buff *skb = NULL;
netdevice_t *dev = wlandev->netdev;
struct p80211_hdr_a3 *hdr;
- u16 fc;
/* Let's empty our our queue */
while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
@@ -286,8 +285,7 @@ static void p80211netdev_rx_bh(unsigned long arg)
continue;
} else {
hdr = (struct p80211_hdr_a3 *) skb->data;
- fc = le16_to_cpu(hdr->fc);
- if (p80211_rx_typedrop(wlandev, fc)) {
+ if (p80211_rx_typedrop(wlandev, hdr->fc)) {
dev_kfree_skb(skb);
continue;
}
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 0dfd2a4933ef..2b0c23587dfc 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -201,7 +201,7 @@ static int validate_identity(void);
* 0 - success
* ~0 - failure
----------------------------------------------------------------*/
-int prism2_fwtry(struct usb_device *udev, wlandevice_t *wlandev)
+static int prism2_fwtry(struct usb_device *udev, wlandevice_t *wlandev)
{
const struct firmware *fw_entry = NULL;
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 2199f5afbf90..f9ccf2371dba 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -454,9 +454,8 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
*/
result = hfa384x_drvr_start(hw);
if (result) {
- printk(KERN_ERR
- "hfa384x_drvr_start() failed,"
- "result=%d\n", (int)result);
+ netdev_err(wlandev->netdev,
+ "hfa384x_drvr_start() failed,result=%d\n", (int)result);
result =
P80211ENUM_resultcode_implementation_failure;
wlandev->msdstate = WLAN_MSD_HWPRESENT;
@@ -499,9 +498,8 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
*/
result = hfa384x_drvr_start(hw);
if (result) {
- printk(KERN_ERR
- "hfa384x_drvr_start() failed,"
- "result=%d\n", (int)result);
+ netdev_err(wlandev->netdev,
+ "hfa384x_drvr_start() failed,result=%d\n", (int)result);
result =
P80211ENUM_resultcode_implementation_failure;
wlandev->msdstate = WLAN_MSD_HWPRESENT;
@@ -510,9 +508,8 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
result = prism2sta_getcardinfo(wlandev);
if (result) {
- printk(KERN_ERR
- "prism2sta_getcardinfo() failed,"
- "result=%d\n", (int)result);
+ netdev_err(wlandev->netdev,
+ "prism2sta_getcardinfo() failed,result=%d\n", (int)result);
result =
P80211ENUM_resultcode_implementation_failure;
hfa384x_drvr_stop(hw);
@@ -521,9 +518,8 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
}
result = prism2sta_globalsetup(wlandev);
if (result) {
- printk(KERN_ERR
- "prism2sta_globalsetup() failed,"
- "result=%d\n", (int)result);
+ netdev_err(wlandev->netdev,
+ "prism2sta_globalsetup() failed,result=%d\n", (int)result);
result =
P80211ENUM_resultcode_implementation_failure;
hfa384x_drvr_stop(hw);
@@ -623,7 +619,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->ident_nic,
sizeof(hfa384x_compident_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve NICIDENTITY\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve NICIDENTITY\n");
goto failed;
}
@@ -633,7 +629,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->ident_nic.major = le16_to_cpu(hw->ident_nic.major);
hw->ident_nic.minor = le16_to_cpu(hw->ident_nic.minor);
- printk(KERN_INFO "ident: nic h/w: id=0x%02x %d.%d.%d\n",
+ netdev_info(wlandev->netdev, "ident: nic h/w: id=0x%02x %d.%d.%d\n",
hw->ident_nic.id, hw->ident_nic.major,
hw->ident_nic.minor, hw->ident_nic.variant);
@@ -642,7 +638,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->ident_pri_fw,
sizeof(hfa384x_compident_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve PRIIDENTITY\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve PRIIDENTITY\n");
goto failed;
}
@@ -652,7 +648,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->ident_pri_fw.major = le16_to_cpu(hw->ident_pri_fw.major);
hw->ident_pri_fw.minor = le16_to_cpu(hw->ident_pri_fw.minor);
- printk(KERN_INFO "ident: pri f/w: id=0x%02x %d.%d.%d\n",
+ netdev_info(wlandev->netdev, "ident: pri f/w: id=0x%02x %d.%d.%d\n",
hw->ident_pri_fw.id, hw->ident_pri_fw.major,
hw->ident_pri_fw.minor, hw->ident_pri_fw.variant);
@@ -661,12 +657,12 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->ident_sta_fw,
sizeof(hfa384x_compident_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve STAIDENTITY\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve STAIDENTITY\n");
goto failed;
}
if (hw->ident_nic.id < 0x8000) {
- printk(KERN_ERR
+ netdev_err(wlandev->netdev,
"FATAL: Card is not an Intersil Prism2/2.5/3\n");
result = -1;
goto failed;
@@ -683,16 +679,16 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->ident_sta_fw.variant &= ~((u16) (BIT(14) | BIT(15)));
if (hw->ident_sta_fw.id == 0x1f) {
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"ident: sta f/w: id=0x%02x %d.%d.%d\n",
hw->ident_sta_fw.id, hw->ident_sta_fw.major,
hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
} else {
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"ident: ap f/w: id=0x%02x %d.%d.%d\n",
hw->ident_sta_fw.id, hw->ident_sta_fw.major,
hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
- printk(KERN_ERR "Unsupported Tertiary AP firmeare loaded!\n");
+ netdev_err(wlandev->netdev, "Unsupported Tertiary AP firmeare loaded!\n");
goto failed;
}
@@ -701,7 +697,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_sup_mfi,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve MFISUPRANGE\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve MFISUPRANGE\n");
goto failed;
}
@@ -713,7 +709,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_sup_mfi.bottom = le16_to_cpu(hw->cap_sup_mfi.bottom);
hw->cap_sup_mfi.top = le16_to_cpu(hw->cap_sup_mfi.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_sup_mfi.role, hw->cap_sup_mfi.id,
hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom,
@@ -724,7 +720,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_sup_cfi,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve CFISUPRANGE\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve CFISUPRANGE\n");
goto failed;
}
@@ -736,7 +732,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_sup_cfi.bottom = le16_to_cpu(hw->cap_sup_cfi.bottom);
hw->cap_sup_cfi.top = le16_to_cpu(hw->cap_sup_cfi.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_sup_cfi.role, hw->cap_sup_cfi.id,
hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom,
@@ -747,7 +743,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_sup_pri,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve PRISUPRANGE\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve PRISUPRANGE\n");
goto failed;
}
@@ -759,7 +755,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_sup_pri.bottom = le16_to_cpu(hw->cap_sup_pri.bottom);
hw->cap_sup_pri.top = le16_to_cpu(hw->cap_sup_pri.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_sup_pri.role, hw->cap_sup_pri.id,
hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom,
@@ -770,7 +766,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_sup_sta,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve STASUPRANGE\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve STASUPRANGE\n");
goto failed;
}
@@ -783,13 +779,13 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_sup_sta.top = le16_to_cpu(hw->cap_sup_sta.top);
if (hw->cap_sup_sta.id == 0x04) {
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"STA:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_sup_sta.role, hw->cap_sup_sta.id,
hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
hw->cap_sup_sta.top);
} else {
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"AP:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_sup_sta.role, hw->cap_sup_sta.id,
hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
@@ -801,7 +797,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_act_pri_cfi,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve PRI_CFIACTRANGES\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve PRI_CFIACTRANGES\n");
goto failed;
}
@@ -813,7 +809,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_act_pri_cfi.bottom = le16_to_cpu(hw->cap_act_pri_cfi.bottom);
hw->cap_act_pri_cfi.top = le16_to_cpu(hw->cap_act_pri_cfi.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id,
hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom,
@@ -824,7 +820,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_act_sta_cfi,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve STA_CFIACTRANGES\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve STA_CFIACTRANGES\n");
goto failed;
}
@@ -836,7 +832,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_act_sta_cfi.bottom = le16_to_cpu(hw->cap_act_sta_cfi.bottom);
hw->cap_act_sta_cfi.top = le16_to_cpu(hw->cap_act_sta_cfi.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id,
hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom,
@@ -847,7 +843,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_act_sta_mfi,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve STA_MFIACTRANGES\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve STA_MFIACTRANGES\n");
goto failed;
}
@@ -859,7 +855,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_act_sta_mfi.bottom = le16_to_cpu(hw->cap_act_sta_mfi.bottom);
hw->cap_act_sta_mfi.top = le16_to_cpu(hw->cap_act_sta_mfi.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id,
hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom,
@@ -871,9 +867,9 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
if (!result) {
wlan_mkprintstr(snum, HFA384x_RID_NICSERIALNUMBER_LEN,
pstr, sizeof(pstr));
- printk(KERN_INFO "Prism2 card SN: %s\n", pstr);
+ netdev_info(wlandev->netdev, "Prism2 card SN: %s\n", pstr);
} else {
- printk(KERN_ERR "Failed to retrieve Prism2 Card SN\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve Prism2 Card SN\n");
goto failed;
}
@@ -881,7 +877,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CNFOWNMACADDR,
wlandev->netdev->dev_addr, ETH_ALEN);
if (result != 0) {
- printk(KERN_ERR "Failed to retrieve mac address\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve mac address\n");
goto failed;
}
@@ -909,7 +905,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
goto done;
failed:
- printk(KERN_ERR "Failed, result=%d\n", result);
+ netdev_err(wlandev->netdev, "Failed, result=%d\n", result);
done:
return result;
}
@@ -1085,7 +1081,7 @@ static void prism2sta_inf_scanresults(wlandevice_t *wlandev,
HFA384x_RID_JOINREQUEST,
&joinreq, HFA384x_RID_JOINREQUEST_LEN);
if (result) {
- printk(KERN_ERR "setconfig(joinreq) failed, result=%d\n",
+ netdev_err(wlandev->netdev, "setconfig(joinreq) failed, result=%d\n",
result);
}
}
@@ -1226,7 +1222,7 @@ void prism2sta_processing_defer(struct work_struct *data)
*/
netif_carrier_off(wlandev->netdev);
- printk(KERN_INFO "linkstatus=NOTCONNECTED (unhandled)\n");
+ netdev_info(wlandev->netdev, "linkstatus=NOTCONNECTED (unhandled)\n");
break;
case HFA384x_LINK_CONNECTED:
@@ -1253,7 +1249,7 @@ void prism2sta_processing_defer(struct work_struct *data)
if (wlandev->netdev->type == ARPHRD_ETHER) {
u16 portstatus;
- printk(KERN_INFO "linkstatus=CONNECTED\n");
+ netdev_info(wlandev->netdev, "linkstatus=CONNECTED\n");
/* For non-usb devices, we can use the sync versions */
/* Collect the BSSID, and set state to allow tx */
@@ -1315,7 +1311,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* Block Transmits, Ignore receives of data frames
*/
if (wlandev->netdev->type == ARPHRD_ETHER)
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"linkstatus=DISCONNECTED (unhandled)\n");
wlandev->macmode = WLAN_MACMODE_NONE;
@@ -1341,7 +1337,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* Indicate Reassociation
* Enable Transmits, Receives and pass up data frames
*/
- printk(KERN_INFO "linkstatus=AP_CHANGE\n");
+ netdev_info(wlandev->netdev, "linkstatus=AP_CHANGE\n");
result = hfa384x_drvr_getconfig(hw,
HFA384x_RID_CURRENTBSSID,
@@ -1383,7 +1379,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* Response:
* Block Transmits, Ignore receives of data frames
*/
- printk(KERN_INFO "linkstatus=AP_OUTOFRANGE (unhandled)\n");
+ netdev_info(wlandev->netdev, "linkstatus=AP_OUTOFRANGE (unhandled)\n");
netif_carrier_off(wlandev->netdev);
@@ -1396,7 +1392,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* Response:
* Enable Transmits, Receives and pass up data frames
*/
- printk(KERN_INFO "linkstatus=AP_INRANGE\n");
+ netdev_info(wlandev->netdev, "linkstatus=AP_INRANGE\n");
hw->link_status = HFA384x_LINK_CONNECTED;
netif_carrier_on(wlandev->netdev);
@@ -1420,10 +1416,10 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_JOINREQUEST,
&joinreq,
HFA384x_RID_JOINREQUEST_LEN);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"linkstatus=ASSOCFAIL (re-submitting join)\n");
} else {
- printk(KERN_INFO "linkstatus=ASSOCFAIL (unhandled)\n");
+ netdev_info(wlandev->netdev, "linkstatus=ASSOCFAIL (unhandled)\n");
}
netif_carrier_off(wlandev->netdev);
@@ -1713,7 +1709,7 @@ static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev,
if (result) {
if (added)
hw->authlist.cnt--;
- printk(KERN_ERR
+ netdev_err(wlandev->netdev,
"setconfig(authenticatestation) failed, result=%d\n",
result);
}
@@ -1928,7 +1924,7 @@ static wlandevice_t *create_wlan(void)
hw = kzalloc(sizeof(hfa384x_t), GFP_KERNEL);
if (!wlandev || !hw) {
- printk(KERN_ERR "%s: Memory allocation failure.\n", dev_info);
+ pr_err("%s: Memory allocation failure.\n", dev_info);
kfree(wlandev);
kfree(hw);
return NULL;
@@ -1980,7 +1976,7 @@ void prism2sta_commsqual_defer(struct work_struct *data)
&hw->qual, HFA384x_RID_DBMCOMMSQUALITY_LEN);
if (result) {
- printk(KERN_ERR "error fetching commsqual\n");
+ netdev_err(wlandev->netdev, "error fetching commsqual\n");
return;
}
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index b3ff603e6225..466804687fc0 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -1754,8 +1754,7 @@ static int xgifb_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "Unable to request memory size %x\n",
xgifb_info->video_size);
dev_err(&pdev->dev,
- "Fatal error: Unable to reserve frame buffer memory. "
- "Is there another framebuffer driver active?\n");
+ "Fatal error: Unable to reserve frame buffer memory. Is there another framebuffer driver active?\n");
ret = -ENODEV;
goto error_disable;
}
@@ -2087,23 +2086,19 @@ static struct pci_driver xgifb_driver = {
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode,
- "Selects the desired default display mode in the format XxYxDepth "
- "(eg. 1024x768x16).");
+ "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16).");
module_param(forcecrt2type, charp, 0);
MODULE_PARM_DESC(forcecrt2type,
- "Force the second display output type. Possible values are NONE, "
- "LCD, TV, VGA, SVIDEO or COMPOSITE.");
+ "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE.");
module_param(vesa, int, 0);
MODULE_PARM_DESC(vesa,
- "Selects the desired default display mode by VESA mode number "
- "(eg. 0x117).");
+ "Selects the desired default display mode by VESA mode number (eg. 0x117).");
module_param(filter, int, 0);
MODULE_PARM_DESC(filter,
- "Selects TV flicker filter type (only for systems with a SiS301 video bridge). "
- "Possible values 0-7. Default: [no filter]).");
+ "Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter].");
static int __init xgifb_init(void)
{
diff --git a/drivers/staging/xillybus/Kconfig b/drivers/staging/xillybus/Kconfig
index 75c38c8c26eb..b53bdf12da0d 100644
--- a/drivers/staging/xillybus/Kconfig
+++ b/drivers/staging/xillybus/Kconfig
@@ -5,6 +5,7 @@
config XILLYBUS
tristate "Xillybus generic FPGA interface"
depends on PCI || (OF_ADDRESS && OF_IRQ)
+ select CRC32
help
Xillybus is a generic interface for peripherals designed on
programmable logic (FPGA). The driver probes the hardware for
@@ -16,7 +17,7 @@ if XILLYBUS
config XILLYBUS_PCIE
tristate "Xillybus over PCIe"
- depends on PCI
+ depends on PCI_MSI
help
Set to M if you want Xillybus to use PCI Express for communicating
with the FPGA.
diff --git a/drivers/staging/xillybus/xillybus_core.c b/drivers/staging/xillybus/xillybus_core.c
index 2ebaf166038c..b0a6696f2da0 100644
--- a/drivers/staging/xillybus/xillybus_core.c
+++ b/drivers/staging/xillybus/xillybus_core.c
@@ -2318,8 +2318,12 @@ static int __init xillybus_init(void)
}
xillybus_wq = alloc_workqueue(xillyname, 0, 0);
+ if (!xillybus_wq) {
+ class_destroy(xillybus_class);
+ rc = -ENOMEM;
+ }
- return 0; /* Success */
+ return rc;
}
static void __exit xillybus_exit(void)
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 5f88d767671e..2d51912a6e40 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -143,7 +143,7 @@ config RCAR_THERMAL
config KIRKWOOD_THERMAL
tristate "Temperature sensor on Marvell Kirkwood SoCs"
- depends on ARCH_KIRKWOOD
+ depends on ARCH_KIRKWOOD || MACH_KIRKWOOD
depends on OF
help
Support for the Kirkwood thermal sensor driver into the Linux thermal
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 45af765a3198..a99c63152b8d 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -62,12 +62,16 @@ enum imx_thermal_trip {
#define IMX_POLLING_DELAY 2000 /* millisecond */
#define IMX_PASSIVE_DELAY 1000
+#define FACTOR0 10000000
+#define FACTOR1 15976
+#define FACTOR2 4297157
+
struct imx_thermal_data {
struct thermal_zone_device *tz;
struct thermal_cooling_device *cdev;
enum thermal_device_mode mode;
struct regmap *tempmon;
- int c1, c2; /* See formula in imx_get_sensor_data() */
+ u32 c1, c2; /* See formula in imx_get_sensor_data() */
unsigned long temp_passive;
unsigned long temp_critical;
unsigned long alarm_temp;
@@ -84,7 +88,7 @@ static void imx_set_alarm_temp(struct imx_thermal_data *data,
int alarm_value;
data->alarm_temp = alarm_temp;
- alarm_value = (alarm_temp - data->c2) / data->c1;
+ alarm_value = (data->c2 - alarm_temp) / data->c1;
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_ALARM_VALUE_MASK);
regmap_write(map, TEMPSENSE0 + REG_SET, alarm_value <<
TEMPSENSE0_ALARM_VALUE_SHIFT);
@@ -136,7 +140,7 @@ static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
n_meas = (val & TEMPSENSE0_TEMP_CNT_MASK) >> TEMPSENSE0_TEMP_CNT_SHIFT;
/* See imx_get_sensor_data() for formula derivation */
- *temp = data->c2 + data->c1 * n_meas;
+ *temp = data->c2 - n_meas * data->c1;
/* Update alarm value to next higher trip point */
if (data->alarm_temp == data->temp_passive && *temp >= data->temp_passive)
@@ -305,6 +309,7 @@ static int imx_get_sensor_data(struct platform_device *pdev)
int t1, t2, n1, n2;
int ret;
u32 val;
+ u64 temp64;
map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"fsl,tempmon-data");
@@ -330,6 +335,8 @@ static int imx_get_sensor_data(struct platform_device *pdev)
* [31:20] - sensor value @ 25C
* [19:8] - sensor value of hot
* [7:0] - hot temperature value
+ * The universal formula now only needs the sensor value @ 25C:
+ * slope = 0.4297157 - (0.0015976 * 25C fuse)
*/
n1 = val >> 20;
n2 = (val & 0xfff00) >> 8;
@@ -337,20 +344,26 @@ static int imx_get_sensor_data(struct platform_device *pdev)
t1 = 25; /* t1 always 25C */
/*
- * Derived from linear interpolation,
- * Tmeas = T2 + (Nmeas - N2) * (T1 - T2) / (N1 - N2)
+ * Derived from linear interpolation:
+ * slope = 0.4297157 - (0.0015976 * 25C fuse)
+ * slope = (FACTOR2 - FACTOR1 * n1) / FACTOR0
+ * (Nmeas - n1) / (Tmeas - t1) = slope
* We want to reduce this down to the minimum computation necessary
* for each temperature read. Also, we want Tmeas in millicelsius
* and we don't want to lose precision from integer division. So...
- * milli_Tmeas = 1000 * T2 + 1000 * (Nmeas - N2) * (T1 - T2) / (N1 - N2)
- * Let constant c1 = 1000 * (T1 - T2) / (N1 - N2)
- * milli_Tmeas = (1000 * T2) + c1 * (Nmeas - N2)
- * milli_Tmeas = (1000 * T2) + (c1 * Nmeas) - (c1 * N2)
- * Let constant c2 = (1000 * T2) - (c1 * N2)
- * milli_Tmeas = c2 + (c1 * Nmeas)
+ * Tmeas = (Nmeas - n1) / slope + t1
+ * milli_Tmeas = 1000 * (Nmeas - n1) / slope + 1000 * t1
+ * milli_Tmeas = -1000 * (n1 - Nmeas) / slope + 1000 * t1
+ * Let constant c1 = (-1000 / slope)
+ * milli_Tmeas = (n1 - Nmeas) * c1 + 1000 * t1
+ * Let constant c2 = n1 * c1 + 1000 * t1
+ * milli_Tmeas = c2 - Nmeas * c1
*/
- data->c1 = 1000 * (t1 - t2) / (n1 - n2);
- data->c2 = 1000 * t2 - data->c1 * n2;
+ temp64 = FACTOR0;
+ temp64 *= 1000;
+ do_div(temp64, FACTOR1 * n1 - FACTOR2);
+ data->c1 = temp64;
+ data->c2 = n1 * data->c1 + 1000 * t1;
/*
* Set the default passive cooling trip point to 20 °C below the
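As a sanity check of the new calibration path, here is a rough stand-alone sketch of the same arithmetic; n1 = 1600 and n_meas = 1483 are made-up sensor counts chosen only to exercise the formula, and plain 64-bit division stands in for the kernel's do_div() helper.

#include <stdio.h>

/* Worked example of the calibration above; n1 is a hypothetical 25C fuse count. */
#define FACTOR0 10000000
#define FACTOR1 15976
#define FACTOR2 4297157

int main(void)
{
	unsigned int n1 = 1600, n_meas, c1, c2;
	unsigned long long temp64 = (unsigned long long)FACTOR0 * 1000;

	temp64 /= FACTOR1 * n1 - FACTOR2;	/* do_div() in the kernel */
	c1 = temp64;				/* 470 millicelsius per count */
	c2 = n1 * c1 + 1000 * 25;		/* 777000 */

	n_meas = 1483;				/* fewer counts means a hotter die */
	printf("temp = %u mC\n", c2 - n_meas * c1);	/* 79990 mC, about 80C */
	return 0;
}

With those inputs c1 comes out to 470 and c2 to 777000, so a reading of 1483 counts maps to roughly 80C, consistent with the negative slope noted in the comment above.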
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 79a09d02bbca..5a37940b02c9 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -299,12 +299,17 @@ static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
static void rcar_thermal_work(struct work_struct *work)
{
struct rcar_thermal_priv *priv;
+ unsigned long cctemp, nctemp;
priv = container_of(work, struct rcar_thermal_priv, work.work);
+ rcar_thermal_get_temp(priv->zone, &cctemp);
rcar_thermal_update_temp(priv);
rcar_thermal_irq_enable(priv);
- thermal_zone_device_update(priv->zone);
+
+ rcar_thermal_get_temp(priv->zone, &nctemp);
+ if (nctemp != cctemp)
+ thermal_zone_device_update(priv->zone);
}
static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
@@ -313,7 +318,7 @@ static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
status = (status >> rcar_id_to_shift(priv)) & 0x3;
- if (status & 0x3) {
+ if (status) {
dev_dbg(dev, "thermal%d %s%s\n",
priv->id,
(status & 0x2) ? "Rising " : "",
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 74c0e3474d6e..3ab12ee359b7 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1500,10 +1500,8 @@ static int ti_bandgap_resume(struct device *dev)
return ti_bandgap_restore_ctxt(bgp);
}
-static const struct dev_pm_ops ti_bandgap_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ti_bandgap_suspend,
- ti_bandgap_resume)
-};
+static SIMPLE_DEV_PM_OPS(ti_bandgap_dev_pm_ops, ti_bandgap_suspend,
+ ti_bandgap_resume);
#define DEV_PM_OPS (&ti_bandgap_dev_pm_ops)
#else
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 081fd7e6a9f0..9ea3d9d49ffc 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -590,12 +590,12 @@ static int __init pkg_temp_thermal_init(void)
platform_thermal_package_rate_control =
pkg_temp_thermal_platform_thermal_rate_control;
- get_online_cpus();
+ cpu_notifier_register_begin();
for_each_online_cpu(i)
if (get_core_online(i))
goto err_ret;
- register_hotcpu_notifier(&pkg_temp_thermal_notifier);
- put_online_cpus();
+ __register_hotcpu_notifier(&pkg_temp_thermal_notifier);
+ cpu_notifier_register_done();
pkg_temp_debugfs_init(); /* Don't care if fails */
@@ -604,7 +604,7 @@ static int __init pkg_temp_thermal_init(void)
err_ret:
for_each_online_cpu(i)
put_core_offline(i);
- put_online_cpus();
+ cpu_notifier_register_done();
kfree(pkg_work_scheduled);
platform_thermal_package_notify = NULL;
platform_thermal_package_rate_control = NULL;
@@ -617,8 +617,8 @@ static void __exit pkg_temp_thermal_exit(void)
struct phy_dev_entry *phdev, *n;
int i;
- get_online_cpus();
- unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
+ cpu_notifier_register_begin();
+ __unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
mutex_lock(&phy_dev_list_mutex);
list_for_each_entry_safe(phdev, n, &phy_dev_list, list) {
/* Restore old MSR value for package thermal interrupt */
@@ -636,7 +636,7 @@ static void __exit pkg_temp_thermal_exit(void)
for_each_online_cpu(i)
cancel_delayed_work_sync(
&per_cpu(pkg_temp_thermal_threshold_work, i));
- put_online_cpus();
+ cpu_notifier_register_done();
kfree(pkg_work_scheduled);
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 50b46881b6ca..94f9e3a38412 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -31,6 +31,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/major.h>
+#include <linux/atomic.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
@@ -70,6 +71,9 @@ static struct task_struct *hvc_task;
/* Picks up late kicks after list walk but before schedule() */
static int hvc_kicked;
+/* hvc_init is triggered from hvc_alloc, i.e. only when actually used */
+static atomic_t hvc_needs_init __read_mostly = ATOMIC_INIT(-1);
+
static int hvc_init(void);
#ifdef CONFIG_MAGIC_SYSRQ
@@ -851,7 +855,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
int i;
/* We wait until a driver actually comes along */
- if (!hvc_driver) {
+ if (atomic_inc_not_zero(&hvc_needs_init)) {
int err = hvc_init();
if (err)
return ERR_PTR(err);
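The ATOMIC_INIT(-1) trick above guarantees that exactly one hvc_alloc() caller performs hvc_init(): the first increment moves the counter from -1 to 0 and succeeds, while every later attempt sees 0 and is refused. A small user-space analogue of that semantics, using C11 atomics rather than the kernel's atomic_inc_not_zero(), might look like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Counter starts at -1: the first caller bumps it to 0 and gets "true",
 * everyone afterwards sees 0 and is refused, so the one-time init runs
 * exactly once. */
static atomic_int needs_init = -1;

static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));

	return true;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		if (inc_not_zero(&needs_init))
			printf("caller %d runs the one-time init\n", i);	/* only i == 0 */
	return 0;
}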
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index b01659bd4f7c..a585079b4b38 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -61,6 +61,7 @@ static struct hvc_opal_priv *hvc_opal_privs[MAX_NR_HVC_CONSOLES];
/* For early boot console */
static struct hvc_opal_priv hvc_opal_boot_priv;
static u32 hvc_opal_boot_termno;
+static bool hvc_opal_event_registered;
static const struct hv_ops hvc_opal_raw_ops = {
.get_chars = opal_get_chars,
@@ -161,6 +162,18 @@ static const struct hv_ops hvc_opal_hvsi_ops = {
.tiocmset = hvc_opal_hvsi_tiocmset,
};
+static int hvc_opal_console_event(struct notifier_block *nb,
+ unsigned long events, void *change)
+{
+ if (events & OPAL_EVENT_CONSOLE_INPUT)
+ hvc_kick();
+ return 0;
+}
+
+static struct notifier_block hvc_opal_console_nb = {
+ .notifier_call = hvc_opal_console_event,
+};
+
static int hvc_opal_probe(struct platform_device *dev)
{
const struct hv_ops *ops;
@@ -170,6 +183,7 @@ static int hvc_opal_probe(struct platform_device *dev)
unsigned int termno, boot = 0;
const __be32 *reg;
+
if (of_device_is_compatible(dev->dev.of_node, "ibm,opal-console-raw")) {
proto = HV_PROTOCOL_RAW;
ops = &hvc_opal_raw_ops;
@@ -213,12 +227,18 @@ static int hvc_opal_probe(struct platform_device *dev)
dev->dev.of_node->full_name,
boot ? " (boot console)" : "");
- /* We don't do IRQ yet */
+ /* We don't do IRQ ... */
hp = hvc_alloc(termno, 0, ops, MAX_VIO_PUT_CHARS);
if (IS_ERR(hp))
return PTR_ERR(hp);
dev_set_drvdata(&dev->dev, hp);
+ /* ... but we use OPAL event to kick the console */
+ if (!hvc_opal_event_registered) {
+ opal_notifier_register(&hvc_opal_console_nb);
+ hvc_opal_event_registered = true;
+ }
+
return 0;
}
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index ebd5bff0f5c1..17ee3bf0926b 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -176,9 +176,6 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
": %d chars not inserted to flip buffer!\n",
length - work);
- /*
- * This may sleep if ->low_latency is set
- */
if (work)
tty_flip_buffer_push(&tty->port);
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index d15624c1b751..41fe8a047d37 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1900,13 +1900,10 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
struct n_tty_data *ldata = tty->disc_data;
int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1;
- if (ldata->icanon && !L_EXTPROC(tty)) {
- if (ldata->canon_head != ldata->read_tail)
- return 1;
- } else if (read_cnt(ldata) >= amt)
- return 1;
-
- return 0;
+ if (ldata->icanon && !L_EXTPROC(tty))
+ return ldata->canon_head != ldata->read_tail;
+ else
+ return read_cnt(ldata) >= amt;
}
/**
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 69932b7556cf..81f909c2101f 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1694,6 +1694,10 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
static void serial_unlink_irq_chain(struct uart_8250_port *up)
{
+ /*
+ * yes, some broken gcc versions emit "warning: 'i' may be used uninitialized"
+ * but no, we are not going to take a patch that assigns NULL below.
+ */
struct irq_info *i;
struct hlist_node *n;
struct hlist_head *h;
@@ -2882,14 +2886,10 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
touch_nmi_watchdog();
- local_irq_save(flags);
- if (port->sysrq) {
- /* serial8250_handle_irq() already took the lock */
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
- } else
- spin_lock(&port->lock);
+ if (port->sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
/*
* First save the IER then disable the interrupts
@@ -2921,8 +2921,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
serial8250_modem_status(up);
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static int __init serial8250_console_setup(struct console *co, char *options)
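The rewritten locking keeps the previous behaviour (never spin on the port lock when the console path may already hold it via sysrq or an oops) while folding the IRQ disable into the lock call itself. A user-space illustration of the same try-lock-on-reentry pattern, with a pthread mutex standing in for the spinlock-plus-irqsave pair, could be sketched as:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustration only: a pthread mutex stands in for the
 * spin_lock_irqsave()/spin_trylock_irqsave() pair used above. */
static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

static void console_write(bool maybe_reentrant)
{
	bool locked = true;

	if (maybe_reentrant)		/* port->sysrq || oops_in_progress */
		locked = pthread_mutex_trylock(&port_lock) == 0;
	else
		pthread_mutex_lock(&port_lock);

	printf("emit console characters (locked=%d)\n", locked);

	if (locked)
		pthread_mutex_unlock(&port_lock);
}

int main(void)
{
	console_write(false);	/* normal path: waits for the lock */
	console_write(true);	/* oops/sysrq path: never blocks */
	return 0;
}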
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 0ff3e3624d4c..b14bcba96c25 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1366,23 +1366,44 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios,
struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
- unsigned int m = 6912;
- unsigned int n = 15625;
+ unsigned int m, n;
u32 reg;
- /* For baud rates 1M, 2M, 3M and 4M the dividers must be adjusted. */
- if (baud == 1000000 || baud == 2000000 || baud == 4000000) {
+ /*
+ * For baud rates 0.5M, 1M, 1.5M, 2M, 2.5M, 3M, 3.5M and 4M the
+ * dividers must be adjusted.
+ *
+ * uartclk = (m / n) * 100 MHz, where m <= n
+ */
+ switch (baud) {
+ case 500000:
+ case 1000000:
+ case 2000000:
+ case 4000000:
m = 64;
n = 100;
-
p->uartclk = 64000000;
- } else if (baud == 3000000) {
+ break;
+ case 3500000:
+ m = 56;
+ n = 100;
+ p->uartclk = 56000000;
+ break;
+ case 1500000:
+ case 3000000:
m = 48;
n = 100;
-
p->uartclk = 48000000;
- } else {
- p->uartclk = 44236800;
+ break;
+ case 2500000:
+ m = 40;
+ n = 100;
+ p->uartclk = 40000000;
+ break;
+ default:
+ m = 2304;
+ n = 3125;
+ p->uartclk = 73728000;
}
/* Reset the clock */
@@ -3449,6 +3470,10 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 921600,
.reg_shift = 2,
},
+ /*
+ * Intel BayTrail HSUART reference clock is 44.2368 MHz at power-on,
+ * but is overridden by byt_set_termios.
+ */
[pbn_byt] = {
.flags = FL_BASE0,
.num_ports = 1,
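A quick check of the divider table above, assuming the conventional 8250 relation baud = uartclk / (16 * divisor): the default m/n pair of 2304/3125 yields the 73.728 MHz clock mentioned in the comment, and the adjusted pairs give exact divisors for the high baud rates.

#include <stdio.h>

/* Divider sanity check; values mirror the switch statement above. */
int main(void)
{
	unsigned int uartclk;

	uartclk = 100000000ULL * 2304 / 3125;	/* default m/n: 73728000 Hz */
	printf("115200 baud -> divisor %u\n", uartclk / 16 / 115200);	/* 40, exact */

	uartclk = 100000000ULL * 48 / 100;	/* 1.5M/3M baud: 48000000 Hz */
	printf("3000000 baud -> divisor %u\n", uartclk / 16 / 3000000);	/* 1, exact */

	return 0;
}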
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index a3815eaed421..2e6d8ddc4425 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -289,7 +289,7 @@ config SERIAL_MAX3100
MAX3100 chip support
config SERIAL_MAX310X
- bool "MAX310X support"
+ tristate "MAX310X support"
depends on SPI_MASTER
select SERIAL_CORE
select REGMAP_SPI if SPI_MASTER
@@ -708,7 +708,7 @@ config SERIAL_IP22_ZILOG_CONSOLE
config SERIAL_SH_SCI
tristate "SuperH SCI(F) serial port support"
- depends on HAVE_CLK && (SUPERH || ARM || COMPILE_TEST)
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
select SERIAL_CORE
config SERIAL_SH_SCI_NR_UARTS
@@ -1024,7 +1024,7 @@ config SERIAL_SGI_IOC3
config SERIAL_MSM
bool "MSM on-chip serial port support"
- depends on ARCH_MSM
+ depends on ARCH_MSM || ARCH_QCOM
select SERIAL_CORE
config SERIAL_MSM_CONSOLE
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index d58783d364e3..d4eda24aa68b 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2154,9 +2154,19 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
amba_ports[i] = uap;
amba_set_drvdata(dev, uap);
+
+ if (!amba_reg.state) {
+ ret = uart_register_driver(&amba_reg);
+ if (ret < 0) {
+ pr_err("Failed to register AMBA-PL011 driver\n");
+ return ret;
+ }
+ }
+
ret = uart_add_one_port(&amba_reg, &uap->port);
if (ret) {
amba_ports[i] = NULL;
+ uart_unregister_driver(&amba_reg);
pl011_dma_remove(uap);
}
out:
@@ -2175,6 +2185,7 @@ static int pl011_remove(struct amba_device *dev)
amba_ports[i] = NULL;
pl011_dma_remove(uap);
+ uart_unregister_driver(&amba_reg);
return 0;
}
@@ -2230,22 +2241,14 @@ static struct amba_driver pl011_driver = {
static int __init pl011_init(void)
{
- int ret;
printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
- ret = uart_register_driver(&amba_reg);
- if (ret == 0) {
- ret = amba_driver_register(&pl011_driver);
- if (ret)
- uart_unregister_driver(&amba_reg);
- }
- return ret;
+ return amba_driver_register(&pl011_driver);
}
static void __exit pl011_exit(void)
{
amba_driver_unregister(&pl011_driver);
- uart_unregister_driver(&amba_reg);
}
/*
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index a49f10d269b2..53eeea13ff16 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -35,21 +35,18 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel_serial.h>
#include <linux/uaccess.h>
#include <linux/platform_data/atmel.h>
#include <linux/timer.h>
+#include <linux/gpio.h>
#include <asm/io.h>
#include <asm/ioctls.h>
-#ifdef CONFIG_ARM
-#include <mach/cpu.h>
-#include <asm/gpio.h>
-#endif
-
#define PDC_BUFFER_SIZE 512
/* Revisit: We should calculate this based on the actual port settings */
#define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */
@@ -115,9 +112,6 @@ static void atmel_stop_rx(struct uart_port *port);
#define UART_PUT_TCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TCR)
#define UART_GET_TCR(port) __raw_readl((port)->membase + ATMEL_PDC_TCR)
-static int (*atmel_open_hook)(struct uart_port *);
-static void (*atmel_close_hook)(struct uart_port *);
-
struct atmel_dma_buffer {
unsigned char *buf;
dma_addr_t dma_addr;
@@ -168,6 +162,7 @@ struct atmel_uart_port {
struct circ_buf rx_ring;
struct serial_rs485 rs485; /* rs485 settings */
+ int rts_gpio; /* optional RTS GPIO */
unsigned int tx_done_mask;
bool is_usart; /* usart or uart */
struct timer_list uart_timer; /* uart timer */
@@ -301,20 +296,16 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
unsigned int mode;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
-#ifdef CONFIG_ARCH_AT91RM9200
- if (cpu_is_at91rm9200()) {
- /*
- * AT91RM9200 Errata #39: RTS0 is not internally connected
- * to PA21. We need to drive the pin manually.
- */
- if (port->mapbase == AT91RM9200_BASE_US0) {
- if (mctrl & TIOCM_RTS)
- at91_set_gpio_value(AT91_PIN_PA21, 0);
- else
- at91_set_gpio_value(AT91_PIN_PA21, 1);
- }
+ /*
+ * AT91RM9200 Errata #39: RTS0 is not internally connected
+ * to PA21. We need to drive the pin as a GPIO.
+ */
+ if (gpio_is_valid(atmel_port->rts_gpio)) {
+ if (mctrl & TIOCM_RTS)
+ gpio_set_value(atmel_port->rts_gpio, 0);
+ else
+ gpio_set_value(atmel_port->rts_gpio, 1);
}
-#endif
if (mctrl & TIOCM_RTS)
control |= ATMEL_US_RTSEN;
@@ -1555,7 +1546,7 @@ static int atmel_startup(struct uart_port *port)
retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED,
tty ? tty->name : "atmel_serial", port);
if (retval) {
- printk("atmel_serial: atmel_startup - Can't get irq\n");
+ dev_err(port->dev, "atmel_startup - Can't get irq\n");
return retval;
}
@@ -1575,17 +1566,6 @@ static int atmel_startup(struct uart_port *port)
if (retval < 0)
atmel_set_ops(port);
}
- /*
- * If there is a specific "open" function (to register
- * control line interrupts)
- */
- if (atmel_open_hook) {
- retval = atmel_open_hook(port);
- if (retval) {
- free_irq(port->irq, port);
- return retval;
- }
- }
/* Save current CSR for comparison in atmel_tasklet_func() */
atmel_port->irq_status_prev = UART_GET_CSR(port);
@@ -1684,13 +1664,6 @@ static void atmel_shutdown(struct uart_port *port)
* Free the interrupt
*/
free_irq(port->irq, port);
-
- /*
- * If there is a specific "close" function (to unregister
- * control line interrupts)
- */
- if (atmel_close_hook)
- atmel_close_hook(port);
}
/*
@@ -1738,7 +1711,7 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
clk_disable_unprepare(atmel_port->clk);
break;
default:
- printk(KERN_ERR "atmel_serial: unknown pm %d\n", state);
+ dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
}
}
@@ -1853,13 +1826,10 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
mode &= ~ATMEL_US_USMODE;
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
- dev_dbg(port->dev, "Setting UART to RS485\n");
if ((atmel_port->rs485.delay_rts_after_send) > 0)
UART_PUT_TTGR(port,
atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
- } else {
- dev_dbg(port->dev, "Setting UART to RS232\n");
}
/* set the parity, stop bits and data size */
@@ -2389,6 +2359,25 @@ static int atmel_serial_probe(struct platform_device *pdev)
port = &atmel_ports[ret];
port->backup_imr = 0;
port->uart.line = ret;
+ port->rts_gpio = -EINVAL; /* Invalid, zero could be valid */
+ if (pdata)
+ port->rts_gpio = pdata->rts_gpio;
+ else if (np)
+ port->rts_gpio = of_get_named_gpio(np, "rts-gpios", 0);
+
+ if (gpio_is_valid(port->rts_gpio)) {
+ ret = devm_gpio_request(&pdev->dev, port->rts_gpio, "RTS");
+ if (ret) {
+ dev_err(&pdev->dev, "error requesting RTS GPIO\n");
+ goto err;
+ }
+ /* Default to 1 as RTS is active low */
+ ret = gpio_direction_output(port->rts_gpio, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "error setting up RTS GPIO\n");
+ goto err;
+ }
+ }
ret = atmel_init_port(port, pdev);
if (ret)
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 78e82b017b92..a47421e4627c 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -30,6 +30,8 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_bcm63xx.h>
+#include <linux/io.h>
+#include <linux/of.h>
#define BCM63XX_NR_UARTS 2
@@ -588,7 +590,7 @@ static int bcm_uart_request_port(struct uart_port *port)
{
unsigned int size;
- size = RSET_UART_SIZE;
+ size = UART_REG_SIZE;
if (!request_mem_region(port->mapbase, size, "bcm63xx")) {
dev_err(port->dev, "Memory region busy\n");
return -EBUSY;
@@ -608,7 +610,7 @@ static int bcm_uart_request_port(struct uart_port *port)
*/
static void bcm_uart_release_port(struct uart_port *port)
{
- release_mem_region(port->mapbase, RSET_UART_SIZE);
+ release_mem_region(port->mapbase, UART_REG_SIZE);
iounmap(port->membase);
}
@@ -805,6 +807,9 @@ static int bcm_uart_probe(struct platform_device *pdev)
struct clk *clk;
int ret;
+ if (pdev->dev.of_node)
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+
if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
return -EINVAL;
@@ -856,6 +861,12 @@ static int bcm_uart_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id bcm63xx_of_match[] = {
+ { .compatible = "brcm,bcm6345-uart" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm63xx_of_match);
+
/*
* platform driver stuff
*/
@@ -865,6 +876,7 @@ static struct platform_driver bcm_uart_platform_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "bcm63xx_uart",
+ .of_match_table = bcm63xx_of_match,
},
};
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index b0eacb83f831..5e6fdb1ea73b 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -368,11 +368,16 @@ static const struct uart_ops uart_clps711x_ops = {
static void uart_clps711x_console_putchar(struct uart_port *port, int ch)
{
struct clps711x_port *s = dev_get_drvdata(port->dev);
- u32 sysflg = 0;
- do {
+ /* Wait until the FIFO is not full */
+ while (1) {
+ u32 sysflg = 0;
+
regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
- } while (sysflg & SYSFLG_UTXFF);
+ if (!(sysflg & SYSFLG_UTXFF))
+ break;
+ cond_resched();
+ }
writew(ch, port->membase + UARTDR_OFFSET);
}
@@ -382,14 +387,18 @@ static void uart_clps711x_console_write(struct console *co, const char *c,
{
struct uart_port *port = clps711x_uart.state[co->index].uart_port;
struct clps711x_port *s = dev_get_drvdata(port->dev);
- u32 sysflg = 0;
uart_console_write(port, c, n, uart_clps711x_console_putchar);
/* Wait for transmitter to become empty */
- do {
+ while (1) {
+ u32 sysflg = 0;
+
regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
- } while (sysflg & SYSFLG_UBUSY);
+ if (!(sysflg & SYSFLG_UBUSY))
+ break;
+ cond_resched();
+ }
}
static int uart_clps711x_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 690bdea0a0c1..d567ac5d3af4 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -286,7 +286,6 @@ static struct e100_serial rs_table[] = {
#endif
}, /* ttyS0 */
-#ifndef CONFIG_SVINTO_SIM
{ .baud = DEF_BAUD,
.ioport = (unsigned char *)R_SERIAL1_CTRL,
.irq = 1U << 16, /* uses DMA 8 and 9 */
@@ -447,7 +446,6 @@ static struct e100_serial rs_table[] = {
.dma_in_enabled = 0
#endif
} /* ttyS3 */
-#endif
};
@@ -1035,7 +1033,6 @@ cflag_to_etrax_baud(unsigned int cflag)
static inline void
e100_dtr(struct e100_serial *info, int set)
{
-#ifndef CONFIG_SVINTO_SIM
unsigned char mask = e100_modem_pins[info->line].dtr_mask;
#ifdef SERIAL_DEBUG_IO
@@ -1060,7 +1057,6 @@ e100_dtr(struct e100_serial *info, int set)
info->line, *e100_modem_pins[info->line].dtr_shadow,
E100_DTR_GET(info));
#endif
-#endif
}
/* set = 0 means 3.3V on the pin, bitvalue: 0=active, 1=inactive
@@ -1069,7 +1065,6 @@ e100_dtr(struct e100_serial *info, int set)
static inline void
e100_rts(struct e100_serial *info, int set)
{
-#ifndef CONFIG_SVINTO_SIM
unsigned long flags;
local_irq_save(flags);
info->rx_ctrl &= ~E100_RTS_MASK;
@@ -1079,7 +1074,6 @@ e100_rts(struct e100_serial *info, int set)
#ifdef SERIAL_DEBUG_IO
printk("ser%i rts %i\n", info->line, set);
#endif
-#endif
}
@@ -1087,7 +1081,6 @@ e100_rts(struct e100_serial *info, int set)
static inline void
e100_ri_out(struct e100_serial *info, int set)
{
-#ifndef CONFIG_SVINTO_SIM
/* RI is active low */
{
unsigned char mask = e100_modem_pins[info->line].ri_mask;
@@ -1099,12 +1092,10 @@ e100_ri_out(struct e100_serial *info, int set)
*e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow;
local_irq_restore(flags);
}
-#endif
}
static inline void
e100_cd_out(struct e100_serial *info, int set)
{
-#ifndef CONFIG_SVINTO_SIM
/* CD is active low */
{
unsigned char mask = e100_modem_pins[info->line].cd_mask;
@@ -1116,27 +1107,22 @@ e100_cd_out(struct e100_serial *info, int set)
*e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow;
local_irq_restore(flags);
}
-#endif
}
static inline void
e100_disable_rx(struct e100_serial *info)
{
-#ifndef CONFIG_SVINTO_SIM
/* disable the receiver */
info->ioport[REG_REC_CTRL] =
(info->rx_ctrl &= ~IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
-#endif
}
static inline void
e100_enable_rx(struct e100_serial *info)
{
-#ifndef CONFIG_SVINTO_SIM
/* enable the receiver */
info->ioport[REG_REC_CTRL] =
(info->rx_ctrl |= IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
-#endif
}
/* the rx DMA uses both the dma_descr and the dma_eop interrupts */
@@ -1554,24 +1540,6 @@ transmit_chars_dma(struct e100_serial *info)
unsigned int c, sentl;
struct etrax_dma_descr *descr;
-#ifdef CONFIG_SVINTO_SIM
- /* This will output too little if tail is not 0 always since
- * we don't reloop to send the other part. Anyway this SHOULD be a
- * no-op - transmit_chars_dma would never really be called during sim
- * since rs_write does not write into the xmit buffer then.
- */
- if (info->xmit.tail)
- printk("Error in serial.c:transmit_chars-dma(), tail!=0\n");
- if (info->xmit.head != info->xmit.tail) {
- SIMCOUT(info->xmit.buf + info->xmit.tail,
- CIRC_CNT(info->xmit.head,
- info->xmit.tail,
- SERIAL_XMIT_SIZE));
- info->xmit.head = info->xmit.tail; /* move back head */
- info->tr_running = 0;
- }
- return;
-#endif
/* acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
*info->oclrintradr =
IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
@@ -1842,13 +1810,6 @@ static void receive_chars_dma(struct e100_serial *info)
struct tty_struct *tty;
unsigned char rstat;
-#ifdef CONFIG_SVINTO_SIM
- /* No receive in the simulator. Will probably be when the rest of
- * the serial interface works, and this piece will just be removed.
- */
- return;
-#endif
-
/* Acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
*info->iclrintradr =
IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
@@ -1934,12 +1895,6 @@ static int start_recv_dma(struct e100_serial *info)
static void
start_receive(struct e100_serial *info)
{
-#ifdef CONFIG_SVINTO_SIM
- /* No receive in the simulator. Will probably be when the rest of
- * the serial interface works, and this piece will just be removed.
- */
- return;
-#endif
if (info->uses_dma_in) {
/* reset the input dma channel to be sure it works */
@@ -1972,17 +1927,6 @@ tr_interrupt(int irq, void *dev_id)
int i;
int handled = 0;
-#ifdef CONFIG_SVINTO_SIM
- /* No receive in the simulator. Will probably be when the rest of
- * the serial interface works, and this piece will just be removed.
- */
- {
- const char *s = "What? tr_interrupt in simulator??\n";
- SIMCOUT(s,strlen(s));
- }
- return IRQ_HANDLED;
-#endif
-
/* find out the line that caused this irq and get it from rs_table */
ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
@@ -2021,17 +1965,6 @@ rec_interrupt(int irq, void *dev_id)
int i;
int handled = 0;
-#ifdef CONFIG_SVINTO_SIM
- /* No receive in the simulator. Will probably be when the rest of
- * the serial interface works, and this piece will just be removed.
- */
- {
- const char *s = "What? rec_interrupt in simulator??\n";
- SIMCOUT(s,strlen(s));
- }
- return IRQ_HANDLED;
-#endif
-
/* find out the line that caused this irq and get it from rs_table */
ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
@@ -2173,10 +2106,6 @@ timed_flush_handler(unsigned long ptr)
struct e100_serial *info;
int i;
-#ifdef CONFIG_SVINTO_SIM
- return;
-#endif
-
for (i = 0; i < NR_PORTS; i++) {
info = rs_table + i;
if (info->uses_dma_in)
@@ -2729,25 +2658,6 @@ startup(struct e100_serial * info)
printk("starting up ttyS%d (xmit_buf 0x%p)...\n", info->line, info->xmit.buf);
#endif
-#ifdef CONFIG_SVINTO_SIM
- /* Bits and pieces collected from below. Better to have them
- in one ifdef:ed clause than to mix in a lot of ifdefs,
- right? */
- if (info->port.tty)
- clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
-
- info->xmit.head = info->xmit.tail = 0;
- info->first_recv_buffer = info->last_recv_buffer = NULL;
- info->recv_cnt = info->max_recv_cnt = 0;
-
- for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
- info->rec_descr[i].buf = NULL;
-
- /* No real action in the simulator, but may set info important
- to ioctl. */
- change_speed(info);
-#else
-
/*
* Clear the FIFO buffers and disable them
* (they will be reenabled in change_speed())
@@ -2837,8 +2747,6 @@ startup(struct e100_serial * info)
e100_rts(info, 1);
e100_dtr(info, 1);
-#endif /* CONFIG_SVINTO_SIM */
-
info->port.flags |= ASYNC_INITIALIZED;
local_irq_restore(flags);
@@ -2857,7 +2765,6 @@ shutdown(struct e100_serial * info)
struct etrax_recv_buffer *buffer;
int i;
-#ifndef CONFIG_SVINTO_SIM
/* shut down the transmitter and receiver */
DFLOW(DEBUG_LOG(info->line, "shutdown %i\n", info->line));
e100_disable_rx(info);
@@ -2882,8 +2789,6 @@ shutdown(struct e100_serial * info)
info->tr_running = 0;
}
-#endif /* CONFIG_SVINTO_SIM */
-
if (!(info->port.flags & ASYNC_INITIALIZED))
return;
@@ -2995,17 +2900,12 @@ change_speed(struct e100_serial *info)
IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
r_alt_ser_baudrate_shadow &= ~mask;
r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
-#ifndef CONFIG_SVINTO_SIM
*R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
-#endif /* CONFIG_SVINTO_SIM */
info->baud = cflag_to_baud(cflag);
-#ifndef CONFIG_SVINTO_SIM
info->ioport[REG_BAUD] = cflag_to_etrax_baud(cflag);
-#endif /* CONFIG_SVINTO_SIM */
}
-#ifndef CONFIG_SVINTO_SIM
/* start with default settings and then fill in changes */
local_irq_save(flags);
/* 8 bit, no/even parity */
@@ -3073,7 +2973,6 @@ change_speed(struct e100_serial *info)
*((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
local_irq_restore(flags);
-#endif /* !CONFIG_SVINTO_SIM */
update_char_time(info);
@@ -3122,11 +3021,6 @@ static int rs_raw_write(struct tty_struct *tty,
count, info->ioport[REG_STATUS]);
#endif
-#ifdef CONFIG_SVINTO_SIM
- /* Really simple. The output is here and now. */
- SIMCOUT(buf, count);
- return count;
-#endif
local_save_flags(flags);
DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
@@ -3463,7 +3357,6 @@ static int
get_lsr_info(struct e100_serial * info, unsigned int *value)
{
unsigned int result = TIOCSER_TEMT;
-#ifndef CONFIG_SVINTO_SIM
unsigned long curr_time = jiffies;
unsigned long curr_time_usec = GET_JIFFIES_USEC();
unsigned long elapsed_usec =
@@ -3474,7 +3367,6 @@ get_lsr_info(struct e100_serial * info, unsigned int *value)
elapsed_usec < 2*info->char_time_usec) {
result = 0;
}
-#endif
if (copy_to_user(value, &result, sizeof(int)))
return -EFAULT;
@@ -3804,7 +3696,6 @@ rs_close(struct tty_struct *tty, struct file * filp)
e100_disable_serial_data_irq(info);
#endif
-#ifndef CONFIG_SVINTO_SIM
e100_disable_rx(info);
e100_disable_rx_irq(info);
@@ -3816,7 +3707,6 @@ rs_close(struct tty_struct *tty, struct file * filp)
*/
rs_wait_until_sent(tty, HZ);
}
-#endif
shutdown(info);
rs_flush_buffer(tty);
@@ -4479,7 +4369,6 @@ static int __init rs_init(void)
fast_timer_init();
#endif
-#ifndef CONFIG_SVINTO_SIM
#ifndef CONFIG_ETRAX_KGDB
/* Not needed in simulator. May only complicate stuff. */
/* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */
@@ -4489,7 +4378,6 @@ static int __init rs_init(void)
panic("%s: Failed to request irq8", __func__);
#endif
-#endif /* CONFIG_SVINTO_SIM */
return 0;
}
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 0eb5b5673ede..028582e924a5 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -671,7 +671,10 @@ static int efm32_uart_probe_dt(struct platform_device *pdev,
if (!np)
return 1;
- ret = of_property_read_u32(np, "location", &location);
+ ret = of_property_read_u32(np, "efm32,location", &location);
+ if (ret)
+ /* fall back to old and (wrongly) generic property "location" */
+ ret = of_property_read_u32(np, "location", &location);
if (!ret) {
if (location > 5) {
dev_err(&pdev->dev, "invalid location\n");
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 8978dc9a58b7..c5eb897de9de 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -13,14 +13,19 @@
#define SUPPORT_SYSRQ
#endif
-#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/clk.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/console.h>
+#include <linux/of_dma.h>
#include <linux/serial_core.h>
+#include <linux/slab.h>
#include <linux/tty_flip.h>
/* All registers are 8-bit width */
@@ -112,6 +117,10 @@
#define UARTSFIFO_TXOF 0x02
#define UARTSFIFO_RXUF 0x01
+#define DMA_MAXBURST 16
+#define DMA_MAXBURST_MASK (DMA_MAXBURST - 1)
+#define FSL_UART_RX_DMA_BUFFER_SIZE 64
+
#define DRIVER_NAME "fsl-lpuart"
#define DEV_NAME "ttyLP"
#define UART_NR 6
@@ -121,6 +130,24 @@ struct lpuart_port {
struct clk *clk;
unsigned int txfifo_size;
unsigned int rxfifo_size;
+
+ bool lpuart_dma_use;
+ struct dma_chan *dma_tx_chan;
+ struct dma_chan *dma_rx_chan;
+ struct dma_async_tx_descriptor *dma_tx_desc;
+ struct dma_async_tx_descriptor *dma_rx_desc;
+ dma_addr_t dma_tx_buf_bus;
+ dma_addr_t dma_rx_buf_bus;
+ dma_cookie_t dma_tx_cookie;
+ dma_cookie_t dma_rx_cookie;
+ unsigned char *dma_tx_buf_virt;
+ unsigned char *dma_rx_buf_virt;
+ unsigned int dma_tx_bytes;
+ unsigned int dma_rx_bytes;
+ int dma_tx_in_progress;
+ int dma_rx_in_progress;
+ unsigned int dma_rx_timeout;
+ struct timer_list lpuart_timer;
};
static struct of_device_id lpuart_dt_ids[] = {
@@ -131,6 +158,10 @@ static struct of_device_id lpuart_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
+/* Forward declare these for the DMA callbacks */
+static void lpuart_dma_tx_complete(void *arg);
+static void lpuart_dma_rx_complete(void *arg);
+
static void lpuart_stop_tx(struct uart_port *port)
{
unsigned char temp;
@@ -152,6 +183,210 @@ static void lpuart_enable_ms(struct uart_port *port)
{
}
+static void lpuart_copy_rx_to_tty(struct lpuart_port *sport,
+ struct tty_port *tty, int count)
+{
+ int copied;
+
+ sport->port.icount.rx += count;
+
+ if (!tty) {
+ dev_err(sport->port.dev, "No tty port\n");
+ return;
+ }
+
+ dma_sync_single_for_cpu(sport->port.dev, sport->dma_rx_buf_bus,
+ FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ copied = tty_insert_flip_string(tty,
+ ((unsigned char *)(sport->dma_rx_buf_virt)), count);
+
+ if (copied != count) {
+ WARN_ON(1);
+ dev_err(sport->port.dev, "RxData copy to tty layer failed\n");
+ }
+
+ dma_sync_single_for_device(sport->port.dev, sport->dma_rx_buf_bus,
+ FSL_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+}
+
+static void lpuart_pio_tx(struct lpuart_port *sport)
+{
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ while (!uart_circ_empty(xmit) &&
+ readb(sport->port.membase + UARTTCFIFO) < sport->txfifo_size) {
+ writeb(xmit->buf[xmit->tail], sport->port.membase + UARTDR);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ sport->port.icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+
+ if (uart_circ_empty(xmit))
+ writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_TDMAS,
+ sport->port.membase + UARTCR5);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static int lpuart_dma_tx(struct lpuart_port *sport, unsigned long count)
+{
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ dma_addr_t tx_bus_addr;
+
+ dma_sync_single_for_device(sport->port.dev, sport->dma_tx_buf_bus,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ sport->dma_tx_bytes = count & ~(DMA_MAXBURST_MASK);
+ tx_bus_addr = sport->dma_tx_buf_bus + xmit->tail;
+ sport->dma_tx_desc = dmaengine_prep_slave_single(sport->dma_tx_chan,
+ tx_bus_addr, sport->dma_tx_bytes,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+
+ if (!sport->dma_tx_desc) {
+ dev_err(sport->port.dev, "Not able to get desc for tx\n");
+ return -EIO;
+ }
+
+ sport->dma_tx_desc->callback = lpuart_dma_tx_complete;
+ sport->dma_tx_desc->callback_param = sport;
+ sport->dma_tx_in_progress = 1;
+ sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
+ dma_async_issue_pending(sport->dma_tx_chan);
+
+ return 0;
+}
+
+static void lpuart_prepare_tx(struct lpuart_port *sport)
+{
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ unsigned long count = CIRC_CNT_TO_END(xmit->head,
+ xmit->tail, UART_XMIT_SIZE);
+
+ if (!count)
+ return;
+
+ if (count < DMA_MAXBURST)
+ writeb(readb(sport->port.membase + UARTCR5) & ~UARTCR5_TDMAS,
+ sport->port.membase + UARTCR5);
+ else {
+ writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_TDMAS,
+ sport->port.membase + UARTCR5);
+ lpuart_dma_tx(sport, count);
+ }
+}
+
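Note that lpuart_prepare_tx() only engages DMA once at least one full burst is queued, and lpuart_dma_tx() trims the transfer down to a multiple of DMA_MAXBURST; whatever is left over goes out through the PIO path once fewer than DMA_MAXBURST bytes remain. A tiny illustration of that rounding, with an arbitrary byte count:

#include <stdio.h>

#define DMA_MAXBURST		16
#define DMA_MAXBURST_MASK	(DMA_MAXBURST - 1)

/* A 37-byte run is trimmed to a 32-byte DMA transfer; the 5-byte tail is
 * handled by PIO on a later pass. The count is arbitrary. */
int main(void)
{
	unsigned long count = 37;

	printf("dma bytes = %lu\n", count & ~(unsigned long)DMA_MAXBURST_MASK);	/* 32 */
	return 0;
}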
+static void lpuart_dma_tx_complete(void *arg)
+{
+ struct lpuart_port *sport = arg;
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ unsigned long flags;
+
+ async_tx_ack(sport->dma_tx_desc);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1);
+ sport->dma_tx_in_progress = 0;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+
+ lpuart_prepare_tx(sport);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static int lpuart_dma_rx(struct lpuart_port *sport)
+{
+ dma_sync_single_for_device(sport->port.dev, sport->dma_rx_buf_bus,
+ FSL_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+ sport->dma_rx_desc = dmaengine_prep_slave_single(sport->dma_rx_chan,
+ sport->dma_rx_buf_bus, FSL_UART_RX_DMA_BUFFER_SIZE,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+
+ if (!sport->dma_rx_desc) {
+ dev_err(sport->port.dev, "Not able to get desc for rx\n");
+ return -EIO;
+ }
+
+ sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
+ sport->dma_rx_desc->callback_param = sport;
+ sport->dma_rx_in_progress = 1;
+ sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
+ dma_async_issue_pending(sport->dma_rx_chan);
+
+ return 0;
+}
+
+static void lpuart_dma_rx_complete(void *arg)
+{
+ struct lpuart_port *sport = arg;
+ struct tty_port *port = &sport->port.state->port;
+ unsigned long flags;
+
+ async_tx_ack(sport->dma_rx_desc);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ sport->dma_rx_in_progress = 0;
+ lpuart_copy_rx_to_tty(sport, port, FSL_UART_RX_DMA_BUFFER_SIZE);
+ tty_flip_buffer_push(port);
+ lpuart_dma_rx(sport);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static void lpuart_timer_func(unsigned long data)
+{
+ struct lpuart_port *sport = (struct lpuart_port *)data;
+ struct tty_port *port = &sport->port.state->port;
+ struct dma_tx_state state;
+ unsigned long flags;
+ unsigned char temp;
+ int count;
+
+ del_timer(&sport->lpuart_timer);
+ dmaengine_pause(sport->dma_rx_chan);
+ dmaengine_tx_status(sport->dma_rx_chan, sport->dma_rx_cookie, &state);
+ dmaengine_terminate_all(sport->dma_rx_chan);
+ count = FSL_UART_RX_DMA_BUFFER_SIZE - state.residue;
+ async_tx_ack(sport->dma_rx_desc);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ sport->dma_rx_in_progress = 0;
+ lpuart_copy_rx_to_tty(sport, port, count);
+ tty_flip_buffer_push(port);
+ temp = readb(sport->port.membase + UARTCR5);
+ writeb(temp & ~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static inline void lpuart_prepare_rx(struct lpuart_port *sport)
+{
+ unsigned long flags;
+ unsigned char temp;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ init_timer(&sport->lpuart_timer);
+ sport->lpuart_timer.function = lpuart_timer_func;
+ sport->lpuart_timer.data = (unsigned long)sport;
+ sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
+ add_timer(&sport->lpuart_timer);
+
+ lpuart_dma_rx(sport);
+ temp = readb(sport->port.membase + UARTCR5);
+ writeb(temp | UARTCR5_RDMAS, sport->port.membase + UARTCR5);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
@@ -172,14 +407,21 @@ static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
static void lpuart_start_tx(struct uart_port *port)
{
- struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct circ_buf *xmit = &sport->port.state->xmit;
unsigned char temp;
temp = readb(port->membase + UARTCR2);
writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
- if (readb(port->membase + UARTSR1) & UARTSR1_TDRE)
- lpuart_transmit_buffer(sport);
+ if (sport->lpuart_dma_use) {
+ if (!uart_circ_empty(xmit) && !sport->dma_tx_in_progress)
+ lpuart_prepare_tx(sport);
+ } else {
+ if (readb(port->membase + UARTSR1) & UARTSR1_TDRE)
+ lpuart_transmit_buffer(sport);
+ }
}
static irqreturn_t lpuart_txint(int irq, void *dev_id)
@@ -279,12 +521,19 @@ static irqreturn_t lpuart_int(int irq, void *dev_id)
sts = readb(sport->port.membase + UARTSR1);
- if (sts & UARTSR1_RDRF)
- lpuart_rxint(irq, dev_id);
-
+ if (sts & UARTSR1_RDRF) {
+ if (sport->lpuart_dma_use)
+ lpuart_prepare_rx(sport);
+ else
+ lpuart_rxint(irq, dev_id);
+ }
if (sts & UARTSR1_TDRE &&
- !(readb(sport->port.membase + UARTCR5) & UARTCR5_TDMAS))
- lpuart_txint(irq, dev_id);
+ !(readb(sport->port.membase + UARTCR5) & UARTCR5_TDMAS)) {
+ if (sport->lpuart_dma_use)
+ lpuart_pio_tx(sport);
+ else
+ lpuart_txint(irq, dev_id);
+ }
return IRQ_HANDLED;
}
@@ -366,13 +615,156 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
sport->port.membase + UARTCFIFO);
- writeb(2, sport->port.membase + UARTTWFIFO);
+ writeb(0, sport->port.membase + UARTTWFIFO);
writeb(1, sport->port.membase + UARTRWFIFO);
/* Restore cr2 */
writeb(cr2_saved, sport->port.membase + UARTCR2);
}
+static int lpuart_dma_tx_request(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct dma_chan *tx_chan;
+ struct dma_slave_config dma_tx_sconfig;
+ dma_addr_t dma_bus;
+ unsigned char *dma_buf;
+ int ret;
+
+ tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
+
+ if (!tx_chan) {
+ dev_err(sport->port.dev, "Dma tx channel request failed!\n");
+ return -ENODEV;
+ }
+
+ dma_bus = dma_map_single(tx_chan->device->dev,
+ sport->port.state->xmit.buf,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(tx_chan->device->dev, dma_bus)) {
+ dev_err(sport->port.dev, "dma_map_single tx failed\n");
+ dma_release_channel(tx_chan);
+ return -ENOMEM;
+ }
+
+ dma_buf = sport->port.state->xmit.buf;
+ dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDR;
+ dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_tx_sconfig.dst_maxburst = DMA_MAXBURST;
+ dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(tx_chan, &dma_tx_sconfig);
+
+ if (ret < 0) {
+ dev_err(sport->port.dev,
+ "Dma slave config failed, err = %d\n", ret);
+ dma_release_channel(tx_chan);
+ return ret;
+ }
+
+ sport->dma_tx_chan = tx_chan;
+ sport->dma_tx_buf_virt = dma_buf;
+ sport->dma_tx_buf_bus = dma_bus;
+ sport->dma_tx_in_progress = 0;
+
+ return 0;
+}
+
+static int lpuart_dma_rx_request(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct dma_chan *rx_chan;
+ struct dma_slave_config dma_rx_sconfig;
+ dma_addr_t dma_bus;
+ unsigned char *dma_buf;
+ int ret;
+
+ rx_chan = dma_request_slave_channel(sport->port.dev, "rx");
+
+ if (!rx_chan) {
+ dev_err(sport->port.dev, "Dma rx channel request failed!\n");
+ return -ENODEV;
+ }
+
+ dma_buf = devm_kzalloc(sport->port.dev,
+ FSL_UART_RX_DMA_BUFFER_SIZE, GFP_KERNEL);
+
+ if (!dma_buf) {
+ dev_err(sport->port.dev, "Dma rx alloc failed\n");
+ dma_release_channel(rx_chan);
+ return -ENOMEM;
+ }
+
+ dma_bus = dma_map_single(rx_chan->device->dev, dma_buf,
+ FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(rx_chan->device->dev, dma_bus)) {
+ dev_err(sport->port.dev, "dma_map_single rx failed\n");
+ dma_release_channel(rx_chan);
+ return -ENOMEM;
+ }
+
+ dma_rx_sconfig.src_addr = sport->port.mapbase + UARTDR;
+ dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_rx_sconfig.src_maxburst = 1;
+ dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(rx_chan, &dma_rx_sconfig);
+
+ if (ret < 0) {
+ dev_err(sport->port.dev,
+ "Dma slave config failed, err = %d\n", ret);
+ dma_release_channel(rx_chan);
+ return ret;
+ }
+
+ sport->dma_rx_chan = rx_chan;
+ sport->dma_rx_buf_virt = dma_buf;
+ sport->dma_rx_buf_bus = dma_bus;
+ sport->dma_rx_in_progress = 0;
+
+ sport->dma_rx_timeout = (sport->port.timeout - HZ / 50) *
+ FSL_UART_RX_DMA_BUFFER_SIZE * 3 /
+ sport->rxfifo_size / 2;
+
+ if (sport->dma_rx_timeout < msecs_to_jiffies(20))
+ sport->dma_rx_timeout = msecs_to_jiffies(20);
+
+ return 0;
+}
+
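The RX timeout above scales the port's drain time by 1.5 DMA buffers' worth of FIFO fills, which, given how the serial core derives port.timeout, is roughly the time needed to receive the 64-byte DMA buffer one and a half times over, and then enforces a 20 ms floor. A throwaway sketch with made-up inputs (HZ = 100, port.timeout = 12 jiffies, an 8-entry RX FIFO), purely to show the arithmetic and the clamp:

#include <stdio.h>

#define HZ	100	/* assumed tick rate, illustration only */
#define BUF	64	/* FSL_UART_RX_DMA_BUFFER_SIZE */

/* Hypothetical inputs: port.timeout = 12 jiffies, rxfifo_size = 8. */
int main(void)
{
	unsigned int timeout = (12 - HZ / 50) * BUF * 3 / 8 / 2;	/* 120 jiffies */
	unsigned int min_jiffies = 20 * HZ / 1000;			/* msecs_to_jiffies(20) = 2 */

	printf("dma_rx_timeout = %u jiffies\n",
	       timeout < min_jiffies ? min_jiffies : timeout);
	return 0;
}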
+static void lpuart_dma_tx_free(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct dma_chan *dma_chan;
+
+ dma_unmap_single(sport->port.dev, sport->dma_tx_buf_bus,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ dma_chan = sport->dma_tx_chan;
+ sport->dma_tx_chan = NULL;
+ sport->dma_tx_buf_bus = 0;
+ sport->dma_tx_buf_virt = NULL;
+ dma_release_channel(dma_chan);
+}
+
+static void lpuart_dma_rx_free(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct dma_chan *dma_chan;
+
+ dma_unmap_single(sport->port.dev, sport->dma_rx_buf_bus,
+ FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+ dma_chan = sport->dma_rx_chan;
+ sport->dma_rx_chan = NULL;
+ sport->dma_rx_buf_bus = 0;
+ sport->dma_rx_buf_virt = NULL;
+ dma_release_channel(dma_chan);
+}
+
static int lpuart_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
@@ -380,6 +772,15 @@ static int lpuart_startup(struct uart_port *port)
unsigned long flags;
unsigned char temp;
+ /* Decide whether to use DMA based on the DMA channel request results */
+ if (lpuart_dma_tx_request(port) || lpuart_dma_rx_request(port)) {
+ sport->lpuart_dma_use = false;
+ } else {
+ sport->lpuart_dma_use = true;
+ temp = readb(port->membase + UARTCR5);
+ writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5);
+ }
+
ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0,
DRIVER_NAME, sport);
if (ret)
@@ -414,6 +815,11 @@ static void lpuart_shutdown(struct uart_port *port)
spin_unlock_irqrestore(&port->lock, flags);
devm_free_irq(port->dev, port->irq, sport);
+
+ if (sport->lpuart_dma_use) {
+ lpuart_dma_tx_free(port);
+ lpuart_dma_rx_free(port);
+ }
}
static void
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index d799140e53b6..3b6c1a2e25de 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -496,8 +496,7 @@ static void dma_tx_callback(void *data)
dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&sport->port);
+ uart_write_wakeup(&sport->port);
if (waitqueue_active(&sport->dma_wait)) {
wake_up(&sport->dma_wait);
@@ -1117,25 +1116,25 @@ static int imx_startup(struct uart_port *port)
*/
if (sport->txirq > 0) {
retval = request_irq(sport->rxirq, imx_rxint, 0,
- DRIVER_NAME, sport);
+ dev_name(port->dev), sport);
if (retval)
goto error_out1;
retval = request_irq(sport->txirq, imx_txint, 0,
- DRIVER_NAME, sport);
+ dev_name(port->dev), sport);
if (retval)
goto error_out2;
/* do not use RTS IRQ on IrDA */
if (!USE_IRDA(sport)) {
retval = request_irq(sport->rtsirq, imx_rtsint, 0,
- DRIVER_NAME, sport);
+ dev_name(port->dev), sport);
if (retval)
goto error_out3;
}
} else {
retval = request_irq(sport->port.irq, imx_int, 0,
- DRIVER_NAME, sport);
+ dev_name(port->dev), sport);
if (retval) {
free_irq(sport->port.irq, sport);
goto error_out1;
@@ -1470,44 +1469,13 @@ static const char *imx_type(struct uart_port *port)
}
/*
- * Release the memory region(s) being used by 'port'.
- */
-static void imx_release_port(struct uart_port *port)
-{
- struct platform_device *pdev = to_platform_device(port->dev);
- struct resource *mmres;
-
- mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mmres->start, resource_size(mmres));
-}
-
-/*
- * Request the memory region(s) being used by 'port'.
- */
-static int imx_request_port(struct uart_port *port)
-{
- struct platform_device *pdev = to_platform_device(port->dev);
- struct resource *mmres;
- void *ret;
-
- mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mmres)
- return -ENODEV;
-
- ret = request_mem_region(mmres->start, resource_size(mmres), "imx-uart");
-
- return ret ? 0 : -EBUSY;
-}
-
-/*
* Configure/autoconfigure the port.
*/
static void imx_config_port(struct uart_port *port, int flags)
{
struct imx_port *sport = (struct imx_port *)port;
- if (flags & UART_CONFIG_TYPE &&
- imx_request_port(&sport->port) == 0)
+ if (flags & UART_CONFIG_TYPE)
sport->port.type = PORT_IMX;
}
@@ -1617,8 +1585,6 @@ static struct uart_ops imx_pops = {
.flush_buffer = imx_flush_buffer,
.set_termios = imx_set_termios,
.type = imx_type,
- .release_port = imx_release_port,
- .request_port = imx_request_port,
.config_port = imx_config_port,
.verify_port = imx_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
@@ -1935,7 +1901,6 @@ static void serial_imx_probe_pdata(struct imx_port *sport,
static int serial_imx_probe(struct platform_device *pdev)
{
struct imx_port *sport;
- struct imxuart_platform_data *pdata;
void __iomem *base;
int ret = 0;
struct resource *res;
@@ -1951,12 +1916,9 @@ static int serial_imx_probe(struct platform_device *pdev)
return ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- base = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE);
- if (!base)
- return -ENOMEM;
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
sport->port.dev = &pdev->dev;
sport->port.mapbase = res->start;
@@ -1992,38 +1954,16 @@ static int serial_imx_probe(struct platform_device *pdev)
imx_ports[sport->port.line] = sport;
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata && pdata->init) {
- ret = pdata->init(pdev);
- if (ret)
- return ret;
- }
-
- ret = uart_add_one_port(&imx_reg, &sport->port);
- if (ret)
- goto deinit;
platform_set_drvdata(pdev, sport);
- return 0;
-deinit:
- if (pdata && pdata->exit)
- pdata->exit(pdev);
- return ret;
+ return uart_add_one_port(&imx_reg, &sport->port);
}
static int serial_imx_remove(struct platform_device *pdev)
{
- struct imxuart_platform_data *pdata;
struct imx_port *sport = platform_get_drvdata(pdev);
- pdata = dev_get_platdata(&pdev->dev);
-
- uart_remove_one_port(&imx_reg, &sport->port);
-
- if (pdata && pdata->exit)
- pdata->exit(pdev);
-
- return 0;
+ return uart_remove_one_port(&imx_reg, &sport->port);
}
static struct platform_driver serial_imx_driver = {
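The imx.c changes above drop the driver's open-coded request_mem_region()/devm_ioremap() pair together with the release_port/request_port callbacks and let devm_ioremap_resource() request and map the MMIO window in one step, returning an ERR_PTR on failure. A minimal probe sketch of that idiom follows; it is a generic illustration assuming a platform device with one MEM resource, not the imx code itself.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        /* First MMIO resource declared for this device */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        /* Request + ioremap in one call; a NULL res is handled internally */
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* base is usable from here; devm releases everything on detach */
        return 0;
}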
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 8d71e4047bb3..2a99d0c61b9e 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1,7 +1,7 @@
/*
* Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver
*
- * Copyright (C) 2012-2013 Alexander Shiyan <shc_work@mail.ru>
+ * Copyright (C) 2012-2014 Alexander Shiyan <shc_work@mail.ru>
*
* Based on max3100.c, by Christian Pellegrin <chripell@evolware.org>
* Based on max3110.c, by Feng Tang <feng.tang@intel.com>
@@ -13,19 +13,21 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/regmap.h>
-#include <linux/gpio.h>
#include <linux/spi/spi.h>
-
-#include <linux/platform_data/max310x.h>
+#include <linux/uaccess.h>
#define MAX310X_NAME "max310x"
#define MAX310X_MAJOR 204
@@ -161,10 +163,6 @@
/* IRDA register bits */
#define MAX310X_IRDA_IRDAEN_BIT (1 << 0) /* IRDA mode enable */
#define MAX310X_IRDA_SIR_BIT (1 << 1) /* SIR mode enable */
-#define MAX310X_IRDA_SHORTIR_BIT (1 << 2) /* Short SIR mode enable */
-#define MAX310X_IRDA_MIR_BIT (1 << 3) /* MIR mode enable */
-#define MAX310X_IRDA_RXINV_BIT (1 << 4) /* RX logic inversion enable */
-#define MAX310X_IRDA_TXINV_BIT (1 << 5) /* TX logic inversion enable */
/* Flow control trigger level register masks */
#define MAX310X_FLOWLVL_HALT_MASK (0x000f) /* Flow control halt level */
@@ -220,26 +218,6 @@
* XOFF2
*/
-/* GPIO configuration register bits */
-#define MAX310X_GPIOCFG_GP0OUT_BIT (1 << 0) /* GPIO 0 output enable */
-#define MAX310X_GPIOCFG_GP1OUT_BIT (1 << 1) /* GPIO 1 output enable */
-#define MAX310X_GPIOCFG_GP2OUT_BIT (1 << 2) /* GPIO 2 output enable */
-#define MAX310X_GPIOCFG_GP3OUT_BIT (1 << 3) /* GPIO 3 output enable */
-#define MAX310X_GPIOCFG_GP0OD_BIT (1 << 4) /* GPIO 0 open-drain enable */
-#define MAX310X_GPIOCFG_GP1OD_BIT (1 << 5) /* GPIO 1 open-drain enable */
-#define MAX310X_GPIOCFG_GP2OD_BIT (1 << 6) /* GPIO 2 open-drain enable */
-#define MAX310X_GPIOCFG_GP3OD_BIT (1 << 7) /* GPIO 3 open-drain enable */
-
-/* GPIO DATA register bits */
-#define MAX310X_GPIODATA_GP0OUT_BIT (1 << 0) /* GPIO 0 output value */
-#define MAX310X_GPIODATA_GP1OUT_BIT (1 << 1) /* GPIO 1 output value */
-#define MAX310X_GPIODATA_GP2OUT_BIT (1 << 2) /* GPIO 2 output value */
-#define MAX310X_GPIODATA_GP3OUT_BIT (1 << 3) /* GPIO 3 output value */
-#define MAX310X_GPIODATA_GP0IN_BIT (1 << 4) /* GPIO 0 input value */
-#define MAX310X_GPIODATA_GP1IN_BIT (1 << 5) /* GPIO 1 input value */
-#define MAX310X_GPIODATA_GP2IN_BIT (1 << 6) /* GPIO 2 input value */
-#define MAX310X_GPIODATA_GP3IN_BIT (1 << 7) /* GPIO 3 input value */
-
/* PLL configuration register masks */
#define MAX310X_PLLCFG_PREDIV_MASK (0x3f) /* PLL predivision value */
#define MAX310X_PLLCFG_PLLFACTOR_MASK (0xc0) /* PLL multiplication factor */
@@ -283,16 +261,15 @@ struct max310x_devtype {
struct max310x_one {
struct uart_port port;
struct work_struct tx_work;
+ struct work_struct md_work;
};
struct max310x_port {
struct uart_driver uart;
struct max310x_devtype *devtype;
struct regmap *regmap;
- struct regmap_config regcfg;
struct mutex mutex;
- struct max310x_pdata *pdata;
- int gpio_used;
+ struct clk *clk;
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio;
#endif
@@ -504,25 +481,33 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
return false;
}
-static void max310x_set_baud(struct uart_port *port, int baud)
+static int max310x_set_baud(struct uart_port *port, int baud)
{
- unsigned int mode = 0, div = port->uartclk / baud;
+ unsigned int mode = 0, clk = port->uartclk, div = clk / baud;
- if (!(div / 16)) {
+ /* Check for minimal value for divider */
+ if (div < 16)
+ div = 16;
+
+ if (clk % baud && (div / 16) < 0x8000) {
/* Mode x2 */
mode = MAX310X_BRGCFG_2XMODE_BIT;
- div = (port->uartclk * 2) / baud;
- }
-
- if (!(div / 16)) {
- /* Mode x4 */
- mode = MAX310X_BRGCFG_4XMODE_BIT;
- div = (port->uartclk * 4) / baud;
+ clk = port->uartclk * 2;
+ div = clk / baud;
+
+ if (clk % baud && (div / 16) < 0x8000) {
+ /* Mode x4 */
+ mode = MAX310X_BRGCFG_4XMODE_BIT;
+ clk = port->uartclk * 4;
+ div = clk / baud;
+ }
}
max310x_port_write(port, MAX310X_BRGDIVMSB_REG, (div / 16) >> 8);
max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div / 16);
max310x_port_write(port, MAX310X_BRGCFG_REG, (div % 16) | mode);
+
+ return DIV_ROUND_CLOSEST(clk, div);
}
static int max310x_update_best_err(unsigned long f, long *besterr)
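The rewritten max310x_set_baud() above only switches to x2 or x4 oversampling when the reference clock does not divide the requested rate exactly and the 16-bit divider still has headroom, and it now reports the rate actually programmed. The following stand-alone C model reproduces that arithmetic for inspection; it is not driver code, the register writes are omitted and the helper name is made up.

#include <stdio.h>

/* Model of the divider/oversampling choice made in the hunk above */
static unsigned int pick_baud(unsigned int uartclk, unsigned int baud,
                              unsigned int *mode_x)
{
        unsigned int clk = uartclk, div = clk / baud;

        *mode_x = 1;
        if (div < 16)                   /* minimal divider value */
                div = 16;
        if (clk % baud && (div / 16) < 0x8000) {
                *mode_x = 2;            /* x2 rate mode */
                clk = uartclk * 2;
                div = clk / baud;
                if (clk % baud && (div / 16) < 0x8000) {
                        *mode_x = 4;    /* x4 rate mode */
                        clk = uartclk * 4;
                        div = clk / baud;
                }
        }
        return (clk + div / 2) / div;   /* DIV_ROUND_CLOSEST(clk, div) */
}

int main(void)
{
        unsigned int mode, actual = pick_baud(26000000, 115200, &mode);

        printf("requested 115200: x%u mode, actual %u baud\n", mode, actual);
        return 0;
}

With a 26 MHz oscillator the 115200 request ends up in x4 mode, which is exactly why the function now returns the achieved rate so uart_update_timeout() sees the real value.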
@@ -538,18 +523,19 @@ static int max310x_update_best_err(unsigned long f, long *besterr)
return 1;
}
-static int max310x_set_ref_clk(struct max310x_port *s)
+static int max310x_set_ref_clk(struct max310x_port *s, unsigned long freq,
+ bool xtal)
{
unsigned int div, clksrc, pllcfg = 0;
long besterr = -1;
- unsigned long fdiv, fmul, bestfreq = s->pdata->frequency;
+ unsigned long fdiv, fmul, bestfreq = freq;
/* First, update error without PLL */
- max310x_update_best_err(s->pdata->frequency, &besterr);
+ max310x_update_best_err(freq, &besterr);
/* Try all possible PLL dividers */
for (div = 1; (div <= 63) && besterr; div++) {
- fdiv = DIV_ROUND_CLOSEST(s->pdata->frequency, div);
+ fdiv = DIV_ROUND_CLOSEST(freq, div);
/* Try multiplier 6 */
fmul = fdiv * 6;
@@ -582,10 +568,7 @@ static int max310x_set_ref_clk(struct max310x_port *s)
}
/* Configure clock source */
- if (s->pdata->driver_flags & MAX310X_EXT_CLK)
- clksrc = MAX310X_CLKSRC_EXTCLK_BIT;
- else
- clksrc = MAX310X_CLKSRC_CRYST_BIT;
+ clksrc = xtal ? MAX310X_CLKSRC_CRYST_BIT : MAX310X_CLKSRC_EXTCLK_BIT;
/* Configure PLL */
if (pllcfg) {
@@ -597,7 +580,7 @@ static int max310x_set_ref_clk(struct max310x_port *s)
regmap_write(s->regmap, MAX310X_CLKSRC_REG, clksrc);
/* Wait for crystal */
- if (pllcfg && !(s->pdata->driver_flags & MAX310X_EXT_CLK))
+ if (pllcfg && xtal)
msleep(10);
return (int)bestfreq;
@@ -782,11 +765,21 @@ static unsigned int max310x_get_mctrl(struct uart_port *port)
return TIOCM_DSR | TIOCM_CAR;
}
+static void max310x_md_proc(struct work_struct *ws)
+{
+ struct max310x_one *one = container_of(ws, struct max310x_one, md_work);
+
+ max310x_port_update(&one->port, MAX310X_MODE2_REG,
+ MAX310X_MODE2_LOOPBACK_BIT,
+ (one->port.mctrl & TIOCM_LOOP) ?
+ MAX310X_MODE2_LOOPBACK_BIT : 0);
+}
+
static void max310x_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- /* DCD and DSR are not wired and CTS/RTS is hadnled automatically
- * so do nothing
- */
+ struct max310x_one *one = container_of(port, struct max310x_one, port);
+
+ schedule_work(&one->md_work);
}
static void max310x_break_ctl(struct uart_port *port, int break_state)
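set_mctrl() can be called under the port lock in atomic context, while the regmap access behind max310x_port_update() goes over SPI and may sleep, so the hunk above pushes the loopback-bit update into a work item instead of touching the register directly. A generic sketch of that defer-to-workqueue pattern is below; the demo_* names are illustrative and INIT_WORK() is assumed to have been done at probe time.

#include <linux/workqueue.h>

struct demo_port {
        struct work_struct md_work;
        unsigned int mctrl;             /* written from atomic context */
};

static void demo_md_work(struct work_struct *ws)
{
        struct demo_port *p = container_of(ws, struct demo_port, md_work);

        /* Process context: safe to perform the sleeping SPI/regmap update */
        (void)p->mctrl;
}

static void demo_set_mctrl(struct demo_port *p, unsigned int mctrl)
{
        p->mctrl = mctrl;               /* record the requested state */
        schedule_work(&p->md_work);     /* apply it where sleeping is allowed */
}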
@@ -875,40 +868,76 @@ static void max310x_set_termios(struct uart_port *port,
port->uartclk / 4);
/* Setup baudrate generator */
- max310x_set_baud(port, baud);
+ baud = max310x_set_baud(port, baud);
/* Update timeout according to new baud rate */
uart_update_timeout(port, termios->c_cflag, baud);
}
+static int max310x_ioctl(struct uart_port *port, unsigned int cmd,
+ unsigned long arg)
+{
+#if defined(TIOCSRS485) && defined(TIOCGRS485)
+ struct serial_rs485 rs485;
+ unsigned int val;
+
+ switch (cmd) {
+ case TIOCSRS485:
+ if (copy_from_user(&rs485, (void __user *)arg, sizeof(rs485)))
+ return -EFAULT;
+ if (rs485.delay_rts_before_send > 0x0f ||
+ rs485.delay_rts_after_send > 0x0f)
+ return -ERANGE;
+ val = (rs485.delay_rts_before_send << 4) |
+ rs485.delay_rts_after_send;
+ max310x_port_write(port, MAX310X_HDPIXDELAY_REG, val);
+ if (rs485.flags & SER_RS485_ENABLED) {
+ max310x_port_update(port, MAX310X_MODE1_REG,
+ MAX310X_MODE1_TRNSCVCTRL_BIT,
+ MAX310X_MODE1_TRNSCVCTRL_BIT);
+ max310x_port_update(port, MAX310X_MODE2_REG,
+ MAX310X_MODE2_ECHOSUPR_BIT,
+ MAX310X_MODE2_ECHOSUPR_BIT);
+ } else {
+ max310x_port_update(port, MAX310X_MODE1_REG,
+ MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
+ max310x_port_update(port, MAX310X_MODE2_REG,
+ MAX310X_MODE2_ECHOSUPR_BIT, 0);
+ }
+ return 0;
+ case TIOCGRS485:
+ memset(&rs485, 0, sizeof(rs485));
+ val = max310x_port_read(port, MAX310X_MODE1_REG);
+ rs485.flags = (val & MAX310X_MODE1_TRNSCVCTRL_BIT) ?
+ SER_RS485_ENABLED : 0;
+ rs485.flags |= SER_RS485_RTS_ON_SEND;
+ val = max310x_port_read(port, MAX310X_HDPIXDELAY_REG);
+ rs485.delay_rts_before_send = val >> 4;
+ rs485.delay_rts_after_send = val & 0x0f;
+ if (copy_to_user((void __user *)arg, &rs485, sizeof(rs485)))
+ return -EFAULT;
+ return 0;
+ default:
+ break;
+ }
+#endif
+
+ return -ENOIOCTLCMD;
+}
+
static int max310x_startup(struct uart_port *port)
{
- unsigned int val, line = port->line;
struct max310x_port *s = dev_get_drvdata(port->dev);
+ unsigned int val;
s->devtype->power(port, 1);
- /* Configure baud rate, 9600 as default */
- max310x_set_baud(port, 9600);
-
- /* Configure LCR register, 8N1 mode by default */
- max310x_port_write(port, MAX310X_LCR_REG, MAX310X_LCR_WORD_LEN_8);
-
/* Configure MODE1 register */
max310x_port_update(port, MAX310X_MODE1_REG,
- MAX310X_MODE1_TRNSCVCTRL_BIT,
- (s->pdata->uart_flags[line] & MAX310X_AUTO_DIR_CTRL)
- ? MAX310X_MODE1_TRNSCVCTRL_BIT : 0);
-
- /* Configure MODE2 register */
- val = MAX310X_MODE2_RXEMPTINV_BIT;
- if (s->pdata->uart_flags[line] & MAX310X_LOOPBACK)
- val |= MAX310X_MODE2_LOOPBACK_BIT;
- if (s->pdata->uart_flags[line] & MAX310X_ECHO_SUPRESS)
- val |= MAX310X_MODE2_ECHOSUPR_BIT;
-
- /* Reset FIFOs */
- val |= MAX310X_MODE2_FIFORST_BIT;
+ MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
+
+ /* Configure MODE2 register & Reset FIFOs */
+ val = MAX310X_MODE2_RXEMPTINV_BIT | MAX310X_MODE2_FIFORST_BIT;
max310x_port_write(port, MAX310X_MODE2_REG, val);
max310x_port_update(port, MAX310X_MODE2_REG,
MAX310X_MODE2_FIFORST_BIT, 0);
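The new max310x_ioctl() above wires the chip's half-duplex transceiver control and RTS delay nibbles to the standard TIOCSRS485/TIOCGRS485 interface. A small user-space usage example is shown below; the device node name is hypothetical, and per the handler the delay fields must fit in four bits.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
        struct serial_rs485 rs485;
        int fd = open("/dev/ttyMAX0", O_RDWR | O_NOCTTY);  /* hypothetical node */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&rs485, 0, sizeof(rs485));
        rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
        rs485.delay_rts_before_send = 1;        /* 0..15 per the handler above */
        rs485.delay_rts_after_send = 1;

        if (ioctl(fd, TIOCSRS485, &rs485) < 0)
                perror("TIOCSRS485");

        if (ioctl(fd, TIOCGRS485, &rs485) == 0)
                printf("rs485 %sabled\n",
                       (rs485.flags & SER_RS485_ENABLED) ? "en" : "dis");

        close(fd);
        return 0;
}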
@@ -989,6 +1018,7 @@ static const struct uart_ops max310x_ops = {
.release_port = max310x_null_void,
.config_port = max310x_config_port,
.verify_port = max310x_verify_port,
+ .ioctl = max310x_ioctl,
};
static int __maybe_unused max310x_suspend(struct device *dev)
@@ -1017,6 +1047,8 @@ static int __maybe_unused max310x_resume(struct device *dev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(max310x_pm_ops, max310x_suspend, max310x_resume);
+
#ifdef CONFIG_GPIOLIB
static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
@@ -1063,23 +1095,16 @@ static int max310x_gpio_direction_output(struct gpio_chip *chip,
}
#endif
-static int max310x_probe(struct device *dev, int is_spi,
- struct max310x_devtype *devtype, int irq)
+static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
+ struct regmap *regmap, int irq, unsigned long flags)
{
+ int i, ret, fmin, fmax, freq, uartclk;
+ struct clk *clk_osc, *clk_xtal;
struct max310x_port *s;
- struct max310x_pdata *pdata = dev_get_platdata(dev);
- int i, ret, uartclk;
-
- /* Check for IRQ */
- if (irq <= 0) {
- dev_err(dev, "No IRQ specified\n");
- return -ENOTSUPP;
- }
+ bool xtal = false;
- if (!pdata) {
- dev_err(dev, "No platform data supplied\n");
- return -EINVAL;
- }
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
/* Alloc port structure */
s = devm_kzalloc(dev, sizeof(*s) +
@@ -1089,52 +1114,44 @@ static int max310x_probe(struct device *dev, int is_spi,
return -ENOMEM;
}
- /* Check input frequency */
- if ((pdata->driver_flags & MAX310X_EXT_CLK) &&
- ((pdata->frequency < 500000) || (pdata->frequency > 35000000)))
- goto err_freq;
- /* Check frequency for quartz */
- if (!(pdata->driver_flags & MAX310X_EXT_CLK) &&
- ((pdata->frequency < 1000000) || (pdata->frequency > 4000000)))
- goto err_freq;
-
- s->pdata = pdata;
- s->devtype = devtype;
- dev_set_drvdata(dev, s);
-
- mutex_init(&s->mutex);
+ clk_osc = devm_clk_get(dev, "osc");
+ clk_xtal = devm_clk_get(dev, "xtal");
+ if (!IS_ERR(clk_osc)) {
+ s->clk = clk_osc;
+ fmin = 500000;
+ fmax = 35000000;
+ } else if (!IS_ERR(clk_xtal)) {
+ s->clk = clk_xtal;
+ fmin = 1000000;
+ fmax = 4000000;
+ xtal = true;
+ } else if (PTR_ERR(clk_osc) == -EPROBE_DEFER ||
+ PTR_ERR(clk_xtal) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else {
+ dev_err(dev, "Cannot get clock\n");
+ return -EINVAL;
+ }
- /* Setup regmap */
- s->regcfg.reg_bits = 8;
- s->regcfg.val_bits = 8;
- s->regcfg.read_flag_mask = 0x00;
- s->regcfg.write_flag_mask = 0x80;
- s->regcfg.cache_type = REGCACHE_RBTREE;
- s->regcfg.writeable_reg = max310x_reg_writeable;
- s->regcfg.volatile_reg = max310x_reg_volatile;
- s->regcfg.precious_reg = max310x_reg_precious;
- s->regcfg.max_register = devtype->nr * 0x20 - 1;
-
- if (IS_ENABLED(CONFIG_SPI_MASTER) && is_spi) {
- struct spi_device *spi = to_spi_device(dev);
-
- s->regmap = devm_regmap_init_spi(spi, &s->regcfg);
- } else
- return -ENOTSUPP;
+ ret = clk_prepare_enable(s->clk);
+ if (ret)
+ return ret;
- if (IS_ERR(s->regmap)) {
- dev_err(dev, "Failed to initialize register map\n");
- return PTR_ERR(s->regmap);
+ freq = clk_get_rate(s->clk);
+ /* Check frequency limits */
+ if (freq < fmin || freq > fmax) {
+ ret = -ERANGE;
+ goto out_clk;
}
- /* Board specific configure */
- if (s->pdata->init)
- s->pdata->init();
+ s->regmap = regmap;
+ s->devtype = devtype;
+ dev_set_drvdata(dev, s);
/* Check device to ensure we are talking to what we expect */
ret = devtype->detect(dev);
if (ret)
- return ret;
+ goto out_clk;
for (i = 0; i < devtype->nr; i++) {
unsigned int offs = i << 5;
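The probe rework above replaces the platform-data frequency checks with a clock-framework lookup: an "osc" clock selects the external-clock limits, an "xtal" clock the crystal limits, and a missing provider is propagated as -EPROBE_DEFER so probing is retried later. A condensed sketch of that lookup order, with the surrounding driver code omitted and the helper name invented:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int demo_get_refclk(struct device *dev, struct clk **clkp, bool *xtal)
{
        struct clk *clk_osc = devm_clk_get(dev, "osc");
        struct clk *clk_xtal = devm_clk_get(dev, "xtal");

        if (!IS_ERR(clk_osc)) {
                *clkp = clk_osc;        /* external clock, 0.5..35 MHz window */
                *xtal = false;
        } else if (!IS_ERR(clk_xtal)) {
                *clkp = clk_xtal;       /* crystal, 1..4 MHz window */
                *xtal = true;
        } else if (PTR_ERR(clk_osc) == -EPROBE_DEFER ||
                   PTR_ERR(clk_xtal) == -EPROBE_DEFER) {
                return -EPROBE_DEFER;   /* clock provider not ready yet */
        } else {
                dev_err(dev, "Cannot get clock\n");
                return -EINVAL;
        }

        return 0;
}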
@@ -1156,7 +1173,7 @@ static int max310x_probe(struct device *dev, int is_spi,
MAX310X_MODE1_AUTOSLEEP_BIT);
}
- uartclk = max310x_set_ref_clk(s);
+ uartclk = max310x_set_ref_clk(s, freq, xtal);
dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
/* Register UART driver */
@@ -1168,9 +1185,28 @@ static int max310x_probe(struct device *dev, int is_spi,
ret = uart_register_driver(&s->uart);
if (ret) {
dev_err(dev, "Registering UART driver failed\n");
- return ret;
+ goto out_clk;
}
+#ifdef CONFIG_GPIOLIB
+ /* Setup GPIO controller */
+ s->gpio.owner = THIS_MODULE;
+ s->gpio.dev = dev;
+ s->gpio.label = dev_name(dev);
+ s->gpio.direction_input = max310x_gpio_direction_input;
+ s->gpio.get = max310x_gpio_get;
+ s->gpio.direction_output = max310x_gpio_direction_output;
+ s->gpio.set = max310x_gpio_set;
+ s->gpio.base = -1;
+ s->gpio.ngpio = devtype->nr * 4;
+ s->gpio.can_sleep = 1;
+ ret = gpiochip_add(&s->gpio);
+ if (ret)
+ goto out_uart;
+#endif
+
+ mutex_init(&s->mutex);
+
for (i = 0; i < devtype->nr; i++) {
/* Initialize port data */
s->p[i].port.line = i;
@@ -1178,8 +1214,7 @@ static int max310x_probe(struct device *dev, int is_spi,
s->p[i].port.irq = irq;
s->p[i].port.type = PORT_MAX310X;
s->p[i].port.fifosize = MAX310X_FIFO_SIZE;
- s->p[i].port.flags = UPF_SKIP_TEST | UPF_FIXED_TYPE |
- UPF_LOW_LATENCY;
+ s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.iobase = i * 0x20;
s->p[i].port.membase = (void __iomem *)~0;
@@ -1195,48 +1230,35 @@ static int max310x_probe(struct device *dev, int is_spi,
MAX310X_MODE1_IRQSEL_BIT);
/* Initialize queue for start TX */
INIT_WORK(&s->p[i].tx_work, max310x_wq_proc);
+ /* Initialize queue for changing mode */
+ INIT_WORK(&s->p[i].md_work, max310x_md_proc);
/* Register port */
uart_add_one_port(&s->uart, &s->p[i].port);
/* Go to suspend mode */
devtype->power(&s->p[i].port, 0);
}
-#ifdef CONFIG_GPIOLIB
- /* Setup GPIO cotroller */
- if (s->pdata->gpio_base) {
- s->gpio.owner = THIS_MODULE;
- s->gpio.dev = dev;
- s->gpio.label = dev_name(dev);
- s->gpio.direction_input = max310x_gpio_direction_input;
- s->gpio.get = max310x_gpio_get;
- s->gpio.direction_output= max310x_gpio_direction_output;
- s->gpio.set = max310x_gpio_set;
- s->gpio.base = s->pdata->gpio_base;
- s->gpio.ngpio = devtype->nr * 4;
- s->gpio.can_sleep = 1;
- if (!gpiochip_add(&s->gpio))
- s->gpio_used = 1;
- } else
- dev_info(dev, "GPIO support not enabled\n");
-#endif
-
/* Setup interrupt */
ret = devm_request_threaded_irq(dev, irq, NULL, max310x_ist,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), s);
- if (ret) {
- dev_err(dev, "Unable to reguest IRQ %i\n", irq);
+ IRQF_ONESHOT | flags, dev_name(dev), s);
+ if (!ret)
+ return 0;
+
+ dev_err(dev, "Unable to reguest IRQ %i\n", irq);
+
+ mutex_destroy(&s->mutex);
+
#ifdef CONFIG_GPIOLIB
- if (s->gpio_used)
- WARN_ON(gpiochip_remove(&s->gpio));
+ WARN_ON(gpiochip_remove(&s->gpio));
+
+out_uart:
#endif
- }
+ uart_unregister_driver(&s->uart);
- return ret;
+out_clk:
+ clk_disable_unprepare(s->clk);
-err_freq:
- dev_err(dev, "Frequency parameter incorrect\n");
- return -EINVAL;
+ return ret;
}
static int max310x_remove(struct device *dev)
@@ -1244,30 +1266,51 @@ static int max310x_remove(struct device *dev)
struct max310x_port *s = dev_get_drvdata(dev);
int i, ret = 0;
+#ifdef CONFIG_GPIOLIB
+ ret = gpiochip_remove(&s->gpio);
+ if (ret)
+ return ret;
+#endif
+
for (i = 0; i < s->uart.nr; i++) {
cancel_work_sync(&s->p[i].tx_work);
+ cancel_work_sync(&s->p[i].md_work);
uart_remove_one_port(&s->uart, &s->p[i].port);
s->devtype->power(&s->p[i].port, 0);
}
+ mutex_destroy(&s->mutex);
uart_unregister_driver(&s->uart);
-
-#ifdef CONFIG_GPIOLIB
- if (s->gpio_used)
- ret = gpiochip_remove(&s->gpio);
-#endif
-
- if (s->pdata->exit)
- s->pdata->exit();
+ clk_disable_unprepare(s->clk);
return ret;
}
+static const struct of_device_id __maybe_unused max310x_dt_ids[] = {
+ { .compatible = "maxim,max3107", .data = &max3107_devtype, },
+ { .compatible = "maxim,max3108", .data = &max3108_devtype, },
+ { .compatible = "maxim,max3109", .data = &max3109_devtype, },
+ { .compatible = "maxim,max14830", .data = &max14830_devtype },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max310x_dt_ids);
+
+static struct regmap_config regcfg = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .write_flag_mask = 0x80,
+ .cache_type = REGCACHE_RBTREE,
+ .writeable_reg = max310x_reg_writeable,
+ .volatile_reg = max310x_reg_volatile,
+ .precious_reg = max310x_reg_precious,
+};
+
#ifdef CONFIG_SPI_MASTER
static int max310x_spi_probe(struct spi_device *spi)
{
- struct max310x_devtype *devtype =
- (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
+ struct max310x_devtype *devtype;
+ unsigned long flags = 0;
+ struct regmap *regmap;
int ret;
/* Setup SPI bus */
@@ -1275,12 +1318,25 @@ static int max310x_spi_probe(struct spi_device *spi)
spi->mode = spi->mode ? : SPI_MODE_0;
spi->max_speed_hz = spi->max_speed_hz ? : 26000000;
ret = spi_setup(spi);
- if (ret) {
- dev_err(&spi->dev, "SPI setup failed\n");
+ if (ret)
return ret;
+
+ if (spi->dev.of_node) {
+ const struct of_device_id *of_id =
+ of_match_device(max310x_dt_ids, &spi->dev);
+
+ devtype = (struct max310x_devtype *)of_id->data;
+ } else {
+ const struct spi_device_id *id_entry = spi_get_device_id(spi);
+
+ devtype = (struct max310x_devtype *)id_entry->driver_data;
+ flags = IRQF_TRIGGER_FALLING;
}
- return max310x_probe(&spi->dev, 1, devtype, spi->irq);
+ regcfg.max_register = devtype->nr * 0x20 - 1;
+ regmap = devm_regmap_init_spi(spi, &regcfg);
+
+ return max310x_probe(&spi->dev, devtype, regmap, spi->irq, flags);
}
static int max310x_spi_remove(struct spi_device *spi)
@@ -1288,8 +1344,6 @@ static int max310x_spi_remove(struct spi_device *spi)
return max310x_remove(&spi->dev);
}
-static SIMPLE_DEV_PM_OPS(max310x_pm_ops, max310x_suspend, max310x_resume);
-
static const struct spi_device_id max310x_id_table[] = {
{ "max3107", (kernel_ulong_t)&max3107_devtype, },
{ "max3108", (kernel_ulong_t)&max3108_devtype, },
@@ -1301,9 +1355,10 @@ MODULE_DEVICE_TABLE(spi, max310x_id_table);
static struct spi_driver max310x_uart_driver = {
.driver = {
- .name = MAX310X_NAME,
- .owner = THIS_MODULE,
- .pm = &max310x_pm_ops,
+ .name = MAX310X_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(max310x_dt_ids),
+ .pm = &max310x_pm_ops,
},
.probe = max310x_spi_probe,
.remove = max310x_spi_remove,
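With the platform-data path gone, the SPI probe above builds the register map itself: regcfg describes 8-bit registers with bit 7 as the write flag and an rbtree cache, max_register is sized from the detected variant, and the resulting regmap is passed into max310x_probe(). A minimal sketch of creating such a map for an SPI client follows; the config values mirror the hunk, everything else is illustrative.

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

static struct regmap_config demo_regcfg = {
        .reg_bits = 8,
        .val_bits = 8,
        .write_flag_mask = 0x80,        /* MSB set marks a write transfer */
        .cache_type = REGCACHE_RBTREE,
};

static int demo_spi_probe(struct spi_device *spi)
{
        struct regmap *map;

        demo_regcfg.max_register = 4 * 0x20 - 1;        /* e.g. a 4-port variant */
        map = devm_regmap_init_spi(spi, &demo_regcfg);
        if (IS_ERR(map))
                return PTR_ERR(map);

        /* regmap_read()/regmap_write() may now be used on "map" */
        return 0;
}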
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index b5d779cd3c2b..053b98eb46c8 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -39,6 +39,13 @@
#include "msm_serial.h"
+enum {
+ UARTDM_1P1 = 1,
+ UARTDM_1P2,
+ UARTDM_1P3,
+ UARTDM_1P4,
+};
+
struct msm_port {
struct uart_port uart;
char name[16];
@@ -309,6 +316,8 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
static void msm_reset(struct uart_port *port)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
/* reset everything */
msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
@@ -316,6 +325,10 @@ static void msm_reset(struct uart_port *port)
msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+
+ /* Disable DM modes */
+ if (msm_port->is_uartdm)
+ msm_write(port, 0, UARTDM_DMEN);
}
static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -711,6 +724,117 @@ static void msm_power(struct uart_port *port, unsigned int state,
}
}
+#ifdef CONFIG_CONSOLE_POLL
+static int msm_poll_init(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ /* Enable single character mode on RX FIFO */
+ if (msm_port->is_uartdm >= UARTDM_1P4)
+ msm_write(port, UARTDM_DMEN_RX_SC_ENABLE, UARTDM_DMEN);
+
+ return 0;
+}
+
+static int msm_poll_get_char_single(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : UART_RF;
+
+ if (!(msm_read(port, UART_SR) & UART_SR_RX_READY))
+ return NO_POLL_CHAR;
+ else
+ return msm_read(port, rf_reg) & 0xff;
+}
+
+static int msm_poll_get_char_dm_1p3(struct uart_port *port)
+{
+ int c;
+ static u32 slop;
+ static int count;
+ unsigned char *sp = (unsigned char *)&slop;
+
+ /* Check if a previous read had more than one char */
+ if (count) {
+ c = sp[sizeof(slop) - count];
+ count--;
+ /* Or if FIFO is empty */
+ } else if (!(msm_read(port, UART_SR) & UART_SR_RX_READY)) {
+ /*
+ * If RX packing buffer has less than a word, force stale to
+ * push contents into RX FIFO
+ */
+ count = msm_read(port, UARTDM_RXFS);
+ count = (count >> UARTDM_RXFS_BUF_SHIFT) & UARTDM_RXFS_BUF_MASK;
+ if (count) {
+ msm_write(port, UART_CR_CMD_FORCE_STALE, UART_CR);
+ slop = msm_read(port, UARTDM_RF);
+ c = sp[0];
+ count--;
+ } else {
+ c = NO_POLL_CHAR;
+ }
+ /* FIFO has a word */
+ } else {
+ slop = msm_read(port, UARTDM_RF);
+ c = sp[0];
+ count = sizeof(slop) - 1;
+ }
+
+ return c;
+}
+
+static int msm_poll_get_char(struct uart_port *port)
+{
+ u32 imr;
+ int c;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ /* Disable all interrupts */
+ imr = msm_read(port, UART_IMR);
+ msm_write(port, 0, UART_IMR);
+
+ if (msm_port->is_uartdm == UARTDM_1P3)
+ c = msm_poll_get_char_dm_1p3(port);
+ else
+ c = msm_poll_get_char_single(port);
+
+ /* Enable interrupts */
+ msm_write(port, imr, UART_IMR);
+
+ return c;
+}
+
+static void msm_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ u32 imr;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ /* Disable all interrupts */
+ imr = msm_read(port, UART_IMR);
+ msm_write(port, 0, UART_IMR);
+
+ if (msm_port->is_uartdm)
+ reset_dm_count(port, 1);
+
+ /* Wait until FIFO is empty */
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ cpu_relax();
+
+ /* Write a character */
+ msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+
+ /* Wait until FIFO is empty */
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ cpu_relax();
+
+ /* Enable interrupts */
+ msm_write(port, imr, UART_IMR);
+
+ return;
+}
+#endif
+
static struct uart_ops msm_uart_pops = {
.tx_empty = msm_tx_empty,
.set_mctrl = msm_set_mctrl,
@@ -729,6 +853,11 @@ static struct uart_ops msm_uart_pops = {
.config_port = msm_config_port,
.verify_port = msm_verify_port,
.pm = msm_power,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = msm_poll_init,
+ .poll_get_char = msm_poll_get_char,
+ .poll_put_char = msm_poll_put_char,
+#endif
};
static struct msm_port msm_uart_ports[] = {
@@ -900,7 +1029,10 @@ static struct uart_driver msm_uart_driver = {
static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
static const struct of_device_id msm_uartdm_table[] = {
- { .compatible = "qcom,msm-uartdm" },
+ { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
+ { .compatible = "qcom,msm-uartdm-v1.2", .data = (void *)UARTDM_1P2 },
+ { .compatible = "qcom,msm-uartdm-v1.3", .data = (void *)UARTDM_1P3 },
+ { .compatible = "qcom,msm-uartdm-v1.4", .data = (void *)UARTDM_1P4 },
{ }
};
@@ -909,6 +1041,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
struct msm_port *msm_port;
struct resource *resource;
struct uart_port *port;
+ const struct of_device_id *id;
int irq;
if (pdev->id == -1)
@@ -923,8 +1056,9 @@ static int __init msm_serial_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
msm_port = UART_TO_MSM(port);
- if (of_match_device(msm_uartdm_table, &pdev->dev))
- msm_port->is_uartdm = 1;
+ id = of_match_device(msm_uartdm_table, &pdev->dev);
+ if (id)
+ msm_port->is_uartdm = (unsigned long)id->data;
else
msm_port->is_uartdm = 0;
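msm_serial now encodes the UARTDM revision in the .data field of its OF match table and recovers it with of_match_device() at probe time, rather than treating any match as a plain flag. A generic sketch of that pattern, with made-up compatible strings and names:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

enum { DEMO_V1 = 1, DEMO_V2 };

static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-v1", .data = (void *)DEMO_V1 },
        { .compatible = "vendor,demo-v2", .data = (void *)DEMO_V2 },
        { }
};

static int demo_probe(struct platform_device *pdev)
{
        const struct of_device_id *id;
        unsigned long version = 0;

        id = of_match_device(demo_of_match, &pdev->dev);
        if (id)
                version = (unsigned long)id->data;      /* 0 means no DT match */

        dev_info(&pdev->dev, "hardware revision %lu\n", version);
        return 0;
}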
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 469fda50ac63..1e9b68b6f9eb 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -59,6 +59,7 @@
#define UART_CR_CMD_RESET_RFR (14 << 4)
#define UART_CR_CMD_PROTECTION_EN (16 << 4)
#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define UART_CR_CMD_FORCE_STALE (4 << 8)
#define UART_CR_CMD_RESET_TX_READY (3 << 8)
#define UART_CR_TX_DISABLE (1 << 3)
#define UART_CR_TX_ENABLE (1 << 2)
@@ -113,6 +114,14 @@
#define GSBI_PROTOCOL_UART 0x40
#define GSBI_PROTOCOL_IDLE 0x0
+#define UARTDM_RXFS 0x50
+#define UARTDM_RXFS_BUF_SHIFT 0x7
+#define UARTDM_RXFS_BUF_MASK 0x7
+
+#define UARTDM_DMEN 0x3C
+#define UARTDM_DMEN_RX_SC_ENABLE BIT(5)
+#define UARTDM_DMEN_TX_SC_ENABLE BIT(4)
+
#define UARTDM_DMRX 0x34
#define UARTDM_NCF_TX 0x40
#define UARTDM_RX_TOTAL_SNAP 0x38
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 77f035158d6c..dd8b1a5458ff 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -342,7 +342,14 @@ static void serial_omap_stop_tx(struct uart_port *port)
if ((up->rs485.flags & SER_RS485_ENABLED) &&
!(up->rs485.flags & SER_RS485_RX_DURING_TX)) {
- up->ier = UART_IER_RLSI | UART_IER_RDI;
+ /*
+ * Empty the RX FIFO, we are not interested in anything
+ * received during the half-duplex transmission.
+ */
+ serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_RCVR);
+ /* Re-enable RX interrupts */
+ up->ier |= UART_IER_RLSI | UART_IER_RDI;
+ up->port.read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
}
@@ -355,7 +362,7 @@ static void serial_omap_stop_rx(struct uart_port *port)
struct uart_omap_port *up = to_uart_omap_port(port);
pm_runtime_get_sync(up->dev);
- up->ier &= ~UART_IER_RLSI;
+ up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
pm_runtime_mark_last_busy(up->dev);
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 8fa1134e0051..0931b3fe9edf 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1762,7 +1762,9 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
int fifosize;
int port_type;
struct pch_uart_driver_data *board;
+#ifdef CONFIG_DEBUG_FS
char name[32]; /* for debugfs file name */
+#endif
board = &drv_dat[id->driver_data];
port_type = board->port_type;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 9cd706df3b33..23f459600738 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1282,6 +1282,14 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
if (ret < 0)
goto probe_err;
+ if (!s3c24xx_uart_drv.state) {
+ ret = uart_register_driver(&s3c24xx_uart_drv);
+ if (ret < 0) {
+ pr_err("Failed to register Samsung UART driver\n");
+ return ret;
+ }
+ }
+
dbg("%s: adding port\n", __func__);
uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
platform_set_drvdata(pdev, &ourport->port);
@@ -1321,6 +1329,8 @@ static int s3c24xx_serial_remove(struct platform_device *dev)
uart_remove_one_port(&s3c24xx_uart_drv, port);
}
+ uart_unregister_driver(&s3c24xx_uart_drv);
+
return 0;
}
@@ -1820,35 +1830,7 @@ static struct platform_driver samsung_serial_driver = {
},
};
-/* module initialisation code */
-
-static int __init s3c24xx_serial_modinit(void)
-{
- int ret;
-
- ret = uart_register_driver(&s3c24xx_uart_drv);
- if (ret < 0) {
- pr_err("Failed to register Samsung UART driver\n");
- return ret;
- }
-
- ret = platform_driver_register(&samsung_serial_driver);
- if (ret < 0) {
- pr_err("Failed to register platform driver\n");
- uart_unregister_driver(&s3c24xx_uart_drv);
- }
-
- return ret;
-}
-
-static void __exit s3c24xx_serial_modexit(void)
-{
- platform_driver_unregister(&samsung_serial_driver);
- uart_unregister_driver(&s3c24xx_uart_drv);
-}
-
-module_init(s3c24xx_serial_modinit);
-module_exit(s3c24xx_serial_modexit);
+module_platform_driver(samsung_serial_driver);
MODULE_ALIAS("platform:samsung-uart");
MODULE_DESCRIPTION("Samsung SoC Serial port driver");
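The samsung.c hunks move uart_register_driver() into probe (guarded by the .state check so it runs only once) and let module_platform_driver() generate the init/exit boilerplate that the removed modinit/modexit pair used to spell out. Conceptually the macro stands in for something like the sketch below; this is an approximation of its expansion, not the kernel's exact macro text.

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver demo_driver;      /* filled in elsewhere */

static int __init demo_driver_init(void)
{
        return platform_driver_register(&demo_driver);
}
module_init(demo_driver_init);

static void __exit demo_driver_exit(void)
{
        platform_driver_unregister(&demo_driver);
}
module_exit(demo_driver_exit);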
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index ece2049bd270..2cf5649a6dc0 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1319,9 +1319,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
uport = state->uart_port;
port = &state->port;
- pr_debug("uart_close(%d) called\n", uport->line);
+ pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
- if (tty_port_close_start(port, tty, filp) == 0)
+ if (!port->count || tty_port_close_start(port, tty, filp) == 0)
return;
/*
@@ -1762,7 +1762,7 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
}
/**
- * uart_parse_options - Parse serial port baud/parity/bits/flow contro.
+ * uart_parse_options - Parse serial port baud/parity/bits/flow control.
* @options: pointer to option string
* @baud: pointer to an 'int' variable for the baud rate.
* @parity: pointer to an 'int' variable for the parity.
@@ -2609,7 +2609,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
/*
* Register the port whether it's detected or not. This allows
- * setserial to be used to alter this ports parameters.
+ * setserial to be used to alter this port's parameters.
*/
tty_dev = tty_port_register_device_attr(port, drv->tty_driver,
uport->line, uport->dev, port, tty_dev_attr_groups);
@@ -2645,6 +2645,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
+ struct tty_struct *tty;
int ret = 0;
BUG_ON(in_interrupt());
@@ -2673,8 +2674,17 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
*/
tty_unregister_device(drv->tty_driver, uport->line);
- if (port->tty)
+ tty = tty_port_tty_get(port);
+ if (tty) {
tty_vhangup(port->tty);
+ tty_kref_put(tty);
+ }
+
+ /*
+ * If the port is used as a console, unregister it
+ */
+ if (uart_console(uport))
+ unregister_console(uport->cons);
/*
* Free the port IO and memory resources, if any.
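uart_remove_one_port() above now pins the attached tty with tty_port_tty_get() before hanging it up and drops the reference with tty_kref_put() afterwards, and it unregisters the console when the port backs one. A bare sketch of that reference-counted access pattern (helper name invented):

#include <linux/tty.h>

static void demo_hangup_port(struct tty_port *port)
{
        /* Returns NULL if no tty is currently attached */
        struct tty_struct *tty = tty_port_tty_get(port);

        if (tty) {
                tty_vhangup(tty);       /* safe: we hold our own reference */
                tty_kref_put(tty);      /* drop it when done */
        }
}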
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index be33d2b0613b..88236da0ddf7 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -428,7 +428,7 @@ static int sci_probe_regmap(struct plat_sci_port *cfg)
cfg->regtype = SCIx_HSCIF_REGTYPE;
break;
default:
- printk(KERN_ERR "Can't probe register map for given port\n");
+ pr_err("Can't probe register map for given port\n");
return -EINVAL;
}
@@ -788,7 +788,7 @@ static int sci_handle_errors(struct uart_port *port)
if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
copied++;
- dev_notice(port->dev, "overrun error");
+ dev_notice(port->dev, "overrun error\n");
}
if (status & SCxSR_FER(port)) {
@@ -830,7 +830,7 @@ static int sci_handle_errors(struct uart_port *port)
if (tty_insert_flip_char(tport, 0, TTY_PARITY))
copied++;
- dev_notice(port->dev, "parity error");
+ dev_notice(port->dev, "parity error\n");
}
if (copied)
@@ -911,7 +911,7 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
/* Disable future Rx interrupts */
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
disable_irq_nosync(irq);
- scr |= 0x4000;
+ scr |= SCSCR_RDRQE;
} else {
scr &= ~SCSCR_RIE;
}
@@ -1041,8 +1041,7 @@ static int sci_notifier(struct notifier_block *self,
sci_port = container_of(self, struct sci_port, freq_transition);
- if ((phase == CPUFREQ_POSTCHANGE) ||
- (phase == CPUFREQ_RESUMECHANGE)) {
+ if (phase == CPUFREQ_POSTCHANGE) {
struct uart_port *port = &sci_port->port;
spin_lock_irqsave(&port->lock, flags);
@@ -1200,7 +1199,9 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
*/
reg = sci_getreg(port, SCFCR);
if (reg->size)
- serial_port_out(port, SCFCR, serial_port_in(port, SCFCR) | 1);
+ serial_port_out(port, SCFCR,
+ serial_port_in(port, SCFCR) |
+ SCFCR_LOOP);
}
}
@@ -1290,7 +1291,8 @@ static void sci_dma_rx_complete(void *arg)
unsigned long flags;
int count;
- dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
+ dev_dbg(port->dev, "%s(%d) active #%d\n",
+ __func__, port->line, s->active_rx);
spin_lock_irqsave(&port->lock, flags);
@@ -1366,8 +1368,8 @@ static void sci_submit_rx(struct sci_port *s)
sci_rx_dma_release(s, true);
return;
}
- dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
- s->cookie_rx[i], i);
+ dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n",
+ __func__, s->cookie_rx[i], i);
}
s->active_rx = s->cookie_rx[0];
@@ -1426,8 +1428,8 @@ static void work_fn_rx(struct work_struct *work)
s->active_rx = s->cookie_rx[!new];
- dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
- s->cookie_rx[new], new, s->active_rx);
+ dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n",
+ __func__, s->cookie_rx[new], new, s->active_rx);
}
static void work_fn_tx(struct work_struct *work)
@@ -1480,8 +1482,8 @@ static void work_fn_tx(struct work_struct *work)
return;
}
- dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
- xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
+ dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
+ __func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
dma_async_issue_pending(chan);
}
@@ -1496,9 +1498,9 @@ static void sci_start_tx(struct uart_port *port)
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
u16 new, scr = serial_port_in(port, SCSCR);
if (s->chan_tx)
- new = scr | 0x8000;
+ new = scr | SCSCR_TDRQE;
else
- new = scr & ~0x8000;
+ new = scr & ~SCSCR_TDRQE;
if (new != scr)
serial_port_out(port, SCSCR, new);
}
@@ -1525,7 +1527,7 @@ static void sci_stop_tx(struct uart_port *port)
ctrl = serial_port_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- ctrl &= ~0x8000;
+ ctrl &= ~SCSCR_TDRQE;
ctrl &= ~SCSCR_TIE;
@@ -1539,7 +1541,7 @@ static void sci_start_rx(struct uart_port *port)
ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- ctrl &= ~0x4000;
+ ctrl &= ~SCSCR_RDRQE;
serial_port_out(port, SCSCR, ctrl);
}
@@ -1551,7 +1553,7 @@ static void sci_stop_rx(struct uart_port *port)
ctrl = serial_port_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- ctrl &= ~0x4000;
+ ctrl &= ~SCSCR_RDRQE;
ctrl &= ~port_rx_irq_mask(port);
@@ -1600,8 +1602,8 @@ static bool filter(struct dma_chan *chan, void *slave)
{
struct sh_dmae_slave *param = slave;
- dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
- param->shdma_slave.slave_id);
+ dev_dbg(chan->device->dev, "%s: slave ID %d\n",
+ __func__, param->shdma_slave.slave_id);
chan->private = &param->shdma_slave;
return true;
@@ -1614,7 +1616,7 @@ static void rx_timer_fn(unsigned long arg)
u16 scr = serial_port_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
- scr &= ~0x4000;
+ scr &= ~SCSCR_RDRQE;
enable_irq(s->irqs[SCIx_RXI_IRQ]);
}
serial_port_out(port, SCSCR, scr | SCSCR_RIE);
@@ -1630,8 +1632,7 @@ static void sci_request_dma(struct uart_port *port)
dma_cap_mask_t mask;
int nent;
- dev_dbg(port->dev, "%s: port %d\n", __func__,
- port->line);
+ dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);
if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0)
return;
@@ -1659,7 +1660,8 @@ static void sci_request_dma(struct uart_port *port)
if (!nent)
sci_tx_dma_release(s, false);
else
- dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
+ dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n",
+ __func__,
sg_dma_len(&s->sg_tx), port->state->xmit.buf,
&sg_dma_address(&s->sg_tx));
@@ -1871,13 +1873,13 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
smr_val = serial_port_in(port, SCSMR) & 3;
if ((termios->c_cflag & CSIZE) == CS7)
- smr_val |= 0x40;
+ smr_val |= SCSMR_CHR;
if (termios->c_cflag & PARENB)
- smr_val |= 0x20;
+ smr_val |= SCSMR_PE;
if (termios->c_cflag & PARODD)
- smr_val |= 0x30;
+ smr_val |= SCSMR_PE | SCSMR_ODD;
if (termios->c_cflag & CSTOPB)
- smr_val |= 0x08;
+ smr_val |= SCSMR_STOP;
uart_update_timeout(port, termios->c_cflag, baud);
@@ -1885,7 +1887,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
__func__, smr_val, cks, t, s->cfg->scscr);
if (t >= 0) {
- serial_port_out(port, SCSMR, (smr_val & ~3) | cks);
+ serial_port_out(port, SCSMR, (smr_val & ~SCSMR_CKS) | cks);
serial_port_out(port, SCBRR, t);
reg = sci_getreg(port, HSSRR);
if (reg->size)
@@ -1933,8 +1935,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
if (s->chan_rx) {
s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
port->fifosize / 2;
- dev_dbg(port->dev,
- "DMA Rx t-out %ums, tty t-out %u jiffies\n",
+ dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n",
s->rx_timeout * 1000 / HZ, port->timeout);
if (s->rx_timeout < msecs_to_jiffies(20))
s->rx_timeout = msecs_to_jiffies(20);
@@ -1953,7 +1954,7 @@ static void sci_pm(struct uart_port *port, unsigned int state,
struct sci_port *sci_port = to_sci_port(port);
switch (state) {
- case 3:
+ case UART_PM_STATE_OFF:
sci_port_disable(sci_port);
break;
default:
@@ -2018,7 +2019,7 @@ static int sci_remap_port(struct uart_port *port)
* need to do any remapping, just cast the cookie
* directly.
*/
- port->membase = (void __iomem *)port->mapbase;
+ port->membase = (void __iomem *)(uintptr_t)port->mapbase;
}
return 0;
@@ -2389,8 +2390,7 @@ static inline int sci_probe_earlyprintk(struct platform_device *pdev)
#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
-static char banner[] __initdata =
- KERN_INFO "SuperH (H)SCI(F) driver initialized\n";
+static const char banner[] __initconst = "SuperH (H)SCI(F) driver initialized";
static struct uart_driver sci_uart_driver = {
.owner = THIS_MODULE,
@@ -2424,25 +2424,25 @@ struct sci_port_info {
static const struct of_device_id of_sci_match[] = {
{
.compatible = "renesas,scif",
- .data = (void *)&(const struct sci_port_info) {
+ .data = &(const struct sci_port_info) {
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_REGTYPE,
},
}, {
.compatible = "renesas,scifa",
- .data = (void *)&(const struct sci_port_info) {
+ .data = &(const struct sci_port_info) {
.type = PORT_SCIFA,
.regtype = SCIx_SCIFA_REGTYPE,
},
}, {
.compatible = "renesas,scifb",
- .data = (void *)&(const struct sci_port_info) {
+ .data = &(const struct sci_port_info) {
.type = PORT_SCIFB,
.regtype = SCIx_SCIFB_REGTYPE,
},
}, {
.compatible = "renesas,hscif",
- .data = (void *)&(const struct sci_port_info) {
+ .data = &(const struct sci_port_info) {
.type = PORT_HSCIF,
.regtype = SCIx_HSCIF_REGTYPE,
},
@@ -2502,11 +2502,9 @@ static int sci_probe_single(struct platform_device *dev,
/* Sanity check */
if (unlikely(index >= SCI_NPORTS)) {
- dev_notice(&dev->dev, "Attempting to register port "
- "%d when only %d are available.\n",
+ dev_notice(&dev->dev, "Attempting to register port %d when only %d are available\n",
index+1, SCI_NPORTS);
- dev_notice(&dev->dev, "Consider bumping "
- "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
+ dev_notice(&dev->dev, "Consider bumping CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
return -EINVAL;
}
@@ -2564,6 +2562,7 @@ static int sci_probe(struct platform_device *dev)
ret = cpufreq_register_notifier(&sp->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
if (unlikely(ret < 0)) {
+ uart_remove_one_port(&sci_uart_driver, &sp->port);
sci_cleanup_single(sp);
return ret;
}
@@ -2615,7 +2614,7 @@ static int __init sci_init(void)
{
int ret;
- printk(banner);
+ pr_info("%s\n", banner);
ret = uart_register_driver(&sci_uart_driver);
if (likely(ret == 0)) {
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index b7bfe24d4ebc..68b0fd4b9a6a 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -24,7 +24,6 @@
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
-#include <linux/sirfsoc_dma.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
@@ -173,7 +172,7 @@ static void sirfsoc_uart_stop_tx(struct uart_port *port)
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
+ if (sirfport->tx_dma_chan) {
if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
dmaengine_pause(sirfport->tx_dma_chan);
sirfport->tx_dma_state = TX_DMA_PAUSE;
@@ -288,7 +287,7 @@ static void sirfsoc_uart_start_tx(struct uart_port *port)
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+ if (sirfport->tx_dma_chan)
sirfsoc_uart_tx_with_dma(sirfport);
else {
sirfsoc_uart_pio_tx_chars(sirfport, 1);
@@ -310,7 +309,7 @@ static void sirfsoc_uart_stop_rx(struct uart_port *port)
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
+ if (sirfport->rx_dma_chan) {
if (!sirfport->is_marco)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
@@ -675,7 +674,7 @@ recv_char:
uart_handle_cts_change(port, cts_status);
wake_up_interruptible(&state->port.delta_msr_wait);
}
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
+ if (sirfport->rx_dma_chan) {
if (intr_status & uint_st->sirfsoc_rx_timeout)
sirfsoc_uart_handle_rx_tmo(sirfport);
if (intr_status & uint_st->sirfsoc_rx_done)
@@ -686,7 +685,7 @@ recv_char:
SIRFSOC_UART_IO_RX_MAX_CNT);
}
if (intr_status & uint_st->sirfsoc_txfifo_empty) {
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+ if (sirfport->tx_dma_chan)
sirfsoc_uart_tx_with_dma(sirfport);
else {
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
@@ -778,7 +777,7 @@ static void sirfsoc_uart_start_rx(struct uart_port *port)
wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
+ if (sirfport->rx_dma_chan)
sirfsoc_uart_start_next_rx_dma(port);
else {
if (!sirfport->is_marco)
@@ -1014,11 +1013,11 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
(sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
SIRFSOC_USP_ASYNC_DIV2_OFFSET);
}
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+ if (sirfport->tx_dma_chan)
wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
else
wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
+ if (sirfport->rx_dma_chan)
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
else
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
@@ -1049,93 +1048,6 @@ static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
clk_disable_unprepare(sirfport->clk);
}
-static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- dma_cap_mask_t dma_mask;
- struct dma_slave_config tx_slv_cfg = {
- .dst_maxburst = 2,
- };
-
- dma_cap_zero(dma_mask);
- dma_cap_set(DMA_SLAVE, dma_mask);
- sirfport->tx_dma_chan = dma_request_channel(dma_mask,
- (dma_filter_fn)sirfsoc_dma_filter_id,
- (void *)sirfport->tx_dma_no);
- if (!sirfport->tx_dma_chan) {
- dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
- sirfport->tx_dma_no);
- return -EPROBE_DEFER;
- }
- dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
-
- return 0;
-}
-
-static unsigned int sirfsoc_uart_init_rx_dma(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- dma_cap_mask_t dma_mask;
- int ret;
- int i, j;
- struct dma_slave_config slv_cfg = {
- .src_maxburst = 2,
- };
-
- dma_cap_zero(dma_mask);
- dma_cap_set(DMA_SLAVE, dma_mask);
- sirfport->rx_dma_chan = dma_request_channel(dma_mask,
- (dma_filter_fn)sirfsoc_dma_filter_id,
- (void *)sirfport->rx_dma_no);
- if (!sirfport->rx_dma_chan) {
- dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
- sirfport->rx_dma_no);
- ret = -EPROBE_DEFER;
- goto request_err;
- }
- for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
- sirfport->rx_dma_items[i].xmit.buf =
- dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
- &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
- if (!sirfport->rx_dma_items[i].xmit.buf) {
- dev_err(port->dev, "Uart alloc bufa failed\n");
- ret = -ENOMEM;
- goto alloc_coherent_err;
- }
- sirfport->rx_dma_items[i].xmit.head =
- sirfport->rx_dma_items[i].xmit.tail = 0;
- }
- dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
-
- return 0;
-alloc_coherent_err:
- for (j = 0; j < i; j++)
- dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
- sirfport->rx_dma_items[j].xmit.buf,
- sirfport->rx_dma_items[j].dma_addr);
- dma_release_channel(sirfport->rx_dma_chan);
-request_err:
- return ret;
-}
-
-static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port *sirfport)
-{
- dmaengine_terminate_all(sirfport->tx_dma_chan);
- dma_release_channel(sirfport->tx_dma_chan);
-}
-
-static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport)
-{
- int i;
- struct uart_port *port = &sirfport->port;
- dmaengine_terminate_all(sirfport->rx_dma_chan);
- dma_release_channel(sirfport->rx_dma_chan);
- for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
- dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
- sirfport->rx_dma_items[i].xmit.buf,
- sirfport->rx_dma_items[i].dma_addr);
-}
-
static int sirfsoc_uart_startup(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
@@ -1174,18 +1086,12 @@ static int sirfsoc_uart_startup(struct uart_port *port)
wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
-
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
- ret = sirfsoc_uart_init_rx_dma(port);
- if (ret)
- goto init_rx_err;
+ if (sirfport->rx_dma_chan)
wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
- SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
- SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
- SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
- }
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
- sirfsoc_uart_init_tx_dma(port);
+ SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
+ SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
+ SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
+ if (sirfport->tx_dma_chan) {
sirfport->tx_dma_state = TX_DMA_IDLE;
wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
@@ -1232,12 +1138,8 @@ static void sirfsoc_uart_shutdown(struct uart_port *port)
gpio_set_value(sirfport->rts_gpio, 1);
free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
}
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
- sirfsoc_uart_uninit_rx_dma(sirfport);
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
- sirfsoc_uart_uninit_tx_dma(sirfport);
+ if (sirfport->tx_dma_chan)
sirfport->tx_dma_state = TX_DMA_IDLE;
- }
}
static const char *sirfsoc_uart_type(struct uart_port *port)
@@ -1313,8 +1215,8 @@ sirfsoc_uart_console_setup(struct console *co, char *options)
port->cons = co;
/* default console tx/rx transfer using io mode */
- sirfport->rx_dma_no = UNVALID_DMA_CHAN;
- sirfport->tx_dma_no = UNVALID_DMA_CHAN;
+ sirfport->rx_dma_chan = NULL;
+ sirfport->tx_dma_chan = NULL;
return uart_set_options(port, co, baud, parity, bits, flow);
}
@@ -1382,6 +1284,13 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
struct uart_port *port;
struct resource *res;
int ret;
+ int i, j;
+ struct dma_slave_config slv_cfg = {
+ .src_maxburst = 2,
+ };
+ struct dma_slave_config tx_slv_cfg = {
+ .dst_maxburst = 2,
+ };
const struct of_device_id *match;
match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
@@ -1402,27 +1311,10 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
"sirf,uart-has-rtscts");
- if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart")) {
+ if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart"))
sirfport->uart_reg->uart_type = SIRF_REAL_UART;
- if (of_property_read_u32(pdev->dev.of_node,
- "sirf,uart-dma-rx-channel",
- &sirfport->rx_dma_no))
- sirfport->rx_dma_no = UNVALID_DMA_CHAN;
- if (of_property_read_u32(pdev->dev.of_node,
- "sirf,uart-dma-tx-channel",
- &sirfport->tx_dma_no))
- sirfport->tx_dma_no = UNVALID_DMA_CHAN;
- }
if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
sirfport->uart_reg->uart_type = SIRF_USP_UART;
- if (of_property_read_u32(pdev->dev.of_node,
- "sirf,usp-dma-rx-channel",
- &sirfport->rx_dma_no))
- sirfport->rx_dma_no = UNVALID_DMA_CHAN;
- if (of_property_read_u32(pdev->dev.of_node,
- "sirf,usp-dma-tx-channel",
- &sirfport->tx_dma_no))
- sirfport->tx_dma_no = UNVALID_DMA_CHAN;
if (!sirfport->hw_flow_ctrl)
goto usp_no_flow_control;
if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
@@ -1515,8 +1407,32 @@ usp_no_flow_control:
goto port_err;
}
- return 0;
+ sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
+ for (i = 0; sirfport->rx_dma_chan && i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
+ sirfport->rx_dma_items[i].xmit.buf =
+ dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+ &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
+ if (!sirfport->rx_dma_items[i].xmit.buf) {
+ dev_err(port->dev, "Uart alloc bufa failed\n");
+ ret = -ENOMEM;
+ goto alloc_coherent_err;
+ }
+ sirfport->rx_dma_items[i].xmit.head =
+ sirfport->rx_dma_items[i].xmit.tail = 0;
+ }
+ if (sirfport->rx_dma_chan)
+ dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
+ sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
+ if (sirfport->tx_dma_chan)
+ dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
+ return 0;
+alloc_coherent_err:
+ for (j = 0; j < i; j++)
+ dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+ sirfport->rx_dma_items[j].xmit.buf,
+ sirfport->rx_dma_items[j].dma_addr);
+ dma_release_channel(sirfport->rx_dma_chan);
port_err:
clk_put(sirfport->clk);
err:
@@ -1529,6 +1445,19 @@ static int sirfsoc_uart_remove(struct platform_device *pdev)
struct uart_port *port = &sirfport->port;
clk_put(sirfport->clk);
uart_remove_one_port(&sirfsoc_uart_drv, port);
+ if (sirfport->rx_dma_chan) {
+ int i;
+ dmaengine_terminate_all(sirfport->rx_dma_chan);
+ dma_release_channel(sirfport->rx_dma_chan);
+ for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
+ dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+ sirfport->rx_dma_items[i].xmit.buf,
+ sirfport->rx_dma_items[i].dma_addr);
+ }
+ if (sirfport->tx_dma_chan) {
+ dmaengine_terminate_all(sirfport->tx_dma_chan);
+ dma_release_channel(sirfport->tx_dma_chan);
+ }
return 0;
}
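sirfsoc_uart stops reading DMA channel numbers out of device-tree properties and instead asks the dmaengine core for named "rx"/"tx" slave channels in probe, only configuring them and allocating the RX ring buffers when a channel is actually provided. A trimmed sketch of that acquisition path, with the buffer bookkeeping left out and the helper name invented:

#include <linux/device.h>
#include <linux/dmaengine.h>

static int demo_setup_dma(struct device *dev)
{
        struct dma_slave_config rx_cfg = { .src_maxburst = 2 };
        struct dma_slave_config tx_cfg = { .dst_maxburst = 2 };
        struct dma_chan *rx, *tx;

        /* NULL simply means "no DMA"; the driver then stays on PIO */
        rx = dma_request_slave_channel(dev, "rx");
        if (rx)
                dmaengine_slave_config(rx, &rx_cfg);

        tx = dma_request_slave_channel(dev, "tx");
        if (tx)
                dmaengine_slave_config(tx, &tx_cfg);

        return 0;
}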
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index b7d679c0881b..8a6eddad2f3c 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -392,9 +392,6 @@ struct sirfsoc_uart_register sirfsoc_uart = {
/* Indicate how many buffers used */
#define SIRFSOC_RX_LOOP_BUF_CNT 2
-/* Indicate if DMA channel valid */
-#define IS_DMA_CHAN_VALID(x) ((x) != -1)
-#define UNVALID_DMA_CHAN -1
/* For Fast Baud Rate Calculation */
struct sirfsoc_baudrate_to_regv {
unsigned int baud_rate;
@@ -423,8 +420,6 @@ struct sirfsoc_uart_port {
/* for SiRFmarco, there are SET/CLR for UART_INT_EN */
bool is_marco;
struct sirfsoc_uart_register *uart_reg;
- int rx_dma_no;
- int tx_dma_no;
struct dma_chan *rx_dma_chan;
struct dma_chan *tx_dma_chan;
dma_addr_t tx_dma_addr;
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 5ae14b46cce0..d48e040cd8c5 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -7866,6 +7866,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+ memset(&new_line, 0, sizeof(new_line));
switch (flags){
case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 144202eef6fe..53ba8537de8d 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1766,6 +1766,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+ memset(&new_line, 0, sizeof(new_line));
switch (flags){
case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index b0e540137e39..90ca082935f6 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -65,6 +65,7 @@ static void tty_audit_log(const char *description, int major, int minor,
{
struct audit_buffer *ab;
struct task_struct *tsk = current;
+ pid_t pid = task_pid_nr(tsk);
uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
unsigned int sessionid = audit_get_sessionid(tsk);
@@ -74,7 +75,7 @@ static void tty_audit_log(const char *description, int major, int minor,
char name[sizeof(tsk->comm)];
audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
- " minor=%d comm=", description, tsk->pid, uid,
+ " minor=%d comm=", description, pid, uid,
loginuid, sessionid, major, minor);
get_task_comm(name, tsk);
audit_log_untrustedstring(ab, name);
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 765125dff20e..8ebd9f88a6f6 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -351,14 +351,11 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags);
* Takes any pending buffers and transfers their ownership to the
* ldisc side of the queue. It then schedules those characters for
* processing by the line discipline.
- * Note that this function can only be used when the low_latency flag
- * is unset. Otherwise the workqueue won't be flushed.
*/
void tty_schedule_flip(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- WARN_ON(port->low_latency);
buf->tail->commit = buf->tail->used;
schedule_work(&buf->work);
@@ -482,17 +479,15 @@ static void flush_to_ldisc(struct work_struct *work)
*/
void tty_flush_to_ldisc(struct tty_struct *tty)
{
- if (!tty->port->low_latency)
- flush_work(&tty->port->buf.work);
+ flush_work(&tty->port->buf.work);
}
/**
* tty_flip_buffer_push - terminal
* @port: tty port to push
*
- * Queue a push of the terminal flip buffers to the line discipline. This
- * function must not be called from IRQ context if port->low_latency is
- * set.
+ * Queue a push of the terminal flip buffers to the line discipline.
+ * Can be called from IRQ/atomic context.
*
* In the event of the queue being busy for flipping the work will be
* held off and retried later.
@@ -500,14 +495,7 @@ void tty_flush_to_ldisc(struct tty_struct *tty)
void tty_flip_buffer_push(struct tty_port *port)
{
- struct tty_bufhead *buf = &port->buf;
-
- buf->tail->commit = buf->tail->used;
-
- if (port->low_latency)
- flush_to_ldisc(&buf->work);
- else
- schedule_work(&buf->work);
+ tty_schedule_flip(port);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
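Note: with the low_latency special case gone, tty_flip_buffer_push() is now just tty_schedule_flip(): it commits the tail buffer and schedules the ldisc work, never running the line discipline inline, which is why the comment now says it can be called from IRQ/atomic context. A typical driver RX path then looks like this sketch; demo_read_rx_fifo() and passing the tty_port as dev_id are made-up assumptions for illustration.

	#include <linux/interrupt.h>
	#include <linux/tty.h>
	#include <linux/tty_flip.h>

	unsigned char demo_read_rx_fifo(void);		/* hypothetical device FIFO read */

	static irqreturn_t demo_uart_irq(int irq, void *dev_id)
	{
		struct tty_port *port = dev_id;		/* assumed: port passed as dev_id */
		unsigned char ch = demo_read_rx_fifo();

		tty_insert_flip_char(port, ch, TTY_NORMAL);
		tty_flip_buffer_push(port);		/* only commits + schedule_work() */
		return IRQ_HANDLED;
	}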
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index c74a00ad7add..d3448a90f0f9 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1271,12 +1271,13 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
*
* Locking: None
*/
-static void tty_line_name(struct tty_driver *driver, int index, char *p)
+static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
{
if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
- strcpy(p, driver->name);
+ return sprintf(p, "%s", driver->name);
else
- sprintf(p, "%s%d", driver->name, index + driver->name_base);
+ return sprintf(p, "%s%d", driver->name,
+ index + driver->name_base);
}
/**
@@ -3545,9 +3546,19 @@ static ssize_t show_cons_active(struct device *dev,
if (i >= ARRAY_SIZE(cs))
break;
}
- while (i--)
- count += sprintf(buf + count, "%s%d%c",
- cs[i]->name, cs[i]->index, i ? ' ':'\n');
+ while (i--) {
+ int index = cs[i]->index;
+ struct tty_driver *drv = cs[i]->device(cs[i], &index);
+
+ /* don't resolve tty0 as some programs depend on it */
+ if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR))
+ count += tty_line_name(drv, index, buf + count);
+ else
+ count += sprintf(buf + count, "%s%d",
+ cs[i]->name, cs[i]->index);
+
+ count += sprintf(buf + count, "%c", i ? ' ':'\n');
+ }
console_unlock();
return count;
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index d8a55e87877f..0ffb0cbe2823 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -39,17 +39,10 @@
lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i) \
lock_release(&(l)->dep_map, n, i)
-# ifdef CONFIG_PROVE_LOCKING
-# define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 2, NULL, i)
-# define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 2, n, i)
-# define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 2, NULL, i)
-# define lockdep_release(l, n, i) __rel(l, n, i)
-# else
-# define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i)
-# define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i)
-# define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i)
-# define lockdep_release(l, n, i) __rel(l, n, i)
-# endif
+#define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i)
+#define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i)
+#define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i)
+#define lockdep_release(l, n, i) __rel(l, n, i)
#else
# define lockdep_acquire(l, s, t, i) do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i) do { } while (0)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 23b5d32954bf..3ad0b61e35b4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1590,9 +1590,9 @@ static void restore_cur(struct vc_data *vc)
vc->vc_need_wrap = 0;
}
-enum { ESnormal, ESesc, ESsquare, ESgetpars, ESgotpars, ESfunckey,
+enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey,
EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd,
- ESpalette };
+ ESpalette, ESosc };
/* console_lock is held (except via vc_init()) */
static void reset_terminal(struct vc_data *vc, int do_clear)
@@ -1652,11 +1652,15 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
* Control characters can be used in the _middle_
* of an escape sequence.
*/
+ if (vc->vc_state == ESosc && c>=8 && c<=13) /* ... except for OSC */
+ return;
switch (c) {
case 0:
return;
case 7:
- if (vc->vc_bell_duration)
+ if (vc->vc_state == ESosc)
+ vc->vc_state = ESnormal;
+ else if (vc->vc_bell_duration)
kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
return;
case 8:
@@ -1767,7 +1771,9 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
} else if (c=='R') { /* reset palette */
reset_palette(vc);
vc->vc_state = ESnormal;
- } else
+ } else if (c>='0' && c<='9')
+ vc->vc_state = ESosc;
+ else
vc->vc_state = ESnormal;
return;
case ESpalette:
@@ -1807,9 +1813,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_par[vc->vc_npar] *= 10;
vc->vc_par[vc->vc_npar] += c - '0';
return;
- } else
- vc->vc_state = ESgotpars;
- case ESgotpars:
+ }
vc->vc_state = ESnormal;
switch(c) {
case 'h':
@@ -2023,6 +2027,8 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_translate = set_translate(vc->vc_G1_charset, vc);
vc->vc_state = ESnormal;
return;
+ case ESosc:
+ return;
default:
vc->vc_state = ESnormal;
}
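Note: the new ESosc state makes the console parser treat "ESC ] <digit>" as the start of an xterm-style Operating System Command and discard everything up to the terminating BEL, instead of spilling most of the payload onto the screen. A small userspace illustration of such a sequence (the title text is arbitrary):

	#include <unistd.h>

	int main(void)
	{
		/* ESC ] 0 ; <text> BEL — xterm "set window title"; with ESosc the
		 * VT console now swallows it silently instead of printing ";demo". */
		static const char osc[] = "\033]0;demo\007";

		write(STDOUT_FILENO, osc, sizeof(osc) - 1);
		return 0;
	}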
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 2e6b832e004b..e0cad4418085 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -2,10 +2,6 @@
# USB device configuration
#
-# These are unused now, remove them once they are no longer selected
-config USB_ARCH_HAS_OHCI
- bool
-
config USB_OHCI_BIG_ENDIAN_DESC
bool
@@ -17,18 +13,12 @@ config USB_OHCI_LITTLE_ENDIAN
default n if STB03xxx || PPC_MPC52xx
default y
-config USB_ARCH_HAS_EHCI
- bool
-
config USB_EHCI_BIG_ENDIAN_MMIO
bool
config USB_EHCI_BIG_ENDIAN_DESC
bool
-config USB_ARCH_HAS_XHCI
- bool
-
menuconfig USB_SUPPORT
bool "USB support"
depends on HAS_IOMEM
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index 7345d2115af2..480bd4d5710a 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -10,6 +10,7 @@ ci_hdrc-$(CONFIG_USB_CHIPIDEA_DEBUG) += debug.o
# Glue/Bridge layers go here
obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_msm.o
+obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_zevio.o
# PCI doesn't provide stubs, need to check
ifneq ($(CONFIG_PCI),)
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
index a85713165688..83d06c1455b7 100644
--- a/drivers/usb/chipidea/bits.h
+++ b/drivers/usb/chipidea/bits.h
@@ -50,12 +50,14 @@
#define PORTSC_PTC (0x0FUL << 16)
#define PORTSC_PHCD(d) ((d) ? BIT(22) : BIT(23))
/* PTS and PTW for non lpm version only */
+#define PORTSC_PFSC BIT(24)
#define PORTSC_PTS(d) \
(u32)((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0))
#define PORTSC_PTW BIT(28)
#define PORTSC_STS BIT(29)
/* DEVLC */
+#define DEVLC_PFSC BIT(23)
#define DEVLC_PSPD (0x03UL << 25)
#define DEVLC_PSPD_HS (0x02UL << 25)
#define DEVLC_PTW BIT(27)
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 88b80f7728e4..e206406ae1d9 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -196,8 +196,6 @@ struct ci_hdrc {
struct ci_hdrc_platform_data *platdata;
int vbus_active;
- /* FIXME: some day, we'll not use global phy */
- bool global_phy;
struct usb_phy *transceiver;
struct usb_hcd *hcd;
struct dentry *debugfs;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index c00f77257d36..2e58f8dfd311 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -96,7 +96,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
{
struct ci_hdrc_imx_data *data;
struct ci_hdrc_platform_data pdata = {
- .name = "ci_hdrc_imx",
+ .name = dev_name(&pdev->dev),
.capoffset = DEF_CAPOFFSET,
.flags = CI_HDRC_REQUIRE_TRANSCEIVER |
CI_HDRC_DISABLE_STREAMING,
diff --git a/drivers/usb/chipidea/ci_hdrc_zevio.c b/drivers/usb/chipidea/ci_hdrc_zevio.c
new file mode 100644
index 000000000000..3bf6489ef5ec
--- /dev/null
+++ b/drivers/usb/chipidea/ci_hdrc_zevio.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Based off drivers/usb/chipidea/ci_hdrc_msm.c
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/chipidea.h>
+
+#include "ci.h"
+
+static struct ci_hdrc_platform_data ci_hdrc_zevio_platdata = {
+ .name = "ci_hdrc_zevio",
+ .flags = CI_HDRC_REGS_SHARED,
+ .capoffset = DEF_CAPOFFSET,
+};
+
+static int ci_hdrc_zevio_probe(struct platform_device *pdev)
+{
+ struct platform_device *ci_pdev;
+
+ dev_dbg(&pdev->dev, "ci_hdrc_zevio_probe\n");
+
+ ci_pdev = ci_hdrc_add_device(&pdev->dev,
+ pdev->resource, pdev->num_resources,
+ &ci_hdrc_zevio_platdata);
+
+ if (IS_ERR(ci_pdev)) {
+ dev_err(&pdev->dev, "ci_hdrc_add_device failed!\n");
+ return PTR_ERR(ci_pdev);
+ }
+
+ platform_set_drvdata(pdev, ci_pdev);
+
+ return 0;
+}
+
+static int ci_hdrc_zevio_remove(struct platform_device *pdev)
+{
+ struct platform_device *ci_pdev = platform_get_drvdata(pdev);
+
+ ci_hdrc_remove_device(ci_pdev);
+
+ return 0;
+}
+
+static const struct of_device_id ci_hdrc_zevio_dt_ids[] = {
+ { .compatible = "lsi,zevio-usb", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver ci_hdrc_zevio_driver = {
+ .probe = ci_hdrc_zevio_probe,
+ .remove = ci_hdrc_zevio_remove,
+ .driver = {
+ .name = "zevio_usb",
+ .owner = THIS_MODULE,
+ .of_match_table = ci_hdrc_zevio_dt_ids,
+ },
+};
+
+MODULE_DEVICE_TABLE(of, ci_hdrc_zevio_dt_ids);
+module_platform_driver(ci_hdrc_zevio_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 33f22bc6ad7f..ca6831c5b763 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -64,6 +64,7 @@
#include <linux/usb/otg.h>
#include <linux/usb/chipidea.h>
#include <linux/usb/of.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/regulator/consumer.h>
@@ -298,6 +299,13 @@ int hw_device_reset(struct ci_hdrc *ci, u32 mode)
if (ci->platdata->flags & CI_HDRC_DISABLE_STREAMING)
hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
+ if (ci->platdata->flags & CI_HDRC_FORCE_FULLSPEED) {
+ if (ci->hw_bank.lpm)
+ hw_write(ci, OP_DEVLC, DEVLC_PFSC, DEVLC_PFSC);
+ else
+ hw_write(ci, OP_PORTSC, PORTSC_PFSC, PORTSC_PFSC);
+ }
+
/* USBMODE should be configured step by step */
hw_write(ci, OP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
hw_write(ci, OP_USBMODE, USBMODE_CM, mode);
@@ -412,6 +420,9 @@ static int ci_get_platdata(struct device *dev,
}
}
+ if (of_usb_get_maximum_speed(dev->of_node) == USB_SPEED_FULL)
+ platdata->flags |= CI_HDRC_FORCE_FULLSPEED;
+
return 0;
}
@@ -496,33 +507,6 @@ static void ci_get_otg_capable(struct ci_hdrc *ci)
}
}
-static int ci_usb_phy_init(struct ci_hdrc *ci)
-{
- if (ci->platdata->phy) {
- ci->transceiver = ci->platdata->phy;
- return usb_phy_init(ci->transceiver);
- } else {
- ci->global_phy = true;
- ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
- if (IS_ERR(ci->transceiver))
- ci->transceiver = NULL;
-
- return 0;
- }
-}
-
-static void ci_usb_phy_destroy(struct ci_hdrc *ci)
-{
- if (!ci->transceiver)
- return;
-
- otg_set_peripheral(ci->transceiver->otg, NULL);
- if (ci->global_phy)
- usb_put_phy(ci->transceiver);
- else
- usb_phy_shutdown(ci->transceiver);
-}
-
static int ci_hdrc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -532,7 +516,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
int ret;
enum usb_dr_mode dr_mode;
- if (!dev->platform_data) {
+ if (!dev_get_platdata(dev)) {
dev_err(dev, "platform data missing\n");
return -ENODEV;
}
@@ -549,7 +533,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
}
ci->dev = dev;
- ci->platdata = dev->platform_data;
+ ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
CI_HDRC_IMX28_WRITE_FIX);
@@ -561,7 +545,26 @@ static int ci_hdrc_probe(struct platform_device *pdev)
hw_phymode_configure(ci);
- ret = ci_usb_phy_init(ci);
+ if (ci->platdata->phy)
+ ci->transceiver = ci->platdata->phy;
+ else
+ ci->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+
+ if (IS_ERR(ci->transceiver)) {
+ ret = PTR_ERR(ci->transceiver);
+ /*
+ * if -ENXIO is returned, it means PHY layer wasn't
+ * enabled, so it makes no sense to return -EPROBE_DEFER
+ * in that case, since no PHY driver will ever probe.
+ */
+ if (ret == -ENXIO)
+ return ret;
+
+ dev_err(dev, "no usb2 phy configured\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = usb_phy_init(ci->transceiver);
if (ret) {
dev_err(dev, "unable to init phy: %d\n", ret);
return ret;
@@ -572,8 +575,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ci->irq = platform_get_irq(pdev, 0);
if (ci->irq < 0) {
dev_err(dev, "missing IRQ\n");
- ret = -ENODEV;
- goto destroy_phy;
+ ret = ci->irq;
+ goto deinit_phy;
}
ci_get_otg_capable(ci);
@@ -590,23 +593,12 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ret = ci_hdrc_gadget_init(ci);
if (ret)
dev_info(dev, "doesn't support gadget\n");
- if (!ret && ci->transceiver) {
- ret = otg_set_peripheral(ci->transceiver->otg,
- &ci->gadget);
- /*
- * If we implement all USB functions using chipidea drivers,
- * it doesn't need to call above API, meanwhile, if we only
- * use gadget function, calling above API is useless.
- */
- if (ret && ret != -ENOTSUPP)
- goto destroy_phy;
- }
}
if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
dev_err(dev, "no supported roles\n");
ret = -ENODEV;
- goto destroy_phy;
+ goto deinit_phy;
}
if (ci->is_otg) {
@@ -663,8 +655,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
free_irq(ci->irq, ci);
stop:
ci_role_destroy(ci);
-destroy_phy:
- ci_usb_phy_destroy(ci);
+deinit_phy:
+ usb_phy_shutdown(ci->transceiver);
return ret;
}
@@ -677,7 +669,8 @@ static int ci_hdrc_remove(struct platform_device *pdev)
free_irq(ci->irq, ci);
ci_role_destroy(ci);
ci_hdrc_enter_lpm(ci, true);
- ci_usb_phy_destroy(ci);
+ usb_phy_shutdown(ci->transceiver);
+ kfree(ci->hw_bank.regmap);
return 0;
}
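Note: the core.c probe path replaces the ci_usb_phy_init()/ci_usb_phy_destroy() pair with a device-managed lookup. devm_usb_get_phy() drops the reference automatically on driver detach, which is why the global_phy bookkeeping and the usb_put_phy() calls disappear. The error handling mirrors the hunk above and is worth spelling out as a sketch (the helper name and the out-parameter are illustrative):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/usb/phy.h>

	static int demo_probe_phy(struct device *dev, struct usb_phy **out)
	{
		struct usb_phy *phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);

		if (IS_ERR(phy)) {
			int err = PTR_ERR(phy);

			/* -ENXIO: the PHY layer isn't enabled at all, so
			 * -EPROBE_DEFER would wait forever; fail hard. */
			if (err == -ENXIO)
				return err;
			return -EPROBE_DEFER;	/* a PHY driver may probe later */
		}

		*out = phy;
		return usb_phy_init(phy);
	}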
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 4ab2cb62dfce..7739c64ef259 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -178,19 +178,6 @@ static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
}
/**
- * hw_test_and_clear_setup_status: test & clear setup status (execute without
- * interruption)
- * @n: endpoint number
- *
- * This function returns setup status
- */
-static int hw_test_and_clear_setup_status(struct ci_hdrc *ci, int n)
-{
- n = ep_to_bit(ci, n);
- return hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(n));
-}
-
-/**
* hw_ep_prime: primes endpoint (execute without interruption)
* @num: endpoint number
* @dir: endpoint direction
@@ -962,6 +949,156 @@ __acquires(hwep->lock)
}
/**
+ * isr_setup_packet_handler: setup packet handler
+ * @ci: UDC descriptor
+ *
+ * This function handles setup packet
+ */
+static void isr_setup_packet_handler(struct ci_hdrc *ci)
+__releases(ci->lock)
+__acquires(ci->lock)
+{
+ struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
+ struct usb_ctrlrequest req;
+ int type, num, dir, err = -EINVAL;
+ u8 tmode = 0;
+
+ /*
+ * Flush data and handshake transactions of previous
+ * setup packet.
+ */
+ _ep_nuke(ci->ep0out);
+ _ep_nuke(ci->ep0in);
+
+ /* read_setup_packet */
+ do {
+ hw_test_and_set_setup_guard(ci);
+ memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
+ } while (!hw_test_and_clear_setup_guard(ci));
+
+ type = req.bRequestType;
+
+ ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
+
+ switch (req.bRequest) {
+ case USB_REQ_CLEAR_FEATURE:
+ if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+ le16_to_cpu(req.wValue) ==
+ USB_ENDPOINT_HALT) {
+ if (req.wLength != 0)
+ break;
+ num = le16_to_cpu(req.wIndex);
+ dir = num & USB_ENDPOINT_DIR_MASK;
+ num &= USB_ENDPOINT_NUMBER_MASK;
+ if (dir) /* TX */
+ num += ci->hw_ep_max / 2;
+ if (!ci->ci_hw_ep[num].wedge) {
+ spin_unlock(&ci->lock);
+ err = usb_ep_clear_halt(
+ &ci->ci_hw_ep[num].ep);
+ spin_lock(&ci->lock);
+ if (err)
+ break;
+ }
+ err = isr_setup_status_phase(ci);
+ } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
+ le16_to_cpu(req.wValue) ==
+ USB_DEVICE_REMOTE_WAKEUP) {
+ if (req.wLength != 0)
+ break;
+ ci->remote_wakeup = 0;
+ err = isr_setup_status_phase(ci);
+ } else {
+ goto delegate;
+ }
+ break;
+ case USB_REQ_GET_STATUS:
+ if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
+ type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
+ type != (USB_DIR_IN|USB_RECIP_INTERFACE))
+ goto delegate;
+ if (le16_to_cpu(req.wLength) != 2 ||
+ le16_to_cpu(req.wValue) != 0)
+ break;
+ err = isr_get_status_response(ci, &req);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
+ goto delegate;
+ if (le16_to_cpu(req.wLength) != 0 ||
+ le16_to_cpu(req.wIndex) != 0)
+ break;
+ ci->address = (u8)le16_to_cpu(req.wValue);
+ ci->setaddr = true;
+ err = isr_setup_status_phase(ci);
+ break;
+ case USB_REQ_SET_FEATURE:
+ if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+ le16_to_cpu(req.wValue) ==
+ USB_ENDPOINT_HALT) {
+ if (req.wLength != 0)
+ break;
+ num = le16_to_cpu(req.wIndex);
+ dir = num & USB_ENDPOINT_DIR_MASK;
+ num &= USB_ENDPOINT_NUMBER_MASK;
+ if (dir) /* TX */
+ num += ci->hw_ep_max / 2;
+
+ spin_unlock(&ci->lock);
+ err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
+ spin_lock(&ci->lock);
+ if (!err)
+ isr_setup_status_phase(ci);
+ } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
+ if (req.wLength != 0)
+ break;
+ switch (le16_to_cpu(req.wValue)) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ ci->remote_wakeup = 1;
+ err = isr_setup_status_phase(ci);
+ break;
+ case USB_DEVICE_TEST_MODE:
+ tmode = le16_to_cpu(req.wIndex) >> 8;
+ switch (tmode) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ ci->test_mode = tmode;
+ err = isr_setup_status_phase(
+ ci);
+ break;
+ default:
+ break;
+ }
+ default:
+ goto delegate;
+ }
+ } else {
+ goto delegate;
+ }
+ break;
+ default:
+delegate:
+ if (req.wLength == 0) /* no data phase */
+ ci->ep0_dir = TX;
+
+ spin_unlock(&ci->lock);
+ err = ci->driver->setup(&ci->gadget, &req);
+ spin_lock(&ci->lock);
+ break;
+ }
+
+ if (err < 0) {
+ spin_unlock(&ci->lock);
+ if (usb_ep_set_halt(&hwep->ep))
+ dev_err(ci->dev, "error: ep_set_halt\n");
+ spin_lock(&ci->lock);
+ }
+}
+
+/**
* isr_tr_complete_handler: transaction complete interrupt handler
* @ci: UDC descriptor
*
@@ -972,12 +1109,10 @@ __releases(ci->lock)
__acquires(ci->lock)
{
unsigned i;
- u8 tmode = 0;
+ int err;
for (i = 0; i < ci->hw_ep_max; i++) {
struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
- int type, num, dir, err = -EINVAL;
- struct usb_ctrlrequest req;
if (hwep->ep.desc == NULL)
continue; /* not configured */
@@ -997,148 +1132,10 @@ __acquires(ci->lock)
}
}
- if (hwep->type != USB_ENDPOINT_XFER_CONTROL ||
- !hw_test_and_clear_setup_status(ci, i))
- continue;
-
- if (i != 0) {
- dev_warn(ci->dev, "ctrl traffic at endpoint %d\n", i);
- continue;
- }
-
- /*
- * Flush data and handshake transactions of previous
- * setup packet.
- */
- _ep_nuke(ci->ep0out);
- _ep_nuke(ci->ep0in);
-
- /* read_setup_packet */
- do {
- hw_test_and_set_setup_guard(ci);
- memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
- } while (!hw_test_and_clear_setup_guard(ci));
-
- type = req.bRequestType;
-
- ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
-
- switch (req.bRequest) {
- case USB_REQ_CLEAR_FEATURE:
- if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
- le16_to_cpu(req.wValue) ==
- USB_ENDPOINT_HALT) {
- if (req.wLength != 0)
- break;
- num = le16_to_cpu(req.wIndex);
- dir = num & USB_ENDPOINT_DIR_MASK;
- num &= USB_ENDPOINT_NUMBER_MASK;
- if (dir) /* TX */
- num += ci->hw_ep_max/2;
- if (!ci->ci_hw_ep[num].wedge) {
- spin_unlock(&ci->lock);
- err = usb_ep_clear_halt(
- &ci->ci_hw_ep[num].ep);
- spin_lock(&ci->lock);
- if (err)
- break;
- }
- err = isr_setup_status_phase(ci);
- } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
- le16_to_cpu(req.wValue) ==
- USB_DEVICE_REMOTE_WAKEUP) {
- if (req.wLength != 0)
- break;
- ci->remote_wakeup = 0;
- err = isr_setup_status_phase(ci);
- } else {
- goto delegate;
- }
- break;
- case USB_REQ_GET_STATUS:
- if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
- type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
- type != (USB_DIR_IN|USB_RECIP_INTERFACE))
- goto delegate;
- if (le16_to_cpu(req.wLength) != 2 ||
- le16_to_cpu(req.wValue) != 0)
- break;
- err = isr_get_status_response(ci, &req);
- break;
- case USB_REQ_SET_ADDRESS:
- if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
- goto delegate;
- if (le16_to_cpu(req.wLength) != 0 ||
- le16_to_cpu(req.wIndex) != 0)
- break;
- ci->address = (u8)le16_to_cpu(req.wValue);
- ci->setaddr = true;
- err = isr_setup_status_phase(ci);
- break;
- case USB_REQ_SET_FEATURE:
- if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
- le16_to_cpu(req.wValue) ==
- USB_ENDPOINT_HALT) {
- if (req.wLength != 0)
- break;
- num = le16_to_cpu(req.wIndex);
- dir = num & USB_ENDPOINT_DIR_MASK;
- num &= USB_ENDPOINT_NUMBER_MASK;
- if (dir) /* TX */
- num += ci->hw_ep_max/2;
-
- spin_unlock(&ci->lock);
- err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
- spin_lock(&ci->lock);
- if (!err)
- isr_setup_status_phase(ci);
- } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
- if (req.wLength != 0)
- break;
- switch (le16_to_cpu(req.wValue)) {
- case USB_DEVICE_REMOTE_WAKEUP:
- ci->remote_wakeup = 1;
- err = isr_setup_status_phase(ci);
- break;
- case USB_DEVICE_TEST_MODE:
- tmode = le16_to_cpu(req.wIndex) >> 8;
- switch (tmode) {
- case TEST_J:
- case TEST_K:
- case TEST_SE0_NAK:
- case TEST_PACKET:
- case TEST_FORCE_EN:
- ci->test_mode = tmode;
- err = isr_setup_status_phase(
- ci);
- break;
- default:
- break;
- }
- default:
- goto delegate;
- }
- } else {
- goto delegate;
- }
- break;
- default:
-delegate:
- if (req.wLength == 0) /* no data phase */
- ci->ep0_dir = TX;
-
- spin_unlock(&ci->lock);
- err = ci->driver->setup(&ci->gadget, &req);
- spin_lock(&ci->lock);
- break;
- }
-
- if (err < 0) {
- spin_unlock(&ci->lock);
- if (usb_ep_set_halt(&hwep->ep))
- dev_err(ci->dev, "error: ep_set_halt\n");
- spin_lock(&ci->lock);
- }
+ /* Only handle setup packet below */
+ if (i == 0 &&
+ hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(0)))
+ isr_setup_packet_handler(ci);
}
}
@@ -1193,6 +1190,11 @@ static int ep_enable(struct usb_ep *ep,
hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE); /* needed? */
+ if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
+ dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
+ retval = -EINVAL;
+ }
+
/*
* Enable endpoints in the HW other than ep0 as ep0
* is always enabled
@@ -1837,12 +1839,6 @@ void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
dma_pool_destroy(ci->td_pool);
dma_pool_destroy(ci->qh_pool);
-
- if (ci->transceiver) {
- otg_set_peripheral(ci->transceiver->otg, NULL);
- if (ci->global_phy)
- usb_put_phy(ci->transceiver);
- }
}
static int udc_id_switch_for_device(struct ci_hdrc *ci)
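Note: requests that isr_setup_packet_handler() does not handle itself fall through to the delegate label and are handed to the gadget driver's ->setup() callback with the UDC lock dropped; a negative return from that path ends up stalling ep0 via the usb_ep_set_halt() at the bottom of the handler. A minimal gadget-side counterpart might look like the following sketch (illustrative only, not from the patch):

	#include <linux/errno.h>
	#include <linux/usb/ch9.h>
	#include <linux/usb/gadget.h>

	static int demo_gadget_setup(struct usb_gadget *gadget,
				     const struct usb_ctrlrequest *ctrl)
	{
		switch (ctrl->bRequest) {
		case USB_REQ_GET_DESCRIPTOR:
			/* queue the descriptor on ep0 here ... */
			return 0;
		default:
			/* anything unsupported: the UDC will halt ep0 */
			return -EOPNOTSUPP;
		}
	}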
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index fed7f68d025d..cb8e99156f5a 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -10,7 +10,6 @@ config USB_DEBUG
config USB_ANNOUNCE_NEW_DEVICES
bool "USB announce new devices"
- default N
help
Say Y here if you want the USB core to always announce the
idVendor, idProduct, Manufacturer, Product, and SerialNumber
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 062967c90b2a..1ab4df1de2da 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -10,7 +10,6 @@
#define USB_MAXALTSETTING 128 /* Hard limit */
-#define USB_MAXENDPOINTS 30 /* Hard limit */
#define USB_MAXCONFIG 8 /* Arbitrary limit */
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 90e18f6fa2bb..257876ea03a1 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -62,7 +62,7 @@
/* Mutual exclusion for removal, open, and release */
DEFINE_MUTEX(usbfs_mutex);
-struct dev_state {
+struct usb_dev_state {
struct list_head list; /* state list */
struct usb_device *dev;
struct file *file;
@@ -81,7 +81,7 @@ struct dev_state {
struct async {
struct list_head asynclist;
- struct dev_state *ps;
+ struct usb_dev_state *ps;
struct pid *pid;
const struct cred *cred;
unsigned int signr;
@@ -151,7 +151,7 @@ static void usbfs_decrease_memory_usage(unsigned amount)
atomic_sub(amount, &usbfs_memory_usage);
}
-static int connected(struct dev_state *ps)
+static int connected(struct usb_dev_state *ps)
{
return (!list_empty(&ps->list) &&
ps->dev->state != USB_STATE_NOTATTACHED);
@@ -184,7 +184,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
- struct dev_state *ps = file->private_data;
+ struct usb_dev_state *ps = file->private_data;
struct usb_device *dev = ps->dev;
ssize_t ret = 0;
unsigned len;
@@ -307,7 +307,7 @@ static void free_async(struct async *as)
static void async_newpending(struct async *as)
{
- struct dev_state *ps = as->ps;
+ struct usb_dev_state *ps = as->ps;
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
@@ -317,7 +317,7 @@ static void async_newpending(struct async *as)
static void async_removepending(struct async *as)
{
- struct dev_state *ps = as->ps;
+ struct usb_dev_state *ps = as->ps;
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
@@ -325,7 +325,7 @@ static void async_removepending(struct async *as)
spin_unlock_irqrestore(&ps->lock, flags);
}
-static struct async *async_getcompleted(struct dev_state *ps)
+static struct async *async_getcompleted(struct usb_dev_state *ps)
{
unsigned long flags;
struct async *as = NULL;
@@ -340,7 +340,7 @@ static struct async *async_getcompleted(struct dev_state *ps)
return as;
}
-static struct async *async_getpending(struct dev_state *ps,
+static struct async *async_getpending(struct usb_dev_state *ps,
void __user *userurb)
{
struct async *as;
@@ -448,7 +448,7 @@ static int copy_urb_data_to_user(u8 __user *userbuffer, struct urb *urb)
#define AS_CONTINUATION 1
#define AS_UNLINK 2
-static void cancel_bulk_urbs(struct dev_state *ps, unsigned bulk_addr)
+static void cancel_bulk_urbs(struct usb_dev_state *ps, unsigned bulk_addr)
__releases(ps->lock)
__acquires(ps->lock)
{
@@ -489,7 +489,7 @@ __acquires(ps->lock)
static void async_completed(struct urb *urb)
{
struct async *as = urb->context;
- struct dev_state *ps = as->ps;
+ struct usb_dev_state *ps = as->ps;
struct siginfo sinfo;
struct pid *pid = NULL;
u32 secid = 0;
@@ -529,7 +529,7 @@ static void async_completed(struct urb *urb)
wake_up(&ps->wait);
}
-static void destroy_async(struct dev_state *ps, struct list_head *list)
+static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
{
struct urb *urb;
struct async *as;
@@ -551,7 +551,7 @@ static void destroy_async(struct dev_state *ps, struct list_head *list)
spin_unlock_irqrestore(&ps->lock, flags);
}
-static void destroy_async_on_interface(struct dev_state *ps,
+static void destroy_async_on_interface(struct usb_dev_state *ps,
unsigned int ifnum)
{
struct list_head *p, *q, hitlist;
@@ -566,7 +566,7 @@ static void destroy_async_on_interface(struct dev_state *ps,
destroy_async(ps, &hitlist);
}
-static void destroy_all_async(struct dev_state *ps)
+static void destroy_all_async(struct usb_dev_state *ps)
{
destroy_async(ps, &ps->async_pending);
}
@@ -585,7 +585,7 @@ static int driver_probe(struct usb_interface *intf,
static void driver_disconnect(struct usb_interface *intf)
{
- struct dev_state *ps = usb_get_intfdata(intf);
+ struct usb_dev_state *ps = usb_get_intfdata(intf);
unsigned int ifnum = intf->altsetting->desc.bInterfaceNumber;
if (!ps)
@@ -628,7 +628,7 @@ struct usb_driver usbfs_driver = {
.resume = driver_resume,
};
-static int claimintf(struct dev_state *ps, unsigned int ifnum)
+static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
{
struct usb_device *dev = ps->dev;
struct usb_interface *intf;
@@ -650,7 +650,7 @@ static int claimintf(struct dev_state *ps, unsigned int ifnum)
return err;
}
-static int releaseintf(struct dev_state *ps, unsigned int ifnum)
+static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum)
{
struct usb_device *dev;
struct usb_interface *intf;
@@ -670,7 +670,7 @@ static int releaseintf(struct dev_state *ps, unsigned int ifnum)
return err;
}
-static int checkintf(struct dev_state *ps, unsigned int ifnum)
+static int checkintf(struct usb_dev_state *ps, unsigned int ifnum)
{
if (ps->dev->state != USB_STATE_CONFIGURED)
return -EHOSTUNREACH;
@@ -710,7 +710,7 @@ static int findintfep(struct usb_device *dev, unsigned int ep)
return -ENOENT;
}
-static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
+static int check_ctrlrecip(struct usb_dev_state *ps, unsigned int requesttype,
unsigned int request, unsigned int index)
{
int ret = 0;
@@ -769,6 +769,88 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
return ret;
}
+static struct usb_host_endpoint *ep_to_host_endpoint(struct usb_device *dev,
+ unsigned char ep)
+{
+ if (ep & USB_ENDPOINT_DIR_MASK)
+ return dev->ep_in[ep & USB_ENDPOINT_NUMBER_MASK];
+ else
+ return dev->ep_out[ep & USB_ENDPOINT_NUMBER_MASK];
+}
+
+static int parse_usbdevfs_streams(struct usb_dev_state *ps,
+ struct usbdevfs_streams __user *streams,
+ unsigned int *num_streams_ret,
+ unsigned int *num_eps_ret,
+ struct usb_host_endpoint ***eps_ret,
+ struct usb_interface **intf_ret)
+{
+ unsigned int i, num_streams, num_eps;
+ struct usb_host_endpoint **eps;
+ struct usb_interface *intf = NULL;
+ unsigned char ep;
+ int ifnum, ret;
+
+ if (get_user(num_streams, &streams->num_streams) ||
+ get_user(num_eps, &streams->num_eps))
+ return -EFAULT;
+
+ if (num_eps < 1 || num_eps > USB_MAXENDPOINTS)
+ return -EINVAL;
+
+ /* The XHCI controller allows max 2 ^ 16 streams */
+ if (num_streams_ret && (num_streams < 2 || num_streams > 65536))
+ return -EINVAL;
+
+ eps = kmalloc(num_eps * sizeof(*eps), GFP_KERNEL);
+ if (!eps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_eps; i++) {
+ if (get_user(ep, &streams->eps[i])) {
+ ret = -EFAULT;
+ goto error;
+ }
+ eps[i] = ep_to_host_endpoint(ps->dev, ep);
+ if (!eps[i]) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* usb_alloc/free_streams operate on an usb_interface */
+ ifnum = findintfep(ps->dev, ep);
+ if (ifnum < 0) {
+ ret = ifnum;
+ goto error;
+ }
+
+ if (i == 0) {
+ ret = checkintf(ps, ifnum);
+ if (ret < 0)
+ goto error;
+ intf = usb_ifnum_to_if(ps->dev, ifnum);
+ } else {
+ /* Verify all eps belong to the same interface */
+ if (ifnum != intf->altsetting->desc.bInterfaceNumber) {
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ }
+
+ if (num_streams_ret)
+ *num_streams_ret = num_streams;
+ *num_eps_ret = num_eps;
+ *eps_ret = eps;
+ *intf_ret = intf;
+
+ return 0;
+
+error:
+ kfree(eps);
+ return ret;
+}
+
static int match_devt(struct device *dev, void *data)
{
return dev->devt == (dev_t) (unsigned long) data;
@@ -791,11 +873,11 @@ static struct usb_device *usbdev_lookup_by_devt(dev_t devt)
static int usbdev_open(struct inode *inode, struct file *file)
{
struct usb_device *dev = NULL;
- struct dev_state *ps;
+ struct usb_dev_state *ps;
int ret;
ret = -ENOMEM;
- ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL);
+ ps = kmalloc(sizeof(struct usb_dev_state), GFP_KERNEL);
if (!ps)
goto out_free_ps;
@@ -852,7 +934,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
static int usbdev_release(struct inode *inode, struct file *file)
{
- struct dev_state *ps = file->private_data;
+ struct usb_dev_state *ps = file->private_data;
struct usb_device *dev = ps->dev;
unsigned int ifnum;
struct async *as;
@@ -883,7 +965,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
return 0;
}
-static int proc_control(struct dev_state *ps, void __user *arg)
+static int proc_control(struct usb_dev_state *ps, void __user *arg)
{
struct usb_device *dev = ps->dev;
struct usbdevfs_ctrltransfer ctrl;
@@ -970,7 +1052,7 @@ static int proc_control(struct dev_state *ps, void __user *arg)
return ret;
}
-static int proc_bulk(struct dev_state *ps, void __user *arg)
+static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
{
struct usb_device *dev = ps->dev;
struct usbdevfs_bulktransfer bulk;
@@ -1043,7 +1125,21 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
return ret;
}
-static int proc_resetep(struct dev_state *ps, void __user *arg)
+static void check_reset_of_active_ep(struct usb_device *udev,
+ unsigned int epnum, char *ioctl_name)
+{
+ struct usb_host_endpoint **eps;
+ struct usb_host_endpoint *ep;
+
+ eps = (epnum & USB_DIR_IN) ? udev->ep_in : udev->ep_out;
+ ep = eps[epnum & 0x0f];
+ if (ep && !list_empty(&ep->urb_list))
+ dev_warn(&udev->dev, "Process %d (%s) called USBDEVFS_%s for active endpoint 0x%02x\n",
+ task_pid_nr(current), current->comm,
+ ioctl_name, epnum);
+}
+
+static int proc_resetep(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ep;
int ret;
@@ -1056,11 +1152,12 @@ static int proc_resetep(struct dev_state *ps, void __user *arg)
ret = checkintf(ps, ret);
if (ret)
return ret;
+ check_reset_of_active_ep(ps->dev, ep, "RESETEP");
usb_reset_endpoint(ps->dev, ep);
return 0;
}
-static int proc_clearhalt(struct dev_state *ps, void __user *arg)
+static int proc_clearhalt(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ep;
int pipe;
@@ -1074,6 +1171,7 @@ static int proc_clearhalt(struct dev_state *ps, void __user *arg)
ret = checkintf(ps, ret);
if (ret)
return ret;
+ check_reset_of_active_ep(ps->dev, ep, "CLEAR_HALT");
if (ep & USB_DIR_IN)
pipe = usb_rcvbulkpipe(ps->dev, ep & 0x7f);
else
@@ -1082,7 +1180,7 @@ static int proc_clearhalt(struct dev_state *ps, void __user *arg)
return usb_clear_halt(ps->dev, pipe);
}
-static int proc_getdriver(struct dev_state *ps, void __user *arg)
+static int proc_getdriver(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_getdriver gd;
struct usb_interface *intf;
@@ -1101,7 +1199,7 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
return ret;
}
-static int proc_connectinfo(struct dev_state *ps, void __user *arg)
+static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_connectinfo ci = {
.devnum = ps->dev->devnum,
@@ -1113,12 +1211,12 @@ static int proc_connectinfo(struct dev_state *ps, void __user *arg)
return 0;
}
-static int proc_resetdevice(struct dev_state *ps)
+static int proc_resetdevice(struct usb_dev_state *ps)
{
return usb_reset_device(ps->dev);
}
-static int proc_setintf(struct dev_state *ps, void __user *arg)
+static int proc_setintf(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_setinterface setintf;
int ret;
@@ -1127,11 +1225,14 @@ static int proc_setintf(struct dev_state *ps, void __user *arg)
return -EFAULT;
if ((ret = checkintf(ps, setintf.interface)))
return ret;
+
+ destroy_async_on_interface(ps, setintf.interface);
+
return usb_set_interface(ps->dev, setintf.interface,
setintf.altsetting);
}
-static int proc_setconfig(struct dev_state *ps, void __user *arg)
+static int proc_setconfig(struct usb_dev_state *ps, void __user *arg)
{
int u;
int status = 0;
@@ -1179,7 +1280,7 @@ static int proc_setconfig(struct dev_state *ps, void __user *arg)
return status;
}
-static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
+static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb,
struct usbdevfs_iso_packet_desc __user *iso_frame_desc,
void __user *arg)
{
@@ -1189,6 +1290,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
int i, ret, is_in, num_sgs = 0, ifnum = -1;
+ int number_of_packets = 0;
+ unsigned int stream_id = 0;
void *buf;
if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
@@ -1209,15 +1312,10 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (ret)
return ret;
}
- if ((uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0) {
- is_in = 1;
- ep = ps->dev->ep_in[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
- } else {
- is_in = 0;
- ep = ps->dev->ep_out[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
- }
+ ep = ep_to_host_endpoint(ps->dev, uurb->endpoint);
if (!ep)
return -ENOENT;
+ is_in = (uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0;
u = 0;
switch(uurb->type) {
@@ -1242,7 +1340,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
le16_to_cpup(&dr->wIndex));
if (ret)
goto error;
- uurb->number_of_packets = 0;
uurb->buffer_length = le16_to_cpup(&dr->wLength);
uurb->buffer += 8;
if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
@@ -1272,17 +1369,17 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
goto interrupt_urb;
}
- uurb->number_of_packets = 0;
num_sgs = DIV_ROUND_UP(uurb->buffer_length, USB_SG_SIZE);
if (num_sgs == 1 || num_sgs > ps->dev->bus->sg_tablesize)
num_sgs = 0;
+ if (ep->streams)
+ stream_id = uurb->stream_id;
break;
case USBDEVFS_URB_TYPE_INTERRUPT:
if (!usb_endpoint_xfer_int(&ep->desc))
return -EINVAL;
interrupt_urb:
- uurb->number_of_packets = 0;
break;
case USBDEVFS_URB_TYPE_ISO:
@@ -1292,15 +1389,16 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EINVAL;
if (!usb_endpoint_xfer_isoc(&ep->desc))
return -EINVAL;
+ number_of_packets = uurb->number_of_packets;
isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) *
- uurb->number_of_packets;
+ number_of_packets;
if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
return -ENOMEM;
if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
ret = -EFAULT;
goto error;
}
- for (totlen = u = 0; u < uurb->number_of_packets; u++) {
+ for (totlen = u = 0; u < number_of_packets; u++) {
/*
* arbitrary limit need for USB 3.0
* bMaxBurst (0~15 allowed, 1~16 packets)
@@ -1331,7 +1429,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
ret = -EFAULT;
goto error;
}
- as = alloc_async(uurb->number_of_packets);
+ as = alloc_async(number_of_packets);
if (!as) {
ret = -ENOMEM;
goto error;
@@ -1425,7 +1523,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->urb->setup_packet = (unsigned char *)dr;
dr = NULL;
as->urb->start_frame = uurb->start_frame;
- as->urb->number_of_packets = uurb->number_of_packets;
+ as->urb->number_of_packets = number_of_packets;
+ as->urb->stream_id = stream_id;
if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
ps->dev->speed == USB_SPEED_HIGH)
as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
@@ -1433,7 +1532,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->urb->interval = ep->desc.bInterval;
as->urb->context = as;
as->urb->complete = async_completed;
- for (totlen = u = 0; u < uurb->number_of_packets; u++) {
+ for (totlen = u = 0; u < number_of_packets; u++) {
as->urb->iso_frame_desc[u].offset = totlen;
as->urb->iso_frame_desc[u].length = isopkt[u].length;
totlen += isopkt[u].length;
@@ -1508,7 +1607,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return ret;
}
-static int proc_submiturb(struct dev_state *ps, void __user *arg)
+static int proc_submiturb(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_urb uurb;
@@ -1520,7 +1619,7 @@ static int proc_submiturb(struct dev_state *ps, void __user *arg)
arg);
}
-static int proc_unlinkurb(struct dev_state *ps, void __user *arg)
+static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg)
{
struct urb *urb;
struct async *as;
@@ -1580,7 +1679,7 @@ err_out:
return -EFAULT;
}
-static struct async *reap_as(struct dev_state *ps)
+static struct async *reap_as(struct usb_dev_state *ps)
{
DECLARE_WAITQUEUE(wait, current);
struct async *as = NULL;
@@ -1603,7 +1702,7 @@ static struct async *reap_as(struct dev_state *ps)
return as;
}
-static int proc_reapurb(struct dev_state *ps, void __user *arg)
+static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
if (as) {
@@ -1616,7 +1715,7 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
return -EIO;
}
-static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
+static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
{
int retval;
struct async *as;
@@ -1631,7 +1730,7 @@ static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
}
#ifdef CONFIG_COMPAT
-static int proc_control_compat(struct dev_state *ps,
+static int proc_control_compat(struct usb_dev_state *ps,
struct usbdevfs_ctrltransfer32 __user *p32)
{
struct usbdevfs_ctrltransfer __user *p;
@@ -1644,7 +1743,7 @@ static int proc_control_compat(struct dev_state *ps,
return proc_control(ps, p);
}
-static int proc_bulk_compat(struct dev_state *ps,
+static int proc_bulk_compat(struct usb_dev_state *ps,
struct usbdevfs_bulktransfer32 __user *p32)
{
struct usbdevfs_bulktransfer __user *p;
@@ -1661,7 +1760,7 @@ static int proc_bulk_compat(struct dev_state *ps,
return proc_bulk(ps, p);
}
-static int proc_disconnectsignal_compat(struct dev_state *ps, void __user *arg)
+static int proc_disconnectsignal_compat(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_disconnectsignal32 ds;
@@ -1699,7 +1798,7 @@ static int get_urb32(struct usbdevfs_urb *kurb,
return 0;
}
-static int proc_submiturb_compat(struct dev_state *ps, void __user *arg)
+static int proc_submiturb_compat(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_urb uurb;
@@ -1745,7 +1844,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
return 0;
}
-static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
+static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
if (as) {
@@ -1758,7 +1857,7 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
return -EIO;
}
-static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
+static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *arg)
{
int retval;
struct async *as;
@@ -1775,7 +1874,7 @@ static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
#endif
-static int proc_disconnectsignal(struct dev_state *ps, void __user *arg)
+static int proc_disconnectsignal(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_disconnectsignal ds;
@@ -1786,7 +1885,7 @@ static int proc_disconnectsignal(struct dev_state *ps, void __user *arg)
return 0;
}
-static int proc_claiminterface(struct dev_state *ps, void __user *arg)
+static int proc_claiminterface(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ifnum;
@@ -1795,7 +1894,7 @@ static int proc_claiminterface(struct dev_state *ps, void __user *arg)
return claimintf(ps, ifnum);
}
-static int proc_releaseinterface(struct dev_state *ps, void __user *arg)
+static int proc_releaseinterface(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ifnum;
int ret;
@@ -1808,7 +1907,7 @@ static int proc_releaseinterface(struct dev_state *ps, void __user *arg)
return 0;
}
-static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
+static int proc_ioctl(struct usb_dev_state *ps, struct usbdevfs_ioctl *ctl)
{
int size;
void *buf = NULL;
@@ -1884,7 +1983,7 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
return retval;
}
-static int proc_ioctl_default(struct dev_state *ps, void __user *arg)
+static int proc_ioctl_default(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_ioctl ctrl;
@@ -1894,7 +1993,7 @@ static int proc_ioctl_default(struct dev_state *ps, void __user *arg)
}
#ifdef CONFIG_COMPAT
-static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg)
+static int proc_ioctl_compat(struct usb_dev_state *ps, compat_uptr_t arg)
{
struct usbdevfs_ioctl32 __user *uioc;
struct usbdevfs_ioctl ctrl;
@@ -1912,7 +2011,7 @@ static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg)
}
#endif
-static int proc_claim_port(struct dev_state *ps, void __user *arg)
+static int proc_claim_port(struct usb_dev_state *ps, void __user *arg)
{
unsigned portnum;
int rc;
@@ -1926,7 +2025,7 @@ static int proc_claim_port(struct dev_state *ps, void __user *arg)
return rc;
}
-static int proc_release_port(struct dev_state *ps, void __user *arg)
+static int proc_release_port(struct usb_dev_state *ps, void __user *arg)
{
unsigned portnum;
@@ -1935,7 +2034,7 @@ static int proc_release_port(struct dev_state *ps, void __user *arg)
return usb_hub_release_port(ps->dev, portnum, ps);
}
-static int proc_get_capabilities(struct dev_state *ps, void __user *arg)
+static int proc_get_capabilities(struct usb_dev_state *ps, void __user *arg)
{
__u32 caps;
@@ -1951,7 +2050,7 @@ static int proc_get_capabilities(struct dev_state *ps, void __user *arg)
return 0;
}
-static int proc_disconnect_claim(struct dev_state *ps, void __user *arg)
+static int proc_disconnect_claim(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_disconnect_claim dc;
struct usb_interface *intf;
@@ -1983,6 +2082,45 @@ static int proc_disconnect_claim(struct dev_state *ps, void __user *arg)
return claimintf(ps, dc.interface);
}
+static int proc_alloc_streams(struct usb_dev_state *ps, void __user *arg)
+{
+ unsigned num_streams, num_eps;
+ struct usb_host_endpoint **eps;
+ struct usb_interface *intf;
+ int r;
+
+ r = parse_usbdevfs_streams(ps, arg, &num_streams, &num_eps,
+ &eps, &intf);
+ if (r)
+ return r;
+
+ destroy_async_on_interface(ps,
+ intf->altsetting[0].desc.bInterfaceNumber);
+
+ r = usb_alloc_streams(intf, eps, num_eps, num_streams, GFP_KERNEL);
+ kfree(eps);
+ return r;
+}
+
+static int proc_free_streams(struct usb_dev_state *ps, void __user *arg)
+{
+ unsigned num_eps;
+ struct usb_host_endpoint **eps;
+ struct usb_interface *intf;
+ int r;
+
+ r = parse_usbdevfs_streams(ps, arg, NULL, &num_eps, &eps, &intf);
+ if (r)
+ return r;
+
+ destroy_async_on_interface(ps,
+ intf->altsetting[0].desc.bInterfaceNumber);
+
+ r = usb_free_streams(intf, eps, num_eps, GFP_KERNEL);
+ kfree(eps);
+ return r;
+}
+
/*
* NOTE: All requests here that have interface numbers as parameters
* are assuming that somehow the configuration has been prevented from
@@ -1991,7 +2129,7 @@ static int proc_disconnect_claim(struct dev_state *ps, void __user *arg)
static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
void __user *p)
{
- struct dev_state *ps = file->private_data;
+ struct usb_dev_state *ps = file->private_data;
struct inode *inode = file_inode(file);
struct usb_device *dev = ps->dev;
int ret = -ENOTTY;
@@ -2159,6 +2297,12 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
case USBDEVFS_DISCONNECT_CLAIM:
ret = proc_disconnect_claim(ps, p);
break;
+ case USBDEVFS_ALLOC_STREAMS:
+ ret = proc_alloc_streams(ps, p);
+ break;
+ case USBDEVFS_FREE_STREAMS:
+ ret = proc_free_streams(ps, p);
+ break;
}
usb_unlock_device(dev);
if (ret >= 0)
@@ -2192,7 +2336,7 @@ static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
static unsigned int usbdev_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct dev_state *ps = file->private_data;
+ struct usb_dev_state *ps = file->private_data;
unsigned int mask = 0;
poll_wait(file, &ps->wait, wait);
@@ -2218,11 +2362,11 @@ const struct file_operations usbdev_file_operations = {
static void usbdev_remove(struct usb_device *udev)
{
- struct dev_state *ps;
+ struct usb_dev_state *ps;
struct siginfo sinfo;
while (!list_empty(&udev->filelist)) {
- ps = list_entry(udev->filelist.next, struct dev_state, list);
+ ps = list_entry(udev->filelist.next, struct usb_dev_state, list);
destroy_all_async(ps);
wake_up_all(&ps->wait);
list_del_init(&ps->list);
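Note: the new USBDEVFS_ALLOC_STREAMS / USBDEVFS_FREE_STREAMS ioctls take a struct usbdevfs_streams: a stream count (ignored on free), an endpoint count, and then that many endpoint addresses, all of which must belong to one claimed interface and, for allocation, request between 2 and 65536 streams, exactly as parse_usbdevfs_streams() enforces. A userspace sketch, assuming the usbdevfs_streams definition exported by this series in linux/usbdevice_fs.h; the fd, endpoint addresses and stream count are placeholders:

	#include <linux/usbdevice_fs.h>
	#include <sys/ioctl.h>
	#include <stdlib.h>

	static int demo_alloc_streams(int usbfs_fd)
	{
		struct usbdevfs_streams *req;
		int ret;

		req = calloc(1, sizeof(*req) + 2);	/* room for 2 endpoint bytes */
		if (!req)
			return -1;
		req->num_streams = 4;			/* must be 2..65536 */
		req->num_eps = 2;
		req->eps[0] = 0x81;			/* bulk IN,  ep 1 (placeholder) */
		req->eps[1] = 0x02;			/* bulk OUT, ep 2 (placeholder) */

		ret = ioctl(usbfs_fd, USBDEVFS_ALLOC_STREAMS, req);
		free(req);
		return ret;
	}

Individual transfers then select a stream per URB through the stream_id field that proc_do_submiturb() now copies into urb->stream_id.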
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ab90a0156828..888881e5f292 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -312,9 +312,9 @@ static int usb_probe_interface(struct device *dev)
return error;
}
- id = usb_match_id(intf, driver->id_table);
+ id = usb_match_dynamic_id(intf, driver);
if (!id)
- id = usb_match_dynamic_id(intf, driver);
+ id = usb_match_id(intf, driver->id_table);
if (!id)
return error;
@@ -400,8 +400,9 @@ static int usb_unbind_interface(struct device *dev)
{
struct usb_driver *driver = to_usb_driver(dev->driver);
struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_host_endpoint *ep, **eps = NULL;
struct usb_device *udev;
- int error, r, lpm_disable_error;
+ int i, j, error, r, lpm_disable_error;
intf->condition = USB_INTERFACE_UNBINDING;
@@ -425,6 +426,26 @@ static int usb_unbind_interface(struct device *dev)
driver->disconnect(intf);
usb_cancel_queued_reset(intf);
+ /* Free streams */
+ for (i = 0, j = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ ep = &intf->cur_altsetting->endpoint[i];
+ if (ep->streams == 0)
+ continue;
+ if (j == 0) {
+ eps = kmalloc(USB_MAXENDPOINTS * sizeof(void *),
+ GFP_KERNEL);
+ if (!eps) {
+ dev_warn(dev, "oom, leaking streams\n");
+ break;
+ }
+ }
+ eps[j++] = ep;
+ }
+ if (j) {
+ usb_free_streams(intf, eps, j, GFP_KERNEL);
+ kfree(eps);
+ }
+
/* Reset other interface state.
* We cannot do a Set-Interface if the device is suspended or
* if it is prepared for a system sleep (since installing a new
@@ -990,8 +1011,7 @@ EXPORT_SYMBOL_GPL(usb_deregister);
* it doesn't support pre_reset/post_reset/reset_resume or
* because it doesn't support suspend/resume.
*
- * The caller must hold @intf's device's lock, but not its pm_mutex
- * and not @intf->dev.sem.
+ * The caller must hold @intf's device's lock, but not @intf's lock.
*/
void usb_forced_unbind_intf(struct usb_interface *intf)
{
@@ -1004,16 +1024,37 @@ void usb_forced_unbind_intf(struct usb_interface *intf)
intf->needs_binding = 1;
}
+/*
+ * Unbind drivers for @udev's marked interfaces. These interfaces have
+ * the needs_binding flag set, for example by usb_resume_interface().
+ *
+ * The caller must hold @udev's device lock.
+ */
+static void unbind_marked_interfaces(struct usb_device *udev)
+{
+ struct usb_host_config *config;
+ int i;
+ struct usb_interface *intf;
+
+ config = udev->actconfig;
+ if (config) {
+ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+ intf = config->interface[i];
+ if (intf->dev.driver && intf->needs_binding)
+ usb_forced_unbind_intf(intf);
+ }
+ }
+}
+
/* Delayed forced unbinding of a USB interface driver and scan
* for rebinding.
*
- * The caller must hold @intf's device's lock, but not its pm_mutex
- * and not @intf->dev.sem.
+ * The caller must hold @intf's device's lock, but not @intf's lock.
*
* Note: Rebinds will be skipped if a system sleep transition is in
* progress and the PM "complete" callback hasn't occurred yet.
*/
-void usb_rebind_intf(struct usb_interface *intf)
+static void usb_rebind_intf(struct usb_interface *intf)
{
int rc;
@@ -1030,68 +1071,66 @@ void usb_rebind_intf(struct usb_interface *intf)
}
}
-#ifdef CONFIG_PM
-
-/* Unbind drivers for @udev's interfaces that don't support suspend/resume
- * There is no check for reset_resume here because it can be determined
- * only during resume whether reset_resume is needed.
+/*
+ * Rebind drivers to @udev's marked interfaces. These interfaces have
+ * the needs_binding flag set.
*
* The caller must hold @udev's device lock.
*/
-static void unbind_no_pm_drivers_interfaces(struct usb_device *udev)
+static void rebind_marked_interfaces(struct usb_device *udev)
{
struct usb_host_config *config;
int i;
struct usb_interface *intf;
- struct usb_driver *drv;
config = udev->actconfig;
if (config) {
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
intf = config->interface[i];
-
- if (intf->dev.driver) {
- drv = to_usb_driver(intf->dev.driver);
- if (!drv->suspend || !drv->resume)
- usb_forced_unbind_intf(intf);
- }
+ if (intf->needs_binding)
+ usb_rebind_intf(intf);
}
}
}
-/* Unbind drivers for @udev's interfaces that failed to support reset-resume.
- * These interfaces have the needs_binding flag set by usb_resume_interface().
+/*
+ * Unbind all of @udev's marked interfaces and then rebind all of them.
+ * This ordering is necessary because some drivers claim several interfaces
+ * when they are first probed.
*
* The caller must hold @udev's device lock.
*/
-static void unbind_no_reset_resume_drivers_interfaces(struct usb_device *udev)
+void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev)
{
- struct usb_host_config *config;
- int i;
- struct usb_interface *intf;
-
- config = udev->actconfig;
- if (config) {
- for (i = 0; i < config->desc.bNumInterfaces; ++i) {
- intf = config->interface[i];
- if (intf->dev.driver && intf->needs_binding)
- usb_forced_unbind_intf(intf);
- }
- }
+ unbind_marked_interfaces(udev);
+ rebind_marked_interfaces(udev);
}
-static void do_rebind_interfaces(struct usb_device *udev)
+#ifdef CONFIG_PM
+
+/* Unbind drivers for @udev's interfaces that don't support suspend/resume
+ * There is no check for reset_resume here because it can be determined
+ * only during resume whether reset_resume is needed.
+ *
+ * The caller must hold @udev's device lock.
+ */
+static void unbind_no_pm_drivers_interfaces(struct usb_device *udev)
{
struct usb_host_config *config;
int i;
struct usb_interface *intf;
+ struct usb_driver *drv;
config = udev->actconfig;
if (config) {
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
intf = config->interface[i];
- if (intf->needs_binding)
- usb_rebind_intf(intf);
+
+ if (intf->dev.driver) {
+ drv = to_usb_driver(intf->dev.driver);
+ if (!drv->suspend || !drv->resume)
+ usb_forced_unbind_intf(intf);
+ }
}
}
}
@@ -1420,7 +1459,7 @@ int usb_resume_complete(struct device *dev)
* whose needs_binding flag is set
*/
if (udev->state != USB_STATE_NOTATTACHED)
- do_rebind_interfaces(udev);
+ rebind_marked_interfaces(udev);
return 0;
}
@@ -1442,7 +1481,7 @@ int usb_resume(struct device *dev, pm_message_t msg)
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- unbind_no_reset_resume_drivers_interfaces(udev);
+ unbind_marked_interfaces(udev);
}
/* Avoid PM error messages for devices disconnected while suspended
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index acbfeb0a0119..358ca8dd784f 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -155,6 +155,7 @@ int usb_choose_configuration(struct usb_device *udev)
}
return i;
}
+EXPORT_SYMBOL_GPL(usb_choose_configuration);
static int generic_probe(struct usb_device *udev)
{
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 2518c3250750..9c4e2922b04d 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2049,7 +2049,7 @@ int usb_alloc_streams(struct usb_interface *interface,
{
struct usb_hcd *hcd;
struct usb_device *dev;
- int i;
+ int i, ret;
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
@@ -2058,13 +2058,24 @@ int usb_alloc_streams(struct usb_interface *interface,
if (dev->speed != USB_SPEED_SUPER)
return -EINVAL;
- /* Streams only apply to bulk endpoints. */
- for (i = 0; i < num_eps; i++)
+ for (i = 0; i < num_eps; i++) {
+ /* Streams only apply to bulk endpoints. */
if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
return -EINVAL;
+ /* Re-alloc is not allowed */
+ if (eps[i]->streams)
+ return -EINVAL;
+ }
- return hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
+ ret = hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
num_streams, mem_flags);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num_eps; i++)
+ eps[i]->streams = ret;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(usb_alloc_streams);
@@ -2078,8 +2089,7 @@ EXPORT_SYMBOL_GPL(usb_alloc_streams);
* Reverts a group of bulk endpoints back to not using stream IDs.
* Can fail if we are given bad arguments, or HCD is broken.
*
- * Return: On success, the number of allocated streams. On failure, a negative
- * error code.
+ * Return: 0 on success. On failure, a negative error code.
*/
int usb_free_streams(struct usb_interface *interface,
struct usb_host_endpoint **eps, unsigned int num_eps,
@@ -2087,19 +2097,26 @@ int usb_free_streams(struct usb_interface *interface,
{
struct usb_hcd *hcd;
struct usb_device *dev;
- int i;
+ int i, ret;
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
if (dev->speed != USB_SPEED_SUPER)
return -EINVAL;
- /* Streams only apply to bulk endpoints. */
+ /* Double-free is not allowed */
for (i = 0; i < num_eps; i++)
- if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc))
+ if (!eps[i] || !eps[i]->streams)
return -EINVAL;
- return hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
+ ret = hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num_eps; i++)
+ eps[i]->streams = 0;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(usb_free_streams);
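Note: with the bookkeeping above, usb_alloc_streams() records the allocated count in each endpoint's ->streams field, usb_free_streams() clears it and now returns 0 on success, and both helpers reject re-allocation and double frees. From an interface driver the flow looks like this sketch; ep_in/ep_out stand in for the driver's bulk endpoints and the helper name is made up:

	#include <linux/kernel.h>
	#include <linux/usb.h>

	static int demo_use_streams(struct usb_interface *intf,
				    struct usb_host_endpoint *ep_in,
				    struct usb_host_endpoint *ep_out)
	{
		struct usb_host_endpoint *eps[] = { ep_in, ep_out };
		int num;

		num = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps), 16, GFP_KERNEL);
		if (num < 0)
			return num;	/* SuperSpeed bulk endpoints only, no re-alloc */

		/* ... submit URBs with urb->stream_id set to 1..num (0 = no stream) ... */

		return usb_free_streams(intf, eps, ARRAY_SIZE(eps), GFP_KERNEL);
	}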
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 64ea21971be2..090469ebfcff 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -141,19 +141,27 @@ static int usb_device_supports_lpm(struct usb_device *udev)
return 0;
}
- /* All USB 3.0 must support LPM, but we need their max exit latency
- * information from the SuperSpeed Extended Capabilities BOS descriptor.
+ /*
+ * According to the USB 3.0 spec, all USB 3.0 devices must support LPM.
+ * However, there are some that don't, and they set the U1/U2 exit
+ * latencies to zero.
*/
if (!udev->bos->ss_cap) {
- dev_warn(&udev->dev, "No LPM exit latency info found. "
- "Power management will be impacted.\n");
+ dev_info(&udev->dev, "No LPM exit latency info found, disabling LPM.\n");
+ return 0;
+ }
+
+ if (udev->bos->ss_cap->bU1devExitLat == 0 &&
+ udev->bos->ss_cap->bU2DevExitLat == 0) {
+ if (udev->parent)
+ dev_info(&udev->dev, "LPM exit latency is zeroed, disabling LPM.\n");
+ else
+ dev_info(&udev->dev, "We don't know the algorithms for LPM for this host, disabling LPM.\n");
return 0;
}
- if (udev->parent->lpm_capable)
- return 1;
- dev_warn(&udev->dev, "Parent hub missing LPM exit latency info. "
- "Power management will be impacted.\n");
+ if (!udev->parent || udev->parent->lpm_capable)
+ return 1;
return 0;
}
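The rewritten usb_device_supports_lpm() above treats a missing SuperSpeed capability descriptor, or zeroed U1/U2 exit latencies, as "LPM unsupported" instead of merely warning. A small standalone sketch of the same decision, with simplified field names standing in for the real BOS descriptor layout:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified view of the SuperSpeed capability fields. */
struct demo_ss_cap {
	unsigned char bU1devExitLat;
	unsigned short bU2DevExitLat;
};

static bool demo_supports_lpm(const struct demo_ss_cap *ss_cap,
			      bool parent_lpm_capable, bool is_root)
{
	if (!ss_cap)
		return false;		/* no exit latency info at all */

	if (ss_cap->bU1devExitLat == 0 && ss_cap->bU2DevExitLat == 0)
		return false;		/* device claims zero exit latency: disable LPM */

	return is_root || parent_lpm_capable;
}

int main(void)
{
	struct demo_ss_cap cap = { .bU1devExitLat = 10, .bU2DevExitLat = 2047 };

	printf("%d\n", demo_supports_lpm(&cap, true, false));	/* 1 */
	printf("%d\n", demo_supports_lpm(NULL, true, false));	/* 0 */
	return 0;
}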
@@ -499,7 +507,8 @@ static void led_work (struct work_struct *work)
changed++;
}
if (changed)
- schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD);
+ queue_delayed_work(system_power_efficient_wq,
+ &hub->leds, LED_CYCLE_PERIOD);
}
/* use a short timeout for hub/port status fetches */
@@ -1040,8 +1049,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
*/
if (type == HUB_INIT) {
delay = hub_power_on(hub, false);
- PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2);
- schedule_delayed_work(&hub->init_work,
+ INIT_DELAYED_WORK(&hub->init_work, hub_init_func2);
+ queue_delayed_work(system_power_efficient_wq,
+ &hub->init_work,
msecs_to_jiffies(delay));
/* Suppress autosuspend until init is done */
@@ -1194,8 +1204,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Don't do a long sleep inside a workqueue routine */
if (type == HUB_INIT2) {
- PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
- schedule_delayed_work(&hub->init_work,
+ INIT_DELAYED_WORK(&hub->init_work, hub_init_func3);
+ queue_delayed_work(system_power_efficient_wq,
+ &hub->init_work,
msecs_to_jiffies(delay));
return; /* Continues at init3: below */
} else {
@@ -1209,7 +1220,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
if (status < 0)
dev_err(hub->intfdev, "activate --> %d\n", status);
if (hub->has_indicators && blinkenlights)
- schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD);
+ queue_delayed_work(system_power_efficient_wq,
+ &hub->leds, LED_CYCLE_PERIOD);
/* Scan all ports that need attention */
kick_khubd(hub);
@@ -1788,7 +1800,7 @@ hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
* to one of these "claimed" ports, the program will "own" the device.
*/
static int find_port_owner(struct usb_device *hdev, unsigned port1,
- struct dev_state ***ppowner)
+ struct usb_dev_state ***ppowner)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
@@ -1806,10 +1818,10 @@ static int find_port_owner(struct usb_device *hdev, unsigned port1,
/* In the following three functions, the caller must hold hdev's lock */
int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
- struct dev_state *owner)
+ struct usb_dev_state *owner)
{
int rc;
- struct dev_state **powner;
+ struct usb_dev_state **powner;
rc = find_port_owner(hdev, port1, &powner);
if (rc)
@@ -1819,12 +1831,13 @@ int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
*powner = owner;
return rc;
}
+EXPORT_SYMBOL_GPL(usb_hub_claim_port);
int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
- struct dev_state *owner)
+ struct usb_dev_state *owner)
{
int rc;
- struct dev_state **powner;
+ struct usb_dev_state **powner;
rc = find_port_owner(hdev, port1, &powner);
if (rc)
@@ -1834,8 +1847,9 @@ int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
*powner = NULL;
return rc;
}
+EXPORT_SYMBOL_GPL(usb_hub_release_port);
-void usb_hub_release_all_ports(struct usb_device *hdev, struct dev_state *owner)
+void usb_hub_release_all_ports(struct usb_device *hdev, struct usb_dev_state *owner)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
int n;
@@ -3093,9 +3107,19 @@ static int finish_port_resume(struct usb_device *udev)
* operation is carried out here, after the port has been
* resumed.
*/
- if (udev->reset_resume)
+ if (udev->reset_resume) {
+ /*
+ * If the device morphs or switches modes when it is reset,
+ * we don't want to perform a reset-resume. We'll fail the
+ * resume, which will cause a logical disconnect, and then
+ * the device will be rediscovered.
+ */
retry_reset_resume:
- status = usb_reset_and_verify_device(udev);
+ if (udev->quirks & USB_QUIRK_RESET)
+ status = -ENODEV;
+ else
+ status = usb_reset_and_verify_device(udev);
+ }
/* 10.5.4.5 says be sure devices in the tree are still there.
* For now let's assume the device didn't go crazy on resume,
@@ -3958,7 +3982,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
connect_type = usb_get_hub_port_connect_type(udev->parent,
udev->portnum);
- if ((udev->bos->ext_cap->bmAttributes & USB_BESL_SUPPORT) ||
+ if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
udev->usb2_hw_lpm_allowed = 1;
usb_set_usb2_hardware_lpm(udev, 1);
@@ -4107,8 +4131,12 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
did_new_scheme = true;
retval = hub_enable_device(udev);
- if (retval < 0)
+ if (retval < 0) {
+ dev_err(&udev->dev,
+ "hub failed to enable device, error %d\n",
+ retval);
goto fail;
+ }
#define GET_DESCRIPTOR_BUFSIZE 64
buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
@@ -4311,7 +4339,8 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
/* hub LEDs are probably harder to miss than syslog */
if (hub->has_indicators) {
hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
- schedule_delayed_work (&hub->leds, 0);
+ queue_delayed_work(system_power_efficient_wq,
+ &hub->leds, 0);
}
}
kfree(qual);
@@ -4540,7 +4569,9 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if (hub->has_indicators) {
hub->indicator[port1-1] =
INDICATOR_AMBER_BLINK;
- schedule_delayed_work (&hub->leds, 0);
+ queue_delayed_work(
+ system_power_efficient_wq,
+ &hub->leds, 0);
}
status = -ENOTCONN; /* Don't retry */
goto loop_disable;
@@ -4739,6 +4770,8 @@ static void hub_events(void)
/* deal with port status changes */
for (i = 1; i <= hdev->maxchild; i++) {
+ struct usb_device *udev = hub->ports[i - 1]->child;
+
if (test_bit(i, hub->busy_bits))
continue;
connect_change = test_bit(i, hub->change_bits);
@@ -4837,8 +4870,6 @@ static void hub_events(void)
*/
if (hub_port_warm_reset_required(hub, portstatus)) {
int status;
- struct usb_device *udev =
- hub->ports[i - 1]->child;
dev_dbg(hub_dev, "warm reset port %d\n", i);
if (!udev ||
@@ -4855,6 +4886,24 @@ static void hub_events(void)
usb_unlock_device(udev);
connect_change = 0;
}
+ /*
+ * On disconnect, USB3 protocol ports transition from U0 to
+ * SS.Inactive to Rx.Detect. If this happens, a warm
+ * reset is not needed, but a (re)connect may happen
+ * before khubd runs and sees the disconnect, and the
+ * device may be in an unknown state.
+ *
+ * If the port went through SS.Inactive without khubd
+ * seeing it, the C_LINK_STATE change flag will be set,
+ * and we reset the device to put it in a known state.
+ */
+ } else if (udev && hub_is_superspeed(hub->hdev) &&
+ (portchange & USB_PORT_STAT_C_LINK_STATE) &&
+ (portstatus & USB_PORT_STAT_CONNECTION)) {
+ usb_lock_device(udev);
+ usb_reset_device(udev);
+ usb_unlock_device(udev);
+ connect_change = 0;
}
if (connect_change)
@@ -5112,7 +5161,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct usb_device_descriptor descriptor = udev->descriptor;
struct usb_host_bos *bos;
- int i, ret = 0;
+ int i, j, ret = 0;
int port1 = udev->portnum;
if (udev->state == USB_STATE_NOTATTACHED ||
@@ -5238,6 +5287,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
ret);
goto re_enumerate;
}
+ /* Resetting also frees any allocated streams */
+ for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++)
+ intf->cur_altsetting->endpoint[j].streams = 0;
}
done:
@@ -5340,10 +5392,11 @@ int usb_reset_device(struct usb_device *udev)
else if (cintf->condition ==
USB_INTERFACE_BOUND)
rebind = 1;
+ if (rebind)
+ cintf->needs_binding = 1;
}
- if (ret == 0 && rebind)
- usb_rebind_intf(cintf);
}
+ usb_unbind_and_rebind_marked_interfaces(udev);
}
usb_autosuspend_device(udev);
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index df629a310e44..33bcb2c6f90a 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -89,7 +89,7 @@ struct usb_hub {
struct usb_port {
struct usb_device *child;
struct device dev;
- struct dev_state *port_owner;
+ struct usb_dev_state *port_owner;
enum usb_port_connect_type connect_type;
u8 portnum;
unsigned power_is_on:1;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index f829a1aad1c3..0c8a7fc4dad8 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -178,7 +178,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
*
* Return:
* If successful, 0. Otherwise a negative error number. The number of actual
- * bytes transferred will be stored in the @actual_length paramater.
+ * bytes transferred will be stored in the @actual_length parameter.
*/
int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout)
@@ -1293,8 +1293,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
struct usb_interface *iface;
struct usb_host_interface *alt;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
- int ret;
- int manual = 0;
+ int i, ret, manual = 0;
unsigned int epaddr;
unsigned int pipe;
@@ -1329,6 +1328,10 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
+ /* Changing alt-setting also frees any allocated streams */
+ for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++)
+ iface->cur_altsetting->endpoint[i].streams = 0;
+
ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
if (ret < 0) {
dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
@@ -1920,6 +1923,7 @@ free_interfaces:
usb_autosuspend_device(dev);
return 0;
}
+EXPORT_SYMBOL_GPL(usb_set_configuration);
static LIST_HEAD(set_config_list);
static DEFINE_SPINLOCK(set_config_lock);
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 9ff665f1322f..991386ceb4ec 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -831,7 +831,7 @@ EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
*
* this allows all outstanding URBs to be unlinked starting
* from the back of the queue. This function is asynchronous.
- * The unlinking is just tiggered. It may happen after this
+ * The unlinking is just triggered. It may happen after this
* function has returned.
*
* This routine should not be called by a driver after its disconnect
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 823857767a16..75bf649da82d 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -2,7 +2,7 @@
#include <linux/acpi.h>
struct usb_hub_descriptor;
-struct dev_state;
+struct usb_dev_state;
/* Functions local to drivers/usb/core/ */
@@ -55,14 +55,10 @@ extern int usb_match_one_id_intf(struct usb_device *dev,
extern int usb_match_device(struct usb_device *dev,
const struct usb_device_id *id);
extern void usb_forced_unbind_intf(struct usb_interface *intf);
-extern void usb_rebind_intf(struct usb_interface *intf);
+extern void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev);
-extern int usb_hub_claim_port(struct usb_device *hdev, unsigned port,
- struct dev_state *owner);
-extern int usb_hub_release_port(struct usb_device *hdev, unsigned port,
- struct dev_state *owner);
extern void usb_hub_release_all_ports(struct usb_device *hdev,
- struct dev_state *owner);
+ struct usb_dev_state *owner);
extern bool usb_device_is_owned(struct usb_device *udev);
extern int usb_hub_init(void);
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 8205799e6db3..c93918b70d03 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -72,6 +72,26 @@ static const char *dwc2_op_state_str(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_handle_usb_port_intr - handles OTG PRTINT interrupts.
+ * When the PRTINT interrupt fires, there are certain status bits in the Host
+ * Port that need to be cleared.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_handle_usb_port_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 hprt0 = readl(hsotg->regs + HPRT0);
+
+ if (hprt0 & HPRT0_ENACHG) {
+ hprt0 &= ~HPRT0_ENA;
+ writel(hprt0, hsotg->regs + HPRT0);
+ }
+
+ /* Clear interrupt */
+ writel(GINTSTS_PRTINT, hsotg->regs + GINTSTS);
+}
+
+/**
* dwc2_handle_mode_mismatch_intr() - Logs a mode mismatch warning message
*
* @hsotg: Programming view of DWC_otg controller
@@ -479,9 +499,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
if (dwc2_is_device_mode(hsotg)) {
dev_dbg(hsotg->dev,
" --Port interrupt received in Device mode--\n");
- gintsts = GINTSTS_PRTINT;
- writel(gintsts, hsotg->regs + GINTSTS);
- retval = 1;
+ dwc2_handle_usb_port_intr(hsotg);
+ retval = IRQ_HANDLED;
}
}
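dwc2_handle_usb_port_intr() above clears the enable-change condition without writing HPRT0_ENA back, because that bit is write-1-to-clear and writing it back would disable the port. A userspace sketch of the same read-modify-write discipline on a fake register, with made-up bit positions and a simplified W1C model (not the real HPRT0 layout):

#include <stdint.h>
#include <stdio.h>

/* Made-up bit positions standing in for HPRT0_ENA / HPRT0_ENACHG. */
#define DEMO_PRT_ENA	(1u << 2)	/* port enable, cleared by writing 1 */
#define DEMO_PRT_ENACHG	(1u << 3)	/* enable-change status, cleared by writing 1 */

static uint32_t demo_hprt0 = DEMO_PRT_ENA | DEMO_PRT_ENACHG;

/* Simplified write-1-to-clear model of the register. */
static void demo_writel(uint32_t val)
{
	demo_hprt0 &= ~(val & (DEMO_PRT_ENA | DEMO_PRT_ENACHG));
}

static void demo_handle_port_intr(void)
{
	uint32_t hprt0 = demo_hprt0;	/* readl() stand-in */

	if (hprt0 & DEMO_PRT_ENACHG) {
		hprt0 &= ~DEMO_PRT_ENA;	/* never write 1 back to the enable bit */
		demo_writel(hprt0);	/* acknowledges ENACHG, keeps the port enabled */
	}
}

int main(void)
{
	demo_handle_port_intr();
	printf("ENA=%d ENACHG=%d\n",
	       !!(demo_hprt0 & DEMO_PRT_ENA), !!(demo_hprt0 & DEMO_PRT_ENACHG));
	return 0;
}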
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 012f17ec1a37..47b9eb5389b4 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -975,8 +975,8 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd)
{
struct dwc2_hcd_urb *urb = qtd->urb;
- int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
+ int pipe_type;
int urb_xfer_done;
if (dbg_hc(chan))
@@ -984,6 +984,11 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
"--Host Channel %d Interrupt: Transfer Complete--\n",
chnum);
+ if (!urb)
+ goto handle_xfercomp_done;
+
+ pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+
if (hsotg->core_params->dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
if (pipe_type == USB_ENDPOINT_XFER_ISOC)
@@ -1005,9 +1010,6 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
}
}
- if (!urb)
- goto handle_xfercomp_done;
-
/* Update the QTD and URB states */
switch (pipe_type) {
case USB_ENDPOINT_XFER_CONTROL:
@@ -1105,7 +1107,7 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd)
{
struct dwc2_hcd_urb *urb = qtd->urb;
- int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+ int pipe_type;
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
chnum);
@@ -1119,6 +1121,8 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
if (!urb)
goto handle_stall_halt;
+ pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+
if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
dwc2_host_complete(hsotg, qtd, -EPIPE);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index a49217ae3533..d001417e8e37 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -61,9 +61,10 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
* dwc3_core_soft_reset - Issues core soft reset and PHY reset
* @dwc: pointer to our context structure
*/
-static void dwc3_core_soft_reset(struct dwc3 *dwc)
+static int dwc3_core_soft_reset(struct dwc3 *dwc)
{
u32 reg;
+ int ret;
/* Before Resetting PHY, put Core in Reset */
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
@@ -82,6 +83,15 @@ static void dwc3_core_soft_reset(struct dwc3 *dwc)
usb_phy_init(dwc->usb2_phy);
usb_phy_init(dwc->usb3_phy);
+ ret = phy_init(dwc->usb2_generic_phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_init(dwc->usb3_generic_phy);
+ if (ret < 0) {
+ phy_exit(dwc->usb2_generic_phy);
+ return ret;
+ }
mdelay(100);
/* Clear USB3 PHY reset */
@@ -100,6 +110,8 @@ static void dwc3_core_soft_reset(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_CORESOFTRESET;
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ return 0;
}
/**
@@ -242,6 +254,90 @@ static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
}
}
+static int dwc3_alloc_scratch_buffers(struct dwc3 *dwc)
+{
+ if (!dwc->has_hibernation)
+ return 0;
+
+ if (!dwc->nr_scratch)
+ return 0;
+
+ dwc->scratchbuf = kmalloc_array(dwc->nr_scratch,
+ DWC3_SCRATCHBUF_SIZE, GFP_KERNEL);
+ if (!dwc->scratchbuf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
+{
+ dma_addr_t scratch_addr;
+ u32 param;
+ int ret;
+
+ if (!dwc->has_hibernation)
+ return 0;
+
+ if (!dwc->nr_scratch)
+ return 0;
+
+ /* should never get here */
+ if (WARN_ON(!dwc->scratchbuf))
+ return 0;
+
+ scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf,
+ dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dwc->dev, scratch_addr)) {
+ dev_err(dwc->dev, "failed to map scratch buffer\n");
+ ret = -EFAULT;
+ goto err0;
+ }
+
+ dwc->scratch_addr = scratch_addr;
+
+ param = lower_32_bits(scratch_addr);
+
+ ret = dwc3_send_gadget_generic_command(dwc,
+ DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO, param);
+ if (ret < 0)
+ goto err1;
+
+ param = upper_32_bits(scratch_addr);
+
+ ret = dwc3_send_gadget_generic_command(dwc,
+ DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI, param);
+ if (ret < 0)
+ goto err1;
+
+ return 0;
+
+err1:
+ dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+ DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
+
+err0:
+ return ret;
+}
+
+static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
+{
+ if (!dwc->has_hibernation)
+ return;
+
+ if (!dwc->nr_scratch)
+ return;
+
+ /* should never get here */
+ if (WARN_ON(!dwc->scratchbuf))
+ return;
+
+ dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+ DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
+ kfree(dwc->scratchbuf);
+}
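dwc3_setup_scratch_buffers() above programs the scratchpad DMA address into the controller in two halves via generic device commands. A self-contained sketch of the 32-bit split it relies on (lower_32_bits()/upper_32_bits() are real kernel helpers; the demo below merely re-implements the arithmetic with an example address):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_lower_32_bits(uint64_t addr) { return (uint32_t)addr; }
static uint32_t demo_upper_32_bits(uint64_t addr) { return (uint32_t)(addr >> 32); }

int main(void)
{
	uint64_t scratch_addr = 0x00000001fe0c4000ULL;	/* example DMA address */

	/* The two halves are handed to the controller as separate commands. */
	printf("LO param: 0x%08" PRIx32 "\n", demo_lower_32_bits(scratch_addr));
	printf("HI param: 0x%08" PRIx32 "\n", demo_upper_32_bits(scratch_addr));
	return 0;
}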
+
static void dwc3_core_num_eps(struct dwc3 *dwc)
{
struct dwc3_hwparams *parms = &dwc->hwparams;
@@ -277,6 +373,7 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
static int dwc3_core_init(struct dwc3 *dwc)
{
unsigned long timeout;
+ u32 hwparams4 = dwc->hwparams.hwparams4;
u32 reg;
int ret;
@@ -306,7 +403,9 @@ static int dwc3_core_init(struct dwc3 *dwc)
cpu_relax();
} while (true);
- dwc3_core_soft_reset(dwc);
+ ret = dwc3_core_soft_reset(dwc);
+ if (ret)
+ goto err0;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
@@ -314,7 +413,29 @@ static int dwc3_core_init(struct dwc3 *dwc)
switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
- reg &= ~DWC3_GCTL_DSBLCLKGTNG;
+ /**
+ * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
+ * issue which would cause xHCI compliance tests to fail.
+ *
+ * Because of that we cannot enable clock gating on such
+ * configurations.
+ *
+ * Refers to:
+ *
+ * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
+ * SOF/ITP Mode Used
+ */
+ if ((dwc->dr_mode == USB_DR_MODE_HOST ||
+ dwc->dr_mode == USB_DR_MODE_OTG) &&
+ (dwc->revision >= DWC3_REVISION_210A &&
+ dwc->revision <= DWC3_REVISION_250A))
+ reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
+ else
+ reg &= ~DWC3_GCTL_DSBLCLKGTNG;
+ break;
+ case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
+ /* enable hibernation here */
+ dwc->nr_scratch = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);
break;
default:
dev_dbg(dwc->dev, "No power optimization available\n");
@@ -333,16 +454,36 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ ret = dwc3_alloc_scratch_buffers(dwc);
+ if (ret)
+ goto err1;
+
+ ret = dwc3_setup_scratch_buffers(dwc);
+ if (ret)
+ goto err2;
+
return 0;
+err2:
+ dwc3_free_scratch_buffers(dwc);
+
+err1:
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
err0:
return ret;
}
static void dwc3_core_exit(struct dwc3 *dwc)
{
+ dwc3_free_scratch_buffers(dwc);
usb_phy_shutdown(dwc->usb2_phy);
usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
}
#define DWC3_ALIGN_MASK (16 - 1)
@@ -411,32 +552,52 @@ static int dwc3_probe(struct platform_device *pdev)
if (IS_ERR(dwc->usb2_phy)) {
ret = PTR_ERR(dwc->usb2_phy);
-
- /*
- * if -ENXIO is returned, it means PHY layer wasn't
- * enabled, so it makes no sense to return -EPROBE_DEFER
- * in that case, since no PHY driver will ever probe.
- */
- if (ret == -ENXIO)
+ if (ret == -ENXIO || ret == -ENODEV) {
+ dwc->usb2_phy = NULL;
+ } else if (ret == -EPROBE_DEFER) {
return ret;
-
- dev_err(dev, "no usb2 phy configured\n");
- return -EPROBE_DEFER;
+ } else {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
}
if (IS_ERR(dwc->usb3_phy)) {
ret = PTR_ERR(dwc->usb3_phy);
+ if (ret == -ENXIO || ret == -ENODEV) {
+ dwc->usb3_phy = NULL;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ } else {
+ dev_err(dev, "no usb3 phy configured\n");
+ return ret;
+ }
+ }
- /*
- * if -ENXIO is returned, it means PHY layer wasn't
- * enabled, so it makes no sense to return -EPROBE_DEFER
- * in that case, since no PHY driver will ever probe.
- */
- if (ret == -ENXIO)
+ dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
+ if (IS_ERR(dwc->usb2_generic_phy)) {
+ ret = PTR_ERR(dwc->usb2_generic_phy);
+ if (ret == -ENOSYS || ret == -ENODEV) {
+ dwc->usb2_generic_phy = NULL;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ } else {
+ dev_err(dev, "no usb2 phy configured\n");
return ret;
+ }
+ }
- dev_err(dev, "no usb3 phy configured\n");
- return -EPROBE_DEFER;
+ dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
+ if (IS_ERR(dwc->usb3_generic_phy)) {
+ ret = PTR_ERR(dwc->usb3_generic_phy);
+ if (ret == -ENOSYS || ret == -ENODEV) {
+ dwc->usb3_generic_phy = NULL;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ } else {
+ dev_err(dev, "no usb3 phy configured\n");
+ return ret;
+ }
}
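The probe changes above treat a missing PHY as optional: -ENODEV (and -ENXIO/-ENOSYS depending on the lookup path) means "no PHY wired up", -EPROBE_DEFER is propagated so probing can be retried, and anything else is a hard error. A hedged plain-C sketch of that three-way classification; the error values and helper name are stand-ins, not the kernel's:

#include <stdio.h>

/* Stand-in error codes; the kernel uses -ENODEV, -ENOSYS and -EPROBE_DEFER. */
#define DEMO_ENODEV		19
#define DEMO_ENOSYS		38
#define DEMO_EPROBE_DEFER	517

enum demo_phy_result {
	DEMO_PHY_ABSENT,	/* treat as "no PHY", keep probing */
	DEMO_PHY_DEFER,		/* PHY driver not ready yet, retry later */
	DEMO_PHY_ERROR,		/* real failure, abort probe */
};

static enum demo_phy_result demo_classify_phy_error(int err)
{
	if (err == -DEMO_ENODEV || err == -DEMO_ENOSYS)
		return DEMO_PHY_ABSENT;
	if (err == -DEMO_EPROBE_DEFER)
		return DEMO_PHY_DEFER;
	return DEMO_PHY_ERROR;
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_classify_phy_error(-DEMO_ENODEV),
	       demo_classify_phy_error(-DEMO_EPROBE_DEFER),
	       demo_classify_phy_error(-5 /* -EIO */));
	return 0;
}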
dwc->xhci_resources[0].start = res->start;
@@ -479,6 +640,14 @@ static int dwc3_probe(struct platform_device *pdev)
goto err0;
}
+ if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
+ dwc->dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
+ dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
+
+ if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
+ dwc->dr_mode = USB_DR_MODE_OTG;
+
ret = dwc3_core_init(dwc);
if (ret) {
dev_err(dev, "failed to initialize core\n");
@@ -487,21 +656,20 @@ static int dwc3_probe(struct platform_device *pdev)
usb_phy_set_suspend(dwc->usb2_phy, 0);
usb_phy_set_suspend(dwc->usb3_phy, 0);
+ ret = phy_power_on(dwc->usb2_generic_phy);
+ if (ret < 0)
+ goto err1;
+
+ ret = phy_power_on(dwc->usb3_generic_phy);
+ if (ret < 0)
+ goto err_usb2phy_power;
ret = dwc3_event_buffers_setup(dwc);
if (ret) {
dev_err(dwc->dev, "failed to setup event buffers\n");
- goto err1;
+ goto err_usb3phy_power;
}
- if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
- dwc->dr_mode = USB_DR_MODE_HOST;
- else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
- dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
-
- if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
- dwc->dr_mode = USB_DR_MODE_OTG;
-
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
@@ -568,6 +736,12 @@ err3:
err2:
dwc3_event_buffers_cleanup(dwc);
+err_usb3phy_power:
+ phy_power_off(dwc->usb3_generic_phy);
+
+err_usb2phy_power:
+ phy_power_off(dwc->usb2_generic_phy);
+
err1:
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
@@ -585,6 +759,8 @@ static int dwc3_remove(struct platform_device *pdev)
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -682,6 +858,8 @@ static int dwc3_suspend(struct device *dev)
usb_phy_shutdown(dwc->usb3_phy);
usb_phy_shutdown(dwc->usb2_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
return 0;
}
@@ -690,9 +868,17 @@ static int dwc3_resume(struct device *dev)
{
struct dwc3 *dwc = dev_get_drvdata(dev);
unsigned long flags;
+ int ret;
usb_phy_init(dwc->usb3_phy);
usb_phy_init(dwc->usb2_phy);
+ ret = phy_init(dwc->usb2_generic_phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_init(dwc->usb3_generic_phy);
+ if (ret < 0)
+ goto err_usb2phy_init;
spin_lock_irqsave(&dwc->lock, flags);
@@ -716,6 +902,11 @@ static int dwc3_resume(struct device *dev)
pm_runtime_enable(dev);
return 0;
+
+err_usb2phy_init:
+ phy_exit(dwc->usb2_generic_phy);
+
+ return ret;
}
static const struct dev_pm_ops dwc3_dev_pm_ops = {
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index f8af8d44af85..57332e3768e4 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -31,11 +31,14 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
+#include <linux/phy/phy.h>
+
/* Global constants */
#define DWC3_EP0_BOUNCE_SIZE 512
#define DWC3_ENDPOINTS_NUM 32
#define DWC3_XHCI_RESOURCES_NUM 2
+#define DWC3_SCRATCHBUF_SIZE 4096 /* each buffer is assumed to be 4KiB */
#define DWC3_EVENT_SIZE 4 /* bytes */
#define DWC3_EVENT_MAX_NUM 64 /* 2 events/endpoint */
#define DWC3_EVENT_BUFFERS_SIZE (DWC3_EVENT_SIZE * DWC3_EVENT_MAX_NUM)
@@ -157,6 +160,7 @@
#define DWC3_GCTL_PRTCAP_OTG 3
#define DWC3_GCTL_CORESOFTRESET (1 << 11)
+#define DWC3_GCTL_SOFITPSYNC (1 << 10)
#define DWC3_GCTL_SCALEDOWN(n) ((n) << 4)
#define DWC3_GCTL_SCALEDOWN_MASK DWC3_GCTL_SCALEDOWN(3)
#define DWC3_GCTL_DISSCRAMBLE (1 << 3)
@@ -318,7 +322,7 @@
/* Device Endpoint Command Register */
#define DWC3_DEPCMD_PARAM_SHIFT 16
#define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT)
-#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
+#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
#define DWC3_DEPCMD_STATUS(x) (((x) >> 15) & 1)
#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
#define DWC3_DEPCMD_CMDACT (1 << 10)
@@ -393,6 +397,7 @@ struct dwc3_event_buffer {
* @busy_slot: first slot which is owned by HW
* @desc: usb_endpoint_descriptor pointer
* @dwc: pointer to DWC controller
+ * @saved_state: ep state saved during hibernation
* @flags: endpoint flags (wedged, stalled, ...)
* @current_trb: index of current used trb
* @number: endpoint number (1 - 15)
@@ -415,6 +420,7 @@ struct dwc3_ep {
const struct usb_ss_ep_comp_descriptor *comp_desc;
struct dwc3 *dwc;
+ u32 saved_state;
unsigned flags;
#define DWC3_EP_ENABLED (1 << 0)
#define DWC3_EP_STALL (1 << 1)
@@ -598,6 +604,7 @@ struct dwc3_scratchpad_array {
* @ep0_trb: dma address of ep0_trb
* @ep0_usb_req: dummy req used while handling STD USB requests
* @ep0_bounce_addr: dma address of ep0_bounce
+ * @scratch_addr: dma address of scratchbuf
* @lock: for synchronizing
* @dev: pointer to our struct device
* @xhci: pointer to our xHCI child
@@ -606,6 +613,7 @@ struct dwc3_scratchpad_array {
* @gadget_driver: pointer to the gadget driver
* @regs: base address for our registers
* @regs_size: address space size
+ * @nr_scratch: number of scratch buffers
* @num_event_buffers: calculated number of event buffers
* @u1u2: only used on revisions <1.83a for workaround
* @maximum_speed: maximum speed requested (mainly for testing purposes)
@@ -613,16 +621,10 @@ struct dwc3_scratchpad_array {
* @dr_mode: requested mode of operation
* @usb2_phy: pointer to USB2 PHY
* @usb3_phy: pointer to USB3 PHY
+ * @usb2_generic_phy: pointer to USB2 PHY
+ * @usb3_generic_phy: pointer to USB3 PHY
* @dcfg: saved contents of DCFG register
* @gctl: saved contents of GCTL register
- * @is_selfpowered: true when we are selfpowered
- * @three_stage_setup: set if we perform a three phase setup
- * @ep0_bounced: true when we used bounce buffer
- * @ep0_expect_in: true when we expect a DATA IN transfer
- * @start_config_issued: true when StartConfig command has been issued
- * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
- * @needs_fifo_resize: not all users might want fifo resizing, flag it
- * @resize_fifos: tells us it's ok to reconfigure our TxFIFO sizes.
* @isoch_delay: wValue from Set Isochronous Delay request;
* @u2sel: parameter from Set SEL request.
* @u2pel: parameter from Set SEL request.
@@ -637,15 +639,31 @@ struct dwc3_scratchpad_array {
* @mem: points to start of memory which is used for this struct.
* @hwparams: copy of hwparams registers
* @root: debugfs root folder pointer
+ * @regset: debugfs pointer to regdump file
+ * @test_mode: true when we're entering a USB test mode
+ * @test_mode_nr: test feature selector
+ * @delayed_status: true when gadget driver asks for delayed status
+ * @ep0_bounced: true when we used bounce buffer
+ * @ep0_expect_in: true when we expect a DATA IN transfer
+ * @has_hibernation: true when dwc3 was configured with Hibernation
+ * @is_selfpowered: true when we are selfpowered
+ * @needs_fifo_resize: not all users might want fifo resizing, flag it
+ * @pullups_connected: true when Run/Stop bit is set
+ * @resize_fifos: tells us it's ok to reconfigure our TxFIFO sizes.
+ * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
+ * @start_config_issued: true when StartConfig command has been issued
+ * @three_stage_setup: set if we perform a three phase setup
*/
struct dwc3 {
struct usb_ctrlrequest *ctrl_req;
struct dwc3_trb *ep0_trb;
void *ep0_bounce;
+ void *scratchbuf;
u8 *setup_buf;
dma_addr_t ctrl_req_addr;
dma_addr_t ep0_trb_addr;
dma_addr_t ep0_bounce_addr;
+ dma_addr_t scratch_addr;
struct dwc3_request ep0_usb_req;
/* device lock */
@@ -665,6 +683,9 @@ struct dwc3 {
struct usb_phy *usb2_phy;
struct usb_phy *usb3_phy;
+ struct phy *usb2_generic_phy;
+ struct phy *usb3_generic_phy;
+
void __iomem *regs;
size_t regs_size;
@@ -674,6 +695,7 @@ struct dwc3 {
u32 dcfg;
u32 gctl;
+ u32 nr_scratch;
u32 num_event_buffers;
u32 u1u2;
u32 maximum_speed;
@@ -695,17 +717,9 @@ struct dwc3 {
#define DWC3_REVISION_230A 0x5533230a
#define DWC3_REVISION_240A 0x5533240a
#define DWC3_REVISION_250A 0x5533250a
-
- unsigned is_selfpowered:1;
- unsigned three_stage_setup:1;
- unsigned ep0_bounced:1;
- unsigned ep0_expect_in:1;
- unsigned start_config_issued:1;
- unsigned setup_packet_pending:1;
- unsigned delayed_status:1;
- unsigned needs_fifo_resize:1;
- unsigned resize_fifos:1;
- unsigned pullups_connected:1;
+#define DWC3_REVISION_260A 0x5533260a
+#define DWC3_REVISION_270A 0x5533270a
+#define DWC3_REVISION_280A 0x5533280a
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
@@ -730,6 +744,18 @@ struct dwc3 {
u8 test_mode;
u8 test_mode_nr;
+
+ unsigned delayed_status:1;
+ unsigned ep0_bounced:1;
+ unsigned ep0_expect_in:1;
+ unsigned has_hibernation:1;
+ unsigned is_selfpowered:1;
+ unsigned needs_fifo_resize:1;
+ unsigned pullups_connected:1;
+ unsigned resize_fifos:1;
+ unsigned setup_packet_pending:1;
+ unsigned start_config_issued:1;
+ unsigned three_stage_setup:1;
};
/* -------------------------------------------------------------------------- */
@@ -815,15 +841,15 @@ struct dwc3_event_depevt {
* 12 - VndrDevTstRcved
* @reserved15_12: Reserved, not used
* @event_info: Information about this event
- * @reserved31_24: Reserved, not used
+ * @reserved31_25: Reserved, not used
*/
struct dwc3_event_devt {
u32 one_bit:1;
u32 device_event:7;
u32 type:4;
u32 reserved15_12:4;
- u32 event_info:8;
- u32 reserved31_24:8;
+ u32 event_info:9;
+ u32 reserved31_25:7;
} __packed;
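The dwc3_event_devt change above widens event_info from 8 to 9 bits and shrinks the reserved field accordingly, keeping the event word at 32 bits. A standalone check of that layout arithmetic (plain C bitfields here are only an illustration; the kernel relies on __packed and the documented register format):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_event_devt {
	uint32_t one_bit:1;
	uint32_t device_event:7;
	uint32_t type:4;
	uint32_t reserved15_12:4;
	uint32_t event_info:9;
	uint32_t reserved31_25:7;
};

int main(void)
{
	/* 1 + 7 + 4 + 4 + 9 + 7 == 32, so the event still fits one 32-bit word. */
	assert(1 + 7 + 4 + 4 + 9 + 7 == 32);
	printf("sizeof(demo_event_devt) = %zu\n", sizeof(struct demo_event_devt));
	return 0;
}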
/**
@@ -856,6 +882,19 @@ union dwc3_event {
struct dwc3_event_gevt gevt;
};
+/**
+ * struct dwc3_gadget_ep_cmd_params - representation of endpoint command
+ * parameters
+ * @param2: third parameter
+ * @param1: second parameter
+ * @param0: first parameter
+ */
+struct dwc3_gadget_ep_cmd_params {
+ u32 param2;
+ u32 param1;
+ u32 param0;
+};
+
/*
* DWC3 Features to be used as Driver Data
*/
@@ -881,11 +920,31 @@ static inline void dwc3_host_exit(struct dwc3 *dwc)
#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_gadget_init(struct dwc3 *dwc);
void dwc3_gadget_exit(struct dwc3 *dwc);
+int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
+int dwc3_gadget_get_link_state(struct dwc3 *dwc);
+int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
+int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
+int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
{ return 0; }
static inline void dwc3_gadget_exit(struct dwc3 *dwc)
{ }
+static inline int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
+{ return 0; }
+static inline int dwc3_gadget_get_link_state(struct dwc3 *dwc)
+{ return 0; }
+static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc,
+ enum dwc3_link_state state)
+{ return 0; }
+
+static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
+{ return 0; }
+static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
+ int cmd, u32 param)
+{ return 0; }
#endif
/* power management interface */
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index b269dbd47fc4..1160ff41bed4 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -29,7 +29,6 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/extcon.h>
-#include <linux/extcon/of_extcon.h>
#include <linux/regulator/consumer.h>
#include <linux/usb/otg.h>
@@ -424,11 +423,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing memory base resource\n");
- return -EINVAL;
- }
-
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -522,7 +516,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
dwc3_omap_enable_irqs(omap);
if (of_property_read_bool(node, "extcon")) {
- edev = of_extcon_get_extcon_dev(dev, 0);
+ edev = extcon_get_edev_by_phandle(dev, 0);
if (IS_ERR(edev)) {
dev_vdbg(dev, "couldn't get extcon device\n");
ret = -EPROBE_DEFER;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2da0a5a2803a..a740eac74d56 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -68,6 +68,22 @@ int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
}
/**
+ * dwc3_gadget_get_link_state - Gets current state of USB Link
+ * @dwc: pointer to our context structure
+ *
+ * Caller should take care of locking. This function will
+ * return the link state on success (>= 0) or -ETIMEDOUT.
+ */
+int dwc3_gadget_get_link_state(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+
+ return DWC3_DSTS_USBLNKST(reg);
+}
+
+/**
* dwc3_gadget_set_link_state - Sets USB Link to a particular State
* @dwc: pointer to our context structure
* @state: the state to put link into
@@ -417,7 +433,7 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
const struct usb_endpoint_descriptor *desc,
const struct usb_ss_ep_comp_descriptor *comp_desc,
- bool ignore)
+ bool ignore, bool restore)
{
struct dwc3_gadget_ep_cmd_params params;
@@ -436,6 +452,11 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
if (ignore)
params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
+ if (restore) {
+ params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
+ params.param2 |= dep->saved_state;
+ }
+
params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
| DWC3_DEPCFG_XFER_NOT_READY_EN;
@@ -494,7 +515,7 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
const struct usb_endpoint_descriptor *desc,
const struct usb_ss_ep_comp_descriptor *comp_desc,
- bool ignore)
+ bool ignore, bool restore)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
@@ -508,7 +529,8 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return ret;
}
- ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
+ ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
+ restore);
if (ret)
return ret;
@@ -548,13 +570,13 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return 0;
}
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
if (!list_empty(&dep->req_queued)) {
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, true);
/* - giveback all requests to gadget driver */
while (!list_empty(&dep->req_queued)) {
@@ -659,7 +681,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
}
spin_lock_irqsave(&dwc->lock, flags);
- ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
+ ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -771,9 +793,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
else
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
-
- if (!req->request.no_interrupt && !chain)
- trb->ctrl |= DWC3_TRB_CTRL_IOC;
break;
case USB_ENDPOINT_XFER_BULK:
@@ -788,6 +807,9 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
BUG();
}
+ if (!req->request.no_interrupt && !chain)
+ trb->ctrl |= DWC3_TRB_CTRL_IOC;
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
trb->ctrl |= DWC3_TRB_CTRL_CSP;
@@ -1077,7 +1099,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
if (list_empty(&dep->req_queued)) {
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
}
return 0;
@@ -1107,6 +1129,23 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return ret;
}
+ /*
+ * 4. Stream Capable Bulk Endpoints. We need to start the transfer
+ * right away, otherwise host will not know we have streams to be
+ * handled.
+ */
+ if (dep->stream_capable) {
+ int ret;
+
+ ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+ if (ret && ret != -EBUSY) {
+ struct dwc3 *dwc = dep->dwc;
+
+ dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
+ dep->name);
+ }
+ }
+
return 0;
}
@@ -1163,7 +1202,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
}
if (r == req) {
/* wait until it is processed */
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, true);
goto out1;
}
dev_err(dwc->dev, "request %p was not queued to %s\n",
@@ -1194,8 +1233,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
DWC3_DEPCMD_SETSTALL, &params);
if (ret)
- dev_err(dwc->dev, "failed to %s STALL on %s\n",
- value ? "set" : "clear",
+ dev_err(dwc->dev, "failed to set STALL on %s\n",
dep->name);
else
dep->flags |= DWC3_EP_STALL;
@@ -1203,8 +1241,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
DWC3_DEPCMD_CLEARSTALL, &params);
if (ret)
- dev_err(dwc->dev, "failed to %s STALL on %s\n",
- value ? "set" : "clear",
+ dev_err(dwc->dev, "failed to clear STALL on %s\n",
dep->name);
else
dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
@@ -1387,7 +1424,7 @@ static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
return 0;
}
-static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
u32 timeout = 500;
@@ -1402,9 +1439,17 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
if (dwc->revision >= DWC3_REVISION_194A)
reg &= ~DWC3_DCTL_KEEP_CONNECT;
reg |= DWC3_DCTL_RUN_STOP;
+
+ if (dwc->has_hibernation)
+ reg |= DWC3_DCTL_KEEP_CONNECT;
+
dwc->pullups_connected = true;
} else {
reg &= ~DWC3_DCTL_RUN_STOP;
+
+ if (dwc->has_hibernation && !suspend)
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+
dwc->pullups_connected = false;
}
@@ -1442,7 +1487,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
is_on = !!is_on;
spin_lock_irqsave(&dwc->lock, flags);
- ret = dwc3_gadget_run_stop(dwc, is_on);
+ ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -1549,14 +1594,16 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
+ false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err2;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
+ false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err3;
@@ -1849,15 +1896,12 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
*/
dep->flags = DWC3_EP_PENDING_REQUEST;
} else {
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
}
return 1;
}
- if ((event->status & DEPEVT_STATUS_IOC) &&
- (trb->ctrl & DWC3_TRB_CTRL_IOC))
- return 0;
return 1;
}
@@ -1999,7 +2043,25 @@ static void dwc3_disconnect_gadget(struct dwc3 *dwc)
}
}
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
+static void dwc3_suspend_gadget(struct dwc3 *dwc)
+{
+ if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
+ spin_unlock(&dwc->lock);
+ dwc->gadget_driver->suspend(&dwc->gadget);
+ spin_lock(&dwc->lock);
+ }
+}
+
+static void dwc3_resume_gadget(struct dwc3 *dwc)
+{
+ if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ spin_unlock(&dwc->lock);
+ dwc->gadget_driver->resume(&dwc->gadget);
+ spin_lock(&dwc->lock);
+ }
+}
+
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
{
struct dwc3_ep *dep;
struct dwc3_gadget_ep_cmd_params params;
@@ -2031,7 +2093,8 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
*/
cmd = DWC3_DEPCMD_ENDTRANSFER;
- cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
+ cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
+ cmd |= DWC3_DEPCMD_CMDIOC;
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
@@ -2260,17 +2323,23 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
reg |= DWC3_DCTL_HIRD_THRES(12);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ } else {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
}
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
+ false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
+ false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
@@ -2378,9 +2447,50 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
dwc->link_state = next;
+ switch (next) {
+ case DWC3_LINK_STATE_U1:
+ if (dwc->speed == USB_SPEED_SUPER)
+ dwc3_suspend_gadget(dwc);
+ break;
+ case DWC3_LINK_STATE_U2:
+ case DWC3_LINK_STATE_U3:
+ dwc3_suspend_gadget(dwc);
+ break;
+ case DWC3_LINK_STATE_RESUME:
+ dwc3_resume_gadget(dwc);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
+static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
+ unsigned int evtinfo)
+{
+ unsigned int is_ss = evtinfo & BIT(4);
+
+ /**
+ * WORKAROUND: DWC3 revision 2.20a with hibernation support
+ * has a known issue which can cause USB CV TD.9.23 to fail
+ * randomly.
+ *
+ * Because of this issue, core could generate bogus hibernation
+ * events which SW needs to ignore.
+ *
+ * Refers to:
+ *
+ * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
+ * Device Fallback from SuperSpeed
+ */
+ if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
+ return;
+
+ /* enter hibernation here */
+}
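dwc3_gadget_hibernation_interrupt() above drops hibernation events whose SuperSpeed flag disagrees with the current connection speed, working around the 2.20a erratum. The filter is a plain XOR; a tiny sketch of that truth table:

#include <stdbool.h>
#include <stdio.h>

/* Ignore the event when the event's "SS" bit and the current speed disagree. */
static bool demo_ignore_hiber_event(bool event_says_ss, bool link_is_ss)
{
	return event_says_ss ^ link_is_ss;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       demo_ignore_hiber_event(false, false),	/* 0: consistent, handle it */
	       demo_ignore_hiber_event(false, true),	/* 1: bogus, ignore */
	       demo_ignore_hiber_event(true,  false),	/* 1: bogus, ignore */
	       demo_ignore_hiber_event(true,  true));	/* 0: consistent, handle it */
	return 0;
}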
+
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
const struct dwc3_event_devt *event)
{
@@ -2397,6 +2507,13 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
case DWC3_DEVICE_EVENT_WAKEUP:
dwc3_gadget_wakeup_interrupt(dwc);
break;
+ case DWC3_DEVICE_EVENT_HIBER_REQ:
+ if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
+ "unexpected hibernation event\n"))
+ break;
+
+ dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
+ break;
case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
break;
@@ -2661,8 +2778,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_prepare(struct dwc3 *dwc)
{
- if (dwc->pullups_connected)
+ if (dwc->pullups_connected) {
dwc3_gadget_disable_irq(dwc);
+ dwc3_gadget_run_stop(dwc, true, true);
+ }
return 0;
}
@@ -2671,7 +2790,7 @@ void dwc3_gadget_complete(struct dwc3 *dwc)
{
if (dwc->pullups_connected) {
dwc3_gadget_enable_irq(dwc);
- dwc3_gadget_run_stop(dwc, true);
+ dwc3_gadget_run_stop(dwc, true, false);
}
}
@@ -2694,12 +2813,14 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
+ false);
if (ret)
goto err0;
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
+ false);
if (ret)
goto err1;
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index febe1aa7b714..a0ee75b68a80 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -56,12 +56,6 @@ struct dwc3;
/* DEPXFERCFG parameter 0 */
#define DWC3_DEPXFERCFG_NUM_XFER_RES(n) ((n) & 0xffff)
-struct dwc3_gadget_ep_cmd_params {
- u32 param2;
- u32 param1;
- u32 param0;
-};
-
/* -------------------------------------------------------------------------- */
#define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
@@ -85,9 +79,6 @@ static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status);
-int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
-int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
-
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event);
void dwc3_ep0_out_start(struct dwc3 *dwc);
@@ -95,9 +86,6 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags);
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
-int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
-int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param);
/**
* dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 8154165aa601..3557c7e5040d 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -226,7 +226,7 @@ config USB_GR_UDC
config USB_OMAP
tristate "OMAP USB Device Controller"
depends on ARCH_OMAP1
- select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_H4_OTG
+ select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
help
Many Texas Instruments OMAP processors have flexible full
speed USB device controllers, with support for up to 30
@@ -301,7 +301,6 @@ config USB_PXA27X
gadget drivers to also be dynamically linked.
config USB_S3C_HSOTG
- depends on ARM
tristate "Designware/S3C HS/OtG USB Device controller"
help
The Designware USB2.0 high-speed gadget controller
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index cea8c20a1425..f605ad8c1902 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1758,15 +1758,15 @@ static int at91udc_probe(struct platform_device *pdev)
/* newer chips have more FIFO memory than rm9200 */
if (cpu_is_at91sam9260() || cpu_is_at91sam9g20()) {
- usb_ep_set_maxpacket_limit(&udc->ep[0].ep, 64);
- usb_ep_set_maxpacket_limit(&udc->ep[3].ep, 64);
- usb_ep_set_maxpacket_limit(&udc->ep[4].ep, 512);
- usb_ep_set_maxpacket_limit(&udc->ep[5].ep, 512);
+ udc->ep[0].maxpacket = 64;
+ udc->ep[3].maxpacket = 64;
+ udc->ep[4].maxpacket = 512;
+ udc->ep[5].maxpacket = 512;
} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
- usb_ep_set_maxpacket_limit(&udc->ep[3].ep, 64);
+ udc->ep[3].maxpacket = 64;
} else if (cpu_is_at91sam9263()) {
- usb_ep_set_maxpacket_limit(&udc->ep[0].ep, 64);
- usb_ep_set_maxpacket_limit(&udc->ep[3].ep, 64);
+ udc->ep[0].maxpacket = 64;
+ udc->ep[3].maxpacket = 64;
}
udc->udp_baseaddr = ioremap(res->start, resource_size(res));
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 52771d4c44bc..9f65324f9ae0 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -1661,7 +1661,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
if (dma_status) {
int i;
- for (i = 1; i < USBA_NR_ENDPOINTS; i++)
+ for (i = 1; i < USBA_NR_DMAS; i++)
if (dma_status & (1 << i))
usba_dma_irq(udc, &udc->usba_ep[i]);
}
@@ -1670,7 +1670,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
if (ep_status) {
int i;
- for (i = 0; i < USBA_NR_ENDPOINTS; i++)
+ for (i = 0; i < udc->num_ep; i++)
if (ep_status & (1 << i)) {
if (ep_is_control(&udc->usba_ep[i]))
usba_control_irq(udc, &udc->usba_ep[i]);
@@ -1827,12 +1827,12 @@ static int atmel_usba_stop(struct usb_gadget *gadget,
toggle_bias(0);
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
- udc->driver = NULL;
-
clk_disable_unprepare(udc->hclk);
clk_disable_unprepare(udc->pclk);
- DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
+ DBG(DBG_GADGET, "unregistered driver `%s'\n", udc->driver->driver.name);
+
+ udc->driver = NULL;
return 0;
}
@@ -1914,6 +1914,12 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
i++;
}
+ if (i == 0) {
+ dev_err(&pdev->dev, "of_probe: no endpoint specified\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
return eps;
err:
return ERR_PTR(ret);
diff --git a/drivers/usb/gadget/atmel_usba_udc.h b/drivers/usb/gadget/atmel_usba_udc.h
index 2922db50befe..a70706e8cb02 100644
--- a/drivers/usb/gadget/atmel_usba_udc.h
+++ b/drivers/usb/gadget/atmel_usba_udc.h
@@ -210,7 +210,7 @@
#define USBA_FIFO_BASE(x) ((x) << 16)
/* Synth parameters */
-#define USBA_NR_ENDPOINTS 7
+#define USBA_NR_DMAS 7
#define EP0_FIFO_SIZE 64
#define EP0_EPT_SIZE USBA_EPT_SIZE_64
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d742bed7a5fa..fab906429b80 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1139,7 +1139,7 @@ struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev,
uc = copy_gadget_strings(sp, n_gstrings, n_strings);
if (IS_ERR(uc))
- return ERR_PTR(PTR_ERR(uc));
+ return ERR_CAST(uc);
n_gs = get_containers_gs(uc);
ret = usb_string_ids_tab(cdev, n_gs[0]->strings);
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 2b4334394076..2e164dca08e8 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -28,6 +28,10 @@
#include <linux/usb/composite.h>
#include <linux/usb/functionfs.h>
+#include <linux/aio.h>
+#include <linux/mmu_context.h>
+#include <linux/poll.h>
+
#include "u_fs.h"
#include "configfs.h"
@@ -99,6 +103,14 @@ static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
}
+static inline enum ffs_setup_state
+ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
+{
+ return (enum ffs_setup_state)
+ cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
+}
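ffs_setup_state_clear_cancelled() above atomically turns FFS_SETUP_CANCELLED back into FFS_NO_SETUP while reporting the state it actually observed, so a cancelled setup is consumed exactly once even with concurrent readers. A userspace analogue using C11 atomics in place of the kernel's cmpxchg():

#include <stdatomic.h>
#include <stdio.h>

enum demo_setup_state { DEMO_NO_SETUP, DEMO_SETUP_PENDING, DEMO_SETUP_CANCELLED };

static _Atomic int demo_setup_state = DEMO_SETUP_CANCELLED;

static enum demo_setup_state demo_state_clear_cancelled(void)
{
	int expected = DEMO_SETUP_CANCELLED;

	/* Like cmpxchg(): 'expected' ends up holding the value actually seen. */
	atomic_compare_exchange_strong(&demo_setup_state, &expected, DEMO_NO_SETUP);
	return (enum demo_setup_state)expected;
}

int main(void)
{
	printf("%d\n", demo_state_clear_cancelled());	/* 2: saw CANCELLED, cleared it */
	printf("%d\n", demo_state_clear_cancelled());	/* 0: now NO_SETUP */
	return 0;
}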
+
+
static void ffs_func_eps_disable(struct ffs_function *func);
static int __must_check ffs_func_eps_enable(struct ffs_function *func);
@@ -122,8 +134,8 @@ struct ffs_ep {
struct usb_ep *ep; /* P: ffs->eps_lock */
struct usb_request *req; /* P: epfile->mutex */
- /* [0]: full speed, [1]: high speed */
- struct usb_endpoint_descriptor *descs[2];
+ /* [0]: full speed, [1]: high speed, [2]: super speed */
+ struct usb_endpoint_descriptor *descs[3];
u8 num;
@@ -148,6 +160,25 @@ struct ffs_epfile {
unsigned char _pad;
};
+/* ffs_io_data structure ***************************************************/
+
+struct ffs_io_data {
+ bool aio;
+ bool read;
+
+ struct kiocb *kiocb;
+ const struct iovec *iovec;
+ unsigned long nr_segs;
+ char __user *buf;
+ size_t len;
+
+ struct mm_struct *mm;
+ struct work_struct work;
+
+ struct usb_ep *ep;
+ struct usb_request *req;
+};
+
static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
@@ -161,8 +192,10 @@ ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
DEFINE_MUTEX(ffs_lock);
EXPORT_SYMBOL(ffs_lock);
-static struct ffs_dev *ffs_find_dev(const char *name);
+static struct ffs_dev *_ffs_find_dev(const char *name);
+static struct ffs_dev *_ffs_alloc_dev(void);
static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
+static void _ffs_free_dev(struct ffs_dev *dev);
static void *ffs_acquire_dev(const char *dev_name);
static void ffs_release_dev(struct ffs_data *ffs_data);
static int ffs_ready(struct ffs_data *ffs);
@@ -218,7 +251,7 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
}
ffs->setup_state = FFS_NO_SETUP;
- return ffs->ep0req_status;
+ return req->status ? req->status : req->actual;
}
static int __ffs_ep0_stall(struct ffs_data *ffs)
@@ -244,7 +277,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
ENTER();
/* Fast check if setup was canceled */
- if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+ if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
/* Acquire mutex */
@@ -310,8 +343,8 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
* rather then _irqsave
*/
spin_lock_irq(&ffs->ev.waitq.lock);
- switch (FFS_SETUP_STATE(ffs)) {
- case FFS_SETUP_CANCELED:
+ switch (ffs_setup_state_clear_cancelled(ffs)) {
+ case FFS_SETUP_CANCELLED:
ret = -EIDRM;
goto done_spin;
@@ -346,7 +379,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
/*
* We are guaranteed to be still in FFS_ACTIVE state
* but the state of setup could have changed from
- * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need
+ * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
* to check for that. If that happened we copied data
* from user space in vain but it's unlikely.
*
@@ -355,7 +388,8 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
* transition can be performed and it's protected by
* mutex.
*/
- if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+ if (ffs_setup_state_clear_cancelled(ffs) ==
+ FFS_SETUP_CANCELLED) {
ret = -EIDRM;
done_spin:
spin_unlock_irq(&ffs->ev.waitq.lock);
@@ -421,7 +455,7 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
ENTER();
/* Fast check if setup was canceled */
- if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+ if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
/* Acquire mutex */
@@ -441,8 +475,8 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
*/
spin_lock_irq(&ffs->ev.waitq.lock);
- switch (FFS_SETUP_STATE(ffs)) {
- case FFS_SETUP_CANCELED:
+ switch (ffs_setup_state_clear_cancelled(ffs)) {
+ case FFS_SETUP_CANCELLED:
ret = -EIDRM;
break;
@@ -489,7 +523,8 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
spin_lock_irq(&ffs->ev.waitq.lock);
/* See ffs_ep0_write() */
- if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+ if (ffs_setup_state_clear_cancelled(ffs) ==
+ FFS_SETUP_CANCELLED) {
ret = -EIDRM;
break;
}
@@ -558,6 +593,45 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
return ret;
}
+static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
+{
+ struct ffs_data *ffs = file->private_data;
+ unsigned int mask = POLLWRNORM;
+ int ret;
+
+ poll_wait(file, &ffs->ev.waitq, wait);
+
+ ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+ if (unlikely(ret < 0))
+ return mask;
+
+ switch (ffs->state) {
+ case FFS_READ_DESCRIPTORS:
+ case FFS_READ_STRINGS:
+ mask |= POLLOUT;
+ break;
+
+ case FFS_ACTIVE:
+ switch (ffs->setup_state) {
+ case FFS_NO_SETUP:
+ if (ffs->ev.count)
+ mask |= POLLIN;
+ break;
+
+ case FFS_SETUP_PENDING:
+ case FFS_SETUP_CANCELLED:
+ mask |= (POLLIN | POLLOUT);
+ break;
+		}
+		break;
+	case FFS_CLOSING:
+ break;
+ }
+
+ mutex_unlock(&ffs->mutex);
+
+ return mask;
+}
+
static const struct file_operations ffs_ep0_operations = {
.llseek = no_llseek,
@@ -566,6 +640,7 @@ static const struct file_operations ffs_ep0_operations = {
.read = ffs_ep0_read,
.release = ffs_ep0_release,
.unlocked_ioctl = ffs_ep0_ioctl,
+ .poll = ffs_ep0_poll,
};
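With .poll wired into ffs_ep0_operations, user space can wait for FunctionFS events on ep0 instead of blocking in read(). A minimal user-space sketch of that usage, assuming the function is mounted at /dev/ffs (a hypothetical path) and the descriptors and strings have already been written to ep0:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/usb/functionfs.h>

int main(void)
{
	int fd = open("/dev/ffs/ep0", O_RDWR);	/* hypothetical mount point */
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
	struct usb_functionfs_event event;

	if (fd < 0)
		return 1;

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		/* POLLIN: an event (BIND, ENABLE, SETUP, ...) is ready */
		if ((pfd.revents & POLLIN) &&
		    read(fd, &event, sizeof(event)) == (ssize_t)sizeof(event))
			printf("got event type %u\n", event.type);
	}

	close(fd);
	return 0;
}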
@@ -581,8 +656,52 @@ static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
}
}
-static ssize_t ffs_epfile_io(struct file *file,
- char __user *buf, size_t len, int read)
+static void ffs_user_copy_worker(struct work_struct *work)
+{
+ struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+ work);
+ int ret = io_data->req->status ? io_data->req->status :
+ io_data->req->actual;
+
+ if (io_data->read && ret > 0) {
+ int i;
+ size_t pos = 0;
+ use_mm(io_data->mm);
+ for (i = 0; i < io_data->nr_segs; i++) {
+ if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
+ &io_data->buf[pos],
+ io_data->iovec[i].iov_len))) {
+ ret = -EFAULT;
+ break;
+ }
+ pos += io_data->iovec[i].iov_len;
+ }
+ unuse_mm(io_data->mm);
+ }
+
+	/* clear ->private before aio_complete(), which may free the kiocb */
+	io_data->kiocb->private = NULL;
+	aio_complete(io_data->kiocb, ret, ret);
+
+	usb_ep_free_request(io_data->ep, io_data->req);
+
+ if (io_data->read)
+ kfree(io_data->iovec);
+ kfree(io_data->buf);
+ kfree(io_data);
+}
+
+static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
+ struct usb_request *req)
+{
+ struct ffs_io_data *io_data = req->context;
+
+ ENTER();
+
+ INIT_WORK(&io_data->work, ffs_user_copy_worker);
+ schedule_work(&io_data->work);
+}
+
+static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
{
struct ffs_epfile *epfile = file->private_data;
struct ffs_ep *ep;
@@ -612,7 +731,7 @@ static ssize_t ffs_epfile_io(struct file *file,
}
/* Do we halt? */
- halt = !read == !epfile->in;
+ halt = (!io_data->read == !epfile->in);
if (halt && epfile->isoc) {
ret = -EINVAL;
goto error;
@@ -630,15 +749,32 @@ static ssize_t ffs_epfile_io(struct file *file,
* Controller may require buffer size to be aligned to
* maxpacketsize of an out endpoint.
*/
- data_len = read ? usb_ep_align_maybe(gadget, ep->ep, len) : len;
+ data_len = io_data->read ?
+ usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
+ io_data->len;
data = kmalloc(data_len, GFP_KERNEL);
if (unlikely(!data))
return -ENOMEM;
-
- if (!read && unlikely(copy_from_user(data, buf, len))) {
- ret = -EFAULT;
- goto error;
+ if (io_data->aio && !io_data->read) {
+ int i;
+ size_t pos = 0;
+ for (i = 0; i < io_data->nr_segs; i++) {
+ if (unlikely(copy_from_user(&data[pos],
+ io_data->iovec[i].iov_base,
+ io_data->iovec[i].iov_len))) {
+ ret = -EFAULT;
+ goto error;
+ }
+ pos += io_data->iovec[i].iov_len;
+ }
+ } else {
+ if (!io_data->read &&
+ unlikely(__copy_from_user(data, io_data->buf,
+ io_data->len))) {
+ ret = -EFAULT;
+ goto error;
+ }
}
}
@@ -661,40 +797,78 @@ static ssize_t ffs_epfile_io(struct file *file,
ret = -EBADMSG;
} else {
/* Fire the request */
- DECLARE_COMPLETION_ONSTACK(done);
+ struct usb_request *req;
- struct usb_request *req = ep->req;
- req->context = &done;
- req->complete = ffs_epfile_io_complete;
- req->buf = data;
- req->length = data_len;
+ if (io_data->aio) {
+ req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
+ if (unlikely(!req))
+ goto error_lock;
- ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+ req->buf = data;
+ req->length = io_data->len;
- spin_unlock_irq(&epfile->ffs->eps_lock);
+ io_data->buf = data;
+ io_data->ep = ep->ep;
+ io_data->req = req;
- if (unlikely(ret < 0)) {
- /* nop */
- } else if (unlikely(wait_for_completion_interruptible(&done))) {
- ret = -EINTR;
- usb_ep_dequeue(ep->ep, req);
+ req->context = io_data;
+ req->complete = ffs_epfile_async_io_complete;
+
+ ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+ if (unlikely(ret)) {
+ usb_ep_free_request(ep->ep, req);
+ goto error_lock;
+ }
+ ret = -EIOCBQUEUED;
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
} else {
- /*
- * XXX We may end up silently droping data here.
- * Since data_len (i.e. req->length) may be bigger
- * than len (after being rounded up to maxpacketsize),
- * we may end up with more data then user space has
- * space for.
- */
- ret = ep->status;
- if (read && ret > 0 &&
- unlikely(copy_to_user(buf, data,
- min_t(size_t, ret, len))))
- ret = -EFAULT;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ req = ep->req;
+ req->buf = data;
+ req->length = io_data->len;
+
+ req->context = &done;
+ req->complete = ffs_epfile_io_complete;
+
+ ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+
+ if (unlikely(ret < 0)) {
+ /* nop */
+ } else if (unlikely(
+ wait_for_completion_interruptible(&done))) {
+ ret = -EINTR;
+ usb_ep_dequeue(ep->ep, req);
+ } else {
+ /*
+				 * XXX We may end up silently dropping data
+ * here. Since data_len (i.e. req->length) may
+ * be bigger than len (after being rounded up
+ * to maxpacketsize), we may end up with more
+				 * data than user space has space for.
+ */
+ ret = ep->status;
+ if (io_data->read && ret > 0) {
+ ret = min_t(size_t, ret, io_data->len);
+
+ if (unlikely(copy_to_user(io_data->buf,
+ data, ret)))
+ ret = -EFAULT;
+ }
+ }
+ kfree(data);
}
}
mutex_unlock(&epfile->mutex);
+ return ret;
+
+error_lock:
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ mutex_unlock(&epfile->mutex);
error:
kfree(data);
return ret;
@@ -704,17 +878,31 @@ static ssize_t
ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
loff_t *ptr)
{
+ struct ffs_io_data io_data;
+
ENTER();
- return ffs_epfile_io(file, (char __user *)buf, len, 0);
+ io_data.aio = false;
+ io_data.read = false;
+	io_data.buf = (char __user *)buf;
+ io_data.len = len;
+
+ return ffs_epfile_io(file, &io_data);
}
static ssize_t
ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
{
+ struct ffs_io_data io_data;
+
ENTER();
- return ffs_epfile_io(file, buf, len, 1);
+ io_data.aio = false;
+ io_data.read = true;
+ io_data.buf = buf;
+ io_data.len = len;
+
+ return ffs_epfile_io(file, &io_data);
}
static int
@@ -733,6 +921,89 @@ ffs_epfile_open(struct inode *inode, struct file *file)
return 0;
}
+static int ffs_aio_cancel(struct kiocb *kiocb)
+{
+ struct ffs_io_data *io_data = kiocb->private;
+ struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+ int value;
+
+ ENTER();
+
+ spin_lock_irq(&epfile->ffs->eps_lock);
+
+ if (likely(io_data && io_data->ep && io_data->req))
+ value = usb_ep_dequeue(io_data->ep, io_data->req);
+ else
+ value = -EINVAL;
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+
+ return value;
+}
+
+static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
+ const struct iovec *iovec,
+ unsigned long nr_segs, loff_t loff)
+{
+ struct ffs_io_data *io_data;
+
+ ENTER();
+
+ io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
+ if (unlikely(!io_data))
+ return -ENOMEM;
+
+ io_data->aio = true;
+ io_data->read = false;
+ io_data->kiocb = kiocb;
+ io_data->iovec = iovec;
+ io_data->nr_segs = nr_segs;
+ io_data->len = kiocb->ki_nbytes;
+ io_data->mm = current->mm;
+
+ kiocb->private = io_data;
+
+ kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+
+ return ffs_epfile_io(kiocb->ki_filp, io_data);
+}
+
+static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
+ const struct iovec *iovec,
+ unsigned long nr_segs, loff_t loff)
+{
+ struct ffs_io_data *io_data;
+ struct iovec *iovec_copy;
+
+ ENTER();
+
+ iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
+ if (unlikely(!iovec_copy))
+ return -ENOMEM;
+
+ memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);
+
+ io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
+ if (unlikely(!io_data)) {
+ kfree(iovec_copy);
+ return -ENOMEM;
+ }
+
+ io_data->aio = true;
+ io_data->read = true;
+ io_data->kiocb = kiocb;
+ io_data->iovec = iovec_copy;
+ io_data->nr_segs = nr_segs;
+ io_data->len = kiocb->ki_nbytes;
+ io_data->mm = current->mm;
+
+ kiocb->private = io_data;
+
+ kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+
+ return ffs_epfile_io(kiocb->ki_filp, io_data);
+}
+
static int
ffs_epfile_release(struct inode *inode, struct file *file)
{
@@ -789,6 +1060,8 @@ static const struct file_operations ffs_epfile_operations = {
.open = ffs_epfile_open,
.write = ffs_epfile_write,
.read = ffs_epfile_read,
+ .aio_write = ffs_epfile_aio_write,
+ .aio_read = ffs_epfile_aio_read,
.release = ffs_epfile_release,
.unlocked_ioctl = ffs_epfile_ioctl,
};
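With aio_read/aio_write wired up, the endpoint files can also be driven through the kernel AIO interface. A minimal sketch using libaio, assuming a FunctionFS instance mounted at /dev/ffs and an OUT endpoint file ep1 (both hypothetical); the offset argument is ignored by f_fs and error handling is trimmed:

/* build with -laio */
#include <fcntl.h>
#include <unistd.h>
#include <libaio.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb iocb, *iocbs[1] = { &iocb };
	struct io_event ev;
	char buf[512];
	int fd = open("/dev/ffs/ep1", O_RDWR);

	if (fd < 0 || io_setup(1, &ctx) < 0)
		return 1;

	io_prep_pread(&iocb, fd, buf, sizeof(buf), 0);	/* queue one read */
	if (io_submit(ctx, 1, iocbs) != 1)
		return 1;

	/* completion is reported through the usual io_getevents() path */
	if (io_getevents(ctx, 1, 1, &ev, NULL) == 1 && (long)ev.res > 0)
		write(STDOUT_FILENO, buf, ev.res);

	io_destroy(ctx);
	close(fd);
	return 0;
}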
@@ -1172,7 +1445,7 @@ static void ffs_data_clear(struct ffs_data *ffs)
if (ffs->epfiles)
ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
- kfree(ffs->raw_descs);
+ kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
kfree(ffs->stringtabs);
}
@@ -1184,14 +1457,15 @@ static void ffs_data_reset(struct ffs_data *ffs)
ffs_data_clear(ffs);
ffs->epfiles = NULL;
+ ffs->raw_descs_data = NULL;
ffs->raw_descs = NULL;
ffs->raw_strings = NULL;
ffs->stringtabs = NULL;
ffs->raw_descs_length = 0;
- ffs->raw_fs_descs_length = 0;
ffs->fs_descs_count = 0;
ffs->hs_descs_count = 0;
+ ffs->ss_descs_count = 0;
ffs->strings_count = 0;
ffs->interfaces_count = 0;
@@ -1334,7 +1608,24 @@ static int ffs_func_eps_enable(struct ffs_function *func)
spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
struct usb_endpoint_descriptor *ds;
- ds = ep->descs[ep->descs[1] ? 1 : 0];
+ int desc_idx;
+
+ if (ffs->gadget->speed == USB_SPEED_SUPER)
+ desc_idx = 2;
+ else if (ffs->gadget->speed == USB_SPEED_HIGH)
+ desc_idx = 1;
+ else
+ desc_idx = 0;
+
+ /* fall-back to lower speed if desc missing for current speed */
+ do {
+ ds = ep->descs[desc_idx];
+ } while (!ds && --desc_idx >= 0);
+
+ if (!ds) {
+ ret = -EINVAL;
+ break;
+ }
ep->ep->driver_data = ep;
ep->ep->desc = ds;
@@ -1469,6 +1760,12 @@ static int __must_check ffs_do_desc(char *data, unsigned len,
}
break;
+ case USB_DT_SS_ENDPOINT_COMP:
+ pr_vdebug("EP SS companion descriptor\n");
+ if (length != sizeof(struct usb_ss_ep_comp_descriptor))
+ goto inv_length;
+ break;
+
case USB_DT_OTHER_SPEED_CONFIG:
case USB_DT_INTERFACE_POWER:
case USB_DT_DEBUG:
@@ -1579,60 +1876,76 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
static int __ffs_data_got_descs(struct ffs_data *ffs,
char *const _data, size_t len)
{
- unsigned fs_count, hs_count;
- int fs_len, ret = -EINVAL;
- char *data = _data;
+ char *data = _data, *raw_descs;
+ unsigned counts[3], flags;
+ int ret = -EINVAL, i;
ENTER();
- if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_DESCRIPTORS_MAGIC ||
- get_unaligned_le32(data + 4) != len))
+ if (get_unaligned_le32(data + 4) != len)
goto error;
- fs_count = get_unaligned_le32(data + 8);
- hs_count = get_unaligned_le32(data + 12);
- if (!fs_count && !hs_count)
- goto einval;
-
- data += 16;
- len -= 16;
-
- if (likely(fs_count)) {
- fs_len = ffs_do_descs(fs_count, data, len,
- __ffs_data_do_entity, ffs);
- if (unlikely(fs_len < 0)) {
- ret = fs_len;
+ switch (get_unaligned_le32(data)) {
+ case FUNCTIONFS_DESCRIPTORS_MAGIC:
+ flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
+ data += 8;
+ len -= 8;
+ break;
+ case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
+ flags = get_unaligned_le32(data + 8);
+ if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
+ FUNCTIONFS_HAS_HS_DESC |
+ FUNCTIONFS_HAS_SS_DESC)) {
+ ret = -ENOSYS;
goto error;
}
+ data += 12;
+ len -= 12;
+ break;
+ default:
+ goto error;
+ }
- data += fs_len;
- len -= fs_len;
- } else {
- fs_len = 0;
+ /* Read fs_count, hs_count and ss_count (if present) */
+ for (i = 0; i < 3; ++i) {
+ if (!(flags & (1 << i))) {
+ counts[i] = 0;
+ } else if (len < 4) {
+ goto error;
+ } else {
+ counts[i] = get_unaligned_le32(data);
+ data += 4;
+ len -= 4;
+ }
}
- if (likely(hs_count)) {
- ret = ffs_do_descs(hs_count, data, len,
+ /* Read descriptors */
+ raw_descs = data;
+ for (i = 0; i < 3; ++i) {
+ if (!counts[i])
+ continue;
+ ret = ffs_do_descs(counts[i], data, len,
__ffs_data_do_entity, ffs);
- if (unlikely(ret < 0))
+ if (ret < 0)
goto error;
- } else {
- ret = 0;
+ data += ret;
+ len -= ret;
}
- if (unlikely(len != ret))
- goto einval;
+ if (raw_descs == data || len) {
+ ret = -EINVAL;
+ goto error;
+ }
- ffs->raw_fs_descs_length = fs_len;
- ffs->raw_descs_length = fs_len + ret;
- ffs->raw_descs = _data;
- ffs->fs_descs_count = fs_count;
- ffs->hs_descs_count = hs_count;
+ ffs->raw_descs_data = _data;
+ ffs->raw_descs = raw_descs;
+ ffs->raw_descs_length = data - raw_descs;
+ ffs->fs_descs_count = counts[0];
+ ffs->hs_descs_count = counts[1];
+ ffs->ss_descs_count = counts[2];
return 0;
-einval:
- ret = -EINVAL;
error:
kfree(_data);
return ret;
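For reference, the v2 layout parsed above is: a 32-bit magic, the total blob length, a flags word, then one 32-bit count per speed whose FUNCTIONFS_HAS_*_DESC flag is set, followed by the descriptors themselves back to back. A sketch of how user space might describe that header; the struct name is illustrative and the magic/flag macros are assumed to come from the updated UAPI <linux/usb/functionfs.h>, which is not part of this hunk:

#include <linux/types.h>
#include <linux/usb/functionfs.h>

struct ffs_descs_v2_header {		/* illustrative, not a UAPI struct */
	__le32 magic;		/* FUNCTIONFS_DESCRIPTORS_MAGIC_V2 */
	__le32 length;		/* total length of the blob, header included */
	__le32 flags;		/* FUNCTIONFS_HAS_FS_DESC | _HS_DESC | _SS_DESC */
	/*
	 * Then, in this order and only for the flags set above:
	 *	__le32 fs_count;
	 *	__le32 hs_count;
	 *	__le32 ss_count;
	 * followed by the full, high and super speed descriptors
	 * concatenated back to back.
	 */
} __attribute__((packed));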
@@ -1789,7 +2102,7 @@ static void __ffs_event_add(struct ffs_data *ffs,
* the source does nothing.
*/
if (ffs->setup_state == FFS_SETUP_PENDING)
- ffs->setup_state = FFS_SETUP_CANCELED;
+ ffs->setup_state = FFS_SETUP_CANCELLED;
switch (type) {
case FUNCTIONFS_RESUME:
@@ -1850,21 +2163,28 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
struct usb_endpoint_descriptor *ds = (void *)desc;
struct ffs_function *func = priv;
struct ffs_ep *ffs_ep;
-
- /*
- * If hs_descriptors is not NULL then we are reading hs
- * descriptors now
- */
- const int isHS = func->function.hs_descriptors != NULL;
- unsigned idx;
+ unsigned ep_desc_id, idx;
+ static const char *speed_names[] = { "full", "high", "super" };
if (type != FFS_DESCRIPTOR)
return 0;
- if (isHS)
+ /*
+ * If ss_descriptors is not NULL, we are reading super speed
+ * descriptors; if hs_descriptors is not NULL, we are reading high
+ * speed descriptors; otherwise, we are reading full speed
+ * descriptors.
+ */
+ if (func->function.ss_descriptors) {
+ ep_desc_id = 2;
+ func->function.ss_descriptors[(long)valuep] = desc;
+ } else if (func->function.hs_descriptors) {
+ ep_desc_id = 1;
func->function.hs_descriptors[(long)valuep] = desc;
- else
+ } else {
+ ep_desc_id = 0;
func->function.fs_descriptors[(long)valuep] = desc;
+ }
if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
return 0;
@@ -1872,13 +2192,13 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
ffs_ep = func->eps + idx;
- if (unlikely(ffs_ep->descs[isHS])) {
- pr_vdebug("two %sspeed descriptors for EP %d\n",
- isHS ? "high" : "full",
+ if (unlikely(ffs_ep->descs[ep_desc_id])) {
+ pr_err("two %sspeed descriptors for EP %d\n",
+ speed_names[ep_desc_id],
ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
return -EINVAL;
}
- ffs_ep->descs[isHS] = ds;
+ ffs_ep->descs[ep_desc_id] = ds;
ffs_dump_mem(": Original ep desc", ds, ds->bLength);
if (ffs_ep->ep) {
@@ -2022,8 +2342,10 @@ static int _ffs_func_bind(struct usb_configuration *c,
const int full = !!func->ffs->fs_descs_count;
const int high = gadget_is_dualspeed(func->gadget) &&
func->ffs->hs_descs_count;
+ const int super = gadget_is_superspeed(func->gadget) &&
+ func->ffs->ss_descs_count;
- int ret;
+ int fs_len, hs_len, ret;
/* Make it a single chunk, less management later on */
vla_group(d);
@@ -2032,15 +2354,16 @@ static int _ffs_func_bind(struct usb_configuration *c,
full ? ffs->fs_descs_count + 1 : 0);
vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
high ? ffs->hs_descs_count + 1 : 0);
+ vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
+ super ? ffs->ss_descs_count + 1 : 0);
vla_item_with_sz(d, short, inums, ffs->interfaces_count);
- vla_item_with_sz(d, char, raw_descs,
- high ? ffs->raw_descs_length : ffs->raw_fs_descs_length);
+ vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
char *vlabuf;
ENTER();
- /* Only high speed but not supported by gadget? */
- if (unlikely(!(full | high)))
+ /* Has descriptors only for speeds gadget does not support */
+ if (unlikely(!(full | high | super)))
return -ENOTSUPP;
/* Allocate a single chunk, less management later on */
@@ -2050,8 +2373,10 @@ static int _ffs_func_bind(struct usb_configuration *c,
/* Zero */
memset(vla_ptr(vlabuf, d, eps), 0, d_eps__sz);
- memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs + 16,
- d_raw_descs__sz);
+ /* Copy descriptors */
+ memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
+ ffs->raw_descs_length);
+
memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
for (ret = ffs->eps_count; ret; --ret) {
struct ffs_ep *ptr;
@@ -2073,22 +2398,38 @@ static int _ffs_func_bind(struct usb_configuration *c,
*/
if (likely(full)) {
func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
- ret = ffs_do_descs(ffs->fs_descs_count,
- vla_ptr(vlabuf, d, raw_descs),
- d_raw_descs__sz,
- __ffs_func_bind_do_descs, func);
- if (unlikely(ret < 0))
+ fs_len = ffs_do_descs(ffs->fs_descs_count,
+ vla_ptr(vlabuf, d, raw_descs),
+ d_raw_descs__sz,
+ __ffs_func_bind_do_descs, func);
+ if (unlikely(fs_len < 0)) {
+ ret = fs_len;
goto error;
+ }
} else {
- ret = 0;
+ fs_len = 0;
}
if (likely(high)) {
func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
- ret = ffs_do_descs(ffs->hs_descs_count,
- vla_ptr(vlabuf, d, raw_descs) + ret,
- d_raw_descs__sz - ret,
- __ffs_func_bind_do_descs, func);
+ hs_len = ffs_do_descs(ffs->hs_descs_count,
+ vla_ptr(vlabuf, d, raw_descs) + fs_len,
+ d_raw_descs__sz - fs_len,
+ __ffs_func_bind_do_descs, func);
+ if (unlikely(hs_len < 0)) {
+ ret = hs_len;
+ goto error;
+ }
+ } else {
+ hs_len = 0;
+ }
+
+ if (likely(super)) {
+ func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
+ ret = ffs_do_descs(ffs->ss_descs_count,
+ vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
+ d_raw_descs__sz - fs_len - hs_len,
+ __ffs_func_bind_do_descs, func);
if (unlikely(ret < 0))
goto error;
}
@@ -2099,7 +2440,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
* now.
*/
ret = ffs_do_descs(ffs->fs_descs_count +
- (high ? ffs->hs_descs_count : 0),
+ (high ? ffs->hs_descs_count : 0) +
+ (super ? ffs->ss_descs_count : 0),
vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
__ffs_func_bind_do_nums, func);
if (unlikely(ret < 0))
@@ -2258,7 +2600,7 @@ static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
static LIST_HEAD(ffs_devices);
-static struct ffs_dev *_ffs_find_dev(const char *name)
+static struct ffs_dev *_ffs_do_find_dev(const char *name)
{
struct ffs_dev *dev;
@@ -2275,7 +2617,7 @@ static struct ffs_dev *_ffs_find_dev(const char *name)
/*
* ffs_lock must be taken by the caller of this function
*/
-static struct ffs_dev *ffs_get_single_dev(void)
+static struct ffs_dev *_ffs_get_single_dev(void)
{
struct ffs_dev *dev;
@@ -2291,15 +2633,15 @@ static struct ffs_dev *ffs_get_single_dev(void)
/*
* ffs_lock must be taken by the caller of this function
*/
-static struct ffs_dev *ffs_find_dev(const char *name)
+static struct ffs_dev *_ffs_find_dev(const char *name)
{
struct ffs_dev *dev;
- dev = ffs_get_single_dev();
+ dev = _ffs_get_single_dev();
if (dev)
return dev;
- return _ffs_find_dev(name);
+ return _ffs_do_find_dev(name);
}
/* Configfs support *********************************************************/
@@ -2335,7 +2677,7 @@ static void ffs_free_inst(struct usb_function_instance *f)
opts = to_f_fs_opts(f);
ffs_dev_lock();
- ffs_free_dev(opts->dev);
+ _ffs_free_dev(opts->dev);
ffs_dev_unlock();
kfree(opts);
}
@@ -2390,7 +2732,7 @@ static struct usb_function_instance *ffs_alloc_inst(void)
opts->func_inst.set_inst_name = ffs_set_inst_name;
opts->func_inst.free_func_inst = ffs_free_inst;
ffs_dev_lock();
- dev = ffs_alloc_dev();
+ dev = _ffs_alloc_dev();
ffs_dev_unlock();
if (IS_ERR(dev)) {
kfree(opts);
@@ -2446,6 +2788,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
*/
func->function.fs_descriptors = NULL;
func->function.hs_descriptors = NULL;
+ func->function.ss_descriptors = NULL;
func->interfaces_nums = NULL;
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
@@ -2478,12 +2821,12 @@ static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
/*
* ffs_lock must be taken by the caller of this function
*/
-struct ffs_dev *ffs_alloc_dev(void)
+static struct ffs_dev *_ffs_alloc_dev(void)
{
struct ffs_dev *dev;
int ret;
- if (ffs_get_single_dev())
+ if (_ffs_get_single_dev())
return ERR_PTR(-EBUSY);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -2511,10 +2854,10 @@ static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
{
struct ffs_dev *existing;
- existing = _ffs_find_dev(name);
+ existing = _ffs_do_find_dev(name);
if (existing)
return -EBUSY;
-
+
dev->name = name;
return 0;
@@ -2555,7 +2898,7 @@ EXPORT_SYMBOL(ffs_single_dev);
/*
* ffs_lock must be taken by the caller of this function
*/
-void ffs_free_dev(struct ffs_dev *dev)
+static void _ffs_free_dev(struct ffs_dev *dev)
{
list_del(&dev->entry);
if (dev->name_allocated)
@@ -2572,7 +2915,7 @@ static void *ffs_acquire_dev(const char *dev_name)
ENTER();
ffs_dev_lock();
- ffs_dev = ffs_find_dev(dev_name);
+ ffs_dev = _ffs_find_dev(dev_name);
if (!ffs_dev)
ffs_dev = ERR_PTR(-ENODEV);
else if (ffs_dev->mounted)
@@ -2595,11 +2938,12 @@ static void ffs_release_dev(struct ffs_data *ffs_data)
ffs_dev_lock();
ffs_dev = ffs_data->private_data;
- if (ffs_dev)
+ if (ffs_dev) {
ffs_dev->mounted = false;
-
- if (ffs_dev->ffs_release_dev_callback)
- ffs_dev->ffs_release_dev_callback(ffs_dev);
+
+ if (ffs_dev->ffs_release_dev_callback)
+ ffs_dev->ffs_release_dev_callback(ffs_dev);
+ }
ffs_dev_unlock();
}
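The renames in this file also settle a naming convention: helpers with a leading underscore (_ffs_find_dev(), _ffs_alloc_dev(), _ffs_free_dev(), _ffs_get_single_dev()) expect ffs_lock to already be held, while the un-prefixed entry points take it themselves. A condensed sketch of the pattern, mirroring ffs_acquire_dev() above rather than adding anything new:

/* must be called with ffs_lock held */
static struct ffs_dev *_ffs_find_dev(const char *name);

static void *ffs_acquire_dev(const char *dev_name)
{
	struct ffs_dev *ffs_dev;

	ffs_dev_lock();				/* takes ffs_lock */
	ffs_dev = _ffs_find_dev(dev_name);	/* lock is held here */
	/* ... claim or refuse the device ... */
	ffs_dev_unlock();

	return ffs_dev;
}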
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
index 36d4bb23087f..807b31c0edc3 100644
--- a/drivers/usb/gadget/f_midi.c
+++ b/drivers/usb/gadget/f_midi.c
@@ -664,9 +664,10 @@ static int f_midi_register_card(struct f_midi *midi)
.dev_free = f_midi_snd_free,
};
- err = snd_card_create(midi->index, midi->id, THIS_MODULE, 0, &card);
+ err = snd_card_new(&midi->gadget->dev, midi->index, midi->id,
+ THIS_MODULE, 0, &card);
if (err < 0) {
- ERROR(midi, "snd_card_create() failed\n");
+ ERROR(midi, "snd_card_new() failed\n");
goto fail;
}
midi->card = card;
@@ -703,8 +704,6 @@ static int f_midi_register_card(struct f_midi *midi)
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &gmidi_in_ops);
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &gmidi_out_ops);
- snd_card_set_dev(card, &midi->gadget->dev);
-
/* register it - we're ready to go */
err = snd_card_register(card);
if (err < 0) {
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index f1a59190ac9a..df4a0dcbc993 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -276,7 +276,7 @@ static int geth_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
net = gether_connect(&geth->port);
- return IS_ERR(net) ? PTR_ERR(net) : 0;
+ return PTR_RET(net);
}
static void geth_disable(struct usb_function *f)
diff --git a/drivers/usb/gadget/f_uac2.c b/drivers/usb/gadget/f_uac2.c
index 2f23566e53d8..bc23040c7790 100644
--- a/drivers/usb/gadget/f_uac2.c
+++ b/drivers/usb/gadget/f_uac2.c
@@ -394,7 +394,7 @@ static int snd_uac2_probe(struct platform_device *pdev)
int err;
/* Choose any slot, with no id */
- err = snd_card_create(-1, NULL, THIS_MODULE, 0, &card);
+ err = snd_card_new(&pdev->dev, -1, NULL, THIS_MODULE, 0, &card);
if (err < 0)
return err;
@@ -421,8 +421,6 @@ static int snd_uac2_probe(struct platform_device *pdev)
strcpy(card->shortname, "UAC2_Gadget");
sprintf(card->longname, "UAC2_Gadget %i", pdev->id);
- snd_card_set_dev(card, &pdev->dev);
-
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
diff --git a/drivers/usb/gadget/gr_udc.c b/drivers/usb/gadget/gr_udc.c
index 914cbd84ee40..f984ee75324d 100644
--- a/drivers/usb/gadget/gr_udc.c
+++ b/drivers/usb/gadget/gr_udc.c
@@ -225,14 +225,8 @@ static void gr_dfs_create(struct gr_udc *dev)
const char *name = "gr_udc_state";
dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
- if (IS_ERR(dev->dfs_root)) {
- dev_err(dev->dev, "Failed to create debugfs directory\n");
- return;
- }
- dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root,
- dev, &gr_dfs_fops);
- if (IS_ERR(dev->dfs_state))
- dev_err(dev->dev, "Failed to create debugfs file %s\n", name);
+ dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
+ &gr_dfs_fops);
}
static void gr_dfs_delete(struct gr_udc *dev)
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index b94c049ab0d0..b5be6f0308c2 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -439,11 +439,9 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
value = -ENOMEM;
- kbuf = kmalloc (len, GFP_KERNEL);
- if (!kbuf)
- goto free1;
- if (copy_from_user (kbuf, buf, len)) {
- value = -EFAULT;
+ kbuf = memdup_user(buf, len);
+	if (IS_ERR(kbuf)) {
+ value = PTR_ERR(kbuf);
goto free1;
}
@@ -452,7 +450,6 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
data->name, len, (int) value);
free1:
mutex_unlock(&data->lock);
- kfree (kbuf);
return value;
}
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 049ebab0d360..e471580a2a3b 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -55,7 +55,6 @@
#include <mach/hardware.h>
#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <mach/platform.h>
#include <mach/irqs.h>
@@ -3295,9 +3294,9 @@ usb_clk_enable_fail:
pll_set_fail:
clk_disable(udc->usb_pll_clk);
pll_enable_fail:
- clk_put(udc->usb_slv_clk);
-usb_otg_clk_get_fail:
clk_put(udc->usb_otg_clk);
+usb_otg_clk_get_fail:
+ clk_put(udc->usb_slv_clk);
usb_clk_get_fail:
clk_put(udc->usb_pll_clk);
pll_get_fail:
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 69b76efd11e9..6474081dcbaf 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -427,12 +427,17 @@ setup_rx_reqs(struct printer_dev *dev)
req->length = USB_BUFSIZE;
req->complete = rx_complete;
+		/* drop the lock while queuing to avoid a deadlock */
+ spin_unlock(&dev->lock);
error = usb_ep_queue(dev->out_ep, req, GFP_ATOMIC);
+ spin_lock(&dev->lock);
if (error) {
DBG(dev, "rx submit --> %d\n", error);
list_add(&req->list, &dev->rx_reqs);
break;
- } else {
+ }
+		/* if the req is not on any list yet, add it to rx_reqs_active */
+ else if (list_empty(&req->list)) {
list_add(&req->list, &dev->rx_reqs_active);
}
}
@@ -1133,6 +1138,7 @@ static int __init printer_bind_config(struct usb_configuration *c)
NULL, "g_printer");
if (IS_ERR(dev->pdev)) {
ERROR(dev, "Failed to create device: g_printer\n");
+ status = PTR_ERR(dev->pdev);
goto fail;
}
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 1172eaeddd85..2a9cb674926a 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -617,7 +617,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
to_write = DIV_ROUND_UP(to_write, 4);
data = hs_req->req.buf + buf_pos;
- writesl(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);
+ iowrite32_rep(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);
return (to_write >= can_write) ? -ENOSPC : 0;
}
@@ -720,8 +720,8 @@ static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
ureq->length, ureq->actual);
if (0)
dev_dbg(hsotg->dev,
- "REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
- ureq->buf, length, ureq->dma,
+ "REQ buf %p len %d dma 0x%pad noi=%d zp=%d snok=%d\n",
+ ureq->buf, length, &ureq->dma,
ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
maxreq = get_ep_limit(hs_ep);
@@ -789,8 +789,8 @@ static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
writel(ureq->dma, hsotg->regs + dma_reg);
- dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
- __func__, ureq->dma, dma_reg);
+ dev_dbg(hsotg->dev, "%s: 0x%pad => 0x%08x\n",
+ __func__, &ureq->dma, dma_reg);
}
ctrl |= DxEPCTL_EPEna; /* ensure ep enabled */
@@ -1186,6 +1186,41 @@ static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg);
/**
+ * s3c_hsotg_stall_ep0 - stall ep0
+ * @hsotg: The device state
+ *
+ * Set stall for ep0 as response for setup request.
+ */
+static void s3c_hsotg_stall_ep0(struct s3c_hsotg *hsotg)
+{
+ struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+ u32 reg;
+ u32 ctrl;
+
+ dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
+ reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
+
+ /*
+ * DxEPCTL_Stall will be cleared by EP once it has
+ * taken effect, so no need to clear later.
+ */
+
+ ctrl = readl(hsotg->regs + reg);
+ ctrl |= DxEPCTL_Stall;
+ ctrl |= DxEPCTL_CNAK;
+ writel(ctrl, hsotg->regs + reg);
+
+ dev_dbg(hsotg->dev,
+ "written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
+ ctrl, reg, readl(hsotg->regs + reg));
+
+ /*
+ * complete won't be called, so we enqueue
+ * setup request here
+ */
+ s3c_hsotg_enqueue_setup(hsotg);
+}
+
+/**
* s3c_hsotg_process_control - process a control request
* @hsotg: The device state
* @ctrl: The control request received
@@ -1262,38 +1297,8 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
* so respond with a STALL for the status stage to indicate failure.
*/
- if (ret < 0) {
- u32 reg;
- u32 ctrl;
-
- dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
- reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
-
- /*
- * DxEPCTL_Stall will be cleared by EP once it has
- * taken effect, so no need to clear later.
- */
-
- ctrl = readl(hsotg->regs + reg);
- ctrl |= DxEPCTL_Stall;
- ctrl |= DxEPCTL_CNAK;
- writel(ctrl, hsotg->regs + reg);
-
- dev_dbg(hsotg->dev,
- "written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
- ctrl, reg, readl(hsotg->regs + reg));
-
- /*
- * don't believe we need to anything more to get the EP
- * to reply with a STALL packet
- */
-
- /*
- * complete won't be called, so we enqueue
- * setup request here
- */
- s3c_hsotg_enqueue_setup(hsotg);
- }
+ if (ret < 0)
+ s3c_hsotg_stall_ep0(hsotg);
}
/**
@@ -1488,7 +1493,7 @@ static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
* note, we might over-write the buffer end by 3 bytes depending on
* alignment of the data.
*/
- readsl(fifo, hs_req->req.buf + read_ptr, to_read);
+ ioread32_rep(fifo, hs_req->req.buf + read_ptr, to_read);
}
/**
@@ -2832,6 +2837,15 @@ static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
+ if (index == 0) {
+ if (value)
+ s3c_hsotg_stall_ep0(hs);
+ else
+ dev_warn(hs->dev,
+ "%s: can't clear halt on ep0\n", __func__);
+ return 0;
+ }
+
/* write both IN and OUT control registers */
epreg = DIEPCTL(index);
@@ -3760,10 +3774,55 @@ static int s3c_hsotg_remove(struct platform_device *pdev)
return 0;
}
-#if 1
-#define s3c_hsotg_suspend NULL
-#define s3c_hsotg_resume NULL
-#endif
+static int s3c_hsotg_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
+ unsigned long flags;
+ int ret = 0;
+
+ if (hsotg->driver)
+ dev_info(hsotg->dev, "suspending usb gadget %s\n",
+ hsotg->driver->driver.name);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ s3c_hsotg_disconnect(hsotg);
+ s3c_hsotg_phy_disable(hsotg);
+ hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ if (hsotg->driver) {
+ int ep;
+ for (ep = 0; ep < hsotg->num_of_eps; ep++)
+ s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
+ hsotg->supplies);
+ }
+
+ return ret;
+}
+
+static int s3c_hsotg_resume(struct platform_device *pdev)
+{
+ struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
+ unsigned long flags;
+ int ret = 0;
+
+ if (hsotg->driver) {
+ dev_info(hsotg->dev, "resuming usb gadget %s\n",
+ hsotg->driver->driver.name);
+ ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
+ hsotg->supplies);
+ }
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ hsotg->last_rst = jiffies;
+ s3c_hsotg_phy_enable(hsotg);
+ s3c_hsotg_core_init(hsotg);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return ret;
+}
#ifdef CONFIG_OF
static const struct of_device_id s3c_hsotg_of_ids[] = {
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index ea4bbfe72ec0..10c6a128250c 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -1344,7 +1344,6 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
return 0;
err_add_udc:
-err_add_device:
clk_disable(hsudc->uclk);
err_res:
if (!IS_ERR_OR_NULL(hsudc->transceiver))
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index 0f8aad78b54f..460c266b8e24 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1613,7 +1613,7 @@ static struct se_wwn *usbg_make_tport(
return ERR_PTR(-ENOMEM);
}
tport->tport_wwpn = wwpn;
- snprintf(tport->tport_name, sizeof(tport->tport_name), wnn_name);
+ snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name);
return &tport->tport_wwn;
}
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index b7d4f82872b7..50d09c289137 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -48,6 +48,8 @@
#define UETH__VERSION "29-May-2008"
+#define GETHER_NAPI_WEIGHT 32
+
struct eth_dev {
/* lock is held while accessing port_usb
*/
@@ -72,6 +74,7 @@ struct eth_dev {
struct sk_buff_head *list);
struct work_struct work;
+ struct napi_struct rx_napi;
unsigned long todo;
#define WORK_RX_MEMORY 0
@@ -253,18 +256,16 @@ enomem:
DBG(dev, "rx submit --> %d\n", retval);
if (skb)
dev_kfree_skb_any(skb);
- spin_lock_irqsave(&dev->req_lock, flags);
- list_add(&req->list, &dev->rx_reqs);
- spin_unlock_irqrestore(&dev->req_lock, flags);
}
return retval;
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
- struct sk_buff *skb = req->context, *skb2;
+ struct sk_buff *skb = req->context;
struct eth_dev *dev = ep->driver_data;
int status = req->status;
+ bool rx_queue = 0;
switch (status) {
@@ -288,30 +289,8 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
} else {
skb_queue_tail(&dev->rx_frames, skb);
}
- skb = NULL;
-
- skb2 = skb_dequeue(&dev->rx_frames);
- while (skb2) {
- if (status < 0
- || ETH_HLEN > skb2->len
- || skb2->len > VLAN_ETH_FRAME_LEN) {
- dev->net->stats.rx_errors++;
- dev->net->stats.rx_length_errors++;
- DBG(dev, "rx length %d\n", skb2->len);
- dev_kfree_skb_any(skb2);
- goto next_frame;
- }
- skb2->protocol = eth_type_trans(skb2, dev->net);
- dev->net->stats.rx_packets++;
- dev->net->stats.rx_bytes += skb2->len;
-
- /* no buffer copies needed, unless hardware can't
- * use skb buffers.
- */
- status = netif_rx(skb2);
-next_frame:
- skb2 = skb_dequeue(&dev->rx_frames);
- }
+ if (!status)
+ rx_queue = 1;
break;
/* software-driven interface shutdown */
@@ -334,22 +313,20 @@ quiesce:
/* FALLTHROUGH */
default:
+ rx_queue = 1;
+ dev_kfree_skb_any(skb);
dev->net->stats.rx_errors++;
DBG(dev, "rx status %d\n", status);
break;
}
- if (skb)
- dev_kfree_skb_any(skb);
- if (!netif_running(dev->net)) {
clean:
spin_lock(&dev->req_lock);
list_add(&req->list, &dev->rx_reqs);
spin_unlock(&dev->req_lock);
- req = NULL;
- }
- if (req)
- rx_submit(dev, req, GFP_ATOMIC);
+
+ if (rx_queue && likely(napi_schedule_prep(&dev->rx_napi)))
+ __napi_schedule(&dev->rx_napi);
}
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -414,16 +391,24 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
struct usb_request *req;
unsigned long flags;
+ int rx_counts = 0;
/* fill unused rxq slots with some skb */
spin_lock_irqsave(&dev->req_lock, flags);
while (!list_empty(&dev->rx_reqs)) {
+
+ if (++rx_counts > qlen(dev->gadget, dev->qmult))
+ break;
+
req = container_of(dev->rx_reqs.next,
struct usb_request, list);
list_del_init(&req->list);
spin_unlock_irqrestore(&dev->req_lock, flags);
if (rx_submit(dev, req, gfp_flags) < 0) {
+ spin_lock_irqsave(&dev->req_lock, flags);
+ list_add(&req->list, &dev->rx_reqs);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
defer_kevent(dev, WORK_RX_MEMORY);
return;
}
@@ -433,6 +418,41 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
spin_unlock_irqrestore(&dev->req_lock, flags);
}
+static int gether_poll(struct napi_struct *napi, int budget)
+{
+ struct eth_dev *dev = container_of(napi, struct eth_dev, rx_napi);
+ struct sk_buff *skb;
+ unsigned int work_done = 0;
+ int status = 0;
+
+ while ((skb = skb_dequeue(&dev->rx_frames))) {
+ if (status < 0
+ || ETH_HLEN > skb->len
+ || skb->len > VLAN_ETH_FRAME_LEN) {
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ DBG(dev, "rx length %d\n", skb->len);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+ skb->protocol = eth_type_trans(skb, dev->net);
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb->len;
+
+		/* NAPI poll runs in softirq context, deliver directly */
+		status = netif_receive_skb(skb);
+ }
+
+ if (netif_running(dev->net)) {
+		rx_fill(dev, GFP_ATOMIC);	/* ->poll runs in atomic context */
+ work_done++;
+ }
+
+ if (work_done < budget)
+ napi_complete(&dev->rx_napi);
+
+ return work_done;
+}
+
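gether_poll() follows the usual NAPI contract: the completion handler only schedules the instance, and the poll callback calls napi_complete() once it has done less work than its budget so it can be rescheduled later. A generic sketch of that contract, not gadget-specific; struct my_dev and my_dequeue_rx() are illustrative:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_dev {				/* illustrative driver state */
	struct napi_struct napi;
	struct net_device *netdev;
};

struct sk_buff *my_dequeue_rx(struct my_dev *priv);	/* assumed helper */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_dev *priv = container_of(napi, struct my_dev, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = my_dequeue_rx(priv);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, priv->netdev);
		netif_receive_skb(skb);		/* valid in softirq context */
		work_done++;
	}

	if (work_done < budget)
		napi_complete(napi);	/* stop polling until rescheduled */

	return work_done;
}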
static void eth_work(struct work_struct *work)
{
struct eth_dev *dev = container_of(work, struct eth_dev, work);
@@ -625,6 +645,7 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
/* and open the tx floodgates */
atomic_set(&dev->tx_qlen, 0);
netif_wake_queue(dev->net);
+ napi_enable(&dev->rx_napi);
}
static int eth_open(struct net_device *net)
@@ -651,6 +672,7 @@ static int eth_stop(struct net_device *net)
unsigned long flags;
VDBG(dev, "%s\n", __func__);
+ napi_disable(&dev->rx_napi);
netif_stop_queue(net);
DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
@@ -768,6 +790,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
return ERR_PTR(-ENOMEM);
dev = netdev_priv(net);
+ netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->req_lock);
INIT_WORK(&dev->work, eth_work);
@@ -830,6 +853,7 @@ struct net_device *gether_setup_name_default(const char *netname)
return ERR_PTR(-ENOMEM);
dev = netdev_priv(net);
+ netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->req_lock);
INIT_WORK(&dev->work, eth_work);
@@ -1113,6 +1137,7 @@ void gether_disconnect(struct gether *link)
{
struct eth_dev *dev = link->ioport;
struct usb_request *req;
+ struct sk_buff *skb;
WARN_ON(!dev);
if (!dev)
@@ -1139,6 +1164,12 @@ void gether_disconnect(struct gether *link)
spin_lock(&dev->req_lock);
}
spin_unlock(&dev->req_lock);
+
+ spin_lock(&dev->rx_frames.lock);
+ while ((skb = __skb_dequeue(&dev->rx_frames)))
+ dev_kfree_skb_any(skb);
+ spin_unlock(&dev->rx_frames.lock);
+
link->in_ep->driver_data = NULL;
link->in_ep->desc = NULL;
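The drain above makes sure no frames queued for the NAPI poller outlive the link. The same cleanup could also be written with the skb queue helper, which takes rx_frames.lock internally; a one-line sketch of that equivalent form:

	/* equivalent: skb_queue_purge() locks the queue and frees every skb */
	skb_queue_purge(&dev->rx_frames);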
diff --git a/drivers/usb/gadget/u_fs.h b/drivers/usb/gadget/u_fs.h
index bc2d3718219b..bf0ba375d459 100644
--- a/drivers/usb/gadget/u_fs.h
+++ b/drivers/usb/gadget/u_fs.h
@@ -65,10 +65,8 @@ static inline void ffs_dev_unlock(void)
mutex_unlock(&ffs_lock);
}
-struct ffs_dev *ffs_alloc_dev(void);
int ffs_name_dev(struct ffs_dev *dev, const char *name);
int ffs_single_dev(struct ffs_dev *dev);
-void ffs_free_dev(struct ffs_dev *dev);
struct ffs_epfile;
struct ffs_function;
@@ -125,7 +123,7 @@ enum ffs_setup_state {
* setup. If this state is set read/write on ep0 return
* -EIDRM. This state is only set when adding event.
*/
- FFS_SETUP_CANCELED
+ FFS_SETUP_CANCELLED
};
struct ffs_data {
@@ -156,7 +154,6 @@ struct ffs_data {
*/
struct usb_request *ep0req; /* P: mutex */
struct completion ep0req_completion; /* P: mutex */
- int ep0req_status; /* P: mutex */
/* reference counter */
atomic_t ref;
@@ -168,19 +165,18 @@ struct ffs_data {
/*
* Possible transitions:
- * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock
+ * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock
* happens only in ep0 read which is P: mutex
- * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock
+ * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock
* happens only in ep0 i/o which is P: mutex
- * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELED -- P: ev.waitq.lock
- * + FFS_SETUP_CANCELED -> FFS_NO_SETUP -- cmpxchg
+ * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELLED -- P: ev.waitq.lock
+ * + FFS_SETUP_CANCELLED -> FFS_NO_SETUP -- cmpxchg
+ *
+	 * This field should never be accessed directly; use the
+	 * ffs_setup_state_clear_cancelled() helper instead.
*/
enum ffs_setup_state setup_state;
-#define FFS_SETUP_STATE(ffs) \
- ((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state, \
- FFS_SETUP_CANCELED, FFS_NO_SETUP))
-
/* Events & such. */
struct {
u8 types[4];
@@ -210,16 +206,16 @@ struct ffs_data {
/* filled by __ffs_data_got_descs() */
/*
- * Real descriptors are 16 bytes after raw_descs (so you need
- * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
- * first full speed descriptor). raw_descs_length and
- * raw_fs_descs_length do not have those 16 bytes added.
+	 * raw_descs_data is what gets kfree()d; raw_descs points inside of it,
+	 * to where the full speed, high speed and super speed descriptors
+	 * start.  raw_descs_length is the total length of those descriptors.
*/
+ const void *raw_descs_data;
const void *raw_descs;
unsigned raw_descs_length;
- unsigned raw_fs_descs_length;
unsigned fs_descs_count;
unsigned hs_descs_count;
+ unsigned ss_descs_count;
unsigned short strings_count;
unsigned short interfaces_count;
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index b369292d4b90..ad0aca812002 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -549,8 +549,8 @@ static void gs_rx_push(unsigned long _port)
port->read_started--;
}
- /* Push from tty to ldisc; without low_latency set this is handled by
- * a workqueue, so we won't get callbacks and can hold port_lock
+ /* Push from tty to ldisc; this is handled by a workqueue,
+ * so we won't get callbacks and can hold port_lock
*/
if (do_push)
tty_flip_buffer_push(&port->port);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index a9707da7da0b..3d9e54062d62 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -314,7 +314,6 @@ config USB_ISP1760_HCD
config USB_ISP1362_HCD
tristate "ISP1362 HCD support"
- default N
---help---
Supports the Philips ISP1362 chip as a host controller
@@ -326,7 +325,6 @@ config USB_ISP1362_HCD
config USB_FUSBH200_HCD
tristate "FUSBH200 HCD support"
depends on USB
- default N
---help---
Faraday FUSBH200 is designed to meet USB2.0 EHCI specification
with minor modification.
@@ -337,7 +335,6 @@ config USB_FUSBH200_HCD
config USB_FOTG210_HCD
tristate "FOTG210 HCD support"
depends on USB
- default N
---help---
Faraday FOTG210 is an OTG controller which can be configured as
an USB2.0 host. It is designed to meet USB2.0 EHCI specification
@@ -584,7 +581,6 @@ config FHCI_DEBUG
config USB_U132_HCD
tristate "Elan U132 Adapter Host Controller"
depends on USB_FTDI_ELAN
- default M
help
The U132 adapter is a USB to CardBus adapter specifically designed
for PC cards that contain an OHCI host controller. Typical PC cards
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 01536cfd361d..b3a0e11073aa 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -3,6 +3,7 @@
*
* Copyright 2007 Steven Brown <sbrown@cortland.com>
* Copyright 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
*
* Derived from the ohci-ssb driver
* Copyright 2007 Michael Buesch <m@bues.ch>
@@ -18,6 +19,7 @@
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -25,6 +27,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
@@ -33,6 +36,13 @@
#include "ehci.h"
#define DRIVER_DESC "EHCI generic platform driver"
+#define EHCI_MAX_CLKS 3
+#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
+
+struct ehci_platform_priv {
+ struct clk *clks[EHCI_MAX_CLKS];
+ struct phy *phy;
+};
static const char hcd_name[] = "ehci-platform";
@@ -45,8 +55,6 @@ static int ehci_platform_reset(struct usb_hcd *hcd)
hcd->has_tt = pdata->has_tt;
ehci->has_synopsys_hc_bug = pdata->has_synopsys_hc_bug;
- ehci->big_endian_desc = pdata->big_endian_desc;
- ehci->big_endian_mmio = pdata->big_endian_mmio;
if (pdata->pre_setup) {
retval = pdata->pre_setup(hcd);
@@ -64,38 +72,91 @@ static int ehci_platform_reset(struct usb_hcd *hcd)
return 0;
}
+static int ehci_platform_power_on(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk, ret;
+
+ for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++) {
+ ret = clk_prepare_enable(priv->clks[clk]);
+ if (ret)
+ goto err_disable_clks;
+ }
+
+ if (priv->phy) {
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto err_disable_clks;
+
+ ret = phy_power_on(priv->phy);
+ if (ret)
+ goto err_exit_phy;
+ }
+
+ return 0;
+
+err_exit_phy:
+ phy_exit(priv->phy);
+err_disable_clks:
+ while (--clk >= 0)
+ clk_disable_unprepare(priv->clks[clk]);
+
+ return ret;
+}
+
+static void ehci_platform_power_off(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk;
+
+ if (priv->phy) {
+ phy_power_off(priv->phy);
+ phy_exit(priv->phy);
+ }
+
+ for (clk = EHCI_MAX_CLKS - 1; clk >= 0; clk--)
+ if (priv->clks[clk])
+ clk_disable_unprepare(priv->clks[clk]);
+}
+
static struct hc_driver __read_mostly ehci_platform_hc_driver;
static const struct ehci_driver_overrides platform_overrides __initconst = {
- .reset = ehci_platform_reset,
+ .reset = ehci_platform_reset,
+ .extra_priv_size = sizeof(struct ehci_platform_priv),
};
-static struct usb_ehci_pdata ehci_platform_defaults;
+static struct usb_ehci_pdata ehci_platform_defaults = {
+ .power_on = ehci_platform_power_on,
+ .power_suspend = ehci_platform_power_off,
+ .power_off = ehci_platform_power_off,
+};
static int ehci_platform_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
struct resource *res_mem;
- struct usb_ehci_pdata *pdata;
- int irq;
- int err;
+ struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ehci_platform_priv *priv;
+ struct ehci_hcd *ehci;
+ int err, irq, clk = 0;
if (usb_disabled())
return -ENODEV;
/*
- * use reasonable defaults so platforms don't have to provide these.
- * with DT probing on ARM, none of these are set.
+ * Use reasonable defaults so platforms don't have to provide these
+ * with DT probing on ARM.
*/
- if (!dev_get_platdata(&dev->dev))
- dev->dev.platform_data = &ehci_platform_defaults;
+ if (!pdata)
+ pdata = &ehci_platform_defaults;
err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
if (err)
return err;
- pdata = dev_get_platdata(&dev->dev);
-
irq = platform_get_irq(dev, 0);
if (irq < 0) {
dev_err(&dev->dev, "no irq provided");
@@ -107,17 +168,72 @@ static int ehci_platform_probe(struct platform_device *dev)
return -ENXIO;
}
+ hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ platform_set_drvdata(dev, hcd);
+ dev->dev.platform_data = pdata;
+ priv = hcd_to_ehci_priv(hcd);
+ ehci = hcd_to_ehci(hcd);
+
+ if (pdata == &ehci_platform_defaults && dev->dev.of_node) {
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-regs"))
+ ehci->big_endian_mmio = 1;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-desc"))
+ ehci->big_endian_desc = 1;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian"))
+ ehci->big_endian_mmio = ehci->big_endian_desc = 1;
+
+ priv->phy = devm_phy_get(&dev->dev, "usb");
+ if (IS_ERR(priv->phy)) {
+ err = PTR_ERR(priv->phy);
+ if (err == -EPROBE_DEFER)
+ goto err_put_hcd;
+ priv->phy = NULL;
+ }
+
+ for (clk = 0; clk < EHCI_MAX_CLKS; clk++) {
+ priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
+ if (IS_ERR(priv->clks[clk])) {
+ err = PTR_ERR(priv->clks[clk]);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->clks[clk] = NULL;
+ break;
+ }
+ }
+ }
+
+ if (pdata->big_endian_desc)
+ ehci->big_endian_desc = 1;
+ if (pdata->big_endian_mmio)
+ ehci->big_endian_mmio = 1;
+
+#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+ if (ehci->big_endian_mmio) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_EHCI_BIG_ENDIAN_MMIO not set\n");
+ err = -EINVAL;
+ goto err_put_clks;
+ }
+#endif
+#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
+ if (ehci->big_endian_desc) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_EHCI_BIG_ENDIAN_DESC not set\n");
+ err = -EINVAL;
+ goto err_put_clks;
+ }
+#endif
+
if (pdata->power_on) {
err = pdata->power_on(dev);
if (err < 0)
- return err;
- }
-
- hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
- dev_name(&dev->dev));
- if (!hcd) {
- err = -ENOMEM;
- goto err_power;
+ goto err_put_clks;
}
hcd->rsrc_start = res_mem->start;
@@ -126,22 +242,28 @@ static int ehci_platform_probe(struct platform_device *dev)
hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
- goto err_put_hcd;
+ goto err_power;
}
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
- goto err_put_hcd;
+ goto err_power;
device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(dev, hcd);
return err;
-err_put_hcd:
- usb_put_hcd(hcd);
err_power:
if (pdata->power_off)
pdata->power_off(dev);
+err_put_clks:
+ while (--clk >= 0)
+ clk_put(priv->clks[clk]);
+err_put_hcd:
+ if (pdata == &ehci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
+ usb_put_hcd(hcd);
return err;
}
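When no platform data is supplied, the probe path above now discovers optional resources straight from the DT node: up to EHCI_MAX_CLKS clocks via of_clk_get() and a "usb" PHY via devm_phy_get(), treating -EPROBE_DEFER as fatal and any other error as "resource not present". A condensed sketch of that optional-clock lookup; the helper name is illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>

/* fetch up to n optional clocks listed in the device's OF node */
static int get_optional_clks(struct device *dev, struct clk **clks, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		clks[i] = of_clk_get(dev->of_node, i);
		if (IS_ERR(clks[i])) {
			int err = PTR_ERR(clks[i]);

			clks[i] = NULL;
			if (err == -EPROBE_DEFER)
				return err;	/* clock provider not ready */
			break;			/* no more clocks listed */
		}
	}
	return 0;
}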
@@ -150,13 +272,19 @@ static int ehci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk;
usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
+ for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
+ clk_put(priv->clks[clk]);
+
+ usb_put_hcd(hcd);
+
if (pdata == &ehci_platform_defaults)
dev->dev.platform_data = NULL;
@@ -207,8 +335,10 @@ static int ehci_platform_resume(struct device *dev)
static const struct of_device_id vt8500_ehci_ids[] = {
{ .compatible = "via,vt8500-ehci", },
{ .compatible = "wm,prizm-ehci", },
+ { .compatible = "generic-ehci", },
{}
};
+MODULE_DEVICE_TABLE(of, vt8500_ehci_ids);
static const struct platform_device_id ehci_platform_table[] = {
{ "ehci-platform", 0 },
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index af28b748e87a..27ac6ad53c3d 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -38,10 +38,6 @@
#include "ehci.h"
-#define TEGRA_USB_BASE 0xC5000000
-#define TEGRA_USB2_BASE 0xC5004000
-#define TEGRA_USB3_BASE 0xC5008000
-
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
#define TEGRA_USB_DMA_ALIGN 32
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index e07669993f58..d0d8fadf7066 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -261,8 +261,44 @@ static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
dev_err(dev, "cannot listen to notifications: %d\n", result);
goto error_stop;
}
+ /*
+ * If WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS is set,
+ * disable transfer notifications.
+ */
+ if (hwahc->wa.quirks &
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS) {
+ struct usb_host_interface *cur_altsetting =
+ hwahc->wa.usb_iface->cur_altsetting;
+
+ result = usb_control_msg(hwahc->wa.usb_dev,
+ usb_sndctrlpipe(hwahc->wa.usb_dev, 0),
+ WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ WA_REQ_ALEREON_FEATURE_SET,
+ cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ /*
+ * If we successfully sent the control message, start DTI here
+ * because no transfer notifications will be received which is
+ * where DTI is normally started.
+ */
+ if (result == 0)
+ result = wa_dti_start(&hwahc->wa);
+ else
+ result = 0; /* OK. Continue normally. */
+
+ if (result < 0) {
+ dev_err(dev, "cannot start DTI: %d\n", result);
+ goto error_dti_start;
+ }
+ }
+
return result;
+error_dti_start:
+ wa_nep_disarm(&hwahc->wa);
error_stop:
__wa_clear_feature(&hwahc->wa, WA_ENABLE);
return result;
@@ -827,10 +863,12 @@ static void hwahc_disconnect(struct usb_interface *usb_iface)
static struct usb_device_id hwahc_id_table[] = {
/* Alereon 5310 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x02, 0x01),
- .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC },
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
/* Alereon 5611 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x02, 0x01),
- .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC },
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
/* FIXME: use class labels for this */
{ USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
{},
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 68f674cd095f..b6002c951c5c 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -3,6 +3,7 @@
*
* Copyright 2007 Michael Buesch <m@bues.ch>
* Copyright 2011-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
*
* Derived from the OCHI-SSB driver
* Derived from the OHCI-PCI driver
@@ -14,11 +15,14 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb/ohci_pdriver.h>
#include <linux/usb.h>
@@ -27,6 +31,13 @@
#include "ohci.h"
#define DRIVER_DESC "OHCI generic platform driver"
+#define OHCI_MAX_CLKS 3
+#define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
+
+struct ohci_platform_priv {
+ struct clk *clks[OHCI_MAX_CLKS];
+ struct phy *phy;
+};
static const char hcd_name[] = "ohci-platform";
@@ -36,10 +47,6 @@ static int ohci_platform_reset(struct usb_hcd *hcd)
struct usb_ohci_pdata *pdata = dev_get_platdata(&pdev->dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- if (pdata->big_endian_desc)
- ohci->flags |= OHCI_QUIRK_BE_DESC;
- if (pdata->big_endian_mmio)
- ohci->flags |= OHCI_QUIRK_BE_MMIO;
if (pdata->no_big_frame_no)
ohci->flags |= OHCI_QUIRK_FRAME_NO;
if (pdata->num_ports)
@@ -48,11 +55,67 @@ static int ohci_platform_reset(struct usb_hcd *hcd)
return ohci_setup(hcd);
}
+static int ohci_platform_power_on(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+ int clk, ret;
+
+ for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++) {
+ ret = clk_prepare_enable(priv->clks[clk]);
+ if (ret)
+ goto err_disable_clks;
+ }
+
+ if (priv->phy) {
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto err_disable_clks;
+
+ ret = phy_power_on(priv->phy);
+ if (ret)
+ goto err_exit_phy;
+ }
+
+ return 0;
+
+err_exit_phy:
+ phy_exit(priv->phy);
+err_disable_clks:
+ while (--clk >= 0)
+ clk_disable_unprepare(priv->clks[clk]);
+
+ return ret;
+}
+
+static void ohci_platform_power_off(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+ int clk;
+
+ if (priv->phy) {
+ phy_power_off(priv->phy);
+ phy_exit(priv->phy);
+ }
+
+ for (clk = OHCI_MAX_CLKS - 1; clk >= 0; clk--)
+ if (priv->clks[clk])
+ clk_disable_unprepare(priv->clks[clk]);
+}
+
static struct hc_driver __read_mostly ohci_platform_hc_driver;
static const struct ohci_driver_overrides platform_overrides __initconst = {
- .product_desc = "Generic Platform OHCI controller",
- .reset = ohci_platform_reset,
+ .product_desc = "Generic Platform OHCI controller",
+ .reset = ohci_platform_reset,
+ .extra_priv_size = sizeof(struct ohci_platform_priv),
+};
+
+static struct usb_ohci_pdata ohci_platform_defaults = {
+ .power_on = ohci_platform_power_on,
+ .power_suspend = ohci_platform_power_off,
+ .power_off = ohci_platform_power_off,
};
static int ohci_platform_probe(struct platform_device *dev)
@@ -60,17 +123,24 @@ static int ohci_platform_probe(struct platform_device *dev)
struct usb_hcd *hcd;
struct resource *res_mem;
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
- int irq;
- int err = -ENOMEM;
-
- if (!pdata) {
- WARN_ON(1);
- return -ENODEV;
- }
+ struct ohci_platform_priv *priv;
+ struct ohci_hcd *ohci;
+ int err, irq, clk = 0;
if (usb_disabled())
return -ENODEV;
+ /*
+ * Use reasonable defaults so platforms don't have to provide these
+ * with DT probing on ARM.
+ */
+ if (!pdata)
+ pdata = &ohci_platform_defaults;
+
+ err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
irq = platform_get_irq(dev, 0);
if (irq < 0) {
dev_err(&dev->dev, "no irq provided");
@@ -83,17 +153,72 @@ static int ohci_platform_probe(struct platform_device *dev)
return -ENXIO;
}
+ hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ platform_set_drvdata(dev, hcd);
+ dev->dev.platform_data = pdata;
+ priv = hcd_to_ohci_priv(hcd);
+ ohci = hcd_to_ohci(hcd);
+
+ if (pdata == &ohci_platform_defaults && dev->dev.of_node) {
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-regs"))
+ ohci->flags |= OHCI_QUIRK_BE_MMIO;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-desc"))
+ ohci->flags |= OHCI_QUIRK_BE_DESC;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian"))
+ ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
+
+ priv->phy = devm_phy_get(&dev->dev, "usb");
+ if (IS_ERR(priv->phy)) {
+ err = PTR_ERR(priv->phy);
+ if (err == -EPROBE_DEFER)
+ goto err_put_hcd;
+ priv->phy = NULL;
+ }
+
+ for (clk = 0; clk < OHCI_MAX_CLKS; clk++) {
+ priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
+ if (IS_ERR(priv->clks[clk])) {
+ err = PTR_ERR(priv->clks[clk]);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->clks[clk] = NULL;
+ break;
+ }
+ }
+ }
+
+ if (pdata->big_endian_desc)
+ ohci->flags |= OHCI_QUIRK_BE_DESC;
+ if (pdata->big_endian_mmio)
+ ohci->flags |= OHCI_QUIRK_BE_MMIO;
+
+#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+ if (ohci->flags & OHCI_QUIRK_BE_MMIO) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_OHCI_BIG_ENDIAN_MMIO not set\n");
+ err = -EINVAL;
+ goto err_put_clks;
+ }
+#endif
+#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_DESC
+ if (ohci->flags & OHCI_QUIRK_BE_DESC) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_OHCI_BIG_ENDIAN_DESC not set\n");
+ err = -EINVAL;
+ goto err_put_clks;
+ }
+#endif
+
if (pdata->power_on) {
err = pdata->power_on(dev);
if (err < 0)
- return err;
- }
-
- hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
- dev_name(&dev->dev));
- if (!hcd) {
- err = -ENOMEM;
- goto err_power;
+ goto err_put_clks;
}
hcd->rsrc_start = res_mem->start;
@@ -102,11 +227,11 @@ static int ohci_platform_probe(struct platform_device *dev)
hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
- goto err_put_hcd;
+ goto err_power;
}
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
- goto err_put_hcd;
+ goto err_power;
device_wakeup_enable(hcd->self.controller);
@@ -114,11 +239,17 @@ static int ohci_platform_probe(struct platform_device *dev)
return err;
-err_put_hcd:
- usb_put_hcd(hcd);
err_power:
if (pdata->power_off)
pdata->power_off(dev);
+err_put_clks:
+ while (--clk >= 0)
+ clk_put(priv->clks[clk]);
+err_put_hcd:
+ if (pdata == &ohci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
+ usb_put_hcd(hcd);
return err;
}
@@ -127,13 +258,22 @@ static int ohci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+ int clk;
usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
+ for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
+ clk_put(priv->clks[clk]);
+
+ usb_put_hcd(hcd);
+
+ if (pdata == &ohci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
return 0;
}
@@ -180,6 +320,12 @@ static int ohci_platform_resume(struct device *dev)
#define ohci_platform_resume NULL
#endif /* CONFIG_PM */
+static const struct of_device_id ohci_platform_ids[] = {
+ { .compatible = "generic-ohci", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ohci_platform_ids);
+
static const struct platform_device_id ohci_platform_table[] = {
{ "ohci-platform", 0 },
{ }
@@ -200,6 +346,7 @@ static struct platform_driver ohci_platform_driver = {
.owner = THIS_MODULE,
.name = "ohci-platform",
.pm = &ohci_platform_pm_ops,
+ .of_match_table = ohci_platform_ids,
}
};
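Boards that still probe without device tree can keep passing their own struct usb_ohci_pdata; the ohci_platform_defaults above is only used when no platform data is supplied. A minimal, hypothetical board-file registration using the hooks this driver consumes (the names, the port count and the omitted MMIO/IRQ resources are illustrative only):

#include <linux/platform_device.h>
#include <linux/usb/ohci_pdriver.h>

/* Hypothetical board hooks; a real board would switch its own
 * regulators/clocks here before the controller is touched. */
static int my_board_ohci_power_on(struct platform_device *pdev)
{
        return 0;
}

static void my_board_ohci_power_off(struct platform_device *pdev)
{
}

static struct usb_ohci_pdata my_board_ohci_pdata = {
        .num_ports     = 2,
        .power_on      = my_board_ohci_power_on,
        .power_suspend = my_board_ohci_power_off,
        .power_off     = my_board_ohci_power_off,
};

/* "ohci-platform" matches ohci_platform_table above; registering this
 * with platform_device_register() takes the pdata path through
 * ohci_platform_probe() instead of the DT defaults. */
static struct platform_device my_board_ohci_dev = {
        .name = "ohci-platform",
        .id   = -1,
        .dev  = {
                .platform_data = &my_board_ohci_pdata,
        },
};
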
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 44e6c9da8892..01833ab2b5c3 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -148,6 +148,7 @@ static void uhci_hcd_platform_shutdown(struct platform_device *op)
}
static const struct of_device_id platform_uhci_ids[] = {
+ { .compatible = "generic-uhci", },
{ .compatible = "platform-uhci", },
{}
};
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 9992fbfec85f..1ad6bc1951c7 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -732,9 +732,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* Set the U1 and U2 exit latencies. */
memcpy(buf, &usb_bos_descriptor,
USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE);
- temp = readl(&xhci->cap_regs->hcs_params3);
- buf[12] = HCS_U1_LATENCY(temp);
- put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
+ if ((xhci->quirks & XHCI_LPM_SUPPORT)) {
+ temp = readl(&xhci->cap_regs->hcs_params3);
+ buf[12] = HCS_U1_LATENCY(temp);
+ put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
+ }
/* Indicate whether the host has LTM support. */
temp = readl(&xhci->cap_regs->hcc_params);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bce4391a0e7d..c089668308ad 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -149,14 +149,140 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
}
}
+/*
+ * We need a radix tree for mapping physical addresses of TRBs to which stream
+ * ID they belong to. We need to do this because the host controller won't tell
+ * us which stream ring the TRB came from. We could store the stream ID in an
+ * event data TRB, but that doesn't help us for the cancellation case, since the
+ * endpoint may stop before it reaches that event data TRB.
+ *
+ * The radix tree maps the upper portion of the TRB DMA address to a ring
+ * segment that has the same upper portion of DMA addresses. For example, say I
+ * have segments of size 1KB, that are always 1KB aligned. A segment may
+ * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
+ * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
+ * pass the radix tree a key to get the right stream ID:
+ *
+ * 0x10c90fff >> 10 = 0x43243
+ * 0x10c912c0 >> 10 = 0x43244
+ * 0x10c91400 >> 10 = 0x43245
+ *
+ * Obviously, only those TRBs with DMA addresses that are within the segment
+ * will make the radix tree return the stream ID for that ring.
+ *
+ * Caveats for the radix tree:
+ *
+ * The radix tree uses an unsigned long as a key. On 32-bit systems, an
+ * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
+ * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
+ * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
+ * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
+ * extended systems (where the DMA address can be bigger than 32-bits),
+ * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
+ */
+static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct xhci_ring *ring,
+ struct xhci_segment *seg,
+ gfp_t mem_flags)
+{
+ unsigned long key;
+ int ret;
+
+ key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+ /* Skip any segments that were already added. */
+ if (radix_tree_lookup(trb_address_map, key))
+ return 0;
+
+ ret = radix_tree_maybe_preload(mem_flags);
+ if (ret)
+ return ret;
+ ret = radix_tree_insert(trb_address_map,
+ key, ring);
+ radix_tree_preload_end();
+ return ret;
+}
+
+static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct xhci_segment *seg)
+{
+ unsigned long key;
+
+ key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+ if (radix_tree_lookup(trb_address_map, key))
+ radix_tree_delete(trb_address_map, key);
+}
+
+static int xhci_update_stream_segment_mapping(
+ struct radix_tree_root *trb_address_map,
+ struct xhci_ring *ring,
+ struct xhci_segment *first_seg,
+ struct xhci_segment *last_seg,
+ gfp_t mem_flags)
+{
+ struct xhci_segment *seg;
+ struct xhci_segment *failed_seg;
+ int ret;
+
+ if (WARN_ON_ONCE(trb_address_map == NULL))
+ return 0;
+
+ seg = first_seg;
+ do {
+ ret = xhci_insert_segment_mapping(trb_address_map,
+ ring, seg, mem_flags);
+ if (ret)
+ goto remove_streams;
+ if (seg == last_seg)
+ return 0;
+ seg = seg->next;
+ } while (seg != first_seg);
+
+ return 0;
+
+remove_streams:
+ failed_seg = seg;
+ seg = first_seg;
+ do {
+ xhci_remove_segment_mapping(trb_address_map, seg);
+ if (seg == failed_seg)
+ return ret;
+ seg = seg->next;
+ } while (seg != first_seg);
+
+ return ret;
+}
+
+static void xhci_remove_stream_mapping(struct xhci_ring *ring)
+{
+ struct xhci_segment *seg;
+
+ if (WARN_ON_ONCE(ring->trb_address_map == NULL))
+ return;
+
+ seg = ring->first_seg;
+ do {
+ xhci_remove_segment_mapping(ring->trb_address_map, seg);
+ seg = seg->next;
+ } while (seg != ring->first_seg);
+}
+
+static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
+{
+ return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
+ ring->first_seg, ring->last_seg, mem_flags);
+}
+
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
if (!ring)
return;
- if (ring->first_seg)
+ if (ring->first_seg) {
+ if (ring->type == TYPE_STREAM)
+ xhci_remove_stream_mapping(ring);
xhci_free_segments_for_ring(xhci, ring->first_seg);
+ }
kfree(ring);
}
@@ -349,6 +475,21 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
if (ret)
return -ENOMEM;
+ if (ring->type == TYPE_STREAM)
+ ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
+ ring, first, last, flags);
+ if (ret) {
+ struct xhci_segment *next;
+ do {
+ next = first->next;
+ xhci_segment_free(xhci, first);
+ if (first == last)
+ break;
+ first = next;
+ } while (true);
+ return ret;
+ }
+
xhci_link_rings(xhci, ring, first, last, num_segs);
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ring expansion succeed, now has %d segments",
@@ -434,12 +575,12 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
- if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- dma_free_coherent(dev,
- sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+ if (size > MEDIUM_STREAM_ARRAY_SIZE)
+ dma_free_coherent(dev, size,
stream_ctx, dma);
- else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+ else if (size <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_free(xhci->small_streams_pool,
stream_ctx, dma);
else
@@ -462,12 +603,12 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
gfp_t mem_flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
- if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- return dma_alloc_coherent(dev,
- sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+ if (size > MEDIUM_STREAM_ARRAY_SIZE)
+ return dma_alloc_coherent(dev, size,
dma, mem_flags);
- else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+ else if (size <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_alloc(xhci->small_streams_pool,
mem_flags, dma);
else
@@ -510,36 +651,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
* The number of stream contexts in the stream context array may be bigger than
* the number of streams the driver wants to use. This is because the number of
* stream context array entries must be a power of two.
- *
- * We need a radix tree for mapping physical addresses of TRBs to which stream
- * ID they belong to. We need to do this because the host controller won't tell
- * us which stream ring the TRB came from. We could store the stream ID in an
- * event data TRB, but that doesn't help us for the cancellation case, since the
- * endpoint may stop before it reaches that event data TRB.
- *
- * The radix tree maps the upper portion of the TRB DMA address to a ring
- * segment that has the same upper portion of DMA addresses. For example, say I
- * have segments of size 1KB, that are always 64-byte aligned. A segment may
- * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
- * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
- * pass the radix tree a key to get the right stream ID:
- *
- * 0x10c90fff >> 10 = 0x43243
- * 0x10c912c0 >> 10 = 0x43244
- * 0x10c91400 >> 10 = 0x43245
- *
- * Obviously, only those TRBs with DMA addresses that are within the segment
- * will make the radix tree return the stream ID for that ring.
- *
- * Caveats for the radix tree:
- *
- * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
- * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
- * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
- * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
- * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
- * extended systems (where the DMA address can be bigger than 32-bits),
- * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
@@ -548,7 +659,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
struct xhci_stream_info *stream_info;
u32 cur_stream;
struct xhci_ring *cur_ring;
- unsigned long key;
u64 addr;
int ret;
@@ -603,6 +713,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
if (!cur_ring)
goto cleanup_rings;
cur_ring->stream_id = cur_stream;
+ cur_ring->trb_address_map = &stream_info->trb_address_map;
/* Set deq ptr, cycle bit, and stream context type */
addr = cur_ring->first_seg->dma |
SCT_FOR_CTX(SCT_PRI_TR) |
@@ -612,10 +723,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
cur_stream, (unsigned long long) addr);
- key = (unsigned long)
- (cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
- ret = radix_tree_insert(&stream_info->trb_address_map,
- key, cur_ring);
+ ret = xhci_update_stream_mapping(cur_ring, mem_flags);
if (ret) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
@@ -635,9 +743,6 @@ cleanup_rings:
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
- addr = cur_ring->first_seg->dma;
- radix_tree_delete(&stream_info->trb_address_map,
- addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
@@ -698,7 +803,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
{
int cur_stream;
struct xhci_ring *cur_ring;
- dma_addr_t addr;
if (!stream_info)
return;
@@ -707,9 +811,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
- addr = cur_ring->first_seg->dma;
- radix_tree_delete(&stream_info->trb_address_map,
- addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
@@ -1711,7 +1812,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (xhci->lpm_command)
xhci_free_command(xhci, xhci->lpm_command);
- xhci->cmd_ring_reserved_trbs = 0;
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
@@ -1776,6 +1876,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
no_bw:
+ xhci->cmd_ring_reserved_trbs = 0;
xhci->num_usb2_ports = 0;
xhci->num_usb3_ports = 0;
xhci->num_active_eps = 0;
@@ -2274,11 +2375,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/*
* Initialize the ring segment pool. The ring must be a contiguous
* structure comprised of TRBs. The TRBs must be 16 byte aligned,
- * however, the command ring segment needs 64-byte aligned segments,
- * so we pick the greater alignment need.
+ * however, the command ring segment needs 64-byte aligned segments
+ * and our use of dma addresses in the trb_address_map radix tree needs
+ * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
- TRB_SEGMENT_SIZE, 64, xhci->page_size);
+ TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
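The helpers above key the radix tree on the segment-aligned upper bits of a TRB's DMA address, so every TRB inside a segment resolves to the same ring. A standalone sketch of that key derivation, assuming the 1 KB, 1 KB-aligned segments (TRB_SEGMENT_SHIFT of 10) used in the comment's worked example:

#include <stdint.h>
#include <stdio.h>

#define TRB_SEGMENT_SHIFT 10    /* 1 KB segments, as in the comment above */

/* Every TRB inside one segment shares this key, so a radix-tree lookup
 * on any TRB address within the segment returns that segment's ring. */
static unsigned long trb_to_key(uint64_t trb_dma)
{
        return (unsigned long)(trb_dma >> TRB_SEGMENT_SHIFT);
}

int main(void)
{
        printf("0x10c90fff -> 0x%lx\n", trb_to_key(0x10c90fffULL)); /* 0x43243 */
        printf("0x10c912c0 -> 0x%lx\n", trb_to_key(0x10c912c0ULL)); /* 0x43244 */
        printf("0x10c91400 -> 0x%lx\n", trb_to_key(0x10c91400ULL)); /* 0x43245 */
        return 0;
}

This is also why the segment pool alignment is raised to TRB_SEGMENT_SIZE at the end of the hunk: a segment that was not segment-size aligned could straddle two keys.
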
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 04f986d9234f..47390e369cd4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -190,6 +190,10 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct usb_hcd *hcd;
driver = (struct hc_driver *)id->driver_data;
+
+ /* Prevent runtime suspending between USB-2 and USB-3 initialization */
+ pm_runtime_get_noresume(&dev->dev);
+
/* Register the USB 2.0 roothub.
* FIXME: USB core must know to register the USB 2.0 roothub first.
* This is sort of silly, because we could just set the HCD driver flags
@@ -199,7 +203,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
retval = usb_hcd_pci_probe(dev, id);
if (retval)
- return retval;
+ goto put_runtime_pm;
/* USB 2.0 roothub is stored in the PCI device now. */
hcd = dev_get_drvdata(&dev->dev);
@@ -222,11 +226,11 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto put_usb3_hcd;
/* Roothub already marked as USB 3.0 speed */
- /* We know the LPM timeout algorithms for this host, let the USB core
- * enable and disable LPM for devices under the USB 3.0 roothub.
- */
- if (xhci->quirks & XHCI_LPM_SUPPORT)
- hcd_to_bus(xhci->shared_hcd)->root_hub->lpm_capable = 1;
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
+ /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ pm_runtime_put_noidle(&dev->dev);
return 0;
@@ -234,6 +238,8 @@ put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
dealloc_usb2_hcd:
usb_hcd_pci_remove(dev);
+put_runtime_pm:
+ pm_runtime_put_noidle(&dev->dev);
return retval;
}
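The pm_runtime calls added to xhci_pci_probe() are the usual way to keep a device from runtime-suspending in the middle of a multi-step probe: take a reference that does not resume the device, then drop it without triggering an idle check once every step (or the error path) is done. A generic sketch of the pattern; do_step_one()/do_step_two()/undo_step_one() are hypothetical stand-ins for the USB-2 and USB-3 roothub registration:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical probe steps, provided elsewhere. */
static int do_step_one(struct device *dev);
static int do_step_two(struct device *dev);
static void undo_step_one(struct device *dev);

static int two_step_probe(struct device *dev)
{
        int ret;

        pm_runtime_get_noresume(dev);   /* block runtime suspend */

        ret = do_step_one(dev);
        if (ret)
                goto out_put;

        ret = do_step_two(dev);
        if (ret)
                undo_step_one(dev);

out_put:
        pm_runtime_put_noidle(dev);     /* drop the reference quietly */
        return ret;
}
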
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 8abda5c73ca1..151901ce1ba9 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -158,6 +158,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
*/
*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
@@ -226,6 +229,7 @@ static const struct dev_pm_ops xhci_plat_pm_ops = {
#ifdef CONFIG_OF
static const struct of_device_id usb_xhci_of_match[] = {
+ { .compatible = "generic-xhci" },
{ .compatible = "xhci-platform" },
{ },
};
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 0ed64eb68e48..5f926bea5ab1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -546,9 +546,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
+ struct xhci_virt_ep *ep = &dev->eps[ep_index];
struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
- struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
@@ -573,8 +573,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding endpoint context");
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
- state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+ /* 4.6.9 the css flag is written to the stream context for streams */
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring);
+ } else {
+ struct xhci_ep_ctx *ep_ctx
+ = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+ state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+ }
state->new_deq_ptr = cur_td->last_trb;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -892,6 +900,57 @@ remove_finished_td:
/* Return to the event handler with xhci->lock re-acquired */
}
+static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ struct xhci_td *cur_td;
+
+ while (!list_empty(&ring->td_list)) {
+ cur_td = list_first_entry(&ring->td_list,
+ struct xhci_td, td_list);
+ list_del_init(&cur_td->td_list);
+ if (!list_empty(&cur_td->cancelled_td_list))
+ list_del_init(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+ }
+}
+
+static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
+ int slot_id, int ep_index)
+{
+ struct xhci_td *cur_td;
+ struct xhci_virt_ep *ep;
+ struct xhci_ring *ring;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ if ((ep->ep_state & EP_HAS_STREAMS) ||
+ (ep->ep_state & EP_GETTING_NO_STREAMS)) {
+ int stream_id;
+
+ for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+ stream_id++) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Killing URBs for slot ID %u, ep index %u, stream %u",
+ slot_id, ep_index, stream_id + 1);
+ xhci_kill_ring_urbs(xhci,
+ ep->stream_info->stream_rings[stream_id]);
+ }
+ } else {
+ ring = ep->ring;
+ if (!ring)
+ return;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Killing URBs for slot ID %u, ep index %u",
+ slot_id, ep_index);
+ xhci_kill_ring_urbs(xhci, ring);
+ }
+ while (!list_empty(&ep->cancelled_td_list)) {
+ cur_td = list_first_entry(&ep->cancelled_td_list,
+ struct xhci_td, cancelled_td_list);
+ list_del_init(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+ }
+}
+
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
@@ -915,9 +974,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
struct xhci_hcd *xhci;
struct xhci_virt_ep *ep;
- struct xhci_virt_ep *temp_ep;
- struct xhci_ring *ring;
- struct xhci_td *cur_td;
int ret, i, j;
unsigned long flags;
@@ -974,34 +1030,8 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
for (i = 0; i < MAX_HC_SLOTS; i++) {
if (!xhci->devs[i])
continue;
- for (j = 0; j < 31; j++) {
- temp_ep = &xhci->devs[i]->eps[j];
- ring = temp_ep->ring;
- if (!ring)
- continue;
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Killing URBs for slot ID %u, "
- "ep index %u", i, j);
- while (!list_empty(&ring->td_list)) {
- cur_td = list_first_entry(&ring->td_list,
- struct xhci_td,
- td_list);
- list_del_init(&cur_td->td_list);
- if (!list_empty(&cur_td->cancelled_td_list))
- list_del_init(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN);
- }
- while (!list_empty(&temp_ep->cancelled_td_list)) {
- cur_td = list_first_entry(
- &temp_ep->cancelled_td_list,
- struct xhci_td,
- cancelled_td_list);
- list_del_init(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN);
- }
- }
+ for (j = 0; j < 31; j++)
+ xhci_kill_endpoint_urbs(xhci, i, j);
}
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -1073,17 +1103,18 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
unsigned int stream_id;
struct xhci_ring *ep_ring;
struct xhci_virt_device *dev;
+ struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
dev = xhci->devs[slot_id];
+ ep = &dev->eps[ep_index];
ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
if (!ep_ring) {
- xhci_warn(xhci, "WARN Set TR deq ptr command for "
- "freed stream ID %u\n",
+ xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
stream_id);
/* XXX: Harmless??? */
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
@@ -1099,12 +1130,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
switch (cmd_comp_code) {
case COMP_TRB_ERR:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
- "of stream ID configuration\n");
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
break;
case COMP_CTX_STATE:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
- "to incorrect slot or ep state.\n");
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
ep_state = le32_to_cpu(ep_ctx->ep_info);
ep_state &= EP_STATE_MASK;
slot_state = le32_to_cpu(slot_ctx->dev_state);
@@ -1114,13 +1143,12 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
slot_state, ep_state);
break;
case COMP_EBADSLT:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
- "slot %u was not enabled.\n", slot_id);
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
+ slot_id);
break;
default:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
- "completion code of %u.\n",
- cmd_comp_code);
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
+ cmd_comp_code);
break;
}
/* OK what do we do now? The endpoint state is hosed, and we
@@ -1130,23 +1158,28 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
* cancelling URBs, which might not be an error...
*/
} else {
+ u64 deq;
+ /* 4.6.10 deq ptr is written to the stream ctx for streams */
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
+ } else {
+ deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
+ }
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Successful Set TR Deq Ptr cmd, deq = @%08llx",
- le64_to_cpu(ep_ctx->deq));
- if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
- dev->eps[ep_index].queued_deq_ptr) ==
- (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
+ "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
+ if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
+ ep->queued_deq_ptr) == deq) {
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
update_ring_for_set_deq_completion(xhci, dev,
ep_ring, ep_index);
} else {
- xhci_warn(xhci, "Mismatch between completed Set TR Deq "
- "Ptr command & xHCI internal state.\n");
+ xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
- dev->eps[ep_index].queued_deq_seg,
- dev->eps[ep_index].queued_deq_ptr);
+ ep->queued_deq_seg, ep->queued_deq_ptr);
}
}
@@ -4070,6 +4103,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
+ u32 trb_sct = 0;
u32 type = TRB_TYPE(TRB_SET_DEQ);
struct xhci_virt_ep *ep;
@@ -4088,7 +4122,9 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
}
ep->queued_deq_seg = deq_seg;
ep->queued_deq_ptr = deq_ptr;
- return queue_command(xhci, lower_32_bits(addr) | cycle_state,
+ if (stream_id)
+ trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+ return queue_command(xhci, lower_32_bits(addr) | trb_sct | cycle_state,
upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
}
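Both new code paths above treat bit 0 of the 64-bit stream-context field as the dequeue cycle state and mask the rest with SCTX_DEQ_MASK to recover the dequeue pointer, which is consistent with the xHCI stream context layout (DCS in bit 0, stream context type in bits 3:1, dequeue pointer in bits 63:4). A small standalone sketch of that unpacking; the example value is made up:

#include <stdint.h>
#include <stdio.h>

/* ~0xfL in xhci.h (see the header hunk further below); widened here so
 * the standalone example behaves the same on 32-bit hosts. */
#define SCTX_DEQ_MASK (~0xfULL)

int main(void)
{
        uint64_t stream_ring = 0x10c912c3ULL;   /* made-up stream ctx value */

        printf("cycle state: %u\n", (unsigned int)(stream_ring & 0x1));
        printf("ctx type   : %u\n", (unsigned int)((stream_ring >> 1) & 0x7));
        printf("deq pointer: 0x%llx\n",
               (unsigned long long)(stream_ring & SCTX_DEQ_MASK));
        return 0;
}
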
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 924a6ccdb622..8fe4e124ddd4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -390,6 +390,10 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
}
legacy_irq:
+ if (!strlen(hcd->irq_descr))
+ snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
+ hcd->driver->description, hcd->self.busnum);
+
/* fall back to legacy interrupt*/
ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
hcd->irq_descr, hcd);
@@ -2678,6 +2682,20 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
return ret;
}
+static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
+ struct xhci_virt_device *vdev, int i)
+{
+ struct xhci_virt_ep *ep = &vdev->eps[i];
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
+ xhci_get_endpoint_address(i));
+ xhci_free_stream_info(xhci, ep->stream_info);
+ ep->stream_info = NULL;
+ ep->ep_state &= ~EP_HAS_STREAMS;
+ }
+}
+
/* Called after one or more calls to xhci_add_endpoint() or
* xhci_drop_endpoint(). If this call fails, the USB core is expected
* to call xhci_reset_bandwidth().
@@ -2742,8 +2760,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
/* Free any rings that were dropped, but not changed. */
for (i = 1; i < 31; ++i) {
if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
- !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
+ !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
+ }
}
xhci_zero_in_ctx(xhci, virt_dev);
/*
@@ -2759,6 +2779,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (virt_dev->eps[i].ring) {
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
}
+ xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
}
@@ -2954,7 +2975,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
if (ret <= 0)
return -EINVAL;
- if (ep->ss_ep_comp.bmAttributes == 0) {
+ if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
" descriptor for ep 0x%x does not support streams\n",
ep->desc.bEndpointAddress);
@@ -3121,6 +3142,12 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
num_streams);
+ /* MaxPSASize value 0 (2 streams) means streams are not supported */
+ if (HCC_MAX_PSA(xhci->hcc_params) < 4) {
+ xhci_dbg(xhci, "xHCI controller does not support streams.\n");
+ return -ENOSYS;
+ }
+
config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
if (!config_cmd) {
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
@@ -3519,6 +3546,8 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_virt_ep *ep = &virt_dev->eps[i];
if (ep->ep_state & EP_HAS_STREAMS) {
+ xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
+ xhci_get_endpoint_address(i));
xhci_free_stream_info(xhci, ep->stream_info);
ep->stream_info = NULL;
ep->ep_state &= ~EP_HAS_STREAMS;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 58ed9d088e63..d280e9213d08 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -703,6 +703,7 @@ struct xhci_ep_ctx {
/* deq bitmasks */
#define EP_CTX_CYCLE_MASK (1 << 0)
+#define SCTX_DEQ_MASK (~0xfL)
/**
@@ -1118,9 +1119,10 @@ enum xhci_setup_dev {
#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23)
#define LAST_EP_INDEX 30
-/* Set TR Dequeue Pointer command TRB fields */
+/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
+#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
/* Port Status Change Event TRB fields */
@@ -1341,6 +1343,7 @@ struct xhci_ring {
unsigned int num_trbs_free_temp;
enum xhci_ring_type type;
bool last_td_was_short;
+ struct radix_tree_root *trb_address_map;
};
struct xhci_erst_entry {
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index ba5f70f92888..1bca274dc3b5 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -128,7 +128,6 @@ config USB_IDMOUSE
config USB_FTDI_ELAN
tristate "Elan PCMCIA CardBus Adapter USB Client"
- default M
help
ELAN's Uxxx series of adapters are USB to PCMCIA CardBus adapters.
Currently only the U132 adapter is available.
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index de98906f786d..06b5d77cd9ad 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -2123,8 +2123,8 @@ sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
u8 tmp8, tmp82, ramtype;
int bw = 0;
char *ramtypetext1 = NULL;
- const char *ramtypetext2[] = { "SDR SDRAM", "SDR SGRAM",
- "DDR SDRAM", "DDR SGRAM" };
+ static const char ram_datarate[4] = {'S', 'S', 'D', 'D'};
+ static const char ram_dynamictype[4] = {'D', 'G', 'D', 'G'};
static const int busSDR[4] = {64, 64, 128, 128};
static const int busDDR[4] = {32, 32, 64, 64};
static const int busDDRA[4] = {64+32, 64+32 , (64+32)*2, (64+32)*2};
@@ -2156,8 +2156,10 @@ sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
break;
}
- dev_info(&sisusb->sisusb_dev->dev, "%dMB %s %s, bus width %d\n", (sisusb->vramsize >> 20), ramtypetext1,
- ramtypetext2[ramtype], bw);
+
+ dev_info(&sisusb->sisusb_dev->dev, "%dMB %s %cDR S%cRAM, bus width %d\n",
+ sisusb->vramsize >> 20, ramtypetext1,
+ ram_datarate[ramtype], ram_dynamictype[ramtype], bw);
}
static int
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 78eb4ff33269..bdef0d6eb91d 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -22,8 +22,27 @@
enum led_type {
DELCOM_VISUAL_SIGNAL_INDICATOR,
DREAM_CHEEKY_WEBMAIL_NOTIFIER,
+ RISO_KAGAKU_LED
};
+/* The Webmail LED made by RISO KAGAKU CORP. decodes a color index
+ internally; to keep the red+green+blue sysfs API, we decode from
+ 1-bit RGB to the riso kagaku color index according to this table. */
+
+static unsigned const char riso_kagaku_tbl[] = {
+/* R+2G+4B -> riso kagaku color index */
+ [0] = 0, /* black */
+ [1] = 2, /* red */
+ [2] = 1, /* green */
+ [3] = 5, /* yellow */
+ [4] = 3, /* blue */
+ [5] = 6, /* magenta */
+ [6] = 4, /* cyan */
+ [7] = 7 /* white */
+};
+
+#define RISO_KAGAKU_IX(r,g,b) riso_kagaku_tbl[((r)?1:0)+((g)?2:0)+((b)?4:0)]
+
/* table of devices that work with this driver */
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0fc5, 0x1223),
@@ -32,6 +51,8 @@ static const struct usb_device_id id_table[] = {
.driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
{ USB_DEVICE(0x1d34, 0x000a),
.driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
+ { USB_DEVICE(0x1294, 0x1320),
+ .driver_info = RISO_KAGAKU_LED },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -48,6 +69,7 @@ static void change_color(struct usb_led *led)
{
int retval = 0;
unsigned char *buffer;
+ int actlength;
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer) {
@@ -104,6 +126,18 @@ static void change_color(struct usb_led *led)
2000);
break;
+ case RISO_KAGAKU_LED:
+ buffer[0] = RISO_KAGAKU_IX(led->red, led->green, led->blue);
+ buffer[1] = 0;
+ buffer[2] = 0;
+ buffer[3] = 0;
+ buffer[4] = 0;
+
+ retval = usb_interrupt_msg(led->udev,
+ usb_sndctrlpipe(led->udev, 2),
+ buffer, 5, &actlength, 1000 /*ms timeout*/);
+ break;
+
default:
dev_err(&led->udev->dev, "unknown device type %d\n", led->type);
}
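RISO_KAGAKU_IX() simply folds the three 1-bit colour values from sysfs into an index for riso_kagaku_tbl[], which then yields the device's own colour code. A quick standalone check of the decode, with the table copied from the hunk above:

#include <stdio.h>

static const unsigned char riso_kagaku_tbl[] = {
        [0] = 0, [1] = 2, [2] = 1, [3] = 5,
        [4] = 3, [5] = 6, [6] = 4, [7] = 7,
};

#define RISO_KAGAKU_IX(r, g, b) \
        riso_kagaku_tbl[((r) ? 1 : 0) + ((g) ? 2 : 0) + ((b) ? 4 : 0)]

int main(void)
{
        /* red + blue -> slot 1 + 4 = 5 -> device colour 6 (magenta) */
        printf("%d\n", RISO_KAGAKU_IX(255, 0, 255));
        /* green only -> slot 2 -> device colour 1 (green) */
        printf("%d\n", RISO_KAGAKU_IX(0, 1, 0));
        return 0;
}
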
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 688dc8bb192d..8b789792f6fa 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -43,6 +43,7 @@ config USB_MUSB_HOST
config USB_MUSB_GADGET
bool "Gadget only mode"
depends on USB_GADGET=y || USB_GADGET=USB_MUSB_HDRC
+ depends on HAS_DMA
help
Select this when you want to use MUSB in gadget mode only,
thereby the host feature will be regressed.
@@ -50,6 +51,7 @@ config USB_MUSB_GADGET
config USB_MUSB_DUAL_ROLE
bool "Dual Role mode"
depends on ((USB=y || USB=USB_MUSB_HDRC) && (USB_GADGET=y || USB_GADGET=USB_MUSB_HDRC))
+ depends on HAS_DMA
help
This is the default mode of working of MUSB controller where
both host and gadget features are enabled.
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 239ad0b1ceb6..07576907e2c6 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -438,7 +438,6 @@ void musb_hnp_stop(struct musb *musb)
static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
u8 devctl)
{
- struct usb_otg *otg = musb->xceiv->otg;
irqreturn_t handled = IRQ_NONE;
dev_dbg(musb->controller, "<== DevCtl=%02x, int_usb=0x%x\n", devctl,
@@ -656,7 +655,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
break;
case OTG_STATE_B_PERIPHERAL:
musb_g_suspend(musb);
- musb->is_active = otg->gadget->b_hnp_enable;
+ musb->is_active = musb->g.b_hnp_enable;
if (musb->is_active) {
musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
@@ -672,7 +671,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
break;
case OTG_STATE_A_HOST:
musb->xceiv->state = OTG_STATE_A_SUSPEND;
- musb->is_active = otg->host->b_hnp_enable;
+ musb->is_active = musb->hcd->self.b_hnp_enable;
break;
case OTG_STATE_B_HOST:
/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index f88929609bac..7b8bbf53127e 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -39,6 +39,7 @@ struct cppi41_dma_channel {
u32 transferred;
u32 packet_sz;
struct list_head tx_check;
+ struct work_struct dma_completion;
};
#define MUSB_DMA_NUM_CHANNELS 15
@@ -112,6 +113,18 @@ static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
return true;
}
+static bool is_isoc(struct musb_hw_ep *hw_ep, bool in)
+{
+ if (in && hw_ep->in_qh) {
+ if (hw_ep->in_qh->type == USB_ENDPOINT_XFER_ISOC)
+ return true;
+ } else if (hw_ep->out_qh) {
+ if (hw_ep->out_qh->type == USB_ENDPOINT_XFER_ISOC)
+ return true;
+ }
+ return false;
+}
+
static void cppi41_dma_callback(void *private_data);
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
@@ -119,7 +132,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
struct musb *musb = hw_ep->musb;
- if (!cppi41_channel->prog_len) {
+ if (!cppi41_channel->prog_len ||
+ (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {
/* done, complete */
cppi41_channel->channel.actual_len =
@@ -165,6 +179,32 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
}
}
+static void cppi_trans_done_work(struct work_struct *work)
+{
+ unsigned long flags;
+ struct cppi41_dma_channel *cppi41_channel =
+ container_of(work, struct cppi41_dma_channel, dma_completion);
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->musb;
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+ bool empty;
+
+ if (!cppi41_channel->is_tx && is_isoc(hw_ep, 1)) {
+ spin_lock_irqsave(&musb->lock, flags);
+ cppi41_trans_done(cppi41_channel);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ } else {
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty) {
+ spin_lock_irqsave(&musb->lock, flags);
+ cppi41_trans_done(cppi41_channel);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ } else {
+ schedule_work(&cppi41_channel->dma_completion);
+ }
+ }
+}
+
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
struct cppi41_dma_controller *controller;
@@ -228,6 +268,14 @@ static void cppi41_dma_callback(void *private_data)
transferred < cppi41_channel->packet_sz)
cppi41_channel->prog_len = 0;
+ if (!cppi41_channel->is_tx) {
+ if (is_isoc(hw_ep, 1))
+ schedule_work(&cppi41_channel->dma_completion);
+ else
+ cppi41_trans_done(cppi41_channel);
+ goto out;
+ }
+
empty = musb_is_tx_fifo_empty(hw_ep);
if (empty) {
cppi41_trans_done(cppi41_channel);
@@ -264,6 +312,10 @@ static void cppi41_dma_callback(void *private_data)
goto out;
}
}
+ if (is_isoc(hw_ep, 0)) {
+ schedule_work(&cppi41_channel->dma_completion);
+ goto out;
+ }
list_add_tail(&cppi41_channel->tx_check,
&controller->early_tx_list);
if (!hrtimer_active(&controller->early_tx)) {
@@ -448,12 +500,25 @@ static int cppi41_dma_channel_program(struct dma_channel *channel,
dma_addr_t dma_addr, u32 len)
{
int ret;
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ int hb_mult = 0;
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
+ if (is_host_active(cppi41_channel->controller->musb)) {
+ if (cppi41_channel->is_tx)
+ hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
+ else
+ hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
+ }
+
channel->status = MUSB_DMA_STATUS_BUSY;
channel->actual_len = 0;
+
+ if (hb_mult)
+ packet_sz = hb_mult * (packet_sz & 0x7FF);
+
ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
if (!ret)
channel->status = MUSB_DMA_STATUS_FREE;
@@ -607,6 +672,8 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
cppi41_channel->port_num = port;
cppi41_channel->is_tx = is_tx;
INIT_LIST_HEAD(&cppi41_channel->tx_check);
+ INIT_WORK(&cppi41_channel->dma_completion,
+ cppi_trans_done_work);
musb_dma = &cppi41_channel->channel;
musb_dma->private_data = cppi41_channel;
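The hb_mult handling added to cppi41_dma_channel_program() widens the programmed packet size to the full per-microframe payload for high-bandwidth host transfers: bits 10:0 of the maxpacket value give the base packet size, and hb_mult (which the MUSB host code derives from the additional-transaction bits of wMaxPacketSize) scales it. A small arithmetic sketch of that adjustment; the helper name is made up:

#include <stdio.h>

/* Mirrors "packet_sz = hb_mult * (packet_sz & 0x7FF)" from the hunk
 * above; hb_mult is the 1..3 packets-per-microframe count stored in
 * qh->hb_mult by the MUSB host code. */
static unsigned int cppi41_hb_packet_sz(unsigned int packet_sz,
                                        unsigned int hb_mult)
{
        return hb_mult * (packet_sz & 0x7ff);
}

int main(void)
{
        printf("%u\n", cppi41_hb_packet_sz(1024, 3)); /* 3072: HB isoc      */
        printf("%u\n", cppi41_hb_packet_sz(512, 1));  /* 512: plain bulk    */
        return 0;
}
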
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 7a109eae9b9a..3372ded5def7 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -45,6 +45,8 @@
#include <linux/of_irq.h>
#include <linux/usb/of.h>
+#include <linux/debugfs.h>
+
#include "musb_core.h"
static const struct of_device_id musb_dsps_of_match[];
@@ -136,6 +138,26 @@ struct dsps_glue {
unsigned long last_timer; /* last timer data for each instance */
struct dsps_context context;
+ struct debugfs_regset32 regset;
+ struct dentry *dbgfs_root;
+};
+
+static const struct debugfs_reg32 dsps_musb_regs[] = {
+ { "revision", 0x00 },
+ { "control", 0x14 },
+ { "status", 0x18 },
+ { "eoi", 0x24 },
+ { "intr0_stat", 0x30 },
+ { "intr1_stat", 0x34 },
+ { "intr0_set", 0x38 },
+ { "intr1_set", 0x3c },
+ { "txmode", 0x70 },
+ { "rxmode", 0x74 },
+ { "autoreq", 0xd0 },
+ { "srpfixtime", 0xd4 },
+ { "tdown", 0xd8 },
+ { "phy_utmi", 0xe0 },
+ { "mode", 0xe8 },
};
static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
@@ -368,6 +390,30 @@ out:
return ret;
}
+static int dsps_musb_dbg_init(struct musb *musb, struct dsps_glue *glue)
+{
+ struct dentry *root;
+ struct dentry *file;
+ char buf[128];
+
+ sprintf(buf, "%s.dsps", dev_name(musb->controller));
+ root = debugfs_create_dir(buf, NULL);
+ if (!root)
+ return -ENOMEM;
+ glue->dbgfs_root = root;
+
+ glue->regset.regs = dsps_musb_regs;
+ glue->regset.nregs = ARRAY_SIZE(dsps_musb_regs);
+ glue->regset.base = musb->ctrl_base;
+
+ file = debugfs_create_regset32("regdump", S_IRUGO, root, &glue->regset);
+ if (!file) {
+ debugfs_remove_recursive(root);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
static int dsps_musb_init(struct musb *musb)
{
struct device *dev = musb->controller;
@@ -377,6 +423,7 @@ static int dsps_musb_init(struct musb *musb)
void __iomem *reg_base;
struct resource *r;
u32 rev, val;
+ int ret;
r = platform_get_resource_byname(parent, IORESOURCE_MEM, "control");
if (!r)
@@ -410,6 +457,10 @@ static int dsps_musb_init(struct musb *musb)
val &= ~(1 << wrp->otg_disable);
dsps_writel(musb->ctrl_base, wrp->phy_utmi, val);
+ ret = dsps_musb_dbg_init(musb, glue);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -616,7 +667,7 @@ static int dsps_probe(struct platform_device *pdev)
wrp = match->data;
/* allocate glue */
- glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&pdev->dev, "unable to allocate glue memory\n");
return -ENOMEM;
@@ -644,7 +695,6 @@ err3:
pm_runtime_put(&pdev->dev);
err2:
pm_runtime_disable(&pdev->dev);
- kfree(glue);
return ret;
}
@@ -657,7 +707,9 @@ static int dsps_remove(struct platform_device *pdev)
/* disable usbss clocks */
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(glue);
+
+ debugfs_remove_recursive(glue->dbgfs_root);
+
return 0;
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index abb38c3833ef..eb06291a40c8 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1694,7 +1694,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
| MUSB_RXCSR_RXPKTRDY);
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
+ defined(CONFIG_USB_TI_CPPI41_DMA)
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
@@ -1707,10 +1708,30 @@ void musb_host_rx(struct musb *musb, u8 epnum)
if (d->status != -EILSEQ && d->status != -EOVERFLOW)
d->status = 0;
- if (++qh->iso_idx >= urb->number_of_packets)
+ if (++qh->iso_idx >= urb->number_of_packets) {
done = true;
- else
+ } else {
+#if defined(CONFIG_USB_TI_CPPI41_DMA)
+ struct dma_controller *c;
+ dma_addr_t *buf;
+ u32 length, ret;
+
+ c = musb->dma_controller;
+ buf = (void *)
+ urb->iso_frame_desc[qh->iso_idx].offset
+ + (u32)urb->transfer_dma;
+
+ length =
+ urb->iso_frame_desc[qh->iso_idx].length;
+
+ val |= MUSB_RXCSR_DMAENAB;
+ musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+ ret = c->channel_program(dma, qh->maxpacket,
+ 0, (u32) buf, length);
+#endif
done = false;
+ }
} else {
/* done if urb buffer is full or short packet is recd */
@@ -1750,7 +1771,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
}
/* we are expecting IN packets */
-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
+ defined(CONFIG_USB_TI_CPPI41_DMA)
if (dma) {
struct dma_controller *c;
u16 rx_count;
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 8aa59a2c5eb2..d341c149a2f9 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -37,7 +37,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/usb/musb-omap.h>
-#include <linux/usb/omap_control_usb.h>
+#include <linux/phy/omap_control_phy.h>
#include <linux/of_platform.h>
#include "musb_core.h"
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 7d1451d5bbea..416e0c8cf6ff 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -75,27 +75,6 @@ config NOP_USB_XCEIV
built-in with usb ip or which are autonomous and doesn't require any
phy programming such as ISP1x04 etc.
-config OMAP_CONTROL_USB
- tristate "OMAP CONTROL USB Driver"
- depends on ARCH_OMAP2PLUS || COMPILE_TEST
- help
- Enable this to add support for the USB part present in the control
- module. This driver has API to power on the USB2 PHY and to write to
- the mailbox. The mailbox is present only in omap4 and the register to
- power on the USB2 PHY is present in OMAP4 and OMAP5. OMAP5 has an
- additional register to power on USB3 PHY.
-
-config OMAP_USB3
- tristate "OMAP USB3 PHY Driver"
- depends on ARCH_OMAP2PLUS || COMPILE_TEST
- select OMAP_CONTROL_USB
- select USB_PHY
- help
- Enable this to support the USB3 PHY that is part of SOC. This
- driver takes care of all the PHY functionality apart from comparator.
- This driver interacts with the "OMAP Control USB Driver" to power
- on/off the PHY.
-
config AM335X_CONTROL_USB
tristate
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index be58adae3496..f8fa719a31b9 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -13,11 +13,9 @@ obj-$(CONFIG_ISP1301_OMAP) += phy-isp1301-omap.o
obj-$(CONFIG_MV_U3D_PHY) += phy-mv-u3d-usb.o
obj-$(CONFIG_NOP_USB_XCEIV) += phy-generic.o
obj-$(CONFIG_TAHVO_USB) += phy-tahvo.o
-obj-$(CONFIG_OMAP_CONTROL_USB) += phy-omap-control.o
obj-$(CONFIG_AM335X_CONTROL_USB) += phy-am335x-control.o
obj-$(CONFIG_AM335X_PHY_USB) += phy-am335x.o
obj-$(CONFIG_OMAP_OTG) += phy-omap-otg.o
-obj-$(CONFIG_OMAP_USB3) += phy-omap-usb3.o
obj-$(CONFIG_SAMSUNG_USBPHY) += phy-samsung-usb.o
obj-$(CONFIG_SAMSUNG_USB2PHY) += phy-samsung-usb2.o
obj-$(CONFIG_SAMSUNG_USB3PHY) += phy-samsung-usb3.o
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index 7aa314ef4a8a..c47e5a6edde2 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -317,10 +317,12 @@ int otg_statemachine(struct otg_fsm *fsm)
otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
break;
case OTG_STATE_A_HOST:
- if ((!fsm->a_bus_req || fsm->a_suspend_req_inf) &&
+ if (fsm->id || fsm->a_bus_drop)
+ otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
+ else if ((!fsm->a_bus_req || fsm->a_suspend_req_inf) &&
fsm->otg->host->b_hnp_enable)
otg_set_state(fsm, OTG_STATE_A_SUSPEND);
- else if (fsm->id || !fsm->b_conn || fsm->a_bus_drop)
+ else if (!fsm->b_conn)
otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
else if (!fsm->a_vbus_vld)
otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
@@ -346,8 +348,7 @@ int otg_statemachine(struct otg_fsm *fsm)
otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
break;
case OTG_STATE_A_WAIT_VFALL:
- if (fsm->a_wait_vfall_tmout || fsm->id || fsm->a_bus_req ||
- (!fsm->a_sess_vld && !fsm->b_conn))
+ if (fsm->a_wait_vfall_tmout)
otg_set_state(fsm, OTG_STATE_A_IDLE);
break;
case OTG_STATE_A_VBUS_ERR:
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index d204f745ed05..5b37b81f2ef6 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1428,7 +1428,8 @@ static int __init msm_otg_probe(struct platform_device *pdev)
motg->phy.otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL);
if (!motg->phy.otg) {
dev_err(&pdev->dev, "unable to allocate msm_otg\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_motg;
}
motg->pdata = dev_get_platdata(&pdev->dev);
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index b42897b6474c..c42bdf0c4a1f 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012-2013 Freescale Semiconductor, Inc.
* Copyright (C) 2012 Marek Vasut <marex@denx.de>
* on behalf of DENX Software Engineering GmbH
*
@@ -20,6 +20,9 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#define DRIVER_NAME "mxs_phy"
@@ -28,18 +31,134 @@
#define HW_USBPHY_CTRL_SET 0x34
#define HW_USBPHY_CTRL_CLR 0x38
+#define HW_USBPHY_DEBUG_SET 0x54
+#define HW_USBPHY_DEBUG_CLR 0x58
+
+#define HW_USBPHY_IP 0x90
+#define HW_USBPHY_IP_SET 0x94
+#define HW_USBPHY_IP_CLR 0x98
+
#define BM_USBPHY_CTRL_SFTRST BIT(31)
#define BM_USBPHY_CTRL_CLKGATE BIT(30)
+#define BM_USBPHY_CTRL_ENAUTOSET_USBCLKS BIT(26)
+#define BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE BIT(25)
+#define BM_USBPHY_CTRL_ENVBUSCHG_WKUP BIT(23)
+#define BM_USBPHY_CTRL_ENIDCHG_WKUP BIT(22)
+#define BM_USBPHY_CTRL_ENDPDMCHG_WKUP BIT(21)
+#define BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD BIT(20)
+#define BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE BIT(19)
+#define BM_USBPHY_CTRL_ENAUTO_PWRON_PLL BIT(18)
#define BM_USBPHY_CTRL_ENUTMILEVEL3 BIT(15)
#define BM_USBPHY_CTRL_ENUTMILEVEL2 BIT(14)
#define BM_USBPHY_CTRL_ENHOSTDISCONDETECT BIT(1)
+#define BM_USBPHY_IP_FIX (BIT(17) | BIT(18))
+
+#define BM_USBPHY_DEBUG_CLKGATE BIT(30)
+
+/* Anatop Registers */
+#define ANADIG_ANA_MISC0 0x150
+#define ANADIG_ANA_MISC0_SET 0x154
+#define ANADIG_ANA_MISC0_CLR 0x158
+
+#define ANADIG_USB1_VBUS_DET_STAT 0x1c0
+#define ANADIG_USB2_VBUS_DET_STAT 0x220
+
+#define ANADIG_USB1_LOOPBACK_SET 0x1e4
+#define ANADIG_USB1_LOOPBACK_CLR 0x1e8
+#define ANADIG_USB2_LOOPBACK_SET 0x244
+#define ANADIG_USB2_LOOPBACK_CLR 0x248
+
+#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG BIT(12)
+#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL BIT(11)
+
+#define BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID BIT(3)
+#define BM_ANADIG_USB2_VBUS_DET_STAT_VBUS_VALID BIT(3)
+
+#define BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 BIT(2)
+#define BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN BIT(5)
+#define BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 BIT(2)
+#define BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN BIT(5)
+
+#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
+
+/* Disconnect the line between PHY and controller when there is no vbus */
+#define MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS BIT(0)
+
+/*
+ * The PHY can end up in a messy state if there is a wakeup after putting
+ * the bus into suspend (setting portsc.suspendM) but before the PHY is put
+ * into low power mode (setting portsc.phcd).
+ */
+#define MXS_PHY_ABNORMAL_IN_SUSPEND BIT(1)
+
+/*
+ * The SOF sends too fast after resuming, it will cause disconnection
+ * between host and high speed device.
+ */
+#define MXS_PHY_SENDING_SOF_TOO_FAST BIT(2)
+
+/*
+ * The IC contains bug-fix logic covering the
+ * MXS_PHY_ABNORMAL_IN_SUSPEND and MXS_PHY_SENDING_SOF_TOO_FAST
+ * issues described by the flags above; the RTL handles them
+ * according to the IP version.
+ */
+#define MXS_PHY_NEED_IP_FIX BIT(3)
+
+struct mxs_phy_data {
+ unsigned int flags;
+};
+
+static const struct mxs_phy_data imx23_phy_data = {
+ .flags = MXS_PHY_ABNORMAL_IN_SUSPEND | MXS_PHY_SENDING_SOF_TOO_FAST,
+};
+
+static const struct mxs_phy_data imx6q_phy_data = {
+ .flags = MXS_PHY_SENDING_SOF_TOO_FAST |
+ MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+ MXS_PHY_NEED_IP_FIX,
+};
+
+static const struct mxs_phy_data imx6sl_phy_data = {
+ .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+ MXS_PHY_NEED_IP_FIX,
+};
+
+static const struct of_device_id mxs_phy_dt_ids[] = {
+ { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
+ { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
+ { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
+
struct mxs_phy {
struct usb_phy phy;
struct clk *clk;
+ const struct mxs_phy_data *data;
+ struct regmap *regmap_anatop;
+ int port_id;
};
-#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
+static inline bool is_imx6q_phy(struct mxs_phy *mxs_phy)
+{
+ return mxs_phy->data == &imx6q_phy_data;
+}
+
+static inline bool is_imx6sl_phy(struct mxs_phy *mxs_phy)
+{
+ return mxs_phy->data == &imx6sl_phy_data;
+}
+
+/*
+ * The PHY needs some 32K clock cycles to switch from the 32K clock to the
+ * bus clock (AHB/AXI, etc.).
+ */
+static void mxs_phy_clock_switch_delay(void)
+{
+ usleep_range(300, 400);
+}
static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
{
@@ -53,19 +172,105 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
/* Power up the PHY */
writel(0, base + HW_USBPHY_PWD);
- /* enable FS/LS device */
- writel(BM_USBPHY_CTRL_ENUTMILEVEL2 |
- BM_USBPHY_CTRL_ENUTMILEVEL3,
+ /*
+ * USB PHY Ctrl Setting
+ * - Auto clock/power on
+ * - Enable full/low speed support
+ */
+ writel(BM_USBPHY_CTRL_ENAUTOSET_USBCLKS |
+ BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE |
+ BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD |
+ BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE |
+ BM_USBPHY_CTRL_ENAUTO_PWRON_PLL |
+ BM_USBPHY_CTRL_ENUTMILEVEL2 |
+ BM_USBPHY_CTRL_ENUTMILEVEL3,
base + HW_USBPHY_CTRL_SET);
+ if (mxs_phy->data->flags & MXS_PHY_NEED_IP_FIX)
+ writel(BM_USBPHY_IP_FIX, base + HW_USBPHY_IP_SET);
+
return 0;
}
+/* Return true if the vbus is there */
+static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
+{
+ unsigned int vbus_value;
+
+ if (mxs_phy->port_id == 0)
+ regmap_read(mxs_phy->regmap_anatop,
+ ANADIG_USB1_VBUS_DET_STAT,
+ &vbus_value);
+ else if (mxs_phy->port_id == 1)
+ regmap_read(mxs_phy->regmap_anatop,
+ ANADIG_USB2_VBUS_DET_STAT,
+ &vbus_value);
+
+ if (vbus_value & BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID)
+ return true;
+ else
+ return false;
+}
+
+static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
+{
+ void __iomem *base = mxs_phy->phy.io_priv;
+ u32 reg;
+
+ if (disconnect)
+ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
+ base + HW_USBPHY_DEBUG_CLR);
+
+ if (mxs_phy->port_id == 0) {
+ reg = disconnect ? ANADIG_USB1_LOOPBACK_SET
+ : ANADIG_USB1_LOOPBACK_CLR;
+ regmap_write(mxs_phy->regmap_anatop, reg,
+ BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 |
+ BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN);
+ } else if (mxs_phy->port_id == 1) {
+ reg = disconnect ? ANADIG_USB2_LOOPBACK_SET
+ : ANADIG_USB2_LOOPBACK_CLR;
+ regmap_write(mxs_phy->regmap_anatop, reg,
+ BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 |
+ BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN);
+ }
+
+ if (!disconnect)
+ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
+ base + HW_USBPHY_DEBUG_SET);
+
+ /* Delay briefly so the controller sees the line state as SE0 */
+ if (disconnect)
+ usleep_range(500, 1000);
+}
+
+static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
+{
+ bool vbus_is_on = false;
+
+ /* If this SoC does not need the line disconnected without VBUS, quit */
+ if (!(mxs_phy->data->flags & MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS))
+ return;
+
+ /* If this SoC has no anatop block, quit */
+ if (!mxs_phy->regmap_anatop)
+ return;
+
+ vbus_is_on = mxs_phy_get_vbus_status(mxs_phy);
+
+ if (on && !vbus_is_on)
+ __mxs_phy_disconnect_line(mxs_phy, true);
+ else
+ __mxs_phy_disconnect_line(mxs_phy, false);
+}
+
static int mxs_phy_init(struct usb_phy *phy)
{
int ret;
struct mxs_phy *mxs_phy = to_mxs_phy(phy);
+ mxs_phy_clock_switch_delay();
ret = clk_prepare_enable(mxs_phy->clk);
if (ret)
return ret;
@@ -94,6 +299,7 @@ static int mxs_phy_suspend(struct usb_phy *x, int suspend)
x->io_priv + HW_USBPHY_CTRL_SET);
clk_disable_unprepare(mxs_phy->clk);
} else {
+ mxs_phy_clock_switch_delay();
ret = clk_prepare_enable(mxs_phy->clk);
if (ret)
return ret;
@@ -105,11 +311,28 @@ static int mxs_phy_suspend(struct usb_phy *x, int suspend)
return 0;
}
+static int mxs_phy_set_wakeup(struct usb_phy *x, bool enabled)
+{
+ struct mxs_phy *mxs_phy = to_mxs_phy(x);
+ u32 value = BM_USBPHY_CTRL_ENVBUSCHG_WKUP |
+ BM_USBPHY_CTRL_ENDPDMCHG_WKUP |
+ BM_USBPHY_CTRL_ENIDCHG_WKUP;
+ if (enabled) {
+ mxs_phy_disconnect_line(mxs_phy, true);
+ writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_SET);
+ } else {
+ writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_CLR);
+ mxs_phy_disconnect_line(mxs_phy, false);
+ }
+
+ return 0;
+}
+
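The new set_wakeup hook is normally reached through the usb_phy_set_wakeup() wrapper in <linux/usb/phy.h>; the controller-side call site is outside this diff, so the sketch below is illustrative only (the suspend routine and the drvdata layout are assumptions):

	/* Sketch: a controller driver arming PHY wakeup before system suspend. */
	static int example_controller_suspend(struct device *dev)
	{
		struct usb_phy *phy = dev_get_drvdata(dev);	/* assumed layout */

		return usb_phy_set_wakeup(phy, device_may_wakeup(dev));
	}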
static int mxs_phy_on_connect(struct usb_phy *phy,
enum usb_device_speed speed)
{
- dev_dbg(phy->dev, "%s speed device has connected\n",
- (speed == USB_SPEED_HIGH) ? "high" : "non-high");
+ dev_dbg(phy->dev, "%s device has connected\n",
+ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
if (speed == USB_SPEED_HIGH)
writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
@@ -121,8 +344,8 @@ static int mxs_phy_on_connect(struct usb_phy *phy,
static int mxs_phy_on_disconnect(struct usb_phy *phy,
enum usb_device_speed speed)
{
- dev_dbg(phy->dev, "%s speed device has disconnected\n",
- (speed == USB_SPEED_HIGH) ? "high" : "non-high");
+ dev_dbg(phy->dev, "%s device has disconnected\n",
+ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
if (speed == USB_SPEED_HIGH)
writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
@@ -138,6 +361,9 @@ static int mxs_phy_probe(struct platform_device *pdev)
struct clk *clk;
struct mxs_phy *mxs_phy;
int ret;
+ const struct of_device_id *of_id =
+ of_match_device(mxs_phy_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
@@ -157,6 +383,22 @@ static int mxs_phy_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ /* Some SoCs don't have anatop registers */
+ if (of_get_property(np, "fsl,anatop", NULL)) {
+ mxs_phy->regmap_anatop = syscon_regmap_lookup_by_phandle
+ (np, "fsl,anatop");
+ if (IS_ERR(mxs_phy->regmap_anatop)) {
+ dev_dbg(&pdev->dev,
+ "failed to find regmap for anatop\n");
+ return PTR_ERR(mxs_phy->regmap_anatop);
+ }
+ }
+
+ ret = of_alias_get_id(np, "usbphy");
+ if (ret < 0)
+ dev_dbg(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ mxs_phy->port_id = ret;
+
mxs_phy->phy.io_priv = base;
mxs_phy->phy.dev = &pdev->dev;
mxs_phy->phy.label = DRIVER_NAME;
@@ -166,11 +408,15 @@ static int mxs_phy_probe(struct platform_device *pdev)
mxs_phy->phy.notify_connect = mxs_phy_on_connect;
mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect;
mxs_phy->phy.type = USB_PHY_TYPE_USB2;
+ mxs_phy->phy.set_wakeup = mxs_phy_set_wakeup;
mxs_phy->clk = clk;
+ mxs_phy->data = of_id->data;
platform_set_drvdata(pdev, mxs_phy);
+ device_set_wakeup_capable(&pdev->dev, true);
+
ret = usb_add_phy_dev(&mxs_phy->phy);
if (ret)
return ret;
@@ -187,11 +433,46 @@ static int mxs_phy_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mxs_phy_dt_ids[] = {
- { .compatible = "fsl,imx23-usbphy", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
+#ifdef CONFIG_PM_SLEEP
+static void mxs_phy_enable_ldo_in_suspend(struct mxs_phy *mxs_phy, bool on)
+{
+ unsigned int reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;
+
+ /* If this SoC has no anatop block, quit */
+ if (!mxs_phy->regmap_anatop)
+ return;
+
+ if (is_imx6q_phy(mxs_phy))
+ regmap_write(mxs_phy->regmap_anatop, reg,
+ BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG);
+ else if (is_imx6sl_phy(mxs_phy))
+ regmap_write(mxs_phy->regmap_anatop,
+ reg, BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL);
+}
+
+static int mxs_phy_system_suspend(struct device *dev)
+{
+ struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ mxs_phy_enable_ldo_in_suspend(mxs_phy, true);
+
+ return 0;
+}
+
+static int mxs_phy_system_resume(struct device *dev)
+{
+ struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ mxs_phy_enable_ldo_in_suspend(mxs_phy, false);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(mxs_phy_pm, mxs_phy_system_suspend,
+ mxs_phy_system_resume);
static struct platform_driver mxs_phy_driver = {
.probe = mxs_phy_probe,
@@ -200,6 +481,7 @@ static struct platform_driver mxs_phy_driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = mxs_phy_dt_ids,
+ .pm = &mxs_phy_pm,
},
};
diff --git a/drivers/usb/phy/phy-omap-usb3.c b/drivers/usb/phy/phy-omap-usb3.c
deleted file mode 100644
index 0c6ba29bdddd..000000000000
--- a/drivers/usb/phy/phy-omap-usb3.c
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * omap-usb3 - USB PHY, talking to dwc3 controller in OMAP.
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/usb/omap_usb.h>
-#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/pm_runtime.h>
-#include <linux/delay.h>
-#include <linux/usb/omap_control_usb.h>
-#include <linux/of_platform.h>
-
-#define PLL_STATUS 0x00000004
-#define PLL_GO 0x00000008
-#define PLL_CONFIGURATION1 0x0000000C
-#define PLL_CONFIGURATION2 0x00000010
-#define PLL_CONFIGURATION3 0x00000014
-#define PLL_CONFIGURATION4 0x00000020
-
-#define PLL_REGM_MASK 0x001FFE00
-#define PLL_REGM_SHIFT 0x9
-#define PLL_REGM_F_MASK 0x0003FFFF
-#define PLL_REGM_F_SHIFT 0x0
-#define PLL_REGN_MASK 0x000001FE
-#define PLL_REGN_SHIFT 0x1
-#define PLL_SELFREQDCO_MASK 0x0000000E
-#define PLL_SELFREQDCO_SHIFT 0x1
-#define PLL_SD_MASK 0x0003FC00
-#define PLL_SD_SHIFT 0x9
-#define SET_PLL_GO 0x1
-#define PLL_TICOPWDN 0x10000
-#define PLL_LOCK 0x2
-#define PLL_IDLE 0x1
-
-/*
- * This is an Empirical value that works, need to confirm the actual
- * value required for the USB3PHY_PLL_CONFIGURATION2.PLL_IDLE status
- * to be correctly reflected in the USB3PHY_PLL_STATUS register.
- */
-# define PLL_IDLE_TIME 100;
-
-struct usb_dpll_map {
- unsigned long rate;
- struct usb_dpll_params params;
-};
-
-static struct usb_dpll_map dpll_map[] = {
- {12000000, {1250, 5, 4, 20, 0} }, /* 12 MHz */
- {16800000, {3125, 20, 4, 20, 0} }, /* 16.8 MHz */
- {19200000, {1172, 8, 4, 20, 65537} }, /* 19.2 MHz */
- {20000000, {1000, 7, 4, 10, 0} }, /* 20 MHz */
- {26000000, {1250, 12, 4, 20, 0} }, /* 26 MHz */
- {38400000, {3125, 47, 4, 20, 92843} }, /* 38.4 MHz */
-};
-
-static struct usb_dpll_params *omap_usb3_get_dpll_params(unsigned long rate)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dpll_map); i++) {
- if (rate == dpll_map[i].rate)
- return &dpll_map[i].params;
- }
-
- return NULL;
-}
-
-static int omap_usb3_suspend(struct usb_phy *x, int suspend)
-{
- struct omap_usb *phy = phy_to_omapusb(x);
- int val;
- int timeout = PLL_IDLE_TIME;
-
- if (suspend && !phy->is_suspended) {
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
- val |= PLL_IDLE;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
-
- do {
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_STATUS);
- if (val & PLL_TICOPWDN)
- break;
- udelay(1);
- } while (--timeout);
-
- omap_control_usb_phy_power(phy->control_dev, 0);
-
- phy->is_suspended = 1;
- } else if (!suspend && phy->is_suspended) {
- phy->is_suspended = 0;
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
- val &= ~PLL_IDLE;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
-
- do {
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_STATUS);
- if (!(val & PLL_TICOPWDN))
- break;
- udelay(1);
- } while (--timeout);
- }
-
- return 0;
-}
-
-static void omap_usb_dpll_relock(struct omap_usb *phy)
-{
- u32 val;
- unsigned long timeout;
-
- omap_usb_writel(phy->pll_ctrl_base, PLL_GO, SET_PLL_GO);
-
- timeout = jiffies + msecs_to_jiffies(20);
- do {
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_STATUS);
- if (val & PLL_LOCK)
- break;
- } while (!WARN_ON(time_after(jiffies, timeout)));
-}
-
-static int omap_usb_dpll_lock(struct omap_usb *phy)
-{
- u32 val;
- unsigned long rate;
- struct usb_dpll_params *dpll_params;
-
- rate = clk_get_rate(phy->sys_clk);
- dpll_params = omap_usb3_get_dpll_params(rate);
- if (!dpll_params) {
- dev_err(phy->dev,
- "No DPLL configuration for %lu Hz SYS CLK\n", rate);
- return -EINVAL;
- }
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
- val &= ~PLL_REGN_MASK;
- val |= dpll_params->n << PLL_REGN_SHIFT;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
- val &= ~PLL_SELFREQDCO_MASK;
- val |= dpll_params->freq << PLL_SELFREQDCO_SHIFT;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
- val &= ~PLL_REGM_MASK;
- val |= dpll_params->m << PLL_REGM_SHIFT;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION4);
- val &= ~PLL_REGM_F_MASK;
- val |= dpll_params->mf << PLL_REGM_F_SHIFT;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION4, val);
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION3);
- val &= ~PLL_SD_MASK;
- val |= dpll_params->sd << PLL_SD_SHIFT;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION3, val);
-
- omap_usb_dpll_relock(phy);
-
- return 0;
-}
-
-static int omap_usb3_init(struct usb_phy *x)
-{
- struct omap_usb *phy = phy_to_omapusb(x);
- int ret;
-
- ret = omap_usb_dpll_lock(phy);
- if (ret)
- return ret;
-
- omap_control_usb_phy_power(phy->control_dev, 1);
-
- return 0;
-}
-
-static int omap_usb3_probe(struct platform_device *pdev)
-{
- struct omap_usb *phy;
- struct resource *res;
- struct device_node *node = pdev->dev.of_node;
- struct device_node *control_node;
- struct platform_device *control_pdev;
-
- if (!node)
- return -EINVAL;
-
- phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
- if (!phy) {
- dev_err(&pdev->dev, "unable to alloc mem for OMAP USB3 PHY\n");
- return -ENOMEM;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll_ctrl");
- phy->pll_ctrl_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(phy->pll_ctrl_base))
- return PTR_ERR(phy->pll_ctrl_base);
-
- phy->dev = &pdev->dev;
-
- phy->phy.dev = phy->dev;
- phy->phy.label = "omap-usb3";
- phy->phy.init = omap_usb3_init;
- phy->phy.set_suspend = omap_usb3_suspend;
- phy->phy.type = USB_PHY_TYPE_USB3;
-
- phy->is_suspended = 1;
- phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
- if (IS_ERR(phy->wkupclk)) {
- dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
- return PTR_ERR(phy->wkupclk);
- }
- clk_prepare(phy->wkupclk);
-
- phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m");
- if (IS_ERR(phy->optclk)) {
- dev_err(&pdev->dev, "unable to get usb_otg_ss_refclk960m\n");
- return PTR_ERR(phy->optclk);
- }
- clk_prepare(phy->optclk);
-
- phy->sys_clk = devm_clk_get(phy->dev, "sys_clkin");
- if (IS_ERR(phy->sys_clk)) {
- pr_err("%s: unable to get sys_clkin\n", __func__);
- return -EINVAL;
- }
-
- control_node = of_parse_phandle(node, "ctrl-module", 0);
- if (!control_node) {
- dev_err(&pdev->dev, "Failed to get control device phandle\n");
- return -EINVAL;
- }
- control_pdev = of_find_device_by_node(control_node);
- if (!control_pdev) {
- dev_err(&pdev->dev, "Failed to get control device\n");
- return -EINVAL;
- }
-
- phy->control_dev = &control_pdev->dev;
-
- omap_control_usb_phy_power(phy->control_dev, 0);
- usb_add_phy_dev(&phy->phy);
-
- platform_set_drvdata(pdev, phy);
-
- pm_runtime_enable(phy->dev);
- pm_runtime_get(&pdev->dev);
-
- return 0;
-}
-
-static int omap_usb3_remove(struct platform_device *pdev)
-{
- struct omap_usb *phy = platform_get_drvdata(pdev);
-
- clk_unprepare(phy->wkupclk);
- clk_unprepare(phy->optclk);
- usb_remove_phy(&phy->phy);
- if (!pm_runtime_suspended(&pdev->dev))
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_RUNTIME
-
-static int omap_usb3_runtime_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_usb *phy = platform_get_drvdata(pdev);
-
- clk_disable(phy->wkupclk);
- clk_disable(phy->optclk);
-
- return 0;
-}
-
-static int omap_usb3_runtime_resume(struct device *dev)
-{
- u32 ret = 0;
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_usb *phy = platform_get_drvdata(pdev);
-
- ret = clk_enable(phy->optclk);
- if (ret) {
- dev_err(phy->dev, "Failed to enable optclk %d\n", ret);
- goto err1;
- }
-
- ret = clk_enable(phy->wkupclk);
- if (ret) {
- dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
- goto err2;
- }
-
- return 0;
-
-err2:
- clk_disable(phy->optclk);
-
-err1:
- return ret;
-}
-
-static const struct dev_pm_ops omap_usb3_pm_ops = {
- SET_RUNTIME_PM_OPS(omap_usb3_runtime_suspend, omap_usb3_runtime_resume,
- NULL)
-};
-
-#define DEV_PM_OPS (&omap_usb3_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id omap_usb3_id_table[] = {
- { .compatible = "ti,omap-usb3" },
- {}
-};
-MODULE_DEVICE_TABLE(of, omap_usb3_id_table);
-#endif
-
-static struct platform_driver omap_usb3_driver = {
- .probe = omap_usb3_probe,
- .remove = omap_usb3_remove,
- .driver = {
- .name = "omap-usb3",
- .owner = THIS_MODULE,
- .pm = DEV_PM_OPS,
- .of_match_table = of_match_ptr(omap_usb3_id_table),
- },
-};
-
-module_platform_driver(omap_usb3_driver);
-
-MODULE_ALIAS("platform: omap_usb3");
-MODULE_AUTHOR("Texas Instruments Inc.");
-MODULE_DESCRIPTION("OMAP USB3 phy driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-rcar-gen2-usb.c b/drivers/usb/phy/phy-rcar-gen2-usb.c
index 551e0a6c0e22..388d89f6b141 100644
--- a/drivers/usb/phy/phy-rcar-gen2-usb.c
+++ b/drivers/usb/phy/phy-rcar-gen2-usb.c
@@ -177,15 +177,15 @@ static int rcar_gen2_usb_phy_probe(struct platform_device *pdev)
struct clk *clk;
int retval;
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
dev_err(dev, "No platform data\n");
return -EINVAL;
}
- clk = devm_clk_get(&pdev->dev, "usbhs");
+ clk = devm_clk_get(dev, "usbhs");
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "Can't get the clock\n");
+ dev_err(dev, "Can't get the clock\n");
return PTR_ERR(clk);
}
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 214172b68d5d..04778cf80d60 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -27,7 +27,7 @@
#include <linux/io.h>
#include <linux/usb/musb-omap.h>
#include <linux/usb/phy_companion.h>
-#include <linux/usb/omap_usb.h>
+#include <linux/phy/omap_usb.h>
#include <linux/i2c/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index 217339dd7a90..17ea3f271bd8 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -47,6 +47,8 @@ struct ulpi_info {
static struct ulpi_info ulpi_ids[] = {
ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
+ ULPI_INFO(ULPI_ID(0x0424, 0x0007), "SMSC USB3320"),
+ ULPI_INFO(ULPI_ID(0x0451, 0x1507), "TI TUSB1210"),
};
static int ulpi_set_otg_flags(struct usb_phy *phy)
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 82371f61f23d..2d72aa3564a3 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -323,11 +323,11 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
if (r)
goto out;
- dev_dbg(&port->dev, "%s - submitting interrupt urb", __func__);
+ dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__);
r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (r) {
- dev_err(&port->dev, "%s - failed submitting interrupt urb,"
- " error %d\n", __func__, r);
+ dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
+ __func__, r);
ch341_close(port);
goto out;
}
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 0ac3b3b3236c..2916dea3ede8 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -220,7 +220,7 @@ static int cyberjack_write(struct tty_struct *tty,
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev,
- "%s - failed submitting write urb, error %d",
+ "%s - failed submitting write urb, error %d\n",
__func__, result);
/* Throw away data. No better idea what to do with it. */
priv->wrfilled = 0;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index bccb1223143a..01bf53392819 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -279,7 +279,7 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
* the generic firmware, but are not used with
* NMEA and SiRF protocols */
dev_dbg(&port->dev,
- "%s - failed setting baud rate, unsupported speed of %d on Earthmate GPS",
+ "%s - failed setting baud rate, unsupported speed of %d on Earthmate GPS\n",
__func__, new_rate);
return -1;
}
@@ -1224,7 +1224,6 @@ static void cypress_write_int_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct cypress_private *priv = usb_get_serial_port_data(port);
struct device *dev = &urb->dev->dev;
- int result;
int status = urb->status;
switch (status) {
@@ -1239,21 +1238,9 @@ static void cypress_write_int_callback(struct urb *urb)
__func__, status);
priv->write_urb_in_use = 0;
return;
- case -EPIPE: /* no break needed; clear halt and resubmit */
- if (!priv->comm_is_ok)
- break;
- usb_clear_halt(port->serial->dev, 0x02);
- /* error in the urb, so we have to resubmit it */
- dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
- __func__, status);
- port->interrupt_out_urb->transfer_buffer_length = 1;
- result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
- if (!result)
- return;
- dev_err(dev, "%s - failed resubmitting write urb, error %d\n",
- __func__, result);
- cypress_set_dead(port);
- break;
+ case -EPIPE:
+ /* Cannot call usb_clear_halt while in_interrupt */
+ /* FALLTHROUGH */
default:
dev_err(dev, "%s - unexpected nonzero write status received: %d\n",
__func__, status);
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index b63ce023f96f..1bd192290b08 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -332,9 +332,9 @@ void usb_serial_generic_process_read_urb(struct urb *urb)
* stuff like 3G modems, so shortcircuit it in the 99.9999999% of
* cases where the USB serial is not a console anyway.
*/
- if (!port->port.console || !port->sysrq)
+ if (!port->port.console || !port->sysrq) {
tty_insert_flip_string(&port->port, ch, urb->actual_length);
- else {
+ } else {
for (i = 0; i < urb->actual_length; i++, ch++) {
if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(&port->port, *ch, TTY_NORMAL);
@@ -359,24 +359,38 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
dev_dbg(&port->dev, "%s - urb %d, len %d\n", __func__, i,
urb->actual_length);
-
- if (urb->status) {
- dev_dbg(&port->dev, "%s - non-zero urb status: %d\n",
- __func__, urb->status);
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&port->dev, "%s - urb stopped: %d\n",
+ __func__, urb->status);
return;
+ case -EPIPE:
+ dev_err(&port->dev, "%s - urb stopped: %d\n",
+ __func__, urb->status);
+ return;
+ default:
+ dev_err(&port->dev, "%s - nonzero urb status: %d\n",
+ __func__, urb->status);
+ goto resubmit;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
port->serial->type->process_read_urb(urb);
+resubmit:
/* Throttle the device if requested by tty */
spin_lock_irqsave(&port->lock, flags);
port->throttled = port->throttle_req;
if (!port->throttled) {
spin_unlock_irqrestore(&port->lock, flags);
usb_serial_generic_submit_read_urb(port, i, GFP_ATOMIC);
- } else
+ } else {
spin_unlock_irqrestore(&port->lock, flags);
+ }
}
EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback);
@@ -384,29 +398,38 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
{
unsigned long flags;
struct usb_serial_port *port = urb->context;
- int status = urb->status;
int i;
- for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
if (port->write_urbs[i] == urb)
break;
-
+ }
spin_lock_irqsave(&port->lock, flags);
port->tx_bytes -= urb->transfer_buffer_length;
set_bit(i, &port->write_urbs_free);
spin_unlock_irqrestore(&port->lock, flags);
- if (status) {
- dev_dbg(&port->dev, "%s - non-zero urb status: %d\n",
- __func__, status);
-
- spin_lock_irqsave(&port->lock, flags);
- kfifo_reset_out(&port->write_fifo);
- spin_unlock_irqrestore(&port->lock, flags);
- } else {
- usb_serial_generic_write_start(port, GFP_ATOMIC);
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&port->dev, "%s - urb stopped: %d\n",
+ __func__, urb->status);
+ return;
+ case -EPIPE:
+ dev_err_console(port, "%s - urb stopped: %d\n",
+ __func__, urb->status);
+ return;
+ default:
+ dev_err_console(port, "%s - nonzero urb status: %d\n",
+ __func__, urb->status);
+ goto resubmit;
}
+resubmit:
+ usb_serial_generic_write_start(port, GFP_ATOMIC);
usb_serial_port_softint(port);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback);
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index d00dae17d520..5ad4a0fb4b26 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1151,7 +1151,7 @@ static ssize_t vcc_mode_store(struct device *dev,
goto fail_store_vcc_mode;
}
- dev_dbg(dev, "%s: setting vcc_mode = %ld", __func__, v);
+ dev_dbg(dev, "%s: setting vcc_mode = %ld\n", __func__, v);
if ((v != 3) && (v != 5)) {
dev_err(dev, "%s - vcc_mode %ld is invalid\n", __func__, v);
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 265c6776b081..d3acaead5a81 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -397,17 +397,6 @@ static void usa26_instat_callback(struct urb *urb)
msg = (struct keyspan_usa26_portStatusMessage *)data;
-#if 0
- dev_dbg(&urb->dev->dev,
- "%s - port status: port %d cts %d dcd %d dsr %d ri %d toff %d txoff %d rxen %d cr %d",
- __func__, msg->port, msg->hskia_cts, msg->gpia_dcd, msg->dsr,
- msg->ri, msg->_txOff, msg->_txXoff, msg->rxEnabled,
- msg->controlResponse);
-#endif
-
- /* Now do something useful with the data */
-
-
/* Check port number from message and retrieve private data */
if (msg->port >= serial->num_ports) {
dev_dbg(&urb->dev->dev, "%s - Unexpected port number %d\n", __func__, msg->port);
@@ -523,9 +512,6 @@ static void usa28_instat_callback(struct urb *urb)
goto exit;
}
- /*dev_dbg(&urb->dev->dev, "%s %12ph", __func__, data);*/
-
- /* Now do something useful with the data */
msg = (struct keyspan_usa28_portStatusMessage *)data;
/* Check port number from message and retrieve private data */
@@ -605,9 +591,6 @@ static void usa49_instat_callback(struct urb *urb)
goto exit;
}
- /*dev_dbg(&urb->dev->dev, "%s: %11ph", __func__, data);*/
-
- /* Now do something useful with the data */
msg = (struct keyspan_usa49_portStatusMessage *)data;
/* Check port number from message and retrieve private data */
@@ -1793,12 +1776,6 @@ static int keyspan_usa28_send_setup(struct usb_serial *serial,
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed\n", __func__);
-#if 0
- else {
- dev_dbg(&port->dev, "%s - usb_submit_urb(setup) OK %d bytes\n", __func__,
- this_urb->transfer_buffer_length);
- }
-#endif
return 0;
}
@@ -1976,13 +1953,6 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err);
-#if 0
- else {
- dev_dbg(&port->dev, "%s - usb_submit_urb(%d) OK %d bytes (end %d)\n", __func__,
- outcont_urb, this_urb->transfer_buffer_length,
- usb_pipeendpoint(this_urb->pipe));
- }
-#endif
return 0;
}
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index e972412b614b..742d827f876c 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -189,7 +189,7 @@ exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev,
- "%s - usb_submit_urb failed with result %d",
+ "%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index c88cc4966b23..d7440b7557af 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -201,7 +201,7 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
else {
status = get_unaligned_le16(status_buf);
- dev_info(&port->serial->dev->dev, "read status %x %x",
+ dev_info(&port->serial->dev->dev, "read status %x %x\n",
status_buf[0], status_buf[1]);
*line_state_p = klsi_105_status2linestate(status);
@@ -464,7 +464,7 @@ static void klsi_105_set_termios(struct tty_struct *tty,
priv->cfg.baudrate = kl5kusb105a_sio_b115200;
break;
default:
- dev_dbg(dev, "KLSI USB->Serial converter: unsupported baudrate request, using default of 9600");
+ dev_dbg(dev, "unsupported baudrate, using 9600\n");
priv->cfg.baudrate = kl5kusb105a_sio_b9600;
baud = 9600;
break;
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 618c1c1f227e..fee242387f55 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -557,7 +557,8 @@ static int kobil_ioctl(struct tty_struct *tty,
);
dev_dbg(&port->dev,
- "%s - Send reset_all_queues (FLUSH) URB returns: %i", __func__, result);
+ "%s - Send reset_all_queues (FLUSH) URB returns: %i\n",
+ __func__, result);
kfree(transfer_buffer);
return (result < 0) ? -EIO: 0;
default:
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 4eb277225a77..dfd728a263d2 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -209,7 +209,7 @@ static int write_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
index, NULL, 0, MOS_WDR_TIMEOUT);
if (status < 0)
dev_err(&usbdev->dev,
- "mos7720: usb_control_msg() failed: %d", status);
+ "mos7720: usb_control_msg() failed: %d\n", status);
return status;
}
@@ -240,7 +240,7 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
*data = *buf;
else if (status < 0)
dev_err(&usbdev->dev,
- "mos7720: usb_control_msg() failed: %d", status);
+ "mos7720: usb_control_msg() failed: %d\n", status);
kfree(buf);
return status;
@@ -399,7 +399,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
&mos_parport->deferred_urbs);
spin_unlock_irqrestore(&mos_parport->listlock, flags);
tasklet_schedule(&mos_parport->urb_tasklet);
- dev_dbg(&usbdev->dev, "tasklet scheduled");
+ dev_dbg(&usbdev->dev, "tasklet scheduled\n");
return 0;
}
@@ -418,7 +418,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
mutex_unlock(&serial->disc_mutex);
if (ret_val) {
dev_err(&usbdev->dev,
- "%s: submit_urb() failed: %d", __func__, ret_val);
+ "%s: submit_urb() failed: %d\n", __func__, ret_val);
spin_lock_irqsave(&mos_parport->listlock, flags);
list_del(&urbtrack->urblist_entry);
spin_unlock_irqrestore(&mos_parport->listlock, flags);
@@ -656,7 +656,7 @@ static size_t parport_mos7715_write_compat(struct parport *pp,
parport_epilogue(pp);
if (retval) {
dev_err(&mos_parport->serial->dev->dev,
- "mos7720: usb_bulk_msg() failed: %d", retval);
+ "mos7720: usb_bulk_msg() failed: %d\n", retval);
return 0;
}
return actual_len;
@@ -875,7 +875,7 @@ static void mos7715_interrupt_callback(struct urb *urb)
if (!(iir & 0x01)) { /* serial port interrupt pending */
switch (iir & 0x0f) {
case SERIAL_IIR_RLS:
- dev_dbg(dev, "Serial Port: Receiver status error or address bit detected in 9-bit mode\n\n");
+ dev_dbg(dev, "Serial Port: Receiver status error or address bit detected in 9-bit mode\n");
break;
case SERIAL_IIR_CTI:
dev_dbg(dev, "Serial Port: Receiver time out\n");
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index e9d967ff521b..393be562d875 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -522,11 +522,11 @@ static void mos7840_set_led_callback(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* This urb is terminated, clean up */
- dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d",
+ dev_dbg(&urb->dev->dev, "%s - urb shutting down: %d\n",
__func__, urb->status);
break;
default:
- dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d",
+ dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n",
__func__, urb->status);
}
}
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 7725ed261ed6..504f5bff79c0 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -372,7 +372,7 @@ static int qt2_open(struct tty_struct *tty, struct usb_serial_port *port)
device_port, data, 2, QT2_USB_TIMEOUT);
if (status < 0) {
- dev_err(&port->dev, "%s - open port failed %i", __func__,
+ dev_err(&port->dev, "%s - open port failed %i\n", __func__,
status);
kfree(data);
return status;
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 4ec04f73c800..ef0dbf0703c5 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -220,9 +220,9 @@ static int spcp8x5_get_msr(struct usb_serial_port *port, u8 *status)
GET_UART_STATUS, GET_UART_STATUS_TYPE,
0, GET_UART_STATUS_MSR, buf, 1, 100);
if (ret < 0)
- dev_err(&port->dev, "failed to get modem status: %d", ret);
+ dev_err(&port->dev, "failed to get modem status: %d\n", ret);
- dev_dbg(&port->dev, "0xc0:0x22:0:6 %d - 0x02%x", ret, *buf);
+ dev_dbg(&port->dev, "0xc0:0x22:0:6 %d - 0x02%x\n", ret, *buf);
*status = *buf;
kfree(buf);
@@ -342,8 +342,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
case 1000000:
buf[0] = 0x0b; break;
default:
- dev_err(&port->dev, "spcp825 driver does not support the "
- "baudrate requested, using default of 9600.\n");
+ dev_err(&port->dev, "unsupported baudrate, using 9600\n");
}
/* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 9fa7dd413e83..8fceec7298e0 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -74,9 +74,7 @@ static void symbol_int_callback(struct urb *urb)
tty_insert_flip_string(&port->port, &data[1], data_length);
tty_flip_buffer_push(&port->port);
} else {
- dev_dbg(&port->dev,
- "Improper amount of data received from the device, "
- "%d bytes", urb->actual_length);
+ dev_dbg(&port->dev, "%s - short packet\n", __func__);
}
exit:
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index ec7cea585663..3dd3ff8c50d3 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -293,7 +293,7 @@ static int ti_startup(struct usb_serial *serial)
int status;
dev_dbg(&dev->dev,
- "%s - product 0x%4X, num configurations %d, configuration value %d",
+ "%s - product 0x%4X, num configurations %d, configuration value %d\n",
__func__, le16_to_cpu(dev->descriptor.idProduct),
dev->descriptor.bNumConfigurations,
dev->actconfig->desc.bConfigurationValue);
@@ -803,7 +803,7 @@ static void ti_set_termios(struct tty_struct *tty,
tty_encode_baud_rate(tty, baud, baud);
dev_dbg(&port->dev,
- "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d",
+ "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n",
__func__, baud, config->wBaudRate, config->wFlags,
config->bDataBits, config->bParity, config->bStopBits,
config->cXon, config->cXoff, config->bUartMode);
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 7c9dc28640bb..81fc0dfcfdcf 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -868,7 +868,7 @@ static int usb_serial_probe(struct usb_interface *interface,
max_endpoints = max(max_endpoints, (int)serial->num_ports);
serial->num_port_pointers = max_endpoints;
- dev_dbg(ddev, "setting up %d port structures for this device", max_endpoints);
+ dev_dbg(ddev, "setting up %d port structure(s)\n", max_endpoints);
for (i = 0; i < max_endpoints; ++i) {
port = kzalloc(sizeof(struct usb_serial_port), GFP_KERNEL);
if (!port)
@@ -923,9 +923,8 @@ static int usb_serial_probe(struct usb_interface *interface,
port = serial->port[i];
if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
goto probe_error;
- buffer_size = serial->type->bulk_out_size;
- if (!buffer_size)
- buffer_size = usb_endpoint_maxp(endpoint);
+ buffer_size = max_t(int, serial->type->bulk_out_size,
+ usb_endpoint_maxp(endpoint));
port->bulk_out_size = buffer_size;
port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
@@ -1034,7 +1033,7 @@ static int usb_serial_probe(struct usb_interface *interface,
for (i = 0; i < num_ports; ++i) {
port = serial->port[i];
dev_set_name(&port->dev, "ttyUSB%d", port->minor);
- dev_dbg(ddev, "registering %s", dev_name(&port->dev));
+ dev_dbg(ddev, "registering %s\n", dev_name(&port->dev));
device_enable_async_suspend(&port->dev);
retval = device_add(&port->dev);
@@ -1161,9 +1160,9 @@ static int usb_serial_reset_resume(struct usb_interface *intf)
usb_serial_unpoison_port_urbs(serial);
serial->suspending = 0;
- if (serial->type->reset_resume)
+ if (serial->type->reset_resume) {
rv = serial->type->reset_resume(serial);
- else {
+ } else {
rv = -EOPNOTSUPP;
intf->needs_binding = 1;
}
@@ -1338,9 +1337,9 @@ static int usb_serial_register(struct usb_serial_driver *driver)
if (retval) {
pr_err("problem %d when registering driver %s\n", retval, driver->description);
list_del(&driver->driver_list);
- } else
+ } else {
pr_info("USB Serial support registered for %s\n", driver->description);
-
+ }
mutex_unlock(&table_lock);
return retval;
}
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 1dd0604d1911..13b5bfbaf951 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -204,7 +204,7 @@ config USB_STORAGE_ENE_UB6250
config USB_UAS
tristate "USB Attached SCSI"
- depends on SCSI && BROKEN
+ depends on SCSI && USB_STORAGE
help
The USB Attached SCSI protocol is supported by some USB
storage devices. It permits higher performance by supporting
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
new file mode 100644
index 000000000000..bb05b984d5f6
--- /dev/null
+++ b/drivers/usb/storage/uas-detect.h
@@ -0,0 +1,96 @@
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include "usb.h"
+
+static int uas_is_interface(struct usb_host_interface *intf)
+{
+ return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE &&
+ intf->desc.bInterfaceSubClass == USB_SC_SCSI &&
+ intf->desc.bInterfaceProtocol == USB_PR_UAS);
+}
+
+static int uas_isnt_supported(struct usb_device *udev)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ dev_warn(&udev->dev, "The driver for the USB controller %s does not "
+ "support scatter-gather which is\n",
+ hcd->driver->description);
+ dev_warn(&udev->dev, "required by the UAS driver. Please try an"
+ "alternative USB controller if you wish to use UAS.\n");
+ return -ENODEV;
+}
+
+static int uas_find_uas_alt_setting(struct usb_interface *intf)
+{
+ int i;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int sg_supported = udev->bus->sg_tablesize != 0;
+
+ for (i = 0; i < intf->num_altsetting; i++) {
+ struct usb_host_interface *alt = &intf->altsetting[i];
+
+ if (uas_is_interface(alt)) {
+ if (!sg_supported)
+ return uas_isnt_supported(udev);
+ return alt->desc.bAlternateSetting;
+ }
+ }
+
+ return -ENODEV;
+}
+
+static int uas_find_endpoints(struct usb_host_interface *alt,
+ struct usb_host_endpoint *eps[])
+{
+ struct usb_host_endpoint *endpoint = alt->endpoint;
+ unsigned i, n_endpoints = alt->desc.bNumEndpoints;
+
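+ /*
+ * Per the UAS spec, the Pipe Usage descriptor pipe IDs are 1 = command,
+ * 2 = status, 3 = data-in and 4 = data-out, hence the eps[pipe_id - 1]
+ * indexing below.
+ */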
+ for (i = 0; i < n_endpoints; i++) {
+ unsigned char *extra = endpoint[i].extra;
+ int len = endpoint[i].extralen;
+ while (len >= 3) {
+ if (extra[1] == USB_DT_PIPE_USAGE) {
+ unsigned pipe_id = extra[2];
+ if (pipe_id > 0 && pipe_id < 5)
+ eps[pipe_id - 1] = &endpoint[i];
+ break;
+ }
+ len -= extra[0];
+ extra += extra[0];
+ }
+ }
+
+ if (!eps[0] || !eps[1] || !eps[2] || !eps[3])
+ return -ENODEV;
+
+ return 0;
+}
+
+static int uas_use_uas_driver(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_host_endpoint *eps[4] = { };
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ unsigned long flags = id->driver_info;
+ int r, alt;
+
+ usb_stor_adjust_quirks(udev, &flags);
+
+ if (flags & US_FL_IGNORE_UAS)
+ return 0;
+
+ if (udev->speed >= USB_SPEED_SUPER && !hcd->can_do_streams)
+ return 0;
+
+ alt = uas_find_uas_alt_setting(intf);
+ if (alt < 0)
+ return 0;
+
+ r = uas_find_endpoints(&intf->altsetting[alt], eps);
+ if (r < 0)
+ return 0;
+
+ return 1;
+}
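This header is meant to be shared by uas.c and usb-storage so both probe paths agree on which driver claims an interface. A minimal sketch of the intended use follows; only uas_use_uas_driver() comes from the code above, the surrounding function name and return codes are illustrative:

	/* Sketch: a Bulk-Only probe stepping aside when UAS will bind instead. */
	static int example_storage_probe(struct usb_interface *intf,
					 const struct usb_device_id *id)
	{
		if (uas_use_uas_driver(intf, id))
			return -ENXIO;	/* let the uas driver claim the device */

		/* ... continue with Bulk-Only Transport setup ... */
		return 0;
	}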
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index d966b59f7d7b..a7ac97cc5949 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -2,6 +2,7 @@
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
@@ -13,17 +14,21 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/usb_usual.h>
#include <linux/usb/hcd.h>
#include <linux/usb/storage.h>
#include <linux/usb/uas.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
+#include "uas-detect.h"
+
/*
* The r00-r01c specs define this version of the SENSE IU data structure.
* It's still in use by several different firmware releases.
@@ -45,12 +50,17 @@ struct uas_dev_info {
struct usb_anchor sense_urbs;
struct usb_anchor data_urbs;
int qdepth, resetting;
- struct response_ui response;
+ struct response_iu response;
unsigned cmd_pipe, status_pipe, data_in_pipe, data_out_pipe;
unsigned use_streams:1;
unsigned uas_sense_old:1;
+ unsigned running_task:1;
+ unsigned shutdown:1;
struct scsi_cmnd *cmnd;
spinlock_t lock;
+ struct work_struct work;
+ struct list_head inflight_list;
+ struct list_head dead_list;
};
enum {
@@ -85,103 +95,117 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo, gfp_t gfp);
static void uas_do_work(struct work_struct *work);
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller);
+static void uas_free_streams(struct uas_dev_info *devinfo);
+static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *caller);
-static DECLARE_WORK(uas_work, uas_do_work);
-static DEFINE_SPINLOCK(uas_work_lock);
-static LIST_HEAD(uas_work_list);
-
+/* Must be called with devinfo->lock held; temporarily drops the lock */
static void uas_unlink_data_urbs(struct uas_dev_info *devinfo,
- struct uas_cmd_info *cmdinfo)
+ struct uas_cmd_info *cmdinfo,
+ unsigned long *lock_flags)
{
- unsigned long flags;
-
/*
* The UNLINK_DATA_URBS flag makes sure uas_try_complete
* (called by urb completion) doesn't release cmdinfo
* underneath us.
*/
- spin_lock_irqsave(&devinfo->lock, flags);
cmdinfo->state |= UNLINK_DATA_URBS;
- spin_unlock_irqrestore(&devinfo->lock, flags);
+ spin_unlock_irqrestore(&devinfo->lock, *lock_flags);
if (cmdinfo->data_in_urb)
usb_unlink_urb(cmdinfo->data_in_urb);
if (cmdinfo->data_out_urb)
usb_unlink_urb(cmdinfo->data_out_urb);
- spin_lock_irqsave(&devinfo->lock, flags);
+ spin_lock_irqsave(&devinfo->lock, *lock_flags);
cmdinfo->state &= ~UNLINK_DATA_URBS;
- spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_do_work(struct work_struct *work)
{
+ struct uas_dev_info *devinfo =
+ container_of(work, struct uas_dev_info, work);
struct uas_cmd_info *cmdinfo;
- struct uas_cmd_info *temp;
- struct list_head list;
unsigned long flags;
int err;
- spin_lock_irq(&uas_work_lock);
- list_replace_init(&uas_work_list, &list);
- spin_unlock_irq(&uas_work_lock);
-
- list_for_each_entry_safe(cmdinfo, temp, &list, list) {
+ spin_lock_irqsave(&devinfo->lock, flags);
+ list_for_each_entry(cmdinfo, &devinfo->inflight_list, list) {
struct scsi_pointer *scp = (void *)cmdinfo;
- struct scsi_cmnd *cmnd = container_of(scp,
- struct scsi_cmnd, SCp);
- struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
- spin_lock_irqsave(&devinfo->lock, flags);
- err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
+ struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd,
+ SCp);
+
+ if (!(cmdinfo->state & IS_IN_WORK_LIST))
+ continue;
+
+ err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_NOIO);
if (!err)
cmdinfo->state &= ~IS_IN_WORK_LIST;
- spin_unlock_irqrestore(&devinfo->lock, flags);
- if (err) {
- list_del(&cmdinfo->list);
- spin_lock_irq(&uas_work_lock);
- list_add_tail(&cmdinfo->list, &uas_work_list);
- spin_unlock_irq(&uas_work_lock);
- schedule_work(&uas_work);
- }
+ else
+ schedule_work(&devinfo->work);
}
+ spin_unlock_irqrestore(&devinfo->lock, flags);
}
-static void uas_abort_work(struct uas_dev_info *devinfo)
+static void uas_mark_cmd_dead(struct uas_dev_info *devinfo,
+ struct uas_cmd_info *cmdinfo,
+ int result, const char *caller)
+{
+ struct scsi_pointer *scp = (void *)cmdinfo;
+ struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd, SCp);
+
+ uas_log_cmd_state(cmnd, caller);
+ WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
+ WARN_ON_ONCE(cmdinfo->state & COMMAND_ABORTED);
+ cmdinfo->state |= COMMAND_ABORTED;
+ cmdinfo->state &= ~IS_IN_WORK_LIST;
+ cmnd->result = result << 16;
+ list_move_tail(&cmdinfo->list, &devinfo->dead_list);
+}
+
+static void uas_abort_inflight(struct uas_dev_info *devinfo, int result,
+ const char *caller)
{
struct uas_cmd_info *cmdinfo;
struct uas_cmd_info *temp;
- struct list_head list;
unsigned long flags;
- spin_lock_irq(&uas_work_lock);
- list_replace_init(&uas_work_list, &list);
- spin_unlock_irq(&uas_work_lock);
+ spin_lock_irqsave(&devinfo->lock, flags);
+ list_for_each_entry_safe(cmdinfo, temp, &devinfo->inflight_list, list)
+ uas_mark_cmd_dead(devinfo, cmdinfo, result, caller);
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+}
+
+static void uas_add_work(struct uas_cmd_info *cmdinfo)
+{
+ struct scsi_pointer *scp = (void *)cmdinfo;
+ struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd, SCp);
+ struct uas_dev_info *devinfo = cmnd->device->hostdata;
+
+ WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
+ cmdinfo->state |= IS_IN_WORK_LIST;
+ schedule_work(&devinfo->work);
+}
+
+static void uas_zap_dead(struct uas_dev_info *devinfo)
+{
+ struct uas_cmd_info *cmdinfo;
+ struct uas_cmd_info *temp;
+ unsigned long flags;
spin_lock_irqsave(&devinfo->lock, flags);
- list_for_each_entry_safe(cmdinfo, temp, &list, list) {
+ list_for_each_entry_safe(cmdinfo, temp, &devinfo->dead_list, list) {
struct scsi_pointer *scp = (void *)cmdinfo;
- struct scsi_cmnd *cmnd = container_of(scp,
- struct scsi_cmnd, SCp);
- struct uas_dev_info *di = (void *)cmnd->device->hostdata;
-
- if (di == devinfo) {
- cmdinfo->state |= COMMAND_ABORTED;
- cmdinfo->state &= ~IS_IN_WORK_LIST;
- if (devinfo->resetting) {
- /* uas_stat_cmplt() will not do that
- * when a device reset is in
- * progress */
- cmdinfo->state &= ~COMMAND_INFLIGHT;
- }
- uas_try_complete(cmnd, __func__);
- } else {
- /* not our uas device, relink into list */
- list_del(&cmdinfo->list);
- spin_lock_irq(&uas_work_lock);
- list_add_tail(&cmdinfo->list, &uas_work_list);
- spin_unlock_irq(&uas_work_lock);
- }
+ struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd,
+ SCp);
+ uas_log_cmd_state(cmnd, __func__);
+ WARN_ON_ONCE(!(cmdinfo->state & COMMAND_ABORTED));
+ /* all urbs are killed, clear inflight bits */
+ cmdinfo->state &= ~(COMMAND_INFLIGHT |
+ DATA_IN_URB_INFLIGHT |
+ DATA_OUT_URB_INFLIGHT);
+ uas_try_complete(cmnd, __func__);
}
+ devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
}
@@ -259,20 +283,19 @@ static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
- WARN_ON(!spin_is_locked(&devinfo->lock));
+ WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
if (cmdinfo->state & (COMMAND_INFLIGHT |
DATA_IN_URB_INFLIGHT |
DATA_OUT_URB_INFLIGHT |
UNLINK_DATA_URBS))
return -EBUSY;
- BUG_ON(cmdinfo->state & COMMAND_COMPLETED);
+ WARN_ON_ONCE(cmdinfo->state & COMMAND_COMPLETED);
cmdinfo->state |= COMMAND_COMPLETED;
usb_free_urb(cmdinfo->data_in_urb);
usb_free_urb(cmdinfo->data_out_urb);
- if (cmdinfo->state & COMMAND_ABORTED) {
+ if (cmdinfo->state & COMMAND_ABORTED)
scmd_printk(KERN_INFO, cmnd, "abort completed\n");
- cmnd->result = DID_ABORT << 16;
- }
+ list_del(&cmdinfo->list);
cmnd->scsi_done(cmnd);
return 0;
}
@@ -286,11 +309,7 @@ static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
cmdinfo->state |= direction | SUBMIT_STATUS_URB;
err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
if (err) {
- spin_lock(&uas_work_lock);
- list_add_tail(&cmdinfo->list, &uas_work_list);
- cmdinfo->state |= IS_IN_WORK_LIST;
- spin_unlock(&uas_work_lock);
- schedule_work(&uas_work);
+ uas_add_work(cmdinfo);
}
}
@@ -298,14 +317,20 @@ static void uas_stat_cmplt(struct urb *urb)
{
struct iu *iu = urb->transfer_buffer;
struct Scsi_Host *shost = urb->context;
- struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
struct scsi_cmnd *cmnd;
struct uas_cmd_info *cmdinfo;
unsigned long flags;
u16 tag;
if (urb->status) {
- dev_err(&urb->dev->dev, "URB BAD STATUS %d\n", urb->status);
+ if (urb->status == -ENOENT) {
+ dev_err(&urb->dev->dev, "stat urb: killed, stream %d\n",
+ urb->stream_id);
+ } else {
+ dev_err(&urb->dev->dev, "stat urb: status %d\n",
+ urb->status);
+ }
usb_free_urb(urb);
return;
}
@@ -324,6 +349,9 @@ static void uas_stat_cmplt(struct urb *urb)
if (!cmnd) {
if (iu->iu_id == IU_ID_RESPONSE) {
+ if (!devinfo->running_task)
+ dev_warn(&urb->dev->dev,
+ "stat urb: recv unexpected response iu\n");
/* store results for uas_eh_task_mgmt() */
memcpy(&devinfo->response, iu, sizeof(devinfo->response));
}
@@ -346,17 +374,25 @@ static void uas_stat_cmplt(struct urb *urb)
uas_sense(urb, cmnd);
if (cmnd->result != 0) {
/* cancel data transfers on error */
- spin_unlock_irqrestore(&devinfo->lock, flags);
- uas_unlink_data_urbs(devinfo, cmdinfo);
- spin_lock_irqsave(&devinfo->lock, flags);
+ uas_unlink_data_urbs(devinfo, cmdinfo, &flags);
}
cmdinfo->state &= ~COMMAND_INFLIGHT;
uas_try_complete(cmnd, __func__);
break;
case IU_ID_READ_READY:
+ if (!cmdinfo->data_in_urb ||
+ (cmdinfo->state & DATA_IN_URB_INFLIGHT)) {
+ scmd_printk(KERN_ERR, cmnd, "unexpected read rdy\n");
+ break;
+ }
uas_xfer_data(urb, cmnd, SUBMIT_DATA_IN_URB);
break;
case IU_ID_WRITE_READY:
+ if (!cmdinfo->data_out_urb ||
+ (cmdinfo->state & DATA_OUT_URB_INFLIGHT)) {
+ scmd_printk(KERN_ERR, cmnd, "unexpected write rdy\n");
+ break;
+ }
uas_xfer_data(urb, cmnd, SUBMIT_DATA_OUT_URB);
break;
default:
@@ -383,8 +419,15 @@ static void uas_data_cmplt(struct urb *urb)
sdb = scsi_out(cmnd);
cmdinfo->state &= ~DATA_OUT_URB_INFLIGHT;
}
- BUG_ON(sdb == NULL);
- if (urb->status) {
+ if (sdb == NULL) {
+ WARN_ON_ONCE(1);
+ } else if (urb->status) {
+ if (urb->status != -ECONNRESET) {
+ uas_log_cmd_state(cmnd, __func__);
+ scmd_printk(KERN_ERR, cmnd,
+ "data cmplt err %d stream %d\n",
+ urb->status, urb->stream_id);
+ }
		/* error: no data transferred */
sdb->resid = sdb->length;
} else {
@@ -394,6 +437,17 @@ static void uas_data_cmplt(struct urb *urb)
spin_unlock_irqrestore(&devinfo->lock, flags);
}
+static void uas_cmd_cmplt(struct urb *urb)
+{
+ struct scsi_cmnd *cmnd = urb->context;
+
+ if (urb->status) {
+ uas_log_cmd_state(cmnd, __func__);
+ scmd_printk(KERN_ERR, cmnd, "cmd cmplt err %d\n", urb->status);
+ }
+ usb_free_urb(urb);
+}
+
static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
unsigned int pipe, u16 stream_id,
struct scsi_cmnd *cmnd,
@@ -408,8 +462,7 @@ static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
goto out;
usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length,
uas_data_cmplt, cmnd);
- if (devinfo->use_streams)
- urb->stream_id = stream_id;
+ urb->stream_id = stream_id;
urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
urb->sg = sdb->table.sgl;
out:
@@ -442,7 +495,7 @@ static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
}
static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
- struct scsi_cmnd *cmnd, u16 stream_id)
+ struct scsi_cmnd *cmnd)
{
struct usb_device *udev = devinfo->udev;
struct scsi_device *sdev = cmnd->device;
@@ -472,7 +525,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu) + len,
- usb_free_urb, NULL);
+ uas_cmd_cmplt, cmnd);
urb->transfer_flags |= URB_FREE_BUFFER;
out:
return urb;
@@ -512,13 +565,17 @@ static int uas_submit_task_urb(struct scsi_cmnd *cmnd, gfp_t gfp,
}
usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu),
- usb_free_urb, NULL);
+ uas_cmd_cmplt, cmnd);
urb->transfer_flags |= URB_FREE_BUFFER;
+ usb_anchor_urb(urb, &devinfo->cmd_urbs);
err = usb_submit_urb(urb, gfp);
- if (err)
+ if (err) {
+ usb_unanchor_urb(urb);
+ uas_log_cmd_state(cmnd, __func__);
+ scmd_printk(KERN_ERR, cmnd, "task submission err %d\n", err);
goto err;
- usb_anchor_urb(urb, &devinfo->cmd_urbs);
+ }
return 0;
@@ -533,38 +590,43 @@ err:
* daft to me.
*/
-static int uas_submit_sense_urb(struct Scsi_Host *shost,
- gfp_t gfp, unsigned int stream)
+static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd,
+ gfp_t gfp, unsigned int stream)
{
- struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
struct urb *urb;
+ int err;
urb = uas_alloc_sense_urb(devinfo, gfp, shost, stream);
if (!urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
- if (usb_submit_urb(urb, gfp)) {
+ return NULL;
+ usb_anchor_urb(urb, &devinfo->sense_urbs);
+ err = usb_submit_urb(urb, gfp);
+ if (err) {
+ usb_unanchor_urb(urb);
+ uas_log_cmd_state(cmnd, __func__);
shost_printk(KERN_INFO, shost,
- "sense urb submission failure\n");
+ "sense urb submission error %d stream %d\n",
+ err, stream);
usb_free_urb(urb);
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return NULL;
}
- usb_anchor_urb(urb, &devinfo->sense_urbs);
- return 0;
+ return urb;
}
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo, gfp_t gfp)
{
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct urb *urb;
int err;
- WARN_ON(!spin_is_locked(&devinfo->lock));
+ WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
if (cmdinfo->state & SUBMIT_STATUS_URB) {
- err = uas_submit_sense_urb(cmnd->device->host, gfp,
- cmdinfo->stream);
- if (err) {
- return err;
- }
+ urb = uas_submit_sense_urb(cmnd, gfp, cmdinfo->stream);
+ if (!urb)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~SUBMIT_STATUS_URB;
}
@@ -578,14 +640,18 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
}
if (cmdinfo->state & SUBMIT_DATA_IN_URB) {
- if (usb_submit_urb(cmdinfo->data_in_urb, gfp)) {
+ usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs);
+ err = usb_submit_urb(cmdinfo->data_in_urb, gfp);
+ if (err) {
+ usb_unanchor_urb(cmdinfo->data_in_urb);
+ uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
- "data in urb submission failure\n");
+ "data in urb submission error %d stream %d\n",
+ err, cmdinfo->data_in_urb->stream_id);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
cmdinfo->state |= DATA_IN_URB_INFLIGHT;
- usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs);
}
if (cmdinfo->state & ALLOC_DATA_OUT_URB) {
@@ -598,33 +664,37 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
}
if (cmdinfo->state & SUBMIT_DATA_OUT_URB) {
- if (usb_submit_urb(cmdinfo->data_out_urb, gfp)) {
+ usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs);
+ err = usb_submit_urb(cmdinfo->data_out_urb, gfp);
+ if (err) {
+ usb_unanchor_urb(cmdinfo->data_out_urb);
+ uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
- "data out urb submission failure\n");
+ "data out urb submission error %d stream %d\n",
+ err, cmdinfo->data_out_urb->stream_id);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
- usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs);
}
if (cmdinfo->state & ALLOC_CMD_URB) {
- cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd,
- cmdinfo->stream);
+ cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd);
if (!cmdinfo->cmd_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_CMD_URB;
}
if (cmdinfo->state & SUBMIT_CMD_URB) {
- usb_get_urb(cmdinfo->cmd_urb);
- if (usb_submit_urb(cmdinfo->cmd_urb, gfp)) {
+ usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs);
+ err = usb_submit_urb(cmdinfo->cmd_urb, gfp);
+ if (err) {
+ usb_unanchor_urb(cmdinfo->cmd_urb);
+ uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
- "cmd urb submission failure\n");
+ "cmd urb submission error %d\n", err);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
- usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs);
- usb_put_urb(cmdinfo->cmd_urb);
cmdinfo->cmd_urb = NULL;
cmdinfo->state &= ~SUBMIT_CMD_URB;
cmdinfo->state |= COMMAND_INFLIGHT;
@@ -644,18 +714,22 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
+ spin_lock_irqsave(&devinfo->lock, flags);
+
if (devinfo->resetting) {
cmnd->result = DID_ERROR << 16;
cmnd->scsi_done(cmnd);
+ spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
- spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->cmnd) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
+ memset(cmdinfo, 0, sizeof(*cmdinfo));
+
if (blk_rq_tagged(cmnd->request)) {
cmdinfo->stream = cmnd->request->tag + 2;
} else {
@@ -692,13 +766,10 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
- spin_lock(&uas_work_lock);
- list_add_tail(&cmdinfo->list, &uas_work_list);
- cmdinfo->state |= IS_IN_WORK_LIST;
- spin_unlock(&uas_work_lock);
- schedule_work(&uas_work);
+ uas_add_work(cmdinfo);
}
+ list_add_tail(&cmdinfo->list, &devinfo->inflight_list);
spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
@@ -709,46 +780,78 @@ static int uas_eh_task_mgmt(struct scsi_cmnd *cmnd,
const char *fname, u8 function)
{
struct Scsi_Host *shost = cmnd->device->host;
- struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
- u16 tag = devinfo->qdepth - 1;
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+ u16 tag = devinfo->qdepth;
unsigned long flags;
+ struct urb *sense_urb;
+ int result = SUCCESS;
spin_lock_irqsave(&devinfo->lock, flags);
+
+ if (devinfo->resetting) {
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return FAILED;
+ }
+
+ if (devinfo->running_task) {
+ shost_printk(KERN_INFO, shost,
+ "%s: %s: error already running a task\n",
+ __func__, fname);
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return FAILED;
+ }
+
+ devinfo->running_task = 1;
memset(&devinfo->response, 0, sizeof(devinfo->response));
- if (uas_submit_sense_urb(shost, GFP_ATOMIC, tag)) {
+ sense_urb = uas_submit_sense_urb(cmnd, GFP_NOIO,
+ devinfo->use_streams ? tag : 0);
+ if (!sense_urb) {
shost_printk(KERN_INFO, shost,
"%s: %s: submit sense urb failed\n",
__func__, fname);
+ devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
- if (uas_submit_task_urb(cmnd, GFP_ATOMIC, function, tag)) {
+ if (uas_submit_task_urb(cmnd, GFP_NOIO, function, tag)) {
shost_printk(KERN_INFO, shost,
"%s: %s: submit task mgmt urb failed\n",
__func__, fname);
+ devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
+ usb_kill_urb(sense_urb);
return FAILED;
}
spin_unlock_irqrestore(&devinfo->lock, flags);
if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 3000) == 0) {
+ /*
+ * Note we deliberately do not clear running_task here. If we
+ * allow new tasks to be submitted, there is no way to figure
+ * out if a received response_iu is for the failed task or for
+ * the new one. A bus-reset will eventually clear running_task.
+ */
shost_printk(KERN_INFO, shost,
"%s: %s timed out\n", __func__, fname);
return FAILED;
}
+
+ spin_lock_irqsave(&devinfo->lock, flags);
+ devinfo->running_task = 0;
if (be16_to_cpu(devinfo->response.tag) != tag) {
shost_printk(KERN_INFO, shost,
"%s: %s failed (wrong tag %d/%d)\n", __func__,
fname, be16_to_cpu(devinfo->response.tag), tag);
- return FAILED;
- }
- if (devinfo->response.response_code != RC_TMF_COMPLETE) {
+ result = FAILED;
+ } else if (devinfo->response.response_code != RC_TMF_COMPLETE) {
shost_printk(KERN_INFO, shost,
"%s: %s failed (rc 0x%x)\n", __func__,
fname, devinfo->response.response_code);
- return FAILED;
+ result = FAILED;
}
- return SUCCESS;
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+
+ return result;
}
static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
@@ -758,22 +861,19 @@ static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
unsigned long flags;
int ret;
- uas_log_cmd_state(cmnd, __func__);
spin_lock_irqsave(&devinfo->lock, flags);
- cmdinfo->state |= COMMAND_ABORTED;
- if (cmdinfo->state & IS_IN_WORK_LIST) {
- spin_lock(&uas_work_lock);
- list_del(&cmdinfo->list);
- cmdinfo->state &= ~IS_IN_WORK_LIST;
- spin_unlock(&uas_work_lock);
+
+ if (devinfo->resetting) {
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return FAILED;
}
+
+ uas_mark_cmd_dead(devinfo, cmdinfo, DID_ABORT, __func__);
if (cmdinfo->state & COMMAND_INFLIGHT) {
spin_unlock_irqrestore(&devinfo->lock, flags);
ret = uas_eh_task_mgmt(cmnd, "ABORT TASK", TMF_ABORT_TASK);
} else {
- spin_unlock_irqrestore(&devinfo->lock, flags);
- uas_unlink_data_urbs(devinfo, cmdinfo);
- spin_lock_irqsave(&devinfo->lock, flags);
+ uas_unlink_data_urbs(devinfo, cmdinfo, &flags);
uas_try_complete(cmnd, __func__);
spin_unlock_irqrestore(&devinfo->lock, flags);
ret = SUCCESS;
@@ -795,14 +895,25 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
struct usb_device *udev = devinfo->udev;
int err;
+ err = usb_lock_device_for_reset(udev, devinfo->intf);
+ if (err) {
+ shost_printk(KERN_ERR, sdev->host,
+ "%s FAILED to get lock err %d\n", __func__, err);
+ return FAILED;
+ }
+
+ shost_printk(KERN_INFO, sdev->host, "%s start\n", __func__);
devinfo->resetting = 1;
- uas_abort_work(devinfo);
+ uas_abort_inflight(devinfo, DID_RESET, __func__);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
+ uas_zap_dead(devinfo);
err = usb_reset_device(udev);
devinfo->resetting = 0;
+ usb_unlock_device(udev);
+
if (err) {
shost_printk(KERN_INFO, sdev->host, "%s FAILED\n", __func__);
return FAILED;
@@ -814,7 +925,25 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
static int uas_slave_alloc(struct scsi_device *sdev)
{
- sdev->hostdata = (void *)sdev->host->hostdata[0];
+ sdev->hostdata = (void *)sdev->host->hostdata;
+
+ /* USB has unusual DMA-alignment requirements: Although the
+ * starting address of each scatter-gather element doesn't matter,
+ * the length of each element except the last must be divisible
+ * by the Bulk maxpacket value. There's currently no way to
+ * express this by block-layer constraints, so we'll cop out
+ * and simply require addresses to be aligned at 512-byte
+ * boundaries. This is okay since most block I/O involves
+ * hardware sectors that are multiples of 512 bytes in length,
+ * and since host controllers up through USB 2.0 have maxpacket
+ * values no larger than 512.
+ *
+ * But it doesn't suffice for Wireless USB, where Bulk maxpacket
+ * values can be as large as 2048. To make that work properly
+ * will require changes to the block layer.
+ */
+ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+
return 0;
}
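The 512-byte alignment cop-out described in the comment above means the block layer will bounce-buffer any request whose buffer address or length is not a multiple of 512. As a rough sketch (the helper name is illustrative, not part of this patch), the constraint set by blk_queue_update_dma_alignment(sdev->request_queue, 512 - 1) amounts to:

static inline bool example_buf_ok_for_uas(const void *buf, size_t len)
{
	const unsigned long mask = 512 - 1;	/* same mask passed to
						 * blk_queue_update_dma_alignment() */

	/* both the start address and the length must be 512-byte multiples,
	 * otherwise the block layer falls back to a bounce buffer */
	return !(((unsigned long)buf | len) & mask);
}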
@@ -822,7 +951,7 @@ static int uas_slave_configure(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo = sdev->hostdata;
scsi_set_tag_type(sdev, MSG_ORDERED_TAG);
- scsi_activate_tcq(sdev, devinfo->qdepth - 3);
+ scsi_activate_tcq(sdev, devinfo->qdepth - 2);
return 0;
}
@@ -843,7 +972,14 @@ static struct scsi_host_template uas_host_template = {
.ordered_tag = 1,
};
+#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
+ vendorName, productName, useProtocol, useTransport, \
+ initFunction, flags) \
+{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
+ .driver_info = (flags) }
+
static struct usb_device_id uas_usb_ids[] = {
+# include "unusual_uas.h"
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) },
/* 0xaa is a prototype device I happen to have access to */
@@ -852,105 +988,55 @@ static struct usb_device_id uas_usb_ids[] = {
};
MODULE_DEVICE_TABLE(usb, uas_usb_ids);
-static int uas_is_interface(struct usb_host_interface *intf)
-{
- return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE &&
- intf->desc.bInterfaceSubClass == USB_SC_SCSI &&
- intf->desc.bInterfaceProtocol == USB_PR_UAS);
-}
-
-static int uas_isnt_supported(struct usb_device *udev)
-{
- struct usb_hcd *hcd = bus_to_hcd(udev->bus);
-
- dev_warn(&udev->dev, "The driver for the USB controller %s does not "
- "support scatter-gather which is\n",
- hcd->driver->description);
- dev_warn(&udev->dev, "required by the UAS driver. Please try an"
- "alternative USB controller if you wish to use UAS.\n");
- return -ENODEV;
-}
+#undef UNUSUAL_DEV
static int uas_switch_interface(struct usb_device *udev,
- struct usb_interface *intf)
+ struct usb_interface *intf)
{
- int i;
- int sg_supported = udev->bus->sg_tablesize != 0;
-
- for (i = 0; i < intf->num_altsetting; i++) {
- struct usb_host_interface *alt = &intf->altsetting[i];
-
- if (uas_is_interface(alt)) {
- if (!sg_supported)
- return uas_isnt_supported(udev);
- return usb_set_interface(udev,
- alt->desc.bInterfaceNumber,
- alt->desc.bAlternateSetting);
- }
- }
+ int alt;
+
+ alt = uas_find_uas_alt_setting(intf);
+ if (alt < 0)
+ return alt;
- return -ENODEV;
+ return usb_set_interface(udev,
+ intf->altsetting[0].desc.bInterfaceNumber, alt);
}
-static void uas_configure_endpoints(struct uas_dev_info *devinfo)
+static int uas_configure_endpoints(struct uas_dev_info *devinfo)
{
struct usb_host_endpoint *eps[4] = { };
- struct usb_interface *intf = devinfo->intf;
struct usb_device *udev = devinfo->udev;
- struct usb_host_endpoint *endpoint = intf->cur_altsetting->endpoint;
- unsigned i, n_endpoints = intf->cur_altsetting->desc.bNumEndpoints;
+ int r;
devinfo->uas_sense_old = 0;
devinfo->cmnd = NULL;
- for (i = 0; i < n_endpoints; i++) {
- unsigned char *extra = endpoint[i].extra;
- int len = endpoint[i].extralen;
- while (len > 1) {
- if (extra[1] == USB_DT_PIPE_USAGE) {
- unsigned pipe_id = extra[2];
- if (pipe_id > 0 && pipe_id < 5)
- eps[pipe_id - 1] = &endpoint[i];
- break;
- }
- len -= extra[0];
- extra += extra[0];
- }
- }
+ r = uas_find_endpoints(devinfo->intf->cur_altsetting, eps);
+ if (r)
+ return r;
- /*
- * Assume that if we didn't find a control pipe descriptor, we're
- * using a device with old firmware that happens to be set up like
- * this.
- */
- if (!eps[0]) {
- devinfo->cmd_pipe = usb_sndbulkpipe(udev, 1);
- devinfo->status_pipe = usb_rcvbulkpipe(udev, 1);
- devinfo->data_in_pipe = usb_rcvbulkpipe(udev, 2);
- devinfo->data_out_pipe = usb_sndbulkpipe(udev, 2);
-
- eps[1] = usb_pipe_endpoint(udev, devinfo->status_pipe);
- eps[2] = usb_pipe_endpoint(udev, devinfo->data_in_pipe);
- eps[3] = usb_pipe_endpoint(udev, devinfo->data_out_pipe);
- } else {
- devinfo->cmd_pipe = usb_sndbulkpipe(udev,
- eps[0]->desc.bEndpointAddress);
- devinfo->status_pipe = usb_rcvbulkpipe(udev,
- eps[1]->desc.bEndpointAddress);
- devinfo->data_in_pipe = usb_rcvbulkpipe(udev,
- eps[2]->desc.bEndpointAddress);
- devinfo->data_out_pipe = usb_sndbulkpipe(udev,
- eps[3]->desc.bEndpointAddress);
- }
+ devinfo->cmd_pipe = usb_sndbulkpipe(udev,
+ usb_endpoint_num(&eps[0]->desc));
+ devinfo->status_pipe = usb_rcvbulkpipe(udev,
+ usb_endpoint_num(&eps[1]->desc));
+ devinfo->data_in_pipe = usb_rcvbulkpipe(udev,
+ usb_endpoint_num(&eps[2]->desc));
+ devinfo->data_out_pipe = usb_sndbulkpipe(udev,
+ usb_endpoint_num(&eps[3]->desc));
- devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1, 3, 256,
- GFP_KERNEL);
- if (devinfo->qdepth < 0) {
+ if (udev->speed != USB_SPEED_SUPER) {
devinfo->qdepth = 256;
devinfo->use_streams = 0;
} else {
+ devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1,
+ 3, 256, GFP_KERNEL);
+ if (devinfo->qdepth < 0)
+ return devinfo->qdepth;
devinfo->use_streams = 1;
}
+
+ return 0;
}
static void uas_free_streams(struct uas_dev_info *devinfo)
@@ -964,30 +1050,23 @@ static void uas_free_streams(struct uas_dev_info *devinfo)
usb_free_streams(devinfo->intf, eps, 3, GFP_KERNEL);
}
-/*
- * XXX: What I'd like to do here is register a SCSI host for each USB host in
- * the system. Follow usb-storage's design of registering a SCSI host for
- * each USB device for the moment. Can implement this by walking up the
- * USB hierarchy until we find a USB host.
- */
static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
- int result;
- struct Scsi_Host *shost;
+ int result = -ENOMEM;
+ struct Scsi_Host *shost = NULL;
struct uas_dev_info *devinfo;
struct usb_device *udev = interface_to_usbdev(intf);
- if (uas_switch_interface(udev, intf))
+ if (!uas_use_uas_driver(intf, id))
return -ENODEV;
- devinfo = kmalloc(sizeof(struct uas_dev_info), GFP_KERNEL);
- if (!devinfo)
- return -ENOMEM;
+ if (uas_switch_interface(udev, intf))
+ return -ENODEV;
- result = -ENOMEM;
- shost = scsi_host_alloc(&uas_host_template, sizeof(void *));
+ shost = scsi_host_alloc(&uas_host_template,
+ sizeof(struct uas_dev_info));
if (!shost)
- goto free;
+ goto set_alt0;
shost->max_cmd_len = 16 + 252;
shost->max_id = 1;
@@ -995,33 +1074,40 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
shost->max_channel = 0;
shost->sg_tablesize = udev->bus->sg_tablesize;
+ devinfo = (struct uas_dev_info *)shost->hostdata;
devinfo->intf = intf;
devinfo->udev = udev;
devinfo->resetting = 0;
+ devinfo->running_task = 0;
+ devinfo->shutdown = 0;
init_usb_anchor(&devinfo->cmd_urbs);
init_usb_anchor(&devinfo->sense_urbs);
init_usb_anchor(&devinfo->data_urbs);
spin_lock_init(&devinfo->lock);
- uas_configure_endpoints(devinfo);
+ INIT_WORK(&devinfo->work, uas_do_work);
+ INIT_LIST_HEAD(&devinfo->inflight_list);
+ INIT_LIST_HEAD(&devinfo->dead_list);
- result = scsi_init_shared_tag_map(shost, devinfo->qdepth - 3);
+ result = uas_configure_endpoints(devinfo);
if (result)
- goto free;
+ goto set_alt0;
- result = scsi_add_host(shost, &intf->dev);
+ result = scsi_init_shared_tag_map(shost, devinfo->qdepth - 2);
if (result)
- goto deconfig_eps;
+ goto free_streams;
- shost->hostdata[0] = (unsigned long)devinfo;
+ result = scsi_add_host(shost, &intf->dev);
+ if (result)
+ goto free_streams;
scsi_scan_host(shost);
usb_set_intfdata(intf, shost);
return result;
-deconfig_eps:
+free_streams:
uas_free_streams(devinfo);
- free:
- kfree(devinfo);
+set_alt0:
+ usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
if (shost)
scsi_host_put(shost);
return result;
@@ -1029,45 +1115,146 @@ deconfig_eps:
static int uas_pre_reset(struct usb_interface *intf)
{
-/* XXX: Need to return 1 if it's not our device in error handling */
+ struct Scsi_Host *shost = usb_get_intfdata(intf);
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+ unsigned long flags;
+
+ if (devinfo->shutdown)
+ return 0;
+
+ /* Block new requests */
+ spin_lock_irqsave(shost->host_lock, flags);
+ scsi_block_requests(shost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /* Wait for any pending requests to complete */
+ flush_work(&devinfo->work);
+ if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 5000) == 0) {
+ shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
+ return 1;
+ }
+
+ uas_free_streams(devinfo);
+
return 0;
}
static int uas_post_reset(struct usb_interface *intf)
{
-/* XXX: Need to return 1 if it's not our device in error handling */
+ struct Scsi_Host *shost = usb_get_intfdata(intf);
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+ unsigned long flags;
+
+ if (devinfo->shutdown)
+ return 0;
+
+ if (uas_configure_endpoints(devinfo) != 0) {
+ shost_printk(KERN_ERR, shost,
+ "%s: alloc streams error after reset", __func__);
+ return 1;
+ }
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ scsi_report_bus_reset(shost, 0);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ scsi_unblock_requests(shost);
+
+ return 0;
+}
+
+static int uas_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct Scsi_Host *shost = usb_get_intfdata(intf);
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+
+ /* Wait for any pending requests to complete */
+ flush_work(&devinfo->work);
+ if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 5000) == 0) {
+ shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int uas_resume(struct usb_interface *intf)
+{
+ return 0;
+}
+
+static int uas_reset_resume(struct usb_interface *intf)
+{
+ struct Scsi_Host *shost = usb_get_intfdata(intf);
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+ unsigned long flags;
+
+ if (uas_configure_endpoints(devinfo) != 0) {
+ shost_printk(KERN_ERR, shost,
+ "%s: alloc streams error after reset", __func__);
+ return -EIO;
+ }
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ scsi_report_bus_reset(shost, 0);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
return 0;
}
static void uas_disconnect(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
- struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
devinfo->resetting = 1;
- uas_abort_work(devinfo);
+ cancel_work_sync(&devinfo->work);
+ uas_abort_inflight(devinfo, DID_NO_CONNECT, __func__);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
+ uas_zap_dead(devinfo);
scsi_remove_host(shost);
uas_free_streams(devinfo);
- kfree(devinfo);
+ scsi_host_put(shost);
}
/*
- * XXX: Should this plug into libusual so we can auto-upgrade devices from
- * Bulk-Only to UAS?
+ * Put the device back in usb-storage mode on shutdown, as some BIOS-es
+ * hang on reboot when the device is still in uas mode. Note the reset is
+ * necessary as some devices won't revert to usb-storage mode without it.
*/
+static void uas_shutdown(struct device *dev)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct Scsi_Host *shost = usb_get_intfdata(intf);
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+
+ if (system_state != SYSTEM_RESTART)
+ return;
+
+ devinfo->shutdown = 1;
+ uas_free_streams(devinfo);
+ usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
+ usb_reset_device(udev);
+}
+
static struct usb_driver uas_driver = {
.name = "uas",
.probe = uas_probe,
.disconnect = uas_disconnect,
.pre_reset = uas_pre_reset,
.post_reset = uas_post_reset,
+ .suspend = uas_suspend,
+ .resume = uas_resume,
+ .reset_resume = uas_reset_resume,
+ .drvwrap.driver.shutdown = uas_shutdown,
.id_table = uas_usb_ids,
};
module_usb_driver(uas_driver);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Matthew Wilcox and Sarah Sharp");
+MODULE_AUTHOR(
+ "Hans de Goede <hdegoede@redhat.com>, Matthew Wilcox and Sarah Sharp");
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index adbeb255616a..f4a82291894a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2086,6 +2086,11 @@ UNUSUAL_DEV( 0xed10, 0x7636, 0x0001, 0x0001,
"Digital MP3 Audio Player",
USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
+/* Unusual uas devices */
+#if IS_ENABLED(CONFIG_USB_UAS)
+#include "unusual_uas.h"
+#endif
+
/* Control/Bulk transport for all SubClass values */
USUAL_DEV(USB_SC_RBC, USB_PR_CB),
USUAL_DEV(USB_SC_8020, USB_PR_CB),
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
new file mode 100644
index 000000000000..7244444df8ee
--- /dev/null
+++ b/drivers/usb/storage/unusual_uas.h
@@ -0,0 +1,52 @@
+/* Driver for USB Attached SCSI devices - Unusual Devices File
+ *
+ * (c) 2013 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on the same file for the usb-storage driver, which is:
+ * (c) 2000-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
+ * (c) 2000 Adam J. Richter (adam@yggdrasil.com), Yggdrasil Computing, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * IMPORTANT NOTE: This file must be included in another file which defines
+ * a UNUSUAL_DEV macro before this file is included.
+ */
+
+/*
+ * If you edit this file, please try to keep it sorted first by VendorID,
+ * then by ProductID.
+ *
+ * If you want to add an entry for this file, be sure to include the
+ * following information:
+ * - a patch that adds the entry for your device, including your
+ * email address right above the entry (plus maybe a brief
+ * explanation of the reason for the entry),
+ * - lsusb -v output for the device
+ * Send your submission to Hans de Goede <hdegoede@redhat.com>
+ * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
+ */
+
+/*
+ * This is an example entry for the US_FL_IGNORE_UAS flag. Once we have an
+ * actual entry using US_FL_IGNORE_UAS this entry should be removed.
+ *
+ * UNUSUAL_DEV( 0xabcd, 0x1234, 0x0100, 0x0100,
+ * "Example",
+ * "Storage with broken UAS",
+ * USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ * US_FL_IGNORE_UAS),
+ */
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 1c0b89f2a138..f1c96261a501 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -72,6 +72,10 @@
#include "sierra_ms.h"
#include "option_ms.h"
+#if IS_ENABLED(CONFIG_USB_UAS)
+#include "uas-detect.h"
+#endif
+
/* Some informational data */
MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
MODULE_DESCRIPTION("USB Mass Storage driver for Linux");
@@ -459,14 +463,14 @@ static int associate_dev(struct us_data *us, struct usb_interface *intf)
#define TOLOWER(x) ((x) | 0x20)
/* Adjust device flags based on the "quirks=" module parameter */
-static void adjust_quirks(struct us_data *us)
+void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
{
char *p;
- u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
- u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
+ u16 vid = le16_to_cpu(udev->descriptor.idVendor);
+ u16 pid = le16_to_cpu(udev->descriptor.idProduct);
unsigned f = 0;
unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
- US_FL_FIX_CAPACITY |
+ US_FL_FIX_CAPACITY | US_FL_IGNORE_UAS |
US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
@@ -537,14 +541,18 @@ static void adjust_quirks(struct us_data *us)
case 's':
f |= US_FL_SINGLE_LUN;
break;
+ case 'u':
+ f |= US_FL_IGNORE_UAS;
+ break;
case 'w':
f |= US_FL_NO_WP_DETECT;
break;
/* Ignore unrecognized flag characters */
}
}
- us->fflags = (us->fflags & ~mask) | f;
+ *fflags = (*fflags & ~mask) | f;
}
+EXPORT_SYMBOL_GPL(usb_stor_adjust_quirks);
/* Get the unusual_devs entries and the string descriptors */
static int get_device_info(struct us_data *us, const struct usb_device_id *id,
@@ -564,7 +572,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
idesc->bInterfaceProtocol :
unusual_dev->useTransport;
us->fflags = id->driver_info;
- adjust_quirks(us);
+ usb_stor_adjust_quirks(us->pusb_dev, &us->fflags);
if (us->fflags & US_FL_IGNORE_DEVICE) {
dev_info(pdev, "device ignored\n");
@@ -1035,6 +1043,12 @@ static int storage_probe(struct usb_interface *intf,
int result;
int size;
+ /* If uas is enabled and this device can do uas then ignore it. */
+#if IS_ENABLED(CONFIG_USB_UAS)
+ if (uas_use_uas_driver(intf, id))
+ return -ENXIO;
+#endif
+
/*
* If the device isn't standard (is handled by a subdriver
* module) then don't accept it.
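With the parser change above, a device can also be blacklisted from uas at runtime: passing usb-storage.quirks=abcd:1234:u on the kernel command line sets US_FL_IGNORE_UAS for that VID:PID (abcd:1234 being the placeholder ID from unusual_uas.h). The consumer of the newly exported usb_stor_adjust_quirks() lives in uas-detect.h, which this diff does not include; a hypothetical caller might look roughly like this, under that assumption:

/* hypothetical sketch only -- the real check is uas_use_uas_driver() in
 * uas-detect.h, which is not part of this diff */
static bool example_device_wants_uas(struct usb_interface *intf,
				     const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned long flags = id->driver_info;	/* flags from the id table */

	/* let the "quirks=" module parameter override the built-in flags */
	usb_stor_adjust_quirks(udev, &flags);

	return !(flags & US_FL_IGNORE_UAS);
}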
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 75f70f04f37b..307e339a9478 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -201,4 +201,7 @@ extern int usb_stor_probe1(struct us_data **pus,
extern int usb_stor_probe2(struct us_data *us);
extern void usb_stor_disconnect(struct usb_interface *intf);
+extern void usb_stor_adjust_quirks(struct usb_device *dev,
+ unsigned long *fflags);
+
#endif
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 3b959e83b28e..0677139c6065 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -284,7 +284,7 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
struct device *dev = wusbhc->dev;
struct wusb_dev *wusb_dev;
struct wusb_port *port;
- unsigned idx, devnum;
+ unsigned idx;
mutex_lock(&wusbhc->mutex);
@@ -312,8 +312,6 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
goto error_unlock;
}
- devnum = idx + 2;
-
/* Make sure we are using no crypto on that "virtual port" */
wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0);
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 368360f9a93a..252c7bd9218a 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -75,8 +75,6 @@ void __wa_destroy(struct wahc *wa)
if (wa->dti_urb) {
usb_kill_urb(wa->dti_urb);
usb_put_urb(wa->dti_urb);
- usb_kill_urb(wa->buf_in_urb);
- usb_put_urb(wa->buf_in_urb);
}
kfree(wa->dti_buf);
wa_nep_destroy(wa);
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index a2ef84b8397e..f2a8d29e17b9 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -125,7 +125,8 @@ struct wa_rpipe {
enum wa_dti_state {
WA_DTI_TRANSFER_RESULT_PENDING,
- WA_DTI_ISOC_PACKET_STATUS_PENDING
+ WA_DTI_ISOC_PACKET_STATUS_PENDING,
+ WA_DTI_BUF_IN_DATA_PENDING
};
enum wa_quirks {
@@ -134,8 +135,20 @@ enum wa_quirks {
* requests to be concatenated and not sent as separate packets.
*/
WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC = 0x01,
+ /*
+ * The Alereon HWA can be instructed to not send transfer notifications
+ * as an optimization.
+ */
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS = 0x02,
};
+enum wa_vendor_specific_requests {
+ WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS = 0x4C,
+ WA_REQ_ALEREON_FEATURE_SET = 0x01,
+ WA_REQ_ALEREON_FEATURE_CLEAR = 0x00,
+};
+
+#define WA_MAX_BUF_IN_URBS 4
/**
* Instance of a HWA Host Controller
*
@@ -206,7 +219,9 @@ struct wahc {
u32 dti_isoc_xfer_in_progress;
u8 dti_isoc_xfer_seg;
struct urb *dti_urb; /* URB for reading xfer results */
- struct urb *buf_in_urb; /* URB for reading data in */
+ /* URBs for reading data in */
+ struct urb buf_in_urbs[WA_MAX_BUF_IN_URBS];
+ int active_buf_in_urbs; /* number of buf_in_urbs active. */
struct edc dti_edc; /* DTI error density counter */
void *dti_buf;
size_t dti_buf_size;
@@ -234,6 +249,7 @@ struct wahc {
extern int wa_create(struct wahc *wa, struct usb_interface *iface,
kernel_ulong_t);
extern void __wa_destroy(struct wahc *wa);
+extern int wa_dti_start(struct wahc *wa);
void wa_reset_all(struct wahc *wa);
@@ -275,6 +291,8 @@ static inline void wa_rpipe_init(struct wahc *wa)
static inline void wa_init(struct wahc *wa)
{
+ int index;
+
edc_init(&wa->nep_edc);
atomic_set(&wa->notifs_queued, 0);
wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
@@ -288,6 +306,10 @@ static inline void wa_init(struct wahc *wa)
INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
wa->dto_in_use = 0;
atomic_set(&wa->xfer_id_count, 1);
+ /* init the buf in URBs */
+ for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
+ usb_init_urb(&(wa->buf_in_urbs[index]));
+ wa->active_buf_in_urbs = 0;
}
/**
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index 6ca80a4efc1b..6d6da127f6de 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -298,7 +298,7 @@ static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
break;
}
itr += hdr->bLength;
- itr_size -= hdr->bDescriptorType;
+ itr_size -= hdr->bLength;
}
out:
return epcd;
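The one-line wa-rpipe.c fix above corrects a descriptor-walk bug: the remaining size was being decremented by bDescriptorType instead of bLength, so the loop could mis-step through the endpoint's extra-descriptor block. For reference, a correct walk follows this pattern (the helper below is an illustrative sketch, not code from the patch):

static const struct usb_descriptor_header *
example_find_descriptor(const void *buf, size_t len, u8 type)
{
	const u8 *itr = buf;

	while (len >= sizeof(struct usb_descriptor_header)) {
		const struct usb_descriptor_header *hdr = (const void *)itr;

		if (hdr->bLength < 2 || hdr->bLength > len)
			break;			/* malformed, stop walking */
		if (hdr->bDescriptorType == type)
			return hdr;
		itr += hdr->bLength;		/* advance by bLength ... */
		len -= hdr->bLength;		/* ... and shrink by bLength */
	}
	return NULL;
}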
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 3cd96e936d77..c8e2a47d62a7 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -167,6 +167,8 @@ struct wa_xfer {
static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
struct wa_seg *seg, int curr_iso_frame);
+static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
+ int starting_index, enum wa_seg_status status);
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
@@ -367,13 +369,13 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
break;
case WA_SEG_ERROR:
xfer->result = seg->result;
- dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zu(0x%08zX)\n",
+ dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
xfer, wa_xfer_id(xfer), seg->index, seg->result,
seg->result);
goto out;
case WA_SEG_ABORTED:
xfer->result = seg->result;
- dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zu(0x%08zX)\n",
+ dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
xfer, wa_xfer_id(xfer), seg->index, seg->result,
seg->result);
goto out;
@@ -390,6 +392,24 @@ out:
}
/*
+ * Mark the given segment as done. Return true if this completes the xfer.
+ * This should only be called for segs that have been submitted to an RPIPE.
+ * Delayed segs are not marked as submitted so they do not need to be marked
+ * as done when cleaning up.
+ *
+ * xfer->lock has to be locked
+ */
+static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
+ struct wa_seg *seg, enum wa_seg_status status)
+{
+ seg->status = status;
+ xfer->segs_done++;
+
+ /* check for done. */
+ return __wa_xfer_is_done(xfer);
+}
+
+/*
* Search for a transfer list ID on the HCD's URB list
*
* For 32 bit architectures, we use the pointer itself; for 64 bits, a
@@ -416,12 +436,51 @@ out:
struct wa_xfer_abort_buffer {
struct urb urb;
+ struct wahc *wa;
struct wa_xfer_abort cmd;
};
static void __wa_xfer_abort_cb(struct urb *urb)
{
struct wa_xfer_abort_buffer *b = urb->context;
+ struct wahc *wa = b->wa;
+
+ /*
+ * If the abort request URB failed, then the HWA did not get the abort
+ * command. Forcibly clean up the xfer without waiting for a Transfer
+ * Result from the HWA.
+ */
+ if (urb->status < 0) {
+ struct wa_xfer *xfer;
+ struct device *dev = &wa->usb_iface->dev;
+
+ xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
+ dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
+ __func__, urb->status);
+ if (xfer) {
+ unsigned long flags;
+ int done;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
+ __func__, xfer, wa_xfer_id(xfer));
+ spin_lock_irqsave(&xfer->lock, flags);
+ /* mark all segs as aborted. */
+ wa_complete_remaining_xfer_segs(xfer, 0,
+ WA_SEG_ABORTED);
+ done = __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ wa_xfer_delayed_run(rpipe);
+ wa_xfer_put(xfer);
+ } else {
+ dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
+ __func__, le32_to_cpu(b->cmd.dwTransferID));
+ }
+ }
+
+ wa_put(wa); /* taken in __wa_xfer_abort */
usb_put_urb(&b->urb);
}
@@ -449,6 +508,7 @@ static int __wa_xfer_abort(struct wa_xfer *xfer)
b->cmd.bRequestType = WA_XFER_ABORT;
b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
+ b->wa = wa_get(xfer->wa);
usb_init_urb(&b->urb);
usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
@@ -462,6 +522,7 @@ static int __wa_xfer_abort(struct wa_xfer *xfer)
error_submit:
+ wa_put(xfer->wa);
if (printk_ratelimit())
dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
xfer, result);
@@ -733,6 +794,8 @@ static void wa_seg_dto_cb(struct urb *urb)
seg->isoc_frame_offset + seg->isoc_frame_index);
/* resubmit the URB with the next isoc frame. */
+ /* take a ref on resubmit. */
+ wa_xfer_get(xfer);
result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
if (result < 0) {
dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
@@ -760,9 +823,13 @@ static void wa_seg_dto_cb(struct urb *urb)
goto error_default;
}
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
return;
error_dto_submit:
+ /* taken on resubmit attempt. */
+ wa_xfer_put(xfer);
error_default:
spin_lock_irqsave(&xfer->lock, flags);
rpipe = xfer->ep->hcpriv;
@@ -772,12 +839,10 @@ error_default:
wa_reset_all(wa);
}
if (seg->status != WA_SEG_ERROR) {
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
__wa_xfer_abort(xfer);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (holding_dto) {
@@ -788,7 +853,8 @@ error_default:
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
-
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
}
/*
@@ -842,12 +908,11 @@ static void wa_seg_iso_pack_desc_cb(struct urb *urb)
}
if (seg->status != WA_SEG_ERROR) {
usb_unlink_urb(seg->dto_urb);
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
__wa_xfer_abort(xfer);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_ERROR);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
@@ -855,6 +920,8 @@ static void wa_seg_iso_pack_desc_cb(struct urb *urb)
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
}
/*
@@ -919,18 +986,18 @@ static void wa_seg_tr_cb(struct urb *urb)
}
usb_unlink_urb(seg->isoc_pack_desc_urb);
usb_unlink_urb(seg->dto_urb);
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
__wa_xfer_abort(xfer);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
}
/*
@@ -940,7 +1007,7 @@ static void wa_seg_tr_cb(struct urb *urb)
*/
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
const unsigned int bytes_transferred,
- const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
+ const unsigned int bytes_to_transfer, int *out_num_sgs)
{
struct scatterlist *out_sg;
unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
@@ -1094,14 +1161,13 @@ static int __wa_populate_dto_urb(struct wa_xfer *xfer,
*/
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
- int result, cnt, iso_frame_offset;
+ int result, cnt, isoc_frame_offset = 0;
size_t alloc_size = sizeof(*xfer->seg[0])
- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
struct usb_device *usb_dev = xfer->wa->usb_dev;
const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
struct wa_seg *seg;
size_t buf_itr, buf_size, buf_itr_size;
- int isoc_frame_offset = 0;
result = -ENOMEM;
xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
@@ -1109,7 +1175,6 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
goto error_segs_kzalloc;
buf_itr = 0;
buf_size = xfer->urb->transfer_buffer_length;
- iso_frame_offset = 0;
for (cnt = 0; cnt < xfer->segs; cnt++) {
size_t iso_pkt_descr_size = 0;
int seg_isoc_frame_count = 0, seg_isoc_size = 0;
@@ -1318,30 +1383,41 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
/* default to done unless we encounter a multi-frame isoc segment. */
*dto_done = 1;
+ /*
+ * Take a ref for each segment urb so the xfer cannot disappear until
+ * all of the callbacks run.
+ */
+ wa_xfer_get(xfer);
/* submit the transfer request. */
+ seg->status = WA_SEG_SUBMITTED;
result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
if (result < 0) {
pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
__func__, xfer, seg->index, result);
- goto error_seg_submit;
+ wa_xfer_put(xfer);
+ goto error_tr_submit;
}
/* submit the isoc packet descriptor if present. */
if (seg->isoc_pack_desc_urb) {
+ wa_xfer_get(xfer);
result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
seg->isoc_frame_index = 0;
if (result < 0) {
pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
__func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
goto error_iso_pack_desc_submit;
}
}
/* submit the out data if this is an out request. */
if (seg->dto_urb) {
struct wahc *wa = xfer->wa;
+ wa_xfer_get(xfer);
result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
if (result < 0) {
pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
__func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
goto error_dto_submit;
}
/*
@@ -1353,7 +1429,6 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
&& (seg->isoc_frame_count > 1))
*dto_done = 0;
}
- seg->status = WA_SEG_SUBMITTED;
rpipe_avail_dec(rpipe);
return 0;
@@ -1361,7 +1436,7 @@ error_dto_submit:
usb_unlink_urb(seg->isoc_pack_desc_urb);
error_iso_pack_desc_submit:
usb_unlink_urb(&seg->tr_urb);
-error_seg_submit:
+error_tr_submit:
seg->status = WA_SEG_ERROR;
seg->result = result;
*dto_done = 1;
@@ -1393,6 +1468,12 @@ static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
list_node);
list_del(&seg->list_node);
xfer = seg->xfer;
+ /*
+ * Get a reference to the xfer in case the callbacks for the
+ * URBs submitted by __wa_seg_submit attempt to complete
+ * the xfer before this function completes.
+ */
+ wa_xfer_get(xfer);
result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
/* release the dto resource if this RPIPE is done with it. */
if (dto_done)
@@ -1401,13 +1482,23 @@ static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
xfer, wa_xfer_id(xfer), seg->index,
atomic_read(&rpipe->segs_available), result);
if (unlikely(result < 0)) {
+ int done;
+
spin_unlock_irqrestore(&rpipe->seg_lock, flags);
spin_lock_irqsave(&xfer->lock, flags);
__wa_xfer_abort(xfer);
+ /*
+ * This seg was marked as submitted when it was put on
+ * the RPIPE seg_list. Mark it done.
+ */
xfer->segs_done++;
+ done = __wa_xfer_is_done(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
spin_lock_irqsave(&rpipe->seg_lock, flags);
}
+ wa_xfer_put(xfer);
}
/*
* Mark this RPIPE as waiting if dto was not acquired, there are
@@ -1592,12 +1683,19 @@ static int wa_urb_enqueue_b(struct wa_xfer *xfer)
dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
goto error_xfer_setup;
}
+ /*
+ * Get a xfer reference since __wa_xfer_submit starts asynchronous
+ * operations that may try to complete the xfer before this function
+ * exits.
+ */
+ wa_xfer_get(xfer);
result = __wa_xfer_submit(xfer);
if (result < 0) {
dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
goto error_xfer_submit;
}
spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_xfer_put(xfer);
return 0;
/*
@@ -1623,6 +1721,7 @@ error_xfer_submit:
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
+ wa_xfer_put(xfer);
/* return success since the completion routine will run. */
return 0;
}
@@ -1832,20 +1931,20 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
/* check if it is safe to unlink. */
spin_lock_irqsave(&wa->xfer_list_lock, flags);
result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
+ if ((result == 0) && urb->hcpriv) {
+ /*
+ * Get a xfer ref to prevent a race with wa_xfer_giveback
+ * cleaning up the xfer while we are working with it.
+ */
+ wa_xfer_get(urb->hcpriv);
+ }
spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
if (result)
return result;
xfer = urb->hcpriv;
- if (xfer == NULL) {
- /*
- * Nothing setup yet enqueue will see urb->status !=
- * -EINPROGRESS (by hcd layer) and bail out with
- * error, no need to do completion
- */
- BUG_ON(urb->status == -EINPROGRESS);
- goto out;
- }
+ if (xfer == NULL)
+ return -ENOENT;
spin_lock_irqsave(&xfer->lock, flags);
pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
rpipe = xfer->ep->hcpriv;
@@ -1856,6 +1955,16 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
result = -ENOENT;
goto out_unlock;
}
+ /*
+ * Check for done to avoid racing with wa_xfer_giveback and completing
+ * twice.
+ */
+ if (__wa_xfer_is_done(xfer)) {
+ pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
+ xfer, wa_xfer_id(xfer));
+ result = -ENOENT;
+ goto out_unlock;
+ }
/* Check the delayed list -> if there, release and complete */
spin_lock_irqsave(&wa->xfer_list_lock, flags2);
if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -1865,6 +1974,11 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
goto out_unlock; /* setup(), enqueue_b() completes */
/* Ok, the xfer is in flight already, it's been setup and submitted.*/
xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
+ /*
+ * grab the rpipe->seg_lock here to prevent racing with
+ * __wa_xfer_delayed_run.
+ */
+ spin_lock(&rpipe->seg_lock);
for (cnt = 0; cnt < xfer->segs; cnt++) {
seg = xfer->seg[cnt];
pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
@@ -1885,16 +1999,24 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
*/
seg->status = WA_SEG_ABORTED;
seg->result = -ENOENT;
- spin_lock_irqsave(&rpipe->seg_lock, flags2);
list_del(&seg->list_node);
xfer->segs_done++;
- spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
break;
case WA_SEG_DONE:
case WA_SEG_ERROR:
case WA_SEG_ABORTED:
break;
/*
+ * The buf_in data for a segment in the
+ * WA_SEG_DTI_PENDING state is actively being read.
+ * Let wa_buf_in_cb handle it since it will be called
+ * and will increment xfer->segs_done. Cleaning up
+ * here could cause wa_buf_in_cb to access the xfer
+ * after it has been completed/freed.
+ */
+ case WA_SEG_DTI_PENDING:
+ break;
+ /*
* In the states below, the HWA device already knows
* about the transfer. If an abort request was sent,
* allow the HWA to process it and wait for the
@@ -1903,7 +2025,6 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
*/
case WA_SEG_SUBMITTED:
case WA_SEG_PENDING:
- case WA_SEG_DTI_PENDING:
/*
* Check if the abort was successfully sent. This could
* be false if the HWA has been removed but we haven't
@@ -1917,6 +2038,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
break;
}
}
+ spin_unlock(&rpipe->seg_lock);
xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
done = __wa_xfer_is_done(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
@@ -1924,11 +2046,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
+ wa_xfer_put(xfer);
return result;
out_unlock:
spin_unlock_irqrestore(&xfer->lock, flags);
-out:
+ wa_xfer_put(xfer);
return result;
dequeue_delayed:
@@ -1937,6 +2060,7 @@ dequeue_delayed:
xfer->result = urb->status;
spin_unlock_irqrestore(&xfer->lock, flags);
wa_xfer_giveback(xfer);
+ wa_xfer_put(xfer);
usb_put_urb(urb); /* we got a ref in enqueue() */
return 0;
}
@@ -1996,15 +2120,17 @@ static int wa_xfer_status_to_errno(u8 status)
* no other segment transfer results will be returned from the device.
* Mark the remaining submitted or pending xfers as completed so that
* the xfer will complete cleanly.
+ *
+ * xfer->lock must be held
+ *
*/
static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
- struct wa_seg *incoming_seg, enum wa_seg_status status)
+ int starting_index, enum wa_seg_status status)
{
int index;
struct wa_rpipe *rpipe = xfer->ep->hcpriv;
- for (index = incoming_seg->index + 1; index < xfer->segs_submitted;
- index++) {
+ for (index = starting_index; index < xfer->segs_submitted; index++) {
struct wa_seg *current_seg = xfer->seg[index];
BUG_ON(current_seg == NULL);
@@ -2033,73 +2159,110 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
}
}
-/* Populate the wa->buf_in_urb based on the current isoc transfer state. */
-static void __wa_populate_buf_in_urb_isoc(struct wahc *wa, struct wa_xfer *xfer,
- struct wa_seg *seg, int curr_iso_frame)
+/* Populate the given urb based on the current isoc transfer state. */
+static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
+ struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
{
- BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+ int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
+ int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
+ int next_frame_contiguous;
+ struct usb_iso_packet_descriptor *iso_frame;
+
+ BUG_ON(buf_in_urb->status == -EINPROGRESS);
+
+ /*
+ * If the current frame actual_length is contiguous with the next frame
+ * and actual_length is a multiple of the DTI endpoint max packet size,
+ * combine the current frame with the next frame in a single URB. This
+ * reduces the number of URBs that must be submitted in that case.
+ */
+ seg_index = seg->isoc_frame_index;
+ do {
+ next_frame_contiguous = 0;
+
+ iso_frame = &iso_frame_desc[urb_frame_index];
+ total_len += iso_frame->actual_length;
+ ++urb_frame_index;
+ ++seg_index;
+
+ if (seg_index < seg->isoc_frame_count) {
+ struct usb_iso_packet_descriptor *next_iso_frame;
+
+ next_iso_frame = &iso_frame_desc[urb_frame_index];
+
+ if ((iso_frame->offset + iso_frame->actual_length) ==
+ next_iso_frame->offset)
+ next_frame_contiguous = 1;
+ }
+ } while (next_frame_contiguous
+ && ((iso_frame->actual_length % dti_packet_size) == 0));
/* this should always be 0 before a resubmit. */
- wa->buf_in_urb->num_mapped_sgs = 0;
- wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
- xfer->urb->iso_frame_desc[curr_iso_frame].offset;
- wa->buf_in_urb->transfer_buffer_length =
- xfer->urb->iso_frame_desc[curr_iso_frame].length;
- wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- wa->buf_in_urb->transfer_buffer = NULL;
- wa->buf_in_urb->sg = NULL;
- wa->buf_in_urb->num_sgs = 0;
- wa->buf_in_urb->context = seg;
+ buf_in_urb->num_mapped_sgs = 0;
+ buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
+ iso_frame_desc[urb_start_frame].offset;
+ buf_in_urb->transfer_buffer_length = total_len;
+ buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
+ buf_in_urb->context = seg;
+
+ /* return the number of frames included in this URB. */
+ return seg_index - seg->isoc_frame_index;
}
-/* Populate the wa->buf_in_urb based on the current transfer state. */
-static int wa_populate_buf_in_urb(struct wahc *wa, struct wa_xfer *xfer,
+/* Populate the given urb based on the current transfer state. */
+static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
unsigned int seg_idx, unsigned int bytes_transferred)
{
int result = 0;
struct wa_seg *seg = xfer->seg[seg_idx];
- BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+ BUG_ON(buf_in_urb->status == -EINPROGRESS);
/* this should always be 0 before a resubmit. */
- wa->buf_in_urb->num_mapped_sgs = 0;
+ buf_in_urb->num_mapped_sgs = 0;
if (xfer->is_dma) {
- wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma
+ buf_in_urb->transfer_dma = xfer->urb->transfer_dma
+ (seg_idx * xfer->seg_size);
- wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- wa->buf_in_urb->transfer_buffer = NULL;
- wa->buf_in_urb->sg = NULL;
- wa->buf_in_urb->num_sgs = 0;
+ buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
} else {
/* do buffer or SG processing. */
- wa->buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
if (xfer->urb->transfer_buffer) {
- wa->buf_in_urb->transfer_buffer =
+ buf_in_urb->transfer_buffer =
xfer->urb->transfer_buffer
+ (seg_idx * xfer->seg_size);
- wa->buf_in_urb->sg = NULL;
- wa->buf_in_urb->num_sgs = 0;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
} else {
/* allocate an SG list to store seg_size bytes
and copy the subset of the xfer->urb->sg
that matches the buffer subset we are
about to read. */
- wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
+ buf_in_urb->sg = wa_xfer_create_subset_sg(
xfer->urb->sg,
seg_idx * xfer->seg_size,
bytes_transferred,
- &(wa->buf_in_urb->num_sgs));
+ &(buf_in_urb->num_sgs));
- if (!(wa->buf_in_urb->sg)) {
- wa->buf_in_urb->num_sgs = 0;
+ if (!(buf_in_urb->sg)) {
+ buf_in_urb->num_sgs = 0;
result = -ENOMEM;
}
- wa->buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->transfer_buffer = NULL;
}
}
- wa->buf_in_urb->transfer_buffer_length = bytes_transferred;
- wa->buf_in_urb->context = seg;
+ buf_in_urb->transfer_buffer_length = bytes_transferred;
+ buf_in_urb->context = seg;
return result;
}
@@ -2124,6 +2287,7 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
u8 usb_status;
unsigned rpipe_ready = 0;
unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
+ struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
spin_lock_irqsave(&xfer->lock, flags);
seg_idx = xfer_result->bTransferSegment & 0x7f;
@@ -2147,7 +2311,7 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
}
if (usb_status & 0x80) {
seg->result = wa_xfer_status_to_errno(usb_status);
- dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
+ dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
xfer, xfer->id, seg->index, usb_status);
seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
WA_SEG_ABORTED : WA_SEG_ERROR;
@@ -2162,7 +2326,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
* transfers with data or below for no data, the xfer will complete.
*/
if (xfer_result->bTransferSegment & 0x80)
- wa_complete_remaining_xfer_segs(xfer, seg, WA_SEG_DONE);
+ wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
+ WA_SEG_DONE);
if (usb_pipeisoc(xfer->urb->pipe)
&& (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
/* set up WA state to read the isoc packet status next. */
@@ -2173,20 +2338,21 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
&& (bytes_transferred > 0)) {
/* IN data phase: read to buffer */
seg->status = WA_SEG_DTI_PENDING;
- result = wa_populate_buf_in_urb(wa, xfer, seg_idx,
+ result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
bytes_transferred);
if (result < 0)
goto error_buf_in_populate;
- result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
- if (result < 0)
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+ if (result < 0) {
+ --(wa->active_buf_in_urbs);
goto error_submit_buf_in;
+ }
} else {
/* OUT data phase or no data, complete it -- */
- seg->status = WA_SEG_DONE;
seg->result = bytes_transferred;
- xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
@@ -2205,15 +2371,15 @@ error_submit_buf_in:
dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
xfer, seg_idx, result);
seg->result = result;
- kfree(wa->buf_in_urb->sg);
- wa->buf_in_urb->sg = NULL;
+ kfree(buf_in_urb->sg);
+ buf_in_urb->sg = NULL;
error_buf_in_populate:
__wa_xfer_abort(xfer);
seg->status = WA_SEG_ERROR;
error_complete:
xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- wa_complete_remaining_xfer_segs(xfer, seg, seg->status);
+ wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
done = __wa_xfer_is_done(xfer);
/*
* queue work item to clear STALL for control endpoints.
@@ -2315,16 +2481,16 @@ static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
struct usb_iso_packet_descriptor *iso_frame_desc =
xfer->urb->iso_frame_desc;
- const int urb_frame_index =
+ const int xfer_frame_index =
seg->isoc_frame_offset + seg_index;
- iso_frame_desc[urb_frame_index].status =
+ iso_frame_desc[xfer_frame_index].status =
wa_xfer_status_to_errno(
le16_to_cpu(status_array[seg_index].PacketStatus));
- iso_frame_desc[urb_frame_index].actual_length =
+ iso_frame_desc[xfer_frame_index].actual_length =
le16_to_cpu(status_array[seg_index].PacketLength);
/* track the number of frames successfully transferred. */
- if (iso_frame_desc[urb_frame_index].actual_length > 0) {
+ if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
/* save the starting frame index for buf_in_urb. */
if (!data_frame_count)
first_frame_index = seg_index;
@@ -2333,30 +2499,64 @@ static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
}
if (xfer->is_inbound && data_frame_count) {
- int result;
+ int result, total_frames_read = 0, urb_index = 0;
+ struct urb *buf_in_urb;
+ /* IN data phase: read to buffer */
+ seg->status = WA_SEG_DTI_PENDING;
+
+ /* start with the first frame with data. */
seg->isoc_frame_index = first_frame_index;
- /* submit a read URB for the first frame with data. */
- __wa_populate_buf_in_urb_isoc(wa, xfer, seg,
- seg->isoc_frame_index + seg->isoc_frame_offset);
+ /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
+ do {
+ int urb_frame_index, urb_frame_count;
+ struct usb_iso_packet_descriptor *iso_frame_desc;
+
+ buf_in_urb = &(wa->buf_in_urbs[urb_index]);
+ urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
+ buf_in_urb, xfer, seg);
+ /* advance frame index to start of next read URB. */
+ seg->isoc_frame_index += urb_frame_count;
+ total_frames_read += urb_frame_count;
+
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+
+ /* skip 0-byte frames. */
+ urb_frame_index =
+ seg->isoc_frame_offset + seg->isoc_frame_index;
+ iso_frame_desc =
+ &(xfer->urb->iso_frame_desc[urb_frame_index]);
+ while ((seg->isoc_frame_index <
+ seg->isoc_frame_count) &&
+ (iso_frame_desc->actual_length == 0)) {
+ ++(seg->isoc_frame_index);
+ ++iso_frame_desc;
+ }
+ ++urb_index;
+
+ } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
+ && (seg->isoc_frame_index <
+ seg->isoc_frame_count));
- result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
if (result < 0) {
+ --(wa->active_buf_in_urbs);
dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
result);
wa_reset_all(wa);
- } else if (data_frame_count > 1)
- /* If we need to read multiple frames, set DTI busy. */
+ } else if (data_frame_count > total_frames_read)
+ /* If we need to read more frames, set DTI busy. */
dti_busy = 1;
} else {
/* OUT transfer or no more IN data, complete it -- */
- seg->status = WA_SEG_DONE;
- xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
}
spin_unlock_irqrestore(&xfer->lock, flags);
- wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+ if (dti_busy)
+ wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
+ else
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
if (done)
wa_xfer_completion(xfer);
if (rpipe_ready)
@@ -2388,8 +2588,9 @@ static void wa_buf_in_cb(struct urb *urb)
struct wahc *wa;
struct device *dev;
struct wa_rpipe *rpipe;
- unsigned rpipe_ready = 0, seg_index, isoc_data_frame_count = 0;
+ unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
unsigned long flags;
+ int resubmit_dti = 0, active_buf_in_urbs;
u8 done = 0;
/* free the sg if it was used. */
@@ -2399,19 +2600,20 @@ static void wa_buf_in_cb(struct urb *urb)
spin_lock_irqsave(&xfer->lock, flags);
wa = xfer->wa;
dev = &wa->usb_iface->dev;
+ --(wa->active_buf_in_urbs);
+ active_buf_in_urbs = wa->active_buf_in_urbs;
if (usb_pipeisoc(xfer->urb->pipe)) {
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ int seg_index;
+
/*
- * Find the next isoc frame with data. Bail out after
- * isoc_data_frame_count > 1 since there is no need to walk
- * the entire frame array. We just need to know if
- * isoc_data_frame_count is 0, 1, or >1.
+ * Find the next isoc frame with data and count how many
+ * frames with data remain.
*/
- seg_index = seg->isoc_frame_index + 1;
- while ((seg_index < seg->isoc_frame_count)
- && (isoc_data_frame_count <= 1)) {
- struct usb_iso_packet_descriptor *iso_frame_desc =
- xfer->urb->iso_frame_desc;
+ seg_index = seg->isoc_frame_index;
+ while (seg_index < seg->isoc_frame_count) {
const int urb_frame_index =
seg->isoc_frame_offset + seg_index;
@@ -2432,24 +2634,39 @@ static void wa_buf_in_cb(struct urb *urb)
seg->result += urb->actual_length;
if (isoc_data_frame_count > 0) {
- int result;
- /* submit a read URB for the first frame with data. */
- __wa_populate_buf_in_urb_isoc(wa, xfer, seg,
- seg->isoc_frame_index + seg->isoc_frame_offset);
- result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+ int result, urb_frame_count;
+
+ /* submit a read URB for the next frame with data. */
+ urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
+ xfer, seg);
+ /* advance index to start of next read URB. */
+ seg->isoc_frame_index += urb_frame_count;
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(urb, GFP_ATOMIC);
if (result < 0) {
+ --(wa->active_buf_in_urbs);
dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
result);
wa_reset_all(wa);
}
- } else {
+ /*
+ * If we are in this callback and
+ * isoc_data_frame_count > 0, it means that the dti_urb
+ * submission was delayed in wa_dti_cb. Once
+ * we submit the last buf_in_urb, we can submit the
+ * delayed dti_urb.
+ */
+ resubmit_dti = (isoc_data_frame_count ==
+ urb_frame_count);
+ } else if (active_buf_in_urbs == 0) {
rpipe = xfer->ep->hcpriv;
- seg->status = WA_SEG_DONE;
- dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
- xfer, seg->index, seg->result);
- xfer->segs_done++;
+ dev_dbg(dev,
+ "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ seg->result);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_DONE);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
@@ -2461,37 +2678,44 @@ static void wa_buf_in_cb(struct urb *urb)
case -ENOENT: /* as it was done by the who unlinked us */
break;
default: /* Other errors ... */
+ /*
+ * Error on data buf read. Only resubmit DTI if it hasn't
+ * already been done by previously hitting this error or by a
+ * successful completion of the previous buf_in_urb.
+ */
+ resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
spin_lock_irqsave(&xfer->lock, flags);
rpipe = xfer->ep->hcpriv;
if (printk_ratelimit())
- dev_err(dev, "xfer %p#%u: data in error %d\n",
- xfer, seg->index, urb->status);
+ dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ urb->status);
if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME)){
dev_err(dev, "DTO: URB max acceptable errors "
"exceeded, resetting device\n");
wa_reset_all(wa);
}
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- __wa_xfer_abort(xfer);
- done = __wa_xfer_is_done(xfer);
+ if (active_buf_in_urbs == 0)
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_ERROR);
+ else
+ __wa_xfer_abort(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
- /*
- * If we are in this callback and isoc_data_frame_count > 0, it means
- * that the dti_urb submission was delayed in wa_dti_cb. Once
- * isoc_data_frame_count gets to 1, we can submit the deferred URB
- * since the last buf_in_urb was just submitted.
- */
- if (isoc_data_frame_count == 1) {
- int result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+
+ if (resubmit_dti) {
+ int result;
+
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+
+ result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
if (result < 0) {
dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
result);
@@ -2561,11 +2785,15 @@ static void wa_dti_cb(struct urb *urb)
xfer_result->hdr.bNotifyType);
break;
}
+ xfer_id = le32_to_cpu(xfer_result->dwTransferID);
usb_status = xfer_result->bTransferStatus & 0x3f;
- if (usb_status == WA_XFER_STATUS_NOT_FOUND)
+ if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
/* taken care of already */
+ dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
+ __func__, xfer_id,
+ xfer_result->bTransferSegment & 0x7f);
break;
- xfer_id = le32_to_cpu(xfer_result->dwTransferID);
+ }
xfer = wa_xfer_get_by_id(wa, xfer_id);
if (xfer == NULL) {
/* FIXME: transaction not found. */
@@ -2614,6 +2842,54 @@ out:
}
/*
+ * Initialize the DTI URB for reading transfer result notifications and also
+ * the buffer-in URB, for reading buffers. Then we just submit the DTI URB.
+ */
+int wa_dti_start(struct wahc *wa)
+{
+ const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
+ struct device *dev = &wa->usb_iface->dev;
+ int result = -ENOMEM, index;
+
+ if (wa->dti_urb != NULL) /* DTI URB already started */
+ goto out;
+
+ wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (wa->dti_urb == NULL) {
+ dev_err(dev, "Can't allocate DTI URB\n");
+ goto error_dti_urb_alloc;
+ }
+ usb_fill_bulk_urb(
+ wa->dti_urb, wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
+ wa->dti_buf, wa->dti_buf_size,
+ wa_dti_cb, wa);
+
+ /* init the buf in URBs */
+ for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
+ usb_fill_bulk_urb(
+ &(wa->buf_in_urbs[index]), wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev,
+ 0x80 | dti_epd->bEndpointAddress),
+ NULL, 0, wa_buf_in_cb, wa);
+ }
+ result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
+ result);
+ goto error_dti_urb_submit;
+ }
+out:
+ return 0;
+
+error_dti_urb_submit:
+ usb_put_urb(wa->dti_urb);
+ wa->dti_urb = NULL;
+error_dti_urb_alloc:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wa_dti_start);
+/*
* Transfer complete notification
*
* Called from the notif.c code. We get a notification on EP2 saying
@@ -2627,15 +2903,10 @@ out:
* Follow up in wa_dti_cb(), as that's where the whole state
* machine starts.
*
- * So here we just initialize the DTI URB for reading transfer result
- * notifications and also the buffer-in URB, for reading buffers. Then
- * we just submit the DTI URB.
- *
* @wa shall be referenced
*/
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
- int result;
struct device *dev = &wa->usb_iface->dev;
struct wa_notif_xfer *notif_xfer;
const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
@@ -2649,45 +2920,13 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
goto error;
}
- if (wa->dti_urb != NULL) /* DTI URB already started */
- goto out;
- wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->dti_urb == NULL) {
- dev_err(dev, "Can't allocate DTI URB\n");
- goto error_dti_urb_alloc;
- }
- usb_fill_bulk_urb(
- wa->dti_urb, wa->usb_dev,
- usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
- wa->dti_buf, wa->dti_buf_size,
- wa_dti_cb, wa);
+ /* attempt to start the DTI ep processing. */
+ if (wa_dti_start(wa) < 0)
+ goto error;
- wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->buf_in_urb == NULL) {
- dev_err(dev, "Can't allocate BUF-IN URB\n");
- goto error_buf_in_urb_alloc;
- }
- usb_fill_bulk_urb(
- wa->buf_in_urb, wa->usb_dev,
- usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
- NULL, 0, wa_buf_in_cb, wa);
- result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
- if (result < 0) {
- dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
- result);
- goto error_dti_urb_submit;
- }
-out:
return;
-error_dti_urb_submit:
- usb_put_urb(wa->buf_in_urb);
- wa->buf_in_urb = NULL;
-error_buf_in_urb_alloc:
- usb_put_urb(wa->dti_urb);
- wa->dti_urb = NULL;
-error_dti_urb_alloc:
error:
wa_reset_all(wa);
}
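
The wa_dti_start() helper added above concentrates the DTI setup into a single allocate/fill/submit path with a goto-based unwind. Below is a minimal, hedged sketch of that same shape outside the wusbcore context; my_ctx, my_start_rx and my_rx_cb are hypothetical names, and only the usb_* calls are real kernel interfaces.

#include <linux/usb.h>

struct my_ctx {
	struct usb_device *udev;
	struct urb *rx_urb;
	void *buf;
	size_t buf_size;
	unsigned int pipe;		/* e.g. usb_rcvbulkpipe(udev, ep) */
};

static void my_rx_cb(struct urb *urb)
{
	/* completion handling would go here */
}

static int my_start_rx(struct my_ctx *ctx)
{
	int result = -ENOMEM;

	if (ctx->rx_urb)		/* already started, nothing to do */
		return 0;

	ctx->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!ctx->rx_urb)
		goto error_alloc;

	usb_fill_bulk_urb(ctx->rx_urb, ctx->udev, ctx->pipe,
			  ctx->buf, ctx->buf_size, my_rx_cb, ctx);

	result = usb_submit_urb(ctx->rx_urb, GFP_KERNEL);
	if (result < 0)
		goto error_submit;
	return 0;

error_submit:
	usb_put_urb(ctx->rx_urb);	/* drop the reference from usb_alloc_urb() */
	ctx->rx_urb = NULL;
error_alloc:
	return result;
}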
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index 26b3d9d1409f..af7b204b9215 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -13,6 +13,7 @@ menuconfig VFIO
depends on IOMMU_API
select VFIO_IOMMU_TYPE1 if X86
select VFIO_IOMMU_SPAPR_TCE if (PPC_POWERNV || PPC_PSERIES)
+ select ANON_INODES
help
VFIO provides a framework for secure userspace device drivers.
See Documentation/vfio.txt for more details.
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 210357691dc0..9dd49c9839ac 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -482,15 +482,19 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
for (i = 0; i < nvec; i++)
vdev->msix[i].entry = i;
- ret = pci_enable_msix(pdev, vdev->msix, nvec);
- if (ret) {
+ ret = pci_enable_msix_range(pdev, vdev->msix, 1, nvec);
+ if (ret < nvec) {
+ if (ret > 0)
+ pci_disable_msix(pdev);
kfree(vdev->msix);
kfree(vdev->ctx);
return ret;
}
} else {
- ret = pci_enable_msi_block(pdev, nvec);
- if (ret) {
+ ret = pci_enable_msi_range(pdev, 1, nvec);
+ if (ret < nvec) {
+ if (ret > 0)
+ pci_disable_msi(pdev);
kfree(vdev->ctx);
return ret;
}
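
The vfio_pci_intrs.c hunk above switches to pci_enable_msix_range()/pci_enable_msi_range(), which return the number of vectors actually allocated (or a negative errno) rather than 0 on success. A hedged sketch of that calling convention follows; demo_enable_exact_msix() is a hypothetical helper that, unlike the vfio code, turns a partial allocation into -ENOSPC instead of returning the raw count.

#include <linux/pci.h>
#include <linux/errno.h>

static int demo_enable_exact_msix(struct pci_dev *pdev,
				  struct msix_entry *entries, int want)
{
	int i, ret;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	/*
	 * pci_enable_msix_range() allocates between minvec and maxvec
	 * vectors and returns how many it got, or a negative errno.
	 * With minvec == 1 a partial allocation is possible, so a caller
	 * that insists on exactly 'want' vectors must roll it back.
	 */
	ret = pci_enable_msix_range(pdev, entries, 1, want);
	if (ret < want) {
		if (ret > 0)
			pci_disable_msix(pdev);
		return ret < 0 ? ret : -ENOSPC;
	}
	return 0;
}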
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 21271d8df023..512f479d8a50 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1413,6 +1413,12 @@ int vfio_external_user_iommu_id(struct vfio_group *group)
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
+long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
+{
+ return vfio_ioctl_check_extension(group->container, arg);
+}
+EXPORT_SYMBOL_GPL(vfio_external_check_extension);
+
/**
* Module/class support
*/
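
The new vfio_external_check_extension() export above lets an external user of a VFIO group query its container's capabilities. A hedged sketch of a caller is shown below; demo_group_is_coherent() is hypothetical, modelled on how a consumer holding a reference from vfio_group_get_external_user() might test VFIO_DMA_CC_IOMMU, and it assumes the matching declaration in include/linux/vfio.h from this series.

#include <linux/vfio.h>

static bool demo_group_is_coherent(struct vfio_group *group)
{
	/* 1 means every IOMMU domain behind the container enforces cache coherency */
	return vfio_external_check_extension(group, VFIO_DMA_CC_IOMMU) == 1;
}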
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 54af4e933695..6673e7be507f 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -30,7 +30,6 @@
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/pci.h> /* pci_bus_type */
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -55,11 +54,17 @@ MODULE_PARM_DESC(disable_hugepages,
"Disable VFIO IOMMU support for IOMMU hugepages.");
struct vfio_iommu {
- struct iommu_domain *domain;
+ struct list_head domain_list;
struct mutex lock;
struct rb_root dma_list;
+ bool v2;
+};
+
+struct vfio_domain {
+ struct iommu_domain *domain;
+ struct list_head next;
struct list_head group_list;
- bool cache;
+ int prot; /* IOMMU_CACHE */
};
struct vfio_dma {
@@ -99,7 +104,7 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
return NULL;
}
-static void vfio_insert_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
+static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
struct vfio_dma *dma;
@@ -118,7 +123,7 @@ static void vfio_insert_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
rb_insert_color(&new->node, &iommu->dma_list);
}
-static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
+static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
rb_erase(&old->node, &iommu->dma_list);
}
@@ -322,32 +327,39 @@ static long vfio_unpin_pages(unsigned long pfn, long npage,
return unlocked;
}
-static int vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
- dma_addr_t iova, size_t *size)
+static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
- dma_addr_t start = iova, end = iova + *size;
+ dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
+ struct vfio_domain *domain, *d;
long unlocked = 0;
+ if (!dma->size)
+ return;
+ /*
+ * We use the IOMMU to track the physical addresses, otherwise we'd
+ * need a much more complicated tracking system. Unfortunately that
+ * means we need to use one of the iommu domains to figure out the
+ * pfns to unpin. The rest need to be unmapped in advance so we have
+ * no iommu translations remaining when the pages are unpinned.
+ */
+ domain = d = list_first_entry(&iommu->domain_list,
+ struct vfio_domain, next);
+
+ list_for_each_entry_continue(d, &iommu->domain_list, next)
+ iommu_unmap(d->domain, dma->iova, dma->size);
+
while (iova < end) {
size_t unmapped;
phys_addr_t phys;
- /*
- * We use the IOMMU to track the physical address. This
- * saves us from having a lot more entries in our mapping
- * tree. The downside is that we don't track the size
- * used to do the mapping. We request unmap of a single
- * page, but expect IOMMUs that support large pages to
- * unmap a larger chunk.
- */
- phys = iommu_iova_to_phys(iommu->domain, iova);
+ phys = iommu_iova_to_phys(domain->domain, iova);
if (WARN_ON(!phys)) {
iova += PAGE_SIZE;
continue;
}
- unmapped = iommu_unmap(iommu->domain, iova, PAGE_SIZE);
- if (!unmapped)
+ unmapped = iommu_unmap(domain->domain, iova, PAGE_SIZE);
+ if (WARN_ON(!unmapped))
break;
unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT,
@@ -357,119 +369,26 @@ static int vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
}
vfio_lock_acct(-unlocked);
-
- *size = iova - start;
-
- return 0;
}
-static int vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
- size_t *size, struct vfio_dma *dma)
+static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
- size_t offset, overlap, tmp;
- struct vfio_dma *split;
- int ret;
-
- if (!*size)
- return 0;
-
- /*
- * Existing dma region is completely covered, unmap all. This is
- * the likely case since userspace tends to map and unmap buffers
- * in one shot rather than multiple mappings within a buffer.
- */
- if (likely(start <= dma->iova &&
- start + *size >= dma->iova + dma->size)) {
- *size = dma->size;
- ret = vfio_unmap_unpin(iommu, dma, dma->iova, size);
- if (ret)
- return ret;
-
- /*
- * Did we remove more than we have? Should never happen
- * since a vfio_dma is contiguous in iova and vaddr.
- */
- WARN_ON(*size != dma->size);
-
- vfio_remove_dma(iommu, dma);
- kfree(dma);
- return 0;
- }
-
- /* Overlap low address of existing range */
- if (start <= dma->iova) {
- overlap = start + *size - dma->iova;
- ret = vfio_unmap_unpin(iommu, dma, dma->iova, &overlap);
- if (ret)
- return ret;
-
- vfio_remove_dma(iommu, dma);
-
- /*
- * Check, we may have removed to whole vfio_dma. If not
- * fixup and re-insert.
- */
- if (overlap < dma->size) {
- dma->iova += overlap;
- dma->vaddr += overlap;
- dma->size -= overlap;
- vfio_insert_dma(iommu, dma);
- } else
- kfree(dma);
-
- *size = overlap;
- return 0;
- }
-
- /* Overlap high address of existing range */
- if (start + *size >= dma->iova + dma->size) {
- offset = start - dma->iova;
- overlap = dma->size - offset;
-
- ret = vfio_unmap_unpin(iommu, dma, start, &overlap);
- if (ret)
- return ret;
-
- dma->size -= overlap;
- *size = overlap;
- return 0;
- }
-
- /* Split existing */
-
- /*
- * Allocate our tracking structure early even though it may not
- * be used. An Allocation failure later loses track of pages and
- * is more difficult to unwind.
- */
- split = kzalloc(sizeof(*split), GFP_KERNEL);
- if (!split)
- return -ENOMEM;
-
- offset = start - dma->iova;
-
- ret = vfio_unmap_unpin(iommu, dma, start, size);
- if (ret || !*size) {
- kfree(split);
- return ret;
- }
-
- tmp = dma->size;
+ vfio_unmap_unpin(iommu, dma);
+ vfio_unlink_dma(iommu, dma);
+ kfree(dma);
+}
- /* Resize the lower vfio_dma in place, before the below insert */
- dma->size = offset;
+static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+{
+ struct vfio_domain *domain;
+ unsigned long bitmap = PAGE_MASK;
- /* Insert new for remainder, assuming it didn't all get unmapped */
- if (likely(offset + *size < tmp)) {
- split->size = tmp - offset - *size;
- split->iova = dma->iova + offset + *size;
- split->vaddr = dma->vaddr + offset + *size;
- split->prot = dma->prot;
- vfio_insert_dma(iommu, split);
- } else
- kfree(split);
+ mutex_lock(&iommu->lock);
+ list_for_each_entry(domain, &iommu->domain_list, next)
+ bitmap &= domain->domain->ops->pgsize_bitmap;
+ mutex_unlock(&iommu->lock);
- return 0;
+ return bitmap;
}
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
@@ -477,10 +396,10 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
{
uint64_t mask;
struct vfio_dma *dma;
- size_t unmapped = 0, size;
+ size_t unmapped = 0;
int ret = 0;
- mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
+ mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
if (unmap->iova & mask)
return -EINVAL;
@@ -491,20 +410,61 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
mutex_lock(&iommu->lock);
+ /*
+ * vfio-iommu-type1 (v1) - User mappings were coalesced together to
+ * avoid tracking individual mappings. This means that the granularity
+ * of the original mapping was lost and the user was allowed to attempt
+ * to unmap any range. Depending on the contiguousness of physical
+ * memory and page sizes supported by the IOMMU, arbitrary unmaps may
+ * or may not have worked. We only guaranteed unmap granularity
+ * matching the original mapping; even though it was untracked here,
+ * the original mappings are reflected in IOMMU mappings. This
+ * resulted in a couple unusual behaviors. First, if a range is not
+ * able to be unmapped, ex. a set of 4k pages that was mapped as a
+ * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
+ * a zero sized unmap. Also, if an unmap request overlaps the first
+ * address of a hugepage, the IOMMU will unmap the entire hugepage.
+ * This also returns success and the returned unmap size reflects the
+ * actual size unmapped.
+ *
+ * We attempt to maintain compatibility with this "v1" interface, but
+ * we take control out of the hands of the IOMMU. Therefore, an unmap
+ * request offset from the beginning of the original mapping will
+ * return success with zero sized unmap. And an unmap request covering
+ * the first iova of mapping will unmap the entire range.
+ *
+ * The v2 version of this interface intends to be more deterministic.
+ * Unmap requests must fully cover previous mappings. Multiple
+	 * mappings may still be unmapped by specifying large ranges, but there
+ * must not be any previous mappings bisected by the range. An error
+ * will be returned if these conditions are not met. The v2 interface
+ * will only return success and a size of zero if there were no
+ * mappings within the range.
+ */
+ if (iommu->v2) {
+ dma = vfio_find_dma(iommu, unmap->iova, 0);
+ if (dma && dma->iova != unmap->iova) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
+ if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ }
+
while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
- size = unmap->size;
- ret = vfio_remove_dma_overlap(iommu, unmap->iova, &size, dma);
- if (ret || !size)
+ if (!iommu->v2 && unmap->iova > dma->iova)
break;
- unmapped += size;
+ unmapped += dma->size;
+ vfio_remove_dma(iommu, dma);
}
+unlock:
mutex_unlock(&iommu->lock);
- /*
- * We may unmap more than requested, update the unmap struct so
- * userspace can know.
- */
+ /* Report how much was unmapped */
unmap->size = unmapped;
return ret;
@@ -516,22 +476,47 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
* soon, so this is just a temporary workaround to break mappings down into
* PAGE_SIZE. Better to map smaller pages than nothing.
*/
-static int map_try_harder(struct vfio_iommu *iommu, dma_addr_t iova,
+static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
unsigned long pfn, long npage, int prot)
{
long i;
int ret;
for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
- ret = iommu_map(iommu->domain, iova,
+ ret = iommu_map(domain->domain, iova,
(phys_addr_t)pfn << PAGE_SHIFT,
- PAGE_SIZE, prot);
+ PAGE_SIZE, prot | domain->prot);
if (ret)
break;
}
for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
- iommu_unmap(iommu->domain, iova, PAGE_SIZE);
+ iommu_unmap(domain->domain, iova, PAGE_SIZE);
+
+ return ret;
+}
+
+static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
+ unsigned long pfn, long npage, int prot)
+{
+ struct vfio_domain *d;
+ int ret;
+
+ list_for_each_entry(d, &iommu->domain_list, next) {
+ ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
+ npage << PAGE_SHIFT, prot | d->prot);
+ if (ret) {
+ if (ret != -EBUSY ||
+ map_try_harder(d, iova, pfn, npage, prot))
+ goto unwind;
+ }
+ }
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
+ iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
return ret;
}
@@ -545,12 +530,12 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
long npage;
int ret = 0, prot = 0;
uint64_t mask;
- struct vfio_dma *dma = NULL;
+ struct vfio_dma *dma;
unsigned long pfn;
end = map->iova + map->size;
- mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
+ mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
/* READ/WRITE from device perspective */
if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
@@ -561,9 +546,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
if (!prot)
return -EINVAL; /* No READ/WRITE? */
- if (iommu->cache)
- prot |= IOMMU_CACHE;
-
if (vaddr & mask)
return -EINVAL;
if (map->iova & mask)
@@ -588,180 +570,257 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
return -EEXIST;
}
- for (iova = map->iova; iova < end; iova += size, vaddr += size) {
- long i;
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma) {
+ mutex_unlock(&iommu->lock);
+ return -ENOMEM;
+ }
+
+ dma->iova = map->iova;
+ dma->vaddr = map->vaddr;
+ dma->prot = prot;
+
+ /* Insert zero-sized and grow as we map chunks of it */
+ vfio_link_dma(iommu, dma);
+ for (iova = map->iova; iova < end; iova += size, vaddr += size) {
/* Pin a contiguous chunk of memory */
npage = vfio_pin_pages(vaddr, (end - iova) >> PAGE_SHIFT,
prot, &pfn);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
- goto out;
- }
-
- /* Verify pages are not already mapped */
- for (i = 0; i < npage; i++) {
- if (iommu_iova_to_phys(iommu->domain,
- iova + (i << PAGE_SHIFT))) {
- ret = -EBUSY;
- goto out_unpin;
- }
+ break;
}
- ret = iommu_map(iommu->domain, iova,
- (phys_addr_t)pfn << PAGE_SHIFT,
- npage << PAGE_SHIFT, prot);
+ /* Map it! */
+ ret = vfio_iommu_map(iommu, iova, pfn, npage, prot);
if (ret) {
- if (ret != -EBUSY ||
- map_try_harder(iommu, iova, pfn, npage, prot)) {
- goto out_unpin;
- }
+ vfio_unpin_pages(pfn, npage, prot, true);
+ break;
}
size = npage << PAGE_SHIFT;
+ dma->size += size;
+ }
- /*
- * Check if we abut a region below - nothing below 0.
- * This is the most likely case when mapping chunks of
- * physically contiguous regions within a virtual address
- * range. Update the abutting entry in place since iova
- * doesn't change.
- */
- if (likely(iova)) {
- struct vfio_dma *tmp;
- tmp = vfio_find_dma(iommu, iova - 1, 1);
- if (tmp && tmp->prot == prot &&
- tmp->vaddr + tmp->size == vaddr) {
- tmp->size += size;
- iova = tmp->iova;
- size = tmp->size;
- vaddr = tmp->vaddr;
- dma = tmp;
- }
- }
+ if (ret)
+ vfio_remove_dma(iommu, dma);
- /*
- * Check if we abut a region above - nothing above ~0 + 1.
- * If we abut above and below, remove and free. If only
- * abut above, remove, modify, reinsert.
- */
- if (likely(iova + size)) {
- struct vfio_dma *tmp;
- tmp = vfio_find_dma(iommu, iova + size, 1);
- if (tmp && tmp->prot == prot &&
- tmp->vaddr == vaddr + size) {
- vfio_remove_dma(iommu, tmp);
- if (dma) {
- dma->size += tmp->size;
- kfree(tmp);
- } else {
- size += tmp->size;
- tmp->size = size;
- tmp->iova = iova;
- tmp->vaddr = vaddr;
- vfio_insert_dma(iommu, tmp);
- dma = tmp;
- }
- }
- }
+ mutex_unlock(&iommu->lock);
+ return ret;
+}
+
+static int vfio_bus_type(struct device *dev, void *data)
+{
+ struct bus_type **bus = data;
+
+ if (*bus && *bus != dev->bus)
+ return -EINVAL;
+
+ *bus = dev->bus;
+
+ return 0;
+}
+
+static int vfio_iommu_replay(struct vfio_iommu *iommu,
+ struct vfio_domain *domain)
+{
+ struct vfio_domain *d;
+ struct rb_node *n;
+ int ret;
+
+ /* Arbitrarily pick the first domain in the list for lookups */
+ d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
+ n = rb_first(&iommu->dma_list);
+
+	/* If there's no domain, there had better not be any mappings */
+ if (WARN_ON(n && !d))
+ return -EINVAL;
+
+ for (; n; n = rb_next(n)) {
+ struct vfio_dma *dma;
+ dma_addr_t iova;
+
+ dma = rb_entry(n, struct vfio_dma, node);
+ iova = dma->iova;
+
+ while (iova < dma->iova + dma->size) {
+ phys_addr_t phys = iommu_iova_to_phys(d->domain, iova);
+ size_t size;
- if (!dma) {
- dma = kzalloc(sizeof(*dma), GFP_KERNEL);
- if (!dma) {
- iommu_unmap(iommu->domain, iova, size);
- ret = -ENOMEM;
- goto out_unpin;
+ if (WARN_ON(!phys)) {
+ iova += PAGE_SIZE;
+ continue;
}
- dma->size = size;
- dma->iova = iova;
- dma->vaddr = vaddr;
- dma->prot = prot;
- vfio_insert_dma(iommu, dma);
- }
- }
+ size = PAGE_SIZE;
- WARN_ON(ret);
- mutex_unlock(&iommu->lock);
- return ret;
+ while (iova + size < dma->iova + dma->size &&
+ phys + size == iommu_iova_to_phys(d->domain,
+ iova + size))
+ size += PAGE_SIZE;
-out_unpin:
- vfio_unpin_pages(pfn, npage, prot, true);
+ ret = iommu_map(domain->domain, iova, phys,
+ size, dma->prot | domain->prot);
+ if (ret)
+ return ret;
-out:
- iova = map->iova;
- size = map->size;
- while ((dma = vfio_find_dma(iommu, iova, size))) {
- int r = vfio_remove_dma_overlap(iommu, iova,
- &size, dma);
- if (WARN_ON(r || !size))
- break;
+ iova += size;
+ }
}
- mutex_unlock(&iommu->lock);
- return ret;
+ return 0;
}
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
struct vfio_iommu *iommu = iommu_data;
- struct vfio_group *group, *tmp;
+ struct vfio_group *group, *g;
+ struct vfio_domain *domain, *d;
+ struct bus_type *bus = NULL;
int ret;
- group = kzalloc(sizeof(*group), GFP_KERNEL);
- if (!group)
- return -ENOMEM;
-
mutex_lock(&iommu->lock);
- list_for_each_entry(tmp, &iommu->group_list, next) {
- if (tmp->iommu_group == iommu_group) {
+ list_for_each_entry(d, &iommu->domain_list, next) {
+ list_for_each_entry(g, &d->group_list, next) {
+ if (g->iommu_group != iommu_group)
+ continue;
+
mutex_unlock(&iommu->lock);
- kfree(group);
return -EINVAL;
}
}
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!group || !domain) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ group->iommu_group = iommu_group;
+
+ /* Determine bus_type in order to allocate a domain */
+ ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
+ if (ret)
+ goto out_free;
+
+ domain->domain = iommu_domain_alloc(bus);
+ if (!domain->domain) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ ret = iommu_attach_group(domain->domain, iommu_group);
+ if (ret)
+ goto out_domain;
+
+ INIT_LIST_HEAD(&domain->group_list);
+ list_add(&group->next, &domain->group_list);
+
+ if (!allow_unsafe_interrupts &&
+ !iommu_domain_has_cap(domain->domain, IOMMU_CAP_INTR_REMAP)) {
+ pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
+ __func__);
+ ret = -EPERM;
+ goto out_detach;
+ }
+
+ if (iommu_domain_has_cap(domain->domain, IOMMU_CAP_CACHE_COHERENCY))
+ domain->prot |= IOMMU_CACHE;
+
/*
- * TODO: Domain have capabilities that might change as we add
- * groups (see iommu->cache, currently never set). Check for
- * them and potentially disallow groups to be attached when it
- * would change capabilities (ugh).
+ * Try to match an existing compatible domain. We don't want to
+ * preclude an IOMMU driver supporting multiple bus_types and being
+ * able to include different bus_types in the same IOMMU domain, so
+ * we test whether the domains use the same iommu_ops rather than
+ * testing if they're on the same bus_type.
*/
- ret = iommu_attach_group(iommu->domain, iommu_group);
- if (ret) {
- mutex_unlock(&iommu->lock);
- kfree(group);
- return ret;
+ list_for_each_entry(d, &iommu->domain_list, next) {
+ if (d->domain->ops == domain->domain->ops &&
+ d->prot == domain->prot) {
+ iommu_detach_group(domain->domain, iommu_group);
+ if (!iommu_attach_group(d->domain, iommu_group)) {
+ list_add(&group->next, &d->group_list);
+ iommu_domain_free(domain->domain);
+ kfree(domain);
+ mutex_unlock(&iommu->lock);
+ return 0;
+ }
+
+ ret = iommu_attach_group(domain->domain, iommu_group);
+ if (ret)
+ goto out_domain;
+ }
}
- group->iommu_group = iommu_group;
- list_add(&group->next, &iommu->group_list);
+ /* replay mappings on new domains */
+ ret = vfio_iommu_replay(iommu, domain);
+ if (ret)
+ goto out_detach;
+
+ list_add(&domain->next, &iommu->domain_list);
mutex_unlock(&iommu->lock);
return 0;
+
+out_detach:
+ iommu_detach_group(domain->domain, iommu_group);
+out_domain:
+ iommu_domain_free(domain->domain);
+out_free:
+ kfree(domain);
+ kfree(group);
+ mutex_unlock(&iommu->lock);
+ return ret;
+}
+
+static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
+{
+ struct rb_node *node;
+
+ while ((node = rb_first(&iommu->dma_list)))
+ vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}
static void vfio_iommu_type1_detach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
struct vfio_iommu *iommu = iommu_data;
+ struct vfio_domain *domain;
struct vfio_group *group;
mutex_lock(&iommu->lock);
- list_for_each_entry(group, &iommu->group_list, next) {
- if (group->iommu_group == iommu_group) {
- iommu_detach_group(iommu->domain, iommu_group);
+ list_for_each_entry(domain, &iommu->domain_list, next) {
+ list_for_each_entry(group, &domain->group_list, next) {
+ if (group->iommu_group != iommu_group)
+ continue;
+
+ iommu_detach_group(domain->domain, iommu_group);
list_del(&group->next);
kfree(group);
- break;
+ /*
+ * Group ownership provides privilege, if the group
+ * list is empty, the domain goes away. If it's the
+ * last domain, then all the mappings go away too.
+ */
+ if (list_empty(&domain->group_list)) {
+ if (list_is_singular(&iommu->domain_list))
+ vfio_iommu_unmap_unpin_all(iommu);
+ iommu_domain_free(domain->domain);
+ list_del(&domain->next);
+ kfree(domain);
+ }
+ goto done;
}
}
+done:
mutex_unlock(&iommu->lock);
}
@@ -769,40 +828,17 @@ static void *vfio_iommu_type1_open(unsigned long arg)
{
struct vfio_iommu *iommu;
- if (arg != VFIO_TYPE1_IOMMU)
+ if (arg != VFIO_TYPE1_IOMMU && arg != VFIO_TYPE1v2_IOMMU)
return ERR_PTR(-EINVAL);
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&iommu->group_list);
+ INIT_LIST_HEAD(&iommu->domain_list);
iommu->dma_list = RB_ROOT;
mutex_init(&iommu->lock);
-
- /*
- * Wish we didn't have to know about bus_type here.
- */
- iommu->domain = iommu_domain_alloc(&pci_bus_type);
- if (!iommu->domain) {
- kfree(iommu);
- return ERR_PTR(-EIO);
- }
-
- /*
- * Wish we could specify required capabilities rather than create
- * a domain, see what comes out and hope it doesn't change along
- * the way. Fortunately we know interrupt remapping is global for
- * our iommus.
- */
- if (!allow_unsafe_interrupts &&
- !iommu_domain_has_cap(iommu->domain, IOMMU_CAP_INTR_REMAP)) {
- pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
- __func__);
- iommu_domain_free(iommu->domain);
- kfree(iommu);
- return ERR_PTR(-EPERM);
- }
+ iommu->v2 = (arg == VFIO_TYPE1v2_IOMMU);
return iommu;
}
@@ -810,26 +846,42 @@ static void *vfio_iommu_type1_open(unsigned long arg)
static void vfio_iommu_type1_release(void *iommu_data)
{
struct vfio_iommu *iommu = iommu_data;
+ struct vfio_domain *domain, *domain_tmp;
struct vfio_group *group, *group_tmp;
- struct rb_node *node;
- list_for_each_entry_safe(group, group_tmp, &iommu->group_list, next) {
- iommu_detach_group(iommu->domain, group->iommu_group);
- list_del(&group->next);
- kfree(group);
+ vfio_iommu_unmap_unpin_all(iommu);
+
+ list_for_each_entry_safe(domain, domain_tmp,
+ &iommu->domain_list, next) {
+ list_for_each_entry_safe(group, group_tmp,
+ &domain->group_list, next) {
+ iommu_detach_group(domain->domain, group->iommu_group);
+ list_del(&group->next);
+ kfree(group);
+ }
+ iommu_domain_free(domain->domain);
+ list_del(&domain->next);
+ kfree(domain);
}
- while ((node = rb_first(&iommu->dma_list))) {
- struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
- size_t size = dma->size;
- vfio_remove_dma_overlap(iommu, dma->iova, &size, dma);
- if (WARN_ON(!size))
+ kfree(iommu);
+}
+
+static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
+{
+ struct vfio_domain *domain;
+ int ret = 1;
+
+ mutex_lock(&iommu->lock);
+ list_for_each_entry(domain, &iommu->domain_list, next) {
+ if (!(domain->prot & IOMMU_CACHE)) {
+ ret = 0;
break;
+ }
}
+ mutex_unlock(&iommu->lock);
- iommu_domain_free(iommu->domain);
- iommu->domain = NULL;
- kfree(iommu);
+ return ret;
}
static long vfio_iommu_type1_ioctl(void *iommu_data,
@@ -841,7 +893,12 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
if (cmd == VFIO_CHECK_EXTENSION) {
switch (arg) {
case VFIO_TYPE1_IOMMU:
+ case VFIO_TYPE1v2_IOMMU:
return 1;
+ case VFIO_DMA_CC_IOMMU:
+ if (!iommu)
+ return 0;
+ return vfio_domains_have_iommu_cache(iommu);
default:
return 0;
}
@@ -858,7 +915,7 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
info.flags = 0;
- info.iova_pgsizes = iommu->domain->ops->pgsize_bitmap;
+ info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
return copy_to_user((void __user *)arg, &info, minsz);
@@ -911,9 +968,6 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
static int __init vfio_iommu_type1_init(void)
{
- if (!iommu_present(&pci_bus_type))
- return -ENODEV;
-
return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}
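
The long comment in vfio_dma_do_unmap() above spells out the v1 compatibility behaviour and the stricter v2 rules. The userspace-side sketch below illustrates what those v2 rules mean for the type1 ioctls; it is a hedged example that assumes 'container' is a /dev/vfio/vfio fd already set to VFIO_TYPE1v2_IOMMU with a group attached and 'buf' is a page-aligned 2 MB buffer, with error handling trimmed.

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int demo_v2_unmap_rules(int container, void *buf)
{
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (unsigned long)buf,
		.iova  = 0x100000,
		.size  = 2 * 1024 * 1024,
	};
	struct vfio_iommu_type1_dma_unmap unmap = { .argsz = sizeof(unmap) };

	if (ioctl(container, VFIO_IOMMU_MAP_DMA, &map))
		return -1;

	/* Bisecting the mapping is rejected under v2 (EINVAL expected)... */
	unmap.iova = map.iova + map.size / 2;
	unmap.size = map.size / 2;
	ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);

	/* ...while unmapping the whole original range succeeds and reports its size. */
	unmap.iova = map.iova;
	unmap.size = map.size;
	return ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
}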
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index dade5b7699bc..6c793bc683d9 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -27,12 +27,6 @@ config VGASTATE
tristate
default n
-config VIDEO_OUTPUT_CONTROL
- tristate "Lowlevel video output switch controls"
- help
- This framework adds support for low-level control of the video
- output switch.
-
config VIDEOMODE_HELPERS
bool
@@ -802,18 +796,9 @@ config FB_HGA
As this card technology is at least 25 years old,
most people will answer N here.
-config FB_SGIVW
- tristate "SGI Visual Workstation framebuffer support"
- depends on FB && X86_VISWS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- SGI Visual Workstation support for framebuffer graphics.
-
config FB_GBE
bool "SGI Graphics Backend frame buffer support"
- depends on (FB = y) && (SGI_IP32 || X86_VISWS)
+ depends on (FB = y) && SGI_IP32
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -982,7 +967,7 @@ config FB_PVR2
config FB_OPENCORES
tristate "OpenCores VGA/LCD core 2.0 framebuffer support"
- depends on FB
+ depends on FB && HAS_DMA
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index ae17ddf49a00..1be26fe10592 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -75,7 +75,6 @@ obj-$(CONFIG_FB_CG14) += cg14.o sbuslib.o
obj-$(CONFIG_FB_P9100) += p9100.o sbuslib.o
obj-$(CONFIG_FB_TCX) += tcx.o sbuslib.o
obj-$(CONFIG_FB_LEO) += leo.o sbuslib.o
-obj-$(CONFIG_FB_SGIVW) += sgivwfb.o
obj-$(CONFIG_FB_ACORN) += acornfb.o
obj-$(CONFIG_FB_ATARI) += atafb.o c2p_iplan2.o atafb_mfb.o \
atafb_iplan2p2.o atafb_iplan2p4.o atafb_iplan2p8.o
@@ -172,8 +171,6 @@ obj-$(CONFIG_FB_SIMPLE) += simplefb.o
# the test framebuffer is last
obj-$(CONFIG_FB_VIRTUAL) += vfb.o
-#video output switch sysfs driver
-obj-$(CONFIG_VIDEO_OUTPUT_CONTROL) += output.o
obj-$(CONFIG_VIDEOMODE_HELPERS) += display_timing.o videomode.o
ifeq ($(CONFIG_OF),y)
obj-$(CONFIG_VIDEOMODE_HELPERS) += of_display_timing.o of_videomode.o
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index cd961622f9c1..e683b6ef9594 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -1190,12 +1190,12 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
if (!sinfo->config)
goto free_info;
- strcpy(info->fix.id, sinfo->pdev->name);
info->flags = ATMEL_LCDFB_FBINFO_DEFAULT;
info->pseudo_palette = sinfo->pseudo_palette;
info->fbops = &atmel_lcdfb_ops;
info->fix = atmel_lcdfb_fix;
+ strcpy(info->fix.id, sinfo->pdev->name);
/* Enable LCDC Clocks */
sinfo->bus_clk = clk_get(dev, "hclk");
@@ -1298,6 +1298,12 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
goto unregister_irqs;
}
+ ret = atmel_lcdfb_set_par(info);
+ if (ret < 0) {
+ dev_err(dev, "set par failed: %d\n", ret);
+ goto unregister_irqs;
+ }
+
dev_set_drvdata(dev, info);
/*
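
The atmel_lcdfb hunk above moves the strcpy() below the 'info->fix = atmel_lcdfb_fix;' assignment because the whole-struct copy would otherwise clobber the id that was just written. A tiny hedged sketch of that general pitfall, with hypothetical names:

#include <string.h>

struct fix_like { char id[16]; int accel; };

static const struct fix_like template = { .id = "", .accel = 0 };

static void demo_fix_id(struct fix_like *fix, const char *name)
{
	/* Wrong order: the struct assignment wipes out the id. */
	strcpy(fix->id, name);
	*fix = template;

	/* Right order: copy the template first, then fill in the id. */
	*fix = template;
	strcpy(fix->id, name);		/* assumes name fits in id[16] */
}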
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 28fafbf864a5..c3d0074a32db 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -862,8 +862,8 @@ static int aty_var_to_crtc(const struct fb_info *info,
h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
- if ((xres > 1600) || (yres > 1200)) {
- FAIL("MACH64 chips are designed for max 1600x1200\n"
+ if ((xres > 1920) || (yres > 1200)) {
+ FAIL("MACH64 chips are designed for max 1920x1200\n"
"select another resolution.");
}
h_sync_strt = h_disp + var->right_margin;
@@ -2653,7 +2653,8 @@ static int aty_init(struct fb_info *info)
FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_YPAN;
+ FBINFO_HWACCEL_YPAN |
+ FBINFO_READS_FAST;
#ifdef CONFIG_PMAC_BACKLIGHT
if (M64_HAS(G3_PB_1_1) && of_machine_is_compatible("PowerBook1,1")) {
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
index e45833ce975b..182bd680141f 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/aty/mach64_accel.c
@@ -4,6 +4,7 @@
*/
#include <linux/delay.h>
+#include <asm/unaligned.h>
#include <linux/fb.h>
#include <video/mach64.h>
#include "atyfb.h"
@@ -419,7 +420,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
u32 *pbitmap, dwords = (src_bytes + 3) / 4;
for (pbitmap = (u32*)(image->data); dwords; dwords--, pbitmap++) {
wait_for_fifo(1, par);
- aty_st_le32(HOST_DATA0, le32_to_cpup(pbitmap), par);
+ aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par);
}
}
diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
index 95ec042ddbf8..0fe02e22d9a4 100644
--- a/drivers/video/aty/mach64_cursor.c
+++ b/drivers/video/aty/mach64_cursor.c
@@ -5,6 +5,7 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/string.h>
+#include "../fb_draw.h"
#include <asm/io.h>
@@ -157,24 +158,33 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
+ u16 l = 0xaaaa;
b = *src++;
m = *msk++;
switch (cursor->rop) {
case ROP_XOR:
// Upper 4 bits of mask data
- fb_writeb(cursor_bits_lookup[(b ^ m) >> 4], dst++);
+ l = cursor_bits_lookup[(b ^ m) >> 4] |
// Lower 4 bits of mask
- fb_writeb(cursor_bits_lookup[(b ^ m) & 0x0f],
- dst++);
+ (cursor_bits_lookup[(b ^ m) & 0x0f] << 8);
break;
case ROP_COPY:
// Upper 4 bits of mask data
- fb_writeb(cursor_bits_lookup[(b & m) >> 4], dst++);
+ l = cursor_bits_lookup[(b & m) >> 4] |
// Lower 4 bits of mask
- fb_writeb(cursor_bits_lookup[(b & m) & 0x0f],
- dst++);
+ (cursor_bits_lookup[(b & m) & 0x0f] << 8);
break;
}
+ /*
+			 * If the cursor width is not a multiple of 8 pixels
+ * we must pad it with transparent pattern (0xaaaa).
+ */
+ if ((j + 1) * 8 > cursor->image.width) {
+ l = comp(l, 0xaaaa,
+ (1 << ((cursor->image.width & 7) * 2)) - 1);
+ }
+ fb_writeb(l & 0xff, dst++);
+ fb_writeb(l >> 8, dst++);
}
dst += offset;
}
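
The mach64_cursor.c hunk above widens each iteration to a 16-bit pattern and pads partial bytes with the transparent value via comp() from fb_draw.h (comp(a, b, mask) == ((a ^ b) & mask) ^ b, i.e. take a where mask bits are set, b elsewhere). A short worked example of the padding arithmetic, under the assumption of a 10-pixel-wide cursor:

/*
 * For a 10-pixel-wide cursor the second source byte (j == 1) covers
 * pixels 8..15, but only pixels 8 and 9 exist.  Each pixel is 2 bits of
 * 'l', so the keep-mask is
 *	(1 << ((10 & 7) * 2)) - 1 == (1 << 4) - 1 == 0x000f
 * and comp(l, 0xaaaa, 0x000f) keeps the low 4 bits of 'l' while filling
 * the remaining 12 bits with the transparent 0b10 pattern (0xaaaa).
 */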
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
index ee0c0a982e4e..ec5350f2c28a 100644
--- a/drivers/video/backlight/aat2870_bl.c
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -149,8 +149,6 @@ static int aat2870_bl_probe(struct platform_device *pdev)
sizeof(struct aat2870_bl_driver_data),
GFP_KERNEL);
if (!aat2870_bl) {
- dev_err(&pdev->dev,
- "Failed to allocate memory for aat2870 backlight\n");
ret = -ENOMEM;
goto out;
}
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 9d656717d0f7..be8d83deca7d 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -224,10 +224,8 @@ static int adp8860_led_probe(struct i2c_client *client)
led = devm_kzalloc(&client->dev, sizeof(*led) * pdata->num_leds,
GFP_KERNEL);
- if (led == NULL) {
- dev_err(&client->dev, "failed to alloc memory\n");
+ if (led == NULL)
return -ENOMEM;
- }
ret = adp8860_write(client, ADP8860_ISCFR, pdata->led_fade_law);
ret = adp8860_write(client, ADP8860_ISCT1,
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 63707205326b..251af4d38d86 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -246,10 +246,8 @@ static int adp8870_led_probe(struct i2c_client *client)
led = devm_kzalloc(&client->dev, pdata->num_leds * sizeof(*led),
GFP_KERNEL);
- if (led == NULL) {
- dev_err(&client->dev, "failed to alloc memory\n");
+ if (led == NULL)
return -ENOMEM;
- }
ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
if (ret)
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 5d05555fe841..bd2172c2d650 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -34,13 +34,15 @@ static const char *const backlight_types[] = {
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
/* This callback gets called when something important happens inside a
* framebuffer driver. We're looking if that important event is blanking,
- * and if it is, we're switching backlight power as well ...
+ * and if it is, and a change is needed, we're switching backlight power as well ...
*/
static int fb_notifier_callback(struct notifier_block *self,
unsigned long event, void *data)
{
struct backlight_device *bd;
struct fb_event *evdata = data;
+ int node = evdata->info->node;
+ int fb_blank = 0;
/* If we aren't interested in this event, skip it immediately ... */
if (event != FB_EVENT_BLANK && event != FB_EVENT_CONBLANK)
@@ -51,12 +53,24 @@ static int fb_notifier_callback(struct notifier_block *self,
if (bd->ops)
if (!bd->ops->check_fb ||
bd->ops->check_fb(bd, evdata->info)) {
- bd->props.fb_blank = *(int *)evdata->data;
- if (bd->props.fb_blank == FB_BLANK_UNBLANK)
- bd->props.state &= ~BL_CORE_FBBLANK;
- else
- bd->props.state |= BL_CORE_FBBLANK;
- backlight_update_status(bd);
+ fb_blank = *(int *)evdata->data;
+ if (fb_blank == FB_BLANK_UNBLANK &&
+ !bd->fb_bl_on[node]) {
+ bd->fb_bl_on[node] = true;
+ if (!bd->use_count++) {
+ bd->props.state &= ~BL_CORE_FBBLANK;
+ bd->props.fb_blank = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+ }
+ } else if (fb_blank != FB_BLANK_UNBLANK &&
+ bd->fb_bl_on[node]) {
+ bd->fb_bl_on[node] = false;
+ if (!(--bd->use_count)) {
+ bd->props.state |= BL_CORE_FBBLANK;
+ bd->props.fb_blank = fb_blank;
+ backlight_update_status(bd);
+ }
+ }
}
mutex_unlock(&bd->ops_lock);
return 0;
@@ -333,7 +347,7 @@ struct backlight_device *backlight_device_register(const char *name,
rc = device_register(&new_bd->dev);
if (rc) {
- kfree(new_bd);
+ put_device(&new_bd->dev);
return ERR_PTR(rc);
}
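
The backlight.c hunk above makes the fb-blank notifier reference counted: each fb node records whether it is unblanked, and the backlight state only changes when use_count crosses zero. A minimal hedged model of that counting logic, detached from the fb/backlight structures (demo_* names are hypothetical):

#include <stdbool.h>

#define DEMO_FB_MAX 32

struct demo_bl {
	bool fb_on[DEMO_FB_MAX];	/* per-fb "currently unblanked" flag */
	int use_count;			/* how many fbs want the backlight on */
	bool powered;
};

static void demo_blank_event(struct demo_bl *bl, int node, bool unblank)
{
	if (unblank && !bl->fb_on[node]) {
		bl->fb_on[node] = true;
		if (!bl->use_count++)		/* 0 -> 1: power on */
			bl->powered = true;
	} else if (!unblank && bl->fb_on[node]) {
		bl->fb_on[node] = false;
		if (!--bl->use_count)		/* 1 -> 0: power off */
			bl->powered = false;
	}
}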
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index db8db5fa6583..51d18d637e2b 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -543,10 +543,8 @@ static int corgi_lcd_probe(struct spi_device *spi)
}
lcd = devm_kzalloc(&spi->dev, sizeof(struct corgi_lcd), GFP_KERNEL);
- if (!lcd) {
- dev_err(&spi->dev, "failed to allocate memory\n");
+ if (!lcd)
return -ENOMEM;
- }
lcd->spi_dev = spi;
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 81fb12770c2a..a2eba12e1cb7 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -13,6 +13,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/platform_data/gpio_backlight.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -23,6 +25,7 @@ struct gpio_backlight {
int gpio;
int active;
+ int def_value;
};
static int gpio_backlight_update_status(struct backlight_device *bl)
@@ -60,6 +63,29 @@ static const struct backlight_ops gpio_backlight_ops = {
.check_fb = gpio_backlight_check_fb,
};
+static int gpio_backlight_probe_dt(struct platform_device *pdev,
+ struct gpio_backlight *gbl)
+{
+ struct device_node *np = pdev->dev.of_node;
+ enum of_gpio_flags gpio_flags;
+
+ gbl->gpio = of_get_gpio_flags(np, 0, &gpio_flags);
+
+ if (!gpio_is_valid(gbl->gpio)) {
+ if (gbl->gpio != -EPROBE_DEFER) {
+ dev_err(&pdev->dev,
+ "Error: The gpios parameter is missing or invalid.\n");
+ }
+ return gbl->gpio;
+ }
+
+ gbl->active = (gpio_flags & OF_GPIO_ACTIVE_LOW) ? 0 : 1;
+
+ gbl->def_value = of_property_read_bool(np, "default-on");
+
+ return 0;
+}
+
static int gpio_backlight_probe(struct platform_device *pdev)
{
struct gpio_backlight_platform_data *pdata =
@@ -67,10 +93,12 @@ static int gpio_backlight_probe(struct platform_device *pdev)
struct backlight_properties props;
struct backlight_device *bl;
struct gpio_backlight *gbl;
+ struct device_node *np = pdev->dev.of_node;
int ret;
- if (!pdata) {
- dev_err(&pdev->dev, "failed to find platform data\n");
+ if (!pdata && !np) {
+ dev_err(&pdev->dev,
+ "failed to find platform data or device tree node.\n");
return -ENODEV;
}
@@ -79,14 +107,22 @@ static int gpio_backlight_probe(struct platform_device *pdev)
return -ENOMEM;
gbl->dev = &pdev->dev;
- gbl->fbdev = pdata->fbdev;
- gbl->gpio = pdata->gpio;
- gbl->active = pdata->active_low ? 0 : 1;
+
+ if (np) {
+ ret = gpio_backlight_probe_dt(pdev, gbl);
+ if (ret)
+ return ret;
+ } else {
+ gbl->fbdev = pdata->fbdev;
+ gbl->gpio = pdata->gpio;
+ gbl->active = pdata->active_low ? 0 : 1;
+ gbl->def_value = pdata->def_value;
+ }
ret = devm_gpio_request_one(gbl->dev, gbl->gpio, GPIOF_DIR_OUT |
(gbl->active ? GPIOF_INIT_LOW
: GPIOF_INIT_HIGH),
- pdata->name);
+ pdata ? pdata->name : "backlight");
if (ret < 0) {
dev_err(&pdev->dev, "unable to request GPIO\n");
return ret;
@@ -103,17 +139,25 @@ static int gpio_backlight_probe(struct platform_device *pdev)
return PTR_ERR(bl);
}
- bl->props.brightness = pdata->def_value;
+ bl->props.brightness = gbl->def_value;
backlight_update_status(bl);
platform_set_drvdata(pdev, bl);
return 0;
}
+#ifdef CONFIG_OF
+static struct of_device_id gpio_backlight_of_match[] = {
+ { .compatible = "gpio-backlight" },
+ { /* sentinel */ }
+};
+#endif
+
static struct platform_driver gpio_backlight_driver = {
.driver = {
.name = "gpio-backlight",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(gpio_backlight_of_match),
},
.probe = gpio_backlight_probe,
};
diff --git a/drivers/video/backlight/hx8357.c b/drivers/video/backlight/hx8357.c
index 985e854e244b..23f50b92a930 100644
--- a/drivers/video/backlight/hx8357.c
+++ b/drivers/video/backlight/hx8357.c
@@ -587,10 +587,8 @@ static int hx8357_probe(struct spi_device *spi)
int i, ret;
lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd) {
- dev_err(&spi->dev, "Couldn't allocate lcd internal structure!\n");
+ if (!lcd)
return -ENOMEM;
- }
ret = spi_setup(spi);
if (ret < 0) {
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index 73464e4b4c74..ea67fe199e34 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -482,10 +482,8 @@ static int ili922x_probe(struct spi_device *spi)
u16 reg = 0;
ili = devm_kzalloc(&spi->dev, sizeof(*ili), GFP_KERNEL);
- if (!ili) {
- dev_err(&spi->dev, "cannot alloc priv data\n");
+ if (!ili)
return -ENOMEM;
- }
ili->spi = spi;
spi_set_drvdata(spi, ili);
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index e2b8b40a9bd9..2cf39e6d519d 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -219,10 +219,8 @@ int ili9320_probe_spi(struct spi_device *spi,
	/* allocate and initialise our state */
ili = devm_kzalloc(&spi->dev, sizeof(struct ili9320), GFP_KERNEL);
- if (ili == NULL) {
- dev_err(dev, "no memory for device\n");
+ if (ili == NULL)
return -ENOMEM;
- }
ili->access.spi.id = ILI9320_SPI_IDCODE | ILI9320_SPI_ID(1);
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 63e763828e0e..5fa2649c9631 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -181,11 +181,8 @@ static int l4f00242t03_probe(struct spi_device *spi)
priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
GFP_KERNEL);
-
- if (priv == NULL) {
- dev_err(&spi->dev, "No memory for this device.\n");
+ if (priv == NULL)
return -ENOMEM;
- }
spi_set_drvdata(spi, priv);
spi->bits_per_word = 9;
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
index 187d1c283c1d..cff1fbe89a1b 100644
--- a/drivers/video/backlight/lm3533_bl.c
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -296,11 +296,8 @@ static int lm3533_bl_probe(struct platform_device *pdev)
}
bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL);
- if (!bl) {
- dev_err(&pdev->dev,
- "failed to allocate memory for backlight\n");
+ if (!bl)
return -ENOMEM;
- }
bl->lm3533 = lm3533;
bl->id = pdev->id;
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 6fd60adf922e..5f36808d214f 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -349,8 +349,9 @@ static int lm3639_probe(struct i2c_client *client,
props.brightness = pdata->init_brt_led;
props.max_brightness = pdata->max_brt_led;
pchip->bled =
- backlight_device_register("lm3639_bled", pchip->dev, pchip,
- &lm3639_bled_ops, &props);
+ devm_backlight_device_register(pchip->dev, "lm3639_bled",
+ pchip->dev, pchip, &lm3639_bled_ops,
+ &props);
if (IS_ERR(pchip->bled)) {
dev_err(&client->dev, "fail : backlight register\n");
ret = PTR_ERR(pchip->bled);
@@ -360,7 +361,7 @@ static int lm3639_probe(struct i2c_client *client,
ret = device_create_file(&(pchip->bled->dev), &dev_attr_bled_mode);
if (ret < 0) {
dev_err(&client->dev, "failed : add sysfs entries\n");
- goto err_bled_mode;
+ goto err_out;
}
/* flash */
@@ -391,8 +392,6 @@ err_torch:
led_classdev_unregister(&pchip->cdev_flash);
err_flash:
device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
-err_bled_mode:
- backlight_device_unregister(pchip->bled);
err_out:
return ret;
}
@@ -407,10 +406,8 @@ static int lm3639_remove(struct i2c_client *client)
led_classdev_unregister(&pchip->cdev_torch);
if (&pchip->cdev_flash)
led_classdev_unregister(&pchip->cdev_flash);
- if (pchip->bled) {
+ if (pchip->bled)
device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
- backlight_device_unregister(pchip->bled);
- }
return 0;
}
@@ -432,6 +429,6 @@ static struct i2c_driver lm3639_i2c_driver = {
module_i2c_driver(lm3639_i2c_driver);
MODULE_DESCRIPTION("Texas Instruments Backlight+Flash LED driver for LM3639");
-MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
-MODULE_AUTHOR("G.Shark Jeong <gshark.jeong@gmail.com>");
+MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>");
+MODULE_AUTHOR("Ldd Mlp <ldd-mlp@list.ti.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index de8832504f68..14590c54aedf 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -168,10 +168,8 @@ static int lms283gf05_probe(struct spi_device *spi)
st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state),
GFP_KERNEL);
- if (st == NULL) {
- dev_err(&spi->dev, "No memory for device state\n");
+ if (st == NULL)
return -ENOMEM;
- }
ld = devm_lcd_device_register(&spi->dev, "lms283gf05", &spi->dev, st,
&lms_ops);
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index d01884d4f1bf..c3d2e209fc8f 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -94,10 +94,8 @@ static int platform_lcd_probe(struct platform_device *pdev)
plcd = devm_kzalloc(&pdev->dev, sizeof(struct platform_lcd),
GFP_KERNEL);
- if (!plcd) {
- dev_err(dev, "no memory for state\n");
+ if (!plcd)
return -ENOMEM;
- }
plcd->us = dev;
plcd->pdata = pdata;
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index cbba37e6836e..595dcf561020 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -200,7 +200,6 @@ tps65217_bl_parse_dt(struct platform_device *pdev)
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
- dev_err(&pdev->dev, "failed to allocate platform data\n");
err = ERR_PTR(-ENOMEM);
goto err;
}
@@ -296,10 +295,8 @@ static int tps65217_bl_probe(struct platform_device *pdev)
tps65217_bl = devm_kzalloc(&pdev->dev, sizeof(*tps65217_bl),
GFP_KERNEL);
- if (tps65217_bl == NULL) {
- dev_err(&pdev->dev, "allocation of struct tps65217_bl failed\n");
+ if (tps65217_bl == NULL)
return -ENOMEM;
- }
tps65217_bl->tps = tps;
tps65217_bl->dev = &pdev->dev;
diff --git a/drivers/video/cfbcopyarea.c b/drivers/video/cfbcopyarea.c
index bb5a96b1645d..bcb57235fcc7 100644
--- a/drivers/video/cfbcopyarea.c
+++ b/drivers/video/cfbcopyarea.c
@@ -43,13 +43,22 @@
*/
static void
-bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
- const unsigned long __iomem *src, int src_idx, int bits,
+bitcpy(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
+ const unsigned long __iomem *src, unsigned src_idx, int bits,
unsigned n, u32 bswapmask)
{
unsigned long first, last;
int const shift = dst_idx-src_idx;
- int left, right;
+
+#if 0
+ /*
+	 * If you suspect a bug in this function, compare it with this simple
+ * memmove implementation.
+ */
+ fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
+ (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
+ return;
+#endif
first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
@@ -98,9 +107,8 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
unsigned long d0, d1;
int m;
- right = shift & (bits - 1);
- left = -shift & (bits - 1);
- bswapmask &= shift;
+ int const left = shift & (bits - 1);
+ int const right = -shift & (bits - 1);
if (dst_idx+n <= bits) {
// Single destination word
@@ -110,15 +118,15 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
d0 = fb_rev_pixels_in_long(d0, bswapmask);
if (shift > 0) {
// Single source word
- d0 >>= right;
+ d0 <<= left;
} else if (src_idx+n <= bits) {
// Single source word
- d0 <<= left;
+ d0 >>= right;
} else {
// 2 source words
d1 = FB_READL(src + 1);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
- d0 = d0<<left | d1>>right;
+ d0 = d0 >> right | d1 << left;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
@@ -135,60 +143,59 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
if (shift > 0) {
// Single source word
d1 = d0;
- d0 >>= right;
- dst++;
+ d0 <<= left;
n -= bits - dst_idx;
} else {
// 2 source words
d1 = FB_READL(src++);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
- d0 = d0<<left | d1>>right;
- dst++;
+ d0 = d0 >> right | d1 << left;
n -= bits - dst_idx;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
d0 = d1;
+ dst++;
// Main chunk
m = n % bits;
n /= bits;
while ((n >= 4) && !bswapmask) {
d1 = FB_READL(src++);
- FB_WRITEL(d0 << left | d1 >> right, dst++);
+ FB_WRITEL(d0 >> right | d1 << left, dst++);
d0 = d1;
d1 = FB_READL(src++);
- FB_WRITEL(d0 << left | d1 >> right, dst++);
+ FB_WRITEL(d0 >> right | d1 << left, dst++);
d0 = d1;
d1 = FB_READL(src++);
- FB_WRITEL(d0 << left | d1 >> right, dst++);
+ FB_WRITEL(d0 >> right | d1 << left, dst++);
d0 = d1;
d1 = FB_READL(src++);
- FB_WRITEL(d0 << left | d1 >> right, dst++);
+ FB_WRITEL(d0 >> right | d1 << left, dst++);
d0 = d1;
n -= 4;
}
while (n--) {
d1 = FB_READL(src++);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
- d0 = d0 << left | d1 >> right;
+ d0 = d0 >> right | d1 << left;
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(d0, dst++);
d0 = d1;
}
// Trailing bits
- if (last) {
- if (m <= right) {
+ if (m) {
+ if (m <= bits - right) {
// Single source word
- d0 <<= left;
+ d0 >>= right;
} else {
// 2 source words
d1 = FB_READL(src);
d1 = fb_rev_pixels_in_long(d1,
bswapmask);
- d0 = d0<<left | d1>>right;
+ d0 = d0 >> right | d1 << left;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
@@ -202,43 +209,46 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
*/
static void
-bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
- const unsigned long __iomem *src, int src_idx, int bits,
+bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
+ const unsigned long __iomem *src, unsigned src_idx, int bits,
unsigned n, u32 bswapmask)
{
unsigned long first, last;
int shift;
- dst += (n-1)/bits;
- src += (n-1)/bits;
- if ((n-1) % bits) {
- dst_idx += (n-1) % bits;
- dst += dst_idx >> (ffs(bits) - 1);
- dst_idx &= bits - 1;
- src_idx += (n-1) % bits;
- src += src_idx >> (ffs(bits) - 1);
- src_idx &= bits - 1;
- }
+#if 0
+ /*
+	 * If you suspect a bug in this function, compare it with this simple
+ * memmove implementation.
+ */
+ fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
+ (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
+ return;
+#endif
+
+ dst += (dst_idx + n - 1) / bits;
+ src += (src_idx + n - 1) / bits;
+ dst_idx = (dst_idx + n - 1) % bits;
+ src_idx = (src_idx + n - 1) % bits;
shift = dst_idx-src_idx;
- first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask);
- last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits),
- bswapmask);
+ first = ~fb_shifted_pixels_mask_long(p, (dst_idx + 1) % bits, bswapmask);
+ last = fb_shifted_pixels_mask_long(p, (bits + dst_idx + 1 - n) % bits, bswapmask);
if (!shift) {
// Same alignment for source and dest
if ((unsigned long)dst_idx+1 >= n) {
// Single word
- if (last)
- first &= last;
- FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
+ if (first)
+ last &= first;
+ FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
} else {
// Multiple destination words
// Leading bits
- if (first != ~0UL) {
+ if (first) {
FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
dst--;
src--;
@@ -262,7 +272,7 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
FB_WRITEL(FB_READL(src--), dst--);
// Trailing bits
- if (last)
+ if (last != -1UL)
FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
}
} else {
@@ -270,29 +280,28 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
unsigned long d0, d1;
int m;
- int const left = -shift & (bits-1);
- int const right = shift & (bits-1);
- bswapmask &= shift;
+ int const left = shift & (bits-1);
+ int const right = -shift & (bits-1);
if ((unsigned long)dst_idx+1 >= n) {
// Single destination word
- if (last)
- first &= last;
+ if (first)
+ last &= first;
d0 = FB_READL(src);
if (shift < 0) {
// Single source word
- d0 <<= left;
+ d0 >>= right;
} else if (1+(unsigned long)src_idx >= n) {
// Single source word
- d0 >>= right;
+ d0 <<= left;
} else {
// 2 source words
d1 = FB_READL(src - 1);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
- d0 = d0>>right | d1<<left;
+ d0 = d0 << left | d1 >> right;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
- FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
+ FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
} else {
// Multiple destination words
/** We must always remember the last value read, because in case
@@ -307,12 +316,12 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
if (shift < 0) {
// Single source word
d1 = d0;
- d0 <<= left;
+ d0 >>= right;
} else {
// 2 source words
d1 = FB_READL(src--);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
- d0 = d0>>right | d1<<left;
+ d0 = d0 << left | d1 >> right;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
@@ -325,39 +334,39 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
n /= bits;
while ((n >= 4) && !bswapmask) {
d1 = FB_READL(src--);
- FB_WRITEL(d0 >> right | d1 << left, dst--);
+ FB_WRITEL(d0 << left | d1 >> right, dst--);
d0 = d1;
d1 = FB_READL(src--);
- FB_WRITEL(d0 >> right | d1 << left, dst--);
+ FB_WRITEL(d0 << left | d1 >> right, dst--);
d0 = d1;
d1 = FB_READL(src--);
- FB_WRITEL(d0 >> right | d1 << left, dst--);
+ FB_WRITEL(d0 << left | d1 >> right, dst--);
d0 = d1;
d1 = FB_READL(src--);
- FB_WRITEL(d0 >> right | d1 << left, dst--);
+ FB_WRITEL(d0 << left | d1 >> right, dst--);
d0 = d1;
n -= 4;
}
while (n--) {
d1 = FB_READL(src--);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
- d0 = d0 >> right | d1 << left;
+ d0 = d0 << left | d1 >> right;
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(d0, dst--);
d0 = d1;
}
// Trailing bits
- if (last) {
- if (m <= left) {
+ if (m) {
+ if (m <= bits - left) {
// Single source word
- d0 >>= right;
+ d0 <<= left;
} else {
// 2 source words
d1 = FB_READL(src);
d1 = fb_rev_pixels_in_long(d1,
bswapmask);
- d0 = d0>>right | d1<<left;
+ d0 = d0 << left | d1 >> right;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
@@ -371,9 +380,9 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
u32 height = area->height, width = area->width;
unsigned long const bits_per_line = p->fix.line_length*8u;
- unsigned long __iomem *dst = NULL, *src = NULL;
+ unsigned long __iomem *base = NULL;
int bits = BITS_PER_LONG, bytes = bits >> 3;
- int dst_idx = 0, src_idx = 0, rev_copy = 0;
+ unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
u32 bswapmask = fb_compute_bswapmask(p);
if (p->state != FBINFO_STATE_RUNNING)
@@ -389,7 +398,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
// split the base of the framebuffer into a long-aligned address and the
// index of the first bit
- dst = src = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
+ base = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1));
// add offset of source and target area
dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel;
@@ -402,20 +411,14 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
while (height--) {
dst_idx -= bits_per_line;
src_idx -= bits_per_line;
- dst += dst_idx >> (ffs(bits) - 1);
- dst_idx &= (bytes - 1);
- src += src_idx >> (ffs(bits) - 1);
- src_idx &= (bytes - 1);
- bitcpy_rev(p, dst, dst_idx, src, src_idx, bits,
+ bitcpy_rev(p, base + (dst_idx / bits), dst_idx % bits,
+ base + (src_idx / bits), src_idx % bits, bits,
width*p->var.bits_per_pixel, bswapmask);
}
} else {
while (height--) {
- dst += dst_idx >> (ffs(bits) - 1);
- dst_idx &= (bytes - 1);
- src += src_idx >> (ffs(bits) - 1);
- src_idx &= (bytes - 1);
- bitcpy(p, dst, dst_idx, src, src_idx, bits,
+ bitcpy(p, base + (dst_idx / bits), dst_idx % bits,
+ base + (src_idx / bits), src_idx % bits, bits,
width*p->var.bits_per_pixel, bswapmask);
dst_idx += bits_per_line;
src_idx += bits_per_line;
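
The reworked bitcpy()/bitcpy_rev() above assemble every destination word in the misaligned case from two adjacent source words. A minimal sketch of that merge step, outside the framebuffer I/O accessors and byte-swap handling and with hypothetical names, assuming 0 < left < bits:

/*
 * Sketch (not part of the patch): copy nwords destination words when the
 * source bitstream is offset by `left' bits.  right == bits - left, as in
 * the loops above; the last word read is always carried into the next step.
 */
static void merge_copy_words(unsigned long *dst, const unsigned long *src,
			     unsigned int nwords, unsigned int left)
{
	const unsigned int bits = 8 * sizeof(unsigned long);
	const unsigned int right = bits - left;
	unsigned long d0 = *src++;

	while (nwords--) {
		unsigned long d1 = *src++;

		/* top `left' bits of d0 fill the low end of the output,
		 * low `right' bits of d1 fill the rest */
		*dst++ = d0 >> right | d1 << left;
		d0 = d1;
	}
}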
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 4e39291ac8b4..f447734b09b4 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -759,7 +759,7 @@ static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
newinfo in an undefined state. Thus, a call to
fb_set_par() may be needed for the newinfo.
*/
- if (newinfo->fbops->fb_set_par) {
+ if (newinfo && newinfo->fbops->fb_set_par) {
ret = newinfo->fbops->fb_set_par(newinfo);
if (ret)
@@ -3028,8 +3028,31 @@ static int fbcon_fb_unbind(int idx)
if (con2fb_map[i] == idx)
set_con2fb_map(i, new_idx, 0);
}
- } else
+ } else {
+ struct fb_info *info = registered_fb[idx];
+
+ /* This is sort of like set_con2fb_map, except it maps
+ * the consoles to no device and then releases the
+ * oldinfo to free memory and cancel the cursor blink
+ * timer. I can imagine this just becoming part of
+ * set_con2fb_map where new_idx is -1
+ */
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map[i] == idx) {
+ con2fb_map[i] = -1;
+ if (!search_fb_in_map(idx)) {
+ ret = con2fb_release_oldinfo(vc_cons[i].d,
+ info, NULL, i,
+ idx, 0);
+ if (ret) {
+ con2fb_map[i] = idx;
+ return ret;
+ }
+ }
+ }
+ }
ret = fbcon_unbind();
+ }
return ret;
}
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index a1d74dd11988..0c0ba920ea48 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -1546,7 +1546,7 @@ err_pm_runtime_disable:
return ret;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static struct lcdc_context {
u32 clk_enable;
u32 ctrl;
@@ -1610,9 +1610,9 @@ static void lcd_context_restore(void)
return;
}
-static int fb_suspend(struct platform_device *dev, pm_message_t state)
+static int fb_suspend(struct device *dev)
{
- struct fb_info *info = platform_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct da8xx_fb_par *par = info->par;
console_lock();
@@ -1622,18 +1622,18 @@ static int fb_suspend(struct platform_device *dev, pm_message_t state)
fb_set_suspend(info, 1);
lcd_disable_raster(DA8XX_FRAME_WAIT);
lcd_context_save();
- pm_runtime_put_sync(&dev->dev);
+ pm_runtime_put_sync(dev);
console_unlock();
return 0;
}
-static int fb_resume(struct platform_device *dev)
+static int fb_resume(struct device *dev)
{
- struct fb_info *info = platform_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct da8xx_fb_par *par = info->par;
console_lock();
- pm_runtime_get_sync(&dev->dev);
+ pm_runtime_get_sync(dev);
lcd_context_restore();
if (par->blank == FB_BLANK_UNBLANK) {
lcd_enable_raster();
@@ -1647,19 +1647,17 @@ static int fb_resume(struct platform_device *dev)
return 0;
}
-#else
-#define fb_suspend NULL
-#define fb_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(fb_pm_ops, fb_suspend, fb_resume);
+
static struct platform_driver da8xx_fb_driver = {
.probe = fb_probe,
.remove = fb_remove,
- .suspend = fb_suspend,
- .resume = fb_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .pm = &fb_pm_ops,
},
};
module_platform_driver(da8xx_fb_driver);
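
The da8xx-fb change above is the standard conversion from legacy platform_driver suspend/resume hooks to dev_pm_ops. A condensed sketch of the pattern, with hypothetical foo_* names standing in for the driver specifics:

/* Sketch (not part of the patch): dev_pm_ops conversion template. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the hardware using driver data stashed at probe time */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* restart the hardware */
	return 0;
}

/* SET_SYSTEM_SLEEP_PM_OPS inside this macro drops the callbacks when
 * CONFIG_PM_SLEEP is disabled, so the old #ifdef / "#define foo_suspend
 * NULL" fallbacks become unnecessary. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");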
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index cd7c0df9f24b..ae9618ff6735 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -73,7 +73,6 @@ static void efifb_destroy(struct fb_info *info)
release_mem_region(info->apertures->ranges[0].base,
info->apertures->ranges[0].size);
fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
}
static struct fb_ops efifb_ops = {
@@ -244,6 +243,7 @@ static int efifb_probe(struct platform_device *dev)
err = -ENOMEM;
goto err_release_mem;
}
+ platform_set_drvdata(dev, info);
info->pseudo_palette = info->par;
info->par = NULL;
@@ -337,12 +337,23 @@ err_release_mem:
return err;
}
+static int efifb_remove(struct platform_device *pdev)
+{
+ struct fb_info *info = platform_get_drvdata(pdev);
+
+ unregister_framebuffer(info);
+ framebuffer_release(info);
+
+ return 0;
+}
+
static struct platform_driver efifb_driver = {
.driver = {
.name = "efi-framebuffer",
.owner = THIS_MODULE,
},
.probe = efifb_probe,
+ .remove = efifb_remove,
};
module_platform_driver(efifb_driver);
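
efifb gains a remove() callback above, which only works because probe() now stashes the fb_info with platform_set_drvdata(). The pairing, reduced to its core with hypothetical foo_* names:

/* Sketch (not part of the patch): drvdata hand-off between probe and remove. */
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	int dummy;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);	/* retrieved again in remove() */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	/* tear down whatever probe() set up using priv */
	(void)priv;
	return 0;
}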
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig
index 75c8a8e7efc0..fcf2d48ac6d1 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/exynos/Kconfig
@@ -29,11 +29,4 @@ config EXYNOS_LCD_S6E8AX0
If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
LCD control driver.
-config EXYNOS_DP
- bool "EXYNOS DP driver support"
- depends on OF && ARCH_EXYNOS
- default n
- help
- This enables support for DP device.
-
endif # EXYNOS_VIDEO
diff --git a/drivers/video/exynos/Makefile b/drivers/video/exynos/Makefile
index ec7772e452a9..b5b1bd228abb 100644
--- a/drivers/video/exynos/Makefile
+++ b/drivers/video/exynos/Makefile
@@ -5,4 +5,3 @@
obj-$(CONFIG_EXYNOS_MIPI_DSI) += exynos_mipi_dsi.o exynos_mipi_dsi_common.o \
exynos_mipi_dsi_lowlevel.o
obj-$(CONFIG_EXYNOS_LCD_S6E8AX0) += s6e8ax0.o
-obj-$(CONFIG_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o
diff --git a/drivers/video/exynos/s6e8ax0.c b/drivers/video/exynos/s6e8ax0.c
index ca2602413aa4..29e70ed3f154 100644
--- a/drivers/video/exynos/s6e8ax0.c
+++ b/drivers/video/exynos/s6e8ax0.c
@@ -794,19 +794,18 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
return ret;
}
- lcd->ld = lcd_device_register("s6e8ax0", lcd->dev, lcd,
+ lcd->ld = devm_lcd_device_register(lcd->dev, "s6e8ax0", lcd->dev, lcd,
&s6e8ax0_lcd_ops);
if (IS_ERR(lcd->ld)) {
dev_err(lcd->dev, "failed to register lcd ops.\n");
return PTR_ERR(lcd->ld);
}
- lcd->bd = backlight_device_register("s6e8ax0-bl", lcd->dev, lcd,
- &s6e8ax0_backlight_ops, NULL);
+ lcd->bd = devm_backlight_device_register(lcd->dev, "s6e8ax0-bl",
+ lcd->dev, lcd, &s6e8ax0_backlight_ops, NULL);
if (IS_ERR(lcd->bd)) {
dev_err(lcd->dev, "failed to register backlight ops.\n");
- ret = PTR_ERR(lcd->bd);
- goto err_backlight_register;
+ return PTR_ERR(lcd->bd);
}
lcd->bd->props.max_brightness = MAX_BRIGHTNESS;
@@ -834,10 +833,6 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
dev_dbg(lcd->dev, "probed s6e8ax0 panel driver.\n");
return 0;
-
-err_backlight_register:
- lcd_device_unregister(lcd->ld);
- return ret;
}
#ifdef CONFIG_PM
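
The s6e8ax0 hunk above swaps lcd_device_register()/backlight_device_register() for their devm_ variants, so the devices are released automatically when the driver detaches and the err_backlight_register unwind label can go. The shape this leaves the registration calls in, where example_lcd_ops and example_backlight_ops are placeholders:

/* Sketch (not part of the patch): managed registration needs no manual
 * unregister on the error path or in remove(). */
lcd->ld = devm_lcd_device_register(dev, "example-lcd", dev, lcd,
				   &example_lcd_ops);
if (IS_ERR(lcd->ld))
	return PTR_ERR(lcd->ld);

lcd->bd = devm_backlight_device_register(dev, "example-bl", dev, lcd,
					 &example_backlight_ops, NULL);
if (IS_ERR(lcd->bd))
	return PTR_ERR(lcd->bd);	/* lcd->ld is cleaned up automatically */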
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 7309ac704e26..b6d5008f361f 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1596,8 +1596,7 @@ static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
(primary && gen_aper && gen_aper->count &&
gen_aper->ranges[0].base == VGA_FB_PHYS)) {
- printk(KERN_INFO "fb: conflicting fb hw usage "
- "%s vs %s - removing generic driver\n",
+ printk(KERN_INFO "fb: switching to %s from %s\n",
name, registered_fb[i]->fix.id);
ret = do_unregister_framebuffer(registered_fb[i]);
if (ret)
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index 4c7cb368a9dc..3ec65a878ac8 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -45,10 +45,6 @@ struct gbefb_par {
#define GBE_BASE 0x16000000 /* SGI O2 */
#endif
-#ifdef CONFIG_X86_VISWS
-#define GBE_BASE 0xd0000000 /* SGI Visual Workstation */
-#endif
-
/* macro for fastest write-though access to the framebuffer */
#ifdef CONFIG_MIPS
#ifdef CONFIG_CPU_R10000
diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
index 130708f96430..e23392ec5af3 100644
--- a/drivers/video/hyperv_fb.c
+++ b/drivers/video/hyperv_fb.c
@@ -42,6 +42,7 @@
#include <linux/completion.h>
#include <linux/fb.h>
#include <linux/pci.h>
+#include <linux/efi.h>
#include <linux/hyperv.h>
@@ -212,6 +213,7 @@ struct synthvid_msg {
struct hvfb_par {
struct fb_info *info;
+ struct resource mem;
bool fb_ready; /* fb device is ready */
struct completion wait;
u32 synthvid_version;
@@ -460,13 +462,13 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
goto error;
}
- if (par->synthvid_version == SYNTHVID_VERSION_WIN7) {
+ if (par->synthvid_version == SYNTHVID_VERSION_WIN7)
screen_depth = SYNTHVID_DEPTH_WIN7;
- screen_fb_size = SYNTHVID_FB_SIZE_WIN7;
- } else {
+ else
screen_depth = SYNTHVID_DEPTH_WIN8;
- screen_fb_size = SYNTHVID_FB_SIZE_WIN8;
- }
+
+ screen_fb_size = hdev->channel->offermsg.offer.
+ mmio_megabytes * 1024 * 1024;
return 0;
@@ -627,26 +629,46 @@ static void hvfb_get_option(struct fb_info *info)
/* Get framebuffer memory from Hyper-V video pci space */
static int hvfb_getmem(struct fb_info *info)
{
- struct pci_dev *pdev;
- ulong fb_phys;
+ struct hvfb_par *par = info->par;
+ struct pci_dev *pdev = NULL;
void __iomem *fb_virt;
+ int gen2vm = efi_enabled(EFI_BOOT);
+ int ret;
- pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ par->mem.name = KBUILD_MODNAME;
+ par->mem.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (gen2vm) {
+ ret = allocate_resource(&hyperv_mmio, &par->mem,
+ screen_fb_size,
+ 0, -1,
+ screen_fb_size,
+ NULL, NULL);
+ if (ret != 0) {
+ pr_err("Unable to allocate framebuffer memory\n");
+ return -ENODEV;
+ }
+ } else {
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
- if (!pdev) {
- pr_err("Unable to find PCI Hyper-V video\n");
- return -ENODEV;
- }
+ if (!pdev) {
+ pr_err("Unable to find PCI Hyper-V video\n");
+ return -ENODEV;
+ }
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) < screen_fb_size)
- goto err1;
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+ pci_resource_len(pdev, 0) < screen_fb_size)
+ goto err1;
- fb_phys = pci_resource_end(pdev, 0) - screen_fb_size + 1;
- if (!request_mem_region(fb_phys, screen_fb_size, KBUILD_MODNAME))
- goto err1;
+ par->mem.end = pci_resource_end(pdev, 0);
+ par->mem.start = par->mem.end - screen_fb_size + 1;
+ ret = request_resource(&pdev->resource[0], &par->mem);
+ if (ret != 0) {
+ pr_err("Unable to request framebuffer memory\n");
+ goto err1;
+ }
+ }
- fb_virt = ioremap(fb_phys, screen_fb_size);
+ fb_virt = ioremap(par->mem.start, screen_fb_size);
if (!fb_virt)
goto err2;
@@ -654,30 +676,44 @@ static int hvfb_getmem(struct fb_info *info)
if (!info->apertures)
goto err3;
- info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
- info->fix.smem_start = fb_phys;
+ if (gen2vm) {
+ info->apertures->ranges[0].base = screen_info.lfb_base;
+ info->apertures->ranges[0].size = screen_info.lfb_size;
+ remove_conflicting_framebuffers(info->apertures,
+ KBUILD_MODNAME, false);
+ } else {
+ info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
+ }
+
+ info->fix.smem_start = par->mem.start;
info->fix.smem_len = screen_fb_size;
info->screen_base = fb_virt;
info->screen_size = screen_fb_size;
- pci_dev_put(pdev);
+ if (!gen2vm)
+ pci_dev_put(pdev);
+
return 0;
err3:
iounmap(fb_virt);
err2:
- release_mem_region(fb_phys, screen_fb_size);
+ release_resource(&par->mem);
err1:
- pci_dev_put(pdev);
+ if (!gen2vm)
+ pci_dev_put(pdev);
+
return -ENOMEM;
}
/* Release the framebuffer */
static void hvfb_putmem(struct fb_info *info)
{
+ struct hvfb_par *par = info->par;
+
iounmap(info->screen_base);
- release_mem_region(info->fix.smem_start, screen_fb_size);
+ release_resource(&par->mem);
}
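
Generation 2 Hyper-V guests expose no PCI video device, so hvfb_getmem() above carves screen_fb_size bytes out of the hyperv_mmio resource tree instead of claiming the tail of the PCI BAR. The two paths, stripped of error handling:

/* Sketch (not part of the patch): how hvfb now obtains its framebuffer range. */
if (gen2vm) {
	/* no PCI device: ask the VMBus MMIO tree for any suitably sized,
	 * suitably aligned window */
	allocate_resource(&hyperv_mmio, &par->mem, screen_fb_size,
			  0, -1, screen_fb_size, NULL, NULL);
} else {
	/* gen1: claim the top screen_fb_size bytes of the video PCI BAR */
	par->mem.end = pci_resource_end(pdev, 0);
	par->mem.start = par->mem.end - screen_fb_size + 1;
	request_resource(&pdev->resource[0], &par->mem);
}
fb_virt = ioremap(par->mem.start, screen_fb_size);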
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 44ee678481d5..f6e621684953 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -30,10 +30,13 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/lcd.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
@@ -45,12 +48,6 @@
*/
#define DEBUG_VAR 1
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
- (defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) && \
- defined(CONFIG_FB_IMX_MODULE))
-#define PWMR_BACKLIGHT_AVAILABLE
-#endif
-
#define DRIVER_NAME "imx-fb"
#define LCDC_SSA 0x00
@@ -153,11 +150,8 @@ struct imxfb_info {
* the framebuffer memory region to.
*/
dma_addr_t map_dma;
- u_char *map_cpu;
u_int map_size;
- u_char *screen_cpu;
- dma_addr_t screen_dma;
u_int palette_size;
dma_addr_t dbar1;
@@ -167,18 +161,13 @@ struct imxfb_info {
u_int pwmr;
u_int lscr1;
u_int dmacr;
- u_int cmap_inverse:1,
- cmap_static:1,
- unused:30;
+ bool cmap_inverse;
+ bool cmap_static;
struct imx_fb_videomode *mode;
int num_modes;
-#ifdef PWMR_BACKLIGHT_AVAILABLE
- struct backlight_device *bl;
-#endif
- void (*lcd_power)(int);
- void (*backlight_power)(int);
+ struct regulator *lcd_pwr;
};
static struct platform_device_id imxfb_devtype[] = {
@@ -484,83 +473,6 @@ static int imxfb_set_par(struct fb_info *info)
return 0;
}
-#ifdef PWMR_BACKLIGHT_AVAILABLE
-static int imxfb_bl_get_brightness(struct backlight_device *bl)
-{
- struct imxfb_info *fbi = bl_get_data(bl);
-
- return readl(fbi->regs + LCDC_PWMR) & 0xFF;
-}
-
-static int imxfb_bl_update_status(struct backlight_device *bl)
-{
- struct imxfb_info *fbi = bl_get_data(bl);
- int brightness = bl->props.brightness;
-
- if (!fbi->pwmr)
- return 0;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- fbi->pwmr = (fbi->pwmr & ~0xFF) | brightness;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
- clk_prepare_enable(fbi->clk_ipg);
- clk_prepare_enable(fbi->clk_ahb);
- clk_prepare_enable(fbi->clk_per);
- }
- writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
- if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
- clk_disable_unprepare(fbi->clk_per);
- clk_disable_unprepare(fbi->clk_ahb);
- clk_disable_unprepare(fbi->clk_ipg);
- }
-
- return 0;
-}
-
-static const struct backlight_ops imxfb_lcdc_bl_ops = {
- .update_status = imxfb_bl_update_status,
- .get_brightness = imxfb_bl_get_brightness,
-};
-
-static void imxfb_init_backlight(struct imxfb_info *fbi)
-{
- struct backlight_properties props;
- struct backlight_device *bl;
-
- if (fbi->bl)
- return;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 0xff;
- props.type = BACKLIGHT_RAW;
- writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
-
- bl = backlight_device_register("imxfb-bl", &fbi->pdev->dev, fbi,
- &imxfb_lcdc_bl_ops, &props);
- if (IS_ERR(bl)) {
- dev_err(&fbi->pdev->dev, "error %ld on backlight register\n",
- PTR_ERR(bl));
- return;
- }
-
- fbi->bl = bl;
- bl->props.power = FB_BLANK_UNBLANK;
- bl->props.fb_blank = FB_BLANK_UNBLANK;
- bl->props.brightness = imxfb_bl_get_brightness(bl);
-}
-
-static void imxfb_exit_backlight(struct imxfb_info *fbi)
-{
- if (fbi->bl)
- backlight_device_unregister(fbi->bl);
-}
-#endif
-
static void imxfb_enable_controller(struct imxfb_info *fbi)
{
@@ -569,7 +481,7 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
pr_debug("Enabling LCD controller\n");
- writel(fbi->screen_dma, fbi->regs + LCDC_SSA);
+ writel(fbi->map_dma, fbi->regs + LCDC_SSA);
/* panning offset 0 (0 pixel offset) */
writel(0x00000000, fbi->regs + LCDC_POS);
@@ -588,11 +500,6 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
clk_prepare_enable(fbi->clk_ahb);
clk_prepare_enable(fbi->clk_per);
fbi->enabled = true;
-
- if (fbi->backlight_power)
- fbi->backlight_power(1);
- if (fbi->lcd_power)
- fbi->lcd_power(1);
}
static void imxfb_disable_controller(struct imxfb_info *fbi)
@@ -602,11 +509,6 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
pr_debug("Disabling LCD controller\n");
- if (fbi->backlight_power)
- fbi->backlight_power(0);
- if (fbi->lcd_power)
- fbi->lcd_power(0);
-
clk_disable_unprepare(fbi->clk_per);
clk_disable_unprepare(fbi->clk_ipg);
clk_disable_unprepare(fbi->clk_ahb);
@@ -709,10 +611,8 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
fbi->regs + LCDC_SIZE);
writel(fbi->pcr, fbi->regs + LCDC_PCR);
-#ifndef PWMR_BACKLIGHT_AVAILABLE
if (fbi->pwmr)
writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
-#endif
writel(fbi->lscr1, fbi->regs + LCDC_LSCR1);
/* dmacr = 0 is no valid value, as we need DMA control marks. */
@@ -722,37 +622,6 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
return 0;
}
-#ifdef CONFIG_PM
-/*
- * Power management hooks. Note that we won't be called from IRQ context,
- * unlike the blank functions above, so we may sleep.
- */
-static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct fb_info *info = platform_get_drvdata(dev);
- struct imxfb_info *fbi = info->par;
-
- pr_debug("%s\n", __func__);
-
- imxfb_disable_controller(fbi);
- return 0;
-}
-
-static int imxfb_resume(struct platform_device *dev)
-{
- struct fb_info *info = platform_get_drvdata(dev);
- struct imxfb_info *fbi = info->par;
-
- pr_debug("%s\n", __func__);
-
- imxfb_enable_controller(fbi);
- return 0;
-}
-#else
-#define imxfb_suspend NULL
-#define imxfb_resume NULL
-#endif
-
static int imxfb_init_fbinfo(struct platform_device *pdev)
{
struct imx_fb_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -790,14 +659,9 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
info->flags = FBINFO_FLAG_DEFAULT |
FBINFO_READS_FAST;
if (pdata) {
- info->var.grayscale = pdata->cmap_greyscale;
- fbi->cmap_inverse = pdata->cmap_inverse;
- fbi->cmap_static = pdata->cmap_static;
fbi->lscr1 = pdata->lscr1;
fbi->dmacr = pdata->dmacr;
fbi->pwmr = pdata->pwmr;
- fbi->lcd_power = pdata->lcd_power;
- fbi->backlight_power = pdata->backlight_power;
} else {
np = pdev->dev.of_node;
info->var.grayscale = of_property_read_bool(np,
@@ -806,14 +670,12 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
fbi->cmap_static = of_property_read_bool(np, "cmap-static");
fbi->lscr1 = IMXFB_LSCR1_DEFAULT;
+
+ of_property_read_u32(np, "fsl,lpccr", &fbi->pwmr);
+
of_property_read_u32(np, "fsl,lscr1", &fbi->lscr1);
of_property_read_u32(np, "fsl,dmacr", &fbi->dmacr);
-
- /* These two function pointers could be used by some specific
- * platforms. */
- fbi->lcd_power = NULL;
- fbi->backlight_power = NULL;
}
return 0;
@@ -856,9 +718,98 @@ static int imxfb_of_read_mode(struct device *dev, struct device_node *np,
return 0;
}
+static int imxfb_lcd_check_fb(struct lcd_device *lcddev, struct fb_info *fi)
+{
+ struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev);
+
+ if (!fi || fi->par == fbi)
+ return 1;
+
+ return 0;
+}
+
+static int imxfb_lcd_get_contrast(struct lcd_device *lcddev)
+{
+ struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev);
+
+ return fbi->pwmr & 0xff;
+}
+
+static int imxfb_lcd_set_contrast(struct lcd_device *lcddev, int contrast)
+{
+ struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev);
+
+ if (fbi->pwmr && fbi->enabled) {
+ if (contrast > 255)
+ contrast = 255;
+ else if (contrast < 0)
+ contrast = 0;
+
+ fbi->pwmr &= ~0xff;
+ fbi->pwmr |= contrast;
+
+ writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
+ }
+
+ return 0;
+}
+
+static int imxfb_lcd_get_power(struct lcd_device *lcddev)
+{
+ struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev);
+
+ if (!IS_ERR(fbi->lcd_pwr))
+ return regulator_is_enabled(fbi->lcd_pwr);
+
+ return 1;
+}
+
+static int imxfb_lcd_set_power(struct lcd_device *lcddev, int power)
+{
+ struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev);
+
+ if (!IS_ERR(fbi->lcd_pwr)) {
+ if (power)
+ return regulator_enable(fbi->lcd_pwr);
+ else
+ return regulator_disable(fbi->lcd_pwr);
+ }
+
+ return 0;
+}
+
+static struct lcd_ops imxfb_lcd_ops = {
+ .check_fb = imxfb_lcd_check_fb,
+ .get_contrast = imxfb_lcd_get_contrast,
+ .set_contrast = imxfb_lcd_set_contrast,
+ .get_power = imxfb_lcd_get_power,
+ .set_power = imxfb_lcd_set_power,
+};
+
+static int imxfb_setup(void)
+{
+ char *opt, *options = NULL;
+
+ if (fb_get_options("imxfb", &options))
+ return -ENODEV;
+
+ if (!options || !*options)
+ return 0;
+
+ while ((opt = strsep(&options, ",")) != NULL) {
+ if (!*opt)
+ continue;
+ else
+ fb_mode = opt;
+ }
+
+ return 0;
+}
+
static int imxfb_probe(struct platform_device *pdev)
{
struct imxfb_info *fbi;
+ struct lcd_device *lcd;
struct fb_info *info;
struct imx_fb_platform_data *pdata;
struct resource *res;
@@ -869,6 +820,10 @@ static int imxfb_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "i.MX Framebuffer driver\n");
+ ret = imxfb_setup();
+ if (ret < 0)
+ return ret;
+
of_id = of_match_device(imxfb_of_dev_id, &pdev->dev);
if (of_id)
pdev->id_entry = of_id->data;
@@ -966,32 +921,18 @@ static int imxfb_probe(struct platform_device *pdev)
goto failed_ioremap;
}
- /* Seems not being used by anyone, so no support for oftree */
- if (!pdata || !pdata->fixed_screen_cpu) {
- fbi->map_size = PAGE_ALIGN(info->fix.smem_len);
- fbi->map_cpu = dma_alloc_writecombine(&pdev->dev,
- fbi->map_size, &fbi->map_dma, GFP_KERNEL);
+ fbi->map_size = PAGE_ALIGN(info->fix.smem_len);
+ info->screen_base = dma_alloc_writecombine(&pdev->dev, fbi->map_size,
+ &fbi->map_dma, GFP_KERNEL);
- if (!fbi->map_cpu) {
- dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
- ret = -ENOMEM;
- goto failed_map;
- }
-
- info->screen_base = fbi->map_cpu;
- fbi->screen_cpu = fbi->map_cpu;
- fbi->screen_dma = fbi->map_dma;
- info->fix.smem_start = fbi->screen_dma;
- } else {
- /* Fixed framebuffer mapping enables location of the screen in eSRAM */
- fbi->map_cpu = pdata->fixed_screen_cpu;
- fbi->map_dma = pdata->fixed_screen_dma;
- info->screen_base = fbi->map_cpu;
- fbi->screen_cpu = fbi->map_cpu;
- fbi->screen_dma = fbi->map_dma;
- info->fix.smem_start = fbi->screen_dma;
+ if (!info->screen_base) {
+ dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
+ ret = -ENOMEM;
+ goto failed_map;
}
+ info->fix.smem_start = fbi->map_dma;
+
if (pdata && pdata->init) {
ret = pdata->init(fbi->pdev);
if (ret)
@@ -1020,23 +961,37 @@ static int imxfb_probe(struct platform_device *pdev)
goto failed_register;
}
+ fbi->lcd_pwr = devm_regulator_get(&pdev->dev, "lcd");
+ if (IS_ERR(fbi->lcd_pwr) && (PTR_ERR(fbi->lcd_pwr) == -EPROBE_DEFER)) {
+ ret = -EPROBE_DEFER;
+ goto failed_lcd;
+ }
+
+ lcd = devm_lcd_device_register(&pdev->dev, "imxfb-lcd", &pdev->dev, fbi,
+ &imxfb_lcd_ops);
+ if (IS_ERR(lcd)) {
+ ret = PTR_ERR(lcd);
+ goto failed_lcd;
+ }
+
+ lcd->props.max_contrast = 0xff;
+
imxfb_enable_controller(fbi);
fbi->pdev = pdev;
-#ifdef PWMR_BACKLIGHT_AVAILABLE
- imxfb_init_backlight(fbi);
-#endif
return 0;
+failed_lcd:
+ unregister_framebuffer(info);
+
failed_register:
fb_dealloc_cmap(&info->cmap);
failed_cmap:
if (pdata && pdata->exit)
pdata->exit(fbi->pdev);
failed_platform_init:
- if (pdata && !pdata->fixed_screen_cpu)
- dma_free_writecombine(&pdev->dev,fbi->map_size,fbi->map_cpu,
- fbi->map_dma);
+ dma_free_writecombine(&pdev->dev, fbi->map_size, info->screen_base,
+ fbi->map_dma);
failed_map:
iounmap(fbi->regs);
failed_ioremap:
@@ -1061,9 +1016,6 @@ static int imxfb_remove(struct platform_device *pdev)
imxfb_disable_controller(fbi);
-#ifdef PWMR_BACKLIGHT_AVAILABLE
- imxfb_exit_backlight(fbi);
-#endif
unregister_framebuffer(info);
pdata = dev_get_platdata(&pdev->dev);
@@ -1074,69 +1026,49 @@ static int imxfb_remove(struct platform_device *pdev)
kfree(info->pseudo_palette);
framebuffer_release(info);
+ dma_free_writecombine(&pdev->dev, fbi->map_size, info->screen_base,
+ fbi->map_dma);
+
iounmap(fbi->regs);
release_mem_region(res->start, resource_size(res));
return 0;
}
-static void imxfb_shutdown(struct platform_device *dev)
+static int __maybe_unused imxfb_suspend(struct device *dev)
{
- struct fb_info *info = platform_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct imxfb_info *fbi = info->par;
- imxfb_disable_controller(fbi);
-}
-
-static struct platform_driver imxfb_driver = {
- .suspend = imxfb_suspend,
- .resume = imxfb_resume,
- .remove = imxfb_remove,
- .shutdown = imxfb_shutdown,
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = imxfb_of_dev_id,
- },
- .id_table = imxfb_devtype,
-};
-
-static int imxfb_setup(void)
-{
-#ifndef MODULE
- char *opt, *options = NULL;
- if (fb_get_options("imxfb", &options))
- return -ENODEV;
-
- if (!options || !*options)
- return 0;
+ imxfb_disable_controller(fbi);
- while ((opt = strsep(&options, ",")) != NULL) {
- if (!*opt)
- continue;
- else
- fb_mode = opt;
- }
-#endif
return 0;
}
-static int __init imxfb_init(void)
+static int __maybe_unused imxfb_resume(struct device *dev)
{
- int ret = imxfb_setup();
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct imxfb_info *fbi = info->par;
- if (ret < 0)
- return ret;
+ imxfb_enable_controller(fbi);
- return platform_driver_probe(&imxfb_driver, imxfb_probe);
+ return 0;
}
-static void __exit imxfb_cleanup(void)
-{
- platform_driver_unregister(&imxfb_driver);
-}
+static SIMPLE_DEV_PM_OPS(imxfb_pm_ops, imxfb_suspend, imxfb_resume);
-module_init(imxfb_init);
-module_exit(imxfb_cleanup);
+static struct platform_driver imxfb_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = imxfb_of_dev_id,
+ .owner = THIS_MODULE,
+ .pm = &imxfb_pm_ops,
+ },
+ .probe = imxfb_probe,
+ .remove = imxfb_remove,
+ .id_table = imxfb_devtype,
+};
+module_platform_driver(imxfb_driver);
MODULE_DESCRIPTION("Freescale i.MX framebuffer driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
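
imxfb now exposes panel power through an LCD class device backed by an optional "lcd" regulator: probe() stores whatever devm_regulator_get(&pdev->dev, "lcd") returns, bailing out only on -EPROBE_DEFER, and the callbacks treat an IS_ERR() value as "no software control over panel power". In isolation:

/* Sketch (not part of the patch): the optional-regulator idiom used above. */
static int example_lcd_set_power(struct lcd_device *lcddev, int power)
{
	struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev);

	if (IS_ERR(fbi->lcd_pwr))	/* no "lcd" supply described in DT */
		return 0;

	if (power)
		return regulator_enable(fbi->lcd_pwr);

	return regulator_disable(fbi->lcd_pwr);
}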
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 39ac49e0682c..0037104d66ac 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -54,7 +54,7 @@ config LOGO_PARISC_CLUT224
config LOGO_SGI_CLUT224
bool "224-color SGI Linux logo"
- depends on SGI_IP22 || SGI_IP27 || SGI_IP32 || X86_VISWS
+ depends on SGI_IP22 || SGI_IP27 || SGI_IP32
default y
config LOGO_SUN_CLUT224
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index b670cbda38e3..940cd196eef5 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -81,7 +81,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
logo = &logo_parisc_clut224;
#endif
#ifdef CONFIG_LOGO_SGI_CLUT224
- /* SGI Linux logo on MIPS/MIPS64 and VISWS */
+ /* SGI Linux logo on MIPS/MIPS64 */
logo = &logo_sgi_clut224;
#endif
#ifdef CONFIG_LOGO_SUN_CLUT224
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index 8335a6fe303e..0d5cb85d071a 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -192,10 +192,18 @@ void matrox_cfbX_init(struct matrox_fb_info *minfo)
minfo->accel.m_dwg_rect = M_DWG_TRAP | M_DWG_SOLID | M_DWG_ARZERO | M_DWG_SGNZERO | M_DWG_SHIFTZERO;
if (isMilleniumII(minfo)) minfo->accel.m_dwg_rect |= M_DWG_TRANSC;
minfo->accel.m_opmode = mopmode;
+ minfo->accel.m_access = maccess;
+ minfo->accel.m_pitch = mpitch;
}
EXPORT_SYMBOL(matrox_cfbX_init);
+static void matrox_accel_restore_maccess(struct matrox_fb_info *minfo)
+{
+ mga_outl(M_MACCESS, minfo->accel.m_access);
+ mga_outl(M_PITCH, minfo->accel.m_pitch);
+}
+
static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
int sx, int dy, int dx, int height, int width)
{
@@ -207,7 +215,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
CRITBEGIN
if ((dy < sy) || ((dy == sy) && (dx <= sx))) {
- mga_fifo(2);
+ mga_fifo(4);
+ matrox_accel_restore_maccess(minfo);
mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO |
M_DWG_BFCOL | M_DWG_REPLACE);
mga_outl(M_AR5, vxres);
@@ -215,7 +224,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
start = sy*vxres+sx+curr_ydstorg(minfo);
end = start+width;
} else {
- mga_fifo(3);
+ mga_fifo(5);
+ matrox_accel_restore_maccess(minfo);
mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE);
mga_outl(M_SGN, 5);
mga_outl(M_AR5, -vxres);
@@ -224,7 +234,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
start = end+width;
dy += height-1;
}
- mga_fifo(4);
+ mga_fifo(6);
+ matrox_accel_restore_maccess(minfo);
mga_outl(M_AR0, end);
mga_outl(M_AR3, start);
mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx);
@@ -246,7 +257,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
CRITBEGIN
if ((dy < sy) || ((dy == sy) && (dx <= sx))) {
- mga_fifo(2);
+ mga_fifo(4);
+ matrox_accel_restore_maccess(minfo);
mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO |
M_DWG_BFCOL | M_DWG_REPLACE);
mga_outl(M_AR5, vxres);
@@ -254,7 +266,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
start = sy*vxres+sx+curr_ydstorg(minfo);
end = start+width;
} else {
- mga_fifo(3);
+ mga_fifo(5);
+ matrox_accel_restore_maccess(minfo);
mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE);
mga_outl(M_SGN, 5);
mga_outl(M_AR5, -vxres);
@@ -263,7 +276,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
start = end+width;
dy += height-1;
}
- mga_fifo(5);
+ mga_fifo(7);
+ matrox_accel_restore_maccess(minfo);
mga_outl(M_AR0, end);
mga_outl(M_AR3, start);
mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx);
@@ -298,7 +312,8 @@ static void matroxfb_accel_clear(struct matrox_fb_info *minfo, u_int32_t color,
CRITBEGIN
- mga_fifo(5);
+ mga_fifo(7);
+ matrox_accel_restore_maccess(minfo);
mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE);
mga_outl(M_FCOL, color);
mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx);
@@ -341,7 +356,8 @@ static void matroxfb_cfb4_clear(struct matrox_fb_info *minfo, u_int32_t bgx,
width >>= 1;
sx >>= 1;
if (width) {
- mga_fifo(5);
+ mga_fifo(7);
+ matrox_accel_restore_maccess(minfo);
mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE2);
mga_outl(M_FCOL, bgx);
mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx);
@@ -415,7 +431,8 @@ static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx,
CRITBEGIN
- mga_fifo(3);
+ mga_fifo(5);
+ matrox_accel_restore_maccess(minfo);
if (easy)
mga_outl(M_DWGCTL, M_DWG_ILOAD | M_DWG_SGNZERO | M_DWG_SHIFTZERO | M_DWG_BMONOWF | M_DWG_LINEAR | M_DWG_REPLACE);
else
@@ -425,7 +442,8 @@ static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx,
fxbndry = ((xx + width - 1) << 16) | xx;
mmio = minfo->mmio.vbase;
- mga_fifo(6);
+ mga_fifo(8);
+ matrox_accel_restore_maccess(minfo);
mga_writel(mmio, M_FXBNDRY, fxbndry);
mga_writel(mmio, M_AR0, ar0);
mga_writel(mmio, M_AR3, 0);
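
The matroxfb_accel changes revolve around one idea: the last values programmed into the blitter's MACCESS and PITCH registers are cached in matrox_accel_data and replayed at the start of every accelerated operation, in case they were overwritten behind the driver's back; each mga_fifo() reservation therefore grows by the two extra register writes. The pattern in isolation:

/* Sketch (not part of the patch): cache-and-restore of shared blitter state. */
static void matrox_accel_restore_maccess(struct matrox_fb_info *minfo)
{
	mga_outl(M_MACCESS, minfo->accel.m_access);
	mga_outl(M_PITCH, minfo->accel.m_pitch);
}

	/* at every call site the reservation grows by two slots, e.g.: */
	mga_fifo(4);			/* was mga_fifo(2) */
	matrox_accel_restore_maccess(minfo);
	/* ...followed by the operation's own register writes, unchanged */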
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 87c64ff4546c..7116c5309c7d 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -1773,7 +1773,8 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
FBINFO_HWACCEL_FILLRECT | /* And fillrect */
FBINFO_HWACCEL_IMAGEBLIT | /* And imageblit */
FBINFO_HWACCEL_XPAN | /* And we support both horizontal */
- FBINFO_HWACCEL_YPAN; /* And vertical panning */
+ FBINFO_HWACCEL_YPAN | /* And vertical panning */
+ FBINFO_READS_FAST;
minfo->video.len_usable &= PAGE_MASK;
fb_alloc_cmap(&minfo->fbcon.cmap, 256, 1);
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index 11ed57bb704e..556d96ce40bf 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -307,6 +307,8 @@ struct matrox_accel_data {
#endif
u_int32_t m_dwg_rect;
u_int32_t m_opmode;
+ u_int32_t m_access;
+ u_int32_t m_pitch;
};
struct v4l2_queryctrl;
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c
index ccd9073f706f..5ee3b5505f7f 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
@@ -31,7 +32,7 @@ struct panel_drv_data {
static const struct omap_video_timings tvc_pal_timings = {
.x_res = 720,
.y_res = 574,
- .pixel_clock = 13500,
+ .pixelclock = 13500000,
.hsw = 64,
.hfp = 12,
.hbp = 68,
@@ -42,6 +43,12 @@ static const struct omap_video_timings tvc_pal_timings = {
.interlace = true,
};
+static const struct of_device_id tvc_of_match[];
+
+struct tvc_of_data {
+ enum omap_dss_venc_type connector_type;
+};
+
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
static int tvc_connect(struct omap_dss_device *dssdev)
@@ -91,8 +98,12 @@ static int tvc_enable(struct omap_dss_device *dssdev)
in->ops.atv->set_timings(in, &ddata->timings);
- in->ops.atv->set_type(in, ddata->connector_type);
- in->ops.atv->invert_vid_out_polarity(in, ddata->invert_polarity);
+ if (!ddata->dev->of_node) {
+ in->ops.atv->set_type(in, ddata->connector_type);
+
+ in->ops.atv->invert_vid_out_polarity(in,
+ ddata->invert_polarity);
+ }
r = in->ops.atv->enable(in);
if (r)
@@ -205,6 +216,23 @@ static int tvc_probe_pdata(struct platform_device *pdev)
return 0;
}
+static int tvc_probe_of(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+ struct omap_dss_device *in;
+
+ in = omapdss_of_find_source_for_first_ep(node);
+ if (IS_ERR(in)) {
+ dev_err(&pdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
+ ddata->in = in;
+
+ return 0;
+}
+
static int tvc_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
@@ -222,6 +250,10 @@ static int tvc_probe(struct platform_device *pdev)
r = tvc_probe_pdata(pdev);
if (r)
return r;
+ } else if (pdev->dev.of_node) {
+ r = tvc_probe_of(pdev);
+ if (r)
+ return r;
} else {
return -ENODEV;
}
@@ -263,12 +295,19 @@ static int __exit tvc_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id tvc_of_match[] = {
+ { .compatible = "omapdss,svideo-connector", },
+ { .compatible = "omapdss,composite-video-connector", },
+ {},
+};
+
static struct platform_driver tvc_connector_driver = {
.probe = tvc_probe,
.remove = __exit_p(tvc_remove),
.driver = {
.name = "connector-analog-tv",
.owner = THIS_MODULE,
+ .of_match_table = tvc_of_match,
},
};
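
The same device-tree enabling recipe repeats through the connector, encoder and panel drivers that follow: add an of_match_table, and add a *_probe_of() that resolves the video source from the node's first endpoint. Condensed, with hypothetical foo_* names and a placeholder compatible string:

/* Sketch (not part of the patch): the recurring probe_of() shape. */
static int foo_probe_of(struct platform_device *pdev)
{
	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
	struct omap_dss_device *in;

	in = omapdss_of_find_source_for_first_ep(pdev->dev.of_node);
	if (IS_ERR(in)) {
		dev_err(&pdev->dev, "failed to find video source\n");
		return PTR_ERR(in);
	}

	ddata->in = in;
	return 0;
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "omapdss,example-device", },
	{ },
};
MODULE_DEVICE_TABLE(of, foo_of_match);

probe() then dispatches on dev_get_platdata() versus dev.of_node, returning -ENODEV when neither is present, exactly as in the hunks above and below.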
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c
index b6c50904038e..74de2bc50c4f 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/omap2/displays-new/connector-dvi.c
@@ -23,7 +23,7 @@ static const struct omap_video_timings dvic_default_timings = {
.x_res = 640,
.y_res = 480,
- .pixel_clock = 23500,
+ .pixelclock = 23500000,
.hfp = 48,
.hsw = 32,
@@ -277,6 +277,37 @@ static int dvic_probe_pdata(struct platform_device *pdev)
return 0;
}
+static int dvic_probe_of(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+ struct omap_dss_device *in;
+ struct device_node *adapter_node;
+ struct i2c_adapter *adapter;
+
+ in = omapdss_of_find_source_for_first_ep(node);
+ if (IS_ERR(in)) {
+ dev_err(&pdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
+ ddata->in = in;
+
+ adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
+ if (adapter_node) {
+ adapter = of_find_i2c_adapter_by_node(adapter_node);
+ if (adapter == NULL) {
+ dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
+ omap_dss_put_device(ddata->in);
+ return -EPROBE_DEFER;
+ }
+
+ ddata->i2c_adapter = adapter;
+ }
+
+ return 0;
+}
+
static int dvic_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
@@ -293,6 +324,10 @@ static int dvic_probe(struct platform_device *pdev)
r = dvic_probe_pdata(pdev);
if (r)
return r;
+ } else if (pdev->dev.of_node) {
+ r = dvic_probe_of(pdev);
+ if (r)
+ return r;
} else {
return -ENODEV;
}
@@ -342,12 +377,20 @@ static int __exit dvic_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id dvic_of_match[] = {
+ { .compatible = "omapdss,dvi-connector", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dvic_of_match);
+
static struct platform_driver dvi_connector_driver = {
.probe = dvic_probe,
.remove = __exit_p(dvic_remove),
.driver = {
.name = "connector-dvi",
.owner = THIS_MODULE,
+ .of_match_table = dvic_of_match,
},
};
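
Alongside the DT changes, every omap_video_timings initializer in these patches switches from the kHz pixel_clock field to the Hz pixelclock field, so each literal gains a factor of 1000. For reference:

/* Sketch (not part of the patch): the unit change behind the renames. */
static const struct omap_video_timings example_timings = {
	.x_res		= 640,
	.y_res		= 480,
	/* was: .pixel_clock = 23500   (kHz) */
	.pixelclock	= 23500000,	/* Hz */
};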
diff --git a/drivers/video/omap2/displays-new/connector-hdmi.c b/drivers/video/omap2/displays-new/connector-hdmi.c
index 9abe2c039ae9..29ed21b9dce5 100644
--- a/drivers/video/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/omap2/displays-new/connector-hdmi.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <drm/drm_edid.h>
@@ -21,7 +22,7 @@
static const struct omap_video_timings hdmic_default_timings = {
.x_res = 640,
.y_res = 480,
- .pixel_clock = 25175,
+ .pixelclock = 25175000,
.hsw = 96,
.hfp = 16,
.hbp = 48,
@@ -301,6 +302,23 @@ static int hdmic_probe_pdata(struct platform_device *pdev)
return 0;
}
+static int hdmic_probe_of(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+ struct omap_dss_device *in;
+
+ in = omapdss_of_find_source_for_first_ep(node);
+ if (IS_ERR(in)) {
+ dev_err(&pdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
+ ddata->in = in;
+
+ return 0;
+}
+
static int hdmic_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
@@ -318,6 +336,10 @@ static int hdmic_probe(struct platform_device *pdev)
r = hdmic_probe_pdata(pdev);
if (r)
return r;
+ } else if (pdev->dev.of_node) {
+ r = hdmic_probe_of(pdev);
+ if (r)
+ return r;
} else {
return -ENODEV;
}
@@ -359,12 +381,20 @@ static int __exit hdmic_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id hdmic_of_match[] = {
+ { .compatible = "omapdss,hdmi-connector", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hdmic_of_match);
+
static struct platform_driver hdmi_connector_driver = {
.probe = hdmic_probe,
.remove = __exit_p(hdmic_remove),
.driver = {
.name = "connector-hdmi",
.owner = THIS_MODULE,
+ .of_match_table = hdmic_of_match,
},
};
diff --git a/drivers/video/omap2/displays-new/encoder-tfp410.c b/drivers/video/omap2/displays-new/encoder-tfp410.c
index 4a291e756be9..b4e9a42a79e6 100644
--- a/drivers/video/omap2/displays-new/encoder-tfp410.c
+++ b/drivers/video/omap2/displays-new/encoder-tfp410.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of_gpio.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
@@ -82,7 +83,8 @@ static int tfp410_enable(struct omap_dss_device *dssdev)
return 0;
in->ops.dpi->set_timings(in, &ddata->timings);
- in->ops.dpi->set_data_lines(in, ddata->data_lines);
+ if (ddata->data_lines)
+ in->ops.dpi->set_data_lines(in, ddata->data_lines);
r = in->ops.dpi->enable(in);
if (r)
@@ -179,6 +181,33 @@ static int tfp410_probe_pdata(struct platform_device *pdev)
return 0;
}
+static int tfp410_probe_of(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+ struct omap_dss_device *in;
+ int gpio;
+
+ gpio = of_get_named_gpio(node, "powerdown-gpios", 0);
+
+ if (gpio_is_valid(gpio) || gpio == -ENOENT) {
+ ddata->pd_gpio = gpio;
+ } else {
+ dev_err(&pdev->dev, "failed to parse PD gpio\n");
+ return gpio;
+ }
+
+ in = omapdss_of_find_source_for_first_ep(node);
+ if (IS_ERR(in)) {
+ dev_err(&pdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
+ ddata->in = in;
+
+ return 0;
+}
+
static int tfp410_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
@@ -195,6 +224,10 @@ static int tfp410_probe(struct platform_device *pdev)
r = tfp410_probe_pdata(pdev);
if (r)
return r;
+ } else if (pdev->dev.of_node) {
+ r = tfp410_probe_of(pdev);
+ if (r)
+ return r;
} else {
return -ENODEV;
}
@@ -251,12 +284,20 @@ static int __exit tfp410_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id tfp410_of_match[] = {
+ { .compatible = "omapdss,ti,tfp410", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tfp410_of_match);
+
static struct platform_driver tfp410_driver = {
.probe = tfp410_probe,
.remove = __exit_p(tfp410_remove),
.driver = {
.name = "tfp410",
.owner = THIS_MODULE,
+ .of_match_table = tfp410_of_match,
},
};
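
tfp410 above (and tpd12s015 and panel-dsi-cm further down) parse optional GPIOs with the same test: the GPIO is accepted when the lookup returns either a valid number or -ENOENT, the latter meaning the property is simply absent, while any other error (such as -EPROBE_DEFER or a malformed description) aborts the probe. In isolation:

/* Sketch (not part of the patch): optional-GPIO parsing as used above. */
gpio = of_get_named_gpio(node, "powerdown-gpios", 0);
if (gpio_is_valid(gpio) || gpio == -ENOENT) {
	ddata->pd_gpio = gpio;		/* -ENOENT: no powerdown line wired up */
} else {
	dev_err(&pdev->dev, "failed to parse PD gpio\n");
	return gpio;			/* real error: propagate it */
}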
diff --git a/drivers/video/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/omap2/displays-new/encoder-tpd12s015.c
index d5c936cb217f..7e33686171e3 100644
--- a/drivers/video/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/omap2/displays-new/encoder-tpd12s015.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
+#include <linux/of_gpio.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
@@ -289,6 +290,49 @@ static int tpd_probe_pdata(struct platform_device *pdev)
return 0;
}
+static int tpd_probe_of(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+ struct omap_dss_device *in;
+ int gpio;
+
+ /* CT CP HPD GPIO */
+ gpio = of_get_gpio(node, 0);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(&pdev->dev, "failed to parse CT CP HPD gpio\n");
+ return gpio;
+ }
+ ddata->ct_cp_hpd_gpio = gpio;
+
+ /* LS OE GPIO */
+ gpio = of_get_gpio(node, 1);
+ if (gpio_is_valid(gpio) || gpio == -ENOENT) {
+ ddata->ls_oe_gpio = gpio;
+ } else {
+ dev_err(&pdev->dev, "failed to parse LS OE gpio\n");
+ return gpio;
+ }
+
+ /* HPD GPIO */
+ gpio = of_get_gpio(node, 2);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(&pdev->dev, "failed to parse HPD gpio\n");
+ return gpio;
+ }
+ ddata->hpd_gpio = gpio;
+
+ in = omapdss_of_find_source_for_first_ep(node);
+ if (IS_ERR(in)) {
+ dev_err(&pdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
+ ddata->in = in;
+
+ return 0;
+}
+
static int tpd_probe(struct platform_device *pdev)
{
struct omap_dss_device *in, *dssdev;
@@ -307,6 +351,10 @@ static int tpd_probe(struct platform_device *pdev)
r = tpd_probe_pdata(pdev);
if (r)
return r;
+ } else if (pdev->dev.of_node) {
+ r = tpd_probe_of(pdev);
+ if (r)
+ return r;
} else {
return -ENODEV;
}
@@ -379,12 +427,20 @@ static int __exit tpd_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id tpd_of_match[] = {
+ { .compatible = "omapdss,ti,tpd12s015", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tpd_of_match);
+
static struct platform_driver tpd_driver = {
.probe = tpd_probe,
.remove = __exit_p(tpd_remove),
.driver = {
.name = "tpd12s015",
.owner = THIS_MODULE,
+ .of_match_table = tpd_of_match,
},
};
diff --git a/drivers/video/omap2/displays-new/panel-dsi-cm.c b/drivers/video/omap2/displays-new/panel-dsi-cm.c
index b7baafe83aa3..d6f14e8717e8 100644
--- a/drivers/video/omap2/displays-new/panel-dsi-cm.c
+++ b/drivers/video/omap2/displays-new/panel-dsi-cm.c
@@ -22,6 +22,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
@@ -595,10 +597,13 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
.lp_clk_max = 10000000,
};
- r = in->ops.dsi->configure_pins(in, &ddata->pin_config);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to configure DSI pins\n");
- goto err0;
+ if (ddata->pin_config.num_pins > 0) {
+ r = in->ops.dsi->configure_pins(in, &ddata->pin_config);
+ if (r) {
+ dev_err(&ddata->pdev->dev,
+ "failed to configure DSI pins\n");
+ goto err0;
+ }
}
r = in->ops.dsi->set_config(in, &dsi_config);
@@ -1156,6 +1161,41 @@ static int dsicm_probe_pdata(struct platform_device *pdev)
return 0;
}
+static int dsicm_probe_of(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *in;
+ int gpio;
+
+ gpio = of_get_named_gpio(node, "reset-gpios", 0);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(&pdev->dev, "failed to parse reset gpio\n");
+ return gpio;
+ }
+ ddata->reset_gpio = gpio;
+
+ gpio = of_get_named_gpio(node, "te-gpios", 0);
+ if (gpio_is_valid(gpio) || gpio == -ENOENT) {
+ ddata->ext_te_gpio = gpio;
+ } else {
+ dev_err(&pdev->dev, "failed to parse TE gpio\n");
+ return gpio;
+ }
+
+ in = omapdss_of_find_source_for_first_ep(node);
+ if (IS_ERR(in)) {
+ dev_err(&pdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
+ ddata->in = in;
+
+ /* TODO: ulps, backlight */
+
+ return 0;
+}
+
static int dsicm_probe(struct platform_device *pdev)
{
struct backlight_properties props;
@@ -1178,13 +1218,17 @@ static int dsicm_probe(struct platform_device *pdev)
r = dsicm_probe_pdata(pdev);
if (r)
return r;
+ } else if (pdev->dev.of_node) {
+ r = dsicm_probe_of(pdev);
+ if (r)
+ return r;
} else {
return -ENODEV;
}
ddata->timings.x_res = 864;
ddata->timings.y_res = 480;
- ddata->timings.pixel_clock = DIV_ROUND_UP(864 * 480 * 60, 1000);
+ ddata->timings.pixelclock = 864 * 480 * 60;
dssdev = &ddata->dssdev;
dssdev->dev = dev;
@@ -1320,12 +1364,20 @@ static int __exit dsicm_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id dsicm_of_match[] = {
+ { .compatible = "omapdss,panel-dsi-cm", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dsicm_of_match);
+
static struct platform_driver dsicm_driver = {
.probe = dsicm_probe,
.remove = __exit_p(dsicm_remove),
.driver = {
.name = "panel-dsi-cm",
.owner = THIS_MODULE,
+ .of_match_table = dsicm_of_match,
},
};
diff --git a/drivers/video/omap2/displays-new/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays-new/panel-lgphilips-lb035q02.c
index 6e8977b18950..2e6b513222d9 100644
--- a/drivers/video/omap2/displays-new/panel-lgphilips-lb035q02.c
+++ b/drivers/video/omap2/displays-new/panel-lgphilips-lb035q02.c
@@ -23,7 +23,7 @@ static struct omap_video_timings lb035q02_timings = {
.x_res = 320,
.y_res = 240,
- .pixel_clock = 6500,
+ .pixelclock = 6500000,
.hsw = 2,
.hfp = 20,
diff --git a/drivers/video/omap2/displays-new/panel-nec-nl8048hl11.c b/drivers/video/omap2/displays-new/panel-nec-nl8048hl11.c
index bb217da65c5f..996fa004b48c 100644
--- a/drivers/video/omap2/displays-new/panel-nec-nl8048hl11.c
+++ b/drivers/video/omap2/displays-new/panel-nec-nl8048hl11.c
@@ -40,7 +40,7 @@ struct panel_drv_data {
* NEC PIX Clock Ratings
* MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz
*/
-#define LCD_PIXEL_CLOCK 23800
+#define LCD_PIXEL_CLOCK 23800000
static const struct {
unsigned char addr;
@@ -69,7 +69,7 @@ static const struct {
static const struct omap_video_timings nec_8048_panel_timings = {
.x_res = LCD_XRES,
.y_res = LCD_YRES,
- .pixel_clock = LCD_PIXEL_CLOCK,
+ .pixelclock = LCD_PIXEL_CLOCK,
.hfp = 6,
.hsw = 1,
.hbp = 4,
diff --git a/drivers/video/omap2/displays-new/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays-new/panel-sharp-ls037v7dw01.c
index 72a4fb5aa6b1..b2f710be565d 100644
--- a/drivers/video/omap2/displays-new/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays-new/panel-sharp-ls037v7dw01.c
@@ -37,7 +37,7 @@ static const struct omap_video_timings sharp_ls_timings = {
.x_res = 480,
.y_res = 640,
- .pixel_clock = 19200,
+ .pixelclock = 19200000,
.hsw = 2,
.hfp = 1,
diff --git a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
index 8e97d06921ff..c7ba4d8b928a 100644
--- a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
@@ -30,6 +30,8 @@
#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
@@ -93,7 +95,7 @@ struct panel_drv_data {
static const struct omap_video_timings acx565akm_panel_timings = {
.x_res = 800,
.y_res = 480,
- .pixel_clock = 24000,
+ .pixelclock = 24000000,
.hfp = 28,
.hsw = 4,
.hbp = 24,
@@ -547,7 +549,9 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
in->ops.sdi->set_timings(in, &ddata->videomode);
- in->ops.sdi->set_datapairs(in, ddata->datapairs);
+
+ if (ddata->datapairs > 0)
+ in->ops.sdi->set_datapairs(in, ddata->datapairs);
r = in->ops.sdi->enable(in);
if (r) {
@@ -726,6 +730,22 @@ static int acx565akm_probe_pdata(struct spi_device *spi)
return 0;
}
+static int acx565akm_probe_of(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct device_node *np = spi->dev.of_node;
+
+ ddata->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
+
+ ddata->in = omapdss_of_find_source_for_first_ep(np);
+ if (IS_ERR(ddata->in)) {
+ dev_err(&spi->dev, "failed to find video source\n");
+ return PTR_ERR(ddata->in);
+ }
+
+ return 0;
+}
+
static int acx565akm_probe(struct spi_device *spi)
{
struct panel_drv_data *ddata;
@@ -753,7 +773,12 @@ static int acx565akm_probe(struct spi_device *spi)
r = acx565akm_probe_pdata(spi);
if (r)
return r;
+ } else if (spi->dev.of_node) {
+ r = acx565akm_probe_of(spi);
+ if (r)
+ return r;
} else {
+ dev_err(&spi->dev, "platform data missing!\n");
return -ENODEV;
}
@@ -864,10 +889,16 @@ static int acx565akm_remove(struct spi_device *spi)
return 0;
}
+static const struct of_device_id acx565akm_of_match[] = {
+ { .compatible = "omapdss,sony,acx565akm", },
+ {},
+};
+
static struct spi_driver acx565akm_driver = {
.driver = {
.name = "acx565akm",
.owner = THIS_MODULE,
+ .of_match_table = acx565akm_of_match,
},
.probe = acx565akm_probe,
.remove = acx565akm_remove,
diff --git a/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c b/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c
index 9a08908fe998..fae6adc005a7 100644
--- a/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c
+++ b/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c
@@ -45,7 +45,7 @@ struct panel_drv_data {
static struct omap_video_timings td028ttec1_panel_timings = {
.x_res = 480,
.y_res = 640,
- .pixel_clock = 22153,
+ .pixelclock = 22153000,
.hfp = 24,
.hsw = 8,
.hbp = 8,
diff --git a/drivers/video/omap2/displays-new/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays-new/panel-tpo-td043mtea1.c
index eadc6529fa3d..875b40263b33 100644
--- a/drivers/video/omap2/displays-new/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays-new/panel-tpo-td043mtea1.c
@@ -76,7 +76,7 @@ static const struct omap_video_timings tpo_td043_timings = {
.x_res = 800,
.y_res = 480,
- .pixel_clock = 36000,
+ .pixelclock = 36000000,
.hsw = 1,
.hfp = 68,
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index d3aa91bdd6a8..8aec8bda27cc 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
- output.o
+ output.o dss-of.o
# DSS compat layer files
omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \
dispc-compat.o display-sysfs.o
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 77d6221618f4..2bbdb7ff7daf 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -100,8 +100,6 @@ static struct {
struct platform_device *pdev;
void __iomem *base;
- int ctx_loss_cnt;
-
int irq;
unsigned long core_clk_rate;
@@ -357,29 +355,20 @@ static void dispc_save_context(void)
if (dss_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
- dispc.ctx_loss_cnt = dss_get_ctx_loss_count();
dispc.ctx_valid = true;
- DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
+ DSSDBG("context saved\n");
}
static void dispc_restore_context(void)
{
- int i, j, ctx;
+ int i, j;
DSSDBG("dispc_restore_context\n");
if (!dispc.ctx_valid)
return;
- ctx = dss_get_ctx_loss_count();
-
- if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
- return;
-
- DSSDBG("ctx_loss_count: saved %d, current %d\n",
- dispc.ctx_loss_cnt, ctx);
-
/*RR(IRQENABLE);*/
/*RR(CONTROL);*/
RR(CONFIG);
@@ -2884,7 +2873,7 @@ bool dispc_mgr_timings_ok(enum omap_channel channel,
timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
- timings_ok &= _dispc_mgr_pclk_ok(channel, timings->pixel_clock * 1000);
+ timings_ok &= _dispc_mgr_pclk_ok(channel, timings->pixelclock);
if (dss_mgr_is_lcd(channel)) {
timings_ok &= _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
@@ -2979,10 +2968,10 @@ void dispc_mgr_set_timings(enum omap_channel channel,
xtot = t.x_res + t.hfp + t.hsw + t.hbp;
ytot = t.y_res + t.vfp + t.vsw + t.vbp;
- ht = (timings->pixel_clock * 1000) / xtot;
- vt = (timings->pixel_clock * 1000) / xtot / ytot;
+ ht = timings->pixelclock / xtot;
+ vt = timings->pixelclock / xtot / ytot;
- DSSDBG("pck %u\n", timings->pixel_clock);
+ DSSDBG("pck %u\n", timings->pixelclock);
DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n",
@@ -3768,6 +3757,15 @@ static int dispc_runtime_suspend(struct device *dev)
static int dispc_runtime_resume(struct device *dev)
{
+ /*
+ * The reset value for load mode is 0 (OMAP_DSS_LOAD_CLUT_AND_FRAME)
+ * but we always initialize it to 2 (OMAP_DSS_LOAD_FRAME_ONLY) in
+ * _omap_dispc_initial_config(). We can thus use it to detect if
+ * we have lost register context.
+ */
+ if (REG_GET(DISPC_CONFIG, 2, 1) == OMAP_DSS_LOAD_FRAME_ONLY)
+ return 0;
+
_omap_dispc_initial_config();
dispc_restore_context();
@@ -3780,12 +3778,20 @@ static const struct dev_pm_ops dispc_pm_ops = {
.runtime_resume = dispc_runtime_resume,
};
+static const struct of_device_id dispc_of_match[] = {
+ { .compatible = "ti,omap2-dispc", },
+ { .compatible = "ti,omap3-dispc", },
+ { .compatible = "ti,omap4-dispc", },
+ {},
+};
+
static struct platform_driver omap_dispchw_driver = {
.remove = __exit_p(omap_dispchw_remove),
.driver = {
.name = "omapdss_dispc",
.owner = THIS_MODULE,
.pm = &dispc_pm_ops,
+ .of_match_table = dispc_of_match,
},
};
diff --git a/drivers/video/omap2/dss/display-sysfs.c b/drivers/video/omap2/dss/display-sysfs.c
index f7b5f9561041..5a2095a98ed8 100644
--- a/drivers/video/omap2/dss/display-sysfs.c
+++ b/drivers/video/omap2/dss/display-sysfs.c
@@ -132,7 +132,7 @@ static ssize_t display_timings_show(struct device *dev,
dssdev->driver->get_timings(dssdev, &t);
return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
- t.pixel_clock,
+ t.pixelclock,
t.x_res, t.hfp, t.hbp, t.hsw,
t.y_res, t.vfp, t.vbp, t.vsw);
}
@@ -158,7 +158,7 @@ static ssize_t display_timings_store(struct device *dev,
}
#endif
if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu",
- &t.pixel_clock,
+ &t.pixelclock,
&t.x_res, &t.hfp, &t.hbp, &t.hsw,
&t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
return -EINVAL;
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 669a81fdf58e..2412a0dd0c13 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <video/omapdss.h>
#include "dss.h"
@@ -133,9 +134,32 @@ static int disp_num_counter;
int omapdss_register_display(struct omap_dss_device *dssdev)
{
struct omap_dss_driver *drv = dssdev->driver;
+ int id;
- snprintf(dssdev->alias, sizeof(dssdev->alias),
- "display%d", disp_num_counter++);
+ /*
+ * Note: this presumes that either all displays use DT or none do, which
+ * should normally be the case. It also presumes that either all displays
+ * have a DT alias or none do.
+ */
+
+ if (dssdev->dev->of_node) {
+ id = of_alias_get_id(dssdev->dev->of_node, "display");
+
+ if (id < 0)
+ id = disp_num_counter++;
+ } else {
+ id = disp_num_counter++;
+ }
+
+ snprintf(dssdev->alias, sizeof(dssdev->alias), "display%d", id);
+
+ /* Use 'label' property for name, if it exists */
+ if (dssdev->dev->of_node)
+ of_property_read_string(dssdev->dev->of_node, "label",
+ &dssdev->name);
+
+ if (dssdev->name == NULL)
+ dssdev->name = dssdev->alias;
if (drv && drv->get_resolution == NULL)
drv->get_resolution = omapdss_default_get_resolution;
@@ -248,7 +272,7 @@ void videomode_to_omap_video_timings(const struct videomode *vm,
{
memset(ovt, 0, sizeof(*ovt));
- ovt->pixel_clock = vm->pixelclock / 1000;
+ ovt->pixelclock = vm->pixelclock;
ovt->x_res = vm->hactive;
ovt->hbp = vm->hback_porch;
ovt->hfp = vm->hfront_porch;
@@ -280,7 +304,7 @@ void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
{
memset(vm, 0, sizeof(*vm));
- vm->pixelclock = ovt->pixel_clock * 1000;
+ vm->pixelclock = ovt->pixelclock;
vm->hactive = ovt->x_res;
vm->hback_porch = ovt->hbp;
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 23ef21ffc2c4..157921db447a 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
+#include <linux/of.h>
#include <video/omapdss.h>
@@ -49,6 +50,8 @@ static struct {
int data_lines;
struct omap_dss_device output;
+
+ bool port_initialized;
} dpi;
static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
@@ -307,22 +310,21 @@ static int dpi_set_mode(struct omap_overlay_manager *mgr)
int r = 0;
if (dpi.dsidev)
- r = dpi_set_dsi_clk(mgr->id, t->pixel_clock * 1000, &fck,
+ r = dpi_set_dsi_clk(mgr->id, t->pixelclock, &fck,
&lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(t->pixel_clock * 1000, &fck,
+ r = dpi_set_dispc_clk(t->pixelclock, &fck,
&lck_div, &pck_div);
if (r)
return r;
- pck = fck / lck_div / pck_div / 1000;
+ pck = fck / lck_div / pck_div;
- if (pck != t->pixel_clock) {
- DSSWARN("Could not find exact pixel clock. "
- "Requested %d kHz, got %lu kHz\n",
- t->pixel_clock, pck);
+ if (pck != t->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
+ t->pixelclock, pck);
- t->pixel_clock = pck;
+ t->pixelclock = pck;
}
dss_mgr_set_timings(mgr, t);
@@ -480,17 +482,17 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
return -EINVAL;
- if (timings->pixel_clock == 0)
+ if (timings->pixelclock == 0)
return -EINVAL;
if (dpi.dsidev) {
- ok = dpi_dsi_clk_calc(timings->pixel_clock * 1000, &ctx);
+ ok = dpi_dsi_clk_calc(timings->pixelclock, &ctx);
if (!ok)
return -EINVAL;
fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
} else {
- ok = dpi_dss_clk_calc(timings->pixel_clock * 1000, &ctx);
+ ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
if (!ok)
return -EINVAL;
@@ -500,9 +502,9 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
lck_div = ctx.dispc_cinfo.lck_div;
pck_div = ctx.dispc_cinfo.pck_div;
- pck = fck / lck_div / pck_div / 1000;
+ pck = fck / lck_div / pck_div;
- timings->pixel_clock = pck;
+ timings->pixelclock = pck;
return 0;
}
@@ -726,3 +728,47 @@ void __exit dpi_uninit_platform_driver(void)
{
platform_driver_unregister(&omap_dpi_driver);
}
+
+int __init dpi_init_port(struct platform_device *pdev, struct device_node *port)
+{
+ struct device_node *ep;
+ u32 datalines;
+ int r;
+
+ ep = omapdss_of_get_next_endpoint(port, NULL);
+ if (!ep)
+ return 0;
+
+ r = of_property_read_u32(ep, "data-lines", &datalines);
+ if (r) {
+ DSSERR("failed to parse datalines\n");
+ goto err_datalines;
+ }
+
+ dpi.data_lines = datalines;
+
+ of_node_put(ep);
+
+ dpi.pdev = pdev;
+
+ mutex_init(&dpi.lock);
+
+ dpi_init_output(pdev);
+
+ dpi.port_initialized = true;
+
+ return 0;
+
+err_datalines:
+ of_node_put(ep);
+
+ return r;
+}
+
+void __exit dpi_uninit_port(void)
+{
+ if (!dpi.port_initialized)
+ return;
+
+ dpi_uninit_output(dpi.pdev);
+}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index a820c37e323e..121d1049d0bc 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -38,6 +38,8 @@
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <video/omapdss.h>
#include <video/mipi_display.h>
@@ -386,6 +388,13 @@ struct dsi_packet_sent_handler_data {
struct completion *completion;
};
+struct dsi_module_id_data {
+ u32 address;
+ int id;
+};
+
+static const struct of_device_id dsi_of_match[];
+
#ifdef DSI_PERF_MEASURE
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
@@ -1151,15 +1160,11 @@ static int dsi_regulator_init(struct platform_device *dsidev)
if (dsi->vdds_dsi_reg != NULL)
return 0;
- vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "vdds_dsi");
-
- /* DT HACK: try VCXIO to make omapdss work for o4 sdp/panda */
- if (IS_ERR(vdds_dsi))
- vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "VCXIO");
+ vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "vdd");
if (IS_ERR(vdds_dsi)) {
if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
- DSSERR("can't get VDDS_DSI regulator\n");
+ DSSERR("can't get DSI VDD regulator\n");
return PTR_ERR(vdds_dsi);
}
@@ -4616,7 +4621,7 @@ static void print_dsi_vm(const char *str,
static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
{
- unsigned long pck = t->pixel_clock * 1000;
+ unsigned long pck = t->pixelclock;
int hact, bl, tot;
hact = t->x_res;
@@ -4656,7 +4661,7 @@ static void print_dsi_dispc_vm(const char *str,
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
- vm.pixel_clock = pck / 1000;
+ vm.pixelclock = pck;
vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
@@ -4678,7 +4683,7 @@ static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
ctx->dispc_cinfo.pck = pck;
*t = *ctx->config->timings;
- t->pixel_clock = pck / 1000;
+ t->pixelclock = pck;
t->x_res = ctx->config->timings->x_res;
t->y_res = ctx->config->timings->y_res;
t->hsw = t->hfp = t->hbp = t->vsw = 1;
@@ -4732,7 +4737,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
* especially as we go to LP between each pixel packet due to HW
* "feature". So let's just estimate very roughly and multiply by 1.5.
*/
- pck = cfg->timings->pixel_clock * 1000;
+ pck = cfg->timings->pixelclock;
pck = pck * 3 / 2;
txbyteclk = pck * bitspp / 8 / ndl;
@@ -4909,7 +4914,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dispc_vm = &ctx->dispc_vm;
*dispc_vm = *req_vm;
- dispc_vm->pixel_clock = dispc_pck / 1000;
+ dispc_vm->pixelclock = dispc_pck;
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
hsa = div64_u64((u64)req_vm->hsw * dispc_pck,
@@ -5031,9 +5036,9 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
ctx->dsi_cinfo.clkin = clkin;
/* these limits should come from the panel driver */
- ctx->req_pck_min = t->pixel_clock * 1000 - 1000;
- ctx->req_pck_nom = t->pixel_clock * 1000;
- ctx->req_pck_max = t->pixel_clock * 1000 + 1000;
+ ctx->req_pck_min = t->pixelclock - 1000;
+ ctx->req_pck_nom = t->pixelclock;
+ ctx->req_pck_max = t->pixelclock + 1000;
byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
@@ -5370,12 +5375,69 @@ static void dsi_uninit_output(struct platform_device *dsidev)
omapdss_unregister_output(out);
}
+static int dsi_probe_of(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
+ struct property *prop;
+ u32 lane_arr[10];
+ int len, num_pins;
+ int r, i;
+ struct device_node *ep;
+ struct omap_dsi_pin_config pin_cfg;
+
+ ep = omapdss_of_get_first_endpoint(node);
+ if (!ep)
+ return 0;
+
+ prop = of_find_property(ep, "lanes", &len);
+ if (prop == NULL) {
+ dev_err(&pdev->dev, "failed to find lane data\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ num_pins = len / sizeof(u32);
+
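+ /*
+ * The "lanes" endpoint property is assumed to list the lane pins as
+ * positive/negative pairs, so a valid array needs an even number of
+ * entries and at least four of them (one clock lane plus one data lane).
+ */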
+ if (num_pins < 4 || num_pins % 2 != 0 ||
+ num_pins > dsi->num_lanes_supported * 2) {
+ dev_err(&pdev->dev, "bad number of lanes\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
+ if (r) {
+ dev_err(&pdev->dev, "failed to read lane data\n");
+ goto err;
+ }
+
+ pin_cfg.num_pins = num_pins;
+ for (i = 0; i < num_pins; ++i)
+ pin_cfg.pins[i] = (int)lane_arr[i];
+
+ r = dsi_configure_pins(&dsi->output, &pin_cfg);
+ if (r) {
+ dev_err(&pdev->dev, "failed to configure pins");
+ goto err;
+ }
+
+ of_node_put(ep);
+
+ return 0;
+
+err:
+ of_node_put(ep);
+ return r;
+}
+
/* DSI1 HW IP initialisation */
static int omap_dsihw_probe(struct platform_device *dsidev)
{
u32 rev;
int r, i;
struct dsi_data *dsi;
+ struct resource *dsi_mem;
struct resource *res;
struct resource temp_res;
@@ -5383,7 +5445,6 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
if (!dsi)
return -ENOMEM;
- dsi->module_id = dsidev->id;
dsi->pdev = dsidev;
dev_set_drvdata(&dsidev->dev, dsi);
@@ -5421,6 +5482,8 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
res = &temp_res;
}
+ dsi_mem = res;
+
dsi->proto_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
if (!dsi->proto_base) {
@@ -5481,6 +5544,31 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
return r;
}
+ if (dsidev->dev.of_node) {
+ const struct of_device_id *match;
+ const struct dsi_module_id_data *d;
+
+ match = of_match_node(dsi_of_match, dsidev->dev.of_node);
+ if (!match) {
+ DSSERR("unsupported DSI module\n");
+ return -ENODEV;
+ }
+
+ d = match->data;
+
+ while (d->address != 0 && d->address != dsi_mem->start)
+ d++;
+
+ if (d->address == 0) {
+ DSSERR("unsupported DSI module\n");
+ return -ENODEV;
+ }
+
+ dsi->module_id = d->id;
+ } else {
+ dsi->module_id = dsidev->id;
+ }
+
/* DSI VCs initialization */
for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
dsi->vc[i].source = DSI_VC_SOURCE_L4;
@@ -5516,6 +5604,19 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
dsi_init_output(dsidev);
+ if (dsidev->dev.of_node) {
+ r = dsi_probe_of(dsidev);
+ if (r) {
+ DSSERR("Invalid DSI DT data\n");
+ goto err_probe_of;
+ }
+
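+ /* create devices for any DSI peripherals described as child nodes */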
+ r = of_platform_populate(dsidev->dev.of_node, NULL, NULL,
+ &dsidev->dev);
+ if (r)
+ DSSERR("Failed to populate DSI child devices: %d\n", r);
+ }
+
dsi_runtime_put(dsidev);
if (dsi->module_id == 0)
@@ -5529,17 +5630,31 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
else if (dsi->module_id == 1)
dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
#endif
+
return 0;
+err_probe_of:
+ dsi_uninit_output(dsidev);
+ dsi_runtime_put(dsidev);
+
err_runtime_get:
pm_runtime_disable(&dsidev->dev);
return r;
}
+static int dsi_unregister_child(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ platform_device_unregister(pdev);
+ return 0;
+}
+
static int __exit omap_dsihw_remove(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ device_for_each_child(&dsidev->dev, NULL, dsi_unregister_child);
+
WARN_ON(dsi->scp_clk_refcount > 0);
dsi_uninit_output(dsidev);
@@ -5577,6 +5692,23 @@ static const struct dev_pm_ops dsi_pm_ops = {
.runtime_resume = dsi_runtime_resume,
};
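+/*
+ * Address/id tables used to derive the DSI module id from the module's
+ * register base address when probing via DT, since the platform device id
+ * is not meaningful in that case.
+ */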
+static const struct dsi_module_id_data dsi_of_data_omap3[] = {
+ { .address = 0x4804fc00, .id = 0, },
+ { },
+};
+
+static const struct dsi_module_id_data dsi_of_data_omap4[] = {
+ { .address = 0x58004000, .id = 0, },
+ { .address = 0x58005000, .id = 1, },
+ { },
+};
+
+static const struct of_device_id dsi_of_match[] = {
+ { .compatible = "ti,omap3-dsi", .data = dsi_of_data_omap3, },
+ { .compatible = "ti,omap4-dsi", .data = dsi_of_data_omap4, },
+ {},
+};
+
static struct platform_driver omap_dsihw_driver = {
.probe = omap_dsihw_probe,
.remove = __exit_p(omap_dsihw_remove),
@@ -5584,6 +5716,7 @@ static struct platform_driver omap_dsihw_driver = {
.name = "omapdss_dsi",
.owner = THIS_MODULE,
.pm = &dsi_pm_ops,
+ .of_match_table = dsi_of_match,
},
};
diff --git a/drivers/video/omap2/dss/dss-of.c b/drivers/video/omap2/dss/dss-of.c
new file mode 100644
index 000000000000..a4b20aaf6142
--- /dev/null
+++ b/drivers/video/omap2/dss/dss-of.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/seq_file.h>
+
+#include <video/omapdss.h>
+
+struct device_node *
+omapdss_of_get_next_port(const struct device_node *parent,
+ struct device_node *prev)
+{
+ struct device_node *port = NULL;
+
+ if (!parent)
+ return NULL;
+
+ if (!prev) {
+ struct device_node *ports;
+ /*
+ * This is the first call, so we have to find a port subnode
+ * within this node or within an optional 'ports' node.
+ */
+ ports = of_get_child_by_name(parent, "ports");
+ if (ports)
+ parent = ports;
+
+ port = of_get_child_by_name(parent, "port");
+
+ /* release the 'ports' node */
+ of_node_put(ports);
+ } else {
+ struct device_node *ports;
+
+ ports = of_get_parent(prev);
+ if (!ports)
+ return NULL;
+
+ do {
+ port = of_get_next_child(ports, prev);
+ if (!port) {
+ of_node_put(ports);
+ return NULL;
+ }
+ prev = port;
+ } while (of_node_cmp(port->name, "port") != 0);
+ }
+
+ return port;
+}
+EXPORT_SYMBOL_GPL(omapdss_of_get_next_port);
+
+struct device_node *
+omapdss_of_get_next_endpoint(const struct device_node *parent,
+ struct device_node *prev)
+{
+ struct device_node *ep = NULL;
+
+ if (!parent)
+ return NULL;
+
+ do {
+ ep = of_get_next_child(parent, prev);
+ if (!ep)
+ return NULL;
+ prev = ep;
+ } while (of_node_cmp(ep->name, "endpoint") != 0);
+
+ return ep;
+}
+EXPORT_SYMBOL_GPL(omapdss_of_get_next_endpoint);
+
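+/*
+ * Resolve the device node behind a "remote-endpoint" phandle: walk up from
+ * the remote endpoint node (at most three levels) and return the first
+ * ancestor that carries a "compatible" property.
+ */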
+static struct device_node *
+omapdss_of_get_remote_device_node(const struct device_node *node)
+{
+ struct device_node *np;
+ int i;
+
+ np = of_parse_phandle(node, "remote-endpoint", 0);
+
+ if (!np)
+ return NULL;
+
+ np = of_get_next_parent(np);
+
+ for (i = 0; i < 3 && np; ++i) {
+ struct property *prop;
+
+ prop = of_find_property(np, "compatible", NULL);
+
+ if (prop)
+ return np;
+
+ np = of_get_next_parent(np);
+ }
+
+ return NULL;
+}
+
+struct device_node *
+omapdss_of_get_first_endpoint(const struct device_node *parent)
+{
+ struct device_node *port, *ep;
+
+ port = omapdss_of_get_next_port(parent, NULL);
+
+ if (!port)
+ return NULL;
+
+ ep = omapdss_of_get_next_endpoint(port, NULL);
+
+ of_node_put(port);
+
+ return ep;
+}
+EXPORT_SYMBOL_GPL(omapdss_of_get_first_endpoint);
+
+struct omap_dss_device *
+omapdss_of_find_source_for_first_ep(struct device_node *node)
+{
+ struct device_node *ep;
+ struct device_node *src_node;
+ struct omap_dss_device *src;
+
+ ep = omapdss_of_get_first_endpoint(node);
+ if (!ep)
+ return ERR_PTR(-EINVAL);
+
+ src_node = omapdss_of_get_remote_device_node(ep);
+
+ of_node_put(ep);
+
+ if (!src_node)
+ return ERR_PTR(-EINVAL);
+
+ src = omap_dss_find_output_by_node(src_node);
+
+ of_node_put(src_node);
+
+ if (!src)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return src;
+}
+EXPORT_SYMBOL_GPL(omapdss_of_find_source_for_first_ep);
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 9a145da35ad3..825c019ddee7 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -23,6 +23,7 @@
#define DSS_SUBSYS_NAME "DSS"
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/err.h>
@@ -33,6 +34,7 @@
#include <linux/pm_runtime.h>
#include <linux/gfp.h>
#include <linux/sizes.h>
+#include <linux/of.h>
#include <video/omapdss.h>
@@ -154,22 +156,6 @@ static void dss_restore_context(void)
#undef SR
#undef RR
-int dss_get_ctx_loss_count(void)
-{
- struct platform_device *core_pdev = dss_get_core_pdev();
- struct omap_dss_board_info *board_data = core_pdev->dev.platform_data;
- int cnt;
-
- if (!board_data->get_context_loss_count)
- return -ENOENT;
-
- cnt = board_data->get_context_loss_count(&dss.pdev->dev);
-
- WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
-
- return cnt;
-}
-
void dss_sdi_init(int datapairs)
{
u32 l;
@@ -788,6 +774,56 @@ static int __init dss_init_features(struct platform_device *pdev)
return 0;
}
+static int __init dss_init_ports(struct platform_device *pdev)
+{
+ struct device_node *parent = pdev->dev.of_node;
+ struct device_node *port;
+ int r;
+
+ if (parent == NULL)
+ return 0;
+
+ port = omapdss_of_get_next_port(parent, NULL);
+ if (!port) {
+#ifdef CONFIG_OMAP2_DSS_DPI
+ dpi_init_port(pdev, parent);
+#endif
+ return 0;
+ }
+
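+ /*
+ * Port "reg" 0 is taken to be the DPI output and "reg" 1 the SDI
+ * output; a port without a "reg" property defaults to 0.
+ */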
+ do {
+ u32 reg;
+
+ r = of_property_read_u32(port, "reg", &reg);
+ if (r)
+ reg = 0;
+
+#ifdef CONFIG_OMAP2_DSS_DPI
+ if (reg == 0)
+ dpi_init_port(pdev, port);
+#endif
+
+#ifdef CONFIG_OMAP2_DSS_SDI
+ if (reg == 1)
+ sdi_init_port(pdev, port);
+#endif
+
+ } while ((port = omapdss_of_get_next_port(parent, port)) != NULL);
+
+ return 0;
+}
+
+static void dss_uninit_ports(void)
+{
+#ifdef CONFIG_OMAP2_DSS_DPI
+ dpi_uninit_port();
+#endif
+
+#ifdef CONFIG_OMAP2_DSS_SDI
+ sdi_uninit_port();
+#endif
+}
+
/* DSS HW IP initialisation */
static int __init omap_dsshw_probe(struct platform_device *pdev)
{
@@ -846,6 +882,8 @@ static int __init omap_dsshw_probe(struct platform_device *pdev)
dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
+ dss_init_ports(pdev);
+
rev = dss_read_reg(DSS_REVISION);
printk(KERN_INFO "OMAP DSS rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
@@ -865,6 +903,8 @@ err_setup_clocks:
static int __exit omap_dsshw_remove(struct platform_device *pdev)
{
+ dss_uninit_ports();
+
pm_runtime_disable(&pdev->dev);
dss_put_clocks();
@@ -902,12 +942,22 @@ static const struct dev_pm_ops dss_pm_ops = {
.runtime_resume = dss_runtime_resume,
};
+static const struct of_device_id dss_of_match[] = {
+ { .compatible = "ti,omap2-dss", },
+ { .compatible = "ti,omap3-dss", },
+ { .compatible = "ti,omap4-dss", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dss_of_match);
+
static struct platform_driver omap_dsshw_driver = {
.remove = __exit_p(omap_dsshw_remove),
.driver = {
.name = "omapdss_dss",
.owner = THIS_MODULE,
.pm = &dss_pm_ops,
+ .of_match_table = dss_of_match,
},
};
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 057f24c8a332..918fec182424 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -225,8 +225,6 @@ void dss_dump_clocks(struct seq_file *s);
void dss_debug_dump_clocks(struct seq_file *s);
#endif
-int dss_get_ctx_loss_count(void);
-
void dss_sdi_init(int datapairs);
int dss_sdi_enable(void);
void dss_sdi_disable(void);
@@ -252,6 +250,9 @@ bool dss_div_calc(unsigned long pck, unsigned long fck_min,
int sdi_init_platform_driver(void) __init;
void sdi_uninit_platform_driver(void) __exit;
+int sdi_init_port(struct platform_device *pdev, struct device_node *port) __init;
+void sdi_uninit_port(void) __exit;
+
/* DSI */
typedef bool (*dsi_pll_calc_func)(int regn, int regm, unsigned long fint,
@@ -363,6 +364,9 @@ static inline bool dsi_pll_calc(struct platform_device *dsidev,
int dpi_init_platform_driver(void) __init;
void dpi_uninit_platform_driver(void) __exit;
+int dpi_init_port(struct platform_device *pdev, struct device_node *port) __init;
+void dpi_uninit_port(void) __exit;
+
/* DISPC */
int dispc_init_platform_driver(void) __init;
void dispc_uninit_platform_driver(void) __exit;
diff --git a/drivers/video/omap2/dss/hdmi4.c b/drivers/video/omap2/dss/hdmi4.c
index 4a74538f9ea5..f5f7944a1fd1 100644
--- a/drivers/video/omap2/dss/hdmi4.c
+++ b/drivers/video/omap2/dss/hdmi4.c
@@ -88,15 +88,11 @@ static int hdmi_init_regulator(void)
if (hdmi.vdda_hdmi_dac_reg != NULL)
return 0;
- reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
-
- /* DT HACK: try VDAC to make omapdss work for o4 sdp/panda */
- if (IS_ERR(reg))
- reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
+ reg = devm_regulator_get(&hdmi.pdev->dev, "vdda");
if (IS_ERR(reg)) {
if (PTR_ERR(reg) != -EPROBE_DEFER)
- DSSERR("can't get VDDA_HDMI_DAC regulator\n");
+ DSSERR("can't get VDDA regulator\n");
return PTR_ERR(reg);
}
@@ -153,7 +149,8 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
- phy = p->pixel_clock;
+ /* the functions below use kHz pixel clock. TODO: change to Hz */
+ phy = p->pixelclock / 1000;
hdmi_pll_compute(&hdmi.pll, clk_get_rate(hdmi.sys_clk), phy);
@@ -238,13 +235,13 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
if (t != NULL) {
hdmi.cfg = *t;
- dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
+ dispc_set_tv_pclk(t->timings.pixelclock);
} else {
hdmi.cfg.timings = *timings;
hdmi.cfg.cm.code = 0;
hdmi.cfg.cm.mode = HDMI_DVI;
- dispc_set_tv_pclk(timings->pixel_clock * 1000);
+ dispc_set_tv_pclk(timings->pixelclock);
}
DSSDBG("using mode: %s, code %d\n", hdmi.cfg.cm.mode == HDMI_DVI ?
@@ -509,7 +506,7 @@ static int hdmi_audio_config(struct omap_dss_device *dssdev,
struct omap_dss_audio *audio)
{
int r;
- u32 pclk = hdmi.cfg.timings.pixel_clock;
+ u32 pclk = hdmi.cfg.timings.pixelclock;
mutex_lock(&hdmi.lock);
@@ -679,6 +676,11 @@ static const struct dev_pm_ops hdmi_pm_ops = {
.runtime_resume = hdmi_runtime_resume,
};
+static const struct of_device_id hdmi_of_match[] = {
+ { .compatible = "ti,omap4-hdmi", },
+ {},
+};
+
static struct platform_driver omapdss_hdmihw_driver = {
.probe = omapdss_hdmihw_probe,
.remove = __exit_p(omapdss_hdmihw_remove),
@@ -686,6 +688,7 @@ static struct platform_driver omapdss_hdmihw_driver = {
.name = "omapdss_hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
+ .of_match_table = hdmi_of_match,
},
};
diff --git a/drivers/video/omap2/dss/hdmi_common.c b/drivers/video/omap2/dss/hdmi_common.c
index 0614922902dd..b11afac8e068 100644
--- a/drivers/video/omap2/dss/hdmi_common.c
+++ b/drivers/video/omap2/dss/hdmi_common.c
@@ -23,91 +23,91 @@
static const struct hdmi_config cea_timings[] = {
{
- { 640, 480, 25200, 96, 16, 48, 2, 10, 33,
+ { 640, 480, 25200000, 96, 16, 48, 2, 10, 33,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 1, HDMI_HDMI },
},
{
- { 720, 480, 27027, 62, 16, 60, 6, 9, 30,
+ { 720, 480, 27027000, 62, 16, 60, 6, 9, 30,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 2, HDMI_HDMI },
},
{
- { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
+ { 1280, 720, 74250000, 40, 110, 220, 5, 5, 20,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 4, HDMI_HDMI },
},
{
- { 1920, 540, 74250, 44, 88, 148, 5, 2, 15,
+ { 1920, 540, 74250000, 44, 88, 148, 5, 2, 15,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
true, },
{ 5, HDMI_HDMI },
},
{
- { 1440, 240, 27027, 124, 38, 114, 3, 4, 15,
+ { 1440, 240, 27027000, 124, 38, 114, 3, 4, 15,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
true, },
{ 6, HDMI_HDMI },
},
{
- { 1920, 1080, 148500, 44, 88, 148, 5, 4, 36,
+ { 1920, 1080, 148500000, 44, 88, 148, 5, 4, 36,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 16, HDMI_HDMI },
},
{
- { 720, 576, 27000, 64, 12, 68, 5, 5, 39,
+ { 720, 576, 27000000, 64, 12, 68, 5, 5, 39,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 17, HDMI_HDMI },
},
{
- { 1280, 720, 74250, 40, 440, 220, 5, 5, 20,
+ { 1280, 720, 74250000, 40, 440, 220, 5, 5, 20,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 19, HDMI_HDMI },
},
{
- { 1920, 540, 74250, 44, 528, 148, 5, 2, 15,
+ { 1920, 540, 74250000, 44, 528, 148, 5, 2, 15,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
true, },
{ 20, HDMI_HDMI },
},
{
- { 1440, 288, 27000, 126, 24, 138, 3, 2, 19,
+ { 1440, 288, 27000000, 126, 24, 138, 3, 2, 19,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
true, },
{ 21, HDMI_HDMI },
},
{
- { 1440, 576, 54000, 128, 24, 136, 5, 5, 39,
+ { 1440, 576, 54000000, 128, 24, 136, 5, 5, 39,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 29, HDMI_HDMI },
},
{
- { 1920, 1080, 148500, 44, 528, 148, 5, 4, 36,
+ { 1920, 1080, 148500000, 44, 528, 148, 5, 4, 36,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 31, HDMI_HDMI },
},
{
- { 1920, 1080, 74250, 44, 638, 148, 5, 4, 36,
+ { 1920, 1080, 74250000, 44, 638, 148, 5, 4, 36,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 32, HDMI_HDMI },
},
{
- { 2880, 480, 108108, 248, 64, 240, 6, 9, 30,
+ { 2880, 480, 108108000, 248, 64, 240, 6, 9, 30,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 35, HDMI_HDMI },
},
{
- { 2880, 576, 108000, 256, 48, 272, 5, 5, 39,
+ { 2880, 576, 108000000, 256, 48, 272, 5, 5, 39,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 37, HDMI_HDMI },
@@ -117,121 +117,121 @@ static const struct hdmi_config cea_timings[] = {
static const struct hdmi_config vesa_timings[] = {
/* VESA From Here */
{
- { 640, 480, 25175, 96, 16, 48, 2, 11, 31,
+ { 640, 480, 25175000, 96, 16, 48, 2, 11, 31,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 4, HDMI_DVI },
},
{
- { 800, 600, 40000, 128, 40, 88, 4, 1, 23,
+ { 800, 600, 40000000, 128, 40, 88, 4, 1, 23,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 9, HDMI_DVI },
},
{
- { 848, 480, 33750, 112, 16, 112, 8, 6, 23,
+ { 848, 480, 33750000, 112, 16, 112, 8, 6, 23,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0xE, HDMI_DVI },
},
{
- { 1280, 768, 79500, 128, 64, 192, 7, 3, 20,
+ { 1280, 768, 79500000, 128, 64, 192, 7, 3, 20,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 0x17, HDMI_DVI },
},
{
- { 1280, 800, 83500, 128, 72, 200, 6, 3, 22,
+ { 1280, 800, 83500000, 128, 72, 200, 6, 3, 22,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 0x1C, HDMI_DVI },
},
{
- { 1360, 768, 85500, 112, 64, 256, 6, 3, 18,
+ { 1360, 768, 85500000, 112, 64, 256, 6, 3, 18,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0x27, HDMI_DVI },
},
{
- { 1280, 960, 108000, 112, 96, 312, 3, 1, 36,
+ { 1280, 960, 108000000, 112, 96, 312, 3, 1, 36,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0x20, HDMI_DVI },
},
{
- { 1280, 1024, 108000, 112, 48, 248, 3, 1, 38,
+ { 1280, 1024, 108000000, 112, 48, 248, 3, 1, 38,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0x23, HDMI_DVI },
},
{
- { 1024, 768, 65000, 136, 24, 160, 6, 3, 29,
+ { 1024, 768, 65000000, 136, 24, 160, 6, 3, 29,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 0x10, HDMI_DVI },
},
{
- { 1400, 1050, 121750, 144, 88, 232, 4, 3, 32,
+ { 1400, 1050, 121750000, 144, 88, 232, 4, 3, 32,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 0x2A, HDMI_DVI },
},
{
- { 1440, 900, 106500, 152, 80, 232, 6, 3, 25,
+ { 1440, 900, 106500000, 152, 80, 232, 6, 3, 25,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 0x2F, HDMI_DVI },
},
{
- { 1680, 1050, 146250, 176 , 104, 280, 6, 3, 30,
+ { 1680, 1050, 146250000, 176, 104, 280, 6, 3, 30,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
false, },
{ 0x3A, HDMI_DVI },
},
{
- { 1366, 768, 85500, 143, 70, 213, 3, 3, 24,
+ { 1366, 768, 85500000, 143, 70, 213, 3, 3, 24,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0x51, HDMI_DVI },
},
{
- { 1920, 1080, 148500, 44, 148, 80, 5, 4, 36,
+ { 1920, 1080, 148500000, 44, 148, 80, 5, 4, 36,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0x52, HDMI_DVI },
},
{
- { 1280, 768, 68250, 32, 48, 80, 7, 3, 12,
+ { 1280, 768, 68250000, 32, 48, 80, 7, 3, 12,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0x16, HDMI_DVI },
},
{
- { 1400, 1050, 101000, 32, 48, 80, 4, 3, 23,
+ { 1400, 1050, 101000000, 32, 48, 80, 4, 3, 23,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0x29, HDMI_DVI },
},
{
- { 1680, 1050, 119000, 32, 48, 80, 6, 3, 21,
+ { 1680, 1050, 119000000, 32, 48, 80, 6, 3, 21,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0x39, HDMI_DVI },
},
{
- { 1280, 800, 79500, 32, 48, 80, 6, 3, 14,
+ { 1280, 800, 79500000, 32, 48, 80, 6, 3, 14,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0x1B, HDMI_DVI },
},
{
- { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
+ { 1280, 720, 74250000, 40, 110, 220, 5, 5, 20,
OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0x55, HDMI_DVI },
},
{
- { 1920, 1200, 154000, 32, 48, 80, 6, 3, 26,
+ { 1920, 1200, 154000000, 32, 48, 80, 6, 3, 26,
OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
false, },
{ 0x44, HDMI_DVI },
@@ -277,8 +277,8 @@ static bool hdmi_timings_compare(struct omap_video_timings *timing1,
{
int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync;
- if ((DIV_ROUND_CLOSEST(timing2->pixel_clock, 1000) ==
- DIV_ROUND_CLOSEST(timing1->pixel_clock, 1000)) &&
+ if ((DIV_ROUND_CLOSEST(timing2->pixelclock, 1000000) ==
+ DIV_ROUND_CLOSEST(timing1->pixelclock, 1000000)) &&
(timing2->x_res == timing1->x_res) &&
(timing2->y_res == timing1->y_res)) {
diff --git a/drivers/video/omap2/dss/hdmi_wp.c b/drivers/video/omap2/dss/hdmi_wp.c
index cd620c6e43a0..f5f4ccf50d90 100644
--- a/drivers/video/omap2/dss/hdmi_wp.c
+++ b/drivers/video/omap2/dss/hdmi_wp.c
@@ -171,6 +171,8 @@ void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
video_fmt->y_res = param->timings.y_res;
video_fmt->x_res = param->timings.x_res;
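+ /* in interlaced modes each field carries only half of the frame lines */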
+ if (param->timings.interlace)
+ video_fmt->y_res /= 2;
timings->hbp = param->timings.hbp;
timings->hfp = param->timings.hfp;
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index ba806c9e7f54..911dcc9173a6 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -26,6 +26,7 @@
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/of.h>
#include <video/omapdss.h>
#include "dss.h"
@@ -41,6 +42,8 @@ static struct {
int datapairs;
struct omap_dss_device output;
+
+ bool port_initialized;
} sdi;
struct sdi_clk_calc_ctx {
@@ -149,20 +152,19 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- r = sdi_calc_clock_div(t->pixel_clock * 1000, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(t->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
sdi.mgr_config.clock_info = dispc_cinfo;
- pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div / 1000;
+ pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
- if (pck != t->pixel_clock) {
- DSSWARN("Could not find exact pixel clock. Requested %d kHz, "
- "got %lu kHz\n",
- t->pixel_clock, pck);
+ if (pck != t->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
+ t->pixelclock, pck);
- t->pixel_clock = pck;
+ t->pixelclock = pck;
}
@@ -244,7 +246,7 @@ static int sdi_check_timings(struct omap_dss_device *dssdev,
if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
return -EINVAL;
- if (timings->pixel_clock == 0)
+ if (timings->pixelclock == 0)
return -EINVAL;
return 0;
@@ -387,3 +389,45 @@ void __exit sdi_uninit_platform_driver(void)
{
platform_driver_unregister(&omap_sdi_driver);
}
+
+int __init sdi_init_port(struct platform_device *pdev, struct device_node *port)
+{
+ struct device_node *ep;
+ u32 datapairs;
+ int r;
+
+ ep = omapdss_of_get_next_endpoint(port, NULL);
+ if (!ep)
+ return 0;
+
+ r = of_property_read_u32(ep, "datapairs", &datapairs);
+ if (r) {
+ DSSERR("failed to parse datapairs\n");
+ goto err_datapairs;
+ }
+
+ sdi.datapairs = datapairs;
+
+ of_node_put(ep);
+
+ sdi.pdev = pdev;
+
+ sdi_init_output(pdev);
+
+ sdi.port_initialized = true;
+
+ return 0;
+
+err_datapairs:
+ of_node_put(ep);
+
+ return r;
+}
+
+void __exit sdi_uninit_port(void)
+{
+ if (!sdi.port_initialized)
+ return;
+
+ sdi_uninit_output(sdi.pdev);
+}
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 2cd7f7e42105..21d81113962b 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -34,6 +34,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include <video/omapdss.h>
@@ -264,7 +265,7 @@ static const struct venc_config venc_config_pal_bdghi = {
const struct omap_video_timings omap_dss_pal_timings = {
.x_res = 720,
.y_res = 574,
- .pixel_clock = 13500,
+ .pixelclock = 13500000,
.hsw = 64,
.hfp = 12,
.hbp = 68,
@@ -279,7 +280,7 @@ EXPORT_SYMBOL(omap_dss_pal_timings);
const struct omap_video_timings omap_dss_ntsc_timings = {
.x_res = 720,
.y_res = 482,
- .pixel_clock = 13500,
+ .pixelclock = 13500000,
.hsw = 64,
.hfp = 16,
.hbp = 58,
@@ -636,7 +637,10 @@ static int venc_init_regulator(void)
if (venc.vdda_dac_reg != NULL)
return 0;
- vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda_dac");
+ if (venc.pdev->dev.of_node)
+ vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda");
+ else
+ vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda_dac");
if (IS_ERR(vdda_dac)) {
if (PTR_ERR(vdda_dac) != -EPROBE_DEFER)
@@ -805,6 +809,48 @@ static void __exit venc_uninit_output(struct platform_device *pdev)
omapdss_unregister_output(out);
}
+static int venc_probe_of(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *ep;
+ u32 channels;
+ int r;
+
+ ep = omapdss_of_get_first_endpoint(node);
+ if (!ep)
+ return 0;
+
+ venc.invert_polarity = of_property_read_bool(ep, "ti,invert-polarity");
+
+ r = of_property_read_u32(ep, "ti,channels", &channels);
+ if (r) {
+ dev_err(&pdev->dev,
+ "failed to read property 'ti,channels': %d\n", r);
+ goto err;
+ }
+
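+ /* one connected channel means composite output, two means S-Video */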
+ switch (channels) {
+ case 1:
+ venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE;
+ break;
+ case 2:
+ venc.type = OMAP_DSS_VENC_TYPE_SVIDEO;
+ break;
+ default:
+ dev_err(&pdev->dev, "bad channel propert '%d'\n", channels);
+ r = -EINVAL;
+ goto err;
+ }
+
+ of_node_put(ep);
+
+ return 0;
+err:
+ of_node_put(ep);
+
+ return r;
+}
+
/* VENC HW IP initialisation */
static int omap_venchw_probe(struct platform_device *pdev)
{
@@ -846,12 +892,21 @@ static int omap_venchw_probe(struct platform_device *pdev)
venc_runtime_put();
+ if (pdev->dev.of_node) {
+ r = venc_probe_of(pdev);
+ if (r) {
+ DSSERR("Invalid DT data\n");
+ goto err_probe_of;
+ }
+ }
+
dss_debugfs_create_file("venc", venc_dump_regs);
venc_init_output(pdev);
return 0;
+err_probe_of:
err_runtime_get:
pm_runtime_disable(&pdev->dev);
return r;
@@ -895,6 +950,14 @@ static const struct dev_pm_ops venc_pm_ops = {
.runtime_resume = venc_runtime_resume,
};
+
+static const struct of_device_id venc_of_match[] = {
+ { .compatible = "ti,omap2-venc", },
+ { .compatible = "ti,omap3-venc", },
+ { .compatible = "ti,omap4-venc", },
+ {},
+};
+
static struct platform_driver omap_venchw_driver = {
.probe = omap_venchw_probe,
.remove = __exit_p(omap_venchw_remove),
@@ -902,6 +965,7 @@ static struct platform_driver omap_venchw_driver = {
.name = "omapdss_venc",
.owner = THIS_MODULE,
.pm = &venc_pm_ops,
+ .of_match_table = venc_of_match,
},
};
diff --git a/drivers/video/omap2/dss/venc_panel.c b/drivers/video/omap2/dss/venc_panel.c
index f7d92c57bd73..af68cd444d7e 100644
--- a/drivers/video/omap2/dss/venc_panel.c
+++ b/drivers/video/omap2/dss/venc_panel.c
@@ -89,7 +89,7 @@ static int venc_panel_probe(struct omap_dss_device *dssdev)
const struct omap_video_timings default_timings = {
.x_res = 720,
.y_res = 574,
- .pixel_clock = 13500,
+ .pixelclock = 13500000,
.hsw = 64,
.hfp = 12,
.hbp = 68,
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index fcb9e932d00c..ec2d132c782d 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -723,8 +723,8 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
display->driver->get_timings(display, &timings);
/* pixclock in ps, the rest in pixclock */
- var->pixclock = timings.pixel_clock != 0 ?
- KHZ2PICOS(timings.pixel_clock) :
+ var->pixclock = timings.pixelclock != 0 ?
+ KHZ2PICOS(timings.pixelclock / 1000) :
0;
var->left_margin = timings.hbp;
var->right_margin = timings.hfp;
@@ -2077,7 +2077,7 @@ static int omapfb_mode_to_timings(const char *mode_str,
timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
}
- timings->pixel_clock = PICOS2KHZ(var->pixclock);
+ timings->pixelclock = PICOS2KHZ(var->pixclock) * 1000;
timings->hbp = var->left_margin;
timings->hfp = var->right_margin;
timings->vbp = var->upper_margin;
@@ -2229,7 +2229,7 @@ static void fb_videomode_to_omap_timings(struct fb_videomode *m,
t->x_res = m->xres;
t->y_res = m->yres;
- t->pixel_clock = PICOS2KHZ(m->pixclock);
+ t->pixelclock = PICOS2KHZ(m->pixclock) * 1000;
t->hsw = m->hsync_len;
t->hfp = m->right_margin;
t->hbp = m->left_margin;
@@ -2417,6 +2417,55 @@ static int omapfb_init_connections(struct omapfb2_device *fbdev,
return 0;
}
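+/*
+ * Pick the default display: first try the name given by the user or the
+ * board file, then look for the device with DT alias "display0", and
+ * finally fall back to the first registered display.
+ */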
+static struct omap_dss_device *
+omapfb_find_default_display(struct omapfb2_device *fbdev)
+{
+ const char *def_name;
+ int i;
+
+ /*
+ * Search with the display name from the user or the board file,
+ * comparing to display names and aliases
+ */
+
+ def_name = omapdss_get_default_display_name();
+
+ if (def_name) {
+ for (i = 0; i < fbdev->num_displays; ++i) {
+ struct omap_dss_device *dssdev;
+
+ dssdev = fbdev->displays[i].dssdev;
+
+ if (dssdev->name && strcmp(def_name, dssdev->name) == 0)
+ return dssdev;
+
+ if (strcmp(def_name, dssdev->alias) == 0)
+ return dssdev;
+ }
+
+ /* def_name given but not found */
+ return NULL;
+ }
+
+ /* then look for DT alias display0 */
+ for (i = 0; i < fbdev->num_displays; ++i) {
+ struct omap_dss_device *dssdev;
+ int id;
+
+ dssdev = fbdev->displays[i].dssdev;
+
+ if (dssdev->dev->of_node == NULL)
+ continue;
+
+ id = of_alias_get_id(dssdev->dev->of_node, "display");
+ if (id == 0)
+ return dssdev;
+ }
+
+ /* return the first display we have in the list */
+ return fbdev->displays[0].dssdev;
+}
+
static int omapfb_probe(struct platform_device *pdev)
{
struct omapfb2_device *fbdev = NULL;
@@ -2494,23 +2543,7 @@ static int omapfb_probe(struct platform_device *pdev)
for (i = 0; i < fbdev->num_managers; i++)
fbdev->managers[i] = omap_dss_get_overlay_manager(i);
- def_display = NULL;
-
- for (i = 0; i < fbdev->num_displays; ++i) {
- struct omap_dss_device *dssdev;
- const char *def_name;
-
- def_name = omapdss_get_default_display_name();
-
- dssdev = fbdev->displays[i].dssdev;
-
- if (def_name == NULL ||
- (dssdev->name && strcmp(def_name, dssdev->name) == 0)) {
- def_display = dssdev;
- break;
- }
- }
-
+ def_display = omapfb_find_default_display(fbdev);
if (def_display == NULL) {
dev_err(fbdev->dev, "failed to find default display\n");
r = -EPROBE_DEFER;
diff --git a/drivers/video/output.c b/drivers/video/output.c
deleted file mode 100644
index 1446c49fe6af..000000000000
--- a/drivers/video/output.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * output.c - Display Output Switch driver
- *
- * Copyright (C) 2006 Luming Yu <luming.yu@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <linux/module.h>
-#include <linux/video_output.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/ctype.h>
-
-
-MODULE_DESCRIPTION("Display Output Switcher Lowlevel Control Abstraction");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>");
-
-static ssize_t state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ssize_t ret_size = 0;
- struct output_device *od = to_output_device(dev);
- if (od->props)
- ret_size = sprintf(buf,"%.8x\n",od->props->get_status(od));
- return ret_size;
-}
-
-static ssize_t state_store(struct device *dev, struct device_attribute *attr,
- const char *buf,size_t count)
-{
- char *endp;
- struct output_device *od = to_output_device(dev);
- int request_state = simple_strtoul(buf,&endp,0);
- size_t size = endp - buf;
-
- if (isspace(*endp))
- size++;
- if (size != count)
- return -EINVAL;
-
- if (od->props) {
- od->request_state = request_state;
- od->props->set_state(od);
- }
- return count;
-}
-static DEVICE_ATTR_RW(state);
-
-static void video_output_release(struct device *dev)
-{
- struct output_device *od = to_output_device(dev);
- kfree(od);
-}
-
-static struct attribute *video_output_attrs[] = {
- &dev_attr_state.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(video_output);
-
-static struct class video_output_class = {
- .name = "video_output",
- .dev_release = video_output_release,
- .dev_groups = video_output_groups,
-};
-
-struct output_device *video_output_register(const char *name,
- struct device *dev,
- void *devdata,
- struct output_properties *op)
-{
- struct output_device *new_dev;
- int ret_code = 0;
-
- new_dev = kzalloc(sizeof(struct output_device),GFP_KERNEL);
- if (!new_dev) {
- ret_code = -ENOMEM;
- goto error_return;
- }
- new_dev->props = op;
- new_dev->dev.class = &video_output_class;
- new_dev->dev.parent = dev;
- dev_set_name(&new_dev->dev, "%s", name);
- dev_set_drvdata(&new_dev->dev, devdata);
- ret_code = device_register(&new_dev->dev);
- if (ret_code) {
- kfree(new_dev);
- goto error_return;
- }
- return new_dev;
-
-error_return:
- return ERR_PTR(ret_code);
-}
-EXPORT_SYMBOL(video_output_register);
-
-void video_output_unregister(struct output_device *dev)
-{
- if (!dev)
- return;
- device_unregister(&dev->dev);
-}
-EXPORT_SYMBOL(video_output_unregister);
-
-static void __exit video_output_class_exit(void)
-{
- class_unregister(&video_output_class);
-}
-
-static int __init video_output_class_init(void)
-{
- return class_register(&video_output_class);
-}
-
-postcore_initcall(video_output_class_init);
-module_exit(video_output_class_exit);
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index ad382b3396cd..417f9a27eb7d 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -107,7 +107,6 @@ struct pxa3xx_gcu_priv {
struct timeval base_time;
struct pxa3xx_gcu_batch *free;
-
struct pxa3xx_gcu_batch *ready;
struct pxa3xx_gcu_batch *ready_last;
struct pxa3xx_gcu_batch *running;
@@ -368,27 +367,35 @@ pxa3xx_gcu_wait_free(struct pxa3xx_gcu_priv *priv)
/* Misc device layer */
-static inline struct pxa3xx_gcu_priv *file_dev(struct file *file)
+static inline struct pxa3xx_gcu_priv *to_pxa3xx_gcu_priv(struct file *file)
{
struct miscdevice *dev = file->private_data;
return container_of(dev, struct pxa3xx_gcu_priv, misc_dev);
}
+/*
+ * provide an empty .open callback, so the core sets file->private_data
+ * for us.
+ */
+static int pxa3xx_gcu_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
static ssize_t
-pxa3xx_gcu_misc_write(struct file *file, const char *buff,
- size_t count, loff_t *offp)
+pxa3xx_gcu_write(struct file *file, const char *buff,
+ size_t count, loff_t *offp)
{
int ret;
unsigned long flags;
struct pxa3xx_gcu_batch *buffer;
- struct pxa3xx_gcu_priv *priv = file_dev(file);
+ struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);
int words = count / 4;
/* Does not need to be atomic. There's a lock in user space,
* but anyhow, this is just for statistics. */
priv->shared->num_writes++;
-
priv->shared->num_words += words;
/* Last word reserved for batch buffer end command */
@@ -406,10 +413,8 @@ pxa3xx_gcu_misc_write(struct file *file, const char *buff,
* Get buffer from free list
*/
spin_lock_irqsave(&priv->spinlock, flags);
-
buffer = priv->free;
priv->free = buffer->next;
-
spin_unlock_irqrestore(&priv->spinlock, flags);
@@ -454,10 +459,10 @@ pxa3xx_gcu_misc_write(struct file *file, const char *buff,
static long
-pxa3xx_gcu_misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+pxa3xx_gcu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
unsigned long flags;
- struct pxa3xx_gcu_priv *priv = file_dev(file);
+ struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);
switch (cmd) {
case PXA3XX_GCU_IOCTL_RESET:
@@ -474,10 +479,10 @@ pxa3xx_gcu_misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
static int
-pxa3xx_gcu_misc_mmap(struct file *file, struct vm_area_struct *vma)
+pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
{
unsigned int size = vma->vm_end - vma->vm_start;
- struct pxa3xx_gcu_priv *priv = file_dev(file);
+ struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);
switch (vma->vm_pgoff) {
case 0:
@@ -532,8 +537,8 @@ static inline void pxa3xx_gcu_init_debug_timer(void) {}
#endif
static int
-add_buffer(struct platform_device *dev,
- struct pxa3xx_gcu_priv *priv)
+pxa3xx_gcu_add_buffer(struct device *dev,
+ struct pxa3xx_gcu_priv *priv)
{
struct pxa3xx_gcu_batch *buffer;
@@ -541,7 +546,7 @@ add_buffer(struct platform_device *dev,
if (!buffer)
return -ENOMEM;
- buffer->ptr = dma_alloc_coherent(&dev->dev, PXA3XX_GCU_BATCH_WORDS * 4,
+ buffer->ptr = dma_alloc_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4,
&buffer->phys, GFP_KERNEL);
if (!buffer->ptr) {
kfree(buffer);
@@ -549,57 +554,49 @@ add_buffer(struct platform_device *dev,
}
buffer->next = priv->free;
-
priv->free = buffer;
return 0;
}
static void
-free_buffers(struct platform_device *dev,
- struct pxa3xx_gcu_priv *priv)
+pxa3xx_gcu_free_buffers(struct device *dev,
+ struct pxa3xx_gcu_priv *priv)
{
struct pxa3xx_gcu_batch *next, *buffer = priv->free;
while (buffer) {
next = buffer->next;
- dma_free_coherent(&dev->dev, PXA3XX_GCU_BATCH_WORDS * 4,
+ dma_free_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4,
buffer->ptr, buffer->phys);
kfree(buffer);
-
buffer = next;
}
priv->free = NULL;
}
-static const struct file_operations misc_fops = {
- .owner = THIS_MODULE,
- .write = pxa3xx_gcu_misc_write,
- .unlocked_ioctl = pxa3xx_gcu_misc_ioctl,
- .mmap = pxa3xx_gcu_misc_mmap
+static const struct file_operations pxa3xx_gcu_miscdev_fops = {
+ .owner = THIS_MODULE,
+ .open = pxa3xx_gcu_open,
+ .write = pxa3xx_gcu_write,
+ .unlocked_ioctl = pxa3xx_gcu_ioctl,
+ .mmap = pxa3xx_gcu_mmap,
};
-static int pxa3xx_gcu_probe(struct platform_device *dev)
+static int pxa3xx_gcu_probe(struct platform_device *pdev)
{
int i, ret, irq;
struct resource *r;
struct pxa3xx_gcu_priv *priv;
+ struct device *dev = &pdev->dev;
- priv = kzalloc(sizeof(struct pxa3xx_gcu_priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(struct pxa3xx_gcu_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- for (i = 0; i < 8; i++) {
- ret = add_buffer(dev, priv);
- if (ret) {
- dev_err(&dev->dev, "failed to allocate DMA memory\n");
- goto err_free_priv;
- }
- }
-
init_waitqueue_head(&priv->wait_idle);
init_waitqueue_head(&priv->wait_free);
spin_lock_init(&priv->spinlock);
@@ -611,125 +608,99 @@ static int pxa3xx_gcu_probe(struct platform_device *dev)
priv->misc_dev.minor = MISCDEV_MINOR,
priv->misc_dev.name = DRV_NAME,
- priv->misc_dev.fops = &misc_fops,
+ priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops;
- /* register misc device */
- ret = misc_register(&priv->misc_dev);
- if (ret < 0) {
- dev_err(&dev->dev, "misc_register() for minor %d failed\n",
- MISCDEV_MINOR);
- goto err_free_priv;
+ /* handle IO resources */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->mmio_base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(priv->mmio_base)) {
+ dev_err(dev, "failed to map I/O memory\n");
+ return PTR_ERR(priv->mmio_base);
}
- /* handle IO resources */
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(&dev->dev, "no I/O memory resource defined\n");
- ret = -ENODEV;
- goto err_misc_deregister;
+ /* enable the clock */
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(priv->clk);
}
- if (!request_mem_region(r->start, resource_size(r), dev->name)) {
- dev_err(&dev->dev, "failed to request I/O memory\n");
- ret = -EBUSY;
- goto err_misc_deregister;
+ /* request the IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no IRQ defined\n");
+ return -ENODEV;
}
- priv->mmio_base = ioremap_nocache(r->start, resource_size(r));
- if (!priv->mmio_base) {
- dev_err(&dev->dev, "failed to map I/O memory\n");
- ret = -EBUSY;
- goto err_free_mem_region;
+ ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq,
+ 0, DRV_NAME, priv);
+ if (ret < 0) {
+ dev_err(dev, "request_irq failed\n");
+ return ret;
}
/* allocate dma memory */
- priv->shared = dma_alloc_coherent(&dev->dev, SHARED_SIZE,
+ priv->shared = dma_alloc_coherent(dev, SHARED_SIZE,
&priv->shared_phys, GFP_KERNEL);
-
if (!priv->shared) {
- dev_err(&dev->dev, "failed to allocate DMA memory\n");
- ret = -ENOMEM;
- goto err_free_io;
+ dev_err(dev, "failed to allocate DMA memory\n");
+ return -ENOMEM;
}
- /* enable the clock */
- priv->clk = clk_get(&dev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(&dev->dev, "failed to get clock\n");
- ret = -ENODEV;
+ /* register misc device */
+ ret = misc_register(&priv->misc_dev);
+ if (ret < 0) {
+ dev_err(dev, "misc_register() for minor %d failed\n",
+ MISCDEV_MINOR);
goto err_free_dma;
}
ret = clk_enable(priv->clk);
if (ret < 0) {
- dev_err(&dev->dev, "failed to enable clock\n");
- goto err_put_clk;
- }
-
- /* request the IRQ */
- irq = platform_get_irq(dev, 0);
- if (irq < 0) {
- dev_err(&dev->dev, "no IRQ defined\n");
- ret = -ENODEV;
- goto err_put_clk;
+ dev_err(dev, "failed to enable clock\n");
+ goto err_misc_deregister;
}
- ret = request_irq(irq, pxa3xx_gcu_handle_irq,
- 0, DRV_NAME, priv);
- if (ret) {
- dev_err(&dev->dev, "request_irq failed\n");
- ret = -EBUSY;
- goto err_put_clk;
+ for (i = 0; i < 8; i++) {
+ ret = pxa3xx_gcu_add_buffer(dev, priv);
+ if (ret) {
+ dev_err(dev, "failed to allocate DMA memory\n");
+ goto err_disable_clk;
+ }
}
- platform_set_drvdata(dev, priv);
+ platform_set_drvdata(pdev, priv);
priv->resource_mem = r;
pxa3xx_gcu_reset(priv);
pxa3xx_gcu_init_debug_timer();
- dev_info(&dev->dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
+ dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
(void *) r->start, (void *) priv->shared_phys,
SHARED_SIZE, irq);
return 0;
-err_put_clk:
- clk_disable(priv->clk);
- clk_put(priv->clk);
-
err_free_dma:
- dma_free_coherent(&dev->dev, SHARED_SIZE,
+ dma_free_coherent(dev, SHARED_SIZE,
priv->shared, priv->shared_phys);
-err_free_io:
- iounmap(priv->mmio_base);
-
-err_free_mem_region:
- release_mem_region(r->start, resource_size(r));
-
err_misc_deregister:
misc_deregister(&priv->misc_dev);
-err_free_priv:
- free_buffers(dev, priv);
- kfree(priv);
+err_disable_clk:
+ clk_disable(priv->clk);
+
return ret;
}
-static int pxa3xx_gcu_remove(struct platform_device *dev)
+static int pxa3xx_gcu_remove(struct platform_device *pdev)
{
- struct pxa3xx_gcu_priv *priv = platform_get_drvdata(dev);
- struct resource *r = priv->resource_mem;
+ struct pxa3xx_gcu_priv *priv = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
pxa3xx_gcu_wait_idle(priv);
-
misc_deregister(&priv->misc_dev);
- dma_free_coherent(&dev->dev, SHARED_SIZE,
- priv->shared, priv->shared_phys);
- iounmap(priv->mmio_base);
- release_mem_region(r->start, resource_size(r));
- clk_disable(priv->clk);
- free_buffers(dev, priv);
- kfree(priv);
+ dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
+ pxa3xx_gcu_free_buffers(dev, priv);
return 0;
}
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
deleted file mode 100644
index bc74d0408998..000000000000
--- a/drivers/video/sgivwfb.c
+++ /dev/null
@@ -1,889 +0,0 @@
-/*
- * linux/drivers/video/sgivwfb.c -- SGI DBE frame buffer device
- *
- * Copyright (C) 1999 Silicon Graphics, Inc.
- * Jeffrey Newquist, newquist@engr.sgi.som
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-
-#include <asm/io.h>
-#include <asm/mtrr.h>
-#include <asm/visws/sgivw.h>
-
-#define INCLUDE_TIMING_TABLE_DATA
-#define DBE_REG_BASE par->regs
-#include <video/sgivw.h>
-
-struct sgivw_par {
- struct asregs *regs;
- u32 cmap_fifo;
- u_long timing_num;
-};
-
-#define FLATPANEL_SGI_1600SW 5
-
-/*
- * RAM we reserve for the frame buffer. This defines the maximum screen
- * size
- *
- * The default can be overridden if the driver is compiled as a module
- */
-
-static int ypan = 0;
-static int ywrap = 0;
-
-static int flatpanel_id = -1;
-
-static struct fb_fix_screeninfo sgivwfb_fix = {
- .id = "SGI Vis WS FB",
- .type = FB_TYPE_PACKED_PIXELS,
- .visual = FB_VISUAL_PSEUDOCOLOR,
- .mmio_start = DBE_REG_PHYS,
- .mmio_len = DBE_REG_SIZE,
- .accel = FB_ACCEL_NONE,
- .line_length = 640,
-};
-
-static struct fb_var_screeninfo sgivwfb_var = {
- /* 640x480, 8 bpp */
- .xres = 640,
- .yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel = 8,
- .red = { 0, 8, 0 },
- .green = { 0, 8, 0 },
- .blue = { 0, 8, 0 },
- .height = -1,
- .width = -1,
- .pixclock = 20000,
- .left_margin = 64,
- .right_margin = 64,
- .upper_margin = 32,
- .lower_margin = 32,
- .hsync_len = 64,
- .vsync_len = 2,
- .vmode = FB_VMODE_NONINTERLACED
-};
-
-static struct fb_var_screeninfo sgivwfb_var1600sw = {
- /* 1600x1024, 8 bpp */
- .xres = 1600,
- .yres = 1024,
- .xres_virtual = 1600,
- .yres_virtual = 1024,
- .bits_per_pixel = 8,
- .red = { 0, 8, 0 },
- .green = { 0, 8, 0 },
- .blue = { 0, 8, 0 },
- .height = -1,
- .width = -1,
- .pixclock = 9353,
- .left_margin = 20,
- .right_margin = 30,
- .upper_margin = 37,
- .lower_margin = 3,
- .hsync_len = 20,
- .vsync_len = 3,
- .vmode = FB_VMODE_NONINTERLACED
-};
-
-/*
- * Interface used by the world
- */
-int sgivwfb_init(void);
-
-static int sgivwfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
-static int sgivwfb_set_par(struct fb_info *info);
-static int sgivwfb_setcolreg(u_int regno, u_int red, u_int green,
- u_int blue, u_int transp,
- struct fb_info *info);
-static int sgivwfb_mmap(struct fb_info *info,
- struct vm_area_struct *vma);
-
-static struct fb_ops sgivwfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = sgivwfb_check_var,
- .fb_set_par = sgivwfb_set_par,
- .fb_setcolreg = sgivwfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
- .fb_mmap = sgivwfb_mmap,
-};
-
-/*
- * Internal routines
- */
-static unsigned long bytes_per_pixel(int bpp)
-{
- switch (bpp) {
- case 8:
- return 1;
- case 16:
- return 2;
- case 32:
- return 4;
- default:
- printk(KERN_INFO "sgivwfb: unsupported bpp %d\n", bpp);
- return 0;
- }
-}
-
-static unsigned long get_line_length(int xres_virtual, int bpp)
-{
- return (xres_virtual * bytes_per_pixel(bpp));
-}
-
-/*
- * Function: dbe_TurnOffDma
- * Parameters: (None)
- * Description: This should turn off the monitor and dbe. This is used
- * when switching between the serial console and the graphics
- * console.
- */
-
-static void dbe_TurnOffDma(struct sgivw_par *par)
-{
- unsigned int readVal;
- int i;
-
- // Check to see if things are already turned off:
- // 1) Check to see if dbe is not using the internal dotclock.
- // 2) Check to see if the xy counter in dbe is already off.
-
- DBE_GETREG(ctrlstat, readVal);
- if (GET_DBE_FIELD(CTRLSTAT, PCLKSEL, readVal) < 2)
- return;
-
- DBE_GETREG(vt_xy, readVal);
- if (GET_DBE_FIELD(VT_XY, VT_FREEZE, readVal) == 1)
- return;
-
- // Otherwise, turn off dbe
-
- DBE_GETREG(ovr_control, readVal);
- SET_DBE_FIELD(OVR_CONTROL, OVR_DMA_ENABLE, readVal, 0);
- DBE_SETREG(ovr_control, readVal);
- udelay(1000);
- DBE_GETREG(frm_control, readVal);
- SET_DBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, readVal, 0);
- DBE_SETREG(frm_control, readVal);
- udelay(1000);
- DBE_GETREG(did_control, readVal);
- SET_DBE_FIELD(DID_CONTROL, DID_DMA_ENABLE, readVal, 0);
- DBE_SETREG(did_control, readVal);
- udelay(1000);
-
- // XXX HACK:
- //
- // This was necessary for GBE--we had to wait through two
- // vertical retrace periods before the pixel DMA was
- // turned off for sure. I've left this in for now, in
- // case dbe needs it.
-
- for (i = 0; i < 10000; i++) {
- DBE_GETREG(frm_inhwctrl, readVal);
- if (GET_DBE_FIELD(FRM_INHWCTRL, FRM_DMA_ENABLE, readVal) ==
- 0)
- udelay(10);
- else {
- DBE_GETREG(ovr_inhwctrl, readVal);
- if (GET_DBE_FIELD
- (OVR_INHWCTRL, OVR_DMA_ENABLE, readVal) == 0)
- udelay(10);
- else {
- DBE_GETREG(did_inhwctrl, readVal);
- if (GET_DBE_FIELD
- (DID_INHWCTRL, DID_DMA_ENABLE,
- readVal) == 0)
- udelay(10);
- else
- break;
- }
- }
- }
-}
-
-/*
- * Set the User Defined Part of the Display. Again if par use it to get
- * real video mode.
- */
-static int sgivwfb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct sgivw_par *par = (struct sgivw_par *)info->par;
- struct dbe_timing_info *timing;
- u_long line_length;
- u_long min_mode;
- int req_dot;
- int test_mode;
-
- /*
- * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal!
- * as FB_VMODE_SMOOTH_XPAN is only used internally
- */
-
- if (var->vmode & FB_VMODE_CONUPDATE) {
- var->vmode |= FB_VMODE_YWRAP;
- var->xoffset = info->var.xoffset;
- var->yoffset = info->var.yoffset;
- }
-
- /* XXX FIXME - forcing var's */
- var->xoffset = 0;
- var->yoffset = 0;
-
- /* Limit bpp to 8, 16, and 32 */
- if (var->bits_per_pixel <= 8)
- var->bits_per_pixel = 8;
- else if (var->bits_per_pixel <= 16)
- var->bits_per_pixel = 16;
- else if (var->bits_per_pixel <= 32)
- var->bits_per_pixel = 32;
- else
- return -EINVAL;
-
- var->grayscale = 0; /* No grayscale for now */
-
- /* determine valid resolution and timing */
- for (min_mode = 0; min_mode < ARRAY_SIZE(dbeVTimings); min_mode++) {
- if (dbeVTimings[min_mode].width >= var->xres &&
- dbeVTimings[min_mode].height >= var->yres)
- break;
- }
-
- if (min_mode == ARRAY_SIZE(dbeVTimings))
- return -EINVAL; /* Resolution to high */
-
- /* XXX FIXME - should try to pick best refresh rate */
- /* for now, pick closest dot-clock within 3MHz */
- req_dot = PICOS2KHZ(var->pixclock);
- printk(KERN_INFO "sgivwfb: requested pixclock=%d ps (%d KHz)\n",
- var->pixclock, req_dot);
- test_mode = min_mode;
- while (dbeVTimings[min_mode].width == dbeVTimings[test_mode].width) {
- if (dbeVTimings[test_mode].cfreq + 3000 > req_dot)
- break;
- test_mode++;
- }
- if (dbeVTimings[min_mode].width != dbeVTimings[test_mode].width)
- test_mode--;
- min_mode = test_mode;
- timing = &dbeVTimings[min_mode];
- printk(KERN_INFO "sgivwfb: granted dot-clock=%d KHz\n", timing->cfreq);
-
- /* Adjust virtual resolution, if necessary */
- if (var->xres > var->xres_virtual || (!ywrap && !ypan))
- var->xres_virtual = var->xres;
- if (var->yres > var->yres_virtual || (!ywrap && !ypan))
- var->yres_virtual = var->yres;
-
- /*
- * Memory limit
- */
- line_length = get_line_length(var->xres_virtual, var->bits_per_pixel);
- if (line_length * var->yres_virtual > sgivwfb_mem_size)
- return -ENOMEM; /* Virtual resolution to high */
-
- info->fix.line_length = line_length;
-
- switch (var->bits_per_pixel) {
- case 8:
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 0;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 0;
- var->transp.length = 0;
- break;
- case 16: /* RGBA 5551 */
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 6;
- var->green.length = 5;
- var->blue.offset = 1;
- var->blue.length = 5;
- var->transp.offset = 0;
- var->transp.length = 0;
- break;
- case 32: /* RGB 8888 */
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 16;
- var->blue.length = 8;
- var->transp.offset = 24;
- var->transp.length = 8;
- break;
- }
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
- var->transp.msb_right = 0;
-
- /* set video timing information */
- var->pixclock = KHZ2PICOS(timing->cfreq);
- var->left_margin = timing->htotal - timing->hsync_end;
- var->right_margin = timing->hsync_start - timing->width;
- var->upper_margin = timing->vtotal - timing->vsync_end;
- var->lower_margin = timing->vsync_start - timing->height;
- var->hsync_len = timing->hsync_end - timing->hsync_start;
- var->vsync_len = timing->vsync_end - timing->vsync_start;
-
- /* Ouch. This breaks the rules but timing_num is only important if you
- * change a video mode */
- par->timing_num = min_mode;
-
- printk(KERN_INFO "sgivwfb: new video mode xres=%d yres=%d bpp=%d\n",
- var->xres, var->yres, var->bits_per_pixel);
- printk(KERN_INFO " vxres=%d vyres=%d\n", var->xres_virtual,
- var->yres_virtual);
- return 0;
-}
-
-/*
- * Setup flatpanel related registers.
- */
-static void sgivwfb_setup_flatpanel(struct sgivw_par *par, struct dbe_timing_info *currentTiming)
-{
- int fp_wid, fp_hgt, fp_vbs, fp_vbe;
- u32 outputVal = 0;
-
- SET_DBE_FIELD(VT_FLAGS, HDRV_INVERT, outputVal,
- (currentTiming->flags & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1);
- SET_DBE_FIELD(VT_FLAGS, VDRV_INVERT, outputVal,
- (currentTiming->flags & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1);
- DBE_SETREG(vt_flags, outputVal);
-
- /* Turn on the flat panel */
- switch (flatpanel_id) {
- case FLATPANEL_SGI_1600SW:
- fp_wid = 1600;
- fp_hgt = 1024;
- fp_vbs = 0;
- fp_vbe = 1600;
- currentTiming->pll_m = 4;
- currentTiming->pll_n = 1;
- currentTiming->pll_p = 0;
- break;
- default:
- fp_wid = fp_hgt = fp_vbs = fp_vbe = 0xfff;
- }
-
- outputVal = 0;
- SET_DBE_FIELD(FP_DE, FP_DE_ON, outputVal, fp_vbs);
- SET_DBE_FIELD(FP_DE, FP_DE_OFF, outputVal, fp_vbe);
- DBE_SETREG(fp_de, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(FP_HDRV, FP_HDRV_OFF, outputVal, fp_wid);
- DBE_SETREG(fp_hdrv, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(FP_VDRV, FP_VDRV_ON, outputVal, 1);
- SET_DBE_FIELD(FP_VDRV, FP_VDRV_OFF, outputVal, fp_hgt + 1);
- DBE_SETREG(fp_vdrv, outputVal);
-}
-
-/*
- * Set the hardware according to 'par'.
- */
-static int sgivwfb_set_par(struct fb_info *info)
-{
- struct sgivw_par *par = info->par;
- int i, j, htmp, temp;
- u32 readVal, outputVal;
- int wholeTilesX, maxPixelsPerTileX;
- int frmWrite1, frmWrite2, frmWrite3b;
- struct dbe_timing_info *currentTiming; /* Current Video Timing */
- int xpmax, ypmax; // Monitor resolution
- int bytesPerPixel; // Bytes per pixel
-
- currentTiming = &dbeVTimings[par->timing_num];
- bytesPerPixel = bytes_per_pixel(info->var.bits_per_pixel);
- xpmax = currentTiming->width;
- ypmax = currentTiming->height;
-
- /* dbe_InitGraphicsBase(); */
- /* Turn on dotclock PLL */
- DBE_SETREG(ctrlstat, 0x20000000);
-
- dbe_TurnOffDma(par);
-
- /* dbe_CalculateScreenParams(); */
- maxPixelsPerTileX = 512 / bytesPerPixel;
- wholeTilesX = xpmax / maxPixelsPerTileX;
- if (wholeTilesX * maxPixelsPerTileX < xpmax)
- wholeTilesX++;
-
- printk(KERN_DEBUG "sgivwfb: pixPerTile=%d wholeTilesX=%d\n",
- maxPixelsPerTileX, wholeTilesX);
-
- /* dbe_InitGammaMap(); */
- udelay(10);
-
- for (i = 0; i < 256; i++) {
- DBE_ISETREG(gmap, i, (i << 24) | (i << 16) | (i << 8));
- }
-
- /* dbe_TurnOn(); */
- DBE_GETREG(vt_xy, readVal);
- if (GET_DBE_FIELD(VT_XY, VT_FREEZE, readVal) == 1) {
- DBE_SETREG(vt_xy, 0x00000000);
- udelay(1);
- } else
- dbe_TurnOffDma(par);
-
- /* dbe_Initdbe(); */
- for (i = 0; i < 256; i++) {
- for (j = 0; j < 100; j++) {
- DBE_GETREG(cm_fifo, readVal);
- if (readVal != 0x00000000)
- break;
- else
- udelay(10);
- }
-
- // DBE_ISETREG(cmap, i, 0x00000000);
- DBE_ISETREG(cmap, i, (i << 8) | (i << 16) | (i << 24));
- }
-
- /* dbe_InitFramebuffer(); */
- frmWrite1 = 0;
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_WIDTH_TILE, frmWrite1,
- wholeTilesX);
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_RHS, frmWrite1, 0);
-
- switch (bytesPerPixel) {
- case 1:
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, frmWrite1,
- DBE_FRM_DEPTH_8);
- break;
- case 2:
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, frmWrite1,
- DBE_FRM_DEPTH_16);
- break;
- case 4:
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, frmWrite1,
- DBE_FRM_DEPTH_32);
- break;
- }
-
- frmWrite2 = 0;
- SET_DBE_FIELD(FRM_SIZE_PIXEL, FB_HEIGHT_PIX, frmWrite2, ypmax);
-
- // Tell dbe about the framebuffer location and type
- // XXX What format is the FRM_TILE_PTR?? 64K aligned address?
- frmWrite3b = 0;
- SET_DBE_FIELD(FRM_CONTROL, FRM_TILE_PTR, frmWrite3b,
- sgivwfb_mem_phys >> 9);
- SET_DBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, frmWrite3b, 1);
- SET_DBE_FIELD(FRM_CONTROL, FRM_LINEAR, frmWrite3b, 1);
-
- /* Initialize DIDs */
-
- outputVal = 0;
- switch (bytesPerPixel) {
- case 1:
- SET_DBE_FIELD(WID, TYP, outputVal, DBE_CMODE_I8);
- break;
- case 2:
- SET_DBE_FIELD(WID, TYP, outputVal, DBE_CMODE_RGBA5);
- break;
- case 4:
- SET_DBE_FIELD(WID, TYP, outputVal, DBE_CMODE_RGB8);
- break;
- }
- SET_DBE_FIELD(WID, BUF, outputVal, DBE_BMODE_BOTH);
-
- for (i = 0; i < 32; i++) {
- DBE_ISETREG(mode_regs, i, outputVal);
- }
-
- /* dbe_InitTiming(); */
- DBE_SETREG(vt_intr01, 0xffffffff);
- DBE_SETREG(vt_intr23, 0xffffffff);
-
- DBE_GETREG(dotclock, readVal);
- DBE_SETREG(dotclock, readVal & 0xffff);
-
- DBE_SETREG(vt_xymax, 0x00000000);
- outputVal = 0;
- SET_DBE_FIELD(VT_VSYNC, VT_VSYNC_ON, outputVal,
- currentTiming->vsync_start);
- SET_DBE_FIELD(VT_VSYNC, VT_VSYNC_OFF, outputVal,
- currentTiming->vsync_end);
- DBE_SETREG(vt_vsync, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(VT_HSYNC, VT_HSYNC_ON, outputVal,
- currentTiming->hsync_start);
- SET_DBE_FIELD(VT_HSYNC, VT_HSYNC_OFF, outputVal,
- currentTiming->hsync_end);
- DBE_SETREG(vt_hsync, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(VT_VBLANK, VT_VBLANK_ON, outputVal,
- currentTiming->vblank_start);
- SET_DBE_FIELD(VT_VBLANK, VT_VBLANK_OFF, outputVal,
- currentTiming->vblank_end);
- DBE_SETREG(vt_vblank, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(VT_HBLANK, VT_HBLANK_ON, outputVal,
- currentTiming->hblank_start);
- SET_DBE_FIELD(VT_HBLANK, VT_HBLANK_OFF, outputVal,
- currentTiming->hblank_end - 3);
- DBE_SETREG(vt_hblank, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(VT_VCMAP, VT_VCMAP_ON, outputVal,
- currentTiming->vblank_start);
- SET_DBE_FIELD(VT_VCMAP, VT_VCMAP_OFF, outputVal,
- currentTiming->vblank_end);
- DBE_SETREG(vt_vcmap, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(VT_HCMAP, VT_HCMAP_ON, outputVal,
- currentTiming->hblank_start);
- SET_DBE_FIELD(VT_HCMAP, VT_HCMAP_OFF, outputVal,
- currentTiming->hblank_end - 3);
- DBE_SETREG(vt_hcmap, outputVal);
-
- if (flatpanel_id != -1)
- sgivwfb_setup_flatpanel(par, currentTiming);
-
- outputVal = 0;
- temp = currentTiming->vblank_start - currentTiming->vblank_end - 1;
- if (temp > 0)
- temp = -temp;
-
- SET_DBE_FIELD(DID_START_XY, DID_STARTY, outputVal, (u32) temp);
- if (currentTiming->hblank_end >= 20)
- SET_DBE_FIELD(DID_START_XY, DID_STARTX, outputVal,
- currentTiming->hblank_end - 20);
- else
- SET_DBE_FIELD(DID_START_XY, DID_STARTX, outputVal,
- currentTiming->htotal - (20 -
- currentTiming->
- hblank_end));
- DBE_SETREG(did_start_xy, outputVal);
-
- outputVal = 0;
- SET_DBE_FIELD(CRS_START_XY, CRS_STARTY, outputVal,
- (u32) (temp + 1));
- if (currentTiming->hblank_end >= DBE_CRS_MAGIC)
- SET_DBE_FIELD(CRS_START_XY, CRS_STARTX, outputVal,
- currentTiming->hblank_end - DBE_CRS_MAGIC);
- else
- SET_DBE_FIELD(CRS_START_XY, CRS_STARTX, outputVal,
- currentTiming->htotal - (DBE_CRS_MAGIC -
- currentTiming->
- hblank_end));
- DBE_SETREG(crs_start_xy, outputVal);
-
- outputVal = 0;
- SET_DBE_FIELD(VC_START_XY, VC_STARTY, outputVal, (u32) temp);
- SET_DBE_FIELD(VC_START_XY, VC_STARTX, outputVal,
- currentTiming->hblank_end - 4);
- DBE_SETREG(vc_start_xy, outputVal);
-
- DBE_SETREG(frm_size_tile, frmWrite1);
- DBE_SETREG(frm_size_pixel, frmWrite2);
-
- outputVal = 0;
- SET_DBE_FIELD(DOTCLK, M, outputVal, currentTiming->pll_m - 1);
- SET_DBE_FIELD(DOTCLK, N, outputVal, currentTiming->pll_n - 1);
- SET_DBE_FIELD(DOTCLK, P, outputVal, currentTiming->pll_p);
- SET_DBE_FIELD(DOTCLK, RUN, outputVal, 1);
- DBE_SETREG(dotclock, outputVal);
-
- udelay(11 * 1000);
-
- DBE_SETREG(vt_vpixen, 0xffffff);
- DBE_SETREG(vt_hpixen, 0xffffff);
-
- outputVal = 0;
- SET_DBE_FIELD(VT_XYMAX, VT_MAXX, outputVal, currentTiming->htotal);
- SET_DBE_FIELD(VT_XYMAX, VT_MAXY, outputVal, currentTiming->vtotal);
- DBE_SETREG(vt_xymax, outputVal);
-
- outputVal = frmWrite1;
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_FIFO_RESET, outputVal, 1);
- DBE_SETREG(frm_size_tile, outputVal);
- DBE_SETREG(frm_size_tile, frmWrite1);
-
- outputVal = 0;
- SET_DBE_FIELD(OVR_WIDTH_TILE, OVR_FIFO_RESET, outputVal, 1);
- DBE_SETREG(ovr_width_tile, outputVal);
- DBE_SETREG(ovr_width_tile, 0);
-
- DBE_SETREG(frm_control, frmWrite3b);
- DBE_SETREG(did_control, 0);
-
- // Wait for dbe to take frame settings
- for (i = 0; i < 100000; i++) {
- DBE_GETREG(frm_inhwctrl, readVal);
- if (GET_DBE_FIELD(FRM_INHWCTRL, FRM_DMA_ENABLE, readVal) !=
- 0)
- break;
- else
- udelay(1);
- }
-
- if (i == 100000)
- printk(KERN_INFO
- "sgivwfb: timeout waiting for frame DMA enable.\n");
-
- outputVal = 0;
- htmp = currentTiming->hblank_end - 19;
- if (htmp < 0)
- htmp += currentTiming->htotal; /* allow blank to wrap around */
- SET_DBE_FIELD(VT_HPIXEN, VT_HPIXEN_ON, outputVal, htmp);
- SET_DBE_FIELD(VT_HPIXEN, VT_HPIXEN_OFF, outputVal,
- ((htmp + currentTiming->width -
- 2) % currentTiming->htotal));
- DBE_SETREG(vt_hpixen, outputVal);
-
- outputVal = 0;
- SET_DBE_FIELD(VT_VPIXEN, VT_VPIXEN_OFF, outputVal,
- currentTiming->vblank_start);
- SET_DBE_FIELD(VT_VPIXEN, VT_VPIXEN_ON, outputVal,
- currentTiming->vblank_end);
- DBE_SETREG(vt_vpixen, outputVal);
-
- // Turn off mouse cursor
- par->regs->crs_ctl = 0;
-
- // XXX What's this section for??
- DBE_GETREG(ctrlstat, readVal);
- readVal &= 0x02000000;
-
- if (readVal != 0) {
- DBE_SETREG(ctrlstat, 0x30000000);
- }
- return 0;
-}
-
-/*
- * Set a single color register. The values supplied are already
- * rounded down to the hardware's capabilities (according to the
- * entries in the var structure). Return != 0 for invalid regno.
- */
-
-static int sgivwfb_setcolreg(u_int regno, u_int red, u_int green,
- u_int blue, u_int transp,
- struct fb_info *info)
-{
- struct sgivw_par *par = (struct sgivw_par *) info->par;
-
- if (regno > 255)
- return 1;
- red >>= 8;
- green >>= 8;
- blue >>= 8;
-
- /* wait for the color map FIFO to have a free entry */
- while (par->cmap_fifo == 0)
- par->cmap_fifo = par->regs->cm_fifo;
-
- par->regs->cmap[regno] = (red << 24) | (green << 16) | (blue << 8);
- par->cmap_fifo--; /* assume FIFO is filling up */
- return 0;
-}
-
-static int sgivwfb_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- int r;
-
- pgprot_val(vma->vm_page_prot) =
- pgprot_val(vma->vm_page_prot) | _PAGE_PCD;
-
- r = vm_iomap_memory(vma, sgivwfb_mem_phys, sgivwfb_mem_size);
-
- printk(KERN_DEBUG "sgivwfb: mmap framebuffer P(%lx)->V(%lx)\n",
- sgivwfb_mem_phys + (vma->vm_pgoff << PAGE_SHIFT), vma->vm_start);
-
- return r;
-}
-
-int __init sgivwfb_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!strncmp(this_opt, "monitor:", 8)) {
- if (!strncmp(this_opt + 8, "crt", 3))
- flatpanel_id = -1;
- else if (!strncmp(this_opt + 8, "1600sw", 6))
- flatpanel_id = FLATPANEL_SGI_1600SW;
- }
- }
- return 0;
-}
-
-/*
- * Initialisation
- */
-static int sgivwfb_probe(struct platform_device *dev)
-{
- struct sgivw_par *par;
- struct fb_info *info;
- char *monitor;
-
- info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 16, &dev->dev);
- if (!info)
- return -ENOMEM;
- par = info->par;
-
- if (!request_mem_region(DBE_REG_PHYS, DBE_REG_SIZE, "sgivwfb")) {
- printk(KERN_ERR "sgivwfb: couldn't reserve mmio region\n");
- framebuffer_release(info);
- return -EBUSY;
- }
-
- par->regs = (struct asregs *) ioremap_nocache(DBE_REG_PHYS, DBE_REG_SIZE);
- if (!par->regs) {
- printk(KERN_ERR "sgivwfb: couldn't ioremap registers\n");
- goto fail_ioremap_regs;
- }
-
- mtrr_add(sgivwfb_mem_phys, sgivwfb_mem_size, MTRR_TYPE_WRCOMB, 1);
-
- sgivwfb_fix.smem_start = sgivwfb_mem_phys;
- sgivwfb_fix.smem_len = sgivwfb_mem_size;
- sgivwfb_fix.ywrapstep = ywrap;
- sgivwfb_fix.ypanstep = ypan;
-
- info->fix = sgivwfb_fix;
-
- switch (flatpanel_id) {
- case FLATPANEL_SGI_1600SW:
- info->var = sgivwfb_var1600sw;
- monitor = "SGI 1600SW flatpanel";
- break;
- default:
- info->var = sgivwfb_var;
- monitor = "CRT";
- }
-
- printk(KERN_INFO "sgivwfb: %s monitor selected\n", monitor);
-
- info->fbops = &sgivwfb_ops;
- info->pseudo_palette = (void *) (par + 1);
- info->flags = FBINFO_DEFAULT;
-
- info->screen_base = ioremap_nocache((unsigned long) sgivwfb_mem_phys, sgivwfb_mem_size);
- if (!info->screen_base) {
- printk(KERN_ERR "sgivwfb: couldn't ioremap screen_base\n");
- goto fail_ioremap_fbmem;
- }
-
- if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
- goto fail_color_map;
-
- if (register_framebuffer(info) < 0) {
- printk(KERN_ERR "sgivwfb: couldn't register framebuffer\n");
- goto fail_register_framebuffer;
- }
-
- platform_set_drvdata(dev, info);
-
- fb_info(info, "SGI DBE frame buffer device, using %ldK of video memory at %#lx\n",
- sgivwfb_mem_size >> 10, sgivwfb_mem_phys);
- return 0;
-
-fail_register_framebuffer:
- fb_dealloc_cmap(&info->cmap);
-fail_color_map:
- iounmap((char *) info->screen_base);
-fail_ioremap_fbmem:
- iounmap(par->regs);
-fail_ioremap_regs:
- release_mem_region(DBE_REG_PHYS, DBE_REG_SIZE);
- framebuffer_release(info);
- return -ENXIO;
-}
-
-static int sgivwfb_remove(struct platform_device *dev)
-{
- struct fb_info *info = platform_get_drvdata(dev);
-
- if (info) {
- struct sgivw_par *par = info->par;
-
- unregister_framebuffer(info);
- dbe_TurnOffDma(par);
- iounmap(par->regs);
- iounmap(info->screen_base);
- release_mem_region(DBE_REG_PHYS, DBE_REG_SIZE);
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
- return 0;
-}
-
-static struct platform_driver sgivwfb_driver = {
- .probe = sgivwfb_probe,
- .remove = sgivwfb_remove,
- .driver = {
- .name = "sgivwfb",
- },
-};
-
-static struct platform_device *sgivwfb_device;
-
-int __init sgivwfb_init(void)
-{
- int ret;
-
-#ifndef MODULE
- char *option = NULL;
-
- if (fb_get_options("sgivwfb", &option))
- return -ENODEV;
- sgivwfb_setup(option);
-#endif
- ret = platform_driver_register(&sgivwfb_driver);
- if (!ret) {
- sgivwfb_device = platform_device_alloc("sgivwfb", 0);
- if (sgivwfb_device) {
- ret = platform_device_add(sgivwfb_device);
- } else
- ret = -ENOMEM;
- if (ret) {
- platform_driver_unregister(&sgivwfb_driver);
- platform_device_put(sgivwfb_device);
- }
- }
- return ret;
-}
-
-module_init(sgivwfb_init);
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-static void __exit sgivwfb_exit(void)
-{
- platform_device_unregister(sgivwfb_device);
- platform_driver_unregister(&sgivwfb_driver);
-}
-
-module_exit(sgivwfb_exit);
-
-#endif /* MODULE */
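
The deleted sgivwfb_check_var() above carried the one genuinely algorithmic step of the removed driver: it converted the requested pixclock period to kHz with PICOS2KHZ() and then walked the timing table for the first entry of matching width whose dot clock comes within 3 MHz of the request. The stand-alone sketch below reproduces that selection against a hypothetical, much smaller timing table; struct timing and pick_mode are illustrative names, not the driver's types.

#include <stdio.h>

/* Illustrative stand-in for the driver's timing table entry; the real
 * struct dbe_timing_info carries many more fields. */
struct timing {
	int width, height;
	int cfreq;			/* dot clock in kHz */
};

#define PICOS2KHZ(picos)	(1000000000UL / (picos))

/* Pick the first entry that fits the requested resolution, then walk
 * entries of the same width until the dot clock exceeds the request by
 * more than 3 MHz, mirroring the removed sgivwfb_check_var() logic. */
static int pick_mode(const struct timing *t, int n, int xres, int yres,
		     unsigned int pixclock_ps)
{
	int req_khz = PICOS2KHZ(pixclock_ps);
	int min_mode, test_mode;

	for (min_mode = 0; min_mode < n; min_mode++)
		if (t[min_mode].width >= xres && t[min_mode].height >= yres)
			break;
	if (min_mode == n)
		return -1;		/* resolution too high */

	test_mode = min_mode;
	while (test_mode < n && t[test_mode].width == t[min_mode].width) {
		if (t[test_mode].cfreq + 3000 > req_khz)
			break;
		test_mode++;
	}
	if (test_mode == n || t[test_mode].width != t[min_mode].width)
		test_mode--;
	return test_mode;
}

int main(void)
{
	static const struct timing table[] = {
		{ 640, 480, 25175 }, { 640, 480, 31500 },
		{ 800, 600, 40000 }, { 1280, 1024, 108000 },
	};
	/* 31746 ps period is roughly a 31.5 MHz dot clock */
	int m = pick_mode(table, 4, 640, 480, 31746);

	printf("selected mode %d (%d kHz)\n", m, m >= 0 ? table[m].cfreq : 0);
	return 0;
}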
diff --git a/drivers/video/sis/init.c b/drivers/video/sis/init.c
index 4f26bc28e60b..bd40f5ecd901 100644
--- a/drivers/video/sis/init.c
+++ b/drivers/video/sis/init.c
@@ -651,6 +651,7 @@ SiS_GetModeID_LCD(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDispla
switch(VDisplay) {
case 720:
ModeIndex = ModeIndex_1280x720[Depth];
+ break;
case 768:
if(VGAEngine == SIS_300_VGA) {
ModeIndex = ModeIndex_300_1280x768[Depth];
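
The sis/init.c hunk above is a one-line fallthrough fix: without the added break, a 1280x720 request falls straight into the 768-line case and the just-computed mode index is overwritten. A tiny illustration of the hazard (mode_index is a made-up stand-in, not the SiS code):

#include <stdio.h>

static int mode_index(int vdisplay)
{
	int idx = -1;

	switch (vdisplay) {
	case 720:
		idx = 720;	/* stand-in for ModeIndex_1280x720[Depth] */
		break;		/* the fix: without this we fall through */
	case 768:
		idx = 768;	/* stand-in for ModeIndex_300_1280x768[Depth] */
		break;
	}
	return idx;
}

int main(void)
{
	printf("vdisplay 720 -> %d\n", mode_index(720));	/* 720, not 768 */
	return 0;
}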
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index 07c7df9ee77b..65ba9921506e 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -182,6 +182,8 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (var->xres_virtual != var->xres || var->yres_virtual != var->yres)
return -EINVAL;
+ if (var->xres * var->yres * (var->bits_per_pixel >> 3) > info->fix.smem_len)
+ return -EINVAL;
if (var->nonstd)
return -EINVAL;
if (1000000000 / var->pixclock > TGA_PLL_MAX_FREQ)
@@ -190,8 +192,8 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
return -EINVAL;
/* Some of the acceleration routines assume the line width is
- a multiple of 64 bytes. */
- if (var->xres * (par->tga_type == TGA_TYPE_8PLANE ? 1 : 4) % 64)
+ a multiple of 8 bytes. */
+ if (var->xres * (par->tga_type == TGA_TYPE_8PLANE ? 1 : 4) % 8)
return -EINVAL;
return 0;
@@ -262,6 +264,7 @@ tgafb_set_par(struct fb_info *info)
par->yres = info->var.yres;
par->pll_freq = pll_freq = 1000000000 / info->var.pixclock;
par->bits_per_pixel = info->var.bits_per_pixel;
+ info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
tga_type = par->tga_type;
@@ -1136,222 +1139,57 @@ copyarea_line_32bpp(struct fb_info *info, u32 dy, u32 sy,
__raw_writel(TGA_MODE_SBM_24BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
}
-/* The general case of forward copy in 8bpp mode. */
+/* The (almost) general case of copy in 8bpp mode, forward or backward. */
static inline void
-copyarea_foreward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
- u32 height, u32 width, u32 line_length)
+copyarea_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+ u32 height, u32 width, u32 line_length,
+ const struct fb_copyarea *area)
{
struct tga_par *par = (struct tga_par *) info->par;
- unsigned long i, copied, left;
- unsigned long dpos, spos, dalign, salign, yincr;
- u32 smask_first, dmask_first, dmask_last;
- int pixel_shift, need_prime, need_second;
- unsigned long n64, n32, xincr_first;
+ unsigned i, yincr;
+ int depos, sepos, backward, last_step, step;
+ u32 mask_last;
+ unsigned n32;
void __iomem *tga_regs;
void __iomem *tga_fb;
- yincr = line_length;
- if (dy > sy) {
- dy += height - 1;
- sy += height - 1;
- yincr = -yincr;
- }
-
- /* Compute the offsets and alignments in the frame buffer.
- More than anything else, these control how we do copies. */
- dpos = dy * line_length + dx;
- spos = sy * line_length + sx;
- dalign = dpos & 7;
- salign = spos & 7;
- dpos &= -8;
- spos &= -8;
-
- /* Compute the value for the PIXELSHIFT register. This controls
- both non-co-aligned source and destination and copy direction. */
- if (dalign >= salign)
- pixel_shift = dalign - salign;
- else
- pixel_shift = 8 - (salign - dalign);
-
- /* Figure out if we need an additional priming step for the
- residue register. */
- need_prime = (salign > dalign);
- if (need_prime)
- dpos -= 8;
-
- /* Begin by copying the leading unaligned destination. Copy enough
- to make the next destination address 32-byte aligned. */
- copied = 32 - (dalign + (dpos & 31));
- if (copied == 32)
- copied = 0;
- xincr_first = (copied + 7) & -8;
- smask_first = dmask_first = (1ul << copied) - 1;
- smask_first <<= salign;
- dmask_first <<= dalign + need_prime*8;
- if (need_prime && copied > 24)
- copied -= 8;
- left = width - copied;
-
- /* Care for small copies. */
- if (copied > width) {
- u32 t;
- t = (1ul << width) - 1;
- t <<= dalign + need_prime*8;
- dmask_first &= t;
- left = 0;
- }
-
- /* Attempt to use 64-byte copies. This is only possible if the
- source and destination are co-aligned at 64 bytes. */
- n64 = need_second = 0;
- if ((dpos & 63) == (spos & 63)
- && (height == 1 || line_length % 64 == 0)) {
- /* We may need a 32-byte copy to ensure 64 byte alignment. */
- need_second = (dpos + xincr_first) & 63;
- if ((need_second & 32) != need_second)
- printk(KERN_ERR "tgafb: need_second wrong\n");
- if (left >= need_second + 64) {
- left -= need_second;
- n64 = left / 64;
- left %= 64;
- } else
- need_second = 0;
- }
-
- /* Copy trailing full 32-byte sections. This will be the main
- loop if the 64 byte loop can't be used. */
- n32 = left / 32;
- left %= 32;
-
- /* Copy the trailing unaligned destination. */
- dmask_last = (1ul << left) - 1;
-
- tga_regs = par->tga_regs_base;
- tga_fb = par->tga_fb_base;
-
- /* Set up the MODE and PIXELSHIFT registers. */
- __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_COPY, tga_regs+TGA_MODE_REG);
- __raw_writel(pixel_shift, tga_regs+TGA_PIXELSHIFT_REG);
- wmb();
-
- for (i = 0; i < height; ++i) {
- unsigned long j;
- void __iomem *sfb;
- void __iomem *dfb;
-
- sfb = tga_fb + spos;
- dfb = tga_fb + dpos;
- if (dmask_first) {
- __raw_writel(smask_first, sfb);
- wmb();
- __raw_writel(dmask_first, dfb);
- wmb();
- sfb += xincr_first;
- dfb += xincr_first;
- }
-
- if (need_second) {
- __raw_writel(0xffffffff, sfb);
- wmb();
- __raw_writel(0xffffffff, dfb);
- wmb();
- sfb += 32;
- dfb += 32;
- }
-
- if (n64 && (((unsigned long)sfb | (unsigned long)dfb) & 63))
- printk(KERN_ERR
- "tgafb: misaligned copy64 (s:%p, d:%p)\n",
- sfb, dfb);
-
- for (j = 0; j < n64; ++j) {
- __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
- wmb();
- __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
- wmb();
- sfb += 64;
- dfb += 64;
- }
-
- for (j = 0; j < n32; ++j) {
- __raw_writel(0xffffffff, sfb);
- wmb();
- __raw_writel(0xffffffff, dfb);
- wmb();
- sfb += 32;
- dfb += 32;
- }
-
- if (dmask_last) {
- __raw_writel(0xffffffff, sfb);
- wmb();
- __raw_writel(dmask_last, dfb);
- wmb();
- }
-
- spos += yincr;
- dpos += yincr;
+ /* Do acceleration only if we are aligned on 8 pixels */
+ if ((dx | sx | width) & 7) {
+ cfb_copyarea(info, area);
+ return;
}
- /* Reset the MODE register to normal. */
- __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
-}
-
-/* The (almost) general case of backward copy in 8bpp mode. */
-static inline void
-copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
- u32 height, u32 width, u32 line_length,
- const struct fb_copyarea *area)
-{
- struct tga_par *par = (struct tga_par *) info->par;
- unsigned long i, left, yincr;
- unsigned long depos, sepos, dealign, sealign;
- u32 mask_first, mask_last;
- unsigned long n32;
- void __iomem *tga_regs;
- void __iomem *tga_fb;
-
yincr = line_length;
if (dy > sy) {
dy += height - 1;
sy += height - 1;
yincr = -yincr;
}
+ backward = dy == sy && dx > sx && dx < sx + width;
/* Compute the offsets and alignments in the frame buffer.
More than anything else, these control how we do copies. */
- depos = dy * line_length + dx + width;
- sepos = sy * line_length + sx + width;
- dealign = depos & 7;
- sealign = sepos & 7;
-
- /* ??? The documentation appears to be incorrect (or very
- misleading) wrt how pixel shifting works in backward copy
- mode, i.e. when PIXELSHIFT is negative. I give up for now.
- Do handle the common case of co-aligned backward copies,
- but frob everything else back on generic code. */
- if (dealign != sealign) {
- cfb_copyarea(info, area);
- return;
- }
-
- /* We begin the copy with the trailing pixels of the
- unaligned destination. */
- mask_first = (1ul << dealign) - 1;
- left = width - dealign;
-
- /* Care for small copies. */
- if (dealign > width) {
- mask_first ^= (1ul << (dealign - width)) - 1;
- left = 0;
- }
+ depos = dy * line_length + dx;
+ sepos = sy * line_length + sx;
+ if (backward)
+ depos += width, sepos += width;
/* Next copy full words at a time. */
- n32 = left / 32;
- left %= 32;
+ n32 = width / 32;
+ last_step = width % 32;
/* Finally copy the unaligned head of the span. */
- mask_last = -1 << (32 - left);
+ mask_last = (1ul << last_step) - 1;
+
+ if (!backward) {
+ step = 32;
+ last_step = 32;
+ } else {
+ step = -32;
+ last_step = -last_step;
+ sepos -= 32;
+ depos -= 32;
+ }
tga_regs = par->tga_regs_base;
tga_fb = par->tga_fb_base;
@@ -1368,25 +1206,33 @@ copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
sfb = tga_fb + sepos;
dfb = tga_fb + depos;
- if (mask_first) {
- __raw_writel(mask_first, sfb);
- wmb();
- __raw_writel(mask_first, dfb);
- wmb();
- }
- for (j = 0; j < n32; ++j) {
- sfb -= 32;
- dfb -= 32;
+ for (j = 0; j < n32; j++) {
+ if (j < 2 && j + 1 < n32 && !backward &&
+ !(((unsigned long)sfb | (unsigned long)dfb) & 63)) {
+ do {
+ __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
+ wmb();
+ __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
+ wmb();
+ sfb += 64;
+ dfb += 64;
+ j += 2;
+ } while (j + 1 < n32);
+ j--;
+ continue;
+ }
__raw_writel(0xffffffff, sfb);
wmb();
__raw_writel(0xffffffff, dfb);
wmb();
+ sfb += step;
+ dfb += step;
}
if (mask_last) {
- sfb -= 32;
- dfb -= 32;
+ sfb += last_step - step;
+ dfb += last_step - step;
__raw_writel(mask_last, sfb);
wmb();
__raw_writel(mask_last, dfb);
@@ -1434,7 +1280,7 @@ tgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
bpp = info->var.bits_per_pixel;
/* Detect copies of the entire line. */
- if (width * (bpp >> 3) == line_length) {
+ if (!(line_length & 63) && width * (bpp >> 3) == line_length) {
if (bpp == 8)
copyarea_line_8bpp(info, dy, sy, height, width);
else
@@ -1447,14 +1293,9 @@ tgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
else if (bpp == 32)
cfb_copyarea(info, area);
- /* Detect overlapping source and destination that requires
- a backward copy. */
- else if (dy == sy && dx > sx && dx < sx + width)
- copyarea_backward_8bpp(info, dx, dy, sx, sy, height,
- width, line_length, area);
else
- copyarea_foreward_8bpp(info, dx, dy, sx, sy, height,
- width, line_length);
+ copyarea_8bpp(info, dx, dy, sx, sy, height,
+ width, line_length, area);
}
@@ -1470,6 +1311,7 @@ tgafb_init_fix(struct fb_info *info)
int tga_bus_tc = TGA_BUS_TC(par->dev);
u8 tga_type = par->tga_type;
const char *tga_type_name = NULL;
+ unsigned memory_size;
switch (tga_type) {
case TGA_TYPE_8PLANE:
@@ -1477,22 +1319,27 @@ tgafb_init_fix(struct fb_info *info)
tga_type_name = "Digital ZLXp-E1";
if (tga_bus_tc)
tga_type_name = "Digital ZLX-E1";
+ memory_size = 2097152;
break;
case TGA_TYPE_24PLANE:
if (tga_bus_pci)
tga_type_name = "Digital ZLXp-E2";
if (tga_bus_tc)
tga_type_name = "Digital ZLX-E2";
+ memory_size = 8388608;
break;
case TGA_TYPE_24PLUSZ:
if (tga_bus_pci)
tga_type_name = "Digital ZLXp-E3";
if (tga_bus_tc)
tga_type_name = "Digital ZLX-E3";
+ memory_size = 16777216;
break;
}
- if (!tga_type_name)
+ if (!tga_type_name) {
tga_type_name = "Unknown";
+ memory_size = 16777216;
+ }
strlcpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
@@ -1502,9 +1349,8 @@ tgafb_init_fix(struct fb_info *info)
? FB_VISUAL_PSEUDOCOLOR
: FB_VISUAL_DIRECTCOLOR);
- info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
info->fix.smem_start = (size_t) par->tga_fb_base;
- info->fix.smem_len = info->fix.line_length * par->yres;
+ info->fix.smem_len = memory_size;
info->fix.mmio_start = (size_t) par->tga_regs_base;
info->fix.mmio_len = 512;
@@ -1628,6 +1474,9 @@ static int tgafb_register(struct device *dev)
modedb_tga = &modedb_tc;
modedbsize_tga = 1;
}
+
+ tgafb_init_fix(info);
+
ret = fb_find_mode(&info->var, info,
mode_option ? mode_option : mode_option_tga,
modedb_tga, modedbsize_tga, NULL,
@@ -1645,7 +1494,6 @@ static int tgafb_register(struct device *dev)
}
tgafb_set_par(info);
- tgafb_init_fix(info);
if (register_framebuffer(info) < 0) {
printk(KERN_ERR "tgafb: Could not register framebuffer\n");
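
The tgafb rework above folds the forward and backward 8bpp copies into one copyarea_8bpp() that punts to cfb_copyarea() unless source, destination and width are 8-pixel aligned, and that only walks the span backward when the copy overlaps on the same line. The sketch below shows just that direction and step planning; plan_copy is a hypothetical helper and the TGA register writes are left out.

#include <stdbool.h>
#include <stdio.h>

/* An overlap on the same line with dx > sx must be copied backward,
 * everything else forward.  Step sizes mirror the 32-pixel chunks the
 * TGA copy engine works in. */
static void plan_copy(unsigned dx, unsigned sx, unsigned dy, unsigned sy,
		      unsigned width)
{
	bool backward = dy == sy && dx > sx && dx < sx + width;
	int step = backward ? -32 : 32;
	unsigned n32 = width / 32;
	unsigned last = width % 32;
	unsigned mask_last = last ? (1u << last) - 1 : 0;

	printf("%s copy: %u full 32-pixel chunks, step %d, tail mask 0x%x\n",
	       backward ? "backward" : "forward", n32, step, mask_last);
}

int main(void)
{
	plan_copy(8, 0, 0, 0, 64);	/* overlapping, same line: backward */
	plan_copy(0, 8, 0, 0, 64);	/* destination before source: forward */
	return 0;
}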
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 256fba7f4641..509d452e8f91 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -190,7 +190,7 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
uvfb_tasks[seq] = task;
mutex_unlock(&uvfb_lock);
- err = cn_netlink_send(m, 0, GFP_KERNEL);
+ err = cn_netlink_send(m, 0, 0, GFP_KERNEL);
if (err == -ESRCH) {
/*
* Try to start the userspace helper if sending
@@ -204,7 +204,7 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
"helper is installed and executable\n");
} else {
v86d_started = 1;
- err = cn_netlink_send(m, 0, gfp_any());
+ err = cn_netlink_send(m, 0, 0, gfp_any());
if (err == -ENOBUFS)
err = 0;
}
@@ -1474,12 +1474,7 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
* used video mode, i.e. the minimum amount of
* memory we need.
*/
- if (mode != NULL) {
- size_vmode = info->var.yres * mode->bytes_per_scan_line;
- } else {
- size_vmode = info->var.yres * info->var.xres *
- ((info->var.bits_per_pixel + 7) >> 3);
- }
+ size_vmode = info->var.yres * mode->bytes_per_scan_line;
/*
* size_total -- all video memory we have. Used for mtrr
@@ -1812,11 +1807,9 @@ static int uvesafb_remove(struct platform_device *dev)
fb_destroy_modedb(info->monspecs.modedb);
fb_dealloc_cmap(&info->cmap);
- if (par) {
- kfree(par->vbe_modes);
- kfree(par->vbe_state_orig);
- kfree(par->vbe_state_saved);
- }
+ kfree(par->vbe_modes);
+ kfree(par->vbe_state_orig);
+ kfree(par->vbe_state_saved);
framebuffer_release(info);
}
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index 1c7da3b098d6..6170e7f58640 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -179,7 +179,6 @@ static void vesafb_destroy(struct fb_info *info)
if (info->screen_base)
iounmap(info->screen_base);
release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
- framebuffer_release(info);
}
static struct fb_ops vesafb_ops = {
@@ -297,6 +296,7 @@ static int vesafb_probe(struct platform_device *dev)
release_mem_region(vesafb_fix.smem_start, size_total);
return -ENOMEM;
}
+ platform_set_drvdata(dev, info);
info->pseudo_palette = info->par;
info->par = NULL;
@@ -499,12 +499,23 @@ err:
return err;
}
+static int vesafb_remove(struct platform_device *pdev)
+{
+ struct fb_info *info = platform_get_drvdata(pdev);
+
+ unregister_framebuffer(info);
+ framebuffer_release(info);
+
+ return 0;
+}
+
static struct platform_driver vesafb_driver = {
.driver = {
.name = "vesa-framebuffer",
.owner = THIS_MODULE,
},
.probe = vesafb_probe,
+ .remove = vesafb_remove,
};
module_platform_driver(vesafb_driver);
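
The vesafb change above pairs a new .remove callback with platform_set_drvdata() in probe, so unregister_framebuffer() and framebuffer_release() run on driver removal rather than in .fb_destroy. A minimal sketch of that pairing; "demo-fb" and the demo_fb_* names are made up, and the actual hardware and fb_info setup are omitted.

#include <linux/fb.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_fb_probe(struct platform_device *pdev)
{
	struct fb_info *info;
	int ret;

	info = framebuffer_alloc(0, &pdev->dev);
	if (!info)
		return -ENOMEM;

	/* ... fill in info->fbops, info->var, info->fix here ... */

	ret = register_framebuffer(info);
	if (ret < 0) {
		framebuffer_release(info);
		return ret;
	}

	platform_set_drvdata(pdev, info);
	return 0;
}

static int demo_fb_remove(struct platform_device *pdev)
{
	struct fb_info *info = platform_get_drvdata(pdev);

	unregister_framebuffer(info);
	framebuffer_release(info);
	return 0;
}

static struct platform_driver demo_fb_driver = {
	.driver = {
		.name = "demo-fb",
	},
	.probe	= demo_fb_probe,
	.remove	= demo_fb_remove,
};
module_platform_driver(demo_fb_driver);

MODULE_LICENSE("GPL");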
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 6ff1a91e9dfd..553cff2f3f4c 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -33,7 +33,6 @@
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/io.h>
-#include <linux/xilinxfb.h>
#include <linux/slab.h>
#ifdef CONFIG_PPC_DCR
@@ -84,6 +83,20 @@
#define PALETTE_ENTRIES_NO 16 /* passed to fb_alloc_cmap() */
+/* ML300/403 reference design framebuffer driver platform data struct */
+struct xilinxfb_platform_data {
+ u32 rotate_screen; /* Flag to rotate display 180 degrees */
+ u32 screen_height_mm; /* Physical dimensions of screen in mm */
+ u32 screen_width_mm;
+ u32 xres, yres; /* resolution of screen in pixels */
+ u32 xvirt, yvirt; /* resolution of memory buffer */
+
+ /* Physical address of framebuffer memory; If non-zero, driver
+ * will use provided memory address instead of allocating one from
+ * the consistent pool. */
+ u32 fb_phys;
+};
+
/*
* Default xilinxfb configuration
*/
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 34bdabaecbd6..25ebe8eecdb7 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -108,8 +108,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
/* We should always be able to add one buffer to an empty queue. */
- if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
- BUG();
+ virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
virtqueue_kick(vq);
/* When host has read buffer, this completes via balloon_ack */
@@ -258,8 +257,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
if (!virtqueue_get_buf(vq, &len))
return;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
- if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
- BUG();
+ virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
virtqueue_kick(vq);
}
@@ -310,6 +308,12 @@ static int balloon(void *_vballoon)
else if (diff < 0)
leak_balloon(vb, -diff);
update_balloon_size(vb);
+
+ /*
+ * For large balloon changes, we could spend a lot of time
+ * and always have work to do. Be nice if preempt disabled.
+ */
+ cond_resched();
}
return 0;
}
@@ -338,7 +342,7 @@ static int init_vqs(struct virtio_balloon *vb)
/*
* Prime this virtqueue with one buffer so the hypervisor can
- * use it to signal us later.
+ * use it to signal us later (it can't be broken yet!).
*/
sg_init_one(&sg, vb->stats, sizeof vb->stats);
if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
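
The virtio_balloon hunk adds cond_resched() because the balloon thread can have an effectively unbounded amount of work when the target changes by a large amount. The pattern, sketched with placeholder names (worker_thread, do_one_chunk), is simply a long-running kthread that yields between bounded chunks of work:

#include <linux/kthread.h>
#include <linux/sched.h>

static int worker_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* do a bounded amount of work, e.g. do_one_chunk(data); */

		/* Be nice on non-preemptible kernels: with a large target
		 * this loop may effectively never run out of work. */
		cond_resched();
	}
	return 0;
}

Such a thread would typically be created with kthread_run(worker_thread, data, "demo-worker").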
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index a416f9b2a7f6..101db3faf5d4 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -333,10 +333,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
for (i = 0; i < nvectors; ++i)
vp_dev->msix_entries[i].entry = i;
- /* pci_enable_msix returns positive if we can't get this many. */
- err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors);
- if (err > 0)
- err = -ENOSPC;
+ err = pci_enable_msix_exact(vp_dev->pci_dev,
+ vp_dev->msix_entries, nvectors);
if (err)
goto error;
vp_dev->msix_enabled = 1;
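
The virtio_pci hunk switches to pci_enable_msix_exact(), whose contract removes the "positive return means fewer vectors were available" case that the deleted comment described: the call either grants exactly nvec vectors and returns 0, or fails with a negative errno. A hedged sketch of the resulting call pattern; request_exact_vectors is an illustrative helper, not virtio code.

#include <linux/pci.h>

static int request_exact_vectors(struct pci_dev *pdev,
				 struct msix_entry *entries, int nvec)
{
	int i, err;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, entries, nvec);
	if (err)
		return err;	/* e.g. -ENOSPC or -EINVAL, never a count */

	return 0;
}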
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 28b5338fff71..1e443629f76d 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
+#include <linux/kmemleak.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
@@ -203,6 +204,11 @@ static inline int virtqueue_add(struct virtqueue *_vq,
BUG_ON(data == NULL);
+ if (unlikely(vq->broken)) {
+ END_USE(vq);
+ return -EIO;
+ }
+
#ifdef DEBUG
{
ktime_t now = ktime_get();
@@ -309,7 +315,7 @@ add_head:
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_sgs(struct virtqueue *_vq,
struct scatterlist *sgs[],
@@ -347,7 +353,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_outbuf(struct virtqueue *vq,
struct scatterlist sg[], unsigned int num,
@@ -369,7 +375,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_inbuf(struct virtqueue *vq,
struct scatterlist sg[], unsigned int num,
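
With the virtio_ring change above, virtqueue_add_* can now fail with -EIO once the device has been marked broken, which the updated kernel-doc comments reflect. A sketch of how a caller might treat that case; submit_buf and its arguments are illustrative, not driver code.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

static int submit_buf(struct virtqueue *vq, void *buf, unsigned int len,
		      void *token)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_KERNEL);
	if (err == -EIO)
		return err;	/* device marked broken: give up, don't kick */
	if (err)
		return err;	/* -ENOSPC/-ENOMEM: retry when space frees up */

	virtqueue_kick(vq);
	return 0;
}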
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 1b5d48c578e1..bfb2d3f06738 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -869,14 +869,13 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
spin_lock(&image->lock);
- /* The following code handles VME address alignment problem
- * in order to assure the maximal data width cycle.
- * We cannot use memcpy_xxx directly here because it
- * may cut data transfer in 8-bits cycles, thus making
- * D16 cycle impossible.
- * From the other hand, the bridge itself assures that
- * maximal configured data cycle is used and splits it
- * automatically for non-aligned addresses.
+ /* The following code handles VME address alignment. We cannot use
+ * memcpy_xxx here because it may cut data transfers into 8-bit
+ * cycles when D16 or D32 cycles are required on the VME bus.
+ * On the other hand, the bridge itself assures that the maximum data
+ * cycle configured for the transfer is used and splits it
+ * automatically for non-aligned addresses, so we don't want the
+ * overhead of needlessly forcing small transfers for the entire cycle.
*/
if ((uintptr_t)addr & 0x1) {
*(u8 *)buf = ioread8(addr);
@@ -896,9 +895,9 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
}
count32 = (count - done) & ~0x3;
- if (count32 > 0) {
- memcpy_fromio(buf + done, addr + done, (unsigned int)count);
- done += count32;
+ while (done < count32) {
+ *(u32 *)(buf + done) = ioread32(addr + done);
+ done += 4;
}
if ((count - done) & 0x2) {
@@ -930,7 +929,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
spin_lock(&image->lock);
/* Here we apply for the same strategy we do in master_read
- * function in order to assure D16 cycle when required.
+ * function in order to assure the correct cycles.
*/
if ((uintptr_t)addr & 0x1) {
iowrite8(*(u8 *)buf, addr);
@@ -950,9 +949,9 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
}
count32 = (count - done) & ~0x3;
- if (count32 > 0) {
- memcpy_toio(addr + done, buf + done, count32);
- done += count32;
+ while (done < count32) {
+ iowrite32(*(u32 *)(buf + done), addr + done);
+ done += 4;
}
if ((count - done) & 0x2) {
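
The ca91cx42 hunks replace memcpy_fromio()/memcpy_toio() with explicit ioread32()/iowrite32() loops so the bridge always sees accesses of the configured width. The sketch below shows the read-side staging, leading byte, leading 16-bit word, 32-bit bulk, then the tail, using an explicit end offset for clarity rather than the driver's remaining-length variable; aligned_read is an illustrative helper, not driver code.

#include <linux/io.h>
#include <linux/types.h>

/* 'addr' is an __iomem pointer into the mapped VME window; the leading
 * byte and 16-bit word bring it up to 4-byte alignment, the bulk is read
 * with ioread32() so the bridge can issue D32 cycles, and the tail uses
 * narrower accesses. */
static size_t aligned_read(void *buf, void __iomem *addr, size_t count)
{
	size_t done = 0, end32;

	if (count && ((uintptr_t)addr & 0x1)) {
		*(u8 *)buf = ioread8(addr);
		done += 1;
	}
	if ((count - done) >= 2 && ((uintptr_t)(addr + done) & 0x2)) {
		*(u16 *)(buf + done) = ioread16(addr + done);
		done += 2;
	}

	/* explicit end offset of the 32-bit aligned region */
	end32 = done + ((count - done) & ~(size_t)0x3);
	while (done < end32) {
		*(u32 *)(buf + done) = ioread32(addr + done);
		done += 4;
	}

	if ((count - done) & 0x2) {
		*(u16 *)(buf + done) = ioread16(addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		*(u8 *)(buf + done) = ioread8(addr + done);
		done += 1;
	}
	return done;
}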
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 9911cd5fddb5..06990c6a1a69 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -1276,8 +1276,8 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
spin_lock(&image->lock);
/* The following code handles VME address alignment. We cannot use
- * memcpy_xxx directly here because it may cut small data transfers in
- * to 8-bit cycles, thus making D16 cycle impossible.
+ * memcpy_xxx here because it may cut data transfers into 8-bit
+ * cycles when D16 or D32 cycles are required on the VME bus.
* On the other hand, the bridge itself assures that the maximum data
* cycle configured for the transfer is used and splits it
* automatically for non-aligned addresses, so we don't want the
@@ -1301,9 +1301,9 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
}
count32 = (count - done) & ~0x3;
- if (count32 > 0) {
- memcpy_fromio(buf + done, addr + done, count32);
- done += count32;
+ while (done < count32) {
+ *(u32 *)(buf + done) = ioread32(addr + done);
+ done += 4;
}
if ((count - done) & 0x2) {
@@ -1363,7 +1363,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
spin_lock(&image->lock);
/* Here we apply for the same strategy we do in master_read
- * function in order to assure D16 cycle when required.
+ * function in order to assure the correct cycles.
*/
if ((uintptr_t)addr & 0x1) {
iowrite8(*(u8 *)buf, addr);
@@ -1383,9 +1383,9 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
}
count32 = (count - done) & ~0x3;
- if (count32 > 0) {
- memcpy_toio(addr + done, buf + done, count32);
- done += count32;
+ while (done < count32) {
+ iowrite32(*(u32 *)(buf + done), addr + done);
+ done += 4;
}
if ((count - done) & 0x2) {
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index efc7f075fcbe..1708b2300c7a 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -36,13 +36,12 @@ config W1_MASTER_DS2482
config W1_MASTER_MXC
tristate "Freescale MXC 1-wire busmaster"
- depends on W1 && ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Say Y here to enable MXC 1-wire host
config W1_MASTER_DS1WM
tristate "Maxim DS1WM 1-wire busmaster"
- depends on W1
help
Say Y here to enable the DS1WM 1-wire driver, such as that
in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 4f7e1d770f81..7404ad3062b7 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1,5 +1,5 @@
/*
- * dscore.c
+ * ds2490.c USB to one wire bridge
*
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
@@ -28,6 +28,10 @@
#include "../w1_int.h"
#include "../w1.h"
+/* USB Standard */
+/* USB Control request vendor type */
+#define VENDOR 0x40
+
/* COMMAND TYPE CODES */
#define CONTROL_CMD 0x00
#define COMM_CMD 0x01
@@ -107,6 +111,8 @@
#define ST_HALT 0x10 /* DS2490 is currently halted */
#define ST_IDLE 0x20 /* DS2490 is currently idle */
#define ST_EPOF 0x80
+/* Status transfer size, 16 bytes status, 16 byte result flags */
+#define ST_SIZE 0x20
/* Result Register flags */
#define RR_DETECT 0xA5 /* New device detected */
@@ -198,7 +204,7 @@ static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
- CONTROL_CMD, 0x40, value, index, NULL, 0, 1000);
+ CONTROL_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
printk(KERN_ERR "Failed to send command control message %x.%x: err=%d.\n",
value, index, err);
@@ -213,7 +219,7 @@ static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index)
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
- MODE_CMD, 0x40, value, index, NULL, 0, 1000);
+ MODE_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
printk(KERN_ERR "Failed to send mode control message %x.%x: err=%d.\n",
value, index, err);
@@ -228,7 +234,7 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
- COMM_CMD, 0x40, value, index, NULL, 0, 1000);
+ COMM_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
printk(KERN_ERR "Failed to send control message %x.%x: err=%d.\n",
value, index, err);
@@ -246,7 +252,8 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
memset(st, 0, sizeof(*st));
count = 0;
- err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 100);
+ err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev,
+ dev->ep[EP_STATUS]), buf, size, &count, 100);
if (err < 0) {
printk(KERN_ERR "Failed to read 1-wire data from 0x%x: err=%d.\n", dev->ep[EP_STATUS], err);
return err;
@@ -353,7 +360,7 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
buf, size, &count, 1000);
if (err < 0) {
- u8 buf[0x20];
+ u8 buf[ST_SIZE];
int count;
printk(KERN_INFO "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
@@ -398,7 +405,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
{
struct ds_status st;
int count = 0, err = 0;
- u8 buf[0x20];
+ u8 buf[ST_SIZE];
do {
err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0);
@@ -450,10 +457,11 @@ int ds_detect(struct ds_device *dev, struct ds_status *st)
static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
{
- u8 buf[0x20];
+ u8 buf[ST_SIZE];
int err, count = 0;
do {
+ st->status = 0;
err = ds_recv_status_nodump(dev, st, buf, sizeof(buf));
#if 0
if (err >= 0) {
@@ -464,7 +472,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
printk("\n");
}
#endif
- } while (!(buf[0x08] & ST_IDLE) && !(err < 0) && ++count < 100);
+ } while (!(st->status & ST_IDLE) && !(err < 0) && ++count < 100);
if (err >= 16 && st->status & ST_EPOF) {
printk(KERN_INFO "Resetting device after ST_EPOF.\n");
@@ -690,37 +698,106 @@ static int ds_write_block(struct ds_device *dev, u8 *buf, int len)
return !(err == len);
}
-#if 0
-
-static int ds_search(struct ds_device *dev, u64 init, u64 *buf, u8 id_number, int conditional_search)
+static void ds9490r_search(void *data, struct w1_master *master,
+ u8 search_type, w1_slave_found_callback callback)
{
+ /* When starting with an existing id, the first id returned will
+ * be that device (if it is still on the bus most likely).
+ *
+ * If the number of devices found is less than or equal to the
+ * search_limit, that number of IDs will be returned. If there are
+ * more, search_limit IDs will be returned followed by a non-zero
+ * discrepancy value.
+ */
+ struct ds_device *dev = data;
int err;
u16 value, index;
struct ds_status st;
+ u8 st_buf[ST_SIZE];
+ int search_limit;
+ int found = 0;
+ int i;
- memset(buf, 0, sizeof(buf));
+ /* DS18b20 spec, 13.16 ms per device, 75 per second, sleep for
+ * discovering 8 devices (1 bulk transfer and 1/2 FIFO size) at a time.
+ */
+ const unsigned long jtime = msecs_to_jiffies(1000*8/75);
+ /* FIFO 128 bytes, bulk packet size 64, read a multiple of the
+ * packet size.
+ */
+ u64 buf[2*64/8];
- err = ds_send_data(ds_dev, (unsigned char *)&init, 8);
- if (err)
- return err;
+ mutex_lock(&master->bus_mutex);
- ds_wait_status(ds_dev, &st);
+ /* address to start searching at */
+ if (ds_send_data(dev, (u8 *)&master->search_id, 8) < 0)
+ goto search_out;
+ master->search_id = 0;
- value = COMM_SEARCH_ACCESS | COMM_IM | COMM_SM | COMM_F | COMM_RTS;
- index = (conditional_search ? 0xEC : 0xF0) | (id_number << 8);
- err = ds_send_control(ds_dev, value, index);
- if (err)
- return err;
+ value = COMM_SEARCH_ACCESS | COMM_IM | COMM_RST | COMM_SM | COMM_F |
+ COMM_RTS;
+ search_limit = master->max_slave_count;
+ if (search_limit > 255)
+ search_limit = 0;
+ index = search_type | (search_limit << 8);
+ if (ds_send_control(dev, value, index) < 0)
+ goto search_out;
- ds_wait_status(ds_dev, &st);
+ do {
+ schedule_timeout(jtime);
- err = ds_recv_data(ds_dev, (unsigned char *)buf, 8*id_number);
- if (err < 0)
- return err;
+ if (ds_recv_status_nodump(dev, &st, st_buf, sizeof(st_buf)) <
+ sizeof(st)) {
+ break;
+ }
- return err/8;
+ if (st.data_in_buffer_status) {
+ /* Bulk in can receive partial ids, but when it does
+ * they fail crc and will be discarded anyway.
+ * That has only been seen when status in buffer
+ * is 0 and bulk is read anyway, so don't read
+ * bulk without first checking if status says there
+ * is data to read.
+ */
+ err = ds_recv_data(dev, (u8 *)buf, sizeof(buf));
+ if (err < 0)
+ break;
+ for (i = 0; i < err/8; ++i) {
+ ++found;
+ if (found <= search_limit)
+ callback(master, buf[i]);
+ /* can't know if there will be a discrepancy
+ * value after this one until the next id is read */
+ if (found == search_limit)
+ master->search_id = buf[i];
+ }
+ }
+
+ if (test_bit(W1_ABORT_SEARCH, &master->flags))
+ break;
+ } while (!(st.status & (ST_IDLE | ST_HALT)));
+
+ /* only continue the search if some weren't found */
+ if (found <= search_limit) {
+ master->search_id = 0;
+ } else if (!test_bit(W1_WARN_MAX_COUNT, &master->flags)) {
+ /* Only max_slave_count will be scanned in a search,
+ * but it will start where it left off on the next search
+ * until all ids are identified and then it will start
+ * over. A continued search will report the previous
+ * last id as the first id (provided it is still on the
+ * bus).
+ */
+ dev_info(&dev->udev->dev, "%s: max_slave_count %d reached, "
+ "will continue next search.\n", __func__,
+ master->max_slave_count);
+ set_bit(W1_WARN_MAX_COUNT, &master->flags);
+ }
+search_out:
+ mutex_unlock(&master->bus_mutex);
}
+#if 0
static int ds_match_access(struct ds_device *dev, u64 init)
{
int err;
@@ -894,6 +971,7 @@ static int ds_w1_init(struct ds_device *dev)
dev->master.write_block = &ds9490r_write_block;
dev->master.reset_bus = &ds9490r_reset;
dev->master.set_pullup = &ds9490r_set_pullup;
+ dev->master.search = &ds9490r_search;
return w1_add_master_device(&dev->master);
}
@@ -910,15 +988,13 @@ static int ds_probe(struct usb_interface *intf,
struct usb_endpoint_descriptor *endpoint;
struct usb_host_interface *iface_desc;
struct ds_device *dev;
- int i, err;
+ int i, err, alt;
- dev = kmalloc(sizeof(struct ds_device), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct ds_device), GFP_KERNEL);
if (!dev) {
printk(KERN_INFO "Failed to allocate new DS9490R structure.\n");
return -ENOMEM;
}
- dev->spu_sleep = 0;
- dev->spu_bit = 0;
dev->udev = usb_get_dev(udev);
if (!dev->udev) {
err = -ENOMEM;
@@ -928,20 +1004,25 @@ static int ds_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
- err = usb_set_interface(dev->udev, intf->altsetting[0].desc.bInterfaceNumber, 3);
+ err = usb_reset_configuration(dev->udev);
if (err) {
- printk(KERN_ERR "Failed to set alternative setting 3 for %d interface: err=%d.\n",
- intf->altsetting[0].desc.bInterfaceNumber, err);
+ dev_err(&dev->udev->dev,
+ "Failed to reset configuration: err=%d.\n", err);
goto err_out_clear;
}
- err = usb_reset_configuration(dev->udev);
+ /* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
+ alt = 3;
+ err = usb_set_interface(dev->udev,
+ intf->altsetting[alt].desc.bInterfaceNumber, alt);
if (err) {
- printk(KERN_ERR "Failed to reset configuration: err=%d.\n", err);
+ dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
+ "for %d interface: err=%d.\n", alt,
+ intf->altsetting[alt].desc.bInterfaceNumber, err);
goto err_out_clear;
}
- iface_desc = &intf->altsetting[0];
+ iface_desc = &intf->altsetting[alt];
if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
printk(KERN_INFO "Num endpoints=%d. It is not DS9490R.\n", iface_desc->desc.bNumEndpoints);
err = -EINVAL;
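
The new ds9490r_search() above packs its parameters into the vendor control request: the ROM command goes in the low byte of wIndex (0xF0 for a normal search, 0xEC for a conditional search, as the removed ds_search() also showed) and the id limit in the high byte, with limits above 255 collapsed to 0, which the DS2490 appears to treat as "no limit". A small stand-alone illustration; search_index is a made-up helper, not driver code.

#include <stdint.h>
#include <stdio.h>

static uint16_t search_index(uint8_t search_type, int max_slave_count)
{
	int search_limit = max_slave_count;

	if (search_limit > 255)
		search_limit = 0;	/* can't express more than 255 */

	return search_type | (search_limit << 8);
}

int main(void)
{
	printf("normal search, limit 64:  wIndex=0x%04x\n",
	       search_index(0xF0, 64));
	printf("normal search, limit 300: wIndex=0x%04x\n",
	       search_index(0xF0, 300));
	return 0;
}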
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 1e5d94c5afc9..67b067a3e2ab 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -10,24 +10,16 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
*/
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include "../w1.h"
#include "../w1_int.h"
-#include "../w1_log.h"
/* According to the mx27 Datasheet the reset procedure should take up to about
* 1350us. We set the timeout to 500*100us = 50ms for sure */
@@ -36,13 +28,13 @@
/*
* MXC W1 Register offsets
*/
-#define MXC_W1_CONTROL 0x00
-#define MXC_W1_TIME_DIVIDER 0x02
-#define MXC_W1_RESET 0x04
-#define MXC_W1_COMMAND 0x06
-#define MXC_W1_TXRX 0x08
-#define MXC_W1_INTERRUPT 0x0A
-#define MXC_W1_INTERRUPT_EN 0x0C
+#define MXC_W1_CONTROL 0x00
+# define MXC_W1_CONTROL_RDST BIT(3)
+# define MXC_W1_CONTROL_WR(x) BIT(5 - (x))
+# define MXC_W1_CONTROL_PST BIT(6)
+# define MXC_W1_CONTROL_RPP BIT(7)
+#define MXC_W1_TIME_DIVIDER 0x02
+#define MXC_W1_RESET 0x04
struct mxc_w1_device {
void __iomem *regs;
@@ -61,12 +53,12 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
unsigned int timeout_cnt = 0;
struct mxc_w1_device *dev = data;
- __raw_writeb(0x80, (dev->regs + MXC_W1_CONTROL));
+ writeb(MXC_W1_CONTROL_RPP, (dev->regs + MXC_W1_CONTROL));
while (1) {
- reg_val = __raw_readb(dev->regs + MXC_W1_CONTROL);
+ reg_val = readb(dev->regs + MXC_W1_CONTROL);
- if (((reg_val >> 7) & 0x1) == 0 ||
+ if (!(reg_val & MXC_W1_CONTROL_RPP) ||
timeout_cnt > MXC_W1_RESET_TIMEOUT)
break;
else
@@ -74,7 +66,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
udelay(100);
}
- return (reg_val >> 7) & 0x1;
+ return !!(reg_val & MXC_W1_CONTROL_PST);
}
/*
@@ -90,16 +82,16 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
* datasheet.
*/
- __raw_writeb((1 << (5 - bit)), ctrl_addr);
+ writeb(MXC_W1_CONTROL_WR(bit), ctrl_addr);
while (timeout_cnt--) {
- if (!((__raw_readb(ctrl_addr) >> (5 - bit)) & 0x1))
+ if (!(readb(ctrl_addr) & MXC_W1_CONTROL_WR(bit)))
break;
udelay(1);
}
- return ((__raw_readb(ctrl_addr)) >> 3) & 0x1;
+ return !!(readb(ctrl_addr) & MXC_W1_CONTROL_RDST);
}
static int mxc_w1_probe(struct platform_device *pdev)
@@ -139,7 +131,7 @@ static int mxc_w1_probe(struct platform_device *pdev)
if (err)
return err;
- __raw_writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
+ writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
mdev->bus_master.data = mdev;
mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus;
@@ -177,6 +169,7 @@ MODULE_DEVICE_TABLE(of, mxc_w1_dt_ids);
static struct platform_driver mxc_w1_driver = {
.driver = {
.name = "mxc_w1",
+ .owner = THIS_MODULE,
.of_match_table = mxc_w1_dt_ids,
},
.probe = mxc_w1_probe,
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 9709b8b484ba..1d111e56c8c8 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -89,11 +89,22 @@ static int w1_gpio_probe_dt(struct platform_device *pdev)
pdata->is_open_drain = 1;
gpio = of_get_gpio(np, 0);
- if (gpio < 0)
+ if (gpio < 0) {
+ if (gpio != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to parse gpio property for data pin (%d)\n",
+ gpio);
+
return gpio;
+ }
pdata->pin = gpio;
- pdata->ext_pullup_enable_pin = of_get_gpio(np, 1);
+ gpio = of_get_gpio(np, 1);
+ if (gpio == -EPROBE_DEFER)
+ return gpio;
+ /* ignore other errors as the pullup gpio is optional */
+ pdata->ext_pullup_enable_pin = gpio;
+
pdev->dev.platform_data = pdata;
return 0;
@@ -107,10 +118,8 @@ static int w1_gpio_probe(struct platform_device *pdev)
if (of_have_populated_dt()) {
err = w1_gpio_probe_dt(pdev);
- if (err < 0) {
- dev_err(&pdev->dev, "Failed to parse DT\n");
+ if (err < 0)
return err;
- }
}
pdata = dev_get_platdata(&pdev->dev);
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 5e6a3c9e510b..1cdce80b6abf 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -72,7 +72,6 @@ config W1_SLAVE_DS2433_CRC
config W1_SLAVE_DS2760
tristate "Dallas 2760 battery monitor chip (HP iPAQ & others)"
- depends on W1
help
If you enable this you will have the DS2760 battery monitor
chip support.
@@ -85,7 +84,6 @@ config W1_SLAVE_DS2760
config W1_SLAVE_DS2780
tristate "Dallas 2780 battery monitor chip"
- depends on W1
help
If you enable this you will have the DS2780 battery monitor
chip support.
@@ -98,7 +96,6 @@ config W1_SLAVE_DS2780
config W1_SLAVE_DS2781
tristate "Dallas 2781 battery monitor chip"
- depends on W1
help
If you enable this you will have the DS2781 battery monitor
chip support.
@@ -111,7 +108,6 @@ config W1_SLAVE_DS2781
config W1_SLAVE_DS28E04
tristate "4096-Bit Addressable 1-Wire EEPROM with PIO (DS28E04-100)"
- depends on W1
select CRC16
help
If you enable this you will have the DS28E04-100
@@ -124,7 +120,6 @@ config W1_SLAVE_DS28E04
config W1_SLAVE_BQ27000
tristate "BQ27000 slave support"
- depends on W1
help
Say Y here if you want to use a hdq
bq27000 slave support.
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 8b5ff33f72cf..1f11a20a8ab9 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include "../w1.h"
@@ -58,6 +59,19 @@ MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
+static int w1_therm_add_slave(struct w1_slave *sl)
+{
+ sl->family_data = kzalloc(9, GFP_KERNEL);
+ if (!sl->family_data)
+ return -ENOMEM;
+ return 0;
+}
+
+static void w1_therm_remove_slave(struct w1_slave *sl)
+{
+ kfree(sl->family_data);
+ sl->family_data = NULL;
+}
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf);
@@ -71,6 +85,8 @@ static struct attribute *w1_therm_attrs[] = {
ATTRIBUTE_GROUPS(w1_therm);
static struct w1_family_ops w1_therm_fops = {
+ .add_slave = w1_therm_add_slave,
+ .remove_slave = w1_therm_remove_slave,
.groups = w1_therm_groups,
};
@@ -253,12 +269,13 @@ static ssize_t w1_slave_show(struct device *device,
c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
crc, (verdict) ? "YES" : "NO");
if (verdict)
- memcpy(sl->rom, rom, sizeof(sl->rom));
+ memcpy(sl->family_data, rom, sizeof(rom));
else
dev_warn(device, "Read failed CRC check\n");
for (i = 0; i < 9; ++i)
- c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", sl->rom[i]);
+ c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
+ ((u8 *)sl->family_data)[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
w1_convert_temp(rom, sl->family->fid));
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 66efa96c4603..b96f61b15dc6 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -46,18 +46,29 @@ MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
static int w1_timeout = 10;
-int w1_max_slave_count = 10;
+int w1_max_slave_count = 64;
int w1_max_slave_ttl = 10;
module_param_named(timeout, w1_timeout, int, 0);
+MODULE_PARM_DESC(timeout, "time in seconds between automatic slave searches");
+/* A search stops when w1_max_slave_count devices have been found in that
+ * search. The next search will start over and detect the same set of devices
+ * on a static 1-wire bus. Memory is not allocated based on this number, just
+ * on the number of devices known to the kernel. Having a high number does not
+ * consume additional resources. As a special case, if there is only one
+ * device on the network and w1_max_slave_count is set to 1, the device id can
+ * be read directly skipping the normal slower search process.
+ */
module_param_named(max_slave_count, w1_max_slave_count, int, 0);
+MODULE_PARM_DESC(max_slave_count,
+ "maximum number of slaves detected in a search");
module_param_named(slave_ttl, w1_max_slave_ttl, int, 0);
+MODULE_PARM_DESC(slave_ttl,
+ "Number of searches not seeing a slave before it will be removed");
DEFINE_MUTEX(w1_mlock);
LIST_HEAD(w1_masters);
-static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);
-
static int w1_master_match(struct device *dev, struct device_driver *drv)
{
return 1;
@@ -81,19 +92,10 @@ static void w1_slave_release(struct device *dev)
{
struct w1_slave *sl = dev_to_w1_slave(dev);
- dev_dbg(dev, "%s: Releasing %s.\n", __func__, sl->name);
-
- while (atomic_read(&sl->refcnt)) {
- dev_dbg(dev, "Waiting for %s to become free: refcnt=%d.\n",
- sl->name, atomic_read(&sl->refcnt));
- if (msleep_interruptible(1000))
- flush_signals(current);
- }
+ dev_dbg(dev, "%s: Releasing %s [%p]\n", __func__, sl->name, sl);
w1_family_put(sl->family);
sl->master->slave_count--;
-
- complete(&sl->released);
}
static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -243,7 +245,9 @@ static ssize_t w1_master_attribute_store_search(struct device * dev,
mutex_lock(&md->mutex);
md->search_count = tmp;
mutex_unlock(&md->mutex);
- wake_up_process(md->thread);
+ /* Only wake if it is going to be searching. */
+ if (tmp)
+ wake_up_process(md->thread);
return count;
}
@@ -277,7 +281,6 @@ static ssize_t w1_master_attribute_store_pullup(struct device *dev,
mutex_lock(&md->mutex);
md->enable_pullup = tmp;
mutex_unlock(&md->mutex);
- wake_up_process(md->thread);
return count;
}
@@ -314,6 +317,24 @@ static ssize_t w1_master_attribute_show_timeout(struct device *dev, struct devic
return count;
}
+static ssize_t w1_master_attribute_store_max_slave_count(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int tmp;
+ struct w1_master *md = dev_to_w1_master(dev);
+
+ if (kstrtoint(buf, 0, &tmp) == -EINVAL || tmp < 1)
+ return -EINVAL;
+
+ mutex_lock(&md->mutex);
+ md->max_slave_count = tmp;
+ /* allow each time the max_slave_count is updated */
+ clear_bit(W1_WARN_MAX_COUNT, &md->flags);
+ mutex_unlock(&md->mutex);
+
+ return count;
+}
+
static ssize_t w1_master_attribute_show_max_slave_count(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w1_master *md = dev_to_w1_master(dev);
@@ -352,23 +373,20 @@ static ssize_t w1_master_attribute_show_slaves(struct device *dev,
{
struct w1_master *md = dev_to_w1_master(dev);
int c = PAGE_SIZE;
+ struct list_head *ent, *n;
+ struct w1_slave *sl = NULL;
- mutex_lock(&md->mutex);
-
- if (md->slave_count == 0)
- c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n");
- else {
- struct list_head *ent, *n;
- struct w1_slave *sl;
+ mutex_lock(&md->list_mutex);
- list_for_each_safe(ent, n, &md->slist) {
- sl = list_entry(ent, struct w1_slave, w1_slave_entry);
+ list_for_each_safe(ent, n, &md->slist) {
+ sl = list_entry(ent, struct w1_slave, w1_slave_entry);
- c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name);
- }
+ c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name);
}
+ if (!sl)
+ c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n");
- mutex_unlock(&md->mutex);
+ mutex_unlock(&md->list_mutex);
return PAGE_SIZE - c;
}
@@ -422,19 +440,22 @@ static int w1_atoreg_num(struct device *dev, const char *buf, size_t count,
}
/* Searches the slaves in the w1_master and returns a pointer or NULL.
- * Note: must hold the mutex
+ * Note: must not hold list_mutex
*/
-static struct w1_slave *w1_slave_search_device(struct w1_master *dev,
+struct w1_slave *w1_slave_search_device(struct w1_master *dev,
struct w1_reg_num *rn)
{
struct w1_slave *sl;
+ mutex_lock(&dev->list_mutex);
list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
if (sl->reg_num.family == rn->family &&
sl->reg_num.id == rn->id &&
sl->reg_num.crc == rn->crc) {
+ mutex_unlock(&dev->list_mutex);
return sl;
}
}
+ mutex_unlock(&dev->list_mutex);
return NULL;
}
@@ -491,7 +512,10 @@ static ssize_t w1_master_attribute_store_remove(struct device *dev,
mutex_lock(&md->mutex);
sl = w1_slave_search_device(md, &rn);
if (sl) {
- w1_slave_detach(sl);
+ result = w1_slave_detach(sl);
+ /* refcnt 0 means it was detached in the call */
+ if (result == 0)
+ result = count;
} else {
dev_info(dev, "Device %02x-%012llx doesn't exists\n", rn.family,
(unsigned long long)rn.id);
@@ -516,7 +540,7 @@ static ssize_t w1_master_attribute_store_remove(struct device *dev,
static W1_MASTER_ATTR_RO(name, S_IRUGO);
static W1_MASTER_ATTR_RO(slaves, S_IRUGO);
static W1_MASTER_ATTR_RO(slave_count, S_IRUGO);
-static W1_MASTER_ATTR_RO(max_slave_count, S_IRUGO);
+static W1_MASTER_ATTR_RW(max_slave_count, S_IRUGO | S_IWUSR | S_IWGRP);
static W1_MASTER_ATTR_RO(attempts, S_IRUGO);
static W1_MASTER_ATTR_RO(timeout, S_IRUGO);
static W1_MASTER_ATTR_RO(pointer, S_IRUGO);
@@ -686,12 +710,14 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
dev_set_uevent_suppress(&sl->dev, false);
kobject_uevent(&sl->dev.kobj, KOBJ_ADD);
+ mutex_lock(&sl->master->list_mutex);
list_add_tail(&sl->w1_slave_entry, &sl->master->slist);
+ mutex_unlock(&sl->master->list_mutex);
return 0;
}
-static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
+int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
{
struct w1_slave *sl;
struct w1_family *f;
@@ -713,8 +739,8 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
memset(&msg, 0, sizeof(msg));
memcpy(&sl->reg_num, rn, sizeof(sl->reg_num));
- atomic_set(&sl->refcnt, 0);
- init_completion(&sl->released);
+ atomic_set(&sl->refcnt, 1);
+ atomic_inc(&sl->master->refcnt);
/* slave modules need to be loaded in a context with unlocked mutex */
mutex_unlock(&dev->mutex);
@@ -754,23 +780,48 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
return 0;
}
-void w1_slave_detach(struct w1_slave *sl)
+int w1_unref_slave(struct w1_slave *sl)
{
- struct w1_netlink_msg msg;
-
- dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__, sl->name, sl);
-
- list_del(&sl->w1_slave_entry);
-
- memset(&msg, 0, sizeof(msg));
- memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id));
- msg.type = W1_SLAVE_REMOVE;
- w1_netlink_send(sl->master, &msg);
-
- device_unregister(&sl->dev);
+ struct w1_master *dev = sl->master;
+ int refcnt;
+ mutex_lock(&dev->list_mutex);
+ refcnt = atomic_sub_return(1, &sl->refcnt);
+ if (refcnt == 0) {
+ struct w1_netlink_msg msg;
+
+ dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__,
+ sl->name, sl);
+
+ list_del(&sl->w1_slave_entry);
+
+ memset(&msg, 0, sizeof(msg));
+ memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id));
+ msg.type = W1_SLAVE_REMOVE;
+ w1_netlink_send(sl->master, &msg);
+
+ device_unregister(&sl->dev);
+ #ifdef DEBUG
+ memset(sl, 0, sizeof(*sl));
+ #endif
+ kfree(sl);
+ }
+ atomic_dec(&dev->refcnt);
+ mutex_unlock(&dev->list_mutex);
+ return refcnt;
+}
- wait_for_completion(&sl->released);
- kfree(sl);
+int w1_slave_detach(struct w1_slave *sl)
+{
+ /* Only detach a slave once as it decreases the refcnt each time. */
+ int destroy_now;
+ mutex_lock(&sl->master->list_mutex);
+ destroy_now = !test_bit(W1_SLAVE_DETACH, &sl->flags);
+ set_bit(W1_SLAVE_DETACH, &sl->flags);
+ mutex_unlock(&sl->master->list_mutex);
+
+ if (destroy_now)
+ destroy_now = !w1_unref_slave(sl);
+ return destroy_now ? 0 : -EBUSY;
}
struct w1_master *w1_search_master_id(u32 id)
@@ -799,7 +850,7 @@ struct w1_slave *w1_search_slave(struct w1_reg_num *id)
mutex_lock(&w1_mlock);
list_for_each_entry(dev, &w1_masters, w1_master_entry) {
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->list_mutex);
list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
if (sl->reg_num.family == id->family &&
sl->reg_num.id == id->id &&
@@ -810,7 +861,7 @@ struct w1_slave *w1_search_slave(struct w1_reg_num *id)
break;
}
}
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->list_mutex);
if (found)
break;
@@ -830,6 +881,7 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
dev_dbg(&dev->dev, "Reconnecting slaves in device %s "
"for family %02x.\n", dev->name, f->fid);
mutex_lock(&dev->mutex);
+ mutex_lock(&dev->list_mutex);
list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
/* If it is a new family, slaves with the default
* family driver and are that family will be
@@ -841,14 +893,19 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
(!attach && sl->family->fid == f->fid)) {
struct w1_reg_num rn;
+ mutex_unlock(&dev->list_mutex);
memcpy(&rn, &sl->reg_num, sizeof(rn));
- w1_slave_detach(sl);
-
- w1_attach_slave_device(dev, &rn);
+ /* If it was already in use let the automatic
+ * scan pick it up again later.
+ */
+ if (!w1_slave_detach(sl))
+ w1_attach_slave_device(dev, &rn);
+ mutex_lock(&dev->list_mutex);
}
}
dev_dbg(&dev->dev, "Reconnecting slaves in device %s "
"has been finished.\n", dev->name);
+ mutex_unlock(&dev->list_mutex);
mutex_unlock(&dev->mutex);
}
mutex_unlock(&w1_mlock);
@@ -876,7 +933,12 @@ void w1_slave_found(struct w1_master *dev, u64 rn)
}
/**
- * Performs a ROM Search & registers any devices found.
+ * w1_search() - Performs a ROM Search & registers any devices found.
+ * @dev: The master device to search
+ * @search_type: W1_SEARCH to search all devices, or W1_ALARM_SEARCH
+ * to return only devices in the alarmed state
+ * @cb: Function to call when a device is found
+ *
* The 1-wire search is a simple binary tree search.
* For each bit of the address, we read two bits and write one bit.
* The bit written will put to sleep all devices that don't match that bit.
@@ -886,8 +948,6 @@ void w1_slave_found(struct w1_master *dev, u64 rn)
*
* See "Application note 187 1-wire search algorithm" at www.maxim-ic.com
*
- * @dev The master device to search
- * @cb Function to call when a device is found
*/
void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb)
{
@@ -898,7 +958,8 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
u8 triplet_ret = 0;
search_bit = 0;
- rn = last_rn = 0;
+ rn = dev->search_id;
+ last_rn = 0;
last_device = 0;
last_zero = -1;
@@ -945,7 +1006,7 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
else
search_bit = ((last_rn >> i) & 0x1);
- /** Read two bits and write one bit */
+ /* Read two bits and write one bit */
triplet_ret = w1_triplet(dev, search_bit);
/* quit if no device responded */
@@ -960,8 +1021,7 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
tmp64 = (triplet_ret >> 2);
rn |= (tmp64 << i);
- /* ensure we're called from kthread and not by netlink callback */
- if (!dev->priv && kthread_should_stop()) {
+ if (test_bit(W1_ABORT_SEARCH, &dev->flags)) {
mutex_unlock(&dev->bus_mutex);
dev_dbg(&dev->dev, "Abort w1_search\n");
return;
@@ -970,11 +1030,30 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
mutex_unlock(&dev->bus_mutex);
if ( (triplet_ret & 0x03) != 0x03 ) {
- if ( (desc_bit == last_zero) || (last_zero < 0))
+ if ((desc_bit == last_zero) || (last_zero < 0)) {
last_device = 1;
+ dev->search_id = 0;
+ } else {
+ dev->search_id = rn;
+ }
desc_bit = last_zero;
cb(dev, rn);
}
+
+ if (!last_device && slave_count == dev->max_slave_count &&
+ !test_bit(W1_WARN_MAX_COUNT, &dev->flags)) {
+ /* Only max_slave_count devices will be scanned in a search,
+ * but it will start where it left off next search
+ * until all ids are identified and then it will start
+ * over. A continued search will report the previous
+ * last id as the first id (provided it is still on the
+ * bus).
+ */
+ dev_info(&dev->dev, "%s: max_slave_count %d reached, "
+ "will continue next search.\n", __func__,
+ dev->max_slave_count);
+ set_bit(W1_WARN_MAX_COUNT, &dev->flags);
+ }
}
}
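
The continued-search changes in the hunk above sit on top of the binary tree ROM walk that the kernel-doc at the start of this hunk describes. As a rough, self-contained sketch of one pass of that walk (not part of the patch; triplet() is a stand-in with the same return convention as w1_triplet(), and locking, CRC checking and the search_id bookkeeping are left out):

#include <stdint.h>

/* Hypothetical bus helper with the same return convention as w1_triplet():
 * bit0 = id bit, bit1 = complement bit, bit2 = direction taken. */
extern uint8_t triplet(int bdir);

/* One pass of the binary tree ROM search sketched from the loop above.
 * Returns the 64-bit id found (0 if nothing answered) and stores the
 * position of the last unexplored discrepancy in *next_desc (-1 when no
 * branch is left). */
static uint64_t search_pass(uint64_t last_rn, int desc_bit, int *next_desc)
{
	uint64_t rn = 0;
	int i, last_zero = -1;

	for (i = 0; i < 64; i++) {
		int search_bit;
		uint8_t ret;

		if (i == desc_bit)
			search_bit = 1;		/* take the 1-branch this time */
		else if (i > desc_bit)
			search_bit = 0;		/* prefer 0 past the discrepancy */
		else
			search_bit = (last_rn >> i) & 1; /* follow previous id */

		ret = triplet(search_bit);
		if ((ret & 0x03) == 0x03)	/* no device answered */
			return 0;
		if (ret == 0)			/* discrepancy, 0-branch taken */
			last_zero = i;

		rn |= (uint64_t)((ret >> 2) & 1) << i; /* direction taken */
	}

	*next_desc = last_zero;			/* -1: this was the last id */
	return rn;
}
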
@@ -983,17 +1062,24 @@ void w1_search_process_cb(struct w1_master *dev, u8 search_type,
{
struct w1_slave *sl, *sln;
+ mutex_lock(&dev->list_mutex);
list_for_each_entry(sl, &dev->slist, w1_slave_entry)
clear_bit(W1_SLAVE_ACTIVE, &sl->flags);
+ mutex_unlock(&dev->list_mutex);
w1_search_devices(dev, search_type, cb);
+ mutex_lock(&dev->list_mutex);
list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
- if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl)
+ if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl) {
+ mutex_unlock(&dev->list_mutex);
w1_slave_detach(sl);
+ mutex_lock(&dev->list_mutex);
+ }
else if (test_bit(W1_SLAVE_ACTIVE, &sl->flags))
sl->ttl = dev->slave_ttl;
}
+ mutex_unlock(&dev->list_mutex);
if (dev->search_count > 0)
dev->search_count--;
@@ -1004,6 +1090,32 @@ static void w1_search_process(struct w1_master *dev, u8 search_type)
w1_search_process_cb(dev, search_type, w1_slave_found);
}
+/**
+ * w1_process_callbacks() - execute each dev->async_list callback entry
+ * @dev: w1_master device
+ *
+ * Return: 1 if there were commands to execute, 0 otherwise
+ */
+int w1_process_callbacks(struct w1_master *dev)
+{
+ int ret = 0;
+ struct w1_async_cmd *async_cmd, *async_n;
+
+ /* The list can be added to in another thread, loop until it is empty */
+ while (!list_empty(&dev->async_list)) {
+ list_for_each_entry_safe(async_cmd, async_n, &dev->async_list,
+ async_entry) {
+ /* drop the lock; if it is a search it can take a
+ * long time */
+ mutex_unlock(&dev->list_mutex);
+ async_cmd->cb(dev, async_cmd);
+ ret = 1;
+ mutex_lock(&dev->list_mutex);
+ }
+ }
+ return ret;
+}
+
int w1_process(void *data)
{
struct w1_master *dev = (struct w1_master *) data;
@@ -1011,23 +1123,46 @@ int w1_process(void *data)
* time can be calculated in jiffies once.
*/
const unsigned long jtime = msecs_to_jiffies(w1_timeout * 1000);
+ /* remainder if it woke up early */
+ unsigned long jremain = 0;
- while (!kthread_should_stop()) {
- if (dev->search_count) {
+ for (;;) {
+
+ if (!jremain && dev->search_count) {
mutex_lock(&dev->mutex);
w1_search_process(dev, W1_SEARCH);
mutex_unlock(&dev->mutex);
}
+ mutex_lock(&dev->list_mutex);
+ /* Note, w1_process_callbacks() drops the lock while processing,
+ * but locks it again before returning.
+ */
+ if (!w1_process_callbacks(dev) && jremain) {
+ /* A wakeup is either to stop the thread, to process
+ * callbacks, or to search; it wasn't to process
+ * callbacks, so schedule a search.
+ */
+ jremain = 1;
+ }
+
try_to_freeze();
__set_current_state(TASK_INTERRUPTIBLE);
+ /* hold list_mutex until after setting TASK_INTERRUPTIBLE to
+ * prevent losing the wakeup signal when an async_cmd is added.
+ */
+ mutex_unlock(&dev->list_mutex);
+
if (kthread_should_stop())
break;
/* Only sleep when the search is active. */
- if (dev->search_count)
- schedule_timeout(jtime);
+ if (dev->search_count) {
+ if (!jremain)
+ jremain = jtime;
+ jremain = schedule_timeout(jremain);
+ }
else
schedule();
}
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index ca8081a101d6..734dab7fc687 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -22,6 +22,13 @@
#ifndef __W1_H
#define __W1_H
+/**
+ * struct w1_reg_num - broken out slave device id
+ *
+ * @family: identifies the type of device
+ * @id: along with family is the unique device id
+ * @crc: checksum of the other bytes
+ */
struct w1_reg_num
{
#if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -58,7 +65,24 @@ struct w1_reg_num
#define W1_RESUME_CMD 0xA5
#define W1_SLAVE_ACTIVE 0
+#define W1_SLAVE_DETACH 1
+/**
+ * struct w1_slave - holds a single slave device on the bus
+ *
+ * @owner: Points to the one wire "wire" kernel module.
+ * @name: Device id in ASCII.
+ * @w1_slave_entry: data for the linked list
+ * @reg_num: the slave id in binary
+ * @refcnt: reference count, delete when 0
+ * @flags: bit flags for W1_SLAVE_ACTIVE W1_SLAVE_DETACH
+ * @ttl: decremented each search this slave isn't found, detach at 0
+ * @master: bus which this slave is on
+ * @family: module for device family type
+ * @family_data: pointer for use by the family module
+ * @dev: kernel device identifier
+ *
+ */
struct w1_slave
{
struct module *owner;
@@ -66,7 +90,6 @@ struct w1_slave
struct list_head w1_slave_entry;
struct w1_reg_num reg_num;
atomic_t refcnt;
- u8 rom[9];
int ttl;
unsigned long flags;
@@ -74,99 +97,146 @@ struct w1_slave
struct w1_family *family;
void *family_data;
struct device dev;
- struct completion released;
};
typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
/**
+ * struct w1_bus_master - operations available on a bus master
+ *
+ * @data: the first parameter in all the functions below
+ *
+ * @read_bit: Sample the line level @return the level read (0 or 1)
+ *
+ * @write_bit: Sets the line level
+ *
+ * @touch_bit: the lowest-level function for devices that really support the
+ * 1-wire protocol.
+ * touch_bit(0) = write-0 cycle
+ * touch_bit(1) = write-1 / read cycle
+ * @return the bit read (0 or 1)
+ *
+ * @read_byte: Reads a byte. Same as 8 touch_bit(1) calls.
+ * @return the byte read
+ *
+ * @write_byte: Writes a byte. Same as 8 touch_bit(x) calls.
+ *
+ * @read_block: Same as a series of read_byte() calls
+ * @return the number of bytes read
+ *
+ * @write_block: Same as a series of write_byte() calls
+ *
+ * @triplet: Combines two reads and a smart write for ROM searches
+ * @return bit0=Id bit1=comp_id bit2=dir_taken
+ *
+ * @reset_bus: long write-0 with a read for the presence pulse detection
+ * @return -1=Error, 0=Device present, 1=No device present
+ *
+ * @set_pullup: Put out a strong pull-up pulse of the specified duration.
+ * @return -1=Error, 0=completed
+ *
+ * @search: Really nice hardware can handle the different types of ROM search
+ * w1_master* is passed to the slave found callback.
+ * u8 is search_type, W1_SEARCH or W1_ALARM_SEARCH
+ *
* Note: read_bit and write_bit are very low level functions and should only
* be used with hardware that doesn't really support 1-wire operations,
* like a parallel/serial port.
* Either define read_bit and write_bit OR define, at minimum, touch_bit and
* reset_bus.
+ *
*/
struct w1_bus_master
{
- /** the first parameter in all the functions below */
void *data;
- /**
- * Sample the line level
- * @return the level read (0 or 1)
- */
u8 (*read_bit)(void *);
- /** Sets the line level */
void (*write_bit)(void *, u8);
- /**
- * touch_bit is the lowest-level function for devices that really
- * support the 1-wire protocol.
- * touch_bit(0) = write-0 cycle
- * touch_bit(1) = write-1 / read cycle
- * @return the bit read (0 or 1)
- */
u8 (*touch_bit)(void *, u8);
- /**
- * Reads a bytes. Same as 8 touch_bit(1) calls.
- * @return the byte read
- */
u8 (*read_byte)(void *);
- /**
- * Writes a byte. Same as 8 touch_bit(x) calls.
- */
void (*write_byte)(void *, u8);
- /**
- * Same as a series of read_byte() calls
- * @return the number of bytes read
- */
u8 (*read_block)(void *, u8 *, int);
- /** Same as a series of write_byte() calls */
void (*write_block)(void *, const u8 *, int);
- /**
- * Combines two reads and a smart write for ROM searches
- * @return bit0=Id bit1=comp_id bit2=dir_taken
- */
u8 (*triplet)(void *, u8);
- /**
- * long write-0 with a read for the presence pulse detection
- * @return -1=Error, 0=Device present, 1=No device present
- */
u8 (*reset_bus)(void *);
- /**
- * Put out a strong pull-up pulse of the specified duration.
- * @return -1=Error, 0=completed
- */
u8 (*set_pullup)(void *, int);
- /** Really nice hardware can handles the different types of ROM search
- * w1_master* is passed to the slave found callback.
- */
void (*search)(void *, struct w1_master *,
u8, w1_slave_found_callback);
};
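
For orientation only, a minimal bus master written against the contract documented above could look like the sketch below. It is not part of the patch: the hardware helpers are placeholders, and the include paths assume the file lives under drivers/w1/masters/ like the other masters touched here.

/* Sketch of a minimal bus master using the documented minimum
 * (touch_bit + reset_bus).  The hardware helpers are stand-ins. */
#include <linux/module.h>

#include "../w1.h"
#include "../w1_int.h"

/* hypothetical hardware access; a real driver drives its GPIO here */
static u8 my_hw_touch_bit(void *data, u8 bit)
{
	/* touch_bit(0) = write-0 cycle, touch_bit(1) = write-1/read cycle */
	return bit;
}

static u8 my_hw_reset_bus(void *data)
{
	return 1;	/* 1 = no presence pulse seen */
}

static struct w1_bus_master my_bus_master = {
	.touch_bit = my_hw_touch_bit,
	.reset_bus = my_hw_reset_bus,
};

static int __init my_w1_init(void)
{
	/* registers the master and starts its w1_process() kthread */
	return w1_add_master_device(&my_bus_master);
}

static void __exit my_w1_exit(void)
{
	w1_remove_master_device(&my_bus_master);
}

module_init(my_w1_init);
module_exit(my_w1_exit);
MODULE_LICENSE("GPL");
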
+/**
+ * enum w1_master_flags - bitfields used in w1_master.flags
+ * @W1_ABORT_SEARCH: abort searching early on shutdown
+ * @W1_WARN_MAX_COUNT: warn only once when the maximum slave count is reached
+ */
+enum w1_master_flags {
+ W1_ABORT_SEARCH = 0,
+ W1_WARN_MAX_COUNT = 1,
+};
+
+/**
+ * struct w1_master - one per bus master
+ * @w1_master_entry: master linked list
+ * @owner: module owner
+ * @name: dynamically generated bus name
+ * @list_mutex: protect slist and async_list
+ * @slist: linked list of slaves
+ * @async_list: linked list of netlink commands to execute
+ * @max_slave_count: maximum number of slaves to search for at a time
+ * @slave_count: current number of slaves known
+ * @attempts: number of searches run
+ * @slave_ttl: number of searches before a slave is timed out
+ * @initialized: prevent init/removal race conditions
+ * @id: w1 bus number
+ * @search_count: number of automatic searches to run, -1 unlimited
+ * @search_id: allows continuing a search
+ * @refcnt: reference count
+ * @priv: private data storage
+ * @priv_size: size allocated
+ * @enable_pullup: allows a strong pullup
+ * @pullup_duration: time for the next strong pullup
+ * @flags: one of w1_master_flags
+ * @thread: thread for bus search and netlink commands
+ * @mutex: protect most of w1_master
+ * @bus_mutex: protect concurrent bus access
+ * @driver: sysfs driver
+ * @dev: sysfs device
+ * @bus_master: io operations available
+ * @seq: sequence number used for netlink broadcasts
+ * @portid: destination for the current netlink command
+ */
struct w1_master
{
struct list_head w1_master_entry;
struct module *owner;
unsigned char name[W1_MAXNAMELEN];
+ /* list_mutex protects just slist and async_list so slaves can be
+ * searched for and async commands added while the master has
+ * w1_master.mutex locked and is operating on the bus.
+ * lock order w1_mlock, w1_master.mutex, w1_master.list_mutex
+ */
+ struct mutex list_mutex;
struct list_head slist;
+ struct list_head async_list;
int max_slave_count, slave_count;
unsigned long attempts;
int slave_ttl;
int initialized;
u32 id;
int search_count;
+ /* id to start searching on, to continue a search or 0 to restart */
+ u64 search_id;
atomic_t refcnt;
@@ -178,6 +248,8 @@ struct w1_master
/** 5V strong pullup duration in milliseconds, zero disabled. */
int pullup_duration;
+ long flags;
+
struct task_struct *thread;
struct mutex mutex;
struct mutex bus_mutex;
@@ -188,16 +260,41 @@ struct w1_master
struct w1_bus_master *bus_master;
u32 seq;
+ /* port id to send netlink responses to. The value is temporarily
+ * stored here while processing a message: set after locking the
+ * mutex and cleared to zero before unlocking it.
+ */
+ u32 portid;
+};
+
+/**
+ * struct w1_async_cmd - execute callback from the w1_process kthread
+ * @async_entry: link entry
+ * @cb: callback function; it must list_del and free this entry before
+ * returning
+ *
+ * When inserted into the w1_master async_list, w1_process will execute
+ * the callback. Embed this into the structure with the command details.
+ */
+struct w1_async_cmd {
+ struct list_head async_entry;
+ void (*cb)(struct w1_master *dev, struct w1_async_cmd *async_cmd);
};
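
The netlink changes later in this patch are the only in-tree user of w1_async_cmd so far; the following sketch shows how another in-kernel caller could queue work onto the master kthread using the same pattern. struct my_async_op and my_async_cb() are made up for illustration, and <linux/slab.h> plus the declarations in this header are assumed.

struct my_async_op {
	struct w1_async_cmd async;
	u64 rn;				/* command-specific payload */
};

static void my_async_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
{
	struct my_async_op *op = container_of(async_cmd, struct my_async_op,
					      async);

	/* Runs in w1_process() context with list_mutex released. */
	mutex_lock(&dev->bus_mutex);
	/* ... perform bus operations for op->rn ... */
	mutex_unlock(&dev->bus_mutex);

	/* The callback must unlink and free its own entry. */
	mutex_lock(&dev->list_mutex);
	list_del(&async_cmd->async_entry);
	mutex_unlock(&dev->list_mutex);
	kfree(op);
}

static int my_queue_async_op(struct w1_master *dev, u64 rn)
{
	struct my_async_op *op = kzalloc(sizeof(*op), GFP_KERNEL);

	if (!op)
		return -ENOMEM;
	op->async.cb = my_async_cb;
	op->rn = rn;

	mutex_lock(&dev->list_mutex);
	list_add_tail(&op->async.async_entry, &dev->async_list);
	wake_up_process(dev->thread);
	mutex_unlock(&dev->list_mutex);
	return 0;
}
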
int w1_create_master_attributes(struct w1_master *);
void w1_destroy_master_attributes(struct w1_master *master);
void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
+/* call w1_unref_slave to release the reference counts w1_search_slave added */
struct w1_slave *w1_search_slave(struct w1_reg_num *id);
+/* Decrements the reference on sl->master and sl, and cleans up if zero;
+ * returns the reference count after it has been decremented. */
+int w1_unref_slave(struct w1_slave *sl);
void w1_slave_found(struct w1_master *dev, u64 rn);
void w1_search_process_cb(struct w1_master *dev, u8 search_type,
w1_slave_found_callback cb);
+struct w1_slave *w1_slave_search_device(struct w1_master *dev,
+ struct w1_reg_num *rn);
struct w1_master *w1_search_master_id(u32 id);
/* Disconnect and reconnect devices in the given family. Used for finding
@@ -206,7 +303,9 @@ struct w1_master *w1_search_master_id(u32 id);
* has just been registered, to 0 when it has been unregistered.
*/
void w1_reconnect_slaves(struct w1_family *f, int attach);
-void w1_slave_detach(struct w1_slave *sl);
+int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);
+/* 0 success, otherwise EBUSY */
+int w1_slave_detach(struct w1_slave *sl);
u8 w1_triplet(struct w1_master *dev, int bdir);
void w1_write_8(struct w1_master *, u8);
@@ -242,6 +341,7 @@ extern int w1_max_slave_ttl;
extern struct list_head w1_masters;
extern struct mutex w1_mlock;
+extern int w1_process_callbacks(struct w1_master *dev);
extern int w1_process(void *);
#endif /* __KERNEL__ */
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index e9309778ee72..3bff6b37b472 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -31,6 +31,10 @@
DEFINE_SPINLOCK(w1_flock);
static LIST_HEAD(w1_families);
+/**
+ * w1_register_family() - register a device family driver
+ * @newf: family to register
+ */
int w1_register_family(struct w1_family *newf)
{
struct list_head *ent, *n;
@@ -59,6 +63,10 @@ int w1_register_family(struct w1_family *newf)
return ret;
}
+/**
+ * w1_unregister_family() - unregister a device family driver
+ * @fent: family to unregister
+ */
void w1_unregister_family(struct w1_family *fent)
{
struct list_head *ent, *n;
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 4ad0e81b6404..26ca1343055b 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -48,6 +48,12 @@
struct w1_slave;
+/**
+ * struct w1_family_ops - operations for a family type
+ * @add_slave: called when a slave of this family is attached
+ * @remove_slave: called when the slave is being detached
+ * @groups: sysfs attribute groups
+ */
struct w1_family_ops
{
int (* add_slave)(struct w1_slave *);
@@ -55,6 +61,13 @@ struct w1_family_ops
const struct attribute_group **groups;
};
+/**
+ * struct w1_family - reference counted family structure.
+ * @family_entry: family linked list
+ * @fid: 8 bit family identifier
+ * @fops: operations for this family
+ * @refcnt: reference counter
+ */
struct w1_family
{
struct list_head family_entry;
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 590bd8a7cd1b..9b084db739c7 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -75,8 +75,10 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
atomic_set(&dev->refcnt, 2);
INIT_LIST_HEAD(&dev->slist);
+ INIT_LIST_HEAD(&dev->async_list);
mutex_init(&dev->mutex);
mutex_init(&dev->bus_mutex);
+ mutex_init(&dev->list_mutex);
memcpy(&dev->dev, device, sizeof(struct device));
dev_set_name(&dev->dev, "w1_bus_master%u", dev->id);
@@ -103,6 +105,10 @@ static void w1_free_dev(struct w1_master *dev)
device_unregister(&dev->dev);
}
+/**
+ * w1_add_master_device() - registers a new master device
+ * @master: master bus device to register
+ */
int w1_add_master_device(struct w1_bus_master *master)
{
struct w1_master *dev, *entry;
@@ -172,6 +178,7 @@ int w1_add_master_device(struct w1_bus_master *master)
#if 0 /* Thread cleanup code, not required currently. */
err_out_kill_thread:
+ set_bit(W1_ABORT_SEARCH, &dev->flags);
kthread_stop(dev->thread);
#endif
err_out_rm_attr:
@@ -187,16 +194,22 @@ void __w1_remove_master_device(struct w1_master *dev)
struct w1_netlink_msg msg;
struct w1_slave *sl, *sln;
- kthread_stop(dev->thread);
-
mutex_lock(&w1_mlock);
list_del(&dev->w1_master_entry);
mutex_unlock(&w1_mlock);
+ set_bit(W1_ABORT_SEARCH, &dev->flags);
+ kthread_stop(dev->thread);
+
mutex_lock(&dev->mutex);
- list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry)
+ mutex_lock(&dev->list_mutex);
+ list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
+ mutex_unlock(&dev->list_mutex);
w1_slave_detach(sl);
+ mutex_lock(&dev->list_mutex);
+ }
w1_destroy_master_attributes(dev);
+ mutex_unlock(&dev->list_mutex);
mutex_unlock(&dev->mutex);
atomic_dec(&dev->refcnt);
@@ -206,7 +219,9 @@ void __w1_remove_master_device(struct w1_master *dev)
if (msleep_interruptible(1000))
flush_signals(current);
+ w1_process_callbacks(dev);
}
+ w1_process_callbacks(dev);
memset(&msg, 0, sizeof(msg));
msg.id.mst.id = dev->id;
@@ -216,6 +231,10 @@ void __w1_remove_master_device(struct w1_master *dev)
w1_free_dev(dev);
}
+/**
+ * w1_remove_master_device() - unregister a master device
+ * @bm: master bus device to remove
+ */
void w1_remove_master_device(struct w1_bus_master *bm)
{
struct w1_master *dev, *found = NULL;
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index e10acc237733..282092421cc9 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -62,7 +62,9 @@ static void w1_write_bit(struct w1_master *dev, int bit);
static u8 w1_read_bit(struct w1_master *dev);
/**
- * Generates a write-0 or write-1 cycle and samples the level.
+ * w1_touch_bit() - Generates a write-0 or write-1 cycle and samples the level.
+ * @dev: the master device
+ * @bit: 0 - write a 0, 1 - write a 1 and read the level
*/
static u8 w1_touch_bit(struct w1_master *dev, int bit)
{
@@ -77,7 +79,10 @@ static u8 w1_touch_bit(struct w1_master *dev, int bit)
}
/**
- * Generates a write-0 or write-1 cycle.
+ * w1_write_bit() - Generates a write-0 or write-1 cycle.
+ * @dev: the master device
+ * @bit: bit to write
+ *
* Only call if dev->bus_master->touch_bit is NULL
*/
static void w1_write_bit(struct w1_master *dev, int bit)
@@ -102,11 +107,12 @@ static void w1_write_bit(struct w1_master *dev, int bit)
}
/**
+ * w1_pre_write() - pre-write operations
+ * @dev: the master device
+ *
* Pre-write operation, currently only supporting strong pullups.
* Program the hardware for a strong pullup, if one has been requested and
* the hardware supports it.
- *
- * @param dev the master device
*/
static void w1_pre_write(struct w1_master *dev)
{
@@ -118,11 +124,12 @@ static void w1_pre_write(struct w1_master *dev)
}
/**
+ * w1_post_write() - post-write options
+ * @dev: the master device
+ *
* Post-write operation, currently only supporting strong pullups.
* If a strong pullup was requested, clear it if the hardware supports
* them, or execute the delay otherwise, in either case clear the request.
- *
- * @param dev the master device
*/
static void w1_post_write(struct w1_master *dev)
{
@@ -136,10 +143,9 @@ static void w1_post_write(struct w1_master *dev)
}
/**
- * Writes 8 bits.
- *
- * @param dev the master device
- * @param byte the byte to write
+ * w1_write_8() - Writes 8 bits.
+ * @dev: the master device
+ * @byte: the byte to write
*/
void w1_write_8(struct w1_master *dev, u8 byte)
{
@@ -161,7 +167,9 @@ EXPORT_SYMBOL_GPL(w1_write_8);
/**
- * Generates a write-1 cycle and samples the level.
+ * w1_read_bit() - Generates a write-1 cycle and samples the level.
+ * @dev: the master device
+ *
* Only call if dev->bus_master->touch_bit is NULL
*/
static u8 w1_read_bit(struct w1_master *dev)
@@ -185,16 +193,17 @@ static u8 w1_read_bit(struct w1_master *dev)
}
/**
- * Does a triplet - used for searching ROM addresses.
+ * w1_triplet() - Does a triplet - used for searching ROM addresses.
+ * @dev: the master device
+ * @bdir: the bit to write if both id_bit and comp_bit are 0
+ *
* Return bits:
* bit 0 = id_bit
* bit 1 = comp_bit
* bit 2 = dir_taken
* If both bits 0 & 1 are set, the search should be restarted.
*
- * @param dev the master device
- * @param bdir the bit to write if both id_bit and comp_bit are 0
- * @return bit fields - see above
+ * Return: bit fields - see above
*/
u8 w1_triplet(struct w1_master *dev, int bdir)
{
@@ -226,10 +235,10 @@ u8 w1_triplet(struct w1_master *dev, int bdir)
}
/**
- * Reads 8 bits.
+ * w1_read_8() - Reads 8 bits.
+ * @dev: the master device
*
- * @param dev the master device
- * @return the byte read
+ * Return: the byte read
*/
u8 w1_read_8(struct w1_master *dev)
{
@@ -247,11 +256,10 @@ u8 w1_read_8(struct w1_master *dev)
EXPORT_SYMBOL_GPL(w1_read_8);
/**
- * Writes a series of bytes.
- *
- * @param dev the master device
- * @param buf pointer to the data to write
- * @param len the number of bytes to write
+ * w1_write_block() - Writes a series of bytes.
+ * @dev: the master device
+ * @buf: pointer to the data to write
+ * @len: the number of bytes to write
*/
void w1_write_block(struct w1_master *dev, const u8 *buf, int len)
{
@@ -269,11 +277,10 @@ void w1_write_block(struct w1_master *dev, const u8 *buf, int len)
EXPORT_SYMBOL_GPL(w1_write_block);
/**
- * Touches a series of bytes.
- *
- * @param dev the master device
- * @param buf pointer to the data to write
- * @param len the number of bytes to write
+ * w1_touch_block() - Touches a series of bytes.
+ * @dev: the master device
+ * @buf: pointer to the data to write
+ * @len: the number of bytes to write
*/
void w1_touch_block(struct w1_master *dev, u8 *buf, int len)
{
@@ -294,12 +301,11 @@ void w1_touch_block(struct w1_master *dev, u8 *buf, int len)
EXPORT_SYMBOL_GPL(w1_touch_block);
/**
- * Reads a series of bytes.
- *
- * @param dev the master device
- * @param buf pointer to the buffer to fill
- * @param len the number of bytes to read
- * @return the number of bytes read
+ * w1_read_block() - Reads a series of bytes.
+ * @dev: the master device
+ * @buf: pointer to the buffer to fill
+ * @len: the number of bytes to read
+ * Return: the number of bytes read
*/
u8 w1_read_block(struct w1_master *dev, u8 *buf, int len)
{
@@ -319,10 +325,9 @@ u8 w1_read_block(struct w1_master *dev, u8 *buf, int len)
EXPORT_SYMBOL_GPL(w1_read_block);
/**
- * Issues a reset bus sequence.
- *
- * @param dev The bus master pointer
- * @return 0=Device present, 1=No device present or error
+ * w1_reset_bus() - Issues a reset bus sequence.
+ * @dev: the master device
+ * Return: 0=Device present, 1=No device present or error
*/
int w1_reset_bus(struct w1_master *dev)
{
@@ -383,12 +388,15 @@ void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_cal
}
/**
+ * w1_reset_select_slave() - reset and select a slave
+ * @sl: the slave to select
+ *
* Resets the bus and then selects the slave by sending either a skip rom
- * or a rom match.
+ * or a rom match. A skip rom is issued if there is only one device
+ * registered on the bus.
* The w1 master lock must be held.
*
- * @param sl the slave to select
- * @return 0=success, anything else=error
+ * Return: 0=success, anything else=error
*/
int w1_reset_select_slave(struct w1_slave *sl)
{
@@ -409,6 +417,9 @@ int w1_reset_select_slave(struct w1_slave *sl)
EXPORT_SYMBOL_GPL(w1_reset_select_slave);
/**
+ * w1_reset_resume_command() - resume instead of another match ROM
+ * @dev: the master device
+ *
* When the workflow with a slave amongst many requires several
* successive commands a reset between each, this function is similar
* to doing a reset then a match ROM for the last matched ROM. The
@@ -420,8 +431,6 @@ EXPORT_SYMBOL_GPL(w1_reset_select_slave);
* doesn't work of course, but the resume command is the next best thing.
*
* The w1 master lock must be held.
- *
- * @param dev the master device
*/
int w1_reset_resume_command(struct w1_master *dev)
{
@@ -435,6 +444,10 @@ int w1_reset_resume_command(struct w1_master *dev)
EXPORT_SYMBOL_GPL(w1_reset_resume_command);
/**
+ * w1_next_pullup() - register for a strong pullup
+ * @dev: the master device
+ * @delay: time in milliseconds
+ *
* Put out a strong pull-up of the specified duration after the next write
* operation. Not all hardware supports strong pullups. Hardware that
* doesn't support strong pullups will sleep for the given time after the
@@ -442,8 +455,7 @@ EXPORT_SYMBOL_GPL(w1_reset_resume_command);
* the next write, specifying zero will clear a previous request.
* The w1 master lock must be held.
*
- * @param delay time in milliseconds
- * @return 0=success, anything else=error
+ * Return: 0=success, anything else=error
*/
void w1_next_pullup(struct w1_master *dev, int delay)
{
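
The w1_reset_resume_command() kernel-doc above describes a shortcut for repeatedly addressing one slave among many; a hedged usage sketch follows. MY_READ_CMD is a hypothetical device opcode, and taking bus_mutex mirrors what the netlink path in this patch does around bus operations.

#define MY_READ_CMD	0xbe	/* hypothetical opcode, not a real define */

static int my_two_reads(struct w1_slave *sl, u8 *buf, int len)
{
	struct w1_master *dev = sl->master;
	int err;

	mutex_lock(&dev->bus_mutex);

	/* first command: reset + match (or skip) ROM for this slave */
	err = w1_reset_select_slave(sl);
	if (!err) {
		w1_write_8(dev, MY_READ_CMD);
		w1_read_block(dev, buf, len);

		/* later commands: reset + resume re-addresses the same slave */
		err = w1_reset_resume_command(dev);
		if (!err) {
			w1_write_8(dev, MY_READ_CMD);
			w1_read_block(dev, buf, len);
		}
	}

	mutex_unlock(&dev->bus_mutex);
	return err;
}
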
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 40788c925d1c..5234964fe001 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -45,7 +45,7 @@ void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
memcpy(w, msg, sizeof(struct w1_netlink_msg));
- cn_netlink_send(m, 0, GFP_KERNEL);
+ cn_netlink_send(m, dev->portid, 0, GFP_KERNEL);
}
static void w1_send_slave(struct w1_master *dev, u64 rn)
@@ -54,53 +54,95 @@ static void w1_send_slave(struct w1_master *dev, u64 rn)
struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
int avail;
-
- /* update kernel slave list */
- w1_slave_found(dev, rn);
+ u64 *data;
avail = dev->priv_size - cmd->len;
- if (avail > 8) {
- u64 *data = (void *)(cmd + 1) + cmd->len;
+ if (avail < 8) {
+ msg->ack++;
+ cn_netlink_send(msg, dev->portid, 0, GFP_KERNEL);
- *data = rn;
- cmd->len += 8;
- hdr->len += 8;
- msg->len += 8;
- return;
+ msg->len = sizeof(struct w1_netlink_msg) +
+ sizeof(struct w1_netlink_cmd);
+ hdr->len = sizeof(struct w1_netlink_cmd);
+ cmd->len = 0;
}
- msg->ack++;
- cn_netlink_send(msg, 0, GFP_KERNEL);
+ data = (void *)(cmd + 1) + cmd->len;
- msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
- hdr->len = sizeof(struct w1_netlink_cmd);
- cmd->len = 0;
+ *data = rn;
+ cmd->len += 8;
+ hdr->len += 8;
+ msg->len += 8;
}
-static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg,
- unsigned int avail)
+static void w1_found_send_slave(struct w1_master *dev, u64 rn)
{
- struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
- struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
- int search_type = (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH;
+ /* update kernel slave list */
+ w1_slave_found(dev, rn);
- dev->priv = msg;
- dev->priv_size = avail;
+ w1_send_slave(dev, rn);
+}
+
+/* Get the current slave list, or search (with or without alarm) */
+static int w1_get_slaves(struct w1_master *dev,
+ struct cn_msg *req_msg, struct w1_netlink_msg *req_hdr,
+ struct w1_netlink_cmd *req_cmd)
+{
+ struct cn_msg *msg;
+ struct w1_netlink_msg *hdr;
+ struct w1_netlink_cmd *cmd;
+ struct w1_slave *sl;
+
+ msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->id = req_msg->id;
+ msg->seq = req_msg->seq;
+ msg->ack = 0;
+ msg->len = sizeof(struct w1_netlink_msg) +
+ sizeof(struct w1_netlink_cmd);
+
+ hdr = (struct w1_netlink_msg *)(msg + 1);
+ cmd = (struct w1_netlink_cmd *)(hdr + 1);
+
+ hdr->type = W1_MASTER_CMD;
+ hdr->id = req_hdr->id;
+ hdr->len = sizeof(struct w1_netlink_cmd);
+
+ cmd->cmd = req_cmd->cmd;
+ cmd->len = 0;
- w1_search_process_cb(dev, search_type, w1_send_slave);
+ dev->priv = msg;
+ dev->priv_size = PAGE_SIZE - msg->len - sizeof(struct cn_msg);
+
+ if (req_cmd->cmd == W1_CMD_LIST_SLAVES) {
+ __u64 rn;
+ mutex_lock(&dev->list_mutex);
+ list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
+ memcpy(&rn, &sl->reg_num, sizeof(rn));
+ w1_send_slave(dev, rn);
+ }
+ mutex_unlock(&dev->list_mutex);
+ } else {
+ w1_search_process_cb(dev, cmd->cmd == W1_CMD_ALARM_SEARCH ?
+ W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave);
+ }
msg->ack = 0;
- cn_netlink_send(msg, 0, GFP_KERNEL);
+ cn_netlink_send(msg, dev->portid, 0, GFP_KERNEL);
dev->priv = NULL;
dev->priv_size = 0;
+ kfree(msg);
+
return 0;
}
static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr,
- struct w1_netlink_cmd *cmd)
+ struct w1_netlink_cmd *cmd, u32 portid)
{
void *data;
struct w1_netlink_msg *h;
@@ -131,7 +173,7 @@ static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr,
memcpy(c->data, cmd->data, c->len);
- err = cn_netlink_send(cm, 0, GFP_KERNEL);
+ err = cn_netlink_send(cm, portid, 0, GFP_KERNEL);
kfree(data);
@@ -146,11 +188,11 @@ static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg,
switch (cmd->cmd) {
case W1_CMD_TOUCH:
w1_touch_block(dev, cmd->data, cmd->len);
- w1_send_read_reply(msg, hdr, cmd);
+ w1_send_read_reply(msg, hdr, cmd, dev->portid);
break;
case W1_CMD_READ:
w1_read_block(dev, cmd->data, cmd->len);
- w1_send_read_reply(msg, hdr, cmd);
+ w1_send_read_reply(msg, hdr, cmd, dev->portid);
break;
case W1_CMD_WRITE:
w1_write_block(dev, cmd->data, cmd->len);
@@ -163,38 +205,57 @@ static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg,
return err;
}
-static int w1_process_command_master(struct w1_master *dev, struct cn_msg *req_msg,
- struct w1_netlink_msg *req_hdr, struct w1_netlink_cmd *req_cmd)
+static int w1_process_command_addremove(struct w1_master *dev,
+ struct cn_msg *msg, struct w1_netlink_msg *hdr,
+ struct w1_netlink_cmd *cmd)
{
- int err = -EINVAL;
- struct cn_msg *msg;
- struct w1_netlink_msg *hdr;
- struct w1_netlink_cmd *cmd;
+ struct w1_slave *sl;
+ int err = 0;
+ struct w1_reg_num *id;
- msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (cmd->len != 8)
+ return -EINVAL;
- msg->id = req_msg->id;
- msg->seq = req_msg->seq;
- msg->ack = 0;
- msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
+ id = (struct w1_reg_num *)cmd->data;
- hdr = (struct w1_netlink_msg *)(msg + 1);
- cmd = (struct w1_netlink_cmd *)(hdr + 1);
+ sl = w1_slave_search_device(dev, id);
+ switch (cmd->cmd) {
+ case W1_CMD_SLAVE_ADD:
+ if (sl)
+ err = -EINVAL;
+ else
+ err = w1_attach_slave_device(dev, id);
+ break;
+ case W1_CMD_SLAVE_REMOVE:
+ if (sl)
+ w1_slave_detach(sl);
+ else
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
- hdr->type = W1_MASTER_CMD;
- hdr->id = req_hdr->id;
- hdr->len = sizeof(struct w1_netlink_cmd);
+ return err;
+}
- cmd->cmd = req_cmd->cmd;
- cmd->len = 0;
+static int w1_process_command_master(struct w1_master *dev,
+ struct cn_msg *req_msg, struct w1_netlink_msg *req_hdr,
+ struct w1_netlink_cmd *req_cmd)
+{
+ int err = -EINVAL;
- switch (cmd->cmd) {
+ /* drop bus_mutex for search (does its own locking), and add/remove
+ * which doesn't use the bus
+ */
+ switch (req_cmd->cmd) {
case W1_CMD_SEARCH:
case W1_CMD_ALARM_SEARCH:
- err = w1_process_search_command(dev, msg,
- PAGE_SIZE - msg->len - sizeof(struct cn_msg));
+ case W1_CMD_LIST_SLAVES:
+ mutex_unlock(&dev->bus_mutex);
+ err = w1_get_slaves(dev, req_msg, req_hdr, req_cmd);
+ mutex_lock(&dev->bus_mutex);
break;
case W1_CMD_READ:
case W1_CMD_WRITE:
@@ -204,12 +265,20 @@ static int w1_process_command_master(struct w1_master *dev, struct cn_msg *req_m
case W1_CMD_RESET:
err = w1_reset_bus(dev);
break;
+ case W1_CMD_SLAVE_ADD:
+ case W1_CMD_SLAVE_REMOVE:
+ mutex_unlock(&dev->bus_mutex);
+ mutex_lock(&dev->mutex);
+ err = w1_process_command_addremove(dev, req_msg, req_hdr,
+ req_cmd);
+ mutex_unlock(&dev->mutex);
+ mutex_lock(&dev->bus_mutex);
+ break;
default:
err = -EINVAL;
break;
}
- kfree(msg);
return err;
}
@@ -223,7 +292,8 @@ static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg,
return w1_process_command_io(sl->master, msg, hdr, cmd);
}
-static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mcmd)
+static int w1_process_command_root(struct cn_msg *msg,
+ struct w1_netlink_msg *mcmd, u32 portid)
{
struct w1_master *m;
struct cn_msg *cn;
@@ -256,7 +326,7 @@ static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mc
mutex_lock(&w1_mlock);
list_for_each_entry(m, &w1_masters, w1_master_entry) {
if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
- cn_netlink_send(cn, 0, GFP_KERNEL);
+ cn_netlink_send(cn, portid, 0, GFP_KERNEL);
cn->ack++;
cn->len = sizeof(struct w1_netlink_msg);
w->len = 0;
@@ -269,7 +339,7 @@ static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mc
id++;
}
cn->ack = 0;
- cn_netlink_send(cn, 0, GFP_KERNEL);
+ cn_netlink_send(cn, portid, 0, GFP_KERNEL);
mutex_unlock(&w1_mlock);
kfree(cn);
@@ -277,7 +347,7 @@ static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mc
}
static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rmsg,
- struct w1_netlink_cmd *rcmd, int error)
+ struct w1_netlink_cmd *rcmd, int portid, int error)
{
struct cn_msg *cmsg;
struct w1_netlink_msg *msg;
@@ -304,35 +374,147 @@ static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rm
cmsg->len += sizeof(*cmd);
}
- error = cn_netlink_send(cmsg, 0, GFP_KERNEL);
+ error = cn_netlink_send(cmsg, portid, 0, GFP_KERNEL);
kfree(cmsg);
return error;
}
+/* Bundle together a reference count, the full message, and broken out
+ * commands to be executed on each w1 master kthread in one memory allocation.
+ */
+struct w1_cb_block {
+ atomic_t refcnt;
+ u32 portid; /* Sending process port ID */
+ struct cn_msg msg;
+ /* cn_msg data */
+ /* one or more variable length struct w1_cb_node */
+};
+struct w1_cb_node {
+ struct w1_async_cmd async;
+ /* pointers within w1_cb_block and msg data */
+ struct w1_cb_block *block;
+ struct w1_netlink_msg *m;
+ struct w1_slave *sl;
+ struct w1_master *dev;
+};
+
+static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
+{
+ struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node,
+ async);
+ u16 mlen = node->m->len;
+ u8 *cmd_data = node->m->data;
+ int err = 0;
+ struct w1_slave *sl = node->sl;
+ struct w1_netlink_cmd *cmd = NULL;
+
+ mutex_lock(&dev->bus_mutex);
+ dev->portid = node->block->portid;
+ if (sl && w1_reset_select_slave(sl))
+ err = -ENODEV;
+
+ while (mlen && !err) {
+ cmd = (struct w1_netlink_cmd *)cmd_data;
+
+ if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
+ err = -E2BIG;
+ break;
+ }
+
+ if (sl)
+ err = w1_process_command_slave(sl, &node->block->msg,
+ node->m, cmd);
+ else
+ err = w1_process_command_master(dev, &node->block->msg,
+ node->m, cmd);
+
+ w1_netlink_send_error(&node->block->msg, node->m, cmd,
+ node->block->portid, err);
+ err = 0;
+
+ cmd_data += cmd->len + sizeof(struct w1_netlink_cmd);
+ mlen -= cmd->len + sizeof(struct w1_netlink_cmd);
+ }
+
+ if (!cmd || err)
+ w1_netlink_send_error(&node->block->msg, node->m, cmd,
+ node->block->portid, err);
+
+ if (sl)
+ w1_unref_slave(sl);
+ else
+ atomic_dec(&dev->refcnt);
+ dev->portid = 0;
+ mutex_unlock(&dev->bus_mutex);
+
+ mutex_lock(&dev->list_mutex);
+ list_del(&async_cmd->async_entry);
+ mutex_unlock(&dev->list_mutex);
+
+ if (atomic_sub_return(1, &node->block->refcnt) == 0)
+ kfree(node->block);
+}
+
static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1);
- struct w1_netlink_cmd *cmd;
struct w1_slave *sl;
struct w1_master *dev;
+ u16 msg_len;
int err = 0;
+ struct w1_cb_block *block = NULL;
+ struct w1_cb_node *node = NULL;
+ int node_count = 0;
+
+ /* Count the number of master or slave commands there are to allocate
+ * space for one cb_node each.
+ */
+ msg_len = msg->len;
+ while (msg_len && !err) {
+ if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
+ err = -E2BIG;
+ break;
+ }
+
+ if (m->type == W1_MASTER_CMD || m->type == W1_SLAVE_CMD)
+ ++node_count;
- while (msg->len && !err) {
+ msg_len -= sizeof(struct w1_netlink_msg) + m->len;
+ m = (struct w1_netlink_msg *)(((u8 *)m) +
+ sizeof(struct w1_netlink_msg) + m->len);
+ }
+ m = (struct w1_netlink_msg *)(msg + 1);
+ if (node_count) {
+ /* msg->len doesn't include itself */
+ long size = sizeof(struct w1_cb_block) + msg->len +
+ node_count*sizeof(struct w1_cb_node);
+ block = kmalloc(size, GFP_KERNEL);
+ if (!block) {
+ w1_netlink_send_error(msg, m, NULL, nsp->portid,
+ -ENOMEM);
+ return;
+ }
+ atomic_set(&block->refcnt, 1);
+ block->portid = nsp->portid;
+ memcpy(&block->msg, msg, sizeof(*msg) + msg->len);
+ node = (struct w1_cb_node *)((u8 *)block->msg.data + msg->len);
+ }
+
+ msg_len = msg->len;
+ while (msg_len && !err) {
struct w1_reg_num id;
u16 mlen = m->len;
- u8 *cmd_data = m->data;
dev = NULL;
sl = NULL;
- cmd = NULL;
memcpy(&id, m->id.id, sizeof(id));
#if 0
printk("%s: %02x.%012llx.%02x: type=%02x, len=%u.\n",
__func__, id.family, (unsigned long long)id.id, id.crc, m->type, m->len);
#endif
- if (m->len + sizeof(struct w1_netlink_msg) > msg->len) {
+ if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
err = -E2BIG;
break;
}
@@ -344,7 +526,7 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
if (sl)
dev = sl->master;
} else {
- err = w1_process_command_root(msg, m);
+ err = w1_process_command_root(msg, m, nsp->portid);
goto out_cont;
}
@@ -357,41 +539,24 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
if (!mlen)
goto out_cont;
- mutex_lock(&dev->mutex);
-
- if (sl && w1_reset_select_slave(sl)) {
- err = -ENODEV;
- goto out_up;
- }
+ atomic_inc(&block->refcnt);
+ node->async.cb = w1_process_cb;
+ node->block = block;
+ node->m = (struct w1_netlink_msg *)((u8 *)&block->msg +
+ (size_t)((u8 *)m - (u8 *)msg));
+ node->sl = sl;
+ node->dev = dev;
- while (mlen) {
- cmd = (struct w1_netlink_cmd *)cmd_data;
+ mutex_lock(&dev->list_mutex);
+ list_add_tail(&node->async.async_entry, &dev->async_list);
+ wake_up_process(dev->thread);
+ mutex_unlock(&dev->list_mutex);
+ ++node;
- if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
- err = -E2BIG;
- break;
- }
-
- if (sl)
- err = w1_process_command_slave(sl, msg, m, cmd);
- else
- err = w1_process_command_master(dev, msg, m, cmd);
-
- w1_netlink_send_error(msg, m, cmd, err);
- err = 0;
-
- cmd_data += cmd->len + sizeof(struct w1_netlink_cmd);
- mlen -= cmd->len + sizeof(struct w1_netlink_cmd);
- }
-out_up:
- atomic_dec(&dev->refcnt);
- if (sl)
- atomic_dec(&sl->refcnt);
- mutex_unlock(&dev->mutex);
out_cont:
- if (!cmd || err)
- w1_netlink_send_error(msg, m, cmd, err);
- msg->len -= sizeof(struct w1_netlink_msg) + m->len;
+ if (err)
+ w1_netlink_send_error(msg, m, NULL, nsp->portid, err);
+ msg_len -= sizeof(struct w1_netlink_msg) + m->len;
m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len);
/*
@@ -400,6 +565,8 @@ out_cont:
if (err == -ENODEV)
err = 0;
}
+ if (block && atomic_sub_return(1, &block->refcnt) == 0)
+ kfree(block);
}
int w1_init_netlink(void)
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index b0922dc29658..1e9504e67650 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -27,6 +27,18 @@
#include "w1.h"
+/**
+ * enum w1_netlink_message_types - message type
+ *
+ * @W1_SLAVE_ADD: notification that a slave device was added
+ * @W1_SLAVE_REMOVE: notification that a slave device was removed
+ * @W1_MASTER_ADD: notification that a new bus master was added
+ * @W1_MASTER_REMOVE: notification that a bus master was removed
+ * @W1_MASTER_CMD: initiate operations on a specific master
+ * @W1_SLAVE_CMD: sends reset, selects the slave, then does a read/write/touch
+ * operation
+ * @W1_LIST_MASTERS: used to determine the bus master identifiers
+ */
enum w1_netlink_message_types {
W1_SLAVE_ADD = 0,
W1_SLAVE_REMOVE,
@@ -52,6 +64,22 @@ struct w1_netlink_msg
__u8 data[0];
};
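As a rough illustration of the message types documented above, here is a hypothetical userspace sketch (not part of this patch) that wraps a W1_LIST_MASTERS request in a connector message; it assumes <string.h>, <linux/connector.h> and this header are included, and uses only the layouts defined there:

/* Hypothetical sketch: build a W1_LIST_MASTERS request in "buf". */
static size_t w1_build_list_masters(void *buf, __u32 seq)
{
	struct cn_msg *cn = buf;
	struct w1_netlink_msg *m = (struct w1_netlink_msg *)(cn + 1);

	memset(buf, 0, sizeof(*cn) + sizeof(*m));
	cn->id.idx = CN_W1_IDX;
	cn->id.val = CN_W1_VAL;
	cn->seq = seq;
	cn->len = sizeof(*m);		/* payload: one w1_netlink_msg, no data */
	m->type = W1_LIST_MASTERS;
	m->len = 0;			/* no trailing w1_netlink_cmd needed */
	return sizeof(*cn) + sizeof(*m);
}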
+/**
+ * enum w1_commands - commands available for master or slave operations
+ * @W1_CMD_READ: read len bytes
+ * @W1_CMD_WRITE: write len bytes
+ * @W1_CMD_SEARCH: initiate a standard search, returns only the slave
+ * devices found during that search
+ * @W1_CMD_ALARM_SEARCH: search for devices that are currently alarming
+ * @W1_CMD_TOUCH: touches a series of bytes
+ * @W1_CMD_RESET: sends a bus reset on the given master
+ * @W1_CMD_SLAVE_ADD: adds a slave to the given master,
+ * 8 byte slave id at data[0]
+ * @W1_CMD_SLAVE_REMOVE: removes a slave from the given master,
+ *			8 byte slave id at data[0]
+ * @W1_CMD_LIST_SLAVES: returns the list of slaves registered on this master
+ * @W1_CMD_MAX: number of available commands
+ */
enum w1_commands {
W1_CMD_READ = 0,
W1_CMD_WRITE,
@@ -59,7 +87,10 @@ enum w1_commands {
W1_CMD_ALARM_SEARCH,
W1_CMD_TOUCH,
W1_CMD_RESET,
- W1_CMD_MAX,
+ W1_CMD_SLAVE_ADD,
+ W1_CMD_SLAVE_REMOVE,
+ W1_CMD_LIST_SLAVES,
+ W1_CMD_MAX
};
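A minimal sketch of how one of the new commands documented above would be appended to an already prepared W1_MASTER_CMD message; this is hypothetical helper code, not part of the patch, and relies only on the struct layouts defined in this header:

/* Hypothetical sketch: append W1_CMD_LIST_SLAVES to message "m"; the
 * caller still has to grow cn_msg::len by the returned amount. */
static size_t w1_append_list_slaves(struct w1_netlink_msg *m)
{
	struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(m->data + m->len);

	cmd->cmd = W1_CMD_LIST_SLAVES;
	cmd->len = 0;			/* no payload; the reply carries the slave ids */
	m->len += sizeof(*cmd);
	return sizeof(*cmd);
}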
struct w1_netlink_cmd
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 79d25894343a..74ec8fc5cc03 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -111,6 +111,15 @@ config WM8350_WATCHDOG
Support for the watchdog in the WM8350 AudioPlus PMIC. When
the watchdog triggers the system will be reset.
+config XILINX_WATCHDOG
+ tristate "Xilinx Watchdog timer"
+ select WATCHDOG_CORE
+ help
+	  Watchdog driver for the xps_timebase_wdt IP core.
+
+ To compile this driver as a module, choose M here: the
+ module will be called of_xilinx_wdt.
+
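For reference, a hypothetical .config fragment enabling the relocated entry as a module:

CONFIG_XILINX_WATCHDOG=m

The "select WATCHDOG_CORE" above pulls in the watchdog core automatically.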
# ALPHA Architecture
# ARM Architecture
@@ -292,7 +301,7 @@ config DAVINCI_WATCHDOG
config ORION_WATCHDOG
tristate "Orion watchdog"
- depends on ARCH_ORION5X || ARCH_KIRKWOOD || ARCH_DOVE
+ depends on ARCH_ORION5X || ARCH_KIRKWOOD || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU
select WATCHDOG_CORE
help
Say Y here to include support for the watchdog timer
@@ -421,6 +430,17 @@ config SIRFSOC_WATCHDOG
Support for CSR SiRFprimaII and SiRFatlasVI watchdog. When
the watchdog triggers the system will be reset.
+config TEGRA_WATCHDOG
+ tristate "Tegra watchdog"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer
+ embedded in NVIDIA Tegra SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tegra_wdt.
+
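Likewise, a hypothetical fragment for the new entry:

CONFIG_TEGRA_WATCHDOG=m

Per the Makefile change further down, this builds tegra_wdt.o into the tegra_wdt module.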
# AVR32 Architecture
config AT32AP700X_WDT
@@ -533,7 +553,7 @@ config GEODE_WDT
config SC520_WDT
tristate "AMD Elan SC520 processor Watchdog"
- depends on X86
+ depends on MELAN
help
This is the driver for the hardware watchdog built in to the
AMD "Elan" SC520 microcomputer commonly used in embedded systems.
@@ -1023,18 +1043,6 @@ config M54xx_WATCHDOG
# MicroBlaze Architecture
-config XILINX_WATCHDOG
- tristate "Xilinx Watchdog timer"
- depends on MICROBLAZE
- ---help---
- Watchdog driver for the xps_timebase_wdt ip core.
-
- IMPORTANT: The xps_timebase_wdt parent must have the property
- "clock-frequency" at device tree.
-
- To compile this driver as a module, choose M here: the
- module will be called of_xilinx_wdt.
-
# MIPS Architecture
config ATH79_WDT
@@ -1160,7 +1168,7 @@ config BCM2835_WDT
config BCM_KONA_WDT
tristate "BCM Kona Watchdog"
- depends on ARCH_BCM
+ depends on ARCH_BCM_MOBILE
select WATCHDOG_CORE
help
Support for the watchdog timer on the following Broadcom BCM281xx
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 985a66cda76f..1b5f3d5efad5 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o
obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o
obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
+obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index 5cf1621def9c..5614416f1032 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -239,7 +239,7 @@ static struct miscdevice acq_miscdev = {
* Init & exit routines
*/
-static int acq_probe(struct platform_device *dev)
+static int __init acq_probe(struct platform_device *dev)
{
int ret;
@@ -291,7 +291,6 @@ static void acq_shutdown(struct platform_device *dev)
}
static struct platform_driver acquirewdt_driver = {
- .probe = acq_probe,
.remove = acq_remove,
.shutdown = acq_shutdown,
.driver = {
@@ -306,20 +305,18 @@ static int __init acq_init(void)
pr_info("WDT driver for Acquire single board computer initialising\n");
- err = platform_driver_register(&acquirewdt_driver);
- if (err)
- return err;
-
acq_platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
- if (IS_ERR(acq_platform_device)) {
- err = PTR_ERR(acq_platform_device);
- goto unreg_platform_driver;
- }
+ if (IS_ERR(acq_platform_device))
+ return PTR_ERR(acq_platform_device);
+
+ err = platform_driver_probe(&acquirewdt_driver, acq_probe);
+ if (err)
+ goto unreg_platform_device;
return 0;
-unreg_platform_driver:
- platform_driver_unregister(&acquirewdt_driver);
+unreg_platform_device:
+ platform_device_unregister(acq_platform_device);
return err;
}
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index a8961addc59c..7796db7fa6e1 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -238,7 +238,7 @@ static struct miscdevice advwdt_miscdev = {
* Init & exit routines
*/
-static int advwdt_probe(struct platform_device *dev)
+static int __init advwdt_probe(struct platform_device *dev)
{
int ret;
@@ -299,7 +299,6 @@ static void advwdt_shutdown(struct platform_device *dev)
}
static struct platform_driver advwdt_driver = {
- .probe = advwdt_probe,
.remove = advwdt_remove,
.shutdown = advwdt_shutdown,
.driver = {
@@ -314,21 +313,19 @@ static int __init advwdt_init(void)
pr_info("WDT driver for Advantech single board computer initialising\n");
- err = platform_driver_register(&advwdt_driver);
- if (err)
- return err;
-
advwdt_platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
- if (IS_ERR(advwdt_platform_device)) {
- err = PTR_ERR(advwdt_platform_device);
- goto unreg_platform_driver;
- }
+ if (IS_ERR(advwdt_platform_device))
+ return PTR_ERR(advwdt_platform_device);
+
+ err = platform_driver_probe(&advwdt_driver, advwdt_probe);
+ if (err)
+ goto unreg_platform_device;
return 0;
-unreg_platform_driver:
- platform_driver_unregister(&advwdt_driver);
+unreg_platform_device:
+ platform_device_unregister(advwdt_platform_device);
return err;
}
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 3a996576343a..ae6c287a49cb 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c
index afe7d17e6776..25b5c67d3af9 100644
--- a/drivers/watchdog/at32ap700x_wdt.c
+++ b/drivers/watchdog/at32ap700x_wdt.c
@@ -323,10 +323,8 @@ static int __init at32_wdt_probe(struct platform_device *pdev)
wdt = devm_kzalloc(&pdev->dev, sizeof(struct wdt_at32ap700x),
GFP_KERNEL);
- if (!wdt) {
- dev_dbg(&pdev->dev, "no memory for wdt structure\n");
+ if (!wdt)
return -ENOMEM;
- }
wdt->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!wdt->regs) {
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 9fa1f69dac13..399c3fddecf6 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -22,7 +22,6 @@
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index cafa973c43be..8df450c090a9 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -114,10 +114,8 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
int err;
wdt = devm_kzalloc(dev, sizeof(struct bcm2835_wdt), GFP_KERNEL);
- if (!wdt) {
- dev_err(dev, "Failed to allocate memory for watchdog device");
+ if (!wdt)
return -ENOMEM;
- }
platform_set_drvdata(pdev, wdt);
spin_lock_init(&wdt->lock);
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index b4021a2b459b..b61fcc535979 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -16,7 +16,6 @@
#include <linux/bcm47xx_wdt.h>
#include <linux/bitops.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 4eb188b87f8e..5a8e879a430a 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -15,7 +15,6 @@
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
@@ -45,7 +44,6 @@
static struct {
void __iomem *regs;
struct timer_list timer;
- int default_ticks;
unsigned long inuse;
atomic_t ticks;
} bcm63xx_wdt_device;
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index f1b8d555080e..a8dbceb32914 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -138,14 +138,6 @@ static void __booke_wdt_enable(void *data)
val &= ~WDTP_MASK;
val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period));
-#ifdef CONFIG_PPC_BOOK3E_64
- /*
- * Crit ints are currently broken on PPC64 Book-E, so
- * just disable them for now.
- */
- val &= ~TCR_WIE;
-#endif
-
mtspr(SPRN_TCR, val);
}
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index f7ae49edb518..6d03e8e30f8b 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -27,7 +27,6 @@
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/completion.h>
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 213225edd059..e55ed702209f 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -21,7 +21,6 @@
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/major.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index f09c54e9686f..2e9589652e1e 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -185,7 +185,6 @@ static int da9052_wdt_probe(struct platform_device *pdev)
driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
GFP_KERNEL);
if (!driver_data) {
- dev_err(da9052->dev, "Unable to alloacate watchdog device\n");
ret = -ENOMEM;
goto err;
}
diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c
index 575f37a965a4..495089d8dbfe 100644
--- a/drivers/watchdog/da9055_wdt.c
+++ b/drivers/watchdog/da9055_wdt.c
@@ -151,10 +151,8 @@ static int da9055_wdt_probe(struct platform_device *pdev)
driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
GFP_KERNEL);
- if (!driver_data) {
- dev_err(da9055->dev, "Failed to allocate watchdog device\n");
+ if (!driver_data)
return -ENOMEM;
- }
driver_data->da9055 = da9055;
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index b1bae03742a9..d09ad2254b57 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -16,7 +16,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/device.h>
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index d1d07f2f69df..5f54e1e5819a 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -118,16 +118,9 @@ static int ep93xx_wdt_probe(struct platform_device *pdev)
int err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
-
- mmio_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!mmio_base)
- return -ENXIO;
+ mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mmio_base))
+ return PTR_ERR(mmio_base);
if (timeout < 1 || timeout > 3600) {
timeout = WDT_TIMEOUT;
@@ -172,9 +165,9 @@ static struct platform_driver ep93xx_wdt_driver = {
module_platform_driver(ep93xx_wdt_driver);
-MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>,"
- "Alessandro Zummo <a.zummo@towertech.it>,"
- "H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>");
+MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
+MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
MODULE_DESCRIPTION("EP93xx Watchdog");
MODULE_LICENSE("GPL");
MODULE_VERSION(WDT_VERSION);
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 4a6ae84b42bc..4c43e3fa8bd2 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -215,7 +215,7 @@ static struct miscdevice geodewdt_miscdev = {
.fops = &geodewdt_fops,
};
-static int geodewdt_probe(struct platform_device *dev)
+static int __init geodewdt_probe(struct platform_device *dev)
{
int ret;
@@ -255,7 +255,6 @@ static void geodewdt_shutdown(struct platform_device *dev)
}
static struct platform_driver geodewdt_driver = {
- .probe = geodewdt_probe,
.remove = geodewdt_remove,
.shutdown = geodewdt_shutdown,
.driver = {
@@ -268,20 +267,18 @@ static int __init geodewdt_init(void)
{
int ret;
- ret = platform_driver_register(&geodewdt_driver);
- if (ret)
- return ret;
-
geodewdt_platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
- if (IS_ERR(geodewdt_platform_device)) {
- ret = PTR_ERR(geodewdt_platform_device);
+ if (IS_ERR(geodewdt_platform_device))
+ return PTR_ERR(geodewdt_platform_device);
+
+ ret = platform_driver_probe(&geodewdt_driver, geodewdt_probe);
+ if (ret)
goto err;
- }
return 0;
err:
- platform_driver_unregister(&geodewdt_driver);
+ platform_device_unregister(geodewdt_platform_device);
return ret;
}
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 2b75e8b47279..75d2243b94f5 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -17,7 +17,6 @@
#include <linux/device.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index 25a2bfdb4e9d..d7befd58b391 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -36,7 +36,6 @@
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 04f8af65acfd..0ba1b7c99760 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -48,7 +48,7 @@
/* Module and version information */
#define DRV_NAME "iTCO_wdt"
-#define DRV_VERSION "1.10"
+#define DRV_VERSION "1.11"
/* Includes */
#include <linux/module.h> /* For module specific items */
@@ -92,9 +92,12 @@ static struct { /* this is private data for the iTCO_wdt device */
unsigned int iTCO_version;
struct resource *tco_res;
struct resource *smi_res;
- struct resource *gcs_res;
- /* NO_REBOOT flag is Memory-Mapped GCS register bit 5 (TCO version 2)*/
- unsigned long __iomem *gcs;
+ /*
+ * NO_REBOOT flag is Memory-Mapped GCS register bit 5 (TCO version 2),
+ * or memory-mapped PMC register bit 4 (TCO version 3).
+ */
+ struct resource *gcs_pmc_res;
+ unsigned long __iomem *gcs_pmc;
/* the lock for io operations */
spinlock_t io_lock;
struct platform_device *dev;
@@ -125,11 +128,19 @@ MODULE_PARM_DESC(turn_SMI_watchdog_clear_off,
* Some TCO specific functions
*/
-static inline unsigned int seconds_to_ticks(int seconds)
+/*
+ * The iTCO v1 and v2's internal timer is stored as ticks which decrement
+ * every 0.6 seconds. v3's internal timer is stored as seconds (some
+ * datasheets incorrectly state 0.6 seconds).
+ */
+static inline unsigned int seconds_to_ticks(int secs)
{
- /* the internal timer is stored as ticks which decrement
- * every 0.6 seconds */
- return (seconds * 10) / 6;
+ return iTCO_wdt_private.iTCO_version == 3 ? secs : (secs * 10) / 6;
+}
+
+static inline unsigned int ticks_to_seconds(int ticks)
+{
+ return iTCO_wdt_private.iTCO_version == 3 ? ticks : (ticks * 6) / 10;
}
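A quick worked example of the conversion above: a requested 30 s heartbeat becomes (30 * 10) / 6 = 50 ticks on a v1/v2 TCO, while a v3 TCO stores 30 directly; ticks_to_seconds() applies the inverse, so 50 ticks read back as (50 * 6) / 10 = 30 s.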
static void iTCO_wdt_set_NO_REBOOT_bit(void)
@@ -137,10 +148,14 @@ static void iTCO_wdt_set_NO_REBOOT_bit(void)
u32 val32;
/* Set the NO_REBOOT bit: this disables reboots */
- if (iTCO_wdt_private.iTCO_version == 2) {
- val32 = readl(iTCO_wdt_private.gcs);
+ if (iTCO_wdt_private.iTCO_version == 3) {
+ val32 = readl(iTCO_wdt_private.gcs_pmc);
+ val32 |= 0x00000010;
+ writel(val32, iTCO_wdt_private.gcs_pmc);
+ } else if (iTCO_wdt_private.iTCO_version == 2) {
+ val32 = readl(iTCO_wdt_private.gcs_pmc);
val32 |= 0x00000020;
- writel(val32, iTCO_wdt_private.gcs);
+ writel(val32, iTCO_wdt_private.gcs_pmc);
} else if (iTCO_wdt_private.iTCO_version == 1) {
pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
val32 |= 0x00000002;
@@ -154,12 +169,20 @@ static int iTCO_wdt_unset_NO_REBOOT_bit(void)
u32 val32;
/* Unset the NO_REBOOT bit: this enables reboots */
- if (iTCO_wdt_private.iTCO_version == 2) {
- val32 = readl(iTCO_wdt_private.gcs);
+ if (iTCO_wdt_private.iTCO_version == 3) {
+ val32 = readl(iTCO_wdt_private.gcs_pmc);
+ val32 &= 0xffffffef;
+ writel(val32, iTCO_wdt_private.gcs_pmc);
+
+ val32 = readl(iTCO_wdt_private.gcs_pmc);
+ if (val32 & 0x00000010)
+ ret = -EIO;
+ } else if (iTCO_wdt_private.iTCO_version == 2) {
+ val32 = readl(iTCO_wdt_private.gcs_pmc);
val32 &= 0xffffffdf;
- writel(val32, iTCO_wdt_private.gcs);
+ writel(val32, iTCO_wdt_private.gcs_pmc);
- val32 = readl(iTCO_wdt_private.gcs);
+ val32 = readl(iTCO_wdt_private.gcs_pmc);
if (val32 & 0x00000020)
ret = -EIO;
} else if (iTCO_wdt_private.iTCO_version == 1) {
@@ -192,7 +215,7 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev)
/* Force the timer to its reload value by writing to the TCO_RLD
register */
- if (iTCO_wdt_private.iTCO_version == 2)
+ if (iTCO_wdt_private.iTCO_version >= 2)
outw(0x01, TCO_RLD);
else if (iTCO_wdt_private.iTCO_version == 1)
outb(0x01, TCO_RLD);
@@ -240,9 +263,9 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, wd_dev->timeout);
/* Reload the timer by writing to the TCO Timer Counter register */
- if (iTCO_wdt_private.iTCO_version == 2)
+ if (iTCO_wdt_private.iTCO_version >= 2) {
outw(0x01, TCO_RLD);
- else if (iTCO_wdt_private.iTCO_version == 1) {
+ } else if (iTCO_wdt_private.iTCO_version == 1) {
/* Reset the timeout status bit so that the timer
* needs to count down twice again before rebooting */
outw(0x0008, TCO1_STS); /* write 1 to clear bit */
@@ -270,14 +293,14 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
/* "Values of 0h-3h are ignored and should not be attempted" */
if (tmrval < 0x04)
return -EINVAL;
- if (((iTCO_wdt_private.iTCO_version == 2) && (tmrval > 0x3ff)) ||
+ if (((iTCO_wdt_private.iTCO_version >= 2) && (tmrval > 0x3ff)) ||
((iTCO_wdt_private.iTCO_version == 1) && (tmrval > 0x03f)))
return -EINVAL;
iTCO_vendor_pre_set_heartbeat(tmrval);
/* Write new heartbeat to watchdog */
- if (iTCO_wdt_private.iTCO_version == 2) {
+ if (iTCO_wdt_private.iTCO_version >= 2) {
spin_lock(&iTCO_wdt_private.io_lock);
val16 = inw(TCOv2_TMR);
val16 &= 0xfc00;
@@ -312,13 +335,13 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
unsigned int time_left = 0;
/* read the TCO Timer */
- if (iTCO_wdt_private.iTCO_version == 2) {
+ if (iTCO_wdt_private.iTCO_version >= 2) {
spin_lock(&iTCO_wdt_private.io_lock);
val16 = inw(TCO_RLD);
val16 &= 0x3ff;
spin_unlock(&iTCO_wdt_private.io_lock);
- time_left = (val16 * 6) / 10;
+ time_left = ticks_to_seconds(val16);
} else if (iTCO_wdt_private.iTCO_version == 1) {
spin_lock(&iTCO_wdt_private.io_lock);
val8 = inb(TCO_RLD);
@@ -327,7 +350,7 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
val8 += (inb(TCOv1_TMR) & 0x3f);
spin_unlock(&iTCO_wdt_private.io_lock);
- time_left = (val8 * 6) / 10;
+ time_left = ticks_to_seconds(val8);
}
return time_left;
}
@@ -347,15 +370,15 @@ static const struct watchdog_info ident = {
static const struct watchdog_ops iTCO_wdt_ops = {
.owner = THIS_MODULE,
.start = iTCO_wdt_start,
- .stop = iTCO_wdt_stop,
- .ping = iTCO_wdt_ping,
+ .stop = iTCO_wdt_stop,
+ .ping = iTCO_wdt_ping,
.set_timeout = iTCO_wdt_set_timeout,
.get_timeleft = iTCO_wdt_get_timeleft,
};
static struct watchdog_device iTCO_wdt_watchdog_dev = {
.info = &ident,
- .ops = &iTCO_wdt_ops,
+ .ops = &iTCO_wdt_ops,
};
/*
@@ -376,16 +399,16 @@ static void iTCO_wdt_cleanup(void)
resource_size(iTCO_wdt_private.tco_res));
release_region(iTCO_wdt_private.smi_res->start,
resource_size(iTCO_wdt_private.smi_res));
- if (iTCO_wdt_private.iTCO_version == 2) {
- iounmap(iTCO_wdt_private.gcs);
- release_mem_region(iTCO_wdt_private.gcs_res->start,
- resource_size(iTCO_wdt_private.gcs_res));
+ if (iTCO_wdt_private.iTCO_version >= 2) {
+ iounmap(iTCO_wdt_private.gcs_pmc);
+ release_mem_region(iTCO_wdt_private.gcs_pmc_res->start,
+ resource_size(iTCO_wdt_private.gcs_pmc_res));
}
iTCO_wdt_private.tco_res = NULL;
iTCO_wdt_private.smi_res = NULL;
- iTCO_wdt_private.gcs_res = NULL;
- iTCO_wdt_private.gcs = NULL;
+ iTCO_wdt_private.gcs_pmc_res = NULL;
+ iTCO_wdt_private.gcs_pmc = NULL;
}
static int iTCO_wdt_probe(struct platform_device *dev)
@@ -414,27 +437,27 @@ static int iTCO_wdt_probe(struct platform_device *dev)
iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);
/*
- * Get the Memory-Mapped GCS register, we need it for the
- * NO_REBOOT flag (TCO v2).
+ * Get the Memory-Mapped GCS or PMC register, we need it for the
+ * NO_REBOOT flag (TCO v2 and v3).
*/
- if (iTCO_wdt_private.iTCO_version == 2) {
- iTCO_wdt_private.gcs_res = platform_get_resource(dev,
+ if (iTCO_wdt_private.iTCO_version >= 2) {
+ iTCO_wdt_private.gcs_pmc_res = platform_get_resource(dev,
IORESOURCE_MEM,
- ICH_RES_MEM_GCS);
+ ICH_RES_MEM_GCS_PMC);
- if (!iTCO_wdt_private.gcs_res)
+ if (!iTCO_wdt_private.gcs_pmc_res)
goto out;
- if (!request_mem_region(iTCO_wdt_private.gcs_res->start,
- resource_size(iTCO_wdt_private.gcs_res), dev->name)) {
+ if (!request_mem_region(iTCO_wdt_private.gcs_pmc_res->start,
+ resource_size(iTCO_wdt_private.gcs_pmc_res), dev->name)) {
ret = -EBUSY;
goto out;
}
- iTCO_wdt_private.gcs = ioremap(iTCO_wdt_private.gcs_res->start,
- resource_size(iTCO_wdt_private.gcs_res));
- if (!iTCO_wdt_private.gcs) {
+ iTCO_wdt_private.gcs_pmc = ioremap(iTCO_wdt_private.gcs_pmc_res->start,
+ resource_size(iTCO_wdt_private.gcs_pmc_res));
+ if (!iTCO_wdt_private.gcs_pmc) {
ret = -EIO;
- goto unreg_gcs;
+ goto unreg_gcs_pmc;
}
}
@@ -442,7 +465,7 @@ static int iTCO_wdt_probe(struct platform_device *dev)
if (iTCO_wdt_unset_NO_REBOOT_bit() && iTCO_vendor_check_noreboot_on()) {
pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
ret = -ENODEV; /* Cannot reset NO_REBOOT bit */
- goto unmap_gcs;
+ goto unmap_gcs_pmc;
}
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
@@ -454,7 +477,7 @@ static int iTCO_wdt_probe(struct platform_device *dev)
pr_err("I/O address 0x%04llx already in use, device disabled\n",
(u64)SMI_EN);
ret = -EBUSY;
- goto unmap_gcs;
+ goto unmap_gcs_pmc;
}
if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
/*
@@ -478,14 +501,18 @@ static int iTCO_wdt_probe(struct platform_device *dev)
ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
/* Clear out the (probably old) status */
- outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
- outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */
- outw(0x0004, TCO2_STS); /* Clear BOOT_STS bit */
+ if (iTCO_wdt_private.iTCO_version == 3) {
+ outl(0x20008, TCO1_STS);
+ } else {
+ outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
+ outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */
+ outw(0x0004, TCO2_STS); /* Clear BOOT_STS bit */
+ }
iTCO_wdt_watchdog_dev.bootstatus = 0;
iTCO_wdt_watchdog_dev.timeout = WATCHDOG_TIMEOUT;
watchdog_set_nowayout(&iTCO_wdt_watchdog_dev, nowayout);
- iTCO_wdt_watchdog_dev.parent = dev->dev.parent;
+ iTCO_wdt_watchdog_dev.parent = &dev->dev;
/* Make sure the watchdog is not running */
iTCO_wdt_stop(&iTCO_wdt_watchdog_dev);
@@ -515,18 +542,18 @@ unreg_tco:
unreg_smi:
release_region(iTCO_wdt_private.smi_res->start,
resource_size(iTCO_wdt_private.smi_res));
-unmap_gcs:
- if (iTCO_wdt_private.iTCO_version == 2)
- iounmap(iTCO_wdt_private.gcs);
-unreg_gcs:
- if (iTCO_wdt_private.iTCO_version == 2)
- release_mem_region(iTCO_wdt_private.gcs_res->start,
- resource_size(iTCO_wdt_private.gcs_res));
+unmap_gcs_pmc:
+ if (iTCO_wdt_private.iTCO_version >= 2)
+ iounmap(iTCO_wdt_private.gcs_pmc);
+unreg_gcs_pmc:
+ if (iTCO_wdt_private.iTCO_version >= 2)
+ release_mem_region(iTCO_wdt_private.gcs_pmc_res->start,
+ resource_size(iTCO_wdt_private.gcs_pmc_res));
out:
iTCO_wdt_private.tco_res = NULL;
iTCO_wdt_private.smi_res = NULL;
- iTCO_wdt_private.gcs_res = NULL;
- iTCO_wdt_private.gcs = NULL;
+ iTCO_wdt_private.gcs_pmc_res = NULL;
+ iTCO_wdt_private.gcs_pmc = NULL;
return ret;
}
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index 7ae36690c449..4247c498ee78 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -277,7 +277,7 @@ static struct miscdevice ibwdt_miscdev = {
* Init & exit routines
*/
-static int ibwdt_probe(struct platform_device *dev)
+static int __init ibwdt_probe(struct platform_device *dev)
{
int res;
@@ -336,7 +336,6 @@ static void ibwdt_shutdown(struct platform_device *dev)
}
static struct platform_driver ibwdt_driver = {
- .probe = ibwdt_probe,
.remove = ibwdt_remove,
.shutdown = ibwdt_shutdown,
.driver = {
@@ -351,21 +350,19 @@ static int __init ibwdt_init(void)
pr_info("WDT driver for IB700 single board computer initialising\n");
- err = platform_driver_register(&ibwdt_driver);
- if (err)
- return err;
-
ibwdt_platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
- if (IS_ERR(ibwdt_platform_device)) {
- err = PTR_ERR(ibwdt_platform_device);
- goto unreg_platform_driver;
- }
+ if (IS_ERR(ibwdt_platform_device))
+ return PTR_ERR(ibwdt_platform_device);
+
+ err = platform_driver_probe(&ibwdt_driver, ibwdt_probe);
+ if (err)
+ goto unreg_platform_device;
return 0;
-unreg_platform_driver:
- platform_driver_unregister(&ibwdt_driver);
+unreg_platform_device:
+ platform_device_unregister(ibwdt_platform_device);
return err;
}
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index db0a34460e57..366b0474f278 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -360,7 +360,7 @@ struct ibmasr_id {
int type;
};
-static struct ibmasr_id __initdata ibmasr_id_table[] = {
+static struct ibmasr_id ibmasr_id_table[] __initdata = {
{ "IBM Automatic Server Restart - eserver xSeries 220", ASMTYPE_TOPAZ },
{ "IBM Automatic Server Restart - Machine Type 8673", ASMTYPE_PEARL },
{ "IBM Automatic Server Restart - Machine Type 8480", ASMTYPE_JASPER },
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 1b5c25a47b87..5d20cdd30efe 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -41,24 +41,15 @@ MODULE_PARM_DESC(nowayout,
static void indydog_start(void)
{
- u32 mc_ctrl0;
-
spin_lock(&indydog_lock);
- mc_ctrl0 = sgimc->cpuctrl0;
- mc_ctrl0 = sgimc->cpuctrl0 | SGIMC_CCTRL0_WDOG;
- sgimc->cpuctrl0 = mc_ctrl0;
+ sgimc->cpuctrl0 |= SGIMC_CCTRL0_WDOG;
spin_unlock(&indydog_lock);
}
static void indydog_stop(void)
{
- u32 mc_ctrl0;
-
spin_lock(&indydog_lock);
-
- mc_ctrl0 = sgimc->cpuctrl0;
- mc_ctrl0 &= ~SGIMC_CCTRL0_WDOG;
- sgimc->cpuctrl0 = mc_ctrl0;
+ sgimc->cpuctrl0 &= ~SGIMC_CCTRL0_WDOG;
spin_unlock(&indydog_lock);
pr_info("Stopped watchdog timer\n");
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index e13e65e996aa..0caab6241eb7 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -211,7 +211,6 @@ static int intel_scu_set_heartbeat(u32 t)
int ipc_ret;
int retry_count;
u32 soft_value;
- u32 hw_pre_value;
u32 hw_value;
watchdog_device.timer_set = t;
@@ -273,8 +272,7 @@ static int intel_scu_set_heartbeat(u32 t)
watchdog_device.timer_load_count_addr);
/* read count value before starting timer */
- hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
- hw_pre_value = hw_pre_value & 0xFFFF0000;
+ ioread32(watchdog_device.timer_load_count_addr);
/* Start the timer */
iowrite32(0x00000003, watchdog_device.timer_control_addr);
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index e2bba68ae71e..0b93739c0106 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -54,6 +54,7 @@
/* Defaults for Module Parameter */
#define DEFAULT_NOGAMEPORT 0
+#define DEFAULT_NOCIR 0
#define DEFAULT_EXCLUSIVE 1
#define DEFAULT_TIMEOUT 60
#define DEFAULT_TESTMODE 0
@@ -136,11 +137,13 @@
#define WDTS_LOCKED 3
#define WDTS_USE_GP 4
#define WDTS_EXPECTED 5
+#define WDTS_USE_CIR 6
static unsigned int base, gpact, ciract, max_units, chip_type;
static unsigned long wdt_status;
static int nogameport = DEFAULT_NOGAMEPORT;
+static int nocir = DEFAULT_NOCIR;
static int exclusive = DEFAULT_EXCLUSIVE;
static int timeout = DEFAULT_TIMEOUT;
static int testmode = DEFAULT_TESTMODE;
@@ -149,6 +152,9 @@ static bool nowayout = DEFAULT_NOWAYOUT;
module_param(nogameport, int, 0);
MODULE_PARM_DESC(nogameport, "Forbid the activation of game port, default="
__MODULE_STRING(DEFAULT_NOGAMEPORT));
+module_param(nocir, int, 0);
+MODULE_PARM_DESC(nocir, "Forbid the use of Consumer IR interrupts to reset timer, default="
+ __MODULE_STRING(DEFAULT_NOCIR));
module_param(exclusive, int, 0);
MODULE_PARM_DESC(exclusive, "Watchdog exclusive device open, default="
__MODULE_STRING(DEFAULT_EXCLUSIVE));
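As a usage note (hypothetical invocation, not part of the patch), the new knob is passed like the existing ones, e.g. "modprobe it87_wdt nocir=1", which keeps the driver off the Consumer IR registers and forces the SuperIO-based keepalive path added below.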
@@ -258,9 +264,17 @@ static void wdt_keepalive(void)
{
if (test_bit(WDTS_USE_GP, &wdt_status))
inb(base);
- else
+ else if (test_bit(WDTS_USE_CIR, &wdt_status))
/* The timer reloads with around 5 msec delay */
outb(0x55, CIR_DR(base));
+ else {
+ if (superio_enter())
+ return;
+
+ superio_select(GPIO);
+ wdt_update_timeout();
+ superio_exit();
+ }
set_bit(WDTS_KEEPALIVE, &wdt_status);
}
@@ -273,7 +287,7 @@ static int wdt_start(void)
superio_select(GPIO);
if (test_bit(WDTS_USE_GP, &wdt_status))
superio_outb(WDT_GAMEPORT, WDTCTRL);
- else
+ else if (test_bit(WDTS_USE_CIR, &wdt_status))
superio_outb(WDT_CIRINT, WDTCTRL);
wdt_update_timeout();
@@ -660,7 +674,7 @@ static int __init it87_wdt_init(void)
}
/* If we haven't Gameport support, try to get CIR support */
- if (!test_bit(WDTS_USE_GP, &wdt_status)) {
+ if (!nocir && !test_bit(WDTS_USE_GP, &wdt_status)) {
if (!request_region(CIR_BASE, 8, WATCHDOG_NAME)) {
if (gp_rreq_fail)
pr_err("I/O Address 0x%04x and 0x%04x already in use\n",
@@ -682,6 +696,7 @@ static int __init it87_wdt_init(void)
superio_select(GAMEPORT);
superio_outb(gpact, ACTREG);
}
+ set_bit(WDTS_USE_CIR, &wdt_status);
}
if (timeout < 1 || timeout > max_units * 60) {
@@ -707,7 +722,7 @@ static int __init it87_wdt_init(void)
}
/* Initialize CIR to use it as keepalive source */
- if (!test_bit(WDTS_USE_GP, &wdt_status)) {
+ if (test_bit(WDTS_USE_CIR, &wdt_status)) {
outb(0x00, CIR_RCR(base));
outb(0xc0, CIR_TCR1(base));
outb(0x5c, CIR_TCR2(base));
@@ -717,9 +732,9 @@ static int __init it87_wdt_init(void)
outb(0x09, CIR_IER(base));
}
- pr_info("Chip IT%04x revision %d initialized. timeout=%d sec (nowayout=%d testmode=%d exclusive=%d nogameport=%d)\n",
+ pr_info("Chip IT%04x revision %d initialized. timeout=%d sec (nowayout=%d testmode=%d exclusive=%d nogameport=%d nocir=%d)\n",
chip_type, chip_rev, timeout,
- nowayout, testmode, exclusive, nogameport);
+ nowayout, testmode, exclusive, nogameport, nocir);
superio_exit();
return 0;
@@ -727,8 +742,10 @@ static int __init it87_wdt_init(void)
err_out_reboot:
unregister_reboot_notifier(&wdt_notifier);
err_out_region:
- release_region(base, test_bit(WDTS_USE_GP, &wdt_status) ? 1 : 8);
- if (!test_bit(WDTS_USE_GP, &wdt_status)) {
+ if (test_bit(WDTS_USE_GP, &wdt_status))
+ release_region(base, 1);
+ else if (test_bit(WDTS_USE_CIR, &wdt_status)) {
+ release_region(base, 8);
superio_select(CIR);
superio_outb(ciract, ACTREG);
}
@@ -754,7 +771,7 @@ static void __exit it87_wdt_exit(void)
if (test_bit(WDTS_USE_GP, &wdt_status)) {
superio_select(GAMEPORT);
superio_outb(gpact, ACTREG);
- } else {
+ } else if (test_bit(WDTS_USE_CIR, &wdt_status)) {
superio_select(CIR);
superio_outb(ciract, ACTREG);
}
@@ -763,7 +780,11 @@ static void __exit it87_wdt_exit(void)
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
- release_region(base, test_bit(WDTS_USE_GP, &wdt_status) ? 1 : 8);
+
+ if (test_bit(WDTS_USE_GP, &wdt_status))
+ release_region(base, 1);
+ else if (test_bit(WDTS_USE_CIR, &wdt_status))
+ release_region(base, 8);
}
module_init(it87_wdt_init);
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 3aa50cfa335f..91e45ca589e6 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -18,7 +18,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/device.h>
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index bdb3f4a5b27c..0e9cc6f5a919 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -20,7 +20,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index c1f65b4c0aa4..7831955cd9e1 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -237,6 +237,7 @@ static const struct of_device_id mpc8xxx_wdt_match[] = {
.compatible = "fsl,mpc823-wdt",
.data = &(struct mpc8xxx_wdt_type) {
.prescaler = 0x800,
+ .hw_enabled = true,
},
},
{},
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index edb31ffd7927..ff27c4ac96e4 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -40,7 +40,6 @@
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/completion.h>
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index a0d893b0930e..7135803ca1a3 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -12,7 +12,6 @@
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/kernel.h>
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 461208831428..4baf2d788920 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -708,10 +708,13 @@ static int __init octeon_wdt_init(void)
cpumask_clear(&irq_enabled_cpus);
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu)
octeon_wdt_setup_interrupt(cpu);
- register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
+ __register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
+ cpu_notifier_register_done();
+
out:
return ret;
}
@@ -725,7 +728,8 @@ static void __exit octeon_wdt_cleanup(void)
misc_deregister(&octeon_wdt_miscdev);
- unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
+ cpu_notifier_register_begin();
+ __unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
for_each_online_cpu(cpu) {
int core = cpu2core(cpu);
@@ -734,6 +738,9 @@ static void __exit octeon_wdt_cleanup(void)
/* Free the interrupt handler */
free_irq(OCTEON_IRQ_WDOG0 + core, octeon_wdt_poke_irq);
}
+
+ cpu_notifier_register_done();
+
/*
* Disable the boot-bus memory, the code it points to is soon
* to go missing.
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index fb57103c8ebc..57ccae8327ff 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -1,6 +1,7 @@
/*
* Watchdog Device Driver for Xilinx axi/xps_timebase_wdt
*
+ * (C) Copyright 2013 - 2014 Xilinx, Inc.
* (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
*
* This program is free software; you can redistribute it and/or
@@ -9,18 +10,13 @@
* 2 of the License, or (at your option) any later version.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/watchdog.h>
#include <linux/io.h>
-#include <linux/uaccess.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -43,102 +39,103 @@
#define XWT_TIMER_FAILED 0xFFFFFFFF
#define WATCHDOG_NAME "Xilinx Watchdog"
-#define PFX WATCHDOG_NAME ": "
struct xwdt_device {
- struct resource res;
void __iomem *base;
- u32 nowayout;
u32 wdt_interval;
- u32 boot_status;
+ spinlock_t spinlock;
+ struct watchdog_device xilinx_wdt_wdd;
};
-static struct xwdt_device xdev;
-
-static u32 timeout;
-static u32 control_status_reg;
-static u8 expect_close;
-static u8 no_timeout;
-static unsigned long driver_open;
-
-static DEFINE_SPINLOCK(spinlock);
-
-static void xwdt_start(void)
+static int xilinx_wdt_start(struct watchdog_device *wdd)
{
- spin_lock(&spinlock);
+ u32 control_status_reg;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+
+ spin_lock(&xdev->spinlock);
/* Clean previous status and enable the watchdog timer */
- control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+ control_status_reg = ioread32(xdev->base + XWT_TWCSR0_OFFSET);
control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
iowrite32((control_status_reg | XWT_CSR0_EWDT1_MASK),
- xdev.base + XWT_TWCSR0_OFFSET);
+ xdev->base + XWT_TWCSR0_OFFSET);
+
+ iowrite32(XWT_CSRX_EWDT2_MASK, xdev->base + XWT_TWCSR1_OFFSET);
- iowrite32(XWT_CSRX_EWDT2_MASK, xdev.base + XWT_TWCSR1_OFFSET);
+ spin_unlock(&xdev->spinlock);
- spin_unlock(&spinlock);
+ return 0;
}
-static void xwdt_stop(void)
+static int xilinx_wdt_stop(struct watchdog_device *wdd)
{
- spin_lock(&spinlock);
+ u32 control_status_reg;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+
+ spin_lock(&xdev->spinlock);
- control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+ control_status_reg = ioread32(xdev->base + XWT_TWCSR0_OFFSET);
iowrite32((control_status_reg & ~XWT_CSR0_EWDT1_MASK),
- xdev.base + XWT_TWCSR0_OFFSET);
+ xdev->base + XWT_TWCSR0_OFFSET);
- iowrite32(0, xdev.base + XWT_TWCSR1_OFFSET);
+ iowrite32(0, xdev->base + XWT_TWCSR1_OFFSET);
- spin_unlock(&spinlock);
+ spin_unlock(&xdev->spinlock);
pr_info("Stopped!\n");
+
+ return 0;
}
-static void xwdt_keepalive(void)
+static int xilinx_wdt_keepalive(struct watchdog_device *wdd)
{
- spin_lock(&spinlock);
+ u32 control_status_reg;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
- control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
- control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
- iowrite32(control_status_reg, xdev.base + XWT_TWCSR0_OFFSET);
+ spin_lock(&xdev->spinlock);
- spin_unlock(&spinlock);
-}
+ control_status_reg = ioread32(xdev->base + XWT_TWCSR0_OFFSET);
+ control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
+ iowrite32(control_status_reg, xdev->base + XWT_TWCSR0_OFFSET);
-static void xwdt_get_status(int *status)
-{
- int new_status;
+ spin_unlock(&xdev->spinlock);
- spin_lock(&spinlock);
+ return 0;
+}
- control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
- new_status = ((control_status_reg &
- (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK)) != 0);
- spin_unlock(&spinlock);
+static const struct watchdog_info xilinx_wdt_ident = {
+ .options = WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+ .firmware_version = 1,
+ .identity = WATCHDOG_NAME,
+};
- *status = 0;
- if (new_status & 1)
- *status |= WDIOF_CARDRESET;
-}
+static const struct watchdog_ops xilinx_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = xilinx_wdt_start,
+ .stop = xilinx_wdt_stop,
+ .ping = xilinx_wdt_keepalive,
+};
-static u32 xwdt_selftest(void)
+static u32 xwdt_selftest(struct xwdt_device *xdev)
{
int i;
u32 timer_value1;
u32 timer_value2;
- spin_lock(&spinlock);
+ spin_lock(&xdev->spinlock);
- timer_value1 = ioread32(xdev.base + XWT_TBR_OFFSET);
- timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET);
+ timer_value1 = ioread32(xdev->base + XWT_TBR_OFFSET);
+ timer_value2 = ioread32(xdev->base + XWT_TBR_OFFSET);
for (i = 0;
((i <= XWT_MAX_SELFTEST_LOOP_COUNT) &&
(timer_value2 == timer_value1)); i++) {
- timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET);
+ timer_value2 = ioread32(xdev->base + XWT_TBR_OFFSET);
}
- spin_unlock(&spinlock);
+ spin_unlock(&xdev->spinlock);
if (timer_value2 != timer_value1)
return ~XWT_TIMER_FAILED;
@@ -146,238 +143,83 @@ static u32 xwdt_selftest(void)
return XWT_TIMER_FAILED;
}
-static int xwdt_open(struct inode *inode, struct file *file)
-{
- /* Only one process can handle the wdt at a time */
- if (test_and_set_bit(0, &driver_open))
- return -EBUSY;
-
- /* Make sure that the module are always loaded...*/
- if (xdev.nowayout)
- __module_get(THIS_MODULE);
-
- xwdt_start();
- pr_info("Started...\n");
-
- return nonseekable_open(inode, file);
-}
-
-static int xwdt_release(struct inode *inode, struct file *file)
-{
- if (expect_close == 42) {
- xwdt_stop();
- } else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- xwdt_keepalive();
- }
-
- clear_bit(0, &driver_open);
- expect_close = 0;
- return 0;
-}
-
-/*
- * xwdt_write:
- * @file: file handle to the watchdog
- * @buf: buffer to write (unused as data does not matter here
- * @count: count of bytes
- * @ppos: pointer to the position to write. No seeks allowed
- *
- * A write to a watchdog device is defined as a keepalive signal. Any
- * write of data will do, as we don't define content meaning.
- */
-static ssize_t xwdt_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
-{
- if (len) {
- if (!xdev.nowayout) {
- size_t i;
-
- /* In case it was set long ago */
- expect_close = 0;
-
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 42;
- }
- }
- xwdt_keepalive();
- }
- return len;
-}
-
-static const struct watchdog_info ident = {
- .options = WDIOF_MAGICCLOSE |
- WDIOF_KEEPALIVEPING,
- .firmware_version = 1,
- .identity = WATCHDOG_NAME,
-};
-
-/*
- * xwdt_ioctl:
- * @file: file handle to the device
- * @cmd: watchdog command
- * @arg: argument pointer
- *
- * The watchdog API defines a common set of functions for all watchdogs
- * according to their available features.
- */
-static long xwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int status;
-
- union {
- struct watchdog_info __user *ident;
- int __user *i;
- } uarg;
-
- uarg.i = (int __user *)arg;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(uarg.ident, &ident,
- sizeof(ident)) ? -EFAULT : 0;
-
- case WDIOC_GETBOOTSTATUS:
- return put_user(xdev.boot_status, uarg.i);
-
- case WDIOC_GETSTATUS:
- xwdt_get_status(&status);
- return put_user(status, uarg.i);
-
- case WDIOC_KEEPALIVE:
- xwdt_keepalive();
- return 0;
-
- case WDIOC_GETTIMEOUT:
- if (no_timeout)
- return -ENOTTY;
- else
- return put_user(timeout, uarg.i);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations xwdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = xwdt_write,
- .open = xwdt_open,
- .release = xwdt_release,
- .unlocked_ioctl = xwdt_ioctl,
-};
-
-static struct miscdevice xwdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &xwdt_fops,
-};
-
static int xwdt_probe(struct platform_device *pdev)
{
int rc;
- u32 *tmptr;
- u32 *pfreq;
-
- no_timeout = 0;
-
- pfreq = (u32 *)of_get_property(pdev->dev.of_node,
- "clock-frequency", NULL);
-
- if (pfreq == NULL) {
- pr_warn("The watchdog clock frequency cannot be obtained!\n");
- no_timeout = 1;
- }
-
- rc = of_address_to_resource(pdev->dev.of_node, 0, &xdev.res);
- if (rc) {
- pr_warn("invalid address!\n");
- return rc;
- }
-
- tmptr = (u32 *)of_get_property(pdev->dev.of_node,
- "xlnx,wdt-interval", NULL);
- if (tmptr == NULL) {
- pr_warn("Parameter \"xlnx,wdt-interval\" not found in device tree!\n");
- no_timeout = 1;
- } else {
- xdev.wdt_interval = *tmptr;
- }
-
- tmptr = (u32 *)of_get_property(pdev->dev.of_node,
- "xlnx,wdt-enable-once", NULL);
- if (tmptr == NULL) {
- pr_warn("Parameter \"xlnx,wdt-enable-once\" not found in device tree!\n");
- xdev.nowayout = WATCHDOG_NOWAYOUT;
- }
-
-/*
- * Twice of the 2^wdt_interval / freq because the first wdt overflow is
- * ignored (interrupt), reset is only generated at second wdt overflow
- */
- if (!no_timeout)
- timeout = 2 * ((1<<xdev.wdt_interval) / *pfreq);
-
- if (!request_mem_region(xdev.res.start,
- xdev.res.end - xdev.res.start + 1, WATCHDOG_NAME)) {
- rc = -ENXIO;
- pr_err("memory request failure!\n");
- goto err_out;
- }
-
- xdev.base = ioremap(xdev.res.start, xdev.res.end - xdev.res.start + 1);
- if (xdev.base == NULL) {
- rc = -ENOMEM;
- pr_err("ioremap failure!\n");
- goto release_mem;
- }
-
- rc = xwdt_selftest();
+ u32 pfreq = 0, enable_once = 0;
+ struct resource *res;
+ struct xwdt_device *xdev;
+ struct watchdog_device *xilinx_wdt_wdd;
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
+ xilinx_wdt_wdd->info = &xilinx_wdt_ident;
+ xilinx_wdt_wdd->ops = &xilinx_wdt_ops;
+ xilinx_wdt_wdd->parent = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xdev->base))
+ return PTR_ERR(xdev->base);
+
+ rc = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &pfreq);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "The watchdog clock frequency cannot be obtained\n");
+
+ rc = of_property_read_u32(pdev->dev.of_node, "xlnx,wdt-interval",
+ &xdev->wdt_interval);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "Parameter \"xlnx,wdt-interval\" not found\n");
+
+ rc = of_property_read_u32(pdev->dev.of_node, "xlnx,wdt-enable-once",
+ &enable_once);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "Parameter \"xlnx,wdt-enable-once\" not found\n");
+
+ watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
+
+	/*
+	 * Twice 2^wdt_interval / freq because the first wdt overflow is
+	 * ignored (it only raises an interrupt); reset is generated at the
+	 * second wdt overflow.
+	 */
+ if (pfreq && xdev->wdt_interval)
+ xilinx_wdt_wdd->timeout = 2 * ((1 << xdev->wdt_interval) /
+ pfreq);
+
+ spin_lock_init(&xdev->spinlock);
+ watchdog_set_drvdata(xilinx_wdt_wdd, xdev);
+
+ rc = xwdt_selftest(xdev);
if (rc == XWT_TIMER_FAILED) {
- pr_err("SelfTest routine error!\n");
- goto unmap_io;
+ dev_err(&pdev->dev, "SelfTest routine error\n");
+ return rc;
}
- xwdt_get_status(&xdev.boot_status);
-
- rc = misc_register(&xwdt_miscdev);
+ rc = watchdog_register_device(xilinx_wdt_wdd);
if (rc) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- xwdt_miscdev.minor, rc);
- goto unmap_io;
+ dev_err(&pdev->dev, "Cannot register watchdog (err=%d)\n", rc);
+ return rc;
}
- if (no_timeout)
- pr_info("driver loaded (timeout=? sec, nowayout=%d)\n",
- xdev.nowayout);
- else
- pr_info("driver loaded (timeout=%d sec, nowayout=%d)\n",
- timeout, xdev.nowayout);
+ dev_info(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds\n",
+ xdev->base, xilinx_wdt_wdd->timeout);
- expect_close = 0;
- clear_bit(0, &driver_open);
+ platform_set_drvdata(pdev, xdev);
return 0;
-
-unmap_io:
- iounmap(xdev.base);
-release_mem:
- release_mem_region(xdev.res.start, resource_size(&xdev.res));
-err_out:
- return rc;
}
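To make the timeout computation in xwdt_probe() concrete with hypothetical devicetree values: clock-frequency = 50000000 and xlnx,wdt-interval = 30 give timeout = 2 * ((1 << 30) / 50000000) = 2 * 21 = 42 seconds, since the first overflow only raises an interrupt and the reset fires on the second.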
-static int xwdt_remove(struct platform_device *dev)
+static int xwdt_remove(struct platform_device *pdev)
{
- misc_deregister(&xwdt_miscdev);
- iounmap(xdev.base);
- release_mem_region(xdev.res.start, resource_size(&xdev.res));
+ struct xwdt_device *xdev = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&xdev->xilinx_wdt_wdd);
return 0;
}
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 09cf0135e8ac..3691b157516a 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -34,7 +34,6 @@
#include <linux/mm.h>
#include <linux/watchdog.h>
#include <linux/reboot.h>
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
@@ -58,7 +57,6 @@ struct omap_wdt_dev {
void __iomem *base; /* physical */
struct device *dev;
bool omap_wdt_users;
- struct resource *mem;
int wdt_trgr_pattern;
struct mutex lock; /* to avoid races with PM */
};
@@ -207,7 +205,7 @@ static int omap_wdt_probe(struct platform_device *pdev)
{
struct omap_wd_timer_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct watchdog_device *omap_wdt;
- struct resource *res, *mem;
+ struct resource *res;
struct omap_wdt_dev *wdev;
u32 rs;
int ret;
@@ -216,29 +214,20 @@ static int omap_wdt_probe(struct platform_device *pdev)
if (!omap_wdt)
return -ENOMEM;
- /* reserve static register mappings */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
- mem = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name);
- if (!mem)
- return -EBUSY;
-
wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
if (!wdev)
return -ENOMEM;
wdev->omap_wdt_users = false;
- wdev->mem = mem;
wdev->dev = &pdev->dev;
wdev->wdt_trgr_pattern = 0x1234;
mutex_init(&wdev->lock);
- wdev->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!wdev->base)
- return -ENOMEM;
+ /* reserve static register mappings */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wdev->base))
+ return PTR_ERR(wdev->base);
omap_wdt->info = &omap_wdt_info;
omap_wdt->ops = &omap_wdt_ops;
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index f7722a424676..9b3c41d18703 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -18,102 +18,204 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <mach/bridge-regs.h>
+#include <linux/of_device.h>
+
+/* RSTOUT mask register physical address for Orion5x, Kirkwood and Dove */
+#define ORION_RSTOUT_MASK_OFFSET 0x20108
+
+/* Internal registers can be configured at any 1 MiB aligned address */
+#define INTERNAL_REGS_MASK ~(SZ_1M - 1)
/*
* Watchdog timer block registers.
*/
#define TIMER_CTRL 0x0000
-#define WDT_EN 0x0010
-#define WDT_VAL 0x0024
+#define TIMER_A370_STATUS 0x04
#define WDT_MAX_CYCLE_COUNT 0xffffffff
-#define WDT_IN_USE 0
-#define WDT_OK_TO_CLOSE 1
-#define WDT_RESET_OUT_EN BIT(1)
-#define WDT_INT_REQ BIT(3)
+#define WDT_A370_RATIO_MASK(v) ((v) << 16)
+#define WDT_A370_RATIO_SHIFT 5
+#define WDT_A370_RATIO (1 << WDT_A370_RATIO_SHIFT)
+
+#define WDT_AXP_FIXED_ENABLE_BIT BIT(10)
+#define WDT_A370_EXPIRED BIT(31)
static bool nowayout = WATCHDOG_NOWAYOUT;
static int heartbeat = -1; /* module parameter (seconds) */
-static unsigned int wdt_max_duration; /* (seconds) */
-static struct clk *clk;
-static unsigned int wdt_tclk;
-static void __iomem *wdt_reg;
-static DEFINE_SPINLOCK(wdt_lock);
-static int orion_wdt_ping(struct watchdog_device *wdt_dev)
+struct orion_watchdog;
+
+struct orion_watchdog_data {
+ int wdt_counter_offset;
+ int wdt_enable_bit;
+ int rstout_enable_bit;
+ int (*clock_init)(struct platform_device *,
+ struct orion_watchdog *);
+ int (*start)(struct watchdog_device *);
+};
+
+struct orion_watchdog {
+ struct watchdog_device wdt;
+ void __iomem *reg;
+ void __iomem *rstout;
+ unsigned long clk_rate;
+ struct clk *clk;
+ const struct orion_watchdog_data *data;
+};
+
+static int orion_wdt_clock_init(struct platform_device *pdev,
+ struct orion_watchdog *dev)
{
- spin_lock(&wdt_lock);
+ int ret;
- /* Reload watchdog duration */
- writel(wdt_tclk * wdt_dev->timeout, wdt_reg + WDT_VAL);
+ dev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk))
+ return PTR_ERR(dev->clk);
+ ret = clk_prepare_enable(dev->clk);
+ if (ret) {
+ clk_put(dev->clk);
+ return ret;
+ }
- spin_unlock(&wdt_lock);
+ dev->clk_rate = clk_get_rate(dev->clk);
return 0;
}
-static int orion_wdt_start(struct watchdog_device *wdt_dev)
+static int armada370_wdt_clock_init(struct platform_device *pdev,
+ struct orion_watchdog *dev)
{
- u32 reg;
+ int ret;
- spin_lock(&wdt_lock);
+ dev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk))
+ return PTR_ERR(dev->clk);
+ ret = clk_prepare_enable(dev->clk);
+ if (ret) {
+ clk_put(dev->clk);
+ return ret;
+ }
+
+ /* Setup watchdog input clock */
+ atomic_io_modify(dev->reg + TIMER_CTRL,
+ WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT),
+ WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT));
+
+ dev->clk_rate = clk_get_rate(dev->clk) / WDT_A370_RATIO;
+ return 0;
+}
+
+static int armadaxp_wdt_clock_init(struct platform_device *pdev,
+ struct orion_watchdog *dev)
+{
+ int ret;
+
+ dev->clk = of_clk_get_by_name(pdev->dev.of_node, "fixed");
+ if (IS_ERR(dev->clk))
+ return PTR_ERR(dev->clk);
+ ret = clk_prepare_enable(dev->clk);
+ if (ret) {
+ clk_put(dev->clk);
+ return ret;
+ }
+
+ /* Enable the fixed watchdog clock input */
+ atomic_io_modify(dev->reg + TIMER_CTRL,
+ WDT_AXP_FIXED_ENABLE_BIT,
+ WDT_AXP_FIXED_ENABLE_BIT);
+
+ dev->clk_rate = clk_get_rate(dev->clk);
+ return 0;
+}
+
+static int orion_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
+ /* Reload watchdog duration */
+ writel(dev->clk_rate * wdt_dev->timeout,
+ dev->reg + dev->data->wdt_counter_offset);
+ return 0;
+}
+
+static int armada370_start(struct watchdog_device *wdt_dev)
+{
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
/* Set watchdog duration */
- writel(wdt_tclk * wdt_dev->timeout, wdt_reg + WDT_VAL);
+ writel(dev->clk_rate * wdt_dev->timeout,
+ dev->reg + dev->data->wdt_counter_offset);
- /* Clear watchdog timer interrupt */
- writel(~WDT_INT_REQ, BRIDGE_CAUSE);
+ /* Clear the watchdog expiration bit */
+ atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0);
/* Enable watchdog timer */
- reg = readl(wdt_reg + TIMER_CTRL);
- reg |= WDT_EN;
- writel(reg, wdt_reg + TIMER_CTRL);
+ atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
+ dev->data->wdt_enable_bit);
+
+ atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit,
+ dev->data->rstout_enable_bit);
+ return 0;
+}
+
+static int orion_start(struct watchdog_device *wdt_dev)
+{
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
+
+ /* Set watchdog duration */
+ writel(dev->clk_rate * wdt_dev->timeout,
+ dev->reg + dev->data->wdt_counter_offset);
+
+ /* Enable watchdog timer */
+ atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
+ dev->data->wdt_enable_bit);
/* Enable reset on watchdog */
- reg = readl(RSTOUTn_MASK);
- reg |= WDT_RESET_OUT_EN;
- writel(reg, RSTOUTn_MASK);
+ atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit,
+ dev->data->rstout_enable_bit);
- spin_unlock(&wdt_lock);
return 0;
}
-static int orion_wdt_stop(struct watchdog_device *wdt_dev)
+static int orion_wdt_start(struct watchdog_device *wdt_dev)
{
- u32 reg;
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
- spin_lock(&wdt_lock);
+ /* There are some per-SoC quirks to handle */
+ return dev->data->start(wdt_dev);
+}
+
+static int orion_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
/* Disable reset on watchdog */
- reg = readl(RSTOUTn_MASK);
- reg &= ~WDT_RESET_OUT_EN;
- writel(reg, RSTOUTn_MASK);
+ atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit, 0);
/* Disable watchdog timer */
- reg = readl(wdt_reg + TIMER_CTRL);
- reg &= ~WDT_EN;
- writel(reg, wdt_reg + TIMER_CTRL);
+ atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0);
- spin_unlock(&wdt_lock);
return 0;
}
-static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev)
+static int orion_wdt_enabled(struct orion_watchdog *dev)
{
- unsigned int time_left;
+ bool enabled, running;
+
+ enabled = readl(dev->rstout) & dev->data->rstout_enable_bit;
+ running = readl(dev->reg + TIMER_CTRL) & dev->data->wdt_enable_bit;
- spin_lock(&wdt_lock);
- time_left = readl(wdt_reg + WDT_VAL) / wdt_tclk;
- spin_unlock(&wdt_lock);
+ return enabled && running;
+}
- return time_left;
+static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev)
+{
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
+ return readl(dev->reg + dev->data->wdt_counter_offset) / dev->clk_rate;
}
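/*
 * Editorial sketch (user space, hypothetical numbers): every register path
 * above uses the same conversion — the counter is loaded with
 * clk_rate * timeout and read back divided by clk_rate. With an assumed
 * 25 MHz TCLK this also shows why the maximum timeout in probe is derived
 * from the counter width (WDT_MAX_CYCLE_COUNT is assumed here to be the
 * full-scale 32-bit value).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long clk_rate = 25000000UL;	/* hypothetical 25 MHz TCLK */
	unsigned int timeout = 30;		/* seconds */

	/* What orion_wdt_ping()/orion_start() write into the counter */
	uint32_t counter = (uint32_t)(clk_rate * timeout);

	/* What orion_wdt_get_timeleft() computes when reading it back */
	unsigned int timeleft = counter / clk_rate;

	/* Largest timeout a full 32-bit down-counter could represent */
	unsigned int max_timeout = 0xffffffffUL / clk_rate;

	printf("counter=%u timeleft=%u s max=%u s\n",
	       (unsigned int)counter, timeleft, max_timeout);
	return 0;
}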
static int orion_wdt_set_timeout(struct watchdog_device *wdt_dev,
@@ -137,68 +239,188 @@ static const struct watchdog_ops orion_wdt_ops = {
.get_timeleft = orion_wdt_get_timeleft,
};
-static struct watchdog_device orion_wdt = {
- .info = &orion_wdt_info,
- .ops = &orion_wdt_ops,
- .min_timeout = 1,
+static irqreturn_t orion_wdt_irq(int irq, void *devid)
+{
+ panic("Watchdog Timeout");
+ return IRQ_HANDLED;
+}
+
+/*
+ * The original devicetree binding for this driver specified only
+ * one memory resource, so in order to keep DT backwards compatibility
+ * we fall back to a hardcoded register address if the resource
+ * is missing from the devicetree.
+ */
+static void __iomem *orion_wdt_ioremap_rstout(struct platform_device *pdev,
+ phys_addr_t internal_regs)
+{
+ struct resource *res;
+ phys_addr_t rstout;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res)
+ return devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+
+ /* This workaround works only for "orion-wdt", DT-enabled */
+ if (!of_device_is_compatible(pdev->dev.of_node, "marvell,orion-wdt"))
+ return NULL;
+
+ rstout = internal_regs + ORION_RSTOUT_MASK_OFFSET;
+
+ WARN(1, FW_BUG "falling back to hardcoded RSTOUT reg %pa\n", &rstout);
+ return devm_ioremap(&pdev->dev, rstout, 0x4);
+}
+
+static const struct orion_watchdog_data orion_data = {
+ .rstout_enable_bit = BIT(1),
+ .wdt_enable_bit = BIT(4),
+ .wdt_counter_offset = 0x24,
+ .clock_init = orion_wdt_clock_init,
+ .start = orion_start,
+};
+
+static const struct orion_watchdog_data armada370_data = {
+ .rstout_enable_bit = BIT(8),
+ .wdt_enable_bit = BIT(8),
+ .wdt_counter_offset = 0x34,
+ .clock_init = armada370_wdt_clock_init,
+ .start = armada370_start,
};
+static const struct orion_watchdog_data armadaxp_data = {
+ .rstout_enable_bit = BIT(8),
+ .wdt_enable_bit = BIT(8),
+ .wdt_counter_offset = 0x34,
+ .clock_init = armadaxp_wdt_clock_init,
+ .start = armada370_start,
+};
+
+static const struct of_device_id orion_wdt_of_match_table[] = {
+ {
+ .compatible = "marvell,orion-wdt",
+ .data = &orion_data,
+ },
+ {
+ .compatible = "marvell,armada-370-wdt",
+ .data = &armada370_data,
+ },
+ {
+ .compatible = "marvell,armada-xp-wdt",
+ .data = &armadaxp_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, orion_wdt_of_match_table);
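/*
 * Editorial sketch (stand-alone, hypothetical names): the heart of this
 * rework is that all per-SoC differences now live in a small const data
 * structure selected by compatible string and dispatched through function
 * pointers, instead of in globals. A minimal model of that pattern,
 * independent of the kernel's of_match_device() machinery:
 */
#include <stdio.h>
#include <string.h>

struct soc_data {
	int counter_offset;
	int (*start)(const struct soc_data *data);
};

static int generic_start(const struct soc_data *data)
{
	printf("start: counter register at offset 0x%x\n", data->counter_offset);
	return 0;
}

static const struct soc_data orion_data     = { .counter_offset = 0x24, .start = generic_start };
static const struct soc_data armada370_data = { .counter_offset = 0x34, .start = generic_start };

static const struct { const char *compatible; const struct soc_data *data; } match_table[] = {
	{ "marvell,orion-wdt",      &orion_data },
	{ "marvell,armada-370-wdt", &armada370_data },
	{ NULL, NULL },
};

static const struct soc_data *match_compatible(const char *compat)
{
	for (int i = 0; match_table[i].compatible; i++)
		if (!strcmp(match_table[i].compatible, compat))
			return match_table[i].data;
	return &orion_data;		/* default legacy match, as in probe */
}

int main(void)
{
	const struct soc_data *data = match_compatible("marvell,armada-370-wdt");
	return data->start(data);
}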
+
static int orion_wdt_probe(struct platform_device *pdev)
{
+ struct orion_watchdog *dev;
+ const struct of_device_id *match;
+ unsigned int wdt_max_duration; /* (seconds) */
struct resource *res;
- int ret;
+ int ret, irq;
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "Orion Watchdog missing clock\n");
- return -ENODEV;
- }
- clk_prepare_enable(clk);
- wdt_tclk = clk_get_rate(clk);
+ dev = devm_kzalloc(&pdev->dev, sizeof(struct orion_watchdog),
+ GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ match = of_match_device(orion_wdt_of_match_table, &pdev->dev);
+ if (!match)
+ /* Default legacy match */
+ match = &orion_wdt_of_match_table[0];
+
+ dev->wdt.info = &orion_wdt_info;
+ dev->wdt.ops = &orion_wdt_ops;
+ dev->wdt.min_timeout = 1;
+ dev->data = match->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- wdt_reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!wdt_reg)
- return -ENOMEM;
- wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk;
+ dev->reg = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!dev->reg)
+ return -ENOMEM;
- orion_wdt.timeout = wdt_max_duration;
- orion_wdt.max_timeout = wdt_max_duration;
- watchdog_init_timeout(&orion_wdt, heartbeat, &pdev->dev);
+ dev->rstout = orion_wdt_ioremap_rstout(pdev, res->start &
+ INTERNAL_REGS_MASK);
+ if (!dev->rstout)
+ return -ENODEV;
- watchdog_set_nowayout(&orion_wdt, nowayout);
- ret = watchdog_register_device(&orion_wdt);
+ ret = dev->data->clock_init(pdev, dev);
if (ret) {
- clk_disable_unprepare(clk);
+ dev_err(&pdev->dev, "cannot initialize clock\n");
return ret;
}
+ wdt_max_duration = WDT_MAX_CYCLE_COUNT / dev->clk_rate;
+
+ dev->wdt.timeout = wdt_max_duration;
+ dev->wdt.max_timeout = wdt_max_duration;
+ watchdog_init_timeout(&dev->wdt, heartbeat, &pdev->dev);
+
+ platform_set_drvdata(pdev, &dev->wdt);
+ watchdog_set_drvdata(&dev->wdt, dev);
+
+ /*
+ * Let's make sure the watchdog is fully stopped, unless it's
+ * explicitly enabled. This may be the case if the module was
+ * removed and re-inserted, or if the bootloader explicitly
+ * set a running watchdog before booting the kernel.
+ */
+ if (!orion_wdt_enabled(dev))
+ orion_wdt_stop(&dev->wdt);
+
+ /* Request the IRQ only after the watchdog is disabled */
+ irq = platform_get_irq(pdev, 0);
+ if (irq > 0) {
+ /*
+ * Not all supported platforms specify an interrupt for the
+ * watchdog, so let's make it optional.
+ */
+ ret = devm_request_irq(&pdev->dev, irq, orion_wdt_irq, 0,
+ pdev->name, dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto disable_clk;
+ }
+ }
+
+ watchdog_set_nowayout(&dev->wdt, nowayout);
+ ret = watchdog_register_device(&dev->wdt);
+ if (ret)
+ goto disable_clk;
+
pr_info("Initial timeout %d sec%s\n",
- orion_wdt.timeout, nowayout ? ", nowayout" : "");
+ dev->wdt.timeout, nowayout ? ", nowayout" : "");
return 0;
+
+disable_clk:
+ clk_disable_unprepare(dev->clk);
+ clk_put(dev->clk);
+ return ret;
}
static int orion_wdt_remove(struct platform_device *pdev)
{
- watchdog_unregister_device(&orion_wdt);
- clk_disable_unprepare(clk);
+ struct watchdog_device *wdt_dev = platform_get_drvdata(pdev);
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
+
+ watchdog_unregister_device(wdt_dev);
+ clk_disable_unprepare(dev->clk);
+ clk_put(dev->clk);
return 0;
}
static void orion_wdt_shutdown(struct platform_device *pdev)
{
- orion_wdt_stop(&orion_wdt);
+ struct watchdog_device *wdt_dev = platform_get_drvdata(pdev);
+ orion_wdt_stop(wdt_dev);
}
-static const struct of_device_id orion_wdt_of_match_table[] = {
- { .compatible = "marvell,orion-wdt", },
- {},
-};
-MODULE_DEVICE_TABLE(of, orion_wdt_of_match_table);
-
static struct platform_driver orion_wdt_driver = {
.probe = orion_wdt_probe,
.remove = orion_wdt_remove,
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 5211d56b3681..9f15dd9435d1 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -512,9 +512,8 @@ static int __init pc87413_init(void)
return -EBUSY;
ret = register_reboot_notifier(&pc87413_notifier);
- if (ret != 0) {
+ if (ret != 0)
pr_err("cannot register reboot notifier (err=%d)\n", ret);
- }
ret = misc_register(&pc87413_miscdev);
if (ret != 0) {
@@ -575,8 +574,8 @@ static void __exit pc87413_exit(void)
module_init(pc87413_init);
module_exit(pc87413_exit);
-MODULE_AUTHOR("Sven Anders <anders@anduras.de>, "
- "Marcus Junker <junker@anduras.de>,");
+MODULE_AUTHOR("Sven Anders <anders@anduras.de>");
+MODULE_AUTHOR("Marcus Junker <junker@anduras.de>");
MODULE_DESCRIPTION("PC87413 WDT driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index e562e0476016..1a11aedc4fe8 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -645,10 +645,8 @@ static int usb_pcwd_probe(struct usb_interface *interface,
/* allocate memory for our device and initialize it */
usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL);
- if (usb_pcwd == NULL) {
- pr_err("Out of memory\n");
+ if (usb_pcwd == NULL)
goto error;
- }
usb_pcwd_device = usb_pcwd;
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 5bec20f5dc2d..15fb316e9437 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -24,7 +24,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 082d06262959..29cf4dcbc59c 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -27,7 +27,6 @@
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/completion.h>
diff --git a/drivers/watchdog/retu_wdt.c b/drivers/watchdog/retu_wdt.c
index f53615dc633d..a7a0695971e4 100644
--- a/drivers/watchdog/retu_wdt.c
+++ b/drivers/watchdog/retu_wdt.c
@@ -16,7 +16,6 @@
* GNU General Public License for more details.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index 3dd8ed28adc8..cfed0fe264dc 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -10,7 +10,6 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/of.h>
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index aec946df6ed9..7c6ccd071baf 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -30,7 +30,6 @@
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
@@ -526,7 +525,11 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
goto err;
}
- clk_prepare_enable(wdt->clock);
+ ret = clk_prepare_enable(wdt->clock);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clock\n");
+ return ret;
+ }
ret = s3c2410wdt_cpufreq_register(wdt);
if (ret < 0) {
@@ -608,7 +611,6 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
err_clk:
clk_disable_unprepare(wdt->clock);
- wdt->clock = NULL;
err:
return ret;
@@ -628,7 +630,6 @@ static int s3c2410wdt_remove(struct platform_device *dev)
s3c2410wdt_cpufreq_deregister(wdt);
clk_disable_unprepare(wdt->clock);
- wdt->clock = NULL;
return 0;
}
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index f353e18b1a82..1cfd3f6a13d5 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -158,12 +158,11 @@ static void wdt_timer_ping(unsigned long data)
static void wdt_config(int writeval)
{
- __u16 dummy;
unsigned long flags;
/* buy some time (ping) */
spin_lock_irqsave(&wdt_spinlock, flags);
- dummy = readw(wdtmrctl); /* ensure write synchronization */
+ readw(wdtmrctl); /* ensure write synchronization */
writew(0xAAAA, wdtmrctl);
writew(0x5555, wdtmrctl);
/* unlock WDT = make WDT configuration register writable one time */
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index af3528f84d65..d04d02b41c32 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -293,8 +293,6 @@ static int sh_wdt_probe(struct platform_device *pdev)
static int sh_wdt_remove(struct platform_device *pdev)
{
- struct sh_wdt *wdt = platform_get_drvdata(pdev);
-
watchdog_unregister_device(&sh_wdt_dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index c04a1aa158e2..0dc5e323d59d 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -62,7 +62,7 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static int soft_noboot = 0;
+static int soft_noboot;
module_param(soft_noboot, int, 0);
MODULE_PARM_DESC(soft_noboot,
"Softdog action, set to 1 to ignore reboots, 0 to reboot (default=0)");
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 3f786ce0a6f2..47629d268e0a 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -16,7 +16,6 @@
#include <linux/amba/bus.h>
#include <linux/bitops.h>
#include <linux/clk.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
@@ -209,27 +208,15 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
struct sp805_wdt *wdt;
int ret = 0;
- if (!devm_request_mem_region(&adev->dev, adev->res.start,
- resource_size(&adev->res), "sp805_wdt")) {
- dev_warn(&adev->dev, "Failed to get memory region resource\n");
- ret = -ENOENT;
- goto err;
- }
-
wdt = devm_kzalloc(&adev->dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt) {
- dev_warn(&adev->dev, "Kzalloc failed\n");
ret = -ENOMEM;
goto err;
}
- wdt->base = devm_ioremap(&adev->dev, adev->res.start,
- resource_size(&adev->res));
- if (!wdt->base) {
- ret = -ENOMEM;
- dev_warn(&adev->dev, "ioremap fail\n");
- goto err;
- }
+ wdt->base = devm_ioremap_resource(&adev->dev, &adev->res);
+ if (IS_ERR(wdt->base))
+ return PTR_ERR(wdt->base);
wdt->clk = devm_clk_get(&adev->dev, NULL);
if (IS_ERR(wdt->clk)) {
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index bb64ae3f47da..3804d5e9baea 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -9,7 +9,6 @@
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 76332d893e12..cd00a7836cdc 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -205,7 +205,7 @@ static void sunxi_wdt_shutdown(struct platform_device *pdev)
}
static const struct of_device_id sunxi_wdt_dt_ids[] = {
- { .compatible = "allwinner,sun4i-wdt" },
+ { .compatible = "allwinner,sun4i-a10-wdt" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids);
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
new file mode 100644
index 000000000000..750e2a26cb12
--- /dev/null
+++ b/drivers/watchdog/tegra_wdt.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+/* minimum and maximum watchdog trigger timeout, in seconds */
+#define MIN_WDT_TIMEOUT 1
+#define MAX_WDT_TIMEOUT 255
+
+/*
+ * Base of the WDT registers, from the timer base address. There are
+ * actually 5 watchdogs that can be configured (by pairing with an available
+ * timer), at bases 0x100 + (WDT ID) * 0x20, where WDT ID is 0 through 4.
+ * This driver only configures the first watchdog (WDT ID 0).
+ */
+#define WDT_BASE 0x100
+#define WDT_ID 0
+
+/*
+ * Register base of the timer that's selected for pairing with the watchdog.
+ * This driver arbitrarily uses timer 5, which is currently unused by
+ * other drivers (in particular, the Tegra clocksource driver). If this
+ * needs to change, take care that the new timer is not used by the
+ * clocksource driver.
+ */
+#define WDT_TIMER_BASE 0x60
+#define WDT_TIMER_ID 5
+
+/* WDT registers */
+#define WDT_CFG 0x0
+#define WDT_CFG_PERIOD_SHIFT 4
+#define WDT_CFG_PERIOD_MASK 0xff
+#define WDT_CFG_INT_EN (1 << 12)
+#define WDT_CFG_PMC2CAR_RST_EN (1 << 15)
+#define WDT_STS 0x4
+#define WDT_STS_COUNT_SHIFT 4
+#define WDT_STS_COUNT_MASK 0xff
+#define WDT_STS_EXP_SHIFT 12
+#define WDT_STS_EXP_MASK 0x3
+#define WDT_CMD 0x8
+#define WDT_CMD_START_COUNTER (1 << 0)
+#define WDT_CMD_DISABLE_COUNTER (1 << 1)
+#define WDT_UNLOCK (0xc)
+#define WDT_UNLOCK_PATTERN (0xc45a << 0)
+
+/* Timer registers */
+#define TIMER_PTV 0x0
+#define TIMER_EN (1 << 31)
+#define TIMER_PERIODIC (1 << 30)
+
+struct tegra_wdt {
+ struct watchdog_device wdd;
+ void __iomem *wdt_regs;
+ void __iomem *tmr_regs;
+};
+
+#define WDT_HEARTBEAT 120
+static int heartbeat = WDT_HEARTBEAT;
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeats in seconds. (default = "
+ __MODULE_STRING(WDT_HEARTBEAT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int tegra_wdt_start(struct watchdog_device *wdd)
+{
+ struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);
+ u32 val;
+
+ /*
+ * This thing has a fixed 1MHz clock. Normally, we would set the
+ * period to 1 second by writing 1000000ul, but the watchdog system
+ * reset actually occurs on the 4th expiration of this counter,
+ * so we set the period to 1/4 of this amount.
+ */
+ val = 1000000ul / 4;
+ val |= (TIMER_EN | TIMER_PERIODIC);
+ writel(val, wdt->tmr_regs + TIMER_PTV);
+
+ /*
+ * Set number of periods and start counter.
+ *
+ * Interrupt handler is not required for user space
+ * WDT accesses, since the caller is responsible for pinging the
+ * WDT to reset the counter before expiration, through ioctls.
+ */
+ val = WDT_TIMER_ID |
+ (wdd->timeout << WDT_CFG_PERIOD_SHIFT) |
+ WDT_CFG_PMC2CAR_RST_EN;
+ writel(val, wdt->wdt_regs + WDT_CFG);
+
+ writel(WDT_CMD_START_COUNTER, wdt->wdt_regs + WDT_CMD);
+
+ return 0;
+}
+
+static int tegra_wdt_stop(struct watchdog_device *wdd)
+{
+ struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ writel(WDT_UNLOCK_PATTERN, wdt->wdt_regs + WDT_UNLOCK);
+ writel(WDT_CMD_DISABLE_COUNTER, wdt->wdt_regs + WDT_CMD);
+ writel(0, wdt->tmr_regs + TIMER_PTV);
+
+ return 0;
+}
+
+static int tegra_wdt_ping(struct watchdog_device *wdd)
+{
+ struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ writel(WDT_CMD_START_COUNTER, wdt->wdt_regs + WDT_CMD);
+
+ return 0;
+}
+
+static int tegra_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ wdd->timeout = timeout;
+
+ if (watchdog_active(wdd))
+ return tegra_wdt_start(wdd);
+
+ return 0;
+}
+
+static unsigned int tegra_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);
+ u32 val;
+ int count;
+ int exp;
+
+ val = readl(wdt->wdt_regs + WDT_STS);
+
+ /* Current countdown (from timeout) */
+ count = (val >> WDT_STS_COUNT_SHIFT) & WDT_STS_COUNT_MASK;
+
+ /* Number of expirations (we are waiting for the 4th expiration) */
+ exp = (val >> WDT_STS_EXP_SHIFT) & WDT_STS_EXP_MASK;
+
+ /*
+ * The result is divided by 4 because each countdown tick is a quarter
+ * of a second: the reset only fires on the 4th expiration of the counter.
+ */
+ return (((3 - exp) * wdd->timeout) + count) / 4;
+}
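/*
 * Editorial sketch (user-space arithmetic check): tegra_wdt_start()
 * programs a quarter-second period into the 1 MHz timer because the system
 * reset only fires on the 4th expiration, and tegra_wdt_get_timeleft()
 * reverses that — remaining full expirations are worth 'timeout' ticks
 * each, plus the current countdown, with every tick worth a quarter second.
 */
#include <stdio.h>

static unsigned int timeleft(unsigned int timeout, unsigned int exp,
			     unsigned int count)
{
	return (((3 - exp) * timeout) + count) / 4;
}

int main(void)
{
	unsigned int period = 1000000 / 4;	/* us written to TIMER_PTV */
	unsigned int timeout = 120;		/* seconds, default heartbeat */

	printf("timer period: %u us\n", period);
	/* Just started: no expirations yet, counter still at 'timeout' */
	printf("fresh:  %u s left\n", timeleft(timeout, 0, timeout));	  /* 120 */
	/* One expiration done, counter just reloaded */
	printf("1 exp:  %u s left\n", timeleft(timeout, 1, timeout));	  /* 90 */
	/* Last expiration in progress, counter half way down */
	printf("late:   %u s left\n", timeleft(timeout, 3, timeout / 2)); /* 15 */
	return 0;
}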
+
+static const struct watchdog_info tegra_wdt_info = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+ .firmware_version = 0,
+ .identity = "Tegra Watchdog",
+};
+
+static struct watchdog_ops tegra_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = tegra_wdt_start,
+ .stop = tegra_wdt_stop,
+ .ping = tegra_wdt_ping,
+ .set_timeout = tegra_wdt_set_timeout,
+ .get_timeleft = tegra_wdt_get_timeleft,
+};
+
+static int tegra_wdt_probe(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd;
+ struct tegra_wdt *wdt;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ /* This is the timer base. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ /*
+ * Allocate our watchdog driver data, which has the
+ * struct watchdog_device nested within it.
+ */
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ /* Initialize struct tegra_wdt. */
+ wdt->wdt_regs = regs + WDT_BASE;
+ wdt->tmr_regs = regs + WDT_TIMER_BASE;
+
+ /* Initialize struct watchdog_device. */
+ wdd = &wdt->wdd;
+ wdd->timeout = heartbeat;
+ wdd->info = &tegra_wdt_info;
+ wdd->ops = &tegra_wdt_ops;
+ wdd->min_timeout = MIN_WDT_TIMEOUT;
+ wdd->max_timeout = MAX_WDT_TIMEOUT;
+
+ watchdog_set_drvdata(wdd, wdt);
+
+ watchdog_set_nowayout(wdd, nowayout);
+
+ ret = watchdog_register_device(wdd);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register watchdog device\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, wdt);
+
+ dev_info(&pdev->dev,
+ "initialized (heartbeat = %d sec, nowayout = %d)\n",
+ heartbeat, nowayout);
+
+ return 0;
+}
+
+static int tegra_wdt_remove(struct platform_device *pdev)
+{
+ struct tegra_wdt *wdt = platform_get_drvdata(pdev);
+
+ tegra_wdt_stop(&wdt->wdd);
+
+ watchdog_unregister_device(&wdt->wdd);
+
+ dev_info(&pdev->dev, "removed wdt\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_wdt_runtime_suspend(struct device *dev)
+{
+ struct tegra_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ tegra_wdt_stop(&wdt->wdd);
+
+ return 0;
+}
+
+static int tegra_wdt_runtime_resume(struct device *dev)
+{
+ struct tegra_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ tegra_wdt_start(&wdt->wdd);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id tegra_wdt_of_match[] = {
+ { .compatible = "nvidia,tegra30-timer", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_wdt_of_match);
+
+static const struct dev_pm_ops tegra_wdt_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_wdt_runtime_suspend,
+ tegra_wdt_runtime_resume)
+};
+
+static struct platform_driver tegra_wdt_driver = {
+ .probe = tegra_wdt_probe,
+ .remove = tegra_wdt_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tegra-wdt",
+ .pm = &tegra_wdt_pm_ops,
+ .of_match_table = tegra_wdt_of_match,
+ },
+};
+module_platform_driver(tegra_wdt_driver);
+
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_DESCRIPTION("Tegra Watchdog Driver");
+MODULE_LICENSE("GPL v2");
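/*
 * Editorial example (user space, hedged): because the driver registers
 * through the generic watchdog core, it can be exercised with the standard
 * watchdog chardev ioctls; nothing below is Tegra-specific, and the
 * presence of a writable /dev/watchdog node is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 60, left = 0;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* routed to .set_timeout */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* routed to .ping */
	ioctl(fd, WDIOC_GETTIMELEFT, &left);	/* routed to .get_timeleft */
	printf("timeout=%d s, %d s left\n", timeout, left);

	write(fd, "V", 1);			/* magic close; WDIOF_MAGICCLOSE is set */
	close(fd);
	return 0;
}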
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 09d4831aa61f..afa9d6ef353a 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -61,7 +61,7 @@ struct ts72xx_wdt {
struct platform_device *pdev;
};
-struct platform_device *ts72xx_wdt_pdev;
+static struct platform_device *ts72xx_wdt_pdev;
/*
* TS-72xx Watchdog supports following timeouts (value written
@@ -394,10 +394,8 @@ static int ts72xx_wdt_probe(struct platform_device *pdev)
int error = 0;
wdt = devm_kzalloc(&pdev->dev, sizeof(struct ts72xx_wdt), GFP_KERNEL);
- if (!wdt) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!wdt)
return -ENOMEM;
- }
r1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
wdt->control_reg = devm_ioremap_resource(&pdev->dev, r1);
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index 68b45fc9ba6a..e9ea856b8ff2 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -455,6 +455,6 @@ module_init(wdt_init);
module_exit(wdt_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marcus Junker <junker@anduras.de>, "
- "Samuel Tardieu <sam@rfc1149.net>");
+MODULE_AUTHOR("Marcus Junker <junker@anduras.de>");
+MODULE_AUTHOR("Samuel Tardieu <sam@rfc1149.net>");
MODULE_DESCRIPTION("w83697hf/hg WDT driver");
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 7355ddd0b207..ebbb183be618 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -139,9 +139,8 @@ static const struct watchdog_info ident = {
static long watchdog_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- unsigned int new_margin;
int __user *int_arg = (int __user *)arg;
- int ret = -ENOTTY;
+ int new_margin, ret = -ENOTTY;
switch (cmd) {
case WDIOC_GETSUPPORT:
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 3dc578e71211..48b2c058b009 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -49,7 +49,6 @@
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
-#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/io.h>
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index e243bd01c774..2fa17e746ff6 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -204,7 +204,6 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
GFP_KERNEL);
if (!driver_data) {
- dev_err(wm831x->dev, "Unable to alloacate watchdog device\n");
ret = -ENOMEM;
goto err;
}
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 61a6ac8fa8fc..b7a506f2bb14 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -604,19 +604,29 @@ static void __init balloon_add_region(unsigned long start_pfn,
}
}
+static int alloc_balloon_scratch_page(int cpu)
+{
+ if (per_cpu(balloon_scratch_page, cpu) != NULL)
+ return 0;
+
+ per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+ if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+ pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+
static int balloon_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
int cpu = (long)hcpu;
switch (action) {
case CPU_UP_PREPARE:
- if (per_cpu(balloon_scratch_page, cpu) != NULL)
- break;
- per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
- if (per_cpu(balloon_scratch_page, cpu) == NULL) {
- pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+ if (alloc_balloon_scratch_page(cpu))
return NOTIFY_BAD;
- }
break;
default:
break;
@@ -636,15 +646,17 @@ static int __init balloon_init(void)
return -ENODEV;
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- for_each_online_cpu(cpu)
- {
- per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
- if (per_cpu(balloon_scratch_page, cpu) == NULL) {
- pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+ register_cpu_notifier(&balloon_cpu_notifier);
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (alloc_balloon_scratch_page(cpu)) {
+ put_online_cpus();
+ unregister_cpu_notifier(&balloon_cpu_notifier);
return -ENOMEM;
}
}
- register_cpu_notifier(&balloon_cpu_notifier);
+ put_online_cpus();
}
pr_info("Initialising balloon driver\n");
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index d7ff91757307..5db43fc100a4 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -166,7 +166,6 @@ static void evtchn_2l_handle_events(unsigned cpu)
int start_word_idx, start_bit_idx;
int word_idx, bit_idx;
int i;
- struct irq_desc *desc;
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -176,11 +175,8 @@ static void evtchn_2l_handle_events(unsigned cpu)
unsigned int evtchn = evtchn_from_irq(irq);
word_idx = evtchn / BITS_PER_LONG;
bit_idx = evtchn % BITS_PER_LONG;
- if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx)) {
- desc = irq_to_desc(irq);
- if (desc)
- generic_handle_irq_desc(irq, desc);
- }
+ if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
+ generic_handle_irq(irq);
}
/*
@@ -245,11 +241,8 @@ static void evtchn_2l_handle_events(unsigned cpu)
port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
irq = get_evtchn_to_irq(port);
- if (irq != -1) {
- desc = irq_to_desc(irq);
- if (desc)
- generic_handle_irq_desc(irq, desc);
- }
+ if (irq != -1)
+ generic_handle_irq(irq);
bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index f4a9e3311297..dfa12a4a0a48 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -336,9 +336,8 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
BUG_ON(irq == -1);
#ifdef CONFIG_SMP
- cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
+ cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
#endif
-
xen_evtchn_port_bind_to_cpu(info, cpu);
info->cpu = cpu;
@@ -373,10 +372,8 @@ static void xen_irq_init(unsigned irq)
{
struct irq_info *info;
#ifdef CONFIG_SMP
- struct irq_desc *desc = irq_to_desc(irq);
-
/* By default all event channels notify CPU#0. */
- cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
+ cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
#endif
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -391,10 +388,10 @@ static void xen_irq_init(unsigned irq)
list_add_tail(&info->list, &xen_irq_list_head);
}
-static int __must_check xen_allocate_irq_dynamic(void)
+static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
int first = 0;
- int irq;
+ int i, irq;
#ifdef CONFIG_X86_IO_APIC
/*
@@ -408,14 +405,22 @@ static int __must_check xen_allocate_irq_dynamic(void)
first = get_nr_irqs_gsi();
#endif
- irq = irq_alloc_desc_from(first, -1);
+ irq = irq_alloc_descs_from(first, nvec, -1);
- if (irq >= 0)
- xen_irq_init(irq);
+ if (irq >= 0) {
+ for (i = 0; i < nvec; i++)
+ xen_irq_init(irq + i);
+ }
return irq;
}
+static inline int __must_check xen_allocate_irq_dynamic(void)
+{
+
+ return xen_allocate_irqs_dynamic(1);
+}
+
static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
int irq;
@@ -469,9 +474,6 @@ static void xen_evtchn_close(unsigned int port)
close.port = port;
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
BUG();
-
- /* Closed ports are implicitly re-bound to VCPU0. */
- bind_evtchn_to_cpu(port, 0);
}
static void pirq_query_unmask(int irq)
@@ -490,13 +492,6 @@ static void pirq_query_unmask(int irq)
info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}
-static bool probing_irq(int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- return desc && desc->action == NULL;
-}
-
static void eoi_pirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
@@ -538,8 +533,7 @@ static unsigned int __startup_pirq(unsigned int irq)
BIND_PIRQ__WILL_SHARE : 0;
rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
if (rc != 0) {
- if (!probing_irq(irq))
- pr_info("Failed to obtain physical IRQ %d\n", irq);
+ pr_warn("Failed to obtain physical IRQ %d\n", irq);
return 0;
}
evtchn = bind_pirq.port;
@@ -741,22 +735,25 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
}
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
- int pirq, const char *name, domid_t domid)
+ int pirq, int nvec, const char *name, domid_t domid)
{
- int irq, ret;
+ int i, irq, ret;
mutex_lock(&irq_mapping_update_lock);
- irq = xen_allocate_irq_dynamic();
+ irq = xen_allocate_irqs_dynamic(nvec);
if (irq < 0)
goto out;
- irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
- name);
+ for (i = 0; i < nvec; i++) {
+ irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
+
+ ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
+ i == 0 ? 0 : PIRQ_MSI_GROUP);
+ if (ret < 0)
+ goto error_irq;
+ }
- ret = xen_irq_info_pirq_setup(irq, 0, pirq, 0, domid, 0);
- if (ret < 0)
- goto error_irq;
ret = irq_set_msi_desc(irq, msidesc);
if (ret < 0)
goto error_irq;
@@ -764,7 +761,8 @@ out:
mutex_unlock(&irq_mapping_update_lock);
return irq;
error_irq:
- __unbind_from_irq(irq);
+ for (; i >= 0; i--)
+ __unbind_from_irq(irq + i);
mutex_unlock(&irq_mapping_update_lock);
return ret;
}
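/*
 * Editorial sketch (generic, hypothetical names): with MSI message groups,
 * nvec consecutive IRQs are set up in a loop, so the error label has to
 * unwind every vector touched so far rather than a single IRQ — hence the
 * descending "for (; i >= 0; i--)" above. The pattern in isolation:
 */
#include <stdio.h>

static int setup_one(int idx)
{
	if (idx == 2)			/* simulate a failure on the third vector */
		return -1;
	printf("setup    %d\n", idx);
	return 0;
}

static void teardown_one(int idx)
{
	printf("teardown %d\n", idx);
}

static int setup_group(int base, int nvec)
{
	int i, ret = 0;

	for (i = 0; i < nvec; i++) {
		ret = setup_one(base + i);
		if (ret < 0)
			goto error;
	}
	return 0;

error:
	/* unwind the failed index (it may be partially set up) and all before it */
	for (; i >= 0; i--)
		teardown_one(base + i);
	return ret;
}

int main(void)
{
	return setup_group(100, 4) < 0;
}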
@@ -772,18 +770,18 @@ error_irq:
int xen_destroy_irq(int irq)
{
- struct irq_desc *desc;
struct physdev_unmap_pirq unmap_irq;
struct irq_info *info = info_for_irq(irq);
int rc = -ENOENT;
mutex_lock(&irq_mapping_update_lock);
- desc = irq_to_desc(irq);
- if (!desc)
- goto out;
-
- if (xen_initial_domain()) {
+ /*
+ * Only the first vector in an MSI group owns the PIRQ mapping, so
+ * skip the PIRQ unmap when removing any vector other than the
+ * first one in the group.
+ */
+ if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
unmap_irq.pirq = info->u.pirq.pirq;
unmap_irq.domid = info->u.pirq.domid;
rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
@@ -1250,6 +1248,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
irq_enter();
#ifdef CONFIG_X86
exit_idle();
+ inc_irq_stat(irq_hv_callback_count);
#endif
__xen_evtchn_do_upcall();
@@ -1339,31 +1338,11 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
bool force)
{
- unsigned tcpu = cpumask_first(dest);
+ unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
return rebind_irq_to_cpu(data->irq, tcpu);
}
-static int retrigger_evtchn(int evtchn)
-{
- int masked;
-
- if (!VALID_EVTCHN(evtchn))
- return 0;
-
- masked = test_and_set_mask(evtchn);
- set_evtchn(evtchn);
- if (!masked)
- unmask_evtchn(evtchn);
-
- return 1;
-}
-
-int resend_irq_on_evtchn(unsigned int irq)
-{
- return retrigger_evtchn(evtchn_from_irq(irq));
-}
-
static void enable_dynirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
@@ -1398,7 +1377,18 @@ static void mask_ack_dynirq(struct irq_data *data)
static int retrigger_dynirq(struct irq_data *data)
{
- return retrigger_evtchn(evtchn_from_irq(data->irq));
+ unsigned int evtchn = evtchn_from_irq(data->irq);
+ int masked;
+
+ if (!VALID_EVTCHN(evtchn))
+ return 0;
+
+ masked = test_and_set_mask(evtchn);
+ set_evtchn(evtchn);
+ if (!masked)
+ unmask_evtchn(evtchn);
+
+ return 1;
}
static void restore_pirqs(void)
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 1de2a191b395..96109a9972b6 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -235,14 +235,10 @@ static uint32_t clear_linked(volatile event_word_t *word)
static void handle_irq_for_port(unsigned port)
{
int irq;
- struct irq_desc *desc;
irq = get_evtchn_to_irq(port);
- if (irq != -1) {
- desc = irq_to_desc(irq);
- if (desc)
- generic_handle_irq_desc(irq, desc);
- }
+ if (irq != -1)
+ generic_handle_irq(irq);
}
static void consume_one_event(unsigned cpu,
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index 677f41a0fff9..50c2050a1e32 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -53,6 +53,7 @@ struct irq_info {
#define PIRQ_NEEDS_EOI (1 << 0)
#define PIRQ_SHAREABLE (1 << 1)
+#define PIRQ_MSI_GROUP (1 << 2)
struct evtchn_ops {
unsigned (*max_channels)(void);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index b84e3ab839aa..6d325bda76da 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -933,9 +933,6 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct page **pages, unsigned int count)
{
int i, ret;
- bool lazy = false;
- pte_t *pte;
- unsigned long mfn;
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
if (ret)
@@ -947,45 +944,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
&map_ops[i].status, __func__);
- /* this is basically a nop on x86 */
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
- for (i = 0; i < count; i++) {
- if (map_ops[i].status)
- continue;
- set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
- map_ops[i].dev_bus_addr >> PAGE_SHIFT);
- }
- return ret;
- }
-
- if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
- arch_enter_lazy_mmu_mode();
- lazy = true;
- }
-
- for (i = 0; i < count; i++) {
- /* Do not add to override if the map failed. */
- if (map_ops[i].status)
- continue;
-
- if (map_ops[i].flags & GNTMAP_contains_pte) {
- pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
- (map_ops[i].host_addr & ~PAGE_MASK));
- mfn = pte_mfn(*pte);
- } else {
- mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
- }
- ret = m2p_add_override(mfn, pages[i], kmap_ops ?
- &kmap_ops[i] : NULL);
- if (ret)
- goto out;
- }
-
- out:
- if (lazy)
- arch_leave_lazy_mmu_mode();
-
- return ret;
+ return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
@@ -993,39 +952,13 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
- int i, ret;
- bool lazy = false;
+ int ret;
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret)
return ret;
- /* this is basically a nop on x86 */
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
- for (i = 0; i < count; i++) {
- set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
- INVALID_P2M_ENTRY);
- }
- return ret;
- }
-
- if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
- arch_enter_lazy_mmu_mode();
- lazy = true;
- }
-
- for (i = 0; i < count; i++) {
- ret = m2p_remove_override(pages[i], kmap_ops ?
- &kmap_ops[i] : NULL);
- if (ret)
- goto out;
- }
-
- out:
- if (lazy)
- arch_leave_lazy_mmu_mode();
-
- return ret;
+ return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 624e8dc24532..fc6c94c0b436 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -46,6 +46,20 @@ struct suspend_info {
void (*post)(int cancelled);
};
+static RAW_NOTIFIER_HEAD(xen_resume_notifier);
+
+void xen_resume_notifier_register(struct notifier_block *nb)
+{
+ raw_notifier_chain_register(&xen_resume_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(xen_resume_notifier_register);
+
+void xen_resume_notifier_unregister(struct notifier_block *nb)
+{
+ raw_notifier_chain_unregister(&xen_resume_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(xen_resume_notifier_unregister);
+
#ifdef CONFIG_HIBERNATE_CALLBACKS
static void xen_hvm_post_suspend(int cancelled)
{
@@ -152,6 +166,8 @@ static void do_suspend(void)
err = stop_machine(xen_suspend, &si, cpumask_of(0));
+ raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
+
dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
if (err) {
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 79e1dff7ed4f..0aac403d53fd 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -40,6 +40,7 @@
#include <linux/capability.h>
#include <xen/xen.h>
+#include <xen/acpi.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/interface/platform.h>
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index a1361c312c06..3454973dc3bb 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -45,7 +45,7 @@ static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
static uint64_t callback_via;
-unsigned long alloc_xen_mmio(unsigned long len)
+static unsigned long alloc_xen_mmio(unsigned long len)
{
unsigned long addr;
diff --git a/drivers/xen/xen-acpi-cpuhotplug.c b/drivers/xen/xen-acpi-cpuhotplug.c
index 80875fb770ed..3e62ee4b3b66 100644
--- a/drivers/xen/xen-acpi-cpuhotplug.c
+++ b/drivers/xen/xen-acpi-cpuhotplug.c
@@ -313,7 +313,7 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
goto out;
}
- (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+ (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
out:
acpi_scan_lock_release();
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
index f8d18626969a..34e40b733f9a 100644
--- a/drivers/xen/xen-acpi-memhotplug.c
+++ b/drivers/xen/xen-acpi-memhotplug.c
@@ -285,7 +285,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
return;
}
- (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+ (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
return;
}
diff --git a/drivers/xen/xen-acpi-pad.c b/drivers/xen/xen-acpi-pad.c
index 40c4bc06b5fa..f83b754505f8 100644
--- a/drivers/xen/xen-acpi-pad.c
+++ b/drivers/xen/xen-acpi-pad.c
@@ -77,27 +77,14 @@ static int acpi_pad_pur(acpi_handle handle)
return num;
}
-/* Notify firmware how many CPUs are idle */
-static void acpi_pad_ost(acpi_handle handle, int stat,
- uint32_t idle_nums)
-{
- union acpi_object params[3] = {
- {.type = ACPI_TYPE_INTEGER,},
- {.type = ACPI_TYPE_INTEGER,},
- {.type = ACPI_TYPE_BUFFER,},
- };
- struct acpi_object_list arg_list = {3, params};
-
- params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
- params[1].integer.value = stat;
- params[2].buffer.length = 4;
- params[2].buffer.pointer = (void *)&idle_nums;
- acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-}
-
static void acpi_pad_handle_notify(acpi_handle handle)
{
int idle_nums;
+ struct acpi_buffer param = {
+ .length = 4,
+ .pointer = (void *)&idle_nums,
+ };
+
mutex_lock(&xen_cpu_lock);
idle_nums = acpi_pad_pur(handle);
@@ -109,7 +96,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
idle_nums = xen_acpi_pad_idle_cpus(idle_nums)
?: xen_acpi_pad_idle_cpus_num();
if (idle_nums >= 0)
- acpi_pad_ost(handle, 0, idle_nums);
+ acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY,
+ 0, &param);
mutex_unlock(&xen_cpu_lock);
}
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 7231859119f1..82358d14ecf1 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -27,10 +27,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/syscore_ops.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <xen/xen.h>
+#include <xen/xen-ops.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -495,14 +495,15 @@ static int xen_upload_processor_pm_data(void)
return rc;
}
-static void xen_acpi_processor_resume(void)
+static int xen_acpi_processor_resume(struct notifier_block *nb,
+ unsigned long action, void *data)
{
bitmap_zero(acpi_ids_done, nr_acpi_bits);
- xen_upload_processor_pm_data();
+ return xen_upload_processor_pm_data();
}
-static struct syscore_ops xap_syscore_ops = {
- .resume = xen_acpi_processor_resume,
+struct notifier_block xen_acpi_processor_resume_nb = {
+ .notifier_call = xen_acpi_processor_resume,
};
static int __init xen_acpi_processor_init(void)
@@ -555,7 +556,7 @@ static int __init xen_acpi_processor_init(void)
if (rc)
goto err_unregister;
- register_syscore_ops(&xap_syscore_ops);
+ xen_resume_notifier_register(&xen_acpi_processor_resume_nb);
return 0;
err_unregister:
@@ -574,7 +575,7 @@ static void __exit xen_acpi_processor_exit(void)
{
int i;
- unregister_syscore_ops(&xap_syscore_ops);
+ xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb);
kfree(acpi_ids_done);
kfree(acpi_id_present);
kfree(acpi_id_cst_present);
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 64eb0cd8b8af..929dd46bb40c 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -213,8 +213,7 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
entries[i].vector = op->msix_entries[i].vector;
}
- result = pci_enable_msix(dev, entries, op->value);
-
+ result = pci_enable_msix_exact(dev, entries, op->value);
if (result == 0) {
for (i = 0; i < op->value; i++) {
op->msix_entries[i].entry = entries[i].entry;
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 745ad79c1d8e..3b2bffde534f 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -170,6 +170,7 @@ static void frontswap_selfshrink(void)
tgt_frontswap_pages = cur_frontswap_pages -
(cur_frontswap_pages / frontswap_hysteresis);
frontswap_shrink(tgt_frontswap_pages);
+ frontswap_inertia_counter = frontswap_inertia;
}
#endif /* CONFIG_FRONTSWAP */
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 01d59e66565d..439c9dca9eee 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -401,33 +401,6 @@ EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
/**
- * Bind to an existing interdomain event channel in another domain. Returns 0
- * on success and stores the local port in *port. On error, returns -errno,
- * switches the device to XenbusStateClosing, and saves the error in XenStore.
- */
-int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
-{
- struct evtchn_bind_interdomain bind_interdomain;
- int err;
-
- bind_interdomain.remote_dom = dev->otherend_id;
- bind_interdomain.remote_port = remote_port;
-
- err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
- &bind_interdomain);
- if (err)
- xenbus_dev_fatal(dev, err,
- "binding to event channel %d from domain %d",
- remote_port, dev->otherend_id);
- else
- *port = bind_interdomain.local_port;
-
- return err;
-}
-EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
-
-
-/**
* Free an existing event channel. Returns 0 on success or -errno on error.
*/
int xenbus_free_evtchn(struct xenbus_device *dev, int port)